path: root/include
Diffstat (limited to '')
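A diffstat of this shape can be regenerated with stock git; the v6.0..v6.1 range below is only a hypothetical placeholder for the two commits actually being compared:

    git diff -M --stat --summary v6.0..v6.1 -- include/

Here -M enables the rename detection behind the "(renamed from ...)" annotations, --stat prints the per-file change counts shown in the listing, and --summary reports renames, deletions, and mode changes. The pathspec after -- restricts the diff to include/, matching the path shown above.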
-rw-r--r--arch/arm/mach-pxa/viper-pcmcia.h (renamed from include/linux/platform_data/pcmcia-pxa2xx_viper.h)0
-rw-r--r--block/elevator.h (renamed from include/linux/elevator.h)29
-rw-r--r--drivers/gpu/drm/ttm/ttm_module.h (renamed from include/drm/ttm/ttm_module.h)11
-rw-r--r--drivers/power/supply/ab8500-bm.h (renamed from include/linux/mfd/abx500/ab8500-bm.h)131
-rw-r--r--drivers/staging/media/deprecated/saa7146/common/saa7146.h (renamed from include/media/drv-intf/saa7146.h)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/common/saa7146_vv.h (renamed from include/media/drv-intf/saa7146_vv.h)2
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc.h (renamed from include/media/davinci/dm355_ccdc.h)0
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc.h (renamed from include/media/davinci/dm644x_ccdc.h)0
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/isif.h (renamed from include/media/davinci/isif.h)8
-rw-r--r--drivers/staging/vme_user/vme.h (renamed from include/linux/vme.h)2
-rw-r--r--include/acpi/acbuffer.h12
-rw-r--r--include/acpi/acconfig.h6
-rw-r--r--include/acpi/acexcep.h16
-rw-r--r--include/acpi/acnames.h3
-rw-r--r--include/acpi/acoutput.h6
-rw-r--r--include/acpi/acpi.h2
-rw-r--r--include/acpi/acpi_bus.h156
-rw-r--r--include/acpi/acpi_drivers.h31
-rw-r--r--include/acpi/acpi_io.h2
-rw-r--r--include/acpi/acpi_numa.h25
-rw-r--r--include/acpi/acpiosxf.h2
-rw-r--r--include/acpi/acpixf.h12
-rw-r--r--include/acpi/acrestyp.h11
-rw-r--r--include/acpi/actbl.h2
-rw-r--r--include/acpi/actbl1.h128
-rw-r--r--include/acpi/actbl2.h998
-rw-r--r--include/acpi/actbl3.h97
-rw-r--r--include/acpi/actypes.h56
-rw-r--r--include/acpi/acuuid.h17
-rw-r--r--include/acpi/apei.h9
-rw-r--r--include/acpi/battery.h2
-rw-r--r--include/acpi/button.h4
-rw-r--r--include/acpi/cppc_acpi.h71
-rw-r--r--include/acpi/ghes.h23
-rw-r--r--include/acpi/pcc.h21
-rw-r--r--include/acpi/platform/acenv.h2
-rw-r--r--include/acpi/platform/acenvex.h2
-rw-r--r--include/acpi/platform/acgcc.h33
-rw-r--r--include/acpi/platform/acgccex.h2
-rw-r--r--include/acpi/platform/acintel.h2
-rw-r--r--include/acpi/platform/aclinux.h14
-rw-r--r--include/acpi/platform/aclinuxex.h2
-rw-r--r--include/acpi/processor.h24
-rw-r--r--include/acpi/video.h9
-rw-r--r--include/asm-generic/5level-fixup.h58
-rw-r--r--include/asm-generic/Kbuild58
-rw-r--r--include/asm-generic/access_ok.h48
-rw-r--r--include/asm-generic/archrandom.h15
-rw-r--r--include/asm-generic/atomic-instrumented.h1788
-rw-r--r--include/asm-generic/atomic-long.h1013
-rw-r--r--include/asm-generic/atomic.h120
-rw-r--r--include/asm-generic/atomic64.h45
-rw-r--r--include/asm-generic/barrier.h117
-rw-r--r--include/asm-generic/bitops.h6
-rw-r--r--include/asm-generic/bitops/atomic.h38
-rw-r--r--include/asm-generic/bitops/builtin-ffs.h7
-rw-r--r--include/asm-generic/bitops/ffs.h2
-rw-r--r--include/asm-generic/bitops/find.h100
-rw-r--r--include/asm-generic/bitops/generic-non-atomic.h175
-rw-r--r--include/asm-generic/bitops/instrumented-atomic.h29
-rw-r--r--include/asm-generic/bitops/instrumented-lock.h13
-rw-r--r--include/asm-generic/bitops/instrumented-non-atomic.h87
-rw-r--r--include/asm-generic/bitops/le.h34
-rw-r--r--include/asm-generic/bitops/lock.h39
-rw-r--r--include/asm-generic/bitops/non-atomic.h109
-rw-r--r--include/asm-generic/bitops/non-instrumented-non-atomic.h17
-rw-r--r--include/asm-generic/bitsperlong.h12
-rw-r--r--include/asm-generic/bug.h61
-rw-r--r--include/asm-generic/cacheflush.h40
-rw-r--r--include/asm-generic/checksum.h23
-rw-r--r--include/asm-generic/cmpxchg-local.h4
-rw-r--r--include/asm-generic/cmpxchg.h42
-rw-r--r--include/asm-generic/compat.h143
-rw-r--r--include/asm-generic/div64.h24
-rw-r--r--include/asm-generic/dma-contiguous.h10
-rw-r--r--include/asm-generic/early_ioremap.h6
-rw-r--r--include/asm-generic/error-injection.h6
-rw-r--r--include/asm-generic/export.h24
-rw-r--r--include/asm-generic/futex.h33
-rw-r--r--include/asm-generic/getorder.h2
-rw-r--r--include/asm-generic/gpio.h4
-rw-r--r--include/asm-generic/hardirq.h6
-rw-r--r--include/asm-generic/hugetlb.h30
-rw-r--r--include/asm-generic/hyperv-tlfs.h793
-rw-r--r--include/asm-generic/io.h267
-rw-r--r--include/asm-generic/iomap.h43
-rw-r--r--include/asm-generic/kmap_size.h12
-rw-r--r--include/asm-generic/kmap_types.h11
-rw-r--r--include/asm-generic/kprobes.h4
-rw-r--r--include/asm-generic/kvm_types.h5
-rw-r--r--include/asm-generic/logic_io.h78
-rw-r--r--include/asm-generic/memory_model.h37
-rw-r--r--include/asm-generic/mm-arch-hooks.h16
-rw-r--r--include/asm-generic/mmiowb.h6
-rw-r--r--include/asm-generic/mmu_context.h58
-rw-r--r--include/asm-generic/module.lds.h10
-rw-r--r--include/asm-generic/mshyperv.h144
-rw-r--r--include/asm-generic/msi.h8
-rw-r--r--include/asm-generic/nommu_context.h19
-rw-r--r--include/asm-generic/numa.h52
-rw-r--r--include/asm-generic/page.h4
-rw-r--r--include/asm-generic/pci.h39
-rw-r--r--include/asm-generic/pci_iomap.h7
-rw-r--r--include/asm-generic/percpu.h18
-rw-r--r--include/asm-generic/pgalloc.h92
-rw-r--r--include/asm-generic/pgtable-nop4d-hack.h64
-rw-r--r--include/asm-generic/pgtable-nop4d.h3
-rw-r--r--include/asm-generic/pgtable-nopmd.h5
-rw-r--r--include/asm-generic/pgtable-nopud.h7
-rw-r--r--include/asm-generic/pgtable.h1261
-rw-r--r--include/asm-generic/pgtable_uffd.h66
-rw-r--r--include/asm-generic/preempt.h2
-rw-r--r--include/asm-generic/qrwlock.h63
-rw-r--r--include/asm-generic/qrwlock_types.h2
-rw-r--r--include/asm-generic/qspinlock.h38
-rw-r--r--include/asm-generic/qspinlock_types.h8
-rw-r--r--include/asm-generic/rwonce.h90
-rw-r--r--include/asm-generic/seccomp.h2
-rw-r--r--include/asm-generic/sections.h122
-rw-r--r--include/asm-generic/signal.h2
-rw-r--r--include/asm-generic/softirq_stack.h14
-rw-r--r--include/asm-generic/spinlock.h94
-rw-r--r--include/asm-generic/spinlock_types.h17
-rw-r--r--include/asm-generic/syscall.h36
-rw-r--r--include/asm-generic/termios-base.h78
-rw-r--r--include/asm-generic/termios.h108
-rw-r--r--include/asm-generic/tlb.h159
-rw-r--r--include/asm-generic/topology.h4
-rw-r--r--include/asm-generic/uaccess.h192
-rw-r--r--include/asm-generic/unaligned.h167
-rw-r--r--include/asm-generic/vdso/vsyscall.h14
-rw-r--r--include/asm-generic/vermagic.h7
-rw-r--r--include/asm-generic/vmlinux.lds.h304
-rw-r--r--include/asm-generic/xor.h84
-rw-r--r--include/clocksource/arm_arch_timer.h4
-rw-r--r--include/clocksource/hyperv_timer.h14
-rw-r--r--include/clocksource/samsung_pwm.h3
-rw-r--r--include/clocksource/timer-goldfish.h31
-rw-r--r--include/clocksource/timer-riscv.h16
-rw-r--r--include/clocksource/timer-sp804.h29
-rw-r--r--include/clocksource/timer-ti-dm.h258
-rw-r--r--include/clocksource/timer-xilinx.h73
-rw-r--r--include/crypto/acompress.h22
-rw-r--r--include/crypto/aead.h63
-rw-r--r--include/crypto/akcipher.h4
-rw-r--r--include/crypto/algapi.h99
-rw-r--r--include/crypto/aria.h458
-rw-r--r--include/crypto/asym_tpm_subtype.h19
-rw-r--r--include/crypto/blake2b.h66
-rw-r--r--include/crypto/blake2s.h68
-rw-r--r--include/crypto/cbc.h141
-rw-r--r--include/crypto/chacha.h24
-rw-r--r--include/crypto/chacha20poly1305.h2
-rw-r--r--include/crypto/cryptd.h3
-rw-r--r--include/crypto/curve25519.h8
-rw-r--r--include/crypto/dh.h26
-rw-r--r--include/crypto/drbg.h17
-rw-r--r--include/crypto/ecc_curve.h60
-rw-r--r--include/crypto/ecdh.h3
-rw-r--r--include/crypto/engine.h28
-rw-r--r--include/crypto/gf128mul.h2
-rw-r--r--include/crypto/hash.h57
-rw-r--r--include/crypto/hash_info.h3
-rw-r--r--include/crypto/if_alg.h11
-rw-r--r--include/crypto/internal/acompress.h2
-rw-r--r--include/crypto/internal/aead.h25
-rw-r--r--include/crypto/internal/blake2b.h115
-rw-r--r--include/crypto/internal/blake2s.h27
-rw-r--r--include/crypto/internal/cipher.h218
-rw-r--r--include/crypto/internal/ecc.h (renamed from crypto/ecc.h)100
-rw-r--r--include/crypto/internal/geniv.h2
-rw-r--r--include/crypto/internal/hash.h27
-rw-r--r--include/crypto/internal/kdf_selftest.h71
-rw-r--r--include/crypto/internal/kpp.h158
-rw-r--r--include/crypto/internal/poly1305.h3
-rw-r--r--include/crypto/internal/skcipher.h2
-rw-r--r--include/crypto/kdf_sp800108.h61
-rw-r--r--include/crypto/kpp.h6
-rw-r--r--include/crypto/pcrypt.h2
-rw-r--r--include/crypto/poly1305.h6
-rw-r--r--include/crypto/polyval.h22
-rw-r--r--include/crypto/public_key.h10
-rw-r--r--include/crypto/rng.h2
-rw-r--r--include/crypto/scatterwalk.h16
-rw-r--r--include/crypto/sha.h162
-rw-r--r--include/crypto/sha1.h46
-rw-r--r--include/crypto/sha1_base.h5
-rw-r--r--include/crypto/sha2.h134
-rw-r--r--include/crypto/sha256_base.h11
-rw-r--r--include/crypto/sha512_base.h5
-rw-r--r--include/crypto/skcipher.h12
-rw-r--r--include/crypto/sm2.h25
-rw-r--r--include/crypto/sm3.h34
-rw-r--r--include/crypto/sm3_base.h3
-rw-r--r--include/crypto/sm4.h29
-rw-r--r--include/drm/amd_asic_type.h15
-rw-r--r--include/drm/bridge/analogix_dp.h5
-rw-r--r--include/drm/bridge/dw_hdmi.h42
-rw-r--r--include/drm/bridge/dw_mipi_dsi.h5
-rw-r--r--include/drm/bridge/mhl.h4
-rw-r--r--include/drm/display/drm_dp.h1693
-rw-r--r--include/drm/display/drm_dp_aux_bus.h85
-rw-r--r--include/drm/display/drm_dp_dual_mode_helper.h (renamed from include/drm/drm_dp_dual_mode_helper.h)14
-rw-r--r--include/drm/display/drm_dp_helper.h766
-rw-r--r--include/drm/display/drm_dp_mst_helper.h1007
-rw-r--r--include/drm/display/drm_dsc.h (renamed from include/drm/drm_dsc.h)12
-rw-r--r--include/drm/display/drm_dsc_helper.h20
-rw-r--r--include/drm/display/drm_hdcp.h (renamed from include/drm/drm_hdcp.h)32
-rw-r--r--include/drm/display/drm_hdcp_helper.h22
-rw-r--r--include/drm/display/drm_hdmi_helper.h27
-rw-r--r--include/drm/display/drm_scdc.h88
-rw-r--r--include/drm/display/drm_scdc_helper.h79
-rw-r--r--include/drm/drm_agpsupport.h135
-rw-r--r--include/drm/drm_aperture.h39
-rw-r--r--include/drm/drm_atomic.h162
-rw-r--r--include/drm/drm_atomic_helper.h33
-rw-r--r--include/drm/drm_atomic_state_helper.h13
-rw-r--r--include/drm/drm_atomic_uapi.h2
-rw-r--r--include/drm/drm_audio_component.h4
-rw-r--r--include/drm/drm_auth.h68
-rw-r--r--include/drm/drm_bridge.h491
-rw-r--r--include/drm/drm_bridge_connector.h18
-rw-r--r--include/drm/drm_buddy.h159
-rw-r--r--include/drm/drm_cache.h15
-rw-r--r--include/drm/drm_client.h21
-rw-r--r--include/drm/drm_connector.h400
-rw-r--r--include/drm/drm_crtc.h177
-rw-r--r--include/drm/drm_damage_helper.h18
-rw-r--r--include/drm/drm_debugfs.h16
-rw-r--r--include/drm/drm_device.h84
-rw-r--r--include/drm/drm_displayid.h135
-rw-r--r--include/drm/drm_dp_helper.h1581
-rw-r--r--include/drm/drm_dp_mst_helper.h908
-rw-r--r--include/drm/drm_drv.h442
-rw-r--r--include/drm/drm_edid.h161
-rw-r--r--include/drm/drm_encoder.h68
-rw-r--r--include/drm/drm_encoder_slave.h2
-rw-r--r--include/drm/drm_fb_cma_helper.h18
-rw-r--r--include/drm/drm_fb_dma_helper.h23
-rw-r--r--include/drm/drm_fb_helper.h102
-rw-r--r--include/drm/drm_file.h37
-rw-r--r--include/drm/drm_format_helper.h61
-rw-r--r--include/drm/drm_fourcc.h24
-rw-r--r--include/drm/drm_framebuffer.h54
-rw-r--r--include/drm/drm_gem.h137
-rw-r--r--include/drm/drm_gem_atomic_helper.h146
-rw-r--r--include/drm/drm_gem_cma_helper.h133
-rw-r--r--include/drm/drm_gem_dma_helper.h279
-rw-r--r--include/drm/drm_gem_framebuffer_helper.h31
-rw-r--r--include/drm/drm_gem_shmem_helper.h179
-rw-r--r--include/drm/drm_gem_ttm_helper.h13
-rw-r--r--include/drm/drm_gem_vram_helper.h79
-rw-r--r--include/drm/drm_hashtab.h79
-rw-r--r--include/drm/drm_ioctl.h2
-rw-r--r--include/drm/drm_irq.h32
-rw-r--r--include/drm/drm_legacy.h123
-rw-r--r--include/drm/drm_managed.h110
-rw-r--r--include/drm/drm_mipi_dbi.h46
-rw-r--r--include/drm/drm_mipi_dsi.h40
-rw-r--r--include/drm/drm_mm.h9
-rw-r--r--include/drm/drm_mode_config.h73
-rw-r--r--include/drm/drm_mode_object.h7
-rw-r--r--include/drm/drm_modes.h266
-rw-r--r--include/drm/drm_modeset_helper_vtables.h234
-rw-r--r--include/drm/drm_modeset_lock.h18
-rw-r--r--include/drm/drm_module.h125
-rw-r--r--include/drm/drm_of.h29
-rw-r--r--include/drm/drm_panel.h43
-rw-r--r--include/drm/drm_pci.h74
-rw-r--r--include/drm/drm_plane.h190
-rw-r--r--include/drm/drm_plane_helper.h40
-rw-r--r--include/drm/drm_prime.h17
-rw-r--r--include/drm/drm_print.h228
-rw-r--r--include/drm/drm_privacy_screen_consumer.h65
-rw-r--r--include/drm/drm_privacy_screen_driver.h95
-rw-r--r--include/drm/drm_privacy_screen_machine.h46
-rw-r--r--include/drm/drm_probe_helper.h14
-rw-r--r--include/drm/drm_property.h11
-rw-r--r--include/drm/drm_rect.h34
-rw-r--r--include/drm/drm_scdc_helper.h136
-rw-r--r--include/drm/drm_simple_kms_helper.h96
-rw-r--r--include/drm/drm_sysfs.h1
-rw-r--r--include/drm/drm_vblank.h57
-rw-r--r--include/drm/drm_vblank_work.h71
-rw-r--r--include/drm/drm_vma_manager.h2
-rw-r--r--include/drm/drm_writeback.h20
-rw-r--r--include/drm/gpu_scheduler.h319
-rw-r--r--include/drm/gud.h335
-rw-r--r--include/drm/i915_component.h1
-rw-r--r--include/drm/i915_drm.h3
-rw-r--r--include/drm/i915_mei_hdcp_interface.h3
-rw-r--r--include/drm/i915_pciids.h301
-rw-r--r--include/drm/i915_pxp_tee_interface.h42
-rw-r--r--include/drm/intel-gtt.h35
-rw-r--r--include/drm/ttm/ttm_bo_api.h437
-rw-r--r--include/drm/ttm/ttm_bo_driver.h773
-rw-r--r--include/drm/ttm/ttm_caching.h55
-rw-r--r--include/drm/ttm/ttm_debug.h31
-rw-r--r--include/drm/ttm/ttm_device.h302
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h19
-rw-r--r--include/drm/ttm/ttm_kmap_iter.h61
-rw-r--r--include/drm/ttm/ttm_memory.h97
-rw-r--r--include/drm/ttm/ttm_page_alloc.h122
-rw-r--r--include/drm/ttm/ttm_placement.h41
-rw-r--r--include/drm/ttm/ttm_pool.h92
-rw-r--r--include/drm/ttm/ttm_range_manager.h56
-rw-r--r--include/drm/ttm/ttm_resource.h428
-rw-r--r--include/drm/ttm/ttm_set_memory.h150
-rw-r--r--include/drm/ttm/ttm_tt.h246
-rw-r--r--include/dt-bindings/arm/coresight-cti-dt.h37
-rw-r--r--include/dt-bindings/ata/ahci.h20
-rw-r--r--include/dt-bindings/bus/moxtet.h2
-rw-r--r--include/dt-bindings/bus/ti-sysc.h4
-rw-r--r--include/dt-bindings/clock/actions,s500-cmu.h9
-rw-r--r--include/dt-bindings/clock/agilex-clock.h72
-rw-r--r--include/dt-bindings/clock/alphascale,asm9260.h2
-rw-r--r--include/dt-bindings/clock/am3.h93
-rw-r--r--include/dt-bindings/clock/am4.h99
-rw-r--r--include/dt-bindings/clock/aspeed-clock.h1
-rw-r--r--include/dt-bindings/clock/ast2600-clock.h1
-rw-r--r--include/dt-bindings/clock/at91.h17
-rw-r--r--include/dt-bindings/clock/axg-clkc.h26
-rw-r--r--include/dt-bindings/clock/axis,artpec6-clkctrl.h2
-rw-r--r--include/dt-bindings/clock/bcm21664.h10
-rw-r--r--include/dt-bindings/clock/bcm281xx.h10
-rw-r--r--include/dt-bindings/clock/bcm3368-clock.h24
-rw-r--r--include/dt-bindings/clock/bcm6318-clock.h42
-rw-r--r--include/dt-bindings/clock/bcm63268-clock.h30
-rw-r--r--include/dt-bindings/clock/bcm6328-clock.h19
-rw-r--r--include/dt-bindings/clock/bcm6358-clock.h18
-rw-r--r--include/dt-bindings/clock/bcm6362-clock.h26
-rw-r--r--include/dt-bindings/clock/bcm6368-clock.h24
-rw-r--r--include/dt-bindings/clock/boston-clock.h3
-rw-r--r--include/dt-bindings/clock/bt1-ccu.h48
-rw-r--r--include/dt-bindings/clock/cirrus,cs2000-cp.h14
-rw-r--r--include/dt-bindings/clock/dm814.h5
-rw-r--r--include/dt-bindings/clock/dra7.h179
-rw-r--r--include/dt-bindings/clock/efm32-cmu.h43
-rw-r--r--include/dt-bindings/clock/en7523-clk.h17
-rw-r--r--include/dt-bindings/clock/exynos4.h4
-rw-r--r--include/dt-bindings/clock/exynos5250.h6
-rw-r--r--include/dt-bindings/clock/exynos5420.h6
-rw-r--r--include/dt-bindings/clock/exynos7885.h151
-rw-r--r--include/dt-bindings/clock/exynos850.h325
-rw-r--r--include/dt-bindings/clock/fsd-clk.h150
-rw-r--r--include/dt-bindings/clock/fsl,qoriq-clockgen.h15
-rw-r--r--include/dt-bindings/clock/g12a-clkc.h6
-rw-r--r--include/dt-bindings/clock/gxbb-clkc.h1
-rw-r--r--include/dt-bindings/clock/hi3559av100-clock.h165
-rw-r--r--include/dt-bindings/clock/imx7d-clock.h3
-rw-r--r--include/dt-bindings/clock/imx7ulp-clock.h5
-rw-r--r--include/dt-bindings/clock/imx8-clock.h126
-rw-r--r--include/dt-bindings/clock/imx8-lpcg.h14
-rw-r--r--include/dt-bindings/clock/imx8mm-clock.h18
-rw-r--r--include/dt-bindings/clock/imx8mn-clock.h31
-rw-r--r--include/dt-bindings/clock/imx8mp-clock.h106
-rw-r--r--include/dt-bindings/clock/imx8mq-clock.h42
-rw-r--r--include/dt-bindings/clock/imx8ulp-clock.h258
-rw-r--r--include/dt-bindings/clock/imx93-clock.h208
-rw-r--r--include/dt-bindings/clock/imxrt1050-clock.h72
-rw-r--r--include/dt-bindings/clock/ingenic,jz4725b-cgu.h (renamed from include/dt-bindings/clock/jz4725b-cgu.h)0
-rw-r--r--include/dt-bindings/clock/ingenic,jz4740-cgu.h (renamed from include/dt-bindings/clock/jz4740-cgu.h)0
-rw-r--r--include/dt-bindings/clock/ingenic,jz4760-cgu.h56
-rw-r--r--include/dt-bindings/clock/ingenic,jz4770-cgu.h59
-rw-r--r--include/dt-bindings/clock/ingenic,jz4780-cgu.h91
-rw-r--r--include/dt-bindings/clock/ingenic,sysost.h35
-rw-r--r--include/dt-bindings/clock/ingenic,x1000-cgu.h54
-rw-r--r--include/dt-bindings/clock/ingenic,x1830-cgu.h57
-rw-r--r--include/dt-bindings/clock/intel,lgm-clk.h165
-rw-r--r--include/dt-bindings/clock/jz4770-cgu.h58
-rw-r--r--include/dt-bindings/clock/jz4780-cgu.h89
-rw-r--r--include/dt-bindings/clock/k210-clk.h53
-rw-r--r--include/dt-bindings/clock/lochnagar.h (renamed from include/dt-bindings/clk/lochnagar.h)0
-rw-r--r--include/dt-bindings/clock/marvell,mmp2-audio.h10
-rw-r--r--include/dt-bindings/clock/marvell,mmp2.h20
-rw-r--r--include/dt-bindings/clock/marvell,pxa168.h10
-rw-r--r--include/dt-bindings/clock/marvell,pxa910.h4
-rw-r--r--include/dt-bindings/clock/mediatek,mt6795-clk.h275
-rw-r--r--include/dt-bindings/clock/mediatek,mt8365-clk.h373
-rw-r--r--include/dt-bindings/clock/meson8b-clkc.h13
-rw-r--r--include/dt-bindings/clock/microchip,lan966x.h34
-rw-r--r--include/dt-bindings/clock/microchip,mpfs-clock.h71
-rw-r--r--include/dt-bindings/clock/microchip,sparx5.h23
-rw-r--r--include/dt-bindings/clock/mstar-msc313-mpll.h19
-rw-r--r--include/dt-bindings/clock/mt6765-clk.h313
-rw-r--r--include/dt-bindings/clock/mt7621-clk.h41
-rw-r--r--include/dt-bindings/clock/mt7986-clk.h169
-rw-r--r--include/dt-bindings/clock/mt8167-clk.h131
-rw-r--r--include/dt-bindings/clock/mt8173-clk.h1
-rw-r--r--include/dt-bindings/clock/mt8186-clk.h445
-rw-r--r--include/dt-bindings/clock/mt8192-clk.h585
-rw-r--r--include/dt-bindings/clock/mt8195-clk.h866
-rw-r--r--include/dt-bindings/clock/nuvoton,npcm7xx-clock.h2
-rw-r--r--include/dt-bindings/clock/nuvoton,npcm845-clk.h49
-rw-r--r--include/dt-bindings/clock/omap5.h2
-rw-r--r--include/dt-bindings/clock/qcom,apss-ipq.h12
-rw-r--r--include/dt-bindings/clock/qcom,camcc-sc7180.h121
-rw-r--r--include/dt-bindings/clock/qcom,camcc-sc7280.h127
-rw-r--r--include/dt-bindings/clock/qcom,camcc-sm8250.h138
-rw-r--r--include/dt-bindings/clock/qcom,dispcc-qcm2290.h34
-rw-r--r--include/dt-bindings/clock/qcom,dispcc-sc7280.h55
-rw-r--r--include/dt-bindings/clock/qcom,dispcc-sm6125.h41
-rw-r--r--include/dt-bindings/clock/qcom,dispcc-sm6350.h48
l---------include/dt-bindings/clock/qcom,dispcc-sm8150.h1
-rw-r--r--include/dt-bindings/clock/qcom,dispcc-sm8250.h75
l---------include/dt-bindings/clock/qcom,dispcc-sm8350.h1
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq806x.h5
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq8074.h8
-rw-r--r--include/dt-bindings/clock/qcom,gcc-mdm9607.h104
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8909.h218
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8939.h207
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8953.h234
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8976.h241
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8994.h49
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8998.h7
-rw-r--r--include/dt-bindings/clock/qcom,gcc-qcm2290.h188
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sc7180.h9
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sc7280.h226
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sc8180x.h309
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sc8280xp.h496
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdm660.h1
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdm845.h1
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdx55.h117
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdx65.h122
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm6115.h201
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm6125.h240
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm6350.h178
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm8150.h9
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm8250.h271
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm8350.h266
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm8450.h244
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sc7180.h3
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sc7280.h35
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sc8280xp.h35
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sdm660.h28
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sm6350.h37
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sm8150.h33
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sm8250.h34
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sm8350.h52
-rw-r--r--include/dt-bindings/clock/qcom,lcc-ipq806x.h2
-rw-r--r--include/dt-bindings/clock/qcom,lpass-sc7280.h16
-rw-r--r--include/dt-bindings/clock/qcom,lpassaudiocc-sc7280.h48
-rw-r--r--include/dt-bindings/clock/qcom,lpasscorecc-sc7180.h29
-rw-r--r--include/dt-bindings/clock/qcom,lpasscorecc-sc7280.h28
-rw-r--r--include/dt-bindings/clock/qcom,mmcc-msm8994.h155
-rw-r--r--include/dt-bindings/clock/qcom,mmcc-sdm660.h162
-rw-r--r--include/dt-bindings/clock/qcom,mss-sc7180.h12
-rw-r--r--include/dt-bindings/clock/qcom,rpmcc.h39
-rw-r--r--include/dt-bindings/clock/qcom,rpmh.h16
-rw-r--r--include/dt-bindings/clock/qcom,sm6115-dispcc.h36
-rw-r--r--include/dt-bindings/clock/qcom,sm6375-gcc.h234
-rw-r--r--include/dt-bindings/clock/qcom,sm8250-lpass-aoncc.h11
-rw-r--r--include/dt-bindings/clock/qcom,sm8250-lpass-audiocc.h13
-rw-r--r--include/dt-bindings/clock/qcom,sm8450-camcc.h159
-rw-r--r--include/dt-bindings/clock/qcom,sm8450-dispcc.h103
-rw-r--r--include/dt-bindings/clock/qcom,videocc-sc7280.h27
-rw-r--r--include/dt-bindings/clock/qcom,videocc-sm8150.h25
-rw-r--r--include/dt-bindings/clock/qcom,videocc-sm8250.h36
-rw-r--r--include/dt-bindings/clock/r8a7742-cpg-mssr.h42
-rw-r--r--include/dt-bindings/clock/r8a774e1-cpg-mssr.h59
-rw-r--r--include/dt-bindings/clock/r8a779a0-cpg-mssr.h55
-rw-r--r--include/dt-bindings/clock/r8a779f0-cpg-mssr.h64
-rw-r--r--include/dt-bindings/clock/r8a779g0-cpg-mssr.h90
-rw-r--r--include/dt-bindings/clock/r9a06g032-sysctrl.h1
-rw-r--r--include/dt-bindings/clock/r9a07g043-cpg.h204
-rw-r--r--include/dt-bindings/clock/r9a07g044-cpg.h220
-rw-r--r--include/dt-bindings/clock/r9a07g054-cpg.h229
-rw-r--r--include/dt-bindings/clock/r9a09g011-cpg.h352
-rw-r--r--include/dt-bindings/clock/rk3036-cru.h1
-rw-r--r--include/dt-bindings/clock/rk3368-cru.h3
-rw-r--r--include/dt-bindings/clock/rk3568-cru.h926
-rw-r--r--include/dt-bindings/clock/rockchip,rv1126-cru.h632
-rw-r--r--include/dt-bindings/clock/samsung,exynosautov9.h367
-rw-r--r--include/dt-bindings/clock/sifive-fu540-prci.h8
-rw-r--r--include/dt-bindings/clock/sifive-fu740-prci.h24
-rw-r--r--include/dt-bindings/clock/sprd,sc9863a-clk.h339
-rw-r--r--include/dt-bindings/clock/sprd,ums512-clk.h397
-rw-r--r--include/dt-bindings/clock/starfive-jh7100-audio.h41
-rw-r--r--include/dt-bindings/clock/starfive-jh7100.h202
-rw-r--r--include/dt-bindings/clock/ste-db8500-clkout.h17
-rw-r--r--include/dt-bindings/clock/stm32fx-clock.h4
-rw-r--r--include/dt-bindings/clock/stm32mp1-clks.h23
-rw-r--r--include/dt-bindings/clock/stm32mp13-clks.h229
-rw-r--r--include/dt-bindings/clock/stratix10-clock.h2
-rw-r--r--include/dt-bindings/clock/sun20i-d1-ccu.h156
-rw-r--r--include/dt-bindings/clock/sun20i-d1-r-ccu.h19
-rw-r--r--include/dt-bindings/clock/sun50i-a100-ccu.h116
-rw-r--r--include/dt-bindings/clock/sun50i-a100-r-ccu.h23
-rw-r--r--include/dt-bindings/clock/sun50i-a64-ccu.h4
-rw-r--r--include/dt-bindings/clock/sun50i-h6-r-ccu.h3
-rw-r--r--include/dt-bindings/clock/sun50i-h616-ccu.h116
-rw-r--r--include/dt-bindings/clock/sun6i-rtc.h10
-rw-r--r--include/dt-bindings/clock/sun8i-h3-ccu.h2
-rw-r--r--include/dt-bindings/clock/sunplus,sp7021-clkc.h88
-rw-r--r--include/dt-bindings/clock/tegra114-car.h18
-rw-r--r--include/dt-bindings/clock/tegra124-car-common.h18
-rw-r--r--include/dt-bindings/clock/tegra20-car.h2
-rw-r--r--include/dt-bindings/clock/tegra210-car.h24
-rw-r--r--include/dt-bindings/clock/tegra234-clock.h278
-rw-r--r--include/dt-bindings/clock/tegra30-car.h18
-rw-r--r--include/dt-bindings/clock/ti-dra7-atl.h10
-rw-r--r--include/dt-bindings/clock/toshiba,tmpv770x.h181
-rw-r--r--include/dt-bindings/clock/versaclock.h13
-rw-r--r--include/dt-bindings/clock/vf610-clock.h4
-rw-r--r--include/dt-bindings/clock/x1000-cgu.h44
-rw-r--r--include/dt-bindings/clock/xlnx-vcu.h15
-rw-r--r--include/dt-bindings/clock/zx296702-clock.h180
-rw-r--r--include/dt-bindings/clock/zx296718-clock.h164
-rw-r--r--include/dt-bindings/display/sdtv-standards.h76
-rw-r--r--include/dt-bindings/dma/jz4775-dma.h44
-rw-r--r--include/dt-bindings/dma/qcom-gpi.h11
-rw-r--r--include/dt-bindings/dma/x2000-dma.h54
-rw-r--r--include/dt-bindings/dma/xlnx-zynqmp-dpdma.h16
-rw-r--r--include/dt-bindings/firmware/imx/rsrc.h92
-rw-r--r--include/dt-bindings/gce/mt6779-gce.h222
-rw-r--r--include/dt-bindings/gce/mt8186-gce.h421
-rw-r--r--include/dt-bindings/gce/mt8192-gce.h335
-rw-r--r--include/dt-bindings/gce/mt8195-gce.h812
-rw-r--r--include/dt-bindings/gpio/gpio.h3
-rw-r--r--include/dt-bindings/gpio/meson-s4-gpio.h99
-rw-r--r--include/dt-bindings/gpio/msc313-gpio.h124
-rw-r--r--include/dt-bindings/gpio/tegra186-gpio.h4
-rw-r--r--include/dt-bindings/gpio/tegra234-gpio.h63
-rw-r--r--include/dt-bindings/gpio/tegra241-gpio.h42
-rw-r--r--include/dt-bindings/iio/adc/at91-sama5d2_adc.h3
-rw-r--r--include/dt-bindings/iio/adc/ingenic,adc.h7
-rw-r--r--include/dt-bindings/iio/adc/mediatek,mt6370_adc.h18
-rw-r--r--include/dt-bindings/iio/addac/adi,ad74413r.h21
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h67
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h88
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h46
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h28
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h28
-rw-r--r--include/dt-bindings/iio/qcom,spmi-vadc.h78
-rw-r--r--include/dt-bindings/input/atmel-maxtouch.h10
-rw-r--r--include/dt-bindings/input/cros-ec-keyboard.h103
-rw-r--r--include/dt-bindings/interconnect/fsl,imx8mp.h59
-rw-r--r--include/dt-bindings/interconnect/imx8mm.h50
-rw-r--r--include/dt-bindings/interconnect/imx8mn.h41
-rw-r--r--include/dt-bindings/interconnect/imx8mq.h48
-rw-r--r--include/dt-bindings/interconnect/qcom,icc.h26
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8939.h105
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8996.h163
-rw-r--r--include/dt-bindings/interconnect/qcom,osm-l3.h15
-rw-r--r--include/dt-bindings/interconnect/qcom,qcm2290.h94
-rw-r--r--include/dt-bindings/interconnect/qcom,sc7180.h161
-rw-r--r--include/dt-bindings/interconnect/qcom,sc7280.h165
-rw-r--r--include/dt-bindings/interconnect/qcom,sc8180x.h192
-rw-r--r--include/dt-bindings/interconnect/qcom,sc8280xp.h232
-rw-r--r--include/dt-bindings/interconnect/qcom,sdm660.h116
-rw-r--r--include/dt-bindings/interconnect/qcom,sdm845.h265
-rw-r--r--include/dt-bindings/interconnect/qcom,sdx55.h76
-rw-r--r--include/dt-bindings/interconnect/qcom,sdx65.h67
-rw-r--r--include/dt-bindings/interconnect/qcom,sm6350.h148
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8150.h162
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8250.h172
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8350.h172
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8450.h171
-rw-r--r--include/dt-bindings/interrupt-controller/apple-aic.h17
-rw-r--r--include/dt-bindings/interrupt-controller/irqc-rzg2l.h25
-rw-r--r--include/dt-bindings/leds/common.h53
-rw-r--r--include/dt-bindings/leds/rt4831-backlight.h23
-rw-r--r--include/dt-bindings/mailbox/qcom-ipcc.h37
-rw-r--r--include/dt-bindings/mailbox/tegra186-hsp.h5
-rw-r--r--include/dt-bindings/media/tvp5150.h2
-rw-r--r--include/dt-bindings/memory/mt2701-larb-port.h4
-rw-r--r--include/dt-bindings/memory/mt2712-larb-port.h6
-rw-r--r--include/dt-bindings/memory/mt6779-larb-port.h206
-rw-r--r--include/dt-bindings/memory/mt6795-larb-port.h95
-rw-r--r--include/dt-bindings/memory/mt8167-larb-port.h51
-rw-r--r--include/dt-bindings/memory/mt8173-larb-port.h6
-rw-r--r--include/dt-bindings/memory/mt8183-larb-port.h6
-rw-r--r--include/dt-bindings/memory/mt8186-memory-port.h217
-rw-r--r--include/dt-bindings/memory/mt8192-larb-port.h243
-rw-r--r--include/dt-bindings/memory/mt8195-memory-port.h408
-rw-r--r--include/dt-bindings/memory/mtk-memory-port.h17
-rw-r--r--include/dt-bindings/memory/tegra124-mc.h68
-rw-r--r--include/dt-bindings/memory/tegra20-mc.h53
-rw-r--r--include/dt-bindings/memory/tegra210-mc.h10
-rw-r--r--include/dt-bindings/memory/tegra234-mc.h143
-rw-r--r--include/dt-bindings/memory/tegra30-mc.h67
-rw-r--r--include/dt-bindings/mfd/cros_ec.h18
-rw-r--r--include/dt-bindings/mfd/qcom-pm8008.h19
-rw-r--r--include/dt-bindings/mux/mux.h2
-rw-r--r--include/dt-bindings/mux/ti-serdes.h120
-rw-r--r--include/dt-bindings/net/pcs-rzn1-miic.h33
-rw-r--r--include/dt-bindings/nvmem/microchip,sama7g5-otpc.h12
-rw-r--r--include/dt-bindings/phy/phy-cadence.h23
-rw-r--r--include/dt-bindings/phy/phy-imx8-pcie.h14
-rw-r--r--include/dt-bindings/phy/phy-lan966x-serdes.h14
-rw-r--r--include/dt-bindings/phy/phy-ti.h21
-rw-r--r--include/dt-bindings/phy/phy.h6
-rw-r--r--include/dt-bindings/pinctrl/apple.h13
-rw-r--r--include/dt-bindings/pinctrl/hisi.h12
-rw-r--r--include/dt-bindings/pinctrl/k210-fpioa.h276
-rw-r--r--include/dt-bindings/pinctrl/k3.h14
-rw-r--r--include/dt-bindings/pinctrl/keystone.h10
-rw-r--r--include/dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h1280
-rw-r--r--include/dt-bindings/pinctrl/mt65xx.h9
-rw-r--r--include/dt-bindings/pinctrl/mt6779-pinfunc.h1242
-rw-r--r--include/dt-bindings/pinctrl/mt6795-pinfunc.h908
-rw-r--r--include/dt-bindings/pinctrl/mt8135-pinfunc.h (renamed from arch/arm/boot/dts/mt8135-pinfunc.h)0
-rw-r--r--include/dt-bindings/pinctrl/mt8183-pinfunc.h (renamed from arch/arm64/boot/dts/mediatek/mt8183-pinfunc.h)0
-rw-r--r--include/dt-bindings/pinctrl/mt8186-pinfunc.h1174
-rw-r--r--include/dt-bindings/pinctrl/mt8192-pinfunc.h1344
-rw-r--r--include/dt-bindings/pinctrl/mt8195-pinfunc.h962
-rw-r--r--include/dt-bindings/pinctrl/mt8365-pinfunc.h858
-rw-r--r--include/dt-bindings/pinctrl/omap.h4
-rw-r--r--include/dt-bindings/pinctrl/pads-imx8dxl.h639
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-starfive-jh7100.h275
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-zynq.h17
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-zynqmp.h19
-rw-r--r--include/dt-bindings/pinctrl/r7s9210-pinctrl.h2
-rw-r--r--include/dt-bindings/pinctrl/rockchip.h11
-rw-r--r--include/dt-bindings/pinctrl/rzg2l-pinctrl.h23
-rw-r--r--include/dt-bindings/pinctrl/rzv2m-pinctrl.h23
-rw-r--r--include/dt-bindings/pinctrl/samsung.h20
-rw-r--r--include/dt-bindings/pinctrl/sppctl-sp7021.h179
-rw-r--r--include/dt-bindings/pinctrl/sppctl.h31
-rw-r--r--include/dt-bindings/power/fsl,imx93-power.h15
-rw-r--r--include/dt-bindings/power/imx8mm-power.h31
-rw-r--r--include/dt-bindings/power/imx8mn-power.h20
-rw-r--r--include/dt-bindings/power/imx8mp-power.h59
-rw-r--r--include/dt-bindings/power/imx8mq-power.h3
-rw-r--r--include/dt-bindings/power/imx8ulp-power.h26
-rw-r--r--include/dt-bindings/power/marvell,mmp2.h11
-rw-r--r--include/dt-bindings/power/meson-a1-power.h32
-rw-r--r--include/dt-bindings/power/meson-axg-power.h14
-rw-r--r--include/dt-bindings/power/meson-gxbb-power.h13
-rw-r--r--include/dt-bindings/power/meson-s4-power.h19
-rw-r--r--include/dt-bindings/power/meson8-power.h13
-rw-r--r--include/dt-bindings/power/mt6795-power.h16
-rw-r--r--include/dt-bindings/power/mt6797-power.h9
-rw-r--r--include/dt-bindings/power/mt8167-power.h17
-rw-r--r--include/dt-bindings/power/mt8183-power.h26
-rw-r--r--include/dt-bindings/power/mt8186-power.h32
-rw-r--r--include/dt-bindings/power/mt8192-power.h32
-rw-r--r--include/dt-bindings/power/mt8195-power.h46
-rw-r--r--include/dt-bindings/power/qcom-aoss-qmp.h14
-rw-r--r--include/dt-bindings/power/qcom-rpmpd.h213
-rw-r--r--include/dt-bindings/power/r8a7742-sysc.h29
-rw-r--r--include/dt-bindings/power/r8a774e1-sysc.h36
-rw-r--r--include/dt-bindings/power/r8a779a0-sysc.h59
-rw-r--r--include/dt-bindings/power/r8a779f0-sysc.h30
-rw-r--r--include/dt-bindings/power/r8a779g0-sysc.h45
-rw-r--r--include/dt-bindings/power/rk3568-power.h32
-rw-r--r--include/dt-bindings/power/rk3588-power.h69
-rw-r--r--include/dt-bindings/power/rockchip,rv1126-power.h35
-rw-r--r--include/dt-bindings/power/summit,smb347-charger.h23
-rw-r--r--include/dt-bindings/power/tegra234-powergate.h24
-rw-r--r--include/dt-bindings/pwm/raspberrypi,firmware-poe-pwm.h13
-rw-r--r--include/dt-bindings/regulator/dlg,da9121-regulator.h22
-rw-r--r--include/dt-bindings/regulator/dlg,da9211-regulator.h16
-rw-r--r--include/dt-bindings/regulator/mediatek,mt6360-regulator.h16
-rw-r--r--include/dt-bindings/regulator/mediatek,mt6397-regulator.h15
-rw-r--r--include/dt-bindings/regulator/richtek,rt5190a-regulator.h15
-rw-r--r--include/dt-bindings/regulator/ti,tps62864.h9
-rw-r--r--include/dt-bindings/reset/actions,s500-reset.h67
-rw-r--r--include/dt-bindings/reset/amlogic,meson-gxbb-reset.h2
-rw-r--r--include/dt-bindings/reset/amlogic,meson-s4-reset.h125
-rw-r--r--include/dt-bindings/reset/bcm6318-reset.h20
-rw-r--r--include/dt-bindings/reset/bcm63268-reset.h26
-rw-r--r--include/dt-bindings/reset/bcm6328-reset.h18
-rw-r--r--include/dt-bindings/reset/bcm6358-reset.h15
-rw-r--r--include/dt-bindings/reset/bcm6362-reset.h22
-rw-r--r--include/dt-bindings/reset/bcm6368-reset.h16
-rw-r--r--include/dt-bindings/reset/bt1-ccu.h34
-rw-r--r--include/dt-bindings/reset/delta,tn48m-reset.h20
-rw-r--r--include/dt-bindings/reset/imx8mp-reset.h50
-rw-r--r--include/dt-bindings/reset/imx8mq-reset.h61
-rw-r--r--include/dt-bindings/reset/imx8ulp-pcc-reset.h59
-rw-r--r--include/dt-bindings/reset/k210-rst.h42
-rw-r--r--include/dt-bindings/reset/mediatek,mt6795-resets.h53
-rw-r--r--include/dt-bindings/reset/mt2712-resets.h (renamed from include/dt-bindings/reset-controller/mt2712-resets.h)0
-rw-r--r--include/dt-bindings/reset/mt7621-reset.h37
-rw-r--r--include/dt-bindings/reset/mt7986-resets.h55
-rw-r--r--include/dt-bindings/reset/mt8173-resets.h2
-rw-r--r--include/dt-bindings/reset/mt8183-resets.h (renamed from include/dt-bindings/reset-controller/mt8183-resets.h)3
-rw-r--r--include/dt-bindings/reset/mt8186-resets.h41
-rw-r--r--include/dt-bindings/reset/mt8192-resets.h41
-rw-r--r--include/dt-bindings/reset/mt8195-resets.h38
-rw-r--r--include/dt-bindings/reset/qcom,gcc-ipq806x.h5
-rw-r--r--include/dt-bindings/reset/qcom,gcc-msm8939.h110
-rw-r--r--include/dt-bindings/reset/qcom,sdm845-pdc.h2
-rw-r--r--include/dt-bindings/reset/raspberrypi,firmware-reset.h13
-rw-r--r--include/dt-bindings/reset/realtek,rtd1195.h74
-rw-r--r--include/dt-bindings/reset/realtek,rtd1295.h3
-rw-r--r--include/dt-bindings/reset/sama7g5-reset.h10
-rw-r--r--include/dt-bindings/reset/starfive-jh7100.h126
-rw-r--r--include/dt-bindings/reset/stericsson,db8500-prcc-reset.h51
-rw-r--r--include/dt-bindings/reset/stm32mp1-resets.h15
-rw-r--r--include/dt-bindings/reset/stm32mp13-resets.h100
-rw-r--r--include/dt-bindings/reset/sun20i-d1-ccu.h77
-rw-r--r--include/dt-bindings/reset/sun20i-d1-r-ccu.h16
-rw-r--r--include/dt-bindings/reset/sun50i-a100-ccu.h68
-rw-r--r--include/dt-bindings/reset/sun50i-a100-r-ccu.h18
-rw-r--r--include/dt-bindings/reset/sun50i-h6-r-ccu.h1
-rw-r--r--include/dt-bindings/reset/sun50i-h616-ccu.h70
-rw-r--r--include/dt-bindings/reset/sunplus,sp7021-reset.h87
-rw-r--r--include/dt-bindings/reset/tegra234-reset.h73
-rw-r--r--include/dt-bindings/reset/ti-syscon.h2
-rw-r--r--include/dt-bindings/reset/toshiba,tmpv770x.h41
-rw-r--r--include/dt-bindings/reset/xlnx-versal-resets.h105
-rw-r--r--include/dt-bindings/soc/bcm-pmb.h12
-rw-r--r--include/dt-bindings/soc/bcm6318-pm.h17
-rw-r--r--include/dt-bindings/soc/bcm63268-pm.h21
-rw-r--r--include/dt-bindings/soc/bcm6328-pm.h17
-rw-r--r--include/dt-bindings/soc/bcm6362-pm.h21
-rw-r--r--include/dt-bindings/soc/qcom,gpr.h19
-rw-r--r--include/dt-bindings/soc/rockchip,vop2.h14
-rw-r--r--include/dt-bindings/soc/samsung,boot-mode.h18
-rw-r--r--include/dt-bindings/soc/samsung,exynos-usi.h17
-rw-r--r--include/dt-bindings/soc/tegra-pmc.h16
-rw-r--r--include/dt-bindings/soc/zte,pm_domains.h24
-rw-r--r--include/dt-bindings/sound/adi,adau1977.h15
-rw-r--r--include/dt-bindings/sound/apq8016-lpass.h7
-rw-r--r--include/dt-bindings/sound/cs35l45.h20
-rw-r--r--include/dt-bindings/sound/meson-aiu.h18
-rw-r--r--include/dt-bindings/sound/meson-g12a-toacodec.h10
-rw-r--r--include/dt-bindings/sound/microchip,pdmc.h13
-rw-r--r--include/dt-bindings/sound/qcom,lpass.h46
-rw-r--r--include/dt-bindings/sound/qcom,q6afe.h109
-rw-r--r--include/dt-bindings/sound/qcom,q6asm.h4
-rw-r--r--include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h226
-rw-r--r--include/dt-bindings/sound/qcom,wcd9335.h15
-rw-r--r--include/dt-bindings/sound/rt5640.h1
-rw-r--r--include/dt-bindings/sound/sc7180-lpass.h9
-rw-r--r--include/dt-bindings/sound/tlv320adc3xxx.h28
-rw-r--r--include/dt-bindings/sound/tlv320aic31xx-micbias.h9
-rw-r--r--include/dt-bindings/sound/tlv320aic31xx.h14
-rw-r--r--include/dt-bindings/usb/pd.h382
-rw-r--r--include/keys/asymmetric-parser.h2
-rw-r--r--include/keys/asymmetric-subtype.h2
-rw-r--r--include/keys/asymmetric-type.h14
-rw-r--r--include/keys/big_key-type.h3
-rw-r--r--include/keys/encrypted-type.h4
-rw-r--r--include/keys/rxrpc-type.h60
-rw-r--r--include/keys/system_keyring.h50
-rw-r--r--include/keys/trusted-type.h55
-rw-r--r--include/keys/trusted_caam.h11
-rw-r--r--include/keys/trusted_tee.h16
-rw-r--r--include/keys/trusted_tpm.h29
-rw-r--r--include/keys/user-type.h5
-rw-r--r--include/kunit/assert.h220
-rw-r--r--include/kunit/resource.h390
-rw-r--r--include/kunit/test-bug.h29
-rw-r--r--include/kunit/test.h1342
-rw-r--r--include/kvm/arm_arch_timer.h15
-rw-r--r--include/kvm/arm_hypercalls.h8
-rw-r--r--include/kvm/arm_pmu.h63
-rw-r--r--include/kvm/arm_psci.h16
-rw-r--r--include/kvm/arm_vgic.h66
-rw-r--r--include/linux/a.out.h18
-rw-r--r--include/linux/acct.h1
-rw-r--r--include/linux/acpi.h382
-rw-r--r--include/linux/acpi_agdi.h13
-rw-r--r--include/linux/acpi_iort.h44
-rw-r--r--include/linux/acpi_mdio.h26
-rw-r--r--include/linux/acpi_viot.h21
-rw-r--r--include/linux/adreno-smmu-priv.h72
-rw-r--r--include/linux/aer.h9
-rw-r--r--include/linux/ahci_platform.h8
-rw-r--r--include/linux/aio.h4
-rw-r--r--include/linux/align.h15
-rw-r--r--include/linux/amba/bus.h74
-rw-r--r--include/linux/amba/mmci.h6
-rw-r--r--include/linux/amba/pl022.h10
-rw-r--r--include/linux/amd-iommu.h42
-rw-r--r--include/linux/amd-pstate.h77
-rw-r--r--include/linux/anon_inodes.h9
-rw-r--r--include/linux/aperture.h56
-rw-r--r--include/linux/apple-mailbox.h19
-rw-r--r--include/linux/arch_topology.h50
-rw-r--r--include/linux/arm-smccc.h313
-rw-r--r--include/linux/arm_ffa.h284
-rw-r--r--include/linux/arm_sdei.h2
-rw-r--r--include/linux/armada-37xx-rwtm-mailbox.h2
-rw-r--r--include/linux/ascii85.h3
-rw-r--r--include/linux/asn1_encoder.h32
-rw-r--r--include/linux/async.h1
-rw-r--r--include/linux/async_tx.h25
-rw-r--r--include/linux/ata.h42
-rw-r--r--include/linux/atalk.h2
-rw-r--r--include/linux/atm_suni.h12
-rw-r--r--include/linux/atm_tcp.h2
-rw-r--r--include/linux/atmdev.h14
-rw-r--r--include/linux/atmel-isc-media.h58
-rw-r--r--include/linux/atomic-fallback.h2295
-rw-r--r--include/linux/atomic.h12
-rw-r--r--include/linux/atomic/atomic-arch-fallback.h2459
-rw-r--r--include/linux/atomic/atomic-instrumented.h2086
-rw-r--r--include/linux/atomic/atomic-long.h1014
-rw-r--r--include/linux/audit.h88
-rw-r--r--include/linux/audit_arch.h24
-rw-r--r--include/linux/auxiliary_bus.h251
-rw-r--r--include/linux/avf/virtchnl.h748
-rw-r--r--include/linux/backing-dev-defs.h81
-rw-r--r--include/linux/backing-dev.h235
-rw-r--r--include/linux/backlight.h400
-rw-r--r--include/linux/balloon_compaction.h28
-rw-r--r--include/linux/base64.h16
-rw-r--r--include/linux/bch.h11
-rw-r--r--include/linux/bcm47xx_sprom.h10
-rw-r--r--include/linux/bcm963xx_tag.h2
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h6
-rw-r--r--include/linux/binfmts.h80
-rw-r--r--include/linux/bio.h400
-rw-r--r--include/linux/bitfield.h38
-rw-r--r--include/linux/bitmap.h266
-rw-r--r--include/linux/bitops.h203
-rw-r--r--include/linux/bits.h23
-rw-r--r--include/linux/blk-cgroup.h737
-rw-r--r--include/linux/blk-crypto-profile.h166
-rw-r--r--include/linux/blk-crypto.h135
-rw-r--r--include/linux/blk-integrity.h184
-rw-r--r--include/linux/blk-mq-pci.h4
-rw-r--r--include/linux/blk-mq-rdma.h2
-rw-r--r--include/linux/blk-mq-virtio.h2
-rw-r--r--include/linux/blk-mq.h822
-rw-r--r--include/linux/blk-pm.h2
-rw-r--r--include/linux/blk_types.h337
-rw-r--r--include/linux/blkdev.h1721
-rw-r--r--include/linux/blktrace_api.h32
-rw-r--r--include/linux/bma150.h4
-rw-r--r--include/linux/bootconfig.h105
-rw-r--r--include/linux/bootmem_info.h66
-rw-r--r--include/linux/bottom_half.h9
-rw-r--r--include/linux/bpf-cgroup-defs.h79
-rw-r--r--include/linux/bpf-cgroup.h361
-rw-r--r--include/linux/bpf-netns.h62
-rw-r--r--include/linux/bpf.h1708
-rw-r--r--include/linux/bpf_local_storage.h171
-rw-r--r--include/linux/bpf_lsm.h77
-rw-r--r--include/linux/bpf_mem_alloc.h28
-rw-r--r--include/linux/bpf_types.h37
-rw-r--r--include/linux/bpf_verifier.h244
-rw-r--r--include/linux/bpfilter.h13
-rw-r--r--include/linux/bpfptr.h88
-rw-r--r--include/linux/brcmphy.h98
-rw-r--r--include/linux/bsearch.h26
-rw-r--r--include/linux/bsg-lib.h2
-rw-r--r--include/linux/bsg.h38
-rw-r--r--include/linux/btf.h322
-rw-r--r--include/linux/btf_ids.h269
-rw-r--r--include/linux/btree.h2
-rw-r--r--include/linux/buffer_head.h141
-rw-r--r--include/linux/bug.h10
-rw-r--r--include/linux/buildid.h20
-rw-r--r--include/linux/bvec.h104
-rw-r--r--include/linux/byteorder/generic.h4
-rw-r--r--include/linux/cache.h25
-rw-r--r--include/linux/cacheflush.h20
-rw-r--r--include/linux/cacheinfo.h41
-rw-r--r--include/linux/can/bittiming.h159
-rw-r--r--include/linux/can/can-ml.h12
-rw-r--r--include/linux/can/core.h9
-rw-r--r--include/linux/can/dev.h196
-rw-r--r--include/linux/can/dev/peak_canfd.h6
-rw-r--r--include/linux/can/led.h51
-rw-r--r--include/linux/can/length.h174
-rw-r--r--include/linux/can/platform/flexcan.h23
-rw-r--r--include/linux/can/rx-offload.h18
-rw-r--r--include/linux/can/skb.h110
-rw-r--r--include/linux/capability.h29
-rw-r--r--include/linux/cb710.h2
-rw-r--r--include/linux/cc_platform.h117
-rw-r--r--include/linux/ccp.h3
-rw-r--r--include/linux/cdrom.h17
-rw-r--r--include/linux/ceph/auth.h72
-rw-r--r--include/linux/ceph/ceph_features.h21
-rw-r--r--include/linux/ceph/ceph_fs.h79
-rw-r--r--include/linux/ceph/debugfs.h14
-rw-r--r--include/linux/ceph/decode.h8
-rw-r--r--include/linux/ceph/libceph.h37
-rw-r--r--include/linux/ceph/mdsmap.h3
-rw-r--r--include/linux/ceph/messenger.h301
-rw-r--r--include/linux/ceph/mon_client.h4
-rw-r--r--include/linux/ceph/msgr.h66
-rw-r--r--include/linux/ceph/osd_client.h54
-rw-r--r--include/linux/ceph/osdmap.h41
-rw-r--r--include/linux/ceph/rados.h22
-rw-r--r--include/linux/cfag12864b.h2
-rw-r--r--include/linux/cfi.h39
-rw-r--r--include/linux/cfi_types.h45
-rw-r--r--include/linux/cgroup-defs.h185
-rw-r--r--include/linux/cgroup.h116
-rw-r--r--include/linux/cgroup_api.h1
-rw-r--r--include/linux/cgroup_subsys.h4
-rw-r--r--include/linux/cleancache.h124
-rw-r--r--include/linux/clk-provider.h260
-rw-r--r--include/linux/clk.h195
-rw-r--r--include/linux/clk/at91_pmc.h59
-rw-r--r--include/linux/clk/davinci.h8
-rw-r--r--include/linux/clk/imx.h15
-rw-r--r--include/linux/clk/pxa.h16
-rw-r--r--include/linux/clk/samsung.h56
-rw-r--r--include/linux/clk/spear.h37
-rw-r--r--include/linux/clk/sunxi-ng.h15
-rw-r--r--include/linux/clk/tegra.h139
-rw-r--r--include/linux/clk/ti.h32
-rw-r--r--include/linux/clkdev.h7
-rw-r--r--include/linux/clock_cooling.h57
-rw-r--r--include/linux/clocksource.h107
-rw-r--r--include/linux/clocksource_ids.h12
-rw-r--r--include/linux/cma.h33
-rw-r--r--include/linux/cmdline-parser.h46
-rw-r--r--include/linux/comedi/comedi_8254.h (renamed from drivers/staging/comedi/drivers/comedi_8254.h)0
-rw-r--r--include/linux/comedi/comedi_8255.h42
-rw-r--r--include/linux/comedi/comedi_isadma.h (renamed from drivers/staging/comedi/drivers/comedi_isadma.h)0
-rw-r--r--include/linux/comedi/comedi_pci.h (renamed from drivers/staging/comedi/comedi_pci.h)3
-rw-r--r--include/linux/comedi/comedi_pcmcia.h (renamed from drivers/staging/comedi/comedi_pcmcia.h)3
-rw-r--r--include/linux/comedi/comedi_usb.h (renamed from drivers/staging/comedi/comedi_usb.h)3
-rw-r--r--include/linux/comedi/comedidev.h (renamed from drivers/staging/comedi/comedidev.h)5
-rw-r--r--include/linux/comedi/comedilib.h (renamed from drivers/staging/comedi/comedilib.h)0
-rw-r--r--include/linux/compaction.h40
-rw-r--r--include/linux/compat.h314
-rw-r--r--include/linux/compiler-clang.h108
-rw-r--r--include/linux/compiler-gcc.h114
-rw-r--r--include/linux/compiler-version.h14
-rw-r--r--include/linux/compiler.h239
-rw-r--r--include/linux/compiler_attributes.h161
-rw-r--r--include/linux/compiler_types.h240
-rw-r--r--include/linux/completion.h14
-rw-r--r--include/linux/component.h24
-rw-r--r--include/linux/configfs.h6
-rw-r--r--include/linux/connector.h12
-rw-r--r--include/linux/console.h29
-rw-r--r--include/linux/console_struct.h100
-rw-r--r--include/linux/consolemap.h59
-rw-r--r--include/linux/const.h11
-rw-r--r--include/linux/container_of.h40
-rw-r--r--include/linux/context_tracking.h156
-rw-r--r--include/linux/context_tracking_irq.h21
-rw-r--r--include/linux/context_tracking_state.h123
-rw-r--r--include/linux/cookie.h51
-rw-r--r--include/linux/coredump.h37
-rw-r--r--include/linux/coresight-pmu.h22
-rw-r--r--include/linux/coresight.h336
-rw-r--r--include/linux/counter.h878
-rw-r--r--include/linux/counter_enum.h45
-rw-r--r--include/linux/cper.h36
-rw-r--r--include/linux/cpu.h42
-rw-r--r--include/linux/cpu_cooling.h12
-rw-r--r--include/linux/cpu_rmap.h2
-rw-r--r--include/linux/cpufreq.h357
-rw-r--r--include/linux/cpuhotplug.h169
-rw-r--r--include/linux/cpuidle.h33
-rw-r--r--include/linux/cpumask.h555
-rw-r--r--include/linux/cpumask_api.h1
-rw-r--r--include/linux/cpuset.h25
-rw-r--r--include/linux/crash_core.h9
-rw-r--r--include/linux/crash_dump.h55
-rw-r--r--include/linux/crc-itu-t.h2
-rw-r--r--include/linux/crc64.h7
-rw-r--r--include/linux/crc8.h2
-rw-r--r--include/linux/cred.h11
-rw-r--r--include/linux/crush/crush.h19
-rw-r--r--include/linux/crypto.h248
-rw-r--r--include/linux/cryptohash.h14
-rw-r--r--include/linux/ctype.h15
-rw-r--r--include/linux/cuda.h2
-rw-r--r--include/linux/cyclades.h364
-rw-r--r--include/linux/damon.h571
-rw-r--r--include/linux/dasd_mod.h11
-rw-r--r--include/linux/dax.h202
-rw-r--r--include/linux/dcache.h42
-rw-r--r--include/linux/dccp.h2
-rw-r--r--include/linux/dcookies.h69
-rw-r--r--include/linux/debug_locks.h9
-rw-r--r--include/linux/debugfs.h112
-rw-r--r--include/linux/debugobjects.h34
-rw-r--r--include/linux/decompress/mm.h12
-rw-r--r--include/linux/decompress/unzstd.h11
-rw-r--r--include/linux/delay.h29
-rw-r--r--include/linux/delayacct.h170
-rw-r--r--include/linux/dev_printk.h84
-rw-r--r--include/linux/devfreq-event.h14
-rw-r--r--include/linux/devfreq.h166
-rw-r--r--include/linux/devfreq_cooling.h38
-rw-r--r--include/linux/device-mapper.h126
-rw-r--r--include/linux/device.h605
-rw-r--r--include/linux/device/bus.h6
-rw-r--r--include/linux/device/class.h14
-rw-r--r--include/linux/device/driver.h8
-rw-r--r--include/linux/device_cgroup.h17
-rw-r--r--include/linux/devm-helpers.h79
-rw-r--r--include/linux/dfl.h87
-rw-r--r--include/linux/digsig.h4
-rw-r--r--include/linux/dim.h2
-rw-r--r--include/linux/dio.h5
-rw-r--r--include/linux/dirent.h2
-rw-r--r--include/linux/dlm.h5
-rw-r--r--include/linux/dm-bufio.h21
-rw-r--r--include/linux/dm-io.h4
-rw-r--r--include/linux/dm-kcopyd.h1
-rw-r--r--include/linux/dm-verity-loadpin.h27
-rw-r--r--include/linux/dma-buf.h334
-rw-r--r--include/linux/dma-contiguous.h176
-rw-r--r--include/linux/dma-debug.h166
-rw-r--r--include/linux/dma-direct.h111
-rw-r--r--include/linux/dma-direction.h8
-rw-r--r--include/linux/dma-fence-array.h34
-rw-r--r--include/linux/dma-fence-chain.h72
-rw-r--r--include/linux/dma-fence-unwrap.h75
-rw-r--r--include/linux/dma-fence.h86
-rw-r--r--include/linux/dma-heap.h21
-rw-r--r--include/linux/dma-iommu.h82
-rw-r--r--include/linux/dma-map-ops.h449
-rw-r--r--include/linux/dma-mapping.h627
-rw-r--r--include/linux/dma-noncoherent.h114
-rw-r--r--include/linux/dma-resv.h386
-rw-r--r--include/linux/dma/edma.h61
-rw-r--r--include/linux/dma/hsu.h6
-rw-r--r--include/linux/dma/imx-dma.h101
-rw-r--r--include/linux/dma/k3-event-router.h16
-rw-r--r--include/linux/dma/k3-psil.h27
-rw-r--r--include/linux/dma/k3-udma-glue.h14
-rw-r--r--include/linux/dma/mmp-pdma.h16
-rw-r--r--include/linux/dma/qcom-gpi-dma.h83
-rw-r--r--include/linux/dma/qcom_adm.h12
-rw-r--r--include/linux/dma/ti-cppi5.h6
-rw-r--r--include/linux/dma/xilinx_dpdma.h11
-rw-r--r--include/linux/dmaengine.h180
-rw-r--r--include/linux/dmar.h23
-rw-r--r--include/linux/dnotify.h3
-rw-r--r--include/linux/dsa/8021q.h82
-rw-r--r--include/linux/dsa/brcm.h16
-rw-r--r--include/linux/dsa/loop.h42
-rw-r--r--include/linux/dsa/mv88e6xxx.h13
-rw-r--r--include/linux/dsa/ocelot.h276
-rw-r--r--include/linux/dsa/sja1105.h55
-rw-r--r--include/linux/dsa/tag_qca.h87
-rw-r--r--include/linux/dtpm.h73
-rw-r--r--include/linux/dw_apb_timer.h1
-rw-r--r--include/linux/dynamic_debug.h191
-rw-r--r--include/linux/dynamic_queue_limits.h2
-rw-r--r--include/linux/edac.h91
-rw-r--r--include/linux/eeprom_93xx46.h5
-rw-r--r--include/linux/efi.h996
-rw-r--r--include/linux/efi_embedded_fw.h41
-rw-r--r--include/linux/elf.h53
-rw-r--r--include/linux/elfcore-compat.h24
-rw-r--r--include/linux/elfcore.h107
-rw-r--r--include/linux/elfnote-lto.h14
-rw-r--r--include/linux/elfnote.h4
-rw-r--r--include/linux/enclosure.h2
-rw-r--r--include/linux/energy_model.h299
-rw-r--r--include/linux/entry-common.h468
-rw-r--r--include/linux/entry-kvm.h99
-rw-r--r--include/linux/err.h3
-rw-r--r--include/linux/errno.h1
-rw-r--r--include/linux/etherdevice.h92
-rw-r--r--include/linux/ethtool.h449
-rw-r--r--include/linux/ethtool_netlink.h57
-rw-r--r--include/linux/eventfd.h17
-rw-r--r--include/linux/eventpoll.h31
-rw-r--r--include/linux/evm.h40
-rw-r--r--include/linux/export-internal.h19
-rw-r--r--include/linux/export.h51
-rw-r--r--include/linux/exportfs.h16
-rw-r--r--include/linux/extcon-provider.h28
-rw-r--r--include/linux/extcon.h32
-rw-r--r--include/linux/f2fs_fs.h53
-rw-r--r--include/linux/fanotify.h72
-rw-r--r--include/linux/fault-inject-usercopy.h22
-rw-r--r--include/linux/fault-inject.h4
-rw-r--r--include/linux/fb.h57
-rw-r--r--include/linux/fbcon.h4
-rw-r--r--include/linux/fcntl.h8
-rw-r--r--include/linux/fdtable.h46
-rw-r--r--include/linux/fiemap.h21
-rw-r--r--include/linux/file.h19
-rw-r--r--include/linux/fileattr.h59
-rw-r--r--include/linux/filter.h512
-rw-r--r--include/linux/find.h600
-rw-r--r--include/linux/firewire.h14
-rw-r--r--include/linux/firmware.h136
-rw-r--r--include/linux/firmware/cirrus/cs_dsp.h328
-rw-r--r--include/linux/firmware/cirrus/wmfw.h (renamed from sound/soc/codecs/wmfw.h)4
-rw-r--r--include/linux/firmware/imx/dsp.h10
-rw-r--r--include/linux/firmware/imx/ipc.h14
-rw-r--r--include/linux/firmware/imx/s4.h20
-rw-r--r--include/linux/firmware/imx/sci.h30
-rw-r--r--include/linux/firmware/imx/svc/misc.h19
-rw-r--r--include/linux/firmware/imx/svc/rm.h74
-rw-r--r--include/linux/firmware/imx/types.h65
-rw-r--r--include/linux/firmware/intel/stratix10-smc.h288
-rw-r--r--include/linux/firmware/intel/stratix10-svc-client.h153
-rw-r--r--include/linux/firmware/mediatek/mtk-adsp-ipc.h65
-rw-r--r--include/linux/firmware/meson/meson_sm.h2
-rw-r--r--include/linux/firmware/trusted_foundations.h9
-rw-r--r--include/linux/firmware/xlnx-event-manager.h36
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h863
-rw-r--r--include/linux/fixp-arith.h19
-rw-r--r--include/linux/flex_proportions.h9
-rw-r--r--include/linux/font.h20
-rw-r--r--include/linux/fortify-string.h624
-rw-r--r--include/linux/fpga/adi-axi-common.h6
-rw-r--r--include/linux/fpga/altera-pr-ip-core.h1
-rw-r--r--include/linux/fpga/fpga-bridge.h32
-rw-r--r--include/linux/fpga/fpga-mgr.h88
-rw-r--r--include/linux/fpga/fpga-region.h42
-rw-r--r--include/linux/fprobe.h105
-rw-r--r--include/linux/frame.h24
-rw-r--r--include/linux/freelist.h129
-rw-r--r--include/linux/freezer.h232
-rw-r--r--include/linux/frontswap.h35
-rw-r--r--include/linux/fs.h1609
-rw-r--r--include/linux/fs_api.h1
-rw-r--r--include/linux/fs_context.h8
-rw-r--r--include/linux/fs_parser.h4
-rw-r--r--include/linux/fs_struct.h2
-rw-r--r--include/linux/fscache-cache.h616
-rw-r--r--include/linux/fscache.h1034
-rw-r--r--include/linux/fscrypt.h697
-rw-r--r--include/linux/fsi-occ.h2
-rw-r--r--include/linux/fsl/bestcomm/bestcomm.h2
-rw-r--r--include/linux/fsl/enetc_mdio.h1
-rw-r--r--include/linux/fsl/guts.h4
-rw-r--r--include/linux/fsl/mc.h128
-rw-r--r--include/linux/fsl/ptp_qoriq.h7
-rw-r--r--include/linux/fsnotify.h277
-rw-r--r--include/linux/fsnotify_backend.h494
-rw-r--r--include/linux/fsverity.h63
-rw-r--r--include/linux/ftrace.h281
-rw-r--r--include/linux/ftrace_irq.h24
-rw-r--r--include/linux/fwnode.h124
-rw-r--r--include/linux/fwnode_mdio.h35
-rw-r--r--include/linux/gameport.h2
-rw-r--r--include/linux/genalloc.h2
-rw-r--r--include/linux/generic-radix-tree.h5
-rw-r--r--include/linux/genetlink.h23
-rw-r--r--include/linux/genhd.h786
-rw-r--r--include/linux/genl_magic_func.h2
-rw-r--r--include/linux/genl_magic_struct.h3
-rw-r--r--include/linux/gfp.h444
-rw-r--r--include/linux/gfp_api.h1
-rw-r--r--include/linux/gfp_types.h348
-rw-r--r--include/linux/goldfish.h15
-rw-r--r--include/linux/gpio.h8
-rw-r--r--include/linux/gpio/consumer.h107
-rw-r--r--include/linux/gpio/driver.h433
-rw-r--r--include/linux/gpio/machine.h36
-rw-r--r--include/linux/gpio/regmap.h94
-rw-r--r--include/linux/greybus/greybus_manifest.h4
-rw-r--r--include/linux/greybus/greybus_protocols.h44
-rw-r--r--include/linux/greybus/hd.h2
-rw-r--r--include/linux/greybus/module.h2
-rw-r--r--include/linux/hardirq.h115
-rw-r--r--include/linux/hash.h5
-rw-r--r--include/linux/hashtable.h8
-rw-r--r--include/linux/hashtable_api.h1
-rw-r--r--include/linux/hdlc.h4
-rw-r--r--include/linux/hdlcdrv.h2
-rw-r--r--include/linux/hdmi.h14
-rw-r--r--include/linux/hid-sensor-hub.h18
-rw-r--r--include/linux/hid-sensor-ids.h15
-rw-r--r--include/linux/hid.h235
-rw-r--r--include/linux/hidden.h19
-rw-r--r--include/linux/highmem-internal.h287
-rw-r--r--include/linux/highmem.h489
-rw-r--r--include/linux/hil_mlc.h2
-rw-r--r--include/linux/hippidevice.h4
-rw-r--r--include/linux/hisi_acc_qm.h520
-rw-r--r--include/linux/hmm.h267
-rw-r--r--include/linux/host1x.h208
-rw-r--r--include/linux/host1x_context_bus.h15
-rw-r--r--include/linux/hp_sdc.h2
-rw-r--r--include/linux/hrtimer.h17
-rw-r--r--include/linux/hrtimer_api.h1
-rw-r--r--include/linux/htcpld.h2
-rw-r--r--include/linux/hte.h271
-rw-r--r--include/linux/huge_mm.h326
-rw-r--r--include/linux/hugetlb.h561
-rw-r--r--include/linux/hugetlb_cgroup.h221
-rw-r--r--include/linux/hw_breakpoint.h9
-rw-r--r--include/linux/hw_random.h5
-rw-r--r--include/linux/hwmon.h35
-rw-r--r--include/linux/hyperv.h352
-rw-r--r--include/linux/hypervisor.h8
-rw-r--r--include/linux/i2c-algo-pca.h15
-rw-r--r--include/linux/i2c-mux.h2
-rw-r--r--include/linux/i2c-smbus.h29
-rw-r--r--include/linux/i2c.h131
-rw-r--r--include/linux/i3c/ccc.h6
-rw-r--r--include/linux/i3c/device.h2
-rw-r--r--include/linux/i3c/master.h1
-rw-r--r--include/linux/icmp.h5
-rw-r--r--include/linux/icmpv6.h51
-rw-r--r--include/linux/ide.h1628
-rw-r--r--include/linux/idle_inject.h4
-rw-r--r--include/linux/idr.h15
-rw-r--r--include/linux/ieee80211.h1389
-rw-r--r--include/linux/ieee802154.h81
-rw-r--r--include/linux/if_arp.h2
-rw-r--r--include/linux/if_bridge.h68
-rw-r--r--include/linux/if_eql.h2
-rw-r--r--include/linux/if_frad.h92
-rw-r--r--include/linux/if_hsr.h47
-rw-r--r--include/linux/if_macvlan.h11
-rw-r--r--include/linux/if_pppol2tp.h2
-rw-r--r--include/linux/if_pppox.h2
-rw-r--r--include/linux/if_rmnet.h95
-rw-r--r--include/linux/if_tap.h11
-rw-r--r--include/linux/if_team.h17
-rw-r--r--include/linux/if_tun.h19
-rw-r--r--include/linux/if_vlan.h48
-rw-r--r--include/linux/igmp.h9
-rw-r--r--include/linux/ihex.h2
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h106
-rw-r--r--include/linux/iio/adc/adi-axi-adc.h64
-rw-r--r--include/linux/iio/adc/qcom-vadc-common.h (renamed from drivers/iio/adc/qcom-vadc-common.h)51
-rw-r--r--include/linux/iio/afe/rescale.h36
-rw-r--r--include/linux/iio/buffer-dma.h7
-rw-r--r--include/linux/iio/buffer-dmaengine.h8
-rw-r--r--include/linux/iio/buffer.h16
-rw-r--r--include/linux/iio/buffer_impl.h41
-rw-r--r--include/linux/iio/common/cros_ec_sensors_core.h117
-rw-r--r--include/linux/iio/common/st_sensors.h44
-rw-r--r--include/linux/iio/consumer.h57
-rw-r--r--include/linux/iio/dac/mcp4725.h2
-rw-r--r--include/linux/iio/driver.h14
-rw-r--r--include/linux/iio/hw-consumer.h1
-rw-r--r--include/linux/iio/iio-opaque.h80
-rw-r--r--include/linux/iio/iio.h219
-rw-r--r--include/linux/iio/imu/adis.h225
-rw-r--r--include/linux/iio/kfifo_buf.h11
-rw-r--r--include/linux/iio/sysfs.h3
-rw-r--r--include/linux/iio/trigger.h36
-rw-r--r--include/linux/iio/trigger_consumer.h9
-rw-r--r--include/linux/iio/triggered_buffer.h32
-rw-r--r--include/linux/iio/types.h7
-rw-r--r--include/linux/ima.h112
-rw-r--r--include/linux/indirect_call_wrapper.h20
-rw-r--r--include/linux/inet_diag.h35
-rw-r--r--include/linux/inetdevice.h22
-rw-r--r--include/linux/init.h135
-rw-r--r--include/linux/init_syscalls.h19
-rw-r--r--include/linux/init_task.h5
-rw-r--r--include/linux/initrd.h21
-rw-r--r--include/linux/inotify.h3
-rw-r--r--include/linux/input-polldev.h58
-rw-r--r--include/linux/input.h16
-rw-r--r--include/linux/input/adp5589.h7
-rw-r--r--include/linux/input/auo-pixcir-ts.h44
-rw-r--r--include/linux/input/cy8ctmg110_pdata.h11
-rw-r--r--include/linux/input/cyttsp.h29
-rw-r--r--include/linux/input/elan-i2c-ids.h12
-rw-r--r--include/linux/input/gp2ap002a00f.h23
-rw-r--r--include/linux/input/lm8333.h2
-rw-r--r--include/linux/input/mt.h5
-rw-r--r--include/linux/input/sparse-keymap.h1
-rw-r--r--include/linux/input/vivaldi-fmap.h27
-rw-r--r--include/linux/instruction_pointer.h8
-rw-r--r--include/linux/instrumentation.h61
-rw-r--r--include/linux/instrumented.h192
-rw-r--r--include/linux/integrity.h1
-rw-r--r--include/linux/intel-iommu.h734
-rw-r--r--include/linux/intel-ish-client-if.h14
-rw-r--r--include/linux/intel-pti.h35
-rw-r--r--include/linux/intel-svm.h114
-rw-r--r--include/linux/intel_rapl.h20
-rw-r--r--include/linux/interconnect-provider.h41
-rw-r--r--include/linux/interconnect.h82
-rw-r--r--include/linux/interrupt.h209
-rw-r--r--include/linux/io-64-nonatomic-hi-lo.h4
-rw-r--r--include/linux/io-64-nonatomic-lo-hi.h4
-rw-r--r--include/linux/io-mapping.h56
-rw-r--r--include/linux/io-pgtable.h72
-rw-r--r--include/linux/io.h32
-rw-r--r--include/linux/io_uring.h97
-rw-r--r--include/linux/io_uring_types.h580
-rw-r--r--include/linux/ioam6.h13
-rw-r--r--include/linux/ioam6_genl.h13
-rw-r--r--include/linux/ioam6_iptunnel.h13
-rw-r--r--include/linux/ioasid.h13
-rw-r--r--include/linux/ioc3.h93
-rw-r--r--include/linux/iocontext.h56
-rw-r--r--include/linux/iomap.h176
-rw-r--r--include/linux/iommu-helper.h4
-rw-r--r--include/linux/iommu.h661
-rw-r--r--include/linux/iopoll.h80
-rw-r--r--include/linux/ioport.h67
-rw-r--r--include/linux/ioprio.h66
-rw-r--r--include/linux/iosys-map.h516
-rw-r--r--include/linux/iova.h124
-rw-r--r--include/linux/iova_bitmap.h26
-rw-r--r--include/linux/ipc_namespace.h62
-rw-r--r--include/linux/ipmi.h10
-rw-r--r--include/linux/ipmi_smi.h65
-rw-r--r--include/linux/ipv6.h50
-rw-r--r--include/linux/irq.h177
-rw-r--r--include/linux/irq_cpustat.h28
-rw-r--r--include/linux/irq_sim.h33
-rw-r--r--include/linux/irq_work.h48
-rw-r--r--include/linux/irqchip.h45
-rw-r--r--include/linux/irqchip/arm-gic-common.h23
-rw-r--r--include/linux/irqchip/arm-gic-v3.h91
-rw-r--r--include/linux/irqchip/arm-gic-v4.h31
-rw-r--r--include/linux/irqchip/arm-vgic-info.h45
-rw-r--r--include/linux/irqchip/arm-vic.h11
-rw-r--r--include/linux/irqchip/irq-bcm2836.h2
-rw-r--r--include/linux/irqchip/irq-ixp4xx.h12
-rw-r--r--include/linux/irqchip/irq-omap-intc.h2
-rw-r--r--include/linux/irqchip/mmp.h3
-rw-r--r--include/linux/irqchip/versatile-fpga.h14
-rw-r--r--include/linux/irqdesc.h87
-rw-r--r--include/linux/irqdomain.h146
-rw-r--r--include/linux/irqflags.h181
-rw-r--r--include/linux/irqhandler.h1
-rw-r--r--include/linux/isa-dma.h14
-rw-r--r--include/linux/isa.h54
-rw-r--r--include/linux/isapnp.h6
-rw-r--r--include/linux/iscsi_ibft.h18
-rw-r--r--include/linux/isicom.h85
-rw-r--r--include/linux/iversion.h85
-rw-r--r--include/linux/jbd2.h259
-rw-r--r--include/linux/jhash.h30
-rw-r--r--include/linux/jiffies.h7
-rw-r--r--include/linux/jump_label.h75
-rw-r--r--include/linux/kallsyms.h67
-rw-r--r--include/linux/kasan-checks.h8
-rw-r--r--include/linux/kasan-enabled.h35
-rw-r--r--include/linux/kasan-tags.h15
-rw-r--r--include/linux/kasan.h463
-rw-r--r--include/linux/kbd_kern.h13
-rw-r--r--include/linux/kconfig.h6
-rw-r--r--include/linux/kcore.h3
-rw-r--r--include/linux/kcov.h22
-rw-r--r--include/linux/kcsan-checks.h533
-rw-r--r--include/linux/kcsan.h75
-rw-r--r--include/linux/kd.h8
-rw-r--r--include/linux/kdb.h30
-rw-r--r--include/linux/kdev_t.h22
-rw-r--r--include/linux/kernel-page-flags.h1
-rw-r--r--include/linux/kernel.h682
-rw-r--r--include/linux/kernel_read_file.h55
-rw-r--r--include/linux/kernel_stat.h9
-rw-r--r--include/linux/kernfs.h130
-rw-r--r--include/linux/kexec.h162
-rw-r--r--include/linux/key-type.h3
-rw-r--r--include/linux/key.h44
-rw-r--r--include/linux/kfence.h250
-rw-r--r--include/linux/kfifo.h75
-rw-r--r--include/linux/kgdb.h54
-rw-r--r--include/linux/khugepaged.h64
-rw-r--r--include/linux/kmemleak.h8
-rw-r--r--include/linux/kmsan-checks.h83
-rw-r--r--include/linux/kmsan.h330
-rw-r--r--include/linux/kmsan_string.h21
-rw-r--r--include/linux/kmsan_types.h35
-rw-r--r--include/linux/kmsg_dump.h59
-rw-r--r--include/linux/kobject.h41
-rw-r--r--include/linux/kobject_api.h1
-rw-r--r--include/linux/kobject_ns.h2
-rw-r--r--include/linux/kprobes.h270
-rw-r--r--include/linux/kref_api.h1
-rw-r--r--include/linux/ks0108.h2
-rw-r--r--include/linux/ksm.h18
-rw-r--r--include/linux/kstrtox.h155
-rw-r--r--include/linux/kthread.h70
-rw-r--r--include/linux/ktime.h11
-rw-r--r--include/linux/ktime_api.h1
-rw-r--r--include/linux/kvm_dirty_ring.h97
-rw-r--r--include/linux/kvm_host.h1225
-rw-r--r--include/linux/kvm_irqfd.h2
-rw-r--r--include/linux/kvm_types.h68
-rw-r--r--include/linux/lapb.h5
-rw-r--r--include/linux/latencytop.h3
-rw-r--r--include/linux/led-class-flash.h42
-rw-r--r--include/linux/led-class-multicolor.h109
-rw-r--r--include/linux/leds-tca6507.h21
-rw-r--r--include/linux/leds-ti-lmu-common.h2
-rw-r--r--include/linux/leds.h37
-rw-r--r--include/linux/leds_pwm.h22
-rw-r--r--include/linux/libata.h438
-rw-r--r--include/linux/libnvdimm.h85
-rw-r--r--include/linux/lightnvm.h700
-rw-r--r--include/linux/limits.h15
-rw-r--r--include/linux/linear_range.h61
-rw-r--r--include/linux/linkage.h81
-rw-r--r--include/linux/linkmode.h19
-rw-r--r--include/linux/list.h133
-rw-r--r--include/linux/list_lru.h21
-rw-r--r--include/linux/list_sort.h7
-rw-r--r--include/linux/litex.h83
-rw-r--r--include/linux/livepatch.h19
-rw-r--r--include/linux/llist.h27
-rw-r--r--include/linux/llist_api.h1
-rw-r--r--include/linux/local_lock.h54
-rw-r--r--include/linux/local_lock_internal.h141
-rw-r--r--include/linux/lockd/bind.h3
-rw-r--r--include/linux/lockd/lockd.h17
-rw-r--r--include/linux/lockd/xdr.h35
-rw-r--r--include/linux/lockd/xdr4.h32
-rw-r--r--include/linux/lockdep.h435
-rw-r--r--include/linux/lockdep_api.h1
-rw-r--r--include/linux/lockdep_types.h208
-rw-r--r--include/linux/lockref.h1
-rw-r--r--include/linux/log2.h9
-rw-r--r--include/linux/logic_iomem.h62
-rw-r--r--include/linux/lru_cache.h8
-rw-r--r--include/linux/lsm_audit.h13
-rw-r--r--include/linux/lsm_hook_defs.h412
-rw-r--r--include/linux/lsm_hooks.h856
-rw-r--r--include/linux/mISDNif.h1
-rw-r--r--include/linux/mailbox/arm_mhuv2_message.h20
-rw-r--r--include/linux/mailbox/mtk-cmdq-mailbox.h28
-rw-r--r--include/linux/mailbox/zynqmp-ipi-message.h2
-rw-r--r--include/linux/mailbox_controller.h1
-rw-r--r--include/linux/map_benchmark.h31
-rw-r--r--include/linux/maple_tree.h692
-rw-r--r--include/linux/marvell_phy.h12
-rw-r--r--include/linux/math.h189
-rw-r--r--include/linux/math64.h81
-rw-r--r--include/linux/max17040_battery.h16
-rw-r--r--include/linux/mbcache.h33
-rw-r--r--include/linux/mc146818rtc.h8
-rw-r--r--include/linux/mcb.h2
-rw-r--r--include/linux/mdev.h165
-rw-r--r--include/linux/mdio-bitbang.h6
-rw-r--r--include/linux/mdio.h179
-rw-r--r--include/linux/mdio/mdio-i2c.h24
-rw-r--r--include/linux/mdio/mdio-mscc-miim.h19
-rw-r--r--include/linux/mdio/mdio-xgene.h (renamed from drivers/net/phy/mdio-xgene.h)4
-rw-r--r--include/linux/mei_aux.h31
-rw-r--r--include/linux/mei_cl_bus.h16
-rw-r--r--include/linux/mem_encrypt.h4
-rw-r--r--include/linux/memblock.h209
-rw-r--r--include/linux/memcontrol.h1284
-rw-r--r--include/linux/memory-tiers.h102
-rw-r--r--include/linux/memory.h100
-rw-r--r--include/linux/memory_hotplug.h317
-rw-r--r--include/linux/mempolicy.h61
-rw-r--r--include/linux/memregion.h2
-rw-r--r--include/linux/memremap.h139
-rw-r--r--include/linux/memstick.h3
-rw-r--r--include/linux/mfd/ab3100.h128
-rw-r--r--include/linux/mfd/abx500.h276
-rw-r--r--include/linux/mfd/abx500/ab8500.h3
-rw-r--r--include/linux/mfd/abx500/ux500_chargalg.h51
-rw-r--r--include/linux/mfd/atc260x/atc2603c.h281
-rw-r--r--include/linux/mfd/atc260x/atc2609a.h308
-rw-r--r--include/linux/mfd/atc260x/core.h58
-rw-r--r--include/linux/mfd/axp20x.h2
-rw-r--r--include/linux/mfd/bcm2835-pm.h1
-rw-r--r--include/linux/mfd/bd9571mwv.h45
-rw-r--r--include/linux/mfd/core.h50
-rw-r--r--include/linux/mfd/da9055/pdata.h2
-rw-r--r--include/linux/mfd/da9063/core.h2
-rw-r--r--include/linux/mfd/da9063/registers.h18
-rw-r--r--include/linux/mfd/db8500-prcmu.h2
-rw-r--r--include/linux/mfd/dbx500-prcmu.h25
-rw-r--r--include/linux/mfd/gsc.h76
-rw-r--r--include/linux/mfd/hi6421-pmic.h2
-rw-r--r--include/linux/mfd/hi655x-pmic.h6
-rw-r--r--include/linux/mfd/idt82p33_reg.h115
-rw-r--r--include/linux/mfd/idt8a340_reg.h768
-rw-r--r--include/linux/mfd/intel-m10-bmc.h162
-rw-r--r--include/linux/mfd/intel_msic.h453
-rw-r--r--include/linux/mfd/intel_pmc_bxt.h53
-rw-r--r--include/linux/mfd/intel_soc_pmic.h23
-rw-r--r--include/linux/mfd/ipaq-micro.h4
-rw-r--r--include/linux/mfd/iqs62x.h143
-rw-r--r--include/linux/mfd/khadas-mcu.h91
-rw-r--r--include/linux/mfd/lp873x.h12
-rw-r--r--include/linux/mfd/lp87565.h46
-rw-r--r--include/linux/mfd/lpc_ich.h2
-rw-r--r--include/linux/mfd/madera/core.h1
-rw-r--r--include/linux/mfd/madera/pdata.h3
-rw-r--r--include/linux/mfd/madera/registers.h635
-rw-r--r--include/linux/mfd/max77686-private.h28
-rw-r--r--include/linux/mfd/max77693-private.h2
-rw-r--r--include/linux/mfd/max77714.h60
-rw-r--r--include/linux/mfd/max8997.h8
-rw-r--r--include/linux/mfd/max8998.h1
-rw-r--r--include/linux/mfd/mp2629.h26
-rw-r--r--include/linux/mfd/mt6331/core.h40
-rw-r--r--include/linux/mfd/mt6331/registers.h584
-rw-r--r--include/linux/mfd/mt6332/core.h65
-rw-r--r--include/linux/mfd/mt6332/registers.h642
-rw-r--r--include/linux/mfd/mt6357/core.h119
-rw-r--r--include/linux/mfd/mt6357/registers.h1574
-rw-r--r--include/linux/mfd/mt6358/core.h156
-rw-r--r--include/linux/mfd/mt6358/registers.h291
-rw-r--r--include/linux/mfd/mt6359/core.h133
-rw-r--r--include/linux/mfd/mt6359/registers.h531
-rw-r--r--include/linux/mfd/mt6359p/registers.h249
-rw-r--r--include/linux/mfd/mt6397/core.h10
-rw-r--r--include/linux/mfd/mt6397/rtc.h11
-rw-r--r--include/linux/mfd/ntxec.h38
-rw-r--r--include/linux/mfd/ocelot.h62
-rw-r--r--include/linux/mfd/rk808.h175
-rw-r--r--include/linux/mfd/rn5t618.h27
-rw-r--r--include/linux/mfd/rohm-bd70528.h391
-rw-r--r--include/linux/mfd/rohm-bd71815.h562
-rw-r--r--include/linux/mfd/rohm-bd71828.h13
-rw-r--r--include/linux/mfd/rohm-bd718x7.h13
-rw-r--r--include/linux/mfd/rohm-bd957x.h140
-rw-r--r--include/linux/mfd/rohm-generic.h38
-rw-r--r--include/linux/mfd/rsmu.h36
-rw-r--r--include/linux/mfd/rt5033-private.h28
-rw-r--r--include/linux/mfd/samsung/core.h33
-rw-r--r--include/linux/mfd/sc27xx-pmic.h7
-rw-r--r--include/linux/mfd/si476x-core.h2
-rw-r--r--include/linux/mfd/sky81452.h2
-rw-r--r--include/linux/mfd/smsc.h104
-rw-r--r--include/linux/mfd/stm32-lptimer.h10
-rw-r--r--include/linux/mfd/stm32-timers.h4
-rw-r--r--include/linux/mfd/stmfx.h1
-rw-r--r--include/linux/mfd/sy7636a.h34
-rw-r--r--include/linux/mfd/syscon.h11
-rw-r--r--include/linux/mfd/syscon/xlnx-vcu.h39
-rw-r--r--include/linux/mfd/t7l66xb.h1
-rw-r--r--include/linux/mfd/tc3589x.h6
-rw-r--r--include/linux/mfd/tc6387xb.h1
-rw-r--r--include/linux/mfd/tc6393xb.h5
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h120
-rw-r--r--include/linux/mfd/tmio.h7
-rw-r--r--include/linux/mfd/tps65086.h12
-rw-r--r--include/linux/mfd/tps65217.h12
-rw-r--r--include/linux/mfd/tps65218.h14
-rw-r--r--include/linux/mfd/tps65910.h40
-rw-r--r--include/linux/mfd/tps65912.h14
-rw-r--r--include/linux/mfd/tps68470.h11
-rw-r--r--include/linux/mfd/tps80031.h637
-rw-r--r--include/linux/mfd/twl.h59
-rw-r--r--include/linux/mfd/wcd934x/registers.h57
-rw-r--r--include/linux/mfd/wm831x/pdata.h1
-rw-r--r--include/linux/mfd/wm8994/pdata.h2
-rw-r--r--include/linux/mhi.h817
-rw-r--r--include/linux/mhi_ep.h277
-rw-r--r--include/linux/mic_bus.h100
-rw-r--r--include/linux/micrel_phy.h18
-rw-r--r--include/linux/migrate.h201
-rw-r--r--include/linux/migrate_mode.h13
-rw-r--r--include/linux/mii.h144
-rw-r--r--include/linux/min_heap.h134
-rw-r--r--include/linux/minmax.h147
-rw-r--r--include/linux/misc_cgroup.h136
-rw-r--r--include/linux/miscdevice.h26
-rw-r--r--include/linux/mlx4/device.h26
-rw-r--r--include/linux/mlx4/driver.h22
-rw-r--r--include/linux/mlx4/qp.h2
-rw-r--r--include/linux/mlx5/accel.h144
-rw-r--r--include/linux/mlx5/cmd.h51
-rw-r--r--include/linux/mlx5/cq.h5
-rw-r--r--include/linux/mlx5/device.h292
-rw-r--r--include/linux/mlx5/driver.h456
-rw-r--r--include/linux/mlx5/eq.h4
-rw-r--r--include/linux/mlx5/eswitch.h128
-rw-r--r--include/linux/mlx5/fs.h78
-rw-r--r--include/linux/mlx5/fs_helpers.h48
-rw-r--r--include/linux/mlx5/mlx5_ifc.h2033
-rw-r--r--include/linux/mlx5/mlx5_ifc_fpga.h235
-rw-r--r--include/linux/mlx5/mlx5_ifc_vdpa.h215
-rw-r--r--include/linux/mlx5/mpfs.h18
-rw-r--r--include/linux/mlx5/port.h34
-rw-r--r--include/linux/mlx5/qp.h154
-rw-r--r--include/linux/mlx5/rsc_dump.h51
-rw-r--r--include/linux/mlx5/transobj.h20
-rw-r--r--include/linux/mlx5/vport.h13
-rw-r--r--include/linux/mm-arch-hooks.h22
-rw-r--r--include/linux/mm.h1713
-rw-r--r--include/linux/mm_api.h1
-rw-r--r--include/linux/mm_inline.h588
-rw-r--r--include/linux/mm_types.h564
-rw-r--r--include/linux/mm_types_task.h12
-rw-r--r--include/linux/mman.h31
-rw-r--r--include/linux/mmap_lock.h170
-rw-r--r--include/linux/mmc/card.h38
-rw-r--r--include/linux/mmc/core.h12
-rw-r--r--include/linux/mmc/host.h100
-rw-r--r--include/linux/mmc/mmc.h17
-rw-r--r--include/linux/mmc/sd.h4
-rw-r--r--include/linux/mmc/sdhci-pci-data.h18
-rw-r--r--include/linux/mmc/sdio.h7
-rw-r--r--include/linux/mmc/sdio_func.h4
-rw-r--r--include/linux/mmc/sdio_ids.h104
-rw-r--r--include/linux/mmdebug.h52
-rw-r--r--include/linux/mmu_context.h24
-rw-r--r--include/linux/mmu_notifier.h58
-rw-r--r--include/linux/mmzone.h1077
-rw-r--r--include/linux/mnt_idmapping.h475
-rw-r--r--include/linux/mnt_namespace.h2
-rw-r--r--include/linux/mod_devicetable.h102
-rw-r--r--include/linux/module.h197
-rw-r--r--include/linux/moduleloader.h8
-rw-r--r--include/linux/moduleparam.h23
-rw-r--r--include/linux/most.h (renamed from drivers/staging/most/most.h)0
-rw-r--r--include/linux/mount.h47
-rw-r--r--include/linux/moxtet.h2
-rw-r--r--include/linux/mpage.h8
-rw-r--r--include/linux/mpi.h197
-rw-r--r--include/linux/mroute.h11
-rw-r--r--include/linux/mroute6.h12
-rw-r--r--include/linux/mroute_base.h17
-rw-r--r--include/linux/msdos_partition.h50
-rw-r--r--include/linux/msi.h367
-rw-r--r--include/linux/mtd/bbm.h2
-rw-r--r--include/linux/mtd/blktrans.h11
-rw-r--r--include/linux/mtd/cfi.h7
-rw-r--r--include/linux/mtd/hyperbus.h19
-rw-r--r--include/linux/mtd/latch-addr-flash.h29
-rw-r--r--include/linux/mtd/mtd.h159
-rw-r--r--include/linux/mtd/nand-ecc-mtk.h (renamed from drivers/mtd/nand/raw/mtk_ecc.h)0
-rw-r--r--include/linux/mtd/nand-ecc-mxic.h49
-rw-r--r--include/linux/mtd/nand-ecc-sw-bch.h71
-rw-r--r--include/linux/mtd/nand-ecc-sw-hamming.h89
-rw-r--r--include/linux/mtd/nand.h325
-rw-r--r--include/linux/mtd/nand_bch.h66
-rw-r--r--include/linux/mtd/nand_ecc.h39
-rw-r--r--include/linux/mtd/onfi.h41
-rw-r--r--include/linux/mtd/partitions.h3
-rw-r--r--include/linux/mtd/pfow.h35
-rw-r--r--include/linux/mtd/qinfo.h2
-rw-r--r--include/linux/mtd/rawnand.h614
-rw-r--r--include/linux/mtd/sharpsl.h1
-rw-r--r--include/linux/mtd/spi-nor.h393
-rw-r--r--include/linux/mtd/spinand.h104
-rw-r--r--include/linux/mtd/xip.h2
-rw-r--r--include/linux/mutex.h107
-rw-r--r--include/linux/mutex_api.h1
-rw-r--r--include/linux/mux/consumer.h41
-rw-r--r--include/linux/mux/driver.h4
-rw-r--r--include/linux/mv643xx.h8
-rw-r--r--include/linux/n_r3964.h175
-rw-r--r--include/linux/namei.h17
-rw-r--r--include/linux/nd.h83
-rw-r--r--include/linux/net.h61
-rw-r--r--include/linux/net/intel/i40e_client.h (renamed from drivers/net/ethernet/intel/i40e/i40e_client.h)34
-rw-r--r--include/linux/net/intel/iidc.h107
-rw-r--r--include/linux/netdev_features.h30
-rw-r--r--include/linux/netdevice.h1904
-rw-r--r--include/linux/netfilter.h50
-rw-r--r--include/linux/netfilter/ipset/ip_set.h12
-rw-r--r--include/linux/netfilter/nf_conntrack_common.h13
-rw-r--r--include/linux/netfilter/nf_conntrack_h323.h109
-rw-r--r--include/linux/netfilter/nf_conntrack_pptp.h40
-rw-r--r--include/linux/netfilter/nf_conntrack_sctp.h2
-rw-r--r--include/linux/netfilter/nf_conntrack_sip.h2
-rw-r--r--include/linux/netfilter/nfnetlink.h75
-rw-r--r--include/linux/netfilter/x_tables.h38
-rw-r--r--include/linux/netfilter_arp/arp_tables.h15
-rw-r--r--include/linux/netfilter_bridge/ebtables.h21
-rw-r--r--include/linux/netfilter_defs.h8
-rw-r--r--include/linux/netfilter_ingress.h58
-rw-r--r--include/linux/netfilter_ipv4.h2
-rw-r--r--include/linux/netfilter_ipv4/ip_tables.h17
-rw-r--r--include/linux/netfilter_ipv6.h28
-rw-r--r--include/linux/netfilter_ipv6/ip6_tables.h17
-rw-r--r--include/linux/netfilter_netdev.h150
-rw-r--r--include/linux/netfs.h349
-rw-r--r--include/linux/netlink.h70
-rw-r--r--include/linux/netpoll.h14
-rw-r--r--include/linux/nfs.h8
-rw-r--r--include/linux/nfs4.h54
-rw-r--r--include/linux/nfs_fs.h180
-rw-r--r--include/linux/nfs_fs_sb.h42
-rw-r--r--include/linux/nfs_page.h12
-rw-r--r--include/linux/nfs_ssc.h81
-rw-r--r--include/linux/nfs_xdr.h170
-rw-r--r--include/linux/nfsacl.h6
-rw-r--r--include/linux/nitro_enclaves.h11
-rw-r--r--include/linux/nl802154.h2
-rw-r--r--include/linux/nmi.h17
-rw-r--r--include/linux/node.h45
-rw-r--r--include/linux/nodemask.h91
-rw-r--r--include/linux/nospec.h2
-rw-r--r--include/linux/notifier.h20
-rw-r--r--include/linux/ns_common.h3
-rw-r--r--include/linux/nsproxy.h24
-rw-r--r--include/linux/ntb.h6
-rw-r--r--include/linux/nubus.h2
-rw-r--r--include/linux/numa.h51
-rw-r--r--include/linux/nvme-auth.h41
-rw-r--r--include/linux/nvme-fc-driver.h397
-rw-r--r--include/linux/nvme-fc.h11
-rw-r--r--include/linux/nvme-rdma.h2
-rw-r--r--include/linux/nvme-tcp.h1
-rw-r--r--include/linux/nvme.h560
-rw-r--r--include/linux/nvmem-consumer.h27
-rw-r--r--include/linux/nvmem-provider.h40
-rw-r--r--include/linux/objtool.h197
-rw-r--r--include/linux/of.h536
-rw-r--r--include/linux/of_address.h71
-rw-r--r--include/linux/of_device.h31
-rw-r--r--include/linux/of_fdt.h15
-rw-r--r--include/linux/of_gpio.h25
-rw-r--r--include/linux/of_graph.h6
-rw-r--r--include/linux/of_iommu.h23
-rw-r--r--include/linux/of_irq.h34
-rw-r--r--include/linux/of_mdio.h60
-rw-r--r--include/linux/of_net.h14
-rw-r--r--include/linux/of_platform.h22
-rw-r--r--include/linux/of_reserved_mem.h26
-rw-r--r--include/linux/oid_registry.h36
-rw-r--r--include/linux/omap-dma.h35
-rw-r--r--include/linux/omap-gpmc.h3
-rw-r--r--include/linux/once.h38
-rw-r--r--include/linux/once_lite.h36
-rw-r--r--include/linux/oom.h20
-rw-r--r--include/linux/oprofile.h209
-rw-r--r--include/linux/overflow.h350
-rw-r--r--include/linux/packing.h2
-rw-r--r--include/linux/padata.h73
-rw-r--r--include/linux/page-flags-layout.h66
-rw-r--r--include/linux/page-flags.h627
-rw-r--r--include/linux/page-isolation.h6
-rw-r--r--include/linux/page_counter.h29
-rw-r--r--include/linux/page_ext.h34
-rw-r--r--include/linux/page_idle.h126
-rw-r--r--include/linux/page_owner.h32
-rw-r--r--include/linux/page_ref.h211
-rw-r--r--include/linux/page_reporting.h29
-rw-r--r--include/linux/page_table_check.h166
-rw-r--r--include/linux/pageblock-flags.h37
-rw-r--r--include/linux/pagemap.h1257
-rw-r--r--include/linux/pagevec.h86
-rw-r--r--include/linux/pagewalk.h14
-rw-r--r--include/linux/panic.h93
-rw-r--r--include/linux/panic_notifier.h12
-rw-r--r--include/linux/parport.h59
-rw-r--r--include/linux/parser.h6
-rw-r--r--include/linux/part_stat.h82
-rw-r--r--include/linux/pci-acpi.h37
-rw-r--r--include/linux/pci-ats.h7
-rw-r--r--include/linux/pci-dma-compat.h129
-rw-r--r--include/linux/pci-doe.h77
-rw-r--r--include/linux/pci-ecam.h56
-rw-r--r--include/linux/pci-ep-cfs.h6
-rw-r--r--include/linux/pci-epc.h154
-rw-r--r--include/linux/pci-epf.h80
-rw-r--r--include/linux/pci-p2pdma.h27
-rw-r--r--include/linux/pci.h495
-rw-r--r--include/linux/pci_hotplug.h4
-rw-r--r--include/linux/pci_ids.h152
-rw-r--r--include/linux/pcs-altera-tse.h17
-rw-r--r--include/linux/pcs-lynx.h18
-rw-r--r--include/linux/pcs-rzn1-miic.h18
-rw-r--r--include/linux/pcs/pcs-xpcs.h42
-rw-r--r--include/linux/pe.h26
-rw-r--r--include/linux/peci-cpu.h40
-rw-r--r--include/linux/peci.h112
-rw-r--r--include/linux/percpu-defs.h4
-rw-r--r--include/linux/percpu-refcount.h89
-rw-r--r--include/linux/percpu-rwsem.h91
-rw-r--r--include/linux/percpu.h20
-rw-r--r--include/linux/percpu_counter.h40
-rw-r--r--include/linux/perf/arm_pmu.h17
-rw-r--r--include/linux/perf/riscv_pmu.h79
-rw-r--r--include/linux/perf_event.h412
-rw-r--r--include/linux/perf_event_api.h1
-rw-r--r--include/linux/perf_regs.h6
-rw-r--r--include/linux/pgtable.h1728
-rw-r--r--include/linux/pgtable_api.h1
-rw-r--r--include/linux/phy.h925
-rw-r--r--include/linux/phy/omap_usb.h69
-rw-r--r--include/linux/phy/pcie.h12
-rw-r--r--include/linux/phy/phy-lvds.h32
-rw-r--r--include/linux/phy/phy.h32
-rw-r--r--include/linux/phy/tegra/xusb.h14
-rw-r--r--include/linux/phy_fixed.h3
-rw-r--r--include/linux/phylink.h421
-rw-r--r--include/linux/pid.h9
-rw-r--r--include/linux/pid_namespace.h23
-rw-r--r--include/linux/pinctrl/pinconf-generic.h33
-rw-r--r--include/linux/pinctrl/pinctrl.h26
-rw-r--r--include/linux/pipe_fs_i.h107
-rw-r--r--include/linux/pkeys.h6
-rw-r--r--include/linux/pktcdvd.h12
-rw-r--r--include/linux/pl353-smc.h30
-rw-r--r--include/linux/platform_data/ad5755.h102
-rw-r--r--include/linux/platform_data/ad5761.h2
-rw-r--r--include/linux/platform_data/ad7291.h13
-rw-r--r--include/linux/platform_data/ad7298.h19
-rw-r--r--include/linux/platform_data/ad7303.h20
-rw-r--r--include/linux/platform_data/ad7793.h2
-rw-r--r--include/linux/platform_data/ad7887.h4
-rw-r--r--include/linux/platform_data/adau1977.h44
-rw-r--r--include/linux/platform_data/adp5588.h171
-rw-r--r--include/linux/platform_data/asoc-mx27vis.h12
-rw-r--r--include/linux/platform_data/asoc-poodle.h16
-rw-r--r--include/linux/platform_data/asoc-pxa.h31
-rw-r--r--include/linux/platform_data/at91_adc.h49
-rw-r--r--include/linux/platform_data/atmel.h12
-rw-r--r--include/linux/platform_data/bcm7038_wdt.h8
-rw-r--r--include/linux/platform_data/brcmfmac.h4
-rw-r--r--include/linux/platform_data/brcmnand.h12
-rw-r--r--include/linux/platform_data/clk-fch.h18
-rw-r--r--include/linux/platform_data/clk-integrator.h2
-rw-r--r--include/linux/platform_data/clk-s3c2410.h19
-rw-r--r--include/linux/platform_data/clk-st.h17
-rw-r--r--include/linux/platform_data/clk-u300.h1
-rw-r--r--include/linux/platform_data/cros_ec_commands.h499
-rw-r--r--include/linux/platform_data/cros_ec_proto.h21
-rw-r--r--include/linux/platform_data/cros_ec_sensorhub.h164
-rw-r--r--include/linux/platform_data/cros_usbpd_notify.h17
-rw-r--r--include/linux/platform_data/davinci-cpufreq.h2
-rw-r--r--include/linux/platform_data/davinci_asp.h13
-rw-r--r--include/linux/platform_data/dma-atmel.h61
-rw-r--r--include/linux/platform_data/dma-coh901318.h72
-rw-r--r--include/linux/platform_data/dma-dw.h33
-rw-r--r--include/linux/platform_data/dma-hsu.h2
-rw-r--r--include/linux/platform_data/dma-imx-sdma.h71
-rw-r--r--include/linux/platform_data/dma-imx.h68
-rw-r--r--include/linux/platform_data/dmtimer-omap.h6
-rw-r--r--include/linux/platform_data/efm32-spi.h15
-rw-r--r--include/linux/platform_data/efm32-uart.h19
-rw-r--r--include/linux/platform_data/elm.h2
-rw-r--r--include/linux/platform_data/emc2305.h22
-rw-r--r--include/linux/platform_data/eth_ixp4xx.h19
-rw-r--r--include/linux/platform_data/fb-s3c2410.h99
-rw-r--r--include/linux/platform_data/gpio-davinci.h12
-rw-r--r--include/linux/platform_data/gpio-dwapb.h24
-rw-r--r--include/linux/platform_data/gpio-omap.h3
-rw-r--r--include/linux/platform_data/gpio/gpio-amd-fch.h2
-rw-r--r--include/linux/platform_data/gpmc-omap.h2
-rw-r--r--include/linux/platform_data/gsc_hwmon.h46
-rw-r--r--include/linux/platform_data/hirschmann-hellcreek.h24
-rw-r--r--include/linux/platform_data/i2c-designware.h13
-rw-r--r--include/linux/platform_data/i2c-hid.h41
-rw-r--r--include/linux/platform_data/i2c-pxa.h48
-rw-r--r--include/linux/platform_data/intel-spi.h29
-rw-r--r--include/linux/platform_data/invensense_mpu6050.h2
-rw-r--r--include/linux/platform_data/itco_wdt.h11
-rw-r--r--include/linux/platform_data/jz4740/jz4740_nand.h25
-rw-r--r--include/linux/platform_data/leds-kirkwood-ns2.h38
-rw-r--r--include/linux/platform_data/leds-lp55xx.h13
-rw-r--r--include/linux/platform_data/leds-pca963x.h35
-rw-r--r--include/linux/platform_data/leds-s3c24xx.h6
-rw-r--r--include/linux/platform_data/macb.h20
-rw-r--r--include/linux/platform_data/max732x.h12
-rw-r--r--include/linux/platform_data/media/camera-mx2.h31
-rw-r--r--include/linux/platform_data/media/camera-mx3.h43
-rw-r--r--include/linux/platform_data/media/coda.h14
-rw-r--r--include/linux/platform_data/media/omap1_camera.h32
-rw-r--r--include/linux/platform_data/mlxcpld.h31
-rw-r--r--include/linux/platform_data/mlxreg.h129
-rw-r--r--include/linux/platform_data/mmc-esdhc-imx.h41
-rw-r--r--include/linux/platform_data/mmc-esdhc-mcf.h17
-rw-r--r--include/linux/platform_data/mmc-omap.h6
-rw-r--r--include/linux/platform_data/mmc-s3cmci.h2
-rw-r--r--include/linux/platform_data/mtd-davinci-aemif.h2
-rw-r--r--include/linux/platform_data/mtd-davinci.h9
-rw-r--r--include/linux/platform_data/mtd-mxc_nand.h19
-rw-r--r--include/linux/platform_data/mtd-nand-omap2.h10
-rw-r--r--include/linux/platform_data/mtd-nand-s3c2410.h2
-rw-r--r--include/linux/platform_data/ntc_thermistor.h50
-rw-r--r--include/linux/platform_data/omap-twl4030.h2
-rw-r--r--include/linux/platform_data/pca953x.h2
-rw-r--r--include/linux/platform_data/pcf857x.h2
-rw-r--r--include/linux/platform_data/pm33xx.h9
-rw-r--r--include/linux/platform_data/pwm_omap_dmtimer.h90
-rw-r--r--include/linux/platform_data/remoteproc-omap.h51
-rw-r--r--include/linux/platform_data/s3c-hsudc.h2
-rw-r--r--include/linux/platform_data/serial-imx.h15
-rw-r--r--include/linux/platform_data/sh_mmcif.h (renamed from include/linux/mmc/sh_mmcif.h)2
-rw-r--r--include/linux/platform_data/shmob_drm.h2
-rw-r--r--include/linux/platform_data/simplefb.h3
-rw-r--r--include/linux/platform_data/sky81452-backlight.h35
-rw-r--r--include/linux/platform_data/spi-ath79.h16
-rw-r--r--include/linux/platform_data/spi-clps711x.h17
-rw-r--r--include/linux/platform_data/spi-imx.h33
-rw-r--r--include/linux/platform_data/spi-mt65xx.h1
-rw-r--r--include/linux/platform_data/spi-s3c64xx.h14
-rw-r--r--include/linux/platform_data/ssm2518.h21
-rw-r--r--include/linux/platform_data/st_sensors_pdata.h3
-rw-r--r--include/linux/platform_data/ti-sysc.h11
-rw-r--r--include/linux/platform_data/timer-ixp4xx.h11
-rw-r--r--include/linux/platform_data/tps68470.h40
-rw-r--r--include/linux/platform_data/uio_dmem_genirq.h10
-rw-r--r--include/linux/platform_data/uio_pruss.h12
-rw-r--r--include/linux/platform_data/usb-ehci-mxc.h14
-rw-r--r--include/linux/platform_data/usb-mx2.h29
-rw-r--r--include/linux/platform_data/usb-omap.h18
-rw-r--r--include/linux/platform_data/usb-omap1.h4
-rw-r--r--include/linux/platform_data/usb-s3c2410_udc.h6
-rw-r--r--include/linux/platform_data/ux500_wdt.h18
-rw-r--r--include/linux/platform_data/video-imxfb.h70
-rw-r--r--include/linux/platform_data/video-pxafb.h22
-rw-r--r--include/linux/platform_data/wan_ixp4xx_hss.h17
-rw-r--r--include/linux/platform_data/wilco-ec.h10
-rw-r--r--include/linux/platform_data/x86/asus-wmi.h25
-rw-r--r--include/linux/platform_data/x86/clk-lpss.h2
-rw-r--r--include/linux/platform_data/x86/mlxcpld.h52
-rw-r--r--include/linux/platform_data/x86/nvidia-wmi-ec-backlight.h76
-rw-r--r--include/linux/platform_data/x86/p2sb.h28
-rw-r--r--include/linux/platform_data/x86/pmc_atom.h13
-rw-r--r--include/linux/platform_data/x86/simatic-ipc-base.h28
-rw-r--r--include/linux/platform_data/x86/simatic-ipc.h74
-rw-r--r--include/linux/platform_data/x86/soc.h65
-rw-r--r--include/linux/platform_data/x86/spi-intel.h31
-rw-r--r--include/linux/platform_device.h39
-rw-r--r--include/linux/platform_profile.h41
-rw-r--r--include/linux/pldmfw.h165
-rw-r--r--include/linux/plist.h5
-rw-r--r--include/linux/pm.h172
-rw-r--r--include/linux/pm2301_charger.h48
-rw-r--r--include/linux/pm_clock.h5
-rw-r--r--include/linux/pm_domain.h92
-rw-r--r--include/linux/pm_opp.h421
-rw-r--r--include/linux/pm_qos.h81
-rw-r--r--include/linux/pm_runtime.h338
-rw-r--r--include/linux/pm_wakeirq.h23
-rw-r--r--include/linux/pm_wakeup.h39
-rw-r--r--include/linux/pmbus.h39
-rw-r--r--include/linux/pmu.h2
-rw-r--r--include/linux/pnfs_osd_xdr.h317
-rw-r--r--include/linux/pnp.h31
-rw-r--r--include/linux/poison.h14
-rw-r--r--include/linux/poll.h2
-rw-r--r--include/linux/polynomial.h35
-rw-r--r--include/linux/posix-timers.h33
-rw-r--r--include/linux/posix_acl.h27
-rw-r--r--include/linux/posix_acl_xattr.h12
-rw-r--r--include/linux/power/ab8500.h16
-rw-r--r--include/linux/power/bq2415x_charger.h6
-rw-r--r--include/linux/power/bq25890_charger.h15
-rw-r--r--include/linux/power/bq27xxx_battery.h5
-rw-r--r--include/linux/power/charger-manager.h41
-rw-r--r--include/linux/power/generic-adc-battery.h4
-rw-r--r--include/linux/power/gpio-charger.h6
-rw-r--r--include/linux/power/max17042_battery.h16
-rw-r--r--include/linux/power/max8903_charger.h43
-rw-r--r--include/linux/power/smartreflex.h2
-rw-r--r--include/linux/power/smb347-charger.h114
-rw-r--r--include/linux/power_supply.h520
-rw-r--r--include/linux/powercap.h11
-rw-r--r--include/linux/ppp-comp.h2
-rw-r--r--include/linux/ppp_channel.h5
-rw-r--r--include/linux/ppp_defs.h14
-rw-r--r--include/linux/pps-gpio.h19
-rw-r--r--include/linux/prandom.h75
-rw-r--r--include/linux/preempt.h177
-rw-r--r--include/linux/prefetch.h8
-rw-r--r--include/linux/printk.h389
-rw-r--r--include/linux/prmt.h7
-rw-r--r--include/linux/proc_fs.h92
-rw-r--r--include/linux/proc_ns.h11
-rw-r--r--include/linux/profile.h48
-rw-r--r--include/linux/property.h100
-rw-r--r--include/linux/pruss_driver.h54
-rw-r--r--include/linux/psci.h18
-rw-r--r--include/linux/pse-pd/pse.h129
-rw-r--r--include/linux/psi.h26
-rw-r--r--include/linux/psi_types.h47
-rw-r--r--include/linux/psp-sev.h60
-rw-r--r--include/linux/pstore.h19
-rw-r--r--include/linux/pstore_blk.h55
-rw-r--r--include/linux/pstore_ram.h2
-rw-r--r--include/linux/pstore_zone.h60
-rw-r--r--include/linux/ptdump.h3
-rw-r--r--include/linux/ptp_classify.h102
-rw-r--r--include/linux/ptp_clock_kernel.h157
-rw-r--r--include/linux/ptp_kvm.h21
-rw-r--r--include/linux/ptp_pch.h26
-rw-r--r--include/linux/ptr_ring.h2
-rw-r--r--include/linux/ptrace.h112
-rw-r--r--include/linux/ptrace_api.h1
-rw-r--r--include/linux/purgatory.h2
-rw-r--r--include/linux/pwm.h111
-rw-r--r--include/linux/pwm_backlight.h2
-rw-r--r--include/linux/pxa2xx_ssp.h214
-rw-r--r--include/linux/qcom-geni-se.h65
-rw-r--r--include/linux/qcom_scm.h91
-rw-r--r--include/linux/qed/common_hsi.h171
-rw-r--r--include/linux/qed/eth_common.h31
-rw-r--r--include/linux/qed/fcoe_common.h365
-rw-r--r--include/linux/qed/iscsi_common.h390
-rw-r--r--include/linux/qed/iwarp_common.h30
-rw-r--r--include/linux/qed/nvmetcp_common.h531
-rw-r--r--include/linux/qed/qed_chain.h487
-rw-r--r--include/linux/qed/qed_eth_if.h53
-rw-r--r--include/linux/qed/qed_fcoe_if.h8
-rw-r--r--include/linux/qed/qed_if.h584
-rw-r--r--include/linux/qed/qed_iov_if.h30
-rw-r--r--include/linux/qed/qed_iscsi_if.h36
-rw-r--r--include/linux/qed/qed_ll2_if.h75
-rw-r--r--include/linux/qed/qed_nvmetcp_if.h257
-rw-r--r--include/linux/qed/qed_rdma_if.h59
-rw-r--r--include/linux/qed/qede_rdma.h35
-rw-r--r--include/linux/qed/rdma_common.h31
-rw-r--r--include/linux/qed/roce_common.h30
-rw-r--r--include/linux/qed/storage_common.h30
-rw-r--r--include/linux/qed/tcp_common.h30
-rw-r--r--include/linux/quota.h17
-rw-r--r--include/linux/quotaops.h18
-rw-r--r--include/linux/radix-tree.h18
-rw-r--r--include/linux/raid/detect.h11
-rw-r--r--include/linux/raid/md_u.h13
-rw-r--r--include/linux/raid/pq.h2
-rw-r--r--include/linux/raid/xor.h21
-rw-r--r--include/linux/random.h178
-rw-r--r--include/linux/randomize_kstack.h92
-rw-r--r--include/linux/range.h6
-rw-r--r--include/linux/ras.h5
-rw-r--r--include/linux/ratelimit.h36
-rw-r--r--include/linux/ratelimit_types.h47
-rw-r--r--include/linux/rbtree.h241
-rw-r--r--include/linux/rbtree_augmented.h2
-rw-r--r--include/linux/rbtree_latch.h6
-rw-r--r--include/linux/rbtree_types.h34
-rw-r--r--include/linux/rcu_node_tree.h2
-rw-r--r--include/linux/rcu_segcblist.h144
-rw-r--r--include/linux/rculist.h112
-rw-r--r--include/linux/rculist_nulls.h4
-rw-r--r--include/linux/rcupdate.h370
-rw-r--r--include/linux/rcupdate_trace.h100
-rw-r--r--include/linux/rcupdate_wait.h19
-rw-r--r--include/linux/rcutiny.h109
-rw-r--r--include/linux/rcutree.h68
-rw-r--r--include/linux/rcuwait.h40
-rw-r--r--include/linux/rcuwait_api.h1
-rw-r--r--include/linux/reboot.h104
-rw-r--r--include/linux/ref_tracker.h79
-rw-r--r--include/linux/refcount.h136
-rw-r--r--include/linux/refcount_api.h1
-rw-r--r--include/linux/regmap.h650
-rw-r--r--include/linux/regset.h230
-rw-r--r--include/linux/regulator/ab8500.h166
-rw-r--r--include/linux/regulator/consumer.h112
-rw-r--r--include/linux/regulator/coupler.h13
-rw-r--r--include/linux/regulator/da9121.h36
-rw-r--r--include/linux/regulator/driver.h288
-rw-r--r--include/linux/regulator/gpio-regulator.h2
-rw-r--r--include/linux/regulator/lp872x.h17
-rw-r--r--include/linux/regulator/machine.h27
-rw-r--r--include/linux/regulator/mt6315-regulator.h44
-rw-r--r--include/linux/regulator/mt6331-regulator.h46
-rw-r--r--include/linux/regulator/mt6332-regulator.h27
-rw-r--r--include/linux/regulator/mt6358-regulator.h45
-rw-r--r--include/linux/regulator/mt6359-regulator.h59
-rw-r--r--include/linux/regulator/pca9450.h236
-rw-r--r--include/linux/regulator/pfuze100.h6
-rw-r--r--include/linux/regulator/tps62360.h6
-rw-r--r--include/linux/relay.h31
-rw-r--r--include/linux/remoteproc.h180
-rw-r--r--include/linux/remoteproc/mtk_scp.h2
-rw-r--r--include/linux/remoteproc/qcom_rproc.h36
-rw-r--r--include/linux/resctrl.h242
-rw-r--r--include/linux/reset-controller.h22
-rw-r--r--include/linux/reset.h360
-rw-r--r--include/linux/reset/bcm63xx_pmb.h10
-rw-r--r--include/linux/reset/reset-simple.h48
-rw-r--r--include/linux/resource.h2
-rw-r--r--include/linux/restart_block.h1
-rw-r--r--include/linux/resume_user_mode.h64
-rw-r--r--include/linux/rethook.h100
-rw-r--r--include/linux/rfkill.h36
-rw-r--r--include/linux/rhashtable.h71
-rw-r--r--include/linux/ring_buffer.h12
-rw-r--r--include/linux/rio.h4
-rw-r--r--include/linux/rio_drv.h3
-rw-r--r--include/linux/rio_ids.h13
-rw-r--r--include/linux/rmap.h289
-rw-r--r--include/linux/rmi.h13
-rw-r--r--include/linux/rpmsg.h92
-rw-r--r--include/linux/rpmsg/byteorder.h67
-rw-r--r--include/linux/rpmsg/ns.h45
-rw-r--r--include/linux/rpmsg/qcom_glink.h7
-rw-r--r--include/linux/rslib.h2
-rw-r--r--include/linux/rtc.h114
-rw-r--r--include/linux/rtc/ds1685.h1
-rw-r--r--include/linux/rtc/sirfsoc_rtciobrg.h21
-rw-r--r--include/linux/rtmutex.h107
-rw-r--r--include/linux/rtnetlink.h4
-rw-r--r--include/linux/rtsx_pci.h96
-rw-r--r--include/linux/rtsx_usb.h2
-rw-r--r--include/linux/rv.h70
-rw-r--r--include/linux/rwbase_rt.h39
-rw-r--r--include/linux/rwlock.h30
-rw-r--r--include/linux/rwlock_api_smp.h14
-rw-r--r--include/linux/rwlock_rt.h150
-rw-r--r--include/linux/rwlock_types.h49
-rw-r--r--include/linux/rwsem.h96
-rw-r--r--include/linux/s3c_adc_battery.h3
-rw-r--r--include/linux/sbitmap.h192
-rw-r--r--include/linux/scatterlist.h240
-rw-r--r--include/linux/sched.h755
-rw-r--r--include/linux/sched/affinity.h1
-rw-r--r--include/linux/sched/cond_resched.h1
-rw-r--r--include/linux/sched/coredump.h15
-rw-r--r--include/linux/sched/cpufreq.h7
-rw-r--r--include/linux/sched/cputime.h5
-rw-r--r--include/linux/sched/deadline.h2
-rw-r--r--include/linux/sched/debug.h7
-rw-r--r--include/linux/sched/hotplug.h2
-rw-r--r--include/linux/sched/idle.h4
-rw-r--r--include/linux/sched/isolation.h42
-rw-r--r--include/linux/sched/jobctl.h8
-rw-r--r--include/linux/sched/loadavg.h2
-rw-r--r--include/linux/sched/mm.h234
-rw-r--r--include/linux/sched/posix-timers.h1
-rw-r--r--include/linux/sched/prio.h18
-rw-r--r--include/linux/sched/rseq_api.h1
-rw-r--r--include/linux/sched/rt.h8
-rw-r--r--include/linux/sched/sd_flags.h166
-rw-r--r--include/linux/sched/signal.h184
-rw-r--r--include/linux/sched/stat.h16
-rw-r--r--include/linux/sched/sysctl.h88
-rw-r--r--include/linux/sched/task.h54
-rw-r--r--include/linux/sched/task_flags.h1
-rw-r--r--include/linux/sched/task_stack.h8
-rw-r--r--include/linux/sched/thread_info_api.h1
-rw-r--r--include/linux/sched/topology.h84
-rw-r--r--include/linux/sched/user.h19
-rw-r--r--include/linux/sched/wake_q.h7
-rw-r--r--include/linux/sched_clock.h30
-rw-r--r--include/linux/scif.h1339
-rw-r--r--include/linux/scmi_protocol.h822
-rw-r--r--include/linux/scpi_protocol.h14
-rw-r--r--include/linux/scs.h68
-rw-r--r--include/linux/sctp.h63
-rw-r--r--include/linux/sdb.h160
-rw-r--r--include/linux/sdla.h240
-rw-r--r--include/linux/seccomp.h25
-rw-r--r--include/linux/secretmem.h54
-rw-r--r--include/linux/security.h314
-rw-r--r--include/linux/sed-opal.h1
-rw-r--r--include/linux/selection.h22
-rw-r--r--include/linux/seq_buf.h27
-rw-r--r--include/linux/seq_file.h59
-rw-r--r--include/linux/seq_file_net.h4
-rw-r--r--include/linux/seqlock.h1185
-rw-r--r--include/linux/seqlock_api.h1
-rw-r--r--include/linux/seqno-fence.h109
-rw-r--r--include/linux/serdev.h15
-rw-r--r--include/linux/serial.h17
-rw-r--r--include/linux/serial_8250.h27
-rw-r--r--include/linux/serial_core.h594
-rw-r--r--include/linux/serial_pnx8xxx.h67
-rw-r--r--include/linux/serial_s3c.h23
-rw-r--r--include/linux/set_memory.h25
-rw-r--r--include/linux/sfi.h210
-rw-r--r--include/linux/sfi_acpi.h93
-rw-r--r--include/linux/sfp.h15
-rw-r--r--include/linux/shmem_fs.h83
-rw-r--r--include/linux/shrinker.h43
-rw-r--r--include/linux/signal.h58
-rw-r--r--include/linux/signal_types.h19
-rw-r--r--include/linux/siox.h2
-rw-r--r--include/linux/siphash.h49
-rw-r--r--include/linux/sirfsoc_dma.h7
-rw-r--r--include/linux/sizes.h5
-rw-r--r--include/linux/skbuff.h1154
-rw-r--r--include/linux/skmsg.h311
-rw-r--r--include/linux/slab.h405
-rw-r--r--include/linux/slab_def.h23
-rw-r--r--include/linux/slub_def.h69
-rw-r--r--include/linux/smp.h86
-rw-r--r--include/linux/smp_types.h69
-rw-r--r--include/linux/soc/apple/rtkit.h167
-rw-r--r--include/linux/soc/apple/sart.h53
-rw-r--r--include/linux/soc/brcmstb/brcmstb.h16
-rw-r--r--include/linux/soc/ixp4xx/cpu.h120
-rw-r--r--include/linux/soc/ixp4xx/npe.h2
-rw-r--r--include/linux/soc/marvell/octeontx2/asm.h57
-rw-r--r--include/linux/soc/mediatek/infracfg.h251
-rw-r--r--include/linux/soc/mediatek/mtk-cmdq.h147
-rw-r--r--include/linux/soc/mediatek/mtk-mmsys.h70
-rw-r--r--include/linux/soc/mediatek/mtk-mutex.h55
-rw-r--r--include/linux/soc/mediatek/mtk_sip_svc.h3
-rw-r--r--include/linux/soc/mediatek/mtk_wed.h148
-rw-r--r--include/linux/soc/pxa/cpu.h252
-rw-r--r--include/linux/soc/pxa/mfp.h470
-rw-r--r--include/linux/soc/pxa/smemc.h13
-rw-r--r--include/linux/soc/qcom/apr.h73
-rw-r--r--include/linux/soc/qcom/irq.h2
-rw-r--r--include/linux/soc/qcom/llcc-qcom.h55
-rw-r--r--include/linux/soc/qcom/mdt_loader.h50
-rw-r--r--include/linux/soc/qcom/pdr.h29
-rw-r--r--include/linux/soc/qcom/qcom_aoss.h38
-rw-r--r--include/linux/soc/qcom/qmi.h25
-rw-r--r--include/linux/soc/qcom/smd-rpm.h10
-rw-r--r--include/linux/soc/qcom/smem_state.h8
-rw-r--r--include/linux/soc/renesas/r9a06g032-sysctrl.h11
-rw-r--r--include/linux/soc/renesas/rcar-rst.h2
-rw-r--r--include/linux/soc/samsung/exynos-chipid.h6
-rw-r--r--include/linux/soc/samsung/exynos-regs-pmu.h6
-rw-r--r--include/linux/soc/samsung/s3c-adc.h32
-rw-r--r--include/linux/soc/samsung/s3c-cpu-freq.h145
-rw-r--r--include/linux/soc/samsung/s3c-cpufreq-core.h299
-rw-r--r--include/linux/soc/samsung/s3c-pm.h94
-rw-r--r--include/linux/soc/sunxi/sunxi_sram.h2
-rw-r--r--include/linux/soc/ti/k3-ringacc.h28
-rw-r--r--include/linux/soc/ti/knav_dma.h10
-rw-r--r--include/linux/soc/ti/knav_qmss.h12
-rw-r--r--include/linux/soc/ti/omap1-io.h143
-rw-r--r--include/linux/soc/ti/omap1-mux.h311
-rw-r--r--include/linux/soc/ti/omap1-soc.h198
-rw-r--r--include/linux/soc/ti/omap1-usb.h116
-rw-r--r--include/linux/soc/ti/ti-msgmgr.h18
-rw-r--r--include/linux/soc/ti/ti_sci_inta_msi.h4
-rw-r--r--include/linux/soc/ti/ti_sci_protocol.h104
-rw-r--r--include/linux/sock_diag.h14
-rw-r--r--include/linux/socket.h54
-rw-r--r--include/linux/sockptr.h118
-rw-r--r--include/linux/softirq.h1
-rw-r--r--include/linux/sony-laptop.h2
-rw-r--r--include/linux/sort.h2
-rw-r--r--include/linux/soundwire/sdw.h208
-rw-r--r--include/linux/soundwire/sdw_intel.h117
-rw-r--r--include/linux/soundwire/sdw_registers.h167
-rw-r--r--include/linux/soundwire/sdw_type.h9
-rw-r--r--include/linux/spi/ads7846.h15
-rw-r--r--include/linux/spi/altera.h50
-rw-r--r--include/linux/spi/corgi_lcd.h3
-rw-r--r--include/linux/spi/eeprom.h2
-rw-r--r--include/linux/spi/ifx_modem.h20
-rw-r--r--include/linux/spi/l4f00242t03.h17
-rw-r--r--include/linux/spi/lms283gf05.h16
-rw-r--r--include/linux/spi/max7301.h4
-rw-r--r--include/linux/spi/mcp23s08.h18
-rw-r--r--include/linux/spi/mmc_spi.h9
-rw-r--r--include/linux/spi/pxa2xx_spi.h27
-rw-r--r--include/linux/spi/s3c24xx-fiq.h33
-rw-r--r--include/linux/spi/s3c24xx.h7
-rw-r--r--include/linux/spi/spi-mem.h49
-rw-r--r--include/linux/spi/spi.h411
-rw-r--r--include/linux/spinlock.h71
-rw-r--r--include/linux/spinlock_api.h1
-rw-r--r--include/linux/spinlock_api_smp.h14
-rw-r--r--include/linux/spinlock_api_up.h3
-rw-r--r--include/linux/spinlock_rt.h159
-rw-r--r--include/linux/spinlock_types.h89
-rw-r--r--include/linux/spinlock_types_raw.h73
-rw-r--r--include/linux/spinlock_types_up.h2
-rw-r--r--include/linux/spinlock_up.h3
-rw-r--r--include/linux/splice.h6
-rw-r--r--include/linux/spmi.h4
-rw-r--r--include/linux/sram.h14
-rw-r--r--include/linux/srcu.h12
-rw-r--r--include/linux/srcutiny.h17
-rw-r--r--include/linux/srcutree.h34
-rw-r--r--include/linux/ssb/ssb.h2
-rw-r--r--include/linux/ssb/ssb_driver_extif.h2
-rw-r--r--include/linux/ssb/ssb_driver_gige.h16
-rw-r--r--include/linux/stackdepot.h53
-rw-r--r--include/linux/stackleak.h60
-rw-r--r--include/linux/stackprotector.h2
-rw-r--r--include/linux/stacktrace.h60
-rw-r--r--include/linux/stat.h9
-rw-r--r--include/linux/statfs.h14
-rw-r--r--include/linux/static_call.h344
-rw-r--r--include/linux/static_call_types.h103
-rw-r--r--include/linux/stdarg.h11
-rw-r--r--include/linux/stddef.h71
-rw-r--r--include/linux/stm.h2
-rw-r--r--include/linux/stmmac.h76
-rw-r--r--include/linux/stop_machine.h32
-rw-r--r--include/linux/string.h313
-rw-r--r--include/linux/string_helpers.h80
-rw-r--r--include/linux/sungem_phy.h2
-rw-r--r--include/linux/sunrpc/auth.h6
-rw-r--r--include/linux/sunrpc/bc_xprt.h17
-rw-r--r--include/linux/sunrpc/cache.h21
-rw-r--r--include/linux/sunrpc/clnt.h28
-rw-r--r--include/linux/sunrpc/gss_api.h4
-rw-r--r--include/linux/sunrpc/gss_krb5.h17
-rw-r--r--include/linux/sunrpc/gss_krb5_enctypes.h9
-rw-r--r--include/linux/sunrpc/msg_prot.h6
-rw-r--r--include/linux/sunrpc/rpc_rdma.h77
-rw-r--r--include/linux/sunrpc/rpc_rdma_cid.h24
-rw-r--r--include/linux/sunrpc/sched.h38
-rw-r--r--include/linux/sunrpc/svc.h187
-rw-r--r--include/linux/sunrpc/svc_rdma.h94
-rw-r--r--include/linux/sunrpc/svc_rdma_pcl.h128
-rw-r--r--include/linux/sunrpc/svc_xprt.h23
-rw-r--r--include/linux/sunrpc/svcauth.h4
-rw-r--r--include/linux/sunrpc/svcauth_gss.h3
-rw-r--r--include/linux/sunrpc/svcsock.h8
-rw-r--r--include/linux/sunrpc/xdr.h280
-rw-r--r--include/linux/sunrpc/xprt.h49
-rw-r--r--include/linux/sunrpc/xprtmultipath.h14
-rw-r--r--include/linux/sunrpc/xprtsock.h2
-rw-r--r--include/linux/sunxi-rsb.h2
-rw-r--r--include/linux/surface_acpi_notify.h39
-rw-r--r--include/linux/surface_aggregator/controller.h994
-rw-r--r--include/linux/surface_aggregator/device.h640
-rw-r--r--include/linux/surface_aggregator/serial_hub.h677
-rw-r--r--include/linux/suspend.h117
-rw-r--r--include/linux/swab.h25
-rw-r--r--include/linux/swait.h23
-rw-r--r--include/linux/swait_api.h1
-rw-r--r--include/linux/swap.h362
-rw-r--r--include/linux/swap_cgroup.h4
-rw-r--r--include/linux/swap_slots.h4
-rw-r--r--include/linux/swapfile.h10
-rw-r--r--include/linux/swapops.h415
-rw-r--r--include/linux/swiotlb.h158
-rw-r--r--include/linux/switchtec.h3
-rw-r--r--include/linux/syscall_user_dispatch.h40
-rw-r--r--include/linux/syscalls.h193
-rw-r--r--include/linux/syscalls_api.h1
-rw-r--r--include/linux/sysctl.h143
-rw-r--r--include/linux/sysfb.h106
-rw-r--r--include/linux/sysfs.h109
-rw-r--r--include/linux/syslog.h3
-rw-r--r--include/linux/sysrq.h25
-rw-r--r--include/linux/t10-pi.h31
-rw-r--r--include/linux/task_work.h18
-rw-r--r--include/linux/tboot.h12
-rw-r--r--include/linux/tcp.h113
-rw-r--r--include/linux/tee_drv.h220
-rw-r--r--include/linux/termios_internal.h49
-rw-r--r--include/linux/textsearch.h2
-rw-r--r--include/linux/thermal.h248
-rw-r--r--include/linux/thread_info.h87
-rw-r--r--include/linux/threads.h4
-rw-r--r--include/linux/thunderbolt.h100
-rw-r--r--include/linux/ti-emif-sram.h10
-rw-r--r--include/linux/ti_wilink_st.h6
-rw-r--r--include/linux/tick.h33
-rw-r--r--include/linux/tifm.h2
-rw-r--r--include/linux/time.h19
-rw-r--r--include/linux/time32.h12
-rw-r--r--include/linux/time64.h20
-rw-r--r--include/linux/time_namespace.h43
-rw-r--r--include/linux/timecounter.h2
-rw-r--r--include/linux/timekeeping.h45
-rw-r--r--include/linux/timekeeping32.h14
-rw-r--r--include/linux/timer.h13
-rw-r--r--include/linux/timex.h13
-rw-r--r--include/linux/tnum.h32
-rw-r--r--include/linux/topology.h54
-rw-r--r--include/linux/torture.h41
-rw-r--r--include/linux/tpm.h28
-rw-r--r--include/linux/tpm_eventlog.h33
-rw-r--r--include/linux/trace.h52
-rw-r--r--include/linux/trace_events.h231
-rw-r--r--include/linux/trace_recursion.h210
-rw-r--r--include/linux/trace_seq.h4
-rw-r--r--include/linux/tracehook.h201
-rw-r--r--include/linux/tracepoint-defs.h39
-rw-r--r--include/linux/tracepoint.h167
-rw-r--r--include/linux/tty.h731
-rw-r--r--include/linux/tty_buffer.h60
-rw-r--r--include/linux/tty_driver.h626
-rw-r--r--include/linux/tty_flip.h29
-rw-r--r--include/linux/tty_ldisc.h357
-rw-r--r--include/linux/tty_port.h256
-rw-r--r--include/linux/typecheck.h9
-rw-r--r--include/linux/types.h11
-rw-r--r--include/linux/u64_stats_sync.h163
-rw-r--r--include/linux/u64_stats_sync_api.h1
-rw-r--r--include/linux/uacce.h149
-rw-r--r--include/linux/uaccess.h197
-rw-r--r--include/linux/ucb1400.h4
-rw-r--r--include/linux/udp.h28
-rw-r--r--include/linux/uio.h220
-rw-r--r--include/linux/uio_driver.h49
-rw-r--r--include/linux/umh.h24
-rw-r--r--include/linux/unaligned/access_ok.h68
-rw-r--r--include/linux/unaligned/be_byteshift.h71
-rw-r--r--include/linux/unaligned/be_memmove.h37
-rw-r--r--include/linux/unaligned/be_struct.h37
-rw-r--r--include/linux/unaligned/generic.h69
-rw-r--r--include/linux/unaligned/le_byteshift.h71
-rw-r--r--include/linux/unaligned/le_memmove.h37
-rw-r--r--include/linux/unaligned/le_struct.h37
-rw-r--r--include/linux/unaligned/memmove.h46
-rw-r--r--include/linux/unaligned/packed_struct.h2
-rw-r--r--include/linux/unicode.h52
-rw-r--r--include/linux/units.h29
-rw-r--r--include/linux/usb.h72
-rw-r--r--include/linux/usb/audio-v2.h31
-rw-r--r--include/linux/usb/audio-v3.h2
-rw-r--r--include/linux/usb/audio.h3
-rw-r--r--include/linux/usb/c67x00.h15
-rw-r--r--include/linux/usb/cdc-wdm.h7
-rw-r--r--include/linux/usb/cdc.h4
-rw-r--r--include/linux/usb/cdc_ncm.h21
-rw-r--r--include/linux/usb/ch9.h73
-rw-r--r--include/linux/usb/chipidea.h10
-rw-r--r--include/linux/usb/composite.h27
-rw-r--r--include/linux/usb/ehci_def.h49
-rw-r--r--include/linux/usb/ehci_pdriver.h15
-rw-r--r--include/linux/usb/g_hid.h14
-rw-r--r--include/linux/usb/gadget.h96
-rw-r--r--include/linux/usb/hcd.h42
-rw-r--r--include/linux/usb/input.h4
-rw-r--r--include/linux/usb/isp1301.h10
-rw-r--r--include/linux/usb/isp1760.h19
-rw-r--r--include/linux/usb/m66592.h14
-rw-r--r--include/linux/usb/musb-ux500.h10
-rw-r--r--include/linux/usb/net2280.h14
-rw-r--r--include/linux/usb/of.h2
-rw-r--r--include/linux/usb/ohci_pdriver.h14
-rw-r--r--include/linux/usb/onboard_hub.h18
-rw-r--r--include/linux/usb/otg-fsm.h24
-rw-r--r--include/linux/usb/otg.h3
-rw-r--r--include/linux/usb/pd.h72
-rw-r--r--include/linux/usb/pd_bdo.h2
-rw-r--r--include/linux/usb/pd_ext_sdb.h4
-rw-r--r--include/linux/usb/pd_vdo.h317
-rw-r--r--include/linux/usb/phy_companion.h12
-rw-r--r--include/linux/usb/quirks.h6
-rw-r--r--include/linux/usb/r8152.h37
-rw-r--r--include/linux/usb/r8a66597.h14
-rw-r--r--include/linux/usb/rndis_host.h15
-rw-r--r--include/linux/usb/role.h35
-rw-r--r--include/linux/usb/serial.h126
-rw-r--r--include/linux/usb/storage.h2
-rw-r--r--include/linux/usb/tcpci.h232
-rw-r--r--include/linux/usb/tcpm.h51
-rw-r--r--include/linux/usb/tegra_usb_phy.h17
-rw-r--r--include/linux/usb/typec.h77
-rw-r--r--include/linux/usb/typec_altmode.h49
-rw-r--r--include/linux/usb/typec_dp.h7
-rw-r--r--include/linux/usb/typec_mux.h79
-rw-r--r--include/linux/usb/typec_retimer.h45
-rw-r--r--include/linux/usb/typec_tbt.h57
-rw-r--r--include/linux/usb/ulpi.h4
-rw-r--r--include/linux/usb/usb338x.h11
-rw-r--r--include/linux/usb/usbnet.h36
-rw-r--r--include/linux/usb/xhci-dbgp.h6
-rw-r--r--include/linux/usb_usual.h6
-rw-r--r--include/linux/usbdevice_fs.h2
-rw-r--r--include/linux/user_events.h54
-rw-r--r--include/linux/user_namespace.h54
-rw-r--r--include/linux/userfaultfd_k.h191
-rw-r--r--include/linux/usermode_driver.h19
-rw-r--r--include/linux/utsname.h10
-rw-r--r--include/linux/uuid.h24
-rw-r--r--include/linux/vbox_utils.h1
-rw-r--r--include/linux/vdpa.h513
-rw-r--r--include/linux/verification.h10
-rw-r--r--include/linux/vermagic.h24
-rw-r--r--include/linux/vexpress.h30
-rw-r--r--include/linux/vfio.h286
-rw-r--r--include/linux/vfio_pci_core.h133
-rw-r--r--include/linux/vgaarb.h122
-rw-r--r--include/linux/vhost_iotlb.h52
-rw-r--r--include/linux/via-core.h2
-rw-r--r--include/linux/videodev2.h1
-rw-r--r--include/linux/virtio.h24
-rw-r--r--include/linux/virtio_anchor.h19
-rw-r--r--include/linux/virtio_caif.h6
-rw-r--r--include/linux/virtio_config.h275
-rw-r--r--include/linux/virtio_dma_buf.h37
-rw-r--r--include/linux/virtio_net.h92
-rw-r--r--include/linux/virtio_pci_legacy.h40
-rw-r--r--include/linux/virtio_pci_modern.h118
-rw-r--r--include/linux/virtio_ring.h29
-rw-r--r--include/linux/virtio_vsock.h11
-rw-r--r--include/linux/visorbus.h344
-rw-r--r--include/linux/vm_event_item.h55
-rw-r--r--include/linux/vmacache.h28
-rw-r--r--include/linux/vmalloc.h206
-rw-r--r--include/linux/vmpressure.h2
-rw-r--r--include/linux/vmstat.h295
-rw-r--r--include/linux/vmw_vmci_defs.h90
-rw-r--r--include/linux/vringh.h66
-rw-r--r--include/linux/vt_kern.h77
-rw-r--r--include/linux/vtime.h136
-rw-r--r--include/linux/w1.h4
-rw-r--r--include/linux/wait.h98
-rw-r--r--include/linux/wait_api.h1
-rw-r--r--include/linux/wait_bit.h8
-rw-r--r--include/linux/watch_queue.h134
-rw-r--r--include/linux/watchdog.h16
-rw-r--r--include/linux/wimax/debug.h491
-rw-r--r--include/linux/wireless.h10
-rw-r--r--include/linux/wkup_m3_ipc.h25
-rw-r--r--include/linux/wm97xx.h5
-rw-r--r--include/linux/wmi.h3
-rw-r--r--include/linux/workqueue.h98
-rw-r--r--include/linux/workqueue_api.h1
-rw-r--r--include/linux/writeback.h85
-rw-r--r--include/linux/ww_mutex.h87
-rw-r--r--include/linux/wwan.h186
-rw-r--r--include/linux/xarray.h118
-rw-r--r--include/linux/xattr.h38
-rw-r--r--include/linux/xxhash.h2
-rw-r--r--include/linux/xz.h110
-rw-r--r--include/linux/z2_battery.h1
-rw-r--r--include/linux/zbud.h23
-rw-r--r--include/linux/zlib.h2
-rw-r--r--include/linux/zorro.h13
-rw-r--r--include/linux/zpool.h3
-rw-r--r--include/linux/zsmalloc.h3
-rw-r--r--include/linux/zstd.h1252
-rw-r--r--include/linux/zstd_errors.h77
-rw-r--r--include/linux/zstd_lib.h2432
-rw-r--r--include/math-emu/op-common.h15
-rw-r--r--include/media/cec-notifier.h41
-rw-r--r--include/media/cec-pin.h16
-rw-r--r--include/media/cec.h84
-rw-r--r--include/media/davinci/vpbe_display.h8
-rw-r--r--include/media/davinci/vpbe_osd.h6
-rw-r--r--include/media/davinci/vpif_types.h2
-rw-r--r--include/media/dmxdev.h1
-rw-r--r--include/media/drv-intf/s3c_camif.h4
-rw-r--r--include/media/drv-intf/soc_mediabus.h107
-rw-r--r--include/media/dvb-usb-ids.h631
-rw-r--r--include/media/dvb_frontend.h13
-rw-r--r--include/media/dvbdev.h10
-rw-r--r--include/media/frame_vector.h47
-rw-r--r--include/media/fwht-ctrls.h31
-rw-r--r--include/media/h264-ctrls.h210
-rw-r--r--include/media/hevc-ctrls.h212
-rw-r--r--include/media/i2c/adv7343.h12
-rw-r--r--include/media/i2c/adv7393.h10
-rw-r--r--include/media/i2c/ir-kbd-i2c.h1
-rw-r--r--include/media/i2c/m5mols.h4
-rw-r--r--include/media/i2c/mt9p031.h1
-rw-r--r--include/media/i2c/mt9t112.h4
-rw-r--r--include/media/i2c/noon010pc30.h5
-rw-r--r--include/media/i2c/ov2659.h14
-rw-r--r--include/media/i2c/ov772x.h2
-rw-r--r--include/media/i2c/s5c73m3.h1
-rw-r--r--include/media/i2c/s5k4ecgx.h6
-rw-r--r--include/media/i2c/s5k6aa.h1
-rw-r--r--include/media/i2c/smiapp.h63
-rw-r--r--include/media/i2c/tvp514x.h11
-rw-r--r--include/media/i2c/tw9910.h8
-rw-r--r--include/media/i2c/wm8775.h2
-rw-r--r--include/media/media-dev-allocator.h2
-rw-r--r--include/media/media-device.h68
-rw-r--r--include/media/media-devnode.h2
-rw-r--r--include/media/media-entity.h326
-rw-r--r--include/media/mipi-csi2.h46
-rw-r--r--include/media/mpeg2-ctrls.h82
-rw-r--r--include/media/rc-core.h38
-rw-r--r--include/media/rc-map.h123
-rw-r--r--include/media/soc_camera.h397
-rw-r--r--include/media/tpg/v4l2-tpg.h20
-rw-r--r--include/media/tuner.h1
-rw-r--r--include/media/v4l2-async.h200
-rw-r--r--include/media/v4l2-clk.h73
-rw-r--r--include/media/v4l2-common.h57
-rw-r--r--include/media/v4l2-ctrls.h285
-rw-r--r--include/media/v4l2-dev.h120
-rw-r--r--include/media/v4l2-device.h66
-rw-r--r--include/media/v4l2-dv-timings.h2
-rw-r--r--include/media/v4l2-event.h13
-rw-r--r--include/media/v4l2-fh.h2
-rw-r--r--include/media/v4l2-fwnode.h473
-rw-r--r--include/media/v4l2-h264.h89
-rw-r--r--include/media/v4l2-image-sizes.h6
-rw-r--r--include/media/v4l2-ioctl.h10
-rw-r--r--include/media/v4l2-jpeg.h157
-rw-r--r--include/media/v4l2-mc.h94
-rw-r--r--include/media/v4l2-mediabus.h127
-rw-r--r--include/media/v4l2-mem2mem.h172
-rw-r--r--include/media/v4l2-rect.h20
-rw-r--r--include/media/v4l2-subdev.h489
-rw-r--r--include/media/v4l2-uvc.h359
-rw-r--r--include/media/v4l2-vp9.h233
-rw-r--r--include/media/videobuf-dma-sg.h4
-rw-r--r--include/media/videobuf2-core.h132
-rw-r--r--include/media/videobuf2-dma-contig.h2
-rw-r--r--include/media/videobuf2-dvb.h2
-rw-r--r--include/media/videobuf2-v4l2.h81
-rw-r--r--include/media/vp8-ctrls.h112
-rw-r--r--include/media/vsp1.h2
-rw-r--r--include/memory/renesas-rpc-if.h104
-rw-r--r--include/misc/ocxl-config.h1
-rw-r--r--include/misc/ocxl.h114
-rw-r--r--include/net/6lowpan.h2
-rw-r--r--include/net/9p/9p.h17
-rw-r--r--include/net/9p/client.h84
-rw-r--r--include/net/9p/transport.h35
-rw-r--r--include/net/Space.h10
-rw-r--r--include/net/act_api.h101
-rw-r--r--include/net/addrconf.h15
-rw-r--r--include/net/af_rxrpc.h20
-rw-r--r--include/net/af_unix.h29
-rw-r--r--include/net/af_vsock.h14
-rw-r--r--include/net/amt.h408
-rw-r--r--include/net/arp.h9
-rw-r--r--include/net/ax25.h42
-rw-r--r--include/net/ax88796.h9
-rw-r--r--include/net/bareudp.h16
-rw-r--r--include/net/bluetooth/bluetooth.h230
-rw-r--r--include/net/bluetooth/hci.h639
-rw-r--r--include/net/bluetooth/hci_core.h669
-rw-r--r--include/net/bluetooth/hci_sock.h10
-rw-r--r--include/net/bluetooth/hci_sync.h131
-rw-r--r--include/net/bluetooth/iso.h32
-rw-r--r--include/net/bluetooth/l2cap.h62
-rw-r--r--include/net/bluetooth/mgmt.h330
-rw-r--r--include/net/bluetooth/rfcomm.h3
-rw-r--r--include/net/bluetooth/sco.h2
-rw-r--r--include/net/bond_3ad.h7
-rw-r--r--include/net/bond_alb.h6
-rw-r--r--include/net/bond_options.h51
-rw-r--r--include/net/bonding.h131
-rw-r--r--include/net/bpf_sk_storage.h40
-rw-r--r--include/net/busy_poll.h51
-rw-r--r--include/net/caif/caif_dev.h2
-rw-r--r--include/net/caif/caif_hsi.h200
-rw-r--r--include/net/caif/caif_layer.h4
-rw-r--r--include/net/caif/caif_spi.h155
-rw-r--r--include/net/caif/cfcnfg.h2
-rw-r--r--include/net/caif/cfserl.h1
-rw-r--r--include/net/cfg80211.h1882
-rw-r--r--include/net/cfg802154.h20
-rw-r--r--include/net/checksum.h98
-rw-r--r--include/net/cipso_ipv4.h12
-rw-r--r--include/net/cls_cgroup.h7
-rw-r--r--include/net/codel.h7
-rw-r--r--include/net/codel_impl.h20
-rw-r--r--include/net/codel_qdisc.h3
-rw-r--r--include/net/compat.h71
-rw-r--r--include/net/datalink.h9
-rw-r--r--include/net/dcbevent.h2
-rw-r--r--include/net/dcbnl.h2
-rw-r--r--include/net/devlink.h1176
-rw-r--r--include/net/dn.h231
-rw-r--r--include/net/dn_dev.h199
-rw-r--r--include/net/dn_fib.h167
-rw-r--r--include/net/dn_neigh.h30
-rw-r--r--include/net/dn_nsp.h195
-rw-r--r--include/net/dn_route.h115
-rw-r--r--include/net/drop_monitor.h33
-rw-r--r--include/net/dropreason.h323
-rw-r--r--include/net/dsa.h984
-rw-r--r--include/net/dst.h64
-rw-r--r--include/net/dst_cache.h11
-rw-r--r--include/net/dst_metadata.h59
-rw-r--r--include/net/dst_ops.h4
-rw-r--r--include/net/erspan.h22
-rw-r--r--include/net/esp.h17
-rw-r--r--include/net/espintcp.h1
-rw-r--r--include/net/ethoc.h3
-rw-r--r--include/net/failover.h1
-rw-r--r--include/net/fib_rules.h43
-rw-r--r--include/net/firewire.h5
-rw-r--r--include/net/flow.h56
-rw-r--r--include/net/flow_dissector.h86
-rw-r--r--include/net/flow_offload.h303
-rw-r--r--include/net/fq.h16
-rw-r--r--include/net/fq_impl.h173
-rw-r--r--include/net/garp.h2
-rw-r--r--include/net/gen_stats.h59
-rw-r--r--include/net/genetlink.h110
-rw-r--r--include/net/gre.h19
-rw-r--r--include/net/gro.h450
-rw-r--r--include/net/gtp.h46
-rw-r--r--include/net/gue.h5
-rw-r--r--include/net/hwbm.h2
-rw-r--r--include/net/icmp.h7
-rw-r--r--include/net/ieee80211_radiotap.h11
-rw-r--r--include/net/ieee802154_netdev.h43
-rw-r--r--include/net/if_inet6.h56
-rw-r--r--include/net/ila.h2
-rw-r--r--include/net/inet6_connection_sock.h2
-rw-r--r--include/net/inet6_hashtables.h30
-rw-r--r--include/net/inet_common.h16
-rw-r--r--include/net/inet_connection_sock.h77
-rw-r--r--include/net/inet_dscp.h57
-rw-r--r--include/net/inet_ecn.h101
-rw-r--r--include/net/inet_frag.h17
-rw-r--r--include/net/inet_hashtables.h183
-rw-r--r--include/net/inet_sock.h53
-rw-r--r--include/net/inet_timewait_sock.h5
-rw-r--r--include/net/ioam6.h68
-rw-r--r--include/net/ip.h113
-rw-r--r--include/net/ip6_checksum.h30
-rw-r--r--include/net/ip6_fib.h82
-rw-r--r--include/net/ip6_route.h56
-rw-r--r--include/net/ip6_tunnel.h3
-rw-r--r--include/net/ip_fib.h83
-rw-r--r--include/net/ip_tunnels.h61
-rw-r--r--include/net/ip_vs.h69
-rw-r--r--include/net/ipcomp.h4
-rw-r--r--include/net/ipconfig.h2
-rw-r--r--include/net/ipv6.h212
-rw-r--r--include/net/ipv6_frag.h34
-rw-r--r--include/net/ipv6_stubs.h20
-rw-r--r--include/net/ipx.h171
-rw-r--r--include/net/iucv/af_iucv.h21
-rw-r--r--include/net/l3mdev.h39
-rw-r--r--include/net/lapb.h2
-rw-r--r--include/net/llc.h6
-rw-r--r--include/net/llc_c_ac.h7
-rw-r--r--include/net/llc_c_st.h4
-rw-r--r--include/net/llc_conn.h1
-rw-r--r--include/net/llc_if.h3
-rw-r--r--include/net/llc_pdu.h31
-rw-r--r--include/net/llc_s_ac.h4
-rw-r--r--include/net/llc_s_ev.h1
-rw-r--r--include/net/llc_s_st.h6
-rw-r--r--include/net/lwtunnel.h11
-rw-r--r--include/net/mac80211.h1159
-rw-r--r--include/net/mac802154.h31
-rw-r--r--include/net/macsec.h105
-rw-r--r--include/net/mctp.h298
-rw-r--r--include/net/mctpdevice.h56
-rw-r--r--include/net/mip6.h2
-rw-r--r--include/net/mld.h9
-rw-r--r--include/net/mpls.h17
-rw-r--r--include/net/mpls_iptunnel.h5
-rw-r--r--include/net/mptcp.h157
-rw-r--r--include/net/mrp.h4
-rw-r--r--include/net/ncsi.h2
-rw-r--r--include/net/ndisc.h36
-rw-r--r--include/net/neighbour.h94
-rw-r--r--include/net/net_debug.h157
-rw-r--r--include/net/net_namespace.h114
-rw-r--r--include/net/net_trackers.h18
-rw-r--r--include/net/netevent.h1
-rw-r--r--include/net/netfilter/ipv4/nf_defrag_ipv4.h3
-rw-r--r--include/net/netfilter/ipv4/nf_reject.h14
-rw-r--r--include/net/netfilter/ipv6/nf_conntrack_ipv6.h3
-rw-r--r--include/net/netfilter/ipv6/nf_defrag_ipv6.h8
-rw-r--r--include/net/netfilter/ipv6/nf_reject.h14
-rw-r--r--include/net/netfilter/nf_conntrack.h69
-rw-r--r--include/net/netfilter/nf_conntrack_acct.h12
-rw-r--r--include/net/netfilter/nf_conntrack_act_ct.h50
-rw-r--r--include/net/netfilter/nf_conntrack_bpf.h46
-rw-r--r--include/net/netfilter/nf_conntrack_core.h23
-rw-r--r--include/net/netfilter/nf_conntrack_count.h1
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h129
-rw-r--r--include/net/netfilter/nf_conntrack_extend.h49
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h1
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h42
-rw-r--r--include/net/netfilter/nf_conntrack_labels.h13
-rw-r--r--include/net/netfilter/nf_conntrack_seqadj.h3
-rw-r--r--include/net/netfilter/nf_conntrack_timeout.h30
-rw-r--r--include/net/netfilter/nf_conntrack_timestamp.h13
-rw-r--r--include/net/netfilter/nf_flow_table.h203
-rw-r--r--include/net/netfilter/nf_hooks_lwtunnel.h7
-rw-r--r--include/net/netfilter/nf_log.h24
-rw-r--r--include/net/netfilter/nf_nat.h4
-rw-r--r--include/net/netfilter/nf_nat_helper.h1
-rw-r--r--include/net/netfilter/nf_queue.h15
-rw-r--r--include/net/netfilter/nf_reject.h21
-rw-r--r--include/net/netfilter/nf_tables.h413
-rw-r--r--include/net/netfilter/nf_tables_core.h89
-rw-r--r--include/net/netfilter/nf_tables_ipv4.h64
-rw-r--r--include/net/netfilter/nf_tables_ipv6.h78
-rw-r--r--include/net/netfilter/nf_tables_offload.h22
-rw-r--r--include/net/netfilter/nft_fib.h5
-rw-r--r--include/net/netfilter/nft_meta.h7
-rw-r--r--include/net/netfilter/xt_rateest.h2
-rw-r--r--include/net/netlink.h251
-rw-r--r--include/net/netns/bpf.h28
-rw-r--r--include/net/netns/can.h2
-rw-r--r--include/net/netns/conntrack.h42
-rw-r--r--include/net/netns/core.h4
-rw-r--r--include/net/netns/dccp.h12
-rw-r--r--include/net/netns/flow_table.h14
-rw-r--r--include/net/netns/generic.h3
-rw-r--r--include/net/netns/ipv4.h157
-rw-r--r--include/net/netns/ipv6.h53
-rw-r--r--include/net/netns/mctp.h37
-rw-r--r--include/net/netns/mib.h33
-rw-r--r--include/net/netns/mpls.h2
-rw-r--r--include/net/netns/netfilter.h8
-rw-r--r--include/net/netns/nexthop.h2
-rw-r--r--include/net/netns/nftables.h6
-rw-r--r--include/net/netns/sctp.h14
-rw-r--r--include/net/netns/smc.h26
-rw-r--r--include/net/netns/unix.h8
-rw-r--r--include/net/netns/x_tables.h21
-rw-r--r--include/net/netns/xfrm.h9
-rw-r--r--include/net/netrom.h1
-rw-r--r--include/net/nexthop.h314
-rw-r--r--include/net/nfc/digital.h4
-rw-r--r--include/net/nfc/hci.h6
-rw-r--r--include/net/nfc/nci.h48
-rw-r--r--include/net/nfc/nci_core.h34
-rw-r--r--include/net/nfc/nfc.h18
-rw-r--r--include/net/nl802154.h13
-rw-r--r--include/net/p8022.h5
-rw-r--r--include/net/page_pool.h226
-rw-r--r--include/net/phonet/pep.h3
-rw-r--r--include/net/phonet/phonet.h4
-rw-r--r--include/net/phonet/pn_dev.h5
-rw-r--r--include/net/pie.h31
-rw-r--r--include/net/ping.h4
-rw-r--r--include/net/pkt_cls.h231
-rw-r--r--include/net/pkt_sched.h116
-rw-r--r--include/net/pptp.h3
-rw-r--r--include/net/protocol.h5
-rw-r--r--include/net/psample.h23
-rw-r--r--include/net/psnap.h5
-rw-r--r--include/net/raw.h20
-rw-r--r--include/net/rawv6.h7
-rw-r--r--include/net/red.h59
-rw-r--r--include/net/regulatory.h12
-rw-r--r--include/net/request_sock.h13
-rw-r--r--include/net/rose.h12
-rw-r--r--include/net/route.h60
-rw-r--r--include/net/rpl.h40
-rw-r--r--include/net/rtnetlink.h33
-rw-r--r--include/net/sch_generic.h273
-rw-r--r--include/net/sctp/command.h2
-rw-r--r--include/net/sctp/constants.h34
-rw-r--r--include/net/sctp/sctp.h89
-rw-r--r--include/net/sctp/sm.h16
-rw-r--r--include/net/sctp/structs.h93
-rw-r--r--include/net/sctp/ulpevent.h2
-rw-r--r--include/net/secure_seq.h6
-rw-r--r--include/net/seg6.h23
-rw-r--r--include/net/selftests.h31
-rw-r--r--include/net/smc.h13
-rw-r--r--include/net/sock.h796
-rw-r--r--include/net/sock_reuseport.h22
-rw-r--r--include/net/stp.h2
-rw-r--r--include/net/strparser.h27
-rw-r--r--include/net/switchdev.h286
-rw-r--r--include/net/tc_act/tc_ct.h34
-rw-r--r--include/net/tc_act/tc_gact.h15
-rw-r--r--include/net/tc_act/tc_gate.h141
-rw-r--r--include/net/tc_act/tc_mirred.h1
-rw-r--r--include/net/tc_act/tc_pedit.h1
-rw-r--r--include/net/tc_act/tc_police.h130
-rw-r--r--include/net/tc_act/tc_skbedit.h54
-rw-r--r--include/net/tc_act/tc_tunnel_key.h15
-rw-r--r--include/net/tc_act/tc_vlan.h13
-rw-r--r--include/net/tcp.h427
-rw-r--r--include/net/tls.h397
-rw-r--r--include/net/transp_v6.h5
-rw-r--r--include/net/tso.h23
-rw-r--r--include/net/tun_proto.h3
-rw-r--r--include/net/udp.h80
-rw-r--r--include/net/udp_tunnel.h210
-rw-r--r--include/net/udplite.h52
-rw-r--r--include/net/vxlan.h83
-rw-r--r--include/net/wimax.h503
-rw-r--r--include/net/xdp.h286
-rw-r--r--include/net/xdp_priv.h2
-rw-r--r--include/net/xdp_sock.h334
-rw-r--r--include/net/xdp_sock_drv.h289
-rw-r--r--include/net/xfrm.h246
-rw-r--r--include/net/xsk_buff_pool.h229
-rw-r--r--include/pcmcia/cistpl.h6
-rw-r--r--include/pcmcia/soc_common.h125
-rw-r--r--include/ras/ras_event.h4
-rw-r--r--include/rdma/ib.h33
-rw-r--r--include/rdma/ib_addr.h32
-rw-r--r--include/rdma/ib_cache.h51
-rw-r--r--include/rdma/ib_cm.h24
-rw-r--r--include/rdma/ib_fmr_pool.h93
-rw-r--r--include/rdma/ib_hdrs.h50
-rw-r--r--include/rdma/ib_mad.h110
-rw-r--r--include/rdma/ib_marshall.h31
-rw-r--r--include/rdma/ib_pack.h29
-rw-r--r--include/rdma/ib_pma.h31
-rw-r--r--include/rdma/ib_sa.h73
-rw-r--r--include/rdma/ib_smi.h43
-rw-r--r--include/rdma/ib_sysfs.h37
-rw-r--r--include/rdma/ib_umem.h181
-rw-r--r--include/rdma/ib_umem_odp.h50
-rw-r--r--include/rdma/ib_verbs.h1387
-rw-r--r--include/rdma/ibta_vol1_c12.h6
-rw-r--r--include/rdma/iw_cm.h31
-rw-r--r--include/rdma/iw_portmap.h30
-rw-r--r--include/rdma/lag.h23
-rw-r--r--include/rdma/opa_addr.h44
-rw-r--r--include/rdma/opa_port_info.h41
-rw-r--r--include/rdma/opa_smi.h31
-rw-r--r--include/rdma/opa_vnic.h56
-rw-r--r--include/rdma/rdma_cm.h120
-rw-r--r--include/rdma/rdma_cm_ib.h31
-rw-r--r--include/rdma/rdma_counter.h21
-rw-r--r--include/rdma/rdma_netlink.h4
-rw-r--r--include/rdma/rdma_vt.h70
-rw-r--r--include/rdma/rdmavt_cq.h53
-rw-r--r--include/rdma/rdmavt_mr.h52
-rw-r--r--include/rdma/rdmavt_qp.h102
-rw-r--r--include/rdma/restrack.h49
-rw-r--r--include/rdma/rw.h18
-rw-r--r--include/rdma/uverbs_ioctl.h169
-rw-r--r--include/rdma/uverbs_named_ioctl.h31
-rw-r--r--include/rdma/uverbs_std_types.h47
-rw-r--r--include/rdma/uverbs_types.h41
-rw-r--r--include/rv/automata.h75
-rw-r--r--include/rv/da_monitor.h544
-rw-r--r--include/rv/instrumentation.h29
-rw-r--r--include/scsi/fc/fc_ms.h59
-rw-r--r--include/scsi/fc_encode.h727
-rw-r--r--include/scsi/fc_frame.h30
-rw-r--r--include/scsi/iscsi_if.h11
-rw-r--r--include/scsi/iscsi_proto.h2
-rw-r--r--include/scsi/libfc.h15
-rw-r--r--include/scsi/libfcoe.h5
-rw-r--r--include/scsi/libiscsi.h58
-rw-r--r--include/scsi/libsas.h74
-rw-r--r--include/scsi/sas.h67
-rw-r--r--include/scsi/sas_ata.h21
-rw-r--r--include/scsi/scsi.h178
-rw-r--r--include/scsi/scsi_bsg_iscsi.h4
-rw-r--r--include/scsi/scsi_cmnd.h145
-rw-r--r--include/scsi/scsi_common.h7
-rw-r--r--include/scsi/scsi_device.h66
-rw-r--r--include/scsi/scsi_devinfo.h6
-rw-r--r--include/scsi/scsi_dh.h5
-rw-r--r--include/scsi/scsi_driver.h9
-rw-r--r--include/scsi/scsi_eh.h6
-rw-r--r--include/scsi/scsi_host.h149
-rw-r--r--include/scsi/scsi_ioctl.h11
-rw-r--r--include/scsi/scsi_proto.h67
-rw-r--r--include/scsi/scsi_request.h33
-rw-r--r--include/scsi/scsi_status.h74
-rw-r--r--include/scsi/scsi_tcq.h2
-rw-r--r--include/scsi/scsi_transport_fc.h66
-rw-r--r--include/scsi/scsi_transport_iscsi.h41
-rw-r--r--include/scsi/scsi_transport_sas.h1
-rw-r--r--include/scsi/scsicam.h7
-rw-r--r--include/scsi/sg.h41
-rw-r--r--include/scsi/srp.h32
-rw-r--r--include/scsi/viosrp.h17
-rw-r--r--include/soc/arc/aux.h2
-rw-r--r--include/soc/arc/timers.h4
-rw-r--r--include/soc/at91/atmel_tcb.h5
-rw-r--r--include/soc/at91/sama7-ddr.h85
-rw-r--r--include/soc/at91/sama7-sfrbu.h34
-rw-r--r--include/soc/bcm2835/raspberrypi-firmware.h13
-rw-r--r--include/soc/brcmstb/common.h12
-rw-r--r--include/soc/canaan/k210-sysctl.h43
-rw-r--r--include/soc/fsl/caam-blob.h103
-rw-r--r--include/soc/fsl/dpaa2-fd.h3
-rw-r--r--include/soc/fsl/dpaa2-io.h15
-rw-r--r--include/soc/fsl/qe/immap_qe.h3
-rw-r--r--include/soc/fsl/qe/qe.h51
-rw-r--r--include/soc/fsl/qe/qe_tdm.h4
-rw-r--r--include/soc/fsl/qe/ucc_fast.h9
-rw-r--r--include/soc/fsl/qe/ucc_slow.h15
-rw-r--r--include/soc/fsl/qman.h16
-rw-r--r--include/soc/imx/cpu.h37
-rw-r--r--include/soc/mediatek/smi.h28
-rw-r--r--include/soc/microchip/mpfs.h51
-rw-r--r--include/soc/mscc/ocelot.h955
-rw-r--r--include/soc/mscc/ocelot_ana.h18
-rw-r--r--include/soc/mscc/ocelot_dev.h78
-rw-r--r--include/soc/mscc/ocelot_ptp.h60
-rw-r--r--include/soc/mscc/ocelot_qsys.h20
-rw-r--r--include/soc/mscc/ocelot_sys.h23
-rw-r--r--include/soc/mscc/ocelot_vcap.h731
-rw-r--r--include/soc/mscc/vsc7514_regs.h29
-rw-r--r--include/soc/nps/common.h166
-rw-r--r--include/soc/nps/mtm.h59
-rw-r--r--include/soc/qcom/cmd-db.h1
-rw-r--r--include/soc/qcom/kryo-l2-accessors.h12
-rw-r--r--include/soc/qcom/qcom-spmi-pmic.h61
-rw-r--r--include/soc/qcom/rpmh.h12
-rw-r--r--include/soc/qcom/spm.h43
-rw-r--r--include/soc/qcom/tcs.h9
-rw-r--r--include/soc/rockchip/pm_domains.h25
-rw-r--r--include/soc/sifive/sifive_ccache.h16
-rw-r--r--include/soc/sifive/sifive_l2_cache.h16
-rw-r--r--include/soc/tegra/bpmp-abi.h923
-rw-r--r--include/soc/tegra/common.h46
-rw-r--r--include/soc/tegra/cpuidle.h2
-rw-r--r--include/soc/tegra/emc.h16
-rw-r--r--include/soc/tegra/fuse.h58
-rw-r--r--include/soc/tegra/irq.h20
-rw-r--r--include/soc/tegra/mc.h108
-rw-r--r--include/soc/tegra/pm.h39
-rw-r--r--include/soc/tegra/pmc.h23
-rw-r--r--include/soc/tegra/tegra-cbb.h47
-rw-r--r--include/sound/acp62_chip_offset_byte.h444
-rw-r--r--include/sound/compress_driver.h68
-rw-r--r--include/sound/control.h89
-rw-r--r--include/sound/core.h83
-rw-r--r--include/sound/cs35l41.h896
-rw-r--r--include/sound/cs42l42.h811
-rw-r--r--include/sound/dmaengine_pcm.h20
-rw-r--r--include/sound/emu10k1.h6
-rw-r--r--include/sound/emu8000.h3
-rw-r--r--include/sound/graph_card.h35
-rw-r--r--include/sound/gus.h4
-rw-r--r--include/sound/hda_codec.h47
-rw-r--r--include/sound/hda_register.h30
-rw-r--r--include/sound/hda_verbs.h2
-rw-r--r--include/sound/hdaudio.h44
-rw-r--r--include/sound/hdaudio_ext.h80
-rw-r--r--include/sound/hdmi-codec.h32
-rw-r--r--include/sound/intel-dsp-config.h10
-rw-r--r--include/sound/intel-nhlt.h81
-rw-r--r--include/sound/jack.h2
-rw-r--r--include/sound/madera-pdata.h2
-rw-r--r--include/sound/memalloc.h125
-rw-r--r--include/sound/omap-hdmi-audio.h2
-rw-r--r--include/sound/pcm.h207
-rw-r--r--include/sound/pcm_iec958.h8
-rw-r--r--include/sound/pcm_params.h12
-rw-r--r--include/sound/pxa2xx-lib.h17
-rw-r--r--include/sound/rawmidi.h10
-rw-r--r--include/sound/rt1015.h15
-rw-r--r--include/sound/rt5645.h30
-rw-r--r--include/sound/rt5670.h25
-rw-r--r--include/sound/rt5682.h11
-rw-r--r--include/sound/rt5682s.h49
-rw-r--r--include/sound/simple_card_utils.h154
-rw-r--r--include/sound/soc-acpi-intel-match.h8
-rw-r--r--include/sound/soc-acpi.h91
-rw-r--r--include/sound/soc-card.h73
-rw-r--r--include/sound/soc-component.h164
-rw-r--r--include/sound/soc-dai.h245
-rw-r--r--include/sound/soc-dapm.h29
-rw-r--r--include/sound/soc-dpcm.h33
-rw-r--r--include/sound/soc-jack.h132
-rw-r--r--include/sound/soc-link.h32
-rw-r--r--include/sound/soc-topology.h20
-rw-r--r--include/sound/soc.h431
-rw-r--r--include/sound/sof.h73
-rw-r--r--include/sound/sof/channel_map.h6
-rw-r--r--include/sound/sof/control.h8
-rw-r--r--include/sound/sof/dai-amd.h28
-rw-r--r--include/sound/sof/dai-imx.h2
-rw-r--r--include/sound/sof/dai-intel.h44
-rw-r--r--include/sound/sof/dai-mediatek.h23
-rw-r--r--include/sound/sof/dai.h58
-rw-r--r--include/sound/sof/debug.h43
-rw-r--r--include/sound/sof/ext_manifest.h123
-rw-r--r--include/sound/sof/ext_manifest4.h119
-rw-r--r--include/sound/sof/header.h27
-rw-r--r--include/sound/sof/info.h57
-rw-r--r--include/sound/sof/ipc4/header.h473
-rw-r--r--include/sound/sof/pm.h2
-rw-r--r--include/sound/sof/stream.h9
-rw-r--r--include/sound/sof/topology.h25
-rw-r--r--include/sound/sof/trace.h32
-rw-r--r--include/sound/sof/xtensa.h2
-rw-r--r--include/sound/timer.h8
-rw-r--r--include/sound/wm8960.h17
-rw-r--r--include/target/iscsi/iscsi_target_core.h116
-rw-r--r--include/target/iscsi/iscsi_transport.h128
-rw-r--r--include/target/target_core_backend.h21
-rw-r--r--include/target/target_core_base.h153
-rw-r--r--include/target/target_core_fabric.h31
-rw-r--r--include/trace/bpf_probe.h71
-rw-r--r--include/trace/define_custom_trace.h77
-rw-r--r--include/trace/define_trace.h14
-rw-r--r--include/trace/events/9p.h48
-rw-r--r--include/trace/events/afs.h410
-rw-r--r--include/trace/events/asoc.h1
-rw-r--r--include/trace/events/avc.h53
-rw-r--r--include/trace/events/bcache.h14
-rw-r--r--include/trace/events/block.h311
-rw-r--r--include/trace/events/btrfs.h522
-rw-r--r--include/trace/events/cachefiles.h788
-rw-r--r--include/trace/events/cgroup.h12
-rw-r--r--include/trace/events/clk.h44
-rw-r--r--include/trace/events/cma.h68
-rw-r--r--include/trace/events/compaction.h72
-rw-r--r--include/trace/events/damon.h46
-rw-r--r--include/trace/events/devfreq.h30
-rw-r--r--include/trace/events/devlink.h96
-rw-r--r--include/trace/events/dlm.h336
-rw-r--r--include/trace/events/dma_fence.h4
-rw-r--r--include/trace/events/erofs.h29
-rw-r--r--include/trace/events/error_report.h76
-rw-r--r--include/trace/events/ext4.h634
-rw-r--r--include/trace/events/f2fs.h522
-rw-r--r--include/trace/events/fib.h6
-rw-r--r--include/trace/events/fib6.h8
-rw-r--r--include/trace/events/filelock.h8
-rw-r--r--include/trace/events/filemap.h32
-rw-r--r--include/trace/events/fs.h122
-rw-r--r--include/trace/events/fscache.h676
-rw-r--r--include/trace/events/fsi.h86
-rw-r--r--include/trace/events/fsi_master_aspeed.h12
-rw-r--r--include/trace/events/gpu_mem.h57
-rw-r--r--include/trace/events/habanalabs.h93
-rw-r--r--include/trace/events/hswadsp.h385
-rw-r--r--include/trace/events/huge_memory.h42
-rw-r--r--include/trace/events/i2c_slave.h67
-rw-r--r--include/trace/events/intel_ifs.h41
-rw-r--r--include/trace/events/intel_iommu.h142
-rw-r--r--include/trace/events/io_uring.h531
-rw-r--r--include/trace/events/iocost.h91
-rw-r--r--include/trace/events/iommu.h10
-rw-r--r--include/trace/events/iscsi.h4
-rw-r--r--include/trace/events/jbd2.h113
-rw-r--r--include/trace/events/kmem.h154
-rw-r--r--include/trace/events/kvm.h157
-rw-r--r--include/trace/events/kyber.h19
-rw-r--r--include/trace/events/libata.h417
-rw-r--r--include/trace/events/lock.h63
-rw-r--r--include/trace/events/maple_tree.h123
-rw-r--r--include/trace/events/mctp.h78
-rw-r--r--include/trace/events/migrate.h74
-rw-r--r--include/trace/events/mmap.h121
-rw-r--r--include/trace/events/mmap_lock.h87
-rw-r--r--include/trace/events/mmflags.h113
-rw-r--r--include/trace/events/mptcp.h184
-rw-r--r--include/trace/events/neigh.h2
-rw-r--r--include/trace/events/net.h14
-rw-r--r--include/trace/events/netfs.h322
-rw-r--r--include/trace/events/netlink.h29
-rw-r--r--include/trace/events/nfs.h375
-rw-r--r--include/trace/events/nilfs2.h4
-rw-r--r--include/trace/events/osnoise.h142
-rw-r--r--include/trace/events/page_pool.h4
-rw-r--r--include/trace/events/page_ref.h4
-rw-r--r--include/trace/events/pagemap.h51
-rw-r--r--include/trace/events/percpu.h23
-rw-r--r--include/trace/events/power.h110
-rw-r--r--include/trace/events/qdisc.h103
-rw-r--r--include/trace/events/qla.h46
-rw-r--r--include/trace/events/qrtr.h115
-rw-r--r--include/trace/events/random.h313
-rw-r--r--include/trace/events/rcu.h166
-rw-r--r--include/trace/events/rdma.h41
-rw-r--r--include/trace/events/regulator.h32
-rw-r--r--include/trace/events/rpcgss.h298
-rw-r--r--include/trace/events/rpcrdma.h1540
-rw-r--r--include/trace/events/rv.h142
-rw-r--r--include/trace/events/rwmmio.h97
-rw-r--r--include/trace/events/rxrpc.h367
-rw-r--r--include/trace/events/sched.h174
-rw-r--r--include/trace/events/scmi.h106
-rw-r--r--include/trace/events/scsi.h83
-rw-r--r--include/trace/events/skb.h31
-rw-r--r--include/trace/events/sock.h66
-rw-r--r--include/trace/events/sof.h121
-rw-r--r--include/trace/events/sof_intel.h148
-rw-r--r--include/trace/events/spi.h57
-rw-r--r--include/trace/events/spmi.h12
-rw-r--r--include/trace/events/sunrpc.h1595
-rw-r--r--include/trace/events/sunrpc_base.h18
-rw-r--r--include/trace/events/swiotlb.h29
-rw-r--r--include/trace/events/target.h23
-rw-r--r--include/trace/events/tcp.h143
-rw-r--r--include/trace/events/thermal.h47
-rw-r--r--include/trace/events/thermal_pressure.h29
-rw-r--r--include/trace/events/thp.h54
-rw-r--r--include/trace/events/timer.h7
-rw-r--r--include/trace/events/ufs.h184
-rw-r--r--include/trace/events/vmscan.h127
-rw-r--r--include/trace/events/vsock_virtio_transport_common.h5
-rw-r--r--include/trace/events/watchdog.h66
-rw-r--r--include/trace/events/wbt.h16
-rw-r--r--include/trace/events/workqueue.h14
-rw-r--r--include/trace/events/writeback.h94
-rw-r--r--include/trace/events/xdp.h84
-rw-r--r--include/trace/events/xen.h22
-rw-r--r--include/trace/perf.h23
-rw-r--r--include/trace/stages/init.h37
-rw-r--r--include/trace/stages/stage1_struct_define.h54
-rw-r--r--include/trace/stages/stage2_data_offsets.h57
-rw-r--r--include/trace/stages/stage3_trace_output.h135
-rw-r--r--include/trace/stages/stage4_event_fields.h68
-rw-r--r--include/trace/stages/stage5_get_offsets.h93
-rw-r--r--include/trace/stages/stage6_event_callback.h118
-rw-r--r--include/trace/stages/stage7_class_define.h36
-rw-r--r--include/trace/syscall.h6
-rw-r--r--include/trace/trace_custom_events.h221
-rw-r--r--include/trace/trace_events.h393
-rw-r--r--include/uapi/asm-generic/fcntl.h27
-rw-r--r--include/uapi/asm-generic/hugetlb_encode.h25
-rw-r--r--include/uapi/asm-generic/mman-common.h7
-rw-r--r--include/uapi/asm-generic/poll.h2
-rw-r--r--include/uapi/asm-generic/sembuf.h6
-rw-r--r--include/uapi/asm-generic/shmbuf.h4
-rw-r--r--include/uapi/asm-generic/siginfo.h36
-rw-r--r--include/uapi/asm-generic/signal-defs.h64
-rw-r--r--include/uapi/asm-generic/signal.h31
-rw-r--r--include/uapi/asm-generic/socket.h13
-rw-r--r--include/uapi/asm-generic/termbits-common.h66
-rw-r--r--include/uapi/asm-generic/termbits.h239
-rw-r--r--include/uapi/asm-generic/unistd.h68
-rw-r--r--include/uapi/drm/amdgpu_drm.h144
-rw-r--r--include/uapi/drm/drm.h278
-rw-r--r--include/uapi/drm/drm_fourcc.h714
-rw-r--r--include/uapi/drm/drm_mode.h377
-rw-r--r--include/uapi/drm/etnaviv_drm.h3
-rw-r--r--include/uapi/drm/i915_drm.h1844
-rw-r--r--include/uapi/drm/lima_drm.h9
-rw-r--r--include/uapi/drm/mga_drm.h22
-rw-r--r--include/uapi/drm/msm_drm.h81
-rw-r--r--include/uapi/drm/panfrost_drm.h56
-rw-r--r--include/uapi/drm/tegra_drm.h425
-rw-r--r--include/uapi/drm/v3d_drm.h214
-rw-r--r--include/uapi/drm/virtgpu_drm.h73
-rw-r--r--include/uapi/drm/vmwgfx_drm.h67
-rw-r--r--include/uapi/linux/acct.h3
-rw-r--r--include/uapi/linux/acrn.h650
-rw-r--r--include/uapi/linux/agpgart.h9
-rw-r--r--include/uapi/linux/amt.h62
-rw-r--r--include/uapi/linux/android/binder.h59
-rw-r--r--include/uapi/linux/atmioc.h2
-rw-r--r--include/uapi/linux/audit.h36
-rw-r--r--include/uapi/linux/auto_dev-ioctl.h2
-rw-r--r--include/uapi/linux/auxvec.h3
-rw-r--r--include/uapi/linux/batadv_packet.h52
-rw-r--r--include/uapi/linux/batman_adv.h35
-rw-r--r--include/uapi/linux/bcache.h429
-rw-r--r--include/uapi/linux/binfmts.h4
-rw-r--r--include/uapi/linux/blkpg.h28
-rw-r--r--include/uapi/linux/blkzoned.h32
-rw-r--r--include/uapi/linux/bpf.h3709
-rw-r--r--include/uapi/linux/btf.h76
-rw-r--r--include/uapi/linux/btrfs.h230
-rw-r--r--include/uapi/linux/btrfs_tree.h95
-rw-r--r--include/uapi/linux/byteorder/big_endian.h1
-rw-r--r--include/uapi/linux/byteorder/little_endian.h1
-rw-r--r--include/uapi/linux/cachefiles.h68
-rw-r--r--include/uapi/linux/caif/caif_socket.h2
-rw-r--r--include/uapi/linux/can.h102
-rw-r--r--include/uapi/linux/can/bcm.h2
-rw-r--r--include/uapi/linux/can/error.h20
-rw-r--r--include/uapi/linux/can/gw.h4
-rw-r--r--include/uapi/linux/can/isotp.h182
-rw-r--r--include/uapi/linux/can/j1939.h9
-rw-r--r--include/uapi/linux/can/netlink.h45
-rw-r--r--include/uapi/linux/can/raw.h4
-rw-r--r--include/uapi/linux/capability.h55
-rw-r--r--include/uapi/linux/ccs.h18
-rw-r--r--include/uapi/linux/cdrom.h28
-rw-r--r--include/uapi/linux/cec-funcs.h16
-rw-r--r--include/uapi/linux/cec.h27
-rw-r--r--include/uapi/linux/cfm_bridge.h64
-rw-r--r--include/uapi/linux/cifs/cifs_mount.h1
-rw-r--r--include/uapi/linux/cifs/cifs_netlink.h63
-rw-r--r--include/uapi/linux/close_range.h12
-rw-r--r--include/uapi/linux/comedi.h (renamed from drivers/staging/comedi/comedi.h)6
-rw-r--r--include/uapi/linux/connector.h2
-rw-r--r--include/uapi/linux/const.h5
-rw-r--r--include/uapi/linux/coresight-stm.h7
-rw-r--r--include/uapi/linux/counter.h164
-rw-r--r--include/uapi/linux/cxl_mem.h184
-rw-r--r--include/uapi/linux/cyclades.h497
-rw-r--r--include/uapi/linux/cycx_cfm.h2
-rw-r--r--include/uapi/linux/devlink.h209
-rw-r--r--include/uapi/linux/dlm.h1
-rw-r--r--include/uapi/linux/dlm_device.h4
-rw-r--r--include/uapi/linux/dm-ioctl.h32
-rw-r--r--include/uapi/linux/dm-log-userspace.h2
-rw-r--r--include/uapi/linux/dma-buf.h140
-rw-r--r--include/uapi/linux/dn.h149
-rw-r--r--include/uapi/linux/dqblk_xfs.h21
-rw-r--r--include/uapi/linux/dw100.h14
-rw-r--r--include/uapi/linux/elf-em.h1
-rw-r--r--include/uapi/linux/elf.h33
-rw-r--r--include/uapi/linux/elfcore.h101
-rw-r--r--include/uapi/linux/errqueue.h16
-rw-r--r--include/uapi/linux/ethtool.h310
-rw-r--r--include/uapi/linux/ethtool_netlink.h655
-rw-r--r--include/uapi/linux/f2fs.h98
-rw-r--r--include/uapi/linux/fanotify.h63
-rw-r--r--include/uapi/linux/fb.h3
-rw-r--r--include/uapi/linux/fcntl.h10
-rw-r--r--include/uapi/linux/fd.h72
-rw-r--r--include/uapi/linux/fdreg.h30
-rw-r--r--include/uapi/linux/fiemap.h8
-rw-r--r--include/uapi/linux/firewire-cdev.h16
-rw-r--r--include/uapi/linux/fpga-dfl.h82
-rw-r--r--include/uapi/linux/fs.h6
-rw-r--r--include/uapi/linux/fscrypt.h14
-rw-r--r--include/uapi/linux/fsi.h14
-rw-r--r--include/uapi/linux/fsl_mc.h34
-rw-r--r--include/uapi/linux/fsmap.h2
-rw-r--r--include/uapi/linux/fsverity.h63
-rw-r--r--include/uapi/linux/fuse.h165
-rw-r--r--include/uapi/linux/futex.h27
-rw-r--r--include/uapi/linux/genetlink.h14
-rw-r--r--include/uapi/linux/gfs2_ondisk.h11
-rw-r--r--include/uapi/linux/gpio.h399
-rw-r--r--include/uapi/linux/gtp.h3
-rw-r--r--include/uapi/linux/hid.h26
-rw-r--r--include/uapi/linux/hidraw.h6
-rw-r--r--include/uapi/linux/hsr_netlink.h2
-rw-r--r--include/uapi/linux/hyperv.h19
-rw-r--r--include/uapi/linux/i2c-dev.h25
-rw-r--r--include/uapi/linux/i2c.h128
-rw-r--r--include/uapi/linux/icmp.h63
-rw-r--r--include/uapi/linux/icmpv6.h5
-rw-r--r--include/uapi/linux/idxd.h166
-rw-r--r--include/uapi/linux/if.h1
-rw-r--r--include/uapi/linux/if_addr.h9
-rw-r--r--include/uapi/linux/if_alg.h19
-rw-r--r--include/uapi/linux/if_arcnet.h6
-rw-r--r--include/uapi/linux/if_arp.h1
-rw-r--r--include/uapi/linux/if_bonding.h12
-rw-r--r--include/uapi/linux/if_bridge.h447
-rw-r--r--include/uapi/linux/if_ether.h12
-rw-r--r--include/uapi/linux/if_fddi.h2
-rw-r--r--include/uapi/linux/if_frad.h123
-rw-r--r--include/uapi/linux/if_link.h416
-rw-r--r--include/uapi/linux/if_macsec.h10
-rw-r--r--include/uapi/linux/if_packet.h12
-rw-r--r--include/uapi/linux/if_pppol2tp.h2
-rw-r--r--include/uapi/linux/if_pppox.h4
-rw-r--r--include/uapi/linux/if_tun.h4
-rw-r--r--include/uapi/linux/if_tunnel.h4
-rw-r--r--include/uapi/linux/if_x25.h2
-rw-r--r--include/uapi/linux/if_xdp.h5
-rw-r--r--include/uapi/linux/igmp.h6
-rw-r--r--include/uapi/linux/iio/buffer.h10
-rw-r--r--include/uapi/linux/iio/types.h12
-rw-r--r--include/uapi/linux/in.h35
-rw-r--r--include/uapi/linux/in6.h2
-rw-r--r--include/uapi/linux/inet_diag.h29
-rw-r--r--include/uapi/linux/inotify.h2
-rw-r--r--include/uapi/linux/input-event-codes.h39
-rw-r--r--include/uapi/linux/input.h14
-rw-r--r--include/uapi/linux/io_uring.h477
-rw-r--r--include/uapi/linux/ioam6.h133
-rw-r--r--include/uapi/linux/ioam6_genl.h52
-rw-r--r--include/uapi/linux/ioam6_iptunnel.h58
-rw-r--r--include/uapi/linux/iommu.h177
-rw-r--r--include/uapi/linux/ioprio.h52
-rw-r--r--include/uapi/linux/ip.h5
-rw-r--r--include/uapi/linux/ip_vs.h4
-rw-r--r--include/uapi/linux/ipmi.h16
-rw-r--r--include/uapi/linux/ipmi_msgdefs.h2
-rw-r--r--include/uapi/linux/ipv6.h8
-rw-r--r--include/uapi/linux/ipx.h87
-rw-r--r--include/uapi/linux/iso_fs.h4
-rw-r--r--include/uapi/linux/isst_if.h2
-rw-r--r--include/uapi/linux/jffs2.h8
-rw-r--r--include/uapi/linux/kcov.h2
-rw-r--r--include/uapi/linux/kd.h2
-rw-r--r--include/uapi/linux/kernel.h9
-rw-r--r--include/uapi/linux/kexec.h6
-rw-r--r--include/uapi/linux/keyboard.h2
-rw-r--r--include/uapi/linux/keyctl.h2
-rw-r--r--include/uapi/linux/kfd_ioctl.h362
-rw-r--r--include/uapi/linux/kfd_sysfs.h108
-rw-r--r--include/uapi/linux/kvm.h628
-rw-r--r--include/uapi/linux/kvm_para.h1
-rw-r--r--include/uapi/linux/l2tp.h10
-rw-r--r--include/uapi/linux/landlock.h165
-rw-r--r--include/uapi/linux/lightnvm.h225
-rw-r--r--include/uapi/linux/lirc.h26
-rw-r--r--include/uapi/linux/loadpin.h22
-rw-r--r--include/uapi/linux/loop.h42
-rw-r--r--include/uapi/linux/lwtunnel.h12
-rw-r--r--include/uapi/linux/magic.h13
-rw-r--r--include/uapi/linux/major.h2
-rw-r--r--include/uapi/linux/map_to_14segment.h241
-rw-r--r--include/uapi/linux/map_to_7segment.h27
-rw-r--r--include/uapi/linux/mctp.h68
-rw-r--r--include/uapi/linux/mdio.h112
-rw-r--r--include/uapi/linux/media-bus-format.h18
-rw-r--r--include/uapi/linux/media.h5
-rw-r--r--include/uapi/linux/mei.h49
-rw-r--r--include/uapi/linux/membarrier.h26
-rw-r--r--include/uapi/linux/mempolicy.h13
-rw-r--r--include/uapi/linux/mic_common.h235
-rw-r--r--include/uapi/linux/mic_ioctl.h77
-rw-r--r--include/uapi/linux/mii.h7
-rw-r--r--include/uapi/linux/minix_fs.h4
-rw-r--r--include/uapi/linux/misc/bcm_vk.h84
-rw-r--r--include/uapi/linux/mman.h6
-rw-r--r--include/uapi/linux/mmc/ioctl.h3
-rw-r--r--include/uapi/linux/module.h1
-rw-r--r--include/uapi/linux/mount.h21
-rw-r--r--include/uapi/linux/mptcp.h243
-rw-r--r--include/uapi/linux/mroute.h5
-rw-r--r--include/uapi/linux/mroute6.h3
-rw-r--r--include/uapi/linux/mrp_bridge.h74
-rw-r--r--include/uapi/linux/n_r3964.h99
-rw-r--r--include/uapi/linux/nbd-netlink.h1
-rw-r--r--include/uapi/linux/ndctl.h18
-rw-r--r--include/uapi/linux/neighbour.h66
-rw-r--r--include/uapi/linux/net_dropmon.h6
-rw-r--r--include/uapi/linux/net_tstamp.h40
-rw-r--r--include/uapi/linux/netfilter.h6
-rw-r--r--include/uapi/linux/netfilter/ipset/ip_set.h10
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_common.h10
-rw-r--r--include/uapi/linux/netfilter/nf_nat.h4
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h96
-rw-r--r--include/uapi/linux/netfilter/nfnetlink.h3
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_conntrack.h14
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_cthelper.h2
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_hook.h64
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_log.h2
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_queue.h5
-rw-r--r--include/uapi/linux/netfilter/x_tables.h6
-rw-r--r--include/uapi/linux/netfilter/xt_AUDIT.h4
-rw-r--r--include/uapi/linux/netfilter/xt_IDLETIMER.h30
-rw-r--r--include/uapi/linux/netfilter/xt_SECMARK.h6
-rw-r--r--include/uapi/linux/netfilter/xt_connmark.h13
-rw-r--r--include/uapi/linux/netfilter/xt_osf.h14
-rw-r--r--include/uapi/linux/netfilter_arp/arp_tables.h6
-rw-r--r--include/uapi/linux/netfilter_bridge/ebt_among.h2
-rw-r--r--include/uapi/linux/netfilter_decnet.h72
-rw-r--r--include/uapi/linux/netfilter_ipv4/ip_tables.h6
-rw-r--r--include/uapi/linux/netfilter_ipv6/ip6_tables.h4
-rw-r--r--include/uapi/linux/netfilter_ipv6/ip6t_LOG.h2
-rw-r--r--include/uapi/linux/netlink.h146
-rw-r--r--include/uapi/linux/nexthop.h50
-rw-r--r--include/uapi/linux/nfc.h6
-rw-r--r--include/uapi/linux/nfs3.h6
-rw-r--r--include/uapi/linux/nfs4.h13
-rw-r--r--include/uapi/linux/nfs_fs.h3
-rw-r--r--include/uapi/linux/nfsacl.h2
-rw-r--r--include/uapi/linux/nfsd/nfsfh.h105
-rw-r--r--include/uapi/linux/nitro_enclaves.h359
-rw-r--r--include/uapi/linux/nl80211-vnd-intel.h106
-rw-r--r--include/uapi/linux/nl80211.h1306
-rw-r--r--include/uapi/linux/nvme_ioctl.h34
-rw-r--r--include/uapi/linux/omap3isp.h21
-rw-r--r--include/uapi/linux/openat2.h4
-rw-r--r--include/uapi/linux/openvswitch.h45
-rw-r--r--include/uapi/linux/pci_regs.h202
-rw-r--r--include/uapi/linux/pcitest.h10
-rw-r--r--include/uapi/linux/perf_event.h309
-rw-r--r--include/uapi/linux/pfkeyv2.h2
-rw-r--r--include/uapi/linux/pfrut.h262
-rw-r--r--include/uapi/linux/pidfd.h12
-rw-r--r--include/uapi/linux/pkt_cls.h90
-rw-r--r--include/uapi/linux/pkt_sched.h47
-rw-r--r--include/uapi/linux/ppp-ioctl.h2
-rw-r--r--include/uapi/linux/prctl.h46
-rw-r--r--include/uapi/linux/psample.h32
-rw-r--r--include/uapi/linux/psci.h18
-rw-r--r--include/uapi/linux/psp-sev.h2
-rw-r--r--include/uapi/linux/ptp_clock.h38
-rw-r--r--include/uapi/linux/ptrace.h15
-rw-r--r--include/uapi/linux/raid/md_p.h4
-rw-r--r--include/uapi/linux/random.h2
-rw-r--r--include/uapi/linux/raw.h19
-rw-r--r--include/uapi/linux/rds.h4
-rw-r--r--include/uapi/linux/reiserfs_xattr.h2
-rw-r--r--include/uapi/linux/remoteproc_cdev.h37
-rw-r--r--include/uapi/linux/resource.h13
-rw-r--r--include/uapi/linux/rfkill.h106
-rw-r--r--include/uapi/linux/rkisp1-config.h995
-rw-r--r--include/uapi/linux/romfs_fs.h4
-rw-r--r--include/uapi/linux/rpl.h48
-rw-r--r--include/uapi/linux/rpl_iptunnel.h21
-rw-r--r--include/uapi/linux/rpmsg.h23
-rw-r--r--include/uapi/linux/rpmsg_types.h11
-rw-r--r--include/uapi/linux/rseq.h20
-rw-r--r--include/uapi/linux/rtc.h47
-rw-r--r--include/uapi/linux/rtnetlink.h103
-rw-r--r--include/uapi/linux/rxrpc.h2
-rw-r--r--include/uapi/linux/sched.h5
-rw-r--r--include/uapi/linux/sched/types.h2
-rw-r--r--include/uapi/linux/sctp.h25
-rw-r--r--include/uapi/linux/sdla.h117
-rw-r--r--include/uapi/linux/seccomp.h29
-rw-r--r--include/uapi/linux/sed-opal.h13
-rw-r--r--include/uapi/linux/seg6.h2
-rw-r--r--include/uapi/linux/seg6_iptunnel.h25
-rw-r--r--include/uapi/linux/seg6_local.h57
-rw-r--r--include/uapi/linux/serial.h24
-rw-r--r--include/uapi/linux/serial_core.h29
-rw-r--r--include/uapi/linux/serial_reg.h5
-rw-r--r--include/uapi/linux/serio.h10
-rw-r--r--include/uapi/linux/sev-guest.h80
-rw-r--r--include/uapi/linux/smc.h267
-rw-r--r--include/uapi/linux/snmp.h7
-rw-r--r--include/uapi/linux/sock_diag.h26
-rw-r--r--include/uapi/linux/socket.h9
-rw-r--r--include/uapi/linux/soundcard.h2
-rw-r--r--include/uapi/linux/spi/spi.h42
-rw-r--r--include/uapi/linux/spi/spidev.h26
-rw-r--r--include/uapi/linux/stat.h28
-rw-r--r--include/uapi/linux/stddef.h41
-rw-r--r--include/uapi/linux/stm.h2
-rw-r--r--include/uapi/linux/surface_aggregator/cdev.h147
-rw-r--r--include/uapi/linux/surface_aggregator/dtx.h146
-rw-r--r--include/uapi/linux/swab.h6
-rw-r--r--include/uapi/linux/sysctl.h41
-rw-r--r--include/uapi/linux/target_core_user.h27
-rw-r--r--include/uapi/linux/taskstats.h30
-rw-r--r--include/uapi/linux/tc_act/tc_bpf.h5
-rw-r--r--include/uapi/linux/tc_act/tc_gate.h47
-rw-r--r--include/uapi/linux/tc_act/tc_mpls.h1
-rw-r--r--include/uapi/linux/tc_act/tc_skbedit.h15
-rw-r--r--include/uapi/linux/tc_act/tc_skbmod.h8
-rw-r--r--include/uapi/linux/tc_act/tc_tunnel_key.h5
-rw-r--r--include/uapi/linux/tc_act/tc_vlan.h9
-rw-r--r--include/uapi/linux/tcp.h31
-rw-r--r--include/uapi/linux/tee.h28
-rw-r--r--include/uapi/linux/termios.h15
-rw-r--r--include/uapi/linux/thermal.h93
-rw-r--r--include/uapi/linux/tipc.h2
-rw-r--r--include/uapi/linux/tipc_config.h28
-rw-r--r--include/uapi/linux/tipc_netlink.h2
-rw-r--r--include/uapi/linux/tls.h79
-rw-r--r--include/uapi/linux/tty.h8
-rw-r--r--include/uapi/linux/tty_flags.h8
-rw-r--r--include/uapi/linux/types.h11
-rw-r--r--include/uapi/linux/ublk_cmd.h233
-rw-r--r--include/uapi/linux/um_timetravel.h128
-rw-r--r--include/uapi/linux/usb/audio.h2
-rw-r--r--include/uapi/linux/usb/cdc.h19
-rw-r--r--include/uapi/linux/usb/ch9.h34
-rw-r--r--include/uapi/linux/usb/raw_gadget.h249
-rw-r--r--include/uapi/linux/usb/tmc.h3
-rw-r--r--include/uapi/linux/usb/video.h3
-rw-r--r--include/uapi/linux/usbdevice_fs.h4
-rw-r--r--include/uapi/linux/usbip.h26
-rw-r--r--include/uapi/linux/userfaultfd.h104
-rw-r--r--include/uapi/linux/uuid.h10
-rw-r--r--include/uapi/linux/uvcvideo.h10
-rw-r--r--include/uapi/linux/v4l2-controls.h2061
-rw-r--r--include/uapi/linux/v4l2-mediabus.h15
-rw-r--r--include/uapi/linux/v4l2-subdev.h38
-rw-r--r--include/uapi/linux/vbox_vmmdev_types.h3
-rw-r--r--include/uapi/linux/vboxguest.h28
-rw-r--r--include/uapi/linux/vdpa.h65
-rw-r--r--include/uapi/linux/vduse.h353
-rw-r--r--include/uapi/linux/vfio.h550
-rw-r--r--include/uapi/linux/vfio_ccw.h19
-rw-r--r--include/uapi/linux/vfio_zdev.h85
-rw-r--r--include/uapi/linux/vhost.h70
-rw-r--r--include/uapi/linux/vhost_types.h43
-rw-r--r--include/uapi/linux/videodev2.h203
-rw-r--r--include/uapi/linux/virtio_9p.h6
-rw-r--r--include/uapi/linux/virtio_balloon.h18
-rw-r--r--include/uapi/linux/virtio_blk.h45
-rw-r--r--include/uapi/linux/virtio_bt.h31
-rw-r--r--include/uapi/linux/virtio_config.h23
-rw-r--r--include/uapi/linux/virtio_console.h8
-rw-r--r--include/uapi/linux/virtio_crypto.h106
-rw-r--r--include/uapi/linux/virtio_fs.h5
-rw-r--r--include/uapi/linux/virtio_gpio.h72
-rw-r--r--include/uapi/linux/virtio_gpu.h127
-rw-r--r--include/uapi/linux/virtio_i2c.h47
-rw-r--r--include/uapi/linux/virtio_ids.h68
-rw-r--r--include/uapi/linux/virtio_input.h18
-rw-r--r--include/uapi/linux/virtio_iommu.h8
-rw-r--r--include/uapi/linux/virtio_mem.h214
-rw-r--r--include/uapi/linux/virtio_mmio.h11
-rw-r--r--include/uapi/linux/virtio_net.h144
-rw-r--r--include/uapi/linux/virtio_pci.h13
-rw-r--r--include/uapi/linux/virtio_pcidev.h65
-rw-r--r--include/uapi/linux/virtio_pmem.h4
-rw-r--r--include/uapi/linux/virtio_ring.h64
-rw-r--r--include/uapi/linux/virtio_scmi.h24
-rw-r--r--include/uapi/linux/virtio_scsi.h20
-rw-r--r--include/uapi/linux/virtio_snd.h334
-rw-r--r--include/uapi/linux/virtio_vsock.h10
-rw-r--r--include/uapi/linux/vm_sockets.h39
-rw-r--r--include/uapi/linux/watch_queue.h104
-rw-r--r--include/uapi/linux/wimax.h239
-rw-r--r--include/uapi/linux/wimax/i2400m.h572
-rw-r--r--include/uapi/linux/wireless.h11
-rw-r--r--include/uapi/linux/wwan.h16
-rw-r--r--include/uapi/linux/xattr.h4
-rw-r--r--include/uapi/linux/xdp_diag.h11
-rw-r--r--include/uapi/linux/xfrm.h36
-rw-r--r--include/uapi/misc/fastrpc.h86
-rw-r--r--include/uapi/misc/habanalabs.h1710
-rw-r--r--include/uapi/misc/uacce/hisi_qm.h39
-rw-r--r--include/uapi/misc/uacce/uacce.h38
-rw-r--r--include/uapi/mtd/mtd-abi.h73
-rw-r--r--include/uapi/mtd/ubi-user.h8
-rw-r--r--include/uapi/rdma/bnxt_re-abi.h11
-rw-r--r--include/uapi/rdma/efa-abi.h34
-rw-r--r--include/uapi/rdma/erdma-abi.h49
-rw-r--r--include/uapi/rdma/hfi/hfi1_user.h5
-rw-r--r--include/uapi/rdma/hns-abi.h16
-rw-r--r--include/uapi/rdma/i40iw-abi.h107
-rw-r--r--include/uapi/rdma/ib_user_ioctl_cmds.h126
-rw-r--r--include/uapi/rdma/ib_user_ioctl_verbs.h60
-rw-r--r--include/uapi/rdma/ib_user_mad.h2
-rw-r--r--include/uapi/rdma/ib_user_verbs.h139
-rw-r--r--include/uapi/rdma/irdma-abi.h111
-rw-r--r--include/uapi/rdma/mlx5-abi.h34
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h106
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_verbs.h36
-rw-r--r--include/uapi/rdma/qedr-abi.h10
-rw-r--r--include/uapi/rdma/rdma_netlink.h30
-rw-r--r--include/uapi/rdma/rdma_user_cm.h17
-rw-r--r--include/uapi/rdma/rdma_user_ioctl.h2
-rw-r--r--include/uapi/rdma/rdma_user_ioctl_cmds.h4
-rw-r--r--include/uapi/rdma/rdma_user_rxe.h61
-rw-r--r--include/uapi/rdma/vmw_pvrdma-abi.h7
-rw-r--r--include/uapi/scsi/fc/fc_els.h429
-rw-r--r--include/uapi/scsi/scsi_bsg_fc.h2
-rw-r--r--include/uapi/scsi/scsi_bsg_mpi3mr.h582
-rw-r--r--include/uapi/scsi/scsi_netlink_fc.h7
-rw-r--r--include/uapi/sound/asequencer.h16
-rw-r--r--include/uapi/sound/asoc.h31
-rw-r--r--include/uapi/sound/asound.h63
-rw-r--r--include/uapi/sound/asound_fm.h15
-rw-r--r--include/uapi/sound/compress_offload.h23
-rw-r--r--include/uapi/sound/compress_params.h81
-rw-r--r--include/uapi/sound/emu10k1.h16
-rw-r--r--include/uapi/sound/firewire.h156
-rw-r--r--include/uapi/sound/hdsp.h14
-rw-r--r--include/uapi/sound/hdspm.h15
-rw-r--r--include/uapi/sound/intel/avs/tokens.h126
-rw-r--r--include/uapi/sound/sb16_csp.h15
-rw-r--r--include/uapi/sound/sfnt_info.h15
-rw-r--r--include/uapi/sound/skl-tplg-interface.h4
-rw-r--r--include/uapi/sound/snd_ar_tokens.h208
-rw-r--r--include/uapi/sound/snd_sst_tokens.h16
-rw-r--r--include/uapi/sound/sof/abi.h4
-rw-r--r--include/uapi/sound/sof/header.h32
-rw-r--r--include/uapi/sound/sof/tokens.h63
-rw-r--r--include/uapi/sound/tlv.h11
-rw-r--r--include/uapi/sound/usb_stream.h16
-rw-r--r--include/uapi/xen/gntdev.h10
-rw-r--r--include/ufs/ufs.h (renamed from drivers/scsi/ufs/ufs.h)219
-rw-r--r--include/ufs/ufs_quirks.h (renamed from drivers/scsi/ufs/ufs_quirks.h)39
-rw-r--r--include/ufs/ufshcd.h1241
-rw-r--r--include/ufs/ufshci.h (renamed from drivers/scsi/ufs/ufshci.h)155
-rw-r--r--include/ufs/unipro.h (renamed from drivers/scsi/ufs/unipro.h)141
-rw-r--r--include/vdso/bits.h9
-rw-r--r--include/vdso/clocksource.h22
-rw-r--r--include/vdso/const.h10
-rw-r--r--include/vdso/datapage.h50
-rw-r--r--include/vdso/jiffies.h11
-rw-r--r--include/vdso/ktime.h16
-rw-r--r--include/vdso/limits.h19
-rw-r--r--include/vdso/math64.h24
-rw-r--r--include/vdso/processor.h14
-rw-r--r--include/vdso/time.h12
-rw-r--r--include/vdso/time32.h17
-rw-r--r--include/vdso/time64.h15
-rw-r--r--include/vdso/vsyscall.h3
-rw-r--r--include/video/imx-ipu-v3.h5
-rw-r--r--include/video/mbxfb.h99
-rw-r--r--include/video/mmp_disp.h2
-rw-r--r--include/video/of_display_timing.h2
-rw-r--r--include/video/radeon.h2
-rw-r--r--include/video/samsung_fimd.h6
-rw-r--r--include/video/sstfb.h4
-rw-r--r--include/video/vga.h20
-rw-r--r--include/xen/acpi.h35
-rw-r--r--include/xen/arm/hypercall.h15
-rw-r--r--include/xen/arm/page-coherent.h20
-rw-r--r--include/xen/arm/page.h15
-rw-r--r--include/xen/arm/swiotlb-xen.h20
-rw-r--r--include/xen/arm/xen-ops.h18
-rw-r--r--include/xen/balloon.h8
-rw-r--r--include/xen/events.h56
-rw-r--r--include/xen/grant_table.h61
-rw-r--r--include/xen/hvm.h4
-rw-r--r--include/xen/interface/callback.h19
-rw-r--r--include/xen/interface/elfnote.h29
-rw-r--r--include/xen/interface/event_channel.h4
-rw-r--r--include/xen/interface/features.h16
-rw-r--r--include/xen/interface/grant_table.h180
-rw-r--r--include/xen/interface/hvm/dm_op.h19
-rw-r--r--include/xen/interface/hvm/hvm_op.h41
-rw-r--r--include/xen/interface/hvm/hvm_vcpu.h29
-rw-r--r--include/xen/interface/hvm/params.h20
-rw-r--r--include/xen/interface/hvm/start_info.h19
-rw-r--r--include/xen/interface/io/9pfs.h19
-rw-r--r--include/xen/interface/io/blkif.h2
-rw-r--r--include/xen/interface/io/console.h2
-rw-r--r--include/xen/interface/io/displif.h110
-rw-r--r--include/xen/interface/io/fbif.h19
-rw-r--r--include/xen/interface/io/kbdif.h19
-rw-r--r--include/xen/interface/io/netif.h39
-rw-r--r--include/xen/interface/io/pciif.h19
-rw-r--r--include/xen/interface/io/protocols.h2
-rw-r--r--include/xen/interface/io/pvcalls.h2
-rw-r--r--include/xen/interface/io/ring.h270
-rw-r--r--include/xen/interface/io/sndif.h19
-rw-r--r--include/xen/interface/io/usbif.h405
-rw-r--r--include/xen/interface/io/vscsiif.h152
-rw-r--r--include/xen/interface/io/xenbus.h12
-rw-r--r--include/xen/interface/io/xs_wire.h39
-rw-r--r--include/xen/interface/memory.h2
-rw-r--r--include/xen/interface/nmi.h2
-rw-r--r--include/xen/interface/physdev.h20
-rw-r--r--include/xen/interface/platform.h19
-rw-r--r--include/xen/interface/sched.h19
-rw-r--r--include/xen/interface/vcpu.h19
-rw-r--r--include/xen/interface/version.h2
-rw-r--r--include/xen/interface/xen-mca.h1
-rw-r--r--include/xen/interface/xen.h26
-rw-r--r--include/xen/interface/xenpmu.h2
-rw-r--r--include/xen/page.h1
-rw-r--r--include/xen/pci.h28
-rw-r--r--include/xen/swiotlb-xen.h10
-rw-r--r--include/xen/xen-ops.h71
-rw-r--r--include/xen/xen.h19
-rw-r--r--include/xen/xenbus.h42
-rw-r--r--include/xen/xenbus_dev.h2
3667 files changed, 261986 insertions, 98187 deletions
diff --git a/include/linux/platform_data/pcmcia-pxa2xx_viper.h b/arch/arm/mach-pxa/viper-pcmcia.h
index a23b58aff9e1..a23b58aff9e1 100644
--- a/include/linux/platform_data/pcmcia-pxa2xx_viper.h
+++ b/arch/arm/mach-pxa/viper-pcmcia.h
diff --git a/include/linux/elevator.h b/block/elevator.h
index 901bda352dcb..3f0593b3bf9d 100644
--- a/include/linux/elevator.h
+++ b/block/elevator.h
@@ -1,17 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_ELEVATOR_H
-#define _LINUX_ELEVATOR_H
+#ifndef _ELEVATOR_H
+#define _ELEVATOR_H
#include <linux/percpu.h>
#include <linux/hashtable.h>
-#ifdef CONFIG_BLOCK
-
struct io_cq;
struct elevator_type;
-#ifdef CONFIG_BLK_DEBUG_FS
struct blk_mq_debugfs_attr;
-#endif
/*
* Return values from elevator merger
@@ -34,12 +30,12 @@ struct elevator_mq_ops {
void (*depth_updated)(struct blk_mq_hw_ctx *);
bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
- bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int);
+ bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int);
int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
void (*requests_merged)(struct request_queue *, struct request *, struct request *);
- void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *);
- void (*prepare_request)(struct request *, struct bio *bio);
+ void (*limit_depth)(blk_opf_t, struct blk_mq_alloc_data *);
+ void (*prepare_request)(struct request *);
void (*finish_request)(struct request *);
void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
@@ -117,9 +113,11 @@ extern void elv_merge_requests(struct request_queue *, struct request *,
struct request *);
extern void elv_merged_request(struct request_queue *, struct request *,
enum elv_merge);
-extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
+extern bool elv_attempt_insert_merge(struct request_queue *, struct request *,
+ struct list_head *);
extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *);
+void elevator_init_mq(struct request_queue *q);
/*
* io scheduler registration
@@ -160,18 +158,9 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t);
#define ELEVATOR_INSERT_FLUSH 5
#define ELEVATOR_INSERT_SORT_MERGE 6
-#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
#define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist)
-/*
- * Elevator features.
- */
-
-/* Supports zoned block devices sequential write constraint */
-#define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0)
-
-#endif /* CONFIG_BLOCK */
-#endif
+#endif /* _ELEVATOR_H */
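
Note: the hunk above both narrows this header to block-internal use and changes three elevator_mq_ops prototypes (bio_merge, limit_depth, prepare_request). A minimal sketch of how a scheduler's callbacks line up with the new signatures — "mysched" is a hypothetical scheduler name, and this assumes a build inside block/:

/*
 * Hedged sketch, not part of the patch: callbacks matching the new
 * elevator_mq_ops prototypes shown above. "mysched" is hypothetical.
 */
#include <linux/blk-mq.h>
#include "elevator.h"

/* bio_merge now takes the request_queue instead of a blk_mq_hw_ctx */
static bool mysched_bio_merge(struct request_queue *q, struct bio *bio,
			      unsigned int nr_segs)
{
	return false;		/* no merge attempted in this sketch */
}

/* limit_depth now receives the typed blk_opf_t operation flags */
static void mysched_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{
}

/* prepare_request lost its struct bio argument */
static void mysched_prepare_request(struct request *rq)
{
}

static struct elevator_type mysched_sched = {
	.ops = {
		.bio_merge	 = mysched_bio_merge,
		.limit_depth	 = mysched_limit_depth,
		.prepare_request = mysched_prepare_request,
	},
	.elevator_name	= "mysched",
};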
diff --git a/include/drm/ttm/ttm_module.h b/drivers/gpu/drm/ttm/ttm_module.h
index 45fa318c1585..767fe22aed48 100644
--- a/include/drm/ttm/ttm_module.h
+++ b/drivers/gpu/drm/ttm/ttm_module.h
@@ -31,10 +31,13 @@
#ifndef _TTM_MODULE_H_
#define _TTM_MODULE_H_
-#include <linux/kernel.h>
-struct kobject;
-
#define TTM_PFX "[TTM] "
-extern struct kobject *ttm_get_kobj(void);
+
+struct dentry;
+struct ttm_device;
+
+extern struct dentry *ttm_debugfs_root;
+
+void ttm_sys_man_init(struct ttm_device *bdev);
#endif /* _TTM_MODULE_H_ */
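
Note: with the kobject accessor gone, TTM-internal code reaches shared state through the exported ttm_debugfs_root dentry. A minimal sketch, assuming it is compiled inside drivers/gpu/drm/ttm/; the file name and counter below are hypothetical:

/* Hedged sketch, not part of the patch. */
#include <linux/debugfs.h>
#include "ttm_module.h"

static u32 ttm_example_counter;	/* hypothetical counter to expose */

static void ttm_example_debugfs_init(void)
{
	/* hang a read-only file off the shared TTM debugfs directory */
	debugfs_create_u32("example_counter", 0444, ttm_debugfs_root,
			   &ttm_example_counter);
}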
diff --git a/include/linux/mfd/abx500/ab8500-bm.h b/drivers/power/supply/ab8500-bm.h
index 903e94c189d8..180a016b3662 100644
--- a/include/linux/mfd/abx500/ab8500-bm.h
+++ b/drivers/power/supply/ab8500-bm.h
@@ -1,15 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright ST-Ericsson 2012.
- *
- * Author: Arun Murthy <arun.murthy@stericsson.com>
- */
-#ifndef _AB8500_BM_H
-#define _AB8500_BM_H
+#ifndef _AB8500_CHARGER_H_
+#define _AB8500_CHARGER_H_
#include <linux/kernel.h>
-#include <linux/mfd/abx500.h>
/*
* System control 2 register offsets.
@@ -166,13 +160,6 @@
#define BTEMP_HIGH_TH_57_1 0x02
#define BTEMP_HIGH_TH_62 0x03
-/* current is mA */
-#define USB_0P1A 100
-#define USB_0P2A 200
-#define USB_0P3A 300
-#define USB_0P4A 400
-#define USB_0P5A 500
-
#define LOW_BAT_3P1V 0x20
#define LOW_BAT_2P3V 0x00
#define LOW_BAT_RESET 0x01
@@ -209,8 +196,8 @@ enum bup_vch_sel {
#define BATT_OVV_TH_3P7 0x00
#define BATT_OVV_TH_4P75 0x01
-/* A value to indicate over voltage */
-#define BATT_OVV_VALUE 4750
+/* A value to indicate over voltage (microvolts) */
+#define BATT_OVV_VALUE 4750000
/* VBUS OVV constants */
#define VBUS_OVV_SELECT_MASK 0x78
@@ -273,29 +260,6 @@ enum bup_vch_sel {
#define BUS_PP_PRECHG_CURRENT_MASK 0x0E
#define BUS_POWER_PATH_PRECHG_ENA 0x01
-/**
- * struct res_to_temp - defines one point in a temp to res curve. To
- * be used in battery packs that combines the identification resistor with a
- * NTC resistor.
- * @temp: battery pack temperature in Celsius
- * @resist: NTC resistor net total resistance
- */
-struct res_to_temp {
- int temp;
- int resist;
-};
-
-/**
- * struct batres_vs_temp - defines one point in a temp vs battery internal
- * resistance curve.
- * @temp: battery pack temperature in Celsius
- * @resist: battery internal reistance in mOhm
- */
-struct batres_vs_temp {
- int temp;
- int resist;
-};
-
/* Forward declaration */
struct ab8500_fg;
@@ -309,9 +273,9 @@ struct ab8500_fg;
* @init_total_time: Total init time during startup
* @high_curr_time: Time current has to be high to go to recovery
* @accu_charging: FG accumulation time while charging
- * @accu_high_curr: FG accumulation time in high current mode
- * @high_curr_threshold: High current threshold, in mA
- * @lowbat_threshold: Low battery threshold, in mV
+ * @accu_high_curr_ua: FG accumulation time in high current mode
+ * @high_curr_threshold_ua: High current threshold, in uA
+ * @lowbat_threshold_uv: Low battery threshold, in uV
* @battok_falling_th_sel0 Threshold in mV for battOk signal sel0
* Resolution in 50 mV step.
* @battok_raising_th_sel1 Threshold in mV for battOk signal sel1
@@ -325,7 +289,7 @@ struct ab8500_fg;
* @pcut_max_time: Max time threshold
* @pcut_flag_time: Flagtime threshold
* @pcut_max_restart: Max number of restarts
- * @pcut_debunce_time: Sets battery debounce time
+ * @pcut_debounce_time: Sets battery debounce time
*/
struct ab8500_fg_parameters {
int recovery_sleep_timer;
@@ -336,8 +300,8 @@ struct ab8500_fg_parameters {
int high_curr_time;
int accu_charging;
int accu_high_curr;
- int high_curr_threshold;
- int lowbat_threshold;
+ int high_curr_threshold_ua;
+ int lowbat_threshold_uv;
int battok_falling_th_sel0;
int battok_raising_th_sel1;
int user_cap_limit;
@@ -346,21 +310,21 @@ struct ab8500_fg_parameters {
u8 pcut_max_time;
u8 pcut_flag_time;
u8 pcut_max_restart;
- u8 pcut_debunce_time;
+ u8 pcut_debounce_time;
};
/**
* struct ab8500_charger_maximization - struct used by the board config.
* @use_maxi: Enable maximization for this battery type
- * @maxi_chg_curr: Maximum charger current allowed
+ * @maxi_chg_curr_ua: Maximum charger current allowed in microampere
* @maxi_wait_cycles: cycles to wait before setting charger current
- * @charger_curr_step delta between two charger current settings (mA)
+ * @charger_curr_step_ua: delta between two charger current settings (uA)
*/
struct ab8500_maxim_parameters {
bool ena_maxi;
- int chg_curr;
+ int chg_curr_ua;
int wait_cycles;
- int charger_curr_step;
+ int charger_curr_step_ua;
};
/**
@@ -381,96 +345,83 @@ struct ab8500_bm_capacity_levels {
/**
* struct ab8500_bm_charger_parameters - Charger specific parameters
- * @usb_volt_max: maximum allowed USB charger voltage in mV
- * @usb_curr_max: maximum allowed USB charger current in mA
- * @ac_volt_max: maximum allowed AC charger voltage in mV
- * @ac_curr_max: maximum allowed AC charger current in mA
+ * @usb_volt_max_uv: maximum allowed USB charger voltage in uV
+ * @usb_curr_max_ua: maximum allowed USB charger current in uA
+ * @ac_volt_max_uv: maximum allowed AC charger voltage in uV
+ * @ac_curr_max_ua: maximum allowed AC charger current in uA
*/
struct ab8500_bm_charger_parameters {
- int usb_volt_max;
- int usb_curr_max;
- int ac_volt_max;
- int ac_curr_max;
+ int usb_volt_max_uv;
+ int usb_curr_max_ua;
+ int ac_volt_max_uv;
+ int ac_curr_max_ua;
};
/**
* struct ab8500_bm_data - ab8500 battery management data
- * @temp_under under this temp, charging is stopped
- * @temp_low between this temp and temp_under charging is reduced
- * @temp_high between this temp and temp_over charging is reduced
- * @temp_over over this temp, charging is stopped
+ * @bi battery info from device tree
+ * @temp_now present battery temperature
* @temp_interval_chg temperature measurement interval in s when charging
* @temp_interval_nochg temperature measurement interval in s when not charging
* @main_safety_tmr_h safety timer for main charger
* @usb_safety_tmr_h safety timer for usb charger
* @bkup_bat_v voltage which we charge the backup battery with
* @bkup_bat_i current which we charge the backup battery with
- * @no_maintenance indicates that maintenance charging is disabled
* @capacity_scaling indicates whether capacity scaling is to be used
- * @adc_therm placement of thermistor, batctrl or battemp adc
* @chg_unknown_bat flag to enable charging of unknown batteries
* @enable_overshoot flag to enable VBAT overshoot control
+ * @auto_trig flag to enable auto adc trigger
* @fg_res resistance of FG resistor in 0.1mOhm
- * @n_btypes number of elements in array bat_type
- * @batt_id index of the identified battery in array bat_type
* @interval_charging charge alg cycle period time when charging (sec)
* @interval_not_charging charge alg cycle period time when not charging (sec)
* @temp_hysteresis temperature hysteresis
- * @gnd_lift_resistance Battery ground to phone ground resistance (mOhm)
- * @maxi: maximization parameters
+ * @maxi maximization parameters
* @cap_levels capacity in percent for the different capacity levels
- * @bat_type table of supported battery types
* @chg_params charger parameters
* @fg_params fuel gauge parameters
*/
struct ab8500_bm_data {
- int temp_under;
- int temp_low;
- int temp_high;
- int temp_over;
+ struct power_supply_battery_info *bi;
+ int temp_now;
int temp_interval_chg;
int temp_interval_nochg;
int main_safety_tmr_h;
int usb_safety_tmr_h;
int bkup_bat_v;
int bkup_bat_i;
- bool no_maintenance;
bool capacity_scaling;
bool chg_unknown_bat;
bool enable_overshoot;
- enum abx500_adc_therm adc_therm;
+ bool auto_trig;
int fg_res;
- int n_btypes;
- int batt_id;
int interval_charging;
int interval_not_charging;
int temp_hysteresis;
- int gnd_lift_resistance;
const struct ab8500_maxim_parameters *maxi;
const struct ab8500_bm_capacity_levels *cap_levels;
const struct ab8500_bm_charger_parameters *chg_params;
const struct ab8500_fg_parameters *fg_params;
};
-struct ab8500_btemp;
-struct ab8500_gpadc;
+/* Forward declaration */
struct ab8500_fg;
-#ifdef CONFIG_AB8500_BM
-extern struct abx500_bm_data ab8500_bm_data;
+extern struct ab8500_bm_data ab8500_bm_data;
void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA);
-struct ab8500_btemp *ab8500_btemp_get(void);
-int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp);
-int ab8500_btemp_get_temp(struct ab8500_btemp *btemp);
struct ab8500_fg *ab8500_fg_get(void);
int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev);
int ab8500_fg_inst_curr_start(struct ab8500_fg *di);
int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res);
int ab8500_fg_inst_curr_started(struct ab8500_fg *di);
int ab8500_fg_inst_curr_done(struct ab8500_fg *di);
+int ab8500_bm_of_probe(struct power_supply *psy,
+ struct ab8500_bm_data *bm);
+void ab8500_bm_of_remove(struct power_supply *psy,
+ struct ab8500_bm_data *bm);
+
+extern struct platform_driver ab8500_fg_driver;
+extern struct platform_driver ab8500_btemp_driver;
+extern struct platform_driver ab8500_chargalg_driver;
-#else
-static struct abx500_bm_data ab8500_bm_data;
-#endif
-#endif /* _AB8500_BM_H */
+#endif /* _AB8500_CHARGER_H_ */
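
Note: the renamed *_ua/*_uv fields (and the rescaled BATT_OVV_VALUE) take micro-units, so board data previously expressed in mA/mV needs a factor-of-1000 conversion. A standalone illustration with hypothetical values, not kernel code:

#include <stdio.h>

#define MA_TO_UA(ma)	((ma) * 1000)	/* milliampere -> microampere */
#define MV_TO_UV(mv)	((mv) * 1000)	/* millivolt   -> microvolt   */

int main(void)
{
	/* hypothetical legacy board values in mA/mV */
	int usb_curr_max_ua = MA_TO_UA(500);		/* was 500 mA */
	int lowbat_threshold_uv = MV_TO_UV(3100);	/* was 3100 mV */

	printf("usb_curr_max_ua=%d lowbat_threshold_uv=%d\n",
	       usb_curr_max_ua, lowbat_threshold_uv);
	return 0;
}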
diff --git a/include/media/drv-intf/saa7146.h b/drivers/staging/media/deprecated/saa7146/common/saa7146.h
index 71ce63c99cb4..71ce63c99cb4 100644
--- a/include/media/drv-intf/saa7146.h
+++ b/drivers/staging/media/deprecated/saa7146/common/saa7146.h
diff --git a/include/media/drv-intf/saa7146_vv.h b/drivers/staging/media/deprecated/saa7146/common/saa7146_vv.h
index 635805fb35e8..d7bd916fe3ad 100644
--- a/include/media/drv-intf/saa7146_vv.h
+++ b/drivers/staging/media/deprecated/saa7146/common/saa7146_vv.h
@@ -5,8 +5,8 @@
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-fh.h>
-#include <media/drv-intf/saa7146.h>
#include <media/videobuf-dma-sg.h>
+#include "saa7146.h"
#define MAX_SAA7146_CAPTURE_BUFFERS 32 /* arbitrary */
#define BUFFER_TIMEOUT (HZ/2) /* 0.5 seconds */
diff --git a/include/media/davinci/dm355_ccdc.h b/drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc.h
index 1f3d00aa46d1..1f3d00aa46d1 100644
--- a/include/media/davinci/dm355_ccdc.h
+++ b/drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc.h
diff --git a/include/media/davinci/dm644x_ccdc.h b/drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc.h
index c20dba3d76d6..c20dba3d76d6 100644
--- a/include/media/davinci/dm644x_ccdc.h
+++ b/drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc.h
diff --git a/include/media/davinci/isif.h b/drivers/staging/media/deprecated/vpfe_capture/isif.h
index e66589c4022d..8369acd26e7e 100644
--- a/include/media/davinci/isif.h
+++ b/drivers/staging/media/deprecated/vpfe_capture/isif.h
@@ -177,7 +177,7 @@ struct isif_black_clamp {
* 1 - clamp value calculated separately for all colors
*/
__u8 bc_mode_color;
- /* Vrtical start position for bc subtraction */
+ /* Vertical start position for bc subtraction */
__u16 vert_start_sub;
/* Black clamp for horizontal direction */
struct isif_horz_bclamp horz;
@@ -193,7 +193,7 @@ struct isif_color_space_conv {
/* Enable color space conversion */
__u8 en;
/*
- * csc coeffient table. S8Q5, M00 at index 0, M01 at index 1, and
+ * csc coefficient table. S8Q5, M00 at index 0, M01 at index 1, and
* so forth
*/
struct isif_float_8 coeff[ISIF_CSC_NUM_COEFF];
@@ -340,7 +340,7 @@ struct isif_data_formatter {
};
struct isif_df_csc {
- /* Color Space Conversion confguration, 0 - csc, 1 - df */
+ /* Color Space Conversion configuration, 0 - csc, 1 - df */
__u8 df_or_csc;
/* csc configuration valid if df_or_csc is 0 */
struct isif_color_space_conv csc;
@@ -406,7 +406,7 @@ struct isif_config_params_raw {
struct isif_linearize linearize;
/* Data formatter or CSC */
struct isif_df_csc df_csc;
- /* Defect Pixel Correction (DFC) confguration */
+ /* Defect Pixel Correction (DFC) configuration */
struct isif_dfc dfc;
/* Black/Digital Clamp configuration */
struct isif_black_clamp bclamp;
diff --git a/include/linux/vme.h b/drivers/staging/vme_user/vme.h
index 7e82bf500f01..b204a9b4be1b 100644
--- a/include/linux/vme.h
+++ b/drivers/staging/vme_user/vme.h
@@ -122,7 +122,7 @@ struct vme_driver {
const char *name;
int (*match)(struct vme_dev *);
int (*probe)(struct vme_dev *);
- int (*remove)(struct vme_dev *);
+ void (*remove)(struct vme_dev *);
struct device_driver driver;
struct list_head devices;
};
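Since remove() now returns void, a driver's teardown can no longer report failure. A hedged sketch of the updated callback shape (names illustrative):

/* Editor's sketch, not part of the patch. */
static void example_vme_remove(struct vme_dev *vdev)
{
	/* free resources unconditionally; there is no error path to report */
}

static struct vme_driver example_vme_driver = {
	.name	= "example",
	.remove	= example_vme_remove,	/* was: int (*remove)(struct vme_dev *) */
};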
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h
index 531c1e9a7d10..8cbfcbca7b7e 100644
--- a/include/acpi/acbuffer.h
+++ b/include/acpi/acbuffer.h
@@ -3,7 +3,7 @@
*
* Name: acbuffer.h - Support for buffers returned by ACPI predefined names
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
@@ -207,4 +207,14 @@ struct acpi_pld_info {
#define ACPI_PLD_GET_HORIZ_OFFSET(dword) ACPI_GET_BITS (dword, 16, ACPI_16BIT_MASK)
#define ACPI_PLD_SET_HORIZ_OFFSET(dword,value) ACPI_SET_BITS (dword, 16, ACPI_16BIT_MASK, value) /* Offset 128+16=144, Len 16 */
+/* Panel position defined in _PLD section of ACPI Specification 6.3 */
+
+#define ACPI_PLD_PANEL_TOP 0
+#define ACPI_PLD_PANEL_BOTTOM 1
+#define ACPI_PLD_PANEL_LEFT 2
+#define ACPI_PLD_PANEL_RIGHT 3
+#define ACPI_PLD_PANEL_FRONT 4
+#define ACPI_PLD_PANEL_BACK 5
+#define ACPI_PLD_PANEL_UNKNOWN 6
+
#endif /* ACBUFFER_H */
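The new constants decode the Panel field of struct acpi_pld_info; a hedged sketch, assuming the already-parsed pld->panel member:

/* Editor's sketch: map a parsed _PLD panel position to a label. */
static const char *example_pld_panel_name(const struct acpi_pld_info *pld)
{
	switch (pld->panel) {
	case ACPI_PLD_PANEL_TOP:	return "top";
	case ACPI_PLD_PANEL_BOTTOM:	return "bottom";
	case ACPI_PLD_PANEL_LEFT:	return "left";
	case ACPI_PLD_PANEL_RIGHT:	return "right";
	case ACPI_PLD_PANEL_FRONT:	return "front";
	case ACPI_PLD_PANEL_BACK:	return "back";
	default:			return "unknown";
	}
}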
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 5940a3c68a96..c3ae3ea88e17 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -3,7 +3,7 @@
*
* Name: acconfig.h - Global configuration constants
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
@@ -121,7 +121,7 @@
*
*****************************************************************************/
-/* Method info (in WALK_STATE), containing local variables and argumetns */
+/* Method info (in WALK_STATE), containing local variables and arguments */
#define ACPI_METHOD_NUM_LOCALS 8
#define ACPI_METHOD_MAX_LOCAL 7
@@ -188,6 +188,8 @@
#define ACPI_MAX_GSBUS_DATA_SIZE 255
#define ACPI_MAX_GSBUS_BUFFER_SIZE ACPI_SERIAL_HEADER_SIZE + ACPI_MAX_GSBUS_DATA_SIZE
+#define ACPI_PRM_INPUT_BUFFER_SIZE 26
+
/* _sx_d and _sx_w control methods */
#define ACPI_NUM_sx_d_METHODS 4
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 436cd1411c3a..28943c900be7 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -3,7 +3,7 @@
*
* Name: acexcep.h - Exception codes returned by the ACPI subsystem
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
@@ -40,12 +40,12 @@
struct acpi_exception_info {
char *name;
-#ifdef ACPI_HELP_APP
+#if defined (ACPI_HELP_APP) || defined (ACPI_ASL_COMPILER)
char *description;
#endif
};
-#ifdef ACPI_HELP_APP
+#if defined (ACPI_HELP_APP) || defined (ACPI_ASL_COMPILER)
#define EXCEP_TXT(name,description) {name, description}
#else
#define EXCEP_TXT(name,description) {name}
@@ -59,11 +59,11 @@ struct acpi_exception_info {
#define AE_OK (acpi_status) 0x0000
-#define ACPI_ENV_EXCEPTION(status) (status & AE_CODE_ENVIRONMENTAL)
-#define ACPI_AML_EXCEPTION(status) (status & AE_CODE_AML)
-#define ACPI_PROG_EXCEPTION(status) (status & AE_CODE_PROGRAMMER)
-#define ACPI_TABLE_EXCEPTION(status) (status & AE_CODE_ACPI_TABLES)
-#define ACPI_CNTL_EXCEPTION(status) (status & AE_CODE_CONTROL)
+#define ACPI_ENV_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_ENVIRONMENTAL)
+#define ACPI_AML_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_AML)
+#define ACPI_PROG_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_PROGRAMMER)
+#define ACPI_TABLE_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_ACPI_TABLES)
+#define ACPI_CNTL_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_CONTROL)
/*
* Environmental exceptions
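This macro change is a behavioral fix, not a cleanup: the old bitwise-AND tested for bit overlap rather than class equality. A worked illustration using the class codes defined in acexcep.h:

/*
 * Editor's illustration (class codes from acexcep.h):
 *   AE_CODE_ENVIRONMENTAL = 0x0000, AE_CODE_PROGRAMMER = 0x1000,
 *   AE_CODE_AML = 0x3000, AE_CODE_MASK = 0xF000.
 *
 * For an AML-class status (0x3xxx):
 *   old: status & AE_CODE_PROGRAMMER  -> 0x3000 & 0x1000 = 0x1000, wrongly true
 *   new: (status & AE_CODE_MASK) == AE_CODE_PROGRAMMER -> false, as intended
 *
 * And ACPI_ENV_EXCEPTION() could never be true before, since
 * (status & 0x0000) is always zero.
 */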
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index 8922edb32730..6f22e92b1744 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -3,7 +3,7 @@
*
* Name: acnames.h - Global names and strings
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
@@ -20,6 +20,7 @@
#define METHOD_NAME__CLS "_CLS"
#define METHOD_NAME__CRS "_CRS"
#define METHOD_NAME__DDN "_DDN"
+#define METHOD_NAME__DIS "_DIS"
#define METHOD_NAME__DMA "_DMA"
#define METHOD_NAME__HID "_HID"
#define METHOD_NAME__INI "_INI"
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index c5d900c0ecda..73781aae2119 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -3,7 +3,7 @@
*
* Name: acoutput.h -- debug output
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
@@ -362,7 +362,7 @@
*
* A less-safe version of the macros is provided for optional use if the
* compiler uses excessive CPU stack (for example, this may happen in the
- * debug case if code optimzation is disabled.)
+ * debug case if code optimization is disabled.)
*/
/* Exit trace helper macro */
@@ -415,7 +415,7 @@
/* Conditional execution */
#define ACPI_DEBUG_EXEC(a) a
-#define ACPI_DEBUG_ONLY_MEMBERS(a) a;
+#define ACPI_DEBUG_ONLY_MEMBERS(a) a
#define _VERBOSE_STRUCTURES
/* Various object display routines for debug */
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index e3e8051d4812..416e59bcf149 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -3,7 +3,7 @@
*
* Name: acpi.h - Master public include file used to interface to ACPICA
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 0c23fd0548d1..c09d72986968 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -44,6 +44,7 @@ acpi_status acpi_execute_simple_method(acpi_handle handle, char *method,
u64 arg);
acpi_status acpi_evaluate_ej0(acpi_handle handle);
acpi_status acpi_evaluate_lck(acpi_handle handle, int lock);
+acpi_status acpi_evaluate_reg(acpi_handle handle, u8 space_id, u32 function);
bool acpi_ata_match(acpi_handle handle);
bool acpi_bay_match(acpi_handle handle);
bool acpi_dock_match(acpi_handle handle);
@@ -77,10 +78,11 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
bool acpi_dev_found(const char *hid);
bool acpi_dev_present(const char *hid, const char *uid, s64 hrv);
+bool acpi_reduced_hardware(void);
#ifdef CONFIG_ACPI
-#include <linux/proc_fs.h>
+struct proc_dir_entry;
#define ACPI_BUS_FILE_ROOT "acpi"
extern struct proc_dir_entry *acpi_root_dir;
@@ -200,7 +202,8 @@ struct acpi_device_flags {
u32 coherent_dma:1;
u32 cca_seen:1;
u32 enumeration_by_parent:1;
- u32 reserved:19;
+ u32 honor_deps:1;
+ u32 reserved:18;
};
/* File System */
@@ -232,6 +235,7 @@ struct acpi_pnp_type {
struct acpi_device_pnp {
acpi_bus_id bus_id; /* Object name */
+ int instance_no; /* Instance number of this object */
struct acpi_pnp_type type; /* ID type */
acpi_bus_address bus_address; /* _ADR */
char *unique_id; /* _UID */
@@ -275,6 +279,14 @@ struct acpi_device_power {
int state; /* Current state */
struct acpi_device_power_flags flags;
struct acpi_device_power_state states[ACPI_D_STATE_COUNT]; /* Power states (D0-D3Cold) */
+ u8 state_for_enumeration; /* Deepest power state for enumeration */
+};
+
+struct acpi_dep_data {
+ struct list_head node;
+ acpi_handle supplier;
+ acpi_handle consumer;
+ bool honor_dep;
};
/* Performance Management */
@@ -332,8 +344,9 @@ struct acpi_device_physical_node {
struct acpi_device_properties {
const guid_t *guid;
- const union acpi_object *properties;
+ union acpi_object *properties;
struct list_head list;
+ void **bufs;
};
/* ACPI Device Specific Data (_DSD) */
@@ -348,12 +361,10 @@ struct acpi_gpio_mapping;
/* Device */
struct acpi_device {
+ u32 pld_crc;
int device_type;
acpi_handle handle; /* no handle for fixed hardware */
struct fwnode_handle fwnode;
- struct acpi_device *parent;
- struct list_head children;
- struct list_head node;
struct list_head wakeup_list;
struct list_head del_list;
struct acpi_device_status status;
@@ -366,7 +377,6 @@ struct acpi_device {
struct acpi_device_data data;
struct acpi_scan_handler *handler;
struct acpi_hotplug_context *hp;
- struct acpi_driver *driver;
const struct acpi_gpio_mapping *driver_gpios;
void *driver_data;
struct device dev;
@@ -447,6 +457,14 @@ static inline void *acpi_driver_data(struct acpi_device *d)
#define to_acpi_device(d) container_of(d, struct acpi_device, dev)
#define to_acpi_driver(d) container_of(d, struct acpi_driver, drv)
+static inline struct acpi_device *acpi_dev_parent(struct acpi_device *adev)
+{
+ if (adev->dev.parent)
+ return to_acpi_device(adev->dev.parent);
+
+ return NULL;
+}
+
static inline void acpi_set_device_status(struct acpi_device *adev, u32 sta)
{
*((u32 *)&adev->status) = sta;
@@ -467,6 +485,13 @@ void acpi_initialize_hp_context(struct acpi_device *adev,
/* acpi_device.dev.bus == &acpi_bus_type */
extern struct bus_type acpi_bus_type;
+int acpi_bus_for_each_dev(int (*fn)(struct device *, void *), void *data);
+int acpi_dev_for_each_child(struct acpi_device *adev,
+ int (*fn)(struct acpi_device *, void *), void *data);
+int acpi_dev_for_each_child_reverse(struct acpi_device *adev,
+ int (*fn)(struct acpi_device *, void *),
+ void *data);
+
/*
* Events
* ------
@@ -494,9 +519,6 @@ extern int unregister_acpi_notifier(struct notifier_block *);
* External Functions
*/
-int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device);
-struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle);
-void acpi_bus_put_acpi_device(struct acpi_device *adev);
acpi_status acpi_bus_get_status_handle(acpi_handle handle,
unsigned long long *sta);
int acpi_bus_get_status(struct acpi_device *device);
@@ -506,9 +528,11 @@ const char *acpi_power_state_string(int state);
int acpi_device_set_power(struct acpi_device *device, int state);
int acpi_bus_init_power(struct acpi_device *device);
int acpi_device_fix_up_power(struct acpi_device *device);
+void acpi_device_fix_up_power_extended(struct acpi_device *adev);
int acpi_bus_update_power(acpi_handle handle, int *state_p);
int acpi_device_update_power(struct acpi_device *device, int *state_p);
bool acpi_bus_power_manageable(acpi_handle handle);
+void acpi_dev_power_up_children_with_adr(struct acpi_device *adev);
int acpi_device_power_add_dependent(struct acpi_device *adev,
struct device *dev);
void acpi_device_power_remove_dependent(struct acpi_device *adev,
@@ -563,34 +587,50 @@ struct acpi_bus_type {
bool (*match)(struct device *dev);
struct acpi_device * (*find_companion)(struct device *);
void (*setup)(struct device *);
- void (*cleanup)(struct device *);
};
int register_acpi_bus_type(struct acpi_bus_type *);
int unregister_acpi_bus_type(struct acpi_bus_type *);
int acpi_bind_one(struct device *dev, struct acpi_device *adev);
int acpi_unbind_one(struct device *dev);
+enum acpi_bridge_type {
+ ACPI_BRIDGE_TYPE_PCIE = 1,
+ ACPI_BRIDGE_TYPE_CXL,
+};
+
struct acpi_pci_root {
struct acpi_device * device;
struct pci_bus *bus;
u16 segment;
+ int bridge_type;
struct resource secondary; /* downstream bus range */
- u32 osc_support_set; /* _OSC state of support bits */
- u32 osc_control_set; /* _OSC state of control bits */
+ u32 osc_support_set; /* _OSC state of support bits */
+ u32 osc_control_set; /* _OSC state of control bits */
+ u32 osc_ext_support_set; /* _OSC state of extended support bits */
+ u32 osc_ext_control_set; /* _OSC state of extended control bits */
phys_addr_t mcfg_addr;
};
/* helper */
-bool acpi_dma_supported(struct acpi_device *adev);
+bool acpi_dma_supported(const struct acpi_device *adev);
enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
-int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
- u64 *size);
-int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr);
-
+int acpi_iommu_fwspec_init(struct device *dev, u32 id,
+ struct fwnode_handle *fwnode,
+ const struct iommu_ops *ops);
+int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map);
+int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
+ const u32 *input_id);
+static inline int acpi_dma_configure(struct device *dev,
+ enum dev_dma_attr attr)
+{
+ return acpi_dma_configure_id(dev, attr, NULL);
+}
struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
u64 address, bool check_children);
+struct acpi_device *acpi_find_child_by_adr(struct acpi_device *adev,
+ acpi_bus_address adr);
int acpi_is_root_bridge(acpi_handle);
struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle);
@@ -598,12 +638,34 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state);
int acpi_disable_wakeup_device_power(struct acpi_device *dev);
#ifdef CONFIG_X86
-bool acpi_device_always_present(struct acpi_device *adev);
+bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *status);
+bool acpi_quirk_skip_acpi_ac_and_battery(void);
+#else
+static inline bool acpi_device_override_status(struct acpi_device *adev,
+ unsigned long long *status)
+{
+ return false;
+}
+static inline bool acpi_quirk_skip_acpi_ac_and_battery(void)
+{
+ return false;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
+bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev);
+int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip);
#else
-static inline bool acpi_device_always_present(struct acpi_device *adev)
+static inline bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev)
{
return false;
}
+static inline int
+acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
+{
+ *skip = false;
+ return 0;
+}
#endif
#ifdef CONFIG_PM
@@ -614,7 +676,6 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev);
bool acpi_pm_device_can_wakeup(struct device *dev);
int acpi_pm_device_sleep_state(struct device *, int *, int);
int acpi_pm_set_device_wakeup(struct device *dev, bool enable);
-int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable);
#else
static inline void acpi_pm_wakeup_event(struct device *dev)
{
@@ -645,10 +706,6 @@ static inline int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
{
return -ENODEV;
}
-static inline int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable)
-{
- return -ENODEV;
-}
#endif
#ifdef CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT
@@ -681,13 +738,60 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
}
bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2);
+int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer);
+void acpi_dev_clear_dependencies(struct acpi_device *supplier);
+bool acpi_dev_ready_for_enumeration(const struct acpi_device *device);
+struct acpi_device *acpi_dev_get_next_consumer_dev(struct acpi_device *supplier,
+ struct acpi_device *start);
+
+/**
+ * for_each_acpi_consumer_dev - iterate over the consumer ACPI devices for a
+ * given supplier
+ * @supplier: Pointer to the supplier's ACPI device
+ * @consumer: Pointer to &struct acpi_device to hold the consumer, initially NULL
+ */
+#define for_each_acpi_consumer_dev(supplier, consumer) \
+ for (consumer = acpi_dev_get_next_consumer_dev(supplier, NULL); \
+ consumer; \
+ consumer = acpi_dev_get_next_consumer_dev(supplier, consumer))
+
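A hedged usage sketch for the iterator above ('supplier' is an assumed, referenced local; an early break presumably still requires acpi_dev_put() on the held consumer):

/* Editor's sketch, not part of the patch. */
struct acpi_device *consumer;

for_each_acpi_consumer_dev(supplier, consumer)
	dev_info(&consumer->dev, "consumer of %s\n",
		 dev_name(&supplier->dev));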
+struct acpi_device *
+acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const char *uid, s64 hrv);
struct acpi_device *
acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv);
+/**
+ * for_each_acpi_dev_match - iterate over ACPI devices that match the criteria
+ * @adev: pointer to the matching ACPI device, NULL at the end of the loop
+ * @hid: Hardware ID of the device
+ * @uid: Unique ID of the device, pass NULL to not check _UID
+ * @hrv: Hardware Revision of the device, pass -1 to not check _HRV
+ *
+ * The caller is responsible for invoking acpi_dev_put() on the returned device.
+ */
+#define for_each_acpi_dev_match(adev, hid, uid, hrv) \
+ for (adev = acpi_dev_get_first_match_dev(hid, uid, hrv); \
+ adev; \
+ adev = acpi_dev_get_next_match_dev(adev, hid, uid, hrv))
+
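Likewise for for_each_acpi_dev_match(); the HID below is hypothetical:

/* Editor's sketch, not part of the patch. */
struct acpi_device *adev;

for_each_acpi_dev_match(adev, "XYZ0001", NULL, -1) {
	dev_info(&adev->dev, "matched\n");
	/* on early break: acpi_dev_put(adev) to drop the held reference */
}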
+static inline struct acpi_device *acpi_dev_get(struct acpi_device *adev)
+{
+ return adev ? to_acpi_device(get_device(&adev->dev)) : NULL;
+}
+
static inline void acpi_dev_put(struct acpi_device *adev)
{
- put_device(&adev->dev);
+ if (adev)
+ put_device(&adev->dev);
+}
+
+struct acpi_device *acpi_fetch_acpi_dev(acpi_handle handle);
+struct acpi_device *acpi_get_acpi_dev(acpi_handle handle);
+
+static inline void acpi_put_acpi_dev(struct acpi_device *adev)
+{
+ acpi_dev_put(adev);
}
#else /* CONFIG_ACPI */
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 5eb175933a5b..8372b0e7fd15 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -12,25 +12,6 @@
#define ACPI_MAX_STRING 80
/*
- * Please update drivers/acpi/debug.c and Documentation/firmware-guide/acpi/debug.rst
- * if you add to this list.
- */
-#define ACPI_BUS_COMPONENT 0x00010000
-#define ACPI_AC_COMPONENT 0x00020000
-#define ACPI_BATTERY_COMPONENT 0x00040000
-#define ACPI_BUTTON_COMPONENT 0x00080000
-#define ACPI_SBS_COMPONENT 0x00100000
-#define ACPI_FAN_COMPONENT 0x00200000
-#define ACPI_PCI_COMPONENT 0x00400000
-#define ACPI_POWER_COMPONENT 0x00800000
-#define ACPI_CONTAINER_COMPONENT 0x01000000
-#define ACPI_SYSTEM_COMPONENT 0x02000000
-#define ACPI_THERMAL_COMPONENT 0x04000000
-#define ACPI_MEMORY_DEVICE_COMPONENT 0x08000000
-#define ACPI_VIDEO_COMPONENT 0x10000000
-#define ACPI_PROCESSOR_COMPONENT 0x20000000
-
-/*
* _HID definitions
* HIDs must conform to ACPI spec(6.1.4)
* Linux specific HIDs do not apply to this and begin with LNX:
@@ -64,14 +45,14 @@
-------------------------------------------------------------------------- */
-/* ACPI PCI Interrupt Link (pci_link.c) */
+/* ACPI PCI Interrupt Link */
int acpi_irq_penalty_init(void);
int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
int *polarity, char **name);
int acpi_pci_link_free_irq(acpi_handle handle);
-/* ACPI PCI Device Binding (pci_bind.c) */
+/* ACPI PCI Device Binding */
struct pci_bus;
@@ -94,14 +75,6 @@ void pci_acpi_crs_quirks(void);
static inline void pci_acpi_crs_quirks(void) { }
#endif
-/* --------------------------------------------------------------------------
- Processor
- -------------------------------------------------------------------------- */
-
-#define ACPI_PROCESSOR_LIMIT_NONE 0x00
-#define ACPI_PROCESSOR_LIMIT_INCREMENT 0x01
-#define ACPI_PROCESSOR_LIMIT_DECREMENT 0x02
-
/*--------------------------------------------------------------------------
Dock Station
-------------------------------------------------------------------------- */
diff --git a/include/acpi/acpi_io.h b/include/acpi/acpi_io.h
index 12d8bd333fe7..027faa8883aa 100644
--- a/include/acpi/acpi_io.h
+++ b/include/acpi/acpi_io.h
@@ -21,7 +21,7 @@ void __iomem __ref
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size);
void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size);
-int acpi_os_map_generic_address(struct acpi_generic_address *addr);
+void __iomem *acpi_os_map_generic_address(struct acpi_generic_address *addr);
void acpi_os_unmap_generic_address(struct acpi_generic_address *addr);
#endif
diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h
index fdebcfc6c8df..b5f594754a9e 100644
--- a/include/acpi/acpi_numa.h
+++ b/include/acpi/acpi_numa.h
@@ -3,7 +3,6 @@
#define __ACPI_NUMA_H
#ifdef CONFIG_ACPI_NUMA
-#include <linux/kernel.h>
#include <linux/numa.h>
/* Proximity bitmap length */
@@ -17,10 +16,30 @@ extern int pxm_to_node(int);
extern int node_to_pxm(int);
extern int acpi_map_pxm_to_node(int);
extern unsigned char acpi_srat_revision;
-extern int acpi_numa __initdata;
+extern void disable_srat(void);
extern void bad_srat(void);
extern int srat_disabled(void);
+#else /* CONFIG_ACPI_NUMA */
+static inline void disable_srat(void)
+{
+}
+static inline int pxm_to_node(int pxm)
+{
+ return 0;
+}
+static inline int node_to_pxm(int node)
+{
+ return 0;
+}
#endif /* CONFIG_ACPI_NUMA */
-#endif /* __ACP_NUMA_H */
+
+#ifdef CONFIG_ACPI_HMAT
+extern void disable_hmat(void);
+#else /* CONFIG_ACPI_HMAT */
+static inline void disable_hmat(void)
+{
+}
+#endif /* CONFIG_ACPI_HMAT */
+#endif /* __ACPI_NUMA_H */
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 33bb8c9a089d..52844cc5eeb5 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -5,7 +5,7 @@
* interfaces must be implemented by OSL to interface the
* ACPI components to the host operating system.
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 8e8be989c2a6..67c0b9e734b6 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -3,7 +3,7 @@
*
* Name: acpixf.h - External interfaces to the ACPI subsystem
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20200110
+#define ACPI_CA_VERSION 0x20220331
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -454,9 +454,11 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
* ACPI table load/unload interfaces
*/
ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION
- acpi_install_table(acpi_physical_address address,
- u8 physical))
+ acpi_install_table(struct acpi_table_header *table))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION
+ acpi_install_physical_table(acpi_physical_address
+ address))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_load_table(struct acpi_table_header *table,
u32 *table_idx))
@@ -752,7 +754,7 @@ ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u3
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
-ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(void))
+ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(u32 gpe_skip_number))
ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_fixed_event_status_set(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index d3521894ce6a..a7fb8ddb3dc6 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -3,7 +3,7 @@
*
* Name: acrestyp.h - Defines, types, and structures for resource descriptors
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
@@ -381,7 +381,7 @@ struct acpi_resource_gpio {
#define ACPI_IO_RESTRICT_OUTPUT 2
#define ACPI_IO_RESTRICT_NONE_PRESERVE 3
-/* Common structure for I2C, SPI, and UART serial descriptors */
+/* Common structure for I2C, SPI, UART, CSI2 serial descriptors */
#define ACPI_RESOURCE_SERIAL_COMMON \
u8 revision_id; \
@@ -403,6 +403,7 @@ ACPI_RESOURCE_SERIAL_COMMON};
#define ACPI_RESOURCE_SERIAL_TYPE_I2C 1
#define ACPI_RESOURCE_SERIAL_TYPE_SPI 2
#define ACPI_RESOURCE_SERIAL_TYPE_UART 3
+#define ACPI_RESOURCE_SERIAL_TYPE_CSI2 4
/* Values for slave_mode field above */
@@ -505,6 +506,11 @@ struct acpi_resource_uart_serialbus {
#define ACPI_UART_CLEAR_TO_SEND (1<<6)
#define ACPI_UART_REQUEST_TO_SEND (1<<7)
+struct acpi_resource_csi2_serialbus {
+ ACPI_RESOURCE_SERIAL_COMMON u8 local_port_instance;
+ u8 phy_type;
+};
+
struct acpi_resource_pin_function {
u8 revision_id;
u8 pin_config;
@@ -634,6 +640,7 @@ union acpi_resource_data {
struct acpi_resource_i2c_serialbus i2c_serial_bus;
struct acpi_resource_spi_serialbus spi_serial_bus;
struct acpi_resource_uart_serialbus uart_serial_bus;
+ struct acpi_resource_csi2_serialbus csi2_serial_bus;
struct acpi_resource_common_serialbus common_serial_bus;
struct acpi_resource_pin_function pin_function;
struct acpi_resource_pin_config pin_config;
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 5007c41f4d54..c6af579f74f4 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -3,7 +3,7 @@
*
* Name: actbl.h - Basic ACPI Table Definitions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 02d06b79e1cd..15c78678c5d3 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -3,7 +3,7 @@
*
* Name: actbl1.h - Additional ACPI table definitions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
@@ -24,10 +24,12 @@
* file. Useful because they make it more difficult to inadvertently type in
* the wrong signature.
*/
+#define ACPI_SIG_AEST "AEST" /* Arm Error Source Table */
#define ACPI_SIG_ASF "ASF!" /* Alert Standard Format table */
#define ACPI_SIG_BERT "BERT" /* Boot Error Record Table */
#define ACPI_SIG_BGRT "BGRT" /* Boot Graphics Resource Table */
#define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */
+#define ACPI_SIG_CEDT "CEDT" /* CXL Early Discovery Table */
#define ACPI_SIG_CPEP "CPEP" /* Corrected Platform Error Polling table */
#define ACPI_SIG_CSRT "CSRT" /* Core System Resource Table */
#define ACPI_SIG_DBG2 "DBG2" /* Debug Port table type 2 */
@@ -303,6 +305,92 @@ struct acpi_table_boot {
/*******************************************************************************
*
+ * CEDT - CXL Early Discovery Table
+ * Version 1
+ *
+ * Conforms to the "CXL Early Discovery Table" (CXL 2.0)
+ *
+ ******************************************************************************/
+
+struct acpi_table_cedt {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+/* CEDT subtable header (Performance Record Structure) */
+
+struct acpi_cedt_header {
+ u8 type;
+ u8 reserved;
+ u16 length;
+};
+
+/* Values for Type field above */
+
+enum acpi_cedt_type {
+ ACPI_CEDT_TYPE_CHBS = 0,
+ ACPI_CEDT_TYPE_CFMWS = 1,
+ ACPI_CEDT_TYPE_RESERVED = 2,
+};
+
+/* Values for version field above */
+
+#define ACPI_CEDT_CHBS_VERSION_CXL11 (0)
+#define ACPI_CEDT_CHBS_VERSION_CXL20 (1)
+
+/* Values for length field above */
+
+#define ACPI_CEDT_CHBS_LENGTH_CXL11 (0x2000)
+#define ACPI_CEDT_CHBS_LENGTH_CXL20 (0x10000)
+
+/*
+ * CEDT subtables
+ */
+
+/* 0: CXL Host Bridge Structure */
+
+struct acpi_cedt_chbs {
+ struct acpi_cedt_header header;
+ u32 uid;
+ u32 cxl_version;
+ u32 reserved;
+ u64 base;
+ u64 length;
+};
+
+/* 1: CXL Fixed Memory Window Structure */
+
+struct acpi_cedt_cfmws {
+ struct acpi_cedt_header header;
+ u32 reserved1;
+ u64 base_hpa;
+ u64 window_size;
+ u8 interleave_ways;
+ u8 interleave_arithmetic;
+ u16 reserved2;
+ u32 granularity;
+ u16 restrictions;
+ u16 qtg_id;
+ u32 interleave_targets[];
+};
+
+struct acpi_cedt_cfmws_target_element {
+ u32 interleave_target;
+};
+
+/* Values for Interleave Arithmetic field above */
+
+#define ACPI_CEDT_CFMWS_ARITHMETIC_MODULO (0)
+
+/* Values for Restrictions field above */
+
+#define ACPI_CEDT_CFMWS_RESTRICT_TYPE2 (1)
+#define ACPI_CEDT_CFMWS_RESTRICT_TYPE3 (1<<1)
+#define ACPI_CEDT_CFMWS_RESTRICT_VOLATILE (1<<2)
+#define ACPI_CEDT_CFMWS_RESTRICT_PMEM (1<<3)
+#define ACPI_CEDT_CFMWS_RESTRICT_FIXED (1<<4)
+
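A sketch of walking the flexible interleave_targets[] array above — the way count assumes the power-of-two encoding of interleave_ways (the CXL spec also defines 3/6/12-way encodings), so treat it as illustrative:

/* Editor's sketch, not part of the patch. */
static void example_walk_cfmws(struct acpi_cedt_cfmws *cfmws)
{
	unsigned int i, ways = 1u << cfmws->interleave_ways;	/* assumed encoding */

	for (i = 0; i < ways; i++)
		pr_info("CFMWS @ %#llx: target[%u] = %#x\n",
			(unsigned long long)cfmws->base_hpa, i,
			cfmws->interleave_targets[i]);
}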
+/*******************************************************************************
+ *
* CPEP - Corrected Platform Error Polling table (ACPI 4.0)
* Version 1
*
@@ -399,7 +487,7 @@ struct acpi_csrt_descriptor {
* DBG2 - Debug Port Table 2
* Version 0 (Both main table and subtables)
*
- * Conforms to "Microsoft Debug Port Table 2 (DBG2)", December 10, 2015
+ * Conforms to "Microsoft Debug Port Table 2 (DBG2)", September 21, 2020
*
******************************************************************************/
@@ -449,11 +537,24 @@ struct acpi_dbg2_device {
#define ACPI_DBG2_16550_COMPATIBLE 0x0000
#define ACPI_DBG2_16550_SUBSET 0x0001
+#define ACPI_DBG2_MAX311XE_SPI 0x0002
#define ACPI_DBG2_ARM_PL011 0x0003
+#define ACPI_DBG2_MSM8X60 0x0004
+#define ACPI_DBG2_16550_NVIDIA 0x0005
+#define ACPI_DBG2_TI_OMAP 0x0006
+#define ACPI_DBG2_APM88XXXX 0x0008
+#define ACPI_DBG2_MSM8974 0x0009
+#define ACPI_DBG2_SAM5250 0x000A
+#define ACPI_DBG2_INTEL_USIF 0x000B
+#define ACPI_DBG2_IMX6 0x000C
#define ACPI_DBG2_ARM_SBSA_32BIT 0x000D
#define ACPI_DBG2_ARM_SBSA_GENERIC 0x000E
#define ACPI_DBG2_ARM_DCC 0x000F
#define ACPI_DBG2_BCM2835 0x0010
+#define ACPI_DBG2_SDM845_1_8432MHZ 0x0011
+#define ACPI_DBG2_16550_WITH_GAS 0x0012
+#define ACPI_DBG2_SDM845_7_372MHZ 0x0013
+#define ACPI_DBG2_INTEL_LPSS 0x0014
#define ACPI_DBG2_1394_STANDARD 0x0000
@@ -514,7 +615,8 @@ enum acpi_dmar_type {
ACPI_DMAR_TYPE_ROOT_ATS = 2,
ACPI_DMAR_TYPE_HARDWARE_AFFINITY = 3,
ACPI_DMAR_TYPE_NAMESPACE = 4,
- ACPI_DMAR_TYPE_RESERVED = 5 /* 5 and greater are reserved */
+ ACPI_DMAR_TYPE_SATC = 5,
+ ACPI_DMAR_TYPE_RESERVED = 6 /* 6 and greater are reserved */
};
/* DMAR Device Scope structure */
@@ -607,6 +709,14 @@ struct acpi_dmar_andd {
char device_name[1];
};
+/* 5: SOC Integrated Address Translation Cache Reporting Structure */
+
+struct acpi_dmar_satc {
+ struct acpi_dmar_header header;
+ u8 flags;
+ u8 reserved;
+ u16 segment;
+};
/*******************************************************************************
*
* DRTM - Dynamic Root of Trust for Measurement table
@@ -862,7 +972,7 @@ enum acpi_erst_instructions {
/* Command status return values */
enum acpi_erst_command_status {
- ACPI_ERST_SUCESS = 0,
+ ACPI_ERST_SUCCESS = 0,
ACPI_ERST_NO_SPACE = 1,
ACPI_ERST_NOT_AVAILABLE = 2,
ACPI_ERST_FAILURE = 3,
@@ -1436,7 +1546,8 @@ struct acpi_hmat_locality {
struct acpi_hmat_structure header;
u8 flags;
u8 data_type;
- u16 reserved1;
+ u8 min_transfer_size;
+ u8 reserved1;
u32 number_of_initiator_Pds;
u32 number_of_target_Pds;
u32 reserved2;
@@ -1445,15 +1556,18 @@ struct acpi_hmat_locality {
/* Masks for Flags field above */
-#define ACPI_HMAT_MEMORY_HIERARCHY (0x0F)
+#define ACPI_HMAT_MEMORY_HIERARCHY (0x0F) /* Bits 0-3 */
-/* Values for Memory Hierarchy flag */
+/* Values for Memory Hierarchy flags */
#define ACPI_HMAT_MEMORY 0
#define ACPI_HMAT_LAST_LEVEL_CACHE 1
#define ACPI_HMAT_1ST_LEVEL_CACHE 2
#define ACPI_HMAT_2ND_LEVEL_CACHE 3
#define ACPI_HMAT_3RD_LEVEL_CACHE 4
+#define ACPI_HMAT_MINIMUM_XFER_SIZE 0x10 /* Bit 4: ACPI 6.4 */
+#define ACPI_HMAT_NON_SEQUENTIAL_XFERS 0x20 /* Bit 5: ACPI 6.4 */
+
/* Values for data_type field above */
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index b818ba60e19d..655102bc6d14 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -3,7 +3,7 @@
*
* Name: actbl2.h - ACPI Table Definitions (tables not in ACPI spec)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
@@ -24,6 +24,9 @@
* file. Useful because they make it more difficult to inadvertently type in
* the wrong signature.
*/
+#define ACPI_SIG_AGDI "AGDI" /* Arm Generic Diagnostic Dump and Reset Device Interface */
+#define ACPI_SIG_APMT "APMT" /* Arm Performance Monitoring Unit table */
+#define ACPI_SIG_BDAT "BDAT" /* BIOS Data ACPI Table */
#define ACPI_SIG_IORT "IORT" /* IO Remapping Table */
#define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */
#define ACPI_SIG_LPIT "LPIT" /* Low Power Idle Table */
@@ -33,16 +36,21 @@
#define ACPI_SIG_MPST "MPST" /* Memory Power State Table */
#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */
#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */
-#define ACPI_SIG_MTMR "MTMR" /* MID Timer table */
#define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */
+#define ACPI_SIG_NHLT "NHLT" /* Non HD Audio Link Table */
#define ACPI_SIG_PCCT "PCCT" /* Platform Communications Channel Table */
#define ACPI_SIG_PDTT "PDTT" /* Platform Debug Trigger Table */
+#define ACPI_SIG_PHAT "PHAT" /* Platform Health Assessment Table */
#define ACPI_SIG_PMTT "PMTT" /* Platform Memory Topology Table */
#define ACPI_SIG_PPTT "PPTT" /* Processor Properties Topology Table */
+#define ACPI_SIG_PRMT "PRMT" /* Platform Runtime Mechanism Table */
#define ACPI_SIG_RASF "RASF" /* RAS Feature table */
+#define ACPI_SIG_RGRT "RGRT" /* Regulatory Graphics Resource Table */
#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */
#define ACPI_SIG_SDEI "SDEI" /* Software Delegated Exception Interface Table */
#define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */
+#define ACPI_SIG_SVKL "SVKL" /* Storage Volume Key Location Table */
+#define ACPI_SIG_TDEL "TDEL" /* TD Event Log Table */
/*
* All tables must be byte-packed to match the ACPI specification, since
@@ -64,10 +72,292 @@
/*******************************************************************************
*
+ * AEST - Arm Error Source Table
+ *
+ * Conforms to: ACPI for the Armv8 RAS Extensions 1.1 Platform Design Document
+ * September 2020.
+ *
+ ******************************************************************************/
+
+struct acpi_table_aest {
+ struct acpi_table_header header;
+ void *node_array[];
+};
+
+/* Common Subtable header - one per Node Structure (Subtable) */
+
+struct acpi_aest_hdr {
+ u8 type;
+ u16 length;
+ u8 reserved;
+ u32 node_specific_offset;
+ u32 node_interface_offset;
+ u32 node_interrupt_offset;
+ u32 node_interrupt_count;
+ u64 timestamp_rate;
+ u64 reserved1;
+ u64 error_injection_rate;
+};
+
+/* Values for Type above */
+
+#define ACPI_AEST_PROCESSOR_ERROR_NODE 0
+#define ACPI_AEST_MEMORY_ERROR_NODE 1
+#define ACPI_AEST_SMMU_ERROR_NODE 2
+#define ACPI_AEST_VENDOR_ERROR_NODE 3
+#define ACPI_AEST_GIC_ERROR_NODE 4
+#define ACPI_AEST_NODE_TYPE_RESERVED 5 /* 5 and above are reserved */
+
+/*
+ * AEST subtables (Error nodes)
+ */
+
+/* 0: Processor Error */
+
+typedef struct acpi_aest_processor {
+ u32 processor_id;
+ u8 resource_type;
+ u8 reserved;
+ u8 flags;
+ u8 revision;
+ u64 processor_affinity;
+
+} acpi_aest_processor;
+
+/* Values for resource_type above, related structs below */
+
+#define ACPI_AEST_CACHE_RESOURCE 0
+#define ACPI_AEST_TLB_RESOURCE 1
+#define ACPI_AEST_GENERIC_RESOURCE 2
+#define ACPI_AEST_RESOURCE_RESERVED 3 /* 3 and above are reserved */
+
+/* 0R: Processor Cache Resource Substructure */
+
+typedef struct acpi_aest_processor_cache {
+ u32 cache_reference;
+ u32 reserved;
+
+} acpi_aest_processor_cache;
+
+/* Values for cache_type above */
+
+#define ACPI_AEST_CACHE_DATA 0
+#define ACPI_AEST_CACHE_INSTRUCTION 1
+#define ACPI_AEST_CACHE_UNIFIED 2
+#define ACPI_AEST_CACHE_RESERVED 3 /* 3 and above are reserved */
+
+/* 1R: Processor TLB Resource Substructure */
+
+typedef struct acpi_aest_processor_tlb {
+ u32 tlb_level;
+ u32 reserved;
+
+} acpi_aest_processor_tlb;
+
+/* 2R: Processor Generic Resource Substructure */
+
+typedef struct acpi_aest_processor_generic {
+ u32 resource;
+
+} acpi_aest_processor_generic;
+
+/* 1: Memory Error */
+
+typedef struct acpi_aest_memory {
+ u32 srat_proximity_domain;
+
+} acpi_aest_memory;
+
+/* 2: Smmu Error */
+
+typedef struct acpi_aest_smmu {
+ u32 iort_node_reference;
+ u32 subcomponent_reference;
+
+} acpi_aest_smmu;
+
+/* 3: Vendor Defined */
+
+typedef struct acpi_aest_vendor {
+ u32 acpi_hid;
+ u32 acpi_uid;
+ u8 vendor_specific_data[16];
+
+} acpi_aest_vendor;
+
+/* 4: Gic Error */
+
+typedef struct acpi_aest_gic {
+ u32 interface_type;
+ u32 instance_id;
+
+} acpi_aest_gic;
+
+/* Values for interface_type above */
+
+#define ACPI_AEST_GIC_CPU 0
+#define ACPI_AEST_GIC_DISTRIBUTOR 1
+#define ACPI_AEST_GIC_REDISTRIBUTOR 2
+#define ACPI_AEST_GIC_ITS 3
+#define ACPI_AEST_GIC_RESERVED 4 /* 4 and above are reserved */
+
+/* Node Interface Structure */
+
+typedef struct acpi_aest_node_interface {
+ u8 type;
+ u8 reserved[3];
+ u32 flags;
+ u64 address;
+ u32 error_record_index;
+ u32 error_record_count;
+ u64 error_record_implemented;
+ u64 error_status_reporting;
+ u64 addressing_mode;
+
+} acpi_aest_node_interface;
+
+/* Values for Type field above */
+
+#define ACPI_AEST_NODE_SYSTEM_REGISTER 0
+#define ACPI_AEST_NODE_MEMORY_MAPPED 1
+#define ACPI_AEST_XFACE_RESERVED 2 /* 2 and above are reserved */
+
+/* Node Interrupt Structure */
+
+typedef struct acpi_aest_node_interrupt {
+ u8 type;
+ u8 reserved[2];
+ u8 flags;
+ u32 gsiv;
+ u8 iort_id;
+ u8 reserved1[3];
+
+} acpi_aest_node_interrupt;
+
+/* Values for Type field above */
+
+#define ACPI_AEST_NODE_FAULT_HANDLING 0
+#define ACPI_AEST_NODE_ERROR_RECOVERY 1
+#define ACPI_AEST_XRUPT_RESERVED 2 /* 2 and above are reserved */
+
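A hedged sketch of resolving an AEST node's substructures, assuming the node_*_offset fields are byte offsets from the start of the node header:

/* Editor's sketch, not part of the patch. */
static struct acpi_aest_node_interface *
example_aest_interface(struct acpi_aest_hdr *node)
{
	return (struct acpi_aest_node_interface *)
		((u8 *)node + node->node_interface_offset);
}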
+/*******************************************************************************
+ * AGDI - Arm Generic Diagnostic Dump and Reset Device Interface
+ *
+ * Conforms to "ACPI for Arm Components 1.1, Platform Design Document"
+ * ARM DEN0093 v1.1
+ *
+ ******************************************************************************/
+struct acpi_table_agdi {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 flags;
+ u8 reserved[3];
+ u32 sdei_event;
+ u32 gsiv;
+};
+
+/* Mask for Flags field above */
+
+#define ACPI_AGDI_SIGNALING_MODE (1)
+
+/*******************************************************************************
+ *
+ * APMT - ARM Performance Monitoring Unit Table
+ *
+ * Conforms to:
+ * ARM Performance Monitoring Unit Architecture 1.0 Platform Design Document
+ * ARM DEN0117 v1.0 November 25, 2021
+ *
+ ******************************************************************************/
+
+struct acpi_table_apmt {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+#define ACPI_APMT_NODE_ID_LENGTH 4
+
+/*
+ * APMT subtables
+ */
+struct acpi_apmt_node {
+ u16 length;
+ u8 flags;
+ u8 type;
+ u32 id;
+ u64 inst_primary;
+ u32 inst_secondary;
+ u64 base_address0;
+ u64 base_address1;
+ u32 ovflw_irq;
+ u32 reserved;
+ u32 ovflw_irq_flags;
+ u32 proc_affinity;
+ u32 impl_id;
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_APMT_FLAGS_DUAL_PAGE (1<<0)
+#define ACPI_APMT_FLAGS_AFFINITY (1<<1)
+#define ACPI_APMT_FLAGS_ATOMIC (1<<2)
+
+/* Values for Flags dual page field above */
+
+#define ACPI_APMT_FLAGS_DUAL_PAGE_NSUPP (0<<0)
+#define ACPI_APMT_FLAGS_DUAL_PAGE_SUPP (1<<0)
+
+/* Values for Flags processor affinity field above */
+#define ACPI_APMT_FLAGS_AFFINITY_PROC (0<<1)
+#define ACPI_APMT_FLAGS_AFFINITY_PROC_CONTAINER (1<<1)
+
+/* Values for Flags 64-bit atomic field above */
+#define ACPI_APMT_FLAGS_ATOMIC_NSUPP (0<<2)
+#define ACPI_APMT_FLAGS_ATOMIC_SUPP (1<<2)
+
+/* Values for Type field above */
+
+enum acpi_apmt_node_type {
+ ACPI_APMT_NODE_TYPE_MC = 0x00,
+ ACPI_APMT_NODE_TYPE_SMMU = 0x01,
+ ACPI_APMT_NODE_TYPE_PCIE_ROOT = 0x02,
+ ACPI_APMT_NODE_TYPE_ACPI = 0x03,
+ ACPI_APMT_NODE_TYPE_CACHE = 0x04,
+ ACPI_APMT_NODE_TYPE_COUNT
+};
+
+/* Masks for ovflw_irq_flags field above */
+
+#define ACPI_APMT_OVFLW_IRQ_FLAGS_MODE (1<<0)
+#define ACPI_APMT_OVFLW_IRQ_FLAGS_TYPE (1<<1)
+
+/* Values for ovflw_irq_flags mode field above */
+
+#define ACPI_APMT_OVFLW_IRQ_FLAGS_MODE_LEVEL (0<<0)
+#define ACPI_APMT_OVFLW_IRQ_FLAGS_MODE_EDGE (1<<0)
+
+/* Values for ovflw_irq_flags type field above */
+
+#define ACPI_APMT_OVFLW_IRQ_FLAGS_TYPE_WIRED (0<<1)
+
+/*******************************************************************************
+ *
+ * BDAT - BIOS Data ACPI Table
+ *
+ * Conforms to "BIOS Data ACPI Table", Interface Specification v4.0 Draft 5
+ * Nov 2020
+ *
+ ******************************************************************************/
+
+struct acpi_table_bdat {
+ struct acpi_table_header header;
+ struct acpi_generic_address gas;
+};
+
+/*******************************************************************************
+ *
* IORT - IO Remapping Table
*
* Conforms to "IO Remapping Table System Software on ARM Platforms",
- * Document number: ARM DEN 0049D, March 2018
+ * Document number: ARM DEN 0049E.d, Feb 2022
*
******************************************************************************/
@@ -85,7 +375,7 @@ struct acpi_iort_node {
u8 type;
u16 length;
u8 revision;
- u32 reserved;
+ u32 identifier;
u32 mapping_count;
u32 mapping_offset;
char node_data[1];
@@ -99,7 +389,8 @@ enum acpi_iort_node_type {
ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 0x02,
ACPI_IORT_NODE_SMMU = 0x03,
ACPI_IORT_NODE_SMMU_V3 = 0x04,
- ACPI_IORT_NODE_PMCG = 0x05
+ ACPI_IORT_NODE_PMCG = 0x05,
+ ACPI_IORT_NODE_RMR = 0x06,
};
struct acpi_iort_id_mapping {
@@ -163,13 +454,18 @@ struct acpi_iort_root_complex {
u32 ats_attribute;
u32 pci_segment_number;
u8 memory_address_limit; /* Memory address size limit */
- u8 reserved[3]; /* Reserved, must be zero */
+ u16 pasid_capabilities; /* PASID Capabilities */
+ u8 reserved[1]; /* Reserved, must be zero */
};
-/* Values for ats_attribute field above */
+/* Masks for ats_attribute field above */
+
+#define ACPI_IORT_ATS_SUPPORTED (1) /* The root complex supports ATS */
+#define ACPI_IORT_PRI_SUPPORTED (1<<1) /* The root complex supports PRI */
+#define ACPI_IORT_PASID_FWD_SUPPORTED (1<<2) /* The root complex supports PASID forwarding */
-#define ACPI_IORT_ATS_SUPPORTED 0x00000001 /* The root complex supports ATS */
-#define ACPI_IORT_ATS_UNSUPPORTED 0x00000000 /* The root complex doesn't support ATS */
+/* Masks for pasid_capabilities field above */
+#define ACPI_IORT_PASID_MAX_WIDTH (0x1F) /* Bits 0-4 */
struct acpi_iort_smmu {
u64 base_address; /* SMMU base address */
@@ -240,6 +536,37 @@ struct acpi_iort_pmcg {
u64 page1_base_address;
};
+struct acpi_iort_rmr {
+ u32 flags;
+ u32 rmr_count;
+ u32 rmr_offset;
+};
+
+/* Masks for Flags field above */
+#define ACPI_IORT_RMR_REMAP_PERMITTED (1)
+#define ACPI_IORT_RMR_ACCESS_PRIVILEGE (1<<1)
+
+/*
+ * Macro to access the Access Attributes in flags field above:
+ * Access Attributes is encoded in bits 9:2
+ */
+#define ACPI_IORT_RMR_ACCESS_ATTRIBUTES(flags) (((flags) >> 2) & 0xFF)
+
+/* Values for above Access Attributes */
+
+#define ACPI_IORT_RMR_ATTR_DEVICE_NGNRNE 0x00
+#define ACPI_IORT_RMR_ATTR_DEVICE_NGNRE 0x01
+#define ACPI_IORT_RMR_ATTR_DEVICE_NGRE 0x02
+#define ACPI_IORT_RMR_ATTR_DEVICE_GRE 0x03
+#define ACPI_IORT_RMR_ATTR_NORMAL_NC 0x04
+#define ACPI_IORT_RMR_ATTR_NORMAL_IWB_OWB 0x05
+
+struct acpi_iort_rmr_desc {
+ u64 base_address;
+ u64 length;
+ u32 reserved;
+};
+
/*******************************************************************************
*
* IVRS - I/O Virtualization Reporting Structure
@@ -274,7 +601,9 @@ struct acpi_ivrs_header {
/* Values for subtable Type above */
enum acpi_ivrs_type {
- ACPI_IVRS_TYPE_HARDWARE = 0x10,
+ ACPI_IVRS_TYPE_HARDWARE1 = 0x10,
+ ACPI_IVRS_TYPE_HARDWARE2 = 0x11,
+ ACPI_IVRS_TYPE_HARDWARE3 = 0x40,
ACPI_IVRS_TYPE_MEMORY1 = 0x20,
ACPI_IVRS_TYPE_MEMORY2 = 0x21,
ACPI_IVRS_TYPE_MEMORY3 = 0x22
@@ -301,13 +630,26 @@ enum acpi_ivrs_type {
/* 0x10: I/O Virtualization Hardware Definition Block (IVHD) */
-struct acpi_ivrs_hardware {
+struct acpi_ivrs_hardware_10 {
struct acpi_ivrs_header header;
u16 capability_offset; /* Offset for IOMMU control fields */
u64 base_address; /* IOMMU control registers */
u16 pci_segment_group;
u16 info; /* MSI number and unit ID */
- u32 reserved;
+ u32 feature_reporting;
+};
+
+/* 0x11: I/O Virtualization Hardware Definition Block (IVHD) */
+
+struct acpi_ivrs_hardware_11 {
+ struct acpi_ivrs_header header;
+ u16 capability_offset; /* Offset for IOMMU control fields */
+ u64 base_address; /* IOMMU control registers */
+ u16 pci_segment_group;
+ u16 info; /* MSI number and unit ID */
+ u32 attributes;
+ u64 efr_register_image;
+ u64 reserved;
};
/* Masks for Info field above */
@@ -350,7 +692,11 @@ enum acpi_ivrs_device_entry_type {
ACPI_IVRS_TYPE_ALIAS_START = 67, /* Uses struct acpi_ivrs_device8a */
ACPI_IVRS_TYPE_EXT_SELECT = 70, /* Uses struct acpi_ivrs_device8b */
ACPI_IVRS_TYPE_EXT_START = 71, /* Uses struct acpi_ivrs_device8b */
- ACPI_IVRS_TYPE_SPECIAL = 72 /* Uses struct acpi_ivrs_device8c */
+ ACPI_IVRS_TYPE_SPECIAL = 72, /* Uses struct acpi_ivrs_device8c */
+
+ /* Variable-length device entries */
+
+ ACPI_IVRS_TYPE_HID = 240 /* Uses ACPI_IVRS_DEVICE_HID */
};
/* Values for Data field above */
@@ -402,6 +748,22 @@ struct acpi_ivrs_device8c {
#define ACPI_IVHD_IOAPIC 1
#define ACPI_IVHD_HPET 2
+/* Type 240: variable-length device entry */
+
+struct acpi_ivrs_device_hid {
+ struct acpi_ivrs_de_header header;
+ u64 acpi_hid;
+ u64 acpi_cid;
+ u8 uid_type;
+ u8 uid_length;
+};
+
+/* Values for uid_type above */
+
+#define ACPI_IVRS_UID_NOT_PRESENT 0
+#define ACPI_IVRS_UID_IS_INTEGER 1
+#define ACPI_IVRS_UID_IS_STRING 2
+
/* 0x20, 0x21, 0x22: I/O Virtualization Memory Definition Block (IVMD) */
struct acpi_ivrs_memory {
@@ -502,7 +864,9 @@ enum acpi_madt_type {
ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13,
ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14,
ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15,
- ACPI_MADT_TYPE_RESERVED = 16 /* 16 and greater are reserved */
+ ACPI_MADT_TYPE_MULTIPROC_WAKEUP = 16,
+ ACPI_MADT_TYPE_RESERVED = 17, /* 17 to 0x7F are reserved */
+ ACPI_MADT_TYPE_OEM_RESERVED = 0x80 /* 0x80 to 0xFF are reserved for OEM use */
};
/*
@@ -709,6 +1073,35 @@ struct acpi_madt_generic_translator {
u32 reserved2;
};
+/* 16: Multiprocessor wakeup (ACPI 6.4) */
+
+struct acpi_madt_multiproc_wakeup {
+ struct acpi_subtable_header header;
+ u16 mailbox_version;
+ u32 reserved; /* reserved - must be zero */
+ u64 base_address;
+};
+
+#define ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE 2032
+#define ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE 2048
+
+struct acpi_madt_multiproc_wakeup_mailbox {
+ u16 command;
+ u16 reserved; /* reserved - must be zero */
+ u32 apic_id;
+ u64 wakeup_vector;
+ u8 reserved_os[ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE]; /* reserved for OS use */
+ u8 reserved_firmware[ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE]; /* reserved for firmware use */
+};
+
+#define ACPI_MP_WAKE_COMMAND_WAKEUP 1
+
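A hedged sketch of the wakeup handshake the mailbox layout implies — the write ordering and the AP-clears-command acknowledgement are assumptions about the protocol, not taken from this patch:

/* Editor's sketch, not part of the patch. 'mb' is an assumed mapped mailbox. */
static void example_mp_wakeup(struct acpi_madt_multiproc_wakeup_mailbox *mb,
			      u32 apic_id, u64 start_ip)
{
	mb->apic_id = apic_id;
	mb->wakeup_vector = start_ip;
	smp_wmb();		/* publish the fields before the command write */
	WRITE_ONCE(mb->command, ACPI_MP_WAKE_COMMAND_WAKEUP);

	while (READ_ONCE(mb->command))
		cpu_relax();	/* assumed: the AP acks by clearing command */
}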
+/* 17: OEM data */
+
+struct acpi_madt_oem_data {
+ u8 oem_data[0];
+};
+
/*
* Common flags fields for MADT subtables
*/
@@ -716,6 +1109,7 @@ struct acpi_madt_generic_translator {
/* MADT Local APIC flags */
#define ACPI_MADT_ENABLED (1) /* 00: Processor is usable if set */
+#define ACPI_MADT_ONLINE_CAPABLE (2) /* 01: System HW supports enabling processor at runtime */
/* MADT MPS INTI flags (inti_flags) */
@@ -922,29 +1316,6 @@ struct acpi_table_msdm {
/*******************************************************************************
*
- * MTMR - MID Timer Table
- * Version 1
- *
- * Conforms to "Simple Firmware Interface Specification",
- * Draft 0.8.2, Oct 19, 2010
- * NOTE: The ACPI MTMR is equivalent to the SFI MTMR table.
- *
- ******************************************************************************/
-
-struct acpi_table_mtmr {
- struct acpi_table_header header; /* Common ACPI table header */
-};
-
-/* MTMR entry */
-
-struct acpi_mtmr_entry {
- struct acpi_generic_address physical_address;
- u32 frequency;
- u32 irq;
-};
-
-/*******************************************************************************
- *
* NFIT - NVDIMM Interface Table (ACPI 6.0+)
* Version 1
*
@@ -992,12 +1363,14 @@ struct acpi_nfit_system_address {
u64 address;
u64 length;
u64 memory_mapping;
+ u64 location_cookie; /* ACPI 6.4 */
};
/* Flags */
#define ACPI_NFIT_ADD_ONLINE_ONLY (1) /* 00: Add/Online Operation Only */
#define ACPI_NFIT_PROXIMITY_VALID (1<<1) /* 01: Proximity Domain Valid */
+#define ACPI_NFIT_LOCATION_COOKIE_VALID (1<<2) /* 02: SPA location cookie valid (ACPI 6.4) */
/* Range Type GUIDs appear in the include/acuuid.h file */
@@ -1170,6 +1543,260 @@ struct nfit_device_handle {
/*******************************************************************************
*
+ * NHLT - Non HD Audio Link Table
+ *
+ * Conforms to: Intel Smart Sound Technology NHLT Specification
+ * Version 0.8.1, January 2020.
+ *
+ ******************************************************************************/
+
+/* Main table */
+
+struct acpi_table_nhlt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 endpoint_count;
+};
+
+struct acpi_nhlt_endpoint {
+ u32 descriptor_length;
+ u8 link_type;
+ u8 instance_id;
+ u16 vendor_id;
+ u16 device_id;
+ u16 revision_id;
+ u32 subsystem_id;
+ u8 device_type;
+ u8 direction;
+ u8 virtual_bus_id;
+};
+
+/* Types for link_type field above */
+
+#define ACPI_NHLT_RESERVED_HD_AUDIO 0
+#define ACPI_NHLT_RESERVED_DSP 1
+#define ACPI_NHLT_PDM 2
+#define ACPI_NHLT_SSP 3
+#define ACPI_NHLT_RESERVED_SLIMBUS 4
+#define ACPI_NHLT_RESERVED_SOUNDWIRE 5
+#define ACPI_NHLT_TYPE_RESERVED 6 /* 6 and above are reserved */
+
+/* All other values above are reserved */
+
+/* Values for device_id field above */
+
+#define ACPI_NHLT_PDM_DMIC 0xAE20
+#define ACPI_NHLT_BT_SIDEBAND 0xAE30
+#define ACPI_NHLT_I2S_TDM_CODECS 0xAE23
+
+/* Values for device_type field above */
+
+/* SSP Link */
+
+#define ACPI_NHLT_LINK_BT_SIDEBAND 0
+#define ACPI_NHLT_LINK_FM 1
+#define ACPI_NHLT_LINK_MODEM 2
+/* 3 is reserved */
+#define ACPI_NHLT_LINK_SSP_ANALOG_CODEC 4
+
+/* PDM Link */
+
+#define ACPI_NHLT_PDM_ON_CAVS_1P8 0
+#define ACPI_NHLT_PDM_ON_CAVS_1P5 1
+
+/* Values for Direction field above */
+
+#define ACPI_NHLT_DIR_RENDER 0
+#define ACPI_NHLT_DIR_CAPTURE 1
+#define ACPI_NHLT_DIR_RENDER_LOOPBACK 2
+#define ACPI_NHLT_DIR_RENDER_FEEDBACK 3
+#define ACPI_NHLT_DIR_RESERVED 4 /* 4 and above are reserved */
+
+struct acpi_nhlt_device_specific_config {
+ u32 capabilities_size;
+ u8 virtual_slot;
+ u8 config_type;
+};
+
+struct acpi_nhlt_device_specific_config_a {
+ u32 capabilities_size;
+ u8 virtual_slot;
+ u8 config_type;
+ u8 array_type;
+};
+
+/* Values for Config Type above */
+
+#define ACPI_NHLT_CONFIG_TYPE_GENERIC 0x00
+#define ACPI_NHLT_CONFIG_TYPE_MIC_ARRAY 0x01
+#define ACPI_NHLT_CONFIG_TYPE_RENDER_FEEDBACK 0x03
+#define ACPI_NHLT_CONFIG_TYPE_RESERVED 0x04 /* 4 and above are reserved */
+
+struct acpi_nhlt_device_specific_config_b {
+ u32 capabilities_size;
+};
+
+struct acpi_nhlt_device_specific_config_c {
+ u32 capabilities_size;
+ u8 virtual_slot;
+};
+
+struct acpi_nhlt_render_device_specific_config {
+ u32 capabilities_size;
+ u8 virtual_slot;
+};
+
+struct acpi_nhlt_wave_extensible {
+ u16 format_tag;
+ u16 channel_count;
+ u32 samples_per_sec;
+ u32 avg_bytes_per_sec;
+ u16 block_align;
+ u16 bits_per_sample;
+ u16 extra_format_size;
+ u16 valid_bits_per_sample;
+ u32 channel_mask;
+ u8 sub_format_guid[16];
+};
+
+/* Values for channel_mask above */
+
+#define ACPI_NHLT_SPKR_FRONT_LEFT 0x1
+#define ACPI_NHLT_SPKR_FRONT_RIGHT 0x2
+#define ACPI_NHLT_SPKR_FRONT_CENTER 0x4
+#define ACPI_NHLT_SPKR_LOW_FREQ 0x8
+#define ACPI_NHLT_SPKR_BACK_LEFT 0x10
+#define ACPI_NHLT_SPKR_BACK_RIGHT 0x20
+#define ACPI_NHLT_SPKR_FRONT_LEFT_OF_CENTER 0x40
+#define ACPI_NHLT_SPKR_FRONT_RIGHT_OF_CENTER 0x80
+#define ACPI_NHLT_SPKR_BACK_CENTER 0x100
+#define ACPI_NHLT_SPKR_SIDE_LEFT 0x200
+#define ACPI_NHLT_SPKR_SIDE_RIGHT 0x400
+#define ACPI_NHLT_SPKR_TOP_CENTER 0x800
+#define ACPI_NHLT_SPKR_TOP_FRONT_LEFT 0x1000
+#define ACPI_NHLT_SPKR_TOP_FRONT_CENTER 0x2000
+#define ACPI_NHLT_SPKR_TOP_FRONT_RIGHT 0x4000
+#define ACPI_NHLT_SPKR_TOP_BACK_LEFT 0x8000
+#define ACPI_NHLT_SPKR_TOP_BACK_CENTER 0x10000
+#define ACPI_NHLT_SPKR_TOP_BACK_RIGHT 0x20000
+
+struct acpi_nhlt_format_config {
+ struct acpi_nhlt_wave_extensible format;
+ u32 capability_size;
+ u8 capabilities[];
+};
+
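Because each format record ends in a flexible capabilities[] payload, stepping through the format list requires a computed stride; an illustrative helper:

/* Editor's sketch, not part of the patch. */
static struct acpi_nhlt_format_config *
example_next_format(struct acpi_nhlt_format_config *fmt)
{
	return (struct acpi_nhlt_format_config *)
		((u8 *)fmt + sizeof(*fmt) + fmt->capability_size);
}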
+struct acpi_nhlt_formats_config {
+ u8 formats_count;
+};
+
+struct acpi_nhlt_device_specific_hdr {
+ u8 virtual_slot;
+ u8 config_type;
+};
+
+/* Types for config_type above */
+
+#define ACPI_NHLT_GENERIC 0
+#define ACPI_NHLT_MIC 1
+#define ACPI_NHLT_RENDER 3
+
+struct acpi_nhlt_mic_device_specific_config {
+ struct acpi_nhlt_device_specific_hdr device_config;
+ u8 array_type_ext;
+};
+
+/* Values for array_type_ext above */
+
+#define ACPI_NHLT_ARRAY_TYPE_RESERVED 0x09 /* 9 and below are reserved */
+#define ACPI_NHLT_SMALL_LINEAR_2ELEMENT 0x0A
+#define ACPI_NHLT_BIG_LINEAR_2ELEMENT 0x0B
+#define ACPI_NHLT_FIRST_GEOMETRY_LINEAR_4ELEMENT 0x0C
+#define ACPI_NHLT_PLANAR_LSHAPED_4ELEMENT 0x0D
+#define ACPI_NHLT_SECOND_GEOMETRY_LINEAR_4ELEMENT 0x0E
+#define ACPI_NHLT_VENDOR_DEFINED 0x0F
+#define ACPI_NHLT_ARRAY_TYPE_MASK 0x0F
+#define ACPI_NHLT_ARRAY_TYPE_EXT_MASK 0x10
+
+#define ACPI_NHLT_NO_EXTENSION 0x0
+#define ACPI_NHLT_MIC_SNR_SENSITIVITY_EXT (1<<4)
+
+struct acpi_nhlt_vendor_mic_count {
+ u8 microphone_count;
+};
+
+struct acpi_nhlt_vendor_mic_config {
+ u8 type;
+ u8 panel;
+ u16 speaker_position_distance; /* mm */
+ u16 horizontal_offset; /* mm */
+ u16 vertical_offset; /* mm */
+ u8 frequency_low_band; /* 5*Hz */
+ u8 frequency_high_band; /* 500*Hz */
+ u16 direction_angle; /* -180 to +180 */
+ u16 elevation_angle; /* -180 to +180 */
+ u16 work_vertical_angle_begin; /* -180 to +180, 2 deg step */
+ u16 work_vertical_angle_end; /* -180 to +180, 2 deg step */
+ u16 work_horizontal_angle_begin; /* -180 to +180, 2 deg step */
+ u16 work_horizontal_angle_end; /* -180 to +180, 2 deg step */
+};
+
+/* Values for Type field above */
+
+#define ACPI_NHLT_MIC_OMNIDIRECTIONAL 0
+#define ACPI_NHLT_MIC_SUBCARDIOID 1
+#define ACPI_NHLT_MIC_CARDIOID 2
+#define ACPI_NHLT_MIC_SUPER_CARDIOID 3
+#define ACPI_NHLT_MIC_HYPER_CARDIOID 4
+#define ACPI_NHLT_MIC_8_SHAPED 5
+#define ACPI_NHLT_MIC_RESERVED6 6 /* 6 is reserved */
+#define ACPI_NHLT_MIC_VENDOR_DEFINED 7
+#define ACPI_NHLT_MIC_RESERVED 8 /* 8 and above are reserved */
+
+/* Values for Panel field above */
+
+#define ACPI_NHLT_MIC_POSITION_TOP 0
+#define ACPI_NHLT_MIC_POSITION_BOTTOM 1
+#define ACPI_NHLT_MIC_POSITION_LEFT 2
+#define ACPI_NHLT_MIC_POSITION_RIGHT 3
+#define ACPI_NHLT_MIC_POSITION_FRONT 4
+#define ACPI_NHLT_MIC_POSITION_BACK 5
+#define ACPI_NHLT_MIC_POSITION_RESERVED 6 /* 6 and above are reserved */
+
+struct acpi_nhlt_vendor_mic_device_specific_config {
+ struct acpi_nhlt_mic_device_specific_config mic_array_device_config;
+ u8 number_of_microphones;
+ struct acpi_nhlt_vendor_mic_config mic_config[]; /* Indexed by number_of_microphones */
+};
+
+/* Microphone SNR and Sensitivity extension */
+
+struct acpi_nhlt_mic_snr_sensitivity_extension {
+ u32 SNR;
+ u32 sensitivity;
+};
+
+/* Render device with feedback */
+
+struct acpi_nhlt_render_feedback_device_specific_config {
+ u8 feedback_virtual_slot; /* Render slot in case of capture */
+ u16 feedback_channels; /* Informative only */
+ u16 feedback_valid_bits_per_sample;
+};
+
+/* Undocumented structures */
+
+struct acpi_nhlt_device_info_count {
+ u8 structure_count;
+};
+
+struct acpi_nhlt_device_info {
+ u8 device_id[16];
+ u8 device_instance_id;
+ u8 device_port_id;
+};
+
+/*******************************************************************************
+ *
* PCCT - Platform Communications Channel Table (ACPI 5.0)
* Version 2 (ACPI 6.2)
*
@@ -1193,7 +1820,8 @@ enum acpi_pcct_type {
ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2 = 2, /* ACPI 6.1 */
ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE = 3, /* ACPI 6.2 */
ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE = 4, /* ACPI 6.2 */
- ACPI_PCCT_TYPE_RESERVED = 5 /* 5 and greater are reserved */
+ ACPI_PCCT_TYPE_HW_REG_COMM_SUBSPACE = 5, /* ACPI 6.4 */
+ ACPI_PCCT_TYPE_RESERVED = 6 /* 6 and greater are reserved */
};
/*
@@ -1308,6 +1936,24 @@ struct acpi_pcct_ext_pcc_slave {
u64 error_status_mask;
};
+/* 5: HW Registers based Communications Subspace */
+
+struct acpi_pcct_hw_reg {
+ struct acpi_subtable_header header;
+ u16 version;
+ u64 base_address;
+ u64 length;
+ struct acpi_generic_address doorbell_register;
+ u64 doorbell_preserve;
+ u64 doorbell_write;
+ struct acpi_generic_address cmd_complete_register;
+ u64 cmd_complete_mask;
+ struct acpi_generic_address error_status_register;
+ u64 error_status_mask;
+ u32 nominal_latency;
+ u32 min_turnaround_time;
+};
+
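The preserve/write pair follows the usual PCC doorbell convention: read the doorbell register, keep only the bits in doorbell_preserve, then OR in doorbell_write. A hedged sketch using the ACPICA generic-address accessors (error handling elided):

static void pcc_hw_reg_ring_doorbell(struct acpi_pcct_hw_reg *ss)
{
	u64 val = 0;

	acpi_read(&val, &ss->doorbell_register);
	val &= ss->doorbell_preserve;
	val |= ss->doorbell_write;
	acpi_write(val, &ss->doorbell_register);
}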
/* Values for doorbell flags above */
#define ACPI_PCCT_INTERRUPT_POLARITY (1)
@@ -1366,6 +2012,66 @@ struct acpi_pdtt_channel {
/*******************************************************************************
*
+ * PHAT - Platform Health Assessment Table (ACPI 6.4)
+ * Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_phat {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+/* Common header for PHAT subtables that follow main table */
+
+struct acpi_phat_header {
+ u16 type;
+ u16 length;
+ u8 revision;
+};
+
+/* Values for Type field above */
+
+#define ACPI_PHAT_TYPE_FW_VERSION_DATA 0
+#define ACPI_PHAT_TYPE_FW_HEALTH_DATA 1
+#define ACPI_PHAT_TYPE_RESERVED 2 /* 0x02-0xFFFF are reserved */
+
+/*
+ * PHAT subtables, correspond to Type in struct acpi_phat_header
+ */
+
+/* 0: Firmware Version Data Record */
+
+struct acpi_phat_version_data {
+ struct acpi_phat_header header;
+ u8 reserved[3];
+ u32 element_count;
+};
+
+struct acpi_phat_version_element {
+ u8 guid[16];
+ u64 version_value;
+ u32 producer_id;
+};
+
+/* 1: Firmware Health Data Record */
+
+struct acpi_phat_health_data {
+ struct acpi_phat_header header;
+ u8 reserved[2];
+ u8 health;
+ u8 device_guid[16];
+ u32 device_specific_offset; /* Zero if no Device-specific data */
+};
+
+/* Values for Health field above */
+
+#define ACPI_PHAT_ERRORS_FOUND 0
+#define ACPI_PHAT_NO_ERRORS 1
+#define ACPI_PHAT_UNKNOWN_ERRORS 2
+#define ACPI_PHAT_ADVISORY 3
+
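PHAT records are packed back to back after the main header and are self-describing via type and length, so a walker simply advances by each record's length. A sketch, with bounds checks kept minimal:

static void phat_walk(struct acpi_table_phat *phat)
{
	u8 *p = (u8 *)phat + sizeof(*phat);
	u8 *end = (u8 *)phat + phat->header.length;

	while (p + sizeof(struct acpi_phat_header) <= end) {
		struct acpi_phat_header *hdr = (struct acpi_phat_header *)p;

		if (!hdr->length)
			break;	/* malformed record, stop */
		if (hdr->type == ACPI_PHAT_TYPE_FW_HEALTH_DATA)
			; /* cast to struct acpi_phat_health_data and inspect */
		p += hdr->length;
	}
}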
+/*******************************************************************************
+ *
* PMTT - Platform Memory Topology Table (ACPI 5.0)
* Version 1
*
@@ -1373,7 +2079,11 @@ struct acpi_pdtt_channel {
struct acpi_table_pmtt {
struct acpi_table_header header; /* Common ACPI table header */
- u32 reserved;
+ u32 memory_device_count;
+ /*
+ * Immediately followed by:
+ * MEMORY_DEVICE memory_device_struct[memory_device_count];
+ */
};
/* Common header for PMTT subtables that follow main table */
@@ -1384,6 +2094,12 @@ struct acpi_pmtt_header {
u16 length;
u16 flags;
u16 reserved2;
+ u32 memory_device_count; /* Zero means no memory device structs follow */
+ /*
+ * Immediately followed by:
+ * u8 type_specific_data[]
+ * MEMORY_DEVICE memory_device_struct[memory_device_count];
+ */
};
/* Values for Type field above */
@@ -1391,7 +2107,8 @@ struct acpi_pmtt_header {
#define ACPI_PMTT_TYPE_SOCKET 0
#define ACPI_PMTT_TYPE_CONTROLLER 1
#define ACPI_PMTT_TYPE_DIMM 2
-#define ACPI_PMTT_TYPE_RESERVED 3 /* 0x03-0xFF are reserved */
+#define ACPI_PMTT_TYPE_RESERVED 3 /* 0x03-0xFE are reserved */
+#define ACPI_PMTT_TYPE_VENDOR 0xFF
/* Values for Flags field above */
@@ -1410,37 +2127,43 @@ struct acpi_pmtt_socket {
u16 socket_id;
u16 reserved;
};
+ /*
+ * Immediately followed by:
+ * MEMORY_DEVICE memory_device_struct[memory_device_count];
+ */
/* 1: Memory Controller subtable */
struct acpi_pmtt_controller {
struct acpi_pmtt_header header;
- u32 read_latency;
- u32 write_latency;
- u32 read_bandwidth;
- u32 write_bandwidth;
- u16 access_width;
- u16 alignment;
+ u16 controller_id;
u16 reserved;
- u16 domain_count;
-};
-
-/* 1a: Proximity Domain substructure */
-
-struct acpi_pmtt_domain {
- u32 proximity_domain;
};
+ /*
+ * Immediately followed by:
+ * MEMORY_DEVICE memory_device_struct[memory_device_count];
+ */
/* 2: Physical Component Identifier (DIMM) */
struct acpi_pmtt_physical_component {
struct acpi_pmtt_header header;
- u16 component_id;
- u16 reserved;
- u32 memory_size;
u32 bios_handle;
};
+/* 0xFF: Vendor Specific Data */
+
+struct acpi_pmtt_vendor_specific {
+ struct acpi_pmtt_header header;
+ u8 type_uuid[16];
+ u8 specific[];
+ /*
+ * Immediately followed by:
+ * u8 vendor_specific_data[];
+ * MEMORY_DEVICE memory_device_struct[memory_device_count];
+ */
+};
+
/*******************************************************************************
*
* PPTT - Processor Properties Topology Table (ACPI 6.2)
@@ -1494,6 +2217,12 @@ struct acpi_pptt_cache {
u16 line_size;
};
+/* 1: Cache Type Structure for PPTT version 3 */
+
+struct acpi_pptt_cache_v1 {
+ u32 cache_id;
+};
+
/* Flags */
#define ACPI_PPTT_SIZE_PROPERTY_VALID (1) /* Physical property valid */
@@ -1503,6 +2232,7 @@ struct acpi_pptt_cache {
#define ACPI_PPTT_CACHE_TYPE_VALID (1<<4) /* Cache type valid */
#define ACPI_PPTT_WRITE_POLICY_VALID (1<<5) /* Write policy valid */
#define ACPI_PPTT_LINE_SIZE_VALID (1<<6) /* Line size valid */
+#define ACPI_PPTT_CACHE_ID_VALID (1<<7) /* Cache ID valid */
/* Masks for Attributes */
@@ -1539,6 +2269,48 @@ struct acpi_pptt_id {
/*******************************************************************************
*
+ * PRMT - Platform Runtime Mechanism Table
+ * Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_prmt {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+struct acpi_table_prmt_header {
+ u8 platform_guid[16];
+ u32 module_info_offset;
+ u32 module_info_count;
+};
+
+struct acpi_prmt_module_header {
+ u16 revision;
+ u16 length;
+};
+
+struct acpi_prmt_module_info {
+ u16 revision;
+ u16 length;
+ u8 module_guid[16];
+ u16 major_rev;
+ u16 minor_rev;
+ u16 handler_info_count;
+ u32 handler_info_offset;
+ u64 mmio_list_pointer;
+};
+
+struct acpi_prmt_handler_info {
+ u16 revision;
+ u16 length;
+ u8 handler_guid[16];
+ u64 handler_address;
+ u64 static_data_buffer_address;
+ u64 acpi_param_buffer_address;
+};
+
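Each module record carries its own length, and its handler_info_count handler records are fixed-size. A traversal sketch, assuming module_info_offset is relative to the table start and that handler records sit directly after each module's fixed fields (both assumptions per the PRM layout; bounds checks elided):

static void prmt_walk(struct acpi_table_prmt *table)
{
	struct acpi_table_prmt_header *ph = (void *)(table + 1);
	struct acpi_prmt_module_info *mod;
	struct acpi_prmt_handler_info *h;
	u32 i, j;

	mod = (void *)((u8 *)table + ph->module_info_offset);
	for (i = 0; i < ph->module_info_count; i++) {
		h = (void *)((u8 *)mod + sizeof(*mod));
		for (j = 0; j < mod->handler_info_count; j++, h++)
			; /* h->handler_guid, h->handler_address, ... */
		mod = (void *)((u8 *)mod + mod->length);
	}
}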
+/*******************************************************************************
+ *
* RASF - RAS Feature Table (ACPI 5.0)
* Version 1
*
@@ -1635,6 +2407,32 @@ enum acpi_rasf_status {
/*******************************************************************************
*
+ * RGRT - Regulatory Graphics Resource Table
+ * Version 1
+ *
+ * Conforms to "ACPI RGRT" available at:
+ * https://microsoft.github.io/mu/dyn/mu_plus/ms_core_pkg/acpi_RGRT/feature_acpi_rgrt/
+ *
+ ******************************************************************************/
+
+struct acpi_table_rgrt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u16 version;
+ u8 image_type;
+ u8 reserved;
+ u8 image[];
+};
+
+/* image_type values */
+
+enum acpi_rgrt_image_type {
+ ACPI_RGRT_TYPE_RESERVED0 = 0,
+ ACPI_RGRT_IMAGE_TYPE_PNG = 1,
+ ACPI_RGRT_TYPE_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/*******************************************************************************
+ *
* SBST - Smart Battery Specification Table
* Version 1
*
@@ -1688,6 +2486,7 @@ enum acpi_sdev_type {
/* Values for flags above */
#define ACPI_SDEV_HANDOFF_TO_UNSECURE_OS (1)
+#define ACPI_SDEV_SECURE_COMPONENTS_PRESENT (1<<1)
/*
* SDEV subtables
@@ -1703,6 +2502,46 @@ struct acpi_sdev_namespace {
u16 vendor_data_length;
};
+struct acpi_sdev_secure_component {
+ u16 secure_component_offset;
+ u16 secure_component_length;
+};
+
+/*
+ * SDEV sub-subtables ("Components") for above
+ */
+struct acpi_sdev_component {
+ struct acpi_sdev_header header;
+};
+
+/* Values for sub-subtable type above */
+
+enum acpi_sac_type {
+ ACPI_SDEV_TYPE_ID_COMPONENT = 0,
+ ACPI_SDEV_TYPE_MEM_COMPONENT = 1
+};
+
+struct acpi_sdev_id_component {
+ struct acpi_sdev_header header;
+ u16 hardware_id_offset;
+ u16 hardware_id_length;
+ u16 subsystem_id_offset;
+ u16 subsystem_id_length;
+ u16 hardware_revision;
+ u8 hardware_rev_present;
+ u8 class_code_present;
+ u8 pci_base_class;
+ u8 pci_sub_class;
+ u8 pci_programming_xface;
+};
+
+struct acpi_sdev_mem_component {
+ struct acpi_sdev_header header;
+ u32 reserved;
+ u64 memory_base_address;
+ u64 memory_length;
+};
+
/* 1: PCIe Endpoint Device Based Device Structure */
struct acpi_sdev_pcie {
@@ -1722,6 +2561,53 @@ struct acpi_sdev_pcie_path {
u8 function;
};
+/*******************************************************************************
+ *
+ * SVKL - Storage Volume Key Location Table (ACPI 6.4)
+ * From: "Guest-Host-Communication Interface (GHCI) for Intel
+ * Trust Domain Extensions (Intel TDX)".
+ * Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_svkl {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 count;
+};
+
+struct acpi_svkl_key {
+ u16 type;
+ u16 format;
+ u32 size;
+ u64 address;
+};
+
+enum acpi_svkl_type {
+ ACPI_SVKL_TYPE_MAIN_STORAGE = 0,
+ ACPI_SVKL_TYPE_RESERVED = 1 /* 1 and greater are reserved */
+};
+
+enum acpi_svkl_format {
+ ACPI_SVKL_FORMAT_RAW_BINARY = 0,
+ ACPI_SVKL_FORMAT_RESERVED = 1 /* 1 and greater are reserved */
+};
+
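The table header's count field is followed by that many fixed-size key records, each pointing at key material through address/size. A short sketch (mapping and validation elided):

static void svkl_walk(struct acpi_table_svkl *svkl)
{
	struct acpi_svkl_key *key = (void *)(svkl + 1);
	u32 i;

	for (i = 0; i < svkl->count; i++, key++) {
		if (key->type == ACPI_SVKL_TYPE_MAIN_STORAGE &&
		    key->format == ACPI_SVKL_FORMAT_RAW_BINARY)
			; /* map key->size bytes at key->address */
	}
}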
+/*******************************************************************************
+ *
+ * TDEL - TD-Event Log
+ * From: "Guest-Host-Communication Interface (GHCI) for Intel
+ * Trust Domain Extensions (Intel TDX)".
+ * September 2020
+ *
+ ******************************************************************************/
+
+struct acpi_table_tdel {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 reserved;
+ u64 log_area_minimum_length;
+ u64 log_area_start_address;
+};
+
/* Reset to default packing */
#pragma pack()
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 2bf3baf819bb..7b9571e00cc4 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -3,7 +3,7 @@
*
* Name: actbl3.h - ACPI Table Definitions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
@@ -33,13 +33,13 @@
#define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */
#define ACPI_SIG_TPM2 "TPM2" /* Trusted Platform Module 2.0 H/W interface table */
#define ACPI_SIG_UEFI "UEFI" /* Uefi Boot Optimization Table */
-#define ACPI_SIG_VRTC "VRTC" /* Virtual Real Time Clock Table */
+#define ACPI_SIG_VIOT "VIOT" /* Virtual I/O Translation Table */
#define ACPI_SIG_WAET "WAET" /* Windows ACPI Emulated devices Table */
#define ACPI_SIG_WDAT "WDAT" /* Watchdog Action Table */
#define ACPI_SIG_WDDT "WDDT" /* Watchdog Timer Description Table */
#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */
#define ACPI_SIG_WPBT "WPBT" /* Windows Platform Binary Table */
-#define ACPI_SIG_WSMT "WSMT" /* Windows SMM Security Migrations Table */
+#define ACPI_SIG_WSMT "WSMT" /* Windows SMM Security Mitigations Table */
#define ACPI_SIG_XENV "XENV" /* Xen Environment table */
#define ACPI_SIG_XXXX "XXXX" /* Intermediate AML header for ASL/ASL+ converter */
@@ -191,7 +191,8 @@ enum acpi_srat_type {
ACPI_SRAT_TYPE_GICC_AFFINITY = 3,
ACPI_SRAT_TYPE_GIC_ITS_AFFINITY = 4, /* ACPI 6.2 */
ACPI_SRAT_TYPE_GENERIC_AFFINITY = 5, /* ACPI 6.3 */
- ACPI_SRAT_TYPE_RESERVED = 6 /* 5 and greater are reserved */
+ ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY = 6, /* ACPI 6.4 */
+ ACPI_SRAT_TYPE_RESERVED = 7 /* 7 and greater are reserved */
};
/*
@@ -272,7 +273,11 @@ struct acpi_srat_gic_its_affinity {
u32 its_id;
};
-/* 5: Generic Initiator Affinity Structure (ACPI 6.3) */
+/*
+ * Common structure for SRAT subtable types:
+ * 5: ACPI_SRAT_TYPE_GENERIC_AFFINITY
+ * 6: ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY
+ */
struct acpi_srat_generic_affinity {
struct acpi_subtable_header header;
@@ -286,7 +291,8 @@ struct acpi_srat_generic_affinity {
/* Flags for struct acpi_srat_generic_affinity */
-#define ACPI_SRAT_GENERIC_AFFINITY_ENABLED (1) /* 00: Use affinity structure */
+#define ACPI_SRAT_GENERIC_AFFINITY_ENABLED (1) /* 00: Use affinity structure */
+#define ACPI_SRAT_ARCHITECTURAL_TRANSACTIONS (1<<1) /* ACPI 6.4 */
/*******************************************************************************
*
@@ -415,6 +421,13 @@ struct acpi_table_tpm2 {
/* Platform-specific data follows */
};
+/* Optional trailer for revision 4 holding platform-specific data */
+struct acpi_tpm2_phy {
+ u8 start_method_specific[12];
+ u32 log_area_minimum_length;
+ u64 log_area_start_address;
+};
+
/* Values for start_method above */
#define ACPI_TPM2_NOT_ALLOWED 0
@@ -479,24 +492,68 @@ struct acpi_table_uefi {
/*******************************************************************************
*
- * VRTC - Virtual Real Time Clock Table
+ * VIOT - Virtual I/O Translation Table
* Version 1
*
- * Conforms to "Simple Firmware Interface Specification",
- * Draft 0.8.2, Oct 19, 2010
- * NOTE: The ACPI VRTC is equivalent to The SFI MRTC table.
- *
******************************************************************************/
-struct acpi_table_vrtc {
+struct acpi_table_viot {
struct acpi_table_header header; /* Common ACPI table header */
+ u16 node_count;
+ u16 node_offset;
+ u8 reserved[8];
+};
+
+/* VIOT subtable header */
+
+struct acpi_viot_header {
+ u8 type;
+ u8 reserved;
+ u16 length;
};
-/* VRTC entry */
+/* Values for Type field above */
-struct acpi_vrtc_entry {
- struct acpi_generic_address physical_address;
- u32 irq;
+enum acpi_viot_node_type {
+ ACPI_VIOT_NODE_PCI_RANGE = 0x01,
+ ACPI_VIOT_NODE_MMIO = 0x02,
+ ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI = 0x03,
+ ACPI_VIOT_NODE_VIRTIO_IOMMU_MMIO = 0x04,
+ ACPI_VIOT_RESERVED = 0x05
+};
+
+/* VIOT subtables */
+
+struct acpi_viot_pci_range {
+ struct acpi_viot_header header;
+ u32 endpoint_start;
+ u16 segment_start;
+ u16 segment_end;
+ u16 bdf_start;
+ u16 bdf_end;
+ u16 output_node;
+ u8 reserved[6];
+};
+
+struct acpi_viot_mmio {
+ struct acpi_viot_header header;
+ u32 endpoint;
+ u64 base_address;
+ u16 output_node;
+ u8 reserved[6];
+};
+
+struct acpi_viot_virtio_iommu_pci {
+ struct acpi_viot_header header;
+ u16 segment;
+ u16 bdf;
+ u8 reserved[8];
+};
+
+struct acpi_viot_virtio_iommu_mmio {
+ struct acpi_viot_header header;
+ u8 reserved[4];
+ u64 base_address;
};
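node_offset locates the first node relative to the table start and node_count bounds the walk; each node then advances by its own header length. A sketch (length sanity checks left out):

static void viot_walk(struct acpi_table_viot *viot)
{
	struct acpi_viot_header *node;
	u16 i;

	node = (void *)((u8 *)viot + viot->node_offset);
	for (i = 0; i < viot->node_count; i++) {
		if (node->type == ACPI_VIOT_NODE_PCI_RANGE)
			; /* cast to struct acpi_viot_pci_range */
		node = (void *)((u8 *)node + node->length);
	}
}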
/*******************************************************************************
@@ -671,12 +728,16 @@ struct acpi_table_wpbt {
u16 arguments_length;
};
+struct acpi_wpbt_unicode {
+ u16 *unicode_string;
+};
+
/*******************************************************************************
*
- * WSMT - Windows SMM Security Migrations Table
+ * WSMT - Windows SMM Security Mitigations Table
* Version 1
*
- * Conforms to "Windows SMM Security Migrations Table",
+ * Conforms to "Windows SMM Security Mitigations Table",
* Version 1.0, April 18, 2016
*
******************************************************************************/
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 4defed58ea33..3491e454b2ab 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -3,7 +3,7 @@
*
* Name: actypes.h - Common data types for the entire ACPI subsystem
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
@@ -507,9 +507,12 @@ typedef u64 acpi_integer;
/* Pointer/Integer type conversions */
#define ACPI_TO_POINTER(i) ACPI_CAST_PTR (void, (acpi_size) (i))
+#ifndef ACPI_TO_INTEGER
#define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p, (void *) 0)
+#endif
+#ifndef ACPI_OFFSET
#define ACPI_OFFSET(d, f) ACPI_PTR_DIFF (&(((d *) 0)->f), (void *) 0)
-#define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i)
+#endif
#define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i)
/* Optimizations for 4-character (32-bit) acpi_name manipulation */
@@ -536,8 +539,14 @@ typedef u64 acpi_integer;
* Can be used with access_width of struct acpi_generic_address and access_size of
* struct acpi_resource_generic_register.
*/
-#define ACPI_ACCESS_BIT_WIDTH(size) (1 << ((size) + 2))
-#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) - 1))
+#define ACPI_ACCESS_BIT_SHIFT 2
+#define ACPI_ACCESS_BYTE_SHIFT -1
+#define ACPI_ACCESS_BIT_MAX (31 - ACPI_ACCESS_BIT_SHIFT)
+#define ACPI_ACCESS_BYTE_MAX (31 - ACPI_ACCESS_BYTE_SHIFT)
+#define ACPI_ACCESS_BIT_DEFAULT (8 - ACPI_ACCESS_BIT_SHIFT)
+#define ACPI_ACCESS_BYTE_DEFAULT (8 - ACPI_ACCESS_BYTE_SHIFT)
+#define ACPI_ACCESS_BIT_WIDTH(size) (1 << ((size) + ACPI_ACCESS_BIT_SHIFT))
+#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) + ACPI_ACCESS_BYTE_SHIFT))
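As arithmetic: an access_width encoding of 2 expands to 1 << (2 + 2) = 16 bits and 1 << (2 - 1) = 2 bytes, and the new *_MAX values cap the encoding so the 32-bit shift stays defined. A hedged sketch of clamping before expansion (helper name illustrative):

static inline u32 gas_bit_width(u8 encoding)
{
	if (encoding > ACPI_ACCESS_BIT_MAX)
		encoding = ACPI_ACCESS_BIT_MAX;	/* avoid undefined shift */
	return ACPI_ACCESS_BIT_WIDTH(encoding);	/* e.g. 2 -> 16 bits */
}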
/*******************************************************************************
*
@@ -815,15 +824,16 @@ typedef u8 acpi_adr_space_type;
#define ACPI_ADR_SPACE_GPIO (acpi_adr_space_type) 8
#define ACPI_ADR_SPACE_GSBUS (acpi_adr_space_type) 9
#define ACPI_ADR_SPACE_PLATFORM_COMM (acpi_adr_space_type) 10
+#define ACPI_ADR_SPACE_PLATFORM_RT (acpi_adr_space_type) 11
-#define ACPI_NUM_PREDEFINED_REGIONS 11
+#define ACPI_NUM_PREDEFINED_REGIONS 12
/*
* Special Address Spaces
*
* Note: A Data Table region is a special type of operation region
* that has its own AML opcode. However, internally, the AML
- * interpreter simply creates an operation region with an an address
+ * interpreter simply creates an operation region with an address
* space type of ACPI_ADR_SPACE_DATA_TABLE.
*/
#define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 0x7E /* Internal to ACPICA only */
@@ -1097,6 +1107,14 @@ struct acpi_connection_info {
u8 access_length;
};
+/* Special Context data for PCC Opregion (ACPI 6.3) */
+
+struct acpi_pcc_info {
+ u8 subspace_id;
+ u16 length;
+ u8 *internal_buffer;
+};
+
typedef
acpi_status (*acpi_adr_space_setup) (acpi_handle region_handle,
u32 function,
@@ -1145,7 +1163,7 @@ struct acpi_pnp_device_id {
struct acpi_pnp_device_id_list {
u32 count; /* Number of IDs in Ids array */
u32 list_size; /* Size of list, including ID strings */
- struct acpi_pnp_device_id ids[1]; /* ID array */
+ struct acpi_pnp_device_id ids[]; /* ID array */
};
/*
@@ -1200,12 +1218,22 @@ struct acpi_pci_id {
u16 function;
};
+struct acpi_mem_mapping {
+ acpi_physical_address physical_address;
+ u8 *logical_address;
+ acpi_size length;
+ struct acpi_mem_mapping *next_mm;
+};
+
struct acpi_mem_space_context {
u32 length;
acpi_physical_address address;
- acpi_physical_address mapped_physical_address;
- u8 *mapped_logical_address;
- acpi_size mapped_length;
+ struct acpi_mem_mapping *cur_mm;
+ struct acpi_mem_mapping *first_mm;
+};
+
+struct acpi_data_table_space_context {
+ void *pointer;
};
/*
@@ -1274,9 +1302,17 @@ typedef enum {
#define ACPI_OSI_WIN_10_RS4 0x12
#define ACPI_OSI_WIN_10_RS5 0x13
#define ACPI_OSI_WIN_10_19H1 0x14
+#define ACPI_OSI_WIN_10_20H1 0x15
+#define ACPI_OSI_WIN_11 0x16
/* Definitions of getopt */
#define ACPI_OPT_END -1
+/* Definitions for explicit fallthrough */
+
+#ifndef ACPI_FALLTHROUGH
+#define ACPI_FALLTHROUGH do {} while(0)
+#endif
+
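The macro is consumed in switch statements: compilers whose platform header defines it via __attribute__((__fallthrough__)) (see the acgcc.h hunk below) get -Wimplicit-fallthrough checking, while everyone else gets a harmless empty statement. An illustrative fragment:

static u32 object_length(int type, u32 declared_length)
{
	u32 length = 0;

	switch (type) {
	case ACPI_TYPE_BUFFER:
		pr_debug("buffer object\n");	/* log, then share the path */
		ACPI_FALLTHROUGH;
	case ACPI_TYPE_STRING:
		length = declared_length;
		break;
	}
	return length;
}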
#endif /* __ACTYPES_H__ */
diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h
index 9dd4689a39cf..8f1e7c489df5 100644
--- a/include/acpi/acuuid.h
+++ b/include/acpi/acuuid.h
@@ -3,7 +3,7 @@
*
* Name: acuuid.h - ACPI-related UUID/GUID definitions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
@@ -27,6 +27,10 @@
#define UUID_PCI_HOST_BRIDGE "33db4d5b-1ff7-401c-9657-7441c03dd766"
#define UUID_I2C_DEVICE "3cdff6f7-4267-4555-ad05-b30a3d8938de"
#define UUID_POWER_BUTTON "dfbcf3c5-e7a5-44e6-9c1f-29c76f6e059c"
+#define UUID_MEMORY_DEVICE "03b19910-f473-11dd-87af-0800200c9a66"
+#define UUID_GENERIC_BUTTONS_DEVICE "fa6bd625-9ce8-470d-a2c7-b3ca36c4282e"
+#define UUID_NVDIMM_ROOT_DEVICE "2f10e7a4-9e91-11e4-89d3-123b93f75cba"
+#define UUID_CONTROL_METHOD_BATTERY "f18fc78b-0f15-4978-b793-53f833a1d35b"
/* Interfaces */
@@ -35,6 +39,7 @@
/* NVDIMM - NFIT table */
+#define UUID_NFIT_DIMM "4309ac30-0d11-11e4-9191-0800200c9a66"
#define UUID_VOLATILE_MEMORY "7305944f-fdda-44e3-b16c-3f22d252e5d0"
#define UUID_PERSISTENT_MEMORY "66f0d379-b4f3-4074-ac43-0d3318b78cdb"
#define UUID_CONTROL_REGION "92f701f6-13b4-405d-910b-299367e8234c"
@@ -43,6 +48,10 @@
#define UUID_VOLATILE_VIRTUAL_CD "3d5abd30-4175-87ce-6d64-d2ade523c4bb"
#define UUID_PERSISTENT_VIRTUAL_DISK "5cea02c9-4d07-69d3-269f-4496fbe096f9"
#define UUID_PERSISTENT_VIRTUAL_CD "08018188-42cd-bb48-100f-5387d53ded3d"
+#define UUID_NFIT_DIMM_N_MSFT "1ee68b36-d4bd-4a1a-9a16-4f8e53d46e05"
+#define UUID_NFIT_DIMM_N_HPE1 "9002c334-acf3-4c0e-9642-a235f0d53bc6"
+#define UUID_NFIT_DIMM_N_HPE2 "5008664b-b758-41a0-a03c-27c2f2d04f7e"
+#define UUID_NFIT_DIMM_N_HYPERV "5746c5f2-a9a2-4264-ad0e-e4ddc9e09e80"
/* Processor Properties (ACPI 6.2) */
@@ -56,5 +65,9 @@
#define UUID_BATTERY_THERMAL_LIMIT "4c2067e3-887d-475c-9720-4af1d3ed602e"
#define UUID_THERMAL_EXTENSIONS "14d399cd-7a27-4b18-8fb4-7cb7b9f4e500"
#define UUID_DEVICE_PROPERTIES "daffd814-6eba-4d8c-8a91-bc9bbf4aa301"
+#define UUID_DEVICE_GRAPHS "ab02a46b-74c7-45a2-bd68-f7d344ef2153"
+#define UUID_HIERARCHICAL_DATA_EXTENSION "dbb8e3e6-5886-4ba6-8795-1319f52a966b"
+#define UUID_CORESIGHT_GRAPH "3ecbc8b6-1d0e-4fb3-8107-e627f805c6cd"
+#define UUID_USB4_CAPABILITIES "23a0d13a-26ab-486c-9c5f-0ffa525a575a"
-#endif /* __AUUID_H__ */
+#endif /* __ACUUID_H__ */
diff --git a/include/acpi/apei.h b/include/acpi/apei.h
index 680f80960c3d..dc60f7db5524 100644
--- a/include/acpi/apei.h
+++ b/include/acpi/apei.h
@@ -27,19 +27,18 @@ extern int hest_disable;
extern int erst_disable;
#ifdef CONFIG_ACPI_APEI_GHES
extern bool ghes_disable;
+void __init acpi_ghes_init(void);
#else
#define ghes_disable 1
+static inline void acpi_ghes_init(void) { }
#endif
#ifdef CONFIG_ACPI_APEI
void __init acpi_hest_init(void);
#else
-static inline void acpi_hest_init(void) { return; }
+static inline void acpi_hest_init(void) { }
#endif
-typedef int (*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data);
-int apei_hest_parse(apei_hest_func_t func, void *data);
-
int erst_write(const struct cper_record_header *record);
ssize_t erst_get_record_count(void);
int erst_get_record_id_begin(int *pos);
@@ -47,6 +46,8 @@ int erst_get_record_id_next(int *pos, u64 *record_id);
void erst_get_record_id_end(void);
ssize_t erst_read(u64 record_id, struct cper_record_header *record,
size_t buflen);
+ssize_t erst_read_record(u64 record_id, struct cper_record_header *record,
+ size_t buflen, size_t recordlen, const guid_t *creatorid);
int erst_clear(u64 record_id);
int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data);
diff --git a/include/acpi/battery.h b/include/acpi/battery.h
index 5d8f5d910c82..b8d56b702c7a 100644
--- a/include/acpi/battery.h
+++ b/include/acpi/battery.h
@@ -2,6 +2,8 @@
#ifndef __ACPI_BATTERY_H
#define __ACPI_BATTERY_H
+#include <linux/power_supply.h>
+
#define ACPI_BATTERY_CLASS "battery"
#define ACPI_BATTERY_NOTIFY_STATUS 0x80
diff --git a/include/acpi/button.h b/include/acpi/button.h
index 340da7784cc8..af2fce5d2ee3 100644
--- a/include/acpi/button.h
+++ b/include/acpi/button.h
@@ -2,6 +2,10 @@
#ifndef ACPI_BUTTON_H
#define ACPI_BUTTON_H
+#define ACPI_BUTTON_HID_POWER "PNP0C0C"
+#define ACPI_BUTTON_HID_LID "PNP0C0D"
+#define ACPI_BUTTON_HID_SLEEP "PNP0C0E"
+
#if IS_ENABLED(CONFIG_ACPI_BUTTON)
extern int acpi_lid_open(void);
#else
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index a6a9373ab863..c5614444031f 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -11,12 +11,13 @@
#define _CPPC_ACPI_H
#include <linux/acpi.h>
+#include <linux/cpufreq.h>
#include <linux/types.h>
#include <acpi/pcc.h>
#include <acpi/processor.h>
-/* Support CPPCv2 and CPPCv3 */
+/* CPPCv2 and CPPCv3 support */
#define CPPC_V2_REV 2
#define CPPC_V3_REV 3
#define CPPC_V2_NUM_ENT 21
@@ -39,7 +40,7 @@ struct cpc_reg {
u8 bit_width;
u8 bit_offset;
u8 access_width;
- u64 __iomem address;
+ u64 address;
} __packed;
/*
@@ -124,23 +125,83 @@ struct cppc_perf_fb_ctrs {
/* Per CPU container for runtime CPPC management. */
struct cppc_cpudata {
- int cpu;
+ struct list_head node;
struct cppc_perf_caps perf_caps;
struct cppc_perf_ctrls perf_ctrls;
struct cppc_perf_fb_ctrs perf_fb_ctrs;
- struct cpufreq_policy *cur_policy;
unsigned int shared_type;
cpumask_var_t shared_cpu_map;
};
+#ifdef CONFIG_ACPI_CPPC_LIB
extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf);
+extern int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf);
extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
+extern int cppc_set_enable(int cpu, bool enable);
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
-extern int acpi_get_psd_map(struct cppc_cpudata **);
+extern bool cppc_perf_ctrs_in_pcc(void);
+extern bool acpi_cpc_valid(void);
+extern bool cppc_allow_fast_switch(void);
+extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data);
extern unsigned int cppc_get_transition_latency(int cpu);
extern bool cpc_ffh_supported(void);
+extern bool cpc_supported_by_cpu(void);
extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val);
extern int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val);
+#else /* !CONFIG_ACPI_CPPC_LIB */
+static inline int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
+{
+ return -ENOTSUPP;
+}
+static inline int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
+{
+ return -ENOTSUPP;
+}
+static inline int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
+{
+ return -ENOTSUPP;
+}
+static inline int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
+{
+ return -ENOTSUPP;
+}
+static inline int cppc_set_enable(int cpu, bool enable)
+{
+ return -ENOTSUPP;
+}
+static inline int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps)
+{
+ return -ENOTSUPP;
+}
+static inline bool cppc_perf_ctrs_in_pcc(void)
+{
+ return false;
+}
+static inline bool acpi_cpc_valid(void)
+{
+ return false;
+}
+static inline bool cppc_allow_fast_switch(void)
+{
+ return false;
+}
+static inline unsigned int cppc_get_transition_latency(int cpu)
+{
+ return CPUFREQ_ETERNAL;
+}
+static inline bool cpc_ffh_supported(void)
+{
+ return false;
+}
+static inline int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
+{
+ return -ENOTSUPP;
+}
+static inline int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
+{
+ return -ENOTSUPP;
+}
+#endif /* !CONFIG_ACPI_CPPC_LIB */
#endif /* _CPPC_ACPI_H*/
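With the CONFIG_ACPI_CPPC_LIB stubs above, callers can use the API unconditionally; the -ENOTSUPP/false returns make the feature degrade cleanly. A hedged usage sketch that compiles against both branches:

static int boost_to_nominal(int cpu)
{
	struct cppc_perf_caps caps;
	struct cppc_perf_ctrls ctrls = { 0 };
	int ret;

	if (!acpi_cpc_valid())
		return -ENODEV;	/* no usable _CPC data */

	ret = cppc_get_perf_caps(cpu, &caps);
	if (ret)
		return ret;

	ctrls.desired_perf = caps.nominal_perf;
	return cppc_set_perf(cpu, &ctrls);
}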
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
index e3f1cddb4ac8..292a5c40bd0c 100644
--- a/include/acpi/ghes.h
+++ b/include/acpi/ghes.h
@@ -33,6 +33,9 @@ struct ghes_estatus_node {
struct llist_node llnode;
struct acpi_hest_generic *generic;
struct ghes *ghes;
+
+ int task_work_cpu;
+ struct callback_head task_work;
};
struct ghes_estatus_cache {
@@ -50,7 +53,25 @@ enum {
GHES_SEV_PANIC = 0x3,
};
-int ghes_estatus_pool_init(int num_ghes);
+#ifdef CONFIG_ACPI_APEI_GHES
+/**
+ * ghes_register_vendor_record_notifier - register a notifier for vendor
+ * records that the kernel would otherwise ignore.
+ * @nb: pointer to the notifier_block structure of the event handler.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int ghes_register_vendor_record_notifier(struct notifier_block *nb);
+
+/**
+ * ghes_unregister_vendor_record_notifier - unregister the previously
+ * registered vendor record notifier.
+ * @nb: pointer to the notifier_block structure of the vendor record handler.
+ */
+void ghes_unregister_vendor_record_notifier(struct notifier_block *nb);
+#endif
+
+int ghes_estatus_pool_init(unsigned int num_ghes);
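A consumer registers a standard notifier_block; the callback receives the vendor error records that GHES itself does not decode. A sketch (the callback body and decoding are illustrative):

static int my_vendor_record_cb(struct notifier_block *nb,
			       unsigned long event, void *data)
{
	/* 'data' carries the vendor record; decode as appropriate */
	return NOTIFY_OK;
}

static struct notifier_block my_vendor_nb = {
	.notifier_call = my_vendor_record_cb,
};

/* ghes_register_vendor_record_notifier(&my_vendor_nb); */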
/* From drivers/edac/ghes_edac.c */
diff --git a/include/acpi/pcc.h b/include/acpi/pcc.h
index 4dec4ed138cd..73e806fe7ce7 100644
--- a/include/acpi/pcc.h
+++ b/include/acpi/pcc.h
@@ -9,18 +9,27 @@
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
+struct pcc_mbox_chan {
+ struct mbox_chan *mchan;
+ u64 shmem_base_addr;
+ u64 shmem_size;
+ u32 latency;
+ u32 max_access_rate;
+ u16 min_turnaround_time;
+};
+
#define MAX_PCC_SUBSPACES 256
#ifdef CONFIG_PCC
-extern struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
- int subspace_id);
-extern void pcc_mbox_free_channel(struct mbox_chan *chan);
+extern struct pcc_mbox_chan *
+pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id);
+extern void pcc_mbox_free_channel(struct pcc_mbox_chan *chan);
#else
-static inline struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
- int subspace_id)
+static inline struct pcc_mbox_chan *
+pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
{
return ERR_PTR(-ENODEV);
}
-static inline void pcc_mbox_free_channel(struct mbox_chan *chan) { }
+static inline void pcc_mbox_free_channel(struct pcc_mbox_chan *chan) { }
#endif
#endif /* _PCC_H */
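Under the new signature the client receives a pcc_mbox_chan that bundles the mailbox channel with the shared-memory geometry callers previously dug out of the PCCT themselves. A usage sketch (subspace id 0 is arbitrary):

static struct pcc_mbox_chan *pchan;

static int my_pcc_attach(struct mbox_client *cl)
{
	pchan = pcc_mbox_request_channel(cl, 0);
	if (IS_ERR(pchan))
		return PTR_ERR(pchan);

	/* pchan->shmem_base_addr / pchan->shmem_size describe the
	 * communication space; pchan->mchan is the mailbox channel. */
	return 0;
}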
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 8f6b2654c0b3..03eb3d977075 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -3,7 +3,7 @@
*
* Name: acenv.h - Host and compiler configuration
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h
index c3facf5f8495..3a6b1db9a984 100644
--- a/include/acpi/platform/acenvex.h
+++ b/include/acpi/platform/acenvex.h
@@ -3,7 +3,7 @@
*
* Name: acenvex.h - Extra host and compiler configuration
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index 7d63d03cf507..ac80111f503c 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -3,28 +3,20 @@
*
* Name: acgcc.h - GCC specific defines, etc.
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
#ifndef __ACGCC_H__
#define __ACGCC_H__
-/*
- * Use compiler specific <stdarg.h> is a good practice for even when
- * -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS undefined.
- */
#ifndef va_arg
-#ifdef ACPI_USE_BUILTIN_STDARG
-typedef __builtin_va_list va_list;
-#define va_start(v, l) __builtin_va_start(v, l)
-#define va_end(v) __builtin_va_end(v)
-#define va_arg(v, l) __builtin_va_arg(v, l)
-#define va_copy(d, s) __builtin_va_copy(d, s)
+#ifdef __KERNEL__
+#include <linux/stdarg.h>
#else
#include <stdarg.h>
-#endif
-#endif
+#endif /* __KERNEL__ */
+#endif /* ! va_arg */
#define ACPI_INLINE __inline__
@@ -54,4 +46,19 @@ typedef __builtin_va_list va_list;
#define ACPI_USE_NATIVE_MATH64
+/* GCC did not support __has_attribute until 5.1. */
+
+#ifndef __has_attribute
+#define __has_attribute(x) 0
+#endif
+
+/*
+ * Explicitly mark an intentional fallthrough to silence
+ * -Wimplicit-fallthrough in GCC 7.1+.
+ */
+
+#if __has_attribute(__fallthrough__)
+#define ACPI_FALLTHROUGH __attribute__((__fallthrough__))
+#endif
+
#endif /* __ACGCC_H__ */
diff --git a/include/acpi/platform/acgccex.h b/include/acpi/platform/acgccex.h
index 7c88fd1de955..302ea1b724b9 100644
--- a/include/acpi/platform/acgccex.h
+++ b/include/acpi/platform/acgccex.h
@@ -3,7 +3,7 @@
*
* Name: acgccex.h - Extra GCC specific defines, etc.
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/acintel.h b/include/acpi/platform/acintel.h
index e7fd5e71be62..85b1ae86ee63 100644
--- a/include/acpi/platform/acintel.h
+++ b/include/acpi/platform/acintel.h
@@ -3,7 +3,7 @@
*
* Name: acintel.h - VC specific defines, etc.
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 987e2af7c335..a5550dd4d507 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -3,7 +3,7 @@
*
* Name: aclinux.h - OS specific defines, etc. for Linux
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
@@ -114,10 +114,19 @@
#define acpi_raw_spinlock raw_spinlock_t *
#define acpi_cpu_flags unsigned long
+#define acpi_uintptr_t uintptr_t
+
+#define ACPI_TO_INTEGER(p) ((uintptr_t)(p))
+#define ACPI_OFFSET(d, f) offsetof(d, f)
+
/* Use native linux version of acpi_os_allocate_zeroed */
#define USE_NATIVE_ALLOCATE_ZEROED
+/* Use logical addresses for accessing GPE registers in system memory */
+
+#define ACPI_GPE_USE_LOGICAL_ADDRESSES
+
/*
* Overrides for in-kernel ACPICA
*/
@@ -190,7 +199,8 @@
#if defined(__ia64__) || (defined(__x86_64__) && !defined(__ILP32__)) ||\
defined(__aarch64__) || defined(__PPC64__) ||\
- defined(__s390x__)
+ defined(__s390x__) ||\
+ (defined(__riscv) && (defined(__LP64__) || defined(_LP64)))
#define ACPI_MACHINE_WIDTH 64
#define COMPILER_DEPENDENT_INT64 long
#define COMPILER_DEPENDENT_UINT64 unsigned long
diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h
index 04f88f2de781..28c72744decf 100644
--- a/include/acpi/platform/aclinuxex.h
+++ b/include/acpi/platform/aclinuxex.h
@@ -3,7 +3,7 @@
*
* Name: aclinuxex.h - Extra OS specific defines, etc. for Linux
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 47805172e73d..9fa49686957a 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -2,11 +2,16 @@
#ifndef __ACPI_PROCESSOR_H
#define __ACPI_PROCESSOR_H
-#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/pm_qos.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
#include <linux/thermal.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
#include <asm/acpi.h>
#define ACPI_PROCESSOR_CLASS "processor"
@@ -297,6 +302,14 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx
}
#endif
+static inline int call_on_cpu(int cpu, long (*fn)(void *), void *arg,
+ bool direct)
+{
+ if (direct || (is_percpu_thread() && cpu == smp_processor_id()))
+ return fn(arg);
+ return work_on_cpu(cpu, fn, arg);
+}
+
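call_on_cpu() runs fn(arg) immediately when that is already safe (direct, or a per-CPU thread on the target CPU) and otherwise bounces it through work_on_cpu(). A usage sketch with a hypothetical callback:

static long read_my_counter(void *arg)
{
	u64 *out = arg;

	*out = 0;	/* placeholder: per-CPU read done on the target CPU */
	return 0;
}

static long sample_cpu(int cpu, u64 *val)
{
	return call_on_cpu(cpu, read_my_counter, val, false);
}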
/* in processor_perflib.c */
#ifdef CONFIG_CPU_FREQ
@@ -428,9 +441,12 @@ static inline int acpi_processor_hotplug(struct acpi_processor *pr)
#endif /* CONFIG_ACPI_PROCESSOR_IDLE */
/* in processor_thermal.c */
-int acpi_processor_get_limit_info(struct acpi_processor *pr);
+int acpi_processor_thermal_init(struct acpi_processor *pr,
+ struct acpi_device *device);
+void acpi_processor_thermal_exit(struct acpi_processor *pr,
+ struct acpi_device *device);
extern const struct thermal_cooling_device_ops processor_cooling_ops;
-#if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ)
+#ifdef CONFIG_CPU_FREQ
void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy);
void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy);
#else
@@ -442,6 +458,6 @@ static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
{
return;
}
-#endif /* CONFIG_ACPI_CPU_FREQ_PSS */
+#endif /* CONFIG_CPU_FREQ */
#endif
diff --git a/include/acpi/video.h b/include/acpi/video.h
index db8548ff03ce..a275c35e5249 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -48,15 +48,18 @@ enum acpi_backlight_type {
acpi_backlight_video,
acpi_backlight_vendor,
acpi_backlight_native,
+ acpi_backlight_nvidia_wmi_ec,
+ acpi_backlight_apple_gmux,
};
#if IS_ENABLED(CONFIG_ACPI_VIDEO)
extern int acpi_video_register(void);
extern void acpi_video_unregister(void);
+extern void acpi_video_register_backlight(void);
extern int acpi_video_get_edid(struct acpi_device *device, int type,
int device_id, void **edid);
extern enum acpi_backlight_type acpi_video_get_backlight_type(void);
-extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type);
+extern bool acpi_video_backlight_use_native(void);
/*
* Note: The value returned by acpi_video_handles_brightness_key_presses()
* may change over time and should not be cached.
@@ -68,6 +71,7 @@ extern int acpi_video_get_levels(struct acpi_device *device,
#else
static inline int acpi_video_register(void) { return -ENODEV; }
static inline void acpi_video_unregister(void) { return; }
+static inline void acpi_video_register_backlight(void) { return; }
static inline int acpi_video_get_edid(struct acpi_device *device, int type,
int device_id, void **edid)
{
@@ -77,8 +81,9 @@ static inline enum acpi_backlight_type acpi_video_get_backlight_type(void)
{
return acpi_backlight_vendor;
}
-static inline void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type)
+static inline bool acpi_video_backlight_use_native(void)
{
+ return true;
}
static inline bool acpi_video_handles_brightness_key_presses(void)
{
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
deleted file mode 100644
index 4c74b1c1d13b..000000000000
--- a/include/asm-generic/5level-fixup.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _5LEVEL_FIXUP_H
-#define _5LEVEL_FIXUP_H
-
-#define __ARCH_HAS_5LEVEL_HACK
-#define __PAGETABLE_P4D_FOLDED 1
-
-#define P4D_SHIFT PGDIR_SHIFT
-#define P4D_SIZE PGDIR_SIZE
-#define P4D_MASK PGDIR_MASK
-#define MAX_PTRS_PER_P4D 1
-#define PTRS_PER_P4D 1
-
-#define p4d_t pgd_t
-
-#define pud_alloc(mm, p4d, address) \
- ((unlikely(pgd_none(*(p4d))) && __pud_alloc(mm, p4d, address)) ? \
- NULL : pud_offset(p4d, address))
-
-#define p4d_alloc(mm, pgd, address) (pgd)
-#define p4d_offset(pgd, start) (pgd)
-
-#ifndef __ASSEMBLY__
-static inline int p4d_none(p4d_t p4d)
-{
- return 0;
-}
-
-static inline int p4d_bad(p4d_t p4d)
-{
- return 0;
-}
-
-static inline int p4d_present(p4d_t p4d)
-{
- return 1;
-}
-#endif
-
-#define p4d_ERROR(p4d) do { } while (0)
-#define p4d_clear(p4d) pgd_clear(p4d)
-#define p4d_val(p4d) pgd_val(p4d)
-#define p4d_populate(mm, p4d, pud) pgd_populate(mm, p4d, pud)
-#define p4d_populate_safe(mm, p4d, pud) pgd_populate(mm, p4d, pud)
-#define p4d_page(p4d) pgd_page(p4d)
-#define p4d_page_vaddr(p4d) pgd_page_vaddr(p4d)
-
-#define __p4d(x) __pgd(x)
-#define set_p4d(p4dp, p4d) set_pgd(p4dp, p4d)
-
-#undef p4d_free_tlb
-#define p4d_free_tlb(tlb, x, addr) do { } while (0)
-#define p4d_free(mm, x) do { } while (0)
-
-#undef p4d_addr_end
-#define p4d_addr_end(addr, end) (end)
-
-#endif
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index cd17d50697cc..941be574bbe0 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -4,6 +4,62 @@
# (This file is not included when SRCARCH=um since UML borrows several
# asm headers from the host architecture.)
-mandatory-y += dma-contiguous.h
+mandatory-y += atomic.h
+mandatory-y += archrandom.h
+mandatory-y += barrier.h
+mandatory-y += bitops.h
+mandatory-y += bug.h
+mandatory-y += bugs.h
+mandatory-y += cacheflush.h
+mandatory-y += checksum.h
+mandatory-y += compat.h
+mandatory-y += current.h
+mandatory-y += delay.h
+mandatory-y += device.h
+mandatory-y += div64.h
+mandatory-y += dma-mapping.h
+mandatory-y += dma.h
+mandatory-y += emergency-restart.h
+mandatory-y += exec.h
+mandatory-y += fb.h
+mandatory-y += ftrace.h
+mandatory-y += futex.h
+mandatory-y += hardirq.h
+mandatory-y += hw_irq.h
+mandatory-y += io.h
+mandatory-y += irq.h
+mandatory-y += irq_regs.h
+mandatory-y += irq_work.h
+mandatory-y += kdebug.h
+mandatory-y += kmap_size.h
+mandatory-y += kprobes.h
+mandatory-y += linkage.h
+mandatory-y += local.h
+mandatory-y += local64.h
+mandatory-y += mmiowb.h
+mandatory-y += mmu.h
+mandatory-y += mmu_context.h
+mandatory-y += module.h
+mandatory-y += module.lds.h
mandatory-y += msi.h
+mandatory-y += pci.h
+mandatory-y += percpu.h
+mandatory-y += pgalloc.h
+mandatory-y += preempt.h
+mandatory-y += rwonce.h
+mandatory-y += sections.h
+mandatory-y += serial.h
+mandatory-y += shmparam.h
mandatory-y += simd.h
+mandatory-y += softirq_stack.h
+mandatory-y += switch_to.h
+mandatory-y += timex.h
+mandatory-y += tlbflush.h
+mandatory-y += topology.h
+mandatory-y += trace_clock.h
+mandatory-y += uaccess.h
+mandatory-y += unaligned.h
+mandatory-y += vermagic.h
+mandatory-y += vga.h
+mandatory-y += word-at-a-time.h
+mandatory-y += xor.h
diff --git a/include/asm-generic/access_ok.h b/include/asm-generic/access_ok.h
new file mode 100644
index 000000000000..2866ae61b1cd
--- /dev/null
+++ b/include/asm-generic/access_ok.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_ACCESS_OK_H__
+#define __ASM_GENERIC_ACCESS_OK_H__
+
+/*
+ * Checking whether a pointer is valid for user space access.
+ * These definitions work on most architectures, but overrides can
+ * be used where necessary.
+ */
+
+/*
+ * Architectures with compat tasks have a variable TASK_SIZE and should
+ * override this to a constant.
+ */
+#ifndef TASK_SIZE_MAX
+#define TASK_SIZE_MAX TASK_SIZE
+#endif
+
+#ifndef __access_ok
+/*
+ * 'size' is a compile-time constant for most callers, so optimize for
+ * this case to turn the check into a single comparison against a constant
+ * limit and catch all possible overflows.
+ * On architectures with separate user address space (m68k, s390, parisc,
+ * sparc64) or those without an MMU, this should always return true.
+ *
+ * This version was originally contributed by Jonas Bonn for the
+ * OpenRISC architecture, and was found to be the most efficient
+ * for constant 'size' and 'limit' values.
+ */
+static inline int __access_ok(const void __user *ptr, unsigned long size)
+{
+ unsigned long limit = TASK_SIZE_MAX;
+ unsigned long addr = (unsigned long)ptr;
+
+ if (IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) ||
+ !IS_ENABLED(CONFIG_MMU))
+ return true;
+
+ return (size <= limit) && (addr <= (limit - size));
+}
+#endif
+
+#ifndef access_ok
+#define access_ok(addr, size) likely(__access_ok(addr, size))
+#endif
+
+#endif
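A typical caller validates the whole user range once before copying. A sketch of the canonical pairing (note copy_from_user() re-checks internally; the explicit access_ok() is shown for illustration and for the __-prefixed copy variants):

static int fetch_from_user(void __user *uptr, void *dst, size_t len)
{
	if (!access_ok(uptr, len))
		return -EFAULT;
	if (copy_from_user(dst, uptr, len))
		return -EFAULT;
	return 0;
}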
diff --git a/include/asm-generic/archrandom.h b/include/asm-generic/archrandom.h
new file mode 100644
index 000000000000..3cd7f980cfdc
--- /dev/null
+++ b/include/asm-generic/archrandom.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_ARCHRANDOM_H__
+#define __ASM_GENERIC_ARCHRANDOM_H__
+
+static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
+{
+ return 0;
+}
+
+static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
+{
+ return 0;
+}
+
+#endif
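Both stubs report zero longs produced, so callers written against the return-count contract fall back gracefully. A sketch of such a caller:

static size_t fill_from_arch(unsigned long *buf, size_t need)
{
	size_t got = arch_get_random_seed_longs(buf, need);

	if (got < need)
		got += arch_get_random_longs(buf + got, need - got);
	return got;	/* caller tops up from another entropy source */
}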
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
deleted file mode 100644
index e8730c6b9fe2..000000000000
--- a/include/asm-generic/atomic-instrumented.h
+++ /dev/null
@@ -1,1788 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by scripts/atomic/gen-atomic-instrumented.sh
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-/*
- * This file provides wrappers with KASAN instrumentation for atomic operations.
- * To use this functionality an arch's atomic.h file needs to define all
- * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include
- * this file at the end. This file provides atomic_read() that forwards to
- * arch_atomic_read() for actual atomic operation.
- * Note: if an arch atomic operation is implemented by means of other atomic
- * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use
- * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
- * double instrumentation.
- */
-#ifndef _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
-#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
-
-#include <linux/build_bug.h>
-#include <linux/kasan-checks.h>
-
-static inline int
-atomic_read(const atomic_t *v)
-{
- kasan_check_read(v, sizeof(*v));
- return arch_atomic_read(v);
-}
-#define atomic_read atomic_read
-
-#if defined(arch_atomic_read_acquire)
-static inline int
-atomic_read_acquire(const atomic_t *v)
-{
- kasan_check_read(v, sizeof(*v));
- return arch_atomic_read_acquire(v);
-}
-#define atomic_read_acquire atomic_read_acquire
-#endif
-
-static inline void
-atomic_set(atomic_t *v, int i)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic_set(v, i);
-}
-#define atomic_set atomic_set
-
-#if defined(arch_atomic_set_release)
-static inline void
-atomic_set_release(atomic_t *v, int i)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic_set_release(v, i);
-}
-#define atomic_set_release atomic_set_release
-#endif
-
-static inline void
-atomic_add(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic_add(i, v);
-}
-#define atomic_add atomic_add
-
-#if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return)
-static inline int
-atomic_add_return(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_add_return(i, v);
-}
-#define atomic_add_return atomic_add_return
-#endif
-
-#if defined(arch_atomic_add_return_acquire)
-static inline int
-atomic_add_return_acquire(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_add_return_acquire(i, v);
-}
-#define atomic_add_return_acquire atomic_add_return_acquire
-#endif
-
-#if defined(arch_atomic_add_return_release)
-static inline int
-atomic_add_return_release(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_add_return_release(i, v);
-}
-#define atomic_add_return_release atomic_add_return_release
-#endif
-
-#if defined(arch_atomic_add_return_relaxed)
-static inline int
-atomic_add_return_relaxed(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_add_return_relaxed(i, v);
-}
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#endif
-
-#if !defined(arch_atomic_fetch_add_relaxed) || defined(arch_atomic_fetch_add)
-static inline int
-atomic_fetch_add(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_add(i, v);
-}
-#define atomic_fetch_add atomic_fetch_add
-#endif
-
-#if defined(arch_atomic_fetch_add_acquire)
-static inline int
-atomic_fetch_add_acquire(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_add_acquire(i, v);
-}
-#define atomic_fetch_add_acquire atomic_fetch_add_acquire
-#endif
-
-#if defined(arch_atomic_fetch_add_release)
-static inline int
-atomic_fetch_add_release(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_add_release(i, v);
-}
-#define atomic_fetch_add_release atomic_fetch_add_release
-#endif
-
-#if defined(arch_atomic_fetch_add_relaxed)
-static inline int
-atomic_fetch_add_relaxed(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_add_relaxed(i, v);
-}
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#endif
-
-static inline void
-atomic_sub(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic_sub(i, v);
-}
-#define atomic_sub atomic_sub
-
-#if !defined(arch_atomic_sub_return_relaxed) || defined(arch_atomic_sub_return)
-static inline int
-atomic_sub_return(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_sub_return(i, v);
-}
-#define atomic_sub_return atomic_sub_return
-#endif
-
-#if defined(arch_atomic_sub_return_acquire)
-static inline int
-atomic_sub_return_acquire(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_sub_return_acquire(i, v);
-}
-#define atomic_sub_return_acquire atomic_sub_return_acquire
-#endif
-
-#if defined(arch_atomic_sub_return_release)
-static inline int
-atomic_sub_return_release(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_sub_return_release(i, v);
-}
-#define atomic_sub_return_release atomic_sub_return_release
-#endif
-
-#if defined(arch_atomic_sub_return_relaxed)
-static inline int
-atomic_sub_return_relaxed(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_sub_return_relaxed(i, v);
-}
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#endif
-
-#if !defined(arch_atomic_fetch_sub_relaxed) || defined(arch_atomic_fetch_sub)
-static inline int
-atomic_fetch_sub(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_sub(i, v);
-}
-#define atomic_fetch_sub atomic_fetch_sub
-#endif
-
-#if defined(arch_atomic_fetch_sub_acquire)
-static inline int
-atomic_fetch_sub_acquire(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_sub_acquire(i, v);
-}
-#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
-#endif
-
-#if defined(arch_atomic_fetch_sub_release)
-static inline int
-atomic_fetch_sub_release(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_sub_release(i, v);
-}
-#define atomic_fetch_sub_release atomic_fetch_sub_release
-#endif
-
-#if defined(arch_atomic_fetch_sub_relaxed)
-static inline int
-atomic_fetch_sub_relaxed(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_sub_relaxed(i, v);
-}
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
-#endif
-
-#if defined(arch_atomic_inc)
-static inline void
-atomic_inc(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic_inc(v);
-}
-#define atomic_inc atomic_inc
-#endif
-
-#if defined(arch_atomic_inc_return)
-static inline int
-atomic_inc_return(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_inc_return(v);
-}
-#define atomic_inc_return atomic_inc_return
-#endif
-
-#if defined(arch_atomic_inc_return_acquire)
-static inline int
-atomic_inc_return_acquire(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_inc_return_acquire(v);
-}
-#define atomic_inc_return_acquire atomic_inc_return_acquire
-#endif
-
-#if defined(arch_atomic_inc_return_release)
-static inline int
-atomic_inc_return_release(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_inc_return_release(v);
-}
-#define atomic_inc_return_release atomic_inc_return_release
-#endif
-
-#if defined(arch_atomic_inc_return_relaxed)
-static inline int
-atomic_inc_return_relaxed(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_inc_return_relaxed(v);
-}
-#define atomic_inc_return_relaxed atomic_inc_return_relaxed
-#endif
-
-#if defined(arch_atomic_fetch_inc)
-static inline int
-atomic_fetch_inc(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_inc(v);
-}
-#define atomic_fetch_inc atomic_fetch_inc
-#endif
-
-#if defined(arch_atomic_fetch_inc_acquire)
-static inline int
-atomic_fetch_inc_acquire(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_inc_acquire(v);
-}
-#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
-#endif
-
-#if defined(arch_atomic_fetch_inc_release)
-static inline int
-atomic_fetch_inc_release(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_inc_release(v);
-}
-#define atomic_fetch_inc_release atomic_fetch_inc_release
-#endif
-
-#if defined(arch_atomic_fetch_inc_relaxed)
-static inline int
-atomic_fetch_inc_relaxed(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_inc_relaxed(v);
-}
-#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
-#endif
-
-#if defined(arch_atomic_dec)
-static inline void
-atomic_dec(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic_dec(v);
-}
-#define atomic_dec atomic_dec
-#endif
-
-#if defined(arch_atomic_dec_return)
-static inline int
-atomic_dec_return(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_dec_return(v);
-}
-#define atomic_dec_return atomic_dec_return
-#endif
-
-#if defined(arch_atomic_dec_return_acquire)
-static inline int
-atomic_dec_return_acquire(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_dec_return_acquire(v);
-}
-#define atomic_dec_return_acquire atomic_dec_return_acquire
-#endif
-
-#if defined(arch_atomic_dec_return_release)
-static inline int
-atomic_dec_return_release(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_dec_return_release(v);
-}
-#define atomic_dec_return_release atomic_dec_return_release
-#endif
-
-#if defined(arch_atomic_dec_return_relaxed)
-static inline int
-atomic_dec_return_relaxed(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_dec_return_relaxed(v);
-}
-#define atomic_dec_return_relaxed atomic_dec_return_relaxed
-#endif
-
-#if defined(arch_atomic_fetch_dec)
-static inline int
-atomic_fetch_dec(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_dec(v);
-}
-#define atomic_fetch_dec atomic_fetch_dec
-#endif
-
-#if defined(arch_atomic_fetch_dec_acquire)
-static inline int
-atomic_fetch_dec_acquire(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_dec_acquire(v);
-}
-#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
-#endif
-
-#if defined(arch_atomic_fetch_dec_release)
-static inline int
-atomic_fetch_dec_release(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_dec_release(v);
-}
-#define atomic_fetch_dec_release atomic_fetch_dec_release
-#endif
-
-#if defined(arch_atomic_fetch_dec_relaxed)
-static inline int
-atomic_fetch_dec_relaxed(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_dec_relaxed(v);
-}
-#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
-#endif
-
-static inline void
-atomic_and(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic_and(i, v);
-}
-#define atomic_and atomic_and
-
-#if !defined(arch_atomic_fetch_and_relaxed) || defined(arch_atomic_fetch_and)
-static inline int
-atomic_fetch_and(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_and(i, v);
-}
-#define atomic_fetch_and atomic_fetch_and
-#endif
-
-#if defined(arch_atomic_fetch_and_acquire)
-static inline int
-atomic_fetch_and_acquire(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_and_acquire(i, v);
-}
-#define atomic_fetch_and_acquire atomic_fetch_and_acquire
-#endif
-
-#if defined(arch_atomic_fetch_and_release)
-static inline int
-atomic_fetch_and_release(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_and_release(i, v);
-}
-#define atomic_fetch_and_release atomic_fetch_and_release
-#endif
-
-#if defined(arch_atomic_fetch_and_relaxed)
-static inline int
-atomic_fetch_and_relaxed(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_and_relaxed(i, v);
-}
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#endif
-
-#if defined(arch_atomic_andnot)
-static inline void
-atomic_andnot(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic_andnot(i, v);
-}
-#define atomic_andnot atomic_andnot
-#endif
-
-#if defined(arch_atomic_fetch_andnot)
-static inline int
-atomic_fetch_andnot(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_andnot(i, v);
-}
-#define atomic_fetch_andnot atomic_fetch_andnot
-#endif
-
-#if defined(arch_atomic_fetch_andnot_acquire)
-static inline int
-atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_andnot_acquire(i, v);
-}
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
-#endif
-
-#if defined(arch_atomic_fetch_andnot_release)
-static inline int
-atomic_fetch_andnot_release(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_andnot_release(i, v);
-}
-#define atomic_fetch_andnot_release atomic_fetch_andnot_release
-#endif
-
-#if defined(arch_atomic_fetch_andnot_relaxed)
-static inline int
-atomic_fetch_andnot_relaxed(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_andnot_relaxed(i, v);
-}
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#endif
-
-static inline void
-atomic_or(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic_or(i, v);
-}
-#define atomic_or atomic_or
-
-#if !defined(arch_atomic_fetch_or_relaxed) || defined(arch_atomic_fetch_or)
-static inline int
-atomic_fetch_or(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_or(i, v);
-}
-#define atomic_fetch_or atomic_fetch_or
-#endif
-
-#if defined(arch_atomic_fetch_or_acquire)
-static inline int
-atomic_fetch_or_acquire(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_or_acquire(i, v);
-}
-#define atomic_fetch_or_acquire atomic_fetch_or_acquire
-#endif
-
-#if defined(arch_atomic_fetch_or_release)
-static inline int
-atomic_fetch_or_release(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_or_release(i, v);
-}
-#define atomic_fetch_or_release atomic_fetch_or_release
-#endif
-
-#if defined(arch_atomic_fetch_or_relaxed)
-static inline int
-atomic_fetch_or_relaxed(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_or_relaxed(i, v);
-}
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#endif
-
-static inline void
-atomic_xor(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic_xor(i, v);
-}
-#define atomic_xor atomic_xor
-
-#if !defined(arch_atomic_fetch_xor_relaxed) || defined(arch_atomic_fetch_xor)
-static inline int
-atomic_fetch_xor(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_xor(i, v);
-}
-#define atomic_fetch_xor atomic_fetch_xor
-#endif
-
-#if defined(arch_atomic_fetch_xor_acquire)
-static inline int
-atomic_fetch_xor_acquire(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_xor_acquire(i, v);
-}
-#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
-#endif
-
-#if defined(arch_atomic_fetch_xor_release)
-static inline int
-atomic_fetch_xor_release(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_xor_release(i, v);
-}
-#define atomic_fetch_xor_release atomic_fetch_xor_release
-#endif
-
-#if defined(arch_atomic_fetch_xor_relaxed)
-static inline int
-atomic_fetch_xor_relaxed(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_xor_relaxed(i, v);
-}
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
-#endif
-
-#if !defined(arch_atomic_xchg_relaxed) || defined(arch_atomic_xchg)
-static inline int
-atomic_xchg(atomic_t *v, int i)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_xchg(v, i);
-}
-#define atomic_xchg atomic_xchg
-#endif
-
-#if defined(arch_atomic_xchg_acquire)
-static inline int
-atomic_xchg_acquire(atomic_t *v, int i)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_xchg_acquire(v, i);
-}
-#define atomic_xchg_acquire atomic_xchg_acquire
-#endif
-
-#if defined(arch_atomic_xchg_release)
-static inline int
-atomic_xchg_release(atomic_t *v, int i)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_xchg_release(v, i);
-}
-#define atomic_xchg_release atomic_xchg_release
-#endif
-
-#if defined(arch_atomic_xchg_relaxed)
-static inline int
-atomic_xchg_relaxed(atomic_t *v, int i)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_xchg_relaxed(v, i);
-}
-#define atomic_xchg_relaxed atomic_xchg_relaxed
-#endif
-
-#if !defined(arch_atomic_cmpxchg_relaxed) || defined(arch_atomic_cmpxchg)
-static inline int
-atomic_cmpxchg(atomic_t *v, int old, int new)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_cmpxchg(v, old, new);
-}
-#define atomic_cmpxchg atomic_cmpxchg
-#endif
-
-#if defined(arch_atomic_cmpxchg_acquire)
-static inline int
-atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_cmpxchg_acquire(v, old, new);
-}
-#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
-#endif
-
-#if defined(arch_atomic_cmpxchg_release)
-static inline int
-atomic_cmpxchg_release(atomic_t *v, int old, int new)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_cmpxchg_release(v, old, new);
-}
-#define atomic_cmpxchg_release atomic_cmpxchg_release
-#endif
-
-#if defined(arch_atomic_cmpxchg_relaxed)
-static inline int
-atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_cmpxchg_relaxed(v, old, new);
-}
-#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
-#endif
-
-#if defined(arch_atomic_try_cmpxchg)
-static inline bool
-atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
- return arch_atomic_try_cmpxchg(v, old, new);
-}
-#define atomic_try_cmpxchg atomic_try_cmpxchg
-#endif
-
-#if defined(arch_atomic_try_cmpxchg_acquire)
-static inline bool
-atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
- return arch_atomic_try_cmpxchg_acquire(v, old, new);
-}
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
-#endif
-
-#if defined(arch_atomic_try_cmpxchg_release)
-static inline bool
-atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
- return arch_atomic_try_cmpxchg_release(v, old, new);
-}
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
-#endif
-
-#if defined(arch_atomic_try_cmpxchg_relaxed)
-static inline bool
-atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
-{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
- return arch_atomic_try_cmpxchg_relaxed(v, old, new);
-}
-#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
-#endif
-
-#if defined(arch_atomic_sub_and_test)
-static inline bool
-atomic_sub_and_test(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_sub_and_test(i, v);
-}
-#define atomic_sub_and_test atomic_sub_and_test
-#endif
-
-#if defined(arch_atomic_dec_and_test)
-static inline bool
-atomic_dec_and_test(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_dec_and_test(v);
-}
-#define atomic_dec_and_test atomic_dec_and_test
-#endif
-
-#if defined(arch_atomic_inc_and_test)
-static inline bool
-atomic_inc_and_test(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_inc_and_test(v);
-}
-#define atomic_inc_and_test atomic_inc_and_test
-#endif
-
-#if defined(arch_atomic_add_negative)
-static inline bool
-atomic_add_negative(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_add_negative(i, v);
-}
-#define atomic_add_negative atomic_add_negative
-#endif
-
-#if defined(arch_atomic_fetch_add_unless)
-static inline int
-atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_add_unless(v, a, u);
-}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
-#endif
-
-#if defined(arch_atomic_add_unless)
-static inline bool
-atomic_add_unless(atomic_t *v, int a, int u)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_add_unless(v, a, u);
-}
-#define atomic_add_unless atomic_add_unless
-#endif
-
-#if defined(arch_atomic_inc_not_zero)
-static inline bool
-atomic_inc_not_zero(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_inc_not_zero(v);
-}
-#define atomic_inc_not_zero atomic_inc_not_zero
-#endif
-
-#if defined(arch_atomic_inc_unless_negative)
-static inline bool
-atomic_inc_unless_negative(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_inc_unless_negative(v);
-}
-#define atomic_inc_unless_negative atomic_inc_unless_negative
-#endif
-
-#if defined(arch_atomic_dec_unless_positive)
-static inline bool
-atomic_dec_unless_positive(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_dec_unless_positive(v);
-}
-#define atomic_dec_unless_positive atomic_dec_unless_positive
-#endif
-
-#if defined(arch_atomic_dec_if_positive)
-static inline int
-atomic_dec_if_positive(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_dec_if_positive(v);
-}
-#define atomic_dec_if_positive atomic_dec_if_positive
-#endif
-
-static inline s64
-atomic64_read(const atomic64_t *v)
-{
- kasan_check_read(v, sizeof(*v));
- return arch_atomic64_read(v);
-}
-#define atomic64_read atomic64_read
-
-#if defined(arch_atomic64_read_acquire)
-static inline s64
-atomic64_read_acquire(const atomic64_t *v)
-{
- kasan_check_read(v, sizeof(*v));
- return arch_atomic64_read_acquire(v);
-}
-#define atomic64_read_acquire atomic64_read_acquire
-#endif
-
-static inline void
-atomic64_set(atomic64_t *v, s64 i)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic64_set(v, i);
-}
-#define atomic64_set atomic64_set
-
-#if defined(arch_atomic64_set_release)
-static inline void
-atomic64_set_release(atomic64_t *v, s64 i)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic64_set_release(v, i);
-}
-#define atomic64_set_release atomic64_set_release
-#endif
-
-static inline void
-atomic64_add(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic64_add(i, v);
-}
-#define atomic64_add atomic64_add
-
-#if !defined(arch_atomic64_add_return_relaxed) || defined(arch_atomic64_add_return)
-static inline s64
-atomic64_add_return(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_add_return(i, v);
-}
-#define atomic64_add_return atomic64_add_return
-#endif
-
-#if defined(arch_atomic64_add_return_acquire)
-static inline s64
-atomic64_add_return_acquire(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_add_return_acquire(i, v);
-}
-#define atomic64_add_return_acquire atomic64_add_return_acquire
-#endif
-
-#if defined(arch_atomic64_add_return_release)
-static inline s64
-atomic64_add_return_release(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_add_return_release(i, v);
-}
-#define atomic64_add_return_release atomic64_add_return_release
-#endif
-
-#if defined(arch_atomic64_add_return_relaxed)
-static inline s64
-atomic64_add_return_relaxed(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_add_return_relaxed(i, v);
-}
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#endif
-
-#if !defined(arch_atomic64_fetch_add_relaxed) || defined(arch_atomic64_fetch_add)
-static inline s64
-atomic64_fetch_add(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_add(i, v);
-}
-#define atomic64_fetch_add atomic64_fetch_add
-#endif
-
-#if defined(arch_atomic64_fetch_add_acquire)
-static inline s64
-atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_add_acquire(i, v);
-}
-#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_add_release)
-static inline s64
-atomic64_fetch_add_release(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_add_release(i, v);
-}
-#define atomic64_fetch_add_release atomic64_fetch_add_release
-#endif
-
-#if defined(arch_atomic64_fetch_add_relaxed)
-static inline s64
-atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_add_relaxed(i, v);
-}
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#endif
-
-static inline void
-atomic64_sub(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic64_sub(i, v);
-}
-#define atomic64_sub atomic64_sub
-
-#if !defined(arch_atomic64_sub_return_relaxed) || defined(arch_atomic64_sub_return)
-static inline s64
-atomic64_sub_return(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_sub_return(i, v);
-}
-#define atomic64_sub_return atomic64_sub_return
-#endif
-
-#if defined(arch_atomic64_sub_return_acquire)
-static inline s64
-atomic64_sub_return_acquire(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_sub_return_acquire(i, v);
-}
-#define atomic64_sub_return_acquire atomic64_sub_return_acquire
-#endif
-
-#if defined(arch_atomic64_sub_return_release)
-static inline s64
-atomic64_sub_return_release(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_sub_return_release(i, v);
-}
-#define atomic64_sub_return_release atomic64_sub_return_release
-#endif
-
-#if defined(arch_atomic64_sub_return_relaxed)
-static inline s64
-atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_sub_return_relaxed(i, v);
-}
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#endif
-
-#if !defined(arch_atomic64_fetch_sub_relaxed) || defined(arch_atomic64_fetch_sub)
-static inline s64
-atomic64_fetch_sub(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_sub(i, v);
-}
-#define atomic64_fetch_sub atomic64_fetch_sub
-#endif
-
-#if defined(arch_atomic64_fetch_sub_acquire)
-static inline s64
-atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_sub_acquire(i, v);
-}
-#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_sub_release)
-static inline s64
-atomic64_fetch_sub_release(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_sub_release(i, v);
-}
-#define atomic64_fetch_sub_release atomic64_fetch_sub_release
-#endif
-
-#if defined(arch_atomic64_fetch_sub_relaxed)
-static inline s64
-atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_sub_relaxed(i, v);
-}
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
-#endif
-
-#if defined(arch_atomic64_inc)
-static inline void
-atomic64_inc(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic64_inc(v);
-}
-#define atomic64_inc atomic64_inc
-#endif
-
-#if defined(arch_atomic64_inc_return)
-static inline s64
-atomic64_inc_return(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_inc_return(v);
-}
-#define atomic64_inc_return atomic64_inc_return
-#endif
-
-#if defined(arch_atomic64_inc_return_acquire)
-static inline s64
-atomic64_inc_return_acquire(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_inc_return_acquire(v);
-}
-#define atomic64_inc_return_acquire atomic64_inc_return_acquire
-#endif
-
-#if defined(arch_atomic64_inc_return_release)
-static inline s64
-atomic64_inc_return_release(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_inc_return_release(v);
-}
-#define atomic64_inc_return_release atomic64_inc_return_release
-#endif
-
-#if defined(arch_atomic64_inc_return_relaxed)
-static inline s64
-atomic64_inc_return_relaxed(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_inc_return_relaxed(v);
-}
-#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-#endif
-
-#if defined(arch_atomic64_fetch_inc)
-static inline s64
-atomic64_fetch_inc(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_inc(v);
-}
-#define atomic64_fetch_inc atomic64_fetch_inc
-#endif
-
-#if defined(arch_atomic64_fetch_inc_acquire)
-static inline s64
-atomic64_fetch_inc_acquire(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_inc_acquire(v);
-}
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_inc_release)
-static inline s64
-atomic64_fetch_inc_release(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_inc_release(v);
-}
-#define atomic64_fetch_inc_release atomic64_fetch_inc_release
-#endif
-
-#if defined(arch_atomic64_fetch_inc_relaxed)
-static inline s64
-atomic64_fetch_inc_relaxed(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_inc_relaxed(v);
-}
-#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
-#endif
-
-#if defined(arch_atomic64_dec)
-static inline void
-atomic64_dec(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic64_dec(v);
-}
-#define atomic64_dec atomic64_dec
-#endif
-
-#if defined(arch_atomic64_dec_return)
-static inline s64
-atomic64_dec_return(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_dec_return(v);
-}
-#define atomic64_dec_return atomic64_dec_return
-#endif
-
-#if defined(arch_atomic64_dec_return_acquire)
-static inline s64
-atomic64_dec_return_acquire(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_dec_return_acquire(v);
-}
-#define atomic64_dec_return_acquire atomic64_dec_return_acquire
-#endif
-
-#if defined(arch_atomic64_dec_return_release)
-static inline s64
-atomic64_dec_return_release(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_dec_return_release(v);
-}
-#define atomic64_dec_return_release atomic64_dec_return_release
-#endif
-
-#if defined(arch_atomic64_dec_return_relaxed)
-static inline s64
-atomic64_dec_return_relaxed(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_dec_return_relaxed(v);
-}
-#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
-#endif
-
-#if defined(arch_atomic64_fetch_dec)
-static inline s64
-atomic64_fetch_dec(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_dec(v);
-}
-#define atomic64_fetch_dec atomic64_fetch_dec
-#endif
-
-#if defined(arch_atomic64_fetch_dec_acquire)
-static inline s64
-atomic64_fetch_dec_acquire(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_dec_acquire(v);
-}
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_dec_release)
-static inline s64
-atomic64_fetch_dec_release(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_dec_release(v);
-}
-#define atomic64_fetch_dec_release atomic64_fetch_dec_release
-#endif
-
-#if defined(arch_atomic64_fetch_dec_relaxed)
-static inline s64
-atomic64_fetch_dec_relaxed(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_dec_relaxed(v);
-}
-#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
-#endif
-
-static inline void
-atomic64_and(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic64_and(i, v);
-}
-#define atomic64_and atomic64_and
-
-#if !defined(arch_atomic64_fetch_and_relaxed) || defined(arch_atomic64_fetch_and)
-static inline s64
-atomic64_fetch_and(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_and(i, v);
-}
-#define atomic64_fetch_and atomic64_fetch_and
-#endif
-
-#if defined(arch_atomic64_fetch_and_acquire)
-static inline s64
-atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_and_acquire(i, v);
-}
-#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_and_release)
-static inline s64
-atomic64_fetch_and_release(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_and_release(i, v);
-}
-#define atomic64_fetch_and_release atomic64_fetch_and_release
-#endif
-
-#if defined(arch_atomic64_fetch_and_relaxed)
-static inline s64
-atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_and_relaxed(i, v);
-}
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#endif
-
-#if defined(arch_atomic64_andnot)
-static inline void
-atomic64_andnot(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic64_andnot(i, v);
-}
-#define atomic64_andnot atomic64_andnot
-#endif
-
-#if defined(arch_atomic64_fetch_andnot)
-static inline s64
-atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_andnot(i, v);
-}
-#define atomic64_fetch_andnot atomic64_fetch_andnot
-#endif
-
-#if defined(arch_atomic64_fetch_andnot_acquire)
-static inline s64
-atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_andnot_acquire(i, v);
-}
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_andnot_release)
-static inline s64
-atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_andnot_release(i, v);
-}
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
-#endif
-
-#if defined(arch_atomic64_fetch_andnot_relaxed)
-static inline s64
-atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_andnot_relaxed(i, v);
-}
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#endif
-
-static inline void
-atomic64_or(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic64_or(i, v);
-}
-#define atomic64_or atomic64_or
-
-#if !defined(arch_atomic64_fetch_or_relaxed) || defined(arch_atomic64_fetch_or)
-static inline s64
-atomic64_fetch_or(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_or(i, v);
-}
-#define atomic64_fetch_or atomic64_fetch_or
-#endif
-
-#if defined(arch_atomic64_fetch_or_acquire)
-static inline s64
-atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_or_acquire(i, v);
-}
-#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_or_release)
-static inline s64
-atomic64_fetch_or_release(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_or_release(i, v);
-}
-#define atomic64_fetch_or_release atomic64_fetch_or_release
-#endif
-
-#if defined(arch_atomic64_fetch_or_relaxed)
-static inline s64
-atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_or_relaxed(i, v);
-}
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#endif
-
-static inline void
-atomic64_xor(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic64_xor(i, v);
-}
-#define atomic64_xor atomic64_xor
-
-#if !defined(arch_atomic64_fetch_xor_relaxed) || defined(arch_atomic64_fetch_xor)
-static inline s64
-atomic64_fetch_xor(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_xor(i, v);
-}
-#define atomic64_fetch_xor atomic64_fetch_xor
-#endif
-
-#if defined(arch_atomic64_fetch_xor_acquire)
-static inline s64
-atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_xor_acquire(i, v);
-}
-#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_xor_release)
-static inline s64
-atomic64_fetch_xor_release(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_xor_release(i, v);
-}
-#define atomic64_fetch_xor_release atomic64_fetch_xor_release
-#endif
-
-#if defined(arch_atomic64_fetch_xor_relaxed)
-static inline s64
-atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_xor_relaxed(i, v);
-}
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
-#endif
-
-#if !defined(arch_atomic64_xchg_relaxed) || defined(arch_atomic64_xchg)
-static inline s64
-atomic64_xchg(atomic64_t *v, s64 i)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_xchg(v, i);
-}
-#define atomic64_xchg atomic64_xchg
-#endif
-
-#if defined(arch_atomic64_xchg_acquire)
-static inline s64
-atomic64_xchg_acquire(atomic64_t *v, s64 i)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_xchg_acquire(v, i);
-}
-#define atomic64_xchg_acquire atomic64_xchg_acquire
-#endif
-
-#if defined(arch_atomic64_xchg_release)
-static inline s64
-atomic64_xchg_release(atomic64_t *v, s64 i)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_xchg_release(v, i);
-}
-#define atomic64_xchg_release atomic64_xchg_release
-#endif
-
-#if defined(arch_atomic64_xchg_relaxed)
-static inline s64
-atomic64_xchg_relaxed(atomic64_t *v, s64 i)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_xchg_relaxed(v, i);
-}
-#define atomic64_xchg_relaxed atomic64_xchg_relaxed
-#endif
-
-#if !defined(arch_atomic64_cmpxchg_relaxed) || defined(arch_atomic64_cmpxchg)
-static inline s64
-atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_cmpxchg(v, old, new);
-}
-#define atomic64_cmpxchg atomic64_cmpxchg
-#endif
-
-#if defined(arch_atomic64_cmpxchg_acquire)
-static inline s64
-atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_cmpxchg_acquire(v, old, new);
-}
-#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
-#endif
-
-#if defined(arch_atomic64_cmpxchg_release)
-static inline s64
-atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_cmpxchg_release(v, old, new);
-}
-#define atomic64_cmpxchg_release atomic64_cmpxchg_release
-#endif
-
-#if defined(arch_atomic64_cmpxchg_relaxed)
-static inline s64
-atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_cmpxchg_relaxed(v, old, new);
-}
-#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
-#endif
-
-#if defined(arch_atomic64_try_cmpxchg)
-static inline bool
-atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
- return arch_atomic64_try_cmpxchg(v, old, new);
-}
-#define atomic64_try_cmpxchg atomic64_try_cmpxchg
-#endif
-
-#if defined(arch_atomic64_try_cmpxchg_acquire)
-static inline bool
-atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
- return arch_atomic64_try_cmpxchg_acquire(v, old, new);
-}
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
-#endif
-
-#if defined(arch_atomic64_try_cmpxchg_release)
-static inline bool
-atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
- return arch_atomic64_try_cmpxchg_release(v, old, new);
-}
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
-#endif
-
-#if defined(arch_atomic64_try_cmpxchg_relaxed)
-static inline bool
-atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
-{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
- return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
-}
-#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
-#endif
-
-#if defined(arch_atomic64_sub_and_test)
-static inline bool
-atomic64_sub_and_test(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_sub_and_test(i, v);
-}
-#define atomic64_sub_and_test atomic64_sub_and_test
-#endif
-
-#if defined(arch_atomic64_dec_and_test)
-static inline bool
-atomic64_dec_and_test(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_dec_and_test(v);
-}
-#define atomic64_dec_and_test atomic64_dec_and_test
-#endif
-
-#if defined(arch_atomic64_inc_and_test)
-static inline bool
-atomic64_inc_and_test(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_inc_and_test(v);
-}
-#define atomic64_inc_and_test atomic64_inc_and_test
-#endif
-
-#if defined(arch_atomic64_add_negative)
-static inline bool
-atomic64_add_negative(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_add_negative(i, v);
-}
-#define atomic64_add_negative atomic64_add_negative
-#endif
-
-#if defined(arch_atomic64_fetch_add_unless)
-static inline s64
-atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_add_unless(v, a, u);
-}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
-#endif
-
-#if defined(arch_atomic64_add_unless)
-static inline bool
-atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_add_unless(v, a, u);
-}
-#define atomic64_add_unless atomic64_add_unless
-#endif
-
-#if defined(arch_atomic64_inc_not_zero)
-static inline bool
-atomic64_inc_not_zero(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_inc_not_zero(v);
-}
-#define atomic64_inc_not_zero atomic64_inc_not_zero
-#endif
-
-#if defined(arch_atomic64_inc_unless_negative)
-static inline bool
-atomic64_inc_unless_negative(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_inc_unless_negative(v);
-}
-#define atomic64_inc_unless_negative atomic64_inc_unless_negative
-#endif
-
-#if defined(arch_atomic64_dec_unless_positive)
-static inline bool
-atomic64_dec_unless_positive(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_dec_unless_positive(v);
-}
-#define atomic64_dec_unless_positive atomic64_dec_unless_positive
-#endif
-
-#if defined(arch_atomic64_dec_if_positive)
-static inline s64
-atomic64_dec_if_positive(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_dec_if_positive(v);
-}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
-#endif
-
-#if !defined(arch_xchg_relaxed) || defined(arch_xchg)
-#define xchg(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_xchg(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_xchg_acquire)
-#define xchg_acquire(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_xchg_release)
-#define xchg_release(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_xchg_release(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_xchg_relaxed)
-#define xchg_relaxed(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if !defined(arch_cmpxchg_relaxed) || defined(arch_cmpxchg)
-#define cmpxchg(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg_acquire)
-#define cmpxchg_acquire(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg_release)
-#define cmpxchg_release(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg_relaxed)
-#define cmpxchg_relaxed(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if !defined(arch_cmpxchg64_relaxed) || defined(arch_cmpxchg64)
-#define cmpxchg64(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg64_acquire)
-#define cmpxchg64_acquire(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg64_release)
-#define cmpxchg64_release(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg64_relaxed)
-#define cmpxchg64_relaxed(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#define cmpxchg_local(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg64_local(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
-})
-
-#define sync_cmpxchg(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg_double(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
- arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \
-})
-
-
-#define cmpxchg_double_local(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
- arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \
-})
-
-#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
-// b29b625d5de9280f680e42c7be859b55b15e5f6a
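
For reference, the deleted atomic-instrumented.h above is generated code that follows one pattern throughout: each wrapper runs a KASAN address check on its operands and then delegates to the corresponding arch_atomic_*() primitive. Below is a minimal userspace sketch of that wrapper pattern, assuming C11 atomics; check_write(), arch_counter_inc() and counter_inc() are hypothetical stand-ins for kasan_check_write() and the real arch_/instrumented pair, not kernel API.

/*
 * Sketch of the instrumentation-wrapper pattern: validate the target
 * address, then call the underlying "arch" primitive. check_write() is
 * a stand-in for the kernel's kasan_check_write().
 */
#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

static void check_write(const volatile void *p, size_t size)
{
	/* Stand-in check; KASAN would validate the whole [p, p+size) range. */
	assert(p != NULL && size > 0);
}

static void arch_counter_inc(atomic_int *v)
{
	/* Stand-in for the arch-provided primitive, e.g. arch_atomic_inc(). */
	atomic_fetch_add_explicit(v, 1, memory_order_relaxed);
}

/* The instrumented wrapper: check first, then delegate. */
static void counter_inc(atomic_int *v)
{
	check_write(v, sizeof(*v));
	arch_counter_inc(v);
}

int main(void)
{
	atomic_int v = 0;

	counter_inc(&v);
	printf("%d\n", atomic_load(&v));	/* prints 1 */
	return 0;
}

The split lets sanitizer checks live in one generated header while each architecture implements only the bare arch_ primitives.
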
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
deleted file mode 100644
index 881c7e27af28..000000000000
--- a/include/asm-generic/atomic-long.h
+++ /dev/null
@@ -1,1013 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by scripts/atomic/gen-atomic-long.sh
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-#ifndef _ASM_GENERIC_ATOMIC_LONG_H
-#define _ASM_GENERIC_ATOMIC_LONG_H
-
-#include <asm/types.h>
-
-#ifdef CONFIG_64BIT
-typedef atomic64_t atomic_long_t;
-#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
-#define atomic_long_cond_read_acquire atomic64_cond_read_acquire
-#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed
-#else
-typedef atomic_t atomic_long_t;
-#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
-#define atomic_long_cond_read_acquire atomic_cond_read_acquire
-#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed
-#endif
-
-#ifdef CONFIG_64BIT
-
-static inline long
-atomic_long_read(const atomic_long_t *v)
-{
- return atomic64_read(v);
-}
-
-static inline long
-atomic_long_read_acquire(const atomic_long_t *v)
-{
- return atomic64_read_acquire(v);
-}
-
-static inline void
-atomic_long_set(atomic_long_t *v, long i)
-{
- atomic64_set(v, i);
-}
-
-static inline void
-atomic_long_set_release(atomic_long_t *v, long i)
-{
- atomic64_set_release(v, i);
-}
-
-static inline void
-atomic_long_add(long i, atomic_long_t *v)
-{
- atomic64_add(i, v);
-}
-
-static inline long
-atomic_long_add_return(long i, atomic_long_t *v)
-{
- return atomic64_add_return(i, v);
-}
-
-static inline long
-atomic_long_add_return_acquire(long i, atomic_long_t *v)
-{
- return atomic64_add_return_acquire(i, v);
-}
-
-static inline long
-atomic_long_add_return_release(long i, atomic_long_t *v)
-{
- return atomic64_add_return_release(i, v);
-}
-
-static inline long
-atomic_long_add_return_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_add_return_relaxed(i, v);
-}
-
-static inline long
-atomic_long_fetch_add(long i, atomic_long_t *v)
-{
- return atomic64_fetch_add(i, v);
-}
-
-static inline long
-atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_add_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_add_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_add_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_add_relaxed(i, v);
-}
-
-static inline void
-atomic_long_sub(long i, atomic_long_t *v)
-{
- atomic64_sub(i, v);
-}
-
-static inline long
-atomic_long_sub_return(long i, atomic_long_t *v)
-{
- return atomic64_sub_return(i, v);
-}
-
-static inline long
-atomic_long_sub_return_acquire(long i, atomic_long_t *v)
-{
- return atomic64_sub_return_acquire(i, v);
-}
-
-static inline long
-atomic_long_sub_return_release(long i, atomic_long_t *v)
-{
- return atomic64_sub_return_release(i, v);
-}
-
-static inline long
-atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_sub_return_relaxed(i, v);
-}
-
-static inline long
-atomic_long_fetch_sub(long i, atomic_long_t *v)
-{
- return atomic64_fetch_sub(i, v);
-}
-
-static inline long
-atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_sub_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_sub_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_sub_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_sub_relaxed(i, v);
-}
-
-static inline void
-atomic_long_inc(atomic_long_t *v)
-{
- atomic64_inc(v);
-}
-
-static inline long
-atomic_long_inc_return(atomic_long_t *v)
-{
- return atomic64_inc_return(v);
-}
-
-static inline long
-atomic_long_inc_return_acquire(atomic_long_t *v)
-{
- return atomic64_inc_return_acquire(v);
-}
-
-static inline long
-atomic_long_inc_return_release(atomic_long_t *v)
-{
- return atomic64_inc_return_release(v);
-}
-
-static inline long
-atomic_long_inc_return_relaxed(atomic_long_t *v)
-{
- return atomic64_inc_return_relaxed(v);
-}
-
-static inline long
-atomic_long_fetch_inc(atomic_long_t *v)
-{
- return atomic64_fetch_inc(v);
-}
-
-static inline long
-atomic_long_fetch_inc_acquire(atomic_long_t *v)
-{
- return atomic64_fetch_inc_acquire(v);
-}
-
-static inline long
-atomic_long_fetch_inc_release(atomic_long_t *v)
-{
- return atomic64_fetch_inc_release(v);
-}
-
-static inline long
-atomic_long_fetch_inc_relaxed(atomic_long_t *v)
-{
- return atomic64_fetch_inc_relaxed(v);
-}
-
-static inline void
-atomic_long_dec(atomic_long_t *v)
-{
- atomic64_dec(v);
-}
-
-static inline long
-atomic_long_dec_return(atomic_long_t *v)
-{
- return atomic64_dec_return(v);
-}
-
-static inline long
-atomic_long_dec_return_acquire(atomic_long_t *v)
-{
- return atomic64_dec_return_acquire(v);
-}
-
-static inline long
-atomic_long_dec_return_release(atomic_long_t *v)
-{
- return atomic64_dec_return_release(v);
-}
-
-static inline long
-atomic_long_dec_return_relaxed(atomic_long_t *v)
-{
- return atomic64_dec_return_relaxed(v);
-}
-
-static inline long
-atomic_long_fetch_dec(atomic_long_t *v)
-{
- return atomic64_fetch_dec(v);
-}
-
-static inline long
-atomic_long_fetch_dec_acquire(atomic_long_t *v)
-{
- return atomic64_fetch_dec_acquire(v);
-}
-
-static inline long
-atomic_long_fetch_dec_release(atomic_long_t *v)
-{
- return atomic64_fetch_dec_release(v);
-}
-
-static inline long
-atomic_long_fetch_dec_relaxed(atomic_long_t *v)
-{
- return atomic64_fetch_dec_relaxed(v);
-}
-
-static inline void
-atomic_long_and(long i, atomic_long_t *v)
-{
- atomic64_and(i, v);
-}
-
-static inline long
-atomic_long_fetch_and(long i, atomic_long_t *v)
-{
- return atomic64_fetch_and(i, v);
-}
-
-static inline long
-atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_and_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_and_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_and_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_and_relaxed(i, v);
-}
-
-static inline void
-atomic_long_andnot(long i, atomic_long_t *v)
-{
- atomic64_andnot(i, v);
-}
-
-static inline long
-atomic_long_fetch_andnot(long i, atomic_long_t *v)
-{
- return atomic64_fetch_andnot(i, v);
-}
-
-static inline long
-atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_andnot_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_andnot_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_andnot_relaxed(i, v);
-}
-
-static inline void
-atomic_long_or(long i, atomic_long_t *v)
-{
- atomic64_or(i, v);
-}
-
-static inline long
-atomic_long_fetch_or(long i, atomic_long_t *v)
-{
- return atomic64_fetch_or(i, v);
-}
-
-static inline long
-atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_or_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_or_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_or_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_or_relaxed(i, v);
-}
-
-static inline void
-atomic_long_xor(long i, atomic_long_t *v)
-{
- atomic64_xor(i, v);
-}
-
-static inline long
-atomic_long_fetch_xor(long i, atomic_long_t *v)
-{
- return atomic64_fetch_xor(i, v);
-}
-
-static inline long
-atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_xor_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_xor_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_xor_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_xor_relaxed(i, v);
-}
-
-static inline long
-atomic_long_xchg(atomic_long_t *v, long i)
-{
- return atomic64_xchg(v, i);
-}
-
-static inline long
-atomic_long_xchg_acquire(atomic_long_t *v, long i)
-{
- return atomic64_xchg_acquire(v, i);
-}
-
-static inline long
-atomic_long_xchg_release(atomic_long_t *v, long i)
-{
- return atomic64_xchg_release(v, i);
-}
-
-static inline long
-atomic_long_xchg_relaxed(atomic_long_t *v, long i)
-{
- return atomic64_xchg_relaxed(v, i);
-}
-
-static inline long
-atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
-{
- return atomic64_cmpxchg(v, old, new);
-}
-
-static inline long
-atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
-{
- return atomic64_cmpxchg_acquire(v, old, new);
-}
-
-static inline long
-atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
-{
- return atomic64_cmpxchg_release(v, old, new);
-}
-
-static inline long
-atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
-{
- return atomic64_cmpxchg_relaxed(v, old, new);
-}
-
-static inline bool
-atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
-{
- return atomic64_try_cmpxchg(v, (s64 *)old, new);
-}
-
-static inline bool
-atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
-{
- return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
-}
-
-static inline bool
-atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
-{
- return atomic64_try_cmpxchg_release(v, (s64 *)old, new);
-}
-
-static inline bool
-atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
-{
- return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
-}
-
-static inline bool
-atomic_long_sub_and_test(long i, atomic_long_t *v)
-{
- return atomic64_sub_and_test(i, v);
-}
-
-static inline bool
-atomic_long_dec_and_test(atomic_long_t *v)
-{
- return atomic64_dec_and_test(v);
-}
-
-static inline bool
-atomic_long_inc_and_test(atomic_long_t *v)
-{
- return atomic64_inc_and_test(v);
-}
-
-static inline bool
-atomic_long_add_negative(long i, atomic_long_t *v)
-{
- return atomic64_add_negative(i, v);
-}
-
-static inline long
-atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
-{
- return atomic64_fetch_add_unless(v, a, u);
-}
-
-static inline bool
-atomic_long_add_unless(atomic_long_t *v, long a, long u)
-{
- return atomic64_add_unless(v, a, u);
-}
-
-static inline bool
-atomic_long_inc_not_zero(atomic_long_t *v)
-{
- return atomic64_inc_not_zero(v);
-}
-
-static inline bool
-atomic_long_inc_unless_negative(atomic_long_t *v)
-{
- return atomic64_inc_unless_negative(v);
-}
-
-static inline bool
-atomic_long_dec_unless_positive(atomic_long_t *v)
-{
- return atomic64_dec_unless_positive(v);
-}
-
-static inline long
-atomic_long_dec_if_positive(atomic_long_t *v)
-{
- return atomic64_dec_if_positive(v);
-}
-
-#else /* CONFIG_64BIT */
-
-static inline long
-atomic_long_read(const atomic_long_t *v)
-{
- return atomic_read(v);
-}
-
-static inline long
-atomic_long_read_acquire(const atomic_long_t *v)
-{
- return atomic_read_acquire(v);
-}
-
-static inline void
-atomic_long_set(atomic_long_t *v, long i)
-{
- atomic_set(v, i);
-}
-
-static inline void
-atomic_long_set_release(atomic_long_t *v, long i)
-{
- atomic_set_release(v, i);
-}
-
-static inline void
-atomic_long_add(long i, atomic_long_t *v)
-{
- atomic_add(i, v);
-}
-
-static inline long
-atomic_long_add_return(long i, atomic_long_t *v)
-{
- return atomic_add_return(i, v);
-}
-
-static inline long
-atomic_long_add_return_acquire(long i, atomic_long_t *v)
-{
- return atomic_add_return_acquire(i, v);
-}
-
-static inline long
-atomic_long_add_return_release(long i, atomic_long_t *v)
-{
- return atomic_add_return_release(i, v);
-}
-
-static inline long
-atomic_long_add_return_relaxed(long i, atomic_long_t *v)
-{
- return atomic_add_return_relaxed(i, v);
-}
-
-static inline long
-atomic_long_fetch_add(long i, atomic_long_t *v)
-{
- return atomic_fetch_add(i, v);
-}
-
-static inline long
-atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_add_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_add_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_add_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_add_relaxed(i, v);
-}
-
-static inline void
-atomic_long_sub(long i, atomic_long_t *v)
-{
- atomic_sub(i, v);
-}
-
-static inline long
-atomic_long_sub_return(long i, atomic_long_t *v)
-{
- return atomic_sub_return(i, v);
-}
-
-static inline long
-atomic_long_sub_return_acquire(long i, atomic_long_t *v)
-{
- return atomic_sub_return_acquire(i, v);
-}
-
-static inline long
-atomic_long_sub_return_release(long i, atomic_long_t *v)
-{
- return atomic_sub_return_release(i, v);
-}
-
-static inline long
-atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
-{
- return atomic_sub_return_relaxed(i, v);
-}
-
-static inline long
-atomic_long_fetch_sub(long i, atomic_long_t *v)
-{
- return atomic_fetch_sub(i, v);
-}
-
-static inline long
-atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_sub_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_sub_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_sub_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_sub_relaxed(i, v);
-}
-
-static inline void
-atomic_long_inc(atomic_long_t *v)
-{
- atomic_inc(v);
-}
-
-static inline long
-atomic_long_inc_return(atomic_long_t *v)
-{
- return atomic_inc_return(v);
-}
-
-static inline long
-atomic_long_inc_return_acquire(atomic_long_t *v)
-{
- return atomic_inc_return_acquire(v);
-}
-
-static inline long
-atomic_long_inc_return_release(atomic_long_t *v)
-{
- return atomic_inc_return_release(v);
-}
-
-static inline long
-atomic_long_inc_return_relaxed(atomic_long_t *v)
-{
- return atomic_inc_return_relaxed(v);
-}
-
-static inline long
-atomic_long_fetch_inc(atomic_long_t *v)
-{
- return atomic_fetch_inc(v);
-}
-
-static inline long
-atomic_long_fetch_inc_acquire(atomic_long_t *v)
-{
- return atomic_fetch_inc_acquire(v);
-}
-
-static inline long
-atomic_long_fetch_inc_release(atomic_long_t *v)
-{
- return atomic_fetch_inc_release(v);
-}
-
-static inline long
-atomic_long_fetch_inc_relaxed(atomic_long_t *v)
-{
- return atomic_fetch_inc_relaxed(v);
-}
-
-static inline void
-atomic_long_dec(atomic_long_t *v)
-{
- atomic_dec(v);
-}
-
-static inline long
-atomic_long_dec_return(atomic_long_t *v)
-{
- return atomic_dec_return(v);
-}
-
-static inline long
-atomic_long_dec_return_acquire(atomic_long_t *v)
-{
- return atomic_dec_return_acquire(v);
-}
-
-static inline long
-atomic_long_dec_return_release(atomic_long_t *v)
-{
- return atomic_dec_return_release(v);
-}
-
-static inline long
-atomic_long_dec_return_relaxed(atomic_long_t *v)
-{
- return atomic_dec_return_relaxed(v);
-}
-
-static inline long
-atomic_long_fetch_dec(atomic_long_t *v)
-{
- return atomic_fetch_dec(v);
-}
-
-static inline long
-atomic_long_fetch_dec_acquire(atomic_long_t *v)
-{
- return atomic_fetch_dec_acquire(v);
-}
-
-static inline long
-atomic_long_fetch_dec_release(atomic_long_t *v)
-{
- return atomic_fetch_dec_release(v);
-}
-
-static inline long
-atomic_long_fetch_dec_relaxed(atomic_long_t *v)
-{
- return atomic_fetch_dec_relaxed(v);
-}
-
-static inline void
-atomic_long_and(long i, atomic_long_t *v)
-{
- atomic_and(i, v);
-}
-
-static inline long
-atomic_long_fetch_and(long i, atomic_long_t *v)
-{
- return atomic_fetch_and(i, v);
-}
-
-static inline long
-atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_and_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_and_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_and_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_and_relaxed(i, v);
-}
-
-static inline void
-atomic_long_andnot(long i, atomic_long_t *v)
-{
- atomic_andnot(i, v);
-}
-
-static inline long
-atomic_long_fetch_andnot(long i, atomic_long_t *v)
-{
- return atomic_fetch_andnot(i, v);
-}
-
-static inline long
-atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_andnot_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_andnot_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_andnot_relaxed(i, v);
-}
-
-static inline void
-atomic_long_or(long i, atomic_long_t *v)
-{
- atomic_or(i, v);
-}
-
-static inline long
-atomic_long_fetch_or(long i, atomic_long_t *v)
-{
- return atomic_fetch_or(i, v);
-}
-
-static inline long
-atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_or_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_or_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_or_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_or_relaxed(i, v);
-}
-
-static inline void
-atomic_long_xor(long i, atomic_long_t *v)
-{
- atomic_xor(i, v);
-}
-
-static inline long
-atomic_long_fetch_xor(long i, atomic_long_t *v)
-{
- return atomic_fetch_xor(i, v);
-}
-
-static inline long
-atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_xor_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_xor_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_xor_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_xor_relaxed(i, v);
-}
-
-static inline long
-atomic_long_xchg(atomic_long_t *v, long i)
-{
- return atomic_xchg(v, i);
-}
-
-static inline long
-atomic_long_xchg_acquire(atomic_long_t *v, long i)
-{
- return atomic_xchg_acquire(v, i);
-}
-
-static inline long
-atomic_long_xchg_release(atomic_long_t *v, long i)
-{
- return atomic_xchg_release(v, i);
-}
-
-static inline long
-atomic_long_xchg_relaxed(atomic_long_t *v, long i)
-{
- return atomic_xchg_relaxed(v, i);
-}
-
-static inline long
-atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
-{
- return atomic_cmpxchg(v, old, new);
-}
-
-static inline long
-atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
-{
- return atomic_cmpxchg_acquire(v, old, new);
-}
-
-static inline long
-atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
-{
- return atomic_cmpxchg_release(v, old, new);
-}
-
-static inline long
-atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
-{
- return atomic_cmpxchg_relaxed(v, old, new);
-}
-
-static inline bool
-atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
-{
- return atomic_try_cmpxchg(v, (int *)old, new);
-}
-
-static inline bool
-atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
-{
- return atomic_try_cmpxchg_acquire(v, (int *)old, new);
-}
-
-static inline bool
-atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
-{
- return atomic_try_cmpxchg_release(v, (int *)old, new);
-}
-
-static inline bool
-atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
-{
- return atomic_try_cmpxchg_relaxed(v, (int *)old, new);
-}
-
-static inline bool
-atomic_long_sub_and_test(long i, atomic_long_t *v)
-{
- return atomic_sub_and_test(i, v);
-}
-
-static inline bool
-atomic_long_dec_and_test(atomic_long_t *v)
-{
- return atomic_dec_and_test(v);
-}
-
-static inline bool
-atomic_long_inc_and_test(atomic_long_t *v)
-{
- return atomic_inc_and_test(v);
-}
-
-static inline bool
-atomic_long_add_negative(long i, atomic_long_t *v)
-{
- return atomic_add_negative(i, v);
-}
-
-static inline long
-atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
-{
- return atomic_fetch_add_unless(v, a, u);
-}
-
-static inline bool
-atomic_long_add_unless(atomic_long_t *v, long a, long u)
-{
- return atomic_add_unless(v, a, u);
-}
-
-static inline bool
-atomic_long_inc_not_zero(atomic_long_t *v)
-{
- return atomic_inc_not_zero(v);
-}
-
-static inline bool
-atomic_long_inc_unless_negative(atomic_long_t *v)
-{
- return atomic_inc_unless_negative(v);
-}
-
-static inline bool
-atomic_long_dec_unless_positive(atomic_long_t *v)
-{
- return atomic_dec_unless_positive(v);
-}
-
-static inline long
-atomic_long_dec_if_positive(atomic_long_t *v)
-{
- return atomic_dec_if_positive(v);
-}
-
-#endif /* CONFIG_64BIT */
-#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
-// 77558968132ce4f911ad53f6f52ce423006f6268
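
The deleted atomic-long.h above likewise repeats a single scheme: atomic_long_t is typedef'd at compile time to the native-word atomic type (atomic64_t on 64-bit, atomic_t on 32-bit), and every operation is a one-line forwarder. A rough standalone sketch of that scheme, using C11 <stdatomic.h> types as stand-ins; my_atomic_long_t and my_atomic_long_add_return are illustrative names, not kernel identifiers.

/*
 * Pick the native-word atomic type once, then forward each operation
 * to it. ULONG_MAX is a portable test for a 64-bit long.
 */
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

#if ULONG_MAX > 0xffffffffUL		/* 64-bit long */
typedef _Atomic long long my_atomic_long_t;
#else					/* 32-bit long */
typedef _Atomic int my_atomic_long_t;
#endif

static long my_atomic_long_add_return(long i, my_atomic_long_t *v)
{
	/* fetch_add returns the old value; add i to mirror add_return(). */
	return atomic_fetch_add(v, i) + i;
}

int main(void)
{
	my_atomic_long_t v = 40;

	printf("%ld\n", my_atomic_long_add_return(2, &v));	/* prints 42 */
	return 0;
}
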
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 286867f593d2..04b8be9f1a77 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Generic C implementation of atomic counter operations. Usable on
- * UP systems only. Do not include in machine independent code.
+ * Generic C implementation of atomic counter operations. Do not include in
+ * machine independent code.
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -12,56 +12,39 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
-/*
- * atomic_$op() - $op integer to atomic variable
- * @i: integer value to $op
- * @v: pointer to the atomic variable
- *
- * Atomically $ops @i to @v. Does not strictly guarantee a memory-barrier, use
- * smp_mb__{before,after}_atomic().
- */
-
-/*
- * atomic_$op_return() - $op integer to atomic variable and returns the result
- * @i: integer value to $op
- * @v: pointer to the atomic variable
- *
- * Atomically $ops @i to @v. Does imply a full memory barrier.
- */
-
#ifdef CONFIG_SMP
/* we can build all atomic primitives from cmpxchg */
#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void generic_atomic_##op(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
- while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
}
#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int generic_atomic_##op##_return(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
- while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
\
return c c_op i; \
}
#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
- while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
\
return c; \
@@ -72,7 +55,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#include <linux/irqflags.h>
#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void generic_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -82,7 +65,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int generic_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
@@ -95,7 +78,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
@@ -110,89 +93,44 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#endif /* CONFIG_SMP */
-#ifndef atomic_add_return
ATOMIC_OP_RETURN(add, +)
-#endif
-
-#ifndef atomic_sub_return
ATOMIC_OP_RETURN(sub, -)
-#endif
-#ifndef atomic_fetch_add
ATOMIC_FETCH_OP(add, +)
-#endif
-
-#ifndef atomic_fetch_sub
ATOMIC_FETCH_OP(sub, -)
-#endif
-
-#ifndef atomic_fetch_and
ATOMIC_FETCH_OP(and, &)
-#endif
-
-#ifndef atomic_fetch_or
ATOMIC_FETCH_OP(or, |)
-#endif
-
-#ifndef atomic_fetch_xor
ATOMIC_FETCH_OP(xor, ^)
-#endif
-#ifndef atomic_and
+ATOMIC_OP(add, +)
+ATOMIC_OP(sub, -)
ATOMIC_OP(and, &)
-#endif
-
-#ifndef atomic_or
ATOMIC_OP(or, |)
-#endif
-
-#ifndef atomic_xor
ATOMIC_OP(xor, ^)
-#endif
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- */
+#define arch_atomic_add_return generic_atomic_add_return
+#define arch_atomic_sub_return generic_atomic_sub_return
-#define ATOMIC_INIT(i) { (i) }
+#define arch_atomic_fetch_add generic_atomic_fetch_add
+#define arch_atomic_fetch_sub generic_atomic_fetch_sub
+#define arch_atomic_fetch_and generic_atomic_fetch_and
+#define arch_atomic_fetch_or generic_atomic_fetch_or
+#define arch_atomic_fetch_xor generic_atomic_fetch_xor
-/**
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v.
- */
-#ifndef atomic_read
-#define atomic_read(v) READ_ONCE((v)->counter)
-#endif
-
-/**
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i.
- */
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-
-#include <linux/irqflags.h>
+#define arch_atomic_add generic_atomic_add
+#define arch_atomic_sub generic_atomic_sub
+#define arch_atomic_and generic_atomic_and
+#define arch_atomic_or generic_atomic_or
+#define arch_atomic_xor generic_atomic_xor
-static inline void atomic_add(int i, atomic_t *v)
-{
- atomic_add_return(i, v);
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
- atomic_sub_return(i, v);
-}
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+#define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (v)))
+#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (old), (new)))
#endif /* __ASM_GENERIC_ATOMIC_H */
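
Editor's note: on CONFIG_SMP the three macro families above are all the same arch_cmpxchg() retry loop with a different c_op spliced in; the rename to generic_atomic_* then lets the arch_* names be #defined onto them. A standalone C11 rendition of the fetch-op variant (illustrative, not the kernel's code):

    #include <stdatomic.h>

    /* The shape of ATOMIC_FETCH_OP(add, +): retry a compare-exchange until
     * no other CPU modified the counter between our read and our update. */
    static inline int generic_fetch_add_sketch(_Atomic int *v, int i)
    {
        int c = atomic_load_explicit(v, memory_order_relaxed);

        while (!atomic_compare_exchange_weak(v, &c, c + i))
            ;   /* on failure, c is reloaded with the current value */
        return c;   /* the old value, as arch_atomic_fetch_add() returns */
    }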
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 370f01d4450f..100d24b02e52 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -15,19 +15,17 @@ typedef struct {
#define ATOMIC64_INIT(i) { (i) }
-extern s64 atomic64_read(const atomic64_t *v);
-extern void atomic64_set(atomic64_t *v, s64 i);
-
-#define atomic64_set_release(v, i) atomic64_set((v), (i))
+extern s64 generic_atomic64_read(const atomic64_t *v);
+extern void generic_atomic64_set(atomic64_t *v, s64 i);
#define ATOMIC64_OP(op) \
-extern void atomic64_##op(s64 a, atomic64_t *v);
+extern void generic_atomic64_##op(s64 a, atomic64_t *v);
#define ATOMIC64_OP_RETURN(op) \
-extern s64 atomic64_##op##_return(s64 a, atomic64_t *v);
+extern s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v);
#define ATOMIC64_FETCH_OP(op) \
-extern s64 atomic64_fetch_##op(s64 a, atomic64_t *v);
+extern s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v);
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
@@ -46,11 +44,32 @@ ATOMIC64_OPS(xor)
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-extern s64 atomic64_dec_if_positive(atomic64_t *v);
-#define atomic64_dec_if_positive atomic64_dec_if_positive
-extern s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
-extern s64 atomic64_xchg(atomic64_t *v, s64 new);
-extern s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+extern s64 generic_atomic64_dec_if_positive(atomic64_t *v);
+extern s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
+extern s64 generic_atomic64_xchg(atomic64_t *v, s64 new);
+extern s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
+
+#define arch_atomic64_read generic_atomic64_read
+#define arch_atomic64_set generic_atomic64_set
+#define arch_atomic64_set_release generic_atomic64_set
+
+#define arch_atomic64_add generic_atomic64_add
+#define arch_atomic64_add_return generic_atomic64_add_return
+#define arch_atomic64_fetch_add generic_atomic64_fetch_add
+#define arch_atomic64_sub generic_atomic64_sub
+#define arch_atomic64_sub_return generic_atomic64_sub_return
+#define arch_atomic64_fetch_sub generic_atomic64_fetch_sub
+
+#define arch_atomic64_and generic_atomic64_and
+#define arch_atomic64_fetch_and generic_atomic64_fetch_and
+#define arch_atomic64_or generic_atomic64_or
+#define arch_atomic64_fetch_or generic_atomic64_fetch_or
+#define arch_atomic64_xor generic_atomic64_xor
+#define arch_atomic64_fetch_xor generic_atomic64_fetch_xor
+
+#define arch_atomic64_dec_if_positive generic_atomic64_dec_if_positive
+#define arch_atomic64_cmpxchg generic_atomic64_cmpxchg
+#define arch_atomic64_xchg generic_atomic64_xchg
+#define arch_atomic64_fetch_add_unless generic_atomic64_fetch_add_unless
#endif /* _ASM_GENERIC_ATOMIC64_H */
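
Editor's note: the generic_atomic64_*() operations are out of line (lib/atomic64.c) because this header targets machines without native 64-bit atomics; the implementation serializes through a small hashed array of raw spinlocks. A hedged userspace sketch of that fallback shape, with one mutex standing in for the hashed locks:

    #include <pthread.h>
    #include <stdint.h>

    typedef struct {
        int64_t counter;
        pthread_mutex_t lock;   /* the kernel hashes to a shared spinlock */
    } atomic64_sketch_t;

    static int64_t atomic64_add_return_sketch(int64_t a, atomic64_sketch_t *v)
    {
        int64_t ret;

        pthread_mutex_lock(&v->lock);
        v->counter += a;        /* 64-bit RMW made atomic by the lock */
        ret = v->counter;
        pthread_mutex_unlock(&v->lock);
        return ret;
    }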
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 85b28eb80b11..961f4d88f9ef 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -14,12 +14,43 @@
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
+#include <linux/kcsan-checks.h>
+#include <asm/rwonce.h>
#ifndef nop
#define nop() asm volatile ("nop")
#endif
/*
+ * Architectures that want generic instrumentation can define __ prefixed
+ * variants of all barriers.
+ */
+
+#ifdef __mb
+#define mb() do { kcsan_mb(); __mb(); } while (0)
+#endif
+
+#ifdef __rmb
+#define rmb() do { kcsan_rmb(); __rmb(); } while (0)
+#endif
+
+#ifdef __wmb
+#define wmb() do { kcsan_wmb(); __wmb(); } while (0)
+#endif
+
+#ifdef __dma_mb
+#define dma_mb() do { kcsan_mb(); __dma_mb(); } while (0)
+#endif
+
+#ifdef __dma_rmb
+#define dma_rmb() do { kcsan_rmb(); __dma_rmb(); } while (0)
+#endif
+
+#ifdef __dma_wmb
+#define dma_wmb() do { kcsan_wmb(); __dma_wmb(); } while (0)
+#endif
+
+/*
* Force strict CPU ordering. And yes, this is required on UP too when we're
* talking to devices.
*
@@ -38,6 +69,10 @@
#define wmb() mb()
#endif
+#ifndef dma_mb
+#define dma_mb() mb()
+#endif
+
#ifndef dma_rmb
#define dma_rmb() rmb()
#endif
@@ -46,10 +81,6 @@
#define dma_wmb() wmb()
#endif
-#ifndef read_barrier_depends
-#define read_barrier_depends() do { } while (0)
-#endif
-
#ifndef __smp_mb
#define __smp_mb() mb()
#endif
@@ -62,26 +93,18 @@
#define __smp_wmb() wmb()
#endif
-#ifndef __smp_read_barrier_depends
-#define __smp_read_barrier_depends() read_barrier_depends()
-#endif
-
#ifdef CONFIG_SMP
#ifndef smp_mb
-#define smp_mb() __smp_mb()
+#define smp_mb() do { kcsan_mb(); __smp_mb(); } while (0)
#endif
#ifndef smp_rmb
-#define smp_rmb() __smp_rmb()
+#define smp_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
#endif
#ifndef smp_wmb
-#define smp_wmb() __smp_wmb()
-#endif
-
-#ifndef smp_read_barrier_depends
-#define smp_read_barrier_depends() __smp_read_barrier_depends()
+#define smp_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
#endif
#else /* !CONFIG_SMP */
@@ -98,10 +121,6 @@
#define smp_wmb() barrier()
#endif
-#ifndef smp_read_barrier_depends
-#define smp_read_barrier_depends() do { } while (0)
-#endif
-
#endif /* CONFIG_SMP */
#ifndef __smp_store_mb
@@ -128,29 +147,29 @@ do { \
#ifndef __smp_load_acquire
#define __smp_load_acquire(p) \
({ \
- typeof(*p) ___p1 = READ_ONCE(*p); \
+ __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
__smp_mb(); \
- ___p1; \
+ (typeof(*p))___p1; \
})
#endif
#ifdef CONFIG_SMP
#ifndef smp_store_mb
-#define smp_store_mb(var, value) __smp_store_mb(var, value)
+#define smp_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
#endif
#ifndef smp_mb__before_atomic
-#define smp_mb__before_atomic() __smp_mb__before_atomic()
+#define smp_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
#endif
#ifndef smp_mb__after_atomic
-#define smp_mb__after_atomic() __smp_mb__after_atomic()
+#define smp_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
#endif
#ifndef smp_store_release
-#define smp_store_release(p, v) __smp_store_release(p, v)
+#define smp_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#endif
#ifndef smp_load_acquire
@@ -183,24 +202,23 @@ do { \
#ifndef smp_load_acquire
#define smp_load_acquire(p) \
({ \
- typeof(*p) ___p1 = READ_ONCE(*p); \
+ __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
barrier(); \
- ___p1; \
+ (typeof(*p))___p1; \
})
#endif
#endif /* CONFIG_SMP */
/* Barriers for virtual machine guests when talking to an SMP host */
-#define virt_mb() __smp_mb()
-#define virt_rmb() __smp_rmb()
-#define virt_wmb() __smp_wmb()
-#define virt_read_barrier_depends() __smp_read_barrier_depends()
-#define virt_store_mb(var, value) __smp_store_mb(var, value)
-#define virt_mb__before_atomic() __smp_mb__before_atomic()
-#define virt_mb__after_atomic() __smp_mb__after_atomic()
-#define virt_store_release(p, v) __smp_store_release(p, v)
+#define virt_mb() do { kcsan_mb(); __smp_mb(); } while (0)
+#define virt_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
+#define virt_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
+#define virt_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
+#define virt_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
+#define virt_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
+#define virt_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#define virt_load_acquire(p) __smp_load_acquire(p)
/**
@@ -229,14 +247,14 @@ do { \
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({ \
typeof(ptr) __PTR = (ptr); \
- typeof(*ptr) VAL; \
+ __unqual_scalar_typeof(*ptr) VAL; \
for (;;) { \
VAL = READ_ONCE(*__PTR); \
if (cond_expr) \
break; \
cpu_relax(); \
} \
- VAL; \
+ (typeof(*ptr))VAL; \
})
#endif
@@ -250,12 +268,33 @@ do { \
*/
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({ \
- typeof(*ptr) _val; \
+ __unqual_scalar_typeof(*ptr) _val; \
_val = smp_cond_load_relaxed(ptr, cond_expr); \
smp_acquire__after_ctrl_dep(); \
- _val; \
+ (typeof(*ptr))_val; \
})
#endif
+/*
+ * pmem_wmb() ensures that all stores to persistent storage issued by
+ * preceding instructions have reached persistent storage before any data
+ * access or data transfer caused by subsequent instructions is initiated.
+ */
+#ifndef pmem_wmb
+#define pmem_wmb() wmb()
+#endif
+
+/*
+ * ioremap_wc() maps I/O memory as memory with write-combining attributes. For
+ * this kind of memory access, the CPU may wait for prior accesses to be
+ * merged with subsequent ones. In some situations this waiting hurts
+ * performance. io_stop_wc() can be used to prevent the merging of
+ * write-combining memory accesses before this macro with those after it.
+ */
+#ifndef io_stop_wc
+#define io_stop_wc() do { } while (0)
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */
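
Editor's note: two things happen in this file. Every ordering macro gains a kcsan_*() call so KCSAN can model the barrier, and the load-acquire/cond-load family switches to __unqual_scalar_typeof() so that loading through a volatile pointer no longer forces a volatile local. The condition-loop idiom itself is unchanged; a C11 sketch of smp_cond_load_acquire() waiting for a nonzero value (illustrative only):

    #include <stdatomic.h>

    /* Spin until *ptr != 0, then return the observed value with acquire
     * ordering -- the shape of smp_cond_load_acquire(ptr, VAL != 0). */
    static inline int cond_load_acquire_sketch(_Atomic int *ptr)
    {
        int val;

        for (;;) {
            val = atomic_load_explicit(ptr, memory_order_relaxed);
            if (val != 0)
                break;
            /* the kernel inserts cpu_relax() here */
        }
        /* plays the role of smp_acquire__after_ctrl_dep() */
        atomic_thread_fence(memory_order_acquire);
        return val;
    }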
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
index bfc96bf6606e..a47b8a71d6fe 100644
--- a/include/asm-generic/bitops.h
+++ b/include/asm-generic/bitops.h
@@ -4,8 +4,9 @@
/*
* For the benefit of those who are trying to port Linux to another
- * architecture, here are some C-language equivalents. You should
- * recode these in the native assembly language, if at all possible.
+ * architecture, here are some C-language equivalents. They should
+ * generate reasonable code, so take a look at what your compiler spits
+ * out before rolling your own buggy implementation in assembly language.
*
* C language equivalents written by Theodore Ts'o, 9/26/92
*/
@@ -19,7 +20,6 @@
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
-#include <asm-generic/bitops/find.h>
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index dd90c9792909..71ab4ba9c25d 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -11,58 +11,60 @@
* See Documentation/atomic_bitops.txt for details.
*/
-static inline void set_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_set_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
- atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
+ arch_atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
}
-static inline void clear_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_clear_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
- atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
+ arch_atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
}
-static inline void change_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_change_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
- atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
+ arch_atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
}
-static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_set_bit(unsigned int nr, volatile unsigned long *p)
{
long old;
unsigned long mask = BIT_MASK(nr);
p += BIT_WORD(nr);
- if (READ_ONCE(*p) & mask)
- return 1;
-
- old = atomic_long_fetch_or(mask, (atomic_long_t *)p);
+ old = arch_atomic_long_fetch_or(mask, (atomic_long_t *)p);
return !!(old & mask);
}
-static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
{
long old;
unsigned long mask = BIT_MASK(nr);
p += BIT_WORD(nr);
- if (!(READ_ONCE(*p) & mask))
- return 0;
-
- old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
+ old = arch_atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
return !!(old & mask);
}
-static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_change_bit(unsigned int nr, volatile unsigned long *p)
{
long old;
unsigned long mask = BIT_MASK(nr);
p += BIT_WORD(nr);
- old = atomic_long_fetch_xor(mask, (atomic_long_t *)p);
+ old = arch_atomic_long_fetch_xor(mask, (atomic_long_t *)p);
return !!(old & mask);
}
+#include <asm-generic/bitops/instrumented-atomic.h>
+
#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */
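
Editor's note: each arch_*_bit() above is just an atomic_long operation on the word containing the bit. Note that the patch also drops the READ_ONCE() early-return fast paths from test_and_set/test_and_clear, making them unconditional RMWs; the fast path survives only in the _lock variant (see bitops/lock.h further down). A self-contained C11 version of arch_test_and_set_bit(), with BIT_WORD/BIT_MASK reimplemented locally:

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    #define BITS_PER_LONG_SK  (sizeof(unsigned long) * CHAR_BIT)
    #define BIT_WORD_SK(nr)   ((nr) / BITS_PER_LONG_SK)
    #define BIT_MASK_SK(nr)   (1UL << ((nr) % BITS_PER_LONG_SK))

    static inline bool
    test_and_set_bit_sketch(unsigned int nr, _Atomic unsigned long *p)
    {
        unsigned long mask = BIT_MASK_SK(nr);
        unsigned long old;

        p += BIT_WORD_SK(nr);           /* word that holds bit nr */
        old = atomic_fetch_or(p, mask); /* fully ordered, like the kernel op */
        return (old & mask) != 0;       /* previous state of the bit */
    }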
diff --git a/include/asm-generic/bitops/builtin-ffs.h b/include/asm-generic/bitops/builtin-ffs.h
index 458c85ebcd15..7b129329046b 100644
--- a/include/asm-generic/bitops/builtin-ffs.h
+++ b/include/asm-generic/bitops/builtin-ffs.h
@@ -8,11 +8,8 @@
*
* This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
+ * differs in spirit from ffz (man ffs).
*/
-static __always_inline int ffs(int x)
-{
- return __builtin_ffs(x);
-}
+#define ffs(x) __builtin_ffs(x)
#endif
diff --git a/include/asm-generic/bitops/ffs.h b/include/asm-generic/bitops/ffs.h
index e81868b2c0f0..323fd5d6ae26 100644
--- a/include/asm-generic/bitops/ffs.h
+++ b/include/asm-generic/bitops/ffs.h
@@ -8,7 +8,7 @@
*
* This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
+ * differs in spirit from ffz (man ffs).
*/
static inline int ffs(int x)
{
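
Editor's note: both the builtin and the open-coded file implement the libc/compiler ffs() contract the comment refers to: 1-based bit numbering with ffs(0) == 0, which is exactly where it "differs in spirit" from the 0-based ffz()/__ffs(). A quick userspace check against POSIX ffs():

    #include <assert.h>
    #include <strings.h>    /* POSIX ffs() */

    int main(void)
    {
        assert(ffs(0) == 0);        /* no bit set: returns 0 */
        assert(ffs(1) == 1);        /* numbering is 1-based */
        assert(ffs(0x8000) == 16);  /* bit 15 (0-based) -> 16 */
        return 0;
    }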
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
deleted file mode 100644
index 9fdf21302fdf..000000000000
--- a/include/asm-generic/bitops/find.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_BITOPS_FIND_H_
-#define _ASM_GENERIC_BITOPS_FIND_H_
-
-#ifndef find_next_bit
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The bitmap size in bits
- *
- * Returns the bit number for the next set bit
- * If no bits are set, returns @size.
- */
-extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
- size, unsigned long offset);
-#endif
-
-#ifndef find_next_and_bit
-/**
- * find_next_and_bit - find the next set bit in both memory regions
- * @addr1: The first address to base the search on
- * @addr2: The second address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The bitmap size in bits
- *
- * Returns the bit number for the next set bit
- * If no bits are set, returns @size.
- */
-extern unsigned long find_next_and_bit(const unsigned long *addr1,
- const unsigned long *addr2, unsigned long size,
- unsigned long offset);
-#endif
-
-#ifndef find_next_zero_bit
-/**
- * find_next_zero_bit - find the next cleared bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The bitmap size in bits
- *
- * Returns the bit number of the next zero bit
- * If no bits are zero, returns @size.
- */
-extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
- long size, unsigned long offset);
-#endif
-
-#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum number of bits to search
- *
- * Returns the bit number of the first set bit.
- * If no bits are set, returns @size.
- */
-extern unsigned long find_first_bit(const unsigned long *addr,
- unsigned long size);
-
-/**
- * find_first_zero_bit - find the first cleared bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum number of bits to search
- *
- * Returns the bit number of the first cleared bit.
- * If no bits are zero, returns @size.
- */
-extern unsigned long find_first_zero_bit(const unsigned long *addr,
- unsigned long size);
-#else /* CONFIG_GENERIC_FIND_FIRST_BIT */
-
-#ifndef find_first_bit
-#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
-#endif
-#ifndef find_first_zero_bit
-#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
-#endif
-
-#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
-
-/**
- * find_next_clump8 - find next 8-bit clump with set bits in a memory region
- * @clump: location to store copy of found clump
- * @addr: address to base the search on
- * @size: bitmap size in number of bits
- * @offset: bit offset at which to start searching
- *
- * Returns the bit offset for the next set clump; the found clump value is
- * copied to the location pointed by @clump. If no bits are set, returns @size.
- */
-extern unsigned long find_next_clump8(unsigned long *clump,
- const unsigned long *addr,
- unsigned long size, unsigned long offset);
-
-#define find_first_clump8(clump, bits, size) \
- find_next_clump8((clump), (bits), (size), 0)
-
-#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */
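
Editor's note: find.h is deleted outright; the prototypes moved to linux/find.h with the same contract: return the index of the next set (or zero) bit at or after @offset, or @size if there is none. For reference, a hedged bit-at-a-time version of that contract (the in-tree implementation scans word-at-a-time):

    #include <limits.h>

    static unsigned long find_next_bit_sketch(const unsigned long *addr,
                                              unsigned long size,
                                              unsigned long offset)
    {
        const unsigned long bpl = sizeof(unsigned long) * CHAR_BIT;

        for (; offset < size; offset++)
            if (addr[offset / bpl] & (1UL << (offset % bpl)))
                return offset;      /* first set bit at or after offset */
        return size;                /* no set bit found */
    }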
diff --git a/include/asm-generic/bitops/generic-non-atomic.h b/include/asm-generic/bitops/generic-non-atomic.h
new file mode 100644
index 000000000000..564a8c675d85
--- /dev/null
+++ b/include/asm-generic/bitops/generic-non-atomic.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
+#define __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
+
+#include <linux/bits.h>
+#include <asm/barrier.h>
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+/*
+ * Generic definitions for bit operations, should not be used in regular code
+ * directly.
+ */
+
+/**
+ * generic___set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __always_inline void
+generic___set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p |= mask;
+}
+
+static __always_inline void
+generic___clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p &= ~mask;
+}
+
+/**
+ * generic___change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __always_inline void
+generic___change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p ^= mask;
+}
+
+/**
+ * generic___test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static __always_inline bool
+generic___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old | mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * generic___test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static __always_inline bool
+generic___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old & ~mask;
+ return (old & mask) != 0;
+}
+
+/* WARNING: non-atomic and it can be reordered! */
+static __always_inline bool
+generic___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old ^ mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * generic_test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+generic_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+ /*
+ * Unlike the bitops with the '__' prefix above, this one *is* atomic,
+ * so `volatile` must always stay here with no cast-aways. See
+ * `Documentation/atomic_bitops.txt` for the details.
+ */
+ return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+}
+
+/**
+ * generic_test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+generic_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
+/*
+ * const_*() definitions provide good compile-time optimizations when
+ * the passed arguments can be resolved at compile time.
+ */
+#define const___set_bit generic___set_bit
+#define const___clear_bit generic___clear_bit
+#define const___change_bit generic___change_bit
+#define const___test_and_set_bit generic___test_and_set_bit
+#define const___test_and_clear_bit generic___test_and_clear_bit
+#define const___test_and_change_bit generic___test_and_change_bit
+#define const_test_bit_acquire generic_test_bit_acquire
+
+/**
+ * const_test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ *
+ * A version of generic_test_bit() which discards the `volatile` qualifier to
+ * allow a compiler to optimize code harder. Non-atomic and to be called only
+ * for testing compile-time constants, e.g. by the corresponding macros, not
+ * directly from "regular" code.
+ */
+static __always_inline bool
+const_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+ const unsigned long *p = (const unsigned long *)addr + BIT_WORD(nr);
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long val = *p;
+
+ return !!(val & mask);
+}
+
+#endif /* __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H */
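
Editor's note: the const_*() aliases exist so the wrappers in linux/bitops.h can route a call to the volatile-free variant when everything is resolvable at compile time, letting the compiler fold the whole operation away. A simplified and hypothetical version of such a dispatch (the in-tree bitop() macro performs stricter checks, e.g. that *addr itself is a compile-time constant):

    /* Pick the constant-foldable path only when both arguments are
     * compile-time constants; otherwise use the instrumented runtime op. */
    #define test_bit_sketch(nr, addr)                               \
        (__builtin_constant_p(nr) && __builtin_constant_p(addr) ?   \
             const_test_bit(nr, addr) :                             \
             _test_bit(nr, addr))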
diff --git a/include/asm-generic/bitops/instrumented-atomic.h b/include/asm-generic/bitops/instrumented-atomic.h
index 18ce3c9e8eec..4225a8ca9c1a 100644
--- a/include/asm-generic/bitops/instrumented-atomic.h
+++ b/include/asm-generic/bitops/instrumented-atomic.h
@@ -11,7 +11,7 @@
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
/**
* set_bit - Atomically set a bit in memory
@@ -23,9 +23,9 @@
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void set_bit(long nr, volatile unsigned long *addr)
+static __always_inline void set_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_set_bit(nr, addr);
}
@@ -36,9 +36,9 @@ static inline void set_bit(long nr, volatile unsigned long *addr)
*
* This is a relaxed atomic operation (no implied memory barriers).
*/
-static inline void clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline void clear_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_clear_bit(nr, addr);
}
@@ -52,9 +52,9 @@ static inline void clear_bit(long nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void change_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_change_bit(nr, addr);
}
@@ -65,9 +65,10 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
*
* This is an atomic fully-ordered operation (implied full memory barrier).
*/
-static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ kcsan_mb();
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit(nr, addr);
}
@@ -78,9 +79,10 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
*
* This is an atomic fully-ordered operation (implied full memory barrier).
*/
-static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ kcsan_mb();
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_clear_bit(nr, addr);
}
@@ -91,9 +93,10 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
*
* This is an atomic fully-ordered operation (implied full memory barrier).
*/
-static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ kcsan_mb();
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_change_bit(nr, addr);
}
diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h
index ec53fdeea9ec..eb64bd4f11f3 100644
--- a/include/asm-generic/bitops/instrumented-lock.h
+++ b/include/asm-generic/bitops/instrumented-lock.h
@@ -11,7 +11,7 @@
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
/**
* clear_bit_unlock - Clear a bit in memory, for unlock
@@ -22,7 +22,8 @@
*/
static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ kcsan_release();
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_clear_bit_unlock(nr, addr);
}
@@ -37,7 +38,8 @@ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
*/
static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ kcsan_release();
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit_unlock(nr, addr);
}
@@ -52,7 +54,7 @@ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit_lock(nr, addr);
}
@@ -71,7 +73,8 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
static inline bool
clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ kcsan_release();
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_clear_bit_unlock_is_negative_byte(nr, addr);
}
/* Let everybody know we have it. */
diff --git a/include/asm-generic/bitops/instrumented-non-atomic.h b/include/asm-generic/bitops/instrumented-non-atomic.h
index 95ff28d128a1..2b238b161a62 100644
--- a/include/asm-generic/bitops/instrumented-non-atomic.h
+++ b/include/asm-generic/bitops/instrumented-non-atomic.h
@@ -11,10 +11,10 @@
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
/**
- * __set_bit - Set a bit in memory
+ * ___set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
@@ -22,14 +22,15 @@
* region of memory concurrently, the effect may be that only one operation
* succeeds.
*/
-static inline void __set_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+___set_bit(unsigned long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___set_bit(nr, addr);
}
/**
- * __clear_bit - Clears a bit in memory
+ * ___clear_bit - Clears a bit in memory
* @nr: the bit to clear
* @addr: the address to start counting from
*
@@ -37,14 +38,15 @@ static inline void __set_bit(long nr, volatile unsigned long *addr)
* region of memory concurrently, the effect may be that only one operation
* succeeds.
*/
-static inline void __clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit(nr, addr);
}
/**
- * __change_bit - Toggle a bit in memory
+ * ___change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
@@ -52,63 +54,104 @@ static inline void __clear_bit(long nr, volatile unsigned long *addr)
* region of memory concurrently, the effect may be that only one operation
* succeeds.
*/
-static inline void __change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+___change_bit(unsigned long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___change_bit(nr, addr);
}
+static __always_inline void __instrument_read_write_bitop(long nr, volatile unsigned long *addr)
+{
+ if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC)) {
+ /*
+ * We treat non-atomic read-write bitops a little more special.
+ * Given the operations here only modify a single bit, assuming
+ * non-atomicity of the writer is sufficient may be reasonable
+ * for certain usage (and follows the permissible nature of the
+ * assume-plain-writes-atomic rule):
+ * 1. report read-modify-write races -> check read;
+ * 2. do not report races with marked readers, but do report
+ * races with unmarked readers -> check "atomic" write.
+ */
+ kcsan_check_read(addr + BIT_WORD(nr), sizeof(long));
+ /*
+ * Use generic write instrumentation, in case other sanitizers
+ * or tools are enabled alongside KCSAN.
+ */
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
+ } else {
+ instrument_read_write(addr + BIT_WORD(nr), sizeof(long));
+ }
+}
+
/**
- * __test_and_set_bit - Set a bit and return its old value
+ * ___test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic. If two instances of this operation race, one
* can appear to succeed but actually fail.
*/
-static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ __instrument_read_write_bitop(nr, addr);
return arch___test_and_set_bit(nr, addr);
}
/**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * ___test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is non-atomic. If two instances of this operation race, one
* can appear to succeed but actually fail.
*/
-static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ __instrument_read_write_bitop(nr, addr);
return arch___test_and_clear_bit(nr, addr);
}
/**
- * __test_and_change_bit - Change a bit and return its old value
+ * ___test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is non-atomic. If two instances of this operation race, one
* can appear to succeed but actually fail.
*/
-static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ __instrument_read_write_bitop(nr, addr);
return arch___test_and_change_bit(nr, addr);
}
/**
- * test_bit - Determine whether a bit is set
+ * _test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
-static inline bool test_bit(long nr, const volatile unsigned long *addr)
+static __always_inline bool
+_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
- kasan_check_read(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
return arch_test_bit(nr, addr);
}
+/**
+ * _test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+ instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
+ return arch_test_bit_acquire(nr, addr);
+}
+
#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
index 188d3eba3ace..d51beff60375 100644
--- a/include/asm-generic/bitops/le.h
+++ b/include/asm-generic/bitops/le.h
@@ -9,46 +9,12 @@
#define BITOP_LE_SWIZZLE 0
-static inline unsigned long find_next_zero_bit_le(const void *addr,
- unsigned long size, unsigned long offset)
-{
- return find_next_zero_bit(addr, size, offset);
-}
-
-static inline unsigned long find_next_bit_le(const void *addr,
- unsigned long size, unsigned long offset)
-{
- return find_next_bit(addr, size, offset);
-}
-
-static inline unsigned long find_first_zero_bit_le(const void *addr,
- unsigned long size)
-{
- return find_first_zero_bit(addr, size);
-}
-
#elif defined(__BIG_ENDIAN)
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
-#ifndef find_next_zero_bit_le
-extern unsigned long find_next_zero_bit_le(const void *addr,
- unsigned long size, unsigned long offset);
-#endif
-
-#ifndef find_next_bit_le
-extern unsigned long find_next_bit_le(const void *addr,
- unsigned long size, unsigned long offset);
#endif
-#ifndef find_first_zero_bit_le
-#define find_first_zero_bit_le(addr, size) \
- find_next_zero_bit_le((addr), (size), 0)
-#endif
-
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
static inline int test_bit_le(int nr, const void *addr)
{
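
Editor's note: what remains after the find_*_le() removals is the swizzle constant. On big-endian, the *_le() ops XOR the bit number with (BITS_PER_LONG-1) & ~0x7, remapping a little-endian bit index onto the native bit in the same memory byte. A 64-bit big-endian sketch with the constants hardcoded for clarity:

    /* 64-bit big-endian: swizzle = (64-1) & ~7 = 56. LE bit 0 (LSB of the
     * lowest-addressed byte) maps to native bit 56, the LSB of the first
     * byte of a big-endian word; on little-endian the swizzle is 0. */
    static inline int test_bit_le_sketch(int nr, const unsigned long *addr)
    {
        nr ^= 56;
        return (int)((addr[nr / 64] >> (nr % 64)) & 1);
    }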
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index 3ae021368f48..630f2f6b9595 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -7,7 +7,7 @@
#include <asm/barrier.h>
/**
- * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock
* @nr: Bit to set
* @addr: Address to count from
*
@@ -15,8 +15,8 @@
* the returned value is 0.
* It can be used to implement bit locks.
*/
-static inline int test_and_set_bit_lock(unsigned int nr,
- volatile unsigned long *p)
+static __always_inline int
+arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p)
{
long old;
unsigned long mask = BIT_MASK(nr);
@@ -25,26 +25,27 @@ static inline int test_and_set_bit_lock(unsigned int nr,
if (READ_ONCE(*p) & mask)
return 1;
- old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
+ old = arch_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
return !!(old & mask);
}
/**
- * clear_bit_unlock - Clear a bit in memory, for unlock
+ * arch_clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to set
* @addr: the address to start counting from
*
* This operation is atomic and provides release barrier semantics.
*/
-static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
- atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
+ arch_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
}
/**
- * __clear_bit_unlock - Clear a bit in memory, for unlock
+ * arch___clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to set
* @addr: the address to start counting from
*
@@ -54,38 +55,40 @@ static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
*
* See for example x86's implementation.
*/
-static inline void __clear_bit_unlock(unsigned int nr,
- volatile unsigned long *p)
+static inline void
+arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
unsigned long old;
p += BIT_WORD(nr);
old = READ_ONCE(*p);
old &= ~BIT_MASK(nr);
- atomic_long_set_release((atomic_long_t *)p, old);
+ arch_atomic_long_set_release((atomic_long_t *)p, old);
}
/**
- * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
- * byte is negative, for unlock.
+ * arch_clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
+ * byte is negative, for unlock.
* @nr: the bit to clear
* @addr: the address to start counting from
*
* This is a bit of a one-trick-pony for the filemap code, which clears
* PG_locked and tests PG_waiters,
*/
-#ifndef clear_bit_unlock_is_negative_byte
-static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr,
- volatile unsigned long *p)
+#ifndef arch_clear_bit_unlock_is_negative_byte
+static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr,
+ volatile unsigned long *p)
{
long old;
unsigned long mask = BIT_MASK(nr);
p += BIT_WORD(nr);
- old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
+ old = arch_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
return !!(old & BIT(7));
}
-#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+#define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte
#endif
+#include <asm-generic/bitops/instrumented-lock.h>
+
#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
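
Editor's note: unlike the plain atomic bitops above, arch_test_and_set_bit_lock() keeps its read-first fast path: if the bit is already set the lock is contended, and the expensive atomic RMW is skipped entirely. A C11 sketch of that structure:

    #include <stdatomic.h>

    static inline int
    test_and_set_bit_lock_sketch(unsigned long mask, _Atomic unsigned long *p)
    {
        /* fast path: lock already held, no bus-locking RMW needed */
        if (atomic_load_explicit(p, memory_order_relaxed) & mask)
            return 1;
        /* acquire ordering pairs with the release in the unlock path */
        return (atomic_fetch_or_explicit(p, mask, memory_order_acquire)
                & mask) != 0;
    }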
diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h
index 7e10c4b50c5d..71f8d54a5195 100644
--- a/include/asm-generic/bitops/non-atomic.h
+++ b/include/asm-generic/bitops/non-atomic.h
@@ -2,108 +2,19 @@
#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
-#include <asm/types.h>
+#include <asm-generic/bitops/generic-non-atomic.h>
-/**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __set_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+#define arch___set_bit generic___set_bit
+#define arch___clear_bit generic___clear_bit
+#define arch___change_bit generic___change_bit
- *p |= mask;
-}
+#define arch___test_and_set_bit generic___test_and_set_bit
+#define arch___test_and_clear_bit generic___test_and_clear_bit
+#define arch___test_and_change_bit generic___test_and_change_bit
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+#define arch_test_bit generic_test_bit
+#define arch_test_bit_acquire generic_test_bit_acquire
- *p &= ~mask;
-}
-
-/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to change
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __change_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-
- *p ^= mask;
-}
-
-/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old = *p;
-
- *p = old | mask;
- return (old & mask) != 0;
-}
-
-/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old = *p;
-
- *p = old & ~mask;
- return (old & mask) != 0;
-}
-
-/* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr,
- volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old = *p;
-
- *p = old ^ mask;
- return (old & mask) != 0;
-}
-
-/**
- * test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static inline int test_bit(int nr, const volatile unsigned long *addr)
-{
- return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
-}
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
diff --git a/include/asm-generic/bitops/non-instrumented-non-atomic.h b/include/asm-generic/bitops/non-instrumented-non-atomic.h
new file mode 100644
index 000000000000..0ddc78dfc358
--- /dev/null
+++ b/include/asm-generic/bitops/non-instrumented-non-atomic.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H
+#define __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H
+
+#define ___set_bit arch___set_bit
+#define ___clear_bit arch___clear_bit
+#define ___change_bit arch___change_bit
+
+#define ___test_and_set_bit arch___test_and_set_bit
+#define ___test_and_clear_bit arch___test_and_clear_bit
+#define ___test_and_change_bit arch___test_and_change_bit
+
+#define _test_bit arch_test_bit
+#define _test_bit_acquire arch_test_bit_acquire
+
+#endif /* __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H */
diff --git a/include/asm-generic/bitsperlong.h b/include/asm-generic/bitsperlong.h
index 3905c1c93dc2..1023e2a4bd37 100644
--- a/include/asm-generic/bitsperlong.h
+++ b/include/asm-generic/bitsperlong.h
@@ -23,4 +23,16 @@
#define BITS_PER_LONG_LONG 64
#endif
+/*
+ * small_const_nbits(n) is true precisely when it is known at compile-time
+ * that BITMAP_SIZE(n) is 1, i.e. 1 <= n <= BITS_PER_LONG. This allows
+ * various bit/bitmap APIs to provide a fast inline implementation. Bitmaps
+ * of size 0 are very rare, and a compile-time-known-size 0 is most likely
+ * a sign of error. They will be handled correctly by the bit/bitmap APIs,
+ * but using the out-of-line functions, so that the inline implementations
+ * can unconditionally dereference the pointer(s).
+ */
+#define small_const_nbits(nbits) \
+ (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0)
+
#endif /* __ASM_GENERIC_BITS_PER_LONG */
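
Editor's note: small_const_nbits() is the switch that lets bitmap/bit helpers inline a single-word computation when nbits is a compile-time constant in 1..BITS_PER_LONG. A hedged sketch of the usage pattern (bitmap_empty_out_of_line() is a hypothetical fallback, standing in for the real out-of-line routines):

    /* Constant small nbits: test the one word inline, masking off the
     * unused high bits; anything else takes the generic path. */
    static inline int bitmap_empty_sketch(const unsigned long *src,
                                          unsigned int nbits)
    {
        if (small_const_nbits(nbits))
            return !(*src & (nbits == BITS_PER_LONG ?
                             ~0UL : (1UL << nbits) - 1));
        return bitmap_empty_out_of_line(src, nbits);  /* hypothetical */
    }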
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 384b5c835ced..4050b191e1a9 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -3,6 +3,8 @@
#define _ASM_GENERIC_BUG_H
#include <linux/compiler.h>
+#include <linux/instrumentation.h>
+#include <linux/once_lite.h>
#define CUT_HERE "------------[ cut here ]------------\n"
@@ -16,7 +18,14 @@
#endif
#ifndef __ASSEMBLY__
-#include <linux/kernel.h>
+#include <linux/panic.h>
+#include <linux/printk.h>
+
+struct warn_args;
+struct pt_regs;
+
+void __warn(const char *file, int line, void *caller, unsigned taint,
+ struct pt_regs *regs, struct warn_args *args);
#ifdef CONFIG_BUG
@@ -83,14 +92,19 @@ extern __printf(4, 5)
void warn_slowpath_fmt(const char *file, const int line, unsigned taint,
const char *fmt, ...);
#define __WARN() __WARN_printf(TAINT_WARN, NULL)
-#define __WARN_printf(taint, arg...) \
- warn_slowpath_fmt(__FILE__, __LINE__, taint, arg)
+#define __WARN_printf(taint, arg...) do { \
+ instrumentation_begin(); \
+ warn_slowpath_fmt(__FILE__, __LINE__, taint, arg); \
+ instrumentation_end(); \
+ } while (0)
#else
extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
#define __WARN() __WARN_FLAGS(BUGFLAG_TAINT(TAINT_WARN))
#define __WARN_printf(taint, arg...) do { \
+ instrumentation_begin(); \
__warn_printk(arg); \
__WARN_FLAGS(BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\
+ instrumentation_end(); \
} while (0)
#define WARN_ON_ONCE(condition) ({ \
int __ret_warn_on = !!(condition); \
@@ -102,11 +116,6 @@ extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
#endif
/* used internally by panic.c */
-struct warn_args;
-struct pt_regs;
-
-void __warn(const char *file, int line, void *caller, unsigned taint,
- struct pt_regs *regs, struct warn_args *args);
#ifndef WARN_ON
#define WARN_ON(condition) ({ \
@@ -134,39 +143,15 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
})
#ifndef WARN_ON_ONCE
-#define WARN_ON_ONCE(condition) ({ \
- static bool __section(.data.once) __warned; \
- int __ret_warn_once = !!(condition); \
- \
- if (unlikely(__ret_warn_once && !__warned)) { \
- __warned = true; \
- WARN_ON(1); \
- } \
- unlikely(__ret_warn_once); \
-})
+#define WARN_ON_ONCE(condition) \
+ DO_ONCE_LITE_IF(condition, WARN_ON, 1)
#endif
-#define WARN_ONCE(condition, format...) ({ \
- static bool __section(.data.once) __warned; \
- int __ret_warn_once = !!(condition); \
- \
- if (unlikely(__ret_warn_once && !__warned)) { \
- __warned = true; \
- WARN(1, format); \
- } \
- unlikely(__ret_warn_once); \
-})
+#define WARN_ONCE(condition, format...) \
+ DO_ONCE_LITE_IF(condition, WARN, 1, format)
-#define WARN_TAINT_ONCE(condition, taint, format...) ({ \
- static bool __section(.data.once) __warned; \
- int __ret_warn_once = !!(condition); \
- \
- if (unlikely(__ret_warn_once && !__warned)) { \
- __warned = true; \
- WARN_TAINT(1, taint, format); \
- } \
- unlikely(__ret_warn_once); \
-})
+#define WARN_TAINT_ONCE(condition, taint, format...) \
+ DO_ONCE_LITE_IF(condition, WARN_TAINT, 1, taint, format)
#else /* !CONFIG_BUG */
#ifndef HAVE_ARCH_BUG
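
Editor's note: the three hand-rolled once-blocks above collapse into DO_ONCE_LITE_IF(). An approximation of what that helper expands to (the real macro in include/linux/once_lite.h additionally places the flag in the .data.once section and wraps the result in unlikely()):

    #include <stdbool.h>

    #define DO_ONCE_LITE_IF_SKETCH(condition, func, ...)        \
    ({                                                          \
        static bool __already_done;                             \
        bool __ret_cond = !!(condition);                        \
                                                                \
        if (__ret_cond && !__already_done) {                    \
            __already_done = true;                              \
            func(__VA_ARGS__);      /* fires at most once */    \
        }                                                       \
        __ret_cond;                                             \
    })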
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index cac7404b2bdd..f46258d1a080 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -1,11 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_CACHEFLUSH_H
-#define __ASM_CACHEFLUSH_H
+#ifndef _ASM_GENERIC_CACHEFLUSH_H
+#define _ASM_GENERIC_CACHEFLUSH_H
-/* Keep includes the same across arches. */
-#include <linux/mm.h>
+#include <linux/instrumented.h>
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+struct mm_struct;
+struct vm_area_struct;
+struct page;
+struct address_space;
/*
* The cache doesn't need to be flushed when TLB entries change when
@@ -45,10 +47,12 @@ static inline void flush_cache_page(struct vm_area_struct *vma,
}
#endif
-#ifndef flush_dcache_page
+#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static inline void flush_dcache_page(struct page *page)
{
}
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#endif
#ifndef flush_dcache_mmap_lock
@@ -69,6 +73,10 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
}
#endif
+#ifndef flush_icache_user_range
+#define flush_icache_user_range flush_icache_range
+#endif
+
#ifndef flush_icache_page
static inline void flush_icache_page(struct vm_area_struct *vma,
struct page *page)
@@ -76,8 +84,8 @@ static inline void flush_icache_page(struct vm_area_struct *vma,
}
#endif
-#ifndef flush_icache_user_range
-static inline void flush_icache_user_range(struct vm_area_struct *vma,
+#ifndef flush_icache_user_page
+static inline void flush_icache_user_page(struct vm_area_struct *vma,
struct page *page,
unsigned long addr, int len)
{
@@ -99,14 +107,22 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
+ instrument_copy_to_user((void __user *)dst, src, len); \
memcpy(dst, src, len); \
- flush_icache_user_range(vma, page, vaddr, len); \
+ flush_icache_user_page(vma, page, vaddr, len); \
} while (0)
#endif
+
#ifndef copy_from_user_page
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ instrument_copy_from_user_before(dst, (void __user *)src, \
+ len); \
+ memcpy(dst, src, len); \
+ instrument_copy_from_user_after(dst, (void __user *)src, len, \
+ 0); \
+ } while (0)
#endif
-#endif /* __ASM_CACHEFLUSH_H */
+#endif /* _ASM_GENERIC_CACHEFLUSH_H */
diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h
index 34785c0f57b0..43e18db89c14 100644
--- a/include/asm-generic/checksum.h
+++ b/include/asm-generic/checksum.h
@@ -16,29 +16,6 @@
*/
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
-/*
- * the same as csum_partial, but copies from src while it
- * checksums
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum);
-
-/*
- * the same as csum_partial_copy, but copies from user space.
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *csum_err);
-
-#ifndef csum_partial_copy_nocheck
-#define csum_partial_copy_nocheck(src, dst, len, sum) \
- csum_partial_copy((src), (dst), (len), (sum))
-#endif
-
#ifndef ip_fast_csum
/*
* This is a version of ip_compute_csum() optimized for IP headers,
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
index f17f14f84d09..380cdc824e4b 100644
--- a/include/asm-generic/cmpxchg-local.h
+++ b/include/asm-generic/cmpxchg-local.h
@@ -12,7 +12,7 @@ extern unsigned long wrong_size_cmpxchg(volatile void *ptr)
* Generic version of __cmpxchg_local (disables interrupts). Takes an unsigned
* long parameter, supporting various types of architectures.
*/
-static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
+static inline unsigned long __generic_cmpxchg_local(volatile void *ptr,
unsigned long old, unsigned long new, int size)
{
unsigned long flags, prev;
@@ -51,7 +51,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
/*
* Generic version of __cmpxchg64_local. Takes an u64 parameter.
*/
-static inline u64 __cmpxchg64_local_generic(volatile void *ptr,
+static inline u64 __generic_cmpxchg64_local(volatile void *ptr,
u64 old, u64 new)
{
u64 prev;
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
index 9a24510cd8c1..dca4419922a9 100644
--- a/include/asm-generic/cmpxchg.h
+++ b/include/asm-generic/cmpxchg.h
@@ -14,16 +14,14 @@
#include <linux/types.h>
#include <linux/irqflags.h>
-#ifndef xchg
-
/*
* This function doesn't exist, so you'll get a linker error if
* something tries to do an invalidly-sized xchg().
*/
-extern void __xchg_called_with_bad_pointer(void);
+extern void __generic_xchg_called_with_bad_pointer(void);
static inline
-unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
{
unsigned long ret, flags;
@@ -75,35 +73,43 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
#endif /* CONFIG_64BIT */
default:
- __xchg_called_with_bad_pointer();
+ __generic_xchg_called_with_bad_pointer();
return x;
}
}
-#define xchg(ptr, x) ({ \
- ((__typeof__(*(ptr))) \
- __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
+#define generic_xchg(ptr, x) ({ \
+ ((__typeof__(*(ptr))) \
+ __generic_xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
})
-#endif /* xchg */
-
/*
* Atomic compare and exchange.
*/
#include <asm-generic/cmpxchg-local.h>
-#ifndef cmpxchg_local
-#define cmpxchg_local(ptr, o, n) ({ \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
- (unsigned long)(n), sizeof(*(ptr)))); \
+#define generic_cmpxchg_local(ptr, o, n) ({ \
+ ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr)))); \
})
+
+#define generic_cmpxchg64_local(ptr, o, n) \
+ __generic_cmpxchg64_local((ptr), (o), (n))
+
+
+#ifndef arch_xchg
+#define arch_xchg generic_xchg
+#endif
+
+#ifndef arch_cmpxchg_local
+#define arch_cmpxchg_local generic_cmpxchg_local
#endif
-#ifndef cmpxchg64_local
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#ifndef arch_cmpxchg64_local
+#define arch_cmpxchg64_local generic_cmpxchg64_local
#endif
-#define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n))
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg arch_cmpxchg_local
+#define arch_cmpxchg64 arch_cmpxchg64_local
#endif /* __ASM_GENERIC_CMPXCHG_H */
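The net effect of this hunk is a namespace flip: the generic implementations gain a generic_ prefix and are wired up as defaults for the arch_ entry points only when the architecture has not already defined them. A hedged sketch of the resulting override pattern in a hypothetical arch header (foo_native_xchg() is illustrative):

	/* arch/foo/include/asm/cmpxchg.h (hypothetical) */
	#define arch_xchg(ptr, x)	foo_native_xchg(ptr, x)
	#include <asm-generic/cmpxchg.h>	/* fills in arch_cmpxchg_local etc. */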
diff --git a/include/asm-generic/compat.h b/include/asm-generic/compat.h
index a86f65bffab8..8392caea398f 100644
--- a/include/asm-generic/compat.h
+++ b/include/asm-generic/compat.h
@@ -2,6 +2,30 @@
#ifndef __ASM_GENERIC_COMPAT_H
#define __ASM_GENERIC_COMPAT_H
+#ifndef COMPAT_USER_HZ
+#define COMPAT_USER_HZ 100
+#endif
+
+#ifndef COMPAT_RLIM_INFINITY
+#define COMPAT_RLIM_INFINITY 0xffffffff
+#endif
+
+#ifndef COMPAT_OFF_T_MAX
+#define COMPAT_OFF_T_MAX 0x7fffffff
+#endif
+
+#ifndef compat_arg_u64
+#ifndef CONFIG_CPU_BIG_ENDIAN
+#define compat_arg_u64(name) u32 name##_lo, u32 name##_hi
+#define compat_arg_u64_dual(name) u32, name##_lo, u32, name##_hi
+#else
+#define compat_arg_u64(name) u32 name##_hi, u32 name##_lo
+#define compat_arg_u64_dual(name) u32, name##_hi, u32, name##_lo
+#endif
+#define compat_arg_u64_glue(name) (((u64)name##_lo & 0xffffffffUL) | \
+ ((u64)name##_hi << 32))
+#endif /* compat_arg_u64 */
+
/* These types are common across all compat ABIs */
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
@@ -20,6 +44,125 @@ typedef u16 compat_ushort_t;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
typedef u32 compat_uptr_t;
+typedef u32 compat_caddr_t;
typedef u32 compat_aio_context_t;
+typedef u32 compat_old_sigset_t;
+
+#ifndef __compat_uid_t
+typedef u32 __compat_uid_t;
+typedef u32 __compat_gid_t;
+#endif
+
+#ifndef __compat_uid32_t
+typedef u32 __compat_uid32_t;
+typedef u32 __compat_gid32_t;
+#endif
+
+#ifndef compat_mode_t
+typedef u32 compat_mode_t;
+#endif
+
+#ifdef CONFIG_COMPAT_FOR_U64_ALIGNMENT
+typedef s64 __attribute__((aligned(4))) compat_s64;
+typedef u64 __attribute__((aligned(4))) compat_u64;
+#else
+typedef s64 compat_s64;
+typedef u64 compat_u64;
+#endif
+
+#ifndef _COMPAT_NSIG
+typedef u32 compat_sigset_word;
+#define _COMPAT_NSIG _NSIG
+#define _COMPAT_NSIG_BPW 32
+#endif
+
+#ifndef compat_dev_t
+typedef u32 compat_dev_t;
+#endif
+
+#ifndef compat_ipc_pid_t
+typedef s32 compat_ipc_pid_t;
+#endif
+
+#ifndef compat_fsid_t
+typedef __kernel_fsid_t compat_fsid_t;
+#endif
+
+#ifndef compat_statfs
+struct compat_statfs {
+ compat_int_t f_type;
+ compat_int_t f_bsize;
+ compat_int_t f_blocks;
+ compat_int_t f_bfree;
+ compat_int_t f_bavail;
+ compat_int_t f_files;
+ compat_int_t f_ffree;
+ compat_fsid_t f_fsid;
+ compat_int_t f_namelen;
+ compat_int_t f_frsize;
+ compat_int_t f_flags;
+ compat_int_t f_spare[4];
+};
+#endif
+
+#ifndef compat_ipc64_perm
+struct compat_ipc64_perm {
+ compat_key_t key;
+ __compat_uid32_t uid;
+ __compat_gid32_t gid;
+ __compat_uid32_t cuid;
+ __compat_gid32_t cgid;
+ compat_mode_t mode;
+ unsigned char __pad1[4 - sizeof(compat_mode_t)];
+ compat_ushort_t seq;
+ compat_ushort_t __pad2;
+ compat_ulong_t unused1;
+ compat_ulong_t unused2;
+};
+
+struct compat_semid64_ds {
+ struct compat_ipc64_perm sem_perm;
+ compat_ulong_t sem_otime;
+ compat_ulong_t sem_otime_high;
+ compat_ulong_t sem_ctime;
+ compat_ulong_t sem_ctime_high;
+ compat_ulong_t sem_nsems;
+ compat_ulong_t __unused3;
+ compat_ulong_t __unused4;
+};
+
+struct compat_msqid64_ds {
+ struct compat_ipc64_perm msg_perm;
+ compat_ulong_t msg_stime;
+ compat_ulong_t msg_stime_high;
+ compat_ulong_t msg_rtime;
+ compat_ulong_t msg_rtime_high;
+ compat_ulong_t msg_ctime;
+ compat_ulong_t msg_ctime_high;
+ compat_ulong_t msg_cbytes;
+ compat_ulong_t msg_qnum;
+ compat_ulong_t msg_qbytes;
+ compat_pid_t msg_lspid;
+ compat_pid_t msg_lrpid;
+ compat_ulong_t __unused4;
+ compat_ulong_t __unused5;
+};
+
+struct compat_shmid64_ds {
+ struct compat_ipc64_perm shm_perm;
+ compat_size_t shm_segsz;
+ compat_ulong_t shm_atime;
+ compat_ulong_t shm_atime_high;
+ compat_ulong_t shm_dtime;
+ compat_ulong_t shm_dtime_high;
+ compat_ulong_t shm_ctime;
+ compat_ulong_t shm_ctime_high;
+ compat_pid_t shm_cpid;
+ compat_pid_t shm_lpid;
+ compat_ulong_t shm_nattch;
+ compat_ulong_t __unused4;
+ compat_ulong_t __unused5;
+};
+#endif
#endif
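The compat_arg_u64() helpers let a compat syscall take a 64-bit argument as two 32-bit halves in the endian-correct register order, with compat_arg_u64_glue() reassembling the value. A hedged sketch (syscall and helper names hypothetical; each u64 counts as two arguments in the DEFINE arity):

	COMPAT_SYSCALL_DEFINE4(example_seek, unsigned int, fd,
			       compat_arg_u64_dual(offset),
			       unsigned int, whence)
	{
		return do_example_seek(fd, compat_arg_u64_glue(offset), whence);
	}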
diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h
index a3b98c86f077..13f5aa68a455 100644
--- a/include/asm-generic/div64.h
+++ b/include/asm-generic/div64.h
@@ -8,12 +8,14 @@
* Optimization for constant divisors on 32-bit machines:
* Copyright (C) 2006-2015 Nicolas Pitre
*
- * The semantics of do_div() are:
+ * The semantics of do_div() are, in C++ notation (observing that the name
+ * is a function-like macro and that the n parameter has the semantics of a
+ * C++ reference):
*
- * uint32_t do_div(uint64_t *n, uint32_t base)
+ * uint32_t do_div(uint64_t &n, uint32_t base)
* {
- * uint32_t remainder = *n % base;
- * *n = *n / base;
+ * uint32_t remainder = n % base;
+ * n = n / base;
* return remainder;
* }
*
@@ -55,17 +57,11 @@
/*
* If the divisor happens to be constant, we determine the appropriate
* inverse at compile time to turn the division into a few inline
- * multiplications which ought to be much faster. And yet only if compiling
- * with a sufficiently recent gcc version to perform proper 64-bit constant
- * propagation.
+ * multiplications which ought to be much faster.
*
* (It is unfortunate that gcc doesn't perform all this internally.)
*/
-#ifndef __div64_const32_is_OK
-#define __div64_const32_is_OK (__GNUC__ >= 4)
-#endif
-
#define __div64_const32(n, ___b) \
({ \
/* \
@@ -228,8 +224,7 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
is_power_of_2(__base)) { \
__rem = (n) & (__base - 1); \
(n) >>= ilog2(__base); \
- } else if (__div64_const32_is_OK && \
- __builtin_constant_p(__base) && \
+ } else if (__builtin_constant_p(__base) && \
__base != 0) { \
uint32_t __res_lo, __n_lo = (n); \
(n) = __div64_const32(n, __base); \
@@ -239,8 +234,9 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
} else if (likely(((n) >> 32) == 0)) { \
__rem = (uint32_t)(n) % __base; \
(n) = (uint32_t)(n) / __base; \
- } else \
+ } else { \
__rem = __div64_32(&(n), __base); \
+ } \
__rem; \
})
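Because do_div() treats n as a reference, callers pass an lvalue that is rewritten to the quotient while the remainder is returned. A minimal sketch (divisor and names illustrative):

	static inline u32 example_split(u64 *total)
	{
		u64 n = *total;
		u32 rem = do_div(n, 512);	/* n now holds the quotient */

		*total = n;
		return rem;
	}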
diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h
deleted file mode 100644
index f24b0f9a4f05..000000000000
--- a/include/asm-generic/dma-contiguous.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_DMA_CONTIGUOUS_H
-#define _ASM_GENERIC_DMA_CONTIGUOUS_H
-
-#include <linux/types.h>
-
-static inline void
-dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
-
-#endif
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
index 9def22e6e2b3..9d0479f50f97 100644
--- a/include/asm-generic/early_ioremap.h
+++ b/include/asm-generic/early_ioremap.h
@@ -19,12 +19,6 @@ extern void *early_memremap_prot(resource_size_t phys_addr,
extern void early_iounmap(void __iomem *addr, unsigned long size);
extern void early_memunmap(void *addr, unsigned long size);
-/*
- * Weak function called by early_ioremap_reset(). It does nothing, but
- * architectures may provide their own version to do any needed cleanups.
- */
-extern void early_ioremap_shutdown(void);
-
#if defined(CONFIG_GENERIC_EARLY_IOREMAP) && defined(CONFIG_MMU)
/* Arch-specific initialization */
extern void early_ioremap_init(void);
diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h
index 80ca61058dd2..fbca56bd9cbc 100644
--- a/include/asm-generic/error-injection.h
+++ b/include/asm-generic/error-injection.h
@@ -20,16 +20,16 @@ struct pt_regs;
#ifdef CONFIG_FUNCTION_ERROR_INJECTION
/*
- * Whitelist ganerating macro. Specify functions which can be
+ * Whitelist generating macro. Specify functions which can be
* error-injectable using this macro.
*/
#define ALLOW_ERROR_INJECTION(fname, _etype) \
static struct error_injection_entry __used \
- __attribute__((__section__("_error_injection_whitelist"))) \
+ __section("_error_injection_whitelist") \
_eil_addr_##fname = { \
.addr = (unsigned long)fname, \
.etype = EI_ETYPE_##_etype, \
- };
+ }
void override_function_with_return(struct pt_regs *regs);
#else
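Note that the trailing semicolon moved out of the macro body, so call sites now terminate the statement themselves. A hedged sketch of opting a function into error injection (function name hypothetical):

	static int example_setup(void)
	{
		return 0;
	}
	ALLOW_ERROR_INJECTION(example_setup, ERRNO);	/* semicolon at call site */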
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index 365345f9a9e3..5e4b1f2369d2 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -2,6 +2,14 @@
#ifndef __ASM_GENERIC_EXPORT_H
#define __ASM_GENERIC_EXPORT_H
+/*
+ * This comment block is used by fixdep. Please do not remove.
+ *
+ * When CONFIG_MODVERSIONS is changed from n to y, all source files having
+ * EXPORT_SYMBOL variants must be re-compiled because genksyms is run as a
+ * side effect of the *.o build rule.
+ */
+
#ifndef KSYM_FUNC
#define KSYM_FUNC(x) x
#endif
@@ -12,9 +20,6 @@
#else
#define KSYM_ALIGN 4
#endif
-#ifndef KCRC_ALIGN
-#define KCRC_ALIGN 4
-#endif
.macro __put, val, name
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
@@ -33,7 +38,7 @@
*/
.macro ___EXPORT_SYMBOL name,val,sec
-#ifdef CONFIG_MODULES
+#if defined(CONFIG_MODULES) && !defined(__DISABLE_EXPORTS)
.section ___ksymtab\sec+\name,"a"
.balign KSYM_ALIGN
__ksymtab_\name:
@@ -43,17 +48,6 @@ __ksymtab_\name:
__kstrtab_\name:
.asciz "\name"
.previous
-#ifdef CONFIG_MODVERSIONS
- .section ___kcrctab\sec+\name,"a"
- .balign KCRC_ALIGN
-#if defined(CONFIG_MODULE_REL_CRCS)
- .long __crc_\name - .
-#else
- .long __crc_\name
-#endif
- .weak __crc_\name
- .previous
-#endif
#endif
.endm
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index 02970b11f71f..2a19215baae5 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -6,15 +6,22 @@
#include <linux/uaccess.h>
#include <asm/errno.h>
+#ifndef futex_atomic_cmpxchg_inatomic
#ifndef CONFIG_SMP
/*
* The following implementation is only for uniprocessor machines.
* It relies on preempt_disable() ensuring mutual exclusion.
*
*/
+#define futex_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval) \
+ futex_atomic_cmpxchg_inatomic_local(uval, uaddr, oldval, newval)
+#define arch_futex_atomic_op_inuser(op, oparg, oval, uaddr) \
+ futex_atomic_op_inuser_local(op, oparg, oval, uaddr)
+#endif /* CONFIG_SMP */
+#endif
/**
- * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
+ * futex_atomic_op_inuser_local() - Atomic arithmetic operation with constant
* argument and comparison of the previous
* futex value with another constant.
*
@@ -28,13 +35,12 @@
* -ENOSYS - Operation not supported
*/
static inline int
-arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
+futex_atomic_op_inuser_local(int op, u32 oparg, int *oval, u32 __user *uaddr)
{
int oldval, ret;
u32 tmp;
preempt_disable();
- pagefault_disable();
ret = -EFAULT;
if (unlikely(get_user(oldval, uaddr) != 0))
@@ -67,7 +73,6 @@ arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
ret = -EFAULT;
out_pagefault_enable:
- pagefault_enable();
preempt_enable();
if (ret == 0)
@@ -77,7 +82,7 @@ out_pagefault_enable:
}
/**
- * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the
+ * futex_atomic_cmpxchg_inatomic_local() - Compare and exchange the content of the
* uaddr with newval if the current value is
* oldval.
* @uval: pointer to store content of @uaddr
@@ -89,10 +94,9 @@ out_pagefault_enable:
* 0 - On success
* -EFAULT - User access resulted in a page fault
* -EAGAIN - Atomic operation was unable to complete due to contention
- * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
*/
static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+futex_atomic_cmpxchg_inatomic_local(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
u32 val;
@@ -114,19 +118,4 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return 0;
}
-#else
-static inline int
-arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
-{
- return -ENOSYS;
-}
-
-static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
-{
- return -ENOSYS;
-}
-
-#endif /* CONFIG_SMP */
#endif
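After this restructuring, the preempt-based local helpers double as the arch implementation on uniprocessor builds, and an architecture with real atomics opts out by defining futex_atomic_cmpxchg_inatomic before this header is seen. A hedged sketch of that arch-side pattern (names illustrative):

	/* arch/foo/include/asm/futex.h (hypothetical) */
	#define futex_atomic_cmpxchg_inatomic	foo_futex_cmpxchg_inatomic
	#define arch_futex_atomic_op_inuser	foo_futex_atomic_op_inuser
	#include <asm-generic/futex.h>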
diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
index e9f20b813a69..f2979e3a96b6 100644
--- a/include/asm-generic/getorder.h
+++ b/include/asm-generic/getorder.h
@@ -26,7 +26,7 @@
*
* The result is undefined if the size is 0.
*/
-static inline __attribute_const__ int get_order(unsigned long size)
+static __always_inline __attribute_const__ int get_order(unsigned long size)
{
if (__builtin_constant_p(size)) {
if (!size)
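Upgrading get_order() to __always_inline guarantees the __builtin_constant_p() branch folds at compile time even when the compiler would not otherwise inline. Typical usage, as a hedged sketch (helper name illustrative):

	static inline struct page *example_alloc(size_t bytes)
	{
		return alloc_pages(GFP_KERNEL, get_order(bytes));
	}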
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 19eadac415c4..aea9aee1f3e9 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -2,10 +2,8 @@
#ifndef _ASM_GENERIC_GPIO_H
#define _ASM_GENERIC_GPIO_H
-#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/of.h>
#ifdef CONFIG_GPIOLIB
@@ -140,6 +138,8 @@ static inline void gpio_unexport(unsigned gpio)
#else /* !CONFIG_GPIOLIB */
+#include <linux/kernel.h>
+
static inline bool gpio_is_valid(int number)
{
/* only non-negative numbers are valid */
diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h
index d14214dfc10b..7317e8258b48 100644
--- a/include/asm-generic/hardirq.h
+++ b/include/asm-generic/hardirq.h
@@ -7,9 +7,13 @@
typedef struct {
unsigned int __softirq_pending;
+#ifdef ARCH_WANTS_NMI_IRQSTAT
+ unsigned int __nmi_count;
+#endif
} ____cacheline_aligned irq_cpustat_t;
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
+
#include <linux/irq.h>
#ifndef ack_bad_irq
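With irq_cpustat_t declared per-CPU here directly instead of via linux/irq_cpustat.h, an architecture that defines ARCH_WANTS_NMI_IRQSTAT can account NMIs in the new field. A hedged sketch (assumes ARCH_WANTS_NMI_IRQSTAT is defined):

	static inline void example_nmi_account(void)
	{
		__this_cpu_inc(irq_stat.__nmi_count);
	}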
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index 822f433ac95c..a57d667addd2 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -2,6 +2,9 @@
#ifndef _ASM_GENERIC_HUGETLB_H
#define _ASM_GENERIC_HUGETLB_H
+#include <linux/swap.h>
+#include <linux/swapops.h>
+
static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
{
return mk_pte(page, pgprot);
@@ -32,6 +35,21 @@ static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
return pte_modify(pte, newprot);
}
+static inline pte_t huge_pte_mkuffd_wp(pte_t pte)
+{
+ return pte_mkuffd_wp(pte);
+}
+
+static inline pte_t huge_pte_clear_uffd_wp(pte_t pte)
+{
+ return pte_clear_uffd_wp(pte);
+}
+
+static inline int huge_pte_uffd_wp(pte_t pte)
+{
+ return pte_uffd_wp(pte);
+}
+
#ifndef __HAVE_ARCH_HUGE_PTE_CLEAR
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz)
@@ -66,10 +84,10 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
#endif
#ifndef __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
-static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
- ptep_clear_flush(vma, addr, ptep);
+ return ptep_clear_flush(vma, addr, ptep);
}
#endif
@@ -80,6 +98,12 @@ static inline int huge_pte_none(pte_t pte)
}
#endif
+/* Please refer to comments above pte_none_mostly() for the usage */
+static inline int huge_pte_none_mostly(pte_t pte)
+{
+ return huge_pte_none(pte) || is_pte_marker(pte);
+}
+
#ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
@@ -122,7 +146,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
#ifndef __HAVE_ARCH_HUGE_PTEP_GET
static inline pte_t huge_ptep_get(pte_t *ptep)
{
- return *ptep;
+ return ptep_get(ptep);
}
#endif
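huge_pte_none_mostly() mirrors pte_none_mostly(): a PTE marker (for example a uffd-wp marker) counts as an empty entry for install-time checks. A hedged sketch of such a check (function name illustrative):

	static inline bool example_slot_is_free(pte_t *ptep)
	{
		/* markers count as "mostly none" alongside truly empty PTEs */
		return huge_pte_none_mostly(huge_ptep_get(ptep));
	}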
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
new file mode 100644
index 000000000000..b17c6eeb9afa
--- /dev/null
+++ b/include/asm-generic/hyperv-tlfs.h
@@ -0,0 +1,793 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This file contains definitions from Hyper-V Hypervisor Top-Level Functional
+ * Specification (TLFS):
+ * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
+ */
+
+#ifndef _ASM_GENERIC_HYPERV_TLFS_H
+#define _ASM_GENERIC_HYPERV_TLFS_H
+
+#include <linux/types.h>
+#include <linux/bits.h>
+#include <linux/time64.h>
+
+/*
+ * While not explicitly listed in the TLFS, Hyper-V always runs with a page size
+ * of 4096. These definitions are used when communicating with Hyper-V using
+ * guest physical pages and guest physical page addresses, since the guest page
+ * size may not be 4096 on all architectures.
+ */
+#define HV_HYP_PAGE_SHIFT 12
+#define HV_HYP_PAGE_SIZE BIT(HV_HYP_PAGE_SHIFT)
+#define HV_HYP_PAGE_MASK (~(HV_HYP_PAGE_SIZE - 1))
+
+/*
+ * Hyper-V provides two categories of flags relevant to guest VMs. The
+ * "Features" category indicates specific functionality that is available
+ * to guests on this particular instance of Hyper-V. The "Features"
+ * are presented in four groups, each of which is 32 bits. The group A
+ * and B definitions are common across architectures and are listed here.
+ * However, not all flags are relevant on all architectures.
+ *
+ * Groups C and D vary across architectures and are listed in the
+ * architecture specific portion of hyperv-tlfs.h. Some of these flags exist
+ * on multiple architectures, but the bit positions are different so they
+ * cannot appear in the generic portion of hyperv-tlfs.h.
+ *
+ * The "Enlightenments" category provides recommendations on whether to use
+ * specific enlightenments that are available. The Enlightenments are a single
+ * group of 32 bits, but they vary across architectures and are listed in
+ * the architecture specific portion of hyperv-tlfs.h.
+ */
+
+/*
+ * Group A Features.
+ */
+
+/* VP Runtime register available */
+#define HV_MSR_VP_RUNTIME_AVAILABLE BIT(0)
+/* Partition Reference Counter available */
+#define HV_MSR_TIME_REF_COUNT_AVAILABLE BIT(1)
+/* Basic SynIC register available */
+#define HV_MSR_SYNIC_AVAILABLE BIT(2)
+/* Synthetic Timer registers available */
+#define HV_MSR_SYNTIMER_AVAILABLE BIT(3)
+/* Virtual APIC assist and VP assist page registers available */
+#define HV_MSR_APIC_ACCESS_AVAILABLE BIT(4)
+/* Hypercall and Guest OS ID registers available */
+#define HV_MSR_HYPERCALL_AVAILABLE BIT(5)
+/* Access virtual processor index register available */
+#define HV_MSR_VP_INDEX_AVAILABLE BIT(6)
+/* Virtual system reset register available */
+#define HV_MSR_RESET_AVAILABLE BIT(7)
+/* Access statistics page registers available */
+#define HV_MSR_STAT_PAGES_AVAILABLE BIT(8)
+/* Partition reference TSC register is available */
+#define HV_MSR_REFERENCE_TSC_AVAILABLE BIT(9)
+/* Partition Guest IDLE register is available */
+#define HV_MSR_GUEST_IDLE_AVAILABLE BIT(10)
+/* Partition local APIC and TSC frequency registers available */
+#define HV_ACCESS_FREQUENCY_MSRS BIT(11)
+/* AccessReenlightenmentControls privilege */
+#define HV_ACCESS_REENLIGHTENMENT BIT(13)
+/* AccessTscInvariantControls privilege */
+#define HV_ACCESS_TSC_INVARIANT BIT(15)
+
+/*
+ * Group B features.
+ */
+#define HV_CREATE_PARTITIONS BIT(0)
+#define HV_ACCESS_PARTITION_ID BIT(1)
+#define HV_ACCESS_MEMORY_POOL BIT(2)
+#define HV_ADJUST_MESSAGE_BUFFERS BIT(3)
+#define HV_POST_MESSAGES BIT(4)
+#define HV_SIGNAL_EVENTS BIT(5)
+#define HV_CREATE_PORT BIT(6)
+#define HV_CONNECT_PORT BIT(7)
+#define HV_ACCESS_STATS BIT(8)
+#define HV_DEBUGGING BIT(11)
+#define HV_CPU_MANAGEMENT BIT(12)
+#define HV_ENABLE_EXTENDED_HYPERCALLS BIT(20)
+#define HV_ISOLATION BIT(22)
+
+/*
+ * TSC page layout.
+ */
+struct ms_hyperv_tsc_page {
+ volatile u32 tsc_sequence;
+ u32 reserved1;
+ volatile u64 tsc_scale;
+ volatile s64 tsc_offset;
+} __packed;
+
+union hv_reference_tsc_msr {
+ u64 as_uint64;
+ struct {
+ u64 enable:1;
+ u64 reserved:11;
+ u64 pfn:52;
+ } __packed;
+};
+
+/*
+ * The guest OS needs to register the guest ID with the hypervisor.
+ * The guest ID is a 64 bit entity and the structure of this ID is
+ * specified in the Hyper-V specification:
+ *
+ * msdn.microsoft.com/en-us/library/windows/hardware/ff542653%28v=vs.85%29.aspx
+ *
+ * While the current guideline does not specify how Linux guest ID(s)
+ * need to be generated, our plan is to publish the guidelines for
+ * Linux and other guest operating systems that currently are hosted
+ * on Hyper-V. The implementation here conforms to these as-yet
+ * unpublished guidelines.
+ *
+ *
+ * Bit(s)
+ * 63 - Indicates if the OS is Open Source or not; 1 is Open Source
+ * 62:56 - OS Type; Linux is 0x100
+ * 55:48 - Distro specific identification
+ * 47:16 - Linux kernel version number
+ * 15:0 - Distro specific identification
+ *
+ *
+ */
+
+#define HV_LINUX_VENDOR_ID 0x8100
+
+/*
+ * Crash notification flags.
+ */
+#define HV_CRASH_CTL_CRASH_NOTIFY_MSG BIT_ULL(62)
+#define HV_CRASH_CTL_CRASH_NOTIFY BIT_ULL(63)
+
+/* Declare the various hypercall operations. */
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003
+#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008
+#define HVCALL_SEND_IPI 0x000b
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014
+#define HVCALL_SEND_IPI_EX 0x0015
+#define HVCALL_GET_PARTITION_ID 0x0046
+#define HVCALL_DEPOSIT_MEMORY 0x0048
+#define HVCALL_CREATE_VP 0x004e
+#define HVCALL_GET_VP_REGISTERS 0x0050
+#define HVCALL_SET_VP_REGISTERS 0x0051
+#define HVCALL_POST_MESSAGE 0x005c
+#define HVCALL_SIGNAL_EVENT 0x005d
+#define HVCALL_POST_DEBUG_DATA 0x0069
+#define HVCALL_RETRIEVE_DEBUG_DATA 0x006a
+#define HVCALL_RESET_DEBUG_SESSION 0x006b
+#define HVCALL_ADD_LOGICAL_PROCESSOR 0x0076
+#define HVCALL_MAP_DEVICE_INTERRUPT 0x007c
+#define HVCALL_UNMAP_DEVICE_INTERRUPT 0x007d
+#define HVCALL_RETARGET_INTERRUPT 0x007e
+#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
+#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0
+#define HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY 0x00db
+
+/* Extended hypercalls */
+#define HV_EXT_CALL_QUERY_CAPABILITIES 0x8001
+#define HV_EXT_CALL_MEMORY_HEAT_HINT 0x8003
+
+#define HV_FLUSH_ALL_PROCESSORS BIT(0)
+#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1)
+#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2)
+#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3)
+
+/* Extended capability bits */
+#define HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT BIT(8)
+
+enum HV_GENERIC_SET_FORMAT {
+ HV_GENERIC_SET_SPARSE_4K,
+ HV_GENERIC_SET_ALL,
+};
+
+#define HV_PARTITION_ID_SELF ((u64)-1)
+#define HV_VP_INDEX_SELF ((u32)-2)
+
+#define HV_HYPERCALL_RESULT_MASK GENMASK_ULL(15, 0)
+#define HV_HYPERCALL_FAST_BIT BIT(16)
+#define HV_HYPERCALL_VARHEAD_OFFSET 17
+#define HV_HYPERCALL_VARHEAD_MASK GENMASK_ULL(26, 17)
+#define HV_HYPERCALL_RSVD0_MASK GENMASK_ULL(31, 27)
+#define HV_HYPERCALL_REP_COMP_OFFSET 32
+#define HV_HYPERCALL_REP_COMP_1 BIT_ULL(32)
+#define HV_HYPERCALL_REP_COMP_MASK GENMASK_ULL(43, 32)
+#define HV_HYPERCALL_RSVD1_MASK GENMASK_ULL(47, 44)
+#define HV_HYPERCALL_REP_START_OFFSET 48
+#define HV_HYPERCALL_REP_START_MASK GENMASK_ULL(59, 48)
+#define HV_HYPERCALL_RSVD2_MASK GENMASK_ULL(63, 60)
+#define HV_HYPERCALL_RSVD_MASK (HV_HYPERCALL_RSVD0_MASK | \
+ HV_HYPERCALL_RSVD1_MASK | \
+ HV_HYPERCALL_RSVD2_MASK)
+
+/* hypercall status code */
+#define HV_STATUS_SUCCESS 0
+#define HV_STATUS_INVALID_HYPERCALL_CODE 2
+#define HV_STATUS_INVALID_HYPERCALL_INPUT 3
+#define HV_STATUS_INVALID_ALIGNMENT 4
+#define HV_STATUS_INVALID_PARAMETER 5
+#define HV_STATUS_ACCESS_DENIED 6
+#define HV_STATUS_OPERATION_DENIED 8
+#define HV_STATUS_INSUFFICIENT_MEMORY 11
+#define HV_STATUS_INVALID_PORT_ID 17
+#define HV_STATUS_INVALID_CONNECTION_ID 18
+#define HV_STATUS_INSUFFICIENT_BUFFERS 19
+
+/*
+ * The Hyper-V TimeRefCount register and the TSC
+ * page provide a guest VM clock with 100ns tick rate
+ */
+#define HV_CLOCK_HZ (NSEC_PER_SEC/100)
+
+/* Define the number of synthetic interrupt sources. */
+#define HV_SYNIC_SINT_COUNT (16)
+/* Define the expected SynIC version. */
+#define HV_SYNIC_VERSION_1 (0x1)
+/* Valid SynIC vectors are 16-255. */
+#define HV_SYNIC_FIRST_VALID_VECTOR (16)
+
+#define HV_SYNIC_CONTROL_ENABLE (1ULL << 0)
+#define HV_SYNIC_SIMP_ENABLE (1ULL << 0)
+#define HV_SYNIC_SIEFP_ENABLE (1ULL << 0)
+#define HV_SYNIC_SINT_MASKED (1ULL << 16)
+#define HV_SYNIC_SINT_AUTO_EOI (1ULL << 17)
+#define HV_SYNIC_SINT_VECTOR_MASK (0xFF)
+
+#define HV_SYNIC_STIMER_COUNT (4)
+
+/* Define synthetic interrupt controller message constants. */
+#define HV_MESSAGE_SIZE (256)
+#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240)
+#define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30)
+
+/*
+ * Define hypervisor message types. Some of the message types
+ * are x86/x64 specific, but there's no good way to separate
+ * them out into the arch-specific version of hyperv-tlfs.h
+ * because C doesn't provide a way to extend enum types.
+ * Keeping them all in the arch neutral hyperv-tlfs.h seems
+ * the least messy compromise.
+ */
+enum hv_message_type {
+ HVMSG_NONE = 0x00000000,
+
+ /* Memory access messages. */
+ HVMSG_UNMAPPED_GPA = 0x80000000,
+ HVMSG_GPA_INTERCEPT = 0x80000001,
+
+ /* Timer notification messages. */
+ HVMSG_TIMER_EXPIRED = 0x80000010,
+
+ /* Error messages. */
+ HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020,
+ HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021,
+ HVMSG_UNSUPPORTED_FEATURE = 0x80000022,
+
+ /* Trace buffer complete messages. */
+ HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040,
+
+ /* Platform-specific processor intercept messages. */
+ HVMSG_X64_IOPORT_INTERCEPT = 0x80010000,
+ HVMSG_X64_MSR_INTERCEPT = 0x80010001,
+ HVMSG_X64_CPUID_INTERCEPT = 0x80010002,
+ HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003,
+ HVMSG_X64_APIC_EOI = 0x80010004,
+ HVMSG_X64_LEGACY_FP_ERROR = 0x80010005
+};
+
+/* Define synthetic interrupt controller message flags. */
+union hv_message_flags {
+ __u8 asu8;
+ struct {
+ __u8 msg_pending:1;
+ __u8 reserved:7;
+ } __packed;
+};
+
+/* Define port identifier type. */
+union hv_port_id {
+ __u32 asu32;
+ struct {
+ __u32 id:24;
+ __u32 reserved:8;
+ } __packed u;
+};
+
+/* Define synthetic interrupt controller message header. */
+struct hv_message_header {
+ __u32 message_type;
+ __u8 payload_size;
+ union hv_message_flags message_flags;
+ __u8 reserved[2];
+ union {
+ __u64 sender;
+ union hv_port_id port;
+ };
+} __packed;
+
+/* Define synthetic interrupt controller message format. */
+struct hv_message {
+ struct hv_message_header header;
+ union {
+ __u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
+ } u;
+} __packed;
+
+/* Define the synthetic interrupt message page layout. */
+struct hv_message_page {
+ struct hv_message sint_message[HV_SYNIC_SINT_COUNT];
+} __packed;
+
+/* Define timer message payload structure. */
+struct hv_timer_message_payload {
+ __u32 timer_index;
+ __u32 reserved;
+ __u64 expiration_time; /* When the timer expired */
+ __u64 delivery_time; /* When the message was delivered */
+} __packed;
+
+
+/* Define synthetic interrupt controller flag constants. */
+#define HV_EVENT_FLAGS_COUNT (256 * 8)
+#define HV_EVENT_FLAGS_LONG_COUNT (256 / sizeof(unsigned long))
+
+/*
+ * Synthetic timer configuration.
+ */
+union hv_stimer_config {
+ u64 as_uint64;
+ struct {
+ u64 enable:1;
+ u64 periodic:1;
+ u64 lazy:1;
+ u64 auto_enable:1;
+ u64 apic_vector:8;
+ u64 direct_mode:1;
+ u64 reserved_z0:3;
+ u64 sintx:4;
+ u64 reserved_z1:44;
+ } __packed;
+};
+
+
+/* Define the synthetic interrupt controller event flags format. */
+union hv_synic_event_flags {
+ unsigned long flags[HV_EVENT_FLAGS_LONG_COUNT];
+};
+
+/* Define SynIC control register. */
+union hv_synic_scontrol {
+ u64 as_uint64;
+ struct {
+ u64 enable:1;
+ u64 reserved:63;
+ } __packed;
+};
+
+/* Define synthetic interrupt source. */
+union hv_synic_sint {
+ u64 as_uint64;
+ struct {
+ u64 vector:8;
+ u64 reserved1:8;
+ u64 masked:1;
+ u64 auto_eoi:1;
+ u64 polling:1;
+ u64 reserved2:45;
+ } __packed;
+};
+
+/* Define the format of the SIMP register */
+union hv_synic_simp {
+ u64 as_uint64;
+ struct {
+ u64 simp_enabled:1;
+ u64 preserved:11;
+ u64 base_simp_gpa:52;
+ } __packed;
+};
+
+/* Define the format of the SIEFP register */
+union hv_synic_siefp {
+ u64 as_uint64;
+ struct {
+ u64 siefp_enabled:1;
+ u64 preserved:11;
+ u64 base_siefp_gpa:52;
+ } __packed;
+};
+
+struct hv_vpset {
+ u64 format;
+ u64 valid_bank_mask;
+ u64 bank_contents[];
+} __packed;
+
+/* HvCallSendSyntheticClusterIpi hypercall */
+struct hv_send_ipi {
+ u32 vector;
+ u32 reserved;
+ u64 cpu_mask;
+} __packed;
+
+/* HvCallSendSyntheticClusterIpiEx hypercall */
+struct hv_send_ipi_ex {
+ u32 vector;
+ u32 reserved;
+ struct hv_vpset vp_set;
+} __packed;
+
+/* HvFlushGuestPhysicalAddressSpace hypercalls */
+struct hv_guest_mapping_flush {
+ u64 address_space;
+ u64 flags;
+} __packed;
+
+/*
+ * HV_MAX_FLUSH_PAGES = "additional_pages" + 1. It's limited
+ * by the bitwidth of "additional_pages" in union hv_gpa_page_range.
+ */
+#define HV_MAX_FLUSH_PAGES (2048)
+#define HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB 0
+#define HV_GPA_PAGE_RANGE_PAGE_SIZE_1GB 1
+
+/* HvFlushGuestPhysicalAddressList, HvExtCallMemoryHeatHint hypercall */
+union hv_gpa_page_range {
+ u64 address_space;
+ struct {
+ u64 additional_pages:11;
+ u64 largepage:1;
+ u64 basepfn:52;
+ } page;
+ struct {
+ u64 reserved:12;
+ u64 page_size:1;
+ u64 reserved1:8;
+ u64 base_large_pfn:43;
+ };
+};
+
+/*
+ * All input flush parameters should fit in a single page. The max flush
+ * count equals the number of entries of union hv_gpa_page_range that can
+ * be populated into the input parameter page.
+ */
+#define HV_MAX_FLUSH_REP_COUNT ((HV_HYP_PAGE_SIZE - 2 * sizeof(u64)) / \
+ sizeof(union hv_gpa_page_range))
+
+struct hv_guest_mapping_flush_list {
+ u64 address_space;
+ u64 flags;
+ union hv_gpa_page_range gpa_list[HV_MAX_FLUSH_REP_COUNT];
+};
+
+/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */
+struct hv_tlb_flush {
+ u64 address_space;
+ u64 flags;
+ u64 processor_mask;
+ u64 gva_list[];
+} __packed;
+
+/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */
+struct hv_tlb_flush_ex {
+ u64 address_space;
+ u64 flags;
+ struct hv_vpset hv_vp_set;
+ u64 gva_list[];
+} __packed;
+
+/* HvGetPartitionId hypercall (output only) */
+struct hv_get_partition_id {
+ u64 partition_id;
+} __packed;
+
+/* HvDepositMemory hypercall */
+struct hv_deposit_memory {
+ u64 partition_id;
+ u64 gpa_page_list[];
+} __packed;
+
+struct hv_proximity_domain_flags {
+ u32 proximity_preferred : 1;
+ u32 reserved : 30;
+ u32 proximity_info_valid : 1;
+} __packed;
+
+/* Not a union in Windows but useful for zeroing */
+union hv_proximity_domain_info {
+ struct {
+ u32 domain_id;
+ struct hv_proximity_domain_flags flags;
+ };
+ u64 as_uint64;
+} __packed;
+
+struct hv_lp_startup_status {
+ u64 hv_status;
+ u64 substatus1;
+ u64 substatus2;
+ u64 substatus3;
+ u64 substatus4;
+ u64 substatus5;
+ u64 substatus6;
+} __packed;
+
+/* HvAddLogicalProcessor hypercall */
+struct hv_add_logical_processor_in {
+ u32 lp_index;
+ u32 apic_id;
+ union hv_proximity_domain_info proximity_domain_info;
+ u64 flags;
+} __packed;
+
+struct hv_add_logical_processor_out {
+ struct hv_lp_startup_status startup_status;
+} __packed;
+
+enum HV_SUBNODE_TYPE
+{
+ HvSubnodeAny = 0,
+ HvSubnodeSocket = 1,
+ HvSubnodeAmdNode = 2,
+ HvSubnodeL3 = 3,
+ HvSubnodeCount = 4,
+ HvSubnodeInvalid = -1
+};
+
+/* HvCreateVp hypercall */
+struct hv_create_vp {
+ u64 partition_id;
+ u32 vp_index;
+ u8 padding[3];
+ u8 subnode_type;
+ u64 subnode_id;
+ union hv_proximity_domain_info proximity_domain_info;
+ u64 flags;
+} __packed;
+
+enum hv_interrupt_source {
+ HV_INTERRUPT_SOURCE_MSI = 1, /* MSI and MSI-X */
+ HV_INTERRUPT_SOURCE_IOAPIC,
+};
+
+union hv_ioapic_rte {
+ u64 as_uint64;
+
+ struct {
+ u32 vector:8;
+ u32 delivery_mode:3;
+ u32 destination_mode:1;
+ u32 delivery_status:1;
+ u32 interrupt_polarity:1;
+ u32 remote_irr:1;
+ u32 trigger_mode:1;
+ u32 interrupt_mask:1;
+ u32 reserved1:15;
+
+ u32 reserved2:24;
+ u32 destination_id:8;
+ };
+
+ struct {
+ u32 low_uint32;
+ u32 high_uint32;
+ };
+} __packed;
+
+struct hv_interrupt_entry {
+ u32 source;
+ u32 reserved1;
+ union {
+ union hv_msi_entry msi_entry;
+ union hv_ioapic_rte ioapic_rte;
+ };
+} __packed;
+
+/*
+ * flags for hv_device_interrupt_target.flags
+ */
+#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1
+#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2
+
+struct hv_device_interrupt_target {
+ u32 vector;
+ u32 flags;
+ union {
+ u64 vp_mask;
+ struct hv_vpset vp_set;
+ };
+} __packed;
+
+struct hv_retarget_device_interrupt {
+ u64 partition_id; /* use "self" */
+ u64 device_id;
+ struct hv_interrupt_entry int_entry;
+ u64 reserved2;
+ struct hv_device_interrupt_target int_target;
+} __packed __aligned(8);
+
+
+/* HvGetVpRegisters hypercall input with variable size reg name list */
+struct hv_get_vp_registers_input {
+ struct {
+ u64 partitionid;
+ u32 vpindex;
+ u8 inputvtl;
+ u8 padding[3];
+ } header;
+ struct input {
+ u32 name0;
+ u32 name1;
+ } element[];
+} __packed;
+
+
+/* HvGetVpRegisters returns an array of these output elements */
+struct hv_get_vp_registers_output {
+ union {
+ struct {
+ u32 a;
+ u32 b;
+ u32 c;
+ u32 d;
+ } as32 __packed;
+ struct {
+ u64 low;
+ u64 high;
+ } as64 __packed;
+ };
+};
+
+/* HvSetVpRegisters hypercall with variable size reg name/value list */
+struct hv_set_vp_registers_input {
+ struct {
+ u64 partitionid;
+ u32 vpindex;
+ u8 inputvtl;
+ u8 padding[3];
+ } header;
+ struct {
+ u32 name;
+ u32 padding1;
+ u64 padding2;
+ u64 valuelow;
+ u64 valuehigh;
+ } element[];
+} __packed;
+
+enum hv_device_type {
+ HV_DEVICE_TYPE_LOGICAL = 0,
+ HV_DEVICE_TYPE_PCI = 1,
+ HV_DEVICE_TYPE_IOAPIC = 2,
+ HV_DEVICE_TYPE_ACPI = 3,
+};
+
+typedef u16 hv_pci_rid;
+typedef u16 hv_pci_segment;
+typedef u64 hv_logical_device_id;
+union hv_pci_bdf {
+ u16 as_uint16;
+
+ struct {
+ u8 function:3;
+ u8 device:5;
+ u8 bus;
+ };
+} __packed;
+
+union hv_pci_bus_range {
+ u16 as_uint16;
+
+ struct {
+ u8 subordinate_bus;
+ u8 secondary_bus;
+ };
+} __packed;
+
+union hv_device_id {
+ u64 as_uint64;
+
+ struct {
+ u64 reserved0:62;
+ u64 device_type:2;
+ };
+
+ /* HV_DEVICE_TYPE_LOGICAL */
+ struct {
+ u64 id:62;
+ u64 device_type:2;
+ } logical;
+
+ /* HV_DEVICE_TYPE_PCI */
+ struct {
+ union {
+ hv_pci_rid rid;
+ union hv_pci_bdf bdf;
+ };
+
+ hv_pci_segment segment;
+ union hv_pci_bus_range shadow_bus_range;
+
+ u16 phantom_function_bits:2;
+ u16 source_shadow:1;
+
+ u16 rsvdz0:11;
+ u16 device_type:2;
+ } pci;
+
+ /* HV_DEVICE_TYPE_IOAPIC */
+ struct {
+ u8 ioapic_id;
+ u8 rsvdz0;
+ u16 rsvdz1;
+ u16 rsvdz2;
+
+ u16 rsvdz3:14;
+ u16 device_type:2;
+ } ioapic;
+
+ /* HV_DEVICE_TYPE_ACPI */
+ struct {
+ u32 input_mapping_base;
+ u32 input_mapping_count:30;
+ u32 device_type:2;
+ } acpi;
+} __packed;
+
+enum hv_interrupt_trigger_mode {
+ HV_INTERRUPT_TRIGGER_MODE_EDGE = 0,
+ HV_INTERRUPT_TRIGGER_MODE_LEVEL = 1,
+};
+
+struct hv_device_interrupt_descriptor {
+ u32 interrupt_type;
+ u32 trigger_mode;
+ u32 vector_count;
+ u32 reserved;
+ struct hv_device_interrupt_target target;
+} __packed;
+
+struct hv_input_map_device_interrupt {
+ u64 partition_id;
+ u64 device_id;
+ u64 flags;
+ struct hv_interrupt_entry logical_interrupt_entry;
+ struct hv_device_interrupt_descriptor interrupt_descriptor;
+} __packed;
+
+struct hv_output_map_device_interrupt {
+ struct hv_interrupt_entry interrupt_entry;
+} __packed;
+
+struct hv_input_unmap_device_interrupt {
+ u64 partition_id;
+ u64 device_id;
+ struct hv_interrupt_entry interrupt_entry;
+} __packed;
+
+#define HV_SOURCE_SHADOW_NONE 0x0
+#define HV_SOURCE_SHADOW_BRIDGE_BUS_RANGE 0x1
+
+/*
+ * The whole argument should fit in a page to be able to pass to the hypervisor
+ * in one hypercall.
+ */
+#define HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES \
+ ((HV_HYP_PAGE_SIZE - sizeof(struct hv_memory_hint)) / \
+ sizeof(union hv_gpa_page_range))
+
+/* HvExtCallMemoryHeatHint hypercall */
+#define HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD 2
+struct hv_memory_hint {
+ u64 type:2;
+ u64 reserved:62;
+ union hv_gpa_page_range ranges[];
+} __packed;
+
+#endif
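The HV_HYPERCALL_* masks above describe the 64-bit hypercall input control word. As a hedged sketch (helper name illustrative), a rep hypercall control combines the call code with a repetition count, e.g. for HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:

	static inline u64 example_rep_control(u16 call_code, u64 rep_count)
	{
		/* call code in the low 16 bits, rep count in bits 43:32 */
		return (u64)call_code |
		       (rep_count << HV_HYPERCALL_REP_COMP_OFFSET);
	}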
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index d39ac997dda8..a68f8fbf423b 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -10,6 +10,7 @@
#include <asm/page.h> /* I/O is all done through memory accesses */
#include <linux/string.h> /* for memset() and memcpy() */
#include <linux/types.h>
+#include <linux/instruction_pointer.h>
#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
@@ -61,6 +62,44 @@
#define __io_par(v) __io_ar(v)
#endif
+/*
+ * "__DISABLE_TRACE_MMIO__" flag can be used to disable MMIO tracing for
+ * specific kernel drivers in case of excessive/unwanted logging.
+ *
+ * Usage: Add a #define flag at the beginning of the driver file.
+ * Ex: #define __DISABLE_TRACE_MMIO__
+ * #include <...>
+ * ...
+ */
+#if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__))
+#include <linux/tracepoint-defs.h>
+
+DECLARE_TRACEPOINT(rwmmio_write);
+DECLARE_TRACEPOINT(rwmmio_post_write);
+DECLARE_TRACEPOINT(rwmmio_read);
+DECLARE_TRACEPOINT(rwmmio_post_read);
+
+void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr);
+void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr);
+void log_read_mmio(u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr);
+void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr);
+
+#else
+
+static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr) {}
+static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr) {}
+static inline void log_read_mmio(u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr) {}
+static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr) {}
+
+#endif /* CONFIG_TRACE_MMIO_ACCESS */
/*
* __raw_{read,write}{b,w,l,q}() access memory in native endianness.
@@ -149,9 +188,11 @@ static inline u8 readb(const volatile void __iomem *addr)
{
u8 val;
+ log_read_mmio(8, addr, _THIS_IP_);
__io_br();
val = __raw_readb(addr);
__io_ar(val);
+ log_post_read_mmio(val, 8, addr, _THIS_IP_);
return val;
}
#endif
@@ -162,9 +203,11 @@ static inline u16 readw(const volatile void __iomem *addr)
{
u16 val;
+ log_read_mmio(16, addr, _THIS_IP_);
__io_br();
- val = __le16_to_cpu(__raw_readw(addr));
+ val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
__io_ar(val);
+ log_post_read_mmio(val, 16, addr, _THIS_IP_);
return val;
}
#endif
@@ -175,9 +218,11 @@ static inline u32 readl(const volatile void __iomem *addr)
{
u32 val;
+ log_read_mmio(32, addr, _THIS_IP_);
__io_br();
- val = __le32_to_cpu(__raw_readl(addr));
+ val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
__io_ar(val);
+ log_post_read_mmio(val, 32, addr, _THIS_IP_);
return val;
}
#endif
@@ -189,9 +234,11 @@ static inline u64 readq(const volatile void __iomem *addr)
{
u64 val;
+ log_read_mmio(64, addr, _THIS_IP_);
__io_br();
val = __le64_to_cpu(__raw_readq(addr));
__io_ar(val);
+ log_post_read_mmio(val, 64, addr, _THIS_IP_);
return val;
}
#endif
@@ -201,9 +248,11 @@ static inline u64 readq(const volatile void __iomem *addr)
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 8, addr, _THIS_IP_);
__io_bw();
__raw_writeb(value, addr);
__io_aw();
+ log_post_write_mmio(value, 8, addr, _THIS_IP_);
}
#endif
@@ -211,9 +260,11 @@ static inline void writeb(u8 value, volatile void __iomem *addr)
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 16, addr, _THIS_IP_);
__io_bw();
- __raw_writew(cpu_to_le16(value), addr);
+ __raw_writew((u16 __force)cpu_to_le16(value), addr);
__io_aw();
+ log_post_write_mmio(value, 16, addr, _THIS_IP_);
}
#endif
@@ -221,9 +272,11 @@ static inline void writew(u16 value, volatile void __iomem *addr)
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 32, addr, _THIS_IP_);
__io_bw();
- __raw_writel(__cpu_to_le32(value), addr);
+ __raw_writel((u32 __force)__cpu_to_le32(value), addr);
__io_aw();
+ log_post_write_mmio(value, 32, addr, _THIS_IP_);
}
#endif
@@ -232,9 +285,11 @@ static inline void writel(u32 value, volatile void __iomem *addr)
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 64, addr, _THIS_IP_);
__io_bw();
__raw_writeq(__cpu_to_le64(value), addr);
__io_aw();
+ log_post_write_mmio(value, 64, addr, _THIS_IP_);
}
#endif
#endif /* CONFIG_64BIT */
@@ -248,7 +303,12 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
#define readb_relaxed readb_relaxed
static inline u8 readb_relaxed(const volatile void __iomem *addr)
{
- return __raw_readb(addr);
+ u8 val;
+
+ log_read_mmio(8, addr, _THIS_IP_);
+ val = __raw_readb(addr);
+ log_post_read_mmio(val, 8, addr, _THIS_IP_);
+ return val;
}
#endif
@@ -256,7 +316,12 @@ static inline u8 readb_relaxed(const volatile void __iomem *addr)
#define readw_relaxed readw_relaxed
static inline u16 readw_relaxed(const volatile void __iomem *addr)
{
- return __le16_to_cpu(__raw_readw(addr));
+ u16 val;
+
+ log_read_mmio(16, addr, _THIS_IP_);
+ val = __le16_to_cpu(__raw_readw(addr));
+ log_post_read_mmio(val, 16, addr, _THIS_IP_);
+ return val;
}
#endif
@@ -264,7 +329,12 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr)
#define readl_relaxed readl_relaxed
static inline u32 readl_relaxed(const volatile void __iomem *addr)
{
- return __le32_to_cpu(__raw_readl(addr));
+ u32 val;
+
+ log_read_mmio(32, addr, _THIS_IP_);
+ val = __le32_to_cpu(__raw_readl(addr));
+ log_post_read_mmio(val, 32, addr, _THIS_IP_);
+ return val;
}
#endif
@@ -272,7 +342,12 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr)
#define readq_relaxed readq_relaxed
static inline u64 readq_relaxed(const volatile void __iomem *addr)
{
- return __le64_to_cpu(__raw_readq(addr));
+ u64 val;
+
+ log_read_mmio(64, addr, _THIS_IP_);
+ val = __le64_to_cpu(__raw_readq(addr));
+ log_post_read_mmio(val, 64, addr, _THIS_IP_);
+ return val;
}
#endif
@@ -280,7 +355,9 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
#define writeb_relaxed writeb_relaxed
static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 8, addr, _THIS_IP_);
__raw_writeb(value, addr);
+ log_post_write_mmio(value, 8, addr, _THIS_IP_);
}
#endif
@@ -288,7 +365,9 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
#define writew_relaxed writew_relaxed
static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 16, addr, _THIS_IP_);
__raw_writew(cpu_to_le16(value), addr);
+ log_post_write_mmio(value, 16, addr, _THIS_IP_);
}
#endif
@@ -296,7 +375,9 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
#define writel_relaxed writel_relaxed
static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 32, addr, _THIS_IP_);
__raw_writel(__cpu_to_le32(value), addr);
+ log_post_write_mmio(value, 32, addr, _THIS_IP_);
}
#endif
@@ -304,7 +385,9 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
#define writeq_relaxed writeq_relaxed
static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 64, addr, _THIS_IP_);
__raw_writeq(__cpu_to_le64(value), addr);
+ log_post_write_mmio(value, 64, addr, _THIS_IP_);
}
#endif
@@ -448,17 +531,15 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer,
#define IO_SPACE_LIMIT 0xffff
#endif
-#include <linux/logic_pio.h>
-
/*
* {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
* implemented on hardware that needs an additional delay for I/O accesses to
* take effect.
*/
-#ifndef inb
-#define inb inb
-static inline u8 inb(unsigned long addr)
+#if !defined(inb) && !defined(_inb)
+#define _inb _inb
+static inline u8 _inb(unsigned long addr)
{
u8 val;
@@ -469,35 +550,35 @@ static inline u8 inb(unsigned long addr)
}
#endif
-#ifndef inw
-#define inw inw
-static inline u16 inw(unsigned long addr)
+#if !defined(inw) && !defined(_inw)
+#define _inw _inw
+static inline u16 _inw(unsigned long addr)
{
u16 val;
__io_pbr();
- val = __le16_to_cpu(__raw_readw(PCI_IOBASE + addr));
+ val = __le16_to_cpu((__le16 __force)__raw_readw(PCI_IOBASE + addr));
__io_par(val);
return val;
}
#endif
-#ifndef inl
-#define inl inl
-static inline u32 inl(unsigned long addr)
+#if !defined(inl) && !defined(_inl)
+#define _inl _inl
+static inline u32 _inl(unsigned long addr)
{
u32 val;
__io_pbr();
- val = __le32_to_cpu(__raw_readl(PCI_IOBASE + addr));
+ val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
__io_par(val);
return val;
}
#endif
-#ifndef outb
-#define outb outb
-static inline void outb(u8 value, unsigned long addr)
+#if !defined(outb) && !defined(_outb)
+#define _outb _outb
+static inline void _outb(u8 value, unsigned long addr)
{
__io_pbw();
__raw_writeb(value, PCI_IOBASE + addr);
@@ -505,26 +586,52 @@ static inline void outb(u8 value, unsigned long addr)
}
#endif
-#ifndef outw
-#define outw outw
-static inline void outw(u16 value, unsigned long addr)
+#if !defined(outw) && !defined(_outw)
+#define _outw _outw
+static inline void _outw(u16 value, unsigned long addr)
{
__io_pbw();
- __raw_writew(cpu_to_le16(value), PCI_IOBASE + addr);
+ __raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
__io_paw();
}
#endif
-#ifndef outl
-#define outl outl
-static inline void outl(u32 value, unsigned long addr)
+#if !defined(outl) && !defined(_outl)
+#define _outl _outl
+static inline void _outl(u32 value, unsigned long addr)
{
__io_pbw();
- __raw_writel(cpu_to_le32(value), PCI_IOBASE + addr);
+ __raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
__io_paw();
}
#endif
+#include <linux/logic_pio.h>
+
+#ifndef inb
+#define inb _inb
+#endif
+
+#ifndef inw
+#define inw _inw
+#endif
+
+#ifndef inl
+#define inl _inl
+#endif
+
+#ifndef outb
+#define outb _outb
+#endif
+
+#ifndef outw
+#define outw _outw
+#endif
+
+#ifndef outl
+#define outl _outl
+#endif
+
#ifndef inb_p
#define inb_p inb_p
static inline u8 inb_p(unsigned long addr)
@@ -887,18 +994,6 @@ static inline void iowrite64_rep(volatile void __iomem *addr,
#include <linux/vmalloc.h>
#define __io_virt(x) ((void __force *)(x))
-#ifndef CONFIG_GENERIC_IOMAP
-struct pci_dev;
-extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
-
-#ifndef pci_iounmap
-#define pci_iounmap pci_iounmap
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
-{
-}
-#endif
-#endif /* CONFIG_GENERIC_IOMAP */
-
/*
* Change virtual addresses to physical addresses and vv.
* These are pretty trivial
@@ -930,7 +1025,9 @@ static inline void *phys_to_virt(unsigned long address)
*
* ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes
* for specific drivers if the architecture chooses to implement them. If they
- * are not implemented we fall back to plain ioremap.
+ * are not implemented we fall back to plain ioremap. Conversely, ioremap_np()
+ * can provide stricter non-posted write semantics if the architecture
+ * implements them.
*/
#ifndef CONFIG_MMU
#ifndef ioremap
@@ -943,14 +1040,41 @@ static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
#ifndef iounmap
#define iounmap iounmap
-static inline void iounmap(void __iomem *addr)
+static inline void iounmap(volatile void __iomem *addr)
{
}
#endif
#elif defined(CONFIG_GENERIC_IOREMAP)
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
+
+/*
+ * Arch code can implement the following two hooks when using GENERIC_IOREMAP:
+ * ioremap_allowed() returns a bool,
+ * - true means continue to remap
+ * - false means skip remap and return directly
+ * iounmap_allowed() returns a bool,
+ * - true means continue to vunmap
+ * - false means skip vunmap and return directly
+ */
+#ifndef ioremap_allowed
+#define ioremap_allowed ioremap_allowed
+static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
+ unsigned long prot)
+{
+ return true;
+}
+#endif
+
+#ifndef iounmap_allowed
+#define iounmap_allowed iounmap_allowed
+static inline bool iounmap_allowed(void *addr)
+{
+ return true;
+}
+#endif
-void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
+void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+ unsigned long prot);
void iounmap(volatile void __iomem *addr);
static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
@@ -983,6 +1107,23 @@ static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
}
#endif
+/*
+ * ioremap_np needs an explicit architecture implementation, as it
+ * requests stronger semantics than regular ioremap(). Portable drivers
+ * should instead use one of the higher-level abstractions, like
+ * devm_ioremap_resource(), to choose the correct variant for any given
+ * device and bus. Portable drivers with a good reason to want non-posted
+ * write semantics should always provide an ioremap() fallback in case
+ * ioremap_np() is not available.
+ */
+#ifndef ioremap_np
+#define ioremap_np ioremap_np
+static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
+{
+ return NULL;
+}
+#endif
+
#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioport_map
@@ -992,6 +1133,7 @@ static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
port &= IO_SPACE_LIMIT;
return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
+#define ARCH_HAS_GENERIC_IOPORT_MAP
#endif
#ifndef ioport_unmap
@@ -1006,15 +1148,10 @@ extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT_MAP */
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#ifndef xlate_dev_kmem_ptr
-#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
-static inline void *xlate_dev_kmem_ptr(void *addr)
-{
- return addr;
-}
+#ifndef CONFIG_GENERIC_IOMAP
+#ifndef pci_iounmap
+#define ARCH_WANTS_GENERIC_PCI_IOUNMAP
+#endif
#endif
#ifndef xlate_dev_mem_ptr
@@ -1032,20 +1169,6 @@ static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
}
#endif
-#ifdef CONFIG_VIRT_TO_BUS
-#ifndef virt_to_bus
-static inline unsigned long virt_to_bus(void *address)
-{
- return (unsigned long)address;
-}
-
-static inline void *bus_to_virt(unsigned long address)
-{
- return (void *)address;
-}
-#endif
-#endif
-
#ifndef memset_io
#define memset_io memset_io
/**
@@ -1098,6 +1221,8 @@ static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
}
#endif
+extern int devmem_is_allowed(unsigned long pfn);
+
#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_IO_H */
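As the tracing comment added above documents, a driver that produces excessive MMIO log traffic opts out by defining the flag before any includes:

	/* at the very top of the driver file (driver itself hypothetical) */
	#define __DISABLE_TRACE_MMIO__
	#include <linux/io.h>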
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index 9d28a5e82f73..08237ae8b840 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -26,14 +26,14 @@
* in the low address range. Architectures for which this is not
* true can't use this generic implementation.
*/
-extern unsigned int ioread8(void __iomem *);
-extern unsigned int ioread16(void __iomem *);
-extern unsigned int ioread16be(void __iomem *);
-extern unsigned int ioread32(void __iomem *);
-extern unsigned int ioread32be(void __iomem *);
+extern unsigned int ioread8(const void __iomem *);
+extern unsigned int ioread16(const void __iomem *);
+extern unsigned int ioread16be(const void __iomem *);
+extern unsigned int ioread32(const void __iomem *);
+extern unsigned int ioread32be(const void __iomem *);
#ifdef CONFIG_64BIT
-extern u64 ioread64(void __iomem *);
-extern u64 ioread64be(void __iomem *);
+extern u64 ioread64(const void __iomem *);
+extern u64 ioread64be(const void __iomem *);
#endif
#ifdef readq
@@ -41,10 +41,10 @@ extern u64 ioread64be(void __iomem *);
#define ioread64_hi_lo ioread64_hi_lo
#define ioread64be_lo_hi ioread64be_lo_hi
#define ioread64be_hi_lo ioread64be_hi_lo
-extern u64 ioread64_lo_hi(void __iomem *addr);
-extern u64 ioread64_hi_lo(void __iomem *addr);
-extern u64 ioread64be_lo_hi(void __iomem *addr);
-extern u64 ioread64be_hi_lo(void __iomem *addr);
+extern u64 ioread64_lo_hi(const void __iomem *addr);
+extern u64 ioread64_hi_lo(const void __iomem *addr);
+extern u64 ioread64be_lo_hi(const void __iomem *addr);
+extern u64 ioread64be_hi_lo(const void __iomem *addr);
#endif
extern void iowrite8(u8, void __iomem *);
@@ -79,9 +79,9 @@ extern void iowrite64be_hi_lo(u64 val, void __iomem *addr);
* memory across multiple ports, use "memcpy_toio()"
* and friends.
*/
-extern void ioread8_rep(void __iomem *port, void *buf, unsigned long count);
-extern void ioread16_rep(void __iomem *port, void *buf, unsigned long count);
-extern void ioread32_rep(void __iomem *port, void *buf, unsigned long count);
+extern void ioread8_rep(const void __iomem *port, void *buf, unsigned long count);
+extern void ioread16_rep(const void __iomem *port, void *buf, unsigned long count);
+extern void ioread32_rep(const void __iomem *port, void *buf, unsigned long count);
extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
@@ -101,14 +101,13 @@ extern void ioport_unmap(void __iomem *);
#define ioremap_wt ioremap
#endif
-#ifdef CONFIG_PCI
-/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
-struct pci_dev;
-extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
-#elif defined(CONFIG_GENERIC_IOMAP)
-struct pci_dev;
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
-{ }
+#ifndef ARCH_HAS_IOREMAP_NP
+/* See the comment in asm-generic/io.h about ioremap_np(). */
+#define ioremap_np ioremap_np
+static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
+{
+ return NULL;
+}
#endif
#include <asm-generic/pci_iomap.h>
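With the ioread*() prototypes now taking const pointers, a read-only mapping cookie can be passed without a cast. A hedged sketch (register offset illustrative):

	static inline u32 example_status(const void __iomem *regs)
	{
		return ioread32(regs + 0x04);	/* hypothetical status register */
	}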
diff --git a/include/asm-generic/kmap_size.h b/include/asm-generic/kmap_size.h
new file mode 100644
index 000000000000..6e36b2443ece
--- /dev/null
+++ b/include/asm-generic/kmap_size.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_KMAP_SIZE_H
+#define _ASM_GENERIC_KMAP_SIZE_H
+
+/* For debugging, this provides guard pages between the maps */
+#ifdef CONFIG_DEBUG_KMAP_LOCAL
+# define KM_MAX_IDX 33
+#else
+# define KM_MAX_IDX 16
+#endif
+
+#endif
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
deleted file mode 100644
index 9f95b7b63d19..000000000000
--- a/include/asm-generic/kmap_types.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_KMAP_TYPES_H
-#define _ASM_GENERIC_KMAP_TYPES_H
-
-#ifdef __WITH_KM_FENCE
-# define KM_TYPE_NR 41
-#else
-# define KM_TYPE_NR 20
-#endif
-
-#endif
diff --git a/include/asm-generic/kprobes.h b/include/asm-generic/kprobes.h
index 4a982089c95c..060eab094e5a 100644
--- a/include/asm-generic/kprobes.h
+++ b/include/asm-generic/kprobes.h
@@ -10,11 +10,11 @@
*/
# define __NOKPROBE_SYMBOL(fname) \
static unsigned long __used \
- __attribute__((__section__("_kprobe_blacklist"))) \
+ __section("_kprobe_blacklist") \
_kbl_addr_##fname = (unsigned long)fname;
# define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname)
/* Use this to forbid a kprobes attach on very low level functions */
-# define __kprobes __attribute__((__section__(".kprobes.text")))
+# define __kprobes __section(".kprobes.text")
# define nokprobe_inline __always_inline
#else
# define NOKPROBE_SYMBOL(fname)
diff --git a/include/asm-generic/kvm_types.h b/include/asm-generic/kvm_types.h
new file mode 100644
index 000000000000..2a82daf110f1
--- /dev/null
+++ b/include/asm-generic/kvm_types.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_KVM_TYPES_H
+#define _ASM_GENERIC_KVM_TYPES_H
+
+#endif
diff --git a/include/asm-generic/logic_io.h b/include/asm-generic/logic_io.h
new file mode 100644
index 000000000000..8a59b6e567df
--- /dev/null
+++ b/include/asm-generic/logic_io.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: johannes@sipsolutions.net
+ */
+#ifndef _LOGIC_IO_H
+#define _LOGIC_IO_H
+#include <linux/types.h>
+
+/* This file is meant to be included from asm/io.h */
+
+#ifdef CONFIG_INDIRECT_IOMEM
+
+#ifdef CONFIG_INDIRECT_IOMEM_FALLBACK
+/*
+ * If you want emulated IO memory to fall back to 'normal' IO memory
+ * if a region wasn't registered as emulated, then you need to have
+ * all of the real_* functions implemented.
+ */
+#if !defined(real_ioremap) || !defined(real_iounmap) || \
+ !defined(real_raw_readb) || !defined(real_raw_writeb) || \
+ !defined(real_raw_readw) || !defined(real_raw_writew) || \
+ !defined(real_raw_readl) || !defined(real_raw_writel) || \
+ (defined(CONFIG_64BIT) && \
+ (!defined(real_raw_readq) || !defined(real_raw_writeq))) || \
+ !defined(real_memset_io) || \
+ !defined(real_memcpy_fromio) || \
+ !defined(real_memcpy_toio)
+#error "Must provide fallbacks for real IO memory access"
+#endif /* defined ... */
+#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */
+
+#define ioremap ioremap
+void __iomem *ioremap(phys_addr_t offset, size_t size);
+
+#define iounmap iounmap
+void iounmap(void volatile __iomem *addr);
+
+#define __raw_readb __raw_readb
+u8 __raw_readb(const volatile void __iomem *addr);
+
+#define __raw_readw __raw_readw
+u16 __raw_readw(const volatile void __iomem *addr);
+
+#define __raw_readl __raw_readl
+u32 __raw_readl(const volatile void __iomem *addr);
+
+#ifdef CONFIG_64BIT
+#define __raw_readq __raw_readq
+u64 __raw_readq(const volatile void __iomem *addr);
+#endif /* CONFIG_64BIT */
+
+#define __raw_writeb __raw_writeb
+void __raw_writeb(u8 value, volatile void __iomem *addr);
+
+#define __raw_writew __raw_writew
+void __raw_writew(u16 value, volatile void __iomem *addr);
+
+#define __raw_writel __raw_writel
+void __raw_writel(u32 value, volatile void __iomem *addr);
+
+#ifdef CONFIG_64BIT
+#define __raw_writeq __raw_writeq
+void __raw_writeq(u64 value, volatile void __iomem *addr);
+#endif /* CONFIG_64BIT */
+
+#define memset_io memset_io
+void memset_io(volatile void __iomem *addr, int value, size_t size);
+
+#define memcpy_fromio memcpy_fromio
+void memcpy_fromio(void *buffer, const volatile void __iomem *addr,
+ size_t size);
+
+#define memcpy_toio memcpy_toio
+void memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size);
+
+#endif /* CONFIG_INDIRECT_IOMEM */
+#endif /* _LOGIC_IO_H */
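
[Editor's note] For the fallback configuration, every real_* hook must be defined before this header is pulled in from asm/io.h, or the #error above fires. A hedged sketch of that wiring, where the arch_phys_* names stand in for an architecture's direct accessors (they are hypothetical):

/* Sketch: route the real_* hooks to the platform's direct accessors,
 * then let logic_io.h supply the indirection layer on top. */
#define real_ioremap(offset, size)	arch_phys_ioremap(offset, size)
#define real_iounmap(addr)		arch_phys_iounmap(addr)
#define real_raw_readb(addr)		arch_phys_readb(addr)
#define real_raw_writeb(val, addr)	arch_phys_writeb(val, addr)
/* ...likewise for the w/l (and q on 64-bit) variants and the
 * memset_io/memcpy_fromio/memcpy_toio helpers... */

#include <asm-generic/logic_io.h>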
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 7637fb46ba4f..a2c8ed60233a 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -6,47 +6,18 @@
#ifndef __ASSEMBLY__
+/*
+ * Supports 3 memory models: FLATMEM, SPARSEMEM_VMEMMAP and SPARSEMEM.
+ */
#if defined(CONFIG_FLATMEM)
#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET (0UL)
#endif
-#elif defined(CONFIG_DISCONTIGMEM)
-
-#ifndef arch_pfn_to_nid
-#define arch_pfn_to_nid(pfn) pfn_to_nid(pfn)
-#endif
-
-#ifndef arch_local_page_offset
-#define arch_local_page_offset(pfn, nid) \
- ((pfn) - NODE_DATA(nid)->node_start_pfn)
-#endif
-
-#endif /* CONFIG_DISCONTIGMEM */
-
-/*
- * supports 3 memory models.
- */
-#if defined(CONFIG_FLATMEM)
-
#define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \
ARCH_PFN_OFFSET)
-#elif defined(CONFIG_DISCONTIGMEM)
-
-#define __pfn_to_page(pfn) \
-({ unsigned long __pfn = (pfn); \
- unsigned long __nid = arch_pfn_to_nid(__pfn); \
- NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
-})
-
-#define __page_to_pfn(pg) \
-({ const struct page *__pg = (pg); \
- struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \
- (unsigned long)(__pg - __pgdat->node_mem_map) + \
- __pgdat->node_start_pfn; \
-})
#elif defined(CONFIG_SPARSEMEM_VMEMMAP)
@@ -70,7 +41,7 @@
struct mem_section *__sec = __pfn_to_section(__pfn); \
__section_mem_map_addr(__sec) + __pfn; \
})
-#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
+#endif /* CONFIG_FLATMEM/SPARSEMEM */
/*
* Convert a physical address to a Page Frame Number and back
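
[Editor's note] With DISCONTIGMEM gone, the FLATMEM case is plain array arithmetic over mem_map. A hedged expansion of the two macros above into equivalent functions (the example_* names are hypothetical):

/* Sketch: what FLATMEM's __pfn_to_page()/__page_to_pfn() boil down to. */
static inline struct page *example_pfn_to_page(unsigned long pfn)
{
	return mem_map + (pfn - ARCH_PFN_OFFSET);
}

static inline unsigned long example_page_to_pfn(const struct page *page)
{
	return (unsigned long)(page - mem_map) + ARCH_PFN_OFFSET;
}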
diff --git a/include/asm-generic/mm-arch-hooks.h b/include/asm-generic/mm-arch-hooks.h
deleted file mode 100644
index 5ff0e5193f85..000000000000
--- a/include/asm-generic/mm-arch-hooks.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Architecture specific mm hooks
- */
-
-#ifndef _ASM_GENERIC_MM_ARCH_HOOKS_H
-#define _ASM_GENERIC_MM_ARCH_HOOKS_H
-
-/*
- * This file should be included through arch/../include/asm/Kbuild for
- * the architecture which doesn't need specific mm hooks.
- *
- * In that case, the generic hooks defined in include/linux/mm-arch-hooks.h
- * are used.
- */
-
-#endif /* _ASM_GENERIC_MM_ARCH_HOOKS_H */
diff --git a/include/asm-generic/mmiowb.h b/include/asm-generic/mmiowb.h
index 9439ff037b2d..5698fca3bf56 100644
--- a/include/asm-generic/mmiowb.h
+++ b/include/asm-generic/mmiowb.h
@@ -27,7 +27,7 @@
#include <asm/smp.h>
DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);
-#define __mmiowb_state() this_cpu_ptr(&__mmiowb_state)
+#define __mmiowb_state() raw_cpu_ptr(&__mmiowb_state)
#else
#define __mmiowb_state() arch_mmiowb_state()
#endif /* arch_mmiowb_state */
@@ -35,7 +35,9 @@ DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);
static inline void mmiowb_set_pending(void)
{
struct mmiowb_state *ms = __mmiowb_state();
- ms->mmiowb_pending = ms->nesting_count;
+
+ if (likely(ms->nesting_count))
+ ms->mmiowb_pending = ms->nesting_count;
}
static inline void mmiowb_spin_lock(void)
diff --git a/include/asm-generic/mmu_context.h b/include/asm-generic/mmu_context.h
index 6be9106fb6fb..91727065bacb 100644
--- a/include/asm-generic/mmu_context.h
+++ b/include/asm-generic/mmu_context.h
@@ -3,44 +3,74 @@
#define __ASM_GENERIC_MMU_CONTEXT_H
/*
- * Generic hooks for NOMMU architectures, which do not need to do
- * anything special here.
+ * Generic hooks to implement no-op functionality.
*/
-#include <asm-generic/mm_hooks.h>
-
struct task_struct;
struct mm_struct;
+/*
+ * enter_lazy_tlb - Called when "tsk" is about to enter lazy TLB mode.
+ *
+ * @mm: the currently active mm context which is becoming lazy
+ * @tsk: task which is entering lazy tlb
+ *
+ * tsk->mm will be NULL
+ */
+#ifndef enter_lazy_tlb
static inline void enter_lazy_tlb(struct mm_struct *mm,
struct task_struct *tsk)
{
}
+#endif
+/**
+ * init_new_context - Initialize context of a new mm_struct.
+ * @tsk: task struct for the mm
+ * @mm: the new mm struct
+ * @return: 0 on success, -errno on failure
+ */
+#ifndef init_new_context
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
return 0;
}
+#endif
+/**
+ * destroy_context - Undo init_new_context when the mm is going away
+ * @mm: old mm struct
+ */
+#ifndef destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
}
+#endif
-static inline void deactivate_mm(struct task_struct *task,
- struct mm_struct *mm)
-{
-}
-
-static inline void switch_mm(struct mm_struct *prev,
- struct mm_struct *next,
- struct task_struct *tsk)
+/**
+ * activate_mm - called after exec switches the current task to a new mm, to switch to it
+ * @prev_mm: previous mm of this task
+ * @next_mm: new mm
+ */
+#ifndef activate_mm
+static inline void activate_mm(struct mm_struct *prev_mm,
+ struct mm_struct *next_mm)
{
+ switch_mm(prev_mm, next_mm, current);
}
+#endif
-static inline void activate_mm(struct mm_struct *prev_mm,
- struct mm_struct *next_mm)
+/**
+ * deactivate_mm - called when an mm is released after exit or exec switches away from it
+ * @tsk: the task
+ * @mm: the old mm
+ */
+#ifndef deactivate_mm
+static inline void deactivate_mm(struct task_struct *tsk,
+ struct mm_struct *mm)
{
}
+#endif
#endif /* __ASM_GENERIC_MMU_CONTEXT_H */
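
[Editor's note] Because each hook is now guarded by #ifndef, an architecture overrides one by providing its own definition plus a same-named macro before including the generic header; the remaining hooks stay as no-ops. A hedged sketch (the context.id field is a hypothetical arch-specific member):

/* Sketch: an arch's asm/mmu_context.h overriding a single hook. */
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mm->context.id = 0;	/* hypothetical per-arch state */
	return 0;
}
#define init_new_context init_new_context

#include <asm-generic/mmu_context.h>	/* supplies the other no-ops */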
diff --git a/include/asm-generic/module.lds.h b/include/asm-generic/module.lds.h
new file mode 100644
index 000000000000..f210d5c1b78b
--- /dev/null
+++ b/include/asm-generic/module.lds.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_GENERIC_MODULE_LDS_H
+#define __ASM_GENERIC_MODULE_LDS_H
+
+/*
+ * <asm/module.lds.h> can specify arch-specific sections for linking modules.
+ * Empty for the asm-generic header.
+ */
+
+#endif /* __ASM_GENERIC_MODULE_LDS_H */
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index b3f1082cc435..bfb9eb9d7215 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -22,38 +22,99 @@
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
+#include <linux/nmi.h>
#include <asm/ptrace.h>
#include <asm/hyperv-tlfs.h>
struct ms_hyperv_info {
u32 features;
+ u32 priv_high;
u32 misc_features;
u32 hints;
u32 nested_features;
u32 max_vp_index;
u32 max_lp_index;
+ u32 isolation_config_a;
+ union {
+ u32 isolation_config_b;
+ struct {
+ u32 cvm_type : 4;
+ u32 reserved1 : 1;
+ u32 shared_gpa_boundary_active : 1;
+ u32 shared_gpa_boundary_bits : 6;
+ u32 reserved2 : 20;
+ };
+ };
+ u64 shared_gpa_boundary;
};
extern struct ms_hyperv_info ms_hyperv;
+extern void * __percpu *hyperv_pcpu_input_arg;
+extern void * __percpu *hyperv_pcpu_output_arg;
+
extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
+extern bool hv_isolation_type_snp(void);
+
+/* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. */
+static inline int hv_result(u64 status)
+{
+ return status & HV_HYPERCALL_RESULT_MASK;
+}
+static inline bool hv_result_success(u64 status)
+{
+ return hv_result(status) == HV_STATUS_SUCCESS;
+}
+
+static inline unsigned int hv_repcomp(u64 status)
+{
+ /* Bits [43:32] of status have 'Reps completed' data. */
+ return (status & HV_HYPERCALL_REP_COMP_MASK) >>
+ HV_HYPERCALL_REP_COMP_OFFSET;
+}
+
+/*
+ * Rep hypercalls. Callers of this function are expected to ensure that
+ * rep_count and varhead_size comply with the Hyper-V hypercall definition.
+ */
+static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
+ void *input, void *output)
+{
+ u64 control = code;
+ u64 status;
+ u16 rep_comp;
+
+ control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
+ control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;
+
+ do {
+ status = hv_do_hypercall(control, input, output);
+ if (!hv_result_success(status))
+ return status;
+
+ rep_comp = hv_repcomp(status);
+
+ control &= ~HV_HYPERCALL_REP_START_MASK;
+ control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;
+
+ touch_nmi_watchdog();
+ } while (rep_comp < rep_count);
+
+ return status;
+}
/* Generate the guest OS identifier as described in the Hyper-V TLFS */
-static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
- __u64 d_info2)
+static inline u64 hv_generate_guest_id(u64 kernel_version)
{
- __u64 guest_id = 0;
+ u64 guest_id;
- guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
- guest_id |= (d_info1 << 48);
+ guest_id = (((u64)HV_LINUX_VENDOR_ID) << 48);
guest_id |= (kernel_version << 16);
- guest_id |= d_info2;
return guest_id;
}
-
/* Free the message slot and signal end-of-message if required */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
@@ -85,20 +146,25 @@ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
* possibly deliver another msg from the
* hypervisor
*/
- hv_signal_eom();
+ hv_set_register(HV_REGISTER_EOM, 0);
}
}
-void hv_setup_vmbus_irq(void (*handler)(void));
-void hv_remove_vmbus_irq(void);
-void hv_enable_vmbus_irq(void);
-void hv_disable_vmbus_irq(void);
+void hv_setup_vmbus_handler(void (*handler)(void));
+void hv_remove_vmbus_handler(void);
+void hv_setup_stimer0_handler(void (*handler)(void));
+void hv_remove_stimer0_handler(void);
void hv_setup_kexec_handler(void (*handler)(void));
void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);
+extern int vmbus_interrupt;
+extern int vmbus_irq;
+
+extern bool hv_root_partition;
+
#if IS_ENABLED(CONFIG_HYPERV)
/*
* Hypervisor's notion of virtual processor ID is different from
@@ -109,9 +175,20 @@ void hv_remove_crash_handler(void);
extern u32 *hv_vp_index;
extern u32 hv_max_vp_index;
+extern u64 (*hv_read_reference_counter)(void);
+
/* Sentinel value for an uninitialized entry in hv_vp_index array */
#define VP_INVAL U32_MAX
+int __init hv_common_init(void);
+void __init hv_common_free(void);
+int hv_common_cpu_init(unsigned int cpu);
+int hv_common_cpu_die(unsigned int cpu);
+
+void *hv_alloc_hyperv_page(void);
+void *hv_alloc_hyperv_zeroed_page(void);
+void hv_free_hyperv_page(unsigned long addr);
+
/**
* hv_cpu_number_to_vp_number() - Map CPU to VP.
* @cpu_number: CPU number in Linux terms
@@ -128,10 +205,12 @@ static inline int hv_cpu_number_to_vp_number(int cpu_number)
return hv_vp_index[cpu_number];
}
-static inline int cpumask_to_vpset(struct hv_vpset *vpset,
- const struct cpumask *cpus)
+static inline int __cpumask_to_vpset(struct hv_vpset *vpset,
+ const struct cpumask *cpus,
+ bool exclude_self)
{
int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
+ int this_cpu = smp_processor_id();
/* valid_bank_mask can represent up to 64 banks */
if (hv_max_vp_index / 64 >= 64)
@@ -149,6 +228,8 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
* Some banks may end up being empty but this is acceptable.
*/
for_each_cpu(cpu, cpus) {
+ if (exclude_self && cpu == this_cpu)
+ continue;
vcpu = hv_cpu_number_to_vp_number(cpu);
if (vcpu == VP_INVAL)
return -1;
@@ -163,21 +244,40 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
return nr_bank;
}
-void hyperv_report_panic(struct pt_regs *regs, long err);
-void hyperv_report_panic_msg(phys_addr_t pa, size_t size);
+static inline int cpumask_to_vpset(struct hv_vpset *vpset,
+ const struct cpumask *cpus)
+{
+ return __cpumask_to_vpset(vpset, cpus, false);
+}
+
+static inline int cpumask_to_vpset_noself(struct hv_vpset *vpset,
+ const struct cpumask *cpus)
+{
+ WARN_ON_ONCE(preemptible());
+ return __cpumask_to_vpset(vpset, cpus, true);
+}
+
+void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
bool hv_is_hyperv_initialized(void);
bool hv_is_hibernation_supported(void);
+enum hv_isolation_type hv_get_isolation_type(void);
+bool hv_is_isolation_supported(void);
+bool hv_isolation_type_snp(void);
+u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
void hyperv_cleanup(void);
-void hv_setup_sched_clock(void *sched_clock);
+bool hv_query_ext_cap(u64 cap_query);
+void hv_setup_dma_ops(struct device *dev, bool coherent);
+void *hv_map_memory(void *addr, unsigned long size);
+void hv_unmap_memory(void *addr);
#else /* CONFIG_HYPERV */
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline bool hv_is_hibernation_supported(void) { return false; }
static inline void hyperv_cleanup(void) {}
+static inline bool hv_is_isolation_supported(void) { return false; }
+static inline enum hv_isolation_type hv_get_isolation_type(void)
+{
+ return HV_ISOLATION_TYPE_NONE;
+}
#endif /* CONFIG_HYPERV */
-#if IS_ENABLED(CONFIG_HYPERV)
-extern int hv_setup_stimer0_irq(int *irq, int *vector, void (*handler)(void));
-extern void hv_remove_stimer0_irq(int irq);
-#endif
-
#endif
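
[Editor's note] Since hv_do_rep_hypercall() resumes partially completed batches itself, a caller only builds the input page and checks the final status. A hedged usage sketch (EXAMPLE_HVCALL_CODE is a placeholder, not a value from the TLFS):

/* Sketch: issue a batched ("rep") hypercall and map its status. */
static int example_rep_call(void *input_page, u16 entries)
{
	u64 status;

	status = hv_do_rep_hypercall(EXAMPLE_HVCALL_CODE, entries, 0,
				     input_page, NULL);
	return hv_result_success(status) ? 0 : hv_result(status);
}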
diff --git a/include/asm-generic/msi.h b/include/asm-generic/msi.h
index e6795f088bdd..bf910d47e900 100644
--- a/include/asm-generic/msi.h
+++ b/include/asm-generic/msi.h
@@ -4,6 +4,8 @@
#include <linux/types.h>
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+
#ifndef NUM_MSI_ALLOC_SCRATCHPAD_REGS
# define NUM_MSI_ALLOC_SCRATCHPAD_REGS 2
#endif
@@ -22,12 +24,18 @@ struct msi_desc;
typedef struct msi_alloc_info {
struct msi_desc *desc;
irq_hw_number_t hwirq;
+ unsigned long flags;
union {
unsigned long ul;
void *ptr;
} scratchpad[NUM_MSI_ALLOC_SCRATCHPAD_REGS];
} msi_alloc_info_t;
+/* Device generating MSIs is proxying for another device */
+#define MSI_ALLOC_FLAGS_PROXY_DEVICE (1UL << 0)
+
#define GENERIC_MSI_DOMAIN_OPS 1
+#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
+
#endif
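
[Editor's note] The new flags word lets MSI allocation paths record properties of a request; the one flag defined so far marks interrupts raised on behalf of another device. A hedged sketch of a prepare-style callback setting it (example_msi_prepare is hypothetical):

/* Sketch: tag an allocation as coming from a proxying device. */
static void example_msi_prepare(msi_alloc_info_t *info, bool is_proxy)
{
	if (is_proxy)
		info->flags |= MSI_ALLOC_FLAGS_PROXY_DEVICE;
}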
diff --git a/include/asm-generic/nommu_context.h b/include/asm-generic/nommu_context.h
new file mode 100644
index 000000000000..4f916f9e16cd
--- /dev/null
+++ b/include/asm-generic/nommu_context.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_NOMMU_H
+#define __ASM_GENERIC_NOMMU_H
+
+/*
+ * Generic hooks for NOMMU architectures, which do not need to do
+ * anything special here.
+ */
+#include <asm-generic/mm_hooks.h>
+
+static inline void switch_mm(struct mm_struct *prev,
+ struct mm_struct *next,
+ struct task_struct *tsk)
+{
+}
+
+#include <asm-generic/mmu_context.h>
+
+#endif /* __ASM_GENERIC_NOMMU_H */
diff --git a/include/asm-generic/numa.h b/include/asm-generic/numa.h
new file mode 100644
index 000000000000..1a3ad6d29833
--- /dev/null
+++ b/include/asm-generic/numa.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_NUMA_H
+#define __ASM_GENERIC_NUMA_H
+
+#ifdef CONFIG_NUMA
+
+#define NR_NODE_MEMBLKS (MAX_NUMNODES * 2)
+
+int __node_distance(int from, int to);
+#define node_distance(a, b) __node_distance(a, b)
+
+extern nodemask_t numa_nodes_parsed __initdata;
+
+extern bool numa_off;
+
+/* Mappings between node number and cpus on that node. */
+extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+void numa_clear_node(unsigned int cpu);
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+const struct cpumask *cpumask_of_node(int node);
+#else
+/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
+static inline const struct cpumask *cpumask_of_node(int node)
+{
+ if (node == NUMA_NO_NODE)
+ return cpu_all_mask;
+
+ return node_to_cpumask_map[node];
+}
+#endif
+
+void __init arch_numa_init(void);
+int __init numa_add_memblk(int nodeid, u64 start, u64 end);
+void __init numa_set_distance(int from, int to, int distance);
+void __init numa_free_distance(void);
+void __init early_map_cpu_to_node(unsigned int cpu, int nid);
+void numa_store_cpu_info(unsigned int cpu);
+void numa_add_cpu(unsigned int cpu);
+void numa_remove_cpu(unsigned int cpu);
+
+#else /* CONFIG_NUMA */
+
+static inline void numa_store_cpu_info(unsigned int cpu) { }
+static inline void numa_add_cpu(unsigned int cpu) { }
+static inline void numa_remove_cpu(unsigned int cpu) { }
+static inline void arch_numa_init(void) { }
+static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { }
+
+#endif /* CONFIG_NUMA */
+
+#endif /* __ASM_GENERIC_NUMA_H */
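
[Editor's note] The CONFIG_NUMA half gives generic code a uniform query surface. A hedged consumer example, selecting the closest other online node by distance (example_nearest_node is hypothetical):

/* Sketch: pick the nearest other online node using node_distance(). */
static int example_nearest_node(int nid)
{
	int node, best = NUMA_NO_NODE, best_dist = INT_MAX;

	for_each_online_node(node) {
		if (node == nid)
			continue;
		if (node_distance(nid, node) < best_dist) {
			best_dist = node_distance(nid, node);
			best = node;
		}
	}
	return best;
}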
diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h
index fe801f01625e..6fc47561814c 100644
--- a/include/asm-generic/page.h
+++ b/include/asm-generic/page.h
@@ -63,11 +63,7 @@ extern unsigned long memory_end;
#endif /* !__ASSEMBLY__ */
-#ifdef CONFIG_KERNEL_RAM_BASE_ADDRESS
-#define PAGE_OFFSET (CONFIG_KERNEL_RAM_BASE_ADDRESS)
-#else
#define PAGE_OFFSET (0)
-#endif
#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h
index 6bb3cd3d695a..6869f1061528 100644
--- a/include/asm-generic/pci.h
+++ b/include/asm-generic/pci.h
@@ -1,17 +1,30 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * linux/include/asm-generic/pci.h
- *
- * Copyright (C) 2003 Russell King
- */
-#ifndef _ASM_GENERIC_PCI_H
-#define _ASM_GENERIC_PCI_H
+/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+#ifndef __ASM_GENERIC_PCI_H
+#define __ASM_GENERIC_PCI_H
+
+#ifndef PCIBIOS_MIN_IO
+#define PCIBIOS_MIN_IO 0
+#endif
+
+#ifndef PCIBIOS_MIN_MEM
+#define PCIBIOS_MIN_MEM 0
+#endif
+
+#ifndef pcibios_assign_all_busses
+/* For bootloaders that do not initialize the PCI bus */
+#define pcibios_assign_all_busses() 1
+#endif
+
+/* Enable generic resource mapping code in drivers/pci/ */
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
+
+#ifdef CONFIG_PCI_DOMAINS
+static inline int pci_proc_domain(struct pci_bus *bus)
{
- return channel ? 15 : 14;
+ /* always show the domain in /proc */
+ return 1;
}
-#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */
+#endif /* CONFIG_PCI_DOMAINS */
-#endif /* _ASM_GENERIC_PCI_H */
+#endif /* __ASM_GENERIC_PCI_H */
diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h
index d4f16dcc2ed7..8fbb0a55545d 100644
--- a/include/asm-generic/pci_iomap.h
+++ b/include/asm-generic/pci_iomap.h
@@ -18,12 +18,15 @@ extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
unsigned long offset,
unsigned long maxlen);
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
/* Create a virtual mapping cookie for a port on a given PCI device.
* Do not call this directly, it exists to make it easier for architectures
* to override */
#ifdef CONFIG_NO_GENERIC_PCI_IOPORT_MAP
extern void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port,
unsigned int nr);
+#elif !defined(CONFIG_HAS_IOPORT_MAP)
+#define __pci_ioport_map(dev, port, nr) NULL
#else
#define __pci_ioport_map(dev, port, nr) ioport_map((port), (nr))
#endif
@@ -50,6 +53,8 @@ static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
{
return NULL;
}
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{ }
#endif
-#endif /* __ASM_GENERIC_IO_H */
+#endif /* __ASM_GENERIC_PCI_IOMAP_H */
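
[Editor's note] With pci_iounmap() declared next to the mapping helpers, and stubbed in the no-PCI case, drivers can pair the calls unconditionally. A hedged sketch (example_probe_bar0 is hypothetical):

/* Sketch: map BAR 0 in full, use it, then release the cookie. */
static int example_probe_bar0(struct pci_dev *pdev)
{
	void __iomem *bar = pci_iomap(pdev, 0, 0);	/* 0 = whole BAR */

	if (!bar)
		return -ENOMEM;
	/* ... MMIO via ioread32(bar + offset) ... */
	pci_iounmap(pdev, bar);
	return 0;
}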
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 35e4a53b83e6..6432a7fade91 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -114,21 +114,21 @@ do { \
#define __this_cpu_generic_read_nopreempt(pcp) \
({ \
- typeof(pcp) __ret; \
+ typeof(pcp) ___ret; \
preempt_disable_notrace(); \
- __ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \
+ ___ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \
preempt_enable_notrace(); \
- __ret; \
+ ___ret; \
})
#define __this_cpu_generic_read_noirq(pcp) \
({ \
- typeof(pcp) __ret; \
- unsigned long __flags; \
- raw_local_irq_save(__flags); \
- __ret = raw_cpu_generic_read(pcp); \
- raw_local_irq_restore(__flags); \
- __ret; \
+ typeof(pcp) ___ret; \
+ unsigned long ___flags; \
+ raw_local_irq_save(___flags); \
+ ___ret = raw_cpu_generic_read(pcp); \
+ raw_local_irq_restore(___flags); \
+ ___ret; \
})
#define this_cpu_generic_read(pcp) \
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
index 73f7421413cb..977bea16cf1b 100644
--- a/include/asm-generic/pgalloc.h
+++ b/include/asm-generic/pgalloc.h
@@ -102,6 +102,98 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
__free_page(pte_page);
}
+
+#if CONFIG_PGTABLE_LEVELS > 2
+
+#ifndef __HAVE_ARCH_PMD_ALLOC_ONE
+/**
+ * pmd_alloc_one - allocate a page for PMD-level page table
+ * @mm: the mm_struct of the current context
+ *
+ * Allocates a page and runs the pgtable_pmd_page_ctor().
+ * Allocations use %GFP_PGTABLE_USER in user context and
+ * %GFP_PGTABLE_KERNEL in kernel context.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
+ */
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ struct page *page;
+ gfp_t gfp = GFP_PGTABLE_USER;
+
+ if (mm == &init_mm)
+ gfp = GFP_PGTABLE_KERNEL;
+ page = alloc_pages(gfp, 0);
+ if (!page)
+ return NULL;
+ if (!pgtable_pmd_page_ctor(page)) {
+ __free_pages(page, 0);
+ return NULL;
+ }
+ return (pmd_t *)page_address(page);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMD_FREE
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+ BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
+ pgtable_pmd_page_dtor(virt_to_page(pmd));
+ free_page((unsigned long)pmd);
+}
+#endif
+
+#endif /* CONFIG_PGTABLE_LEVELS > 2 */
+
+#if CONFIG_PGTABLE_LEVELS > 3
+
+static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ gfp_t gfp = GFP_PGTABLE_USER;
+
+ if (mm == &init_mm)
+ gfp = GFP_PGTABLE_KERNEL;
+ return (pud_t *)get_zeroed_page(gfp);
+}
+
+#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
+/**
+ * pud_alloc_one - allocate a page for PUD-level page table
+ * @mm: the mm_struct of the current context
+ *
+ * Allocates a page using %GFP_PGTABLE_USER for user context and
+ * %GFP_PGTABLE_KERNEL for kernel context.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
+ */
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return __pud_alloc_one(mm, addr);
+}
+#endif
+
+static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
+ free_page((unsigned long)pud);
+}
+
+#ifndef __HAVE_ARCH_PUD_FREE
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ __pud_free(mm, pud);
+}
+#endif
+
+#endif /* CONFIG_PGTABLE_LEVELS > 3 */
+
+#ifndef __HAVE_ARCH_PGD_FREE
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ free_page((unsigned long)pgd);
+}
+#endif
+
#endif /* CONFIG_MMU */
#endif /* __ASM_GENERIC_PGALLOC_H */
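
[Editor's note] The PMD-level helpers pair the allocation with pgtable_pmd_page_ctor()/pgtable_pmd_page_dtor(), so pmd_free() must only ever see pages obtained from pmd_alloc_one(). A hedged sketch of the balanced pairing for a kernel-context table (the example_* wrappers are hypothetical):

/* Sketch: keep the ctor/dtor pairing balanced for a kernel PMD page. */
static pmd_t *example_alloc_kernel_pmd(unsigned long addr)
{
	return pmd_alloc_one(&init_mm, addr);	/* GFP_PGTABLE_KERNEL path */
}

static void example_free_kernel_pmd(pmd_t *pmd)
{
	if (pmd)
		pmd_free(&init_mm, pmd);
}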
diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h
deleted file mode 100644
index 829bdb0d6327..000000000000
--- a/include/asm-generic/pgtable-nop4d-hack.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _PGTABLE_NOP4D_HACK_H
-#define _PGTABLE_NOP4D_HACK_H
-
-#ifndef __ASSEMBLY__
-#include <asm-generic/5level-fixup.h>
-
-#define __PAGETABLE_PUD_FOLDED 1
-
-/*
- * Having the pud type consist of a pgd gets the size right, and allows
- * us to conceptually access the pgd entry that this pud is folded into
- * without casting.
- */
-typedef struct { pgd_t pgd; } pud_t;
-
-#define PUD_SHIFT PGDIR_SHIFT
-#define PTRS_PER_PUD 1
-#define PUD_SIZE (1UL << PUD_SHIFT)
-#define PUD_MASK (~(PUD_SIZE-1))
-
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pud is never bad, and a pud always exists (as it's folded
- * into the pgd entry)
- */
-static inline int pgd_none(pgd_t pgd) { return 0; }
-static inline int pgd_bad(pgd_t pgd) { return 0; }
-static inline int pgd_present(pgd_t pgd) { return 1; }
-static inline void pgd_clear(pgd_t *pgd) { }
-#define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
-
-#define pgd_populate(mm, pgd, pud) do { } while (0)
-#define pgd_populate_safe(mm, pgd, pud) do { } while (0)
-/*
- * (puds are folded into pgds so this doesn't get actually called,
- * but the define is needed for a generic inline function.)
- */
-#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval })
-
-static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
-{
- return (pud_t *)pgd;
-}
-
-#define pud_val(x) (pgd_val((x).pgd))
-#define __pud(x) ((pud_t) { __pgd(x) })
-
-#define pgd_page(pgd) (pud_page((pud_t){ pgd }))
-#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd }))
-
-/*
- * allocating and freeing a pud is trivial: the 1-entry pud is
- * inside the pgd, so has no extra memory associated with it.
- */
-#define pud_alloc_one(mm, address) NULL
-#define pud_free(mm, x) do { } while (0)
-#define __pud_free_tlb(tlb, x, a) do { } while (0)
-
-#undef pud_addr_end
-#define pud_addr_end(addr, end) (end)
-
-#endif /* __ASSEMBLY__ */
-#endif /* _PGTABLE_NOP4D_HACK_H */
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
index ce2cbb3c380f..03b7dae47dd4 100644
--- a/include/asm-generic/pgtable-nop4d.h
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -9,7 +9,6 @@
typedef struct { pgd_t pgd; } p4d_t;
#define P4D_SHIFT PGDIR_SHIFT
-#define MAX_PTRS_PER_P4D 1
#define PTRS_PER_P4D 1
#define P4D_SIZE (1UL << P4D_SHIFT)
#define P4D_MASK (~(P4D_SIZE-1))
@@ -42,7 +41,7 @@ static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
#define __p4d(x) ((p4d_t) { __pgd(x) })
#define pgd_page(pgd) (p4d_page((p4d_t){ pgd }))
-#define pgd_page_vaddr(pgd) (p4d_page_vaddr((p4d_t){ pgd }))
+#define pgd_page_vaddr(pgd) ((unsigned long)(p4d_pgtable((p4d_t){ pgd })))
/*
* allocating and freeing a p4d is trivial: the 1-entry p4d is
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index 0d9b28cba16d..8ffd64e7a24c 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -30,6 +30,8 @@ typedef struct { pud_t pud; } pmd_t;
static inline int pud_none(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }
static inline int pud_present(pud_t pud) { return 1; }
+static inline int pud_user(pud_t pud) { return 0; }
+static inline int pud_leaf(pud_t pud) { return 0; }
static inline void pud_clear(pud_t *pud) { }
#define pmd_ERROR(pmd) (pud_ERROR((pmd).pud))
@@ -45,12 +47,13 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
{
return (pmd_t *)pud;
}
+#define pmd_offset pmd_offset
#define pmd_val(x) (pud_val((x).pud))
#define __pmd(x) ((pmd_t) { __pud(x) } )
#define pud_page(pud) (pmd_page((pmd_t){ pud }))
-#define pud_page_vaddr(pud) (pmd_page_vaddr((pmd_t){ pud }))
+#define pud_pgtable(pud) ((pmd_t *)(pmd_page_vaddr((pmd_t){ pud })))
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index d3776cb494c0..eb70c6d7ceff 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -4,9 +4,6 @@
#ifndef __ASSEMBLY__
-#ifdef __ARCH_USE_5LEVEL_HACK
-#include <asm-generic/pgtable-nop4d-hack.h>
-#else
#include <asm-generic/pgtable-nop4d.h>
#define __PAGETABLE_PUD_FOLDED 1
@@ -46,12 +43,13 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
return (pud_t *)p4d;
}
+#define pud_offset pud_offset
#define pud_val(x) (p4d_val((x).p4d))
#define __pud(x) ((pud_t) { __p4d(x) })
#define p4d_page(p4d) (pud_page((pud_t){ p4d }))
-#define p4d_page_vaddr(p4d) (pud_page_vaddr((pud_t){ p4d }))
+#define p4d_pgtable(p4d) ((pud_t *)(pud_pgtable((pud_t){ p4d })))
/*
* allocating and freeing a pud is trivial: the 1-entry pud is
@@ -65,5 +63,4 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
#define pud_addr_end(addr, end) (end)
#endif /* __ASSEMBLY__ */
-#endif /* !__ARCH_USE_5LEVEL_HACK */
#endif /* _PGTABLE_NOPUD_H */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
deleted file mode 100644
index e2e2bef07dd2..000000000000
--- a/include/asm-generic/pgtable.h
+++ /dev/null
@@ -1,1261 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_PGTABLE_H
-#define _ASM_GENERIC_PGTABLE_H
-
-#include <linux/pfn.h>
-
-#ifndef __ASSEMBLY__
-#ifdef CONFIG_MMU
-
-#include <linux/mm_types.h>
-#include <linux/bug.h>
-#include <linux/errno.h>
-
-#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
- defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
-#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
-#endif
-
-/*
- * On almost all architectures and configurations, 0 can be used as the
- * upper ceiling to free_pgtables(): on many architectures it has the same
- * effect as using TASK_SIZE. However, there is one configuration which
- * must impose a more careful limit, to avoid freeing kernel pgtables.
- */
-#ifndef USER_PGTABLES_CEILING
-#define USER_PGTABLES_CEILING 0UL
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-extern int ptep_set_access_flags(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep,
- pte_t entry, int dirty);
-#endif
-
-#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern int pmdp_set_access_flags(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp,
- pmd_t entry, int dirty);
-extern int pudp_set_access_flags(struct vm_area_struct *vma,
- unsigned long address, pud_t *pudp,
- pud_t entry, int dirty);
-#else
-static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp,
- pmd_t entry, int dirty)
-{
- BUILD_BUG();
- return 0;
-}
-static inline int pudp_set_access_flags(struct vm_area_struct *vma,
- unsigned long address, pud_t *pudp,
- pud_t entry, int dirty)
-{
- BUILD_BUG();
- return 0;
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address,
- pte_t *ptep)
-{
- pte_t pte = *ptep;
- int r = 1;
- if (!pte_young(pte))
- r = 0;
- else
- set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
- return r;
-}
-#endif
-
-#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address,
- pmd_t *pmdp)
-{
- pmd_t pmd = *pmdp;
- int r = 1;
- if (!pmd_young(pmd))
- r = 0;
- else
- set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
- return r;
-}
-#else
-static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address,
- pmd_t *pmdp)
-{
- BUILD_BUG();
- return 0;
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-int ptep_clear_flush_young(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep);
-#endif
-
-#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp);
-#else
-/*
- * Despite relevant to THP only, this API is called from generic rmap code
- * under PageTransHuge(), hence needs a dummy implementation for !THP
- */
-static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp)
-{
- BUILD_BUG();
- return 0;
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
- unsigned long address,
- pte_t *ptep)
-{
- pte_t pte = *ptep;
- pte_clear(mm, address, ptep);
- return pte;
-}
-#endif
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
-static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
- unsigned long address,
- pmd_t *pmdp)
-{
- pmd_t pmd = *pmdp;
- pmd_clear(pmdp);
- return pmd;
-}
-#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
-#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
-static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
- unsigned long address,
- pud_t *pudp)
-{
- pud_t pud = *pudp;
-
- pud_clear(pudp);
- return pud;
-}
-#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
-static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
- unsigned long address, pmd_t *pmdp,
- int full)
-{
- return pmdp_huge_get_and_clear(mm, address, pmdp);
-}
-#endif
-
-#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
-static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
- unsigned long address, pud_t *pudp,
- int full)
-{
- return pudp_huge_get_and_clear(mm, address, pudp);
-}
-#endif
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
-#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
- unsigned long address, pte_t *ptep,
- int full)
-{
- pte_t pte;
- pte = ptep_get_and_clear(mm, address, ptep);
- return pte;
-}
-#endif
-
-/*
- * Some architectures may be able to avoid expensive synchronization
- * primitives when modifications are made to PTE's which are already
- * not present, or in the process of an address space destruction.
- */
-#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
-static inline void pte_clear_not_present_full(struct mm_struct *mm,
- unsigned long address,
- pte_t *ptep,
- int full)
-{
- pte_clear(mm, address, ptep);
-}
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
-extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
- unsigned long address,
- pte_t *ptep);
-#endif
-
-#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
-extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
- unsigned long address,
- pmd_t *pmdp);
-extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
- unsigned long address,
- pud_t *pudp);
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
-struct mm_struct;
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
-{
- pte_t old_pte = *ptep;
- set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
-}
-#endif
-
-#ifndef pte_savedwrite
-#define pte_savedwrite pte_write
-#endif
-
-#ifndef pte_mk_savedwrite
-#define pte_mk_savedwrite pte_mkwrite
-#endif
-
-#ifndef pte_clear_savedwrite
-#define pte_clear_savedwrite pte_wrprotect
-#endif
-
-#ifndef pmd_savedwrite
-#define pmd_savedwrite pmd_write
-#endif
-
-#ifndef pmd_mk_savedwrite
-#define pmd_mk_savedwrite pmd_mkwrite
-#endif
-
-#ifndef pmd_clear_savedwrite
-#define pmd_clear_savedwrite pmd_wrprotect
-#endif
-
-#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline void pmdp_set_wrprotect(struct mm_struct *mm,
- unsigned long address, pmd_t *pmdp)
-{
- pmd_t old_pmd = *pmdp;
- set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
-}
-#else
-static inline void pmdp_set_wrprotect(struct mm_struct *mm,
- unsigned long address, pmd_t *pmdp)
-{
- BUILD_BUG();
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#endif
-#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-static inline void pudp_set_wrprotect(struct mm_struct *mm,
- unsigned long address, pud_t *pudp)
-{
- pud_t old_pud = *pudp;
-
- set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
-}
-#else
-static inline void pudp_set_wrprotect(struct mm_struct *mm,
- unsigned long address, pud_t *pudp)
-{
- BUILD_BUG();
-}
-#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
-#endif
-
-#ifndef pmdp_collapse_flush
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp);
-#else
-static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
- unsigned long address,
- pmd_t *pmdp)
-{
- BUILD_BUG();
- return *pmdp;
-}
-#define pmdp_collapse_flush pmdp_collapse_flush
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#endif
-
-#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
-extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
- pgtable_t pgtable);
-#endif
-
-#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
-extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
-#endif
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-/*
- * This is an implementation of pmdp_establish() that is only suitable for an
- * architecture that doesn't have hardware dirty/accessed bits. In this case we
- * can't race with CPU which sets these bits and non-atomic aproach is fine.
- */
-static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp, pmd_t pmd)
-{
- pmd_t old_pmd = *pmdp;
- set_pmd_at(vma->vm_mm, address, pmdp, pmd);
- return old_pmd;
-}
-#endif
-
-#ifndef __HAVE_ARCH_PMDP_INVALIDATE
-extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
- pmd_t *pmdp);
-#endif
-
-#ifndef __HAVE_ARCH_PTE_SAME
-static inline int pte_same(pte_t pte_a, pte_t pte_b)
-{
- return pte_val(pte_a) == pte_val(pte_b);
-}
-#endif
-
-#ifndef __HAVE_ARCH_PTE_UNUSED
-/*
- * Some architectures provide facilities to virtualization guests
- * so that they can flag allocated pages as unused. This allows the
- * host to transparently reclaim unused pages. This function returns
- * whether the pte's page is unused.
- */
-static inline int pte_unused(pte_t pte)
-{
- return 0;
-}
-#endif
-
-#ifndef pte_access_permitted
-#define pte_access_permitted(pte, write) \
- (pte_present(pte) && (!(write) || pte_write(pte)))
-#endif
-
-#ifndef pmd_access_permitted
-#define pmd_access_permitted(pmd, write) \
- (pmd_present(pmd) && (!(write) || pmd_write(pmd)))
-#endif
-
-#ifndef pud_access_permitted
-#define pud_access_permitted(pud, write) \
- (pud_present(pud) && (!(write) || pud_write(pud)))
-#endif
-
-#ifndef p4d_access_permitted
-#define p4d_access_permitted(p4d, write) \
- (p4d_present(p4d) && (!(write) || p4d_write(p4d)))
-#endif
-
-#ifndef pgd_access_permitted
-#define pgd_access_permitted(pgd, write) \
- (pgd_present(pgd) && (!(write) || pgd_write(pgd)))
-#endif
-
-#ifndef __HAVE_ARCH_PMD_SAME
-static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
-{
- return pmd_val(pmd_a) == pmd_val(pmd_b);
-}
-
-static inline int pud_same(pud_t pud_a, pud_t pud_b)
-{
- return pud_val(pud_a) == pud_val(pud_b);
-}
-#endif
-
-#ifndef __HAVE_ARCH_P4D_SAME
-static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
-{
- return p4d_val(p4d_a) == p4d_val(p4d_b);
-}
-#endif
-
-#ifndef __HAVE_ARCH_PGD_SAME
-static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
-{
- return pgd_val(pgd_a) == pgd_val(pgd_b);
-}
-#endif
-
-/*
- * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
- * TLB flush will be required as a result of the "set". For example, use
- * in scenarios where it is known ahead of time that the routine is
- * setting non-present entries, or re-setting an existing entry to the
- * same value. Otherwise, use the typical "set" helpers and flush the
- * TLB.
- */
-#define set_pte_safe(ptep, pte) \
-({ \
- WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
- set_pte(ptep, pte); \
-})
-
-#define set_pmd_safe(pmdp, pmd) \
-({ \
- WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
- set_pmd(pmdp, pmd); \
-})
-
-#define set_pud_safe(pudp, pud) \
-({ \
- WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
- set_pud(pudp, pud); \
-})
-
-#define set_p4d_safe(p4dp, p4d) \
-({ \
- WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
- set_p4d(p4dp, p4d); \
-})
-
-#define set_pgd_safe(pgdp, pgd) \
-({ \
- WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
- set_pgd(pgdp, pgd); \
-})
-
-#ifndef __HAVE_ARCH_DO_SWAP_PAGE
-/*
- * Some architectures support metadata associated with a page. When a
- * page is being swapped out, this metadata must be saved so it can be
- * restored when the page is swapped back in. SPARC M7 and newer
- * processors support an ADI (Application Data Integrity) tag for the
- * page as metadata for the page. arch_do_swap_page() can restore this
- * metadata when a page is swapped back in.
- */
-static inline void arch_do_swap_page(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long addr,
- pte_t pte, pte_t oldpte)
-{
-
-}
-#endif
-
-#ifndef __HAVE_ARCH_UNMAP_ONE
-/*
- * Some architectures support metadata associated with a page. When a
- * page is being swapped out, this metadata must be saved so it can be
- * restored when the page is swapped back in. SPARC M7 and newer
- * processors support an ADI (Application Data Integrity) tag for the
- * page as metadata for the page. arch_unmap_one() can save this
- * metadata on a swap-out of a page.
- */
-static inline int arch_unmap_one(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long addr,
- pte_t orig_pte)
-{
- return 0;
-}
-#endif
-
-#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
-#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
-#endif
-
-#ifndef __HAVE_ARCH_MOVE_PTE
-#define move_pte(pte, prot, old_addr, new_addr) (pte)
-#endif
-
-#ifndef pte_accessible
-# define pte_accessible(mm, pte) ((void)(pte), 1)
-#endif
-
-#ifndef flush_tlb_fix_spurious_fault
-#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
-#endif
-
-#ifndef pgprot_noncached
-#define pgprot_noncached(prot) (prot)
-#endif
-
-#ifndef pgprot_writecombine
-#define pgprot_writecombine pgprot_noncached
-#endif
-
-#ifndef pgprot_writethrough
-#define pgprot_writethrough pgprot_noncached
-#endif
-
-#ifndef pgprot_device
-#define pgprot_device pgprot_noncached
-#endif
-
-#ifndef pgprot_modify
-#define pgprot_modify pgprot_modify
-static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
-{
- if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
- newprot = pgprot_noncached(newprot);
- if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
- newprot = pgprot_writecombine(newprot);
- if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
- newprot = pgprot_device(newprot);
- return newprot;
-}
-#endif
-
-/*
- * When walking page tables, get the address of the next boundary,
- * or the end address of the range if that comes earlier. Although no
- * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
- */
-
-#define pgd_addr_end(addr, end) \
-({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
- (__boundary - 1 < (end) - 1)? __boundary: (end); \
-})
-
-#ifndef p4d_addr_end
-#define p4d_addr_end(addr, end) \
-({ unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \
- (__boundary - 1 < (end) - 1)? __boundary: (end); \
-})
-#endif
-
-#ifndef pud_addr_end
-#define pud_addr_end(addr, end) \
-({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
- (__boundary - 1 < (end) - 1)? __boundary: (end); \
-})
-#endif
-
-#ifndef pmd_addr_end
-#define pmd_addr_end(addr, end) \
-({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
- (__boundary - 1 < (end) - 1)? __boundary: (end); \
-})
-#endif
-
-/*
- * When walking page tables, we usually want to skip any p?d_none entries;
- * and any p?d_bad entries - reporting the error before resetting to none.
- * Do the tests inline, but report and clear the bad entry in mm/memory.c.
- */
-void pgd_clear_bad(pgd_t *);
-
-#ifndef __PAGETABLE_P4D_FOLDED
-void p4d_clear_bad(p4d_t *);
-#else
-#define p4d_clear_bad(p4d) do { } while (0)
-#endif
-
-#ifndef __PAGETABLE_PUD_FOLDED
-void pud_clear_bad(pud_t *);
-#else
-#define pud_clear_bad(p4d) do { } while (0)
-#endif
-
-void pmd_clear_bad(pmd_t *);
-
-static inline int pgd_none_or_clear_bad(pgd_t *pgd)
-{
- if (pgd_none(*pgd))
- return 1;
- if (unlikely(pgd_bad(*pgd))) {
- pgd_clear_bad(pgd);
- return 1;
- }
- return 0;
-}
-
-static inline int p4d_none_or_clear_bad(p4d_t *p4d)
-{
- if (p4d_none(*p4d))
- return 1;
- if (unlikely(p4d_bad(*p4d))) {
- p4d_clear_bad(p4d);
- return 1;
- }
- return 0;
-}
-
-static inline int pud_none_or_clear_bad(pud_t *pud)
-{
- if (pud_none(*pud))
- return 1;
- if (unlikely(pud_bad(*pud))) {
- pud_clear_bad(pud);
- return 1;
- }
- return 0;
-}
-
-static inline int pmd_none_or_clear_bad(pmd_t *pmd)
-{
- if (pmd_none(*pmd))
- return 1;
- if (unlikely(pmd_bad(*pmd))) {
- pmd_clear_bad(pmd);
- return 1;
- }
- return 0;
-}
-
-static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
- unsigned long addr,
- pte_t *ptep)
-{
- /*
- * Get the current pte state, but zero it out to make it
- * non-present, preventing the hardware from asynchronously
- * updating it.
- */
- return ptep_get_and_clear(vma->vm_mm, addr, ptep);
-}
-
-static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
- unsigned long addr,
- pte_t *ptep, pte_t pte)
-{
- /*
- * The pte is non-present, so there's no hardware state to
- * preserve.
- */
- set_pte_at(vma->vm_mm, addr, ptep, pte);
-}
-
-#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
-/*
- * Start a pte protection read-modify-write transaction, which
- * protects against asynchronous hardware modifications to the pte.
- * The intention is not to prevent the hardware from making pte
- * updates, but to prevent any updates it may make from being lost.
- *
- * This does not protect against other software modifications of the
- * pte; the appropriate pte lock must be held over the transation.
- *
- * Note that this interface is intended to be batchable, meaning that
- * ptep_modify_prot_commit may not actually update the pte, but merely
- * queue the update to be done at some later time. The update must be
- * actually committed before the pte lock is released, however.
- */
-static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
- unsigned long addr,
- pte_t *ptep)
-{
- return __ptep_modify_prot_start(vma, addr, ptep);
-}
-
-/*
- * Commit an update to a pte, leaving any hardware-controlled bits in
- * the PTE unmodified.
- */
-static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
- unsigned long addr,
- pte_t *ptep, pte_t old_pte, pte_t pte)
-{
- __ptep_modify_prot_commit(vma, addr, ptep, pte);
-}
-#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
-#endif /* CONFIG_MMU */
-
-/*
- * No-op macros that just return the current protection value. Defined here
- * because these macros can be used used even if CONFIG_MMU is not defined.
- */
-#ifndef pgprot_encrypted
-#define pgprot_encrypted(prot) (prot)
-#endif
-
-#ifndef pgprot_decrypted
-#define pgprot_decrypted(prot) (prot)
-#endif
-
-/*
- * A facility to provide lazy MMU batching. This allows PTE updates and
- * page invalidations to be delayed until a call to leave lazy MMU mode
- * is issued. Some architectures may benefit from doing this, and it is
- * beneficial for both shadow and direct mode hypervisors, which may batch
- * the PTE updates which happen during this window. Note that using this
- * interface requires that read hazards be removed from the code. A read
- * hazard could result in the direct mode hypervisor case, since the actual
- * write to the page tables may not yet have taken place, so reads though
- * a raw PTE pointer after it has been modified are not guaranteed to be
- * up to date. This mode can only be entered and left under the protection of
- * the page table locks for all page tables which may be modified. In the UP
- * case, this is required so that preemption is disabled, and in the SMP case,
- * it must synchronize the delayed page table writes properly on other CPUs.
- */
-#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-#define arch_enter_lazy_mmu_mode() do {} while (0)
-#define arch_leave_lazy_mmu_mode() do {} while (0)
-#define arch_flush_lazy_mmu_mode() do {} while (0)
-#endif
-
-/*
- * A facility to provide batching of the reload of page tables and
- * other process state with the actual context switch code for
- * paravirtualized guests. By convention, only one of the batched
- * update (lazy) modes (CPU, MMU) should be active at any given time,
- * entry should never be nested, and entry and exits should always be
- * paired. This is for sanity of maintaining and reasoning about the
- * kernel code. In this case, the exit (end of the context switch) is
- * in architecture-specific code, and so doesn't need a generic
- * definition.
- */
-#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
-#define arch_start_context_switch(prev) do {} while (0)
-#endif
-
-#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
-#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
-static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
-{
- return pmd;
-}
-
-static inline int pmd_swp_soft_dirty(pmd_t pmd)
-{
- return 0;
-}
-
-static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
-{
- return pmd;
-}
-#endif
-#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
-static inline int pte_soft_dirty(pte_t pte)
-{
- return 0;
-}
-
-static inline int pmd_soft_dirty(pmd_t pmd)
-{
- return 0;
-}
-
-static inline pte_t pte_mksoft_dirty(pte_t pte)
-{
- return pte;
-}
-
-static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
-{
- return pmd;
-}
-
-static inline pte_t pte_clear_soft_dirty(pte_t pte)
-{
- return pte;
-}
-
-static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
-{
- return pmd;
-}
-
-static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
-{
- return pte;
-}
-
-static inline int pte_swp_soft_dirty(pte_t pte)
-{
- return 0;
-}
-
-static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
-{
- return pte;
-}
-
-static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
-{
- return pmd;
-}
-
-static inline int pmd_swp_soft_dirty(pmd_t pmd)
-{
- return 0;
-}
-
-static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
-{
- return pmd;
-}
-#endif
-
-#ifndef __HAVE_PFNMAP_TRACKING
-/*
- * Interfaces that can be used by architecture code to keep track of
- * memory type of pfn mappings specified by the remap_pfn_range,
- * vmf_insert_pfn.
- */
-
-/*
- * track_pfn_remap is called when a _new_ pfn mapping is being established
- * by remap_pfn_range() for physical range indicated by pfn and size.
- */
-static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
- unsigned long pfn, unsigned long addr,
- unsigned long size)
-{
- return 0;
-}
-
-/*
- * track_pfn_insert is called when a _new_ single pfn is established
- * by vmf_insert_pfn().
- */
-static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
- pfn_t pfn)
-{
-}
-
-/*
- * track_pfn_copy is called when vma that is covering the pfnmap gets
- * copied through copy_page_range().
- */
-static inline int track_pfn_copy(struct vm_area_struct *vma)
-{
- return 0;
-}
-
-/*
- * untrack_pfn is called while unmapping a pfnmap for a region.
- * untrack can be called for a specific region indicated by pfn and size or
- * can be for the entire vma (in which case pfn, size are zero).
- */
-static inline void untrack_pfn(struct vm_area_struct *vma,
- unsigned long pfn, unsigned long size)
-{
-}
-
-/*
- * untrack_pfn_moved is called while mremapping a pfnmap for a new region.
- */
-static inline void untrack_pfn_moved(struct vm_area_struct *vma)
-{
-}
-#else
-extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
- unsigned long pfn, unsigned long addr,
- unsigned long size);
-extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
- pfn_t pfn);
-extern int track_pfn_copy(struct vm_area_struct *vma);
-extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
- unsigned long size);
-extern void untrack_pfn_moved(struct vm_area_struct *vma);
-#endif
-
-#ifdef __HAVE_COLOR_ZERO_PAGE
-static inline int is_zero_pfn(unsigned long pfn)
-{
- extern unsigned long zero_pfn;
- unsigned long offset_from_zero_pfn = pfn - zero_pfn;
- return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
-}
-
-#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
-
-#else
-static inline int is_zero_pfn(unsigned long pfn)
-{
- extern unsigned long zero_pfn;
- return pfn == zero_pfn;
-}
-
-static inline unsigned long my_zero_pfn(unsigned long addr)
-{
- extern unsigned long zero_pfn;
- return zero_pfn;
-}
-#endif
-
-#ifdef CONFIG_MMU
-
-#ifndef CONFIG_TRANSPARENT_HUGEPAGE
-static inline int pmd_trans_huge(pmd_t pmd)
-{
- return 0;
-}
-#ifndef pmd_write
-static inline int pmd_write(pmd_t pmd)
-{
- BUG();
- return 0;
-}
-#endif /* pmd_write */
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
-#ifndef pud_write
-static inline int pud_write(pud_t pud)
-{
- BUG();
- return 0;
-}
-#endif /* pud_write */
-
-#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline int pmd_devmap(pmd_t pmd)
-{
- return 0;
-}
-static inline int pud_devmap(pud_t pud)
-{
- return 0;
-}
-static inline int pgd_devmap(pgd_t pgd)
-{
- return 0;
-}
-#endif
-
-#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
- (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
- !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
-static inline int pud_trans_huge(pud_t pud)
-{
- return 0;
-}
-#endif
-
-/* See pmd_none_or_trans_huge_or_clear_bad for discussion. */
-static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud)
-{
- pud_t pudval = READ_ONCE(*pud);
-
- if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
- return 1;
- if (unlikely(pud_bad(pudval))) {
- pud_clear_bad(pud);
- return 1;
- }
- return 0;
-}
-
-/* See pmd_trans_unstable for discussion. */
-static inline int pud_trans_unstable(pud_t *pud)
-{
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
- defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
- return pud_none_or_trans_huge_or_dev_or_clear_bad(pud);
-#else
- return 0;
-#endif
-}
-
-#ifndef pmd_read_atomic
-static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
-{
- /*
- * Depend on compiler for an atomic pmd read. NOTE: this is
- * only going to work, if the pmdval_t isn't larger than
- * an unsigned long.
- */
- return *pmdp;
-}
-#endif
-
-#ifndef arch_needs_pgtable_deposit
-#define arch_needs_pgtable_deposit() (false)
-#endif
-/*
- * This function is meant to be used by sites walking pagetables with
- * the mmap_sem held in read mode to protect against MADV_DONTNEED and
- * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
- * into a null pmd and the transhuge page fault can convert a null pmd
- * into a hugepmd or into a regular pmd (if the hugepage allocation
- * fails). While holding the mmap_sem in read mode the pmd becomes
- * stable and stops changing under us only if it's not null and not a
- * transhuge pmd. When those races occur and this function makes a
- * difference vs the standard pmd_none_or_clear_bad, the result is
- * undefined, so behaving as if the pmd were none is safe (because it
- * can return none anyway). The compiler-level barrier() is critically
- * important to compute the two checks atomically on the same pmdval.
- *
- * For 32bit kernels with a 64bit large pmd_t this automatically takes
- * care of reading the pmd atomically to avoid SMP race conditions
- * against pmd_populate() when the mmap_sem is held for reading by the
- * caller (a special atomic read not done by "gcc" as in the generic
- * version above is also needed when THP is disabled, because the page
- * fault can populate the pmd from under us).
- */
-static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
-{
- pmd_t pmdval = pmd_read_atomic(pmd);
- /*
- * The barrier will stabilize the pmdval in a register or on
- * the stack so that it will stop changing under the code.
- *
- * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
- * pmd_read_atomic is allowed to return a not atomic pmdval
- * (for example pointing to a hugepage that has never been
- * mapped in the pmd). The below checks will only care about
- * the low part of the pmd with 32bit PAE x86 anyway, with the
- * exception of pmd_none(). So the important thing is that if
- * the low part of the pmd is found null, the high part will
- * also be null or the pmd_none() check below would be
- * confused.
- */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- barrier();
-#endif
- /*
- * !pmd_present() checks for pmd migration entries
- *
- * The complete check uses is_pmd_migration_entry() in linux/swapops.h.
- * But using that requires moving the current function and pmd_trans_unstable()
- * to linux/swapops.h to resolve the dependency, which is too much code move.
- *
- * !pmd_present() is equivalent to is_pmd_migration_entry() currently,
- * because !pmd_present() pages can only be under migration not swapped
- * out.
- *
- * pmd_none() is preserved for future condition checks on pmd migration
- * entries, and to avoid confusion with this function's name, although it is
- * redundant with !pmd_present().
- */
- if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
- (IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
- return 1;
- if (unlikely(pmd_bad(pmdval))) {
- pmd_clear_bad(pmd);
- return 1;
- }
- return 0;
-}
-
-/*
- * This is a noop if Transparent Hugepage Support is not built into
- * the kernel. Otherwise it is equivalent to
- * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
- * places that already verified the pmd is not none and they want to
- * walk ptes while holding the mmap sem in read mode (write mode doesn't
- * need this). If THP is not enabled, the pmd can't go away under the
- * code even if MADV_DONTNEED runs, but if THP is enabled we need to
- * run a pmd_trans_unstable before walking the ptes after
- * split_huge_pmd returns (because it may have run when the pmd became
- * null, but then a page fault can map in a THP and not a regular page).
- */
-static inline int pmd_trans_unstable(pmd_t *pmd)
-{
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- return pmd_none_or_trans_huge_or_clear_bad(pmd);
-#else
- return 0;
-#endif
-}
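
A minimal walker sketch to make the contract above concrete (hypothetical
caller, not part of this header; it assumes the mmap_sem is held for reading
and uses the pte_offset_map()/pte_unmap() idiom of this era):

	static void walk_example(pmd_t *pmd, unsigned long addr, unsigned long end)
	{
		pte_t *pte;

		if (pmd_trans_unstable(pmd))
			return;	/* behave as if the pmd were none; caller may rescan */

		pte = pte_offset_map(pmd, addr);
		do {
			/* *pte is stable here: it cannot become a THP under us */
		} while (pte++, addr += PAGE_SIZE, addr != end);
		pte_unmap(pte - 1);
	}
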
-
-#ifndef CONFIG_NUMA_BALANCING
-/*
- * Technically a PTE can be PROTNONE even when not doing NUMA balancing, but
- * the only case the kernel cares about is NUMA balancing, and the bit is only
- * ever set when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not
- * marked _PAGE_PROTNONE, so by default implement the helper as "always no". It
- * is the responsibility of the caller to distinguish between PROT_NONE
- * protections and NUMA hinting fault protections.
- */
-static inline int pte_protnone(pte_t pte)
-{
- return 0;
-}
-
-static inline int pmd_protnone(pmd_t pmd)
-{
- return 0;
-}
-#endif /* CONFIG_NUMA_BALANCING */
-
-#endif /* CONFIG_MMU */
-
-#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-
-#ifndef __PAGETABLE_P4D_FOLDED
-int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
-int p4d_clear_huge(p4d_t *p4d);
-#else
-static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
-{
- return 0;
-}
-static inline int p4d_clear_huge(p4d_t *p4d)
-{
- return 0;
-}
-#endif /* !__PAGETABLE_P4D_FOLDED */
-
-int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
-int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
-int pud_clear_huge(pud_t *pud);
-int pmd_clear_huge(pmd_t *pmd);
-int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
-int pud_free_pmd_page(pud_t *pud, unsigned long addr);
-int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
-#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
-static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
-{
- return 0;
-}
-static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
-{
- return 0;
-}
-static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
-{
- return 0;
-}
-static inline int p4d_clear_huge(p4d_t *p4d)
-{
- return 0;
-}
-static inline int pud_clear_huge(pud_t *pud)
-{
- return 0;
-}
-static inline int pmd_clear_huge(pmd_t *pmd)
-{
- return 0;
-}
-static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
-{
- return 0;
-}
-static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
-{
- return 0;
-}
-static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
-{
- return 0;
-}
-#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
-
-#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-/*
- * ARCHes with special requirements for evicting THP backing TLB entries can
- * implement this. It can also help optimize the normal TLB flush in the THP
- * regime: the stock flush_tlb_range() typically has an optimization to nuke
- * the entire TLB if the flush span is greater than a threshold, which will
- * likely be true for a single huge page. Thus a single THP flush would
- * invalidate the entire TLB, which is not desirable.
- * e.g. see arch/arc: flush_pmd_tlb_range
- */
-#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
-#define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
-#else
-#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG()
-#define flush_pud_tlb_range(vma, addr, end) BUILD_BUG()
-#endif
-#endif
-
-struct file;
-int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t *vma_prot);
-
-#ifndef CONFIG_X86_ESPFIX64
-static inline void init_espfix_bsp(void) { }
-#endif
-
-extern void __init pgtable_cache_init(void);
-
-#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
-static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
-{
- return true;
-}
-
-static inline bool arch_has_pfn_modify_check(void)
-{
- return false;
-}
-#endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */
-
-/*
- * Architecture PAGE_KERNEL_* fallbacks
- *
- * Some architectures don't define certain PAGE_KERNEL_* flags. This is either
- * because they really don't support them, or the port needs to be updated to
- * reflect the required functionality. Below is a set of relatively safe
- * fallbacks, provided as a best effort, which we can count on in lieu of
- * the architectures not defining them on their own yet.
- */
-
-#ifndef PAGE_KERNEL_RO
-# define PAGE_KERNEL_RO PAGE_KERNEL
-#endif
-
-#ifndef PAGE_KERNEL_EXEC
-# define PAGE_KERNEL_EXEC PAGE_KERNEL
-#endif
-
-#endif /* !__ASSEMBLY__ */
-
-#ifndef io_remap_pfn_range
-#define io_remap_pfn_range remap_pfn_range
-#endif
-
-#ifndef has_transparent_hugepage
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define has_transparent_hugepage() 1
-#else
-#define has_transparent_hugepage() 0
-#endif
-#endif
-
-/*
- * On some architectures it depends on the mm if the p4d/pud or pmd
- * layer of the page table hierarchy is folded or not.
- */
-#ifndef mm_p4d_folded
-#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED)
-#endif
-
-#ifndef mm_pud_folded
-#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED)
-#endif
-
-#ifndef mm_pmd_folded
-#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
-#endif
-
-/*
- * p?d_leaf() - true if this entry is a final mapping to a physical address.
- * This differs from p?d_huge() in that these helpers are always available (if
- * the architecture supports large pages at the appropriate level) even
- * if CONFIG_HUGETLB_PAGE is not defined.
- * Only meaningful when called on a valid entry.
- */
-#ifndef pgd_leaf
-#define pgd_leaf(x) 0
-#endif
-#ifndef p4d_leaf
-#define p4d_leaf(x) 0
-#endif
-#ifndef pud_leaf
-#define pud_leaf(x) 0
-#endif
-#ifndef pmd_leaf
-#define pmd_leaf(x) 0
-#endif
-
-#endif /* _ASM_GENERIC_PGTABLE_H */
diff --git a/include/asm-generic/pgtable_uffd.h b/include/asm-generic/pgtable_uffd.h
new file mode 100644
index 000000000000..828966d4c281
--- /dev/null
+++ b/include/asm-generic/pgtable_uffd.h
@@ -0,0 +1,66 @@
+#ifndef _ASM_GENERIC_PGTABLE_UFFD_H
+#define _ASM_GENERIC_PGTABLE_UFFD_H
+
+#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
+static __always_inline int pte_uffd_wp(pte_t pte)
+{
+ return 0;
+}
+
+static __always_inline int pmd_uffd_wp(pmd_t pmd)
+{
+ return 0;
+}
+
+static __always_inline pte_t pte_mkuffd_wp(pte_t pte)
+{
+ return pte;
+}
+
+static __always_inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
+{
+ return pmd;
+}
+
+static __always_inline pte_t pte_clear_uffd_wp(pte_t pte)
+{
+ return pte;
+}
+
+static __always_inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
+{
+ return pmd;
+}
+
+static __always_inline pte_t pte_swp_mkuffd_wp(pte_t pte)
+{
+ return pte;
+}
+
+static __always_inline int pte_swp_uffd_wp(pte_t pte)
+{
+ return 0;
+}
+
+static __always_inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline int pmd_swp_uffd_wp(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
+{
+ return pmd;
+}
+#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
+
+#endif /* _ASM_GENERIC_PGTABLE_UFFD_H */
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index d683f5e6d791..b4d43a4af5f7 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -29,7 +29,7 @@ static __always_inline void preempt_count_set(int pc)
} while (0)
#define init_idle_preempt_count(p, cpu) do { \
- task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
+ task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)
static __always_inline void set_preempt_need_resched(void)
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 3aefde23dcea..75b8f4601b28 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -2,6 +2,10 @@
/*
* Queue read/write lock
*
+ * These use generic atomic and locking routines, but depend on a fair spinlock
+ * implementation in order to be fair themselves. The implementation in
+ * asm-generic/spinlock.h meets these requirements.
+ *
* (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
*
* Authors: Waiman Long <waiman.long@hp.com>
@@ -15,6 +19,8 @@
#include <asm-generic/qrwlock_types.h>
+/* Must be included from asm/spinlock.h after defining arch_spin_is_locked. */
+
/*
* Writer states & reader shift and bias.
*/
@@ -31,13 +37,13 @@ extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);
/**
- * queued_read_trylock - try to acquire read lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_read_trylock - try to acquire read lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
* Return: 1 if lock acquired, 0 if failed
*/
static inline int queued_read_trylock(struct qrwlock *lock)
{
- u32 cnts;
+ int cnts;
cnts = atomic_read(&lock->cnts);
if (likely(!(cnts & _QW_WMASK))) {
@@ -50,13 +56,13 @@ static inline int queued_read_trylock(struct qrwlock *lock)
}
/**
- * queued_write_trylock - try to acquire write lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_write_trylock - try to acquire write lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
* Return: 1 if lock acquired, 0 if failed
*/
static inline int queued_write_trylock(struct qrwlock *lock)
{
- u32 cnts;
+ int cnts;
cnts = atomic_read(&lock->cnts);
if (unlikely(cnts))
@@ -66,12 +72,12 @@ static inline int queued_write_trylock(struct qrwlock *lock)
_QW_LOCKED));
}
/**
- * queued_read_lock - acquire read lock of a queue rwlock
- * @lock: Pointer to queue rwlock structure
+ * queued_read_lock - acquire read lock of a queued rwlock
+ * @lock: Pointer to queued rwlock structure
*/
static inline void queued_read_lock(struct qrwlock *lock)
{
- u32 cnts;
+ int cnts;
cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
if (likely(!(cnts & _QW_WMASK)))
@@ -82,12 +88,12 @@ static inline void queued_read_lock(struct qrwlock *lock)
}
/**
- * queued_write_lock - acquire write lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_write_lock - acquire write lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
*/
static inline void queued_write_lock(struct qrwlock *lock)
{
- u32 cnts = 0;
+ int cnts = 0;
/* Optimize for the unfair lock case where the fair flag is 0. */
if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
return;
@@ -96,8 +102,8 @@ static inline void queued_write_lock(struct qrwlock *lock)
}
/**
- * queued_read_unlock - release read lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_read_unlock - release read lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
*/
static inline void queued_read_unlock(struct qrwlock *lock)
{
@@ -108,23 +114,34 @@ static inline void queued_read_unlock(struct qrwlock *lock)
}
/**
- * queued_write_unlock - release write lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_write_unlock - release write lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
*/
static inline void queued_write_unlock(struct qrwlock *lock)
{
smp_store_release(&lock->wlocked, 0);
}
+/**
+ * queued_rwlock_is_contended - check if the lock is contended
+ * @lock : Pointer to queued rwlock structure
+ * Return: 1 if lock contended, 0 otherwise
+ */
+static inline int queued_rwlock_is_contended(struct qrwlock *lock)
+{
+ return arch_spin_is_locked(&lock->wait_lock);
+}
+
/*
* Remapping rwlock architecture specific functions to the corresponding
- * queue rwlock functions.
+ * queued rwlock functions.
*/
-#define arch_read_lock(l) queued_read_lock(l)
-#define arch_write_lock(l) queued_write_lock(l)
-#define arch_read_trylock(l) queued_read_trylock(l)
-#define arch_write_trylock(l) queued_write_trylock(l)
-#define arch_read_unlock(l) queued_read_unlock(l)
-#define arch_write_unlock(l) queued_write_unlock(l)
+#define arch_read_lock(l) queued_read_lock(l)
+#define arch_write_lock(l) queued_write_lock(l)
+#define arch_read_trylock(l) queued_read_trylock(l)
+#define arch_write_trylock(l) queued_write_trylock(l)
+#define arch_read_unlock(l) queued_read_unlock(l)
+#define arch_write_unlock(l) queued_write_unlock(l)
+#define arch_rwlock_is_contended(l) queued_rwlock_is_contended(l)
#endif /* __ASM_GENERIC_QRWLOCK_H */
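
The include-ordering note above is easiest to see as an arch wiring sketch
(hypothetical arch "foo"; x86 follows this pattern). qrwlock must come after
the spinlock ops because queued_rwlock_is_contended() calls
arch_spin_is_locked():

	/* arch/foo/include/asm/spinlock.h -- illustrative only */
	#ifndef _ASM_FOO_SPINLOCK_H
	#define _ASM_FOO_SPINLOCK_H

	#include <asm/qspinlock.h>	/* defines arch_spin_is_locked() et al. */
	#include <asm/qrwlock.h>	/* safe now that arch_spin_is_locked() exists */

	#endif /* _ASM_FOO_SPINLOCK_H */
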
diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h
index c36f1d5a2572..12392c14c4d0 100644
--- a/include/asm-generic/qrwlock_types.h
+++ b/include/asm-generic/qrwlock_types.h
@@ -7,7 +7,7 @@
#include <asm/spinlock_types.h>
/*
- * The queue read/write lock data structure
+ * The queued read/write lock data structure
*/
typedef struct qrwlock {
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index fde943d180e0..995513fa2690 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -2,6 +2,35 @@
/*
* Queued spinlock
*
+ * A 'generic' spinlock implementation that is based on MCS locks. For an
+ * architecture that's looking for a 'generic' spinlock, please first consider
+ * ticket-lock.h and only come looking here when you've considered all the
+ * constraints below and can show your hardware does actually perform better
+ * with qspinlock.
+ *
+ * qspinlock relies on atomic_*_release()/atomic_*_acquire() to be RCsc (or no
+ * weaker than RCtso if you're power), where regular code only expects atomic_t
+ * to be RCpc.
+ *
+ * qspinlock relies on a far greater (compared to asm-generic/spinlock.h) set
+ * of atomic operations to behave well together, please audit them carefully to
+ * ensure they all have forward progress. Many atomic operations may default to
+ * cmpxchg() loops which will not have good forward progress properties on
+ * LL/SC architectures.
+ *
+ * One notable example is atomic_fetch_or_acquire(), which x86 cannot (cheaply)
+ * do. Carefully read the patches that introduced
+ * queued_fetch_set_pending_acquire().
+ *
+ * qspinlock also heavily relies on mixed size atomic operations; specifically,
+ * it requires architectures to have xchg16, something which many LL/SC
+ * architectures need to implement as a 32bit and+or in order to satisfy the
+ * forward progress guarantees mentioned above.
+ *
+ * Further reading on mixed size atomics that might be relevant:
+ *
+ * http://www.cl.cam.ac.uk/~pes20/popl17/mixed-size.pdf
+ *
* (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
* (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
*
@@ -11,7 +40,9 @@
#define __ASM_GENERIC_QSPINLOCK_H
#include <asm-generic/qspinlock_types.h>
+#include <linux/atomic.h>
+#ifndef queued_spin_is_locked
/**
* queued_spin_is_locked - is the spinlock locked?
* @lock: Pointer to queued spinlock structure
@@ -25,6 +56,7 @@ static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
*/
return atomic_read(&lock->val);
}
+#endif
/**
* queued_spin_value_unlocked - is the spinlock structure unlocked?
@@ -57,7 +89,7 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
*/
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
- u32 val = atomic_read(&lock->val);
+ int val = atomic_read(&lock->val);
if (unlikely(val))
return 0;
@@ -67,19 +99,21 @@ static __always_inline int queued_spin_trylock(struct qspinlock *lock)
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+#ifndef queued_spin_lock
/**
* queued_spin_lock - acquire a queued spinlock
* @lock: Pointer to queued spinlock structure
*/
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
- u32 val = 0;
+ int val = 0;
if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
return;
queued_spin_lock_slowpath(lock, val);
}
+#endif
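
A minimal usage sketch of the fast paths just defined (illustrative only;
the contended case is handled by queued_spin_lock_slowpath() in
kernel/locking/qspinlock.c):

	static void qspinlock_demo(struct qspinlock *lock)
	{
		queued_spin_lock(lock);		/* one try_cmpxchg: 0 -> _Q_LOCKED_VAL */
		/* ... critical section ... */
		queued_spin_unlock(lock);	/* release store clearing the locked byte */
	}
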
#ifndef queued_spin_unlock
/**
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
index 56d1309d32f8..2fd1fb89ec36 100644
--- a/include/asm-generic/qspinlock_types.h
+++ b/include/asm-generic/qspinlock_types.h
@@ -9,15 +9,7 @@
#ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
#define __ASM_GENERIC_QSPINLOCK_TYPES_H
-/*
- * Including atomic.h with PARAVIRT on will cause compilation errors because
- * of recursive header file inclusion via paravirt_types.h. So don't include
- * it if PARAVIRT is on.
- */
-#ifndef CONFIG_PARAVIRT
#include <linux/types.h>
-#include <linux/atomic.h>
-#endif
typedef struct qspinlock {
union {
diff --git a/include/asm-generic/rwonce.h b/include/asm-generic/rwonce.h
new file mode 100644
index 000000000000..8d0a6280e982
--- /dev/null
+++ b/include/asm-generic/rwonce.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Prevent the compiler from merging or refetching reads or writes. The
+ * compiler is also forbidden from reordering successive instances of
+ * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
+ * particular ordering. One way to make the compiler aware of ordering is to
+ * put the two invocations of READ_ONCE or WRITE_ONCE in different C
+ * statements.
+ *
+ * These two macros will also work on aggregate data types like structs or
+ * unions.
+ *
+ * Their two major use cases are: (1) Mediating communication between
+ * process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ */
+#ifndef __ASM_GENERIC_RWONCE_H
+#define __ASM_GENERIC_RWONCE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler_types.h>
+#include <linux/kasan-checks.h>
+#include <linux/kcsan-checks.h>
+
+/*
+ * Yes, this permits 64-bit accesses on 32-bit architectures. These will
+ * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
+ * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
+ * (e.g. a virtual address) and a strong prevailing wind.
+ */
+#define compiletime_assert_rwonce_type(t) \
+ compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \
+ "Unsupported access size for {READ,WRITE}_ONCE().")
+
+/*
+ * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
+ * atomicity. Note that this may result in tears!
+ */
+#ifndef __READ_ONCE
+#define __READ_ONCE(x) (*(const volatile __unqual_scalar_typeof(x) *)&(x))
+#endif
+
+#define READ_ONCE(x) \
+({ \
+ compiletime_assert_rwonce_type(x); \
+ __READ_ONCE(x); \
+})
+
+#define __WRITE_ONCE(x, val) \
+do { \
+ *(volatile typeof(x) *)&(x) = (val); \
+} while (0)
+
+#define WRITE_ONCE(x, val) \
+do { \
+ compiletime_assert_rwonce_type(x); \
+ __WRITE_ONCE(x, val); \
+} while (0)
+
+static __no_sanitize_or_inline
+unsigned long __read_once_word_nocheck(const void *addr)
+{
+ return __READ_ONCE(*(unsigned long *)addr);
+}
+
+/*
+ * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
+ * word from memory atomically but without telling KASAN/KCSAN. This is
+ * usually used by unwinding code when walking the stack of a running process.
+ */
+#define READ_ONCE_NOCHECK(x) \
+({ \
+ compiletime_assert(sizeof(x) == sizeof(unsigned long), \
+ "Unsupported access size for READ_ONCE_NOCHECK()."); \
+ (typeof(x))__read_once_word_nocheck(&(x)); \
+})
+
+static __no_kasan_or_inline
+unsigned long read_word_at_a_time(const void *addr)
+{
+ kasan_check_read(addr, 1);
+ return *(unsigned long *)addr;
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_GENERIC_RWONCE_H */
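
A usage sketch of the first use case named in the header comment -- process
context polling a flag set from an IRQ handler (hypothetical driver code,
assuming <linux/interrupt.h> for irqreturn_t):

	static int done;

	static irqreturn_t demo_irq(int irq, void *data)
	{
		WRITE_ONCE(done, 1);		/* the store cannot be torn or elided */
		return IRQ_HANDLED;
	}

	static void demo_wait(void)
	{
		while (!READ_ONCE(done))	/* forces a fresh load each iteration */
			cpu_relax();
	}
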
diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h
index 1321ac7821d7..6b6f42bc58f9 100644
--- a/include/asm-generic/seccomp.h
+++ b/include/asm-generic/seccomp.h
@@ -33,7 +33,7 @@ static inline const int *get_compat_mode1_syscalls(void)
static const int mode1_syscalls_32[] = {
__NR_seccomp_read_32, __NR_seccomp_write_32,
__NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
- 0, /* null terminated */
+ -1, /* negative terminated */
};
return mode1_syscalls_32;
}
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index d1779d442aa5..db13bb620f52 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -53,43 +53,29 @@ extern char __ctors_start[], __ctors_end[];
/* Start and end of .opd section - used for function descriptors. */
extern char __start_opd[], __end_opd[];
+/* Start and end of instrumentation protected text section */
+extern char __noinstr_text_start[], __noinstr_text_end[];
+
extern __visible const void __nosave_begin, __nosave_end;
/* Function descriptor handling (if any). Override in asm/sections.h */
-#ifndef dereference_function_descriptor
-#define dereference_function_descriptor(p) (p)
-#define dereference_kernel_function_descriptor(p) (p)
-#endif
-
-/* random extra sections (if any). Override
- * in asm/sections.h */
-#ifndef arch_is_kernel_text
-static inline int arch_is_kernel_text(unsigned long addr)
-{
- return 0;
-}
-#endif
-
-#ifndef arch_is_kernel_data
-static inline int arch_is_kernel_data(unsigned long addr)
-{
- return 0;
-}
+#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS
+void *dereference_function_descriptor(void *ptr);
+void *dereference_kernel_function_descriptor(void *ptr);
+#else
+#define dereference_function_descriptor(p) ((void *)(p))
+#define dereference_kernel_function_descriptor(p) ((void *)(p))
+
+/* An address is simply the address of the function. */
+typedef struct {
+ unsigned long addr;
+} func_desc_t;
#endif
-/*
- * Check if an address is part of freed initmem. This is needed on architectures
- * with virt == phys kernel mapping, for code that wants to check if an address
- * is part of a static object within [_stext, _end]. After initmem is freed,
- * memory can be allocated from it, and such allocations would then have
- * addresses within the range [_stext, _end].
- */
-#ifndef arch_is_kernel_initmem_freed
-static inline int arch_is_kernel_initmem_freed(unsigned long addr)
+static inline bool have_function_descriptors(void)
{
- return 0;
+ return IS_ENABLED(CONFIG_HAVE_FUNCTION_DESCRIPTORS);
}
-#endif
/**
* memory_contains - checks if an object is contained within a memory region
@@ -111,7 +97,7 @@ static inline bool memory_contains(void *begin, void *end, void *virt,
/**
* memory_intersects - checks if the region occupied by an object intersects
* with another memory region
- * @begin: virtual address of the beginning of the memory regien
+ * @begin: virtual address of the beginning of the memory region
* @end: virtual address of the end of the memory region
* @virt: virtual address of the memory object
* @size: size of the memory object
@@ -124,7 +110,10 @@ static inline bool memory_intersects(void *begin, void *end, void *virt,
{
void *vend = virt + size;
- return (virt >= begin && virt < end) || (vend >= begin && vend < end);
+ if (virt < end && vend > begin)
+ return true;
+
+ return false;
}
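
A worked example of the case the old check missed: with begin = 0x1000,
end = 0x2000, virt = 0x0800 and size = 0x2000 (so vend = 0x2800), the object
covers the whole region, yet both of the old disjuncts are false; the new
"virt < end && vend > begin" test correctly reports the intersection.
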
/**
@@ -156,6 +145,28 @@ static inline bool init_section_intersects(void *virt, size_t size)
}
/**
+ * is_kernel_core_data - checks if the pointer address is located in the
+ * .data or .bss section
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in .data or .bss, false otherwise.
+ * Note: on some archs it may return true for core RODATA and false on
+ * others, but it will always be true for core RW data.
+ */
+static inline bool is_kernel_core_data(unsigned long addr)
+{
+ if (addr >= (unsigned long)_sdata && addr < (unsigned long)_edata)
+ return true;
+
+ if (addr >= (unsigned long)__bss_start &&
+ addr < (unsigned long)__bss_stop)
+ return true;
+
+ return false;
+}
+
+/**
* is_kernel_rodata - checks if the pointer address is located in the
* .rodata section
*
@@ -169,4 +180,51 @@ static inline bool is_kernel_rodata(unsigned long addr)
addr < (unsigned long)__end_rodata;
}
+/**
+ * is_kernel_inittext - checks if the pointer address is located in the
+ * .init.text section
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in .init.text, false otherwise.
+ */
+static inline bool is_kernel_inittext(unsigned long addr)
+{
+ return addr >= (unsigned long)_sinittext &&
+ addr < (unsigned long)_einittext;
+}
+
+/**
+ * __is_kernel_text - checks if the pointer address is located in the
+ * .text section
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in .text, false otherwise.
+ * Note: an internal helper; it only checks the range _stext to _etext.
+ */
+static inline bool __is_kernel_text(unsigned long addr)
+{
+ return addr >= (unsigned long)_stext &&
+ addr < (unsigned long)_etext;
+}
+
+/**
+ * __is_kernel - checks if the pointer address is located in the kernel range
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in the kernel range, false otherwise.
+ * Note: an internal helper; it checks the range _stext to _end,
+ * plus the range from __init_begin to __init_end, which can be outside
+ * of the _stext to _end range.
+ */
+static inline bool __is_kernel(unsigned long addr)
+{
+ return ((addr >= (unsigned long)_stext &&
+ addr < (unsigned long)_end) ||
+ (addr >= (unsigned long)__init_begin &&
+ addr < (unsigned long)__init_end));
+}
+
#endif /* _ASM_GENERIC_SECTIONS_H_ */
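
To show how the new helpers compose, a small hypothetical classifier (not
part of this patch):

	static const char *classify_kernel_addr(unsigned long addr)
	{
		if (is_kernel_inittext(addr))
			return ".init.text";
		if (__is_kernel_text(addr))
			return ".text";
		if (is_kernel_rodata(addr))
			return ".rodata";
		if (is_kernel_core_data(addr))
			return ".data/.bss";
		return __is_kernel(addr) ? "other kernel range" : "not core kernel";
	}
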
diff --git a/include/asm-generic/signal.h b/include/asm-generic/signal.h
index c53984fa9761..663dd6d0795d 100644
--- a/include/asm-generic/signal.h
+++ b/include/asm-generic/signal.h
@@ -5,8 +5,6 @@
#include <uapi/asm-generic/signal.h>
#ifndef __ASSEMBLY__
-#ifdef SA_RESTORER
-#endif
#include <asm/sigcontext.h>
#undef __HAVE_ARCH_SIG_BITOPS
diff --git a/include/asm-generic/softirq_stack.h b/include/asm-generic/softirq_stack.h
new file mode 100644
index 000000000000..2a67aed9ac52
--- /dev/null
+++ b/include/asm-generic/softirq_stack.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __ASM_GENERIC_SOFTIRQ_STACK_H
+#define __ASM_GENERIC_SOFTIRQ_STACK_H
+
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
+void do_softirq_own_stack(void);
+#else
+static inline void do_softirq_own_stack(void)
+{
+ __do_softirq();
+}
+#endif
+
+#endif
diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h
index adaf6acab172..fdfebcb050f4 100644
--- a/include/asm-generic/spinlock.h
+++ b/include/asm-generic/spinlock.h
@@ -1,12 +1,92 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_GENERIC_SPINLOCK_H
-#define __ASM_GENERIC_SPINLOCK_H
+
/*
- * You need to implement asm/spinlock.h for SMP support. The generic
- * version does not handle SMP.
+ * 'Generic' ticket-lock implementation.
+ *
+ * It relies on atomic_fetch_add() having well defined forward progress
+ * guarantees under contention. If your architecture cannot provide this, stick
+ * to a test-and-set lock.
+ *
+ * It also relies on atomic_fetch_add() being safe vs smp_store_release() on a
+ * sub-word of the value. This is generally true for anything LL/SC although
+ * you'd be hard pressed to find anything useful in architecture specifications
+ * about this. If your architecture cannot do this you might be better off with
+ * a test-and-set.
+ *
+ * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence
+ * uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with
+ * a full fence after the spin to upgrade the otherwise-RCpc
+ * atomic_cond_read_acquire().
+ *
+ * The implementation uses smp_cond_load_acquire() to spin, so if the
+ * architecture has WFE-like instructions to sleep instead of polling for word
+ * modifications, be sure to implement that (see ARM64 for example).
+ *
*/
-#ifdef CONFIG_SMP
-#error need an architecture specific asm/spinlock.h
-#endif
+
+#ifndef __ASM_GENERIC_SPINLOCK_H
+#define __ASM_GENERIC_SPINLOCK_H
+
+#include <linux/atomic.h>
+#include <asm-generic/spinlock_types.h>
+
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ u32 val = atomic_fetch_add(1<<16, lock);
+ u16 ticket = val >> 16;
+
+ if (ticket == (u16)val)
+ return;
+
+ /*
+ * atomic_cond_read_acquire() is RCpc, but rather than defining a
+ * custom cond_read_rcsc() here we just emit a full fence. We only
+ * need the prior reads before subsequent writes ordering from
+ * smp_mb(), but as atomic_cond_read_acquire() just emits reads and we
+ * have no outstanding writes due to the atomic_fetch_add() the extra
+ * orderings are free.
+ */
+ atomic_cond_read_acquire(lock, ticket == (u16)VAL);
+ smp_mb();
+}
+
+static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock)
+{
+ u32 old = atomic_read(lock);
+
+ if ((old >> 16) != (old & 0xffff))
+ return false;
+
+ return atomic_try_cmpxchg(lock, &old, old + (1<<16)); /* SC, for RCsc */
+}
+
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
+ u32 val = atomic_read(lock);
+
+ smp_store_release(ptr, (u16)val + 1);
+}
+
+static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ u32 val = atomic_read(lock);
+
+ return ((val >> 16) != (val & 0xffff));
+}
+
+static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+ u32 val = atomic_read(lock);
+
+ return (s16)((val >> 16) - (val & 0xffff)) > 1;
+}
+
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return !arch_spin_is_locked(&lock);
+}
+
+#include <asm/qrwlock.h>
#endif /* __ASM_GENERIC_SPINLOCK_H */
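
A worked example of the ticket encoding used above (high half-word = next
ticket to hand out, low half-word = current owner):

	/*
	 * Fresh lock: counter == 0x00000000.
	 *
	 * CPU A arch_spin_lock():   fetch_add(1<<16) returns 0x00000000;
	 *                           ticket 0 == owner 0 -> acquired.
	 *                           counter is now 0x00010000.
	 * CPU B arch_spin_lock():   fetch_add(1<<16) returns 0x00010000;
	 *                           ticket 1 != owner 0 -> spins on the owner half.
	 * CPU A arch_spin_unlock(): smp_store_release() bumps the owner half-word;
	 *                           counter is now 0x00010001.
	 * CPU B:                    atomic_cond_read_acquire() observes owner 1 ==
	 *                           its ticket -> acquired.
	 */
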
diff --git a/include/asm-generic/spinlock_types.h b/include/asm-generic/spinlock_types.h
new file mode 100644
index 000000000000..8962bb730945
--- /dev/null
+++ b/include/asm-generic/spinlock_types.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_GENERIC_SPINLOCK_TYPES_H
+#define __ASM_GENERIC_SPINLOCK_TYPES_H
+
+#include <linux/types.h>
+typedef atomic_t arch_spinlock_t;
+
+/*
+ * qrwlock_types depends on arch_spinlock_t, so we must typedef that before the
+ * include.
+ */
+#include <asm/qrwlock_types.h>
+
+#define __ARCH_SPIN_LOCK_UNLOCKED ATOMIC_INIT(0)
+
+#endif /* __ASM_GENERIC_SPINLOCK_TYPES_H */
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
index f3135e734387..5a80fe728dc8 100644
--- a/include/asm-generic/syscall.h
+++ b/include/asm-generic/syscall.h
@@ -43,9 +43,9 @@ int syscall_get_nr(struct task_struct *task, struct pt_regs *regs);
* @regs: task_pt_regs() of @task
*
* It's only valid to call this when @task is stopped for system
- * call exit tracing (due to TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT),
- * after tracehook_report_syscall_entry() returned nonzero to prevent
- * the system call from taking place.
+ * call exit tracing (due to %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_AUDIT), after ptrace_report_syscall_entry()
+ * returned nonzero to prevent the system call from taking place.
*
* This rolls back the register state in @regs so it's as if the
* system call instruction was a no-op. The registers containing
@@ -63,7 +63,8 @@ void syscall_rollback(struct task_struct *task, struct pt_regs *regs);
* Returns 0 if the system call succeeded, or -ERRORCODE if it failed.
*
* It's only valid to call this when @task is stopped for tracing on exit
- * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * from a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_AUDIT.
*/
long syscall_get_error(struct task_struct *task, struct pt_regs *regs);
@@ -76,7 +77,8 @@ long syscall_get_error(struct task_struct *task, struct pt_regs *regs);
* This value is meaningless if syscall_get_error() returned nonzero.
*
* It's only valid to call this when @task is stopped for tracing on exit
- * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * from a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_AUDIT.
*/
long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs);
@@ -93,7 +95,8 @@ long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs);
* code; the user sees a failed system call with this errno code.
*
* It's only valid to call this when @task is stopped for tracing on exit
- * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * from a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_AUDIT.
*/
void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
int error, long val);
@@ -108,34 +111,21 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
* @args[0], and so on.
*
* It's only valid to call this when @task is stopped for tracing on
- * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * entry to a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_AUDIT.
*/
void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
unsigned long *args);
/**
- * syscall_set_arguments - change system call parameter value
- * @task: task of interest, must be in system call entry tracing
- * @regs: task_pt_regs() of @task
- * @args: array of argument values to store
- *
- * Changes 6 arguments to the system call.
- * The first argument gets value @args[0], and so on.
- *
- * It's only valid to call this when @task is stopped for tracing on
- * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
- */
-void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
- const unsigned long *args);
-
-/**
* syscall_get_arch - return the AUDIT_ARCH for the current system call
* @task: task of interest, must be blocked
*
* Returns the AUDIT_ARCH_* based on the system call convention in use.
*
* It's only valid to call this when @task is stopped on entry to a system
- * call, due to %TIF_SYSCALL_TRACE, %TIF_SYSCALL_AUDIT, or %TIF_SECCOMP.
+ * call, due to %SYSCALL_WORK_SYSCALL_TRACE, %SYSCALL_WORK_SYSCALL_AUDIT, or
+ * %SYSCALL_WORK_SECCOMP.
*
* Architectures which permit CONFIG_HAVE_ARCH_SECCOMP_FILTER must
* provide an implementation of this.
diff --git a/include/asm-generic/termios-base.h b/include/asm-generic/termios-base.h
deleted file mode 100644
index 59c5a3bd4a6e..000000000000
--- a/include/asm-generic/termios-base.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* termios.h: generic termios/termio user copying/translation
- */
-
-#ifndef _ASM_GENERIC_TERMIOS_BASE_H
-#define _ASM_GENERIC_TERMIOS_BASE_H
-
-#include <linux/uaccess.h>
-
-#ifndef __ARCH_TERMIO_GETPUT
-
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-static inline int user_termio_to_kernel_termios(struct ktermios *termios,
- struct termio __user *termio)
-{
- unsigned short tmp;
-
- if (get_user(tmp, &termio->c_iflag) < 0)
- goto fault;
- termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp;
-
- if (get_user(tmp, &termio->c_oflag) < 0)
- goto fault;
- termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp;
-
- if (get_user(tmp, &termio->c_cflag) < 0)
- goto fault;
- termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp;
-
- if (get_user(tmp, &termio->c_lflag) < 0)
- goto fault;
- termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp;
-
- if (get_user(termios->c_line, &termio->c_line) < 0)
- goto fault;
-
- if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0)
- goto fault;
-
- return 0;
-
- fault:
- return -EFAULT;
-}
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-static inline int kernel_termios_to_user_termio(struct termio __user *termio,
- struct ktermios *termios)
-{
- if (put_user(termios->c_iflag, &termio->c_iflag) < 0 ||
- put_user(termios->c_oflag, &termio->c_oflag) < 0 ||
- put_user(termios->c_cflag, &termio->c_cflag) < 0 ||
- put_user(termios->c_lflag, &termio->c_lflag) < 0 ||
- put_user(termios->c_line, &termio->c_line) < 0 ||
- copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0)
- return -EFAULT;
-
- return 0;
-}
-
-#ifndef user_termios_to_kernel_termios
-#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
-#endif
-
-#ifndef kernel_termios_to_user_termios
-#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
-#endif
-
-#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
-#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
-
-#endif /* __ARCH_TERMIO_GETPUT */
-
-#endif /* _ASM_GENERIC_TERMIOS_BASE_H */
diff --git a/include/asm-generic/termios.h b/include/asm-generic/termios.h
deleted file mode 100644
index b1398d0d4a1d..000000000000
--- a/include/asm-generic/termios.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_TERMIOS_H
-#define _ASM_GENERIC_TERMIOS_H
-
-
-#include <linux/uaccess.h>
-#include <uapi/asm-generic/termios.h>
-
-/* intr=^C quit=^\ erase=del kill=^U
- eof=^D vtime=\0 vmin=\1 sxtc=\0
- start=^Q stop=^S susp=^Z eol=\0
- reprint=^R discard=^U werase=^W lnext=^V
- eol2=\0
-*/
-#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
-
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-static inline int user_termio_to_kernel_termios(struct ktermios *termios,
- const struct termio __user *termio)
-{
- unsigned short tmp;
-
- if (get_user(tmp, &termio->c_iflag) < 0)
- goto fault;
- termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp;
-
- if (get_user(tmp, &termio->c_oflag) < 0)
- goto fault;
- termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp;
-
- if (get_user(tmp, &termio->c_cflag) < 0)
- goto fault;
- termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp;
-
- if (get_user(tmp, &termio->c_lflag) < 0)
- goto fault;
- termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp;
-
- if (get_user(termios->c_line, &termio->c_line) < 0)
- goto fault;
-
- if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0)
- goto fault;
-
- return 0;
-
- fault:
- return -EFAULT;
-}
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-static inline int kernel_termios_to_user_termio(struct termio __user *termio,
- struct ktermios *termios)
-{
- if (put_user(termios->c_iflag, &termio->c_iflag) < 0 ||
- put_user(termios->c_oflag, &termio->c_oflag) < 0 ||
- put_user(termios->c_cflag, &termio->c_cflag) < 0 ||
- put_user(termios->c_lflag, &termio->c_lflag) < 0 ||
- put_user(termios->c_line, &termio->c_line) < 0 ||
- copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0)
- return -EFAULT;
-
- return 0;
-}
-
-#ifdef TCGETS2
-static inline int user_termios_to_kernel_termios(struct ktermios *k,
- struct termios2 __user *u)
-{
- return copy_from_user(k, u, sizeof(struct termios2));
-}
-
-static inline int kernel_termios_to_user_termios(struct termios2 __user *u,
- struct ktermios *k)
-{
- return copy_to_user(u, k, sizeof(struct termios2));
-}
-
-static inline int user_termios_to_kernel_termios_1(struct ktermios *k,
- struct termios __user *u)
-{
- return copy_from_user(k, u, sizeof(struct termios));
-}
-
-static inline int kernel_termios_to_user_termios_1(struct termios __user *u,
- struct ktermios *k)
-{
- return copy_to_user(u, k, sizeof(struct termios));
-}
-#else /* TCGETS2 */
-static inline int user_termios_to_kernel_termios(struct ktermios *k,
- struct termios __user *u)
-{
- return copy_from_user(k, u, sizeof(struct termios));
-}
-
-static inline int kernel_termios_to_user_termios(struct termios __user *u,
- struct ktermios *k)
-{
- return copy_to_user(u, k, sizeof(struct termios));
-}
-#endif /* TCGETS2 */
-
-#endif /* _ASM_GENERIC_TERMIOS_H */
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index f391f6b500b4..492dce43236e 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -13,7 +13,7 @@
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
-#include <asm/pgalloc.h>
+#include <linux/hugetlb_inline.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
@@ -46,7 +46,9 @@
*
* The mmu_gather API consists of:
*
- * - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
+ * - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
+ *
+ * start and finish a mmu_gather
*
* Finish in particular will issue a (final) TLB invalidate and free
* all (remaining) queued pages.
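
The start/finish pairing, sketched (signatures per the
tlb_gather_mmu()/tlb_gather_mmu_fullmm() split this hunk documents;
unmap_vma_range() is a hypothetical stand-in for the unmap step):

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);		/* begin: empty batch, empty range */
	unmap_vma_range(&tlb, vma, start, end);	/* queues pages and TLB ranges */
	tlb_finish_mmu(&tlb);			/* final TLB invalidate + free queued pages */
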
@@ -91,7 +93,7 @@
*
* - mmu_gather::fullmm
*
- * A flag set by tlb_gather_mmu() to indicate we're going to free
+ * A flag set by tlb_gather_mmu_fullmm() to indicate we're going to free
* the entire mm; this allows a number of optimizations.
*
* - We can ignore tlb_{start,end}_vma(); because we don't
@@ -156,9 +158,24 @@
* Useful if your architecture doesn't use IPIs for remote TLB invalidates
* and therefore doesn't naturally serialize with software page-table walkers.
*
+ * MMU_GATHER_NO_FLUSH_CACHE
+ *
+ * Indicates the architecture has flush_cache_range() but it need *not* be
+ * called before unmapping a VMA.
+ *
+ * NOTE: strictly speaking we shouldn't have this knob and instead rely on
+ * flush_cache_range() being a NOP, except Sparc64 seems to be
+ * different here.
+ *
+ * MMU_GATHER_MERGE_VMAS
+ *
+ * Indicates the architecture wants to merge ranges over VMAs; typical when
+ * multiple range invalidates are more expensive than a full invalidate.
+ *
* MMU_GATHER_NO_RANGE
*
- * Use this if your architecture lacks an efficient flush_tlb_range().
+ * Use this if your architecture lacks an efficient flush_tlb_range(). This
+ * option implies MMU_GATHER_MERGE_VMAS above.
*
* MMU_GATHER_NO_GATHER
*
@@ -178,7 +195,7 @@ struct mmu_table_batch {
struct rcu_head rcu;
#endif
unsigned int nr;
- void *tables[0];
+ void *tables[];
};
#define MAX_TABLE_BATCH \
@@ -225,7 +242,7 @@ struct mmu_gather_batch {
struct mmu_gather_batch *next;
unsigned int nr;
unsigned int max;
- struct page *pages[0];
+ struct page *pages[];
};
#define MAX_GATHER_BATCH \
@@ -286,6 +303,7 @@ struct mmu_gather {
*/
unsigned int vma_exec : 1;
unsigned int vma_huge : 1;
+ unsigned int vma_pfn : 1;
unsigned int batch_count;
@@ -332,8 +350,8 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
#ifdef CONFIG_MMU_GATHER_NO_RANGE
-#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
-#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
+#if defined(tlb_flush)
+#error MMU_GATHER_NO_RANGE relies on default tlb_flush()
#endif
/*
@@ -350,20 +368,9 @@ static inline void tlb_flush(struct mmu_gather *tlb)
flush_tlb_mm(tlb->mm);
}
-static inline void
-tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
-#define tlb_end_vma tlb_end_vma
-static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
#else /* CONFIG_MMU_GATHER_NO_RANGE */
#ifndef tlb_flush
-
-#if defined(tlb_start_vma) || defined(tlb_end_vma)
-#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
-#endif
-
/*
* When an architecture does not provide its own tlb_flush() implementation
* but does have a reasonably efficient flush_vma_range() implementation
@@ -383,6 +390,9 @@ static inline void tlb_flush(struct mmu_gather *tlb)
flush_tlb_range(&vma, tlb->start, tlb->end);
}
}
+#endif
+
+#endif /* CONFIG_MMU_GATHER_NO_RANGE */
static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
@@ -398,19 +408,11 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
* We rely on tlb_end_vma() to issue a flush, such that when we reset
* these values the batch is empty.
*/
- tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB);
+ tlb->vma_huge = is_vm_hugetlb_page(vma);
tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
+ tlb->vma_pfn = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
}
-#else
-
-static inline void
-tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
-#endif
-
-#endif /* CONFIG_MMU_GATHER_NO_RANGE */
-
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
/*
@@ -484,32 +486,68 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
* case where we're doing a full MM flush. When we're doing a munmap,
* the vmas are adjusted to only cover the region to be torn down.
*/
-#ifndef tlb_start_vma
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
if (tlb->fullmm)
return;
tlb_update_vma_flags(tlb, vma);
+#ifndef CONFIG_MMU_GATHER_NO_FLUSH_CACHE
flush_cache_range(vma, vma->vm_start, vma->vm_end);
-}
#endif
+}
-#ifndef tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
if (tlb->fullmm)
return;
/*
- * Do a TLB flush and reset the range at VMA boundaries; this avoids
- * the ranges growing with the unused space between consecutive VMAs,
- * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
- * this.
+ * VM_PFNMAP is more fragile because the core mm will not track the
+ * page mapcount -- there might not be page-frames for these PFNs after
+ * all. Force flush TLBs for such ranges to avoid munmap() vs
+ * unmap_mapping_range() races.
*/
- tlb_flush_mmu_tlbonly(tlb);
+ if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
+ /*
+ * Do a TLB flush and reset the range at VMA boundaries; this avoids
+ * the ranges growing with the unused space between consecutive VMAs.
+ */
+ tlb_flush_mmu_tlbonly(tlb);
+ }
+}
+
+/*
+ * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
+ * and set corresponding cleared_*.
+ */
+static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
+ unsigned long address, unsigned long size)
+{
+ __tlb_adjust_range(tlb, address, size);
+ tlb->cleared_ptes = 1;
+}
+
+static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
+ unsigned long address, unsigned long size)
+{
+ __tlb_adjust_range(tlb, address, size);
+ tlb->cleared_pmds = 1;
+}
+
+static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
+ unsigned long address, unsigned long size)
+{
+ __tlb_adjust_range(tlb, address, size);
+ tlb->cleared_puds = 1;
+}
+
+static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
+ unsigned long address, unsigned long size)
+{
+ __tlb_adjust_range(tlb, address, size);
+ tlb->cleared_p4ds = 1;
}
-#endif
#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
@@ -524,19 +562,21 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
*/
#define tlb_remove_tlb_entry(tlb, ptep, address) \
do { \
- __tlb_adjust_range(tlb, address, PAGE_SIZE); \
- tlb->cleared_ptes = 1; \
+ tlb_flush_pte_range(tlb, address, PAGE_SIZE); \
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
do { \
unsigned long _sz = huge_page_size(h); \
- __tlb_adjust_range(tlb, address, _sz); \
- if (_sz == PMD_SIZE) \
- tlb->cleared_pmds = 1; \
- else if (_sz == PUD_SIZE) \
- tlb->cleared_puds = 1; \
+ if (_sz >= P4D_SIZE) \
+ tlb_flush_p4d_range(tlb, address, _sz); \
+ else if (_sz >= PUD_SIZE) \
+ tlb_flush_pud_range(tlb, address, _sz); \
+ else if (_sz >= PMD_SIZE) \
+ tlb_flush_pmd_range(tlb, address, _sz); \
+ else \
+ tlb_flush_pte_range(tlb, address, _sz); \
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
@@ -550,8 +590,7 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
do { \
- __tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE); \
- tlb->cleared_pmds = 1; \
+ tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE); \
__tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
} while (0)
@@ -565,8 +604,7 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \
do { \
- __tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \
- tlb->cleared_puds = 1; \
+ tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE); \
__tlb_remove_pud_tlb_entry(tlb, pudp, address); \
} while (0)
@@ -591,9 +629,8 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address) \
do { \
- __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb_flush_pmd_range(tlb, address, PAGE_SIZE); \
tlb->freed_tables = 1; \
- tlb->cleared_pmds = 1; \
__pte_free_tlb(tlb, ptep, address); \
} while (0)
#endif
@@ -601,9 +638,8 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address) \
do { \
- __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb_flush_pud_range(tlb, address, PAGE_SIZE); \
tlb->freed_tables = 1; \
- tlb->cleared_puds = 1; \
__pmd_free_tlb(tlb, pmdp, address); \
} while (0)
#endif
@@ -611,9 +647,8 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address) \
do { \
- __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb_flush_p4d_range(tlb, address, PAGE_SIZE); \
tlb->freed_tables = 1; \
- tlb->cleared_p4ds = 1; \
__pud_free_tlb(tlb, pudp, address); \
} while (0)
#endif
@@ -627,6 +662,20 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
} while (0)
#endif
+#ifndef pte_needs_flush
+static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
+{
+ return true;
+}
+#endif
+
+#ifndef huge_pmd_needs_flush
+static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
+{
+ return true;
+}
+#endif
+
#endif /* CONFIG_MMU */
#endif /* _ASM_GENERIC__TLB_H */
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 238873739550..4dbe715be65b 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -45,10 +45,10 @@
#endif
#ifndef cpumask_of_node
- #ifdef CONFIG_NEED_MULTIPLE_NODES
+ #ifdef CONFIG_NUMA
#define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
#else
- #define cpumask_of_node(node) ((void)node, cpu_online_mask)
+ #define cpumask_of_node(node) ((void)(node), cpu_online_mask)
#endif
#endif
#ifndef pcibus_to_node
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index e935318804f8..a5be9e61a2a2 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -8,100 +8,92 @@
* address space, e.g. all NOMMU machines.
*/
#include <linux/string.h>
+#include <asm-generic/access_ok.h>
#ifdef CONFIG_UACCESS_MEMCPY
-static inline __must_check unsigned long
-raw_copy_from_user(void *to, const void __user * from, unsigned long n)
+#include <asm/unaligned.h>
+
+static __always_inline int
+__get_user_fn(size_t size, const void __user *from, void *to)
{
- if (__builtin_constant_p(n)) {
- switch(n) {
- case 1:
- *(u8 *)to = *(u8 __force *)from;
- return 0;
- case 2:
- *(u16 *)to = *(u16 __force *)from;
- return 0;
- case 4:
- *(u32 *)to = *(u32 __force *)from;
- return 0;
-#ifdef CONFIG_64BIT
- case 8:
- *(u64 *)to = *(u64 __force *)from;
- return 0;
-#endif
- }
+ BUILD_BUG_ON(!__builtin_constant_p(size));
+
+ switch (size) {
+ case 1:
+ *(u8 *)to = *((u8 __force *)from);
+ return 0;
+ case 2:
+ *(u16 *)to = get_unaligned((u16 __force *)from);
+ return 0;
+ case 4:
+ *(u32 *)to = get_unaligned((u32 __force *)from);
+ return 0;
+ case 8:
+ *(u64 *)to = get_unaligned((u64 __force *)from);
+ return 0;
+ default:
+ BUILD_BUG();
+ return 0;
}
- memcpy(to, (const void __force *)from, n);
- return 0;
}
+#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
-static inline __must_check unsigned long
-raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+static __always_inline int
+__put_user_fn(size_t size, void __user *to, void *from)
{
- if (__builtin_constant_p(n)) {
- switch(n) {
- case 1:
- *(u8 __force *)to = *(u8 *)from;
- return 0;
- case 2:
- *(u16 __force *)to = *(u16 *)from;
- return 0;
- case 4:
- *(u32 __force *)to = *(u32 *)from;
- return 0;
-#ifdef CONFIG_64BIT
- case 8:
- *(u64 __force *)to = *(u64 *)from;
- return 0;
-#endif
- default:
- break;
- }
- }
+ BUILD_BUG_ON(!__builtin_constant_p(size));
- memcpy((void __force *)to, from, n);
- return 0;
+ switch (size) {
+ case 1:
+ *(u8 __force *)to = *(u8 *)from;
+ return 0;
+ case 2:
+ put_unaligned(*(u16 *)from, (u16 __force *)to);
+ return 0;
+ case 4:
+ put_unaligned(*(u32 *)from, (u32 __force *)to);
+ return 0;
+ case 8:
+ put_unaligned(*(u64 *)from, (u64 __force *)to);
+ return 0;
+ default:
+ BUILD_BUG();
+ return 0;
+ }
}
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
-#endif /* CONFIG_UACCESS_MEMCPY */
+#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)
-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+ *((type *)dst) = get_unaligned((type *)(src)); \
+ if (0) /* make sure the label looks used to the compiler */ \
+ goto err_label; \
+} while (0)
-#ifndef KERNEL_DS
-#define KERNEL_DS MAKE_MM_SEG(~0UL)
-#endif
-
-#ifndef USER_DS
-#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
-#endif
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+ put_unaligned(*((type *)src), (type *)(dst)); \
+ if (0) /* make sure the label looks used to the compiler */ \
+ goto err_label; \
+} while (0)
-#ifndef get_fs
-#define get_fs() (current_thread_info()->addr_limit)
-
-static inline void set_fs(mm_segment_t fs)
+static inline __must_check unsigned long
+raw_copy_from_user(void *to, const void __user * from, unsigned long n)
{
- current_thread_info()->addr_limit = fs;
+ memcpy(to, (const void __force *)from, n);
+ return 0;
}
-#endif
-
-#ifndef segment_eq
-#define segment_eq(a, b) ((a).seg == (b).seg)
-#endif
-
-#define access_ok(addr, size) __access_ok((unsigned long)(addr),(size))
-/*
- * The architecture should really override this if possible, at least
- * doing a check on the get_fs()
- */
-#ifndef __access_ok
-static inline int __access_ok(unsigned long addr, unsigned long size)
+static inline __must_check unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- return 1;
+ memcpy((void __force *)to, from, n);
+ return 0;
}
-#endif
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
+#endif /* CONFIG_UACCESS_MEMCPY */
/*
* These are the main single-value transfer routines. They automatically
@@ -213,50 +205,6 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
extern int __get_user_bad(void) __attribute__((noreturn));
/*
- * Copy a null terminated string from userspace.
- */
-#ifndef __strncpy_from_user
-static inline long
-__strncpy_from_user(char *dst, const char __user *src, long count)
-{
- char *tmp;
- strncpy(dst, (const char __force *)src, count);
- for (tmp = dst; *tmp && count > 0; tmp++, count--)
- ;
- return (tmp - dst);
-}
-#endif
-
-static inline long
-strncpy_from_user(char *dst, const char __user *src, long count)
-{
- if (!access_ok(src, 1))
- return -EFAULT;
- return __strncpy_from_user(dst, src, count);
-}
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 on exception, a value greater than N if too long
- */
-#ifndef __strnlen_user
-#define __strnlen_user(s, n) (strnlen((s), (n)) + 1)
-#endif
-
-/*
- * Unlike strnlen, strnlen_user includes the nul terminator in
- * its returned count. Callers should check for a returned value
- * greater than N as an indication the string is too long.
- */
-static inline long strnlen_user(const char __user *src, long n)
-{
- if (!access_ok(src, 1))
- return 0;
- return __strnlen_user(src, n);
-}
-
-/*
* Zero Userspace
*/
#ifndef __clear_user
@@ -280,4 +228,8 @@ clear_user(void __user *to, unsigned long n)
#include <asm/extable.h>
+__must_check long strncpy_from_user(char *dst, const char __user *src,
+ long count);
+__must_check long strnlen_user(const char __user *src, long n);
+
#endif /* __ASM_GENERIC_UACCESS_H */
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
index 374c940e9be1..699650f81970 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/asm-generic/unaligned.h
@@ -6,31 +6,150 @@
* This is the most generic implementation of unaligned accesses
* and should work almost anywhere.
*/
+#include <linux/unaligned/packed_struct.h>
#include <asm/byteorder.h>
-/* Set by the arch if it can handle unaligned accesses in hardware. */
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-# include <linux/unaligned/access_ok.h>
-#endif
-
-#if defined(__LITTLE_ENDIAN)
-# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-# include <linux/unaligned/le_struct.h>
-# include <linux/unaligned/be_byteshift.h>
-# endif
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_le
-# define put_unaligned __put_unaligned_le
-#elif defined(__BIG_ENDIAN)
-# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-# include <linux/unaligned/be_struct.h>
-# include <linux/unaligned/le_byteshift.h>
-# endif
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
-#else
-# error need to define endianess
-#endif
+#define __get_unaligned_t(type, ptr) ({ \
+ const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x; \
+})
+
+#define __put_unaligned_t(type, val, ptr) do { \
+ struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x = (val); \
+} while (0)
+
+#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
+#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr))
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+ return le16_to_cpu(__get_unaligned_t(__le16, p));
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+ return le32_to_cpu(__get_unaligned_t(__le32, p));
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+ return le64_to_cpu(__get_unaligned_t(__le64, p));
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+ __put_unaligned_t(__le16, cpu_to_le16(val), p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+ __put_unaligned_t(__le32, cpu_to_le32(val), p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+ __put_unaligned_t(__le64, cpu_to_le64(val), p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+ return be16_to_cpu(__get_unaligned_t(__be16, p));
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+ return be32_to_cpu(__get_unaligned_t(__be32, p));
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+ return be64_to_cpu(__get_unaligned_t(__be64, p));
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+ __put_unaligned_t(__be16, cpu_to_be16(val), p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+ __put_unaligned_t(__be32, cpu_to_be32(val), p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+ __put_unaligned_t(__be64, cpu_to_be64(val), p);
+}
+
+static inline u32 __get_unaligned_be24(const u8 *p)
+{
+ return p[0] << 16 | p[1] << 8 | p[2];
+}
+
+static inline u32 get_unaligned_be24(const void *p)
+{
+ return __get_unaligned_be24(p);
+}
+
+static inline u32 __get_unaligned_le24(const u8 *p)
+{
+ return p[0] | p[1] << 8 | p[2] << 16;
+}
+
+static inline u32 get_unaligned_le24(const void *p)
+{
+ return __get_unaligned_le24(p);
+}
+
+static inline void __put_unaligned_be24(const u32 val, u8 *p)
+{
+ *p++ = val >> 16;
+ *p++ = val >> 8;
+ *p++ = val;
+}
+
+static inline void put_unaligned_be24(const u32 val, void *p)
+{
+ __put_unaligned_be24(val, p);
+}
+
+static inline void __put_unaligned_le24(const u32 val, u8 *p)
+{
+ *p++ = val;
+ *p++ = val >> 8;
+ *p++ = val >> 16;
+}
+
+static inline void put_unaligned_le24(const u32 val, void *p)
+{
+ __put_unaligned_le24(val, p);
+}
+
+static inline void __put_unaligned_be48(const u64 val, u8 *p)
+{
+ *p++ = val >> 40;
+ *p++ = val >> 32;
+ *p++ = val >> 24;
+ *p++ = val >> 16;
+ *p++ = val >> 8;
+ *p++ = val;
+}
+
+static inline void put_unaligned_be48(const u64 val, void *p)
+{
+ __put_unaligned_be48(val, p);
+}
+
+static inline u64 __get_unaligned_be48(const u8 *p)
+{
+ return (u64)p[0] << 40 | (u64)p[1] << 32 | (u64)p[2] << 24 |
+ p[3] << 16 | p[4] << 8 | p[5];
+}
+
+static inline u64 get_unaligned_be48(const void *p)
+{
+ return __get_unaligned_be48(p);
+}
#endif /* __ASM_GENERIC_UNALIGNED_H */
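To make the packed-struct trick above concrete, a short sketch under stated assumptions ('buf' and 'cdb' are hypothetical buffers, not from this header): wrapping the access in a one-member __packed struct tells the compiler the pointer may be misaligned, so it emits safe byte accesses where the architecture requires them, while the 24-bit helpers compose the value bytewise because no integer type matches that width.

static void unaligned_demo(void)
{
	u8 buf[8], cdb[3];
	u32 v;

	put_unaligned_le32(0x12345678, buf + 1);	/* misaligned store */
	v = get_unaligned((u32 *)(buf + 1));		/* misaligned load */

	put_unaligned_be24(v & 0xffffff, cdb);	/* three big-endian bytes */
	v = get_unaligned_be24(cdb);		/* round-trips the low 24 bits */
}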
diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h
index cec543d9e87b..c835607f78ae 100644
--- a/include/asm-generic/vdso/vsyscall.h
+++ b/include/asm-generic/vdso/vsyscall.h
@@ -11,20 +11,6 @@ static __always_inline struct vdso_data *__arch_get_k_vdso_data(void)
}
#endif /* __arch_get_k_vdso_data */
-#ifndef __arch_update_vdso_data
-static __always_inline bool __arch_update_vdso_data(void)
-{
- return true;
-}
-#endif /* __arch_update_vdso_data */
-
-#ifndef __arch_get_clock_mode
-static __always_inline int __arch_get_clock_mode(struct timekeeper *tk)
-{
- return 0;
-}
-#endif /* __arch_get_clock_mode */
-
#ifndef __arch_update_vsyscall
static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata,
struct timekeeper *tk)
diff --git a/include/asm-generic/vermagic.h b/include/asm-generic/vermagic.h
new file mode 100644
index 000000000000..084274a1219e
--- /dev/null
+++ b/include/asm-generic/vermagic.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_GENERIC_VERMAGIC_H
+#define _ASM_GENERIC_VERMAGIC_H
+
+#define MODULE_ARCH_VERMAGIC ""
+
+#endif /* _ASM_GENERIC_VERMAGIC_H */
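For context, a simplified and hedged sketch of how this macro is consumed: linux/vermagic.h builds the module version-magic string by string-literal concatenation, so the empty default contributes nothing. The exact set of components varies by config; this abbreviation is an assumption, not the verbatim definition.

/* abbreviated sketch of linux/vermagic.h */
#define VERMAGIC_STRING						\
	UTS_RELEASE " "						\
	MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT		\
	MODULE_ARCH_VERMAGIC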
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index e00f41aa8ec4..3dc5824141cd 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -34,6 +34,7 @@
*
* STABS_DEBUG
* DWARF_DEBUG
+ * ELF_DETAILS
*
* DISCARDS // must be the last
* }
@@ -89,15 +90,18 @@
* .data. We don't want to pull in .data..other sections, which Linux
* has defined. Same for text and bss.
*
+ * With LTO_CLANG, the linker also splits sections by default, so we need
+ * these macros to combine the sections during the final link.
+ *
* RODATA_MAIN is not used because existing code already defines .rodata.x
* sections to be brought in with rodata.
*/
-#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
+#if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG)
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
-#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX*
+#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
-#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
-#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
+#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L*
+#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..compoundliteral*
#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
#else
#define TEXT_MAIN .text
@@ -109,12 +113,27 @@
#endif
/*
- * Align to a 32 byte boundary equal to the
- * alignment gcc 4.5 uses for a struct
+ * GCC 4.5 and later use a 32-byte section alignment for structures,
+ * except GCC 4.9, which aligns them to 64 bytes.
*/
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
+/*
+ * The order of the sched class addresses is important, as it is
+ * used to determine the priority of each sched class in relation
+ * to the others.
+ */
+#define SCHED_DATA \
+ STRUCT_ALIGN(); \
+ __sched_class_highest = .; \
+ *(__stop_sched_class) \
+ *(__dl_sched_class) \
+ *(__rt_sched_class) \
+ *(__fair_sched_class) \
+ *(__idle_sched_class) \
+ __sched_class_lowest = .;
+
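Because SCHED_DATA emits the classes from the highest priority (stop) at the lowest address down to idle at the highest, relative priority reduces to a pointer comparison. A hedged sketch of the kind of helper this layout enables; the in-tree equivalent lives in the scheduler and may differ in form:

/* earlier in SCHED_DATA == lower address == higher priority */
static inline bool sched_class_above(const struct sched_class *a,
				     const struct sched_class *b)
{
	return a < b;
}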
/* The actual configuration determines if the init/exit sections
 * are handled as text/data or whether they can be discarded (which
* often happens at runtime)
@@ -135,6 +154,24 @@
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
+#define KEEP_PATCHABLE KEEP(*(__patchable_function_entries))
+#define PATCHABLE_DISCARDS
+#else
+#define KEEP_PATCHABLE
+#define PATCHABLE_DISCARDS *(__patchable_function_entries)
+#endif
+
+#ifndef CONFIG_ARCH_SUPPORTS_CFI_CLANG
+/*
+ * Simply points to ftrace_stub, but with the proper protocol.
+ * Defined by the linker script in linux/vmlinux.lds.h
+ */
+#define FTRACE_STUB_HACK ftrace_stub_graph = ftrace_stub;
+#else
+#define FTRACE_STUB_HACK
+#endif
+
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
/*
* The ftrace call sites are logged to a section whose name depends on the
@@ -142,19 +179,21 @@
* FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
* dependencies for FTRACE_CALLSITE_SECTION's definition.
*
- * Need to also make ftrace_stub_graph point to ftrace_stub
- * so that the same stub location may have different protocols
- * and not mess up with C verifiers.
+ * ftrace_ops_list_func will be defined as arch_ftrace_ops_list_func,
+ * as some archs have a different prototype for that function, while
+ * ftrace_ops_list_func() itself has a single prototype.
*/
#define MCOUNT_REC() . = ALIGN(8); \
__start_mcount_loc = .; \
KEEP(*(__mcount_loc)) \
- KEEP(*(__patchable_function_entries)) \
+ KEEP_PATCHABLE \
__stop_mcount_loc = .; \
- ftrace_stub_graph = ftrace_stub;
+ FTRACE_STUB_HACK \
+ ftrace_ops_list_func = arch_ftrace_ops_list_func;
#else
# ifdef CONFIG_FUNCTION_TRACER
-# define MCOUNT_REC() ftrace_stub_graph = ftrace_stub;
+# define MCOUNT_REC() FTRACE_STUB_HACK \
+ ftrace_ops_list_func = arch_ftrace_ops_list_func;
# else
# define MCOUNT_REC()
# endif
@@ -308,6 +347,7 @@
#define DATA_DATA \
*(.xiptext) \
*(DATA_MAIN) \
+ *(.data..decrypted) \
*(.ref.data) \
*(.data..shared_aligned) /* percpu related */ \
MEM_KEEP(init.data*) \
@@ -320,9 +360,12 @@
*(__tracepoints) \
/* implement dynamic printk debug */ \
. = ALIGN(8); \
- __start___verbose = .; \
- KEEP(*(__verbose)) \
- __stop___verbose = .; \
+ __start___dyndbg_classes = .; \
+ KEEP(*(__dyndbg_classes)) \
+ __stop___dyndbg_classes = .; \
+ __start___dyndbg = .; \
+ KEEP(*(__dyndbg)) \
+ __stop___dyndbg = .; \
LIKELY_PROFILE() \
BRANCH_PROFILE() \
TRACE_PRINTKS() \
@@ -341,7 +384,8 @@
#define PAGE_ALIGNED_DATA(page_align) \
. = ALIGN(page_align); \
- *(.data..page_aligned)
+ *(.data..page_aligned) \
+ . = ALIGN(page_align);
#define READ_MOSTLY_DATA(align) \
. = ALIGN(align); \
@@ -368,19 +412,50 @@
KEEP(*(__jump_table)) \
__stop___jump_table = .;
+#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+#define STATIC_CALL_DATA \
+ . = ALIGN(8); \
+ __start_static_call_sites = .; \
+ KEEP(*(.static_call_sites)) \
+ __stop_static_call_sites = .; \
+ __start_static_call_tramp_key = .; \
+ KEEP(*(.static_call_tramp_key)) \
+ __stop_static_call_tramp_key = .;
+#else
+#define STATIC_CALL_DATA
+#endif
+
/*
* Allow architectures to handle ro_after_init data on their
* own by defining an empty RO_AFTER_INIT_DATA.
*/
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA \
+ . = ALIGN(8); \
__start_ro_after_init = .; \
*(.data..ro_after_init) \
JUMP_TABLE_DATA \
+ STATIC_CALL_DATA \
__end_ro_after_init = .;
#endif
/*
+ * .kcfi_traps contains a list of KCFI trap locations.
+ */
+#ifndef KCFI_TRAPS
+#ifdef CONFIG_ARCH_USES_CFI_TRAPS
+#define KCFI_TRAPS \
+ __kcfi_traps : AT(ADDR(__kcfi_traps) - LOAD_OFFSET) { \
+ __start___kcfi_traps = .; \
+ KEEP(*(.kcfi_traps)) \
+ __stop___kcfi_traps = .; \
+ }
+#else
+#define KCFI_TRAPS
+#endif
+#endif
+
+/*
* Read only Data
*/
#define RO_DATA(align) \
@@ -388,6 +463,7 @@
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
__start_rodata = .; \
*(.rodata) *(.rodata.*) \
+ SCHED_DATA \
RO_AFTER_INIT_DATA /* Read only after init */ \
. = ALIGN(8); \
__start___tracepoints_ptrs = .; \
@@ -428,15 +504,11 @@
__end_pci_fixups_suspend_late = .; \
} \
\
- /* Built-in firmware blobs */ \
- .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
- __start_builtin_fw = .; \
- KEEP(*(.builtin_fw)) \
- __end_builtin_fw = .; \
- } \
- \
+ FW_LOADER_BUILT_IN_DATA \
TRACEDATA \
\
+ PRINTK_INDEX \
+ \
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
__start___ksymtab = .; \
@@ -451,27 +523,6 @@
__stop___ksymtab_gpl = .; \
} \
\
- /* Kernel symbol table: Normal unused symbols */ \
- __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
- __start___ksymtab_unused = .; \
- KEEP(*(SORT(___ksymtab_unused+*))) \
- __stop___ksymtab_unused = .; \
- } \
- \
- /* Kernel symbol table: GPL-only unused symbols */ \
- __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
- __start___ksymtab_unused_gpl = .; \
- KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
- __stop___ksymtab_unused_gpl = .; \
- } \
- \
- /* Kernel symbol table: GPL-future-only symbols */ \
- __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
- __start___ksymtab_gpl_future = .; \
- KEEP(*(SORT(___ksymtab_gpl_future+*))) \
- __stop___ksymtab_gpl_future = .; \
- } \
- \
/* Kernel symbol table: Normal symbols */ \
__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
__start___kcrctab = .; \
@@ -486,27 +537,6 @@
__stop___kcrctab_gpl = .; \
} \
\
- /* Kernel symbol table: Normal unused symbols */ \
- __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
- __start___kcrctab_unused = .; \
- KEEP(*(SORT(___kcrctab_unused+*))) \
- __stop___kcrctab_unused = .; \
- } \
- \
- /* Kernel symbol table: GPL-only unused symbols */ \
- __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
- __start___kcrctab_unused_gpl = .; \
- KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
- __stop___kcrctab_unused_gpl = .; \
- } \
- \
- /* Kernel symbol table: GPL-future-only symbols */ \
- __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
- __start___kcrctab_gpl_future = .; \
- KEEP(*(SORT(___kcrctab_gpl_future+*))) \
- __stop___kcrctab_gpl_future = .; \
- } \
- \
/* Kernel symbol table: strings */ \
__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
*(__ksymtab_strings) \
@@ -533,12 +563,25 @@
__stop___modver = .; \
} \
\
+ KCFI_TRAPS \
+ \
RO_EXCEPTION_TABLE \
NOTES \
+ BTF \
\
. = ALIGN((align)); \
__end_rodata = .;
+
+/*
+ * Non-instrumentable text section
+ */
+#define NOINSTR_TEXT \
+ ALIGN_FUNCTION(); \
+ __noinstr_text_start = .; \
+ *(.noinstr.text) \
+ __noinstr_text_end = .;
+
/*
* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map
@@ -549,9 +592,14 @@
*/
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
- *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
+ *(.text.hot .text.hot.*) \
+ *(TEXT_MAIN .text.fixup) \
+ *(.text.unlikely .text.unlikely.*) \
+ *(.text.unknown .text.unknown.*) \
+ NOINSTR_TEXT \
*(.text..refcount) \
*(.ref.text) \
+ *(.text.asan.* .text.tsan.*) \
MEM_KEEP(init.text*) \
MEM_KEEP(exit.text*) \
@@ -602,6 +650,12 @@
*(.softirqentry.text) \
__softirqentry_text_end = .;
+#define STATIC_CALL_TEXT \
+ ALIGN_FUNCTION(); \
+ __static_call_text_start = .; \
+ *(.static_call.text) \
+ __static_call_text_end = .;
+
/* Section used for early init (in .S files) */
#define HEAD_TEXT KEEP(*(.head.text))
@@ -622,6 +676,24 @@
}
/*
+ * .BTF
+ */
+#ifdef CONFIG_DEBUG_INFO_BTF
+#define BTF \
+ .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \
+ __start_BTF = .; \
+ KEEP(*(.BTF)) \
+ __stop_BTF = .; \
+ } \
+ . = ALIGN(4); \
+ .BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) { \
+ *(.BTF_ids) \
+ }
+#else
+#define BTF
+#endif
+
+/*
* Init task
*/
#define INIT_TASK_DATA_SECTION(align) \
@@ -633,6 +705,7 @@
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() . = ALIGN(8); \
__ctors_start = .; \
+ KEEP(*(SORT(.ctors.*))) \
KEEP(*(.ctors)) \
KEEP(*(SORT(.init_array.*))) \
KEEP(*(.init_array)) \
@@ -666,7 +739,8 @@
THERMAL_TABLE(governor) \
EARLYCON_TABLE() \
LSM_TABLE() \
- EARLY_LSM_TABLE()
+ EARLY_LSM_TABLE() \
+ KUNIT_TABLE()
#define INIT_TEXT \
*(.init.text .init.text.*) \
@@ -712,7 +786,9 @@
. = ALIGN(bss_align); \
.bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
BSS_FIRST_SECTIONS \
+ . = ALIGN(PAGE_SIZE); \
*(.bss..page_aligned) \
+ . = ALIGN(PAGE_SIZE); \
*(.dynbss) \
*(BSS_MAIN) \
*(COMMON) \
@@ -756,18 +832,29 @@
/* DWARF 4 */ \
.debug_types 0 : { *(.debug_types) } \
/* DWARF 5 */ \
+ .debug_addr 0 : { *(.debug_addr) } \
+ .debug_line_str 0 : { *(.debug_line_str) } \
+ .debug_loclists 0 : { *(.debug_loclists) } \
.debug_macro 0 : { *(.debug_macro) } \
- .debug_addr 0 : { *(.debug_addr) }
+ .debug_names 0 : { *(.debug_names) } \
+ .debug_rnglists 0 : { *(.debug_rnglists) } \
+ .debug_str_offsets 0 : { *(.debug_str_offsets) }
- /* Stabs debugging sections. */
+/* Stabs debugging sections. */
#define STABS_DEBUG \
.stab 0 : { *(.stab) } \
.stabstr 0 : { *(.stabstr) } \
.stab.excl 0 : { *(.stab.excl) } \
.stab.exclstr 0 : { *(.stab.exclstr) } \
.stab.index 0 : { *(.stab.index) } \
- .stab.indexstr 0 : { *(.stab.indexstr) } \
- .comment 0 : { *(.comment) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+
+/* Required sections not related to debugging. */
+#define ELF_DETAILS \
+ .comment 0 : { *(.comment) } \
+ .symtab 0 : { *(.symtab) } \
+ .strtab 0 : { *(.strtab) } \
+ .shstrtab 0 : { *(.shstrtab) }
#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE \
@@ -795,10 +882,11 @@
KEEP(*(.orc_unwind)) \
__stop_orc_unwind = .; \
} \
+ text_size = _etext - _stext; \
. = ALIGN(4); \
.orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \
orc_lookup = .; \
- . += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) / \
+ . += (((text_size + LOOKUP_BLOCK_SIZE - 1) / \
LOOKUP_BLOCK_SIZE) + 1) * 4; \
orc_lookup_end = .; \
}
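A worked example of the reservation above (not from the patch): the lookup table needs one 4-byte entry per LOOKUP_BLOCK_SIZE bytes of text, rounded up, plus one terminating entry. Assuming LOOKUP_BLOCK_SIZE is 256 (its typical value; an assumption here) and text_size is 1 MiB:

	entries = (1048576 + 255) / 256 + 1 = 4097
	bytes   = entries * 4               = 16388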
@@ -806,6 +894,18 @@
#define ORC_UNWIND_TABLE
#endif
+/* Built-in firmware blobs */
+#ifdef CONFIG_FW_LOADER
+#define FW_LOADER_BUILT_IN_DATA \
+ .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \
+ __start_builtin_fw = .; \
+ KEEP(*(.builtin_fw)) \
+ __end_builtin_fw = .; \
+ }
+#else
+#define FW_LOADER_BUILT_IN_DATA
+#endif
+
#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
. = ALIGN(4); \
@@ -818,6 +918,17 @@
#define TRACEDATA
#endif
+#ifdef CONFIG_PRINTK_INDEX
+#define PRINTK_INDEX \
+ .printk_index : AT(ADDR(.printk_index) - LOAD_OFFSET) { \
+ __start_printk_index = .; \
+ *(.printk_index) \
+ __stop_printk_index = .; \
+ }
+#else
+#define PRINTK_INDEX
+#endif
+
#define NOTES \
.notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
__start_notes = .; \
@@ -856,6 +967,13 @@
KEEP(*(.con_initcall.init)) \
__con_initcall_end = .;
+/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
+#define KUNIT_TABLE() \
+ . = ALIGN(8); \
+ __kunit_suites_start = .; \
+ KEEP(*(.kunit_test_suites)) \
+ __kunit_suites_end = .;
+
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
. = ALIGN(4); \
@@ -894,14 +1012,48 @@
* section definitions so that such archs put those in earlier section
* definitions.
*/
-#define DISCARDS \
- /DISCARD/ : { \
+#ifdef RUNTIME_DISCARD_EXIT
+#define EXIT_DISCARDS
+#else
+#define EXIT_DISCARDS \
EXIT_TEXT \
- EXIT_DATA \
- EXIT_CALL \
+ EXIT_DATA
+#endif
+
+/*
+ * Clang's -fprofile-arcs, -fsanitize=kernel-address, and
+ * -fsanitize=thread produce unwanted sections (.eh_frame
+ * and .init_array.*), but CONFIG_CONSTRUCTORS wants to
+ * keep any .init_array.* sections.
+ * https://bugs.llvm.org/show_bug.cgi?id=46478
+ */
+#if defined(CONFIG_GCOV_KERNEL) || defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN)
+# ifdef CONFIG_CONSTRUCTORS
+# define SANITIZER_DISCARDS \
+ *(.eh_frame)
+# else
+# define SANITIZER_DISCARDS \
+ *(.init_array) *(.init_array.*) \
+ *(.eh_frame)
+# endif
+#else
+# define SANITIZER_DISCARDS
+#endif
+
+#define COMMON_DISCARDS \
+ SANITIZER_DISCARDS \
+ PATCHABLE_DISCARDS \
*(.discard) \
*(.discard.*) \
*(.modinfo) \
+ /* ld.bfd warns about .gnu.version* even when not emitted */ \
+ *(.gnu.version*) \
+
+#define DISCARDS \
+ /DISCARD/ : { \
+ EXIT_DISCARDS \
+ EXIT_CALL \
+ COMMON_DISCARDS \
}
/**
diff --git a/include/asm-generic/xor.h b/include/asm-generic/xor.h
index b62a2a56a4d4..44509d48fca2 100644
--- a/include/asm-generic/xor.h
+++ b/include/asm-generic/xor.h
@@ -8,7 +8,8 @@
#include <linux/prefetch.h>
static void
-xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_8regs_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -27,8 +28,9 @@ xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_8regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_8regs_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -48,8 +50,10 @@ xor_8regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_8regs_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -70,8 +74,11 @@ xor_8regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_8regs_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -93,7 +100,8 @@ xor_8regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_32regs_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -129,8 +137,9 @@ xor_32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_32regs_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -175,8 +184,10 @@ xor_32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_32regs_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -230,8 +241,11 @@ xor_32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_32regs_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -294,7 +308,8 @@ xor_32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_8regs_p_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
prefetchw(p1);
@@ -320,8 +335,9 @@ xor_8regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_8regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_8regs_p_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
prefetchw(p1);
@@ -350,8 +366,10 @@ xor_8regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_8regs_p_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -384,8 +402,11 @@ xor_8regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_8regs_p_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -421,7 +442,8 @@ xor_8regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_32regs_p_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -466,8 +488,9 @@ xor_32regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_32regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_32regs_p_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -523,8 +546,10 @@ xor_32regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_32regs_p_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -591,8 +616,11 @@ xor_32regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_32regs_p_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
index 1d68d5613dae..057c8964aefb 100644
--- a/include/clocksource/arm_arch_timer.h
+++ b/include/clocksource/arm_arch_timer.h
@@ -24,7 +24,7 @@
enum arch_timer_reg {
ARCH_TIMER_REG_CTRL,
- ARCH_TIMER_REG_TVAL,
+ ARCH_TIMER_REG_CVAL,
};
enum arch_timer_ppi_nr {
@@ -32,6 +32,7 @@ enum arch_timer_ppi_nr {
ARCH_TIMER_PHYS_NONSECURE_PPI,
ARCH_TIMER_VIRT_PPI,
ARCH_TIMER_HYP_PPI,
+ ARCH_TIMER_HYP_VIRT_PPI,
ARCH_TIMER_MAX_TIMER_PPI
};
@@ -55,6 +56,7 @@ enum arch_timer_spi_nr {
#define ARCH_TIMER_EVT_TRIGGER_MASK (0xF << ARCH_TIMER_EVT_TRIGGER_SHIFT)
#define ARCH_TIMER_USR_VT_ACCESS_EN (1 << 8) /* virtual timer registers */
#define ARCH_TIMER_USR_PT_ACCESS_EN (1 << 9) /* physical timer registers */
+#define ARCH_TIMER_EVT_INTERVAL_SCALE (1 << 17) /* EVNTIS in the ARMv8 ARM */
#define ARCH_TIMER_EVT_STREAM_PERIOD_US 100
#define ARCH_TIMER_EVT_STREAM_FREQ \
diff --git a/include/clocksource/hyperv_timer.h b/include/clocksource/hyperv_timer.h
index 34eef083c988..b3f5d73ae1d6 100644
--- a/include/clocksource/hyperv_timer.h
+++ b/include/clocksource/hyperv_timer.h
@@ -20,17 +20,16 @@
#define HV_MAX_MAX_DELTA_TICKS 0xffffffff
#define HV_MIN_DELTA_TICKS 1
+#ifdef CONFIG_HYPERV_TIMER
+
/* Routines called by the VMbus driver */
-extern int hv_stimer_alloc(void);
-extern void hv_stimer_free(void);
+extern int hv_stimer_alloc(bool have_percpu_irqs);
extern int hv_stimer_cleanup(unsigned int cpu);
extern void hv_stimer_legacy_init(unsigned int cpu, int sint);
extern void hv_stimer_legacy_cleanup(unsigned int cpu);
extern void hv_stimer_global_cleanup(void);
extern void hv_stimer0_isr(void);
-#ifdef CONFIG_HYPERV_TIMER
-extern u64 (*hv_read_reference_counter)(void);
extern void hv_init_clocksource(void);
extern struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
@@ -101,6 +100,13 @@ static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
{
return U64_MAX;
}
+
+static inline int hv_stimer_cleanup(unsigned int cpu) { return 0; }
+static inline void hv_stimer_legacy_init(unsigned int cpu, int sint) {}
+static inline void hv_stimer_legacy_cleanup(unsigned int cpu) {}
+static inline void hv_stimer_global_cleanup(void) {}
+static inline void hv_stimer0_isr(void) {}
+
#endif /* CONFIG_HYPERV_TIMER */
#endif
diff --git a/include/clocksource/samsung_pwm.h b/include/clocksource/samsung_pwm.h
index c395238d0922..9b435caa95fe 100644
--- a/include/clocksource/samsung_pwm.h
+++ b/include/clocksource/samsung_pwm.h
@@ -27,6 +27,7 @@ struct samsung_pwm_variant {
};
void samsung_pwm_clocksource_init(void __iomem *base,
- unsigned int *irqs, struct samsung_pwm_variant *variant);
+ unsigned int *irqs,
+ const struct samsung_pwm_variant *variant);
#endif /* __CLOCKSOURCE_SAMSUNG_PWM_H */
diff --git a/include/clocksource/timer-goldfish.h b/include/clocksource/timer-goldfish.h
new file mode 100644
index 000000000000..05a3a4f610d6
--- /dev/null
+++ b/include/clocksource/timer-goldfish.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * goldfish-timer clocksource
+ * Registers definition for the goldfish-timer device
+ */
+
+#ifndef _CLOCKSOURCE_TIMER_GOLDFISH_H
+#define _CLOCKSOURCE_TIMER_GOLDFISH_H
+
+/*
+ * TIMER_TIME_LOW get low bits of current time and update TIMER_TIME_HIGH
+ * TIMER_TIME_HIGH get high bits of time at last TIMER_TIME_LOW read
+ * TIMER_ALARM_LOW set low bits of alarm and activate it
+ * TIMER_ALARM_HIGH set high bits of next alarm
+ * TIMER_IRQ_ENABLED enable alarm interrupt
+ * TIMER_CLEAR_ALARM disarm an existing alarm
+ * TIMER_ALARM_STATUS alarm status (running or not)
+ * TIMER_CLEAR_INTERRUPT clear interrupt
+ */
+#define TIMER_TIME_LOW 0x00
+#define TIMER_TIME_HIGH 0x04
+#define TIMER_ALARM_LOW 0x08
+#define TIMER_ALARM_HIGH 0x0c
+#define TIMER_IRQ_ENABLED 0x10
+#define TIMER_CLEAR_ALARM 0x14
+#define TIMER_ALARM_STATUS 0x18
+#define TIMER_CLEAR_INTERRUPT 0x1c
+
+extern int goldfish_timer_init(int irq, void __iomem *base);
+
+#endif /* _CLOCKSOURCE_TIMER_GOLDFISH_H */
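As the register list above states, reading TIMER_TIME_LOW latches the matching high word into TIMER_TIME_HIGH, so a coherent 64-bit timestamp needs no retry loop. A minimal sketch, assuming 'base' is a hypothetical ioremapped pointer (the driver proper lives under drivers/clocksource):

static u64 goldfish_timer_read_time(void __iomem *base)
{
	u32 lo = readl(base + TIMER_TIME_LOW);	/* latches HIGH */
	u32 hi = readl(base + TIMER_TIME_HIGH);

	return ((u64)hi << 32) | lo;
}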
diff --git a/include/clocksource/timer-riscv.h b/include/clocksource/timer-riscv.h
new file mode 100644
index 000000000000..d7f455754e60
--- /dev/null
+++ b/include/clocksource/timer-riscv.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+#ifndef __TIMER_RISCV_H
+#define __TIMER_RISCV_H
+
+#include <linux/types.h>
+
+extern void riscv_cs_get_mult_shift(u32 *mult, u32 *shift);
+
+#endif
diff --git a/include/clocksource/timer-sp804.h b/include/clocksource/timer-sp804.h
deleted file mode 100644
index a5b41f31a1c2..000000000000
--- a/include/clocksource/timer-sp804.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __CLKSOURCE_TIMER_SP804_H
-#define __CLKSOURCE_TIMER_SP804_H
-
-struct clk;
-
-int __sp804_clocksource_and_sched_clock_init(void __iomem *,
- const char *, struct clk *, int);
-int __sp804_clockevents_init(void __iomem *, unsigned int,
- struct clk *, const char *);
-void sp804_timer_disable(void __iomem *);
-
-static inline void sp804_clocksource_init(void __iomem *base, const char *name)
-{
- __sp804_clocksource_and_sched_clock_init(base, name, NULL, 0);
-}
-
-static inline void sp804_clocksource_and_sched_clock_init(void __iomem *base,
- const char *name)
-{
- __sp804_clocksource_and_sched_clock_init(base, name, NULL, 1);
-}
-
-static inline void sp804_clockevents_init(void __iomem *base, unsigned int irq, const char *name)
-{
- __sp804_clockevents_init(base, irq, NULL, name);
-
-}
-#endif
diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h
index 7d9598dc578d..77eceeae708c 100644
--- a/include/clocksource/timer-ti-dm.h
+++ b/include/clocksource/timer-ti-dm.h
@@ -1,7 +1,7 @@
/*
* OMAP Dual-Mode Timers
*
- * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/
* Tarun Kanti DebBarma <tarun.kanti@ti.com>
* Thara Gopinath <thara@ti.com>
*
@@ -52,10 +52,6 @@
#define OMAP_TIMER_TRIGGER_OVERFLOW 0x01
#define OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE 0x02
-/* posted mode types */
-#define OMAP_TIMER_NONPOSTED 0x00
-#define OMAP_TIMER_POSTED 0x01
-
/* timer capabilities used in hwmod database */
#define OMAP_TIMER_SECURE 0x80000000
#define OMAP_TIMER_ALWON 0x40000000
@@ -63,72 +59,13 @@
#define OMAP_TIMER_NEEDS_RESET 0x10000000
#define OMAP_TIMER_HAS_DSP_IRQ 0x08000000
-/*
- * timer errata flags
- *
- * Errata i103/i767 impacts all OMAP3/4/5 devices including AM33xx. This
- * errata prevents us from using posted mode on these devices, unless the
- * timer counter register is never read. For more details please refer to
- * the OMAP3/4/5 errata documents.
- */
-#define OMAP_TIMER_ERRATA_I103_I767 0x80000000
-
-struct timer_regs {
- u32 tidr;
- u32 tier;
- u32 twer;
- u32 tclr;
- u32 tcrr;
- u32 tldr;
- u32 ttrg;
- u32 twps;
- u32 tmar;
- u32 tcar1;
- u32 tsicr;
- u32 tcar2;
- u32 tpir;
- u32 tnir;
- u32 tcvr;
- u32 tocr;
- u32 towr;
-};
-
struct omap_dm_timer {
- int id;
- int irq;
- struct clk *fclk;
-
- void __iomem *io_base;
- void __iomem *irq_stat; /* TISR/IRQSTATUS interrupt status */
- void __iomem *irq_ena; /* irq enable */
- void __iomem *irq_dis; /* irq disable, only on v2 ip */
- void __iomem *pend; /* write pending */
- void __iomem *func_base; /* function register base */
-
- unsigned long rate;
- unsigned reserved:1;
- unsigned posted:1;
- struct timer_regs context;
- int (*get_context_loss_count)(struct device *);
- int ctx_loss_count;
- int revision;
- u32 capability;
- u32 errata;
- struct platform_device *pdev;
- struct list_head node;
};
-int omap_dm_timer_reserve_systimer(int id);
-struct omap_dm_timer *omap_dm_timer_request_by_cap(u32 cap);
-
int omap_dm_timer_get_irq(struct omap_dm_timer *timer);
u32 omap_dm_timer_modify_idlect_mask(u32 inputmask);
-int omap_dm_timer_trigger(struct omap_dm_timer *timer);
-
-int omap_dm_timers_active(void);
-
/*
* Do not use the defines below, they are not needed. They should be only
* used by dmtimer.c and sys_timer related code.
@@ -198,197 +135,4 @@ int omap_dm_timers_active(void);
#define _OMAP_TIMER_TICK_INT_MASK_SET_OFFSET 0x54 /* TOCR, 34xx only */
#define _OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET 0x58 /* TOWR, 34xx only */
-/* register offsets with the write pending bit encoded */
-#define WPSHIFT 16
-
-#define OMAP_TIMER_WAKEUP_EN_REG (_OMAP_TIMER_WAKEUP_EN_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_CTRL_REG (_OMAP_TIMER_CTRL_OFFSET \
- | (WP_TCLR << WPSHIFT))
-
-#define OMAP_TIMER_COUNTER_REG (_OMAP_TIMER_COUNTER_OFFSET \
- | (WP_TCRR << WPSHIFT))
-
-#define OMAP_TIMER_LOAD_REG (_OMAP_TIMER_LOAD_OFFSET \
- | (WP_TLDR << WPSHIFT))
-
-#define OMAP_TIMER_TRIGGER_REG (_OMAP_TIMER_TRIGGER_OFFSET \
- | (WP_TTGR << WPSHIFT))
-
-#define OMAP_TIMER_WRITE_PEND_REG (_OMAP_TIMER_WRITE_PEND_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_MATCH_REG (_OMAP_TIMER_MATCH_OFFSET \
- | (WP_TMAR << WPSHIFT))
-
-#define OMAP_TIMER_CAPTURE_REG (_OMAP_TIMER_CAPTURE_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_IF_CTRL_REG (_OMAP_TIMER_IF_CTRL_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_CAPTURE2_REG (_OMAP_TIMER_CAPTURE2_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_TICK_POS_REG (_OMAP_TIMER_TICK_POS_OFFSET \
- | (WP_TPIR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_NEG_REG (_OMAP_TIMER_TICK_NEG_OFFSET \
- | (WP_TNIR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_COUNT_REG (_OMAP_TIMER_TICK_COUNT_OFFSET \
- | (WP_TCVR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_INT_MASK_SET_REG \
- (_OMAP_TIMER_TICK_INT_MASK_SET_OFFSET | (WP_TOCR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \
- (_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT))
-
-/*
- * The below are inlined to optimize code size for system timers. Other code
- * should not need these at all, see
- * include/linux/platform_data/pwm_omap_dmtimer.h
- */
-#if defined(CONFIG_ARCH_OMAP1) || defined(CONFIG_ARCH_OMAP2PLUS)
-static inline u32 __omap_dm_timer_read(struct omap_dm_timer *timer, u32 reg,
- int posted)
-{
- if (posted)
- while (readl_relaxed(timer->pend) & (reg >> WPSHIFT))
- cpu_relax();
-
- return readl_relaxed(timer->func_base + (reg & 0xff));
-}
-
-static inline void __omap_dm_timer_write(struct omap_dm_timer *timer,
- u32 reg, u32 val, int posted)
-{
- if (posted)
- while (readl_relaxed(timer->pend) & (reg >> WPSHIFT))
- cpu_relax();
-
- writel_relaxed(val, timer->func_base + (reg & 0xff));
-}
-
-static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer)
-{
- u32 tidr;
-
- /* Assume v1 ip if bits [31:16] are zero */
- tidr = readl_relaxed(timer->io_base);
- if (!(tidr >> 16)) {
- timer->revision = 1;
- timer->irq_stat = timer->io_base + OMAP_TIMER_V1_STAT_OFFSET;
- timer->irq_ena = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET;
- timer->irq_dis = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET;
- timer->pend = timer->io_base + _OMAP_TIMER_WRITE_PEND_OFFSET;
- timer->func_base = timer->io_base;
- } else {
- timer->revision = 2;
- timer->irq_stat = timer->io_base + OMAP_TIMER_V2_IRQSTATUS;
- timer->irq_ena = timer->io_base + OMAP_TIMER_V2_IRQENABLE_SET;
- timer->irq_dis = timer->io_base + OMAP_TIMER_V2_IRQENABLE_CLR;
- timer->pend = timer->io_base +
- _OMAP_TIMER_WRITE_PEND_OFFSET +
- OMAP_TIMER_V2_FUNC_OFFSET;
- timer->func_base = timer->io_base + OMAP_TIMER_V2_FUNC_OFFSET;
- }
-}
-
-/*
- * __omap_dm_timer_enable_posted - enables write posted mode
- * @timer: pointer to timer instance handle
- *
- * Enables the write posted mode for the timer. When posted mode is enabled
- * writes to certain timer registers are immediately acknowledged by the
- * internal bus and hence prevents stalling the CPU waiting for the write to
- * complete. Enabling this feature can improve performance for writing to the
- * timer registers.
- */
-static inline void __omap_dm_timer_enable_posted(struct omap_dm_timer *timer)
-{
- if (timer->posted)
- return;
-
- if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) {
- timer->posted = OMAP_TIMER_NONPOSTED;
- __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0, 0);
- return;
- }
-
- __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG,
- OMAP_TIMER_CTRL_POSTED, 0);
- timer->context.tsicr = OMAP_TIMER_CTRL_POSTED;
- timer->posted = OMAP_TIMER_POSTED;
-}
-
-/**
- * __omap_dm_timer_override_errata - override errata flags for a timer
- * @timer: pointer to timer handle
- * @errata: errata flags to be ignored
- *
- * For a given timer, override a timer errata by clearing the flags
- * specified by the errata argument. A specific erratum should only be
- * overridden for a timer if the timer is used in such a way the erratum
- * has no impact.
- */
-static inline void __omap_dm_timer_override_errata(struct omap_dm_timer *timer,
- u32 errata)
-{
- timer->errata &= ~errata;
-}
-
-static inline void __omap_dm_timer_stop(struct omap_dm_timer *timer,
- int posted, unsigned long rate)
-{
- u32 l;
-
- l = __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted);
- if (l & OMAP_TIMER_CTRL_ST) {
- l &= ~0x1;
- __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, l, posted);
-#ifdef CONFIG_ARCH_OMAP2PLUS
- /* Readback to make sure write has completed */
- __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted);
- /*
- * Wait for functional clock period x 3.5 to make sure that
- * timer is stopped
- */
- udelay(3500000 / rate + 1);
-#endif
- }
-
- /* Ack possibly pending interrupt */
- writel_relaxed(OMAP_TIMER_INT_OVERFLOW, timer->irq_stat);
-}
-
-static inline void __omap_dm_timer_load_start(struct omap_dm_timer *timer,
- u32 ctrl, unsigned int load,
- int posted)
-{
- __omap_dm_timer_write(timer, OMAP_TIMER_COUNTER_REG, load, posted);
- __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, ctrl, posted);
-}
-
-static inline void __omap_dm_timer_int_enable(struct omap_dm_timer *timer,
- unsigned int value)
-{
- writel_relaxed(value, timer->irq_ena);
- __omap_dm_timer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value, 0);
-}
-
-static inline unsigned int
-__omap_dm_timer_read_counter(struct omap_dm_timer *timer, int posted)
-{
- return __omap_dm_timer_read(timer, OMAP_TIMER_COUNTER_REG, posted);
-}
-
-static inline void __omap_dm_timer_write_status(struct omap_dm_timer *timer,
- unsigned int value)
-{
- writel_relaxed(value, timer->irq_stat);
-}
-#endif /* CONFIG_ARCH_OMAP1 || CONFIG_ARCH_OMAP2PLUS */
#endif /* __CLOCKSOURCE_DMTIMER_H */
diff --git a/include/clocksource/timer-xilinx.h b/include/clocksource/timer-xilinx.h
new file mode 100644
index 000000000000..c0f56fe6d22a
--- /dev/null
+++ b/include/clocksource/timer-xilinx.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2021 Sean Anderson <sean.anderson@seco.com>
+ */
+
+#ifndef XILINX_TIMER_H
+#define XILINX_TIMER_H
+
+#include <linux/compiler.h>
+
+#define TCSR0 0x00
+#define TLR0 0x04
+#define TCR0 0x08
+#define TCSR1 0x10
+#define TLR1 0x14
+#define TCR1 0x18
+
+#define TCSR_MDT BIT(0)
+#define TCSR_UDT BIT(1)
+#define TCSR_GENT BIT(2)
+#define TCSR_CAPT BIT(3)
+#define TCSR_ARHT BIT(4)
+#define TCSR_LOAD BIT(5)
+#define TCSR_ENIT BIT(6)
+#define TCSR_ENT BIT(7)
+#define TCSR_TINT BIT(8)
+#define TCSR_PWMA BIT(9)
+#define TCSR_ENALL BIT(10)
+#define TCSR_CASC BIT(11)
+
+struct clk;
+struct device_node;
+struct regmap;
+
+/**
+ * struct xilinx_timer_priv - Private data for Xilinx AXI timer drivers
+ * @map: Regmap of the device, possibly with an offset
+ * @clk: Parent clock
+ * @max: Maximum value of the counters
+ */
+struct xilinx_timer_priv {
+ struct regmap *map;
+ struct clk *clk;
+ u32 max;
+};
+
+/**
+ * xilinx_timer_tlr_cycles() - Calculate the TLR for a period specified
+ * in clock cycles
+ * @priv: The timer's private data
+ * @tcsr: The value of the TCSR register for this counter
+ * @cycles: The number of cycles in this period
+ *
+ * Callers of this function MUST ensure that @cycles is representable as
+ * a TLR.
+ *
+ * Return: The calculated value for TLR
+ */
+u32 xilinx_timer_tlr_cycles(struct xilinx_timer_priv *priv, u32 tcsr,
+ u64 cycles);
+
+/**
+ * xilinx_timer_get_period() - Get the current period of a counter
+ * @priv: The timer's private data
+ * @tlr: The value of TLR for this counter
+ * @tcsr: The value of TCSR for this counter
+ *
+ * Return: The period, in ns
+ */
+unsigned int xilinx_timer_get_period(struct xilinx_timer_priv *priv,
+ u32 tlr, u32 tcsr);
+
+#endif /* XILINX_TIMER_H */
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index d873f999b334..cb3d6b1c655d 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -106,6 +106,24 @@ struct acomp_alg {
*/
struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
u32 mask);
+/**
+ * crypto_alloc_acomp_node() -- allocate ACOMPRESS tfm handle with desired NUMA node
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * compression algorithm e.g. "deflate"
+ * @type: specifies the type of the algorithm
+ * @mask: specifies the mask for the algorithm
+ * @node: specifies the NUMA node the ZIP hardware belongs to
+ *
+ * Allocate a handle for a compression algorithm. Drivers should try to use
+ * (de)compressors on the specified NUMA node.
+ * The returned struct crypto_acomp is the handle that is required for any
+ * subsequent API invocation for the compression operations.
+ *
+ * Return: allocated handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
+ u32 mask, int node);
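A hedged usage sketch of the new allocator; 'dev' is hypothetical, the node would typically come from dev_to_node(), and further error handling is elided:

	struct crypto_acomp *acomp;

	acomp = crypto_alloc_acomp_node("deflate", 0, 0, dev_to_node(dev));
	if (IS_ERR(acomp))
		return PTR_ERR(acomp);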
static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
{
@@ -147,6 +165,8 @@ static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
* crypto_free_acomp() -- free ACOMPRESS tfm handle
*
* @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_acomp(struct crypto_acomp *tfm)
{
@@ -157,7 +177,7 @@ static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
type |= CRYPTO_ALG_TYPE_ACOMPRESS;
- mask |= CRYPTO_ALG_TYPE_MASK;
+ mask |= CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
return crypto_has_alg(alg_name, type, mask);
}
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 1b3ebe8593c0..14db3bee0519 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -8,9 +8,10 @@
#ifndef _CRYPTO_AEAD_H
#define _CRYPTO_AEAD_H
+#include <linux/container_of.h>
#include <linux/crypto.h>
-#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/types.h>
/**
* DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
@@ -43,30 +44,37 @@
*
* Memory Structure:
*
- * To support the needs of the most prominent user of AEAD ciphers, namely
- * IPSEC, the AEAD ciphers have a special memory layout the caller must adhere
- * to.
- *
- * The scatter list pointing to the input data must contain:
- *
- * * for RFC4106 ciphers, the concatenation of
- * associated authentication data || IV || plaintext or ciphertext. Note, the
- * same IV (buffer) is also set with the aead_request_set_crypt call. Note,
- * the API call of aead_request_set_ad must provide the length of the AAD and
- * the IV. The API call of aead_request_set_crypt only points to the size of
- * the input plaintext or ciphertext.
- *
- * * for "normal" AEAD ciphers, the concatenation of
- * associated authentication data || plaintext or ciphertext.
- *
- * It is important to note that if multiple scatter gather list entries form
- * the input data mentioned above, the first entry must not point to a NULL
- * buffer. If there is any potential where the AAD buffer can be NULL, the
- * calling code must contain a precaution to ensure that this does not result
- * in the first scatter gather list entry pointing to a NULL buffer.
+ * The source scatterlist must contain the concatenation of
+ * associated data || plaintext or ciphertext.
+ *
+ * The destination scatterlist has the same layout, except that the plaintext
+ * (resp. ciphertext) will grow (resp. shrink) by the authentication tag size
+ * during encryption (resp. decryption).
+ *
+ * In-place encryption/decryption is enabled by using the same scatterlist
+ * pointer for both the source and destination.
+ *
+ * Even in the out-of-place case, space must be reserved in the destination for
+ * the associated data, even though it won't be written to. This makes the
+ * in-place and out-of-place cases more consistent. It is permissible for the
+ * "destination" associated data to alias the "source" associated data.
+ *
+ * As with the other scatterlist crypto APIs, zero-length scatterlist elements
+ * are not allowed in the used part of the scatterlist. Thus, if there is no
+ * associated data, the first element must point to the plaintext/ciphertext.
+ *
+ * To meet the needs of IPsec, a special quirk applies to rfc4106, rfc4309,
+ * rfc4543, and rfc7539esp ciphers. For these ciphers, the final 'ivsize' bytes
+ * of the associated data buffer must contain a second copy of the IV. This is
+ * in addition to the copy passed to aead_request_set_crypt(). These two IV
+ * copies must not differ; different implementations of the same algorithm may
+ * behave differently in that case. Note that the algorithm might not actually
+ * treat the IV as associated data; nevertheless the length passed to
+ * aead_request_set_ad() must include it.
*/
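A minimal sketch of the layout just described, for an in-place encryption; 'buf', 'assoclen', 'ptlen', 'iv', 'tfm' and 'req' are hypothetical, and the tag space (crypto_aead_authsize() bytes) must already be reserved at the end of 'buf':

	struct scatterlist sg;
	int err;

	sg_init_one(&sg, buf, assoclen + ptlen + crypto_aead_authsize(tfm));
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);
	aead_request_set_ad(req, assoclen);
	err = crypto_aead_encrypt(req);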
struct crypto_aead;
+struct scatterlist;
/**
* struct aead_request - AEAD request
@@ -179,12 +187,19 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
/**
* crypto_free_aead() - zeroize and free aead handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_aead(struct crypto_aead *tfm)
{
crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm));
}
+static inline const char *crypto_aead_driver_name(struct crypto_aead *tfm)
+{
+ return crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
+}
+
static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
{
return container_of(crypto_aead_tfm(tfm)->__crt_alg,
@@ -419,7 +434,7 @@ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
*/
static inline void aead_request_free(struct aead_request *req)
{
- kzfree(req);
+ kfree_sensitive(req);
}
/**
@@ -477,7 +492,7 @@ static inline void aead_request_set_callback(struct aead_request *req,
* The memory structure for cipher operation has the following structure:
*
* - AEAD encryption input: assoc data || plaintext
- * - AEAD encryption output: assoc data || cipherntext || auth tag
+ * - AEAD encryption output: assoc data || ciphertext || auth tag
* - AEAD decryption input: assoc data || ciphertext || auth tag
* - AEAD decryption output: assoc data || plaintext
*
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index 6924b091adec..5764b46bd1ec 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -174,6 +174,8 @@ static inline struct crypto_akcipher *crypto_akcipher_reqtfm(
* crypto_free_akcipher() - free AKCIPHER tfm handle
*
* @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_akcipher(struct crypto_akcipher *tfm)
{
@@ -207,7 +209,7 @@ static inline struct akcipher_request *akcipher_request_alloc(
*/
static inline void akcipher_request_free(struct akcipher_request *req)
{
- kzfree(req);
+ kfree_sensitive(req);
}
/**
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index e115f9215ed5..f50c5d1725da 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -7,10 +7,13 @@
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H
+#include <linux/align.h>
#include <linux/crypto.h>
+#include <linux/kconfig.h>
#include <linux/list.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#include <asm/unaligned.h>
/*
* Maximum values for blocksize and alignmask, used to allocate
@@ -25,8 +28,10 @@
struct crypto_aead;
struct crypto_instance;
struct module;
+struct notifier_block;
struct rtattr;
struct seq_file;
+struct sk_buff;
struct crypto_type {
unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
@@ -96,6 +101,15 @@ struct scatter_walk {
unsigned int offset;
};
+struct crypto_attr_alg {
+ char name[CRYPTO_MAX_ALG_NAME];
+};
+
+struct crypto_attr_type {
+ u32 type;
+ u32 mask;
+};
+
void crypto_mod_put(struct crypto_alg *alg);
int crypto_register_template(struct crypto_template *tmpl);
@@ -116,15 +130,16 @@ struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
-int crypto_check_attr_type(struct rtattr **tb, u32 type);
+int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
const char *crypto_attr_alg_name(struct rtattr *rta);
-int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
struct crypto_alg *alg);
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
struct crypto_async_request *request);
+void crypto_enqueue_request_head(struct crypto_queue *queue,
+ struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
@@ -141,9 +156,11 @@ static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
(size % sizeof(unsigned long)) == 0) {
unsigned long *d = (unsigned long *)dst;
unsigned long *s = (unsigned long *)src;
+ unsigned long l;
while (size > 0) {
- *d++ ^= *s++;
+ l = get_unaligned(d) ^ get_unaligned(s++);
+ put_unaligned(l, d++);
size -= sizeof(unsigned long);
}
} else {
@@ -160,9 +177,11 @@ static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
unsigned long *d = (unsigned long *)dst;
unsigned long *s1 = (unsigned long *)src1;
unsigned long *s2 = (unsigned long *)src2;
+ unsigned long l;
while (size > 0) {
- *d++ = *s1++ ^ *s2++;
+ l = get_unaligned(s1++) ^ get_unaligned(s2++);
+ put_unaligned(l, d++);
size -= sizeof(unsigned long);
}
} else {
@@ -187,45 +206,6 @@ static inline void *crypto_instance_ctx(struct crypto_instance *inst)
return inst->__ctx;
}
-struct crypto_cipher_spawn {
- struct crypto_spawn base;
-};
-
-static inline int crypto_grab_cipher(struct crypto_cipher_spawn *spawn,
- struct crypto_instance *inst,
- const char *name, u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_CIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
- return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
-}
-
-static inline void crypto_drop_cipher(struct crypto_cipher_spawn *spawn)
-{
- crypto_drop_spawn(&spawn->base);
-}
-
-static inline struct crypto_alg *crypto_spawn_cipher_alg(
- struct crypto_cipher_spawn *spawn)
-{
- return spawn->base.alg;
-}
-
-static inline struct crypto_cipher *crypto_spawn_cipher(
- struct crypto_cipher_spawn *spawn)
-{
- u32 type = CRYPTO_ALG_TYPE_CIPHER;
- u32 mask = CRYPTO_ALG_TYPE_MASK;
-
- return __crypto_cipher_cast(crypto_spawn_tfm(&spawn->base, type, mask));
-}
-
-static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
-{
- return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
-}
-
static inline struct crypto_async_request *crypto_get_backlog(
struct crypto_queue *queue)
{
@@ -233,18 +213,29 @@ static inline struct crypto_async_request *crypto_get_backlog(
container_of(queue->backlog, struct crypto_async_request, list);
}
-static inline int crypto_requires_off(u32 type, u32 mask, u32 off)
+static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off)
{
- return (type ^ off) & mask & off;
+ return (algt->type ^ off) & algt->mask & off;
}
/*
- * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms.
- * Otherwise returns zero.
+ * When an algorithm uses another algorithm (e.g., if it's an instance of a
+ * template), these are the flags that should always be set on the "outer"
+ * algorithm if any "inner" algorithm has them set.
+ */
+#define CRYPTO_ALG_INHERITED_FLAGS \
+ (CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK | \
+ CRYPTO_ALG_ALLOCATES_MEMORY)
+
+/*
+ * Given the type and mask that specify the flags restrictions on a template
+ * instance being created, return the mask that should be passed to
+ * crypto_grab_*() (along with type=0) to honor any request the user made to
+ * have any of the CRYPTO_ALG_INHERITED_FLAGS clear.
*/
-static inline int crypto_requires_sync(u32 type, u32 mask)
+static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt)
{
- return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC);
+ return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS);
}
noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
@@ -264,12 +255,6 @@ static inline int crypto_memneq(const void *a, const void *b, size_t size)
return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
}
-static inline void crypto_yield(u32 flags)
-{
- if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
- cond_resched();
-}
-
int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);
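A minimal sketch of how the new inherited-flags helpers above would be used, assuming normal kernel build context; the function name example_inherited_mask is invented for illustration, and crypto_get_attr_type() is the existing attribute-lookup helper from this header:

#include <crypto/algapi.h>

/* Derive the mask to pass (with type=0) to crypto_grab_*() so that a
 * user request to clear any CRYPTO_ALG_INHERITED_FLAGS is honored. */
static u32 example_inherited_mask(struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return 0;

	return crypto_algt_inherited_mask(algt);
}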
diff --git a/include/crypto/aria.h b/include/crypto/aria.h
new file mode 100644
index 000000000000..254da46cc385
--- /dev/null
+++ b/include/crypto/aria.h
@@ -0,0 +1,458 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Cryptographic API.
+ *
+ * ARIA Cipher Algorithm.
+ *
+ * Documentation of ARIA can be found in RFC 5794.
+ * Copyright (c) 2022 Taehee Yoo <ap420073@gmail.com>
+ *
+ * Information for ARIA
+ * http://210.104.33.10/ARIA/index-e.html (English)
+ * http://seed.kisa.or.kr/ (Korean)
+ *
+ * Public domain version is distributed above.
+ */
+
+#ifndef _CRYPTO_ARIA_H
+#define _CRYPTO_ARIA_H
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/crypto.h>
+#include <asm/byteorder.h>
+
+#define ARIA_MIN_KEY_SIZE 16
+#define ARIA_MAX_KEY_SIZE 32
+#define ARIA_BLOCK_SIZE 16
+#define ARIA_MAX_RD_KEYS 17
+#define ARIA_RD_KEY_WORDS (ARIA_BLOCK_SIZE / sizeof(u32))
+
+struct aria_ctx {
+ u32 enc_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS];
+ u32 dec_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS];
+ int rounds;
+ int key_length;
+};
+
+static const u32 s1[256] = {
+ 0x00636363, 0x007c7c7c, 0x00777777, 0x007b7b7b,
+ 0x00f2f2f2, 0x006b6b6b, 0x006f6f6f, 0x00c5c5c5,
+ 0x00303030, 0x00010101, 0x00676767, 0x002b2b2b,
+ 0x00fefefe, 0x00d7d7d7, 0x00ababab, 0x00767676,
+ 0x00cacaca, 0x00828282, 0x00c9c9c9, 0x007d7d7d,
+ 0x00fafafa, 0x00595959, 0x00474747, 0x00f0f0f0,
+ 0x00adadad, 0x00d4d4d4, 0x00a2a2a2, 0x00afafaf,
+ 0x009c9c9c, 0x00a4a4a4, 0x00727272, 0x00c0c0c0,
+ 0x00b7b7b7, 0x00fdfdfd, 0x00939393, 0x00262626,
+ 0x00363636, 0x003f3f3f, 0x00f7f7f7, 0x00cccccc,
+ 0x00343434, 0x00a5a5a5, 0x00e5e5e5, 0x00f1f1f1,
+ 0x00717171, 0x00d8d8d8, 0x00313131, 0x00151515,
+ 0x00040404, 0x00c7c7c7, 0x00232323, 0x00c3c3c3,
+ 0x00181818, 0x00969696, 0x00050505, 0x009a9a9a,
+ 0x00070707, 0x00121212, 0x00808080, 0x00e2e2e2,
+ 0x00ebebeb, 0x00272727, 0x00b2b2b2, 0x00757575,
+ 0x00090909, 0x00838383, 0x002c2c2c, 0x001a1a1a,
+ 0x001b1b1b, 0x006e6e6e, 0x005a5a5a, 0x00a0a0a0,
+ 0x00525252, 0x003b3b3b, 0x00d6d6d6, 0x00b3b3b3,
+ 0x00292929, 0x00e3e3e3, 0x002f2f2f, 0x00848484,
+ 0x00535353, 0x00d1d1d1, 0x00000000, 0x00ededed,
+ 0x00202020, 0x00fcfcfc, 0x00b1b1b1, 0x005b5b5b,
+ 0x006a6a6a, 0x00cbcbcb, 0x00bebebe, 0x00393939,
+ 0x004a4a4a, 0x004c4c4c, 0x00585858, 0x00cfcfcf,
+ 0x00d0d0d0, 0x00efefef, 0x00aaaaaa, 0x00fbfbfb,
+ 0x00434343, 0x004d4d4d, 0x00333333, 0x00858585,
+ 0x00454545, 0x00f9f9f9, 0x00020202, 0x007f7f7f,
+ 0x00505050, 0x003c3c3c, 0x009f9f9f, 0x00a8a8a8,
+ 0x00515151, 0x00a3a3a3, 0x00404040, 0x008f8f8f,
+ 0x00929292, 0x009d9d9d, 0x00383838, 0x00f5f5f5,
+ 0x00bcbcbc, 0x00b6b6b6, 0x00dadada, 0x00212121,
+ 0x00101010, 0x00ffffff, 0x00f3f3f3, 0x00d2d2d2,
+ 0x00cdcdcd, 0x000c0c0c, 0x00131313, 0x00ececec,
+ 0x005f5f5f, 0x00979797, 0x00444444, 0x00171717,
+ 0x00c4c4c4, 0x00a7a7a7, 0x007e7e7e, 0x003d3d3d,
+ 0x00646464, 0x005d5d5d, 0x00191919, 0x00737373,
+ 0x00606060, 0x00818181, 0x004f4f4f, 0x00dcdcdc,
+ 0x00222222, 0x002a2a2a, 0x00909090, 0x00888888,
+ 0x00464646, 0x00eeeeee, 0x00b8b8b8, 0x00141414,
+ 0x00dedede, 0x005e5e5e, 0x000b0b0b, 0x00dbdbdb,
+ 0x00e0e0e0, 0x00323232, 0x003a3a3a, 0x000a0a0a,
+ 0x00494949, 0x00060606, 0x00242424, 0x005c5c5c,
+ 0x00c2c2c2, 0x00d3d3d3, 0x00acacac, 0x00626262,
+ 0x00919191, 0x00959595, 0x00e4e4e4, 0x00797979,
+ 0x00e7e7e7, 0x00c8c8c8, 0x00373737, 0x006d6d6d,
+ 0x008d8d8d, 0x00d5d5d5, 0x004e4e4e, 0x00a9a9a9,
+ 0x006c6c6c, 0x00565656, 0x00f4f4f4, 0x00eaeaea,
+ 0x00656565, 0x007a7a7a, 0x00aeaeae, 0x00080808,
+ 0x00bababa, 0x00787878, 0x00252525, 0x002e2e2e,
+ 0x001c1c1c, 0x00a6a6a6, 0x00b4b4b4, 0x00c6c6c6,
+ 0x00e8e8e8, 0x00dddddd, 0x00747474, 0x001f1f1f,
+ 0x004b4b4b, 0x00bdbdbd, 0x008b8b8b, 0x008a8a8a,
+ 0x00707070, 0x003e3e3e, 0x00b5b5b5, 0x00666666,
+ 0x00484848, 0x00030303, 0x00f6f6f6, 0x000e0e0e,
+ 0x00616161, 0x00353535, 0x00575757, 0x00b9b9b9,
+ 0x00868686, 0x00c1c1c1, 0x001d1d1d, 0x009e9e9e,
+ 0x00e1e1e1, 0x00f8f8f8, 0x00989898, 0x00111111,
+ 0x00696969, 0x00d9d9d9, 0x008e8e8e, 0x00949494,
+ 0x009b9b9b, 0x001e1e1e, 0x00878787, 0x00e9e9e9,
+ 0x00cecece, 0x00555555, 0x00282828, 0x00dfdfdf,
+ 0x008c8c8c, 0x00a1a1a1, 0x00898989, 0x000d0d0d,
+ 0x00bfbfbf, 0x00e6e6e6, 0x00424242, 0x00686868,
+ 0x00414141, 0x00999999, 0x002d2d2d, 0x000f0f0f,
+ 0x00b0b0b0, 0x00545454, 0x00bbbbbb, 0x00161616
+};
+
+static const u32 s2[256] = {
+ 0xe200e2e2, 0x4e004e4e, 0x54005454, 0xfc00fcfc,
+ 0x94009494, 0xc200c2c2, 0x4a004a4a, 0xcc00cccc,
+ 0x62006262, 0x0d000d0d, 0x6a006a6a, 0x46004646,
+ 0x3c003c3c, 0x4d004d4d, 0x8b008b8b, 0xd100d1d1,
+ 0x5e005e5e, 0xfa00fafa, 0x64006464, 0xcb00cbcb,
+ 0xb400b4b4, 0x97009797, 0xbe00bebe, 0x2b002b2b,
+ 0xbc00bcbc, 0x77007777, 0x2e002e2e, 0x03000303,
+ 0xd300d3d3, 0x19001919, 0x59005959, 0xc100c1c1,
+ 0x1d001d1d, 0x06000606, 0x41004141, 0x6b006b6b,
+ 0x55005555, 0xf000f0f0, 0x99009999, 0x69006969,
+ 0xea00eaea, 0x9c009c9c, 0x18001818, 0xae00aeae,
+ 0x63006363, 0xdf00dfdf, 0xe700e7e7, 0xbb00bbbb,
+ 0x00000000, 0x73007373, 0x66006666, 0xfb00fbfb,
+ 0x96009696, 0x4c004c4c, 0x85008585, 0xe400e4e4,
+ 0x3a003a3a, 0x09000909, 0x45004545, 0xaa00aaaa,
+ 0x0f000f0f, 0xee00eeee, 0x10001010, 0xeb00ebeb,
+ 0x2d002d2d, 0x7f007f7f, 0xf400f4f4, 0x29002929,
+ 0xac00acac, 0xcf00cfcf, 0xad00adad, 0x91009191,
+ 0x8d008d8d, 0x78007878, 0xc800c8c8, 0x95009595,
+ 0xf900f9f9, 0x2f002f2f, 0xce00cece, 0xcd00cdcd,
+ 0x08000808, 0x7a007a7a, 0x88008888, 0x38003838,
+ 0x5c005c5c, 0x83008383, 0x2a002a2a, 0x28002828,
+ 0x47004747, 0xdb00dbdb, 0xb800b8b8, 0xc700c7c7,
+ 0x93009393, 0xa400a4a4, 0x12001212, 0x53005353,
+ 0xff00ffff, 0x87008787, 0x0e000e0e, 0x31003131,
+ 0x36003636, 0x21002121, 0x58005858, 0x48004848,
+ 0x01000101, 0x8e008e8e, 0x37003737, 0x74007474,
+ 0x32003232, 0xca00caca, 0xe900e9e9, 0xb100b1b1,
+ 0xb700b7b7, 0xab00abab, 0x0c000c0c, 0xd700d7d7,
+ 0xc400c4c4, 0x56005656, 0x42004242, 0x26002626,
+ 0x07000707, 0x98009898, 0x60006060, 0xd900d9d9,
+ 0xb600b6b6, 0xb900b9b9, 0x11001111, 0x40004040,
+ 0xec00ecec, 0x20002020, 0x8c008c8c, 0xbd00bdbd,
+ 0xa000a0a0, 0xc900c9c9, 0x84008484, 0x04000404,
+ 0x49004949, 0x23002323, 0xf100f1f1, 0x4f004f4f,
+ 0x50005050, 0x1f001f1f, 0x13001313, 0xdc00dcdc,
+ 0xd800d8d8, 0xc000c0c0, 0x9e009e9e, 0x57005757,
+ 0xe300e3e3, 0xc300c3c3, 0x7b007b7b, 0x65006565,
+ 0x3b003b3b, 0x02000202, 0x8f008f8f, 0x3e003e3e,
+ 0xe800e8e8, 0x25002525, 0x92009292, 0xe500e5e5,
+ 0x15001515, 0xdd00dddd, 0xfd00fdfd, 0x17001717,
+ 0xa900a9a9, 0xbf00bfbf, 0xd400d4d4, 0x9a009a9a,
+ 0x7e007e7e, 0xc500c5c5, 0x39003939, 0x67006767,
+ 0xfe00fefe, 0x76007676, 0x9d009d9d, 0x43004343,
+ 0xa700a7a7, 0xe100e1e1, 0xd000d0d0, 0xf500f5f5,
+ 0x68006868, 0xf200f2f2, 0x1b001b1b, 0x34003434,
+ 0x70007070, 0x05000505, 0xa300a3a3, 0x8a008a8a,
+ 0xd500d5d5, 0x79007979, 0x86008686, 0xa800a8a8,
+ 0x30003030, 0xc600c6c6, 0x51005151, 0x4b004b4b,
+ 0x1e001e1e, 0xa600a6a6, 0x27002727, 0xf600f6f6,
+ 0x35003535, 0xd200d2d2, 0x6e006e6e, 0x24002424,
+ 0x16001616, 0x82008282, 0x5f005f5f, 0xda00dada,
+ 0xe600e6e6, 0x75007575, 0xa200a2a2, 0xef00efef,
+ 0x2c002c2c, 0xb200b2b2, 0x1c001c1c, 0x9f009f9f,
+ 0x5d005d5d, 0x6f006f6f, 0x80008080, 0x0a000a0a,
+ 0x72007272, 0x44004444, 0x9b009b9b, 0x6c006c6c,
+ 0x90009090, 0x0b000b0b, 0x5b005b5b, 0x33003333,
+ 0x7d007d7d, 0x5a005a5a, 0x52005252, 0xf300f3f3,
+ 0x61006161, 0xa100a1a1, 0xf700f7f7, 0xb000b0b0,
+ 0xd600d6d6, 0x3f003f3f, 0x7c007c7c, 0x6d006d6d,
+ 0xed00eded, 0x14001414, 0xe000e0e0, 0xa500a5a5,
+ 0x3d003d3d, 0x22002222, 0xb300b3b3, 0xf800f8f8,
+ 0x89008989, 0xde00dede, 0x71007171, 0x1a001a1a,
+ 0xaf00afaf, 0xba00baba, 0xb500b5b5, 0x81008181
+};
+
+static const u32 x1[256] = {
+ 0x52520052, 0x09090009, 0x6a6a006a, 0xd5d500d5,
+ 0x30300030, 0x36360036, 0xa5a500a5, 0x38380038,
+ 0xbfbf00bf, 0x40400040, 0xa3a300a3, 0x9e9e009e,
+ 0x81810081, 0xf3f300f3, 0xd7d700d7, 0xfbfb00fb,
+ 0x7c7c007c, 0xe3e300e3, 0x39390039, 0x82820082,
+ 0x9b9b009b, 0x2f2f002f, 0xffff00ff, 0x87870087,
+ 0x34340034, 0x8e8e008e, 0x43430043, 0x44440044,
+ 0xc4c400c4, 0xdede00de, 0xe9e900e9, 0xcbcb00cb,
+ 0x54540054, 0x7b7b007b, 0x94940094, 0x32320032,
+ 0xa6a600a6, 0xc2c200c2, 0x23230023, 0x3d3d003d,
+ 0xeeee00ee, 0x4c4c004c, 0x95950095, 0x0b0b000b,
+ 0x42420042, 0xfafa00fa, 0xc3c300c3, 0x4e4e004e,
+ 0x08080008, 0x2e2e002e, 0xa1a100a1, 0x66660066,
+ 0x28280028, 0xd9d900d9, 0x24240024, 0xb2b200b2,
+ 0x76760076, 0x5b5b005b, 0xa2a200a2, 0x49490049,
+ 0x6d6d006d, 0x8b8b008b, 0xd1d100d1, 0x25250025,
+ 0x72720072, 0xf8f800f8, 0xf6f600f6, 0x64640064,
+ 0x86860086, 0x68680068, 0x98980098, 0x16160016,
+ 0xd4d400d4, 0xa4a400a4, 0x5c5c005c, 0xcccc00cc,
+ 0x5d5d005d, 0x65650065, 0xb6b600b6, 0x92920092,
+ 0x6c6c006c, 0x70700070, 0x48480048, 0x50500050,
+ 0xfdfd00fd, 0xeded00ed, 0xb9b900b9, 0xdada00da,
+ 0x5e5e005e, 0x15150015, 0x46460046, 0x57570057,
+ 0xa7a700a7, 0x8d8d008d, 0x9d9d009d, 0x84840084,
+ 0x90900090, 0xd8d800d8, 0xabab00ab, 0x00000000,
+ 0x8c8c008c, 0xbcbc00bc, 0xd3d300d3, 0x0a0a000a,
+ 0xf7f700f7, 0xe4e400e4, 0x58580058, 0x05050005,
+ 0xb8b800b8, 0xb3b300b3, 0x45450045, 0x06060006,
+ 0xd0d000d0, 0x2c2c002c, 0x1e1e001e, 0x8f8f008f,
+ 0xcaca00ca, 0x3f3f003f, 0x0f0f000f, 0x02020002,
+ 0xc1c100c1, 0xafaf00af, 0xbdbd00bd, 0x03030003,
+ 0x01010001, 0x13130013, 0x8a8a008a, 0x6b6b006b,
+ 0x3a3a003a, 0x91910091, 0x11110011, 0x41410041,
+ 0x4f4f004f, 0x67670067, 0xdcdc00dc, 0xeaea00ea,
+ 0x97970097, 0xf2f200f2, 0xcfcf00cf, 0xcece00ce,
+ 0xf0f000f0, 0xb4b400b4, 0xe6e600e6, 0x73730073,
+ 0x96960096, 0xacac00ac, 0x74740074, 0x22220022,
+ 0xe7e700e7, 0xadad00ad, 0x35350035, 0x85850085,
+ 0xe2e200e2, 0xf9f900f9, 0x37370037, 0xe8e800e8,
+ 0x1c1c001c, 0x75750075, 0xdfdf00df, 0x6e6e006e,
+ 0x47470047, 0xf1f100f1, 0x1a1a001a, 0x71710071,
+ 0x1d1d001d, 0x29290029, 0xc5c500c5, 0x89890089,
+ 0x6f6f006f, 0xb7b700b7, 0x62620062, 0x0e0e000e,
+ 0xaaaa00aa, 0x18180018, 0xbebe00be, 0x1b1b001b,
+ 0xfcfc00fc, 0x56560056, 0x3e3e003e, 0x4b4b004b,
+ 0xc6c600c6, 0xd2d200d2, 0x79790079, 0x20200020,
+ 0x9a9a009a, 0xdbdb00db, 0xc0c000c0, 0xfefe00fe,
+ 0x78780078, 0xcdcd00cd, 0x5a5a005a, 0xf4f400f4,
+ 0x1f1f001f, 0xdddd00dd, 0xa8a800a8, 0x33330033,
+ 0x88880088, 0x07070007, 0xc7c700c7, 0x31310031,
+ 0xb1b100b1, 0x12120012, 0x10100010, 0x59590059,
+ 0x27270027, 0x80800080, 0xecec00ec, 0x5f5f005f,
+ 0x60600060, 0x51510051, 0x7f7f007f, 0xa9a900a9,
+ 0x19190019, 0xb5b500b5, 0x4a4a004a, 0x0d0d000d,
+ 0x2d2d002d, 0xe5e500e5, 0x7a7a007a, 0x9f9f009f,
+ 0x93930093, 0xc9c900c9, 0x9c9c009c, 0xefef00ef,
+ 0xa0a000a0, 0xe0e000e0, 0x3b3b003b, 0x4d4d004d,
+ 0xaeae00ae, 0x2a2a002a, 0xf5f500f5, 0xb0b000b0,
+ 0xc8c800c8, 0xebeb00eb, 0xbbbb00bb, 0x3c3c003c,
+ 0x83830083, 0x53530053, 0x99990099, 0x61610061,
+ 0x17170017, 0x2b2b002b, 0x04040004, 0x7e7e007e,
+ 0xbaba00ba, 0x77770077, 0xd6d600d6, 0x26260026,
+ 0xe1e100e1, 0x69690069, 0x14140014, 0x63630063,
+ 0x55550055, 0x21210021, 0x0c0c000c, 0x7d7d007d
+};
+
+static const u32 x2[256] = {
+ 0x30303000, 0x68686800, 0x99999900, 0x1b1b1b00,
+ 0x87878700, 0xb9b9b900, 0x21212100, 0x78787800,
+ 0x50505000, 0x39393900, 0xdbdbdb00, 0xe1e1e100,
+ 0x72727200, 0x09090900, 0x62626200, 0x3c3c3c00,
+ 0x3e3e3e00, 0x7e7e7e00, 0x5e5e5e00, 0x8e8e8e00,
+ 0xf1f1f100, 0xa0a0a000, 0xcccccc00, 0xa3a3a300,
+ 0x2a2a2a00, 0x1d1d1d00, 0xfbfbfb00, 0xb6b6b600,
+ 0xd6d6d600, 0x20202000, 0xc4c4c400, 0x8d8d8d00,
+ 0x81818100, 0x65656500, 0xf5f5f500, 0x89898900,
+ 0xcbcbcb00, 0x9d9d9d00, 0x77777700, 0xc6c6c600,
+ 0x57575700, 0x43434300, 0x56565600, 0x17171700,
+ 0xd4d4d400, 0x40404000, 0x1a1a1a00, 0x4d4d4d00,
+ 0xc0c0c000, 0x63636300, 0x6c6c6c00, 0xe3e3e300,
+ 0xb7b7b700, 0xc8c8c800, 0x64646400, 0x6a6a6a00,
+ 0x53535300, 0xaaaaaa00, 0x38383800, 0x98989800,
+ 0x0c0c0c00, 0xf4f4f400, 0x9b9b9b00, 0xededed00,
+ 0x7f7f7f00, 0x22222200, 0x76767600, 0xafafaf00,
+ 0xdddddd00, 0x3a3a3a00, 0x0b0b0b00, 0x58585800,
+ 0x67676700, 0x88888800, 0x06060600, 0xc3c3c300,
+ 0x35353500, 0x0d0d0d00, 0x01010100, 0x8b8b8b00,
+ 0x8c8c8c00, 0xc2c2c200, 0xe6e6e600, 0x5f5f5f00,
+ 0x02020200, 0x24242400, 0x75757500, 0x93939300,
+ 0x66666600, 0x1e1e1e00, 0xe5e5e500, 0xe2e2e200,
+ 0x54545400, 0xd8d8d800, 0x10101000, 0xcecece00,
+ 0x7a7a7a00, 0xe8e8e800, 0x08080800, 0x2c2c2c00,
+ 0x12121200, 0x97979700, 0x32323200, 0xababab00,
+ 0xb4b4b400, 0x27272700, 0x0a0a0a00, 0x23232300,
+ 0xdfdfdf00, 0xefefef00, 0xcacaca00, 0xd9d9d900,
+ 0xb8b8b800, 0xfafafa00, 0xdcdcdc00, 0x31313100,
+ 0x6b6b6b00, 0xd1d1d100, 0xadadad00, 0x19191900,
+ 0x49494900, 0xbdbdbd00, 0x51515100, 0x96969600,
+ 0xeeeeee00, 0xe4e4e400, 0xa8a8a800, 0x41414100,
+ 0xdadada00, 0xffffff00, 0xcdcdcd00, 0x55555500,
+ 0x86868600, 0x36363600, 0xbebebe00, 0x61616100,
+ 0x52525200, 0xf8f8f800, 0xbbbbbb00, 0x0e0e0e00,
+ 0x82828200, 0x48484800, 0x69696900, 0x9a9a9a00,
+ 0xe0e0e000, 0x47474700, 0x9e9e9e00, 0x5c5c5c00,
+ 0x04040400, 0x4b4b4b00, 0x34343400, 0x15151500,
+ 0x79797900, 0x26262600, 0xa7a7a700, 0xdedede00,
+ 0x29292900, 0xaeaeae00, 0x92929200, 0xd7d7d700,
+ 0x84848400, 0xe9e9e900, 0xd2d2d200, 0xbababa00,
+ 0x5d5d5d00, 0xf3f3f300, 0xc5c5c500, 0xb0b0b000,
+ 0xbfbfbf00, 0xa4a4a400, 0x3b3b3b00, 0x71717100,
+ 0x44444400, 0x46464600, 0x2b2b2b00, 0xfcfcfc00,
+ 0xebebeb00, 0x6f6f6f00, 0xd5d5d500, 0xf6f6f600,
+ 0x14141400, 0xfefefe00, 0x7c7c7c00, 0x70707000,
+ 0x5a5a5a00, 0x7d7d7d00, 0xfdfdfd00, 0x2f2f2f00,
+ 0x18181800, 0x83838300, 0x16161600, 0xa5a5a500,
+ 0x91919100, 0x1f1f1f00, 0x05050500, 0x95959500,
+ 0x74747400, 0xa9a9a900, 0xc1c1c100, 0x5b5b5b00,
+ 0x4a4a4a00, 0x85858500, 0x6d6d6d00, 0x13131300,
+ 0x07070700, 0x4f4f4f00, 0x4e4e4e00, 0x45454500,
+ 0xb2b2b200, 0x0f0f0f00, 0xc9c9c900, 0x1c1c1c00,
+ 0xa6a6a600, 0xbcbcbc00, 0xececec00, 0x73737300,
+ 0x90909000, 0x7b7b7b00, 0xcfcfcf00, 0x59595900,
+ 0x8f8f8f00, 0xa1a1a100, 0xf9f9f900, 0x2d2d2d00,
+ 0xf2f2f200, 0xb1b1b100, 0x00000000, 0x94949400,
+ 0x37373700, 0x9f9f9f00, 0xd0d0d000, 0x2e2e2e00,
+ 0x9c9c9c00, 0x6e6e6e00, 0x28282800, 0x3f3f3f00,
+ 0x80808000, 0xf0f0f000, 0x3d3d3d00, 0xd3d3d300,
+ 0x25252500, 0x8a8a8a00, 0xb5b5b500, 0xe7e7e700,
+ 0x42424200, 0xb3b3b300, 0xc7c7c700, 0xeaeaea00,
+ 0xf7f7f700, 0x4c4c4c00, 0x11111100, 0x33333300,
+ 0x03030300, 0xa2a2a200, 0xacacac00, 0x60606000
+};
+
+static inline u32 rotl32(u32 v, u32 r)
+{
+ return ((v << r) | (v >> (32 - r)));
+}
+
+static inline u32 rotr32(u32 v, u32 r)
+{
+ return ((v >> r) | (v << (32 - r)));
+}
+
+static inline u32 bswap32(u32 v)
+{
+ return ((v << 24) ^
+ (v >> 24) ^
+ ((v & 0x0000ff00) << 8) ^
+ ((v & 0x00ff0000) >> 8));
+}
+
+static inline u8 get_u8(u32 x, u32 y)
+{
+ return (x >> ((3 - y) * 8));
+}
+
+static inline u32 make_u32(u8 v0, u8 v1, u8 v2, u8 v3)
+{
+ return ((u32)v0 << 24) | ((u32)v1 << 16) | ((u32)v2 << 8) | ((u32)v3);
+}
+
+static inline u32 aria_m(u32 t0)
+{
+ return rotr32(t0, 8) ^ rotr32(t0 ^ rotr32(t0, 8), 16);
+}
+
+/* S-Box Layer 1 + M */
+static inline void aria_sbox_layer1_with_pre_diff(u32 *t0, u32 *t1, u32 *t2,
+ u32 *t3)
+{
+ *t0 = s1[get_u8(*t0, 0)] ^
+ s2[get_u8(*t0, 1)] ^
+ x1[get_u8(*t0, 2)] ^
+ x2[get_u8(*t0, 3)];
+ *t1 = s1[get_u8(*t1, 0)] ^
+ s2[get_u8(*t1, 1)] ^
+ x1[get_u8(*t1, 2)] ^
+ x2[get_u8(*t1, 3)];
+ *t2 = s1[get_u8(*t2, 0)] ^
+ s2[get_u8(*t2, 1)] ^
+ x1[get_u8(*t2, 2)] ^
+ x2[get_u8(*t2, 3)];
+ *t3 = s1[get_u8(*t3, 0)] ^
+ s2[get_u8(*t3, 1)] ^
+ x1[get_u8(*t3, 2)] ^
+ x2[get_u8(*t3, 3)];
+}
+
+/* S-Box Layer 2 + M */
+static inline void aria_sbox_layer2_with_pre_diff(u32 *t0, u32 *t1, u32 *t2,
+ u32 *t3)
+{
+ *t0 = x1[get_u8(*t0, 0)] ^
+ x2[get_u8(*t0, 1)] ^
+ s1[get_u8(*t0, 2)] ^
+ s2[get_u8(*t0, 3)];
+ *t1 = x1[get_u8(*t1, 0)] ^
+ x2[get_u8(*t1, 1)] ^
+ s1[get_u8(*t1, 2)] ^
+ s2[get_u8(*t1, 3)];
+ *t2 = x1[get_u8(*t2, 0)] ^
+ x2[get_u8(*t2, 1)] ^
+ s1[get_u8(*t2, 2)] ^
+ s2[get_u8(*t2, 3)];
+ *t3 = x1[get_u8(*t3, 0)] ^
+ x2[get_u8(*t3, 1)] ^
+ s1[get_u8(*t3, 2)] ^
+ s2[get_u8(*t3, 3)];
+}
+
+/* Word-level diffusion */
+static inline void aria_diff_word(u32 *t0, u32 *t1, u32 *t2, u32 *t3)
+{
+ *t1 ^= *t2;
+ *t2 ^= *t3;
+ *t0 ^= *t1;
+
+ *t3 ^= *t1;
+ *t2 ^= *t0;
+ *t1 ^= *t2;
+}
+
+/* Byte-level diffusion */
+static inline void aria_diff_byte(u32 *t1, u32 *t2, u32 *t3)
+{
+ *t1 = ((*t1 << 8) & 0xff00ff00) ^ ((*t1 >> 8) & 0x00ff00ff);
+ *t2 = rotr32(*t2, 16);
+ *t3 = bswap32(*t3);
+}
+
+/* Key XOR Layer */
+static inline void aria_add_round_key(u32 *rk, u32 *t0, u32 *t1, u32 *t2,
+ u32 *t3)
+{
+ *t0 ^= rk[0];
+ *t1 ^= rk[1];
+ *t2 ^= rk[2];
+ *t3 ^= rk[3];
+}
+/* Odd round Substitution & Diffusion */
+static inline void aria_subst_diff_odd(u32 *t0, u32 *t1, u32 *t2, u32 *t3)
+{
+ aria_sbox_layer1_with_pre_diff(t0, t1, t2, t3);
+ aria_diff_word(t0, t1, t2, t3);
+ aria_diff_byte(t1, t2, t3);
+ aria_diff_word(t0, t1, t2, t3);
+}
+
+/* Even round Substitution & Diffusion */
+static inline void aria_subst_diff_even(u32 *t0, u32 *t1, u32 *t2, u32 *t3)
+{
+ aria_sbox_layer2_with_pre_diff(t0, t1, t2, t3);
+ aria_diff_word(t0, t1, t2, t3);
+ aria_diff_byte(t3, t0, t1);
+ aria_diff_word(t0, t1, t2, t3);
+}
+
+/* Q, R Macro expanded ARIA GSRK */
+static inline void aria_gsrk(u32 *rk, u32 *x, u32 *y, u32 n)
+{
+ int q = 4 - (n / 32);
+ int r = n % 32;
+
+ rk[0] = (x[0]) ^
+ ((y[q % 4]) >> r) ^
+ ((y[(q + 3) % 4]) << (32 - r));
+ rk[1] = (x[1]) ^
+ ((y[(q + 1) % 4]) >> r) ^
+ ((y[q % 4]) << (32 - r));
+ rk[2] = (x[2]) ^
+ ((y[(q + 2) % 4]) >> r) ^
+ ((y[(q + 1) % 4]) << (32 - r));
+ rk[3] = (x[3]) ^
+ ((y[(q + 3) % 4]) >> r) ^
+ ((y[(q + 2) % 4]) << (32 - r));
+}
+
+void aria_encrypt(void *ctx, u8 *out, const u8 *in);
+void aria_decrypt(void *ctx, u8 *out, const u8 *in);
+int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len);
+
+#endif
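To show how the per-round ARIA helpers in this header compose, here is a hypothetical sketch (not part of the diff; aria_round_pair_example is an invented name) of one odd/even round pair over a 128-bit block held in four u32 words, using the first two expanded encryption round keys:

static void aria_round_pair_example(struct aria_ctx *ctx,
				    u32 *t0, u32 *t1, u32 *t2, u32 *t3)
{
	/* odd round: key XOR, S-box layer 1, diffusion */
	aria_add_round_key(ctx->enc_key[0], t0, t1, t2, t3);
	aria_subst_diff_odd(t0, t1, t2, t3);

	/* even round: key XOR, S-box layer 2, diffusion */
	aria_add_round_key(ctx->enc_key[1], t0, t1, t2, t3);
	aria_subst_diff_even(t0, t1, t2, t3);
}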
diff --git a/include/crypto/asym_tpm_subtype.h b/include/crypto/asym_tpm_subtype.h
deleted file mode 100644
index 48198c36d6b9..000000000000
--- a/include/crypto/asym_tpm_subtype.h
+++ /dev/null
@@ -1,19 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#ifndef _LINUX_ASYM_TPM_SUBTYPE_H
-#define _LINUX_ASYM_TPM_SUBTYPE_H
-
-#include <linux/keyctl.h>
-
-struct tpm_key {
- void *blob;
- u32 blob_len;
- uint16_t key_len; /* Size in bits of the key */
- const void *pub_key; /* pointer inside blob to the public key bytes */
- uint16_t pub_key_len; /* length of the public key */
-};
-
-struct tpm_key *tpm_key_create(const void *blob, uint32_t blob_len);
-
-extern struct asymmetric_key_subtype asym_tpm_subtype;
-
-#endif /* _LINUX_ASYM_TPM_SUBTYPE_H */
diff --git a/include/crypto/blake2b.h b/include/crypto/blake2b.h
new file mode 100644
index 000000000000..0c0176285349
--- /dev/null
+++ b/include/crypto/blake2b.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+#ifndef _CRYPTO_BLAKE2B_H
+#define _CRYPTO_BLAKE2B_H
+
+#include <linux/bug.h>
+#include <linux/types.h>
+#include <linux/string.h>
+
+enum blake2b_lengths {
+ BLAKE2B_BLOCK_SIZE = 128,
+ BLAKE2B_HASH_SIZE = 64,
+ BLAKE2B_KEY_SIZE = 64,
+
+ BLAKE2B_160_HASH_SIZE = 20,
+ BLAKE2B_256_HASH_SIZE = 32,
+ BLAKE2B_384_HASH_SIZE = 48,
+ BLAKE2B_512_HASH_SIZE = 64,
+};
+
+struct blake2b_state {
+ /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. */
+ u64 h[8];
+ u64 t[2];
+ u64 f[2];
+ u8 buf[BLAKE2B_BLOCK_SIZE];
+ unsigned int buflen;
+ unsigned int outlen;
+};
+
+enum blake2b_iv {
+ BLAKE2B_IV0 = 0x6A09E667F3BCC908ULL,
+ BLAKE2B_IV1 = 0xBB67AE8584CAA73BULL,
+ BLAKE2B_IV2 = 0x3C6EF372FE94F82BULL,
+ BLAKE2B_IV3 = 0xA54FF53A5F1D36F1ULL,
+ BLAKE2B_IV4 = 0x510E527FADE682D1ULL,
+ BLAKE2B_IV5 = 0x9B05688C2B3E6C1FULL,
+ BLAKE2B_IV6 = 0x1F83D9ABFB41BD6BULL,
+ BLAKE2B_IV7 = 0x5BE0CD19137E2179ULL,
+};
+
+static inline void __blake2b_init(struct blake2b_state *state, size_t outlen,
+ const void *key, size_t keylen)
+{
+ state->h[0] = BLAKE2B_IV0 ^ (0x01010000 | keylen << 8 | outlen);
+ state->h[1] = BLAKE2B_IV1;
+ state->h[2] = BLAKE2B_IV2;
+ state->h[3] = BLAKE2B_IV3;
+ state->h[4] = BLAKE2B_IV4;
+ state->h[5] = BLAKE2B_IV5;
+ state->h[6] = BLAKE2B_IV6;
+ state->h[7] = BLAKE2B_IV7;
+ state->t[0] = 0;
+ state->t[1] = 0;
+ state->f[0] = 0;
+ state->f[1] = 0;
+ state->buflen = 0;
+ state->outlen = outlen;
+ if (keylen) {
+ memcpy(state->buf, key, keylen);
+ memset(&state->buf[keylen], 0, BLAKE2B_BLOCK_SIZE - keylen);
+ state->buflen = BLAKE2B_BLOCK_SIZE;
+ }
+}
+
+#endif /* _CRYPTO_BLAKE2B_H */
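A minimal usage sketch of the initializer above (hypothetical; the real callers live under crypto/ and lib/crypto/): a keyed BLAKE2b-512 state, where the helper zero-pads the key to a full block and queues it as the first compressed block:

static void blake2b_keyed_init_example(struct blake2b_state *state,
				       const u8 *key, size_t keylen)
{
	/* keylen must be 1..BLAKE2B_KEY_SIZE for a keyed hash */
	__blake2b_init(state, BLAKE2B_512_HASH_SIZE, key, keylen);
}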
diff --git a/include/crypto/blake2s.h b/include/crypto/blake2s.h
index b471deac28ff..f9ffd39194eb 100644
--- a/include/crypto/blake2s.h
+++ b/include/crypto/blake2s.h
@@ -3,15 +3,14 @@
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*/
-#ifndef BLAKE2S_H
-#define BLAKE2S_H
+#ifndef _CRYPTO_BLAKE2S_H
+#define _CRYPTO_BLAKE2S_H
+#include <linux/bug.h>
+#include <linux/kconfig.h>
#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/string.h>
-#include <asm/bug.h>
-
enum blake2s_lengths {
BLAKE2S_BLOCK_SIZE = 64,
BLAKE2S_HASH_SIZE = 32,
@@ -24,6 +23,7 @@ enum blake2s_lengths {
};
struct blake2s_state {
+ /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. */
u32 h[8];
u32 t[2];
u32 f[2];
@@ -43,29 +43,34 @@ enum blake2s_iv {
BLAKE2S_IV7 = 0x5BE0CD19UL,
};
-void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen);
-void blake2s_final(struct blake2s_state *state, u8 *out);
-
-static inline void blake2s_init_param(struct blake2s_state *state,
- const u32 param)
+static inline void __blake2s_init(struct blake2s_state *state, size_t outlen,
+ const void *key, size_t keylen)
{
- *state = (struct blake2s_state){{
- BLAKE2S_IV0 ^ param,
- BLAKE2S_IV1,
- BLAKE2S_IV2,
- BLAKE2S_IV3,
- BLAKE2S_IV4,
- BLAKE2S_IV5,
- BLAKE2S_IV6,
- BLAKE2S_IV7,
- }};
+ state->h[0] = BLAKE2S_IV0 ^ (0x01010000 | keylen << 8 | outlen);
+ state->h[1] = BLAKE2S_IV1;
+ state->h[2] = BLAKE2S_IV2;
+ state->h[3] = BLAKE2S_IV3;
+ state->h[4] = BLAKE2S_IV4;
+ state->h[5] = BLAKE2S_IV5;
+ state->h[6] = BLAKE2S_IV6;
+ state->h[7] = BLAKE2S_IV7;
+ state->t[0] = 0;
+ state->t[1] = 0;
+ state->f[0] = 0;
+ state->f[1] = 0;
+ state->buflen = 0;
+ state->outlen = outlen;
+ if (keylen) {
+ memcpy(state->buf, key, keylen);
+ memset(&state->buf[keylen], 0, BLAKE2S_BLOCK_SIZE - keylen);
+ state->buflen = BLAKE2S_BLOCK_SIZE;
+ }
}
static inline void blake2s_init(struct blake2s_state *state,
const size_t outlen)
{
- blake2s_init_param(state, 0x01010000 | outlen);
- state->outlen = outlen;
+ __blake2s_init(state, outlen, NULL, 0);
}
static inline void blake2s_init_key(struct blake2s_state *state,
@@ -75,12 +80,12 @@ static inline void blake2s_init_key(struct blake2s_state *state,
WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2S_HASH_SIZE ||
!key || !keylen || keylen > BLAKE2S_KEY_SIZE));
- blake2s_init_param(state, 0x01010000 | keylen << 8 | outlen);
- memcpy(state->buf, key, keylen);
- state->buflen = BLAKE2S_BLOCK_SIZE;
- state->outlen = outlen;
+ __blake2s_init(state, outlen, key, keylen);
}
+void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen);
+void blake2s_final(struct blake2s_state *state, u8 *out);
+
static inline void blake2s(u8 *out, const u8 *in, const u8 *key,
const size_t outlen, const size_t inlen,
const size_t keylen)
@@ -91,16 +96,9 @@ static inline void blake2s(u8 *out, const u8 *in, const u8 *key,
outlen > BLAKE2S_HASH_SIZE || keylen > BLAKE2S_KEY_SIZE ||
(!key && keylen)));
- if (keylen)
- blake2s_init_key(&state, outlen, key, keylen);
- else
- blake2s_init(&state, outlen);
-
+ __blake2s_init(&state, outlen, key, keylen);
blake2s_update(&state, in, inlen);
blake2s_final(&state, out);
}
-void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen,
- const size_t keylen);
-
-#endif /* BLAKE2S_H */
+#endif /* _CRYPTO_BLAKE2S_H */
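For reference, a hypothetical one-shot use of the blake2s() helper kept by this change (unkeyed, full-length digest; the wrapper name is invented):

static void blake2s_oneshot_example(const u8 *msg, size_t len,
				    u8 digest[BLAKE2S_HASH_SIZE])
{
	blake2s(digest, msg, NULL, BLAKE2S_HASH_SIZE, len, 0);
}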
diff --git a/include/crypto/cbc.h b/include/crypto/cbc.h
deleted file mode 100644
index 2b6422db42e2..000000000000
--- a/include/crypto/cbc.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CBC: Cipher Block Chaining mode
- *
- * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
- */
-
-#ifndef _CRYPTO_CBC_H
-#define _CRYPTO_CBC_H
-
-#include <crypto/internal/skcipher.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-static inline int crypto_cbc_encrypt_segment(
- struct skcipher_walk *walk, struct crypto_skcipher *tfm,
- void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
- unsigned int bsize = crypto_skcipher_blocksize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- u8 *iv = walk->iv;
-
- do {
- crypto_xor(iv, src, bsize);
- fn(tfm, iv, dst);
- memcpy(iv, dst, bsize);
-
- src += bsize;
- dst += bsize;
- } while ((nbytes -= bsize) >= bsize);
-
- return nbytes;
-}
-
-static inline int crypto_cbc_encrypt_inplace(
- struct skcipher_walk *walk, struct crypto_skcipher *tfm,
- void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
- unsigned int bsize = crypto_skcipher_blocksize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *iv = walk->iv;
-
- do {
- crypto_xor(src, iv, bsize);
- fn(tfm, src, src);
- iv = src;
-
- src += bsize;
- } while ((nbytes -= bsize) >= bsize);
-
- memcpy(walk->iv, iv, bsize);
-
- return nbytes;
-}
-
-static inline int crypto_cbc_encrypt_walk(struct skcipher_request *req,
- void (*fn)(struct crypto_skcipher *,
- const u8 *, u8 *))
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct skcipher_walk walk;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while (walk.nbytes) {
- if (walk.src.virt.addr == walk.dst.virt.addr)
- err = crypto_cbc_encrypt_inplace(&walk, tfm, fn);
- else
- err = crypto_cbc_encrypt_segment(&walk, tfm, fn);
- err = skcipher_walk_done(&walk, err);
- }
-
- return err;
-}
-
-static inline int crypto_cbc_decrypt_segment(
- struct skcipher_walk *walk, struct crypto_skcipher *tfm,
- void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
- unsigned int bsize = crypto_skcipher_blocksize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- u8 *iv = walk->iv;
-
- do {
- fn(tfm, src, dst);
- crypto_xor(dst, iv, bsize);
- iv = src;
-
- src += bsize;
- dst += bsize;
- } while ((nbytes -= bsize) >= bsize);
-
- memcpy(walk->iv, iv, bsize);
-
- return nbytes;
-}
-
-static inline int crypto_cbc_decrypt_inplace(
- struct skcipher_walk *walk, struct crypto_skcipher *tfm,
- void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
- unsigned int bsize = crypto_skcipher_blocksize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 last_iv[MAX_CIPHER_BLOCKSIZE];
-
- /* Start of the last block. */
- src += nbytes - (nbytes & (bsize - 1)) - bsize;
- memcpy(last_iv, src, bsize);
-
- for (;;) {
- fn(tfm, src, src);
- if ((nbytes -= bsize) < bsize)
- break;
- crypto_xor(src, src - bsize, bsize);
- src -= bsize;
- }
-
- crypto_xor(src, walk->iv, bsize);
- memcpy(walk->iv, last_iv, bsize);
-
- return nbytes;
-}
-
-static inline int crypto_cbc_decrypt_blocks(
- struct skcipher_walk *walk, struct crypto_skcipher *tfm,
- void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
- if (walk->src.virt.addr == walk->dst.virt.addr)
- return crypto_cbc_decrypt_inplace(walk, tfm, fn);
- else
- return crypto_cbc_decrypt_segment(walk, tfm, fn);
-}
-
-#endif /* _CRYPTO_CBC_H */
diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
index 2676f4fbd4c1..b3ea73b81944 100644
--- a/include/crypto/chacha.h
+++ b/include/crypto/chacha.h
@@ -25,11 +25,7 @@
#define CHACHA_BLOCK_SIZE 64
#define CHACHAPOLY_IV_SIZE 12
-#ifdef CONFIG_X86_64
-#define CHACHA_STATE_WORDS ((CHACHA_BLOCK_SIZE + 12) / sizeof(u32))
-#else
#define CHACHA_STATE_WORDS (CHACHA_BLOCK_SIZE / sizeof(u32))
-#endif
/* 192-bit nonce, then 64-bit stream position */
#define XCHACHA_IV_SIZE 32
@@ -51,13 +47,25 @@ static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
hchacha_block_generic(state, out, nrounds);
}
+enum chacha_constants { /* expand 32-byte k */
+ CHACHA_CONSTANT_EXPA = 0x61707865U,
+ CHACHA_CONSTANT_ND_3 = 0x3320646eU,
+ CHACHA_CONSTANT_2_BY = 0x79622d32U,
+ CHACHA_CONSTANT_TE_K = 0x6b206574U
+};
+
+static inline void chacha_init_consts(u32 *state)
+{
+ state[0] = CHACHA_CONSTANT_EXPA;
+ state[1] = CHACHA_CONSTANT_ND_3;
+ state[2] = CHACHA_CONSTANT_2_BY;
+ state[3] = CHACHA_CONSTANT_TE_K;
+}
+
void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
{
- state[0] = 0x61707865; /* "expa" */
- state[1] = 0x3320646e; /* "nd 3" */
- state[2] = 0x79622d32; /* "2-by" */
- state[3] = 0x6b206574; /* "te k" */
+ chacha_init_consts(state);
state[4] = key[0];
state[5] = key[1];
state[6] = key[2];
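A sketch of the generic init path after this change, assuming kernel context: the constants are loaded via chacha_init_consts() inside chacha_init_generic(), and the resulting state can drive the generic crypt routine (chacha_crypt_generic() is the library entry point declared elsewhere in this header; the wrapper name is invented):

static void chacha20_crypt_example(const u32 key[8], const u8 iv[16],
				   u8 *dst, const u8 *src,
				   unsigned int bytes)
{
	u32 state[CHACHA_STATE_WORDS];

	chacha_init_generic(state, key, iv);
	chacha_crypt_generic(state, dst, src, bytes, 20); /* 20 rounds */
}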
diff --git a/include/crypto/chacha20poly1305.h b/include/crypto/chacha20poly1305.h
index 234ee28078ef..d2ac3ff7dc1e 100644
--- a/include/crypto/chacha20poly1305.h
+++ b/include/crypto/chacha20poly1305.h
@@ -45,4 +45,6 @@ bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len
const u64 nonce,
const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+bool chacha20poly1305_selftest(void);
+
#endif /* __CHACHA20POLY1305_H */
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index 23169f4d87e6..796d986e58e1 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -13,7 +13,8 @@
#ifndef _CRYPTO_CRYPT_H
#define _CRYPTO_CRYPT_H
-#include <linux/kernel.h>
+#include <linux/types.h>
+
#include <crypto/aead.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
diff --git a/include/crypto/curve25519.h b/include/crypto/curve25519.h
index 9ecb3c1f0f15..ece6a9b5fafc 100644
--- a/include/crypto/curve25519.h
+++ b/include/crypto/curve25519.h
@@ -28,13 +28,14 @@ void curve25519_arch(u8 out[CURVE25519_KEY_SIZE],
void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE]);
+bool curve25519_selftest(void);
+
static inline
bool __must_check curve25519(u8 mypublic[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE],
const u8 basepoint[CURVE25519_KEY_SIZE])
{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519) &&
- (!IS_ENABLED(CONFIG_CRYPTO_CURVE25519_X86) || IS_ENABLED(CONFIG_AS_ADX)))
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519))
curve25519_arch(mypublic, secret, basepoint);
else
curve25519_generic(mypublic, secret, basepoint);
@@ -50,8 +51,7 @@ __must_check curve25519_generate_public(u8 pub[CURVE25519_KEY_SIZE],
CURVE25519_KEY_SIZE)))
return false;
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519) &&
- (!IS_ENABLED(CONFIG_CRYPTO_CURVE25519_X86) || IS_ENABLED(CONFIG_AS_ADX)))
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519))
curve25519_base_arch(pub, secret);
else
curve25519_generic(pub, secret, curve25519_base_point);
diff --git a/include/crypto/dh.h b/include/crypto/dh.h
index d71e9858ab86..7b863e911cb4 100644
--- a/include/crypto/dh.h
+++ b/include/crypto/dh.h
@@ -24,21 +24,17 @@
*
* @key: Private DH key
* @p: Diffie-Hellman parameter P
- * @q: Diffie-Hellman parameter Q
* @g: Diffie-Hellman generator G
* @key_size: Size of the private DH key
* @p_size: Size of DH parameter P
- * @q_size: Size of DH parameter Q
* @g_size: Size of DH generator G
*/
struct dh {
- void *key;
- void *p;
- void *q;
- void *g;
+ const void *key;
+ const void *p;
+ const void *g;
unsigned int key_size;
unsigned int p_size;
- unsigned int q_size;
unsigned int g_size;
};
@@ -83,4 +79,20 @@ int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params);
*/
int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params);
+/**
+ * __crypto_dh_decode_key() - decode a private key without parameter checks
+ * @buf: Buffer holding a packed private key that should be decoded
+ * @len: Length of the packed private key buffer
+ * @params: Buffer allocated by the caller that is filled with the
+ * unpacked DH private key.
+ *
+ * Internal function providing the same services as the exported
+ * crypto_dh_decode_key(), but without any of those basic parameter
+ * checks conducted by the latter.
+ *
+ * Return: -EINVAL if buffer has insufficient size, 0 on success
+ */
+int __crypto_dh_decode_key(const char *buf, unsigned int len,
+ struct dh *params);
+
#endif
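A hypothetical sketch of packing DH parameters for a kpp setkey call using the encode helper documented above; crypto_dh_key_len() is the existing size helper from this header, and dh_pack_example is an invented name:

static int dh_pack_example(const struct dh *params, char *buf,
			   unsigned int len)
{
	/* crypto_dh_key_len() gives the required packed-buffer size */
	if (len < crypto_dh_key_len(params))
		return -EINVAL;

	return crypto_dh_encode_key(buf, len, params);
}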
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index 8c9af21efce1..af5ad51d3eef 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -105,6 +105,12 @@ struct drbg_test_data {
struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */
};
+enum drbg_seed_state {
+ DRBG_SEED_STATE_UNSEEDED,
+ DRBG_SEED_STATE_PARTIAL, /* Seeded with !rng_is_initialized() */
+ DRBG_SEED_STATE_FULL,
+};
+
struct drbg_state {
struct mutex drbg_mutex; /* lock around DRBG */
unsigned char *V; /* internal state 10.1.1.1 1a) */
@@ -127,16 +133,15 @@ struct drbg_state {
struct crypto_wait ctr_wait; /* CTR mode async wait obj */
struct scatterlist sg_in, sg_out; /* CTR mode SGLs */
- bool seeded; /* DRBG fully seeded? */
+ enum drbg_seed_state seeded; /* DRBG fully seeded? */
+ unsigned long last_seed_time;
bool pr; /* Prediction resistance enabled? */
bool fips_primed; /* Continuous test primed? */
unsigned char *prev; /* FIPS 140-2 continuous test value */
- struct work_struct seed_work; /* asynchronous seeding support */
struct crypto_rng *jent;
const struct drbg_state_ops *d_ops;
const struct drbg_core *core;
struct drbg_string test_data;
- struct random_ready_callback random_ready;
};
static inline __u8 drbg_statelen(struct drbg_state *drbg)
@@ -184,11 +189,7 @@ static inline size_t drbg_max_addtl(struct drbg_state *drbg)
static inline size_t drbg_max_requests(struct drbg_state *drbg)
{
/* SP800-90A requires 2**48 maximum requests before reseeding */
-#if (__BITS_PER_LONG == 32)
- return SIZE_MAX;
-#else
- return (1UL<<48);
-#endif
+ return (1<<20);
}
/*
diff --git a/include/crypto/ecc_curve.h b/include/crypto/ecc_curve.h
new file mode 100644
index 000000000000..70964781eb68
--- /dev/null
+++ b/include/crypto/ecc_curve.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 HiSilicon */
+
+#ifndef _CRYPTO_ECC_CURVE_H
+#define _CRYPTO_ECC_CURVE_H
+
+#include <linux/types.h>
+
+/**
+ * struct ecc_point - elliptic curve point in affine coordinates
+ *
+ * @x: X coordinate in vli form.
+ * @y: Y coordinate in vli form.
+ * @ndigits: Length of vlis in u64 qwords.
+ */
+struct ecc_point {
+ u64 *x;
+ u64 *y;
+ u8 ndigits;
+};
+
+/**
+ * struct ecc_curve - definition of elliptic curve
+ *
+ * @name: Short name of the curve.
+ * @g: Generator point of the curve.
+ * @p: Prime number; if Barrett's reduction is used for this curve,
+ * the pre-calculated value 'mu' is appended to @p after ndigits.
+ * Use of Barrett's reduction is heuristically determined in
+ * vli_mmod_fast().
+ * @n: Order of the curve group.
+ * @a: Curve parameter a.
+ * @b: Curve parameter b.
+ */
+struct ecc_curve {
+ char *name;
+ struct ecc_point g;
+ u64 *p;
+ u64 *n;
+ u64 *a;
+ u64 *b;
+};
+
+/**
+ * ecc_get_curve() - get an elliptic curve by ID
+ * @curve_id: Curve ID, as defined in 'include/crypto/ecdh.h'
+ *
+ * Return: the requested curve on success, NULL otherwise
+ */
+const struct ecc_curve *ecc_get_curve(unsigned int curve_id);
+
+/**
+ * ecc_get_curve25519() - get the curve25519 curve definition
+ *
+ * Return: the curve25519 curve
+ */
+const struct ecc_curve *ecc_get_curve25519(void);
+
+#endif
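A minimal lookup sketch for the accessor above, assuming kernel context (pr_info and -EINVAL come from the usual kernel headers; the function name is invented):

static int ecc_curve_lookup_example(void)
{
	/* ECC_CURVE_NIST_P256 comes from include/crypto/ecdh.h */
	const struct ecc_curve *curve = ecc_get_curve(ECC_CURVE_NIST_P256);

	if (!curve)
		return -EINVAL;

	pr_info("using curve %s\n", curve->name);
	return 0;
}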
diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h
index a5b805b5526d..a9f98078d29c 100644
--- a/include/crypto/ecdh.h
+++ b/include/crypto/ecdh.h
@@ -25,16 +25,15 @@
/* Curves IDs */
#define ECC_CURVE_NIST_P192 0x0001
#define ECC_CURVE_NIST_P256 0x0002
+#define ECC_CURVE_NIST_P384 0x0003
/**
* struct ecdh - define an ECDH private key
*
- * @curve_id: ECC curve the key is based on.
* @key: Private ECDH key
* @key_size: Size of the private ECDH key
*/
struct ecdh {
- unsigned short curve_id;
char *key;
unsigned short key_size;
};
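A hedged sketch of how the slimmed-down struct ecdh is packed after this change, assuming the post-change encode/decode helpers declared later in this header (which, with the curve ID removed from the struct, select the curve via the algorithm name such as "ecdh-nist-p256"); the wrapper name is invented:

static int ecdh_pack_example(const struct ecdh *params, char *buf,
			     unsigned int len)
{
	if (len < crypto_ecdh_key_len(params))
		return -EINVAL;

	return crypto_ecdh_encode_key(buf, len, params);
}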
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index e29cd67f93c7..ae133e98d813 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -9,13 +9,18 @@
#include <linux/crypto.h>
#include <linux/list.h>
-#include <linux/kernel.h>
#include <linux/kthread.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/akcipher.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
+#include <crypto/kpp.h>
+
+struct device;
#define ENGINE_NAME_LEN 30
/*
@@ -24,9 +29,11 @@
* @idling: the engine is entering idle state
* @busy: request pump is busy
* @running: the engine is running
- * @cur_req_prepared: current request is prepared
+ * @retry_support: indication that the hardware allows re-execution
+ * of a failed backlog request; such a request is put back on the
+ * crypto-engine queue, in head position to keep order
* @list: link with the global crypto engine list
- * @queue_lock: spinlock to syncronise access to request queue
+ * @queue_lock: spinlock to synchronise access to request queue
* @queue: the crypto queue of the engine
* @rt: whether this queue is set to run as a realtime task
* @prepare_crypt_hardware: a request will soon arrive from the queue
@@ -35,6 +42,8 @@
* @unprepare_crypt_hardware: there are currently no more requests on the
* queue so the subsystem notifies the driver that it may relax the
* hardware by issuing this call
+ * @do_batch_requests: execute a batch of requests. Depends on multiple
+ * requests support.
* @kworker: kthread worker struct for request pump
* @pump_requests: work struct for scheduling work to the request pump
* @priv_data: the engine private data
@@ -45,7 +54,8 @@ struct crypto_engine {
bool idling;
bool busy;
bool running;
- bool cur_req_prepared;
+
+ bool retry_support;
struct list_head list;
spinlock_t queue_lock;
@@ -56,6 +66,8 @@ struct crypto_engine {
int (*prepare_crypt_hardware)(struct crypto_engine *engine);
int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
+ int (*do_batch_requests)(struct crypto_engine *engine);
+
struct kthread_worker *kworker;
struct kthread_work pump_requests;
@@ -89,6 +101,8 @@ int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
struct akcipher_request *req);
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
struct ahash_request *req);
+int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
+ struct kpp_request *req);
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
struct skcipher_request *req);
void crypto_finalize_aead_request(struct crypto_engine *engine,
@@ -97,11 +111,17 @@ void crypto_finalize_akcipher_request(struct crypto_engine *engine,
struct akcipher_request *req, int err);
void crypto_finalize_hash_request(struct crypto_engine *engine,
struct ahash_request *req, int err);
+void crypto_finalize_kpp_request(struct crypto_engine *engine,
+ struct kpp_request *req, int err);
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
struct skcipher_request *req, int err);
int crypto_engine_start(struct crypto_engine *engine);
int crypto_engine_stop(struct crypto_engine *engine);
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
+struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
+ bool retry_support,
+ int (*cbk_do_batch)(struct crypto_engine *engine),
+ bool rt, int qlen);
int crypto_engine_exit(struct crypto_engine *engine);
#endif /* _CRYPTO_ENGINE_H */
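A hypothetical driver-side sketch of the extended allocator declared above (the function name is invented; the argument choices are illustrative only):

static struct crypto_engine *example_engine_setup(struct device *dev)
{
	/* retry support on, no batch callback, non-RT pump, qlen 16 */
	return crypto_engine_alloc_init_and_set(dev, true, NULL, false, 16);
}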
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h
index fa0a63d298dc..81330c6446f6 100644
--- a/include/crypto/gf128mul.h
+++ b/include/crypto/gf128mul.h
@@ -230,7 +230,7 @@ void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t);
void gf128mul_x8_ble(le128 *r, const le128 *x);
static inline void gf128mul_free_4k(struct gf128mul_4k *t)
{
- kzfree(t);
+ kfree_sensitive(t);
}
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index cee446c59497..f5841992dc9b 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -59,11 +59,6 @@ struct ahash_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
-#define AHASH_REQUEST_ON_STACK(name, ahash) \
- char __##name##_desc[sizeof(struct ahash_request) + \
- crypto_ahash_reqsize(ahash)] CRYPTO_MINALIGN_ATTR; \
- struct ahash_request *name = (void *)__##name##_desc
-
/**
* struct ahash_alg - asynchronous message digest definition
* @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the
@@ -123,6 +118,17 @@ struct ahash_request {
* data so the transformation can continue from this point onward. No
* data processing happens at this point. Driver must not use
* req->result.
+ * @init_tfm: Initialize the cryptographic transformation object.
+ * This function is called only once at the instantiation
+ * time, right after the transformation context was
+ * allocated. In case the cryptographic hardware has
+ * some special requirements which need to be handled
+ * by software, this function shall check for the precise
+ * requirement of the transformation and put any software
+ * fallbacks in place.
+ * @exit_tfm: Deinitialize the cryptographic transformation object.
+ * This is a counterpart to @init_tfm, used to remove
+ * various changes set in @init_tfm.
* @halg: see struct hash_alg_common
*/
struct ahash_alg {
@@ -135,13 +141,15 @@ struct ahash_alg {
int (*import)(struct ahash_request *req, const void *in);
int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
+ int (*init_tfm)(struct crypto_ahash *tfm);
+ void (*exit_tfm)(struct crypto_ahash *tfm);
struct hash_alg_common halg;
};
struct shash_desc {
struct crypto_shash *tfm;
- void *__ctx[] CRYPTO_MINALIGN_ATTR;
+ void *__ctx[] __aligned(ARCH_SLAB_MINALIGN);
};
#define HASH_MAX_DIGESTSIZE 64
@@ -154,9 +162,9 @@ struct shash_desc {
#define HASH_MAX_STATESIZE 512
-#define SHASH_DESC_ON_STACK(shash, ctx) \
- char __##shash##_desc[sizeof(struct shash_desc) + \
- HASH_MAX_DESCSIZE] CRYPTO_MINALIGN_ATTR; \
+#define SHASH_DESC_ON_STACK(shash, ctx) \
+ char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \
+ __aligned(__alignof__(struct shash_desc)); \
struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
/**
@@ -273,6 +281,8 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
/**
* crypto_free_ahash() - zeroize and free the ahash handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_ahash(struct crypto_ahash *tfm)
{
@@ -448,7 +458,7 @@ int crypto_ahash_finup(struct ahash_request *req);
*
* Return:
* 0 if the message digest was successfully calculated;
- * -EINPROGRESS if data is feeded into hardware (DMA) or queued for later;
+ * -EINPROGRESS if data is fed into hardware (DMA) or queued for later;
* -EBUSY if queue is full and request should be resubmitted later;
* other < 0 if an error occurred
*/
@@ -606,7 +616,7 @@ static inline struct ahash_request *ahash_request_alloc(
*/
static inline void ahash_request_free(struct ahash_request *req)
{
- kzfree(req);
+ kfree_sensitive(req);
}
static inline void ahash_request_zero(struct ahash_request *req)
@@ -687,7 +697,7 @@ static inline void ahash_request_set_crypt(struct ahash_request *req,
* The message digest API is able to maintain state information for the
* caller.
*
- * The synchronous message digest API can store user-related context in in its
+ * The synchronous message digest API can store user-related context in its
* shash_desc request data structure.
*/
@@ -708,6 +718,8 @@ static inline void ahash_request_set_crypt(struct ahash_request *req,
struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
u32 mask);
+int crypto_has_shash(const char *alg_name, u32 type, u32 mask);
+
static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
{
return &tfm->base;
@@ -716,6 +728,8 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
/**
* crypto_free_shash() - zeroize and free the message digest handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_shash(struct crypto_shash *tfm)
{
@@ -856,6 +870,25 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out);
/**
+ * crypto_shash_tfm_digest() - calculate message digest for buffer
+ * @tfm: hash transformation object
+ * @data: see crypto_shash_update()
+ * @len: see crypto_shash_update()
+ * @out: see crypto_shash_final()
+ *
+ * This is a simplified version of crypto_shash_digest() for users who don't
+ * want to allocate their own hash descriptor (shash_desc). Instead,
+ * crypto_shash_tfm_digest() takes a hash transformation object (crypto_shash)
+ * directly, and it allocates a hash descriptor on the stack internally.
+ * Note that this stack allocation may be fairly large.
+ *
+ * Context: Any context.
+ * Return: 0 on success; < 0 if an error occurred.
+ */
+int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
+ unsigned int len, u8 *out);
+
+/**
* crypto_shash_export() - extract operational state for message digest
* @desc: reference to the operational state handle whose state is exported
* @out: output buffer of sufficient size that can hold the hash state
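A hypothetical usage sketch of the crypto_shash_tfm_digest() helper added above: a one-shot SHA-256 without managing a shash_desc (the wrapper name is invented; all API calls are from this header):

static int sha256_oneshot_example(const u8 *data, unsigned int len,
				  u8 out[32])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_tfm_digest(tfm, data, len, out);
	crypto_free_shash(tfm);
	return err;
}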
diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h
index eb9d2e368969..dd4f06785049 100644
--- a/include/crypto/hash_info.h
+++ b/include/crypto/hash_info.h
@@ -8,7 +8,8 @@
#ifndef _CRYPTO_HASH_INFO_H
#define _CRYPTO_HASH_INFO_H
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
#include <crypto/md5.h>
#include <crypto/streebog.h>
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 24cfa96f98ea..a5db86670bdf 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -29,8 +29,8 @@ struct alg_sock {
struct sock *parent;
- unsigned int refcnt;
- unsigned int nokey_refcnt;
+ atomic_t refcnt;
+ atomic_t nokey_refcnt;
const struct af_alg_type *type;
void *private;
@@ -46,6 +46,7 @@ struct af_alg_type {
void *(*bind)(const char *name, u32 type, u32 mask);
void (*release)(void *private);
int (*setkey)(void *private, const u8 *key, unsigned int keylen);
+ int (*setentropy)(void *private, sockptr_t entropy, unsigned int len);
int (*accept)(void *private, struct sock *sk);
int (*accept_nokey)(void *private, struct sock *sk);
int (*setauthsize)(void *private, unsigned int authsize);
@@ -66,7 +67,7 @@ struct af_alg_sgl {
struct af_alg_tsgl {
struct list_head list;
unsigned int cur; /* Last processed SG entry */
- struct scatterlist sg[0]; /* Array of SGs forming the SGL */
+ struct scatterlist sg[]; /* Array of SGs forming the SGL */
};
#define MAX_SGL_ENTS ((4096 - sizeof(struct af_alg_tsgl)) / \
@@ -135,6 +136,7 @@ struct af_alg_async_req {
* SG?
* @enc: Cryptographic operation to be performed when
* recvmsg is invoked.
+ * @init: True if metadata has been sent.
* @len: Length of memory allocated for this data structure.
*/
struct af_alg_ctx {
@@ -151,6 +153,7 @@ struct af_alg_ctx {
bool more;
bool merge;
bool enc;
+ bool init;
unsigned int len;
};
@@ -226,7 +229,7 @@ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset);
void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
size_t dst_offset);
void af_alg_wmem_wakeup(struct sock *sk);
-int af_alg_wait_for_data(struct sock *sk, unsigned flags);
+int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min);
int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
unsigned int ivsize);
ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index cf478681b53e..cfc47e18820f 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -46,7 +46,7 @@ static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
static inline void __acomp_request_free(struct acomp_req *req)
{
- kzfree(req);
+ kfree_sensitive(req);
}
/**
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index 27b7b0224ea6..d482017f3e20 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -114,31 +114,6 @@ static inline void aead_init_queue(struct aead_queue *queue,
crypto_init_queue(&queue->base, max_qlen);
}
-static inline int aead_enqueue_request(struct aead_queue *queue,
- struct aead_request *request)
-{
- return crypto_enqueue_request(&queue->base, &request->base);
-}
-
-static inline struct aead_request *aead_dequeue_request(
- struct aead_queue *queue)
-{
- struct crypto_async_request *req;
-
- req = crypto_dequeue_request(&queue->base);
-
- return req ? container_of(req, struct aead_request, base) : NULL;
-}
-
-static inline struct aead_request *aead_get_backlog(struct aead_queue *queue)
-{
- struct crypto_async_request *req;
-
- req = crypto_get_backlog(&queue->base);
-
- return req ? container_of(req, struct aead_request, base) : NULL;
-}
-
static inline unsigned int crypto_aead_alg_chunksize(struct aead_alg *alg)
{
return alg->chunksize;
diff --git a/include/crypto/internal/blake2b.h b/include/crypto/internal/blake2b.h
new file mode 100644
index 000000000000..982fe5e8471c
--- /dev/null
+++ b/include/crypto/internal/blake2b.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Helper functions for BLAKE2b implementations.
+ * Keep this in sync with the corresponding BLAKE2s header.
+ */
+
+#ifndef _CRYPTO_INTERNAL_BLAKE2B_H
+#define _CRYPTO_INTERNAL_BLAKE2B_H
+
+#include <crypto/blake2b.h>
+#include <crypto/internal/hash.h>
+#include <linux/string.h>
+
+void blake2b_compress_generic(struct blake2b_state *state,
+ const u8 *block, size_t nblocks, u32 inc);
+
+static inline void blake2b_set_lastblock(struct blake2b_state *state)
+{
+ state->f[0] = -1;
+}
+
+typedef void (*blake2b_compress_t)(struct blake2b_state *state,
+ const u8 *block, size_t nblocks, u32 inc);
+
+static inline void __blake2b_update(struct blake2b_state *state,
+ const u8 *in, size_t inlen,
+ blake2b_compress_t compress)
+{
+ const size_t fill = BLAKE2B_BLOCK_SIZE - state->buflen;
+
+ if (unlikely(!inlen))
+ return;
+ if (inlen > fill) {
+ memcpy(state->buf + state->buflen, in, fill);
+ (*compress)(state, state->buf, 1, BLAKE2B_BLOCK_SIZE);
+ state->buflen = 0;
+ in += fill;
+ inlen -= fill;
+ }
+ if (inlen > BLAKE2B_BLOCK_SIZE) {
+ const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2B_BLOCK_SIZE);
+ /* Hash one less (full) block than strictly possible */
+ (*compress)(state, in, nblocks - 1, BLAKE2B_BLOCK_SIZE);
+ in += BLAKE2B_BLOCK_SIZE * (nblocks - 1);
+ inlen -= BLAKE2B_BLOCK_SIZE * (nblocks - 1);
+ }
+ memcpy(state->buf + state->buflen, in, inlen);
+ state->buflen += inlen;
+}
+
+static inline void __blake2b_final(struct blake2b_state *state, u8 *out,
+ blake2b_compress_t compress)
+{
+ int i;
+
+ blake2b_set_lastblock(state);
+ memset(state->buf + state->buflen, 0,
+ BLAKE2B_BLOCK_SIZE - state->buflen); /* Padding */
+ (*compress)(state, state->buf, 1, state->buflen);
+ for (i = 0; i < ARRAY_SIZE(state->h); i++)
+ __cpu_to_le64s(&state->h[i]);
+ memcpy(out, state->h, state->outlen);
+}
+
+/* Helper functions for shash implementations of BLAKE2b */
+
+struct blake2b_tfm_ctx {
+ u8 key[BLAKE2B_KEY_SIZE];
+ unsigned int keylen;
+};
+
+static inline int crypto_blake2b_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm);
+
+ if (keylen == 0 || keylen > BLAKE2B_KEY_SIZE)
+ return -EINVAL;
+
+ memcpy(tctx->key, key, keylen);
+ tctx->keylen = keylen;
+
+ return 0;
+}
+
+static inline int crypto_blake2b_init(struct shash_desc *desc)
+{
+ const struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct blake2b_state *state = shash_desc_ctx(desc);
+ unsigned int outlen = crypto_shash_digestsize(desc->tfm);
+
+ __blake2b_init(state, outlen, tctx->key, tctx->keylen);
+ return 0;
+}
+
+static inline int crypto_blake2b_update(struct shash_desc *desc,
+ const u8 *in, unsigned int inlen,
+ blake2b_compress_t compress)
+{
+ struct blake2b_state *state = shash_desc_ctx(desc);
+
+ __blake2b_update(state, in, inlen, compress);
+ return 0;
+}
+
+static inline int crypto_blake2b_final(struct shash_desc *desc, u8 *out,
+ blake2b_compress_t compress)
+{
+ struct blake2b_state *state = shash_desc_ctx(desc);
+
+ __blake2b_final(state, out, compress);
+ return 0;
+}
+
+#endif /* _CRYPTO_INTERNAL_BLAKE2B_H */
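A sketch of how a generic shash implementation can wire the helpers above to the generic compression function (this mirrors what a crypto/blake2b_generic.c-style user would do; the function names are invented):

static int blake2b_update_generic_example(struct shash_desc *desc,
					  const u8 *in, unsigned int inlen)
{
	return crypto_blake2b_update(desc, in, inlen,
				     blake2b_compress_generic);
}

static int blake2b_final_generic_example(struct shash_desc *desc, u8 *out)
{
	return crypto_blake2b_final(desc, out, blake2b_compress_generic);
}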
diff --git a/include/crypto/internal/blake2s.h b/include/crypto/internal/blake2s.h
index 74ff77032e52..506d56530ca9 100644
--- a/include/crypto/internal/blake2s.h
+++ b/include/crypto/internal/blake2s.h
@@ -1,24 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Helper functions for BLAKE2s implementations.
+ * Keep this in sync with the corresponding BLAKE2b header.
+ */
-#ifndef BLAKE2S_INTERNAL_H
-#define BLAKE2S_INTERNAL_H
+#ifndef _CRYPTO_INTERNAL_BLAKE2S_H
+#define _CRYPTO_INTERNAL_BLAKE2S_H
#include <crypto/blake2s.h>
+#include <linux/string.h>
-struct blake2s_tfm_ctx {
- u8 key[BLAKE2S_KEY_SIZE];
- unsigned int keylen;
-};
-
-void blake2s_compress_generic(struct blake2s_state *state,const u8 *block,
+void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
size_t nblocks, const u32 inc);
-void blake2s_compress_arch(struct blake2s_state *state,const u8 *block,
- size_t nblocks, const u32 inc);
+void blake2s_compress(struct blake2s_state *state, const u8 *block,
+ size_t nblocks, const u32 inc);
-static inline void blake2s_set_lastblock(struct blake2s_state *state)
-{
- state->f[0] = -1;
-}
+bool blake2s_selftest(void);
-#endif /* BLAKE2S_INTERNAL_H */
+#endif /* _CRYPTO_INTERNAL_BLAKE2S_H */
diff --git a/include/crypto/internal/cipher.h b/include/crypto/internal/cipher.h
new file mode 100644
index 000000000000..a9174ba90250
--- /dev/null
+++ b/include/crypto/internal/cipher.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * Copyright (c) 2002 David S. Miller (davem@redhat.com)
+ * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
+ * and Nettle, by Niels Möller.
+ */
+
+#ifndef _CRYPTO_INTERNAL_CIPHER_H
+#define _CRYPTO_INTERNAL_CIPHER_H
+
+#include <crypto/algapi.h>
+
+struct crypto_cipher {
+ struct crypto_tfm base;
+};
+
+/**
+ * DOC: Single Block Cipher API
+ *
+ * The single block cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
+ *
+ * Using the single block cipher API calls, operations with the basic cipher
+ * primitive can be implemented. These cipher primitives exclude any block
+ * chaining operations including IV handling.
+ *
+ * The purpose of this single block cipher API is to support the implementation
+ * of templates or other concepts that only need to perform the cipher operation
+ * on one block at a time. Templates invoke the underlying cipher primitive
+ * block-wise and process either the input or the output data of these cipher
+ * operations.
+ */
+
+static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
+{
+ return (struct crypto_cipher *)tfm;
+}
+
+/**
+ * crypto_alloc_cipher() - allocate single block cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * single block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a single block cipher. The returned struct
+ * crypto_cipher is the cipher handle that is required for any subsequent API
+ * invocation for that single block cipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
+ u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_CIPHER;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+
+ return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
+}
+
+static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_free_cipher() - zeroize and free the single block cipher handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_cipher(struct crypto_cipher *tfm)
+{
+ crypto_free_tfm(crypto_cipher_tfm(tfm));
+}
+
+/**
+ * crypto_has_cipher() - Search for the availability of a single block cipher
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * single block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the single block cipher is known to the kernel crypto API;
+ * false otherwise
+ */
+static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_CIPHER;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+
+ return crypto_has_alg(alg_name, type, mask);
+}
+
+/**
+ * crypto_cipher_blocksize() - obtain block size for cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the single block cipher referenced with the cipher handle
+ * tfm is returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation.
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
+{
+ return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
+}
+
+static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
+{
+ return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
+}
+
+static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
+{
+ return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
+}
+
+static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
+}
+
+static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
+}
+
+/**
+ * crypto_cipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the single block cipher referenced by the
+ * cipher handle.
+ *
+ * Note that the key length determines the cipher variant. Many block ciphers
+ * come in different variants depending on the key size, such as AES-128 vs.
+ * AES-192 vs. AES-256. When a 16-byte key is provided for an AES cipher
+ * handle, AES-128 is used.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+int crypto_cipher_setkey(struct crypto_cipher *tfm,
+ const u8 *key, unsigned int keylen);
+
+/**
+ * crypto_cipher_encrypt_one() - encrypt one block of plaintext
+ * @tfm: cipher handle
+ * @dst: points to the buffer that will be filled with the ciphertext
+ * @src: buffer holding the plaintext to be encrypted
+ *
+ * Invoke the encryption operation of one block. The caller must ensure that
+ * the plaintext and ciphertext buffers are at least one block in size.
+ */
+void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
+ u8 *dst, const u8 *src);
+
+/**
+ * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
+ * @tfm: cipher handle
+ * @dst: points to the buffer that will be filled with the plaintext
+ * @src: buffer holding the ciphertext to be decrypted
+ *
+ * Invoke the decryption operation of one block. The caller must ensure that
+ * the plaintext and ciphertext buffers are at least one block in size.
+ */
+void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
+ u8 *dst, const u8 *src);
+
+struct crypto_cipher_spawn {
+ struct crypto_spawn base;
+};
+
+static inline int crypto_grab_cipher(struct crypto_cipher_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_CIPHER;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+ return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
+}
+
+static inline void crypto_drop_cipher(struct crypto_cipher_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct crypto_alg *crypto_spawn_cipher_alg(
+ struct crypto_cipher_spawn *spawn)
+{
+ return spawn->base.alg;
+}
+
+static inline struct crypto_cipher *crypto_spawn_cipher(
+ struct crypto_cipher_spawn *spawn)
+{
+ u32 type = CRYPTO_ALG_TYPE_CIPHER;
+ u32 mask = CRYPTO_ALG_TYPE_MASK;
+
+ return __crypto_cipher_cast(crypto_spawn_tfm(&spawn->base, type, mask));
+}
+
+static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
+{
+ return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
+}
+
+#endif
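
A minimal sketch of the single block cipher API documented above; error handling is abbreviated and the example_ names are illustrative:

#include <crypto/internal/cipher.h>
#include <linux/err.h>

/* Sketch: encrypt one AES block via the single block cipher API. */
static int example_encrypt_one_block(const u8 *key, unsigned int keylen,
                                     const u8 *src, u8 *dst)
{
        struct crypto_cipher *tfm;
        int err;

        tfm = crypto_alloc_cipher("aes", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_cipher_setkey(tfm, key, keylen);
        if (!err)
                crypto_cipher_encrypt_one(tfm, dst, src);

        crypto_free_cipher(tfm);
        return err;
}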
diff --git a/crypto/ecc.h b/include/crypto/internal/ecc.h
index ab0eb70b9c09..4f6c1a68882f 100644
--- a/crypto/ecc.h
+++ b/include/crypto/internal/ecc.h
@@ -26,49 +26,35 @@
#ifndef _CRYPTO_ECC_H
#define _CRYPTO_ECC_H
+#include <crypto/ecc_curve.h>
+#include <asm/unaligned.h>
+
/* One digit is u64 qword. */
#define ECC_CURVE_NIST_P192_DIGITS 3
#define ECC_CURVE_NIST_P256_DIGITS 4
-#define ECC_MAX_DIGITS (512 / 64)
+#define ECC_CURVE_NIST_P384_DIGITS 6
+#define ECC_MAX_DIGITS (512 / 64) /* due to ecrdsa */
#define ECC_DIGITS_TO_BYTES_SHIFT 3
-/**
- * struct ecc_point - elliptic curve point in affine coordinates
- *
- * @x: X coordinate in vli form.
- * @y: Y coordinate in vli form.
- * @ndigits: Length of vlis in u64 qwords.
- */
-struct ecc_point {
- u64 *x;
- u64 *y;
- u8 ndigits;
-};
+#define ECC_MAX_BYTES (ECC_MAX_DIGITS << ECC_DIGITS_TO_BYTES_SHIFT)
#define ECC_POINT_INIT(x, y, ndigits) (struct ecc_point) { x, y, ndigits }
/**
- * struct ecc_curve - definition of elliptic curve
- *
- * @name: Short name of the curve.
- * @g: Generator point of the curve.
- * @p: Prime number, if Barrett's reduction is used for this curve
- * pre-calculated value 'mu' is appended to the @p after ndigits.
- * Use of Barrett's reduction is heuristically determined in
- * vli_mmod_fast().
- * @n: Order of the curve group.
- * @a: Curve parameter a.
- * @b: Curve parameter b.
+ * ecc_swap_digits() - Copy ndigits from a big-endian array to a native array
+ * @in: Input array
+ * @out: Output array
+ * @ndigits: Number of digits to copy
*/
-struct ecc_curve {
- char *name;
- struct ecc_point g;
- u64 *p;
- u64 *n;
- u64 *a;
- u64 *b;
-};
+static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigits)
+{
+ const __be64 *src = (__force __be64 *)in;
+ int i;
+
+ for (i = 0; i < ndigits; i++)
+ out[i] = get_unaligned_be64(&src[ndigits - 1 - i]);
+}
/**
* ecc_is_key_valid() - Validate a given ECDH private key
@@ -148,6 +134,20 @@ int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
struct ecc_point *pk);
/**
+ * ecc_is_pubkey_valid_full() - Full public key validation
+ *
+ * @curve: elliptic curve domain parameters
+ * @pk: public key as a point
+ *
+ * Validate a public key according to the SP800-56A section 5.6.2.3.3 ECC Full
+ * Public-Key Validation Routine.
+ *
+ * Return: 0 if validation is successful, -EINVAL if it fails.
+ */
+int ecc_is_pubkey_valid_full(const struct ecc_curve *curve,
+ struct ecc_point *pk);
+
+/**
* vli_is_zero() - Determine if vli is zero
*
* @vli: vli to check.
@@ -226,6 +226,41 @@ void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
const u64 *mod, unsigned int ndigits);
/**
+ * vli_num_bits() - Counts the number of bits required for vli.
+ *
+ * @vli: vli to check.
+ * @ndigits: Length of the @vli
+ *
+ * Return: The number of bits required to represent @vli.
+ */
+unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits);
+
+/**
+ * ecc_alloc_point() - Allocate ECC point.
+ *
+ * @ndigits: Length of vlis in u64 qwords.
+ *
+ * Return: Pointer to the allocated point or NULL if allocation failed.
+ */
+struct ecc_point *ecc_alloc_point(unsigned int ndigits);
+
+/**
+ * ecc_free_point() - Free ECC point.
+ *
+ * @p: The point to free.
+ */
+void ecc_free_point(struct ecc_point *p);
+
+/**
+ * ecc_point_is_zero() - Check if point is zero.
+ *
+ * @point: Point to check for zero.
+ *
+ * Return: true if point is the point at infinity, false otherwise.
+ */
+bool ecc_point_is_zero(const struct ecc_point *point);
+
+/**
* ecc_point_mult_shamir() - Add two points multiplied by scalars
*
* @result: resulting point
@@ -242,4 +277,5 @@ void ecc_point_mult_shamir(const struct ecc_point *result,
const u64 *x, const struct ecc_point *p,
const u64 *y, const struct ecc_point *q,
const struct ecc_curve *curve);
+
#endif
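
A small sketch of ecc_swap_digits() as declared above, converting a big-endian NIST P-256 scalar into native vli digits; names are illustrative:

/* Sketch: load a 32-byte big-endian scalar into native u64 digits. */
static void example_load_p256_scalar(const u8 be_scalar[32],
                                     u64 digits[ECC_CURVE_NIST_P256_DIGITS])
{
        ecc_swap_digits(be_scalar, digits, ECC_CURVE_NIST_P256_DIGITS);
        /* digits[0] now holds the least significant u64 qword. */
}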
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 229d37681a9d..7fd7126f593a 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -20,7 +20,7 @@ struct aead_geniv_ctx {
};
struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
- struct rtattr **tb, u32 type, u32 mask);
+ struct rtattr **tb);
int aead_init_geniv(struct crypto_aead *tfm);
void aead_exit_geniv(struct crypto_aead *tfm);
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 89f6f46ab2b8..25806141db59 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -62,25 +62,12 @@ struct crypto_shash_spawn {
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
int crypto_hash_walk_first(struct ahash_request *req,
struct crypto_hash_walk *walk);
-int crypto_ahash_walk_first(struct ahash_request *req,
- struct crypto_hash_walk *walk);
-
-static inline int crypto_ahash_walk_done(struct crypto_hash_walk *walk,
- int err)
-{
- return crypto_hash_walk_done(walk, err);
-}
static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
{
return !(walk->entrylen | walk->total);
}
-static inline int crypto_ahash_walk_last(struct crypto_hash_walk *walk)
-{
- return crypto_hash_walk_last(walk);
-}
-
int crypto_register_ahash(struct ahash_alg *alg);
void crypto_unregister_ahash(struct ahash_alg *alg);
int crypto_register_ahashes(struct ahash_alg *algs, int count);
@@ -88,13 +75,7 @@ void crypto_unregister_ahashes(struct ahash_alg *algs, int count);
int ahash_register_instance(struct crypto_template *tmpl,
struct ahash_instance *inst);
-int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
- unsigned int keylen);
-
-static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
-{
- return alg->setkey != shash_no_setkey;
-}
+bool crypto_shash_alg_has_setkey(struct shash_alg *alg);
static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg)
{
@@ -177,6 +158,12 @@ static inline struct ahash_instance *ahash_instance(
return container_of(inst, struct ahash_instance, s.base);
}
+static inline struct ahash_instance *ahash_alg_instance(
+ struct crypto_ahash *ahash)
+{
+ return ahash_instance(crypto_tfm_alg_instance(&ahash->base));
+}
+
static inline void *ahash_instance_ctx(struct ahash_instance *inst)
{
return crypto_instance_ctx(ahash_crypto_instance(inst));
diff --git a/include/crypto/internal/kdf_selftest.h b/include/crypto/internal/kdf_selftest.h
new file mode 100644
index 000000000000..4d03d2af57b7
--- /dev/null
+++ b/include/crypto/internal/kdf_selftest.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2021, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _CRYPTO_KDF_SELFTEST_H
+#define _CRYPTO_KDF_SELFTEST_H
+
+#include <crypto/hash.h>
+#include <linux/uio.h>
+
+struct kdf_testvec {
+ unsigned char *key;
+ size_t keylen;
+ unsigned char *ikm;
+ size_t ikmlen;
+ struct kvec info;
+ unsigned char *expected;
+ size_t expectedlen;
+};
+
+static inline int
+kdf_test(const struct kdf_testvec *test, const char *name,
+ int (*crypto_kdf_setkey)(struct crypto_shash *kmd,
+ const u8 *key, size_t keylen,
+ const u8 *ikm, size_t ikmlen),
+ int (*crypto_kdf_generate)(struct crypto_shash *kmd,
+ const struct kvec *info,
+ unsigned int info_nvec,
+ u8 *dst, unsigned int dlen))
+{
+ struct crypto_shash *kmd;
+ int ret;
+ u8 *buf = kzalloc(test->expectedlen, GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
+
+ kmd = crypto_alloc_shash(name, 0, 0);
+ if (IS_ERR(kmd)) {
+ pr_err("alg: kdf: could not allocate hash handle for %s\n",
+ name);
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ ret = crypto_kdf_setkey(kmd, test->key, test->keylen,
+ test->ikm, test->ikmlen);
+ if (ret) {
+ pr_err("alg: kdf: could not set key derivation key\n");
+ goto err;
+ }
+
+ ret = crypto_kdf_generate(kmd, &test->info, 1, buf, test->expectedlen);
+ if (ret) {
+ pr_err("alg: kdf: could not obtain key data\n");
+ goto err;
+ }
+
+ ret = memcmp(test->expected, buf, test->expectedlen);
+ if (ret)
+ ret = -EINVAL;
+
+err:
+ crypto_free_shash(kmd);
+ kfree(buf);
+ return ret;
+}
+
+#endif /* _CRYPTO_KDF_SELFTEST_H */
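
A sketch of wiring kdf_test() to the SP800-108 counter KDF declared in <crypto/kdf_sp800108.h> (see below); the test vector contents are elided here, not real values:

#include <crypto/internal/kdf_selftest.h>
#include <crypto/kdf_sp800108.h>

/* Sketch: self-test the HMAC(SHA-256) counter KDF. */
static int example_kdf108_selftest(void)
{
        static const struct kdf_testvec tv = {
                /* .key, .keylen, .info, .expected, .expectedlen
                 * filled in from a known-answer test vector. */
        };

        return kdf_test(&tv, "hmac(sha256)",
                        crypto_kdf108_setkey, crypto_kdf108_ctr_generate);
}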
diff --git a/include/crypto/internal/kpp.h b/include/crypto/internal/kpp.h
index 659b642efada..9cb0662ebe87 100644
--- a/include/crypto/internal/kpp.h
+++ b/include/crypto/internal/kpp.h
@@ -10,6 +10,38 @@
#include <crypto/kpp.h>
#include <crypto/algapi.h>
+/**
+ * struct kpp_instance - KPP template instance
+ * @free: Callback getting invoked upon instance destruction. Must be set.
+ * @s: Internal. Generic crypto core instance state, properly laid out
+ * to alias with @alg as needed.
+ * @alg: The &struct kpp_alg implementation provided by the instance.
+ */
+struct kpp_instance {
+ void (*free)(struct kpp_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct kpp_alg, base)];
+ struct crypto_instance base;
+ } s;
+ struct kpp_alg alg;
+ };
+};
+
+/**
+ * struct crypto_kpp_spawn - KPP algorithm spawn
+ * @base: Internal. Generic crypto core spawn state.
+ *
+ * Template instances can get a hold on some inner KPP algorithm by
+ * binding a &struct crypto_kpp_spawn via
+ * crypto_grab_kpp(). Transforms may subsequently get instantiated
+ * from the referenced inner &struct kpp_alg by means of
+ * crypto_spawn_kpp().
+ */
+struct crypto_kpp_spawn {
+ struct crypto_spawn base;
+};
+
/*
* Transform internal helpers.
*/
@@ -33,6 +65,62 @@ static inline const char *kpp_alg_name(struct crypto_kpp *tfm)
return crypto_kpp_tfm(tfm)->__crt_alg->cra_name;
}
+/*
+ * Template instance internal helpers.
+ */
+/**
+ * kpp_crypto_instance() - Cast a &struct kpp_instance to the corresponding
+ * generic &struct crypto_instance.
+ * @inst: Pointer to the &struct kpp_instance to be cast.
+ * Return: A pointer to the &struct crypto_instance embedded in @inst.
+ */
+static inline struct crypto_instance *kpp_crypto_instance(
+ struct kpp_instance *inst)
+{
+ return &inst->s.base;
+}
+
+/**
+ * kpp_instance() - Cast a generic &struct crypto_instance to the corresponding
+ * &struct kpp_instance.
+ * @inst: Pointer to the &struct crypto_instance to be cast.
+ * Return: A pointer to the &struct kpp_instance @inst is embedded in.
+ */
+static inline struct kpp_instance *kpp_instance(struct crypto_instance *inst)
+{
+ return container_of(inst, struct kpp_instance, s.base);
+}
+
+/**
+ * kpp_alg_instance() - Get the &struct kpp_instance a given KPP transform has
+ * been instantiated from.
+ * @kpp: The KPP transform instantiated from some &struct kpp_instance.
+ * Return: The &struct kpp_instance associated with @kpp.
+ */
+static inline struct kpp_instance *kpp_alg_instance(struct crypto_kpp *kpp)
+{
+ return kpp_instance(crypto_tfm_alg_instance(&kpp->base));
+}
+
+/**
+ * kpp_instance_ctx() - Get a pointer to a &struct kpp_instance's implementation
+ * specific context data.
+ * @inst: The &struct kpp_instance whose context data to access.
+ *
+ * A KPP template implementation may allocate extra memory beyond the
+ * end of a &struct kpp_instance instantiated from &crypto_template.create().
+ * This function provides a means to obtain a pointer to this area.
+ *
+ * Return: A pointer to the implementation specific context data.
+ */
+static inline void *kpp_instance_ctx(struct kpp_instance *inst)
+{
+ return crypto_instance_ctx(kpp_crypto_instance(inst));
+}
+
+/*
+ * KPP algorithm (un)registration functions.
+ */
/**
* crypto_register_kpp() -- Register key-agreement protocol primitives algorithm
*
@@ -56,4 +144,74 @@ int crypto_register_kpp(struct kpp_alg *alg);
*/
void crypto_unregister_kpp(struct kpp_alg *alg);
+/**
+ * kpp_register_instance() - Register a KPP template instance.
+ * @tmpl: The instantiating template.
+ * @inst: The KPP template instance to be registered.
+ * Return: %0 on success, negative error code otherwise.
+ */
+int kpp_register_instance(struct crypto_template *tmpl,
+ struct kpp_instance *inst);
+
+/*
+ * KPP spawn related functions.
+ */
+/**
+ * crypto_grab_kpp() - Look up a KPP algorithm and bind a spawn to it.
+ * @spawn: The KPP spawn to bind.
+ * @inst: The template instance owning @spawn.
+ * @name: The KPP algorithm name to look up.
+ * @type: The type bitset to pass on to the lookup.
+ * @mask: The mask bitmask to pass on to the lookup.
+ * Return: %0 on success, a negative error code otherwise.
+ */
+int crypto_grab_kpp(struct crypto_kpp_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
+
+/**
+ * crypto_drop_kpp() - Release a spawn previously bound via crypto_grab_kpp().
+ * @spawn: The spawn to release.
+ */
+static inline void crypto_drop_kpp(struct crypto_kpp_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+/**
+ * crypto_spawn_kpp_alg() - Get the algorithm a KPP spawn has been bound to.
+ * @spawn: The spawn to get the referenced &struct kpp_alg for.
+ *
+ * This function as well as the returned result are safe to use only
+ * after @spawn has been successfully bound via crypto_grab_kpp() and
+ * up until the template instance owning @spawn has either been
+ * registered successfully or the spawn has been released again via
+ * crypto_drop_spawn().
+ *
+ * Return: A pointer to the &struct kpp_alg referenced from the spawn.
+ */
+static inline struct kpp_alg *crypto_spawn_kpp_alg(
+ struct crypto_kpp_spawn *spawn)
+{
+ return container_of(spawn->base.alg, struct kpp_alg, base);
+}
+
+/**
+ * crypto_spawn_kpp() - Create a transform from a KPP spawn.
+ * @spawn: The spawn previously bound to some &struct kpp_alg via
+ * crypto_grab_kpp().
+ *
+ * Once a &struct crypto_kpp_spawn has been successfully bound to a
+ * &struct kpp_alg via crypto_grab_kpp(), transforms for the latter
+ * may get instantiated from the former by means of this function.
+ *
+ * Return: A pointer to the freshly created KPP transform on success
+ * or an ``ERR_PTR()`` otherwise.
+ */
+static inline struct crypto_kpp *crypto_spawn_kpp(
+ struct crypto_kpp_spawn *spawn)
+{
+ return crypto_spawn_tfm2(&spawn->base);
+}
+
#endif
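
A sketch of how a template's ->create() might bind an inner KPP algorithm with the spawn API above, assuming the instance context holds the spawn and using "dh" as an illustrative inner algorithm; allocation and algorithm setup are elided:

/* Sketch: bind a KPP spawn and register the template instance. */
static int example_bind_inner_kpp(struct crypto_template *tmpl,
                                  struct kpp_instance *inst)
{
        struct crypto_kpp_spawn *spawn = kpp_instance_ctx(inst);
        int err;

        err = crypto_grab_kpp(spawn, kpp_crypto_instance(inst),
                              "dh", 0, 0);
        if (err)
                return err;

        /* ... populate inst->alg from crypto_spawn_kpp_alg(spawn) ... */

        return kpp_register_instance(tmpl, inst);
}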
diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h
index 064e52ca5248..196aa769f296 100644
--- a/include/crypto/internal/poly1305.h
+++ b/include/crypto/internal/poly1305.h
@@ -18,7 +18,8 @@
* only the ε-almost-∆-universal hash function (not the full MAC) is computed.
*/
-void poly1305_core_setkey(struct poly1305_core_key *key, const u8 *raw_key);
+void poly1305_core_setkey(struct poly1305_core_key *key,
+ const u8 raw_key[POLY1305_BLOCK_SIZE]);
static inline void poly1305_core_init(struct poly1305_state *state)
{
*state = (struct poly1305_state){};
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 10226c12c5df..a2339f80a615 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -9,6 +9,7 @@
#define _CRYPTO_INTERNAL_SKCIPHER_H
#include <crypto/algapi.h>
+#include <crypto/internal/cipher.h>
#include <crypto/skcipher.h>
#include <linux/list.h>
#include <linux/types.h>
@@ -132,7 +133,6 @@ int skcipher_walk_done(struct skcipher_walk *walk, int err);
int skcipher_walk_virt(struct skcipher_walk *walk,
struct skcipher_request *req,
bool atomic);
-void skcipher_walk_atomise(struct skcipher_walk *walk);
int skcipher_walk_async(struct skcipher_walk *walk,
struct skcipher_request *req);
int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
diff --git a/include/crypto/kdf_sp800108.h b/include/crypto/kdf_sp800108.h
new file mode 100644
index 000000000000..b7b20a778fb7
--- /dev/null
+++ b/include/crypto/kdf_sp800108.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2021, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _CRYPTO_KDF108_H
+#define _CRYPTO_KDF108_H
+
+#include <crypto/hash.h>
+#include <linux/uio.h>
+
+/**
+ * Counter KDF generate operation according to SP800-108 section 5.1
+ * as well as SP800-56A section 5.8.1 (Single-step KDF).
+ *
+ * @kmd Keyed message digest whose key was set with crypto_kdf108_setkey or
+ * unkeyed message digest
+ * @info optional context and application specific information - this may be
+ * NULL
+ * @info_nvec number of optional context/application specific information entries
+ * @dst destination buffer that the caller already allocated
+ * @dlen length of the destination buffer - the KDF derives that number of
+ * bytes.
+ *
+ * To comply with SP800-108, the caller must provide Label || 0x00 || Context
+ * in the info parameter.
+ *
+ * @return 0 on success, < 0 on error
+ */
+int crypto_kdf108_ctr_generate(struct crypto_shash *kmd,
+ const struct kvec *info, unsigned int info_nvec,
+ u8 *dst, unsigned int dlen);
+
+/**
+ * Counter KDF setkey operation
+ *
+ * @kmd Keyed message digest allocated by the caller. The key should not have
+ * been set.
+ * @key Seed key to be used to initialize the keyed message digest context.
+ * @keylen The length of the key buffer.
+ * @ikm The SP800-108 KDF does not support IKM - this parameter must be NULL
+ * @ikmlen This parameter must be 0.
+ *
+ * According to SP800-108 section 7.2, the seed key must be at least as large as
+ * the message digest size of the keyed message digest in use. The
+ * implementation enforces this limitation.
+ *
+ * SP800-108 allows the use of either an HMAC or a hash primitive. When
+ * the caller intends to use a hash primitive, the call to
+ * crypto_kdf108_setkey is not required and the key derivation operation can
+ * be performed immediately with crypto_kdf108_ctr_generate after allocating
+ * a handle.
+ *
+ * @return 0 on success, < 0 on error
+ */
+int crypto_kdf108_setkey(struct crypto_shash *kmd,
+ const u8 *key, size_t keylen,
+ const u8 *ikm, size_t ikmlen);
+
+#endif /* _CRYPTO_KDF108_H */
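
A sketch of the Label || 0x00 || Context convention described above, over HMAC(SHA-256); the label and context bytes are illustrative:

#include <crypto/hash.h>
#include <crypto/kdf_sp800108.h>
#include <linux/err.h>

/* Sketch: derive 32 bytes of key material with the counter KDF. */
static int example_kdf108_derive(const u8 *seed, size_t seedlen, u8 *out)
{
        /* "label" || 0x00 || "context"; drop the trailing NUL. */
        static const u8 info_data[] = "label\0context";
        struct kvec info = {
                .iov_base = (void *)info_data,
                .iov_len  = sizeof(info_data) - 1,
        };
        struct crypto_shash *kmd;
        int err;

        kmd = crypto_alloc_shash("hmac(sha256)", 0, 0);
        if (IS_ERR(kmd))
                return PTR_ERR(kmd);

        err = crypto_kdf108_setkey(kmd, seed, seedlen, NULL, 0);
        if (!err)
                err = crypto_kdf108_ctr_generate(kmd, &info, 1, out, 32);

        crypto_free_shash(kmd);
        return err;
}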
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index cd9a9b500624..24d01e9877c1 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -104,6 +104,8 @@ struct kpp_alg {
*/
struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask);
+int crypto_has_kpp(const char *alg_name, u32 type, u32 mask);
+
static inline struct crypto_tfm *crypto_kpp_tfm(struct crypto_kpp *tfm)
{
return &tfm->base;
@@ -154,6 +156,8 @@ static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags)
* crypto_free_kpp() - free KPP tfm handle
*
* @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_kpp(struct crypto_kpp *tfm)
{
@@ -187,7 +191,7 @@ static inline struct kpp_request *kpp_request_alloc(struct crypto_kpp *tfm,
*/
static inline void kpp_request_free(struct kpp_request *req)
{
- kzfree(req);
+ kfree_sensitive(req);
}
/**
diff --git a/include/crypto/pcrypt.h b/include/crypto/pcrypt.h
index b9bc3436196a..234d7cf3cf5e 100644
--- a/include/crypto/pcrypt.h
+++ b/include/crypto/pcrypt.h
@@ -9,8 +9,8 @@
#ifndef _CRYPTO_PCRYPT_H
#define _CRYPTO_PCRYPT_H
+#include <linux/container_of.h>
#include <linux/crypto.h>
-#include <linux/kernel.h>
#include <linux/padata.h>
struct pcrypt_request {
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
index f1f67fc749cf..090692ec3bc7 100644
--- a/include/crypto/poly1305.h
+++ b/include/crypto/poly1305.h
@@ -58,8 +58,10 @@ struct poly1305_desc_ctx {
};
};
-void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key);
-void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key);
+void poly1305_init_arch(struct poly1305_desc_ctx *desc,
+ const u8 key[POLY1305_KEY_SIZE]);
+void poly1305_init_generic(struct poly1305_desc_ctx *desc,
+ const u8 key[POLY1305_KEY_SIZE]);
static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key)
{
diff --git a/include/crypto/polyval.h b/include/crypto/polyval.h
new file mode 100644
index 000000000000..1d630f371f77
--- /dev/null
+++ b/include/crypto/polyval.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for the Polyval hash algorithm
+ *
+ * Copyright 2021 Google LLC
+ */
+
+#ifndef _CRYPTO_POLYVAL_H
+#define _CRYPTO_POLYVAL_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define POLYVAL_BLOCK_SIZE 16
+#define POLYVAL_DIGEST_SIZE 16
+
+void polyval_mul_non4k(u8 *op1, const u8 *op2);
+
+void polyval_update_non4k(const u8 *key, const u8 *in,
+ size_t nblocks, u8 *accumulator);
+
+#endif
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index 0588ef3bc6ff..68f7aa2a7e55 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Asymmetric public-key algorithm definitions
*
- * See Documentation/crypto/asymmetric-keys.txt
+ * See Documentation/crypto/asymmetric-keys.rst
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -36,14 +36,16 @@ extern void public_key_free(struct public_key *key);
* Public key cryptography signature data
*/
struct public_key_signature {
- struct asymmetric_key_id *auth_ids[2];
+ struct asymmetric_key_id *auth_ids[3];
u8 *s; /* Signature */
- u32 s_size; /* Number of bytes in signature */
u8 *digest;
- u8 digest_size; /* Number of bytes in digest */
+ u32 s_size; /* Number of bytes in signature */
+ u32 digest_size; /* Number of bytes in digest */
const char *pkey_algo;
const char *hash_algo;
const char *encoding;
+ const void *data;
+ unsigned int data_size;
};
extern void public_key_signature_free(struct public_key_signature *sig);
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index 8b4b844b4eef..17bb3673d3c1 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -111,6 +111,8 @@ static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
/**
* crypto_free_rng() - zeroize and free RNG handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_rng(struct crypto_rng *tfm)
{
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index c837d0775474..ccdb05f68a75 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -12,8 +12,9 @@
#define _CRYPTO_SCATTERWALK_H
#include <crypto/algapi.h>
+
#include <linux/highmem.h>
-#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/scatterlist.h>
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
@@ -45,12 +46,6 @@ static inline void scatterwalk_advance(struct scatter_walk *walk,
walk->offset += nbytes;
}
-static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk,
- unsigned int alignmask)
-{
- return !(walk->offset & alignmask);
-}
-
static inline struct page *scatterwalk_page(struct scatter_walk *walk)
{
return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
@@ -81,12 +76,7 @@ static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out,
struct page *page;
page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
- /* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as
- * PageSlab cannot be optimised away per se due to
- * use of volatile pointer.
- */
- if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page))
- flush_dcache_page(page);
+ flush_dcache_page(page);
}
if (more && walk->offset >= walk->sg->offset + walk->sg->length)
diff --git a/include/crypto/sha.h b/include/crypto/sha.h
deleted file mode 100644
index 5c2132c71900..000000000000
--- a/include/crypto/sha.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Common values for SHA algorithms
- */
-
-#ifndef _CRYPTO_SHA_H
-#define _CRYPTO_SHA_H
-
-#include <linux/types.h>
-
-#define SHA1_DIGEST_SIZE 20
-#define SHA1_BLOCK_SIZE 64
-
-#define SHA224_DIGEST_SIZE 28
-#define SHA224_BLOCK_SIZE 64
-
-#define SHA256_DIGEST_SIZE 32
-#define SHA256_BLOCK_SIZE 64
-
-#define SHA384_DIGEST_SIZE 48
-#define SHA384_BLOCK_SIZE 128
-
-#define SHA512_DIGEST_SIZE 64
-#define SHA512_BLOCK_SIZE 128
-
-#define SHA1_H0 0x67452301UL
-#define SHA1_H1 0xefcdab89UL
-#define SHA1_H2 0x98badcfeUL
-#define SHA1_H3 0x10325476UL
-#define SHA1_H4 0xc3d2e1f0UL
-
-#define SHA224_H0 0xc1059ed8UL
-#define SHA224_H1 0x367cd507UL
-#define SHA224_H2 0x3070dd17UL
-#define SHA224_H3 0xf70e5939UL
-#define SHA224_H4 0xffc00b31UL
-#define SHA224_H5 0x68581511UL
-#define SHA224_H6 0x64f98fa7UL
-#define SHA224_H7 0xbefa4fa4UL
-
-#define SHA256_H0 0x6a09e667UL
-#define SHA256_H1 0xbb67ae85UL
-#define SHA256_H2 0x3c6ef372UL
-#define SHA256_H3 0xa54ff53aUL
-#define SHA256_H4 0x510e527fUL
-#define SHA256_H5 0x9b05688cUL
-#define SHA256_H6 0x1f83d9abUL
-#define SHA256_H7 0x5be0cd19UL
-
-#define SHA384_H0 0xcbbb9d5dc1059ed8ULL
-#define SHA384_H1 0x629a292a367cd507ULL
-#define SHA384_H2 0x9159015a3070dd17ULL
-#define SHA384_H3 0x152fecd8f70e5939ULL
-#define SHA384_H4 0x67332667ffc00b31ULL
-#define SHA384_H5 0x8eb44a8768581511ULL
-#define SHA384_H6 0xdb0c2e0d64f98fa7ULL
-#define SHA384_H7 0x47b5481dbefa4fa4ULL
-
-#define SHA512_H0 0x6a09e667f3bcc908ULL
-#define SHA512_H1 0xbb67ae8584caa73bULL
-#define SHA512_H2 0x3c6ef372fe94f82bULL
-#define SHA512_H3 0xa54ff53a5f1d36f1ULL
-#define SHA512_H4 0x510e527fade682d1ULL
-#define SHA512_H5 0x9b05688c2b3e6c1fULL
-#define SHA512_H6 0x1f83d9abfb41bd6bULL
-#define SHA512_H7 0x5be0cd19137e2179ULL
-
-extern const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE];
-
-extern const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE];
-
-extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE];
-
-extern const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE];
-
-extern const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE];
-
-struct sha1_state {
- u32 state[SHA1_DIGEST_SIZE / 4];
- u64 count;
- u8 buffer[SHA1_BLOCK_SIZE];
-};
-
-struct sha256_state {
- u32 state[SHA256_DIGEST_SIZE / 4];
- u64 count;
- u8 buf[SHA256_BLOCK_SIZE];
-};
-
-struct sha512_state {
- u64 state[SHA512_DIGEST_SIZE / 8];
- u64 count[2];
- u8 buf[SHA512_BLOCK_SIZE];
-};
-
-struct shash_desc;
-
-extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
-
-extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-extern int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
-
-extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
-
-/*
- * Stand-alone implementation of the SHA256 algorithm. It is designed to
- * have as little dependencies as possible so it can be used in the
- * kexec_file purgatory. In other cases you should generally use the
- * hash APIs from include/crypto/hash.h. Especially when hashing large
- * amounts of data as those APIs may be hw-accelerated.
- *
- * For details see lib/crypto/sha256.c
- */
-
-static inline int sha256_init(struct sha256_state *sctx)
-{
- sctx->state[0] = SHA256_H0;
- sctx->state[1] = SHA256_H1;
- sctx->state[2] = SHA256_H2;
- sctx->state[3] = SHA256_H3;
- sctx->state[4] = SHA256_H4;
- sctx->state[5] = SHA256_H5;
- sctx->state[6] = SHA256_H6;
- sctx->state[7] = SHA256_H7;
- sctx->count = 0;
-
- return 0;
-}
-extern int sha256_update(struct sha256_state *sctx, const u8 *input,
- unsigned int length);
-extern int sha256_final(struct sha256_state *sctx, u8 *hash);
-
-static inline int sha224_init(struct sha256_state *sctx)
-{
- sctx->state[0] = SHA224_H0;
- sctx->state[1] = SHA224_H1;
- sctx->state[2] = SHA224_H2;
- sctx->state[3] = SHA224_H3;
- sctx->state[4] = SHA224_H4;
- sctx->state[5] = SHA224_H5;
- sctx->state[6] = SHA224_H6;
- sctx->state[7] = SHA224_H7;
- sctx->count = 0;
-
- return 0;
-}
-extern int sha224_update(struct sha256_state *sctx, const u8 *input,
- unsigned int length);
-extern int sha224_final(struct sha256_state *sctx, u8 *hash);
-
-#endif
diff --git a/include/crypto/sha1.h b/include/crypto/sha1.h
new file mode 100644
index 000000000000..044ecea60ac8
--- /dev/null
+++ b/include/crypto/sha1.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for SHA-1 algorithms
+ */
+
+#ifndef _CRYPTO_SHA1_H
+#define _CRYPTO_SHA1_H
+
+#include <linux/types.h>
+
+#define SHA1_DIGEST_SIZE 20
+#define SHA1_BLOCK_SIZE 64
+
+#define SHA1_H0 0x67452301UL
+#define SHA1_H1 0xefcdab89UL
+#define SHA1_H2 0x98badcfeUL
+#define SHA1_H3 0x10325476UL
+#define SHA1_H4 0xc3d2e1f0UL
+
+extern const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE];
+
+struct sha1_state {
+ u32 state[SHA1_DIGEST_SIZE / 4];
+ u64 count;
+ u8 buffer[SHA1_BLOCK_SIZE];
+};
+
+struct shash_desc;
+
+extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len);
+
+extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash);
+
+/*
+ * An implementation of SHA-1's compression function. Don't use in new code!
+ * You shouldn't be using SHA-1, and even if you *have* to use SHA-1, this isn't
+ * the correct way to hash something with SHA-1 (use crypto_shash instead).
+ */
+#define SHA1_DIGEST_WORDS (SHA1_DIGEST_SIZE / 4)
+#define SHA1_WORKSPACE_WORDS 16
+void sha1_init(__u32 *buf);
+void sha1_transform(__u32 *digest, const char *data, __u32 *W);
+
+#endif /* _CRYPTO_SHA1_H */
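
As the comment above suggests, new code should go through crypto_shash instead; a minimal sketch, assuming the crypto_shash_tfm_digest() helper from <crypto/hash.h>:

#include <crypto/hash.h>
#include <crypto/sha1.h>
#include <linux/err.h>

/* Sketch: SHA-1 via crypto_shash rather than sha1_transform(). */
static int example_sha1_digest(const u8 *data, unsigned int len,
                               u8 out[SHA1_DIGEST_SIZE])
{
        struct crypto_shash *tfm;
        int err;

        tfm = crypto_alloc_shash("sha1", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_shash_tfm_digest(tfm, data, len, out);
        crypto_free_shash(tfm);
        return err;
}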
diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h
index 20fd1f7468af..2e0e7c3827d1 100644
--- a/include/crypto/sha1_base.h
+++ b/include/crypto/sha1_base.h
@@ -9,9 +9,10 @@
#define _CRYPTO_SHA1_BASE_H
#include <crypto/internal/hash.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
#include <linux/crypto.h>
#include <linux/module.h>
+#include <linux/string.h>
#include <asm/unaligned.h>
@@ -101,7 +102,7 @@ static inline int sha1_base_finish(struct shash_desc *desc, u8 *out)
for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
put_unaligned_be32(sctx->state[i], digest++);
- *sctx = (struct sha1_state){};
+ memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
diff --git a/include/crypto/sha2.h b/include/crypto/sha2.h
new file mode 100644
index 000000000000..2838f529f31e
--- /dev/null
+++ b/include/crypto/sha2.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for SHA-2 algorithms
+ */
+
+#ifndef _CRYPTO_SHA2_H
+#define _CRYPTO_SHA2_H
+
+#include <linux/types.h>
+
+#define SHA224_DIGEST_SIZE 28
+#define SHA224_BLOCK_SIZE 64
+
+#define SHA256_DIGEST_SIZE 32
+#define SHA256_BLOCK_SIZE 64
+
+#define SHA384_DIGEST_SIZE 48
+#define SHA384_BLOCK_SIZE 128
+
+#define SHA512_DIGEST_SIZE 64
+#define SHA512_BLOCK_SIZE 128
+
+#define SHA224_H0 0xc1059ed8UL
+#define SHA224_H1 0x367cd507UL
+#define SHA224_H2 0x3070dd17UL
+#define SHA224_H3 0xf70e5939UL
+#define SHA224_H4 0xffc00b31UL
+#define SHA224_H5 0x68581511UL
+#define SHA224_H6 0x64f98fa7UL
+#define SHA224_H7 0xbefa4fa4UL
+
+#define SHA256_H0 0x6a09e667UL
+#define SHA256_H1 0xbb67ae85UL
+#define SHA256_H2 0x3c6ef372UL
+#define SHA256_H3 0xa54ff53aUL
+#define SHA256_H4 0x510e527fUL
+#define SHA256_H5 0x9b05688cUL
+#define SHA256_H6 0x1f83d9abUL
+#define SHA256_H7 0x5be0cd19UL
+
+#define SHA384_H0 0xcbbb9d5dc1059ed8ULL
+#define SHA384_H1 0x629a292a367cd507ULL
+#define SHA384_H2 0x9159015a3070dd17ULL
+#define SHA384_H3 0x152fecd8f70e5939ULL
+#define SHA384_H4 0x67332667ffc00b31ULL
+#define SHA384_H5 0x8eb44a8768581511ULL
+#define SHA384_H6 0xdb0c2e0d64f98fa7ULL
+#define SHA384_H7 0x47b5481dbefa4fa4ULL
+
+#define SHA512_H0 0x6a09e667f3bcc908ULL
+#define SHA512_H1 0xbb67ae8584caa73bULL
+#define SHA512_H2 0x3c6ef372fe94f82bULL
+#define SHA512_H3 0xa54ff53a5f1d36f1ULL
+#define SHA512_H4 0x510e527fade682d1ULL
+#define SHA512_H5 0x9b05688c2b3e6c1fULL
+#define SHA512_H6 0x1f83d9abfb41bd6bULL
+#define SHA512_H7 0x5be0cd19137e2179ULL
+
+extern const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE];
+
+extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE];
+
+extern const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE];
+
+extern const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE];
+
+struct sha256_state {
+ u32 state[SHA256_DIGEST_SIZE / 4];
+ u64 count;
+ u8 buf[SHA256_BLOCK_SIZE];
+};
+
+struct sha512_state {
+ u64 state[SHA512_DIGEST_SIZE / 8];
+ u64 count[2];
+ u8 buf[SHA512_BLOCK_SIZE];
+};
+
+struct shash_desc;
+
+extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len);
+
+extern int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash);
+
+extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len);
+
+extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash);
+
+/*
+ * Stand-alone implementation of the SHA256 algorithm. It is designed to
+ * have as few dependencies as possible so it can be used in the
+ * kexec_file purgatory. In other cases you should generally use the
+ * hash APIs from include/crypto/hash.h, especially when hashing large
+ * amounts of data, as those APIs may be hw-accelerated.
+ *
+ * For details see lib/crypto/sha256.c
+ */
+
+static inline void sha256_init(struct sha256_state *sctx)
+{
+ sctx->state[0] = SHA256_H0;
+ sctx->state[1] = SHA256_H1;
+ sctx->state[2] = SHA256_H2;
+ sctx->state[3] = SHA256_H3;
+ sctx->state[4] = SHA256_H4;
+ sctx->state[5] = SHA256_H5;
+ sctx->state[6] = SHA256_H6;
+ sctx->state[7] = SHA256_H7;
+ sctx->count = 0;
+}
+void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len);
+void sha256_final(struct sha256_state *sctx, u8 *out);
+void sha256(const u8 *data, unsigned int len, u8 *out);
+
+static inline void sha224_init(struct sha256_state *sctx)
+{
+ sctx->state[0] = SHA224_H0;
+ sctx->state[1] = SHA224_H1;
+ sctx->state[2] = SHA224_H2;
+ sctx->state[3] = SHA224_H3;
+ sctx->state[4] = SHA224_H4;
+ sctx->state[5] = SHA224_H5;
+ sctx->state[6] = SHA224_H6;
+ sctx->state[7] = SHA224_H7;
+ sctx->count = 0;
+}
+void sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len);
+void sha224_final(struct sha256_state *sctx, u8 *out);
+
+#endif /* _CRYPTO_SHA2_H */
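
A short sketch of the stand-alone SHA-256 interface declared above, one-shot and incremental; buffer names are illustrative:

#include <crypto/sha2.h>

/* Sketch: one-shot and incremental stand-alone SHA-256. */
static void example_sha256(const u8 *a, unsigned int alen,
                           const u8 *b, unsigned int blen,
                           u8 out[SHA256_DIGEST_SIZE])
{
        struct sha256_state sctx;

        sha256(a, alen, out);           /* one-shot */

        sha256_init(&sctx);             /* incremental */
        sha256_update(&sctx, a, alen);
        sha256_update(&sctx, b, blen);
        sha256_final(&sctx, out);
}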
diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h
index cea60cff80bd..76173c613058 100644
--- a/include/crypto/sha256_base.h
+++ b/include/crypto/sha256_base.h
@@ -9,9 +9,10 @@
#define _CRYPTO_SHA256_BASE_H
#include <crypto/internal/hash.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <linux/crypto.h>
#include <linux/module.h>
+#include <linux/string.h>
#include <asm/unaligned.h>
@@ -22,14 +23,16 @@ static inline int sha224_base_init(struct shash_desc *desc)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
- return sha224_init(sctx);
+ sha224_init(sctx);
+ return 0;
}
static inline int sha256_base_init(struct shash_desc *desc)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
- return sha256_init(sctx);
+ sha256_init(sctx);
+ return 0;
}
static inline int sha256_base_do_update(struct shash_desc *desc,
@@ -103,7 +106,7 @@ static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be32))
put_unaligned_be32(sctx->state[i], digest++);
- *sctx = (struct sha256_state){};
+ memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
diff --git a/include/crypto/sha512_base.h b/include/crypto/sha512_base.h
index fb19c77494dc..b370b3340b16 100644
--- a/include/crypto/sha512_base.h
+++ b/include/crypto/sha512_base.h
@@ -9,9 +9,10 @@
#define _CRYPTO_SHA512_BASE_H
#include <crypto/internal/hash.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <linux/crypto.h>
#include <linux/module.h>
+#include <linux/string.h>
#include <asm/unaligned.h>
@@ -126,7 +127,7 @@ static inline int sha512_base_finish(struct shash_desc *desc, u8 *out)
for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be64))
put_unaligned_be64(sctx->state[i], digest++);
- *sctx = (struct sha512_state){};
+ memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 141e7690f9c3..39f5b67c3069 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -8,9 +8,13 @@
#ifndef _CRYPTO_SKCIPHER_H
#define _CRYPTO_SKCIPHER_H
+#include <linux/container_of.h>
#include <linux/crypto.h>
-#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+struct scatterlist;
/**
* struct skcipher_request - Symmetric key cipher request
@@ -18,7 +22,7 @@
* @iv: Initialisation Vector
* @src: Source SG list
* @dst: Destination SG list
- * @base: Underlying async request request
+ * @base: Underlying async request
* @__ctx: Start of private context data
*/
struct skcipher_request {
@@ -196,6 +200,8 @@ static inline struct crypto_tfm *crypto_skcipher_tfm(
/**
* crypto_free_skcipher() - zeroize and free cipher handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
{
@@ -508,7 +514,7 @@ static inline struct skcipher_request *skcipher_request_alloc(
*/
static inline void skcipher_request_free(struct skcipher_request *req)
{
- kzfree(req);
+ kfree_sensitive(req);
}
static inline void skcipher_request_zero(struct skcipher_request *req)
diff --git a/include/crypto/sm2.h b/include/crypto/sm2.h
new file mode 100644
index 000000000000..af452556dcd4
--- /dev/null
+++ b/include/crypto/sm2.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * sm2.h - SM2 asymmetric public-key algorithm
+ * as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012 SM2 and
+ * described at https://tools.ietf.org/html/draft-shen-sm2-ecdsa-02
+ *
+ * Copyright (c) 2020, Alibaba Group.
+ * Written by Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#ifndef _CRYPTO_SM2_H
+#define _CRYPTO_SM2_H
+
+#include <crypto/sm3.h>
+#include <crypto/akcipher.h>
+
+/* The default user id as specified in GM/T 0009-2012 */
+#define SM2_DEFAULT_USERID "1234567812345678"
+#define SM2_DEFAULT_USERID_LEN 16
+
+extern int sm2_compute_z_digest(struct crypto_akcipher *tfm,
+ const unsigned char *id, size_t id_len,
+ unsigned char dgst[SM3_DIGEST_SIZE]);
+
+#endif /* _CRYPTO_SM2_H */
diff --git a/include/crypto/sm3.h b/include/crypto/sm3.h
index 1438942dc773..1f021ad0533f 100644
--- a/include/crypto/sm3.h
+++ b/include/crypto/sm3.h
@@ -1,5 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Common values for SM3 algorithm
+ *
+ * Copyright (C) 2017 ARM Limited or its affiliates.
+ * Copyright (C) 2017 Gilad Ben-Yossef <gilad@benyossef.com>
+ * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
*/
#ifndef _CRYPTO_SM3_H
@@ -30,11 +35,30 @@ struct sm3_state {
u8 buffer[SM3_BLOCK_SIZE];
};
-struct shash_desc;
+/*
+ * Stand-alone implementation of the SM3 algorithm. It is designed to
+ * have as few dependencies as possible so it can be used in the
+ * kexec_file purgatory. In other cases you should generally use the
+ * hash APIs from include/crypto/hash.h, especially when hashing large
+ * amounts of data, as those APIs may be hw-accelerated.
+ *
+ * For details see lib/crypto/sm3.c
+ */
+
+static inline void sm3_init(struct sm3_state *sctx)
+{
+ sctx->state[0] = SM3_IVA;
+ sctx->state[1] = SM3_IVB;
+ sctx->state[2] = SM3_IVC;
+ sctx->state[3] = SM3_IVD;
+ sctx->state[4] = SM3_IVE;
+ sctx->state[5] = SM3_IVF;
+ sctx->state[6] = SM3_IVG;
+ sctx->state[7] = SM3_IVH;
+ sctx->count = 0;
+}
-extern int crypto_sm3_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
+void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len);
+void sm3_final(struct sm3_state *sctx, u8 *out);
-extern int crypto_sm3_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
#endif
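
A sketch of the stand-alone SM3 interface introduced above, mirroring the SHA-256 one; buffer names are illustrative:

#include <crypto/sm3.h>

/* Sketch: incremental stand-alone SM3 over two buffers. */
static void example_sm3(const u8 *a, unsigned int alen,
                        const u8 *b, unsigned int blen,
                        u8 out[SM3_DIGEST_SIZE])
{
        struct sm3_state sctx;

        sm3_init(&sctx);
        sm3_update(&sctx, a, alen);
        sm3_update(&sctx, b, blen);
        sm3_final(&sctx, out);
}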
diff --git a/include/crypto/sm3_base.h b/include/crypto/sm3_base.h
index 1cbf9aa1fe52..2f3a32ab97bb 100644
--- a/include/crypto/sm3_base.h
+++ b/include/crypto/sm3_base.h
@@ -13,6 +13,7 @@
#include <crypto/sm3.h>
#include <linux/crypto.h>
#include <linux/module.h>
+#include <linux/string.h>
#include <asm/unaligned.h>
typedef void (sm3_block_fn)(struct sm3_state *sst, u8 const *src, int blocks);
@@ -104,7 +105,7 @@ static inline int sm3_base_finish(struct shash_desc *desc, u8 *out)
for (i = 0; i < SM3_DIGEST_SIZE / sizeof(__be32); i++)
put_unaligned_be32(sctx->state[i], digest++);
- *sctx = (struct sm3_state){};
+ memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h
index 7afd730d16ff..9656a9a40326 100644
--- a/include/crypto/sm4.h
+++ b/include/crypto/sm4.h
@@ -3,6 +3,7 @@
/*
* Common values for the SM4 algorithm
* Copyright (C) 2018 ARM Limited or its affiliates.
+ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
*/
#ifndef _CRYPTO_SM4_H
@@ -15,17 +16,33 @@
#define SM4_BLOCK_SIZE 16
#define SM4_RKEY_WORDS 32
-struct crypto_sm4_ctx {
+struct sm4_ctx {
u32 rkey_enc[SM4_RKEY_WORDS];
u32 rkey_dec[SM4_RKEY_WORDS];
};
-int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
- unsigned int key_len);
-int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key,
+extern const u32 crypto_sm4_fk[];
+extern const u32 crypto_sm4_ck[];
+extern const u8 crypto_sm4_sbox[];
+
+/**
+ * sm4_expandkey - Expands the SM4 key as described in GB/T 32907-2016
+ * @ctx: The location where the computed key will be stored.
+ * @in_key: The supplied key.
+ * @key_len: The length of the supplied key.
+ *
+ * Returns 0 on success. The function fails only if an invalid key size (or
+ * pointer) is supplied.
+ */
+int sm4_expandkey(struct sm4_ctx *ctx, const u8 *in_key,
unsigned int key_len);
-void crypto_sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in);
-void crypto_sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in);
+/**
+ * sm4_crypt_block - Encrypt or decrypt a single SM4 block
+ * @rk: The rkey_enc for encrypt or rkey_dec for decrypt
+ * @out: Buffer to store output data
+ * @in: Buffer containing the input data
+ */
+void sm4_crypt_block(const u32 *rk, u8 *out, const u8 *in);
#endif
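
A sketch of the reworked SM4 library interface; SM4_KEY_SIZE is the existing 16-byte key length from this header, and example_ names are illustrative:

#include <crypto/sm4.h>

/* Sketch: expand an SM4 key, encrypt a block, then decrypt it back. */
static int example_sm4_roundtrip(const u8 key[SM4_KEY_SIZE],
                                 const u8 in[SM4_BLOCK_SIZE],
                                 u8 out[SM4_BLOCK_SIZE])
{
        struct sm4_ctx ctx;
        u8 tmp[SM4_BLOCK_SIZE];
        int err;

        err = sm4_expandkey(&ctx, key, SM4_KEY_SIZE);
        if (err)
                return err;

        sm4_crypt_block(ctx.rkey_enc, out, in);   /* encrypt */
        sm4_crypt_block(ctx.rkey_dec, tmp, out);  /* decrypt; tmp equals in */
        return 0;
}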
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
index b1230e33d506..90b69270f2fa 100644
--- a/include/drm/amd_asic_type.h
+++ b/include/drm/amd_asic_type.h
@@ -51,9 +51,18 @@ enum amd_asic_type {
CHIP_RAVEN, /* 22 */
CHIP_ARCTURUS, /* 23 */
CHIP_RENOIR, /* 24 */
- CHIP_NAVI10, /* 25 */
- CHIP_NAVI14, /* 26 */
- CHIP_NAVI12, /* 27 */
+ CHIP_ALDEBARAN, /* 25 */
+ CHIP_NAVI10, /* 26 */
+ CHIP_CYAN_SKILLFISH, /* 27 */
+ CHIP_NAVI14, /* 28 */
+ CHIP_NAVI12, /* 29 */
+ CHIP_SIENNA_CICHLID, /* 30 */
+ CHIP_NAVY_FLOUNDER, /* 31 */
+ CHIP_VANGOGH, /* 32 */
+ CHIP_DIMGREY_CAVEFISH, /* 33 */
+ CHIP_BEIGE_GOBY, /* 34 */
+ CHIP_YELLOW_CARP, /* 35 */
+ CHIP_IP_DISCOVERY, /* 36 */
CHIP_LAST,
};
diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h
index 7aa2f93da49c..b0dcc07334a1 100644
--- a/include/drm/bridge/analogix_dp.h
+++ b/include/drm/bridge/analogix_dp.h
@@ -42,9 +42,10 @@ int analogix_dp_resume(struct analogix_dp_device *dp);
int analogix_dp_suspend(struct analogix_dp_device *dp);
struct analogix_dp_device *
-analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
- struct analogix_dp_plat_data *plat_data);
+analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data);
+int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev);
void analogix_dp_unbind(struct analogix_dp_device *dp);
+void analogix_dp_remove(struct analogix_dp_device *dp);
int analogix_dp_start_crc(struct drm_connector *connector);
int analogix_dp_stop_crc(struct drm_connector *connector);
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index 9d4d5cc47969..f668e75fbabe 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -8,7 +8,7 @@
#include <sound/hdmi-codec.h>
-struct drm_connector;
+struct drm_display_info;
struct drm_display_mode;
struct drm_encoder;
struct dw_hdmi;
@@ -114,7 +114,8 @@ struct dw_hdmi_phy_config {
struct dw_hdmi_phy_ops {
int (*init)(struct dw_hdmi *hdmi, void *data,
- struct drm_display_mode *mode);
+ const struct drm_display_info *display,
+ const struct drm_display_mode *mode);
void (*disable)(struct dw_hdmi *hdmi, void *data);
enum drm_connector_status (*read_hpd)(struct dw_hdmi *hdmi, void *data);
void (*update_hpd)(struct dw_hdmi *hdmi, void *data,
@@ -124,11 +125,28 @@ struct dw_hdmi_phy_ops {
struct dw_hdmi_plat_data {
struct regmap *regm;
- enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
- const struct drm_display_mode *mode);
- unsigned long input_bus_format;
+
+ unsigned int output_port;
+
unsigned long input_bus_encoding;
bool use_drm_infoframe;
+ bool ycbcr_420_allowed;
+
+ /*
+ * Private data passed to all the .mode_valid() and .configure_phy()
+ * callback functions.
+ */
+ void *priv_data;
+
+ /* Platform-specific mode validation (optional). */
+ enum drm_mode_status (*mode_valid)(struct dw_hdmi *hdmi, void *data,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode);
+
+ /* Platform-specific audio enable/disable (optional) */
+ void (*enable_audio)(struct dw_hdmi *hdmi, int channel,
+ int width, int rate, int non_pcm);
+ void (*disable_audio)(struct dw_hdmi *hdmi);
/* Vendor PHY support */
const struct dw_hdmi_phy_ops *phy_ops;
@@ -140,9 +158,10 @@ struct dw_hdmi_plat_data {
const struct dw_hdmi_mpll_config *mpll_cfg;
const struct dw_hdmi_curr_ctrl *cur_ctr;
const struct dw_hdmi_phy_config *phy_config;
- int (*configure_phy)(struct dw_hdmi *hdmi,
- const struct dw_hdmi_plat_data *pdata,
+ int (*configure_phy)(struct dw_hdmi *hdmi, void *data,
unsigned long mpixelclock);
+
+ unsigned int disable_cec : 1;
};
struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
@@ -159,22 +178,27 @@ void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense);
int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn,
struct device *codec_dev);
+void dw_hdmi_set_sample_non_pcm(struct dw_hdmi *hdmi, unsigned int non_pcm);
+void dw_hdmi_set_sample_width(struct dw_hdmi *hdmi, unsigned int width);
void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt);
void dw_hdmi_set_channel_status(struct dw_hdmi *hdmi, u8 *channel_status);
void dw_hdmi_set_channel_allocation(struct dw_hdmi *hdmi, unsigned int ca);
void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
void dw_hdmi_audio_disable(struct dw_hdmi *hdmi);
-void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi);
+void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi,
+ const struct drm_display_info *display);
/* PHY configuration */
void dw_hdmi_phy_i2c_set_addr(struct dw_hdmi *hdmi, u8 address);
void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
unsigned char addr);
+void dw_hdmi_phy_gen1_reset(struct dw_hdmi *hdmi);
+
void dw_hdmi_phy_gen2_pddq(struct dw_hdmi *hdmi, u8 enable);
void dw_hdmi_phy_gen2_txpwron(struct dw_hdmi *hdmi, u8 enable);
-void dw_hdmi_phy_reset(struct dw_hdmi *hdmi);
+void dw_hdmi_phy_gen2_reset(struct dw_hdmi *hdmi);
enum drm_connector_status dw_hdmi_phy_read_hpd(struct dw_hdmi *hdmi,
void *data);
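
A sketch of platform glue against the reworked callbacks; the 340 MHz limit and the example_ names are illustrative, not from this patch:

#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_modes.h>

/* Sketch: platform mode_valid hook using the new signature. */
static enum drm_mode_status
example_mode_valid(struct dw_hdmi *hdmi, void *data,
                   const struct drm_display_info *info,
                   const struct drm_display_mode *mode)
{
        if (mode->clock > 340000)       /* kHz; illustrative limit */
                return MODE_CLOCK_HIGH;
        return MODE_OK;
}

static const struct dw_hdmi_plat_data example_plat_data = {
        .mode_valid = example_mode_valid,
        .priv_data  = NULL,             /* handed back as @data */
};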
diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h
index b0e390b3288e..5286a53a1875 100644
--- a/include/drm/bridge/dw_mipi_dsi.h
+++ b/include/drm/bridge/dw_mipi_dsi.h
@@ -36,6 +36,7 @@ struct dw_mipi_dsi_phy_ops {
unsigned int *lane_mbps);
int (*get_timing)(void *priv_data, unsigned int lane_mbps,
struct dw_mipi_dsi_dphy_timing *timing);
+ int (*get_esc_clk_rate)(void *priv_data, unsigned int *esc_clk_rate);
};
struct dw_mipi_dsi_host_ops {
@@ -50,7 +51,9 @@ struct dw_mipi_dsi_plat_data {
unsigned int max_data_lanes;
enum drm_mode_status (*mode_valid)(void *priv_data,
- const struct drm_display_mode *mode);
+ const struct drm_display_mode *mode,
+ unsigned long mode_flags,
+ u32 lanes, u32 format);
const struct dw_mipi_dsi_phy_ops *phy_ops;
const struct dw_mipi_dsi_host_ops *host_ops;
diff --git a/include/drm/bridge/mhl.h b/include/drm/bridge/mhl.h
index 1cc77bf38324..d96626a0e3fa 100644
--- a/include/drm/bridge/mhl.h
+++ b/include/drm/bridge/mhl.h
@@ -327,13 +327,13 @@ struct mhl_burst_bits_per_pixel_fmt {
struct {
u8 stream_id;
u8 pixel_format;
- } __packed desc[0];
+ } __packed desc[];
} __packed;
struct mhl_burst_emsc_support {
struct mhl3_burst_header hdr;
u8 num_entries;
- __be16 burst_id[0];
+ __be16 burst_id[];
} __packed;
struct mhl_burst_audio_descr {
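
With desc[] now a C99 flexible array member, allocations should size the trailing array with struct_size(); a sketch:

#include <linux/overflow.h>
#include <linux/slab.h>

/* Sketch: allocate a bits-per-pixel burst with n trailing descriptors. */
static struct mhl_burst_bits_per_pixel_fmt *
example_alloc_bpp_burst(unsigned int n)
{
        struct mhl_burst_bits_per_pixel_fmt *fmt;

        fmt = kzalloc(struct_size(fmt, desc, n), GFP_KERNEL);
        return fmt;     /* may be NULL */
}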
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
new file mode 100644
index 000000000000..e934aab357be
--- /dev/null
+++ b/include/drm/display/drm_dp.h
@@ -0,0 +1,1693 @@
+/*
+ * Copyright © 2008 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef _DRM_DP_H_
+#define _DRM_DP_H_
+
+#include <linux/types.h>
+
+/*
+ * Unless otherwise noted, all values are from the DP 1.1a spec. Note that
+ * DP and DPCD versions are independent. Differences from 1.0 are not noted,
+ * 1.0 devices basically don't exist in the wild.
+ *
+ * Abbreviations, in chronological order:
+ *
+ * eDP: Embedded DisplayPort version 1
+ * DPI: DisplayPort Interoperability Guideline v1.1a
+ * 1.2: DisplayPort 1.2
+ * MST: Multistream Transport - part of DP 1.2a
+ *
+ * 1.2 formally includes both eDP and DPI definitions.
+ */
+
+/* MSA (Main Stream Attribute) MISC bits (as MISC1<<8|MISC0) */
+#define DP_MSA_MISC_SYNC_CLOCK (1 << 0)
+#define DP_MSA_MISC_INTERLACE_VTOTAL_EVEN (1 << 8)
+#define DP_MSA_MISC_STEREO_NO_3D (0 << 9)
+#define DP_MSA_MISC_STEREO_PROG_RIGHT_EYE (1 << 9)
+#define DP_MSA_MISC_STEREO_PROG_LEFT_EYE (3 << 9)
+/* bits per component for non-RAW */
+#define DP_MSA_MISC_6_BPC (0 << 5)
+#define DP_MSA_MISC_8_BPC (1 << 5)
+#define DP_MSA_MISC_10_BPC (2 << 5)
+#define DP_MSA_MISC_12_BPC (3 << 5)
+#define DP_MSA_MISC_16_BPC (4 << 5)
+/* bits per component for RAW */
+#define DP_MSA_MISC_RAW_6_BPC (1 << 5)
+#define DP_MSA_MISC_RAW_7_BPC (2 << 5)
+#define DP_MSA_MISC_RAW_8_BPC (3 << 5)
+#define DP_MSA_MISC_RAW_10_BPC (4 << 5)
+#define DP_MSA_MISC_RAW_12_BPC (5 << 5)
+#define DP_MSA_MISC_RAW_14_BPC (6 << 5)
+#define DP_MSA_MISC_RAW_16_BPC (7 << 5)
+/* pixel encoding/colorimetry format */
+#define _DP_MSA_MISC_COLOR(misc1_7, misc0_21, misc0_3, misc0_4) \
+ ((misc1_7) << 15 | (misc0_4) << 4 | (misc0_3) << 3 | ((misc0_21) << 1))
+#define DP_MSA_MISC_COLOR_RGB _DP_MSA_MISC_COLOR(0, 0, 0, 0)
+#define DP_MSA_MISC_COLOR_CEA_RGB _DP_MSA_MISC_COLOR(0, 0, 1, 0)
+#define DP_MSA_MISC_COLOR_RGB_WIDE_FIXED _DP_MSA_MISC_COLOR(0, 3, 0, 0)
+#define DP_MSA_MISC_COLOR_RGB_WIDE_FLOAT _DP_MSA_MISC_COLOR(0, 3, 0, 1)
+#define DP_MSA_MISC_COLOR_Y_ONLY _DP_MSA_MISC_COLOR(1, 0, 0, 0)
+#define DP_MSA_MISC_COLOR_RAW _DP_MSA_MISC_COLOR(1, 1, 0, 0)
+#define DP_MSA_MISC_COLOR_YCBCR_422_BT601 _DP_MSA_MISC_COLOR(0, 1, 1, 0)
+#define DP_MSA_MISC_COLOR_YCBCR_422_BT709 _DP_MSA_MISC_COLOR(0, 1, 1, 1)
+#define DP_MSA_MISC_COLOR_YCBCR_444_BT601 _DP_MSA_MISC_COLOR(0, 2, 1, 0)
+#define DP_MSA_MISC_COLOR_YCBCR_444_BT709 _DP_MSA_MISC_COLOR(0, 2, 1, 1)
+#define DP_MSA_MISC_COLOR_XVYCC_422_BT601 _DP_MSA_MISC_COLOR(0, 1, 0, 0)
+#define DP_MSA_MISC_COLOR_XVYCC_422_BT709 _DP_MSA_MISC_COLOR(0, 1, 0, 1)
+#define DP_MSA_MISC_COLOR_XVYCC_444_BT601 _DP_MSA_MISC_COLOR(0, 2, 0, 0)
+#define DP_MSA_MISC_COLOR_XVYCC_444_BT709 _DP_MSA_MISC_COLOR(0, 2, 0, 1)
+#define DP_MSA_MISC_COLOR_OPRGB _DP_MSA_MISC_COLOR(0, 0, 1, 1)
+#define DP_MSA_MISC_COLOR_DCI_P3 _DP_MSA_MISC_COLOR(0, 3, 1, 0)
+#define DP_MSA_MISC_COLOR_COLOR_PROFILE _DP_MSA_MISC_COLOR(0, 3, 1, 1)
+#define DP_MSA_MISC_COLOR_VSC_SDP (1 << 14)
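+
+/*
+ * Worked example of the _DP_MSA_MISC_COLOR() packing, for illustration:
+ * DP_MSA_MISC_COLOR_YCBCR_422_BT709 = _DP_MSA_MISC_COLOR(0, 1, 1, 1)
+ *   = (0 << 15) | (1 << 4) | (1 << 3) | (1 << 1) = 0x001a,
+ * i.e. MISC0 = 0x1a and MISC1 = 0x00 in the MISC1<<8|MISC0 layout above.
+ */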
+
+#define DP_AUX_MAX_PAYLOAD_BYTES 16
+
+#define DP_AUX_I2C_WRITE 0x0
+#define DP_AUX_I2C_READ 0x1
+#define DP_AUX_I2C_WRITE_STATUS_UPDATE 0x2
+#define DP_AUX_I2C_MOT 0x4
+#define DP_AUX_NATIVE_WRITE 0x8
+#define DP_AUX_NATIVE_READ 0x9
+
+#define DP_AUX_NATIVE_REPLY_ACK (0x0 << 0)
+#define DP_AUX_NATIVE_REPLY_NACK (0x1 << 0)
+#define DP_AUX_NATIVE_REPLY_DEFER (0x2 << 0)
+#define DP_AUX_NATIVE_REPLY_MASK (0x3 << 0)
+
+#define DP_AUX_I2C_REPLY_ACK (0x0 << 2)
+#define DP_AUX_I2C_REPLY_NACK (0x1 << 2)
+#define DP_AUX_I2C_REPLY_DEFER (0x2 << 2)
+#define DP_AUX_I2C_REPLY_MASK (0x3 << 2)
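+
+/*
+ * Reply-handling sketch, for illustration only: a native AUX reply byte
+ * rep indicates success when
+ *   (rep & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK;
+ * an I2C-over-AUX reply additionally carries the I2C status in bits 3:2,
+ * checked the same way against DP_AUX_I2C_REPLY_MASK.
+ */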
+
+/* DPCD Field Address Mapping */
+
+/* Receiver Capability */
+#define DP_DPCD_REV 0x000
+# define DP_DPCD_REV_10 0x10
+# define DP_DPCD_REV_11 0x11
+# define DP_DPCD_REV_12 0x12
+# define DP_DPCD_REV_13 0x13
+# define DP_DPCD_REV_14 0x14
+
+#define DP_MAX_LINK_RATE 0x001
+
+#define DP_MAX_LANE_COUNT 0x002
+# define DP_MAX_LANE_COUNT_MASK 0x1f
+# define DP_TPS3_SUPPORTED (1 << 6) /* 1.2 */
+# define DP_ENHANCED_FRAME_CAP (1 << 7)
+
+#define DP_MAX_DOWNSPREAD 0x003
+# define DP_MAX_DOWNSPREAD_0_5 (1 << 0)
+# define DP_STREAM_REGENERATION_STATUS_CAP (1 << 1) /* 2.0 */
+# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6)
+# define DP_TPS4_SUPPORTED (1 << 7)
+
+#define DP_NORP 0x004
+
+#define DP_DOWNSTREAMPORT_PRESENT 0x005
+# define DP_DWN_STRM_PORT_PRESENT (1 << 0)
+# define DP_DWN_STRM_PORT_TYPE_MASK 0x06
+# define DP_DWN_STRM_PORT_TYPE_DP (0 << 1)
+# define DP_DWN_STRM_PORT_TYPE_ANALOG (1 << 1)
+# define DP_DWN_STRM_PORT_TYPE_TMDS (2 << 1)
+# define DP_DWN_STRM_PORT_TYPE_OTHER (3 << 1)
+# define DP_FORMAT_CONVERSION (1 << 3)
+# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */
+
+#define DP_MAIN_LINK_CHANNEL_CODING 0x006
+# define DP_CAP_ANSI_8B10B (1 << 0)
+# define DP_CAP_ANSI_128B132B (1 << 1) /* 2.0 */
+
+#define DP_DOWN_STREAM_PORT_COUNT 0x007
+# define DP_PORT_COUNT_MASK 0x0f
+# define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */
+# define DP_OUI_SUPPORT (1 << 7)
+
+#define DP_RECEIVE_PORT_0_CAP_0 0x008
+# define DP_LOCAL_EDID_PRESENT (1 << 1)
+# define DP_ASSOCIATED_TO_PRECEDING_PORT (1 << 2)
+
+#define DP_RECEIVE_PORT_0_BUFFER_SIZE 0x009
+
+#define DP_RECEIVE_PORT_1_CAP_0 0x00a
+#define DP_RECEIVE_PORT_1_BUFFER_SIZE 0x00b
+
+#define DP_I2C_SPEED_CAP 0x00c /* DPI */
+# define DP_I2C_SPEED_1K 0x01
+# define DP_I2C_SPEED_5K 0x02
+# define DP_I2C_SPEED_10K 0x04
+# define DP_I2C_SPEED_100K 0x08
+# define DP_I2C_SPEED_400K 0x10
+# define DP_I2C_SPEED_1M 0x20
+
+#define DP_EDP_CONFIGURATION_CAP 0x00d /* XXX 1.2? */
+# define DP_ALTERNATE_SCRAMBLER_RESET_CAP (1 << 0)
+# define DP_FRAMING_CHANGE_CAP (1 << 1)
+# define DP_DPCD_DISPLAY_CONTROL_CAPABLE (1 << 3) /* eDP v1.2 or higher */
+
+#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
+# define DP_TRAINING_AUX_RD_MASK 0x7F /* DP 1.3 */
+# define DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT (1 << 7) /* DP 1.3 */
+
+#define DP_ADAPTER_CAP 0x00f /* 1.2 */
+# define DP_FORCE_LOAD_SENSE_CAP (1 << 0)
+# define DP_ALTERNATE_I2C_PATTERN_CAP (1 << 1)
+
+#define DP_SUPPORTED_LINK_RATES 0x010 /* eDP 1.4 */
+# define DP_MAX_SUPPORTED_RATES 8 /* 16-bit little-endian */
+
+/* Multiple stream transport */
+#define DP_FAUX_CAP 0x020 /* 1.2 */
+# define DP_FAUX_CAP_1 (1 << 0)
+
+#define DP_SINK_VIDEO_FALLBACK_FORMATS 0x020 /* 2.0 */
+# define DP_FALLBACK_1024x768_60HZ_24BPP (1 << 0)
+# define DP_FALLBACK_1280x720_60HZ_24BPP (1 << 1)
+# define DP_FALLBACK_1920x1080_60HZ_24BPP (1 << 2)
+
+#define DP_MSTM_CAP 0x021 /* 1.2 */
+# define DP_MST_CAP (1 << 0)
+# define DP_SINGLE_STREAM_SIDEBAND_MSG (1 << 1) /* 2.0 */
+
+#define DP_NUMBER_OF_AUDIO_ENDPOINTS 0x022 /* 1.2 */
+
+/* AV_SYNC_DATA_BLOCK 1.2 */
+#define DP_AV_GRANULARITY 0x023
+# define DP_AG_FACTOR_MASK (0xf << 0)
+# define DP_AG_FACTOR_3MS (0 << 0)
+# define DP_AG_FACTOR_2MS (1 << 0)
+# define DP_AG_FACTOR_1MS (2 << 0)
+# define DP_AG_FACTOR_500US (3 << 0)
+# define DP_AG_FACTOR_200US (4 << 0)
+# define DP_AG_FACTOR_100US (5 << 0)
+# define DP_AG_FACTOR_10US (6 << 0)
+# define DP_AG_FACTOR_1US (7 << 0)
+# define DP_VG_FACTOR_MASK (0xf << 4)
+# define DP_VG_FACTOR_3MS (0 << 4)
+# define DP_VG_FACTOR_2MS (1 << 4)
+# define DP_VG_FACTOR_1MS (2 << 4)
+# define DP_VG_FACTOR_500US (3 << 4)
+# define DP_VG_FACTOR_200US (4 << 4)
+# define DP_VG_FACTOR_100US (5 << 4)
+
+#define DP_AUD_DEC_LAT0 0x024
+#define DP_AUD_DEC_LAT1 0x025
+
+#define DP_AUD_PP_LAT0 0x026
+#define DP_AUD_PP_LAT1 0x027
+
+#define DP_VID_INTER_LAT 0x028
+
+#define DP_VID_PROG_LAT 0x029
+
+#define DP_REP_LAT 0x02a
+
+#define DP_AUD_DEL_INS0 0x02b
+#define DP_AUD_DEL_INS1 0x02c
+#define DP_AUD_DEL_INS2 0x02d
+/* End of AV_SYNC_DATA_BLOCK */
+
+#define DP_RECEIVER_ALPM_CAP 0x02e /* eDP 1.4 */
+# define DP_ALPM_CAP (1 << 0)
+
+#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP 0x02f /* eDP 1.4 */
+# define DP_AUX_FRAME_SYNC_CAP (1 << 0)
+
+#define DP_GUID 0x030 /* 1.2 */
+
+#define DP_DSC_SUPPORT 0x060 /* DP 1.4 */
+# define DP_DSC_DECOMPRESSION_IS_SUPPORTED (1 << 0)
+# define DP_DSC_PASSTHROUGH_IS_SUPPORTED (1 << 1)
+
+#define DP_DSC_REV 0x061
+# define DP_DSC_MAJOR_MASK (0xf << 0)
+# define DP_DSC_MINOR_MASK (0xf << 4)
+# define DP_DSC_MAJOR_SHIFT 0
+# define DP_DSC_MINOR_SHIFT 4
+
+#define DP_DSC_RC_BUF_BLK_SIZE 0x062
+# define DP_DSC_RC_BUF_BLK_SIZE_1 0x0
+# define DP_DSC_RC_BUF_BLK_SIZE_4 0x1
+# define DP_DSC_RC_BUF_BLK_SIZE_16 0x2
+# define DP_DSC_RC_BUF_BLK_SIZE_64 0x3
+
+#define DP_DSC_RC_BUF_SIZE 0x063
+
+#define DP_DSC_SLICE_CAP_1 0x064
+# define DP_DSC_1_PER_DP_DSC_SINK (1 << 0)
+# define DP_DSC_2_PER_DP_DSC_SINK (1 << 1)
+# define DP_DSC_4_PER_DP_DSC_SINK (1 << 3)
+# define DP_DSC_6_PER_DP_DSC_SINK (1 << 4)
+# define DP_DSC_8_PER_DP_DSC_SINK (1 << 5)
+# define DP_DSC_10_PER_DP_DSC_SINK (1 << 6)
+# define DP_DSC_12_PER_DP_DSC_SINK (1 << 7)
+
+#define DP_DSC_LINE_BUF_BIT_DEPTH 0x065
+# define DP_DSC_LINE_BUF_BIT_DEPTH_MASK (0xf << 0)
+# define DP_DSC_LINE_BUF_BIT_DEPTH_9 0x0
+# define DP_DSC_LINE_BUF_BIT_DEPTH_10 0x1
+# define DP_DSC_LINE_BUF_BIT_DEPTH_11 0x2
+# define DP_DSC_LINE_BUF_BIT_DEPTH_12 0x3
+# define DP_DSC_LINE_BUF_BIT_DEPTH_13 0x4
+# define DP_DSC_LINE_BUF_BIT_DEPTH_14 0x5
+# define DP_DSC_LINE_BUF_BIT_DEPTH_15 0x6
+# define DP_DSC_LINE_BUF_BIT_DEPTH_16 0x7
+# define DP_DSC_LINE_BUF_BIT_DEPTH_8 0x8
+
+#define DP_DSC_BLK_PREDICTION_SUPPORT 0x066
+# define DP_DSC_BLK_PREDICTION_IS_SUPPORTED (1 << 0)
+
+#define DP_DSC_MAX_BITS_PER_PIXEL_LOW 0x067 /* eDP 1.4 */
+
+#define DP_DSC_MAX_BITS_PER_PIXEL_HI 0x068 /* eDP 1.4 */
+# define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK (0x3 << 0)
+# define DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT 8
+
+#define DP_DSC_DEC_COLOR_FORMAT_CAP 0x069
+# define DP_DSC_RGB (1 << 0)
+# define DP_DSC_YCbCr444 (1 << 1)
+# define DP_DSC_YCbCr422_Simple (1 << 2)
+# define DP_DSC_YCbCr422_Native (1 << 3)
+# define DP_DSC_YCbCr420_Native (1 << 4)
+
+#define DP_DSC_DEC_COLOR_DEPTH_CAP 0x06A
+# define DP_DSC_8_BPC (1 << 1)
+# define DP_DSC_10_BPC (1 << 2)
+# define DP_DSC_12_BPC (1 << 3)
+
+#define DP_DSC_PEAK_THROUGHPUT 0x06B
+# define DP_DSC_THROUGHPUT_MODE_0_MASK (0xf << 0)
+# define DP_DSC_THROUGHPUT_MODE_0_SHIFT 0
+# define DP_DSC_THROUGHPUT_MODE_0_UNSUPPORTED 0
+# define DP_DSC_THROUGHPUT_MODE_0_340 (1 << 0)
+# define DP_DSC_THROUGHPUT_MODE_0_400 (2 << 0)
+# define DP_DSC_THROUGHPUT_MODE_0_450 (3 << 0)
+# define DP_DSC_THROUGHPUT_MODE_0_500 (4 << 0)
+# define DP_DSC_THROUGHPUT_MODE_0_550 (5 << 0)
+# define DP_DSC_THROUGHPUT_MODE_0_600 (6 << 0)
+# define DP_DSC_THROUGHPUT_MODE_0_650 (7 << 0)
+# define DP_DSC_THROUGHPUT_MODE_0_700 (8 << 0)
+# define DP_DSC_THROUGHPUT_MODE_0_750 (9 << 0)
+# define DP_DSC_THROUGHPUT_MODE_0_800 (10 << 0)
+# define DP_DSC_THROUGHPUT_MODE_0_850 (11 << 0)
+# define DP_DSC_THROUGHPUT_MODE_0_900 (12 << 0)
+# define DP_DSC_THROUGHPUT_MODE_0_950 (13 << 0)
+# define DP_DSC_THROUGHPUT_MODE_0_1000 (14 << 0)
+# define DP_DSC_THROUGHPUT_MODE_0_170 (15 << 0) /* 1.4a */
+# define DP_DSC_THROUGHPUT_MODE_1_MASK (0xf << 4)
+# define DP_DSC_THROUGHPUT_MODE_1_SHIFT 4
+# define DP_DSC_THROUGHPUT_MODE_1_UNSUPPORTED 0
+# define DP_DSC_THROUGHPUT_MODE_1_340 (1 << 4)
+# define DP_DSC_THROUGHPUT_MODE_1_400 (2 << 4)
+# define DP_DSC_THROUGHPUT_MODE_1_450 (3 << 4)
+# define DP_DSC_THROUGHPUT_MODE_1_500 (4 << 4)
+# define DP_DSC_THROUGHPUT_MODE_1_550 (5 << 4)
+# define DP_DSC_THROUGHPUT_MODE_1_600 (6 << 4)
+# define DP_DSC_THROUGHPUT_MODE_1_650 (7 << 4)
+# define DP_DSC_THROUGHPUT_MODE_1_700 (8 << 4)
+# define DP_DSC_THROUGHPUT_MODE_1_750 (9 << 4)
+# define DP_DSC_THROUGHPUT_MODE_1_800 (10 << 4)
+# define DP_DSC_THROUGHPUT_MODE_1_850 (11 << 4)
+# define DP_DSC_THROUGHPUT_MODE_1_900 (12 << 4)
+# define DP_DSC_THROUGHPUT_MODE_1_950 (13 << 4)
+# define DP_DSC_THROUGHPUT_MODE_1_1000 (14 << 4)
+# define DP_DSC_THROUGHPUT_MODE_1_170 (15 << 4)
+
+#define DP_DSC_MAX_SLICE_WIDTH 0x06C
+#define DP_DSC_MIN_SLICE_WIDTH_VALUE 2560
+#define DP_DSC_SLICE_WIDTH_MULTIPLIER 320
+
+#define DP_DSC_SLICE_CAP_2 0x06D
+# define DP_DSC_16_PER_DP_DSC_SINK (1 << 0)
+# define DP_DSC_20_PER_DP_DSC_SINK (1 << 1)
+# define DP_DSC_24_PER_DP_DSC_SINK (1 << 2)
+
+#define DP_DSC_BITS_PER_PIXEL_INC 0x06F
+# define DP_DSC_BITS_PER_PIXEL_1_16 0x0
+# define DP_DSC_BITS_PER_PIXEL_1_8 0x1
+# define DP_DSC_BITS_PER_PIXEL_1_4 0x2
+# define DP_DSC_BITS_PER_PIXEL_1_2 0x3
+# define DP_DSC_BITS_PER_PIXEL_1 0x4
+
+#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
+# define DP_PSR_IS_SUPPORTED 1
+# define DP_PSR2_IS_SUPPORTED 2 /* eDP 1.4 */
+# define DP_PSR2_WITH_Y_COORD_IS_SUPPORTED 3 /* eDP 1.4a */
+# define DP_PSR2_WITH_Y_COORD_ET_SUPPORTED 4 /* eDP 1.5, adopted eDP 1.4b SCR */
+
+#define DP_PSR_CAPS 0x071 /* XXX 1.2? */
+# define DP_PSR_NO_TRAIN_ON_EXIT 1
+# define DP_PSR_SETUP_TIME_330 (0 << 1)
+# define DP_PSR_SETUP_TIME_275 (1 << 1)
+# define DP_PSR_SETUP_TIME_220 (2 << 1)
+# define DP_PSR_SETUP_TIME_165 (3 << 1)
+# define DP_PSR_SETUP_TIME_110 (4 << 1)
+# define DP_PSR_SETUP_TIME_55 (5 << 1)
+# define DP_PSR_SETUP_TIME_0 (6 << 1)
+# define DP_PSR_SETUP_TIME_MASK (7 << 1)
+# define DP_PSR_SETUP_TIME_SHIFT 1
+# define DP_PSR2_SU_Y_COORDINATE_REQUIRED (1 << 4) /* eDP 1.4a */
+# define DP_PSR2_SU_GRANULARITY_REQUIRED (1 << 5) /* eDP 1.4b */
+# define DP_PSR2_SU_AUX_FRAME_SYNC_NOT_NEEDED (1 << 6)/* eDP 1.5, adopted eDP 1.4b SCR */
+
+#define DP_PSR2_SU_X_GRANULARITY 0x072 /* eDP 1.4b */
+#define DP_PSR2_SU_Y_GRANULARITY 0x074 /* eDP 1.4b */
+
+/*
+ * 0x80-0x8f describe downstream port capabilities, with two layouts
+ * depending on whether DP_DETAILED_CAP_INFO_AVAILABLE is set. If it is
+ * not, each port's descriptor is one byte wide. If it is set, each
+ * port's descriptor is four bytes wide, starting with the one byte from
+ * the base info. As of DP interop v1.1a only VGA defines additional
+ * detail.
+ */
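+/*
+ * Addressing sketch, for illustration: with DP_DETAILED_CAP_INFO_AVAILABLE
+ * clear, port i's descriptor is the single byte at 0x80 + i (so all
+ * DP_MAX_DOWNSTREAM_PORTS fit in 0x80-0x8f); with it set, the descriptor
+ * is the four bytes starting at 0x80 + 4 * i. For example, an HDMI port
+ * reporting 0x78 (120) at offset 1 advertises a 120 * 2.5 = 300 MHz
+ * maximum TMDS clock per the offset-1 encoding below.
+ */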
+
+/* offset 0 */
+#define DP_DOWNSTREAM_PORT_0 0x80
+# define DP_DS_PORT_TYPE_MASK (7 << 0)
+# define DP_DS_PORT_TYPE_DP 0
+# define DP_DS_PORT_TYPE_VGA 1
+# define DP_DS_PORT_TYPE_DVI 2
+# define DP_DS_PORT_TYPE_HDMI 3
+# define DP_DS_PORT_TYPE_NON_EDID 4
+# define DP_DS_PORT_TYPE_DP_DUALMODE 5
+# define DP_DS_PORT_TYPE_WIRELESS 6
+# define DP_DS_PORT_HPD (1 << 3)
+# define DP_DS_NON_EDID_MASK (0xf << 4)
+# define DP_DS_NON_EDID_720x480i_60 (1 << 4)
+# define DP_DS_NON_EDID_720x480i_50 (2 << 4)
+# define DP_DS_NON_EDID_1920x1080i_60 (3 << 4)
+# define DP_DS_NON_EDID_1920x1080i_50 (4 << 4)
+# define DP_DS_NON_EDID_1280x720_60 (5 << 4)
+# define DP_DS_NON_EDID_1280x720_50 (7 << 4)
+/* offset 1 for VGA is maximum megapixels per second / 8 */
+/* offset 1 for DVI/HDMI is maximum TMDS clock in Mbps / 2.5 */
+/* offset 2 for VGA/DVI/HDMI */
+# define DP_DS_MAX_BPC_MASK (3 << 0)
+# define DP_DS_8BPC 0
+# define DP_DS_10BPC 1
+# define DP_DS_12BPC 2
+# define DP_DS_16BPC 3
+/* HDMI2.1 PCON FRL CONFIGURATION */
+# define DP_PCON_MAX_FRL_BW (7 << 2)
+# define DP_PCON_MAX_0GBPS (0 << 2)
+# define DP_PCON_MAX_9GBPS (1 << 2)
+# define DP_PCON_MAX_18GBPS (2 << 2)
+# define DP_PCON_MAX_24GBPS (3 << 2)
+# define DP_PCON_MAX_32GBPS (4 << 2)
+# define DP_PCON_MAX_40GBPS (5 << 2)
+# define DP_PCON_MAX_48GBPS (6 << 2)
+# define DP_PCON_SOURCE_CTL_MODE (1 << 5)
+
+/* offset 3 for DVI */
+# define DP_DS_DVI_DUAL_LINK (1 << 1)
+# define DP_DS_DVI_HIGH_COLOR_DEPTH (1 << 2)
+/* offset 3 for HDMI */
+# define DP_DS_HDMI_FRAME_SEQ_TO_FRAME_PACK (1 << 0)
+# define DP_DS_HDMI_YCBCR422_PASS_THROUGH (1 << 1)
+# define DP_DS_HDMI_YCBCR420_PASS_THROUGH (1 << 2)
+# define DP_DS_HDMI_YCBCR444_TO_422_CONV (1 << 3)
+# define DP_DS_HDMI_YCBCR444_TO_420_CONV (1 << 4)
+
+/*
+ * The VESA DP-to-HDMI PCON Specification adds caps for colorspace
+ * conversion in DFP cap DPCD 83h, Sec 6.1 Table-3.
+ * Based on the advertised support, the source can enable color
+ * conversion by writing to PROTOCOL_CONVERTER_CONTROL_2
+ * (DPCD 3052h).
+ */
+# define DP_DS_HDMI_BT601_RGB_YCBCR_CONV (1 << 5)
+# define DP_DS_HDMI_BT709_RGB_YCBCR_CONV (1 << 6)
+# define DP_DS_HDMI_BT2020_RGB_YCBCR_CONV (1 << 7)
+
+#define DP_MAX_DOWNSTREAM_PORTS 0x10
+
+/* DP Forward Error Correction Registers */
+#define DP_FEC_CAPABILITY 0x090 /* 1.4 */
+# define DP_FEC_CAPABLE (1 << 0)
+# define DP_FEC_UNCORR_BLK_ERROR_COUNT_CAP (1 << 1)
+# define DP_FEC_CORR_BLK_ERROR_COUNT_CAP (1 << 2)
+# define DP_FEC_BIT_ERROR_COUNT_CAP (1 << 3)
+#define DP_FEC_CAPABILITY_1 0x091 /* 2.0 */
+
+/* DP-HDMI2.1 PCON DSC ENCODER SUPPORT */
+#define DP_PCON_DSC_ENCODER_CAP_SIZE 0xD /* 0x92 through 0x9E */
+#define DP_PCON_DSC_ENCODER 0x092
+# define DP_PCON_DSC_ENCODER_SUPPORTED (1 << 0)
+# define DP_PCON_DSC_PPS_ENC_OVERRIDE (1 << 1)
+
+/* DP-HDMI2.1 PCON DSC Version */
+#define DP_PCON_DSC_VERSION 0x093
+# define DP_PCON_DSC_MAJOR_MASK (0xF << 0)
+# define DP_PCON_DSC_MINOR_MASK (0xF << 4)
+# define DP_PCON_DSC_MAJOR_SHIFT 0
+# define DP_PCON_DSC_MINOR_SHIFT 4
+
+/* DP-HDMI2.1 PCON DSC RC Buffer block size */
+#define DP_PCON_DSC_RC_BUF_BLK_INFO 0x094
+# define DP_PCON_DSC_RC_BUF_BLK_SIZE (0x3 << 0)
+# define DP_PCON_DSC_RC_BUF_BLK_1KB 0
+# define DP_PCON_DSC_RC_BUF_BLK_4KB 1
+# define DP_PCON_DSC_RC_BUF_BLK_16KB 2
+# define DP_PCON_DSC_RC_BUF_BLK_64KB 3
+
+/* DP-HDMI2.1 PCON DSC RC Buffer size */
+#define DP_PCON_DSC_RC_BUF_SIZE 0x095
+
+/* DP-HDMI2.1 PCON DSC Slice capabilities-1 */
+#define DP_PCON_DSC_SLICE_CAP_1 0x096
+# define DP_PCON_DSC_1_PER_DSC_ENC (0x1 << 0)
+# define DP_PCON_DSC_2_PER_DSC_ENC (0x1 << 1)
+# define DP_PCON_DSC_4_PER_DSC_ENC (0x1 << 3)
+# define DP_PCON_DSC_6_PER_DSC_ENC (0x1 << 4)
+# define DP_PCON_DSC_8_PER_DSC_ENC (0x1 << 5)
+# define DP_PCON_DSC_10_PER_DSC_ENC (0x1 << 6)
+# define DP_PCON_DSC_12_PER_DSC_ENC (0x1 << 7)
+
+#define DP_PCON_DSC_BUF_BIT_DEPTH 0x097
+# define DP_PCON_DSC_BIT_DEPTH_MASK (0xF << 0)
+# define DP_PCON_DSC_DEPTH_9_BITS 0
+# define DP_PCON_DSC_DEPTH_10_BITS 1
+# define DP_PCON_DSC_DEPTH_11_BITS 2
+# define DP_PCON_DSC_DEPTH_12_BITS 3
+# define DP_PCON_DSC_DEPTH_13_BITS 4
+# define DP_PCON_DSC_DEPTH_14_BITS 5
+# define DP_PCON_DSC_DEPTH_15_BITS 6
+# define DP_PCON_DSC_DEPTH_16_BITS 7
+# define DP_PCON_DSC_DEPTH_8_BITS 8
+
+#define DP_PCON_DSC_BLOCK_PREDICTION 0x098
+# define DP_PCON_DSC_BLOCK_PRED_SUPPORT (0x1 << 0)
+
+#define DP_PCON_DSC_ENC_COLOR_FMT_CAP 0x099
+# define DP_PCON_DSC_ENC_RGB (0x1 << 0)
+# define DP_PCON_DSC_ENC_YUV444 (0x1 << 1)
+# define DP_PCON_DSC_ENC_YUV422_S (0x1 << 2)
+# define DP_PCON_DSC_ENC_YUV422_N (0x1 << 3)
+# define DP_PCON_DSC_ENC_YUV420_N (0x1 << 4)
+
+#define DP_PCON_DSC_ENC_COLOR_DEPTH_CAP 0x09A
+# define DP_PCON_DSC_ENC_8BPC (0x1 << 1)
+# define DP_PCON_DSC_ENC_10BPC (0x1 << 2)
+# define DP_PCON_DSC_ENC_12BPC (0x1 << 3)
+
+#define DP_PCON_DSC_MAX_SLICE_WIDTH 0x09B
+
+/* DP-HDMI2.1 PCON DSC Slice capabilities-2 */
+#define DP_PCON_DSC_SLICE_CAP_2 0x09C
+# define DP_PCON_DSC_16_PER_DSC_ENC (0x1 << 0)
+# define DP_PCON_DSC_20_PER_DSC_ENC (0x1 << 1)
+# define DP_PCON_DSC_24_PER_DSC_ENC (0x1 << 2)
+
+/* DP-HDMI2.1 PCON HDMI TX Encoder Bits/pixel increment */
+#define DP_PCON_DSC_BPP_INCR 0x09E
+# define DP_PCON_DSC_BPP_INCR_MASK (0x7 << 0)
+# define DP_PCON_DSC_ONE_16TH_BPP 0
+# define DP_PCON_DSC_ONE_8TH_BPP 1
+# define DP_PCON_DSC_ONE_4TH_BPP 2
+# define DP_PCON_DSC_ONE_HALF_BPP 3
+# define DP_PCON_DSC_ONE_BPP 4
+
+/* DP Extended DSC Capabilities */
+#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 0x0a0 /* DP 1.4a SCR */
+#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1
+#define DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2
+
+/* DFP Capability Extension */
+#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0a3 /* 2.0 */
+
+/* Link Configuration */
+#define DP_LINK_BW_SET 0x100
+# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */
+# define DP_LINK_BW_1_62 0x06
+# define DP_LINK_BW_2_7 0x0a
+# define DP_LINK_BW_5_4 0x14 /* 1.2 */
+# define DP_LINK_BW_8_1 0x1e /* 1.4 */
+# define DP_LINK_BW_10 0x01 /* 2.0 128b/132b Link Layer */
+# define DP_LINK_BW_13_5 0x04 /* 2.0 128b/132b Link Layer */
+# define DP_LINK_BW_20 0x02 /* 2.0 128b/132b Link Layer */
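+
+/*
+ * For the 8b/10b rates above, the code is the per-lane link rate in units
+ * of 0.27 Gbps; e.g. 0x0a * 0.27 = 2.7 Gbps and 0x1e * 0.27 = 8.1 Gbps.
+ * The 128b/132b codes (UHBR10/13.5/20) are discrete selectors, not
+ * multipliers.
+ */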
+
+#define DP_LANE_COUNT_SET 0x101
+# define DP_LANE_COUNT_MASK 0x0f
+# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
+
+#define DP_TRAINING_PATTERN_SET 0x102
+# define DP_TRAINING_PATTERN_DISABLE 0
+# define DP_TRAINING_PATTERN_1 1
+# define DP_TRAINING_PATTERN_2 2
+# define DP_TRAINING_PATTERN_2_CDS 3 /* 2.0 E11 */
+# define DP_TRAINING_PATTERN_3 3 /* 1.2 */
+# define DP_TRAINING_PATTERN_4 7 /* 1.4 */
+# define DP_TRAINING_PATTERN_MASK 0x3
+# define DP_TRAINING_PATTERN_MASK_1_4 0xf
+
+/* DPCD 1.1 only. For DPCD >= 1.2 see per-lane DP_LINK_QUAL_LANEn_SET */
+# define DP_LINK_QUAL_PATTERN_11_DISABLE (0 << 2)
+# define DP_LINK_QUAL_PATTERN_11_D10_2 (1 << 2)
+# define DP_LINK_QUAL_PATTERN_11_ERROR_RATE (2 << 2)
+# define DP_LINK_QUAL_PATTERN_11_PRBS7 (3 << 2)
+# define DP_LINK_QUAL_PATTERN_11_MASK (3 << 2)
+
+# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
+# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
+
+# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6)
+# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6)
+# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6)
+# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6)
+
+#define DP_TRAINING_LANE0_SET 0x103
+#define DP_TRAINING_LANE1_SET 0x104
+#define DP_TRAINING_LANE2_SET 0x105
+#define DP_TRAINING_LANE3_SET 0x106
+
+# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
+# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
+# define DP_TRAIN_MAX_SWING_REACHED (1 << 2)
+# define DP_TRAIN_VOLTAGE_SWING_LEVEL_0 (0 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_LEVEL_1 (1 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_LEVEL_2 (2 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_LEVEL_3 (3 << 0)
+
+# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3)
+# define DP_TRAIN_PRE_EMPH_LEVEL_0 (0 << 3)
+# define DP_TRAIN_PRE_EMPH_LEVEL_1 (1 << 3)
+# define DP_TRAIN_PRE_EMPH_LEVEL_2 (2 << 3)
+# define DP_TRAIN_PRE_EMPH_LEVEL_3 (3 << 3)
+
+# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
+# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
+
+# define DP_TX_FFE_PRESET_VALUE_MASK (0xf << 0) /* 2.0 128b/132b Link Layer */
+
+#define DP_DOWNSPREAD_CTRL 0x107
+# define DP_SPREAD_AMP_0_5 (1 << 4)
+# define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */
+
+#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
+# define DP_SET_ANSI_8B10B (1 << 0)
+# define DP_SET_ANSI_128B132B (1 << 1)
+
+#define DP_I2C_SPEED_CONTROL_STATUS 0x109 /* DPI */
+/* bitmask as for DP_I2C_SPEED_CAP */
+
+#define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? */
+# define DP_ALTERNATE_SCRAMBLER_RESET_ENABLE (1 << 0)
+# define DP_FRAMING_CHANGE_ENABLE (1 << 1)
+# define DP_PANEL_SELF_TEST_ENABLE (1 << 7)
+
+#define DP_LINK_QUAL_LANE0_SET 0x10b /* DPCD >= 1.2 */
+#define DP_LINK_QUAL_LANE1_SET 0x10c
+#define DP_LINK_QUAL_LANE2_SET 0x10d
+#define DP_LINK_QUAL_LANE3_SET 0x10e
+# define DP_LINK_QUAL_PATTERN_DISABLE 0
+# define DP_LINK_QUAL_PATTERN_D10_2 1
+# define DP_LINK_QUAL_PATTERN_ERROR_RATE 2
+# define DP_LINK_QUAL_PATTERN_PRBS7 3
+# define DP_LINK_QUAL_PATTERN_80BIT_CUSTOM 4
+# define DP_LINK_QUAL_PATTERN_CP2520_PAT_1 5
+# define DP_LINK_QUAL_PATTERN_CP2520_PAT_2 6
+# define DP_LINK_QUAL_PATTERN_CP2520_PAT_3 7
+/* DP 2.0 UHBR10, UHBR13.5, UHBR20 */
+# define DP_LINK_QUAL_PATTERN_128B132B_TPS1 0x08
+# define DP_LINK_QUAL_PATTERN_128B132B_TPS2 0x10
+# define DP_LINK_QUAL_PATTERN_PRSBS9 0x18
+# define DP_LINK_QUAL_PATTERN_PRSBS11 0x20
+# define DP_LINK_QUAL_PATTERN_PRSBS15 0x28
+# define DP_LINK_QUAL_PATTERN_PRSBS23 0x30
+# define DP_LINK_QUAL_PATTERN_PRSBS31 0x38
+# define DP_LINK_QUAL_PATTERN_CUSTOM 0x40
+# define DP_LINK_QUAL_PATTERN_SQUARE 0x48
+
+#define DP_TRAINING_LANE0_1_SET2 0x10f
+#define DP_TRAINING_LANE2_3_SET2 0x110
+# define DP_LANE02_POST_CURSOR2_SET_MASK (3 << 0)
+# define DP_LANE02_MAX_POST_CURSOR2_REACHED (1 << 2)
+# define DP_LANE13_POST_CURSOR2_SET_MASK (3 << 4)
+# define DP_LANE13_MAX_POST_CURSOR2_REACHED (1 << 6)
+
+#define DP_MSTM_CTRL 0x111 /* 1.2 */
+# define DP_MST_EN (1 << 0)
+# define DP_UP_REQ_EN (1 << 1)
+# define DP_UPSTREAM_IS_SRC (1 << 2)
+
+#define DP_AUDIO_DELAY0 0x112 /* 1.2 */
+#define DP_AUDIO_DELAY1 0x113
+#define DP_AUDIO_DELAY2 0x114
+
+#define DP_LINK_RATE_SET 0x115 /* eDP 1.4 */
+# define DP_LINK_RATE_SET_SHIFT 0
+# define DP_LINK_RATE_SET_MASK (7 << 0)
+
+#define DP_RECEIVER_ALPM_CONFIG 0x116 /* eDP 1.4 */
+# define DP_ALPM_ENABLE (1 << 0)
+# define DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE (1 << 1)
+
+#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF 0x117 /* eDP 1.4 */
+# define DP_AUX_FRAME_SYNC_ENABLE (1 << 0)
+# define DP_IRQ_HPD_ENABLE (1 << 1)
+
+#define DP_UPSTREAM_DEVICE_DP_PWR_NEED 0x118 /* 1.2 */
+# define DP_PWR_NOT_NEEDED (1 << 0)
+
+#define DP_FEC_CONFIGURATION 0x120 /* 1.4 */
+# define DP_FEC_READY (1 << 0)
+# define DP_FEC_ERR_COUNT_SEL_MASK (7 << 1)
+# define DP_FEC_ERR_COUNT_DIS (0 << 1)
+# define DP_FEC_UNCORR_BLK_ERROR_COUNT (1 << 1)
+# define DP_FEC_CORR_BLK_ERROR_COUNT (2 << 1)
+# define DP_FEC_BIT_ERROR_COUNT (3 << 1)
+# define DP_FEC_LANE_SELECT_MASK (3 << 4)
+# define DP_FEC_LANE_0_SELECT (0 << 4)
+# define DP_FEC_LANE_1_SELECT (1 << 4)
+# define DP_FEC_LANE_2_SELECT (2 << 4)
+# define DP_FEC_LANE_3_SELECT (3 << 4)
+
+#define DP_AUX_FRAME_SYNC_VALUE 0x15c /* eDP 1.4 */
+# define DP_AUX_FRAME_SYNC_VALID (1 << 0)
+
+#define DP_DSC_ENABLE 0x160 /* DP 1.4 */
+# define DP_DECOMPRESSION_EN (1 << 0)
+#define DP_DSC_CONFIGURATION 0x161 /* DP 2.0 */
+
+#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
+# define DP_PSR_ENABLE BIT(0)
+# define DP_PSR_MAIN_LINK_ACTIVE BIT(1)
+# define DP_PSR_CRC_VERIFICATION BIT(2)
+# define DP_PSR_FRAME_CAPTURE BIT(3)
+# define DP_PSR_SU_REGION_SCANLINE_CAPTURE BIT(4) /* eDP 1.4a */
+# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS BIT(5) /* eDP 1.4a */
+# define DP_PSR_ENABLE_PSR2 BIT(6) /* eDP 1.4a */
+
+#define DP_ADAPTER_CTRL 0x1a0
+# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0)
+
+#define DP_BRANCH_DEVICE_CTRL 0x1a1
+# define DP_BRANCH_DEVICE_IRQ_HPD (1 << 0)
+
+#define DP_PAYLOAD_ALLOCATE_SET 0x1c0
+#define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1
+#define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2
+
+/* Link/Sink Device Status */
+#define DP_SINK_COUNT 0x200
+/* prior to DPCD 1.2, bit 7 was reserved (must be zero) */
+# define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f))
+# define DP_SINK_CP_READY (1 << 6)
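+
+/*
+ * Worked example: a raw DP_SINK_COUNT readout of 0x81 (bit 7 set, low
+ * bits 0x01) decodes as
+ *   DP_GET_SINK_COUNT(0x81) = ((0x81 & 0x80) >> 1) | (0x81 & 0x3f)
+ *                           = 0x40 | 0x01 = 0x41.
+ */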
+
+#define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201
+# define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0)
+# define DP_AUTOMATED_TEST_REQUEST (1 << 1)
+# define DP_CP_IRQ (1 << 2)
+# define DP_MCCS_IRQ (1 << 3)
+# define DP_DOWN_REP_MSG_RDY (1 << 4) /* 1.2 MST */
+# define DP_UP_REQ_MSG_RDY (1 << 5) /* 1.2 MST */
+# define DP_SINK_SPECIFIC_IRQ (1 << 6)
+
+#define DP_LANE0_1_STATUS 0x202
+#define DP_LANE2_3_STATUS 0x203
+# define DP_LANE_CR_DONE (1 << 0)
+# define DP_LANE_CHANNEL_EQ_DONE (1 << 1)
+# define DP_LANE_SYMBOL_LOCKED (1 << 2)
+
+#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE | \
+ DP_LANE_CHANNEL_EQ_DONE | \
+ DP_LANE_SYMBOL_LOCKED)
+
+#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
+#define DP_INTERLANE_ALIGN_DONE (1 << 0)
+#define DP_128B132B_DPRX_EQ_INTERLANE_ALIGN_DONE (1 << 2) /* 2.0 E11 */
+#define DP_128B132B_DPRX_CDS_INTERLANE_ALIGN_DONE (1 << 3) /* 2.0 E11 */
+#define DP_128B132B_LT_FAILED (1 << 4) /* 2.0 E11 */
+#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
+#define DP_LINK_STATUS_UPDATED (1 << 7)
+
+#define DP_SINK_STATUS 0x205
+# define DP_RECEIVE_PORT_0_STATUS (1 << 0)
+# define DP_RECEIVE_PORT_1_STATUS (1 << 1)
+# define DP_STREAM_REGENERATION_STATUS (1 << 2) /* 2.0 */
+# define DP_INTRA_HOP_AUX_REPLY_INDICATION (1 << 3) /* 2.0 */
+
+#define DP_ADJUST_REQUEST_LANE0_1 0x206
+#define DP_ADJUST_REQUEST_LANE2_3 0x207
+# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
+# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
+# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
+# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
+# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
+# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
+# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
+# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
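+
+/*
+ * Extraction sketch: each ADJUST_REQUEST byte packs two lanes, so e.g.
+ * the requested pre-emphasis level for lane 1 is
+ *   (val & DP_ADJUST_PRE_EMPHASIS_LANE1_MASK) >>
+ *       DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT
+ * with lanes 2/3 read the same way from DP_ADJUST_REQUEST_LANE2_3.
+ */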
+
+/* DP 2.0 128b/132b Link Layer */
+# define DP_ADJUST_TX_FFE_PRESET_LANE0_MASK (0xf << 0)
+# define DP_ADJUST_TX_FFE_PRESET_LANE0_SHIFT 0
+# define DP_ADJUST_TX_FFE_PRESET_LANE1_MASK (0xf << 4)
+# define DP_ADJUST_TX_FFE_PRESET_LANE1_SHIFT 4
+
+#define DP_ADJUST_REQUEST_POST_CURSOR2 0x20c
+# define DP_ADJUST_POST_CURSOR2_LANE0_MASK 0x03
+# define DP_ADJUST_POST_CURSOR2_LANE0_SHIFT 0
+# define DP_ADJUST_POST_CURSOR2_LANE1_MASK 0x0c
+# define DP_ADJUST_POST_CURSOR2_LANE1_SHIFT 2
+# define DP_ADJUST_POST_CURSOR2_LANE2_MASK 0x30
+# define DP_ADJUST_POST_CURSOR2_LANE2_SHIFT 4
+# define DP_ADJUST_POST_CURSOR2_LANE3_MASK 0xc0
+# define DP_ADJUST_POST_CURSOR2_LANE3_SHIFT 6
+
+#define DP_TEST_REQUEST 0x218
+# define DP_TEST_LINK_TRAINING (1 << 0)
+# define DP_TEST_LINK_VIDEO_PATTERN (1 << 1)
+# define DP_TEST_LINK_EDID_READ (1 << 2)
+# define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */
+# define DP_TEST_LINK_FAUX_PATTERN (1 << 4) /* DPCD >= 1.2 */
+# define DP_TEST_LINK_AUDIO_PATTERN (1 << 5) /* DPCD >= 1.2 */
+# define DP_TEST_LINK_AUDIO_DISABLED_VIDEO (1 << 6) /* DPCD >= 1.2 */
+
+#define DP_TEST_LINK_RATE 0x219
+# define DP_LINK_RATE_162 (0x6)
+# define DP_LINK_RATE_27 (0xa)
+
+#define DP_TEST_LANE_COUNT 0x220
+
+#define DP_TEST_PATTERN 0x221
+# define DP_NO_TEST_PATTERN 0x0
+# define DP_COLOR_RAMP 0x1
+# define DP_BLACK_AND_WHITE_VERTICAL_LINES 0x2
+# define DP_COLOR_SQUARE 0x3
+
+#define DP_TEST_H_TOTAL_HI 0x222
+#define DP_TEST_H_TOTAL_LO 0x223
+
+#define DP_TEST_V_TOTAL_HI 0x224
+#define DP_TEST_V_TOTAL_LO 0x225
+
+#define DP_TEST_H_START_HI 0x226
+#define DP_TEST_H_START_LO 0x227
+
+#define DP_TEST_V_START_HI 0x228
+#define DP_TEST_V_START_LO 0x229
+
+#define DP_TEST_HSYNC_HI 0x22A
+# define DP_TEST_HSYNC_POLARITY (1 << 7)
+# define DP_TEST_HSYNC_WIDTH_HI_MASK (127 << 0)
+#define DP_TEST_HSYNC_WIDTH_LO 0x22B
+
+#define DP_TEST_VSYNC_HI 0x22C
+# define DP_TEST_VSYNC_POLARITY (1 << 7)
+# define DP_TEST_VSYNC_WIDTH_HI_MASK (127 << 0)
+#define DP_TEST_VSYNC_WIDTH_LO 0x22D
+
+#define DP_TEST_H_WIDTH_HI 0x22E
+#define DP_TEST_H_WIDTH_LO 0x22F
+
+#define DP_TEST_V_HEIGHT_HI 0x230
+#define DP_TEST_V_HEIGHT_LO 0x231
+
+#define DP_TEST_MISC0 0x232
+# define DP_TEST_SYNC_CLOCK (1 << 0)
+# define DP_TEST_COLOR_FORMAT_MASK (3 << 1)
+# define DP_TEST_COLOR_FORMAT_SHIFT 1
+# define DP_COLOR_FORMAT_RGB (0 << 1)
+# define DP_COLOR_FORMAT_YCbCr422 (1 << 1)
+# define DP_COLOR_FORMAT_YCbCr444 (2 << 1)
+# define DP_TEST_DYNAMIC_RANGE_VESA (0 << 3)
+# define DP_TEST_DYNAMIC_RANGE_CEA (1 << 3)
+# define DP_TEST_YCBCR_COEFFICIENTS (1 << 4)
+# define DP_YCBCR_COEFFICIENTS_ITU601 (0 << 4)
+# define DP_YCBCR_COEFFICIENTS_ITU709 (1 << 4)
+# define DP_TEST_BIT_DEPTH_MASK (7 << 5)
+# define DP_TEST_BIT_DEPTH_SHIFT 5
+# define DP_TEST_BIT_DEPTH_6 (0 << 5)
+# define DP_TEST_BIT_DEPTH_8 (1 << 5)
+# define DP_TEST_BIT_DEPTH_10 (2 << 5)
+# define DP_TEST_BIT_DEPTH_12 (3 << 5)
+# define DP_TEST_BIT_DEPTH_16 (4 << 5)
+
+#define DP_TEST_MISC1 0x233
+# define DP_TEST_REFRESH_DENOMINATOR (1 << 0)
+# define DP_TEST_INTERLACED (1 << 1)
+
+#define DP_TEST_REFRESH_RATE_NUMERATOR 0x234
+
+#define DP_TEST_CRC_R_CR 0x240
+#define DP_TEST_CRC_G_Y 0x242
+#define DP_TEST_CRC_B_CB 0x244
+
+#define DP_TEST_SINK_MISC 0x246
+# define DP_TEST_CRC_SUPPORTED (1 << 5)
+# define DP_TEST_COUNT_MASK 0xf
+
+#define DP_PHY_TEST_PATTERN 0x248
+# define DP_PHY_TEST_PATTERN_SEL_MASK 0x7
+# define DP_PHY_TEST_PATTERN_NONE 0x0
+# define DP_PHY_TEST_PATTERN_D10_2 0x1
+# define DP_PHY_TEST_PATTERN_ERROR_COUNT 0x2
+# define DP_PHY_TEST_PATTERN_PRBS7 0x3
+# define DP_PHY_TEST_PATTERN_80BIT_CUSTOM 0x4
+# define DP_PHY_TEST_PATTERN_CP2520 0x5
+
+#define DP_PHY_SQUARE_PATTERN 0x249
+
+#define DP_TEST_HBR2_SCRAMBLER_RESET 0x24A
+#define DP_TEST_80BIT_CUSTOM_PATTERN_7_0 0x250
+#define DP_TEST_80BIT_CUSTOM_PATTERN_15_8 0x251
+#define DP_TEST_80BIT_CUSTOM_PATTERN_23_16 0x252
+#define DP_TEST_80BIT_CUSTOM_PATTERN_31_24 0x253
+#define DP_TEST_80BIT_CUSTOM_PATTERN_39_32 0x254
+#define DP_TEST_80BIT_CUSTOM_PATTERN_47_40 0x255
+#define DP_TEST_80BIT_CUSTOM_PATTERN_55_48 0x256
+#define DP_TEST_80BIT_CUSTOM_PATTERN_63_56 0x257
+#define DP_TEST_80BIT_CUSTOM_PATTERN_71_64 0x258
+#define DP_TEST_80BIT_CUSTOM_PATTERN_79_72 0x259
+
+#define DP_TEST_RESPONSE 0x260
+# define DP_TEST_ACK (1 << 0)
+# define DP_TEST_NAK (1 << 1)
+# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2)
+
+#define DP_TEST_EDID_CHECKSUM 0x261
+
+#define DP_TEST_SINK 0x270
+# define DP_TEST_SINK_START (1 << 0)
+#define DP_TEST_AUDIO_MODE 0x271
+#define DP_TEST_AUDIO_PATTERN_TYPE 0x272
+#define DP_TEST_AUDIO_PERIOD_CH1 0x273
+#define DP_TEST_AUDIO_PERIOD_CH2 0x274
+#define DP_TEST_AUDIO_PERIOD_CH3 0x275
+#define DP_TEST_AUDIO_PERIOD_CH4 0x276
+#define DP_TEST_AUDIO_PERIOD_CH5 0x277
+#define DP_TEST_AUDIO_PERIOD_CH6 0x278
+#define DP_TEST_AUDIO_PERIOD_CH7 0x279
+#define DP_TEST_AUDIO_PERIOD_CH8 0x27A
+
+#define DP_FEC_STATUS 0x280 /* 1.4 */
+# define DP_FEC_DECODE_EN_DETECTED (1 << 0)
+# define DP_FEC_DECODE_DIS_DETECTED (1 << 1)
+
+#define DP_FEC_ERROR_COUNT_LSB 0x0281 /* 1.4 */
+
+#define DP_FEC_ERROR_COUNT_MSB 0x0282 /* 1.4 */
+# define DP_FEC_ERROR_COUNT_MASK 0x7F
+# define DP_FEC_ERR_COUNT_VALID (1 << 7)
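+
+/*
+ * Combining sketch: the 15-bit FEC error count is
+ *   ((msb & DP_FEC_ERROR_COUNT_MASK) << 8) | lsb
+ * and is only meaningful when DP_FEC_ERR_COUNT_VALID is set in the MSB.
+ */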
+
+#define DP_PAYLOAD_TABLE_UPDATE_STATUS 0x2c0 /* 1.2 MST */
+# define DP_PAYLOAD_TABLE_UPDATED (1 << 0)
+# define DP_PAYLOAD_ACT_HANDLED (1 << 1)
+
+#define DP_VC_PAYLOAD_ID_SLOT_1 0x2c1 /* 1.2 MST */
+/* up to ID_SLOT_63 at 0x2ff */
+
+/* Source Device-specific */
+#define DP_SOURCE_OUI 0x300
+
+/* Sink Device-specific */
+#define DP_SINK_OUI 0x400
+
+/* Branch Device-specific */
+#define DP_BRANCH_OUI 0x500
+#define DP_BRANCH_ID 0x503
+#define DP_BRANCH_REVISION_START 0x509
+#define DP_BRANCH_HW_REV 0x509
+#define DP_BRANCH_SW_REV 0x50A
+
+/* Link/Sink Device Power Control */
+#define DP_SET_POWER 0x600
+# define DP_SET_POWER_D0 0x1
+# define DP_SET_POWER_D3 0x2
+# define DP_SET_POWER_MASK 0x3
+# define DP_SET_POWER_D3_AUX_ON 0x5
+
+/* eDP-specific */
+#define DP_EDP_DPCD_REV 0x700 /* eDP 1.2 */
+# define DP_EDP_11 0x00
+# define DP_EDP_12 0x01
+# define DP_EDP_13 0x02
+# define DP_EDP_14 0x03
+# define DP_EDP_14a 0x04 /* eDP 1.4a */
+# define DP_EDP_14b 0x05 /* eDP 1.4b */
+
+#define DP_EDP_GENERAL_CAP_1 0x701
+# define DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP (1 << 0)
+# define DP_EDP_BACKLIGHT_PIN_ENABLE_CAP (1 << 1)
+# define DP_EDP_BACKLIGHT_AUX_ENABLE_CAP (1 << 2)
+# define DP_EDP_PANEL_SELF_TEST_PIN_ENABLE_CAP (1 << 3)
+# define DP_EDP_PANEL_SELF_TEST_AUX_ENABLE_CAP (1 << 4)
+# define DP_EDP_FRC_ENABLE_CAP (1 << 5)
+# define DP_EDP_COLOR_ENGINE_CAP (1 << 6)
+# define DP_EDP_SET_POWER_CAP (1 << 7)
+
+#define DP_EDP_BACKLIGHT_ADJUSTMENT_CAP 0x702
+# define DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP (1 << 0)
+# define DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP (1 << 1)
+# define DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT (1 << 2)
+# define DP_EDP_BACKLIGHT_AUX_PWM_PRODUCT_CAP (1 << 3)
+# define DP_EDP_BACKLIGHT_FREQ_PWM_PIN_PASSTHRU_CAP (1 << 4)
+# define DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP (1 << 5)
+# define DP_EDP_DYNAMIC_BACKLIGHT_CAP (1 << 6)
+# define DP_EDP_VBLANK_BACKLIGHT_UPDATE_CAP (1 << 7)
+
+#define DP_EDP_GENERAL_CAP_2 0x703
+# define DP_EDP_OVERDRIVE_ENGINE_ENABLED (1 << 0)
+
+#define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */
+# define DP_EDP_X_REGION_CAP_MASK (0xf << 0)
+# define DP_EDP_X_REGION_CAP_SHIFT 0
+# define DP_EDP_Y_REGION_CAP_MASK (0xf << 4)
+# define DP_EDP_Y_REGION_CAP_SHIFT 4
+
+#define DP_EDP_DISPLAY_CONTROL_REGISTER 0x720
+# define DP_EDP_BACKLIGHT_ENABLE (1 << 0)
+# define DP_EDP_BLACK_VIDEO_ENABLE (1 << 1)
+# define DP_EDP_FRC_ENABLE (1 << 2)
+# define DP_EDP_COLOR_ENGINE_ENABLE (1 << 3)
+# define DP_EDP_VBLANK_BACKLIGHT_UPDATE_ENABLE (1 << 7)
+
+#define DP_EDP_BACKLIGHT_MODE_SET_REGISTER 0x721
+# define DP_EDP_BACKLIGHT_CONTROL_MODE_MASK (3 << 0)
+# define DP_EDP_BACKLIGHT_CONTROL_MODE_PWM (0 << 0)
+# define DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET (1 << 0)
+# define DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD (2 << 0)
+# define DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT (3 << 0)
+# define DP_EDP_BACKLIGHT_FREQ_PWM_PIN_PASSTHRU_ENABLE (1 << 2)
+# define DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE (1 << 3)
+# define DP_EDP_DYNAMIC_BACKLIGHT_ENABLE (1 << 4)
+# define DP_EDP_REGIONAL_BACKLIGHT_ENABLE (1 << 5)
+# define DP_EDP_UPDATE_REGION_BRIGHTNESS (1 << 6) /* eDP 1.4 */
+
+#define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722
+#define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723
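+
+/*
+ * Worked example: in AUX (DPCD) brightness control mode, a 16-bit level
+ * such as 0x1234 is programmed by writing 0x12 to the MSB register and
+ * 0x34 to the LSB register; DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT above
+ * indicates whether the panel consumes one or two bytes.
+ */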
+
+#define DP_EDP_PWMGEN_BIT_COUNT 0x724
+#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN 0x725
+#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX 0x726
+# define DP_EDP_PWMGEN_BIT_COUNT_MASK (0x1f << 0)
+
+#define DP_EDP_BACKLIGHT_CONTROL_STATUS 0x727
+
+#define DP_EDP_BACKLIGHT_FREQ_SET 0x728
+# define DP_EDP_BACKLIGHT_FREQ_BASE_KHZ 27000
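+
+/*
+ * Frequency sketch (one common reading of the eDP backlight model, not
+ * normative here): the byte written to this register divides the 27 MHz
+ * base, and the PWM generator counts 2^n steps per period for an n-bit
+ * DP_EDP_PWMGEN_BIT_COUNT, giving roughly
+ *   f_pwm = 27 MHz / (divider << n),
+ * e.g. divider 105 with n = 10 yields about 251 Hz.
+ */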
+
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MSB 0x72a
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MID 0x72b
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_LSB 0x72c
+
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MSB 0x72d
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MID 0x72e
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB 0x72f
+
+#define DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET 0x732
+#define DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET 0x733
+
+#define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */
+#define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */
+
+#define DP_EDP_MSO_LINK_CAPABILITIES 0x7a4 /* eDP 1.4 */
+# define DP_EDP_MSO_NUMBER_OF_LINKS_MASK (7 << 0)
+# define DP_EDP_MSO_NUMBER_OF_LINKS_SHIFT 0
+# define DP_EDP_MSO_INDEPENDENT_LINK_BIT (1 << 3)
+
+/* Sideband MSG Buffers */
+#define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */
+#define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */
+#define DP_SIDEBAND_MSG_DOWN_REP_BASE 0x1400 /* 1.2 MST */
+#define DP_SIDEBAND_MSG_UP_REQ_BASE 0x1600 /* 1.2 MST */
+
+/* DPRX Event Status Indicator */
+#define DP_SINK_COUNT_ESI 0x2002 /* same as 0x200 */
+#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x2003 /* same as 0x201 */
+
+#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 0x2004 /* 1.2 */
+# define DP_RX_GTC_MSTR_REQ_STATUS_CHANGE (1 << 0)
+# define DP_LOCK_ACQUISITION_REQUEST (1 << 1)
+# define DP_CEC_IRQ (1 << 2)
+
+#define DP_LINK_SERVICE_IRQ_VECTOR_ESI0 0x2005 /* 1.2 */
+# define RX_CAP_CHANGED (1 << 0)
+# define LINK_STATUS_CHANGED (1 << 1)
+# define STREAM_STATUS_CHANGED (1 << 2)
+# define HDMI_LINK_STATUS_CHANGED (1 << 3)
+# define CONNECTED_OFF_ENTRY_REQUESTED (1 << 4)
+
+#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */
+# define DP_PSR_LINK_CRC_ERROR (1 << 0)
+# define DP_PSR_RFB_STORAGE_ERROR (1 << 1)
+# define DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR (1 << 2) /* eDP 1.4 */
+
+#define DP_PSR_ESI 0x2007 /* XXX 1.2? */
+# define DP_PSR_CAPS_CHANGE (1 << 0)
+
+#define DP_PSR_STATUS 0x2008 /* XXX 1.2? */
+# define DP_PSR_SINK_INACTIVE 0
+# define DP_PSR_SINK_ACTIVE_SRC_SYNCED 1
+# define DP_PSR_SINK_ACTIVE_RFB 2
+# define DP_PSR_SINK_ACTIVE_SINK_SYNCED 3
+# define DP_PSR_SINK_ACTIVE_RESYNC 4
+# define DP_PSR_SINK_INTERNAL_ERROR 7
+# define DP_PSR_SINK_STATE_MASK 0x07
+
+#define DP_SYNCHRONIZATION_LATENCY_IN_SINK 0x2009 /* eDP 1.4 */
+# define DP_MAX_RESYNC_FRAME_COUNT_MASK (0xf << 0)
+# define DP_MAX_RESYNC_FRAME_COUNT_SHIFT 0
+# define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_MASK (0xf << 4)
+# define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_SHIFT 4
+
+#define DP_LAST_RECEIVED_PSR_SDP 0x200a /* eDP 1.2 */
+# define DP_PSR_STATE_BIT (1 << 0) /* eDP 1.2 */
+# define DP_UPDATE_RFB_BIT (1 << 1) /* eDP 1.2 */
+# define DP_CRC_VALID_BIT (1 << 2) /* eDP 1.2 */
+# define DP_SU_VALID (1 << 3) /* eDP 1.4 */
+# define DP_FIRST_SCAN_LINE_SU_REGION (1 << 4) /* eDP 1.4 */
+# define DP_LAST_SCAN_LINE_SU_REGION (1 << 5) /* eDP 1.4 */
+# define DP_Y_COORDINATE_VALID (1 << 6) /* eDP 1.4a */
+
+#define DP_RECEIVER_ALPM_STATUS 0x200b /* eDP 1.4 */
+# define DP_ALPM_LOCK_TIMEOUT_ERROR (1 << 0)
+
+#define DP_LANE0_1_STATUS_ESI 0x200c /* status same as 0x202 */
+#define DP_LANE2_3_STATUS_ESI 0x200d /* status same as 0x203 */
+#define DP_LANE_ALIGN_STATUS_UPDATED_ESI 0x200e /* status same as 0x204 */
+#define DP_SINK_STATUS_ESI 0x200f /* status same as 0x205 */
+
+/* Extended Receiver Capability: See DP_DPCD_REV for definitions */
+#define DP_DP13_DPCD_REV 0x2200
+
+#define DP_DPRX_FEATURE_ENUMERATION_LIST 0x2210 /* DP 1.3 */
+# define DP_GTC_CAP (1 << 0) /* DP 1.3 */
+# define DP_SST_SPLIT_SDP_CAP (1 << 1) /* DP 1.4 */
+# define DP_AV_SYNC_CAP (1 << 2) /* DP 1.3 */
+# define DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED (1 << 3) /* DP 1.3 */
+# define DP_VSC_EXT_VESA_SDP_SUPPORTED (1 << 4) /* DP 1.4 */
+# define DP_VSC_EXT_VESA_SDP_CHAINING_SUPPORTED (1 << 5) /* DP 1.4 */
+# define DP_VSC_EXT_CEA_SDP_SUPPORTED (1 << 6) /* DP 1.4 */
+# define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED (1 << 7) /* DP 1.4 */
+
+#define DP_128B132B_SUPPORTED_LINK_RATES 0x2215 /* 2.0 */
+# define DP_UHBR10 (1 << 0)
+# define DP_UHBR20 (1 << 1)
+# define DP_UHBR13_5 (1 << 2)
+
+#define DP_128B132B_TRAINING_AUX_RD_INTERVAL 0x2216 /* 2.0 */
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_1MS_UNIT (1 << 7)
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK 0x7f
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_400_US 0x00
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_4_MS 0x01
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_8_MS 0x02
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_12_MS 0x03
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_16_MS 0x04
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_32_MS 0x05
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_64_MS 0x06
+
+#define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0x2230
+#define DP_TEST_264BIT_CUSTOM_PATTERN_263_256 0x2250
+
+/* DSC Extended Capability Branch Total DSC Resources */
+#define DP_DSC_SUPPORT_AND_DSC_DECODER_COUNT 0x2260 /* 2.0 */
+# define DP_DSC_DECODER_COUNT_MASK (0b111 << 5)
+# define DP_DSC_DECODER_COUNT_SHIFT 5
+#define DP_DSC_MAX_SLICE_COUNT_AND_AGGREGATION_0 0x2270 /* 2.0 */
+# define DP_DSC_DECODER_0_MAXIMUM_SLICE_COUNT_MASK (1 << 0)
+# define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_MASK (0b111 << 1)
+# define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_SHIFT 1
+
+/* Protocol Converter Extension */
+/* HDMI CEC tunneling over AUX DP 1.3 section 5.3.3.3.1 DPCD 1.4+ */
+#define DP_CEC_TUNNELING_CAPABILITY 0x3000
+# define DP_CEC_TUNNELING_CAPABLE (1 << 0)
+# define DP_CEC_SNOOPING_CAPABLE (1 << 1)
+# define DP_CEC_MULTIPLE_LA_CAPABLE (1 << 2)
+
+#define DP_CEC_TUNNELING_CONTROL 0x3001
+# define DP_CEC_TUNNELING_ENABLE (1 << 0)
+# define DP_CEC_SNOOPING_ENABLE (1 << 1)
+
+#define DP_CEC_RX_MESSAGE_INFO 0x3002
+# define DP_CEC_RX_MESSAGE_LEN_MASK (0xf << 0)
+# define DP_CEC_RX_MESSAGE_LEN_SHIFT 0
+# define DP_CEC_RX_MESSAGE_HPD_STATE (1 << 4)
+# define DP_CEC_RX_MESSAGE_HPD_LOST (1 << 5)
+# define DP_CEC_RX_MESSAGE_ACKED (1 << 6)
+# define DP_CEC_RX_MESSAGE_ENDED (1 << 7)
+
+#define DP_CEC_TX_MESSAGE_INFO 0x3003
+# define DP_CEC_TX_MESSAGE_LEN_MASK (0xf << 0)
+# define DP_CEC_TX_MESSAGE_LEN_SHIFT 0
+# define DP_CEC_TX_RETRY_COUNT_MASK (0x7 << 4)
+# define DP_CEC_TX_RETRY_COUNT_SHIFT 4
+# define DP_CEC_TX_MESSAGE_SEND (1 << 7)
+
+#define DP_CEC_TUNNELING_IRQ_FLAGS 0x3004
+# define DP_CEC_RX_MESSAGE_INFO_VALID (1 << 0)
+# define DP_CEC_RX_MESSAGE_OVERFLOW (1 << 1)
+# define DP_CEC_TX_MESSAGE_SENT (1 << 4)
+# define DP_CEC_TX_LINE_ERROR (1 << 5)
+# define DP_CEC_TX_ADDRESS_NACK_ERROR (1 << 6)
+# define DP_CEC_TX_DATA_NACK_ERROR (1 << 7)
+
+#define DP_CEC_LOGICAL_ADDRESS_MASK 0x300E /* word together with 0x300F */
+# define DP_CEC_LOGICAL_ADDRESS_0 (1 << 0)
+# define DP_CEC_LOGICAL_ADDRESS_1 (1 << 1)
+# define DP_CEC_LOGICAL_ADDRESS_2 (1 << 2)
+# define DP_CEC_LOGICAL_ADDRESS_3 (1 << 3)
+# define DP_CEC_LOGICAL_ADDRESS_4 (1 << 4)
+# define DP_CEC_LOGICAL_ADDRESS_5 (1 << 5)
+# define DP_CEC_LOGICAL_ADDRESS_6 (1 << 6)
+# define DP_CEC_LOGICAL_ADDRESS_7 (1 << 7)
+#define DP_CEC_LOGICAL_ADDRESS_MASK_2 0x300F /* word together with 0x300E */
+# define DP_CEC_LOGICAL_ADDRESS_8 (1 << 0)
+# define DP_CEC_LOGICAL_ADDRESS_9 (1 << 1)
+# define DP_CEC_LOGICAL_ADDRESS_10 (1 << 2)
+# define DP_CEC_LOGICAL_ADDRESS_11 (1 << 3)
+# define DP_CEC_LOGICAL_ADDRESS_12 (1 << 4)
+# define DP_CEC_LOGICAL_ADDRESS_13 (1 << 5)
+# define DP_CEC_LOGICAL_ADDRESS_14 (1 << 6)
+# define DP_CEC_LOGICAL_ADDRESS_15 (1 << 7)
+
+#define DP_CEC_RX_MESSAGE_BUFFER 0x3010
+#define DP_CEC_TX_MESSAGE_BUFFER 0x3020
+#define DP_CEC_MESSAGE_BUFFER_LENGTH 0x10
+
+/* PCON CONFIGURE-1 FRL FOR HDMI SINK */
+#define DP_PCON_HDMI_LINK_CONFIG_1 0x305A
+# define DP_PCON_ENABLE_MAX_FRL_BW (7 << 0)
+# define DP_PCON_ENABLE_MAX_BW_0GBPS 0
+# define DP_PCON_ENABLE_MAX_BW_9GBPS 1
+# define DP_PCON_ENABLE_MAX_BW_18GBPS 2
+# define DP_PCON_ENABLE_MAX_BW_24GBPS 3
+# define DP_PCON_ENABLE_MAX_BW_32GBPS 4
+# define DP_PCON_ENABLE_MAX_BW_40GBPS 5
+# define DP_PCON_ENABLE_MAX_BW_48GBPS 6
+# define DP_PCON_ENABLE_SOURCE_CTL_MODE (1 << 3)
+# define DP_PCON_ENABLE_CONCURRENT_LINK (1 << 4)
+# define DP_PCON_ENABLE_SEQUENTIAL_LINK (0 << 4)
+# define DP_PCON_ENABLE_LINK_FRL_MODE (1 << 5)
+# define DP_PCON_ENABLE_HPD_READY (1 << 6)
+# define DP_PCON_ENABLE_HDMI_LINK (1 << 7)
+
+/* PCON CONFIGURE-2 FRL FOR HDMI SINK */
+#define DP_PCON_HDMI_LINK_CONFIG_2 0x305B
+# define DP_PCON_MAX_LINK_BW_MASK (0x3F << 0)
+# define DP_PCON_FRL_BW_MASK_9GBPS (1 << 0)
+# define DP_PCON_FRL_BW_MASK_18GBPS (1 << 1)
+# define DP_PCON_FRL_BW_MASK_24GBPS (1 << 2)
+# define DP_PCON_FRL_BW_MASK_32GBPS (1 << 3)
+# define DP_PCON_FRL_BW_MASK_40GBPS (1 << 4)
+# define DP_PCON_FRL_BW_MASK_48GBPS (1 << 5)
+# define DP_PCON_FRL_LINK_TRAIN_EXTENDED (1 << 6)
+# define DP_PCON_FRL_LINK_TRAIN_NORMAL (0 << 6)
+
+/* PCON HDMI LINK STATUS */
+#define DP_PCON_HDMI_TX_LINK_STATUS 0x303B
+# define DP_PCON_HDMI_TX_LINK_ACTIVE (1 << 0)
+# define DP_PCON_FRL_READY (1 << 1)
+
+/* PCON HDMI POST FRL STATUS */
+#define DP_PCON_HDMI_POST_FRL_STATUS 0x3036
+# define DP_PCON_HDMI_LINK_MODE (1 << 0)
+# define DP_PCON_HDMI_MODE_TMDS 0
+# define DP_PCON_HDMI_MODE_FRL 1
+# define DP_PCON_HDMI_FRL_TRAINED_BW (0x3F << 1)
+# define DP_PCON_FRL_TRAINED_BW_9GBPS (1 << 1)
+# define DP_PCON_FRL_TRAINED_BW_18GBPS (1 << 2)
+# define DP_PCON_FRL_TRAINED_BW_24GBPS (1 << 3)
+# define DP_PCON_FRL_TRAINED_BW_32GBPS (1 << 4)
+# define DP_PCON_FRL_TRAINED_BW_40GBPS (1 << 5)
+# define DP_PCON_FRL_TRAINED_BW_48GBPS (1 << 6)
+
+#define DP_PROTOCOL_CONVERTER_CONTROL_0 0x3050 /* DP 1.3 */
+# define DP_HDMI_DVI_OUTPUT_CONFIG (1 << 0) /* DP 1.3 */
+#define DP_PROTOCOL_CONVERTER_CONTROL_1 0x3051 /* DP 1.3 */
+# define DP_CONVERSION_TO_YCBCR420_ENABLE (1 << 0) /* DP 1.3 */
+# define DP_HDMI_EDID_PROCESSING_DISABLE (1 << 1) /* DP 1.4 */
+# define DP_HDMI_AUTONOMOUS_SCRAMBLING_DISABLE (1 << 2) /* DP 1.4 */
+# define DP_HDMI_FORCE_SCRAMBLING (1 << 3) /* DP 1.4 */
+#define DP_PROTOCOL_CONVERTER_CONTROL_2 0x3052 /* DP 1.3 */
+# define DP_CONVERSION_TO_YCBCR422_ENABLE (1 << 0) /* DP 1.3 */
+# define DP_PCON_ENABLE_DSC_ENCODER (1 << 1)
+# define DP_PCON_ENCODER_PPS_OVERRIDE_MASK (0x3 << 2)
+# define DP_PCON_ENC_PPS_OVERRIDE_DISABLED 0
+# define DP_PCON_ENC_PPS_OVERRIDE_EN_PARAMS 1
+# define DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER 2
+# define DP_CONVERSION_RGB_YCBCR_MASK (7 << 4)
+# define DP_CONVERSION_BT601_RGB_YCBCR_ENABLE (1 << 4)
+# define DP_CONVERSION_BT709_RGB_YCBCR_ENABLE (1 << 5)
+# define DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE (1 << 6)
+
+/* PCON Downstream HDMI ERROR Status per Lane */
+#define DP_PCON_HDMI_ERROR_STATUS_LN0 0x3037
+#define DP_PCON_HDMI_ERROR_STATUS_LN1 0x3038
+#define DP_PCON_HDMI_ERROR_STATUS_LN2 0x3039
+#define DP_PCON_HDMI_ERROR_STATUS_LN3 0x303A
+# define DP_PCON_HDMI_ERROR_COUNT_MASK (0x7 << 0)
+# define DP_PCON_HDMI_ERROR_COUNT_THREE_PLUS (1 << 0)
+# define DP_PCON_HDMI_ERROR_COUNT_TEN_PLUS (1 << 1)
+# define DP_PCON_HDMI_ERROR_COUNT_HUNDRED_PLUS (1 << 2)
+
+/* PCON HDMI CONFIG PPS Override Buffer
+ * Valid offsets to be added to the base: 0-127
+ */
+#define DP_PCON_HDMI_PPS_OVERRIDE_BASE 0x3100
+
+/* PCON HDMI CONFIG PPS Override Parameter: Slice height
+ * Offset 0: 8 LSBs of the slice height.
+ * Offset 1: 8 MSBs of the slice height.
+ */
+#define DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT 0x3180
+
+/* PCON HDMI CONFIG PPS Override Parameter: Slice width
+ * Offset 0: 8 LSBs of the slice width.
+ * Offset 1: 8 MSBs of the slice width.
+ */
+#define DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH 0x3182
+
+/* PCON HDMI CONFIG PPS Override Parameter: bits_per_pixel
+ * Offset 0: 8 LSBs of bits_per_pixel.
+ * Offset 1: 2 MSBs of bits_per_pixel.
+ */
+#define DP_PCON_HDMI_PPS_OVRD_BPP 0x3184
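+
+/*
+ * Worked example: a slice height of 1080 (0x438) is programmed by writing
+ * 0x38 to offset 0 and 0x04 to offset 1 of
+ * DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT, i.e. LSB first.
+ */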
+
+/* HDCP 1.3 and HDCP 2.2 */
+#define DP_AUX_HDCP_BKSV 0x68000
+#define DP_AUX_HDCP_RI_PRIME 0x68005
+#define DP_AUX_HDCP_AKSV 0x68007
+#define DP_AUX_HDCP_AN 0x6800C
+#define DP_AUX_HDCP_V_PRIME(h) (0x68014 + (h) * 4)
+#define DP_AUX_HDCP_BCAPS 0x68028
+# define DP_BCAPS_REPEATER_PRESENT BIT(1)
+# define DP_BCAPS_HDCP_CAPABLE BIT(0)
+#define DP_AUX_HDCP_BSTATUS 0x68029
+# define DP_BSTATUS_REAUTH_REQ BIT(3)
+# define DP_BSTATUS_LINK_FAILURE BIT(2)
+# define DP_BSTATUS_R0_PRIME_READY BIT(1)
+# define DP_BSTATUS_READY BIT(0)
+#define DP_AUX_HDCP_BINFO 0x6802A
+#define DP_AUX_HDCP_KSV_FIFO 0x6802C
+#define DP_AUX_HDCP_AINFO 0x6803B
+
+/* DP HDCP2.2 parameter offsets in DPCD address space */
+#define DP_HDCP_2_2_REG_RTX_OFFSET 0x69000
+#define DP_HDCP_2_2_REG_TXCAPS_OFFSET 0x69008
+#define DP_HDCP_2_2_REG_CERT_RX_OFFSET 0x6900B
+#define DP_HDCP_2_2_REG_RRX_OFFSET 0x69215
+#define DP_HDCP_2_2_REG_RX_CAPS_OFFSET 0x6921D
+#define DP_HDCP_2_2_REG_EKPUB_KM_OFFSET 0x69220
+#define DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET 0x692A0
+#define DP_HDCP_2_2_REG_M_OFFSET 0x692B0
+#define DP_HDCP_2_2_REG_HPRIME_OFFSET 0x692C0
+#define DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET 0x692E0
+#define DP_HDCP_2_2_REG_RN_OFFSET 0x692F0
+#define DP_HDCP_2_2_REG_LPRIME_OFFSET 0x692F8
+#define DP_HDCP_2_2_REG_EDKEY_KS_OFFSET 0x69318
+#define DP_HDCP_2_2_REG_RIV_OFFSET 0x69328
+#define DP_HDCP_2_2_REG_RXINFO_OFFSET 0x69330
+#define DP_HDCP_2_2_REG_SEQ_NUM_V_OFFSET 0x69332
+#define DP_HDCP_2_2_REG_VPRIME_OFFSET 0x69335
+#define DP_HDCP_2_2_REG_RECV_ID_LIST_OFFSET 0x69345
+#define DP_HDCP_2_2_REG_V_OFFSET 0x693E0
+#define DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET 0x693F0
+#define DP_HDCP_2_2_REG_K_OFFSET 0x693F3
+#define DP_HDCP_2_2_REG_STREAM_ID_TYPE_OFFSET 0x693F5
+#define DP_HDCP_2_2_REG_MPRIME_OFFSET 0x69473
+#define DP_HDCP_2_2_REG_RXSTATUS_OFFSET 0x69493
+#define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET 0x69494
+#define DP_HDCP_2_2_REG_DBG_OFFSET 0x69518
+
+/* LTTPR: Link Training (LT)-tunable PHY Repeaters */
+#define DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV 0xf0000 /* 1.3 */
+#define DP_MAX_LINK_RATE_PHY_REPEATER 0xf0001 /* 1.4a */
+#define DP_PHY_REPEATER_CNT 0xf0002 /* 1.3 */
+#define DP_PHY_REPEATER_MODE 0xf0003 /* 1.3 */
+#define DP_MAX_LANE_COUNT_PHY_REPEATER 0xf0004 /* 1.4a */
+#define DP_Repeater_FEC_CAPABILITY 0xf0004 /* 1.4 */
+#define DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT 0xf0005 /* 1.4a */
+#define DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER 0xf0006 /* 2.0 */
+# define DP_PHY_REPEATER_128B132B_SUPPORTED (1 << 0)
+/* See DP_128B132B_SUPPORTED_LINK_RATES for values */
+#define DP_PHY_REPEATER_128B132B_RATES 0xf0007 /* 2.0 */
+#define DP_PHY_REPEATER_EQ_DONE 0xf0008 /* 2.0 E11 */
+
+enum drm_dp_phy {
+ DP_PHY_DPRX,
+
+ DP_PHY_LTTPR1,
+ DP_PHY_LTTPR2,
+ DP_PHY_LTTPR3,
+ DP_PHY_LTTPR4,
+ DP_PHY_LTTPR5,
+ DP_PHY_LTTPR6,
+ DP_PHY_LTTPR7,
+ DP_PHY_LTTPR8,
+
+ DP_MAX_LTTPR_COUNT = DP_PHY_LTTPR8,
+};
+
+#define DP_PHY_LTTPR(i) (DP_PHY_LTTPR1 + (i))
+
+#define __DP_LTTPR1_BASE 0xf0010 /* 1.3 */
+#define __DP_LTTPR2_BASE 0xf0060 /* 1.3 */
+#define DP_LTTPR_BASE(dp_phy) \
+ (__DP_LTTPR1_BASE + (__DP_LTTPR2_BASE - __DP_LTTPR1_BASE) * \
+ ((dp_phy) - DP_PHY_LTTPR1))
+
+#define DP_LTTPR_REG(dp_phy, lttpr1_reg) \
+ (DP_LTTPR_BASE(dp_phy) - DP_LTTPR_BASE(DP_PHY_LTTPR1) + (lttpr1_reg))
+
+#define DP_TRAINING_PATTERN_SET_PHY_REPEATER1 0xf0010 /* 1.3 */
+#define DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy) \
+ DP_LTTPR_REG(dp_phy, DP_TRAINING_PATTERN_SET_PHY_REPEATER1)
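+
+/*
+ * Worked example: per-LTTPR register blocks are 0x50 bytes apart, so
+ * DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR2)
+ *   = 0xf0010 + 0x50 * (DP_PHY_LTTPR2 - DP_PHY_LTTPR1) = 0xf0060.
+ */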
+
+#define DP_TRAINING_LANE0_SET_PHY_REPEATER1 0xf0011 /* 1.3 */
+#define DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy) \
+ DP_LTTPR_REG(dp_phy, DP_TRAINING_LANE0_SET_PHY_REPEATER1)
+
+#define DP_TRAINING_LANE1_SET_PHY_REPEATER1 0xf0012 /* 1.3 */
+#define DP_TRAINING_LANE2_SET_PHY_REPEATER1 0xf0013 /* 1.3 */
+#define DP_TRAINING_LANE3_SET_PHY_REPEATER1 0xf0014 /* 1.3 */
+#define DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 0xf0020 /* 1.4a */
+#define DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy) \
+ DP_LTTPR_REG(dp_phy, DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1)
+
+#define DP_TRANSMITTER_CAPABILITY_PHY_REPEATER1 0xf0021 /* 1.4a */
+# define DP_VOLTAGE_SWING_LEVEL_3_SUPPORTED BIT(0)
+# define DP_PRE_EMPHASIS_LEVEL_3_SUPPORTED BIT(1)
+
+#define DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 0xf0022 /* 2.0 */
+#define DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy) \
+ DP_LTTPR_REG(dp_phy, DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1)
+/* see DP_128B132B_TRAINING_AUX_RD_INTERVAL for values */
+
+#define DP_LANE0_1_STATUS_PHY_REPEATER1 0xf0030 /* 1.3 */
+#define DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy) \
+ DP_LTTPR_REG(dp_phy, DP_LANE0_1_STATUS_PHY_REPEATER1)
+
+#define DP_LANE2_3_STATUS_PHY_REPEATER1 0xf0031 /* 1.3 */
+
+#define DP_LANE_ALIGN_STATUS_UPDATED_PHY_REPEATER1 0xf0032 /* 1.3 */
+#define DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 0xf0033 /* 1.3 */
+#define DP_ADJUST_REQUEST_LANE2_3_PHY_REPEATER1 0xf0034 /* 1.3 */
+#define DP_SYMBOL_ERROR_COUNT_LANE0_PHY_REPEATER1 0xf0035 /* 1.3 */
+#define DP_SYMBOL_ERROR_COUNT_LANE1_PHY_REPEATER1 0xf0037 /* 1.3 */
+#define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1 0xf0039 /* 1.3 */
+#define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1 0xf003b /* 1.3 */
+
+#define __DP_FEC1_BASE 0xf0290 /* 1.4 */
+#define __DP_FEC2_BASE 0xf0298 /* 1.4 */
+#define DP_FEC_BASE(dp_phy) \
+ (__DP_FEC1_BASE + ((__DP_FEC2_BASE - __DP_FEC1_BASE) * \
+ ((dp_phy) - DP_PHY_LTTPR1)))
+
+#define DP_FEC_REG(dp_phy, fec1_reg) \
+ (DP_FEC_BASE(dp_phy) - DP_FEC_BASE(DP_PHY_LTTPR1) + (fec1_reg))
+
+#define DP_FEC_STATUS_PHY_REPEATER1 0xf0290 /* 1.4 */
+#define DP_FEC_STATUS_PHY_REPEATER(dp_phy) \
+ DP_FEC_REG(dp_phy, DP_FEC_STATUS_PHY_REPEATER1)
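+
+/*
+ * Worked example: per-LTTPR FEC blocks are 8 bytes apart, so
+ * DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR3)
+ *   = 0xf0290 + 8 * (DP_PHY_LTTPR3 - DP_PHY_LTTPR1) = 0xf02a0.
+ */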
+
+#define DP_FEC_ERROR_COUNT_PHY_REPEATER1 0xf0291 /* 1.4 */
+#define DP_FEC_CAPABILITY_PHY_REPEATER1 0xf0294 /* 1.4a */
+
+#define DP_LTTPR_MAX_ADD 0xf02ff /* 1.4 */
+
+#define DP_DPCD_MAX_ADD 0xfffff /* 1.4 */
+
+/* Repeater modes */
+#define DP_PHY_REPEATER_MODE_TRANSPARENT 0x55 /* 1.3 */
+#define DP_PHY_REPEATER_MODE_NON_TRANSPARENT 0xaa /* 1.3 */
+
+/* DP HDCP message start offsets in DPCD address space */
+#define DP_HDCP_2_2_AKE_INIT_OFFSET DP_HDCP_2_2_REG_RTX_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_CERT_OFFSET DP_HDCP_2_2_REG_CERT_RX_OFFSET
+#define DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET DP_HDCP_2_2_REG_EKPUB_KM_OFFSET
+#define DP_HDCP_2_2_AKE_STORED_KM_OFFSET DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET DP_HDCP_2_2_REG_HPRIME_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET \
+ DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET
+#define DP_HDCP_2_2_LC_INIT_OFFSET DP_HDCP_2_2_REG_RN_OFFSET
+#define DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET DP_HDCP_2_2_REG_LPRIME_OFFSET
+#define DP_HDCP_2_2_SKE_SEND_EKS_OFFSET DP_HDCP_2_2_REG_EDKEY_KS_OFFSET
+#define DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET DP_HDCP_2_2_REG_RXINFO_OFFSET
+#define DP_HDCP_2_2_REP_SEND_ACK_OFFSET DP_HDCP_2_2_REG_V_OFFSET
+#define DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET
+#define DP_HDCP_2_2_REP_STREAM_READY_OFFSET DP_HDCP_2_2_REG_MPRIME_OFFSET
+
+#define HDCP_2_2_DP_RXSTATUS_LEN 1
+#define HDCP_2_2_DP_RXSTATUS_READY(x) ((x) & BIT(0))
+#define HDCP_2_2_DP_RXSTATUS_H_PRIME(x) ((x) & BIT(1))
+#define HDCP_2_2_DP_RXSTATUS_PAIRING(x) ((x) & BIT(2))
+#define HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3))
+#define HDCP_2_2_DP_RXSTATUS_LINK_FAILED(x) ((x) & BIT(4))
+
+/* DP 1.2 Sideband message defines */
+/* peer device type - DP 1.2a Table 2-92 */
+#define DP_PEER_DEVICE_NONE 0x0
+#define DP_PEER_DEVICE_SOURCE_OR_SST 0x1
+#define DP_PEER_DEVICE_MST_BRANCHING 0x2
+#define DP_PEER_DEVICE_SST_SINK 0x3
+#define DP_PEER_DEVICE_DP_LEGACY_CONV 0x4
+
+/* DP 1.2 MST sideband request names DP 1.2a Table 2-80 */
+#define DP_GET_MSG_TRANSACTION_VERSION 0x00 /* DP 1.3 */
+#define DP_LINK_ADDRESS 0x01
+#define DP_CONNECTION_STATUS_NOTIFY 0x02
+#define DP_ENUM_PATH_RESOURCES 0x10
+#define DP_ALLOCATE_PAYLOAD 0x11
+#define DP_QUERY_PAYLOAD 0x12
+#define DP_RESOURCE_STATUS_NOTIFY 0x13
+#define DP_CLEAR_PAYLOAD_ID_TABLE 0x14
+#define DP_REMOTE_DPCD_READ 0x20
+#define DP_REMOTE_DPCD_WRITE 0x21
+#define DP_REMOTE_I2C_READ 0x22
+#define DP_REMOTE_I2C_WRITE 0x23
+#define DP_POWER_UP_PHY 0x24
+#define DP_POWER_DOWN_PHY 0x25
+#define DP_SINK_EVENT_NOTIFY 0x30
+#define DP_QUERY_STREAM_ENC_STATUS 0x38
+#define DP_QUERY_STREAM_ENC_STATUS_STATE_NO_EXIST 0
+#define DP_QUERY_STREAM_ENC_STATUS_STATE_INACTIVE 1
+#define DP_QUERY_STREAM_ENC_STATUS_STATE_ACTIVE 2
+
+/* DP 1.2 MST sideband reply types */
+#define DP_SIDEBAND_REPLY_ACK 0x00
+#define DP_SIDEBAND_REPLY_NAK 0x01
+
+/* DP 1.2 MST sideband nak reasons - DP 1.2a Table 2-84 */
+#define DP_NAK_WRITE_FAILURE 0x01
+#define DP_NAK_INVALID_READ 0x02
+#define DP_NAK_CRC_FAILURE 0x03
+#define DP_NAK_BAD_PARAM 0x04
+#define DP_NAK_DEFER 0x05
+#define DP_NAK_LINK_FAILURE 0x06
+#define DP_NAK_NO_RESOURCES 0x07
+#define DP_NAK_DPCD_FAIL 0x08
+#define DP_NAK_I2C_NAK 0x09
+#define DP_NAK_ALLOCATE_FAIL 0x0a
+
+#define MODE_I2C_START 1
+#define MODE_I2C_WRITE 2
+#define MODE_I2C_READ 4
+#define MODE_I2C_STOP 8
+
+/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */
+#define DP_MST_PHYSICAL_PORT_0 0
+#define DP_MST_LOGICAL_PORT_0 8
+
+#define DP_LINK_CONSTANT_N_VALUE 0x8000
+#define DP_LINK_STATUS_SIZE 6
+
+#define DP_BRANCH_OUI_HEADER_SIZE 0xc
+#define DP_RECEIVER_CAP_SIZE 0xf
+#define DP_DSC_RECEIVER_CAP_SIZE 0xf
+#define EDP_PSR_RECEIVER_CAP_SIZE 2
+#define EDP_DISPLAY_CTL_CAP_SIZE 3
+#define DP_LTTPR_COMMON_CAP_SIZE 8
+#define DP_LTTPR_PHY_CAP_SIZE 3
+
+#define DP_SDP_AUDIO_TIMESTAMP 0x01
+#define DP_SDP_AUDIO_STREAM 0x02
+#define DP_SDP_EXTENSION 0x04 /* DP 1.1 */
+#define DP_SDP_AUDIO_COPYMANAGEMENT 0x05 /* DP 1.2 */
+#define DP_SDP_ISRC 0x06 /* DP 1.2 */
+#define DP_SDP_VSC 0x07 /* DP 1.2 */
+#define DP_SDP_CAMERA_GENERIC(i) (0x08 + (i)) /* 0-7, DP 1.3 */
+#define DP_SDP_PPS 0x10 /* DP 1.4 */
+#define DP_SDP_VSC_EXT_VESA 0x20 /* DP 1.4 */
+#define DP_SDP_VSC_EXT_CEA 0x21 /* DP 1.4 */
+/* 0x80+ CEA-861 infoframe types */
+
+#define DP_SDP_AUDIO_INFOFRAME_HB2 0x1b
+
+/**
+ * struct dp_sdp_header - DP secondary data packet header
+ * @HB0: Secondary Data Packet ID
+ * @HB1: Secondary Data Packet Type
+ * @HB2: Secondary Data Packet Specific header, Byte 0
+ * @HB3: Secondary Data packet Specific header, Byte 1
+ */
+struct dp_sdp_header {
+ u8 HB0;
+ u8 HB1;
+ u8 HB2;
+ u8 HB3;
+} __packed;
+
+#define EDP_SDP_HEADER_REVISION_MASK 0x1F
+#define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES 0x1F
+#define DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 0x7F
+
+/**
+ * struct dp_sdp - DP secondary data packet
+ * @sdp_header: DP secondary data packet header
+ * @db: DP secondary data packet data blocks
+ * VSC SDP Payload for PSR
+ * db[0]: Stereo Interface
+ * db[1]: 0 - PSR State; 1 - Update RFB; 2 - CRC Valid
+ * db[2]: CRC value bits 7:0 of the R or Cr component
+ * db[3]: CRC value bits 15:8 of the R or Cr component
+ * db[4]: CRC value bits 7:0 of the G or Y component
+ * db[5]: CRC value bits 15:8 of the G or Y component
+ * db[6]: CRC value bits 7:0 of the B or Cb component
+ * db[7]: CRC value bits 15:8 of the B or Cb component
+ * db[8] - db[31]: Reserved
+ * VSC SDP Payload for Pixel Encoding/Colorimetry Format
+ * db[0] - db[15]: Reserved
+ * db[16]: Pixel Encoding and Colorimetry Formats
+ * db[17]: Dynamic Range and Component Bit Depth
+ * db[18]: Content Type
+ * db[19] - db[31]: Reserved
+ */
+struct dp_sdp {
+ struct dp_sdp_header sdp_header;
+ u8 db[32];
+} __packed;
+
+#define EDP_VSC_PSR_STATE_ACTIVE (1<<0)
+#define EDP_VSC_PSR_UPDATE_RFB (1<<1)
+#define EDP_VSC_PSR_CRC_VALUES_VALID (1<<2)
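+
+/*
+ * Illustrative sketch, not part of the original patch: packing a VSC SDP
+ * that flags PSR active with valid CRCs, following the &struct dp_sdp
+ * field layout documented above. The function name is made up; the header
+ * values (revision 0x2, 8 valid payload bytes) are the ones the eDP PSR
+ * flavour of the VSC SDP uses.
+ */
+static inline void example_fill_psr_vsc(struct dp_sdp *sdp)
+{
+	*sdp = (struct dp_sdp) { 0 };
+	sdp->sdp_header.HB0 = 0;		/* Secondary Data Packet ID */
+	sdp->sdp_header.HB1 = DP_SDP_VSC;	/* Secondary Data Packet Type */
+	sdp->sdp_header.HB2 = 0x2;		/* revision used for PSR */
+	sdp->sdp_header.HB3 = 0x8;		/* number of valid payload bytes */
+	sdp->db[1] = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID;
+}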
+
+/**
+ * enum dp_pixelformat - drm DP Pixel encoding formats
+ *
+ * This enum is used to indicate DP VSC SDP Pixel encoding formats.
+ * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through
+ * DB18]
+ *
+ * @DP_PIXELFORMAT_RGB: RGB pixel encoding format
+ * @DP_PIXELFORMAT_YUV444: YCbCr 4:4:4 pixel encoding format
+ * @DP_PIXELFORMAT_YUV422: YCbCr 4:2:2 pixel encoding format
+ * @DP_PIXELFORMAT_YUV420: YCbCr 4:2:0 pixel encoding format
+ * @DP_PIXELFORMAT_Y_ONLY: Y Only pixel encoding format
+ * @DP_PIXELFORMAT_RAW: RAW pixel encoding format
+ * @DP_PIXELFORMAT_RESERVED: Reserved pixel encoding format
+ */
+enum dp_pixelformat {
+ DP_PIXELFORMAT_RGB = 0,
+ DP_PIXELFORMAT_YUV444 = 0x1,
+ DP_PIXELFORMAT_YUV422 = 0x2,
+ DP_PIXELFORMAT_YUV420 = 0x3,
+ DP_PIXELFORMAT_Y_ONLY = 0x4,
+ DP_PIXELFORMAT_RAW = 0x5,
+ DP_PIXELFORMAT_RESERVED = 0x6,
+};
+
+/**
+ * enum dp_colorimetry - drm DP Colorimetry formats
+ *
+ * This enum is used to indicate DP VSC SDP Colorimetry formats.
+ * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through
+ * DB18] and the enum member names follow the DRM_MODE_COLORIMETRY definitions.
+ * Note that several members share a raw value: which interpretation applies
+ * depends on the pixel encoding format in use.
+ *
+ * @DP_COLORIMETRY_DEFAULT: sRGB (IEC 61966-2-1) or
+ * ITU-R BT.601 colorimetry format
+ * @DP_COLORIMETRY_RGB_WIDE_FIXED: RGB wide gamut fixed point colorimetry format
+ * @DP_COLORIMETRY_BT709_YCC: ITU-R BT.709 colorimetry format
+ * @DP_COLORIMETRY_RGB_WIDE_FLOAT: RGB wide gamut floating point
+ * (scRGB (IEC 61966-2-2)) colorimetry format
+ * @DP_COLORIMETRY_XVYCC_601: xvYCC601 colorimetry format
+ * @DP_COLORIMETRY_OPRGB: OpRGB colorimetry format
+ * @DP_COLORIMETRY_XVYCC_709: xvYCC709 colorimetry format
+ * @DP_COLORIMETRY_DCI_P3_RGB: DCI-P3 (SMPTE RP 431-2) colorimetry format
+ * @DP_COLORIMETRY_SYCC_601: sYCC601 colorimetry format
+ * @DP_COLORIMETRY_RGB_CUSTOM: RGB Custom Color Profile colorimetry format
+ * @DP_COLORIMETRY_OPYCC_601: opYCC601 colorimetry format
+ * @DP_COLORIMETRY_BT2020_RGB: ITU-R BT.2020 R' G' B' colorimetry format
+ * @DP_COLORIMETRY_BT2020_CYCC: ITU-R BT.2020 Y'c C'bc C'rc colorimetry format
+ * @DP_COLORIMETRY_BT2020_YCC: ITU-R BT.2020 Y' C'b C'r colorimetry format
+ */
+enum dp_colorimetry {
+ DP_COLORIMETRY_DEFAULT = 0,
+ DP_COLORIMETRY_RGB_WIDE_FIXED = 0x1,
+ DP_COLORIMETRY_BT709_YCC = 0x1,
+ DP_COLORIMETRY_RGB_WIDE_FLOAT = 0x2,
+ DP_COLORIMETRY_XVYCC_601 = 0x2,
+ DP_COLORIMETRY_OPRGB = 0x3,
+ DP_COLORIMETRY_XVYCC_709 = 0x3,
+ DP_COLORIMETRY_DCI_P3_RGB = 0x4,
+ DP_COLORIMETRY_SYCC_601 = 0x4,
+ DP_COLORIMETRY_RGB_CUSTOM = 0x5,
+ DP_COLORIMETRY_OPYCC_601 = 0x5,
+ DP_COLORIMETRY_BT2020_RGB = 0x6,
+ DP_COLORIMETRY_BT2020_CYCC = 0x6,
+ DP_COLORIMETRY_BT2020_YCC = 0x7,
+};
+
+/**
+ * enum dp_dynamic_range - drm DP Dynamic Range
+ *
+ * This enum is used to indicate DP VSC SDP Dynamic Range.
+ * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through
+ * DB18]
+ *
+ * @DP_DYNAMIC_RANGE_VESA: VESA range
+ * @DP_DYNAMIC_RANGE_CTA: CTA range
+ */
+enum dp_dynamic_range {
+ DP_DYNAMIC_RANGE_VESA = 0,
+ DP_DYNAMIC_RANGE_CTA = 1,
+};
+
+/**
+ * enum dp_content_type - drm DP Content Type
+ *
+ * This enum is used to indicate DP VSC SDP Content Types.
+ * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through
+ * DB18]
+ * CTA-861-G defines content types and expected processing by a sink device
+ *
+ * @DP_CONTENT_TYPE_NOT_DEFINED: Not defined type
+ * @DP_CONTENT_TYPE_GRAPHICS: Graphics type
+ * @DP_CONTENT_TYPE_PHOTO: Photo type
+ * @DP_CONTENT_TYPE_VIDEO: Video type
+ * @DP_CONTENT_TYPE_GAME: Game type
+ */
+enum dp_content_type {
+ DP_CONTENT_TYPE_NOT_DEFINED = 0x00,
+ DP_CONTENT_TYPE_GRAPHICS = 0x01,
+ DP_CONTENT_TYPE_PHOTO = 0x02,
+ DP_CONTENT_TYPE_VIDEO = 0x03,
+ DP_CONTENT_TYPE_GAME = 0x04,
+};
+
+#endif /* _DRM_DP_H_ */
diff --git a/include/drm/display/drm_dp_aux_bus.h b/include/drm/display/drm_dp_aux_bus.h
new file mode 100644
index 000000000000..8a0a486383c5
--- /dev/null
+++ b/include/drm/display/drm_dp_aux_bus.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * The DP AUX bus is used for devices that are connected over a DisplayPort
+ * AUX bus. The devices on the far side of the bus are referred to as
+ * endpoints in this code.
+ */
+
+#ifndef _DP_AUX_BUS_H_
+#define _DP_AUX_BUS_H_
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+/**
+ * struct dp_aux_ep_device - Main dev structure for DP AUX endpoints
+ *
+ * This is used to instantiate devices that are connected via a DP AUX
+ * bus. Usually the device is a panel, but conceivably other devices could
+ * be hooked up there.
+ */
+struct dp_aux_ep_device {
+ /** @dev: The normal dev pointer */
+ struct device dev;
+ /** @aux: Pointer to the aux bus */
+ struct drm_dp_aux *aux;
+};
+
+struct dp_aux_ep_driver {
+ int (*probe)(struct dp_aux_ep_device *aux_ep);
+ void (*remove)(struct dp_aux_ep_device *aux_ep);
+ void (*shutdown)(struct dp_aux_ep_device *aux_ep);
+ struct device_driver driver;
+};
+
+static inline struct dp_aux_ep_device *to_dp_aux_ep_dev(struct device *dev)
+{
+ return container_of(dev, struct dp_aux_ep_device, dev);
+}
+
+static inline struct dp_aux_ep_driver *to_dp_aux_ep_drv(struct device_driver *drv)
+{
+ return container_of(drv, struct dp_aux_ep_driver, driver);
+}
+
+int of_dp_aux_populate_bus(struct drm_dp_aux *aux,
+ int (*done_probing)(struct drm_dp_aux *aux));
+void of_dp_aux_depopulate_bus(struct drm_dp_aux *aux);
+int devm_of_dp_aux_populate_bus(struct drm_dp_aux *aux,
+ int (*done_probing)(struct drm_dp_aux *aux));
+
+/* Deprecated versions of the above functions. To be removed when no callers. */
+static inline int of_dp_aux_populate_ep_devices(struct drm_dp_aux *aux)
+{
+ int ret;
+
+ ret = of_dp_aux_populate_bus(aux, NULL);
+
+	/* New API returns -ENODEV for the no-child case; adapt to the old assumption */
+ return (ret != -ENODEV) ? ret : 0;
+}
+
+static inline int devm_of_dp_aux_populate_ep_devices(struct drm_dp_aux *aux)
+{
+ int ret;
+
+ ret = devm_of_dp_aux_populate_bus(aux, NULL);
+
+	/* New API returns -ENODEV for the no-child case; adapt to the old assumption */
+ return (ret != -ENODEV) ? ret : 0;
+}
+
+static inline void of_dp_aux_depopulate_ep_devices(struct drm_dp_aux *aux)
+{
+ of_dp_aux_depopulate_bus(aux);
+}
+
+#define dp_aux_dp_driver_register(aux_ep_drv) \
+ __dp_aux_dp_driver_register(aux_ep_drv, THIS_MODULE)
+int __dp_aux_dp_driver_register(struct dp_aux_ep_driver *aux_ep_drv,
+ struct module *owner);
+void dp_aux_dp_driver_unregister(struct dp_aux_ep_driver *aux_ep_drv);
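+
+/*
+ * Illustrative sketch, not part of the original patch: the skeleton of a
+ * hypothetical endpoint driver (e.g. a panel) binding to the DP AUX bus.
+ * "example_ep_probe" and "example_aux_ep_driver" are made-up names.
+ */
+static int example_ep_probe(struct dp_aux_ep_device *aux_ep)
+{
+	/* aux_ep->aux is ready for DPCD and I2C-over-AUX access here */
+	return 0;
+}
+
+static struct dp_aux_ep_driver example_aux_ep_driver = {
+	.probe = example_ep_probe,
+	.driver = {
+		.name = "example_dp_aux_ep",
+	},
+};
+
+/* registered from module init with:
+ *	dp_aux_dp_driver_register(&example_aux_ep_driver);
+ */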
+
+#endif /* _DP_AUX_BUS_H_ */
diff --git a/include/drm/drm_dp_dual_mode_helper.h b/include/drm/display/drm_dp_dual_mode_helper.h
index 4c42db81fcb4..7ee482265087 100644
--- a/include/drm/drm_dp_dual_mode_helper.h
+++ b/include/drm/display/drm_dp_dual_mode_helper.h
@@ -62,6 +62,7 @@
#define DP_DUAL_MODE_LSPCON_CURRENT_MODE 0x41
#define DP_DUAL_MODE_LSPCON_MODE_PCON 0x1
+struct drm_device;
struct i2c_adapter;
ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
@@ -103,17 +104,18 @@ enum drm_dp_dual_mode_type {
DRM_DP_DUAL_MODE_LSPCON,
};
-enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter);
-int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type,
+enum drm_dp_dual_mode_type
+drm_dp_dual_mode_detect(const struct drm_device *dev, struct i2c_adapter *adapter);
+int drm_dp_dual_mode_max_tmds_clock(const struct drm_device *dev, enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter);
-int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type,
+int drm_dp_dual_mode_get_tmds_output(const struct drm_device *dev, enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter, bool *enabled);
-int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
+int drm_dp_dual_mode_set_tmds_output(const struct drm_device *dev, enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter, bool enable);
const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type);
-int drm_lspcon_get_mode(struct i2c_adapter *adapter,
+int drm_lspcon_get_mode(const struct drm_device *dev, struct i2c_adapter *adapter,
enum drm_lspcon_mode *current_mode);
-int drm_lspcon_set_mode(struct i2c_adapter *adapter,
+int drm_lspcon_set_mode(const struct drm_device *dev, struct i2c_adapter *adapter,
enum drm_lspcon_mode reqd_mode);
#endif
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
new file mode 100644
index 000000000000..ab55453f2d2c
--- /dev/null
+++ b/include/drm/display/drm_dp_helper.h
@@ -0,0 +1,766 @@
+/*
+ * Copyright © 2008 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef _DRM_DP_HELPER_H_
+#define _DRM_DP_HELPER_H_
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+
+#include <drm/display/drm_dp.h>
+#include <drm/drm_connector.h>
+
+struct drm_device;
+struct drm_dp_aux;
+struct drm_panel;
+
+bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane);
+u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane);
+u8 drm_dp_get_adjust_tx_ffe_preset(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane);
+
+int drm_dp_read_clock_recovery_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ enum drm_dp_phy dp_phy, bool uhbr);
+int drm_dp_read_channel_eq_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ enum drm_dp_phy dp_phy, bool uhbr);
+
+void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_lttpr_link_train_clock_recovery_delay(void);
+void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_lttpr_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
+ const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
+
+int drm_dp_128b132b_read_aux_rd_interval(struct drm_dp_aux *aux);
+bool drm_dp_128b132b_lane_channel_eq_done(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+bool drm_dp_128b132b_lane_symbol_locked(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+bool drm_dp_128b132b_eq_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]);
+bool drm_dp_128b132b_cds_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]);
+bool drm_dp_128b132b_link_training_failed(const u8 link_status[DP_LINK_STATUS_SIZE]);
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate);
+int drm_dp_bw_code_to_link_rate(u8 link_bw);
+
+const char *drm_dp_phy_name(enum drm_dp_phy dp_phy);
+
+/**
+ * struct drm_dp_vsc_sdp - drm DP VSC SDP
+ *
+ * This structure represents a drm DP VSC SDP.
+ * It is based on DP 1.4 spec [Table 2-116: VSC SDP Header Bytes] and
+ * [Table 2-117: VSC SDP Payload for DB16 through DB18]
+ *
+ * @sdp_type: secondary-data packet type
+ * @revision: revision number
+ * @length: number of valid data bytes
+ * @pixelformat: pixel encoding format
+ * @colorimetry: colorimetry format
+ * @bpc: bits per color
+ * @dynamic_range: dynamic range information
+ * @content_type: CTA-861-G defines content types and expected processing by a sink device
+ */
+struct drm_dp_vsc_sdp {
+ unsigned char sdp_type;
+ unsigned char revision;
+ unsigned char length;
+ enum dp_pixelformat pixelformat;
+ enum dp_colorimetry colorimetry;
+ int bpc;
+ enum dp_dynamic_range dynamic_range;
+ enum dp_content_type content_type;
+};
+
+void drm_dp_vsc_sdp_log(const char *level, struct device *dev,
+ const struct drm_dp_vsc_sdp *vsc);
+
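+/*
+ * Illustrative sketch, not part of the original patch: describing an 8 bpc
+ * RGB stream via &struct drm_dp_vsc_sdp and logging it. The function name
+ * is made up; revision 0x5 with 0x13 valid payload bytes is the VSC SDP
+ * variant that carries the DB16-DB18 pixel encoding/colorimetry fields.
+ */
+static inline void example_log_vsc_sdp(struct device *dev)
+{
+	struct drm_dp_vsc_sdp vsc = {
+		.sdp_type = DP_SDP_VSC,
+		.revision = 0x5,
+		.length = 0x13,
+		.pixelformat = DP_PIXELFORMAT_RGB,
+		.colorimetry = DP_COLORIMETRY_DEFAULT,
+		.bpc = 8,
+		.dynamic_range = DP_DYNAMIC_RANGE_VESA,
+		.content_type = DP_CONTENT_TYPE_GRAPHICS,
+	};
+
+	drm_dp_vsc_sdp_log(KERN_DEBUG, dev, &vsc);
+}
+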
+int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]);
+
+static inline int
+drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
+}
+
+static inline u8
+drm_dp_max_lane_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+}
+
+static inline bool
+drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x11 &&
+ (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
+}
+
+static inline bool
+drm_dp_fast_training_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x11 &&
+ (dpcd[DP_MAX_DOWNSPREAD] & DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
+}
+
+static inline bool
+drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x12 &&
+ dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED;
+}
+
+static inline bool
+drm_dp_max_downspread(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x11 ||
+ dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5;
+}
+
+static inline bool
+drm_dp_tps4_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x14 &&
+ dpcd[DP_MAX_DOWNSPREAD] & DP_TPS4_SUPPORTED;
+}
+
+static inline u8
+drm_dp_training_pattern_mask(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return (dpcd[DP_DPCD_REV] >= 0x14) ? DP_TRAINING_PATTERN_MASK_1_4 :
+ DP_TRAINING_PATTERN_MASK;
+}
+
+static inline bool
+drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
+}
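+
+/*
+ * Illustrative sketch, not part of the original patch: deriving link
+ * parameters from a cached copy of the receiver capabilities (see
+ * drm_dp_read_dpcd_caps() further down) with the helpers above. 540000
+ * is HBR2 (5.4 Gbps per lane) in the kHz units these helpers use.
+ */
+static inline bool example_supports_hbr2_2lane(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+	/* HBR2 also requires TPS3 support on the sink */
+	return drm_dp_max_link_rate(dpcd) >= 540000 &&
+	       drm_dp_max_lane_count(dpcd) >= 2 &&
+	       drm_dp_tps3_supported(dpcd);
+}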
+
+/* DP/eDP DSC support */
+u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ bool is_edp);
+u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
+int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE],
+ u8 dsc_bpc[3]);
+
+static inline bool
+drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ return dsc_dpcd[DP_DSC_SUPPORT - DP_DSC_SUPPORT] &
+ DP_DSC_DECOMPRESSION_IS_SUPPORTED;
+}
+
+static inline u16
+drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] |
+ (dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] &
+ DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK <<
+ DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT);
+}
+
+static inline u32
+drm_dp_dsc_sink_max_slice_width(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ /* Max Slicewidth = Number of Pixels * 320 */
+ return dsc_dpcd[DP_DSC_MAX_SLICE_WIDTH - DP_DSC_SUPPORT] *
+ DP_DSC_SLICE_WIDTH_MULTIPLIER;
+}
+
+/* Forward Error Correction Support on DP 1.4 */
+static inline bool
+drm_dp_sink_supports_fec(const u8 fec_capable)
+{
+ return fec_capable & DP_FEC_CAPABLE;
+}
+
+static inline bool
+drm_dp_channel_coding_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_8B10B;
+}
+
+static inline bool
+drm_dp_alternate_scrambler_reset_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_EDP_CONFIGURATION_CAP] &
+ DP_ALTERNATE_SCRAMBLER_RESET_CAP;
+}
+
+/* Ignore MSA timing for Adaptive Sync support on DP 1.4 */
+static inline bool
+drm_dp_sink_can_do_video_without_timing_msa(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DOWN_STREAM_PORT_COUNT] &
+ DP_MSA_TIMING_PAR_IGNORED;
+}
+
+/**
+ * drm_edp_backlight_supported() - Check an eDP DPCD for VESA backlight support
+ * @edp_dpcd: The DPCD to check
+ *
+ * Note that currently this function will return %false for panels which support various DPCD
+ * backlight features but which require the brightness be set through PWM, and don't support setting
+ * the brightness level via the DPCD.
+ *
+ * Returns: %true if @edp_dpcd indicates that VESA backlight controls are supported, %false
+ * otherwise
+ */
+static inline bool
+drm_edp_backlight_supported(const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE])
+{
+ return !!(edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP);
+}
+
+/*
+ * DisplayPort AUX channel
+ */
+
+/**
+ * struct drm_dp_aux_msg - DisplayPort AUX channel transaction
+ * @address: address of the (first) register to access
+ * @request: contains the type of transaction (see DP_AUX_* macros)
+ * @reply: upon completion, contains the reply type of the transaction
+ * @buffer: pointer to a transmission or reception buffer
+ * @size: size of @buffer
+ */
+struct drm_dp_aux_msg {
+ unsigned int address;
+ u8 request;
+ u8 reply;
+ void *buffer;
+ size_t size;
+};
+
+struct cec_adapter;
+struct edid;
+struct drm_connector;
+
+/**
+ * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX
+ * @lock: mutex protecting this struct
+ * @adap: the CEC adapter for CEC-Tunneling-over-AUX support.
+ * @connector: the connector this CEC adapter is associated with
+ * @unregister_work: unregister the CEC adapter
+ */
+struct drm_dp_aux_cec {
+ struct mutex lock;
+ struct cec_adapter *adap;
+ struct drm_connector *connector;
+ struct delayed_work unregister_work;
+};
+
+/**
+ * struct drm_dp_aux - DisplayPort AUX channel
+ *
+ * An AUX channel can also be used to transport I2C messages to a sink. A
+ * typical application of that is to access an EDID that's present in the sink
+ * device. The @transfer() function can also be used to execute such
+ * transactions. The drm_dp_aux_register() function registers an I2C adapter
+ * that can be passed to drm_probe_ddc(). Upon removal, drivers should call
+ * drm_dp_aux_unregister() to remove the I2C adapter. The I2C adapter uses long
+ * transfers by default; if a partial response is received, the adapter will
+ * drop down to the size given by the partial response for this transaction
+ * only.
+ */
+struct drm_dp_aux {
+ /**
+ * @name: user-visible name of this AUX channel and the
+ * I2C-over-AUX adapter.
+ *
+ * It's also used to specify the name of the I2C adapter. If set
+ * to %NULL, dev_name() of @dev will be used.
+ */
+ const char *name;
+
+ /**
+ * @ddc: I2C adapter that can be used for I2C-over-AUX
+ * communication
+ */
+ struct i2c_adapter ddc;
+
+ /**
+ * @dev: pointer to struct device that is the parent for this
+ * AUX channel.
+ */
+ struct device *dev;
+
+ /**
+ * @drm_dev: pointer to the &drm_device that owns this AUX channel.
+ * Beware, this may be %NULL before drm_dp_aux_register() has been
+ * called.
+ *
+ * It should be set to the &drm_device that will be using this AUX
+ * channel as early as possible. For many graphics drivers this should
+ * happen before drm_dp_aux_init(), however it's perfectly fine to set
+ * this field later so long as it's assigned before calling
+ * drm_dp_aux_register().
+ */
+ struct drm_device *drm_dev;
+
+ /**
+ * @crtc: backpointer to the crtc that is currently using this
+ * AUX channel
+ */
+ struct drm_crtc *crtc;
+
+ /**
+ * @hw_mutex: internal mutex used for locking transfers.
+ *
+ * Note that if the underlying hardware is shared among multiple
+ * channels, the driver needs to do additional locking to
+ * prevent concurrent access.
+ */
+ struct mutex hw_mutex;
+
+ /**
+ * @crc_work: worker that captures CRCs for each frame
+ */
+ struct work_struct crc_work;
+
+ /**
+ * @crc_count: counter of captured frame CRCs
+ */
+ u8 crc_count;
+
+ /**
+ * @transfer: transfers a message representing a single AUX
+ * transaction.
+ *
+ * This is a hardware-specific implementation of how
+ * transactions are executed that the drivers must provide.
+ *
+ * A pointer to a &drm_dp_aux_msg structure describing the
+ * transaction is passed into this function. Upon success, the
+ * implementation should return the number of payload bytes that
+ * were transferred, or a negative error-code on failure.
+ *
+ * Helpers will propagate these errors, with the exception of
+ * the %-EBUSY error, which causes a transaction to be retried.
+	 * On a short transfer, helpers will return %-EPROTO to make it simpler
+ * to check for failure.
+ *
+ * The @transfer() function must only modify the reply field of
+ * the &drm_dp_aux_msg structure. The retry logic and i2c
+ * helpers assume this is the case.
+ *
+ * Also note that this callback can be called no matter the
+ * state @dev is in and also no matter what state the panel is
+ * in. It's expected:
+ *
+ * - If the @dev providing the AUX bus is currently unpowered then
+ * it will power itself up for the transfer.
+ *
+ * - If we're on eDP (using a drm_panel) and the panel is not in a
+ * state where it can respond (it's not powered or it's in a
+ * low power state) then this function may return an error, but
+ * not crash. It's up to the caller of this code to make sure that
+ * the panel is powered on if getting an error back is not OK. If a
+ * drm_panel driver is initiating a DP AUX transfer it may power
+ * itself up however it wants. All other code should ensure that
+ * the pre_enable() bridge chain (which eventually calls the
+ * drm_panel prepare function) has powered the panel.
+ */
+ ssize_t (*transfer)(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg);
+
+ /**
+ * @wait_hpd_asserted: wait for HPD to be asserted
+ *
+	 * This is mainly useful for eDP panel drivers to wait for an eDP
+ * panel to finish powering on. This is an optional function.
+ *
+ * This function will efficiently wait for the HPD signal to be
+ * asserted. The `wait_us` parameter that is passed in says that we
+ * know that the HPD signal is expected to be asserted within `wait_us`
+ * microseconds. This function could wait for longer than `wait_us` if
+ * the logic in the DP controller has a long debouncing time. The
+ * important thing is that if this function returns success that the
+ * DP controller is ready to send AUX transactions.
+ *
+ * This function returns 0 if HPD was asserted or -ETIMEDOUT if time
+ * expired and HPD wasn't asserted. This function should not print
+ * timeout errors to the log.
+ *
+ * The semantics of this function are designed to match the
+ * readx_poll_timeout() function. That means a `wait_us` of 0 means
+ * to wait forever. Like readx_poll_timeout(), this function may sleep.
+ *
+ * NOTE: this function specifically reports the state of the HPD pin
+ * that's associated with the DP AUX channel. This is different from
+ * the HPD concept in much of the rest of DRM which is more about
+ * physical presence of a display. For eDP, for instance, a display is
+ * assumed always present even if the HPD pin is deasserted.
+ */
+ int (*wait_hpd_asserted)(struct drm_dp_aux *aux, unsigned long wait_us);
+
+ /**
+ * @i2c_nack_count: Counts I2C NACKs, used for DP validation.
+ */
+ unsigned i2c_nack_count;
+ /**
+ * @i2c_defer_count: Counts I2C DEFERs, used for DP validation.
+ */
+ unsigned i2c_defer_count;
+ /**
+ * @cec: struct containing fields used for CEC-Tunneling-over-AUX.
+ */
+ struct drm_dp_aux_cec cec;
+ /**
+ * @is_remote: Is this AUX CH actually using sideband messaging.
+ */
+ bool is_remote;
+};
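+
+/*
+ * Illustrative sketch, not part of the original patch: the minimal wiring a
+ * driver does around &struct drm_dp_aux (see drm_dp_aux_init() and
+ * drm_dp_aux_register() declared below). "example_aux_transfer" stands in
+ * for the hardware-specific implementation of a single AUX transaction.
+ */
+static ssize_t example_aux_transfer(struct drm_dp_aux *aux,
+				    struct drm_dp_aux_msg *msg)
+{
+	/*
+	 * Program the hardware from msg->request, msg->address and
+	 * msg->buffer, fill in msg->reply, and return the number of
+	 * payload bytes actually transferred.
+	 */
+	return msg->size;
+}
+
+static inline int example_aux_setup(struct drm_dp_aux *aux,
+				    struct drm_device *drm_dev,
+				    struct device *parent)
+{
+	aux->name = "example AUX";
+	aux->dev = parent;
+	aux->drm_dev = drm_dev;	/* must be set before drm_dp_aux_register() */
+	aux->transfer = example_aux_transfer;
+
+	return drm_dp_aux_register(aux);
+}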
+
+int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset);
+ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
+ void *buffer, size_t size);
+ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
+ void *buffer, size_t size);
+
+/**
+ * drm_dp_dpcd_readb() - read a single byte from the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the register to read
+ * @valuep: location where the value of the register will be stored
+ *
+ * Returns the number of bytes transferred (1) on success, or a negative
+ * error code on failure.
+ */
+static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux,
+ unsigned int offset, u8 *valuep)
+{
+ return drm_dp_dpcd_read(aux, offset, valuep, 1);
+}
+
+/**
+ * drm_dp_dpcd_writeb() - write a single byte to the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the register to write
+ * @value: value to write to the register
+ *
+ * Returns the number of bytes transferred (1) on success, or a negative
+ * error code on failure.
+ */
+static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
+ unsigned int offset, u8 value)
+{
+ return drm_dp_dpcd_write(aux, offset, &value, 1);
+}
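+
+/*
+ * Illustrative sketch, not part of the original patch: waking the sink with
+ * the single-byte helpers above. The function name is made up; DP_SET_POWER
+ * and DP_SET_POWER_D0 come from drm_dp.h.
+ */
+static inline int example_sink_power_up(struct drm_dp_aux *aux)
+{
+	u8 rev;
+	ssize_t err;
+
+	err = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &rev);
+	if (err < 0)
+		return err;
+
+	/* the SET_POWER register exists on DPCD 1.1+ sinks */
+	if (rev >= 0x11)
+		err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, DP_SET_POWER_D0);
+
+	return err < 0 ? err : 0;
+}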
+
+int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux,
+ u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+
+int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
+ u8 status[DP_LINK_STATUS_SIZE]);
+
+int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
+ enum drm_dp_phy dp_phy,
+ u8 link_status[DP_LINK_STATUS_SIZE]);
+
+bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
+ u8 real_edid_checksum);
+
+int drm_dp_read_downstream_info(struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]);
+bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4], u8 type);
+bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4],
+ const struct edid *edid);
+int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4]);
+int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4],
+ const struct edid *edid);
+int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4],
+ const struct edid *edid);
+int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4],
+ const struct edid *edid);
+bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4]);
+bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4]);
+struct drm_display_mode *drm_dp_downstream_mode(struct drm_device *dev,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4]);
+int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]);
+void drm_dp_downstream_debug(struct seq_file *m,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4],
+ const struct edid *edid,
+ struct drm_dp_aux *aux);
+enum drm_mode_subconnector
+drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4]);
+void drm_dp_set_subconnector_property(struct drm_connector *connector,
+ enum drm_connector_status status,
+ const u8 *dpcd,
+ const u8 port_cap[4]);
+
+struct drm_dp_desc;
+bool drm_dp_read_sink_count_cap(struct drm_connector *connector,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const struct drm_dp_desc *desc);
+int drm_dp_read_sink_count(struct drm_dp_aux *aux);
+
+int drm_dp_read_lttpr_common_caps(struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
+int drm_dp_read_lttpr_phy_caps(struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ enum drm_dp_phy dp_phy,
+ u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
+int drm_dp_lttpr_count(const u8 cap[DP_LTTPR_COMMON_CAP_SIZE]);
+int drm_dp_lttpr_max_link_rate(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
+int drm_dp_lttpr_max_lane_count(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
+bool drm_dp_lttpr_voltage_swing_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
+bool drm_dp_lttpr_pre_emphasis_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
+
+void drm_dp_remote_aux_init(struct drm_dp_aux *aux);
+void drm_dp_aux_init(struct drm_dp_aux *aux);
+int drm_dp_aux_register(struct drm_dp_aux *aux);
+void drm_dp_aux_unregister(struct drm_dp_aux *aux);
+
+int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc);
+int drm_dp_stop_crc(struct drm_dp_aux *aux);
+
+struct drm_dp_dpcd_ident {
+ u8 oui[3];
+ u8 device_id[6];
+ u8 hw_rev;
+ u8 sw_major_rev;
+ u8 sw_minor_rev;
+} __packed;
+
+/**
+ * struct drm_dp_desc - DP branch/sink device descriptor
+ * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch).
+ * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks.
+ */
+struct drm_dp_desc {
+ struct drm_dp_dpcd_ident ident;
+ u32 quirks;
+};
+
+int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
+ bool is_branch);
+
+/**
+ * enum drm_dp_quirk - DisplayPort sink/branch device specific quirks
+ *
+ * DisplayPort sink and branch devices in the wild have a variety of bugs; we
+ * try to collect them here. The quirks are shared, but it's up to the drivers
+ * to implement workarounds for them.
+ */
+enum drm_dp_quirk {
+ /**
+ * @DP_DPCD_QUIRK_CONSTANT_N:
+ *
+	 * The device requires main link attributes Mvid and Nvid to be limited
+	 * to 16 bits, so drivers should use the constant value (0x8000) for
+	 * compatibility.
+ */
+ DP_DPCD_QUIRK_CONSTANT_N,
+ /**
+ * @DP_DPCD_QUIRK_NO_PSR:
+ *
+	 * The device does not support PSR even though it reports that it does,
+	 * or the driver still needs to implement proper handling for such a
+	 * device.
+ */
+ DP_DPCD_QUIRK_NO_PSR,
+ /**
+ * @DP_DPCD_QUIRK_NO_SINK_COUNT:
+ *
+ * The device does not set SINK_COUNT to a non-zero value.
+ * The driver should ignore SINK_COUNT during detection. Note that
+ * drm_dp_read_sink_count_cap() automatically checks for this quirk.
+ */
+ DP_DPCD_QUIRK_NO_SINK_COUNT,
+ /**
+ * @DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD:
+ *
+ * The device supports MST DSC despite not supporting Virtual DPCD.
+ * The DSC caps can be read from the physical aux instead.
+ */
+ DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD,
+ /**
+ * @DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS:
+ *
+ * The device supports a link rate of 3.24 Gbps (multiplier 0xc) despite
+ * the DP_MAX_LINK_RATE register reporting a lower max multiplier.
+ */
+ DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS,
+};
+
+/**
+ * drm_dp_has_quirk() - does the DP device have a specific quirk
+ * @desc: Device descriptor filled by drm_dp_read_desc()
+ * @quirk: Quirk to query for
+ *
+ * Return true if DP device identified by @desc has @quirk.
+ */
+static inline bool
+drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
+{
+ return desc->quirks & BIT(quirk);
+}
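+
+/*
+ * Illustrative sketch, not part of the original patch: reading the device
+ * descriptor once at detect time and consulting it for the CONSTANT_N
+ * quirk when programming M/N values. The function name is made up.
+ */
+static inline bool example_needs_constant_n(struct drm_dp_aux *aux,
+					    const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+	struct drm_dp_desc desc;
+
+	if (drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd)) < 0)
+		return false;
+
+	/* if set, use DP_LINK_CONSTANT_N_VALUE (0x8000) for Nvid */
+	return drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_CONSTANT_N);
+}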
+
+/**
+ * struct drm_edp_backlight_info - Probed eDP backlight info struct
+ * @pwmgen_bit_count: The pwmgen bit count
+ * @pwm_freq_pre_divider: The PWM frequency pre-divider value being used for this backlight, if any
+ * @max: The maximum backlight level that may be set
+ * @lsb_reg_used: Do we also write values to the DP_EDP_BACKLIGHT_BRIGHTNESS_LSB register?
+ * @aux_enable: Does the panel support the AUX enable cap?
+ * @aux_set: Does the panel support setting the brightness through AUX?
+ *
+ * This structure contains various data about an eDP backlight, which can be populated by using
+ * drm_edp_backlight_init().
+ */
+struct drm_edp_backlight_info {
+ u8 pwmgen_bit_count;
+ u8 pwm_freq_pre_divider;
+ u16 max;
+
+ bool lsb_reg_used : 1;
+ bool aux_enable : 1;
+ bool aux_set : 1;
+};
+
+int
+drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl,
+ u16 driver_pwm_freq_hz, const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE],
+ u16 *current_level, u8 *current_mode);
+int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
+ u16 level);
+int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
+ u16 level);
+int drm_edp_backlight_disable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl);
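+
+/*
+ * Illustrative sketch, not part of the original patch: the expected call
+ * order for the eDP backlight helpers above. The function name is made up;
+ * the capability bytes live at DP_EDP_DPCD_REV (0x700) onwards, and a
+ * driver_pwm_freq_hz of 0 keeps the panel's current PWM frequency.
+ */
+static inline int example_edp_backlight_setup(struct drm_dp_aux *aux,
+					      struct drm_edp_backlight_info *bl)
+{
+	u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
+	u16 level;
+	u8 mode;
+	int ret;
+
+	ret = drm_dp_dpcd_read(aux, DP_EDP_DPCD_REV, edp_dpcd,
+			       sizeof(edp_dpcd));
+	if (ret < 0)
+		return ret;
+
+	if (!drm_edp_backlight_supported(edp_dpcd))
+		return -ENODEV;
+
+	ret = drm_edp_backlight_init(aux, bl, 0, edp_dpcd, &level, &mode);
+	if (ret < 0)
+		return ret;
+
+	/* restore the level the panel reported and turn the backlight on */
+	return drm_edp_backlight_enable(aux, bl, level);
+}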
+
+#if IS_ENABLED(CONFIG_DRM_KMS_HELPER) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
+ (IS_MODULE(CONFIG_DRM_KMS_HELPER) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE)))
+
+int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux);
+
+#else
+
+static inline int drm_panel_dp_aux_backlight(struct drm_panel *panel,
+ struct drm_dp_aux *aux)
+{
+ return 0;
+}
+
+#endif
+
+#ifdef CONFIG_DRM_DP_CEC
+void drm_dp_cec_irq(struct drm_dp_aux *aux);
+void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
+ struct drm_connector *connector);
+void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux);
+void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid);
+void drm_dp_cec_unset_edid(struct drm_dp_aux *aux);
+#else
+static inline void drm_dp_cec_irq(struct drm_dp_aux *aux)
+{
+}
+
+static inline void
+drm_dp_cec_register_connector(struct drm_dp_aux *aux,
+ struct drm_connector *connector)
+{
+}
+
+static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
+{
+}
+
+static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux,
+ const struct edid *edid)
+{
+}
+
+static inline void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
+{
+}
+
+#endif
+
+/**
+ * struct drm_dp_phy_test_params - DP Phy Compliance parameters
+ * @link_rate: Requested Link rate from DPCD 0x219
+ * @num_lanes: Number of lanes requested by the sink through DPCD 0x220
+ * @phy_pattern: DP Phy test pattern from DPCD 0x248
+ * @hbr2_reset: DP HBR2_COMPLIANCE_SCRAMBLER_RESET from DPCD 0x24A and 0x24B
+ * @custom80: DP Test_80BIT_CUSTOM_PATTERN from DPCDs 0x250 through 0x259
+ * @enhanced_frame_cap: flag for enhanced frame capability.
+ */
+struct drm_dp_phy_test_params {
+ int link_rate;
+ u8 num_lanes;
+ u8 phy_pattern;
+ u8 hbr2_reset[2];
+ u8 custom80[10];
+ bool enhanced_frame_cap;
+};
+
+int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
+ struct drm_dp_phy_test_params *data);
+int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux,
+ struct drm_dp_phy_test_params *data, u8 dp_rev);
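+
+/*
+ * Illustrative sketch, not part of the original patch: servicing a PHY
+ * compliance test request with the two helpers above, typically from the
+ * HPD IRQ path once DP_AUTOMATED_TEST_REQUEST reports
+ * DP_TEST_LINK_PHY_TEST_PATTERN. The function name is made up.
+ */
+static inline int example_handle_phy_test(struct drm_dp_aux *aux, u8 dp_rev)
+{
+	struct drm_dp_phy_test_params data = {};
+	int ret;
+
+	ret = drm_dp_get_phy_test_pattern(aux, &data);
+	if (ret < 0)
+		return ret;
+
+	/* ... retrain the link to data.link_rate / data.num_lanes ... */
+
+	return drm_dp_set_phy_test_pattern(aux, &data, dp_rev);
+}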
+int drm_dp_get_pcon_max_frl_bw(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4]);
+int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd);
+bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux);
+int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
+ u8 frl_mode);
+int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask,
+ u8 frl_type);
+int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux);
+int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux);
+
+bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux);
+int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask);
+void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux,
+ struct drm_connector *connector);
+bool drm_dp_pcon_enc_is_dsc_1_2(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
+int drm_dp_pcon_dsc_max_slices(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
+int drm_dp_pcon_dsc_max_slice_width(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
+int drm_dp_pcon_dsc_bpp_incr(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
+int drm_dp_pcon_pps_default(struct drm_dp_aux *aux);
+int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128]);
+int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6]);
+bool drm_dp_downstream_rgb_to_ycbcr_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4], u8 color_spc);
+int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc);
+
+#endif /* _DRM_DP_HELPER_H_ */
diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
new file mode 100644
index 000000000000..41fd8352ab65
--- /dev/null
+++ b/include/drm/display/drm_dp_mst_helper.h
@@ -0,0 +1,1007 @@
+/*
+ * Copyright © 2014 Red Hat.
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+#ifndef _DRM_DP_MST_HELPER_H_
+#define _DRM_DP_MST_HELPER_H_
+
+#include <linux/types.h>
+#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_atomic.h>
+
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+#include <linux/stackdepot.h>
+#include <linux/timekeeping.h>
+
+enum drm_dp_mst_topology_ref_type {
+ DRM_DP_MST_TOPOLOGY_REF_GET,
+ DRM_DP_MST_TOPOLOGY_REF_PUT,
+};
+
+struct drm_dp_mst_topology_ref_history {
+ struct drm_dp_mst_topology_ref_entry {
+ enum drm_dp_mst_topology_ref_type type;
+ int count;
+ ktime_t ts_nsec;
+ depot_stack_handle_t backtrace;
+ } *entries;
+ int len;
+};
+#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */
+
+struct drm_dp_mst_branch;
+
+/**
+ * struct drm_dp_mst_port - MST port
+ * @port_num: port number
+ * @input: if this port is an input port. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @mcs: message capability status - DP 1.2 spec. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @pdt: Peer Device Type. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @ldps: Legacy Device Plug Status. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @dpcd_rev: DPCD revision of device on this port. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @num_sdp_streams: Number of simultaneous streams. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @num_sdp_stream_sinks: Number of stream sinks. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @full_pbn: Max possible bandwidth for this port. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @next: link to next port on this branch device
+ * @aux: i2c aux transport to talk to device connected to this port, protected
+ * by &drm_dp_mst_topology_mgr.base.lock.
+ * @passthrough_aux: parent aux to which DSC pass-through requests should be
+ * sent, only set if DSC pass-through is possible.
+ * @parent: branch device parent of this port
+ * @vcpi: Virtual Channel Payload info for this port.
+ * @connector: DRM connector this port is connected to. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @mgr: topology manager this port lives under.
+ *
+ * This structure represents an MST port endpoint on a device somewhere
+ * in the MST topology.
+ */
+struct drm_dp_mst_port {
+ /**
+ * @topology_kref: refcount for this port's lifetime in the topology,
+ * only the DP MST helpers should need to touch this
+ */
+ struct kref topology_kref;
+
+ /**
+ * @malloc_kref: refcount for the memory allocation containing this
+ * structure. See drm_dp_mst_get_port_malloc() and
+ * drm_dp_mst_put_port_malloc().
+ */
+ struct kref malloc_kref;
+
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+ /**
+ * @topology_ref_history: A history of each topology
+ * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
+ */
+ struct drm_dp_mst_topology_ref_history topology_ref_history;
+#endif
+
+ u8 port_num;
+ bool input;
+ bool mcs;
+ bool ddps;
+ u8 pdt;
+ bool ldps;
+ u8 dpcd_rev;
+ u8 num_sdp_streams;
+ u8 num_sdp_stream_sinks;
+ uint16_t full_pbn;
+ struct list_head next;
+ /**
+ * @mstb: the branch device connected to this port, if there is one.
+ * This should be considered protected for reading by
+ * &drm_dp_mst_topology_mgr.lock. There are two exceptions to this:
+ * &drm_dp_mst_topology_mgr.up_req_work and
+ * &drm_dp_mst_topology_mgr.work, which do not grab
+ * &drm_dp_mst_topology_mgr.lock during reads but are the only
+ * updaters of this list and are protected from writing concurrently
+ * by &drm_dp_mst_topology_mgr.probe_lock.
+ */
+ struct drm_dp_mst_branch *mstb;
+ struct drm_dp_aux aux; /* i2c bus for this port? */
+ struct drm_dp_aux *passthrough_aux;
+ struct drm_dp_mst_branch *parent;
+
+ struct drm_connector *connector;
+ struct drm_dp_mst_topology_mgr *mgr;
+
+ /**
+ * @cached_edid: for DP logical ports - make tiling work by ensuring
+ * that the EDID for all connectors is read immediately.
+ */
+ struct edid *cached_edid;
+ /**
+ * @has_audio: Tracks whether the sink connector to this port is
+ * audio-capable.
+ */
+ bool has_audio;
+
+ /**
+ * @fec_capable: bool indicating if FEC can be supported up to that
+ * point in the MST topology.
+ */
+ bool fec_capable;
+};
+
+/* sideband msg header - not bit struct */
+struct drm_dp_sideband_msg_hdr {
+ u8 lct;
+ u8 lcr;
+ u8 rad[8];
+ bool broadcast;
+ bool path_msg;
+ u8 msg_len;
+ bool somt;
+ bool eomt;
+ bool seqno;
+};
+
+struct drm_dp_sideband_msg_rx {
+ u8 chunk[48];
+ u8 msg[256];
+ u8 curchunk_len;
+ u8 curchunk_idx; /* chunk we are parsing now */
+ u8 curchunk_hdrlen;
+ u8 curlen; /* total length of the msg */
+ bool have_somt;
+ bool have_eomt;
+ struct drm_dp_sideband_msg_hdr initial_hdr;
+};
+
+/**
+ * struct drm_dp_mst_branch - MST branch device.
+ * @rad: Relative Address to talk to this branch device.
+ * @lct: Link count total to talk to this branch device.
+ * @num_ports: number of ports on the branch.
+ * @port_parent: pointer to the port parent, NULL if toplevel.
+ * @mgr: topology manager for this branch device.
+ * @link_address_sent: whether a link address message has been sent to this
+ * device yet.
+ * @guid: guid for DP 1.2 branch device. Ports under this branch can be
+ * identified by port number.
+ *
+ * This structure represents an MST branch device, there is one
+ * primary branch device at the root, along with any other branches connected
+ * to downstream port of parent branches.
+ */
+struct drm_dp_mst_branch {
+ /**
+ * @topology_kref: refcount for this branch device's lifetime in the
+ * topology, only the DP MST helpers should need to touch this
+ */
+ struct kref topology_kref;
+
+ /**
+ * @malloc_kref: refcount for the memory allocation containing this
+ * structure. See drm_dp_mst_get_mstb_malloc() and
+ * drm_dp_mst_put_mstb_malloc().
+ */
+ struct kref malloc_kref;
+
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+ /**
+ * @topology_ref_history: A history of each topology
+ * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
+ */
+ struct drm_dp_mst_topology_ref_history topology_ref_history;
+#endif
+
+ /**
+ * @destroy_next: linked-list entry used by
+ * drm_dp_delayed_destroy_work()
+ */
+ struct list_head destroy_next;
+
+ u8 rad[8];
+ u8 lct;
+ int num_ports;
+
+ /**
+ * @ports: the list of ports on this branch device. This should be
+ * considered protected for reading by &drm_dp_mst_topology_mgr.lock.
+ * There are two exceptions to this:
+ * &drm_dp_mst_topology_mgr.up_req_work and
+ * &drm_dp_mst_topology_mgr.work, which do not grab
+ * &drm_dp_mst_topology_mgr.lock during reads but are the only
+ * updaters of this list and are protected from updating the list
+	 * concurrently by &drm_dp_mst_topology_mgr.probe_lock.
+ */
+ struct list_head ports;
+
+ struct drm_dp_mst_port *port_parent;
+ struct drm_dp_mst_topology_mgr *mgr;
+
+ bool link_address_sent;
+
+ /* global unique identifier to identify branch devices */
+ u8 guid[16];
+};
+
+
+struct drm_dp_nak_reply {
+ u8 guid[16];
+ u8 reason;
+ u8 nak_data;
+};
+
+struct drm_dp_link_address_ack_reply {
+ u8 guid[16];
+ u8 nports;
+ struct drm_dp_link_addr_reply_port {
+ bool input_port;
+ u8 peer_device_type;
+ u8 port_number;
+ bool mcs;
+ bool ddps;
+ bool legacy_device_plug_status;
+ u8 dpcd_revision;
+ u8 peer_guid[16];
+ u8 num_sdp_streams;
+ u8 num_sdp_stream_sinks;
+ } ports[16];
+};
+
+struct drm_dp_remote_dpcd_read_ack_reply {
+ u8 port_number;
+ u8 num_bytes;
+ u8 bytes[255];
+};
+
+struct drm_dp_remote_dpcd_write_ack_reply {
+ u8 port_number;
+};
+
+struct drm_dp_remote_dpcd_write_nak_reply {
+ u8 port_number;
+ u8 reason;
+ u8 bytes_written_before_failure;
+};
+
+struct drm_dp_remote_i2c_read_ack_reply {
+ u8 port_number;
+ u8 num_bytes;
+ u8 bytes[255];
+};
+
+struct drm_dp_remote_i2c_read_nak_reply {
+ u8 port_number;
+ u8 nak_reason;
+ u8 i2c_nak_transaction;
+};
+
+struct drm_dp_remote_i2c_write_ack_reply {
+ u8 port_number;
+};
+
+struct drm_dp_query_stream_enc_status_ack_reply {
+ /* Bit[23:16]- Stream Id */
+ u8 stream_id;
+
+ /* Bit[15]- Signed */
+ bool reply_signed;
+
+ /* Bit[10:8]- Stream Output Sink Type */
+ bool unauthorizable_device_present;
+ bool legacy_device_present;
+ bool query_capable_device_present;
+
+ /* Bit[12:11]- Stream Output CP Type */
+ bool hdcp_1x_device_present;
+ bool hdcp_2x_device_present;
+
+ /* Bit[4]- Stream Authentication */
+ bool auth_completed;
+
+ /* Bit[3]- Stream Encryption */
+ bool encryption_enabled;
+
+ /* Bit[2]- Stream Repeater Function Present */
+ bool repeater_present;
+
+ /* Bit[1:0]- Stream State */
+ u8 state;
+};
+
+#define DRM_DP_MAX_SDP_STREAMS 16
+struct drm_dp_allocate_payload {
+ u8 port_number;
+ u8 number_sdp_streams;
+ u8 vcpi;
+ u16 pbn;
+ u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS];
+};
+
+struct drm_dp_allocate_payload_ack_reply {
+ u8 port_number;
+ u8 vcpi;
+ u16 allocated_pbn;
+};
+
+struct drm_dp_connection_status_notify {
+ u8 guid[16];
+ u8 port_number;
+ bool legacy_device_plug_status;
+ bool displayport_device_plug_status;
+ bool message_capability_status;
+ bool input_port;
+ u8 peer_device_type;
+};
+
+struct drm_dp_remote_dpcd_read {
+ u8 port_number;
+ u32 dpcd_address;
+ u8 num_bytes;
+};
+
+struct drm_dp_remote_dpcd_write {
+ u8 port_number;
+ u32 dpcd_address;
+ u8 num_bytes;
+ u8 *bytes;
+};
+
+#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
+struct drm_dp_remote_i2c_read {
+ u8 num_transactions;
+ u8 port_number;
+ struct drm_dp_remote_i2c_read_tx {
+ u8 i2c_dev_id;
+ u8 num_bytes;
+ u8 *bytes;
+ u8 no_stop_bit;
+ u8 i2c_transaction_delay;
+ } transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
+ u8 read_i2c_device_id;
+ u8 num_bytes_read;
+};
+
+struct drm_dp_remote_i2c_write {
+ u8 port_number;
+ u8 write_i2c_device_id;
+ u8 num_bytes;
+ u8 *bytes;
+};
+
+struct drm_dp_query_stream_enc_status {
+ u8 stream_id;
+ u8 client_id[7]; /* 56-bit nonce */
+ u8 stream_event;
+ bool valid_stream_event;
+ u8 stream_behavior;
+ u8 valid_stream_behavior;
+};
+
+/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
+struct drm_dp_port_number_req {
+ u8 port_number;
+};
+
+struct drm_dp_enum_path_resources_ack_reply {
+ u8 port_number;
+ bool fec_capable;
+ u16 full_payload_bw_number;
+ u16 avail_payload_bw_number;
+};
+
+/* covers POWER_DOWN_PHY, POWER_UP_PHY */
+struct drm_dp_port_number_rep {
+ u8 port_number;
+};
+
+struct drm_dp_query_payload {
+ u8 port_number;
+ u8 vcpi;
+};
+
+struct drm_dp_resource_status_notify {
+ u8 port_number;
+ u8 guid[16];
+ u16 available_pbn;
+};
+
+struct drm_dp_query_payload_ack_reply {
+ u8 port_number;
+ u16 allocated_pbn;
+};
+
+struct drm_dp_sideband_msg_req_body {
+ u8 req_type;
+ union ack_req {
+ struct drm_dp_connection_status_notify conn_stat;
+ struct drm_dp_port_number_req port_num;
+ struct drm_dp_resource_status_notify resource_stat;
+
+ struct drm_dp_query_payload query_payload;
+ struct drm_dp_allocate_payload allocate_payload;
+
+ struct drm_dp_remote_dpcd_read dpcd_read;
+ struct drm_dp_remote_dpcd_write dpcd_write;
+
+ struct drm_dp_remote_i2c_read i2c_read;
+ struct drm_dp_remote_i2c_write i2c_write;
+
+ struct drm_dp_query_stream_enc_status enc_status;
+ } u;
+};
+
+struct drm_dp_sideband_msg_reply_body {
+ u8 reply_type;
+ u8 req_type;
+ union ack_replies {
+ struct drm_dp_nak_reply nak;
+ struct drm_dp_link_address_ack_reply link_addr;
+ struct drm_dp_port_number_rep port_number;
+
+ struct drm_dp_enum_path_resources_ack_reply path_resources;
+ struct drm_dp_allocate_payload_ack_reply allocate_payload;
+ struct drm_dp_query_payload_ack_reply query_payload;
+
+ struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
+ struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
+ struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;
+
+ struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
+ struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
+ struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;
+
+ struct drm_dp_query_stream_enc_status_ack_reply enc_status;
+ } u;
+};
+
+/* msg is queued to be put into a slot */
+#define DRM_DP_SIDEBAND_TX_QUEUED 0
+/* msg has started transmitting on a slot - still on msgq */
+#define DRM_DP_SIDEBAND_TX_START_SEND 1
+/* msg has finished transmitting on a slot - removed from msgq only in slot */
+#define DRM_DP_SIDEBAND_TX_SENT 2
+/* msg has received a response - removed from slot */
+#define DRM_DP_SIDEBAND_TX_RX 3
+#define DRM_DP_SIDEBAND_TX_TIMEOUT 4
+
+struct drm_dp_sideband_msg_tx {
+ u8 msg[256];
+ u8 chunk[48];
+ u8 cur_offset;
+ u8 cur_len;
+ struct drm_dp_mst_branch *dst;
+ struct list_head next;
+ int seqno;
+ int state;
+ bool path_msg;
+ struct drm_dp_sideband_msg_reply_body reply;
+};
+
+/* sideband msg handler */
+struct drm_dp_mst_topology_mgr;
+struct drm_dp_mst_topology_cbs {
+ /* create a connector for a port */
+ struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
+ /*
+ * Checks for any pending MST interrupts, passing them to MST core for
+ * processing, the same way an HPD IRQ pulse handler would do this.
+ * If provided MST core calls this callback from a poll-waiting loop
+ * when waiting for MST down message replies. The driver is expected
+ * to guard against a race between this callback and the driver's HPD
+ * IRQ pulse handler.
+ */
+ void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr);
+};
+
+#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)
+
+/**
+ * struct drm_dp_mst_atomic_payload - Atomic state struct for an MST payload
+ *
+ * The primary atomic state structure for a given MST payload. Stores information like current
+ * bandwidth allocation, intended action for this payload, etc.
+ */
+struct drm_dp_mst_atomic_payload {
+ /** @port: The MST port assigned to this payload */
+ struct drm_dp_mst_port *port;
+
+ /**
+ * @vc_start_slot: The time slot that this payload starts on. Because payload start slots
+ * can't be determined ahead of time, the contents of this value are UNDEFINED at atomic
+ * check time. This shouldn't usually matter, as the start slot should never be relevant for
+ * atomic state computations.
+ *
+ * Since this value is determined at commit time instead of check time, this value is
+ * protected by the MST helpers ensuring that async commits operating on the given topology
+ * never run in parallel. In the event that a driver does need to read this value (e.g. to
+ * inform hardware of the starting timeslot for a payload), the driver may either:
+ *
+ * * Read this field during the atomic commit after
+ * drm_dp_mst_atomic_wait_for_dependencies() has been called, which will ensure the
+	 *   previous MST state's payload start slots have been copied over to the new state. Note
+ * that a new start slot won't be assigned/removed from this payload until
+ * drm_dp_add_payload_part1()/drm_dp_remove_payload() have been called.
+ * * Acquire the MST modesetting lock, and then wait for any pending MST-related commits to
+ * get committed to hardware by calling drm_crtc_commit_wait() on each of the
+ * &drm_crtc_commit structs in &drm_dp_mst_topology_state.commit_deps.
+ *
+ * If neither of the two above solutions suffice (e.g. the driver needs to read the start
+ * slot in the middle of an atomic commit without waiting for some reason), then drivers
+ * should cache this value themselves after changing payloads.
+ */
+ s8 vc_start_slot;
+
+ /** @vcpi: The Virtual Channel Payload Identifier */
+ u8 vcpi;
+ /**
+ * @time_slots:
+ * The number of timeslots allocated to this payload from the source DP Tx to
+ * the immediate downstream DP Rx
+ */
+ int time_slots;
+ /** @pbn: The payload bandwidth for this payload */
+ int pbn;
+
+ /** @delete: Whether or not we intend to delete this payload during this atomic commit */
+ bool delete : 1;
+ /** @dsc_enabled: Whether or not this payload has DSC enabled */
+ bool dsc_enabled : 1;
+
+ /** @next: The list node for this payload */
+ struct list_head next;
+};
+
+/**
+ * struct drm_dp_mst_topology_state - DisplayPort MST topology atomic state
+ *
+ * This struct represents the atomic state of the toplevel DisplayPort MST manager
+ */
+struct drm_dp_mst_topology_state {
+ /** @base: Base private state for atomic */
+ struct drm_private_state base;
+
+ /** @mgr: The topology manager */
+ struct drm_dp_mst_topology_mgr *mgr;
+
+ /**
+ * @pending_crtc_mask: A bitmask of all CRTCs this topology state touches, drivers may
+ * modify this to add additional dependencies if needed.
+ */
+ u32 pending_crtc_mask;
+ /**
+ * @commit_deps: A list of all CRTC commits affecting this topology, this field isn't
+ * populated until drm_dp_mst_atomic_wait_for_dependencies() is called.
+ */
+ struct drm_crtc_commit **commit_deps;
+ /** @num_commit_deps: The number of CRTC commits in @commit_deps */
+ size_t num_commit_deps;
+
+ /** @payload_mask: A bitmask of allocated VCPIs, used for VCPI assignments */
+ u32 payload_mask;
+ /** @payloads: The list of payloads being created/destroyed in this state */
+ struct list_head payloads;
+
+ /** @total_avail_slots: The total number of slots this topology can handle (63 or 64) */
+ u8 total_avail_slots;
+ /** @start_slot: The first usable time slot in this topology (1 or 0) */
+ u8 start_slot;
+
+ /**
+ * @pbn_div: The current PBN divisor for this topology. The driver is expected to fill this
+ * out itself.
+ */
+ int pbn_div;
+};
+
+#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)
+
+/**
+ * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
+ *
+ * This struct represents the toplevel DisplayPort MST topology manager.
+ * There should be one instance of this for every MST capable DP connector
+ * on the GPU.
+ */
+struct drm_dp_mst_topology_mgr {
+ /**
+ * @base: Base private object for atomic
+ */
+ struct drm_private_obj base;
+
+ /**
+ * @dev: device pointer for adding i2c devices etc.
+ */
+ struct drm_device *dev;
+ /**
+ * @cbs: callbacks for connector addition and destruction.
+ */
+ const struct drm_dp_mst_topology_cbs *cbs;
+ /**
+ * @max_dpcd_transaction_bytes: maximum number of bytes to read/write
+ * in one go.
+ */
+ int max_dpcd_transaction_bytes;
+ /**
+	 * @aux: AUX channel for the DP MST connector this topology mgr is
+ * controlling.
+ */
+ struct drm_dp_aux *aux;
+ /**
+ * @max_payloads: maximum number of payloads the GPU can generate.
+ */
+ int max_payloads;
+ /**
+ * @conn_base_id: DRM connector ID this mgr is connected to. Only used
+ * to build the MST connector path value.
+ */
+ int conn_base_id;
+
+ /**
+ * @up_req_recv: Message receiver state for up requests.
+ */
+ struct drm_dp_sideband_msg_rx up_req_recv;
+
+ /**
+ * @down_rep_recv: Message receiver state for replies to down
+ * requests.
+ */
+ struct drm_dp_sideband_msg_rx down_rep_recv;
+
+ /**
+ * @lock: protects @mst_state, @mst_primary, @dpcd, and
+ * @payload_id_table_cleared.
+ */
+ struct mutex lock;
+
+ /**
+ * @probe_lock: Prevents @work and @up_req_work, the only writers of
+ * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing
+ * while they update the topology.
+ */
+ struct mutex probe_lock;
+
+ /**
+	 * @mst_state: Whether this manager is enabled for an MST capable port.
+	 * False if no MST sink/branch devices are connected.
+ */
+ bool mst_state : 1;
+
+ /**
+ * @payload_id_table_cleared: Whether or not we've cleared the payload
+ * ID table for @mst_primary. Protected by @lock.
+ */
+ bool payload_id_table_cleared : 1;
+
+ /**
+ * @payload_count: The number of currently active payloads in hardware. This value is only
+ * intended to be used internally by MST helpers for payload tracking, and is only safe to
+ * read/write from the atomic commit (not check) context.
+ */
+ u8 payload_count;
+
+ /**
+ * @next_start_slot: The starting timeslot to use for new VC payloads. This value is used
+ * internally by MST helpers for payload tracking, and is only safe to read/write from the
+ * atomic commit (not check) context.
+ */
+ u8 next_start_slot;
+
+ /**
+ * @mst_primary: Pointer to the primary/first branch device.
+ */
+ struct drm_dp_mst_branch *mst_primary;
+
+ /**
+ * @dpcd: Cache of DPCD for primary port.
+ */
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ /**
+ * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
+ */
+ u8 sink_count;
+
+ /**
+ * @funcs: Atomic helper callbacks
+ */
+ const struct drm_private_state_funcs *funcs;
+
+ /**
+ * @qlock: protects @tx_msg_downq and &drm_dp_sideband_msg_tx.state
+ */
+ struct mutex qlock;
+
+ /**
+ * @tx_msg_downq: List of pending down requests
+ */
+ struct list_head tx_msg_downq;
+
+ /**
+ * @tx_waitq: Wait to queue stall for the tx worker.
+ */
+ wait_queue_head_t tx_waitq;
+ /**
+ * @work: Probe work.
+ */
+ struct work_struct work;
+ /**
+ * @tx_work: Sideband transmit worker. This can nest within the main
+ * @work worker for each transaction @work launches.
+ */
+ struct work_struct tx_work;
+
+ /**
+ * @destroy_port_list: List of to be destroyed connectors.
+ */
+ struct list_head destroy_port_list;
+ /**
+ * @destroy_branch_device_list: List of to be destroyed branch
+ * devices.
+ */
+ struct list_head destroy_branch_device_list;
+ /**
+ * @delayed_destroy_lock: Protects @destroy_port_list and
+ * @destroy_branch_device_list.
+ */
+ struct mutex delayed_destroy_lock;
+
+ /**
+ * @delayed_destroy_wq: Workqueue used for delayed_destroy_work items.
+ * A dedicated WQ makes it possible to drain any requeued work items
+ * on it.
+ */
+ struct workqueue_struct *delayed_destroy_wq;
+
+ /**
+ * @delayed_destroy_work: Work item to destroy MST port and branch
+ * devices, needed to avoid locking inversion.
+ */
+ struct work_struct delayed_destroy_work;
+
+ /**
+ * @up_req_list: List of pending up requests from the topology that
+ * need to be processed, in chronological order.
+ */
+ struct list_head up_req_list;
+ /**
+ * @up_req_lock: Protects @up_req_list
+ */
+ struct mutex up_req_lock;
+ /**
+ * @up_req_work: Work item to process up requests received from the
+ * topology. Needed to avoid blocking hotplug handling and sideband
+ * transmissions.
+ */
+ struct work_struct up_req_work;
+
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+ /**
+ * @topology_ref_history_lock: protects
+ * &drm_dp_mst_port.topology_ref_history and
+ * &drm_dp_mst_branch.topology_ref_history.
+ */
+ struct mutex topology_ref_history_lock;
+#endif
+};
+
+int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_device *dev, struct drm_dp_aux *aux,
+ int max_dpcd_transaction_bytes,
+ int max_payloads, int conn_base_id);
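A usage sketch, not part of the patch, assuming one manager per MST-capable connector; the DPCD transaction size (16) and payload count (8) are arbitrary example values:

static int example_mst_mgr_create(struct drm_device *dev,
				  struct drm_dp_aux *aux, int conn_base_id)
{
	struct drm_dp_mst_topology_mgr *mgr;

	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);	/* <linux/slab.h> */
	if (!mgr)
		return -ENOMEM;

	return drm_dp_mst_topology_mgr_init(mgr, dev, aux, 16, 8,
					    conn_base_id);
}

MST mode itself is toggled later with drm_dp_mst_topology_mgr_set_mst(), typically after checking the sink with drm_dp_read_mst_cap() (both declared below).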
+
+void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
+
+bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
+
+int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
+
+int
+drm_dp_mst_detect_port(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port);
+
+struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+
+int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
+ int link_rate, int link_lane_count);
+
+int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
+
+void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap);
+
+int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_atomic_payload *payload);
+int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_atomic_state *state,
+ struct drm_dp_mst_atomic_payload *payload);
+void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_atomic_payload *payload);
+
+int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
+
+void drm_dp_mst_dump_topology(struct seq_file *m,
+ struct drm_dp_mst_topology_mgr *mgr);
+
+void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
+int __must_check
+drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
+ bool sync);
+
+ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
+ unsigned int offset, void *buffer, size_t size);
+ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
+ unsigned int offset, void *buffer, size_t size);
+
+int drm_dp_mst_connector_late_register(struct drm_connector *connector,
+ struct drm_dp_mst_port *port);
+void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
+ struct drm_dp_mst_port *port);
+
+struct drm_dp_mst_topology_state *
+drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr);
+struct drm_dp_mst_topology_state *
+drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr);
+struct drm_dp_mst_atomic_payload *
+drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
+ struct drm_dp_mst_port *port);
+int __must_check
+drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port, int pbn);
+int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
+ struct drm_dp_mst_port *port,
+ int pbn, bool enable);
+int __must_check
+drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr);
+int __must_check
+drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port);
+void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state);
+int __must_check drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state);
+int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port, bool power_up);
+int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ struct drm_dp_query_stream_enc_status_ack_reply *status);
+int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
+int __must_check drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state,
+ struct drm_dp_mst_topology_mgr *mgr);
+
+void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
+void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);
+
+struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);
+
+static inline struct drm_dp_mst_topology_state *
+to_drm_dp_mst_topology_state(struct drm_private_state *state)
+{
+ return container_of(state, struct drm_dp_mst_topology_state, base);
+}
+
+extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;
+
+/**
+ * __drm_dp_mst_state_iter_get - private atomic state iterator function for
+ * macro-internal use
+ * @state: &struct drm_atomic_state pointer
+ * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor
+ * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state
+ * iteration cursor
+ * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state
+ * iteration cursor
+ * @i: int iteration cursor, for macro-internal use
+ *
+ * Used by for_each_oldnew_mst_mgr_in_state(),
+ * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't
+ * call this directly.
+ *
+ * Returns:
+ * True if the current &struct drm_private_obj is a &struct
+ * drm_dp_mst_topology_mgr, false otherwise.
+ */
+static inline bool
+__drm_dp_mst_state_iter_get(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr **mgr,
+ struct drm_dp_mst_topology_state **old_state,
+ struct drm_dp_mst_topology_state **new_state,
+ int i)
+{
+ struct __drm_private_objs_state *objs_state = &state->private_objs[i];
+
+ if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs)
+ return false;
+
+ *mgr = to_dp_mst_topology_mgr(objs_state->ptr);
+ if (old_state)
+ *old_state = to_drm_dp_mst_topology_state(objs_state->old_state);
+ if (new_state)
+ *new_state = to_drm_dp_mst_topology_state(objs_state->new_state);
+
+ return true;
+}
+
+/**
+ * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology
+ * managers in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
+ * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
+ * state
+ * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
+ * state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all DRM DP MST topology managers in an atomic update,
+ * tracking both old and new state. This is useful in places where the state
+ * delta needs to be considered, for example in atomic check functions.
+ */
+#define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \
+ for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
+ for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i)))
+
+/**
+ * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers
+ * in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
+ * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
+ * state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all DRM DP MST topology managers in an atomic update,
+ * tracking only the old state. This is useful in disable functions, where we
+ * need the old state the hardware is still in.
+ */
+#define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \
+ for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
+ for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i)))
+
+/**
+ * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers
+ * in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
+ * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
+ * state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all DRM DP MST topology managers in an atomic update,
+ * tracking only the new state. This is useful in enable functions, where we
+ * need the new state the hardware should be in when the atomic commit
+ * operation has completed.
+ */
+#define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \
+ for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
+ for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i)))
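A short sketch, under the assumption of a driver-wide atomic check function, of how these iterators are meant to be used; the bandwidth validation body is a placeholder:

static int example_atomic_check(struct drm_atomic_state *state)
{
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_topology_state *new_mst_state;
	struct drm_dp_mst_atomic_payload *payload;
	int i;

	for_each_new_mst_mgr_in_state(state, mgr, new_mst_state, i) {
		list_for_each_entry(payload, &new_mst_state->payloads, next) {
			/* e.g. validate the payload against the link budget */
		}
	}

	return drm_dp_mst_atomic_check(state);
}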
+
+#endif
diff --git a/include/drm/drm_dsc.h b/include/drm/display/drm_dsc.h
index 887954cbfc60..bc90273d06a6 100644
--- a/include/drm/drm_dsc.h
+++ b/include/drm/display/drm_dsc.h
@@ -8,7 +8,7 @@
#ifndef DRM_DSC_H_
#define DRM_DSC_H_
-#include <drm/drm_dp_helper.h>
+#include <drm/display/drm_dp.h>
/* VESA Display Stream Compression DSC 1.2 constants */
#define DSC_NUM_BUF_RANGES 15
@@ -273,7 +273,8 @@ struct drm_dsc_config {
};
/**
- * struct picture_parameter_set - Represents 128 bytes of Picture Parameter Set
+ * struct drm_dsc_picture_parameter_set - Represents 128 bytes of
+ * Picture Parameter Set
*
* The VESA DSC standard defines picture parameter set (PPS) which display
* stream compression encoders must communicate to decoders.
@@ -588,7 +589,7 @@ struct drm_dsc_picture_parameter_set {
* This structure represents the DSC PPS infoframe required to send the Picture
* Parameter Set metadata required before enabling VESA Display Stream
* Compression. This is based on the DP Secondary Data Packet structure and
- * comprises of SDP Header as defined &struct struct dp_sdp_header in drm_dp_helper.h
+ * comprises the SDP Header as defined in &struct dp_sdp_header in drm_dp_helper.h
* and PPS payload defined in &struct drm_dsc_picture_parameter_set.
*
* @pps_header: Header for PPS as per DP SDP header format of type
@@ -601,9 +602,4 @@ struct drm_dsc_pps_infoframe {
struct drm_dsc_picture_parameter_set pps_payload;
} __packed;
-void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header);
-void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_sdp,
- const struct drm_dsc_config *dsc_cfg);
-int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg);
-
#endif /* _DRM_DSC_H_ */
diff --git a/include/drm/display/drm_dsc_helper.h b/include/drm/display/drm_dsc_helper.h
new file mode 100644
index 000000000000..8b41edbbabab
--- /dev/null
+++ b/include/drm/display/drm_dsc_helper.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Authors:
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#ifndef DRM_DSC_HELPER_H_
+#define DRM_DSC_HELPER_H_
+
+#include <drm/display/drm_dsc.h>
+
+void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header);
+int drm_dsc_dp_rc_buffer_size(u8 rc_buffer_block_size, u8 rc_buffer_size);
+void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_sdp,
+ const struct drm_dsc_config *dsc_cfg);
+int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg);
+
+#endif /* DRM_DSC_HELPER_H_ */
+
diff --git a/include/drm/drm_hdcp.h b/include/drm/display/drm_hdcp.h
index 06a11202a097..96a99b1377c0 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/display/drm_hdcp.h
@@ -6,8 +6,8 @@
* Sean Paul <seanpaul@chromium.org>
*/
-#ifndef _DRM_HDCP_H_INCLUDED_
-#define _DRM_HDCP_H_INCLUDED_
+#ifndef _DRM_HDCP_H_
+#define _DRM_HDCP_H_
#include <linux/types.h>
@@ -29,6 +29,9 @@
/* Slave address for the HDCP registers in the receiver */
#define DRM_HDCP_DDC_ADDR 0x3A
+/* Value to use at the end of the SHA-1 bytestream used for repeaters */
+#define DRM_HDCP_SHA1_TERMINATOR 0x80
+
/* HDCP register offsets for HDMI/DVI devices */
#define DRM_HDCP_DDC_BKSV 0x00
#define DRM_HDCP_DDC_RI_PRIME 0x08
@@ -98,11 +101,11 @@
/* Following Macros take a byte at a time for bit(s) masking */
/*
- * TODO: This has to be changed for DP MST, as multiple stream on
- * same port is possible.
- * For HDCP2.2 on HDMI and DP SST this value is always 1.
+ * TODO: HDCP_2_2_MAX_CONTENT_STREAMS_CNT is based upon actual
+ * H/W MST streams capacity.
+ * This needs to be moved out to a platform specific header.
*/
-#define HDCP_2_2_MAX_CONTENT_STREAMS_CNT 1
+#define HDCP_2_2_MAX_CONTENT_STREAMS_CNT 4
#define HDCP_2_2_TXCAP_MASK_LEN 2
#define HDCP_2_2_RXCAPS_LEN 3
#define HDCP_2_2_RX_REPEATER(x) ((x) & BIT(0))
@@ -221,11 +224,14 @@ struct hdcp2_rep_stream_ready {
/* HDCP2.2 TIMEOUTs in mSec */
#define HDCP_2_2_CERT_TIMEOUT_MS 100
+#define HDCP_2_2_DP_CERT_READ_TIMEOUT_MS 110
#define HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS 1000
#define HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS 200
+#define HDCP_2_2_DP_HPRIME_READ_TIMEOUT_MS 7
#define HDCP_2_2_PAIRING_TIMEOUT_MS 200
+#define HDCP_2_2_DP_PAIRING_READ_TIMEOUT_MS 5
#define HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS 20
-#define HDCP_2_2_DP_LPRIME_TIMEOUT_MS 7
+#define HDCP_2_2_DP_LPRIME_TIMEOUT_MS 16
#define HDCP_2_2_RECVID_LIST_TIMEOUT_MS 3000
#define HDCP_2_2_STREAM_READY_TIMEOUT_MS 100
@@ -276,7 +282,7 @@ void drm_hdcp_cpu_to_be24(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN], u32 val)
#define DRM_HDCP_2_VRL_LENGTH_SIZE 3
#define DRM_HDCP_2_DCP_SIG_SIZE 384
#define DRM_HDCP_2_NO_OF_DEV_PLUS_RESERVED_SZ 4
-#define DRM_HDCP_2_KSV_COUNT_2_LSBITS(byte) (((byte) & 0xC) >> 6)
+#define DRM_HDCP_2_KSV_COUNT_2_LSBITS(byte) (((byte) & 0xC0) >> 6)
struct hdcp_srm_header {
u8 srm_id;
@@ -285,16 +291,6 @@ struct hdcp_srm_header {
u8 srm_gen_no;
} __packed;
-struct drm_device;
-struct drm_connector;
-
-bool drm_hdcp_check_ksvs_revoked(struct drm_device *dev,
- u8 *ksvs, u32 ksv_count);
-int drm_connector_attach_content_protection_property(
- struct drm_connector *connector, bool hdcp_content_type);
-void drm_hdcp_update_content_protection(struct drm_connector *connector,
- u64 val);
-
/* Content Type classification for HDCP2.2 vs others */
#define DRM_MODE_HDCP_CONTENT_TYPE0 0
#define DRM_MODE_HDCP_CONTENT_TYPE1 1
diff --git a/include/drm/display/drm_hdcp_helper.h b/include/drm/display/drm_hdcp_helper.h
new file mode 100644
index 000000000000..8aaf87bf2735
--- /dev/null
+++ b/include/drm/display/drm_hdcp_helper.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * Authors:
+ * Sean Paul <seanpaul@chromium.org>
+ */
+
+#ifndef _DRM_HDCP_HELPER_H_INCLUDED_
+#define _DRM_HDCP_HELPER_H_INCLUDED_
+
+#include <drm/display/drm_hdcp.h>
+
+struct drm_device;
+struct drm_connector;
+
+int drm_hdcp_check_ksvs_revoked(struct drm_device *dev, u8 *ksvs, u32 ksv_count);
+int drm_connector_attach_content_protection_property(struct drm_connector *connector,
+ bool hdcp_content_type);
+void drm_hdcp_update_content_protection(struct drm_connector *connector, u64 val);
+
+#endif
diff --git a/include/drm/display/drm_hdmi_helper.h b/include/drm/display/drm_hdmi_helper.h
new file mode 100644
index 000000000000..76d234826e22
--- /dev/null
+++ b/include/drm/display/drm_hdmi_helper.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_HDMI_HELPER
+#define DRM_HDMI_HELPER
+
+#include <linux/hdmi.h>
+
+struct drm_connector;
+struct drm_connector_state;
+struct drm_display_mode;
+
+void
+drm_hdmi_avi_infoframe_colorimetry(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state);
+
+void
+drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state);
+
+int
+drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
+ const struct drm_connector_state *conn_state);
+
+void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state);
+
+#endif
diff --git a/include/drm/display/drm_scdc.h b/include/drm/display/drm_scdc.h
new file mode 100644
index 000000000000..3d58f37e8ed8
--- /dev/null
+++ b/include/drm/display/drm_scdc.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2015 NVIDIA Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DRM_SCDC_H
+#define DRM_SCDC_H
+
+#define SCDC_SINK_VERSION 0x01
+
+#define SCDC_SOURCE_VERSION 0x02
+
+#define SCDC_UPDATE_0 0x10
+#define SCDC_READ_REQUEST_TEST (1 << 2)
+#define SCDC_CED_UPDATE (1 << 1)
+#define SCDC_STATUS_UPDATE (1 << 0)
+
+#define SCDC_UPDATE_1 0x11
+
+#define SCDC_TMDS_CONFIG 0x20
+#define SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 (1 << 1)
+#define SCDC_TMDS_BIT_CLOCK_RATIO_BY_10 (0 << 1)
+#define SCDC_SCRAMBLING_ENABLE (1 << 0)
+
+#define SCDC_SCRAMBLER_STATUS 0x21
+#define SCDC_SCRAMBLING_STATUS (1 << 0)
+
+#define SCDC_CONFIG_0 0x30
+#define SCDC_READ_REQUEST_ENABLE (1 << 0)
+
+#define SCDC_STATUS_FLAGS_0 0x40
+#define SCDC_CH2_LOCK (1 << 3)
+#define SCDC_CH1_LOCK (1 << 2)
+#define SCDC_CH0_LOCK (1 << 1)
+#define SCDC_CH_LOCK_MASK (SCDC_CH2_LOCK | SCDC_CH1_LOCK | SCDC_CH0_LOCK)
+#define SCDC_CLOCK_DETECT (1 << 0)
+
+#define SCDC_STATUS_FLAGS_1 0x41
+
+#define SCDC_ERR_DET_0_L 0x50
+#define SCDC_ERR_DET_0_H 0x51
+#define SCDC_ERR_DET_1_L 0x52
+#define SCDC_ERR_DET_1_H 0x53
+#define SCDC_ERR_DET_2_L 0x54
+#define SCDC_ERR_DET_2_H 0x55
+#define SCDC_CHANNEL_VALID (1 << 7)
+
+#define SCDC_ERR_DET_CHECKSUM 0x56
+
+#define SCDC_TEST_CONFIG_0 0xc0
+#define SCDC_TEST_READ_REQUEST (1 << 7)
+#define SCDC_TEST_READ_REQUEST_DELAY(x) ((x) & 0x7f)
+
+#define SCDC_MANUFACTURER_IEEE_OUI 0xd0
+#define SCDC_MANUFACTURER_IEEE_OUI_SIZE 3
+
+#define SCDC_DEVICE_ID 0xd3
+#define SCDC_DEVICE_ID_SIZE 8
+
+#define SCDC_DEVICE_HARDWARE_REVISION 0xdb
+#define SCDC_GET_DEVICE_HARDWARE_REVISION_MAJOR(x) (((x) >> 4) & 0xf)
+#define SCDC_GET_DEVICE_HARDWARE_REVISION_MINOR(x) (((x) >> 0) & 0xf)
+
+#define SCDC_DEVICE_SOFTWARE_MAJOR_REVISION 0xdc
+#define SCDC_DEVICE_SOFTWARE_MINOR_REVISION 0xdd
+
+#define SCDC_MANUFACTURER_SPECIFIC 0xde
+#define SCDC_MANUFACTURER_SPECIFIC_SIZE 34
+
+#endif
diff --git a/include/drm/display/drm_scdc_helper.h b/include/drm/display/drm_scdc_helper.h
new file mode 100644
index 000000000000..ded01fd948b4
--- /dev/null
+++ b/include/drm/display/drm_scdc_helper.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2015 NVIDIA Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DRM_SCDC_HELPER_H
+#define DRM_SCDC_HELPER_H
+
+#include <linux/types.h>
+
+#include <drm/display/drm_scdc.h>
+
+struct i2c_adapter;
+
+ssize_t drm_scdc_read(struct i2c_adapter *adapter, u8 offset, void *buffer,
+ size_t size);
+ssize_t drm_scdc_write(struct i2c_adapter *adapter, u8 offset,
+ const void *buffer, size_t size);
+
+/**
+ * drm_scdc_readb - read a single byte from SCDC
+ * @adapter: I2C adapter
+ * @offset: offset of register to read
+ * @value: return location for the register value
+ *
+ * Reads a single byte from SCDC. This is a convenience wrapper around the
+ * drm_scdc_read() function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_scdc_readb(struct i2c_adapter *adapter, u8 offset,
+ u8 *value)
+{
+ return drm_scdc_read(adapter, offset, value, sizeof(*value));
+}
+
+/**
+ * drm_scdc_writeb - write a single byte to SCDC
+ * @adapter: I2C adapter
+ * @offset: offset of register to write
+ * @value: value to write to the register
+ *
+ * Writes a single byte to SCDC. This is a convenience wrapper around the
+ * drm_scdc_write() function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_scdc_writeb(struct i2c_adapter *adapter, u8 offset,
+ u8 value)
+{
+ return drm_scdc_write(adapter, offset, &value, sizeof(value));
+}
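A small sketch of the intended read-modify-write pattern with these byte helpers; note that drm_scdc_set_scrambling(), declared just below, already wraps this particular update:

static int example_enable_scrambling(struct i2c_adapter *adapter)
{
	u8 config;
	int ret;

	ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
	if (ret < 0)
		return ret;

	config |= SCDC_SCRAMBLING_ENABLE;

	return drm_scdc_writeb(adapter, SCDC_TMDS_CONFIG, config);
}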
+
+bool drm_scdc_get_scrambling_status(struct i2c_adapter *adapter);
+
+bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable);
+bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set);
+
+#endif
diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h
deleted file mode 100644
index 664e120b93e6..000000000000
--- a/include/drm/drm_agpsupport.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _DRM_AGPSUPPORT_H_
-#define _DRM_AGPSUPPORT_H_
-
-#include <linux/agp_backend.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/mm.h>
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include <uapi/drm/drm.h>
-
-struct drm_device;
-struct drm_file;
-
-struct drm_agp_head {
- struct agp_kern_info agp_info;
- struct list_head memory;
- unsigned long mode;
- struct agp_bridge_data *bridge;
- int enabled;
- int acquired;
- unsigned long base;
- int agp_mtrr;
- int cant_use_aperture;
- unsigned long page_mask;
-};
-
-#if IS_ENABLED(CONFIG_AGP)
-
-void drm_free_agp(struct agp_memory * handle, int pages);
-int drm_bind_agp(struct agp_memory * handle, unsigned int start);
-int drm_unbind_agp(struct agp_memory * handle);
-
-struct drm_agp_head *drm_agp_init(struct drm_device *dev);
-void drm_legacy_agp_clear(struct drm_device *dev);
-int drm_agp_acquire(struct drm_device *dev);
-int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_release(struct drm_device *dev);
-int drm_agp_release_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
-int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
-int drm_agp_info_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
-int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
-int drm_agp_free_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
-int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
-int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-#else /* CONFIG_AGP */
-
-static inline void drm_free_agp(struct agp_memory * handle, int pages)
-{
-}
-
-static inline int drm_bind_agp(struct agp_memory * handle, unsigned int start)
-{
- return -ENODEV;
-}
-
-static inline int drm_unbind_agp(struct agp_memory * handle)
-{
- return -ENODEV;
-}
-
-static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev)
-{
- return NULL;
-}
-
-static inline void drm_legacy_agp_clear(struct drm_device *dev)
-{
-}
-
-static inline int drm_agp_acquire(struct drm_device *dev)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_release(struct drm_device *dev)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_enable(struct drm_device *dev,
- struct drm_agp_mode mode)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_info(struct drm_device *dev,
- struct drm_agp_info *info)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_alloc(struct drm_device *dev,
- struct drm_agp_buffer *request)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_free(struct drm_device *dev,
- struct drm_agp_buffer *request)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_unbind(struct drm_device *dev,
- struct drm_agp_binding *request)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_bind(struct drm_device *dev,
- struct drm_agp_binding *request)
-{
- return -ENODEV;
-}
-
-#endif /* CONFIG_AGP */
-
-#endif /* _DRM_AGPSUPPORT_H_ */
diff --git a/include/drm/drm_aperture.h b/include/drm/drm_aperture.h
new file mode 100644
index 000000000000..7096703c3949
--- /dev/null
+++ b/include/drm/drm_aperture.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef _DRM_APERTURE_H_
+#define _DRM_APERTURE_H_
+
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_driver;
+struct pci_dev;
+
+int devm_aperture_acquire_from_firmware(struct drm_device *dev, resource_size_t base,
+ resource_size_t size);
+
+int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size,
+ bool primary, const struct drm_driver *req_driver);
+
+int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
+ const struct drm_driver *req_driver);
+
+/**
+ * drm_aperture_remove_framebuffers - remove all existing framebuffers
+ * @primary: also kick vga16fb if present
+ * @req_driver: requesting DRM driver
+ *
+ * This function removes all conflicting graphics device drivers. Use it on
+ * systems that can have their framebuffer located anywhere in memory.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise
+ */
+static inline int
+drm_aperture_remove_framebuffers(bool primary, const struct drm_driver *req_driver)
+{
+ return drm_aperture_remove_conflicting_framebuffers(0, (resource_size_t)-1, primary,
+ req_driver);
+}
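A sketch of the intended call site, assuming a PCI-based driver and a hypothetical `example_driver` &struct drm_driver:

static int example_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	int ret;

	/* Kick out firmware framebuffers before taking over the device. */
	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev,
							       &example_driver);
	if (ret)
		return ret;

	/* ... normal device initialization continues here ... */
	return 0;
}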
+
+#endif
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 951dfb15c27b..10b1990bc1f6 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -66,6 +66,8 @@
*
* For an implementation of how to use this look at
* drm_atomic_helper_setup_commit() from the atomic helper library.
+ *
+ * See also drm_crtc_commit_wait().
*/
struct drm_crtc_commit {
/**
@@ -103,7 +105,7 @@ struct drm_crtc_commit {
*
* Will be signalled when all hw register changes for this commit have
* been written out. Especially when disabling a pipe this can be much
- * later than than @flip_done, since that can signal already when the
+ * later than @flip_done, since that can signal already when the
* screen goes black, whereas to fully shut down a pipe more register
* I/O is required.
*
@@ -225,6 +227,18 @@ struct drm_private_state_funcs {
*/
void (*atomic_destroy_state)(struct drm_private_obj *obj,
struct drm_private_state *state);
+
+ /**
+ * @atomic_print_state:
+ *
+ * If driver subclasses &struct drm_private_state, it should implement
+ * this optional hook for printing additional driver specific state.
+ *
+ * Do not call this directly, use drm_atomic_private_obj_print_state()
+ * instead.
+ */
+ void (*atomic_print_state)(struct drm_printer *p,
+ const struct drm_private_state *state);
};
/**
@@ -248,6 +262,26 @@ struct drm_private_state_funcs {
* drm_dev_register()
* 2/ all calls to drm_atomic_private_obj_fini() must be done after calling
* drm_dev_unregister()
+ *
+ * If that private object is used to store a state shared by multiple
+ * CRTCs, proper care must be taken to ensure that non-blocking commits are
+ * properly ordered to avoid a use-after-free issue.
+ *
+ * Indeed, for a sequence of two non-blocking &drm_atomic_commit on two
+ * different &drm_crtc using different &drm_plane and &drm_connector, so with no
+ * resources shared, there's no guarantee on which commit is going to happen
+ * first. However, the second &drm_atomic_commit will consider the
+ * &drm_private_obj state of the first commit its old state, and will be in
+ * charge of freeing it whenever the second &drm_atomic_commit is done.
+ *
+ * If the first &drm_atomic_commit happens after it, it will consider its
+ * &drm_private_obj the new state and is likely to access it, resulting in an
+ * access to a freed memory region. Drivers should store (and get a reference
+ * to) the &drm_crtc_commit structure in their private state in
+ * &drm_mode_config_helper_funcs.atomic_commit_setup, and then wait for that
+ * commit to complete as the first step of
+ * &drm_mode_config_helper_funcs.atomic_commit_tail, similar to
+ * drm_atomic_helper_wait_for_dependencies().
*/
struct drm_private_obj {
/**
@@ -289,14 +323,21 @@ struct drm_private_obj {
/**
* struct drm_private_state - base struct for driver private object state
- * @state: backpointer to global drm_atomic_state
*
- * Currently only contains a backpointer to the overall atomic update, but in
- * the future also might hold synchronization information similar to e.g.
- * &drm_crtc.commit.
+ * Currently only contains backpointers to the overall atomic update and the
+ * relevant private object, but in the future it might also hold
+ * synchronization information similar to e.g. &drm_crtc.commit.
*/
struct drm_private_state {
+ /**
+ * @state: backpointer to global drm_atomic_state
+ */
struct drm_atomic_state *state;
+
+ /**
+ * @obj: backpointer to the private object
+ */
+ struct drm_private_obj *obj;
};
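A sketch of the commit-ordering pattern described for &drm_private_obj above; the example_shared_state type and the example_* lookup helpers are hypothetical stand-ins for driver code:

static int example_atomic_commit_setup(struct drm_atomic_state *state)
{
	struct example_shared_state *priv = example_get_new_priv(state);

	/* Hold a reference until the next commit has waited on it. */
	priv->commit = drm_crtc_commit_get(example_find_crtc_commit(state));

	return 0;
}

static void example_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct example_shared_state *old_priv = example_get_old_priv(state);

	if (old_priv->commit)
		drm_crtc_commit_wait(old_priv->commit);

	/* ... program the hardware ... */
}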
struct __drm_private_objs_state {
@@ -308,7 +349,6 @@ struct __drm_private_objs_state {
* struct drm_atomic_state - the global state object for atomic updates
* @ref: count of all references to this state (will not be freed until zero)
* @dev: parent DRM device
- * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
* @async_update: hint for asynchronous plane update
* @planes: pointer to array of structures with per-plane data
* @crtcs: pointer to array of CRTC pointers
@@ -336,6 +376,17 @@ struct drm_atomic_state {
* drm_atomic_crtc_needs_modeset().
*/
bool allow_modeset : 1;
+ /**
+ * @legacy_cursor_update:
+ *
+ * Hint to enforce legacy cursor IOCTL semantics.
+ *
+ * WARNING: This is thoroughly broken and pretty much impossible to
+ * implement correctly. Drivers must ignore this and should instead
+ * implement &drm_plane_helper_funcs.atomic_async_check and
+ * &drm_plane_helper_funcs.atomic_async_update hooks. New users of this
+ * flag are not allowed.
+ */
bool legacy_cursor_update : 1;
bool async_update : 1;
/**
@@ -406,6 +457,8 @@ static inline void drm_crtc_commit_put(struct drm_crtc_commit *commit)
kref_put(&commit->ref, __drm_crtc_commit_free);
}
+int drm_crtc_commit_wait(struct drm_crtc_commit *commit);
+
struct drm_atomic_state * __must_check
drm_atomic_state_alloc(struct drm_device *dev);
void drm_atomic_state_clear(struct drm_atomic_state *state);
@@ -670,6 +723,9 @@ __drm_atomic_get_current_plane_state(struct drm_atomic_state *state,
}
int __must_check
+drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
+ struct drm_encoder *encoder);
+int __must_check
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
struct drm_crtc *crtc);
int __must_check
@@ -770,7 +826,8 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(void)(crtc) /* Only to avoid unused-but-set-variable warning */, \
(old_crtc_state) = (__state)->crtcs[__i].old_state, \
(void)(old_crtc_state) /* Only to avoid unused-but-set-variable warning */, \
- (new_crtc_state) = (__state)->crtcs[__i].new_state, 1))
+ (new_crtc_state) = (__state)->crtcs[__i].new_state, \
+ (void)(new_crtc_state) /* Only to avoid unused-but-set-variable warning */, 1))
/**
* for_each_old_crtc_in_state - iterate over all CRTCs in an atomic update
@@ -789,6 +846,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(__i)++) \
for_each_if ((__state)->crtcs[__i].ptr && \
((crtc) = (__state)->crtcs[__i].ptr, \
+ (void)(crtc) /* Only to avoid unused-but-set-variable warning */, \
(old_crtc_state) = (__state)->crtcs[__i].old_state, 1))
/**
@@ -857,6 +915,22 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(new_plane_state) = (__state)->planes[__i].new_state, 1))
/**
+ * for_each_new_plane_in_state_reverse - iterate over all planes in an atomic
+ * update in reverse order, tracking only the new state
+ * @__state: &struct drm_atomic_state pointer
+ * @plane: &struct drm_plane iteration cursor
+ * @new_plane_state: &struct drm_plane_state iteration cursor for the new state
+ * @__i: int iteration cursor, for macro-internal use
+ */
+#define for_each_new_plane_in_state_reverse(__state, plane, new_plane_state, __i) \
+ for ((__i) = ((__state)->dev->mode_config.num_total_plane - 1); \
+ (__i) >= 0; \
+ (__i)--) \
+ for_each_if ((__state)->planes[__i].ptr && \
+ ((plane) = (__state)->planes[__i].ptr, \
+ (new_plane_state) = (__state)->planes[__i].new_state, 1))
+
+/**
* for_each_old_plane_in_state - iterate over all planes in an atomic update
* @__state: &struct drm_atomic_state pointer
* @plane: &struct drm_plane iteration cursor
@@ -948,6 +1022,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
for ((__i) = 0; \
(__i) < (__state)->num_private_objs && \
((obj) = (__state)->private_objs[__i].ptr, \
+ (void)(obj) /* Only to avoid unused-but-set-variable warning */, \
(new_obj_state) = (__state)->private_objs[__i].new_state, 1); \
(__i)++)
@@ -992,4 +1067,77 @@ drm_atomic_crtc_effectively_active(const struct drm_crtc_state *state)
return state->active || state->self_refresh_active;
}
+/**
+ * struct drm_bus_cfg - bus configuration
+ *
+ * This structure stores the configuration of a physical bus between two
+ * components in an output pipeline, usually between two bridges, an encoder
+ * and a bridge, or a bridge and a connector.
+ *
+ * The bus configuration is stored in &drm_bridge_state separately for the
+ * input and output buses, as seen from the point of view of each bridge. The
+ * bus configuration of a bridge output is usually identical to the
+ * configuration of the next bridge's input, but may differ if the signals are
+ * modified between the two bridges, for instance by an inverter on the board.
+ * The input and output configurations of a bridge may differ if the bridge
+ * modifies the signals internally, for instance by performing format
+ * conversion, or modifying signal polarities.
+ */
+struct drm_bus_cfg {
+ /**
+ * @format: format used on this bus (one of the MEDIA_BUS_FMT_* format)
+ *
+ * This field should not be directly modified by drivers
+ * (drm_atomic_bridge_chain_select_bus_fmts() takes care of the bus
+ * format negotiation).
+ */
+ u32 format;
+
+ /**
+ * @flags: DRM_BUS_* flags used on this bus
+ */
+ u32 flags;
+};
+
+/**
+ * struct drm_bridge_state - Atomic bridge state object
+ */
+struct drm_bridge_state {
+ /**
+ * @base: inherit from &drm_private_state
+ */
+ struct drm_private_state base;
+
+ /**
+ * @bridge: the bridge this state refers to
+ */
+ struct drm_bridge *bridge;
+
+ /**
+ * @input_bus_cfg: input bus configuration
+ */
+ struct drm_bus_cfg input_bus_cfg;
+
+ /**
+ * @output_bus_cfg: output bus configuration
+ */
+ struct drm_bus_cfg output_bus_cfg;
+};
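A sketch of a bridge consuming its negotiated bus configuration from its &drm_bridge_funcs.atomic_check hook; the DE-polarity tweak is an invented example:

static int example_bridge_atomic_check(struct drm_bridge *bridge,
				       struct drm_bridge_state *bridge_state,
				       struct drm_crtc_state *crtc_state,
				       struct drm_connector_state *conn_state)
{
	/* Reject an unnegotiated output format. */
	if (bridge_state->output_bus_cfg.format == MEDIA_BUS_FMT_FIXED)
		return -EINVAL;

	/* Bus flags may be adjusted here, per the atomic_check contract. */
	bridge_state->input_bus_cfg.flags |= DRM_BUS_FLAG_DE_HIGH;

	return 0;
}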
+
+static inline struct drm_bridge_state *
+drm_priv_to_bridge_state(struct drm_private_state *priv)
+{
+ return container_of(priv, struct drm_bridge_state, base);
+}
+
+struct drm_bridge_state *
+drm_atomic_get_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge);
+struct drm_bridge_state *
+drm_atomic_get_old_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge);
+struct drm_bridge_state *
+drm_atomic_get_new_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge);
+
#endif /* DRM_ATOMIC_H_ */
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 9db3cac48f4f..06d8902a8097 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -34,18 +34,32 @@
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_util.h>
+/*
+ * Drivers that don't allow primary plane scaling may pass this macro in place
+ * of the min/max scale parameters of the plane-state checker function.
+ *
+ * Due to src being in 16.16 fixed point and dest being in integer pixels,
+ * 1<<16 represents no scaling.
+ */
+#define DRM_PLANE_NO_SCALING (1<<16)
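A usage sketch: a plane atomic check that forbids scaling by passing the macro for both bounds of drm_atomic_helper_check_plane_state(), declared below; the wrapper function is illustrative only:

static int example_plane_atomic_check(struct drm_plane_state *new_state,
				      const struct drm_crtc_state *crtc_state)
{
	return drm_atomic_helper_check_plane_state(new_state, crtc_state,
						   DRM_PLANE_NO_SCALING,
						   DRM_PLANE_NO_SCALING,
						   false /* can_position */,
						   true /* can_update_disabled */);
}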
+
struct drm_atomic_state;
struct drm_private_obj;
struct drm_private_state;
int drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state);
+int
+drm_atomic_helper_check_wb_encoder_state(struct drm_encoder *encoder,
+ struct drm_connector_state *conn_state);
int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
const struct drm_crtc_state *crtc_state,
int min_scale,
int max_scale,
bool can_position,
bool can_update_disabled);
+int drm_atomic_helper_check_crtc_state(struct drm_crtc_state *crtc_state,
+ bool can_disable_primary_plane);
int drm_atomic_helper_check_planes(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_atomic_helper_check(struct drm_device *dev,
@@ -74,6 +88,9 @@ void
drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
struct drm_atomic_state *old_state);
+void
+drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state);
+
void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
@@ -144,10 +161,6 @@ int drm_atomic_helper_page_flip_target(
uint32_t flags,
uint32_t target,
struct drm_modeset_acquire_ctx *ctx);
-int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
- u16 *red, u16 *green, u16 *blue,
- uint32_t size,
- struct drm_modeset_acquire_ctx *ctx);
/**
* drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
@@ -164,7 +177,7 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask)
/**
- * drm_crtc_atomic_state_for_each_plane - iterate over attached planes in new state
+ * drm_atomic_crtc_state_for_each_plane - iterate over attached planes in new state
* @plane: the loop cursor
* @crtc_state: the incoming CRTC state
*
@@ -177,7 +190,7 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)
/**
- * drm_crtc_atomic_state_for_each_plane_state - iterate over attached planes in new state
+ * drm_atomic_crtc_state_for_each_plane_state - iterate over attached planes in new state
* @plane: the loop cursor
* @plane_state: loop cursor for the plane's state, must be const
* @crtc_state: the incoming CRTC state
@@ -224,4 +237,12 @@ drm_atomic_plane_disabling(struct drm_plane_state *old_plane_state,
return old_plane_state->crtc && !new_plane_state->crtc;
}
+u32 *
+drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts);
+
#endif /* DRM_ATOMIC_HELPER_H_ */
diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h
index 8171dea4cc22..3f8f1d627f7c 100644
--- a/include/drm/drm_atomic_state_helper.h
+++ b/include/drm/drm_atomic_state_helper.h
@@ -26,6 +26,8 @@
#include <linux/types.h>
+struct drm_bridge;
+struct drm_bridge_state;
struct drm_crtc;
struct drm_crtc_state;
struct drm_plane;
@@ -80,3 +82,14 @@ void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state);
void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
struct drm_private_state *state);
+
+void __drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge,
+ struct drm_bridge_state *state);
+struct drm_bridge_state *
+drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge);
+void drm_atomic_helper_bridge_destroy_state(struct drm_bridge *bridge,
+ struct drm_bridge_state *state);
+void __drm_atomic_helper_bridge_reset(struct drm_bridge *bridge,
+ struct drm_bridge_state *state);
+struct drm_bridge_state *
+drm_atomic_helper_bridge_reset(struct drm_bridge *bridge);
diff --git a/include/drm/drm_atomic_uapi.h b/include/drm/drm_atomic_uapi.h
index 8cec52ad1277..4c6d39d7bdb2 100644
--- a/include/drm/drm_atomic_uapi.h
+++ b/include/drm/drm_atomic_uapi.h
@@ -49,8 +49,6 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
struct drm_crtc *crtc);
void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
struct drm_framebuffer *fb);
-void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
- struct dma_fence *fence);
int __must_check
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
struct drm_crtc *crtc);
diff --git a/include/drm/drm_audio_component.h b/include/drm/drm_audio_component.h
index a45f93487039..0d36bfd1a4cd 100644
--- a/include/drm/drm_audio_component.h
+++ b/include/drm/drm_audio_component.h
@@ -117,6 +117,10 @@ struct drm_audio_component {
* @audio_ops: Ops implemented by hda driver, called by DRM driver
*/
const struct drm_audio_component_audio_ops *audio_ops;
+ /**
+ * @master_bind_complete: completion held during component master binding
+ */
+ struct completion master_bind_complete;
};
#endif /* _DRM_AUDIO_COMPONENT_H_ */
diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h
index 6bf8b2b78991..ba248ca8866f 100644
--- a/include/drm/drm_auth.h
+++ b/include/drm/drm_auth.h
@@ -58,12 +58,6 @@ struct drm_lock_data {
* @refcount: Refcount for this master object.
* @dev: Link back to the DRM device
* @driver_priv: Pointer to driver-private information.
- * @lessor: Lease holder
- * @lessee_id: id for lessees. Owners always have id 0
- * @lessee_list: other lessees of the same master
- * @lessees: drm_masters leasing from this one
- * @leases: Objects leased to this drm_master.
- * @lessee_idr: All lessees under this owner (only used where lessor == NULL)
*
* Note that master structures are only relevant for the legacy/primary device
* nodes, hence there can only be one per device, not one per drm_minor.
@@ -88,17 +82,68 @@ struct drm_master {
struct idr magic_map;
void *driver_priv;
- /* Tree of display resource leases, each of which is a drm_master struct
- * All of these get activated simultaneously, so drm_device master points
- * at the top of the tree (for which lessor is NULL). Protected by
- * &drm_device.mode_config.idr_mutex.
+ /**
+ * @lessor:
+ *
+ * Lease grantor, only set if this &struct drm_master represents a
+ * lessee holding a lease of objects from @lessor. Full owners of the
+ * device have this set to NULL.
+ *
+ * The lessor does not change once it's set in drm_lease_create(), and
+ * each lessee holds a reference to its lessor that it releases upon
+ * being destroyed in drm_lease_destroy().
+ *
+ * See also the :ref:`section on display resource leasing
+ * <drm_leasing>`.
*/
-
struct drm_master *lessor;
+
+ /**
+ * @lessee_id:
+ *
+ * ID for lessees. Owners (i.e. @lessor is NULL) always have ID 0.
+ * Protected by &drm_device.mode_config's &drm_mode_config.idr_mutex.
+ */
int lessee_id;
+
+ /**
+ * @lessee_list:
+ *
+ * List entry of lessees of @lessor, where they are linked to @lessees.
+ * Not used for owners. Protected by &drm_device.mode_config's
+ * &drm_mode_config.idr_mutex.
+ */
struct list_head lessee_list;
+
+ /**
+ * @lessees:
+ *
+ * List of drm_masters leasing from this one. Protected by
+ * &drm_device.mode_config's &drm_mode_config.idr_mutex.
+ *
+ * This list is empty if no leases have been granted, or if all lessees
+ * have been destroyed. Since lessors are referenced by all their
+ * lessees, this master cannot be destroyed unless the list is empty.
+ */
struct list_head lessees;
+
+ /**
+ * @leases:
+ *
+ * Objects leased to this drm_master. Protected by
+ * &drm_device.mode_config's &drm_mode_config.idr_mutex.
+ *
+ * Objects are leased all together in drm_lease_create(), and are
+ * removed all together when the lease is revoked.
+ */
struct idr leases;
+
+ /**
+ * @lessee_idr:
+ *
+ * All lessees under this owner (only used where @lessor is NULL).
+ * Protected by &drm_device.mode_config's &drm_mode_config.idr_mutex.
+ */
struct idr lessee_idr;
/* private: */
#if IS_ENABLED(CONFIG_DRM_LEGACY)
@@ -107,6 +152,7 @@ struct drm_master {
};
struct drm_master *drm_master_get(struct drm_master *master);
+struct drm_master *drm_file_get_master(struct drm_file *file_priv);
void drm_master_put(struct drm_master **master);
bool drm_is_current_master(struct drm_file *fpriv);
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 694e153a7531..6b65b0dfb4fb 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -23,15 +23,33 @@
#ifndef __DRM_BRIDGE_H__
#define __DRM_BRIDGE_H__
-#include <linux/list.h>
#include <linux/ctype.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#include <drm/drm_atomic.h>
#include <drm/drm_encoder.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_modes.h>
struct drm_bridge;
struct drm_bridge_timings;
+struct drm_connector;
+struct drm_display_info;
struct drm_panel;
+struct edid;
+struct i2c_adapter;
+
+/**
+ * enum drm_bridge_attach_flags - Flags for &drm_bridge_funcs.attach
+ */
+enum drm_bridge_attach_flags {
+ /**
+ * @DRM_BRIDGE_ATTACH_NO_CONNECTOR: When this flag is set the bridge
+ * shall not create a drm_connector.
+ */
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR = BIT(0),
+};
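A sketch of an attach implementation honouring this flag; example_bridge_create_connector() is a hypothetical helper:

static int example_bridge_attach(struct drm_bridge *bridge,
				 enum drm_bridge_attach_flags flags)
{
	/* With NO_CONNECTOR, the display driver creates the connector. */
	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
		return 0;

	return example_bridge_create_connector(bridge);
}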
/**
* struct drm_bridge_funcs - drm_bridge control functions
@@ -41,7 +59,8 @@ struct drm_bridge_funcs {
* @attach:
*
* This callback is invoked whenever our bridge is being attached to a
- * &drm_encoder.
+ * &drm_encoder. The flags argument tunes the behaviour of the attach
+ * operation (see DRM_BRIDGE_ATTACH_*).
*
* The @attach callback is optional.
*
@@ -49,7 +68,8 @@ struct drm_bridge_funcs {
*
* Zero on success, error code on failure.
*/
- int (*attach)(struct drm_bridge *bridge);
+ int (*attach)(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags);
/**
* @detach:
@@ -93,6 +113,7 @@ struct drm_bridge_funcs {
* drm_mode_status Enum
*/
enum drm_mode_status (*mode_valid)(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
const struct drm_display_mode *mode);
/**
@@ -109,7 +130,9 @@ struct drm_bridge_funcs {
* this function passes all other callbacks must succeed for this
* configuration.
*
- * The @mode_fixup callback is optional.
+ * The mode_fixup callback is optional. &drm_bridge_funcs.mode_fixup()
+ * is not called when &drm_bridge_funcs.atomic_check() is implemented,
+ * so only one of them should be provided.
*
* NOTE:
*
@@ -148,6 +171,11 @@ struct drm_bridge_funcs {
* signals) feeding it is still running when this callback is called.
*
* The @disable callback is optional.
+ *
+ * NOTE:
+ *
+ * This is deprecated, do not use!
+ * New drivers shall use &drm_bridge_funcs.atomic_disable.
*/
void (*disable)(struct drm_bridge *bridge);
@@ -167,6 +195,11 @@ struct drm_bridge_funcs {
* called.
*
* The @post_disable callback is optional.
+ *
+ * NOTE:
+ *
+ * This is deprecated, do not use!
+ * New drivers shall use &drm_bridge_funcs.atomic_post_disable.
*/
void (*post_disable)(struct drm_bridge *bridge);
@@ -192,9 +225,9 @@ struct drm_bridge_funcs {
*
* NOTE:
*
- * If a need arises to store and access modes adjusted for other
- * locations than the connection between the CRTC and the first bridge,
- * the DRM framework will have to be extended with DRM bridge states.
+ * This is deprecated, do not use!
+ * New drivers shall set their mode in the
+ * &drm_bridge_funcs.atomic_enable operation.
*/
void (*mode_set)(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
@@ -216,6 +249,11 @@ struct drm_bridge_funcs {
* there is one) when this callback is called.
*
* The @pre_enable callback is optional.
+ *
+ * NOTE:
+ *
+ * This is deprecated, do not use!
+ * New drivers shall use &drm_bridge_funcs.atomic_pre_enable.
*/
void (*pre_enable)(struct drm_bridge *bridge);
@@ -236,6 +274,11 @@ struct drm_bridge_funcs {
* chain if there is one.
*
* The @enable callback is optional.
+ *
+ * NOTE:
+ *
+ * This is deprecated, do not use!
+ * New drivers shall use &drm_bridge_funcs.atomic_enable.
*/
void (*enable)(struct drm_bridge *bridge);
@@ -263,7 +306,7 @@ struct drm_bridge_funcs {
* The @atomic_pre_enable callback is optional.
*/
void (*atomic_pre_enable)(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state);
+ struct drm_bridge_state *old_bridge_state);
/**
* @atomic_enable:
@@ -288,7 +331,7 @@ struct drm_bridge_funcs {
* The @atomic_enable callback is optional.
*/
void (*atomic_enable)(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state);
+ struct drm_bridge_state *old_bridge_state);
/**
* @atomic_disable:
*
@@ -311,7 +354,7 @@ struct drm_bridge_funcs {
* The @atomic_disable callback is optional.
*/
void (*atomic_disable)(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state);
+ struct drm_bridge_state *old_bridge_state);
/**
* @atomic_post_disable:
@@ -337,7 +380,282 @@ struct drm_bridge_funcs {
* The @atomic_post_disable callback is optional.
*/
void (*atomic_post_disable)(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state);
+ struct drm_bridge_state *old_bridge_state);
+
+ /**
+ * @atomic_duplicate_state:
+ *
+ * Duplicate the current bridge state object (which is guaranteed to be
+ * non-NULL).
+ *
+ * The atomic_duplicate_state hook is mandatory if the bridge
+ * implements any of the atomic hooks, and should be left unassigned
+ * otherwise. For bridges that don't subclass &drm_bridge_state, the
+ * drm_atomic_helper_bridge_duplicate_state() helper function shall be
+ * used to implement this hook.
+ *
+ * RETURNS:
+ * A valid drm_bridge_state object or NULL if the allocation fails.
+ */
+ struct drm_bridge_state *(*atomic_duplicate_state)(struct drm_bridge *bridge);
+
+ /**
+ * @atomic_destroy_state:
+ *
+ * Destroy a bridge state object previously allocated by
+ * &drm_bridge_funcs.atomic_duplicate_state().
+ *
+ * The atomic_destroy_state hook is mandatory if the bridge implements
+ * any of the atomic hooks, and should be left unassigned otherwise.
+ * For bridges that don't subclass &drm_bridge_state, the
+ * drm_atomic_helper_bridge_destroy_state() helper function shall be
+ * used to implement this hook.
+ */
+ void (*atomic_destroy_state)(struct drm_bridge *bridge,
+ struct drm_bridge_state *state);
+
+ /**
+ * @atomic_get_output_bus_fmts:
+ *
+ * Return the supported bus formats on the output end of a bridge.
+ * The returned array must be allocated with kmalloc() and will be
+ * freed by the caller. If the allocation fails, NULL should be
+ * returned. num_output_fmts must be set to the returned array size.
+ * Formats listed in the returned array should be listed in decreasing
+ * preference order (the core will try all formats until it finds one
+ * that works).
+ *
+ * This method is only called on the last element of the bridge chain
+ * as part of the bus format negotiation process that happens in
+ * &drm_atomic_bridge_chain_select_bus_fmts().
+ * This method is optional. When not implemented, the core will
+ * fall back to &drm_connector.display_info.bus_formats[0] if
+ * &drm_connector.display_info.num_bus_formats > 0,
+ * or to MEDIA_BUS_FMT_FIXED otherwise.
+ */
+ u32 *(*atomic_get_output_bus_fmts)(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts);
+
+ /**
+ * @atomic_get_input_bus_fmts:
+ *
+ * Return the supported bus formats on the input end of a bridge for
+ * a specific output bus format.
+ *
+ * The returned array must be allocated with kmalloc() and will be
+ * freed by the caller. If the allocation fails, NULL should be
+ * returned. num_input_fmts must be set to the returned array size.
+ * Formats listed in the returned array should be listed in decreasing
+ * preference order (the core will try all formats until it finds one
+ * that works). When the format is not supported, NULL should be
+ * returned and num_input_fmts should be set to 0.
+ *
+ * This method is called on all elements of the bridge chain as part of
+ * the bus format negotiation process that happens in
+ * drm_atomic_bridge_chain_select_bus_fmts().
+ * This method is optional. When not implemented, the core will bypass
+ * bus format negotiation on this element of the bridge without
+ * failing, and the previous element in the chain will be passed
+ * MEDIA_BUS_FMT_FIXED as its output bus format.
+ *
+ * Bridge drivers that need to support being linked to bridges that are
+ * not supporting bus format negotiation should handle the
+ * output_fmt == MEDIA_BUS_FMT_FIXED case appropriately, by selecting a
+ * sensible default value or extracting this information from somewhere
+ * else (FW property, &drm_display_mode, &drm_display_info, ...)
+ *
+ * Note: Even if input format selection on the first bridge has no
+ * impact on the negotiation process (bus format negotiation stops once
+ * we reach the first element of the chain), drivers are expected to
+ * return accurate input formats as the input format may be used to
+ * configure the CRTC output appropriately.
+ */
+ u32 *(*atomic_get_input_bus_fmts)(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts);
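
As a rough illustration of the expected shape of this hook, the sketch below
accepts a single hard-coded input format regardless of the negotiated output
format; the my_* name and the RGB888 choice are assumptions for illustration,
not something mandated by this API:

#include <linux/media-bus-format.h>
#include <linux/slab.h>
#include <drm/drm_bridge.h>

static u32 *
my_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
                             struct drm_bridge_state *bridge_state,
                             struct drm_crtc_state *crtc_state,
                             struct drm_connector_state *conn_state,
                             u32 output_fmt,
                             unsigned int *num_input_fmts)
{
        u32 *input_fmts;

        /* The core frees this array, so it must come from kmalloc(). */
        input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
        if (!input_fmts) {
                *num_input_fmts = 0;
                return NULL;
        }

        /* Accept a single input format, whatever output_fmt was chosen. */
        input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
        *num_input_fmts = 1;

        return input_fmts;
}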
+
+ /**
+ * @atomic_check:
+ *
+ * This method is responsible for checking bridge state correctness.
+ * It can also check the state of the surrounding components in chain
+ * to make sure the whole pipeline can work properly.
+ *
+ * &drm_bridge_funcs.atomic_check() hooks are called in reverse
+ * order (from the last to the first bridge).
+ *
+ * This method is optional. &drm_bridge_funcs.mode_fixup() is not
+ * called when &drm_bridge_funcs.atomic_check() is implemented, so only
+ * one of them should be provided.
+ *
+ * If drivers need to tweak &drm_bridge_state.input_bus_cfg.flags or
+ * &drm_bridge_state.output_bus_cfg.flags it should happen in
+ * this function. By default the &drm_bridge_state.output_bus_cfg.flags
+ * field is set to the next bridge's
+ * &drm_bridge_state.input_bus_cfg.flags value or
+ * &drm_connector.display_info.bus_flags if the bridge is the last
+ * element in the chain.
+ *
+ * RETURNS:
+ * zero if the check passed, a negative error code otherwise.
+ */
+ int (*atomic_check)(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+
+ /**
+ * @atomic_reset:
+ *
+ * Reset the bridge to a predefined state (or retrieve its current
+ * state) and return a &drm_bridge_state object matching this state.
+ * This function is called at attach time.
+ *
+ * The atomic_reset hook is mandatory if the bridge implements any of
+ * the atomic hooks, and should be left unassigned otherwise. For
+ * bridges that don't subclass &drm_bridge_state, the
+ * drm_atomic_helper_bridge_reset() helper function shall be used to
+ * implement this hook.
+ *
+ * Note that the atomic_reset() semantics do not exactly match the
+ * reset() semantics found on other components (connector, plane, ...).
+ *
+ * 1. The reset operation happens when the bridge is attached, not when
+ * drm_mode_config_reset() is called
+ * 2. It's meant to be used exclusively on bridges that have been
+ * converted to the ATOMIC API
+ *
+ * RETURNS:
+ * A valid drm_bridge_state object in case of success, an ERR_PTR()
+ * giving the reason of the failure otherwise.
+ */
+ struct drm_bridge_state *(*atomic_reset)(struct drm_bridge *bridge);
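
Taken together, the three mandatory state hooks above can simply point at the
generic helpers when the bridge does not subclass &drm_bridge_state. A minimal
sketch, assuming a hypothetical my_bridge driver:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>

/* Sketch only: a funcs table for a bridge without subclassed state. */
static const struct drm_bridge_funcs my_bridge_funcs = {
        /* ... atomic enable/disable hooks omitted ... */
        .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
        .atomic_destroy_state   = drm_atomic_helper_bridge_destroy_state,
        .atomic_reset           = drm_atomic_helper_bridge_reset,
};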
+
+ /**
+ * @detect:
+ *
+ * Check if anything is attached to the bridge output.
+ *
+ * This callback is optional; if not implemented, the bridge will be
+ * considered as always having a component attached to its output.
+ * Bridges that implement this callback shall set the
+ * DRM_BRIDGE_OP_DETECT flag in their &drm_bridge->ops.
+ *
+ * RETURNS:
+ *
+ * drm_connector_status indicating the bridge output status.
+ */
+ enum drm_connector_status (*detect)(struct drm_bridge *bridge);
+
+ /**
+ * @get_modes:
+ *
+ * Fill all modes currently valid for the sink into the &drm_connector
+ * with drm_mode_probed_add().
+ *
+ * The @get_modes callback is mostly intended to support non-probeable
+ * displays such as many fixed panels. Bridges that support reading
+ * EDID shall leave @get_modes unimplemented and implement the
+ * &drm_bridge_funcs->get_edid callback instead.
+ *
+ * This callback is optional. Bridges that implement it shall set the
+ * DRM_BRIDGE_OP_MODES flag in their &drm_bridge->ops.
+ *
+ * The connector parameter shall be used for the sole purpose of
+ * filling modes, and shall not be stored internally by bridge drivers
+ * for future usage.
+ *
+ * RETURNS:
+ *
+ * The number of modes added by calling drm_mode_probed_add().
+ */
+ int (*get_modes)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+
+ /**
+ * @get_edid:
+ *
+ * Read and parse the EDID data of the connected display.
+ *
+ * The @get_edid callback is the preferred way of reporting mode
+ * information for a display connected to the bridge output. Bridges
+ * that support reading EDID shall implement this callback and leave
+ * the @get_modes callback unimplemented.
+ *
+ * The caller of this operation shall first verify the output
+ * connection status and refrain from reading EDID from a disconnected
+ * output.
+ *
+ * This callback is optional. Bridges that implement it shall set the
+ * DRM_BRIDGE_OP_EDID flag in their &drm_bridge->ops.
+ *
+ * The connector parameter shall be used for the sole purpose of EDID
+ * retrieval and parsing, and shall not be stored internally by bridge
+ * drivers for future usage.
+ *
+ * RETURNS:
+ *
+ * An edid structure newly allocated with kmalloc() (or similar) on
+ * success, or NULL otherwise. The caller is responsible for freeing
+ * the returned edid structure with kfree().
+ */
+ struct edid *(*get_edid)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
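
A minimal sketch of this hook for a DDC-connected sink, assuming the driver
stored its I2C adapter in the bridge's @ddc field (my_bridge_get_edid is an
illustrative name):

#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>

static struct edid *my_bridge_get_edid(struct drm_bridge *bridge,
                                       struct drm_connector *connector)
{
        /* drm_get_edid() kmalloc()s the EDID; the caller kfree()s it. */
        return drm_get_edid(connector, bridge->ddc);
}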
+
+ /**
+ * @hpd_notify:
+ *
+ * Notify the bridge of hot plug detection.
+ *
+ * This callback is optional; it may be implemented by bridges that
+ * need to be notified of display connection or disconnection for
+ * internal reasons. One use case is to reset the internal state of CEC
+ * controllers for HDMI bridges.
+ */
+ void (*hpd_notify)(struct drm_bridge *bridge,
+ enum drm_connector_status status);
+
+ /**
+ * @hpd_enable:
+ *
+ * Enable hot plug detection. From now on the bridge shall call
+ * drm_bridge_hpd_notify() each time a change is detected in the output
+ * connection status, until hot plug detection gets disabled with
+ * @hpd_disable.
+ *
+ * This callback is optional and shall only be implemented by bridges
+ * that support hot-plug notification without polling. Bridges that
+ * implement it shall also implement the @hpd_disable callback and set
+ * the DRM_BRIDGE_OP_HPD flag in their &drm_bridge->ops.
+ */
+ void (*hpd_enable)(struct drm_bridge *bridge);
+
+ /**
+ * @hpd_disable:
+ *
+ * Disable hot plug detection. Once this function returns the bridge
+ * shall not call drm_bridge_hpd_notify() when a change in the output
+ * connection status occurs.
+ *
+ * This callback is optional and shall only be implemented by bridges
+ * that support hot-plug notification without polling. Bridges that
+ * implement it shall also implement the @hpd_enable callback and set
+ * the DRM_BRIDGE_OP_HPD flag in their &drm_bridge->ops.
+ */
+ void (*hpd_disable)(struct drm_bridge *bridge);
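
To illustrate how these two hooks and drm_bridge_hpd_notify() fit together,
here is a hypothetical threaded interrupt handler meant to run between
@hpd_enable and @hpd_disable. my_bridge and my_bridge_link_up() are made-up
driver internals, and a threaded handler is assumed because the notification
path takes @hpd_mutex and so must be able to sleep:

#include <linux/interrupt.h>
#include <drm/drm_bridge.h>

/* Hypothetical driver wrapper around the core bridge object. */
struct my_bridge {
        struct drm_bridge bridge;
        void __iomem *regs;
};

static irqreturn_t my_bridge_hpd_irq_thread(int irq, void *arg)
{
        struct my_bridge *ctx = arg;
        enum drm_connector_status status;

        /* my_bridge_link_up() stands in for a hardware status read. */
        status = my_bridge_link_up(ctx) ? connector_status_connected
                                        : connector_status_disconnected;

        drm_bridge_hpd_notify(&ctx->bridge, status);

        return IRQ_HANDLED;
}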
+
+ /**
+ * @debugfs_init:
+ *
+ * Allows bridges to create bridge-specific debugfs files.
+ */
+ void (*debugfs_init)(struct drm_bridge *bridge, struct dentry *root);
};
/**
@@ -377,9 +695,44 @@ struct drm_bridge_timings {
};
/**
+ * enum drm_bridge_ops - Bitmask of operations supported by the bridge
+ */
+enum drm_bridge_ops {
+ /**
+ * @DRM_BRIDGE_OP_DETECT: The bridge can detect displays connected to
+ * its output. Bridges that set this flag shall implement the
+ * &drm_bridge_funcs->detect callback.
+ */
+ DRM_BRIDGE_OP_DETECT = BIT(0),
+ /**
+ * @DRM_BRIDGE_OP_EDID: The bridge can retrieve the EDID of the display
+ * connected to its output. Bridges that set this flag shall implement
+ * the &drm_bridge_funcs->get_edid callback.
+ */
+ DRM_BRIDGE_OP_EDID = BIT(1),
+ /**
+ * @DRM_BRIDGE_OP_HPD: The bridge can detect hot-plug and hot-unplug
+ * without requiring polling. Bridges that set this flag shall
+ * implement the &drm_bridge_funcs->hpd_enable and
+ * &drm_bridge_funcs->hpd_disable callbacks if they support enabling
+ * and disabling hot-plug detection dynamically.
+ */
+ DRM_BRIDGE_OP_HPD = BIT(2),
+ /**
+ * @DRM_BRIDGE_OP_MODES: The bridge can retrieve the modes supported
+ * by the display at its output. This does not include reading EDID
+ * which is separately covered by @DRM_BRIDGE_OP_EDID. Bridges that set
+ * this flag shall implement the &drm_bridge_funcs->get_modes callback.
+ */
+ DRM_BRIDGE_OP_MODES = BIT(3),
+};
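
For illustration, a driver advertises the operations it actually implements
when registering the bridge. A sketch reusing the hypothetical my_bridge
wrapper from above:

#include <drm/drm_bridge.h>

static void my_bridge_register(struct my_bridge *ctx)
{
        ctx->bridge.funcs = &my_bridge_funcs;
        /* Each flag promises that the matching funcs hook is implemented. */
        ctx->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
                          DRM_BRIDGE_OP_HPD;
        ctx->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
        drm_bridge_add(&ctx->bridge);
}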
+
+/**
* struct drm_bridge - central DRM bridge control structure
*/
struct drm_bridge {
+ /** @base: inherit from &drm_private_object */
+ struct drm_private_obj base;
/** @dev: DRM device this bridge belongs to */
struct drm_device *dev;
/** @encoder: encoder to which this bridge is connected */
@@ -402,13 +755,61 @@ struct drm_bridge {
const struct drm_bridge_funcs *funcs;
/** @driver_private: pointer to the bridge driver's internal context */
void *driver_private;
+ /** @ops: bitmask of operations supported by the bridge */
+ enum drm_bridge_ops ops;
+ /**
+ * @type: Type of the connection at the bridge output
+ * (DRM_MODE_CONNECTOR_*). For bridges at the end of this chain this
+ * identifies the type of connected display.
+ */
+ int type;
+ /**
+ * @interlace_allowed: Indicate that the bridge can handle interlaced
+ * modes.
+ */
+ bool interlace_allowed;
+ /**
+ * @ddc: Associated I2C adapter for DDC access, if any.
+ */
+ struct i2c_adapter *ddc;
+ /** private: */
+ /**
+ * @hpd_mutex: Protects the @hpd_cb and @hpd_data fields.
+ */
+ struct mutex hpd_mutex;
+ /**
+ * @hpd_cb: Hot plug detection callback, registered with
+ * drm_bridge_hpd_enable().
+ */
+ void (*hpd_cb)(void *data, enum drm_connector_status status);
+ /**
+ * @hpd_data: Private data passed to the Hot plug detection callback
+ * @hpd_cb.
+ */
+ void *hpd_data;
};
+static inline struct drm_bridge *
+drm_priv_to_bridge(struct drm_private_obj *priv)
+{
+ return container_of(priv, struct drm_bridge, base);
+}
+
void drm_bridge_add(struct drm_bridge *bridge);
+int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge);
void drm_bridge_remove(struct drm_bridge *bridge);
-struct drm_bridge *of_drm_find_bridge(struct device_node *np);
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
- struct drm_bridge *previous);
+ struct drm_bridge *previous,
+ enum drm_bridge_attach_flags flags);
+
+#ifdef CONFIG_OF
+struct drm_bridge *of_drm_find_bridge(struct device_node *np);
+#else
+static inline struct drm_bridge *of_drm_find_bridge(struct device_node *np)
+{
+ return NULL;
+}
+#endif
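
The extra flags argument enables connector-less attachment. A sketch of the
updated flow in a display-controller driver, pairing
DRM_BRIDGE_ATTACH_NO_CONNECTOR with the drm_bridge_connector helper added
later in this patch (my_encoder_attach_bridge is an illustrative name):

#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>

static int my_encoder_attach_bridge(struct drm_device *drm,
                                    struct drm_encoder *encoder,
                                    struct drm_bridge *bridge)
{
        struct drm_connector *connector;
        int ret;

        /* Attach the chain without letting the bridge create a connector. */
        ret = drm_bridge_attach(encoder, bridge, NULL,
                                DRM_BRIDGE_ATTACH_NO_CONNECTOR);
        if (ret)
                return ret;

        /* Create one connector covering the whole bridge chain instead. */
        connector = drm_bridge_connector_init(drm, encoder);
        if (IS_ERR(connector))
                return PTR_ERR(connector);

        return drm_connector_attach_encoder(connector, encoder);
}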
/**
* drm_bridge_get_next_bridge() - Get the next bridge in the chain
@@ -473,6 +874,7 @@ bool drm_bridge_chain_mode_fixup(struct drm_bridge *bridge,
struct drm_display_mode *adjusted_mode);
enum drm_mode_status
drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
const struct drm_display_mode *mode);
void drm_bridge_chain_disable(struct drm_bridge *bridge);
void drm_bridge_chain_post_disable(struct drm_bridge *bridge);
@@ -482,6 +884,9 @@ void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
void drm_bridge_chain_pre_enable(struct drm_bridge *bridge);
void drm_bridge_chain_enable(struct drm_bridge *bridge);
+int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
struct drm_atomic_state *state);
void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
@@ -491,17 +896,77 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
struct drm_atomic_state *state);
+u32 *
+drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts);
+
+enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge);
+int drm_bridge_get_modes(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+struct edid *drm_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+void drm_bridge_hpd_enable(struct drm_bridge *bridge,
+ void (*cb)(void *data,
+ enum drm_connector_status status),
+ void *data);
+void drm_bridge_hpd_disable(struct drm_bridge *bridge);
+void drm_bridge_hpd_notify(struct drm_bridge *bridge,
+ enum drm_connector_status status);
+
#ifdef CONFIG_DRM_PANEL_BRIDGE
+bool drm_bridge_is_panel(const struct drm_bridge *bridge);
struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel);
struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel,
u32 connector_type);
void drm_panel_bridge_remove(struct drm_bridge *bridge);
+int drm_panel_bridge_set_orientation(struct drm_connector *connector,
+ struct drm_bridge *bridge);
struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
struct drm_panel *panel);
struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
struct drm_panel *panel,
u32 connector_type);
+struct drm_bridge *drmm_panel_bridge_add(struct drm_device *drm,
+ struct drm_panel *panel);
struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge);
+#else
+static inline bool drm_bridge_is_panel(const struct drm_bridge *bridge)
+{
+ return false;
+}
+
+static inline int drm_panel_bridge_set_orientation(struct drm_connector *connector,
+ struct drm_bridge *bridge)
+{
+ return -EINVAL;
+}
+#endif
+
+#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL_BRIDGE)
+struct drm_bridge *devm_drm_of_get_bridge(struct device *dev, struct device_node *node,
+ u32 port, u32 endpoint);
+struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm, struct device_node *node,
+ u32 port, u32 endpoint);
+#else
+static inline struct drm_bridge *devm_drm_of_get_bridge(struct device *dev,
+ struct device_node *node,
+ u32 port,
+ u32 endpoint)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm,
+ struct device_node *node,
+ u32 port,
+ u32 endpoint)
+{
+ return ERR_PTR(-ENODEV);
+}
#endif
#endif
diff --git a/include/drm/drm_bridge_connector.h b/include/drm/drm_bridge_connector.h
new file mode 100644
index 000000000000..33f6c3bbdb4a
--- /dev/null
+++ b/include/drm/drm_bridge_connector.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#ifndef __DRM_BRIDGE_CONNECTOR_H__
+#define __DRM_BRIDGE_CONNECTOR_H__
+
+struct drm_connector;
+struct drm_device;
+struct drm_encoder;
+
+void drm_bridge_connector_enable_hpd(struct drm_connector *connector);
+void drm_bridge_connector_disable_hpd(struct drm_connector *connector);
+struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
+ struct drm_encoder *encoder);
+
+#endif /* __DRM_BRIDGE_CONNECTOR_H__ */
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
new file mode 100644
index 000000000000..572077ff8ae7
--- /dev/null
+++ b/include/drm/drm_buddy.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __DRM_BUDDY_H__
+#define __DRM_BUDDY_H__
+
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include <drm/drm_print.h>
+
+#define range_overflows(start, size, max) ({ \
+ typeof(start) start__ = (start); \
+ typeof(size) size__ = (size); \
+ typeof(max) max__ = (max); \
+ (void)(&start__ == &size__); \
+ (void)(&start__ == &max__); \
+ start__ >= max__ || size__ > max__ - start__; \
+})
+
+#define DRM_BUDDY_RANGE_ALLOCATION (1 << 0)
+#define DRM_BUDDY_TOPDOWN_ALLOCATION (1 << 1)
+
+struct drm_buddy_block {
+#define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
+#define DRM_BUDDY_HEADER_STATE GENMASK_ULL(11, 10)
+#define DRM_BUDDY_ALLOCATED (1 << 10)
+#define DRM_BUDDY_FREE (2 << 10)
+#define DRM_BUDDY_SPLIT (3 << 10)
+/* Free to be used, if needed in the future */
+#define DRM_BUDDY_HEADER_UNUSED GENMASK_ULL(9, 6)
+#define DRM_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0)
+ u64 header;
+
+ struct drm_buddy_block *left;
+ struct drm_buddy_block *right;
+ struct drm_buddy_block *parent;
+
+ void *private; /* owned by creator */
+
+ /*
+ * While the block is allocated by the user through drm_buddy_alloc*,
+ * the user has ownership of the link, for example to maintain within
+ * a list, if so desired. As soon as the block is freed with
+ * drm_buddy_free* ownership is given back to the mm.
+ */
+ struct list_head link;
+ struct list_head tmp_link;
+};
+
+/* Order-zero must be at least PAGE_SIZE */
+#define DRM_BUDDY_MAX_ORDER (63 - PAGE_SHIFT)
+
+/*
+ * Binary Buddy System.
+ *
+ * Locking should be handled by the user, a simple mutex around
+ * drm_buddy_alloc* and drm_buddy_free* should suffice.
+ */
+struct drm_buddy {
+ /* Maintain a free list for each order. */
+ struct list_head *free_list;
+
+ /*
+ * Maintain explicit binary tree(s) to track the allocation of the
+ * address space. This gives us a simple way of finding a buddy block
+ * and performing the potentially recursive merge step when freeing a
+ * block. Nodes are either allocated or free, in which case they will
+ * also exist on the respective free list.
+ */
+ struct drm_buddy_block **roots;
+
+ /*
+ * Anything from here is public, and remains static for the lifetime of
+ * the mm. Everything above is considered do-not-touch.
+ */
+ unsigned int n_roots;
+ unsigned int max_order;
+
+ /* Must be at least PAGE_SIZE */
+ u64 chunk_size;
+ u64 size;
+ u64 avail;
+};
+
+static inline u64
+drm_buddy_block_offset(struct drm_buddy_block *block)
+{
+ return block->header & DRM_BUDDY_HEADER_OFFSET;
+}
+
+static inline unsigned int
+drm_buddy_block_order(struct drm_buddy_block *block)
+{
+ return block->header & DRM_BUDDY_HEADER_ORDER;
+}
+
+static inline unsigned int
+drm_buddy_block_state(struct drm_buddy_block *block)
+{
+ return block->header & DRM_BUDDY_HEADER_STATE;
+}
+
+static inline bool
+drm_buddy_block_is_allocated(struct drm_buddy_block *block)
+{
+ return drm_buddy_block_state(block) == DRM_BUDDY_ALLOCATED;
+}
+
+static inline bool
+drm_buddy_block_is_free(struct drm_buddy_block *block)
+{
+ return drm_buddy_block_state(block) == DRM_BUDDY_FREE;
+}
+
+static inline bool
+drm_buddy_block_is_split(struct drm_buddy_block *block)
+{
+ return drm_buddy_block_state(block) == DRM_BUDDY_SPLIT;
+}
+
+static inline u64
+drm_buddy_block_size(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
+ return mm->chunk_size << drm_buddy_block_order(block);
+}
+
+int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size);
+
+void drm_buddy_fini(struct drm_buddy *mm);
+
+struct drm_buddy_block *
+drm_get_buddy(struct drm_buddy_block *block);
+
+int drm_buddy_alloc_blocks(struct drm_buddy *mm,
+ u64 start, u64 end, u64 size,
+ u64 min_page_size,
+ struct list_head *blocks,
+ unsigned long flags);
+
+int drm_buddy_block_trim(struct drm_buddy *mm,
+ u64 new_size,
+ struct list_head *blocks);
+
+void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block);
+
+void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects);
+
+void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p);
+void drm_buddy_block_print(struct drm_buddy *mm,
+ struct drm_buddy_block *block,
+ struct drm_printer *p);
+
+#endif
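
A minimal usage sketch of the allocator above, with arbitrary sizes and the
caller-provided locking reduced away by keeping everything single-threaded:

#include <linux/sizes.h>
#include <drm/drm_buddy.h>

static int my_buddy_smoke_test(void)
{
        struct drm_buddy mm;
        LIST_HEAD(blocks);
        int err;

        /* Manage a 64 MiB space with PAGE_SIZE minimum chunks. */
        err = drm_buddy_init(&mm, SZ_64M, PAGE_SIZE);
        if (err)
                return err;

        /* Carve out 1 MiB anywhere in the range. */
        err = drm_buddy_alloc_blocks(&mm, 0, SZ_64M, SZ_1M,
                                     PAGE_SIZE, &blocks, 0);
        if (!err)
                drm_buddy_free_list(&mm, &blocks);

        drm_buddy_fini(&mm);

        return err;
}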
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index e9ad4863d915..08e0e3ffad13 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -35,6 +35,8 @@
#include <linux/scatterlist.h>
+struct iosys_map;
+
void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
void drm_clflush_sg(struct sg_table *st);
void drm_clflush_virt_range(void *addr, unsigned long length);
@@ -65,9 +67,22 @@ static inline bool drm_arch_can_wc_memory(void)
* optimization entirely for ARM and arm64.
*/
return false;
+#elif defined(CONFIG_LOONGARCH)
+ /*
+ * LoongArch maintains cache coherency in hardware, but its WUC attribute
+ * (Weak-ordered UnCached, which is similar to WC) is outside the scope of
+ * the cache coherency mechanism. This means WUC can only be used for
+ * write-only memory regions.
+ */
+ return false;
#else
return true;
#endif
}
+void drm_memcpy_init_early(void);
+
+void drm_memcpy_from_wc(struct iosys_map *dst,
+ const struct iosys_map *src,
+ unsigned long len);
#endif
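
A short sketch of the new iosys_map-based copy interface; the buffer setup is
illustrative, and my_copy_from_vram is a made-up name:

#include <linux/iosys-map.h>
#include <drm/drm_cache.h>

static void my_copy_from_vram(void *dst_sys, void __iomem *src_wc,
                              unsigned long len)
{
        struct iosys_map dst, src;

        iosys_map_set_vaddr(&dst, dst_sys);
        iosys_map_set_vaddr_iomem(&src, src_wc);

        /* Uses a fast WC read path where available, plain copy otherwise. */
        drm_memcpy_from_wc(&dst, &src, len);
}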
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index 5cf2c5dd8b1e..4fc8018eddda 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -1,8 +1,9 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
#ifndef _DRM_CLIENT_H_
#define _DRM_CLIENT_H_
+#include <linux/iosys-map.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/types.h>
@@ -44,6 +45,11 @@ struct drm_client_funcs {
* returns zero gets the privilege to restore and no more clients are
* called. This callback is not called after @unregister has been called.
*
+ * Note that the core does not guarantee exclusion against concurrent
+ * drm_open(). Clients need to ensure this themselves, for example by
+ * using drm_master_internal_acquire() and
+ * drm_master_internal_release().
+ *
* This callback is optional.
*/
int (*restore)(struct drm_client_dev *client);
@@ -136,9 +142,9 @@ struct drm_client_buffer {
struct drm_gem_object *gem;
/**
- * @vaddr: Virtual address for the buffer
+ * @map: Virtual address for the buffer
*/
- void *vaddr;
+ struct iosys_map map;
/**
* @fb: DRM framebuffer
@@ -149,14 +155,17 @@ struct drm_client_buffer {
struct drm_client_buffer *
drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format);
void drm_client_framebuffer_delete(struct drm_client_buffer *buffer);
-void *drm_client_buffer_vmap(struct drm_client_buffer *buffer);
+int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect);
+int drm_client_buffer_vmap(struct drm_client_buffer *buffer,
+ struct iosys_map *map);
void drm_client_buffer_vunmap(struct drm_client_buffer *buffer);
int drm_client_modeset_create(struct drm_client_dev *client);
void drm_client_modeset_free(struct drm_client_dev *client);
int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, unsigned int height);
bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation);
-int drm_client_modeset_commit_force(struct drm_client_dev *client);
+int drm_client_modeset_check(struct drm_client_dev *client);
+int drm_client_modeset_commit_locked(struct drm_client_dev *client);
int drm_client_modeset_commit(struct drm_client_dev *client);
int drm_client_modeset_dpms(struct drm_client_dev *client, int mode);
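
A sketch of the reworked mapping contract, which now fills an iosys_map
instead of returning a raw pointer (my_client_clear is illustrative):

#include <linux/iosys-map.h>
#include <drm/drm_client.h>

static int my_client_clear(struct drm_client_buffer *buffer, size_t len)
{
        struct iosys_map map;
        int ret;

        ret = drm_client_buffer_vmap(buffer, &map);
        if (ret)
                return ret;

        /* iosys_map_memset() handles both system and I/O memory. */
        iosys_map_memset(&map, 0, 0, len);

        drm_client_buffer_vunmap(buffer);

        return 0;
}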
@@ -183,6 +192,6 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode);
drm_for_each_connector_iter(connector, iter) \
if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
-int drm_client_debugfs_init(struct drm_minor *minor);
+void drm_client_debugfs_init(struct drm_minor *minor);
#endif
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 221910948b37..56aee949c6fa 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -27,6 +27,7 @@
#include <linux/llist.h>
#include <linux/ctype.h>
#include <linux/hdmi.h>
+#include <linux/notifier.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_util.h>
@@ -37,9 +38,11 @@ struct drm_modeset_acquire_ctx;
struct drm_device;
struct drm_crtc;
struct drm_encoder;
+struct drm_panel;
struct drm_property;
struct drm_property_blob;
struct drm_printer;
+struct drm_privacy_screen;
struct edid;
struct i2c_adapter;
@@ -84,7 +87,7 @@ enum drm_connector_status {
};
/**
- * enum drm_connector_registration_status - userspace registration status for
+ * enum drm_connector_registration_state - userspace registration status for
* a &drm_connector
*
* This enum is used to track the status of initializing a connector and
@@ -175,6 +178,46 @@ struct drm_scdc {
struct drm_scrambling scrambling;
};
+/**
+ * struct drm_hdmi_dsc_cap - DSC capabilities of HDMI sink
+ *
+ * Describes the DSC support provided by an HDMI 2.1 sink.
+ * The information is fetched from additional HFVSDB blocks defined
+ * for HDMI 2.1.
+ */
+struct drm_hdmi_dsc_cap {
+ /** @v_1p2: flag for DSC 1.2 version support by the sink */
+ bool v_1p2;
+
+ /** @native_420: Does sink support DSC with 4:2:0 compression */
+ bool native_420;
+
+ /**
+ * @all_bpp: Does sink support all bpp with 4:4:4 or 4:2:2
+ * compressed formats
+ */
+ bool all_bpp;
+
+ /**
+ * @bpc_supported: compressed bpc supported by sink: 10, 12 or 16 bpc
+ */
+ u8 bpc_supported;
+
+ /** @max_slices: maximum number of horizontal slices supported by the sink */
+ u8 max_slices;
+
+ /** @clk_per_slice: max pixel clock in MHz supported per slice */
+ int clk_per_slice;
+
+ /** @max_lanes: max number of DSC lanes supported for Fixed Rate Link training */
+ u8 max_lanes;
+
+ /** @max_frl_rate_per_lane: maximum FRL rate per lane with DSC */
+ u8 max_frl_rate_per_lane;
+
+ /** @total_chunk_kbytes: max size of chunks in KBs supported per line */
+ u8 total_chunk_kbytes;
+};
/**
* struct drm_hdmi_info - runtime information about the connected HDMI sink
@@ -207,6 +250,15 @@ struct drm_hdmi_info {
/** @y420_dc_modes: bitmap of deep color support index */
u8 y420_dc_modes;
+
+ /** @max_frl_rate_per_lane: maximum supported FRL (Fixed Rate Link) rate per lane */
+ u8 max_frl_rate_per_lane;
+
+ /** @max_lanes: maximum number of FRL lanes supported by the sink */
+ u8 max_lanes;
+
+ /** @dsc_cap: DSC capabilities of the sink */
+ struct drm_hdmi_dsc_cap dsc_cap;
};
/**
@@ -254,6 +306,63 @@ enum drm_panel_orientation {
DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+/**
+ * struct drm_monitor_range_info - Panel's Monitor range in EDID for
+ * &drm_display_info
+ *
+ * This struct is used to store a frequency range supported by the panel
+ * as parsed from EDID's detailed monitor range descriptor block.
+ *
+ * @min_vfreq: This is the min supported refresh rate in Hz from
+ * EDID's detailed monitor range.
+ * @max_vfreq: This is the max supported refresh rate in Hz from
+ * EDID's detailed monitor range.
+ */
+struct drm_monitor_range_info {
+ u16 min_vfreq;
+ u16 max_vfreq;
+};
+
+/**
+ * struct drm_luminance_range_info - Panel's luminance range for
+ * &drm_display_info. Calculated using data in EDID
+ *
+ * This struct is used to store a luminance range supported by the panel
+ * as calculated using data from EDID's static hdr metadata.
+ *
+ * @min_luminance: This is the min supported luminance value
+ *
+ * @max_luminance: This is the max supported luminance value
+ */
+struct drm_luminance_range_info {
+ u32 min_luminance;
+ u32 max_luminance;
+};
+
+/**
+ * enum drm_privacy_screen_status - privacy screen status
+ *
+ * This enum is used to track and control the state of the integrated privacy
+ * screen present on some display panels, via the "privacy-screen sw-state"
+ * and "privacy-screen hw-state" properties. Note the _LOCKED enum values
+ * are only valid for the "privacy-screen hw-state" property.
+ *
+ * @PRIVACY_SCREEN_DISABLED:
+ * The privacy-screen on the panel is disabled
+ * @PRIVACY_SCREEN_ENABLED:
+ * The privacy-screen on the panel is enabled
+ * @PRIVACY_SCREEN_DISABLED_LOCKED:
+ * The privacy-screen on the panel is disabled and locked (cannot be changed)
+ * @PRIVACY_SCREEN_ENABLED_LOCKED:
+ * The privacy-screen on the panel is enabled and locked (cannot be changed)
+ */
+enum drm_privacy_screen_status {
+ PRIVACY_SCREEN_DISABLED = 0,
+ PRIVACY_SCREEN_ENABLED,
+ PRIVACY_SCREEN_DISABLED_LOCKED,
+ PRIVACY_SCREEN_ENABLED_LOCKED,
+};
+
/*
* This is a consolidated colorimetry list supported by HDMI and
* DP protocol standard. The respective connectors will register
@@ -303,51 +412,97 @@ enum drm_panel_orientation {
* opposite edge of the driving edge. Transmitters and receivers may however
* need to take other signal timings into account to convert between driving
* and sample edges.
- *
- * @DRM_BUS_FLAG_DE_LOW: The Data Enable signal is active low
- * @DRM_BUS_FLAG_DE_HIGH: The Data Enable signal is active high
- * @DRM_BUS_FLAG_PIXDATA_POSEDGE: Legacy value, do not use
- * @DRM_BUS_FLAG_PIXDATA_NEGEDGE: Legacy value, do not use
- * @DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE: Data is driven on the rising edge of
- * the pixel clock
- * @DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE: Data is driven on the falling edge of
- * the pixel clock
- * @DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE: Data is sampled on the rising edge of
- * the pixel clock
- * @DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE: Data is sampled on the falling edge of
- * the pixel clock
- * @DRM_BUS_FLAG_DATA_MSB_TO_LSB: Data is transmitted MSB to LSB on the bus
- * @DRM_BUS_FLAG_DATA_LSB_TO_MSB: Data is transmitted LSB to MSB on the bus
- * @DRM_BUS_FLAG_SYNC_POSEDGE: Legacy value, do not use
- * @DRM_BUS_FLAG_SYNC_NEGEDGE: Legacy value, do not use
- * @DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE: Sync signals are driven on the rising
- * edge of the pixel clock
- * @DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE: Sync signals are driven on the falling
- * edge of the pixel clock
- * @DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE: Sync signals are sampled on the rising
- * edge of the pixel clock
- * @DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE: Sync signals are sampled on the falling
- * edge of the pixel clock
- * @DRM_BUS_FLAG_SHARP_SIGNALS: Set if the Sharp-specific signals
- * (SPL, CLS, PS, REV) must be used
*/
enum drm_bus_flags {
+ /**
+ * @DRM_BUS_FLAG_DE_LOW:
+ *
+ * The Data Enable signal is active low
+ */
DRM_BUS_FLAG_DE_LOW = BIT(0),
+
+ /**
+ * @DRM_BUS_FLAG_DE_HIGH:
+ *
+ * The Data Enable signal is active high
+ */
DRM_BUS_FLAG_DE_HIGH = BIT(1),
- DRM_BUS_FLAG_PIXDATA_POSEDGE = BIT(2),
- DRM_BUS_FLAG_PIXDATA_NEGEDGE = BIT(3),
- DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE = DRM_BUS_FLAG_PIXDATA_POSEDGE,
- DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE = DRM_BUS_FLAG_PIXDATA_NEGEDGE,
- DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE = DRM_BUS_FLAG_PIXDATA_NEGEDGE,
- DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+
+ /**
+ * @DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE:
+ *
+ * Data is driven on the rising edge of the pixel clock
+ */
+ DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE = BIT(2),
+
+ /**
+ * @DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE:
+ *
+ * Data is driven on the falling edge of the pixel clock
+ */
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE = BIT(3),
+
+ /**
+ * @DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE:
+ *
+ * Data is sampled on the rising edge of the pixel clock
+ */
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+
+ /**
+ * @DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE:
+ *
+ * Data is sampled on the falling edge of the pixel clock
+ */
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+
+ /**
+ * @DRM_BUS_FLAG_DATA_MSB_TO_LSB:
+ *
+ * Data is transmitted MSB to LSB on the bus
+ */
DRM_BUS_FLAG_DATA_MSB_TO_LSB = BIT(4),
+
+ /**
+ * @DRM_BUS_FLAG_DATA_LSB_TO_MSB:
+ *
+ * Data is transmitted LSB to MSB on the bus
+ */
DRM_BUS_FLAG_DATA_LSB_TO_MSB = BIT(5),
- DRM_BUS_FLAG_SYNC_POSEDGE = BIT(6),
- DRM_BUS_FLAG_SYNC_NEGEDGE = BIT(7),
- DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE = DRM_BUS_FLAG_SYNC_POSEDGE,
- DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE = DRM_BUS_FLAG_SYNC_NEGEDGE,
- DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE = DRM_BUS_FLAG_SYNC_NEGEDGE,
- DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE = DRM_BUS_FLAG_SYNC_POSEDGE,
+
+ /**
+ * @DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE:
+ *
+ * Sync signals are driven on the rising edge of the pixel clock
+ */
+ DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE = BIT(6),
+
+ /**
+ * @DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE:
+ *
+ * Sync signals are driven on the falling edge of the pixel clock
+ */
+ DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE = BIT(7),
+
+ /**
+ * @DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE:
+ *
+ * Sync signals are sampled on the rising edge of the pixel clock
+ */
+ DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE = DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE,
+
+ /**
+ * @DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE:
+ *
+ * Sync signals are sampled on the falling edge of the pixel clock
+ */
+ DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE = DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
+
+ /**
+ * @DRM_BUS_FLAG_SHARP_SIGNALS:
+ *
+ * Set if the Sharp-specific signals (SPL, CLS, PS, REV) must be used
+ */
DRM_BUS_FLAG_SHARP_SIGNALS = BIT(8),
};
@@ -384,9 +539,9 @@ struct drm_display_info {
enum subpixel_order subpixel_order;
#define DRM_COLOR_FORMAT_RGB444 (1<<0)
-#define DRM_COLOR_FORMAT_YCRCB444 (1<<1)
-#define DRM_COLOR_FORMAT_YCRCB422 (1<<2)
-#define DRM_COLOR_FORMAT_YCRCB420 (1<<3)
+#define DRM_COLOR_FORMAT_YCBCR444 (1<<1)
+#define DRM_COLOR_FORMAT_YCBCR422 (1<<2)
+#define DRM_COLOR_FORMAT_YCBCR420 (1<<3)
/**
* @panel_orientation: Read only connector property for built-in panels,
@@ -435,6 +590,14 @@ struct drm_display_info {
bool dvi_dual;
/**
+ * @is_hdmi: True if the sink is an HDMI device.
+ *
+ * This field shall be used instead of calling
+ * drm_detect_hdmi_monitor() when possible.
+ */
+ bool is_hdmi;
+
+ /**
* @has_hdmi_infoframe: Does the sink support the HDMI infoframe?
*/
bool has_hdmi_infoframe;
@@ -446,10 +609,16 @@ struct drm_display_info {
bool rgb_quant_range_selectable;
/**
- * @edid_hdmi_dc_modes: Mask of supported hdmi deep color modes. Even
- * more stuff redundant with @bus_formats.
+ * @edid_hdmi_rgb444_dc_modes: Mask of supported hdmi deep color modes
+ * in RGB 4:4:4. Even more stuff redundant with @bus_formats.
*/
- u8 edid_hdmi_dc_modes;
+ u8 edid_hdmi_rgb444_dc_modes;
+
+ /**
+ * @edid_hdmi_ycbcr444_dc_modes: Mask of supported hdmi deep color
+ * modes in YCbCr 4:4:4. Even more stuff redundant with @bus_formats.
+ */
+ u8 edid_hdmi_ycbcr444_dc_modes;
/**
* @cea_rev: CEA revision of the HDMI sink.
@@ -465,6 +634,28 @@ struct drm_display_info {
* @non_desktop: Non desktop display (HMD).
*/
bool non_desktop;
+
+ /**
+ * @monitor_range: Frequency range supported by monitor range descriptor
+ */
+ struct drm_monitor_range_info monitor_range;
+
+ /**
+ * @luminance_range: Luminance range supported by panel
+ */
+ struct drm_luminance_range_info luminance_range;
+
+ /**
+ * @mso_stream_count: eDP Multi-SST Operation (MSO) stream count from
+ * the DisplayID VESA vendor block. 0 for conventional Single-Stream
+ * Transport (SST), or 2 or 4 MSO streams.
+ */
+ u8 mso_stream_count;
+
+ /**
+ * @mso_pixel_overlap: eDP MSO segment pixel overlap, 0-8 pixels.
+ */
+ u8 mso_pixel_overlap;
};
int drm_display_info_set_bus_formats(struct drm_display_info *info,
@@ -657,6 +848,12 @@ struct drm_connector_state {
u8 max_bpc;
/**
+ * @privacy_screen_sw_state: See :ref:`Standard Connector
+ * Properties<standard_connector_properties>`
+ */
+ enum drm_privacy_screen_status privacy_screen_sw_state;
+
+ /**
* @hdr_output_metadata:
* DRM blob property for HDR output metadata
*/
@@ -723,6 +920,11 @@ struct drm_connector_funcs {
* locks to avoid races with concurrent modeset changes need to use
* &drm_connector_helper_funcs.detect_ctx instead.
*
+ * Also note that this callback can be called no matter the
+ * state the connector is in. Drivers that need the underlying
+ * device to be powered to perform the detection will first need
+ * to make sure it's been properly enabled.
+ *
* RETURNS:
*
* drm_connector_status indicating the connector's status.
@@ -954,6 +1156,21 @@ struct drm_connector_funcs {
*/
void (*atomic_print_state)(struct drm_printer *p,
const struct drm_connector_state *state);
+
+ /**
+ * @oob_hotplug_event:
+ *
+ * This will get called when a hotplug-event for a drm-connector
+ * has been received from a source outside the display driver / device.
+ */
+ void (*oob_hotplug_event)(struct drm_connector *connector);
+
+ /**
+ * @debugfs_init:
+ *
+ * Allows connectors to create connector-specific debugfs files.
+ */
+ void (*debugfs_init)(struct drm_connector *connector, struct dentry *root);
};
/**
@@ -1098,6 +1315,14 @@ struct drm_connector {
struct device *kdev;
/** @attr: sysfs attributes */
struct device_attribute *attr;
+ /**
+ * @fwnode: associated fwnode supplied by platform firmware
+ *
+ * Drivers can set this to associate a fwnode with a connector, drivers
+ * are expected to get a reference on the fwnode when setting this.
+ * drm_connector_cleanup() will call fwnode_handle_put() on this.
+ */
+ struct fwnode_handle *fwnode;
/**
* @head:
@@ -1109,6 +1334,14 @@ struct drm_connector {
*/
struct list_head head;
+ /**
+ * @global_connector_list_entry:
+ *
+ * Connector entry in the global connector-list, used by
+ * drm_connector_find_by_fwnode().
+ */
+ struct list_head global_connector_list_entry;
+
/** @base: base KMS object */
struct drm_mode_object base;
@@ -1255,6 +1488,24 @@ struct drm_connector {
*/
struct drm_property *max_bpc_property;
+ /** @privacy_screen: drm_privacy_screen for this connector, or NULL. */
+ struct drm_privacy_screen *privacy_screen;
+
+ /** @privacy_screen_notifier: privacy-screen notifier_block */
+ struct notifier_block privacy_screen_notifier;
+
+ /**
+ * @privacy_screen_sw_state_property: Optional atomic property for the
+ * connector to control the integrated privacy screen.
+ */
+ struct drm_property *privacy_screen_sw_state_property;
+
+ /**
+ * @privacy_screen_hw_state_property: Optional atomic property for the
+ * connector to report the actual integrated privacy screen state.
+ */
+ struct drm_property *privacy_screen_hw_state_property;
+
#define DRM_CONNECTOR_POLL_HPD (1 << 0)
#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
@@ -1297,8 +1548,14 @@ struct drm_connector {
struct drm_cmdline_mode cmdline_mode;
/** @force: a DRM_FORCE_<foo> state for forced mode sets */
enum drm_connector_force force;
- /** @override_edid: has the EDID been overwritten through debugfs for testing? */
+ /**
+ * @override_edid: has the EDID been overwritten through debugfs for
+ * testing? Do not modify outside of drm_edid_override_set() and
+ * drm_edid_override_reset().
+ */
bool override_edid;
+ /** @epoch_counter: used to detect any other changes in connector, besides status */
+ u64 epoch_counter;
/**
* @possible_encoders: Bit mask of encoders that can drive this
@@ -1357,6 +1614,12 @@ struct drm_connector {
* rev1.1 4.2.2.6
*/
bool edid_corrupt;
+ /**
+ * @real_edid_checksum: real edid checksum for corrupted edid block.
+ * Required in Displayport 1.4 compliance testing
+ * rev1.1 4.2.2.6
+ */
+ u8 real_edid_checksum;
/** @debugfs_entry: debugfs directory for this connector */
struct dentry *debugfs_entry;
@@ -1435,6 +1698,11 @@ int drm_connector_init_with_ddc(struct drm_device *dev,
const struct drm_connector_funcs *funcs,
int connector_type,
struct i2c_adapter *ddc);
+int drmm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type,
+ struct i2c_adapter *ddc);
void drm_connector_attach_edid_property(struct drm_connector *connector);
int drm_connector_register(struct drm_connector *connector);
void drm_connector_unregister(struct drm_connector *connector);
@@ -1512,6 +1780,8 @@ drm_connector_is_unregistered(struct drm_connector *connector)
DRM_CONNECTOR_UNREGISTERED;
}
+void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode);
+const char *drm_get_connector_type_name(unsigned int connector_type);
const char *drm_get_connector_status_name(enum drm_connector_status status);
const char *drm_get_subpixel_order_name(enum subpixel_order order);
const char *drm_get_dpms_name(int val);
@@ -1519,10 +1789,13 @@ const char *drm_get_dvi_i_subconnector_name(int val);
const char *drm_get_dvi_i_select_name(int val);
const char *drm_get_tv_subconnector_name(int val);
const char *drm_get_tv_select_name(int val);
+const char *drm_get_dp_subconnector_name(int val);
const char *drm_get_content_protection_name(int val);
const char *drm_get_hdcp_content_type_name(int val);
int drm_mode_create_dvi_i_properties(struct drm_device *dev);
+void drm_connector_attach_dp_subconnector_property(struct drm_connector *connector);
+
int drm_mode_create_tv_margin_properties(struct drm_device *dev);
int drm_mode_create_tv_properties(struct drm_device *dev,
unsigned int num_modes,
@@ -1534,13 +1807,14 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
u32 scaling_mode_mask);
int drm_connector_attach_vrr_capable_property(
struct drm_connector *connector);
+int drm_connector_attach_colorspace_property(struct drm_connector *connector);
+int drm_connector_attach_hdr_output_metadata_property(struct drm_connector *connector);
+bool drm_connector_atomic_hdr_metadata_equal(struct drm_connector_state *old_state,
+ struct drm_connector_state *new_state);
int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector);
int drm_mode_create_dp_colorspace_property(struct drm_connector *connector);
int drm_mode_create_content_type_property(struct drm_device *dev);
-void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
- const struct drm_connector_state *conn_state);
-
int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
int drm_connector_set_path_property(struct drm_connector *connector,
@@ -1552,10 +1826,23 @@ void drm_connector_set_link_status_property(struct drm_connector *connector,
uint64_t link_status);
void drm_connector_set_vrr_capable_property(
struct drm_connector *connector, bool capable);
-int drm_connector_init_panel_orientation_property(
- struct drm_connector *connector, int width, int height);
+int drm_connector_set_panel_orientation(
+ struct drm_connector *connector,
+ enum drm_panel_orientation panel_orientation);
+int drm_connector_set_panel_orientation_with_quirk(
+ struct drm_connector *connector,
+ enum drm_panel_orientation panel_orientation,
+ int width, int height);
+int drm_connector_set_orientation_from_panel(
+ struct drm_connector *connector,
+ struct drm_panel *panel);
int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
int min, int max);
+void drm_connector_create_privacy_screen_properties(struct drm_connector *conn);
+void drm_connector_attach_privacy_screen_properties(struct drm_connector *conn);
+void drm_connector_attach_privacy_screen_provider(
+ struct drm_connector *connector, struct drm_privacy_screen *priv);
+void drm_connector_update_privacy_screen(const struct drm_connector_state *connector_state);
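
As an illustration of how a driver wires these up, here is a sketch that
binds a privacy-screen provider to a connector at init time, assuming the
consumer-side lookup drm_privacy_screen_get() from
drm_privacy_screen_consumer.h; the function name and NULL con_id are
illustrative:

#include <drm/drm_connector.h>
#include <drm/drm_privacy_screen_consumer.h>

static int my_connector_wire_privacy_screen(struct drm_connector *connector,
                                            struct device *dev)
{
        struct drm_privacy_screen *priv;

        priv = drm_privacy_screen_get(dev, NULL);
        if (IS_ERR(priv))
                return PTR_ERR(priv);

        /* The connector takes over the reference obtained above. */
        drm_connector_attach_privacy_screen_provider(connector, priv);

        return 0;
}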
/**
* struct drm_tile_group - Tile group metadata
@@ -1575,9 +1862,9 @@ struct drm_tile_group {
};
struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
- char topology[8]);
+ const char topology[8]);
struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
- char topology[8]);
+ const char topology[8]);
void drm_mode_put_tile_group(struct drm_device *dev,
struct drm_tile_group *tg);
@@ -1589,6 +1876,11 @@ void drm_mode_put_tile_group(struct drm_device *dev,
* drm_connector_list_iter_begin(), drm_connector_list_iter_end() and
* drm_connector_list_iter_next() respectively the convenience macro
* drm_for_each_connector_iter().
+ *
+ * Note that the return value of drm_connector_list_iter_next() is only valid
+ * up to the next drm_connector_list_iter_next() or
+ * drm_connector_list_iter_end() call. If you want to use the connector later,
+ * then you need to grab your own reference first using drm_connector_get().
*/
struct drm_connector_list_iter {
/* private: */
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 5e9b15a0e8c5..8e1cbc75143e 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -25,37 +25,24 @@
#ifndef __DRM_CRTC_H__
#define __DRM_CRTC_H__
-#include <linux/i2c.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/fb.h>
-#include <linux/hdmi.h>
-#include <linux/media-bus-format.h>
-#include <uapi/drm/drm_mode.h>
-#include <uapi/drm/drm_fourcc.h>
#include <drm/drm_modeset_lock.h>
-#include <drm/drm_rect.h>
#include <drm/drm_mode_object.h>
-#include <drm/drm_framebuffer.h>
#include <drm/drm_modes.h>
-#include <drm/drm_connector.h>
#include <drm/drm_device.h>
-#include <drm/drm_property.h>
-#include <drm/drm_edid.h>
#include <drm/drm_plane.h>
-#include <drm/drm_blend.h>
-#include <drm/drm_color_mgmt.h>
#include <drm/drm_debugfs_crc.h>
#include <drm/drm_mode_config.h>
+struct drm_connector;
struct drm_device;
+struct drm_framebuffer;
struct drm_mode_set;
struct drm_file;
-struct drm_clip_rect;
struct drm_printer;
struct drm_self_refresh_data;
struct device_node;
-struct dma_fence;
struct edid;
static inline int64_t U642I64(uint64_t val)
@@ -174,12 +161,25 @@ struct drm_crtc_state {
* @no_vblank:
*
* Reflects the ability of a CRTC to send VBLANK events. This state
- * usually depends on the pipeline configuration, and the main usuage
- * is CRTCs feeding a writeback connector operating in oneshot mode.
- * In this case the VBLANK event is only generated when a job is queued
- * to the writeback connector, and we want the core to fake VBLANK
- * events when this part of the pipeline hasn't changed but others had
- * or when the CRTC and connectors are being disabled.
+ * usually depends on the pipeline configuration. If set to true, DRM
+ * atomic helpers will send out a fake VBLANK event during display
+ * updates after all hardware changes have been committed. This is
+ * implemented in drm_atomic_helper_fake_vblank().
+ *
+ * One usage is for drivers and/or hardware without support for VBLANK
+ * interrupts. Such drivers typically do not initialize vblanking
+ * (i.e., call drm_vblank_init() with the number of CRTCs). For CRTCs
+ * without initialized vblanking, this field is set to true in
+ * drm_atomic_helper_check_modeset(), and a fake VBLANK event will be
+ * send out on each update of the display pipeline by
+ * drm_atomic_helper_fake_vblank().
+ *
+ * Another usage is CRTCs feeding a writeback connector operating in
+ * oneshot mode. In this case the fake VBLANK event is only generated
+ * when a job is queued to the writeback connector, and we want the
+ * core to fake VBLANK events when this part of the pipeline hasn't
+ * changed but others had or when the CRTC and connectors are being
+ * disabled.
*
* __drm_atomic_helper_crtc_duplicate_state() will not reset the value
* from the current state, the CRTC driver is then responsible for
@@ -272,6 +272,10 @@ struct drm_crtc_state {
* Lookup table for converting pixel data after the color conversion
* matrix @ctm. See drm_crtc_enable_color_mgmt(). The blob (if not
* NULL) is an array of &struct drm_color_lut.
+ *
+ * Note that for mostly historical reasons stemming from Xorg heritage,
+ * this is also used to store the color map (also sometimes color lut,
+ * CLUT or color palette) for indexed formats like DRM_FORMAT_C8.
*/
struct drm_property_blob *gamma_lut;
@@ -312,6 +316,13 @@ struct drm_crtc_state {
bool self_refresh_active;
/**
+ * @scaling_filter:
+ *
+ * Scaling filter to be applied
+ */
+ enum drm_scaling_filter scaling_filter;
+
+ /**
* @event:
*
* Optional pointer to a DRM event to signal upon completion of the
@@ -335,7 +346,14 @@ struct drm_crtc_state {
* - Events for disabled CRTCs are not allowed, and drivers can ignore
* that case.
*
- * This can be handled by the drm_crtc_send_vblank_event() function,
+ * For very simple hardware without VBLANK interrupt, enabling
+ * &struct drm_crtc_state.no_vblank makes DRM's atomic commit helpers
+ * send a fake VBLANK event at the end of the display update after all
+ * hardware changes have been applied. See
+ * drm_atomic_helper_fake_vblank().
+ *
+ * For more complex hardware this
+ * can be handled by the drm_crtc_send_vblank_event() function,
* which the driver should call on the provided event upon completion of
* the atomic commit. Note that if the driver supports vblank signalling
* and timestamping the vblank counters and timestamps must agree with
@@ -867,6 +885,47 @@ struct drm_crtc_funcs {
* new drivers as the replacement of &drm_driver.disable_vblank hook.
*/
void (*disable_vblank)(struct drm_crtc *crtc);
+
+ /**
+ * @get_vblank_timestamp:
+ *
+ * Called by drm_get_last_vbltimestamp(). Should return a precise
+ * timestamp when the most recent vblank interval ended or will end.
+ *
+ * Specifically, the timestamp in @vblank_time should correspond as
+ * closely as possible to the time when the first video scanline of
+ * the video frame after the end of vblank will start scanning out,
+ * the time immediately after end of the vblank interval. If the
+ * @crtc is currently inside vblank, this will be a time in the future.
+ * If the @crtc is currently scanning out a frame, this will be the
+ * past start time of the current scanout. This is meant to adhere
+ * to the OpenML OML_sync_control extension specification.
+ *
+ * Parameters:
+ *
+ * crtc:
+ * CRTC for which timestamp should be returned.
+ * max_error:
+ * Maximum allowable timestamp error in nanoseconds.
+ * The implementation should strive to provide a timestamp
+ * with an error of at most max_error nanoseconds.
+ * Returns the true upper bound on the timestamp error.
+ * vblank_time:
+ * Target location for returned vblank timestamp.
+ * in_vblank_irq:
+ * True when called from drm_crtc_handle_vblank(). Some drivers
+ * need to apply some workarounds for gpu-specific vblank irq quirks
+ * if flag is set.
+ *
+ * Returns:
+ *
+ * True on success, false on failure, which means the core should
+ * fall back to a simple timestamp taken in drm_crtc_handle_vblank().
+ */
+ bool (*get_vblank_timestamp)(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq);
};
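
A sketch of a funcs table wiring in the generic timestamp helper, which fits
drivers whose CRTC helper funcs implement get_scanout_position(); the
my_crtc_* vblank hooks are assumed driver code:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_vblank.h>

static const struct drm_crtc_funcs my_crtc_funcs = {
        .set_config             = drm_atomic_helper_set_config,
        .page_flip              = drm_atomic_helper_page_flip,
        .reset                  = drm_atomic_helper_crtc_reset,
        .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
        .atomic_destroy_state   = drm_atomic_helper_crtc_destroy_state,
        .enable_vblank          = my_crtc_enable_vblank,  /* assumed */
        .disable_vblank         = my_crtc_disable_vblank, /* assumed */
        .get_vblank_timestamp   = drm_crtc_vblank_helper_get_vblank_timestamp,
};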
/**
@@ -974,11 +1033,12 @@ struct drm_crtc {
* Programmed mode in hw, after adjustments for encoders, crtc, panel
* scaling etc. Should only be used by legacy drivers, for high
* precision vblank timestamps in
- * drm_calc_vbltimestamp_from_scanoutpos().
+ * drm_crtc_vblank_helper_get_vblank_timestamp().
*
* Note that atomic drivers should not use this, but instead use
* &drm_crtc_state.adjusted_mode. And for high-precision timestamps
- * drm_calc_vbltimestamp_from_scanoutpos() used &drm_vblank_crtc.hwmode,
+ * drm_crtc_vblank_helper_get_vblank_timestamp() used
+ * &drm_vblank_crtc.hwmode,
* which is filled out by calling drm_calc_timestamping_constants().
*/
struct drm_display_mode hwmode;
@@ -1006,12 +1066,18 @@ struct drm_crtc {
/**
* @gamma_size: Size of legacy gamma ramp reported to userspace. Set up
* by calling drm_mode_crtc_set_gamma_size().
+ *
+ * Note that atomic drivers need to instead use
+ * &drm_crtc_state.gamma_lut. See drm_crtc_enable_color_mgmt().
*/
uint32_t gamma_size;
/**
* @gamma_store: Gamma ramp values used by the legacy SETGAMMA and
* GETGAMMA IOCTls. Set up by calling drm_mode_crtc_set_gamma_size().
+ *
+ * Note that atomic drivers need to instead use
+ * &drm_crtc_state.gamma_lut. See drm_crtc_enable_color_mgmt().
*/
uint16_t *gamma_store;
@@ -1022,6 +1088,12 @@ struct drm_crtc {
struct drm_object_properties properties;
/**
+ * @scaling_filter_property: property to apply a particular filter while
+ * scaling.
+ */
+ struct drm_property *scaling_filter_property;
+
+ /**
* @state:
*
* Current atomic state for this CRTC.
@@ -1060,14 +1132,12 @@ struct drm_crtc {
*/
spinlock_t commit_lock;
-#ifdef CONFIG_DEBUG_FS
/**
* @debugfs_entry:
*
* Debugfs directory for this CRTC.
*/
struct dentry *debugfs_entry;
-#endif
/**
* @crc:
@@ -1146,8 +1216,50 @@ int drm_crtc_init_with_planes(struct drm_device *dev,
struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs,
const char *name, ...);
+
+__printf(6, 7)
+int drmm_crtc_init_with_planes(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs,
+ const char *name, ...);
+
void drm_crtc_cleanup(struct drm_crtc *crtc);
+__printf(7, 8)
+void *__drmm_crtc_alloc_with_planes(struct drm_device *dev,
+ size_t size, size_t offset,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs,
+ const char *name, ...);
+
+/**
+ * drmm_crtc_alloc_with_planes - Allocate and initialize a new CRTC object with
+ * specified primary and cursor planes.
+ * @dev: DRM device
+ * @type: the type of the struct which contains struct &drm_crtc
+ * @member: the name of the &drm_crtc within @type.
+ * @primary: Primary plane for CRTC
+ * @cursor: Cursor plane for CRTC
+ * @funcs: callbacks for the new CRTC
+ * @name: printf style format string for the CRTC name, or NULL for default name
+ *
+ * Allocates and initializes a new crtc object. Cleanup is automatically
+ * handled through registering drmm_crtc_cleanup() with drmm_add_action().
+ *
+ * The @drm_crtc_funcs.destroy hook must be NULL.
+ *
+ * Returns:
+ * Pointer to new crtc, or ERR_PTR on failure.
+ */
+#define drmm_crtc_alloc_with_planes(dev, type, member, primary, cursor, funcs, name, ...) \
+ ((type *)__drmm_crtc_alloc_with_planes(dev, sizeof(type), \
+ offsetof(type, member), \
+ primary, cursor, funcs, \
+ name, ##__VA_ARGS__))
+
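
A usage sketch for the managed allocator, assuming a my_crtc wrapper struct
and an existing my_crtc_funcs table (both illustrative):

#include <drm/drm_crtc.h>

struct my_crtc {
        struct drm_crtc base;
        /* driver-private fields ... */
};

static struct my_crtc *my_crtc_create(struct drm_device *drm,
                                      struct drm_plane *primary,
                                      struct drm_plane *cursor)
{
        /* Returns ERR_PTR() on failure; the caller checks with IS_ERR(). */
        return drmm_crtc_alloc_with_planes(drm, struct my_crtc, base,
                                           primary, cursor,
                                           &my_crtc_funcs, NULL);
}

Because cleanup is registered with the drmm_ machinery, no
&drm_crtc_funcs.destroy hook is set.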
/**
* drm_crtc_index - find the index of a registered CRTC
* @crtc: CRTC to find index for
@@ -1204,4 +1316,17 @@ static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
#define drm_for_each_crtc(crtc, dev) \
list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
+/**
+ * drm_for_each_crtc_reverse - iterate over all CRTCs in reverse order
+ * @crtc: a &struct drm_crtc as the loop cursor
+ * @dev: the &struct drm_device
+ *
+ * Iterate over all CRTCs of @dev.
+ */
+#define drm_for_each_crtc_reverse(crtc, dev) \
+ list_for_each_entry_reverse(crtc, &(dev)->mode_config.crtc_list, head)
+
+int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc,
+ unsigned int supported_filters);
+
#endif /* __DRM_CRTC_H__ */
diff --git a/include/drm/drm_damage_helper.h b/include/drm/drm_damage_helper.h
index 40c34a5bf149..effda42cce31 100644
--- a/include/drm/drm_damage_helper.h
+++ b/include/drm/drm_damage_helper.h
@@ -64,7 +64,6 @@ struct drm_atomic_helper_damage_iter {
bool full_update;
};
-void drm_plane_enable_fb_damage_clips(struct drm_plane *plane);
void drm_atomic_helper_check_plane_damage(struct drm_atomic_state *state,
struct drm_plane_state *plane_state);
int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb,
@@ -82,21 +81,4 @@ bool drm_atomic_helper_damage_merged(const struct drm_plane_state *old_state,
struct drm_plane_state *state,
struct drm_rect *rect);
-/**
- * drm_helper_get_plane_damage_clips - Returns damage clips in &drm_rect.
- * @state: Plane state.
- *
- * Returns plane damage rectangles in internal &drm_rect. Currently &drm_rect
- * can be obtained by simply typecasting &drm_mode_rect. This is because both
- * are signed 32 and during drm_atomic_check_only() it is verified that damage
- * clips are inside fb.
- *
- * Return: Clips in plane fb_damage_clips blob property.
- */
-static inline struct drm_rect *
-drm_helper_get_plane_damage_clips(const struct drm_plane_state *state)
-{
- return (struct drm_rect *)drm_plane_get_damage_clips(state);
-}
-
#endif
diff --git a/include/drm/drm_debugfs.h b/include/drm/drm_debugfs.h
index 7501e323d383..2188dc83957f 100644
--- a/include/drm/drm_debugfs.h
+++ b/include/drm/drm_debugfs.h
@@ -80,18 +80,16 @@ struct drm_info_node {
};
#if defined(CONFIG_DEBUG_FS)
-int drm_debugfs_create_files(const struct drm_info_list *files,
- int count, struct dentry *root,
- struct drm_minor *minor);
+void drm_debugfs_create_files(const struct drm_info_list *files,
+ int count, struct dentry *root,
+ struct drm_minor *minor);
int drm_debugfs_remove_files(const struct drm_info_list *files,
int count, struct drm_minor *minor);
#else
-static inline int drm_debugfs_create_files(const struct drm_info_list *files,
- int count, struct dentry *root,
- struct drm_minor *minor)
-{
- return 0;
-}
+static inline void drm_debugfs_create_files(const struct drm_info_list *files,
+ int count, struct dentry *root,
+ struct drm_minor *minor)
+{}
static inline int drm_debugfs_remove_files(const struct drm_info_list *files,
int count, struct drm_minor *minor)
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 1acfc3bbd3fb..9923c7a6885e 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -6,16 +6,13 @@
#include <linux/mutex.h>
#include <linux/idr.h>
-#include <drm/drm_hashtab.h>
+#include <drm/drm_legacy.h>
#include <drm/drm_mode_config.h>
struct drm_driver;
struct drm_minor;
struct drm_master;
-struct drm_device_dma;
struct drm_vblank_crtc;
-struct drm_sg_mem;
-struct drm_local_map;
struct drm_vma_offset_manager;
struct drm_vram_mm;
struct drm_fb_helper;
@@ -27,7 +24,7 @@ struct pci_controller;
/**
- * enum drm_switch_power - power state of drm device
+ * enum switch_power_state - power state of drm device
*/
enum switch_power_state {
@@ -51,13 +48,6 @@ enum switch_power_state {
* may contain multiple heads.
*/
struct drm_device {
- /**
- * @legacy_dev_list:
- *
- * List of devices per driver for stealth attach cleanup
- */
- struct list_head legacy_dev_list;
-
/** @if_version: Highest interface version set */
int if_version;
@@ -67,15 +57,33 @@ struct drm_device {
/** @dev: Device structure of bus-device */
struct device *dev;
+ /**
+ * @managed:
+ *
+ * Managed resources linked to the lifetime of this &drm_device as
+ * tracked by @ref.
+ */
+ struct {
+ /** @managed.resources: managed resources list */
+ struct list_head resources;
+ /** @managed.final_kfree: pointer for final kfree() call */
+ void *final_kfree;
+ /** @managed.lock: protects @managed.resources */
+ spinlock_t lock;
+ } managed;
+
/** @driver: DRM driver managing the device */
- struct drm_driver *driver;
+ const struct drm_driver *driver;
/**
* @dev_private:
*
- * DRM driver private data. Instead of using this pointer it is
- * recommended that drivers use drm_dev_init() and embed struct
- * &drm_device in their larger per-device structure.
+ * DRM driver private data. This is deprecated and should be left set to
+ * NULL.
+ *
+ * Instead of using this pointer it is recommended that drivers use
+ * devm_drm_dev_alloc() and embed struct &drm_device in their larger
+ * per-device structure.
*/
void *dev_private;
@@ -128,6 +136,9 @@ struct drm_device {
* @struct_mutex:
*
* Lock for others (not &drm_minor.master and &drm_file.is_master)
+ *
+ * WARNING:
+ * Only drivers annotated with DRIVER_LEGACY should be using this.
*/
struct mutex struct_mutex;
@@ -144,7 +155,7 @@ struct drm_device {
* Usage counter for outstanding files open,
* protected by drm_global_mutex
*/
- int open_count;
+ atomic_t open_count;
/** @filelist_mutex: Protects @filelist. */
struct mutex filelist_mutex;
@@ -178,20 +189,6 @@ struct drm_device {
struct list_head clientlist;
/**
- * @irq_enabled:
- *
- * Indicates that interrupt handling is enabled, specifically vblank
- * handling. Drivers which don't use drm_irq_install() need to set this
- * to true manually.
- */
- bool irq_enabled;
-
- /**
- * @irq: Used by the drm_irq_install() and drm_irq_uninstall() helpers.
- */
- int irq;
-
- /**
* @vblank_disable_immediate:
*
* If true, vblank interrupt will be disabled immediately when the
@@ -262,16 +259,6 @@ struct drm_device {
*/
spinlock_t event_lock;
- /** @agp: AGP data */
- struct drm_agp_head *agp;
-
- /** @pdev: PCI device structure */
- struct pci_dev *pdev;
-
-#ifdef __alpha__
- /** @hose: PCI hose, only used on ALPHA platforms. */
- struct pci_controller *hose;
-#endif
/** @num_crtcs: Number of CRTCs on this device */
unsigned int num_crtcs;
@@ -311,6 +298,17 @@ struct drm_device {
/* Everything below here is for legacy driver, never use! */
/* private: */
#if IS_ENABLED(CONFIG_DRM_LEGACY)
+ /* List of devices per driver for stealth attach cleanup */
+ struct list_head legacy_dev_list;
+
+#ifdef __alpha__
+ /** @hose: PCI hose, only used on ALPHA platforms. */
+ struct pci_controller *hose;
+#endif
+
+ /* AGP data */
+ struct drm_agp_head *agp;
+
/* Context handle management - linked list of context handles */
struct list_head ctxlist;
@@ -357,6 +355,10 @@ struct drm_device {
/* Scatter gather memory */
struct drm_sg_mem *sg;
+
+ /* IRQs */
+ bool irq_enabled;
+ int irq;
#endif
};
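Tying the pieces together, the @dev_private deprecation and the now-const @driver pointer both point at the managed allocation path. A hedged sketch of a probe function following the recommendation in the @dev_private comment (struct my_device, my_driver and my_probe() are hypothetical; devm_drm_dev_alloc() is the helper named in that comment):

    struct my_device {
            struct drm_device drm;          /* embedded, not pointed to */
            void __iomem *regs;             /* driver-private state follows */
    };

    static int my_probe(struct platform_device *pdev)
    {
            struct my_device *priv;

            priv = devm_drm_dev_alloc(&pdev->dev, &my_driver,
                                      struct my_device, drm);
            if (IS_ERR(priv))
                    return PTR_ERR(priv);

            return drm_dev_register(&priv->drm, 0);
    }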
diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
index 9d3b745c3107..49649eb8447e 100644
--- a/include/drm/drm_displayid.h
+++ b/include/drm/drm_displayid.h
@@ -22,37 +22,74 @@
#ifndef DRM_DISPLAYID_H
#define DRM_DISPLAYID_H
-#define DATA_BLOCK_PRODUCT_ID 0x00
-#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01
-#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02
-#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03
-#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04
-#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05
-#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06
-#define DATA_BLOCK_VESA_TIMING 0x07
-#define DATA_BLOCK_CEA_TIMING 0x08
-#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09
-#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a
-#define DATA_BLOCK_GP_ASCII_STRING 0x0b
-#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c
-#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d
-#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e
-#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
-#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
-#define DATA_BLOCK_TILED_DISPLAY 0x12
-#define DATA_BLOCK_CTA 0x81
-
-#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
-
-#define PRODUCT_TYPE_EXTENSION 0
-#define PRODUCT_TYPE_TEST 1
-#define PRODUCT_TYPE_PANEL 2
-#define PRODUCT_TYPE_MONITOR 3
-#define PRODUCT_TYPE_TV 4
-#define PRODUCT_TYPE_REPEATER 5
-#define PRODUCT_TYPE_DIRECT_DRIVE 6
-
-struct displayid_hdr {
+#include <linux/types.h>
+#include <linux/bits.h>
+
+struct drm_edid;
+
+#define VESA_IEEE_OUI 0x3a0292
+
+/* DisplayID Structure versions */
+#define DISPLAY_ID_STRUCTURE_VER_12 0x12
+#define DISPLAY_ID_STRUCTURE_VER_20 0x20
+
+/* DisplayID Structure v1r2 Data Blocks */
+#define DATA_BLOCK_PRODUCT_ID 0x00
+#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01
+#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02
+#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03
+#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04
+#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05
+#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06
+#define DATA_BLOCK_VESA_TIMING 0x07
+#define DATA_BLOCK_CEA_TIMING 0x08
+#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09
+#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a
+#define DATA_BLOCK_GP_ASCII_STRING 0x0b
+#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c
+#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d
+#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e
+#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
+#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
+#define DATA_BLOCK_TILED_DISPLAY 0x12
+#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
+#define DATA_BLOCK_CTA 0x81
+
+/* DisplayID Structure v2r0 Data Blocks */
+#define DATA_BLOCK_2_PRODUCT_ID 0x20
+#define DATA_BLOCK_2_DISPLAY_PARAMETERS 0x21
+#define DATA_BLOCK_2_TYPE_7_DETAILED_TIMING 0x22
+#define DATA_BLOCK_2_TYPE_8_ENUMERATED_TIMING 0x23
+#define DATA_BLOCK_2_TYPE_9_FORMULA_TIMING 0x24
+#define DATA_BLOCK_2_DYNAMIC_VIDEO_TIMING 0x25
+#define DATA_BLOCK_2_DISPLAY_INTERFACE_FEATURES 0x26
+#define DATA_BLOCK_2_STEREO_DISPLAY_INTERFACE 0x27
+#define DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY 0x28
+#define DATA_BLOCK_2_CONTAINER_ID 0x29
+#define DATA_BLOCK_2_VENDOR_SPECIFIC 0x7e
+#define DATA_BLOCK_2_CTA_DISPLAY_ID 0x81
+
+/* DisplayID Structure v1r2 Product Type */
+#define PRODUCT_TYPE_EXTENSION 0
+#define PRODUCT_TYPE_TEST 1
+#define PRODUCT_TYPE_PANEL 2
+#define PRODUCT_TYPE_MONITOR 3
+#define PRODUCT_TYPE_TV 4
+#define PRODUCT_TYPE_REPEATER 5
+#define PRODUCT_TYPE_DIRECT_DRIVE 6
+
+/* DisplayID Structure v2r0 Display Product Primary Use Case (~Product Type) */
+#define PRIMARY_USE_EXTENSION 0
+#define PRIMARY_USE_TEST 1
+#define PRIMARY_USE_GENERIC 2
+#define PRIMARY_USE_TV 3
+#define PRIMARY_USE_DESKTOP_PRODUCTIVITY 4
+#define PRIMARY_USE_DESKTOP_GAMING 5
+#define PRIMARY_USE_PRESENTATION 6
+#define PRIMARY_USE_HEAD_MOUNTED_VR 7
+#define PRIMARY_USE_HEAD_MOUNTED_AR 8
+
+struct displayid_header {
u8 rev;
u8 bytes;
u8 prod_id;
@@ -89,15 +126,35 @@ struct displayid_detailed_timings_1 {
struct displayid_detailed_timing_block {
struct displayid_block base;
- struct displayid_detailed_timings_1 timings[0];
+ struct displayid_detailed_timings_1 timings[];
+};
+
+#define DISPLAYID_VESA_MSO_OVERLAP GENMASK(3, 0)
+#define DISPLAYID_VESA_MSO_MODE GENMASK(6, 5)
+
+struct displayid_vesa_vendor_specific_block {
+ struct displayid_block base;
+ u8 oui[3];
+ u8 data_structure_type;
+ u8 mso;
+} __packed;
+
+/* DisplayID iteration */
+struct displayid_iter {
+ const struct drm_edid *drm_edid;
+
+ const u8 *section;
+ int length;
+ int idx;
+ int ext_index;
};
-#define for_each_displayid_db(displayid, block, idx, length) \
- for ((block) = (struct displayid_block *)&(displayid)[idx]; \
- (idx) + sizeof(struct displayid_block) <= (length) && \
- (idx) + sizeof(struct displayid_block) + (block)->num_bytes <= (length) && \
- (block)->num_bytes > 0; \
- (idx) += (block)->num_bytes + sizeof(struct displayid_block), \
- (block) = (struct displayid_block *)&(displayid)[idx])
+void displayid_iter_edid_begin(const struct drm_edid *drm_edid,
+ struct displayid_iter *iter);
+const struct displayid_block *
+__displayid_iter_next(struct displayid_iter *iter);
+#define displayid_iter_for_each(__block, __iter) \
+ while (((__block) = __displayid_iter_next(__iter)))
+void displayid_iter_end(struct displayid_iter *iter);
#endif
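The new iterator replaces the removed for_each_displayid_db() loop; cursor and bounds checking live in struct displayid_iter instead of open-coded index arithmetic. A minimal sketch, assuming struct displayid_block carries the block tag in a ->tag field (my_parse_tile() is hypothetical):

    static void my_parse_displayid(const struct drm_edid *drm_edid)
    {
            const struct displayid_block *block;
            struct displayid_iter iter;

            displayid_iter_edid_begin(drm_edid, &iter);
            displayid_iter_for_each(block, &iter) {
                    if (block->tag == DATA_BLOCK_TILED_DISPLAY)
                            my_parse_tile(block);
            }
            displayid_iter_end(&iter);
    }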
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
deleted file mode 100644
index bc04467f7c3a..000000000000
--- a/include/drm/drm_dp_helper.h
+++ /dev/null
@@ -1,1581 +0,0 @@
-/*
- * Copyright © 2008 Keith Packard
- *
- * Permission to use, copy, modify, distribute, and sell this software and its
- * documentation for any purpose is hereby granted without fee, provided that
- * the above copyright notice appear in all copies and that both that copyright
- * notice and this permission notice appear in supporting documentation, and
- * that the name of the copyright holders not be used in advertising or
- * publicity pertaining to distribution of the software without specific,
- * written prior permission. The copyright holders make no representations
- * about the suitability of this software for any purpose. It is provided "as
- * is" without express or implied warranty.
- *
- * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
- * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
- * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
- * OF THIS SOFTWARE.
- */
-
-#ifndef _DRM_DP_HELPER_H_
-#define _DRM_DP_HELPER_H_
-
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/types.h>
-
-/*
- * Unless otherwise noted, all values are from the DP 1.1a spec. Note that
- * DP and DPCD versions are independent. Differences from 1.0 are not noted,
- * 1.0 devices basically don't exist in the wild.
- *
- * Abbreviations, in chronological order:
- *
- * eDP: Embedded DisplayPort version 1
- * DPI: DisplayPort Interoperability Guideline v1.1a
- * 1.2: DisplayPort 1.2
- * MST: Multistream Transport - part of DP 1.2a
- *
- * 1.2 formally includes both eDP and DPI definitions.
- */
-
-/* MSA (Main Stream Attribute) MISC bits (as MISC1<<8|MISC0) */
-#define DP_MSA_MISC_SYNC_CLOCK (1 << 0)
-#define DP_MSA_MISC_INTERLACE_VTOTAL_EVEN (1 << 8)
-#define DP_MSA_MISC_STEREO_NO_3D (0 << 9)
-#define DP_MSA_MISC_STEREO_PROG_RIGHT_EYE (1 << 9)
-#define DP_MSA_MISC_STEREO_PROG_LEFT_EYE (3 << 9)
-/* bits per component for non-RAW */
-#define DP_MSA_MISC_6_BPC (0 << 5)
-#define DP_MSA_MISC_8_BPC (1 << 5)
-#define DP_MSA_MISC_10_BPC (2 << 5)
-#define DP_MSA_MISC_12_BPC (3 << 5)
-#define DP_MSA_MISC_16_BPC (4 << 5)
-/* bits per component for RAW */
-#define DP_MSA_MISC_RAW_6_BPC (1 << 5)
-#define DP_MSA_MISC_RAW_7_BPC (2 << 5)
-#define DP_MSA_MISC_RAW_8_BPC (3 << 5)
-#define DP_MSA_MISC_RAW_10_BPC (4 << 5)
-#define DP_MSA_MISC_RAW_12_BPC (5 << 5)
-#define DP_MSA_MISC_RAW_14_BPC (6 << 5)
-#define DP_MSA_MISC_RAW_16_BPC (7 << 5)
-/* pixel encoding/colorimetry format */
-#define _DP_MSA_MISC_COLOR(misc1_7, misc0_21, misc0_3, misc0_4) \
- ((misc1_7) << 15 | (misc0_4) << 4 | (misc0_3) << 3 | ((misc0_21) << 1))
-#define DP_MSA_MISC_COLOR_RGB _DP_MSA_MISC_COLOR(0, 0, 0, 0)
-#define DP_MSA_MISC_COLOR_CEA_RGB _DP_MSA_MISC_COLOR(0, 0, 1, 0)
-#define DP_MSA_MISC_COLOR_RGB_WIDE_FIXED _DP_MSA_MISC_COLOR(0, 3, 0, 0)
-#define DP_MSA_MISC_COLOR_RGB_WIDE_FLOAT _DP_MSA_MISC_COLOR(0, 3, 0, 1)
-#define DP_MSA_MISC_COLOR_Y_ONLY _DP_MSA_MISC_COLOR(1, 0, 0, 0)
-#define DP_MSA_MISC_COLOR_RAW _DP_MSA_MISC_COLOR(1, 1, 0, 0)
-#define DP_MSA_MISC_COLOR_YCBCR_422_BT601 _DP_MSA_MISC_COLOR(0, 1, 1, 0)
-#define DP_MSA_MISC_COLOR_YCBCR_422_BT709 _DP_MSA_MISC_COLOR(0, 1, 1, 1)
-#define DP_MSA_MISC_COLOR_YCBCR_444_BT601 _DP_MSA_MISC_COLOR(0, 2, 1, 0)
-#define DP_MSA_MISC_COLOR_YCBCR_444_BT709 _DP_MSA_MISC_COLOR(0, 2, 1, 1)
-#define DP_MSA_MISC_COLOR_XVYCC_422_BT601 _DP_MSA_MISC_COLOR(0, 1, 0, 0)
-#define DP_MSA_MISC_COLOR_XVYCC_422_BT709 _DP_MSA_MISC_COLOR(0, 1, 0, 1)
-#define DP_MSA_MISC_COLOR_XVYCC_444_BT601 _DP_MSA_MISC_COLOR(0, 2, 0, 0)
-#define DP_MSA_MISC_COLOR_XVYCC_444_BT709 _DP_MSA_MISC_COLOR(0, 2, 0, 1)
-#define DP_MSA_MISC_COLOR_OPRGB _DP_MSA_MISC_COLOR(0, 0, 1, 1)
-#define DP_MSA_MISC_COLOR_DCI_P3 _DP_MSA_MISC_COLOR(0, 3, 1, 0)
-#define DP_MSA_MISC_COLOR_COLOR_PROFILE _DP_MSA_MISC_COLOR(0, 3, 1, 1)
-#define DP_MSA_MISC_COLOR_VSC_SDP (1 << 14)
-
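For illustration, the MISC word (in the MISC1<<8|MISC0 layout noted above) for an 8 bpc RGB stream whose pixel clock is synchronous to the link clock would be assembled as:

    u32 misc = DP_MSA_MISC_SYNC_CLOCK |
               DP_MSA_MISC_8_BPC |
               DP_MSA_MISC_COLOR_RGB;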
-#define DP_AUX_MAX_PAYLOAD_BYTES 16
-
-#define DP_AUX_I2C_WRITE 0x0
-#define DP_AUX_I2C_READ 0x1
-#define DP_AUX_I2C_WRITE_STATUS_UPDATE 0x2
-#define DP_AUX_I2C_MOT 0x4
-#define DP_AUX_NATIVE_WRITE 0x8
-#define DP_AUX_NATIVE_READ 0x9
-
-#define DP_AUX_NATIVE_REPLY_ACK (0x0 << 0)
-#define DP_AUX_NATIVE_REPLY_NACK (0x1 << 0)
-#define DP_AUX_NATIVE_REPLY_DEFER (0x2 << 0)
-#define DP_AUX_NATIVE_REPLY_MASK (0x3 << 0)
-
-#define DP_AUX_I2C_REPLY_ACK (0x0 << 2)
-#define DP_AUX_I2C_REPLY_NACK (0x1 << 2)
-#define DP_AUX_I2C_REPLY_DEFER (0x2 << 2)
-#define DP_AUX_I2C_REPLY_MASK (0x3 << 2)
-
-/* AUX CH addresses */
-/* DPCD */
-#define DP_DPCD_REV 0x000
-# define DP_DPCD_REV_10 0x10
-# define DP_DPCD_REV_11 0x11
-# define DP_DPCD_REV_12 0x12
-# define DP_DPCD_REV_13 0x13
-# define DP_DPCD_REV_14 0x14
-
-#define DP_MAX_LINK_RATE 0x001
-
-#define DP_MAX_LANE_COUNT 0x002
-# define DP_MAX_LANE_COUNT_MASK 0x1f
-# define DP_TPS3_SUPPORTED (1 << 6) /* 1.2 */
-# define DP_ENHANCED_FRAME_CAP (1 << 7)
-
-#define DP_MAX_DOWNSPREAD 0x003
-# define DP_MAX_DOWNSPREAD_0_5 (1 << 0)
-# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6)
-# define DP_TPS4_SUPPORTED (1 << 7)
-
-#define DP_NORP 0x004
-
-#define DP_DOWNSTREAMPORT_PRESENT 0x005
-# define DP_DWN_STRM_PORT_PRESENT (1 << 0)
-# define DP_DWN_STRM_PORT_TYPE_MASK 0x06
-# define DP_DWN_STRM_PORT_TYPE_DP (0 << 1)
-# define DP_DWN_STRM_PORT_TYPE_ANALOG (1 << 1)
-# define DP_DWN_STRM_PORT_TYPE_TMDS (2 << 1)
-# define DP_DWN_STRM_PORT_TYPE_OTHER (3 << 1)
-# define DP_FORMAT_CONVERSION (1 << 3)
-# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */
-
-#define DP_MAIN_LINK_CHANNEL_CODING 0x006
-# define DP_CAP_ANSI_8B10B (1 << 0)
-
-#define DP_DOWN_STREAM_PORT_COUNT 0x007
-# define DP_PORT_COUNT_MASK 0x0f
-# define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */
-# define DP_OUI_SUPPORT (1 << 7)
-
-#define DP_RECEIVE_PORT_0_CAP_0 0x008
-# define DP_LOCAL_EDID_PRESENT (1 << 1)
-# define DP_ASSOCIATED_TO_PRECEDING_PORT (1 << 2)
-
-#define DP_RECEIVE_PORT_0_BUFFER_SIZE 0x009
-
-#define DP_RECEIVE_PORT_1_CAP_0 0x00a
-#define DP_RECEIVE_PORT_1_BUFFER_SIZE 0x00b
-
-#define DP_I2C_SPEED_CAP 0x00c /* DPI */
-# define DP_I2C_SPEED_1K 0x01
-# define DP_I2C_SPEED_5K 0x02
-# define DP_I2C_SPEED_10K 0x04
-# define DP_I2C_SPEED_100K 0x08
-# define DP_I2C_SPEED_400K 0x10
-# define DP_I2C_SPEED_1M 0x20
-
-#define DP_EDP_CONFIGURATION_CAP 0x00d /* XXX 1.2? */
-# define DP_ALTERNATE_SCRAMBLER_RESET_CAP (1 << 0)
-# define DP_FRAMING_CHANGE_CAP (1 << 1)
-# define DP_DPCD_DISPLAY_CONTROL_CAPABLE (1 << 3) /* edp v1.2 or higher */
-
-#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
-# define DP_TRAINING_AUX_RD_MASK 0x7F /* DP 1.3 */
-# define DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT (1 << 7) /* DP 1.3 */
-
-#define DP_ADAPTER_CAP 0x00f /* 1.2 */
-# define DP_FORCE_LOAD_SENSE_CAP (1 << 0)
-# define DP_ALTERNATE_I2C_PATTERN_CAP (1 << 1)
-
-#define DP_SUPPORTED_LINK_RATES 0x010 /* eDP 1.4 */
-# define DP_MAX_SUPPORTED_RATES 8 /* 16-bit little-endian */
-
-/* Multiple stream transport */
-#define DP_FAUX_CAP 0x020 /* 1.2 */
-# define DP_FAUX_CAP_1 (1 << 0)
-
-#define DP_MSTM_CAP 0x021 /* 1.2 */
-# define DP_MST_CAP (1 << 0)
-
-#define DP_NUMBER_OF_AUDIO_ENDPOINTS 0x022 /* 1.2 */
-
-/* AV_SYNC_DATA_BLOCK 1.2 */
-#define DP_AV_GRANULARITY 0x023
-# define DP_AG_FACTOR_MASK (0xf << 0)
-# define DP_AG_FACTOR_3MS (0 << 0)
-# define DP_AG_FACTOR_2MS (1 << 0)
-# define DP_AG_FACTOR_1MS (2 << 0)
-# define DP_AG_FACTOR_500US (3 << 0)
-# define DP_AG_FACTOR_200US (4 << 0)
-# define DP_AG_FACTOR_100US (5 << 0)
-# define DP_AG_FACTOR_10US (6 << 0)
-# define DP_AG_FACTOR_1US (7 << 0)
-# define DP_VG_FACTOR_MASK (0xf << 4)
-# define DP_VG_FACTOR_3MS (0 << 4)
-# define DP_VG_FACTOR_2MS (1 << 4)
-# define DP_VG_FACTOR_1MS (2 << 4)
-# define DP_VG_FACTOR_500US (3 << 4)
-# define DP_VG_FACTOR_200US (4 << 4)
-# define DP_VG_FACTOR_100US (5 << 4)
-
-#define DP_AUD_DEC_LAT0 0x024
-#define DP_AUD_DEC_LAT1 0x025
-
-#define DP_AUD_PP_LAT0 0x026
-#define DP_AUD_PP_LAT1 0x027
-
-#define DP_VID_INTER_LAT 0x028
-
-#define DP_VID_PROG_LAT 0x029
-
-#define DP_REP_LAT 0x02a
-
-#define DP_AUD_DEL_INS0 0x02b
-#define DP_AUD_DEL_INS1 0x02c
-#define DP_AUD_DEL_INS2 0x02d
-/* End of AV_SYNC_DATA_BLOCK */
-
-#define DP_RECEIVER_ALPM_CAP 0x02e /* eDP 1.4 */
-# define DP_ALPM_CAP (1 << 0)
-
-#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP 0x02f /* eDP 1.4 */
-# define DP_AUX_FRAME_SYNC_CAP (1 << 0)
-
-#define DP_GUID 0x030 /* 1.2 */
-
-#define DP_DSC_SUPPORT 0x060 /* DP 1.4 */
-# define DP_DSC_DECOMPRESSION_IS_SUPPORTED (1 << 0)
-
-#define DP_DSC_REV 0x061
-# define DP_DSC_MAJOR_MASK (0xf << 0)
-# define DP_DSC_MINOR_MASK (0xf << 4)
-# define DP_DSC_MAJOR_SHIFT 0
-# define DP_DSC_MINOR_SHIFT 4
-
-#define DP_DSC_RC_BUF_BLK_SIZE 0x062
-# define DP_DSC_RC_BUF_BLK_SIZE_1 0x0
-# define DP_DSC_RC_BUF_BLK_SIZE_4 0x1
-# define DP_DSC_RC_BUF_BLK_SIZE_16 0x2
-# define DP_DSC_RC_BUF_BLK_SIZE_64 0x3
-
-#define DP_DSC_RC_BUF_SIZE 0x063
-
-#define DP_DSC_SLICE_CAP_1 0x064
-# define DP_DSC_1_PER_DP_DSC_SINK (1 << 0)
-# define DP_DSC_2_PER_DP_DSC_SINK (1 << 1)
-# define DP_DSC_4_PER_DP_DSC_SINK (1 << 3)
-# define DP_DSC_6_PER_DP_DSC_SINK (1 << 4)
-# define DP_DSC_8_PER_DP_DSC_SINK (1 << 5)
-# define DP_DSC_10_PER_DP_DSC_SINK (1 << 6)
-# define DP_DSC_12_PER_DP_DSC_SINK (1 << 7)
-
-#define DP_DSC_LINE_BUF_BIT_DEPTH 0x065
-# define DP_DSC_LINE_BUF_BIT_DEPTH_MASK (0xf << 0)
-# define DP_DSC_LINE_BUF_BIT_DEPTH_9 0x0
-# define DP_DSC_LINE_BUF_BIT_DEPTH_10 0x1
-# define DP_DSC_LINE_BUF_BIT_DEPTH_11 0x2
-# define DP_DSC_LINE_BUF_BIT_DEPTH_12 0x3
-# define DP_DSC_LINE_BUF_BIT_DEPTH_13 0x4
-# define DP_DSC_LINE_BUF_BIT_DEPTH_14 0x5
-# define DP_DSC_LINE_BUF_BIT_DEPTH_15 0x6
-# define DP_DSC_LINE_BUF_BIT_DEPTH_16 0x7
-# define DP_DSC_LINE_BUF_BIT_DEPTH_8 0x8
-
-#define DP_DSC_BLK_PREDICTION_SUPPORT 0x066
-# define DP_DSC_BLK_PREDICTION_IS_SUPPORTED (1 << 0)
-
-#define DP_DSC_MAX_BITS_PER_PIXEL_LOW 0x067 /* eDP 1.4 */
-
-#define DP_DSC_MAX_BITS_PER_PIXEL_HI 0x068 /* eDP 1.4 */
-# define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK (0x3 << 0)
-# define DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT 8
-
-#define DP_DSC_DEC_COLOR_FORMAT_CAP 0x069
-# define DP_DSC_RGB (1 << 0)
-# define DP_DSC_YCbCr444 (1 << 1)
-# define DP_DSC_YCbCr422_Simple (1 << 2)
-# define DP_DSC_YCbCr422_Native (1 << 3)
-# define DP_DSC_YCbCr420_Native (1 << 4)
-
-#define DP_DSC_DEC_COLOR_DEPTH_CAP 0x06A
-# define DP_DSC_8_BPC (1 << 1)
-# define DP_DSC_10_BPC (1 << 2)
-# define DP_DSC_12_BPC (1 << 3)
-
-#define DP_DSC_PEAK_THROUGHPUT 0x06B
-# define DP_DSC_THROUGHPUT_MODE_0_MASK (0xf << 0)
-# define DP_DSC_THROUGHPUT_MODE_0_SHIFT 0
-# define DP_DSC_THROUGHPUT_MODE_0_UPSUPPORTED 0
-# define DP_DSC_THROUGHPUT_MODE_0_340 (1 << 0)
-# define DP_DSC_THROUGHPUT_MODE_0_400 (2 << 0)
-# define DP_DSC_THROUGHPUT_MODE_0_450 (3 << 0)
-# define DP_DSC_THROUGHPUT_MODE_0_500 (4 << 0)
-# define DP_DSC_THROUGHPUT_MODE_0_550 (5 << 0)
-# define DP_DSC_THROUGHPUT_MODE_0_600 (6 << 0)
-# define DP_DSC_THROUGHPUT_MODE_0_650 (7 << 0)
-# define DP_DSC_THROUGHPUT_MODE_0_700 (8 << 0)
-# define DP_DSC_THROUGHPUT_MODE_0_750 (9 << 0)
-# define DP_DSC_THROUGHPUT_MODE_0_800 (10 << 0)
-# define DP_DSC_THROUGHPUT_MODE_0_850 (11 << 0)
-# define DP_DSC_THROUGHPUT_MODE_0_900 (12 << 0)
-# define DP_DSC_THROUGHPUT_MODE_0_950 (13 << 0)
-# define DP_DSC_THROUGHPUT_MODE_0_1000 (14 << 0)
-# define DP_DSC_THROUGHPUT_MODE_0_170 (15 << 0) /* 1.4a */
-# define DP_DSC_THROUGHPUT_MODE_1_MASK (0xf << 4)
-# define DP_DSC_THROUGHPUT_MODE_1_SHIFT 4
-# define DP_DSC_THROUGHPUT_MODE_1_UPSUPPORTED 0
-# define DP_DSC_THROUGHPUT_MODE_1_340 (1 << 4)
-# define DP_DSC_THROUGHPUT_MODE_1_400 (2 << 4)
-# define DP_DSC_THROUGHPUT_MODE_1_450 (3 << 4)
-# define DP_DSC_THROUGHPUT_MODE_1_500 (4 << 4)
-# define DP_DSC_THROUGHPUT_MODE_1_550 (5 << 4)
-# define DP_DSC_THROUGHPUT_MODE_1_600 (6 << 4)
-# define DP_DSC_THROUGHPUT_MODE_1_650 (7 << 4)
-# define DP_DSC_THROUGHPUT_MODE_1_700 (8 << 4)
-# define DP_DSC_THROUGHPUT_MODE_1_750 (9 << 4)
-# define DP_DSC_THROUGHPUT_MODE_1_800 (10 << 4)
-# define DP_DSC_THROUGHPUT_MODE_1_850 (11 << 4)
-# define DP_DSC_THROUGHPUT_MODE_1_900 (12 << 4)
-# define DP_DSC_THROUGHPUT_MODE_1_950 (13 << 4)
-# define DP_DSC_THROUGHPUT_MODE_1_1000 (14 << 4)
-# define DP_DSC_THROUGHPUT_MODE_1_170 (15 << 4)
-
-#define DP_DSC_MAX_SLICE_WIDTH 0x06C
-#define DP_DSC_MIN_SLICE_WIDTH_VALUE 2560
-#define DP_DSC_SLICE_WIDTH_MULTIPLIER 320
-
-#define DP_DSC_SLICE_CAP_2 0x06D
-# define DP_DSC_16_PER_DP_DSC_SINK (1 << 0)
-# define DP_DSC_20_PER_DP_DSC_SINK (1 << 1)
-# define DP_DSC_24_PER_DP_DSC_SINK (1 << 2)
-
-#define DP_DSC_BITS_PER_PIXEL_INC 0x06F
-# define DP_DSC_BITS_PER_PIXEL_1_16 0x0
-# define DP_DSC_BITS_PER_PIXEL_1_8 0x1
-# define DP_DSC_BITS_PER_PIXEL_1_4 0x2
-# define DP_DSC_BITS_PER_PIXEL_1_2 0x3
-# define DP_DSC_BITS_PER_PIXEL_1 0x4
-
-#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
-# define DP_PSR_IS_SUPPORTED 1
-# define DP_PSR2_IS_SUPPORTED 2 /* eDP 1.4 */
-# define DP_PSR2_WITH_Y_COORD_IS_SUPPORTED 3 /* eDP 1.4a */
-
-#define DP_PSR_CAPS 0x071 /* XXX 1.2? */
-# define DP_PSR_NO_TRAIN_ON_EXIT 1
-# define DP_PSR_SETUP_TIME_330 (0 << 1)
-# define DP_PSR_SETUP_TIME_275 (1 << 1)
-# define DP_PSR_SETUP_TIME_220 (2 << 1)
-# define DP_PSR_SETUP_TIME_165 (3 << 1)
-# define DP_PSR_SETUP_TIME_110 (4 << 1)
-# define DP_PSR_SETUP_TIME_55 (5 << 1)
-# define DP_PSR_SETUP_TIME_0 (6 << 1)
-# define DP_PSR_SETUP_TIME_MASK (7 << 1)
-# define DP_PSR_SETUP_TIME_SHIFT 1
-# define DP_PSR2_SU_Y_COORDINATE_REQUIRED (1 << 4) /* eDP 1.4a */
-# define DP_PSR2_SU_GRANULARITY_REQUIRED (1 << 5) /* eDP 1.4b */
-
-#define DP_PSR2_SU_X_GRANULARITY 0x072 /* eDP 1.4b */
-#define DP_PSR2_SU_Y_GRANULARITY 0x074 /* eDP 1.4b */
-
-/*
- * 0x80-0x8f describe downstream port capabilities, but there are two layouts
- * based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set. If it was not,
- * each port's descriptor is one byte wide. If it was set, each port's is
- * four bytes wide, starting with the one byte from the base info. As of
- * DP interop v1.1a only VGA defines additional detail.
- */
-
-/* offset 0 */
-#define DP_DOWNSTREAM_PORT_0 0x80
-# define DP_DS_PORT_TYPE_MASK (7 << 0)
-# define DP_DS_PORT_TYPE_DP 0
-# define DP_DS_PORT_TYPE_VGA 1
-# define DP_DS_PORT_TYPE_DVI 2
-# define DP_DS_PORT_TYPE_HDMI 3
-# define DP_DS_PORT_TYPE_NON_EDID 4
-# define DP_DS_PORT_TYPE_DP_DUALMODE 5
-# define DP_DS_PORT_TYPE_WIRELESS 6
-# define DP_DS_PORT_HPD (1 << 3)
-/* offset 1 for VGA is maximum megapixels per second / 8 */
-/* offset 2 */
-# define DP_DS_MAX_BPC_MASK (3 << 0)
-# define DP_DS_8BPC 0
-# define DP_DS_10BPC 1
-# define DP_DS_12BPC 2
-# define DP_DS_16BPC 3
-
-/* DP Forward error Correction Registers */
-#define DP_FEC_CAPABILITY 0x090 /* 1.4 */
-# define DP_FEC_CAPABLE (1 << 0)
-# define DP_FEC_UNCORR_BLK_ERROR_COUNT_CAP (1 << 1)
-# define DP_FEC_CORR_BLK_ERROR_COUNT_CAP (1 << 2)
-# define DP_FEC_BIT_ERROR_COUNT_CAP (1 << 3)
-
-/* DP Extended DSC Capabilities */
-#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 0x0a0 /* DP 1.4a SCR */
-#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1
-#define DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2
-
-/* link configuration */
-#define DP_LINK_BW_SET 0x100
-# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */
-# define DP_LINK_BW_1_62 0x06
-# define DP_LINK_BW_2_7 0x0a
-# define DP_LINK_BW_5_4 0x14 /* 1.2 */
-# define DP_LINK_BW_8_1 0x1e /* 1.4 */
-
-#define DP_LANE_COUNT_SET 0x101
-# define DP_LANE_COUNT_MASK 0x0f
-# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
-
-#define DP_TRAINING_PATTERN_SET 0x102
-# define DP_TRAINING_PATTERN_DISABLE 0
-# define DP_TRAINING_PATTERN_1 1
-# define DP_TRAINING_PATTERN_2 2
-# define DP_TRAINING_PATTERN_3 3 /* 1.2 */
-# define DP_TRAINING_PATTERN_4 7 /* 1.4 */
-# define DP_TRAINING_PATTERN_MASK 0x3
-# define DP_TRAINING_PATTERN_MASK_1_4 0xf
-
-/* DPCD 1.1 only. For DPCD >= 1.2 see per-lane DP_LINK_QUAL_LANEn_SET */
-# define DP_LINK_QUAL_PATTERN_11_DISABLE (0 << 2)
-# define DP_LINK_QUAL_PATTERN_11_D10_2 (1 << 2)
-# define DP_LINK_QUAL_PATTERN_11_ERROR_RATE (2 << 2)
-# define DP_LINK_QUAL_PATTERN_11_PRBS7 (3 << 2)
-# define DP_LINK_QUAL_PATTERN_11_MASK (3 << 2)
-
-# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
-# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
-
-# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6)
-# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6)
-# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6)
-# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6)
-
-#define DP_TRAINING_LANE0_SET 0x103
-#define DP_TRAINING_LANE1_SET 0x104
-#define DP_TRAINING_LANE2_SET 0x105
-#define DP_TRAINING_LANE3_SET 0x106
-
-# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
-# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
-# define DP_TRAIN_MAX_SWING_REACHED (1 << 2)
-# define DP_TRAIN_VOLTAGE_SWING_LEVEL_0 (0 << 0)
-# define DP_TRAIN_VOLTAGE_SWING_LEVEL_1 (1 << 0)
-# define DP_TRAIN_VOLTAGE_SWING_LEVEL_2 (2 << 0)
-# define DP_TRAIN_VOLTAGE_SWING_LEVEL_3 (3 << 0)
-
-# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3)
-# define DP_TRAIN_PRE_EMPH_LEVEL_0 (0 << 3)
-# define DP_TRAIN_PRE_EMPH_LEVEL_1 (1 << 3)
-# define DP_TRAIN_PRE_EMPH_LEVEL_2 (2 << 3)
-# define DP_TRAIN_PRE_EMPH_LEVEL_3 (3 << 3)
-
-# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
-# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
-
-#define DP_DOWNSPREAD_CTRL 0x107
-# define DP_SPREAD_AMP_0_5 (1 << 4)
-# define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */
-
-#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
-# define DP_SET_ANSI_8B10B (1 << 0)
-
-#define DP_I2C_SPEED_CONTROL_STATUS 0x109 /* DPI */
-/* bitmask as for DP_I2C_SPEED_CAP */
-
-#define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? */
-# define DP_ALTERNATE_SCRAMBLER_RESET_ENABLE (1 << 0)
-# define DP_FRAMING_CHANGE_ENABLE (1 << 1)
-# define DP_PANEL_SELF_TEST_ENABLE (1 << 7)
-
-#define DP_LINK_QUAL_LANE0_SET 0x10b /* DPCD >= 1.2 */
-#define DP_LINK_QUAL_LANE1_SET 0x10c
-#define DP_LINK_QUAL_LANE2_SET 0x10d
-#define DP_LINK_QUAL_LANE3_SET 0x10e
-# define DP_LINK_QUAL_PATTERN_DISABLE 0
-# define DP_LINK_QUAL_PATTERN_D10_2 1
-# define DP_LINK_QUAL_PATTERN_ERROR_RATE 2
-# define DP_LINK_QUAL_PATTERN_PRBS7 3
-# define DP_LINK_QUAL_PATTERN_80BIT_CUSTOM 4
-# define DP_LINK_QUAL_PATTERN_HBR2_EYE 5
-# define DP_LINK_QUAL_PATTERN_MASK 7
-
-#define DP_TRAINING_LANE0_1_SET2 0x10f
-#define DP_TRAINING_LANE2_3_SET2 0x110
-# define DP_LANE02_POST_CURSOR2_SET_MASK (3 << 0)
-# define DP_LANE02_MAX_POST_CURSOR2_REACHED (1 << 2)
-# define DP_LANE13_POST_CURSOR2_SET_MASK (3 << 4)
-# define DP_LANE13_MAX_POST_CURSOR2_REACHED (1 << 6)
-
-#define DP_MSTM_CTRL 0x111 /* 1.2 */
-# define DP_MST_EN (1 << 0)
-# define DP_UP_REQ_EN (1 << 1)
-# define DP_UPSTREAM_IS_SRC (1 << 2)
-
-#define DP_AUDIO_DELAY0 0x112 /* 1.2 */
-#define DP_AUDIO_DELAY1 0x113
-#define DP_AUDIO_DELAY2 0x114
-
-#define DP_LINK_RATE_SET 0x115 /* eDP 1.4 */
-# define DP_LINK_RATE_SET_SHIFT 0
-# define DP_LINK_RATE_SET_MASK (7 << 0)
-
-#define DP_RECEIVER_ALPM_CONFIG 0x116 /* eDP 1.4 */
-# define DP_ALPM_ENABLE (1 << 0)
-# define DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE (1 << 1)
-
-#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF 0x117 /* eDP 1.4 */
-# define DP_AUX_FRAME_SYNC_ENABLE (1 << 0)
-# define DP_IRQ_HPD_ENABLE (1 << 1)
-
-#define DP_UPSTREAM_DEVICE_DP_PWR_NEED 0x118 /* 1.2 */
-# define DP_PWR_NOT_NEEDED (1 << 0)
-
-#define DP_FEC_CONFIGURATION 0x120 /* 1.4 */
-# define DP_FEC_READY (1 << 0)
-# define DP_FEC_ERR_COUNT_SEL_MASK (7 << 1)
-# define DP_FEC_ERR_COUNT_DIS (0 << 1)
-# define DP_FEC_UNCORR_BLK_ERROR_COUNT (1 << 1)
-# define DP_FEC_CORR_BLK_ERROR_COUNT (2 << 1)
-# define DP_FEC_BIT_ERROR_COUNT (3 << 1)
-# define DP_FEC_LANE_SELECT_MASK (3 << 4)
-# define DP_FEC_LANE_0_SELECT (0 << 4)
-# define DP_FEC_LANE_1_SELECT (1 << 4)
-# define DP_FEC_LANE_2_SELECT (2 << 4)
-# define DP_FEC_LANE_3_SELECT (3 << 4)
-
-#define DP_AUX_FRAME_SYNC_VALUE 0x15c /* eDP 1.4 */
-# define DP_AUX_FRAME_SYNC_VALID (1 << 0)
-
-#define DP_DSC_ENABLE 0x160 /* DP 1.4 */
-# define DP_DECOMPRESSION_EN (1 << 0)
-
-#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
-# define DP_PSR_ENABLE (1 << 0)
-# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1)
-# define DP_PSR_CRC_VERIFICATION (1 << 2)
-# define DP_PSR_FRAME_CAPTURE (1 << 3)
-# define DP_PSR_SELECTIVE_UPDATE (1 << 4)
-# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5)
-# define DP_PSR_ENABLE_PSR2 (1 << 6) /* eDP 1.4a */
-
-#define DP_ADAPTER_CTRL 0x1a0
-# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0)
-
-#define DP_BRANCH_DEVICE_CTRL 0x1a1
-# define DP_BRANCH_DEVICE_IRQ_HPD (1 << 0)
-
-#define DP_PAYLOAD_ALLOCATE_SET 0x1c0
-#define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1
-#define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2
-
-#define DP_SINK_COUNT 0x200
-/* prior to 1.2 bit 7 was reserved mbz */
-# define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f))
-# define DP_SINK_CP_READY (1 << 6)
-
-#define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201
-# define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0)
-# define DP_AUTOMATED_TEST_REQUEST (1 << 1)
-# define DP_CP_IRQ (1 << 2)
-# define DP_MCCS_IRQ (1 << 3)
-# define DP_DOWN_REP_MSG_RDY (1 << 4) /* 1.2 MST */
-# define DP_UP_REQ_MSG_RDY (1 << 5) /* 1.2 MST */
-# define DP_SINK_SPECIFIC_IRQ (1 << 6)
-
-#define DP_LANE0_1_STATUS 0x202
-#define DP_LANE2_3_STATUS 0x203
-# define DP_LANE_CR_DONE (1 << 0)
-# define DP_LANE_CHANNEL_EQ_DONE (1 << 1)
-# define DP_LANE_SYMBOL_LOCKED (1 << 2)
-
-#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE | \
- DP_LANE_CHANNEL_EQ_DONE | \
- DP_LANE_SYMBOL_LOCKED)
-
-#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
-
-#define DP_INTERLANE_ALIGN_DONE (1 << 0)
-#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
-#define DP_LINK_STATUS_UPDATED (1 << 7)
-
-#define DP_SINK_STATUS 0x205
-
-#define DP_RECEIVE_PORT_0_STATUS (1 << 0)
-#define DP_RECEIVE_PORT_1_STATUS (1 << 1)
-
-#define DP_ADJUST_REQUEST_LANE0_1 0x206
-#define DP_ADJUST_REQUEST_LANE2_3 0x207
-# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
-# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
-# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
-# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
-# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
-# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
-# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
-# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
-
-#define DP_ADJUST_REQUEST_POST_CURSOR2 0x20c
-# define DP_ADJUST_POST_CURSOR2_LANE0_MASK 0x03
-# define DP_ADJUST_POST_CURSOR2_LANE0_SHIFT 0
-# define DP_ADJUST_POST_CURSOR2_LANE1_MASK 0x0c
-# define DP_ADJUST_POST_CURSOR2_LANE1_SHIFT 2
-# define DP_ADJUST_POST_CURSOR2_LANE2_MASK 0x30
-# define DP_ADJUST_POST_CURSOR2_LANE2_SHIFT 4
-# define DP_ADJUST_POST_CURSOR2_LANE3_MASK 0xc0
-# define DP_ADJUST_POST_CURSOR2_LANE3_SHIFT 6
-
-#define DP_TEST_REQUEST 0x218
-# define DP_TEST_LINK_TRAINING (1 << 0)
-# define DP_TEST_LINK_VIDEO_PATTERN (1 << 1)
-# define DP_TEST_LINK_EDID_READ (1 << 2)
-# define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */
-# define DP_TEST_LINK_FAUX_PATTERN (1 << 4) /* DPCD >= 1.2 */
-# define DP_TEST_LINK_AUDIO_PATTERN (1 << 5) /* DPCD >= 1.2 */
-# define DP_TEST_LINK_AUDIO_DISABLED_VIDEO (1 << 6) /* DPCD >= 1.2 */
-
-#define DP_TEST_LINK_RATE 0x219
-# define DP_LINK_RATE_162 (0x6)
-# define DP_LINK_RATE_27 (0xa)
-
-#define DP_TEST_LANE_COUNT 0x220
-
-#define DP_TEST_PATTERN 0x221
-# define DP_NO_TEST_PATTERN 0x0
-# define DP_COLOR_RAMP 0x1
-# define DP_BLACK_AND_WHITE_VERTICAL_LINES 0x2
-# define DP_COLOR_SQUARE 0x3
-
-#define DP_TEST_H_TOTAL_HI 0x222
-#define DP_TEST_H_TOTAL_LO 0x223
-
-#define DP_TEST_V_TOTAL_HI 0x224
-#define DP_TEST_V_TOTAL_LO 0x225
-
-#define DP_TEST_H_START_HI 0x226
-#define DP_TEST_H_START_LO 0x227
-
-#define DP_TEST_V_START_HI 0x228
-#define DP_TEST_V_START_LO 0x229
-
-#define DP_TEST_HSYNC_HI 0x22A
-# define DP_TEST_HSYNC_POLARITY (1 << 7)
-# define DP_TEST_HSYNC_WIDTH_HI_MASK (127 << 0)
-#define DP_TEST_HSYNC_WIDTH_LO 0x22B
-
-#define DP_TEST_VSYNC_HI 0x22C
-# define DP_TEST_VSYNC_POLARITY (1 << 7)
-# define DP_TEST_VSYNC_WIDTH_HI_MASK (127 << 0)
-#define DP_TEST_VSYNC_WIDTH_LO 0x22D
-
-#define DP_TEST_H_WIDTH_HI 0x22E
-#define DP_TEST_H_WIDTH_LO 0x22F
-
-#define DP_TEST_V_HEIGHT_HI 0x230
-#define DP_TEST_V_HEIGHT_LO 0x231
-
-#define DP_TEST_MISC0 0x232
-# define DP_TEST_SYNC_CLOCK (1 << 0)
-# define DP_TEST_COLOR_FORMAT_MASK (3 << 1)
-# define DP_TEST_COLOR_FORMAT_SHIFT 1
-# define DP_COLOR_FORMAT_RGB (0 << 1)
-# define DP_COLOR_FORMAT_YCbCr422 (1 << 1)
-# define DP_COLOR_FORMAT_YCbCr444 (2 << 1)
-# define DP_TEST_DYNAMIC_RANGE_VESA (0 << 3)
-# define DP_TEST_DYNAMIC_RANGE_CEA (1 << 3)
-# define DP_TEST_YCBCR_COEFFICIENTS (1 << 4)
-# define DP_YCBCR_COEFFICIENTS_ITU601 (0 << 4)
-# define DP_YCBCR_COEFFICIENTS_ITU709 (1 << 4)
-# define DP_TEST_BIT_DEPTH_MASK (7 << 5)
-# define DP_TEST_BIT_DEPTH_SHIFT 5
-# define DP_TEST_BIT_DEPTH_6 (0 << 5)
-# define DP_TEST_BIT_DEPTH_8 (1 << 5)
-# define DP_TEST_BIT_DEPTH_10 (2 << 5)
-# define DP_TEST_BIT_DEPTH_12 (3 << 5)
-# define DP_TEST_BIT_DEPTH_16 (4 << 5)
-
-#define DP_TEST_MISC1 0x233
-# define DP_TEST_REFRESH_DENOMINATOR (1 << 0)
-# define DP_TEST_INTERLACED (1 << 1)
-
-#define DP_TEST_REFRESH_RATE_NUMERATOR 0x234
-
-#define DP_TEST_MISC0 0x232
-
-#define DP_TEST_CRC_R_CR 0x240
-#define DP_TEST_CRC_G_Y 0x242
-#define DP_TEST_CRC_B_CB 0x244
-
-#define DP_TEST_SINK_MISC 0x246
-# define DP_TEST_CRC_SUPPORTED (1 << 5)
-# define DP_TEST_COUNT_MASK 0xf
-
-#define DP_TEST_PHY_PATTERN 0x248
-#define DP_TEST_80BIT_CUSTOM_PATTERN_7_0 0x250
-#define DP_TEST_80BIT_CUSTOM_PATTERN_15_8 0x251
-#define DP_TEST_80BIT_CUSTOM_PATTERN_23_16 0x252
-#define DP_TEST_80BIT_CUSTOM_PATTERN_31_24 0x253
-#define DP_TEST_80BIT_CUSTOM_PATTERN_39_32 0x254
-#define DP_TEST_80BIT_CUSTOM_PATTERN_47_40 0x255
-#define DP_TEST_80BIT_CUSTOM_PATTERN_55_48 0x256
-#define DP_TEST_80BIT_CUSTOM_PATTERN_63_56 0x257
-#define DP_TEST_80BIT_CUSTOM_PATTERN_71_64 0x258
-#define DP_TEST_80BIT_CUSTOM_PATTERN_79_72 0x259
-
-#define DP_TEST_RESPONSE 0x260
-# define DP_TEST_ACK (1 << 0)
-# define DP_TEST_NAK (1 << 1)
-# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2)
-
-#define DP_TEST_EDID_CHECKSUM 0x261
-
-#define DP_TEST_SINK 0x270
-# define DP_TEST_SINK_START (1 << 0)
-#define DP_TEST_AUDIO_MODE 0x271
-#define DP_TEST_AUDIO_PATTERN_TYPE 0x272
-#define DP_TEST_AUDIO_PERIOD_CH1 0x273
-#define DP_TEST_AUDIO_PERIOD_CH2 0x274
-#define DP_TEST_AUDIO_PERIOD_CH3 0x275
-#define DP_TEST_AUDIO_PERIOD_CH4 0x276
-#define DP_TEST_AUDIO_PERIOD_CH5 0x277
-#define DP_TEST_AUDIO_PERIOD_CH6 0x278
-#define DP_TEST_AUDIO_PERIOD_CH7 0x279
-#define DP_TEST_AUDIO_PERIOD_CH8 0x27A
-
-#define DP_FEC_STATUS 0x280 /* 1.4 */
-# define DP_FEC_DECODE_EN_DETECTED (1 << 0)
-# define DP_FEC_DECODE_DIS_DETECTED (1 << 1)
-
-#define DP_FEC_ERROR_COUNT_LSB 0x0281 /* 1.4 */
-
-#define DP_FEC_ERROR_COUNT_MSB 0x0282 /* 1.4 */
-# define DP_FEC_ERROR_COUNT_MASK 0x7F
-# define DP_FEC_ERR_COUNT_VALID (1 << 7)
-
-#define DP_PAYLOAD_TABLE_UPDATE_STATUS 0x2c0 /* 1.2 MST */
-# define DP_PAYLOAD_TABLE_UPDATED (1 << 0)
-# define DP_PAYLOAD_ACT_HANDLED (1 << 1)
-
-#define DP_VC_PAYLOAD_ID_SLOT_1 0x2c1 /* 1.2 MST */
-/* up to ID_SLOT_63 at 0x2ff */
-
-#define DP_SOURCE_OUI 0x300
-#define DP_SINK_OUI 0x400
-#define DP_BRANCH_OUI 0x500
-#define DP_BRANCH_ID 0x503
-#define DP_BRANCH_REVISION_START 0x509
-#define DP_BRANCH_HW_REV 0x509
-#define DP_BRANCH_SW_REV 0x50A
-
-#define DP_SET_POWER 0x600
-# define DP_SET_POWER_D0 0x1
-# define DP_SET_POWER_D3 0x2
-# define DP_SET_POWER_MASK 0x3
-# define DP_SET_POWER_D3_AUX_ON 0x5
-
-#define DP_EDP_DPCD_REV 0x700 /* eDP 1.2 */
-# define DP_EDP_11 0x00
-# define DP_EDP_12 0x01
-# define DP_EDP_13 0x02
-# define DP_EDP_14 0x03
-# define DP_EDP_14a 0x04 /* eDP 1.4a */
-# define DP_EDP_14b 0x05 /* eDP 1.4b */
-
-#define DP_EDP_GENERAL_CAP_1 0x701
-# define DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP (1 << 0)
-# define DP_EDP_BACKLIGHT_PIN_ENABLE_CAP (1 << 1)
-# define DP_EDP_BACKLIGHT_AUX_ENABLE_CAP (1 << 2)
-# define DP_EDP_PANEL_SELF_TEST_PIN_ENABLE_CAP (1 << 3)
-# define DP_EDP_PANEL_SELF_TEST_AUX_ENABLE_CAP (1 << 4)
-# define DP_EDP_FRC_ENABLE_CAP (1 << 5)
-# define DP_EDP_COLOR_ENGINE_CAP (1 << 6)
-# define DP_EDP_SET_POWER_CAP (1 << 7)
-
-#define DP_EDP_BACKLIGHT_ADJUSTMENT_CAP 0x702
-# define DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP (1 << 0)
-# define DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP (1 << 1)
-# define DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT (1 << 2)
-# define DP_EDP_BACKLIGHT_AUX_PWM_PRODUCT_CAP (1 << 3)
-# define DP_EDP_BACKLIGHT_FREQ_PWM_PIN_PASSTHRU_CAP (1 << 4)
-# define DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP (1 << 5)
-# define DP_EDP_DYNAMIC_BACKLIGHT_CAP (1 << 6)
-# define DP_EDP_VBLANK_BACKLIGHT_UPDATE_CAP (1 << 7)
-
-#define DP_EDP_GENERAL_CAP_2 0x703
-# define DP_EDP_OVERDRIVE_ENGINE_ENABLED (1 << 0)
-
-#define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */
-# define DP_EDP_X_REGION_CAP_MASK (0xf << 0)
-# define DP_EDP_X_REGION_CAP_SHIFT 0
-# define DP_EDP_Y_REGION_CAP_MASK (0xf << 4)
-# define DP_EDP_Y_REGION_CAP_SHIFT 4
-
-#define DP_EDP_DISPLAY_CONTROL_REGISTER 0x720
-# define DP_EDP_BACKLIGHT_ENABLE (1 << 0)
-# define DP_EDP_BLACK_VIDEO_ENABLE (1 << 1)
-# define DP_EDP_FRC_ENABLE (1 << 2)
-# define DP_EDP_COLOR_ENGINE_ENABLE (1 << 3)
-# define DP_EDP_VBLANK_BACKLIGHT_UPDATE_ENABLE (1 << 7)
-
-#define DP_EDP_BACKLIGHT_MODE_SET_REGISTER 0x721
-# define DP_EDP_BACKLIGHT_CONTROL_MODE_MASK (3 << 0)
-# define DP_EDP_BACKLIGHT_CONTROL_MODE_PWM (0 << 0)
-# define DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET (1 << 0)
-# define DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD (2 << 0)
-# define DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT (3 << 0)
-# define DP_EDP_BACKLIGHT_FREQ_PWM_PIN_PASSTHRU_ENABLE (1 << 2)
-# define DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE (1 << 3)
-# define DP_EDP_DYNAMIC_BACKLIGHT_ENABLE (1 << 4)
-# define DP_EDP_REGIONAL_BACKLIGHT_ENABLE (1 << 5)
-# define DP_EDP_UPDATE_REGION_BRIGHTNESS (1 << 6) /* eDP 1.4 */
-
-#define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722
-#define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723
-
-#define DP_EDP_PWMGEN_BIT_COUNT 0x724
-#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN 0x725
-#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX 0x726
-# define DP_EDP_PWMGEN_BIT_COUNT_MASK (0x1f << 0)
-
-#define DP_EDP_BACKLIGHT_CONTROL_STATUS 0x727
-
-#define DP_EDP_BACKLIGHT_FREQ_SET 0x728
-# define DP_EDP_BACKLIGHT_FREQ_BASE_KHZ 27000
-
-#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MSB 0x72a
-#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MID 0x72b
-#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_LSB 0x72c
-
-#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MSB 0x72d
-#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MID 0x72e
-#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB 0x72f
-
-#define DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET 0x732
-#define DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET 0x733
-
-#define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */
-#define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */
-
-#define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */
-#define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */
-#define DP_SIDEBAND_MSG_DOWN_REP_BASE 0x1400 /* 1.2 MST */
-#define DP_SIDEBAND_MSG_UP_REQ_BASE 0x1600 /* 1.2 MST */
-
-#define DP_SINK_COUNT_ESI 0x2002 /* 1.2 */
-/* 0-5 sink count */
-# define DP_SINK_COUNT_CP_READY (1 << 6)
-
-#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x2003 /* 1.2 */
-
-#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 0x2004 /* 1.2 */
-# define DP_RX_GTC_MSTR_REQ_STATUS_CHANGE (1 << 0)
-# define DP_LOCK_ACQUISITION_REQUEST (1 << 1)
-# define DP_CEC_IRQ (1 << 2)
-
-#define DP_LINK_SERVICE_IRQ_VECTOR_ESI0 0x2005 /* 1.2 */
-
-#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */
-# define DP_PSR_LINK_CRC_ERROR (1 << 0)
-# define DP_PSR_RFB_STORAGE_ERROR (1 << 1)
-# define DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR (1 << 2) /* eDP 1.4 */
-
-#define DP_PSR_ESI 0x2007 /* XXX 1.2? */
-# define DP_PSR_CAPS_CHANGE (1 << 0)
-
-#define DP_PSR_STATUS 0x2008 /* XXX 1.2? */
-# define DP_PSR_SINK_INACTIVE 0
-# define DP_PSR_SINK_ACTIVE_SRC_SYNCED 1
-# define DP_PSR_SINK_ACTIVE_RFB 2
-# define DP_PSR_SINK_ACTIVE_SINK_SYNCED 3
-# define DP_PSR_SINK_ACTIVE_RESYNC 4
-# define DP_PSR_SINK_INTERNAL_ERROR 7
-# define DP_PSR_SINK_STATE_MASK 0x07
-
-#define DP_SYNCHRONIZATION_LATENCY_IN_SINK 0x2009 /* edp 1.4 */
-# define DP_MAX_RESYNC_FRAME_COUNT_MASK (0xf << 0)
-# define DP_MAX_RESYNC_FRAME_COUNT_SHIFT 0
-# define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_MASK (0xf << 4)
-# define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_SHIFT 4
-
-#define DP_LAST_RECEIVED_PSR_SDP 0x200a /* eDP 1.2 */
-# define DP_PSR_STATE_BIT (1 << 0) /* eDP 1.2 */
-# define DP_UPDATE_RFB_BIT (1 << 1) /* eDP 1.2 */
-# define DP_CRC_VALID_BIT (1 << 2) /* eDP 1.2 */
-# define DP_SU_VALID (1 << 3) /* eDP 1.4 */
-# define DP_FIRST_SCAN_LINE_SU_REGION (1 << 4) /* eDP 1.4 */
-# define DP_LAST_SCAN_LINE_SU_REGION (1 << 5) /* eDP 1.4 */
-# define DP_Y_COORDINATE_VALID (1 << 6) /* eDP 1.4a */
-
-#define DP_RECEIVER_ALPM_STATUS 0x200b /* eDP 1.4 */
-# define DP_ALPM_LOCK_TIMEOUT_ERROR (1 << 0)
-
-#define DP_LANE0_1_STATUS_ESI 0x200c /* status same as 0x202 */
-#define DP_LANE2_3_STATUS_ESI 0x200d /* status same as 0x203 */
-#define DP_LANE_ALIGN_STATUS_UPDATED_ESI 0x200e /* status same as 0x204 */
-#define DP_SINK_STATUS_ESI 0x200f /* status same as 0x205 */
-
-#define DP_DP13_DPCD_REV 0x2200
-#define DP_DP13_MAX_LINK_RATE 0x2201
-
-#define DP_DPRX_FEATURE_ENUMERATION_LIST 0x2210 /* DP 1.3 */
-# define DP_GTC_CAP (1 << 0) /* DP 1.3 */
-# define DP_SST_SPLIT_SDP_CAP (1 << 1) /* DP 1.4 */
-# define DP_AV_SYNC_CAP (1 << 2) /* DP 1.3 */
-# define DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED (1 << 3) /* DP 1.3 */
-# define DP_VSC_EXT_VESA_SDP_SUPPORTED (1 << 4) /* DP 1.4 */
-# define DP_VSC_EXT_VESA_SDP_CHAINING_SUPPORTED (1 << 5) /* DP 1.4 */
-# define DP_VSC_EXT_CEA_SDP_SUPPORTED (1 << 6) /* DP 1.4 */
-# define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED (1 << 7) /* DP 1.4 */
-
-/* HDMI CEC tunneling over AUX DP 1.3 section 5.3.3.3.1 DPCD 1.4+ */
-#define DP_CEC_TUNNELING_CAPABILITY 0x3000
-# define DP_CEC_TUNNELING_CAPABLE (1 << 0)
-# define DP_CEC_SNOOPING_CAPABLE (1 << 1)
-# define DP_CEC_MULTIPLE_LA_CAPABLE (1 << 2)
-
-#define DP_CEC_TUNNELING_CONTROL 0x3001
-# define DP_CEC_TUNNELING_ENABLE (1 << 0)
-# define DP_CEC_SNOOPING_ENABLE (1 << 1)
-
-#define DP_CEC_RX_MESSAGE_INFO 0x3002
-# define DP_CEC_RX_MESSAGE_LEN_MASK (0xf << 0)
-# define DP_CEC_RX_MESSAGE_LEN_SHIFT 0
-# define DP_CEC_RX_MESSAGE_HPD_STATE (1 << 4)
-# define DP_CEC_RX_MESSAGE_HPD_LOST (1 << 5)
-# define DP_CEC_RX_MESSAGE_ACKED (1 << 6)
-# define DP_CEC_RX_MESSAGE_ENDED (1 << 7)
-
-#define DP_CEC_TX_MESSAGE_INFO 0x3003
-# define DP_CEC_TX_MESSAGE_LEN_MASK (0xf << 0)
-# define DP_CEC_TX_MESSAGE_LEN_SHIFT 0
-# define DP_CEC_TX_RETRY_COUNT_MASK (0x7 << 4)
-# define DP_CEC_TX_RETRY_COUNT_SHIFT 4
-# define DP_CEC_TX_MESSAGE_SEND (1 << 7)
-
-#define DP_CEC_TUNNELING_IRQ_FLAGS 0x3004
-# define DP_CEC_RX_MESSAGE_INFO_VALID (1 << 0)
-# define DP_CEC_RX_MESSAGE_OVERFLOW (1 << 1)
-# define DP_CEC_TX_MESSAGE_SENT (1 << 4)
-# define DP_CEC_TX_LINE_ERROR (1 << 5)
-# define DP_CEC_TX_ADDRESS_NACK_ERROR (1 << 6)
-# define DP_CEC_TX_DATA_NACK_ERROR (1 << 7)
-
-#define DP_CEC_LOGICAL_ADDRESS_MASK 0x300E /* 0x300F word */
-# define DP_CEC_LOGICAL_ADDRESS_0 (1 << 0)
-# define DP_CEC_LOGICAL_ADDRESS_1 (1 << 1)
-# define DP_CEC_LOGICAL_ADDRESS_2 (1 << 2)
-# define DP_CEC_LOGICAL_ADDRESS_3 (1 << 3)
-# define DP_CEC_LOGICAL_ADDRESS_4 (1 << 4)
-# define DP_CEC_LOGICAL_ADDRESS_5 (1 << 5)
-# define DP_CEC_LOGICAL_ADDRESS_6 (1 << 6)
-# define DP_CEC_LOGICAL_ADDRESS_7 (1 << 7)
-#define DP_CEC_LOGICAL_ADDRESS_MASK_2 0x300F /* 0x300E word */
-# define DP_CEC_LOGICAL_ADDRESS_8 (1 << 0)
-# define DP_CEC_LOGICAL_ADDRESS_9 (1 << 1)
-# define DP_CEC_LOGICAL_ADDRESS_10 (1 << 2)
-# define DP_CEC_LOGICAL_ADDRESS_11 (1 << 3)
-# define DP_CEC_LOGICAL_ADDRESS_12 (1 << 4)
-# define DP_CEC_LOGICAL_ADDRESS_13 (1 << 5)
-# define DP_CEC_LOGICAL_ADDRESS_14 (1 << 6)
-# define DP_CEC_LOGICAL_ADDRESS_15 (1 << 7)
-
-#define DP_CEC_RX_MESSAGE_BUFFER 0x3010
-#define DP_CEC_TX_MESSAGE_BUFFER 0x3020
-#define DP_CEC_MESSAGE_BUFFER_LENGTH 0x10
-
-#define DP_AUX_HDCP_BKSV 0x68000
-#define DP_AUX_HDCP_RI_PRIME 0x68005
-#define DP_AUX_HDCP_AKSV 0x68007
-#define DP_AUX_HDCP_AN 0x6800C
-#define DP_AUX_HDCP_V_PRIME(h) (0x68014 + h * 4)
-#define DP_AUX_HDCP_BCAPS 0x68028
-# define DP_BCAPS_REPEATER_PRESENT BIT(1)
-# define DP_BCAPS_HDCP_CAPABLE BIT(0)
-#define DP_AUX_HDCP_BSTATUS 0x68029
-# define DP_BSTATUS_REAUTH_REQ BIT(3)
-# define DP_BSTATUS_LINK_FAILURE BIT(2)
-# define DP_BSTATUS_R0_PRIME_READY BIT(1)
-# define DP_BSTATUS_READY BIT(0)
-#define DP_AUX_HDCP_BINFO 0x6802A
-#define DP_AUX_HDCP_KSV_FIFO 0x6802C
-#define DP_AUX_HDCP_AINFO 0x6803B
-
-/* DP HDCP2.2 parameter offsets in DPCD address space */
-#define DP_HDCP_2_2_REG_RTX_OFFSET 0x69000
-#define DP_HDCP_2_2_REG_TXCAPS_OFFSET 0x69008
-#define DP_HDCP_2_2_REG_CERT_RX_OFFSET 0x6900B
-#define DP_HDCP_2_2_REG_RRX_OFFSET 0x69215
-#define DP_HDCP_2_2_REG_RX_CAPS_OFFSET 0x6921D
-#define DP_HDCP_2_2_REG_EKPUB_KM_OFFSET 0x69220
-#define DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET 0x692A0
-#define DP_HDCP_2_2_REG_M_OFFSET 0x692B0
-#define DP_HDCP_2_2_REG_HPRIME_OFFSET 0x692C0
-#define DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET 0x692E0
-#define DP_HDCP_2_2_REG_RN_OFFSET 0x692F0
-#define DP_HDCP_2_2_REG_LPRIME_OFFSET 0x692F8
-#define DP_HDCP_2_2_REG_EDKEY_KS_OFFSET 0x69318
-#define DP_HDCP_2_2_REG_RIV_OFFSET 0x69328
-#define DP_HDCP_2_2_REG_RXINFO_OFFSET 0x69330
-#define DP_HDCP_2_2_REG_SEQ_NUM_V_OFFSET 0x69332
-#define DP_HDCP_2_2_REG_VPRIME_OFFSET 0x69335
-#define DP_HDCP_2_2_REG_RECV_ID_LIST_OFFSET 0x69345
-#define DP_HDCP_2_2_REG_V_OFFSET 0x693E0
-#define DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET 0x693F0
-#define DP_HDCP_2_2_REG_K_OFFSET 0x693F3
-#define DP_HDCP_2_2_REG_STREAM_ID_TYPE_OFFSET 0x693F5
-#define DP_HDCP_2_2_REG_MPRIME_OFFSET 0x69473
-#define DP_HDCP_2_2_REG_RXSTATUS_OFFSET 0x69493
-#define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET 0x69494
-#define DP_HDCP_2_2_REG_DBG_OFFSET 0x69518
-
-/* Link Training (LT)-tunable PHY Repeaters */
-#define DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV 0xf0000 /* 1.3 */
-#define DP_MAX_LINK_RATE_PHY_REPEATER 0xf0001 /* 1.4a */
-#define DP_PHY_REPEATER_CNT 0xf0002 /* 1.3 */
-#define DP_PHY_REPEATER_MODE 0xf0003 /* 1.3 */
-#define DP_MAX_LANE_COUNT_PHY_REPEATER 0xf0004 /* 1.4a */
-#define DP_Repeater_FEC_CAPABILITY 0xf0004 /* 1.4 */
-#define DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT 0xf0005 /* 1.4a */
-#define DP_TRAINING_PATTERN_SET_PHY_REPEATER1 0xf0010 /* 1.3 */
-#define DP_TRAINING_LANE0_SET_PHY_REPEATER1 0xf0011 /* 1.3 */
-#define DP_TRAINING_LANE1_SET_PHY_REPEATER1 0xf0012 /* 1.3 */
-#define DP_TRAINING_LANE2_SET_PHY_REPEATER1 0xf0013 /* 1.3 */
-#define DP_TRAINING_LANE3_SET_PHY_REPEATER1 0xf0014 /* 1.3 */
-#define DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 0xf0020 /* 1.4a */
-#define DP_TRANSMITTER_CAPABILITY_PHY_REPEATER1 0xf0021 /* 1.4a */
-#define DP_LANE0_1_STATUS_PHY_REPEATER1 0xf0030 /* 1.3 */
-#define DP_LANE2_3_STATUS_PHY_REPEATER1 0xf0031 /* 1.3 */
-#define DP_LANE_ALIGN_STATUS_UPDATED_PHY_REPEATER1 0xf0032 /* 1.3 */
-#define DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 0xf0033 /* 1.3 */
-#define DP_ADJUST_REQUEST_LANE2_3_PHY_REPEATER1 0xf0034 /* 1.3 */
-#define DP_SYMBOL_ERROR_COUNT_LANE0_PHY_REPEATER1 0xf0035 /* 1.3 */
-#define DP_SYMBOL_ERROR_COUNT_LANE1_PHY_REPEATER1 0xf0037 /* 1.3 */
-#define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1 0xf0039 /* 1.3 */
-#define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1 0xf003b /* 1.3 */
-#define DP_FEC_STATUS_PHY_REPEATER1 0xf0290 /* 1.4 */
-#define DP_FEC_ERROR_COUNT_PHY_REPEATER1 0xf0291 /* 1.4 */
-#define DP_FEC_CAPABILITY_PHY_REPEATER1 0xf0294 /* 1.4a */
-
-/* Repeater modes */
-#define DP_PHY_REPEATER_MODE_TRANSPARENT 0x55 /* 1.3 */
-#define DP_PHY_REPEATER_MODE_NON_TRANSPARENT 0xaa /* 1.3 */
-
-/* DP HDCP message start offsets in DPCD address space */
-#define DP_HDCP_2_2_AKE_INIT_OFFSET DP_HDCP_2_2_REG_RTX_OFFSET
-#define DP_HDCP_2_2_AKE_SEND_CERT_OFFSET DP_HDCP_2_2_REG_CERT_RX_OFFSET
-#define DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET DP_HDCP_2_2_REG_EKPUB_KM_OFFSET
-#define DP_HDCP_2_2_AKE_STORED_KM_OFFSET DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET
-#define DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET DP_HDCP_2_2_REG_HPRIME_OFFSET
-#define DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET \
- DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET
-#define DP_HDCP_2_2_LC_INIT_OFFSET DP_HDCP_2_2_REG_RN_OFFSET
-#define DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET DP_HDCP_2_2_REG_LPRIME_OFFSET
-#define DP_HDCP_2_2_SKE_SEND_EKS_OFFSET DP_HDCP_2_2_REG_EDKEY_KS_OFFSET
-#define DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET DP_HDCP_2_2_REG_RXINFO_OFFSET
-#define DP_HDCP_2_2_REP_SEND_ACK_OFFSET DP_HDCP_2_2_REG_V_OFFSET
-#define DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET
-#define DP_HDCP_2_2_REP_STREAM_READY_OFFSET DP_HDCP_2_2_REG_MPRIME_OFFSET
-
-#define HDCP_2_2_DP_RXSTATUS_LEN 1
-#define HDCP_2_2_DP_RXSTATUS_READY(x) ((x) & BIT(0))
-#define HDCP_2_2_DP_RXSTATUS_H_PRIME(x) ((x) & BIT(1))
-#define HDCP_2_2_DP_RXSTATUS_PAIRING(x) ((x) & BIT(2))
-#define HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3))
-#define HDCP_2_2_DP_RXSTATUS_LINK_FAILED(x) ((x) & BIT(4))
-
-/* DP 1.2 Sideband message defines */
-/* peer device type - DP 1.2a Table 2-92 */
-#define DP_PEER_DEVICE_NONE 0x0
-#define DP_PEER_DEVICE_SOURCE_OR_SST 0x1
-#define DP_PEER_DEVICE_MST_BRANCHING 0x2
-#define DP_PEER_DEVICE_SST_SINK 0x3
-#define DP_PEER_DEVICE_DP_LEGACY_CONV 0x4
-
-/* DP 1.2 MST sideband request names DP 1.2a Table 2-80 */
-#define DP_GET_MSG_TRANSACTION_VERSION 0x00 /* DP 1.3 */
-#define DP_LINK_ADDRESS 0x01
-#define DP_CONNECTION_STATUS_NOTIFY 0x02
-#define DP_ENUM_PATH_RESOURCES 0x10
-#define DP_ALLOCATE_PAYLOAD 0x11
-#define DP_QUERY_PAYLOAD 0x12
-#define DP_RESOURCE_STATUS_NOTIFY 0x13
-#define DP_CLEAR_PAYLOAD_ID_TABLE 0x14
-#define DP_REMOTE_DPCD_READ 0x20
-#define DP_REMOTE_DPCD_WRITE 0x21
-#define DP_REMOTE_I2C_READ 0x22
-#define DP_REMOTE_I2C_WRITE 0x23
-#define DP_POWER_UP_PHY 0x24
-#define DP_POWER_DOWN_PHY 0x25
-#define DP_SINK_EVENT_NOTIFY 0x30
-#define DP_QUERY_STREAM_ENC_STATUS 0x38
-
-/* DP 1.2 MST sideband reply types */
-#define DP_SIDEBAND_REPLY_ACK 0x00
-#define DP_SIDEBAND_REPLY_NAK 0x01
-
-/* DP 1.2 MST sideband nak reasons - table 2.84 */
-#define DP_NAK_WRITE_FAILURE 0x01
-#define DP_NAK_INVALID_READ 0x02
-#define DP_NAK_CRC_FAILURE 0x03
-#define DP_NAK_BAD_PARAM 0x04
-#define DP_NAK_DEFER 0x05
-#define DP_NAK_LINK_FAILURE 0x06
-#define DP_NAK_NO_RESOURCES 0x07
-#define DP_NAK_DPCD_FAIL 0x08
-#define DP_NAK_I2C_NAK 0x09
-#define DP_NAK_ALLOCATE_FAIL 0x0a
-
-#define MODE_I2C_START 1
-#define MODE_I2C_WRITE 2
-#define MODE_I2C_READ 4
-#define MODE_I2C_STOP 8
-
-/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */
-#define DP_MST_PHYSICAL_PORT_0 0
-#define DP_MST_LOGICAL_PORT_0 8
-
-#define DP_LINK_STATUS_SIZE 6
-bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
- int lane_count);
-bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
- int lane_count);
-u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
- int lane);
-u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
- int lane);
-u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE],
- unsigned int lane);
-
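A sketch of the usual post-link-training check these helpers support; link_status[] is assumed to hold DP_LINK_STATUS_SIZE bytes read starting at DP_LANE0_1_STATUS (0x202), and my_retrain() is hypothetical:

    u8 link_status[DP_LINK_STATUS_SIZE];

    if (!drm_dp_clock_recovery_ok(link_status, lane_count) ||
        !drm_dp_channel_eq_ok(link_status, lane_count))
            my_retrain();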
-#define DP_BRANCH_OUI_HEADER_SIZE 0xc
-#define DP_RECEIVER_CAP_SIZE 0xf
-#define DP_DSC_RECEIVER_CAP_SIZE 0xf
-#define EDP_PSR_RECEIVER_CAP_SIZE 2
-#define EDP_DISPLAY_CTL_CAP_SIZE 3
-
-void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
-void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
-
-u8 drm_dp_link_rate_to_bw_code(int link_rate);
-int drm_dp_bw_code_to_link_rate(u8 link_bw);
-
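A small sketch of the conversion helpers, assuming rates are expressed as the link symbol clock in kHz (the convention used elsewhere in DRM):

    int rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7);     /* 270000 kHz */
    u8 bw    = drm_dp_link_rate_to_bw_code(540000);             /* DP_LINK_BW_5_4 */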
-#define DP_SDP_AUDIO_TIMESTAMP 0x01
-#define DP_SDP_AUDIO_STREAM 0x02
-#define DP_SDP_EXTENSION 0x04 /* DP 1.1 */
-#define DP_SDP_AUDIO_COPYMANAGEMENT 0x05 /* DP 1.2 */
-#define DP_SDP_ISRC 0x06 /* DP 1.2 */
-#define DP_SDP_VSC 0x07 /* DP 1.2 */
-#define DP_SDP_CAMERA_GENERIC(i) (0x08 + (i)) /* 0-7, DP 1.3 */
-#define DP_SDP_PPS 0x10 /* DP 1.4 */
-#define DP_SDP_VSC_EXT_VESA 0x20 /* DP 1.4 */
-#define DP_SDP_VSC_EXT_CEA 0x21 /* DP 1.4 */
-/* 0x80+ CEA-861 infoframe types */
-
-/**
- * struct dp_sdp_header - DP secondary data packet header
- * @HB0: Secondary Data Packet ID
- * @HB1: Secondary Data Packet Type
- * @HB2: Secondary Data Packet Specific header, Byte 0
- * @HB3: Secondary Data packet Specific header, Byte 1
- */
-struct dp_sdp_header {
- u8 HB0;
- u8 HB1;
- u8 HB2;
- u8 HB3;
-} __packed;
-
-#define EDP_SDP_HEADER_REVISION_MASK 0x1F
-#define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES 0x1F
-#define DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 0x7F
-
-/**
- * struct dp_sdp - DP secondary data packet
- * @sdp_header: DP secondary data packet header
- * @db: DP secondary data packet data blocks
- * VSC SDP Payload for PSR
- * db[0]: Stereo Interface
- * db[1]: 0 - PSR State; 1 - Update RFB; 2 - CRC Valid
- * db[2]: CRC value bits 7:0 of the R or Cr component
- * db[3]: CRC value bits 15:8 of the R or Cr component
- * db[4]: CRC value bits 7:0 of the G or Y component
- * db[5]: CRC value bits 15:8 of the G or Y component
- * db[6]: CRC value bits 7:0 of the B or Cb component
- * db[7]: CRC value bits 15:8 of the B or Cb component
- * db[8] - db[31]: Reserved
- * VSC SDP Payload for Pixel Encoding/Colorimetry Format
- * db[0] - db[15]: Reserved
- * db[16]: Pixel Encoding and Colorimetry Formats
- * db[17]: Dynamic Range and Component Bit Depth
- * db[18]: Content Type
- * db[19] - db[31]: Reserved
- */
-struct dp_sdp {
- struct dp_sdp_header sdp_header;
- u8 db[32];
-} __packed;
-
-#define EDP_VSC_PSR_STATE_ACTIVE (1<<0)
-#define EDP_VSC_PSR_UPDATE_RFB (1<<1)
-#define EDP_VSC_PSR_CRC_VALUES_VALID (1<<2)
-
-int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]);
-
-static inline int
-drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
-}
-
-static inline u8
-drm_dp_max_lane_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
-}
-
-static inline bool
-drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_DPCD_REV] >= 0x11 &&
- (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
-}
-
-static inline bool
-drm_dp_fast_training_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_DPCD_REV] >= 0x11 &&
- (dpcd[DP_MAX_DOWNSPREAD] & DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
-}
-
-static inline bool
-drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_DPCD_REV] >= 0x12 &&
- dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED;
-}
-
-static inline bool
-drm_dp_tps4_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_DPCD_REV] >= 0x14 &&
- dpcd[DP_MAX_DOWNSPREAD] & DP_TPS4_SUPPORTED;
-}
-
-static inline u8
-drm_dp_training_pattern_mask(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return (dpcd[DP_DPCD_REV] >= 0x14) ? DP_TRAINING_PATTERN_MASK_1_4 :
- DP_TRAINING_PATTERN_MASK;
-}
-
-static inline bool
-drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
-}
-
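Taken together, these inlines answer the usual capability questions from a single receiver capability read. A minimal sketch, assuming dpcd[] holds DP_RECEIVER_CAP_SIZE bytes read from DPCD address 0x000:

    u8 dpcd[DP_RECEIVER_CAP_SIZE];

    int max_rate   = drm_dp_max_link_rate(dpcd);
    u8 max_lanes   = drm_dp_max_lane_count(dpcd);
    bool is_branch = drm_dp_is_branch(dpcd);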
-/* DP/eDP DSC support */
-u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
- bool is_edp);
-u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
-int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE],
- u8 dsc_bpc[3]);
-
-static inline bool
-drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
-{
- return dsc_dpcd[DP_DSC_SUPPORT - DP_DSC_SUPPORT] &
- DP_DSC_DECOMPRESSION_IS_SUPPORTED;
-}
-
-static inline u16
-drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
-{
- return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] |
- (dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] &
- DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK <<
- DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT);
-}
-
-static inline u32
-drm_dp_dsc_sink_max_slice_width(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
-{
- /* Max Slicewidth = Number of Pixels * 320 */
- return dsc_dpcd[DP_DSC_MAX_SLICE_WIDTH - DP_DSC_SUPPORT] *
- DP_DSC_SLICE_WIDTH_MULTIPLIER;
-}
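/*
 * Editor's sketch: querying the DSC helpers above once the DSC capability
 * block has been read from DP_DSC_SUPPORT with drm_dp_dpcd_read(). The
 * function name is hypothetical; everything it calls is declared above.
 */
static inline int
example_query_dsc_caps(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
		       u8 dsc_bpc[3])
{
	if (!drm_dp_sink_supports_dsc(dsc_dpcd))
		return 0;

	/* Returns how many supported input bit depths were filled in. */
	return drm_dp_dsc_sink_supported_input_bpcs(dsc_dpcd, dsc_bpc);
}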
-
-/* Forward Error Correction Support on DP 1.4 */
-static inline bool
-drm_dp_sink_supports_fec(const u8 fec_capable)
-{
- return fec_capable & DP_FEC_CAPABLE;
-}
-
-static inline bool
-drm_dp_channel_coding_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_8B10B;
-}
-
-static inline bool
-drm_dp_alternate_scrambler_reset_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_EDP_CONFIGURATION_CAP] &
- DP_ALTERNATE_SCRAMBLER_RESET_CAP;
-}
-
-/*
- * DisplayPort AUX channel
- */
-
-/**
- * struct drm_dp_aux_msg - DisplayPort AUX channel transaction
- * @address: address of the (first) register to access
- * @request: contains the type of transaction (see DP_AUX_* macros)
- * @reply: upon completion, contains the reply type of the transaction
- * @buffer: pointer to a transmission or reception buffer
- * @size: size of @buffer
- */
-struct drm_dp_aux_msg {
- unsigned int address;
- u8 request;
- u8 reply;
- void *buffer;
- size_t size;
-};
-
-struct cec_adapter;
-struct edid;
-struct drm_connector;
-
-/**
- * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX
- * @lock: mutex protecting this struct
- * @adap: the CEC adapter for CEC-Tunneling-over-AUX support.
- * @connector: the connector this CEC adapter is associated with
- * @unregister_work: unregister the CEC adapter
- */
-struct drm_dp_aux_cec {
- struct mutex lock;
- struct cec_adapter *adap;
- struct drm_connector *connector;
- struct delayed_work unregister_work;
-};
-
-/**
- * struct drm_dp_aux - DisplayPort AUX channel
- * @name: user-visible name of this AUX channel and the I2C-over-AUX adapter
- * @ddc: I2C adapter that can be used for I2C-over-AUX communication
- * @dev: pointer to struct device that is the parent for this AUX channel
- * @crtc: backpointer to the crtc that is currently using this AUX channel
- * @hw_mutex: internal mutex used for locking transfers
- * @crc_work: worker that captures CRCs for each frame
- * @crc_count: counter of captured frame CRCs
- * @transfer: transfers a message representing a single AUX transaction
- *
- * The .dev field should be set to a pointer to the device that implements
- * the AUX channel.
- *
- * The .name field may be used to specify the name of the I2C adapter. If set to
- * NULL, dev_name() of .dev will be used.
- *
- * Drivers provide a hardware-specific implementation of how transactions
- * are executed via the .transfer() function. A pointer to a drm_dp_aux_msg
- * structure describing the transaction is passed into this function. Upon
- * success, the implementation should return the number of payload bytes
- * that were transferred, or a negative error-code on failure. Helpers
- * propagate errors from the .transfer() function, with the exception of
- * the -EBUSY error, which causes a transaction to be retried. On a short
- * reply (fewer bytes transferred than requested), helpers return -EPROTO
- * to make it simpler to check for failure.
- *
- * An AUX channel can also be used to transport I2C messages to a sink. A
- * typical application of that is to access an EDID that's present in the
- * sink device. The .transfer() function can also be used to execute such
- * transactions. The drm_dp_aux_register() function registers an I2C
- * adapter that can be passed to drm_probe_ddc(). Upon removal, drivers
- * should call drm_dp_aux_unregister() to remove the I2C adapter.
- * The I2C adapter uses long transfers by default; if a partial response is
- * received, the adapter will drop down to the size given by the partial
- * response for this transaction only.
- *
- * Note that the aux helper code assumes that the .transfer() function
- * only modifies the reply field of the drm_dp_aux_msg structure. The
- * retry logic and i2c helpers assume this is the case.
- */
-struct drm_dp_aux {
- const char *name;
- struct i2c_adapter ddc;
- struct device *dev;
- struct drm_crtc *crtc;
- struct mutex hw_mutex;
- struct work_struct crc_work;
- u8 crc_count;
- ssize_t (*transfer)(struct drm_dp_aux *aux,
- struct drm_dp_aux_msg *msg);
- /**
- * @i2c_nack_count: Counts I2C NACKs, used for DP validation.
- */
- unsigned i2c_nack_count;
- /**
- * @i2c_defer_count: Counts I2C DEFERs, used for DP validation.
- */
- unsigned i2c_defer_count;
- /**
- * @cec: struct containing fields used for CEC-Tunneling-over-AUX.
- */
- struct drm_dp_aux_cec cec;
- /**
- * @is_remote: Is this AUX CH actually using sideband messaging.
- */
- bool is_remote;
-};
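/*
 * Editor's sketch of the .transfer() contract described in the kernel-doc
 * above: return the number of payload bytes on success, a negative errno on
 * failure, and modify only msg->reply. struct my_device and my_hw_aux_xfer()
 * are hypothetical stand-ins for driver hardware state and a hardware hook.
 */
struct my_device {
	struct drm_dp_aux aux;
	/* hardware state would live here */
};

int my_hw_aux_xfer(struct my_device *mydev, u8 request, unsigned int address,
		   void *buffer, size_t size);

static ssize_t example_aux_transfer(struct drm_dp_aux *aux,
				    struct drm_dp_aux_msg *msg)
{
	struct my_device *mydev = container_of(aux, struct my_device, aux);
	int ret;

	ret = my_hw_aux_xfer(mydev, msg->request, msg->address,
			     msg->buffer, msg->size);
	if (ret < 0)
		return ret;	/* -EBUSY makes the helpers retry */

	msg->reply = DP_AUX_NATIVE_REPLY_ACK;
	return ret;		/* payload bytes actually transferred */
}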
-
-ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
- void *buffer, size_t size);
-ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
- void *buffer, size_t size);
-
-/**
- * drm_dp_dpcd_readb() - read a single byte from the DPCD
- * @aux: DisplayPort AUX channel
- * @offset: address of the register to read
- * @valuep: location where the value of the register will be stored
- *
- * Returns the number of bytes transferred (1) on success, or a negative
- * error code on failure.
- */
-static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux,
- unsigned int offset, u8 *valuep)
-{
- return drm_dp_dpcd_read(aux, offset, valuep, 1);
-}
-
-/**
- * drm_dp_dpcd_writeb() - write a single byte to the DPCD
- * @aux: DisplayPort AUX channel
- * @offset: address of the register to write
- * @value: value to write to the register
- *
- * Returns the number of bytes transferred (1) on success, or a negative
- * error code on failure.
- */
-static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
- unsigned int offset, u8 value)
-{
- return drm_dp_dpcd_write(aux, offset, &value, 1);
-}
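/*
 * Editor's sketch: typical single-byte DPCD accesses built on the helpers
 * above, e.g. waking the sink by writing DP_SET_POWER. Both registers are
 * standard DPCD offsets defined elsewhere in this header; the function name
 * is hypothetical.
 */
static inline ssize_t example_wake_sink(struct drm_dp_aux *aux)
{
	u8 rev;
	ssize_t ret;

	ret = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &rev);
	if (ret < 0)
		return ret;

	return drm_dp_dpcd_writeb(aux, DP_SET_POWER, DP_SET_POWER_D0);
}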
-
-int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
- u8 status[DP_LINK_STATUS_SIZE]);
-
-int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- const u8 port_cap[4]);
-int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- const u8 port_cap[4]);
-int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]);
-void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- const u8 port_cap[4], struct drm_dp_aux *aux);
-
-void drm_dp_remote_aux_init(struct drm_dp_aux *aux);
-void drm_dp_aux_init(struct drm_dp_aux *aux);
-int drm_dp_aux_register(struct drm_dp_aux *aux);
-void drm_dp_aux_unregister(struct drm_dp_aux *aux);
-
-int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc);
-int drm_dp_stop_crc(struct drm_dp_aux *aux);
-
-struct drm_dp_dpcd_ident {
- u8 oui[3];
- u8 device_id[6];
- u8 hw_rev;
- u8 sw_major_rev;
- u8 sw_minor_rev;
-} __packed;
-
-/**
- * struct drm_dp_desc - DP branch/sink device descriptor
- * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch).
- * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks.
- */
-struct drm_dp_desc {
- struct drm_dp_dpcd_ident ident;
- u32 quirks;
-};
-
-int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
- bool is_branch);
-
-/**
- * enum drm_dp_quirk - Display Port sink/branch device specific quirks
- *
- * Display Port sink and branch devices in the wild have a variety of bugs, try
- * to collect them here. The quirks are shared, but it's up to the drivers to
- * implement workarounds for them.
- */
-enum drm_dp_quirk {
- /**
- * @DP_DPCD_QUIRK_CONSTANT_N:
- *
- * The device requires the main link attributes Mvid and Nvid to be
- * limited to 16 bits, so drivers should use a constant value (0x8000)
- * for compatibility.
- */
- DP_DPCD_QUIRK_CONSTANT_N,
- /**
- * @DP_DPCD_QUIRK_NO_PSR:
- *
- * The device does not support PSR even though it reports that it does,
- * or the driver does not yet implement proper handling for such a
- * device.
- */
- DP_DPCD_QUIRK_NO_PSR,
- /**
- * @DP_DPCD_QUIRK_NO_SINK_COUNT:
- *
- * The device does not set SINK_COUNT to a non-zero value.
- * The driver should ignore SINK_COUNT during detection.
- */
- DP_DPCD_QUIRK_NO_SINK_COUNT,
- /**
- * @DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD:
- *
- * The device supports MST DSC despite not supporting Virtual DPCD.
- * The DSC caps can be read from the physical aux instead.
- */
- DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD,
-};
-
-/**
- * drm_dp_has_quirk() - does the DP device have a specific quirk
- * @desc: Device descriptor filled by drm_dp_read_desc()
- * @quirk: Quirk to query for
- *
- * Return true if DP device identified by @desc has @quirk.
- */
-static inline bool
-drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
-{
- return desc->quirks & BIT(quirk);
-}
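/*
 * Editor's sketch: reading the device descriptor once at detect time and
 * querying a quirk from it, as the kernel-doc above suggests. The function
 * name is hypothetical.
 */
static inline bool
example_needs_constant_n(struct drm_dp_aux *aux,
			 const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	struct drm_dp_desc desc;

	if (drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd)))
		return false;

	return drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_CONSTANT_N);
}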
-
-#ifdef CONFIG_DRM_DP_CEC
-void drm_dp_cec_irq(struct drm_dp_aux *aux);
-void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
- struct drm_connector *connector);
-void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux);
-void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid);
-void drm_dp_cec_unset_edid(struct drm_dp_aux *aux);
-#else
-static inline void drm_dp_cec_irq(struct drm_dp_aux *aux)
-{
-}
-
-static inline void
-drm_dp_cec_register_connector(struct drm_dp_aux *aux,
- struct drm_connector *connector)
-{
-}
-
-static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
-{
-}
-
-static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux,
- const struct edid *edid)
-{
-}
-
-static inline void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
-{
-}
-
-#endif
-
-#endif /* _DRM_DP_HELPER_H_ */
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
deleted file mode 100644
index 41725d88d27e..000000000000
--- a/include/drm/drm_dp_mst_helper.h
+++ /dev/null
@@ -1,908 +0,0 @@
-/*
- * Copyright © 2014 Red Hat.
- *
- * Permission to use, copy, modify, distribute, and sell this software and its
- * documentation for any purpose is hereby granted without fee, provided that
- * the above copyright notice appear in all copies and that both that copyright
- * notice and this permission notice appear in supporting documentation, and
- * that the name of the copyright holders not be used in advertising or
- * publicity pertaining to distribution of the software without specific,
- * written prior permission. The copyright holders make no representations
- * about the suitability of this software for any purpose. It is provided "as
- * is" without express or implied warranty.
- *
- * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
- * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
- * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
- * OF THIS SOFTWARE.
- */
-#ifndef _DRM_DP_MST_HELPER_H_
-#define _DRM_DP_MST_HELPER_H_
-
-#include <linux/types.h>
-#include <drm/drm_dp_helper.h>
-#include <drm/drm_atomic.h>
-
-#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
-#include <linux/stackdepot.h>
-#include <linux/timekeeping.h>
-
-enum drm_dp_mst_topology_ref_type {
- DRM_DP_MST_TOPOLOGY_REF_GET,
- DRM_DP_MST_TOPOLOGY_REF_PUT,
-};
-
-struct drm_dp_mst_topology_ref_history {
- struct drm_dp_mst_topology_ref_entry {
- enum drm_dp_mst_topology_ref_type type;
- int count;
- ktime_t ts_nsec;
- depot_stack_handle_t backtrace;
- } *entries;
- int len;
-};
-#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */
-
-struct drm_dp_mst_branch;
-
-/**
- * struct drm_dp_vcpi - Virtual Channel Payload Identifier
- * @vcpi: Virtual channel ID.
- * @pbn: Payload Bandwidth Number for this channel
- * @aligned_pbn: PBN aligned with slot size
- * @num_slots: number of slots for this PBN
- */
-struct drm_dp_vcpi {
- int vcpi;
- int pbn;
- int aligned_pbn;
- int num_slots;
-};
-
-/**
- * struct drm_dp_mst_port - MST port
- * @port_num: port number
- * @input: if this port is an input port. Protected by
- * &drm_dp_mst_topology_mgr.base.lock.
- * @mcs: message capability status - DP 1.2 spec. Protected by
- * &drm_dp_mst_topology_mgr.base.lock.
- * @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by
- * &drm_dp_mst_topology_mgr.base.lock.
- * @pdt: Peer Device Type. Protected by
- * &drm_dp_mst_topology_mgr.base.lock.
- * @ldps: Legacy Device Plug Status. Protected by
- * &drm_dp_mst_topology_mgr.base.lock.
- * @dpcd_rev: DPCD revision of device on this port. Protected by
- * &drm_dp_mst_topology_mgr.base.lock.
- * @num_sdp_streams: Number of simultaneous streams. Protected by
- * &drm_dp_mst_topology_mgr.base.lock.
- * @num_sdp_stream_sinks: Number of stream sinks. Protected by
- * &drm_dp_mst_topology_mgr.base.lock.
- * @full_pbn: Max possible bandwidth for this port. Protected by
- * &drm_dp_mst_topology_mgr.base.lock.
- * @next: link to next port on this branch device
- * @aux: i2c aux transport to talk to device connected to this port, protected
- * by &drm_dp_mst_topology_mgr.base.lock.
- * @parent: branch device parent of this port
- * @vcpi: Virtual Channel Payload info for this port.
- * @connector: DRM connector this port is connected to. Protected by
- * &drm_dp_mst_topology_mgr.base.lock.
- * @mgr: topology manager this port lives under.
- *
- * This structure represents an MST port endpoint on a device somewhere
- * in the MST topology.
- */
-struct drm_dp_mst_port {
- /**
- * @topology_kref: refcount for this port's lifetime in the topology,
- * only the DP MST helpers should need to touch this
- */
- struct kref topology_kref;
-
- /**
- * @malloc_kref: refcount for the memory allocation containing this
- * structure. See drm_dp_mst_get_port_malloc() and
- * drm_dp_mst_put_port_malloc().
- */
- struct kref malloc_kref;
-
-#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
- /**
- * @topology_ref_history: A history of each topology
- * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
- */
- struct drm_dp_mst_topology_ref_history topology_ref_history;
-#endif
-
- u8 port_num;
- bool input;
- bool mcs;
- bool ddps;
- u8 pdt;
- bool ldps;
- u8 dpcd_rev;
- u8 num_sdp_streams;
- u8 num_sdp_stream_sinks;
- uint16_t full_pbn;
- struct list_head next;
- /**
- * @mstb: the branch device connected to this port, if there is one.
- * This should be considered protected for reading by
- * &drm_dp_mst_topology_mgr.lock. There are two exceptions to this:
- * &drm_dp_mst_topology_mgr.up_req_work and
- * &drm_dp_mst_topology_mgr.work, which do not grab
- * &drm_dp_mst_topology_mgr.lock during reads but are the only
- * updaters of this list and are protected from writing concurrently
- * by &drm_dp_mst_topology_mgr.probe_lock.
- */
- struct drm_dp_mst_branch *mstb;
- struct drm_dp_aux aux; /* i2c bus for this port? */
- struct drm_dp_mst_branch *parent;
-
- struct drm_dp_vcpi vcpi;
- struct drm_connector *connector;
- struct drm_dp_mst_topology_mgr *mgr;
-
- /**
- * @cached_edid: for DP logical ports - make tiling work by ensuring
- * that the EDID for all connectors is read immediately.
- */
- struct edid *cached_edid;
- /**
- * @has_audio: Tracks whether the sink connector to this port is
- * audio-capable.
- */
- bool has_audio;
-
- bool fec_capable;
-};
-
-/**
- * struct drm_dp_mst_branch - MST branch device.
- * @rad: Relative Address to talk to this branch device.
- * @lct: Link count total to talk to this branch device.
- * @num_ports: number of ports on the branch.
- * @msg_slots: one bit per transmitted msg slot.
- * @port_parent: pointer to the port parent, NULL if toplevel.
- * @mgr: topology manager for this branch device.
- * @tx_slots: transmission slots for this device.
- * @last_seqno: last sequence number used to talk to this.
- * @link_address_sent: if a link address message has been sent to this device yet.
- * @guid: GUID for DP 1.2 branch device. Ports under this branch can be
- * identified by port number.
- *
- * This structure represents an MST branch device. There is one primary
- * branch device at the root, along with any other branches connected to
- * the downstream ports of parent branches.
- */
-struct drm_dp_mst_branch {
- /**
- * @topology_kref: refcount for this branch device's lifetime in the
- * topology, only the DP MST helpers should need to touch this
- */
- struct kref topology_kref;
-
- /**
- * @malloc_kref: refcount for the memory allocation containing this
- * structure. See drm_dp_mst_get_mstb_malloc() and
- * drm_dp_mst_put_mstb_malloc().
- */
- struct kref malloc_kref;
-
-#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
- /**
- * @topology_ref_history: A history of each topology
- * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
- */
- struct drm_dp_mst_topology_ref_history topology_ref_history;
-#endif
-
- /**
- * @destroy_next: linked-list entry used by
- * drm_dp_delayed_destroy_work()
- */
- struct list_head destroy_next;
-
- u8 rad[8];
- u8 lct;
- int num_ports;
-
- int msg_slots;
- /**
- * @ports: the list of ports on this branch device. This should be
- * considered protected for reading by &drm_dp_mst_topology_mgr.lock.
- * There are two exceptions to this:
- * &drm_dp_mst_topology_mgr.up_req_work and
- * &drm_dp_mst_topology_mgr.work, which do not grab
- * &drm_dp_mst_topology_mgr.lock during reads but are the only
- * updaters of this list and are protected from updating the list
- * concurrently by &drm_dp_mst_topology_mgr.probe_lock.
- */
- struct list_head ports;
-
-	/* list of tx ops queued for this port */
- struct drm_dp_mst_port *port_parent;
- struct drm_dp_mst_topology_mgr *mgr;
-
- /* slots are protected by mstb->mgr->qlock */
- struct drm_dp_sideband_msg_tx *tx_slots[2];
- int last_seqno;
- bool link_address_sent;
-
- /* global unique identifier to identify branch devices */
- u8 guid[16];
-};
-
-
-/* sideband msg header - not bit struct */
-struct drm_dp_sideband_msg_hdr {
- u8 lct;
- u8 lcr;
- u8 rad[8];
- bool broadcast;
- bool path_msg;
- u8 msg_len;
- bool somt;
- bool eomt;
- bool seqno;
-};
-
-struct drm_dp_nak_reply {
- u8 guid[16];
- u8 reason;
- u8 nak_data;
-};
-
-struct drm_dp_link_address_ack_reply {
- u8 guid[16];
- u8 nports;
- struct drm_dp_link_addr_reply_port {
- bool input_port;
- u8 peer_device_type;
- u8 port_number;
- bool mcs;
- bool ddps;
- bool legacy_device_plug_status;
- u8 dpcd_revision;
- u8 peer_guid[16];
- u8 num_sdp_streams;
- u8 num_sdp_stream_sinks;
- } ports[16];
-};
-
-struct drm_dp_remote_dpcd_read_ack_reply {
- u8 port_number;
- u8 num_bytes;
- u8 bytes[255];
-};
-
-struct drm_dp_remote_dpcd_write_ack_reply {
- u8 port_number;
-};
-
-struct drm_dp_remote_dpcd_write_nak_reply {
- u8 port_number;
- u8 reason;
- u8 bytes_written_before_failure;
-};
-
-struct drm_dp_remote_i2c_read_ack_reply {
- u8 port_number;
- u8 num_bytes;
- u8 bytes[255];
-};
-
-struct drm_dp_remote_i2c_read_nak_reply {
- u8 port_number;
- u8 nak_reason;
- u8 i2c_nak_transaction;
-};
-
-struct drm_dp_remote_i2c_write_ack_reply {
- u8 port_number;
-};
-
-
-struct drm_dp_sideband_msg_rx {
- u8 chunk[48];
- u8 msg[256];
- u8 curchunk_len;
- u8 curchunk_idx; /* chunk we are parsing now */
- u8 curchunk_hdrlen;
- u8 curlen; /* total length of the msg */
- bool have_somt;
- bool have_eomt;
- struct drm_dp_sideband_msg_hdr initial_hdr;
-};
-
-#define DRM_DP_MAX_SDP_STREAMS 16
-struct drm_dp_allocate_payload {
- u8 port_number;
- u8 number_sdp_streams;
- u8 vcpi;
- u16 pbn;
- u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS];
-};
-
-struct drm_dp_allocate_payload_ack_reply {
- u8 port_number;
- u8 vcpi;
- u16 allocated_pbn;
-};
-
-struct drm_dp_connection_status_notify {
- u8 guid[16];
- u8 port_number;
- bool legacy_device_plug_status;
- bool displayport_device_plug_status;
- bool message_capability_status;
- bool input_port;
- u8 peer_device_type;
-};
-
-struct drm_dp_remote_dpcd_read {
- u8 port_number;
- u32 dpcd_address;
- u8 num_bytes;
-};
-
-struct drm_dp_remote_dpcd_write {
- u8 port_number;
- u32 dpcd_address;
- u8 num_bytes;
- u8 *bytes;
-};
-
-#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
-struct drm_dp_remote_i2c_read {
- u8 num_transactions;
- u8 port_number;
- struct drm_dp_remote_i2c_read_tx {
- u8 i2c_dev_id;
- u8 num_bytes;
- u8 *bytes;
- u8 no_stop_bit;
- u8 i2c_transaction_delay;
- } transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
- u8 read_i2c_device_id;
- u8 num_bytes_read;
-};
-
-struct drm_dp_remote_i2c_write {
- u8 port_number;
- u8 write_i2c_device_id;
- u8 num_bytes;
- u8 *bytes;
-};
-
-/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
-struct drm_dp_port_number_req {
- u8 port_number;
-};
-
-struct drm_dp_enum_path_resources_ack_reply {
- u8 port_number;
- bool fec_capable;
- u16 full_payload_bw_number;
- u16 avail_payload_bw_number;
-};
-
-/* covers POWER_DOWN_PHY, POWER_UP_PHY */
-struct drm_dp_port_number_rep {
- u8 port_number;
-};
-
-struct drm_dp_query_payload {
- u8 port_number;
- u8 vcpi;
-};
-
-struct drm_dp_resource_status_notify {
- u8 port_number;
- u8 guid[16];
- u16 available_pbn;
-};
-
-struct drm_dp_query_payload_ack_reply {
- u8 port_number;
- u16 allocated_pbn;
-};
-
-struct drm_dp_sideband_msg_req_body {
- u8 req_type;
- union ack_req {
- struct drm_dp_connection_status_notify conn_stat;
- struct drm_dp_port_number_req port_num;
- struct drm_dp_resource_status_notify resource_stat;
-
- struct drm_dp_query_payload query_payload;
- struct drm_dp_allocate_payload allocate_payload;
-
- struct drm_dp_remote_dpcd_read dpcd_read;
- struct drm_dp_remote_dpcd_write dpcd_write;
-
- struct drm_dp_remote_i2c_read i2c_read;
- struct drm_dp_remote_i2c_write i2c_write;
- } u;
-};
-
-struct drm_dp_sideband_msg_reply_body {
- u8 reply_type;
- u8 req_type;
- union ack_replies {
- struct drm_dp_nak_reply nak;
- struct drm_dp_link_address_ack_reply link_addr;
- struct drm_dp_port_number_rep port_number;
-
- struct drm_dp_enum_path_resources_ack_reply path_resources;
- struct drm_dp_allocate_payload_ack_reply allocate_payload;
- struct drm_dp_query_payload_ack_reply query_payload;
-
- struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
- struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
- struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;
-
- struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
- struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
- struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;
- } u;
-};
-
-/* msg is queued to be put into a slot */
-#define DRM_DP_SIDEBAND_TX_QUEUED 0
-/* msg has started transmitting on a slot - still on msgq */
-#define DRM_DP_SIDEBAND_TX_START_SEND 1
-/* msg has finished transmitting on a slot - removed from msgq, tracked only in its slot */
-#define DRM_DP_SIDEBAND_TX_SENT 2
-/* msg has received a response - removed from slot */
-#define DRM_DP_SIDEBAND_TX_RX 3
-#define DRM_DP_SIDEBAND_TX_TIMEOUT 4
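/*
 * Editor's note: as far as can be inferred from the states above, a
 * sideband TX message moves through
 *
 *	QUEUED -> START_SEND -> SENT -> RX
 *
 * with TIMEOUT reachable from any in-flight state when no reply arrives.
 */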
-
-struct drm_dp_sideband_msg_tx {
- u8 msg[256];
- u8 chunk[48];
- u8 cur_offset;
- u8 cur_len;
- struct drm_dp_mst_branch *dst;
- struct list_head next;
- int seqno;
- int state;
- bool path_msg;
- struct drm_dp_sideband_msg_reply_body reply;
-};
-
-/* sideband msg handler */
-struct drm_dp_mst_topology_mgr;
-struct drm_dp_mst_topology_cbs {
- /* create a connector for a port */
- struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
- void (*register_connector)(struct drm_connector *connector);
- void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_connector *connector);
-};
-
-#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)
-
-#define DP_PAYLOAD_LOCAL 1
-#define DP_PAYLOAD_REMOTE 2
-#define DP_PAYLOAD_DELETE_LOCAL 3
-
-struct drm_dp_payload {
- int payload_state;
- int start_slot;
- int num_slots;
- int vcpi;
-};
-
-#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)
-
-struct drm_dp_vcpi_allocation {
- struct drm_dp_mst_port *port;
- int vcpi;
- int pbn;
- bool dsc_enabled;
- struct list_head next;
-};
-
-struct drm_dp_mst_topology_state {
- struct drm_private_state base;
- struct list_head vcpis;
- struct drm_dp_mst_topology_mgr *mgr;
-};
-
-#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)
-
-/**
- * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
- *
- * This struct represents the top-level DisplayPort MST topology manager.
- * There should be one instance of this for every MST capable DP connector
- * on the GPU.
- */
-struct drm_dp_mst_topology_mgr {
- /**
- * @base: Base private object for atomic
- */
- struct drm_private_obj base;
-
- /**
- * @dev: device pointer for adding i2c devices etc.
- */
- struct drm_device *dev;
- /**
- * @cbs: callbacks for connector addition and destruction.
- */
- const struct drm_dp_mst_topology_cbs *cbs;
- /**
- * @max_dpcd_transaction_bytes: maximum number of bytes to read/write
- * in one go.
- */
- int max_dpcd_transaction_bytes;
- /**
- * @aux: AUX channel for the DP MST connector this topology mgr is
- * controlling.
- */
- struct drm_dp_aux *aux;
- /**
- * @max_payloads: maximum number of payloads the GPU can generate.
- */
- int max_payloads;
- /**
- * @conn_base_id: DRM connector ID this mgr is connected to. Only used
- * to build the MST connector path value.
- */
- int conn_base_id;
-
- /**
- * @down_rep_recv: Message receiver state for down replies.
- */
- struct drm_dp_sideband_msg_rx down_rep_recv;
- /**
- * @up_req_recv: Message receiver state for up requests.
- */
- struct drm_dp_sideband_msg_rx up_req_recv;
-
- /**
- * @lock: protects @mst_state, @mst_primary, @dpcd, and
- * @payload_id_table_cleared.
- */
- struct mutex lock;
-
- /**
- * @probe_lock: Prevents @work and @up_req_work, the only writers of
- * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing
- * while they update the topology.
- */
- struct mutex probe_lock;
-
- /**
- * @mst_state: True if this manager is enabled for an MST capable port.
- * False if no MST sink/branch devices are connected.
- */
- bool mst_state : 1;
-
- /**
- * @payload_id_table_cleared: Whether or not we've cleared the payload
- * ID table for @mst_primary. Protected by @lock.
- */
- bool payload_id_table_cleared : 1;
-
- /**
- * @mst_primary: Pointer to the primary/first branch device.
- */
- struct drm_dp_mst_branch *mst_primary;
-
- /**
- * @dpcd: Cache of DPCD for primary port.
- */
- u8 dpcd[DP_RECEIVER_CAP_SIZE];
- /**
- * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
- */
- u8 sink_count;
- /**
- * @pbn_div: PBN to slots divisor.
- */
- int pbn_div;
-
- /**
- * @funcs: Atomic helper callbacks
- */
- const struct drm_private_state_funcs *funcs;
-
- /**
- * @qlock: protects @tx_msg_downq, &drm_dp_mst_branch.tx_slots and
- * &drm_dp_sideband_msg_tx.state once they are queued
- */
- struct mutex qlock;
-
- /**
- * @is_waiting_for_dwn_reply: indicates whether the manager is waiting
- * for a down reply.
- */
- bool is_waiting_for_dwn_reply;
-
- /**
- * @tx_msg_downq: List of pending down requests.
- */
- struct list_head tx_msg_downq;
-
- /**
- * @payload_lock: Protect payload information.
- */
- struct mutex payload_lock;
- /**
- * @proposed_vcpis: Array of pointers for the new VCPI allocation. The
- * VCPI structure itself is &drm_dp_mst_port.vcpi.
- */
- struct drm_dp_vcpi **proposed_vcpis;
- /**
- * @payloads: Array of payloads.
- */
- struct drm_dp_payload *payloads;
- /**
- * @payload_mask: Elements of @payloads actually in use. Since
- * reallocation of active outputs isn't possible, gaps can be created by
- * disabling outputs out of order compared to how they've been enabled.
- */
- unsigned long payload_mask;
- /**
- * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis.
- */
- unsigned long vcpi_mask;
-
- /**
- * @tx_waitq: Wait queue for stalling until a tx message completes.
- */
- wait_queue_head_t tx_waitq;
- /**
- * @work: Probe work.
- */
- struct work_struct work;
- /**
- * @tx_work: Sideband transmit worker. This can nest within the main
- * @work worker for each transaction @work launches.
- */
- struct work_struct tx_work;
-
- /**
- * @destroy_port_list: List of ports to be destroyed.
- */
- struct list_head destroy_port_list;
- /**
- * @destroy_branch_device_list: List of branch devices to be
- * destroyed.
- */
- struct list_head destroy_branch_device_list;
- /**
- * @delayed_destroy_lock: Protects @destroy_port_list and
- * @destroy_branch_device_list.
- */
- struct mutex delayed_destroy_lock;
- /**
- * @delayed_destroy_work: Work item to destroy MST port and branch
- * devices, needed to avoid locking inversion.
- */
- struct work_struct delayed_destroy_work;
-
- /**
- * @up_req_list: List of pending up requests from the topology that
- * need to be processed, in chronological order.
- */
- struct list_head up_req_list;
- /**
- * @up_req_lock: Protects @up_req_list
- */
- struct mutex up_req_lock;
- /**
- * @up_req_work: Work item to process up requests received from the
- * topology. Needed to avoid blocking hotplug handling and sideband
- * transmissions.
- */
- struct work_struct up_req_work;
-
-#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
- /**
- * @topology_ref_history_lock: protects
- * &drm_dp_mst_port.topology_ref_history and
- * &drm_dp_mst_branch.topology_ref_history.
- */
- struct mutex topology_ref_history_lock;
-#endif
-};
-
-int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_device *dev, struct drm_dp_aux *aux,
- int max_dpcd_transaction_bytes,
- int max_payloads, int conn_base_id);
-
-void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
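/*
 * Editor's sketch: a driver typically embeds the manager next to its
 * connector state and initializes it once at creation time, pairing it with
 * drm_dp_mst_topology_mgr_destroy() on teardown. All my_-prefixed names are
 * hypothetical; 16 and 4 are example limits for max_dpcd_transaction_bytes
 * and max_payloads.
 */
struct my_connector {
	struct drm_connector connector;
	struct drm_device *drm_dev;
	struct drm_dp_aux aux;
	struct drm_dp_mst_topology_mgr mst_mgr;
};

static int example_mst_init(struct my_connector *conn)
{
	return drm_dp_mst_topology_mgr_init(&conn->mst_mgr, conn->drm_dev,
					    &conn->aux, 16, 4,
					    conn->connector.base.id);
}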
-
-
-int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
-
-
-int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
-
-
-int
-drm_dp_mst_detect_port(struct drm_connector *connector,
- struct drm_modeset_acquire_ctx *ctx,
- struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port);
-
-bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port);
-struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
-
-
-int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
-
-bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port, int pbn, int slots);
-
-int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
-
-
-void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
-
-
-void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port);
-
-
-int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
- int pbn);
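/*
 * Editor's sketch: the legacy (non-atomic) VCPI flow using the helpers
 * above - compute the PBN for a mode, convert it to slots, then allocate.
 * The function name is hypothetical; clock_khz is the pixel clock in kHz
 * and bpp the bits per pixel on the link.
 */
static bool example_alloc_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			       struct drm_dp_mst_port *port,
			       int clock_khz, int bpp)
{
	int pbn = drm_dp_calc_pbn_mode(clock_khz, bpp, false);
	int slots = drm_dp_find_vcpi_slots(mgr, pbn);

	if (slots < 0)
		return false;

	return drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots);
}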
-
-
-int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr);
-
-
-int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);
-
-int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
-
-void drm_dp_mst_dump_topology(struct seq_file *m,
- struct drm_dp_mst_topology_mgr *mgr);
-
-void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
-int __must_check
-drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
- bool sync);
-
-ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
- unsigned int offset, void *buffer, size_t size);
-ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
- unsigned int offset, void *buffer, size_t size);
-
-int drm_dp_mst_connector_late_register(struct drm_connector *connector,
- struct drm_dp_mst_port *port);
-void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
- struct drm_dp_mst_port *port);
-
-struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
- struct drm_dp_mst_topology_mgr *mgr);
-int __must_check
-drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
- struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port, int pbn,
- int pbn_div);
-int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
- struct drm_dp_mst_port *port,
- int pbn, int pbn_div,
- bool enable);
-int __must_check
-drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
- struct drm_dp_mst_topology_mgr *mgr);
-int __must_check
-drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
- struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port);
-int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port, bool power_up);
-int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
-
-void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
-void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);
-
-struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);
-
-extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;
-
-/**
- * __drm_dp_mst_state_iter_get - private atomic state iterator function for
- * macro-internal use
- * @state: &struct drm_atomic_state pointer
- * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor
- * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state
- * iteration cursor
- * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state
- * iteration cursor
- * @i: int iteration cursor, for macro-internal use
- *
- * Used by for_each_oldnew_mst_mgr_in_state(),
- * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't
- * call this directly.
- *
- * Returns:
- * True if the current &struct drm_private_obj is a &struct
- * drm_dp_mst_topology_mgr, false otherwise.
- */
-static inline bool
-__drm_dp_mst_state_iter_get(struct drm_atomic_state *state,
- struct drm_dp_mst_topology_mgr **mgr,
- struct drm_dp_mst_topology_state **old_state,
- struct drm_dp_mst_topology_state **new_state,
- int i)
-{
- struct __drm_private_objs_state *objs_state = &state->private_objs[i];
-
- if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs)
- return false;
-
- *mgr = to_dp_mst_topology_mgr(objs_state->ptr);
- if (old_state)
- *old_state = to_dp_mst_topology_state(objs_state->old_state);
- if (new_state)
- *new_state = to_dp_mst_topology_state(objs_state->new_state);
-
- return true;
-}
-
-/**
- * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology
- * managers in an atomic update
- * @__state: &struct drm_atomic_state pointer
- * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
- * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
- * state
- * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
- * state
- * @__i: int iteration cursor, for macro-internal use
- *
- * This iterates over all DRM DP MST topology managers in an atomic update,
- * tracking both old and new state. This is useful in places where the state
- * delta needs to be considered, for example in atomic check functions.
- */
-#define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \
- for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
- for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i)))
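/*
 * Editor's sketch: typical use of the iterator above in a driver's atomic
 * check path; the function name and loop body are illustrative only.
 */
static int example_check_mst_managers(struct drm_atomic_state *state)
{
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_topology_state *old_st, *new_st;
	int i;

	for_each_oldnew_mst_mgr_in_state(state, mgr, old_st, new_st, i) {
		/* compare old_st->vcpis against new_st->vcpis here */
	}

	return 0;
}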
-
-/**
- * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers
- * in an atomic update
- * @__state: &struct drm_atomic_state pointer
- * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
- * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
- * state
- * @__i: int iteration cursor, for macro-internal use
- *
- * This iterates over all DRM DP MST topology managers in an atomic update,
- * tracking only the old state. This is useful in disable functions, where we
- * need the old state the hardware is still in.
- */
-#define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \
- for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
- for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i)))
-
-/**
- * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers
- * in an atomic update
- * @__state: &struct drm_atomic_state pointer
- * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
- * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
- * state
- * @__i: int iteration cursor, for macro-internal use
- *
- * This iterates over all DRM DP MST topology managers in an atomic update,
- * tracking only the new state. This is useful in enable functions, where we
- * need the new state the hardware should be in when the atomic commit
- * operation has completed.
- */
-#define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \
- for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
- for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i)))
-
-#endif
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index cf13470810a5..f6159acb8856 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -36,10 +36,12 @@ struct drm_file;
struct drm_gem_object;
struct drm_master;
struct drm_minor;
+struct dma_buf;
struct dma_buf_attachment;
struct drm_display_mode;
struct drm_mode_create_dumb;
struct drm_printer;
+struct sg_table;
/**
* enum drm_driver_feature - feature flags
@@ -72,7 +74,7 @@ enum drm_driver_feature {
* @DRIVER_ATOMIC:
*
* Driver supports the full atomic modesetting userspace API. Drivers
- * which only use atomic internally, but do not the support the full
+ * which only use atomic internally, but do not support the full
* userspace API (e.g. not all properties converted to atomic, or
* multi-plane updates are not guaranteed to be tear-free) should not
* set this flag.
@@ -135,10 +137,6 @@ enum drm_driver_feature {
* @DRIVER_HAVE_IRQ:
*
* Legacy irq support. Only for legacy drivers. Do not use.
- *
- * New drivers can either use the drm_irq_install() and
- * drm_irq_uninstall() helper functions, or roll their own irq support
- * code by calling request_irq() directly.
*/
DRIVER_HAVE_IRQ = BIT(30),
/**
@@ -163,13 +161,12 @@ struct drm_driver {
/**
* @load:
*
- * Backward-compatible driver callback to complete
- * initialization steps after the driver is registered. For
- * this reason, may suffer from race conditions and its use is
- * deprecated for new drivers. It is therefore only supported
- * for existing drivers not yet converted to the new scheme.
- * See drm_dev_init() and drm_dev_register() for proper and
- * race-free way to set up a &struct drm_device.
+ * Backward-compatible driver callback to complete initialization steps
+ * after the driver is registered. For this reason, may suffer from
+ * race conditions and its use is deprecated for new drivers. It is
+ * therefore only supported for existing drivers not yet converted to
+ * the new scheme. See devm_drm_dev_alloc() and drm_dev_register() for
+ * proper and race-free way to set up a &struct drm_device.
*
* This is deprecated, do not use!
*
@@ -262,222 +259,21 @@ struct drm_driver {
* @release:
*
* Optional callback for destroying device data after the final
- * reference is released, i.e. the device is being destroyed. Drivers
- * using this callback are responsible for calling drm_dev_fini()
- * to finalize the device and then freeing the struct themselves.
- */
- void (*release) (struct drm_device *);
-
- /**
- * @get_vblank_counter:
- *
- * Driver callback for fetching a raw hardware vblank counter for the
- * CRTC specified with the pipe argument. If a device doesn't have a
- * hardware counter, the driver can simply leave the hook as NULL.
- * The DRM core will account for missed vblank events while interrupts
- * were disabled, based on system timestamps.
- *
- * Wraparound handling and loss of events due to modesetting is dealt
- * with in the DRM core code, as long as drivers call
- * drm_crtc_vblank_off() and drm_crtc_vblank_on() when disabling or
- * enabling a CRTC.
- *
- * This is deprecated and should not be used by new drivers.
- * Use &drm_crtc_funcs.get_vblank_counter instead.
- *
- * Returns:
- *
- * Raw vblank counter value.
- */
- u32 (*get_vblank_counter) (struct drm_device *dev, unsigned int pipe);
-
- /**
- * @enable_vblank:
- *
- * Enable vblank interrupts for the CRTC specified with the pipe
- * argument.
- *
- * This is deprecated and should not be used by new drivers.
- * Use &drm_crtc_funcs.enable_vblank instead.
- *
- * Returns:
+ * reference is released, i.e. the device is being destroyed.
*
- * Zero on success, appropriate errno if the given @crtc's vblank
- * interrupt cannot be enabled.
+ * This is deprecated, clean up all memory allocations associated with a
+ * &drm_device using drmm_add_action(), drmm_kmalloc() and related
+ * managed resources functions.
*/
- int (*enable_vblank) (struct drm_device *dev, unsigned int pipe);
-
- /**
- * @disable_vblank:
- *
- * Disable vblank interrupts for the CRTC specified with the pipe
- * argument.
- *
- * This is deprecated and should not be used by new drivers.
- * Use &drm_crtc_funcs.disable_vblank instead.
- */
- void (*disable_vblank) (struct drm_device *dev, unsigned int pipe);
-
- /**
- * @get_scanout_position:
- *
- * Called by vblank timestamping code.
- *
- * Returns the current display scanout position from a crtc, and an
- * optional accurate ktime_get() timestamp of when position was
- * measured. Note that this is a helper callback which is only used if a
- * driver uses drm_calc_vbltimestamp_from_scanoutpos() for the
- * @get_vblank_timestamp callback.
- *
- * Parameters:
- *
- * dev:
- * DRM device.
- * pipe:
- * Id of the crtc to query.
- * in_vblank_irq:
- * True when called from drm_crtc_handle_vblank(). Some drivers
- * need to apply some workarounds for gpu-specific vblank irq quirks
- * if flag is set.
- * vpos:
- * Target location for current vertical scanout position.
- * hpos:
- * Target location for current horizontal scanout position.
- * stime:
- * Target location for timestamp taken immediately before
- * scanout position query. Can be NULL to skip timestamp.
- * etime:
- * Target location for timestamp taken immediately after
- * scanout position query. Can be NULL to skip timestamp.
- * mode:
- * Current display timings.
- *
- * Returns vpos as a positive number while in active scanout area.
- * Returns vpos as a negative number inside vblank, counting the number
- * of scanlines to go until end of vblank, e.g., -1 means "one scanline
- * until start of active scanout / end of vblank."
- *
- * Returns:
- *
- * True on success, false if a reliable scanout position counter could
- * not be read out.
- *
- * FIXME:
- *
- * Since this is a helper to implement @get_vblank_timestamp, we should
- * move it to &struct drm_crtc_helper_funcs, like all the other
- * helper-internal hooks.
- */
- bool (*get_scanout_position) (struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode);
-
- /**
- * @get_vblank_timestamp:
- *
- * Called by drm_get_last_vbltimestamp(). Should return a precise
- * timestamp when the most recent VBLANK interval ended or will end.
- *
- * Specifically, the timestamp in @vblank_time should correspond as
- * closely as possible to the time when the first video scanline of
- * the video frame after the end of VBLANK will start scanning out,
- * the time immediately after end of the VBLANK interval. If the
- * @crtc is currently inside VBLANK, this will be a time in the future.
- * If the @crtc is currently scanning out a frame, this will be the
- * past start time of the current scanout. This is meant to adhere
- * to the OpenML OML_sync_control extension specification.
- *
- * Parameters:
- *
- * dev:
- * dev DRM device handle.
- * pipe:
- * crtc for which timestamp should be returned.
- * max_error:
- * Maximum allowable timestamp error in nanoseconds.
- * Implementation should strive to provide timestamp
- * with an error of at most max_error nanoseconds.
- * Returns true upper bound on error for timestamp.
- * vblank_time:
- * Target location for returned vblank timestamp.
- * in_vblank_irq:
- * True when called from drm_crtc_handle_vblank(). Some drivers
- * need to apply some workarounds for gpu-specific vblank irq quirks
- * if flag is set.
- *
- * Returns:
- *
- * True on success, false on failure, which means the core should
- * fallback to a simple timestamp taken in drm_crtc_handle_vblank().
- *
- * FIXME:
- *
- * We should move this hook to &struct drm_crtc_funcs like all the other
- * vblank hooks.
- */
- bool (*get_vblank_timestamp) (struct drm_device *dev, unsigned int pipe,
- int *max_error,
- ktime_t *vblank_time,
- bool in_vblank_irq);
-
- /**
- * @irq_handler:
- *
- * Interrupt handler called when using drm_irq_install(). Not used by
- * drivers which implement their own interrupt handling.
- */
- irqreturn_t(*irq_handler) (int irq, void *arg);
-
- /**
- * @irq_preinstall:
- *
- * Optional callback used by drm_irq_install() which is called before
- * the interrupt handler is registered. This should be used to clear out
- * any pending interrupts (from e.g. firmware-based drivers) and reset
- * the interrupt handling registers.
- */
- void (*irq_preinstall) (struct drm_device *dev);
-
- /**
- * @irq_postinstall:
- *
- * Optional callback used by drm_irq_install() which is called after
- * the interrupt handler is registered. This should be used to enable
- * interrupt generation in the hardware.
- */
- int (*irq_postinstall) (struct drm_device *dev);
-
- /**
- * @irq_uninstall:
- *
- * Optional callback used by drm_irq_uninstall() which is called before
- * the interrupt handler is unregistered. This should be used to disable
- * interrupt generation in the hardware.
- */
- void (*irq_uninstall) (struct drm_device *dev);
-
- /**
- * @master_create:
- *
- * Called whenever a new master is created. Only used by vmwgfx.
- */
- int (*master_create)(struct drm_device *dev, struct drm_master *master);
-
- /**
- * @master_destroy:
- *
- * Called whenever a master is destroyed. Only used by vmwgfx.
- */
- void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
+ void (*release) (struct drm_device *);
/**
* @master_set:
*
* Called whenever the minor master is set. Only used by vmwgfx.
*/
- int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
- bool from_open);
+ void (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
+ bool from_open);
/**
* @master_drop:
*
@@ -490,69 +286,18 @@ struct drm_driver {
*
* Allows drivers to create driver-specific debugfs files.
*/
- int (*debugfs_init)(struct drm_minor *minor);
-
- /**
- * @gem_free_object: destructor for drm_gem_objects
- *
- * This is deprecated and should not be used by new drivers. Use
- * &drm_gem_object_funcs.free instead.
- */
- void (*gem_free_object) (struct drm_gem_object *obj);
-
- /**
- * @gem_free_object_unlocked: destructor for drm_gem_objects
- *
- * This is deprecated and should not be used by new drivers. Use
- * &drm_gem_object_funcs.free instead.
- * Compared to @gem_free_object this is not encumbered with
- * &drm_device.struct_mutex legacy locking schemes.
- */
- void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
-
- /**
- * @gem_open_object:
- *
- * This callback is deprecated in favour of &drm_gem_object_funcs.open.
- *
- * Driver hook called upon gem handle creation
- */
- int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
-
- /**
- * @gem_close_object:
- *
- * This callback is deprecated in favour of &drm_gem_object_funcs.close.
- *
- * Driver hook called upon gem handle release
- */
- void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
-
- /**
- * @gem_print_info:
- *
- * This callback is deprecated in favour of
- * &drm_gem_object_funcs.print_info.
- *
- * If driver subclasses struct &drm_gem_object, it can implement this
- * optional hook for printing additional driver specific info.
- *
- * drm_printf_indent() should be used in the callback passing it the
- * indent argument.
- *
- * This callback is called from drm_gem_print_info().
- */
- void (*gem_print_info)(struct drm_printer *p, unsigned int indent,
- const struct drm_gem_object *obj);
+ void (*debugfs_init)(struct drm_minor *minor);
/**
* @gem_create_object: constructor for gem objects
*
- * Hook for allocating the GEM object struct, for use by the CMA and
- * SHMEM GEM helpers.
+ * Hook for allocating the GEM object struct, for use by the CMA
+ * and SHMEM GEM helpers. Returns a GEM object on success, or an
+ * ERR_PTR()-encoded error code otherwise.
*/
struct drm_gem_object *(*gem_create_object)(struct drm_device *dev,
size_t size);
+
/**
* @prime_handle_to_fd:
*
@@ -575,14 +320,7 @@ struct drm_driver {
*/
int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
int prime_fd, uint32_t *handle);
- /**
- * @gem_prime_export:
- *
- * Export hook for GEM drivers. Deprecated in favour of
- * &drm_gem_object_funcs.export.
- */
- struct dma_buf * (*gem_prime_export)(struct drm_gem_object *obj,
- int flags);
+
/**
* @gem_prime_import:
*
@@ -592,29 +330,6 @@ struct drm_driver {
*/
struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
struct dma_buf *dma_buf);
-
- /**
- * @gem_prime_pin:
- *
- * Deprecated hook in favour of &drm_gem_object_funcs.pin.
- */
- int (*gem_prime_pin)(struct drm_gem_object *obj);
-
- /**
- * @gem_prime_unpin:
- *
- * Deprecated hook in favour of &drm_gem_object_funcs.unpin.
- */
- void (*gem_prime_unpin)(struct drm_gem_object *obj);
-
-
- /**
- * @gem_prime_get_sg_table:
- *
- * Deprecated hook in favour of &drm_gem_object_funcs.get_sg_table.
- */
- struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
-
/**
* @gem_prime_import_sg_table:
*
@@ -626,32 +341,19 @@ struct drm_driver {
struct dma_buf_attachment *attach,
struct sg_table *sgt);
/**
- * @gem_prime_vmap:
- *
- * Deprecated vmap hook for GEM drivers. Please use
- * &drm_gem_object_funcs.vmap instead.
- */
- void *(*gem_prime_vmap)(struct drm_gem_object *obj);
-
- /**
- * @gem_prime_vunmap:
- *
- * Deprecated vunmap hook for GEM drivers. Please use
- * &drm_gem_object_funcs.vunmap instead.
- */
- void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);
-
- /**
* @gem_prime_mmap:
*
* mmap hook for GEM drivers, used to implement dma-buf mmap in the
* PRIME helpers.
*
- * FIXME: There's way too much duplication going on here, and also moved
- * to &drm_gem_object_funcs.
+ * This hook only exists for historical reasons. Drivers must use
+ * drm_gem_prime_mmap() to implement it.
+ *
+ * FIXME: Convert all drivers to implement mmap in struct
+ * &drm_gem_object_funcs and inline drm_gem_prime_mmap() into
+ * its callers. This hook should be removed afterwards.
*/
- int (*gem_prime_mmap)(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
+ int (*gem_prime_mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
/**
* @dumb_create:
@@ -715,14 +417,6 @@ struct drm_driver {
struct drm_device *dev,
uint32_t handle);
- /**
- * @gem_vm_ops: Driver private ops for this object
- *
- * For GEM drivers this is deprecated in favour of
- * &drm_gem_object_funcs.vm_ops.
- */
- const struct vm_operations_struct *gem_vm_ops;
-
/** @major: driver major number */
int major;
/** @minor: driver minor number */
@@ -765,28 +459,61 @@ struct drm_driver {
*/
const struct file_operations *fops;
+#ifdef CONFIG_DRM_LEGACY
/* Everything below here is for legacy drivers, never use! */
/* private: */
- /* List of devices hanging off this driver with stealth attach. */
- struct list_head legacy_dev_list;
int (*firstopen) (struct drm_device *);
void (*preclose) (struct drm_device *, struct drm_file *file_priv);
int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
int (*dma_quiescent) (struct drm_device *);
int (*context_dtor) (struct drm_device *dev, int context);
+ irqreturn_t (*irq_handler)(int irq, void *arg);
+ void (*irq_preinstall)(struct drm_device *dev);
+ int (*irq_postinstall)(struct drm_device *dev);
+ void (*irq_uninstall)(struct drm_device *dev);
+ u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe);
+ int (*enable_vblank)(struct drm_device *dev, unsigned int pipe);
+ void (*disable_vblank)(struct drm_device *dev, unsigned int pipe);
int dev_priv_size;
+#endif
};
-int drm_dev_init(struct drm_device *dev,
- struct drm_driver *driver,
- struct device *parent);
-int devm_drm_dev_init(struct device *parent,
- struct drm_device *dev,
- struct drm_driver *driver);
-void drm_dev_fini(struct drm_device *dev);
+void *__devm_drm_dev_alloc(struct device *parent,
+ const struct drm_driver *driver,
+ size_t size, size_t offset);
-struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+/**
+ * devm_drm_dev_alloc - Resource managed allocation of a &drm_device instance
+ * @parent: Parent device object
+ * @driver: DRM driver
+ * @type: the type of the struct which contains struct &drm_device
+ * @member: the name of the &drm_device within @type.
+ *
+ * This allocates and initializes a new DRM device. No device registration is
+ * done. Call drm_dev_register() to advertise the device to user space and register it
+ * with other core subsystems. This should be done last in the device
+ * initialization sequence to make sure userspace can't access an inconsistent
+ * state.
+ *
+ * The initial ref-count of the object is 1. Use drm_dev_get() and
+ * drm_dev_put() to take and drop further ref-counts.
+ *
+ * It is recommended that drivers embed &struct drm_device into their own device
+ * structure.
+ *
+ * Note that this manages the lifetime of the resulting &drm_device
+ * automatically using devres. The DRM device initialized with this function is
+ * automatically put on driver detach using drm_dev_put().
+ *
+ * RETURNS:
+ * Pointer to new DRM device, or ERR_PTR on failure.
+ */
+#define devm_drm_dev_alloc(parent, driver, type, member) \
+ ((type *) __devm_drm_dev_alloc(parent, driver, sizeof(type), \
+ offsetof(type, member)))
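/*
 * Editor's sketch of the embedding pattern devm_drm_dev_alloc() expects;
 * struct my_device, my_driver and the platform_device probe context are
 * hypothetical.
 */
static const struct drm_driver my_driver; /* hypothetical driver */

struct my_device {
	struct drm_device drm;
	void __iomem *regs;
};

static int my_probe(struct platform_device *pdev)
{
	struct my_device *my;

	my = devm_drm_dev_alloc(&pdev->dev, &my_driver,
				struct my_device, drm);
	if (IS_ERR(my))
		return PTR_ERR(my);

	/* device init here, then advertise to userspace */
	return drm_dev_register(&my->drm, 0);
}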
+
+struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
struct device *parent);
int drm_dev_register(struct drm_device *dev, unsigned long flags);
void drm_dev_unregister(struct drm_device *dev);
@@ -824,6 +551,25 @@ static inline bool drm_dev_is_unplugged(struct drm_device *dev)
}
/**
+ * drm_core_check_all_features - check driver feature flags mask
+ * @dev: DRM device to check
+ * @features: feature flag(s) mask
+ *
+ * This checks @dev for driver features, see &drm_driver.driver_features,
+ * &drm_device.driver_features, and the various &enum drm_driver_feature flags.
+ *
+ * Returns true if all features in the @features mask are supported, false
+ * otherwise.
+ */
+static inline bool drm_core_check_all_features(const struct drm_device *dev,
+ u32 features)
+{
+ u32 supported = dev->driver->driver_features & dev->driver_features;
+
+ return features && (supported & features) == features;
+}
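/*
 * Editor's sketch: checking a combined feature mask, e.g. requiring both
 * GEM and modeset support; the function name is hypothetical.
 */
static inline bool example_supports_fbdev(struct drm_device *dev)
{
	return drm_core_check_all_features(dev, DRIVER_GEM | DRIVER_MODESET);
}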
+
+/**
* drm_core_check_feature - check driver feature flags
* @dev: DRM device to check
* @feature: feature flag
@@ -833,9 +579,10 @@ static inline bool drm_dev_is_unplugged(struct drm_device *dev)
*
* Returns true if the @feature is supported, false otherwise.
*/
-static inline bool drm_core_check_feature(const struct drm_device *dev, u32 feature)
+static inline bool drm_core_check_feature(const struct drm_device *dev,
+ enum drm_driver_feature feature)
{
- return dev->driver->driver_features & dev->driver_features & feature;
+ return drm_core_check_all_features(dev, feature);
}
/**
@@ -855,5 +602,6 @@ static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
int drm_dev_set_unique(struct drm_device *dev, const char *name);
+extern bool drm_firmware_drivers_only(void);
#endif
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index f0b03d401c27..1ed61e2b30a4 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -28,6 +28,7 @@
#include <drm/drm_mode.h>
struct drm_device;
+struct drm_edid;
struct i2c_adapter;
#define EDID_LENGTH 128
@@ -91,6 +92,16 @@ struct detailed_data_string {
u8 str[13];
} __attribute__((packed));
+#define DRM_EDID_RANGE_OFFSET_MIN_VFREQ (1 << 0) /* 1.4 */
+#define DRM_EDID_RANGE_OFFSET_MAX_VFREQ (1 << 1) /* 1.4 */
+#define DRM_EDID_RANGE_OFFSET_MIN_HFREQ (1 << 2) /* 1.4 */
+#define DRM_EDID_RANGE_OFFSET_MAX_HFREQ (1 << 3) /* 1.4 */
+
+#define DRM_EDID_DEFAULT_GTF_SUPPORT_FLAG 0x00
+#define DRM_EDID_RANGE_LIMITS_ONLY_FLAG 0x01
+#define DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG 0x02
+#define DRM_EDID_CVT_SUPPORT_FLAG 0x04
+
struct detailed_data_monitor_range {
u8 min_vfreq;
u8 max_vfreq;
@@ -116,7 +127,7 @@ struct detailed_data_monitor_range {
u8 supported_scalings;
u8 preferred_refresh;
} __attribute__((packed)) cvt;
- } formula;
+ } __attribute__((packed)) formula;
} __attribute__((packed));
struct detailed_data_wpindex {
@@ -149,7 +160,7 @@ struct detailed_non_pixel {
struct detailed_data_wpindex color;
struct std_timing timings[6];
struct cvt_timing cvt[4];
- } data;
+ } __attribute__((packed)) data;
} __attribute__((packed));
#define EDID_DETAIL_EST_TIMINGS 0xf7
@@ -167,7 +178,7 @@ struct detailed_timing {
union {
struct detailed_pixel_timing pixel_data;
struct detailed_non_pixel other_data;
- } data;
+ } __attribute__((packed)) data;
} __attribute__((packed));
#define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 0)
@@ -224,6 +235,36 @@ struct detailed_timing {
DRM_EDID_YCBCR420_DC_36 | \
DRM_EDID_YCBCR420_DC_30)
+/* HDMI 2.1 additional fields */
+#define DRM_EDID_MAX_FRL_RATE_MASK 0xf0
+#define DRM_EDID_FAPA_START_LOCATION (1 << 0)
+#define DRM_EDID_ALLM (1 << 1)
+#define DRM_EDID_FVA (1 << 2)
+
+/* Deep Color specific */
+#define DRM_EDID_DC_30BIT_420 (1 << 0)
+#define DRM_EDID_DC_36BIT_420 (1 << 1)
+#define DRM_EDID_DC_48BIT_420 (1 << 2)
+
+/* VRR specific */
+#define DRM_EDID_CNMVRR (1 << 3)
+#define DRM_EDID_CINEMA_VRR (1 << 4)
+#define DRM_EDID_MDELTA (1 << 5)
+#define DRM_EDID_VRR_MAX_UPPER_MASK 0xc0
+#define DRM_EDID_VRR_MAX_LOWER_MASK 0xff
+#define DRM_EDID_VRR_MIN_MASK 0x3f
+
+/* DSC specific */
+#define DRM_EDID_DSC_10BPC (1 << 0)
+#define DRM_EDID_DSC_12BPC (1 << 1)
+#define DRM_EDID_DSC_16BPC (1 << 2)
+#define DRM_EDID_DSC_ALL_BPP (1 << 3)
+#define DRM_EDID_DSC_NATIVE_420 (1 << 6)
+#define DRM_EDID_DSC_1P2 (1 << 7)
+#define DRM_EDID_DSC_MAX_FRL_RATE_MASK 0xf0
+#define DRM_EDID_DSC_MAX_SLICES 0xf
+#define DRM_EDID_DSC_TOTAL_CHUNK_KBYTES 0x3f
+
/* ELD Header Block */
#define DRM_ELD_HEADER_BLOCK_SIZE 4
@@ -301,7 +342,7 @@ struct edid {
u8 features;
/* Color characteristics */
u8 red_green_lo;
- u8 black_white_lo;
+ u8 blue_white_lo;
u8 red_x;
u8 red_y;
u8 green_x;
@@ -337,8 +378,8 @@ struct drm_connector;
struct drm_connector_state;
struct drm_display_mode;
-int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
-int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb);
+int drm_edid_to_sad(const struct edid *edid, struct cea_sad **sads);
+int drm_edid_to_speaker_allocation(const struct edid *edid, u8 **sadb);
int drm_av_sync_delay(struct drm_connector *connector,
const struct drm_display_mode *mode);
@@ -354,33 +395,23 @@ drm_load_edid_firmware(struct drm_connector *connector)
}
#endif
+bool drm_edid_are_equal(const struct edid *edid1, const struct edid *edid2);
+
int
drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
- struct drm_connector *connector,
+ const struct drm_connector *connector,
const struct drm_display_mode *mode);
int
drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
- struct drm_connector *connector,
+ const struct drm_connector *connector,
const struct drm_display_mode *mode);
void
-drm_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame,
- const struct drm_connector_state *conn_state);
-
-void
-drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame,
- const struct drm_connector_state *conn_state);
-
-void
drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
- struct drm_connector *connector,
+ const struct drm_connector *connector,
const struct drm_display_mode *mode,
enum hdmi_quantization_range rgb_quant_range);
-int
-drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
- const struct drm_connector_state *conn_state);
-
/**
* drm_eld_mnl - Get ELD monitor name length in bytes.
* @eld: pointer to an eld memory structure with mnl set
@@ -471,6 +502,65 @@ static inline u8 drm_eld_get_conn_type(const uint8_t *eld)
return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK;
}
+/**
+ * drm_edid_decode_mfg_id - Decode the manufacturer ID
+ * @mfg_id: The manufacturer ID
+ * @vend: A 4-byte buffer to store the 3-letter vendor string plus a '\0'
+ * termination
+ */
+static inline const char *drm_edid_decode_mfg_id(u16 mfg_id, char vend[4])
+{
+ vend[0] = '@' + ((mfg_id >> 10) & 0x1f);
+ vend[1] = '@' + ((mfg_id >> 5) & 0x1f);
+ vend[2] = '@' + ((mfg_id >> 0) & 0x1f);
+ vend[3] = '\0';
+
+ return vend;
+}
+
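For illustration, a minimal sketch assuming the EDID's two mfg_id bytes are combined big-endian, as usual for the PNP vendor ID (variable names are ours):

	char vendor[4];
	u16 mfg = (edid->mfg_id[0] << 8) | edid->mfg_id[1];

	/* vendor now holds the three-letter PNP ID, e.g. "BOE" */
	drm_edid_decode_mfg_id(mfg, vendor);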
+/**
+ * drm_edid_encode_panel_id - Encode an ID for matching against drm_edid_get_panel_id()
+ * @vend_chr_0: First character of the vendor string.
+ * @vend_chr_1: Second character of the vendor string.
+ * @vend_chr_2: Third character of the vendor string.
+ * @product_id: The 16-bit product ID.
+ *
+ * This is a macro so that it can be calculated at compile time and used
+ * as an initializer.
+ *
+ * For instance:
+ * drm_edid_encode_panel_id('B', 'O', 'E', 0x2d08) => 0x09e52d08
+ *
+ * Return: a 32-bit ID per panel.
+ */
+#define drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, product_id) \
+ ((((u32)(vend_chr_0) - '@') & 0x1f) << 26 | \
+ (((u32)(vend_chr_1) - '@') & 0x1f) << 21 | \
+ (((u32)(vend_chr_2) - '@') & 0x1f) << 16 | \
+ ((product_id) & 0xffff))
+
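Because the macro folds to a constant, it can key a lookup table; a sketch with a hypothetical per-panel quirk table:

	static const struct panel_entry {
		u32 panel_id;
		unsigned int delay_ms;	/* hypothetical per-panel data */
	} panel_table[] = {
		{ drm_edid_encode_panel_id('B', 'O', 'E', 0x2d08), 80 },
	};

Entries are then matched against the value returned by drm_edid_get_panel_id().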
+/**
+ * drm_edid_decode_panel_id - Decode a panel ID from drm_edid_encode_panel_id()
+ * @panel_id: The panel ID to decode.
+ * @vend: A 4-byte buffer to store the 3-letter vendor string plus a '\0'
+ * termination
+ * @product_id: The product ID will be returned here.
+ *
+ * For instance, after:
+ * drm_edid_decode_panel_id(0x09e52d08, vend, &product_id)
+ * These will be true:
+ * vend[0] = 'B'
+ * vend[1] = 'O'
+ * vend[2] = 'E'
+ * vend[3] = '\0'
+ * product_id = 0x2d08
+ */
+static inline void drm_edid_decode_panel_id(u32 panel_id, char vend[4], u16 *product_id)
+{
+ *product_id = (u16)(panel_id & 0xffff);
+ drm_edid_decode_mfg_id(panel_id >> 16, vend);
+}
+
bool drm_probe_ddc(struct i2c_adapter *adapter);
struct edid *drm_do_get_edid(struct drm_connector *connector,
int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
@@ -478,6 +568,7 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
void *data);
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
+u32 drm_edid_get_panel_id(struct i2c_adapter *adapter);
struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
struct i2c_adapter *adapter);
struct edid *drm_edid_duplicate(const struct edid *edid);
@@ -485,8 +576,8 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
int drm_add_override_edid_modes(struct drm_connector *connector);
u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
-bool drm_detect_hdmi_monitor(struct edid *edid);
-bool drm_detect_monitor_audio(struct edid *edid);
+bool drm_detect_hdmi_monitor(const struct edid *edid);
+bool drm_detect_monitor_audio(const struct edid *edid);
enum hdmi_quantization_range
drm_default_rgb_quant_range(const struct drm_display_mode *mode);
int drm_add_modes_noedid(struct drm_connector *connector,
@@ -494,13 +585,33 @@ int drm_add_modes_noedid(struct drm_connector *connector,
void drm_set_preferred_mode(struct drm_connector *connector,
int hpref, int vpref);
-int drm_edid_header_is_valid(const u8 *raw_edid);
+int drm_edid_header_is_valid(const void *edid);
bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
bool *edid_corrupt);
bool drm_edid_is_valid(struct edid *edid);
-void drm_edid_get_monitor_name(struct edid *edid, char *name,
+void drm_edid_get_monitor_name(const struct edid *edid, char *name,
int buflen);
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh,
bool rb);
+struct drm_display_mode *
+drm_display_mode_from_cea_vic(struct drm_device *dev,
+ u8 video_code);
+
+/* Interface based on struct drm_edid */
+const struct drm_edid *drm_edid_alloc(const void *edid, size_t size);
+const struct drm_edid *drm_edid_dup(const struct drm_edid *drm_edid);
+void drm_edid_free(const struct drm_edid *drm_edid);
+const struct edid *drm_edid_raw(const struct drm_edid *drm_edid);
+const struct drm_edid *drm_edid_read(struct drm_connector *connector);
+const struct drm_edid *drm_edid_read_ddc(struct drm_connector *connector,
+ struct i2c_adapter *adapter);
+const struct drm_edid *drm_edid_read_custom(struct drm_connector *connector,
+ int (*read_block)(void *context, u8 *buf, unsigned int block, size_t len),
+ void *context);
+int drm_edid_connector_update(struct drm_connector *connector,
+ const struct drm_edid *edid);
+const u8 *drm_find_edid_extension(const struct drm_edid *drm_edid,
+ int ext_id, int *ext_index);
+
#endif /* __DRM_EDID_H__ */
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 5623994b6e9e..3a09682af685 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -89,8 +89,7 @@ struct drm_encoder_funcs {
* @head: list management
* @base: base KMS object
* @name: human readable name, can be overwritten by the driver
- * @bridge: bridge associated to the encoder
- * @funcs: control functions
+ * @funcs: control functions, can be NULL for simple managed encoders
* @helper_private: mid-layer private data
*
* CRTCs drive pixels to encoders, which convert them into signals
@@ -142,7 +141,7 @@ struct drm_encoder {
* the bits for all &drm_crtc objects this encoder can be connected to
* before calling drm_dev_register().
*
- * In reality almost every driver gets this wrong.
+ * You will get a WARN if you get this wrong in the driver.
*
* Note that since CRTC objects can't be hotplugged the assigned indices
* are stable and hence known before registering all objects.
@@ -159,7 +158,11 @@ struct drm_encoder {
* encoders can be used in a cloned configuration, they both should have
 * each other's bit set.
*
- * In reality almost every driver gets this wrong.
+ * As an exception to the above rule if the driver doesn't implement
+ * any cloning it can leave @possible_clones set to 0. The core will
+ * automagically fix this up by setting the bit for the encoder itself.
+ *
+ * You will get a WARN if you get this wrong in the driver.
*
* Note that since encoder objects can't be hotplugged the assigned indices
* are stable and hence known before registering all objects.
@@ -174,7 +177,8 @@ struct drm_encoder {
struct drm_crtc *crtc;
/**
- * @bridge_chain: Bridges attached to this encoder.
+ * @bridge_chain: Bridges attached to this encoder. Drivers shall not
+ * access this field directly.
*/
struct list_head bridge_chain;
@@ -190,6 +194,60 @@ int drm_encoder_init(struct drm_device *dev,
const struct drm_encoder_funcs *funcs,
int encoder_type, const char *name, ...);
+__printf(5, 6)
+int drmm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type, const char *name, ...);
+
+__printf(6, 7)
+void *__drmm_encoder_alloc(struct drm_device *dev,
+ size_t size, size_t offset,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type,
+ const char *name, ...);
+
+/**
+ * drmm_encoder_alloc - Allocate and initialize an encoder
+ * @dev: drm device
+ * @type: the type of the struct which contains struct &drm_encoder
+ * @member: the name of the &drm_encoder within @type
+ * @funcs: callbacks for this encoder (optional)
+ * @encoder_type: user visible type of the encoder
+ * @name: printf style format string for the encoder name, or NULL for default name
+ *
+ * Allocates and initializes an encoder, which should be embedded as part of
+ * the driver's encoder object. Cleanup is handled automatically by
+ * registering drm_encoder_cleanup() with drmm_add_action().
+ *
+ * The @drm_encoder_funcs.destroy hook must be NULL.
+ *
+ * Returns:
+ * Pointer to new encoder, or ERR_PTR on failure.
+ */
+#define drmm_encoder_alloc(dev, type, member, funcs, encoder_type, name, ...) \
+ ((type *)__drmm_encoder_alloc(dev, sizeof(type), \
+ offsetof(type, member), funcs, \
+ encoder_type, name, ##__VA_ARGS__))
+
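A minimal subclassing sketch (struct my_encoder and its member names are hypothetical):

	struct my_encoder {
		struct drm_encoder base;
		void __iomem *mmio;
	};

	/* funcs may be NULL; cleanup is managed, so no .destroy hook */
	struct my_encoder *enc;
	enc = drmm_encoder_alloc(dev, struct my_encoder, base, NULL,
				 DRM_MODE_ENCODER_TMDS, NULL);
	if (IS_ERR(enc))
		return PTR_ERR(enc);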
+/**
+ * drmm_plain_encoder_alloc - Allocate and initialize an encoder
+ * @dev: drm device
+ * @funcs: callbacks for this encoder (optional)
+ * @encoder_type: user visible type of the encoder
+ * @name: printf style format string for the encoder name, or NULL for default name
+ *
+ * This is a simplified version of drmm_encoder_alloc(), which only allocates
+ * and returns a struct drm_encoder instance, with no subclassing.
+ *
+ * Returns:
+ * Pointer to the new drm_encoder struct, or ERR_PTR on failure.
+ */
+#define drmm_plain_encoder_alloc(dev, funcs, encoder_type, name, ...) \
+ ((struct drm_encoder *) \
+ __drmm_encoder_alloc(dev, sizeof(struct drm_encoder), \
+ 0, funcs, encoder_type, name, ##__VA_ARGS__))
+
/**
* drm_encoder_index - find the index of a registered encoder
* @encoder: encoder to find index for
diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h
index a09864f6d684..7214101fd731 100644
--- a/include/drm/drm_encoder_slave.h
+++ b/include/drm/drm_encoder_slave.h
@@ -27,6 +27,8 @@
#ifndef __DRM_ENCODER_SLAVE_H__
#define __DRM_ENCODER_SLAVE_H__
+#include <linux/i2c.h>
+
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
deleted file mode 100644
index 795aea1d0a25..000000000000
--- a/include/drm/drm_fb_cma_helper.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DRM_FB_CMA_HELPER_H__
-#define __DRM_FB_CMA_HELPER_H__
-
-#include <linux/types.h>
-
-struct drm_framebuffer;
-struct drm_plane_state;
-
-struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
- unsigned int plane);
-
-dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
- struct drm_plane_state *state,
- unsigned int plane);
-
-#endif
-
diff --git a/include/drm/drm_fb_dma_helper.h b/include/drm/drm_fb_dma_helper.h
new file mode 100644
index 000000000000..d5e036c57801
--- /dev/null
+++ b/include/drm/drm_fb_dma_helper.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DRM_FB_DMA_HELPER_H__
+#define __DRM_FB_DMA_HELPER_H__
+
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_framebuffer;
+struct drm_plane_state;
+
+struct drm_gem_dma_object *drm_fb_dma_get_gem_obj(struct drm_framebuffer *fb,
+ unsigned int plane);
+
+dma_addr_t drm_fb_dma_get_gem_addr(struct drm_framebuffer *fb,
+ struct drm_plane_state *state,
+ unsigned int plane);
+
+void drm_fb_dma_sync_non_coherent(struct drm_device *drm,
+ struct drm_plane_state *old_state,
+ struct drm_plane_state *state);
+
+#endif
+
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 1c6633da0f91..fddd0d1af689 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -35,8 +35,8 @@ struct drm_fb_helper;
#include <drm/drm_client.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
+#include <linux/fb.h>
#include <linux/kgdb.h>
-#include <linux/vgaarb.h>
enum mode_set_atomic {
LEAVE_ATOMIC_MODE_SET,
@@ -100,10 +100,10 @@ struct drm_fb_helper_funcs {
* @funcs: driver callbacks for fb helper
* @fbdev: emulated fbdev device info struct
* @pseudo_palette: fake palette of 16 colors
- * @dirty_clip: clip rectangle used with deferred_io to accumulate damage to
- * the screen buffer
- * @dirty_lock: spinlock protecting @dirty_clip
- * @dirty_work: worker used to flush the framebuffer
+ * @damage_clip: clip rectangle used with deferred_io to accumulate damage to
+ * the screen buffer
+ * @damage_lock: spinlock protecting @damage_clip
+ * @damage_work: worker used to flush the framebuffer
* @resume_work: worker used during resume if the console lock is already taken
*
* This is the main structure used by the fbdev helpers. Drivers supporting
@@ -131,9 +131,9 @@ struct drm_fb_helper {
const struct drm_fb_helper_funcs *funcs;
struct fb_info *fbdev;
u32 pseudo_palette[17];
- struct drm_clip_rect dirty_clip;
- spinlock_t dirty_lock;
- struct work_struct dirty_work;
+ struct drm_clip_rect damage_clip;
+ spinlock_t damage_lock;
+ struct work_struct damage_work;
struct work_struct resume_work;
/**
@@ -213,8 +213,7 @@ drm_fb_helper_from_client(struct drm_client_dev *client)
#ifdef CONFIG_DRM_FBDEV_EMULATION
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs);
-int drm_fb_helper_init(struct drm_device *dev,
- struct drm_fb_helper *helper, int max_conn);
+int drm_fb_helper_init(struct drm_device *dev, struct drm_fb_helper *helper);
void drm_fb_helper_fini(struct drm_fb_helper *helper);
int drm_fb_helper_blank(int blank, struct fb_info *info);
int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
@@ -231,8 +230,7 @@ void drm_fb_helper_fill_info(struct fb_info *info,
struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes);
-void drm_fb_helper_deferred_io(struct fb_info *info,
- struct list_head *pagelist);
+void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagereflist);
ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
size_t count, loff_t *ppos);
@@ -270,7 +268,8 @@ int drm_fb_helper_debug_leave(struct fb_info *info);
void drm_fb_helper_lastclose(struct drm_device *dev);
void drm_fb_helper_output_poll_changed(struct drm_device *dev);
-int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp);
+void drm_fbdev_generic_setup(struct drm_device *dev,
+ unsigned int preferred_bpp);
#else
static inline void drm_fb_helper_prepare(struct drm_device *dev,
struct drm_fb_helper *helper,
@@ -279,8 +278,7 @@ static inline void drm_fb_helper_prepare(struct drm_device *dev,
}
static inline int drm_fb_helper_init(struct drm_device *dev,
- struct drm_fb_helper *helper,
- int max_conn)
+ struct drm_fb_helper *helper)
{
/* So drivers can use it to free the struct */
helper->dev = dev;
@@ -445,83 +443,11 @@ static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev)
{
}
-static inline int
+static inline void
drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
{
- return 0;
}
#endif
-/* TODO: There's a todo entry to remove these three */
-static inline int
-drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
-{
- return 0;
-}
-
-static inline int
-drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
- struct drm_connector *connector)
-{
- return 0;
-}
-
-static inline int
-drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
- struct drm_connector *connector)
-{
- return 0;
-}
-
-/**
- * drm_fb_helper_remove_conflicting_framebuffers - remove firmware-configured framebuffers
- * @a: memory range, users of which are to be removed
- * @name: requesting driver name
- * @primary: also kick vga16fb if present
- *
- * This function removes framebuffer devices (initialized by firmware/bootloader)
- * which use memory range described by @a. If @a is NULL all such devices are
- * removed.
- */
-static inline int
-drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary)
-{
-#if IS_REACHABLE(CONFIG_FB)
- return remove_conflicting_framebuffers(a, name, primary);
-#else
- return 0;
-#endif
-}
-
-/**
- * drm_fb_helper_remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices
- * @pdev: PCI device
- * @name: requesting driver name
- *
- * This function removes framebuffer devices (eg. initialized by firmware)
- * using memory range configured for any of @pdev's memory bars.
- *
- * The function assumes that PCI device with shadowed ROM drives a primary
- * display and so kicks out vga16fb.
- */
-static inline int
-drm_fb_helper_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
- const char *name)
-{
- int ret = 0;
-
- /*
- * WARNING: Apparently we must kick fbdev drivers before vgacon,
- * otherwise the vga fbdev driver falls over.
- */
-#if IS_REACHABLE(CONFIG_FB)
- ret = remove_conflicting_pci_framebuffers(pdev, name);
-#endif
- if (ret == 0)
- ret = vga_remove_vgacon(pdev);
- return ret;
-}
-
#endif
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 8b099b347817..d780fd151789 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -202,6 +202,17 @@ struct drm_file {
bool writeback_connectors;
/**
+ * @was_master:
+ *
+ * This client has or had master capability. Protected by struct
+ * &drm_device.master_mutex.
+ *
+ * This is used to avoid enforcing CAP_SYS_ADMIN for clients that
+ * are or were master.
+ */
+ bool was_master;
+
+ /**
* @is_master:
*
* This client is the creator of @master. Protected by struct
@@ -215,15 +226,31 @@ struct drm_file {
/**
* @master:
*
- * Master this node is currently associated with. Only relevant if
- * drm_is_primary_client() returns true. Note that this only
- * matches &drm_device.master if the master is the currently active one.
+ * Master this node is currently associated with. Protected by struct
+ * &drm_device.master_mutex, and serialized by @master_lookup_lock.
+ *
+ * Only relevant if drm_is_primary_client() returns true. Note that
+ * this only matches &drm_device.master if the master is the currently
+ * active one.
+ *
+ * To update @master, both &drm_device.master_mutex and
+ * @master_lookup_lock need to be held, therefore holding either of
+ * them is safe and enough for the read side.
+ *
+ * When dereferencing this pointer, either hold struct
+ * &drm_device.master_mutex for the duration of the pointer's use, or
+ * use drm_file_get_master() if struct &drm_device.master_mutex is not
+ * currently held and there is no other need to hold it. This prevents
+ * @master from being freed during use.
*
* See also @authentication and @is_master and the :ref:`section on
* primary nodes and authentication <drm_primary_node>`.
*/
struct drm_master *master;
+ /** @master_lookup_lock: Serializes @master. */
+ spinlock_t master_lookup_lock;
+
/** @pid: Process that opened this file. */
struct pid *pid;
@@ -374,6 +401,7 @@ int drm_open(struct inode *inode, struct file *filp);
ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
int drm_release(struct inode *inode, struct file *filp);
+int drm_release_noglobal(struct inode *inode, struct file *filp);
__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait);
int drm_event_reserve_init_locked(struct drm_device *dev,
struct drm_file *file_priv,
@@ -387,6 +415,9 @@ void drm_event_cancel_free(struct drm_device *dev,
struct drm_pending_event *p);
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e);
void drm_send_event(struct drm_device *dev, struct drm_pending_event *e);
+void drm_send_event_timestamp_locked(struct drm_device *dev,
+ struct drm_pending_event *e,
+ ktime_t timestamp);
struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags);
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index ac220aa1a245..eb5c98cf82b8 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -6,26 +6,51 @@
#ifndef __LINUX_DRM_FORMAT_HELPER_H
#define __LINUX_DRM_FORMAT_HELPER_H
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_format_info;
struct drm_framebuffer;
struct drm_rect;
-void drm_fb_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip);
-void drm_fb_memcpy_dstclip(void __iomem *dst, void *vaddr,
- struct drm_framebuffer *fb,
- struct drm_rect *clip);
-void drm_fb_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip);
-void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr,
- struct drm_framebuffer *fb,
- struct drm_rect *clip, bool swab);
-void drm_fb_xrgb8888_to_rgb565_dstclip(void __iomem *dst, unsigned int dst_pitch,
- void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip, bool swab);
-void drm_fb_xrgb8888_to_rgb888_dstclip(void __iomem *dst, unsigned int dst_pitch,
- void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip);
-void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip);
+struct iosys_map;
+
+unsigned int drm_fb_clip_offset(unsigned int pitch, const struct drm_format_info *format,
+ const struct drm_rect *clip);
+
+void drm_fb_memcpy(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
+void drm_fb_swab(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, bool cached);
+void drm_fb_xrgb8888_to_rgb332(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
+void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, bool swab);
+void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
+void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
+void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
+
+int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *rect);
+
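A sketch of the typical damage-handling call (all names apart from the helper and the fourcc code are hypothetical); passing a NULL @dst_pitch means the destination lines are tightly packed:

	/* Convert the damaged rectangle from the XRGB8888 source
	 * framebuffer into the hardware's RGB565 destination.
	 */
	ret = drm_fb_blit(&dst_map, NULL, DRM_FORMAT_RGB565,
			  &src_map, fb, &damage);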
+void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
+
+size_t drm_fb_build_fourcc_list(struct drm_device *dev,
+ const u32 *native_fourccs, size_t native_nfourccs,
+ const u32 *extra_fourccs, size_t extra_nfourccs,
+ u32 *fourccs_out, size_t nfourccs_out);
#endif /* __LINUX_DRM_FORMAT_HELPER_H */
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 156b122c0ad5..532ae78ca747 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -25,6 +25,11 @@
#include <linux/types.h>
#include <uapi/drm/drm_fourcc.h>
+/**
+ * DRM_FORMAT_MAX_PLANES - maximum number of planes a DRM format can have
+ */
+#define DRM_FORMAT_MAX_PLANES 4u
+
/*
* DRM formats are little endian. Define host endian variants for the
* most common formats here, to reduce the #ifdefs needed in drivers.
@@ -78,7 +83,7 @@ struct drm_format_info {
* triplet @char_per_block, @block_w, @block_h for better
* describing the pixel format.
*/
- u8 cpp[4];
+ u8 cpp[DRM_FORMAT_MAX_PLANES];
/**
* @char_per_block:
@@ -104,7 +109,7 @@ struct drm_format_info {
* information from their drm_mode_config.get_format_info hook
* if they want the core to be validating the pitch.
*/
- u8 char_per_block[4];
+ u8 char_per_block[DRM_FORMAT_MAX_PLANES];
};
/**
@@ -113,7 +118,7 @@ struct drm_format_info {
* Block width in pixels, this is intended to be accessed through
* drm_format_info_block_width()
*/
- u8 block_w[4];
+ u8 block_w[DRM_FORMAT_MAX_PLANES];
/**
* @block_h:
@@ -121,7 +126,7 @@ struct drm_format_info {
* Block height in pixels, this is intended to be accessed through
* drm_format_info_block_height()
*/
- u8 block_h[4];
+ u8 block_h[DRM_FORMAT_MAX_PLANES];
/** @hsub: Horizontal chroma subsampling factor */
u8 hsub;
@@ -133,14 +138,9 @@ struct drm_format_info {
/** @is_yuv: Is it a YUV format? */
bool is_yuv;
-};
-/**
- * struct drm_format_name_buf - name of a DRM format
- * @str: string buffer containing the format name
- */
-struct drm_format_name_buf {
- char str[32];
+ /** @is_color_indexed: Is it a color-indexed format? */
+ bool is_color_indexed;
};
/**
@@ -316,8 +316,8 @@ unsigned int drm_format_info_block_width(const struct drm_format_info *info,
int plane);
unsigned int drm_format_info_block_height(const struct drm_format_info *info,
int plane);
+unsigned int drm_format_info_bpp(const struct drm_format_info *info, int plane);
uint64_t drm_format_info_min_pitch(const struct drm_format_info *info,
int plane, unsigned int buffer_width);
-const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf);
#endif /* __DRM_FOURCC_H__ */
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index c0e0256e3e98..0dcc07b68654 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -27,12 +27,12 @@
#include <linux/list.h>
#include <linux/sched.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_mode_object.h>
struct drm_clip_rect;
struct drm_device;
struct drm_file;
-struct drm_format_info;
struct drm_framebuffer;
struct drm_gem_object;
@@ -147,17 +147,17 @@ struct drm_framebuffer {
* @pitches: Line stride per buffer. For userspace created object this
* is copied from drm_mode_fb_cmd2.
*/
- unsigned int pitches[4];
+ unsigned int pitches[DRM_FORMAT_MAX_PLANES];
/**
* @offsets: Offset from buffer start to the actual pixel data in bytes,
* per buffer. For userspace created object this is copied from
* drm_mode_fb_cmd2.
*
* Note that this is a linear offset and does not take into account
- * tiling or buffer laytou per @modifier. It meant to be used when the
- * actual pixel data for this framebuffer plane starts at an offset,
- * e.g. when multiple planes are allocated within the same backing
- * storage buffer object. For tiled layouts this generally means it
+ * tiling or buffer layout per @modifier. It is meant to be used when
+ * the actual pixel data for this framebuffer plane starts at an offset,
+ * e.g. when multiple planes are allocated within the same backing
+ * storage buffer object. For tiled layouts this generally means its
* @offsets must at least be tile-size aligned, but hardware often has
* stricter requirements.
*
@@ -165,7 +165,7 @@ struct drm_framebuffer {
* data (even for linear buffers). Specifying an x/y pixel offset is
* instead done through the source rectangle in &struct drm_plane_state.
*/
- unsigned int offsets[4];
+ unsigned int offsets[DRM_FORMAT_MAX_PLANES];
/**
* @modifier: Data layout modifier. This is used to describe
* tiling, or also special layouts (like compression) of auxiliary
@@ -210,7 +210,7 @@ struct drm_framebuffer {
* This is used by the GEM framebuffer helpers, see e.g.
* drm_gem_fb_create().
*/
- struct drm_gem_object *obj[4];
+ struct drm_gem_object *obj[DRM_FORMAT_MAX_PLANES];
};
#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
@@ -297,4 +297,42 @@ int drm_framebuffer_plane_width(int width,
int drm_framebuffer_plane_height(int height,
const struct drm_framebuffer *fb, int plane);
+/**
+ * struct drm_afbc_framebuffer - a special afbc frame buffer object
+ *
+ * A derived class of struct drm_framebuffer, dedicated to AFBC use cases.
+ */
+struct drm_afbc_framebuffer {
+ /**
+ * @base: base framebuffer structure.
+ */
+ struct drm_framebuffer base;
+ /**
+ * @block_width: width of a single afbc block
+ */
+ u32 block_width;
+ /**
+ * @block_height: height of a single afbc block
+ */
+ u32 block_height;
+ /**
+ * @aligned_width: aligned frame buffer width
+ */
+ u32 aligned_width;
+ /**
+ * @aligned_height: aligned frame buffer height
+ */
+ u32 aligned_height;
+ /**
+ * @offset: offset of the first afbc header
+ */
+ u32 offset;
+ /**
+ * @afbc_size: minimum size of afbc buffer
+ */
+ u32 afbc_size;
+};
+
+#define fb_to_afbc_fb(x) container_of(x, struct drm_afbc_framebuffer, base)
+
#endif
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 0b375069cd48..bd42f25e449c 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -39,6 +39,7 @@
#include <drm/drm_vma_manager.h>
+struct iosys_map;
struct drm_gem_object;
/**
@@ -138,17 +139,17 @@ struct drm_gem_object_funcs {
*
* This callback is optional.
*/
- void *(*vmap)(struct drm_gem_object *obj);
+ int (*vmap)(struct drm_gem_object *obj, struct iosys_map *map);
/**
* @vunmap:
*
- * Releases the the address previously returned by @vmap. Used by the
+ * Releases the address previously returned by @vmap. Used by the
* drm_gem_dmabuf_vunmap() helper.
*
* This callback is optional.
*/
- void (*vunmap)(struct drm_gem_object *obj, void *vaddr);
+ void (*vunmap)(struct drm_gem_object *obj, struct iosys_map *map);
/**
* @mmap:
@@ -157,7 +158,7 @@ struct drm_gem_object_funcs {
*
* This callback is optional.
*
- * The callback is used by by both drm_gem_mmap_obj() and
+ * The callback is used by both drm_gem_mmap_obj() and
* drm_gem_prime_mmap(). When @mmap is present @vm_ops is not
* used, the @mmap callback must set vma->vm_ops instead.
*/
@@ -174,6 +175,41 @@ struct drm_gem_object_funcs {
};
/**
+ * struct drm_gem_lru - A simple LRU helper
+ *
+ * A helper for tracking GEM objects in a given state, to aid in a
+ * driver's shrinker implementation. Tracks the count of pages
+ * for lockless &shrinker.count_objects, and provides
+ * &drm_gem_lru_scan for a driver's &shrinker.scan_objects
+ * implementation.
+ */
+struct drm_gem_lru {
+ /**
+ * @lock:
+ *
+ * Lock protecting movement of GEM objects between LRUs. All
+ * LRUs that the object can move between should be protected
+ * by the same lock.
+ */
+ struct mutex *lock;
+
+ /**
+ * @count:
+ *
+ * The total number of backing pages of the GEM objects in
+ * this LRU.
+ */
+ long count;
+
+ /**
+ * @list:
+ *
+ * The LRU list.
+ */
+ struct list_head list;
+};
+
+/**
* struct drm_gem_object - GEM buffer object
*
* This structure defines the generic parts for GEM buffer objects, which are
@@ -187,8 +223,8 @@ struct drm_gem_object {
*
* Reference count of this object
*
- * Please use drm_gem_object_get() to acquire and drm_gem_object_put()
- * or drm_gem_object_put_unlocked() to release a reference to a GEM
+ * Please use drm_gem_object_get() to acquire and drm_gem_object_put_locked()
+ * or drm_gem_object_put() to release a reference to a GEM
* buffer object.
*/
struct kref refcount;
@@ -216,7 +252,7 @@ struct drm_gem_object {
*
* SHMEM file node used as backing storage for swappable buffer objects.
* GEM also supports driver private objects with driver-specific backing
- * storage (contiguous CMA memory, special reserved blocks). In this
+ * storage (contiguous DMA memory, special reserved blocks). In this
* case @filp is NULL.
*/
struct file *filp;
@@ -272,8 +308,9 @@ struct drm_gem_object {
* attachment point for the device. This is invariant over the lifetime
* of a gem object.
*
- * The &drm_driver.gem_free_object callback is responsible for cleaning
- * up the dma_buf attachment and references acquired at import time.
+ * The &drm_gem_object_funcs.free callback is responsible for
+ * cleaning up the dma_buf attachment and references acquired at import
+ * time.
*
* Note that the drm gem/prime core does not depend upon drivers setting
* this field any more. So for drivers where this doesn't make sense
@@ -310,9 +347,40 @@ struct drm_gem_object {
*
*/
const struct drm_gem_object_funcs *funcs;
+
+ /**
+ * @lru_node:
+ *
+ * List node in a &drm_gem_lru.
+ */
+ struct list_head lru_node;
+
+ /**
+ * @lru:
+ *
+ * The current LRU list that the GEM object is on.
+ */
+ struct drm_gem_lru *lru;
};
/**
+ * DRM_GEM_FOPS - Default drm GEM file operations
+ *
+ * This macro provides a shorthand for setting the GEM file ops in the
+ * &file_operations structure. If all you need are the default ops, use
+ * DEFINE_DRM_GEM_FOPS instead.
+ */
+#define DRM_GEM_FOPS \
+ .open = drm_open,\
+ .release = drm_release,\
+ .unlocked_ioctl = drm_ioctl,\
+ .compat_ioctl = drm_compat_ioctl,\
+ .poll = drm_poll,\
+ .read = drm_read,\
+ .llseek = noop_llseek,\
+ .mmap = drm_gem_mmap
+
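As a sketch, a driver that needs one extra file operation can splice the defaults in (my_fops and my_show_fdinfo are hypothetical):

	static const struct file_operations my_fops = {
		.owner = THIS_MODULE,
		DRM_GEM_FOPS,
		.show_fdinfo = my_show_fdinfo,	/* driver-specific extra */
	};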
+/**
* DEFINE_DRM_GEM_FOPS() - macro to generate file operations for GEM drivers
* @name: name for the generated structure
*
@@ -328,14 +396,7 @@ struct drm_gem_object {
#define DEFINE_DRM_GEM_FOPS(name) \
static const struct file_operations name = {\
.owner = THIS_MODULE,\
- .open = drm_open,\
- .release = drm_release,\
- .unlocked_ioctl = drm_ioctl,\
- .compat_ioctl = drm_compat_ioctl,\
- .poll = drm_poll,\
- .read = drm_read,\
- .llseek = noop_llseek,\
- .mmap = drm_gem_mmap,\
+ DRM_GEM_FOPS,\
}
void drm_gem_object_release(struct drm_gem_object *obj);
@@ -362,30 +423,26 @@ static inline void drm_gem_object_get(struct drm_gem_object *obj)
kref_get(&obj->refcount);
}
+__attribute__((nonnull))
+static inline void
+__drm_gem_object_put(struct drm_gem_object *obj)
+{
+ kref_put(&obj->refcount, drm_gem_object_free);
+}
+
/**
- * __drm_gem_object_put - raw function to release a GEM buffer object reference
+ * drm_gem_object_put - drop a GEM buffer object reference
* @obj: GEM buffer object
*
- * This function is meant to be used by drivers which are not encumbered with
- * &drm_device.struct_mutex legacy locking and which are using the
- * gem_free_object_unlocked callback. It avoids all the locking checks and
- * locking overhead of drm_gem_object_put() and drm_gem_object_put_unlocked().
- *
- * Drivers should never call this directly in their code. Instead they should
- * wrap it up into a ``driver_gem_object_put(struct driver_gem_object *obj)``
- * wrapper function, and use that. Shared code should never call this, to
- * avoid breaking drivers by accident which still depend upon
- * &drm_device.struct_mutex locking.
+ * This releases a reference to @obj.
*/
static inline void
-__drm_gem_object_put(struct drm_gem_object *obj)
+drm_gem_object_put(struct drm_gem_object *obj)
{
- kref_put(&obj->refcount, drm_gem_object_free);
+ if (obj)
+ __drm_gem_object_put(obj);
}
-void drm_gem_object_put_unlocked(struct drm_gem_object *obj);
-void drm_gem_object_put(struct drm_gem_object *obj);
-
int drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep);
@@ -409,15 +466,13 @@ int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
struct ww_acquire_ctx *acquire_ctx);
void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
struct ww_acquire_ctx *acquire_ctx);
-int drm_gem_fence_array_add(struct xarray *fence_array,
- struct dma_fence *fence);
-int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
- struct drm_gem_object *obj,
- bool write);
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
u32 handle, u64 *offset);
-int drm_gem_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle);
+
+void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock);
+void drm_gem_lru_remove(struct drm_gem_object *obj);
+void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);
+unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
+ bool (*shrink)(struct drm_gem_object *obj));
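A shrinker sketch under stated assumptions: struct my_device, its embedded shrinker/lru members and my_gem_purge() are all hypothetical, and my_gem_purge() returns true once it has reclaimed an object's pages:

	static unsigned long
	my_shrinker_count(struct shrinker *s, struct shrink_control *sc)
	{
		struct my_device *mdev = container_of(s, struct my_device,
						      shrinker);

		/* lockless read of the page count kept by the LRU helper */
		return mdev->lru.count;
	}

	static unsigned long
	my_shrinker_scan(struct shrinker *s, struct shrink_control *sc)
	{
		struct my_device *mdev = container_of(s, struct my_device,
						      shrinker);

		return drm_gem_lru_scan(&mdev->lru, sc->nr_to_scan,
					my_gem_purge);
	}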
#endif /* __DRM_GEM_H__ */
diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h
new file mode 100644
index 000000000000..6e3319e9001a
--- /dev/null
+++ b/include/drm/drm_gem_atomic_helper.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __DRM_GEM_ATOMIC_HELPER_H__
+#define __DRM_GEM_ATOMIC_HELPER_H__
+
+#include <linux/iosys-map.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane.h>
+
+struct drm_simple_display_pipe;
+
+/*
+ * Plane Helpers
+ */
+
+int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state);
+int drm_gem_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+
+/*
+ * Helpers for planes with shadow buffers
+ */
+
+/**
+ * DRM_SHADOW_PLANE_MAX_WIDTH - Maximum width of a plane's shadow buffer in pixels
+ *
+ * For drivers with shadow planes, the maximum width of the framebuffer is
+ * usually independent of hardware limitations. Drivers can initialize struct
+ * drm_mode_config.max_width from DRM_SHADOW_PLANE_MAX_WIDTH.
+ */
+#define DRM_SHADOW_PLANE_MAX_WIDTH (4096u)
+
+/**
+ * DRM_SHADOW_PLANE_MAX_HEIGHT - Maximum height of a plane's shadow buffer in scanlines
+ *
+ * For drivers with shadow planes, the maximum height of the framebuffer is
+ * usually independent of hardware limitations. Drivers can initialize struct
+ * drm_mode_config.max_height from DRM_SHADOW_PLANE_MAX_HEIGHT.
+ */
+#define DRM_SHADOW_PLANE_MAX_HEIGHT (4096u)
+
+/**
+ * struct drm_shadow_plane_state - plane state for planes with shadow buffers
+ *
+ * For planes that use a shadow buffer, struct drm_shadow_plane_state
+ * provides the regular plane state plus mappings of the shadow buffer
+ * into kernel address space.
+ */
+struct drm_shadow_plane_state {
+ /** @base: plane state */
+ struct drm_plane_state base;
+
+ /* Transitional state - do not export or duplicate */
+
+ /**
+ * @map: Mappings of the plane's framebuffer BOs into kernel address space
+ *
+ * The memory mappings stored in map should be established in the plane's
+ * prepare_fb callback and removed in the cleanup_fb callback.
+ */
+ struct iosys_map map[DRM_FORMAT_MAX_PLANES];
+
+ /**
+ * @data: Address of each framebuffer BO's data
+ *
+ * The address of the data stored in each mapping. This is different
+ * for framebuffers with non-zero offset fields.
+ */
+ struct iosys_map data[DRM_FORMAT_MAX_PLANES];
+};
+
+/**
+ * to_drm_shadow_plane_state - upcasts from struct drm_plane_state
+ * @state: the plane state
+ */
+static inline struct drm_shadow_plane_state *
+to_drm_shadow_plane_state(struct drm_plane_state *state)
+{
+ return container_of(state, struct drm_shadow_plane_state, base);
+}
+
+void __drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane,
+ struct drm_shadow_plane_state *new_shadow_plane_state);
+void __drm_gem_destroy_shadow_plane_state(struct drm_shadow_plane_state *shadow_plane_state);
+void __drm_gem_reset_shadow_plane(struct drm_plane *plane,
+ struct drm_shadow_plane_state *shadow_plane_state);
+
+void drm_gem_reset_shadow_plane(struct drm_plane *plane);
+struct drm_plane_state *drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane);
+void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane,
+ struct drm_plane_state *plane_state);
+
+/**
+ * DRM_GEM_SHADOW_PLANE_FUNCS -
+ * Initializes struct drm_plane_funcs for shadow-buffered planes
+ *
+ * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This
+ * macro initializes struct drm_plane_funcs to use the respective helper functions.
+ */
+#define DRM_GEM_SHADOW_PLANE_FUNCS \
+ .reset = drm_gem_reset_shadow_plane, \
+ .atomic_duplicate_state = drm_gem_duplicate_shadow_plane_state, \
+ .atomic_destroy_state = drm_gem_destroy_shadow_plane_state
+
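Usage sketch (my_plane_funcs is hypothetical); the macro slots into an otherwise ordinary &struct drm_plane_funcs initializer:

	static const struct drm_plane_funcs my_plane_funcs = {
		.update_plane = drm_atomic_helper_update_plane,
		.disable_plane = drm_atomic_helper_disable_plane,
		DRM_GEM_SHADOW_PLANE_FUNCS,
	};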
+int drm_gem_prepare_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state);
+void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state);
+
+/**
+ * DRM_GEM_SHADOW_PLANE_HELPER_FUNCS -
+ * Initializes struct drm_plane_helper_funcs for shadow-buffered planes
+ *
+ * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This
+ * macro initializes struct drm_plane_helper_funcs to use the respective helper
+ * functions.
+ */
+#define DRM_GEM_SHADOW_PLANE_HELPER_FUNCS \
+ .prepare_fb = drm_gem_prepare_shadow_fb, \
+ .cleanup_fb = drm_gem_cleanup_shadow_fb
+
+int drm_gem_simple_kms_prepare_shadow_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+void drm_gem_simple_kms_cleanup_shadow_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+void drm_gem_simple_kms_reset_shadow_plane(struct drm_simple_display_pipe *pipe);
+struct drm_plane_state *
+drm_gem_simple_kms_duplicate_shadow_plane_state(struct drm_simple_display_pipe *pipe);
+void drm_gem_simple_kms_destroy_shadow_plane_state(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+
+/**
+ * DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS -
+ * Initializes struct drm_simple_display_pipe_funcs for shadow-buffered planes
+ *
+ * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This
+ * macro initializes struct drm_simple_display_pipe_funcs to use the respective
+ * functions.
+ */
+#define DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS \
+ .prepare_fb = drm_gem_simple_kms_prepare_shadow_fb, \
+ .cleanup_fb = drm_gem_simple_kms_cleanup_shadow_fb, \
+ .reset_plane = drm_gem_simple_kms_reset_shadow_plane, \
+ .duplicate_plane_state = drm_gem_simple_kms_duplicate_shadow_plane_state, \
+ .destroy_plane_state = drm_gem_simple_kms_destroy_shadow_plane_state
+
+#endif /* __DRM_GEM_ATOMIC_HELPER_H__ */
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
deleted file mode 100644
index 947ac95eb24a..000000000000
--- a/include/drm/drm_gem_cma_helper.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DRM_GEM_CMA_HELPER_H__
-#define __DRM_GEM_CMA_HELPER_H__
-
-#include <drm/drm_file.h>
-#include <drm/drm_ioctl.h>
-#include <drm/drm_gem.h>
-
-struct drm_mode_create_dumb;
-
-/**
- * struct drm_gem_cma_object - GEM object backed by CMA memory allocations
- * @base: base GEM object
- * @paddr: physical address of the backing memory
- * @sgt: scatter/gather table for imported PRIME buffers. The table can have
- * more than one entry but they are guaranteed to have contiguous
- * DMA addresses.
- * @vaddr: kernel virtual address of the backing memory
- */
-struct drm_gem_cma_object {
- struct drm_gem_object base;
- dma_addr_t paddr;
- struct sg_table *sgt;
-
- /* For objects with DMA memory allocated by GEM CMA */
- void *vaddr;
-};
-
-#define to_drm_gem_cma_obj(gem_obj) \
- container_of(gem_obj, struct drm_gem_cma_object, base)
-
-#ifndef CONFIG_MMU
-#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
- .get_unmapped_area = drm_gem_cma_get_unmapped_area,
-#else
-#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS
-#endif
-
-/**
- * DEFINE_DRM_GEM_CMA_FOPS() - macro to generate file operations for CMA drivers
- * @name: name for the generated structure
- *
- * This macro autogenerates a suitable &struct file_operations for CMA based
- * drivers, which can be assigned to &drm_driver.fops. Note that this structure
- * cannot be shared between drivers, because it contains a reference to the
- * current module using THIS_MODULE.
- *
- * Note that the declaration is already marked as static - if you need a
- * non-static version of this you're probably doing it wrong and will break the
- * THIS_MODULE reference by accident.
- */
-#define DEFINE_DRM_GEM_CMA_FOPS(name) \
- static const struct file_operations name = {\
- .owner = THIS_MODULE,\
- .open = drm_open,\
- .release = drm_release,\
- .unlocked_ioctl = drm_ioctl,\
- .compat_ioctl = drm_compat_ioctl,\
- .poll = drm_poll,\
- .read = drm_read,\
- .llseek = noop_llseek,\
- .mmap = drm_gem_cma_mmap,\
- DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
- }
-
-/* free GEM object */
-void drm_gem_cma_free_object(struct drm_gem_object *gem_obj);
-
-/* create memory region for DRM framebuffer */
-int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
- struct drm_device *drm,
- struct drm_mode_create_dumb *args);
-
-/* create memory region for DRM framebuffer */
-int drm_gem_cma_dumb_create(struct drm_file *file_priv,
- struct drm_device *drm,
- struct drm_mode_create_dumb *args);
-
-/* set vm_flags and we can change the VM attribute to other one at here */
-int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
-
-/* allocate physical memory */
-struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
- size_t size);
-
-extern const struct vm_operations_struct drm_gem_cma_vm_ops;
-
-#ifndef CONFIG_MMU
-unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
- unsigned long addr,
- unsigned long len,
- unsigned long pgoff,
- unsigned long flags);
-#endif
-
-void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
- const struct drm_gem_object *obj);
-
-struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj);
-struct drm_gem_object *
-drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt);
-int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
-void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj);
-void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
-
-struct drm_gem_object *
-drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size);
-
-/**
- * DRM_GEM_CMA_VMAP_DRIVER_OPS - CMA GEM driver operations ensuring a virtual
- * address on the buffer
- *
- * This macro provides a shortcut for setting the default GEM operations in the
- * &drm_driver structure for drivers that need the virtual address also on
- * imported buffers.
- */
-#define DRM_GEM_CMA_VMAP_DRIVER_OPS \
- .gem_create_object = drm_cma_gem_create_object_default_funcs, \
- .dumb_create = drm_gem_cma_dumb_create, \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table_vmap, \
- .gem_prime_mmap = drm_gem_prime_mmap
-
-struct drm_gem_object *
-drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt);
-
-#endif /* __DRM_GEM_CMA_HELPER_H__ */
diff --git a/include/drm/drm_gem_dma_helper.h b/include/drm/drm_gem_dma_helper.h
new file mode 100644
index 000000000000..8a043235dad8
--- /dev/null
+++ b/include/drm/drm_gem_dma_helper.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DRM_GEM_DMA_HELPER_H__
+#define __DRM_GEM_DMA_HELPER_H__
+
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_gem.h>
+
+struct drm_mode_create_dumb;
+
+/**
+ * struct drm_gem_dma_object - GEM object backed by DMA memory allocations
+ * @base: base GEM object
+ * @dma_addr: DMA address of the backing memory
+ * @sgt: scatter/gather table for imported PRIME buffers. The table can have
+ * more than one entry but they are guaranteed to have contiguous
+ * DMA addresses.
+ * @vaddr: kernel virtual address of the backing memory
+ * @map_noncoherent: if true, the GEM object is backed by non-coherent memory
+ */
+struct drm_gem_dma_object {
+ struct drm_gem_object base;
+ dma_addr_t dma_addr;
+ struct sg_table *sgt;
+
+ /* For objects with DMA memory allocated by GEM DMA */
+ void *vaddr;
+
+ bool map_noncoherent;
+};
+
+#define to_drm_gem_dma_obj(gem_obj) \
+ container_of(gem_obj, struct drm_gem_dma_object, base)
+
+struct drm_gem_dma_object *drm_gem_dma_create(struct drm_device *drm,
+ size_t size);
+void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj);
+void drm_gem_dma_print_info(const struct drm_gem_dma_object *dma_obj,
+ struct drm_printer *p, unsigned int indent);
+struct sg_table *drm_gem_dma_get_sg_table(struct drm_gem_dma_object *dma_obj);
+int drm_gem_dma_vmap(struct drm_gem_dma_object *dma_obj,
+ struct iosys_map *map);
+int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *vma);
+
+extern const struct vm_operations_struct drm_gem_dma_vm_ops;
+
+/*
+ * GEM object functions
+ */
+
+/**
+ * drm_gem_dma_object_free - GEM object function for drm_gem_dma_free()
+ * @obj: GEM object to free
+ *
+ * This function wraps drm_gem_dma_free(). Drivers that employ the DMA helpers
+ * should use it as their &drm_gem_object_funcs.free handler.
+ */
+static inline void drm_gem_dma_object_free(struct drm_gem_object *obj)
+{
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+
+ drm_gem_dma_free(dma_obj);
+}
+
+/**
+ * drm_gem_dma_object_print_info() - Print &drm_gem_dma_object info for debugfs
+ * @p: DRM printer
+ * @indent: Tab indentation level
+ * @obj: GEM object
+ *
+ * This function wraps drm_gem_dma_print_info(). Drivers that employ the DMA helpers
+ * should use this function as their &drm_gem_object_funcs.print_info handler.
+ */
+static inline void drm_gem_dma_object_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *obj)
+{
+ const struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+
+ drm_gem_dma_print_info(dma_obj, p, indent);
+}
+
+/**
+ * drm_gem_dma_object_get_sg_table - GEM object function for drm_gem_dma_get_sg_table()
+ * @obj: GEM object
+ *
+ * This function wraps drm_gem_dma_get_sg_table(). Drivers that employ the DMA helpers should
+ * use it as their &drm_gem_object_funcs.get_sg_table handler.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or NULL on failure.
+ */
+static inline struct sg_table *drm_gem_dma_object_get_sg_table(struct drm_gem_object *obj)
+{
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+
+ return drm_gem_dma_get_sg_table(dma_obj);
+}
+
+/**
+ * drm_gem_dma_object_vmap - GEM object function for drm_gem_dma_vmap()
+ * @obj: GEM object
+ * @map: Returns the kernel virtual address of the DMA GEM object's backing store.
+ *
+ * This function wraps drm_gem_dma_vmap(). Drivers that employ the DMA helpers should
+ * use it as their &drm_gem_object_funcs.vmap handler.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_gem_dma_object_vmap(struct drm_gem_object *obj,
+ struct iosys_map *map)
+{
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+
+ return drm_gem_dma_vmap(dma_obj, map);
+}
+
+/**
+ * drm_gem_dma_object_mmap - GEM object function for drm_gem_dma_mmap()
+ * @obj: GEM object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function wraps drm_gem_dma_mmap(). Drivers that employ the DMA helpers should
+ * use it as their &drm_gem_object_funcs.mmap handler.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_gem_dma_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+
+ return drm_gem_dma_mmap(dma_obj, vma);
+}
+
+/*
+ * Driver ops
+ */
+
+/* create memory region for DRM framebuffer */
+int drm_gem_dma_dumb_create_internal(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
+
+/* create memory region for DRM framebuffer */
+int drm_gem_dma_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
+
+struct drm_gem_object *
+drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+
+/**
+ * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE - DMA GEM driver operations
+ * @dumb_create_func: callback function for .dumb_create
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure.
+ *
+ * This macro is a variant of DRM_GEM_DMA_DRIVER_OPS for drivers that
+ * override the default implementation of &struct drm_driver.dumb_create. Use
+ * DRM_GEM_DMA_DRIVER_OPS if possible. Drivers that require a virtual address
+ * on imported buffers should use
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead.
+ */
+#define DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(dumb_create_func) \
+ .dumb_create = (dumb_create_func), \
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
+ .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table, \
+ .gem_prime_mmap = drm_gem_prime_mmap
+
+/**
+ * DRM_GEM_DMA_DRIVER_OPS - DMA GEM driver operations
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure.
+ *
+ * Drivers that come with their own implementation of
+ * &struct drm_driver.dumb_create should use
+ * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE() instead. Use
+ * DRM_GEM_DMA_DRIVER_OPS if possible. Drivers that require a virtual address
+ * on imported buffers should use DRM_GEM_DMA_DRIVER_OPS_VMAP instead.
+ */
+#define DRM_GEM_DMA_DRIVER_OPS \
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_gem_dma_dumb_create)
+
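Usage sketch (driver name, description and fops are hypothetical; see DEFINE_DRM_GEM_DMA_FOPS() below for generating the fops):

	static const struct drm_driver my_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
		.name = "my-drm",
		.desc = "hypothetical DMA-backed driver",
		.fops = &my_fops,
		DRM_GEM_DMA_DRIVER_OPS,
	};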
+/**
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE - DMA GEM driver operations
+ * ensuring a virtual address on the buffer
+ * @dumb_create_func: callback function for .dumb_create
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure for drivers that need the virtual address also on
+ * imported buffers.
+ *
+ * This macro is a variant of DRM_GEM_DMA_DRIVER_OPS_VMAP for drivers that
+ * override the default implementation of &struct drm_driver.dumb_create. Use
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a
+ * virtual address on imported buffers should use
+ * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE() instead.
+ */
+#define DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(dumb_create_func) \
+ .dumb_create = dumb_create_func, \
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
+ .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table_vmap, \
+ .gem_prime_mmap = drm_gem_prime_mmap
+
+/**
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP - DMA GEM driver operations ensuring a virtual
+ * address on the buffer
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure for drivers that also need a virtual address on
+ * imported buffers.
+ *
+ * Drivers that come with their own implementation of
+ * &struct drm_driver.dumb_create should use
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead. Use
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a
+ * virtual address on imported buffers should use DRM_GEM_DMA_DRIVER_OPS
+ * instead.
+ */
+#define DRM_GEM_DMA_DRIVER_OPS_VMAP \
+ DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(drm_gem_dma_dumb_create)
+
+struct drm_gem_object *
+drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *drm,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+
+/*
+ * File ops
+ */
+
+#ifndef CONFIG_MMU
+unsigned long drm_gem_dma_get_unmapped_area(struct file *filp,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags);
+#define DRM_GEM_DMA_UNMAPPED_AREA_FOPS \
+ .get_unmapped_area = drm_gem_dma_get_unmapped_area,
+#else
+#define DRM_GEM_DMA_UNMAPPED_AREA_FOPS
+#endif
+
+/**
+ * DEFINE_DRM_GEM_DMA_FOPS() - macro to generate file operations for DMA drivers
+ * @name: name for the generated structure
+ *
+ * This macro autogenerates a suitable &struct file_operations for DMA based
+ * drivers, which can be assigned to &drm_driver.fops. Note that this structure
+ * cannot be shared between drivers, because it contains a reference to the
+ * current module using THIS_MODULE.
+ *
+ * Note that the declaration is already marked as static - if you need a
+ * non-static version of this you're probably doing it wrong and will break the
+ * THIS_MODULE reference by accident.
+ */
+#define DEFINE_DRM_GEM_DMA_FOPS(name) \
+ static const struct file_operations name = {\
+ .owner = THIS_MODULE,\
+ .open = drm_open,\
+ .release = drm_release,\
+ .unlocked_ioctl = drm_ioctl,\
+ .compat_ioctl = drm_compat_ioctl,\
+ .poll = drm_poll,\
+ .read = drm_read,\
+ .llseek = noop_llseek,\
+ .mmap = drm_gem_mmap,\
+ DRM_GEM_DMA_UNMAPPED_AREA_FOPS \
+ }
+
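Continuing the sketch from above (again with illustrative "my_dma" names), the generated file operations are then wired into the driver structure; the definition must live in the driver module so the THIS_MODULE reference is correct:

	DEFINE_DRM_GEM_DMA_FOPS(my_dma_fops);

	static const struct drm_driver my_dma_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET,
		DRM_GEM_DMA_DRIVER_OPS,
		.fops = &my_dma_fops,	/* defined in this module */
	};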
+#endif /* __DRM_GEM_DMA_HELPER_H__ */
diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h
index d9f13fd25b0a..d302521f3dd4 100644
--- a/include/drm/drm_gem_framebuffer_helper.h
+++ b/include/drm/drm_gem_framebuffer_helper.h
@@ -1,6 +1,10 @@
#ifndef __DRM_GEM_FB_HELPER_H__
#define __DRM_GEM_FB_HELPER_H__
+#include <linux/dma-buf.h>
+#include <linux/iosys-map.h>
+
+struct drm_afbc_framebuffer;
struct drm_device;
struct drm_fb_helper_surface_size;
struct drm_file;
@@ -8,9 +12,8 @@ struct drm_framebuffer;
struct drm_framebuffer_funcs;
struct drm_gem_object;
struct drm_mode_fb_cmd2;
-struct drm_plane;
-struct drm_plane_state;
-struct drm_simple_display_pipe;
+
+#define AFBC_VENDOR_AND_TYPE_MASK GENMASK_ULL(63, 52)
struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb,
unsigned int plane);
@@ -18,6 +21,11 @@ void drm_gem_fb_destroy(struct drm_framebuffer *fb);
int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file,
unsigned int *handle);
+int drm_gem_fb_init_with_funcs(struct drm_device *dev,
+ struct drm_framebuffer *fb,
+ struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ const struct drm_framebuffer_funcs *funcs);
struct drm_framebuffer *
drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd,
@@ -29,8 +37,17 @@ struct drm_framebuffer *
drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd);
-int drm_gem_fb_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *state);
-int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state);
+int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map,
+ struct iosys_map *data);
+void drm_gem_fb_vunmap(struct drm_framebuffer *fb, struct iosys_map *map);
+int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir);
+void drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir);
+
+#define drm_is_afbc(modifier) \
+ (((modifier) & AFBC_VENDOR_AND_TYPE_MASK) == DRM_FORMAT_MOD_ARM_AFBC(0))
+
+int drm_gem_fb_afbc_init(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_afbc_framebuffer *afbc_fb);
+
#endif
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 294b2931c4cc..a2201b2488c5 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -98,25 +98,28 @@ struct drm_gem_shmem_object {
unsigned int vmap_use_count;
/**
- * @map_cached: map object cached (instead of using writecombine).
+ * @map_wc: map object write-combined (instead of using shmem defaults).
*/
- bool map_cached;
+ bool map_wc;
};
#define to_drm_gem_shmem_obj(obj) \
container_of(obj, struct drm_gem_shmem_object, base)
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
-void drm_gem_shmem_free_object(struct drm_gem_object *obj);
+void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
-int drm_gem_shmem_pin(struct drm_gem_object *obj);
-void drm_gem_shmem_unpin(struct drm_gem_object *obj);
-void *drm_gem_shmem_vmap(struct drm_gem_object *obj);
-void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr);
+int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem);
+int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map);
+void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map);
+int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma);
-int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv);
+int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv);
static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
{
@@ -125,28 +128,160 @@ static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem
!shmem->base.dma_buf && !shmem->base.import_attach;
}
-void drm_gem_shmem_purge_locked(struct drm_gem_object *obj);
-bool drm_gem_shmem_purge(struct drm_gem_object *obj);
+void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem);
+bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
-struct drm_gem_shmem_object *
-drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
- struct drm_device *dev, size_t size,
- uint32_t *handle);
-int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
- struct drm_mode_create_dumb *args);
+struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem);
+struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem);
+
+void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
+ struct drm_printer *p, unsigned int indent);
+
+extern const struct vm_operations_struct drm_gem_shmem_vm_ops;
+
+/*
+ * GEM object functions
+ */
+
+/**
+ * drm_gem_shmem_object_free - GEM object function for drm_gem_shmem_free()
+ * @obj: GEM object to free
+ *
+ * This function wraps drm_gem_shmem_free(). Drivers that employ the shmem helpers
+ * should use it as their &drm_gem_object_funcs.free handler.
+ */
+static inline void drm_gem_shmem_object_free(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ drm_gem_shmem_free(shmem);
+}
+
+/**
+ * drm_gem_shmem_object_print_info() - Print &drm_gem_shmem_object info for debugfs
+ * @p: DRM printer
+ * @indent: Tab indentation level
+ * @obj: GEM object
+ *
+ * This function wraps drm_gem_shmem_print_info(). Drivers that employ the shmem helpers should
+ * use this function as their &drm_gem_object_funcs.print_info handler.
+ */
+static inline void drm_gem_shmem_object_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *obj)
+{
+ const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ drm_gem_shmem_print_info(shmem, p, indent);
+}
+
+/**
+ * drm_gem_shmem_object_pin - GEM object function for drm_gem_shmem_pin()
+ * @obj: GEM object
+ *
+ * This function wraps drm_gem_shmem_pin(). Drivers that employ the shmem helpers should
+ * use it as their &drm_gem_object_funcs.pin handler.
+ */
+static inline int drm_gem_shmem_object_pin(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ return drm_gem_shmem_pin(shmem);
+}
+
+/**
+ * drm_gem_shmem_object_unpin - GEM object function for drm_gem_shmem_unpin()
+ * @obj: GEM object
+ *
+ * This function wraps drm_gem_shmem_unpin(). Drivers that employ the shmem helpers should
+ * use it as their &drm_gem_object_funcs.unpin handler.
+ */
+static inline void drm_gem_shmem_object_unpin(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ drm_gem_shmem_unpin(shmem);
+}
-int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+/**
+ * drm_gem_shmem_object_get_sg_table - GEM object function for drm_gem_shmem_get_sg_table()
+ * @obj: GEM object
+ *
+ * This function wraps drm_gem_shmem_get_sg_table(). Drivers that employ the shmem helpers should
+ * use it as their &drm_gem_object_funcs.get_sg_table handler.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or error pointer on failure.
+ */
+static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
-void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
- const struct drm_gem_object *obj);
+ return drm_gem_shmem_get_sg_table(shmem);
+}
+
+/**
+ * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap()
+ * @obj: GEM object
+ * @map: Returns the kernel virtual address of the SHMEM GEM object's backing store.
+ *
+ * This function wraps drm_gem_shmem_vmap(). Drivers that employ the shmem helpers should
+ * use it as their &drm_gem_object_funcs.vmap handler.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj,
+ struct iosys_map *map)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ return drm_gem_shmem_vmap(shmem, map);
+}
+
+/**
+ * drm_gem_shmem_object_vunmap - GEM object function for drm_gem_shmem_vunmap()
+ * @obj: GEM object
+ * @map: Kernel virtual address where the SHMEM GEM object was mapped
+ *
+ * This function wraps drm_gem_shmem_vunmap(). Drivers that employ the shmem helpers should
+ * use it as their &drm_gem_object_funcs.vunmap handler.
+ */
+static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj,
+ struct iosys_map *map)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ drm_gem_shmem_vunmap(shmem, map);
+}
+
+/**
+ * drm_gem_shmem_object_mmap - GEM object function for drm_gem_shmem_mmap()
+ * @obj: GEM object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function wraps drm_gem_shmem_mmap(). Drivers that employ the shmem helpers should
+ * use it as their &drm_gem_object_funcs.mmap handler.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_gem_shmem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ return drm_gem_shmem_mmap(shmem, vma);
+}
+
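Taken together, these wrappers are meant to be plugged into a driver's &drm_gem_object_funcs. A minimal sketch (the "my_shmem" name is hypothetical):

	#include <drm/drm_gem.h>
	#include <drm/drm_gem_shmem_helper.h>

	static const struct drm_gem_object_funcs my_shmem_gem_funcs = {
		.free = drm_gem_shmem_object_free,
		.print_info = drm_gem_shmem_object_print_info,
		.pin = drm_gem_shmem_object_pin,
		.unpin = drm_gem_shmem_object_unpin,
		.get_sg_table = drm_gem_shmem_object_get_sg_table,
		.vmap = drm_gem_shmem_object_vmap,
		.vunmap = drm_gem_shmem_object_vunmap,
		.mmap = drm_gem_shmem_object_mmap,
		.vm_ops = &drm_gem_shmem_vm_ops,
	};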
+/*
+ * Driver ops
+ */
-struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
-
-struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj);
+int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
/**
* DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations
diff --git a/include/drm/drm_gem_ttm_helper.h b/include/drm/drm_gem_ttm_helper.h
index 118cef76f84f..4c003b4f173e 100644
--- a/include/drm/drm_gem_ttm_helper.h
+++ b/include/drm/drm_gem_ttm_helper.h
@@ -3,19 +3,28 @@
#ifndef DRM_GEM_TTM_HELPER_H
#define DRM_GEM_TTM_HELPER_H
-#include <linux/kernel.h>
+#include <linux/container_of.h>
-#include <drm/drm_gem.h>
#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
+struct iosys_map;
+
#define drm_gem_ttm_of_gem(gem_obj) \
container_of(gem_obj, struct ttm_buffer_object, base)
void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_gem_object *gem);
+int drm_gem_ttm_vmap(struct drm_gem_object *gem,
+ struct iosys_map *map);
+void drm_gem_ttm_vunmap(struct drm_gem_object *gem,
+ struct iosys_map *map);
int drm_gem_ttm_mmap(struct drm_gem_object *gem,
struct vm_area_struct *vma);
+int drm_gem_ttm_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+
#endif
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index 573e9fd109bf..c083a1d71cf4 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -5,12 +5,14 @@
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_modes.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <linux/kernel.h> /* for container_of() */
+#include <linux/container_of.h>
+#include <linux/iosys-map.h>
struct drm_mode_create_dumb;
struct drm_plane;
@@ -19,9 +21,9 @@ struct drm_simple_display_pipe;
struct filp;
struct vm_area_struct;
-#define DRM_GEM_VRAM_PL_FLAG_VRAM TTM_PL_FLAG_VRAM
-#define DRM_GEM_VRAM_PL_FLAG_SYSTEM TTM_PL_FLAG_SYSTEM
-#define DRM_GEM_VRAM_PL_FLAG_TOPDOWN TTM_PL_FLAG_TOPDOWN
+#define DRM_GEM_VRAM_PL_FLAG_SYSTEM (1 << 0)
+#define DRM_GEM_VRAM_PL_FLAG_VRAM (1 << 1)
+#define DRM_GEM_VRAM_PL_FLAG_TOPDOWN (1 << 2)
/*
* Buffer-object helpers
@@ -29,13 +31,11 @@ struct vm_area_struct;
/**
* struct drm_gem_vram_object - GEM object backed by VRAM
- * @gem: GEM object
* @bo: TTM buffer object
- * @kmap: Mapping information for @bo
+ * @map: Mapping information for @bo
* @placement: TTM placement information. Supported placements are \
%TTM_PL_VRAM and %TTM_PL_SYSTEM
* @placements: TTM placement information.
- * @pin_count: Pin counter
*
* The type struct drm_gem_vram_object represents a GEM object that is
* backed by VRAM. It can be used for simple framebuffer devices with
@@ -51,26 +51,24 @@ struct vm_area_struct;
*/
struct drm_gem_vram_object {
struct ttm_buffer_object bo;
- struct ttm_bo_kmap_obj kmap;
+ struct iosys_map map;
/**
- * @kmap_use_count:
+ * @vmap_use_count:
*
* Reference count on the virtual address.
* The address is unmapped when the count reaches zero.
*/
- unsigned int kmap_use_count;
+ unsigned int vmap_use_count;
/* Supported placements are %TTM_PL_VRAM and %TTM_PL_SYSTEM */
struct ttm_placement placement;
struct ttm_place placements[2];
-
- int pin_count;
};
/**
- * Returns the container of type &struct drm_gem_vram_object
- * for field bo.
+ * drm_gem_vram_of_bo - Returns the container of type
+ * &struct drm_gem_vram_object for field bo.
* @bo: the VRAM buffer object
* Returns: The containing GEM VRAM object
*/
@@ -81,8 +79,8 @@ static inline struct drm_gem_vram_object *drm_gem_vram_of_bo(
}
/**
- * Returns the container of type &struct drm_gem_vram_object
- * for field gem.
+ * drm_gem_vram_of_gem - Returns the container of type
+ * &struct drm_gem_vram_object for field gem.
* @gem: the GEM object
* Returns: The containing GEM VRAM object
*/
@@ -96,15 +94,12 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
size_t size,
unsigned long pg_align);
void drm_gem_vram_put(struct drm_gem_vram_object *gbo);
-u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo);
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo);
int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag);
int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo);
-void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
- bool *is_iomem);
-void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo);
-void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo);
-void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr);
+int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map);
+void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo,
+ struct iosys_map *map);
int drm_gem_vram_fill_create_dumb(struct drm_file *file,
struct drm_device *dev,
@@ -119,9 +114,6 @@ int drm_gem_vram_fill_create_dumb(struct drm_file *file,
int drm_gem_vram_driver_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset);
/*
* Helpers for struct drm_plane_helper_funcs
@@ -133,6 +125,18 @@ void
drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state);
+/**
+ * DRM_GEM_VRAM_PLANE_HELPER_FUNCS -
+ * Initializes struct drm_plane_helper_funcs for VRAM handling
+ *
+ * Drivers may use GEM BOs as VRAM helpers for the framebuffer memory. This
+ * macro initializes struct drm_plane_helper_funcs to use the respective helper
+ * functions.
+ */
+#define DRM_GEM_VRAM_PLANE_HELPER_FUNCS \
+ .prepare_fb = drm_gem_vram_plane_helper_prepare_fb, \
+ .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb
+
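A sketch of the intended use; the surrounding helper structure and its name are illustrative only:

	#include <drm/drm_gem_vram_helper.h>
	#include <drm/drm_modeset_helper_vtables.h>

	static const struct drm_plane_helper_funcs my_plane_helper_funcs = {
		/* expands to .prepare_fb and .cleanup_fb */
		DRM_GEM_VRAM_PLANE_HELPER_FUNCS,
	};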
/*
* Helpers for struct drm_simple_display_pipe_funcs
*/
@@ -155,7 +159,7 @@ void drm_gem_vram_simple_display_pipe_cleanup_fb(
#define DRM_GEM_VRAM_DRIVER \
.debugfs_init = drm_vram_mm_debugfs_init, \
.dumb_create = drm_gem_vram_driver_dumb_create, \
- .dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset, \
+ .dumb_map_offset = drm_gem_ttm_dumb_map_offset, \
.gem_prime_mmap = drm_gem_prime_mmap
/*
@@ -178,31 +182,38 @@ struct drm_vram_mm {
uint64_t vram_base;
size_t vram_size;
- struct ttm_bo_device bdev;
+ struct ttm_device bdev;
};
/**
* drm_vram_mm_of_bdev() - \
- Returns the container of type &struct ttm_bo_device for field bdev.
+ Returns the container of type &struct ttm_device for field bdev.
* @bdev: the TTM BO device
*
* Returns:
* The containing instance of &struct drm_vram_mm
*/
static inline struct drm_vram_mm *drm_vram_mm_of_bdev(
- struct ttm_bo_device *bdev)
+ struct ttm_device *bdev)
{
return container_of(bdev, struct drm_vram_mm, bdev);
}
-int drm_vram_mm_debugfs_init(struct drm_minor *minor);
+void drm_vram_mm_debugfs_init(struct drm_minor *minor);
/*
* Helpers for integration with struct drm_device
*/
-struct drm_vram_mm *drm_vram_helper_alloc_mm(
- struct drm_device *dev, uint64_t vram_base, size_t vram_size);
-void drm_vram_helper_release_mm(struct drm_device *dev);
+int drmm_vram_helper_init(struct drm_device *dev, uint64_t vram_base,
+ size_t vram_size);
+
+/*
+ * Mode-config helpers
+ */
+
+enum drm_mode_status
+drm_vram_helper_mode_valid(struct drm_device *dev,
+ const struct drm_display_mode *mode);
#endif
diff --git a/include/drm/drm_hashtab.h b/include/drm/drm_hashtab.h
deleted file mode 100644
index bb95ff011baf..000000000000
--- a/include/drm/drm_hashtab.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- **************************************************************************/
-/*
- * Simple open hash tab implementation.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#ifndef DRM_HASHTAB_H
-#define DRM_HASHTAB_H
-
-#include <linux/list.h>
-
-#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
-
-struct drm_hash_item {
- struct hlist_node head;
- unsigned long key;
-};
-
-struct drm_open_hash {
- struct hlist_head *table;
- u8 order;
-};
-
-int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
-int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
-int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
- unsigned long seed, int bits, int shift,
- unsigned long add);
-int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
-
-void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
-int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
-int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
-void drm_ht_remove(struct drm_open_hash *ht);
-
-/*
- * RCU-safe interface
- *
- * The user of this API needs to make sure that two or more instances of the
- * hash table manipulation functions are never run simultaneously.
- * The lookup function drm_ht_find_item_rcu may, however, run simultaneously
- * with any of the manipulation functions as long as it's called from within
- * an RCU read-locked section.
- */
-#define drm_ht_insert_item_rcu drm_ht_insert_item
-#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please
-#define drm_ht_remove_key_rcu drm_ht_remove_key
-#define drm_ht_remove_item_rcu drm_ht_remove_item
-#define drm_ht_find_item_rcu drm_ht_find_item
-
-#endif
diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h
index 10100a4bbe2a..6ed61c371f6c 100644
--- a/include/drm/drm_ioctl.h
+++ b/include/drm/drm_ioctl.h
@@ -68,6 +68,7 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
unsigned long arg);
#define DRM_IOCTL_NR(n) _IOC_NR(n)
+#define DRM_IOCTL_TYPE(n) _IOC_TYPE(n)
#define DRM_MAJOR 226
/**
@@ -166,7 +167,6 @@ struct drm_ioctl_desc {
.name = #ioctl \
}
-int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
long drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long drm_ioctl_kernel(struct file *, drm_ioctl_t, void *, u32);
#ifdef CONFIG_COMPAT
diff --git a/include/drm/drm_irq.h b/include/drm/drm_irq.h
deleted file mode 100644
index d77f6e65b1c6..000000000000
--- a/include/drm/drm_irq.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright 2016 Intel Corp.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _DRM_IRQ_H_
-#define _DRM_IRQ_H_
-
-struct drm_device;
-
-int drm_irq_install(struct drm_device *dev, int irq);
-int drm_irq_uninstall(struct drm_device *dev);
-
-#endif
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
index 5745710453c8..0fc85418aad8 100644
--- a/include/drm/drm_legacy.h
+++ b/include/drm/drm_legacy.h
@@ -33,9 +33,10 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/agp_backend.h>
+
#include <drm/drm.h>
#include <drm/drm_auth.h>
-#include <drm/drm_hashtab.h>
struct drm_device;
struct drm_driver;
@@ -49,6 +50,20 @@ struct pci_driver;
* you're doing it terribly wrong.
*/
+/*
+ * Hash-table Support
+ */
+
+struct drm_hash_item {
+ struct hlist_node head;
+ unsigned long key;
+};
+
+struct drm_open_hash {
+ struct hlist_head *table;
+ u8 order;
+};
+
/**
* DMA buffer.
*/
@@ -136,7 +151,7 @@ struct drm_sg_mem {
* Kernel side of a mapping
*/
struct drm_local_map {
- resource_size_t offset; /**< Requested physical address (0 for SAREA)*/
+ dma_addr_t offset; /**< Requested physical address (0 for SAREA)*/
unsigned long size; /**< Requested physical size (bytes) */
enum drm_map_type type; /**< Type of memory to map */
enum drm_map_flags flags; /**< Flags */
@@ -190,34 +205,124 @@ do { \
void drm_legacy_idlelock_take(struct drm_lock_data *lock);
void drm_legacy_idlelock_release(struct drm_lock_data *lock);
+/* drm_irq.c */
+int drm_legacy_irq_uninstall(struct drm_device *dev);
+
/* drm_pci.c */
#ifdef CONFIG_PCI
-void __drm_legacy_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
-int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
-void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
+int drm_legacy_pci_init(const struct drm_driver *driver,
+ struct pci_driver *pdriver);
+void drm_legacy_pci_exit(const struct drm_driver *driver,
+ struct pci_driver *pdriver);
#else
-static inline void __drm_legacy_pci_free(struct drm_device *dev,
- drm_dma_handle_t *dmah)
+static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev,
+ size_t size, size_t align)
+{
+ return NULL;
+}
+
+static inline void drm_pci_free(struct drm_device *dev,
+ struct drm_dma_handle *dmah)
{
}
-static inline int drm_legacy_pci_init(struct drm_driver *driver,
+static inline int drm_legacy_pci_init(const struct drm_driver *driver,
struct pci_driver *pdriver)
{
return -EINVAL;
}
-static inline void drm_legacy_pci_exit(struct drm_driver *driver,
+static inline void drm_legacy_pci_exit(const struct drm_driver *driver,
struct pci_driver *pdriver)
{
}
#endif
+/*
+ * AGP Support
+ */
+
+struct drm_agp_head {
+ struct agp_kern_info agp_info;
+ struct list_head memory;
+ unsigned long mode;
+ struct agp_bridge_data *bridge;
+ int enabled;
+ int acquired;
+ unsigned long base;
+ int agp_mtrr;
+ int cant_use_aperture;
+ unsigned long page_mask;
+};
+
+#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP)
+struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev);
+int drm_legacy_agp_acquire(struct drm_device *dev);
+int drm_legacy_agp_release(struct drm_device *dev);
+int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
+int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info);
+int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
+int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
+int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
+int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
+#else
+static inline struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev)
+{
+ return NULL;
+}
+
+static inline int drm_legacy_agp_acquire(struct drm_device *dev)
+{
+ return -ENODEV;
+}
+
+static inline int drm_legacy_agp_release(struct drm_device *dev)
+{
+ return -ENODEV;
+}
+
+static inline int drm_legacy_agp_enable(struct drm_device *dev,
+ struct drm_agp_mode mode)
+{
+ return -ENODEV;
+}
+
+static inline int drm_legacy_agp_info(struct drm_device *dev,
+ struct drm_agp_info *info)
+{
+ return -ENODEV;
+}
+
+static inline int drm_legacy_agp_alloc(struct drm_device *dev,
+ struct drm_agp_buffer *request)
+{
+ return -ENODEV;
+}
+
+static inline int drm_legacy_agp_free(struct drm_device *dev,
+ struct drm_agp_buffer *request)
+{
+ return -ENODEV;
+}
+
+static inline int drm_legacy_agp_unbind(struct drm_device *dev,
+ struct drm_agp_binding *request)
+{
+ return -ENODEV;
+}
+
+static inline int drm_legacy_agp_bind(struct drm_device *dev,
+ struct drm_agp_binding *request)
+{
+ return -ENODEV;
+}
+#endif
+
/* drm_memory.c */
void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
diff --git a/include/drm/drm_managed.h b/include/drm/drm_managed.h
new file mode 100644
index 000000000000..359883942612
--- /dev/null
+++ b/include/drm/drm_managed.h
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef _DRM_MANAGED_H_
+#define _DRM_MANAGED_H_
+
+#include <linux/gfp.h>
+#include <linux/overflow.h>
+#include <linux/types.h>
+
+struct drm_device;
+struct mutex;
+
+typedef void (*drmres_release_t)(struct drm_device *dev, void *res);
+
+/**
+ * drmm_add_action - add a managed release action to a &drm_device
+ * @dev: DRM device
+ * @action: function which should be called when @dev is released
+ * @data: opaque pointer, passed to @action
+ *
+ * This function adds the @action callback with optional parameter @data to the
+ * list of cleanup actions for @dev. The cleanup actions will be run in reverse
+ * order in the final drm_dev_put() call for @dev.
+ */
+#define drmm_add_action(dev, action, data) \
+ __drmm_add_action(dev, action, data, #action)
+
+int __must_check __drmm_add_action(struct drm_device *dev,
+ drmres_release_t action,
+ void *data, const char *name);
+
+/**
+ * drmm_add_action_or_reset - add a managed release action to a &drm_device
+ * @dev: DRM device
+ * @action: function which should be called when @dev is released
+ * @data: opaque pointer, passed to @action
+ *
+ * Similar to drmm_add_action(), with the only difference that upon failure
+ * @action is called directly to perform any necessary cleanup.
+ */
+#define drmm_add_action_or_reset(dev, action, data) \
+ __drmm_add_action_or_reset(dev, action, data, #action)
+
+int __must_check __drmm_add_action_or_reset(struct drm_device *dev,
+ drmres_release_t action,
+ void *data, const char *name);
+
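A rough usage sketch; the my_* names and the hardware teardown are hypothetical, not part of this API:

	#include <drm/drm_managed.h>

	static void my_hw_shutdown(struct drm_device *dev, void *data)
	{
		struct my_hw *hw = data;	/* hypothetical driver state */

		my_hw_stop(hw);			/* hypothetical teardown */
	}

	static int my_hw_bind(struct drm_device *dev, struct my_hw *hw)
	{
		/* runs my_hw_shutdown() on the final drm_dev_put(); on
		 * registration failure it is invoked immediately */
		return drmm_add_action_or_reset(dev, my_hw_shutdown, hw);
	}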
+void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp) __malloc;
+
+/**
+ * drmm_kzalloc - &drm_device managed kzalloc()
+ * @dev: DRM device
+ * @size: size of the memory allocation
+ * @gfp: GFP allocation flags
+ *
+ * This is a &drm_device managed version of kzalloc(). The allocated memory is
+ * automatically freed on the final drm_dev_put(). Memory can also be freed
+ * before the final drm_dev_put() by calling drmm_kfree().
+ */
+static inline void *drmm_kzalloc(struct drm_device *dev, size_t size, gfp_t gfp)
+{
+ return drmm_kmalloc(dev, size, gfp | __GFP_ZERO);
+}
+
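A brief sketch, assuming a hypothetical driver-state structure:

	static int my_init_state(struct drm_device *dev)
	{
		struct my_state *state;	/* hypothetical driver state */

		state = drmm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
		if (!state)
			return -ENOMEM;

		/* freed automatically on the final drm_dev_put() */
		return 0;
	}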
+/**
+ * drmm_kmalloc_array - &drm_device managed kmalloc_array()
+ * @dev: DRM device
+ * @n: number of array elements to allocate
+ * @size: size of array member
+ * @flags: GFP allocation flags
+ *
+ * This is a &drm_device managed version of kmalloc_array(). The allocated
+ * memory is automatically freed on the final drm_dev_put() and works exactly
+ * like a memory allocation obtained by drmm_kmalloc().
+ */
+static inline void *drmm_kmalloc_array(struct drm_device *dev,
+ size_t n, size_t size, gfp_t flags)
+{
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(n, size, &bytes)))
+ return NULL;
+
+ return drmm_kmalloc(dev, bytes, flags);
+}
+
+/**
+ * drmm_kcalloc - &drm_device managed kcalloc()
+ * @dev: DRM device
+ * @n: number of array elements to allocate
+ * @size: size of array member
+ * @flags: GFP allocation flags
+ *
+ * This is a &drm_device managed version of kcalloc(). The allocated memory is
+ * automatically freed on the final drm_dev_put() and works exactly like a
+ * memory allocation obtained by drmm_kmalloc().
+ */
+static inline void *drmm_kcalloc(struct drm_device *dev,
+ size_t n, size_t size, gfp_t flags)
+{
+ return drmm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
+}
+
+char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp);
+
+void drmm_kfree(struct drm_device *dev, void *data);
+
+int drmm_mutex_init(struct drm_device *dev, struct mutex *lock);
+
+#endif
diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h
index 67c66f5ee591..14eaecb1825c 100644
--- a/include/drm/drm_mipi_dbi.h
+++ b/include/drm/drm_mipi_dbi.h
@@ -95,11 +95,6 @@ struct mipi_dbi_dev {
struct drm_display_mode mode;
/**
- * @enabled: Pipeline is enabled
- */
- bool enabled;
-
- /**
* @tx_buf: Buffer used for transfer (copy clip rect area)
*/
u16 *tx_buf;
@@ -110,6 +105,18 @@ struct mipi_dbi_dev {
unsigned int rotation;
/**
+ * @left_offset: Horizontal offset of the display relative to the
+ * controller's driver array
+ */
+ unsigned int left_offset;
+
+ /**
+ * @top_offset: Vertical offset of the display relative to the
+ * controller's driver array
+ */
+ unsigned int top_offset;
+
+ /**
* @backlight: backlight device (optional)
*/
struct backlight_device *backlight;
@@ -123,6 +130,14 @@ struct mipi_dbi_dev {
* @dbi: MIPI DBI interface
*/
struct mipi_dbi dbi;
+
+ /**
+ * @driver_private: Driver private data.
+ * Necessary for drivers with private data since devm_drm_dev_alloc()
+ * can't allocate structures that embed another structure which in
+ * turn embeds drm_device.
+ */
+ void *driver_private;
};
static inline struct mipi_dbi_dev *drm_to_mipi_dbi_dev(struct drm_device *drm)
@@ -140,7 +155,8 @@ int mipi_dbi_dev_init_with_formats(struct mipi_dbi_dev *dbidev,
int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev,
const struct drm_simple_display_pipe_funcs *funcs,
const struct drm_display_mode *mode, unsigned int rotation);
-void mipi_dbi_release(struct drm_device *drm);
+enum drm_mode_status mipi_dbi_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
+ const struct drm_display_mode *mode);
void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state);
void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev,
@@ -158,14 +174,15 @@ int mipi_dbi_spi_transfer(struct spi_device *spi, u32 speed_hz,
int mipi_dbi_command_read(struct mipi_dbi *dbi, u8 cmd, u8 *val);
int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len);
-int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len);
+int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data,
+ size_t len);
int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
struct drm_rect *clip, bool swap);
/**
* mipi_dbi_command - MIPI DCS command with optional parameter(s)
* @dbi: MIPI DBI structure
* @cmd: Command
- * @seq...: Optional parameter(s)
+ * @seq: Optional parameter(s)
*
* Send MIPI DCS command to the controller. Use mipi_dbi_command_read() for
* get/read.
@@ -175,14 +192,19 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
*/
#define mipi_dbi_command(dbi, cmd, seq...) \
({ \
- u8 d[] = { seq }; \
- mipi_dbi_command_stackbuf(dbi, cmd, d, ARRAY_SIZE(d)); \
+ const u8 d[] = { seq }; \
+ struct device *dev = &(dbi)->spi->dev; \
+ int ret; \
+ ret = mipi_dbi_command_stackbuf(dbi, cmd, d, ARRAY_SIZE(d)); \
+ if (ret) \
+ dev_err_ratelimited(dev, "error %d when sending command %#02x\n", ret, cmd); \
+ ret; \
})
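Since the macro now logs and returns errors itself, simple init sequences can ignore the return value. A sketch using standard DCS commands; the 0x55 pixel-format value (RGB565 on many controllers) is an assumption about the panel:

	#include <linux/delay.h>
	#include <video/mipi_display.h>

	static void my_panel_init(struct mipi_dbi *dbi)
	{
		mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
		msleep(120);	/* typical post-sleep-out delay */
		mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
		mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
	}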
#ifdef CONFIG_DEBUG_FS
-int mipi_dbi_debugfs_init(struct drm_minor *minor);
+void mipi_dbi_debugfs_init(struct drm_minor *minor);
#else
-#define mipi_dbi_debugfs_init NULL
+static inline void mipi_dbi_debugfs_init(struct drm_minor *minor) {}
#endif
#endif /* __LINUX_MIPI_DBI_H */
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 360e6377e84b..20b21b577dea 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -80,6 +80,11 @@ int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
* Note that typically DSI packet transmission is atomic, so the .transfer()
* function will seldom return anything other than the number of bytes
* contained in the transmit buffer on success.
+ *
+ * Also note that those callbacks can be called no matter the state the
+ * host is in. Drivers that need the underlying device to be powered to
+ * perform these operations will first need to make sure it's been
+ * properly enabled.
*/
struct mipi_dsi_host_ops {
int (*attach)(struct mipi_dsi_host *host,
@@ -119,19 +124,21 @@ struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node);
/* enable hsync-end packets in vsync-pulse and v-porch area */
#define MIPI_DSI_MODE_VIDEO_HSE BIT(4)
/* disable hfront-porch area */
-#define MIPI_DSI_MODE_VIDEO_HFP BIT(5)
+#define MIPI_DSI_MODE_VIDEO_NO_HFP BIT(5)
/* disable hback-porch area */
-#define MIPI_DSI_MODE_VIDEO_HBP BIT(6)
+#define MIPI_DSI_MODE_VIDEO_NO_HBP BIT(6)
/* disable hsync-active area */
-#define MIPI_DSI_MODE_VIDEO_HSA BIT(7)
+#define MIPI_DSI_MODE_VIDEO_NO_HSA BIT(7)
/* flush display FIFO on vsync pulse */
#define MIPI_DSI_MODE_VSYNC_FLUSH BIT(8)
/* disable EoT packets in HS mode */
-#define MIPI_DSI_MODE_EOT_PACKET BIT(9)
+#define MIPI_DSI_MODE_NO_EOT_PACKET BIT(9)
/* device supports non-continuous clock behavior (DSI spec 5.6.1) */
#define MIPI_DSI_CLOCK_NON_CONTINUOUS BIT(10)
/* transmit data in low power */
#define MIPI_DSI_MODE_LPM BIT(11)
+/* transmit data ending at the same time for all lanes within one hsync */
+#define MIPI_DSI_HS_PKT_END_ALIGNED BIT(12)
enum mipi_dsi_pixel_format {
MIPI_DSI_FMT_RGB888,
@@ -172,6 +179,7 @@ struct mipi_dsi_device_info {
* @lp_rate: maximum lane frequency for low power mode in hertz, this should
* be set to the real limits of the hardware, zero is only accepted for
* legacy drivers
+ * @dsc: panel/bridge DSC pps payload to be sent
*/
struct mipi_dsi_device {
struct mipi_dsi_host *host;
@@ -184,6 +192,7 @@ struct mipi_dsi_device {
unsigned long mode_flags;
unsigned long hs_rate;
unsigned long lp_rate;
+ struct drm_dsc_config *dsc;
};
#define MIPI_DSI_MODULE_PREFIX "mipi-dsi:"
@@ -222,9 +231,13 @@ struct mipi_dsi_device *
mipi_dsi_device_register_full(struct mipi_dsi_host *host,
const struct mipi_dsi_device_info *info);
void mipi_dsi_device_unregister(struct mipi_dsi_device *dsi);
+struct mipi_dsi_device *
+devm_mipi_dsi_device_register_full(struct device *dev, struct mipi_dsi_host *host,
+ const struct mipi_dsi_device_info *info);
struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np);
int mipi_dsi_attach(struct mipi_dsi_device *dsi);
int mipi_dsi_detach(struct mipi_dsi_device *dsi);
+int devm_mipi_dsi_attach(struct device *dev, struct mipi_dsi_device *dsi);
int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi);
int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi);
int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
@@ -285,6 +298,23 @@ int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
u16 *brightness);
/**
+ * mipi_dsi_dcs_write_seq - transmit a DCS command with payload
+ * @dsi: DSI peripheral device
+ * @cmd: Command
+ * @seq: buffer containing data to be transmitted
+ */
+#define mipi_dsi_dcs_write_seq(dsi, cmd, seq...) do { \
+ static const u8 d[] = { cmd, seq }; \
+ struct device *dev = &dsi->dev; \
+ int ret; \
+ ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) { \
+ dev_err_ratelimited(dev, "sending command %#02x failed: %d\n", cmd, ret); \
+ return ret; \
+ } \
+ } while (0)
+
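Note the hidden control flow: the macro returns from the enclosing function on failure, so it may only be used in functions returning int. A sketch (the 0xb0 vendor command and its payload are made-up examples):

	#include <video/mipi_display.h>

	static int my_panel_enable(struct mipi_dsi_device *dsi)
	{
		mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x04);	/* made-up vendor cmd */
		mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_EXIT_SLEEP_MODE);
		return 0;
	}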
+/**
* struct mipi_dsi_driver - DSI driver
* @driver: device driver model driver
* @probe: callback for device binding
@@ -294,7 +324,7 @@ int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
struct mipi_dsi_driver {
struct device_driver driver;
int(*probe)(struct mipi_dsi_device *dsi);
- int(*remove)(struct mipi_dsi_device *dsi);
+ void (*remove)(struct mipi_dsi_device *dsi);
void (*shutdown)(struct mipi_dsi_device *dsi);
};
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index d7939c054259..ac33ba1b18bc 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -39,13 +39,15 @@
*/
#include <linux/bug.h>
#include <linux/rbtree.h>
-#include <linux/kernel.h>
+#include <linux/limits.h>
#include <linux/mm_types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>
#endif
+#include <linux/types.h>
+
#include <drm/drm_print.h>
#ifdef CONFIG_DRM_DEBUG_MM
@@ -168,6 +170,7 @@ struct drm_mm_node {
struct rb_node rb_hole_addr;
u64 __subtree_last;
u64 hole_size;
+ u64 subtree_max_hole;
unsigned long flags;
#define DRM_MM_NODE_ALLOCATED_BIT 0
#define DRM_MM_NODE_SCANNED_BIT 1
@@ -272,7 +275,7 @@ static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
*/
static inline bool drm_mm_initialized(const struct drm_mm *mm)
{
- return mm->hole_stack.next;
+ return READ_ONCE(mm->hole_stack.next);
}
/**
@@ -337,7 +340,7 @@ static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
/**
* drm_mm_nodes - list of nodes under the drm_mm range manager
- * @mm: the struct drm_mm range manger
+ * @mm: the struct drm_mm range manager
*
* As the drm_mm range manager hides its node_list deep with its
* structure, extracting it looks painful and repetitive. This is
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 3bcbe30339f0..6b5e01295348 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -58,6 +58,12 @@ struct drm_mode_config_funcs {
* actual modifier used if the request doesn't have it specified,
* ie. when (@mode_cmd->flags & DRM_MODE_FB_MODIFIERS) == 0.
*
+ * IMPORTANT: These implied modifiers for legacy userspace must be
+ * stored in struct &drm_framebuffer, including all relevant metadata
+ * like &drm_framebuffer.pitches and &drm_framebuffer.offsets if the
+ * modifier enables additional planes beyond the fourcc pixel format
+ * code. This is required by the GETFB2 ioctl.
+ *
* If the parameters are deemed valid and the backing storage objects in
* the underlying memory manager all exist, then the driver allocates
* a new &drm_framebuffer structure, subclassed to contain
@@ -97,14 +103,13 @@ struct drm_mode_config_funcs {
* Callback used by helpers to inform the driver of output configuration
* changes.
*
- * Drivers implementing fbdev emulation with the helpers can call
- * drm_fb_helper_hotplug_changed from this hook to inform the fbdev
- * helper of output changes.
- *
- * FIXME:
+ * Drivers implementing fbdev emulation use drm_kms_helper_hotplug_event()
+ * to call this hook to inform the fbdev helper of output changes.
*
- * Except that there's no vtable for device-level helper callbacks
- * there's no reason this is a core function.
+ * This hook is deprecated, drivers should instead use
+ * drm_fbdev_generic_setup() which takes care of any necessary
+ * hotplug event forwarding already without further involvement by
+ * the driver.
*/
void (*output_poll_changed)(struct drm_device *dev);
@@ -354,6 +359,19 @@ struct drm_mode_config_funcs {
* Core mode resource tracking structure. All CRTC, encoders, and connectors
* enumerated by the driver are added here, as are global properties. Some
* global restrictions are also here, e.g. dimension restrictions.
+ *
+ * Framebuffer sizes refer to the virtual screen that can be displayed by
+ * the CRTC. This can be different from the physical resolution programmed.
+ * The minimum width and height, stored in @min_width and @min_height,
+ * describe the smallest size of the framebuffer. It correlates to the
+ * minimum programmable resolution.
+ * The maximum width, stored in @max_width, is typically limited by the
+ * maximum pitch between two adjacent scanlines. The maximum height, stored
+ * in @max_height, is usually only limited by the amount of addressable video
+ * memory. For hardware that has no real maximum, drivers should pick a
+ * reasonable default.
+ *
+ * See also @DRM_SHADOW_PLANE_MAX_WIDTH and @DRM_SHADOW_PLANE_MAX_HEIGHT.
*/
struct drm_mode_config {
/**
@@ -603,22 +621,22 @@ struct drm_mode_config {
struct drm_property *prop_src_h;
/**
* @prop_crtc_x: Default atomic plane property for the plane destination
- * position in the &drm_crtc is is being shown on.
+ * position in the &drm_crtc it is being shown on.
*/
struct drm_property *prop_crtc_x;
/**
* @prop_crtc_y: Default atomic plane property for the plane destination
- * position in the &drm_crtc is is being shown on.
+ * position in the &drm_crtc it is being shown on.
*/
struct drm_property *prop_crtc_y;
/**
* @prop_crtc_w: Default atomic plane property for the plane destination
- * position in the &drm_crtc is is being shown on.
+ * position in the &drm_crtc it is being shown on.
*/
struct drm_property *prop_crtc_w;
/**
* @prop_crtc_h: Default atomic plane property for the plane destination
- * position in the &drm_crtc is is being shown on.
+ * position in the &drm_crtc it is being shown on.
*/
struct drm_property *prop_crtc_h;
/**
@@ -681,6 +699,12 @@ struct drm_mode_config {
struct drm_property *dvi_i_select_subconnector_property;
/**
+ * @dp_subconnector_property: Optional DP property to differentiate
+ * between different DP downstream port types.
+ */
+ struct drm_property *dp_subconnector_property;
+
+ /**
* @tv_subconnector_property: Optional TV property to differentiate
* between different TV connector types.
*/
@@ -894,11 +918,14 @@ struct drm_mode_config {
bool async_page_flip;
/**
- * @allow_fb_modifiers:
+ * @fb_modifiers_not_supported:
*
- * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call.
+ * When this flag is set, the DRM device will not expose modifier
+ * support to userspace. This is only used by legacy drivers that infer
+ * the buffer layout through heuristics without using modifiers. New
+ * drivers shall not set this flag.
*/
- bool allow_fb_modifiers;
+ bool fb_modifiers_not_supported;
/**
* @normalize_zpos:
@@ -929,7 +956,23 @@ struct drm_mode_config {
const struct drm_mode_config_helper_funcs *helper_private;
};
-void drm_mode_config_init(struct drm_device *dev);
+int __must_check drmm_mode_config_init(struct drm_device *dev);
+
+/**
+ * drm_mode_config_init - DRM mode_configuration structure initialization
+ * @dev: DRM device
+ *
+ * This is the unmanaged version of drmm_mode_config_init() for drivers which
+ * still explicitly call drm_mode_config_cleanup().
+ *
+ * FIXME: This function is deprecated and drivers should be converted over to
+ * drmm_mode_config_init().
+ */
+static inline int drm_mode_config_init(struct drm_device *dev)
+{
+ return drmm_mode_config_init(dev);
+}
+
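A sketch of the managed path in a driver's init code; the size limits are arbitrary example values:

	static int my_modeset_init(struct drm_device *dev)
	{
		int ret;

		ret = drmm_mode_config_init(dev);
		if (ret)
			return ret;

		/* cleaned up automatically on the final drm_dev_put() */
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;

		return 0;
	}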
void drm_mode_config_reset(struct drm_device *dev);
void drm_mode_config_cleanup(struct drm_device *dev);
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
index c34a3e8030e1..912f1e415685 100644
--- a/include/drm/drm_mode_object.h
+++ b/include/drm/drm_mode_object.h
@@ -98,6 +98,10 @@ struct drm_object_properties {
* Hence atomic drivers should not use drm_object_property_set_value()
* and drm_object_property_get_value() on mutable objects, i.e. those
* without the DRM_MODE_PROP_IMMUTABLE flag set.
+ *
+ * For atomic drivers the default value of properties is stored in this
+ * array, so drm_object_property_get_default_value can be used to
+ * retrieve it.
*/
uint64_t values[DRM_OBJECT_MAX_PROPERTY];
};
@@ -126,6 +130,9 @@ int drm_object_property_set_value(struct drm_mode_object *obj,
int drm_object_property_get_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t *value);
+int drm_object_property_get_default_value(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t *val);
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index e946e20c61d8..b0c680e6f670 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -48,7 +48,7 @@ struct videomode;
* @MODE_HSYNC: hsync out of range
* @MODE_VSYNC: vsync out of range
* @MODE_H_ILLEGAL: mode has illegal horizontal timings
- * @MODE_V_ILLEGAL: mode has illegal horizontal timings
+ * @MODE_V_ILLEGAL: mode has illegal vertical timings
* @MODE_BAD_WIDTH: requires an unsupported linepitch
* @MODE_NOMODE: no mode with a matching name
* @MODE_NO_INTERLACE: interlaced mode not supported
@@ -139,6 +139,35 @@ enum drm_mode_status {
.vscan = (vs), .flags = (f)
/**
+ * DRM_MODE_RES_MM - Calculates the display size from resolution and DPI
+ * @res: The resolution in pixel
+ * @dpi: The number of dots per inch
+ */
+#define DRM_MODE_RES_MM(res, dpi) \
+ (((res) * 254ul) / ((dpi) * 10ul))
+
+#define __DRM_MODE_INIT(pix, hd, vd, hd_mm, vd_mm) \
+ .type = DRM_MODE_TYPE_DRIVER, .clock = (pix), \
+ .hdisplay = (hd), .hsync_start = (hd), .hsync_end = (hd), \
+ .htotal = (hd), .vdisplay = (vd), .vsync_start = (vd), \
+ .vsync_end = (vd), .vtotal = (vd), .width_mm = (hd_mm), \
+ .height_mm = (vd_mm)
+
+/**
+ * DRM_MODE_INIT - Initialize display mode
+ * @hz: Vertical refresh rate in Hertz
+ * @hd: Horizontal resolution, width
+ * @vd: Vertical resolution, height
+ * @hd_mm: Display width in millimeters
+ * @vd_mm: Display height in millimeters
+ *
+ * This macro initializes a &drm_display_mode that contains information about
+ * refresh rate, resolution and physical size.
+ */
+#define DRM_MODE_INIT(hz, hd, vd, hd_mm, vd_mm) \
+ __DRM_MODE_INIT((hd) * (vd) * (hz) / 1000 /* kHz */, hd, vd, hd_mm, vd_mm)
+
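For illustration, a fixed mode for a hypothetical 800x480 panel at 60 Hz, with the physical size derived from an assumed 166 DPI via DRM_MODE_RES_MM:

	static const struct drm_display_mode my_panel_mode = {
		DRM_MODE_INIT(60, 800, 480,
			      DRM_MODE_RES_MM(800, 166),
			      DRM_MODE_RES_MM(480, 166)),
	};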
+/**
* DRM_SIMPLE_MODE - Simple display mode
* @hd: Horizontal resolution, width
* @vd: Vertical resolution, height
@@ -149,11 +178,7 @@ enum drm_mode_status {
* resolution and physical size.
*/
#define DRM_SIMPLE_MODE(hd, vd, hd_mm, vd_mm) \
- .type = DRM_MODE_TYPE_DRIVER, .clock = 1 /* pass validation */, \
- .hdisplay = (hd), .hsync_start = (hd), .hsync_end = (hd), \
- .htotal = (hd), .vdisplay = (vd), .vsync_start = (vd), \
- .vsync_end = (vd), .vtotal = (vd), .width_mm = (hd_mm), \
- .height_mm = (vd_mm)
+ __DRM_MODE_INIT(1 /* pass validation */, hd, vd, hd_mm, vd_mm)
#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */
#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */
@@ -195,6 +220,9 @@ enum drm_mode_status {
* @crtc_vsync_end: hardware mode vertical sync end
* @crtc_vtotal: hardware mode vertical total size
*
+ * This is the kernel API display mode information structure. For the
+ * user-space version see struct drm_mode_modeinfo.
+ *
* The horizontal and vertical timings are defined per the following diagram.
*
* ::
@@ -223,71 +251,21 @@ enum drm_mode_status {
*/
struct drm_display_mode {
/**
- * @head:
- *
- * struct list_head for mode lists.
- */
- struct list_head head;
-
- /**
- * @name:
- *
- * Human-readable name of the mode, filled out with drm_mode_set_name().
- */
- char name[DRM_DISPLAY_MODE_LEN];
-
- /**
- * @status:
- *
- * Status of the mode, used to filter out modes not supported by the
- * hardware. See enum &drm_mode_status.
- */
- enum drm_mode_status status;
-
- /**
- * @type:
- *
- * A bitmask of flags, mostly about the source of a mode. Possible flags
- * are:
- *
- * - DRM_MODE_TYPE_PREFERRED: Preferred mode, usually the native
- * resolution of an LCD panel. There should only be one preferred
- * mode per connector at any given time.
- * - DRM_MODE_TYPE_DRIVER: Mode created by the driver, which is all of
- * them really. Drivers must set this bit for all modes they create
- * and expose to userspace.
- * - DRM_MODE_TYPE_USERDEF: Mode defined via kernel command line
- *
- * Plus a big list of flags which shouldn't be used at all, but are
- * still around since these flags are also used in the userspace ABI.
- * We no longer accept modes with these types though:
- *
- * - DRM_MODE_TYPE_BUILTIN: Meant for hard-coded modes, unused.
- * Use DRM_MODE_TYPE_DRIVER instead.
- * - DRM_MODE_TYPE_DEFAULT: Again a leftover, use
- * DRM_MODE_TYPE_PREFERRED instead.
- * - DRM_MODE_TYPE_CLOCK_C and DRM_MODE_TYPE_CRTC_C: Define leftovers
- * which are stuck around for hysterical raisins only. No one has an
- * idea what they were meant for. Don't use.
- */
- unsigned int type;
-
- /**
* @clock:
*
* Pixel clock in kHz.
*/
int clock; /* in kHz */
- int hdisplay;
- int hsync_start;
- int hsync_end;
- int htotal;
- int hskew;
- int vdisplay;
- int vsync_start;
- int vsync_end;
- int vtotal;
- int vscan;
+ u16 hdisplay;
+ u16 hsync_start;
+ u16 hsync_end;
+ u16 htotal;
+ u16 hskew;
+ u16 vdisplay;
+ u16 vsync_start;
+ u16 vsync_end;
+ u16 vtotal;
+ u16 vscan;
/**
* @flags:
*
@@ -322,7 +300,37 @@ struct drm_display_mode {
* - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF: frame split into left and
* right parts.
*/
- unsigned int flags;
+ u32 flags;
+
+ /**
+ * @crtc_clock:
+ *
+ * Actual pixel or dot clock in the hardware. This differs from the
+ * logical @clock when e.g. using interlacing, double-clocking, stereo
+ * modes or other fancy stuff that changes the timings and signals
+ * actually sent over the wire.
+ *
+ * This is again in kHz.
+ *
+ * Note that with digital outputs like HDMI or DP there's usually a
+ * massive confusion between the dot clock and the signal clock at the
+ * bit encoding level. Especially when a 8b/10b encoding is used and the
+ * difference is exactly a factor of 10.
+ */
+ int crtc_clock;
+ u16 crtc_hdisplay;
+ u16 crtc_hblank_start;
+ u16 crtc_hblank_end;
+ u16 crtc_hsync_start;
+ u16 crtc_hsync_end;
+ u16 crtc_htotal;
+ u16 crtc_hskew;
+ u16 crtc_vdisplay;
+ u16 crtc_vblank_start;
+ u16 crtc_vblank_end;
+ u16 crtc_vsync_start;
+ u16 crtc_vsync_end;
+ u16 crtc_vtotal;
/**
* @width_mm:
@@ -330,7 +338,7 @@ struct drm_display_mode {
* Addressable size of the output in mm, projectors should set this to
* 0.
*/
- int width_mm;
+ u16 width_mm;
/**
* @height_mm:
@@ -338,74 +346,69 @@ struct drm_display_mode {
* Addressable size of the output in mm, projectors should set this to
* 0.
*/
- int height_mm;
+ u16 height_mm;
/**
- * @crtc_clock:
+ * @type:
*
- * Actual pixel or dot clock in the hardware. This differs from the
- * logical @clock when e.g. using interlacing, double-clocking, stereo
- * modes or other fancy stuff that changes the timings and signals
- * actually sent over the wire.
+ * A bitmask of flags, mostly about the source of a mode. Possible flags
+ * are:
*
- * This is again in kHz.
+ * - DRM_MODE_TYPE_PREFERRED: Preferred mode, usually the native
+ * resolution of an LCD panel. There should only be one preferred
+ * mode per connector at any given time.
+ * - DRM_MODE_TYPE_DRIVER: Mode created by the driver, which is all of
+ * them really. Drivers must set this bit for all modes they create
+ * and expose to userspace.
+ * - DRM_MODE_TYPE_USERDEF: Mode defined or selected via the kernel
+ * command line.
*
- * Note that with digital outputs like HDMI or DP there's usually a
- * massive confusion between the dot clock and the signal clock at the
- * bit encoding level. Especially when a 8b/10b encoding is used and the
- * difference is exactly a factor of 10.
+ * Plus a big list of flags which shouldn't be used at all, but are
+ * still around since these flags are also used in the userspace ABI.
+ * We no longer accept modes with these types though:
+ *
+ * - DRM_MODE_TYPE_BUILTIN: Meant for hard-coded modes, unused.
+ * Use DRM_MODE_TYPE_DRIVER instead.
+ * - DRM_MODE_TYPE_DEFAULT: Again a leftover, use
+ * DRM_MODE_TYPE_PREFERRED instead.
+ * - DRM_MODE_TYPE_CLOCK_C and DRM_MODE_TYPE_CRTC_C: Define leftovers
+ * which are stuck around for hysterical raisins only. No one has an
+ * idea what they were meant for. Don't use.
*/
- int crtc_clock;
- int crtc_hdisplay;
- int crtc_hblank_start;
- int crtc_hblank_end;
- int crtc_hsync_start;
- int crtc_hsync_end;
- int crtc_htotal;
- int crtc_hskew;
- int crtc_vdisplay;
- int crtc_vblank_start;
- int crtc_vblank_end;
- int crtc_vsync_start;
- int crtc_vsync_end;
- int crtc_vtotal;
+ u8 type;
/**
- * @private:
+ * @expose_to_userspace:
*
- * Pointer for driver private data. This can only be used for mode
- * objects passed to drivers in modeset operations. It shouldn't be used
- * by atomic drivers since they can store any additional data by
- * subclassing state structures.
+ * Indicates whether the mode is to be exposed to userspace. This is
+ * used to maintain the set of exposed modes while preparing the
+ * userspace mode list in the drm_mode_getconnector ioctl. It is only
+ * meaningful inside that ioctl, and is not to be used outside it.
*/
- int *private;
+ bool expose_to_userspace;
/**
- * @private_flags:
+ * @head:
*
- * Similar to @private, but just an integer.
+ * struct list_head for mode lists.
*/
- int private_flags;
+ struct list_head head;
/**
- * @vrefresh:
- *
- * Vertical refresh rate, for debug output in human readable form. Not
- * used in a functional way.
+ * @name:
*
- * This value is in Hz.
+ * Human-readable name of the mode, filled out with drm_mode_set_name().
*/
- int vrefresh;
+ char name[DRM_DISPLAY_MODE_LEN];
/**
- * @hsync:
- *
- * Horizontal refresh rate, for debug output in human readable form. Not
- * used in a functional way.
+ * @status:
*
- * This value is in kHz.
+ * Status of the mode, used to filter out modes not supported by the
+ * hardware. See &enum drm_mode_status.
*/
- int hsync;
+ enum drm_mode_status status;
/**
* @picture_aspect_ratio:
@@ -414,18 +417,6 @@ struct drm_display_mode {
*/
enum hdmi_picture_aspect picture_aspect_ratio;
- /**
- * @export_head:
- *
- * struct list_head for modes to be exposed to the userspace.
- * This is to maintain a list of exposed modes while preparing
- * user-mode's list in drm_mode_getconnector ioctl. The purpose of this
- * list_head only lies in the ioctl function, and is not expected to be
- * used outside the function.
- * Once used, the stale pointers are not reset, but left as it is, to
- * avoid overhead of protecting it by mode_config.mutex.
- */
- struct list_head export_head;
};
/**
@@ -438,7 +429,7 @@ struct drm_display_mode {
* @m: display mode
*/
#define DRM_MODE_ARG(m) \
- (m)->name, (m)->vrefresh, (m)->clock, \
+ (m)->name, drm_mode_vrefresh(m), (m)->clock, \
(m)->hdisplay, (m)->hsync_start, (m)->hsync_end, (m)->htotal, \
(m)->vdisplay, (m)->vsync_start, (m)->vsync_end, (m)->vtotal, \
(m)->type, (m)->flags
@@ -495,12 +486,29 @@ void drm_display_mode_from_videomode(const struct videomode *vm,
void drm_display_mode_to_videomode(const struct drm_display_mode *dmode,
struct videomode *vm);
void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags);
+
+#if defined(CONFIG_OF)
int of_get_drm_display_mode(struct device_node *np,
struct drm_display_mode *dmode, u32 *bus_flags,
int index);
+int of_get_drm_panel_display_mode(struct device_node *np,
+ struct drm_display_mode *dmode, u32 *bus_flags);
+#else
+static inline int of_get_drm_display_mode(struct device_node *np,
+ struct drm_display_mode *dmode,
+ u32 *bus_flags, int index)
+{
+ return -EINVAL;
+}
+
+static inline int of_get_drm_panel_display_mode(struct device_node *np,
+ struct drm_display_mode *dmode, u32 *bus_flags)
+{
+ return -EINVAL;
+}
+#endif
void drm_mode_set_name(struct drm_display_mode *mode);
-int drm_mode_hsync(const struct drm_display_mode *mode);
int drm_mode_vrefresh(const struct drm_display_mode *mode);
void drm_mode_get_hv_timing(const struct drm_display_mode *mode,
int *hdisplay, int *vdisplay);
@@ -509,6 +517,8 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p,
int adjust_flags);
void drm_mode_copy(struct drm_display_mode *dst,
const struct drm_display_mode *src);
+void drm_mode_init(struct drm_display_mode *dst,
+ const struct drm_display_mode *src);
struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
const struct drm_display_mode *mode);
bool drm_mode_match(const struct drm_display_mode *mode1,
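The new drm_mode_init() pairs with the slimmed-down struct above: unlike drm_mode_copy(), it also (re)initializes the embedded list head, so it suits on-stack or freshly allocated destinations. A minimal sketch, assuming a hypothetical fixed-mode panel (all timing values are illustrative):

.. code-block:: c

	static const struct drm_display_mode example_panel_mode = {
		/* 1920x1080@60; made-up but self-consistent timings. */
		.clock = 148500,
		.hdisplay = 1920, .hsync_start = 2008,
		.hsync_end = 2052, .htotal = 2200,
		.vdisplay = 1080, .vsync_start = 1084,
		.vsync_end = 1089, .vtotal = 1125,
		.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
	};

	static void example_fill_mode(struct drm_display_mode *mode)
	{
		/* Overwrites *mode entirely, including the list head. */
		drm_mode_init(mode, &example_panel_mode);
		drm_mode_set_name(mode);
	}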
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 5a87f1bd7a3f..fafa70ac1337 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -336,8 +336,7 @@ struct drm_crtc_helper_funcs {
*
* This function is called in the check phase of an atomic update. The
* driver is not allowed to change anything outside of the free-standing
- * state objects passed-in or assembled in the overall &drm_atomic_state
- * update tracking structure.
+ * state object passed-in.
*
* Also beware that userspace can request its own custom modes, neither
* core nor helpers filter modes to the list of probe modes reported by
@@ -353,7 +352,7 @@ struct drm_crtc_helper_funcs {
* deadlock.
*/
int (*atomic_check)(struct drm_crtc *crtc,
- struct drm_crtc_state *state);
+ struct drm_atomic_state *state);
/**
* @atomic_begin:
@@ -374,7 +373,7 @@ struct drm_crtc_helper_funcs {
* transitional plane helpers, but it is optional.
*/
void (*atomic_begin)(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state);
+ struct drm_atomic_state *state);
/**
* @atomic_flush:
*
@@ -398,7 +397,7 @@ struct drm_crtc_helper_funcs {
* transitional plane helpers, but it is optional.
*/
void (*atomic_flush)(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state);
+ struct drm_atomic_state *state);
/**
* @atomic_enable:
@@ -417,14 +416,10 @@ struct drm_crtc_helper_funcs {
* @atomic_enable must be the inverse of @atomic_disable for atomic
* drivers.
*
- * Drivers can use the @old_crtc_state input parameter if the operations
- * needed to enable the CRTC don't depend solely on the new state but
- * also on the transition between the old state and the new state.
- *
* This function is optional.
*/
void (*atomic_enable)(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state);
+ struct drm_atomic_state *state);
/**
* @atomic_disable:
@@ -441,15 +436,57 @@ struct drm_crtc_helper_funcs {
* need to implement it if there's no need to disable anything at the
* CRTC level.
*
- * Comparing to @disable, this one provides the additional input
- * parameter @old_crtc_state which could be used to access the old
- * state. Atomic drivers should consider to use this one instead
- * of @disable.
- *
* This function is optional.
*/
void (*atomic_disable)(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state);
+ struct drm_atomic_state *state);
+
+ /**
+ * @get_scanout_position:
+ *
+ * Called by vblank timestamping code.
+ *
+ * Returns the current display scanout position from a CRTC and an
+ * optional accurate ktime_get() timestamp of when the position was
+ * measured. Note that this is a helper callback which is only used
+ * if a driver uses drm_crtc_vblank_helper_get_vblank_timestamp()
+ * for the @drm_crtc_funcs.get_vblank_timestamp callback.
+ *
+ * Parameters:
+ *
+ * crtc:
+ * The CRTC.
+ * in_vblank_irq:
+ * True when called from drm_crtc_handle_vblank(). Some drivers
+ * need to apply some workarounds for gpu-specific vblank irq
+ * quirks if the flag is set.
+ * vpos:
+ * Target location for current vertical scanout position.
+ * hpos:
+ * Target location for current horizontal scanout position.
+ * stime:
+ * Target location for timestamp taken immediately before
+ * scanout position query. Can be NULL to skip timestamp.
+ * etime:
+ * Target location for timestamp taken immediately after
+ * scanout position query. Can be NULL to skip timestamp.
+ * mode:
+ * Current display timings.
+ *
+ * Returns vpos as a positive number while in active scanout area.
+ * Returns vpos as a negative number inside vblank, counting the number
+ * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+ * until start of active scanout / end of vblank."
+ *
+ * Returns:
+ *
+ * True on success, false if a reliable scanout position counter could
+ * not be read out.
+ */
+ bool (*get_scanout_position)(struct drm_crtc *crtc,
+ bool in_vblank_irq, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
};
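
As a hedged sketch of the @get_scanout_position contract described above, a driver might implement it as follows (example_hw_read_scanline() is a hypothetical register accessor, not a DRM helper):

.. code-block:: c

	static bool example_get_scanout_position(struct drm_crtc *crtc,
						 bool in_vblank_irq,
						 int *vpos, int *hpos,
						 ktime_t *stime, ktime_t *etime,
						 const struct drm_display_mode *mode)
	{
		int scanline;

		if (stime)
			*stime = ktime_get();

		/* Hypothetical read of the current hardware scanline. */
		scanline = example_hw_read_scanline(crtc);

		if (scanline >= mode->crtc_vdisplay) {
			/* Inside vblank: scanlines left until active scanout. */
			*vpos = scanline - mode->crtc_vtotal;
		} else {
			*vpos = scanline;
		}
		*hpos = 0;

		if (etime)
			*etime = ktime_get();

		return true;
	}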
/**
@@ -646,22 +683,6 @@ struct drm_encoder_helper_funcs {
struct drm_connector_state *conn_state);
/**
- * @get_crtc:
- *
- * This callback is used by the legacy CRTC helpers to work around
- * deficiencies in its own book-keeping.
- *
- * Do not use, use atomic helpers instead, which get the book keeping
- * right.
- *
- * FIXME:
- *
- * Currently only nouveau is using this, and as soon as nouveau is
- * atomic we can ditch this hook.
- */
- struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
-
- /**
* @detect:
*
* This callback can be used by drivers who want to do detection on the
@@ -845,13 +866,19 @@ struct drm_connector_helper_funcs {
* The usual way to implement this is to cache the EDID retrieved in the
* probe callback somewhere in the driver-private connector structure.
* In this function drivers then parse the modes in the EDID and add
- * them by calling drm_add_edid_modes(). But connectors that driver a
+ * them by calling drm_add_edid_modes(). But connectors that drive a
* fixed panel can also manually add specific modes using
* drm_mode_probed_add(). Drivers which manually add modes should also
* make sure that the &drm_connector.display_info,
* &drm_connector.width_mm and &drm_connector.height_mm fields are
* filled in.
*
+ * Note that the caller function will automatically add standard VESA
+ * DMT modes up to 1024x768 if the .get_modes() helper operation returns
+ * no mode and if the connector status is connector_status_connected or
+ * connector_status_unknown. There is no need to call
+ * drm_add_modes_noedid() manually in that case.
+ *
* Virtual drivers that just want some standard VESA mode with a given
* resolution can call drm_add_modes_noedid(), and mark the preferred
* one using drm_set_preferred_mode().
@@ -937,6 +964,48 @@ struct drm_connector_helper_funcs {
*/
enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
struct drm_display_mode *mode);
+
+ /**
+ * @mode_valid_ctx:
+ *
+ * Callback to validate a mode for a connector, irrespective of the
+ * specific display configuration.
+ *
+ * This callback is used by the probe helpers to filter the mode list
+ * (which is usually derived from the EDID data block from the sink).
+ * See e.g. drm_helper_probe_single_connector_modes().
+ *
+ * This function is optional, and is the atomic version of
+ * &drm_connector_helper_funcs.mode_valid.
+ *
+ * To allow for accessing the atomic state of modesetting objects, the
+ * helper libraries always call this with @ctx set to a valid context,
+ * and &drm_mode_config.connection_mutex will always be locked with
+ * that context. This allows taking additional locks as required.
+ *
+ * Even though additional locks may be acquired, this callback is
+ * still expected not to take any constraints into account which would
+ * be influenced by the currently set display state - such constraints
+ * should be handled in the driver's atomic check. For example, if a
+ * connector shares display bandwidth with other connectors then it
+ * would be ok to validate the minimum bandwidth requirement of a mode
+ * against the maximum possible bandwidth of the connector. But it
+ * wouldn't be ok to take the current bandwidth usage of other
+ * connectors into account, as this would change depending on the
+ * display state.
+ *
+ * Returns:
+ * 0 if &drm_connector_helper_funcs.mode_valid_ctx succeeded and wrote
+ * the &enum drm_mode_status value to @status, or a negative error
+ * code otherwise.
+ *
+ */
+ int (*mode_valid_ctx)(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct drm_modeset_acquire_ctx *ctx,
+ enum drm_mode_status *status);
+
/**
* @best_encoder:
*
@@ -981,9 +1050,8 @@ struct drm_connector_helper_funcs {
* NOTE:
*
* This function is called in the check phase of an atomic update. The
- * driver is not allowed to change anything outside of the free-standing
- * state objects passed-in or assembled in the overall &drm_atomic_state
- * update tracking structure.
+ * driver is not allowed to change anything outside of the
+ * &drm_atomic_state update tracking structure passed in.
*
* RETURNS:
*
@@ -993,7 +1061,7 @@ struct drm_connector_helper_funcs {
* for this.
*/
struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
- struct drm_connector_state *connector_state);
+ struct drm_atomic_state *state);
/**
* @atomic_check:
@@ -1034,18 +1102,45 @@ struct drm_connector_helper_funcs {
*
* This hook is to be used by drivers implementing writeback connectors
* that need a point at which to commit the writeback job to the hardware.
- * The writeback_job to commit is available in
- * &drm_connector_state.writeback_job.
+ * The writeback_job to commit is available in the new connector state,
+ * in &drm_connector_state.writeback_job.
*
* This hook is optional.
*
* This callback is used by the atomic modeset helpers.
*/
void (*atomic_commit)(struct drm_connector *connector,
- struct drm_connector_state *state);
+ struct drm_atomic_state *state);
+ /**
+ * @prepare_writeback_job:
+ *
+ * As writeback jobs contain a framebuffer, drivers may need to
+ * prepare and clean them up the same way they can prepare and
+ * clean up framebuffers for planes. This optional connector operation
+ * is used to support the preparation of writeback jobs. The job
+ * prepare operation is called from drm_atomic_helper_prepare_planes()
+ * for &struct drm_writeback_connector connectors only.
+ *
+ * This operation is optional.
+ *
+ * This callback is used by the atomic modeset helpers.
+ */
int (*prepare_writeback_job)(struct drm_writeback_connector *connector,
struct drm_writeback_job *job);
+ /**
+ * @cleanup_writeback_job:
+ *
+ * This optional connector operation is used to support the
+ * cleanup of writeback jobs. The job cleanup operation is called
+ * from the existing drm_writeback_cleanup_job() function, invoked
+ * both when destroying the job as part of an aborted commit, or when
+ * the job completes.
+ *
+ * This operation is optional.
+ *
+ * This callback is used by the atomic modeset helpers.
+ */
void (*cleanup_writeback_job)(struct drm_writeback_connector *connector,
struct drm_writeback_job *job);
};
@@ -1083,8 +1178,11 @@ struct drm_plane_helper_funcs {
* equivalent functionality should be implemented through private
* members in the plane structure.
*
- * Drivers which always have their buffers pinned should use
- * drm_gem_fb_prepare_fb() for this hook.
+ * For GEM drivers that have neither a @prepare_fb nor a @cleanup_fb
+ * hook set, drm_gem_plane_helper_prepare_fb() is called automatically to
+ * implement this. Other drivers which need additional plane processing
+ * can call drm_gem_plane_helper_prepare_fb() from their @prepare_fb
+ * hook.
*
* The helpers will call @cleanup_fb with matching arguments for every
* successful call to this hook.
@@ -1138,9 +1236,8 @@ struct drm_plane_helper_funcs {
* NOTE:
*
* This function is called in the check phase of an atomic update. The
- * driver is not allowed to change anything outside of the free-standing
- * state objects passed-in or assembled in the overall &drm_atomic_state
- * update tracking structure.
+ * driver is not allowed to change anything outside of the
+ * &drm_atomic_state update tracking structure.
*
* RETURNS:
*
@@ -1150,7 +1247,7 @@ struct drm_plane_helper_funcs {
* deadlock.
*/
int (*atomic_check)(struct drm_plane *plane,
- struct drm_plane_state *state);
+ struct drm_atomic_state *state);
/**
* @atomic_update:
@@ -1168,7 +1265,7 @@ struct drm_plane_helper_funcs {
* transitional plane helpers, but it is optional.
*/
void (*atomic_update)(struct drm_plane *plane,
- struct drm_plane_state *old_state);
+ struct drm_atomic_state *state);
/**
* @atomic_disable:
*
@@ -1192,14 +1289,14 @@ struct drm_plane_helper_funcs {
* transitional plane helpers, but it is optional.
*/
void (*atomic_disable)(struct drm_plane *plane,
- struct drm_plane_state *old_state);
+ struct drm_atomic_state *state);
/**
* @atomic_async_check:
*
- * Drivers should set this function pointer to check if the plane state
- * can be updated in a async fashion. Here async means "not vblank
- * synchronized".
+ * Drivers should set this function pointer to check if the plane's
+ * atomic state can be updated in an async fashion. Here async means
+ * "not vblank synchronized".
*
* This hook is called by drm_atomic_async_check() to establish if a
* given update can be committed asynchronously, that is, if it can
@@ -1211,7 +1308,7 @@ struct drm_plane_helper_funcs {
* can not be applied in asynchronous manner.
*/
int (*atomic_async_check)(struct drm_plane *plane,
- struct drm_plane_state *state);
+ struct drm_atomic_state *state);
/**
* @atomic_async_update:
@@ -1227,11 +1324,9 @@ struct drm_plane_helper_funcs {
* update won't happen if there is an outstanding commit modifying
* the same plane.
*
- * Note that unlike &drm_plane_helper_funcs.atomic_update this hook
- * takes the new &drm_plane_state as parameter. When doing async_update
- * drivers shouldn't replace the &drm_plane_state but update the
- * current one with the new plane configurations in the new
- * plane_state.
+ * When doing async_update drivers shouldn't replace the
+ * &drm_plane_state but update the current one with the new plane
+ * configurations in the new plane_state.
*
* Drivers should also swap the framebuffers between current plane
* state (&drm_plane.state) and new_state.
@@ -1250,7 +1345,7 @@ struct drm_plane_helper_funcs {
* for deferring if needed, until a common solution is created.
*/
void (*atomic_async_update)(struct drm_plane *plane,
- struct drm_plane_state *new_state);
+ struct drm_atomic_state *state);
};
/**
@@ -1289,7 +1384,7 @@ struct drm_mode_config_helper_funcs {
* starting to commit the update to the hardware.
*
* After the atomic update is committed to the hardware this hook needs
- * to call drm_atomic_helper_commit_hw_done(). Then wait for the upate
+ * to call drm_atomic_helper_commit_hw_done(). Then wait for the update
* to be executed by the hardware, for example using
* drm_atomic_helper_wait_for_vblanks() or
* drm_atomic_helper_wait_for_flip_done(), and then clean up the old
@@ -1306,6 +1401,27 @@ struct drm_mode_config_helper_funcs {
* drm_atomic_helper_commit_tail().
*/
void (*atomic_commit_tail)(struct drm_atomic_state *state);
+
+ /**
+ * @atomic_commit_setup:
+ *
+ * This hook is used by the default atomic_commit() hook implemented in
+ * drm_atomic_helper_commit() together with the nonblocking helpers (see
+ * drm_atomic_helper_setup_commit()) to extend the DRM commit setup. It
+ * is not used by the atomic helpers.
+ *
+ * This function is called at the end of
+ * drm_atomic_helper_setup_commit(), so once the commit has been
+ * properly set up across the generic DRM object states. It allows
+ * drivers to do some additional commit tracking that isn't related to a
+ * CRTC, plane or connector, tracked in a &drm_private_obj structure.
+ *
+ * Note that the documentation of &drm_private_obj has more details on
+ * how one should implement this.
+ *
+ * This hook is optional.
+ */
+ int (*atomic_commit_setup)(struct drm_atomic_state *state);
};
#endif
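
To illustrate the signature changes above, a minimal sketch of a converted CRTC @atomic_check that looks up its own new state from the full atomic update (the hardware limit is invented):

.. code-block:: c

	static int example_crtc_atomic_check(struct drm_crtc *crtc,
					     struct drm_atomic_state *state)
	{
		/* Fish the per-CRTC new state out of the overall update. */
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_new_crtc_state(state, crtc);

		if (!crtc_state->enable)
			return 0;

		/* Hypothetical hardware limit. */
		if (crtc_state->mode.hdisplay > 4096)
			return -EINVAL;

		return 0;
	}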
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index 4fc9a43ac45a..ec4f543c3d95 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -24,6 +24,8 @@
#ifndef DRM_MODESET_LOCK_H_
#define DRM_MODESET_LOCK_H_
+#include <linux/types.h> /* stackdepot.h is not self-contained */
+#include <linux/stackdepot.h>
#include <linux/ww_mutex.h>
struct drm_modeset_lock;
@@ -32,6 +34,7 @@ struct drm_modeset_lock;
* struct drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx)
* @ww_ctx: base acquire ctx
* @contended: used internally for -EDEADLK handling
+ * @stack_depot: used internally for contention debugging
* @locked: list of held locks
* @trylock_only: trylock mode used in atomic contexts/panic notifiers
* @interruptible: whether interruptible locking should be used.
@@ -52,6 +55,12 @@ struct drm_modeset_acquire_ctx {
struct drm_modeset_lock *contended;
/*
+ * Stack depot for debugging when a contended lock was not backed off
+ * from.
+ */
+ depot_stack_handle_t stack_depot;
+
+ /*
* list of held locks (drm_modeset_lock)
*/
struct list_head locked;
@@ -164,6 +173,8 @@ int drm_modeset_lock_all_ctx(struct drm_device *dev,
* is 0, so no error checking is necessary
*/
#define DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, flags, ret) \
+ if (!drm_drv_uses_atomic_modeset(dev)) \
+ mutex_lock(&dev->mode_config.mutex); \
drm_modeset_acquire_init(&ctx, flags); \
modeset_lock_retry: \
ret = drm_modeset_lock_all_ctx(dev, &ctx); \
@@ -172,6 +183,7 @@ modeset_lock_retry: \
/**
* DRM_MODESET_LOCK_ALL_END - Helper to release and cleanup modeset locks
+ * @dev: drm device
* @ctx: local modeset acquire context, will be dereferenced
* @ret: local ret/err/etc variable to track error status
*
@@ -188,7 +200,7 @@ modeset_lock_retry: \
* to that failure. In both of these cases the code between BEGIN/END will not
* be run, so the failure will reflect the inability to grab the locks.
*/
-#define DRM_MODESET_LOCK_ALL_END(ctx, ret) \
+#define DRM_MODESET_LOCK_ALL_END(dev, ctx, ret) \
modeset_lock_fail: \
if (ret == -EDEADLK) { \
ret = drm_modeset_backoff(&ctx); \
@@ -196,6 +208,8 @@ modeset_lock_fail: \
goto modeset_lock_retry; \
} \
drm_modeset_drop_locks(&ctx); \
- drm_modeset_acquire_fini(&ctx);
+ drm_modeset_acquire_fini(&ctx); \
+ if (!drm_drv_uses_atomic_modeset(dev)) \
+ mutex_unlock(&dev->mode_config.mutex);
#endif /* DRM_MODESET_LOCK_H_ */
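
With @dev now threaded through both macros, a caller looks like the following sketch (the body between the two macros is a placeholder):

.. code-block:: c

	static int example_under_modeset_locks(struct drm_device *dev)
	{
		struct drm_modeset_acquire_ctx ctx;
		int ret;

		DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx,
					   DRM_MODESET_ACQUIRE_INTERRUPTIBLE,
					   ret);
		/* ... work that needs all modeset locks ... */
		DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);

		return ret;
	}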
diff --git a/include/drm/drm_module.h b/include/drm/drm_module.h
new file mode 100644
index 000000000000..4db1ae03d9a5
--- /dev/null
+++ b/include/drm/drm_module.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_MODULE_H
+#define DRM_MODULE_H
+
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_drv.h>
+
+/**
+ * DOC: overview
+ *
+ * This library provides helpers for registering DRM drivers during module
+ * initialization and shutdown. The provided helpers act like bus-specific
+ * module helpers, such as module_pci_driver(), but respect additional
+ * parameters that control DRM driver registration.
+ *
+ * Below is an example of initializing a DRM driver for a device on the
+ * PCI bus.
+ *
+ * .. code-block:: c
+ *
+ * struct pci_driver my_pci_drv = {
+ * };
+ *
+ * drm_module_pci_driver(my_pci_drv);
+ *
+ * The generated code will test if DRM drivers are enabled and register
+ * the PCI driver my_pci_drv. For more complex module initialization, you
+ * can still use module_init() and module_exit() in your driver.
+ */
+
+/*
+ * PCI drivers
+ */
+
+static inline int __init drm_pci_register_driver(struct pci_driver *pci_drv)
+{
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
+ return pci_register_driver(pci_drv);
+}
+
+/**
+ * drm_module_pci_driver - Register a DRM driver for PCI-based devices
+ * @__pci_drv: the PCI driver structure
+ *
+ * Registers a DRM driver for devices on the PCI bus. The helper
+ * macro behaves like module_pci_driver() but tests the state of
+ * drm_firmware_drivers_only(). For more complex module initialization,
+ * use module_init() and module_exit() directly.
+ *
+ * Each module may only use this macro once. Calling it replaces
+ * module_init() and module_exit().
+ */
+#define drm_module_pci_driver(__pci_drv) \
+ module_driver(__pci_drv, drm_pci_register_driver, pci_unregister_driver)
+
+static inline int __init
+drm_pci_register_driver_if_modeset(struct pci_driver *pci_drv, int modeset)
+{
+ if (drm_firmware_drivers_only() && modeset == -1)
+ return -ENODEV;
+ if (modeset == 0)
+ return -ENODEV;
+
+ return pci_register_driver(pci_drv);
+}
+
+static inline void __exit
+drm_pci_unregister_driver_if_modeset(struct pci_driver *pci_drv, int modeset)
+{
+ pci_unregister_driver(pci_drv);
+}
+
+/**
+ * drm_module_pci_driver_if_modeset - Register a DRM driver for PCI-based devices
+ * @__pci_drv: the PCI driver structure
+ * @__modeset: an additional parameter that disables the driver
+ *
+ * This macro is deprecated and only provided for existing drivers. For
+ * new drivers, use drm_module_pci_driver().
+ *
+ * Registers a DRM driver for devices on the PCI bus. The helper macro
+ * behaves like drm_module_pci_driver() with an additional driver-specific
+ * flag. If __modeset is 0, the driver is disabled; if __modeset is
+ * -1, the driver state depends on the global DRM state. For all other
+ * values, the PCI driver is enabled. The default should be -1.
+ */
+#define drm_module_pci_driver_if_modeset(__pci_drv, __modeset) \
+ module_driver(__pci_drv, drm_pci_register_driver_if_modeset, \
+ drm_pci_unregister_driver_if_modeset, __modeset)
+
+/*
+ * Platform drivers
+ */
+
+static inline int __init
+drm_platform_driver_register(struct platform_driver *platform_drv)
+{
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
+ return platform_driver_register(platform_drv);
+}
+
+/**
+ * drm_module_platform_driver - Register a DRM driver for platform devices
+ * @__platform_drv: the platform driver structure
+ *
+ * Registers a DRM driver for devices on the platform bus. The helper
+ * macro behaves like module_platform_driver() but tests the state of
+ * drm_firmware_drivers_only(). For more complex module initialization,
+ * use module_init() and module_exit() directly.
+ *
+ * Each module may only use this macro once. Calling it replaces
+ * module_init() and module_exit().
+ */
+#define drm_module_platform_driver(__platform_drv) \
+ module_driver(__platform_drv, drm_platform_driver_register, \
+ platform_driver_unregister)
+
+#endif
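
The deprecated variant is typically paired with a module parameter; a hedged sketch (driver name and fields are placeholders):

.. code-block:: c

	static int example_modeset = -1;
	module_param_named(modeset, example_modeset, int, 0400);

	static struct pci_driver example_pci_driver = {
		.name = "example",
		/* .id_table, .probe and .remove elided */
	};

	/*
	 * Skips registration if modeset == 0, or if modeset == -1 and
	 * firmware drivers are enforced via drm_firmware_drivers_only().
	 */
	drm_module_pci_driver_if_modeset(example_pci_driver, example_modeset);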
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index b9b093add92e..10ab58c40746 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -49,6 +49,13 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
struct drm_bridge **bridge);
int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
const struct device_node *port2);
+int drm_of_lvds_get_data_mapping(const struct device_node *port);
+int drm_of_get_data_lanes_count(const struct device_node *endpoint,
+ const unsigned int min, const unsigned int max);
+int drm_of_get_data_lanes_count_ep(const struct device_node *port,
+ int port_reg, int reg,
+ const unsigned int min,
+ const unsigned int max);
#else
static inline uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
struct device_node *port)
@@ -98,6 +105,28 @@ drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
{
return -EINVAL;
}
+
+static inline int
+drm_of_lvds_get_data_mapping(const struct device_node *port)
+{
+ return -EINVAL;
+}
+
+static inline int
+drm_of_get_data_lanes_count(const struct device_node *endpoint,
+ const unsigned int min, const unsigned int max)
+{
+ return -EINVAL;
+}
+
+static inline int
+drm_of_get_data_lanes_count_ep(const struct device_node *port,
+ int port_reg, int reg,
+ const unsigned int min,
+ const unsigned int max)
+{
+ return -EINVAL;
+}
#endif
/*
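A hedged sketch of how a bridge driver might use the new data-lanes helpers (node layout and lane bounds are illustrative):

.. code-block:: c

	static int example_count_lanes(struct device *dev)
	{
		/* Endpoint 0 on port 0; accept between 1 and 4 lanes. */
		int lanes = drm_of_get_data_lanes_count_ep(dev->of_node,
							   0, 0, 1, 4);

		if (lanes < 0)
			return dev_err_probe(dev, lanes,
					     "invalid data-lanes property\n");

		dev_dbg(dev, "%d data lanes\n", lanes);
		return 0;
	}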
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 121f7aabccd1..994bfcdd84c5 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -29,12 +29,15 @@
#include <linux/list.h>
struct backlight_device;
+struct dentry;
struct device_node;
struct drm_connector;
struct drm_device;
struct drm_panel;
struct display_timing;
+enum drm_panel_orientation;
+
/**
* struct drm_panel_funcs - perform operations on a given panel
*
@@ -62,8 +65,8 @@ struct display_timing;
* the panel. This is the job of the .unprepare() function.
*
* Backlight can be handled automatically if configured using
- * drm_panel_of_backlight(). Then the driver does not need to implement the
- * functionality to enable/disable backlight.
+ * drm_panel_of_backlight() or drm_panel_dp_aux_backlight(). Then the driver
+ * does not need to implement the functionality to enable/disable backlight.
*/
struct drm_panel_funcs {
/**
@@ -114,6 +117,15 @@ struct drm_panel_funcs {
struct drm_connector *connector);
/**
+ * @get_orientation:
+ *
+ * Return the panel orientation set by device tree or EDID.
+ *
+ * This function is optional.
+ */
+ enum drm_panel_orientation (*get_orientation)(struct drm_panel *panel);
+
+ /**
* @get_timings:
*
* Copy display timings into the provided array and return
@@ -123,6 +135,13 @@ struct drm_panel_funcs {
*/
int (*get_timings)(struct drm_panel *panel, unsigned int num_timings,
struct display_timing *timings);
+
+ /**
+ * @debugfs_init:
+ *
+ * Allows panels to create panel-specific debugfs files.
+ */
+ void (*debugfs_init)(struct drm_panel *panel, struct dentry *root);
};
/**
@@ -142,8 +161,8 @@ struct drm_panel {
* Backlight device, used to turn on backlight after the call
* to enable(), and to turn off backlight before the call to
* disable().
- * backlight is set by drm_panel_of_backlight() and drivers
- * shall not assign it.
+ * backlight is set by drm_panel_of_backlight() or
+ * drm_panel_dp_aux_backlight() and drivers shall not assign it.
*/
struct backlight_device *backlight;
@@ -175,12 +194,9 @@ void drm_panel_init(struct drm_panel *panel, struct device *dev,
const struct drm_panel_funcs *funcs,
int connector_type);
-int drm_panel_add(struct drm_panel *panel);
+void drm_panel_add(struct drm_panel *panel);
void drm_panel_remove(struct drm_panel *panel);
-int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector);
-void drm_panel_detach(struct drm_panel *panel);
-
int drm_panel_prepare(struct drm_panel *panel);
int drm_panel_unprepare(struct drm_panel *panel);
@@ -191,14 +207,23 @@ int drm_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector
#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL)
struct drm_panel *of_drm_find_panel(const struct device_node *np);
+int of_drm_get_panel_orientation(const struct device_node *np,
+ enum drm_panel_orientation *orientation);
#else
static inline struct drm_panel *of_drm_find_panel(const struct device_node *np)
{
return ERR_PTR(-ENODEV);
}
+
+static inline int of_drm_get_panel_orientation(const struct device_node *np,
+ enum drm_panel_orientation *orientation)
+{
+ return -ENODEV;
+}
#endif
-#if IS_REACHABLE(CONFIG_BACKLIGHT_CLASS_DEVICE)
+#if IS_ENABLED(CONFIG_DRM_PANEL) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
+ (IS_MODULE(CONFIG_DRM) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE)))
int drm_panel_of_backlight(struct drm_panel *panel);
#else
static inline int drm_panel_of_backlight(struct drm_panel *panel)
diff --git a/include/drm/drm_pci.h b/include/drm/drm_pci.h
deleted file mode 100644
index 9031e217b506..000000000000
--- a/include/drm/drm_pci.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Internal Header for the Direct Rendering Manager
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009-2010, Code Aurora Forum.
- * All rights reserved.
- *
- * Author: Rickard E. (Rik) Faith <faith@valinux.com>
- * Author: Gareth Hughes <gareth@valinux.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _DRM_PCI_H_
-#define _DRM_PCI_H_
-
-#include <linux/pci.h>
-
-struct drm_dma_handle;
-struct drm_device;
-struct drm_driver;
-struct drm_master;
-
-#ifdef CONFIG_PCI
-
-struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
- size_t align);
-void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah);
-
-int drm_get_pci_dev(struct pci_dev *pdev,
- const struct pci_device_id *ent,
- struct drm_driver *driver);
-
-#else
-
-static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev,
- size_t size, size_t align)
-{
- return NULL;
-}
-
-static inline void drm_pci_free(struct drm_device *dev,
- struct drm_dma_handle *dmah)
-{
-}
-
-static inline int drm_get_pci_dev(struct pci_dev *pdev,
- const struct pci_device_id *ent,
- struct drm_driver *driver)
-{
- return -ENOSYS;
-}
-
-#endif
-
-#endif /* _DRM_PCI_H_ */
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 3f396d94afe4..447e664e49d5 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -35,10 +35,15 @@ struct drm_crtc;
struct drm_printer;
struct drm_modeset_acquire_ctx;
+enum drm_scaling_filter {
+ DRM_SCALING_FILTER_DEFAULT,
+ DRM_SCALING_FILTER_NEAREST_NEIGHBOR,
+};
+
/**
* struct drm_plane_state - mutable plane state
*
- * Please not that the destination coordinates @crtc_x, @crtc_y, @crtc_h and
+ * Please note that the destination coordinates @crtc_x, @crtc_y, @crtc_h and
* @crtc_w and the source coordinates @src_x, @src_y, @src_h and @src_w are the
* raw coordinates provided by userspace. Drivers should use
* drm_atomic_helper_check_plane_state() and only use the derived rectangles in
@@ -69,13 +74,11 @@ struct drm_plane_state {
*
* Optional fence to wait for before scanning out @fb. The core atomic
* code will set this when userspace is using explicit fencing. Do not
- * write this field directly for a driver's implicit fence, use
- * drm_atomic_set_fence_for_plane() to ensure that an explicit fence is
- * preserved.
+ * write this field directly for a driver's implicit fence.
*
* Drivers should store any implicit fence in this from their
- * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_fb_prepare_fb()
- * and drm_gem_fb_simple_display_pipe_prepare_fb() for suitable helpers.
+ * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_plane_helper_prepare_fb()
+ * and drm_gem_simple_display_pipe_prepare_fb() for suitable helpers.
*/
struct dma_fence *fence;
@@ -181,6 +184,9 @@ struct drm_plane_state {
* since last plane update) as an array of &drm_mode_rect in framebuffer
* coordinates of the attached framebuffer. Note that unlike plane src,
* damage clips are not in 16.16 fixed point.
+ *
+ * See drm_plane_get_damage_clips() and
+ * drm_plane_get_damage_clips_count() for accessing these.
*/
struct drm_property_blob *fb_damage_clips;
@@ -215,6 +221,13 @@ struct drm_plane_state {
bool visible;
/**
+ * @scaling_filter:
+ *
+ * Scaling filter to be applied
+ */
+ enum drm_scaling_filter scaling_filter;
+
+ /**
* @commit: Tracks the pending commit to prevent use-after-free conditions,
* and for async plane updates.
*
@@ -501,7 +514,7 @@ struct drm_plane_funcs {
* This optional hook is used for the DRM to determine if the given
* format/modifier combination is valid for the plane. This allows the
* DRM to generate the correct format bitmask (which formats apply to
- * which modifier), and to valdiate modifiers at atomic_check time.
+ * which modifier), and to validate modifiers at atomic_check time.
*
* If not present, then any modifier in the plane's modifier
* list is allowed with any of the plane's formats.
@@ -526,10 +539,14 @@ struct drm_plane_funcs {
*
* For compatibility with legacy userspace, only overlay planes are made
* available to userspace by default. Userspace clients may set the
- * DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they
+ * &DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they
* wish to receive a universal plane list containing all plane types. See also
* drm_for_each_legacy_plane().
*
+ * In addition to setting each plane's type, drivers need to set up the
+ * &drm_crtc.primary and optionally &drm_crtc.cursor pointers for legacy
+ * IOCTLs. See drm_crtc_init_with_planes().
+ *
* WARNING: The values of this enum is UABI since they're exposed in the "type"
* property.
*/
@@ -545,19 +562,20 @@ enum drm_plane_type {
/**
* @DRM_PLANE_TYPE_PRIMARY:
*
- * Primary planes represent a "main" plane for a CRTC. Primary planes
- * are the planes operated upon by CRTC modesetting and flipping
- * operations described in the &drm_crtc_funcs.page_flip and
- * &drm_crtc_funcs.set_config hooks.
+ * A primary plane attached to a CRTC is the most likely to be able to
+ * light up the CRTC when no scaling/cropping is used and the plane
+ * covers the whole CRTC.
*/
DRM_PLANE_TYPE_PRIMARY,
/**
* @DRM_PLANE_TYPE_CURSOR:
*
- * Cursor planes represent a "cursor" plane for a CRTC. Cursor planes
- * are the planes operated upon by the DRM_IOCTL_MODE_CURSOR and
- * DRM_IOCTL_MODE_CURSOR2 IOCTLs.
+ * A cursor plane attached to a CRTC is more likely to be able to be
+ * enabled when no scaling/cropping is used and the framebuffer has the
+ * size indicated by &drm_mode_config.cursor_width and
+ * &drm_mode_config.cursor_height. Additionally, if the driver doesn't
+ * support modifiers, the framebuffer should have a linear layout.
*/
DRM_PLANE_TYPE_CURSOR,
};
@@ -613,7 +631,7 @@ struct drm_plane {
unsigned int format_count;
/**
* @format_default: driver hasn't supplied supported formats for the
- * plane. Used by the drm_plane_init compatibility wrapper only.
+ * plane. Used by the non-atomic driver compatibility wrapper only.
*/
bool format_default;
@@ -724,6 +742,12 @@ struct drm_plane {
* See drm_plane_create_color_properties().
*/
struct drm_property *color_range_property;
+
+ /**
+ * @scaling_filter_property: property to apply a particular filter while
+ * scaling.
+ */
+ struct drm_property *scaling_filter_property;
};
#define obj_to_plane(x) container_of(x, struct drm_plane, base)
@@ -738,14 +762,97 @@ int drm_universal_plane_init(struct drm_device *dev,
const uint64_t *format_modifiers,
enum drm_plane_type type,
const char *name, ...);
-int drm_plane_init(struct drm_device *dev,
- struct drm_plane *plane,
- uint32_t possible_crtcs,
- const struct drm_plane_funcs *funcs,
- const uint32_t *formats, unsigned int format_count,
- bool is_primary);
void drm_plane_cleanup(struct drm_plane *plane);
+__printf(10, 11)
+void *__drmm_universal_plane_alloc(struct drm_device *dev,
+ size_t size, size_t offset,
+ uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats,
+ unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type plane_type,
+ const char *name, ...);
+
+/**
+ * drmm_universal_plane_alloc - Allocate and initialize a universal plane object
+ * @dev: DRM device
+ * @type: the type of the struct which contains struct &drm_plane
+ * @member: the name of the &drm_plane within @type
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
+ * @format_count: number of elements in @formats
+ * @format_modifiers: array of struct drm_format modifiers terminated by
+ * DRM_FORMAT_MOD_INVALID
+ * @plane_type: type of plane (overlay, primary, cursor)
+ * @name: printf style format string for the plane name, or NULL for default name
+ *
+ * Allocates and initializes a plane object of type @type. Cleanup is
+ * automatically handled through registering drm_plane_cleanup() with
+ * drmm_add_action().
+ *
+ * The @drm_plane_funcs.destroy hook must be NULL.
+ *
+ * Drivers that only support the DRM_FORMAT_MOD_LINEAR modifier may set
+ * @format_modifiers to NULL. The plane will advertise the linear modifier.
+ *
+ * Returns:
+ * Pointer to new plane, or ERR_PTR on failure.
+ */
+#define drmm_universal_plane_alloc(dev, type, member, possible_crtcs, funcs, formats, \
+ format_count, format_modifiers, plane_type, name, ...) \
+ ((type *)__drmm_universal_plane_alloc(dev, sizeof(type), \
+ offsetof(type, member), \
+ possible_crtcs, funcs, formats, \
+ format_count, format_modifiers, \
+ plane_type, name, ##__VA_ARGS__))
+
+__printf(10, 11)
+void *__drm_universal_plane_alloc(struct drm_device *dev,
+ size_t size, size_t offset,
+ uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats,
+ unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type plane_type,
+ const char *name, ...);
+
+/**
+ * drm_universal_plane_alloc() - Allocate and initialize a universal plane object
+ * @dev: DRM device
+ * @type: the type of the struct which contains struct &drm_plane
+ * @member: the name of the &drm_plane within @type
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
+ * @format_count: number of elements in @formats
+ * @format_modifiers: array of struct drm_format modifiers terminated by
+ * DRM_FORMAT_MOD_INVALID
+ * @plane_type: type of plane (overlay, primary, cursor)
+ * @name: printf style format string for the plane name, or NULL for default name
+ *
+ * Allocates and initializes a plane object of type @type. The caller
+ * is responsible for releasing the allocated memory with kfree().
+ *
+ * Drivers are encouraged to use drmm_universal_plane_alloc() instead.
+ *
+ * Drivers that only support the DRM_FORMAT_MOD_LINEAR modifier may set
+ * @format_modifiers to NULL. The plane will advertise the linear modifier.
+ *
+ * Returns:
+ * Pointer to new plane, or ERR_PTR on failure.
+ */
+#define drm_universal_plane_alloc(dev, type, member, possible_crtcs, funcs, formats, \
+ format_count, format_modifiers, plane_type, name, ...) \
+ ((type *)__drm_universal_plane_alloc(dev, sizeof(type), \
+ offsetof(type, member), \
+ possible_crtcs, funcs, formats, \
+ format_count, format_modifiers, \
+ plane_type, name, ##__VA_ARGS__))
+
/**
* drm_plane_index - find the index of a registered plane
* @plane: plane to find index for
@@ -829,37 +936,14 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
bool drm_any_plane_has_format(struct drm_device *dev,
u32 format, u64 modifier);
-/**
- * drm_plane_get_damage_clips_count - Returns damage clips count.
- * @state: Plane state.
- *
- * Simple helper to get the number of &drm_mode_rect clips set by user-space
- * during plane update.
- *
- * Return: Number of clips in plane fb_damage_clips blob property.
- */
-static inline unsigned int
-drm_plane_get_damage_clips_count(const struct drm_plane_state *state)
-{
- return (state && state->fb_damage_clips) ?
- state->fb_damage_clips->length/sizeof(struct drm_mode_rect) : 0;
-}
-/**
- * drm_plane_get_damage_clips - Returns damage clips.
- * @state: Plane state.
- *
- * Note that this function returns uapi type &drm_mode_rect. Drivers might
- * instead be interested in internal &drm_rect which can be obtained by calling
- * drm_helper_get_plane_damage_clips().
- *
- * Return: Damage clips in plane fb_damage_clips blob property.
- */
-static inline struct drm_mode_rect *
-drm_plane_get_damage_clips(const struct drm_plane_state *state)
-{
- return (struct drm_mode_rect *)((state && state->fb_damage_clips) ?
- state->fb_damage_clips->data : NULL);
-}
+void drm_plane_enable_fb_damage_clips(struct drm_plane *plane);
+unsigned int
+drm_plane_get_damage_clips_count(const struct drm_plane_state *state);
+struct drm_mode_rect *
+drm_plane_get_damage_clips(const struct drm_plane_state *state);
+
+int drm_plane_create_scaling_filter_property(struct drm_plane *plane,
+ unsigned int supported_filters);
#endif
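
To show the managed allocation added above, a sketch of a driver embedding &drm_plane in a private structure (names, funcs and the format list are illustrative):

.. code-block:: c

	struct example_plane {
		struct drm_plane base;
		/* driver-private state */
	};

	static const u32 example_formats[] = { DRM_FORMAT_XRGB8888 };

	static struct example_plane *
	example_primary_create(struct drm_device *dev,
			       const struct drm_plane_funcs *funcs)
	{
		/* drm_plane_cleanup() is registered automatically. */
		return drmm_universal_plane_alloc(dev, struct example_plane,
						  base, BIT(0), funcs,
						  example_formats,
						  ARRAY_SIZE(example_formats),
						  NULL, DRM_PLANE_TYPE_PRIMARY,
						  NULL);
	}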
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 331ebd60b3a3..ff83d2621687 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -24,21 +24,35 @@
#ifndef DRM_PLANE_HELPER_H
#define DRM_PLANE_HELPER_H
-#include <drm/drm_rect.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_modeset_helper.h>
+#include <linux/types.h>
-/*
- * Drivers that don't allow primary plane scaling may pass this macro in place
- * of the min/max scale parameters of the update checker function.
+struct drm_crtc;
+struct drm_framebuffer;
+struct drm_modeset_acquire_ctx;
+struct drm_plane;
+
+int drm_plane_helper_update_primary(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx);
+int drm_plane_helper_disable_primary(struct drm_plane *plane,
+ struct drm_modeset_acquire_ctx *ctx);
+void drm_plane_helper_destroy(struct drm_plane *plane);
+int drm_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state);
+
+/**
+ * DRM_PLANE_NON_ATOMIC_FUNCS - Default plane functions for non-atomic drivers
*
- * Due to src being in 16.16 fixed point and dest being in integer pixels,
- * 1<<16 represents no scaling.
+ * This macro initializes plane functions for non-atomic drivers to default
+ * values. Non-atomic interfaces are deprecated and should not be used in new
+ * drivers.
*/
-#define DRM_PLANE_HELPER_NO_SCALING (1<<16)
-
-void drm_primary_helper_destroy(struct drm_plane *plane);
-extern const struct drm_plane_funcs drm_primary_helper_funcs;
+#define DRM_PLANE_NON_ATOMIC_FUNCS \
+ .update_plane = drm_plane_helper_update_primary, \
+ .disable_plane = drm_plane_helper_disable_primary, \
+ .destroy = drm_plane_helper_destroy
#endif
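
A short usage sketch of the replacement for the removed drm_primary_helper_funcs (the struct name is illustrative); a legacy driver then passes this to drm_universal_plane_init() as before:

.. code-block:: c

	static const struct drm_plane_funcs example_primary_funcs = {
		DRM_PLANE_NON_ATOMIC_FUNCS,
	};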
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index 9af7422b44cf..2a1d01e5b56b 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -54,6 +54,7 @@ struct device;
struct dma_buf_export_info;
struct dma_buf;
struct dma_buf_attachment;
+struct iosys_map;
enum dma_data_direction;
@@ -82,16 +83,19 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir);
-void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf);
-void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
+int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map);
+void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map);
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
-struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages);
+struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
+ struct page **pages, unsigned int nr_pages);
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
int flags);
+unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt);
+
/* helper functions for importing */
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
struct dma_buf *dma_buf,
@@ -101,8 +105,9 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
-int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
- dma_addr_t *addrs, int max_pages);
-
+int drm_prime_sg_to_page_array(struct sg_table *sgt, struct page **pages,
+ int max_pages);
+int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
+ int max_pages);
#endif /* __DRM_PRIME_H__ */
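
The vmap path now reports its mapping through a struct iosys_map rather than a bare pointer; a minimal importer sketch under that assumption:

.. code-block:: c

	#include <linux/iosys-map.h>

	static int example_touch_dmabuf(struct dma_buf *buf)
	{
		struct iosys_map map;
		int ret;

		ret = drm_gem_dmabuf_vmap(buf, &map);
		if (ret)
			return ret;

		/* The iosys_map accessors work for both I/O and system memory. */
		iosys_map_memset(&map, 0, 0, 16);

		drm_gem_dmabuf_vunmap(buf, &map);
		return 0;
	}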
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index 8f99d389792d..a44fb7ef257f 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -31,11 +31,12 @@
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/debugfs.h>
+#include <linux/dynamic_debug.h>
#include <drm/drm.h>
/* Do *not* use outside of drm_print.[ch]! */
-extern unsigned int __drm_debug;
+extern unsigned long __drm_debug;
/**
* DOC: print
@@ -275,66 +276,93 @@ static inline struct drm_printer drm_err_printer(const char *prefix)
*
*/
enum drm_debug_category {
+ /* These names must match those in DYNAMIC_DEBUG_CLASSBITS */
/**
* @DRM_UT_CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c,
* drm_memory.c, ...
*/
- DRM_UT_CORE = 0x01,
+ DRM_UT_CORE,
/**
* @DRM_UT_DRIVER: Used in the vendor specific part of the driver: i915,
* radeon, ... macro.
*/
- DRM_UT_DRIVER = 0x02,
+ DRM_UT_DRIVER,
/**
* @DRM_UT_KMS: Used in the modesetting code.
*/
- DRM_UT_KMS = 0x04,
+ DRM_UT_KMS,
/**
* @DRM_UT_PRIME: Used in the prime code.
*/
- DRM_UT_PRIME = 0x08,
+ DRM_UT_PRIME,
/**
* @DRM_UT_ATOMIC: Used in the atomic code.
*/
- DRM_UT_ATOMIC = 0x10,
+ DRM_UT_ATOMIC,
/**
* @DRM_UT_VBL: Used for verbose debug message in the vblank code.
*/
- DRM_UT_VBL = 0x20,
+ DRM_UT_VBL,
/**
* @DRM_UT_STATE: Used for verbose atomic state debugging.
*/
- DRM_UT_STATE = 0x40,
+ DRM_UT_STATE,
/**
* @DRM_UT_LEASE: Used in the lease code.
*/
- DRM_UT_LEASE = 0x80,
+ DRM_UT_LEASE,
/**
* @DRM_UT_DP: Used in the DP code.
*/
- DRM_UT_DP = 0x100,
+ DRM_UT_DP,
+ /**
+ * @DRM_UT_DRMRES: Used in the drm managed resources code.
+ */
+ DRM_UT_DRMRES
};
-static inline bool drm_debug_enabled(enum drm_debug_category category)
+static inline bool drm_debug_enabled_raw(enum drm_debug_category category)
{
- return unlikely(__drm_debug & category);
+ return unlikely(__drm_debug & BIT(category));
}
+#define drm_debug_enabled_instrumented(category) \
+ ({ \
+ pr_debug("todo: is this frequent enough to optimize?\n"); \
+ drm_debug_enabled_raw(category); \
+ })
+
+#if defined(CONFIG_DRM_USE_DYNAMIC_DEBUG)
+/*
+ * the drm.debug API uses dyndbg, so each drm_*dbg macro/callsite gets
+ * a descriptor, and only enabled callsites are reachable. They use
+ * the private macro to avoid re-testing the enable-bit.
+ */
+#define __drm_debug_enabled(category) true
+#define drm_debug_enabled(category) drm_debug_enabled_instrumented(category)
+#else
+#define __drm_debug_enabled(category) drm_debug_enabled_raw(category)
+#define drm_debug_enabled(category) drm_debug_enabled_raw(category)
+#endif
+
/*
* struct device based logging
*
- * Prefer drm_device based logging over device or prink based logging.
+ * Prefer drm_device based logging over device or printk based logging.
*/
__printf(3, 4)
void drm_dev_printk(const struct device *dev, const char *level,
const char *format, ...);
-__printf(3, 4)
-void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
- const char *format, ...);
+struct _ddebug;
+__printf(4, 5)
+void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev,
+ enum drm_debug_category category, const char *format, ...);
/**
- * Error output.
+ * DRM_DEV_ERROR() - Error output.
+ *
+ * NOTE: this is deprecated in favor of drm_err() or dev_err().
*
* @dev: device pointer
* @fmt: printf() like format string.
@@ -343,10 +371,15 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
drm_dev_printk(dev, KERN_ERR, "*ERROR* " fmt, ##__VA_ARGS__)
/**
- * Rate limited error output. Like DRM_ERROR() but won't flood the log.
+ * DRM_DEV_ERROR_RATELIMITED() - Rate limited error output.
+ *
+ * NOTE: this is deprecated in favor of drm_err_ratelimited() or
+ * dev_err_ratelimited().
*
* @dev: device pointer
* @fmt: printf() like format string.
+ *
+ * Like DRM_ERROR() but won't flood the log.
*/
#define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...) \
({ \
@@ -358,9 +391,11 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \
})
+/* NOTE: this is deprecated in favor of drm_info() or dev_info(). */
#define DRM_DEV_INFO(dev, fmt, ...) \
drm_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_info_once() or dev_info_once(). */
#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \
({ \
static bool __print_once __read_mostly; \
@@ -370,54 +405,45 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
} \
})
+#if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG)
+#define drm_dev_dbg(dev, cat, fmt, ...) \
+ __drm_dev_dbg(NULL, dev, cat, fmt, ##__VA_ARGS__)
+#else
+#define drm_dev_dbg(dev, cat, fmt, ...) \
+ _dynamic_func_call_cls(cat, fmt, __drm_dev_dbg, \
+ dev, cat, fmt, ##__VA_ARGS__)
+#endif
+
/**
- * Debug output.
+ * DRM_DEV_DEBUG() - Debug output for generic drm code
+ *
+ * NOTE: this is deprecated in favor of drm_dbg_core().
*
* @dev: device pointer
* @fmt: printf() like format string.
*/
#define DRM_DEV_DEBUG(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_CORE, fmt, ##__VA_ARGS__)
+/**
+ * DRM_DEV_DEBUG_DRIVER() - Debug output for vendor specific part of the driver
+ *
+ * NOTE: this is deprecated in favor of drm_dbg() or dev_dbg().
+ *
+ * @dev: device pointer
+ * @fmt: printf() like format string.
+ */
#define DRM_DEV_DEBUG_DRIVER(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_KMS(dev, fmt, ...) \
- drm_dev_dbg(dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_PRIME(dev, fmt, ...) \
- drm_dev_dbg(dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, ...) \
- drm_dev_dbg(dev, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_VBL(dev, fmt, ...) \
- drm_dev_dbg(dev, DRM_UT_VBL, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_DP(dev, fmt, ...) \
- drm_dev_dbg(dev, DRM_UT_DP, fmt, ## __VA_ARGS__)
-
-#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...) \
-({ \
- static DEFINE_RATELIMIT_STATE(_rs, \
- DEFAULT_RATELIMIT_INTERVAL, \
- DEFAULT_RATELIMIT_BURST); \
- if (__ratelimit(&_rs)) \
- drm_dev_dbg(dev, category, fmt, ##__VA_ARGS__); \
-})
-
/**
- * Rate limited debug output. Like DRM_DEBUG() but won't flood the log.
+ * DRM_DEV_DEBUG_KMS() - Debug output for modesetting code
+ *
+ * NOTE: this is deprecated in favor of drm_dbg_kms().
*
* @dev: device pointer
* @fmt: printf() like format string.
*/
-#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, ...) \
- _DEV_DRM_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_CORE, \
- fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, ...) \
- _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_DRIVER, \
- fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, ...) \
- _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_KMS, \
- fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, ...) \
- _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_PRIME, \
- fmt, ##__VA_ARGS__)
+#define DRM_DEV_DEBUG_KMS(dev, fmt, ...) \
+ drm_dev_dbg(dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)
/*
* struct drm_device based logging
@@ -461,24 +487,27 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
#define drm_dbg_core(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_CORE, fmt, ##__VA_ARGS__)
-#define drm_dbg(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_CORE, fmt, ##__VA_ARGS__)
+#define drm_dbg_driver(drm, fmt, ...) \
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
#define drm_dbg_kms(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__)
#define drm_dbg_prime(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
#define drm_dbg_atomic(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
#define drm_dbg_vbl(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_VBL, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_VBL, fmt, ##__VA_ARGS__)
#define drm_dbg_state(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_STATE, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_STATE, fmt, ##__VA_ARGS__)
#define drm_dbg_lease(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_LEASE, fmt, ##__VA_ARGS__)
#define drm_dbg_dp(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_DP, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DP, fmt, ##__VA_ARGS__)
+#define drm_dbg_drmres(drm, fmt, ...) \
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRMRES, fmt, ##__VA_ARGS__)
+#define drm_dbg(drm, fmt, ...) drm_dbg_driver(drm, fmt, ##__VA_ARGS__)
/*
* printk based logging
@@ -486,71 +515,126 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
 * Prefer drm_device based logging over device or printk based logging.
*/
-__printf(2, 3)
-void __drm_dbg(enum drm_debug_category category, const char *format, ...);
+__printf(3, 4)
+void ___drm_dbg(struct _ddebug *desc, enum drm_debug_category category, const char *format, ...);
__printf(1, 2)
void __drm_err(const char *format, ...);
+#if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG)
+#define __drm_dbg(cat, fmt, ...)	___drm_dbg(NULL, cat, fmt, ##__VA_ARGS__)
+#else
+#define __drm_dbg(cat, fmt, ...) \
+ _dynamic_func_call_cls(cat, fmt, ___drm_dbg, \
+ cat, fmt, ##__VA_ARGS__)
+#endif
+
/* Macros to make printk easier */
#define _DRM_PRINTK(once, level, fmt, ...) \
printk##once(KERN_##level "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_info(). */
#define DRM_INFO(fmt, ...) \
_DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_notice(). */
#define DRM_NOTE(fmt, ...) \
_DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_warn(). */
#define DRM_WARN(fmt, ...) \
_DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_info_once(). */
#define DRM_INFO_ONCE(fmt, ...) \
_DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_notice_once(). */
#define DRM_NOTE_ONCE(fmt, ...) \
_DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_warn_once(). */
#define DRM_WARN_ONCE(fmt, ...) \
_DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_err(). */
#define DRM_ERROR(fmt, ...) \
__drm_err(fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_err_ratelimited(). */
#define DRM_ERROR_RATELIMITED(fmt, ...) \
DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_core(NULL, ...). */
#define DRM_DEBUG(fmt, ...) \
__drm_dbg(DRM_UT_CORE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg(NULL, ...). */
#define DRM_DEBUG_DRIVER(fmt, ...) \
__drm_dbg(DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_kms(NULL, ...). */
#define DRM_DEBUG_KMS(fmt, ...) \
__drm_dbg(DRM_UT_KMS, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_prime(NULL, ...). */
#define DRM_DEBUG_PRIME(fmt, ...) \
__drm_dbg(DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_atomic(NULL, ...). */
#define DRM_DEBUG_ATOMIC(fmt, ...) \
__drm_dbg(DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_vbl(NULL, ...). */
#define DRM_DEBUG_VBL(fmt, ...) \
__drm_dbg(DRM_UT_VBL, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_lease(NULL, ...). */
#define DRM_DEBUG_LEASE(fmt, ...) \
__drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_dp(NULL, ...). */
#define DRM_DEBUG_DP(fmt, ...) \
__drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
+#define __DRM_DEFINE_DBG_RATELIMITED(category, drm, fmt, ...) \
+({ \
+ static DEFINE_RATELIMIT_STATE(rs_, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);\
+ const struct drm_device *drm_ = (drm); \
+ \
+ if (drm_debug_enabled(DRM_UT_ ## category) && __ratelimit(&rs_)) \
+ drm_dev_printk(drm_ ? drm_->dev : NULL, KERN_DEBUG, fmt, ## __VA_ARGS__); \
+})
+
+#define drm_dbg_kms_ratelimited(drm, fmt, ...) \
+ __DRM_DEFINE_DBG_RATELIMITED(KMS, drm, fmt, ## __VA_ARGS__)
+
+/* NOTE: this is deprecated in favor of drm_dbg_kms_ratelimited(NULL, ...). */
+#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) drm_dbg_kms_ratelimited(NULL, fmt, ## __VA_ARGS__)
+
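A usage sketch for the new drm_dbg_kms_ratelimited() (the surrounding
function is hypothetical); the static ratelimit state caps output at
DEFAULT_RATELIMIT_BURST messages per DEFAULT_RATELIMIT_INTERVAL per call
site:

static void example_report_underrun(struct drm_device *drm)
{
	/* Called per frame, but logged only a few times per interval. */
	drm_dbg_kms_ratelimited(drm, "FIFO underrun\n");
}
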
+/*
+ * struct drm_device based WARNs
+ *
+ * drm_WARN*() acts like WARN*(), but with the key difference of
+ * using device-specific information so that we know from which device
+ * the warning originates.
+ *
+ * Prefer drm_device based drm_WARN* over regular WARN*
+ */
-#define DRM_DEBUG_RATELIMITED(fmt, ...) \
- DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+/* Helper for struct drm_device based WARNs */
+#define drm_WARN(drm, condition, format, arg...) \
+ WARN(condition, "%s %s: " format, \
+ dev_driver_string((drm)->dev), \
+ dev_name((drm)->dev), ## arg)
-#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, ...) \
- DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+#define drm_WARN_ONCE(drm, condition, format, arg...) \
+ WARN_ONCE(condition, "%s %s: " format, \
+ dev_driver_string((drm)->dev), \
+ dev_name((drm)->dev), ## arg)
-#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) \
- DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+#define drm_WARN_ON(drm, x) \
+ drm_WARN((drm), (x), "%s", \
+ "drm_WARN_ON(" __stringify(x) ")")
-#define DRM_DEBUG_PRIME_RATELIMITED(fmt, ...) \
- DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+#define drm_WARN_ON_ONCE(drm, x) \
+ drm_WARN_ONCE((drm), (x), "%s", \
+ "drm_WARN_ON_ONCE(" __stringify(x) ")")
#endif /* DRM_PRINT_H_ */
diff --git a/include/drm/drm_privacy_screen_consumer.h b/include/drm/drm_privacy_screen_consumer.h
new file mode 100644
index 000000000000..7f66a90d15b7
--- /dev/null
+++ b/include/drm/drm_privacy_screen_consumer.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2020 Red Hat, Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#ifndef __DRM_PRIVACY_SCREEN_CONSUMER_H__
+#define __DRM_PRIVACY_SCREEN_CONSUMER_H__
+
+#include <linux/device.h>
+#include <drm/drm_connector.h>
+
+struct drm_privacy_screen;
+
+#if IS_ENABLED(CONFIG_DRM_PRIVACY_SCREEN)
+struct drm_privacy_screen *drm_privacy_screen_get(struct device *dev,
+ const char *con_id);
+void drm_privacy_screen_put(struct drm_privacy_screen *priv);
+
+int drm_privacy_screen_set_sw_state(struct drm_privacy_screen *priv,
+ enum drm_privacy_screen_status sw_state);
+void drm_privacy_screen_get_state(struct drm_privacy_screen *priv,
+ enum drm_privacy_screen_status *sw_state_ret,
+ enum drm_privacy_screen_status *hw_state_ret);
+
+int drm_privacy_screen_register_notifier(struct drm_privacy_screen *priv,
+ struct notifier_block *nb);
+int drm_privacy_screen_unregister_notifier(struct drm_privacy_screen *priv,
+ struct notifier_block *nb);
+#else
+static inline struct drm_privacy_screen *drm_privacy_screen_get(struct device *dev,
+ const char *con_id)
+{
+ return ERR_PTR(-ENODEV);
+}
+static inline void drm_privacy_screen_put(struct drm_privacy_screen *priv)
+{
+}
+static inline int drm_privacy_screen_set_sw_state(struct drm_privacy_screen *priv,
+ enum drm_privacy_screen_status sw_state)
+{
+ return -ENODEV;
+}
+static inline void drm_privacy_screen_get_state(struct drm_privacy_screen *priv,
+ enum drm_privacy_screen_status *sw_state_ret,
+ enum drm_privacy_screen_status *hw_state_ret)
+{
+ *sw_state_ret = PRIVACY_SCREEN_DISABLED;
+ *hw_state_ret = PRIVACY_SCREEN_DISABLED;
+}
+static inline int drm_privacy_screen_register_notifier(struct drm_privacy_screen *priv,
+ struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+static inline int drm_privacy_screen_unregister_notifier(struct drm_privacy_screen *priv,
+ struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif
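
A consumer-side sketch under assumed names (the "eDP-1" con_id is
illustrative): look up the provider, query both states, and drop the
reference. With CONFIG_DRM_PRIVACY_SCREEN disabled, the stubs above make the
same code fail gracefully with -ENODEV:

static int example_query_privacy_screen(struct device *dev)
{
	struct drm_privacy_screen *priv;
	enum drm_privacy_screen_status sw, hw;

	priv = drm_privacy_screen_get(dev, "eDP-1");
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	drm_privacy_screen_get_state(priv, &sw, &hw);
	drm_privacy_screen_put(priv);
	return 0;
}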
diff --git a/include/drm/drm_privacy_screen_driver.h b/include/drm/drm_privacy_screen_driver.h
new file mode 100644
index 000000000000..4ef246d5706f
--- /dev/null
+++ b/include/drm/drm_privacy_screen_driver.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2020 Red Hat, Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#ifndef __DRM_PRIVACY_SCREEN_DRIVER_H__
+#define __DRM_PRIVACY_SCREEN_DRIVER_H__
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <drm/drm_connector.h>
+
+struct drm_privacy_screen;
+
+/**
+ * struct drm_privacy_screen_ops - drm_privacy_screen operations
+ *
+ * Defines the operations which the privacy-screen class code may call.
+ * These functions should be implemented by the privacy-screen driver.
+ */
+struct drm_privacy_screen_ops {
+ /**
+ * @set_sw_state: Called to request a change of the privacy-screen
+ * state. The privacy-screen class code contains a check to avoid this
+ * getting called when the hw_state reports the state is locked.
+ * It is the driver's responsibility to update sw_state and hw_state.
+ * This is always called with the drm_privacy_screen's lock held.
+ */
+ int (*set_sw_state)(struct drm_privacy_screen *priv,
+ enum drm_privacy_screen_status sw_state);
+ /**
+ * @get_hw_state: Called to request that the driver gets the current
+ * privacy-screen state from the hardware and then updates sw_state and
+ * hw_state accordingly. This will be called by the core just before
+ * the privacy-screen is registered in sysfs.
+ */
+ void (*get_hw_state)(struct drm_privacy_screen *priv);
+};
+
+/**
+ * struct drm_privacy_screen - central privacy-screen structure
+ *
+ * Central privacy-screen structure, this contains the struct device used
+ * to register the screen in sysfs, the screen's state, ops, etc.
+ */
+struct drm_privacy_screen {
+ /** @dev: device used to register the privacy-screen in sysfs. */
+ struct device dev;
+	/** @lock: mutex protecting all fields in this struct. */
+ struct mutex lock;
+	/** @list: entry in the list of privacy-screen devices. */
+ struct list_head list;
+ /** @notifier_head: privacy-screen notifier head. */
+ struct blocking_notifier_head notifier_head;
+ /**
+ * @ops: &struct drm_privacy_screen_ops for this privacy-screen.
+ * This is NULL if the driver has unregistered the privacy-screen.
+ */
+ const struct drm_privacy_screen_ops *ops;
+ /**
+ * @sw_state: The privacy-screen's software state, see
+ * :ref:`Standard Connector Properties<standard_connector_properties>`
+ * for more info.
+ */
+ enum drm_privacy_screen_status sw_state;
+ /**
+ * @hw_state: The privacy-screen's hardware state, see
+ * :ref:`Standard Connector Properties<standard_connector_properties>`
+ * for more info.
+ */
+ enum drm_privacy_screen_status hw_state;
+ /**
+ * @drvdata: Private data owned by the privacy screen provider
+ */
+ void *drvdata;
+};
+
+static inline
+void *drm_privacy_screen_get_drvdata(struct drm_privacy_screen *priv)
+{
+ return priv->drvdata;
+}
+
+struct drm_privacy_screen *drm_privacy_screen_register(
+ struct device *parent, const struct drm_privacy_screen_ops *ops,
+ void *data);
+void drm_privacy_screen_unregister(struct drm_privacy_screen *priv);
+
+void drm_privacy_screen_call_notifier_chain(struct drm_privacy_screen *priv);
+
+#endif
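
A provider-side sketch with hypothetical names: implement set_sw_state(),
mirror the programmed state into sw_state/hw_state as the kernel-doc above
requires, and register from probe():

static int example_set_sw_state(struct drm_privacy_screen *priv,
				enum drm_privacy_screen_status sw_state)
{
	/* Program the hardware here, then mirror the result. */
	priv->sw_state = sw_state;
	priv->hw_state = sw_state;
	return 0;
}

static const struct drm_privacy_screen_ops example_ops = {
	.set_sw_state = example_set_sw_state,
};

/* In probe(): drm_privacy_screen_register(&pdev->dev, &example_ops, drvdata); */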
diff --git a/include/drm/drm_privacy_screen_machine.h b/include/drm/drm_privacy_screen_machine.h
new file mode 100644
index 000000000000..02e5371904d3
--- /dev/null
+++ b/include/drm/drm_privacy_screen_machine.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2020 Red Hat, Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#ifndef __DRM_PRIVACY_SCREEN_MACHINE_H__
+#define __DRM_PRIVACY_SCREEN_MACHINE_H__
+
+#include <linux/list.h>
+
+/**
+ * struct drm_privacy_screen_lookup - static privacy-screen lookup list entry
+ *
+ * Used for the static lookup-list for mapping privacy-screen consumer
+ * dev-connector pairs to a privacy-screen provider.
+ */
+struct drm_privacy_screen_lookup {
+	/** @list: entry in the lookup list. */
+ struct list_head list;
+ /** @dev_id: Consumer device name or NULL to match all devices. */
+ const char *dev_id;
+ /** @con_id: Consumer connector name or NULL to match all connectors. */
+ const char *con_id;
+ /** @provider: dev_name() of the privacy_screen provider. */
+ const char *provider;
+};
+
+void drm_privacy_screen_lookup_add(struct drm_privacy_screen_lookup *lookup);
+void drm_privacy_screen_lookup_remove(struct drm_privacy_screen_lookup *lookup);
+
+#if IS_ENABLED(CONFIG_DRM_PRIVACY_SCREEN) && IS_ENABLED(CONFIG_X86)
+void drm_privacy_screen_lookup_init(void);
+void drm_privacy_screen_lookup_exit(void);
+#else
+static inline void drm_privacy_screen_lookup_init(void)
+{
+}
+static inline void drm_privacy_screen_lookup_exit(void)
+{
+}
+#endif
+
+#endif
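
A machine-driver sketch; the connector and provider names are made up. A
NULL dev_id matches any consumer device:

static struct drm_privacy_screen_lookup example_lookup = {
	.dev_id = NULL,
	.con_id = "eDP-1",
	.provider = "example-privacy-screen",
};

/* drm_privacy_screen_lookup_add(&example_lookup) at module init,
 * drm_privacy_screen_lookup_remove(&example_lookup) at module exit.
 */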
diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h
index 8d3ed2834d34..5880daa14624 100644
--- a/include/drm/drm_probe_helper.h
+++ b/include/drm/drm_probe_helper.h
@@ -3,9 +3,10 @@
#ifndef __DRM_PROBE_HELPER_H__
#define __DRM_PROBE_HELPER_H__
-#include <linux/types.h>
+#include <drm/drm_modes.h>
struct drm_connector;
+struct drm_crtc;
struct drm_device;
struct drm_modeset_acquire_ctx;
@@ -18,10 +19,21 @@ int drm_helper_probe_detect(struct drm_connector *connector,
void drm_kms_helper_poll_init(struct drm_device *dev);
void drm_kms_helper_poll_fini(struct drm_device *dev);
bool drm_helper_hpd_irq_event(struct drm_device *dev);
+bool drm_connector_helper_hpd_irq_event(struct drm_connector *connector);
void drm_kms_helper_hotplug_event(struct drm_device *dev);
+void drm_kms_helper_connector_hotplug_event(struct drm_connector *connector);
void drm_kms_helper_poll_disable(struct drm_device *dev);
void drm_kms_helper_poll_enable(struct drm_device *dev);
bool drm_kms_helper_is_poll_worker(void);
+enum drm_mode_status drm_crtc_helper_mode_valid_fixed(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *fixed_mode);
+
+int drm_connector_helper_get_modes_from_ddc(struct drm_connector *connector);
+int drm_connector_helper_get_modes_fixed(struct drm_connector *connector,
+ const struct drm_display_mode *fixed_mode);
+int drm_connector_helper_get_modes(struct drm_connector *connector);
+
#endif
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 4a0a80d658c7..65bc9710a470 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -31,7 +31,6 @@
/**
* struct drm_property_enum - symbolic values for enumerations
- * @value: numeric property value for this enum entry
* @head: list of enum values, linked to &drm_property.enum_list
* @name: symbolic name for the enum
*
@@ -39,6 +38,14 @@
* decoding for each value. This is used for example for the rotation property.
*/
struct drm_property_enum {
+ /**
+ * @value: numeric property value for this enum entry
+ *
+ * If the property has the type &DRM_MODE_PROP_BITMASK, @value stores a
+ * bitshift, not a bitmask. In other words, the enum entry is enabled
+ * if the bit number @value is set in the property's value. This enum
+ * entry has the bitmask ``1 << value``.
+ */
uint64_t value;
struct list_head head;
char name[DRM_PROP_NAME_LEN];
@@ -114,7 +121,7 @@ struct drm_property {
* by the property. Bitmask properties are created using
* drm_property_create_bitmask().
*
- * DRM_MODE_PROB_OBJECT
+ * DRM_MODE_PROP_OBJECT
* Object properties are used to link modeset objects. This is used
* extensively in the atomic support to create the display pipeline,
* by linking &drm_framebuffer to &drm_plane, &drm_plane to
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
index 57a3be9e53e4..e8d94fca2703 100644
--- a/include/drm/drm_rect.h
+++ b/include/drm/drm_rect.h
@@ -39,12 +39,31 @@
* @x2: horizontal ending coordinate (exclusive)
* @y1: vertical starting coordinate (inclusive)
* @y2: vertical ending coordinate (exclusive)
+ *
+ * Note that this must match the layout of struct drm_mode_rect or the damage
+ * helpers like drm_atomic_helper_damage_iter_init() break.
*/
struct drm_rect {
int x1, y1, x2, y2;
};
/**
+ * DRM_RECT_INIT - initialize a rectangle from x/y/w/h
+ * @x: x coordinate
+ * @y: y coordinate
+ * @w: width
+ * @h: height
+ *
+ * RETURNS:
+ * A new rectangle of the specified size.
+ */
+#define DRM_RECT_INIT(x, y, w, h) ((struct drm_rect){ \
+ .x1 = (x), \
+ .y1 = (y), \
+ .x2 = (x) + (w), \
+ .y2 = (y) + (h) })
+
+/**
* DRM_RECT_FMT - printf string for &struct drm_rect
*/
#define DRM_RECT_FMT "%dx%d%+d%+d"
@@ -180,7 +199,7 @@ static inline int drm_rect_height(const struct drm_rect *r)
}
/**
- * drm_rect_visible - determine if the the rectangle is visible
+ * drm_rect_visible - determine if the rectangle is visible
* @r: rectangle whose visibility is returned
*
* RETURNS:
@@ -206,6 +225,19 @@ static inline bool drm_rect_equals(const struct drm_rect *r1,
r1->y1 == r2->y1 && r1->y2 == r2->y2;
}
+/**
+ * drm_rect_fp_to_int - Convert a rect in 16.16 fixed point form to int form.
+ * @dst: rect to be stored the converted value
+ * @src: rect in 16.16 fixed point form
+ */
+static inline void drm_rect_fp_to_int(struct drm_rect *dst,
+ const struct drm_rect *src)
+{
+ drm_rect_init(dst, src->x1 >> 16, src->y1 >> 16,
+ drm_rect_width(src) >> 16,
+ drm_rect_height(src) >> 16);
+}
+
bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip);
bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
const struct drm_rect *clip);
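
The two additions compose; a sketch with illustrative values that builds a
16.16 fixed-point source rectangle and converts it back to integer
coordinates:

static void example_rect_conversion(void)
{
	struct drm_rect dst;
	struct drm_rect src = DRM_RECT_INIT(0, 0, 1920 << 16, 1080 << 16);

	drm_rect_fp_to_int(&dst, &src);
	/* dst now spans 1920x1080 at +0+0 */
}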
diff --git a/include/drm/drm_scdc_helper.h b/include/drm/drm_scdc_helper.h
deleted file mode 100644
index 6a483533aae4..000000000000
--- a/include/drm/drm_scdc_helper.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2015 NVIDIA Corporation. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef DRM_SCDC_HELPER_H
-#define DRM_SCDC_HELPER_H
-
-#include <linux/i2c.h>
-#include <linux/types.h>
-
-#define SCDC_SINK_VERSION 0x01
-
-#define SCDC_SOURCE_VERSION 0x02
-
-#define SCDC_UPDATE_0 0x10
-#define SCDC_READ_REQUEST_TEST (1 << 2)
-#define SCDC_CED_UPDATE (1 << 1)
-#define SCDC_STATUS_UPDATE (1 << 0)
-
-#define SCDC_UPDATE_1 0x11
-
-#define SCDC_TMDS_CONFIG 0x20
-#define SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 (1 << 1)
-#define SCDC_TMDS_BIT_CLOCK_RATIO_BY_10 (0 << 1)
-#define SCDC_SCRAMBLING_ENABLE (1 << 0)
-
-#define SCDC_SCRAMBLER_STATUS 0x21
-#define SCDC_SCRAMBLING_STATUS (1 << 0)
-
-#define SCDC_CONFIG_0 0x30
-#define SCDC_READ_REQUEST_ENABLE (1 << 0)
-
-#define SCDC_STATUS_FLAGS_0 0x40
-#define SCDC_CH2_LOCK (1 << 3)
-#define SCDC_CH1_LOCK (1 << 2)
-#define SCDC_CH0_LOCK (1 << 1)
-#define SCDC_CH_LOCK_MASK (SCDC_CH2_LOCK | SCDC_CH1_LOCK | SCDC_CH0_LOCK)
-#define SCDC_CLOCK_DETECT (1 << 0)
-
-#define SCDC_STATUS_FLAGS_1 0x41
-
-#define SCDC_ERR_DET_0_L 0x50
-#define SCDC_ERR_DET_0_H 0x51
-#define SCDC_ERR_DET_1_L 0x52
-#define SCDC_ERR_DET_1_H 0x53
-#define SCDC_ERR_DET_2_L 0x54
-#define SCDC_ERR_DET_2_H 0x55
-#define SCDC_CHANNEL_VALID (1 << 7)
-
-#define SCDC_ERR_DET_CHECKSUM 0x56
-
-#define SCDC_TEST_CONFIG_0 0xc0
-#define SCDC_TEST_READ_REQUEST (1 << 7)
-#define SCDC_TEST_READ_REQUEST_DELAY(x) ((x) & 0x7f)
-
-#define SCDC_MANUFACTURER_IEEE_OUI 0xd0
-#define SCDC_MANUFACTURER_IEEE_OUI_SIZE 3
-
-#define SCDC_DEVICE_ID 0xd3
-#define SCDC_DEVICE_ID_SIZE 8
-
-#define SCDC_DEVICE_HARDWARE_REVISION 0xdb
-#define SCDC_GET_DEVICE_HARDWARE_REVISION_MAJOR(x) (((x) >> 4) & 0xf)
-#define SCDC_GET_DEVICE_HARDWARE_REVISION_MINOR(x) (((x) >> 0) & 0xf)
-
-#define SCDC_DEVICE_SOFTWARE_MAJOR_REVISION 0xdc
-#define SCDC_DEVICE_SOFTWARE_MINOR_REVISION 0xdd
-
-#define SCDC_MANUFACTURER_SPECIFIC 0xde
-#define SCDC_MANUFACTURER_SPECIFIC_SIZE 34
-
-ssize_t drm_scdc_read(struct i2c_adapter *adapter, u8 offset, void *buffer,
- size_t size);
-ssize_t drm_scdc_write(struct i2c_adapter *adapter, u8 offset,
- const void *buffer, size_t size);
-
-/**
- * drm_scdc_readb - read a single byte from SCDC
- * @adapter: I2C adapter
- * @offset: offset of register to read
- * @value: return location for the register value
- *
- * Reads a single byte from SCDC. This is a convenience wrapper around the
- * drm_scdc_read() function.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-static inline int drm_scdc_readb(struct i2c_adapter *adapter, u8 offset,
- u8 *value)
-{
- return drm_scdc_read(adapter, offset, value, sizeof(*value));
-}
-
-/**
- * drm_scdc_writeb - write a single byte to SCDC
- * @adapter: I2C adapter
- * @offset: offset of register to read
- * @value: return location for the register value
- *
- * Writes a single byte to SCDC. This is a convenience wrapper around the
- * drm_scdc_write() function.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-static inline int drm_scdc_writeb(struct i2c_adapter *adapter, u8 offset,
- u8 value)
-{
- return drm_scdc_write(adapter, offset, &value, sizeof(value));
-}
-
-bool drm_scdc_get_scrambling_status(struct i2c_adapter *adapter);
-
-bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable);
-bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set);
-#endif
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index 15afee9cf049..0b3647e614dd 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -100,8 +100,11 @@ struct drm_simple_display_pipe_funcs {
* This is the function drivers should submit the
* &drm_pending_vblank_event from. Using either
* drm_crtc_arm_vblank_event(), when the driver supports vblank
- * interrupt handling, or drm_crtc_send_vblank_event() directly in case
- * the hardware lacks vblank support entirely.
+	 * interrupt handling, or drm_crtc_send_vblank_event() for more
+	 * complex cases. In case the hardware lacks vblank support entirely,
+ * drivers can set &struct drm_crtc_state.no_vblank in
+ * &struct drm_simple_display_pipe_funcs.check and let DRM's
+ * atomic helper fake a vblank event.
*/
void (*update)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_plane_state);
@@ -113,8 +116,11 @@ struct drm_simple_display_pipe_funcs {
* the documentation for the &drm_plane_helper_funcs.prepare_fb hook for
* more details.
*
- * Drivers which always have their buffers pinned should use
- * drm_gem_fb_simple_display_pipe_prepare_fb() for this hook.
+	 * For GEM drivers that have neither a @prepare_fb nor a @cleanup_fb
+	 * hook set, drm_gem_simple_display_pipe_prepare_fb() is called
+	 * automatically to implement this. Other drivers which need
+	 * additional plane processing can call
+	 * drm_gem_simple_display_pipe_prepare_fb() from their @prepare_fb
+	 * hook.
*/
int (*prepare_fb)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state);
@@ -146,6 +152,60 @@ struct drm_simple_display_pipe_funcs {
* more details.
*/
void (*disable_vblank)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @reset_crtc:
+ *
+ * Optional, called by &drm_crtc_funcs.reset. Please read the
+ * documentation for the &drm_crtc_funcs.reset hook for more details.
+ */
+ void (*reset_crtc)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @duplicate_crtc_state:
+ *
+ * Optional, called by &drm_crtc_funcs.atomic_duplicate_state. Please
+ * read the documentation for the &drm_crtc_funcs.atomic_duplicate_state
+ * hook for more details.
+ */
+ struct drm_crtc_state * (*duplicate_crtc_state)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @destroy_crtc_state:
+ *
+ * Optional, called by &drm_crtc_funcs.atomic_destroy_state. Please
+ * read the documentation for the &drm_crtc_funcs.atomic_destroy_state
+ * hook for more details.
+ */
+ void (*destroy_crtc_state)(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state);
+
+ /**
+ * @reset_plane:
+ *
+ * Optional, called by &drm_plane_funcs.reset. Please read the
+ * documentation for the &drm_plane_funcs.reset hook for more details.
+ */
+ void (*reset_plane)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @duplicate_plane_state:
+ *
+ * Optional, called by &drm_plane_funcs.atomic_duplicate_state. Please
+ * read the documentation for the &drm_plane_funcs.atomic_duplicate_state
+ * hook for more details.
+ */
+ struct drm_plane_state * (*duplicate_plane_state)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @destroy_plane_state:
+ *
+ * Optional, called by &drm_plane_funcs.atomic_destroy_state. Please
+ * read the documentation for the &drm_plane_funcs.atomic_destroy_state
+ * hook for more details.
+ */
+ void (*destroy_plane_state)(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
};
/**
@@ -178,4 +238,32 @@ int drm_simple_display_pipe_init(struct drm_device *dev,
const uint64_t *format_modifiers,
struct drm_connector *connector);
+int drm_simple_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ int encoder_type);
+
+void *__drmm_simple_encoder_alloc(struct drm_device *dev, size_t size,
+ size_t offset, int encoder_type);
+
+/**
+ * drmm_simple_encoder_alloc - Allocate and initialize an encoder with basic
+ * functionality.
+ * @dev: drm device
+ * @type: the type of the struct which contains struct &drm_encoder
+ * @member: the name of the &drm_encoder within @type.
+ * @encoder_type: user visible type of the encoder
+ *
+ * Allocates and initializes an encoder that has no further functionality.
+ * Settings for possible CRTC and clones are left to their initial values.
+ * Cleanup is automatically handled through registering drm_encoder_cleanup()
+ * with drmm_add_action().
+ *
+ * Returns:
+ * Pointer to new encoder, or ERR_PTR on failure.
+ */
+#define drmm_simple_encoder_alloc(dev, type, member, encoder_type) \
+ ((type *)__drmm_simple_encoder_alloc(dev, sizeof(type), \
+ offsetof(type, member), \
+ encoder_type))
+
#endif /* __LINUX_DRM_SIMPLE_KMS_HELPER_H */
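
A usage sketch for the new allocator, with a hypothetical embedding struct;
errors come back as ERR_PTR() and cleanup is fully managed, so there is no
matching free call:

struct example_output {
	struct drm_encoder encoder;
	/* further driver state */
};

static int example_init_encoder(struct drm_device *drm)
{
	struct example_output *out;

	out = drmm_simple_encoder_alloc(drm, struct example_output, encoder,
					DRM_MODE_ENCODER_NONE);
	if (IS_ERR(out))
		return PTR_ERR(out);
	return 0;
}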
diff --git a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h
index d454ef617b2c..6273cac44e47 100644
--- a/include/drm/drm_sysfs.h
+++ b/include/drm/drm_sysfs.h
@@ -11,6 +11,7 @@ int drm_class_device_register(struct device *dev);
void drm_class_device_unregister(struct device *dev);
void drm_sysfs_hotplug_event(struct drm_device *dev);
+void drm_sysfs_connector_hotplug_event(struct drm_connector *connector);
void drm_sysfs_connector_status_event(struct drm_connector *connector,
struct drm_property *property);
#endif
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index c16c44052b3d..733a3e2d1d10 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -27,12 +27,14 @@
#include <linux/seqlock.h>
#include <linux/idr.h>
#include <linux/poll.h>
+#include <linux/kthread.h>
#include <drm/drm_file.h>
#include <drm/drm_modes.h>
struct drm_device;
struct drm_crtc;
+struct drm_vblank_work;
/**
* struct drm_pending_vblank_event - pending vblank event tracking
@@ -174,13 +176,13 @@ struct drm_vblank_crtc {
unsigned int pipe;
/**
* @framedur_ns: Frame/Field duration in ns, used by
- * drm_calc_vbltimestamp_from_scanoutpos() and computed by
+ * drm_crtc_vblank_helper_get_vblank_timestamp() and computed by
* drm_calc_timestamping_constants().
*/
int framedur_ns;
/**
* @linedur_ns: Line duration in ns, used by
- * drm_calc_vbltimestamp_from_scanoutpos() and computed by
+ * drm_crtc_vblank_helper_get_vblank_timestamp() and computed by
* drm_calc_timestamping_constants().
*/
int linedur_ns;
@@ -190,8 +192,8 @@ struct drm_vblank_crtc {
*
* Cache of the current hardware display mode. Only valid when @enabled
* is set. This is used by helpers like
- * drm_calc_vbltimestamp_from_scanoutpos(). We can't just access the
- * hardware mode by e.g. looking at &drm_crtc_state.adjusted_mode,
+ * drm_crtc_vblank_helper_get_vblank_timestamp(). We can't just access
+ * the hardware mode by e.g. looking at &drm_crtc_state.adjusted_mode,
* because that one is really hard to get from interrupt context.
*/
struct drm_display_mode hwmode;
@@ -203,9 +205,28 @@ struct drm_vblank_crtc {
* disabling functions multiple times.
*/
bool enabled;
+
+ /**
+ * @worker: The &kthread_worker used for executing vblank works.
+ */
+ struct kthread_worker *worker;
+
+ /**
+ * @pending_work: A list of scheduled &drm_vblank_work items that are
+ * waiting for a future vblank.
+ */
+ struct list_head pending_work;
+
+ /**
+ * @work_wait_queue: The wait queue used for signaling that a
+ * &drm_vblank_work item has either finished executing, or was
+ * cancelled.
+ */
+ wait_queue_head_t work_wait_queue;
};
int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
+bool drm_dev_has_vblank(const struct drm_device *dev);
u64 drm_crtc_vblank_count(struct drm_crtc *crtc);
u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
ktime_t *vblanktime);
@@ -226,16 +247,34 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc);
void drm_crtc_vblank_reset(struct drm_crtc *crtc);
void drm_crtc_vblank_on(struct drm_crtc *crtc);
u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
-void drm_vblank_restore(struct drm_device *dev, unsigned int pipe);
void drm_crtc_vblank_restore(struct drm_crtc *crtc);
-bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
- unsigned int pipe, int *max_error,
- ktime_t *vblank_time,
- bool in_vblank_irq);
void drm_calc_timestamping_constants(struct drm_crtc *crtc,
const struct drm_display_mode *mode);
wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc);
void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
u32 max_vblank_count);
+
+/*
+ * Helpers for struct drm_crtc_funcs
+ */
+
+typedef bool (*drm_vblank_get_scanout_position_func)(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime,
+ ktime_t *etime,
+ const struct drm_display_mode *mode);
+
+bool
+drm_crtc_vblank_helper_get_vblank_timestamp_internal(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq,
+ drm_vblank_get_scanout_position_func get_scanout_position);
+bool drm_crtc_vblank_helper_get_vblank_timestamp(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq);
+
#endif
diff --git a/include/drm/drm_vblank_work.h b/include/drm/drm_vblank_work.h
new file mode 100644
index 000000000000..eb41d0810c4f
--- /dev/null
+++ b/include/drm/drm_vblank_work.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef _DRM_VBLANK_WORK_H_
+#define _DRM_VBLANK_WORK_H_
+
+#include <linux/kthread.h>
+
+struct drm_crtc;
+
+/**
+ * struct drm_vblank_work - A delayed work item which delays until a target
+ * vblank passes, and then executes at realtime priority outside of IRQ
+ * context.
+ *
+ * See also:
+ * drm_vblank_work_schedule()
+ * drm_vblank_work_init()
+ * drm_vblank_work_cancel_sync()
+ * drm_vblank_work_flush()
+ */
+struct drm_vblank_work {
+ /**
+ * @base: The base &kthread_work item which will be executed by
+ * &drm_vblank_crtc.worker. Drivers should not interact with this
+ * directly, and instead rely on drm_vblank_work_init() to initialize
+ * this.
+ */
+ struct kthread_work base;
+
+ /**
+ * @vblank: A pointer to &drm_vblank_crtc this work item belongs to.
+ */
+ struct drm_vblank_crtc *vblank;
+
+ /**
+ * @count: The target vblank this work will execute on. Drivers should
+ * not modify this value directly, and instead use
+ * drm_vblank_work_schedule()
+ */
+ u64 count;
+
+ /**
+ * @cancelling: The number of drm_vblank_work_cancel_sync() calls that
+ * are currently running. A work item cannot be rescheduled until all
+ * calls have finished.
+ */
+ int cancelling;
+
+ /**
+ * @node: The position of this work item in
+ * &drm_vblank_crtc.pending_work.
+ */
+ struct list_head node;
+};
+
+/**
+ * to_drm_vblank_work - Retrieve the respective &drm_vblank_work item from a
+ * &kthread_work
+ * @_work: The &kthread_work embedded inside a &drm_vblank_work
+ */
+#define to_drm_vblank_work(_work) \
+ container_of((_work), struct drm_vblank_work, base)
+
+int drm_vblank_work_schedule(struct drm_vblank_work *work,
+ u64 count, bool nextonmiss);
+void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc,
+ void (*func)(struct kthread_work *work));
+bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work);
+void drm_vblank_work_flush(struct drm_vblank_work *work);
+
+#endif /* !_DRM_VBLANK_WORK_H_ */
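
A sketch of the intended flow (handler and setup code hypothetical):
initialize once against a CRTC, then schedule against a target vblank count;
the handler runs from the per-CRTC kthread worker, outside IRQ context:

static void example_vblank_work_fn(struct kthread_work *base)
{
	struct drm_vblank_work *work = to_drm_vblank_work(base);

	/* Runs once the target vblank has passed; use 'work' here. */
	(void)work;
}

/* In CRTC code:
 * drm_vblank_work_init(&my_work, crtc, example_vblank_work_fn);
 * drm_vblank_work_schedule(&my_work, target_count, true);
 */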
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index 76ac5e97a559..4f8c35206f7c 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -53,7 +53,7 @@ struct drm_vma_offset_node {
rwlock_t vm_lock;
struct drm_mm_node vm_node;
struct rb_root vm_files;
- bool readonly:1;
+ void *driver_private;
};
struct drm_vma_offset_manager {
diff --git a/include/drm/drm_writeback.h b/include/drm/drm_writeback.h
index 777c14c847f0..17e576c80169 100644
--- a/include/drm/drm_writeback.h
+++ b/include/drm/drm_writeback.h
@@ -15,7 +15,13 @@
#include <drm/drm_encoder.h>
#include <linux/workqueue.h>
+/**
+ * struct drm_writeback_connector - DRM writeback connector
+ */
struct drm_writeback_connector {
+ /**
+ * @base: base drm_connector object
+ */
struct drm_connector base;
/**
@@ -24,6 +30,8 @@ struct drm_writeback_connector {
* @drm_writeback_connector control the behaviour of the @encoder
* by passing the @enc_funcs parameter to drm_writeback_connector_init()
* function.
+ * For users of drm_writeback_connector_init_with_encoder(), this field
+ * is not valid as the encoder is managed within their drivers.
*/
struct drm_encoder encoder;
@@ -78,6 +86,9 @@ struct drm_writeback_connector {
char timeline_name[32];
};
+/**
+ * struct drm_writeback_job - DRM writeback job
+ */
struct drm_writeback_job {
/**
* @connector:
@@ -141,7 +152,14 @@ int drm_writeback_connector_init(struct drm_device *dev,
struct drm_writeback_connector *wb_connector,
const struct drm_connector_funcs *con_funcs,
const struct drm_encoder_helper_funcs *enc_helper_funcs,
- const u32 *formats, int n_formats);
+ const u32 *formats, int n_formats,
+ u32 possible_crtcs);
+
+int drm_writeback_connector_init_with_encoder(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector,
+ struct drm_encoder *enc,
+ const struct drm_connector_funcs *con_funcs, const u32 *formats,
+ int n_formats);
int drm_writeback_set_fb(struct drm_connector_state *conn_state,
struct drm_framebuffer *fb);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 589be851f8a1..2ae4fd62e01c 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -27,21 +27,35 @@
#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
+#include <linux/xarray.h>
+#include <linux/workqueue.h>
#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
+/**
+ * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
+ *
+ * Setting this flag on a scheduler fence prevents pipelining of jobs depending
+ * on this fence. In other words, we always insert a full CPU round trip before
+ * dependent jobs are pushed to the hw queue.
+ */
+#define DRM_SCHED_FENCE_DONT_PIPELINE DMA_FENCE_FLAG_USER_BITS
+
+struct drm_gem_object;
+
struct drm_gpu_scheduler;
struct drm_sched_rq;
+/* These are often used as an (initial) index
+ * to an array, and as such should start at 0.
+ */
enum drm_sched_priority {
DRM_SCHED_PRIORITY_MIN,
- DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
DRM_SCHED_PRIORITY_NORMAL,
- DRM_SCHED_PRIORITY_HIGH_SW,
- DRM_SCHED_PRIORITY_HIGH_HW,
+ DRM_SCHED_PRIORITY_HIGH,
DRM_SCHED_PRIORITY_KERNEL,
- DRM_SCHED_PRIORITY_MAX,
- DRM_SCHED_PRIORITY_INVALID = -1,
+
+ DRM_SCHED_PRIORITY_COUNT,
DRM_SCHED_PRIORITY_UNSET = -2
};
@@ -49,55 +63,147 @@ enum drm_sched_priority {
* struct drm_sched_entity - A wrapper around a job queue (typically
* attached to the DRM file_priv).
*
- * @list: used to append this struct to the list of entities in the
- * runqueue.
- * @rq: runqueue on which this entity is currently scheduled.
- * @sched_list: A list of schedulers (drm_gpu_schedulers).
- * Jobs from this entity can be scheduled on any scheduler
- * on this list.
- * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
- * @rq_lock: lock to modify the runqueue to which this entity belongs.
- * @job_queue: the list of jobs of this entity.
- * @fence_seq: a linearly increasing seqno incremented with each
- * new &drm_sched_fence which is part of the entity.
- * @fence_context: a unique context for all the fences which belong
- * to this entity.
- * The &drm_sched_fence.scheduled uses the
- * fence_context but &drm_sched_fence.finished uses
- * fence_context + 1.
- * @dependency: the dependency fence of the job which is on the top
- * of the job queue.
- * @cb: callback for the dependency fence above.
- * @guilty: points to ctx's guilty.
- * @fini_status: contains the exit status in case the process was signalled.
- * @last_scheduled: points to the finished fence of the last scheduled job.
- * @last_user: last group leader pushing a job into the entity.
- * @stopped: Marks the enity as removed from rq and destined for termination.
- * @entity_idle: Signals when enityt is not in use
- *
* Entities will emit jobs in order to their corresponding hardware
* ring, and the scheduler will alternate between entities based on
* scheduling policy.
*/
struct drm_sched_entity {
+ /**
+ * @list:
+ *
+ * Used to append this struct to the list of entities in the runqueue
+ * @rq under &drm_sched_rq.entities.
+ *
+ * Protected by &drm_sched_rq.lock of @rq.
+ */
struct list_head list;
+
+ /**
+ * @rq:
+ *
+ * Runqueue on which this entity is currently scheduled.
+ *
+ * FIXME: Locking is very unclear for this. Writers are protected by
+ * @rq_lock, but readers are generally lockless and seem to just race
+ * with not even a READ_ONCE.
+ */
struct drm_sched_rq *rq;
+
+ /**
+ * @sched_list:
+ *
+ * A list of schedulers (struct drm_gpu_scheduler). Jobs from this entity can
+ * be scheduled on any scheduler on this list.
+ *
+ * This can be modified by calling drm_sched_entity_modify_sched().
+ * Locking is entirely up to the driver, see the above function for more
+ * details.
+ *
+ * This will be set to NULL if &num_sched_list equals 1 and @rq has been
+ * set already.
+ *
+ * FIXME: This means priority changes through
+ * drm_sched_entity_set_priority() will be lost henceforth in this case.
+ */
struct drm_gpu_scheduler **sched_list;
+
+ /**
+ * @num_sched_list:
+ *
+ * Number of drm_gpu_schedulers in the @sched_list.
+ */
unsigned int num_sched_list;
+
+ /**
+ * @priority:
+ *
+ * Priority of the entity. This can be modified by calling
+ * drm_sched_entity_set_priority(). Protected by &rq_lock.
+ */
enum drm_sched_priority priority;
+
+ /**
+ * @rq_lock:
+ *
+ * Lock to modify the runqueue to which this entity belongs.
+ */
spinlock_t rq_lock;
+ /**
+ * @job_queue: the list of jobs of this entity.
+ */
struct spsc_queue job_queue;
+ /**
+ * @fence_seq:
+ *
+ * A linearly increasing seqno incremented with each new
+ * &drm_sched_fence which is part of the entity.
+ *
+ * FIXME: Callers of drm_sched_job_arm() need to ensure correct locking,
+ * this doesn't need to be atomic.
+ */
atomic_t fence_seq;
+
+ /**
+ * @fence_context:
+ *
+ * A unique context for all the fences which belong to this entity. The
+ * &drm_sched_fence.scheduled uses the fence_context but
+ * &drm_sched_fence.finished uses fence_context + 1.
+ */
uint64_t fence_context;
+ /**
+ * @dependency:
+ *
+ * The dependency fence of the job which is on the top of the job queue.
+ */
struct dma_fence *dependency;
+
+ /**
+ * @cb:
+ *
+ * Callback for the dependency fence above.
+ */
struct dma_fence_cb cb;
+
+ /**
+ * @guilty:
+ *
+	 * Points to the entity's guilty flag.
+ */
atomic_t *guilty;
+
+ /**
+ * @last_scheduled:
+ *
+ * Points to the finished fence of the last scheduled job. Only written
+ * by the scheduler thread, can be accessed locklessly from
+ * drm_sched_job_arm() iff the queue is empty.
+ */
struct dma_fence *last_scheduled;
+
+ /**
+ * @last_user: last group leader pushing a job into the entity.
+ */
struct task_struct *last_user;
+
+ /**
+ * @stopped:
+ *
+	 * Marks the entity as removed from the rq and destined for
+ * termination. This is set by calling drm_sched_entity_flush() and by
+ * drm_sched_fini().
+ */
bool stopped;
+
+ /**
+ * @entity_idle:
+ *
+ * Signals when entity is not in use, used to sequence entity cleanup in
+ * drm_sched_entity_fini().
+ */
struct completion entity_idle;
};
@@ -169,10 +275,11 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* struct drm_sched_job - A job to be run by an entity.
*
* @queue_node: used to append this struct to the queue of jobs in an entity.
+ * @list: links this job into the scheduler's "pending" and "done" lists.
* @sched: the scheduler instance on which this job is scheduled.
* @s_fence: contains the fences for the scheduling of job.
* @finish_cb: the callback for the finished fence.
- * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
+ * @work: Helper to reschedule the job kill to a different context.
* @id: a unique id assigned to each job scheduled on the scheduler.
* @karma: increment on every hang caused by this job. If this exceeds the hang
* limit of the scheduler then the job is marked guilty and will not
@@ -187,34 +294,66 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
*/
struct drm_sched_job {
struct spsc_node queue_node;
+ struct list_head list;
struct drm_gpu_scheduler *sched;
struct drm_sched_fence *s_fence;
- struct dma_fence_cb finish_cb;
- struct list_head node;
+
+ /*
+ * work is used only after finish_cb has been used and will not be
+ * accessed anymore.
+ */
+ union {
+ struct dma_fence_cb finish_cb;
+ struct work_struct work;
+ };
+
uint64_t id;
atomic_t karma;
enum drm_sched_priority s_priority;
- struct drm_sched_entity *entity;
+ struct drm_sched_entity *entity;
struct dma_fence_cb cb;
+ /**
+ * @dependencies:
+ *
+ * Contains the dependencies as struct dma_fence for this job, see
+ * drm_sched_job_add_dependency() and
+ * drm_sched_job_add_implicit_dependencies().
+ */
+ struct xarray dependencies;
+
+ /** @last_dependency: tracks @dependencies as they signal */
+ unsigned long last_dependency;
};
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
int threshold)
{
- return (s_job && atomic_inc_return(&s_job->karma) > threshold);
+ return s_job && atomic_inc_return(&s_job->karma) > threshold;
}
+enum drm_gpu_sched_stat {
+ DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
+ DRM_GPU_SCHED_STAT_NOMINAL,
+ DRM_GPU_SCHED_STAT_ENODEV,
+};
+
/**
- * struct drm_sched_backend_ops
+ * struct drm_sched_backend_ops - Define the backend operations
+ * called by the scheduler
*
- * Define the backend operations called by the scheduler,
- * these functions should be implemented in driver side.
+ * These functions should be implemented on the driver side.
*/
struct drm_sched_backend_ops {
/**
- * @dependency: Called when the scheduler is considering scheduling
- * this job next, to get another struct dma_fence for this job to
- * block on. Once it returns NULL, run_job() may be called.
+ * @dependency:
+ *
+ * Called when the scheduler is considering scheduling this job next, to
+ * get another struct dma_fence for this job to block on. Once it
+ * returns NULL, run_job() may be called.
+ *
+ * If a driver exclusively uses drm_sched_job_add_dependency() and
+	 * drm_sched_job_add_implicit_dependencies() this can be omitted and
+ * left as NULL.
*/
struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
struct drm_sched_entity *s_entity);
@@ -228,10 +367,48 @@ struct drm_sched_backend_ops {
struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
/**
- * @timedout_job: Called when a job has taken too long to execute,
- * to trigger GPU recovery.
+ * @timedout_job: Called when a job has taken too long to execute,
+ * to trigger GPU recovery.
+ *
+ * This method is called in a workqueue context.
+ *
+ * Drivers typically issue a reset to recover from GPU hangs, and this
+ * procedure usually follows the following workflow:
+ *
+ * 1. Stop the scheduler using drm_sched_stop(). This will park the
+ * scheduler thread and cancel the timeout work, guaranteeing that
+ * nothing is queued while we reset the hardware queue
+ * 2. Try to gracefully stop non-faulty jobs (optional)
+ * 3. Issue a GPU reset (driver-specific)
+ * 4. Re-submit jobs using drm_sched_resubmit_jobs()
+ * 5. Restart the scheduler using drm_sched_start(). At that point, new
+ * jobs can be queued, and the scheduler thread is unblocked
+ *
+ * Note that some GPUs have distinct hardware queues but need to reset
+ * the GPU globally, which requires extra synchronization between the
+ * timeout handler of the different &drm_gpu_scheduler. One way to
+ * achieve this synchronization is to create an ordered workqueue
+ * (using alloc_ordered_workqueue()) at the driver level, and pass this
+ * queue to drm_sched_init(), to guarantee that timeout handlers are
+ * executed sequentially. The above workflow needs to be slightly
+ * adjusted in that case:
+ *
+ * 1. Stop all schedulers impacted by the reset using drm_sched_stop()
+ * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
+ * the reset (optional)
+ * 3. Issue a GPU reset on all faulty queues (driver-specific)
+ * 4. Re-submit jobs on all schedulers impacted by the reset using
+ * drm_sched_resubmit_jobs()
+ * 5. Restart all schedulers that were stopped in step #1 using
+ * drm_sched_start()
+ *
+ * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
+ * and the underlying driver has started or completed recovery.
+ *
+	 * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
+	 * available, i.e. has been unplugged. A minimal handler following
+	 * the workflow above is sketched after this struct.
*/
- void (*timedout_job)(struct drm_sched_job *sched_job);
+ enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);
/**
* @free_job: Called once the job's finished fence has been signaled
@@ -241,7 +418,7 @@ struct drm_sched_backend_ops {
};
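
A minimal sketch of the single-scheduler workflow described in
@timedout_job; example_reset_hw() is hypothetical and stands in for the
driver-specific reset step:

static void example_reset_hw(void)
{
	/* Driver-specific GPU reset goes here. */
}

static enum drm_gpu_sched_stat
example_timedout_job(struct drm_sched_job *sched_job)
{
	struct drm_gpu_scheduler *sched = sched_job->sched;

	drm_sched_stop(sched, sched_job);	/* 1. park the scheduler */
	/* 2. optionally drain non-faulty jobs */
	example_reset_hw();			/* 3. reset the hardware */
	drm_sched_resubmit_jobs(sched);		/* 4. re-submit pending jobs */
	drm_sched_start(sched, true);		/* 5. accept new jobs again */

	return DRM_GPU_SCHED_STAT_NOMINAL;
}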
/**
- * struct drm_gpu_scheduler
+ * struct drm_gpu_scheduler - scheduler instance-specific data
*
* @ops: backend operations provided by the driver.
* @hw_submission_limit: the max size of the hardware queue.
@@ -255,16 +432,19 @@ struct drm_sched_backend_ops {
* finished.
* @hw_rq_count: the number of jobs currently in the hardware queue.
* @job_id_count: used to assign unique id to the each job.
+ * @timeout_wq: workqueue used to queue @work_tdr
* @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
* timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
- * @ring_mirror_list: the list of jobs which are currently in the job queue.
- * @job_list_lock: lock to protect the ring_mirror_list.
+ * @pending_list: the list of jobs which are currently in the job queue.
+ * @job_list_lock: lock to protect the pending_list.
* @hang_limit: once the hangs by a job crosses this limit then it is marked
- * guilty and it will be considered for scheduling further.
+ * guilty and it will no longer be considered for scheduling.
 * @score: score to help loadbalancer pick an idle sched
+ * @_score: score used when the driver doesn't provide one
* @ready: marks if the underlying HW is ready to work
 * @free_guilty: A hint to the timeout handler to free the guilty job.
+ * @dev: system &struct device
*
* One scheduler is implemented for each hardware ring.
*/
@@ -273,36 +453,55 @@ struct drm_gpu_scheduler {
uint32_t hw_submission_limit;
long timeout;
const char *name;
- struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX];
+ struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_COUNT];
wait_queue_head_t wake_up_worker;
wait_queue_head_t job_scheduled;
atomic_t hw_rq_count;
atomic64_t job_id_count;
+ struct workqueue_struct *timeout_wq;
struct delayed_work work_tdr;
struct task_struct *thread;
- struct list_head ring_mirror_list;
+ struct list_head pending_list;
spinlock_t job_list_lock;
int hang_limit;
- atomic_t score;
+ atomic_t *score;
+ atomic_t _score;
bool ready;
bool free_guilty;
+ struct device *dev;
};
int drm_sched_init(struct drm_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
- uint32_t hw_submission, unsigned hang_limit, long timeout,
- const char *name);
+ uint32_t hw_submission, unsigned hang_limit,
+ long timeout, struct workqueue_struct *timeout_wq,
+ atomic_t *score, const char *name, struct device *dev);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
void *owner);
+void drm_sched_job_arm(struct drm_sched_job *job);
+int drm_sched_job_add_dependency(struct drm_sched_job *job,
+ struct dma_fence *fence);
+int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
+ struct drm_gem_object *obj,
+ bool write);
+
+
+void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
+ struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list);
+
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
+void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max);
void drm_sched_increase_karma(struct drm_sched_job *bad);
+void drm_sched_reset_karma(struct drm_sched_job *bad);
+void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
bool drm_sched_dependency_optimized(struct dma_fence* fence,
struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
@@ -323,19 +522,25 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
-void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
- struct drm_sched_entity *entity);
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
-struct drm_sched_fence *drm_sched_fence_create(
+struct drm_sched_fence *drm_sched_fence_alloc(
struct drm_sched_entity *s_entity, void *owner);
+void drm_sched_fence_init(struct drm_sched_fence *fence,
+ struct drm_sched_entity *entity);
+void drm_sched_fence_free(struct drm_sched_fence *fence);
+
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
unsigned long remaining);
+struct drm_gpu_scheduler *
+drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list);
#endif
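
Tying the new entry points together, a hedged sketch of the submission flow
as it now looks (error unwinding elided; dependency tracking lives in the
job's @dependencies xarray rather than in per-driver ->dependency()
callbacks):

static int example_submit_job(struct drm_sched_job *job,
			      struct drm_sched_entity *entity,
			      struct drm_gem_object *bo)
{
	int ret;

	ret = drm_sched_job_init(job, entity, NULL);
	if (ret)
		return ret;

	/* Track the BO's reservation fences as dependencies. */
	ret = drm_sched_job_add_implicit_dependencies(job, bo, true);
	if (ret)
		return ret;	/* real code would drm_sched_job_cleanup() */

	drm_sched_job_arm(job);
	drm_sched_entity_push_job(job);
	return 0;
}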
diff --git a/include/drm/gud.h b/include/drm/gud.h
new file mode 100644
index 000000000000..c52a8ba4ae4e
--- /dev/null
+++ b/include/drm/gud.h
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2020 Noralf Trønnes
+ */
+
+#ifndef __LINUX_GUD_H
+#define __LINUX_GUD_H
+
+#include <linux/types.h>
+
+/*
+ * struct gud_display_descriptor_req - Display descriptor
+ * @magic: Magic value GUD_DISPLAY_MAGIC
+ * @version: Protocol version
+ * @flags: Flags
+ * - STATUS_ON_SET: Always do a status request after a SET request.
+ * This is used by the Linux gadget driver since it has
+ * no way to control the status stage of a control OUT
+ * request that has a payload.
+ * - FULL_UPDATE: Always send the entire framebuffer when flushing changes.
+ * The GUD_REQ_SET_BUFFER request will not be sent
+ * before each bulk transfer, it will only be sent if the
+ * previous bulk transfer had failed. This gives the device
+ * a chance to reset its state machine if needed.
+ * This flag can not be used in combination with compression.
+ * @compression: Supported compression types
+ * - GUD_COMPRESSION_LZ4: LZ4 lossless compression.
+ * @max_buffer_size: Maximum buffer size the device can handle (optional).
+ * This is useful for devices that don't have a big enough
+ * buffer to decompress the entire framebuffer in one go.
+ * @min_width: Minimum pixel width the controller can handle
+ * @max_width: Maximum width
+ * @min_height: Minimum height
+ * @max_height: Maximum height
+ *
+ * Devices that have only one display mode will have min_width == max_width
+ * and min_height == max_height.
+ */
+struct gud_display_descriptor_req {
+ __le32 magic;
+#define GUD_DISPLAY_MAGIC 0x1d50614d
+ __u8 version;
+ __le32 flags;
+#define GUD_DISPLAY_FLAG_STATUS_ON_SET BIT(0)
+#define GUD_DISPLAY_FLAG_FULL_UPDATE BIT(1)
+ __u8 compression;
+#define GUD_COMPRESSION_LZ4 BIT(0)
+ __le32 max_buffer_size;
+ __le32 min_width;
+ __le32 max_width;
+ __le32 min_height;
+ __le32 max_height;
+} __packed;
+
+/*
+ * struct gud_property_req - Property
+ * @prop: Property
+ * @val: Value
+ */
+struct gud_property_req {
+ __le16 prop;
+ __le64 val;
+} __packed;
+
+/*
+ * struct gud_display_mode_req - Display mode
+ * @clock: Pixel clock in kHz
+ * @hdisplay: Horizontal display size
+ * @hsync_start: Horizontal sync start
+ * @hsync_end: Horizontal sync end
+ * @htotal: Horizontal total size
+ * @vdisplay: Vertical display size
+ * @vsync_start: Vertical sync start
+ * @vsync_end: Vertical sync end
+ * @vtotal: Vertical total size
+ * @flags: Bits 0-13 are the same as in the RandR protocol and also what DRM uses.
+ * The deprecated bits are reused for internal protocol flags leaving us
+ * free to follow DRM for the other bits in the future.
+ * - FLAG_PREFERRED: Set on the preferred display mode.
+ */
+struct gud_display_mode_req {
+ __le32 clock;
+ __le16 hdisplay;
+ __le16 hsync_start;
+ __le16 hsync_end;
+ __le16 htotal;
+ __le16 vdisplay;
+ __le16 vsync_start;
+ __le16 vsync_end;
+ __le16 vtotal;
+ __le32 flags;
+#define GUD_DISPLAY_MODE_FLAG_PHSYNC BIT(0)
+#define GUD_DISPLAY_MODE_FLAG_NHSYNC BIT(1)
+#define GUD_DISPLAY_MODE_FLAG_PVSYNC BIT(2)
+#define GUD_DISPLAY_MODE_FLAG_NVSYNC BIT(3)
+#define GUD_DISPLAY_MODE_FLAG_INTERLACE BIT(4)
+#define GUD_DISPLAY_MODE_FLAG_DBLSCAN BIT(5)
+#define GUD_DISPLAY_MODE_FLAG_CSYNC BIT(6)
+#define GUD_DISPLAY_MODE_FLAG_PCSYNC BIT(7)
+#define GUD_DISPLAY_MODE_FLAG_NCSYNC BIT(8)
+#define GUD_DISPLAY_MODE_FLAG_HSKEW BIT(9)
+/* BCast and PixelMultiplex are deprecated */
+#define GUD_DISPLAY_MODE_FLAG_DBLCLK BIT(12)
+#define GUD_DISPLAY_MODE_FLAG_CLKDIV2 BIT(13)
+#define GUD_DISPLAY_MODE_FLAG_USER_MASK \
+ (GUD_DISPLAY_MODE_FLAG_PHSYNC | GUD_DISPLAY_MODE_FLAG_NHSYNC | \
+ GUD_DISPLAY_MODE_FLAG_PVSYNC | GUD_DISPLAY_MODE_FLAG_NVSYNC | \
+ GUD_DISPLAY_MODE_FLAG_INTERLACE | GUD_DISPLAY_MODE_FLAG_DBLSCAN | \
+ GUD_DISPLAY_MODE_FLAG_CSYNC | GUD_DISPLAY_MODE_FLAG_PCSYNC | \
+ GUD_DISPLAY_MODE_FLAG_NCSYNC | GUD_DISPLAY_MODE_FLAG_HSKEW | \
+ GUD_DISPLAY_MODE_FLAG_DBLCLK | GUD_DISPLAY_MODE_FLAG_CLKDIV2)
+/* Internal protocol flags */
+#define GUD_DISPLAY_MODE_FLAG_PREFERRED BIT(10)
+} __packed;
+
+/*
+ * struct gud_connector_descriptor_req - Connector descriptor
+ * @connector_type: Connector type (GUD_CONNECTOR_TYPE_*).
+ * If the host doesn't support the type it should fall back to PANEL.
+ * @flags: Flags
+ * - POLL_STATUS: Connector status can change (polled every 10 seconds)
+ * - INTERLACE: Interlaced modes are supported
+ * - DOUBLESCAN: Doublescan modes are supported
+ */
+struct gud_connector_descriptor_req {
+ __u8 connector_type;
+#define GUD_CONNECTOR_TYPE_PANEL 0
+#define GUD_CONNECTOR_TYPE_VGA 1
+#define GUD_CONNECTOR_TYPE_COMPOSITE 2
+#define GUD_CONNECTOR_TYPE_SVIDEO 3
+#define GUD_CONNECTOR_TYPE_COMPONENT 4
+#define GUD_CONNECTOR_TYPE_DVI 5
+#define GUD_CONNECTOR_TYPE_DISPLAYPORT 6
+#define GUD_CONNECTOR_TYPE_HDMI 7
+ __le32 flags;
+#define GUD_CONNECTOR_FLAGS_POLL_STATUS BIT(0)
+#define GUD_CONNECTOR_FLAGS_INTERLACE BIT(1)
+#define GUD_CONNECTOR_FLAGS_DOUBLESCAN BIT(2)
+} __packed;
+
+/*
+ * struct gud_set_buffer_req - Set buffer transfer info
+ * @x: X position of rectangle
+ * @y: Y position
+ * @width: Pixel width of rectangle
+ * @height: Pixel height
+ * @length: Buffer length in bytes
+ * @compression: Transfer compression
+ * @compressed_length: Compressed buffer length
+ *
+ * This request is issued right before the bulk transfer.
+ * @x, @y, @width and @height specifies the rectangle where the buffer should be
+ * placed inside the framebuffer.
+ */
+struct gud_set_buffer_req {
+ __le32 x;
+ __le32 y;
+ __le32 width;
+ __le32 height;
+ __le32 length;
+ __u8 compression;
+ __le32 compressed_length;
+} __packed;
+
+/*
+ * struct gud_state_req - Display state
+ * @mode: Display mode
+ * @format: Pixel format GUD_PIXEL_FORMAT_*
+ * @connector: Connector index
+ * @properties: Array of properties
+ *
+ * The entire state is transferred each time there's a change.
+ */
+struct gud_state_req {
+ struct gud_display_mode_req mode;
+ __u8 format;
+ __u8 connector;
+ struct gud_property_req properties[];
+} __packed;
+
+/* List of supported connector properties: */
+
+/* Margins in pixels to deal with overscan, range 0-100 */
+#define GUD_PROPERTY_TV_LEFT_MARGIN 1
+#define GUD_PROPERTY_TV_RIGHT_MARGIN 2
+#define GUD_PROPERTY_TV_TOP_MARGIN 3
+#define GUD_PROPERTY_TV_BOTTOM_MARGIN 4
+#define GUD_PROPERTY_TV_MODE 5
+/* Brightness in percent, range 0-100 */
+#define GUD_PROPERTY_TV_BRIGHTNESS 6
+/* Contrast in percent, range 0-100 */
+#define GUD_PROPERTY_TV_CONTRAST 7
+/* Flicker reduction in percent, range 0-100 */
+#define GUD_PROPERTY_TV_FLICKER_REDUCTION 8
+/* Overscan in percent, range 0-100 */
+#define GUD_PROPERTY_TV_OVERSCAN 9
+/* Saturation in percent, range 0-100 */
+#define GUD_PROPERTY_TV_SATURATION 10
+/* Hue in percent, range 0-100 */
+#define GUD_PROPERTY_TV_HUE 11
+
+/*
+ * Backlight brightness is in the range 0-100 inclusive. The value represents the human perceptual
+ * brightness and not a linear PWM value. 0 is the minimum brightness, which should not turn the
+ * backlight completely off. The DPMS connector property should be used to control power, which will
+ * trigger a GUD_REQ_SET_DISPLAY_ENABLE request.
+ *
+ * This does not map to a DRM property, it is used with the backlight device.
+ */
+#define GUD_PROPERTY_BACKLIGHT_BRIGHTNESS 12
+
+/* List of supported properties that are not connector properties: */
+
+/*
+ * Plane rotation. Should return the supported bitmask on
+ * GUD_REQ_GET_PROPERTIES. GUD_ROTATION_0 is mandatory.
+ *
+ * Note: This is not display rotation, so 90/270 will need scaling to make it fit (unless the display is square).
+ */
+#define GUD_PROPERTY_ROTATION 50
+ #define GUD_ROTATION_0 BIT(0)
+ #define GUD_ROTATION_90 BIT(1)
+ #define GUD_ROTATION_180 BIT(2)
+ #define GUD_ROTATION_270 BIT(3)
+ #define GUD_ROTATION_REFLECT_X BIT(4)
+ #define GUD_ROTATION_REFLECT_Y BIT(5)
+ #define GUD_ROTATION_MASK (GUD_ROTATION_0 | GUD_ROTATION_90 | \
+ GUD_ROTATION_180 | GUD_ROTATION_270 | \
+ GUD_ROTATION_REFLECT_X | GUD_ROTATION_REFLECT_Y)
+
+/* USB Control requests: */
+
+/* Get status from the last GET/SET control request. Value is u8. */
+#define GUD_REQ_GET_STATUS 0x00
+ /* Status values: */
+ #define GUD_STATUS_OK 0x00
+ #define GUD_STATUS_BUSY 0x01
+ #define GUD_STATUS_REQUEST_NOT_SUPPORTED 0x02
+ #define GUD_STATUS_PROTOCOL_ERROR 0x03
+ #define GUD_STATUS_INVALID_PARAMETER 0x04
+ #define GUD_STATUS_ERROR 0x05
+
+/* Get display descriptor as a &gud_display_descriptor_req */
+#define GUD_REQ_GET_DESCRIPTOR 0x01
+
+/* Get supported pixel formats as a byte array of GUD_PIXEL_FORMAT_* */
+#define GUD_REQ_GET_FORMATS 0x40
+ #define GUD_FORMATS_MAX_NUM 32
+ #define GUD_PIXEL_FORMAT_R1 0x01 /* 1-bit monochrome */
+ #define GUD_PIXEL_FORMAT_R8 0x08 /* 8-bit greyscale */
+ #define GUD_PIXEL_FORMAT_XRGB1111 0x20
+ #define GUD_PIXEL_FORMAT_RGB332 0x30
+ #define GUD_PIXEL_FORMAT_RGB565 0x40
+ #define GUD_PIXEL_FORMAT_RGB888 0x50
+ #define GUD_PIXEL_FORMAT_XRGB8888 0x80
+ #define GUD_PIXEL_FORMAT_ARGB8888 0x81
+
+/*
+ * Get supported properties that are not connector properties as a &gud_property_req array.
+ * gud_property_req.val often contains the initial value for the property.
+ */
+#define GUD_REQ_GET_PROPERTIES 0x41
+ #define GUD_PROPERTIES_MAX_NUM 32
+
+/* Connector requests have the connector index passed in the wValue field */
+
+/* Get connector descriptors as an array of &gud_connector_descriptor_req */
+#define GUD_REQ_GET_CONNECTORS 0x50
+ #define GUD_CONNECTORS_MAX_NUM 32
+
+/*
+ * Get properties supported by the connector as a &gud_property_req array.
+ * gud_property_req.val often contains the initial value for the property.
+ */
+#define GUD_REQ_GET_CONNECTOR_PROPERTIES 0x51
+ #define GUD_CONNECTOR_PROPERTIES_MAX_NUM 32
+
+/*
+ * Issued when there's a TV_MODE property present.
+ * Gets an array of the supported TV_MODE names, each entry of length
+ * GUD_CONNECTOR_TV_MODE_NAME_LEN. Names must be NUL-terminated.
+ */
+#define GUD_REQ_GET_CONNECTOR_TV_MODE_VALUES 0x52
+ #define GUD_CONNECTOR_TV_MODE_NAME_LEN 16
+ #define GUD_CONNECTOR_TV_MODE_MAX_NUM 16
+
+/* When userspace checks connector status, this is issued first; it is not used for poll requests. */
+#define GUD_REQ_SET_CONNECTOR_FORCE_DETECT 0x53
+
+/*
+ * Get connector status. Value is u8.
+ *
+ * Userspace will get a HOTPLUG uevent if one of the following is true:
+ * - Connection status has changed since the last check
+ * - CHANGED is set
+ */
+#define GUD_REQ_GET_CONNECTOR_STATUS 0x54
+ #define GUD_CONNECTOR_STATUS_DISCONNECTED 0x00
+ #define GUD_CONNECTOR_STATUS_CONNECTED 0x01
+ #define GUD_CONNECTOR_STATUS_UNKNOWN 0x02
+ #define GUD_CONNECTOR_STATUS_CONNECTED_MASK 0x03
+ #define GUD_CONNECTOR_STATUS_CHANGED BIT(7)
+
+/*
+ * Display modes can be fetched as either EDID data or an array of &gud_display_mode_req.
+ *
+ * If GUD_REQ_GET_CONNECTOR_MODES returns zero, EDID is used to create display modes.
+ * If both display modes and EDID are returned, EDID is just passed on to userspace
+ * in the EDID connector property.
+ */
+
+/* Get &gud_display_mode_req array of supported display modes */
+#define GUD_REQ_GET_CONNECTOR_MODES 0x55
+ #define GUD_CONNECTOR_MAX_NUM_MODES 128
+
+/* Get Extended Display Identification Data */
+#define GUD_REQ_GET_CONNECTOR_EDID 0x56
+ #define GUD_CONNECTOR_MAX_EDID_LEN 2048
+
+/* Set buffer properties before bulk transfer as &gud_set_buffer_req */
+#define GUD_REQ_SET_BUFFER 0x60
+
+/* Check display configuration as &gud_state_req */
+#define GUD_REQ_SET_STATE_CHECK 0x61
+
+/* Apply the previous STATE_CHECK configuration */
+#define GUD_REQ_SET_STATE_COMMIT 0x62
+
+/* Enable/disable the display controller, value is u8: 0/1 */
+#define GUD_REQ_SET_CONTROLLER_ENABLE 0x63
+
+/* Enable/disable display/output (DPMS), value is u8: 0/1 */
+#define GUD_REQ_SET_DISPLAY_ENABLE 0x64
+
+#endif
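To make the wire format above concrete, here is a minimal host-side sketch (not part of the header) of how the variable-length &gud_state_req with one trailing &gud_property_req might be assembled before issuing GUD_REQ_SET_STATE_CHECK. The function name and the use of struct_size()/kzalloc() are illustrative assumptions; only the struct layout, endianness and request values come from the header.

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

static struct gud_state_req *
example_build_state(const struct gud_display_mode_req *mode,
		    u8 format, u8 connector, u16 brightness)
{
	struct gud_state_req *req;

	/* Room for the fixed part plus one trailing property entry */
	req = kzalloc(struct_size(req, properties, 1), GFP_KERNEL);
	if (!req)
		return NULL;

	req->mode = *mode;
	req->format = format;
	req->connector = connector;
	/* All multi-byte protocol fields are little-endian */
	req->properties[0].prop = cpu_to_le16(GUD_PROPERTY_BACKLIGHT_BRIGHTNESS);
	req->properties[0].val = cpu_to_le64(brightness);

	/* The caller would send this as GUD_REQ_SET_STATE_CHECK, follow up
	 * with GUD_REQ_SET_STATE_COMMIT, and kfree() it afterwards. */
	return req;
}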
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index 55c3b123581b..c1e2a43d2d1e 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -29,6 +29,7 @@
enum i915_component_type {
I915_COMPONENT_AUDIO = 1,
I915_COMPONENT_HDCP,
+ I915_COMPONENT_PXP
};
/* MAX_PORT is the number of port
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 6722005884db..7adce327c1c2 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -26,8 +26,7 @@
#ifndef _I915_DRM_H_
#define _I915_DRM_H_
-#include <drm/i915_pciids.h>
-#include <uapi/drm/i915_drm.h>
+#include <linux/types.h>
/* For use by IPS driver */
unsigned long i915_read_mch_val(void);
diff --git a/include/drm/i915_mei_hdcp_interface.h b/include/drm/i915_mei_hdcp_interface.h
index 4d48de8890ca..f441cbcd95a4 100644
--- a/include/drm/i915_mei_hdcp_interface.h
+++ b/include/drm/i915_mei_hdcp_interface.h
@@ -11,8 +11,7 @@
#include <linux/mutex.h>
#include <linux/device.h>
-#include <drm/drm_hdcp.h>
-#include <drm/i915_drm.h>
+#include <drm/display/drm_hdcp.h>
/**
* enum hdcp_port_type - HDCP port implementation type defined by ME FW
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 1d2c12219f44..4a4c190f7698 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -170,9 +170,9 @@
#define INTEL_HSW_ULT_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \
+ INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \
- INTEL_VGA_DEVICE(0x0A0B, info), /* ULT GT1 reserved */ \
- INTEL_VGA_DEVICE(0x0A06, info) /* ULT GT1 mobile */
+ INTEL_VGA_DEVICE(0x0A0B, info) /* ULT GT1 reserved */
#define INTEL_HSW_ULX_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0A0E, info) /* ULX GT1 mobile */
@@ -181,26 +181,26 @@
INTEL_HSW_ULT_GT1_IDS(info), \
INTEL_HSW_ULX_GT1_IDS(info), \
INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
- INTEL_VGA_DEVICE(0x040a, info), /* GT1 server */ \
+ INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
+ INTEL_VGA_DEVICE(0x040A, info), /* GT1 server */ \
INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \
INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \
INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \
+ INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \
INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \
INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \
INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \
+ INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \
INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \
INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \
- INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \
- INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
- INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
- INTEL_VGA_DEVICE(0x0D06, info) /* CRW GT1 mobile */
+ INTEL_VGA_DEVICE(0x0D0E, info) /* CRW GT1 reserved */
#define INTEL_HSW_ULT_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \
+ INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \
- INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \
- INTEL_VGA_DEVICE(0x0A16, info) /* ULT GT2 mobile */
+ INTEL_VGA_DEVICE(0x0A1B, info) /* ULT GT2 reserved */
#define INTEL_HSW_ULX_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x0A1E, info) /* ULX GT2 mobile */ \
@@ -209,45 +209,45 @@
INTEL_HSW_ULT_GT2_IDS(info), \
INTEL_HSW_ULX_GT2_IDS(info), \
INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
- INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \
+ INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
+ INTEL_VGA_DEVICE(0x041A, info), /* GT2 server */ \
INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \
INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \
INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \
+ INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \
INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \
INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \
INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \
INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \
+ INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \
INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \
- INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
- INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
- INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \
- INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \
- INTEL_VGA_DEVICE(0x0D16, info) /* CRW GT2 mobile */
+ INTEL_VGA_DEVICE(0x0D1E, info) /* CRW GT2 reserved */
#define INTEL_HSW_ULT_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \
+ INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \
INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \
- INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
INTEL_VGA_DEVICE(0x0A2E, info) /* ULT GT3 reserved */
#define INTEL_HSW_GT3_IDS(info) \
INTEL_HSW_ULT_GT3_IDS(info), \
INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
- INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \
+ INTEL_VGA_DEVICE(0x0426, info), /* GT3 mobile */ \
+ INTEL_VGA_DEVICE(0x042A, info), /* GT3 server */ \
INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \
INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \
INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \
+ INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \
INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \
INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \
INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \
+ INTEL_VGA_DEVICE(0x0D26, info), /* CRW GT3 mobile */ \
INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \
INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
- INTEL_VGA_DEVICE(0x0D2E, info), /* CRW GT3 reserved */ \
- INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
- INTEL_VGA_DEVICE(0x0D26, info) /* CRW GT3 mobile */
+ INTEL_VGA_DEVICE(0x0D2E, info) /* CRW GT3 reserved */
#define INTEL_HSW_IDS(info) \
INTEL_HSW_GT1_IDS(info), \
@@ -258,9 +258,7 @@
INTEL_VGA_DEVICE(0x0f30, info), \
INTEL_VGA_DEVICE(0x0f31, info), \
INTEL_VGA_DEVICE(0x0f32, info), \
- INTEL_VGA_DEVICE(0x0f33, info), \
- INTEL_VGA_DEVICE(0x0157, info), \
- INTEL_VGA_DEVICE(0x0155, info)
+ INTEL_VGA_DEVICE(0x0f33, info)
#define INTEL_BDW_ULT_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \
@@ -331,17 +329,20 @@
INTEL_VGA_DEVICE(0x22b3, info)
#define INTEL_SKL_ULT_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x1906, info) /* ULT GT1 */
+ INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
+ INTEL_VGA_DEVICE(0x1913, info) /* ULT GT1.5 */
#define INTEL_SKL_ULX_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x190E, info) /* ULX GT1 */
+ INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
+ INTEL_VGA_DEVICE(0x1915, info) /* ULX GT1.5 */
#define INTEL_SKL_GT1_IDS(info) \
INTEL_SKL_ULT_GT1_IDS(info), \
INTEL_SKL_ULX_GT1_IDS(info), \
INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
+ INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \
INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
- INTEL_VGA_DEVICE(0x190A, info) /* SRV GT1 */
+ INTEL_VGA_DEVICE(0x1917, info) /* DT GT1.5 */
#define INTEL_SKL_ULT_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
@@ -354,26 +355,26 @@
INTEL_SKL_ULT_GT2_IDS(info), \
INTEL_SKL_ULX_GT2_IDS(info), \
INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \
- INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
+ INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
#define INTEL_SKL_ULT_GT3_IDS(info) \
- INTEL_VGA_DEVICE(0x1926, info) /* ULT GT3 */
+ INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
+ INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3e */ \
+ INTEL_VGA_DEVICE(0x1927, info) /* ULT GT3e */
#define INTEL_SKL_GT3_IDS(info) \
INTEL_SKL_ULT_GT3_IDS(info), \
- INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
- INTEL_VGA_DEVICE(0x192D, info) /* SRV GT3 */
+ INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \
+ INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3e */ \
+ INTEL_VGA_DEVICE(0x192D, info) /* SRV GT3e */
#define INTEL_SKL_GT4_IDS(info) \
INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \
- INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4 */ \
- INTEL_VGA_DEVICE(0x193D, info), /* WKS GT4 */ \
- INTEL_VGA_DEVICE(0x192A, info), /* SRV GT4 */ \
- INTEL_VGA_DEVICE(0x193A, info) /* SRV GT4e */
+ INTEL_VGA_DEVICE(0x193A, info), /* SRV GT4e */ \
+ INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4e */ \
+ INTEL_VGA_DEVICE(0x193D, info) /* WKS GT4e */
#define INTEL_SKL_IDS(info) \
INTEL_SKL_GT1_IDS(info), \
@@ -405,8 +406,8 @@
INTEL_KBL_ULX_GT1_IDS(info), \
INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \
INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \
- INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \
- INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */
+ INTEL_VGA_DEVICE(0x590A, info), /* SRV GT1 */ \
+ INTEL_VGA_DEVICE(0x590B, info) /* Halo GT1 */
#define INTEL_KBL_ULT_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
@@ -418,10 +419,10 @@
#define INTEL_KBL_GT2_IDS(info) \
INTEL_KBL_ULT_GT2_IDS(info), \
INTEL_KBL_ULX_GT2_IDS(info), \
- INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \
INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \
- INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \
+ INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \
INTEL_VGA_DEVICE(0x591A, info), /* SRV GT2 */ \
+ INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */
#define INTEL_KBL_ULT_GT3_IDS(info) \
@@ -446,10 +447,10 @@
/* CML GT1 */
#define INTEL_CML_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x9BA5, info), \
- INTEL_VGA_DEVICE(0x9BA8, info), \
+ INTEL_VGA_DEVICE(0x9BA2, info), \
INTEL_VGA_DEVICE(0x9BA4, info), \
- INTEL_VGA_DEVICE(0x9BA2, info)
+ INTEL_VGA_DEVICE(0x9BA5, info), \
+ INTEL_VGA_DEVICE(0x9BA8, info)
#define INTEL_CML_U_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x9B21, info), \
@@ -458,11 +459,11 @@
/* CML GT2 */
#define INTEL_CML_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x9BC5, info), \
- INTEL_VGA_DEVICE(0x9BC8, info), \
- INTEL_VGA_DEVICE(0x9BC4, info), \
INTEL_VGA_DEVICE(0x9BC2, info), \
+ INTEL_VGA_DEVICE(0x9BC4, info), \
+ INTEL_VGA_DEVICE(0x9BC5, info), \
INTEL_VGA_DEVICE(0x9BC6, info), \
+ INTEL_VGA_DEVICE(0x9BC8, info), \
INTEL_VGA_DEVICE(0x9BE6, info), \
INTEL_VGA_DEVICE(0x9BF6, info)
@@ -496,8 +497,8 @@
INTEL_VGA_DEVICE(0x3E9C, info)
#define INTEL_CFL_H_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \
- INTEL_VGA_DEVICE(0x3E94, info) /* Halo GT2 */
+ INTEL_VGA_DEVICE(0x3E94, info), /* Halo GT2 */ \
+ INTEL_VGA_DEVICE(0x3E9B, info) /* Halo GT2 */
/* CFL U GT2 */
#define INTEL_CFL_U_GT2_IDS(info) \
@@ -542,63 +543,209 @@
/* CNL */
#define INTEL_CNL_PORT_F_IDS(info) \
- INTEL_VGA_DEVICE(0x5A54, info), \
- INTEL_VGA_DEVICE(0x5A5C, info), \
INTEL_VGA_DEVICE(0x5A44, info), \
- INTEL_VGA_DEVICE(0x5A4C, info)
+ INTEL_VGA_DEVICE(0x5A4C, info), \
+ INTEL_VGA_DEVICE(0x5A54, info), \
+ INTEL_VGA_DEVICE(0x5A5C, info)
#define INTEL_CNL_IDS(info) \
INTEL_CNL_PORT_F_IDS(info), \
- INTEL_VGA_DEVICE(0x5A51, info), \
- INTEL_VGA_DEVICE(0x5A59, info), \
+ INTEL_VGA_DEVICE(0x5A40, info), \
INTEL_VGA_DEVICE(0x5A41, info), \
- INTEL_VGA_DEVICE(0x5A49, info), \
- INTEL_VGA_DEVICE(0x5A52, info), \
- INTEL_VGA_DEVICE(0x5A5A, info), \
INTEL_VGA_DEVICE(0x5A42, info), \
+ INTEL_VGA_DEVICE(0x5A49, info), \
INTEL_VGA_DEVICE(0x5A4A, info), \
INTEL_VGA_DEVICE(0x5A50, info), \
- INTEL_VGA_DEVICE(0x5A40, info)
+ INTEL_VGA_DEVICE(0x5A51, info), \
+ INTEL_VGA_DEVICE(0x5A52, info), \
+ INTEL_VGA_DEVICE(0x5A59, info), \
+ INTEL_VGA_DEVICE(0x5A5A, info)
/* ICL */
#define INTEL_ICL_PORT_F_IDS(info) \
INTEL_VGA_DEVICE(0x8A50, info), \
- INTEL_VGA_DEVICE(0x8A5C, info), \
- INTEL_VGA_DEVICE(0x8A59, info), \
- INTEL_VGA_DEVICE(0x8A58, info), \
INTEL_VGA_DEVICE(0x8A52, info), \
+ INTEL_VGA_DEVICE(0x8A53, info), \
+ INTEL_VGA_DEVICE(0x8A54, info), \
+ INTEL_VGA_DEVICE(0x8A56, info), \
+ INTEL_VGA_DEVICE(0x8A57, info), \
+ INTEL_VGA_DEVICE(0x8A58, info), \
+ INTEL_VGA_DEVICE(0x8A59, info), \
INTEL_VGA_DEVICE(0x8A5A, info), \
INTEL_VGA_DEVICE(0x8A5B, info), \
- INTEL_VGA_DEVICE(0x8A57, info), \
- INTEL_VGA_DEVICE(0x8A56, info), \
- INTEL_VGA_DEVICE(0x8A71, info), \
+ INTEL_VGA_DEVICE(0x8A5C, info), \
INTEL_VGA_DEVICE(0x8A70, info), \
- INTEL_VGA_DEVICE(0x8A53, info), \
- INTEL_VGA_DEVICE(0x8A54, info)
+ INTEL_VGA_DEVICE(0x8A71, info)
#define INTEL_ICL_11_IDS(info) \
INTEL_ICL_PORT_F_IDS(info), \
INTEL_VGA_DEVICE(0x8A51, info), \
INTEL_VGA_DEVICE(0x8A5D, info)
-/* EHL/JSL */
+/* EHL */
#define INTEL_EHL_IDS(info) \
- INTEL_VGA_DEVICE(0x4500, info), \
- INTEL_VGA_DEVICE(0x4571, info), \
- INTEL_VGA_DEVICE(0x4551, info), \
INTEL_VGA_DEVICE(0x4541, info), \
- INTEL_VGA_DEVICE(0x4E71, info), \
+ INTEL_VGA_DEVICE(0x4551, info), \
+ INTEL_VGA_DEVICE(0x4555, info), \
+ INTEL_VGA_DEVICE(0x4557, info), \
+ INTEL_VGA_DEVICE(0x4571, info)
+
+/* JSL */
+#define INTEL_JSL_IDS(info) \
+ INTEL_VGA_DEVICE(0x4E51, info), \
+ INTEL_VGA_DEVICE(0x4E55, info), \
+ INTEL_VGA_DEVICE(0x4E57, info), \
INTEL_VGA_DEVICE(0x4E61, info), \
- INTEL_VGA_DEVICE(0x4E51, info)
+ INTEL_VGA_DEVICE(0x4E71, info)
/* TGL */
-#define INTEL_TGL_12_IDS(info) \
- INTEL_VGA_DEVICE(0x9A49, info), \
- INTEL_VGA_DEVICE(0x9A40, info), \
- INTEL_VGA_DEVICE(0x9A59, info), \
+#define INTEL_TGL_12_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x9A60, info), \
INTEL_VGA_DEVICE(0x9A68, info), \
- INTEL_VGA_DEVICE(0x9A70, info), \
- INTEL_VGA_DEVICE(0x9A78, info)
+ INTEL_VGA_DEVICE(0x9A70, info)
+
+#define INTEL_TGL_12_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x9A40, info), \
+ INTEL_VGA_DEVICE(0x9A49, info), \
+ INTEL_VGA_DEVICE(0x9A59, info), \
+ INTEL_VGA_DEVICE(0x9A78, info), \
+ INTEL_VGA_DEVICE(0x9AC0, info), \
+ INTEL_VGA_DEVICE(0x9AC9, info), \
+ INTEL_VGA_DEVICE(0x9AD9, info), \
+ INTEL_VGA_DEVICE(0x9AF8, info)
+
+#define INTEL_TGL_12_IDS(info) \
+ INTEL_TGL_12_GT1_IDS(info), \
+ INTEL_TGL_12_GT2_IDS(info)
+
+/* RKL */
+#define INTEL_RKL_IDS(info) \
+ INTEL_VGA_DEVICE(0x4C80, info), \
+ INTEL_VGA_DEVICE(0x4C8A, info), \
+ INTEL_VGA_DEVICE(0x4C8B, info), \
+ INTEL_VGA_DEVICE(0x4C8C, info), \
+ INTEL_VGA_DEVICE(0x4C90, info), \
+ INTEL_VGA_DEVICE(0x4C9A, info)
+
+/* DG1 */
+#define INTEL_DG1_IDS(info) \
+ INTEL_VGA_DEVICE(0x4905, info), \
+ INTEL_VGA_DEVICE(0x4906, info), \
+ INTEL_VGA_DEVICE(0x4907, info), \
+ INTEL_VGA_DEVICE(0x4908, info), \
+ INTEL_VGA_DEVICE(0x4909, info)
+
+/* ADL-S */
+#define INTEL_ADLS_IDS(info) \
+ INTEL_VGA_DEVICE(0x4680, info), \
+ INTEL_VGA_DEVICE(0x4682, info), \
+ INTEL_VGA_DEVICE(0x4688, info), \
+ INTEL_VGA_DEVICE(0x468A, info), \
+ INTEL_VGA_DEVICE(0x468B, info), \
+ INTEL_VGA_DEVICE(0x4690, info), \
+ INTEL_VGA_DEVICE(0x4692, info), \
+ INTEL_VGA_DEVICE(0x4693, info)
+
+/* ADL-P */
+#define INTEL_ADLP_IDS(info) \
+ INTEL_VGA_DEVICE(0x46A0, info), \
+ INTEL_VGA_DEVICE(0x46A1, info), \
+ INTEL_VGA_DEVICE(0x46A2, info), \
+ INTEL_VGA_DEVICE(0x46A3, info), \
+ INTEL_VGA_DEVICE(0x46A6, info), \
+ INTEL_VGA_DEVICE(0x46A8, info), \
+ INTEL_VGA_DEVICE(0x46AA, info), \
+ INTEL_VGA_DEVICE(0x462A, info), \
+ INTEL_VGA_DEVICE(0x4626, info), \
+ INTEL_VGA_DEVICE(0x4628, info), \
+ INTEL_VGA_DEVICE(0x46B0, info), \
+ INTEL_VGA_DEVICE(0x46B1, info), \
+ INTEL_VGA_DEVICE(0x46B2, info), \
+ INTEL_VGA_DEVICE(0x46B3, info), \
+ INTEL_VGA_DEVICE(0x46C0, info), \
+ INTEL_VGA_DEVICE(0x46C1, info), \
+ INTEL_VGA_DEVICE(0x46C2, info), \
+ INTEL_VGA_DEVICE(0x46C3, info)
+
+/* ADL-N */
+#define INTEL_ADLN_IDS(info) \
+ INTEL_VGA_DEVICE(0x46D0, info), \
+ INTEL_VGA_DEVICE(0x46D1, info), \
+ INTEL_VGA_DEVICE(0x46D2, info)
+
+/* RPL-S */
+#define INTEL_RPLS_IDS(info) \
+ INTEL_VGA_DEVICE(0xA780, info), \
+ INTEL_VGA_DEVICE(0xA781, info), \
+ INTEL_VGA_DEVICE(0xA782, info), \
+ INTEL_VGA_DEVICE(0xA783, info), \
+ INTEL_VGA_DEVICE(0xA788, info), \
+ INTEL_VGA_DEVICE(0xA789, info), \
+ INTEL_VGA_DEVICE(0xA78A, info), \
+ INTEL_VGA_DEVICE(0xA78B, info)
+
+/* RPL-P */
+#define INTEL_RPLP_IDS(info) \
+ INTEL_VGA_DEVICE(0xA720, info), \
+ INTEL_VGA_DEVICE(0xA721, info), \
+ INTEL_VGA_DEVICE(0xA7A0, info), \
+ INTEL_VGA_DEVICE(0xA7A1, info), \
+ INTEL_VGA_DEVICE(0xA7A8, info), \
+ INTEL_VGA_DEVICE(0xA7A9, info)
+
+/* DG2 */
+#define INTEL_DG2_G10_IDS(info) \
+ INTEL_VGA_DEVICE(0x5690, info), \
+ INTEL_VGA_DEVICE(0x5691, info), \
+ INTEL_VGA_DEVICE(0x5692, info), \
+ INTEL_VGA_DEVICE(0x56A0, info), \
+ INTEL_VGA_DEVICE(0x56A1, info), \
+ INTEL_VGA_DEVICE(0x56A2, info)
+
+#define INTEL_DG2_G11_IDS(info) \
+ INTEL_VGA_DEVICE(0x5693, info), \
+ INTEL_VGA_DEVICE(0x5694, info), \
+ INTEL_VGA_DEVICE(0x5695, info), \
+ INTEL_VGA_DEVICE(0x5698, info), \
+ INTEL_VGA_DEVICE(0x56A5, info), \
+ INTEL_VGA_DEVICE(0x56A6, info), \
+ INTEL_VGA_DEVICE(0x56B0, info), \
+ INTEL_VGA_DEVICE(0x56B1, info)
+
+#define INTEL_DG2_G12_IDS(info) \
+ INTEL_VGA_DEVICE(0x5696, info), \
+ INTEL_VGA_DEVICE(0x5697, info), \
+ INTEL_VGA_DEVICE(0x56A3, info), \
+ INTEL_VGA_DEVICE(0x56A4, info), \
+ INTEL_VGA_DEVICE(0x56B2, info), \
+ INTEL_VGA_DEVICE(0x56B3, info)
+
+#define INTEL_DG2_IDS(info) \
+ INTEL_DG2_G10_IDS(info), \
+ INTEL_DG2_G11_IDS(info), \
+ INTEL_DG2_G12_IDS(info)
+
+#define INTEL_ATS_M150_IDS(info) \
+ INTEL_VGA_DEVICE(0x56C0, info)
+
+#define INTEL_ATS_M75_IDS(info) \
+ INTEL_VGA_DEVICE(0x56C1, info)
+
+#define INTEL_ATS_M_IDS(info) \
+ INTEL_ATS_M150_IDS(info), \
+ INTEL_ATS_M75_IDS(info)
+
+/* MTL */
+#define INTEL_MTL_M_IDS(info) \
+ INTEL_VGA_DEVICE(0x7D40, info), \
+ INTEL_VGA_DEVICE(0x7D60, info)
+
+#define INTEL_MTL_P_IDS(info) \
+ INTEL_VGA_DEVICE(0x7D45, info), \
+ INTEL_VGA_DEVICE(0x7D55, info), \
+ INTEL_VGA_DEVICE(0x7DD5, info)
+
+#define INTEL_MTL_IDS(info) \
+ INTEL_MTL_M_IDS(info), \
+ INTEL_MTL_P_IDS(info)
#endif /* _I915_PCIIDS_H */
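For context, drivers consume these ID macros by expanding them into a struct pci_device_id match table, which is why keeping each family block sorted matters mostly for table readability. A hedged sketch follows; the "example" names are hypothetical, and INTEL_VGA_DEVICE is assumed to emit a pci_device_id initializer whose driver data carries the info pointer, as it does for i915.

#include <linux/module.h>
#include <linux/pci.h>
#include <drm/i915_pciids.h>

struct example_chip_info { unsigned int gen; };	/* hypothetical driver data */

static const struct example_chip_info example_mtl_info = { .gen = 14 };

static const struct pci_device_id example_pciidlist[] = {
	INTEL_MTL_IDS(&example_mtl_info),
	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, example_pciidlist);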
diff --git a/include/drm/i915_pxp_tee_interface.h b/include/drm/i915_pxp_tee_interface.h
new file mode 100644
index 000000000000..af593ec64469
--- /dev/null
+++ b/include/drm/i915_pxp_tee_interface.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _I915_PXP_TEE_INTERFACE_H_
+#define _I915_PXP_TEE_INTERFACE_H_
+
+#include <linux/mutex.h>
+#include <linux/device.h>
+
+/**
+ * struct i915_pxp_component_ops - ops for PXP services.
+ * @owner: Module providing the ops
+ * @send: sends data to PXP
+ * @recv: receives data from PXP
+ */
+struct i915_pxp_component_ops {
+ /**
+ * @owner: owner of the module providing the ops
+ */
+ struct module *owner;
+
+ int (*send)(struct device *dev, const void *message, size_t size);
+ int (*recv)(struct device *dev, void *buffer, size_t size);
+};
+
+/**
+ * struct i915_pxp_component - Used for communication between i915 and TEE
+ * drivers for the PXP services
+ * @tee_dev: device that provides the PXP service from the TEE bus.
+ * @ops: Ops implemented by the TEE driver, used by the i915 driver.
+ */
+struct i915_pxp_component {
+ struct device *tee_dev;
+ const struct i915_pxp_component_ops *ops;
+
+ /* To protect the above members. */
+ struct mutex mutex;
+};
+
+#endif /* _I915_PXP_TEE_INTERFACE_H_ */
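A short sketch (an assumption, not part of the header) of how the i915 side might call through these component ops while holding the mutex that guards them; the wrapper name is hypothetical.

static int example_pxp_send(struct i915_pxp_component *comp,
			    const void *msg, size_t size)
{
	int ret = -ENODEV;

	mutex_lock(&comp->mutex);
	/* ops and tee_dev may be cleared when the TEE driver unbinds */
	if (comp->ops && comp->tee_dev)
		ret = comp->ops->send(comp->tee_dev, msg, size);
	mutex_unlock(&comp->mutex);

	return ret;
}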
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index 71d81923e6b0..cb0d5b7200c7 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -4,27 +4,30 @@
#ifndef _DRM_INTEL_GTT_H
#define _DRM_INTEL_GTT_H
-#include <linux/agp_backend.h>
-#include <linux/kernel.h>
+#include <linux/types.h>
-void intel_gtt_get(u64 *gtt_total,
- phys_addr_t *mappable_base,
- resource_size_t *mappable_end);
+struct agp_bridge_data;
+struct pci_dev;
+struct sg_table;
+
+void intel_gmch_gtt_get(u64 *gtt_total,
+ phys_addr_t *mappable_base,
+ resource_size_t *mappable_end);
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
struct agp_bridge_data *bridge);
void intel_gmch_remove(void);
-bool intel_enable_gtt(void);
+bool intel_gmch_enable_gtt(void);
-void intel_gtt_chipset_flush(void);
-void intel_gtt_insert_page(dma_addr_t addr,
- unsigned int pg,
- unsigned int flags);
-void intel_gtt_insert_sg_entries(struct sg_table *st,
- unsigned int pg_start,
- unsigned int flags);
-void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
+void intel_gmch_gtt_flush(void);
+void intel_gmch_gtt_insert_page(dma_addr_t addr,
+ unsigned int pg,
+ unsigned int flags);
+void intel_gmch_gtt_insert_sg_entries(struct sg_table *st,
+ unsigned int pg_start,
+ unsigned int flags);
+void intel_gmch_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
/* Special gtt memory types */
#define AGP_DCACHE_MEMORY 1
@@ -33,8 +36,4 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
/* flag for GFDT type */
#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
-#ifdef CONFIG_INTEL_IOMMU
-extern int intel_iommu_gfx_mapped;
-#endif
-
#endif
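A hedged usage sketch of the renamed GMCH GTT entry points, in the order a caller would typically use them. The function and parameter names here are illustrative; the flags value would be one of the driver-understood memory types such as those defined above.

#include <drm/intel-gtt.h>

static void example_bind_one_page(dma_addr_t addr, unsigned int entry,
				  unsigned int flags)
{
	intel_gmch_gtt_insert_page(addr, entry, flags);
	intel_gmch_gtt_flush();	/* make the PTE update visible to the chipset */
}

static void example_unbind(unsigned int first_entry, unsigned int num_entries)
{
	intel_gmch_gtt_clear_range(first_entry, num_entries);
	intel_gmch_gtt_flush();
}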
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 66ca49db9633..44a538ee5e2a 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -32,7 +32,6 @@
#define _TTM_BO_API_H_
#include <drm/drm_gem.h>
-#include <drm/drm_hashtab.h>
#include <drm/drm_vma_manager.h>
#include <linux/kref.h>
#include <linux/list.h>
@@ -42,9 +41,13 @@
#include <linux/bitmap.h>
#include <linux/dma-resv.h>
-struct ttm_bo_global;
+#include "ttm_resource.h"
-struct ttm_bo_device;
+struct ttm_global;
+
+struct ttm_device;
+
+struct iosys_map;
struct drm_mm_node;
@@ -52,57 +55,6 @@ struct ttm_placement;
struct ttm_place;
-struct ttm_lru_bulk_move;
-
-/**
- * struct ttm_bus_placement
- *
- * @addr: mapped virtual address
- * @base: bus base address
- * @is_iomem: is this io memory ?
- * @size: size in byte
- * @offset: offset from the base address
- * @io_reserved_vm: The VM system has a refcount in @io_reserved_count
- * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve
- *
- * Structure indicating the bus placement of an object.
- */
-struct ttm_bus_placement {
- void *addr;
- phys_addr_t base;
- unsigned long size;
- unsigned long offset;
- bool is_iomem;
- bool io_reserved_vm;
- uint64_t io_reserved_count;
-};
-
-
-/**
- * struct ttm_mem_reg
- *
- * @mm_node: Memory manager node.
- * @size: Requested size of memory region.
- * @num_pages: Actual size of memory region in pages.
- * @page_alignment: Page alignment.
- * @placement: Placement flags.
- * @bus: Placement on io bus accessible to the CPU
- *
- * Structure indicating the placement and space resources used by a
- * buffer object.
- */
-
-struct ttm_mem_reg {
- void *mm_node;
- unsigned long start;
- unsigned long size;
- unsigned long num_pages;
- uint32_t page_alignment;
- uint32_t mem_type;
- uint32_t placement;
- struct ttm_bus_placement bus;
-};
-
/**
* enum ttm_bo_type
*
@@ -131,26 +83,17 @@ struct ttm_tt;
* @base: drm_gem_object superclass data.
* @bdev: Pointer to the buffer object device structure.
* @type: The bo type.
+ * @page_alignment: Page alignment.
* @destroy: Destruction function. If NULL, kfree is used.
* @num_pages: Actual number of pages.
- * @acc_size: Accounted size for this object.
* @kref: Reference count of this buffer object. When this refcount reaches
- * zero, the object is put on the delayed delete list.
- * @list_kref: List reference count of this buffer object. This member is
- * used to avoid destruction while the buffer object is still on a list.
- * Lru lists may keep one refcount, the delayed delete list, and kref != 0
- * keeps one refcount. When this refcount reaches zero,
- * the object is destroyed.
+ * zero, the object is destroyed or put on the delayed delete list.
* @mem: structure describing current placement.
- * @persistent_swap_storage: Usually the swap storage is deleted for buffers
- * pinned in physical memory. If this behaviour is not desired, this member
- * holds a pointer to a persistent shmem object.
* @ttm: TTM structure holding system pages.
* @evicted: Whether the object was evicted without user-space knowing.
- * @lru: List head for the lru list.
+ * @deleted: True if the object is only a zombie and already deleted.
* @ddestroy: List head for the delayed destroy list.
* @swap: List head for swap LRU list.
- * @moving: Fence set when BO is moving
* @offset: The current GPU offset, which can have different meanings
* depending on the memory type. For SYSTEM type memory, it should be 0.
* @cur_placement: Hint of current placement.
@@ -174,43 +117,37 @@ struct ttm_buffer_object {
* Members constant at init.
*/
- struct ttm_bo_device *bdev;
+ struct ttm_device *bdev;
enum ttm_bo_type type;
+ uint32_t page_alignment;
void (*destroy) (struct ttm_buffer_object *);
- unsigned long num_pages;
- size_t acc_size;
/**
* Members not needing protection.
*/
-
struct kref kref;
- struct kref list_kref;
/**
* Members protected by the bo::resv::reserved lock.
*/
- struct ttm_mem_reg mem;
- struct file *persistent_swap_storage;
+ struct ttm_resource *resource;
struct ttm_tt *ttm;
- bool evicted;
+ bool deleted;
+ struct ttm_lru_bulk_move *bulk_move;
/**
* Members protected by the bdev::lru_lock.
*/
- struct list_head lru;
struct list_head ddestroy;
- struct list_head swap;
- struct list_head io_reserve_lru;
/**
* Members protected by a bo reservation.
*/
- struct dma_fence *moving;
unsigned priority;
+ unsigned pin_count;
/**
* Special members that are protected by the reserve lock
@@ -218,8 +155,6 @@ struct ttm_buffer_object {
* either of these locks held.
*/
- uint64_t offset; /* GPU address space is independent of CPU word size */
-
struct sg_table *sg;
};
@@ -254,8 +189,12 @@ struct ttm_bo_kmap_obj {
*
* @interruptible: Sleep interruptible if sleeping.
* @no_wait_gpu: Return immediately if the GPU is busy.
+ * @gfp_retry_mayfail: Set __GFP_RETRY_MAYFAIL when allocating pages.
+ * @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple
+ * BOs share the same reservation object.
+ * @force_alloc: Don't check the memory account during suspend or CPU page
+ * faults. Should only be used by TTM internally.
* @resv: Reservation object to allow reserved evictions with.
- * @flags: Including the following flags
*
* Context for TTM operations like changing buffer placement or general memory
* allocation.
@@ -263,16 +202,13 @@ struct ttm_bo_kmap_obj {
struct ttm_operation_ctx {
bool interruptible;
bool no_wait_gpu;
+ bool gfp_retry_mayfail;
+ bool allow_res_evict;
+ bool force_alloc;
struct dma_resv *resv;
uint64_t bytes_moved;
- uint32_t flags;
};
-/* Allow eviction of reserved BOs */
-#define TTM_OPT_FLAG_ALLOW_RES_EVICT 0x1
-/* when serving page fault or suspend, allow alloc anyway */
-#define TTM_OPT_FLAG_FORCE_ALLOC 0x2
-
/**
* ttm_bo_get - reference a struct ttm_buffer_object
*
@@ -317,17 +253,10 @@ ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
*/
int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait);
-/**
- * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
- *
- * @placement: Return immediately if buffer is busy.
- * @mem: The struct ttm_mem_reg indicating the region where the bo resides
- * @new_flags: Describes compatible placement found
- *
- * Returns true if the placement is compatible
- */
-bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem,
- uint32_t *new_flags);
+static inline int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
+{
+ return ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
+}
/**
* ttm_bo_validate
@@ -357,28 +286,9 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
*/
void ttm_bo_put(struct ttm_buffer_object *bo);
-/**
- * ttm_bo_move_to_lru_tail
- *
- * @bo: The buffer object.
- * @bulk: optional bulk move structure to remember BO positions
- *
- * Move this BO to the tail of all lru lists used to lookup and reserve an
- * object. This function must be called with struct ttm_bo_global::lru_lock
- * held, and is used to make a BO less likely to be considered for eviction.
- */
-void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
- struct ttm_lru_bulk_move *bulk);
-
-/**
- * ttm_bo_bulk_move_lru_tail
- *
- * @bulk: bulk move structure
- *
- * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that
- * BO order never changes. Should be called with ttm_bo_global::lru_lock held.
- */
-void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk);
+void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);
+void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
+ struct ttm_lru_bulk_move *bulk);
/**
* ttm_bo_lock_delayed_workqueue
@@ -387,14 +297,14 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk);
* Returns
* True if the workqueue was queued at the time
*/
-int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev);
+int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev);
/**
* ttm_bo_unlock_delayed_workqueue
*
* Allows the delayed workqueue to run.
*/
-void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched);
+void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched);
/**
* ttm_bo_eviction_valuable
@@ -407,202 +317,16 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place);
-/**
- * ttm_bo_acc_size
- *
- * @bdev: Pointer to a ttm_bo_device struct.
- * @bo_size: size of the buffer object in byte.
- * @struct_size: size of the structure holding buffer object datas
- *
- * Returns size to account for a buffer object
- */
-size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
- unsigned long bo_size,
- unsigned struct_size);
-size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
- unsigned long bo_size,
- unsigned struct_size);
-
-/**
- * ttm_bo_init_reserved
- *
- * @bdev: Pointer to a ttm_bo_device struct.
- * @bo: Pointer to a ttm_buffer_object to be initialized.
- * @size: Requested size of buffer object.
- * @type: Requested type of buffer object.
- * @flags: Initial placement flags.
- * @page_alignment: Data alignment in pages.
- * @ctx: TTM operation context for memory allocation.
- * @acc_size: Accounted size for this object.
- * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
- * @destroy: Destroy function. Use NULL for kfree().
- *
- * This function initializes a pre-allocated struct ttm_buffer_object.
- * As this object may be part of a larger structure, this function,
- * together with the @destroy function,
- * enables driver-specific objects derived from a ttm_buffer_object.
- *
- * On successful return, the caller owns an object kref to @bo. The kref and
- * list_kref are usually set to 1, but note that in some situations, other
- * tasks may already be holding references to @bo as well.
- * Furthermore, if resv == NULL, the buffer's reservation lock will be held,
- * and it is the caller's responsibility to call ttm_bo_unreserve.
- *
- * If a failure occurs, the function will call the @destroy function, or
- * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
- * illegal and will likely cause memory corruption.
- *
- * Returns
- * -ENOMEM: Out of memory.
- * -EINVAL: Invalid placement flags.
- * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
- */
-
-int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
- struct ttm_buffer_object *bo,
- unsigned long size,
- enum ttm_bo_type type,
- struct ttm_placement *placement,
- uint32_t page_alignment,
- struct ttm_operation_ctx *ctx,
- size_t acc_size,
- struct sg_table *sg,
- struct dma_resv *resv,
+int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
+ enum ttm_bo_type type, struct ttm_placement *placement,
+ uint32_t alignment, struct ttm_operation_ctx *ctx,
+ struct sg_table *sg, struct dma_resv *resv,
+ void (*destroy) (struct ttm_buffer_object *));
+int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
+ enum ttm_bo_type type, struct ttm_placement *placement,
+ uint32_t alignment, bool interruptible,
+ struct sg_table *sg, struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *));
-
-/**
- * ttm_bo_init
- *
- * @bdev: Pointer to a ttm_bo_device struct.
- * @bo: Pointer to a ttm_buffer_object to be initialized.
- * @size: Requested size of buffer object.
- * @type: Requested type of buffer object.
- * @flags: Initial placement flags.
- * @page_alignment: Data alignment in pages.
- * @interruptible: If needing to sleep to wait for GPU resources,
- * sleep interruptible.
- * pinned in physical memory. If this behaviour is not desired, this member
- * holds a pointer to a persistent shmem object. Typically, this would
- * point to the shmem object backing a GEM object if TTM is used to back a
- * GEM user interface.
- * @acc_size: Accounted size for this object.
- * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
- * @destroy: Destroy function. Use NULL for kfree().
- *
- * This function initializes a pre-allocated struct ttm_buffer_object.
- * As this object may be part of a larger structure, this function,
- * together with the @destroy function,
- * enables driver-specific objects derived from a ttm_buffer_object.
- *
- * On successful return, the caller owns an object kref to @bo. The kref and
- * list_kref are usually set to 1, but note that in some situations, other
- * tasks may already be holding references to @bo as well.
- *
- * If a failure occurs, the function will call the @destroy function, or
- * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
- * illegal and will likely cause memory corruption.
- *
- * Returns
- * -ENOMEM: Out of memory.
- * -EINVAL: Invalid placement flags.
- * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
- */
-int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
- unsigned long size, enum ttm_bo_type type,
- struct ttm_placement *placement,
- uint32_t page_alignment, bool interrubtible, size_t acc_size,
- struct sg_table *sg, struct dma_resv *resv,
- void (*destroy) (struct ttm_buffer_object *));
-
-/**
- * ttm_bo_create
- *
- * @bdev: Pointer to a ttm_bo_device struct.
- * @size: Requested size of buffer object.
- * @type: Requested type of buffer object.
- * @placement: Initial placement.
- * @page_alignment: Data alignment in pages.
- * @interruptible: If needing to sleep while waiting for GPU resources,
- * sleep interruptible.
- * @p_bo: On successful completion *p_bo points to the created object.
- *
- * This function allocates a ttm_buffer_object, and then calls ttm_bo_init
- * on that object. The destroy function is set to kfree().
- * Returns
- * -ENOMEM: Out of memory.
- * -EINVAL: Invalid placement flags.
- * -ERESTARTSYS: Interrupted by signal while waiting for resources.
- */
-int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size,
- enum ttm_bo_type type, struct ttm_placement *placement,
- uint32_t page_alignment, bool interruptible,
- struct ttm_buffer_object **p_bo);
-
-/**
- * ttm_bo_init_mm
- *
- * @bdev: Pointer to a ttm_bo_device struct.
- * @mem_type: The memory type.
- * @p_size: size managed area in pages.
- *
- * Initialize a manager for a given memory type.
- * Note: if part of driver firstopen, it must be protected from a
- * potentially racing lastclose.
- * Returns:
- * -EINVAL: invalid size or memory type.
- * -ENOMEM: Not enough memory.
- * May also return driver-specified errors.
- */
-int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
- unsigned long p_size);
-
-/**
- * ttm_bo_clean_mm
- *
- * @bdev: Pointer to a ttm_bo_device struct.
- * @mem_type: The memory type.
- *
- * Take down a manager for a given memory type after first walking
- * the LRU list to evict any buffers left alive.
- *
- * Normally, this function is part of lastclose() or unload(), and at that
- * point there shouldn't be any buffers left created by user-space, since
- * there should've been removed by the file descriptor release() method.
- * However, before this function is run, make sure to signal all sync objects,
- * and verify that the delayed delete queue is empty. The driver must also
- * make sure that there are no NO_EVICT buffers present in this memory type
- * when the call is made.
- *
- * If this function is part of a VT switch, the caller must make sure that
- * there are no appications currently validating buffers before this
- * function is called. The caller can do that by first taking the
- * struct ttm_bo_device::ttm_lock in write mode.
- *
- * Returns:
- * -EINVAL: invalid or uninitialized memory type.
- * -EBUSY: There are still buffers left in this memory type.
- */
-int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
-
-/**
- * ttm_bo_evict_mm
- *
- * @bdev: Pointer to a ttm_bo_device struct.
- * @mem_type: The memory type.
- *
- * Evicts all buffers on the lru list of the memory type.
- * This is normally part of a VT switch or an
- * out-of-memory-space-due-to-fragmentation handler.
- * The caller must make sure that there are no other processes
- * currently validating buffers, and can do that by taking the
- * struct ttm_bo_device::ttm_lock in write mode.
- *
- * Returns:
- * -EINVAL: Invalid or uninitialized memory type.
- * -ERESTARTSYS: The call was interrupted by a signal while waiting to
- * evict a buffer.
- */
-int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
/**
* ttm_kmap_obj_virtual
@@ -651,36 +375,45 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
/**
- * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object.
+ * ttm_bo_vmap
*
- * @vma: vma as input from the fbdev mmap method.
- * @bo: The bo backing the address space.
+ * @bo: The buffer object.
+ * @map: pointer to a struct iosys_map representing the map.
*
- * Maps a buffer object.
+ * Sets up a kernel virtual mapping of the data in the buffer object,
+ * using ioremap or vmap. The parameter @map returns the virtual
+ * address as struct iosys_map. Unmap the buffer with ttm_bo_vunmap().
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid range.
*/
-int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
+int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
/**
- * ttm_bo_mmap - mmap out of the ttm device address space.
+ * ttm_bo_vunmap
*
- * @filp: filp as input from the mmap method.
- * @vma: vma as input from the mmap method.
- * @bdev: Pointer to the ttm_bo_device with the address space manager.
+ * @bo: The buffer object.
+ * @map: Object describing the map to unmap.
*
- * This function is intended to be called by the device mmap method.
- * if the device address space is to be backed by the bo manager.
+ * Unmaps a kernel map set up by ttm_bo_vmap().
*/
-int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
- struct ttm_bo_device *bdev);
+void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
-void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot);
-
-void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot);
+/**
+ * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object.
+ *
+ * @vma: vma as input from the fbdev mmap method.
+ * @bo: The bo backing the address space.
+ *
+ * Maps a buffer object.
+ */
+int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
/**
* ttm_bo_io
*
- * @bdev: Pointer to the struct ttm_bo_device.
+ * @bdev: Pointer to the struct ttm_device.
* @filp: Pointer to the struct file attempting to read / write.
* @wbuf: User-space pointer to address of buffer to write. NULL on read.
* @rbuf: User-space pointer to address of buffer to read into.
@@ -697,32 +430,21 @@ void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot);
* the function may return -ERESTARTSYS if
* interrupted by a signal.
*/
-ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
+ssize_t ttm_bo_io(struct ttm_device *bdev, struct file *filp,
const char __user *wbuf, char __user *rbuf,
size_t count, loff_t *f_pos, bool write);
-int ttm_bo_swapout(struct ttm_bo_global *glob,
- struct ttm_operation_ctx *ctx);
-void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
+int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
+ gfp_t gfp_flags);
-/**
- * ttm_bo_uses_embedded_gem_object - check if the given bo uses the
- * embedded drm_gem_object.
- *
- * Most ttm drivers are using gem too, so the embedded
- * ttm_buffer_object.base will be initialized by the driver (before
- * calling ttm_bo_init). It is also possible to use ttm without gem
- * though (vmwgfx does that).
- *
- * This helper will figure whenever a given ttm bo is a gem object too
- * or not.
- *
- * @bo: The bo to check.
- */
-static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo)
-{
- return bo->base.dev != NULL;
-}
+void ttm_bo_pin(struct ttm_buffer_object *bo);
+void ttm_bo_unpin(struct ttm_buffer_object *bo);
+
+int ttm_mem_evict_first(struct ttm_device *bdev,
+ struct ttm_resource_manager *man,
+ const struct ttm_place *place,
+ struct ttm_operation_ctx *ctx,
+ struct ww_acquire_ctx *ticket);
/* Default number of pre-faulted pages in the TTM fault handler */
#define TTM_BO_VM_NUM_PREFAULT 16
@@ -742,5 +464,8 @@ void ttm_bo_vm_close(struct vm_area_struct *vma);
int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
+bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all);
+
+vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot);
#endif
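A hedged usage sketch of the iosys_map-based mapping API declared above; the function name and the memcpy step are illustrative assumptions, and the BO is assumed to be reserved by the caller as TTM requires.

#include <linux/iosys-map.h>

static int example_write_to_bo(struct ttm_buffer_object *bo,
			       const void *data, size_t size)
{
	struct iosys_map map;
	int ret;

	ret = ttm_bo_vmap(bo, &map);
	if (ret)
		return ret;

	iosys_map_memcpy_to(&map, 0, data, size);	/* copy into the BO */
	ttm_bo_vunmap(bo, &map);

	return 0;
}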
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index cac7a8a0825a..1afa891f488a 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -37,532 +37,25 @@
#include <linux/spinlock.h>
#include <linux/dma-resv.h>
+#include <drm/ttm/ttm_device.h>
+
#include "ttm_bo_api.h"
-#include "ttm_memory.h"
-#include "ttm_module.h"
+#include "ttm_kmap_iter.h"
#include "ttm_placement.h"
#include "ttm_tt.h"
-
-#define TTM_MAX_BO_PRIORITY 4U
-
-#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
-#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
-#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
-
-struct ttm_mem_type_manager;
-
-struct ttm_mem_type_manager_func {
- /**
- * struct ttm_mem_type_manager member init
- *
- * @man: Pointer to a memory type manager.
- * @p_size: Implementation dependent, but typically the size of the
- * range to be managed in pages.
- *
- * Called to initialize a private range manager. The function is
- * expected to initialize the man::priv member.
- * Returns 0 on success, negative error code on failure.
- */
- int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
-
- /**
- * struct ttm_mem_type_manager member takedown
- *
- * @man: Pointer to a memory type manager.
- *
- * Called to undo the setup done in init. All allocated resources
- * should be freed.
- */
- int (*takedown)(struct ttm_mem_type_manager *man);
-
- /**
- * struct ttm_mem_type_manager member get_node
- *
- * @man: Pointer to a memory type manager.
- * @bo: Pointer to the buffer object we're allocating space for.
- * @placement: Placement details.
- * @flags: Additional placement flags.
- * @mem: Pointer to a struct ttm_mem_reg to be filled in.
- *
- * This function should allocate space in the memory type managed
- * by @man. Placement details if
- * applicable are given by @placement. If successful,
- * @mem::mm_node should be set to a non-null value, and
- * @mem::start should be set to a value identifying the beginning
- * of the range allocated, and the function should return zero.
- * If the memory region accommodate the buffer object, @mem::mm_node
- * should be set to NULL, and the function should return 0.
- * If a system error occurred, preventing the request to be fulfilled,
- * the function should return a negative error code.
- *
- * Note that @mem::mm_node will only be dereferenced by
- * struct ttm_mem_type_manager functions and optionally by the driver,
- * which has knowledge of the underlying type.
- *
- * This function may not be called from within atomic context, so
- * an implementation can and must use either a mutex or a spinlock to
- * protect any data structures managing the space.
- */
- int (*get_node)(struct ttm_mem_type_manager *man,
- struct ttm_buffer_object *bo,
- const struct ttm_place *place,
- struct ttm_mem_reg *mem);
-
- /**
- * struct ttm_mem_type_manager member put_node
- *
- * @man: Pointer to a memory type manager.
- * @mem: Pointer to a struct ttm_mem_reg to be filled in.
- *
- * This function frees memory type resources previously allocated
- * and that are identified by @mem::mm_node and @mem::start. May not
- * be called from within atomic context.
- */
- void (*put_node)(struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem);
-
- /**
- * struct ttm_mem_type_manager member debug
- *
- * @man: Pointer to a memory type manager.
- * @printer: Prefix to be used in printout to identify the caller.
- *
- * This function is called to print out the state of the memory
- * type manager to aid debugging of out-of-memory conditions.
- * It may not be called from within atomic context.
- */
- void (*debug)(struct ttm_mem_type_manager *man,
- struct drm_printer *printer);
-};
-
-/**
- * struct ttm_mem_type_manager
- *
- * @has_type: The memory type has been initialized.
- * @use_type: The memory type is enabled.
- * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
- * managed by this memory type.
- * @gpu_offset: If used, the GPU offset of the first managed page of
- * fixed memory or the first managed location in an aperture.
- * @size: Size of the managed region.
- * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
- * as defined in ttm_placement_common.h
- * @default_caching: The default caching policy used for a buffer object
- * placed in this memory type if the user doesn't provide one.
- * @func: structure pointer implementing the range manager. See above
- * @priv: Driver private closure for @func.
- * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
- * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
- * reserved by the TTM vm system.
- * @io_reserve_lru: Optional lru list for unreserving io mem regions.
- * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
- * @move_lock: lock for move fence
- * static information. bdev::driver::io_mem_free is never used.
- * @lru: The lru list for this memory type.
- * @move: The fence of the last pipelined move operation.
- *
- * This structure is used to identify and manage memory types for a device.
- * It's set up by the ttm_bo_driver::init_mem_type method.
- */
-
-
-
-struct ttm_mem_type_manager {
- struct ttm_bo_device *bdev;
-
- /*
- * No protection. Constant from start.
- */
-
- bool has_type;
- bool use_type;
- uint32_t flags;
- uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
- uint64_t size;
- uint32_t available_caching;
- uint32_t default_caching;
- const struct ttm_mem_type_manager_func *func;
- void *priv;
- struct mutex io_reserve_mutex;
- bool use_io_reserve_lru;
- bool io_reserve_fastpath;
- spinlock_t move_lock;
-
- /*
- * Protected by @io_reserve_mutex:
- */
-
- struct list_head io_reserve_lru;
-
- /*
- * Protected by the global->lru_lock.
- */
-
- struct list_head lru[TTM_MAX_BO_PRIORITY];
-
- /*
- * Protected by @move_lock.
- */
- struct dma_fence *move;
-};
-
-/**
- * struct ttm_bo_driver
- *
- * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
- * @invalidate_caches: Callback to invalidate read caches when a buffer object
- * has been evicted.
- * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
- * structure.
- * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
- * @move: Callback for a driver to hook in accelerated functions to
- * move a buffer.
- * If set to NULL, a potentially slow memcpy() move is used.
- */
-
-struct ttm_bo_driver {
- /**
- * ttm_tt_create
- *
- * @bo: The buffer object to create the ttm for.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
- *
- * Create a struct ttm_tt to back data with system memory pages.
- * No pages are actually allocated.
- * Returns:
- * NULL: Out of memory.
- */
- struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
- uint32_t page_flags);
-
- /**
- * ttm_tt_populate
- *
- * @ttm: The struct ttm_tt to contain the backing pages.
- *
- * Allocate all backing pages
- * Returns:
- * -ENOMEM: Out of memory.
- */
- int (*ttm_tt_populate)(struct ttm_tt *ttm,
- struct ttm_operation_ctx *ctx);
-
- /**
- * ttm_tt_unpopulate
- *
- * @ttm: The struct ttm_tt to contain the backing pages.
- *
- * Free all backing page
- */
- void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
-
- /**
- * struct ttm_bo_driver member invalidate_caches
- *
- * @bdev: the buffer object device.
- * @flags: new placement of the rebound buffer object.
- *
- * A previosly evicted buffer has been rebound in a
- * potentially new location. Tell the driver that it might
- * consider invalidating read (texture) caches on the next command
- * submission as a consequence.
- */
-
- int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags);
- int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man);
-
- /**
- * struct ttm_bo_driver member eviction_valuable
- *
- * @bo: the buffer object to be evicted
- * @place: placement we need room for
- *
- * Check with the driver if it is valuable to evict a BO to make room
- * for a certain placement.
- */
- bool (*eviction_valuable)(struct ttm_buffer_object *bo,
- const struct ttm_place *place);
- /**
- * struct ttm_bo_driver member evict_flags:
- *
- * @bo: the buffer object to be evicted
- *
- * Return the bo flags for a buffer which is not mapped to the hardware.
- * These will be placed in proposed_flags so that when the move is
- * finished, they'll end up in bo->mem.flags
- */
-
- void (*evict_flags)(struct ttm_buffer_object *bo,
- struct ttm_placement *placement);
-
- /**
- * struct ttm_bo_driver member move:
- *
- * @bo: the buffer to move
- * @evict: whether this motion is evicting the buffer from
- * the graphics address space
- * @ctx: context for this move with parameters
- * @new_mem: the new memory region receiving the buffer
- *
- * Move a buffer between two memory regions.
- */
- int (*move)(struct ttm_buffer_object *bo, bool evict,
- struct ttm_operation_ctx *ctx,
- struct ttm_mem_reg *new_mem);
-
- /**
- * struct ttm_bo_driver_member verify_access
- *
- * @bo: Pointer to a buffer object.
- * @filp: Pointer to a struct file trying to access the object.
- *
- * Called from the map / write / read methods to verify that the
- * caller is permitted to access the buffer object.
- * This member may be set to NULL, which will refuse this kind of
- * access for all buffer objects.
- * This function should return 0 if access is granted, -EPERM otherwise.
- */
- int (*verify_access)(struct ttm_buffer_object *bo,
- struct file *filp);
-
- /**
- * Hook to notify driver about a driver move so it
- * can do tiling things and book-keeping.
- *
- * @evict: whether this move is evicting the buffer from the graphics
- * address space
- */
- void (*move_notify)(struct ttm_buffer_object *bo,
- bool evict,
- struct ttm_mem_reg *new_mem);
- /* notify the driver we are taking a fault on this BO
- * and have reserved it */
- int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
-
- /**
- * notify the driver that we're about to swap out this bo
- */
- void (*swap_notify)(struct ttm_buffer_object *bo);
-
- /**
- * Driver callback on when mapping io memory (for bo_move_memcpy
- * for instance). TTM will take care to call io_mem_free whenever
- * the mapping is not use anymore. io_mem_reserve & io_mem_free
- * are balanced.
- */
- int (*io_mem_reserve)(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem);
- void (*io_mem_free)(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem);
-
- /**
- * Return the pfn for a given page_offset inside the BO.
- *
- * @bo: the BO to look up the pfn for
- * @page_offset: the offset to look up
- */
- unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
- unsigned long page_offset);
-
- /**
- * Read/write memory buffers for ptrace access
- *
- * @bo: the BO to access
- * @offset: the offset from the start of the BO
- * @buf: pointer to source/destination buffer
- * @len: number of bytes to copy
- * @write: whether to read (0) from or write (non-0) to BO
- *
- * If successful, this function should return the number of
- * bytes copied, -EIO otherwise. If the number of bytes
- * returned is < len, the function may be called again with
- * the remainder of the buffer to copy.
- */
- int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
- void *buf, int len, int write);
-
- /**
- * struct ttm_bo_driver member del_from_lru_notify
- *
- * @bo: the buffer object deleted from lru
- *
- * notify driver that a BO was deleted from LRU.
- */
- void (*del_from_lru_notify)(struct ttm_buffer_object *bo);
-
- /**
- * Notify the driver that we're about to release a BO
- *
- * @bo: BO that is about to be released
- *
- * Gives the driver a chance to do any cleanup, including
- * adding fences that may force a delayed delete
- */
- void (*release_notify)(struct ttm_buffer_object *bo);
-};
-
-/**
- * struct ttm_bo_global - Buffer object driver global data.
- *
- * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
- * @dummy_read_page: Pointer to a dummy page used for mapping requests
- * of unpopulated pages.
- * @shrink: A shrink callback object used for buffer object swap.
- * @device_list_mutex: Mutex protecting the device list.
- * This mutex is held while traversing the device list for pm options.
- * @lru_lock: Spinlock protecting the bo subsystem lru lists.
- * @device_list: List of buffer object devices.
- * @swap_lru: Lru list of buffer objects used for swapping.
- */
-
-extern struct ttm_bo_global {
-
- /**
- * Constant after init.
- */
-
- struct kobject kobj;
- struct page *dummy_read_page;
- spinlock_t lru_lock;
-
- /**
- * Protected by ttm_global_mutex.
- */
- struct list_head device_list;
-
- /**
- * Protected by the lru_lock.
- */
- struct list_head swap_lru[TTM_MAX_BO_PRIORITY];
-
- /**
- * Internal protection.
- */
- atomic_t bo_count;
-} ttm_bo_glob;
-
-
-#define TTM_NUM_MEM_TYPES 8
-
-/**
- * struct ttm_bo_device - Buffer object driver device-specific data.
- *
- * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
- * @man: An array of mem_type_managers.
- * @vma_manager: Address space manager (pointer)
- * lru_lock: Spinlock that protects the buffer+device lru lists and
- * ddestroy lists.
- * @dev_mapping: A pointer to the struct address_space representing the
- * device address space.
- * @wq: Work queue structure for the delayed delete workqueue.
- * @no_retry: Don't retry allocation if it fails
- *
- */
-
-struct ttm_bo_device {
-
- /*
- * Constant after bo device init / atomic.
- */
- struct list_head device_list;
- struct ttm_bo_driver *driver;
- struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
-
- /*
- * Protected by internal locks.
- */
- struct drm_vma_offset_manager *vma_manager;
-
- /*
- * Protected by the global:lru lock.
- */
- struct list_head ddestroy;
-
- /*
- * Protected by load / firstopen / lastclose /unload sync.
- */
-
- struct address_space *dev_mapping;
-
- /*
- * Internal protection.
- */
-
- struct delayed_work wq;
-
- bool need_dma32;
-
- bool no_retry;
-};
-
-/**
- * struct ttm_lru_bulk_move_pos
- *
- * @first: first BO in the bulk move range
- * @last: last BO in the bulk move range
- *
- * Positions for a lru bulk move.
- */
-struct ttm_lru_bulk_move_pos {
- struct ttm_buffer_object *first;
- struct ttm_buffer_object *last;
-};
-
-/**
- * struct ttm_lru_bulk_move
- *
- * @tt: first/last lru entry for BOs in the TT domain
- * @vram: first/last lru entry for BOs in the VRAM domain
- * @swap: first/last lru entry for BOs on the swap list
- *
- * Helper structure for bulk moves on the LRU list.
- */
-struct ttm_lru_bulk_move {
- struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY];
- struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY];
- struct ttm_lru_bulk_move_pos swap[TTM_MAX_BO_PRIORITY];
-};
-
-/**
- * ttm_flag_masked
- *
- * @old: Pointer to the result and original value.
- * @new: New value of bits.
- * @mask: Mask of bits to change.
- *
- * Convenience function to change a number of bits identified by a mask.
- */
-
-static inline uint32_t
-ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
-{
- *old ^= (*old ^ new) & mask;
- return *old;
-}
+#include "ttm_pool.h"
/*
* ttm_bo.c
*/
/**
- * ttm_mem_reg_is_pci
- *
- * @bdev: Pointer to a struct ttm_bo_device.
- * @mem: A valid struct ttm_mem_reg.
- *
- * Returns true if the memory described by @mem is PCI memory,
- * false otherwise.
- */
-bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
-
-/**
* ttm_bo_mem_space
*
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
* @proposed_placement: Proposed new placement for the buffer object.
- * @mem: A struct ttm_mem_reg.
+ * @mem: A struct ttm_resource.
 * @interruptible: Sleep interruptible while waiting.
* @no_wait_gpu: Return immediately if the GPU is busy.
*
@@ -577,37 +70,9 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
*/
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- struct ttm_mem_reg *mem,
+ struct ttm_resource **mem,
struct ttm_operation_ctx *ctx);
-void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem);
-void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem);
-
-int ttm_bo_device_release(struct ttm_bo_device *bdev);
-
-/**
- * ttm_bo_device_init
- *
- * @bdev: A pointer to a struct ttm_bo_device to initialize.
- * @glob: A pointer to an initialized struct ttm_bo_global.
- * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
- * @mapping: The address space to use for this bo.
- * @vma_manager: A pointer to a vma manager.
- * @file_page_offset: Offset into the device address space that is available
- * for buffer data. This ensures compatibility with other users of the
- * address space.
- *
- * Initializes a struct ttm_bo_device:
- * Returns:
- * !0: Failure.
- */
-int ttm_bo_device_init(struct ttm_bo_device *bdev,
- struct ttm_bo_driver *driver,
- struct address_space *mapping,
- struct drm_vma_offset_manager *vma_manager,
- bool need_dma32);
-
/**
* ttm_bo_unmap_virtual
*
@@ -616,45 +81,32 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
/**
- * ttm_bo_unmap_virtual
- *
- * @bo: tear down the virtual mappings for this BO
- *
- * The caller must take ttm_mem_io_lock before calling this function.
- */
-void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
-
-int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
-void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
-int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible);
-void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
-
-/**
- * __ttm_bo_reserve:
+ * ttm_bo_reserve:
*
* @bo: A pointer to a struct ttm_buffer_object.
* @interruptible: Sleep interruptible if waiting.
* @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
* @ticket: ticket used to acquire the ww_mutex.
*
- * Will not remove reserved buffers from the lru lists.
- * Otherwise identical to ttm_bo_reserve.
+ * Locks a buffer object for validation (or prevents other processes from
+ * locking it for validation), while taking a number of measures to prevent
+ * deadlocks.
*
* Returns:
* -EDEADLK: The reservation may cause a deadlock.
* Release all buffer reservations, wait for @bo to become unreserved and
- * try again. (only if use_sequence == 1).
+ * try again.
* -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
* a signal. Release all buffer reservations and return to user-space.
* -EBUSY: The function needed to sleep, but @no_wait was true
* -EALREADY: Bo already reserved using @ticket. This error code will only
* be returned if @use_ticket is set to true.
*/
-static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait,
- struct ww_acquire_ctx *ticket)
+static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
+ bool interruptible, bool no_wait,
+ struct ww_acquire_ctx *ticket)
{
- int ret = 0;
+ int ret;
if (no_wait) {
bool success;
@@ -675,59 +127,6 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
}
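
To make the locking pattern above concrete, here is a minimal sketch of a
single-BO reserve/validate/unreserve cycle; my_placement is an assumed
driver-defined struct ttm_placement, and with only one BO a NULL ticket
is sufficient:

static int my_validate_one(struct ttm_buffer_object *bo,
			   struct ttm_operation_ctx *ctx)
{
	int ret;

	/* Single BO: no ww_acquire_ctx needed, pass a NULL ticket. */
	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;	/* e.g. -ERESTARTSYS on a signal */

	/* my_placement is a driver-defined placement list (assumption). */
	ret = ttm_bo_validate(bo, &my_placement, ctx);

	ttm_bo_unreserve(bo);	/* drops the lock, bumps the BO on the LRU */
	return ret;
}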
/**
- * ttm_bo_reserve:
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
- * @ticket: ticket used to acquire the ww_mutex.
- *
- * Locks a buffer object for validation. (Or prevents other processes from
- * locking it for validation) and removes it from lru lists, while taking
- * a number of measures to prevent deadlocks.
- *
- * Deadlocks may occur when two processes try to reserve multiple buffers in
- * different order, either by will or as a result of a buffer being evicted
- * to make room for a buffer already reserved. (Buffers are reserved before
- * they are evicted). The following algorithm prevents such deadlocks from
- * occurring:
- * Processes attempting to reserve multiple buffers other than for eviction,
- * (typically execbuf), should first obtain a unique 32-bit
- * validation sequence number,
- * and call this function with @use_ticket == 1 and @ticket->stamp == the unique
- * sequence number. If upon call of this function, the buffer object is already
- * reserved, the validation sequence is checked against the validation
- * sequence of the process currently reserving the buffer,
- * and if the current validation sequence is greater than that of the process
- * holding the reservation, the function returns -EDEADLK. Otherwise it sleeps
- * waiting for the buffer to become unreserved, after which it retries
- * reserving.
- * The caller should, when receiving an -EDEADLK error
- * release all its buffer reservations, wait for @bo to become unreserved, and
- * then rerun the validation with the same validation sequence. This procedure
- * will always guarantee that the process with the lowest validation sequence
- * will eventually succeed, preventing both deadlocks and starvation.
- *
- * Returns:
- * -EDEADLK: The reservation may cause a deadlock.
- * Release all buffer reservations, wait for @bo to become unreserved and
- * try again. (only if use_sequence == 1).
- * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
- * a signal. Release all buffer reservations and return to user-space.
- * -EBUSY: The function needed to sleep, but @no_wait was true
- * -EALREADY: Bo already reserved using @ticket. This error code will only
- * be returned if @use_ticket is set to true.
- */
-static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait,
- struct ww_acquire_ctx *ticket)
-{
- WARN_ON(!kref_read(&bo->kref));
-
- return __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
-}
-
-/**
* ttm_bo_reserve_slowpath:
* @bo: A pointer to a struct ttm_buffer_object.
* @interruptible: Sleep interruptible if waiting.
@@ -741,20 +140,44 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
bool interruptible,
struct ww_acquire_ctx *ticket)
{
- int ret = 0;
-
- WARN_ON(!kref_read(&bo->kref));
+ if (interruptible) {
+ int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
+ ticket);
+ if (ret == -EINTR)
+ ret = -ERESTARTSYS;
+ return ret;
+ }
+ dma_resv_lock_slow(bo->base.resv, ticket);
+ return 0;
+}
- if (interruptible)
- ret = dma_resv_lock_slow_interruptible(bo->base.resv,
- ticket);
- else
- dma_resv_lock_slow(bo->base.resv, ticket);
+static inline void
+ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
+{
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_bo_move_to_lru_tail(bo);
+ spin_unlock(&bo->bdev->lru_lock);
+}
- if (ret == -EINTR)
- ret = -ERESTARTSYS;
+static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem)
+{
+ WARN_ON(bo->resource);
+ bo->resource = new_mem;
+}
- return ret;
+/**
+ * ttm_bo_move_null - assign memory for a buffer object.
+ * @bo: The bo to assign the memory to
+ * @new_mem: The memory to be assigned.
+ *
+ * Assign the memory from new_mem to the memory of the buffer object bo.
+ */
+static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem)
+{
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_assign_mem(bo, new_mem);
}
/**
@@ -766,41 +189,17 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
*/
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
- spin_lock(&ttm_bo_glob.lru_lock);
- ttm_bo_move_to_lru_tail(bo, NULL);
- spin_unlock(&ttm_bo_glob.lru_lock);
+ ttm_bo_move_to_lru_tail_unlocked(bo);
dma_resv_unlock(bo->base.resv);
}
/*
* ttm_bo_util.c
*/
-
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem);
-void ttm_mem_io_free(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem);
-/**
- * ttm_bo_move_ttm
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @no_wait_gpu: Return immediately if the GPU is busy.
- * @new_mem: struct ttm_mem_reg indicating where to move.
- *
- * Optimized move function for a buffer object with both old and
- * new placement backed by a TTM. The function will, if successful,
- * free any old aperture space, and set (@new_mem)->mm_node to NULL,
- * and update the (@bo)->mem placement flags. If unsuccessful, the old
- * data remains untouched, and it's up to the caller to free the
- * memory space indicated by @new_mem.
- * Returns:
- * !0: Failure.
- */
-
-int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx,
- struct ttm_mem_reg *new_mem);
+int ttm_mem_io_reserve(struct ttm_device *bdev,
+ struct ttm_resource *mem);
+void ttm_mem_io_free(struct ttm_device *bdev,
+ struct ttm_resource *mem);
/**
* ttm_bo_move_memcpy
@@ -808,7 +207,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
* @bo: A pointer to a struct ttm_buffer_object.
* @interruptible: Sleep interruptible if waiting.
* @no_wait_gpu: Return immediately if the GPU is busy.
- * @new_mem: struct ttm_mem_reg indicating where to move.
+ * @new_mem: struct ttm_resource indicating where to move.
*
* Fallback move function for a mappable buffer object in mappable memory.
* The function will, if successful,
@@ -822,16 +221,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx,
- struct ttm_mem_reg *new_mem);
-
-/**
- * ttm_bo_free_old_node
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- *
- * Utility function to free an old placement after a successful move.
- */
-void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
+ struct ttm_resource *new_mem);
/**
* ttm_bo_move_accel_cleanup.
@@ -839,7 +229,8 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
* @bo: A pointer to a struct ttm_buffer_object.
* @fence: A fence object that signals when moving is complete.
* @evict: This is an evict move. Don't return until the buffer is idle.
- * @new_mem: struct ttm_mem_reg indicating where to move.
+ * @pipeline: evictions are to be pipelined.
+ * @new_mem: struct ttm_resource indicating where to move.
*
* Accelerated move function to be called when an accelerated move
* has been scheduled. The function will create a new temporary buffer object
@@ -850,22 +241,20 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
*/
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct dma_fence *fence, bool evict,
- struct ttm_mem_reg *new_mem);
+ bool pipeline,
+ struct ttm_resource *new_mem);
/**
- * ttm_bo_pipeline_move.
+ * ttm_bo_move_sync_cleanup.
*
* @bo: A pointer to a struct ttm_buffer_object.
- * @fence: A fence object that signals when moving is complete.
- * @evict: This is an evict move. Don't return until the buffer is idle.
- * @new_mem: struct ttm_mem_reg indicating where to move.
+ * @new_mem: struct ttm_resource indicating where to move.
*
- * Function for pipelining accelerated moves. Either free the memory
- * immediately or hang it on a temporary buffer object.
+ * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
+ * by the caller to be idle. Typically used after memcpy buffer moves.
*/
-int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
- struct dma_fence *fence, bool evict,
- struct ttm_mem_reg *new_mem);
+void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem);
/**
* ttm_bo_pipeline_gutting.
@@ -879,14 +268,36 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
/**
* ttm_io_prot
*
- * @c_state: Caching state.
+ * @bo: ttm buffer object
+ * @res: ttm resource object
* @tmp: Page protection flag for a normal, cached mapping.
*
* Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @res.
*/
-pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
+pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
+ pgprot_t tmp);
-extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
+/**
+ * ttm_bo_tt_bind
+ *
+ * Bind the object tt to a memory resource.
+ */
+int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem);
+/**
+ * ttm_bo_tt_destroy.
+ */
+void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
+
+void ttm_move_memcpy(bool clear,
+ u32 num_pages,
+ struct ttm_kmap_iter *dst_iter,
+ struct ttm_kmap_iter *src_iter);
+
+struct ttm_kmap_iter *
+ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
+ struct io_mapping *iomap,
+ struct sg_table *st,
+ resource_size_t start);
#endif
diff --git a/include/drm/ttm/ttm_caching.h b/include/drm/ttm/ttm_caching.h
new file mode 100644
index 000000000000..235a743d90e1
--- /dev/null
+++ b/include/drm/ttm/ttm_caching.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#ifndef _TTM_CACHING_H_
+#define _TTM_CACHING_H_
+
+#define TTM_NUM_CACHING_TYPES 3
+
+/**
+ * enum ttm_caching - CPU caching and BUS snooping behavior.
+ */
+enum ttm_caching {
+ /**
+ * @ttm_uncached: Most defensive option for device mappings,
+ * don't even allow write combining.
+ */
+ ttm_uncached,
+
+ /**
+ * @ttm_write_combined: Don't cache read accesses, but allow at least
+ * writes to be combined.
+ */
+ ttm_write_combined,
+
+ /**
+ * @ttm_cached: Fully cached like normal system memory, requires that
+ * devices snoop the CPU cache on accesses.
+ */
+ ttm_cached
+};
+
+pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp);
+
+#endif
diff --git a/include/drm/ttm/ttm_debug.h b/include/drm/ttm/ttm_debug.h
deleted file mode 100644
index b5e460fa5086..000000000000
--- a/include/drm/ttm/ttm_debug.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2017 Advanced Micro Devices, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Tom St Denis <tom.stdenis@amd.com>
- */
-extern void ttm_trace_dma_map(struct device *dev, struct ttm_dma_tt *tt);
-extern void ttm_trace_dma_unmap(struct device *dev, struct ttm_dma_tt *tt);
diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
new file mode 100644
index 000000000000..95b3c04b1ab9
--- /dev/null
+++ b/include/drm/ttm/ttm_device.h
@@ -0,0 +1,302 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#ifndef _TTM_DEVICE_H_
+#define _TTM_DEVICE_H_
+
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_pool.h>
+
+struct ttm_device;
+struct ttm_placement;
+struct ttm_buffer_object;
+struct ttm_operation_ctx;
+
+/**
+ * struct ttm_global - Buffer object driver global data.
+ */
+extern struct ttm_global {
+
+ /**
+ * @dummy_read_page: Pointer to a dummy page used for mapping requests
+ * of unpopulated pages. Constant after init.
+ */
+ struct page *dummy_read_page;
+
+ /**
+ * @device_list: List of buffer object devices. Protected by
+ * ttm_global_mutex.
+ */
+ struct list_head device_list;
+
+ /**
+ * @bo_count: Number of buffer objects allocated by devices.
+ */
+ atomic_t bo_count;
+} ttm_glob;
+
+struct ttm_device_funcs {
+ /**
+ * ttm_tt_create
+ *
+ * @bo: The buffer object to create the ttm for.
+ * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
+ *
+ * Create a struct ttm_tt to back data with system memory pages.
+ * No pages are actually allocated.
+ * Returns:
+ * NULL: Out of memory.
+ */
+ struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
+ uint32_t page_flags);
+
+ /**
+ * ttm_tt_populate
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Allocate all backing pages
+ * Returns:
+ * -ENOMEM: Out of memory.
+ */
+ int (*ttm_tt_populate)(struct ttm_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx);
+
+ /**
+ * ttm_tt_unpopulate
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Free all backing pages
+ */
+ void (*ttm_tt_unpopulate)(struct ttm_device *bdev,
+ struct ttm_tt *ttm);
+
+ /**
+ * ttm_tt_destroy
+ *
+ * @bdev: Pointer to a ttm device
+ * @ttm: Pointer to a struct ttm_tt.
+ *
+ * Destroy the backend. This will be called back from ttm_tt_destroy, so
+ * don't call ttm_tt_destroy from the callback or it will recurse forever.
+ */
+ void (*ttm_tt_destroy)(struct ttm_device *bdev, struct ttm_tt *ttm);
+
+ /**
+ * struct ttm_bo_driver member eviction_valuable
+ *
+ * @bo: the buffer object to be evicted
+ * @place: placement we need room for
+ *
+ * Check with the driver if it is valuable to evict a BO to make room
+ * for a certain placement.
+ */
+ bool (*eviction_valuable)(struct ttm_buffer_object *bo,
+ const struct ttm_place *place);
+ /**
+ * struct ttm_bo_driver member evict_flags:
+ *
+ * @bo: the buffer object to be evicted
+ *
+ * Return the bo flags for a buffer which is not mapped to the hardware.
+ * These will be placed in proposed_flags so that when the move is
+ * finished, they'll end up in bo->mem.flags
+ * This should not cause multihop evictions, and the core will warn
+ * if one is proposed.
+ */
+
+ void (*evict_flags)(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement);
+
+ /**
+ * struct ttm_bo_driver member move:
+ *
+ * @bo: the buffer to move
+ * @evict: whether this motion is evicting the buffer from
+ * the graphics address space
+ * @ctx: context for this move with parameters
+ * @new_mem: the new memory region receiving the buffer
 * @hop: placement for a driver-directed intermediate hop
+ *
+ * Move a buffer between two memory regions.
+ * Returns errno -EMULTIHOP if the driver requests a hop.
+ */
+ int (*move)(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_mem,
+ struct ttm_place *hop);
+
+ /**
+ * Hook to notify driver about a resource delete.
+ */
+ void (*delete_mem_notify)(struct ttm_buffer_object *bo);
+
+ /**
+ * notify the driver that we're about to swap out this bo
+ */
+ void (*swap_notify)(struct ttm_buffer_object *bo);
+
+ /**
+ * Driver callback on when mapping io memory (for bo_move_memcpy
+ * for instance). TTM will take care to call io_mem_free whenever
+ * the mapping is not use anymore. io_mem_reserve & io_mem_free
+ * are balanced.
+ */
+ int (*io_mem_reserve)(struct ttm_device *bdev,
+ struct ttm_resource *mem);
+ void (*io_mem_free)(struct ttm_device *bdev,
+ struct ttm_resource *mem);
+
+ /**
+ * Return the pfn for a given page_offset inside the BO.
+ *
+ * @bo: the BO to look up the pfn for
+ * @page_offset: the offset to look up
+ */
+ unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
+ unsigned long page_offset);
+
+ /**
+ * Read/write memory buffers for ptrace access
+ *
+ * @bo: the BO to access
+ * @offset: the offset from the start of the BO
+ * @buf: pointer to source/destination buffer
+ * @len: number of bytes to copy
+ * @write: whether to read (0) from or write (non-0) to BO
+ *
+ * If successful, this function should return the number of
+ * bytes copied, -EIO otherwise. If the number of bytes
+ * returned is < len, the function may be called again with
+ * the remainder of the buffer to copy.
+ */
+ int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
+ void *buf, int len, int write);
+
+ /**
+ * Notify the driver that we're about to release a BO
+ *
+ * @bo: BO that is about to be released
+ *
+ * Gives the driver a chance to do any cleanup, including
+ * adding fences that may force a delayed delete
+ */
+ void (*release_notify)(struct ttm_buffer_object *bo);
+};
+
+/**
+ * struct ttm_device - Buffer object driver device-specific data.
+ */
+struct ttm_device {
+ /**
+ * @device_list: Our entry in the global device list.
+ * Constant after bo device init
+ */
+ struct list_head device_list;
+
+ /**
+ * @funcs: Function table for the device.
+ * Constant after bo device init
+ */
+ struct ttm_device_funcs *funcs;
+
+ /**
+ * @sysman: Resource manager for the system domain.
+ * Access via ttm_manager_type.
+ */
+ struct ttm_resource_manager sysman;
+
+ /**
+ * @man_drv: An array of resource_managers, one per resource type.
+ */
+ struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES];
+
+ /**
+ * @vma_manager: Address space manager for finding BOs to mmap.
+ */
+ struct drm_vma_offset_manager *vma_manager;
+
+ /**
+ * @pool: page pool for the device.
+ */
+ struct ttm_pool pool;
+
+ /**
+ * @lru_lock: Protection for the per manager LRU and ddestroy lists.
+ */
+ spinlock_t lru_lock;
+
+ /**
+ * @ddestroy: Destroyed but not yet cleaned up buffer objects.
+ */
+ struct list_head ddestroy;
+
+ /**
+ * @pinned: Buffer objects which are pinned and so not on any LRU list.
+ */
+ struct list_head pinned;
+
+ /**
+ * @dev_mapping: A pointer to the struct address_space for invalidating
+ * CPU mappings on buffer move. Protected by load/unload sync.
+ */
+ struct address_space *dev_mapping;
+
+ /**
+ * @wq: Work queue structure for the delayed delete workqueue.
+ */
+ struct delayed_work wq;
+};
+
+int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags);
+int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
+ gfp_t gfp_flags);
+
+static inline struct ttm_resource_manager *
+ttm_manager_type(struct ttm_device *bdev, int mem_type)
+{
+ BUILD_BUG_ON(__builtin_constant_p(mem_type)
+ && mem_type >= TTM_NUM_MEM_TYPES);
+ return bdev->man_drv[mem_type];
+}
+
+static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type,
+ struct ttm_resource_manager *manager)
+{
+ BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
+ bdev->man_drv[type] = manager;
+}
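
As a usage sketch for the two accessors above, a driver-private manager can
be published once at device init and recovered with container_of() later;
struct my_vram_mgr and its helpers are illustrative assumptions:

struct my_vram_mgr {
	struct ttm_resource_manager man;
	/* driver-private allocator state would live here */
};

static void my_vram_mgr_register(struct ttm_device *bdev,
				 struct my_vram_mgr *mgr)
{
	ttm_set_driver_manager(bdev, TTM_PL_VRAM, &mgr->man);
}

static struct my_vram_mgr *my_vram_mgr_get(struct ttm_device *bdev)
{
	return container_of(ttm_manager_type(bdev, TTM_PL_VRAM),
			    struct my_vram_mgr, man);
}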
+
+int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
+ struct device *dev, struct address_space *mapping,
+ struct drm_vma_offset_manager *vma_manager,
+ bool use_dma_alloc, bool use_dma32);
+void ttm_device_fini(struct ttm_device *bdev);
+void ttm_device_clear_dma_mappings(struct ttm_device *bdev);
+
+#endif
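
Rounding off the new header, a hedged sketch of device bring-up: the
embedding struct and my_driver_funcs are assumptions, while the mapping and
vma manager are taken from the DRM device in the way drivers of this era
commonly do:

struct my_device {
	struct drm_device drm;
	struct ttm_device bdev;
};

static int my_ttm_init(struct my_device *mdev)
{
	/* my_driver_funcs is an assumed struct ttm_device_funcs. */
	return ttm_device_init(&mdev->bdev, &my_driver_funcs,
			       mdev->drm.dev,
			       mdev->drm.anon_inode->i_mapping,
			       mdev->drm.vma_offset_manager,
			       true /* use_dma_alloc */,
			       false /* use_dma32 */);
}

The matching teardown would call ttm_device_fini(&mdev->bdev).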
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 5a19843bb80d..a99d7fdf2964 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -58,9 +58,8 @@ struct ttm_validate_buffer {
* Undoes all buffer validation reservations for bos pointed to by
* the list entries.
*/
-
-extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
- struct list_head *list);
+void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
+ struct list_head *list);
/**
* function ttm_eu_reserve_buffers
@@ -96,10 +95,9 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
* ttm_eu_fence_buffer_objects() when command submission is complete or
* has failed.
*/
-
-extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
- struct list_head *list, bool intr,
- struct list_head *dups);
+int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
+ struct list_head *list, bool intr,
+ struct list_head *dups);
/**
* function ttm_eu_fence_buffer_objects.
@@ -113,9 +111,8 @@ extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
* It also unreserves all buffers, putting them on lru lists.
*
*/
-
-extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
- struct list_head *list,
- struct dma_fence *fence);
+void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
+ struct list_head *list,
+ struct dma_fence *fence);
#endif
diff --git a/include/drm/ttm/ttm_kmap_iter.h b/include/drm/ttm/ttm_kmap_iter.h
new file mode 100644
index 000000000000..cc5c09a211b4
--- /dev/null
+++ b/include/drm/ttm/ttm_kmap_iter.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+#ifndef __TTM_KMAP_ITER_H__
+#define __TTM_KMAP_ITER_H__
+
+#include <linux/types.h>
+
+struct ttm_kmap_iter;
+struct iosys_map;
+
+/**
+ * struct ttm_kmap_iter_ops - Ops structure for a struct
+ * ttm_kmap_iter.
+ * @maps_tt: Whether the iterator maps TT memory directly, as opposed to
+ * mapping a TT through an aperture. Both these modes have
+ * struct ttm_resource_manager::use_tt set, but the latter typically
+ * returns is_iomem == true from ttm_mem_io_reserve.
+ */
+struct ttm_kmap_iter_ops {
+ /**
+ * kmap_local() - Map a PAGE_SIZE part of the resource using
+ * kmap_local semantics.
+ * @res_iter: Pointer to the struct ttm_kmap_iter representing
+ * the resource.
+ * @dmap: The struct iosys_map holding the virtual address after
+ * the operation.
+ * @i: The location within the resource to map. PAGE_SIZE granularity.
+ */
+ void (*map_local)(struct ttm_kmap_iter *res_iter,
+ struct iosys_map *dmap, pgoff_t i);
+ /**
+ * unmap_local() - Unmap a PAGE_SIZE part of the resource previously
+ * mapped using kmap_local.
+ * @res_iter: Pointer to the struct ttm_kmap_iter representing
+ * the resource.
+ * @dmap: The struct iosys_map holding the virtual address after
+ * the operation.
+ */
+ void (*unmap_local)(struct ttm_kmap_iter *res_iter,
+ struct iosys_map *dmap);
+ bool maps_tt;
+};
+
+/**
+ * struct ttm_kmap_iter - Iterator for kmap_local type operations on a
+ * resource.
+ * @ops: Pointer to the operations struct.
+ *
+ * This struct is intended to be embedded in a resource-specific specialization
+ * implementing operations for the resource.
+ *
+ * Nothing stops us from extending the operations to vmap, vmap_pfn, etc.,
+ * replacing some or all of the ttm_bo_util cpu-map functionality.
+ */
+struct ttm_kmap_iter {
+ const struct ttm_kmap_iter_ops *ops;
+};
+
+#endif /* __TTM_KMAP_ITER_H__ */
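
To illustrate the ops contract, a minimal specialization over a plain page
array might look like the sketch below; all my_* names are assumptions,
while kmap_local_page()/kunmap_local() and iosys_map_set_vaddr() are the
standard kernel primitives:

struct my_kmap_iter {
	struct ttm_kmap_iter base;
	struct page **pages;	/* backing pages, PAGE_SIZE granularity */
};

static void my_kmap_local(struct ttm_kmap_iter *iter,
			  struct iosys_map *dmap, pgoff_t i)
{
	struct my_kmap_iter *it =
		container_of(iter, struct my_kmap_iter, base);

	/* Map one page with kmap_local semantics and publish it. */
	iosys_map_set_vaddr(dmap, kmap_local_page(it->pages[i]));
}

static void my_kunmap_local(struct ttm_kmap_iter *iter,
			    struct iosys_map *dmap)
{
	kunmap_local(dmap->vaddr);
}

static const struct ttm_kmap_iter_ops my_kmap_iter_ops = {
	.map_local = my_kmap_local,
	.unmap_local = my_kunmap_local,
	.maps_tt = true,	/* maps TT memory directly */
};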
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
deleted file mode 100644
index c78ea99c42cf..000000000000
--- a/include/drm/ttm/ttm_memory.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifndef TTM_MEMORY_H
-#define TTM_MEMORY_H
-
-#include <linux/workqueue.h>
-#include <linux/spinlock.h>
-#include <linux/bug.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
-#include <linux/kobject.h>
-#include <linux/mm.h>
-#include "ttm_bo_api.h"
-
-/**
- * struct ttm_mem_global - Global memory accounting structure.
- *
- * @shrink: A single callback to shrink TTM memory usage. Extend this
- * to a linked list to be able to handle multiple callbacks when needed.
- * @swap_queue: A workqueue to handle shrinking in low memory situations. We
- * need a separate workqueue since it will spend a lot of time waiting
- * for the GPU, and this will otherwise block other workqueue tasks(?)
- * At this point we use only a single-threaded workqueue.
- * @work: The workqueue callback for the shrink queue.
- * @lock: Lock to protect the @shrink - and the memory accounting members,
- * that is, essentially the whole structure with some exceptions.
- * @lower_mem_limit: include lower limit of swap space and lower limit of
- * system memory.
- * @zones: Array of pointers to accounting zones.
- * @num_zones: Number of populated entries in the @zones array.
- * @zone_kernel: Pointer to the kernel zone.
- * @zone_highmem: Pointer to the highmem zone if there is one.
- * @zone_dma32: Pointer to the dma32 zone if there is one.
- *
- * Note that this structure is not per device. It should be global for all
- * graphics devices.
- */
-
-#define TTM_MEM_MAX_ZONES 2
-struct ttm_mem_zone;
-extern struct ttm_mem_global {
- struct kobject kobj;
- struct workqueue_struct *swap_queue;
- struct work_struct work;
- spinlock_t lock;
- uint64_t lower_mem_limit;
- struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
- unsigned int num_zones;
- struct ttm_mem_zone *zone_kernel;
-#ifdef CONFIG_HIGHMEM
- struct ttm_mem_zone *zone_highmem;
-#else
- struct ttm_mem_zone *zone_dma32;
-#endif
-} ttm_mem_glob;
-
-extern int ttm_mem_global_init(struct ttm_mem_global *glob);
-extern void ttm_mem_global_release(struct ttm_mem_global *glob);
-extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
- struct ttm_operation_ctx *ctx);
-extern void ttm_mem_global_free(struct ttm_mem_global *glob,
- uint64_t amount);
-extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
- struct page *page, uint64_t size,
- struct ttm_operation_ctx *ctx);
-extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
- struct page *page, uint64_t size);
-extern size_t ttm_round_pot(size_t size);
-extern uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob);
-extern bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
- uint64_t num_pages, struct ttm_operation_ctx *ctx);
-#endif
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
deleted file mode 100644
index a6b6ef5f9bf4..000000000000
--- a/include/drm/ttm/ttm_page_alloc.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (c) Red Hat Inc.
-
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Dave Airlie <airlied@redhat.com>
- * Jerome Glisse <jglisse@redhat.com>
- */
-#ifndef TTM_PAGE_ALLOC
-#define TTM_PAGE_ALLOC
-
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_memory.h>
-
-struct device;
-
-/**
- * Initialize pool allocator.
- */
-int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
-/**
- * Free pool allocator.
- */
-void ttm_page_alloc_fini(void);
-
-/**
- * ttm_pool_populate:
- *
- * @ttm: The struct ttm_tt to contain the backing pages.
- *
- * Add backing pages to all of @ttm
- */
-int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
-
-/**
- * ttm_pool_unpopulate:
- *
- * @ttm: The struct ttm_tt which to free backing pages.
- *
- * Free all pages of @ttm
- */
-void ttm_pool_unpopulate(struct ttm_tt *ttm);
-
-/**
- * Populates and DMA maps pages to fulfill a ttm_dma_populate() request
- */
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
- struct ttm_operation_ctx *ctx);
-
-/**
- * Unpopulates and DMA unmaps pages as part of a
- * ttm_dma_unpopulate() request */
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);
-
-/**
- * Output the state of pools to debugfs file
- */
-int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
-
-#if defined(CONFIG_DRM_TTM_DMA_PAGE_POOL)
-/**
- * Initialize pool allocator.
- */
-int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
-
-/**
- * Free pool allocator.
- */
-void ttm_dma_page_alloc_fini(void);
-
-/**
- * Output the state of pools to debugfs file
- */
-int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
-
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
- struct ttm_operation_ctx *ctx);
-void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
-
-#else
-static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
- unsigned max_pages)
-{
- return -ENODEV;
-}
-
-static inline void ttm_dma_page_alloc_fini(void) { return; }
-
-static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
-{
- return 0;
-}
-static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
- struct device *dev,
- struct ttm_operation_ctx *ctx)
-{
- return -ENOMEM;
-}
-static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
- struct device *dev)
-{
-}
-#endif
-
-#endif
diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h
index e88a8e39767b..8074d0f6cae5 100644
--- a/include/drm/ttm/ttm_placement.h
+++ b/include/drm/ttm/ttm_placement.h
@@ -35,6 +35,17 @@
/*
* Memory regions for data placement.
+ *
+ * Buffers placed in TTM_PL_SYSTEM are considered under TTM's control and can
+ * be swapped out whenever TTM thinks it is a good idea.
+ * Drivers that would like to use TTM_PL_SYSTEM as a valid placement therefore
+ * need to be able to handle the issues that arise from this behaviour
+ * manually.
+ *
+ * For BOs which reside in system memory but for which the accelerator
+ * requires direct access (i.e. their usage needs to be synchronized
+ * between the CPU and accelerator via fences), a new driver-private
+ * placement that can handle such scenarios is a good idea.
*/
#define TTM_PL_SYSTEM 0
@@ -42,42 +53,23 @@
#define TTM_PL_VRAM 2
#define TTM_PL_PRIV 3
-#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
-#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
-#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
-#define TTM_PL_FLAG_PRIV (1 << TTM_PL_PRIV)
-#define TTM_PL_MASK_MEM 0x0000FFFF
-
/*
- * Other flags that affects data placement.
- * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
- * if available.
- * TTM_PL_FLAG_SHARED means that another application may
- * reference the buffer.
- * TTM_PL_FLAG_NO_EVICT means that the buffer may never
- * be evicted to make room for other buffers.
* TTM_PL_FLAG_TOPDOWN requests to be placed from the
* top of the memory area, instead of the bottom.
*/
-#define TTM_PL_FLAG_CACHED (1 << 16)
-#define TTM_PL_FLAG_UNCACHED (1 << 17)
-#define TTM_PL_FLAG_WC (1 << 18)
-#define TTM_PL_FLAG_CONTIGUOUS (1 << 19)
-#define TTM_PL_FLAG_NO_EVICT (1 << 21)
-#define TTM_PL_FLAG_TOPDOWN (1 << 22)
-
-#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
- TTM_PL_FLAG_UNCACHED | \
- TTM_PL_FLAG_WC)
+#define TTM_PL_FLAG_CONTIGUOUS (1 << 0)
+#define TTM_PL_FLAG_TOPDOWN (1 << 1)
-#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
+/* For multihop handling */
+#define TTM_PL_FLAG_TEMPORARY (1 << 2)
/**
* struct ttm_place
*
* @fpfn: first valid page frame number to put the object
* @lpfn: last valid page frame number to put the object
+ * @mem_type: One of TTM_PL_* where the resource should be allocated from.
* @flags: memory domain and caching flags for the object
*
* Structure indicating a possible place to put an object.
@@ -85,6 +77,7 @@
struct ttm_place {
unsigned fpfn;
unsigned lpfn;
+ uint32_t mem_type;
uint32_t flags;
};
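
With @mem_type now part of struct ttm_place, a driver placement list reads
roughly as below; the struct ttm_placement field names follow the in-tree
drivers of this era, and all my_* names are assumptions:

/* Prefer VRAM allocated from the top of the aperture, fall back to TT. */
static const struct ttm_place my_places[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_TOPDOWN,
	},
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_TT,
		.flags = 0,
	},
};

static const struct ttm_placement my_placement = {
	.num_placement = ARRAY_SIZE(my_places),
	.placement = my_places,
	.num_busy_placement = ARRAY_SIZE(my_places),
	.busy_placement = my_places,
};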
diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h
new file mode 100644
index 000000000000..ef09b23d29e3
--- /dev/null
+++ b/include/drm/ttm/ttm_pool.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#ifndef _TTM_PAGE_POOL_H_
+#define _TTM_PAGE_POOL_H_
+
+#include <linux/mmzone.h>
+#include <linux/llist.h>
+#include <linux/spinlock.h>
+#include <drm/ttm/ttm_caching.h>
+
+struct device;
+struct ttm_tt;
+struct ttm_pool;
+struct ttm_operation_ctx;
+
+/**
+ * struct ttm_pool_type - Pool for a certain memory type
+ *
+ * @pool: the pool we belong to, might be NULL for the global ones
+ * @order: the allocation order our pages have
+ * @caching: the caching type our pages have
+ * @shrinker_list: our place on the global shrinker list
+ * @lock: protection of the page list
+ * @pages: the list of pages in the pool
+ */
+struct ttm_pool_type {
+ struct ttm_pool *pool;
+ unsigned int order;
+ enum ttm_caching caching;
+
+ struct list_head shrinker_list;
+
+ spinlock_t lock;
+ struct list_head pages;
+};
+
+/**
+ * struct ttm_pool - Pool for all caching and orders
+ *
+ * @dev: the device we allocate pages for
+ * @use_dma_alloc: if coherent DMA allocations should be used
+ * @use_dma32: if GFP_DMA32 should be used
+ * @caching: pools for each caching/order
+ */
+struct ttm_pool {
+ struct device *dev;
+
+ bool use_dma_alloc;
+ bool use_dma32;
+
+ struct {
+ struct ttm_pool_type orders[MAX_ORDER];
+ } caching[TTM_NUM_CACHING_TYPES];
+};
+
+int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ struct ttm_operation_ctx *ctx);
+void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt);
+
+void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
+ bool use_dma_alloc, bool use_dma32);
+void ttm_pool_fini(struct ttm_pool *pool);
+
+int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m);
+
+int ttm_pool_mgr_init(unsigned long num_pages);
+void ttm_pool_mgr_fini(void);
+
+#endif
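
A rough round trip through the pool API above; in practice drivers use the
pool embedded in their ttm_device rather than a standalone one, and the
my_* name is an assumption:

static int my_pool_demo(struct device *dev, struct ttm_tt *tt,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_pool pool;
	int ret;

	ttm_pool_init(&pool, dev, true /* use_dma_alloc */,
		      false /* use_dma32 */);

	ret = ttm_pool_alloc(&pool, tt, ctx);	/* back tt with pages */
	if (!ret)
		ttm_pool_free(&pool, tt);	/* give the pages back */

	ttm_pool_fini(&pool);
	return ret;
}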
diff --git a/include/drm/ttm/ttm_range_manager.h b/include/drm/ttm/ttm_range_manager.h
new file mode 100644
index 000000000000..7963b957e9ef
--- /dev/null
+++ b/include/drm/ttm/ttm_range_manager.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+#ifndef _TTM_RANGE_MANAGER_H_
+#define _TTM_RANGE_MANAGER_H_
+
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/drm_mm.h>
+
+/**
+ * struct ttm_range_mgr_node
+ *
+ * @base: base class we extend
+ * @mm_nodes: MM nodes, usually 1
+ *
+ * Extending the ttm_resource object to manage an address space allocation with
+ * one or more drm_mm_nodes.
+ */
+struct ttm_range_mgr_node {
+ struct ttm_resource base;
+ struct drm_mm_node mm_nodes[];
+};
+
+/**
+ * to_ttm_range_mgr_node
+ *
+ * @res: the resource to upcast
+ *
+ * Upcast the ttm_resource object into a ttm_range_mgr_node object.
+ */
+static inline struct ttm_range_mgr_node *
+to_ttm_range_mgr_node(struct ttm_resource *res)
+{
+ return container_of(res, struct ttm_range_mgr_node, base);
+}
+
+int ttm_range_man_init_nocheck(struct ttm_device *bdev,
+ unsigned type, bool use_tt,
+ unsigned long p_size);
+int ttm_range_man_fini_nocheck(struct ttm_device *bdev,
+ unsigned type);
+static __always_inline int ttm_range_man_init(struct ttm_device *bdev,
+ unsigned int type, bool use_tt,
+ unsigned long p_size)
+{
+ BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
+ return ttm_range_man_init_nocheck(bdev, type, use_tt, p_size);
+}
+
+static __always_inline int ttm_range_man_fini(struct ttm_device *bdev,
+ unsigned int type)
+{
+ BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
+ return ttm_range_man_fini_nocheck(bdev, type);
+}
+#endif
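
For orientation, bringing up a range-managed VRAM domain with the checked
wrapper could look like this sketch; vram_size and the choice of
TTM_PL_VRAM are assumptions, and p_size is given in pages:

static int my_vram_domain_init(struct ttm_device *bdev, u64 vram_size)
{
	return ttm_range_man_init(bdev, TTM_PL_VRAM, false /* use_tt */,
				  vram_size >> PAGE_SHIFT);
}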
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
new file mode 100644
index 000000000000..5afc6d664fde
--- /dev/null
+++ b/include/drm/ttm/ttm_resource.h
@@ -0,0 +1,428 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#ifndef _TTM_RESOURCE_H_
+#define _TTM_RESOURCE_H_
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/iosys-map.h>
+#include <linux/dma-fence.h>
+
+#include <drm/drm_print.h>
+#include <drm/ttm/ttm_caching.h>
+#include <drm/ttm/ttm_kmap_iter.h>
+
+#define TTM_MAX_BO_PRIORITY 4U
+#define TTM_NUM_MEM_TYPES 8
+
+struct ttm_device;
+struct ttm_resource_manager;
+struct ttm_resource;
+struct ttm_place;
+struct ttm_buffer_object;
+struct ttm_placement;
+struct iosys_map;
+struct io_mapping;
+struct sg_table;
+struct scatterlist;
+
+struct ttm_resource_manager_func {
+ /**
+ * struct ttm_resource_manager_func member alloc
+ *
+ * @man: Pointer to a memory type manager.
+ * @bo: Pointer to the buffer object we're allocating space for.
+ * @place: Placement details.
+ * @res: Resulting pointer to the ttm_resource.
+ *
+ * This function should allocate space in the memory type managed
+ * by @man. Placement details if applicable are given by @place. If
+ * successful, a filled in ttm_resource object should be returned in
+ * @res. @res::start should be set to a value identifying the beginning
+ * of the range allocated, and the function should return zero.
+ * If the manager can't fulfill the request -ENOSPC should be returned.
+ * If a system error occurred, preventing the request to be fulfilled,
+ * the function should return a negative error code.
+ *
+ * This function may not be called from within atomic context and needs
+ * to take care of its own locking to protect any data structures
+ * managing the space.
+ */
+ int (*alloc)(struct ttm_resource_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource **res);
+
+ /**
+ * struct ttm_resource_manager_func member free
+ *
+ * @man: Pointer to a memory type manager.
+ * @res: Pointer to a struct ttm_resource to be freed.
+ *
+ * This function frees memory type resources previously allocated.
+ * May not be called from within atomic context.
+ */
+ void (*free)(struct ttm_resource_manager *man,
+ struct ttm_resource *res);
+
+ /**
+ * struct ttm_resource_manager_func member intersects
+ *
+ * @man: Pointer to a memory type manager.
+ * @res: Pointer to a struct ttm_resource to be checked.
+ * @place: Placement to check against.
+ * @size: Size of the check.
+ *
+ * Test if @res intersects with @place + @size. Used to judge whether
+ * evictions are valuable.
+ */
+ bool (*intersects)(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size);
+
+ /**
+ * struct ttm_resource_manager_func member compatible
+ *
+ * @man: Pointer to a memory type manager.
+ * @res: Pointer to a struct ttm_resource to be checked.
+ * @place: Placement to check against.
+ * @size: Size of the check.
+ *
+ * Test if @res is compatible with @place + @size. Used to check whether
+ * the backing store needs to be moved.
+ */
+ bool (*compatible)(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size);
+
+ /**
+ * struct ttm_resource_manager_func member debug
+ *
+ * @man: Pointer to a memory type manager.
+ * @printer: Prefix to be used in printout to identify the caller.
+ *
+ * This function is called to print out the state of the memory
+ * type manager to aid debugging of out-of-memory conditions.
+ * It may not be called from within atomic context.
+ */
+ void (*debug)(struct ttm_resource_manager *man,
+ struct drm_printer *printer);
+};
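+
+/*
+ * A minimal sketch of wiring a driver-private manager to this vtable.
+ * The names my_alloc/my_free/my_debug are hypothetical driver helpers,
+ * not part of this header:
+ *
+ *	static const struct ttm_resource_manager_func my_manager_func = {
+ *		.alloc = my_alloc,
+ *		.free = my_free,
+ *		.debug = my_debug,
+ *	};
+ */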
+
+/**
+ * struct ttm_resource_manager
+ *
+ * @use_type: The memory type is enabled.
+ * @use_tt: If a TT object should be used for the backing store.
+ * @size: Size of the managed region.
+ * @bdev: ttm device this manager belongs to
+ * @func: structure pointer implementing the range manager. See above
+ * @move_lock: lock for move fence
+ * @move: The fence of the last pipelined move operation.
+ * @lru: The lru list for this memory type.
+ *
+ * This structure is used to identify and manage memory types for a device.
+ */
+struct ttm_resource_manager {
+ /*
+ * No protection. Constant from start.
+ */
+ bool use_type;
+ bool use_tt;
+ struct ttm_device *bdev;
+ uint64_t size;
+ const struct ttm_resource_manager_func *func;
+ spinlock_t move_lock;
+
+ /*
+ * Protected by @move_lock.
+ */
+ struct dma_fence *move;
+
+ /*
+ * Protected by the bdev->lru_lock.
+ */
+ struct list_head lru[TTM_MAX_BO_PRIORITY];
+
+ /**
+ * @usage: How much of the resources are used, protected by the
+ * bdev->lru_lock.
+ */
+ uint64_t usage;
+};
+
+/**
+ * struct ttm_bus_placement
+ *
+ * @addr: mapped virtual address
+ * @offset: physical addr
+ * @is_iomem: is this io memory?
+ * @caching: See enum ttm_caching
+ *
+ * Structure indicating the bus placement of an object.
+ */
+struct ttm_bus_placement {
+ void *addr;
+ phys_addr_t offset;
+ bool is_iomem;
+ enum ttm_caching caching;
+};
+
+/**
+ * struct ttm_resource
+ *
+ * @start: Start of the allocation.
+ * @num_pages: Actual size of resource in pages.
+ * @mem_type: Resource type of the allocation.
+ * @placement: Placement flags.
+ * @bus: Placement on io bus accessible to the CPU
+ * @bo: weak reference to the BO, protected by ttm_device::lru_lock
+ *
+ * Structure indicating the placement and space resources used by a
+ * buffer object.
+ */
+struct ttm_resource {
+ unsigned long start;
+ unsigned long num_pages;
+ uint32_t mem_type;
+ uint32_t placement;
+ struct ttm_bus_placement bus;
+ struct ttm_buffer_object *bo;
+
+ /**
+ * @lru: Least recently used list, see &ttm_resource_manager.lru
+ */
+ struct list_head lru;
+};
+
+/**
+ * struct ttm_resource_cursor
+ *
+ * @priority: the current priority
+ *
+ * Cursor to iterate over the resources in a manager.
+ */
+struct ttm_resource_cursor {
+ unsigned int priority;
+};
+
+/**
+ * struct ttm_lru_bulk_move_pos
+ *
+ * @first: first res in the bulk move range
+ * @last: last res in the bulk move range
+ *
+ * Range of resources for a lru bulk move.
+ */
+struct ttm_lru_bulk_move_pos {
+ struct ttm_resource *first;
+ struct ttm_resource *last;
+};
+
+/**
+ * struct ttm_lru_bulk_move
+ *
+ * @pos: first/last lru entry for resources in the each domain/priority
+ *
+ * Container for the current bulk move state. Should be used with
+ * ttm_lru_bulk_move_init() and ttm_bo_set_bulk_move().
+ */
+struct ttm_lru_bulk_move {
+ struct ttm_lru_bulk_move_pos pos[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];
+};
+
+/**
+ * struct ttm_kmap_iter_iomap - Specialization for a struct io_mapping +
+ * struct sg_table backed struct ttm_resource.
+ * @base: Embedded struct ttm_kmap_iter providing the usage interface.
+ * @iomap: struct io_mapping representing the underlying linear io_memory.
+ * @st: sg_table into @iomap, representing the memory of the struct ttm_resource.
+ * @start: Offset that needs to be subtracted from @st to make
+ * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
+ * @cache: Scatterlist traversal cache for fast lookups.
+ * @cache.sg: Pointer to the currently cached scatterlist segment.
+ * @cache.i: First index of @sg. PAGE_SIZE granularity.
+ * @cache.end: Last index + 1 of @sg. PAGE_SIZE granularity.
+ * @cache.offs: First offset into @iomap of @sg. PAGE_SIZE granularity.
+ */
+struct ttm_kmap_iter_iomap {
+ struct ttm_kmap_iter base;
+ struct io_mapping *iomap;
+ struct sg_table *st;
+ resource_size_t start;
+ struct {
+ struct scatterlist *sg;
+ pgoff_t i;
+ pgoff_t end;
+ pgoff_t offs;
+ } cache;
+};
+
+/**
+ * struct ttm_kmap_iter_linear_io - Iterator specialization for linear io
+ * @base: The base iterator
+ * @dmap: Points to the starting address of the region
+ * @needs_unmap: Whether we need to unmap on fini
+ */
+struct ttm_kmap_iter_linear_io {
+ struct ttm_kmap_iter base;
+ struct iosys_map dmap;
+ bool needs_unmap;
+};
+
+/**
+ * ttm_resource_manager_set_used
+ *
+ * @man: A memory manager object.
+ * @used: usage state to set.
+ *
+ * Set the manager's in-use flag. If disabled, the manager is no longer
+ * used for object placement.
+ */
+static inline void
+ttm_resource_manager_set_used(struct ttm_resource_manager *man, bool used)
+{
+ int i;
+
+ for (i = 0; i < TTM_MAX_BO_PRIORITY; i++)
+ WARN_ON(!list_empty(&man->lru[i]));
+ man->use_type = used;
+}
+
+/**
+ * ttm_resource_manager_used
+ *
+ * @man: Manager to get used state for
+ *
+ * Get the in use flag for a manager.
+ * Returns:
+ * true if the manager is in use, false if not.
+ */
+static inline bool ttm_resource_manager_used(struct ttm_resource_manager *man)
+{
+ return man->use_type;
+}
+
+/**
+ * ttm_resource_manager_cleanup
+ *
+ * @man: A memory manager object.
+ *
+ * Clean up the move fences from the memory manager object.
+ */
+static inline void
+ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
+{
+ dma_fence_put(man->move);
+ man->move = NULL;
+}
+
+void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk);
+void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk);
+
+void ttm_resource_add_bulk_move(struct ttm_resource *res,
+ struct ttm_buffer_object *bo);
+void ttm_resource_del_bulk_move(struct ttm_resource *res,
+ struct ttm_buffer_object *bo);
+void ttm_resource_move_to_lru_tail(struct ttm_resource *res);
+
+void ttm_resource_init(struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource *res);
+void ttm_resource_fini(struct ttm_resource_manager *man,
+ struct ttm_resource *res);
+
+int ttm_resource_alloc(struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource **res);
+void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res);
+bool ttm_resource_intersects(struct ttm_device *bdev,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size);
+bool ttm_resource_compatible(struct ttm_device *bdev,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size);
+bool ttm_resource_compat(struct ttm_resource *res,
+ struct ttm_placement *placement);
+void ttm_resource_set_bo(struct ttm_resource *res,
+ struct ttm_buffer_object *bo);
+
+void ttm_resource_manager_init(struct ttm_resource_manager *man,
+ struct ttm_device *bdev,
+ uint64_t size);
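+
+/*
+ * Typical bring-up order in a driver, as a sketch. It assumes a
+ * driver-owned @man, the hypothetical my_manager_func table from
+ * above, and ttm_set_driver_manager() from ttm_device.h:
+ *
+ *	ttm_resource_manager_init(man, bdev, size);
+ *	man->func = &my_manager_func;
+ *	man->use_tt = false;
+ *	ttm_set_driver_manager(bdev, TTM_PL_VRAM, man);
+ *	ttm_resource_manager_set_used(man, true);
+ */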
+
+int ttm_resource_manager_evict_all(struct ttm_device *bdev,
+ struct ttm_resource_manager *man);
+
+uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man);
+void ttm_resource_manager_debug(struct ttm_resource_manager *man,
+ struct drm_printer *p);
+
+struct ttm_resource *
+ttm_resource_manager_first(struct ttm_resource_manager *man,
+ struct ttm_resource_cursor *cursor);
+struct ttm_resource *
+ttm_resource_manager_next(struct ttm_resource_manager *man,
+ struct ttm_resource_cursor *cursor,
+ struct ttm_resource *res);
+
+/**
+ * ttm_resource_manager_for_each_res - iterate over all resources
+ * @man: the resource manager
+ * @cursor: struct ttm_resource_cursor for the current position
+ * @res: the current resource
+ *
+ * Iterate over all the evictable resources in a resource manager.
+ */
+#define ttm_resource_manager_for_each_res(man, cursor, res) \
+ for (res = ttm_resource_manager_first(man, cursor); res; \
+ res = ttm_resource_manager_next(man, cursor, res))
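+
+/*
+ * Usage sketch for the iterator above. It assumes the caller holds
+ * bdev->lru_lock, which protects the LRU lists being walked, and
+ * evict_candidate() is a hypothetical driver helper:
+ *
+ *	struct ttm_resource_cursor cursor;
+ *	struct ttm_resource *res;
+ *
+ *	spin_lock(&bdev->lru_lock);
+ *	ttm_resource_manager_for_each_res(man, &cursor, res)
+ *		evict_candidate(res);
+ *	spin_unlock(&bdev->lru_lock);
+ */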
+
+struct ttm_kmap_iter *
+ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
+ struct io_mapping *iomap,
+ struct sg_table *st,
+ resource_size_t start);
+
+struct ttm_kmap_iter_linear_io;
+
+struct ttm_kmap_iter *
+ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
+ struct ttm_device *bdev,
+ struct ttm_resource *mem);
+
+void ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
+ struct ttm_device *bdev,
+ struct ttm_resource *mem);
+
+void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
+ struct dentry *parent,
+ const char *name);
+#endif
diff --git a/include/drm/ttm/ttm_set_memory.h b/include/drm/ttm/ttm_set_memory.h
deleted file mode 100644
index 7c492b49e38c..000000000000
--- a/include/drm/ttm/ttm_set_memory.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2018 Advanced Micro Devices, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Huang Rui <ray.huang@amd.com>
- */
-
-#ifndef TTM_SET_MEMORY
-#define TTM_SET_MEMORY
-
-#include <linux/mm.h>
-
-#ifdef CONFIG_X86
-
-#include <asm/set_memory.h>
-
-static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
-{
- return set_pages_array_wb(pages, addrinarray);
-}
-
-static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
-{
- return set_pages_array_wc(pages, addrinarray);
-}
-
-static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
-{
- return set_pages_array_uc(pages, addrinarray);
-}
-
-static inline int ttm_set_pages_wb(struct page *page, int numpages)
-{
- return set_pages_wb(page, numpages);
-}
-
-static inline int ttm_set_pages_wc(struct page *page, int numpages)
-{
- unsigned long addr = (unsigned long)page_address(page);
-
- return set_memory_wc(addr, numpages);
-}
-
-static inline int ttm_set_pages_uc(struct page *page, int numpages)
-{
- return set_pages_uc(page, numpages);
-}
-
-#else /* for CONFIG_X86 */
-
-#if IS_ENABLED(CONFIG_AGP)
-
-#include <asm/agp.h>
-
-static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
-{
- int i;
-
- for (i = 0; i < addrinarray; i++)
- unmap_page_from_agp(pages[i]);
- return 0;
-}
-
-static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
-{
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
- return 0;
-}
-
-static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
-{
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
- return 0;
-}
-
-static inline int ttm_set_pages_wb(struct page *page, int numpages)
-{
- int i;
-
- for (i = 0; i < numpages; i++)
- unmap_page_from_agp(page++);
- return 0;
-}
-
-#else /* for CONFIG_AGP */
-
-static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
-{
- return 0;
-}
-
-static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
-{
- return 0;
-}
-
-static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
-{
- return 0;
-}
-
-static inline int ttm_set_pages_wb(struct page *page, int numpages)
-{
- return 0;
-}
-
-#endif /* for CONFIG_AGP */
-
-static inline int ttm_set_pages_wc(struct page *page, int numpages)
-{
- return 0;
-}
-
-static inline int ttm_set_pages_uc(struct page *page, int numpages)
-{
- return 0;
-}
-
-#endif /* for CONFIG_X86 */
-
-#endif
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index c0e928abf592..17a0310e8aaa 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -27,114 +27,101 @@
#ifndef _TTM_TT_H_
#define _TTM_TT_H_
+#include <linux/pagemap.h>
#include <linux/types.h>
+#include <drm/ttm/ttm_caching.h>
+#include <drm/ttm/ttm_kmap_iter.h>
+struct ttm_device;
struct ttm_tt;
-struct ttm_mem_reg;
+struct ttm_resource;
struct ttm_buffer_object;
struct ttm_operation_ctx;
-#define TTM_PAGE_FLAG_WRITE (1 << 3)
-#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
-#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
-#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
-#define TTM_PAGE_FLAG_DMA32 (1 << 7)
-#define TTM_PAGE_FLAG_SG (1 << 8)
-#define TTM_PAGE_FLAG_NO_RETRY (1 << 9)
-
-enum ttm_caching_state {
- tt_uncached,
- tt_wc,
- tt_cached
-};
-
-struct ttm_backend_func {
+/**
+ * struct ttm_tt - This is a structure holding the pages, caching- and aperture
+ * binding status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+struct ttm_tt {
+ /** @pages: Array of pages backing the data. */
+ struct page **pages;
/**
- * struct ttm_backend_func member bind
+ * @page_flags: The page flags.
*
- * @ttm: Pointer to a struct ttm_tt.
- * @bo_mem: Pointer to a struct ttm_mem_reg describing the
- * memory type and location for binding.
+ * Supported values:
*
- * Bind the backend pages into the aperture in the location
- * indicated by @bo_mem. This function should be able to handle
- * differences between aperture and system page sizes.
- */
- int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
-
- /**
- * struct ttm_backend_func member unbind
+ * TTM_TT_FLAG_SWAPPED: Set by TTM when the pages have been unpopulated
+ * and swapped out by TTM. Calling ttm_tt_populate() will then swap the
+ * pages back in, and unset the flag. Drivers should in general never
+ * need to touch this.
*
- * @ttm: Pointer to a struct ttm_tt.
+ * TTM_TT_FLAG_ZERO_ALLOC: Set if the pages will be zeroed on
+ * allocation.
*
- * Unbind previously bound backend pages. This function should be
- * able to handle differences between aperture and system page sizes.
- */
- int (*unbind) (struct ttm_tt *ttm);
-
- /**
- * struct ttm_backend_func member destroy
+ * TTM_TT_FLAG_EXTERNAL: Set if the underlying pages were allocated
+ * externally, like with dma-buf or userptr. This effectively disables
+ * TTM swapping out such pages. It is also important to prevent TTM
+ * from ever directly mapping these pages.
*
- * @ttm: Pointer to a struct ttm_tt.
+ * Note that enum ttm_bo_type.ttm_bo_type_sg objects will always enable
+ * this flag.
*
- * Destroy the backend. This will be call back from ttm_tt_destroy so
- * don't call ttm_tt_destroy from the callback or infinite loop.
+ * TTM_TT_FLAG_EXTERNAL_MAPPABLE: Same behaviour as
+ * TTM_TT_FLAG_EXTERNAL, but with the reduced restriction that it is
+ * still valid to use TTM to map the pages directly. This is useful when
+ * implementing a ttm_tt backend which still allocates driver-owned
+ * pages underneath (say with shmem).
+ *
+ * Note that since this also implies TTM_TT_FLAG_EXTERNAL, the usage
+ * here should always be:
+ *
+ * page_flags = TTM_TT_FLAG_EXTERNAL |
+ * TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+ *
+ * TTM_TT_FLAG_PRIV_POPULATED: TTM internal only. DO NOT USE. This is
+ * set by TTM after ttm_tt_populate() has successfully returned, and is
+ * then unset when TTM calls ttm_tt_unpopulate().
*/
- void (*destroy) (struct ttm_tt *ttm);
-};
+#define TTM_TT_FLAG_SWAPPED (1 << 0)
+#define TTM_TT_FLAG_ZERO_ALLOC (1 << 1)
+#define TTM_TT_FLAG_EXTERNAL (1 << 2)
+#define TTM_TT_FLAG_EXTERNAL_MAPPABLE (1 << 3)
-/**
- * struct ttm_tt
- *
- * @bdev: Pointer to a struct ttm_bo_device.
- * @func: Pointer to a struct ttm_backend_func that describes
- * the backend methods.
- * pointer.
- * @pages: Array of pages backing the data.
- * @num_pages: Number of pages in the page array.
- * @bdev: Pointer to the current struct ttm_bo_device.
- * @be: Pointer to the ttm backend.
- * @swap_storage: Pointer to shmem struct file for swap storage.
- * @caching_state: The current caching state of the pages.
- * @state: The current binding state of the pages.
- *
- * This is a structure holding the pages, caching- and aperture binding
- * status for a buffer object that isn't backed by fixed (VRAM / AGP)
- * memory.
- */
-struct ttm_tt {
- struct ttm_bo_device *bdev;
- struct ttm_backend_func *func;
- struct page **pages;
+#define TTM_TT_FLAG_PRIV_POPULATED (1 << 31)
uint32_t page_flags;
- unsigned long num_pages;
- struct sg_table *sg; /* for SG objects via dma-buf */
+ /** @num_pages: Number of pages in the page array. */
+ uint32_t num_pages;
+ /** @sg: for SG objects via dma-buf. */
+ struct sg_table *sg;
+ /** @dma_address: The DMA (bus) addresses of the pages. */
+ dma_addr_t *dma_address;
+ /** @swap_storage: Pointer to shmem struct file for swap storage. */
struct file *swap_storage;
- enum ttm_caching_state caching_state;
- enum {
- tt_bound,
- tt_unbound,
- tt_unpopulated,
- } state;
+ /**
+ * @caching: The current caching state of the pages, see enum
+ * ttm_caching.
+ */
+ enum ttm_caching caching;
};
/**
- * struct ttm_dma_tt
- *
- * @ttm: Base ttm_tt struct.
- * @dma_address: The DMA (bus) addresses of the pages
- * @pages_list: used by some page allocation backend
- *
- * This is a structure holding the pages, caching- and aperture binding
- * status for a buffer object that isn't backed by fixed (VRAM / AGP)
- * memory.
+ * struct ttm_kmap_iter_tt - Specialization of a mapping iterator for a tt.
+ * @base: Embedded struct ttm_kmap_iter providing the usage interface
+ * @tt: Cached struct ttm_tt.
+ * @prot: Cached page protection for mapping.
*/
-struct ttm_dma_tt {
- struct ttm_tt ttm;
- dma_addr_t *dma_address;
- struct list_head pages_list;
+struct ttm_kmap_iter_tt {
+ struct ttm_kmap_iter base;
+ struct ttm_tt *tt;
+ pgprot_t prot;
};
+static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
+{
+ return tt->page_flags & TTM_TT_FLAG_PRIV_POPULATED;
+}
+
/**
* ttm_tt_create
*
@@ -151,7 +138,9 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
*
* @ttm: The struct ttm_tt.
* @bo: The buffer object we create the ttm for.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
+ * @caching: The desired caching state of the pages.
+ * @extra_pages: Extra pages needed for the driver.
*
* Create a struct ttm_tt to back data with system memory pages.
* No pages are actually allocated.
@@ -159,11 +148,10 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
* NULL: Out of memory.
*/
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
- uint32_t page_flags);
-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
- uint32_t page_flags);
-int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
- uint32_t page_flags);
+ uint32_t page_flags, enum ttm_caching caching,
+ unsigned long extra_pages);
+int ttm_sg_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
+ uint32_t page_flags, enum ttm_caching caching);
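+
+/*
+ * Sketch of a driver ->ttm_tt_create() hook against the new
+ * ttm_tt_init() signature (hypothetical driver code; cached pages,
+ * no extra pages requested):
+ *
+ *	static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
+ *					       uint32_t page_flags)
+ *	{
+ *		struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ *
+ *		if (!tt)
+ *			return NULL;
+ *		if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0)) {
+ *			kfree(tt);
+ *			return NULL;
+ *		}
+ *		return tt;
+ *	}
+ */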
/**
* ttm_tt_fini
@@ -173,36 +161,16 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
* Free memory of ttm_tt structure
*/
void ttm_tt_fini(struct ttm_tt *ttm);
-void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
-
-/**
- * ttm_ttm_bind:
- *
- * @ttm: The struct ttm_tt containing backing pages.
- * @bo_mem: The struct ttm_mem_reg identifying the binding location.
- *
- * Bind the pages of @ttm to an aperture location identified by @bo_mem
- */
-int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
- struct ttm_operation_ctx *ctx);
/**
- * ttm_ttm_destroy:
+ * ttm_tt_destroy:
*
+ * @bdev: the ttm_device this object belongs to
* @ttm: The struct ttm_tt.
*
* Unbind, unpopulate and destroy common struct ttm_tt.
*/
-void ttm_tt_destroy(struct ttm_tt *ttm);
-
-/**
- * ttm_ttm_unbind:
- *
- * @ttm: The struct ttm_tt.
- *
- * Unbind a struct ttm_tt.
- */
-void ttm_tt_unbind(struct ttm_tt *ttm);
+void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
/**
* ttm_tt_swapin:
@@ -212,40 +180,48 @@ void ttm_tt_unbind(struct ttm_tt *ttm);
* Swap in a previously swapped-out ttm_tt.
*/
int ttm_tt_swapin(struct ttm_tt *ttm);
+int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
+ gfp_t gfp_flags);
/**
- * ttm_tt_set_placement_caching:
+ * ttm_tt_populate - allocate pages for a ttm
*
- * @ttm A struct ttm_tt the backing pages of which will change caching policy.
- * @placement: Flag indicating the desired caching policy.
+ * @bdev: the ttm_device this object belongs to
+ * @ttm: Pointer to the ttm_tt structure
+ * @ctx: operation context for populating the tt object.
*
- * This function will change caching policy of any default kernel mappings of
- * the pages backing @ttm. If changing from cached to uncached or
- * write-combined,
- * all CPU caches will first be flushed to make sure the data of the pages
- * hit RAM. This function may be very costly as it involves global TLB
- * and cache flushes and potential page splitting / combining.
+ * Calls the driver method to allocate pages for a ttm
*/
-int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
-int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage);
+int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx);
/**
- * ttm_tt_populate - allocate pages for a ttm
+ * ttm_tt_unpopulate - free pages from a ttm
*
+ * @bdev: the ttm_device this object belongs to
* @ttm: Pointer to the ttm_tt structure
*
- * Calls the driver method to allocate pages for a ttm
+ * Calls the driver method to free all pages from a ttm
*/
-int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
+void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm);
/**
- * ttm_tt_unpopulate - free pages from a ttm
+ * ttm_tt_mark_for_clear - Mark pages for clearing on populate.
*
* @ttm: Pointer to the ttm_tt structure
*
- * Calls the driver method to free all pages from a ttm
+ * Marks pages for clearing so that the next time the page vector is
+ * populated, the pages will be cleared.
*/
-void ttm_tt_unpopulate(struct ttm_tt *ttm);
+static inline void ttm_tt_mark_for_clear(struct ttm_tt *ttm)
+{
+ ttm->page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
+}
+
+void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages);
+
+struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
+ struct ttm_tt *tt);
#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>
@@ -255,7 +231,7 @@ void ttm_tt_unpopulate(struct ttm_tt *ttm);
*
* @bo: Buffer object we allocate the ttm for.
* @bridge: The agp bridge this device is sitting on.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
*
*
* Create a TTM backend that uses the indicated AGP bridge as an aperture
@@ -265,8 +241,10 @@ void ttm_tt_unpopulate(struct ttm_tt *ttm);
struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
struct agp_bridge_data *bridge,
uint32_t page_flags);
-int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
-void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
+int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem);
+void ttm_agp_unbind(struct ttm_tt *ttm);
+void ttm_agp_destroy(struct ttm_tt *ttm);
+bool ttm_agp_is_bound(struct ttm_tt *ttm);
#endif
#endif
diff --git a/include/dt-bindings/arm/coresight-cti-dt.h b/include/dt-bindings/arm/coresight-cti-dt.h
new file mode 100644
index 000000000000..61e7bdf8ea6e
--- /dev/null
+++ b/include/dt-bindings/arm/coresight-cti-dt.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for the defined trigger signal
+ * types on CoreSight CTI.
+ */
+
+#ifndef _DT_BINDINGS_ARM_CORESIGHT_CTI_DT_H
+#define _DT_BINDINGS_ARM_CORESIGHT_CTI_DT_H
+
+#define GEN_IO 0
+#define GEN_INTREQ 1
+#define GEN_INTACK 2
+#define GEN_HALTREQ 3
+#define GEN_RESTARTREQ 4
+#define PE_EDBGREQ 5
+#define PE_DBGRESTART 6
+#define PE_CTIIRQ 7
+#define PE_PMUIRQ 8
+#define PE_DBGTRIGGER 9
+#define ETM_EXTOUT 10
+#define ETM_EXTIN 11
+#define SNK_FULL 12
+#define SNK_ACQCOMP 13
+#define SNK_FLUSHCOMP 14
+#define SNK_FLUSHIN 15
+#define SNK_TRIGIN 16
+#define STM_ASYNCOUT 17
+#define STM_TOUT_SPTE 18
+#define STM_TOUT_SW 19
+#define STM_TOUT_HETE 20
+#define STM_HWEVENT 21
+#define ELA_TSTART 22
+#define ELA_TSTOP 23
+#define ELA_DBGREQ 24
+#define CTI_TRIG_MAX 25
+
+#endif /*_DT_BINDINGS_ARM_CORESIGHT_CTI_DT_H */
diff --git a/include/dt-bindings/ata/ahci.h b/include/dt-bindings/ata/ahci.h
new file mode 100644
index 000000000000..77997b35612c
--- /dev/null
+++ b/include/dt-bindings/ata/ahci.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * This header provides constants for most AHCI bindings.
+ */
+
+#ifndef _DT_BINDINGS_ATA_AHCI_H
+#define _DT_BINDINGS_ATA_AHCI_H
+
+/* Host Bus Adapter generic platform capabilities */
+#define HBA_SSS (1 << 27)
+#define HBA_SMPS (1 << 28)
+
+/* Host Bus Adapter port-specific platform capabilities */
+#define HBA_PORT_HPCP (1 << 18)
+#define HBA_PORT_MPSP (1 << 19)
+#define HBA_PORT_CPD (1 << 20)
+#define HBA_PORT_ESP (1 << 21)
+#define HBA_PORT_FBSCP (1 << 22)
+
+#endif
diff --git a/include/dt-bindings/bus/moxtet.h b/include/dt-bindings/bus/moxtet.h
index dc9345440ebe..10528de7b3ef 100644
--- a/include/dt-bindings/bus/moxtet.h
+++ b/include/dt-bindings/bus/moxtet.h
@@ -2,7 +2,7 @@
/*
* Constant for device tree bindings for Turris Mox module configuration bus
*
- * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
*/
#ifndef _DT_BINDINGS_BUS_MOXTET_H
diff --git a/include/dt-bindings/bus/ti-sysc.h b/include/dt-bindings/bus/ti-sysc.h
index babd08a1d226..76b07826ed05 100644
--- a/include/dt-bindings/bus/ti-sysc.h
+++ b/include/dt-bindings/bus/ti-sysc.h
@@ -18,6 +18,10 @@
#define SYSC_DRA7_MCAN_ENAWAKEUP (1 << 4)
+/* PRUSS sysc found on AM33xx/AM43xx/AM57xx */
+#define SYSC_PRUSS_SUB_MWAIT (1 << 5)
+#define SYSC_PRUSS_STANDBY_INIT (1 << 4)
+
/* SYSCONFIG STANDBYMODE/MIDLEMODE/SIDLEMODE supported by hardware */
#define SYSC_IDLE_FORCE 0
#define SYSC_IDLE_NO 1
diff --git a/include/dt-bindings/clock/actions,s500-cmu.h b/include/dt-bindings/clock/actions,s500-cmu.h
index 030981cd2d56..a237eb26accb 100644
--- a/include/dt-bindings/clock/actions,s500-cmu.h
+++ b/include/dt-bindings/clock/actions,s500-cmu.h
@@ -72,7 +72,14 @@
#define CLK_NAND 52
#define CLK_ECC 53
#define CLK_RMII_REF 54
+#define CLK_GPIO 55
-#define CLK_NR_CLKS (CLK_RMII_REF + 1)
+/* additional clocks */
+#define CLK_APB 56
+#define CLK_DMAC 57
+#define CLK_NIC 58
+#define CLK_ETHERNET 59
+
+#define CLK_NR_CLKS (CLK_ETHERNET + 1)
#endif /* __DT_BINDINGS_CLOCK_S500_CMU_H */
diff --git a/include/dt-bindings/clock/agilex-clock.h b/include/dt-bindings/clock/agilex-clock.h
new file mode 100644
index 000000000000..06feca07e08e
--- /dev/null
+++ b/include/dt-bindings/clock/agilex-clock.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019, Intel Corporation
+ */
+
+#ifndef __AGILEX_CLOCK_H
+#define __AGILEX_CLOCK_H
+
+/* fixed rate clocks */
+#define AGILEX_OSC1 0
+#define AGILEX_CB_INTOSC_HS_DIV2_CLK 1
+#define AGILEX_CB_INTOSC_LS_CLK 2
+#define AGILEX_L4_SYS_FREE_CLK 3
+#define AGILEX_F2S_FREE_CLK 4
+
+/* PLL clocks */
+#define AGILEX_MAIN_PLL_CLK 5
+#define AGILEX_MAIN_PLL_C0_CLK 6
+#define AGILEX_MAIN_PLL_C1_CLK 7
+#define AGILEX_MAIN_PLL_C2_CLK 8
+#define AGILEX_MAIN_PLL_C3_CLK 9
+#define AGILEX_PERIPH_PLL_CLK 10
+#define AGILEX_PERIPH_PLL_C0_CLK 11
+#define AGILEX_PERIPH_PLL_C1_CLK 12
+#define AGILEX_PERIPH_PLL_C2_CLK 13
+#define AGILEX_PERIPH_PLL_C3_CLK 14
+#define AGILEX_MPU_FREE_CLK 15
+#define AGILEX_MPU_CCU_CLK 16
+#define AGILEX_BOOT_CLK 17
+
+/* fixed factor clocks */
+#define AGILEX_L3_MAIN_FREE_CLK 18
+#define AGILEX_NOC_FREE_CLK 19
+#define AGILEX_S2F_USR0_CLK 20
+#define AGILEX_NOC_CLK 21
+#define AGILEX_EMAC_A_FREE_CLK 22
+#define AGILEX_EMAC_B_FREE_CLK 23
+#define AGILEX_EMAC_PTP_FREE_CLK 24
+#define AGILEX_GPIO_DB_FREE_CLK 25
+#define AGILEX_SDMMC_FREE_CLK 26
+#define AGILEX_S2F_USER0_FREE_CLK 27
+#define AGILEX_S2F_USER1_FREE_CLK 28
+#define AGILEX_PSI_REF_FREE_CLK 29
+
+/* Gate clocks */
+#define AGILEX_MPU_CLK 30
+#define AGILEX_MPU_L2RAM_CLK 31
+#define AGILEX_MPU_PERIPH_CLK 32
+#define AGILEX_L4_MAIN_CLK 33
+#define AGILEX_L4_MP_CLK 34
+#define AGILEX_L4_SP_CLK 35
+#define AGILEX_CS_AT_CLK 36
+#define AGILEX_CS_TRACE_CLK 37
+#define AGILEX_CS_PDBG_CLK 38
+#define AGILEX_CS_TIMER_CLK 39
+#define AGILEX_S2F_USER0_CLK 40
+#define AGILEX_EMAC0_CLK 41
+#define AGILEX_EMAC1_CLK 43
+#define AGILEX_EMAC2_CLK 44
+#define AGILEX_EMAC_PTP_CLK 45
+#define AGILEX_GPIO_DB_CLK 46
+#define AGILEX_NAND_CLK 47
+#define AGILEX_PSI_REF_CLK 48
+#define AGILEX_S2F_USER1_CLK 49
+#define AGILEX_SDMMC_CLK 50
+#define AGILEX_SPI_M_CLK 51
+#define AGILEX_USB_CLK 52
+#define AGILEX_NAND_X_CLK 53
+#define AGILEX_NAND_ECC_CLK 54
+#define AGILEX_NUM_CLKS 55
+
+#endif /* __AGILEX_CLOCK_H */
diff --git a/include/dt-bindings/clock/alphascale,asm9260.h b/include/dt-bindings/clock/alphascale,asm9260.h
index d3871c63308b..f53f8b16883d 100644
--- a/include/dt-bindings/clock/alphascale,asm9260.h
+++ b/include/dt-bindings/clock/alphascale,asm9260.h
@@ -55,7 +55,7 @@
#define CLKID_AHB_I2S1 45
#define CLKID_AHB_MAC1 46
-/* devider */
+/* divider */
#define CLKID_SYS_CPU 47
#define CLKID_SYS_AHB 48
#define CLKID_SYS_I2S0M 49
diff --git a/include/dt-bindings/clock/am3.h b/include/dt-bindings/clock/am3.h
index 894951541276..dfbad5c87933 100644
--- a/include/dt-bindings/clock/am3.h
+++ b/include/dt-bindings/clock/am3.h
@@ -8,99 +8,6 @@
#define AM3_CLKCTRL_OFFSET 0x0
#define AM3_CLKCTRL_INDEX(offset) ((offset) - AM3_CLKCTRL_OFFSET)
-/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */
-
-/* l4_per clocks */
-#define AM3_L4_PER_CLKCTRL_OFFSET 0x14
-#define AM3_L4_PER_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_PER_CLKCTRL_OFFSET)
-#define AM3_CPGMAC0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x14)
-#define AM3_LCDC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x18)
-#define AM3_USB_OTG_HS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x1c)
-#define AM3_TPTC0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x24)
-#define AM3_EMIF_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x28)
-#define AM3_OCMCRAM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x2c)
-#define AM3_GPMC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x30)
-#define AM3_MCASP0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x34)
-#define AM3_UART6_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x38)
-#define AM3_MMC1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x3c)
-#define AM3_ELM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x40)
-#define AM3_I2C3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x44)
-#define AM3_I2C2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x48)
-#define AM3_SPI0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x4c)
-#define AM3_SPI1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x50)
-#define AM3_L4_LS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x60)
-#define AM3_MCASP1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x68)
-#define AM3_UART2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x6c)
-#define AM3_UART3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x70)
-#define AM3_UART4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x74)
-#define AM3_UART5_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x78)
-#define AM3_TIMER7_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x7c)
-#define AM3_TIMER2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x80)
-#define AM3_TIMER3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x84)
-#define AM3_TIMER4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x88)
-#define AM3_RNG_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x90)
-#define AM3_AES_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x94)
-#define AM3_SHAM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xa0)
-#define AM3_GPIO2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xac)
-#define AM3_GPIO3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xb0)
-#define AM3_GPIO4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xb4)
-#define AM3_TPCC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xbc)
-#define AM3_D_CAN0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xc0)
-#define AM3_D_CAN1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xc4)
-#define AM3_EPWMSS1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xcc)
-#define AM3_EPWMSS0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xd4)
-#define AM3_EPWMSS2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xd8)
-#define AM3_L3_INSTR_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xdc)
-#define AM3_L3_MAIN_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xe0)
-#define AM3_PRUSS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xe8)
-#define AM3_TIMER5_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xec)
-#define AM3_TIMER6_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf0)
-#define AM3_MMC2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf4)
-#define AM3_MMC3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf8)
-#define AM3_TPTC1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xfc)
-#define AM3_TPTC2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x100)
-#define AM3_SPINLOCK_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x10c)
-#define AM3_MAILBOX_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x110)
-#define AM3_L4_HS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x120)
-#define AM3_OCPWP_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x130)
-#define AM3_CLKDIV32K_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x14c)
-
-/* l4_wkup clocks */
-#define AM3_L4_WKUP_CLKCTRL_OFFSET 0x4
-#define AM3_L4_WKUP_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_WKUP_CLKCTRL_OFFSET)
-#define AM3_CONTROL_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x4)
-#define AM3_GPIO1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x8)
-#define AM3_L4_WKUP_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc)
-#define AM3_DEBUGSS_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x14)
-#define AM3_WKUP_M3_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb0)
-#define AM3_UART1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb4)
-#define AM3_I2C1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb8)
-#define AM3_ADC_TSC_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xbc)
-#define AM3_SMARTREFLEX0_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc0)
-#define AM3_TIMER1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc4)
-#define AM3_SMARTREFLEX1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc8)
-#define AM3_WD_TIMER2_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xd4)
-
-/* mpu clocks */
-#define AM3_MPU_CLKCTRL_OFFSET 0x4
-#define AM3_MPU_CLKCTRL_INDEX(offset) ((offset) - AM3_MPU_CLKCTRL_OFFSET)
-#define AM3_MPU_CLKCTRL AM3_MPU_CLKCTRL_INDEX(0x4)
-
-/* l4_rtc clocks */
-#define AM3_RTC_CLKCTRL AM3_CLKCTRL_INDEX(0x0)
-
-/* gfx_l3 clocks */
-#define AM3_GFX_L3_CLKCTRL_OFFSET 0x4
-#define AM3_GFX_L3_CLKCTRL_INDEX(offset) ((offset) - AM3_GFX_L3_CLKCTRL_OFFSET)
-#define AM3_GFX_CLKCTRL AM3_GFX_L3_CLKCTRL_INDEX(0x4)
-
-/* l4_cefuse clocks */
-#define AM3_L4_CEFUSE_CLKCTRL_OFFSET 0x20
-#define AM3_L4_CEFUSE_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_CEFUSE_CLKCTRL_OFFSET)
-#define AM3_CEFUSE_CLKCTRL AM3_L4_CEFUSE_CLKCTRL_INDEX(0x20)
-
-/* XXX: Compatibility part end */
-
/* l4ls clocks */
#define AM3_L4LS_CLKCTRL_OFFSET 0x38
#define AM3_L4LS_CLKCTRL_INDEX(offset) ((offset) - AM3_L4LS_CLKCTRL_OFFSET)
diff --git a/include/dt-bindings/clock/am4.h b/include/dt-bindings/clock/am4.h
index d961e7cb3682..a65b082e9cff 100644
--- a/include/dt-bindings/clock/am4.h
+++ b/include/dt-bindings/clock/am4.h
@@ -8,104 +8,6 @@
#define AM4_CLKCTRL_OFFSET 0x20
#define AM4_CLKCTRL_INDEX(offset) ((offset) - AM4_CLKCTRL_OFFSET)
-/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */
-
-/* l4_wkup clocks */
-#define AM4_ADC_TSC_CLKCTRL AM4_CLKCTRL_INDEX(0x120)
-#define AM4_L4_WKUP_CLKCTRL AM4_CLKCTRL_INDEX(0x220)
-#define AM4_WKUP_M3_CLKCTRL AM4_CLKCTRL_INDEX(0x228)
-#define AM4_COUNTER_32K_CLKCTRL AM4_CLKCTRL_INDEX(0x230)
-#define AM4_TIMER1_CLKCTRL AM4_CLKCTRL_INDEX(0x328)
-#define AM4_WD_TIMER2_CLKCTRL AM4_CLKCTRL_INDEX(0x338)
-#define AM4_I2C1_CLKCTRL AM4_CLKCTRL_INDEX(0x340)
-#define AM4_UART1_CLKCTRL AM4_CLKCTRL_INDEX(0x348)
-#define AM4_SMARTREFLEX0_CLKCTRL AM4_CLKCTRL_INDEX(0x350)
-#define AM4_SMARTREFLEX1_CLKCTRL AM4_CLKCTRL_INDEX(0x358)
-#define AM4_CONTROL_CLKCTRL AM4_CLKCTRL_INDEX(0x360)
-#define AM4_GPIO1_CLKCTRL AM4_CLKCTRL_INDEX(0x368)
-
-/* mpu clocks */
-#define AM4_MPU_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
-
-/* gfx_l3 clocks */
-#define AM4_GFX_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
-
-/* l4_rtc clocks */
-#define AM4_RTC_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
-
-/* l4_per clocks */
-#define AM4_L3_MAIN_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
-#define AM4_AES_CLKCTRL AM4_CLKCTRL_INDEX(0x28)
-#define AM4_DES_CLKCTRL AM4_CLKCTRL_INDEX(0x30)
-#define AM4_L3_INSTR_CLKCTRL AM4_CLKCTRL_INDEX(0x40)
-#define AM4_OCMCRAM_CLKCTRL AM4_CLKCTRL_INDEX(0x50)
-#define AM4_SHAM_CLKCTRL AM4_CLKCTRL_INDEX(0x58)
-#define AM4_VPFE0_CLKCTRL AM4_CLKCTRL_INDEX(0x68)
-#define AM4_VPFE1_CLKCTRL AM4_CLKCTRL_INDEX(0x70)
-#define AM4_TPCC_CLKCTRL AM4_CLKCTRL_INDEX(0x78)
-#define AM4_TPTC0_CLKCTRL AM4_CLKCTRL_INDEX(0x80)
-#define AM4_TPTC1_CLKCTRL AM4_CLKCTRL_INDEX(0x88)
-#define AM4_TPTC2_CLKCTRL AM4_CLKCTRL_INDEX(0x90)
-#define AM4_L4_HS_CLKCTRL AM4_CLKCTRL_INDEX(0xa0)
-#define AM4_GPMC_CLKCTRL AM4_CLKCTRL_INDEX(0x220)
-#define AM4_MCASP0_CLKCTRL AM4_CLKCTRL_INDEX(0x238)
-#define AM4_MCASP1_CLKCTRL AM4_CLKCTRL_INDEX(0x240)
-#define AM4_MMC3_CLKCTRL AM4_CLKCTRL_INDEX(0x248)
-#define AM4_QSPI_CLKCTRL AM4_CLKCTRL_INDEX(0x258)
-#define AM4_USB_OTG_SS0_CLKCTRL AM4_CLKCTRL_INDEX(0x260)
-#define AM4_USB_OTG_SS1_CLKCTRL AM4_CLKCTRL_INDEX(0x268)
-#define AM4_PRUSS_CLKCTRL AM4_CLKCTRL_INDEX(0x320)
-#define AM4_L4_LS_CLKCTRL AM4_CLKCTRL_INDEX(0x420)
-#define AM4_D_CAN0_CLKCTRL AM4_CLKCTRL_INDEX(0x428)
-#define AM4_D_CAN1_CLKCTRL AM4_CLKCTRL_INDEX(0x430)
-#define AM4_EPWMSS0_CLKCTRL AM4_CLKCTRL_INDEX(0x438)
-#define AM4_EPWMSS1_CLKCTRL AM4_CLKCTRL_INDEX(0x440)
-#define AM4_EPWMSS2_CLKCTRL AM4_CLKCTRL_INDEX(0x448)
-#define AM4_EPWMSS3_CLKCTRL AM4_CLKCTRL_INDEX(0x450)
-#define AM4_EPWMSS4_CLKCTRL AM4_CLKCTRL_INDEX(0x458)
-#define AM4_EPWMSS5_CLKCTRL AM4_CLKCTRL_INDEX(0x460)
-#define AM4_ELM_CLKCTRL AM4_CLKCTRL_INDEX(0x468)
-#define AM4_GPIO2_CLKCTRL AM4_CLKCTRL_INDEX(0x478)
-#define AM4_GPIO3_CLKCTRL AM4_CLKCTRL_INDEX(0x480)
-#define AM4_GPIO4_CLKCTRL AM4_CLKCTRL_INDEX(0x488)
-#define AM4_GPIO5_CLKCTRL AM4_CLKCTRL_INDEX(0x490)
-#define AM4_GPIO6_CLKCTRL AM4_CLKCTRL_INDEX(0x498)
-#define AM4_HDQ1W_CLKCTRL AM4_CLKCTRL_INDEX(0x4a0)
-#define AM4_I2C2_CLKCTRL AM4_CLKCTRL_INDEX(0x4a8)
-#define AM4_I2C3_CLKCTRL AM4_CLKCTRL_INDEX(0x4b0)
-#define AM4_MAILBOX_CLKCTRL AM4_CLKCTRL_INDEX(0x4b8)
-#define AM4_MMC1_CLKCTRL AM4_CLKCTRL_INDEX(0x4c0)
-#define AM4_MMC2_CLKCTRL AM4_CLKCTRL_INDEX(0x4c8)
-#define AM4_RNG_CLKCTRL AM4_CLKCTRL_INDEX(0x4e0)
-#define AM4_SPI0_CLKCTRL AM4_CLKCTRL_INDEX(0x500)
-#define AM4_SPI1_CLKCTRL AM4_CLKCTRL_INDEX(0x508)
-#define AM4_SPI2_CLKCTRL AM4_CLKCTRL_INDEX(0x510)
-#define AM4_SPI3_CLKCTRL AM4_CLKCTRL_INDEX(0x518)
-#define AM4_SPI4_CLKCTRL AM4_CLKCTRL_INDEX(0x520)
-#define AM4_SPINLOCK_CLKCTRL AM4_CLKCTRL_INDEX(0x528)
-#define AM4_TIMER2_CLKCTRL AM4_CLKCTRL_INDEX(0x530)
-#define AM4_TIMER3_CLKCTRL AM4_CLKCTRL_INDEX(0x538)
-#define AM4_TIMER4_CLKCTRL AM4_CLKCTRL_INDEX(0x540)
-#define AM4_TIMER5_CLKCTRL AM4_CLKCTRL_INDEX(0x548)
-#define AM4_TIMER6_CLKCTRL AM4_CLKCTRL_INDEX(0x550)
-#define AM4_TIMER7_CLKCTRL AM4_CLKCTRL_INDEX(0x558)
-#define AM4_TIMER8_CLKCTRL AM4_CLKCTRL_INDEX(0x560)
-#define AM4_TIMER9_CLKCTRL AM4_CLKCTRL_INDEX(0x568)
-#define AM4_TIMER10_CLKCTRL AM4_CLKCTRL_INDEX(0x570)
-#define AM4_TIMER11_CLKCTRL AM4_CLKCTRL_INDEX(0x578)
-#define AM4_UART2_CLKCTRL AM4_CLKCTRL_INDEX(0x580)
-#define AM4_UART3_CLKCTRL AM4_CLKCTRL_INDEX(0x588)
-#define AM4_UART4_CLKCTRL AM4_CLKCTRL_INDEX(0x590)
-#define AM4_UART5_CLKCTRL AM4_CLKCTRL_INDEX(0x598)
-#define AM4_UART6_CLKCTRL AM4_CLKCTRL_INDEX(0x5a0)
-#define AM4_OCP2SCP0_CLKCTRL AM4_CLKCTRL_INDEX(0x5b8)
-#define AM4_OCP2SCP1_CLKCTRL AM4_CLKCTRL_INDEX(0x5c0)
-#define AM4_EMIF_CLKCTRL AM4_CLKCTRL_INDEX(0x720)
-#define AM4_DSS_CORE_CLKCTRL AM4_CLKCTRL_INDEX(0xa20)
-#define AM4_CPGMAC0_CLKCTRL AM4_CLKCTRL_INDEX(0xb20)
-
-/* XXX: Compatibility part end. */
-
/* l3s_tsc clocks */
#define AM4_L3S_TSC_CLKCTRL_OFFSET 0x120
#define AM4_L3S_TSC_CLKCTRL_INDEX(offset) ((offset) - AM4_L3S_TSC_CLKCTRL_OFFSET)
@@ -158,6 +60,7 @@
#define AM4_L3S_VPFE0_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x68)
#define AM4_L3S_VPFE1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x70)
#define AM4_L3S_GPMC_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x220)
+#define AM4_L3S_ADC1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x230)
#define AM4_L3S_MCASP0_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x238)
#define AM4_L3S_MCASP1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x240)
#define AM4_L3S_MMC3_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x248)
diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h
index 9ff4f6e4558c..06d568382c77 100644
--- a/include/dt-bindings/clock/aspeed-clock.h
+++ b/include/dt-bindings/clock/aspeed-clock.h
@@ -52,5 +52,6 @@
#define ASPEED_RESET_I2C 7
#define ASPEED_RESET_AHB 8
#define ASPEED_RESET_CRT1 9
+#define ASPEED_RESET_HACE 10
#endif
diff --git a/include/dt-bindings/clock/ast2600-clock.h b/include/dt-bindings/clock/ast2600-clock.h
index 62b9520a00fd..d8b0db2f7a7d 100644
--- a/include/dt-bindings/clock/ast2600-clock.h
+++ b/include/dt-bindings/clock/ast2600-clock.h
@@ -111,6 +111,7 @@
#define ASPEED_RESET_PCIE_RC_O 19
#define ASPEED_RESET_PCIE_RC_OEN 18
#define ASPEED_RESET_PCI_DP 5
+#define ASPEED_RESET_HACE 4
#define ASPEED_RESET_AHB 1
#define ASPEED_RESET_SDRAM 0
diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h
index 38b5554153c8..3e3972a814c1 100644
--- a/include/dt-bindings/clock/at91.h
+++ b/include/dt-bindings/clock/at91.h
@@ -12,6 +12,7 @@
#define PMC_TYPE_SYSTEM 1
#define PMC_TYPE_PERIPHERAL 2
#define PMC_TYPE_GCK 3
+#define PMC_TYPE_PROGRAMMABLE 4
#define PMC_SLOW 0
#define PMC_MCK 1
@@ -20,6 +21,22 @@
#define PMC_MCK2 4
#define PMC_I2S0_MUX 5
#define PMC_I2S1_MUX 6
+#define PMC_PLLACK 7
+#define PMC_PLLBCK 8
+#define PMC_AUDIOPLLCK 9
+#define PMC_AUDIOPINCK 10
+
+/* SAMA7G5 */
+#define PMC_CPUPLL (PMC_MAIN + 1)
+#define PMC_SYSPLL (PMC_MAIN + 2)
+#define PMC_DDRPLL (PMC_MAIN + 3)
+#define PMC_IMGPLL (PMC_MAIN + 4)
+#define PMC_BAUDPLL (PMC_MAIN + 5)
+#define PMC_AUDIOPMCPLL (PMC_MAIN + 6)
+#define PMC_AUDIOIOPLL (PMC_MAIN + 7)
+#define PMC_ETHPLL (PMC_MAIN + 8)
+#define PMC_CPU (PMC_MAIN + 9)
+#define PMC_MCK1 (PMC_MAIN + 10)
#ifndef AT91_PMC_MOSCS
#define AT91_PMC_MOSCS 0 /* MOSCS Flag */
diff --git a/include/dt-bindings/clock/axg-clkc.h b/include/dt-bindings/clock/axg-clkc.h
index fd1f938c38d1..93752ea107e3 100644
--- a/include/dt-bindings/clock/axg-clkc.h
+++ b/include/dt-bindings/clock/axg-clkc.h
@@ -70,7 +70,31 @@
#define CLKID_HIFI_PLL 69
#define CLKID_PCIE_CML_EN0 79
#define CLKID_PCIE_CML_EN1 80
-#define CLKID_MIPI_ENABLE 81
#define CLKID_GEN_CLK 84
+#define CLKID_VPU_0_SEL 92
+#define CLKID_VPU_0 93
+#define CLKID_VPU_1_SEL 95
+#define CLKID_VPU_1 96
+#define CLKID_VPU 97
+#define CLKID_VAPB_0_SEL 99
+#define CLKID_VAPB_0 100
+#define CLKID_VAPB_1_SEL 102
+#define CLKID_VAPB_1 103
+#define CLKID_VAPB_SEL 104
+#define CLKID_VAPB 105
+#define CLKID_VCLK 106
+#define CLKID_VCLK2 107
+#define CLKID_VCLK_DIV1 122
+#define CLKID_VCLK_DIV2 123
+#define CLKID_VCLK_DIV4 124
+#define CLKID_VCLK_DIV6 125
+#define CLKID_VCLK_DIV12 126
+#define CLKID_VCLK2_DIV1 127
+#define CLKID_VCLK2_DIV2 128
+#define CLKID_VCLK2_DIV4 129
+#define CLKID_VCLK2_DIV6 130
+#define CLKID_VCLK2_DIV12 131
+#define CLKID_CTS_ENCL 133
+#define CLKID_VDIN_MEAS 136
#endif /* __AXG_CLKC_H */
diff --git a/include/dt-bindings/clock/axis,artpec6-clkctrl.h b/include/dt-bindings/clock/axis,artpec6-clkctrl.h
index b1f4971642e6..14e424a7c08c 100644
--- a/include/dt-bindings/clock/axis,artpec6-clkctrl.h
+++ b/include/dt-bindings/clock/axis,artpec6-clkctrl.h
@@ -2,7 +2,7 @@
/*
* ARTPEC-6 clock controller indexes
*
- * Copyright 2016 Axis Comunications AB.
+ * Copyright 2016 Axis Communications AB.
*/
#ifndef DT_BINDINGS_CLK_ARTPEC6_CLKCTRL_H
diff --git a/include/dt-bindings/clock/bcm21664.h b/include/dt-bindings/clock/bcm21664.h
index 5a7f0e4750a8..7c7492742f3d 100644
--- a/include/dt-bindings/clock/bcm21664.h
+++ b/include/dt-bindings/clock/bcm21664.h
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Broadcom Corporation
* Copyright 2013 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _CLOCK_BCM21664_H
diff --git a/include/dt-bindings/clock/bcm281xx.h b/include/dt-bindings/clock/bcm281xx.h
index a763460cf1af..d74ca42112e7 100644
--- a/include/dt-bindings/clock/bcm281xx.h
+++ b/include/dt-bindings/clock/bcm281xx.h
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Broadcom Corporation
* Copyright 2013 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _CLOCK_BCM281XX_H
diff --git a/include/dt-bindings/clock/bcm3368-clock.h b/include/dt-bindings/clock/bcm3368-clock.h
new file mode 100644
index 000000000000..74a7382f77b8
--- /dev/null
+++ b/include/dt-bindings/clock/bcm3368-clock.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_CLOCK_BCM3368_H
+#define __DT_BINDINGS_CLOCK_BCM3368_H
+
+#define BCM3368_CLK_MAC 3
+#define BCM3368_CLK_TC 5
+#define BCM3368_CLK_US_TOP 6
+#define BCM3368_CLK_DS_TOP 7
+#define BCM3368_CLK_ACM 8
+#define BCM3368_CLK_SPI 9
+#define BCM3368_CLK_USBS 10
+#define BCM3368_CLK_BMU 11
+#define BCM3368_CLK_PCM 12
+#define BCM3368_CLK_NTP 13
+#define BCM3368_CLK_ACP_B 14
+#define BCM3368_CLK_ACP_A 15
+#define BCM3368_CLK_EMUSB 17
+#define BCM3368_CLK_ENET0 18
+#define BCM3368_CLK_ENET1 19
+#define BCM3368_CLK_USBSU 20
+#define BCM3368_CLK_EPHY 21
+
+#endif /* __DT_BINDINGS_CLOCK_BCM3368_H */
diff --git a/include/dt-bindings/clock/bcm6318-clock.h b/include/dt-bindings/clock/bcm6318-clock.h
new file mode 100644
index 000000000000..c4417f8983ab
--- /dev/null
+++ b/include/dt-bindings/clock/bcm6318-clock.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_CLOCK_BCM6318_H
+#define __DT_BINDINGS_CLOCK_BCM6318_H
+
+#define BCM6318_CLK_ADSL_ASB 0
+#define BCM6318_CLK_USB_ASB 1
+#define BCM6318_CLK_MIPS_ASB 2
+#define BCM6318_CLK_PCIE_ASB 3
+#define BCM6318_CLK_PHYMIPS_ASB 4
+#define BCM6318_CLK_ROBOSW_ASB 5
+#define BCM6318_CLK_SAR_ASB 6
+#define BCM6318_CLK_SDR_ASB 7
+#define BCM6318_CLK_SWREG_ASB 8
+#define BCM6318_CLK_PERIPH_ASB 9
+#define BCM6318_CLK_CPUBUS160 10
+#define BCM6318_CLK_ADSL 11
+#define BCM6318_CLK_SAR125 12
+#define BCM6318_CLK_MIPS 13
+#define BCM6318_CLK_PCIE 14
+#define BCM6318_CLK_ROBOSW250 16
+#define BCM6318_CLK_ROBOSW025 17
+#define BCM6318_CLK_SDR 19
+#define BCM6318_CLK_USBD 20
+#define BCM6318_CLK_HSSPI 25
+#define BCM6318_CLK_PCIE25 27
+#define BCM6318_CLK_PHYMIPS 28
+#define BCM6318_CLK_AFE 29
+#define BCM6318_CLK_QPROC 30
+
+#define BCM6318_UCLK_ADSL 0
+#define BCM6318_UCLK_ARB 1
+#define BCM6318_UCLK_MIPS 2
+#define BCM6318_UCLK_PCIE 3
+#define BCM6318_UCLK_PERIPH 4
+#define BCM6318_UCLK_PHYMIPS 5
+#define BCM6318_UCLK_ROBOSW 6
+#define BCM6318_UCLK_SAR 7
+#define BCM6318_UCLK_SDR 8
+#define BCM6318_UCLK_USB 9
+
+#endif /* __DT_BINDINGS_CLOCK_BCM6318_H */
diff --git a/include/dt-bindings/clock/bcm63268-clock.h b/include/dt-bindings/clock/bcm63268-clock.h
new file mode 100644
index 000000000000..da23e691d359
--- /dev/null
+++ b/include/dt-bindings/clock/bcm63268-clock.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_CLOCK_BCM63268_H
+#define __DT_BINDINGS_CLOCK_BCM63268_H
+
+#define BCM63268_CLK_DIS_GLESS 0
+#define BCM63268_CLK_VDSL_QPROC 1
+#define BCM63268_CLK_VDSL_AFE 2
+#define BCM63268_CLK_VDSL 3
+#define BCM63268_CLK_MIPS 4
+#define BCM63268_CLK_WLAN_OCP 5
+#define BCM63268_CLK_DECT 6
+#define BCM63268_CLK_FAP0 7
+#define BCM63268_CLK_FAP1 8
+#define BCM63268_CLK_SAR 9
+#define BCM63268_CLK_ROBOSW 10
+#define BCM63268_CLK_PCM 11
+#define BCM63268_CLK_USBD 12
+#define BCM63268_CLK_USBH 13
+#define BCM63268_CLK_IPSEC 14
+#define BCM63268_CLK_SPI 15
+#define BCM63268_CLK_HSSPI 16
+#define BCM63268_CLK_PCIE 17
+#define BCM63268_CLK_PHYMIPS 18
+#define BCM63268_CLK_GMAC 19
+#define BCM63268_CLK_NAND 20
+#define BCM63268_CLK_TBUS 27
+#define BCM63268_CLK_ROBOSW250 31
+
+#endif /* __DT_BINDINGS_CLOCK_BCM63268_H */
diff --git a/include/dt-bindings/clock/bcm6328-clock.h b/include/dt-bindings/clock/bcm6328-clock.h
new file mode 100644
index 000000000000..1f6a3103f3dc
--- /dev/null
+++ b/include/dt-bindings/clock/bcm6328-clock.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_CLOCK_BCM6328_H
+#define __DT_BINDINGS_CLOCK_BCM6328_H
+
+#define BCM6328_CLK_PHYMIPS 0
+#define BCM6328_CLK_ADSL_QPROC 1
+#define BCM6328_CLK_ADSL_AFE 2
+#define BCM6328_CLK_ADSL 3
+#define BCM6328_CLK_MIPS 4
+#define BCM6328_CLK_SAR 5
+#define BCM6328_CLK_PCM 6
+#define BCM6328_CLK_USBD 7
+#define BCM6328_CLK_USBH 8
+#define BCM6328_CLK_HSSPI 9
+#define BCM6328_CLK_PCIE 10
+#define BCM6328_CLK_ROBOSW 11
+
+#endif /* __DT_BINDINGS_CLOCK_BCM6328_H */
diff --git a/include/dt-bindings/clock/bcm6358-clock.h b/include/dt-bindings/clock/bcm6358-clock.h
new file mode 100644
index 000000000000..980c9cac4765
--- /dev/null
+++ b/include/dt-bindings/clock/bcm6358-clock.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_CLOCK_BCM6358_H
+#define __DT_BINDINGS_CLOCK_BCM6358_H
+
+#define BCM6358_CLK_ENET 4
+#define BCM6358_CLK_ADSLPHY 5
+#define BCM6358_CLK_PCM 8
+#define BCM6358_CLK_SPI 9
+#define BCM6358_CLK_USBS 10
+#define BCM6358_CLK_SAR 11
+#define BCM6358_CLK_EMUSB 17
+#define BCM6358_CLK_ENET0 18
+#define BCM6358_CLK_ENET1 19
+#define BCM6358_CLK_USBSU 20
+#define BCM6358_CLK_EPHY 21
+
+#endif /* __DT_BINDINGS_CLOCK_BCM6358_H */
diff --git a/include/dt-bindings/clock/bcm6362-clock.h b/include/dt-bindings/clock/bcm6362-clock.h
new file mode 100644
index 000000000000..17655cd5bf25
--- /dev/null
+++ b/include/dt-bindings/clock/bcm6362-clock.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_CLOCK_BCM6362_H
+#define __DT_BINDINGS_CLOCK_BCM6362_H
+
+#define BCM6362_CLK_ADSL_QPROC 1
+#define BCM6362_CLK_ADSL_AFE 2
+#define BCM6362_CLK_ADSL 3
+#define BCM6362_CLK_MIPS 4
+#define BCM6362_CLK_WLAN_OCP 5
+#define BCM6362_CLK_SWPKT_USB 7
+#define BCM6362_CLK_SWPKT_SAR 8
+#define BCM6362_CLK_SAR 9
+#define BCM6362_CLK_ROBOSW 10
+#define BCM6362_CLK_PCM 11
+#define BCM6362_CLK_USBD 12
+#define BCM6362_CLK_USBH 13
+#define BCM6362_CLK_IPSEC 14
+#define BCM6362_CLK_SPI 15
+#define BCM6362_CLK_HSSPI 16
+#define BCM6362_CLK_PCIE 17
+#define BCM6362_CLK_FAP 18
+#define BCM6362_CLK_PHYMIPS 19
+#define BCM6362_CLK_NAND 20
+
+#endif /* __DT_BINDINGS_CLOCK_BCM6362_H */
diff --git a/include/dt-bindings/clock/bcm6368-clock.h b/include/dt-bindings/clock/bcm6368-clock.h
new file mode 100644
index 000000000000..f161d5333883
--- /dev/null
+++ b/include/dt-bindings/clock/bcm6368-clock.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_CLOCK_BCM6368_H
+#define __DT_BINDINGS_CLOCK_BCM6368_H
+
+#define BCM6368_CLK_VDSL_QPROC 2
+#define BCM6368_CLK_VDSL_AFE 3
+#define BCM6368_CLK_VDSL_BONDING 4
+#define BCM6368_CLK_VDSL 5
+#define BCM6368_CLK_PHYMIPS 6
+#define BCM6368_CLK_SWPKT_USB 7
+#define BCM6368_CLK_SWPKT_SAR 8
+#define BCM6368_CLK_SPI 9
+#define BCM6368_CLK_USBD 10
+#define BCM6368_CLK_SAR 11
+#define BCM6368_CLK_ROBOSW 12
+#define BCM6368_CLK_UTOPIA 13
+#define BCM6368_CLK_PCM 14
+#define BCM6368_CLK_USBH 15
+#define BCM6368_CLK_DIS_GLESS 16
+#define BCM6368_CLK_NAND 17
+#define BCM6368_CLK_IPSEC 18
+
+#endif /* __DT_BINDINGS_CLOCK_BCM6368_H */
diff --git a/include/dt-bindings/clock/boston-clock.h b/include/dt-bindings/clock/boston-clock.h
index a6f009821137..38140fa87b09 100644
--- a/include/dt-bindings/clock/boston-clock.h
+++ b/include/dt-bindings/clock/boston-clock.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2016 Imagination Technologies
- *
- * SPDX-License-Identifier: GPL-2.0
*/
#ifndef __DT_BINDINGS_CLOCK_BOSTON_CLOCK_H__
diff --git a/include/dt-bindings/clock/bt1-ccu.h b/include/dt-bindings/clock/bt1-ccu.h
new file mode 100644
index 000000000000..5f166d27a00a
--- /dev/null
+++ b/include/dt-bindings/clock/bt1-ccu.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 CCU clock indices
+ */
+#ifndef __DT_BINDINGS_CLOCK_BT1_CCU_H
+#define __DT_BINDINGS_CLOCK_BT1_CCU_H
+
+#define CCU_CPU_PLL 0
+#define CCU_SATA_PLL 1
+#define CCU_DDR_PLL 2
+#define CCU_PCIE_PLL 3
+#define CCU_ETH_PLL 4
+
+#define CCU_AXI_MAIN_CLK 0
+#define CCU_AXI_DDR_CLK 1
+#define CCU_AXI_SATA_CLK 2
+#define CCU_AXI_GMAC0_CLK 3
+#define CCU_AXI_GMAC1_CLK 4
+#define CCU_AXI_XGMAC_CLK 5
+#define CCU_AXI_PCIE_M_CLK 6
+#define CCU_AXI_PCIE_S_CLK 7
+#define CCU_AXI_USB_CLK 8
+#define CCU_AXI_HWA_CLK 9
+#define CCU_AXI_SRAM_CLK 10
+
+#define CCU_SYS_SATA_REF_CLK 0
+#define CCU_SYS_APB_CLK 1
+#define CCU_SYS_GMAC0_TX_CLK 2
+#define CCU_SYS_GMAC0_PTP_CLK 3
+#define CCU_SYS_GMAC1_TX_CLK 4
+#define CCU_SYS_GMAC1_PTP_CLK 5
+#define CCU_SYS_XGMAC_REF_CLK 6
+#define CCU_SYS_XGMAC_PTP_CLK 7
+#define CCU_SYS_USB_CLK 8
+#define CCU_SYS_PVT_CLK 9
+#define CCU_SYS_HWA_CLK 10
+#define CCU_SYS_UART_CLK 11
+#define CCU_SYS_I2C1_CLK 12
+#define CCU_SYS_I2C2_CLK 13
+#define CCU_SYS_GPIO_CLK 14
+#define CCU_SYS_TIMER0_CLK 15
+#define CCU_SYS_TIMER1_CLK 16
+#define CCU_SYS_TIMER2_CLK 17
+#define CCU_SYS_WDT_CLK 18
+
+#endif /* __DT_BINDINGS_CLOCK_BT1_CCU_H */
diff --git a/include/dt-bindings/clock/cirrus,cs2000-cp.h b/include/dt-bindings/clock/cirrus,cs2000-cp.h
new file mode 100644
index 000000000000..fe3ac71750a8
--- /dev/null
+++ b/include/dt-bindings/clock/cirrus,cs2000-cp.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2021 Daniel Mack
+ */
+
+#ifndef __DT_BINDINGS_CS2000CP_CLK_H
+#define __DT_BINDINGS_CS2000CP_CLK_H
+
+#define CS2000CP_AUX_OUTPUT_REF_CLK 0
+#define CS2000CP_AUX_OUTPUT_CLK_IN 1
+#define CS2000CP_AUX_OUTPUT_CLK_OUT 2
+#define CS2000CP_AUX_OUTPUT_PLL_LOCK 3
+
+#endif /* __DT_BINDINGS_CS2000CP_CLK_H */
diff --git a/include/dt-bindings/clock/dm814.h b/include/dt-bindings/clock/dm814.h
index f0f04e0a249e..33b8826d936b 100644
--- a/include/dt-bindings/clock/dm814.h
+++ b/include/dt-bindings/clock/dm814.h
@@ -34,4 +34,9 @@
#define DM814_MMC2_CLKCTRL DM814_CLKCTRL_INDEX(0x220)
#define DM814_MMC3_CLKCTRL DM814_CLKCTRL_INDEX(0x224)
+/* alwon_ethernet clocks */
+#define DM814_ETHERNET_CLKCTRL_OFFSET 0x1d4
+#define DM814_ETHERNET_CLKCTRL_INDEX(offset) ((offset) - DM814_ETHERNET_CLKCTRL_OFFSET)
+#define DM814_ETHERNET_CPGMAC0_CLKCTRL DM814_ETHERNET_CLKCTRL_INDEX(0x1d4)
+
#endif
diff --git a/include/dt-bindings/clock/dra7.h b/include/dt-bindings/clock/dra7.h
index 8cec5a1e1806..8a903c78c5a5 100644
--- a/include/dt-bindings/clock/dra7.h
+++ b/include/dt-bindings/clock/dra7.h
@@ -8,177 +8,6 @@
#define DRA7_CLKCTRL_OFFSET 0x20
#define DRA7_CLKCTRL_INDEX(offset) ((offset) - DRA7_CLKCTRL_OFFSET)
-/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */
-
-/* mpu clocks */
-#define DRA7_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-
-/* ipu clocks */
-#define _DRA7_IPU_CLKCTRL_OFFSET 0x40
-#define _DRA7_IPU_CLKCTRL_INDEX(offset) ((offset) - _DRA7_IPU_CLKCTRL_OFFSET)
-#define DRA7_MCASP1_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x50)
-#define DRA7_TIMER5_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x58)
-#define DRA7_TIMER6_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x60)
-#define DRA7_TIMER7_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x68)
-#define DRA7_TIMER8_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x70)
-#define DRA7_I2C5_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x78)
-#define DRA7_UART6_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x80)
-
-/* rtc clocks */
-#define DRA7_RTC_CLKCTRL_OFFSET 0x40
-#define DRA7_RTC_CLKCTRL_INDEX(offset) ((offset) - DRA7_RTC_CLKCTRL_OFFSET)
-#define DRA7_RTCSS_CLKCTRL DRA7_RTC_CLKCTRL_INDEX(0x44)
-
-/* vip clocks */
-#define DRA7_VIP1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_VIP2_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-#define DRA7_VIP3_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
-
-/* vpe clocks */
-#define DRA7_VPE_CLKCTRL_OFFSET 0x60
-#define DRA7_VPE_CLKCTRL_INDEX(offset) ((offset) - DRA7_VPE_CLKCTRL_OFFSET)
-#define DRA7_VPE_CLKCTRL DRA7_VPE_CLKCTRL_INDEX(0x64)
-
-/* coreaon clocks */
-#define DRA7_SMARTREFLEX_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-#define DRA7_SMARTREFLEX_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x38)
-
-/* l3main1 clocks */
-#define DRA7_L3_MAIN_1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_GPMC_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-#define DRA7_TPCC_CLKCTRL DRA7_CLKCTRL_INDEX(0x70)
-#define DRA7_TPTC0_CLKCTRL DRA7_CLKCTRL_INDEX(0x78)
-#define DRA7_TPTC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x80)
-#define DRA7_VCP1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
-#define DRA7_VCP2_CLKCTRL DRA7_CLKCTRL_INDEX(0x90)
-
-/* dma clocks */
-#define DRA7_DMA_SYSTEM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-
-/* emif clocks */
-#define DRA7_DMM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-
-/* atl clocks */
-#define DRA7_ATL_CLKCTRL_OFFSET 0x0
-#define DRA7_ATL_CLKCTRL_INDEX(offset) ((offset) - DRA7_ATL_CLKCTRL_OFFSET)
-#define DRA7_ATL_CLKCTRL DRA7_ATL_CLKCTRL_INDEX(0x0)
-
-/* l4cfg clocks */
-#define DRA7_L4_CFG_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_SPINLOCK_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-#define DRA7_MAILBOX1_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
-#define DRA7_MAILBOX2_CLKCTRL DRA7_CLKCTRL_INDEX(0x48)
-#define DRA7_MAILBOX3_CLKCTRL DRA7_CLKCTRL_INDEX(0x50)
-#define DRA7_MAILBOX4_CLKCTRL DRA7_CLKCTRL_INDEX(0x58)
-#define DRA7_MAILBOX5_CLKCTRL DRA7_CLKCTRL_INDEX(0x60)
-#define DRA7_MAILBOX6_CLKCTRL DRA7_CLKCTRL_INDEX(0x68)
-#define DRA7_MAILBOX7_CLKCTRL DRA7_CLKCTRL_INDEX(0x70)
-#define DRA7_MAILBOX8_CLKCTRL DRA7_CLKCTRL_INDEX(0x78)
-#define DRA7_MAILBOX9_CLKCTRL DRA7_CLKCTRL_INDEX(0x80)
-#define DRA7_MAILBOX10_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
-#define DRA7_MAILBOX11_CLKCTRL DRA7_CLKCTRL_INDEX(0x90)
-#define DRA7_MAILBOX12_CLKCTRL DRA7_CLKCTRL_INDEX(0x98)
-#define DRA7_MAILBOX13_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0)
-
-/* l3instr clocks */
-#define DRA7_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-
-/* dss clocks */
-#define DRA7_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
-
-/* gpu clocks */
-#define DRA7_GPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-
-/* l3init clocks */
-#define DRA7_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-#define DRA7_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
-#define DRA7_USB_OTG_SS2_CLKCTRL DRA7_CLKCTRL_INDEX(0x40)
-#define DRA7_USB_OTG_SS3_CLKCTRL DRA7_CLKCTRL_INDEX(0x48)
-#define DRA7_USB_OTG_SS4_CLKCTRL DRA7_CLKCTRL_INDEX(0x50)
-#define DRA7_SATA_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
-#define DRA7_PCIE1_CLKCTRL DRA7_CLKCTRL_INDEX(0xb0)
-#define DRA7_PCIE2_CLKCTRL DRA7_CLKCTRL_INDEX(0xb8)
-#define DRA7_GMAC_CLKCTRL DRA7_CLKCTRL_INDEX(0xd0)
-#define DRA7_OCP2SCP1_CLKCTRL DRA7_CLKCTRL_INDEX(0xe0)
-#define DRA7_OCP2SCP3_CLKCTRL DRA7_CLKCTRL_INDEX(0xe8)
-#define DRA7_USB_OTG_SS1_CLKCTRL DRA7_CLKCTRL_INDEX(0xf0)
-
-/* l4per clocks */
-#define _DRA7_L4PER_CLKCTRL_OFFSET 0x0
-#define _DRA7_L4PER_CLKCTRL_INDEX(offset) ((offset) - _DRA7_L4PER_CLKCTRL_OFFSET)
-#define DRA7_L4_PER2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc)
-#define DRA7_L4_PER3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x14)
-#define DRA7_TIMER10_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x28)
-#define DRA7_TIMER11_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x30)
-#define DRA7_TIMER2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x38)
-#define DRA7_TIMER3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x40)
-#define DRA7_TIMER4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x48)
-#define DRA7_TIMER9_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x50)
-#define DRA7_ELM_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x58)
-#define DRA7_GPIO2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x60)
-#define DRA7_GPIO3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x68)
-#define DRA7_GPIO4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x70)
-#define DRA7_GPIO5_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x78)
-#define DRA7_GPIO6_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x80)
-#define DRA7_HDQ1W_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x88)
-#define DRA7_EPWMSS1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x90)
-#define DRA7_EPWMSS2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x98)
-#define DRA7_I2C1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xa0)
-#define DRA7_I2C2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xa8)
-#define DRA7_I2C3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xb0)
-#define DRA7_I2C4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xb8)
-#define DRA7_L4_PER1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc0)
-#define DRA7_EPWMSS0_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc4)
-#define DRA7_TIMER13_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc8)
-#define DRA7_TIMER14_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xd0)
-#define DRA7_TIMER15_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xd8)
-#define DRA7_MCSPI1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xf0)
-#define DRA7_MCSPI2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xf8)
-#define DRA7_MCSPI3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x100)
-#define DRA7_MCSPI4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x108)
-#define DRA7_GPIO7_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x110)
-#define DRA7_GPIO8_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x118)
-#define DRA7_MMC3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x120)
-#define DRA7_MMC4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x128)
-#define DRA7_TIMER16_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x130)
-#define DRA7_QSPI_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x138)
-#define DRA7_UART1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x140)
-#define DRA7_UART2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x148)
-#define DRA7_UART3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x150)
-#define DRA7_UART4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x158)
-#define DRA7_MCASP2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x160)
-#define DRA7_MCASP3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x168)
-#define DRA7_UART5_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x170)
-#define DRA7_MCASP5_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x178)
-#define DRA7_MCASP8_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x190)
-#define DRA7_MCASP4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x198)
-#define DRA7_AES1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1a0)
-#define DRA7_AES2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1a8)
-#define DRA7_DES_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1b0)
-#define DRA7_RNG_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1c0)
-#define DRA7_SHAM_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1c8)
-#define DRA7_UART7_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1d0)
-#define DRA7_UART8_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1e0)
-#define DRA7_UART9_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1e8)
-#define DRA7_DCAN2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1f0)
-#define DRA7_MCASP6_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x204)
-#define DRA7_MCASP7_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x208)
-
-/* wkupaon clocks */
-#define DRA7_L4_WKUP_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_WD_TIMER2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
-#define DRA7_GPIO1_CLKCTRL DRA7_CLKCTRL_INDEX(0x38)
-#define DRA7_TIMER1_CLKCTRL DRA7_CLKCTRL_INDEX(0x40)
-#define DRA7_TIMER12_CLKCTRL DRA7_CLKCTRL_INDEX(0x48)
-#define DRA7_COUNTER_32K_CLKCTRL DRA7_CLKCTRL_INDEX(0x50)
-#define DRA7_UART10_CLKCTRL DRA7_CLKCTRL_INDEX(0x80)
-#define DRA7_DCAN1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
-#define DRA7_ADC_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0)
-
-/* XXX: Compatibility part end. */
-
/* mpu clocks */
#define DRA7_MPU_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
@@ -263,10 +92,17 @@
#define DRA7_L3INSTR_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
#define DRA7_L3INSTR_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+/* iva clocks */
+#define DRA7_IVA_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_SL2IF_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+
/* dss clocks */
#define DRA7_DSS_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
#define DRA7_DSS_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
+/* gpu clocks */
+#define DRA7_GPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
/* l3init clocks */
#define DRA7_L3INIT_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
#define DRA7_L3INIT_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
@@ -332,6 +168,7 @@
#define DRA7_L4SEC_DES_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1b0)
#define DRA7_L4SEC_RNG_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1c0)
#define DRA7_L4SEC_SHAM_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1c8)
+#define DRA7_L4SEC_SHAM2_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1f8)
/* l4per2 clocks */
#define DRA7_L4PER2_CLKCTRL_OFFSET 0xc
diff --git a/include/dt-bindings/clock/efm32-cmu.h b/include/dt-bindings/clock/efm32-cmu.h
deleted file mode 100644
index 4b48d15fe194..000000000000
--- a/include/dt-bindings/clock/efm32-cmu.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DT_BINDINGS_CLOCK_EFM32_CMU_H
-#define __DT_BINDINGS_CLOCK_EFM32_CMU_H
-
-#define clk_HFXO 0
-#define clk_HFRCO 1
-#define clk_LFXO 2
-#define clk_LFRCO 3
-#define clk_ULFRCO 4
-#define clk_AUXHFRCO 5
-#define clk_HFCLKNODIV 6
-#define clk_HFCLK 7
-#define clk_HFPERCLK 8
-#define clk_HFCORECLK 9
-#define clk_LFACLK 10
-#define clk_LFBCLK 11
-#define clk_WDOGCLK 12
-#define clk_HFCORECLKDMA 13
-#define clk_HFCORECLKAES 14
-#define clk_HFCORECLKUSBC 15
-#define clk_HFCORECLKUSB 16
-#define clk_HFCORECLKLE 17
-#define clk_HFCORECLKEBI 18
-#define clk_HFPERCLKUSART0 19
-#define clk_HFPERCLKUSART1 20
-#define clk_HFPERCLKUSART2 21
-#define clk_HFPERCLKUART0 22
-#define clk_HFPERCLKUART1 23
-#define clk_HFPERCLKTIMER0 24
-#define clk_HFPERCLKTIMER1 25
-#define clk_HFPERCLKTIMER2 26
-#define clk_HFPERCLKTIMER3 27
-#define clk_HFPERCLKACMP0 28
-#define clk_HFPERCLKACMP1 29
-#define clk_HFPERCLKI2C0 30
-#define clk_HFPERCLKI2C1 31
-#define clk_HFPERCLKGPIO 32
-#define clk_HFPERCLKVCMP 33
-#define clk_HFPERCLKPRS 34
-#define clk_HFPERCLKADC0 35
-#define clk_HFPERCLKDAC0 36
-
-#endif /* __DT_BINDINGS_CLOCK_EFM32_CMU_H */
diff --git a/include/dt-bindings/clock/en7523-clk.h b/include/dt-bindings/clock/en7523-clk.h
new file mode 100644
index 000000000000..717d23a5e5ae
--- /dev/null
+++ b/include/dt-bindings/clock/en7523-clk.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLOCK_AIROHA_EN7523_H_
+#define _DT_BINDINGS_CLOCK_AIROHA_EN7523_H_
+
+#define EN7523_CLK_GSW 0
+#define EN7523_CLK_EMI 1
+#define EN7523_CLK_BUS 2
+#define EN7523_CLK_SLIC 3
+#define EN7523_CLK_SPI 4
+#define EN7523_CLK_NPU 5
+#define EN7523_CLK_CRYPTO 6
+#define EN7523_CLK_PCIE 7
+
+#define EN7523_NUM_CLOCKS 8
+
+#endif /* _DT_BINDINGS_CLOCK_AIROHA_EN7523_H_ */
diff --git a/include/dt-bindings/clock/exynos4.h b/include/dt-bindings/clock/exynos4.h
index 88ec3968b90a..acbfbab875ec 100644
--- a/include/dt-bindings/clock/exynos4.h
+++ b/include/dt-bindings/clock/exynos4.h
@@ -209,6 +209,7 @@
#define CLK_ACLK400_MCUISP 395 /* Exynos4x12 only */
#define CLK_MOUT_HDMI 396
#define CLK_MOUT_MIXER 397
+#define CLK_MOUT_VPLLSRC 398
/* gate clocks - ppmu */
#define CLK_PPMULEFT 400
@@ -236,9 +237,10 @@
#define CLK_DIV_C2C 458 /* Exynos4x12 only */
#define CLK_DIV_GDL 459
#define CLK_DIV_GDR 460
+#define CLK_DIV_CORE2 461
/* must be greater than maximal clock id */
-#define CLK_NR_CLKS 461
+#define CLK_NR_CLKS 462
/* Exynos4x12 ISP clocks */
#define CLK_ISP_FIMC_ISP 1
diff --git a/include/dt-bindings/clock/exynos5250.h b/include/dt-bindings/clock/exynos5250.h
index bc8a3c53a54b..4680da7357d3 100644
--- a/include/dt-bindings/clock/exynos5250.h
+++ b/include/dt-bindings/clock/exynos5250.h
@@ -19,6 +19,7 @@
#define CLK_FOUT_EPLL 7
#define CLK_FOUT_VPLL 8
#define CLK_ARM_CLK 9
+#define CLK_DIV_ARM2 10
/* gate for special clocks (sclk) */
#define CLK_SCLK_CAM_BAYER 128
@@ -172,8 +173,11 @@
#define CLK_MOUT_GPLL 1025
#define CLK_MOUT_ACLK200_DISP1_SUB 1026
#define CLK_MOUT_ACLK300_DISP1_SUB 1027
+#define CLK_MOUT_APLL 1028
+#define CLK_MOUT_MPLL 1029
+#define CLK_MOUT_VPLLSRC 1030
/* must be greater than maximal clock id */
-#define CLK_NR_CLKS 1028
+#define CLK_NR_CLKS 1031
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5250_H */
diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h
index 02d5ac469a3d..9fffc6ceaadd 100644
--- a/include/dt-bindings/clock/exynos5420.h
+++ b/include/dt-bindings/clock/exynos5420.h
@@ -230,6 +230,12 @@
#define CLK_MOUT_USER_MAU_EPLL 659
#define CLK_MOUT_SCLK_SPLL 660
#define CLK_MOUT_MX_MSPLL_CCORE_PHY 661
+#define CLK_MOUT_SW_ACLK_G3D 662
+#define CLK_MOUT_APLL 663
+#define CLK_MOUT_MSPLL_CPU 664
+#define CLK_MOUT_KPLL 665
+#define CLK_MOUT_MSPLL_KFC 666
+
/* divider clocks */
#define CLK_DOUT_PIXEL 768
diff --git a/include/dt-bindings/clock/exynos7885.h b/include/dt-bindings/clock/exynos7885.h
new file mode 100644
index 000000000000..8256e7430b63
--- /dev/null
+++ b/include/dt-bindings/clock/exynos7885.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021 Dávid Virág
+ *
+ * Device Tree binding constants for Exynos7885 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS_7885_H
+#define _DT_BINDINGS_CLOCK_EXYNOS_7885_H
+
+/* CMU_TOP */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_FOUT_SHARED1_PLL 2
+#define CLK_DOUT_SHARED0_DIV2 3
+#define CLK_DOUT_SHARED0_DIV3 4
+#define CLK_DOUT_SHARED0_DIV4 5
+#define CLK_DOUT_SHARED0_DIV5 6
+#define CLK_DOUT_SHARED1_DIV2 7
+#define CLK_DOUT_SHARED1_DIV3 8
+#define CLK_DOUT_SHARED1_DIV4 9
+#define CLK_MOUT_CORE_BUS 10
+#define CLK_MOUT_CORE_CCI 11
+#define CLK_MOUT_CORE_G3D 12
+#define CLK_DOUT_CORE_BUS 13
+#define CLK_DOUT_CORE_CCI 14
+#define CLK_DOUT_CORE_G3D 15
+#define CLK_GOUT_CORE_BUS 16
+#define CLK_GOUT_CORE_CCI 17
+#define CLK_GOUT_CORE_G3D 18
+#define CLK_MOUT_PERI_BUS 19
+#define CLK_MOUT_PERI_SPI0 20
+#define CLK_MOUT_PERI_SPI1 21
+#define CLK_MOUT_PERI_UART0 22
+#define CLK_MOUT_PERI_UART1 23
+#define CLK_MOUT_PERI_UART2 24
+#define CLK_MOUT_PERI_USI0 25
+#define CLK_MOUT_PERI_USI1 26
+#define CLK_MOUT_PERI_USI2 27
+#define CLK_DOUT_PERI_BUS 28
+#define CLK_DOUT_PERI_SPI0 29
+#define CLK_DOUT_PERI_SPI1 30
+#define CLK_DOUT_PERI_UART0 31
+#define CLK_DOUT_PERI_UART1 32
+#define CLK_DOUT_PERI_UART2 33
+#define CLK_DOUT_PERI_USI0 34
+#define CLK_DOUT_PERI_USI1 35
+#define CLK_DOUT_PERI_USI2 36
+#define CLK_GOUT_PERI_BUS 37
+#define CLK_GOUT_PERI_SPI0 38
+#define CLK_GOUT_PERI_SPI1 39
+#define CLK_GOUT_PERI_UART0 40
+#define CLK_GOUT_PERI_UART1 41
+#define CLK_GOUT_PERI_UART2 42
+#define CLK_GOUT_PERI_USI0 43
+#define CLK_GOUT_PERI_USI1 44
+#define CLK_GOUT_PERI_USI2 45
+#define CLK_MOUT_FSYS_BUS 46
+#define CLK_MOUT_FSYS_MMC_CARD 47
+#define CLK_MOUT_FSYS_MMC_EMBD 48
+#define CLK_MOUT_FSYS_MMC_SDIO 49
+#define CLK_MOUT_FSYS_USB30DRD 50
+#define CLK_DOUT_FSYS_BUS 51
+#define CLK_DOUT_FSYS_MMC_CARD 52
+#define CLK_DOUT_FSYS_MMC_EMBD 53
+#define CLK_DOUT_FSYS_MMC_SDIO 54
+#define CLK_DOUT_FSYS_USB30DRD 55
+#define CLK_GOUT_FSYS_BUS 56
+#define CLK_GOUT_FSYS_MMC_CARD 57
+#define CLK_GOUT_FSYS_MMC_EMBD 58
+#define CLK_GOUT_FSYS_MMC_SDIO 59
+#define CLK_GOUT_FSYS_USB30DRD 60
+#define TOP_NR_CLK 61
+
+/* CMU_CORE */
+#define CLK_MOUT_CORE_BUS_USER 1
+#define CLK_MOUT_CORE_CCI_USER 2
+#define CLK_MOUT_CORE_G3D_USER 3
+#define CLK_MOUT_CORE_GIC 4
+#define CLK_DOUT_CORE_BUSP 5
+#define CLK_GOUT_CCI_ACLK 6
+#define CLK_GOUT_GIC400_CLK 7
+#define CLK_GOUT_TREX_D_CORE_ACLK 8
+#define CLK_GOUT_TREX_D_CORE_GCLK 9
+#define CLK_GOUT_TREX_D_CORE_PCLK 10
+#define CLK_GOUT_TREX_P_CORE_ACLK_P_CORE 11
+#define CLK_GOUT_TREX_P_CORE_CCLK_P_CORE 12
+#define CLK_GOUT_TREX_P_CORE_PCLK 13
+#define CLK_GOUT_TREX_P_CORE_PCLK_P_CORE 14
+#define CORE_NR_CLK 15
+
+/* CMU_PERI */
+#define CLK_MOUT_PERI_BUS_USER 1
+#define CLK_MOUT_PERI_SPI0_USER 2
+#define CLK_MOUT_PERI_SPI1_USER 3
+#define CLK_MOUT_PERI_UART0_USER 4
+#define CLK_MOUT_PERI_UART1_USER 5
+#define CLK_MOUT_PERI_UART2_USER 6
+#define CLK_MOUT_PERI_USI0_USER 7
+#define CLK_MOUT_PERI_USI1_USER 8
+#define CLK_MOUT_PERI_USI2_USER 9
+#define CLK_GOUT_GPIO_TOP_PCLK 10
+#define CLK_GOUT_HSI2C0_PCLK 11
+#define CLK_GOUT_HSI2C1_PCLK 12
+#define CLK_GOUT_HSI2C2_PCLK 13
+#define CLK_GOUT_HSI2C3_PCLK 14
+#define CLK_GOUT_I2C0_PCLK 15
+#define CLK_GOUT_I2C1_PCLK 16
+#define CLK_GOUT_I2C2_PCLK 17
+#define CLK_GOUT_I2C3_PCLK 18
+#define CLK_GOUT_I2C4_PCLK 19
+#define CLK_GOUT_I2C5_PCLK 20
+#define CLK_GOUT_I2C6_PCLK 21
+#define CLK_GOUT_I2C7_PCLK 22
+#define CLK_GOUT_PWM_MOTOR_PCLK 23
+#define CLK_GOUT_SPI0_PCLK 24
+#define CLK_GOUT_SPI0_EXT_CLK 25
+#define CLK_GOUT_SPI1_PCLK 26
+#define CLK_GOUT_SPI1_EXT_CLK 27
+#define CLK_GOUT_UART0_EXT_UCLK 28
+#define CLK_GOUT_UART0_PCLK 29
+#define CLK_GOUT_UART1_EXT_UCLK 30
+#define CLK_GOUT_UART1_PCLK 31
+#define CLK_GOUT_UART2_EXT_UCLK 32
+#define CLK_GOUT_UART2_PCLK 33
+#define CLK_GOUT_USI0_PCLK 34
+#define CLK_GOUT_USI0_SCLK 35
+#define CLK_GOUT_USI1_PCLK 36
+#define CLK_GOUT_USI1_SCLK 37
+#define CLK_GOUT_USI2_PCLK 38
+#define CLK_GOUT_USI2_SCLK 39
+#define CLK_GOUT_MCT_PCLK 40
+#define CLK_GOUT_SYSREG_PERI_PCLK 41
+#define CLK_GOUT_WDT0_PCLK 42
+#define CLK_GOUT_WDT1_PCLK 43
+#define PERI_NR_CLK 44
+
+/* CMU_FSYS */
+#define CLK_MOUT_FSYS_BUS_USER 1
+#define CLK_MOUT_FSYS_MMC_CARD_USER 2
+#define CLK_MOUT_FSYS_MMC_EMBD_USER 3
+#define CLK_MOUT_FSYS_MMC_SDIO_USER 4
+#define CLK_MOUT_FSYS_USB30DRD_USER 5
+#define CLK_GOUT_MMC_CARD_ACLK 6
+#define CLK_GOUT_MMC_CARD_SDCLKIN 7
+#define CLK_GOUT_MMC_EMBD_ACLK 8
+#define CLK_GOUT_MMC_EMBD_SDCLKIN 9
+#define CLK_GOUT_MMC_SDIO_ACLK 10
+#define CLK_GOUT_MMC_SDIO_SDCLKIN 11
+#define FSYS_NR_CLK 12
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOS_7885_H */
diff --git a/include/dt-bindings/clock/exynos850.h b/include/dt-bindings/clock/exynos850.h
new file mode 100644
index 000000000000..88d5289883d3
--- /dev/null
+++ b/include/dt-bindings/clock/exynos850.h
@@ -0,0 +1,325 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2021 Linaro Ltd.
+ * Author: Sam Protsenko <semen.protsenko@linaro.org>
+ *
+ * Device Tree binding constants for Exynos850 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS_850_H
+#define _DT_BINDINGS_CLOCK_EXYNOS_850_H
+
+/* CMU_TOP */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_FOUT_SHARED1_PLL 2
+#define CLK_FOUT_MMC_PLL 3
+#define CLK_MOUT_SHARED0_PLL 4
+#define CLK_MOUT_SHARED1_PLL 5
+#define CLK_MOUT_MMC_PLL 6
+#define CLK_MOUT_CORE_BUS 7
+#define CLK_MOUT_CORE_CCI 8
+#define CLK_MOUT_CORE_MMC_EMBD 9
+#define CLK_MOUT_CORE_SSS 10
+#define CLK_MOUT_DPU 11
+#define CLK_MOUT_HSI_BUS 12
+#define CLK_MOUT_HSI_MMC_CARD 13
+#define CLK_MOUT_HSI_USB20DRD 14
+#define CLK_MOUT_PERI_BUS 15
+#define CLK_MOUT_PERI_UART 16
+#define CLK_MOUT_PERI_IP 17
+#define CLK_DOUT_SHARED0_DIV3 18
+#define CLK_DOUT_SHARED0_DIV2 19
+#define CLK_DOUT_SHARED1_DIV3 20
+#define CLK_DOUT_SHARED1_DIV2 21
+#define CLK_DOUT_SHARED0_DIV4 22
+#define CLK_DOUT_SHARED1_DIV4 23
+#define CLK_DOUT_CORE_BUS 24
+#define CLK_DOUT_CORE_CCI 25
+#define CLK_DOUT_CORE_MMC_EMBD 26
+#define CLK_DOUT_CORE_SSS 27
+#define CLK_DOUT_DPU 28
+#define CLK_DOUT_HSI_BUS 29
+#define CLK_DOUT_HSI_MMC_CARD 30
+#define CLK_DOUT_HSI_USB20DRD 31
+#define CLK_DOUT_PERI_BUS 32
+#define CLK_DOUT_PERI_UART 33
+#define CLK_DOUT_PERI_IP 34
+#define CLK_GOUT_CORE_BUS 35
+#define CLK_GOUT_CORE_CCI 36
+#define CLK_GOUT_CORE_MMC_EMBD 37
+#define CLK_GOUT_CORE_SSS 38
+#define CLK_GOUT_DPU 39
+#define CLK_GOUT_HSI_BUS 40
+#define CLK_GOUT_HSI_MMC_CARD 41
+#define CLK_GOUT_HSI_USB20DRD 42
+#define CLK_GOUT_PERI_BUS 43
+#define CLK_GOUT_PERI_UART 44
+#define CLK_GOUT_PERI_IP 45
+#define CLK_MOUT_CLKCMU_APM_BUS 46
+#define CLK_DOUT_CLKCMU_APM_BUS 47
+#define CLK_GOUT_CLKCMU_APM_BUS 48
+#define CLK_MOUT_AUD 49
+#define CLK_GOUT_AUD 50
+#define CLK_DOUT_AUD 51
+#define CLK_MOUT_IS_BUS 52
+#define CLK_MOUT_IS_ITP 53
+#define CLK_MOUT_IS_VRA 54
+#define CLK_MOUT_IS_GDC 55
+#define CLK_GOUT_IS_BUS 56
+#define CLK_GOUT_IS_ITP 57
+#define CLK_GOUT_IS_VRA 58
+#define CLK_GOUT_IS_GDC 59
+#define CLK_DOUT_IS_BUS 60
+#define CLK_DOUT_IS_ITP 61
+#define CLK_DOUT_IS_VRA 62
+#define CLK_DOUT_IS_GDC 63
+#define CLK_MOUT_MFCMSCL_MFC 64
+#define CLK_MOUT_MFCMSCL_M2M 65
+#define CLK_MOUT_MFCMSCL_MCSC 66
+#define CLK_MOUT_MFCMSCL_JPEG 67
+#define CLK_GOUT_MFCMSCL_MFC 68
+#define CLK_GOUT_MFCMSCL_M2M 69
+#define CLK_GOUT_MFCMSCL_MCSC 70
+#define CLK_GOUT_MFCMSCL_JPEG 71
+#define CLK_DOUT_MFCMSCL_MFC 72
+#define CLK_DOUT_MFCMSCL_M2M 73
+#define CLK_DOUT_MFCMSCL_MCSC 74
+#define CLK_DOUT_MFCMSCL_JPEG 75
+#define TOP_NR_CLK 76
+
+/* CMU_APM */
+#define CLK_RCO_I3C_PMIC 1
+#define OSCCLK_RCO_APM 2
+#define CLK_RCO_APM__ALV 3
+#define CLK_DLL_DCO 4
+#define CLK_MOUT_APM_BUS_USER 5
+#define CLK_MOUT_RCO_APM_I3C_USER 6
+#define CLK_MOUT_RCO_APM_USER 7
+#define CLK_MOUT_DLL_USER 8
+#define CLK_MOUT_CLKCMU_CHUB_BUS 9
+#define CLK_MOUT_APM_BUS 10
+#define CLK_MOUT_APM_I3C 11
+#define CLK_DOUT_CLKCMU_CHUB_BUS 12
+#define CLK_DOUT_APM_BUS 13
+#define CLK_DOUT_APM_I3C 14
+#define CLK_GOUT_CLKCMU_CMGP_BUS 15
+#define CLK_GOUT_CLKCMU_CHUB_BUS 16
+#define CLK_GOUT_RTC_PCLK 17
+#define CLK_GOUT_TOP_RTC_PCLK 18
+#define CLK_GOUT_I3C_PCLK 19
+#define CLK_GOUT_I3C_SCLK 20
+#define CLK_GOUT_SPEEDY_PCLK 21
+#define CLK_GOUT_GPIO_ALIVE_PCLK 22
+#define CLK_GOUT_PMU_ALIVE_PCLK 23
+#define CLK_GOUT_SYSREG_APM_PCLK 24
+#define APM_NR_CLK 25
+
+/* CMU_AUD */
+#define CLK_DOUT_AUD_AUDIF 1
+#define CLK_DOUT_AUD_BUSD 2
+#define CLK_DOUT_AUD_BUSP 3
+#define CLK_DOUT_AUD_CNT 4
+#define CLK_DOUT_AUD_CPU 5
+#define CLK_DOUT_AUD_CPU_ACLK 6
+#define CLK_DOUT_AUD_CPU_PCLKDBG 7
+#define CLK_DOUT_AUD_FM 8
+#define CLK_DOUT_AUD_FM_SPDY 9
+#define CLK_DOUT_AUD_MCLK 10
+#define CLK_DOUT_AUD_UAIF0 11
+#define CLK_DOUT_AUD_UAIF1 12
+#define CLK_DOUT_AUD_UAIF2 13
+#define CLK_DOUT_AUD_UAIF3 14
+#define CLK_DOUT_AUD_UAIF4 15
+#define CLK_DOUT_AUD_UAIF5 16
+#define CLK_DOUT_AUD_UAIF6 17
+#define CLK_FOUT_AUD_PLL 18
+#define CLK_GOUT_AUD_ABOX_ACLK 19
+#define CLK_GOUT_AUD_ASB_CCLK 20
+#define CLK_GOUT_AUD_CA32_CCLK 21
+#define CLK_GOUT_AUD_CNT_BCLK 22
+#define CLK_GOUT_AUD_CODEC_MCLK 23
+#define CLK_GOUT_AUD_DAP_CCLK 24
+#define CLK_GOUT_AUD_GPIO_PCLK 25
+#define CLK_GOUT_AUD_PPMU_ACLK 26
+#define CLK_GOUT_AUD_PPMU_PCLK 27
+#define CLK_GOUT_AUD_SPDY_BCLK 28
+#define CLK_GOUT_AUD_SYSMMU_CLK 29
+#define CLK_GOUT_AUD_SYSREG_PCLK 30
+#define CLK_GOUT_AUD_TZPC_PCLK 31
+#define CLK_GOUT_AUD_UAIF0_BCLK 32
+#define CLK_GOUT_AUD_UAIF1_BCLK 33
+#define CLK_GOUT_AUD_UAIF2_BCLK 34
+#define CLK_GOUT_AUD_UAIF3_BCLK 35
+#define CLK_GOUT_AUD_UAIF4_BCLK 36
+#define CLK_GOUT_AUD_UAIF5_BCLK 37
+#define CLK_GOUT_AUD_UAIF6_BCLK 38
+#define CLK_GOUT_AUD_WDT_PCLK 39
+#define CLK_MOUT_AUD_CPU 40
+#define CLK_MOUT_AUD_CPU_HCH 41
+#define CLK_MOUT_AUD_CPU_USER 42
+#define CLK_MOUT_AUD_FM 43
+#define CLK_MOUT_AUD_PLL 44
+#define CLK_MOUT_AUD_TICK_USB_USER 45
+#define CLK_MOUT_AUD_UAIF0 46
+#define CLK_MOUT_AUD_UAIF1 47
+#define CLK_MOUT_AUD_UAIF2 48
+#define CLK_MOUT_AUD_UAIF3 49
+#define CLK_MOUT_AUD_UAIF4 50
+#define CLK_MOUT_AUD_UAIF5 51
+#define CLK_MOUT_AUD_UAIF6 52
+#define IOCLK_AUDIOCDCLK0 53
+#define IOCLK_AUDIOCDCLK1 54
+#define IOCLK_AUDIOCDCLK2 55
+#define IOCLK_AUDIOCDCLK3 56
+#define IOCLK_AUDIOCDCLK4 57
+#define IOCLK_AUDIOCDCLK5 58
+#define IOCLK_AUDIOCDCLK6 59
+#define TICK_USB 60
+#define AUD_NR_CLK 61
+
+/* CMU_CMGP */
+#define CLK_RCO_CMGP 1
+#define CLK_MOUT_CMGP_ADC 2
+#define CLK_MOUT_CMGP_USI0 3
+#define CLK_MOUT_CMGP_USI1 4
+#define CLK_DOUT_CMGP_ADC 5
+#define CLK_DOUT_CMGP_USI0 6
+#define CLK_DOUT_CMGP_USI1 7
+#define CLK_GOUT_CMGP_ADC_S0_PCLK 8
+#define CLK_GOUT_CMGP_ADC_S1_PCLK 9
+#define CLK_GOUT_CMGP_GPIO_PCLK 10
+#define CLK_GOUT_CMGP_USI0_IPCLK 11
+#define CLK_GOUT_CMGP_USI0_PCLK 12
+#define CLK_GOUT_CMGP_USI1_IPCLK 13
+#define CLK_GOUT_CMGP_USI1_PCLK 14
+#define CLK_GOUT_SYSREG_CMGP_PCLK 15
+#define CMGP_NR_CLK 16
+
+/* CMU_HSI */
+#define CLK_MOUT_HSI_BUS_USER 1
+#define CLK_MOUT_HSI_MMC_CARD_USER 2
+#define CLK_MOUT_HSI_USB20DRD_USER 3
+#define CLK_MOUT_HSI_RTC 4
+#define CLK_GOUT_USB_RTC_CLK 5
+#define CLK_GOUT_USB_REF_CLK 6
+#define CLK_GOUT_USB_PHY_REF_CLK 7
+#define CLK_GOUT_USB_PHY_ACLK 8
+#define CLK_GOUT_USB_BUS_EARLY_CLK 9
+#define CLK_GOUT_GPIO_HSI_PCLK 10
+#define CLK_GOUT_MMC_CARD_ACLK 11
+#define CLK_GOUT_MMC_CARD_SDCLKIN 12
+#define CLK_GOUT_SYSREG_HSI_PCLK 13
+#define HSI_NR_CLK 14
+
+/* CMU_IS */
+#define CLK_MOUT_IS_BUS_USER 1
+#define CLK_MOUT_IS_ITP_USER 2
+#define CLK_MOUT_IS_VRA_USER 3
+#define CLK_MOUT_IS_GDC_USER 4
+#define CLK_DOUT_IS_BUSP 5
+#define CLK_GOUT_IS_CMU_IS_PCLK 6
+#define CLK_GOUT_IS_CSIS0_ACLK 7
+#define CLK_GOUT_IS_CSIS1_ACLK 8
+#define CLK_GOUT_IS_CSIS2_ACLK 9
+#define CLK_GOUT_IS_TZPC_PCLK 10
+#define CLK_GOUT_IS_CSIS_DMA_CLK 11
+#define CLK_GOUT_IS_GDC_CLK 12
+#define CLK_GOUT_IS_IPP_CLK 13
+#define CLK_GOUT_IS_ITP_CLK 14
+#define CLK_GOUT_IS_MCSC_CLK 15
+#define CLK_GOUT_IS_VRA_CLK 16
+#define CLK_GOUT_IS_PPMU_IS0_ACLK 17
+#define CLK_GOUT_IS_PPMU_IS0_PCLK 18
+#define CLK_GOUT_IS_PPMU_IS1_ACLK 19
+#define CLK_GOUT_IS_PPMU_IS1_PCLK 20
+#define CLK_GOUT_IS_SYSMMU_IS0_CLK 21
+#define CLK_GOUT_IS_SYSMMU_IS1_CLK 22
+#define CLK_GOUT_IS_SYSREG_PCLK 23
+#define IS_NR_CLK 24
+
+/* CMU_MFCMSCL */
+#define CLK_MOUT_MFCMSCL_MFC_USER 1
+#define CLK_MOUT_MFCMSCL_M2M_USER 2
+#define CLK_MOUT_MFCMSCL_MCSC_USER 3
+#define CLK_MOUT_MFCMSCL_JPEG_USER 4
+#define CLK_DOUT_MFCMSCL_BUSP 5
+#define CLK_GOUT_MFCMSCL_CMU_MFCMSCL_PCLK 6
+#define CLK_GOUT_MFCMSCL_TZPC_PCLK 7
+#define CLK_GOUT_MFCMSCL_JPEG_ACLK 8
+#define CLK_GOUT_MFCMSCL_M2M_ACLK 9
+#define CLK_GOUT_MFCMSCL_MCSC_CLK 10
+#define CLK_GOUT_MFCMSCL_MFC_ACLK 11
+#define CLK_GOUT_MFCMSCL_PPMU_ACLK 12
+#define CLK_GOUT_MFCMSCL_PPMU_PCLK 13
+#define CLK_GOUT_MFCMSCL_SYSMMU_CLK 14
+#define CLK_GOUT_MFCMSCL_SYSREG_PCLK 15
+#define MFCMSCL_NR_CLK 16
+
+/* CMU_PERI */
+#define CLK_MOUT_PERI_BUS_USER 1
+#define CLK_MOUT_PERI_UART_USER 2
+#define CLK_MOUT_PERI_HSI2C_USER 3
+#define CLK_MOUT_PERI_SPI_USER 4
+#define CLK_DOUT_PERI_HSI2C0 5
+#define CLK_DOUT_PERI_HSI2C1 6
+#define CLK_DOUT_PERI_HSI2C2 7
+#define CLK_DOUT_PERI_SPI0 8
+#define CLK_GOUT_PERI_HSI2C0 9
+#define CLK_GOUT_PERI_HSI2C1 10
+#define CLK_GOUT_PERI_HSI2C2 11
+#define CLK_GOUT_GPIO_PERI_PCLK 12
+#define CLK_GOUT_HSI2C0_IPCLK 13
+#define CLK_GOUT_HSI2C0_PCLK 14
+#define CLK_GOUT_HSI2C1_IPCLK 15
+#define CLK_GOUT_HSI2C1_PCLK 16
+#define CLK_GOUT_HSI2C2_IPCLK 17
+#define CLK_GOUT_HSI2C2_PCLK 18
+#define CLK_GOUT_I2C0_PCLK 19
+#define CLK_GOUT_I2C1_PCLK 20
+#define CLK_GOUT_I2C2_PCLK 21
+#define CLK_GOUT_I2C3_PCLK 22
+#define CLK_GOUT_I2C4_PCLK 23
+#define CLK_GOUT_I2C5_PCLK 24
+#define CLK_GOUT_I2C6_PCLK 25
+#define CLK_GOUT_MCT_PCLK 26
+#define CLK_GOUT_PWM_MOTOR_PCLK 27
+#define CLK_GOUT_SPI0_IPCLK 28
+#define CLK_GOUT_SPI0_PCLK 29
+#define CLK_GOUT_SYSREG_PERI_PCLK 30
+#define CLK_GOUT_UART_IPCLK 31
+#define CLK_GOUT_UART_PCLK 32
+#define CLK_GOUT_WDT0_PCLK 33
+#define CLK_GOUT_WDT1_PCLK 34
+#define PERI_NR_CLK 35
+
+/* CMU_CORE */
+#define CLK_MOUT_CORE_BUS_USER 1
+#define CLK_MOUT_CORE_CCI_USER 2
+#define CLK_MOUT_CORE_MMC_EMBD_USER 3
+#define CLK_MOUT_CORE_SSS_USER 4
+#define CLK_MOUT_CORE_GIC 5
+#define CLK_DOUT_CORE_BUSP 6
+#define CLK_GOUT_CCI_ACLK 7
+#define CLK_GOUT_GIC_CLK 8
+#define CLK_GOUT_MMC_EMBD_ACLK 9
+#define CLK_GOUT_MMC_EMBD_SDCLKIN 10
+#define CLK_GOUT_SSS_ACLK 11
+#define CLK_GOUT_SSS_PCLK 12
+#define CLK_GOUT_GPIO_CORE_PCLK 13
+#define CLK_GOUT_SYSREG_CORE_PCLK 14
+#define CORE_NR_CLK 15
+
+/* CMU_DPU */
+#define CLK_MOUT_DPU_USER 1
+#define CLK_DOUT_DPU_BUSP 2
+#define CLK_GOUT_DPU_CMU_DPU_PCLK 3
+#define CLK_GOUT_DPU_DECON0_ACLK 4
+#define CLK_GOUT_DPU_DMA_ACLK 5
+#define CLK_GOUT_DPU_DPP_ACLK 6
+#define CLK_GOUT_DPU_PPMU_ACLK 7
+#define CLK_GOUT_DPU_PPMU_PCLK 8
+#define CLK_GOUT_DPU_SMMU_CLK 9
+#define CLK_GOUT_DPU_SYSREG_PCLK 10
+#define DPU_NR_CLK 11
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOS_850_H */
diff --git a/include/dt-bindings/clock/fsd-clk.h b/include/dt-bindings/clock/fsd-clk.h
new file mode 100644
index 000000000000..c8a2af1dd1ad
--- /dev/null
+++ b/include/dt-bindings/clock/fsd-clk.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2022 Samsung Electronics Co., Ltd.
+ * https://www.samsung.com
+ * Copyright (c) 2017-2022 Tesla, Inc.
+ * https://www.tesla.com
+ *
+ * The constants defined in this header are used in the device tree
+ * sources and in the FSD platform clock driver.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_FSD_H
+#define _DT_BINDINGS_CLOCK_FSD_H
+
+/* CMU */
+#define DOUT_CMU_PLL_SHARED0_DIV4 1
+#define DOUT_CMU_PERIC_SHARED1DIV36 2
+#define DOUT_CMU_PERIC_SHARED0DIV3_TBUCLK 3
+#define DOUT_CMU_PERIC_SHARED0DIV20 4
+#define DOUT_CMU_PERIC_SHARED1DIV4_DMACLK 5
+#define DOUT_CMU_PLL_SHARED0_DIV6 6
+#define DOUT_CMU_FSYS0_SHARED1DIV4 7
+#define DOUT_CMU_FSYS0_SHARED0DIV4 8
+#define DOUT_CMU_FSYS1_SHARED0DIV8 9
+#define DOUT_CMU_FSYS1_SHARED0DIV4 10
+#define CMU_CPUCL_SWITCH_GATE 11
+#define DOUT_CMU_IMEM_TCUCLK 12
+#define DOUT_CMU_IMEM_ACLK 13
+#define DOUT_CMU_IMEM_DMACLK 14
+#define GAT_CMU_FSYS0_SHARED0DIV4 15
+#define CMU_NR_CLK 16
+
+/* PERIC */
+#define PERIC_SCLK_UART0 1
+#define PERIC_PCLK_UART0 2
+#define PERIC_SCLK_UART1 3
+#define PERIC_PCLK_UART1 4
+#define PERIC_DMA0_IPCLKPORT_ACLK 5
+#define PERIC_DMA1_IPCLKPORT_ACLK 6
+#define PERIC_PWM0_IPCLKPORT_I_PCLK_S0 7
+#define PERIC_PWM1_IPCLKPORT_I_PCLK_S0 8
+#define PERIC_PCLK_SPI0 9
+#define PERIC_SCLK_SPI0 10
+#define PERIC_PCLK_SPI1 11
+#define PERIC_SCLK_SPI1 12
+#define PERIC_PCLK_SPI2 13
+#define PERIC_SCLK_SPI2 14
+#define PERIC_PCLK_TDM0 15
+#define PERIC_PCLK_HSI2C0 16
+#define PERIC_PCLK_HSI2C1 17
+#define PERIC_PCLK_HSI2C2 18
+#define PERIC_PCLK_HSI2C3 19
+#define PERIC_PCLK_HSI2C4 20
+#define PERIC_PCLK_HSI2C5 21
+#define PERIC_PCLK_HSI2C6 22
+#define PERIC_PCLK_HSI2C7 23
+#define PERIC_MCAN0_IPCLKPORT_CCLK 24
+#define PERIC_MCAN0_IPCLKPORT_PCLK 25
+#define PERIC_MCAN1_IPCLKPORT_CCLK 26
+#define PERIC_MCAN1_IPCLKPORT_PCLK 27
+#define PERIC_MCAN2_IPCLKPORT_CCLK 28
+#define PERIC_MCAN2_IPCLKPORT_PCLK 29
+#define PERIC_MCAN3_IPCLKPORT_CCLK 30
+#define PERIC_MCAN3_IPCLKPORT_PCLK 31
+#define PERIC_PCLK_ADCIF 32
+#define PERIC_EQOS_TOP_IPCLKPORT_CLK_PTP_REF_I 33
+#define PERIC_EQOS_TOP_IPCLKPORT_ACLK_I 34
+#define PERIC_EQOS_TOP_IPCLKPORT_HCLK_I 35
+#define PERIC_EQOS_TOP_IPCLKPORT_RGMII_CLK_I 36
+#define PERIC_EQOS_TOP_IPCLKPORT_CLK_RX_I 37
+#define PERIC_BUS_D_PERIC_IPCLKPORT_EQOSCLK 38
+#define PERIC_BUS_P_PERIC_IPCLKPORT_EQOSCLK 39
+#define PERIC_HCLK_TDM0 40
+#define PERIC_PCLK_TDM1 41
+#define PERIC_HCLK_TDM1 42
+#define PERIC_EQOS_PHYRXCLK_MUX 43
+#define PERIC_EQOS_PHYRXCLK 44
+#define PERIC_DOUT_RGMII_CLK 45
+#define PERIC_NR_CLK 46
+
+/* FSYS0 */
+#define UFS0_MPHY_REFCLK_IXTAL24 1
+#define UFS0_MPHY_REFCLK_IXTAL26 2
+#define UFS1_MPHY_REFCLK_IXTAL24 3
+#define UFS1_MPHY_REFCLK_IXTAL26 4
+#define UFS0_TOP0_HCLK_BUS 5
+#define UFS0_TOP0_ACLK 6
+#define UFS0_TOP0_CLK_UNIPRO 7
+#define UFS0_TOP0_FMP_CLK 8
+#define UFS1_TOP1_HCLK_BUS 9
+#define UFS1_TOP1_ACLK 10
+#define UFS1_TOP1_CLK_UNIPRO 11
+#define UFS1_TOP1_FMP_CLK 12
+#define PCIE_SUBCTRL_INST0_DBI_ACLK_SOC 13
+#define PCIE_SUBCTRL_INST0_AUX_CLK_SOC 14
+#define PCIE_SUBCTRL_INST0_MSTR_ACLK_SOC 15
+#define PCIE_SUBCTRL_INST0_SLV_ACLK_SOC 16
+#define FSYS0_EQOS_TOP0_IPCLKPORT_CLK_PTP_REF_I 17
+#define FSYS0_EQOS_TOP0_IPCLKPORT_ACLK_I 18
+#define FSYS0_EQOS_TOP0_IPCLKPORT_HCLK_I 19
+#define FSYS0_EQOS_TOP0_IPCLKPORT_RGMII_CLK_I 20
+#define FSYS0_EQOS_TOP0_IPCLKPORT_CLK_RX_I 21
+#define FSYS0_DOUT_FSYS0_PERIBUS_GRP 22
+#define FSYS0_NR_CLK 23
+
+/* FSYS1 */
+#define PCIE_LINK0_IPCLKPORT_DBI_ACLK 1
+#define PCIE_LINK0_IPCLKPORT_AUX_ACLK 2
+#define PCIE_LINK0_IPCLKPORT_MSTR_ACLK 3
+#define PCIE_LINK0_IPCLKPORT_SLV_ACLK 4
+#define PCIE_LINK1_IPCLKPORT_DBI_ACLK 5
+#define PCIE_LINK1_IPCLKPORT_AUX_ACLK 6
+#define PCIE_LINK1_IPCLKPORT_MSTR_ACLK 7
+#define PCIE_LINK1_IPCLKPORT_SLV_ACLK 8
+#define FSYS1_NR_CLK 9
+
+/* IMEM */
+#define IMEM_DMA0_IPCLKPORT_ACLK 1
+#define IMEM_DMA1_IPCLKPORT_ACLK 2
+#define IMEM_WDT0_IPCLKPORT_PCLK 3
+#define IMEM_WDT1_IPCLKPORT_PCLK 4
+#define IMEM_WDT2_IPCLKPORT_PCLK 5
+#define IMEM_MCT_PCLK 6
+#define IMEM_TMU_CPU0_IPCLKPORT_I_CLK_TS 7
+#define IMEM_TMU_CPU2_IPCLKPORT_I_CLK_TS 8
+#define IMEM_TMU_TOP_IPCLKPORT_I_CLK_TS 9
+#define IMEM_TMU_GPU_IPCLKPORT_I_CLK_TS 10
+#define IMEM_TMU_GT_IPCLKPORT_I_CLK_TS 11
+#define IMEM_NR_CLK 12
+
+/* MFC */
+#define MFC_MFC_IPCLKPORT_ACLK 1
+#define MFC_NR_CLK 2
+
+/* CAM_CSI */
+#define CAM_CSI0_0_IPCLKPORT_I_ACLK 1
+#define CAM_CSI0_1_IPCLKPORT_I_ACLK 2
+#define CAM_CSI0_2_IPCLKPORT_I_ACLK 3
+#define CAM_CSI0_3_IPCLKPORT_I_ACLK 4
+#define CAM_CSI1_0_IPCLKPORT_I_ACLK 5
+#define CAM_CSI1_1_IPCLKPORT_I_ACLK 6
+#define CAM_CSI1_2_IPCLKPORT_I_ACLK 7
+#define CAM_CSI1_3_IPCLKPORT_I_ACLK 8
+#define CAM_CSI2_0_IPCLKPORT_I_ACLK 9
+#define CAM_CSI2_1_IPCLKPORT_I_ACLK 10
+#define CAM_CSI2_2_IPCLKPORT_I_ACLK 11
+#define CAM_CSI2_3_IPCLKPORT_I_ACLK 12
+#define CAM_CSI_NR_CLK 13
+
+#endif /* _DT_BINDINGS_CLOCK_FSD_H */
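A minimal sketch of how a dts consumer picks these constants up: the PERIC ids resolve against the PERIC CMU provider node. Node label, compatible string, and address here are illustrative:

	#include <dt-bindings/clock/fsd-clk.h>

	serial_0: uart@14180000 {
		compatible = "samsung,exynos4210-uart";
		reg = <0x14180000 0x100>;
		clocks = <&clock_peric PERIC_PCLK_UART0>,
			 <&clock_peric PERIC_SCLK_UART0>;
		clock-names = "uart", "clk_uart_baud0";
	};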
diff --git a/include/dt-bindings/clock/fsl,qoriq-clockgen.h b/include/dt-bindings/clock/fsl,qoriq-clockgen.h
new file mode 100644
index 000000000000..ddec7d0bdc7f
--- /dev/null
+++ b/include/dt-bindings/clock/fsl,qoriq-clockgen.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef DT_CLOCK_FSL_QORIQ_CLOCKGEN_H
+#define DT_CLOCK_FSL_QORIQ_CLOCKGEN_H
+
+#define QORIQ_CLK_SYSCLK 0
+#define QORIQ_CLK_CMUX 1
+#define QORIQ_CLK_HWACCEL 2
+#define QORIQ_CLK_FMAN 3
+#define QORIQ_CLK_PLATFORM_PLL 4
+#define QORIQ_CLK_CORECLK 5
+
+#define QORIQ_CLK_PLL_DIV(x) ((x) - 1)
+
+#endif /* DT_CLOCK_FSL_QORIQ_CLOCKGEN_H */
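QORIQ_CLK_PLL_DIV(x) maps "PLL output divided by x" onto the zero-based cell the clockgen provider expects, so divide-by-4 becomes cell 3. A hedged usage sketch (the consumer node is illustrative):

	#include <dt-bindings/clock/fsl,qoriq-clockgen.h>

	&usb0 {
		/* platform PLL / 4: QORIQ_CLK_PLL_DIV(4) == 3 */
		clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL QORIQ_CLK_PLL_DIV(4)>;
	};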
diff --git a/include/dt-bindings/clock/g12a-clkc.h b/include/dt-bindings/clock/g12a-clkc.h
index 0837c1a7ae49..a93b58c5e18e 100644
--- a/include/dt-bindings/clock/g12a-clkc.h
+++ b/include/dt-bindings/clock/g12a-clkc.h
@@ -143,5 +143,11 @@
#define CLKID_CPU1_CLK 253
#define CLKID_CPU2_CLK 254
#define CLKID_CPU3_CLK 255
+#define CLKID_SPICC0_SCLK 258
+#define CLKID_SPICC1_SCLK 261
+#define CLKID_NNA_AXI_CLK 264
+#define CLKID_NNA_CORE_CLK 267
+#define CLKID_MIPI_DSI_PXCLK_SEL 269
+#define CLKID_MIPI_DSI_PXCLK 270
#endif /* __G12A_CLKC_H */
diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h
index db0763e96173..4073eb7a9da1 100644
--- a/include/dt-bindings/clock/gxbb-clkc.h
+++ b/include/dt-bindings/clock/gxbb-clkc.h
@@ -146,5 +146,6 @@
#define CLKID_CTS_VDAC 201
#define CLKID_HDMI_TX 202
#define CLKID_HDMI 205
+#define CLKID_ACODEC 206
#endif /* __GXBB_CLKC_H */
diff --git a/include/dt-bindings/clock/hi3559av100-clock.h b/include/dt-bindings/clock/hi3559av100-clock.h
new file mode 100644
index 000000000000..5fe7689010a0
--- /dev/null
+++ b/include/dt-bindings/clock/hi3559av100-clock.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later OR BSD-2-Clause */
+/*
+ * Copyright (c) 2019-2020, Huawei Tech. Co., Ltd.
+ *
+ * Author: Dongjiu Geng <gengdongjiu@huawei.com>
+ */
+
+#ifndef __DTS_HI3559AV100_CLOCK_H
+#define __DTS_HI3559AV100_CLOCK_H
+
+/* fixed rate */
+#define HI3559AV100_FIXED_1188M 1
+#define HI3559AV100_FIXED_1000M 2
+#define HI3559AV100_FIXED_842M 3
+#define HI3559AV100_FIXED_792M 4
+#define HI3559AV100_FIXED_750M 5
+#define HI3559AV100_FIXED_710M 6
+#define HI3559AV100_FIXED_680M 7
+#define HI3559AV100_FIXED_667M 8
+#define HI3559AV100_FIXED_631M 9
+#define HI3559AV100_FIXED_600M 10
+#define HI3559AV100_FIXED_568M 11
+#define HI3559AV100_FIXED_500M 12
+#define HI3559AV100_FIXED_475M 13
+#define HI3559AV100_FIXED_428M 14
+#define HI3559AV100_FIXED_400M 15
+#define HI3559AV100_FIXED_396M 16
+#define HI3559AV100_FIXED_300M 17
+#define HI3559AV100_FIXED_250M 18
+#define HI3559AV100_FIXED_198M 19
+#define HI3559AV100_FIXED_187p5M 20
+#define HI3559AV100_FIXED_150M 21
+#define HI3559AV100_FIXED_148p5M 22
+#define HI3559AV100_FIXED_125M 23
+#define HI3559AV100_FIXED_107M 24
+#define HI3559AV100_FIXED_100M 25
+#define HI3559AV100_FIXED_99M 26
+#define HI3559AV100_FIXED_74p25M 27
+#define HI3559AV100_FIXED_72M 28
+#define HI3559AV100_FIXED_60M 29
+#define HI3559AV100_FIXED_54M 30
+#define HI3559AV100_FIXED_50M 31
+#define HI3559AV100_FIXED_49p5M 32
+#define HI3559AV100_FIXED_37p125M 33
+#define HI3559AV100_FIXED_36M 34
+#define HI3559AV100_FIXED_32p4M 35
+#define HI3559AV100_FIXED_27M 36
+#define HI3559AV100_FIXED_25M 37
+#define HI3559AV100_FIXED_24M 38
+#define HI3559AV100_FIXED_12M 39
+#define HI3559AV100_FIXED_3M 40
+#define HI3559AV100_FIXED_1p6M 41
+#define HI3559AV100_FIXED_400K 42
+#define HI3559AV100_FIXED_100K 43
+#define HI3559AV100_FIXED_200M 44
+#define HI3559AV100_FIXED_75M 75
+
+#define HI3559AV100_I2C0_CLK 50
+#define HI3559AV100_I2C1_CLK 51
+#define HI3559AV100_I2C2_CLK 52
+#define HI3559AV100_I2C3_CLK 53
+#define HI3559AV100_I2C4_CLK 54
+#define HI3559AV100_I2C5_CLK 55
+#define HI3559AV100_I2C6_CLK 56
+#define HI3559AV100_I2C7_CLK 57
+#define HI3559AV100_I2C8_CLK 58
+#define HI3559AV100_I2C9_CLK 59
+#define HI3559AV100_I2C10_CLK 60
+#define HI3559AV100_I2C11_CLK 61
+
+#define HI3559AV100_SPI0_CLK 62
+#define HI3559AV100_SPI1_CLK 63
+#define HI3559AV100_SPI2_CLK 64
+#define HI3559AV100_SPI3_CLK 65
+#define HI3559AV100_SPI4_CLK 66
+#define HI3559AV100_SPI5_CLK 67
+#define HI3559AV100_SPI6_CLK 68
+
+#define HI3559AV100_EDMAC_CLK 69
+#define HI3559AV100_EDMAC_AXICLK 70
+#define HI3559AV100_EDMAC1_CLK 71
+#define HI3559AV100_EDMAC1_AXICLK 72
+#define HI3559AV100_VDMAC_CLK 73
+
+/* mux clocks */
+#define HI3559AV100_FMC_MUX 80
+#define HI3559AV100_SYSAPB_MUX 81
+#define HI3559AV100_UART_MUX 82
+#define HI3559AV100_SYSBUS_MUX 83
+#define HI3559AV100_A73_MUX 84
+#define HI3559AV100_MMC0_MUX 85
+#define HI3559AV100_MMC1_MUX 86
+#define HI3559AV100_MMC2_MUX 87
+#define HI3559AV100_MMC3_MUX 88
+
+/* gate clocks */
+#define HI3559AV100_FMC_CLK 90
+#define HI3559AV100_UART0_CLK 91
+#define HI3559AV100_UART1_CLK 92
+#define HI3559AV100_UART2_CLK 93
+#define HI3559AV100_UART3_CLK 94
+#define HI3559AV100_UART4_CLK 95
+#define HI3559AV100_MMC0_CLK 96
+#define HI3559AV100_MMC1_CLK 97
+#define HI3559AV100_MMC2_CLK 98
+#define HI3559AV100_MMC3_CLK 99
+
+#define HI3559AV100_ETH_CLK 100
+#define HI3559AV100_ETH_MACIF_CLK 101
+#define HI3559AV100_ETH1_CLK 102
+#define HI3559AV100_ETH1_MACIF_CLK 103
+
+/* complex */
+#define HI3559AV100_MAC0_CLK 110
+#define HI3559AV100_MAC1_CLK 111
+#define HI3559AV100_SATA_CLK 112
+#define HI3559AV100_USB_CLK 113
+#define HI3559AV100_USB1_CLK 114
+
+/* pll clocks */
+#define HI3559AV100_APLL_CLK 250
+#define HI3559AV100_GPLL_CLK 251
+
+#define HI3559AV100_CRG_NR_CLKS 256
+
+#define HI3559AV100_SHUB_SOURCE_SOC_24M 0
+#define HI3559AV100_SHUB_SOURCE_SOC_200M 1
+#define HI3559AV100_SHUB_SOURCE_SOC_300M 2
+#define HI3559AV100_SHUB_SOURCE_PLL 3
+#define HI3559AV100_SHUB_SOURCE_CLK 4
+
+#define HI3559AV100_SHUB_I2C0_CLK 10
+#define HI3559AV100_SHUB_I2C1_CLK 11
+#define HI3559AV100_SHUB_I2C2_CLK 12
+#define HI3559AV100_SHUB_I2C3_CLK 13
+#define HI3559AV100_SHUB_I2C4_CLK 14
+#define HI3559AV100_SHUB_I2C5_CLK 15
+#define HI3559AV100_SHUB_I2C6_CLK 16
+#define HI3559AV100_SHUB_I2C7_CLK 17
+
+#define HI3559AV100_SHUB_SPI_SOURCE_CLK 20
+#define HI3559AV100_SHUB_SPI4_SOURCE_CLK 21
+#define HI3559AV100_SHUB_SPI0_CLK 22
+#define HI3559AV100_SHUB_SPI1_CLK 23
+#define HI3559AV100_SHUB_SPI2_CLK 24
+#define HI3559AV100_SHUB_SPI3_CLK 25
+#define HI3559AV100_SHUB_SPI4_CLK 26
+
+#define HI3559AV100_SHUB_UART_CLK_32K 30
+#define HI3559AV100_SHUB_UART_SOURCE_CLK 31
+#define HI3559AV100_SHUB_UART_DIV_CLK 32
+#define HI3559AV100_SHUB_UART0_CLK 33
+#define HI3559AV100_SHUB_UART1_CLK 34
+#define HI3559AV100_SHUB_UART2_CLK 35
+#define HI3559AV100_SHUB_UART3_CLK 36
+#define HI3559AV100_SHUB_UART4_CLK 37
+#define HI3559AV100_SHUB_UART5_CLK 38
+#define HI3559AV100_SHUB_UART6_CLK 39
+
+#define HI3559AV100_SHUB_EDMAC_CLK 40
+
+#define HI3559AV100_SHUB_NR_CLKS 50
+
+#endif /* __DTS_HI3559AV100_CLOCK_H */
+
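The CRG ids (bounded by HI3559AV100_CRG_NR_CLKS) and the SHUB ids (bounded by HI3559AV100_SHUB_NR_CLKS) are separate numbering spaces served by separate controllers, so the same value can legally appear in both lists. A sketch, assuming illustrative crg/shub_crg provider labels:

	#include <dt-bindings/clock/hi3559av100-clock.h>

	&uart0 {
		clocks = <&crg HI3559AV100_UART0_CLK>;
	};

	&shub_uart0 {
		clocks = <&shub_crg HI3559AV100_SHUB_UART0_CLK>;
	};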
diff --git a/include/dt-bindings/clock/imx7d-clock.h b/include/dt-bindings/clock/imx7d-clock.h
index e6a670e1a3f8..1d4c0dfe0202 100644
--- a/include/dt-bindings/clock/imx7d-clock.h
+++ b/include/dt-bindings/clock/imx7d-clock.h
@@ -451,5 +451,6 @@
#define IMX7D_SNVS_CLK 442
#define IMX7D_CAAM_CLK 443
#define IMX7D_KPP_ROOT_CLK 444
-#define IMX7D_CLK_END 445
+#define IMX7D_PXP_CLK 445
+#define IMX7D_CLK_END 446
#endif /* __DT_BINDINGS_CLOCK_IMX7D_H */
diff --git a/include/dt-bindings/clock/imx7ulp-clock.h b/include/dt-bindings/clock/imx7ulp-clock.h
index 38145bdcd975..b58370d146e2 100644
--- a/include/dt-bindings/clock/imx7ulp-clock.h
+++ b/include/dt-bindings/clock/imx7ulp-clock.h
@@ -58,7 +58,10 @@
#define IMX7ULP_CLK_HSRUN_SYS_SEL 44
#define IMX7ULP_CLK_HSRUN_CORE_DIV 45
-#define IMX7ULP_CLK_SCG1_END 46
+#define IMX7ULP_CLK_CORE 46
+#define IMX7ULP_CLK_HSRUN_CORE 47
+
+#define IMX7ULP_CLK_SCG1_END 48
/* PCC2 */
#define IMX7ULP_CLK_DMA1 0
diff --git a/include/dt-bindings/clock/imx8-clock.h b/include/dt-bindings/clock/imx8-clock.h
index 673a8c662340..2e60ce4d2622 100644
--- a/include/dt-bindings/clock/imx8-clock.h
+++ b/include/dt-bindings/clock/imx8-clock.h
@@ -7,132 +7,6 @@
#ifndef __DT_BINDINGS_CLOCK_IMX_H
#define __DT_BINDINGS_CLOCK_IMX_H
-/* SCU Clocks */
-
-#define IMX_CLK_DUMMY 0
-
-/* CPU */
-#define IMX_A35_CLK 1
-
-/* LSIO SS */
-#define IMX_LSIO_MEM_CLK 2
-#define IMX_LSIO_BUS_CLK 3
-#define IMX_LSIO_PWM0_CLK 10
-#define IMX_LSIO_PWM1_CLK 11
-#define IMX_LSIO_PWM2_CLK 12
-#define IMX_LSIO_PWM3_CLK 13
-#define IMX_LSIO_PWM4_CLK 14
-#define IMX_LSIO_PWM5_CLK 15
-#define IMX_LSIO_PWM6_CLK 16
-#define IMX_LSIO_PWM7_CLK 17
-#define IMX_LSIO_GPT0_CLK 18
-#define IMX_LSIO_GPT1_CLK 19
-#define IMX_LSIO_GPT2_CLK 20
-#define IMX_LSIO_GPT3_CLK 21
-#define IMX_LSIO_GPT4_CLK 22
-#define IMX_LSIO_FSPI0_CLK 23
-#define IMX_LSIO_FSPI1_CLK 24
-
-/* Connectivity SS */
-#define IMX_CONN_AXI_CLK_ROOT 30
-#define IMX_CONN_AHB_CLK_ROOT 31
-#define IMX_CONN_IPG_CLK_ROOT 32
-#define IMX_CONN_SDHC0_CLK 40
-#define IMX_CONN_SDHC1_CLK 41
-#define IMX_CONN_SDHC2_CLK 42
-#define IMX_CONN_ENET0_ROOT_CLK 43
-#define IMX_CONN_ENET0_BYPASS_CLK 44
-#define IMX_CONN_ENET0_RGMII_CLK 45
-#define IMX_CONN_ENET1_ROOT_CLK 46
-#define IMX_CONN_ENET1_BYPASS_CLK 47
-#define IMX_CONN_ENET1_RGMII_CLK 48
-#define IMX_CONN_GPMI_BCH_IO_CLK 49
-#define IMX_CONN_GPMI_BCH_CLK 50
-#define IMX_CONN_USB2_ACLK 51
-#define IMX_CONN_USB2_BUS_CLK 52
-#define IMX_CONN_USB2_LPM_CLK 53
-
-/* HSIO SS */
-#define IMX_HSIO_AXI_CLK 60
-#define IMX_HSIO_PER_CLK 61
-
-/* Display controller SS */
-#define IMX_DC_AXI_EXT_CLK 70
-#define IMX_DC_AXI_INT_CLK 71
-#define IMX_DC_CFG_CLK 72
-#define IMX_DC0_PLL0_CLK 80
-#define IMX_DC0_PLL1_CLK 81
-#define IMX_DC0_DISP0_CLK 82
-#define IMX_DC0_DISP1_CLK 83
-
-/* MIPI-LVDS SS */
-#define IMX_MIPI_IPG_CLK 90
-#define IMX_MIPI0_PIXEL_CLK 100
-#define IMX_MIPI0_BYPASS_CLK 101
-#define IMX_MIPI0_LVDS_PIXEL_CLK 102
-#define IMX_MIPI0_LVDS_BYPASS_CLK 103
-#define IMX_MIPI0_LVDS_PHY_CLK 104
-#define IMX_MIPI0_I2C0_CLK 105
-#define IMX_MIPI0_I2C1_CLK 106
-#define IMX_MIPI0_PWM0_CLK 107
-#define IMX_MIPI1_PIXEL_CLK 108
-#define IMX_MIPI1_BYPASS_CLK 109
-#define IMX_MIPI1_LVDS_PIXEL_CLK 110
-#define IMX_MIPI1_LVDS_BYPASS_CLK 111
-#define IMX_MIPI1_LVDS_PHY_CLK 112
-#define IMX_MIPI1_I2C0_CLK 113
-#define IMX_MIPI1_I2C1_CLK 114
-#define IMX_MIPI1_PWM0_CLK 115
-
-/* IMG SS */
-#define IMX_IMG_AXI_CLK 120
-#define IMX_IMG_IPG_CLK 121
-#define IMX_IMG_PXL_CLK 122
-
-/* MIPI-CSI SS */
-#define IMX_CSI0_CORE_CLK 130
-#define IMX_CSI0_ESC_CLK 131
-#define IMX_CSI0_PWM0_CLK 132
-#define IMX_CSI0_I2C0_CLK 133
-
-/* PARALLER CSI SS */
-#define IMX_PARALLEL_CSI_DPLL_CLK 140
-#define IMX_PARALLEL_CSI_PIXEL_CLK 141
-#define IMX_PARALLEL_CSI_MCLK_CLK 142
-
-/* VPU SS */
-#define IMX_VPU_ENC_CLK 150
-#define IMX_VPU_DEC_CLK 151
-
-/* GPU SS */
-#define IMX_GPU0_CORE_CLK 160
-#define IMX_GPU0_SHADER_CLK 161
-
-/* ADMA SS */
-#define IMX_ADMA_IPG_CLK_ROOT 165
-#define IMX_ADMA_UART0_CLK 170
-#define IMX_ADMA_UART1_CLK 171
-#define IMX_ADMA_UART2_CLK 172
-#define IMX_ADMA_UART3_CLK 173
-#define IMX_ADMA_SPI0_CLK 174
-#define IMX_ADMA_SPI1_CLK 175
-#define IMX_ADMA_SPI2_CLK 176
-#define IMX_ADMA_SPI3_CLK 177
-#define IMX_ADMA_CAN0_CLK 178
-#define IMX_ADMA_CAN1_CLK 179
-#define IMX_ADMA_CAN2_CLK 180
-#define IMX_ADMA_I2C0_CLK 181
-#define IMX_ADMA_I2C1_CLK 182
-#define IMX_ADMA_I2C2_CLK 183
-#define IMX_ADMA_I2C3_CLK 184
-#define IMX_ADMA_FTM0_CLK 185
-#define IMX_ADMA_FTM1_CLK 186
-#define IMX_ADMA_ADC0_CLK 187
-#define IMX_ADMA_PWM_CLK 188
-#define IMX_ADMA_LCD_CLK 189
-
-#define IMX_SCU_CLK_END 190
-
/* LPCG clocks */
/* LSIO SS LPCG */
diff --git a/include/dt-bindings/clock/imx8-lpcg.h b/include/dt-bindings/clock/imx8-lpcg.h
new file mode 100644
index 000000000000..d202715652c3
--- /dev/null
+++ b/include/dt-bindings/clock/imx8-lpcg.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2019-2020 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+#define IMX_LPCG_CLK_0 0
+#define IMX_LPCG_CLK_1 4
+#define IMX_LPCG_CLK_2 8
+#define IMX_LPCG_CLK_3 12
+#define IMX_LPCG_CLK_4 16
+#define IMX_LPCG_CLK_5 20
+#define IMX_LPCG_CLK_6 24
+#define IMX_LPCG_CLK_7 28
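These values step by 4 because each LPCG gate is a 2-bit field aligned on a nibble boundary of the 32-bit gating register, and the specifier cell is that field's bit offset rather than a dense index. A sketch of consumer usage (node labels and the clock-names pairing are illustrative):

	#include <dt-bindings/clock/imx8-lpcg.h>

	&lpuart0 {
		clocks = <&uart0_lpcg IMX_LPCG_CLK_0>,	/* gate at bits 1:0 */
			 <&uart0_lpcg IMX_LPCG_CLK_4>;	/* gate at bits 17:16 */
		clock-names = "ipg", "baud";
	};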
diff --git a/include/dt-bindings/clock/imx8mm-clock.h b/include/dt-bindings/clock/imx8mm-clock.h
index edeece2289f0..1f768b2eeb1a 100644
--- a/include/dt-bindings/clock/imx8mm-clock.h
+++ b/include/dt-bindings/clock/imx8mm-clock.h
@@ -265,6 +265,22 @@
#define IMX8MM_SYS_PLL2_333M_CG 244
#define IMX8MM_SYS_PLL2_500M_CG 245
-#define IMX8MM_CLK_END 246
+#define IMX8MM_CLK_M4_CORE 246
+#define IMX8MM_CLK_VPU_CORE 247
+#define IMX8MM_CLK_GPU3D_CORE 248
+#define IMX8MM_CLK_GPU2D_CORE 249
+
+#define IMX8MM_CLK_CLKO2 250
+
+#define IMX8MM_CLK_A53_CORE 251
+
+#define IMX8MM_CLK_CLKOUT1_SEL 252
+#define IMX8MM_CLK_CLKOUT1_DIV 253
+#define IMX8MM_CLK_CLKOUT1 254
+#define IMX8MM_CLK_CLKOUT2_SEL 255
+#define IMX8MM_CLK_CLKOUT2_DIV 256
+#define IMX8MM_CLK_CLKOUT2 257
+
+#define IMX8MM_CLK_END 258
#endif
diff --git a/include/dt-bindings/clock/imx8mn-clock.h b/include/dt-bindings/clock/imx8mn-clock.h
index 65ac6eb6c733..07b8a282c268 100644
--- a/include/dt-bindings/clock/imx8mn-clock.h
+++ b/include/dt-bindings/clock/imx8mn-clock.h
@@ -228,6 +228,35 @@
#define IMX8MN_SYS_PLL2_333M_CG 209
#define IMX8MN_SYS_PLL2_500M_CG 210
-#define IMX8MN_CLK_END 211
+#define IMX8MN_CLK_SNVS_ROOT 211
+#define IMX8MN_CLK_GPU_CORE 212
+#define IMX8MN_CLK_GPU_SHADER 213
+
+#define IMX8MN_CLK_A53_CORE 214
+
+#define IMX8MN_CLK_CLKOUT1_SEL 215
+#define IMX8MN_CLK_CLKOUT1_DIV 216
+#define IMX8MN_CLK_CLKOUT1 217
+#define IMX8MN_CLK_CLKOUT2_SEL 218
+#define IMX8MN_CLK_CLKOUT2_DIV 219
+#define IMX8MN_CLK_CLKOUT2 220
+
+#define IMX8MN_CLK_M7_CORE 221
+
+#define IMX8MN_CLK_GPT_3M 222
+#define IMX8MN_CLK_GPT1 223
+#define IMX8MN_CLK_GPT1_ROOT 224
+#define IMX8MN_CLK_GPT2 225
+#define IMX8MN_CLK_GPT2_ROOT 226
+#define IMX8MN_CLK_GPT3 227
+#define IMX8MN_CLK_GPT3_ROOT 228
+#define IMX8MN_CLK_GPT4 229
+#define IMX8MN_CLK_GPT4_ROOT 230
+#define IMX8MN_CLK_GPT5 231
+#define IMX8MN_CLK_GPT5_ROOT 232
+#define IMX8MN_CLK_GPT6 233
+#define IMX8MN_CLK_GPT6_ROOT 234
+
+#define IMX8MN_CLK_END 235
#endif
diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h
index 2fab63186bca..9d5cc2ddde89 100644
--- a/include/dt-bindings/clock/imx8mp-clock.h
+++ b/include/dt-bindings/clock/imx8mp-clock.h
@@ -117,7 +117,6 @@
#define IMX8MP_CLK_AUDIO_AHB 108
#define IMX8MP_CLK_MIPI_DSI_ESC_RX 109
#define IMX8MP_CLK_IPG_ROOT 110
-#define IMX8MP_CLK_IPG_AUDIO_ROOT 111
#define IMX8MP_CLK_DRAM_ALT 112
#define IMX8MP_CLK_DRAM_APB 113
#define IMX8MP_CLK_VPU_G1 114
@@ -125,7 +124,6 @@
#define IMX8MP_CLK_CAN1 116
#define IMX8MP_CLK_CAN2 117
#define IMX8MP_CLK_MEMREPAIR 118
-#define IMX8MP_CLK_PCIE_PHY 119
#define IMX8MP_CLK_PCIE_AUX 120
#define IMX8MP_CLK_I2C5 121
#define IMX8MP_CLK_I2C6 122
@@ -173,17 +171,15 @@
#define IMX8MP_CLK_IPP_DO_CLKO1 164
#define IMX8MP_CLK_IPP_DO_CLKO2 165
#define IMX8MP_CLK_HDMI_FDCC_TST 166
-#define IMX8MP_CLK_HDMI_27M 167
+#define IMX8MP_CLK_HDMI_24M 167
#define IMX8MP_CLK_HDMI_REF_266M 168
#define IMX8MP_CLK_USDHC3 169
#define IMX8MP_CLK_MEDIA_CAM1_PIX 170
#define IMX8MP_CLK_MEDIA_MIPI_PHY1_REF 171
#define IMX8MP_CLK_MEDIA_DISP1_PIX 172
#define IMX8MP_CLK_MEDIA_CAM2_PIX 173
-#define IMX8MP_CLK_MEDIA_MIPI_PHY2_REF 174
+#define IMX8MP_CLK_MEDIA_LDB 174
#define IMX8MP_CLK_MEDIA_MIPI_CSI2_ESC 175
-#define IMX8MP_CLK_PCIE2_CTRL 176
-#define IMX8MP_CLK_PCIE2_PHY 177
#define IMX8MP_CLK_MEDIA_MIPI_TEST_BYTE 178
#define IMX8MP_CLK_ECSPI3 179
#define IMX8MP_CLK_PDM 180
@@ -294,7 +290,103 @@
#define IMX8MP_CLK_DRAM_ALT_ROOT 285
#define IMX8MP_CLK_DRAM_CORE 286
#define IMX8MP_CLK_ARM 287
+#define IMX8MP_CLK_A53_CORE 288
-#define IMX8MP_CLK_END 288
+#define IMX8MP_SYS_PLL1_40M_CG 289
+#define IMX8MP_SYS_PLL1_80M_CG 290
+#define IMX8MP_SYS_PLL1_100M_CG 291
+#define IMX8MP_SYS_PLL1_133M_CG 292
+#define IMX8MP_SYS_PLL1_160M_CG 293
+#define IMX8MP_SYS_PLL1_200M_CG 294
+#define IMX8MP_SYS_PLL1_266M_CG 295
+#define IMX8MP_SYS_PLL1_400M_CG 296
+#define IMX8MP_SYS_PLL2_50M_CG 297
+#define IMX8MP_SYS_PLL2_100M_CG 298
+#define IMX8MP_SYS_PLL2_125M_CG 299
+#define IMX8MP_SYS_PLL2_166M_CG 300
+#define IMX8MP_SYS_PLL2_200M_CG 301
+#define IMX8MP_SYS_PLL2_250M_CG 302
+#define IMX8MP_SYS_PLL2_333M_CG 303
+#define IMX8MP_SYS_PLL2_500M_CG 304
+
+#define IMX8MP_CLK_M7_CORE 305
+#define IMX8MP_CLK_ML_CORE 306
+#define IMX8MP_CLK_GPU3D_CORE 307
+#define IMX8MP_CLK_GPU3D_SHADER_CORE 308
+#define IMX8MP_CLK_GPU2D_CORE 309
+#define IMX8MP_CLK_AUDIO_AXI 310
+#define IMX8MP_CLK_HSIO_AXI 311
+#define IMX8MP_CLK_MEDIA_ISP 312
+#define IMX8MP_CLK_MEDIA_DISP2_PIX 313
+#define IMX8MP_CLK_CLKOUT1_SEL 314
+#define IMX8MP_CLK_CLKOUT1_DIV 315
+#define IMX8MP_CLK_CLKOUT1 316
+#define IMX8MP_CLK_CLKOUT2_SEL 317
+#define IMX8MP_CLK_CLKOUT2_DIV 318
+#define IMX8MP_CLK_CLKOUT2 319
+
+#define IMX8MP_CLK_END 320
+
+#define IMX8MP_CLK_AUDIOMIX_SAI1_IPG 0
+#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1 1
+#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK2 2
+#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK3 3
+#define IMX8MP_CLK_AUDIOMIX_SAI2_IPG 4
+#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK1 5
+#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK2 6
+#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK3 7
+#define IMX8MP_CLK_AUDIOMIX_SAI3_IPG 8
+#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK1 9
+#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK2 10
+#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK3 11
+#define IMX8MP_CLK_AUDIOMIX_SAI5_IPG 12
+#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK1 13
+#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK2 14
+#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK3 15
+#define IMX8MP_CLK_AUDIOMIX_SAI6_IPG 16
+#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK1 17
+#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK2 18
+#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK3 19
+#define IMX8MP_CLK_AUDIOMIX_SAI7_IPG 20
+#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK1 21
+#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK2 22
+#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK3 23
+#define IMX8MP_CLK_AUDIOMIX_ASRC_IPG 24
+#define IMX8MP_CLK_AUDIOMIX_PDM_IPG 25
+#define IMX8MP_CLK_AUDIOMIX_SDMA2_ROOT 26
+#define IMX8MP_CLK_AUDIOMIX_SDMA3_ROOT 27
+#define IMX8MP_CLK_AUDIOMIX_SPBA2_ROOT 28
+#define IMX8MP_CLK_AUDIOMIX_DSP_ROOT 29
+#define IMX8MP_CLK_AUDIOMIX_DSPDBG_ROOT 30
+#define IMX8MP_CLK_AUDIOMIX_EARC_IPG 31
+#define IMX8MP_CLK_AUDIOMIX_OCRAMA_IPG 32
+#define IMX8MP_CLK_AUDIOMIX_AUD2HTX_IPG 33
+#define IMX8MP_CLK_AUDIOMIX_EDMA_ROOT 34
+#define IMX8MP_CLK_AUDIOMIX_AUDPLL_ROOT 35
+#define IMX8MP_CLK_AUDIOMIX_MU2_ROOT 36
+#define IMX8MP_CLK_AUDIOMIX_MU3_ROOT 37
+#define IMX8MP_CLK_AUDIOMIX_EARC_PHY 38
+#define IMX8MP_CLK_AUDIOMIX_PDM_ROOT 39
+#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1_SEL 40
+#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK2_SEL 41
+#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK1_SEL 42
+#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK2_SEL 43
+#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK1_SEL 44
+#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK2_SEL 45
+#define IMX8MP_CLK_AUDIOMIX_SAI4_MCLK1_SEL 46
+#define IMX8MP_CLK_AUDIOMIX_SAI4_MCLK2_SEL 47
+#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK1_SEL 48
+#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK2_SEL 49
+#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK1_SEL 50
+#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK2_SEL 51
+#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK1_SEL 52
+#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK2_SEL 53
+#define IMX8MP_CLK_AUDIOMIX_PDM_SEL 54
+#define IMX8MP_CLK_AUDIOMIX_SAI_PLL_REF_SEL 55
+#define IMX8MP_CLK_AUDIOMIX_SAI_PLL 56
+#define IMX8MP_CLK_AUDIOMIX_SAI_PLL_BYPASS 57
+#define IMX8MP_CLK_AUDIOMIX_SAI_PLL_OUT 58
+
+#define IMX8MP_CLK_AUDIOMIX_END 59
#endif
diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h
index 3bab9b21c8d7..afa74d7ba100 100644
--- a/include/dt-bindings/clock/imx8mq-clock.h
+++ b/include/dt-bindings/clock/imx8mq-clock.h
@@ -405,25 +405,27 @@
#define IMX8MQ_VIDEO2_PLL1_REF_SEL 266
-#define IMX8MQ_SYS1_PLL_40M_CG 267
-#define IMX8MQ_SYS1_PLL_80M_CG 268
-#define IMX8MQ_SYS1_PLL_100M_CG 269
-#define IMX8MQ_SYS1_PLL_133M_CG 270
-#define IMX8MQ_SYS1_PLL_160M_CG 271
-#define IMX8MQ_SYS1_PLL_200M_CG 272
-#define IMX8MQ_SYS1_PLL_266M_CG 273
-#define IMX8MQ_SYS1_PLL_400M_CG 274
-#define IMX8MQ_SYS1_PLL_800M_CG 275
-#define IMX8MQ_SYS2_PLL_50M_CG 276
-#define IMX8MQ_SYS2_PLL_100M_CG 277
-#define IMX8MQ_SYS2_PLL_125M_CG 278
-#define IMX8MQ_SYS2_PLL_166M_CG 279
-#define IMX8MQ_SYS2_PLL_200M_CG 280
-#define IMX8MQ_SYS2_PLL_250M_CG 281
-#define IMX8MQ_SYS2_PLL_333M_CG 282
-#define IMX8MQ_SYS2_PLL_500M_CG 283
-#define IMX8MQ_SYS2_PLL_1000M_CG 284
-
-#define IMX8MQ_CLK_END 285
+#define IMX8MQ_CLK_GPU_CORE 285
+#define IMX8MQ_CLK_GPU_SHADER 286
+#define IMX8MQ_CLK_M4_CORE 287
+#define IMX8MQ_CLK_VPU_CORE 288
+
+#define IMX8MQ_CLK_A53_CORE 289
+
+#define IMX8MQ_CLK_MON_AUDIO_PLL1_DIV 290
+#define IMX8MQ_CLK_MON_AUDIO_PLL2_DIV 291
+#define IMX8MQ_CLK_MON_VIDEO_PLL1_DIV 292
+#define IMX8MQ_CLK_MON_GPU_PLL_DIV 293
+#define IMX8MQ_CLK_MON_VPU_PLL_DIV 294
+#define IMX8MQ_CLK_MON_ARM_PLL_DIV 295
+#define IMX8MQ_CLK_MON_SYS_PLL1_DIV 296
+#define IMX8MQ_CLK_MON_SYS_PLL2_DIV 297
+#define IMX8MQ_CLK_MON_SYS_PLL3_DIV 298
+#define IMX8MQ_CLK_MON_DRAM_PLL_DIV 299
+#define IMX8MQ_CLK_MON_VIDEO_PLL2_DIV 300
+#define IMX8MQ_CLK_MON_SEL 301
+#define IMX8MQ_CLK_MON_CLK2_OUT 302
+
+#define IMX8MQ_CLK_END 303
#endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */
diff --git a/include/dt-bindings/clock/imx8ulp-clock.h b/include/dt-bindings/clock/imx8ulp-clock.h
new file mode 100644
index 000000000000..953ecfe8ebcc
--- /dev/null
+++ b/include/dt-bindings/clock/imx8ulp-clock.h
@@ -0,0 +1,258 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
+/*
+ * Copyright 2021 NXP
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX8ULP_H
+#define __DT_BINDINGS_CLOCK_IMX8ULP_H
+
+#define IMX8ULP_CLK_DUMMY 0
+
+/* CGC1 */
+#define IMX8ULP_CLK_SPLL2 5
+#define IMX8ULP_CLK_SPLL3 6
+#define IMX8ULP_CLK_A35_SEL 7
+#define IMX8ULP_CLK_A35_DIV 8
+#define IMX8ULP_CLK_SPLL2_PRE_SEL 9
+#define IMX8ULP_CLK_SPLL3_PRE_SEL 10
+#define IMX8ULP_CLK_SPLL3_PFD0 11
+#define IMX8ULP_CLK_SPLL3_PFD1 12
+#define IMX8ULP_CLK_SPLL3_PFD2 13
+#define IMX8ULP_CLK_SPLL3_PFD3 14
+#define IMX8ULP_CLK_SPLL3_PFD0_DIV1 15
+#define IMX8ULP_CLK_SPLL3_PFD0_DIV2 16
+#define IMX8ULP_CLK_SPLL3_PFD1_DIV1 17
+#define IMX8ULP_CLK_SPLL3_PFD1_DIV2 18
+#define IMX8ULP_CLK_SPLL3_PFD2_DIV1 19
+#define IMX8ULP_CLK_SPLL3_PFD2_DIV2 20
+#define IMX8ULP_CLK_SPLL3_PFD3_DIV1 21
+#define IMX8ULP_CLK_SPLL3_PFD3_DIV2 22
+#define IMX8ULP_CLK_NIC_SEL 23
+#define IMX8ULP_CLK_NIC_AD_DIVPLAT 24
+#define IMX8ULP_CLK_NIC_PER_DIVPLAT 25
+#define IMX8ULP_CLK_XBAR_SEL 26
+#define IMX8ULP_CLK_XBAR_AD_DIVPLAT 27
+#define IMX8ULP_CLK_XBAR_DIVBUS 28
+#define IMX8ULP_CLK_XBAR_AD_SLOW 29
+#define IMX8ULP_CLK_SOSC_DIV1 30
+#define IMX8ULP_CLK_SOSC_DIV2 31
+#define IMX8ULP_CLK_SOSC_DIV3 32
+#define IMX8ULP_CLK_FROSC_DIV1 33
+#define IMX8ULP_CLK_FROSC_DIV2 34
+#define IMX8ULP_CLK_FROSC_DIV3 35
+#define IMX8ULP_CLK_SPLL3_VCODIV 36
+#define IMX8ULP_CLK_SPLL3_PFD0_DIV1_GATE 37
+#define IMX8ULP_CLK_SPLL3_PFD0_DIV2_GATE 38
+#define IMX8ULP_CLK_SPLL3_PFD1_DIV1_GATE 39
+#define IMX8ULP_CLK_SPLL3_PFD1_DIV2_GATE 40
+#define IMX8ULP_CLK_SPLL3_PFD2_DIV1_GATE 41
+#define IMX8ULP_CLK_SPLL3_PFD2_DIV2_GATE 42
+#define IMX8ULP_CLK_SPLL3_PFD3_DIV1_GATE 43
+#define IMX8ULP_CLK_SPLL3_PFD3_DIV2_GATE 44
+#define IMX8ULP_CLK_SOSC_DIV1_GATE 45
+#define IMX8ULP_CLK_SOSC_DIV2_GATE 46
+#define IMX8ULP_CLK_SOSC_DIV3_GATE 47
+#define IMX8ULP_CLK_FROSC_DIV1_GATE 48
+#define IMX8ULP_CLK_FROSC_DIV2_GATE 49
+#define IMX8ULP_CLK_FROSC_DIV3_GATE 50
+#define IMX8ULP_CLK_SAI4_SEL 51
+#define IMX8ULP_CLK_SAI5_SEL 52
+#define IMX8ULP_CLK_AUD_CLK1 53
+#define IMX8ULP_CLK_ARM 54
+#define IMX8ULP_CLK_ENET_TS_SEL 55
+
+#define IMX8ULP_CLK_CGC1_END 56
+
+/* CGC2 */
+#define IMX8ULP_CLK_PLL4_PRE_SEL 0
+#define IMX8ULP_CLK_PLL4 1
+#define IMX8ULP_CLK_PLL4_VCODIV 2
+#define IMX8ULP_CLK_DDR_SEL 3
+#define IMX8ULP_CLK_DDR_DIV 4
+#define IMX8ULP_CLK_LPAV_AXI_SEL 5
+#define IMX8ULP_CLK_LPAV_AXI_DIV 6
+#define IMX8ULP_CLK_LPAV_AHB_DIV 7
+#define IMX8ULP_CLK_LPAV_BUS_DIV 8
+#define IMX8ULP_CLK_PLL4_PFD0 9
+#define IMX8ULP_CLK_PLL4_PFD1 10
+#define IMX8ULP_CLK_PLL4_PFD2 11
+#define IMX8ULP_CLK_PLL4_PFD3 12
+#define IMX8ULP_CLK_PLL4_PFD0_DIV1_GATE 13
+#define IMX8ULP_CLK_PLL4_PFD0_DIV2_GATE 14
+#define IMX8ULP_CLK_PLL4_PFD1_DIV1_GATE 15
+#define IMX8ULP_CLK_PLL4_PFD1_DIV2_GATE 16
+#define IMX8ULP_CLK_PLL4_PFD2_DIV1_GATE 17
+#define IMX8ULP_CLK_PLL4_PFD2_DIV2_GATE 18
+#define IMX8ULP_CLK_PLL4_PFD3_DIV1_GATE 19
+#define IMX8ULP_CLK_PLL4_PFD3_DIV2_GATE 20
+#define IMX8ULP_CLK_PLL4_PFD0_DIV1 21
+#define IMX8ULP_CLK_PLL4_PFD0_DIV2 22
+#define IMX8ULP_CLK_PLL4_PFD1_DIV1 23
+#define IMX8ULP_CLK_PLL4_PFD1_DIV2 24
+#define IMX8ULP_CLK_PLL4_PFD2_DIV1 25
+#define IMX8ULP_CLK_PLL4_PFD2_DIV2 26
+#define IMX8ULP_CLK_PLL4_PFD3_DIV1 27
+#define IMX8ULP_CLK_PLL4_PFD3_DIV2 28
+#define IMX8ULP_CLK_CGC2_SOSC_DIV1_GATE 29
+#define IMX8ULP_CLK_CGC2_SOSC_DIV2_GATE 30
+#define IMX8ULP_CLK_CGC2_SOSC_DIV3_GATE 31
+#define IMX8ULP_CLK_CGC2_SOSC_DIV1 32
+#define IMX8ULP_CLK_CGC2_SOSC_DIV2 33
+#define IMX8ULP_CLK_CGC2_SOSC_DIV3 34
+#define IMX8ULP_CLK_CGC2_FROSC_DIV1_GATE 35
+#define IMX8ULP_CLK_CGC2_FROSC_DIV2_GATE 36
+#define IMX8ULP_CLK_CGC2_FROSC_DIV3_GATE 37
+#define IMX8ULP_CLK_CGC2_FROSC_DIV1 38
+#define IMX8ULP_CLK_CGC2_FROSC_DIV2 39
+#define IMX8ULP_CLK_CGC2_FROSC_DIV3 40
+#define IMX8ULP_CLK_AUD_CLK2 41
+#define IMX8ULP_CLK_SAI6_SEL 42
+#define IMX8ULP_CLK_SAI7_SEL 43
+#define IMX8ULP_CLK_SPDIF_SEL 44
+#define IMX8ULP_CLK_HIFI_SEL 45
+#define IMX8ULP_CLK_HIFI_DIVCORE 46
+#define IMX8ULP_CLK_HIFI_DIVPLAT 47
+#define IMX8ULP_CLK_DSI_PHY_REF 48
+
+#define IMX8ULP_CLK_CGC2_END 49
+
+/* PCC3 */
+#define IMX8ULP_CLK_WDOG3 0
+#define IMX8ULP_CLK_WDOG4 1
+#define IMX8ULP_CLK_LPIT1 2
+#define IMX8ULP_CLK_TPM4 3
+#define IMX8ULP_CLK_TPM5 4
+#define IMX8ULP_CLK_FLEXIO1 5
+#define IMX8ULP_CLK_I3C2 6
+#define IMX8ULP_CLK_LPI2C4 7
+#define IMX8ULP_CLK_LPI2C5 8
+#define IMX8ULP_CLK_LPUART4 9
+#define IMX8ULP_CLK_LPUART5 10
+#define IMX8ULP_CLK_LPSPI4 11
+#define IMX8ULP_CLK_LPSPI5 12
+#define IMX8ULP_CLK_DMA1_MP 13
+#define IMX8ULP_CLK_DMA1_CH0 14
+#define IMX8ULP_CLK_DMA1_CH1 15
+#define IMX8ULP_CLK_DMA1_CH2 16
+#define IMX8ULP_CLK_DMA1_CH3 17
+#define IMX8ULP_CLK_DMA1_CH4 18
+#define IMX8ULP_CLK_DMA1_CH5 19
+#define IMX8ULP_CLK_DMA1_CH6 20
+#define IMX8ULP_CLK_DMA1_CH7 21
+#define IMX8ULP_CLK_DMA1_CH8 22
+#define IMX8ULP_CLK_DMA1_CH9 23
+#define IMX8ULP_CLK_DMA1_CH10 24
+#define IMX8ULP_CLK_DMA1_CH11 25
+#define IMX8ULP_CLK_DMA1_CH12 26
+#define IMX8ULP_CLK_DMA1_CH13 27
+#define IMX8ULP_CLK_DMA1_CH14 28
+#define IMX8ULP_CLK_DMA1_CH15 29
+#define IMX8ULP_CLK_DMA1_CH16 30
+#define IMX8ULP_CLK_DMA1_CH17 31
+#define IMX8ULP_CLK_DMA1_CH18 32
+#define IMX8ULP_CLK_DMA1_CH19 33
+#define IMX8ULP_CLK_DMA1_CH20 34
+#define IMX8ULP_CLK_DMA1_CH21 35
+#define IMX8ULP_CLK_DMA1_CH22 36
+#define IMX8ULP_CLK_DMA1_CH23 37
+#define IMX8ULP_CLK_DMA1_CH24 38
+#define IMX8ULP_CLK_DMA1_CH25 39
+#define IMX8ULP_CLK_DMA1_CH26 40
+#define IMX8ULP_CLK_DMA1_CH27 41
+#define IMX8ULP_CLK_DMA1_CH28 42
+#define IMX8ULP_CLK_DMA1_CH29 43
+#define IMX8ULP_CLK_DMA1_CH30 44
+#define IMX8ULP_CLK_DMA1_CH31 45
+#define IMX8ULP_CLK_MU3_A 46
+#define IMX8ULP_CLK_MU0_B 47
+
+#define IMX8ULP_CLK_PCC3_END 48
+
+/* PCC4 */
+#define IMX8ULP_CLK_FLEXSPI2 0
+#define IMX8ULP_CLK_TPM6 1
+#define IMX8ULP_CLK_TPM7 2
+#define IMX8ULP_CLK_LPI2C6 3
+#define IMX8ULP_CLK_LPI2C7 4
+#define IMX8ULP_CLK_LPUART6 5
+#define IMX8ULP_CLK_LPUART7 6
+#define IMX8ULP_CLK_SAI4 7
+#define IMX8ULP_CLK_SAI5 8
+#define IMX8ULP_CLK_PCTLE 9
+#define IMX8ULP_CLK_PCTLF 10
+#define IMX8ULP_CLK_USDHC0 11
+#define IMX8ULP_CLK_USDHC1 12
+#define IMX8ULP_CLK_USDHC2 13
+#define IMX8ULP_CLK_USB0 14
+#define IMX8ULP_CLK_USB0_PHY 15
+#define IMX8ULP_CLK_USB1 16
+#define IMX8ULP_CLK_USB1_PHY 17
+#define IMX8ULP_CLK_USB_XBAR 18
+#define IMX8ULP_CLK_ENET 19
+#define IMX8ULP_CLK_SFA1 20
+#define IMX8ULP_CLK_RGPIOE 21
+#define IMX8ULP_CLK_RGPIOF 22
+
+#define IMX8ULP_CLK_PCC4_END 23
+
+/* PCC5 */
+#define IMX8ULP_CLK_TPM8 0
+#define IMX8ULP_CLK_SAI6 1
+#define IMX8ULP_CLK_SAI7 2
+#define IMX8ULP_CLK_SPDIF 3
+#define IMX8ULP_CLK_ISI 4
+#define IMX8ULP_CLK_CSI_REGS 5
+#define IMX8ULP_CLK_PCTLD 6
+#define IMX8ULP_CLK_CSI 7
+#define IMX8ULP_CLK_DSI 8
+#define IMX8ULP_CLK_WDOG5 9
+#define IMX8ULP_CLK_EPDC 10
+#define IMX8ULP_CLK_PXP 11
+#define IMX8ULP_CLK_SFA2 12
+#define IMX8ULP_CLK_GPU2D 13
+#define IMX8ULP_CLK_GPU3D 14
+#define IMX8ULP_CLK_DC_NANO 15
+#define IMX8ULP_CLK_CSI_CLK_UI 16
+#define IMX8ULP_CLK_CSI_CLK_ESC 17
+#define IMX8ULP_CLK_RGPIOD 18
+#define IMX8ULP_CLK_DMA2_MP 19
+#define IMX8ULP_CLK_DMA2_CH0 20
+#define IMX8ULP_CLK_DMA2_CH1 21
+#define IMX8ULP_CLK_DMA2_CH2 22
+#define IMX8ULP_CLK_DMA2_CH3 23
+#define IMX8ULP_CLK_DMA2_CH4 24
+#define IMX8ULP_CLK_DMA2_CH5 25
+#define IMX8ULP_CLK_DMA2_CH6 26
+#define IMX8ULP_CLK_DMA2_CH7 27
+#define IMX8ULP_CLK_DMA2_CH8 28
+#define IMX8ULP_CLK_DMA2_CH9 29
+#define IMX8ULP_CLK_DMA2_CH10 30
+#define IMX8ULP_CLK_DMA2_CH11 31
+#define IMX8ULP_CLK_DMA2_CH12 32
+#define IMX8ULP_CLK_DMA2_CH13 33
+#define IMX8ULP_CLK_DMA2_CH14 34
+#define IMX8ULP_CLK_DMA2_CH15 35
+#define IMX8ULP_CLK_DMA2_CH16 36
+#define IMX8ULP_CLK_DMA2_CH17 37
+#define IMX8ULP_CLK_DMA2_CH18 38
+#define IMX8ULP_CLK_DMA2_CH19 39
+#define IMX8ULP_CLK_DMA2_CH20 40
+#define IMX8ULP_CLK_DMA2_CH21 41
+#define IMX8ULP_CLK_DMA2_CH22 42
+#define IMX8ULP_CLK_DMA2_CH23 43
+#define IMX8ULP_CLK_DMA2_CH24 44
+#define IMX8ULP_CLK_DMA2_CH25 45
+#define IMX8ULP_CLK_DMA2_CH26 46
+#define IMX8ULP_CLK_DMA2_CH27 47
+#define IMX8ULP_CLK_DMA2_CH28 48
+#define IMX8ULP_CLK_DMA2_CH29 49
+#define IMX8ULP_CLK_DMA2_CH30 50
+#define IMX8ULP_CLK_DMA2_CH31 51
+#define IMX8ULP_CLK_MU2_B 52
+#define IMX8ULP_CLK_MU3_B 53
+#define IMX8ULP_CLK_AVD_SIM 54
+#define IMX8ULP_CLK_DSI_TX_ESC 55
+
+#define IMX8ULP_CLK_PCC5_END 56
+
+#endif
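Worth noting about the header above: CGC2, PCC3, PCC4 and PCC5 each restart their IDs at 0, so these are per-provider index spaces rather than one global numbering, and a consumer has to pair each ID with the phandle of the matching controller. A minimal consumer sketch; the pcc3 label, node address, compatible and clock-names are assumptions for illustration, not taken from this patch:

#include <dt-bindings/clock/imx8ulp-clock.h>

lpuart5: serial@293a0000 {
	compatible = "fsl,imx8ulp-lpuart";
	reg = <0x293a0000 0x1000>;
	/* IMX8ULP_CLK_LPUART5 (10) indexes the PCC3 provider, not a global space */
	clocks = <&pcc3 IMX8ULP_CLK_LPUART5>;
	clock-names = "ipg";
};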
diff --git a/include/dt-bindings/clock/imx93-clock.h b/include/dt-bindings/clock/imx93-clock.h
new file mode 100644
index 000000000000..19bc32788d81
--- /dev/null
+++ b/include/dt-bindings/clock/imx93-clock.h
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
+/*
+ * Copyright 2022 NXP
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX93_CLK_H
+#define __DT_BINDINGS_CLOCK_IMX93_CLK_H
+
+#define IMX93_CLK_DUMMY 0
+#define IMX93_CLK_24M 1
+#define IMX93_CLK_EXT1 2
+#define IMX93_CLK_SYS_PLL_PFD0 3
+#define IMX93_CLK_SYS_PLL_PFD0_DIV2 4
+#define IMX93_CLK_SYS_PLL_PFD1 5
+#define IMX93_CLK_SYS_PLL_PFD1_DIV2 6
+#define IMX93_CLK_SYS_PLL_PFD2 7
+#define IMX93_CLK_SYS_PLL_PFD2_DIV2 8
+#define IMX93_CLK_AUDIO_PLL 9
+#define IMX93_CLK_VIDEO_PLL 10
+#define IMX93_CLK_A55_PERIPH 11
+#define IMX93_CLK_A55_MTR_BUS 12
+#define IMX93_CLK_A55 13
+#define IMX93_CLK_M33 14
+#define IMX93_CLK_BUS_WAKEUP 15
+#define IMX93_CLK_BUS_AON 16
+#define IMX93_CLK_WAKEUP_AXI 17
+#define IMX93_CLK_SWO_TRACE 18
+#define IMX93_CLK_M33_SYSTICK 19
+#define IMX93_CLK_FLEXIO1 20
+#define IMX93_CLK_FLEXIO2 21
+#define IMX93_CLK_LPIT1 22
+#define IMX93_CLK_LPIT2 23
+#define IMX93_CLK_LPTMR1 24
+#define IMX93_CLK_LPTMR2 25
+#define IMX93_CLK_TPM1 26
+#define IMX93_CLK_TPM2 27
+#define IMX93_CLK_TPM3 28
+#define IMX93_CLK_TPM4 29
+#define IMX93_CLK_TPM5 30
+#define IMX93_CLK_TPM6 31
+#define IMX93_CLK_FLEXSPI1 32
+#define IMX93_CLK_CAN1 33
+#define IMX93_CLK_CAN2 34
+#define IMX93_CLK_LPUART1 35
+#define IMX93_CLK_LPUART2 36
+#define IMX93_CLK_LPUART3 37
+#define IMX93_CLK_LPUART4 38
+#define IMX93_CLK_LPUART5 39
+#define IMX93_CLK_LPUART6 40
+#define IMX93_CLK_LPUART7 41
+#define IMX93_CLK_LPUART8 42
+#define IMX93_CLK_LPI2C1 43
+#define IMX93_CLK_LPI2C2 44
+#define IMX93_CLK_LPI2C3 45
+#define IMX93_CLK_LPI2C4 46
+#define IMX93_CLK_LPI2C5 47
+#define IMX93_CLK_LPI2C6 48
+#define IMX93_CLK_LPI2C7 49
+#define IMX93_CLK_LPI2C8 50
+#define IMX93_CLK_LPSPI1 51
+#define IMX93_CLK_LPSPI2 52
+#define IMX93_CLK_LPSPI3 53
+#define IMX93_CLK_LPSPI4 54
+#define IMX93_CLK_LPSPI5 55
+#define IMX93_CLK_LPSPI6 56
+#define IMX93_CLK_LPSPI7 57
+#define IMX93_CLK_LPSPI8 58
+#define IMX93_CLK_I3C1 59
+#define IMX93_CLK_I3C2 60
+#define IMX93_CLK_USDHC1 61
+#define IMX93_CLK_USDHC2 62
+#define IMX93_CLK_USDHC3 63
+#define IMX93_CLK_SAI1 64
+#define IMX93_CLK_SAI2 65
+#define IMX93_CLK_SAI3 66
+#define IMX93_CLK_CCM_CKO1 67
+#define IMX93_CLK_CCM_CKO2 68
+#define IMX93_CLK_CCM_CKO3 69
+#define IMX93_CLK_CCM_CKO4 70
+#define IMX93_CLK_HSIO 71
+#define IMX93_CLK_HSIO_USB_TEST_60M 72
+#define IMX93_CLK_HSIO_ACSCAN_80M 73
+#define IMX93_CLK_HSIO_ACSCAN_480M 74
+#define IMX93_CLK_ML_APB 75
+#define IMX93_CLK_ML 76
+#define IMX93_CLK_MEDIA_AXI 77
+#define IMX93_CLK_MEDIA_APB 78
+#define IMX93_CLK_MEDIA_LDB 79
+#define IMX93_CLK_MEDIA_DISP_PIX 80
+#define IMX93_CLK_CAM_PIX 81
+#define IMX93_CLK_MIPI_TEST_BYTE 82
+#define IMX93_CLK_MIPI_PHY_CFG 83
+#define IMX93_CLK_ADC 84
+#define IMX93_CLK_PDM 85
+#define IMX93_CLK_TSTMR1 86
+#define IMX93_CLK_TSTMR2 87
+#define IMX93_CLK_MQS1 88
+#define IMX93_CLK_MQS2 89
+#define IMX93_CLK_AUDIO_XCVR 90
+#define IMX93_CLK_SPDIF 91
+#define IMX93_CLK_ENET 92
+#define IMX93_CLK_ENET_TIMER1 93
+#define IMX93_CLK_ENET_TIMER2 94
+#define IMX93_CLK_ENET_REF 95
+#define IMX93_CLK_ENET_REF_PHY 96
+#define IMX93_CLK_I3C1_SLOW 97
+#define IMX93_CLK_I3C2_SLOW 98
+#define IMX93_CLK_USB_PHY_BURUNIN 99
+#define IMX93_CLK_PAL_CAME_SCAN 100
+#define IMX93_CLK_A55_GATE 101
+#define IMX93_CLK_CM33_GATE 102
+#define IMX93_CLK_ADC1_GATE 103
+#define IMX93_CLK_WDOG1_GATE 104
+#define IMX93_CLK_WDOG2_GATE 105
+#define IMX93_CLK_WDOG3_GATE 106
+#define IMX93_CLK_WDOG4_GATE 107
+#define IMX93_CLK_WDOG5_GATE 108
+#define IMX93_CLK_SEMA1_GATE 109
+#define IMX93_CLK_SEMA2_GATE 110
+#define IMX93_CLK_MU_A_GATE 111
+#define IMX93_CLK_MU_B_GATE 112
+#define IMX93_CLK_EDMA1_GATE 113
+#define IMX93_CLK_EDMA2_GATE 114
+#define IMX93_CLK_FLEXSPI1_GATE 115
+#define IMX93_CLK_GPIO1_GATE 116
+#define IMX93_CLK_GPIO2_GATE 117
+#define IMX93_CLK_GPIO3_GATE 118
+#define IMX93_CLK_GPIO4_GATE 119
+#define IMX93_CLK_FLEXIO1_GATE 120
+#define IMX93_CLK_FLEXIO2_GATE 121
+#define IMX93_CLK_LPIT1_GATE 122
+#define IMX93_CLK_LPIT2_GATE 123
+#define IMX93_CLK_LPTMR1_GATE 124
+#define IMX93_CLK_LPTMR2_GATE 125
+#define IMX93_CLK_TPM1_GATE 126
+#define IMX93_CLK_TPM2_GATE 127
+#define IMX93_CLK_TPM3_GATE 128
+#define IMX93_CLK_TPM4_GATE 129
+#define IMX93_CLK_TPM5_GATE 130
+#define IMX93_CLK_TPM6_GATE 131
+#define IMX93_CLK_CAN1_GATE 132
+#define IMX93_CLK_CAN2_GATE 133
+#define IMX93_CLK_LPUART1_GATE 134
+#define IMX93_CLK_LPUART2_GATE 135
+#define IMX93_CLK_LPUART3_GATE 136
+#define IMX93_CLK_LPUART4_GATE 137
+#define IMX93_CLK_LPUART5_GATE 138
+#define IMX93_CLK_LPUART6_GATE 139
+#define IMX93_CLK_LPUART7_GATE 140
+#define IMX93_CLK_LPUART8_GATE 141
+#define IMX93_CLK_LPI2C1_GATE 142
+#define IMX93_CLK_LPI2C2_GATE 143
+#define IMX93_CLK_LPI2C3_GATE 144
+#define IMX93_CLK_LPI2C4_GATE 145
+#define IMX93_CLK_LPI2C5_GATE 146
+#define IMX93_CLK_LPI2C6_GATE 147
+#define IMX93_CLK_LPI2C7_GATE 148
+#define IMX93_CLK_LPI2C8_GATE 149
+#define IMX93_CLK_LPSPI1_GATE 150
+#define IMX93_CLK_LPSPI2_GATE 151
+#define IMX93_CLK_LPSPI3_GATE 152
+#define IMX93_CLK_LPSPI4_GATE 153
+#define IMX93_CLK_LPSPI5_GATE 154
+#define IMX93_CLK_LPSPI6_GATE 155
+#define IMX93_CLK_LPSPI7_GATE 156
+#define IMX93_CLK_LPSPI8_GATE 157
+#define IMX93_CLK_I3C1_GATE 158
+#define IMX93_CLK_I3C2_GATE 159
+#define IMX93_CLK_USDHC1_GATE 160
+#define IMX93_CLK_USDHC2_GATE 161
+#define IMX93_CLK_USDHC3_GATE 162
+#define IMX93_CLK_SAI1_GATE 163
+#define IMX93_CLK_SAI2_GATE 164
+#define IMX93_CLK_SAI3_GATE 165
+#define IMX93_CLK_MIPI_CSI_GATE 166
+#define IMX93_CLK_MIPI_DSI_GATE 167
+#define IMX93_CLK_LVDS_GATE 168
+#define IMX93_CLK_LCDIF_GATE 169
+#define IMX93_CLK_PXP_GATE 170
+#define IMX93_CLK_ISI_GATE 171
+#define IMX93_CLK_NIC_MEDIA_GATE 172
+#define IMX93_CLK_USB_CONTROLLER_GATE 173
+#define IMX93_CLK_USB_TEST_60M_GATE 174
+#define IMX93_CLK_HSIO_TROUT_24M_GATE 175
+#define IMX93_CLK_PDM_GATE 176
+#define IMX93_CLK_MQS1_GATE 177
+#define IMX93_CLK_MQS2_GATE 178
+#define IMX93_CLK_AUD_XCVR_GATE 179
+#define IMX93_CLK_SPDIF_GATE 180
+#define IMX93_CLK_HSIO_32K_GATE 181
+#define IMX93_CLK_ENET1_GATE 182
+#define IMX93_CLK_ENET_QOS_GATE 183
+#define IMX93_CLK_SYS_CNT_GATE 184
+#define IMX93_CLK_TSTMR1_GATE 185
+#define IMX93_CLK_TSTMR2_GATE 186
+#define IMX93_CLK_TMC_GATE 187
+#define IMX93_CLK_PMRO_GATE 188
+#define IMX93_CLK_32K 189
+#define IMX93_CLK_SAI1_IPG 190
+#define IMX93_CLK_SAI2_IPG 191
+#define IMX93_CLK_SAI3_IPG 192
+#define IMX93_CLK_MU1_A_GATE 193
+#define IMX93_CLK_MU1_B_GATE 194
+#define IMX93_CLK_MU2_A_GATE 195
+#define IMX93_CLK_MU2_B_GATE 196
+#define IMX93_CLK_END 197
+
+#endif
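Since the i.MX93 header exposes both leaf clocks and their PLL sources through one provider, a board can reparent and rate-set a root such as the display pixel clock with the generic assigned-clocks bindings. A hedged sketch: the clk label, node, compatible and the 148.5 MHz rate are illustrative assumptions, not part of this patch:

#include <dt-bindings/clock/imx93-clock.h>

lcdif: display-controller@4ae30000 {
	compatible = "fsl,imx93-lcdif";
	reg = <0x4ae30000 0x10000>;
	clocks = <&clk IMX93_CLK_MEDIA_DISP_PIX>;
	/* steer the pixel root to the video PLL and pin its rate */
	assigned-clocks = <&clk IMX93_CLK_MEDIA_DISP_PIX>;
	assigned-clock-parents = <&clk IMX93_CLK_VIDEO_PLL>;
	assigned-clock-rates = <148500000>;
};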
diff --git a/include/dt-bindings/clock/imxrt1050-clock.h b/include/dt-bindings/clock/imxrt1050-clock.h
new file mode 100644
index 000000000000..93bef0832d16
--- /dev/null
+++ b/include/dt-bindings/clock/imxrt1050-clock.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright(C) 2019
+ * Author(s): Giulio Benetti <giulio.benetti@benettiengineering.com>
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMXRT1050_H
+#define __DT_BINDINGS_CLOCK_IMXRT1050_H
+
+#define IMXRT1050_CLK_DUMMY 0
+#define IMXRT1050_CLK_CKIL 1
+#define IMXRT1050_CLK_CKIH 2
+#define IMXRT1050_CLK_OSC 3
+#define IMXRT1050_CLK_PLL2_PFD0_352M 4
+#define IMXRT1050_CLK_PLL2_PFD1_594M 5
+#define IMXRT1050_CLK_PLL2_PFD2_396M 6
+#define IMXRT1050_CLK_PLL3_PFD0_720M 7
+#define IMXRT1050_CLK_PLL3_PFD1_664_62M 8
+#define IMXRT1050_CLK_PLL3_PFD2_508_24M 9
+#define IMXRT1050_CLK_PLL3_PFD3_454_74M 10
+#define IMXRT1050_CLK_PLL2_198M 11
+#define IMXRT1050_CLK_PLL3_120M 12
+#define IMXRT1050_CLK_PLL3_80M 13
+#define IMXRT1050_CLK_PLL3_60M 14
+#define IMXRT1050_CLK_PLL1_BYPASS 15
+#define IMXRT1050_CLK_PLL2_BYPASS 16
+#define IMXRT1050_CLK_PLL3_BYPASS 17
+#define IMXRT1050_CLK_PLL5_BYPASS 19
+#define IMXRT1050_CLK_PLL1_REF_SEL 20
+#define IMXRT1050_CLK_PLL2_REF_SEL 21
+#define IMXRT1050_CLK_PLL3_REF_SEL 22
+#define IMXRT1050_CLK_PLL5_REF_SEL 23
+#define IMXRT1050_CLK_PRE_PERIPH_SEL 24
+#define IMXRT1050_CLK_PERIPH_SEL 25
+#define IMXRT1050_CLK_SEMC_ALT_SEL 26
+#define IMXRT1050_CLK_SEMC_SEL 27
+#define IMXRT1050_CLK_USDHC1_SEL 28
+#define IMXRT1050_CLK_USDHC2_SEL 29
+#define IMXRT1050_CLK_LPUART_SEL 30
+#define IMXRT1050_CLK_LCDIF_SEL 31
+#define IMXRT1050_CLK_VIDEO_POST_DIV_SEL 32
+#define IMXRT1050_CLK_VIDEO_DIV 33
+#define IMXRT1050_CLK_ARM_PODF 34
+#define IMXRT1050_CLK_LPUART_PODF 35
+#define IMXRT1050_CLK_USDHC1_PODF 36
+#define IMXRT1050_CLK_USDHC2_PODF 37
+#define IMXRT1050_CLK_SEMC_PODF 38
+#define IMXRT1050_CLK_AHB_PODF 39
+#define IMXRT1050_CLK_LCDIF_PRED 40
+#define IMXRT1050_CLK_LCDIF_PODF 41
+#define IMXRT1050_CLK_USDHC1 42
+#define IMXRT1050_CLK_USDHC2 43
+#define IMXRT1050_CLK_LPUART1 44
+#define IMXRT1050_CLK_SEMC 45
+#define IMXRT1050_CLK_LCDIF_APB 46
+#define IMXRT1050_CLK_PLL1_ARM 47
+#define IMXRT1050_CLK_PLL2_SYS 48
+#define IMXRT1050_CLK_PLL3_USB_OTG 49
+#define IMXRT1050_CLK_PLL4_AUDIO 50
+#define IMXRT1050_CLK_PLL5_VIDEO 51
+#define IMXRT1050_CLK_PLL6_ENET 52
+#define IMXRT1050_CLK_PLL7_USB_HOST 53
+#define IMXRT1050_CLK_LCDIF_PIX 54
+#define IMXRT1050_CLK_USBOH3 55
+#define IMXRT1050_CLK_IPG_PDOF 56
+#define IMXRT1050_CLK_PER_CLK_SEL 57
+#define IMXRT1050_CLK_PER_PDOF 58
+#define IMXRT1050_CLK_DMA 59
+#define IMXRT1050_CLK_DMA_MUX 60
+#define IMXRT1050_CLK_END 61
+
+#endif /* __DT_BINDINGS_CLOCK_IMXRT1050_H */
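One quirk to keep in mind: the header mixes the spellings PDOF (IPG_PDOF, PER_PDOF) and PODF (ARM_PODF, AHB_PODF and friends), and device trees must use whichever form the header defines. A consumer sketch with assumed node details (address, compatible, the clks label and the clock-names are illustrative, not from this patch):

#include <dt-bindings/clock/imxrt1050-clock.h>

usdhc1: mmc@402c0000 {
	compatible = "fsl,imxrt1050-usdhc";
	reg = <0x402c0000 0x4000>;
	clocks = <&clks IMXRT1050_CLK_IPG_PDOF>,	/* note: PDOF, as defined above */
		 <&clks IMXRT1050_CLK_AHB_PODF>,
		 <&clks IMXRT1050_CLK_USDHC1>;
	clock-names = "ipg", "ahb", "per";
};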
diff --git a/include/dt-bindings/clock/jz4725b-cgu.h b/include/dt-bindings/clock/ingenic,jz4725b-cgu.h
index 31f1ab0fe42c..31f1ab0fe42c 100644
--- a/include/dt-bindings/clock/jz4725b-cgu.h
+++ b/include/dt-bindings/clock/ingenic,jz4725b-cgu.h
diff --git a/include/dt-bindings/clock/jz4740-cgu.h b/include/dt-bindings/clock/ingenic,jz4740-cgu.h
index e82d77028581..e82d77028581 100644
--- a/include/dt-bindings/clock/jz4740-cgu.h
+++ b/include/dt-bindings/clock/ingenic,jz4740-cgu.h
diff --git a/include/dt-bindings/clock/ingenic,jz4760-cgu.h b/include/dt-bindings/clock/ingenic,jz4760-cgu.h
new file mode 100644
index 000000000000..9fb04ebac6de
--- /dev/null
+++ b/include/dt-bindings/clock/ingenic,jz4760-cgu.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides clock numbers for the ingenic,jz4760-cgu DT binding.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_JZ4760_CGU_H__
+#define __DT_BINDINGS_CLOCK_JZ4760_CGU_H__
+
+#define JZ4760_CLK_EXT 0
+#define JZ4760_CLK_OSC32K 1
+#define JZ4760_CLK_PLL0 2
+#define JZ4760_CLK_PLL0_HALF 3
+#define JZ4760_CLK_PLL1 4
+#define JZ4760_CLK_CCLK 5
+#define JZ4760_CLK_HCLK 6
+#define JZ4760_CLK_SCLK 7
+#define JZ4760_CLK_H2CLK 8
+#define JZ4760_CLK_MCLK 9
+#define JZ4760_CLK_PCLK 10
+#define JZ4760_CLK_MMC_MUX 11
+#define JZ4760_CLK_MMC0 12
+#define JZ4760_CLK_MMC1 13
+#define JZ4760_CLK_MMC2 14
+#define JZ4760_CLK_CIM 15
+#define JZ4760_CLK_UHC 16
+#define JZ4760_CLK_GPU 17
+#define JZ4760_CLK_GPS 18
+#define JZ4760_CLK_SSI_MUX 19
+#define JZ4760_CLK_PCM 20
+#define JZ4760_CLK_I2S 21
+#define JZ4760_CLK_OTG 22
+#define JZ4760_CLK_SSI0 23
+#define JZ4760_CLK_SSI1 24
+#define JZ4760_CLK_SSI2 25
+#define JZ4760_CLK_DMA 26
+#define JZ4760_CLK_I2C0 27
+#define JZ4760_CLK_I2C1 28
+#define JZ4760_CLK_UART0 29
+#define JZ4760_CLK_UART1 30
+#define JZ4760_CLK_UART2 31
+#define JZ4760_CLK_UART3 32
+#define JZ4760_CLK_IPU 33
+#define JZ4760_CLK_ADC 34
+#define JZ4760_CLK_AIC 35
+#define JZ4760_CLK_VPU 36
+#define JZ4760_CLK_UHC_PHY 37
+#define JZ4760_CLK_OTG_PHY 38
+#define JZ4760_CLK_EXT512 39
+#define JZ4760_CLK_RTC 40
+#define JZ4760_CLK_LPCLK_DIV 41
+#define JZ4760_CLK_TVE 42
+#define JZ4760_CLK_LPCLK 43
+#define JZ4760_CLK_MDMA 44
+#define JZ4760_CLK_BDMA 45
+
+#endif /* __DT_BINDINGS_CLOCK_JZ4760_CGU_H__ */
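These IDs are plain indices into the single jz4760 CGU provider. A consumer sketch following the usual Ingenic UART pattern (the cgu and ext labels, address and clock-names are assumptions, not defined by this patch):

#include <dt-bindings/clock/ingenic,jz4760-cgu.h>

uart0: serial@10030000 {
	compatible = "ingenic,jz4760-uart";
	reg = <0x10030000 0x100>;
	clocks = <&ext>, <&cgu JZ4760_CLK_UART0>;
	clock-names = "baud", "module";
};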
diff --git a/include/dt-bindings/clock/ingenic,jz4770-cgu.h b/include/dt-bindings/clock/ingenic,jz4770-cgu.h
new file mode 100644
index 000000000000..0b475e8ae321
--- /dev/null
+++ b/include/dt-bindings/clock/ingenic,jz4770-cgu.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides clock numbers for the ingenic,jz4770-cgu DT binding.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_JZ4770_CGU_H__
+#define __DT_BINDINGS_CLOCK_JZ4770_CGU_H__
+
+#define JZ4770_CLK_EXT 0
+#define JZ4770_CLK_OSC32K 1
+#define JZ4770_CLK_PLL0 2
+#define JZ4770_CLK_PLL1 3
+#define JZ4770_CLK_CCLK 4
+#define JZ4770_CLK_H0CLK 5
+#define JZ4770_CLK_H1CLK 6
+#define JZ4770_CLK_H2CLK 7
+#define JZ4770_CLK_C1CLK 8
+#define JZ4770_CLK_PCLK 9
+#define JZ4770_CLK_MMC0_MUX 10
+#define JZ4770_CLK_MMC0 11
+#define JZ4770_CLK_MMC1_MUX 12
+#define JZ4770_CLK_MMC1 13
+#define JZ4770_CLK_MMC2_MUX 14
+#define JZ4770_CLK_MMC2 15
+#define JZ4770_CLK_CIM 16
+#define JZ4770_CLK_UHC 17
+#define JZ4770_CLK_GPU 18
+#define JZ4770_CLK_BCH 19
+#define JZ4770_CLK_LPCLK_MUX 20
+#define JZ4770_CLK_GPS 21
+#define JZ4770_CLK_SSI_MUX 22
+#define JZ4770_CLK_PCM_MUX 23
+#define JZ4770_CLK_I2S 24
+#define JZ4770_CLK_OTG 25
+#define JZ4770_CLK_SSI0 26
+#define JZ4770_CLK_SSI1 27
+#define JZ4770_CLK_SSI2 28
+#define JZ4770_CLK_PCM0 29
+#define JZ4770_CLK_PCM1 30
+#define JZ4770_CLK_DMA 31
+#define JZ4770_CLK_I2C0 32
+#define JZ4770_CLK_I2C1 33
+#define JZ4770_CLK_I2C2 34
+#define JZ4770_CLK_UART0 35
+#define JZ4770_CLK_UART1 36
+#define JZ4770_CLK_UART2 37
+#define JZ4770_CLK_UART3 38
+#define JZ4770_CLK_IPU 39
+#define JZ4770_CLK_ADC 40
+#define JZ4770_CLK_AIC 41
+#define JZ4770_CLK_AUX 42
+#define JZ4770_CLK_VPU 43
+#define JZ4770_CLK_UHC_PHY 44
+#define JZ4770_CLK_OTG_PHY 45
+#define JZ4770_CLK_EXT512 46
+#define JZ4770_CLK_RTC 47
+#define JZ4770_CLK_BDMA 48
+
+#endif /* __DT_BINDINGS_CLOCK_JZ4770_CGU_H__ */
diff --git a/include/dt-bindings/clock/ingenic,jz4780-cgu.h b/include/dt-bindings/clock/ingenic,jz4780-cgu.h
new file mode 100644
index 000000000000..85cf8eb5081b
--- /dev/null
+++ b/include/dt-bindings/clock/ingenic,jz4780-cgu.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides clock numbers for the ingenic,jz4780-cgu DT binding.
+ *
+ * They are roughly ordered as:
+ * - external clocks
+ * - PLLs
+ * - muxes/dividers in the order they appear in the jz4780 programmers manual
+ * - gates in order of their bit in the CLKGR* registers
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_JZ4780_CGU_H__
+#define __DT_BINDINGS_CLOCK_JZ4780_CGU_H__
+
+#define JZ4780_CLK_EXCLK 0
+#define JZ4780_CLK_RTCLK 1
+#define JZ4780_CLK_APLL 2
+#define JZ4780_CLK_MPLL 3
+#define JZ4780_CLK_EPLL 4
+#define JZ4780_CLK_VPLL 5
+#define JZ4780_CLK_OTGPHY 6
+#define JZ4780_CLK_SCLKA 7
+#define JZ4780_CLK_CPUMUX 8
+#define JZ4780_CLK_CPU 9
+#define JZ4780_CLK_L2CACHE 10
+#define JZ4780_CLK_AHB0 11
+#define JZ4780_CLK_AHB2PMUX 12
+#define JZ4780_CLK_AHB2 13
+#define JZ4780_CLK_PCLK 14
+#define JZ4780_CLK_DDR 15
+#define JZ4780_CLK_VPU 16
+#define JZ4780_CLK_I2SPLL 17
+#define JZ4780_CLK_I2S 18
+#define JZ4780_CLK_LCD0PIXCLK 19
+#define JZ4780_CLK_LCD1PIXCLK 20
+#define JZ4780_CLK_MSCMUX 21
+#define JZ4780_CLK_MSC0 22
+#define JZ4780_CLK_MSC1 23
+#define JZ4780_CLK_MSC2 24
+#define JZ4780_CLK_UHC 25
+#define JZ4780_CLK_SSIPLL 26
+#define JZ4780_CLK_SSI 27
+#define JZ4780_CLK_CIMMCLK 28
+#define JZ4780_CLK_PCMPLL 29
+#define JZ4780_CLK_PCM 30
+#define JZ4780_CLK_GPU 31
+#define JZ4780_CLK_HDMI 32
+#define JZ4780_CLK_BCH 33
+#define JZ4780_CLK_NEMC 34
+#define JZ4780_CLK_OTG0 35
+#define JZ4780_CLK_SSI0 36
+#define JZ4780_CLK_SMB0 37
+#define JZ4780_CLK_SMB1 38
+#define JZ4780_CLK_SCC 39
+#define JZ4780_CLK_AIC 40
+#define JZ4780_CLK_TSSI0 41
+#define JZ4780_CLK_OWI 42
+#define JZ4780_CLK_KBC 43
+#define JZ4780_CLK_SADC 44
+#define JZ4780_CLK_UART0 45
+#define JZ4780_CLK_UART1 46
+#define JZ4780_CLK_UART2 47
+#define JZ4780_CLK_UART3 48
+#define JZ4780_CLK_SSI1 49
+#define JZ4780_CLK_SSI2 50
+#define JZ4780_CLK_PDMA 51
+#define JZ4780_CLK_GPS 52
+#define JZ4780_CLK_MAC 53
+#define JZ4780_CLK_SMB2 54
+#define JZ4780_CLK_CIM 55
+#define JZ4780_CLK_LCD 56
+#define JZ4780_CLK_TVE 57
+#define JZ4780_CLK_IPU 58
+#define JZ4780_CLK_DDR0 59
+#define JZ4780_CLK_DDR1 60
+#define JZ4780_CLK_SMB3 61
+#define JZ4780_CLK_TSSI1 62
+#define JZ4780_CLK_COMPRESS 63
+#define JZ4780_CLK_AIC1 64
+#define JZ4780_CLK_GPVLC 65
+#define JZ4780_CLK_OTG1 66
+#define JZ4780_CLK_UART4 67
+#define JZ4780_CLK_AHBMON 68
+#define JZ4780_CLK_SMB4 69
+#define JZ4780_CLK_DES 70
+#define JZ4780_CLK_X2D 71
+#define JZ4780_CLK_CORE1 72
+#define JZ4780_CLK_EXCLK_DIV512 73
+#define JZ4780_CLK_RTC 74
+
+#endif /* __DT_BINDINGS_CLOCK_JZ4780_CGU_H__ */
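Because the header names mux points such as JZ4780_CLK_MSCMUX and not just gates, a board can steer parts of the tree with the generic assigned-clocks properties. A sketch assuming the provider carries the label cgu (the label and the MPLL routing choice are illustrative, not from this patch):

#include <dt-bindings/clock/ingenic,jz4780-cgu.h>

&cgu {
	/* run the MSC mux, and hence MSC0..MSC2, from MPLL */
	assigned-clocks = <&cgu JZ4780_CLK_MSCMUX>;
	assigned-clock-parents = <&cgu JZ4780_CLK_MPLL>;
};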
diff --git a/include/dt-bindings/clock/ingenic,sysost.h b/include/dt-bindings/clock/ingenic,sysost.h
new file mode 100644
index 000000000000..d7aa42c08ded
--- /dev/null
+++ b/include/dt-bindings/clock/ingenic,sysost.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides clock numbers for the Ingenic OST DT binding.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_INGENIC_OST_H__
+#define __DT_BINDINGS_CLOCK_INGENIC_OST_H__
+
+#define OST_CLK_PERCPU_TIMER 1
+#define OST_CLK_GLOBAL_TIMER 0
+#define OST_CLK_PERCPU_TIMER0 1
+#define OST_CLK_PERCPU_TIMER1 2
+#define OST_CLK_PERCPU_TIMER2 3
+#define OST_CLK_PERCPU_TIMER3 4
+
+#define OST_CLK_EVENT_TIMER 1
+
+#define OST_CLK_EVENT_TIMER0 0
+#define OST_CLK_EVENT_TIMER1 1
+#define OST_CLK_EVENT_TIMER2 2
+#define OST_CLK_EVENT_TIMER3 3
+#define OST_CLK_EVENT_TIMER4 4
+#define OST_CLK_EVENT_TIMER5 5
+#define OST_CLK_EVENT_TIMER6 6
+#define OST_CLK_EVENT_TIMER7 7
+#define OST_CLK_EVENT_TIMER8 8
+#define OST_CLK_EVENT_TIMER9 9
+#define OST_CLK_EVENT_TIMER10 10
+#define OST_CLK_EVENT_TIMER11 11
+#define OST_CLK_EVENT_TIMER12 12
+#define OST_CLK_EVENT_TIMER13 13
+#define OST_CLK_EVENT_TIMER14 14
+#define OST_CLK_EVENT_TIMER15 15
+
+#endif /* __DT_BINDINGS_CLOCK_INGENIC_OST_H__ */
diff --git a/include/dt-bindings/clock/ingenic,x1000-cgu.h b/include/dt-bindings/clock/ingenic,x1000-cgu.h
new file mode 100644
index 000000000000..f187e0719fd3
--- /dev/null
+++ b/include/dt-bindings/clock/ingenic,x1000-cgu.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides clock numbers for the ingenic,x1000-cgu DT binding.
+ *
+ * They are roughly ordered as:
+ * - external clocks
+ * - PLLs
+ * - muxes/dividers in the order they appear in the x1000 programmers manual
+ * - gates in order of their bit in the CLKGR* registers
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_X1000_CGU_H__
+#define __DT_BINDINGS_CLOCK_X1000_CGU_H__
+
+#define X1000_CLK_EXCLK 0
+#define X1000_CLK_RTCLK 1
+#define X1000_CLK_APLL 2
+#define X1000_CLK_MPLL 3
+#define X1000_CLK_OTGPHY 4
+#define X1000_CLK_SCLKA 5
+#define X1000_CLK_CPUMUX 6
+#define X1000_CLK_CPU 7
+#define X1000_CLK_L2CACHE 8
+#define X1000_CLK_AHB0 9
+#define X1000_CLK_AHB2PMUX 10
+#define X1000_CLK_AHB2 11
+#define X1000_CLK_PCLK 12
+#define X1000_CLK_DDR 13
+#define X1000_CLK_MAC 14
+#define X1000_CLK_LCD 15
+#define X1000_CLK_MSCMUX 16
+#define X1000_CLK_MSC0 17
+#define X1000_CLK_MSC1 18
+#define X1000_CLK_OTG 19
+#define X1000_CLK_SSIPLL 20
+#define X1000_CLK_SSIPLL_DIV2 21
+#define X1000_CLK_SSIMUX 22
+#define X1000_CLK_EMC 23
+#define X1000_CLK_EFUSE 24
+#define X1000_CLK_SFC 25
+#define X1000_CLK_I2C0 26
+#define X1000_CLK_I2C1 27
+#define X1000_CLK_I2C2 28
+#define X1000_CLK_UART0 29
+#define X1000_CLK_UART1 30
+#define X1000_CLK_UART2 31
+#define X1000_CLK_TCU 32
+#define X1000_CLK_SSI 33
+#define X1000_CLK_OST 34
+#define X1000_CLK_PDMA 35
+#define X1000_CLK_EXCLK_DIV512 36
+#define X1000_CLK_RTC 37
+
+#endif /* __DT_BINDINGS_CLOCK_X1000_CGU_H__ */
diff --git a/include/dt-bindings/clock/ingenic,x1830-cgu.h b/include/dt-bindings/clock/ingenic,x1830-cgu.h
new file mode 100644
index 000000000000..88455376a950
--- /dev/null
+++ b/include/dt-bindings/clock/ingenic,x1830-cgu.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides clock numbers for the ingenic,x1830-cgu DT binding.
+ *
+ * They are roughly ordered as:
+ * - external clocks
+ * - PLLs
+ * - muxes/dividers in the order they appear in the x1830 programmers manual
+ * - gates in order of their bit in the CLKGR* registers
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_X1830_CGU_H__
+#define __DT_BINDINGS_CLOCK_X1830_CGU_H__
+
+#define X1830_CLK_EXCLK 0
+#define X1830_CLK_RTCLK 1
+#define X1830_CLK_APLL 2
+#define X1830_CLK_MPLL 3
+#define X1830_CLK_EPLL 4
+#define X1830_CLK_VPLL 5
+#define X1830_CLK_OTGPHY 6
+#define X1830_CLK_SCLKA 7
+#define X1830_CLK_CPUMUX 8
+#define X1830_CLK_CPU 9
+#define X1830_CLK_L2CACHE 10
+#define X1830_CLK_AHB0 11
+#define X1830_CLK_AHB2PMUX 12
+#define X1830_CLK_AHB2 13
+#define X1830_CLK_PCLK 14
+#define X1830_CLK_DDR 15
+#define X1830_CLK_MAC 16
+#define X1830_CLK_LCD 17
+#define X1830_CLK_MSCMUX 18
+#define X1830_CLK_MSC0 19
+#define X1830_CLK_MSC1 20
+#define X1830_CLK_SSIPLL 21
+#define X1830_CLK_SSIPLL_DIV2 22
+#define X1830_CLK_SSIMUX 23
+#define X1830_CLK_EMC 24
+#define X1830_CLK_EFUSE 25
+#define X1830_CLK_OTG 26
+#define X1830_CLK_SSI0 27
+#define X1830_CLK_SMB0 28
+#define X1830_CLK_SMB1 29
+#define X1830_CLK_SMB2 30
+#define X1830_CLK_UART0 31
+#define X1830_CLK_UART1 32
+#define X1830_CLK_SSI1 33
+#define X1830_CLK_SFC 34
+#define X1830_CLK_PDMA 35
+#define X1830_CLK_TCU 36
+#define X1830_CLK_DTRNG 37
+#define X1830_CLK_OST 38
+#define X1830_CLK_EXCLK_DIV512 39
+#define X1830_CLK_RTC 40
+
+#endif /* __DT_BINDINGS_CLOCK_X1830_CGU_H__ */
diff --git a/include/dt-bindings/clock/intel,lgm-clk.h b/include/dt-bindings/clock/intel,lgm-clk.h
new file mode 100644
index 000000000000..92f5be6490bb
--- /dev/null
+++ b/include/dt-bindings/clock/intel,lgm-clk.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2020 Intel Corporation.
+ * Lei Chuanhua <Chuanhua.lei@intel.com>
+ * Zhu Yixin <Yixin.zhu@intel.com>
+ */
+#ifndef __INTEL_LGM_CLK_H
+#define __INTEL_LGM_CLK_H
+
+/* PLL clocks */
+#define LGM_CLK_OSC 1
+#define LGM_CLK_PLLPP 2
+#define LGM_CLK_PLL2 3
+#define LGM_CLK_PLL0CZ 4
+#define LGM_CLK_PLL0B 5
+#define LGM_CLK_PLL1 6
+#define LGM_CLK_LJPLL3 7
+#define LGM_CLK_LJPLL4 8
+#define LGM_CLK_PLL0CM0 9
+#define LGM_CLK_PLL0CM1 10
+
+/* clocks from PLLs */
+
+/* ROPLL clocks */
+#define LGM_CLK_PP_HW 15
+#define LGM_CLK_PP_UC 16
+#define LGM_CLK_PP_FXD 17
+#define LGM_CLK_PP_TBM 18
+
+/* PLL2 clocks */
+#define LGM_CLK_DDR 20
+
+/* PLL0CZ */
+#define LGM_CLK_CM 25
+#define LGM_CLK_IC 26
+#define LGM_CLK_SDXC3 27
+
+/* PLL0B */
+#define LGM_CLK_NGI 30
+#define LGM_CLK_NOC4 31
+#define LGM_CLK_SW 32
+#define LGM_CLK_QSPI 33
+#define LGM_CLK_CQEM LGM_CLK_SW
+#define LGM_CLK_EMMC5 LGM_CLK_NOC4
+
+/* PLL1 */
+#define LGM_CLK_CT 35
+#define LGM_CLK_DSP 36
+#define LGM_CLK_VIF 37
+
+/* LJPLL3 */
+#define LGM_CLK_CML 40
+#define LGM_CLK_SERDES 41
+#define LGM_CLK_POOL 42
+#define LGM_CLK_PTP 43
+
+/* LJPLL4 */
+#define LGM_CLK_PCIE 45
+#define LGM_CLK_SATA LGM_CLK_PCIE
+
+/* PLL0CM0 */
+#define LGM_CLK_CPU0 50
+
+/* PLL0CM1 */
+#define LGM_CLK_CPU1 55
+
+/* Miscellaneous clocks */
+#define LGM_CLK_EMMC4 60
+#define LGM_CLK_SDXC2 61
+#define LGM_CLK_EMMC 62
+#define LGM_CLK_SDXC 63
+#define LGM_CLK_SLIC 64
+#define LGM_CLK_DCL 65
+#define LGM_CLK_DOCSIS 66
+#define LGM_CLK_PCM 67
+#define LGM_CLK_DDR_PHY 68
+#define LGM_CLK_PONDEF 69
+#define LGM_CLK_PL25M 70
+#define LGM_CLK_PL10M 71
+#define LGM_CLK_PL1544K 72
+#define LGM_CLK_PL2048K 73
+#define LGM_CLK_PL8K 74
+#define LGM_CLK_PON_NTR 75
+#define LGM_CLK_SYNC0 76
+#define LGM_CLK_SYNC1 77
+#define LGM_CLK_PROGDIV 78
+#define LGM_CLK_OD0 79
+#define LGM_CLK_OD1 80
+#define LGM_CLK_CBPHY0 81
+#define LGM_CLK_CBPHY1 82
+#define LGM_CLK_CBPHY2 83
+#define LGM_CLK_CBPHY3 84
+
+/* Gate clocks */
+/* Gate CLK0 */
+#define LGM_GCLK_C55 100
+#define LGM_GCLK_QSPI 101
+#define LGM_GCLK_EIP197 102
+#define LGM_GCLK_VAULT 103
+#define LGM_GCLK_TOE 104
+#define LGM_GCLK_SDXC 105
+#define LGM_GCLK_EMMC 106
+#define LGM_GCLK_SPI_DBG 107
+#define LGM_GCLK_DMA3 108
+
+/* Gate CLK1 */
+#define LGM_GCLK_DMA0 120
+#define LGM_GCLK_LEDC0 121
+#define LGM_GCLK_LEDC1 122
+#define LGM_GCLK_I2S0 123
+#define LGM_GCLK_I2S1 124
+#define LGM_GCLK_EBU 125
+#define LGM_GCLK_PWM 126
+#define LGM_GCLK_I2C0 127
+#define LGM_GCLK_I2C1 128
+#define LGM_GCLK_I2C2 129
+#define LGM_GCLK_I2C3 130
+#define LGM_GCLK_SSC0 131
+#define LGM_GCLK_SSC1 132
+#define LGM_GCLK_SSC2 133
+#define LGM_GCLK_SSC3 134
+#define LGM_GCLK_GPTC0 135
+#define LGM_GCLK_GPTC1 136
+#define LGM_GCLK_GPTC2 137
+#define LGM_GCLK_GPTC3 138
+#define LGM_GCLK_ASC0 139
+#define LGM_GCLK_ASC1 140
+#define LGM_GCLK_ASC2 141
+#define LGM_GCLK_ASC3 142
+#define LGM_GCLK_PCM0 143
+#define LGM_GCLK_PCM1 144
+#define LGM_GCLK_PCM2 145
+
+/* Gate CLK2 */
+#define LGM_GCLK_PCIE10 150
+#define LGM_GCLK_PCIE11 151
+#define LGM_GCLK_PCIE30 152
+#define LGM_GCLK_PCIE31 153
+#define LGM_GCLK_PCIE20 154
+#define LGM_GCLK_PCIE21 155
+#define LGM_GCLK_PCIE40 156
+#define LGM_GCLK_PCIE41 157
+#define LGM_GCLK_XPCS0 158
+#define LGM_GCLK_XPCS1 159
+#define LGM_GCLK_XPCS2 160
+#define LGM_GCLK_XPCS3 161
+#define LGM_GCLK_SATA0 162
+#define LGM_GCLK_SATA1 163
+#define LGM_GCLK_SATA2 164
+#define LGM_GCLK_SATA3 165
+
+/* Gate CLK3 */
+#define LGM_GCLK_ARCEM4 170
+#define LGM_GCLK_IDMAR1 171
+#define LGM_GCLK_IDMAT0 172
+#define LGM_GCLK_IDMAT1 173
+#define LGM_GCLK_IDMAT2 174
+#define LGM_GCLK_PPV4 175
+#define LGM_GCLK_GSWIPO 176
+#define LGM_GCLK_CQEM 177
+#define LGM_GCLK_XPCS5 178
+#define LGM_GCLK_USB1 179
+#define LGM_GCLK_USB2 180
+
+#endif /* __INTEL_LGM_CLK_H */
diff --git a/include/dt-bindings/clock/jz4770-cgu.h b/include/dt-bindings/clock/jz4770-cgu.h
deleted file mode 100644
index d68a7695a1f8..000000000000
--- a/include/dt-bindings/clock/jz4770-cgu.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This header provides clock numbers for the ingenic,jz4770-cgu DT binding.
- */
-
-#ifndef __DT_BINDINGS_CLOCK_JZ4770_CGU_H__
-#define __DT_BINDINGS_CLOCK_JZ4770_CGU_H__
-
-#define JZ4770_CLK_EXT 0
-#define JZ4770_CLK_OSC32K 1
-#define JZ4770_CLK_PLL0 2
-#define JZ4770_CLK_PLL1 3
-#define JZ4770_CLK_CCLK 4
-#define JZ4770_CLK_H0CLK 5
-#define JZ4770_CLK_H1CLK 6
-#define JZ4770_CLK_H2CLK 7
-#define JZ4770_CLK_C1CLK 8
-#define JZ4770_CLK_PCLK 9
-#define JZ4770_CLK_MMC0_MUX 10
-#define JZ4770_CLK_MMC0 11
-#define JZ4770_CLK_MMC1_MUX 12
-#define JZ4770_CLK_MMC1 13
-#define JZ4770_CLK_MMC2_MUX 14
-#define JZ4770_CLK_MMC2 15
-#define JZ4770_CLK_CIM 16
-#define JZ4770_CLK_UHC 17
-#define JZ4770_CLK_GPU 18
-#define JZ4770_CLK_BCH 19
-#define JZ4770_CLK_LPCLK_MUX 20
-#define JZ4770_CLK_GPS 21
-#define JZ4770_CLK_SSI_MUX 22
-#define JZ4770_CLK_PCM_MUX 23
-#define JZ4770_CLK_I2S 24
-#define JZ4770_CLK_OTG 25
-#define JZ4770_CLK_SSI0 26
-#define JZ4770_CLK_SSI1 27
-#define JZ4770_CLK_SSI2 28
-#define JZ4770_CLK_PCM0 29
-#define JZ4770_CLK_PCM1 30
-#define JZ4770_CLK_DMA 31
-#define JZ4770_CLK_I2C0 32
-#define JZ4770_CLK_I2C1 33
-#define JZ4770_CLK_I2C2 34
-#define JZ4770_CLK_UART0 35
-#define JZ4770_CLK_UART1 36
-#define JZ4770_CLK_UART2 37
-#define JZ4770_CLK_UART3 38
-#define JZ4770_CLK_IPU 39
-#define JZ4770_CLK_ADC 40
-#define JZ4770_CLK_AIC 41
-#define JZ4770_CLK_AUX 42
-#define JZ4770_CLK_VPU 43
-#define JZ4770_CLK_UHC_PHY 44
-#define JZ4770_CLK_OTG_PHY 45
-#define JZ4770_CLK_EXT512 46
-#define JZ4770_CLK_RTC 47
-
-#endif /* __DT_BINDINGS_CLOCK_JZ4770_CGU_H__ */
diff --git a/include/dt-bindings/clock/jz4780-cgu.h b/include/dt-bindings/clock/jz4780-cgu.h
deleted file mode 100644
index 1859ce53ee38..000000000000
--- a/include/dt-bindings/clock/jz4780-cgu.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This header provides clock numbers for the ingenic,jz4780-cgu DT binding.
- *
- * They are roughly ordered as:
- * - external clocks
- * - PLLs
- * - muxes/dividers in the order they appear in the jz4780 programmers manual
- * - gates in order of their bit in the CLKGR* registers
- */
-
-#ifndef __DT_BINDINGS_CLOCK_JZ4780_CGU_H__
-#define __DT_BINDINGS_CLOCK_JZ4780_CGU_H__
-
-#define JZ4780_CLK_EXCLK 0
-#define JZ4780_CLK_RTCLK 1
-#define JZ4780_CLK_APLL 2
-#define JZ4780_CLK_MPLL 3
-#define JZ4780_CLK_EPLL 4
-#define JZ4780_CLK_VPLL 5
-#define JZ4780_CLK_OTGPHY 6
-#define JZ4780_CLK_SCLKA 7
-#define JZ4780_CLK_CPUMUX 8
-#define JZ4780_CLK_CPU 9
-#define JZ4780_CLK_L2CACHE 10
-#define JZ4780_CLK_AHB0 11
-#define JZ4780_CLK_AHB2PMUX 12
-#define JZ4780_CLK_AHB2 13
-#define JZ4780_CLK_PCLK 14
-#define JZ4780_CLK_DDR 15
-#define JZ4780_CLK_VPU 16
-#define JZ4780_CLK_I2SPLL 17
-#define JZ4780_CLK_I2S 18
-#define JZ4780_CLK_LCD0PIXCLK 19
-#define JZ4780_CLK_LCD1PIXCLK 20
-#define JZ4780_CLK_MSCMUX 21
-#define JZ4780_CLK_MSC0 22
-#define JZ4780_CLK_MSC1 23
-#define JZ4780_CLK_MSC2 24
-#define JZ4780_CLK_UHC 25
-#define JZ4780_CLK_SSIPLL 26
-#define JZ4780_CLK_SSI 27
-#define JZ4780_CLK_CIMMCLK 28
-#define JZ4780_CLK_PCMPLL 29
-#define JZ4780_CLK_PCM 30
-#define JZ4780_CLK_GPU 31
-#define JZ4780_CLK_HDMI 32
-#define JZ4780_CLK_BCH 33
-#define JZ4780_CLK_NEMC 34
-#define JZ4780_CLK_OTG0 35
-#define JZ4780_CLK_SSI0 36
-#define JZ4780_CLK_SMB0 37
-#define JZ4780_CLK_SMB1 38
-#define JZ4780_CLK_SCC 39
-#define JZ4780_CLK_AIC 40
-#define JZ4780_CLK_TSSI0 41
-#define JZ4780_CLK_OWI 42
-#define JZ4780_CLK_KBC 43
-#define JZ4780_CLK_SADC 44
-#define JZ4780_CLK_UART0 45
-#define JZ4780_CLK_UART1 46
-#define JZ4780_CLK_UART2 47
-#define JZ4780_CLK_UART3 48
-#define JZ4780_CLK_SSI1 49
-#define JZ4780_CLK_SSI2 50
-#define JZ4780_CLK_PDMA 51
-#define JZ4780_CLK_GPS 52
-#define JZ4780_CLK_MAC 53
-#define JZ4780_CLK_SMB2 54
-#define JZ4780_CLK_CIM 55
-#define JZ4780_CLK_LCD 56
-#define JZ4780_CLK_TVE 57
-#define JZ4780_CLK_IPU 58
-#define JZ4780_CLK_DDR0 59
-#define JZ4780_CLK_DDR1 60
-#define JZ4780_CLK_SMB3 61
-#define JZ4780_CLK_TSSI1 62
-#define JZ4780_CLK_COMPRESS 63
-#define JZ4780_CLK_AIC1 64
-#define JZ4780_CLK_GPVLC 65
-#define JZ4780_CLK_OTG1 66
-#define JZ4780_CLK_UART4 67
-#define JZ4780_CLK_AHBMON 68
-#define JZ4780_CLK_SMB4 69
-#define JZ4780_CLK_DES 70
-#define JZ4780_CLK_X2D 71
-#define JZ4780_CLK_CORE1 72
-
-#endif /* __DT_BINDINGS_CLOCK_JZ4780_CGU_H__ */
diff --git a/include/dt-bindings/clock/k210-clk.h b/include/dt-bindings/clock/k210-clk.h
new file mode 100644
index 000000000000..b2de702cbf75
--- /dev/null
+++ b/include/dt-bindings/clock/k210-clk.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019-20 Sean Anderson <seanga2@gmail.com>
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ */
+#ifndef CLOCK_K210_CLK_H
+#define CLOCK_K210_CLK_H
+
+/*
+ * Kendryte K210 SoC clock identifiers (arbitrary values).
+ */
+#define K210_CLK_CPU 0
+#define K210_CLK_SRAM0 1
+#define K210_CLK_SRAM1 2
+#define K210_CLK_AI 3
+#define K210_CLK_DMA 4
+#define K210_CLK_FFT 5
+#define K210_CLK_ROM 6
+#define K210_CLK_DVP 7
+#define K210_CLK_APB0 8
+#define K210_CLK_APB1 9
+#define K210_CLK_APB2 10
+#define K210_CLK_I2S0 11
+#define K210_CLK_I2S1 12
+#define K210_CLK_I2S2 13
+#define K210_CLK_I2S0_M 14
+#define K210_CLK_I2S1_M 15
+#define K210_CLK_I2S2_M 16
+#define K210_CLK_WDT0 17
+#define K210_CLK_WDT1 18
+#define K210_CLK_SPI0 19
+#define K210_CLK_SPI1 20
+#define K210_CLK_SPI2 21
+#define K210_CLK_I2C0 22
+#define K210_CLK_I2C1 23
+#define K210_CLK_I2C2 24
+#define K210_CLK_SPI3 25
+#define K210_CLK_TIMER0 26
+#define K210_CLK_TIMER1 27
+#define K210_CLK_TIMER2 28
+#define K210_CLK_GPIO 29
+#define K210_CLK_UART1 30
+#define K210_CLK_UART2 31
+#define K210_CLK_UART3 32
+#define K210_CLK_FPIOA 33
+#define K210_CLK_SHA 34
+#define K210_CLK_AES 35
+#define K210_CLK_OTP 36
+#define K210_CLK_RTC 37
+
+#define K210_NUM_CLKS 38
+
+#endif /* CLOCK_K210_CLK_H */
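"Arbitrary values" in the comment above means the numbers only need to agree between the provider and its consumers; K210_NUM_CLKS then bounds the driver's clock table. A sketch of both sides, with addresses and compatibles assumed rather than quoted from this patch:

#include <dt-bindings/clock/k210-clk.h>

sysclk: clock-controller@50440000 {
	compatible = "canaan,k210-clk";
	reg = <0x50440000 0x100>;
	#clock-cells = <1>;		/* one cell: the K210_CLK_* index */
};

uart1: serial@50210000 {
	compatible = "snps,dw-apb-uart";
	reg = <0x50210000 0x100>;
	clocks = <&sysclk K210_CLK_UART1>;
};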
diff --git a/include/dt-bindings/clk/lochnagar.h b/include/dt-bindings/clock/lochnagar.h
index 8fa20551ff17..8fa20551ff17 100644
--- a/include/dt-bindings/clk/lochnagar.h
+++ b/include/dt-bindings/clock/lochnagar.h
diff --git a/include/dt-bindings/clock/marvell,mmp2-audio.h b/include/dt-bindings/clock/marvell,mmp2-audio.h
new file mode 100644
index 000000000000..20664776f497
--- /dev/null
+++ b/include/dt-bindings/clock/marvell,mmp2-audio.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-2-Clause) */
+#ifndef __DT_BINDINGS_CLOCK_MARVELL_MMP2_AUDIO_H
+#define __DT_BINDINGS_CLOCK_MARVELL_MMP2_AUDIO_H
+
+#define MMP2_CLK_AUDIO_SYSCLK 0
+#define MMP2_CLK_AUDIO_SSPA0 1
+#define MMP2_CLK_AUDIO_SSPA1 2
+
+#define MMP2_CLK_AUDIO_NR_CLKS 3
+#endif
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
index 4b1a7724f20d..f0819d66b230 100644
--- a/include/dt-bindings/clock/marvell,mmp2.h
+++ b/include/dt-bindings/clock/marvell,mmp2.h
@@ -26,8 +26,13 @@
#define MMP2_CLK_VCTCXO_4 25
#define MMP2_CLK_UART_PLL 26
#define MMP2_CLK_USB_PLL 27
+#define MMP3_CLK_PLL1_P 28
+#define MMP3_CLK_PLL2_P 29
+#define MMP3_CLK_PLL3 30
+#define MMP2_CLK_I2S0 31
+#define MMP2_CLK_I2S1 32

-/* apb periphrals */
+/* apb peripherals */
#define MMP2_CLK_TWSI0 60
#define MMP2_CLK_TWSI1 61
#define MMP2_CLK_TWSI2 62
@@ -50,8 +55,12 @@
#define MMP2_CLK_SSP2 79
#define MMP2_CLK_SSP3 80
#define MMP2_CLK_TIMER 81
+#define MMP2_CLK_THERMAL0 82
+#define MMP3_CLK_THERMAL1 83
+#define MMP3_CLK_THERMAL2 84
+#define MMP3_CLK_THERMAL3 85

-/* axi periphrals */
+/* axi peripherals */
#define MMP2_CLK_SDH0 101
#define MMP2_CLK_SDH1 102
#define MMP2_CLK_SDH2 103
@@ -74,6 +83,13 @@
#define MMP2_CLK_DISP0_LCDC 120
#define MMP2_CLK_USBHSIC0 121
#define MMP2_CLK_USBHSIC1 122
+#define MMP2_CLK_GPU_BUS 123
+#define MMP3_CLK_GPU_BUS MMP2_CLK_GPU_BUS
+#define MMP2_CLK_GPU_3D 124
+#define MMP3_CLK_GPU_3D MMP2_CLK_GPU_3D
+#define MMP3_CLK_GPU_2D 125
+#define MMP3_CLK_SDH4 126
+#define MMP2_CLK_AUDIO 127

#define MMP2_NR_CLKS 200
#endif
diff --git a/include/dt-bindings/clock/marvell,pxa168.h b/include/dt-bindings/clock/marvell,pxa168.h
index caf90436b848..c92d969ae941 100644
--- a/include/dt-bindings/clock/marvell,pxa168.h
+++ b/include/dt-bindings/clock/marvell,pxa168.h
@@ -20,10 +20,13 @@
#define PXA168_CLK_PLL1_2_1_5 19
#define PXA168_CLK_PLL1_3_16 20
#define PXA168_CLK_PLL1_192 21
+#define PXA168_CLK_PLL1_2_1_10 22
+#define PXA168_CLK_PLL1_2_3_16 23
#define PXA168_CLK_UART_PLL 27
#define PXA168_CLK_USB_PLL 28
+#define PXA168_CLK_CLK32_2 50

-/* apb periphrals */
+/* apb peripherals */
#define PXA168_CLK_TWSI0 60
#define PXA168_CLK_TWSI1 61
#define PXA168_CLK_TWSI2 62
@@ -45,7 +48,7 @@
#define PXA168_CLK_SSP4 78
#define PXA168_CLK_TIMER 79

-/* axi periphrals */
+/* axi peripherals */
#define PXA168_CLK_DFC 100
#define PXA168_CLK_SDH0 101
#define PXA168_CLK_SDH1 102
@@ -56,6 +59,9 @@
#define PXA168_CLK_CCIC0 107
#define PXA168_CLK_CCIC0_PHY 108
#define PXA168_CLK_CCIC0_SPHY 109
+#define PXA168_CLK_SDH3 110
+#define PXA168_CLK_SDH01_AXI 111
+#define PXA168_CLK_SDH23_AXI 112

#define PXA168_NR_CLKS 200
#endif
diff --git a/include/dt-bindings/clock/marvell,pxa910.h b/include/dt-bindings/clock/marvell,pxa910.h
index 7bf46238946e..c9018ab354d0 100644
--- a/include/dt-bindings/clock/marvell,pxa910.h
+++ b/include/dt-bindings/clock/marvell,pxa910.h
@@ -23,7 +23,7 @@
#define PXA910_CLK_UART_PLL 27
#define PXA910_CLK_USB_PLL 28

-/* apb periphrals */
+/* apb peripherals */
#define PXA910_CLK_TWSI0 60
#define PXA910_CLK_TWSI1 61
#define PXA910_CLK_TWSI2 62
@@ -43,7 +43,7 @@
#define PXA910_CLK_TIMER0 76
#define PXA910_CLK_TIMER1 77

-/* axi periphrals */
+/* axi peripherals */
#define PXA910_CLK_DFC 100
#define PXA910_CLK_SDH0 101
#define PXA910_CLK_SDH1 102
diff --git a/include/dt-bindings/clock/mediatek,mt6795-clk.h b/include/dt-bindings/clock/mediatek,mt6795-clk.h
new file mode 100644
index 000000000000..9902906ac902
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt6795-clk.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT6795_H
+#define _DT_BINDINGS_CLK_MT6795_H
+
+/* TOPCKGEN */
+#define CLK_TOP_ADSYS_26M 0
+#define CLK_TOP_CLKPH_MCK_O 1
+#define CLK_TOP_USB_SYSPLL_125M 2
+#define CLK_TOP_DSI0_DIG 3
+#define CLK_TOP_DSI1_DIG 4
+#define CLK_TOP_ARMCA53PLL_754M 5
+#define CLK_TOP_ARMCA53PLL_502M 6
+#define CLK_TOP_MAIN_H546M 7
+#define CLK_TOP_MAIN_H364M 8
+#define CLK_TOP_MAIN_H218P4M 9
+#define CLK_TOP_MAIN_H156M 10
+#define CLK_TOP_TVDPLL_445P5M 11
+#define CLK_TOP_TVDPLL_594M 12
+#define CLK_TOP_UNIV_624M 13
+#define CLK_TOP_UNIV_416M 14
+#define CLK_TOP_UNIV_249P6M 15
+#define CLK_TOP_UNIV_178P3M 16
+#define CLK_TOP_UNIV_48M 17
+#define CLK_TOP_CLKRTC_EXT 18
+#define CLK_TOP_CLKRTC_INT 19
+#define CLK_TOP_FPC 20
+#define CLK_TOP_HDMITXPLL_D2 21
+#define CLK_TOP_HDMITXPLL_D3 22
+#define CLK_TOP_ARMCA53PLL_D2 23
+#define CLK_TOP_ARMCA53PLL_D3 24
+#define CLK_TOP_APLL1 25
+#define CLK_TOP_APLL2 26
+#define CLK_TOP_DMPLL 27
+#define CLK_TOP_DMPLL_D2 28
+#define CLK_TOP_DMPLL_D4 29
+#define CLK_TOP_DMPLL_D8 30
+#define CLK_TOP_DMPLL_D16 31
+#define CLK_TOP_MMPLL 32
+#define CLK_TOP_MMPLL_D2 33
+#define CLK_TOP_MSDCPLL 34
+#define CLK_TOP_MSDCPLL_D2 35
+#define CLK_TOP_MSDCPLL_D4 36
+#define CLK_TOP_MSDCPLL2 37
+#define CLK_TOP_MSDCPLL2_D2 38
+#define CLK_TOP_MSDCPLL2_D4 39
+#define CLK_TOP_SYSPLL_D2 40
+#define CLK_TOP_SYSPLL1_D2 41
+#define CLK_TOP_SYSPLL1_D4 42
+#define CLK_TOP_SYSPLL1_D8 43
+#define CLK_TOP_SYSPLL1_D16 44
+#define CLK_TOP_SYSPLL_D3 45
+#define CLK_TOP_SYSPLL2_D2 46
+#define CLK_TOP_SYSPLL2_D4 47
+#define CLK_TOP_SYSPLL_D5 48
+#define CLK_TOP_SYSPLL3_D2 49
+#define CLK_TOP_SYSPLL3_D4 50
+#define CLK_TOP_SYSPLL_D7 51
+#define CLK_TOP_SYSPLL4_D2 52
+#define CLK_TOP_SYSPLL4_D4 53
+#define CLK_TOP_TVDPLL 54
+#define CLK_TOP_TVDPLL_D2 55
+#define CLK_TOP_TVDPLL_D4 56
+#define CLK_TOP_TVDPLL_D8 57
+#define CLK_TOP_TVDPLL_D16 58
+#define CLK_TOP_UNIVPLL_D2 59
+#define CLK_TOP_UNIVPLL1_D2 60
+#define CLK_TOP_UNIVPLL1_D4 61
+#define CLK_TOP_UNIVPLL1_D8 62
+#define CLK_TOP_UNIVPLL_D3 63
+#define CLK_TOP_UNIVPLL2_D2 64
+#define CLK_TOP_UNIVPLL2_D4 65
+#define CLK_TOP_UNIVPLL2_D8 66
+#define CLK_TOP_UNIVPLL_D5 67
+#define CLK_TOP_UNIVPLL3_D2 68
+#define CLK_TOP_UNIVPLL3_D4 69
+#define CLK_TOP_UNIVPLL3_D8 70
+#define CLK_TOP_UNIVPLL_D7 71
+#define CLK_TOP_UNIVPLL_D26 72
+#define CLK_TOP_UNIVPLL_D52 73
+#define CLK_TOP_VCODECPLL 74
+#define CLK_TOP_VCODECPLL_370P5 75
+#define CLK_TOP_VENCPLL 76
+#define CLK_TOP_VENCPLL_D2 77
+#define CLK_TOP_VENCPLL_D4 78
+#define CLK_TOP_AXI_SEL 79
+#define CLK_TOP_MEM_SEL 80
+#define CLK_TOP_DDRPHYCFG_SEL 81
+#define CLK_TOP_MM_SEL 82
+#define CLK_TOP_PWM_SEL 83
+#define CLK_TOP_VDEC_SEL 84
+#define CLK_TOP_VENC_SEL 85
+#define CLK_TOP_MFG_SEL 86
+#define CLK_TOP_CAMTG_SEL 87
+#define CLK_TOP_UART_SEL 88
+#define CLK_TOP_SPI_SEL 89
+#define CLK_TOP_USB20_SEL 90
+#define CLK_TOP_USB30_SEL 91
+#define CLK_TOP_MSDC50_0_H_SEL 92
+#define CLK_TOP_MSDC50_0_SEL 93
+#define CLK_TOP_MSDC30_1_SEL 94
+#define CLK_TOP_MSDC30_2_SEL 95
+#define CLK_TOP_MSDC30_3_SEL 96
+#define CLK_TOP_AUDIO_SEL 97
+#define CLK_TOP_AUD_INTBUS_SEL 98
+#define CLK_TOP_PMICSPI_SEL 99
+#define CLK_TOP_SCP_SEL 100
+#define CLK_TOP_MJC_SEL 101
+#define CLK_TOP_DPI0_SEL 102
+#define CLK_TOP_IRDA_SEL 103
+#define CLK_TOP_CCI400_SEL 104
+#define CLK_TOP_AUD_1_SEL 105
+#define CLK_TOP_AUD_2_SEL 106
+#define CLK_TOP_MEM_MFG_IN_SEL 107
+#define CLK_TOP_AXI_MFG_IN_SEL 108
+#define CLK_TOP_SCAM_SEL 109
+#define CLK_TOP_I2S0_M_SEL 110
+#define CLK_TOP_I2S1_M_SEL 111
+#define CLK_TOP_I2S2_M_SEL 112
+#define CLK_TOP_I2S3_M_SEL 113
+#define CLK_TOP_I2S3_B_SEL 114
+#define CLK_TOP_APLL1_DIV0 115
+#define CLK_TOP_APLL1_DIV1 116
+#define CLK_TOP_APLL1_DIV2 117
+#define CLK_TOP_APLL1_DIV3 118
+#define CLK_TOP_APLL1_DIV4 119
+#define CLK_TOP_APLL1_DIV5 120
+#define CLK_TOP_APLL2_DIV0 121
+#define CLK_TOP_APLL2_DIV1 122
+#define CLK_TOP_APLL2_DIV2 123
+#define CLK_TOP_APLL2_DIV3 124
+#define CLK_TOP_APLL2_DIV4 125
+#define CLK_TOP_APLL2_DIV5 126
+#define CLK_TOP_NR_CLK 127
+
+/* APMIXED_SYS */
+#define CLK_APMIXED_ARMCA53PLL 0
+#define CLK_APMIXED_MAINPLL 1
+#define CLK_APMIXED_UNIVPLL 2
+#define CLK_APMIXED_MMPLL 3
+#define CLK_APMIXED_MSDCPLL 4
+#define CLK_APMIXED_VENCPLL 5
+#define CLK_APMIXED_TVDPLL 6
+#define CLK_APMIXED_MPLL 7
+#define CLK_APMIXED_VCODECPLL 8
+#define CLK_APMIXED_APLL1 9
+#define CLK_APMIXED_APLL2 10
+#define CLK_APMIXED_REF2USB_TX 11
+#define CLK_APMIXED_NR_CLK 12
+
+/* INFRA_SYS */
+#define CLK_INFRA_DBGCLK 0
+#define CLK_INFRA_SMI 1
+#define CLK_INFRA_AUDIO 2
+#define CLK_INFRA_GCE 3
+#define CLK_INFRA_L2C_SRAM 4
+#define CLK_INFRA_M4U 5
+#define CLK_INFRA_MD1MCU 6
+#define CLK_INFRA_MD1BUS 7
+#define CLK_INFRA_MD1DBB 8
+#define CLK_INFRA_DEVICE_APC 9
+#define CLK_INFRA_TRNG 10
+#define CLK_INFRA_MD1LTE 11
+#define CLK_INFRA_CPUM 12
+#define CLK_INFRA_KP 13
+#define CLK_INFRA_CA53_C0_SEL 14
+#define CLK_INFRA_CA53_C1_SEL 15
+#define CLK_INFRA_NR_CLK 16
+
+/* PERI_SYS */
+#define CLK_PERI_NFI 0
+#define CLK_PERI_THERM 1
+#define CLK_PERI_PWM1 2
+#define CLK_PERI_PWM2 3
+#define CLK_PERI_PWM3 4
+#define CLK_PERI_PWM4 5
+#define CLK_PERI_PWM5 6
+#define CLK_PERI_PWM6 7
+#define CLK_PERI_PWM7 8
+#define CLK_PERI_PWM 9
+#define CLK_PERI_USB0 10
+#define CLK_PERI_USB1 11
+#define CLK_PERI_AP_DMA 12
+#define CLK_PERI_MSDC30_0 13
+#define CLK_PERI_MSDC30_1 14
+#define CLK_PERI_MSDC30_2 15
+#define CLK_PERI_MSDC30_3 16
+#define CLK_PERI_NLI_ARB 17
+#define CLK_PERI_IRDA 18
+#define CLK_PERI_UART0 19
+#define CLK_PERI_UART1 20
+#define CLK_PERI_UART2 21
+#define CLK_PERI_UART3 22
+#define CLK_PERI_I2C0 23
+#define CLK_PERI_I2C1 24
+#define CLK_PERI_I2C2 25
+#define CLK_PERI_I2C3 26
+#define CLK_PERI_I2C4 27
+#define CLK_PERI_AUXADC 28
+#define CLK_PERI_SPI0 29
+#define CLK_PERI_UART0_SEL 30
+#define CLK_PERI_UART1_SEL 31
+#define CLK_PERI_UART2_SEL 32
+#define CLK_PERI_UART3_SEL 33
+#define CLK_PERI_NR_CLK 34
+
+/* MFG */
+#define CLK_MFG_BAXI 0
+#define CLK_MFG_BMEM 1
+#define CLK_MFG_BG3D 2
+#define CLK_MFG_B26M 3
+#define CLK_MFG_NR_CLK 4
+
+/* MM_SYS */
+#define CLK_MM_SMI_COMMON 0
+#define CLK_MM_SMI_LARB0 1
+#define CLK_MM_CAM_MDP 2
+#define CLK_MM_MDP_RDMA0 3
+#define CLK_MM_MDP_RDMA1 4
+#define CLK_MM_MDP_RSZ0 5
+#define CLK_MM_MDP_RSZ1 6
+#define CLK_MM_MDP_RSZ2 7
+#define CLK_MM_MDP_TDSHP0 8
+#define CLK_MM_MDP_TDSHP1 9
+#define CLK_MM_MDP_CROP 10
+#define CLK_MM_MDP_WDMA 11
+#define CLK_MM_MDP_WROT0 12
+#define CLK_MM_MDP_WROT1 13
+#define CLK_MM_FAKE_ENG 14
+#define CLK_MM_MUTEX_32K 15
+#define CLK_MM_DISP_OVL0 16
+#define CLK_MM_DISP_OVL1 17
+#define CLK_MM_DISP_RDMA0 18
+#define CLK_MM_DISP_RDMA1 19
+#define CLK_MM_DISP_RDMA2 20
+#define CLK_MM_DISP_WDMA0 21
+#define CLK_MM_DISP_WDMA1 22
+#define CLK_MM_DISP_COLOR0 23
+#define CLK_MM_DISP_COLOR1 24
+#define CLK_MM_DISP_AAL 25
+#define CLK_MM_DISP_GAMMA 26
+#define CLK_MM_DISP_UFOE 27
+#define CLK_MM_DISP_SPLIT0 28
+#define CLK_MM_DISP_SPLIT1 29
+#define CLK_MM_DISP_MERGE 30
+#define CLK_MM_DISP_OD 31
+#define CLK_MM_DISP_PWM0MM 32
+#define CLK_MM_DISP_PWM026M 33
+#define CLK_MM_DISP_PWM1MM 34
+#define CLK_MM_DISP_PWM126M 35
+#define CLK_MM_DSI0_ENGINE 36
+#define CLK_MM_DSI0_DIGITAL 37
+#define CLK_MM_DSI1_ENGINE 38
+#define CLK_MM_DSI1_DIGITAL 39
+#define CLK_MM_DPI_PIXEL 40
+#define CLK_MM_DPI_ENGINE 41
+#define CLK_MM_NR_CLK 42
+
+/* VDEC_SYS */
+#define CLK_VDEC_CKEN 0
+#define CLK_VDEC_LARB_CKEN 1
+#define CLK_VDEC_NR_CLK 2
+
+/* VENC_SYS */
+#define CLK_VENC_LARB 0
+#define CLK_VENC_VENC 1
+#define CLK_VENC_JPGENC 2
+#define CLK_VENC_JPGDEC 3
+#define CLK_VENC_NR_CLK 4
+
+#endif /* _DT_BINDINGS_CLK_MT6795_H */
diff --git a/include/dt-bindings/clock/mediatek,mt8365-clk.h b/include/dt-bindings/clock/mediatek,mt8365-clk.h
new file mode 100644
index 000000000000..f9aff1775810
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt8365-clk.h
@@ -0,0 +1,373 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+ *
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8365_H
+#define _DT_BINDINGS_CLK_MT8365_H
+
+/* TOPCKGEN */
+#define CLK_TOP_CLK_NULL 0
+#define CLK_TOP_I2S0_BCK 1
+#define CLK_TOP_DSI0_LNTC_DSICK 2
+#define CLK_TOP_VPLL_DPIX 3
+#define CLK_TOP_LVDSTX_CLKDIG_CTS 4
+#define CLK_TOP_MFGPLL 5
+#define CLK_TOP_SYSPLL_D2 6
+#define CLK_TOP_SYSPLL1_D2 7
+#define CLK_TOP_SYSPLL1_D4 8
+#define CLK_TOP_SYSPLL1_D8 9
+#define CLK_TOP_SYSPLL1_D16 10
+#define CLK_TOP_SYSPLL_D3 11
+#define CLK_TOP_SYSPLL2_D2 12
+#define CLK_TOP_SYSPLL2_D4 13
+#define CLK_TOP_SYSPLL2_D8 14
+#define CLK_TOP_SYSPLL_D5 15
+#define CLK_TOP_SYSPLL3_D2 16
+#define CLK_TOP_SYSPLL3_D4 17
+#define CLK_TOP_SYSPLL_D7 18
+#define CLK_TOP_SYSPLL4_D2 19
+#define CLK_TOP_SYSPLL4_D4 20
+#define CLK_TOP_UNIVPLL 21
+#define CLK_TOP_UNIVPLL_D2 22
+#define CLK_TOP_UNIVPLL1_D2 23
+#define CLK_TOP_UNIVPLL1_D4 24
+#define CLK_TOP_UNIVPLL_D3 25
+#define CLK_TOP_UNIVPLL2_D2 26
+#define CLK_TOP_UNIVPLL2_D4 27
+#define CLK_TOP_UNIVPLL2_D8 28
+#define CLK_TOP_UNIVPLL2_D32 29
+#define CLK_TOP_UNIVPLL_D5 30
+#define CLK_TOP_UNIVPLL3_D2 31
+#define CLK_TOP_UNIVPLL3_D4 32
+#define CLK_TOP_MMPLL 33
+#define CLK_TOP_MMPLL_D2 34
+#define CLK_TOP_LVDSPLL_D2 35
+#define CLK_TOP_LVDSPLL_D4 36
+#define CLK_TOP_LVDSPLL_D8 37
+#define CLK_TOP_LVDSPLL_D16 38
+#define CLK_TOP_USB20_192M 39
+#define CLK_TOP_USB20_192M_D4 40
+#define CLK_TOP_USB20_192M_D8 41
+#define CLK_TOP_USB20_192M_D16 42
+#define CLK_TOP_USB20_192M_D32 43
+#define CLK_TOP_APLL1 44
+#define CLK_TOP_APLL1_D2 45
+#define CLK_TOP_APLL1_D4 46
+#define CLK_TOP_APLL1_D8 47
+#define CLK_TOP_APLL2 48
+#define CLK_TOP_APLL2_D2 49
+#define CLK_TOP_APLL2_D4 50
+#define CLK_TOP_APLL2_D8 51
+#define CLK_TOP_SYS_26M_D2 52
+#define CLK_TOP_MSDCPLL 53
+#define CLK_TOP_MSDCPLL_D2 54
+#define CLK_TOP_DSPPLL 55
+#define CLK_TOP_DSPPLL_D2 56
+#define CLK_TOP_DSPPLL_D4 57
+#define CLK_TOP_DSPPLL_D8 58
+#define CLK_TOP_APUPLL 59
+#define CLK_TOP_CLK26M_D52 60
+#define CLK_TOP_AXI_SEL 61
+#define CLK_TOP_MEM_SEL 62
+#define CLK_TOP_MM_SEL 63
+#define CLK_TOP_SCP_SEL 64
+#define CLK_TOP_MFG_SEL 65
+#define CLK_TOP_ATB_SEL 66
+#define CLK_TOP_CAMTG_SEL 67
+#define CLK_TOP_CAMTG1_SEL 68
+#define CLK_TOP_UART_SEL 69
+#define CLK_TOP_SPI_SEL 70
+#define CLK_TOP_MSDC50_0_HC_SEL 71
+#define CLK_TOP_MSDC2_2_HC_SEL 72
+#define CLK_TOP_MSDC50_0_SEL 73
+#define CLK_TOP_MSDC50_2_SEL 74
+#define CLK_TOP_MSDC30_1_SEL 75
+#define CLK_TOP_AUDIO_SEL 76
+#define CLK_TOP_AUD_INTBUS_SEL 77
+#define CLK_TOP_AUD_1_SEL 78
+#define CLK_TOP_AUD_2_SEL 79
+#define CLK_TOP_AUD_ENGEN1_SEL 80
+#define CLK_TOP_AUD_ENGEN2_SEL 81
+#define CLK_TOP_AUD_SPDIF_SEL 82
+#define CLK_TOP_DISP_PWM_SEL 83
+#define CLK_TOP_DXCC_SEL 84
+#define CLK_TOP_SSUSB_SYS_SEL 85
+#define CLK_TOP_SSUSB_XHCI_SEL 86
+#define CLK_TOP_SPM_SEL 87
+#define CLK_TOP_I2C_SEL 88
+#define CLK_TOP_PWM_SEL 89
+#define CLK_TOP_SENIF_SEL 90
+#define CLK_TOP_AES_FDE_SEL 91
+#define CLK_TOP_CAMTM_SEL 92
+#define CLK_TOP_DPI0_SEL 93
+#define CLK_TOP_DPI1_SEL 94
+#define CLK_TOP_DSP_SEL 95
+#define CLK_TOP_NFI2X_SEL 96
+#define CLK_TOP_NFIECC_SEL 97
+#define CLK_TOP_ECC_SEL 98
+#define CLK_TOP_ETH_SEL 99
+#define CLK_TOP_GCPU_SEL 100
+#define CLK_TOP_GCPU_CPM_SEL 101
+#define CLK_TOP_APU_SEL 102
+#define CLK_TOP_APU_IF_SEL 103
+#define CLK_TOP_MBIST_DIAG_SEL 104
+#define CLK_TOP_APLL_I2S0_SEL 105
+#define CLK_TOP_APLL_I2S1_SEL 106
+#define CLK_TOP_APLL_I2S2_SEL 107
+#define CLK_TOP_APLL_I2S3_SEL 108
+#define CLK_TOP_APLL_TDMOUT_SEL 109
+#define CLK_TOP_APLL_TDMIN_SEL 110
+#define CLK_TOP_APLL_SPDIF_SEL 111
+#define CLK_TOP_APLL12_CK_DIV0 112
+#define CLK_TOP_APLL12_CK_DIV1 113
+#define CLK_TOP_APLL12_CK_DIV2 114
+#define CLK_TOP_APLL12_CK_DIV3 115
+#define CLK_TOP_APLL12_CK_DIV4 116
+#define CLK_TOP_APLL12_CK_DIV4B 117
+#define CLK_TOP_APLL12_CK_DIV5 118
+#define CLK_TOP_APLL12_CK_DIV5B 119
+#define CLK_TOP_APLL12_CK_DIV6 120
+#define CLK_TOP_AUD_I2S0_M 121
+#define CLK_TOP_AUD_I2S1_M 122
+#define CLK_TOP_AUD_I2S2_M 123
+#define CLK_TOP_AUD_I2S3_M 124
+#define CLK_TOP_AUD_TDMOUT_M 125
+#define CLK_TOP_AUD_TDMOUT_B 126
+#define CLK_TOP_AUD_TDMIN_M 127
+#define CLK_TOP_AUD_TDMIN_B 128
+#define CLK_TOP_AUD_SPDIF_M 129
+#define CLK_TOP_USB20_48M_EN 130
+#define CLK_TOP_UNIVPLL_48M_EN 131
+#define CLK_TOP_LVDSTX_CLKDIG_EN 132
+#define CLK_TOP_VPLL_DPIX_EN 133
+#define CLK_TOP_SSUSB_TOP_CK_EN 134
+#define CLK_TOP_SSUSB_PHY_CK_EN 135
+#define CLK_TOP_CONN_32K 136
+#define CLK_TOP_CONN_26M 137
+#define CLK_TOP_DSP_32K 138
+#define CLK_TOP_DSP_26M 139
+#define CLK_TOP_NR_CLK 140
+
+/* INFRACFG */
+#define CLK_IFR_PMIC_TMR 0
+#define CLK_IFR_PMIC_AP 1
+#define CLK_IFR_PMIC_MD 2
+#define CLK_IFR_PMIC_CONN 3
+#define CLK_IFR_ICUSB 4
+#define CLK_IFR_GCE 5
+#define CLK_IFR_THERM 6
+#define CLK_IFR_PWM_HCLK 7
+#define CLK_IFR_PWM1 8
+#define CLK_IFR_PWM2 9
+#define CLK_IFR_PWM3 10
+#define CLK_IFR_PWM4 11
+#define CLK_IFR_PWM5 12
+#define CLK_IFR_PWM 13
+#define CLK_IFR_UART0 14
+#define CLK_IFR_UART1 15
+#define CLK_IFR_UART2 16
+#define CLK_IFR_DSP_UART 17
+#define CLK_IFR_GCE_26M 18
+#define CLK_IFR_CQ_DMA_FPC 19
+#define CLK_IFR_BTIF 20
+#define CLK_IFR_SPI0 21
+#define CLK_IFR_MSDC0_HCLK 22
+#define CLK_IFR_MSDC2_HCLK 23
+#define CLK_IFR_MSDC1_HCLK 24
+#define CLK_IFR_DVFSRC 25
+#define CLK_IFR_GCPU 26
+#define CLK_IFR_TRNG 27
+#define CLK_IFR_AUXADC 28
+#define CLK_IFR_CPUM 29
+#define CLK_IFR_AUXADC_MD 30
+#define CLK_IFR_AP_DMA 31
+#define CLK_IFR_DEBUGSYS 32
+#define CLK_IFR_AUDIO 33
+#define CLK_IFR_PWM_FBCLK6 34
+#define CLK_IFR_DISP_PWM 35
+#define CLK_IFR_AUD_26M_BK 36
+#define CLK_IFR_CQ_DMA 37
+#define CLK_IFR_MSDC0_SF 38
+#define CLK_IFR_MSDC1_SF 39
+#define CLK_IFR_MSDC2_SF 40
+#define CLK_IFR_AP_MSDC0 41
+#define CLK_IFR_MD_MSDC0 42
+#define CLK_IFR_MSDC0_SRC 43
+#define CLK_IFR_MSDC1_SRC 44
+#define CLK_IFR_MSDC2_SRC 45
+#define CLK_IFR_PWRAP_TMR 46
+#define CLK_IFR_PWRAP_SPI 47
+#define CLK_IFR_PWRAP_SYS 48
+#define CLK_IFR_MCU_PM_BK 49
+#define CLK_IFR_IRRX_26M 50
+#define CLK_IFR_IRRX_32K 51
+#define CLK_IFR_I2C0_AXI 52
+#define CLK_IFR_I2C1_AXI 53
+#define CLK_IFR_I2C2_AXI 54
+#define CLK_IFR_I2C3_AXI 55
+#define CLK_IFR_NIC_AXI 56
+#define CLK_IFR_NIC_SLV_AXI 57
+#define CLK_IFR_APU_AXI 58
+#define CLK_IFR_NFIECC 59
+#define CLK_IFR_NFIECC_BK 60
+#define CLK_IFR_NFI1X_BK 61
+#define CLK_IFR_NFI_BK 62
+#define CLK_IFR_MSDC2_AP_BK 63
+#define CLK_IFR_MSDC2_MD_BK 64
+#define CLK_IFR_MSDC2_BK 65
+#define CLK_IFR_SUSB_133_BK 66
+#define CLK_IFR_SUSB_66_BK 67
+#define CLK_IFR_SSUSB_SYS 68
+#define CLK_IFR_SSUSB_REF 69
+#define CLK_IFR_SSUSB_XHCI 70
+#define CLK_IFR_NR_CLK 71
+
+/* PERICFG */
+#define CLK_PERIAXI 0
+#define CLK_PERI_NR_CLK 1
+
+/* APMIXEDSYS */
+#define CLK_APMIXED_ARMPLL 0
+#define CLK_APMIXED_MAINPLL 1
+#define CLK_APMIXED_UNIVPLL 2
+#define CLK_APMIXED_MFGPLL 3
+#define CLK_APMIXED_MSDCPLL 4
+#define CLK_APMIXED_MMPLL 5
+#define CLK_APMIXED_APLL1 6
+#define CLK_APMIXED_APLL2 7
+#define CLK_APMIXED_LVDSPLL 8
+#define CLK_APMIXED_DSPPLL 9
+#define CLK_APMIXED_APUPLL 10
+#define CLK_APMIXED_UNIV_EN 11
+#define CLK_APMIXED_USB20_EN 12
+#define CLK_APMIXED_NR_CLK 13
+
+/* GCE */
+#define CLK_GCE_FAXI 0
+#define CLK_GCE_NR_CLK 1
+
+/* AUDIOTOP */
+#define CLK_AUD_AFE 0
+#define CLK_AUD_I2S 1
+#define CLK_AUD_22M 2
+#define CLK_AUD_24M 3
+#define CLK_AUD_INTDIR 4
+#define CLK_AUD_APLL2_TUNER 5
+#define CLK_AUD_APLL_TUNER 6
+#define CLK_AUD_SPDF 7
+#define CLK_AUD_HDMI 8
+#define CLK_AUD_HDMI_IN 9
+#define CLK_AUD_ADC 10
+#define CLK_AUD_DAC 11
+#define CLK_AUD_DAC_PREDIS 12
+#define CLK_AUD_TML 13
+#define CLK_AUD_I2S1_BK 14
+#define CLK_AUD_I2S2_BK 15
+#define CLK_AUD_I2S3_BK 16
+#define CLK_AUD_I2S4_BK 17
+#define CLK_AUD_NR_CLK 18
+
+/* MIPI_CSI0A */
+#define CLK_MIPI0A_CSR_CSI_EN_0A 0
+#define CLK_MIPI_RX_ANA_CSI0A_NR_CLK 1
+
+/* MIPI_CSI0B */
+#define CLK_MIPI0B_CSR_CSI_EN_0B 0
+#define CLK_MIPI_RX_ANA_CSI0B_NR_CLK 1
+
+/* MIPI_CSI1A */
+#define CLK_MIPI1A_CSR_CSI_EN_1A 0
+#define CLK_MIPI_RX_ANA_CSI1A_NR_CLK 1
+
+/* MIPI_CSI1B */
+#define CLK_MIPI1B_CSR_CSI_EN_1B 0
+#define CLK_MIPI_RX_ANA_CSI1B_NR_CLK 1
+
+/* MIPI_CSI2A */
+#define CLK_MIPI2A_CSR_CSI_EN_2A 0
+#define CLK_MIPI_RX_ANA_CSI2A_NR_CLK 1
+
+/* MIPI_CSI2B */
+#define CLK_MIPI2B_CSR_CSI_EN_2B 0
+#define CLK_MIPI_RX_ANA_CSI2B_NR_CLK 1
+
+/* MCUCFG */
+#define CLK_MCU_BUS_SEL 0
+#define CLK_MCU_NR_CLK 1
+
+/* MFGCFG */
+#define CLK_MFG_BG3D 0
+#define CLK_MFG_MBIST_DIAG 1
+#define CLK_MFG_NR_CLK 2
+
+/* MMSYS */
+#define CLK_MM_MM_MDP_RDMA0 0
+#define CLK_MM_MM_MDP_CCORR0 1
+#define CLK_MM_MM_MDP_RSZ0 2
+#define CLK_MM_MM_MDP_RSZ1 3
+#define CLK_MM_MM_MDP_TDSHP0 4
+#define CLK_MM_MM_MDP_WROT0 5
+#define CLK_MM_MM_MDP_WDMA0 6
+#define CLK_MM_MM_DISP_OVL0 7
+#define CLK_MM_MM_DISP_OVL0_2L 8
+#define CLK_MM_MM_DISP_RSZ0 9
+#define CLK_MM_MM_DISP_RDMA0 10
+#define CLK_MM_MM_DISP_WDMA0 11
+#define CLK_MM_MM_DISP_COLOR0 12
+#define CLK_MM_MM_DISP_CCORR0 13
+#define CLK_MM_MM_DISP_AAL0 14
+#define CLK_MM_MM_DISP_GAMMA0 15
+#define CLK_MM_MM_DISP_DITHER0 16
+#define CLK_MM_MM_DSI0 17
+#define CLK_MM_MM_DISP_RDMA1 18
+#define CLK_MM_MM_MDP_RDMA1 19
+#define CLK_MM_DPI0_DPI0 20
+#define CLK_MM_MM_FAKE 21
+#define CLK_MM_MM_SMI_COMMON 22
+#define CLK_MM_MM_SMI_LARB0 23
+#define CLK_MM_MM_SMI_COMM0 24
+#define CLK_MM_MM_SMI_COMM1 25
+#define CLK_MM_MM_CAM_MDP 26
+#define CLK_MM_MM_SMI_IMG 27
+#define CLK_MM_MM_SMI_CAM 28
+#define CLK_MM_IMG_IMG_DL_RELAY 29
+#define CLK_MM_IMG_IMG_DL_ASYNC_TOP 30
+#define CLK_MM_DSI0_DIG_DSI 31
+#define CLK_MM_26M_HRTWT 32
+#define CLK_MM_MM_DPI0 33
+#define CLK_MM_LVDSTX_PXL 34
+#define CLK_MM_LVDSTX_CTS 35
+#define CLK_MM_NR_CLK 36
+
+/* IMGSYS */
+#define CLK_CAM_LARB2 0
+#define CLK_CAM 1
+#define CLK_CAMTG 2
+#define CLK_CAM_SENIF 3
+#define CLK_CAMSV0 4
+#define CLK_CAMSV1 5
+#define CLK_CAM_FDVT 6
+#define CLK_CAM_WPE 7
+#define CLK_CAM_NR_CLK 8
+
+/* VDECSYS */
+#define CLK_VDEC_VDEC 0
+#define CLK_VDEC_LARB1 1
+#define CLK_VDEC_NR_CLK 2
+
+/* VENCSYS */
+#define CLK_VENC 0
+#define CLK_VENC_JPGENC 1
+#define CLK_VENC_NR_CLK 2
+
+/* APUSYS */
+#define CLK_APU_IPU_CK 0
+#define CLK_APU_AXI 1
+#define CLK_APU_JTAG 2
+#define CLK_APU_IF_CK 3
+#define CLK_APU_EDMA 4
+#define CLK_APU_AHB 5
+#define CLK_APU_NR_CLK 6
+
+#endif /* _DT_BINDINGS_CLK_MT8365_H */
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
index 68862aaf977e..78aa07fd7cc0 100644
--- a/include/dt-bindings/clock/meson8b-clkc.h
+++ b/include/dt-bindings/clock/meson8b-clkc.h
@@ -6,8 +6,6 @@
#ifndef __MESON8B_CLKC_H
#define __MESON8B_CLKC_H
-#define CLKID_UNUSED 0
-#define CLKID_XTAL 1
#define CLKID_PLL_FIXED 2
#define CLKID_PLL_VID 3
#define CLKID_PLL_SYS 4
@@ -107,6 +105,17 @@
#define CLKID_PERIPH 126
#define CLKID_AXI 128
#define CLKID_L2_DRAM 130
+#define CLKID_HDMI_PLL_HDMI_OUT 132
+#define CLKID_VID_PLL_FINAL_DIV 137
+#define CLKID_VCLK_IN_SEL 138
+#define CLKID_VCLK2_IN_SEL 149
+#define CLKID_CTS_ENCT 161
+#define CLKID_CTS_ENCP 163
+#define CLKID_CTS_ENCI 165
+#define CLKID_HDMI_TX_PIXEL 167
+#define CLKID_CTS_ENCL 169
+#define CLKID_CTS_VDAC0 171
+#define CLKID_HDMI_SYS 174
#define CLKID_VPU 190
#define CLKID_VDEC_1 196
#define CLKID_VDEC_HCODEC 199
diff --git a/include/dt-bindings/clock/microchip,lan966x.h b/include/dt-bindings/clock/microchip,lan966x.h
new file mode 100644
index 000000000000..6f9d43d76d5a
--- /dev/null
+++ b/include/dt-bindings/clock/microchip,lan966x.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021 Microchip Inc.
+ *
+ * Author: Kavyasree Kotagiri <kavyasree.kotagiri@microchip.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_LAN966X_H
+#define _DT_BINDINGS_CLK_LAN966X_H
+
+#define GCK_ID_QSPI0 0
+#define GCK_ID_QSPI1 1
+#define GCK_ID_QSPI2 2
+#define GCK_ID_SDMMC0 3
+#define GCK_ID_PI 4
+#define GCK_ID_MCAN0 5
+#define GCK_ID_MCAN1 6
+#define GCK_ID_FLEXCOM0 7
+#define GCK_ID_FLEXCOM1 8
+#define GCK_ID_FLEXCOM2 9
+#define GCK_ID_FLEXCOM3 10
+#define GCK_ID_FLEXCOM4 11
+#define GCK_ID_TIMER 12
+#define GCK_ID_USB_REFCLK 13
+
+/* Gate clocks */
+#define GCK_GATE_UHPHS 14
+#define GCK_GATE_UDPHS 15
+#define GCK_GATE_MCRAMC 16
+#define GCK_GATE_HMATRIX 17
+
+#define N_CLOCKS 18
+
+#endif
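
Headers like this one are shared between the clock driver and device trees: a DTS consumer names a clock with the provider's phandle plus one of these IDs. A minimal, hypothetical fragment (the provider label "clks", the unit address, and the clock-names are illustrative assumptions, not taken from the lan966x binding):

    #include <dt-bindings/clock/microchip,lan966x.h>

    /* hypothetical consumer node; label, address and names are assumed */
    sdmmc0: mmc@e0830000 {
            compatible = "microchip,lan966x-sdhci";
            reg = <0xe0830000 0x300>;
            clocks = <&clks GCK_ID_SDMMC0>, <&clks GCK_GATE_MCRAMC>;
            clock-names = "hclock", "multclk";
    };

Note that N_CLOCKS (18) is only the size of the provider's clock table; it is not itself a selectable clock.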
diff --git a/include/dt-bindings/clock/microchip,mpfs-clock.h b/include/dt-bindings/clock/microchip,mpfs-clock.h
new file mode 100644
index 000000000000..79775a5134ca
--- /dev/null
+++ b/include/dt-bindings/clock/microchip,mpfs-clock.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Daire McNamara <daire.mcnamara@microchip.com>
+ * Copyright (C) 2020-2022 Microchip Technology Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MICROCHIP_MPFS_H_
+#define _DT_BINDINGS_CLK_MICROCHIP_MPFS_H_
+
+#define CLK_CPU 0
+#define CLK_AXI 1
+#define CLK_AHB 2
+
+#define CLK_ENVM 3
+#define CLK_MAC0 4
+#define CLK_MAC1 5
+#define CLK_MMC 6
+#define CLK_TIMER 7
+#define CLK_MMUART0 8
+#define CLK_MMUART1 9
+#define CLK_MMUART2 10
+#define CLK_MMUART3 11
+#define CLK_MMUART4 12
+#define CLK_SPI0 13
+#define CLK_SPI1 14
+#define CLK_I2C0 15
+#define CLK_I2C1 16
+#define CLK_CAN0 17
+#define CLK_CAN1 18
+#define CLK_USB 19
+#define CLK_RESERVED 20
+#define CLK_RTC 21
+#define CLK_QSPI 22
+#define CLK_GPIO0 23
+#define CLK_GPIO1 24
+#define CLK_GPIO2 25
+#define CLK_DDRC 26
+#define CLK_FIC0 27
+#define CLK_FIC1 28
+#define CLK_FIC2 29
+#define CLK_FIC3 30
+#define CLK_ATHENA 31
+#define CLK_CFM 32
+
+#define CLK_RTCREF 33
+#define CLK_MSSPLL 34
+
+/* Clock Conditioning Circuitry Clock IDs */
+
+#define CLK_CCC_PLL0 0
+#define CLK_CCC_PLL1 1
+#define CLK_CCC_DLL0 2
+#define CLK_CCC_DLL1 3
+
+#define CLK_CCC_PLL0_OUT0 4
+#define CLK_CCC_PLL0_OUT1 5
+#define CLK_CCC_PLL0_OUT2 6
+#define CLK_CCC_PLL0_OUT3 7
+
+#define CLK_CCC_PLL1_OUT0 8
+#define CLK_CCC_PLL1_OUT1 9
+#define CLK_CCC_PLL1_OUT2 10
+#define CLK_CCC_PLL1_OUT3 11
+
+#define CLK_CCC_DLL0_OUT0 12
+#define CLK_CCC_DLL0_OUT1 13
+
+#define CLK_CCC_DLL1_OUT0 14
+#define CLK_CCC_DLL1_OUT1 15
+
+#endif /* _DT_BINDINGS_CLK_MICROCHIP_MPFS_H_ */
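
This header carries two disjoint, zero-based ID spaces: the main clkcfg IDs (CLK_CPU through CLK_MSSPLL) and the Clock Conditioning Circuitry IDs (CLK_CCC_*), which belong to a separate provider node. The same numeric value can therefore appear in both lists, and the provider phandle keeps them apart. A sketch, with the labels &clkcfg and &ccc_ne assumed:

    #include <dt-bindings/clock/microchip,mpfs-clock.h>

    mmuart1: serial@20100000 {
            clocks = <&clkcfg CLK_MMUART1>;
    };

    fabric_user: example@0 {
            /* CLK_CCC_PLL0_OUT0 is 4, the same value as CLK_MAC0
             * above; the phandle selects which ID space applies */
            clocks = <&ccc_ne CLK_CCC_PLL0_OUT0>;
    };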
diff --git a/include/dt-bindings/clock/microchip,sparx5.h b/include/dt-bindings/clock/microchip,sparx5.h
new file mode 100644
index 000000000000..4b04dabacec2
--- /dev/null
+++ b/include/dt-bindings/clock/microchip,sparx5.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019 Microchip Inc.
+ *
+ * Author: Lars Povlsen <lars.povlsen@microchip.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_SPARX5_H
+#define _DT_BINDINGS_CLK_SPARX5_H
+
+#define CLK_ID_CORE 0
+#define CLK_ID_DDR 1
+#define CLK_ID_CPU2 2
+#define CLK_ID_ARM2 3
+#define CLK_ID_AUX1 4
+#define CLK_ID_AUX2 5
+#define CLK_ID_AUX3 6
+#define CLK_ID_AUX4 7
+#define CLK_ID_SYNCE 8
+
+#define N_CLOCKS 9
+
+#endif
diff --git a/include/dt-bindings/clock/mstar-msc313-mpll.h b/include/dt-bindings/clock/mstar-msc313-mpll.h
new file mode 100644
index 000000000000..1b30b02317b6
--- /dev/null
+++ b/include/dt-bindings/clock/mstar-msc313-mpll.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Output definitions for the MStar/SigmaStar MPLL
+ *
+ * Copyright (C) 2020 Daniel Palmer <daniel@thingy.jp>
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_MSTAR_MSC313_MPLL_H
+#define _DT_BINDINGS_CLOCK_MSTAR_MSC313_MPLL_H
+
+#define MSTAR_MSC313_MPLL_DIV2 1
+#define MSTAR_MSC313_MPLL_DIV3 2
+#define MSTAR_MSC313_MPLL_DIV4 3
+#define MSTAR_MSC313_MPLL_DIV5 4
+#define MSTAR_MSC313_MPLL_DIV6 5
+#define MSTAR_MSC313_MPLL_DIV7 6
+#define MSTAR_MSC313_MPLL_DIV10 7
+
+#endif
diff --git a/include/dt-bindings/clock/mt6765-clk.h b/include/dt-bindings/clock/mt6765-clk.h
new file mode 100644
index 000000000000..eb97e568518e
--- /dev/null
+++ b/include/dt-bindings/clock/mt6765-clk.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_CLK_MT6765_H
+#define _DT_BINDINGS_CLK_MT6765_H
+
+/* Fixed clocks */
+#define CLK_TOP_CLK26M 0
+
+/* APMIXEDSYS */
+#define CLK_APMIXED_ARMPLL_L 0
+#define CLK_APMIXED_ARMPLL 1
+#define CLK_APMIXED_CCIPLL 2
+#define CLK_APMIXED_MAINPLL 3
+#define CLK_APMIXED_MFGPLL 4
+#define CLK_APMIXED_MMPLL 5
+#define CLK_APMIXED_UNIV2PLL 6
+#define CLK_APMIXED_MSDCPLL 7
+#define CLK_APMIXED_APLL1 8
+#define CLK_APMIXED_MPLL 9
+#define CLK_APMIXED_ULPOSC1 10
+#define CLK_APMIXED_ULPOSC2 11
+#define CLK_APMIXED_SSUSB26M 12
+#define CLK_APMIXED_APPLL26M 13
+#define CLK_APMIXED_MIPIC0_26M 14
+#define CLK_APMIXED_MDPLLGP26M 15
+#define CLK_APMIXED_MMSYS_F26M 16
+#define CLK_APMIXED_UFS26M 17
+#define CLK_APMIXED_MIPIC1_26M 18
+#define CLK_APMIXED_MEMPLL26M 19
+#define CLK_APMIXED_CLKSQ_LVPLL_26M 20
+#define CLK_APMIXED_MIPID0_26M 21
+#define CLK_APMIXED_NR_CLK 22
+
+/* TOPCKGEN */
+#define CLK_TOP_SYSPLL 0
+#define CLK_TOP_SYSPLL_D2 1
+#define CLK_TOP_SYSPLL1_D2 2
+#define CLK_TOP_SYSPLL1_D4 3
+#define CLK_TOP_SYSPLL1_D8 4
+#define CLK_TOP_SYSPLL1_D16 5
+#define CLK_TOP_SYSPLL_D3 6
+#define CLK_TOP_SYSPLL2_D2 7
+#define CLK_TOP_SYSPLL2_D4 8
+#define CLK_TOP_SYSPLL2_D8 9
+#define CLK_TOP_SYSPLL_D5 10
+#define CLK_TOP_SYSPLL3_D2 11
+#define CLK_TOP_SYSPLL3_D4 12
+#define CLK_TOP_SYSPLL_D7 13
+#define CLK_TOP_SYSPLL4_D2 14
+#define CLK_TOP_SYSPLL4_D4 15
+#define CLK_TOP_USB20_192M 16
+#define CLK_TOP_USB20_192M_D4 17
+#define CLK_TOP_USB20_192M_D8 18
+#define CLK_TOP_USB20_192M_D16 19
+#define CLK_TOP_USB20_192M_D32 20
+#define CLK_TOP_UNIVPLL 21
+#define CLK_TOP_UNIVPLL_D2 22
+#define CLK_TOP_UNIVPLL1_D2 23
+#define CLK_TOP_UNIVPLL1_D4 24
+#define CLK_TOP_UNIVPLL_D3 25
+#define CLK_TOP_UNIVPLL2_D2 26
+#define CLK_TOP_UNIVPLL2_D4 27
+#define CLK_TOP_UNIVPLL2_D8 28
+#define CLK_TOP_UNIVPLL2_D32 29
+#define CLK_TOP_UNIVPLL_D5 30
+#define CLK_TOP_UNIVPLL3_D2 31
+#define CLK_TOP_UNIVPLL3_D4 32
+#define CLK_TOP_MMPLL 33
+#define CLK_TOP_MMPLL_D2 34
+#define CLK_TOP_MPLL 35
+#define CLK_TOP_DA_MPLL_104M_DIV 36
+#define CLK_TOP_DA_MPLL_52M_DIV 37
+#define CLK_TOP_MFGPLL 38
+#define CLK_TOP_MSDCPLL 39
+#define CLK_TOP_MSDCPLL_D2 40
+#define CLK_TOP_APLL1 41
+#define CLK_TOP_APLL1_D2 42
+#define CLK_TOP_APLL1_D4 43
+#define CLK_TOP_APLL1_D8 44
+#define CLK_TOP_ULPOSC1 45
+#define CLK_TOP_ULPOSC1_D2 46
+#define CLK_TOP_ULPOSC1_D4 47
+#define CLK_TOP_ULPOSC1_D8 48
+#define CLK_TOP_ULPOSC1_D16 49
+#define CLK_TOP_ULPOSC1_D32 50
+#define CLK_TOP_DMPLL 51
+#define CLK_TOP_F_FRTC 52
+#define CLK_TOP_F_F26M 53
+#define CLK_TOP_AXI 54
+#define CLK_TOP_MM 55
+#define CLK_TOP_SCP 56
+#define CLK_TOP_MFG 57
+#define CLK_TOP_F_FUART 58
+#define CLK_TOP_SPI 59
+#define CLK_TOP_MSDC50_0 60
+#define CLK_TOP_MSDC30_1 61
+#define CLK_TOP_AUDIO 62
+#define CLK_TOP_AUD_1 63
+#define CLK_TOP_AUD_ENGEN1 64
+#define CLK_TOP_F_FDISP_PWM 65
+#define CLK_TOP_SSPM 66
+#define CLK_TOP_DXCC 67
+#define CLK_TOP_I2C 68
+#define CLK_TOP_F_FPWM 69
+#define CLK_TOP_F_FSENINF 70
+#define CLK_TOP_AES_FDE 71
+#define CLK_TOP_F_BIST2FPC 72
+#define CLK_TOP_ARMPLL_DIVIDER_PLL0 73
+#define CLK_TOP_ARMPLL_DIVIDER_PLL1 74
+#define CLK_TOP_ARMPLL_DIVIDER_PLL2 75
+#define CLK_TOP_DA_USB20_48M_DIV 76
+#define CLK_TOP_DA_UNIV_48M_DIV 77
+#define CLK_TOP_APLL12_DIV0 78
+#define CLK_TOP_APLL12_DIV1 79
+#define CLK_TOP_APLL12_DIV2 80
+#define CLK_TOP_APLL12_DIV3 81
+#define CLK_TOP_ARMPLL_DIVIDER_PLL0_EN 82
+#define CLK_TOP_ARMPLL_DIVIDER_PLL1_EN 83
+#define CLK_TOP_ARMPLL_DIVIDER_PLL2_EN 84
+#define CLK_TOP_FMEM_OCC_DRC_EN 85
+#define CLK_TOP_USB20_48M_EN 86
+#define CLK_TOP_UNIVPLL_48M_EN 87
+#define CLK_TOP_MPLL_104M_EN 88
+#define CLK_TOP_MPLL_52M_EN 89
+#define CLK_TOP_F_UFS_MP_SAP_CFG_EN 90
+#define CLK_TOP_F_BIST2FPC_EN 91
+#define CLK_TOP_MD_32K 92
+#define CLK_TOP_MD_26M 93
+#define CLK_TOP_MD2_32K 94
+#define CLK_TOP_MD2_26M 95
+#define CLK_TOP_AXI_SEL 96
+#define CLK_TOP_MEM_SEL 97
+#define CLK_TOP_MM_SEL 98
+#define CLK_TOP_SCP_SEL 99
+#define CLK_TOP_MFG_SEL 100
+#define CLK_TOP_ATB_SEL 101
+#define CLK_TOP_CAMTG_SEL 102
+#define CLK_TOP_CAMTG1_SEL 103
+#define CLK_TOP_CAMTG2_SEL 104
+#define CLK_TOP_CAMTG3_SEL 105
+#define CLK_TOP_UART_SEL 106
+#define CLK_TOP_SPI_SEL 107
+#define CLK_TOP_MSDC50_0_HCLK_SEL 108
+#define CLK_TOP_MSDC50_0_SEL 109
+#define CLK_TOP_MSDC30_1_SEL 110
+#define CLK_TOP_AUDIO_SEL 111
+#define CLK_TOP_AUD_INTBUS_SEL 112
+#define CLK_TOP_AUD_1_SEL 113
+#define CLK_TOP_AUD_ENGEN1_SEL 114
+#define CLK_TOP_DISP_PWM_SEL 115
+#define CLK_TOP_SSPM_SEL 116
+#define CLK_TOP_DXCC_SEL 117
+#define CLK_TOP_USB_TOP_SEL 118
+#define CLK_TOP_SPM_SEL 119
+#define CLK_TOP_I2C_SEL 120
+#define CLK_TOP_PWM_SEL 121
+#define CLK_TOP_SENINF_SEL 122
+#define CLK_TOP_AES_FDE_SEL 123
+#define CLK_TOP_PWRAP_ULPOSC_SEL 124
+#define CLK_TOP_CAMTM_SEL 125
+#define CLK_TOP_NR_CLK 126
+
+/* INFRACFG */
+#define CLK_IFR_ICUSB 0
+#define CLK_IFR_GCE 1
+#define CLK_IFR_THERM 2
+#define CLK_IFR_I2C_AP 3
+#define CLK_IFR_I2C_CCU 4
+#define CLK_IFR_I2C_SSPM 5
+#define CLK_IFR_I2C_RSV 6
+#define CLK_IFR_PWM_HCLK 7
+#define CLK_IFR_PWM1 8
+#define CLK_IFR_PWM2 9
+#define CLK_IFR_PWM3 10
+#define CLK_IFR_PWM4 11
+#define CLK_IFR_PWM5 12
+#define CLK_IFR_PWM 13
+#define CLK_IFR_UART0 14
+#define CLK_IFR_UART1 15
+#define CLK_IFR_GCE_26M 16
+#define CLK_IFR_CQ_DMA_FPC 17
+#define CLK_IFR_BTIF 18
+#define CLK_IFR_SPI0 19
+#define CLK_IFR_MSDC0 20
+#define CLK_IFR_MSDC1 21
+#define CLK_IFR_TRNG 22
+#define CLK_IFR_AUXADC 23
+#define CLK_IFR_CCIF1_AP 24
+#define CLK_IFR_CCIF1_MD 25
+#define CLK_IFR_AUXADC_MD 26
+#define CLK_IFR_AP_DMA 27
+#define CLK_IFR_DEVICE_APC 28
+#define CLK_IFR_CCIF_AP 29
+#define CLK_IFR_AUDIO 30
+#define CLK_IFR_CCIF_MD 31
+#define CLK_IFR_RG_PWM_FBCLK6 32
+#define CLK_IFR_DISP_PWM 33
+#define CLK_IFR_CLDMA_BCLK 34
+#define CLK_IFR_AUDIO_26M_BCLK 35
+#define CLK_IFR_SPI1 36
+#define CLK_IFR_I2C4 37
+#define CLK_IFR_SPI2 38
+#define CLK_IFR_SPI3 39
+#define CLK_IFR_I2C5 40
+#define CLK_IFR_I2C5_ARBITER 41
+#define CLK_IFR_I2C5_IMM 42
+#define CLK_IFR_I2C1_ARBITER 43
+#define CLK_IFR_I2C1_IMM 44
+#define CLK_IFR_I2C2_ARBITER 45
+#define CLK_IFR_I2C2_IMM 46
+#define CLK_IFR_SPI4 47
+#define CLK_IFR_SPI5 48
+#define CLK_IFR_CQ_DMA 49
+#define CLK_IFR_FAES_FDE 50
+#define CLK_IFR_MSDC0_SELF 51
+#define CLK_IFR_MSDC1_SELF 52
+#define CLK_IFR_I2C6 53
+#define CLK_IFR_AP_MSDC0 54
+#define CLK_IFR_MD_MSDC0 55
+#define CLK_IFR_MSDC0_SRC 56
+#define CLK_IFR_MSDC1_SRC 57
+#define CLK_IFR_AES_TOP0_BCLK 58
+#define CLK_IFR_MCU_PM_BCLK 59
+#define CLK_IFR_CCIF2_AP 60
+#define CLK_IFR_CCIF2_MD 61
+#define CLK_IFR_CCIF3_AP 62
+#define CLK_IFR_CCIF3_MD 63
+#define CLK_IFR_NR_CLK 64
+
+/* AUDIO */
+#define CLK_AUDIO_AFE 0
+#define CLK_AUDIO_22M 1
+#define CLK_AUDIO_APLL_TUNER 2
+#define CLK_AUDIO_ADC 3
+#define CLK_AUDIO_DAC 4
+#define CLK_AUDIO_DAC_PREDIS 5
+#define CLK_AUDIO_TML 6
+#define CLK_AUDIO_I2S1_BCLK 7
+#define CLK_AUDIO_I2S2_BCLK 8
+#define CLK_AUDIO_I2S3_BCLK 9
+#define CLK_AUDIO_I2S4_BCLK 10
+#define CLK_AUDIO_NR_CLK 11
+
+/* MIPI_RX_ANA_CSI0A */
+
+#define CLK_MIPI0A_CSR_CSI_EN_0A 0
+#define CLK_MIPI0A_NR_CLK 1
+
+/* MMSYS_CONFIG */
+
+#define CLK_MM_MDP_RDMA0 0
+#define CLK_MM_MDP_CCORR0 1
+#define CLK_MM_MDP_RSZ0 2
+#define CLK_MM_MDP_RSZ1 3
+#define CLK_MM_MDP_TDSHP0 4
+#define CLK_MM_MDP_WROT0 5
+#define CLK_MM_MDP_WDMA0 6
+#define CLK_MM_DISP_OVL0 7
+#define CLK_MM_DISP_OVL0_2L 8
+#define CLK_MM_DISP_RSZ0 9
+#define CLK_MM_DISP_RDMA0 10
+#define CLK_MM_DISP_WDMA0 11
+#define CLK_MM_DISP_COLOR0 12
+#define CLK_MM_DISP_CCORR0 13
+#define CLK_MM_DISP_AAL0 14
+#define CLK_MM_DISP_GAMMA0 15
+#define CLK_MM_DISP_DITHER0 16
+#define CLK_MM_DSI0 17
+#define CLK_MM_FAKE_ENG 18
+#define CLK_MM_SMI_COMMON 19
+#define CLK_MM_SMI_LARB0 20
+#define CLK_MM_SMI_COMM0 21
+#define CLK_MM_SMI_COMM1 22
+#define CLK_MM_CAM_MDP 23
+#define CLK_MM_SMI_IMG 24
+#define CLK_MM_SMI_CAM 25
+#define CLK_MM_IMG_DL_RELAY 26
+#define CLK_MM_IMG_DL_ASYNC_TOP 27
+#define CLK_MM_DIG_DSI 28
+#define CLK_MM_F26M_HRTWT 29
+#define CLK_MM_NR_CLK 30
+
+/* IMGSYS */
+
+#define CLK_IMG_LARB2 0
+#define CLK_IMG_DIP 1
+#define CLK_IMG_FDVT 2
+#define CLK_IMG_DPE 3
+#define CLK_IMG_RSC 4
+#define CLK_IMG_NR_CLK 5
+
+/* VENCSYS */
+
+#define CLK_VENC_SET0_LARB 0
+#define CLK_VENC_SET1_VENC 1
+#define CLK_VENC_SET2_JPGENC 2
+#define CLK_VENC_SET3_VDEC 3
+#define CLK_VENC_NR_CLK 4
+
+/* CAMSYS */
+
+#define CLK_CAM_LARB3 0
+#define CLK_CAM_DFP_VAD 1
+#define CLK_CAM 2
+#define CLK_CAMTG 3
+#define CLK_CAM_SENINF 4
+#define CLK_CAMSV0 5
+#define CLK_CAMSV1 6
+#define CLK_CAMSV2 7
+#define CLK_CAM_CCU 8
+#define CLK_CAM_NR_CLK 9
+
+#endif /* _DT_BINDINGS_CLK_MT6765_H */
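
On the provider side, each of the blocks above maps to one clock-controller node that advertises a single clock cell, and that cell carries exactly these IDs; the trailing *_NR_CLK value is the count the driver uses to size its table, not a selectable clock. A hypothetical provider node (the unit address and syscon pairing are assumptions):

    /* sketch of the provider side: one clock cell, carrying the IDs above */
    topckgen: clock-controller@10000000 {
            compatible = "mediatek,mt6765-topckgen", "syscon";
            reg = <0x10000000 0x1000>;
            #clock-cells = <1>;
    };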
diff --git a/include/dt-bindings/clock/mt7621-clk.h b/include/dt-bindings/clock/mt7621-clk.h
new file mode 100644
index 000000000000..1422badcf9de
--- /dev/null
+++ b/include/dt-bindings/clock/mt7621-clk.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Author: Sergio Paracuellos <sergio.paracuellos@gmail.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT7621_H
+#define _DT_BINDINGS_CLK_MT7621_H
+
+#define MT7621_CLK_XTAL 0
+#define MT7621_CLK_CPU 1
+#define MT7621_CLK_BUS 2
+#define MT7621_CLK_50M 3
+#define MT7621_CLK_125M 4
+#define MT7621_CLK_150M 5
+#define MT7621_CLK_250M 6
+#define MT7621_CLK_270M 7
+
+#define MT7621_CLK_HSDMA 8
+#define MT7621_CLK_FE 9
+#define MT7621_CLK_SP_DIVTX 10
+#define MT7621_CLK_TIMER 11
+#define MT7621_CLK_PCM 12
+#define MT7621_CLK_PIO 13
+#define MT7621_CLK_GDMA 14
+#define MT7621_CLK_NAND 15
+#define MT7621_CLK_I2C 16
+#define MT7621_CLK_I2S 17
+#define MT7621_CLK_SPI 18
+#define MT7621_CLK_UART1 19
+#define MT7621_CLK_UART2 20
+#define MT7621_CLK_UART3 21
+#define MT7621_CLK_ETH 22
+#define MT7621_CLK_PCIE0 23
+#define MT7621_CLK_PCIE1 24
+#define MT7621_CLK_PCIE2 25
+#define MT7621_CLK_CRYPTO 26
+#define MT7621_CLK_SHXC 27
+
+#define MT7621_CLK_MAX 28
+
+#endif /* _DT_BINDINGS_CLK_MT7621_H */
diff --git a/include/dt-bindings/clock/mt7986-clk.h b/include/dt-bindings/clock/mt7986-clk.h
new file mode 100644
index 000000000000..5a9b169324b0
--- /dev/null
+++ b/include/dt-bindings/clock/mt7986-clk.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT7986_H
+#define _DT_BINDINGS_CLK_MT7986_H
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_ARMPLL 0
+#define CLK_APMIXED_NET2PLL 1
+#define CLK_APMIXED_MMPLL 2
+#define CLK_APMIXED_SGMPLL 3
+#define CLK_APMIXED_WEDMCUPLL 4
+#define CLK_APMIXED_NET1PLL 5
+#define CLK_APMIXED_MPLL 6
+#define CLK_APMIXED_APLL2 7
+
+/* TOPCKGEN */
+
+#define CLK_TOP_XTAL 0
+#define CLK_TOP_XTAL_D2 1
+#define CLK_TOP_RTC_32K 2
+#define CLK_TOP_RTC_32P7K 3
+#define CLK_TOP_MPLL_D2 4
+#define CLK_TOP_MPLL_D4 5
+#define CLK_TOP_MPLL_D8 6
+#define CLK_TOP_MPLL_D8_D2 7
+#define CLK_TOP_MPLL_D3_D2 8
+#define CLK_TOP_MMPLL_D2 9
+#define CLK_TOP_MMPLL_D4 10
+#define CLK_TOP_MMPLL_D8 11
+#define CLK_TOP_MMPLL_D8_D2 12
+#define CLK_TOP_MMPLL_D3_D8 13
+#define CLK_TOP_MMPLL_U2PHY 14
+#define CLK_TOP_APLL2_D4 15
+#define CLK_TOP_NET1PLL_D4 16
+#define CLK_TOP_NET1PLL_D5 17
+#define CLK_TOP_NET1PLL_D5_D2 18
+#define CLK_TOP_NET1PLL_D5_D4 19
+#define CLK_TOP_NET1PLL_D8_D2 20
+#define CLK_TOP_NET1PLL_D8_D4 21
+#define CLK_TOP_NET2PLL_D4 22
+#define CLK_TOP_NET2PLL_D4_D2 23
+#define CLK_TOP_NET2PLL_D3_D2 24
+#define CLK_TOP_WEDMCUPLL_D5_D2 25
+#define CLK_TOP_NFI1X_SEL 26
+#define CLK_TOP_SPINFI_SEL 27
+#define CLK_TOP_SPI_SEL 28
+#define CLK_TOP_SPIM_MST_SEL 29
+#define CLK_TOP_UART_SEL 30
+#define CLK_TOP_PWM_SEL 31
+#define CLK_TOP_I2C_SEL 32
+#define CLK_TOP_PEXTP_TL_SEL 33
+#define CLK_TOP_EMMC_250M_SEL 34
+#define CLK_TOP_EMMC_416M_SEL 35
+#define CLK_TOP_F_26M_ADC_SEL 36
+#define CLK_TOP_DRAMC_SEL 37
+#define CLK_TOP_DRAMC_MD32_SEL 38
+#define CLK_TOP_SYSAXI_SEL 39
+#define CLK_TOP_SYSAPB_SEL 40
+#define CLK_TOP_ARM_DB_MAIN_SEL 41
+#define CLK_TOP_ARM_DB_JTSEL 42
+#define CLK_TOP_NETSYS_SEL 43
+#define CLK_TOP_NETSYS_500M_SEL 44
+#define CLK_TOP_NETSYS_MCU_SEL 45
+#define CLK_TOP_NETSYS_2X_SEL 46
+#define CLK_TOP_SGM_325M_SEL 47
+#define CLK_TOP_SGM_REG_SEL 48
+#define CLK_TOP_A1SYS_SEL 49
+#define CLK_TOP_CONN_MCUSYS_SEL 50
+#define CLK_TOP_EIP_B_SEL 51
+#define CLK_TOP_PCIE_PHY_SEL 52
+#define CLK_TOP_USB3_PHY_SEL 53
+#define CLK_TOP_F26M_SEL 54
+#define CLK_TOP_AUD_L_SEL 55
+#define CLK_TOP_A_TUNER_SEL 56
+#define CLK_TOP_U2U3_SEL 57
+#define CLK_TOP_U2U3_SYS_SEL 58
+#define CLK_TOP_U2U3_XHCI_SEL 59
+#define CLK_TOP_DA_U2_REFSEL 60
+#define CLK_TOP_DA_U2_CK_1P_SEL 61
+#define CLK_TOP_AP2CNN_HOST_SEL 62
+#define CLK_TOP_JTAG 63
+
+/* INFRACFG */
+
+#define CLK_INFRA_SYSAXI_D2 0
+#define CLK_INFRA_UART0_SEL 1
+#define CLK_INFRA_UART1_SEL 2
+#define CLK_INFRA_UART2_SEL 3
+#define CLK_INFRA_SPI0_SEL 4
+#define CLK_INFRA_SPI1_SEL 5
+#define CLK_INFRA_PWM1_SEL 6
+#define CLK_INFRA_PWM2_SEL 7
+#define CLK_INFRA_PWM_BSEL 8
+#define CLK_INFRA_PCIE_SEL 9
+#define CLK_INFRA_GPT_STA 10
+#define CLK_INFRA_PWM_HCK 11
+#define CLK_INFRA_PWM_STA 12
+#define CLK_INFRA_PWM1_CK 13
+#define CLK_INFRA_PWM2_CK 14
+#define CLK_INFRA_CQ_DMA_CK 15
+#define CLK_INFRA_EIP97_CK 16
+#define CLK_INFRA_AUD_BUS_CK 17
+#define CLK_INFRA_AUD_26M_CK 18
+#define CLK_INFRA_AUD_L_CK 19
+#define CLK_INFRA_AUD_AUD_CK 20
+#define CLK_INFRA_AUD_EG2_CK 21
+#define CLK_INFRA_DRAMC_26M_CK 22
+#define CLK_INFRA_DBG_CK 23
+#define CLK_INFRA_AP_DMA_CK 24
+#define CLK_INFRA_SEJ_CK 25
+#define CLK_INFRA_SEJ_13M_CK 26
+#define CLK_INFRA_THERM_CK 27
+#define CLK_INFRA_I2C0_CK 28
+#define CLK_INFRA_UART0_CK 29
+#define CLK_INFRA_UART1_CK 30
+#define CLK_INFRA_UART2_CK 31
+#define CLK_INFRA_NFI1_CK 32
+#define CLK_INFRA_SPINFI1_CK 33
+#define CLK_INFRA_NFI_HCK_CK 34
+#define CLK_INFRA_SPI0_CK 35
+#define CLK_INFRA_SPI1_CK 36
+#define CLK_INFRA_SPI0_HCK_CK 37
+#define CLK_INFRA_SPI1_HCK_CK 38
+#define CLK_INFRA_FRTC_CK 39
+#define CLK_INFRA_MSDC_CK 40
+#define CLK_INFRA_MSDC_HCK_CK 41
+#define CLK_INFRA_MSDC_133M_CK 42
+#define CLK_INFRA_MSDC_66M_CK 43
+#define CLK_INFRA_ADC_26M_CK 44
+#define CLK_INFRA_ADC_FRC_CK 45
+#define CLK_INFRA_FBIST2FPC_CK 46
+#define CLK_INFRA_IUSB_133_CK 47
+#define CLK_INFRA_IUSB_66M_CK 48
+#define CLK_INFRA_IUSB_SYS_CK 49
+#define CLK_INFRA_IUSB_CK 50
+#define CLK_INFRA_IPCIE_CK 51
+#define CLK_INFRA_IPCIE_PIPE_CK 52
+#define CLK_INFRA_IPCIER_CK 53
+#define CLK_INFRA_IPCIEB_CK 54
+#define CLK_INFRA_TRNG_CK 55
+
+/* SGMIISYS_0 */
+
+#define CLK_SGMII0_TX250M_EN 0
+#define CLK_SGMII0_RX250M_EN 1
+#define CLK_SGMII0_CDR_REF 2
+#define CLK_SGMII0_CDR_FB 3
+
+/* SGMIISYS_1 */
+
+#define CLK_SGMII1_TX250M_EN 0
+#define CLK_SGMII1_RX250M_EN 1
+#define CLK_SGMII1_CDR_REF 2
+#define CLK_SGMII1_CDR_FB 3
+
+/* ETHSYS */
+
+#define CLK_ETH_FE_EN 0
+#define CLK_ETH_GP2_EN 1
+#define CLK_ETH_GP1_EN 2
+#define CLK_ETH_WOCPU1_EN 3
+#define CLK_ETH_WOCPU0_EN 4
+
+#endif /* _DT_BINDINGS_CLK_MT7986_H */
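
A single peripheral on an SoC like this commonly mixes clocks from several of the controllers above, since bus gates live in INFRACFG while rate muxes live in TOPCKGEN. A hypothetical fragment (the labels, unit address and clock-names are assumptions):

    #include <dt-bindings/clock/mt7986-clk.h>

    spi0: spi@1100a000 {
            clocks = <&topckgen CLK_TOP_SPIM_MST_SEL>,
                     <&infracfg CLK_INFRA_SPI0_CK>,
                     <&infracfg CLK_INFRA_SPI0_HCK_CK>;
            clock-names = "sel-clk", "spi-clk", "hclk";
    };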
diff --git a/include/dt-bindings/clock/mt8167-clk.h b/include/dt-bindings/clock/mt8167-clk.h
new file mode 100644
index 000000000000..a96158edd817
--- /dev/null
+++ b/include/dt-bindings/clock/mt8167-clk.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Copyright (c) 2020 BayLibre, SAS.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ * Fabien Parent <fparent@baylibre.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8167_H
+#define _DT_BINDINGS_CLK_MT8167_H
+
+/* MT8167 is based on MT8516 */
+#include <dt-bindings/clock/mt8516-clk.h>
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_TVDPLL (CLK_APMIXED_NR_CLK + 0)
+#define CLK_APMIXED_LVDSPLL (CLK_APMIXED_NR_CLK + 1)
+#define CLK_APMIXED_HDMI_REF (CLK_APMIXED_NR_CLK + 2)
+#define MT8167_CLK_APMIXED_NR_CLK (CLK_APMIXED_NR_CLK + 3)
+
+/* TOPCKGEN */
+
+#define CLK_TOP_DSI0_LNTC_DSICK (CLK_TOP_NR_CLK + 0)
+#define CLK_TOP_VPLL_DPIX (CLK_TOP_NR_CLK + 1)
+#define CLK_TOP_LVDSTX_CLKDIG_CTS (CLK_TOP_NR_CLK + 2)
+#define CLK_TOP_HDMTX_CLKDIG_CTS (CLK_TOP_NR_CLK + 3)
+#define CLK_TOP_LVDSPLL (CLK_TOP_NR_CLK + 4)
+#define CLK_TOP_LVDSPLL_D2 (CLK_TOP_NR_CLK + 5)
+#define CLK_TOP_LVDSPLL_D4 (CLK_TOP_NR_CLK + 6)
+#define CLK_TOP_LVDSPLL_D8 (CLK_TOP_NR_CLK + 7)
+#define CLK_TOP_MIPI_26M (CLK_TOP_NR_CLK + 8)
+#define CLK_TOP_TVDPLL (CLK_TOP_NR_CLK + 9)
+#define CLK_TOP_TVDPLL_D2 (CLK_TOP_NR_CLK + 10)
+#define CLK_TOP_TVDPLL_D4 (CLK_TOP_NR_CLK + 11)
+#define CLK_TOP_TVDPLL_D8 (CLK_TOP_NR_CLK + 12)
+#define CLK_TOP_TVDPLL_D16 (CLK_TOP_NR_CLK + 13)
+#define CLK_TOP_PWM_MM (CLK_TOP_NR_CLK + 14)
+#define CLK_TOP_CAM_MM (CLK_TOP_NR_CLK + 15)
+#define CLK_TOP_MFG_MM (CLK_TOP_NR_CLK + 16)
+#define CLK_TOP_SPM_52M (CLK_TOP_NR_CLK + 17)
+#define CLK_TOP_MIPI_26M_DBG (CLK_TOP_NR_CLK + 18)
+#define CLK_TOP_SCAM_MM (CLK_TOP_NR_CLK + 19)
+#define CLK_TOP_SMI_MM (CLK_TOP_NR_CLK + 20)
+#define CLK_TOP_26M_HDMI_SIFM (CLK_TOP_NR_CLK + 21)
+#define CLK_TOP_26M_CEC (CLK_TOP_NR_CLK + 22)
+#define CLK_TOP_32K_CEC (CLK_TOP_NR_CLK + 23)
+#define CLK_TOP_GCPU_B (CLK_TOP_NR_CLK + 24)
+#define CLK_TOP_RG_VDEC (CLK_TOP_NR_CLK + 25)
+#define CLK_TOP_RG_FDPI0 (CLK_TOP_NR_CLK + 26)
+#define CLK_TOP_RG_FDPI1 (CLK_TOP_NR_CLK + 27)
+#define CLK_TOP_RG_AXI_MFG (CLK_TOP_NR_CLK + 28)
+#define CLK_TOP_RG_SLOW_MFG (CLK_TOP_NR_CLK + 29)
+#define CLK_TOP_GFMUX_EMI1X_SEL (CLK_TOP_NR_CLK + 30)
+#define CLK_TOP_CSW_MUX_MFG_SEL (CLK_TOP_NR_CLK + 31)
+#define CLK_TOP_CAMTG_MM_SEL (CLK_TOP_NR_CLK + 32)
+#define CLK_TOP_PWM_MM_SEL (CLK_TOP_NR_CLK + 33)
+#define CLK_TOP_SPM_52M_SEL (CLK_TOP_NR_CLK + 34)
+#define CLK_TOP_MFG_MM_SEL (CLK_TOP_NR_CLK + 35)
+#define CLK_TOP_SMI_MM_SEL (CLK_TOP_NR_CLK + 36)
+#define CLK_TOP_SCAM_MM_SEL (CLK_TOP_NR_CLK + 37)
+#define CLK_TOP_VDEC_MM_SEL (CLK_TOP_NR_CLK + 38)
+#define CLK_TOP_DPI0_MM_SEL (CLK_TOP_NR_CLK + 39)
+#define CLK_TOP_DPI1_MM_SEL (CLK_TOP_NR_CLK + 40)
+#define CLK_TOP_AXI_MFG_IN_SEL (CLK_TOP_NR_CLK + 41)
+#define CLK_TOP_SLOW_MFG_SEL (CLK_TOP_NR_CLK + 42)
+#define MT8167_CLK_TOP_NR_CLK (CLK_TOP_NR_CLK + 43)
+
+/* MFGCFG */
+
+#define CLK_MFG_BAXI 0
+#define CLK_MFG_BMEM 1
+#define CLK_MFG_BG3D 2
+#define CLK_MFG_B26M 3
+#define CLK_MFG_NR_CLK 4
+
+/* MMSYS */
+
+#define CLK_MM_SMI_COMMON 0
+#define CLK_MM_SMI_LARB0 1
+#define CLK_MM_CAM_MDP 2
+#define CLK_MM_MDP_RDMA 3
+#define CLK_MM_MDP_RSZ0 4
+#define CLK_MM_MDP_RSZ1 5
+#define CLK_MM_MDP_TDSHP 6
+#define CLK_MM_MDP_WDMA 7
+#define CLK_MM_MDP_WROT 8
+#define CLK_MM_FAKE_ENG 9
+#define CLK_MM_DISP_OVL0 10
+#define CLK_MM_DISP_RDMA0 11
+#define CLK_MM_DISP_RDMA1 12
+#define CLK_MM_DISP_WDMA 13
+#define CLK_MM_DISP_COLOR 14
+#define CLK_MM_DISP_CCORR 15
+#define CLK_MM_DISP_AAL 16
+#define CLK_MM_DISP_GAMMA 17
+#define CLK_MM_DISP_DITHER 18
+#define CLK_MM_DISP_UFOE 19
+#define CLK_MM_DISP_PWM_MM 20
+#define CLK_MM_DISP_PWM_26M 21
+#define CLK_MM_DSI_ENGINE 22
+#define CLK_MM_DSI_DIGITAL 23
+#define CLK_MM_DPI0_ENGINE 24
+#define CLK_MM_DPI0_PXL 25
+#define CLK_MM_LVDS_PXL 26
+#define CLK_MM_LVDS_CTS 27
+#define CLK_MM_DPI1_ENGINE 28
+#define CLK_MM_DPI1_PXL 29
+#define CLK_MM_HDMI_PXL 30
+#define CLK_MM_HDMI_SPDIF 31
+#define CLK_MM_HDMI_ADSP_BCK 32
+#define CLK_MM_HDMI_PLL 33
+#define CLK_MM_NR_CLK 34
+
+/* IMGSYS */
+
+#define CLK_IMG_LARB1_SMI 0
+#define CLK_IMG_CAM_SMI 1
+#define CLK_IMG_CAM_CAM 2
+#define CLK_IMG_SEN_TG 3
+#define CLK_IMG_SEN_CAM 4
+#define CLK_IMG_VENC 5
+#define CLK_IMG_NR_CLK 6
+
+/* VDECSYS */
+
+#define CLK_VDEC_CKEN 0
+#define CLK_VDEC_LARB1_CKEN 1
+#define CLK_VDEC_NR_CLK 2
+
+#endif /* _DT_BINDINGS_CLK_MT8167_H */
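
Unlike the other new headers in this series, mt8167-clk.h extends an existing ID space instead of starting at zero: each new constant is defined relative to the mt8516 terminators, so the two SoCs can share one table layout, and MT8167_CLK_*_NR_CLK gives the enlarged count. Consumers use the derived constants exactly like literals; a sketch with an assumed &topckgen label:

    #include <dt-bindings/clock/mt8167-clk.h>   /* also pulls in mt8516-clk.h */

    dpi0: dpi@14019000 {
            /* CLK_TOP_RG_FDPI0 expands to (CLK_TOP_NR_CLK + 26) */
            clocks = <&topckgen CLK_TOP_RG_FDPI0>,
                     <&topckgen CLK_TOP_DPI0_MM_SEL>;
    };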
diff --git a/include/dt-bindings/clock/mt8173-clk.h b/include/dt-bindings/clock/mt8173-clk.h
index 3acebe937bfc..3d00c98b9654 100644
--- a/include/dt-bindings/clock/mt8173-clk.h
+++ b/include/dt-bindings/clock/mt8173-clk.h
@@ -186,7 +186,6 @@
#define CLK_INFRA_PMICWRAP 11
#define CLK_INFRA_CLK_13M 12
#define CLK_INFRA_CA53SEL 13
-#define CLK_INFRA_CA57SEL 14 /* Deprecated. Don't use it. */
#define CLK_INFRA_CA72SEL 14
#define CLK_INFRA_NR_CLK 15
diff --git a/include/dt-bindings/clock/mt8186-clk.h b/include/dt-bindings/clock/mt8186-clk.h
new file mode 100644
index 000000000000..a70bf67af47d
--- /dev/null
+++ b/include/dt-bindings/clock/mt8186-clk.h
@@ -0,0 +1,445 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8186_H
+#define _DT_BINDINGS_CLK_MT8186_H
+
+/* MCUSYS */
+
+#define CLK_MCU_ARMPLL_LL_SEL 0
+#define CLK_MCU_ARMPLL_BL_SEL 1
+#define CLK_MCU_ARMPLL_BUS_SEL 2
+#define CLK_MCU_NR_CLK 3
+
+/* TOPCKGEN */
+
+#define CLK_TOP_AXI 0
+#define CLK_TOP_SCP 1
+#define CLK_TOP_MFG 2
+#define CLK_TOP_CAMTG 3
+#define CLK_TOP_CAMTG1 4
+#define CLK_TOP_CAMTG2 5
+#define CLK_TOP_CAMTG3 6
+#define CLK_TOP_CAMTG4 7
+#define CLK_TOP_CAMTG5 8
+#define CLK_TOP_CAMTG6 9
+#define CLK_TOP_UART 10
+#define CLK_TOP_SPI 11
+#define CLK_TOP_MSDC50_0_HCLK 12
+#define CLK_TOP_MSDC50_0 13
+#define CLK_TOP_MSDC30_1 14
+#define CLK_TOP_AUDIO 15
+#define CLK_TOP_AUD_INTBUS 16
+#define CLK_TOP_AUD_1 17
+#define CLK_TOP_AUD_2 18
+#define CLK_TOP_AUD_ENGEN1 19
+#define CLK_TOP_AUD_ENGEN2 20
+#define CLK_TOP_DISP_PWM 21
+#define CLK_TOP_SSPM 22
+#define CLK_TOP_DXCC 23
+#define CLK_TOP_USB_TOP 24
+#define CLK_TOP_SRCK 25
+#define CLK_TOP_SPM 26
+#define CLK_TOP_I2C 27
+#define CLK_TOP_PWM 28
+#define CLK_TOP_SENINF 29
+#define CLK_TOP_SENINF1 30
+#define CLK_TOP_SENINF2 31
+#define CLK_TOP_SENINF3 32
+#define CLK_TOP_AES_MSDCFDE 33
+#define CLK_TOP_PWRAP_ULPOSC 34
+#define CLK_TOP_CAMTM 35
+#define CLK_TOP_VENC 36
+#define CLK_TOP_CAM 37
+#define CLK_TOP_IMG1 38
+#define CLK_TOP_IPE 39
+#define CLK_TOP_DPMAIF 40
+#define CLK_TOP_VDEC 41
+#define CLK_TOP_DISP 42
+#define CLK_TOP_MDP 43
+#define CLK_TOP_AUDIO_H 44
+#define CLK_TOP_UFS 45
+#define CLK_TOP_AES_FDE 46
+#define CLK_TOP_AUDIODSP 47
+#define CLK_TOP_DVFSRC 48
+#define CLK_TOP_DSI_OCC 49
+#define CLK_TOP_SPMI_MST 50
+#define CLK_TOP_SPINOR 51
+#define CLK_TOP_NNA 52
+#define CLK_TOP_NNA1 53
+#define CLK_TOP_NNA2 54
+#define CLK_TOP_SSUSB_XHCI 55
+#define CLK_TOP_SSUSB_TOP_1P 56
+#define CLK_TOP_SSUSB_XHCI_1P 57
+#define CLK_TOP_WPE 58
+#define CLK_TOP_DPI 59
+#define CLK_TOP_U3_OCC_250M 60
+#define CLK_TOP_U3_OCC_500M 61
+#define CLK_TOP_ADSP_BUS 62
+#define CLK_TOP_APLL_I2S0_MCK_SEL 63
+#define CLK_TOP_APLL_I2S1_MCK_SEL 64
+#define CLK_TOP_APLL_I2S2_MCK_SEL 65
+#define CLK_TOP_APLL_I2S4_MCK_SEL 66
+#define CLK_TOP_APLL_TDMOUT_MCK_SEL 67
+#define CLK_TOP_MAINPLL_D2 68
+#define CLK_TOP_MAINPLL_D2_D2 69
+#define CLK_TOP_MAINPLL_D2_D4 70
+#define CLK_TOP_MAINPLL_D2_D16 71
+#define CLK_TOP_MAINPLL_D3 72
+#define CLK_TOP_MAINPLL_D3_D2 73
+#define CLK_TOP_MAINPLL_D3_D4 74
+#define CLK_TOP_MAINPLL_D5 75
+#define CLK_TOP_MAINPLL_D5_D2 76
+#define CLK_TOP_MAINPLL_D5_D4 77
+#define CLK_TOP_MAINPLL_D7 78
+#define CLK_TOP_MAINPLL_D7_D2 79
+#define CLK_TOP_MAINPLL_D7_D4 80
+#define CLK_TOP_UNIVPLL 81
+#define CLK_TOP_UNIVPLL_D2 82
+#define CLK_TOP_UNIVPLL_D2_D2 83
+#define CLK_TOP_UNIVPLL_D2_D4 84
+#define CLK_TOP_UNIVPLL_D3 85
+#define CLK_TOP_UNIVPLL_D3_D2 86
+#define CLK_TOP_UNIVPLL_D3_D4 87
+#define CLK_TOP_UNIVPLL_D3_D8 88
+#define CLK_TOP_UNIVPLL_D3_D32 89
+#define CLK_TOP_UNIVPLL_D5 90
+#define CLK_TOP_UNIVPLL_D5_D2 91
+#define CLK_TOP_UNIVPLL_D5_D4 92
+#define CLK_TOP_UNIVPLL_D7 93
+#define CLK_TOP_UNIVPLL_192M 94
+#define CLK_TOP_UNIVPLL_192M_D4 95
+#define CLK_TOP_UNIVPLL_192M_D8 96
+#define CLK_TOP_UNIVPLL_192M_D16 97
+#define CLK_TOP_UNIVPLL_192M_D32 98
+#define CLK_TOP_APLL1_D2 99
+#define CLK_TOP_APLL1_D4 100
+#define CLK_TOP_APLL1_D8 101
+#define CLK_TOP_APLL2_D2 102
+#define CLK_TOP_APLL2_D4 103
+#define CLK_TOP_APLL2_D8 104
+#define CLK_TOP_MMPLL_D2 105
+#define CLK_TOP_TVDPLL_D2 106
+#define CLK_TOP_TVDPLL_D4 107
+#define CLK_TOP_TVDPLL_D8 108
+#define CLK_TOP_TVDPLL_D16 109
+#define CLK_TOP_TVDPLL_D32 110
+#define CLK_TOP_MSDCPLL_D2 111
+#define CLK_TOP_ULPOSC1 112
+#define CLK_TOP_ULPOSC1_D2 113
+#define CLK_TOP_ULPOSC1_D4 114
+#define CLK_TOP_ULPOSC1_D8 115
+#define CLK_TOP_ULPOSC1_D10 116
+#define CLK_TOP_ULPOSC1_D16 117
+#define CLK_TOP_ULPOSC1_D32 118
+#define CLK_TOP_ADSPPLL_D2 119
+#define CLK_TOP_ADSPPLL_D4 120
+#define CLK_TOP_ADSPPLL_D8 121
+#define CLK_TOP_NNAPLL_D2 122
+#define CLK_TOP_NNAPLL_D4 123
+#define CLK_TOP_NNAPLL_D8 124
+#define CLK_TOP_NNA2PLL_D2 125
+#define CLK_TOP_NNA2PLL_D4 126
+#define CLK_TOP_NNA2PLL_D8 127
+#define CLK_TOP_F_BIST2FPC 128
+#define CLK_TOP_466M_FMEM 129
+#define CLK_TOP_MPLL 130
+#define CLK_TOP_APLL12_CK_DIV0 131
+#define CLK_TOP_APLL12_CK_DIV1 132
+#define CLK_TOP_APLL12_CK_DIV2 133
+#define CLK_TOP_APLL12_CK_DIV4 134
+#define CLK_TOP_APLL12_CK_DIV_TDMOUT_M 135
+#define CLK_TOP_NR_CLK 136
+
+/* INFRACFG_AO */
+
+#define CLK_INFRA_AO_PMIC_TMR 0
+#define CLK_INFRA_AO_PMIC_AP 1
+#define CLK_INFRA_AO_PMIC_MD 2
+#define CLK_INFRA_AO_PMIC_CONN 3
+#define CLK_INFRA_AO_SCP_CORE 4
+#define CLK_INFRA_AO_SEJ 5
+#define CLK_INFRA_AO_APXGPT 6
+#define CLK_INFRA_AO_ICUSB 7
+#define CLK_INFRA_AO_GCE 8
+#define CLK_INFRA_AO_THERM 9
+#define CLK_INFRA_AO_I2C_AP 10
+#define CLK_INFRA_AO_I2C_CCU 11
+#define CLK_INFRA_AO_I2C_SSPM 12
+#define CLK_INFRA_AO_I2C_RSV 13
+#define CLK_INFRA_AO_PWM_HCLK 14
+#define CLK_INFRA_AO_PWM1 15
+#define CLK_INFRA_AO_PWM2 16
+#define CLK_INFRA_AO_PWM3 17
+#define CLK_INFRA_AO_PWM4 18
+#define CLK_INFRA_AO_PWM5 19
+#define CLK_INFRA_AO_PWM 20
+#define CLK_INFRA_AO_UART0 21
+#define CLK_INFRA_AO_UART1 22
+#define CLK_INFRA_AO_UART2 23
+#define CLK_INFRA_AO_GCE_26M 24
+#define CLK_INFRA_AO_CQ_DMA_FPC 25
+#define CLK_INFRA_AO_BTIF 26
+#define CLK_INFRA_AO_SPI0 27
+#define CLK_INFRA_AO_MSDC0 28
+#define CLK_INFRA_AO_MSDCFDE 29
+#define CLK_INFRA_AO_MSDC1 30
+#define CLK_INFRA_AO_DVFSRC 31
+#define CLK_INFRA_AO_GCPU 32
+#define CLK_INFRA_AO_TRNG 33
+#define CLK_INFRA_AO_AUXADC 34
+#define CLK_INFRA_AO_CPUM 35
+#define CLK_INFRA_AO_CCIF1_AP 36
+#define CLK_INFRA_AO_CCIF1_MD 37
+#define CLK_INFRA_AO_AUXADC_MD 38
+#define CLK_INFRA_AO_AP_DMA 39
+#define CLK_INFRA_AO_XIU 40
+#define CLK_INFRA_AO_DEVICE_APC 41
+#define CLK_INFRA_AO_CCIF_AP 42
+#define CLK_INFRA_AO_DEBUGTOP 43
+#define CLK_INFRA_AO_AUDIO 44
+#define CLK_INFRA_AO_CCIF_MD 45
+#define CLK_INFRA_AO_DXCC_SEC_CORE 46
+#define CLK_INFRA_AO_DXCC_AO 47
+#define CLK_INFRA_AO_IMP_IIC 48
+#define CLK_INFRA_AO_DRAMC_F26M 49
+#define CLK_INFRA_AO_RG_PWM_FBCLK6 50
+#define CLK_INFRA_AO_SSUSB_TOP_HCLK 51
+#define CLK_INFRA_AO_DISP_PWM 52
+#define CLK_INFRA_AO_CLDMA_BCLK 53
+#define CLK_INFRA_AO_AUDIO_26M_BCLK 54
+#define CLK_INFRA_AO_SSUSB_TOP_P1_HCLK 55
+#define CLK_INFRA_AO_SPI1 56
+#define CLK_INFRA_AO_I2C4 57
+#define CLK_INFRA_AO_MODEM_TEMP_SHARE 58
+#define CLK_INFRA_AO_SPI2 59
+#define CLK_INFRA_AO_SPI3 60
+#define CLK_INFRA_AO_SSUSB_TOP_REF 61
+#define CLK_INFRA_AO_SSUSB_TOP_XHCI 62
+#define CLK_INFRA_AO_SSUSB_TOP_P1_REF 63
+#define CLK_INFRA_AO_SSUSB_TOP_P1_XHCI 64
+#define CLK_INFRA_AO_SSPM 65
+#define CLK_INFRA_AO_SSUSB_TOP_P1_SYS 66
+#define CLK_INFRA_AO_I2C5 67
+#define CLK_INFRA_AO_I2C5_ARBITER 68
+#define CLK_INFRA_AO_I2C5_IMM 69
+#define CLK_INFRA_AO_I2C1_ARBITER 70
+#define CLK_INFRA_AO_I2C1_IMM 71
+#define CLK_INFRA_AO_I2C2_ARBITER 72
+#define CLK_INFRA_AO_I2C2_IMM 73
+#define CLK_INFRA_AO_SPI4 74
+#define CLK_INFRA_AO_SPI5 75
+#define CLK_INFRA_AO_CQ_DMA 76
+#define CLK_INFRA_AO_BIST2FPC 77
+#define CLK_INFRA_AO_MSDC0_SELF 78
+#define CLK_INFRA_AO_SPINOR 79
+#define CLK_INFRA_AO_SSPM_26M_SELF 80
+#define CLK_INFRA_AO_SSPM_32K_SELF 81
+#define CLK_INFRA_AO_I2C6 82
+#define CLK_INFRA_AO_AP_MSDC0 83
+#define CLK_INFRA_AO_MD_MSDC0 84
+#define CLK_INFRA_AO_MSDC0_SRC 85
+#define CLK_INFRA_AO_MSDC1_SRC 86
+#define CLK_INFRA_AO_SEJ_F13M 87
+#define CLK_INFRA_AO_AES_TOP0_BCLK 88
+#define CLK_INFRA_AO_MCU_PM_BCLK 89
+#define CLK_INFRA_AO_CCIF2_AP 90
+#define CLK_INFRA_AO_CCIF2_MD 91
+#define CLK_INFRA_AO_CCIF3_AP 92
+#define CLK_INFRA_AO_CCIF3_MD 93
+#define CLK_INFRA_AO_FADSP_26M 94
+#define CLK_INFRA_AO_FADSP_32K 95
+#define CLK_INFRA_AO_CCIF4_AP 96
+#define CLK_INFRA_AO_CCIF4_MD 97
+#define CLK_INFRA_AO_FADSP 98
+#define CLK_INFRA_AO_FLASHIF_133M 99
+#define CLK_INFRA_AO_FLASHIF_66M 100
+#define CLK_INFRA_AO_NR_CLK 101
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_ARMPLL_LL 0
+#define CLK_APMIXED_ARMPLL_BL 1
+#define CLK_APMIXED_CCIPLL 2
+#define CLK_APMIXED_MAINPLL 3
+#define CLK_APMIXED_UNIV2PLL 4
+#define CLK_APMIXED_MSDCPLL 5
+#define CLK_APMIXED_MMPLL 6
+#define CLK_APMIXED_NNAPLL 7
+#define CLK_APMIXED_NNA2PLL 8
+#define CLK_APMIXED_ADSPPLL 9
+#define CLK_APMIXED_MFGPLL 10
+#define CLK_APMIXED_TVDPLL 11
+#define CLK_APMIXED_APLL1 12
+#define CLK_APMIXED_APLL2 13
+#define CLK_APMIXED_NR_CLK 14
+
+/* IMP_IIC_WRAP */
+
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C0 0
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C1 1
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C2 2
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C3 3
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C4 4
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C5 5
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C6 6
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C7 7
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C8 8
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C9 9
+#define CLK_IMP_IIC_WRAP_NR_CLK 10
+
+/* MFGCFG */
+
+#define CLK_MFG_BG3D 0
+#define CLK_MFG_NR_CLK 1
+
+/* MMSYS */
+
+#define CLK_MM_DISP_MUTEX0 0
+#define CLK_MM_APB_MM_BUS 1
+#define CLK_MM_DISP_OVL0 2
+#define CLK_MM_DISP_RDMA0 3
+#define CLK_MM_DISP_OVL0_2L 4
+#define CLK_MM_DISP_WDMA0 5
+#define CLK_MM_DISP_RSZ0 6
+#define CLK_MM_DISP_AAL0 7
+#define CLK_MM_DISP_CCORR0 8
+#define CLK_MM_DISP_COLOR0 9
+#define CLK_MM_SMI_INFRA 10
+#define CLK_MM_DISP_DSC_WRAP0 11
+#define CLK_MM_DISP_GAMMA0 12
+#define CLK_MM_DISP_POSTMASK0 13
+#define CLK_MM_DISP_DITHER0 14
+#define CLK_MM_SMI_COMMON 15
+#define CLK_MM_DSI0 16
+#define CLK_MM_DISP_FAKE_ENG0 17
+#define CLK_MM_DISP_FAKE_ENG1 18
+#define CLK_MM_SMI_GALS 19
+#define CLK_MM_SMI_IOMMU 20
+#define CLK_MM_DISP_RDMA1 21
+#define CLK_MM_DISP_DPI 22
+#define CLK_MM_DSI0_DSI_CK_DOMAIN 23
+#define CLK_MM_DISP_26M 24
+#define CLK_MM_NR_CLK 25
+
+/* WPESYS */
+
+#define CLK_WPE_CK_EN 0
+#define CLK_WPE_SMI_LARB8_CK_EN 1
+#define CLK_WPE_SYS_EVENT_TX_CK_EN 2
+#define CLK_WPE_SMI_LARB8_PCLK_EN 3
+#define CLK_WPE_NR_CLK 4
+
+/* IMGSYS1 */
+
+#define CLK_IMG1_LARB9_IMG1 0
+#define CLK_IMG1_LARB10_IMG1 1
+#define CLK_IMG1_DIP 2
+#define CLK_IMG1_GALS_IMG1 3
+#define CLK_IMG1_NR_CLK 4
+
+/* IMGSYS2 */
+
+#define CLK_IMG2_LARB9_IMG2 0
+#define CLK_IMG2_LARB10_IMG2 1
+#define CLK_IMG2_MFB 2
+#define CLK_IMG2_WPE 3
+#define CLK_IMG2_MSS 4
+#define CLK_IMG2_GALS_IMG2 5
+#define CLK_IMG2_NR_CLK 6
+
+/* VDECSYS */
+
+#define CLK_VDEC_LARB1_CKEN 0
+#define CLK_VDEC_LAT_CKEN 1
+#define CLK_VDEC_LAT_ACTIVE 2
+#define CLK_VDEC_LAT_CKEN_ENG 3
+#define CLK_VDEC_MINI_MDP_CKEN_CFG_RG 4
+#define CLK_VDEC_CKEN 5
+#define CLK_VDEC_ACTIVE 6
+#define CLK_VDEC_CKEN_ENG 7
+#define CLK_VDEC_NR_CLK 8
+
+/* VENCSYS */
+
+#define CLK_VENC_CKE0_LARB 0
+#define CLK_VENC_CKE1_VENC 1
+#define CLK_VENC_CKE2_JPGENC 2
+#define CLK_VENC_CKE5_GALS 3
+#define CLK_VENC_NR_CLK 4
+
+/* CAMSYS */
+
+#define CLK_CAM_LARB13 0
+#define CLK_CAM_DFP_VAD 1
+#define CLK_CAM_LARB14 2
+#define CLK_CAM 3
+#define CLK_CAMTG 4
+#define CLK_CAM_SENINF 5
+#define CLK_CAMSV1 6
+#define CLK_CAMSV2 7
+#define CLK_CAMSV3 8
+#define CLK_CAM_CCU0 9
+#define CLK_CAM_CCU1 10
+#define CLK_CAM_MRAW0 11
+#define CLK_CAM_FAKE_ENG 12
+#define CLK_CAM_CCU_GALS 13
+#define CLK_CAM2MM_GALS 14
+#define CLK_CAM_NR_CLK 15
+
+/* CAMSYS_RAWA */
+
+#define CLK_CAM_RAWA_LARBX_RAWA 0
+#define CLK_CAM_RAWA 1
+#define CLK_CAM_RAWA_CAMTG_RAWA 2
+#define CLK_CAM_RAWA_NR_CLK 3
+
+/* CAMSYS_RAWB */
+
+#define CLK_CAM_RAWB_LARBX_RAWB 0
+#define CLK_CAM_RAWB 1
+#define CLK_CAM_RAWB_CAMTG_RAWB 2
+#define CLK_CAM_RAWB_NR_CLK 3
+
+/* MDPSYS */
+
+#define CLK_MDP_RDMA0 0
+#define CLK_MDP_TDSHP0 1
+#define CLK_MDP_IMG_DL_ASYNC0 2
+#define CLK_MDP_IMG_DL_ASYNC1 3
+#define CLK_MDP_DISP_RDMA 4
+#define CLK_MDP_HMS 5
+#define CLK_MDP_SMI0 6
+#define CLK_MDP_APB_BUS 7
+#define CLK_MDP_WROT0 8
+#define CLK_MDP_RSZ0 9
+#define CLK_MDP_HDR0 10
+#define CLK_MDP_MUTEX0 11
+#define CLK_MDP_WROT1 12
+#define CLK_MDP_RSZ1 13
+#define CLK_MDP_FAKE_ENG0 14
+#define CLK_MDP_AAL0 15
+#define CLK_MDP_DISP_WDMA 16
+#define CLK_MDP_COLOR 17
+#define CLK_MDP_IMG_DL_ASYNC2 18
+#define CLK_MDP_IMG_DL_RELAY0_ASYNC0 19
+#define CLK_MDP_IMG_DL_RELAY1_ASYNC1 20
+#define CLK_MDP_IMG_DL_RELAY2_ASYNC2 21
+#define CLK_MDP_NR_CLK 22
+
+/* IPESYS */
+
+#define CLK_IPE_LARB19 0
+#define CLK_IPE_LARB20 1
+#define CLK_IPE_SMI_SUBCOM 2
+#define CLK_IPE_FD 3
+#define CLK_IPE_FE 4
+#define CLK_IPE_RSC 5
+#define CLK_IPE_DPE 6
+#define CLK_IPE_GALS_IPE 7
+#define CLK_IPE_NR_CLK 8
+
+#endif /* _DT_BINDINGS_CLK_MT8186_H */
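
Mux IDs such as the CLK_TOP_* selectors also work with the generic assigned-clocks mechanism, which lets a device tree pick a default parent at boot without driver changes. A sketch (the mux/parent pairing is an assumption, chosen only to show the shape):

    #include <dt-bindings/clock/mt8186-clk.h>

    &topckgen {
            assigned-clocks = <&topckgen CLK_TOP_DISP_PWM>;
            assigned-clock-parents = <&topckgen CLK_TOP_ULPOSC1_D2>;
    };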
diff --git a/include/dt-bindings/clock/mt8192-clk.h b/include/dt-bindings/clock/mt8192-clk.h
new file mode 100644
index 000000000000..5ab68f15a256
--- /dev/null
+++ b/include/dt-bindings/clock/mt8192-clk.h
@@ -0,0 +1,585 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8192_H
+#define _DT_BINDINGS_CLK_MT8192_H
+
+/* TOPCKGEN */
+
+#define CLK_TOP_AXI_SEL 0
+#define CLK_TOP_SPM_SEL 1
+#define CLK_TOP_SCP_SEL 2
+#define CLK_TOP_BUS_AXIMEM_SEL 3
+#define CLK_TOP_DISP_SEL 4
+#define CLK_TOP_MDP_SEL 5
+#define CLK_TOP_IMG1_SEL 6
+#define CLK_TOP_IMG2_SEL 7
+#define CLK_TOP_IPE_SEL 8
+#define CLK_TOP_DPE_SEL 9
+#define CLK_TOP_CAM_SEL 10
+#define CLK_TOP_CCU_SEL 11
+#define CLK_TOP_DSP7_SEL 12
+#define CLK_TOP_MFG_REF_SEL 13
+#define CLK_TOP_MFG_PLL_SEL 14
+#define CLK_TOP_CAMTG_SEL 15
+#define CLK_TOP_CAMTG2_SEL 16
+#define CLK_TOP_CAMTG3_SEL 17
+#define CLK_TOP_CAMTG4_SEL 18
+#define CLK_TOP_CAMTG5_SEL 19
+#define CLK_TOP_CAMTG6_SEL 20
+#define CLK_TOP_UART_SEL 21
+#define CLK_TOP_SPI_SEL 22
+#define CLK_TOP_MSDC50_0_H_SEL 23
+#define CLK_TOP_MSDC50_0_SEL 24
+#define CLK_TOP_MSDC30_1_SEL 25
+#define CLK_TOP_MSDC30_2_SEL 26
+#define CLK_TOP_AUDIO_SEL 27
+#define CLK_TOP_AUD_INTBUS_SEL 28
+#define CLK_TOP_PWRAP_ULPOSC_SEL 29
+#define CLK_TOP_ATB_SEL 30
+#define CLK_TOP_DPI_SEL 31
+#define CLK_TOP_SCAM_SEL 32
+#define CLK_TOP_DISP_PWM_SEL 33
+#define CLK_TOP_USB_TOP_SEL 34
+#define CLK_TOP_SSUSB_XHCI_SEL 35
+#define CLK_TOP_I2C_SEL 36
+#define CLK_TOP_SENINF_SEL 37
+#define CLK_TOP_SENINF1_SEL 38
+#define CLK_TOP_SENINF2_SEL 39
+#define CLK_TOP_SENINF3_SEL 40
+#define CLK_TOP_TL_SEL 41
+#define CLK_TOP_DXCC_SEL 42
+#define CLK_TOP_AUD_ENGEN1_SEL 43
+#define CLK_TOP_AUD_ENGEN2_SEL 44
+#define CLK_TOP_AES_UFSFDE_SEL 45
+#define CLK_TOP_UFS_SEL 46
+#define CLK_TOP_AUD_1_SEL 47
+#define CLK_TOP_AUD_2_SEL 48
+#define CLK_TOP_ADSP_SEL 49
+#define CLK_TOP_DPMAIF_MAIN_SEL 50
+#define CLK_TOP_VENC_SEL 51
+#define CLK_TOP_VDEC_SEL 52
+#define CLK_TOP_CAMTM_SEL 53
+#define CLK_TOP_PWM_SEL 54
+#define CLK_TOP_AUDIO_H_SEL 55
+#define CLK_TOP_SPMI_MST_SEL 56
+#define CLK_TOP_AES_MSDCFDE_SEL 57
+#define CLK_TOP_SFLASH_SEL 58
+#define CLK_TOP_APLL_I2S0_M_SEL 59
+#define CLK_TOP_APLL_I2S1_M_SEL 60
+#define CLK_TOP_APLL_I2S2_M_SEL 61
+#define CLK_TOP_APLL_I2S3_M_SEL 62
+#define CLK_TOP_APLL_I2S4_M_SEL 63
+#define CLK_TOP_APLL_I2S5_M_SEL 64
+#define CLK_TOP_APLL_I2S6_M_SEL 65
+#define CLK_TOP_APLL_I2S7_M_SEL 66
+#define CLK_TOP_APLL_I2S8_M_SEL 67
+#define CLK_TOP_APLL_I2S9_M_SEL 68
+#define CLK_TOP_MAINPLL_D3 69
+#define CLK_TOP_MAINPLL_D4 70
+#define CLK_TOP_MAINPLL_D4_D2 71
+#define CLK_TOP_MAINPLL_D4_D4 72
+#define CLK_TOP_MAINPLL_D4_D8 73
+#define CLK_TOP_MAINPLL_D4_D16 74
+#define CLK_TOP_MAINPLL_D5 75
+#define CLK_TOP_MAINPLL_D5_D2 76
+#define CLK_TOP_MAINPLL_D5_D4 77
+#define CLK_TOP_MAINPLL_D5_D8 78
+#define CLK_TOP_MAINPLL_D6 79
+#define CLK_TOP_MAINPLL_D6_D2 80
+#define CLK_TOP_MAINPLL_D6_D4 81
+#define CLK_TOP_MAINPLL_D7 82
+#define CLK_TOP_MAINPLL_D7_D2 83
+#define CLK_TOP_MAINPLL_D7_D4 84
+#define CLK_TOP_MAINPLL_D7_D8 85
+#define CLK_TOP_UNIVPLL_D3 86
+#define CLK_TOP_UNIVPLL_D4 87
+#define CLK_TOP_UNIVPLL_D4_D2 88
+#define CLK_TOP_UNIVPLL_D4_D4 89
+#define CLK_TOP_UNIVPLL_D4_D8 90
+#define CLK_TOP_UNIVPLL_D5 91
+#define CLK_TOP_UNIVPLL_D5_D2 92
+#define CLK_TOP_UNIVPLL_D5_D4 93
+#define CLK_TOP_UNIVPLL_D5_D8 94
+#define CLK_TOP_UNIVPLL_D6 95
+#define CLK_TOP_UNIVPLL_D6_D2 96
+#define CLK_TOP_UNIVPLL_D6_D4 97
+#define CLK_TOP_UNIVPLL_D6_D8 98
+#define CLK_TOP_UNIVPLL_D6_D16 99
+#define CLK_TOP_UNIVPLL_D7 100
+#define CLK_TOP_APLL1 101
+#define CLK_TOP_APLL1_D2 102
+#define CLK_TOP_APLL1_D4 103
+#define CLK_TOP_APLL1_D8 104
+#define CLK_TOP_APLL2 105
+#define CLK_TOP_APLL2_D2 106
+#define CLK_TOP_APLL2_D4 107
+#define CLK_TOP_APLL2_D8 108
+#define CLK_TOP_MMPLL_D4 109
+#define CLK_TOP_MMPLL_D4_D2 110
+#define CLK_TOP_MMPLL_D5 111
+#define CLK_TOP_MMPLL_D5_D2 112
+#define CLK_TOP_MMPLL_D6 113
+#define CLK_TOP_MMPLL_D6_D2 114
+#define CLK_TOP_MMPLL_D7 115
+#define CLK_TOP_MMPLL_D9 116
+#define CLK_TOP_APUPLL 117
+#define CLK_TOP_NPUPLL 118
+#define CLK_TOP_TVDPLL 119
+#define CLK_TOP_TVDPLL_D2 120
+#define CLK_TOP_TVDPLL_D4 121
+#define CLK_TOP_TVDPLL_D8 122
+#define CLK_TOP_TVDPLL_D16 123
+#define CLK_TOP_MSDCPLL 124
+#define CLK_TOP_MSDCPLL_D2 125
+#define CLK_TOP_MSDCPLL_D4 126
+#define CLK_TOP_ULPOSC 127
+#define CLK_TOP_OSC_D2 128
+#define CLK_TOP_OSC_D4 129
+#define CLK_TOP_OSC_D8 130
+#define CLK_TOP_OSC_D10 131
+#define CLK_TOP_OSC_D16 132
+#define CLK_TOP_OSC_D20 133
+#define CLK_TOP_CSW_F26M_D2 134
+#define CLK_TOP_ADSPPLL 135
+#define CLK_TOP_UNIVPLL_192M 136
+#define CLK_TOP_UNIVPLL_192M_D2 137
+#define CLK_TOP_UNIVPLL_192M_D4 138
+#define CLK_TOP_UNIVPLL_192M_D8 139
+#define CLK_TOP_UNIVPLL_192M_D16 140
+#define CLK_TOP_UNIVPLL_192M_D32 141
+#define CLK_TOP_APLL12_DIV0 142
+#define CLK_TOP_APLL12_DIV1 143
+#define CLK_TOP_APLL12_DIV2 144
+#define CLK_TOP_APLL12_DIV3 145
+#define CLK_TOP_APLL12_DIV4 146
+#define CLK_TOP_APLL12_DIVB 147
+#define CLK_TOP_APLL12_DIV5 148
+#define CLK_TOP_APLL12_DIV6 149
+#define CLK_TOP_APLL12_DIV7 150
+#define CLK_TOP_APLL12_DIV8 151
+#define CLK_TOP_APLL12_DIV9 152
+#define CLK_TOP_SSUSB_TOP_REF 153
+#define CLK_TOP_SSUSB_PHY_REF 154
+#define CLK_TOP_NR_CLK 155
+
+/* INFRACFG */
+
+#define CLK_INFRA_PMIC_TMR 0
+#define CLK_INFRA_PMIC_AP 1
+#define CLK_INFRA_PMIC_MD 2
+#define CLK_INFRA_PMIC_CONN 3
+#define CLK_INFRA_SCPSYS 4
+#define CLK_INFRA_SEJ 5
+#define CLK_INFRA_APXGPT 6
+#define CLK_INFRA_GCE 7
+#define CLK_INFRA_GCE2 8
+#define CLK_INFRA_THERM 9
+#define CLK_INFRA_I2C0 10
+#define CLK_INFRA_AP_DMA_PSEUDO 11
+#define CLK_INFRA_I2C2 12
+#define CLK_INFRA_I2C3 13
+#define CLK_INFRA_PWM_H 14
+#define CLK_INFRA_PWM1 15
+#define CLK_INFRA_PWM2 16
+#define CLK_INFRA_PWM3 17
+#define CLK_INFRA_PWM4 18
+#define CLK_INFRA_PWM 19
+#define CLK_INFRA_UART0 20
+#define CLK_INFRA_UART1 21
+#define CLK_INFRA_UART2 22
+#define CLK_INFRA_UART3 23
+#define CLK_INFRA_GCE_26M 24
+#define CLK_INFRA_CQ_DMA_FPC 25
+#define CLK_INFRA_BTIF 26
+#define CLK_INFRA_SPI0 27
+#define CLK_INFRA_MSDC0 28
+#define CLK_INFRA_MSDC1 29
+#define CLK_INFRA_MSDC2 30
+#define CLK_INFRA_MSDC0_SRC 31
+#define CLK_INFRA_GCPU 32
+#define CLK_INFRA_TRNG 33
+#define CLK_INFRA_AUXADC 34
+#define CLK_INFRA_CPUM 35
+#define CLK_INFRA_CCIF1_AP 36
+#define CLK_INFRA_CCIF1_MD 37
+#define CLK_INFRA_AUXADC_MD 38
+#define CLK_INFRA_PCIE_TL_26M 39
+#define CLK_INFRA_MSDC1_SRC 40
+#define CLK_INFRA_MSDC2_SRC 41
+#define CLK_INFRA_PCIE_TL_96M 42
+#define CLK_INFRA_PCIE_PL_P_250M 43
+#define CLK_INFRA_DEVICE_APC 44
+#define CLK_INFRA_CCIF_AP 45
+#define CLK_INFRA_DEBUGSYS 46
+#define CLK_INFRA_AUDIO 47
+#define CLK_INFRA_CCIF_MD 48
+#define CLK_INFRA_DXCC_SEC_CORE 49
+#define CLK_INFRA_DXCC_AO 50
+#define CLK_INFRA_DBG_TRACE 51
+#define CLK_INFRA_DEVMPU_B 52
+#define CLK_INFRA_DRAMC_F26M 53
+#define CLK_INFRA_IRTX 54
+#define CLK_INFRA_SSUSB 55
+#define CLK_INFRA_DISP_PWM 56
+#define CLK_INFRA_CLDMA_B 57
+#define CLK_INFRA_AUDIO_26M_B 58
+#define CLK_INFRA_MODEM_TEMP_SHARE 59
+#define CLK_INFRA_SPI1 60
+#define CLK_INFRA_I2C4 61
+#define CLK_INFRA_SPI2 62
+#define CLK_INFRA_SPI3 63
+#define CLK_INFRA_UNIPRO_SYS 64
+#define CLK_INFRA_UNIPRO_TICK 65
+#define CLK_INFRA_UFS_MP_SAP_B 66
+#define CLK_INFRA_MD32_B 67
+#define CLK_INFRA_UNIPRO_MBIST 68
+#define CLK_INFRA_I2C5 69
+#define CLK_INFRA_I2C5_ARBITER 70
+#define CLK_INFRA_I2C5_IMM 71
+#define CLK_INFRA_I2C1_ARBITER 72
+#define CLK_INFRA_I2C1_IMM 73
+#define CLK_INFRA_I2C2_ARBITER 74
+#define CLK_INFRA_I2C2_IMM 75
+#define CLK_INFRA_SPI4 76
+#define CLK_INFRA_SPI5 77
+#define CLK_INFRA_CQ_DMA 78
+#define CLK_INFRA_UFS 79
+#define CLK_INFRA_AES_UFSFDE 80
+#define CLK_INFRA_UFS_TICK 81
+#define CLK_INFRA_SSUSB_XHCI 82
+#define CLK_INFRA_MSDC0_SELF 83
+#define CLK_INFRA_MSDC1_SELF 84
+#define CLK_INFRA_MSDC2_SELF 85
+#define CLK_INFRA_UFS_AXI 86
+#define CLK_INFRA_I2C6 87
+#define CLK_INFRA_AP_MSDC0 88
+#define CLK_INFRA_MD_MSDC0 89
+#define CLK_INFRA_CCIF5_AP 90
+#define CLK_INFRA_CCIF5_MD 91
+#define CLK_INFRA_PCIE_TOP_H_133M 92
+#define CLK_INFRA_FLASHIF_TOP_H_133M 93
+#define CLK_INFRA_PCIE_PERI_26M 94
+#define CLK_INFRA_CCIF2_AP 95
+#define CLK_INFRA_CCIF2_MD 96
+#define CLK_INFRA_CCIF3_AP 97
+#define CLK_INFRA_CCIF3_MD 98
+#define CLK_INFRA_SEJ_F13M 99
+#define CLK_INFRA_AES 100
+#define CLK_INFRA_I2C7 101
+#define CLK_INFRA_I2C8 102
+#define CLK_INFRA_FBIST2FPC 103
+#define CLK_INFRA_DEVICE_APC_SYNC 104
+#define CLK_INFRA_DPMAIF_MAIN 105
+#define CLK_INFRA_PCIE_TL_32K 106
+#define CLK_INFRA_CCIF4_AP 107
+#define CLK_INFRA_CCIF4_MD 108
+#define CLK_INFRA_SPI6 109
+#define CLK_INFRA_SPI7 110
+#define CLK_INFRA_133M 111
+#define CLK_INFRA_66M 112
+#define CLK_INFRA_66M_PERI_BUS 113
+#define CLK_INFRA_FREE_DCM_133M 114
+#define CLK_INFRA_FREE_DCM_66M 115
+#define CLK_INFRA_PERI_BUS_DCM_133M 116
+#define CLK_INFRA_PERI_BUS_DCM_66M 117
+#define CLK_INFRA_FLASHIF_PERI_26M 118
+#define CLK_INFRA_FLASHIF_SFLASH 119
+#define CLK_INFRA_AP_DMA 120
+#define CLK_INFRA_NR_CLK 121
+
+/* PERICFG */
+
+#define CLK_PERI_PERIAXI 0
+#define CLK_PERI_NR_CLK 1
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_MAINPLL 0
+#define CLK_APMIXED_UNIVPLL 1
+#define CLK_APMIXED_USBPLL 2
+#define CLK_APMIXED_MSDCPLL 3
+#define CLK_APMIXED_MMPLL 4
+#define CLK_APMIXED_ADSPPLL 5
+#define CLK_APMIXED_MFGPLL 6
+#define CLK_APMIXED_TVDPLL 7
+#define CLK_APMIXED_APLL1 8
+#define CLK_APMIXED_APLL2 9
+#define CLK_APMIXED_MIPID26M 10
+#define CLK_APMIXED_NR_CLK 11
+
+/* SCP_ADSP */
+
+#define CLK_SCP_ADSP_AUDIODSP 0
+#define CLK_SCP_ADSP_NR_CLK 1
+
+/* IMP_IIC_WRAP_C */
+
+#define CLK_IMP_IIC_WRAP_C_I2C10 0
+#define CLK_IMP_IIC_WRAP_C_I2C11 1
+#define CLK_IMP_IIC_WRAP_C_I2C12 2
+#define CLK_IMP_IIC_WRAP_C_I2C13 3
+#define CLK_IMP_IIC_WRAP_C_NR_CLK 4
+
+/* AUDSYS */
+
+#define CLK_AUD_AFE 0
+#define CLK_AUD_22M 1
+#define CLK_AUD_24M 2
+#define CLK_AUD_APLL2_TUNER 3
+#define CLK_AUD_APLL_TUNER 4
+#define CLK_AUD_TDM 5
+#define CLK_AUD_ADC 6
+#define CLK_AUD_DAC 7
+#define CLK_AUD_DAC_PREDIS 8
+#define CLK_AUD_TML 9
+#define CLK_AUD_NLE 10
+#define CLK_AUD_I2S1_B 11
+#define CLK_AUD_I2S2_B 12
+#define CLK_AUD_I2S3_B 13
+#define CLK_AUD_I2S4_B 14
+#define CLK_AUD_CONNSYS_I2S_ASRC 15
+#define CLK_AUD_GENERAL1_ASRC 16
+#define CLK_AUD_GENERAL2_ASRC 17
+#define CLK_AUD_DAC_HIRES 18
+#define CLK_AUD_ADC_HIRES 19
+#define CLK_AUD_ADC_HIRES_TML 20
+#define CLK_AUD_ADDA6_ADC 21
+#define CLK_AUD_ADDA6_ADC_HIRES 22
+#define CLK_AUD_3RD_DAC 23
+#define CLK_AUD_3RD_DAC_PREDIS 24
+#define CLK_AUD_3RD_DAC_TML 25
+#define CLK_AUD_3RD_DAC_HIRES 26
+#define CLK_AUD_I2S5_B 27
+#define CLK_AUD_I2S6_B 28
+#define CLK_AUD_I2S7_B 29
+#define CLK_AUD_I2S8_B 30
+#define CLK_AUD_I2S9_B 31
+#define CLK_AUD_NR_CLK 32
+
+/* IMP_IIC_WRAP_E */
+
+#define CLK_IMP_IIC_WRAP_E_I2C3 0
+#define CLK_IMP_IIC_WRAP_E_NR_CLK 1
+
+/* IMP_IIC_WRAP_S */
+
+#define CLK_IMP_IIC_WRAP_S_I2C7 0
+#define CLK_IMP_IIC_WRAP_S_I2C8 1
+#define CLK_IMP_IIC_WRAP_S_I2C9 2
+#define CLK_IMP_IIC_WRAP_S_NR_CLK 3
+
+/* IMP_IIC_WRAP_WS */
+
+#define CLK_IMP_IIC_WRAP_WS_I2C1 0
+#define CLK_IMP_IIC_WRAP_WS_I2C2 1
+#define CLK_IMP_IIC_WRAP_WS_I2C4 2
+#define CLK_IMP_IIC_WRAP_WS_NR_CLK 3
+
+/* IMP_IIC_WRAP_W */
+
+#define CLK_IMP_IIC_WRAP_W_I2C5 0
+#define CLK_IMP_IIC_WRAP_W_NR_CLK 1
+
+/* IMP_IIC_WRAP_N */
+
+#define CLK_IMP_IIC_WRAP_N_I2C0 0
+#define CLK_IMP_IIC_WRAP_N_I2C6 1
+#define CLK_IMP_IIC_WRAP_N_NR_CLK 2
+
+/* MSDC_TOP */
+
+#define CLK_MSDC_TOP_AES_0P 0
+#define CLK_MSDC_TOP_SRC_0P 1
+#define CLK_MSDC_TOP_SRC_1P 2
+#define CLK_MSDC_TOP_SRC_2P 3
+#define CLK_MSDC_TOP_P_MSDC0 4
+#define CLK_MSDC_TOP_P_MSDC1 5
+#define CLK_MSDC_TOP_P_MSDC2 6
+#define CLK_MSDC_TOP_P_CFG 7
+#define CLK_MSDC_TOP_AXI 8
+#define CLK_MSDC_TOP_H_MST_0P 9
+#define CLK_MSDC_TOP_H_MST_1P 10
+#define CLK_MSDC_TOP_H_MST_2P 11
+#define CLK_MSDC_TOP_MEM_OFF_DLY_26M 12
+#define CLK_MSDC_TOP_32K 13
+#define CLK_MSDC_TOP_AHB2AXI_BRG_AXI 14
+#define CLK_MSDC_TOP_NR_CLK 15
+
+/* MSDC */
+
+#define CLK_MSDC_AXI_WRAP 0
+#define CLK_MSDC_NR_CLK 1
+
+/* MFGCFG */
+
+#define CLK_MFG_BG3D 0
+#define CLK_MFG_NR_CLK 1
+
+/* MMSYS */
+
+#define CLK_MM_DISP_MUTEX0 0
+#define CLK_MM_DISP_CONFIG 1
+#define CLK_MM_DISP_OVL0 2
+#define CLK_MM_DISP_RDMA0 3
+#define CLK_MM_DISP_OVL0_2L 4
+#define CLK_MM_DISP_WDMA0 5
+#define CLK_MM_DISP_UFBC_WDMA0 6
+#define CLK_MM_DISP_RSZ0 7
+#define CLK_MM_DISP_AAL0 8
+#define CLK_MM_DISP_CCORR0 9
+#define CLK_MM_DISP_DITHER0 10
+#define CLK_MM_SMI_INFRA 11
+#define CLK_MM_DISP_GAMMA0 12
+#define CLK_MM_DISP_POSTMASK0 13
+#define CLK_MM_DISP_DSC_WRAP0 14
+#define CLK_MM_DSI0 15
+#define CLK_MM_DISP_COLOR0 16
+#define CLK_MM_SMI_COMMON 17
+#define CLK_MM_DISP_FAKE_ENG0 18
+#define CLK_MM_DISP_FAKE_ENG1 19
+#define CLK_MM_MDP_TDSHP4 20
+#define CLK_MM_MDP_RSZ4 21
+#define CLK_MM_MDP_AAL4 22
+#define CLK_MM_MDP_HDR4 23
+#define CLK_MM_MDP_RDMA4 24
+#define CLK_MM_MDP_COLOR4 25
+#define CLK_MM_DISP_Y2R0 26
+#define CLK_MM_SMI_GALS 27
+#define CLK_MM_DISP_OVL2_2L 28
+#define CLK_MM_DISP_RDMA4 29
+#define CLK_MM_DISP_DPI0 30
+#define CLK_MM_SMI_IOMMU 31
+#define CLK_MM_DSI_DSI0 32
+#define CLK_MM_DPI_DPI0 33
+#define CLK_MM_26MHZ 34
+#define CLK_MM_32KHZ 35
+#define CLK_MM_NR_CLK 36
+
+/* IMGSYS */
+
+#define CLK_IMG_LARB9 0
+#define CLK_IMG_LARB10 1
+#define CLK_IMG_DIP 2
+#define CLK_IMG_GALS 3
+#define CLK_IMG_NR_CLK 4
+
+/* IMGSYS2 */
+
+#define CLK_IMG2_LARB11 0
+#define CLK_IMG2_LARB12 1
+#define CLK_IMG2_MFB 2
+#define CLK_IMG2_WPE 3
+#define CLK_IMG2_MSS 4
+#define CLK_IMG2_GALS 5
+#define CLK_IMG2_NR_CLK 6
+
+/* VDECSYS_SOC */
+
+#define CLK_VDEC_SOC_LARB1 0
+#define CLK_VDEC_SOC_LAT 1
+#define CLK_VDEC_SOC_LAT_ACTIVE 2
+#define CLK_VDEC_SOC_VDEC 3
+#define CLK_VDEC_SOC_VDEC_ACTIVE 4
+#define CLK_VDEC_SOC_NR_CLK 5
+
+/* VDECSYS */
+
+#define CLK_VDEC_LARB1 0
+#define CLK_VDEC_LAT 1
+#define CLK_VDEC_LAT_ACTIVE 2
+#define CLK_VDEC_VDEC 3
+#define CLK_VDEC_ACTIVE 4
+#define CLK_VDEC_NR_CLK 5
+
+/* VENCSYS */
+
+#define CLK_VENC_SET0_LARB 0
+#define CLK_VENC_SET1_VENC 1
+#define CLK_VENC_SET2_JPGENC 2
+#define CLK_VENC_SET5_GALS 3
+#define CLK_VENC_NR_CLK 4
+
+/* CAMSYS */
+
+#define CLK_CAM_LARB13 0
+#define CLK_CAM_DFP_VAD 1
+#define CLK_CAM_LARB14 2
+#define CLK_CAM_CAM 3
+#define CLK_CAM_CAMTG 4
+#define CLK_CAM_SENINF 5
+#define CLK_CAM_CAMSV0 6
+#define CLK_CAM_CAMSV1 7
+#define CLK_CAM_CAMSV2 8
+#define CLK_CAM_CAMSV3 9
+#define CLK_CAM_CCU0 10
+#define CLK_CAM_CCU1 11
+#define CLK_CAM_MRAW0 12
+#define CLK_CAM_FAKE_ENG 13
+#define CLK_CAM_CCU_GALS 14
+#define CLK_CAM_CAM2MM_GALS 15
+#define CLK_CAM_NR_CLK 16
+
+/* CAMSYS_RAWA */
+
+#define CLK_CAM_RAWA_LARBX 0
+#define CLK_CAM_RAWA_CAM 1
+#define CLK_CAM_RAWA_CAMTG 2
+#define CLK_CAM_RAWA_NR_CLK 3
+
+/* CAMSYS_RAWB */
+
+#define CLK_CAM_RAWB_LARBX 0
+#define CLK_CAM_RAWB_CAM 1
+#define CLK_CAM_RAWB_CAMTG 2
+#define CLK_CAM_RAWB_NR_CLK 3
+
+/* CAMSYS_RAWC */
+
+#define CLK_CAM_RAWC_LARBX 0
+#define CLK_CAM_RAWC_CAM 1
+#define CLK_CAM_RAWC_CAMTG 2
+#define CLK_CAM_RAWC_NR_CLK 3
+
+/* IPESYS */
+
+#define CLK_IPE_LARB19 0
+#define CLK_IPE_LARB20 1
+#define CLK_IPE_SMI_SUBCOM 2
+#define CLK_IPE_FD 3
+#define CLK_IPE_FE 4
+#define CLK_IPE_RSC 5
+#define CLK_IPE_DPE 6
+#define CLK_IPE_GALS 7
+#define CLK_IPE_NR_CLK 8
+
+/* MDPSYS */
+
+#define CLK_MDP_RDMA0 0
+#define CLK_MDP_TDSHP0 1
+#define CLK_MDP_IMG_DL_ASYNC0 2
+#define CLK_MDP_IMG_DL_ASYNC1 3
+#define CLK_MDP_RDMA1 4
+#define CLK_MDP_TDSHP1 5
+#define CLK_MDP_SMI0 6
+#define CLK_MDP_APB_BUS 7
+#define CLK_MDP_WROT0 8
+#define CLK_MDP_RSZ0 9
+#define CLK_MDP_HDR0 10
+#define CLK_MDP_MUTEX0 11
+#define CLK_MDP_WROT1 12
+#define CLK_MDP_RSZ1 13
+#define CLK_MDP_HDR1 14
+#define CLK_MDP_FAKE_ENG0 15
+#define CLK_MDP_AAL0 16
+#define CLK_MDP_AAL1 17
+#define CLK_MDP_COLOR0 18
+#define CLK_MDP_COLOR1 19
+#define CLK_MDP_IMG_DL_RELAY0_ASYNC0 20
+#define CLK_MDP_IMG_DL_RELAY1_ASYNC1 21
+#define CLK_MDP_NR_CLK 22
+
+#endif /* _DT_BINDINGS_CLK_MT8192_H */
diff --git a/include/dt-bindings/clock/mt8195-clk.h b/include/dt-bindings/clock/mt8195-clk.h
new file mode 100644
index 000000000000..d70d017ad69c
--- /dev/null
+++ b/include/dt-bindings/clock/mt8195-clk.h
@@ -0,0 +1,866 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8195_H
+#define _DT_BINDINGS_CLK_MT8195_H
+
+/* TOPCKGEN */
+
+#define CLK_TOP_AXI 0
+#define CLK_TOP_SPM 1
+#define CLK_TOP_SCP 2
+#define CLK_TOP_BUS_AXIMEM 3
+#define CLK_TOP_VPP 4
+#define CLK_TOP_ETHDR 5
+#define CLK_TOP_IPE 6
+#define CLK_TOP_CAM 7
+#define CLK_TOP_CCU 8
+#define CLK_TOP_IMG 9
+#define CLK_TOP_CAMTM 10
+#define CLK_TOP_DSP 11
+#define CLK_TOP_DSP1 12
+#define CLK_TOP_DSP2 13
+#define CLK_TOP_DSP3 14
+#define CLK_TOP_DSP4 15
+#define CLK_TOP_DSP5 16
+#define CLK_TOP_DSP6 17
+#define CLK_TOP_DSP7 18
+#define CLK_TOP_IPU_IF 19
+#define CLK_TOP_MFG_CORE_TMP 20
+#define CLK_TOP_CAMTG 21
+#define CLK_TOP_CAMTG2 22
+#define CLK_TOP_CAMTG3 23
+#define CLK_TOP_CAMTG4 24
+#define CLK_TOP_CAMTG5 25
+#define CLK_TOP_UART 26
+#define CLK_TOP_SPI 27
+#define CLK_TOP_SPIS 28
+#define CLK_TOP_MSDC50_0_HCLK 29
+#define CLK_TOP_MSDC50_0 30
+#define CLK_TOP_MSDC30_1 31
+#define CLK_TOP_MSDC30_2 32
+#define CLK_TOP_INTDIR 33
+#define CLK_TOP_AUD_INTBUS 34
+#define CLK_TOP_AUDIO_H 35
+#define CLK_TOP_PWRAP_ULPOSC 36
+#define CLK_TOP_ATB 37
+#define CLK_TOP_PWRMCU 38
+#define CLK_TOP_DP 39
+#define CLK_TOP_EDP 40
+#define CLK_TOP_DPI 41
+#define CLK_TOP_DISP_PWM0 42
+#define CLK_TOP_DISP_PWM1 43
+#define CLK_TOP_USB_TOP 44
+#define CLK_TOP_SSUSB_XHCI 45
+#define CLK_TOP_USB_TOP_1P 46
+#define CLK_TOP_SSUSB_XHCI_1P 47
+#define CLK_TOP_USB_TOP_2P 48
+#define CLK_TOP_SSUSB_XHCI_2P 49
+#define CLK_TOP_USB_TOP_3P 50
+#define CLK_TOP_SSUSB_XHCI_3P 51
+#define CLK_TOP_I2C 52
+#define CLK_TOP_SENINF 53
+#define CLK_TOP_SENINF1 54
+#define CLK_TOP_SENINF2 55
+#define CLK_TOP_SENINF3 56
+#define CLK_TOP_GCPU 57
+#define CLK_TOP_DXCC 58
+#define CLK_TOP_DPMAIF_MAIN 59
+#define CLK_TOP_AES_UFSFDE 60
+#define CLK_TOP_UFS 61
+#define CLK_TOP_UFS_TICK1US 62
+#define CLK_TOP_UFS_MP_SAP_CFG 63
+#define CLK_TOP_VENC 64
+#define CLK_TOP_VDEC 65
+#define CLK_TOP_PWM 66
+#define CLK_TOP_MCUPM 67
+#define CLK_TOP_SPMI_P_MST 68
+#define CLK_TOP_SPMI_M_MST 69
+#define CLK_TOP_DVFSRC 70
+#define CLK_TOP_TL 71
+#define CLK_TOP_TL_P1 72
+#define CLK_TOP_AES_MSDCFDE 73
+#define CLK_TOP_DSI_OCC 74
+#define CLK_TOP_WPE_VPP 75
+#define CLK_TOP_HDCP 76
+#define CLK_TOP_HDCP_24M 77
+#define CLK_TOP_HD20_DACR_REF_CLK 78
+#define CLK_TOP_HD20_HDCP_CCLK 79
+#define CLK_TOP_HDMI_XTAL 80
+#define CLK_TOP_HDMI_APB 81
+#define CLK_TOP_SNPS_ETH_250M 82
+#define CLK_TOP_SNPS_ETH_62P4M_PTP 83
+#define CLK_TOP_SNPS_ETH_50M_RMII 84
+#define CLK_TOP_DGI_OUT 85
+#define CLK_TOP_NNA0 86
+#define CLK_TOP_NNA1 87
+#define CLK_TOP_ADSP 88
+#define CLK_TOP_ASM_H 89
+#define CLK_TOP_ASM_M 90
+#define CLK_TOP_ASM_L 91
+#define CLK_TOP_APLL1 92
+#define CLK_TOP_APLL2 93
+#define CLK_TOP_APLL3 94
+#define CLK_TOP_APLL4 95
+#define CLK_TOP_APLL5 96
+#define CLK_TOP_I2SO1_MCK 97
+#define CLK_TOP_I2SO2_MCK 98
+#define CLK_TOP_I2SI1_MCK 99
+#define CLK_TOP_I2SI2_MCK 100
+#define CLK_TOP_DPTX_MCK 101
+#define CLK_TOP_AUD_IEC_CLK 102
+#define CLK_TOP_A1SYS_HP 103
+#define CLK_TOP_A2SYS_HF 104
+#define CLK_TOP_A3SYS_HF 105
+#define CLK_TOP_A4SYS_HF 106
+#define CLK_TOP_SPINFI_BCLK 107
+#define CLK_TOP_NFI1X 108
+#define CLK_TOP_ECC 109
+#define CLK_TOP_AUDIO_LOCAL_BUS 110
+#define CLK_TOP_SPINOR 111
+#define CLK_TOP_DVIO_DGI_REF 112
+#define CLK_TOP_ULPOSC 113
+#define CLK_TOP_ULPOSC_CORE 114
+#define CLK_TOP_SRCK 115
+#define CLK_TOP_MFG_CK_FAST_REF 116
+#define CLK_TOP_CLK26M_D2 117
+#define CLK_TOP_CLK26M_D52 118
+#define CLK_TOP_IN_DGI 119
+#define CLK_TOP_IN_DGI_D2 120
+#define CLK_TOP_IN_DGI_D4 121
+#define CLK_TOP_IN_DGI_D6 122
+#define CLK_TOP_IN_DGI_D8 123
+#define CLK_TOP_MAINPLL_D3 124
+#define CLK_TOP_MAINPLL_D4 125
+#define CLK_TOP_MAINPLL_D4_D2 126
+#define CLK_TOP_MAINPLL_D4_D4 127
+#define CLK_TOP_MAINPLL_D4_D8 128
+#define CLK_TOP_MAINPLL_D5 129
+#define CLK_TOP_MAINPLL_D5_D2 130
+#define CLK_TOP_MAINPLL_D5_D4 131
+#define CLK_TOP_MAINPLL_D5_D8 132
+#define CLK_TOP_MAINPLL_D6 133
+#define CLK_TOP_MAINPLL_D6_D2 134
+#define CLK_TOP_MAINPLL_D6_D4 135
+#define CLK_TOP_MAINPLL_D6_D8 136
+#define CLK_TOP_MAINPLL_D7 137
+#define CLK_TOP_MAINPLL_D7_D2 138
+#define CLK_TOP_MAINPLL_D7_D4 139
+#define CLK_TOP_MAINPLL_D7_D8 140
+#define CLK_TOP_MAINPLL_D9 141
+#define CLK_TOP_UNIVPLL_D2 142
+#define CLK_TOP_UNIVPLL_D3 143
+#define CLK_TOP_UNIVPLL_D4 144
+#define CLK_TOP_UNIVPLL_D4_D2 145
+#define CLK_TOP_UNIVPLL_D4_D4 146
+#define CLK_TOP_UNIVPLL_D4_D8 147
+#define CLK_TOP_UNIVPLL_D5 148
+#define CLK_TOP_UNIVPLL_D5_D2 149
+#define CLK_TOP_UNIVPLL_D5_D4 150
+#define CLK_TOP_UNIVPLL_D5_D8 151
+#define CLK_TOP_UNIVPLL_D6 152
+#define CLK_TOP_UNIVPLL_D6_D2 153
+#define CLK_TOP_UNIVPLL_D6_D4 154
+#define CLK_TOP_UNIVPLL_D6_D8 155
+#define CLK_TOP_UNIVPLL_D6_D16 156
+#define CLK_TOP_UNIVPLL_D7 157
+#define CLK_TOP_UNIVPLL_192M 158
+#define CLK_TOP_UNIVPLL_192M_D4 159
+#define CLK_TOP_UNIVPLL_192M_D8 160
+#define CLK_TOP_UNIVPLL_192M_D16 161
+#define CLK_TOP_UNIVPLL_192M_D32 162
+#define CLK_TOP_APLL1_D3 163
+#define CLK_TOP_APLL1_D4 164
+#define CLK_TOP_APLL2_D3 165
+#define CLK_TOP_APLL2_D4 166
+#define CLK_TOP_APLL3_D4 167
+#define CLK_TOP_APLL4_D4 168
+#define CLK_TOP_APLL5_D4 169
+#define CLK_TOP_HDMIRX_APLL_D3 170
+#define CLK_TOP_HDMIRX_APLL_D4 171
+#define CLK_TOP_HDMIRX_APLL_D6 172
+#define CLK_TOP_MMPLL_D4 173
+#define CLK_TOP_MMPLL_D4_D2 174
+#define CLK_TOP_MMPLL_D4_D4 175
+#define CLK_TOP_MMPLL_D5 176
+#define CLK_TOP_MMPLL_D5_D2 177
+#define CLK_TOP_MMPLL_D5_D4 178
+#define CLK_TOP_MMPLL_D6 179
+#define CLK_TOP_MMPLL_D6_D2 180
+#define CLK_TOP_MMPLL_D7 181
+#define CLK_TOP_MMPLL_D9 182
+#define CLK_TOP_TVDPLL1_D2 183
+#define CLK_TOP_TVDPLL1_D4 184
+#define CLK_TOP_TVDPLL1_D8 185
+#define CLK_TOP_TVDPLL1_D16 186
+#define CLK_TOP_TVDPLL2_D2 187
+#define CLK_TOP_TVDPLL2_D4 188
+#define CLK_TOP_TVDPLL2_D8 189
+#define CLK_TOP_TVDPLL2_D16 190
+#define CLK_TOP_MSDCPLL_D2 191
+#define CLK_TOP_MSDCPLL_D4 192
+#define CLK_TOP_MSDCPLL_D16 193
+#define CLK_TOP_ETHPLL_D2 194
+#define CLK_TOP_ETHPLL_D8 195
+#define CLK_TOP_ETHPLL_D10 196
+#define CLK_TOP_DGIPLL_D2 197
+#define CLK_TOP_ULPOSC1 198
+#define CLK_TOP_ULPOSC1_D2 199
+#define CLK_TOP_ULPOSC1_D4 200
+#define CLK_TOP_ULPOSC1_D7 201
+#define CLK_TOP_ULPOSC1_D8 202
+#define CLK_TOP_ULPOSC1_D10 203
+#define CLK_TOP_ULPOSC1_D16 204
+#define CLK_TOP_ULPOSC2 205
+#define CLK_TOP_ADSPPLL_D2 206
+#define CLK_TOP_ADSPPLL_D4 207
+#define CLK_TOP_ADSPPLL_D8 208
+#define CLK_TOP_MEM_466M 209
+#define CLK_TOP_MPHONE_SLAVE_B 210
+#define CLK_TOP_PEXTP_PIPE 211
+#define CLK_TOP_UFS_RX_SYMBOL 212
+#define CLK_TOP_UFS_TX_SYMBOL 213
+#define CLK_TOP_SSUSB_U3PHY_P1_P_P0 214
+#define CLK_TOP_UFS_RX_SYMBOL1 215
+#define CLK_TOP_FPC 216
+#define CLK_TOP_HDMIRX_P 217
+#define CLK_TOP_APLL12_DIV0 218
+#define CLK_TOP_APLL12_DIV1 219
+#define CLK_TOP_APLL12_DIV2 220
+#define CLK_TOP_APLL12_DIV3 221
+#define CLK_TOP_APLL12_DIV4 222
+#define CLK_TOP_APLL12_DIV9 223
+#define CLK_TOP_CFG_VPP0 224
+#define CLK_TOP_CFG_VPP1 225
+#define CLK_TOP_CFG_VDO0 226
+#define CLK_TOP_CFG_VDO1 227
+#define CLK_TOP_CFG_UNIPLL_SES 228
+#define CLK_TOP_CFG_26M_VPP0 229
+#define CLK_TOP_CFG_26M_VPP1 230
+#define CLK_TOP_CFG_26M_AUD 231
+#define CLK_TOP_CFG_AXI_EAST 232
+#define CLK_TOP_CFG_AXI_EAST_NORTH 233
+#define CLK_TOP_CFG_AXI_NORTH 234
+#define CLK_TOP_CFG_AXI_SOUTH 235
+#define CLK_TOP_CFG_EXT_TEST 236
+#define CLK_TOP_SSUSB_REF 237
+#define CLK_TOP_SSUSB_PHY_REF 238
+#define CLK_TOP_SSUSB_P1_REF 239
+#define CLK_TOP_SSUSB_PHY_P1_REF 240
+#define CLK_TOP_SSUSB_P2_REF 241
+#define CLK_TOP_SSUSB_PHY_P2_REF 242
+#define CLK_TOP_SSUSB_P3_REF 243
+#define CLK_TOP_SSUSB_PHY_P3_REF 244
+#define CLK_TOP_NR_CLK 245
+
+/* INFRACFG_AO */
+
+#define CLK_INFRA_AO_PMIC_TMR 0
+#define CLK_INFRA_AO_PMIC_AP 1
+#define CLK_INFRA_AO_PMIC_MD 2
+#define CLK_INFRA_AO_PMIC_CONN 3
+#define CLK_INFRA_AO_SEJ 4
+#define CLK_INFRA_AO_APXGPT 5
+#define CLK_INFRA_AO_GCE 6
+#define CLK_INFRA_AO_GCE2 7
+#define CLK_INFRA_AO_THERM 8
+#define CLK_INFRA_AO_PWM_H 9
+#define CLK_INFRA_AO_PWM1 10
+#define CLK_INFRA_AO_PWM2 11
+#define CLK_INFRA_AO_PWM3 12
+#define CLK_INFRA_AO_PWM4 13
+#define CLK_INFRA_AO_PWM 14
+#define CLK_INFRA_AO_UART0 15
+#define CLK_INFRA_AO_UART1 16
+#define CLK_INFRA_AO_UART2 17
+#define CLK_INFRA_AO_UART3 18
+#define CLK_INFRA_AO_UART4 19
+#define CLK_INFRA_AO_GCE_26M 20
+#define CLK_INFRA_AO_CQ_DMA_FPC 21
+#define CLK_INFRA_AO_UART5 22
+#define CLK_INFRA_AO_HDMI_26M 23
+#define CLK_INFRA_AO_SPI0 24
+#define CLK_INFRA_AO_MSDC0 25
+#define CLK_INFRA_AO_MSDC1 26
+#define CLK_INFRA_AO_CG1_MSDC2 27
+#define CLK_INFRA_AO_MSDC0_SRC 28
+#define CLK_INFRA_AO_TRNG 29
+#define CLK_INFRA_AO_AUXADC 30
+#define CLK_INFRA_AO_CPUM 31
+#define CLK_INFRA_AO_HDMI_32K 32
+#define CLK_INFRA_AO_CEC_66M_H 33
+#define CLK_INFRA_AO_IRRX 34
+#define CLK_INFRA_AO_PCIE_TL_26M 35
+#define CLK_INFRA_AO_MSDC1_SRC 36
+#define CLK_INFRA_AO_CEC_66M_B 37
+#define CLK_INFRA_AO_PCIE_TL_96M 38
+#define CLK_INFRA_AO_DEVICE_APC 39
+#define CLK_INFRA_AO_ECC_66M_H 40
+#define CLK_INFRA_AO_DEBUGSYS 41
+#define CLK_INFRA_AO_AUDIO 42
+#define CLK_INFRA_AO_PCIE_TL_32K 43
+#define CLK_INFRA_AO_DBG_TRACE 44
+#define CLK_INFRA_AO_DRAMC_F26M 45
+#define CLK_INFRA_AO_IRTX 46
+#define CLK_INFRA_AO_SSUSB 47
+#define CLK_INFRA_AO_DISP_PWM 48
+#define CLK_INFRA_AO_CLDMA_B 49
+#define CLK_INFRA_AO_AUDIO_26M_B 50
+#define CLK_INFRA_AO_SPI1 51
+#define CLK_INFRA_AO_SPI2 52
+#define CLK_INFRA_AO_SPI3 53
+#define CLK_INFRA_AO_UNIPRO_SYS 54
+#define CLK_INFRA_AO_UNIPRO_TICK 55
+#define CLK_INFRA_AO_UFS_MP_SAP_B 56
+#define CLK_INFRA_AO_PWRMCU 57
+#define CLK_INFRA_AO_PWRMCU_BUS_H 58
+#define CLK_INFRA_AO_APDMA_B 59
+#define CLK_INFRA_AO_SPI4 60
+#define CLK_INFRA_AO_SPI5 61
+#define CLK_INFRA_AO_CQ_DMA 62
+#define CLK_INFRA_AO_AES_UFSFDE 63
+#define CLK_INFRA_AO_AES 64
+#define CLK_INFRA_AO_UFS_TICK 65
+#define CLK_INFRA_AO_SSUSB_XHCI 66
+#define CLK_INFRA_AO_MSDC0_SELF 67
+#define CLK_INFRA_AO_MSDC1_SELF 68
+#define CLK_INFRA_AO_MSDC2_SELF 69
+#define CLK_INFRA_AO_I2S_DMA 70
+#define CLK_INFRA_AO_AP_MSDC0 71
+#define CLK_INFRA_AO_MD_MSDC0 72
+#define CLK_INFRA_AO_CG3_MSDC2 73
+#define CLK_INFRA_AO_GCPU 74
+#define CLK_INFRA_AO_PCIE_PERI_26M 75
+#define CLK_INFRA_AO_GCPU_66M_B 76
+#define CLK_INFRA_AO_GCPU_133M_B 77
+#define CLK_INFRA_AO_DISP_PWM1 78
+#define CLK_INFRA_AO_FBIST2FPC 79
+#define CLK_INFRA_AO_DEVICE_APC_SYNC 80
+#define CLK_INFRA_AO_PCIE_P1_PERI_26M 81
+#define CLK_INFRA_AO_SPIS0 82
+#define CLK_INFRA_AO_SPIS1 83
+#define CLK_INFRA_AO_133M_M_PERI 84
+#define CLK_INFRA_AO_66M_M_PERI 85
+#define CLK_INFRA_AO_PCIE_PL_P_250M_P0 86
+#define CLK_INFRA_AO_PCIE_PL_P_250M_P1 87
+#define CLK_INFRA_AO_PCIE_P1_TL_96M 88
+#define CLK_INFRA_AO_AES_MSDCFDE_0P 89
+#define CLK_INFRA_AO_UFS_TX_SYMBOL 90
+#define CLK_INFRA_AO_UFS_RX_SYMBOL 91
+#define CLK_INFRA_AO_UFS_RX_SYMBOL1 92
+#define CLK_INFRA_AO_PERI_UFS_MEM_SUB 93
+#define CLK_INFRA_AO_NR_CLK 94
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_NNAPLL 0
+#define CLK_APMIXED_RESPLL 1
+#define CLK_APMIXED_ETHPLL 2
+#define CLK_APMIXED_MSDCPLL 3
+#define CLK_APMIXED_TVDPLL1 4
+#define CLK_APMIXED_TVDPLL2 5
+#define CLK_APMIXED_MMPLL 6
+#define CLK_APMIXED_MAINPLL 7
+#define CLK_APMIXED_VDECPLL 8
+#define CLK_APMIXED_IMGPLL 9
+#define CLK_APMIXED_UNIVPLL 10
+#define CLK_APMIXED_HDMIPLL1 11
+#define CLK_APMIXED_HDMIPLL2 12
+#define CLK_APMIXED_HDMIRX_APLL 13
+#define CLK_APMIXED_USB1PLL 14
+#define CLK_APMIXED_ADSPPLL 15
+#define CLK_APMIXED_APLL1 16
+#define CLK_APMIXED_APLL2 17
+#define CLK_APMIXED_APLL3 18
+#define CLK_APMIXED_APLL4 19
+#define CLK_APMIXED_APLL5 20
+#define CLK_APMIXED_MFGPLL 21
+#define CLK_APMIXED_DGIPLL 22
+#define CLK_APMIXED_PLL_SSUSB26M 23
+#define CLK_APMIXED_NR_CLK 24
+
+/* SCP_ADSP */
+
+#define CLK_SCP_ADSP_AUDIODSP 0
+#define CLK_SCP_ADSP_NR_CLK 1
+
+/* PERICFG_AO */
+
+#define CLK_PERI_AO_ETHERNET 0
+#define CLK_PERI_AO_ETHERNET_BUS 1
+#define CLK_PERI_AO_FLASHIF_BUS 2
+#define CLK_PERI_AO_FLASHIF_FLASH 3
+#define CLK_PERI_AO_SSUSB_1P_BUS 4
+#define CLK_PERI_AO_SSUSB_1P_XHCI 5
+#define CLK_PERI_AO_SSUSB_2P_BUS 6
+#define CLK_PERI_AO_SSUSB_2P_XHCI 7
+#define CLK_PERI_AO_SSUSB_3P_BUS 8
+#define CLK_PERI_AO_SSUSB_3P_XHCI 9
+#define CLK_PERI_AO_SPINFI 10
+#define CLK_PERI_AO_ETHERNET_MAC 11
+#define CLK_PERI_AO_NFI_H 12
+#define CLK_PERI_AO_FNFI1X 13
+#define CLK_PERI_AO_PCIE_P0_MEM 14
+#define CLK_PERI_AO_PCIE_P1_MEM 15
+#define CLK_PERI_AO_NR_CLK 16
+
+/* IMP_IIC_WRAP_S */
+
+#define CLK_IMP_IIC_WRAP_S_I2C5 0
+#define CLK_IMP_IIC_WRAP_S_I2C6 1
+#define CLK_IMP_IIC_WRAP_S_I2C7 2
+#define CLK_IMP_IIC_WRAP_S_NR_CLK 3
+
+/* IMP_IIC_WRAP_W */
+
+#define CLK_IMP_IIC_WRAP_W_I2C0 0
+#define CLK_IMP_IIC_WRAP_W_I2C1 1
+#define CLK_IMP_IIC_WRAP_W_I2C2 2
+#define CLK_IMP_IIC_WRAP_W_I2C3 3
+#define CLK_IMP_IIC_WRAP_W_I2C4 4
+#define CLK_IMP_IIC_WRAP_W_NR_CLK 5
+
+/* MFGCFG */
+
+#define CLK_MFG_BG3D 0
+#define CLK_MFG_NR_CLK 1
+
+/* VPPSYS0 */
+
+#define CLK_VPP0_MDP_FG 0
+#define CLK_VPP0_STITCH 1
+#define CLK_VPP0_PADDING 2
+#define CLK_VPP0_MDP_TCC 3
+#define CLK_VPP0_WARP0_ASYNC_TX 4
+#define CLK_VPP0_WARP1_ASYNC_TX 5
+#define CLK_VPP0_MUTEX 6
+#define CLK_VPP0_VPP02VPP1_RELAY 7
+#define CLK_VPP0_VPP12VPP0_ASYNC 8
+#define CLK_VPP0_MMSYSRAM_TOP 9
+#define CLK_VPP0_MDP_AAL 10
+#define CLK_VPP0_MDP_RSZ 11
+#define CLK_VPP0_SMI_COMMON 12
+#define CLK_VPP0_GALS_VDO0_LARB0 13
+#define CLK_VPP0_GALS_VDO0_LARB1 14
+#define CLK_VPP0_GALS_VENCSYS 15
+#define CLK_VPP0_GALS_VENCSYS_CORE1 16
+#define CLK_VPP0_GALS_INFRA 17
+#define CLK_VPP0_GALS_CAMSYS 18
+#define CLK_VPP0_GALS_VPP1_LARB5 19
+#define CLK_VPP0_GALS_VPP1_LARB6 20
+#define CLK_VPP0_SMI_REORDER 21
+#define CLK_VPP0_SMI_IOMMU 22
+#define CLK_VPP0_GALS_IMGSYS_CAMSYS 23
+#define CLK_VPP0_MDP_RDMA 24
+#define CLK_VPP0_MDP_WROT 25
+#define CLK_VPP0_GALS_EMI0_EMI1 26
+#define CLK_VPP0_SMI_SUB_COMMON_REORDER 27
+#define CLK_VPP0_SMI_RSI 28
+#define CLK_VPP0_SMI_COMMON_LARB4 29
+#define CLK_VPP0_GALS_VDEC_VDEC_CORE1 30
+#define CLK_VPP0_GALS_VPP1_WPE 31
+#define CLK_VPP0_GALS_VDO0_VDO1_VENCSYS_CORE1 32
+#define CLK_VPP0_FAKE_ENG 33
+#define CLK_VPP0_MDP_HDR 34
+#define CLK_VPP0_MDP_TDSHP 35
+#define CLK_VPP0_MDP_COLOR 36
+#define CLK_VPP0_MDP_OVL 37
+#define CLK_VPP0_WARP0_RELAY 38
+#define CLK_VPP0_WARP0_MDP_DL_ASYNC 39
+#define CLK_VPP0_WARP1_RELAY 40
+#define CLK_VPP0_WARP1_MDP_DL_ASYNC 41
+#define CLK_VPP0_NR_CLK 42
+
+/* WPESYS */
+
+#define CLK_WPE_VPP0 0
+#define CLK_WPE_VPP1 1
+#define CLK_WPE_SMI_LARB7 2
+#define CLK_WPE_SMI_LARB8 3
+#define CLK_WPE_EVENT_TX 4
+#define CLK_WPE_SMI_LARB7_P 5
+#define CLK_WPE_SMI_LARB8_P 6
+#define CLK_WPE_NR_CLK 7
+
+/* WPESYS_VPP0 */
+
+#define CLK_WPE_VPP0_VECI 0
+#define CLK_WPE_VPP0_VEC2I 1
+#define CLK_WPE_VPP0_VEC3I 2
+#define CLK_WPE_VPP0_WPEO 3
+#define CLK_WPE_VPP0_MSKO 4
+#define CLK_WPE_VPP0_VGEN 5
+#define CLK_WPE_VPP0_EXT 6
+#define CLK_WPE_VPP0_VFC 7
+#define CLK_WPE_VPP0_CACH0_TOP 8
+#define CLK_WPE_VPP0_CACH0_DMA 9
+#define CLK_WPE_VPP0_CACH1_TOP 10
+#define CLK_WPE_VPP0_CACH1_DMA 11
+#define CLK_WPE_VPP0_CACH2_TOP 12
+#define CLK_WPE_VPP0_CACH2_DMA 13
+#define CLK_WPE_VPP0_CACH3_TOP 14
+#define CLK_WPE_VPP0_CACH3_DMA 15
+#define CLK_WPE_VPP0_PSP 16
+#define CLK_WPE_VPP0_PSP2 17
+#define CLK_WPE_VPP0_SYNC 18
+#define CLK_WPE_VPP0_C24 19
+#define CLK_WPE_VPP0_MDP_CROP 20
+#define CLK_WPE_VPP0_ISP_CROP 21
+#define CLK_WPE_VPP0_TOP 22
+#define CLK_WPE_VPP0_NR_CLK 23
+
+/* WPESYS_VPP1 */
+
+#define CLK_WPE_VPP1_VECI 0
+#define CLK_WPE_VPP1_VEC2I 1
+#define CLK_WPE_VPP1_VEC3I 2
+#define CLK_WPE_VPP1_WPEO 3
+#define CLK_WPE_VPP1_MSKO 4
+#define CLK_WPE_VPP1_VGEN 5
+#define CLK_WPE_VPP1_EXT 6
+#define CLK_WPE_VPP1_VFC 7
+#define CLK_WPE_VPP1_CACH0_TOP 8
+#define CLK_WPE_VPP1_CACH0_DMA 9
+#define CLK_WPE_VPP1_CACH1_TOP 10
+#define CLK_WPE_VPP1_CACH1_DMA 11
+#define CLK_WPE_VPP1_CACH2_TOP 12
+#define CLK_WPE_VPP1_CACH2_DMA 13
+#define CLK_WPE_VPP1_CACH3_TOP 14
+#define CLK_WPE_VPP1_CACH3_DMA 15
+#define CLK_WPE_VPP1_PSP 16
+#define CLK_WPE_VPP1_PSP2 17
+#define CLK_WPE_VPP1_SYNC 18
+#define CLK_WPE_VPP1_C24 19
+#define CLK_WPE_VPP1_MDP_CROP 20
+#define CLK_WPE_VPP1_ISP_CROP 21
+#define CLK_WPE_VPP1_TOP 22
+#define CLK_WPE_VPP1_NR_CLK 23
+
+/* VPPSYS1 */
+
+#define CLK_VPP1_SVPP1_MDP_OVL 0
+#define CLK_VPP1_SVPP1_MDP_TCC 1
+#define CLK_VPP1_SVPP1_MDP_WROT 2
+#define CLK_VPP1_SVPP1_VPP_PAD 3
+#define CLK_VPP1_SVPP2_MDP_WROT 4
+#define CLK_VPP1_SVPP2_VPP_PAD 5
+#define CLK_VPP1_SVPP3_MDP_WROT 6
+#define CLK_VPP1_SVPP3_VPP_PAD 7
+#define CLK_VPP1_SVPP1_MDP_RDMA 8
+#define CLK_VPP1_SVPP1_MDP_FG 9
+#define CLK_VPP1_SVPP2_MDP_RDMA 10
+#define CLK_VPP1_SVPP2_MDP_FG 11
+#define CLK_VPP1_SVPP3_MDP_RDMA 12
+#define CLK_VPP1_SVPP3_MDP_FG 13
+#define CLK_VPP1_VPP_SPLIT 14
+#define CLK_VPP1_SVPP2_VDO0_DL_RELAY 15
+#define CLK_VPP1_SVPP1_MDP_TDSHP 16
+#define CLK_VPP1_SVPP1_MDP_COLOR 17
+#define CLK_VPP1_SVPP3_VDO1_DL_RELAY 18
+#define CLK_VPP1_SVPP2_VPP_MERGE 19
+#define CLK_VPP1_SVPP2_MDP_COLOR 20
+#define CLK_VPP1_VPPSYS1_GALS 21
+#define CLK_VPP1_SVPP3_VPP_MERGE 22
+#define CLK_VPP1_SVPP3_MDP_COLOR 23
+#define CLK_VPP1_VPPSYS1_LARB 24
+#define CLK_VPP1_SVPP1_MDP_RSZ 25
+#define CLK_VPP1_SVPP1_MDP_HDR 26
+#define CLK_VPP1_SVPP1_MDP_AAL 27
+#define CLK_VPP1_SVPP2_MDP_HDR 28
+#define CLK_VPP1_SVPP2_MDP_AAL 29
+#define CLK_VPP1_DL_ASYNC 30
+#define CLK_VPP1_LARB5_FAKE_ENG 31
+#define CLK_VPP1_SVPP3_MDP_HDR 32
+#define CLK_VPP1_SVPP3_MDP_AAL 33
+#define CLK_VPP1_SVPP2_VDO1_DL_RELAY 34
+#define CLK_VPP1_LARB6_FAKE_ENG 35
+#define CLK_VPP1_SVPP2_MDP_RSZ 36
+#define CLK_VPP1_SVPP3_MDP_RSZ 37
+#define CLK_VPP1_SVPP3_VDO0_DL_RELAY 38
+#define CLK_VPP1_DISP_MUTEX 39
+#define CLK_VPP1_SVPP2_MDP_TDSHP 40
+#define CLK_VPP1_SVPP3_MDP_TDSHP 41
+#define CLK_VPP1_VPP0_DL1_RELAY 42
+#define CLK_VPP1_HDMI_META 43
+#define CLK_VPP1_VPP_SPLIT_HDMI 44
+#define CLK_VPP1_DGI_IN 45
+#define CLK_VPP1_DGI_OUT 46
+#define CLK_VPP1_VPP_SPLIT_DGI 47
+#define CLK_VPP1_VPP0_DL_ASYNC 48
+#define CLK_VPP1_VPP0_DL_RELAY 49
+#define CLK_VPP1_VPP_SPLIT_26M 50
+#define CLK_VPP1_NR_CLK 51
+
+/* IMGSYS */
+
+#define CLK_IMG_LARB9 0
+#define CLK_IMG_TRAW0 1
+#define CLK_IMG_TRAW1 2
+#define CLK_IMG_TRAW2 3
+#define CLK_IMG_TRAW3 4
+#define CLK_IMG_DIP0 5
+#define CLK_IMG_WPE0 6
+#define CLK_IMG_IPE 7
+#define CLK_IMG_DIP1 8
+#define CLK_IMG_WPE1 9
+#define CLK_IMG_GALS 10
+#define CLK_IMG_NR_CLK 11
+
+/* IMGSYS1_DIP_TOP */
+
+#define CLK_IMG1_DIP_TOP_LARB10 0
+#define CLK_IMG1_DIP_TOP_DIP_TOP 1
+#define CLK_IMG1_DIP_TOP_NR_CLK 2
+
+/* IMGSYS1_DIP_NR */
+
+#define CLK_IMG1_DIP_NR_RESERVE 0
+#define CLK_IMG1_DIP_NR_DIP_NR 1
+#define CLK_IMG1_DIP_NR_NR_CLK 2
+
+/* IMGSYS1_WPE */
+
+#define CLK_IMG1_WPE_LARB11 0
+#define CLK_IMG1_WPE_WPE 1
+#define CLK_IMG1_WPE_NR_CLK 2
+
+/* IPESYS */
+
+#define CLK_IPE_DPE 0
+#define CLK_IPE_FDVT 1
+#define CLK_IPE_ME 2
+#define CLK_IPE_TOP 3
+#define CLK_IPE_SMI_LARB12 4
+#define CLK_IPE_NR_CLK 5
+
+/* CAMSYS */
+
+#define CLK_CAM_LARB13 0
+#define CLK_CAM_LARB14 1
+#define CLK_CAM_MAIN_CAM 2
+#define CLK_CAM_MAIN_CAMTG 3
+#define CLK_CAM_SENINF 4
+#define CLK_CAM_GCAMSVA 5
+#define CLK_CAM_GCAMSVB 6
+#define CLK_CAM_GCAMSVC 7
+#define CLK_CAM_SCAMSA 8
+#define CLK_CAM_SCAMSB 9
+#define CLK_CAM_CAMSV_TOP 10
+#define CLK_CAM_CAMSV_CQ 11
+#define CLK_CAM_ADL 12
+#define CLK_CAM_ASG 13
+#define CLK_CAM_PDA 14
+#define CLK_CAM_FAKE_ENG 15
+#define CLK_CAM_MAIN_MRAW0 16
+#define CLK_CAM_MAIN_MRAW1 17
+#define CLK_CAM_MAIN_MRAW2 18
+#define CLK_CAM_MAIN_MRAW3 19
+#define CLK_CAM_CAM2MM0_GALS 20
+#define CLK_CAM_CAM2MM1_GALS 21
+#define CLK_CAM_CAM2SYS_GALS 22
+#define CLK_CAM_NR_CLK 23
+
+/* CAMSYS_RAWA */
+
+#define CLK_CAM_RAWA_LARBX 0
+#define CLK_CAM_RAWA_CAM 1
+#define CLK_CAM_RAWA_CAMTG 2
+#define CLK_CAM_RAWA_NR_CLK 3
+
+/* CAMSYS_YUVA */
+
+#define CLK_CAM_YUVA_LARBX 0
+#define CLK_CAM_YUVA_CAM 1
+#define CLK_CAM_YUVA_CAMTG 2
+#define CLK_CAM_YUVA_NR_CLK 3
+
+/* CAMSYS_RAWB */
+
+#define CLK_CAM_RAWB_LARBX 0
+#define CLK_CAM_RAWB_CAM 1
+#define CLK_CAM_RAWB_CAMTG 2
+#define CLK_CAM_RAWB_NR_CLK 3
+
+/* CAMSYS_YUVB */
+
+#define CLK_CAM_YUVB_LARBX 0
+#define CLK_CAM_YUVB_CAM 1
+#define CLK_CAM_YUVB_CAMTG 2
+#define CLK_CAM_YUVB_NR_CLK 3
+
+/* CAMSYS_MRAW */
+
+#define CLK_CAM_MRAW_LARBX 0
+#define CLK_CAM_MRAW_CAMTG 1
+#define CLK_CAM_MRAW_MRAW0 2
+#define CLK_CAM_MRAW_MRAW1 3
+#define CLK_CAM_MRAW_MRAW2 4
+#define CLK_CAM_MRAW_MRAW3 5
+#define CLK_CAM_MRAW_NR_CLK 6
+
+/* CCUSYS */
+
+#define CLK_CCU_LARB18 0
+#define CLK_CCU_AHB 1
+#define CLK_CCU_CCU0 2
+#define CLK_CCU_CCU1 3
+#define CLK_CCU_NR_CLK 4
+
+/* VDECSYS_SOC */
+
+#define CLK_VDEC_SOC_LARB1 0
+#define CLK_VDEC_SOC_LAT 1
+#define CLK_VDEC_SOC_VDEC 2
+#define CLK_VDEC_SOC_NR_CLK 3
+
+/* VDECSYS */
+
+#define CLK_VDEC_LARB1 0
+#define CLK_VDEC_LAT 1
+#define CLK_VDEC_VDEC 2
+#define CLK_VDEC_NR_CLK 3
+
+/* VDECSYS_CORE1 */
+
+#define CLK_VDEC_CORE1_LARB1 0
+#define CLK_VDEC_CORE1_LAT 1
+#define CLK_VDEC_CORE1_VDEC 2
+#define CLK_VDEC_CORE1_NR_CLK 3
+
+/* APUSYS_PLL */
+
+#define CLK_APUSYS_PLL_APUPLL 0
+#define CLK_APUSYS_PLL_NPUPLL 1
+#define CLK_APUSYS_PLL_APUPLL1 2
+#define CLK_APUSYS_PLL_APUPLL2 3
+#define CLK_APUSYS_PLL_NR_CLK 4
+
+/* VENCSYS */
+
+#define CLK_VENC_LARB 0
+#define CLK_VENC_VENC 1
+#define CLK_VENC_JPGENC 2
+#define CLK_VENC_JPGDEC 3
+#define CLK_VENC_JPGDEC_C1 4
+#define CLK_VENC_GALS 5
+#define CLK_VENC_NR_CLK 6
+
+/* VENCSYS_CORE1 */
+
+#define CLK_VENC_CORE1_LARB 0
+#define CLK_VENC_CORE1_VENC 1
+#define CLK_VENC_CORE1_JPGENC 2
+#define CLK_VENC_CORE1_JPGDEC 3
+#define CLK_VENC_CORE1_JPGDEC_C1 4
+#define CLK_VENC_CORE1_GALS 5
+#define CLK_VENC_CORE1_NR_CLK 6
+
+/* VDOSYS0 */
+
+#define CLK_VDO0_DISP_OVL0 0
+#define CLK_VDO0_DISP_COLOR0 1
+#define CLK_VDO0_DISP_COLOR1 2
+#define CLK_VDO0_DISP_CCORR0 3
+#define CLK_VDO0_DISP_CCORR1 4
+#define CLK_VDO0_DISP_AAL0 5
+#define CLK_VDO0_DISP_AAL1 6
+#define CLK_VDO0_DISP_GAMMA0 7
+#define CLK_VDO0_DISP_GAMMA1 8
+#define CLK_VDO0_DISP_DITHER0 9
+#define CLK_VDO0_DISP_DITHER1 10
+#define CLK_VDO0_DISP_OVL1 11
+#define CLK_VDO0_DISP_WDMA0 12
+#define CLK_VDO0_DISP_WDMA1 13
+#define CLK_VDO0_DISP_RDMA0 14
+#define CLK_VDO0_DISP_RDMA1 15
+#define CLK_VDO0_DSI0 16
+#define CLK_VDO0_DSI1 17
+#define CLK_VDO0_DSC_WRAP0 18
+#define CLK_VDO0_VPP_MERGE0 19
+#define CLK_VDO0_DP_INTF0 20
+#define CLK_VDO0_DISP_MUTEX0 21
+#define CLK_VDO0_DISP_IL_ROT0 22
+#define CLK_VDO0_APB_BUS 23
+#define CLK_VDO0_FAKE_ENG0 24
+#define CLK_VDO0_FAKE_ENG1 25
+#define CLK_VDO0_DL_ASYNC0 26
+#define CLK_VDO0_DL_ASYNC1 27
+#define CLK_VDO0_DL_ASYNC2 28
+#define CLK_VDO0_DL_ASYNC3 29
+#define CLK_VDO0_DL_ASYNC4 30
+#define CLK_VDO0_DISP_MONITOR0 31
+#define CLK_VDO0_DISP_MONITOR1 32
+#define CLK_VDO0_DISP_MONITOR2 33
+#define CLK_VDO0_DISP_MONITOR3 34
+#define CLK_VDO0_DISP_MONITOR4 35
+#define CLK_VDO0_SMI_GALS 36
+#define CLK_VDO0_SMI_COMMON 37
+#define CLK_VDO0_SMI_EMI 38
+#define CLK_VDO0_SMI_IOMMU 39
+#define CLK_VDO0_SMI_LARB 40
+#define CLK_VDO0_SMI_RSI 41
+#define CLK_VDO0_DSI0_DSI 42
+#define CLK_VDO0_DSI1_DSI 43
+#define CLK_VDO0_DP_INTF0_DP_INTF 44
+#define CLK_VDO0_NR_CLK 45
+
+/* VDOSYS1 */
+
+#define CLK_VDO1_SMI_LARB2 0
+#define CLK_VDO1_SMI_LARB3 1
+#define CLK_VDO1_GALS 2
+#define CLK_VDO1_FAKE_ENG0 3
+#define CLK_VDO1_FAKE_ENG 4
+#define CLK_VDO1_MDP_RDMA0 5
+#define CLK_VDO1_MDP_RDMA1 6
+#define CLK_VDO1_MDP_RDMA2 7
+#define CLK_VDO1_MDP_RDMA3 8
+#define CLK_VDO1_VPP_MERGE0 9
+#define CLK_VDO1_VPP_MERGE1 10
+#define CLK_VDO1_VPP_MERGE2 11
+#define CLK_VDO1_VPP_MERGE3 12
+#define CLK_VDO1_VPP_MERGE4 13
+#define CLK_VDO1_VPP2_TO_VDO1_DL_ASYNC 14
+#define CLK_VDO1_VPP3_TO_VDO1_DL_ASYNC 15
+#define CLK_VDO1_DISP_MUTEX 16
+#define CLK_VDO1_MDP_RDMA4 17
+#define CLK_VDO1_MDP_RDMA5 18
+#define CLK_VDO1_MDP_RDMA6 19
+#define CLK_VDO1_MDP_RDMA7 20
+#define CLK_VDO1_DP_INTF0_MM 21
+#define CLK_VDO1_DPI0_MM 22
+#define CLK_VDO1_DPI1_MM 23
+#define CLK_VDO1_DISP_MONITOR 24
+#define CLK_VDO1_MERGE0_DL_ASYNC 25
+#define CLK_VDO1_MERGE1_DL_ASYNC 26
+#define CLK_VDO1_MERGE2_DL_ASYNC 27
+#define CLK_VDO1_MERGE3_DL_ASYNC 28
+#define CLK_VDO1_MERGE4_DL_ASYNC 29
+#define CLK_VDO1_VDO0_DSC_TO_VDO1_DL_ASYNC 30
+#define CLK_VDO1_VDO0_MERGE_TO_VDO1_DL_ASYNC 31
+#define CLK_VDO1_HDR_VDO_FE0 32
+#define CLK_VDO1_HDR_GFX_FE0 33
+#define CLK_VDO1_HDR_VDO_BE 34
+#define CLK_VDO1_HDR_VDO_FE1 35
+#define CLK_VDO1_HDR_GFX_FE1 36
+#define CLK_VDO1_DISP_MIXER 37
+#define CLK_VDO1_HDR_VDO_FE0_DL_ASYNC 38
+#define CLK_VDO1_HDR_VDO_FE1_DL_ASYNC 39
+#define CLK_VDO1_HDR_GFX_FE0_DL_ASYNC 40
+#define CLK_VDO1_HDR_GFX_FE1_DL_ASYNC 41
+#define CLK_VDO1_HDR_VDO_BE_DL_ASYNC 42
+#define CLK_VDO1_DPI0 43
+#define CLK_VDO1_DISP_MONITOR_DPI0 44
+#define CLK_VDO1_DPI1 45
+#define CLK_VDO1_DISP_MONITOR_DPI1 46
+#define CLK_VDO1_DPINTF 47
+#define CLK_VDO1_DISP_MONITOR_DPINTF 48
+#define CLK_VDO1_26M_SLOW 49
+#define CLK_VDO1_DPI1_HDMI 50
+#define CLK_VDO1_NR_CLK 51
+
+#endif /* _DT_BINDINGS_CLK_MT8195_H */
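The indices above are consumed from device tree source rather than C: a consumer node includes this header and passes the constant as the clock-specifier cell of the matching provider. A minimal sketch, assuming hypothetical node names, unit address and provider labels (not taken from a real MT8195 device tree); both constants do appear in the header above:

    #include <dt-bindings/clock/mt8195-clk.h>

    uart0: serial@11001100 {
            compatible = "mediatek,mt8195-uart";
            /* one index from the TOPCKGEN list, one from INFRACFG_AO */
            clocks = <&topckgen CLK_TOP_UART>,
                     <&infracfg_ao CLK_INFRA_AO_UART0>;
            clock-names = "baud", "bus";
    };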
diff --git a/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h b/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
index f21522605b94..3e0a9b68933d 100644
--- a/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
+++ b/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Nuvoton NPCM7xx Clock Generator binding
- * clock binding number for all clocks supportted by nuvoton,npcm7xx-clk
+ * clock binding number for all clocks supported by nuvoton,npcm7xx-clk
*
* Copyright (C) 2018 Nuvoton Technologies tali.perry@nuvoton.com
*
diff --git a/include/dt-bindings/clock/nuvoton,npcm845-clk.h b/include/dt-bindings/clock/nuvoton,npcm845-clk.h
new file mode 100644
index 000000000000..e5cce08b00e1
--- /dev/null
+++ b/include/dt-bindings/clock/nuvoton,npcm845-clk.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2021 Nuvoton Technologies.
+ * Author: Tomer Maimon <tomer.maimon@nuvoton.com>
+ *
+ * Device Tree binding constants for NPCM8XX clock controller.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_NPCM8XX_H
+#define __DT_BINDINGS_CLOCK_NPCM8XX_H
+
+#define NPCM8XX_CLK_CPU 0
+#define NPCM8XX_CLK_GFX_PIXEL 1
+#define NPCM8XX_CLK_MC 2
+#define NPCM8XX_CLK_ADC 3
+#define NPCM8XX_CLK_AHB 4
+#define NPCM8XX_CLK_TIMER 5
+#define NPCM8XX_CLK_UART 6
+#define NPCM8XX_CLK_UART2 7
+#define NPCM8XX_CLK_MMC 8
+#define NPCM8XX_CLK_SPI3 9
+#define NPCM8XX_CLK_PCI 10
+#define NPCM8XX_CLK_AXI 11
+#define NPCM8XX_CLK_APB4 12
+#define NPCM8XX_CLK_APB3 13
+#define NPCM8XX_CLK_APB2 14
+#define NPCM8XX_CLK_APB1 15
+#define NPCM8XX_CLK_APB5 16
+#define NPCM8XX_CLK_CLKOUT 17
+#define NPCM8XX_CLK_GFX 18
+#define NPCM8XX_CLK_SU 19
+#define NPCM8XX_CLK_SU48 20
+#define NPCM8XX_CLK_SDHC 21
+#define NPCM8XX_CLK_SPI0 22
+#define NPCM8XX_CLK_SPI1 23
+#define NPCM8XX_CLK_SPIX 24
+#define NPCM8XX_CLK_RG 25
+#define NPCM8XX_CLK_RCP 26
+#define NPCM8XX_CLK_PRE_ADC 27
+#define NPCM8XX_CLK_ATB 28
+#define NPCM8XX_CLK_PRE_CLK 29
+#define NPCM8XX_CLK_TH 30
+#define NPCM8XX_CLK_REFCLK 31
+#define NPCM8XX_CLK_SYSBYPCK 32
+#define NPCM8XX_CLK_MCBYPCK 33
+
+#define NPCM8XX_NUM_CLOCKS (NPCM8XX_CLK_MCBYPCK + 1)
+
+#endif
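NPCM8XX_NUM_CLOCKS is defined as the highest index plus one, presumably so the clock driver can size its table without a separate count. Consumers reference the indices through the usual clocks property; a hedged sketch, where the node name, unit address and serial compatible are assumptions for illustration:

    #include <dt-bindings/clock/nuvoton,npcm845-clk.h>

    serial0: serial@f0000000 {
            compatible = "nuvoton,npcm845-uart";
            /* index from the list above */
            clocks = <&clk NPCM8XX_CLK_UART>;
    };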
diff --git a/include/dt-bindings/clock/omap5.h b/include/dt-bindings/clock/omap5.h
index 41775272fd27..90e0d4b00127 100644
--- a/include/dt-bindings/clock/omap5.h
+++ b/include/dt-bindings/clock/omap5.h
@@ -32,6 +32,8 @@
/* l3main2 clocks */
#define OMAP5_L3_MAIN_2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+#define OMAP5_L3_MAIN_2_GPMC_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28)
+#define OMAP5_L3_MAIN_2_OCMC_RAM_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30)

/* ipu clocks */
#define OMAP5_MMU_IPU_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
diff --git a/include/dt-bindings/clock/qcom,apss-ipq.h b/include/dt-bindings/clock/qcom,apss-ipq.h
new file mode 100644
index 000000000000..77b6e05492e2
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,apss-ipq.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_QCA_APSS_IPQ6018_H
+#define _DT_BINDINGS_CLOCK_QCA_APSS_IPQ6018_H
+
+#define APCS_ALIAS0_CLK_SRC 0
+#define APCS_ALIAS0_CORE_CLK 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,camcc-sc7180.h b/include/dt-bindings/clock/qcom,camcc-sc7180.h
new file mode 100644
index 000000000000..ef7d3a041b88
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,camcc-sc7180.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SC7180_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SC7180_H
+
+/* CAM_CC clocks */
+#define CAM_CC_PLL2_OUT_EARLY 0
+#define CAM_CC_PLL0 1
+#define CAM_CC_PLL1 2
+#define CAM_CC_PLL2 3
+#define CAM_CC_PLL2_OUT_AUX 4
+#define CAM_CC_PLL3 5
+#define CAM_CC_CAMNOC_AXI_CLK 6
+#define CAM_CC_CCI_0_CLK 7
+#define CAM_CC_CCI_0_CLK_SRC 8
+#define CAM_CC_CCI_1_CLK 9
+#define CAM_CC_CCI_1_CLK_SRC 10
+#define CAM_CC_CORE_AHB_CLK 11
+#define CAM_CC_CPAS_AHB_CLK 12
+#define CAM_CC_CPHY_RX_CLK_SRC 13
+#define CAM_CC_CSI0PHYTIMER_CLK 14
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 15
+#define CAM_CC_CSI1PHYTIMER_CLK 16
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 17
+#define CAM_CC_CSI2PHYTIMER_CLK 18
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 19
+#define CAM_CC_CSI3PHYTIMER_CLK 20
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 21
+#define CAM_CC_CSIPHY0_CLK 22
+#define CAM_CC_CSIPHY1_CLK 23
+#define CAM_CC_CSIPHY2_CLK 24
+#define CAM_CC_CSIPHY3_CLK 25
+#define CAM_CC_FAST_AHB_CLK_SRC 26
+#define CAM_CC_ICP_APB_CLK 27
+#define CAM_CC_ICP_ATB_CLK 28
+#define CAM_CC_ICP_CLK 29
+#define CAM_CC_ICP_CLK_SRC 30
+#define CAM_CC_ICP_CTI_CLK 31
+#define CAM_CC_ICP_TS_CLK 32
+#define CAM_CC_IFE_0_AXI_CLK 33
+#define CAM_CC_IFE_0_CLK 34
+#define CAM_CC_IFE_0_CLK_SRC 35
+#define CAM_CC_IFE_0_CPHY_RX_CLK 36
+#define CAM_CC_IFE_0_CSID_CLK 37
+#define CAM_CC_IFE_0_CSID_CLK_SRC 38
+#define CAM_CC_IFE_0_DSP_CLK 39
+#define CAM_CC_IFE_1_AXI_CLK 40
+#define CAM_CC_IFE_1_CLK 41
+#define CAM_CC_IFE_1_CLK_SRC 42
+#define CAM_CC_IFE_1_CPHY_RX_CLK 43
+#define CAM_CC_IFE_1_CSID_CLK 44
+#define CAM_CC_IFE_1_CSID_CLK_SRC 45
+#define CAM_CC_IFE_1_DSP_CLK 46
+#define CAM_CC_IFE_LITE_CLK 47
+#define CAM_CC_IFE_LITE_CLK_SRC 48
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 49
+#define CAM_CC_IFE_LITE_CSID_CLK 50
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 51
+#define CAM_CC_IPE_0_AHB_CLK 52
+#define CAM_CC_IPE_0_AREG_CLK 53
+#define CAM_CC_IPE_0_AXI_CLK 54
+#define CAM_CC_IPE_0_CLK 55
+#define CAM_CC_IPE_0_CLK_SRC 56
+#define CAM_CC_JPEG_CLK 57
+#define CAM_CC_JPEG_CLK_SRC 58
+#define CAM_CC_LRME_CLK 59
+#define CAM_CC_LRME_CLK_SRC 60
+#define CAM_CC_MCLK0_CLK 61
+#define CAM_CC_MCLK0_CLK_SRC 62
+#define CAM_CC_MCLK1_CLK 63
+#define CAM_CC_MCLK1_CLK_SRC 64
+#define CAM_CC_MCLK2_CLK 65
+#define CAM_CC_MCLK2_CLK_SRC 66
+#define CAM_CC_MCLK3_CLK 67
+#define CAM_CC_MCLK3_CLK_SRC 68
+#define CAM_CC_MCLK4_CLK 69
+#define CAM_CC_MCLK4_CLK_SRC 70
+#define CAM_CC_BPS_AHB_CLK 71
+#define CAM_CC_BPS_AREG_CLK 72
+#define CAM_CC_BPS_AXI_CLK 73
+#define CAM_CC_BPS_CLK 74
+#define CAM_CC_BPS_CLK_SRC 75
+#define CAM_CC_SLOW_AHB_CLK_SRC 76
+#define CAM_CC_SOC_AHB_CLK 77
+#define CAM_CC_SYS_TMR_CLK 78
+
+/* CAM_CC power domains */
+#define BPS_GDSC 0
+#define IFE_0_GDSC 1
+#define IFE_1_GDSC 2
+#define IPE_0_GDSC 3
+#define TITAN_TOP_GDSC 4
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_CAMNOC_BCR 1
+#define CAM_CC_CCI_0_BCR 2
+#define CAM_CC_CCI_1_BCR 3
+#define CAM_CC_CPAS_BCR 4
+#define CAM_CC_CSI0PHY_BCR 5
+#define CAM_CC_CSI1PHY_BCR 6
+#define CAM_CC_CSI2PHY_BCR 7
+#define CAM_CC_CSI3PHY_BCR 8
+#define CAM_CC_ICP_BCR 9
+#define CAM_CC_IFE_0_BCR 10
+#define CAM_CC_IFE_1_BCR 11
+#define CAM_CC_IFE_LITE_BCR 12
+#define CAM_CC_IPE_0_BCR 13
+#define CAM_CC_JPEG_BCR 14
+#define CAM_CC_LRME_BCR 15
+#define CAM_CC_MCLK0_BCR 16
+#define CAM_CC_MCLK1_BCR 17
+#define CAM_CC_MCLK2_BCR 18
+#define CAM_CC_MCLK3_BCR 19
+#define CAM_CC_MCLK4_BCR 20
+#define CAM_CC_TITAN_TOP_BCR 21
+
+#endif
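This header carries three separate index spaces - clocks, GDSC power domains and BCR resets - each referenced through a different device-tree property of the same provider node. A sketch under that assumption (node name, unit address and phandle label are illustrative only):

    #include <dt-bindings/clock/qcom,camcc-sc7180.h>

    camss: camera@acb3000 {
            clocks = <&camcc CAM_CC_IFE_0_CLK>,
                     <&camcc CAM_CC_IFE_0_CSID_CLK>;
            /* GDSC index space, distinct from the clock indices */
            power-domains = <&camcc IFE_0_GDSC>;
            /* BCR index space */
            resets = <&camcc CAM_CC_IFE_0_BCR>;
    };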
diff --git a/include/dt-bindings/clock/qcom,camcc-sc7280.h b/include/dt-bindings/clock/qcom,camcc-sc7280.h
new file mode 100644
index 000000000000..56640f407309
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,camcc-sc7280.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SC7280_H
+
+/* CAM_CC clocks */
+#define CAM_CC_PLL0 0
+#define CAM_CC_PLL0_OUT_EVEN 1
+#define CAM_CC_PLL0_OUT_ODD 2
+#define CAM_CC_PLL1 3
+#define CAM_CC_PLL1_OUT_EVEN 4
+#define CAM_CC_PLL2 5
+#define CAM_CC_PLL2_OUT_AUX 6
+#define CAM_CC_PLL2_OUT_AUX2 7
+#define CAM_CC_PLL3 8
+#define CAM_CC_PLL3_OUT_EVEN 9
+#define CAM_CC_PLL4 10
+#define CAM_CC_PLL4_OUT_EVEN 11
+#define CAM_CC_PLL5 12
+#define CAM_CC_PLL5_OUT_EVEN 13
+#define CAM_CC_PLL6 14
+#define CAM_CC_PLL6_OUT_EVEN 15
+#define CAM_CC_PLL6_OUT_ODD 16
+#define CAM_CC_BPS_AHB_CLK 17
+#define CAM_CC_BPS_AREG_CLK 18
+#define CAM_CC_BPS_AXI_CLK 19
+#define CAM_CC_BPS_CLK 20
+#define CAM_CC_BPS_CLK_SRC 21
+#define CAM_CC_CAMNOC_AXI_CLK 22
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 23
+#define CAM_CC_CAMNOC_DCD_XO_CLK 24
+#define CAM_CC_CCI_0_CLK 25
+#define CAM_CC_CCI_0_CLK_SRC 26
+#define CAM_CC_CCI_1_CLK 27
+#define CAM_CC_CCI_1_CLK_SRC 28
+#define CAM_CC_CORE_AHB_CLK 29
+#define CAM_CC_CPAS_AHB_CLK 30
+#define CAM_CC_CPHY_RX_CLK_SRC 31
+#define CAM_CC_CSI0PHYTIMER_CLK 32
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 33
+#define CAM_CC_CSI1PHYTIMER_CLK 34
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 35
+#define CAM_CC_CSI2PHYTIMER_CLK 36
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 37
+#define CAM_CC_CSI3PHYTIMER_CLK 38
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 39
+#define CAM_CC_CSI4PHYTIMER_CLK 40
+#define CAM_CC_CSI4PHYTIMER_CLK_SRC 41
+#define CAM_CC_CSIPHY0_CLK 42
+#define CAM_CC_CSIPHY1_CLK 43
+#define CAM_CC_CSIPHY2_CLK 44
+#define CAM_CC_CSIPHY3_CLK 45
+#define CAM_CC_CSIPHY4_CLK 46
+#define CAM_CC_FAST_AHB_CLK_SRC 47
+#define CAM_CC_GDSC_CLK 48
+#define CAM_CC_ICP_AHB_CLK 49
+#define CAM_CC_ICP_CLK 50
+#define CAM_CC_ICP_CLK_SRC 51
+#define CAM_CC_IFE_0_AXI_CLK 52
+#define CAM_CC_IFE_0_CLK 53
+#define CAM_CC_IFE_0_CLK_SRC 54
+#define CAM_CC_IFE_0_CPHY_RX_CLK 55
+#define CAM_CC_IFE_0_CSID_CLK 56
+#define CAM_CC_IFE_0_CSID_CLK_SRC 57
+#define CAM_CC_IFE_0_DSP_CLK 58
+#define CAM_CC_IFE_1_AXI_CLK 59
+#define CAM_CC_IFE_1_CLK 60
+#define CAM_CC_IFE_1_CLK_SRC 61
+#define CAM_CC_IFE_1_CPHY_RX_CLK 62
+#define CAM_CC_IFE_1_CSID_CLK 63
+#define CAM_CC_IFE_1_CSID_CLK_SRC 64
+#define CAM_CC_IFE_1_DSP_CLK 65
+#define CAM_CC_IFE_2_AXI_CLK 66
+#define CAM_CC_IFE_2_CLK 67
+#define CAM_CC_IFE_2_CLK_SRC 68
+#define CAM_CC_IFE_2_CPHY_RX_CLK 69
+#define CAM_CC_IFE_2_CSID_CLK 70
+#define CAM_CC_IFE_2_CSID_CLK_SRC 71
+#define CAM_CC_IFE_2_DSP_CLK 72
+#define CAM_CC_IFE_LITE_0_CLK 73
+#define CAM_CC_IFE_LITE_0_CLK_SRC 74
+#define CAM_CC_IFE_LITE_0_CPHY_RX_CLK 75
+#define CAM_CC_IFE_LITE_0_CSID_CLK 76
+#define CAM_CC_IFE_LITE_0_CSID_CLK_SRC 77
+#define CAM_CC_IFE_LITE_1_CLK 78
+#define CAM_CC_IFE_LITE_1_CLK_SRC 79
+#define CAM_CC_IFE_LITE_1_CPHY_RX_CLK 80
+#define CAM_CC_IFE_LITE_1_CSID_CLK 81
+#define CAM_CC_IFE_LITE_1_CSID_CLK_SRC 82
+#define CAM_CC_IPE_0_AHB_CLK 83
+#define CAM_CC_IPE_0_AREG_CLK 84
+#define CAM_CC_IPE_0_AXI_CLK 85
+#define CAM_CC_IPE_0_CLK 86
+#define CAM_CC_IPE_0_CLK_SRC 87
+#define CAM_CC_JPEG_CLK 88
+#define CAM_CC_JPEG_CLK_SRC 89
+#define CAM_CC_LRME_CLK 90
+#define CAM_CC_LRME_CLK_SRC 91
+#define CAM_CC_MCLK0_CLK 92
+#define CAM_CC_MCLK0_CLK_SRC 93
+#define CAM_CC_MCLK1_CLK 94
+#define CAM_CC_MCLK1_CLK_SRC 95
+#define CAM_CC_MCLK2_CLK 96
+#define CAM_CC_MCLK2_CLK_SRC 97
+#define CAM_CC_MCLK3_CLK 98
+#define CAM_CC_MCLK3_CLK_SRC 99
+#define CAM_CC_MCLK4_CLK 100
+#define CAM_CC_MCLK4_CLK_SRC 101
+#define CAM_CC_MCLK5_CLK 102
+#define CAM_CC_MCLK5_CLK_SRC 103
+#define CAM_CC_SLEEP_CLK 104
+#define CAM_CC_SLEEP_CLK_SRC 105
+#define CAM_CC_SLOW_AHB_CLK_SRC 106
+#define CAM_CC_XO_CLK_SRC 107
+
+/* CAM_CC power domains */
+#define CAM_CC_BPS_GDSC 0
+#define CAM_CC_IFE_0_GDSC 1
+#define CAM_CC_IFE_1_GDSC 2
+#define CAM_CC_IFE_2_GDSC 3
+#define CAM_CC_IPE_0_GDSC 4
+#define CAM_CC_TITAN_TOP_GDSC 5
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,camcc-sm8250.h b/include/dt-bindings/clock/qcom,camcc-sm8250.h
new file mode 100644
index 000000000000..383ead17608d
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,camcc-sm8250.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8250_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8250_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_AREG_CLK 1
+#define CAM_CC_BPS_AXI_CLK 2
+#define CAM_CC_BPS_CLK 3
+#define CAM_CC_BPS_CLK_SRC 4
+#define CAM_CC_CAMNOC_AXI_CLK 5
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 6
+#define CAM_CC_CAMNOC_DCD_XO_CLK 7
+#define CAM_CC_CCI_0_CLK 8
+#define CAM_CC_CCI_0_CLK_SRC 9
+#define CAM_CC_CCI_1_CLK 10
+#define CAM_CC_CCI_1_CLK_SRC 11
+#define CAM_CC_CORE_AHB_CLK 12
+#define CAM_CC_CPAS_AHB_CLK 13
+#define CAM_CC_CPHY_RX_CLK_SRC 14
+#define CAM_CC_CSI0PHYTIMER_CLK 15
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 16
+#define CAM_CC_CSI1PHYTIMER_CLK 17
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 18
+#define CAM_CC_CSI2PHYTIMER_CLK 19
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 20
+#define CAM_CC_CSI3PHYTIMER_CLK 21
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 22
+#define CAM_CC_CSI4PHYTIMER_CLK 23
+#define CAM_CC_CSI4PHYTIMER_CLK_SRC 24
+#define CAM_CC_CSI5PHYTIMER_CLK 25
+#define CAM_CC_CSI5PHYTIMER_CLK_SRC 26
+#define CAM_CC_CSIPHY0_CLK 27
+#define CAM_CC_CSIPHY1_CLK 28
+#define CAM_CC_CSIPHY2_CLK 29
+#define CAM_CC_CSIPHY3_CLK 30
+#define CAM_CC_CSIPHY4_CLK 31
+#define CAM_CC_CSIPHY5_CLK 32
+#define CAM_CC_FAST_AHB_CLK_SRC 33
+#define CAM_CC_FD_CORE_CLK 34
+#define CAM_CC_FD_CORE_CLK_SRC 35
+#define CAM_CC_FD_CORE_UAR_CLK 36
+#define CAM_CC_GDSC_CLK 37
+#define CAM_CC_ICP_AHB_CLK 38
+#define CAM_CC_ICP_CLK 39
+#define CAM_CC_ICP_CLK_SRC 40
+#define CAM_CC_IFE_0_AHB_CLK 41
+#define CAM_CC_IFE_0_AREG_CLK 42
+#define CAM_CC_IFE_0_AXI_CLK 43
+#define CAM_CC_IFE_0_CLK 44
+#define CAM_CC_IFE_0_CLK_SRC 45
+#define CAM_CC_IFE_0_CPHY_RX_CLK 46
+#define CAM_CC_IFE_0_CSID_CLK 47
+#define CAM_CC_IFE_0_CSID_CLK_SRC 48
+#define CAM_CC_IFE_0_DSP_CLK 49
+#define CAM_CC_IFE_1_AHB_CLK 50
+#define CAM_CC_IFE_1_AREG_CLK 51
+#define CAM_CC_IFE_1_AXI_CLK 52
+#define CAM_CC_IFE_1_CLK 53
+#define CAM_CC_IFE_1_CLK_SRC 54
+#define CAM_CC_IFE_1_CPHY_RX_CLK 55
+#define CAM_CC_IFE_1_CSID_CLK 56
+#define CAM_CC_IFE_1_CSID_CLK_SRC 57
+#define CAM_CC_IFE_1_DSP_CLK 58
+#define CAM_CC_IFE_LITE_AHB_CLK 59
+#define CAM_CC_IFE_LITE_AXI_CLK 60
+#define CAM_CC_IFE_LITE_CLK 61
+#define CAM_CC_IFE_LITE_CLK_SRC 62
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 63
+#define CAM_CC_IFE_LITE_CSID_CLK 64
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 65
+#define CAM_CC_IPE_0_AHB_CLK 66
+#define CAM_CC_IPE_0_AREG_CLK 67
+#define CAM_CC_IPE_0_AXI_CLK 68
+#define CAM_CC_IPE_0_CLK 69
+#define CAM_CC_IPE_0_CLK_SRC 70
+#define CAM_CC_JPEG_CLK 71
+#define CAM_CC_JPEG_CLK_SRC 72
+#define CAM_CC_MCLK0_CLK 73
+#define CAM_CC_MCLK0_CLK_SRC 74
+#define CAM_CC_MCLK1_CLK 75
+#define CAM_CC_MCLK1_CLK_SRC 76
+#define CAM_CC_MCLK2_CLK 77
+#define CAM_CC_MCLK2_CLK_SRC 78
+#define CAM_CC_MCLK3_CLK 79
+#define CAM_CC_MCLK3_CLK_SRC 80
+#define CAM_CC_MCLK4_CLK 81
+#define CAM_CC_MCLK4_CLK_SRC 82
+#define CAM_CC_MCLK5_CLK 83
+#define CAM_CC_MCLK5_CLK_SRC 84
+#define CAM_CC_MCLK6_CLK 85
+#define CAM_CC_MCLK6_CLK_SRC 86
+#define CAM_CC_PLL0 87
+#define CAM_CC_PLL0_OUT_EVEN 88
+#define CAM_CC_PLL0_OUT_ODD 89
+#define CAM_CC_PLL1 90
+#define CAM_CC_PLL1_OUT_EVEN 91
+#define CAM_CC_PLL2 92
+#define CAM_CC_PLL2_OUT_MAIN 93
+#define CAM_CC_PLL3 94
+#define CAM_CC_PLL3_OUT_EVEN 95
+#define CAM_CC_PLL4 96
+#define CAM_CC_PLL4_OUT_EVEN 97
+#define CAM_CC_SBI_AHB_CLK 98
+#define CAM_CC_SBI_AXI_CLK 99
+#define CAM_CC_SBI_CLK 100
+#define CAM_CC_SBI_CPHY_RX_CLK 101
+#define CAM_CC_SBI_CSID_CLK 102
+#define CAM_CC_SBI_CSID_CLK_SRC 103
+#define CAM_CC_SBI_DIV_CLK_SRC 104
+#define CAM_CC_SBI_IFE_0_CLK 105
+#define CAM_CC_SBI_IFE_1_CLK 106
+#define CAM_CC_SLEEP_CLK 107
+#define CAM_CC_SLEEP_CLK_SRC 108
+#define CAM_CC_SLOW_AHB_CLK_SRC 109
+#define CAM_CC_XO_CLK_SRC 110
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_ICP_BCR 1
+#define CAM_CC_IFE_0_BCR 2
+#define CAM_CC_IFE_1_BCR 3
+#define CAM_CC_IPE_0_BCR 4
+#define CAM_CC_SBI_BCR 5
+
+/* CAM_CC GDSCRs */
+#define BPS_GDSC 0
+#define IPE_0_GDSC 1
+#define SBI_GDSC 2
+#define IFE_0_GDSC 3
+#define IFE_1_GDSC 4
+#define TITAN_TOP_GDSC 5
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-qcm2290.h b/include/dt-bindings/clock/qcom,dispcc-qcm2290.h
new file mode 100644
index 000000000000..1db513d6b3ee
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-qcm2290.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_QCM2290_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_QCM2290_H
+
+/* DISP_CC clocks */
+#define DISP_CC_PLL0 0
+#define DISP_CC_MDSS_AHB_CLK 1
+#define DISP_CC_MDSS_AHB_CLK_SRC 2
+#define DISP_CC_MDSS_BYTE0_CLK 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 6
+#define DISP_CC_MDSS_ESC0_CLK 7
+#define DISP_CC_MDSS_ESC0_CLK_SRC 8
+#define DISP_CC_MDSS_MDP_CLK 9
+#define DISP_CC_MDSS_MDP_CLK_SRC 10
+#define DISP_CC_MDSS_MDP_LUT_CLK 11
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 12
+#define DISP_CC_MDSS_PCLK0_CLK 13
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 14
+#define DISP_CC_MDSS_VSYNC_CLK 15
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 16
+#define DISP_CC_SLEEP_CLK 17
+#define DISP_CC_SLEEP_CLK_SRC 18
+#define DISP_CC_XO_CLK 19
+#define DISP_CC_XO_CLK_SRC 20
+
+#define MDSS_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sc7280.h b/include/dt-bindings/clock/qcom,dispcc-sc7280.h
new file mode 100644
index 000000000000..a4a692c20acf
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sc7280.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SC7280_H
+
+/* DISP_CC clocks */
+#define DISP_CC_PLL0 0
+#define DISP_CC_MDSS_AHB_CLK 1
+#define DISP_CC_MDSS_AHB_CLK_SRC 2
+#define DISP_CC_MDSS_BYTE0_CLK 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 6
+#define DISP_CC_MDSS_DP_AUX_CLK 7
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC 8
+#define DISP_CC_MDSS_DP_CRYPTO_CLK 9
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 10
+#define DISP_CC_MDSS_DP_LINK_CLK 11
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC 12
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC 13
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK 14
+#define DISP_CC_MDSS_DP_PIXEL_CLK 15
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 16
+#define DISP_CC_MDSS_EDP_AUX_CLK 17
+#define DISP_CC_MDSS_EDP_AUX_CLK_SRC 18
+#define DISP_CC_MDSS_EDP_LINK_CLK 19
+#define DISP_CC_MDSS_EDP_LINK_CLK_SRC 20
+#define DISP_CC_MDSS_EDP_LINK_DIV_CLK_SRC 21
+#define DISP_CC_MDSS_EDP_LINK_INTF_CLK 22
+#define DISP_CC_MDSS_EDP_PIXEL_CLK 23
+#define DISP_CC_MDSS_EDP_PIXEL_CLK_SRC 24
+#define DISP_CC_MDSS_ESC0_CLK 25
+#define DISP_CC_MDSS_ESC0_CLK_SRC 26
+#define DISP_CC_MDSS_MDP_CLK 27
+#define DISP_CC_MDSS_MDP_CLK_SRC 28
+#define DISP_CC_MDSS_MDP_LUT_CLK 29
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 30
+#define DISP_CC_MDSS_PCLK0_CLK 31
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 32
+#define DISP_CC_MDSS_ROT_CLK 33
+#define DISP_CC_MDSS_ROT_CLK_SRC 34
+#define DISP_CC_MDSS_RSCC_AHB_CLK 35
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 36
+#define DISP_CC_MDSS_VSYNC_CLK 37
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 38
+#define DISP_CC_SLEEP_CLK 39
+#define DISP_CC_XO_CLK 40
+
+/* DISP_CC power domains */
+#define DISP_CC_MDSS_CORE_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sm6125.h b/include/dt-bindings/clock/qcom,dispcc-sm6125.h
new file mode 100644
index 000000000000..4ff974f4fcc3
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sm6125.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6125_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6125_H
+
+#define DISP_CC_PLL0 0
+#define DISP_CC_MDSS_AHB_CLK 1
+#define DISP_CC_MDSS_AHB_CLK_SRC 2
+#define DISP_CC_MDSS_BYTE0_CLK 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 5
+#define DISP_CC_MDSS_DP_AUX_CLK 6
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC 7
+#define DISP_CC_MDSS_DP_CRYPTO_CLK 8
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 9
+#define DISP_CC_MDSS_DP_LINK_CLK 10
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC 11
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK 12
+#define DISP_CC_MDSS_DP_PIXEL_CLK 13
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 14
+#define DISP_CC_MDSS_ESC0_CLK 15
+#define DISP_CC_MDSS_ESC0_CLK_SRC 16
+#define DISP_CC_MDSS_MDP_CLK 17
+#define DISP_CC_MDSS_MDP_CLK_SRC 18
+#define DISP_CC_MDSS_MDP_LUT_CLK 19
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 20
+#define DISP_CC_MDSS_PCLK0_CLK 21
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 22
+#define DISP_CC_MDSS_ROT_CLK 23
+#define DISP_CC_MDSS_ROT_CLK_SRC 24
+#define DISP_CC_MDSS_VSYNC_CLK 25
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 26
+#define DISP_CC_XO_CLK 27
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sm6350.h b/include/dt-bindings/clock/qcom,dispcc-sm6350.h
new file mode 100644
index 000000000000..cb54aae2723e
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sm6350.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6350_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6350_H
+
+/* DISP_CC clocks */
+#define DISP_CC_PLL0 0
+#define DISP_CC_MDSS_AHB_CLK 1
+#define DISP_CC_MDSS_AHB_CLK_SRC 2
+#define DISP_CC_MDSS_BYTE0_CLK 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 6
+#define DISP_CC_MDSS_DP_AUX_CLK 7
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC 8
+#define DISP_CC_MDSS_DP_CRYPTO_CLK 9
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 10
+#define DISP_CC_MDSS_DP_LINK_CLK 11
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC 12
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC 13
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK 14
+#define DISP_CC_MDSS_DP_PIXEL_CLK 15
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 16
+#define DISP_CC_MDSS_ESC0_CLK 17
+#define DISP_CC_MDSS_ESC0_CLK_SRC 18
+#define DISP_CC_MDSS_MDP_CLK 19
+#define DISP_CC_MDSS_MDP_CLK_SRC 20
+#define DISP_CC_MDSS_MDP_LUT_CLK 21
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 22
+#define DISP_CC_MDSS_PCLK0_CLK 23
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 24
+#define DISP_CC_MDSS_ROT_CLK 25
+#define DISP_CC_MDSS_ROT_CLK_SRC 26
+#define DISP_CC_MDSS_RSCC_AHB_CLK 27
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 28
+#define DISP_CC_MDSS_VSYNC_CLK 29
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 30
+#define DISP_CC_SLEEP_CLK 31
+#define DISP_CC_XO_CLK 32
+
+/* GDSCs */
+#define MDSS_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sm8150.h b/include/dt-bindings/clock/qcom,dispcc-sm8150.h
new file mode 120000
index 000000000000..0312b4544acb
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sm8150.h
@@ -0,0 +1 @@
+qcom,dispcc-sm8250.h
\ No newline at end of file
diff --git a/include/dt-bindings/clock/qcom,dispcc-sm8250.h b/include/dt-bindings/clock/qcom,dispcc-sm8250.h
new file mode 100644
index 000000000000..ce001cbbc27f
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sm8250.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM8250_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM8250_H
+
+/* DISP_CC clock registers */
+#define DISP_CC_MDSS_AHB_CLK 0
+#define DISP_CC_MDSS_AHB_CLK_SRC 1
+#define DISP_CC_MDSS_BYTE0_CLK 2
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 5
+#define DISP_CC_MDSS_BYTE1_CLK 6
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 7
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 8
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 9
+#define DISP_CC_MDSS_DP_AUX1_CLK 10
+#define DISP_CC_MDSS_DP_AUX1_CLK_SRC 11
+#define DISP_CC_MDSS_DP_AUX_CLK 12
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC 13
+#define DISP_CC_MDSS_DP_LINK1_CLK 14
+#define DISP_CC_MDSS_DP_LINK1_CLK_SRC 15
+#define DISP_CC_MDSS_DP_LINK1_DIV_CLK_SRC 16
+#define DISP_CC_MDSS_DP_LINK1_INTF_CLK 17
+#define DISP_CC_MDSS_DP_LINK_CLK 18
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC 19
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC 20
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK 21
+#define DISP_CC_MDSS_DP_PIXEL1_CLK 22
+#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC 23
+#define DISP_CC_MDSS_DP_PIXEL2_CLK 24
+#define DISP_CC_MDSS_DP_PIXEL2_CLK_SRC 25
+#define DISP_CC_MDSS_DP_PIXEL_CLK 26
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 27
+#define DISP_CC_MDSS_ESC0_CLK 28
+#define DISP_CC_MDSS_ESC0_CLK_SRC 29
+#define DISP_CC_MDSS_ESC1_CLK 30
+#define DISP_CC_MDSS_ESC1_CLK_SRC 31
+#define DISP_CC_MDSS_MDP_CLK 32
+#define DISP_CC_MDSS_MDP_CLK_SRC 33
+#define DISP_CC_MDSS_MDP_LUT_CLK 34
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 35
+#define DISP_CC_MDSS_PCLK0_CLK 36
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 37
+#define DISP_CC_MDSS_PCLK1_CLK 38
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 39
+#define DISP_CC_MDSS_ROT_CLK 40
+#define DISP_CC_MDSS_ROT_CLK_SRC 41
+#define DISP_CC_MDSS_RSCC_AHB_CLK 42
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 43
+#define DISP_CC_MDSS_VSYNC_CLK 44
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 45
+#define DISP_CC_PLL0 46
+#define DISP_CC_PLL1 47
+#define DISP_CC_MDSS_EDP_AUX_CLK 48
+#define DISP_CC_MDSS_EDP_AUX_CLK_SRC 49
+#define DISP_CC_MDSS_EDP_GTC_CLK 50
+#define DISP_CC_MDSS_EDP_GTC_CLK_SRC 51
+#define DISP_CC_MDSS_EDP_LINK_CLK 52
+#define DISP_CC_MDSS_EDP_LINK_CLK_SRC 53
+#define DISP_CC_MDSS_EDP_LINK_INTF_CLK 54
+#define DISP_CC_MDSS_EDP_PIXEL_CLK 55
+#define DISP_CC_MDSS_EDP_PIXEL_CLK_SRC 56
+
+/* DISP_CC Reset */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_RSCC_BCR 1
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sm8350.h b/include/dt-bindings/clock/qcom,dispcc-sm8350.h
new file mode 120000
index 000000000000..0312b4544acb
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sm8350.h
@@ -0,0 +1 @@
+qcom,dispcc-sm8250.h
\ No newline at end of file
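The sm8150 and sm8350 display-clock headers are added as symlinks (file mode 120000) to the sm8250 header above, so the three SoCs share one set of DISP_CC_* indices. A device tree for any of them can include its own name and resolve to the same constants:

    /* resolves to the sm8250 constants above */
    #include <dt-bindings/clock/qcom,dispcc-sm8150.h>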
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq806x.h b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
index 7deec14a6dee..02262d2ac899 100644
--- a/include/dt-bindings/clock/qcom,gcc-ipq806x.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
@@ -240,7 +240,7 @@
#define PLL14 232
#define PLL14_VOTE 233
#define PLL18 234
-#define CE5_SRC 235
+#define CE5_A_CLK 235
#define CE5_H_CLK 236
#define CE5_CORE_CLK 237
#define CE3_SLEEP_CLK 238
@@ -283,5 +283,8 @@
#define EBI2_AON_CLK 281
#define NSSTCM_CLK_SRC 282
#define NSSTCM_CLK 283
+#define CE5_A_CLK_SRC 285
+#define CE5_H_CLK_SRC 286
+#define CE5_CORE_CLK_SRC 287
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq8074.h b/include/dt-bindings/clock/qcom,gcc-ipq8074.h
index 4de4811a3540..e4991d303708 100644
--- a/include/dt-bindings/clock/qcom,gcc-ipq8074.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq8074.h
@@ -230,6 +230,10 @@
#define GCC_GP1_CLK 221
#define GCC_GP2_CLK 222
#define GCC_GP3_CLK 223
+#define GCC_PCIE0_AXI_S_BRIDGE_CLK 224
+#define GCC_PCIE0_RCHNG_CLK_SRC 225
+#define GCC_PCIE0_RCHNG_CLK 226
+#define GCC_CRYPTO_PPE_CLK 227
#define GCC_BLSP1_BCR 0
#define GCC_BLSP1_QUP1_BCR 1
@@ -362,5 +366,9 @@
#define GCC_PCIE1_AXI_SLAVE_ARES 128
#define GCC_PCIE1_AHB_ARES 129
#define GCC_PCIE1_AXI_MASTER_STICKY_ARES 130
+#define GCC_PCIE0_AXI_SLAVE_STICKY_ARES 131
+
+#define USB0_GDSC 0
+#define USB1_GDSC 1
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-mdm9607.h b/include/dt-bindings/clock/qcom,gcc-mdm9607.h
new file mode 100644
index 000000000000..357a680a40da
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-mdm9607.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_9607_H
+#define _DT_BINDINGS_CLK_MSM_GCC_9607_H
+
+#define GPLL0 0
+#define GPLL0_EARLY 1
+#define GPLL1 2
+#define GPLL1_VOTE 3
+#define GPLL2 4
+#define GPLL2_EARLY 5
+#define PCNOC_BFDCD_CLK_SRC 6
+#define SYSTEM_NOC_BFDCD_CLK_SRC 7
+#define GCC_SMMU_CFG_CLK 8
+#define APSS_AHB_CLK_SRC 9
+#define GCC_QDSS_DAP_CLK 10
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 11
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 12
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 13
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 14
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 15
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 16
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 17
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 18
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 19
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 20
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 21
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 22
+#define BLSP1_UART1_APPS_CLK_SRC 23
+#define BLSP1_UART2_APPS_CLK_SRC 24
+#define CRYPTO_CLK_SRC 25
+#define GP1_CLK_SRC 26
+#define GP2_CLK_SRC 27
+#define GP3_CLK_SRC 28
+#define PDM2_CLK_SRC 29
+#define SDCC1_APPS_CLK_SRC 30
+#define SDCC2_APPS_CLK_SRC 31
+#define APSS_TCU_CLK_SRC 32
+#define USB_HS_SYSTEM_CLK_SRC 33
+#define GCC_BLSP1_AHB_CLK 34
+#define GCC_BLSP1_SLEEP_CLK 35
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 36
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 37
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 38
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 39
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 40
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 41
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 42
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 43
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 44
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 45
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 46
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 47
+#define GCC_BLSP1_UART1_APPS_CLK 48
+#define GCC_BLSP1_UART2_APPS_CLK 49
+#define GCC_BOOT_ROM_AHB_CLK 50
+#define GCC_CRYPTO_AHB_CLK 51
+#define GCC_CRYPTO_AXI_CLK 52
+#define GCC_CRYPTO_CLK 53
+#define GCC_GP1_CLK 54
+#define GCC_GP2_CLK 55
+#define GCC_GP3_CLK 56
+#define GCC_MSS_CFG_AHB_CLK 57
+#define GCC_PDM2_CLK 58
+#define GCC_PDM_AHB_CLK 59
+#define GCC_PRNG_AHB_CLK 60
+#define GCC_SDCC1_AHB_CLK 61
+#define GCC_SDCC1_APPS_CLK 62
+#define GCC_SDCC2_AHB_CLK 63
+#define GCC_SDCC2_APPS_CLK 64
+#define GCC_USB2A_PHY_SLEEP_CLK 65
+#define GCC_USB_HS_AHB_CLK 66
+#define GCC_USB_HS_SYSTEM_CLK 67
+#define GCC_APSS_TCU_CLK 68
+#define GCC_MSS_Q6_BIMC_AXI_CLK 69
+#define BIMC_PLL 70
+#define BIMC_PLL_VOTE 71
+#define BIMC_DDR_CLK_SRC 72
+#define BLSP1_UART3_APPS_CLK_SRC 73
+#define BLSP1_UART4_APPS_CLK_SRC 74
+#define BLSP1_UART5_APPS_CLK_SRC 75
+#define BLSP1_UART6_APPS_CLK_SRC 76
+#define GCC_BLSP1_UART3_APPS_CLK 77
+#define GCC_BLSP1_UART4_APPS_CLK 78
+#define GCC_BLSP1_UART5_APPS_CLK 79
+#define GCC_BLSP1_UART6_APPS_CLK 80
+#define GCC_APSS_AHB_CLK 81
+#define GCC_APSS_AXI_CLK 82
+#define GCC_USB_HS_PHY_CFG_AHB_CLK 83
+#define GCC_USB_HSIC_CLK_SRC 84
+#define GCC_USB_HSIC_IO_CAL_CLK_SRC 85
+#define GCC_USB_HSIC_SYSTEM_CLK_SRC 86
+
+/* Resets */
+#define USB2_HS_PHY_ONLY_BCR 0
+#define QUSB2_PHY_BCR 1
+#define GCC_MSS_RESTART 2
+#define USB_HS_HSIC_BCR 3
+#define USB_HS_BCR 4
+
+#endif
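The entries under the Resets comment live in their own index space, which the same GCC node would expose as a reset controller. A hedged consumer sketch; the node name, unit address and pairing of clocks with the USB reset are assumptions for illustration:

    #include <dt-bindings/clock/qcom,gcc-mdm9607.h>

    usb: usb@78d9000 {
            clocks = <&gcc GCC_USB_HS_AHB_CLK>,
                     <&gcc GCC_USB_HS_SYSTEM_CLK>;
            /* index from the Resets block above */
            resets = <&gcc USB_HS_BCR>;
    };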
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8909.h b/include/dt-bindings/clock/qcom,gcc-msm8909.h
new file mode 100644
index 000000000000..4394ba003425
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8909.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 Kernkonzept GmbH.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_8909_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_8909_H
+
+/* PLLs */
+#define GPLL0_EARLY 0
+#define GPLL0 1
+#define GPLL1 2
+#define GPLL1_VOTE 3
+#define GPLL2_EARLY 4
+#define GPLL2 5
+#define BIMC_PLL_EARLY 6
+#define BIMC_PLL 7
+
+/* RCGs */
+#define APSS_AHB_CLK_SRC 8
+#define BIMC_DDR_CLK_SRC 9
+#define BIMC_GPU_CLK_SRC 10
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 11
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 12
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 13
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 14
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 15
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 16
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 17
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 18
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 19
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 20
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 21
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 22
+#define BLSP1_UART1_APPS_CLK_SRC 23
+#define BLSP1_UART2_APPS_CLK_SRC 24
+#define BYTE0_CLK_SRC 25
+#define CAMSS_GP0_CLK_SRC 26
+#define CAMSS_GP1_CLK_SRC 27
+#define CAMSS_TOP_AHB_CLK_SRC 28
+#define CODEC_DIGCODEC_CLK_SRC 29
+#define CRYPTO_CLK_SRC 30
+#define CSI0_CLK_SRC 31
+#define CSI0PHYTIMER_CLK_SRC 32
+#define CSI1_CLK_SRC 33
+#define ESC0_CLK_SRC 34
+#define GFX3D_CLK_SRC 35
+#define GP1_CLK_SRC 36
+#define GP2_CLK_SRC 37
+#define GP3_CLK_SRC 38
+#define MCLK0_CLK_SRC 39
+#define MCLK1_CLK_SRC 40
+#define MDP_CLK_SRC 41
+#define PCLK0_CLK_SRC 42
+#define PCNOC_BFDCD_CLK_SRC 43
+#define PDM2_CLK_SRC 44
+#define SDCC1_APPS_CLK_SRC 45
+#define SDCC2_APPS_CLK_SRC 46
+#define SYSTEM_NOC_BFDCD_CLK_SRC 47
+#define ULTAUDIO_AHBFABRIC_CLK_SRC 48
+#define ULTAUDIO_LPAIF_AUX_I2S_CLK_SRC 49
+#define ULTAUDIO_LPAIF_PRI_I2S_CLK_SRC 50
+#define ULTAUDIO_LPAIF_SEC_I2S_CLK_SRC 51
+#define ULTAUDIO_XO_CLK_SRC 52
+#define USB_HS_SYSTEM_CLK_SRC 53
+#define VCODEC0_CLK_SRC 54
+#define VFE0_CLK_SRC 55
+#define VSYNC_CLK_SRC 56
+
+/* Voteable Clocks */
+#define GCC_APSS_TCU_CLK 57
+#define GCC_BLSP1_AHB_CLK 58
+#define GCC_BLSP1_SLEEP_CLK 59
+#define GCC_BOOT_ROM_AHB_CLK 60
+#define GCC_CRYPTO_CLK 61
+#define GCC_CRYPTO_AHB_CLK 62
+#define GCC_CRYPTO_AXI_CLK 63
+#define GCC_GFX_TBU_CLK 64
+#define GCC_GFX_TCU_CLK 65
+#define GCC_GTCU_AHB_CLK 66
+#define GCC_MDP_TBU_CLK 67
+#define GCC_PRNG_AHB_CLK 68
+#define GCC_SMMU_CFG_CLK 69
+#define GCC_VENUS_TBU_CLK 70
+#define GCC_VFE_TBU_CLK 71
+
+/* Branches */
+#define GCC_BIMC_GFX_CLK 72
+#define GCC_BIMC_GPU_CLK 73
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 74
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 75
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 76
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 77
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 78
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 79
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 80
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 81
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 82
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 83
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 84
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 85
+#define GCC_BLSP1_UART1_APPS_CLK 86
+#define GCC_BLSP1_UART2_APPS_CLK 87
+#define GCC_CAMSS_AHB_CLK 88
+#define GCC_CAMSS_CSI0_CLK 89
+#define GCC_CAMSS_CSI0_AHB_CLK 90
+#define GCC_CAMSS_CSI0PHY_CLK 91
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 92
+#define GCC_CAMSS_CSI0PIX_CLK 93
+#define GCC_CAMSS_CSI0RDI_CLK 94
+#define GCC_CAMSS_CSI1_CLK 95
+#define GCC_CAMSS_CSI1_AHB_CLK 96
+#define GCC_CAMSS_CSI1PHY_CLK 97
+#define GCC_CAMSS_CSI1PIX_CLK 98
+#define GCC_CAMSS_CSI1RDI_CLK 99
+#define GCC_CAMSS_CSI_VFE0_CLK 100
+#define GCC_CAMSS_GP0_CLK 101
+#define GCC_CAMSS_GP1_CLK 102
+#define GCC_CAMSS_ISPIF_AHB_CLK 103
+#define GCC_CAMSS_MCLK0_CLK 104
+#define GCC_CAMSS_MCLK1_CLK 105
+#define GCC_CAMSS_TOP_AHB_CLK 106
+#define GCC_CAMSS_VFE0_CLK 107
+#define GCC_CAMSS_VFE_AHB_CLK 108
+#define GCC_CAMSS_VFE_AXI_CLK 109
+#define GCC_CODEC_DIGCODEC_CLK 110
+#define GCC_GP1_CLK 111
+#define GCC_GP2_CLK 112
+#define GCC_GP3_CLK 113
+#define GCC_MDSS_AHB_CLK 114
+#define GCC_MDSS_AXI_CLK 115
+#define GCC_MDSS_BYTE0_CLK 116
+#define GCC_MDSS_ESC0_CLK 117
+#define GCC_MDSS_MDP_CLK 118
+#define GCC_MDSS_PCLK0_CLK 119
+#define GCC_MDSS_VSYNC_CLK 120
+#define GCC_MSS_CFG_AHB_CLK 121
+#define GCC_MSS_Q6_BIMC_AXI_CLK 122
+#define GCC_OXILI_AHB_CLK 123
+#define GCC_OXILI_GFX3D_CLK 124
+#define GCC_PDM2_CLK 125
+#define GCC_PDM_AHB_CLK 126
+#define GCC_SDCC1_AHB_CLK 127
+#define GCC_SDCC1_APPS_CLK 128
+#define GCC_SDCC2_AHB_CLK 129
+#define GCC_SDCC2_APPS_CLK 130
+#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK 131
+#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_LPM_CLK 132
+#define GCC_ULTAUDIO_AVSYNC_XO_CLK 133
+#define GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK 134
+#define GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK 135
+#define GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK 136
+#define GCC_ULTAUDIO_PCNOC_MPORT_CLK 137
+#define GCC_ULTAUDIO_PCNOC_SWAY_CLK 138
+#define GCC_ULTAUDIO_STC_XO_CLK 139
+#define GCC_USB2A_PHY_SLEEP_CLK 140
+#define GCC_USB_HS_AHB_CLK 141
+#define GCC_USB_HS_PHY_CFG_AHB_CLK 142
+#define GCC_USB_HS_SYSTEM_CLK 143
+#define GCC_VENUS0_AHB_CLK 144
+#define GCC_VENUS0_AXI_CLK 145
+#define GCC_VENUS0_CORE0_VCODEC0_CLK 146
+#define GCC_VENUS0_VCODEC0_CLK 147
+
+/* Resets */
+#define GCC_AUDIO_CORE_BCR 0
+#define GCC_BLSP1_BCR 1
+#define GCC_BLSP1_QUP1_BCR 2
+#define GCC_BLSP1_QUP2_BCR 3
+#define GCC_BLSP1_QUP3_BCR 4
+#define GCC_BLSP1_QUP4_BCR 5
+#define GCC_BLSP1_QUP5_BCR 6
+#define GCC_BLSP1_QUP6_BCR 7
+#define GCC_BLSP1_UART1_BCR 8
+#define GCC_BLSP1_UART2_BCR 9
+#define GCC_CAMSS_CSI0_BCR 10
+#define GCC_CAMSS_CSI0PHY_BCR 11
+#define GCC_CAMSS_CSI0PIX_BCR 12
+#define GCC_CAMSS_CSI0RDI_BCR 13
+#define GCC_CAMSS_CSI1_BCR 14
+#define GCC_CAMSS_CSI1PHY_BCR 15
+#define GCC_CAMSS_CSI1PIX_BCR 16
+#define GCC_CAMSS_CSI1RDI_BCR 17
+#define GCC_CAMSS_CSI_VFE0_BCR 18
+#define GCC_CAMSS_GP0_BCR 19
+#define GCC_CAMSS_GP1_BCR 20
+#define GCC_CAMSS_ISPIF_BCR 21
+#define GCC_CAMSS_MCLK0_BCR 22
+#define GCC_CAMSS_MCLK1_BCR 23
+#define GCC_CAMSS_PHY0_BCR 24
+#define GCC_CAMSS_TOP_BCR 25
+#define GCC_CAMSS_TOP_AHB_BCR 26
+#define GCC_CAMSS_VFE_BCR 27
+#define GCC_CRYPTO_BCR 28
+#define GCC_MDSS_BCR 29
+#define GCC_OXILI_BCR 30
+#define GCC_PDM_BCR 31
+#define GCC_PRNG_BCR 32
+#define GCC_QUSB2_PHY_BCR 33
+#define GCC_SDCC1_BCR 34
+#define GCC_SDCC2_BCR 35
+#define GCC_ULT_AUDIO_BCR 36
+#define GCC_USB2A_PHY_BCR 37
+#define GCC_USB2_HS_PHY_ONLY_BCR 38
+#define GCC_USB_HS_BCR 39
+#define GCC_VENUS0_BCR 40
+
+/* Subsystem Restart */
+#define GCC_MSS_RESTART 41
+
+/* Power Domains */
+#define MDSS_GDSC 0
+#define OXILI_GDSC 1
+#define VENUS_GDSC 2
+#define VENUS_CORE0_GDSC 3
+#define VFE_GDSC 4
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8939.h b/include/dt-bindings/clock/qcom,gcc-msm8939.h
new file mode 100644
index 000000000000..2d545ed0d35a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8939.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_8939_H
+#define _DT_BINDINGS_CLK_MSM_GCC_8939_H
+
+#define GPLL0 0
+#define GPLL0_VOTE 1
+#define BIMC_PLL 2
+#define BIMC_PLL_VOTE 3
+#define GPLL1 4
+#define GPLL1_VOTE 5
+#define GPLL2 6
+#define GPLL2_VOTE 7
+#define PCNOC_BFDCD_CLK_SRC 8
+#define SYSTEM_NOC_BFDCD_CLK_SRC 9
+#define CAMSS_AHB_CLK_SRC 10
+#define APSS_AHB_CLK_SRC 11
+#define CSI0_CLK_SRC 12
+#define CSI1_CLK_SRC 13
+#define GFX3D_CLK_SRC 14
+#define VFE0_CLK_SRC 15
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 16
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 17
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 18
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 19
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 20
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 21
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 22
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 23
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 24
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 25
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 26
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 27
+#define BLSP1_UART1_APPS_CLK_SRC 28
+#define BLSP1_UART2_APPS_CLK_SRC 29
+#define CCI_CLK_SRC 30
+#define CAMSS_GP0_CLK_SRC 31
+#define CAMSS_GP1_CLK_SRC 32
+#define JPEG0_CLK_SRC 33
+#define MCLK0_CLK_SRC 34
+#define MCLK1_CLK_SRC 35
+#define CSI0PHYTIMER_CLK_SRC 36
+#define CSI1PHYTIMER_CLK_SRC 37
+#define CPP_CLK_SRC 38
+#define CRYPTO_CLK_SRC 39
+#define GP1_CLK_SRC 40
+#define GP2_CLK_SRC 41
+#define GP3_CLK_SRC 42
+#define BYTE0_CLK_SRC 43
+#define ESC0_CLK_SRC 44
+#define MDP_CLK_SRC 45
+#define PCLK0_CLK_SRC 46
+#define VSYNC_CLK_SRC 47
+#define PDM2_CLK_SRC 48
+#define SDCC1_APPS_CLK_SRC 49
+#define SDCC2_APPS_CLK_SRC 50
+#define APSS_TCU_CLK_SRC 51
+#define USB_HS_SYSTEM_CLK_SRC 52
+#define VCODEC0_CLK_SRC 53
+#define GCC_BLSP1_AHB_CLK 54
+#define GCC_BLSP1_SLEEP_CLK 55
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 56
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 57
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 58
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 59
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 60
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 61
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 62
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 63
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 64
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 65
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 66
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 67
+#define GCC_BLSP1_UART1_APPS_CLK 68
+#define GCC_BLSP1_UART2_APPS_CLK 69
+#define GCC_BOOT_ROM_AHB_CLK 70
+#define GCC_CAMSS_CCI_AHB_CLK 71
+#define GCC_CAMSS_CCI_CLK 72
+#define GCC_CAMSS_CSI0_AHB_CLK 73
+#define GCC_CAMSS_CSI0_CLK 74
+#define GCC_CAMSS_CSI0PHY_CLK 75
+#define GCC_CAMSS_CSI0PIX_CLK 76
+#define GCC_CAMSS_CSI0RDI_CLK 77
+#define GCC_CAMSS_CSI1_AHB_CLK 78
+#define GCC_CAMSS_CSI1_CLK 79
+#define GCC_CAMSS_CSI1PHY_CLK 80
+#define GCC_CAMSS_CSI1PIX_CLK 81
+#define GCC_CAMSS_CSI1RDI_CLK 82
+#define GCC_CAMSS_CSI_VFE0_CLK 83
+#define GCC_CAMSS_GP0_CLK 84
+#define GCC_CAMSS_GP1_CLK 85
+#define GCC_CAMSS_ISPIF_AHB_CLK 86
+#define GCC_CAMSS_JPEG0_CLK 87
+#define GCC_CAMSS_JPEG_AHB_CLK 88
+#define GCC_CAMSS_JPEG_AXI_CLK 89
+#define GCC_CAMSS_MCLK0_CLK 90
+#define GCC_CAMSS_MCLK1_CLK 91
+#define GCC_CAMSS_MICRO_AHB_CLK 92
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 93
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 94
+#define GCC_CAMSS_AHB_CLK 95
+#define GCC_CAMSS_TOP_AHB_CLK 96
+#define GCC_CAMSS_CPP_AHB_CLK 97
+#define GCC_CAMSS_CPP_CLK 98
+#define GCC_CAMSS_VFE0_CLK 99
+#define GCC_CAMSS_VFE_AHB_CLK 100
+#define GCC_CAMSS_VFE_AXI_CLK 101
+#define GCC_CRYPTO_AHB_CLK 102
+#define GCC_CRYPTO_AXI_CLK 103
+#define GCC_CRYPTO_CLK 104
+#define GCC_OXILI_GMEM_CLK 105
+#define GCC_GP1_CLK 106
+#define GCC_GP2_CLK 107
+#define GCC_GP3_CLK 108
+#define GCC_MDSS_AHB_CLK 109
+#define GCC_MDSS_AXI_CLK 110
+#define GCC_MDSS_BYTE0_CLK 111
+#define GCC_MDSS_ESC0_CLK 112
+#define GCC_MDSS_MDP_CLK 113
+#define GCC_MDSS_PCLK0_CLK 114
+#define GCC_MDSS_VSYNC_CLK 115
+#define GCC_MSS_CFG_AHB_CLK 116
+#define GCC_OXILI_AHB_CLK 117
+#define GCC_OXILI_GFX3D_CLK 118
+#define GCC_PDM2_CLK 119
+#define GCC_PDM_AHB_CLK 120
+#define GCC_PRNG_AHB_CLK 121
+#define GCC_SDCC1_AHB_CLK 122
+#define GCC_SDCC1_APPS_CLK 123
+#define GCC_SDCC2_AHB_CLK 124
+#define GCC_SDCC2_APPS_CLK 125
+#define GCC_GTCU_AHB_CLK 126
+#define GCC_JPEG_TBU_CLK 127
+#define GCC_MDP_TBU_CLK 128
+#define GCC_SMMU_CFG_CLK 129
+#define GCC_VENUS_TBU_CLK 130
+#define GCC_VFE_TBU_CLK 131
+#define GCC_USB2A_PHY_SLEEP_CLK 132
+#define GCC_USB_HS_AHB_CLK 133
+#define GCC_USB_HS_SYSTEM_CLK 134
+#define GCC_VENUS0_AHB_CLK 135
+#define GCC_VENUS0_AXI_CLK 136
+#define GCC_VENUS0_VCODEC0_CLK 137
+#define BIMC_DDR_CLK_SRC 138
+#define GCC_APSS_TCU_CLK 139
+#define GCC_GFX_TCU_CLK 140
+#define BIMC_GPU_CLK_SRC 141
+#define GCC_BIMC_GFX_CLK 142
+#define GCC_BIMC_GPU_CLK 143
+#define ULTAUDIO_LPAIF_PRI_I2S_CLK_SRC 144
+#define ULTAUDIO_LPAIF_SEC_I2S_CLK_SRC 145
+#define ULTAUDIO_LPAIF_AUX_I2S_CLK_SRC 146
+#define ULTAUDIO_XO_CLK_SRC 147
+#define ULTAUDIO_AHBFABRIC_CLK_SRC 148
+#define CODEC_DIGCODEC_CLK_SRC 149
+#define GCC_ULTAUDIO_PCNOC_MPORT_CLK 150
+#define GCC_ULTAUDIO_PCNOC_SWAY_CLK 151
+#define GCC_ULTAUDIO_AVSYNC_XO_CLK 152
+#define GCC_ULTAUDIO_STC_XO_CLK 153
+#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK 154
+#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_LPM_CLK 155
+#define GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK 156
+#define GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK 157
+#define GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK 158
+#define GCC_CODEC_DIGCODEC_CLK 159
+#define GCC_MSS_Q6_BIMC_AXI_CLK 160
+#define GPLL3 161
+#define GPLL3_VOTE 162
+#define GPLL4 163
+#define GPLL4_VOTE 164
+#define GPLL5 165
+#define GPLL5_VOTE 166
+#define GPLL6 167
+#define GPLL6_VOTE 168
+#define BYTE1_CLK_SRC 169
+#define GCC_MDSS_BYTE1_CLK 170
+#define ESC1_CLK_SRC 171
+#define GCC_MDSS_ESC1_CLK 172
+#define PCLK1_CLK_SRC 173
+#define GCC_MDSS_PCLK1_CLK 174
+#define GCC_GFX_TBU_CLK 175
+#define GCC_CPP_TBU_CLK 176
+#define GCC_MDP_RT_TBU_CLK 177
+#define USB_FS_SYSTEM_CLK_SRC 178
+#define USB_FS_IC_CLK_SRC 179
+#define GCC_USB_FS_AHB_CLK 180
+#define GCC_USB_FS_IC_CLK 181
+#define GCC_USB_FS_SYSTEM_CLK 182
+#define GCC_VENUS0_CORE0_VCODEC0_CLK 183
+#define GCC_VENUS0_CORE1_VCODEC0_CLK 184
+#define GCC_OXILI_TIMER_CLK 185
+#define SYSTEM_MM_NOC_BFDCD_CLK_SRC 186
+
+/* Indexes for GDSCs */
+#define BIMC_GDSC 0
+#define VENUS_GDSC 1
+#define MDSS_GDSC 2
+#define JPEG_GDSC 3
+#define VFE_GDSC 4
+#define OXILI_GDSC 5
+#define VENUS_CORE0_GDSC 6
+#define VENUS_CORE1_GDSC 7
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8953.h b/include/dt-bindings/clock/qcom,gcc-msm8953.h
new file mode 100644
index 000000000000..783162da6148
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8953.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_8953_H
+#define _DT_BINDINGS_CLK_MSM_GCC_8953_H
+
+/* Clocks */
+#define APC0_DROOP_DETECTOR_CLK_SRC 0
+#define APC1_DROOP_DETECTOR_CLK_SRC 1
+#define APSS_AHB_CLK_SRC 2
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 3
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 4
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 5
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 6
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 7
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 8
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 9
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 10
+#define BLSP1_UART1_APPS_CLK_SRC 11
+#define BLSP1_UART2_APPS_CLK_SRC 12
+#define BLSP2_QUP1_I2C_APPS_CLK_SRC 13
+#define BLSP2_QUP1_SPI_APPS_CLK_SRC 14
+#define BLSP2_QUP2_I2C_APPS_CLK_SRC 15
+#define BLSP2_QUP2_SPI_APPS_CLK_SRC 16
+#define BLSP2_QUP3_I2C_APPS_CLK_SRC 17
+#define BLSP2_QUP3_SPI_APPS_CLK_SRC 18
+#define BLSP2_QUP4_I2C_APPS_CLK_SRC 19
+#define BLSP2_QUP4_SPI_APPS_CLK_SRC 20
+#define BLSP2_UART1_APPS_CLK_SRC 21
+#define BLSP2_UART2_APPS_CLK_SRC 22
+#define BYTE0_CLK_SRC 23
+#define BYTE1_CLK_SRC 24
+#define CAMSS_GP0_CLK_SRC 25
+#define CAMSS_GP1_CLK_SRC 26
+#define CAMSS_TOP_AHB_CLK_SRC 27
+#define CCI_CLK_SRC 28
+#define CPP_CLK_SRC 29
+#define CRYPTO_CLK_SRC 30
+#define CSI0PHYTIMER_CLK_SRC 31
+#define CSI0P_CLK_SRC 32
+#define CSI0_CLK_SRC 33
+#define CSI1PHYTIMER_CLK_SRC 34
+#define CSI1P_CLK_SRC 35
+#define CSI1_CLK_SRC 36
+#define CSI2PHYTIMER_CLK_SRC 37
+#define CSI2P_CLK_SRC 38
+#define CSI2_CLK_SRC 39
+#define ESC0_CLK_SRC 40
+#define ESC1_CLK_SRC 41
+#define GCC_APC0_DROOP_DETECTOR_GPLL0_CLK 42
+#define GCC_APC1_DROOP_DETECTOR_GPLL0_CLK 43
+#define GCC_APSS_AHB_CLK 44
+#define GCC_APSS_AXI_CLK 45
+#define GCC_APSS_TCU_ASYNC_CLK 46
+#define GCC_BIMC_GFX_CLK 47
+#define GCC_BIMC_GPU_CLK 48
+#define GCC_BLSP1_AHB_CLK 49
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 50
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 51
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 52
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 53
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 54
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 55
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 56
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 57
+#define GCC_BLSP1_UART1_APPS_CLK 58
+#define GCC_BLSP1_UART2_APPS_CLK 59
+#define GCC_BLSP2_AHB_CLK 60
+#define GCC_BLSP2_QUP1_I2C_APPS_CLK 61
+#define GCC_BLSP2_QUP1_SPI_APPS_CLK 62
+#define GCC_BLSP2_QUP2_I2C_APPS_CLK 63
+#define GCC_BLSP2_QUP2_SPI_APPS_CLK 64
+#define GCC_BLSP2_QUP3_I2C_APPS_CLK 65
+#define GCC_BLSP2_QUP3_SPI_APPS_CLK 66
+#define GCC_BLSP2_QUP4_I2C_APPS_CLK 67
+#define GCC_BLSP2_QUP4_SPI_APPS_CLK 68
+#define GCC_BLSP2_UART1_APPS_CLK 69
+#define GCC_BLSP2_UART2_APPS_CLK 70
+#define GCC_BOOT_ROM_AHB_CLK 71
+#define GCC_CAMSS_AHB_CLK 72
+#define GCC_CAMSS_CCI_AHB_CLK 73
+#define GCC_CAMSS_CCI_CLK 74
+#define GCC_CAMSS_CPP_AHB_CLK 75
+#define GCC_CAMSS_CPP_AXI_CLK 76
+#define GCC_CAMSS_CPP_CLK 77
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 78
+#define GCC_CAMSS_CSI0PHY_CLK 79
+#define GCC_CAMSS_CSI0PIX_CLK 80
+#define GCC_CAMSS_CSI0RDI_CLK 81
+#define GCC_CAMSS_CSI0_AHB_CLK 82
+#define GCC_CAMSS_CSI0_CLK 83
+#define GCC_CAMSS_CSI0_CSIPHY_3P_CLK 84
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 85
+#define GCC_CAMSS_CSI1PHY_CLK 86
+#define GCC_CAMSS_CSI1PIX_CLK 87
+#define GCC_CAMSS_CSI1RDI_CLK 88
+#define GCC_CAMSS_CSI1_AHB_CLK 89
+#define GCC_CAMSS_CSI1_CLK 90
+#define GCC_CAMSS_CSI1_CSIPHY_3P_CLK 91
+#define GCC_CAMSS_CSI2PHYTIMER_CLK 92
+#define GCC_CAMSS_CSI2PHY_CLK 93
+#define GCC_CAMSS_CSI2PIX_CLK 94
+#define GCC_CAMSS_CSI2RDI_CLK 95
+#define GCC_CAMSS_CSI2_AHB_CLK 96
+#define GCC_CAMSS_CSI2_CLK 97
+#define GCC_CAMSS_CSI2_CSIPHY_3P_CLK 98
+#define GCC_CAMSS_CSI_VFE0_CLK 99
+#define GCC_CAMSS_CSI_VFE1_CLK 100
+#define GCC_CAMSS_GP0_CLK 101
+#define GCC_CAMSS_GP1_CLK 102
+#define GCC_CAMSS_ISPIF_AHB_CLK 103
+#define GCC_CAMSS_JPEG0_CLK 104
+#define GCC_CAMSS_JPEG_AHB_CLK 105
+#define GCC_CAMSS_JPEG_AXI_CLK 106
+#define GCC_CAMSS_MCLK0_CLK 107
+#define GCC_CAMSS_MCLK1_CLK 108
+#define GCC_CAMSS_MCLK2_CLK 109
+#define GCC_CAMSS_MCLK3_CLK 110
+#define GCC_CAMSS_MICRO_AHB_CLK 111
+#define GCC_CAMSS_TOP_AHB_CLK 112
+#define GCC_CAMSS_VFE0_AHB_CLK 113
+#define GCC_CAMSS_VFE0_AXI_CLK 114
+#define GCC_CAMSS_VFE0_CLK 115
+#define GCC_CAMSS_VFE1_AHB_CLK 116
+#define GCC_CAMSS_VFE1_AXI_CLK 117
+#define GCC_CAMSS_VFE1_CLK 118
+#define GCC_CPP_TBU_CLK 119
+#define GCC_CRYPTO_AHB_CLK 120
+#define GCC_CRYPTO_AXI_CLK 121
+#define GCC_CRYPTO_CLK 122
+#define GCC_DCC_CLK 123
+#define GCC_GP1_CLK 124
+#define GCC_GP2_CLK 125
+#define GCC_GP3_CLK 126
+#define GCC_JPEG_TBU_CLK 127
+#define GCC_MDP_TBU_CLK 128
+#define GCC_MDSS_AHB_CLK 129
+#define GCC_MDSS_AXI_CLK 130
+#define GCC_MDSS_BYTE0_CLK 131
+#define GCC_MDSS_BYTE1_CLK 132
+#define GCC_MDSS_ESC0_CLK 133
+#define GCC_MDSS_ESC1_CLK 134
+#define GCC_MDSS_MDP_CLK 135
+#define GCC_MDSS_PCLK0_CLK 136
+#define GCC_MDSS_PCLK1_CLK 137
+#define GCC_MDSS_VSYNC_CLK 138
+#define GCC_MSS_CFG_AHB_CLK 139
+#define GCC_MSS_Q6_BIMC_AXI_CLK 140
+#define GCC_OXILI_AHB_CLK 141
+#define GCC_OXILI_AON_CLK 142
+#define GCC_OXILI_GFX3D_CLK 143
+#define GCC_OXILI_TIMER_CLK 144
+#define GCC_PCNOC_USB3_AXI_CLK 145
+#define GCC_PDM2_CLK 146
+#define GCC_PDM_AHB_CLK 147
+#define GCC_PRNG_AHB_CLK 148
+#define GCC_QDSS_DAP_CLK 149
+#define GCC_QUSB_REF_CLK 150
+#define GCC_RBCPR_GFX_CLK 151
+#define GCC_SDCC1_AHB_CLK 152
+#define GCC_SDCC1_APPS_CLK 153
+#define GCC_SDCC1_ICE_CORE_CLK 154
+#define GCC_SDCC2_AHB_CLK 155
+#define GCC_SDCC2_APPS_CLK 156
+#define GCC_SMMU_CFG_CLK 157
+#define GCC_USB30_MASTER_CLK 158
+#define GCC_USB30_MOCK_UTMI_CLK 159
+#define GCC_USB30_SLEEP_CLK 160
+#define GCC_USB3_AUX_CLK 161
+#define GCC_USB3_PIPE_CLK 162
+#define GCC_USB_PHY_CFG_AHB_CLK 163
+#define GCC_USB_SS_REF_CLK 164
+#define GCC_VENUS0_AHB_CLK 165
+#define GCC_VENUS0_AXI_CLK 166
+#define GCC_VENUS0_CORE0_VCODEC0_CLK 167
+#define GCC_VENUS0_VCODEC0_CLK 168
+#define GCC_VENUS_TBU_CLK 169
+#define GCC_VFE1_TBU_CLK 170
+#define GCC_VFE_TBU_CLK 171
+#define GFX3D_CLK_SRC 172
+#define GP1_CLK_SRC 173
+#define GP2_CLK_SRC 174
+#define GP3_CLK_SRC 175
+#define GPLL0 176
+#define GPLL0_EARLY 177
+#define GPLL2 178
+#define GPLL2_EARLY 179
+#define GPLL3 180
+#define GPLL3_EARLY 181
+#define GPLL4 182
+#define GPLL4_EARLY 183
+#define GPLL6 184
+#define GPLL6_EARLY 185
+#define JPEG0_CLK_SRC 186
+#define MCLK0_CLK_SRC 187
+#define MCLK1_CLK_SRC 188
+#define MCLK2_CLK_SRC 189
+#define MCLK3_CLK_SRC 190
+#define MDP_CLK_SRC 191
+#define PCLK0_CLK_SRC 192
+#define PCLK1_CLK_SRC 193
+#define PDM2_CLK_SRC 194
+#define RBCPR_GFX_CLK_SRC 195
+#define SDCC1_APPS_CLK_SRC 196
+#define SDCC1_ICE_CORE_CLK_SRC 197
+#define SDCC2_APPS_CLK_SRC 198
+#define USB30_MASTER_CLK_SRC 199
+#define USB30_MOCK_UTMI_CLK_SRC 200
+#define USB3_AUX_CLK_SRC 201
+#define VCODEC0_CLK_SRC 202
+#define VFE0_CLK_SRC 203
+#define VFE1_CLK_SRC 204
+#define VSYNC_CLK_SRC 205
+
+/* GCC block resets */
+#define GCC_CAMSS_MICRO_BCR 0
+#define GCC_MSS_BCR 1
+#define GCC_QUSB2_PHY_BCR 2
+#define GCC_USB3PHY_PHY_BCR 3
+#define GCC_USB3_PHY_BCR 4
+#define GCC_USB_30_BCR 5
+
+/* GDSCs */
+#define CPP_GDSC 0
+#define JPEG_GDSC 1
+#define MDSS_GDSC 2
+#define OXILI_CX_GDSC 3
+#define OXILI_GX_GDSC 4
+#define USB30_GDSC 5
+#define VENUS_CORE0_GDSC 6
+#define VENUS_GDSC 7
+#define VFE0_GDSC 8
+#define VFE1_GDSC 9
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8976.h b/include/dt-bindings/clock/qcom,gcc-msm8976.h
new file mode 100644
index 000000000000..5351f48b2068
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8976.h
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2016-2021, AngeloGioacchino Del Regno
+ * <angelogioacchino.delregno@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_8976_H
+#define _DT_BINDINGS_CLK_MSM_GCC_8976_H
+
+#define GPLL0 0
+#define GPLL2 1
+#define GPLL3 2
+#define GPLL4 3
+#define GPLL6 4
+#define GPLL0_CLK_SRC 5
+#define GPLL2_CLK_SRC 6
+#define GPLL3_CLK_SRC 7
+#define GPLL4_CLK_SRC 8
+#define GPLL6_CLK_SRC 9
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 10
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 11
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 12
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 13
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 14
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 15
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 16
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 17
+#define GCC_BLSP1_UART1_APPS_CLK 18
+#define GCC_BLSP1_UART2_APPS_CLK 19
+#define GCC_BLSP2_QUP1_I2C_APPS_CLK 20
+#define GCC_BLSP2_QUP1_SPI_APPS_CLK 21
+#define GCC_BLSP2_QUP2_I2C_APPS_CLK 22
+#define GCC_BLSP2_QUP2_SPI_APPS_CLK 23
+#define GCC_BLSP2_QUP3_I2C_APPS_CLK 24
+#define GCC_BLSP2_QUP3_SPI_APPS_CLK 25
+#define GCC_BLSP2_QUP4_I2C_APPS_CLK 26
+#define GCC_BLSP2_QUP4_SPI_APPS_CLK 27
+#define GCC_BLSP2_UART1_APPS_CLK 28
+#define GCC_BLSP2_UART2_APPS_CLK 29
+#define GCC_CAMSS_CCI_AHB_CLK 30
+#define GCC_CAMSS_CCI_CLK 31
+#define GCC_CAMSS_CPP_AHB_CLK 32
+#define GCC_CAMSS_CPP_AXI_CLK 33
+#define GCC_CAMSS_CPP_CLK 34
+#define GCC_CAMSS_CSI0_AHB_CLK 35
+#define GCC_CAMSS_CSI0_CLK 36
+#define GCC_CAMSS_CSI0PHY_CLK 37
+#define GCC_CAMSS_CSI0PIX_CLK 38
+#define GCC_CAMSS_CSI0RDI_CLK 39
+#define GCC_CAMSS_CSI1_AHB_CLK 40
+#define GCC_CAMSS_CSI1_CLK 41
+#define GCC_CAMSS_CSI1PHY_CLK 42
+#define GCC_CAMSS_CSI1PIX_CLK 43
+#define GCC_CAMSS_CSI1RDI_CLK 44
+#define GCC_CAMSS_CSI2_AHB_CLK 45
+#define GCC_CAMSS_CSI2_CLK 46
+#define GCC_CAMSS_CSI2PHY_CLK 47
+#define GCC_CAMSS_CSI2PIX_CLK 48
+#define GCC_CAMSS_CSI2RDI_CLK 49
+#define GCC_CAMSS_CSI_VFE0_CLK 50
+#define GCC_CAMSS_CSI_VFE1_CLK 51
+#define GCC_CAMSS_GP0_CLK 52
+#define GCC_CAMSS_GP1_CLK 53
+#define GCC_CAMSS_ISPIF_AHB_CLK 54
+#define GCC_CAMSS_JPEG0_CLK 55
+#define GCC_CAMSS_JPEG_AHB_CLK 56
+#define GCC_CAMSS_JPEG_AXI_CLK 57
+#define GCC_CAMSS_MCLK0_CLK 58
+#define GCC_CAMSS_MCLK1_CLK 59
+#define GCC_CAMSS_MCLK2_CLK 60
+#define GCC_CAMSS_MICRO_AHB_CLK 61
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 62
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 63
+#define GCC_CAMSS_AHB_CLK 64
+#define GCC_CAMSS_TOP_AHB_CLK 65
+#define GCC_CAMSS_VFE0_CLK 66
+#define GCC_CAMSS_VFE_AHB_CLK 67
+#define GCC_CAMSS_VFE_AXI_CLK 68
+#define GCC_CAMSS_VFE1_AHB_CLK 69
+#define GCC_CAMSS_VFE1_AXI_CLK 70
+#define GCC_CAMSS_VFE1_CLK 71
+#define GCC_DCC_CLK 72
+#define GCC_GP1_CLK 73
+#define GCC_GP2_CLK 74
+#define GCC_GP3_CLK 75
+#define GCC_MDSS_AHB_CLK 76
+#define GCC_MDSS_AXI_CLK 77
+#define GCC_MDSS_ESC0_CLK 78
+#define GCC_MDSS_ESC1_CLK 79
+#define GCC_MDSS_MDP_CLK 80
+#define GCC_MDSS_VSYNC_CLK 81
+#define GCC_MSS_CFG_AHB_CLK 82
+#define GCC_MSS_Q6_BIMC_AXI_CLK 83
+#define GCC_PDM2_CLK 84
+#define GCC_PRNG_AHB_CLK 85
+#define GCC_PDM_AHB_CLK 86
+#define GCC_RBCPR_GFX_AHB_CLK 87
+#define GCC_RBCPR_GFX_CLK 88
+#define GCC_SDCC1_AHB_CLK 89
+#define GCC_SDCC1_APPS_CLK 90
+#define GCC_SDCC1_ICE_CORE_CLK 91
+#define GCC_SDCC2_AHB_CLK 92
+#define GCC_SDCC2_APPS_CLK 93
+#define GCC_SDCC3_AHB_CLK 94
+#define GCC_SDCC3_APPS_CLK 95
+#define GCC_USB2A_PHY_SLEEP_CLK 96
+#define GCC_USB_HS_PHY_CFG_AHB_CLK 97
+#define GCC_USB_FS_AHB_CLK 98
+#define GCC_USB_FS_IC_CLK 99
+#define GCC_USB_FS_SYSTEM_CLK 100
+#define GCC_USB_HS_AHB_CLK 101
+#define GCC_USB_HS_SYSTEM_CLK 102
+#define GCC_VENUS0_AHB_CLK 103
+#define GCC_VENUS0_AXI_CLK 104
+#define GCC_VENUS0_CORE0_VCODEC0_CLK 105
+#define GCC_VENUS0_CORE1_VCODEC0_CLK 106
+#define GCC_VENUS0_VCODEC0_CLK 107
+#define GCC_APSS_AHB_CLK 108
+#define GCC_APSS_AXI_CLK 109
+#define GCC_BLSP1_AHB_CLK 110
+#define GCC_BLSP2_AHB_CLK 111
+#define GCC_BOOT_ROM_AHB_CLK 112
+#define GCC_CRYPTO_AHB_CLK 113
+#define GCC_CRYPTO_AXI_CLK 114
+#define GCC_CRYPTO_CLK 115
+#define GCC_CPP_TBU_CLK 116
+#define GCC_APSS_TCU_CLK 117
+#define GCC_JPEG_TBU_CLK 118
+#define GCC_MDP_RT_TBU_CLK 119
+#define GCC_MDP_TBU_CLK 120
+#define GCC_SMMU_CFG_CLK 121
+#define GCC_VENUS_1_TBU_CLK 122
+#define GCC_VENUS_TBU_CLK 123
+#define GCC_VFE1_TBU_CLK 124
+#define GCC_VFE_TBU_CLK 125
+#define GCC_APS_0_CLK 126
+#define GCC_APS_1_CLK 127
+#define APS_0_CLK_SRC 128
+#define APS_1_CLK_SRC 129
+#define APSS_AHB_CLK_SRC 130
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 131
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 132
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 133
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 134
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 135
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 136
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 137
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 138
+#define BLSP1_UART1_APPS_CLK_SRC 139
+#define BLSP1_UART2_APPS_CLK_SRC 140
+#define BLSP2_QUP1_I2C_APPS_CLK_SRC 141
+#define BLSP2_QUP1_SPI_APPS_CLK_SRC 142
+#define BLSP2_QUP2_I2C_APPS_CLK_SRC 143
+#define BLSP2_QUP2_SPI_APPS_CLK_SRC 144
+#define BLSP2_QUP3_I2C_APPS_CLK_SRC 145
+#define BLSP2_QUP3_SPI_APPS_CLK_SRC 146
+#define BLSP2_QUP4_I2C_APPS_CLK_SRC 147
+#define BLSP2_QUP4_SPI_APPS_CLK_SRC 148
+#define BLSP2_UART1_APPS_CLK_SRC 149
+#define BLSP2_UART2_APPS_CLK_SRC 150
+#define CCI_CLK_SRC 151
+#define CPP_CLK_SRC 152
+#define CSI0_CLK_SRC 153
+#define CSI1_CLK_SRC 154
+#define CSI2_CLK_SRC 155
+#define CAMSS_GP0_CLK_SRC 156
+#define CAMSS_GP1_CLK_SRC 157
+#define JPEG0_CLK_SRC 158
+#define MCLK0_CLK_SRC 159
+#define MCLK1_CLK_SRC 160
+#define MCLK2_CLK_SRC 161
+#define CSI0PHYTIMER_CLK_SRC 162
+#define CSI1PHYTIMER_CLK_SRC 163
+#define CAMSS_TOP_AHB_CLK_SRC 164
+#define VFE0_CLK_SRC 165
+#define VFE1_CLK_SRC 166
+#define CRYPTO_CLK_SRC 167
+#define GP1_CLK_SRC 168
+#define GP2_CLK_SRC 169
+#define GP3_CLK_SRC 170
+#define ESC0_CLK_SRC 171
+#define ESC1_CLK_SRC 172
+#define MDP_CLK_SRC 173
+#define VSYNC_CLK_SRC 174
+#define PDM2_CLK_SRC 175
+#define RBCPR_GFX_CLK_SRC 176
+#define SDCC1_APPS_CLK_SRC 177
+#define SDCC1_ICE_CORE_CLK_SRC 178
+#define SDCC2_APPS_CLK_SRC 179
+#define SDCC3_APPS_CLK_SRC 180
+#define USB_FS_IC_CLK_SRC 181
+#define USB_FS_SYSTEM_CLK_SRC 182
+#define USB_HS_SYSTEM_CLK_SRC 183
+#define VCODEC0_CLK_SRC 184
+#define GCC_MDSS_BYTE0_CLK_SRC 185
+#define GCC_MDSS_BYTE1_CLK_SRC 186
+#define GCC_MDSS_BYTE0_CLK 187
+#define GCC_MDSS_BYTE1_CLK 188
+#define GCC_MDSS_PCLK0_CLK_SRC 189
+#define GCC_MDSS_PCLK1_CLK_SRC 190
+#define GCC_MDSS_PCLK0_CLK 191
+#define GCC_MDSS_PCLK1_CLK 192
+#define GCC_GFX3D_CLK_SRC 193
+#define GCC_GFX3D_OXILI_CLK 194
+#define GCC_GFX3D_BIMC_CLK 195
+#define GCC_GFX3D_OXILI_AHB_CLK 196
+#define GCC_GFX3D_OXILI_AON_CLK 197
+#define GCC_GFX3D_OXILI_GMEM_CLK 198
+#define GCC_GFX3D_OXILI_TIMER_CLK 199
+#define GCC_GFX3D_TBU0_CLK 200
+#define GCC_GFX3D_TBU1_CLK 201
+#define GCC_GFX3D_TCU_CLK 202
+#define GCC_GFX3D_GTCU_AHB_CLK 203
+
+/* GCC block resets */
+#define RST_CAMSS_MICRO_BCR 0
+#define RST_USB_HS_BCR 1
+#define RST_QUSB2_PHY_BCR 2
+#define RST_USB2_HS_PHY_ONLY_BCR 3
+#define RST_USB_HS_PHY_CFG_AHB_BCR 4
+#define RST_USB_FS_BCR 5
+#define RST_CAMSS_CSI1PIX_BCR 6
+#define RST_CAMSS_CSI_VFE1_BCR 7
+#define RST_CAMSS_VFE1_BCR 8
+#define RST_CAMSS_CPP_BCR 9
+#define RST_MSS_BCR 10
+
+/* GDSCs */
+#define VENUS_GDSC 0
+#define VENUS_CORE0_GDSC 1
+#define VENUS_CORE1_GDSC 2
+#define MDSS_GDSC 3
+#define JPEG_GDSC 4
+#define VFE0_GDSC 5
+#define VFE1_GDSC 6
+#define CPP_GDSC 7
+#define OXILI_GX_GDSC 8
+#define OXILI_CX_GDSC 9
+
+#endif /* _DT_BINDINGS_CLK_MSM_GCC_8976_H */
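The GDSC indices feed the generic power-domain binding the same way, with one cell selecting an entry in the provider's GDSC table. A hedged sketch, assuming a &gcc provider with #power-domain-cells = <1> and an illustrative &mdss consumer:

    #include <dt-bindings/clock/qcom,gcc-msm8976.h>

    &mdss {
            /* MDSS_GDSC (index 3) picks the display GDSC exposed by GCC */
            power-domains = <&gcc MDSS_GDSC>;
    };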
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8994.h b/include/dt-bindings/clock/qcom,gcc-msm8994.h
index 938969309e00..f6836f430bb5 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8994.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8994.h
@@ -126,5 +126,54 @@
#define GCC_USB3_PHY_AUX_CLK 116
#define GCC_USB_HS_SYSTEM_CLK 117
#define GCC_SDCC1_AHB_CLK 118
+#define GCC_LPASS_Q6_AXI_CLK 119
+#define GCC_MSS_Q6_BIMC_AXI_CLK 120
+#define GCC_PCIE_0_CFG_AHB_CLK 121
+#define GCC_PCIE_0_MSTR_AXI_CLK 122
+#define GCC_PCIE_0_SLV_AXI_CLK 123
+#define GCC_PCIE_1_CFG_AHB_CLK 124
+#define GCC_PCIE_1_MSTR_AXI_CLK 125
+#define GCC_PCIE_1_SLV_AXI_CLK 126
+#define GCC_PDM_AHB_CLK 127
+#define GCC_SDCC2_AHB_CLK 128
+#define GCC_SDCC3_AHB_CLK 129
+#define GCC_SDCC4_AHB_CLK 130
+#define GCC_TSIF_AHB_CLK 131
+#define GCC_UFS_AHB_CLK 132
+#define GCC_UFS_RX_SYMBOL_0_CLK 133
+#define GCC_UFS_RX_SYMBOL_1_CLK 134
+#define GCC_UFS_TX_SYMBOL_0_CLK 135
+#define GCC_UFS_TX_SYMBOL_1_CLK 136
+#define GCC_USB2_HS_PHY_SLEEP_CLK 137
+#define GCC_USB30_SLEEP_CLK 138
+#define GCC_USB_HS_AHB_CLK 139
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 140
+#define CONFIG_NOC_CLK_SRC 141
+#define PERIPH_NOC_CLK_SRC 142
+#define SYSTEM_NOC_CLK_SRC 143
+#define GPLL0_OUT_MMSSCC 144
+#define GPLL0_OUT_MSSCC 145
+#define PCIE_0_PHY_LDO 146
+#define PCIE_1_PHY_LDO 147
+#define UFS_PHY_LDO 148
+#define USB_SS_PHY_LDO 149
+#define GCC_BOOT_ROM_AHB_CLK 150
+#define GCC_PRNG_AHB_CLK 151
+#define GCC_USB3_PHY_PIPE_CLK 152
+
+/* GDSCs */
+#define PCIE_GDSC 0
+#define PCIE_0_GDSC 1
+#define PCIE_1_GDSC 2
+#define USB30_GDSC 3
+#define UFS_GDSC 4
+
+/* Resets */
+#define USB3_PHY_RESET 0
+#define USB3PHY_PHY_RESET 1
+#define PCIE_PHY_0_RESET 2
+#define PCIE_PHY_1_RESET 3
+#define QUSB2_PHY_RESET 4
+#define MSS_RESET 5

#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h
index 63e02dc32a0b..1badb4f9c58f 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8998.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h
@@ -183,6 +183,13 @@
#define GCC_MSS_SNOC_AXI_CLK 174
#define GCC_MSS_MNOC_BIMC_AXI_CLK 175
#define GCC_BIMC_GFX_CLK 176
+#define UFS_UNIPRO_CORE_CLK_SRC 177
+#define GCC_MMSS_GPLL0_CLK 178
+#define HMSS_GPLL0_CLK_SRC 179
+#define GCC_IM_SLEEP 180
+#define AGGRE2_SNOC_NORTH_AXI 181
+#define SSC_XO 182
+#define SSC_CNOC_AHBS_CLK 183

#define PCIE_0_GDSC 0
#define UFS_GDSC 1
diff --git a/include/dt-bindings/clock/qcom,gcc-qcm2290.h b/include/dt-bindings/clock/qcom,gcc-qcm2290.h
new file mode 100644
index 000000000000..8d907035f9e4
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-qcm2290.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_QCM2290_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_QCM2290_H
+
+/* GCC clocks */
+#define GPLL0 0
+#define GPLL0_OUT_AUX2 1
+#define GPLL1 2
+#define GPLL10 3
+#define GPLL11 4
+#define GPLL3 5
+#define GPLL3_OUT_MAIN 6
+#define GPLL4 7
+#define GPLL5 8
+#define GPLL6 9
+#define GPLL6_OUT_MAIN 10
+#define GPLL7 11
+#define GPLL8 12
+#define GPLL8_OUT_MAIN 13
+#define GPLL9 14
+#define GPLL9_OUT_MAIN 15
+#define GCC_AHB2PHY_CSI_CLK 16
+#define GCC_AHB2PHY_USB_CLK 17
+#define GCC_APC_VS_CLK 18
+#define GCC_BIMC_GPU_AXI_CLK 19
+#define GCC_BOOT_ROM_AHB_CLK 20
+#define GCC_CAM_THROTTLE_NRT_CLK 21
+#define GCC_CAM_THROTTLE_RT_CLK 22
+#define GCC_CAMERA_AHB_CLK 23
+#define GCC_CAMERA_XO_CLK 24
+#define GCC_CAMSS_AXI_CLK 25
+#define GCC_CAMSS_AXI_CLK_SRC 26
+#define GCC_CAMSS_CAMNOC_ATB_CLK 27
+#define GCC_CAMSS_CAMNOC_NTS_XO_CLK 28
+#define GCC_CAMSS_CCI_0_CLK 29
+#define GCC_CAMSS_CCI_CLK_SRC 30
+#define GCC_CAMSS_CPHY_0_CLK 31
+#define GCC_CAMSS_CPHY_1_CLK 32
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 33
+#define GCC_CAMSS_CSI0PHYTIMER_CLK_SRC 34
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 35
+#define GCC_CAMSS_CSI1PHYTIMER_CLK_SRC 36
+#define GCC_CAMSS_MCLK0_CLK 37
+#define GCC_CAMSS_MCLK0_CLK_SRC 38
+#define GCC_CAMSS_MCLK1_CLK 39
+#define GCC_CAMSS_MCLK1_CLK_SRC 40
+#define GCC_CAMSS_MCLK2_CLK 41
+#define GCC_CAMSS_MCLK2_CLK_SRC 42
+#define GCC_CAMSS_MCLK3_CLK 43
+#define GCC_CAMSS_MCLK3_CLK_SRC 44
+#define GCC_CAMSS_NRT_AXI_CLK 45
+#define GCC_CAMSS_OPE_AHB_CLK 46
+#define GCC_CAMSS_OPE_AHB_CLK_SRC 47
+#define GCC_CAMSS_OPE_CLK 48
+#define GCC_CAMSS_OPE_CLK_SRC 49
+#define GCC_CAMSS_RT_AXI_CLK 50
+#define GCC_CAMSS_TFE_0_CLK 51
+#define GCC_CAMSS_TFE_0_CLK_SRC 52
+#define GCC_CAMSS_TFE_0_CPHY_RX_CLK 53
+#define GCC_CAMSS_TFE_0_CSID_CLK 54
+#define GCC_CAMSS_TFE_0_CSID_CLK_SRC 55
+#define GCC_CAMSS_TFE_1_CLK 56
+#define GCC_CAMSS_TFE_1_CLK_SRC 57
+#define GCC_CAMSS_TFE_1_CPHY_RX_CLK 58
+#define GCC_CAMSS_TFE_1_CSID_CLK 59
+#define GCC_CAMSS_TFE_1_CSID_CLK_SRC 60
+#define GCC_CAMSS_TFE_CPHY_RX_CLK_SRC 61
+#define GCC_CAMSS_TOP_AHB_CLK 62
+#define GCC_CAMSS_TOP_AHB_CLK_SRC 63
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 64
+#define GCC_CPUSS_AHB_CLK 65
+#define GCC_CPUSS_AHB_CLK_SRC 66
+#define GCC_CPUSS_AHB_POSTDIV_CLK_SRC 67
+#define GCC_CPUSS_GNOC_CLK 68
+#define GCC_CPUSS_THROTTLE_CORE_CLK 69
+#define GCC_CPUSS_THROTTLE_XO_CLK 70
+#define GCC_DISP_AHB_CLK 71
+#define GCC_DISP_GPLL0_CLK_SRC 72
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 73
+#define GCC_DISP_HF_AXI_CLK 74
+#define GCC_DISP_THROTTLE_CORE_CLK 75
+#define GCC_DISP_XO_CLK 76
+#define GCC_GP1_CLK 77
+#define GCC_GP1_CLK_SRC 78
+#define GCC_GP2_CLK 79
+#define GCC_GP2_CLK_SRC 80
+#define GCC_GP3_CLK 81
+#define GCC_GP3_CLK_SRC 82
+#define GCC_GPU_CFG_AHB_CLK 83
+#define GCC_GPU_GPLL0_CLK_SRC 84
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 85
+#define GCC_GPU_IREF_CLK 86
+#define GCC_GPU_MEMNOC_GFX_CLK 87
+#define GCC_GPU_SNOC_DVM_GFX_CLK 88
+#define GCC_GPU_THROTTLE_CORE_CLK 89
+#define GCC_GPU_THROTTLE_XO_CLK 90
+#define GCC_PDM2_CLK 91
+#define GCC_PDM2_CLK_SRC 92
+#define GCC_PDM_AHB_CLK 93
+#define GCC_PDM_XO4_CLK 94
+#define GCC_PWM0_XO512_CLK 95
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 96
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 97
+#define GCC_QMIP_CPUSS_CFG_AHB_CLK 98
+#define GCC_QMIP_DISP_AHB_CLK 99
+#define GCC_QMIP_GPU_CFG_AHB_CLK 100
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 101
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 102
+#define GCC_QUPV3_WRAP0_CORE_CLK 103
+#define GCC_QUPV3_WRAP0_S0_CLK 104
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 105
+#define GCC_QUPV3_WRAP0_S1_CLK 106
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 107
+#define GCC_QUPV3_WRAP0_S2_CLK 108
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 109
+#define GCC_QUPV3_WRAP0_S3_CLK 110
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 111
+#define GCC_QUPV3_WRAP0_S4_CLK 112
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 113
+#define GCC_QUPV3_WRAP0_S5_CLK 114
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 115
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 116
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 117
+#define GCC_SDCC1_AHB_CLK 118
+#define GCC_SDCC1_APPS_CLK 119
+#define GCC_SDCC1_APPS_CLK_SRC 120
+#define GCC_SDCC1_ICE_CORE_CLK 121
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 122
+#define GCC_SDCC2_AHB_CLK 123
+#define GCC_SDCC2_APPS_CLK 124
+#define GCC_SDCC2_APPS_CLK_SRC 125
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 126
+#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK 127
+#define GCC_USB30_PRIM_MASTER_CLK 128
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 129
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 130
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 131
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV 132
+#define GCC_USB30_PRIM_SLEEP_CLK 133
+#define GCC_USB3_PRIM_CLKREF_CLK 134
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 135
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 136
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 137
+#define GCC_VCODEC0_AXI_CLK 138
+#define GCC_VENUS_AHB_CLK 139
+#define GCC_VENUS_CTL_AXI_CLK 140
+#define GCC_VIDEO_AHB_CLK 141
+#define GCC_VIDEO_AXI0_CLK 142
+#define GCC_VIDEO_THROTTLE_CORE_CLK 143
+#define GCC_VIDEO_VCODEC0_SYS_CLK 144
+#define GCC_VIDEO_VENUS_CLK_SRC 145
+#define GCC_VIDEO_VENUS_CTL_CLK 146
+#define GCC_VIDEO_XO_CLK 147
+
+/* GCC resets */
+#define GCC_CAMSS_OPE_BCR 0
+#define GCC_CAMSS_TFE_BCR 1
+#define GCC_CAMSS_TOP_BCR 2
+#define GCC_GPU_BCR 3
+#define GCC_MMSS_BCR 4
+#define GCC_PDM_BCR 5
+#define GCC_QUPV3_WRAPPER_0_BCR 6
+#define GCC_SDCC1_BCR 7
+#define GCC_SDCC2_BCR 8
+#define GCC_USB30_PRIM_BCR 9
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 10
+#define GCC_VCODEC0_BCR 11
+#define GCC_VENUS_BCR 12
+#define GCC_VIDEO_INTERFACE_BCR 13
+#define GCC_QUSB2PHY_PRIM_BCR 14
+#define GCC_USB3_PHY_PRIM_SP0_BCR 15
+#define GCC_USB3PHY_PHY_PRIM_SP0_BCR 16
+
+/* Indexes for GDSCs */
+#define GCC_CAMSS_TOP_GDSC 0
+#define GCC_USB30_PRIM_GDSC 1
+#define GCC_VCODEC0_GDSC 2
+#define GCC_VENUS_GDSC 3
+#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 4
+#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 5
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_RT_GDSC 6
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_NRT_GDSC 7
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sc7180.h b/include/dt-bindings/clock/qcom,gcc-sc7180.h
index e8029b2e92d7..bdf43adc7897 100644
--- a/include/dt-bindings/clock/qcom,gcc-sc7180.h
+++ b/include/dt-bindings/clock/qcom,gcc-sc7180.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
*/

#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SC7180_H
@@ -132,6 +132,13 @@
#define GCC_VIDEO_GPLL0_DIV_CLK_SRC 122
#define GCC_VIDEO_THROTTLE_AXI_CLK 123
#define GCC_VIDEO_XO_CLK 124
+#define GCC_MSS_CFG_AHB_CLK 125
+#define GCC_MSS_MFAB_AXIS_CLK 126
+#define GCC_MSS_NAV_AXI_CLK 127
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK 128
+#define GCC_MSS_SNOC_AXI_CLK 129
+#define GCC_SEC_CTRL_CLK_SRC 130
+#define GCC_LPASS_CFG_NOC_SWAY_CLK 131

/* GCC resets */
#define GCC_QUSB2PHY_PRIM_BCR 0
diff --git a/include/dt-bindings/clock/qcom,gcc-sc7280.h b/include/dt-bindings/clock/qcom,gcc-sc7280.h
new file mode 100644
index 000000000000..3d5724b79bff
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sc7280.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SC7280_H
+
+/* GCC clocks */
+#define GCC_GPLL0 0
+#define GCC_GPLL0_OUT_EVEN 1
+#define GCC_GPLL0_OUT_ODD 2
+#define GCC_GPLL1 3
+#define GCC_GPLL10 4
+#define GCC_GPLL4 5
+#define GCC_GPLL9 6
+#define GCC_AGGRE_NOC_PCIE_0_AXI_CLK 7
+#define GCC_AGGRE_NOC_PCIE_1_AXI_CLK 8
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 9
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 10
+#define GCC_CAMERA_AHB_CLK 11
+#define GCC_CAMERA_HF_AXI_CLK 12
+#define GCC_CAMERA_SF_AXI_CLK 13
+#define GCC_CAMERA_XO_CLK 14
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 15
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 16
+#define GCC_CPUSS_AHB_CLK 17
+#define GCC_CPUSS_AHB_CLK_SRC 18
+#define GCC_CPUSS_AHB_POSTDIV_CLK_SRC 19
+#define GCC_DDRSS_GPU_AXI_CLK 20
+#define GCC_DDRSS_PCIE_SF_CLK 21
+#define GCC_DISP_AHB_CLK 22
+#define GCC_DISP_GPLL0_CLK_SRC 23
+#define GCC_DISP_HF_AXI_CLK 24
+#define GCC_DISP_SF_AXI_CLK 25
+#define GCC_DISP_XO_CLK 26
+#define GCC_GP1_CLK 27
+#define GCC_GP1_CLK_SRC 28
+#define GCC_GP2_CLK 29
+#define GCC_GP2_CLK_SRC 30
+#define GCC_GP3_CLK 31
+#define GCC_GP3_CLK_SRC 32
+#define GCC_GPU_CFG_AHB_CLK 33
+#define GCC_GPU_GPLL0_CLK_SRC 34
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 35
+#define GCC_GPU_IREF_EN 36
+#define GCC_GPU_MEMNOC_GFX_CLK 37
+#define GCC_GPU_SNOC_DVM_GFX_CLK 38
+#define GCC_PCIE0_PHY_RCHNG_CLK 39
+#define GCC_PCIE1_PHY_RCHNG_CLK 40
+#define GCC_PCIE_0_AUX_CLK 41
+#define GCC_PCIE_0_AUX_CLK_SRC 42
+#define GCC_PCIE_0_CFG_AHB_CLK 43
+#define GCC_PCIE_0_MSTR_AXI_CLK 44
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 45
+#define GCC_PCIE_0_PIPE_CLK 46
+#define GCC_PCIE_0_PIPE_CLK_SRC 47
+#define GCC_PCIE_0_SLV_AXI_CLK 48
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 49
+#define GCC_PCIE_1_AUX_CLK 50
+#define GCC_PCIE_1_AUX_CLK_SRC 51
+#define GCC_PCIE_1_CFG_AHB_CLK 52
+#define GCC_PCIE_1_MSTR_AXI_CLK 53
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 54
+#define GCC_PCIE_1_PIPE_CLK 55
+#define GCC_PCIE_1_PIPE_CLK_SRC 56
+#define GCC_PCIE_1_SLV_AXI_CLK 57
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 58
+#define GCC_PCIE_THROTTLE_CORE_CLK 59
+#define GCC_PDM2_CLK 60
+#define GCC_PDM2_CLK_SRC 61
+#define GCC_PDM_AHB_CLK 62
+#define GCC_PDM_XO4_CLK 63
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 64
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 65
+#define GCC_QMIP_DISP_AHB_CLK 66
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 67
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 68
+#define GCC_QUPV3_WRAP0_CORE_CLK 69
+#define GCC_QUPV3_WRAP0_S0_CLK 70
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 71
+#define GCC_QUPV3_WRAP0_S1_CLK 72
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 73
+#define GCC_QUPV3_WRAP0_S2_CLK 74
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 75
+#define GCC_QUPV3_WRAP0_S3_CLK 76
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 77
+#define GCC_QUPV3_WRAP0_S4_CLK 78
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 79
+#define GCC_QUPV3_WRAP0_S5_CLK 80
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 81
+#define GCC_QUPV3_WRAP0_S6_CLK 82
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 83
+#define GCC_QUPV3_WRAP0_S7_CLK 84
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 85
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 86
+#define GCC_QUPV3_WRAP1_CORE_CLK 87
+#define GCC_QUPV3_WRAP1_S0_CLK 88
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 89
+#define GCC_QUPV3_WRAP1_S1_CLK 90
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 91
+#define GCC_QUPV3_WRAP1_S2_CLK 92
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 93
+#define GCC_QUPV3_WRAP1_S3_CLK 94
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 95
+#define GCC_QUPV3_WRAP1_S4_CLK 96
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 97
+#define GCC_QUPV3_WRAP1_S5_CLK 98
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 99
+#define GCC_QUPV3_WRAP1_S6_CLK 100
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 101
+#define GCC_QUPV3_WRAP1_S7_CLK 102
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 103
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 104
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 105
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 106
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 107
+#define GCC_SDCC1_AHB_CLK 108
+#define GCC_SDCC1_APPS_CLK 109
+#define GCC_SDCC1_APPS_CLK_SRC 110
+#define GCC_SDCC1_ICE_CORE_CLK 111
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 112
+#define GCC_SDCC2_AHB_CLK 113
+#define GCC_SDCC2_APPS_CLK 114
+#define GCC_SDCC2_APPS_CLK_SRC 115
+#define GCC_SDCC4_AHB_CLK 116
+#define GCC_SDCC4_APPS_CLK 117
+#define GCC_SDCC4_APPS_CLK_SRC 118
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 119
+#define GCC_THROTTLE_PCIE_AHB_CLK 120
+#define GCC_TITAN_NRT_THROTTLE_CORE_CLK 121
+#define GCC_TITAN_RT_THROTTLE_CORE_CLK 122
+#define GCC_UFS_1_CLKREF_EN 123
+#define GCC_UFS_PHY_AHB_CLK 124
+#define GCC_UFS_PHY_AXI_CLK 125
+#define GCC_UFS_PHY_AXI_CLK_SRC 126
+#define GCC_UFS_PHY_ICE_CORE_CLK 127
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 128
+#define GCC_UFS_PHY_PHY_AUX_CLK 129
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 130
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 131
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 132
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 133
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 134
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 135
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 136
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 137
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 138
+#define GCC_USB30_PRIM_MASTER_CLK 139
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 140
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 141
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 142
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 143
+#define GCC_USB30_PRIM_SLEEP_CLK 144
+#define GCC_USB30_SEC_MASTER_CLK 145
+#define GCC_USB30_SEC_MASTER_CLK_SRC 146
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 147
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 148
+#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 149
+#define GCC_USB30_SEC_SLEEP_CLK 150
+#define GCC_USB3_PRIM_PHY_AUX_CLK 151
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 152
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 153
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 154
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 155
+#define GCC_USB3_SEC_PHY_AUX_CLK 156
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 157
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 158
+#define GCC_USB3_SEC_PHY_PIPE_CLK 159
+#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 160
+#define GCC_VIDEO_AHB_CLK 161
+#define GCC_VIDEO_AXI0_CLK 162
+#define GCC_VIDEO_MVP_THROTTLE_CORE_CLK 163
+#define GCC_VIDEO_XO_CLK 164
+#define GCC_GPLL0_MAIN_DIV_CDIV 165
+#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 166
+#define GCC_QSPI_CORE_CLK 167
+#define GCC_QSPI_CORE_CLK_SRC 168
+#define GCC_CFG_NOC_LPASS_CLK 169
+#define GCC_MSS_GPLL0_MAIN_DIV_CLK_SRC 170
+#define GCC_MSS_CFG_AHB_CLK 171
+#define GCC_MSS_OFFLINE_AXI_CLK 172
+#define GCC_MSS_SNOC_AXI_CLK 173
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK 174
+#define GCC_MSS_Q6SS_BOOT_CLK_SRC 175
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 176
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 177
+#define GCC_AGGRE_NOC_PCIE_CENTER_SF_AXI_CLK 178
+#define GCC_PCIE_CLKREF_EN 179
+#define GCC_WPSS_AHB_CLK 180
+#define GCC_WPSS_AHB_BDG_MST_CLK 181
+#define GCC_WPSS_RSCP_CLK 182
+#define GCC_EDP_CLKREF_EN 183
+#define GCC_SEC_CTRL_CLK_SRC 184
+
+/* GCC power domains */
+#define GCC_PCIE_0_GDSC 0
+#define GCC_PCIE_1_GDSC 1
+#define GCC_UFS_PHY_GDSC 2
+#define GCC_USB30_PRIM_GDSC 3
+#define GCC_USB30_SEC_GDSC 4
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 5
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 6
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC 7
+#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 8
+#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 9
+
+/* GCC resets */
+#define GCC_PCIE_0_BCR 0
+#define GCC_PCIE_0_PHY_BCR 1
+#define GCC_PCIE_1_BCR 2
+#define GCC_PCIE_1_PHY_BCR 3
+#define GCC_QUSB2PHY_PRIM_BCR 4
+#define GCC_QUSB2PHY_SEC_BCR 5
+#define GCC_SDCC1_BCR 6
+#define GCC_SDCC2_BCR 7
+#define GCC_SDCC4_BCR 8
+#define GCC_UFS_PHY_BCR 9
+#define GCC_USB30_PRIM_BCR 10
+#define GCC_USB30_SEC_BCR 11
+#define GCC_USB3_DP_PHY_PRIM_BCR 12
+#define GCC_USB3_PHY_PRIM_BCR 13
+#define GCC_USB3PHY_PHY_PRIM_BCR 14
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 15
+
+#endif
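The *_BCR entries are block-level resets consumed through the reset binding; the indexing works exactly like the clock and power-domain cells. A hypothetical PCIe consumer sketch for illustration only (the &pcie0 label and the "pci" reset-names value are assumptions, not taken from an sc7280 device tree):

    #include <dt-bindings/clock/qcom,gcc-sc7280.h>

    &pcie0 {
            /* GCC_PCIE_0_BCR (index 0) asserts the PCIe0 block reset */
            resets = <&gcc GCC_PCIE_0_BCR>;
            reset-names = "pci";
    };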
diff --git a/include/dt-bindings/clock/qcom,gcc-sc8180x.h b/include/dt-bindings/clock/qcom,gcc-sc8180x.h
new file mode 100644
index 000000000000..e893415ae13d
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sc8180x.h
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SC8180X_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SC8180X_H
+
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 0
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 1
+#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK 2
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 3
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 4
+#define GCC_AGGRE_USB3_MP_AXI_CLK 5
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 6
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 7
+#define GCC_BOOT_ROM_AHB_CLK 8
+#define GCC_CAMERA_HF_AXI_CLK 9
+#define GCC_CAMERA_SF_AXI_CLK 10
+#define GCC_CFG_NOC_USB3_MP_AXI_CLK 11
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 12
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 13
+#define GCC_CPUSS_AHB_CLK 14
+#define GCC_CPUSS_AHB_CLK_SRC 15
+#define GCC_CPUSS_RBCPR_CLK 16
+#define GCC_DDRSS_GPU_AXI_CLK 17
+#define GCC_DISP_HF_AXI_CLK 18
+#define GCC_DISP_SF_AXI_CLK 19
+#define GCC_EMAC_AXI_CLK 20
+#define GCC_EMAC_PTP_CLK 21
+#define GCC_EMAC_PTP_CLK_SRC 22
+#define GCC_EMAC_RGMII_CLK 23
+#define GCC_EMAC_RGMII_CLK_SRC 24
+#define GCC_EMAC_SLV_AHB_CLK 25
+#define GCC_GP1_CLK 26
+#define GCC_GP1_CLK_SRC 27
+#define GCC_GP2_CLK 28
+#define GCC_GP2_CLK_SRC 29
+#define GCC_GP3_CLK 30
+#define GCC_GP3_CLK_SRC 31
+#define GCC_GP4_CLK 32
+#define GCC_GP4_CLK_SRC 33
+#define GCC_GP5_CLK 34
+#define GCC_GP5_CLK_SRC 35
+#define GCC_GPU_GPLL0_CLK_SRC 36
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 37
+#define GCC_GPU_MEMNOC_GFX_CLK 38
+#define GCC_GPU_SNOC_DVM_GFX_CLK 39
+#define GCC_NPU_AT_CLK 40
+#define GCC_NPU_AXI_CLK 41
+#define GCC_NPU_AXI_CLK_SRC 42
+#define GCC_NPU_GPLL0_CLK_SRC 43
+#define GCC_NPU_GPLL0_DIV_CLK_SRC 44
+#define GCC_NPU_TRIG_CLK 45
+#define GCC_PCIE0_PHY_REFGEN_CLK 46
+#define GCC_PCIE1_PHY_REFGEN_CLK 47
+#define GCC_PCIE2_PHY_REFGEN_CLK 48
+#define GCC_PCIE3_PHY_REFGEN_CLK 49
+#define GCC_PCIE_0_AUX_CLK 50
+#define GCC_PCIE_0_AUX_CLK_SRC 51
+#define GCC_PCIE_0_CFG_AHB_CLK 52
+#define GCC_PCIE_0_MSTR_AXI_CLK 53
+#define GCC_PCIE_0_PIPE_CLK 54
+#define GCC_PCIE_0_SLV_AXI_CLK 55
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 56
+#define GCC_PCIE_1_AUX_CLK 57
+#define GCC_PCIE_1_AUX_CLK_SRC 58
+#define GCC_PCIE_1_CFG_AHB_CLK 59
+#define GCC_PCIE_1_MSTR_AXI_CLK 60
+#define GCC_PCIE_1_PIPE_CLK 61
+#define GCC_PCIE_1_SLV_AXI_CLK 62
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 63
+#define GCC_PCIE_2_AUX_CLK 64
+#define GCC_PCIE_2_AUX_CLK_SRC 65
+#define GCC_PCIE_2_CFG_AHB_CLK 66
+#define GCC_PCIE_2_MSTR_AXI_CLK 67
+#define GCC_PCIE_2_PIPE_CLK 68
+#define GCC_PCIE_2_SLV_AXI_CLK 69
+#define GCC_PCIE_2_SLV_Q2A_AXI_CLK 70
+#define GCC_PCIE_3_AUX_CLK 71
+#define GCC_PCIE_3_AUX_CLK_SRC 72
+#define GCC_PCIE_3_CFG_AHB_CLK 73
+#define GCC_PCIE_3_MSTR_AXI_CLK 74
+#define GCC_PCIE_3_PIPE_CLK 75
+#define GCC_PCIE_3_SLV_AXI_CLK 76
+#define GCC_PCIE_3_SLV_Q2A_AXI_CLK 77
+#define GCC_PCIE_PHY_AUX_CLK 78
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 79
+#define GCC_PDM2_CLK 80
+#define GCC_PDM2_CLK_SRC 81
+#define GCC_PDM_AHB_CLK 82
+#define GCC_PDM_XO4_CLK 83
+#define GCC_PRNG_AHB_CLK 84
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 85
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 86
+#define GCC_QMIP_DISP_AHB_CLK 87
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 88
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 89
+#define GCC_QSPI_1_CNOC_PERIPH_AHB_CLK 90
+#define GCC_QSPI_1_CORE_CLK 91
+#define GCC_QSPI_1_CORE_CLK_SRC 92
+#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 93
+#define GCC_QSPI_CORE_CLK 94
+#define GCC_QSPI_CORE_CLK_SRC 95
+#define GCC_QUPV3_WRAP0_S0_CLK 96
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 97
+#define GCC_QUPV3_WRAP0_S1_CLK 98
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 99
+#define GCC_QUPV3_WRAP0_S2_CLK 100
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 101
+#define GCC_QUPV3_WRAP0_S3_CLK 102
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 103
+#define GCC_QUPV3_WRAP0_S4_CLK 104
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 105
+#define GCC_QUPV3_WRAP0_S5_CLK 106
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 107
+#define GCC_QUPV3_WRAP0_S6_CLK 108
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 109
+#define GCC_QUPV3_WRAP0_S7_CLK 110
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 111
+#define GCC_QUPV3_WRAP1_S0_CLK 112
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 113
+#define GCC_QUPV3_WRAP1_S1_CLK 114
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 115
+#define GCC_QUPV3_WRAP1_S2_CLK 116
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 117
+#define GCC_QUPV3_WRAP1_S3_CLK 118
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 119
+#define GCC_QUPV3_WRAP1_S4_CLK 120
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 121
+#define GCC_QUPV3_WRAP1_S5_CLK 122
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 123
+#define GCC_QUPV3_WRAP2_S0_CLK 124
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 125
+#define GCC_QUPV3_WRAP2_S1_CLK 126
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 127
+#define GCC_QUPV3_WRAP2_S2_CLK 128
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 129
+#define GCC_QUPV3_WRAP2_S3_CLK 130
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 131
+#define GCC_QUPV3_WRAP2_S4_CLK 132
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 133
+#define GCC_QUPV3_WRAP2_S5_CLK 134
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 135
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 136
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 137
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 138
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 139
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 140
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 141
+#define GCC_SDCC2_AHB_CLK 142
+#define GCC_SDCC2_APPS_CLK 143
+#define GCC_SDCC2_APPS_CLK_SRC 144
+#define GCC_SDCC4_AHB_CLK 145
+#define GCC_SDCC4_APPS_CLK 146
+#define GCC_SDCC4_APPS_CLK_SRC 147
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 148
+#define GCC_TSIF_AHB_CLK 149
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 150
+#define GCC_TSIF_REF_CLK 151
+#define GCC_TSIF_REF_CLK_SRC 152
+#define GCC_UFS_CARD_2_AHB_CLK 153
+#define GCC_UFS_CARD_2_AXI_CLK 154
+#define GCC_UFS_CARD_2_AXI_CLK_SRC 155
+#define GCC_UFS_CARD_2_ICE_CORE_CLK 156
+#define GCC_UFS_CARD_2_ICE_CORE_CLK_SRC 157
+#define GCC_UFS_CARD_2_PHY_AUX_CLK 158
+#define GCC_UFS_CARD_2_PHY_AUX_CLK_SRC 159
+#define GCC_UFS_CARD_2_RX_SYMBOL_0_CLK 160
+#define GCC_UFS_CARD_2_RX_SYMBOL_1_CLK 161
+#define GCC_UFS_CARD_2_TX_SYMBOL_0_CLK 162
+#define GCC_UFS_CARD_2_UNIPRO_CORE_CLK 163
+#define GCC_UFS_CARD_2_UNIPRO_CORE_CLK_SRC 164
+#define GCC_UFS_CARD_AHB_CLK 165
+#define GCC_UFS_CARD_AXI_CLK 166
+#define GCC_UFS_CARD_AXI_CLK_SRC 167
+#define GCC_UFS_CARD_AXI_HW_CTL_CLK 168
+#define GCC_UFS_CARD_ICE_CORE_CLK 169
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 170
+#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK 171
+#define GCC_UFS_CARD_PHY_AUX_CLK 172
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 173
+#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK 174
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 175
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 176
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 177
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 178
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 179
+#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK 180
+#define GCC_UFS_PHY_AHB_CLK 181
+#define GCC_UFS_PHY_AXI_CLK 182
+#define GCC_UFS_PHY_AXI_CLK_SRC 183
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 184
+#define GCC_UFS_PHY_ICE_CORE_CLK 185
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 186
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 187
+#define GCC_UFS_PHY_PHY_AUX_CLK 188
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 189
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 190
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 191
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 192
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 193
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 194
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 195
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 196
+#define GCC_USB30_MP_MASTER_CLK 197
+#define GCC_USB30_MP_MASTER_CLK_SRC 198
+#define GCC_USB30_MP_MOCK_UTMI_CLK 199
+#define GCC_USB30_MP_MOCK_UTMI_CLK_SRC 200
+#define GCC_USB30_MP_SLEEP_CLK 201
+#define GCC_USB30_PRIM_MASTER_CLK 202
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 203
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 204
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 205
+#define GCC_USB30_PRIM_SLEEP_CLK 206
+#define GCC_USB30_SEC_MASTER_CLK 207
+#define GCC_USB30_SEC_MASTER_CLK_SRC 208
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 209
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 210
+#define GCC_USB30_SEC_SLEEP_CLK 211
+#define GCC_USB3_MP_PHY_AUX_CLK 212
+#define GCC_USB3_MP_PHY_AUX_CLK_SRC 213
+#define GCC_USB3_MP_PHY_COM_AUX_CLK 214
+#define GCC_USB3_MP_PHY_PIPE_0_CLK 215
+#define GCC_USB3_MP_PHY_PIPE_1_CLK 216
+#define GCC_USB3_PRIM_PHY_AUX_CLK 217
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 218
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 219
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 220
+#define GCC_USB3_SEC_PHY_AUX_CLK 221
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 222
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 223
+#define GCC_USB3_SEC_PHY_PIPE_CLK 224
+#define GCC_VIDEO_AXI0_CLK 225
+#define GCC_VIDEO_AXI1_CLK 226
+#define GCC_VIDEO_AXIC_CLK 227
+#define GPLL0 228
+#define GPLL0_OUT_EVEN 229
+#define GPLL1 230
+#define GPLL4 231
+#define GPLL7 232
+#define GCC_PCIE_0_CLKREF_CLK 233
+#define GCC_PCIE_1_CLKREF_CLK 234
+#define GCC_PCIE_2_CLKREF_CLK 235
+#define GCC_PCIE_3_CLKREF_CLK 236
+#define GCC_USB3_PRIM_CLKREF_CLK 237
+#define GCC_USB3_SEC_CLKREF_CLK 238
+
+#define GCC_EMAC_BCR 0
+#define GCC_GPU_BCR 1
+#define GCC_MMSS_BCR 2
+#define GCC_NPU_BCR 3
+#define GCC_PCIE_0_BCR 4
+#define GCC_PCIE_0_PHY_BCR 5
+#define GCC_PCIE_1_BCR 6
+#define GCC_PCIE_1_PHY_BCR 7
+#define GCC_PCIE_2_BCR 8
+#define GCC_PCIE_2_PHY_BCR 9
+#define GCC_PCIE_3_BCR 10
+#define GCC_PCIE_3_PHY_BCR 11
+#define GCC_PCIE_PHY_BCR 12
+#define GCC_PDM_BCR 13
+#define GCC_PRNG_BCR 14
+#define GCC_QSPI_1_BCR 15
+#define GCC_QSPI_BCR 16
+#define GCC_QUPV3_WRAPPER_0_BCR 17
+#define GCC_QUPV3_WRAPPER_1_BCR 18
+#define GCC_QUPV3_WRAPPER_2_BCR 19
+#define GCC_QUSB2PHY_5_BCR 20
+#define GCC_QUSB2PHY_MP0_BCR 21
+#define GCC_QUSB2PHY_MP1_BCR 22
+#define GCC_QUSB2PHY_PRIM_BCR 23
+#define GCC_QUSB2PHY_SEC_BCR 24
+#define GCC_USB3_PHY_PRIM_SP0_BCR 25
+#define GCC_USB3_PHY_PRIM_SP1_BCR 26
+#define GCC_USB3_DP_PHY_PRIM_SP0_BCR 27
+#define GCC_USB3_DP_PHY_PRIM_SP1_BCR 28
+#define GCC_USB3_PHY_SEC_BCR 29
+#define GCC_USB3PHY_PHY_SEC_BCR 30
+#define GCC_SDCC2_BCR 31
+#define GCC_SDCC4_BCR 32
+#define GCC_TSIF_BCR 33
+#define GCC_UFS_CARD_2_BCR 34
+#define GCC_UFS_CARD_BCR 35
+#define GCC_UFS_PHY_BCR 36
+#define GCC_USB30_MP_BCR 37
+#define GCC_USB30_PRIM_BCR 38
+#define GCC_USB30_SEC_BCR 39
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 40
+#define GCC_VIDEO_AXIC_CLK_BCR 41
+#define GCC_VIDEO_AXI0_CLK_BCR 42
+#define GCC_VIDEO_AXI1_CLK_BCR 43
+#define GCC_USB3_DP_PHY_SEC_BCR 44
+
+/* GCC GDSCRs */
+#define EMAC_GDSC 0
+#define PCIE_0_GDSC 1
+#define PCIE_1_GDSC 2
+#define PCIE_2_GDSC 3
+#define PCIE_3_GDSC 4
+#define UFS_CARD_2_GDSC 5
+#define UFS_CARD_GDSC 6
+#define UFS_PHY_GDSC 7
+#define USB30_MP_GDSC 8
+#define USB30_PRIM_GDSC 9
+#define USB30_SEC_GDSC 10
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sc8280xp.h b/include/dt-bindings/clock/qcom,gcc-sc8280xp.h
new file mode 100644
index 000000000000..cb2fb638825c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sc8280xp.h
@@ -0,0 +1,496 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_DIREWOLF_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_DIREWOLF_H
+
+/* GCC clocks */
+#define GCC_GPLL0 0
+#define GCC_GPLL0_OUT_EVEN 1
+#define GCC_GPLL2 2
+#define GCC_GPLL4 3
+#define GCC_GPLL7 4
+#define GCC_GPLL8 5
+#define GCC_GPLL9 6
+#define GCC_AGGRE_NOC_PCIE0_TUNNEL_AXI_CLK 7
+#define GCC_AGGRE_NOC_PCIE1_TUNNEL_AXI_CLK 8
+#define GCC_AGGRE_NOC_PCIE_4_AXI_CLK 9
+#define GCC_AGGRE_NOC_PCIE_SOUTH_SF_AXI_CLK 10
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 11
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 12
+#define GCC_AGGRE_USB3_MP_AXI_CLK 13
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 14
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 15
+#define GCC_AGGRE_USB4_1_AXI_CLK 16
+#define GCC_AGGRE_USB4_AXI_CLK 17
+#define GCC_AGGRE_USB_NOC_AXI_CLK 18
+#define GCC_AGGRE_USB_NOC_NORTH_AXI_CLK 19
+#define GCC_AGGRE_USB_NOC_SOUTH_AXI_CLK 20
+#define GCC_AHB2PHY0_CLK 21
+#define GCC_AHB2PHY2_CLK 22
+#define GCC_BOOT_ROM_AHB_CLK 23
+#define GCC_CAMERA_AHB_CLK 24
+#define GCC_CAMERA_HF_AXI_CLK 25
+#define GCC_CAMERA_SF_AXI_CLK 26
+#define GCC_CAMERA_THROTTLE_NRT_AXI_CLK 27
+#define GCC_CAMERA_THROTTLE_RT_AXI_CLK 28
+#define GCC_CAMERA_THROTTLE_XO_CLK 29
+#define GCC_CAMERA_XO_CLK 30
+#define GCC_CFG_NOC_USB3_MP_AXI_CLK 31
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 32
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 33
+#define GCC_CNOC_PCIE0_TUNNEL_CLK 34
+#define GCC_CNOC_PCIE1_TUNNEL_CLK 35
+#define GCC_CNOC_PCIE4_QX_CLK 36
+#define GCC_DDRSS_GPU_AXI_CLK 37
+#define GCC_DDRSS_PCIE_SF_TBU_CLK 38
+#define GCC_DISP1_AHB_CLK 39
+#define GCC_DISP1_HF_AXI_CLK 40
+#define GCC_DISP1_SF_AXI_CLK 41
+#define GCC_DISP1_THROTTLE_NRT_AXI_CLK 42
+#define GCC_DISP1_THROTTLE_RT_AXI_CLK 43
+#define GCC_DISP1_XO_CLK 44
+#define GCC_DISP_AHB_CLK 45
+#define GCC_DISP_HF_AXI_CLK 46
+#define GCC_DISP_SF_AXI_CLK 47
+#define GCC_DISP_THROTTLE_NRT_AXI_CLK 48
+#define GCC_DISP_THROTTLE_RT_AXI_CLK 49
+#define GCC_DISP_XO_CLK 50
+#define GCC_EMAC0_AXI_CLK 51
+#define GCC_EMAC0_PTP_CLK 52
+#define GCC_EMAC0_PTP_CLK_SRC 53
+#define GCC_EMAC0_RGMII_CLK 54
+#define GCC_EMAC0_RGMII_CLK_SRC 55
+#define GCC_EMAC0_SLV_AHB_CLK 56
+#define GCC_EMAC1_AXI_CLK 57
+#define GCC_EMAC1_PTP_CLK 58
+#define GCC_EMAC1_PTP_CLK_SRC 59
+#define GCC_EMAC1_RGMII_CLK 60
+#define GCC_EMAC1_RGMII_CLK_SRC 61
+#define GCC_EMAC1_SLV_AHB_CLK 62
+#define GCC_GP1_CLK 63
+#define GCC_GP1_CLK_SRC 64
+#define GCC_GP2_CLK 65
+#define GCC_GP2_CLK_SRC 66
+#define GCC_GP3_CLK 67
+#define GCC_GP3_CLK_SRC 68
+#define GCC_GP4_CLK 69
+#define GCC_GP4_CLK_SRC 70
+#define GCC_GP5_CLK 71
+#define GCC_GP5_CLK_SRC 72
+#define GCC_GPU_CFG_AHB_CLK 73
+#define GCC_GPU_GPLL0_CLK_SRC 74
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 75
+#define GCC_GPU_IREF_EN 76
+#define GCC_GPU_MEMNOC_GFX_CLK 77
+#define GCC_GPU_SNOC_DVM_GFX_CLK 78
+#define GCC_GPU_TCU_THROTTLE_AHB_CLK 79
+#define GCC_GPU_TCU_THROTTLE_CLK 80
+#define GCC_PCIE0_PHY_RCHNG_CLK 81
+#define GCC_PCIE1_PHY_RCHNG_CLK 82
+#define GCC_PCIE2A_PHY_RCHNG_CLK 83
+#define GCC_PCIE2B_PHY_RCHNG_CLK 84
+#define GCC_PCIE3A_PHY_RCHNG_CLK 85
+#define GCC_PCIE3B_PHY_RCHNG_CLK 86
+#define GCC_PCIE4_PHY_RCHNG_CLK 87
+#define GCC_PCIE_0_AUX_CLK 88
+#define GCC_PCIE_0_AUX_CLK_SRC 89
+#define GCC_PCIE_0_CFG_AHB_CLK 90
+#define GCC_PCIE_0_MSTR_AXI_CLK 91
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 92
+#define GCC_PCIE_0_PIPE_CLK 93
+#define GCC_PCIE_0_SLV_AXI_CLK 94
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 95
+#define GCC_PCIE_1_AUX_CLK 96
+#define GCC_PCIE_1_AUX_CLK_SRC 97
+#define GCC_PCIE_1_CFG_AHB_CLK 98
+#define GCC_PCIE_1_MSTR_AXI_CLK 99
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 100
+#define GCC_PCIE_1_PIPE_CLK 101
+#define GCC_PCIE_1_SLV_AXI_CLK 102
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 103
+#define GCC_PCIE_2A2B_CLKREF_CLK 104
+#define GCC_PCIE_2A_AUX_CLK 105
+#define GCC_PCIE_2A_AUX_CLK_SRC 106
+#define GCC_PCIE_2A_CFG_AHB_CLK 107
+#define GCC_PCIE_2A_MSTR_AXI_CLK 108
+#define GCC_PCIE_2A_PHY_RCHNG_CLK_SRC 109
+#define GCC_PCIE_2A_PIPE_CLK 110
+#define GCC_PCIE_2A_PIPE_CLK_SRC 111
+#define GCC_PCIE_2A_PIPE_DIV_CLK_SRC 112
+#define GCC_PCIE_2A_PIPEDIV2_CLK 113
+#define GCC_PCIE_2A_SLV_AXI_CLK 114
+#define GCC_PCIE_2A_SLV_Q2A_AXI_CLK 115
+#define GCC_PCIE_2B_AUX_CLK 116
+#define GCC_PCIE_2B_AUX_CLK_SRC 117
+#define GCC_PCIE_2B_CFG_AHB_CLK 118
+#define GCC_PCIE_2B_MSTR_AXI_CLK 119
+#define GCC_PCIE_2B_PHY_RCHNG_CLK_SRC 120
+#define GCC_PCIE_2B_PIPE_CLK 121
+#define GCC_PCIE_2B_PIPE_CLK_SRC 122
+#define GCC_PCIE_2B_PIPE_DIV_CLK_SRC 123
+#define GCC_PCIE_2B_PIPEDIV2_CLK 124
+#define GCC_PCIE_2B_SLV_AXI_CLK 125
+#define GCC_PCIE_2B_SLV_Q2A_AXI_CLK 126
+#define GCC_PCIE_3A3B_CLKREF_CLK 127
+#define GCC_PCIE_3A_AUX_CLK 128
+#define GCC_PCIE_3A_AUX_CLK_SRC 129
+#define GCC_PCIE_3A_CFG_AHB_CLK 130
+#define GCC_PCIE_3A_MSTR_AXI_CLK 131
+#define GCC_PCIE_3A_PHY_RCHNG_CLK_SRC 132
+#define GCC_PCIE_3A_PIPE_CLK 133
+#define GCC_PCIE_3A_PIPE_CLK_SRC 134
+#define GCC_PCIE_3A_PIPE_DIV_CLK_SRC 135
+#define GCC_PCIE_3A_PIPEDIV2_CLK 136
+#define GCC_PCIE_3A_SLV_AXI_CLK 137
+#define GCC_PCIE_3A_SLV_Q2A_AXI_CLK 138
+#define GCC_PCIE_3B_AUX_CLK 139
+#define GCC_PCIE_3B_AUX_CLK_SRC 140
+#define GCC_PCIE_3B_CFG_AHB_CLK 141
+#define GCC_PCIE_3B_MSTR_AXI_CLK 142
+#define GCC_PCIE_3B_PHY_RCHNG_CLK_SRC 143
+#define GCC_PCIE_3B_PIPE_CLK 144
+#define GCC_PCIE_3B_PIPE_CLK_SRC 145
+#define GCC_PCIE_3B_PIPE_DIV_CLK_SRC 146
+#define GCC_PCIE_3B_PIPEDIV2_CLK 147
+#define GCC_PCIE_3B_SLV_AXI_CLK 148
+#define GCC_PCIE_3B_SLV_Q2A_AXI_CLK 149
+#define GCC_PCIE_4_AUX_CLK 150
+#define GCC_PCIE_4_AUX_CLK_SRC 151
+#define GCC_PCIE_4_CFG_AHB_CLK 152
+#define GCC_PCIE_4_CLKREF_CLK 153
+#define GCC_PCIE_4_MSTR_AXI_CLK 154
+#define GCC_PCIE_4_PHY_RCHNG_CLK_SRC 155
+#define GCC_PCIE_4_PIPE_CLK 156
+#define GCC_PCIE_4_PIPE_CLK_SRC 157
+#define GCC_PCIE_4_PIPE_DIV_CLK_SRC 158
+#define GCC_PCIE_4_PIPEDIV2_CLK 159
+#define GCC_PCIE_4_SLV_AXI_CLK 160
+#define GCC_PCIE_4_SLV_Q2A_AXI_CLK 161
+#define GCC_PCIE_RSCC_AHB_CLK 162
+#define GCC_PCIE_RSCC_XO_CLK 163
+#define GCC_PCIE_RSCC_XO_CLK_SRC 164
+#define GCC_PCIE_THROTTLE_CFG_CLK 165
+#define GCC_PDM2_CLK 166
+#define GCC_PDM2_CLK_SRC 167
+#define GCC_PDM_AHB_CLK 168
+#define GCC_PDM_XO4_CLK 169
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 170
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 171
+#define GCC_QMIP_DISP1_AHB_CLK 172
+#define GCC_QMIP_DISP1_ROT_AHB_CLK 173
+#define GCC_QMIP_DISP_AHB_CLK 174
+#define GCC_QMIP_DISP_ROT_AHB_CLK 175
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 176
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 177
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 178
+#define GCC_QUPV3_WRAP0_CORE_CLK 179
+#define GCC_QUPV3_WRAP0_QSPI0_CLK 180
+#define GCC_QUPV3_WRAP0_S0_CLK 181
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 182
+#define GCC_QUPV3_WRAP0_S1_CLK 183
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 184
+#define GCC_QUPV3_WRAP0_S2_CLK 185
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 186
+#define GCC_QUPV3_WRAP0_S3_CLK 187
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 188
+#define GCC_QUPV3_WRAP0_S4_CLK 189
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 190
+#define GCC_QUPV3_WRAP0_S4_DIV_CLK_SRC 191
+#define GCC_QUPV3_WRAP0_S5_CLK 192
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 193
+#define GCC_QUPV3_WRAP0_S6_CLK 194
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 195
+#define GCC_QUPV3_WRAP0_S7_CLK 196
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 197
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 198
+#define GCC_QUPV3_WRAP1_CORE_CLK 199
+#define GCC_QUPV3_WRAP1_QSPI0_CLK 200
+#define GCC_QUPV3_WRAP1_S0_CLK 201
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 202
+#define GCC_QUPV3_WRAP1_S1_CLK 203
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 204
+#define GCC_QUPV3_WRAP1_S2_CLK 205
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 206
+#define GCC_QUPV3_WRAP1_S3_CLK 207
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 208
+#define GCC_QUPV3_WRAP1_S4_CLK 209
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 210
+#define GCC_QUPV3_WRAP1_S4_DIV_CLK_SRC 211
+#define GCC_QUPV3_WRAP1_S5_CLK 212
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 213
+#define GCC_QUPV3_WRAP1_S6_CLK 214
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 215
+#define GCC_QUPV3_WRAP1_S7_CLK 216
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 217
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 218
+#define GCC_QUPV3_WRAP2_CORE_CLK 219
+#define GCC_QUPV3_WRAP2_QSPI0_CLK 220
+#define GCC_QUPV3_WRAP2_S0_CLK 221
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 222
+#define GCC_QUPV3_WRAP2_S1_CLK 223
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 224
+#define GCC_QUPV3_WRAP2_S2_CLK 225
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 226
+#define GCC_QUPV3_WRAP2_S3_CLK 227
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 228
+#define GCC_QUPV3_WRAP2_S4_CLK 229
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 230
+#define GCC_QUPV3_WRAP2_S4_DIV_CLK_SRC 231
+#define GCC_QUPV3_WRAP2_S5_CLK 232
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 233
+#define GCC_QUPV3_WRAP2_S6_CLK 234
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 235
+#define GCC_QUPV3_WRAP2_S7_CLK 236
+#define GCC_QUPV3_WRAP2_S7_CLK_SRC 237
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 238
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 239
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 240
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 241
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 242
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 243
+#define GCC_SDCC2_AHB_CLK 244
+#define GCC_SDCC2_APPS_CLK 245
+#define GCC_SDCC2_APPS_CLK_SRC 246
+#define GCC_SDCC4_AHB_CLK 247
+#define GCC_SDCC4_APPS_CLK 248
+#define GCC_SDCC4_APPS_CLK_SRC 249
+#define GCC_SYS_NOC_USB_AXI_CLK 250
+#define GCC_UFS_1_CARD_CLKREF_CLK 251
+#define GCC_UFS_CARD_AHB_CLK 252
+#define GCC_UFS_CARD_AXI_CLK 253
+#define GCC_UFS_CARD_AXI_CLK_SRC 254
+#define GCC_UFS_CARD_CLKREF_CLK 255
+#define GCC_UFS_CARD_ICE_CORE_CLK 256
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 257
+#define GCC_UFS_CARD_PHY_AUX_CLK 258
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 259
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 260
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK_SRC 261
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 262
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK_SRC 263
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 264
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK_SRC 265
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 266
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 267
+#define GCC_UFS_PHY_AHB_CLK 268
+#define GCC_UFS_PHY_AXI_CLK 269
+#define GCC_UFS_PHY_AXI_CLK_SRC 270
+#define GCC_UFS_PHY_ICE_CORE_CLK 271
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 272
+#define GCC_UFS_PHY_PHY_AUX_CLK 273
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 274
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 275
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 276
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 277
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 278
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 279
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 280
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 281
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 282
+#define GCC_UFS_REF_CLKREF_CLK 283
+#define GCC_USB2_HS0_CLKREF_CLK 284
+#define GCC_USB2_HS1_CLKREF_CLK 285
+#define GCC_USB2_HS2_CLKREF_CLK 286
+#define GCC_USB2_HS3_CLKREF_CLK 287
+#define GCC_USB30_MP_MASTER_CLK 288
+#define GCC_USB30_MP_MASTER_CLK_SRC 289
+#define GCC_USB30_MP_MOCK_UTMI_CLK 290
+#define GCC_USB30_MP_MOCK_UTMI_CLK_SRC 291
+#define GCC_USB30_MP_MOCK_UTMI_POSTDIV_CLK_SRC 292
+#define GCC_USB30_MP_SLEEP_CLK 293
+#define GCC_USB30_PRIM_MASTER_CLK 294
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 295
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 296
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 297
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 298
+#define GCC_USB30_PRIM_SLEEP_CLK 299
+#define GCC_USB30_SEC_MASTER_CLK 300
+#define GCC_USB30_SEC_MASTER_CLK_SRC 301
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 302
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 303
+#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 304
+#define GCC_USB30_SEC_SLEEP_CLK 305
+#define GCC_USB34_PRIM_PHY_PIPE_CLK_SRC 306
+#define GCC_USB34_SEC_PHY_PIPE_CLK_SRC 307
+#define GCC_USB3_MP0_CLKREF_CLK 308
+#define GCC_USB3_MP1_CLKREF_CLK 309
+#define GCC_USB3_MP_PHY_AUX_CLK 310
+#define GCC_USB3_MP_PHY_AUX_CLK_SRC 311
+#define GCC_USB3_MP_PHY_COM_AUX_CLK 312
+#define GCC_USB3_MP_PHY_PIPE_0_CLK 313
+#define GCC_USB3_MP_PHY_PIPE_0_CLK_SRC 314
+#define GCC_USB3_MP_PHY_PIPE_1_CLK 315
+#define GCC_USB3_MP_PHY_PIPE_1_CLK_SRC 316
+#define GCC_USB3_PRIM_PHY_AUX_CLK 317
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 318
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 319
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 320
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 321
+#define GCC_USB3_SEC_PHY_AUX_CLK 322
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 323
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 324
+#define GCC_USB3_SEC_PHY_PIPE_CLK 325
+#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 326
+#define GCC_USB4_1_CFG_AHB_CLK 327
+#define GCC_USB4_1_DP_CLK 328
+#define GCC_USB4_1_MASTER_CLK 329
+#define GCC_USB4_1_MASTER_CLK_SRC 330
+#define GCC_USB4_1_PHY_DP_CLK_SRC 331
+#define GCC_USB4_1_PHY_P2RR2P_PIPE_CLK 332
+#define GCC_USB4_1_PHY_P2RR2P_PIPE_CLK_SRC 333
+#define GCC_USB4_1_PHY_PCIE_PIPE_CLK 334
+#define GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC 335
+#define GCC_USB4_1_PHY_PCIE_PIPE_MUX_CLK_SRC 336
+#define GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC 337
+#define GCC_USB4_1_PHY_RX0_CLK 338
+#define GCC_USB4_1_PHY_RX0_CLK_SRC 339
+#define GCC_USB4_1_PHY_RX1_CLK 340
+#define GCC_USB4_1_PHY_RX1_CLK_SRC 341
+#define GCC_USB4_1_PHY_SYS_CLK_SRC 342
+#define GCC_USB4_1_PHY_USB_PIPE_CLK 343
+#define GCC_USB4_1_SB_IF_CLK 344
+#define GCC_USB4_1_SB_IF_CLK_SRC 345
+#define GCC_USB4_1_SYS_CLK 346
+#define GCC_USB4_1_TMU_CLK 347
+#define GCC_USB4_1_TMU_CLK_SRC 348
+#define GCC_USB4_CFG_AHB_CLK 349
+#define GCC_USB4_CLKREF_CLK 350
+#define GCC_USB4_DP_CLK 351
+#define GCC_USB4_EUD_CLKREF_CLK 352
+#define GCC_USB4_MASTER_CLK 353
+#define GCC_USB4_MASTER_CLK_SRC 354
+#define GCC_USB4_PHY_DP_CLK_SRC 355
+#define GCC_USB4_PHY_P2RR2P_PIPE_CLK 356
+#define GCC_USB4_PHY_P2RR2P_PIPE_CLK_SRC 357
+#define GCC_USB4_PHY_PCIE_PIPE_CLK 358
+#define GCC_USB4_PHY_PCIE_PIPE_CLK_SRC 359
+#define GCC_USB4_PHY_PCIE_PIPE_MUX_CLK_SRC 360
+#define GCC_USB4_PHY_PCIE_PIPEGMUX_CLK_SRC 361
+#define GCC_USB4_PHY_RX0_CLK 362
+#define GCC_USB4_PHY_RX0_CLK_SRC 363
+#define GCC_USB4_PHY_RX1_CLK 364
+#define GCC_USB4_PHY_RX1_CLK_SRC 365
+#define GCC_USB4_PHY_SYS_CLK_SRC 366
+#define GCC_USB4_PHY_USB_PIPE_CLK 367
+#define GCC_USB4_SB_IF_CLK 368
+#define GCC_USB4_SB_IF_CLK_SRC 369
+#define GCC_USB4_SYS_CLK 370
+#define GCC_USB4_TMU_CLK 371
+#define GCC_USB4_TMU_CLK_SRC 372
+#define GCC_VIDEO_AHB_CLK 373
+#define GCC_VIDEO_AXI0_CLK 374
+#define GCC_VIDEO_AXI1_CLK 375
+#define GCC_VIDEO_CVP_THROTTLE_CLK 376
+#define GCC_VIDEO_VCODEC_THROTTLE_CLK 377
+#define GCC_VIDEO_XO_CLK 378
+#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK 379
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 380
+#define GCC_UFS_CARD_AXI_HW_CTL_CLK 381
+#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK 382
+#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK 383
+#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK 384
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 385
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 386
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 387
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 388
+
+/* GCC resets */
+#define GCC_EMAC0_BCR 0
+#define GCC_EMAC1_BCR 1
+#define GCC_PCIE_0_LINK_DOWN_BCR 2
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 3
+#define GCC_PCIE_0_PHY_BCR 4
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_TUNNEL_BCR 6
+#define GCC_PCIE_1_LINK_DOWN_BCR 7
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 8
+#define GCC_PCIE_1_PHY_BCR 9
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_1_TUNNEL_BCR 11
+#define GCC_PCIE_2A_BCR 12
+#define GCC_PCIE_2A_LINK_DOWN_BCR 13
+#define GCC_PCIE_2A_NOCSR_COM_PHY_BCR 14
+#define GCC_PCIE_2A_PHY_BCR 15
+#define GCC_PCIE_2A_PHY_NOCSR_COM_PHY_BCR 16
+#define GCC_PCIE_2B_BCR 17
+#define GCC_PCIE_2B_LINK_DOWN_BCR 18
+#define GCC_PCIE_2B_NOCSR_COM_PHY_BCR 19
+#define GCC_PCIE_2B_PHY_BCR 20
+#define GCC_PCIE_2B_PHY_NOCSR_COM_PHY_BCR 21
+#define GCC_PCIE_3A_BCR 22
+#define GCC_PCIE_3A_LINK_DOWN_BCR 23
+#define GCC_PCIE_3A_NOCSR_COM_PHY_BCR 24
+#define GCC_PCIE_3A_PHY_BCR 25
+#define GCC_PCIE_3A_PHY_NOCSR_COM_PHY_BCR 26
+#define GCC_PCIE_3B_BCR 27
+#define GCC_PCIE_3B_LINK_DOWN_BCR 28
+#define GCC_PCIE_3B_NOCSR_COM_PHY_BCR 29
+#define GCC_PCIE_3B_PHY_BCR 30
+#define GCC_PCIE_3B_PHY_NOCSR_COM_PHY_BCR 31
+#define GCC_PCIE_4_BCR 32
+#define GCC_PCIE_4_LINK_DOWN_BCR 33
+#define GCC_PCIE_4_NOCSR_COM_PHY_BCR 34
+#define GCC_PCIE_4_PHY_BCR 35
+#define GCC_PCIE_4_PHY_NOCSR_COM_PHY_BCR 36
+#define GCC_PCIE_PHY_CFG_AHB_BCR 37
+#define GCC_PCIE_PHY_COM_BCR 38
+#define GCC_PCIE_RSCC_BCR 39
+#define GCC_QUSB2PHY_HS0_MP_BCR 40
+#define GCC_QUSB2PHY_HS1_MP_BCR 41
+#define GCC_QUSB2PHY_HS2_MP_BCR 42
+#define GCC_QUSB2PHY_HS3_MP_BCR 43
+#define GCC_QUSB2PHY_PRIM_BCR 44
+#define GCC_QUSB2PHY_SEC_BCR 45
+#define GCC_SDCC2_BCR 46
+#define GCC_SDCC4_BCR 47
+#define GCC_UFS_CARD_BCR 48
+#define GCC_UFS_PHY_BCR 49
+#define GCC_USB2_PHY_PRIM_BCR 50
+#define GCC_USB2_PHY_SEC_BCR 51
+#define GCC_USB30_MP_BCR 52
+#define GCC_USB30_PRIM_BCR 53
+#define GCC_USB30_SEC_BCR 54
+#define GCC_USB3_DP_PHY_PRIM_BCR 55
+#define GCC_USB3_DP_PHY_SEC_BCR 56
+#define GCC_USB3_PHY_PRIM_BCR 57
+#define GCC_USB3_PHY_SEC_BCR 58
+#define GCC_USB3_UNIPHY_MP0_BCR 59
+#define GCC_USB3_UNIPHY_MP1_BCR 60
+#define GCC_USB3PHY_PHY_PRIM_BCR 61
+#define GCC_USB3PHY_PHY_SEC_BCR 62
+#define GCC_USB3UNIPHY_PHY_MP0_BCR 63
+#define GCC_USB3UNIPHY_PHY_MP1_BCR 64
+#define GCC_USB4_1_BCR 65
+#define GCC_USB4_1_DP_PHY_PRIM_BCR 66
+#define GCC_USB4_1_DPPHY_AUX_BCR 67
+#define GCC_USB4_1_PHY_PRIM_BCR 68
+#define GCC_USB4_BCR 69
+#define GCC_USB4_DP_PHY_PRIM_BCR 70
+#define GCC_USB4_DPPHY_AUX_BCR 71
+#define GCC_USB4_PHY_PRIM_BCR 72
+#define GCC_USB4PHY_1_PHY_PRIM_BCR 73
+#define GCC_USB4PHY_PHY_PRIM_BCR 74
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 75
+#define GCC_VIDEO_BCR 76
+#define GCC_VIDEO_AXI0_CLK_ARES 77
+#define GCC_VIDEO_AXI1_CLK_ARES 78
+
+/* GCC GDSCs */
+#define PCIE_0_TUNNEL_GDSC 0
+#define PCIE_1_TUNNEL_GDSC 1
+#define PCIE_2A_GDSC 2
+#define PCIE_2B_GDSC 3
+#define PCIE_3A_GDSC 4
+#define PCIE_3B_GDSC 5
+#define PCIE_4_GDSC 6
+#define UFS_CARD_GDSC 7
+#define UFS_PHY_GDSC 8
+#define USB30_MP_GDSC 9
+#define USB30_PRIM_GDSC 10
+#define USB30_SEC_GDSC 11
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm660.h b/include/dt-bindings/clock/qcom,gcc-sdm660.h
index 468302282913..df8a6f3d367e 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm660.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm660.h
@@ -152,5 +152,6 @@
#define GCC_USB_20_BCR 6
#define GCC_USB_30_BCR 7
#define GCC_USB_PHY_CFG_AHB2PHY_BCR 8
+#define GCC_MSS_RESTART 9

#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index 968fa65b9c42..d78b899263a2 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -199,6 +199,7 @@
#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 189
#define GCC_LPASS_Q6_AXI_CLK 190
#define GCC_LPASS_SWAY_CLK 191
+#define GPLL6 192

/* GCC Resets */
#define GCC_MMSS_BCR 0
diff --git a/include/dt-bindings/clock/qcom,gcc-sdx55.h b/include/dt-bindings/clock/qcom,gcc-sdx55.h
new file mode 100644
index 000000000000..fb9a5942f793
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sdx55.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020, Linaro Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SDX55_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SDX55_H
+
+#define GPLL0 3
+#define GPLL0_OUT_EVEN 4
+#define GPLL4 5
+#define GPLL4_OUT_EVEN 6
+#define GPLL5 7
+#define GCC_AHB_PCIE_LINK_CLK 8
+#define GCC_BLSP1_AHB_CLK 9
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 10
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC 11
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 12
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC 13
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 14
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC 15
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 16
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC 17
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 18
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC 19
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 20
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC 21
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 22
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC 23
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 24
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC 25
+#define GCC_BLSP1_UART1_APPS_CLK 26
+#define GCC_BLSP1_UART1_APPS_CLK_SRC 27
+#define GCC_BLSP1_UART2_APPS_CLK 28
+#define GCC_BLSP1_UART2_APPS_CLK_SRC 29
+#define GCC_BLSP1_UART3_APPS_CLK 30
+#define GCC_BLSP1_UART3_APPS_CLK_SRC 31
+#define GCC_BLSP1_UART4_APPS_CLK 32
+#define GCC_BLSP1_UART4_APPS_CLK_SRC 33
+#define GCC_BOOT_ROM_AHB_CLK 34
+#define GCC_CE1_AHB_CLK 35
+#define GCC_CE1_AXI_CLK 36
+#define GCC_CE1_CLK 37
+#define GCC_CPUSS_AHB_CLK 38
+#define GCC_CPUSS_AHB_CLK_SRC 39
+#define GCC_CPUSS_GNOC_CLK 40
+#define GCC_CPUSS_RBCPR_CLK 41
+#define GCC_CPUSS_RBCPR_CLK_SRC 42
+#define GCC_EMAC_CLK_SRC 43
+#define GCC_EMAC_PTP_CLK_SRC 44
+#define GCC_ETH_AXI_CLK 45
+#define GCC_ETH_PTP_CLK 46
+#define GCC_ETH_RGMII_CLK 47
+#define GCC_ETH_SLAVE_AHB_CLK 48
+#define GCC_GP1_CLK 49
+#define GCC_GP1_CLK_SRC 50
+#define GCC_GP2_CLK 51
+#define GCC_GP2_CLK_SRC 52
+#define GCC_GP3_CLK 53
+#define GCC_GP3_CLK_SRC 54
+#define GCC_PCIE_0_CLKREF_CLK 55
+#define GCC_PCIE_AUX_CLK 56
+#define GCC_PCIE_AUX_PHY_CLK_SRC 57
+#define GCC_PCIE_CFG_AHB_CLK 58
+#define GCC_PCIE_MSTR_AXI_CLK 59
+#define GCC_PCIE_PIPE_CLK 60
+#define GCC_PCIE_RCHNG_PHY_CLK 61
+#define GCC_PCIE_RCHNG_PHY_CLK_SRC 62
+#define GCC_PCIE_SLEEP_CLK 63
+#define GCC_PCIE_SLV_AXI_CLK 64
+#define GCC_PCIE_SLV_Q2A_AXI_CLK 65
+#define GCC_PDM2_CLK 66
+#define GCC_PDM2_CLK_SRC 67
+#define GCC_PDM_AHB_CLK 68
+#define GCC_PDM_XO4_CLK 69
+#define GCC_SDCC1_AHB_CLK 70
+#define GCC_SDCC1_APPS_CLK 71
+#define GCC_SDCC1_APPS_CLK_SRC 72
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 73
+#define GCC_USB30_MASTER_CLK 74
+#define GCC_USB30_MASTER_CLK_SRC 75
+#define GCC_USB30_MOCK_UTMI_CLK 76
+#define GCC_USB30_MOCK_UTMI_CLK_SRC 77
+#define GCC_USB30_MSTR_AXI_CLK 78
+#define GCC_USB30_SLEEP_CLK 79
+#define GCC_USB30_SLV_AHB_CLK 80
+#define GCC_USB3_PHY_AUX_CLK 81
+#define GCC_USB3_PHY_AUX_CLK_SRC 82
+#define GCC_USB3_PHY_PIPE_CLK 83
+#define GCC_USB3_PRIM_CLKREF_CLK 84
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 85
+#define GCC_XO_DIV4_CLK 86
+#define GCC_XO_PCIE_LINK_CLK 87
+
+#define GCC_EMAC_BCR 0
+#define GCC_PCIE_BCR 1
+#define GCC_PCIE_LINK_DOWN_BCR 2
+#define GCC_PCIE_NOCSR_COM_PHY_BCR 3
+#define GCC_PCIE_PHY_BCR 4
+#define GCC_PCIE_PHY_CFG_AHB_BCR 5
+#define GCC_PCIE_PHY_COM_BCR 6
+#define GCC_PCIE_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PDM_BCR 8
+#define GCC_QUSB2PHY_BCR 9
+#define GCC_TCSR_PCIE_BCR 10
+#define GCC_USB30_BCR 11
+#define GCC_USB3_PHY_BCR 12
+#define GCC_USB3PHY_PHY_BCR 13
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 14
+
+/* GCC power domains */
+#define USB30_GDSC 0
+#define PCIE_GDSC 1
+#define EMAC_GDSC 2
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdx65.h b/include/dt-bindings/clock/qcom,gcc-sdx65.h
new file mode 100644
index 000000000000..75ecc9237d8f
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sdx65.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SDX65_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SDX65_H
+
+/* GCC clocks */
+#define GPLL0 0
+#define GPLL0_OUT_EVEN 1
+#define GCC_AHB_PCIE_LINK_CLK 2
+#define GCC_BLSP1_AHB_CLK 3
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 4
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC 5
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 6
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC 7
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 8
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC 9
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 10
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC 11
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 12
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC 13
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 14
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC 15
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 16
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC 17
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 18
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC 19
+#define GCC_BLSP1_SLEEP_CLK 20
+#define GCC_BLSP1_UART1_APPS_CLK 21
+#define GCC_BLSP1_UART1_APPS_CLK_SRC 22
+#define GCC_BLSP1_UART2_APPS_CLK 23
+#define GCC_BLSP1_UART2_APPS_CLK_SRC 24
+#define GCC_BLSP1_UART3_APPS_CLK 25
+#define GCC_BLSP1_UART3_APPS_CLK_SRC 26
+#define GCC_BLSP1_UART4_APPS_CLK 27
+#define GCC_BLSP1_UART4_APPS_CLK_SRC 28
+#define GCC_BOOT_ROM_AHB_CLK 29
+#define GCC_CPUSS_AHB_CLK 30
+#define GCC_CPUSS_AHB_CLK_SRC 31
+#define GCC_CPUSS_AHB_POSTDIV_CLK_SRC 32
+#define GCC_CPUSS_GNOC_CLK 33
+#define GCC_GP1_CLK 34
+#define GCC_GP1_CLK_SRC 35
+#define GCC_GP2_CLK 36
+#define GCC_GP2_CLK_SRC 37
+#define GCC_GP3_CLK 38
+#define GCC_GP3_CLK_SRC 39
+#define GCC_PCIE_0_CLKREF_EN 40
+#define GCC_PCIE_AUX_CLK 41
+#define GCC_PCIE_AUX_CLK_SRC 42
+#define GCC_PCIE_AUX_PHY_CLK_SRC 43
+#define GCC_PCIE_CFG_AHB_CLK 44
+#define GCC_PCIE_MSTR_AXI_CLK 45
+#define GCC_PCIE_PIPE_CLK 46
+#define GCC_PCIE_PIPE_CLK_SRC 47
+#define GCC_PCIE_RCHNG_PHY_CLK 48
+#define GCC_PCIE_RCHNG_PHY_CLK_SRC 49
+#define GCC_PCIE_SLEEP_CLK 50
+#define GCC_PCIE_SLV_AXI_CLK 51
+#define GCC_PCIE_SLV_Q2A_AXI_CLK 52
+#define GCC_PDM2_CLK 53
+#define GCC_PDM2_CLK_SRC 54
+#define GCC_PDM_AHB_CLK 55
+#define GCC_PDM_XO4_CLK 56
+#define GCC_RX1_USB2_CLKREF_EN 57
+#define GCC_SDCC1_AHB_CLK 58
+#define GCC_SDCC1_APPS_CLK 59
+#define GCC_SDCC1_APPS_CLK_SRC 60
+#define GCC_SPMI_FETCHER_AHB_CLK 61
+#define GCC_SPMI_FETCHER_CLK 62
+#define GCC_SPMI_FETCHER_CLK_SRC 63
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 64
+#define GCC_USB30_MASTER_CLK 65
+#define GCC_USB30_MASTER_CLK_SRC 66
+#define GCC_USB30_MOCK_UTMI_CLK 67
+#define GCC_USB30_MOCK_UTMI_CLK_SRC 68
+#define GCC_USB30_MOCK_UTMI_POSTDIV_CLK_SRC 69
+#define GCC_USB30_MSTR_AXI_CLK 70
+#define GCC_USB30_SLEEP_CLK 71
+#define GCC_USB30_SLV_AHB_CLK 72
+#define GCC_USB3_PHY_AUX_CLK 73
+#define GCC_USB3_PHY_AUX_CLK_SRC 74
+#define GCC_USB3_PHY_PIPE_CLK 75
+#define GCC_USB3_PHY_PIPE_CLK_SRC 76
+#define GCC_USB3_PRIM_CLKREF_EN 77
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 78
+#define GCC_XO_DIV4_CLK 79
+#define GCC_XO_PCIE_LINK_CLK 80
+
+/* GCC resets */
+#define GCC_BLSP1_QUP1_BCR 0
+#define GCC_BLSP1_QUP2_BCR 1
+#define GCC_BLSP1_QUP3_BCR 2
+#define GCC_BLSP1_QUP4_BCR 3
+#define GCC_BLSP1_UART1_BCR 4
+#define GCC_BLSP1_UART2_BCR 5
+#define GCC_BLSP1_UART3_BCR 6
+#define GCC_BLSP1_UART4_BCR 7
+#define GCC_PCIE_BCR 8
+#define GCC_PCIE_LINK_DOWN_BCR 9
+#define GCC_PCIE_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_PHY_BCR 11
+#define GCC_PCIE_PHY_CFG_AHB_BCR 12
+#define GCC_PCIE_PHY_COM_BCR 13
+#define GCC_PCIE_PHY_NOCSR_COM_PHY_BCR 14
+#define GCC_PDM_BCR 15
+#define GCC_QUSB2PHY_BCR 16
+#define GCC_SDCC1_BCR 17
+#define GCC_SPMI_FETCHER_BCR 18
+#define GCC_TCSR_PCIE_BCR 19
+#define GCC_USB30_BCR 20
+#define GCC_USB3_PHY_BCR 21
+#define GCC_USB3PHY_PHY_BCR 22
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 23
+
+/* GCC power domains */
+#define USB30_GDSC 0
+#define PCIE_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sm6115.h b/include/dt-bindings/clock/qcom,gcc-sm6115.h
new file mode 100644
index 000000000000..b91a7b460433
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sm6115.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM6115_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM6115_H
+
+/* GCC clocks */
+#define GPLL0 0
+#define GPLL0_OUT_AUX2 1
+#define GPLL0_OUT_MAIN 2
+#define GPLL10 3
+#define GPLL10_OUT_MAIN 4
+#define GPLL11 5
+#define GPLL11_OUT_MAIN 6
+#define GPLL3 7
+#define GPLL4 8
+#define GPLL4_OUT_MAIN 9
+#define GPLL6 10
+#define GPLL6_OUT_MAIN 11
+#define GPLL7 12
+#define GPLL7_OUT_MAIN 13
+#define GPLL8 14
+#define GPLL8_OUT_MAIN 15
+#define GPLL9 16
+#define GPLL9_OUT_MAIN 17
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 18
+#define GCC_CAMSS_CSI0PHYTIMER_CLK_SRC 19
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 20
+#define GCC_CAMSS_CSI1PHYTIMER_CLK_SRC 21
+#define GCC_CAMSS_CSI2PHYTIMER_CLK 22
+#define GCC_CAMSS_CSI2PHYTIMER_CLK_SRC 23
+#define GCC_CAMSS_MCLK0_CLK 24
+#define GCC_CAMSS_MCLK0_CLK_SRC 25
+#define GCC_CAMSS_MCLK1_CLK 26
+#define GCC_CAMSS_MCLK1_CLK_SRC 27
+#define GCC_CAMSS_MCLK2_CLK 28
+#define GCC_CAMSS_MCLK2_CLK_SRC 29
+#define GCC_CAMSS_MCLK3_CLK 30
+#define GCC_CAMSS_MCLK3_CLK_SRC 31
+#define GCC_CAMSS_NRT_AXI_CLK 32
+#define GCC_CAMSS_OPE_AHB_CLK 33
+#define GCC_CAMSS_OPE_AHB_CLK_SRC 34
+#define GCC_CAMSS_OPE_CLK 35
+#define GCC_CAMSS_OPE_CLK_SRC 36
+#define GCC_CAMSS_RT_AXI_CLK 37
+#define GCC_CAMSS_TFE_0_CLK 38
+#define GCC_CAMSS_TFE_0_CLK_SRC 39
+#define GCC_CAMSS_TFE_0_CPHY_RX_CLK 40
+#define GCC_CAMSS_TFE_0_CSID_CLK 41
+#define GCC_CAMSS_TFE_0_CSID_CLK_SRC 42
+#define GCC_CAMSS_TFE_1_CLK 43
+#define GCC_CAMSS_TFE_1_CLK_SRC 44
+#define GCC_CAMSS_TFE_1_CPHY_RX_CLK 45
+#define GCC_CAMSS_TFE_1_CSID_CLK 46
+#define GCC_CAMSS_TFE_1_CSID_CLK_SRC 47
+#define GCC_CAMSS_TFE_2_CLK 48
+#define GCC_CAMSS_TFE_2_CLK_SRC 49
+#define GCC_CAMSS_TFE_2_CPHY_RX_CLK 50
+#define GCC_CAMSS_TFE_2_CSID_CLK 51
+#define GCC_CAMSS_TFE_2_CSID_CLK_SRC 52
+#define GCC_CAMSS_TFE_CPHY_RX_CLK_SRC 53
+#define GCC_CAMSS_TOP_AHB_CLK 54
+#define GCC_CAMSS_TOP_AHB_CLK_SRC 55
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 56
+#define GCC_CPUSS_AHB_CLK 57
+#define GCC_CPUSS_GNOC_CLK 60
+#define GCC_DISP_AHB_CLK 61
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 62
+#define GCC_DISP_HF_AXI_CLK 63
+#define GCC_DISP_THROTTLE_CORE_CLK 64
+#define GCC_DISP_XO_CLK 65
+#define GCC_GP1_CLK 66
+#define GCC_GP1_CLK_SRC 67
+#define GCC_GP2_CLK 68
+#define GCC_GP2_CLK_SRC 69
+#define GCC_GP3_CLK 70
+#define GCC_GP3_CLK_SRC 71
+#define GCC_GPU_CFG_AHB_CLK 72
+#define GCC_GPU_GPLL0_CLK_SRC 73
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 74
+#define GCC_GPU_IREF_CLK 75
+#define GCC_GPU_MEMNOC_GFX_CLK 76
+#define GCC_GPU_SNOC_DVM_GFX_CLK 77
+#define GCC_GPU_THROTTLE_CORE_CLK 78
+#define GCC_GPU_THROTTLE_XO_CLK 79
+#define GCC_PDM2_CLK 80
+#define GCC_PDM2_CLK_SRC 81
+#define GCC_PDM_AHB_CLK 82
+#define GCC_PDM_XO4_CLK 83
+#define GCC_PRNG_AHB_CLK 84
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 85
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 86
+#define GCC_QMIP_DISP_AHB_CLK 87
+#define GCC_QMIP_GPU_CFG_AHB_CLK 88
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 89
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 90
+#define GCC_QUPV3_WRAP0_CORE_CLK 91
+#define GCC_QUPV3_WRAP0_S0_CLK 92
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 93
+#define GCC_QUPV3_WRAP0_S1_CLK 94
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 95
+#define GCC_QUPV3_WRAP0_S2_CLK 96
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 97
+#define GCC_QUPV3_WRAP0_S3_CLK 98
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 99
+#define GCC_QUPV3_WRAP0_S4_CLK 100
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 101
+#define GCC_QUPV3_WRAP0_S5_CLK 102
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 103
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 104
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 105
+#define GCC_SDCC1_AHB_CLK 106
+#define GCC_SDCC1_APPS_CLK 107
+#define GCC_SDCC1_APPS_CLK_SRC 108
+#define GCC_SDCC1_ICE_CORE_CLK 109
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 110
+#define GCC_SDCC2_AHB_CLK 111
+#define GCC_SDCC2_APPS_CLK 112
+#define GCC_SDCC2_APPS_CLK_SRC 113
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 114
+#define GCC_SYS_NOC_UFS_PHY_AXI_CLK 115
+#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK 116
+#define GCC_UFS_PHY_AHB_CLK 117
+#define GCC_UFS_PHY_AXI_CLK 118
+#define GCC_UFS_PHY_AXI_CLK_SRC 119
+#define GCC_UFS_PHY_ICE_CORE_CLK 120
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 121
+#define GCC_UFS_PHY_PHY_AUX_CLK 122
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 123
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 124
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 125
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 126
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 127
+#define GCC_USB30_PRIM_MASTER_CLK 128
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 129
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 130
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 131
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 132
+#define GCC_USB30_PRIM_SLEEP_CLK 133
+#define GCC_USB3_PRIM_CLKREF_CLK 134
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 135
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 136
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 137
+#define GCC_VCODEC0_AXI_CLK 138
+#define GCC_VENUS_AHB_CLK 139
+#define GCC_VENUS_CTL_AXI_CLK 140
+#define GCC_VIDEO_AHB_CLK 141
+#define GCC_VIDEO_AXI0_CLK 142
+#define GCC_VIDEO_THROTTLE_CORE_CLK 143
+#define GCC_VIDEO_VCODEC0_SYS_CLK 144
+#define GCC_VIDEO_VENUS_CLK_SRC 145
+#define GCC_VIDEO_VENUS_CTL_CLK 146
+#define GCC_VIDEO_XO_CLK 147
+#define GCC_AHB2PHY_CSI_CLK 148
+#define GCC_AHB2PHY_USB_CLK 149
+#define GCC_BIMC_GPU_AXI_CLK 150
+#define GCC_BOOT_ROM_AHB_CLK 151
+#define GCC_CAM_THROTTLE_NRT_CLK 152
+#define GCC_CAM_THROTTLE_RT_CLK 153
+#define GCC_CAMERA_AHB_CLK 154
+#define GCC_CAMERA_XO_CLK 155
+#define GCC_CAMSS_AXI_CLK 156
+#define GCC_CAMSS_AXI_CLK_SRC 157
+#define GCC_CAMSS_CAMNOC_ATB_CLK 158
+#define GCC_CAMSS_CAMNOC_NTS_XO_CLK 159
+#define GCC_CAMSS_CCI_0_CLK 160
+#define GCC_CAMSS_CCI_CLK_SRC 161
+#define GCC_CAMSS_CPHY_0_CLK 162
+#define GCC_CAMSS_CPHY_1_CLK 163
+#define GCC_CAMSS_CPHY_2_CLK 164
+#define GCC_UFS_CLKREF_CLK 165
+#define GCC_DISP_GPLL0_CLK_SRC 166
+
+/* GCC resets */
+#define GCC_QUSB2PHY_PRIM_BCR 0
+#define GCC_QUSB2PHY_SEC_BCR 1
+#define GCC_SDCC1_BCR 2
+#define GCC_UFS_PHY_BCR 3
+#define GCC_USB30_PRIM_BCR 4
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 5
+#define GCC_VCODEC0_BCR 6
+#define GCC_VENUS_BCR 7
+#define GCC_VIDEO_INTERFACE_BCR 8
+#define GCC_USB3PHY_PHY_PRIM_SP0_BCR 9
+#define GCC_USB3_PHY_PRIM_SP0_BCR 10
+#define GCC_SDCC2_BCR 11
+
+/* Indexes for GDSCs */
+#define GCC_CAMSS_TOP_GDSC 0
+#define GCC_UFS_PHY_GDSC 1
+#define GCC_USB30_PRIM_GDSC 2
+#define GCC_VCODEC0_GDSC 3
+#define GCC_VENUS_GDSC 4
+#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 5
+#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 6
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_RT_GDSC 7
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_NRT_GDSC 8
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sm6125.h b/include/dt-bindings/clock/qcom,gcc-sm6125.h
new file mode 100644
index 000000000000..08ea18086824
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sm6125.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM6125_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM6125_H
+
+#define GPLL0_OUT_AUX2 0
+#define GPLL0_OUT_MAIN 1
+#define GPLL6_OUT_MAIN 2
+#define GPLL7_OUT_MAIN 3
+#define GPLL8_OUT_MAIN 4
+#define GPLL9_OUT_MAIN 5
+#define GPLL0_OUT_EARLY 6
+#define GPLL3_OUT_EARLY 7
+#define GPLL4_OUT_MAIN 8
+#define GPLL5_OUT_MAIN 9
+#define GPLL6_OUT_EARLY 10
+#define GPLL7_OUT_EARLY 11
+#define GPLL8_OUT_EARLY 12
+#define GPLL9_OUT_EARLY 13
+#define GCC_AHB2PHY_CSI_CLK 14
+#define GCC_AHB2PHY_USB_CLK 15
+#define GCC_APC_VS_CLK 16
+#define GCC_BOOT_ROM_AHB_CLK 17
+#define GCC_CAMERA_AHB_CLK 18
+#define GCC_CAMERA_XO_CLK 19
+#define GCC_CAMSS_AHB_CLK_SRC 20
+#define GCC_CAMSS_CCI_AHB_CLK 21
+#define GCC_CAMSS_CCI_CLK 22
+#define GCC_CAMSS_CCI_CLK_SRC 23
+#define GCC_CAMSS_CPHY_CSID0_CLK 24
+#define GCC_CAMSS_CPHY_CSID1_CLK 25
+#define GCC_CAMSS_CPHY_CSID2_CLK 26
+#define GCC_CAMSS_CPHY_CSID3_CLK 27
+#define GCC_CAMSS_CPP_AHB_CLK 28
+#define GCC_CAMSS_CPP_AXI_CLK 29
+#define GCC_CAMSS_CPP_CLK 30
+#define GCC_CAMSS_CPP_CLK_SRC 31
+#define GCC_CAMSS_CPP_VBIF_AHB_CLK 32
+#define GCC_CAMSS_CSI0_AHB_CLK 33
+#define GCC_CAMSS_CSI0_CLK 34
+#define GCC_CAMSS_CSI0_CLK_SRC 35
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 36
+#define GCC_CAMSS_CSI0PHYTIMER_CLK_SRC 37
+#define GCC_CAMSS_CSI0PIX_CLK 38
+#define GCC_CAMSS_CSI0RDI_CLK 39
+#define GCC_CAMSS_CSI1_AHB_CLK 40
+#define GCC_CAMSS_CSI1_CLK 41
+#define GCC_CAMSS_CSI1_CLK_SRC 42
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 43
+#define GCC_CAMSS_CSI1PHYTIMER_CLK_SRC 44
+#define GCC_CAMSS_CSI1PIX_CLK 45
+#define GCC_CAMSS_CSI1RDI_CLK 46
+#define GCC_CAMSS_CSI2_AHB_CLK 47
+#define GCC_CAMSS_CSI2_CLK 48
+#define GCC_CAMSS_CSI2_CLK_SRC 49
+#define GCC_CAMSS_CSI2PHYTIMER_CLK 50
+#define GCC_CAMSS_CSI2PHYTIMER_CLK_SRC 51
+#define GCC_CAMSS_CSI2PIX_CLK 52
+#define GCC_CAMSS_CSI2RDI_CLK 53
+#define GCC_CAMSS_CSI3_AHB_CLK 54
+#define GCC_CAMSS_CSI3_CLK 55
+#define GCC_CAMSS_CSI3_CLK_SRC 56
+#define GCC_CAMSS_CSI3PIX_CLK 57
+#define GCC_CAMSS_CSI3RDI_CLK 58
+#define GCC_CAMSS_CSI_VFE0_CLK 59
+#define GCC_CAMSS_CSI_VFE1_CLK 60
+#define GCC_CAMSS_CSIPHY0_CLK 61
+#define GCC_CAMSS_CSIPHY1_CLK 62
+#define GCC_CAMSS_CSIPHY2_CLK 63
+#define GCC_CAMSS_CSIPHY_CLK_SRC 64
+#define GCC_CAMSS_GP0_CLK 65
+#define GCC_CAMSS_GP0_CLK_SRC 66
+#define GCC_CAMSS_GP1_CLK 67
+#define GCC_CAMSS_GP1_CLK_SRC 68
+#define GCC_CAMSS_ISPIF_AHB_CLK 69
+#define GCC_CAMSS_JPEG_AHB_CLK 70
+#define GCC_CAMSS_JPEG_AXI_CLK 71
+#define GCC_CAMSS_JPEG_CLK 72
+#define GCC_CAMSS_JPEG_CLK_SRC 73
+#define GCC_CAMSS_MCLK0_CLK 74
+#define GCC_CAMSS_MCLK0_CLK_SRC 75
+#define GCC_CAMSS_MCLK1_CLK 76
+#define GCC_CAMSS_MCLK1_CLK_SRC 77
+#define GCC_CAMSS_MCLK2_CLK 78
+#define GCC_CAMSS_MCLK2_CLK_SRC 79
+#define GCC_CAMSS_MCLK3_CLK 80
+#define GCC_CAMSS_MCLK3_CLK_SRC 81
+#define GCC_CAMSS_MICRO_AHB_CLK 82
+#define GCC_CAMSS_THROTTLE_NRT_AXI_CLK 83
+#define GCC_CAMSS_THROTTLE_RT_AXI_CLK 84
+#define GCC_CAMSS_TOP_AHB_CLK 85
+#define GCC_CAMSS_VFE0_AHB_CLK 86
+#define GCC_CAMSS_VFE0_CLK 87
+#define GCC_CAMSS_VFE0_CLK_SRC 88
+#define GCC_CAMSS_VFE0_STREAM_CLK 89
+#define GCC_CAMSS_VFE1_AHB_CLK 90
+#define GCC_CAMSS_VFE1_CLK 91
+#define GCC_CAMSS_VFE1_CLK_SRC 92
+#define GCC_CAMSS_VFE1_STREAM_CLK 93
+#define GCC_CAMSS_VFE_TSCTR_CLK 94
+#define GCC_CAMSS_VFE_VBIF_AHB_CLK 95
+#define GCC_CAMSS_VFE_VBIF_AXI_CLK 96
+#define GCC_CE1_AHB_CLK 97
+#define GCC_CE1_AXI_CLK 98
+#define GCC_CE1_CLK 99
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 100
+#define GCC_CPUSS_GNOC_CLK 101
+#define GCC_DISP_AHB_CLK 102
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 103
+#define GCC_DISP_HF_AXI_CLK 104
+#define GCC_DISP_THROTTLE_CORE_CLK 105
+#define GCC_DISP_XO_CLK 106
+#define GCC_GP1_CLK 107
+#define GCC_GP1_CLK_SRC 108
+#define GCC_GP2_CLK 109
+#define GCC_GP2_CLK_SRC 110
+#define GCC_GP3_CLK 111
+#define GCC_GP3_CLK_SRC 112
+#define GCC_GPU_CFG_AHB_CLK 113
+#define GCC_GPU_GPLL0_CLK_SRC 114
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 115
+#define GCC_GPU_MEMNOC_GFX_CLK 116
+#define GCC_GPU_SNOC_DVM_GFX_CLK 117
+#define GCC_GPU_THROTTLE_CORE_CLK 118
+#define GCC_GPU_THROTTLE_XO_CLK 119
+#define GCC_MSS_VS_CLK 120
+#define GCC_PDM2_CLK 121
+#define GCC_PDM2_CLK_SRC 122
+#define GCC_PDM_AHB_CLK 123
+#define GCC_PDM_XO4_CLK 124
+#define GCC_PRNG_AHB_CLK 125
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 126
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 127
+#define GCC_QMIP_DISP_AHB_CLK 128
+#define GCC_QMIP_GPU_CFG_AHB_CLK 129
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 130
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 131
+#define GCC_QUPV3_WRAP0_CORE_CLK 132
+#define GCC_QUPV3_WRAP0_S0_CLK 133
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 134
+#define GCC_QUPV3_WRAP0_S1_CLK 135
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 136
+#define GCC_QUPV3_WRAP0_S2_CLK 137
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 138
+#define GCC_QUPV3_WRAP0_S3_CLK 139
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 140
+#define GCC_QUPV3_WRAP0_S4_CLK 141
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 142
+#define GCC_QUPV3_WRAP0_S5_CLK 143
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 144
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 145
+#define GCC_QUPV3_WRAP1_CORE_CLK 146
+#define GCC_QUPV3_WRAP1_S0_CLK 147
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 148
+#define GCC_QUPV3_WRAP1_S1_CLK 149
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 150
+#define GCC_QUPV3_WRAP1_S2_CLK 151
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 152
+#define GCC_QUPV3_WRAP1_S3_CLK 153
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 154
+#define GCC_QUPV3_WRAP1_S4_CLK 155
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 156
+#define GCC_QUPV3_WRAP1_S5_CLK 157
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 158
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 159
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 160
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 161
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 162
+#define GCC_SDCC1_AHB_CLK 163
+#define GCC_SDCC1_APPS_CLK 164
+#define GCC_SDCC1_APPS_CLK_SRC 165
+#define GCC_SDCC1_ICE_CORE_CLK 166
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 167
+#define GCC_SDCC2_AHB_CLK 168
+#define GCC_SDCC2_APPS_CLK 169
+#define GCC_SDCC2_APPS_CLK_SRC 170
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 171
+#define GCC_SYS_NOC_UFS_PHY_AXI_CLK 172
+#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK 173
+#define GCC_UFS_PHY_AHB_CLK 174
+#define GCC_UFS_PHY_AXI_CLK 175
+#define GCC_UFS_PHY_AXI_CLK_SRC 176
+#define GCC_UFS_PHY_ICE_CORE_CLK 177
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 178
+#define GCC_UFS_PHY_PHY_AUX_CLK 179
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 180
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 181
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 182
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 183
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 184
+#define GCC_USB30_PRIM_MASTER_CLK 185
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 186
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 187
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 188
+#define GCC_USB30_PRIM_SLEEP_CLK 189
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 190
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 191
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 192
+#define GCC_VDDA_VS_CLK 193
+#define GCC_VDDCX_VS_CLK 194
+#define GCC_VDDMX_VS_CLK 195
+#define GCC_VIDEO_AHB_CLK 196
+#define GCC_VIDEO_AXI0_CLK 197
+#define GCC_VIDEO_THROTTLE_CORE_CLK 198
+#define GCC_VIDEO_XO_CLK 199
+#define GCC_VS_CTRL_AHB_CLK 200
+#define GCC_VS_CTRL_CLK 201
+#define GCC_VS_CTRL_CLK_SRC 202
+#define GCC_VSENSOR_CLK_SRC 203
+#define GCC_WCSS_VS_CLK 204
+#define GCC_USB3_PRIM_CLKREF_CLK 205
+#define GCC_SYS_NOC_COMPUTE_SF_AXI_CLK 206
+#define GCC_BIMC_GPU_AXI_CLK 207
+#define GCC_UFS_MEM_CLKREF_CLK 208
+
+/* GDSCs */
+#define USB30_PRIM_GDSC 0
+#define UFS_PHY_GDSC 1
+#define CAMSS_VFE0_GDSC 2
+#define CAMSS_VFE1_GDSC 3
+#define CAMSS_TOP_GDSC 4
+#define CAM_CPP_GDSC 5
+#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 6
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_RT_GDSC 7
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_NRT_GDSC 8
+#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 9
+
+#define GCC_QUSB2PHY_PRIM_BCR 0
+#define GCC_QUSB2PHY_SEC_BCR 1
+#define GCC_UFS_PHY_BCR 2
+#define GCC_USB30_PRIM_BCR 3
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 4
+#define GCC_USB3_PHY_PRIM_SP0_BCR 5
+#define GCC_USB3PHY_PHY_PRIM_SP0_BCR 6
+#define GCC_CAMSS_MICRO_BCR 7
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sm6350.h b/include/dt-bindings/clock/qcom,gcc-sm6350.h
new file mode 100644
index 000000000000..ba584ca33c39
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sm6350.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM6350_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM6350_H
+
+/* GCC clocks */
+#define GPLL0 0
+#define GPLL0_OUT_EVEN 1
+#define GPLL0_OUT_ODD 2
+#define GPLL6 3
+#define GPLL6_OUT_EVEN 4
+#define GPLL7 5
+#define GCC_AGGRE_CNOC_PERIPH_CENTER_AHB_CLK 6
+#define GCC_AGGRE_NOC_CENTER_AHB_CLK 7
+#define GCC_AGGRE_NOC_PCIE_SF_AXI_CLK 8
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 9
+#define GCC_AGGRE_NOC_WLAN_AXI_CLK 10
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 11
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 12
+#define GCC_BOOT_ROM_AHB_CLK 13
+#define GCC_CAMERA_AHB_CLK 14
+#define GCC_CAMERA_AXI_CLK 15
+#define GCC_CAMERA_THROTTLE_NRT_AXI_CLK 16
+#define GCC_CAMERA_THROTTLE_RT_AXI_CLK 17
+#define GCC_CAMERA_XO_CLK 18
+#define GCC_CE1_AHB_CLK 19
+#define GCC_CE1_AXI_CLK 20
+#define GCC_CE1_CLK 21
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 22
+#define GCC_CPUSS_AHB_CLK 23
+#define GCC_CPUSS_AHB_CLK_SRC 24
+#define GCC_CPUSS_AHB_DIV_CLK_SRC 25
+#define GCC_CPUSS_GNOC_CLK 26
+#define GCC_CPUSS_RBCPR_CLK 27
+#define GCC_DDRSS_GPU_AXI_CLK 28
+#define GCC_DISP_AHB_CLK 29
+#define GCC_DISP_AXI_CLK 30
+#define GCC_DISP_CC_SLEEP_CLK 31
+#define GCC_DISP_CC_XO_CLK 32
+#define GCC_DISP_GPLL0_CLK 33
+#define GCC_DISP_THROTTLE_AXI_CLK 34
+#define GCC_DISP_XO_CLK 35
+#define GCC_GP1_CLK 36
+#define GCC_GP1_CLK_SRC 37
+#define GCC_GP2_CLK 38
+#define GCC_GP2_CLK_SRC 39
+#define GCC_GP3_CLK 40
+#define GCC_GP3_CLK_SRC 41
+#define GCC_GPU_CFG_AHB_CLK 42
+#define GCC_GPU_GPLL0_CLK 43
+#define GCC_GPU_GPLL0_DIV_CLK 44
+#define GCC_GPU_MEMNOC_GFX_CLK 45
+#define GCC_GPU_SNOC_DVM_GFX_CLK 46
+#define GCC_NPU_AXI_CLK 47
+#define GCC_NPU_BWMON_AXI_CLK 48
+#define GCC_NPU_BWMON_DMA_CFG_AHB_CLK 49
+#define GCC_NPU_BWMON_DSP_CFG_AHB_CLK 50
+#define GCC_NPU_CFG_AHB_CLK 51
+#define GCC_NPU_DMA_CLK 52
+#define GCC_NPU_GPLL0_CLK 53
+#define GCC_NPU_GPLL0_DIV_CLK 54
+#define GCC_PCIE_0_AUX_CLK 55
+#define GCC_PCIE_0_AUX_CLK_SRC 56
+#define GCC_PCIE_0_CFG_AHB_CLK 57
+#define GCC_PCIE_0_MSTR_AXI_CLK 58
+#define GCC_PCIE_0_PIPE_CLK 59
+#define GCC_PCIE_0_SLV_AXI_CLK 60
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 61
+#define GCC_PCIE_PHY_RCHNG_CLK 62
+#define GCC_PCIE_PHY_RCHNG_CLK_SRC 63
+#define GCC_PDM2_CLK 64
+#define GCC_PDM2_CLK_SRC 65
+#define GCC_PDM_AHB_CLK 66
+#define GCC_PDM_XO4_CLK 67
+#define GCC_PRNG_AHB_CLK 68
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 69
+#define GCC_QUPV3_WRAP0_CORE_CLK 70
+#define GCC_QUPV3_WRAP0_S0_CLK 71
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 72
+#define GCC_QUPV3_WRAP0_S1_CLK 73
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 74
+#define GCC_QUPV3_WRAP0_S2_CLK 75
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 76
+#define GCC_QUPV3_WRAP0_S3_CLK 77
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 78
+#define GCC_QUPV3_WRAP0_S4_CLK 79
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 80
+#define GCC_QUPV3_WRAP0_S5_CLK 81
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 82
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 83
+#define GCC_QUPV3_WRAP1_CORE_CLK 84
+#define GCC_QUPV3_WRAP1_S0_CLK 85
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 86
+#define GCC_QUPV3_WRAP1_S1_CLK 87
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 88
+#define GCC_QUPV3_WRAP1_S2_CLK 89
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 90
+#define GCC_QUPV3_WRAP1_S3_CLK 91
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 92
+#define GCC_QUPV3_WRAP1_S4_CLK 93
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 94
+#define GCC_QUPV3_WRAP1_S5_CLK 95
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 96
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 97
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 98
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 99
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 100
+#define GCC_SDCC1_AHB_CLK 101
+#define GCC_SDCC1_APPS_CLK 102
+#define GCC_SDCC1_APPS_CLK_SRC 103
+#define GCC_SDCC1_ICE_CORE_CLK 104
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 105
+#define GCC_SDCC2_AHB_CLK 106
+#define GCC_SDCC2_APPS_CLK 107
+#define GCC_SDCC2_APPS_CLK_SRC 108
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 109
+#define GCC_UFS_MEM_CLKREF_CLK 110
+#define GCC_UFS_PHY_AHB_CLK 111
+#define GCC_UFS_PHY_AXI_CLK 112
+#define GCC_UFS_PHY_AXI_CLK_SRC 113
+#define GCC_UFS_PHY_ICE_CORE_CLK 114
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 115
+#define GCC_UFS_PHY_PHY_AUX_CLK 116
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 117
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 118
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 119
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 120
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 121
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 122
+#define GCC_USB30_PRIM_MASTER_CLK 123
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 124
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 125
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 126
+#define GCC_USB30_PRIM_MOCK_UTMI_DIV_CLK_SRC 127
+#define GCC_USB3_PRIM_CLKREF_CLK 128
+#define GCC_USB30_PRIM_SLEEP_CLK 129
+#define GCC_USB3_PRIM_PHY_AUX_CLK 130
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 131
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 132
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 133
+#define GCC_VIDEO_AHB_CLK 134
+#define GCC_VIDEO_AXI_CLK 135
+#define GCC_VIDEO_THROTTLE_AXI_CLK 136
+#define GCC_VIDEO_XO_CLK 137
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 138
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 139
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 140
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 141
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 142
+#define GCC_RX5_PCIE_CLKREF_CLK 143
+#define GCC_GPU_GPLL0_MAIN_DIV_CLK_SRC 144
+#define GCC_NPU_PLL0_MAIN_DIV_CLK_SRC 145
+
+/* GCC resets */
+#define GCC_QUSB2PHY_PRIM_BCR 0
+#define GCC_QUSB2PHY_SEC_BCR 1
+#define GCC_SDCC1_BCR 2
+#define GCC_SDCC2_BCR 3
+#define GCC_UFS_PHY_BCR 4
+#define GCC_USB30_PRIM_BCR 5
+#define GCC_PCIE_0_BCR 6
+#define GCC_PCIE_0_PHY_BCR 7
+#define GCC_QUPV3_WRAPPER_0_BCR 8
+#define GCC_QUPV3_WRAPPER_1_BCR 9
+#define GCC_USB3_PHY_PRIM_BCR 10
+#define GCC_USB3_DP_PHY_PRIM_BCR 11
+
+/* GCC GDSCs */
+#define USB30_PRIM_GDSC 0
+#define UFS_PHY_GDSC 1
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 2
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 3
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sm8150.h b/include/dt-bindings/clock/qcom,gcc-sm8150.h
index 90d60ef94c64..dfefd5e8bf6e 100644
--- a/include/dt-bindings/clock/qcom,gcc-sm8150.h
+++ b/include/dt-bindings/clock/qcom,gcc-sm8150.h
@@ -240,4 +240,13 @@
#define GCC_USB30_SEC_BCR 27
#define GCC_USB_PHY_CFG_AHB2PHY_BCR 28

+/* GCC GDSCRs */
+#define PCIE_0_GDSC 0
+#define PCIE_1_GDSC 1
+#define UFS_CARD_GDSC 2
+#define UFS_PHY_GDSC 3
+#define USB30_PRIM_GDSC 4
+#define USB30_SEC_GDSC 5
+#define EMAC_GDSC 6
+
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sm8250.h b/include/dt-bindings/clock/qcom,gcc-sm8250.h
new file mode 100644
index 000000000000..7b7abe327e37
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sm8250.h
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM8250_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM8250_H
+
+/* GCC clocks */
+#define GPLL0 0
+#define GPLL0_OUT_EVEN 1
+#define GPLL4 2
+#define GPLL9 3
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 4
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 5
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 6
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 7
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 8
+#define GCC_BOOT_ROM_AHB_CLK 9
+#define GCC_CAMERA_AHB_CLK 10
+#define GCC_CAMERA_HF_AXI_CLK 11
+#define GCC_CAMERA_SF_AXI_CLK 12
+#define GCC_CAMERA_XO_CLK 13
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 14
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 15
+#define GCC_CPUSS_AHB_CLK 16
+#define GCC_CPUSS_AHB_CLK_SRC 17
+#define GCC_CPUSS_AHB_POSTDIV_CLK_SRC 18
+#define GCC_CPUSS_DVM_BUS_CLK 19
+#define GCC_CPUSS_RBCPR_CLK 20
+#define GCC_DDRSS_GPU_AXI_CLK 21
+#define GCC_DDRSS_PCIE_SF_TBU_CLK 22
+#define GCC_DISP_AHB_CLK 23
+#define GCC_DISP_HF_AXI_CLK 24
+#define GCC_DISP_SF_AXI_CLK 25
+#define GCC_DISP_XO_CLK 26
+#define GCC_GP1_CLK 27
+#define GCC_GP1_CLK_SRC 28
+#define GCC_GP2_CLK 29
+#define GCC_GP2_CLK_SRC 30
+#define GCC_GP3_CLK 31
+#define GCC_GP3_CLK_SRC 32
+#define GCC_GPU_CFG_AHB_CLK 33
+#define GCC_GPU_GPLL0_CLK_SRC 34
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 35
+#define GCC_GPU_IREF_EN 36
+#define GCC_GPU_MEMNOC_GFX_CLK 37
+#define GCC_GPU_SNOC_DVM_GFX_CLK 38
+#define GCC_NPU_AXI_CLK 39
+#define GCC_NPU_BWMON_AXI_CLK 40
+#define GCC_NPU_BWMON_CFG_AHB_CLK 41
+#define GCC_NPU_CFG_AHB_CLK 42
+#define GCC_NPU_DMA_CLK 43
+#define GCC_NPU_GPLL0_CLK_SRC 44
+#define GCC_NPU_GPLL0_DIV_CLK_SRC 45
+#define GCC_PCIE0_PHY_REFGEN_CLK 46
+#define GCC_PCIE1_PHY_REFGEN_CLK 47
+#define GCC_PCIE2_PHY_REFGEN_CLK 48
+#define GCC_PCIE_0_AUX_CLK 49
+#define GCC_PCIE_0_AUX_CLK_SRC 50
+#define GCC_PCIE_0_CFG_AHB_CLK 51
+#define GCC_PCIE_0_MSTR_AXI_CLK 52
+#define GCC_PCIE_0_PIPE_CLK 53
+#define GCC_PCIE_0_SLV_AXI_CLK 54
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 55
+#define GCC_PCIE_1_AUX_CLK 56
+#define GCC_PCIE_1_AUX_CLK_SRC 57
+#define GCC_PCIE_1_CFG_AHB_CLK 58
+#define GCC_PCIE_1_MSTR_AXI_CLK 59
+#define GCC_PCIE_1_PIPE_CLK 60
+#define GCC_PCIE_1_SLV_AXI_CLK 61
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 62
+#define GCC_PCIE_2_AUX_CLK 63
+#define GCC_PCIE_2_AUX_CLK_SRC 64
+#define GCC_PCIE_2_CFG_AHB_CLK 65
+#define GCC_PCIE_2_MSTR_AXI_CLK 66
+#define GCC_PCIE_2_PIPE_CLK 67
+#define GCC_PCIE_2_SLV_AXI_CLK 68
+#define GCC_PCIE_2_SLV_Q2A_AXI_CLK 69
+#define GCC_PCIE_MDM_CLKREF_EN 70
+#define GCC_PCIE_PHY_AUX_CLK 71
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 72
+#define GCC_PCIE_WIFI_CLKREF_EN 73
+#define GCC_PCIE_WIGIG_CLKREF_EN 74
+#define GCC_PDM2_CLK 75
+#define GCC_PDM2_CLK_SRC 76
+#define GCC_PDM_AHB_CLK 77
+#define GCC_PDM_XO4_CLK 78
+#define GCC_PRNG_AHB_CLK 79
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 80
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 81
+#define GCC_QMIP_DISP_AHB_CLK 82
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 83
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 84
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 85
+#define GCC_QUPV3_WRAP0_CORE_CLK 86
+#define GCC_QUPV3_WRAP0_S0_CLK 87
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 88
+#define GCC_QUPV3_WRAP0_S1_CLK 89
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 90
+#define GCC_QUPV3_WRAP0_S2_CLK 91
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 92
+#define GCC_QUPV3_WRAP0_S3_CLK 93
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 94
+#define GCC_QUPV3_WRAP0_S4_CLK 95
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 96
+#define GCC_QUPV3_WRAP0_S5_CLK 97
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 98
+#define GCC_QUPV3_WRAP0_S6_CLK 99
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 100
+#define GCC_QUPV3_WRAP0_S7_CLK 101
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 102
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 103
+#define GCC_QUPV3_WRAP1_CORE_CLK 104
+#define GCC_QUPV3_WRAP1_S0_CLK 105
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 106
+#define GCC_QUPV3_WRAP1_S1_CLK 107
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 108
+#define GCC_QUPV3_WRAP1_S2_CLK 109
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 110
+#define GCC_QUPV3_WRAP1_S3_CLK 111
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 112
+#define GCC_QUPV3_WRAP1_S4_CLK 113
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 114
+#define GCC_QUPV3_WRAP1_S5_CLK 115
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 116
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 117
+#define GCC_QUPV3_WRAP2_CORE_CLK 118
+#define GCC_QUPV3_WRAP2_S0_CLK 119
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 120
+#define GCC_QUPV3_WRAP2_S1_CLK 121
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 122
+#define GCC_QUPV3_WRAP2_S2_CLK 123
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 124
+#define GCC_QUPV3_WRAP2_S3_CLK 125
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 126
+#define GCC_QUPV3_WRAP2_S4_CLK 127
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 128
+#define GCC_QUPV3_WRAP2_S5_CLK 129
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 130
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 131
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 132
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 133
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 134
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 135
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 136
+#define GCC_SDCC2_AHB_CLK 137
+#define GCC_SDCC2_APPS_CLK 138
+#define GCC_SDCC2_APPS_CLK_SRC 139
+#define GCC_SDCC4_AHB_CLK 140
+#define GCC_SDCC4_APPS_CLK 141
+#define GCC_SDCC4_APPS_CLK_SRC 142
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 143
+#define GCC_TSIF_AHB_CLK 144
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 145
+#define GCC_TSIF_REF_CLK 146
+#define GCC_TSIF_REF_CLK_SRC 147
+#define GCC_UFS_1X_CLKREF_EN 148
+#define GCC_UFS_CARD_AHB_CLK 149
+#define GCC_UFS_CARD_AXI_CLK 150
+#define GCC_UFS_CARD_AXI_CLK_SRC 151
+#define GCC_UFS_CARD_ICE_CORE_CLK 152
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 153
+#define GCC_UFS_CARD_PHY_AUX_CLK 154
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 155
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 156
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 157
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 158
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 159
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 160
+#define GCC_UFS_PHY_AHB_CLK 161
+#define GCC_UFS_PHY_AXI_CLK 162
+#define GCC_UFS_PHY_AXI_CLK_SRC 163
+#define GCC_UFS_PHY_ICE_CORE_CLK 164
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 165
+#define GCC_UFS_PHY_PHY_AUX_CLK 166
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 167
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 168
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 169
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 170
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 171
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 172
+#define GCC_USB30_PRIM_MASTER_CLK 173
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 174
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 175
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 176
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 177
+#define GCC_USB30_PRIM_SLEEP_CLK 178
+#define GCC_USB30_SEC_MASTER_CLK 179
+#define GCC_USB30_SEC_MASTER_CLK_SRC 180
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 181
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 182
+#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 183
+#define GCC_USB30_SEC_SLEEP_CLK 184
+#define GCC_USB3_PRIM_PHY_AUX_CLK 185
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 186
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 187
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 188
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 189
+#define GCC_USB3_SEC_CLKREF_EN 190
+#define GCC_USB3_SEC_PHY_AUX_CLK 191
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 192
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 193
+#define GCC_USB3_SEC_PHY_PIPE_CLK 194
+#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 195
+#define GCC_VIDEO_AHB_CLK 196
+#define GCC_VIDEO_AXI0_CLK 197
+#define GCC_VIDEO_AXI1_CLK 198
+#define GCC_VIDEO_XO_CLK 199
+
+/* GCC resets */
+#define GCC_GPU_BCR 0
+#define GCC_MMSS_BCR 1
+#define GCC_NPU_BWMON_BCR 2
+#define GCC_NPU_BCR 3
+#define GCC_PCIE_0_BCR 4
+#define GCC_PCIE_0_LINK_DOWN_BCR 5
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 6
+#define GCC_PCIE_0_PHY_BCR 7
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 8
+#define GCC_PCIE_1_BCR 9
+#define GCC_PCIE_1_LINK_DOWN_BCR 10
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 11
+#define GCC_PCIE_1_PHY_BCR 12
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 13
+#define GCC_PCIE_2_BCR 14
+#define GCC_PCIE_2_LINK_DOWN_BCR 15
+#define GCC_PCIE_2_NOCSR_COM_PHY_BCR 16
+#define GCC_PCIE_2_PHY_BCR 17
+#define GCC_PCIE_2_PHY_NOCSR_COM_PHY_BCR 18
+#define GCC_PCIE_PHY_BCR 19
+#define GCC_PCIE_PHY_CFG_AHB_BCR 20
+#define GCC_PCIE_PHY_COM_BCR 21
+#define GCC_PDM_BCR 22
+#define GCC_PRNG_BCR 23
+#define GCC_QUPV3_WRAPPER_0_BCR 24
+#define GCC_QUPV3_WRAPPER_1_BCR 25
+#define GCC_QUPV3_WRAPPER_2_BCR 26
+#define GCC_QUSB2PHY_PRIM_BCR 27
+#define GCC_QUSB2PHY_SEC_BCR 28
+#define GCC_SDCC2_BCR 29
+#define GCC_SDCC4_BCR 30
+#define GCC_TSIF_BCR 31
+#define GCC_UFS_CARD_BCR 32
+#define GCC_UFS_PHY_BCR 33
+#define GCC_USB30_PRIM_BCR 34
+#define GCC_USB30_SEC_BCR 35
+#define GCC_USB3_DP_PHY_PRIM_BCR 36
+#define GCC_USB3_DP_PHY_SEC_BCR 37
+#define GCC_USB3_PHY_PRIM_BCR 38
+#define GCC_USB3_PHY_SEC_BCR 39
+#define GCC_USB3PHY_PHY_PRIM_BCR 40
+#define GCC_USB3PHY_PHY_SEC_BCR 41
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 42
+#define GCC_VIDEO_AXI0_CLK_ARES 43
+#define GCC_VIDEO_AXI1_CLK_ARES 44
+
+/* GCC power domains */
+#define PCIE_0_GDSC 0
+#define PCIE_1_GDSC 1
+#define PCIE_2_GDSC 2
+#define UFS_CARD_GDSC 3
+#define UFS_PHY_GDSC 4
+#define USB30_PRIM_GDSC 5
+#define USB30_SEC_GDSC 6
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 7
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 8
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC 9
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF1_GDSC 10
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sm8350.h b/include/dt-bindings/clock/qcom,gcc-sm8350.h
new file mode 100644
index 000000000000..f6be3da5f781
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sm8350.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020-2021, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM8350_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM8350_H
+
+/* GCC HW clocks */
+#define CORE_BI_PLL_TEST_SE 0
+#define PCIE_0_PIPE_CLK 1
+#define PCIE_1_PIPE_CLK 2
+#define UFS_CARD_RX_SYMBOL_0_CLK 3
+#define UFS_CARD_RX_SYMBOL_1_CLK 4
+#define UFS_CARD_TX_SYMBOL_0_CLK 5
+#define UFS_PHY_RX_SYMBOL_0_CLK 6
+#define UFS_PHY_RX_SYMBOL_1_CLK 7
+#define UFS_PHY_TX_SYMBOL_0_CLK 8
+#define USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK 9
+#define USB3_UNI_PHY_SEC_GCC_USB30_PIPE_CLK 10
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_PCIE_0_AXI_CLK 11
+#define GCC_AGGRE_NOC_PCIE_1_AXI_CLK 12
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 13
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 14
+#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK 15
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 16
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 17
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 18
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 19
+#define GCC_BOOT_ROM_AHB_CLK 20
+#define GCC_CAMERA_HF_AXI_CLK 21
+#define GCC_CAMERA_SF_AXI_CLK 22
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 23
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 24
+#define GCC_DDRSS_GPU_AXI_CLK 25
+#define GCC_DDRSS_PCIE_SF_TBU_CLK 26
+#define GCC_DISP_HF_AXI_CLK 27
+#define GCC_DISP_SF_AXI_CLK 28
+#define GCC_GP1_CLK 29
+#define GCC_GP1_CLK_SRC 30
+#define GCC_GP2_CLK 31
+#define GCC_GP2_CLK_SRC 32
+#define GCC_GP3_CLK 33
+#define GCC_GP3_CLK_SRC 34
+#define GCC_GPLL0 35
+#define GCC_GPLL0_OUT_EVEN 36
+#define GCC_GPLL4 37
+#define GCC_GPLL9 38
+#define GCC_GPU_GPLL0_CLK_SRC 39
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 40
+#define GCC_GPU_IREF_EN 41
+#define GCC_GPU_MEMNOC_GFX_CLK 42
+#define GCC_GPU_SNOC_DVM_GFX_CLK 43
+#define GCC_PCIE0_PHY_RCHNG_CLK 44
+#define GCC_PCIE1_PHY_RCHNG_CLK 45
+#define GCC_PCIE_0_AUX_CLK 46
+#define GCC_PCIE_0_AUX_CLK_SRC 47
+#define GCC_PCIE_0_CFG_AHB_CLK 48
+#define GCC_PCIE_0_CLKREF_EN 49
+#define GCC_PCIE_0_MSTR_AXI_CLK 50
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 51
+#define GCC_PCIE_0_PIPE_CLK 52
+#define GCC_PCIE_0_PIPE_CLK_SRC 53
+#define GCC_PCIE_0_SLV_AXI_CLK 54
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 55
+#define GCC_PCIE_1_AUX_CLK 56
+#define GCC_PCIE_1_AUX_CLK_SRC 57
+#define GCC_PCIE_1_CFG_AHB_CLK 58
+#define GCC_PCIE_1_CLKREF_EN 59
+#define GCC_PCIE_1_MSTR_AXI_CLK 60
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 61
+#define GCC_PCIE_1_PIPE_CLK 62
+#define GCC_PCIE_1_PIPE_CLK_SRC 63
+#define GCC_PCIE_1_SLV_AXI_CLK 64
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 65
+#define GCC_PDM2_CLK 66
+#define GCC_PDM2_CLK_SRC 67
+#define GCC_PDM_AHB_CLK 68
+#define GCC_PDM_XO4_CLK 69
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 70
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 71
+#define GCC_QMIP_DISP_AHB_CLK 72
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 73
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 74
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 75
+#define GCC_QUPV3_WRAP0_CORE_CLK 76
+#define GCC_QUPV3_WRAP0_S0_CLK 77
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 78
+#define GCC_QUPV3_WRAP0_S1_CLK 79
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 80
+#define GCC_QUPV3_WRAP0_S2_CLK 81
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 82
+#define GCC_QUPV3_WRAP0_S3_CLK 83
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 84
+#define GCC_QUPV3_WRAP0_S4_CLK 85
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 86
+#define GCC_QUPV3_WRAP0_S5_CLK 87
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 88
+#define GCC_QUPV3_WRAP0_S6_CLK 89
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 90
+#define GCC_QUPV3_WRAP0_S7_CLK 91
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 92
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 93
+#define GCC_QUPV3_WRAP1_CORE_CLK 94
+#define GCC_QUPV3_WRAP1_S0_CLK 95
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 96
+#define GCC_QUPV3_WRAP1_S1_CLK 97
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 98
+#define GCC_QUPV3_WRAP1_S2_CLK 99
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 100
+#define GCC_QUPV3_WRAP1_S3_CLK 101
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 102
+#define GCC_QUPV3_WRAP1_S4_CLK 103
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 104
+#define GCC_QUPV3_WRAP1_S5_CLK 105
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 106
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 107
+#define GCC_QUPV3_WRAP2_CORE_CLK 108
+#define GCC_QUPV3_WRAP2_S0_CLK 109
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 110
+#define GCC_QUPV3_WRAP2_S1_CLK 111
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 112
+#define GCC_QUPV3_WRAP2_S2_CLK 113
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 114
+#define GCC_QUPV3_WRAP2_S3_CLK 115
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 116
+#define GCC_QUPV3_WRAP2_S4_CLK 117
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 118
+#define GCC_QUPV3_WRAP2_S5_CLK 119
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 120
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 121
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 122
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 123
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 124
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 125
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 126
+#define GCC_SDCC2_AHB_CLK 127
+#define GCC_SDCC2_APPS_CLK 128
+#define GCC_SDCC2_APPS_CLK_SRC 129
+#define GCC_SDCC4_AHB_CLK 130
+#define GCC_SDCC4_APPS_CLK 131
+#define GCC_SDCC4_APPS_CLK_SRC 132
+#define GCC_THROTTLE_PCIE_AHB_CLK 133
+#define GCC_UFS_1_CLKREF_EN 134
+#define GCC_UFS_CARD_AHB_CLK 135
+#define GCC_UFS_CARD_AXI_CLK 136
+#define GCC_UFS_CARD_AXI_CLK_SRC 137
+#define GCC_UFS_CARD_AXI_HW_CTL_CLK 138
+#define GCC_UFS_CARD_ICE_CORE_CLK 139
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 140
+#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK 141
+#define GCC_UFS_CARD_PHY_AUX_CLK 142
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 143
+#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK 144
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 145
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK_SRC 146
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 147
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK_SRC 148
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 149
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK_SRC 150
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 151
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 152
+#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK 153
+#define GCC_UFS_PHY_AHB_CLK 154
+#define GCC_UFS_PHY_AXI_CLK 155
+#define GCC_UFS_PHY_AXI_CLK_SRC 156
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 157
+#define GCC_UFS_PHY_ICE_CORE_CLK 158
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 159
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 160
+#define GCC_UFS_PHY_PHY_AUX_CLK 161
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 162
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 163
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 164
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 165
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 166
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 167
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 168
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 169
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 170
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 171
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 172
+#define GCC_USB30_PRIM_MASTER_CLK 173
+#define GCC_USB30_PRIM_MASTER_CLK__FORCE_MEM_CORE_ON 174
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 175
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 176
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 177
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 178
+#define GCC_USB30_PRIM_SLEEP_CLK 179
+#define GCC_USB30_SEC_MASTER_CLK 180
+#define GCC_USB30_SEC_MASTER_CLK__FORCE_MEM_CORE_ON 181
+#define GCC_USB30_SEC_MASTER_CLK_SRC 182
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 183
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 184
+#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 185
+#define GCC_USB30_SEC_SLEEP_CLK 186
+#define GCC_USB3_PRIM_PHY_AUX_CLK 187
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 188
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 189
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 190
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 191
+#define GCC_USB3_SEC_CLKREF_EN 192
+#define GCC_USB3_SEC_PHY_AUX_CLK 193
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 194
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 195
+#define GCC_USB3_SEC_PHY_PIPE_CLK 196
+#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 197
+#define GCC_VIDEO_AXI0_CLK 198
+#define GCC_VIDEO_AXI1_CLK 199
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_GPU_BCR 2
+#define GCC_MMSS_BCR 3
+#define GCC_PCIE_0_BCR 4
+#define GCC_PCIE_0_LINK_DOWN_BCR 5
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 6
+#define GCC_PCIE_0_PHY_BCR 7
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 8
+#define GCC_PCIE_1_BCR 9
+#define GCC_PCIE_1_LINK_DOWN_BCR 10
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 11
+#define GCC_PCIE_1_PHY_BCR 12
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 13
+#define GCC_PCIE_PHY_CFG_AHB_BCR 14
+#define GCC_PCIE_PHY_COM_BCR 15
+#define GCC_PDM_BCR 16
+#define GCC_QUPV3_WRAPPER_0_BCR 17
+#define GCC_QUPV3_WRAPPER_1_BCR 18
+#define GCC_QUPV3_WRAPPER_2_BCR 19
+#define GCC_QUSB2PHY_PRIM_BCR 20
+#define GCC_QUSB2PHY_SEC_BCR 21
+#define GCC_SDCC2_BCR 22
+#define GCC_SDCC4_BCR 23
+#define GCC_UFS_CARD_BCR 24
+#define GCC_UFS_PHY_BCR 25
+#define GCC_USB30_PRIM_BCR 26
+#define GCC_USB30_SEC_BCR 27
+#define GCC_USB3_DP_PHY_PRIM_BCR 28
+#define GCC_USB3_DP_PHY_SEC_BCR 29
+#define GCC_USB3_PHY_PRIM_BCR 30
+#define GCC_USB3_PHY_SEC_BCR 31
+#define GCC_USB3PHY_PHY_PRIM_BCR 32
+#define GCC_USB3PHY_PHY_SEC_BCR 33
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 34
+#define GCC_VIDEO_AXI0_CLK_ARES 35
+#define GCC_VIDEO_AXI1_CLK_ARES 36
+#define GCC_VIDEO_BCR 37
+
+/* GCC power domains */
+#define PCIE_0_GDSC 0
+#define PCIE_1_GDSC 1
+#define UFS_CARD_GDSC 2
+#define UFS_PHY_GDSC 3
+#define USB30_PRIM_GDSC 4
+#define USB30_SEC_GDSC 5
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 6
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 7
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC 8
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF1_GDSC 9
+
+#endif
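
Illustration (not part of the patch): board device trees consume these indices through the header added above. A minimal sketch for the SM8350 GCC bindings, assuming a &gcc clock-controller label and an sdhc_2 node defined elsewhere in the SoC device tree:

	#include <dt-bindings/clock/qcom,gcc-sm8350.h>

	/* Hook the SDCC2 interface and core clocks up to the controller. */
	&sdhc_2 {
		clocks = <&gcc GCC_SDCC2_AHB_CLK>,
			 <&gcc GCC_SDCC2_APPS_CLK>;
		clock-names = "iface", "core";
	};
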
diff --git a/include/dt-bindings/clock/qcom,gcc-sm8450.h b/include/dt-bindings/clock/qcom,gcc-sm8450.h
new file mode 100644
index 000000000000..cf1469312c4c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sm8450.h
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM8450_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM8450_H
+
+/* GCC HW clocks */
+#define CORE_BI_PLL_TEST_SE 0
+#define PCIE_0_PIPE_CLK 1
+#define PCIE_1_PHY_AUX_CLK 2
+#define PCIE_1_PIPE_CLK 3
+#define UFS_PHY_RX_SYMBOL_0_CLK 4
+#define UFS_PHY_RX_SYMBOL_1_CLK 5
+#define UFS_PHY_TX_SYMBOL_0_CLK 6
+#define USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK 7
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_PCIE_0_AXI_CLK 8
+#define GCC_AGGRE_NOC_PCIE_1_AXI_CLK 9
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 10
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 11
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 12
+#define GCC_ANOC_PCIE_PWRCTL_CLK 13
+#define GCC_BOOT_ROM_AHB_CLK 14
+#define GCC_CAMERA_AHB_CLK 15
+#define GCC_CAMERA_HF_AXI_CLK 16
+#define GCC_CAMERA_SF_AXI_CLK 17
+#define GCC_CAMERA_XO_CLK 18
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 19
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 20
+#define GCC_CPUSS_AHB_CLK 21
+#define GCC_CPUSS_AHB_CLK_SRC 22
+#define GCC_CPUSS_AHB_POSTDIV_CLK_SRC 23
+#define GCC_CPUSS_CONFIG_NOC_SF_CLK 24
+#define GCC_DDRSS_GPU_AXI_CLK 25
+#define GCC_DDRSS_PCIE_SF_TBU_CLK 26
+#define GCC_DISP_AHB_CLK 27
+#define GCC_DISP_HF_AXI_CLK 28
+#define GCC_DISP_SF_AXI_CLK 29
+#define GCC_DISP_XO_CLK 30
+#define GCC_EUSB3_0_CLKREF_EN 31
+#define GCC_GP1_CLK 32
+#define GCC_GP1_CLK_SRC 33
+#define GCC_GP2_CLK 34
+#define GCC_GP2_CLK_SRC 35
+#define GCC_GP3_CLK 36
+#define GCC_GP3_CLK_SRC 37
+#define GCC_GPLL0 38
+#define GCC_GPLL0_OUT_EVEN 39
+#define GCC_GPLL4 40
+#define GCC_GPLL9 41
+#define GCC_GPU_CFG_AHB_CLK 42
+#define GCC_GPU_GPLL0_CLK_SRC 43
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 44
+#define GCC_GPU_MEMNOC_GFX_CLK 45
+#define GCC_GPU_SNOC_DVM_GFX_CLK 46
+#define GCC_PCIE_0_AUX_CLK 47
+#define GCC_PCIE_0_AUX_CLK_SRC 48
+#define GCC_PCIE_0_CFG_AHB_CLK 49
+#define GCC_PCIE_0_CLKREF_EN 50
+#define GCC_PCIE_0_MSTR_AXI_CLK 51
+#define GCC_PCIE_0_PHY_RCHNG_CLK 52
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 53
+#define GCC_PCIE_0_PIPE_CLK 54
+#define GCC_PCIE_0_PIPE_CLK_SRC 55
+#define GCC_PCIE_0_SLV_AXI_CLK 56
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 57
+#define GCC_PCIE_1_AUX_CLK 58
+#define GCC_PCIE_1_AUX_CLK_SRC 59
+#define GCC_PCIE_1_CFG_AHB_CLK 60
+#define GCC_PCIE_1_CLKREF_EN 61
+#define GCC_PCIE_1_MSTR_AXI_CLK 62
+#define GCC_PCIE_1_PHY_AUX_CLK 63
+#define GCC_PCIE_1_PHY_AUX_CLK_SRC 64
+#define GCC_PCIE_1_PHY_RCHNG_CLK 65
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 66
+#define GCC_PCIE_1_PIPE_CLK 67
+#define GCC_PCIE_1_PIPE_CLK_SRC 68
+#define GCC_PCIE_1_SLV_AXI_CLK 69
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 70
+#define GCC_PDM2_CLK 71
+#define GCC_PDM2_CLK_SRC 72
+#define GCC_PDM_AHB_CLK 73
+#define GCC_PDM_XO4_CLK 74
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 75
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 76
+#define GCC_QMIP_DISP_AHB_CLK 77
+#define GCC_QMIP_GPU_AHB_CLK 78
+#define GCC_QMIP_PCIE_AHB_CLK 79
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 80
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 81
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 82
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 83
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 84
+#define GCC_QUPV3_WRAP0_CORE_CLK 85
+#define GCC_QUPV3_WRAP0_S0_CLK 86
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 87
+#define GCC_QUPV3_WRAP0_S1_CLK 88
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 89
+#define GCC_QUPV3_WRAP0_S2_CLK 90
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 91
+#define GCC_QUPV3_WRAP0_S3_CLK 92
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 93
+#define GCC_QUPV3_WRAP0_S4_CLK 94
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 95
+#define GCC_QUPV3_WRAP0_S5_CLK 96
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 97
+#define GCC_QUPV3_WRAP0_S6_CLK 98
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 99
+#define GCC_QUPV3_WRAP0_S7_CLK 100
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 101
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 102
+#define GCC_QUPV3_WRAP1_CORE_CLK 103
+#define GCC_QUPV3_WRAP1_S0_CLK 104
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 105
+#define GCC_QUPV3_WRAP1_S1_CLK 106
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 107
+#define GCC_QUPV3_WRAP1_S2_CLK 108
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 109
+#define GCC_QUPV3_WRAP1_S3_CLK 110
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 111
+#define GCC_QUPV3_WRAP1_S4_CLK 112
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 113
+#define GCC_QUPV3_WRAP1_S5_CLK 114
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 115
+#define GCC_QUPV3_WRAP1_S6_CLK 116
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 117
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 118
+#define GCC_QUPV3_WRAP2_CORE_CLK 119
+#define GCC_QUPV3_WRAP2_S0_CLK 120
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 121
+#define GCC_QUPV3_WRAP2_S1_CLK 122
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 123
+#define GCC_QUPV3_WRAP2_S2_CLK 124
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 125
+#define GCC_QUPV3_WRAP2_S3_CLK 126
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 127
+#define GCC_QUPV3_WRAP2_S4_CLK 128
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 129
+#define GCC_QUPV3_WRAP2_S5_CLK 130
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 131
+#define GCC_QUPV3_WRAP2_S6_CLK 132
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 133
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 134
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 135
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 136
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 137
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 138
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 139
+#define GCC_SDCC2_AHB_CLK 140
+#define GCC_SDCC2_APPS_CLK 141
+#define GCC_SDCC2_APPS_CLK_SRC 142
+#define GCC_SDCC2_AT_CLK 143
+#define GCC_SDCC4_AHB_CLK 144
+#define GCC_SDCC4_APPS_CLK 145
+#define GCC_SDCC4_APPS_CLK_SRC 146
+#define GCC_SDCC4_AT_CLK 147
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 148
+#define GCC_UFS_0_CLKREF_EN 149
+#define GCC_UFS_PHY_AHB_CLK 150
+#define GCC_UFS_PHY_AXI_CLK 151
+#define GCC_UFS_PHY_AXI_CLK_SRC 152
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 153
+#define GCC_UFS_PHY_ICE_CORE_CLK 154
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 155
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 156
+#define GCC_UFS_PHY_PHY_AUX_CLK 157
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 158
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 159
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 160
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 161
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 162
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 163
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 164
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 165
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 166
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 167
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 168
+#define GCC_USB30_PRIM_MASTER_CLK 169
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 170
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 171
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 172
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 173
+#define GCC_USB30_PRIM_SLEEP_CLK 174
+#define GCC_USB3_0_CLKREF_EN 175
+#define GCC_USB3_PRIM_PHY_AUX_CLK 176
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 177
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 178
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 179
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 180
+#define GCC_VIDEO_AHB_CLK 181
+#define GCC_VIDEO_AXI0_CLK 182
+#define GCC_VIDEO_AXI1_CLK 183
+#define GCC_VIDEO_XO_CLK 184
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_GPU_BCR 2
+#define GCC_MMSS_BCR 3
+#define GCC_PCIE_0_BCR 4
+#define GCC_PCIE_0_LINK_DOWN_BCR 5
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 6
+#define GCC_PCIE_0_PHY_BCR 7
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 8
+#define GCC_PCIE_1_BCR 9
+#define GCC_PCIE_1_LINK_DOWN_BCR 10
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 11
+#define GCC_PCIE_1_PHY_BCR 12
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 13
+#define GCC_PCIE_PHY_BCR 14
+#define GCC_PCIE_PHY_CFG_AHB_BCR 15
+#define GCC_PCIE_PHY_COM_BCR 16
+#define GCC_PDM_BCR 17
+#define GCC_QUPV3_WRAPPER_0_BCR 18
+#define GCC_QUPV3_WRAPPER_1_BCR 19
+#define GCC_QUPV3_WRAPPER_2_BCR 20
+#define GCC_QUSB2PHY_PRIM_BCR 21
+#define GCC_QUSB2PHY_SEC_BCR 22
+#define GCC_SDCC2_BCR 23
+#define GCC_SDCC4_BCR 24
+#define GCC_UFS_PHY_BCR 25
+#define GCC_USB30_PRIM_BCR 26
+#define GCC_USB3_DP_PHY_PRIM_BCR 27
+#define GCC_USB3_DP_PHY_SEC_BCR 28
+#define GCC_USB3_PHY_PRIM_BCR 29
+#define GCC_USB3_PHY_SEC_BCR 30
+#define GCC_USB3PHY_PHY_PRIM_BCR 31
+#define GCC_USB3PHY_PHY_SEC_BCR 32
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 33
+#define GCC_VIDEO_AXI0_CLK_ARES 34
+#define GCC_VIDEO_AXI1_CLK_ARES 35
+#define GCC_VIDEO_BCR 36
+
+/* GCC power domains */
+#define PCIE_0_GDSC 0
+#define PCIE_1_GDSC 1
+#define UFS_PHY_GDSC 2
+#define USB30_PRIM_GDSC 3
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sc7180.h b/include/dt-bindings/clock/qcom,gpucc-sc7180.h
index 0e4643b08b49..65e706d7d9c6 100644
--- a/include/dt-bindings/clock/qcom,gpucc-sc7180.h
+++ b/include/dt-bindings/clock/qcom,gpucc-sc7180.h
@@ -15,7 +15,8 @@
#define GPU_CC_CXO_CLK 6
#define GPU_CC_GMU_CLK_SRC 7

-/* CAM_CC GDSCRs */
+/* GPU_CC GDSCRs */
#define CX_GDSC 0
+#define GX_GDSC 1

#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sc7280.h b/include/dt-bindings/clock/qcom,gpucc-sc7280.h
new file mode 100644
index 000000000000..669b23b606ba
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sc7280.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SC7280_H
+
+/* GPU_CC clocks */
+#define GPU_CC_PLL0 0
+#define GPU_CC_PLL1 1
+#define GPU_CC_AHB_CLK 2
+#define GPU_CC_CB_CLK 3
+#define GPU_CC_CRC_AHB_CLK 4
+#define GPU_CC_CX_GMU_CLK 5
+#define GPU_CC_CX_SNOC_DVM_CLK 6
+#define GPU_CC_CXO_AON_CLK 7
+#define GPU_CC_CXO_CLK 8
+#define GPU_CC_GMU_CLK_SRC 9
+#define GPU_CC_GX_GMU_CLK 10
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 11
+#define GPU_CC_HUB_AHB_DIV_CLK_SRC 12
+#define GPU_CC_HUB_AON_CLK 13
+#define GPU_CC_HUB_CLK_SRC 14
+#define GPU_CC_HUB_CX_INT_CLK 15
+#define GPU_CC_HUB_CX_INT_DIV_CLK_SRC 16
+#define GPU_CC_MND1X_0_GFX3D_CLK 17
+#define GPU_CC_MND1X_1_GFX3D_CLK 18
+#define GPU_CC_SLEEP_CLK 19
+
+/* GPU_CC power domains */
+#define GPU_CC_CX_GDSC 0
+#define GPU_CC_GX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sc8280xp.h b/include/dt-bindings/clock/qcom,gpucc-sc8280xp.h
new file mode 100644
index 000000000000..bb7da46333b0
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sc8280xp.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SC8280XP_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SC8280XP_H
+
+/* GPU_CC clocks */
+#define GPU_CC_PLL0 0
+#define GPU_CC_PLL1 1
+#define GPU_CC_AHB_CLK 2
+#define GPU_CC_CB_CLK 3
+#define GPU_CC_CRC_AHB_CLK 4
+#define GPU_CC_CX_GMU_CLK 5
+#define GPU_CC_CX_SNOC_DVM_CLK 6
+#define GPU_CC_CXO_AON_CLK 7
+#define GPU_CC_CXO_CLK 8
+#define GPU_CC_FREQ_MEASURE_CLK 9
+#define GPU_CC_GMU_CLK_SRC 10
+#define GPU_CC_GX_GMU_CLK 11
+#define GPU_CC_GX_VSENSE_CLK 12
+#define GPU_CC_HUB_AHB_DIV_CLK_SRC 13
+#define GPU_CC_HUB_AON_CLK 14
+#define GPU_CC_HUB_CLK_SRC 15
+#define GPU_CC_HUB_CX_INT_CLK 16
+#define GPU_CC_HUB_CX_INT_DIV_CLK_SRC 17
+#define GPU_CC_SLEEP_CLK 18
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 19
+
+/* GPU_CC power domains */
+#define GPU_CC_CX_GDSC 0
+#define GPU_CC_GX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sdm660.h b/include/dt-bindings/clock/qcom,gpucc-sdm660.h
new file mode 100644
index 000000000000..7ea3e53df58c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sdm660.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020, AngeloGioacchino Del Regno <angelogioacchino.delregno@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_SDM_GPUCC_660_H
+#define _DT_BINDINGS_CLK_SDM_GPUCC_660_H
+
+#define GPUCC_CXO_CLK 0
+#define GPU_PLL0_PLL 1
+#define GPU_PLL1_PLL 2
+#define GFX3D_CLK_SRC 3
+#define RBCPR_CLK_SRC 4
+#define RBBMTIMER_CLK_SRC 5
+#define GPUCC_RBCPR_CLK 6
+#define GPUCC_GFX3D_CLK 7
+#define GPUCC_RBBMTIMER_CLK 8
+
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#define GPU_CX_BCR 0
+#define GPU_GX_BCR 1
+#define RBCPR_BCR 2
+#define SPDM_BCR 3
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sm6350.h b/include/dt-bindings/clock/qcom,gpucc-sm6350.h
new file mode 100644
index 000000000000..68e814fc8acd
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sm6350.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM6350_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM6350_H
+
+/* GPU_CC clocks */
+#define GPU_CC_PLL0 0
+#define GPU_CC_PLL1 1
+#define GPU_CC_ACD_AHB_CLK 2
+#define GPU_CC_ACD_CXO_CLK 3
+#define GPU_CC_AHB_CLK 4
+#define GPU_CC_CRC_AHB_CLK 5
+#define GPU_CC_CX_GFX3D_CLK 6
+#define GPU_CC_CX_GFX3D_SLV_CLK 7
+#define GPU_CC_CX_GMU_CLK 8
+#define GPU_CC_CX_SNOC_DVM_CLK 9
+#define GPU_CC_CXO_AON_CLK 10
+#define GPU_CC_CXO_CLK 11
+#define GPU_CC_GMU_CLK_SRC 12
+#define GPU_CC_GX_CXO_CLK 13
+#define GPU_CC_GX_GFX3D_CLK 14
+#define GPU_CC_GX_GFX3D_CLK_SRC 15
+#define GPU_CC_GX_GMU_CLK 16
+#define GPU_CC_GX_VSENSE_CLK 17
+
+/* CLK_HW */
+#define GPU_CC_CRC_DIV 0
+
+/* GDSCs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sm8150.h b/include/dt-bindings/clock/qcom,gpucc-sm8150.h
new file mode 100644
index 000000000000..c5b70aad7770
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sm8150.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8150_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8150_H
+
+/* GPU_CC clock registers */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CRC_AHB_CLK 1
+#define GPU_CC_CX_APB_CLK 2
+#define GPU_CC_CX_GMU_CLK 3
+#define GPU_CC_CX_SNOC_DVM_CLK 4
+#define GPU_CC_CXO_AON_CLK 5
+#define GPU_CC_CXO_CLK 6
+#define GPU_CC_GMU_CLK_SRC 7
+#define GPU_CC_GX_GMU_CLK 8
+#define GPU_CC_PLL1 9
+
+/* GPU_CC Resets */
+#define GPUCC_GPU_CC_CX_BCR 0
+#define GPUCC_GPU_CC_GFX3D_AON_BCR 1
+#define GPUCC_GPU_CC_GMU_BCR 2
+#define GPUCC_GPU_CC_GX_BCR 3
+#define GPUCC_GPU_CC_SPDM_BCR 4
+#define GPUCC_GPU_CC_XO_BCR 5
+
+/* GPU_CC GDSCRs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sm8250.h b/include/dt-bindings/clock/qcom,gpucc-sm8250.h
new file mode 100644
index 000000000000..dc8e387c48ad
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sm8250.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8250_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8250_H
+
+/* GPU_CC clock registers */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CRC_AHB_CLK 1
+#define GPU_CC_CX_APB_CLK 2
+#define GPU_CC_CX_GMU_CLK 3
+#define GPU_CC_CX_SNOC_DVM_CLK 4
+#define GPU_CC_CXO_AON_CLK 5
+#define GPU_CC_CXO_CLK 6
+#define GPU_CC_GMU_CLK_SRC 7
+#define GPU_CC_GX_GMU_CLK 8
+#define GPU_CC_PLL1 9
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 10
+
+/* GPU_CC Resets */
+#define GPUCC_GPU_CC_ACD_BCR 0
+#define GPUCC_GPU_CC_CX_BCR 1
+#define GPUCC_GPU_CC_GFX3D_AON_BCR 2
+#define GPUCC_GPU_CC_GMU_BCR 3
+#define GPUCC_GPU_CC_GX_BCR 4
+#define GPUCC_GPU_CC_XO_BCR 5
+
+/* GPU_CC GDSCRs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sm8350.h b/include/dt-bindings/clock/qcom,gpucc-sm8350.h
new file mode 100644
index 000000000000..2ca857f5bfd2
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sm8350.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8350_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8350_H
+
+/* GPU_CC clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CB_CLK 1
+#define GPU_CC_CRC_AHB_CLK 2
+#define GPU_CC_CX_APB_CLK 3
+#define GPU_CC_CX_GMU_CLK 4
+#define GPU_CC_CX_QDSS_AT_CLK 5
+#define GPU_CC_CX_QDSS_TRIG_CLK 6
+#define GPU_CC_CX_QDSS_TSCTR_CLK 7
+#define GPU_CC_CX_SNOC_DVM_CLK 8
+#define GPU_CC_CXO_AON_CLK 9
+#define GPU_CC_CXO_CLK 10
+#define GPU_CC_FREQ_MEASURE_CLK 11
+#define GPU_CC_GMU_CLK_SRC 12
+#define GPU_CC_GX_GMU_CLK 13
+#define GPU_CC_GX_QDSS_TSCTR_CLK 14
+#define GPU_CC_GX_VSENSE_CLK 15
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 16
+#define GPU_CC_HUB_AHB_DIV_CLK_SRC 17
+#define GPU_CC_HUB_AON_CLK 18
+#define GPU_CC_HUB_CLK_SRC 19
+#define GPU_CC_HUB_CX_INT_CLK 20
+#define GPU_CC_HUB_CX_INT_DIV_CLK_SRC 21
+#define GPU_CC_MND1X_0_GFX3D_CLK 22
+#define GPU_CC_MND1X_1_GFX3D_CLK 23
+#define GPU_CC_PLL0 24
+#define GPU_CC_PLL1 25
+#define GPU_CC_SLEEP_CLK 26
+
+/* GPU_CC resets */
+#define GPUCC_GPU_CC_ACD_BCR 0
+#define GPUCC_GPU_CC_CB_BCR 1
+#define GPUCC_GPU_CC_CX_BCR 2
+#define GPUCC_GPU_CC_FAST_HUB_BCR 3
+#define GPUCC_GPU_CC_GFX3D_AON_BCR 4
+#define GPUCC_GPU_CC_GMU_BCR 5
+#define GPUCC_GPU_CC_GX_BCR 6
+#define GPUCC_GPU_CC_XO_BCR 7
+
+/* GPU_CC GDSCRs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#endif
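
Illustration (not part of the patch): a sketch of a consumer referencing the GPU_CC indices above, assuming &gpucc and &gmu labels from the SoC device tree; the exact clock-names depend on the consumer binding:

	#include <dt-bindings/clock/qcom,gpucc-sm8350.h>

	/* GMU clocked from GPU_CC, powered by the CX GDSC. */
	&gmu {
		clocks = <&gpucc GPU_CC_CX_GMU_CLK>,
			 <&gpucc GPU_CC_CXO_CLK>;
		clock-names = "gmu", "cxo";
		power-domains = <&gpucc GPU_CX_GDSC>;
	};
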
diff --git a/include/dt-bindings/clock/qcom,lcc-ipq806x.h b/include/dt-bindings/clock/qcom,lcc-ipq806x.h
index 25b92bbf0ab4..e0fb4acf4ba8 100644
--- a/include/dt-bindings/clock/qcom,lcc-ipq806x.h
+++ b/include/dt-bindings/clock/qcom,lcc-ipq806x.h
@@ -19,4 +19,6 @@
#define SPDIF_CLK 10
#define AHBIX_CLK 11

+#define LCC_PCM_RESET 0
+
#endif
diff --git a/include/dt-bindings/clock/qcom,lpass-sc7280.h b/include/dt-bindings/clock/qcom,lpass-sc7280.h
new file mode 100644
index 000000000000..e71ccac3a375
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lpass-sc7280.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_LPASS_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_LPASS_SC7280_H
+
+#define LPASS_Q6SS_AHBM_CLK 0
+#define LPASS_Q6SS_AHBS_CLK 1
+#define LPASS_TOP_CC_LPI_Q6_AXIM_HS_CLK 2
+#define LPASS_QDSP6SS_XO_CLK 3
+#define LPASS_QDSP6SS_SLEEP_CLK 4
+#define LPASS_QDSP6SS_CORE_CLK 5
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,lpassaudiocc-sc7280.h b/include/dt-bindings/clock/qcom,lpassaudiocc-sc7280.h
new file mode 100644
index 000000000000..22dcd47d4513
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lpassaudiocc-sc7280.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_LPASS_AUDIO_CC_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_LPASS_AUDIO_CC_SC7280_H
+
+/* LPASS_AUDIO_CC clocks */
+#define LPASS_AUDIO_CC_PLL 0
+#define LPASS_AUDIO_CC_PLL_OUT_AUX2 1
+#define LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC 2
+#define LPASS_AUDIO_CC_PLL_OUT_MAIN_DIV_CLK_SRC 3
+#define LPASS_AUDIO_CC_CDIV_RX_MCLK_DIV_CLK_SRC 4
+#define LPASS_AUDIO_CC_CODEC_MEM0_CLK 5
+#define LPASS_AUDIO_CC_CODEC_MEM1_CLK 6
+#define LPASS_AUDIO_CC_CODEC_MEM2_CLK 7
+#define LPASS_AUDIO_CC_CODEC_MEM_CLK 8
+#define LPASS_AUDIO_CC_EXT_MCLK0_CLK 9
+#define LPASS_AUDIO_CC_EXT_MCLK0_CLK_SRC 10
+#define LPASS_AUDIO_CC_EXT_MCLK1_CLK 11
+#define LPASS_AUDIO_CC_EXT_MCLK1_CLK_SRC 12
+#define LPASS_AUDIO_CC_RX_MCLK_2X_CLK 13
+#define LPASS_AUDIO_CC_RX_MCLK_CLK 14
+#define LPASS_AUDIO_CC_RX_MCLK_CLK_SRC 15
+
+/* LPASS AUDIO CC CSR */
+#define LPASS_AUDIO_SWR_RX_CGCR 0
+#define LPASS_AUDIO_SWR_TX_CGCR 1
+#define LPASS_AUDIO_SWR_WSA_CGCR 2
+
+/* LPASS_AON_CC clocks */
+#define LPASS_AON_CC_PLL 0
+#define LPASS_AON_CC_PLL_OUT_EVEN 1
+#define LPASS_AON_CC_PLL_OUT_MAIN_CDIV_DIV_CLK_SRC 2
+#define LPASS_AON_CC_PLL_OUT_ODD 3
+#define LPASS_AON_CC_AUDIO_HM_H_CLK 4
+#define LPASS_AON_CC_CDIV_TX_MCLK_DIV_CLK_SRC 5
+#define LPASS_AON_CC_MAIN_RCG_CLK_SRC 6
+#define LPASS_AON_CC_TX_MCLK_2X_CLK 7
+#define LPASS_AON_CC_TX_MCLK_CLK 8
+#define LPASS_AON_CC_TX_MCLK_RCG_CLK_SRC 9
+#define LPASS_AON_CC_VA_MEM0_CLK 10
+
+/* LPASS_AON_CC power domains */
+#define LPASS_AON_CC_LPASS_AUDIO_HM_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,lpasscorecc-sc7180.h b/include/dt-bindings/clock/qcom,lpasscorecc-sc7180.h
new file mode 100644
index 000000000000..a55d01db2b20
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lpasscorecc-sc7180.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_LPASS_CORE_CC_SC7180_H
+#define _DT_BINDINGS_CLK_QCOM_LPASS_CORE_CC_SC7180_H
+
+/* LPASS_CORE_CC clocks */
+#define LPASS_LPAAUDIO_DIG_PLL 0
+#define LPASS_LPAAUDIO_DIG_PLL_OUT_ODD 1
+#define CORE_CLK_SRC 2
+#define EXT_MCLK0_CLK_SRC 3
+#define LPAIF_PRI_CLK_SRC 4
+#define LPAIF_SEC_CLK_SRC 5
+#define LPASS_AUDIO_CORE_CORE_CLK 6
+#define LPASS_AUDIO_CORE_EXT_MCLK0_CLK 7
+#define LPASS_AUDIO_CORE_LPAIF_PRI_IBIT_CLK 8
+#define LPASS_AUDIO_CORE_LPAIF_SEC_IBIT_CLK 9
+#define LPASS_AUDIO_CORE_SYSNOC_MPORT_CORE_CLK 10
+
+/* LPASS Core power domains */
+#define LPASS_CORE_HM_GDSCR 0
+
+/* LPASS Audio power domains */
+#define LPASS_AUDIO_HM_GDSCR 0
+#define LPASS_PDC_HM_GDSCR 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,lpasscorecc-sc7280.h b/include/dt-bindings/clock/qcom,lpasscorecc-sc7280.h
new file mode 100644
index 000000000000..0324c69ce968
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lpasscorecc-sc7280.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_LPASS_CORE_CC_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_LPASS_CORE_CC_SC7280_H
+
+/* LPASS_CORE_CC clocks */
+#define LPASS_CORE_CC_DIG_PLL 0
+#define LPASS_CORE_CC_DIG_PLL_OUT_MAIN_DIV_CLK_SRC 1
+#define LPASS_CORE_CC_DIG_PLL_OUT_ODD 2
+#define LPASS_CORE_CC_CORE_CLK 3
+#define LPASS_CORE_CC_CORE_CLK_SRC 4
+#define LPASS_CORE_CC_EXT_IF0_CLK_SRC 5
+#define LPASS_CORE_CC_EXT_IF0_IBIT_CLK 6
+#define LPASS_CORE_CC_EXT_IF1_CLK_SRC 7
+#define LPASS_CORE_CC_EXT_IF1_IBIT_CLK 8
+#define LPASS_CORE_CC_LPM_CORE_CLK 9
+#define LPASS_CORE_CC_LPM_MEM0_CORE_CLK 10
+#define LPASS_CORE_CC_SYSNOC_MPORT_CORE_CLK 11
+#define LPASS_CORE_CC_EXT_MCLK0_CLK 12
+#define LPASS_CORE_CC_EXT_MCLK0_CLK_SRC 13
+
+/* LPASS_CORE_CC power domains */
+#define LPASS_CORE_CC_LPASS_CORE_HM_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8994.h b/include/dt-bindings/clock/qcom,mmcc-msm8994.h
new file mode 100644
index 000000000000..4b289092f5a2
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,mmcc-msm8994.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, Konrad Dybcio
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_MMCC_8994_H
+#define _DT_BINDINGS_CLK_MSM_MMCC_8994_H
+
+/* Clocks */
+#define MMPLL0_EARLY 0
+#define MMPLL0_PLL 1
+#define MMPLL1_EARLY 2
+#define MMPLL1_PLL 3
+#define MMPLL3_EARLY 4
+#define MMPLL3_PLL 5
+#define MMPLL4_EARLY 6
+#define MMPLL4_PLL 7
+#define MMPLL5_EARLY 8
+#define MMPLL5_PLL 9
+#define AXI_CLK_SRC 10
+#define RBBMTIMER_CLK_SRC 11
+#define PCLK0_CLK_SRC 12
+#define PCLK1_CLK_SRC 13
+#define MDP_CLK_SRC 14
+#define VSYNC_CLK_SRC 15
+#define BYTE0_CLK_SRC 16
+#define BYTE1_CLK_SRC 17
+#define ESC0_CLK_SRC 18
+#define ESC1_CLK_SRC 19
+#define MDSS_AHB_CLK 20
+#define MDSS_PCLK0_CLK 21
+#define MDSS_PCLK1_CLK 22
+#define MDSS_VSYNC_CLK 23
+#define MDSS_BYTE0_CLK 24
+#define MDSS_BYTE1_CLK 25
+#define MDSS_ESC0_CLK 26
+#define MDSS_ESC1_CLK 27
+#define CSI0_CLK_SRC 28
+#define CSI1_CLK_SRC 29
+#define CSI2_CLK_SRC 30
+#define CSI3_CLK_SRC 31
+#define VFE0_CLK_SRC 32
+#define VFE1_CLK_SRC 33
+#define CPP_CLK_SRC 34
+#define JPEG0_CLK_SRC 35
+#define JPEG1_CLK_SRC 36
+#define JPEG2_CLK_SRC 37
+#define CSI2PHYTIMER_CLK_SRC 38
+#define FD_CORE_CLK_SRC 39
+#define OCMEMNOC_CLK_SRC 40
+#define CCI_CLK_SRC 41
+#define MMSS_GP0_CLK_SRC 42
+#define MMSS_GP1_CLK_SRC 43
+#define JPEG_DMA_CLK_SRC 44
+#define MCLK0_CLK_SRC 45
+#define MCLK1_CLK_SRC 46
+#define MCLK2_CLK_SRC 47
+#define MCLK3_CLK_SRC 48
+#define CSI0PHYTIMER_CLK_SRC 49
+#define CSI1PHYTIMER_CLK_SRC 50
+#define EXTPCLK_CLK_SRC 51
+#define HDMI_CLK_SRC 52
+#define CAMSS_AHB_CLK 53
+#define CAMSS_CCI_CCI_AHB_CLK 54
+#define CAMSS_CCI_CCI_CLK 55
+#define CAMSS_VFE_CPP_AHB_CLK 56
+#define CAMSS_VFE_CPP_AXI_CLK 57
+#define CAMSS_VFE_CPP_CLK 58
+#define CAMSS_CSI0_AHB_CLK 59
+#define CAMSS_CSI0_CLK 60
+#define CAMSS_CSI0PHY_CLK 61
+#define CAMSS_CSI0PIX_CLK 62
+#define CAMSS_CSI0RDI_CLK 63
+#define CAMSS_CSI1_AHB_CLK 64
+#define CAMSS_CSI1_CLK 65
+#define CAMSS_CSI1PHY_CLK 66
+#define CAMSS_CSI1PIX_CLK 67
+#define CAMSS_CSI1RDI_CLK 68
+#define CAMSS_CSI2_AHB_CLK 69
+#define CAMSS_CSI2_CLK 70
+#define CAMSS_CSI2PHY_CLK 71
+#define CAMSS_CSI2PIX_CLK 72
+#define CAMSS_CSI2RDI_CLK 73
+#define CAMSS_CSI3_AHB_CLK 74
+#define CAMSS_CSI3_CLK 75
+#define CAMSS_CSI3PHY_CLK 76
+#define CAMSS_CSI3PIX_CLK 77
+#define CAMSS_CSI3RDI_CLK 78
+#define CAMSS_CSI_VFE0_CLK 79
+#define CAMSS_CSI_VFE1_CLK 80
+#define CAMSS_GP0_CLK 81
+#define CAMSS_GP1_CLK 82
+#define CAMSS_ISPIF_AHB_CLK 83
+#define CAMSS_JPEG_DMA_CLK 84
+#define CAMSS_JPEG_JPEG0_CLK 85
+#define CAMSS_JPEG_JPEG1_CLK 86
+#define CAMSS_JPEG_JPEG2_CLK 87
+#define CAMSS_JPEG_JPEG_AHB_CLK 88
+#define CAMSS_JPEG_JPEG_AXI_CLK 89
+#define CAMSS_MCLK0_CLK 90
+#define CAMSS_MCLK1_CLK 91
+#define CAMSS_MCLK2_CLK 92
+#define CAMSS_MCLK3_CLK 93
+#define CAMSS_MICRO_AHB_CLK 94
+#define CAMSS_PHY0_CSI0PHYTIMER_CLK 95
+#define CAMSS_PHY1_CSI1PHYTIMER_CLK 96
+#define CAMSS_PHY2_CSI2PHYTIMER_CLK 97
+#define CAMSS_TOP_AHB_CLK 98
+#define CAMSS_VFE_VFE0_CLK 99
+#define CAMSS_VFE_VFE1_CLK 100
+#define CAMSS_VFE_VFE_AHB_CLK 101
+#define CAMSS_VFE_VFE_AXI_CLK 102
+#define FD_AXI_CLK 103
+#define FD_CORE_CLK 104
+#define FD_CORE_UAR_CLK 105
+#define MDSS_AXI_CLK 106
+#define MDSS_EXTPCLK_CLK 107
+#define MDSS_HDMI_AHB_CLK 108
+#define MDSS_HDMI_CLK 109
+#define MDSS_MDP_CLK 110
+#define MMSS_MISC_AHB_CLK 111
+#define MMSS_MMSSNOC_AXI_CLK 112
+#define MMSS_S0_AXI_CLK 113
+#define OCMEMCX_OCMEMNOC_CLK 114
+#define OXILI_GFX3D_CLK 115
+#define OXILI_RBBMTIMER_CLK 116
+#define OXILICX_AHB_CLK 117
+#define VENUS0_AHB_CLK 118
+#define VENUS0_AXI_CLK 119
+#define VENUS0_OCMEMNOC_CLK 120
+#define VENUS0_VCODEC0_CLK 121
+#define VENUS0_CORE0_VCODEC_CLK 122
+#define VENUS0_CORE1_VCODEC_CLK 123
+#define VENUS0_CORE2_VCODEC_CLK 124
+#define AHB_CLK_SRC 125
+#define FD_AHB_CLK 126
+
+/* GDSCs */
+#define VENUS_GDSC 0
+#define VENUS_CORE0_GDSC 1
+#define VENUS_CORE1_GDSC 2
+#define VENUS_CORE2_GDSC 3
+#define CAMSS_TOP_GDSC 4
+#define MDSS_GDSC 5
+#define JPEG_GDSC 6
+#define VFE_GDSC 7
+#define CPP_GDSC 8
+#define OXILI_GX_GDSC 9
+#define OXILI_CX_GDSC 10
+#define FD_GDSC 11
+
+/* Resets */
+#define CAMSS_MICRO_BCR 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-sdm660.h b/include/dt-bindings/clock/qcom,mmcc-sdm660.h
new file mode 100644
index 000000000000..f9dbc21cb5c7
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,mmcc-sdm660.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_MMCC_660_H
+#define _DT_BINDINGS_CLK_MSM_MMCC_660_H
+
+#define AHB_CLK_SRC 0
+#define BYTE0_CLK_SRC 1
+#define BYTE1_CLK_SRC 2
+#define CAMSS_GP0_CLK_SRC 3
+#define CAMSS_GP1_CLK_SRC 4
+#define CCI_CLK_SRC 5
+#define CPP_CLK_SRC 6
+#define CSI0_CLK_SRC 7
+#define CSI0PHYTIMER_CLK_SRC 8
+#define CSI1_CLK_SRC 9
+#define CSI1PHYTIMER_CLK_SRC 10
+#define CSI2_CLK_SRC 11
+#define CSI2PHYTIMER_CLK_SRC 12
+#define CSI3_CLK_SRC 13
+#define CSIPHY_CLK_SRC 14
+#define DP_AUX_CLK_SRC 15
+#define DP_CRYPTO_CLK_SRC 16
+#define DP_GTC_CLK_SRC 17
+#define DP_LINK_CLK_SRC 18
+#define DP_PIXEL_CLK_SRC 19
+#define ESC0_CLK_SRC 20
+#define ESC1_CLK_SRC 21
+#define JPEG0_CLK_SRC 22
+#define MCLK0_CLK_SRC 23
+#define MCLK1_CLK_SRC 24
+#define MCLK2_CLK_SRC 25
+#define MCLK3_CLK_SRC 26
+#define MDP_CLK_SRC 27
+#define MMPLL0_PLL 28
+#define MMPLL10_PLL 29
+#define MMPLL1_PLL 30
+#define MMPLL3_PLL 31
+#define MMPLL4_PLL 32
+#define MMPLL5_PLL 33
+#define MMPLL6_PLL 34
+#define MMPLL7_PLL 35
+#define MMPLL8_PLL 36
+#define BIMC_SMMU_AHB_CLK 37
+#define BIMC_SMMU_AXI_CLK 38
+#define CAMSS_AHB_CLK 39
+#define CAMSS_CCI_AHB_CLK 40
+#define CAMSS_CCI_CLK 41
+#define CAMSS_CPHY_CSID0_CLK 42
+#define CAMSS_CPHY_CSID1_CLK 43
+#define CAMSS_CPHY_CSID2_CLK 44
+#define CAMSS_CPHY_CSID3_CLK 45
+#define CAMSS_CPP_AHB_CLK 46
+#define CAMSS_CPP_AXI_CLK 47
+#define CAMSS_CPP_CLK 48
+#define CAMSS_CPP_VBIF_AHB_CLK 49
+#define CAMSS_CSI0_AHB_CLK 50
+#define CAMSS_CSI0_CLK 51
+#define CAMSS_CSI0PHYTIMER_CLK 52
+#define CAMSS_CSI0PIX_CLK 53
+#define CAMSS_CSI0RDI_CLK 54
+#define CAMSS_CSI1_AHB_CLK 55
+#define CAMSS_CSI1_CLK 56
+#define CAMSS_CSI1PHYTIMER_CLK 57
+#define CAMSS_CSI1PIX_CLK 58
+#define CAMSS_CSI1RDI_CLK 59
+#define CAMSS_CSI2_AHB_CLK 60
+#define CAMSS_CSI2_CLK 61
+#define CAMSS_CSI2PHYTIMER_CLK 62
+#define CAMSS_CSI2PIX_CLK 63
+#define CAMSS_CSI2RDI_CLK 64
+#define CAMSS_CSI3_AHB_CLK 65
+#define CAMSS_CSI3_CLK 66
+#define CAMSS_CSI3PIX_CLK 67
+#define CAMSS_CSI3RDI_CLK 68
+#define CAMSS_CSI_VFE0_CLK 69
+#define CAMSS_CSI_VFE1_CLK 70
+#define CAMSS_CSIPHY0_CLK 71
+#define CAMSS_CSIPHY1_CLK 72
+#define CAMSS_CSIPHY2_CLK 73
+#define CAMSS_GP0_CLK 74
+#define CAMSS_GP1_CLK 75
+#define CAMSS_ISPIF_AHB_CLK 76
+#define CAMSS_JPEG0_CLK 77
+#define CAMSS_JPEG_AHB_CLK 78
+#define CAMSS_JPEG_AXI_CLK 79
+#define CAMSS_MCLK0_CLK 80
+#define CAMSS_MCLK1_CLK 81
+#define CAMSS_MCLK2_CLK 82
+#define CAMSS_MCLK3_CLK 83
+#define CAMSS_MICRO_AHB_CLK 84
+#define CAMSS_TOP_AHB_CLK 85
+#define CAMSS_VFE0_AHB_CLK 86
+#define CAMSS_VFE0_CLK 87
+#define CAMSS_VFE0_STREAM_CLK 88
+#define CAMSS_VFE1_AHB_CLK 89
+#define CAMSS_VFE1_CLK 90
+#define CAMSS_VFE1_STREAM_CLK 91
+#define CAMSS_VFE_VBIF_AHB_CLK 92
+#define CAMSS_VFE_VBIF_AXI_CLK 93
+#define CSIPHY_AHB2CRIF_CLK 94
+#define CXO_CLK 95
+#define MDSS_AHB_CLK 96
+#define MDSS_AXI_CLK 97
+#define MDSS_BYTE0_CLK 98
+#define MDSS_BYTE0_INTF_CLK 99
+#define MDSS_BYTE0_INTF_DIV_CLK 100
+#define MDSS_BYTE1_CLK 101
+#define MDSS_BYTE1_INTF_CLK 102
+#define MDSS_DP_AUX_CLK 103
+#define MDSS_DP_CRYPTO_CLK 104
+#define MDSS_DP_GTC_CLK 105
+#define MDSS_DP_LINK_CLK 106
+#define MDSS_DP_LINK_INTF_CLK 107
+#define MDSS_DP_PIXEL_CLK 108
+#define MDSS_ESC0_CLK 109
+#define MDSS_ESC1_CLK 110
+#define MDSS_HDMI_DP_AHB_CLK 111
+#define MDSS_MDP_CLK 112
+#define MDSS_PCLK0_CLK 113
+#define MDSS_PCLK1_CLK 114
+#define MDSS_ROT_CLK 115
+#define MDSS_VSYNC_CLK 116
+#define MISC_AHB_CLK 117
+#define MISC_CXO_CLK 118
+#define MNOC_AHB_CLK 119
+#define SNOC_DVM_AXI_CLK 120
+#define THROTTLE_CAMSS_AHB_CLK 121
+#define THROTTLE_CAMSS_AXI_CLK 122
+#define THROTTLE_MDSS_AHB_CLK 123
+#define THROTTLE_MDSS_AXI_CLK 124
+#define THROTTLE_VIDEO_AHB_CLK 125
+#define THROTTLE_VIDEO_AXI_CLK 126
+#define VIDEO_AHB_CLK 127
+#define VIDEO_AXI_CLK 128
+#define VIDEO_CORE_CLK 129
+#define VIDEO_SUBCORE0_CLK 130
+#define PCLK0_CLK_SRC 131
+#define PCLK1_CLK_SRC 132
+#define ROT_CLK_SRC 133
+#define VFE0_CLK_SRC 134
+#define VFE1_CLK_SRC 135
+#define VIDEO_CORE_CLK_SRC 136
+#define VSYNC_CLK_SRC 137
+#define MDSS_BYTE1_INTF_DIV_CLK 138
+#define AXI_CLK_SRC 139
+
+#define VENUS_GDSC 0
+#define VENUS_CORE0_GDSC 1
+#define MDSS_GDSC 2
+#define CAMSS_TOP_GDSC 3
+#define CAMSS_VFE0_GDSC 4
+#define CAMSS_VFE1_GDSC 5
+#define CAMSS_CPP_GDSC 6
+#define BIMC_SMMU_GDSC 7
+
+#define CAMSS_MICRO_BCR 0
+
+#endif
+
diff --git a/include/dt-bindings/clock/qcom,mss-sc7180.h b/include/dt-bindings/clock/qcom,mss-sc7180.h
new file mode 100644
index 000000000000..f15a9ded2961
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,mss-sc7180.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_MSS_SC7180_H
+#define _DT_BINDINGS_CLK_QCOM_MSS_SC7180_H
+
+#define MSS_AXI_CRYPTO_CLK 0
+#define MSS_AXI_NAV_CLK 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h
index 8e3095720552..c0ad624e930e 100644
--- a/include/dt-bindings/clock/qcom,rpmcc.h
+++ b/include/dt-bindings/clock/qcom,rpmcc.h
@@ -37,6 +37,10 @@
#define RPM_XO_A0 27
#define RPM_XO_A1 28
#define RPM_XO_A2 29
+#define RPM_NSS_FABRIC_0_CLK 30
+#define RPM_NSS_FABRIC_0_A_CLK 31
+#define RPM_NSS_FABRIC_1_CLK 32
+#define RPM_NSS_FABRIC_1_A_CLK 33

/* SMD RPM clocks */
#define RPM_SMD_XO_CLK_SRC 0
@@ -129,5 +133,40 @@
#define RPM_SMD_RF_CLK3_A 87
#define RPM_SMD_RF_CLK3_PIN 88
#define RPM_SMD_RF_CLK3_A_PIN 89
+#define RPM_SMD_MMSSNOC_AXI_CLK 90
+#define RPM_SMD_MMSSNOC_AXI_CLK_A 91
+#define RPM_SMD_CNOC_PERIPH_CLK 92
+#define RPM_SMD_CNOC_PERIPH_A_CLK 93
+#define RPM_SMD_LN_BB_CLK3 94
+#define RPM_SMD_LN_BB_CLK3_A 95
+#define RPM_SMD_LN_BB_CLK1_PIN 96
+#define RPM_SMD_LN_BB_CLK1_A_PIN 97
+#define RPM_SMD_LN_BB_CLK2_PIN 98
+#define RPM_SMD_LN_BB_CLK2_A_PIN 99
+#define RPM_SMD_SYSMMNOC_CLK 100
+#define RPM_SMD_SYSMMNOC_A_CLK 101
+#define RPM_SMD_CE2_CLK 102
+#define RPM_SMD_CE2_A_CLK 103
+#define RPM_SMD_CE3_CLK 104
+#define RPM_SMD_CE3_A_CLK 105
+#define RPM_SMD_QUP_CLK 106
+#define RPM_SMD_QUP_A_CLK 107
+#define RPM_SMD_MMRT_CLK 108
+#define RPM_SMD_MMRT_A_CLK 109
+#define RPM_SMD_MMNRT_CLK 110
+#define RPM_SMD_MMNRT_A_CLK 111
+#define RPM_SMD_SNOC_PERIPH_CLK 112
+#define RPM_SMD_SNOC_PERIPH_A_CLK 113
+#define RPM_SMD_SNOC_LPASS_CLK 114
+#define RPM_SMD_SNOC_LPASS_A_CLK 115
+#define RPM_SMD_HWKM_CLK 116
+#define RPM_SMD_HWKM_A_CLK 117
+#define RPM_SMD_PKA_CLK 118
+#define RPM_SMD_PKA_A_CLK 119
+#define RPM_SMD_CPUSS_GNOC_CLK 120
+#define RPM_SMD_CPUSS_GNOC_A_CLK 121
+#define RPM_SMD_MSS_CFG_AHB_CLK 122
+#define RPM_SMD_MSS_CFG_AHB_A_CLK 123
+#define RPM_SMD_BIMC_FREQ_LOG 124

#endif
diff --git a/include/dt-bindings/clock/qcom,rpmh.h b/include/dt-bindings/clock/qcom,rpmh.h
index edcab3f7b7d3..0a7d1be0d124 100644
--- a/include/dt-bindings/clock/qcom,rpmh.h
+++ b/include/dt-bindings/clock/qcom,rpmh.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. */

#ifndef _DT_BINDINGS_CLK_MSM_RPMH_H
#define _DT_BINDINGS_CLK_MSM_RPMH_H
@@ -19,5 +19,19 @@
#define RPMH_RF_CLK3 10
#define RPMH_RF_CLK3_A 11
#define RPMH_IPA_CLK 12
+#define RPMH_LN_BB_CLK1 13
+#define RPMH_LN_BB_CLK1_A 14
+#define RPMH_CE_CLK 15
+#define RPMH_QPIC_CLK 16
+#define RPMH_DIV_CLK1 17
+#define RPMH_DIV_CLK1_A 18
+#define RPMH_RF_CLK4 19
+#define RPMH_RF_CLK4_A 20
+#define RPMH_RF_CLK5 21
+#define RPMH_RF_CLK5_A 22
+#define RPMH_PKA_CLK 23
+#define RPMH_HWKM_CLK 24
+#define RPMH_QLINK_CLK 25
+#define RPMH_QLINK_CLK_A 26

#endif
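
Illustration (not part of the patch): RPMh clocks are likewise referenced by index from consumer nodes, assuming an &rpmhcc label and an &ipa node, using RPMH_IPA_CLK from the hunk above:

	#include <dt-bindings/clock/qcom,rpmh.h>

	/* IPA core clock is owned by RPMh and only voted on from Linux. */
	&ipa {
		clocks = <&rpmhcc RPMH_IPA_CLK>;
		clock-names = "core";
	};
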
diff --git a/include/dt-bindings/clock/qcom,sm6115-dispcc.h b/include/dt-bindings/clock/qcom,sm6115-dispcc.h
new file mode 100644
index 000000000000..d1a6c45b5029
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm6115-dispcc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6115_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6115_H
+
+/* DISP_CC clocks */
+#define DISP_CC_PLL0 0
+#define DISP_CC_PLL0_OUT_MAIN 1
+#define DISP_CC_MDSS_AHB_CLK 2
+#define DISP_CC_MDSS_AHB_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_CLK 4
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 7
+#define DISP_CC_MDSS_ESC0_CLK 8
+#define DISP_CC_MDSS_ESC0_CLK_SRC 9
+#define DISP_CC_MDSS_MDP_CLK 10
+#define DISP_CC_MDSS_MDP_CLK_SRC 11
+#define DISP_CC_MDSS_MDP_LUT_CLK 12
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 13
+#define DISP_CC_MDSS_PCLK0_CLK 14
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 15
+#define DISP_CC_MDSS_ROT_CLK 16
+#define DISP_CC_MDSS_ROT_CLK_SRC 17
+#define DISP_CC_MDSS_VSYNC_CLK 18
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 19
+#define DISP_CC_SLEEP_CLK 20
+#define DISP_CC_SLEEP_CLK_SRC 21
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm6375-gcc.h b/include/dt-bindings/clock/qcom,sm6375-gcc.h
new file mode 100644
index 000000000000..1e9801e1cedf
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm6375-gcc.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM6375_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM6375_H
+
+/* Clocks */
+#define GPLL0 0
+#define GPLL0_OUT_EVEN 1
+#define GPLL0_OUT_ODD 2
+#define GPLL1 3
+#define GPLL10 4
+#define GPLL11 5
+#define GPLL3 6
+#define GPLL3_OUT_EVEN 7
+#define GPLL4 8
+#define GPLL5 9
+#define GPLL6 10
+#define GPLL6_OUT_EVEN 11
+#define GPLL7 12
+#define GPLL8 13
+#define GPLL8_OUT_EVEN 14
+#define GPLL9 15
+#define GPLL9_OUT_MAIN 16
+#define GCC_AHB2PHY_CSI_CLK 17
+#define GCC_AHB2PHY_USB_CLK 18
+#define GCC_BIMC_GPU_AXI_CLK 19
+#define GCC_BOOT_ROM_AHB_CLK 20
+#define GCC_CAM_THROTTLE_NRT_CLK 21
+#define GCC_CAM_THROTTLE_RT_CLK 22
+#define GCC_CAMERA_AHB_CLK 23
+#define GCC_CAMERA_XO_CLK 24
+#define GCC_CAMSS_AXI_CLK 25
+#define GCC_CAMSS_AXI_CLK_SRC 26
+#define GCC_CAMSS_CAMNOC_ATB_CLK 27
+#define GCC_CAMSS_CAMNOC_NTS_XO_CLK 28
+#define GCC_CAMSS_CCI_0_CLK 29
+#define GCC_CAMSS_CCI_0_CLK_SRC 30
+#define GCC_CAMSS_CCI_1_CLK 31
+#define GCC_CAMSS_CCI_1_CLK_SRC 32
+#define GCC_CAMSS_CPHY_0_CLK 33
+#define GCC_CAMSS_CPHY_1_CLK 34
+#define GCC_CAMSS_CPHY_2_CLK 35
+#define GCC_CAMSS_CPHY_3_CLK 36
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 37
+#define GCC_CAMSS_CSI0PHYTIMER_CLK_SRC 38
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 39
+#define GCC_CAMSS_CSI1PHYTIMER_CLK_SRC 40
+#define GCC_CAMSS_CSI2PHYTIMER_CLK 41
+#define GCC_CAMSS_CSI2PHYTIMER_CLK_SRC 42
+#define GCC_CAMSS_CSI3PHYTIMER_CLK 43
+#define GCC_CAMSS_CSI3PHYTIMER_CLK_SRC 44
+#define GCC_CAMSS_MCLK0_CLK 45
+#define GCC_CAMSS_MCLK0_CLK_SRC 46
+#define GCC_CAMSS_MCLK1_CLK 47
+#define GCC_CAMSS_MCLK1_CLK_SRC 48
+#define GCC_CAMSS_MCLK2_CLK 49
+#define GCC_CAMSS_MCLK2_CLK_SRC 50
+#define GCC_CAMSS_MCLK3_CLK 51
+#define GCC_CAMSS_MCLK3_CLK_SRC 52
+#define GCC_CAMSS_MCLK4_CLK 53
+#define GCC_CAMSS_MCLK4_CLK_SRC 54
+#define GCC_CAMSS_NRT_AXI_CLK 55
+#define GCC_CAMSS_OPE_AHB_CLK 56
+#define GCC_CAMSS_OPE_AHB_CLK_SRC 57
+#define GCC_CAMSS_OPE_CLK 58
+#define GCC_CAMSS_OPE_CLK_SRC 59
+#define GCC_CAMSS_RT_AXI_CLK 60
+#define GCC_CAMSS_TFE_0_CLK 61
+#define GCC_CAMSS_TFE_0_CLK_SRC 62
+#define GCC_CAMSS_TFE_0_CPHY_RX_CLK 63
+#define GCC_CAMSS_TFE_0_CSID_CLK 64
+#define GCC_CAMSS_TFE_0_CSID_CLK_SRC 65
+#define GCC_CAMSS_TFE_1_CLK 66
+#define GCC_CAMSS_TFE_1_CLK_SRC 67
+#define GCC_CAMSS_TFE_1_CPHY_RX_CLK 68
+#define GCC_CAMSS_TFE_1_CSID_CLK 69
+#define GCC_CAMSS_TFE_1_CSID_CLK_SRC 70
+#define GCC_CAMSS_TFE_2_CLK 71
+#define GCC_CAMSS_TFE_2_CLK_SRC 72
+#define GCC_CAMSS_TFE_2_CPHY_RX_CLK 73
+#define GCC_CAMSS_TFE_2_CSID_CLK 74
+#define GCC_CAMSS_TFE_2_CSID_CLK_SRC 75
+#define GCC_CAMSS_TFE_CPHY_RX_CLK_SRC 76
+#define GCC_CAMSS_TOP_AHB_CLK 77
+#define GCC_CAMSS_TOP_AHB_CLK_SRC 78
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 79
+#define GCC_CPUSS_AHB_CLK_SRC 80
+#define GCC_CPUSS_AHB_POSTDIV_CLK_SRC 81
+#define GCC_CPUSS_GNOC_CLK 82
+#define GCC_DISP_AHB_CLK 83
+#define GCC_DISP_GPLL0_CLK_SRC 84
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 85
+#define GCC_DISP_HF_AXI_CLK 86
+#define GCC_DISP_SLEEP_CLK 87
+#define GCC_DISP_THROTTLE_CORE_CLK 88
+#define GCC_DISP_XO_CLK 89
+#define GCC_GP1_CLK 90
+#define GCC_GP1_CLK_SRC 91
+#define GCC_GP2_CLK 92
+#define GCC_GP2_CLK_SRC 93
+#define GCC_GP3_CLK 94
+#define GCC_GP3_CLK_SRC 95
+#define GCC_GPU_CFG_AHB_CLK 96
+#define GCC_GPU_GPLL0_CLK_SRC 97
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 98
+#define GCC_GPU_MEMNOC_GFX_CLK 99
+#define GCC_GPU_SNOC_DVM_GFX_CLK 100
+#define GCC_GPU_THROTTLE_CORE_CLK 101
+#define GCC_PDM2_CLK 102
+#define GCC_PDM2_CLK_SRC 103
+#define GCC_PDM_AHB_CLK 104
+#define GCC_PDM_XO4_CLK 105
+#define GCC_PRNG_AHB_CLK 106
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 107
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 108
+#define GCC_QMIP_DISP_AHB_CLK 109
+#define GCC_QMIP_GPU_CFG_AHB_CLK 110
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 111
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 112
+#define GCC_QUPV3_WRAP0_CORE_CLK 113
+#define GCC_QUPV3_WRAP0_S0_CLK 114
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 115
+#define GCC_QUPV3_WRAP0_S1_CLK 116
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 117
+#define GCC_QUPV3_WRAP0_S2_CLK 118
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 119
+#define GCC_QUPV3_WRAP0_S3_CLK 120
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 121
+#define GCC_QUPV3_WRAP0_S4_CLK 122
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 123
+#define GCC_QUPV3_WRAP0_S5_CLK 124
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 125
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 126
+#define GCC_QUPV3_WRAP1_CORE_CLK 127
+#define GCC_QUPV3_WRAP1_S0_CLK 128
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 129
+#define GCC_QUPV3_WRAP1_S1_CLK 130
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 131
+#define GCC_QUPV3_WRAP1_S2_CLK 132
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 133
+#define GCC_QUPV3_WRAP1_S3_CLK 134
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 135
+#define GCC_QUPV3_WRAP1_S4_CLK 136
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 137
+#define GCC_QUPV3_WRAP1_S5_CLK 138
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 139
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 140
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 141
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 142
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 143
+#define GCC_RX5_PCIE_CLKREF_EN_CLK 144
+#define GCC_SDCC1_AHB_CLK 145
+#define GCC_SDCC1_APPS_CLK 146
+#define GCC_SDCC1_APPS_CLK_SRC 147
+#define GCC_SDCC1_ICE_CORE_CLK 148
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 149
+#define GCC_SDCC2_AHB_CLK 150
+#define GCC_SDCC2_APPS_CLK 151
+#define GCC_SDCC2_APPS_CLK_SRC 152
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 153
+#define GCC_SYS_NOC_UFS_PHY_AXI_CLK 154
+#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK 155
+#define GCC_UFS_MEM_CLKREF_CLK 156
+#define GCC_UFS_PHY_AHB_CLK 157
+#define GCC_UFS_PHY_AXI_CLK 158
+#define GCC_UFS_PHY_AXI_CLK_SRC 159
+#define GCC_UFS_PHY_ICE_CORE_CLK 160
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 161
+#define GCC_UFS_PHY_PHY_AUX_CLK 162
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 163
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 164
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 165
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 166
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 167
+#define GCC_USB30_PRIM_MASTER_CLK 168
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 169
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 170
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 171
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 172
+#define GCC_USB30_PRIM_SLEEP_CLK 173
+#define GCC_USB3_PRIM_CLKREF_CLK 174
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 175
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 176
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 177
+#define GCC_VCODEC0_AXI_CLK 178
+#define GCC_VENUS_AHB_CLK 179
+#define GCC_VENUS_CTL_AXI_CLK 180
+#define GCC_VIDEO_AHB_CLK 181
+#define GCC_VIDEO_AXI0_CLK 182
+#define GCC_VIDEO_THROTTLE_CORE_CLK 183
+#define GCC_VIDEO_VCODEC0_SYS_CLK 184
+#define GCC_VIDEO_VENUS_CLK_SRC 185
+#define GCC_VIDEO_VENUS_CTL_CLK 186
+#define GCC_VIDEO_XO_CLK 187
+
+/* Resets */
+#define GCC_CAMSS_OPE_BCR 0
+#define GCC_CAMSS_TFE_BCR 1
+#define GCC_CAMSS_TOP_BCR 2
+#define GCC_GPU_BCR 3
+#define GCC_MMSS_BCR 4
+#define GCC_PDM_BCR 5
+#define GCC_PRNG_BCR 6
+#define GCC_QUPV3_WRAPPER_0_BCR 7
+#define GCC_QUPV3_WRAPPER_1_BCR 8
+#define GCC_QUSB2PHY_PRIM_BCR 9
+#define GCC_QUSB2PHY_SEC_BCR 10
+#define GCC_SDCC1_BCR 11
+#define GCC_SDCC2_BCR 12
+#define GCC_UFS_PHY_BCR 13
+#define GCC_USB30_PRIM_BCR 14
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 15
+#define GCC_VCODEC0_BCR 16
+#define GCC_VENUS_BCR 17
+#define GCC_VIDEO_INTERFACE_BCR 18
+#define GCC_USB3_DP_PHY_PRIM_BCR 19
+#define GCC_USB3_PHY_PRIM_SP0_BCR 20
+
+/* GDSCs */
+#define USB30_PRIM_GDSC 0
+#define UFS_PHY_GDSC 1
+#define CAMSS_TOP_GDSC 2
+#define VENUS_GDSC 3
+#define VCODEC0_GDSC 4
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_NRT_GDSC 5
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_RT_GDSC 6
+#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 7
+#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 8
+
+#endif
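
The three define blocks above are independent, zero-based index spaces; a consumer selects the space through the property it uses (clocks, resets, or power-domains) against the same GCC phandle. A minimal consumer sketch, assuming a gcc label; the include path, node, compatible, and address are illustrative, not taken from the diff:

#include <dt-bindings/clock/qcom,gcc-sm6115.h>	/* illustrative path; use the header above */

ufs_mem_hc: ufshc@4804000 {
	compatible = "qcom,ufshc";			/* illustrative consumer */
	reg = <0x04804000 0x3000>;
	clocks = <&gcc GCC_UFS_PHY_AXI_CLK>,
		 <&gcc GCC_UFS_PHY_AHB_CLK>;
	clock-names = "core_clk", "iface_clk";
	resets = <&gcc GCC_UFS_PHY_BCR>;		/* index from the Resets block */
	reset-names = "rst";
	power-domains = <&gcc UFS_PHY_GDSC>;		/* index from the GDSCs block */
};
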
diff --git a/include/dt-bindings/clock/qcom,sm8250-lpass-aoncc.h b/include/dt-bindings/clock/qcom,sm8250-lpass-aoncc.h
new file mode 100644
index 000000000000..f5a1cfac8612
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8250-lpass-aoncc.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_CLK_LPASS_AONCC_SM8250_H
+#define _DT_BINDINGS_CLK_LPASS_AONCC_SM8250_H
+
+/* From AONCC */
+#define LPASS_CDC_VA_MCLK 0
+#define LPASS_CDC_TX_NPL 1
+#define LPASS_CDC_TX_MCLK 2
+
+#endif /* _DT_BINDINGS_CLK_LPASS_AONCC_SM8250_H */
diff --git a/include/dt-bindings/clock/qcom,sm8250-lpass-audiocc.h b/include/dt-bindings/clock/qcom,sm8250-lpass-audiocc.h
new file mode 100644
index 000000000000..a1aa6cb5d840
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8250-lpass-audiocc.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_CLK_LPASS_AUDIOCC_SM8250_H
+#define _DT_BINDINGS_CLK_LPASS_AUDIOCC_SM8250_H
+
+/* From AudioCC */
+#define LPASS_CDC_WSA_NPL 0
+#define LPASS_CDC_WSA_MCLK 1
+#define LPASS_CDC_RX_MCLK 2
+#define LPASS_CDC_RX_NPL 3
+#define LPASS_CDC_RX_MCLK_MCLK2 4
+
+#endif /* _DT_BINDINGS_CLK_LPASS_AUDIOCC_SM8250_H */
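
Both LPASS headers above expose only clock IDs, with no reset or GDSC blocks, so the provider is a plain one-cell clock controller. A minimal sketch of a consumer, assuming an audiocc label; the codec node and its address are illustrative:

#include <dt-bindings/clock/qcom,sm8250-lpass-audiocc.h>

wsa_macro: codec@3240000 {
	compatible = "qcom,sm8250-lpass-wsa-macro";	/* illustrative consumer */
	reg = <0x3240000 0x1000>;
	clocks = <&audiocc LPASS_CDC_WSA_MCLK>,
		 <&audiocc LPASS_CDC_WSA_NPL>;
	clock-names = "mclk", "npl";
};
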
diff --git a/include/dt-bindings/clock/qcom,sm8450-camcc.h b/include/dt-bindings/clock/qcom,sm8450-camcc.h
new file mode 100644
index 000000000000..7ff67acf301a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8450-camcc.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8450_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8450_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_CLK 1
+#define CAM_CC_BPS_CLK_SRC 2
+#define CAM_CC_BPS_FAST_AHB_CLK 3
+#define CAM_CC_CAMNOC_AXI_CLK 4
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 5
+#define CAM_CC_CAMNOC_DCD_XO_CLK 6
+#define CAM_CC_CCI_0_CLK 7
+#define CAM_CC_CCI_0_CLK_SRC 8
+#define CAM_CC_CCI_1_CLK 9
+#define CAM_CC_CCI_1_CLK_SRC 10
+#define CAM_CC_CORE_AHB_CLK 11
+#define CAM_CC_CPAS_AHB_CLK 12
+#define CAM_CC_CPAS_BPS_CLK 13
+#define CAM_CC_CPAS_FAST_AHB_CLK 14
+#define CAM_CC_CPAS_IFE_0_CLK 15
+#define CAM_CC_CPAS_IFE_1_CLK 16
+#define CAM_CC_CPAS_IFE_2_CLK 17
+#define CAM_CC_CPAS_IFE_LITE_CLK 18
+#define CAM_CC_CPAS_IPE_NPS_CLK 19
+#define CAM_CC_CPAS_SBI_CLK 20
+#define CAM_CC_CPAS_SFE_0_CLK 21
+#define CAM_CC_CPAS_SFE_1_CLK 22
+#define CAM_CC_CPHY_RX_CLK_SRC 23
+#define CAM_CC_CSI0PHYTIMER_CLK 24
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 25
+#define CAM_CC_CSI1PHYTIMER_CLK 26
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 27
+#define CAM_CC_CSI2PHYTIMER_CLK 28
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 29
+#define CAM_CC_CSI3PHYTIMER_CLK 30
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 31
+#define CAM_CC_CSI4PHYTIMER_CLK 32
+#define CAM_CC_CSI4PHYTIMER_CLK_SRC 33
+#define CAM_CC_CSI5PHYTIMER_CLK 34
+#define CAM_CC_CSI5PHYTIMER_CLK_SRC 35
+#define CAM_CC_CSID_CLK 36
+#define CAM_CC_CSID_CLK_SRC 37
+#define CAM_CC_CSID_CSIPHY_RX_CLK 38
+#define CAM_CC_CSIPHY0_CLK 39
+#define CAM_CC_CSIPHY1_CLK 40
+#define CAM_CC_CSIPHY2_CLK 41
+#define CAM_CC_CSIPHY3_CLK 42
+#define CAM_CC_CSIPHY4_CLK 43
+#define CAM_CC_CSIPHY5_CLK 44
+#define CAM_CC_FAST_AHB_CLK_SRC 45
+#define CAM_CC_GDSC_CLK 46
+#define CAM_CC_ICP_AHB_CLK 47
+#define CAM_CC_ICP_CLK 48
+#define CAM_CC_ICP_CLK_SRC 49
+#define CAM_CC_IFE_0_CLK 50
+#define CAM_CC_IFE_0_CLK_SRC 51
+#define CAM_CC_IFE_0_DSP_CLK 52
+#define CAM_CC_IFE_0_FAST_AHB_CLK 53
+#define CAM_CC_IFE_1_CLK 54
+#define CAM_CC_IFE_1_CLK_SRC 55
+#define CAM_CC_IFE_1_DSP_CLK 56
+#define CAM_CC_IFE_1_FAST_AHB_CLK 57
+#define CAM_CC_IFE_2_CLK 58
+#define CAM_CC_IFE_2_CLK_SRC 59
+#define CAM_CC_IFE_2_DSP_CLK 60
+#define CAM_CC_IFE_2_FAST_AHB_CLK 61
+#define CAM_CC_IFE_LITE_AHB_CLK 62
+#define CAM_CC_IFE_LITE_CLK 63
+#define CAM_CC_IFE_LITE_CLK_SRC 64
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 65
+#define CAM_CC_IFE_LITE_CSID_CLK 66
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 67
+#define CAM_CC_IPE_NPS_AHB_CLK 68
+#define CAM_CC_IPE_NPS_CLK 69
+#define CAM_CC_IPE_NPS_CLK_SRC 70
+#define CAM_CC_IPE_NPS_FAST_AHB_CLK 71
+#define CAM_CC_IPE_PPS_CLK 72
+#define CAM_CC_IPE_PPS_FAST_AHB_CLK 73
+#define CAM_CC_JPEG_CLK 74
+#define CAM_CC_JPEG_CLK_SRC 75
+#define CAM_CC_MCLK0_CLK 76
+#define CAM_CC_MCLK0_CLK_SRC 77
+#define CAM_CC_MCLK1_CLK 78
+#define CAM_CC_MCLK1_CLK_SRC 79
+#define CAM_CC_MCLK2_CLK 80
+#define CAM_CC_MCLK2_CLK_SRC 81
+#define CAM_CC_MCLK3_CLK 82
+#define CAM_CC_MCLK3_CLK_SRC 83
+#define CAM_CC_MCLK4_CLK 84
+#define CAM_CC_MCLK4_CLK_SRC 85
+#define CAM_CC_MCLK5_CLK 86
+#define CAM_CC_MCLK5_CLK_SRC 87
+#define CAM_CC_MCLK6_CLK 88
+#define CAM_CC_MCLK6_CLK_SRC 89
+#define CAM_CC_MCLK7_CLK 90
+#define CAM_CC_MCLK7_CLK_SRC 91
+#define CAM_CC_PLL0 92
+#define CAM_CC_PLL0_OUT_EVEN 93
+#define CAM_CC_PLL0_OUT_ODD 94
+#define CAM_CC_PLL1 95
+#define CAM_CC_PLL1_OUT_EVEN 96
+#define CAM_CC_PLL2 97
+#define CAM_CC_PLL3 98
+#define CAM_CC_PLL3_OUT_EVEN 99
+#define CAM_CC_PLL4 100
+#define CAM_CC_PLL4_OUT_EVEN 101
+#define CAM_CC_PLL5 102
+#define CAM_CC_PLL5_OUT_EVEN 103
+#define CAM_CC_PLL6 104
+#define CAM_CC_PLL6_OUT_EVEN 105
+#define CAM_CC_PLL7 106
+#define CAM_CC_PLL7_OUT_EVEN 107
+#define CAM_CC_PLL8 108
+#define CAM_CC_PLL8_OUT_EVEN 109
+#define CAM_CC_QDSS_DEBUG_CLK 110
+#define CAM_CC_QDSS_DEBUG_CLK_SRC 111
+#define CAM_CC_QDSS_DEBUG_XO_CLK 112
+#define CAM_CC_SBI_AHB_CLK 113
+#define CAM_CC_SBI_CLK 114
+#define CAM_CC_SFE_0_CLK 115
+#define CAM_CC_SFE_0_CLK_SRC 116
+#define CAM_CC_SFE_0_FAST_AHB_CLK 117
+#define CAM_CC_SFE_1_CLK 118
+#define CAM_CC_SFE_1_CLK_SRC 119
+#define CAM_CC_SFE_1_FAST_AHB_CLK 120
+#define CAM_CC_SLEEP_CLK 121
+#define CAM_CC_SLEEP_CLK_SRC 122
+#define CAM_CC_SLOW_AHB_CLK_SRC 123
+#define CAM_CC_XO_CLK_SRC 124
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_ICP_BCR 1
+#define CAM_CC_IFE_0_BCR 2
+#define CAM_CC_IFE_1_BCR 3
+#define CAM_CC_IFE_2_BCR 4
+#define CAM_CC_IPE_0_BCR 5
+#define CAM_CC_QDSS_DEBUG_BCR 6
+#define CAM_CC_SBI_BCR 7
+#define CAM_CC_SFE_0_BCR 8
+#define CAM_CC_SFE_1_BCR 9
+
+/* CAM_CC GDSCRs */
+#define BPS_GDSC 0
+#define IPE_0_GDSC 1
+#define SBI_GDSC 2
+#define IFE_0_GDSC 3
+#define IFE_1_GDSC 4
+#define IFE_2_GDSC 5
+#define SFE_0_GDSC 6
+#define SFE_1_GDSC 7
+#define TITAN_TOP_GDSC 8
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8450-dispcc.h b/include/dt-bindings/clock/qcom,sm8450-dispcc.h
new file mode 100644
index 000000000000..fd16ca894971
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8450-dispcc.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM8450_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM8450_H
+
+/* DISP_CC clocks */
+#define DISP_CC_MDSS_AHB1_CLK 0
+#define DISP_CC_MDSS_AHB_CLK 1
+#define DISP_CC_MDSS_AHB_CLK_SRC 2
+#define DISP_CC_MDSS_BYTE0_CLK 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 6
+#define DISP_CC_MDSS_BYTE1_CLK 7
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 8
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 9
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 10
+#define DISP_CC_MDSS_DPTX0_AUX_CLK 11
+#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 12
+#define DISP_CC_MDSS_DPTX0_CRYPTO_CLK 13
+#define DISP_CC_MDSS_DPTX0_LINK_CLK 14
+#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 15
+#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 16
+#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 17
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 18
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 19
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 20
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 21
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 22
+#define DISP_CC_MDSS_DPTX1_AUX_CLK 23
+#define DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 24
+#define DISP_CC_MDSS_DPTX1_CRYPTO_CLK 25
+#define DISP_CC_MDSS_DPTX1_LINK_CLK 26
+#define DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 27
+#define DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 28
+#define DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 29
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK 30
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 31
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK 32
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 33
+#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 34
+#define DISP_CC_MDSS_DPTX2_AUX_CLK 35
+#define DISP_CC_MDSS_DPTX2_AUX_CLK_SRC 36
+#define DISP_CC_MDSS_DPTX2_CRYPTO_CLK 37
+#define DISP_CC_MDSS_DPTX2_LINK_CLK 38
+#define DISP_CC_MDSS_DPTX2_LINK_CLK_SRC 39
+#define DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC 40
+#define DISP_CC_MDSS_DPTX2_LINK_INTF_CLK 41
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK 42
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC 43
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK 44
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC 45
+#define DISP_CC_MDSS_DPTX3_AUX_CLK 46
+#define DISP_CC_MDSS_DPTX3_AUX_CLK_SRC 47
+#define DISP_CC_MDSS_DPTX3_CRYPTO_CLK 48
+#define DISP_CC_MDSS_DPTX3_LINK_CLK 49
+#define DISP_CC_MDSS_DPTX3_LINK_CLK_SRC 50
+#define DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC 51
+#define DISP_CC_MDSS_DPTX3_LINK_INTF_CLK 52
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK 53
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC 54
+#define DISP_CC_MDSS_ESC0_CLK 55
+#define DISP_CC_MDSS_ESC0_CLK_SRC 56
+#define DISP_CC_MDSS_ESC1_CLK 57
+#define DISP_CC_MDSS_ESC1_CLK_SRC 58
+#define DISP_CC_MDSS_MDP1_CLK 59
+#define DISP_CC_MDSS_MDP_CLK 60
+#define DISP_CC_MDSS_MDP_CLK_SRC 61
+#define DISP_CC_MDSS_MDP_LUT1_CLK 62
+#define DISP_CC_MDSS_MDP_LUT_CLK 63
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 64
+#define DISP_CC_MDSS_PCLK0_CLK 65
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 66
+#define DISP_CC_MDSS_PCLK1_CLK 67
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 68
+#define DISP_CC_MDSS_ROT1_CLK 69
+#define DISP_CC_MDSS_ROT_CLK 70
+#define DISP_CC_MDSS_ROT_CLK_SRC 71
+#define DISP_CC_MDSS_RSCC_AHB_CLK 72
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 73
+#define DISP_CC_MDSS_VSYNC1_CLK 74
+#define DISP_CC_MDSS_VSYNC_CLK 75
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 76
+#define DISP_CC_PLL0 77
+#define DISP_CC_PLL1 78
+#define DISP_CC_SLEEP_CLK 79
+#define DISP_CC_SLEEP_CLK_SRC 80
+#define DISP_CC_XO_CLK 81
+#define DISP_CC_XO_CLK_SRC 82
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+#define MDSS_INT2_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,videocc-sc7280.h b/include/dt-bindings/clock/qcom,videocc-sc7280.h
new file mode 100644
index 000000000000..9e00c3a5f75e
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,videocc-sc7280.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SC7280_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_PLL0 0
+#define VIDEO_CC_IRIS_AHB_CLK 1
+#define VIDEO_CC_IRIS_CLK_SRC 2
+#define VIDEO_CC_MVS0_AXI_CLK 3
+#define VIDEO_CC_MVS0_CORE_CLK 4
+#define VIDEO_CC_MVSC_CORE_CLK 5
+#define VIDEO_CC_MVSC_CTL_AXI_CLK 6
+#define VIDEO_CC_SLEEP_CLK 7
+#define VIDEO_CC_SLEEP_CLK_SRC 8
+#define VIDEO_CC_VENUS_AHB_CLK 9
+#define VIDEO_CC_XO_CLK 10
+#define VIDEO_CC_XO_CLK_SRC 11
+
+/* VIDEO_CC power domains */
+#define MVS0_GDSC 0
+#define MVSC_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,videocc-sm8150.h b/include/dt-bindings/clock/qcom,videocc-sm8150.h
new file mode 100644
index 000000000000..e24ee840cfdb
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,videocc-sm8150.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8150_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8150_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_CC_IRIS_AHB_CLK 0
+#define VIDEO_CC_IRIS_CLK_SRC 1
+#define VIDEO_CC_MVS0_CORE_CLK 2
+#define VIDEO_CC_MVS1_CORE_CLK 3
+#define VIDEO_CC_MVSC_CORE_CLK 4
+#define VIDEO_CC_PLL0 5
+
+/* VIDEO_CC Resets */
+#define VIDEO_CC_MVSC_CORE_CLK_BCR 0
+
+/* VIDEO_CC GDSCRs */
+#define VENUS_GDSC 0
+#define VCODEC0_GDSC 1
+#define VCODEC1_GDSC 2
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,videocc-sm8250.h b/include/dt-bindings/clock/qcom,videocc-sm8250.h
new file mode 100644
index 000000000000..8d321ac3b1fa
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,videocc-sm8250.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8250_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8250_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_CC_MVS0_CLK_SRC 0
+#define VIDEO_CC_MVS0C_CLK 1
+#define VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC 2
+#define VIDEO_CC_MVS1_CLK_SRC 3
+#define VIDEO_CC_MVS1_DIV2_CLK 4
+#define VIDEO_CC_MVS1C_CLK 5
+#define VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC 6
+#define VIDEO_CC_PLL0 7
+#define VIDEO_CC_PLL1 8
+#define VIDEO_CC_MVS0_DIV_CLK_SRC 9
+#define VIDEO_CC_MVS0_CLK 10
+
+/* VIDEO_CC resets */
+#define VIDEO_CC_CVP_INTERFACE_BCR 0
+#define VIDEO_CC_CVP_MVS0_BCR 1
+#define VIDEO_CC_MVS0C_CLK_ARES 2
+#define VIDEO_CC_CVP_MVS0C_BCR 3
+#define VIDEO_CC_CVP_MVS1_BCR 4
+#define VIDEO_CC_MVS1C_CLK_ARES 5
+#define VIDEO_CC_CVP_MVS1C_BCR 6
+
+#define MVS0C_GDSC 0
+#define MVS1C_GDSC 1
+#define MVS0_GDSC 2
+#define MVS1_GDSC 3
+
+#endif
diff --git a/include/dt-bindings/clock/r8a7742-cpg-mssr.h b/include/dt-bindings/clock/r8a7742-cpg-mssr.h
new file mode 100644
index 000000000000..e68191c24881
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7742-cpg-mssr.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A7742_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A7742_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a7742 CPG Core Clocks */
+#define R8A7742_CLK_Z 0
+#define R8A7742_CLK_Z2 1
+#define R8A7742_CLK_ZG 2
+#define R8A7742_CLK_ZTR 3
+#define R8A7742_CLK_ZTRD2 4
+#define R8A7742_CLK_ZT 5
+#define R8A7742_CLK_ZX 6
+#define R8A7742_CLK_ZS 7
+#define R8A7742_CLK_HP 8
+#define R8A7742_CLK_B 9
+#define R8A7742_CLK_LB 10
+#define R8A7742_CLK_P 11
+#define R8A7742_CLK_CL 12
+#define R8A7742_CLK_M2 13
+#define R8A7742_CLK_ZB3 14
+#define R8A7742_CLK_ZB3D2 15
+#define R8A7742_CLK_DDR 16
+#define R8A7742_CLK_SDH 17
+#define R8A7742_CLK_SD0 18
+#define R8A7742_CLK_SD1 19
+#define R8A7742_CLK_SD2 20
+#define R8A7742_CLK_SD3 21
+#define R8A7742_CLK_MMC0 22
+#define R8A7742_CLK_MMC1 23
+#define R8A7742_CLK_MP 24
+#define R8A7742_CLK_QSPI 25
+#define R8A7742_CLK_CP 26
+#define R8A7742_CLK_RCAN 27
+#define R8A7742_CLK_R 28
+#define R8A7742_CLK_OSC 29
+
+#endif /* __DT_BINDINGS_CLOCK_R8A7742_CPG_MSSR_H__ */
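
The Renesas CPG/MSSR headers work differently from the Qualcomm ones: the provider takes two clock cells, and the first cell selects a namespace, CPG_CORE for the core clocks enumerated above or CPG_MOD for module clocks addressed by MSTP number (both selectors come from the included renesas-cpg-mssr.h). A minimal sketch; the CAN node, its address, and the MSTP number are illustrative:

#include <dt-bindings/clock/r8a7742-cpg-mssr.h>

can0: can@e6e80000 {
	compatible = "renesas,can-r8a7742";		/* illustrative consumer */
	reg = <0xe6e80000 0x1000>;
	/* module clock by MSTP number, core clock by an ID from this header */
	clocks = <&cpg CPG_MOD 916>, <&cpg CPG_CORE R8A7742_CLK_RCAN>;
	clock-names = "clkp1", "clkp2";
};
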
diff --git a/include/dt-bindings/clock/r8a774e1-cpg-mssr.h b/include/dt-bindings/clock/r8a774e1-cpg-mssr.h
new file mode 100644
index 000000000000..b2fc1d1c3c47
--- /dev/null
+++ b/include/dt-bindings/clock/r8a774e1-cpg-mssr.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A774E1_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A774E1_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R8A774E1 CPG Core Clocks */
+#define R8A774E1_CLK_Z 0
+#define R8A774E1_CLK_Z2 1
+#define R8A774E1_CLK_ZG 2
+#define R8A774E1_CLK_ZTR 3
+#define R8A774E1_CLK_ZTRD2 4
+#define R8A774E1_CLK_ZT 5
+#define R8A774E1_CLK_ZX 6
+#define R8A774E1_CLK_S0D1 7
+#define R8A774E1_CLK_S0D2 8
+#define R8A774E1_CLK_S0D3 9
+#define R8A774E1_CLK_S0D4 10
+#define R8A774E1_CLK_S0D6 11
+#define R8A774E1_CLK_S0D8 12
+#define R8A774E1_CLK_S0D12 13
+#define R8A774E1_CLK_S1D2 14
+#define R8A774E1_CLK_S1D4 15
+#define R8A774E1_CLK_S2D1 16
+#define R8A774E1_CLK_S2D2 17
+#define R8A774E1_CLK_S2D4 18
+#define R8A774E1_CLK_S3D1 19
+#define R8A774E1_CLK_S3D2 20
+#define R8A774E1_CLK_S3D4 21
+#define R8A774E1_CLK_LB 22
+#define R8A774E1_CLK_CL 23
+#define R8A774E1_CLK_ZB3 24
+#define R8A774E1_CLK_ZB3D2 25
+#define R8A774E1_CLK_ZB3D4 26
+#define R8A774E1_CLK_CR 27
+#define R8A774E1_CLK_CRD2 28
+#define R8A774E1_CLK_SD0H 29
+#define R8A774E1_CLK_SD0 30
+#define R8A774E1_CLK_SD1H 31
+#define R8A774E1_CLK_SD1 32
+#define R8A774E1_CLK_SD2H 33
+#define R8A774E1_CLK_SD2 34
+#define R8A774E1_CLK_SD3H 35
+#define R8A774E1_CLK_SD3 36
+#define R8A774E1_CLK_RPC 37
+#define R8A774E1_CLK_RPCD2 38
+#define R8A774E1_CLK_MSO 39
+#define R8A774E1_CLK_HDMI 40
+#define R8A774E1_CLK_CSI0 41
+#define R8A774E1_CLK_CP 42
+#define R8A774E1_CLK_CPEX 43
+#define R8A774E1_CLK_R 44
+#define R8A774E1_CLK_OSC 45
+#define R8A774E1_CLK_CANFD 46
+
+#endif /* __DT_BINDINGS_CLOCK_R8A774E1_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a779a0-cpg-mssr.h b/include/dt-bindings/clock/r8a779a0-cpg-mssr.h
new file mode 100644
index 000000000000..f1d737ca7ca1
--- /dev/null
+++ b/include/dt-bindings/clock/r8a779a0-cpg-mssr.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A779A0_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A779A0_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a779A0 CPG Core Clocks */
+#define R8A779A0_CLK_Z0 0
+#define R8A779A0_CLK_ZX 1
+#define R8A779A0_CLK_Z1 2
+#define R8A779A0_CLK_ZR 3
+#define R8A779A0_CLK_ZS 4
+#define R8A779A0_CLK_ZT 5
+#define R8A779A0_CLK_ZTR 6
+#define R8A779A0_CLK_S1D1 7
+#define R8A779A0_CLK_S1D2 8
+#define R8A779A0_CLK_S1D4 9
+#define R8A779A0_CLK_S1D8 10
+#define R8A779A0_CLK_S1D12 11
+#define R8A779A0_CLK_S3D1 12
+#define R8A779A0_CLK_S3D2 13
+#define R8A779A0_CLK_S3D4 14
+#define R8A779A0_CLK_LB 15
+#define R8A779A0_CLK_CP 16
+#define R8A779A0_CLK_CL 17
+#define R8A779A0_CLK_CL16MCK 18
+#define R8A779A0_CLK_ZB30 19
+#define R8A779A0_CLK_ZB30D2 20
+#define R8A779A0_CLK_ZB30D4 21
+#define R8A779A0_CLK_ZB31 22
+#define R8A779A0_CLK_ZB31D2 23
+#define R8A779A0_CLK_ZB31D4 24
+#define R8A779A0_CLK_SD0H 25
+#define R8A779A0_CLK_SD0 26
+#define R8A779A0_CLK_RPC 27
+#define R8A779A0_CLK_RPCD2 28
+#define R8A779A0_CLK_MSO 29
+#define R8A779A0_CLK_CANFD 30
+#define R8A779A0_CLK_CSI0 31
+#define R8A779A0_CLK_FRAY 32
+#define R8A779A0_CLK_DSI 33
+#define R8A779A0_CLK_VIP 34
+#define R8A779A0_CLK_ADGH 35
+#define R8A779A0_CLK_CNNDSP 36
+#define R8A779A0_CLK_ICU 37
+#define R8A779A0_CLK_ICUD2 38
+#define R8A779A0_CLK_VCBUS 39
+#define R8A779A0_CLK_CBFUSA 40
+#define R8A779A0_CLK_R 41
+#define R8A779A0_CLK_OSC 42
+
+#endif /* __DT_BINDINGS_CLOCK_R8A779A0_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a779f0-cpg-mssr.h b/include/dt-bindings/clock/r8a779f0-cpg-mssr.h
new file mode 100644
index 000000000000..f2ae1c6a82dd
--- /dev/null
+++ b/include/dt-bindings/clock/r8a779f0-cpg-mssr.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A779F0_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A779F0_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a779f0 CPG Core Clocks */
+
+#define R8A779F0_CLK_ZX 0
+#define R8A779F0_CLK_ZS 1
+#define R8A779F0_CLK_ZT 2
+#define R8A779F0_CLK_ZTR 3
+#define R8A779F0_CLK_S0D2 4
+#define R8A779F0_CLK_S0D3 5
+#define R8A779F0_CLK_S0D4 6
+#define R8A779F0_CLK_S0D2_MM 7
+#define R8A779F0_CLK_S0D3_MM 8
+#define R8A779F0_CLK_S0D4_MM 9
+#define R8A779F0_CLK_S0D2_RT 10
+#define R8A779F0_CLK_S0D3_RT 11
+#define R8A779F0_CLK_S0D4_RT 12
+#define R8A779F0_CLK_S0D6_RT 13
+#define R8A779F0_CLK_S0D3_PER 14
+#define R8A779F0_CLK_S0D6_PER 15
+#define R8A779F0_CLK_S0D12_PER 16
+#define R8A779F0_CLK_S0D24_PER 17
+#define R8A779F0_CLK_S0D2_HSC 18
+#define R8A779F0_CLK_S0D3_HSC 19
+#define R8A779F0_CLK_S0D4_HSC 20
+#define R8A779F0_CLK_S0D6_HSC 21
+#define R8A779F0_CLK_S0D12_HSC 22
+#define R8A779F0_CLK_S0D2_CC 23
+#define R8A779F0_CLK_CL 24
+#define R8A779F0_CLK_CL16M 25
+#define R8A779F0_CLK_CL16M_MM 26
+#define R8A779F0_CLK_CL16M_RT 27
+#define R8A779F0_CLK_CL16M_PER 28
+#define R8A779F0_CLK_CL16M_HSC 29
+#define R8A779F0_CLK_Z0 30
+#define R8A779F0_CLK_Z1 31
+#define R8A779F0_CLK_ZB3 32
+#define R8A779F0_CLK_ZB3D2 33
+#define R8A779F0_CLK_ZB3D4 34
+#define R8A779F0_CLK_SD0H 35
+#define R8A779F0_CLK_SD0 36
+#define R8A779F0_CLK_RPC 37
+#define R8A779F0_CLK_RPCD2 38
+#define R8A779F0_CLK_MSO 39
+#define R8A779F0_CLK_SASYNCRT 40
+#define R8A779F0_CLK_SASYNCPERD1 41
+#define R8A779F0_CLK_SASYNCPERD2 42
+#define R8A779F0_CLK_SASYNCPERD4 43
+#define R8A779F0_CLK_DBGSOC_HSC 44
+#define R8A779F0_CLK_RSW2 45
+#define R8A779F0_CLK_OSC 46
+#define R8A779F0_CLK_ZR 47
+#define R8A779F0_CLK_CPEX 48
+#define R8A779F0_CLK_CBFUSA 49
+#define R8A779F0_CLK_R 50
+
+#endif /* __DT_BINDINGS_CLOCK_R8A779F0_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a779g0-cpg-mssr.h b/include/dt-bindings/clock/r8a779g0-cpg-mssr.h
new file mode 100644
index 000000000000..754c54a6eb06
--- /dev/null
+++ b/include/dt-bindings/clock/r8a779g0-cpg-mssr.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A779G0_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A779G0_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a779g0 CPG Core Clocks */
+
+#define R8A779G0_CLK_ZX 0
+#define R8A779G0_CLK_ZS 1
+#define R8A779G0_CLK_ZT 2
+#define R8A779G0_CLK_ZTR 3
+#define R8A779G0_CLK_S0D2 4
+#define R8A779G0_CLK_S0D3 5
+#define R8A779G0_CLK_S0D4 6
+#define R8A779G0_CLK_S0D1_VIO 7
+#define R8A779G0_CLK_S0D2_VIO 8
+#define R8A779G0_CLK_S0D4_VIO 9
+#define R8A779G0_CLK_S0D8_VIO 10
+#define R8A779G0_CLK_S0D1_VC 11
+#define R8A779G0_CLK_S0D2_VC 12
+#define R8A779G0_CLK_S0D4_VC 13
+#define R8A779G0_CLK_S0D2_MM 14
+#define R8A779G0_CLK_S0D4_MM 15
+#define R8A779G0_CLK_S0D2_U3DG 16
+#define R8A779G0_CLK_S0D4_U3DG 17
+#define R8A779G0_CLK_S0D2_RT 18
+#define R8A779G0_CLK_S0D3_RT 19
+#define R8A779G0_CLK_S0D4_RT 20
+#define R8A779G0_CLK_S0D6_RT 21
+#define R8A779G0_CLK_S0D24_RT 22
+#define R8A779G0_CLK_S0D2_PER 23
+#define R8A779G0_CLK_S0D3_PER 24
+#define R8A779G0_CLK_S0D4_PER 25
+#define R8A779G0_CLK_S0D6_PER 26
+#define R8A779G0_CLK_S0D12_PER 27
+#define R8A779G0_CLK_S0D24_PER 28
+#define R8A779G0_CLK_S0D1_HSC 29
+#define R8A779G0_CLK_S0D2_HSC 30
+#define R8A779G0_CLK_S0D4_HSC 31
+#define R8A779G0_CLK_S0D2_CC 32
+#define R8A779G0_CLK_SVD1_IR 33
+#define R8A779G0_CLK_SVD2_IR 34
+#define R8A779G0_CLK_SVD1_VIP 35
+#define R8A779G0_CLK_SVD2_VIP 36
+#define R8A779G0_CLK_CL 37
+#define R8A779G0_CLK_CL16M 38
+#define R8A779G0_CLK_CL16M_MM 39
+#define R8A779G0_CLK_CL16M_RT 40
+#define R8A779G0_CLK_CL16M_PER 41
+#define R8A779G0_CLK_CL16M_HSC 42
+#define R8A779G0_CLK_Z0 43
+#define R8A779G0_CLK_ZB3 44
+#define R8A779G0_CLK_ZB3D2 45
+#define R8A779G0_CLK_ZB3D4 46
+#define R8A779G0_CLK_ZG 47
+#define R8A779G0_CLK_SD0H 48
+#define R8A779G0_CLK_SD0 49
+#define R8A779G0_CLK_RPC 50
+#define R8A779G0_CLK_RPCD2 51
+#define R8A779G0_CLK_MSO 52
+#define R8A779G0_CLK_CANFD 53
+#define R8A779G0_CLK_CSI 54
+#define R8A779G0_CLK_FRAY 55
+#define R8A779G0_CLK_IPC 56
+#define R8A779G0_CLK_SASYNCRT 57
+#define R8A779G0_CLK_SASYNCPERD1 58
+#define R8A779G0_CLK_SASYNCPERD2 59
+#define R8A779G0_CLK_SASYNCPERD4 60
+#define R8A779G0_CLK_VIOBUS 61
+#define R8A779G0_CLK_VIOBUSD2 62
+#define R8A779G0_CLK_VCBUS 63
+#define R8A779G0_CLK_VCBUSD2 64
+#define R8A779G0_CLK_DSIEXT 65
+#define R8A779G0_CLK_DSIREF 66
+#define R8A779G0_CLK_ADGH 67
+#define R8A779G0_CLK_OSC 68
+#define R8A779G0_CLK_ZR0 69
+#define R8A779G0_CLK_ZR1 70
+#define R8A779G0_CLK_ZR2 71
+#define R8A779G0_CLK_IMPA 72
+#define R8A779G0_CLK_IMPAD4 73
+#define R8A779G0_CLK_CPEX 74
+#define R8A779G0_CLK_CBFUSA 75
+#define R8A779G0_CLK_R 76
+
+#endif /* __DT_BINDINGS_CLOCK_R8A779G0_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r9a06g032-sysctrl.h b/include/dt-bindings/clock/r9a06g032-sysctrl.h
index 90c0f3dc1ba1..d9d7b8b4f426 100644
--- a/include/dt-bindings/clock/r9a06g032-sysctrl.h
+++ b/include/dt-bindings/clock/r9a06g032-sysctrl.h
@@ -74,6 +74,7 @@
#define R9A06G032_CLK_DDRPHY_PCLK 81 /* AKA CLK_REF_SYNC_D4 */
#define R9A06G032_CLK_FW 81 /* AKA CLK_REF_SYNC_D4 */
#define R9A06G032_CLK_CRYPTO 81 /* AKA CLK_REF_SYNC_D4 */
+#define R9A06G032_CLK_WATCHDOG 82 /* AKA CLK_REF_SYNC_D8 */
#define R9A06G032_CLK_A7MP 84 /* AKA DIV_CA7 */
#define R9A06G032_HCLK_CAN0 85
#define R9A06G032_HCLK_CAN1 86
diff --git a/include/dt-bindings/clock/r9a07g043-cpg.h b/include/dt-bindings/clock/r9a07g043-cpg.h
new file mode 100644
index 000000000000..77cde8effdc7
--- /dev/null
+++ b/include/dt-bindings/clock/r9a07g043-cpg.h
@@ -0,0 +1,204 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R9A07G043_CPG_H__
+#define __DT_BINDINGS_CLOCK_R9A07G043_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R9A07G043 CPG Core Clocks */
+#define R9A07G043_CLK_I 0
+#define R9A07G043_CLK_I2 1
+#define R9A07G043_CLK_S0 2
+#define R9A07G043_CLK_SPI0 3
+#define R9A07G043_CLK_SPI1 4
+#define R9A07G043_CLK_SD0 5
+#define R9A07G043_CLK_SD1 6
+#define R9A07G043_CLK_M0 7
+#define R9A07G043_CLK_M2 8
+#define R9A07G043_CLK_M3 9
+#define R9A07G043_CLK_HP 10
+#define R9A07G043_CLK_TSU 11
+#define R9A07G043_CLK_ZT 12
+#define R9A07G043_CLK_P0 13
+#define R9A07G043_CLK_P1 14
+#define R9A07G043_CLK_P2 15
+#define R9A07G043_CLK_AT 16
+#define R9A07G043_OSCCLK 17
+#define R9A07G043_CLK_P0_DIV2 18
+
+/* R9A07G043 Module Clocks */
+#define R9A07G043_CA55_SCLK 0 /* RZ/G2UL Only */
+#define R9A07G043_CA55_PCLK 1 /* RZ/G2UL Only */
+#define R9A07G043_CA55_ATCLK 2 /* RZ/G2UL Only */
+#define R9A07G043_CA55_GICCLK 3 /* RZ/G2UL Only */
+#define R9A07G043_CA55_PERICLK 4 /* RZ/G2UL Only */
+#define R9A07G043_CA55_ACLK 5 /* RZ/G2UL Only */
+#define R9A07G043_CA55_TSCLK 6 /* RZ/G2UL Only */
+#define R9A07G043_GIC600_GICCLK 7 /* RZ/G2UL Only */
+#define R9A07G043_IA55_CLK 8 /* RZ/G2UL Only */
+#define R9A07G043_IA55_PCLK 9 /* RZ/G2UL Only */
+#define R9A07G043_MHU_PCLK 10 /* RZ/G2UL Only */
+#define R9A07G043_SYC_CNT_CLK 11
+#define R9A07G043_DMAC_ACLK 12
+#define R9A07G043_DMAC_PCLK 13
+#define R9A07G043_OSTM0_PCLK 14
+#define R9A07G043_OSTM1_PCLK 15
+#define R9A07G043_OSTM2_PCLK 16
+#define R9A07G043_MTU_X_MCK_MTU3 17
+#define R9A07G043_POE3_CLKM_POE 18
+#define R9A07G043_WDT0_PCLK 19
+#define R9A07G043_WDT0_CLK 20
+#define R9A07G043_WDT2_PCLK 21 /* RZ/G2UL Only */
+#define R9A07G043_WDT2_CLK 22 /* RZ/G2UL Only */
+#define R9A07G043_SPI_CLK2 23
+#define R9A07G043_SPI_CLK 24
+#define R9A07G043_SDHI0_IMCLK 25
+#define R9A07G043_SDHI0_IMCLK2 26
+#define R9A07G043_SDHI0_CLK_HS 27
+#define R9A07G043_SDHI0_ACLK 28
+#define R9A07G043_SDHI1_IMCLK 29
+#define R9A07G043_SDHI1_IMCLK2 30
+#define R9A07G043_SDHI1_CLK_HS 31
+#define R9A07G043_SDHI1_ACLK 32
+#define R9A07G043_ISU_ACLK 33 /* RZ/G2UL Only */
+#define R9A07G043_ISU_PCLK 34 /* RZ/G2UL Only */
+#define R9A07G043_CRU_SYSCLK 35 /* RZ/G2UL Only */
+#define R9A07G043_CRU_VCLK 36 /* RZ/G2UL Only */
+#define R9A07G043_CRU_PCLK 37 /* RZ/G2UL Only */
+#define R9A07G043_CRU_ACLK 38 /* RZ/G2UL Only */
+#define R9A07G043_LCDC_CLK_A 39 /* RZ/G2UL Only */
+#define R9A07G043_LCDC_CLK_P 40 /* RZ/G2UL Only */
+#define R9A07G043_LCDC_CLK_D 41 /* RZ/G2UL Only */
+#define R9A07G043_SSI0_PCLK2 42
+#define R9A07G043_SSI0_PCLK_SFR 43
+#define R9A07G043_SSI1_PCLK2 44
+#define R9A07G043_SSI1_PCLK_SFR 45
+#define R9A07G043_SSI2_PCLK2 46
+#define R9A07G043_SSI2_PCLK_SFR 47
+#define R9A07G043_SSI3_PCLK2 48
+#define R9A07G043_SSI3_PCLK_SFR 49
+#define R9A07G043_SRC_CLKP 50 /* RZ/G2UL Only */
+#define R9A07G043_USB_U2H0_HCLK 51
+#define R9A07G043_USB_U2H1_HCLK 52
+#define R9A07G043_USB_U2P_EXR_CPUCLK 53
+#define R9A07G043_USB_PCLK 54
+#define R9A07G043_ETH0_CLK_AXI 55
+#define R9A07G043_ETH0_CLK_CHI 56
+#define R9A07G043_ETH1_CLK_AXI 57
+#define R9A07G043_ETH1_CLK_CHI 58
+#define R9A07G043_I2C0_PCLK 59
+#define R9A07G043_I2C1_PCLK 60
+#define R9A07G043_I2C2_PCLK 61
+#define R9A07G043_I2C3_PCLK 62
+#define R9A07G043_SCIF0_CLK_PCK 63
+#define R9A07G043_SCIF1_CLK_PCK 64
+#define R9A07G043_SCIF2_CLK_PCK 65
+#define R9A07G043_SCIF3_CLK_PCK 66
+#define R9A07G043_SCIF4_CLK_PCK 67
+#define R9A07G043_SCI0_CLKP 68
+#define R9A07G043_SCI1_CLKP 69
+#define R9A07G043_IRDA_CLKP 70
+#define R9A07G043_RSPI0_CLKB 71
+#define R9A07G043_RSPI1_CLKB 72
+#define R9A07G043_RSPI2_CLKB 73
+#define R9A07G043_CANFD_PCLK 74
+#define R9A07G043_GPIO_HCLK 75
+#define R9A07G043_ADC_ADCLK 76
+#define R9A07G043_ADC_PCLK 77
+#define R9A07G043_TSU_PCLK 78
+#define R9A07G043_NCEPLDM_DM_CLK 79 /* RZ/Five Only */
+#define R9A07G043_NCEPLDM_ACLK 80 /* RZ/Five Only */
+#define R9A07G043_NCEPLDM_TCK 81 /* RZ/Five Only */
+#define R9A07G043_NCEPLMT_ACLK 82 /* RZ/Five Only */
+#define R9A07G043_NCEPLIC_ACLK 83 /* RZ/Five Only */
+#define R9A07G043_AX45MP_CORE0_CLK 84 /* RZ/Five Only */
+#define R9A07G043_AX45MP_ACLK 85 /* RZ/Five Only */
+#define R9A07G043_IAX45_CLK 86 /* RZ/Five Only */
+#define R9A07G043_IAX45_PCLK 87 /* RZ/Five Only */
+
+/* R9A07G043 Resets */
+#define R9A07G043_CA55_RST_1_0 0 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_1_1 1 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_3_0 2 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_3_1 3 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_4 4 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_5 5 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_6 6 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_7 7 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_8 8 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_9 9 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_10 10 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_11 11 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_12 12 /* RZ/G2UL Only */
+#define R9A07G043_GIC600_GICRESET_N 13 /* RZ/G2UL Only */
+#define R9A07G043_GIC600_DBG_GICRESET_N 14 /* RZ/G2UL Only */
+#define R9A07G043_IA55_RESETN 15 /* RZ/G2UL Only */
+#define R9A07G043_MHU_RESETN 16 /* RZ/G2UL Only */
+#define R9A07G043_DMAC_ARESETN 17
+#define R9A07G043_DMAC_RST_ASYNC 18
+#define R9A07G043_SYC_RESETN 19
+#define R9A07G043_OSTM0_PRESETZ 20
+#define R9A07G043_OSTM1_PRESETZ 21
+#define R9A07G043_OSTM2_PRESETZ 22
+#define R9A07G043_MTU_X_PRESET_MTU3 23
+#define R9A07G043_POE3_RST_M_REG 24
+#define R9A07G043_WDT0_PRESETN 25
+#define R9A07G043_WDT2_PRESETN 26 /* RZ/G2UL Only */
+#define R9A07G043_SPI_RST 27
+#define R9A07G043_SDHI0_IXRST 28
+#define R9A07G043_SDHI1_IXRST 29
+#define R9A07G043_ISU_ARESETN 30 /* RZ/G2UL Only */
+#define R9A07G043_ISU_PRESETN 31 /* RZ/G2UL Only */
+#define R9A07G043_CRU_CMN_RSTB 32 /* RZ/G2UL Only */
+#define R9A07G043_CRU_PRESETN 33 /* RZ/G2UL Only */
+#define R9A07G043_CRU_ARESETN 34 /* RZ/G2UL Only */
+#define R9A07G043_LCDC_RESET_N 35 /* RZ/G2UL Only */
+#define R9A07G043_SSI0_RST_M2_REG 36
+#define R9A07G043_SSI1_RST_M2_REG 37
+#define R9A07G043_SSI2_RST_M2_REG 38
+#define R9A07G043_SSI3_RST_M2_REG 39
+#define R9A07G043_SRC_RST 40 /* RZ/G2UL Only */
+#define R9A07G043_USB_U2H0_HRESETN 41
+#define R9A07G043_USB_U2H1_HRESETN 42
+#define R9A07G043_USB_U2P_EXL_SYSRST 43
+#define R9A07G043_USB_PRESETN 44
+#define R9A07G043_ETH0_RST_HW_N 45
+#define R9A07G043_ETH1_RST_HW_N 46
+#define R9A07G043_I2C0_MRST 47
+#define R9A07G043_I2C1_MRST 48
+#define R9A07G043_I2C2_MRST 49
+#define R9A07G043_I2C3_MRST 50
+#define R9A07G043_SCIF0_RST_SYSTEM_N 51
+#define R9A07G043_SCIF1_RST_SYSTEM_N 52
+#define R9A07G043_SCIF2_RST_SYSTEM_N 53
+#define R9A07G043_SCIF3_RST_SYSTEM_N 54
+#define R9A07G043_SCIF4_RST_SYSTEM_N 55
+#define R9A07G043_SCI0_RST 56
+#define R9A07G043_SCI1_RST 57
+#define R9A07G043_IRDA_RST 58
+#define R9A07G043_RSPI0_RST 59
+#define R9A07G043_RSPI1_RST 60
+#define R9A07G043_RSPI2_RST 61
+#define R9A07G043_CANFD_RSTP_N 62
+#define R9A07G043_CANFD_RSTC_N 63
+#define R9A07G043_GPIO_RSTN 64
+#define R9A07G043_GPIO_PORT_RESETN 65
+#define R9A07G043_GPIO_SPARE_RESETN 66
+#define R9A07G043_ADC_PRESETN 67
+#define R9A07G043_ADC_ADRST_N 68
+#define R9A07G043_TSU_PRESETN 69
+#define R9A07G043_NCEPLDM_DTM_PWR_RST_N 70 /* RZ/Five Only */
+#define R9A07G043_NCEPLDM_ARESETN 71 /* RZ/Five Only */
+#define R9A07G043_NCEPLMT_POR_RSTN 72 /* RZ/Five Only */
+#define R9A07G043_NCEPLMT_ARESETN 73 /* RZ/Five Only */
+#define R9A07G043_NCEPLIC_ARESETN 74 /* RZ/Five Only */
+#define R9A07G043_AX45MP_ARESETNM 75 /* RZ/Five Only */
+#define R9A07G043_AX45MP_ARESETNS 76 /* RZ/Five Only */
+#define R9A07G043_AX45MP_L2_RESETN 77 /* RZ/Five Only */
+#define R9A07G043_AX45MP_CORE0_RESETN 78 /* RZ/Five Only */
+#define R9A07G043_IAX45_RESETN 79 /* RZ/Five Only */
+
+#endif /* __DT_BINDINGS_CLOCK_R9A07G043_CPG_H__ */
diff --git a/include/dt-bindings/clock/r9a07g044-cpg.h b/include/dt-bindings/clock/r9a07g044-cpg.h
new file mode 100644
index 000000000000..0bb17ff1a01a
--- /dev/null
+++ b/include/dt-bindings/clock/r9a07g044-cpg.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__
+#define __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R9A07G044 CPG Core Clocks */
+#define R9A07G044_CLK_I 0
+#define R9A07G044_CLK_I2 1
+#define R9A07G044_CLK_G 2
+#define R9A07G044_CLK_S0 3
+#define R9A07G044_CLK_S1 4
+#define R9A07G044_CLK_SPI0 5
+#define R9A07G044_CLK_SPI1 6
+#define R9A07G044_CLK_SD0 7
+#define R9A07G044_CLK_SD1 8
+#define R9A07G044_CLK_M0 9
+#define R9A07G044_CLK_M1 10
+#define R9A07G044_CLK_M2 11
+#define R9A07G044_CLK_M3 12
+#define R9A07G044_CLK_M4 13
+#define R9A07G044_CLK_HP 14
+#define R9A07G044_CLK_TSU 15
+#define R9A07G044_CLK_ZT 16
+#define R9A07G044_CLK_P0 17
+#define R9A07G044_CLK_P1 18
+#define R9A07G044_CLK_P2 19
+#define R9A07G044_CLK_AT 20
+#define R9A07G044_OSCCLK 21
+#define R9A07G044_CLK_P0_DIV2 22
+
+/* R9A07G044 Module Clocks */
+#define R9A07G044_CA55_SCLK 0
+#define R9A07G044_CA55_PCLK 1
+#define R9A07G044_CA55_ATCLK 2
+#define R9A07G044_CA55_GICCLK 3
+#define R9A07G044_CA55_PERICLK 4
+#define R9A07G044_CA55_ACLK 5
+#define R9A07G044_CA55_TSCLK 6
+#define R9A07G044_GIC600_GICCLK 7
+#define R9A07G044_IA55_CLK 8
+#define R9A07G044_IA55_PCLK 9
+#define R9A07G044_MHU_PCLK 10
+#define R9A07G044_SYC_CNT_CLK 11
+#define R9A07G044_DMAC_ACLK 12
+#define R9A07G044_DMAC_PCLK 13
+#define R9A07G044_OSTM0_PCLK 14
+#define R9A07G044_OSTM1_PCLK 15
+#define R9A07G044_OSTM2_PCLK 16
+#define R9A07G044_MTU_X_MCK_MTU3 17
+#define R9A07G044_POE3_CLKM_POE 18
+#define R9A07G044_GPT_PCLK 19
+#define R9A07G044_POEG_A_CLKP 20
+#define R9A07G044_POEG_B_CLKP 21
+#define R9A07G044_POEG_C_CLKP 22
+#define R9A07G044_POEG_D_CLKP 23
+#define R9A07G044_WDT0_PCLK 24
+#define R9A07G044_WDT0_CLK 25
+#define R9A07G044_WDT1_PCLK 26
+#define R9A07G044_WDT1_CLK 27
+#define R9A07G044_WDT2_PCLK 28
+#define R9A07G044_WDT2_CLK 29
+#define R9A07G044_SPI_CLK2 30
+#define R9A07G044_SPI_CLK 31
+#define R9A07G044_SDHI0_IMCLK 32
+#define R9A07G044_SDHI0_IMCLK2 33
+#define R9A07G044_SDHI0_CLK_HS 34
+#define R9A07G044_SDHI0_ACLK 35
+#define R9A07G044_SDHI1_IMCLK 36
+#define R9A07G044_SDHI1_IMCLK2 37
+#define R9A07G044_SDHI1_CLK_HS 38
+#define R9A07G044_SDHI1_ACLK 39
+#define R9A07G044_GPU_CLK 40
+#define R9A07G044_GPU_AXI_CLK 41
+#define R9A07G044_GPU_ACE_CLK 42
+#define R9A07G044_ISU_ACLK 43
+#define R9A07G044_ISU_PCLK 44
+#define R9A07G044_H264_CLK_A 45
+#define R9A07G044_H264_CLK_P 46
+#define R9A07G044_CRU_SYSCLK 47
+#define R9A07G044_CRU_VCLK 48
+#define R9A07G044_CRU_PCLK 49
+#define R9A07G044_CRU_ACLK 50
+#define R9A07G044_MIPI_DSI_PLLCLK 51
+#define R9A07G044_MIPI_DSI_SYSCLK 52
+#define R9A07G044_MIPI_DSI_ACLK 53
+#define R9A07G044_MIPI_DSI_PCLK 54
+#define R9A07G044_MIPI_DSI_VCLK 55
+#define R9A07G044_MIPI_DSI_LPCLK 56
+#define R9A07G044_LCDC_CLK_A 57
+#define R9A07G044_LCDC_CLK_P 58
+#define R9A07G044_LCDC_CLK_D 59
+#define R9A07G044_SSI0_PCLK2 60
+#define R9A07G044_SSI0_PCLK_SFR 61
+#define R9A07G044_SSI1_PCLK2 62
+#define R9A07G044_SSI1_PCLK_SFR 63
+#define R9A07G044_SSI2_PCLK2 64
+#define R9A07G044_SSI2_PCLK_SFR 65
+#define R9A07G044_SSI3_PCLK2 66
+#define R9A07G044_SSI3_PCLK_SFR 67
+#define R9A07G044_SRC_CLKP 68
+#define R9A07G044_USB_U2H0_HCLK 69
+#define R9A07G044_USB_U2H1_HCLK 70
+#define R9A07G044_USB_U2P_EXR_CPUCLK 71
+#define R9A07G044_USB_PCLK 72
+#define R9A07G044_ETH0_CLK_AXI 73
+#define R9A07G044_ETH0_CLK_CHI 74
+#define R9A07G044_ETH1_CLK_AXI 75
+#define R9A07G044_ETH1_CLK_CHI 76
+#define R9A07G044_I2C0_PCLK 77
+#define R9A07G044_I2C1_PCLK 78
+#define R9A07G044_I2C2_PCLK 79
+#define R9A07G044_I2C3_PCLK 80
+#define R9A07G044_SCIF0_CLK_PCK 81
+#define R9A07G044_SCIF1_CLK_PCK 82
+#define R9A07G044_SCIF2_CLK_PCK 83
+#define R9A07G044_SCIF3_CLK_PCK 84
+#define R9A07G044_SCIF4_CLK_PCK 85
+#define R9A07G044_SCI0_CLKP 86
+#define R9A07G044_SCI1_CLKP 87
+#define R9A07G044_IRDA_CLKP 88
+#define R9A07G044_RSPI0_CLKB 89
+#define R9A07G044_RSPI1_CLKB 90
+#define R9A07G044_RSPI2_CLKB 91
+#define R9A07G044_CANFD_PCLK 92
+#define R9A07G044_GPIO_HCLK 93
+#define R9A07G044_ADC_ADCLK 94
+#define R9A07G044_ADC_PCLK 95
+#define R9A07G044_TSU_PCLK 96
+
+/* R9A07G044 Resets */
+#define R9A07G044_CA55_RST_1_0 0
+#define R9A07G044_CA55_RST_1_1 1
+#define R9A07G044_CA55_RST_3_0 2
+#define R9A07G044_CA55_RST_3_1 3
+#define R9A07G044_CA55_RST_4 4
+#define R9A07G044_CA55_RST_5 5
+#define R9A07G044_CA55_RST_6 6
+#define R9A07G044_CA55_RST_7 7
+#define R9A07G044_CA55_RST_8 8
+#define R9A07G044_CA55_RST_9 9
+#define R9A07G044_CA55_RST_10 10
+#define R9A07G044_CA55_RST_11 11
+#define R9A07G044_CA55_RST_12 12
+#define R9A07G044_GIC600_GICRESET_N 13
+#define R9A07G044_GIC600_DBG_GICRESET_N 14
+#define R9A07G044_IA55_RESETN 15
+#define R9A07G044_MHU_RESETN 16
+#define R9A07G044_DMAC_ARESETN 17
+#define R9A07G044_DMAC_RST_ASYNC 18
+#define R9A07G044_SYC_RESETN 19
+#define R9A07G044_OSTM0_PRESETZ 20
+#define R9A07G044_OSTM1_PRESETZ 21
+#define R9A07G044_OSTM2_PRESETZ 22
+#define R9A07G044_MTU_X_PRESET_MTU3 23
+#define R9A07G044_POE3_RST_M_REG 24
+#define R9A07G044_GPT_RST_C 25
+#define R9A07G044_POEG_A_RST 26
+#define R9A07G044_POEG_B_RST 27
+#define R9A07G044_POEG_C_RST 28
+#define R9A07G044_POEG_D_RST 29
+#define R9A07G044_WDT0_PRESETN 30
+#define R9A07G044_WDT1_PRESETN 31
+#define R9A07G044_WDT2_PRESETN 32
+#define R9A07G044_SPI_RST 33
+#define R9A07G044_SDHI0_IXRST 34
+#define R9A07G044_SDHI1_IXRST 35
+#define R9A07G044_GPU_RESETN 36
+#define R9A07G044_GPU_AXI_RESETN 37
+#define R9A07G044_GPU_ACE_RESETN 38
+#define R9A07G044_ISU_ARESETN 39
+#define R9A07G044_ISU_PRESETN 40
+#define R9A07G044_H264_X_RESET_VCP 41
+#define R9A07G044_H264_CP_PRESET_P 42
+#define R9A07G044_CRU_CMN_RSTB 43
+#define R9A07G044_CRU_PRESETN 44
+#define R9A07G044_CRU_ARESETN 45
+#define R9A07G044_MIPI_DSI_CMN_RSTB 46
+#define R9A07G044_MIPI_DSI_ARESET_N 47
+#define R9A07G044_MIPI_DSI_PRESET_N 48
+#define R9A07G044_LCDC_RESET_N 49
+#define R9A07G044_SSI0_RST_M2_REG 50
+#define R9A07G044_SSI1_RST_M2_REG 51
+#define R9A07G044_SSI2_RST_M2_REG 52
+#define R9A07G044_SSI3_RST_M2_REG 53
+#define R9A07G044_SRC_RST 54
+#define R9A07G044_USB_U2H0_HRESETN 55
+#define R9A07G044_USB_U2H1_HRESETN 56
+#define R9A07G044_USB_U2P_EXL_SYSRST 57
+#define R9A07G044_USB_PRESETN 58
+#define R9A07G044_ETH0_RST_HW_N 59
+#define R9A07G044_ETH1_RST_HW_N 60
+#define R9A07G044_I2C0_MRST 61
+#define R9A07G044_I2C1_MRST 62
+#define R9A07G044_I2C2_MRST 63
+#define R9A07G044_I2C3_MRST 64
+#define R9A07G044_SCIF0_RST_SYSTEM_N 65
+#define R9A07G044_SCIF1_RST_SYSTEM_N 66
+#define R9A07G044_SCIF2_RST_SYSTEM_N 67
+#define R9A07G044_SCIF3_RST_SYSTEM_N 68
+#define R9A07G044_SCIF4_RST_SYSTEM_N 69
+#define R9A07G044_SCI0_RST 70
+#define R9A07G044_SCI1_RST 71
+#define R9A07G044_IRDA_RST 72
+#define R9A07G044_RSPI0_RST 73
+#define R9A07G044_RSPI1_RST 74
+#define R9A07G044_RSPI2_RST 75
+#define R9A07G044_CANFD_RSTP_N 76
+#define R9A07G044_CANFD_RSTC_N 77
+#define R9A07G044_GPIO_RSTN 78
+#define R9A07G044_GPIO_PORT_RESETN 79
+#define R9A07G044_GPIO_SPARE_RESETN 80
+#define R9A07G044_ADC_PRESETN 81
+#define R9A07G044_ADC_ADRST_N 82
+#define R9A07G044_TSU_PRESETN 83
+
+#endif /* __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__ */
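
On the RZ/G2L family the CPG block is also the reset and power-domain controller, so one phandle serves all three properties: module clocks go through the CPG_MOD selector using the module-clock defines above, the reset cell indexes the Resets block directly, and the power domain takes no cell at all. A minimal sketch following that pattern; the SCIF address is illustrative:

#include <dt-bindings/clock/r9a07g044-cpg.h>

scif0: serial@1004b800 {
	compatible = "renesas,scif-r9a07g044";		/* illustrative consumer */
	reg = <0x1004b800 0x400>;
	clocks = <&cpg CPG_MOD R9A07G044_SCIF0_CLK_PCK>;
	clock-names = "fck";
	resets = <&cpg R9A07G044_SCIF0_RST_SYSTEM_N>;
	power-domains = <&cpg>;
};
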
diff --git a/include/dt-bindings/clock/r9a07g054-cpg.h b/include/dt-bindings/clock/r9a07g054-cpg.h
new file mode 100644
index 000000000000..43f4dbda872c
--- /dev/null
+++ b/include/dt-bindings/clock/r9a07g054-cpg.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R9A07G054_CPG_H__
+#define __DT_BINDINGS_CLOCK_R9A07G054_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R9A07G054 CPG Core Clocks */
+#define R9A07G054_CLK_I 0
+#define R9A07G054_CLK_I2 1
+#define R9A07G054_CLK_G 2
+#define R9A07G054_CLK_S0 3
+#define R9A07G054_CLK_S1 4
+#define R9A07G054_CLK_SPI0 5
+#define R9A07G054_CLK_SPI1 6
+#define R9A07G054_CLK_SD0 7
+#define R9A07G054_CLK_SD1 8
+#define R9A07G054_CLK_M0 9
+#define R9A07G054_CLK_M1 10
+#define R9A07G054_CLK_M2 11
+#define R9A07G054_CLK_M3 12
+#define R9A07G054_CLK_M4 13
+#define R9A07G054_CLK_HP 14
+#define R9A07G054_CLK_TSU 15
+#define R9A07G054_CLK_ZT 16
+#define R9A07G054_CLK_P0 17
+#define R9A07G054_CLK_P1 18
+#define R9A07G054_CLK_P2 19
+#define R9A07G054_CLK_AT 20
+#define R9A07G054_OSCCLK 21
+#define R9A07G054_CLK_P0_DIV2 22
+#define R9A07G054_CLK_DRP_M 23
+#define R9A07G054_CLK_DRP_D 24
+#define R9A07G054_CLK_DRP_A 25
+
+/* R9A07G054 Module Clocks */
+#define R9A07G054_CA55_SCLK 0
+#define R9A07G054_CA55_PCLK 1
+#define R9A07G054_CA55_ATCLK 2
+#define R9A07G054_CA55_GICCLK 3
+#define R9A07G054_CA55_PERICLK 4
+#define R9A07G054_CA55_ACLK 5
+#define R9A07G054_CA55_TSCLK 6
+#define R9A07G054_GIC600_GICCLK 7
+#define R9A07G054_IA55_CLK 8
+#define R9A07G054_IA55_PCLK 9
+#define R9A07G054_MHU_PCLK 10
+#define R9A07G054_SYC_CNT_CLK 11
+#define R9A07G054_DMAC_ACLK 12
+#define R9A07G054_DMAC_PCLK 13
+#define R9A07G054_OSTM0_PCLK 14
+#define R9A07G054_OSTM1_PCLK 15
+#define R9A07G054_OSTM2_PCLK 16
+#define R9A07G054_MTU_X_MCK_MTU3 17
+#define R9A07G054_POE3_CLKM_POE 18
+#define R9A07G054_GPT_PCLK 19
+#define R9A07G054_POEG_A_CLKP 20
+#define R9A07G054_POEG_B_CLKP 21
+#define R9A07G054_POEG_C_CLKP 22
+#define R9A07G054_POEG_D_CLKP 23
+#define R9A07G054_WDT0_PCLK 24
+#define R9A07G054_WDT0_CLK 25
+#define R9A07G054_WDT1_PCLK 26
+#define R9A07G054_WDT1_CLK 27
+#define R9A07G054_WDT2_PCLK 28
+#define R9A07G054_WDT2_CLK 29
+#define R9A07G054_SPI_CLK2 30
+#define R9A07G054_SPI_CLK 31
+#define R9A07G054_SDHI0_IMCLK 32
+#define R9A07G054_SDHI0_IMCLK2 33
+#define R9A07G054_SDHI0_CLK_HS 34
+#define R9A07G054_SDHI0_ACLK 35
+#define R9A07G054_SDHI1_IMCLK 36
+#define R9A07G054_SDHI1_IMCLK2 37
+#define R9A07G054_SDHI1_CLK_HS 38
+#define R9A07G054_SDHI1_ACLK 39
+#define R9A07G054_GPU_CLK 40
+#define R9A07G054_GPU_AXI_CLK 41
+#define R9A07G054_GPU_ACE_CLK 42
+#define R9A07G054_ISU_ACLK 43
+#define R9A07G054_ISU_PCLK 44
+#define R9A07G054_H264_CLK_A 45
+#define R9A07G054_H264_CLK_P 46
+#define R9A07G054_CRU_SYSCLK 47
+#define R9A07G054_CRU_VCLK 48
+#define R9A07G054_CRU_PCLK 49
+#define R9A07G054_CRU_ACLK 50
+#define R9A07G054_MIPI_DSI_PLLCLK 51
+#define R9A07G054_MIPI_DSI_SYSCLK 52
+#define R9A07G054_MIPI_DSI_ACLK 53
+#define R9A07G054_MIPI_DSI_PCLK 54
+#define R9A07G054_MIPI_DSI_VCLK 55
+#define R9A07G054_MIPI_DSI_LPCLK 56
+#define R9A07G054_LCDC_CLK_A 57
+#define R9A07G054_LCDC_CLK_P 58
+#define R9A07G054_LCDC_CLK_D 59
+#define R9A07G054_SSI0_PCLK2 60
+#define R9A07G054_SSI0_PCLK_SFR 61
+#define R9A07G054_SSI1_PCLK2 62
+#define R9A07G054_SSI1_PCLK_SFR 63
+#define R9A07G054_SSI2_PCLK2 64
+#define R9A07G054_SSI2_PCLK_SFR 65
+#define R9A07G054_SSI3_PCLK2 66
+#define R9A07G054_SSI3_PCLK_SFR 67
+#define R9A07G054_SRC_CLKP 68
+#define R9A07G054_USB_U2H0_HCLK 69
+#define R9A07G054_USB_U2H1_HCLK 70
+#define R9A07G054_USB_U2P_EXR_CPUCLK 71
+#define R9A07G054_USB_PCLK 72
+#define R9A07G054_ETH0_CLK_AXI 73
+#define R9A07G054_ETH0_CLK_CHI 74
+#define R9A07G054_ETH1_CLK_AXI 75
+#define R9A07G054_ETH1_CLK_CHI 76
+#define R9A07G054_I2C0_PCLK 77
+#define R9A07G054_I2C1_PCLK 78
+#define R9A07G054_I2C2_PCLK 79
+#define R9A07G054_I2C3_PCLK 80
+#define R9A07G054_SCIF0_CLK_PCK 81
+#define R9A07G054_SCIF1_CLK_PCK 82
+#define R9A07G054_SCIF2_CLK_PCK 83
+#define R9A07G054_SCIF3_CLK_PCK 84
+#define R9A07G054_SCIF4_CLK_PCK 85
+#define R9A07G054_SCI0_CLKP 86
+#define R9A07G054_SCI1_CLKP 87
+#define R9A07G054_IRDA_CLKP 88
+#define R9A07G054_RSPI0_CLKB 89
+#define R9A07G054_RSPI1_CLKB 90
+#define R9A07G054_RSPI2_CLKB 91
+#define R9A07G054_CANFD_PCLK 92
+#define R9A07G054_GPIO_HCLK 93
+#define R9A07G054_ADC_ADCLK 94
+#define R9A07G054_ADC_PCLK 95
+#define R9A07G054_TSU_PCLK 96
+#define R9A07G054_STPAI_INITCLK 97
+#define R9A07G054_STPAI_ACLK 98
+#define R9A07G054_STPAI_MCLK 99
+#define R9A07G054_STPAI_DCLKIN 100
+#define R9A07G054_STPAI_ACLK_DRP 101
+
+/* R9A07G054 Resets */
+#define R9A07G054_CA55_RST_1_0 0
+#define R9A07G054_CA55_RST_1_1 1
+#define R9A07G054_CA55_RST_3_0 2
+#define R9A07G054_CA55_RST_3_1 3
+#define R9A07G054_CA55_RST_4 4
+#define R9A07G054_CA55_RST_5 5
+#define R9A07G054_CA55_RST_6 6
+#define R9A07G054_CA55_RST_7 7
+#define R9A07G054_CA55_RST_8 8
+#define R9A07G054_CA55_RST_9 9
+#define R9A07G054_CA55_RST_10 10
+#define R9A07G054_CA55_RST_11 11
+#define R9A07G054_CA55_RST_12 12
+#define R9A07G054_GIC600_GICRESET_N 13
+#define R9A07G054_GIC600_DBG_GICRESET_N 14
+#define R9A07G054_IA55_RESETN 15
+#define R9A07G054_MHU_RESETN 16
+#define R9A07G054_DMAC_ARESETN 17
+#define R9A07G054_DMAC_RST_ASYNC 18
+#define R9A07G054_SYC_RESETN 19
+#define R9A07G054_OSTM0_PRESETZ 20
+#define R9A07G054_OSTM1_PRESETZ 21
+#define R9A07G054_OSTM2_PRESETZ 22
+#define R9A07G054_MTU_X_PRESET_MTU3 23
+#define R9A07G054_POE3_RST_M_REG 24
+#define R9A07G054_GPT_RST_C 25
+#define R9A07G054_POEG_A_RST 26
+#define R9A07G054_POEG_B_RST 27
+#define R9A07G054_POEG_C_RST 28
+#define R9A07G054_POEG_D_RST 29
+#define R9A07G054_WDT0_PRESETN 30
+#define R9A07G054_WDT1_PRESETN 31
+#define R9A07G054_WDT2_PRESETN 32
+#define R9A07G054_SPI_RST 33
+#define R9A07G054_SDHI0_IXRST 34
+#define R9A07G054_SDHI1_IXRST 35
+#define R9A07G054_GPU_RESETN 36
+#define R9A07G054_GPU_AXI_RESETN 37
+#define R9A07G054_GPU_ACE_RESETN 38
+#define R9A07G054_ISU_ARESETN 39
+#define R9A07G054_ISU_PRESETN 40
+#define R9A07G054_H264_X_RESET_VCP 41
+#define R9A07G054_H264_CP_PRESET_P 42
+#define R9A07G054_CRU_CMN_RSTB 43
+#define R9A07G054_CRU_PRESETN 44
+#define R9A07G054_CRU_ARESETN 45
+#define R9A07G054_MIPI_DSI_CMN_RSTB 46
+#define R9A07G054_MIPI_DSI_ARESET_N 47
+#define R9A07G054_MIPI_DSI_PRESET_N 48
+#define R9A07G054_LCDC_RESET_N 49
+#define R9A07G054_SSI0_RST_M2_REG 50
+#define R9A07G054_SSI1_RST_M2_REG 51
+#define R9A07G054_SSI2_RST_M2_REG 52
+#define R9A07G054_SSI3_RST_M2_REG 53
+#define R9A07G054_SRC_RST 54
+#define R9A07G054_USB_U2H0_HRESETN 55
+#define R9A07G054_USB_U2H1_HRESETN 56
+#define R9A07G054_USB_U2P_EXL_SYSRST 57
+#define R9A07G054_USB_PRESETN 58
+#define R9A07G054_ETH0_RST_HW_N 59
+#define R9A07G054_ETH1_RST_HW_N 60
+#define R9A07G054_I2C0_MRST 61
+#define R9A07G054_I2C1_MRST 62
+#define R9A07G054_I2C2_MRST 63
+#define R9A07G054_I2C3_MRST 64
+#define R9A07G054_SCIF0_RST_SYSTEM_N 65
+#define R9A07G054_SCIF1_RST_SYSTEM_N 66
+#define R9A07G054_SCIF2_RST_SYSTEM_N 67
+#define R9A07G054_SCIF3_RST_SYSTEM_N 68
+#define R9A07G054_SCIF4_RST_SYSTEM_N 69
+#define R9A07G054_SCI0_RST 70
+#define R9A07G054_SCI1_RST 71
+#define R9A07G054_IRDA_RST 72
+#define R9A07G054_RSPI0_RST 73
+#define R9A07G054_RSPI1_RST 74
+#define R9A07G054_RSPI2_RST 75
+#define R9A07G054_CANFD_RSTP_N 76
+#define R9A07G054_CANFD_RSTC_N 77
+#define R9A07G054_GPIO_RSTN 78
+#define R9A07G054_GPIO_PORT_RESETN 79
+#define R9A07G054_GPIO_SPARE_RESETN 80
+#define R9A07G054_ADC_PRESETN 81
+#define R9A07G054_ADC_ADRST_N 82
+#define R9A07G054_TSU_PRESETN 83
+#define R9A07G054_STPAI_ARESETN 84
+
+#endif /* __DT_BINDINGS_CLOCK_R9A07G054_CPG_H__ */
diff --git a/include/dt-bindings/clock/r9a09g011-cpg.h b/include/dt-bindings/clock/r9a09g011-cpg.h
new file mode 100644
index 000000000000..41dd585d7115
--- /dev/null
+++ b/include/dt-bindings/clock/r9a09g011-cpg.h
@@ -0,0 +1,352 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R9A09G011_CPG_H__
+#define __DT_BINDINGS_CLOCK_R9A09G011_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* Module Clocks */
+#define R9A09G011_SYS_CLK 0
+#define R9A09G011_PFC_PCLK 1
+#define R9A09G011_PMC_CORE_CLOCK 2
+#define R9A09G011_GIC_CLK 3
+#define R9A09G011_RAMA_ACLK 4
+#define R9A09G011_ROMA_ACLK 5
+#define R9A09G011_SEC_ACLK 6
+#define R9A09G011_SEC_PCLK 7
+#define R9A09G011_SEC_TCLK 8
+#define R9A09G011_DMAA_ACLK 9
+#define R9A09G011_TSU0_PCLK 10
+#define R9A09G011_TSU1_PCLK 11
+
+#define R9A09G011_CST_TRACECLK 12
+#define R9A09G011_CST_SB_CLK 13
+#define R9A09G011_CST_AHB_CLK 14
+#define R9A09G011_CST_ATB_SB_CLK 15
+#define R9A09G011_CST_TS_SB_CLK 16
+
+#define R9A09G011_SDI0_ACLK 17
+#define R9A09G011_SDI0_IMCLK 18
+#define R9A09G011_SDI0_IMCLK2 19
+#define R9A09G011_SDI0_CLK_HS 20
+#define R9A09G011_SDI1_ACLK 21
+#define R9A09G011_SDI1_IMCLK 22
+#define R9A09G011_SDI1_IMCLK2 23
+#define R9A09G011_SDI1_CLK_HS 24
+#define R9A09G011_EMM_ACLK 25
+#define R9A09G011_EMM_IMCLK 26
+#define R9A09G011_EMM_IMCLK2 27
+#define R9A09G011_EMM_CLK_HS 28
+#define R9A09G011_NFI_ACLK 29
+#define R9A09G011_NFI_NF_CLK 30
+
+#define R9A09G011_PCI_ACLK 31
+#define R9A09G011_PCI_CLK_PMU 32
+#define R9A09G011_PCI_APB_CLK 33
+#define R9A09G011_USB_ACLK_H 34
+#define R9A09G011_USB_ACLK_P 35
+#define R9A09G011_USB_PCLK 36
+#define R9A09G011_ETH0_CLK_AXI 37
+#define R9A09G011_ETH0_CLK_CHI 38
+#define R9A09G011_ETH0_GPTP_EXT 39
+
+#define R9A09G011_SDT_CLK 40
+#define R9A09G011_SDT_CLKAPB 41
+#define R9A09G011_SDT_CLK48 42
+#define R9A09G011_GRP_CLK 43
+#define R9A09G011_CIF_P0_CLK 44
+#define R9A09G011_CIF_P1_CLK 45
+#define R9A09G011_CIF_APB_CLK 46
+#define R9A09G011_DCI_CLKAXI 47
+#define R9A09G011_DCI_CLKAPB 48
+#define R9A09G011_DCI_CLKDCI2 49
+
+#define R9A09G011_HMI_PCLK 50
+#define R9A09G011_LCI_PCLK 51
+#define R9A09G011_LCI_ACLK 52
+#define R9A09G011_LCI_VCLK 53
+#define R9A09G011_LCI_LPCLK 54
+
+#define R9A09G011_AUI_CLK 55
+#define R9A09G011_AUI_CLKAXI 56
+#define R9A09G011_AUI_CLKAPB 57
+#define R9A09G011_AUMCLK 58
+#define R9A09G011_GMCLK0 59
+#define R9A09G011_GMCLK1 60
+#define R9A09G011_MTR_CLK0 61
+#define R9A09G011_MTR_CLK1 62
+#define R9A09G011_MTR_CLKAPB 63
+#define R9A09G011_GFT_CLK 64
+#define R9A09G011_GFT_CLKAPB 65
+#define R9A09G011_GFT_MCLK 66
+
+#define R9A09G011_ATGA_CLK 67
+#define R9A09G011_ATGA_CLKAPB 68
+#define R9A09G011_ATGB_CLK 69
+#define R9A09G011_ATGB_CLKAPB 70
+#define R9A09G011_SYC_CNT_CLK 71
+
+#define R9A09G011_CPERI_GRPA_PCLK 72
+#define R9A09G011_TIM0_CLK 73
+#define R9A09G011_TIM1_CLK 74
+#define R9A09G011_TIM2_CLK 75
+#define R9A09G011_TIM3_CLK 76
+#define R9A09G011_TIM4_CLK 77
+#define R9A09G011_TIM5_CLK 78
+#define R9A09G011_TIM6_CLK 79
+#define R9A09G011_TIM7_CLK 80
+#define R9A09G011_IIC_PCLK0 81
+
+#define R9A09G011_CPERI_GRPB_PCLK 82
+#define R9A09G011_TIM8_CLK 83
+#define R9A09G011_TIM9_CLK 84
+#define R9A09G011_TIM10_CLK 85
+#define R9A09G011_TIM11_CLK 86
+#define R9A09G011_TIM12_CLK 87
+#define R9A09G011_TIM13_CLK 88
+#define R9A09G011_TIM14_CLK 89
+#define R9A09G011_TIM15_CLK 90
+#define R9A09G011_IIC_PCLK1 91
+
+#define R9A09G011_CPERI_GRPC_PCLK 92
+#define R9A09G011_TIM16_CLK 93
+#define R9A09G011_TIM17_CLK 94
+#define R9A09G011_TIM18_CLK 95
+#define R9A09G011_TIM19_CLK 96
+#define R9A09G011_TIM20_CLK 97
+#define R9A09G011_TIM21_CLK 98
+#define R9A09G011_TIM22_CLK 99
+#define R9A09G011_TIM23_CLK 100
+#define R9A09G011_WDT0_PCLK 101
+#define R9A09G011_WDT0_CLK 102
+#define R9A09G011_WDT1_PCLK 103
+#define R9A09G011_WDT1_CLK 104
+
+#define R9A09G011_CPERI_GRPD_PCLK 105
+#define R9A09G011_TIM24_CLK 106
+#define R9A09G011_TIM25_CLK 107
+#define R9A09G011_TIM26_CLK 108
+#define R9A09G011_TIM27_CLK 109
+#define R9A09G011_TIM28_CLK 110
+#define R9A09G011_TIM29_CLK 111
+#define R9A09G011_TIM30_CLK 112
+#define R9A09G011_TIM31_CLK 113
+
+#define R9A09G011_CPERI_GRPE_PCLK 114
+#define R9A09G011_PWM0_CLK 115
+#define R9A09G011_PWM1_CLK 116
+#define R9A09G011_PWM2_CLK 117
+#define R9A09G011_PWM3_CLK 118
+#define R9A09G011_PWM4_CLK 119
+#define R9A09G011_PWM5_CLK 120
+#define R9A09G011_PWM6_CLK 121
+#define R9A09G011_PWM7_CLK 122
+
+#define R9A09G011_CPERI_GRPF_PCLK 123
+#define R9A09G011_PWM8_CLK 124
+#define R9A09G011_PWM9_CLK 125
+#define R9A09G011_PWM10_CLK 126
+#define R9A09G011_PWM11_CLK 127
+#define R9A09G011_PWM12_CLK 128
+#define R9A09G011_PWM13_CLK 129
+#define R9A09G011_PWM14_CLK 130
+#define R9A09G011_PWM15_CLK 131
+
+#define R9A09G011_CPERI_GRPG_PCLK 132
+#define R9A09G011_CPERI_GRPH_PCLK 133
+#define R9A09G011_URT_PCLK 134
+#define R9A09G011_URT0_CLK 135
+#define R9A09G011_URT1_CLK 136
+#define R9A09G011_CSI0_CLK 137
+#define R9A09G011_CSI1_CLK 138
+#define R9A09G011_CSI2_CLK 139
+#define R9A09G011_CSI3_CLK 140
+#define R9A09G011_CSI4_CLK 141
+#define R9A09G011_CSI5_CLK 142
+
+#define R9A09G011_ICB_ACLK1 143
+#define R9A09G011_ICB_GIC_CLK 144
+#define R9A09G011_ICB_MPCLK1 145
+#define R9A09G011_ICB_SPCLK1 146
+#define R9A09G011_ICB_CLK48 147
+#define R9A09G011_ICB_CLK48_2 148
+#define R9A09G011_ICB_CLK48_3 149
+#define R9A09G011_ICB_CLK48_4L 150
+#define R9A09G011_ICB_CLK48_4R 151
+#define R9A09G011_ICB_CLK48_5 152
+#define R9A09G011_ICB_CST_ATB_SB_CLK 153
+#define R9A09G011_ICB_CST_CS_CLK 154
+#define R9A09G011_ICB_CLK100_1 155
+#define R9A09G011_ICB_ETH0_CLK_AXI 156
+#define R9A09G011_ICB_DCI_CLKAXI 157
+#define R9A09G011_ICB_SYC_CNT_CLK 158
+
+#define R9A09G011_ICB_DRPA_ACLK 159
+#define R9A09G011_ICB_RFX_ACLK 160
+#define R9A09G011_ICB_RFX_PCLK5 161
+#define R9A09G011_ICB_MMC_ACLK 162
+
+#define R9A09G011_ICB_MPCLK3 163
+#define R9A09G011_ICB_CIMA_CLK 164
+#define R9A09G011_ICB_CIMB_CLK 165
+#define R9A09G011_ICB_BIMA_CLK 166
+#define R9A09G011_ICB_FCD_CLKAXI 167
+#define R9A09G011_ICB_VD_ACLK4 168
+#define R9A09G011_ICB_MPCLK4 169
+#define R9A09G011_ICB_VCD_PCLK4 170
+
+#define R9A09G011_CA53_CLK 171
+#define R9A09G011_CA53_ACLK 172
+#define R9A09G011_CA53_APCLK_DBG 173
+#define R9A09G011_CST_APB_CA53_CLK 174
+#define R9A09G011_CA53_ATCLK 175
+#define R9A09G011_CST_CS_CLK 176
+#define R9A09G011_CA53_TSCLK 177
+#define R9A09G011_CST_TS_CLK 178
+#define R9A09G011_CA53_APCLK_REG 179
+
+#define R9A09G011_DRPA_ACLK 180
+#define R9A09G011_DRPA_DCLK 181
+#define R9A09G011_DRPA_INITCLK 182
+
+#define R9A09G011_RAMB0_ACLK 183
+#define R9A09G011_RAMB1_ACLK 184
+#define R9A09G011_RAMB2_ACLK 185
+#define R9A09G011_RAMB3_ACLK 186
+
+#define R9A09G011_CIMA_CLKAPB 187
+#define R9A09G011_CIMA_CLK 188
+#define R9A09G011_CIMB_CLK 189
+#define R9A09G011_FAFA_CLK 190
+#define R9A09G011_STG_CLKAXI 191
+#define R9A09G011_STG_CLK0 192
+
+#define R9A09G011_BIMA_CLKAPB 193
+#define R9A09G011_BIMA_CLK 194
+#define R9A09G011_FAFB_CLK 195
+#define R9A09G011_FCD_CLK 196
+#define R9A09G011_FCD_CLKAXI 197
+
+#define R9A09G011_RIM_CLK 198
+#define R9A09G011_VCD_ACLK 199
+#define R9A09G011_VCD_PCLK 200
+#define R9A09G011_JPG0_CLK 201
+#define R9A09G011_JPG0_ACLK 202
+
+#define R9A09G011_MMC_CORE_DDRC_CLK 203
+#define R9A09G011_MMC_ACLK 204
+#define R9A09G011_MMC_PCLK 205
+#define R9A09G011_DDI_APBCLK 206
+
+/* Resets */
+#define R9A09G011_SYS_RST_N 0
+#define R9A09G011_PFC_PRESETN 1
+#define R9A09G011_RAMA_ARESETN 2
+#define R9A09G011_ROM_ARESETN 3
+#define R9A09G011_DMAA_ARESETN 4
+#define R9A09G011_SEC_ARESETN 5
+#define R9A09G011_SEC_PRESETN 6
+#define R9A09G011_SEC_RSTB 7
+#define R9A09G011_TSU0_RESETN 8
+#define R9A09G011_TSU1_RESETN 9
+#define R9A09G011_PMC_RESET_N 10
+
+#define R9A09G011_CST_NTRST 11
+#define R9A09G011_CST_NPOTRST 12
+#define R9A09G011_CST_NTRST2 13
+#define R9A09G011_CST_CS_RESETN 14
+#define R9A09G011_CST_TS_RESETN 15
+#define R9A09G011_CST_TRESETN 16
+#define R9A09G011_CST_SB_RESETN 17
+#define R9A09G011_CST_AHB_RESETN 18
+#define R9A09G011_CST_TS_SB_RESETN 19
+#define R9A09G011_CST_APB_CA53_RESETN 20
+#define R9A09G011_CST_ATB_SB_RESETN 21
+
+#define R9A09G011_SDI0_IXRST 22
+#define R9A09G011_SDI1_IXRST 23
+#define R9A09G011_EMM_IXRST 24
+#define R9A09G011_NFI_MARESETN 25
+#define R9A09G011_NFI_REG_RST_N 26
+#define R9A09G011_USB_PRESET_N 27
+#define R9A09G011_USB_DRD_RESET 28
+#define R9A09G011_USB_ARESETN_P 29
+#define R9A09G011_USB_ARESETN_H 30
+#define R9A09G011_ETH0_RST_HW_N 31
+#define R9A09G011_PCI_ARESETN 32
+
+#define R9A09G011_SDT_RSTSYSAX 33
+#define R9A09G011_GRP_RESETN 34
+#define R9A09G011_CIF_RST_N 35
+#define R9A09G011_DCU_RSTSYSAX 36
+#define R9A09G011_HMI_RST_N 37
+#define R9A09G011_HMI_PRESETN 38
+#define R9A09G011_LCI_PRESETN 39
+#define R9A09G011_LCI_ARESETN 40
+
+#define R9A09G011_AUI_RSTSYSAX 41
+#define R9A09G011_MTR_RSTSYSAX 42
+#define R9A09G011_GFT_RSTSYSAX 43
+#define R9A09G011_ATGA_RSTSYSAX 44
+#define R9A09G011_ATGB_RSTSYSAX 45
+#define R9A09G011_SYC_RST_N 46
+
+#define R9A09G011_TIM_GPA_PRESETN 47
+#define R9A09G011_TIM_GPB_PRESETN 48
+#define R9A09G011_TIM_GPC_PRESETN 49
+#define R9A09G011_TIM_GPD_PRESETN 50
+#define R9A09G011_PWM_GPE_PRESETN 51
+#define R9A09G011_PWM_GPF_PRESETN 52
+#define R9A09G011_CSI_GPG_PRESETN 53
+#define R9A09G011_CSI_GPH_PRESETN 54
+#define R9A09G011_IIC_GPA_PRESETN 55
+#define R9A09G011_IIC_GPB_PRESETN 56
+#define R9A09G011_URT_PRESETN 57
+#define R9A09G011_WDT0_PRESETN 58
+#define R9A09G011_WDT1_PRESETN 59
+
+#define R9A09G011_ICB_PD_AWO_RST_N 60
+#define R9A09G011_ICB_PD_MMC_RST_N 61
+#define R9A09G011_ICB_PD_VD0_RST_N 62
+#define R9A09G011_ICB_PD_VD1_RST_N 63
+#define R9A09G011_ICB_PD_RFX_RST_N 64
+
+#define R9A09G011_CA53_NCPUPORESET0 65
+#define R9A09G011_CA53_NCPUPORESET1 66
+#define R9A09G011_CA53_NCORERESET0 67
+#define R9A09G011_CA53_NCORERESET1 68
+#define R9A09G011_CA53_NPRESETDBG 69
+#define R9A09G011_CA53_L2RESET 70
+#define R9A09G011_CA53_NMISCRESET_HM 71
+#define R9A09G011_CA53_NMISCRESET_SM 72
+#define R9A09G011_CA53_NARESET 73
+
+#define R9A09G011_DRPA_ARESETN 74
+
+#define R9A09G011_RAMB0_ARESETN 75
+#define R9A09G011_RAMB1_ARESETN 76
+#define R9A09G011_RAMB2_ARESETN 77
+#define R9A09G011_RAMB3_ARESETN 78
+
+#define R9A09G011_CIMA_RSTSYSAX 79
+#define R9A09G011_CIMB_RSTSYSAX 80
+#define R9A09G011_FAFA_RSTSYSAX 81
+#define R9A09G011_STG_RSTSYSAX 82
+
+#define R9A09G011_BIMA_RSTSYSAX 83
+#define R9A09G011_FAFB_RSTSYSAX 84
+#define R9A09G011_FCD_RSTSYSAX 85
+#define R9A09G011_RIM_RSTSYSAX 86
+#define R9A09G011_VCD_RESETN 87
+#define R9A09G011_JPG_XRESET 88
+
+#define R9A09G011_MMC_CORE_DDRC_RSTN 89
+#define R9A09G011_MMC_ARESETN_N 90
+#define R9A09G011_MMC_PRESETN 91
+#define R9A09G011_DDI_PWROK 92
+#define R9A09G011_DDI_RESET 93
+#define R9A09G011_DDI_RESETN_APB 94
+
+#endif /* __DT_BINDINGS_CLOCK_R9A09G011_CPG_H__ */
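[Note: the R9A09G011 (RZ/V2M) identifiers above are only meaningful through the SoC's CPG node. A minimal consumer sketch, assuming a CPG node labelled "cpg", an "emmc" node, and the two-cell CPG_MOD specifier used by other Renesas CPG bindings — all assumptions for illustration, not part of this patch:

	#include <dt-bindings/clock/r9a09g011-cpg.h>

	&emmc {
		/* Bus and register clocks plus the matching soft reset,
		 * taken from the tables above; pairing is illustrative. */
		clocks = <&cpg CPG_MOD R9A09G011_MMC_ACLK>,
			 <&cpg CPG_MOD R9A09G011_MMC_PCLK>;
		resets = <&cpg R9A09G011_EMM_IXRST>;
	};
]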
diff --git a/include/dt-bindings/clock/rk3036-cru.h b/include/dt-bindings/clock/rk3036-cru.h
index 35a5a01f9697..a96a9870ad59 100644
--- a/include/dt-bindings/clock/rk3036-cru.h
+++ b/include/dt-bindings/clock/rk3036-cru.h
@@ -81,6 +81,7 @@
#define HCLK_OTG0 449
#define HCLK_OTG1 450
#define HCLK_NANDC 453
+#define HCLK_SFC 454
#define HCLK_SDMMC 456
#define HCLK_SDIO 457
#define HCLK_EMMC 459
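[Note: a hypothetical board fragment consuming the new HCLK_SFC gate; the "sfc" node, the "cru" label, and SCLK_SFC being defined elsewhere in this header are assumptions, not shown in this hunk:

	&sfc {
		clocks = <&cru SCLK_SFC>, <&cru HCLK_SFC>;
		clock-names = "clk_sfc", "hclk_sfc";
	};
]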
diff --git a/include/dt-bindings/clock/rk3368-cru.h b/include/dt-bindings/clock/rk3368-cru.h
index 0a06c5f514d7..83c72a163fd3 100644
--- a/include/dt-bindings/clock/rk3368-cru.h
+++ b/include/dt-bindings/clock/rk3368-cru.h
@@ -78,6 +78,7 @@
#define SCLK_TIMER13 136
#define SCLK_TIMER14 137
#define SCLK_TIMER15 138
+#define SCLK_VIP_OUT 139
#define DCLK_VOP 190
#define MCLK_CRYPTO 191
@@ -148,6 +149,8 @@
#define PCLK_VIP 367
#define PCLK_WDT 368
#define PCLK_EFUSE256 369
+#define PCLK_DPHYRX 370
+#define PCLK_DPHYTX0 371
/* hclk gates */
#define HCLK_SFC 448
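[Note: the three new rk3368 identifiers would be consumed from a board DTS; a hedged sketch for a camera-pipeline node, where the node name, the "cru" label, and the clock-names are illustrative rather than defined by this patch:

	&isp {
		clocks = <&cru PCLK_VIP>, <&cru SCLK_VIP_OUT>,
			 <&cru PCLK_DPHYRX>;
		clock-names = "pclk_vip", "clk_vip_out", "pclk_dphyrx";
	};
]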
diff --git a/include/dt-bindings/clock/rk3568-cru.h b/include/dt-bindings/clock/rk3568-cru.h
new file mode 100644
index 000000000000..d29890865150
--- /dev/null
+++ b/include/dt-bindings/clock/rk3568-cru.h
@@ -0,0 +1,926 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co. Ltd.
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3568_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3568_H
+
+/* pmucru-clocks indices */
+
+/* pmucru plls */
+#define PLL_PPLL 1
+#define PLL_HPLL 2
+
+/* pmucru clocks */
+#define XIN_OSC0_DIV 4
+#define CLK_RTC_32K 5
+#define CLK_PMU 6
+#define CLK_I2C0 7
+#define CLK_RTC32K_FRAC 8
+#define CLK_UART0_DIV 9
+#define CLK_UART0_FRAC 10
+#define SCLK_UART0 11
+#define DBCLK_GPIO0 12
+#define CLK_PWM0 13
+#define CLK_CAPTURE_PWM0_NDFT 14
+#define CLK_PMUPVTM 15
+#define CLK_CORE_PMUPVTM 16
+#define CLK_REF24M 17
+#define XIN_OSC0_USBPHY0_G 18
+#define CLK_USBPHY0_REF 19
+#define XIN_OSC0_USBPHY1_G 20
+#define CLK_USBPHY1_REF 21
+#define XIN_OSC0_MIPIDSIPHY0_G 22
+#define CLK_MIPIDSIPHY0_REF 23
+#define XIN_OSC0_MIPIDSIPHY1_G 24
+#define CLK_MIPIDSIPHY1_REF 25
+#define CLK_WIFI_DIV 26
+#define CLK_WIFI_OSC0 27
+#define CLK_WIFI 28
+#define CLK_PCIEPHY0_DIV 29
+#define CLK_PCIEPHY0_OSC0 30
+#define CLK_PCIEPHY0_REF 31
+#define CLK_PCIEPHY1_DIV 32
+#define CLK_PCIEPHY1_OSC0 33
+#define CLK_PCIEPHY1_REF 34
+#define CLK_PCIEPHY2_DIV 35
+#define CLK_PCIEPHY2_OSC0 36
+#define CLK_PCIEPHY2_REF 37
+#define CLK_PCIE30PHY_REF_M 38
+#define CLK_PCIE30PHY_REF_N 39
+#define CLK_HDMI_REF 40
+#define XIN_OSC0_EDPPHY_G 41
+#define PCLK_PDPMU 42
+#define PCLK_PMU 43
+#define PCLK_UART0 44
+#define PCLK_I2C0 45
+#define PCLK_GPIO0 46
+#define PCLK_PMUPVTM 47
+#define PCLK_PWM0 48
+#define CLK_PDPMU 49
+#define SCLK_32K_IOE 50
+
+#define CLKPMU_NR_CLKS (SCLK_32K_IOE + 1)
+
+/* cru-clocks indices */
+
+/* cru plls */
+#define PLL_APLL 1
+#define PLL_DPLL 2
+#define PLL_CPLL 3
+#define PLL_GPLL 4
+#define PLL_VPLL 5
+#define PLL_NPLL 6
+
+/* cru clocks */
+#define CPLL_333M 9
+#define ARMCLK 10
+#define USB480M 11
+#define ACLK_CORE_NIU2BUS 18
+#define CLK_CORE_PVTM 19
+#define CLK_CORE_PVTM_CORE 20
+#define CLK_CORE_PVTPLL 21
+#define CLK_GPU_SRC 22
+#define CLK_GPU_PRE_NDFT 23
+#define CLK_GPU_PRE_MUX 24
+#define ACLK_GPU_PRE 25
+#define PCLK_GPU_PRE 26
+#define CLK_GPU 27
+#define CLK_GPU_NP5 28
+#define PCLK_GPU_PVTM 29
+#define CLK_GPU_PVTM 30
+#define CLK_GPU_PVTM_CORE 31
+#define CLK_GPU_PVTPLL 32
+#define CLK_NPU_SRC 33
+#define CLK_NPU_PRE_NDFT 34
+#define CLK_NPU 35
+#define CLK_NPU_NP5 36
+#define HCLK_NPU_PRE 37
+#define PCLK_NPU_PRE 38
+#define ACLK_NPU_PRE 39
+#define ACLK_NPU 40
+#define HCLK_NPU 41
+#define PCLK_NPU_PVTM 42
+#define CLK_NPU_PVTM 43
+#define CLK_NPU_PVTM_CORE 44
+#define CLK_NPU_PVTPLL 45
+#define CLK_DDRPHY1X_SRC 46
+#define CLK_DDRPHY1X_HWFFC_SRC 47
+#define CLK_DDR1X 48
+#define CLK_MSCH 49
+#define CLK24_DDRMON 50
+#define ACLK_GIC_AUDIO 51
+#define HCLK_GIC_AUDIO 52
+#define HCLK_SDMMC_BUFFER 53
+#define DCLK_SDMMC_BUFFER 54
+#define ACLK_GIC600 55
+#define ACLK_SPINLOCK 56
+#define HCLK_I2S0_8CH 57
+#define HCLK_I2S1_8CH 58
+#define HCLK_I2S2_2CH 59
+#define HCLK_I2S3_2CH 60
+#define CLK_I2S0_8CH_TX_SRC 61
+#define CLK_I2S0_8CH_TX_FRAC 62
+#define MCLK_I2S0_8CH_TX 63
+#define I2S0_MCLKOUT_TX 64
+#define CLK_I2S0_8CH_RX_SRC 65
+#define CLK_I2S0_8CH_RX_FRAC 66
+#define MCLK_I2S0_8CH_RX 67
+#define I2S0_MCLKOUT_RX 68
+#define CLK_I2S1_8CH_TX_SRC 69
+#define CLK_I2S1_8CH_TX_FRAC 70
+#define MCLK_I2S1_8CH_TX 71
+#define I2S1_MCLKOUT_TX 72
+#define CLK_I2S1_8CH_RX_SRC 73
+#define CLK_I2S1_8CH_RX_FRAC 74
+#define MCLK_I2S1_8CH_RX 75
+#define I2S1_MCLKOUT_RX 76
+#define CLK_I2S2_2CH_SRC 77
+#define CLK_I2S2_2CH_FRAC 78
+#define MCLK_I2S2_2CH 79
+#define I2S2_MCLKOUT 80
+#define CLK_I2S3_2CH_TX_SRC 81
+#define CLK_I2S3_2CH_TX_FRAC 82
+#define MCLK_I2S3_2CH_TX 83
+#define I2S3_MCLKOUT_TX 84
+#define CLK_I2S3_2CH_RX_SRC 85
+#define CLK_I2S3_2CH_RX_FRAC 86
+#define MCLK_I2S3_2CH_RX 87
+#define I2S3_MCLKOUT_RX 88
+#define HCLK_PDM 89
+#define MCLK_PDM 90
+#define HCLK_VAD 91
+#define HCLK_SPDIF_8CH 92
+#define MCLK_SPDIF_8CH_SRC 93
+#define MCLK_SPDIF_8CH_FRAC 94
+#define MCLK_SPDIF_8CH 95
+#define HCLK_AUDPWM 96
+#define SCLK_AUDPWM_SRC 97
+#define SCLK_AUDPWM_FRAC 98
+#define SCLK_AUDPWM 99
+#define HCLK_ACDCDIG 100
+#define CLK_ACDCDIG_I2C 101
+#define CLK_ACDCDIG_DAC 102
+#define CLK_ACDCDIG_ADC 103
+#define ACLK_SECURE_FLASH 104
+#define HCLK_SECURE_FLASH 105
+#define ACLK_CRYPTO_NS 106
+#define HCLK_CRYPTO_NS 107
+#define CLK_CRYPTO_NS_CORE 108
+#define CLK_CRYPTO_NS_PKA 109
+#define CLK_CRYPTO_NS_RNG 110
+#define HCLK_TRNG_NS 111
+#define CLK_TRNG_NS 112
+#define PCLK_OTPC_NS 113
+#define CLK_OTPC_NS_SBPI 114
+#define CLK_OTPC_NS_USR 115
+#define HCLK_NANDC 116
+#define NCLK_NANDC 117
+#define HCLK_SFC 118
+#define HCLK_SFC_XIP 119
+#define SCLK_SFC 120
+#define ACLK_EMMC 121
+#define HCLK_EMMC 122
+#define BCLK_EMMC 123
+#define CCLK_EMMC 124
+#define TCLK_EMMC 125
+#define ACLK_PIPE 126
+#define PCLK_PIPE 127
+#define PCLK_PIPE_GRF 128
+#define ACLK_PCIE20_MST 129
+#define ACLK_PCIE20_SLV 130
+#define ACLK_PCIE20_DBI 131
+#define PCLK_PCIE20 132
+#define CLK_PCIE20_AUX_NDFT 133
+#define CLK_PCIE20_AUX_DFT 134
+#define CLK_PCIE20_PIPE_DFT 135
+#define ACLK_PCIE30X1_MST 136
+#define ACLK_PCIE30X1_SLV 137
+#define ACLK_PCIE30X1_DBI 138
+#define PCLK_PCIE30X1 139
+#define CLK_PCIE30X1_AUX_NDFT 140
+#define CLK_PCIE30X1_AUX_DFT 141
+#define CLK_PCIE30X1_PIPE_DFT 142
+#define ACLK_PCIE30X2_MST 143
+#define ACLK_PCIE30X2_SLV 144
+#define ACLK_PCIE30X2_DBI 145
+#define PCLK_PCIE30X2 146
+#define CLK_PCIE30X2_AUX_NDFT 147
+#define CLK_PCIE30X2_AUX_DFT 148
+#define CLK_PCIE30X2_PIPE_DFT 149
+#define ACLK_SATA0 150
+#define CLK_SATA0_PMALIVE 151
+#define CLK_SATA0_RXOOB 152
+#define CLK_SATA0_PIPE_NDFT 153
+#define CLK_SATA0_PIPE_DFT 154
+#define ACLK_SATA1 155
+#define CLK_SATA1_PMALIVE 156
+#define CLK_SATA1_RXOOB 157
+#define CLK_SATA1_PIPE_NDFT 158
+#define CLK_SATA1_PIPE_DFT 159
+#define ACLK_SATA2 160
+#define CLK_SATA2_PMALIVE 161
+#define CLK_SATA2_RXOOB 162
+#define CLK_SATA2_PIPE_NDFT 163
+#define CLK_SATA2_PIPE_DFT 164
+#define ACLK_USB3OTG0 165
+#define CLK_USB3OTG0_REF 166
+#define CLK_USB3OTG0_SUSPEND 167
+#define ACLK_USB3OTG1 168
+#define CLK_USB3OTG1_REF 169
+#define CLK_USB3OTG1_SUSPEND 170
+#define CLK_XPCS_EEE 171
+#define PCLK_XPCS 172
+#define ACLK_PHP 173
+#define HCLK_PHP 174
+#define PCLK_PHP 175
+#define HCLK_SDMMC0 176
+#define CLK_SDMMC0 177
+#define HCLK_SDMMC1 178
+#define CLK_SDMMC1 179
+#define ACLK_GMAC0 180
+#define PCLK_GMAC0 181
+#define CLK_MAC0_2TOP 182
+#define CLK_MAC0_OUT 183
+#define CLK_MAC0_REFOUT 184
+#define CLK_GMAC0_PTP_REF 185
+#define ACLK_USB 186
+#define HCLK_USB 187
+#define PCLK_USB 188
+#define HCLK_USB2HOST0 189
+#define HCLK_USB2HOST0_ARB 190
+#define HCLK_USB2HOST1 191
+#define HCLK_USB2HOST1_ARB 192
+#define HCLK_SDMMC2 193
+#define CLK_SDMMC2 194
+#define ACLK_GMAC1 195
+#define PCLK_GMAC1 196
+#define CLK_MAC1_2TOP 197
+#define CLK_MAC1_OUT 198
+#define CLK_MAC1_REFOUT 199
+#define CLK_GMAC1_PTP_REF 200
+#define ACLK_PERIMID 201
+#define HCLK_PERIMID 202
+#define ACLK_VI 203
+#define HCLK_VI 204
+#define PCLK_VI 205
+#define ACLK_VICAP 206
+#define HCLK_VICAP 207
+#define DCLK_VICAP 208
+#define ICLK_VICAP_G 209
+#define ACLK_ISP 210
+#define HCLK_ISP 211
+#define CLK_ISP 212
+#define PCLK_CSI2HOST1 213
+#define CLK_CIF_OUT 214
+#define CLK_CAM0_OUT 215
+#define CLK_CAM1_OUT 216
+#define ACLK_VO 217
+#define HCLK_VO 218
+#define PCLK_VO 219
+#define ACLK_VOP_PRE 220
+#define ACLK_VOP 221
+#define HCLK_VOP 222
+#define DCLK_VOP0 223
+#define DCLK_VOP1 224
+#define DCLK_VOP2 225
+#define CLK_VOP_PWM 226
+#define ACLK_HDCP 227
+#define HCLK_HDCP 228
+#define PCLK_HDCP 229
+#define PCLK_HDMI_HOST 230
+#define CLK_HDMI_SFR 231
+#define PCLK_DSITX_0 232
+#define PCLK_DSITX_1 233
+#define PCLK_EDP_CTRL 234
+#define CLK_EDP_200M 235
+#define ACLK_VPU_PRE 236
+#define HCLK_VPU_PRE 237
+#define ACLK_VPU 238
+#define HCLK_VPU 239
+#define ACLK_RGA_PRE 240
+#define HCLK_RGA_PRE 241
+#define PCLK_RGA_PRE 242
+#define ACLK_RGA 243
+#define HCLK_RGA 244
+#define CLK_RGA_CORE 245
+#define ACLK_IEP 246
+#define HCLK_IEP 247
+#define CLK_IEP_CORE 248
+#define HCLK_EBC 249
+#define DCLK_EBC 250
+#define ACLK_JDEC 251
+#define HCLK_JDEC 252
+#define ACLK_JENC 253
+#define HCLK_JENC 254
+#define PCLK_EINK 255
+#define HCLK_EINK 256
+#define ACLK_RKVENC_PRE 257
+#define HCLK_RKVENC_PRE 258
+#define ACLK_RKVENC 259
+#define HCLK_RKVENC 260
+#define CLK_RKVENC_CORE 261
+#define ACLK_RKVDEC_PRE 262
+#define HCLK_RKVDEC_PRE 263
+#define ACLK_RKVDEC 264
+#define HCLK_RKVDEC 265
+#define CLK_RKVDEC_CA 266
+#define CLK_RKVDEC_CORE 267
+#define CLK_RKVDEC_HEVC_CA 268
+#define ACLK_BUS 269
+#define PCLK_BUS 270
+#define PCLK_TSADC 271
+#define CLK_TSADC_TSEN 272
+#define CLK_TSADC 273
+#define PCLK_SARADC 274
+#define CLK_SARADC 275
+#define PCLK_SCR 276
+#define PCLK_WDT_NS 277
+#define TCLK_WDT_NS 278
+#define ACLK_DMAC0 279
+#define ACLK_DMAC1 280
+#define ACLK_MCU 281
+#define PCLK_INTMUX 282
+#define PCLK_MAILBOX 283
+#define PCLK_UART1 284
+#define CLK_UART1_SRC 285
+#define CLK_UART1_FRAC 286
+#define SCLK_UART1 287
+#define PCLK_UART2 288
+#define CLK_UART2_SRC 289
+#define CLK_UART2_FRAC 290
+#define SCLK_UART2 291
+#define PCLK_UART3 292
+#define CLK_UART3_SRC 293
+#define CLK_UART3_FRAC 294
+#define SCLK_UART3 295
+#define PCLK_UART4 296
+#define CLK_UART4_SRC 297
+#define CLK_UART4_FRAC 298
+#define SCLK_UART4 299
+#define PCLK_UART5 300
+#define CLK_UART5_SRC 301
+#define CLK_UART5_FRAC 302
+#define SCLK_UART5 303
+#define PCLK_UART6 304
+#define CLK_UART6_SRC 305
+#define CLK_UART6_FRAC 306
+#define SCLK_UART6 307
+#define PCLK_UART7 308
+#define CLK_UART7_SRC 309
+#define CLK_UART7_FRAC 310
+#define SCLK_UART7 311
+#define PCLK_UART8 312
+#define CLK_UART8_SRC 313
+#define CLK_UART8_FRAC 314
+#define SCLK_UART8 315
+#define PCLK_UART9 316
+#define CLK_UART9_SRC 317
+#define CLK_UART9_FRAC 318
+#define SCLK_UART9 319
+#define PCLK_CAN0 320
+#define CLK_CAN0 321
+#define PCLK_CAN1 322
+#define CLK_CAN1 323
+#define PCLK_CAN2 324
+#define CLK_CAN2 325
+#define CLK_I2C 326
+#define PCLK_I2C1 327
+#define CLK_I2C1 328
+#define PCLK_I2C2 329
+#define CLK_I2C2 330
+#define PCLK_I2C3 331
+#define CLK_I2C3 332
+#define PCLK_I2C4 333
+#define CLK_I2C4 334
+#define PCLK_I2C5 335
+#define CLK_I2C5 336
+#define PCLK_SPI0 337
+#define CLK_SPI0 338
+#define PCLK_SPI1 339
+#define CLK_SPI1 340
+#define PCLK_SPI2 341
+#define CLK_SPI2 342
+#define PCLK_SPI3 343
+#define CLK_SPI3 344
+#define PCLK_PWM1 345
+#define CLK_PWM1 346
+#define CLK_PWM1_CAPTURE 347
+#define PCLK_PWM2 348
+#define CLK_PWM2 349
+#define CLK_PWM2_CAPTURE 350
+#define PCLK_PWM3 351
+#define CLK_PWM3 352
+#define CLK_PWM3_CAPTURE 353
+#define DBCLK_GPIO 354
+#define PCLK_GPIO1 355
+#define DBCLK_GPIO1 356
+#define PCLK_GPIO2 357
+#define DBCLK_GPIO2 358
+#define PCLK_GPIO3 359
+#define DBCLK_GPIO3 360
+#define PCLK_GPIO4 361
+#define DBCLK_GPIO4 362
+#define OCC_SCAN_CLK_GPIO 363
+#define PCLK_TIMER 364
+#define CLK_TIMER0 365
+#define CLK_TIMER1 366
+#define CLK_TIMER2 367
+#define CLK_TIMER3 368
+#define CLK_TIMER4 369
+#define CLK_TIMER5 370
+#define ACLK_TOP_HIGH 371
+#define ACLK_TOP_LOW 372
+#define HCLK_TOP 373
+#define PCLK_TOP 374
+#define PCLK_PCIE30PHY 375
+#define CLK_OPTC_ARB 376
+#define PCLK_MIPICSIPHY 377
+#define PCLK_MIPIDSIPHY0 378
+#define PCLK_MIPIDSIPHY1 379
+#define PCLK_PIPEPHY0 380
+#define PCLK_PIPEPHY1 381
+#define PCLK_PIPEPHY2 382
+#define PCLK_CPU_BOOST 383
+#define CLK_CPU_BOOST 384
+#define PCLK_OTPPHY 385
+#define SCLK_GMAC0 386
+#define SCLK_GMAC0_RGMII_SPEED 387
+#define SCLK_GMAC0_RMII_SPEED 388
+#define SCLK_GMAC0_RX_TX 389
+#define SCLK_GMAC1 390
+#define SCLK_GMAC1_RGMII_SPEED 391
+#define SCLK_GMAC1_RMII_SPEED 392
+#define SCLK_GMAC1_RX_TX 393
+#define SCLK_SDMMC0_DRV 394
+#define SCLK_SDMMC0_SAMPLE 395
+#define SCLK_SDMMC1_DRV 396
+#define SCLK_SDMMC1_SAMPLE 397
+#define SCLK_SDMMC2_DRV 398
+#define SCLK_SDMMC2_SAMPLE 399
+#define SCLK_EMMC_DRV 400
+#define SCLK_EMMC_SAMPLE 401
+#define PCLK_EDPPHY_GRF 402
+#define CLK_HDMI_CEC 403
+#define CLK_I2S0_8CH_TX 404
+#define CLK_I2S0_8CH_RX 405
+#define CLK_I2S1_8CH_TX 406
+#define CLK_I2S1_8CH_RX 407
+#define CLK_I2S2_2CH 408
+#define CLK_I2S3_2CH_TX 409
+#define CLK_I2S3_2CH_RX 410
+#define CPLL_500M 411
+#define CPLL_250M 412
+#define CPLL_125M 413
+#define CPLL_62P5M 414
+#define CPLL_50M 415
+#define CPLL_25M 416
+#define CPLL_100M 417
+#define SCLK_DDRCLK 418
+
+#define PCLK_CORE_PVTM 450
+
+#define CLK_NR_CLKS (PCLK_CORE_PVTM + 1)
+
+/* pmu soft-reset indices */
+/* pmucru_softrst_con0 */
+#define SRST_P_PDPMU_NIU 0
+#define SRST_P_PMUCRU 1
+#define SRST_P_PMUGRF 2
+#define SRST_P_I2C0 3
+#define SRST_I2C0 4
+#define SRST_P_UART0 5
+#define SRST_S_UART0 6
+#define SRST_P_PWM0 7
+#define SRST_PWM0 8
+#define SRST_P_GPIO0 9
+#define SRST_GPIO0 10
+#define SRST_P_PMUPVTM 11
+#define SRST_PMUPVTM 12
+
+/* soft-reset indices */
+
+/* cru_softrst_con0 */
+#define SRST_NCORERESET0 0
+#define SRST_NCORERESET1 1
+#define SRST_NCORERESET2 2
+#define SRST_NCORERESET3 3
+#define SRST_NCPUPORESET0 4
+#define SRST_NCPUPORESET1 5
+#define SRST_NCPUPORESET2 6
+#define SRST_NCPUPORESET3 7
+#define SRST_NSRESET 8
+#define SRST_NSPORESET 9
+#define SRST_NATRESET 10
+#define SRST_NGICRESET 11
+#define SRST_NPRESET 12
+#define SRST_NPERIPHRESET 13
+
+/* cru_softrst_con1 */
+#define SRST_A_CORE_NIU2DDR 16
+#define SRST_A_CORE_NIU2BUS 17
+#define SRST_P_DBG_NIU 18
+#define SRST_P_DBG 19
+#define SRST_P_DBG_DAPLITE 20
+#define SRST_DAP 21
+#define SRST_A_ADB400_CORE2GIC 22
+#define SRST_A_ADB400_GIC2CORE 23
+#define SRST_P_CORE_GRF 24
+#define SRST_P_CORE_PVTM 25
+#define SRST_CORE_PVTM 26
+#define SRST_CORE_PVTPLL 27
+
+/* cru_softrst_con2 */
+#define SRST_GPU 32
+#define SRST_A_GPU_NIU 33
+#define SRST_P_GPU_NIU 34
+#define SRST_P_GPU_PVTM 35
+#define SRST_GPU_PVTM 36
+#define SRST_GPU_PVTPLL 37
+#define SRST_A_NPU_NIU 40
+#define SRST_H_NPU_NIU 41
+#define SRST_P_NPU_NIU 42
+#define SRST_A_NPU 43
+#define SRST_H_NPU 44
+#define SRST_P_NPU_PVTM 45
+#define SRST_NPU_PVTM 46
+#define SRST_NPU_PVTPLL 47
+
+/* cru_softrst_con3 */
+#define SRST_A_MSCH 51
+#define SRST_HWFFC_CTRL 52
+#define SRST_DDR_ALWAYSON 53
+#define SRST_A_DDRSPLIT 54
+#define SRST_DDRDFI_CTL 55
+#define SRST_A_DMA2DDR 57
+
+/* cru_softrst_con4 */
+#define SRST_A_PERIMID_NIU 64
+#define SRST_H_PERIMID_NIU 65
+#define SRST_A_GIC_AUDIO_NIU 66
+#define SRST_H_GIC_AUDIO_NIU 67
+#define SRST_A_GIC600 68
+#define SRST_A_GIC600_DEBUG 69
+#define SRST_A_GICADB_CORE2GIC 70
+#define SRST_A_GICADB_GIC2CORE 71
+#define SRST_A_SPINLOCK 72
+#define SRST_H_SDMMC_BUFFER 73
+#define SRST_D_SDMMC_BUFFER 74
+#define SRST_H_I2S0_8CH 75
+#define SRST_H_I2S1_8CH 76
+#define SRST_H_I2S2_2CH 77
+#define SRST_H_I2S3_2CH 78
+
+/* cru_softrst_con5 */
+#define SRST_M_I2S0_8CH_TX 80
+#define SRST_M_I2S0_8CH_RX 81
+#define SRST_M_I2S1_8CH_TX 82
+#define SRST_M_I2S1_8CH_RX 83
+#define SRST_M_I2S2_2CH 84
+#define SRST_M_I2S3_2CH_TX 85
+#define SRST_M_I2S3_2CH_RX 86
+#define SRST_H_PDM 87
+#define SRST_M_PDM 88
+#define SRST_H_VAD 89
+#define SRST_H_SPDIF_8CH 90
+#define SRST_M_SPDIF_8CH 91
+#define SRST_H_AUDPWM 92
+#define SRST_S_AUDPWM 93
+#define SRST_H_ACDCDIG 94
+#define SRST_ACDCDIG 95
+
+/* cru_softrst_con6 */
+#define SRST_A_SECURE_FLASH_NIU 96
+#define SRST_H_SECURE_FLASH_NIU 97
+#define SRST_A_CRYPTO_NS 103
+#define SRST_H_CRYPTO_NS 104
+#define SRST_CRYPTO_NS_CORE 105
+#define SRST_CRYPTO_NS_PKA 106
+#define SRST_CRYPTO_NS_RNG 107
+#define SRST_H_TRNG_NS 108
+#define SRST_TRNG_NS 109
+
+/* cru_softrst_con7 */
+#define SRST_H_NANDC 112
+#define SRST_N_NANDC 113
+#define SRST_H_SFC 114
+#define SRST_H_SFC_XIP 115
+#define SRST_S_SFC 116
+#define SRST_A_EMMC 117
+#define SRST_H_EMMC 118
+#define SRST_B_EMMC 119
+#define SRST_C_EMMC 120
+#define SRST_T_EMMC 121
+
+/* cru_softrst_con8 */
+#define SRST_A_PIPE_NIU 128
+#define SRST_P_PIPE_NIU 130
+#define SRST_P_PIPE_GRF 133
+#define SRST_A_SATA0 134
+#define SRST_SATA0_PIPE 135
+#define SRST_SATA0_PMALIVE 136
+#define SRST_SATA0_RXOOB 137
+#define SRST_A_SATA1 138
+#define SRST_SATA1_PIPE 139
+#define SRST_SATA1_PMALIVE 140
+#define SRST_SATA1_RXOOB 141
+
+/* cru_softrst_con9 */
+#define SRST_A_SATA2 144
+#define SRST_SATA2_PIPE 145
+#define SRST_SATA2_PMALIVE 146
+#define SRST_SATA2_RXOOB 147
+#define SRST_USB3OTG0 148
+#define SRST_USB3OTG1 149
+#define SRST_XPCS 150
+#define SRST_XPCS_TX_DIV10 151
+#define SRST_XPCS_RX_DIV10 152
+#define SRST_XPCS_XGXS_RX 153
+
+/* cru_softrst_con10 */
+#define SRST_P_PCIE20 160
+#define SRST_PCIE20_POWERUP 161
+#define SRST_MSTR_ARESET_PCIE20 162
+#define SRST_SLV_ARESET_PCIE20 163
+#define SRST_DBI_ARESET_PCIE20 164
+#define SRST_BRESET_PCIE20 165
+#define SRST_PERST_PCIE20 166
+#define SRST_CORE_RST_PCIE20 167
+#define SRST_NSTICKY_RST_PCIE20 168
+#define SRST_STICKY_RST_PCIE20 169
+#define SRST_PWR_RST_PCIE20 170
+
+/* cru_softrst_con11 */
+#define SRST_P_PCIE30X1 176
+#define SRST_PCIE30X1_POWERUP 177
+#define SRST_M_ARESET_PCIE30X1 178
+#define SRST_S_ARESET_PCIE30X1 179
+#define SRST_D_ARESET_PCIE30X1 180
+#define SRST_BRESET_PCIE30X1 181
+#define SRST_PERST_PCIE30X1 182
+#define SRST_CORE_RST_PCIE30X1 183
+#define SRST_NSTC_RST_PCIE30X1 184
+#define SRST_STC_RST_PCIE30X1 185
+#define SRST_PWR_RST_PCIE30X1 186
+
+/* cru_softrst_con12 */
+#define SRST_P_PCIE30X2 192
+#define SRST_PCIE30X2_POWERUP 193
+#define SRST_M_ARESET_PCIE30X2 194
+#define SRST_S_ARESET_PCIE30X2 195
+#define SRST_D_ARESET_PCIE30X2 196
+#define SRST_BRESET_PCIE30X2 197
+#define SRST_PERST_PCIE30X2 198
+#define SRST_CORE_RST_PCIE30X2 199
+#define SRST_NSTC_RST_PCIE30X2 200
+#define SRST_STC_RST_PCIE30X2 201
+#define SRST_PWR_RST_PCIE30X2 202
+
+/* cru_softrst_con13 */
+#define SRST_A_PHP_NIU 208
+#define SRST_H_PHP_NIU 209
+#define SRST_P_PHP_NIU 210
+#define SRST_H_SDMMC0 211
+#define SRST_SDMMC0 212
+#define SRST_H_SDMMC1 213
+#define SRST_SDMMC1 214
+#define SRST_A_GMAC0 215
+#define SRST_GMAC0_TIMESTAMP 216
+
+/* cru_softrst_con14 */
+#define SRST_A_USB_NIU 224
+#define SRST_H_USB_NIU 225
+#define SRST_P_USB_NIU 226
+#define SRST_P_USB_GRF 227
+#define SRST_H_USB2HOST0 228
+#define SRST_H_USB2HOST0_ARB 229
+#define SRST_USB2HOST0_UTMI 230
+#define SRST_H_USB2HOST1 231
+#define SRST_H_USB2HOST1_ARB 232
+#define SRST_USB2HOST1_UTMI 233
+#define SRST_H_SDMMC2 234
+#define SRST_SDMMC2 235
+#define SRST_A_GMAC1 236
+#define SRST_GMAC1_TIMESTAMP 237
+
+/* cru_softrst_con15 */
+#define SRST_A_VI_NIU 240
+#define SRST_H_VI_NIU 241
+#define SRST_P_VI_NIU 242
+#define SRST_A_VICAP 247
+#define SRST_H_VICAP 248
+#define SRST_D_VICAP 249
+#define SRST_I_VICAP 250
+#define SRST_P_VICAP 251
+#define SRST_H_ISP 252
+#define SRST_ISP 253
+#define SRST_P_CSI2HOST1 255
+
+/* cru_softrst_con16 */
+#define SRST_A_VO_NIU 256
+#define SRST_H_VO_NIU 257
+#define SRST_P_VO_NIU 258
+#define SRST_A_VOP_NIU 259
+#define SRST_A_VOP 260
+#define SRST_H_VOP 261
+#define SRST_VOP0 262
+#define SRST_VOP1 263
+#define SRST_VOP2 264
+#define SRST_VOP_PWM 265
+#define SRST_A_HDCP 266
+#define SRST_H_HDCP 267
+#define SRST_P_HDCP 268
+#define SRST_P_HDMI_HOST 270
+#define SRST_HDMI_HOST 271
+
+/* cru_softrst_con17 */
+#define SRST_P_DSITX_0 272
+#define SRST_P_DSITX_1 273
+#define SRST_P_EDP_CTRL 274
+#define SRST_EDP_24M 275
+#define SRST_A_VPU_NIU 280
+#define SRST_H_VPU_NIU 281
+#define SRST_A_VPU 282
+#define SRST_H_VPU 283
+#define SRST_H_EINK 286
+#define SRST_P_EINK 287
+
+/* cru_softrst_con18 */
+#define SRST_A_RGA_NIU 288
+#define SRST_H_RGA_NIU 289
+#define SRST_P_RGA_NIU 290
+#define SRST_A_RGA 292
+#define SRST_H_RGA 293
+#define SRST_RGA_CORE 294
+#define SRST_A_IEP 295
+#define SRST_H_IEP 296
+#define SRST_IEP_CORE 297
+#define SRST_H_EBC 298
+#define SRST_D_EBC 299
+#define SRST_A_JDEC 300
+#define SRST_H_JDEC 301
+#define SRST_A_JENC 302
+#define SRST_H_JENC 303
+
+/* cru_softrst_con19 */
+#define SRST_A_VENC_NIU 304
+#define SRST_H_VENC_NIU 305
+#define SRST_A_RKVENC 307
+#define SRST_H_RKVENC 308
+#define SRST_RKVENC_CORE 309
+
+/* cru_softrst_con20 */
+#define SRST_A_RKVDEC_NIU 320
+#define SRST_H_RKVDEC_NIU 321
+#define SRST_A_RKVDEC 322
+#define SRST_H_RKVDEC 323
+#define SRST_RKVDEC_CA 324
+#define SRST_RKVDEC_CORE 325
+#define SRST_RKVDEC_HEVC_CA 326
+
+/* cru_softrst_con21 */
+#define SRST_A_BUS_NIU 336
+#define SRST_P_BUS_NIU 338
+#define SRST_P_CAN0 340
+#define SRST_CAN0 341
+#define SRST_P_CAN1 342
+#define SRST_CAN1 343
+#define SRST_P_CAN2 344
+#define SRST_CAN2 345
+#define SRST_P_GPIO1 346
+#define SRST_GPIO1 347
+#define SRST_P_GPIO2 348
+#define SRST_GPIO2 349
+#define SRST_P_GPIO3 350
+#define SRST_GPIO3 351
+
+/* cru_softrst_con22 */
+#define SRST_P_GPIO4 352
+#define SRST_GPIO4 353
+#define SRST_P_I2C1 354
+#define SRST_I2C1 355
+#define SRST_P_I2C2 356
+#define SRST_I2C2 357
+#define SRST_P_I2C3 358
+#define SRST_I2C3 359
+#define SRST_P_I2C4 360
+#define SRST_I2C4 361
+#define SRST_P_I2C5 362
+#define SRST_I2C5 363
+#define SRST_P_OTPC_NS 364
+#define SRST_OTPC_NS_SBPI 365
+#define SRST_OTPC_NS_USR 366
+
+/* cru_softrst_con23 */
+#define SRST_P_PWM1 368
+#define SRST_PWM1 369
+#define SRST_P_PWM2 370
+#define SRST_PWM2 371
+#define SRST_P_PWM3 372
+#define SRST_PWM3 373
+#define SRST_P_SPI0 374
+#define SRST_SPI0 375
+#define SRST_P_SPI1 376
+#define SRST_SPI1 377
+#define SRST_P_SPI2 378
+#define SRST_SPI2 379
+#define SRST_P_SPI3 380
+#define SRST_SPI3 381
+
+/* cru_softrst_con24 */
+#define SRST_P_SARADC 384
+#define SRST_P_TSADC 385
+#define SRST_TSADC 386
+#define SRST_P_TIMER 387
+#define SRST_TIMER0 388
+#define SRST_TIMER1 389
+#define SRST_TIMER2 390
+#define SRST_TIMER3 391
+#define SRST_TIMER4 392
+#define SRST_TIMER5 393
+#define SRST_P_UART1 394
+#define SRST_S_UART1 395
+
+/* cru_softrst_con25 */
+#define SRST_P_UART2 400
+#define SRST_S_UART2 401
+#define SRST_P_UART3 402
+#define SRST_S_UART3 403
+#define SRST_P_UART4 404
+#define SRST_S_UART4 405
+#define SRST_P_UART5 406
+#define SRST_S_UART5 407
+#define SRST_P_UART6 408
+#define SRST_S_UART6 409
+#define SRST_P_UART7 410
+#define SRST_S_UART7 411
+#define SRST_P_UART8 412
+#define SRST_S_UART8 413
+#define SRST_P_UART9 414
+#define SRST_S_UART9 415
+
+/* cru_softrst_con26 */
+#define SRST_P_GRF 416
+#define SRST_P_GRF_VCCIO12 417
+#define SRST_P_GRF_VCCIO34 418
+#define SRST_P_GRF_VCCIO567 419
+#define SRST_P_SCR 420
+#define SRST_P_WDT_NS 421
+#define SRST_T_WDT_NS 422
+#define SRST_P_DFT2APB 423
+#define SRST_A_MCU 426
+#define SRST_P_INTMUX 427
+#define SRST_P_MAILBOX 428
+
+/* cru_softrst_con27 */
+#define SRST_A_TOP_HIGH_NIU 432
+#define SRST_A_TOP_LOW_NIU 433
+#define SRST_H_TOP_NIU 434
+#define SRST_P_TOP_NIU 435
+#define SRST_P_TOP_CRU 438
+#define SRST_P_DDRPHY 439
+#define SRST_DDRPHY 440
+#define SRST_P_MIPICSIPHY 442
+#define SRST_P_MIPIDSIPHY0 443
+#define SRST_P_MIPIDSIPHY1 444
+#define SRST_P_PCIE30PHY 445
+#define SRST_PCIE30PHY 446
+#define SRST_P_PCIE30PHY_GRF 447
+
+/* cru_softrst_con28 */
+#define SRST_P_APB2ASB_LEFT 448
+#define SRST_P_APB2ASB_BOTTOM 449
+#define SRST_P_ASB2APB_LEFT 450
+#define SRST_P_ASB2APB_BOTTOM 451
+#define SRST_P_PIPEPHY0 452
+#define SRST_PIPEPHY0 453
+#define SRST_P_PIPEPHY1 454
+#define SRST_PIPEPHY1 455
+#define SRST_P_PIPEPHY2 456
+#define SRST_PIPEPHY2 457
+#define SRST_P_USB2PHY0_GRF 458
+#define SRST_P_USB2PHY1_GRF 459
+#define SRST_P_CPU_BOOST 460
+#define SRST_CPU_BOOST 461
+#define SRST_P_OTPPHY 462
+#define SRST_OTPPHY 463
+
+/* cru_softrst_con29 */
+#define SRST_USB2PHY0_POR 464
+#define SRST_USB2PHY0_USB3OTG0 465
+#define SRST_USB2PHY0_USB3OTG1 466
+#define SRST_USB2PHY1_POR 467
+#define SRST_USB2PHY1_USB2HOST0 468
+#define SRST_USB2PHY1_USB2HOST1 469
+#define SRST_P_EDPPHY_GRF 470
+#define SRST_TSADCPHY 471
+#define SRST_GMAC0_DELAYLINE 472
+#define SRST_GMAC1_DELAYLINE 473
+#define SRST_OTPC_ARB 474
+#define SRST_P_PIPEPHY0_GRF 475
+#define SRST_P_PIPEPHY1_GRF 476
+#define SRST_P_PIPEPHY2_GRF 477
+
+#endif
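[Note: as a usage illustration for the new rk3568 header, a consumer node combines clock and soft-reset indices from the tables above. A sketch assuming a CRU node labelled "cru" and an "sdmmc0" node — both assumptions here, though the clock-names follow the common dw-mmc convention:

	#include <dt-bindings/clock/rk3568-cru.h>

	&sdmmc0 {
		clocks = <&cru HCLK_SDMMC0>, <&cru CLK_SDMMC0>,
			 <&cru SCLK_SDMMC0_DRV>, <&cru SCLK_SDMMC0_SAMPLE>;
		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
		resets = <&cru SRST_SDMMC0>;
		reset-names = "reset";
	};
]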
diff --git a/include/dt-bindings/clock/rockchip,rv1126-cru.h b/include/dt-bindings/clock/rockchip,rv1126-cru.h
new file mode 100644
index 000000000000..e89a3a5a4a34
--- /dev/null
+++ b/include/dt-bindings/clock/rockchip,rv1126-cru.h
@@ -0,0 +1,632 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2019 Rockchip Electronics Co. Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RV1126_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RV1126_H
+
+/* pmucru-clocks indices */
+
+/* pll clocks */
+#define PLL_GPLL 1
+
+/* sclk (special clocks) */
+#define CLK_OSC0_DIV32K 2
+#define CLK_RTC32K 3
+#define CLK_WIFI_DIV 4
+#define CLK_WIFI_OSC0 5
+#define CLK_WIFI 6
+#define CLK_PMU 7
+#define SCLK_UART1_DIV 8
+#define SCLK_UART1_FRACDIV 9
+#define SCLK_UART1_MUX 10
+#define SCLK_UART1 11
+#define CLK_I2C0 12
+#define CLK_I2C2 13
+#define CLK_CAPTURE_PWM0 14
+#define CLK_PWM0 15
+#define CLK_CAPTURE_PWM1 16
+#define CLK_PWM1 17
+#define CLK_SPI0 18
+#define DBCLK_GPIO0 19
+#define CLK_PMUPVTM 20
+#define CLK_CORE_PMUPVTM 21
+#define CLK_REF12M 22
+#define CLK_USBPHY_OTG_REF 23
+#define CLK_USBPHY_HOST_REF 24
+#define CLK_REF24M 25
+#define CLK_MIPIDSIPHY_REF 26
+
+/* pclk */
+#define PCLK_PDPMU 30
+#define PCLK_PMU 31
+#define PCLK_UART1 32
+#define PCLK_I2C0 33
+#define PCLK_I2C2 34
+#define PCLK_PWM0 35
+#define PCLK_PWM1 36
+#define PCLK_SPI0 37
+#define PCLK_GPIO0 38
+#define PCLK_PMUSGRF 39
+#define PCLK_PMUGRF 40
+#define PCLK_PMUCRU 41
+#define PCLK_CHIPVEROTP 42
+#define PCLK_PDPMU_NIU 43
+#define PCLK_PMUPVTM 44
+#define PCLK_SCRKEYGEN 45
+
+#define CLKPMU_NR_CLKS (PCLK_SCRKEYGEN + 1)
+
+/* cru-clocks indices */
+
+/* pll clocks */
+#define PLL_APLL 1
+#define PLL_DPLL 2
+#define PLL_CPLL 3
+#define PLL_HPLL 4
+
+/* sclk (special clocks) */
+#define ARMCLK 5
+#define USB480M 6
+#define CLK_CORE_CPUPVTM 7
+#define CLK_CPUPVTM 8
+#define CLK_SCR1 9
+#define CLK_SCR1_CORE 10
+#define CLK_SCR1_RTC 11
+#define CLK_SCR1_JTAG 12
+#define SCLK_UART0_DIV 13
+#define SCLK_UART0_FRAC 14
+#define SCLK_UART0_MUX 15
+#define SCLK_UART0 16
+#define SCLK_UART2_DIV 17
+#define SCLK_UART2_FRAC 18
+#define SCLK_UART2_MUX 19
+#define SCLK_UART2 20
+#define SCLK_UART3_DIV 21
+#define SCLK_UART3_FRAC 22
+#define SCLK_UART3_MUX 23
+#define SCLK_UART3 24
+#define SCLK_UART4_DIV 25
+#define SCLK_UART4_FRAC 26
+#define SCLK_UART4_MUX 27
+#define SCLK_UART4 28
+#define SCLK_UART5_DIV 29
+#define SCLK_UART5_FRAC 30
+#define SCLK_UART5_MUX 31
+#define SCLK_UART5 32
+#define CLK_I2C1 33
+#define CLK_I2C3 34
+#define CLK_I2C4 35
+#define CLK_I2C5 36
+#define CLK_SPI1 37
+#define CLK_CAPTURE_PWM2 38
+#define CLK_PWM2 39
+#define DBCLK_GPIO1 40
+#define DBCLK_GPIO2 41
+#define DBCLK_GPIO3 42
+#define DBCLK_GPIO4 43
+#define CLK_SARADC 44
+#define CLK_TIMER0 45
+#define CLK_TIMER1 46
+#define CLK_TIMER2 47
+#define CLK_TIMER3 48
+#define CLK_TIMER4 49
+#define CLK_TIMER5 50
+#define CLK_CAN 51
+#define CLK_NPU_TSADC 52
+#define CLK_NPU_TSADCPHY 53
+#define CLK_CPU_TSADC 54
+#define CLK_CPU_TSADCPHY 55
+#define CLK_CRYPTO_CORE 56
+#define CLK_CRYPTO_PKA 57
+#define MCLK_I2S0_TX_DIV 58
+#define MCLK_I2S0_TX_FRACDIV 59
+#define MCLK_I2S0_TX_MUX 60
+#define MCLK_I2S0_TX 61
+#define MCLK_I2S0_RX_DIV 62
+#define MCLK_I2S0_RX_FRACDIV 63
+#define MCLK_I2S0_RX_MUX 64
+#define MCLK_I2S0_RX 65
+#define MCLK_I2S0_TX_OUT2IO 66
+#define MCLK_I2S0_RX_OUT2IO 67
+#define MCLK_I2S1_DIV 68
+#define MCLK_I2S1_FRACDIV 69
+#define MCLK_I2S1_MUX 70
+#define MCLK_I2S1 71
+#define MCLK_I2S1_OUT2IO 72
+#define MCLK_I2S2_DIV 73
+#define MCLK_I2S2_FRACDIV 74
+#define MCLK_I2S2_MUX 75
+#define MCLK_I2S2 76
+#define MCLK_I2S2_OUT2IO 77
+#define MCLK_PDM 78
+#define SCLK_ADUPWM_DIV 79
+#define SCLK_AUDPWM_FRACDIV 80
+#define SCLK_AUDPWM_MUX 81
+#define SCLK_AUDPWM 82
+#define CLK_ACDCDIG_ADC 83
+#define CLK_ACDCDIG_DAC 84
+#define CLK_ACDCDIG_I2C 85
+#define CLK_VENC_CORE 86
+#define CLK_VDEC_CORE 87
+#define CLK_VDEC_CA 88
+#define CLK_VDEC_HEVC_CA 89
+#define CLK_RGA_CORE 90
+#define CLK_IEP_CORE 91
+#define CLK_ISP_DIV 92
+#define CLK_ISP_NP5 93
+#define CLK_ISP_NUX 94
+#define CLK_ISP 95
+#define CLK_CIF_OUT_DIV 96
+#define CLK_CIF_OUT_FRACDIV 97
+#define CLK_CIF_OUT_MUX 98
+#define CLK_CIF_OUT 99
+#define CLK_MIPICSI_OUT_DIV 100
+#define CLK_MIPICSI_OUT_FRACDIV 101
+#define CLK_MIPICSI_OUT_MUX 102
+#define CLK_MIPICSI_OUT 103
+#define CLK_ISPP_DIV 104
+#define CLK_ISPP_NP5 105
+#define CLK_ISPP_NUX 106
+#define CLK_ISPP 107
+#define CLK_SDMMC 108
+#define SCLK_SDMMC_DRV 109
+#define SCLK_SDMMC_SAMPLE 110
+#define CLK_SDIO 111
+#define SCLK_SDIO_DRV 112
+#define SCLK_SDIO_SAMPLE 113
+#define CLK_EMMC 114
+#define SCLK_EMMC_DRV 115
+#define SCLK_EMMC_SAMPLE 116
+#define CLK_NANDC 117
+#define SCLK_SFC 118
+#define CLK_USBHOST_UTMI_OHCI 119
+#define CLK_USBOTG_REF 120
+#define CLK_GMAC_DIV 121
+#define CLK_GMAC_RGMII_M0 122
+#define CLK_GMAC_SRC_M0 123
+#define CLK_GMAC_RGMII_M1 124
+#define CLK_GMAC_SRC_M1 125
+#define CLK_GMAC_SRC 126
+#define CLK_GMAC_REF 127
+#define CLK_GMAC_TX_SRC 128
+#define CLK_GMAC_TX_DIV5 129
+#define CLK_GMAC_TX_DIV50 130
+#define RGMII_MODE_CLK 131
+#define CLK_GMAC_RX_SRC 132
+#define CLK_GMAC_RX_DIV2 133
+#define CLK_GMAC_RX_DIV20 134
+#define RMII_MODE_CLK 135
+#define CLK_GMAC_TX_RX 136
+#define CLK_GMAC_PTPREF 137
+#define CLK_GMAC_ETHERNET_OUT 138
+#define CLK_DDRPHY 139
+#define CLK_DDR_MON 140
+#define TMCLK_DDR_MON 141
+#define CLK_NPU_DIV 142
+#define CLK_NPU_NP5 143
+#define CLK_CORE_NPU 144
+#define CLK_CORE_NPUPVTM 145
+#define CLK_NPUPVTM 146
+#define SCLK_DDRCLK 147
+#define CLK_OTP 148
+
+/* dclk */
+#define DCLK_DECOM 150
+#define DCLK_VOP_DIV 151
+#define DCLK_VOP_FRACDIV 152
+#define DCLK_VOP_MUX 153
+#define DCLK_VOP 154
+#define DCLK_CIF 155
+#define DCLK_CIFLITE 156
+
+/* aclk */
+#define ACLK_PDBUS 160
+#define ACLK_DMAC 161
+#define ACLK_DCF 162
+#define ACLK_SPINLOCK 163
+#define ACLK_DECOM 164
+#define ACLK_PDCRYPTO 165
+#define ACLK_CRYPTO 166
+#define ACLK_PDVEPU 167
+#define ACLK_VENC 168
+#define ACLK_PDVDEC 169
+#define ACLK_PDJPEG 170
+#define ACLK_VDEC 171
+#define ACLK_JPEG 172
+#define ACLK_PDVO 173
+#define ACLK_RGA 174
+#define ACLK_VOP 175
+#define ACLK_IEP 176
+#define ACLK_PDVI_DIV 177
+#define ACLK_PDVI_NP5 178
+#define ACLK_PDVI 179
+#define ACLK_ISP 180
+#define ACLK_CIF 181
+#define ACLK_CIFLITE 182
+#define ACLK_PDISPP_DIV 183
+#define ACLK_PDISPP_NP5 184
+#define ACLK_PDISPP 185
+#define ACLK_ISPP 186
+#define ACLK_PDPHP 187
+#define ACLK_PDUSB 188
+#define ACLK_USBOTG 189
+#define ACLK_PDGMAC 190
+#define ACLK_GMAC 191
+#define ACLK_PDNPU_DIV 192
+#define ACLK_PDNPU_NP5 193
+#define ACLK_PDNPU 194
+#define ACLK_NPU 195
+
+/* hclk */
+#define HCLK_PDCORE_NIU 200
+#define HCLK_PDUSB 201
+#define HCLK_PDCRYPTO 202
+#define HCLK_CRYPTO 203
+#define HCLK_PDAUDIO 204
+#define HCLK_I2S0 205
+#define HCLK_I2S1 206
+#define HCLK_I2S2 207
+#define HCLK_PDM 208
+#define HCLK_AUDPWM 209
+#define HCLK_PDVEPU 210
+#define HCLK_VENC 211
+#define HCLK_PDVDEC 212
+#define HCLK_PDJPEG 213
+#define HCLK_VDEC 214
+#define HCLK_JPEG 215
+#define HCLK_PDVO 216
+#define HCLK_RGA 217
+#define HCLK_VOP 218
+#define HCLK_IEP 219
+#define HCLK_PDVI 220
+#define HCLK_ISP 221
+#define HCLK_CIF 222
+#define HCLK_CIFLITE 223
+#define HCLK_PDISPP 224
+#define HCLK_ISPP 225
+#define HCLK_PDPHP 226
+#define HCLK_PDSDMMC 227
+#define HCLK_SDMMC 228
+#define HCLK_PDSDIO 229
+#define HCLK_SDIO 230
+#define HCLK_PDNVM 231
+#define HCLK_EMMC 232
+#define HCLK_NANDC 233
+#define HCLK_SFC 234
+#define HCLK_SFCXIP 235
+#define HCLK_PDBUS 236
+#define HCLK_USBHOST 237
+#define HCLK_USBHOST_ARB 238
+#define HCLK_PDNPU 239
+#define HCLK_NPU 240
+
+/* pclk */
+#define PCLK_CPUPVTM 245
+#define PCLK_PDBUS 246
+#define PCLK_DCF 247
+#define PCLK_WDT 248
+#define PCLK_MAILBOX 249
+#define PCLK_UART0 250
+#define PCLK_UART2 251
+#define PCLK_UART3 252
+#define PCLK_UART4 253
+#define PCLK_UART5 254
+#define PCLK_I2C1 255
+#define PCLK_I2C3 256
+#define PCLK_I2C4 257
+#define PCLK_I2C5 258
+#define PCLK_SPI1 259
+#define PCLK_PWM2 261
+#define PCLK_GPIO1 262
+#define PCLK_GPIO2 263
+#define PCLK_GPIO3 264
+#define PCLK_GPIO4 265
+#define PCLK_SARADC 266
+#define PCLK_TIMER 267
+#define PCLK_DECOM 268
+#define PCLK_CAN 269
+#define PCLK_NPU_TSADC 270
+#define PCLK_CPU_TSADC 271
+#define PCLK_ACDCDIG 272
+#define PCLK_PDVO 273
+#define PCLK_DSIHOST 274
+#define PCLK_PDVI 275
+#define PCLK_CSIHOST 276
+#define PCLK_PDGMAC 277
+#define PCLK_GMAC 278
+#define PCLK_PDDDR 279
+#define PCLK_DDR_MON 280
+#define PCLK_PDNPU 281
+#define PCLK_NPUPVTM 282
+#define PCLK_PDTOP 283
+#define PCLK_TOPCRU 284
+#define PCLK_TOPGRF 285
+#define PCLK_CPUEMADET 286
+#define PCLK_DDRPHY 287
+#define PCLK_DSIPHY 289
+#define PCLK_CSIPHY0 290
+#define PCLK_CSIPHY1 291
+#define PCLK_USBPHY_HOST 292
+#define PCLK_USBPHY_OTG 293
+#define PCLK_OTP 294
+
+#define CLK_NR_CLKS (PCLK_OTP + 1)
+
+/* pmu soft-reset indices */
+
+/* pmu_cru_softrst_con0 */
+#define SRST_PDPMU_NIU_P 0
+#define SRST_PMU_SGRF_P 1
+#define SRST_PMU_SGRF_REMAP_P 2
+#define SRST_I2C0_P 3
+#define SRST_I2C0 4
+#define SRST_I2C2_P 7
+#define SRST_I2C2 8
+#define SRST_UART1_P 9
+#define SRST_UART1 10
+#define SRST_PWM0_P 11
+#define SRST_PWM0 12
+#define SRST_PWM1_P 13
+#define SRST_PWM1 14
+#define SRST_DDR_FAIL_SAFE 15
+
+/* pmu_cru_softrst_con1 */
+#define SRST_GPIO0_P 17
+#define SRST_GPIO0_DB 18
+#define SRST_SPI0_P 19
+#define SRST_SPI0 20
+#define SRST_PMUGRF_P 21
+#define SRST_CHIPVEROTP_P 22
+#define SRST_PMUPVTM 24
+#define SRST_PMUPVTM_P 25
+#define SRST_PMUCRU_P 30
+
+/* soft-reset indices */
+
+/* cru_softrst_con0 */
+#define SRST_CORE0_PO 0
+#define SRST_CORE1_PO 1
+#define SRST_CORE2_PO 2
+#define SRST_CORE3_PO 3
+#define SRST_CORE0 4
+#define SRST_CORE1 5
+#define SRST_CORE2 6
+#define SRST_CORE3 7
+#define SRST_CORE0_DBG 8
+#define SRST_CORE1_DBG 9
+#define SRST_CORE2_DBG 10
+#define SRST_CORE3_DBG 11
+#define SRST_NL2 12
+#define SRST_CORE_NIU_A 13
+#define SRST_DBG_DAPLITE_P 14
+#define SRST_DAPLITE_P 15
+
+/* cru_softrst_con1 */
+#define SRST_PDBUS_NIU1_A 16
+#define SRST_PDBUS_NIU1_H 17
+#define SRST_PDBUS_NIU1_P 18
+#define SRST_PDBUS_NIU2_A 19
+#define SRST_PDBUS_NIU2_H 20
+#define SRST_PDBUS_NIU3_A 21
+#define SRST_PDBUS_NIU3_H 22
+#define SRST_PDBUS_HOLD_NIU1_A 23
+#define SRST_DBG_NIU_P 24
+#define SRST_PDCORE_NIIU_H 25
+#define SRST_MUC_NIU 26
+#define SRST_DCF_A 29
+#define SRST_DCF_P 30
+#define SRST_SYSTEM_SRAM_A 31
+
+/* cru_softrst_con2 */
+#define SRST_I2C1_P 32
+#define SRST_I2C1 33
+#define SRST_I2C3_P 34
+#define SRST_I2C3 35
+#define SRST_I2C4_P 36
+#define SRST_I2C4 37
+#define SRST_I2C5_P 38
+#define SRST_I2C5 39
+#define SRST_SPI1_P 40
+#define SRST_SPI1 41
+#define SRST_MCU_CORE 42
+#define SRST_PWM2_P 44
+#define SRST_PWM2 45
+#define SRST_SPINLOCK_A 46
+
+/* cru_softrst_con3 */
+#define SRST_UART0_P 48
+#define SRST_UART0 49
+#define SRST_UART2_P 50
+#define SRST_UART2 51
+#define SRST_UART3_P 52
+#define SRST_UART3 53
+#define SRST_UART4_P 54
+#define SRST_UART4 55
+#define SRST_UART5_P 56
+#define SRST_UART5 57
+#define SRST_WDT_P 58
+#define SRST_SARADC_P 59
+#define SRST_GRF_P 61
+#define SRST_TIMER_P 62
+#define SRST_MAILBOX_P 63
+
+/* cru_softrst_con4 */
+#define SRST_TIMER0 64
+#define SRST_TIMER1 65
+#define SRST_TIMER2 66
+#define SRST_TIMER3 67
+#define SRST_TIMER4 68
+#define SRST_TIMER5 69
+#define SRST_INTMUX_P 70
+#define SRST_GPIO1_P 72
+#define SRST_GPIO1_DB 73
+#define SRST_GPIO2_P 74
+#define SRST_GPIO2_DB 75
+#define SRST_GPIO3_P 76
+#define SRST_GPIO3_DB 77
+#define SRST_GPIO4_P 78
+#define SRST_GPIO4_DB 79
+
+/* cru_softrst_con5 */
+#define SRST_CAN_P 80
+#define SRST_CAN 81
+#define SRST_DECOM_A 85
+#define SRST_DECOM_P 86
+#define SRST_DECOM_D 87
+#define SRST_PDCRYPTO_NIU_A 88
+#define SRST_PDCRYPTO_NIU_H 89
+#define SRST_CRYPTO_A 90
+#define SRST_CRYPTO_H 91
+#define SRST_CRYPTO_CORE 92
+#define SRST_CRYPTO_PKA 93
+#define SRST_SGRF_P 95
+
+/* cru_softrst_con6 */
+#define SRST_PDAUDIO_NIU_H 96
+#define SRST_PDAUDIO_NIU_P 97
+#define SRST_I2S0_H 98
+#define SRST_I2S0_TX_M 99
+#define SRST_I2S0_RX_M 100
+#define SRST_I2S1_H 101
+#define SRST_I2S1_M 102
+#define SRST_I2S2_H 103
+#define SRST_I2S2_M 104
+#define SRST_PDM_H 105
+#define SRST_PDM_M 106
+#define SRST_AUDPWM_H 107
+#define SRST_AUDPWM 108
+#define SRST_ACDCDIG_P 109
+#define SRST_ACDCDIG 110
+
+/* cru_softrst_con7 */
+#define SRST_PDVEPU_NIU_A 112
+#define SRST_PDVEPU_NIU_H 113
+#define SRST_VENC_A 114
+#define SRST_VENC_H 115
+#define SRST_VENC_CORE 116
+#define SRST_PDVDEC_NIU_A 117
+#define SRST_PDVDEC_NIU_H 118
+#define SRST_VDEC_A 119
+#define SRST_VDEC_H 120
+#define SRST_VDEC_CORE 121
+#define SRST_VDEC_CA 122
+#define SRST_VDEC_HEVC_CA 123
+#define SRST_PDJPEG_NIU_A 124
+#define SRST_PDJPEG_NIU_H 125
+#define SRST_JPEG_A 126
+#define SRST_JPEG_H 127
+
+/* cru_softrst_con8 */
+#define SRST_PDVO_NIU_A 128
+#define SRST_PDVO_NIU_H 129
+#define SRST_PDVO_NIU_P 130
+#define SRST_RGA_A 131
+#define SRST_RGA_H 132
+#define SRST_RGA_CORE 133
+#define SRST_VOP_A 134
+#define SRST_VOP_H 135
+#define SRST_VOP_D 136
+#define SRST_TXBYTEHS_DSIHOST 137
+#define SRST_DSIHOST_P 138
+#define SRST_IEP_A 139
+#define SRST_IEP_H 140
+#define SRST_IEP_CORE 141
+#define SRST_ISP_RX_P 142
+
+/* cru_softrst_con9 */
+#define SRST_PDVI_NIU_A 144
+#define SRST_PDVI_NIU_H 145
+#define SRST_PDVI_NIU_P 146
+#define SRST_ISP 147
+#define SRST_CIF_A 148
+#define SRST_CIF_H 149
+#define SRST_CIF_D 150
+#define SRST_CIF_P 151
+#define SRST_CIF_I 152
+#define SRST_CIF_RX_P 153
+#define SRST_PDISPP_NIU_A 154
+#define SRST_PDISPP_NIU_H 155
+#define SRST_ISPP_A 156
+#define SRST_ISPP_H 157
+#define SRST_ISPP 158
+#define SRST_CSIHOST_P 159
+
+/* cru_softrst_con10 */
+#define SRST_PDPHPMID_NIU_A 160
+#define SRST_PDPHPMID_NIU_H 161
+#define SRST_PDNVM_NIU_H 163
+#define SRST_SDMMC_H 164
+#define SRST_SDIO_H 165
+#define SRST_EMMC_H 166
+#define SRST_SFC_H 167
+#define SRST_SFCXIP_H 168
+#define SRST_SFC 169
+#define SRST_NANDC_H 170
+#define SRST_NANDC 171
+#define SRST_PDSDMMC_H 173
+#define SRST_PDSDIO_H 174
+
+/* cru_softrst_con11 */
+#define SRST_PDUSB_NIU_A 176
+#define SRST_PDUSB_NIU_H 177
+#define SRST_USBHOST_H 178
+#define SRST_USBHOST_ARB_H 179
+#define SRST_USBHOST_UTMI 180
+#define SRST_USBOTG_A 181
+#define SRST_USBPHY_OTG_P 182
+#define SRST_USBPHY_HOST_P 183
+#define SRST_USBPHYPOR_OTG 184
+#define SRST_USBPHYPOR_HOST 185
+#define SRST_PDGMAC_NIU_A 188
+#define SRST_PDGMAC_NIU_P 189
+#define SRST_GMAC_A 190
+
+/* cru_softrst_con12 */
+#define SRST_DDR_DFICTL_P 193
+#define SRST_DDR_MON_P 194
+#define SRST_DDR_STANDBY_P 195
+#define SRST_DDR_GRF_P 196
+#define SRST_DDR_MSCH_P 197
+#define SRST_DDR_SPLIT_A 198
+#define SRST_DDR_MSCH 199
+#define SRST_DDR_DFICTL 202
+#define SRST_DDR_STANDBY 203
+#define SRST_NPUMCU_NIU 205
+#define SRST_DDRPHY_P 206
+#define SRST_DDRPHY 207
+
+/* cru_softrst_con13 */
+#define SRST_PDNPU_NIU_A 208
+#define SRST_PDNPU_NIU_H 209
+#define SRST_PDNPU_NIU_P 210
+#define SRST_NPU_A 211
+#define SRST_NPU_H 212
+#define SRST_NPU 213
+#define SRST_NPUPVTM_P 214
+#define SRST_NPUPVTM 215
+#define SRST_NPU_TSADC_P 216
+#define SRST_NPU_TSADC 217
+#define SRST_NPU_TSADCPHY 218
+#define SRST_CIFLITE_A 220
+#define SRST_CIFLITE_H 221
+#define SRST_CIFLITE_D 222
+#define SRST_CIFLITE_RX_P 223
+
+/* cru_softrst_con14 */
+#define SRST_TOPNIU_P 224
+#define SRST_TOPCRU_P 225
+#define SRST_TOPGRF_P 226
+#define SRST_CPUEMADET_P 227
+#define SRST_CSIPHY0_P 228
+#define SRST_CSIPHY1_P 229
+#define SRST_DSIPHY_P 230
+#define SRST_CPU_TSADC_P 232
+#define SRST_CPU_TSADC 233
+#define SRST_CPU_TSADCPHY 234
+#define SRST_CPUPVTM_P 235
+#define SRST_CPUPVTM 236
+
+#endif
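[Note: a matching rv1126 consumer sketch, again with hypothetical node and label names; the clock-names follow the usual DesignWare UART convention, but nothing here is mandated by this patch:

	#include <dt-bindings/clock/rockchip,rv1126-cru.h>

	&uart2 {
		clocks = <&cru SCLK_UART2>, <&cru PCLK_UART2>;
		clock-names = "baudclk", "apb_pclk";
	};
]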
diff --git a/include/dt-bindings/clock/samsung,exynosautov9.h b/include/dt-bindings/clock/samsung,exynosautov9.h
new file mode 100644
index 000000000000..42133af6d6b9
--- /dev/null
+++ b/include/dt-bindings/clock/samsung,exynosautov9.h
@@ -0,0 +1,367 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Samsung Electronics Co., Ltd.
+ * Author: Chanho Park <chanho61.park@samsung.com>
+ *
+ * Device Tree binding constants for Exynos Auto V9 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOSAUTOV9_H
+#define _DT_BINDINGS_CLOCK_EXYNOSAUTOV9_H
+
+/* CMU_TOP */
+#define FOUT_SHARED0_PLL 1
+#define FOUT_SHARED1_PLL 2
+#define FOUT_SHARED2_PLL 3
+#define FOUT_SHARED3_PLL 4
+#define FOUT_SHARED4_PLL 5
+
+/* MUX in CMU_TOP */
+#define MOUT_SHARED0_PLL 6
+#define MOUT_SHARED1_PLL 7
+#define MOUT_SHARED2_PLL 8
+#define MOUT_SHARED3_PLL 9
+#define MOUT_SHARED4_PLL 10
+#define MOUT_CLKCMU_CMU_BOOST 11
+#define MOUT_CLKCMU_CMU_CMUREF 12
+#define MOUT_CLKCMU_ACC_BUS 13
+#define MOUT_CLKCMU_APM_BUS 14
+#define MOUT_CLKCMU_AUD_CPU 15
+#define MOUT_CLKCMU_AUD_BUS 16
+#define MOUT_CLKCMU_BUSC_BUS 17
+#define MOUT_CLKCMU_BUSMC_BUS 19
+#define MOUT_CLKCMU_CORE_BUS 20
+#define MOUT_CLKCMU_CPUCL0_SWITCH 21
+#define MOUT_CLKCMU_CPUCL0_CLUSTER 22
+#define MOUT_CLKCMU_CPUCL1_SWITCH 24
+#define MOUT_CLKCMU_CPUCL1_CLUSTER 25
+#define MOUT_CLKCMU_DPTX_BUS 26
+#define MOUT_CLKCMU_DPTX_DPGTC 27
+#define MOUT_CLKCMU_DPUM_BUS 28
+#define MOUT_CLKCMU_DPUS0_BUS 29
+#define MOUT_CLKCMU_DPUS1_BUS 30
+#define MOUT_CLKCMU_FSYS0_BUS 31
+#define MOUT_CLKCMU_FSYS0_PCIE 32
+#define MOUT_CLKCMU_FSYS1_BUS 33
+#define MOUT_CLKCMU_FSYS1_USBDRD 34
+#define MOUT_CLKCMU_FSYS1_MMC_CARD 35
+#define MOUT_CLKCMU_FSYS2_BUS 36
+#define MOUT_CLKCMU_FSYS2_UFS_EMBD 37
+#define MOUT_CLKCMU_FSYS2_ETHERNET 38
+#define MOUT_CLKCMU_G2D_G2D 39
+#define MOUT_CLKCMU_G2D_MSCL 40
+#define MOUT_CLKCMU_G3D00_SWITCH 41
+#define MOUT_CLKCMU_G3D01_SWITCH 42
+#define MOUT_CLKCMU_G3D1_SWITCH 43
+#define MOUT_CLKCMU_ISPB_BUS 44
+#define MOUT_CLKCMU_MFC_MFC 45
+#define MOUT_CLKCMU_MFC_WFD 46
+#define MOUT_CLKCMU_MIF_SWITCH 47
+#define MOUT_CLKCMU_MIF_BUSP 48
+#define MOUT_CLKCMU_NPU_BUS 49
+#define MOUT_CLKCMU_PERIC0_BUS 50
+#define MOUT_CLKCMU_PERIC0_IP 51
+#define MOUT_CLKCMU_PERIC1_BUS 52
+#define MOUT_CLKCMU_PERIC1_IP 53
+#define MOUT_CLKCMU_PERIS_BUS 54
+
+/* DIV in CMU_TOP */
+#define DOUT_SHARED0_DIV3 101
+#define DOUT_SHARED0_DIV2 102
+#define DOUT_SHARED1_DIV3 103
+#define DOUT_SHARED1_DIV2 104
+#define DOUT_SHARED1_DIV4 105
+#define DOUT_SHARED2_DIV3 106
+#define DOUT_SHARED2_DIV2 107
+#define DOUT_SHARED2_DIV4 108
+#define DOUT_SHARED4_DIV2 109
+#define DOUT_SHARED4_DIV4 110
+#define DOUT_CLKCMU_CMU_BOOST 111
+#define DOUT_CLKCMU_ACC_BUS 112
+#define DOUT_CLKCMU_APM_BUS 113
+#define DOUT_CLKCMU_AUD_CPU 114
+#define DOUT_CLKCMU_AUD_BUS 115
+#define DOUT_CLKCMU_BUSC_BUS 116
+#define DOUT_CLKCMU_BUSMC_BUS 118
+#define DOUT_CLKCMU_CORE_BUS 119
+#define DOUT_CLKCMU_CPUCL0_SWITCH 120
+#define DOUT_CLKCMU_CPUCL0_CLUSTER 121
+#define DOUT_CLKCMU_CPUCL1_SWITCH 123
+#define DOUT_CLKCMU_CPUCL1_CLUSTER 124
+#define DOUT_CLKCMU_DPTX_BUS 125
+#define DOUT_CLKCMU_DPTX_DPGTC 126
+#define DOUT_CLKCMU_DPUM_BUS 127
+#define DOUT_CLKCMU_DPUS0_BUS 128
+#define DOUT_CLKCMU_DPUS1_BUS 129
+#define DOUT_CLKCMU_FSYS0_BUS 130
+#define DOUT_CLKCMU_FSYS0_PCIE 131
+#define DOUT_CLKCMU_FSYS1_BUS 132
+#define DOUT_CLKCMU_FSYS1_USBDRD 133
+#define DOUT_CLKCMU_FSYS2_BUS 134
+#define DOUT_CLKCMU_FSYS2_UFS_EMBD 135
+#define DOUT_CLKCMU_FSYS2_ETHERNET 136
+#define DOUT_CLKCMU_G2D_G2D 137
+#define DOUT_CLKCMU_G2D_MSCL 138
+#define DOUT_CLKCMU_G3D00_SWITCH 139
+#define DOUT_CLKCMU_G3D01_SWITCH 140
+#define DOUT_CLKCMU_G3D1_SWITCH 141
+#define DOUT_CLKCMU_ISPB_BUS 142
+#define DOUT_CLKCMU_MFC_MFC 143
+#define DOUT_CLKCMU_MFC_WFD 144
+#define DOUT_CLKCMU_MIF_SWITCH 145
+#define DOUT_CLKCMU_MIF_BUSP 146
+#define DOUT_CLKCMU_NPU_BUS 147
+#define DOUT_CLKCMU_PERIC0_BUS 148
+#define DOUT_CLKCMU_PERIC0_IP 149
+#define DOUT_CLKCMU_PERIC1_BUS 150
+#define DOUT_CLKCMU_PERIC1_IP 151
+#define DOUT_CLKCMU_PERIS_BUS 152
+
+/* GAT in CMU_TOP */
+#define GOUT_CLKCMU_CMU_BOOST 201
+#define GOUT_CLKCMU_CPUCL0_BOOST 202
+#define GOUT_CLKCMU_CPUCL1_BOOST 203
+#define GOUT_CLKCMU_CORE_BOOST 204
+#define GOUT_CLKCMU_BUSC_BOOST 205
+#define GOUT_CLKCMU_BUSMC_BOOST 206
+#define GOUT_CLKCMU_MIF_BOOST 207
+#define GOUT_CLKCMU_ACC_BUS 208
+#define GOUT_CLKCMU_APM_BUS 209
+#define GOUT_CLKCMU_AUD_CPU 210
+#define GOUT_CLKCMU_AUD_BUS 211
+#define GOUT_CLKCMU_BUSC_BUS 212
+#define GOUT_CLKCMU_BUSMC_BUS 214
+#define GOUT_CLKCMU_CORE_BUS 215
+#define GOUT_CLKCMU_CPUCL0_SWITCH 216
+#define GOUT_CLKCMU_CPUCL0_CLUSTER 217
+#define GOUT_CLKCMU_CPUCL1_SWITCH 219
+#define GOUT_CLKCMU_CPUCL1_CLUSTER 220
+#define GOUT_CLKCMU_DPTX_BUS 221
+#define GOUT_CLKCMU_DPTX_DPGTC 222
+#define GOUT_CLKCMU_DPUM_BUS 223
+#define GOUT_CLKCMU_DPUS0_BUS 224
+#define GOUT_CLKCMU_DPUS1_BUS 225
+#define GOUT_CLKCMU_FSYS0_BUS 226
+#define GOUT_CLKCMU_FSYS0_PCIE 227
+#define GOUT_CLKCMU_FSYS1_BUS 228
+#define GOUT_CLKCMU_FSYS1_USBDRD 229
+#define GOUT_CLKCMU_FSYS1_MMC_CARD 230
+#define GOUT_CLKCMU_FSYS2_BUS 231
+#define GOUT_CLKCMU_FSYS2_UFS_EMBD 232
+#define GOUT_CLKCMU_FSYS2_ETHERNET 233
+#define GOUT_CLKCMU_G2D_G2D 234
+#define GOUT_CLKCMU_G2D_MSCL 235
+#define GOUT_CLKCMU_G3D00_SWITCH 236
+#define GOUT_CLKCMU_G3D01_SWITCH 237
+#define GOUT_CLKCMU_G3D1_SWITCH 238
+#define GOUT_CLKCMU_ISPB_BUS 239
+#define GOUT_CLKCMU_MFC_MFC 240
+#define GOUT_CLKCMU_MFC_WFD 241
+#define GOUT_CLKCMU_MIF_SWITCH 242
+#define GOUT_CLKCMU_MIF_BUSP 243
+#define GOUT_CLKCMU_NPU_BUS 244
+#define GOUT_CLKCMU_PERIC0_BUS 245
+#define GOUT_CLKCMU_PERIC0_IP 246
+#define GOUT_CLKCMU_PERIC1_BUS 247
+#define GOUT_CLKCMU_PERIC1_IP 248
+#define GOUT_CLKCMU_PERIS_BUS 249
+
+#define TOP_NR_CLK 250
+
+/* CMU_BUSMC */
+#define CLK_MOUT_BUSMC_BUS_USER 1
+#define CLK_DOUT_BUSMC_BUSP 2
+#define CLK_GOUT_BUSMC_PDMA0_PCLK 3
+#define CLK_GOUT_BUSMC_SPDMA_PCLK 4
+
+#define BUSMC_NR_CLK 5
+
+/* CMU_CORE */
+#define CLK_MOUT_CORE_BUS_USER 1
+#define CLK_DOUT_CORE_BUSP 2
+#define CLK_GOUT_CORE_CCI_CLK 3
+#define CLK_GOUT_CORE_CCI_PCLK 4
+#define CLK_GOUT_CORE_CMU_CORE_PCLK 5
+
+#define CORE_NR_CLK 6
+
+/* CMU_FSYS0 */
+#define CLK_MOUT_FSYS0_BUS_USER 1
+#define CLK_MOUT_FSYS0_PCIE_USER 2
+#define CLK_GOUT_FSYS0_BUS_PCLK 3
+
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_REFCLK 4
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_REFCLK 5
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_DBI_ACLK 6
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_MSTR_ACLK 7
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_SLV_ACLK 8
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_DBI_ACLK 9
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_MSTR_ACLK 10
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_SLV_ACLK 11
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_PIPE_CLK 12
+#define CLK_GOUT_FSYS0_PCIE_GEN3A_2L0_CLK 13
+#define CLK_GOUT_FSYS0_PCIE_GEN3B_2L0_CLK 14
+
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_REFCLK 15
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_REFCLK 16
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_DBI_ACLK 17
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_MSTR_ACLK 18
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_SLV_ACLK 19
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_DBI_ACLK 20
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_MSTR_ACLK 21
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_SLV_ACLK 22
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_PIPE_CLK 23
+#define CLK_GOUT_FSYS0_PCIE_GEN3A_2L1_CLK 24
+#define CLK_GOUT_FSYS0_PCIE_GEN3B_2L1_CLK 25
+
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_REFCLK 26
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_REFCLK 27
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_DBI_ACLK 28
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_MSTR_ACLK 29
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_SLV_ACLK 30
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_DBI_ACLK 31
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_MSTR_ACLK 32
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_SLV_ACLK 33
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_PIPE_CLK 34
+#define CLK_GOUT_FSYS0_PCIE_GEN3A_4L_CLK 35
+#define CLK_GOUT_FSYS0_PCIE_GEN3B_4L_CLK 36
+
+#define FSYS0_NR_CLK 37
+
+/* CMU_FSYS1 */
+#define FOUT_MMC_PLL 1
+
+#define CLK_MOUT_FSYS1_BUS_USER 2
+#define CLK_MOUT_FSYS1_MMC_PLL 3
+#define CLK_MOUT_FSYS1_MMC_CARD_USER 4
+#define CLK_MOUT_FSYS1_USBDRD_USER 5
+#define CLK_MOUT_FSYS1_MMC_CARD 6
+
+#define CLK_DOUT_FSYS1_MMC_CARD 7
+
+#define CLK_GOUT_FSYS1_PCLK 8
+#define CLK_GOUT_FSYS1_MMC_CARD_SDCLKIN 9
+#define CLK_GOUT_FSYS1_MMC_CARD_ACLK 10
+#define CLK_GOUT_FSYS1_USB20DRD_0_REFCLK 11
+#define CLK_GOUT_FSYS1_USB20DRD_1_REFCLK 12
+#define CLK_GOUT_FSYS1_USB30DRD_0_REFCLK 13
+#define CLK_GOUT_FSYS1_USB30DRD_1_REFCLK 14
+#define CLK_GOUT_FSYS1_USB20_0_ACLK 15
+#define CLK_GOUT_FSYS1_USB20_1_ACLK 16
+#define CLK_GOUT_FSYS1_USB30_0_ACLK 17
+#define CLK_GOUT_FSYS1_USB30_1_ACLK 18
+
+#define FSYS1_NR_CLK 19
+
+/* CMU_FSYS2 */
+#define CLK_MOUT_FSYS2_BUS_USER 1
+#define CLK_MOUT_FSYS2_UFS_EMBD_USER 2
+#define CLK_MOUT_FSYS2_ETHERNET_USER 3
+#define CLK_GOUT_FSYS2_UFS_EMBD0_ACLK 4
+#define CLK_GOUT_FSYS2_UFS_EMBD0_UNIPRO 5
+#define CLK_GOUT_FSYS2_UFS_EMBD1_ACLK 6
+#define CLK_GOUT_FSYS2_UFS_EMBD1_UNIPRO 7
+
+#define FSYS2_NR_CLK 8
+
+/* CMU_PERIC0 */
+#define CLK_MOUT_PERIC0_BUS_USER 1
+#define CLK_MOUT_PERIC0_IP_USER 2
+#define CLK_MOUT_PERIC0_USI00_USI 3
+#define CLK_MOUT_PERIC0_USI01_USI 4
+#define CLK_MOUT_PERIC0_USI02_USI 5
+#define CLK_MOUT_PERIC0_USI03_USI 6
+#define CLK_MOUT_PERIC0_USI04_USI 7
+#define CLK_MOUT_PERIC0_USI05_USI 8
+#define CLK_MOUT_PERIC0_USI_I2C 9
+
+#define CLK_DOUT_PERIC0_USI00_USI 10
+#define CLK_DOUT_PERIC0_USI01_USI 11
+#define CLK_DOUT_PERIC0_USI02_USI 12
+#define CLK_DOUT_PERIC0_USI03_USI 13
+#define CLK_DOUT_PERIC0_USI04_USI 14
+#define CLK_DOUT_PERIC0_USI05_USI 15
+#define CLK_DOUT_PERIC0_USI_I2C 16
+
+#define CLK_GOUT_PERIC0_IPCLK_0 20
+#define CLK_GOUT_PERIC0_IPCLK_1 21
+#define CLK_GOUT_PERIC0_IPCLK_2 22
+#define CLK_GOUT_PERIC0_IPCLK_3 23
+#define CLK_GOUT_PERIC0_IPCLK_4 24
+#define CLK_GOUT_PERIC0_IPCLK_5 25
+#define CLK_GOUT_PERIC0_IPCLK_6 26
+#define CLK_GOUT_PERIC0_IPCLK_7 27
+#define CLK_GOUT_PERIC0_IPCLK_8 28
+#define CLK_GOUT_PERIC0_IPCLK_9 29
+#define CLK_GOUT_PERIC0_IPCLK_10 30
+#define CLK_GOUT_PERIC0_IPCLK_11 31
+#define CLK_GOUT_PERIC0_PCLK_0 32
+#define CLK_GOUT_PERIC0_PCLK_1 33
+#define CLK_GOUT_PERIC0_PCLK_2 34
+#define CLK_GOUT_PERIC0_PCLK_3 35
+#define CLK_GOUT_PERIC0_PCLK_4 36
+#define CLK_GOUT_PERIC0_PCLK_5 37
+#define CLK_GOUT_PERIC0_PCLK_6 38
+#define CLK_GOUT_PERIC0_PCLK_7 39
+#define CLK_GOUT_PERIC0_PCLK_8 40
+#define CLK_GOUT_PERIC0_PCLK_9 41
+#define CLK_GOUT_PERIC0_PCLK_10 42
+#define CLK_GOUT_PERIC0_PCLK_11 43
+
+#define PERIC0_NR_CLK 44
+
+/* CMU_PERIC1 */
+#define CLK_MOUT_PERIC1_BUS_USER 1
+#define CLK_MOUT_PERIC1_IP_USER 2
+#define CLK_MOUT_PERIC1_USI06_USI 3
+#define CLK_MOUT_PERIC1_USI07_USI 4
+#define CLK_MOUT_PERIC1_USI08_USI 5
+#define CLK_MOUT_PERIC1_USI09_USI 6
+#define CLK_MOUT_PERIC1_USI10_USI 7
+#define CLK_MOUT_PERIC1_USI11_USI 8
+#define CLK_MOUT_PERIC1_USI_I2C 9
+
+#define CLK_DOUT_PERIC1_USI06_USI 10
+#define CLK_DOUT_PERIC1_USI07_USI 11
+#define CLK_DOUT_PERIC1_USI08_USI 12
+#define CLK_DOUT_PERIC1_USI09_USI 13
+#define CLK_DOUT_PERIC1_USI10_USI 14
+#define CLK_DOUT_PERIC1_USI11_USI 15
+#define CLK_DOUT_PERIC1_USI_I2C 16
+
+#define CLK_GOUT_PERIC1_IPCLK_0 20
+#define CLK_GOUT_PERIC1_IPCLK_1 21
+#define CLK_GOUT_PERIC1_IPCLK_2 22
+#define CLK_GOUT_PERIC1_IPCLK_3 23
+#define CLK_GOUT_PERIC1_IPCLK_4 24
+#define CLK_GOUT_PERIC1_IPCLK_5 25
+#define CLK_GOUT_PERIC1_IPCLK_6 26
+#define CLK_GOUT_PERIC1_IPCLK_7 27
+#define CLK_GOUT_PERIC1_IPCLK_8 28
+#define CLK_GOUT_PERIC1_IPCLK_9 29
+#define CLK_GOUT_PERIC1_IPCLK_10 30
+#define CLK_GOUT_PERIC1_IPCLK_11 31
+#define CLK_GOUT_PERIC1_PCLK_0 32
+#define CLK_GOUT_PERIC1_PCLK_1 33
+#define CLK_GOUT_PERIC1_PCLK_2 34
+#define CLK_GOUT_PERIC1_PCLK_3 35
+#define CLK_GOUT_PERIC1_PCLK_4 36
+#define CLK_GOUT_PERIC1_PCLK_5 37
+#define CLK_GOUT_PERIC1_PCLK_6 38
+#define CLK_GOUT_PERIC1_PCLK_7 39
+#define CLK_GOUT_PERIC1_PCLK_8 40
+#define CLK_GOUT_PERIC1_PCLK_9 41
+#define CLK_GOUT_PERIC1_PCLK_10 42
+#define CLK_GOUT_PERIC1_PCLK_11 43
+
+#define PERIC1_NR_CLK 44
+
+/* CMU_PERIS */
+#define CLK_MOUT_PERIS_BUS_USER 1
+#define CLK_GOUT_SYSREG_PERIS_PCLK 2
+#define CLK_GOUT_WDT_CLUSTER0 3
+#define CLK_GOUT_WDT_CLUSTER1 4
+
+#define PERIS_NR_CLK 5
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOSAUTOV9_H */
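[Note: for the Exynos Auto V9 constants, leaf gates are referenced through the owning CMU. A hedged example for a serial port fed by CMU_PERIC0, where the node name, the "cmu_peric0" label, and the clock-names are assumptions for illustration:

	#include <dt-bindings/clock/samsung,exynosautov9.h>

	&serial_0 {
		clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PCLK_0>,
			 <&cmu_peric0 CLK_GOUT_PERIC0_IPCLK_0>;
		clock-names = "uart", "clk_uart_baud0";
	};
]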
diff --git a/include/dt-bindings/clock/sifive-fu540-prci.h b/include/dt-bindings/clock/sifive-fu540-prci.h
index 3b21d0522c91..5af372e8385f 100644
--- a/include/dt-bindings/clock/sifive-fu540-prci.h
+++ b/include/dt-bindings/clock/sifive-fu540-prci.h
@@ -10,9 +10,9 @@
/* Clock indexes for use by Device Tree data and the PRCI driver */
-#define PRCI_CLK_COREPLL 0
-#define PRCI_CLK_DDRPLL 1
-#define PRCI_CLK_GEMGXLPLL 2
-#define PRCI_CLK_TLCLK 3
+#define FU540_PRCI_CLK_COREPLL 0
+#define FU540_PRCI_CLK_DDRPLL 1
+#define FU540_PRCI_CLK_GEMGXLPLL 2
+#define FU540_PRCI_CLK_TLCLK 3
#endif
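[Note: since this hunk renames the existing FU540 constants rather than adding new ones, any device tree still using the old names must be updated together with this header. A hypothetical before/after fragment:

	/* before */
	clocks = <&prci PRCI_CLK_TLCLK>;
	/* after */
	clocks = <&prci FU540_PRCI_CLK_TLCLK>;
]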
diff --git a/include/dt-bindings/clock/sifive-fu740-prci.h b/include/dt-bindings/clock/sifive-fu740-prci.h
new file mode 100644
index 000000000000..672bdadbf6c0
--- /dev/null
+++ b/include/dt-bindings/clock/sifive-fu740-prci.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (C) 2019 SiFive, Inc.
+ * Wesley Terpstra
+ * Paul Walmsley
+ * Zong Li
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_SIFIVE_FU740_PRCI_H
+#define __DT_BINDINGS_CLOCK_SIFIVE_FU740_PRCI_H
+
+/* Clock indexes for use by Device Tree data and the PRCI driver */
+
+#define FU740_PRCI_CLK_COREPLL 0
+#define FU740_PRCI_CLK_DDRPLL 1
+#define FU740_PRCI_CLK_GEMGXLPLL 2
+#define FU740_PRCI_CLK_DVFSCOREPLL 3
+#define FU740_PRCI_CLK_HFPCLKPLL 4
+#define FU740_PRCI_CLK_CLTXPLL 5
+#define FU740_PRCI_CLK_TLCLK 6
+#define FU740_PRCI_CLK_PCLK 7
+#define FU740_PRCI_CLK_PCIE_AUX 8
+
+#endif /* __DT_BINDINGS_CLOCK_SIFIVE_FU740_PRCI_H */
diff --git a/include/dt-bindings/clock/sprd,sc9863a-clk.h b/include/dt-bindings/clock/sprd,sc9863a-clk.h
new file mode 100644
index 000000000000..4e030421641f
--- /dev/null
+++ b/include/dt-bindings/clock/sprd,sc9863a-clk.h
@@ -0,0 +1,339 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Unisoc SC9863A platform clocks
+ *
+ * Copyright (C) 2019, Unisoc Communications Inc.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SC9863A_H_
+#define _DT_BINDINGS_CLK_SC9863A_H_
+
+#define CLK_MPLL0_GATE 0
+#define CLK_DPLL0_GATE 1
+#define CLK_LPLL_GATE 2
+#define CLK_GPLL_GATE 3
+#define CLK_DPLL1_GATE 4
+#define CLK_MPLL1_GATE 5
+#define CLK_MPLL2_GATE 6
+#define CLK_ISPPLL_GATE 7
+#define CLK_PMU_APB_NUM (CLK_ISPPLL_GATE + 1)
+
+#define CLK_AUDIO_GATE 0
+#define CLK_RPLL 1
+#define CLK_RPLL_390M 2
+#define CLK_RPLL_260M 3
+#define CLK_RPLL_195M 4
+#define CLK_RPLL_26M 5
+#define CLK_ANLG_PHY_G5_NUM (CLK_RPLL_26M + 1)
+
+#define CLK_TWPLL 0
+#define CLK_TWPLL_768M 1
+#define CLK_TWPLL_384M 2
+#define CLK_TWPLL_192M 3
+#define CLK_TWPLL_96M 4
+#define CLK_TWPLL_48M 5
+#define CLK_TWPLL_24M 6
+#define CLK_TWPLL_12M 7
+#define CLK_TWPLL_512M 8
+#define CLK_TWPLL_256M 9
+#define CLK_TWPLL_128M 10
+#define CLK_TWPLL_64M 11
+#define CLK_TWPLL_307M2 12
+#define CLK_TWPLL_219M4 13
+#define CLK_TWPLL_170M6 14
+#define CLK_TWPLL_153M6 15
+#define CLK_TWPLL_76M8 16
+#define CLK_TWPLL_51M2 17
+#define CLK_TWPLL_38M4 18
+#define CLK_TWPLL_19M2 19
+#define CLK_LPLL 20
+#define CLK_LPLL_409M6 21
+#define CLK_LPLL_245M76 22
+#define CLK_GPLL 23
+#define CLK_ISPPLL 24
+#define CLK_ISPPLL_468M 25
+#define CLK_ANLG_PHY_G1_NUM (CLK_ISPPLL_468M + 1)
+
+#define CLK_DPLL0 0
+#define CLK_DPLL1 1
+#define CLK_DPLL0_933M 2
+#define CLK_DPLL0_622M3 3
+#define CLK_DPLL0_400M 4
+#define CLK_DPLL0_266M7 5
+#define CLK_DPLL0_123M1 6
+#define CLK_DPLL0_50M 7
+#define CLK_ANLG_PHY_G7_NUM (CLK_DPLL0_50M + 1)
+
+#define CLK_MPLL0 0
+#define CLK_MPLL1 1
+#define CLK_MPLL2 2
+#define CLK_MPLL2_675M 3
+#define CLK_ANLG_PHY_G4_NUM (CLK_MPLL2_675M + 1)
+
+#define CLK_AP_APB 0
+#define CLK_AP_CE 1
+#define CLK_NANDC_ECC 2
+#define CLK_NANDC_26M 3
+#define CLK_EMMC_32K 4
+#define CLK_SDIO0_32K 5
+#define CLK_SDIO1_32K 6
+#define CLK_SDIO2_32K 7
+#define CLK_OTG_UTMI 8
+#define CLK_AP_UART0 9
+#define CLK_AP_UART1 10
+#define CLK_AP_UART2 11
+#define CLK_AP_UART3 12
+#define CLK_AP_UART4 13
+#define CLK_AP_I2C0 14
+#define CLK_AP_I2C1 15
+#define CLK_AP_I2C2 16
+#define CLK_AP_I2C3 17
+#define CLK_AP_I2C4 18
+#define CLK_AP_I2C5 19
+#define CLK_AP_I2C6 20
+#define CLK_AP_SPI0 21
+#define CLK_AP_SPI1 22
+#define CLK_AP_SPI2 23
+#define CLK_AP_SPI3 24
+#define CLK_AP_IIS0 25
+#define CLK_AP_IIS1 26
+#define CLK_AP_IIS2 27
+#define CLK_SIM0 28
+#define CLK_SIM0_32K 29
+#define CLK_AP_CLK_NUM (CLK_SIM0_32K + 1)
+
+#define CLK_13M 0
+#define CLK_6M5 1
+#define CLK_4M3 2
+#define CLK_2M 3
+#define CLK_250K 4
+#define CLK_RCO_25M 5
+#define CLK_RCO_4M 6
+#define CLK_RCO_2M 7
+#define CLK_EMC 8
+#define CLK_AON_APB 9
+#define CLK_ADI 10
+#define CLK_AUX0 11
+#define CLK_AUX1 12
+#define CLK_AUX2 13
+#define CLK_PROBE 14
+#define CLK_PWM0 15
+#define CLK_PWM1 16
+#define CLK_PWM2 17
+#define CLK_AON_THM 18
+#define CLK_AUDIF 19
+#define CLK_CPU_DAP 20
+#define CLK_CPU_TS 21
+#define CLK_DJTAG_TCK 22
+#define CLK_EMC_REF 23
+#define CLK_CSSYS 24
+#define CLK_AON_PMU 25
+#define CLK_PMU_26M 26
+#define CLK_AON_TMR 27
+#define CLK_POWER_CPU 28
+#define CLK_AP_AXI 29
+#define CLK_SDIO0_2X 30
+#define CLK_SDIO1_2X 31
+#define CLK_SDIO2_2X 32
+#define CLK_EMMC_2X 33
+#define CLK_DPU 34
+#define CLK_DPU_DPI 35
+#define CLK_OTG_REF 36
+#define CLK_SDPHY_APB 37
+#define CLK_ALG_IO_APB 38
+#define CLK_GPU_CORE 39
+#define CLK_GPU_SOC 40
+#define CLK_MM_EMC 41
+#define CLK_MM_AHB 42
+#define CLK_BPC 43
+#define CLK_DCAM_IF 44
+#define CLK_ISP 45
+#define CLK_JPG 46
+#define CLK_CPP 47
+#define CLK_SENSOR0 48
+#define CLK_SENSOR1 49
+#define CLK_SENSOR2 50
+#define CLK_MM_VEMC 51
+#define CLK_MM_VAHB 52
+#define CLK_VSP 53
+#define CLK_CORE0 54
+#define CLK_CORE1 55
+#define CLK_CORE2 56
+#define CLK_CORE3 57
+#define CLK_CORE4 58
+#define CLK_CORE5 59
+#define CLK_CORE6 60
+#define CLK_CORE7 61
+#define CLK_SCU 62
+#define CLK_ACE 63
+#define CLK_AXI_PERIPH 64
+#define CLK_AXI_ACP 65
+#define CLK_ATB 66
+#define CLK_DEBUG_APB 67
+#define CLK_GIC 68
+#define CLK_PERIPH 69
+#define CLK_AON_CLK_NUM (CLK_VSP + 1)
+
+#define CLK_OTG_EB 0
+#define CLK_DMA_EB 1
+#define CLK_CE_EB 2
+#define CLK_NANDC_EB 3
+#define CLK_SDIO0_EB 4
+#define CLK_SDIO1_EB 5
+#define CLK_SDIO2_EB 6
+#define CLK_EMMC_EB 7
+#define CLK_EMMC_32K_EB 8
+#define CLK_SDIO0_32K_EB 9
+#define CLK_SDIO1_32K_EB 10
+#define CLK_SDIO2_32K_EB 11
+#define CLK_NANDC_26M_EB 12
+#define CLK_DMA_EB2 13
+#define CLK_CE_EB2 14
+#define CLK_AP_AHB_GATE_NUM (CLK_CE_EB2 + 1)
+
+#define CLK_GPIO_EB 0
+#define CLK_PWM0_EB 1
+#define CLK_PWM1_EB 2
+#define CLK_PWM2_EB 3
+#define CLK_PWM3_EB 4
+#define CLK_KPD_EB 5
+#define CLK_AON_SYST_EB 6
+#define CLK_AP_SYST_EB 7
+#define CLK_AON_TMR_EB 8
+#define CLK_EFUSE_EB 9
+#define CLK_EIC_EB 10
+#define CLK_INTC_EB 11
+#define CLK_ADI_EB 12
+#define CLK_AUDIF_EB 13
+#define CLK_AUD_EB 14
+#define CLK_VBC_EB 15
+#define CLK_PIN_EB 16
+#define CLK_AP_WDG_EB 17
+#define CLK_MM_EB 18
+#define CLK_AON_APB_CKG_EB 19
+#define CLK_CA53_TS0_EB 20
+#define CLK_CA53_TS1_EB 21
+#define CLK_CS53_DAP_EB 22
+#define CLK_PMU_EB 23
+#define CLK_THM_EB 24
+#define CLK_AUX0_EB 25
+#define CLK_AUX1_EB 26
+#define CLK_AUX2_EB 27
+#define CLK_PROBE_EB 28
+#define CLK_EMC_REF_EB 29
+#define CLK_CA53_WDG_EB 30
+#define CLK_AP_TMR1_EB 31
+#define CLK_AP_TMR2_EB 32
+#define CLK_DISP_EMC_EB 33
+#define CLK_ZIP_EMC_EB 34
+#define CLK_GSP_EMC_EB 35
+#define CLK_MM_VSP_EB 36
+#define CLK_MDAR_EB 37
+#define CLK_RTC4M0_CAL_EB 38
+#define CLK_RTC4M1_CAL_EB 39
+#define CLK_DJTAG_EB 40
+#define CLK_MBOX_EB 41
+#define CLK_AON_DMA_EB 42
+#define CLK_AON_APB_DEF_EB 43
+#define CLK_CA5_TS0_EB 44
+#define CLK_DBG_EB 45
+#define CLK_DBG_EMC_EB 46
+#define CLK_CROSS_TRIG_EB 47
+#define CLK_SERDES_DPHY_EB 48
+#define CLK_ARCH_RTC_EB 49
+#define CLK_KPD_RTC_EB 50
+#define CLK_AON_SYST_RTC_EB 51
+#define CLK_AP_SYST_RTC_EB 52
+#define CLK_AON_TMR_RTC_EB 53
+#define CLK_AP_TMR0_RTC_EB 54
+#define CLK_EIC_RTC_EB 55
+#define CLK_EIC_RTCDV5_EB 56
+#define CLK_AP_WDG_RTC_EB 57
+#define CLK_CA53_WDG_RTC_EB 58
+#define CLK_THM_RTC_EB 59
+#define CLK_ATHMA_RTC_EB 60
+#define CLK_GTHMA_RTC_EB 61
+#define CLK_ATHMA_RTC_A_EB 62
+#define CLK_GTHMA_RTC_A_EB 63
+#define CLK_AP_TMR1_RTC_EB 64
+#define CLK_AP_TMR2_RTC_EB 65
+#define CLK_DXCO_LC_RTC_EB 66
+#define CLK_BB_CAL_RTC_EB 67
+#define CLK_GNU_EB 68
+#define CLK_DISP_EB 69
+#define CLK_MM_EMC_EB 70
+#define CLK_POWER_CPU_EB 71
+#define CLK_HW_I2C_EB 72
+#define CLK_MM_VSP_EMC_EB 73
+#define CLK_VSP_EB 74
+#define CLK_CSSYS_EB 75
+#define CLK_DMC_EB 76
+#define CLK_ROSC_EB 77
+#define CLK_S_D_CFG_EB 78
+#define CLK_S_D_REF_EB 79
+#define CLK_B_DMA_EB 80
+#define CLK_ANLG_EB 81
+#define CLK_ANLG_APB_EB 82
+#define CLK_BSMTMR_EB 83
+#define CLK_AP_AXI_EB 84
+#define CLK_AP_INTC0_EB 85
+#define CLK_AP_INTC1_EB 86
+#define CLK_AP_INTC2_EB 87
+#define CLK_AP_INTC3_EB 88
+#define CLK_AP_INTC4_EB 89
+#define CLK_AP_INTC5_EB 90
+#define CLK_SCC_EB 91
+#define CLK_DPHY_CFG_EB 92
+#define CLK_DPHY_REF_EB 93
+#define CLK_CPHY_CFG_EB 94
+#define CLK_OTG_REF_EB 95
+#define CLK_SERDES_EB 96
+#define CLK_AON_AP_EMC_EB 97
+#define CLK_AON_APB_GATE_NUM (CLK_AON_AP_EMC_EB + 1)
+
+#define CLK_MAHB_CKG_EB 0
+#define CLK_MDCAM_EB 1
+#define CLK_MISP_EB 2
+#define CLK_MAHBCSI_EB 3
+#define CLK_MCSI_S_EB 4
+#define CLK_MCSI_T_EB 5
+#define CLK_DCAM_AXI_EB 6
+#define CLK_ISP_AXI_EB 7
+#define CLK_MCSI_EB 8
+#define CLK_MCSI_S_CKG_EB 9
+#define CLK_MCSI_T_CKG_EB 10
+#define CLK_SENSOR0_EB 11
+#define CLK_SENSOR1_EB 12
+#define CLK_SENSOR2_EB 13
+#define CLK_MCPHY_CFG_EB 14
+#define CLK_MM_GATE_NUM (CLK_MCPHY_CFG_EB + 1)
+
+#define CLK_MIPI_CSI 0
+#define CLK_MIPI_CSI_S 1
+#define CLK_MIPI_CSI_M 2
+#define CLK_MM_CLK_NUM (CLK_MIPI_CSI_M + 1)
+
+#define CLK_SIM0_EB 0
+#define CLK_IIS0_EB 1
+#define CLK_IIS1_EB 2
+#define CLK_IIS2_EB 3
+#define CLK_SPI0_EB 4
+#define CLK_SPI1_EB 5
+#define CLK_SPI2_EB 6
+#define CLK_I2C0_EB 7
+#define CLK_I2C1_EB 8
+#define CLK_I2C2_EB 9
+#define CLK_I2C3_EB 10
+#define CLK_I2C4_EB 11
+#define CLK_UART0_EB 12
+#define CLK_UART1_EB 13
+#define CLK_UART2_EB 14
+#define CLK_UART3_EB 15
+#define CLK_UART4_EB 16
+#define CLK_SIM0_32K_EB 17
+#define CLK_SPI3_EB 18
+#define CLK_I2C5_EB 19
+#define CLK_I2C6_EB 20
+#define CLK_AP_APB_GATE_NUM (CLK_I2C6_EB + 1)
+
+#endif /* _DT_BINDINGS_CLK_SC9863A_H_ */
diff --git a/include/dt-bindings/clock/sprd,ums512-clk.h b/include/dt-bindings/clock/sprd,ums512-clk.h
new file mode 100644
index 000000000000..4f1d90849944
--- /dev/null
+++ b/include/dt-bindings/clock/sprd,ums512-clk.h
@@ -0,0 +1,397 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Unisoc UMS512 SoC clock identifiers
+ *
+ * Copyright (C) 2022, Unisoc Inc.
+ */
+
+#ifndef _DT_BINDINGS_CLK_UMS512_H_
+#define _DT_BINDINGS_CLK_UMS512_H_
+
+#define CLK_26M_AUD 0
+#define CLK_13M 1
+#define CLK_6M5 2
+#define CLK_4M3 3
+#define CLK_2M 4
+#define CLK_1M 5
+#define CLK_250K 6
+#define CLK_RCO_25M 7
+#define CLK_RCO_4M 8
+#define CLK_RCO_2M 9
+#define CLK_ISPPLL_GATE 10
+#define CLK_DPLL0_GATE 11
+#define CLK_DPLL1_GATE 12
+#define CLK_LPLL_GATE 13
+#define CLK_TWPLL_GATE 14
+#define CLK_GPLL_GATE 15
+#define CLK_RPLL_GATE 16
+#define CLK_CPPLL_GATE 17
+#define CLK_MPLL0_GATE 18
+#define CLK_MPLL1_GATE 19
+#define CLK_MPLL2_GATE 20
+#define CLK_PMU_GATE_NUM (CLK_MPLL2_GATE + 1)
+
+#define CLK_DPLL0 0
+#define CLK_DPLL0_58M31 1
+#define CLK_ANLG_PHY_G0_NUM (CLK_DPLL0_58M31 + 1)
+
+#define CLK_MPLL1 0
+#define CLK_MPLL1_63M38 1
+#define CLK_ANLG_PHY_G2_NUM (CLK_MPLL1_63M38 + 1)
+
+#define CLK_RPLL 0
+#define CLK_AUDIO_GATE 1
+#define CLK_MPLL0 2
+#define CLK_MPLL0_56M88 3
+#define CLK_MPLL2 4
+#define CLK_MPLL2_47M13 5
+#define CLK_ANLG_PHY_G3_NUM (CLK_MPLL2_47M13 + 1)
+
+#define CLK_TWPLL 0
+#define CLK_TWPLL_768M 1
+#define CLK_TWPLL_384M 2
+#define CLK_TWPLL_192M 3
+#define CLK_TWPLL_96M 4
+#define CLK_TWPLL_48M 5
+#define CLK_TWPLL_24M 6
+#define CLK_TWPLL_12M 7
+#define CLK_TWPLL_512M 8
+#define CLK_TWPLL_256M 9
+#define CLK_TWPLL_128M 10
+#define CLK_TWPLL_64M 11
+#define CLK_TWPLL_307M2 12
+#define CLK_TWPLL_219M4 13
+#define CLK_TWPLL_170M6 14
+#define CLK_TWPLL_153M6 15
+#define CLK_TWPLL_76M8 16
+#define CLK_TWPLL_51M2 17
+#define CLK_TWPLL_38M4 18
+#define CLK_TWPLL_19M2 19
+#define CLK_TWPLL_12M29 20
+#define CLK_LPLL 21
+#define CLK_LPLL_614M4 22
+#define CLK_LPLL_409M6 23
+#define CLK_LPLL_245M76 24
+#define CLK_LPLL_30M72 25
+#define CLK_ISPPLL 26
+#define CLK_ISPPLL_468M 27
+#define CLK_ISPPLL_78M 28
+#define CLK_GPLL 29
+#define CLK_GPLL_40M 30
+#define CLK_CPPLL 31
+#define CLK_CPPLL_39M32 32
+#define CLK_ANLG_PHY_GC_NUM (CLK_CPPLL_39M32 + 1)
+
+#define CLK_AP_APB 0
+#define CLK_IPI 1
+#define CLK_AP_UART0 2
+#define CLK_AP_UART1 3
+#define CLK_AP_UART2 4
+#define CLK_AP_I2C0 5
+#define CLK_AP_I2C1 6
+#define CLK_AP_I2C2 7
+#define CLK_AP_I2C3 8
+#define CLK_AP_I2C4 9
+#define CLK_AP_SPI0 10
+#define CLK_AP_SPI1 11
+#define CLK_AP_SPI2 12
+#define CLK_AP_SPI3 13
+#define CLK_AP_IIS0 14
+#define CLK_AP_IIS1 15
+#define CLK_AP_IIS2 16
+#define CLK_AP_SIM 17
+#define CLK_AP_CE 18
+#define CLK_SDIO0_2X 19
+#define CLK_SDIO1_2X 20
+#define CLK_EMMC_2X 21
+#define CLK_VSP 22
+#define CLK_DISPC0 23
+#define CLK_DISPC0_DPI 24
+#define CLK_DSI_APB 25
+#define CLK_DSI_RXESC 26
+#define CLK_DSI_LANEBYTE 27
+#define CLK_VDSP 28
+#define CLK_VDSP_M 29
+#define CLK_AP_CLK_NUM (CLK_VDSP_M + 1)
+
+#define CLK_DSI_EB 0
+#define CLK_DISPC_EB 1
+#define CLK_VSP_EB 2
+#define CLK_VDMA_EB 3
+#define CLK_DMA_PUB_EB 4
+#define CLK_DMA_SEC_EB 5
+#define CLK_IPI_EB 6
+#define CLK_AHB_CKG_EB 7
+#define CLK_BM_CLK_EB 8
+#define CLK_AP_AHB_GATE_NUM (CLK_BM_CLK_EB + 1)
+
+#define CLK_AON_APB 0
+#define CLK_ADI 1
+#define CLK_AUX0 2
+#define CLK_AUX1 3
+#define CLK_AUX2 4
+#define CLK_PROBE 5
+#define CLK_PWM0 6
+#define CLK_PWM1 7
+#define CLK_PWM2 8
+#define CLK_PWM3 9
+#define CLK_EFUSE 10
+#define CLK_UART0 11
+#define CLK_UART1 12
+#define CLK_THM0 13
+#define CLK_THM1 14
+#define CLK_THM2 15
+#define CLK_THM3 16
+#define CLK_AON_I2C 17
+#define CLK_AON_IIS 18
+#define CLK_SCC 19
+#define CLK_APCPU_DAP 20
+#define CLK_APCPU_DAP_MTCK 21
+#define CLK_APCPU_TS 22
+#define CLK_DEBUG_TS 23
+#define CLK_DSI_TEST_S 24
+#define CLK_DJTAG_TCK 25
+#define CLK_DJTAG_TCK_HW 26
+#define CLK_AON_TMR 27
+#define CLK_AON_PMU 28
+#define CLK_DEBOUNCE 29
+#define CLK_APCPU_PMU 30
+#define CLK_TOP_DVFS 31
+#define CLK_OTG_UTMI 32
+#define CLK_OTG_REF 33
+#define CLK_CSSYS 34
+#define CLK_CSSYS_PUB 35
+#define CLK_CSSYS_APB 36
+#define CLK_AP_AXI 37
+#define CLK_AP_MM 38
+#define CLK_SDIO2_2X 39
+#define CLK_ANALOG_IO_APB 40
+#define CLK_DMC_REF_CLK 41
+#define CLK_EMC 42
+#define CLK_USB 43
+#define CLK_26M_PMU 44
+#define CLK_AON_APB_NUM (CLK_26M_PMU + 1)
+
+#define CLK_MM_AHB 0
+#define CLK_MM_MTX 1
+#define CLK_SENSOR0 2
+#define CLK_SENSOR1 3
+#define CLK_SENSOR2 4
+#define CLK_CPP 5
+#define CLK_JPG 6
+#define CLK_FD 7
+#define CLK_DCAM_IF 8
+#define CLK_DCAM_AXI 9
+#define CLK_ISP 10
+#define CLK_MIPI_CSI0 11
+#define CLK_MIPI_CSI1 12
+#define CLK_MIPI_CSI2 13
+#define CLK_MM_CLK_NUM (CLK_MIPI_CSI2 + 1)
+
+#define CLK_RC100M_CAL_EB 0
+#define CLK_DJTAG_TCK_EB 1
+#define CLK_DJTAG_EB 2
+#define CLK_AUX0_EB 3
+#define CLK_AUX1_EB 4
+#define CLK_AUX2_EB 5
+#define CLK_PROBE_EB 6
+#define CLK_MM_EB 7
+#define CLK_GPU_EB 8
+#define CLK_MSPI_EB 9
+#define CLK_APCPU_DAP_EB 10
+#define CLK_AON_CSSYS_EB 11
+#define CLK_CSSYS_APB_EB 12
+#define CLK_CSSYS_PUB_EB 13
+#define CLK_SDPHY_CFG_EB 14
+#define CLK_SDPHY_REF_EB 15
+#define CLK_EFUSE_EB 16
+#define CLK_GPIO_EB 17
+#define CLK_MBOX_EB 18
+#define CLK_KPD_EB 19
+#define CLK_AON_SYST_EB 20
+#define CLK_AP_SYST_EB 21
+#define CLK_AON_TMR_EB 22
+#define CLK_OTG_UTMI_EB 23
+#define CLK_OTG_PHY_EB 24
+#define CLK_SPLK_EB 25
+#define CLK_PIN_EB 26
+#define CLK_ANA_EB 27
+#define CLK_APCPU_TS0_EB 28
+#define CLK_APB_BUSMON_EB 29
+#define CLK_AON_IIS_EB 30
+#define CLK_SCC_EB 31
+#define CLK_THM0_EB 32
+#define CLK_THM1_EB 33
+#define CLK_THM2_EB 34
+#define CLK_ASIM_TOP_EB 35
+#define CLK_I2C_EB 36
+#define CLK_PMU_EB 37
+#define CLK_ADI_EB 38
+#define CLK_EIC_EB 39
+#define CLK_AP_INTC0_EB 40
+#define CLK_AP_INTC1_EB 41
+#define CLK_AP_INTC2_EB 42
+#define CLK_AP_INTC3_EB 43
+#define CLK_AP_INTC4_EB 44
+#define CLK_AP_INTC5_EB 45
+#define CLK_AUDCP_INTC_EB 46
+#define CLK_AP_TMR0_EB 47
+#define CLK_AP_TMR1_EB 48
+#define CLK_AP_TMR2_EB 49
+#define CLK_PWM0_EB 50
+#define CLK_PWM1_EB 51
+#define CLK_PWM2_EB 52
+#define CLK_PWM3_EB 53
+#define CLK_AP_WDG_EB 54
+#define CLK_APCPU_WDG_EB 55
+#define CLK_SERDES_EB 56
+#define CLK_ARCH_RTC_EB 57
+#define CLK_KPD_RTC_EB 58
+#define CLK_AON_SYST_RTC_EB 59
+#define CLK_AP_SYST_RTC_EB 60
+#define CLK_AON_TMR_RTC_EB 61
+#define CLK_EIC_RTC_EB 62
+#define CLK_EIC_RTCDV5_EB 63
+#define CLK_AP_WDG_RTC_EB 64
+#define CLK_AC_WDG_RTC_EB 65
+#define CLK_AP_TMR0_RTC_EB 66
+#define CLK_AP_TMR1_RTC_EB 67
+#define CLK_AP_TMR2_RTC_EB 68
+#define CLK_DCXO_LC_RTC_EB 69
+#define CLK_BB_CAL_RTC_EB 70
+#define CLK_AP_EMMC_RTC_EB 71
+#define CLK_AP_SDIO0_RTC_EB 72
+#define CLK_AP_SDIO1_RTC_EB 73
+#define CLK_AP_SDIO2_RTC_EB 74
+#define CLK_DSI_CSI_TEST_EB 75
+#define CLK_DJTAG_TCK_EN 76
+#define CLK_DPHY_REF_EB 77
+#define CLK_DMC_REF_EB 78
+#define CLK_OTG_REF_EB 79
+#define CLK_TSEN_EB 80
+#define CLK_TMR_EB 81
+#define CLK_RC100M_REF_EB 82
+#define CLK_RC100M_FDK_EB 83
+#define CLK_DEBOUNCE_EB 84
+#define CLK_DET_32K_EB 85
+#define CLK_TOP_CSSYS_EB 86
+#define CLK_AP_AXI_EN 87
+#define CLK_SDIO0_2X_EN 88
+#define CLK_SDIO0_1X_EN 89
+#define CLK_SDIO1_2X_EN 90
+#define CLK_SDIO1_1X_EN 91
+#define CLK_SDIO2_2X_EN 92
+#define CLK_SDIO2_1X_EN 93
+#define CLK_EMMC_2X_EN 94
+#define CLK_EMMC_1X_EN 95
+#define CLK_PLL_TEST_EN 96
+#define CLK_CPHY_CFG_EN 97
+#define CLK_DEBUG_TS_EN 98
+#define CLK_ACCESS_AUD_EN 99
+#define CLK_AON_APB_GATE_NUM (CLK_ACCESS_AUD_EN + 1)
+
+#define CLK_MM_CPP_EB 0
+#define CLK_MM_JPG_EB 1
+#define CLK_MM_DCAM_EB 2
+#define CLK_MM_ISP_EB 3
+#define CLK_MM_CSI2_EB 4
+#define CLK_MM_CSI1_EB 5
+#define CLK_MM_CSI0_EB 6
+#define CLK_MM_CKG_EB 7
+#define CLK_ISP_AHB_EB 8
+#define CLK_MM_DVFS_EB 9
+#define CLK_MM_FD_EB 10
+#define CLK_MM_SENSOR2_EB 11
+#define CLK_MM_SENSOR1_EB 12
+#define CLK_MM_SENSOR0_EB 13
+#define CLK_MM_MIPI_CSI2_EB 14
+#define CLK_MM_MIPI_CSI1_EB 15
+#define CLK_MM_MIPI_CSI0_EB 16
+#define CLK_DCAM_AXI_EB 17
+#define CLK_ISP_AXI_EB 18
+#define CLK_MM_CPHY_EB 19
+#define CLK_MM_GATE_CLK_NUM (CLK_MM_CPHY_EB + 1)
+
+#define CLK_SIM0_EB 0
+#define CLK_IIS0_EB 1
+#define CLK_IIS1_EB 2
+#define CLK_IIS2_EB 3
+#define CLK_APB_REG_EB 4
+#define CLK_SPI0_EB 5
+#define CLK_SPI1_EB 6
+#define CLK_SPI2_EB 7
+#define CLK_SPI3_EB 8
+#define CLK_I2C0_EB 9
+#define CLK_I2C1_EB 10
+#define CLK_I2C2_EB 11
+#define CLK_I2C3_EB 12
+#define CLK_I2C4_EB 13
+#define CLK_UART0_EB 14
+#define CLK_UART1_EB 15
+#define CLK_UART2_EB 16
+#define CLK_SIM0_32K_EB 17
+#define CLK_SPI0_LFIN_EB 18
+#define CLK_SPI1_LFIN_EB 19
+#define CLK_SPI2_LFIN_EB 20
+#define CLK_SPI3_LFIN_EB 21
+#define CLK_SDIO0_EB 22
+#define CLK_SDIO1_EB 23
+#define CLK_SDIO2_EB 24
+#define CLK_EMMC_EB 25
+#define CLK_SDIO0_32K_EB 26
+#define CLK_SDIO1_32K_EB 27
+#define CLK_SDIO2_32K_EB 28
+#define CLK_EMMC_32K_EB 29
+#define CLK_AP_APB_GATE_NUM (CLK_EMMC_32K_EB + 1)
+
+#define CLK_GPU_CORE_EB 0
+#define CLK_GPU_CORE 1
+#define CLK_GPU_MEM_EB 2
+#define CLK_GPU_MEM 3
+#define CLK_GPU_SYS_EB 4
+#define CLK_GPU_SYS 5
+#define CLK_GPU_CLK_NUM (CLK_GPU_SYS + 1)
+
+#define CLK_AUDCP_IIS0_EB 0
+#define CLK_AUDCP_IIS1_EB 1
+#define CLK_AUDCP_IIS2_EB 2
+#define CLK_AUDCP_UART_EB 3
+#define CLK_AUDCP_DMA_CP_EB 4
+#define CLK_AUDCP_DMA_AP_EB 5
+#define CLK_AUDCP_SRC48K_EB 6
+#define CLK_AUDCP_MCDT_EB 7
+#define CLK_AUDCP_VBCIFD_EB 8
+#define CLK_AUDCP_VBC_EB 9
+#define CLK_AUDCP_SPLK_EB 10
+#define CLK_AUDCP_ICU_EB 11
+#define CLK_AUDCP_DMA_AP_ASHB_EB 12
+#define CLK_AUDCP_DMA_CP_ASHB_EB 13
+#define CLK_AUDCP_AUD_EB 14
+#define CLK_AUDCP_VBC_24M_EB 15
+#define CLK_AUDCP_TMR_26M_EB 16
+#define CLK_AUDCP_DVFS_ASHB_EB 17
+#define CLK_AUDCP_AHB_GATE_NUM (CLK_AUDCP_DVFS_ASHB_EB + 1)
+
+#define CLK_AUDCP_WDG_EB 0
+#define CLK_AUDCP_RTC_WDG_EB 1
+#define CLK_AUDCP_TMR0_EB 2
+#define CLK_AUDCP_TMR1_EB 3
+#define CLK_AUDCP_APB_GATE_NUM (CLK_AUDCP_TMR1_EB + 1)
+
+#define CLK_ACORE0 0
+#define CLK_ACORE1 1
+#define CLK_ACORE2 2
+#define CLK_ACORE3 3
+#define CLK_ACORE4 4
+#define CLK_ACORE5 5
+#define CLK_PCORE0 6
+#define CLK_PCORE1 7
+#define CLK_SCU 8
+#define CLK_ACE 9
+#define CLK_PERIPH 10
+#define CLK_GIC 11
+#define CLK_ATB 12
+#define CLK_DEBUG_APB 13
+#define CLK_APCPU_SEC_NUM (CLK_DEBUG_APB + 1)
+
+#endif /* _DT_BINDINGS_CLK_UMS512_H_ */
diff --git a/include/dt-bindings/clock/starfive-jh7100-audio.h b/include/dt-bindings/clock/starfive-jh7100-audio.h
new file mode 100644
index 000000000000..fbb4eae6572b
--- /dev/null
+++ b/include/dt-bindings/clock/starfive-jh7100-audio.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2021 Emil Renner Berthing <kernel@esmil.dk>
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_STARFIVE_JH7100_AUDIO_H__
+#define __DT_BINDINGS_CLOCK_STARFIVE_JH7100_AUDIO_H__
+
+#define JH7100_AUDCLK_ADC_MCLK 0
+#define JH7100_AUDCLK_I2S1_MCLK 1
+#define JH7100_AUDCLK_I2SADC_APB 2
+#define JH7100_AUDCLK_I2SADC_BCLK 3
+#define JH7100_AUDCLK_I2SADC_BCLK_N 4
+#define JH7100_AUDCLK_I2SADC_LRCLK 5
+#define JH7100_AUDCLK_PDM_APB 6
+#define JH7100_AUDCLK_PDM_MCLK 7
+#define JH7100_AUDCLK_I2SVAD_APB 8
+#define JH7100_AUDCLK_SPDIF 9
+#define JH7100_AUDCLK_SPDIF_APB 10
+#define JH7100_AUDCLK_PWMDAC_APB 11
+#define JH7100_AUDCLK_DAC_MCLK 12
+#define JH7100_AUDCLK_I2SDAC_APB 13
+#define JH7100_AUDCLK_I2SDAC_BCLK 14
+#define JH7100_AUDCLK_I2SDAC_BCLK_N 15
+#define JH7100_AUDCLK_I2SDAC_LRCLK 16
+#define JH7100_AUDCLK_I2S1_APB 17
+#define JH7100_AUDCLK_I2S1_BCLK 18
+#define JH7100_AUDCLK_I2S1_BCLK_N 19
+#define JH7100_AUDCLK_I2S1_LRCLK 20
+#define JH7100_AUDCLK_I2SDAC16K_APB 21
+#define JH7100_AUDCLK_APB0_BUS 22
+#define JH7100_AUDCLK_DMA1P_AHB 23
+#define JH7100_AUDCLK_USB_APB 24
+#define JH7100_AUDCLK_USB_LPM 25
+#define JH7100_AUDCLK_USB_STB 26
+#define JH7100_AUDCLK_APB_EN 27
+#define JH7100_AUDCLK_VAD_MEM 28
+
+#define JH7100_AUDCLK_END 29
+
+#endif /* __DT_BINDINGS_CLOCK_STARFIVE_JH7100_AUDIO_H__ */
diff --git a/include/dt-bindings/clock/starfive-jh7100.h b/include/dt-bindings/clock/starfive-jh7100.h
new file mode 100644
index 000000000000..aa0863b9728d
--- /dev/null
+++ b/include/dt-bindings/clock/starfive-jh7100.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2021 Ahmad Fatoum, Pengutronix
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_STARFIVE_JH7100_H__
+#define __DT_BINDINGS_CLOCK_STARFIVE_JH7100_H__
+
+#define JH7100_CLK_CPUNDBUS_ROOT 0
+#define JH7100_CLK_DLA_ROOT 1
+#define JH7100_CLK_DSP_ROOT 2
+#define JH7100_CLK_GMACUSB_ROOT 3
+#define JH7100_CLK_PERH0_ROOT 4
+#define JH7100_CLK_PERH1_ROOT 5
+#define JH7100_CLK_VIN_ROOT 6
+#define JH7100_CLK_VOUT_ROOT 7
+#define JH7100_CLK_AUDIO_ROOT 8
+#define JH7100_CLK_CDECHIFI4_ROOT 9
+#define JH7100_CLK_CDEC_ROOT 10
+#define JH7100_CLK_VOUTBUS_ROOT 11
+#define JH7100_CLK_CPUNBUS_ROOT_DIV 12
+#define JH7100_CLK_DSP_ROOT_DIV 13
+#define JH7100_CLK_PERH0_SRC 14
+#define JH7100_CLK_PERH1_SRC 15
+#define JH7100_CLK_PLL0_TESTOUT 16
+#define JH7100_CLK_PLL1_TESTOUT 17
+#define JH7100_CLK_PLL2_TESTOUT 18
+#define JH7100_CLK_PLL2_REF 19
+#define JH7100_CLK_CPU_CORE 20
+#define JH7100_CLK_CPU_AXI 21
+#define JH7100_CLK_AHB_BUS 22
+#define JH7100_CLK_APB1_BUS 23
+#define JH7100_CLK_APB2_BUS 24
+#define JH7100_CLK_DOM3AHB_BUS 25
+#define JH7100_CLK_DOM7AHB_BUS 26
+#define JH7100_CLK_U74_CORE0 27
+#define JH7100_CLK_U74_CORE1 28
+#define JH7100_CLK_U74_AXI 29
+#define JH7100_CLK_U74RTC_TOGGLE 30
+#define JH7100_CLK_SGDMA2P_AXI 31
+#define JH7100_CLK_DMA2PNOC_AXI 32
+#define JH7100_CLK_SGDMA2P_AHB 33
+#define JH7100_CLK_DLA_BUS 34
+#define JH7100_CLK_DLA_AXI 35
+#define JH7100_CLK_DLANOC_AXI 36
+#define JH7100_CLK_DLA_APB 37
+#define JH7100_CLK_VP6_CORE 38
+#define JH7100_CLK_VP6BUS_SRC 39
+#define JH7100_CLK_VP6_AXI 40
+#define JH7100_CLK_VCDECBUS_SRC 41
+#define JH7100_CLK_VDEC_BUS 42
+#define JH7100_CLK_VDEC_AXI 43
+#define JH7100_CLK_VDECBRG_MAIN 44
+#define JH7100_CLK_VDEC_BCLK 45
+#define JH7100_CLK_VDEC_CCLK 46
+#define JH7100_CLK_VDEC_APB 47
+#define JH7100_CLK_JPEG_AXI 48
+#define JH7100_CLK_JPEG_CCLK 49
+#define JH7100_CLK_JPEG_APB 50
+#define JH7100_CLK_GC300_2X 51
+#define JH7100_CLK_GC300_AHB 52
+#define JH7100_CLK_JPCGC300_AXIBUS 53
+#define JH7100_CLK_GC300_AXI 54
+#define JH7100_CLK_JPCGC300_MAIN 55
+#define JH7100_CLK_VENC_BUS 56
+#define JH7100_CLK_VENC_AXI 57
+#define JH7100_CLK_VENCBRG_MAIN 58
+#define JH7100_CLK_VENC_BCLK 59
+#define JH7100_CLK_VENC_CCLK 60
+#define JH7100_CLK_VENC_APB 61
+#define JH7100_CLK_DDRPLL_DIV2 62
+#define JH7100_CLK_DDRPLL_DIV4 63
+#define JH7100_CLK_DDRPLL_DIV8 64
+#define JH7100_CLK_DDROSC_DIV2 65
+#define JH7100_CLK_DDRC0 66
+#define JH7100_CLK_DDRC1 67
+#define JH7100_CLK_DDRPHY_APB 68
+#define JH7100_CLK_NOC_ROB 69
+#define JH7100_CLK_NOC_COG 70
+#define JH7100_CLK_NNE_AHB 71
+#define JH7100_CLK_NNEBUS_SRC1 72
+#define JH7100_CLK_NNE_BUS 73
+#define JH7100_CLK_NNE_AXI 74
+#define JH7100_CLK_NNENOC_AXI 75
+#define JH7100_CLK_DLASLV_AXI 76
+#define JH7100_CLK_DSPX2C_AXI 77
+#define JH7100_CLK_HIFI4_SRC 78
+#define JH7100_CLK_HIFI4_COREFREE 79
+#define JH7100_CLK_HIFI4_CORE 80
+#define JH7100_CLK_HIFI4_BUS 81
+#define JH7100_CLK_HIFI4_AXI 82
+#define JH7100_CLK_HIFI4NOC_AXI 83
+#define JH7100_CLK_SGDMA1P_BUS 84
+#define JH7100_CLK_SGDMA1P_AXI 85
+#define JH7100_CLK_DMA1P_AXI 86
+#define JH7100_CLK_X2C_AXI 87
+#define JH7100_CLK_USB_BUS 88
+#define JH7100_CLK_USB_AXI 89
+#define JH7100_CLK_USBNOC_AXI 90
+#define JH7100_CLK_USBPHY_ROOTDIV 91
+#define JH7100_CLK_USBPHY_125M 92
+#define JH7100_CLK_USBPHY_PLLDIV25M 93
+#define JH7100_CLK_USBPHY_25M 94
+#define JH7100_CLK_AUDIO_DIV 95
+#define JH7100_CLK_AUDIO_SRC 96
+#define JH7100_CLK_AUDIO_12288 97
+#define JH7100_CLK_VIN_SRC 98
+#define JH7100_CLK_ISP0_BUS 99
+#define JH7100_CLK_ISP0_AXI 100
+#define JH7100_CLK_ISP0NOC_AXI 101
+#define JH7100_CLK_ISPSLV_AXI 102
+#define JH7100_CLK_ISP1_BUS 103
+#define JH7100_CLK_ISP1_AXI 104
+#define JH7100_CLK_ISP1NOC_AXI 105
+#define JH7100_CLK_VIN_BUS 106
+#define JH7100_CLK_VIN_AXI 107
+#define JH7100_CLK_VINNOC_AXI 108
+#define JH7100_CLK_VOUT_SRC 109
+#define JH7100_CLK_DISPBUS_SRC 110
+#define JH7100_CLK_DISP_BUS 111
+#define JH7100_CLK_DISP_AXI 112
+#define JH7100_CLK_DISPNOC_AXI 113
+#define JH7100_CLK_SDIO0_AHB 114
+#define JH7100_CLK_SDIO0_CCLKINT 115
+#define JH7100_CLK_SDIO0_CCLKINT_INV 116
+#define JH7100_CLK_SDIO1_AHB 117
+#define JH7100_CLK_SDIO1_CCLKINT 118
+#define JH7100_CLK_SDIO1_CCLKINT_INV 119
+#define JH7100_CLK_GMAC_AHB 120
+#define JH7100_CLK_GMAC_ROOT_DIV 121
+#define JH7100_CLK_GMAC_PTP_REF 122
+#define JH7100_CLK_GMAC_GTX 123
+#define JH7100_CLK_GMAC_RMII_TX 124
+#define JH7100_CLK_GMAC_RMII_RX 125
+#define JH7100_CLK_GMAC_TX 126
+#define JH7100_CLK_GMAC_TX_INV 127
+#define JH7100_CLK_GMAC_RX_PRE 128
+#define JH7100_CLK_GMAC_RX_INV 129
+#define JH7100_CLK_GMAC_RMII 130
+#define JH7100_CLK_GMAC_TOPHYREF 131
+#define JH7100_CLK_SPI2AHB_AHB 132
+#define JH7100_CLK_SPI2AHB_CORE 133
+#define JH7100_CLK_EZMASTER_AHB 134
+#define JH7100_CLK_E24_AHB 135
+#define JH7100_CLK_E24RTC_TOGGLE 136
+#define JH7100_CLK_QSPI_AHB 137
+#define JH7100_CLK_QSPI_APB 138
+#define JH7100_CLK_QSPI_REF 139
+#define JH7100_CLK_SEC_AHB 140
+#define JH7100_CLK_AES 141
+#define JH7100_CLK_SHA 142
+#define JH7100_CLK_PKA 143
+#define JH7100_CLK_TRNG_APB 144
+#define JH7100_CLK_OTP_APB 145
+#define JH7100_CLK_UART0_APB 146
+#define JH7100_CLK_UART0_CORE 147
+#define JH7100_CLK_UART1_APB 148
+#define JH7100_CLK_UART1_CORE 149
+#define JH7100_CLK_SPI0_APB 150
+#define JH7100_CLK_SPI0_CORE 151
+#define JH7100_CLK_SPI1_APB 152
+#define JH7100_CLK_SPI1_CORE 153
+#define JH7100_CLK_I2C0_APB 154
+#define JH7100_CLK_I2C0_CORE 155
+#define JH7100_CLK_I2C1_APB 156
+#define JH7100_CLK_I2C1_CORE 157
+#define JH7100_CLK_GPIO_APB 158
+#define JH7100_CLK_UART2_APB 159
+#define JH7100_CLK_UART2_CORE 160
+#define JH7100_CLK_UART3_APB 161
+#define JH7100_CLK_UART3_CORE 162
+#define JH7100_CLK_SPI2_APB 163
+#define JH7100_CLK_SPI2_CORE 164
+#define JH7100_CLK_SPI3_APB 165
+#define JH7100_CLK_SPI3_CORE 166
+#define JH7100_CLK_I2C2_APB 167
+#define JH7100_CLK_I2C2_CORE 168
+#define JH7100_CLK_I2C3_APB 169
+#define JH7100_CLK_I2C3_CORE 170
+#define JH7100_CLK_WDTIMER_APB 171
+#define JH7100_CLK_WDT_CORE 172
+#define JH7100_CLK_TIMER0_CORE 173
+#define JH7100_CLK_TIMER1_CORE 174
+#define JH7100_CLK_TIMER2_CORE 175
+#define JH7100_CLK_TIMER3_CORE 176
+#define JH7100_CLK_TIMER4_CORE 177
+#define JH7100_CLK_TIMER5_CORE 178
+#define JH7100_CLK_TIMER6_CORE 179
+#define JH7100_CLK_VP6INTC_APB 180
+#define JH7100_CLK_PWM_APB 181
+#define JH7100_CLK_MSI_APB 182
+#define JH7100_CLK_TEMP_APB 183
+#define JH7100_CLK_TEMP_SENSE 184
+#define JH7100_CLK_SYSERR_APB 185
+
+#define JH7100_CLK_PLL0_OUT 186
+#define JH7100_CLK_PLL1_OUT 187
+#define JH7100_CLK_PLL2_OUT 188
+
+#define JH7100_CLK_END 189
+
+#endif /* __DT_BINDINGS_CLOCK_STARFIVE_JH7100_H__ */
diff --git a/include/dt-bindings/clock/ste-db8500-clkout.h b/include/dt-bindings/clock/ste-db8500-clkout.h
new file mode 100644
index 000000000000..ca07cb2bd1bc
--- /dev/null
+++ b/include/dt-bindings/clock/ste-db8500-clkout.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __STE_CLK_DB8500_CLKOUT_H__
+#define __STE_CLK_DB8500_CLKOUT_H__
+
+#define DB8500_CLKOUT_1 0
+#define DB8500_CLKOUT_2 1
+
+#define DB8500_CLKOUT_SRC_CLK38M 0
+#define DB8500_CLKOUT_SRC_ACLK 1
+#define DB8500_CLKOUT_SRC_SYSCLK 2
+#define DB8500_CLKOUT_SRC_LCDCLK 3
+#define DB8500_CLKOUT_SRC_SDMMCCLK 4
+#define DB8500_CLKOUT_SRC_TVCLK 5
+#define DB8500_CLKOUT_SRC_TIMCLK 6
+#define DB8500_CLKOUT_SRC_CLK009 7
+
+#endif
diff --git a/include/dt-bindings/clock/stm32fx-clock.h b/include/dt-bindings/clock/stm32fx-clock.h
index 1cc89c548578..e5dad050d518 100644
--- a/include/dt-bindings/clock/stm32fx-clock.h
+++ b/include/dt-bindings/clock/stm32fx-clock.h
@@ -7,10 +7,10 @@
*/
/*
- * List of clocks wich are not derived from system clock (SYSCLOCK)
+ * List of clocks which are not derived from system clock (SYSCLOCK)
*
* The index of these clocks is the secondary index of DT bindings
- * (see Documentatoin/devicetree/bindings/clock/st,stm32-rcc.txt)
+ * (see Documentation/devicetree/bindings/clock/st,stm32-rcc.txt)
*
* e.g:
<assigned-clocks = <&rcc 1 CLK_LSE>;
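A slightly fuller, hypothetical consumer fragment using this two-cell scheme (the rtc node is an assumption; following the st,stm32-rcc binding referenced above, the first cell selects the secondary clock bank and the second is the index from this header):

rtc@40002800 {
	/* hypothetical consumer; cell 1 picks the secondary bank,
	 * CLK_LSE is the index listed in this header */
	clocks = <&rcc 1 CLK_LSE>;
};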
diff --git a/include/dt-bindings/clock/stm32mp1-clks.h b/include/dt-bindings/clock/stm32mp1-clks.h
index 4cdaf135829c..25e8cfd43459 100644
--- a/include/dt-bindings/clock/stm32mp1-clks.h
+++ b/include/dt-bindings/clock/stm32mp1-clks.h
@@ -248,4 +248,27 @@
#define STM32MP1_LAST_CLK 232
+/* SCMI clock identifiers */
+#define CK_SCMI_HSE 0
+#define CK_SCMI_HSI 1
+#define CK_SCMI_CSI 2
+#define CK_SCMI_LSE 3
+#define CK_SCMI_LSI 4
+#define CK_SCMI_PLL2_Q 5
+#define CK_SCMI_PLL2_R 6
+#define CK_SCMI_MPU 7
+#define CK_SCMI_AXI 8
+#define CK_SCMI_BSEC 9
+#define CK_SCMI_CRYP1 10
+#define CK_SCMI_GPIOZ 11
+#define CK_SCMI_HASH1 12
+#define CK_SCMI_I2C4 13
+#define CK_SCMI_I2C6 14
+#define CK_SCMI_IWDG1 15
+#define CK_SCMI_RNG1 16
+#define CK_SCMI_RTC 17
+#define CK_SCMI_RTCAPB 18
+#define CK_SCMI_SPI6 19
+#define CK_SCMI_USART1 20
+
#endif /* _DT_BINDINGS_STM32MP1_CLKS_H_ */
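These CK_SCMI_* values form a separate index space from the STM32MP1_* IDs above: they are resolved by the SCMI clock protocol node exposed by the secure firmware, not by the RCC directly. A hypothetical consumer sketch (the scmi_clk label, unit address and clock-names are assumptions):

rtc@5c004000 {
	/* hypothetical consumer; &scmi_clk is the SCMI clock protocol
	 * provider, taking one of the CK_SCMI_* indices above */
	clocks = <&scmi_clk CK_SCMI_RTCAPB>, <&scmi_clk CK_SCMI_RTC>;
	clock-names = "pclk", "rtc_ck";
};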
diff --git a/include/dt-bindings/clock/stm32mp13-clks.h b/include/dt-bindings/clock/stm32mp13-clks.h
new file mode 100644
index 000000000000..02befd25edce
--- /dev/null
+++ b/include/dt-bindings/clock/stm32mp13-clks.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0+ or BSD-3-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2020 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
+ */
+
+#ifndef _DT_BINDINGS_STM32MP13_CLKS_H_
+#define _DT_BINDINGS_STM32MP13_CLKS_H_
+
+/* OSCILLATOR clocks */
+#define CK_HSE 0
+#define CK_CSI 1
+#define CK_LSI 2
+#define CK_LSE 3
+#define CK_HSI 4
+#define CK_HSE_DIV2 5
+
+/* PLL */
+#define PLL1 6
+#define PLL2 7
+#define PLL3 8
+#define PLL4 9
+
+/* ODF */
+#define PLL1_P 10
+#define PLL1_Q 11
+#define PLL1_R 12
+#define PLL2_P 13
+#define PLL2_Q 14
+#define PLL2_R 15
+#define PLL3_P 16
+#define PLL3_Q 17
+#define PLL3_R 18
+#define PLL4_P 19
+#define PLL4_Q 20
+#define PLL4_R 21
+
+#define PCLK1 22
+#define PCLK2 23
+#define PCLK3 24
+#define PCLK4 25
+#define PCLK5 26
+#define PCLK6 27
+
+/* SYSTEM CLOCK */
+#define CK_PER 28
+#define CK_MPU 29
+#define CK_AXI 30
+#define CK_MLAHB 31
+
+/* BASE TIMER */
+#define CK_TIMG1 32
+#define CK_TIMG2 33
+#define CK_TIMG3 34
+
+/* AUX */
+#define RTC 35
+
+/* TRACE & DEBUG clocks */
+#define CK_DBG 36
+#define CK_TRACE 37
+
+/* MCO clocks */
+#define CK_MCO1 38
+#define CK_MCO2 39
+
+/* IP clocks */
+#define SYSCFG 40
+#define VREF 41
+#define DTS 42
+#define PMBCTRL 43
+#define HDP 44
+#define IWDG2 45
+#define STGENRO 46
+#define USART1 47
+#define RTCAPB 48
+#define TZC 49
+#define TZPC 50
+#define IWDG1 51
+#define BSEC 52
+#define DMA1 53
+#define DMA2 54
+#define DMAMUX1 55
+#define DMAMUX2 56
+#define GPIOA 57
+#define GPIOB 58
+#define GPIOC 59
+#define GPIOD 60
+#define GPIOE 61
+#define GPIOF 62
+#define GPIOG 63
+#define GPIOH 64
+#define GPIOI 65
+#define CRYP1 66
+#define HASH1 67
+#define BKPSRAM 68
+#define MDMA 69
+#define CRC1 70
+#define USBH 71
+#define DMA3 72
+#define TSC 73
+#define PKA 74
+#define AXIMC 75
+#define MCE 76
+#define ETH1TX 77
+#define ETH2TX 78
+#define ETH1RX 79
+#define ETH2RX 80
+#define ETH1MAC 81
+#define ETH2MAC 82
+#define ETH1STP 83
+#define ETH2STP 84
+
+/* IP clocks with parents */
+#define SDMMC1_K 85
+#define SDMMC2_K 86
+#define ADC1_K 87
+#define ADC2_K 88
+#define FMC_K 89
+#define QSPI_K 90
+#define RNG1_K 91
+#define USBPHY_K 92
+#define STGEN_K 93
+#define SPDIF_K 94
+#define SPI1_K 95
+#define SPI2_K 96
+#define SPI3_K 97
+#define SPI4_K 98
+#define SPI5_K 99
+#define I2C1_K 100
+#define I2C2_K 101
+#define I2C3_K 102
+#define I2C4_K 103
+#define I2C5_K 104
+#define TIM2_K 105
+#define TIM3_K 106
+#define TIM4_K 107
+#define TIM5_K 108
+#define TIM6_K 109
+#define TIM7_K 110
+#define TIM12_K 111
+#define TIM13_K 112
+#define TIM14_K 113
+#define TIM1_K 114
+#define TIM8_K 115
+#define TIM15_K 116
+#define TIM16_K 117
+#define TIM17_K 118
+#define LPTIM1_K 119
+#define LPTIM2_K 120
+#define LPTIM3_K 121
+#define LPTIM4_K 122
+#define LPTIM5_K 123
+#define USART1_K 124
+#define USART2_K 125
+#define USART3_K 126
+#define UART4_K 127
+#define UART5_K 128
+#define USART6_K 129
+#define UART7_K 130
+#define UART8_K 131
+#define DFSDM_K 132
+#define FDCAN_K 133
+#define SAI1_K 134
+#define SAI2_K 135
+#define ADFSDM_K 136
+#define USBO_K 137
+#define LTDC_PX 138
+#define ETH1CK_K 139
+#define ETH1PTP_K 140
+#define ETH2CK_K 141
+#define ETH2PTP_K 142
+#define DCMIPP_K 143
+#define SAES_K 144
+#define DTS_K 145
+
+/* DDR */
+#define DDRC1 146
+#define DDRC1LP 147
+#define DDRC2 148
+#define DDRC2LP 149
+#define DDRPHYC 150
+#define DDRPHYCLP 151
+#define DDRCAPB 152
+#define DDRCAPBLP 153
+#define AXIDCG 154
+#define DDRPHYCAPB 155
+#define DDRPHYCAPBLP 156
+#define DDRPERFM 157
+
+#define ADC1 158
+#define ADC2 159
+#define SAI1 160
+#define SAI2 161
+
+#define STM32MP1_LAST_CLK 162
+
+/* SCMI clock identifiers */
+#define CK_SCMI_HSE 0
+#define CK_SCMI_HSI 1
+#define CK_SCMI_CSI 2
+#define CK_SCMI_LSE 3
+#define CK_SCMI_LSI 4
+#define CK_SCMI_HSE_DIV2 5
+#define CK_SCMI_PLL2_Q 6
+#define CK_SCMI_PLL2_R 7
+#define CK_SCMI_PLL3_P 8
+#define CK_SCMI_PLL3_Q 9
+#define CK_SCMI_PLL3_R 10
+#define CK_SCMI_PLL4_P 11
+#define CK_SCMI_PLL4_Q 12
+#define CK_SCMI_PLL4_R 13
+#define CK_SCMI_MPU 14
+#define CK_SCMI_AXI 15
+#define CK_SCMI_MLAHB 16
+#define CK_SCMI_CKPER 17
+#define CK_SCMI_PCLK1 18
+#define CK_SCMI_PCLK2 19
+#define CK_SCMI_PCLK3 20
+#define CK_SCMI_PCLK4 21
+#define CK_SCMI_PCLK5 22
+#define CK_SCMI_PCLK6 23
+#define CK_SCMI_CKTIMG1 24
+#define CK_SCMI_CKTIMG2 25
+#define CK_SCMI_CKTIMG3 26
+#define CK_SCMI_RTC 27
+#define CK_SCMI_RTCAPB 28
+
+#endif /* _DT_BINDINGS_STM32MP13_CLKS_H_ */
diff --git a/include/dt-bindings/clock/stratix10-clock.h b/include/dt-bindings/clock/stratix10-clock.h
index 08b98e20b7cc..636498f9e08e 100644
--- a/include/dt-bindings/clock/stratix10-clock.h
+++ b/include/dt-bindings/clock/stratix10-clock.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2017, Intel Corporation
*/
diff --git a/include/dt-bindings/clock/sun20i-d1-ccu.h b/include/dt-bindings/clock/sun20i-d1-ccu.h
new file mode 100644
index 000000000000..e3ac53315e1a
--- /dev/null
+++ b/include/dt-bindings/clock/sun20i-d1-ccu.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/*
+ * Copyright (C) 2020 huangzhenwei@allwinnertech.com
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN20I_D1_CCU_H_
+#define _DT_BINDINGS_CLK_SUN20I_D1_CCU_H_
+
+#define CLK_PLL_CPUX 0
+#define CLK_PLL_DDR0 1
+#define CLK_PLL_PERIPH0_4X 2
+#define CLK_PLL_PERIPH0_2X 3
+#define CLK_PLL_PERIPH0_800M 4
+#define CLK_PLL_PERIPH0 5
+#define CLK_PLL_PERIPH0_DIV3 6
+#define CLK_PLL_VIDEO0_4X 7
+#define CLK_PLL_VIDEO0_2X 8
+#define CLK_PLL_VIDEO0 9
+#define CLK_PLL_VIDEO1_4X 10
+#define CLK_PLL_VIDEO1_2X 11
+#define CLK_PLL_VIDEO1 12
+#define CLK_PLL_VE 13
+#define CLK_PLL_AUDIO0_4X 14
+#define CLK_PLL_AUDIO0_2X 15
+#define CLK_PLL_AUDIO0 16
+#define CLK_PLL_AUDIO1 17
+#define CLK_PLL_AUDIO1_DIV2 18
+#define CLK_PLL_AUDIO1_DIV5 19
+#define CLK_CPUX 20
+#define CLK_CPUX_AXI 21
+#define CLK_CPUX_APB 22
+#define CLK_PSI_AHB 23
+#define CLK_APB0 24
+#define CLK_APB1 25
+#define CLK_MBUS 26
+#define CLK_DE 27
+#define CLK_BUS_DE 28
+#define CLK_DI 29
+#define CLK_BUS_DI 30
+#define CLK_G2D 31
+#define CLK_BUS_G2D 32
+#define CLK_CE 33
+#define CLK_BUS_CE 34
+#define CLK_VE 35
+#define CLK_BUS_VE 36
+#define CLK_BUS_DMA 37
+#define CLK_BUS_MSGBOX0 38
+#define CLK_BUS_MSGBOX1 39
+#define CLK_BUS_MSGBOX2 40
+#define CLK_BUS_SPINLOCK 41
+#define CLK_BUS_HSTIMER 42
+#define CLK_AVS 43
+#define CLK_BUS_DBG 44
+#define CLK_BUS_PWM 45
+#define CLK_BUS_IOMMU 46
+#define CLK_DRAM 47
+#define CLK_MBUS_DMA 48
+#define CLK_MBUS_VE 49
+#define CLK_MBUS_CE 50
+#define CLK_MBUS_TVIN 51
+#define CLK_MBUS_CSI 52
+#define CLK_MBUS_G2D 53
+#define CLK_MBUS_RISCV 54
+#define CLK_BUS_DRAM 55
+#define CLK_MMC0 56
+#define CLK_MMC1 57
+#define CLK_MMC2 58
+#define CLK_BUS_MMC0 59
+#define CLK_BUS_MMC1 60
+#define CLK_BUS_MMC2 61
+#define CLK_BUS_UART0 62
+#define CLK_BUS_UART1 63
+#define CLK_BUS_UART2 64
+#define CLK_BUS_UART3 65
+#define CLK_BUS_UART4 66
+#define CLK_BUS_UART5 67
+#define CLK_BUS_I2C0 68
+#define CLK_BUS_I2C1 69
+#define CLK_BUS_I2C2 70
+#define CLK_BUS_I2C3 71
+#define CLK_SPI0 72
+#define CLK_SPI1 73
+#define CLK_BUS_SPI0 74
+#define CLK_BUS_SPI1 75
+#define CLK_EMAC_25M 76
+#define CLK_BUS_EMAC 77
+#define CLK_IR_TX 78
+#define CLK_BUS_IR_TX 79
+#define CLK_BUS_GPADC 80
+#define CLK_BUS_THS 81
+#define CLK_I2S0 82
+#define CLK_I2S1 83
+#define CLK_I2S2 84
+#define CLK_I2S2_ASRC 85
+#define CLK_BUS_I2S0 86
+#define CLK_BUS_I2S1 87
+#define CLK_BUS_I2S2 88
+#define CLK_SPDIF_TX 89
+#define CLK_SPDIF_RX 90
+#define CLK_BUS_SPDIF 91
+#define CLK_DMIC 92
+#define CLK_BUS_DMIC 93
+#define CLK_AUDIO_DAC 94
+#define CLK_AUDIO_ADC 95
+#define CLK_BUS_AUDIO 96
+#define CLK_USB_OHCI0 97
+#define CLK_USB_OHCI1 98
+#define CLK_BUS_OHCI0 99
+#define CLK_BUS_OHCI1 100
+#define CLK_BUS_EHCI0 101
+#define CLK_BUS_EHCI1 102
+#define CLK_BUS_OTG 103
+#define CLK_BUS_LRADC 104
+#define CLK_BUS_DPSS_TOP 105
+#define CLK_HDMI_24M 106
+#define CLK_HDMI_CEC_32K 107
+#define CLK_HDMI_CEC 108
+#define CLK_BUS_HDMI 109
+#define CLK_MIPI_DSI 110
+#define CLK_BUS_MIPI_DSI 111
+#define CLK_TCON_LCD0 112
+#define CLK_BUS_TCON_LCD0 113
+#define CLK_TCON_TV 114
+#define CLK_BUS_TCON_TV 115
+#define CLK_TVE 116
+#define CLK_BUS_TVE_TOP 117
+#define CLK_BUS_TVE 118
+#define CLK_TVD 119
+#define CLK_BUS_TVD_TOP 120
+#define CLK_BUS_TVD 121
+#define CLK_LEDC 122
+#define CLK_BUS_LEDC 123
+#define CLK_CSI_TOP 124
+#define CLK_CSI_MCLK 125
+#define CLK_BUS_CSI 126
+#define CLK_TPADC 127
+#define CLK_BUS_TPADC 128
+#define CLK_BUS_TZMA 129
+#define CLK_DSP 130
+#define CLK_BUS_DSP_CFG 131
+#define CLK_RISCV 132
+#define CLK_RISCV_AXI 133
+#define CLK_BUS_RISCV_CFG 134
+#define CLK_FANOUT_24M 135
+#define CLK_FANOUT_12M 136
+#define CLK_FANOUT_16M 137
+#define CLK_FANOUT_25M 138
+#define CLK_FANOUT_32K 139
+#define CLK_FANOUT_27M 140
+#define CLK_FANOUT_PCLK 141
+#define CLK_FANOUT0 142
+#define CLK_FANOUT1 143
+#define CLK_FANOUT2 144
+
+#endif /* _DT_BINDINGS_CLK_SUN20I_D1_CCU_H_ */
diff --git a/include/dt-bindings/clock/sun20i-d1-r-ccu.h b/include/dt-bindings/clock/sun20i-d1-r-ccu.h
new file mode 100644
index 000000000000..4c2697fd32b0
--- /dev/null
+++ b/include/dt-bindings/clock/sun20i-d1-r-ccu.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/*
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN20I_D1_R_CCU_H_
+#define _DT_BINDINGS_CLK_SUN20I_D1_R_CCU_H_
+
+#define CLK_R_AHB 0
+
+#define CLK_BUS_R_TIMER 2
+#define CLK_BUS_R_TWD 3
+#define CLK_BUS_R_PPU 4
+#define CLK_R_IR_RX 5
+#define CLK_BUS_R_IR_RX 6
+#define CLK_BUS_R_RTC 7
+#define CLK_BUS_R_CPUCFG 8
+
+#endif /* _DT_BINDINGS_CLK_SUN20I_D1_R_CCU_H_ */
diff --git a/include/dt-bindings/clock/sun50i-a100-ccu.h b/include/dt-bindings/clock/sun50i-a100-ccu.h
new file mode 100644
index 000000000000..28dc36e1a232
--- /dev/null
+++ b/include/dt-bindings/clock/sun50i-a100-ccu.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/*
+ * Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN50I_A100_H_
+#define _DT_BINDINGS_CLK_SUN50I_A100_H_
+
+#define CLK_PLL_PERIPH0 3
+
+#define CLK_CPUX 24
+
+#define CLK_APB1 29
+
+#define CLK_MBUS 31
+#define CLK_DE 32
+#define CLK_BUS_DE 33
+#define CLK_G2D 34
+#define CLK_BUS_G2D 35
+#define CLK_GPU 36
+#define CLK_BUS_GPU 37
+#define CLK_CE 38
+#define CLK_BUS_CE 39
+#define CLK_VE 40
+#define CLK_BUS_VE 41
+#define CLK_BUS_DMA 42
+#define CLK_BUS_MSGBOX 43
+#define CLK_BUS_SPINLOCK 44
+#define CLK_BUS_HSTIMER 45
+#define CLK_AVS 46
+#define CLK_BUS_DBG 47
+#define CLK_BUS_PSI 48
+#define CLK_BUS_PWM 49
+#define CLK_BUS_IOMMU 50
+#define CLK_MBUS_DMA 51
+#define CLK_MBUS_VE 52
+#define CLK_MBUS_CE 53
+#define CLK_MBUS_NAND 54
+#define CLK_MBUS_CSI 55
+#define CLK_MBUS_ISP 56
+#define CLK_MBUS_G2D 57
+
+#define CLK_NAND0 59
+#define CLK_NAND1 60
+#define CLK_BUS_NAND 61
+#define CLK_MMC0 62
+#define CLK_MMC1 63
+#define CLK_MMC2 64
+#define CLK_MMC3 65
+#define CLK_BUS_MMC0 66
+#define CLK_BUS_MMC1 67
+#define CLK_BUS_MMC2 68
+#define CLK_BUS_UART0 69
+#define CLK_BUS_UART1 70
+#define CLK_BUS_UART2 71
+#define CLK_BUS_UART3 72
+#define CLK_BUS_UART4 73
+#define CLK_BUS_I2C0 74
+#define CLK_BUS_I2C1 75
+#define CLK_BUS_I2C2 76
+#define CLK_BUS_I2C3 77
+#define CLK_SPI0 78
+#define CLK_SPI1 79
+#define CLK_SPI2 80
+#define CLK_BUS_SPI0 81
+#define CLK_BUS_SPI1 82
+#define CLK_BUS_SPI2 83
+#define CLK_EMAC_25M 84
+#define CLK_BUS_EMAC 85
+#define CLK_IR_RX 86
+#define CLK_BUS_IR_RX 87
+#define CLK_IR_TX 88
+#define CLK_BUS_IR_TX 89
+#define CLK_BUS_GPADC 90
+#define CLK_BUS_THS 91
+#define CLK_I2S0 92
+#define CLK_I2S1 93
+#define CLK_I2S2 94
+#define CLK_I2S3 95
+#define CLK_BUS_I2S0 96
+#define CLK_BUS_I2S1 97
+#define CLK_BUS_I2S2 98
+#define CLK_BUS_I2S3 99
+#define CLK_SPDIF 100
+#define CLK_BUS_SPDIF 101
+#define CLK_DMIC 102
+#define CLK_BUS_DMIC 103
+#define CLK_AUDIO_DAC 104
+#define CLK_AUDIO_ADC 105
+#define CLK_AUDIO_4X 106
+#define CLK_BUS_AUDIO_CODEC 107
+#define CLK_USB_OHCI0 108
+#define CLK_USB_PHY0 109
+#define CLK_USB_OHCI1 110
+#define CLK_USB_PHY1 111
+#define CLK_BUS_OHCI0 112
+#define CLK_BUS_OHCI1 113
+#define CLK_BUS_EHCI0 114
+#define CLK_BUS_EHCI1 115
+#define CLK_BUS_OTG 116
+#define CLK_BUS_LRADC 117
+#define CLK_BUS_DPSS_TOP0 118
+#define CLK_BUS_DPSS_TOP1 119
+#define CLK_MIPI_DSI 120
+#define CLK_BUS_MIPI_DSI 121
+#define CLK_TCON_LCD 122
+#define CLK_BUS_TCON_LCD 123
+#define CLK_LEDC 124
+#define CLK_BUS_LEDC 125
+#define CLK_CSI_TOP 126
+#define CLK_CSI0_MCLK 127
+#define CLK_CSI1_MCLK 128
+#define CLK_BUS_CSI 129
+#define CLK_CSI_ISP 130
+
+#endif /* _DT_BINDINGS_CLK_SUN50I_A100_H_ */
diff --git a/include/dt-bindings/clock/sun50i-a100-r-ccu.h b/include/dt-bindings/clock/sun50i-a100-r-ccu.h
new file mode 100644
index 000000000000..07312e7264fb
--- /dev/null
+++ b/include/dt-bindings/clock/sun50i-a100-r-ccu.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN50I_A100_R_CCU_H_
+#define _DT_BINDINGS_CLK_SUN50I_A100_R_CCU_H_
+
+#define CLK_R_APB1 2
+
+#define CLK_R_APB1_TIMER 4
+#define CLK_R_APB1_TWD 5
+#define CLK_R_APB1_PWM 6
+#define CLK_R_APB1_BUS_PWM 7
+#define CLK_R_APB1_PPU 8
+#define CLK_R_APB2_UART 9
+#define CLK_R_APB2_I2C0 10
+#define CLK_R_APB2_I2C1 11
+#define CLK_R_APB1_IR 12
+#define CLK_R_APB1_BUS_IR 13
+#define CLK_R_AHB_BUS_RTC 14
+
+#endif /* _DT_BINDINGS_CLK_SUN50I_A100_R_CCU_H_ */
diff --git a/include/dt-bindings/clock/sun50i-a64-ccu.h b/include/dt-bindings/clock/sun50i-a64-ccu.h
index e512a1c9b0fc..175892189e9d 100644
--- a/include/dt-bindings/clock/sun50i-a64-ccu.h
+++ b/include/dt-bindings/clock/sun50i-a64-ccu.h
@@ -113,7 +113,7 @@
#define CLK_USB_OHCI0 91
#define CLK_USB_OHCI1 93
-
+#define CLK_DRAM 94
#define CLK_DRAM_VE 95
#define CLK_DRAM_CSI 96
#define CLK_DRAM_DEINTERLACE 97
@@ -131,7 +131,7 @@
#define CLK_AVS 109
#define CLK_HDMI 110
#define CLK_HDMI_DDC 111
-
+#define CLK_MBUS 112
#define CLK_DSI_DPHY 113
#define CLK_GPU 114
diff --git a/include/dt-bindings/clock/sun50i-h6-r-ccu.h b/include/dt-bindings/clock/sun50i-h6-r-ccu.h
index 76136132a13e..a96087abc86f 100644
--- a/include/dt-bindings/clock/sun50i-h6-r-ccu.h
+++ b/include/dt-bindings/clock/sun50i-h6-r-ccu.h
@@ -21,4 +21,7 @@
#define CLK_IR 11
#define CLK_W1 12
+#define CLK_R_APB2_RSB 13
+#define CLK_R_APB1_RTC 14
+
#endif /* _DT_BINDINGS_CLK_SUN50I_H6_R_CCU_H_ */
diff --git a/include/dt-bindings/clock/sun50i-h616-ccu.h b/include/dt-bindings/clock/sun50i-h616-ccu.h
new file mode 100644
index 000000000000..1191aca53ac6
--- /dev/null
+++ b/include/dt-bindings/clock/sun50i-h616-ccu.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/*
+ * Copyright (C) 2020 Arm Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN50I_H616_H_
+#define _DT_BINDINGS_CLK_SUN50I_H616_H_
+
+#define CLK_PLL_PERIPH0 4
+
+#define CLK_CPUX 21
+
+#define CLK_APB1 26
+
+#define CLK_DE 29
+#define CLK_BUS_DE 30
+#define CLK_DEINTERLACE 31
+#define CLK_BUS_DEINTERLACE 32
+#define CLK_G2D 33
+#define CLK_BUS_G2D 34
+#define CLK_GPU0 35
+#define CLK_BUS_GPU 36
+#define CLK_GPU1 37
+#define CLK_CE 38
+#define CLK_BUS_CE 39
+#define CLK_VE 40
+#define CLK_BUS_VE 41
+#define CLK_BUS_DMA 42
+#define CLK_BUS_HSTIMER 43
+#define CLK_AVS 44
+#define CLK_BUS_DBG 45
+#define CLK_BUS_PSI 46
+#define CLK_BUS_PWM 47
+#define CLK_BUS_IOMMU 48
+
+#define CLK_MBUS_DMA 50
+#define CLK_MBUS_VE 51
+#define CLK_MBUS_CE 52
+#define CLK_MBUS_TS 53
+#define CLK_MBUS_NAND 54
+#define CLK_MBUS_G2D 55
+
+#define CLK_NAND0 57
+#define CLK_NAND1 58
+#define CLK_BUS_NAND 59
+#define CLK_MMC0 60
+#define CLK_MMC1 61
+#define CLK_MMC2 62
+#define CLK_BUS_MMC0 63
+#define CLK_BUS_MMC1 64
+#define CLK_BUS_MMC2 65
+#define CLK_BUS_UART0 66
+#define CLK_BUS_UART1 67
+#define CLK_BUS_UART2 68
+#define CLK_BUS_UART3 69
+#define CLK_BUS_UART4 70
+#define CLK_BUS_UART5 71
+#define CLK_BUS_I2C0 72
+#define CLK_BUS_I2C1 73
+#define CLK_BUS_I2C2 74
+#define CLK_BUS_I2C3 75
+#define CLK_BUS_I2C4 76
+#define CLK_SPI0 77
+#define CLK_SPI1 78
+#define CLK_BUS_SPI0 79
+#define CLK_BUS_SPI1 80
+#define CLK_EMAC_25M 81
+#define CLK_BUS_EMAC0 82
+#define CLK_BUS_EMAC1 83
+#define CLK_TS 84
+#define CLK_BUS_TS 85
+#define CLK_BUS_THS 86
+#define CLK_SPDIF 87
+#define CLK_BUS_SPDIF 88
+#define CLK_DMIC 89
+#define CLK_BUS_DMIC 90
+#define CLK_AUDIO_CODEC_1X 91
+#define CLK_AUDIO_CODEC_4X 92
+#define CLK_BUS_AUDIO_CODEC 93
+#define CLK_AUDIO_HUB 94
+#define CLK_BUS_AUDIO_HUB 95
+#define CLK_USB_OHCI0 96
+#define CLK_USB_PHY0 97
+#define CLK_USB_OHCI1 98
+#define CLK_USB_PHY1 99
+#define CLK_USB_OHCI2 100
+#define CLK_USB_PHY2 101
+#define CLK_USB_OHCI3 102
+#define CLK_USB_PHY3 103
+#define CLK_BUS_OHCI0 104
+#define CLK_BUS_OHCI1 105
+#define CLK_BUS_OHCI2 106
+#define CLK_BUS_OHCI3 107
+#define CLK_BUS_EHCI0 108
+#define CLK_BUS_EHCI1 109
+#define CLK_BUS_EHCI2 110
+#define CLK_BUS_EHCI3 111
+#define CLK_BUS_OTG 112
+#define CLK_BUS_KEYADC 113
+#define CLK_HDMI 114
+#define CLK_HDMI_SLOW 115
+#define CLK_HDMI_CEC 116
+#define CLK_BUS_HDMI 117
+#define CLK_BUS_TCON_TOP 118
+#define CLK_TCON_TV0 119
+#define CLK_TCON_TV1 120
+#define CLK_BUS_TCON_TV0 121
+#define CLK_BUS_TCON_TV1 122
+#define CLK_TVE0 123
+#define CLK_BUS_TVE_TOP 124
+#define CLK_BUS_TVE0 125
+#define CLK_HDCP 126
+#define CLK_BUS_HDCP 127
+#define CLK_PLL_SYSTEM_32K 128
+
+#endif /* _DT_BINDINGS_CLK_SUN50I_H616_H_ */
diff --git a/include/dt-bindings/clock/sun6i-rtc.h b/include/dt-bindings/clock/sun6i-rtc.h
new file mode 100644
index 000000000000..c845493e4d37
--- /dev/null
+++ b/include/dt-bindings/clock/sun6i-rtc.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+
+#ifndef _DT_BINDINGS_CLK_SUN6I_RTC_H_
+#define _DT_BINDINGS_CLK_SUN6I_RTC_H_
+
+#define CLK_OSC32K 0
+#define CLK_OSC32K_FANOUT 1
+#define CLK_IOSC 2
+
+#endif /* _DT_BINDINGS_CLK_SUN6I_RTC_H_ */
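The RTC on these SoCs doubles as a clock provider, so other nodes consume its outputs by the indices above. A hypothetical fragment modelled on the D1 layout (labels and unit address are assumptions):

r_ccu: clock@7010000 {
	/* hypothetical fragment; the RTC exports CLK_OSC32K and CLK_IOSC,
	 * which the PRCM CCU takes as parent inputs */
	clocks = <&osc24M>, <&rtc CLK_OSC32K>, <&rtc CLK_IOSC>;
};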
diff --git a/include/dt-bindings/clock/sun8i-h3-ccu.h b/include/dt-bindings/clock/sun8i-h3-ccu.h
index 30d2d15373a2..5d4ada2c22e6 100644
--- a/include/dt-bindings/clock/sun8i-h3-ccu.h
+++ b/include/dt-bindings/clock/sun8i-h3-ccu.h
@@ -126,7 +126,7 @@
#define CLK_USB_OHCI1 93
#define CLK_USB_OHCI2 94
#define CLK_USB_OHCI3 95
-
+#define CLK_DRAM 96
#define CLK_DRAM_VE 97
#define CLK_DRAM_CSI 98
#define CLK_DRAM_DEINTERLACE 99
diff --git a/include/dt-bindings/clock/sunplus,sp7021-clkc.h b/include/dt-bindings/clock/sunplus,sp7021-clkc.h
new file mode 100644
index 000000000000..cd84321eb2b5
--- /dev/null
+++ b/include/dt-bindings/clock/sunplus,sp7021-clkc.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+#ifndef _DT_BINDINGS_CLOCK_SUNPLUS_SP7021_H
+#define _DT_BINDINGS_CLOCK_SUNPLUS_SP7021_H
+
+/* gates */
+#define CLK_RTC 0
+#define CLK_OTPRX 1
+#define CLK_NOC 2
+#define CLK_BR 3
+#define CLK_SPIFL 4
+#define CLK_PERI0 5
+#define CLK_PERI1 6
+#define CLK_STC0 7
+#define CLK_STC_AV0 8
+#define CLK_STC_AV1 9
+#define CLK_STC_AV2 10
+#define CLK_UA0 11
+#define CLK_UA1 12
+#define CLK_UA2 13
+#define CLK_UA3 14
+#define CLK_UA4 15
+#define CLK_HWUA 16
+#define CLK_DDC0 17
+#define CLK_UADMA 18
+#define CLK_CBDMA0 19
+#define CLK_CBDMA1 20
+#define CLK_SPI_COMBO_0 21
+#define CLK_SPI_COMBO_1 22
+#define CLK_SPI_COMBO_2 23
+#define CLK_SPI_COMBO_3 24
+#define CLK_AUD 25
+#define CLK_USBC0 26
+#define CLK_USBC1 27
+#define CLK_UPHY0 28
+#define CLK_UPHY1 29
+#define CLK_I2CM0 30
+#define CLK_I2CM1 31
+#define CLK_I2CM2 32
+#define CLK_I2CM3 33
+#define CLK_PMC 34
+#define CLK_CARD_CTL0 35
+#define CLK_CARD_CTL1 36
+#define CLK_CARD_CTL4 37
+#define CLK_BCH 38
+#define CLK_DDFCH 39
+#define CLK_CSIIW0 40
+#define CLK_CSIIW1 41
+#define CLK_MIPICSI0 42
+#define CLK_MIPICSI1 43
+#define CLK_HDMI_TX 44
+#define CLK_VPOST 45
+#define CLK_TGEN 46
+#define CLK_DMIX 47
+#define CLK_TCON 48
+#define CLK_GPIO 49
+#define CLK_MAILBOX 50
+#define CLK_SPIND 51
+#define CLK_I2C2CBUS 52
+#define CLK_SEC 53
+#define CLK_DVE 54
+#define CLK_GPOST0 55
+#define CLK_OSD0 56
+#define CLK_DISP_PWM 57
+#define CLK_UADBG 58
+#define CLK_FIO_CTL 59
+#define CLK_FPGA 60
+#define CLK_L2SW 61
+#define CLK_ICM 62
+#define CLK_AXI_GLOBAL 63
+
+/* plls */
+#define PLL_A 64
+#define PLL_E 65
+#define PLL_E_2P5 66
+#define PLL_E_25 67
+#define PLL_E_112P5 68
+#define PLL_F 69
+#define PLL_TV 70
+#define PLL_TV_A 71
+#define PLL_SYS 72
+
+#define CLK_MAX 73
+
+#endif
diff --git a/include/dt-bindings/clock/tegra114-car.h b/include/dt-bindings/clock/tegra114-car.h
index bb5c2c999c05..a93426f008ac 100644
--- a/include/dt-bindings/clock/tegra114-car.h
+++ b/include/dt-bindings/clock/tegra114-car.h
@@ -228,6 +228,8 @@
#define TEGRA114_CLK_CLK_M 201
#define TEGRA114_CLK_CLK_M_DIV2 202
#define TEGRA114_CLK_CLK_M_DIV4 203
+#define TEGRA114_CLK_OSC_DIV2 202
+#define TEGRA114_CLK_OSC_DIV4 203
#define TEGRA114_CLK_PLL_REF 204
#define TEGRA114_CLK_PLL_C 205
#define TEGRA114_CLK_PLL_C_OUT1 206
@@ -270,11 +272,11 @@
#define TEGRA114_CLK_AUDIO3 242
#define TEGRA114_CLK_AUDIO4 243
#define TEGRA114_CLK_SPDIF 244
-#define TEGRA114_CLK_CLK_OUT_1 245
-#define TEGRA114_CLK_CLK_OUT_2 246
-#define TEGRA114_CLK_CLK_OUT_3 247
-#define TEGRA114_CLK_BLINK 248
-/* 249 */
+/* 245 */
+/* 246 */
+/* 247 */
+/* 248 */
+#define TEGRA114_CLK_OSC 249
/* 250 */
/* 251 */
#define TEGRA114_CLK_XUSB_HOST_SRC 252
@@ -333,9 +335,9 @@
#define TEGRA114_CLK_AUDIO3_MUX 303
#define TEGRA114_CLK_AUDIO4_MUX 304
#define TEGRA114_CLK_SPDIF_MUX 305
-#define TEGRA114_CLK_CLK_OUT_1_MUX 306
-#define TEGRA114_CLK_CLK_OUT_2_MUX 307
-#define TEGRA114_CLK_CLK_OUT_3_MUX 308
+/* 306 */
+/* 307 */
+/* 308 */
#define TEGRA114_CLK_DSIA_MUX 309
#define TEGRA114_CLK_DSIB_MUX 310
#define TEGRA114_CLK_XUSB_SS_DIV2 311
diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h
index 0c4f5be0a742..c59f9de01b4d 100644
--- a/include/dt-bindings/clock/tegra124-car-common.h
+++ b/include/dt-bindings/clock/tegra124-car-common.h
@@ -227,6 +227,8 @@
#define TEGRA124_CLK_CLK_M 201
#define TEGRA124_CLK_CLK_M_DIV2 202
#define TEGRA124_CLK_CLK_M_DIV4 203
+#define TEGRA124_CLK_OSC_DIV2 202
+#define TEGRA124_CLK_OSC_DIV4 203
#define TEGRA124_CLK_PLL_REF 204
#define TEGRA124_CLK_PLL_C 205
#define TEGRA124_CLK_PLL_C_OUT1 206
@@ -269,11 +271,11 @@
#define TEGRA124_CLK_AUDIO3 242
#define TEGRA124_CLK_AUDIO4 243
#define TEGRA124_CLK_SPDIF 244
-#define TEGRA124_CLK_CLK_OUT_1 245
-#define TEGRA124_CLK_CLK_OUT_2 246
-#define TEGRA124_CLK_CLK_OUT_3 247
-#define TEGRA124_CLK_BLINK 248
-/* 249 */
+/* 245 */
+/* 246 */
+/* 247 */
+/* 248 */
+#define TEGRA124_CLK_OSC 249
/* 250 */
/* 251 */
#define TEGRA124_CLK_XUSB_HOST_SRC 252
@@ -332,9 +334,9 @@
#define TEGRA124_CLK_AUDIO3_MUX 303
#define TEGRA124_CLK_AUDIO4_MUX 304
#define TEGRA124_CLK_SPDIF_MUX 305
-#define TEGRA124_CLK_CLK_OUT_1_MUX 306
-#define TEGRA124_CLK_CLK_OUT_2_MUX 307
-#define TEGRA124_CLK_CLK_OUT_3_MUX 308
+/* 306 */
+/* 307 */
+/* 308 */
/* 309 */
/* 310 */
#define TEGRA124_CLK_SOR0_LVDS 311 /* deprecated */
diff --git a/include/dt-bindings/clock/tegra20-car.h b/include/dt-bindings/clock/tegra20-car.h
index b21a0eb32921..fe541f627965 100644
--- a/include/dt-bindings/clock/tegra20-car.h
+++ b/include/dt-bindings/clock/tegra20-car.h
@@ -131,7 +131,7 @@
#define TEGRA20_CLK_CCLK 108
#define TEGRA20_CLK_HCLK 109
#define TEGRA20_CLK_PCLK 110
-#define TEGRA20_CLK_BLINK 111
+/* 111 */
#define TEGRA20_CLK_PLL_A 112
#define TEGRA20_CLK_PLL_A_OUT0 113
#define TEGRA20_CLK_PLL_C 114
diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h
index 44f60623f99b..9cfcc3baa52c 100644
--- a/include/dt-bindings/clock/tegra210-car.h
+++ b/include/dt-bindings/clock/tegra210-car.h
@@ -262,6 +262,8 @@
#define TEGRA210_CLK_CLK_M 233
#define TEGRA210_CLK_CLK_M_DIV2 234
#define TEGRA210_CLK_CLK_M_DIV4 235
+#define TEGRA210_CLK_OSC_DIV2 234
+#define TEGRA210_CLK_OSC_DIV4 235
#define TEGRA210_CLK_PLL_REF 236
#define TEGRA210_CLK_PLL_C 237
#define TEGRA210_CLK_PLL_C_OUT1 238
@@ -304,10 +306,10 @@
#define TEGRA210_CLK_AUDIO3 274
#define TEGRA210_CLK_AUDIO4 275
#define TEGRA210_CLK_SPDIF 276
-#define TEGRA210_CLK_CLK_OUT_1 277
-#define TEGRA210_CLK_CLK_OUT_2 278
-#define TEGRA210_CLK_CLK_OUT_3 279
-#define TEGRA210_CLK_BLINK 280
+/* 277 */
+#define TEGRA210_CLK_QSPI_PM 278
+/* 279 */
+/* 280 */
#define TEGRA210_CLK_SOR0_LVDS 281 /* deprecated */
#define TEGRA210_CLK_SOR0_OUT 281
#define TEGRA210_CLK_SOR1_OUT 282
@@ -349,14 +351,14 @@
#define TEGRA210_CLK_PLL_P_OUT_XUSB 317
#define TEGRA210_CLK_XUSB_SSP_SRC 318
#define TEGRA210_CLK_PLL_RE_OUT1 319
-/* 320 */
-/* 321 */
+#define TEGRA210_CLK_PLL_MB_UD 320
+#define TEGRA210_CLK_PLL_P_UD 321
#define TEGRA210_CLK_ISP 322
#define TEGRA210_CLK_PLL_A_OUT_ADSP 323
#define TEGRA210_CLK_PLL_A_OUT0_OUT_ADSP 324
/* 325 */
-/* 326 */
-/* 327 */
+#define TEGRA210_CLK_OSC 326
+#define TEGRA210_CLK_CSI_TPG 327
/* 328 */
/* 329 */
/* 330 */
@@ -386,9 +388,9 @@
#define TEGRA210_CLK_AUDIO3_MUX 353
#define TEGRA210_CLK_AUDIO4_MUX 354
#define TEGRA210_CLK_SPDIF_MUX 355
-#define TEGRA210_CLK_CLK_OUT_1_MUX 356
-#define TEGRA210_CLK_CLK_OUT_2_MUX 357
-#define TEGRA210_CLK_CLK_OUT_3_MUX 358
+/* 356 */
+/* 357 */
+/* 358 */
#define TEGRA210_CLK_DSIA_MUX 359
#define TEGRA210_CLK_DSIB_MUX 360
/* 361 */
diff --git a/include/dt-bindings/clock/tegra234-clock.h b/include/dt-bindings/clock/tegra234-clock.h
new file mode 100644
index 000000000000..173364a93381
--- /dev/null
+++ b/include/dt-bindings/clock/tegra234-clock.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef DT_BINDINGS_CLOCK_TEGRA234_CLOCK_H
+#define DT_BINDINGS_CLOCK_TEGRA234_CLOCK_H
+
+/**
+ * @file
+ * @defgroup bpmp_clock_ids Clock IDs
+ * @{
+ */
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AHUB */
+#define TEGRA234_CLK_AHUB 4U
+/** @brief output of gate CLK_ENB_APB2APE */
+#define TEGRA234_CLK_APB2APE 5U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_APE */
+#define TEGRA234_CLK_APE 6U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AUD_MCLK */
+#define TEGRA234_CLK_AUD_MCLK 7U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC1 */
+#define TEGRA234_CLK_DMIC1 15U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC2 */
+#define TEGRA234_CLK_DMIC2 16U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC3 */
+#define TEGRA234_CLK_DMIC3 17U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC4 */
+#define TEGRA234_CLK_DMIC4 18U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSPK1 */
+#define TEGRA234_CLK_DSPK1 29U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSPK2 */
+#define TEGRA234_CLK_DSPK2 30U
+/**
+ * @brief controls the EMC clock frequency.
+ * @details Doing a clk_set_rate on this clock will select the
+ * appropriate clock source, program the source rate and execute a
+ * specific sequence to switch to the new clock source for both memory
+ * controllers. This can be used to control the balance between memory
+ * throughput and memory controller power.
+ */
+#define TEGRA234_CLK_EMC 31U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HOST1X */
+#define TEGRA234_CLK_HOST1X 46U
+/** @brief output of gate CLK_ENB_FUSE */
+#define TEGRA234_CLK_FUSE 40U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C1 */
+#define TEGRA234_CLK_I2C1 48U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C2 */
+#define TEGRA234_CLK_I2C2 49U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C3 */
+#define TEGRA234_CLK_I2C3 50U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C4 */
+#define TEGRA234_CLK_I2C4 51U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C6 */
+#define TEGRA234_CLK_I2C6 52U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C7 */
+#define TEGRA234_CLK_I2C7 53U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C8 */
+#define TEGRA234_CLK_I2C8 54U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C9 */
+#define TEGRA234_CLK_I2C9 55U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S1 */
+#define TEGRA234_CLK_I2S1 56U
+/** @brief clock recovered from I2S1 input */
+#define TEGRA234_CLK_I2S1_SYNC_INPUT 57U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S2 */
+#define TEGRA234_CLK_I2S2 58U
+/** @brief clock recovered from I2S2 input */
+#define TEGRA234_CLK_I2S2_SYNC_INPUT 59U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S3 */
+#define TEGRA234_CLK_I2S3 60U
+/** @brief clock recovered from I2S3 input */
+#define TEGRA234_CLK_I2S3_SYNC_INPUT 61U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S4 */
+#define TEGRA234_CLK_I2S4 62U
+/** @brief clock recovered from I2S4 input */
+#define TEGRA234_CLK_I2S4_SYNC_INPUT 63U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S5 */
+#define TEGRA234_CLK_I2S5 64U
+/** @brief clock recovered from I2S5 input */
+#define TEGRA234_CLK_I2S5_SYNC_INPUT 65U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S6 */
+#define TEGRA234_CLK_I2S6 66U
+/** @brief clock recovered from I2S6 input */
+#define TEGRA234_CLK_I2S6_SYNC_INPUT 67U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLA_BASE for use by audio clocks */
+#define TEGRA234_CLK_PLLA 93U
+/** @brief PLLP clk output */
+#define TEGRA234_CLK_PLLP_OUT0 102U
+/** @brief output of the divider CLK_RST_CONTROLLER_PLLA_OUT */
+#define TEGRA234_CLK_PLLA_OUT0 104U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM1 */
+#define TEGRA234_CLK_PWM1 105U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM2 */
+#define TEGRA234_CLK_PWM2 106U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM3 */
+#define TEGRA234_CLK_PWM3 107U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM4 */
+#define TEGRA234_CLK_PWM4 108U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM5 */
+#define TEGRA234_CLK_PWM5 109U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM6 */
+#define TEGRA234_CLK_PWM6 110U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM7 */
+#define TEGRA234_CLK_PWM7 111U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM8 */
+#define TEGRA234_CLK_PWM8 112U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC4 */
+#define TEGRA234_CLK_SDMMC4 123U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC1 */
+#define TEGRA234_CLK_SYNC_DMIC1 139U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC2 */
+#define TEGRA234_CLK_SYNC_DMIC2 140U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC3 */
+#define TEGRA234_CLK_SYNC_DMIC3 141U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC4 */
+#define TEGRA234_CLK_SYNC_DMIC4 142U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DSPK1 */
+#define TEGRA234_CLK_SYNC_DSPK1 143U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DSPK2 */
+#define TEGRA234_CLK_SYNC_DSPK2 144U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S1 */
+#define TEGRA234_CLK_SYNC_I2S1 145U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S2 */
+#define TEGRA234_CLK_SYNC_I2S2 146U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S3 */
+#define TEGRA234_CLK_SYNC_I2S3 147U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S4 */
+#define TEGRA234_CLK_SYNC_I2S4 148U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S5 */
+#define TEGRA234_CLK_SYNC_I2S5 149U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S6 */
+#define TEGRA234_CLK_SYNC_I2S6 150U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTA */
+#define TEGRA234_CLK_UARTA 155U
+/** @brief output of gate CLK_ENB_PEX1_CORE_6 */
+#define TEGRA234_CLK_PEX1_C6_CORE 161U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VIC */
+#define TEGRA234_CLK_VIC 167U
+/** @brief output of gate CLK_ENB_PEX2_CORE_7 */
+#define TEGRA234_CLK_PEX2_C7_CORE 171U
+/** @brief output of gate CLK_ENB_PEX2_CORE_8 */
+#define TEGRA234_CLK_PEX2_C8_CORE 172U
+/** @brief output of gate CLK_ENB_PEX2_CORE_9 */
+#define TEGRA234_CLK_PEX2_C9_CORE 173U
+/** @brief output of gate CLK_ENB_PEX2_CORE_10 */
+#define TEGRA234_CLK_PEX2_C10_CORE 187U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_QSPI0 switch divider output */
+#define TEGRA234_CLK_QSPI0_2X_PM 192U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_QSPI1 switch divider output */
+#define TEGRA234_CLK_QSPI1_2X_PM 193U
+/** @brief output of the divider QSPI_CLK_DIV2_SEL in CLK_RST_CONTROLLER_CLK_SOURCE_QSPI0 */
+#define TEGRA234_CLK_QSPI0_PM 194U
+/** @brief output of the divider QSPI_CLK_DIV2_SEL in CLK_RST_CONTROLLER_CLK_SOURCE_QSPI1 */
+#define TEGRA234_CLK_QSPI1_PM 195U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC_LEGACY_TM switch divider output */
+#define TEGRA234_CLK_SDMMC_LEGACY_TM 219U
+/** @brief output of gate CLK_ENB_PEX0_CORE_0 */
+#define TEGRA234_CLK_PEX0_C0_CORE 220U
+/** @brief output of gate CLK_ENB_PEX0_CORE_1 */
+#define TEGRA234_CLK_PEX0_C1_CORE 221U
+/** @brief output of gate CLK_ENB_PEX0_CORE_2 */
+#define TEGRA234_CLK_PEX0_C2_CORE 222U
+/** @brief output of gate CLK_ENB_PEX0_CORE_3 */
+#define TEGRA234_CLK_PEX0_C3_CORE 223U
+/** @brief output of gate CLK_ENB_PEX0_CORE_4 */
+#define TEGRA234_CLK_PEX0_C4_CORE 224U
+/** @brief output of gate CLK_ENB_PEX1_CORE_5 */
+#define TEGRA234_CLK_PEX1_C5_CORE 225U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC4_BASE */
+#define TEGRA234_CLK_PLLC4 237U
+/** @brief RX clock recovered from MGBE0 lane input */
+#define TEGRA234_CLK_MGBE0_RX_INPUT 248U
+/** @brief RX clock recovered from MGBE1 lane input */
+#define TEGRA234_CLK_MGBE1_RX_INPUT 249U
+/** @brief RX clock recovered from MGBE2 lane input */
+#define TEGRA234_CLK_MGBE2_RX_INPUT 250U
+/** @brief RX clock recovered from MGBE3 lane input */
+#define TEGRA234_CLK_MGBE3_RX_INPUT 251U
+/** @brief 32K input clock provided by PMIC */
+#define TEGRA234_CLK_CLK_32K 289U
+/** @brief Monitored branch of MGBE0 RX input clock */
+#define TEGRA234_CLK_MGBE0_RX_INPUT_M 357U
+/** @brief Monitored branch of MGBE1 RX input clock */
+#define TEGRA234_CLK_MGBE1_RX_INPUT_M 358U
+/** @brief Monitored branch of MGBE2 RX input clock */
+#define TEGRA234_CLK_MGBE2_RX_INPUT_M 359U
+/** @brief Monitored branch of MGBE3 RX input clock */
+#define TEGRA234_CLK_MGBE3_RX_INPUT_M 360U
+/** @brief Monitored branch of MGBE0 RX PCS mux output */
+#define TEGRA234_CLK_MGBE0_RX_PCS_M 361U
+/** @brief Monitored branch of MGBE1 RX PCS mux output */
+#define TEGRA234_CLK_MGBE1_RX_PCS_M 362U
+/** @brief Monitored branch of MGBE2 RX PCS mux output */
+#define TEGRA234_CLK_MGBE2_RX_PCS_M 363U
+/** @brief Monitored branch of MGBE3 RX PCS mux output */
+#define TEGRA234_CLK_MGBE3_RX_PCS_M 364U
+/** @brief RX PCS clock recovered from MGBE0 lane input */
+#define TEGRA234_CLK_MGBE0_RX_PCS_INPUT 369U
+/** @brief RX PCS clock recovered from MGBE1 lane input */
+#define TEGRA234_CLK_MGBE1_RX_PCS_INPUT 370U
+/** @brief RX PCS clock recovered from MGBE2 lane input */
+#define TEGRA234_CLK_MGBE2_RX_PCS_INPUT 371U
+/** @brief RX PCS clock recovered from MGBE3 lane input */
+#define TEGRA234_CLK_MGBE3_RX_PCS_INPUT 372U
+/** @brief output of mux controlled by GBE_UPHY_MGBE0_RX_PCS_CLK_SRC_SEL */
+#define TEGRA234_CLK_MGBE0_RX_PCS 373U
+/** @brief GBE_UPHY_MGBE0_TX_CLK divider gated output */
+#define TEGRA234_CLK_MGBE0_TX 374U
+/** @brief GBE_UPHY_MGBE0_TX_PCS_CLK divider gated output */
+#define TEGRA234_CLK_MGBE0_TX_PCS 375U
+/** @brief GBE_UPHY_MGBE0_MAC_CLK divider output */
+#define TEGRA234_CLK_MGBE0_MAC_DIVIDER 376U
+/** @brief GBE_UPHY_MGBE0_MAC_CLK gate output */
+#define TEGRA234_CLK_MGBE0_MAC 377U
+/** @brief GBE_UPHY_MGBE0_MACSEC_CLK gate output */
+#define TEGRA234_CLK_MGBE0_MACSEC 378U
+/** @brief GBE_UPHY_MGBE0_EEE_PCS_CLK gate output */
+#define TEGRA234_CLK_MGBE0_EEE_PCS 379U
+/** @brief GBE_UPHY_MGBE0_APP_CLK gate output */
+#define TEGRA234_CLK_MGBE0_APP 380U
+/** @brief GBE_UPHY_MGBE0_PTP_REF_CLK divider gated output */
+#define TEGRA234_CLK_MGBE0_PTP_REF 381U
+/** @brief output of mux controlled by GBE_UPHY_MGBE1_RX_PCS_CLK_SRC_SEL */
+#define TEGRA234_CLK_MGBE1_RX_PCS 382U
+/** @brief GBE_UPHY_MGBE1_TX_CLK divider gated output */
+#define TEGRA234_CLK_MGBE1_TX 383U
+/** @brief GBE_UPHY_MGBE1_TX_PCS_CLK divider gated output */
+#define TEGRA234_CLK_MGBE1_TX_PCS 384U
+/** @brief GBE_UPHY_MGBE1_MAC_CLK divider output */
+#define TEGRA234_CLK_MGBE1_MAC_DIVIDER 385U
+/** @brief GBE_UPHY_MGBE1_MAC_CLK gate output */
+#define TEGRA234_CLK_MGBE1_MAC 386U
+/** @brief GBE_UPHY_MGBE1_EEE_PCS_CLK gate output */
+#define TEGRA234_CLK_MGBE1_EEE_PCS 388U
+/** @brief GBE_UPHY_MGBE1_APP_CLK gate output */
+#define TEGRA234_CLK_MGBE1_APP 389U
+/** @brief GBE_UPHY_MGBE1_PTP_REF_CLK divider gated output */
+#define TEGRA234_CLK_MGBE1_PTP_REF 390U
+/** @brief output of mux controlled by GBE_UPHY_MGBE2_RX_PCS_CLK_SRC_SEL */
+#define TEGRA234_CLK_MGBE2_RX_PCS 391U
+/** @brief GBE_UPHY_MGBE2_TX_CLK divider gated output */
+#define TEGRA234_CLK_MGBE2_TX 392U
+/** @brief GBE_UPHY_MGBE2_TX_PCS_CLK divider gated output */
+#define TEGRA234_CLK_MGBE2_TX_PCS 393U
+/** @brief GBE_UPHY_MGBE2_MAC_CLK divider output */
+#define TEGRA234_CLK_MGBE2_MAC_DIVIDER 394U
+/** @brief GBE_UPHY_MGBE2_MAC_CLK gate output */
+#define TEGRA234_CLK_MGBE2_MAC 395U
+/** @brief GBE_UPHY_MGBE2_EEE_PCS_CLK gate output */
+#define TEGRA234_CLK_MGBE2_EEE_PCS 397U
+/** @brief GBE_UPHY_MGBE2_APP_CLK gate output */
+#define TEGRA234_CLK_MGBE2_APP 398U
+/** @brief GBE_UPHY_MGBE2_PTP_REF_CLK divider gated output */
+#define TEGRA234_CLK_MGBE2_PTP_REF 399U
+/** @brief output of mux controlled by GBE_UPHY_MGBE3_RX_PCS_CLK_SRC_SEL */
+#define TEGRA234_CLK_MGBE3_RX_PCS 400U
+/** @brief GBE_UPHY_MGBE3_TX_CLK divider gated output */
+#define TEGRA234_CLK_MGBE3_TX 401U
+/** @brief GBE_UPHY_MGBE3_TX_PCS_CLK divider gated output */
+#define TEGRA234_CLK_MGBE3_TX_PCS 402U
+/** @brief GBE_UPHY_MGBE3_MAC_CLK divider output */
+#define TEGRA234_CLK_MGBE3_MAC_DIVIDER 403U
+/** @brief GBE_UPHY_MGBE3_MAC_CLK gate output */
+#define TEGRA234_CLK_MGBE3_MAC 404U
+/** @brief GBE_UPHY_MGBE3_MACSEC_CLK gate output */
+#define TEGRA234_CLK_MGBE3_MACSEC 405U
+/** @brief GBE_UPHY_MGBE3_EEE_PCS_CLK gate output */
+#define TEGRA234_CLK_MGBE3_EEE_PCS 406U
+/** @brief GBE_UPHY_MGBE3_APP_CLK gate output */
+#define TEGRA234_CLK_MGBE3_APP 407U
+/** @brief GBE_UPHY_MGBE3_PTP_REF_CLK divider gated output */
+#define TEGRA234_CLK_MGBE3_PTP_REF 408U
+/** @brief CLK_RST_CONTROLLER_AZA2XBITCLK_OUT_SWITCH_DIVIDER switch divider output (aza_2xbitclk) */
+#define TEGRA234_CLK_AZA_2XBIT 457U
+/** @brief aza_2xbitclk / 2 (aza_bitclk) */
+#define TEGRA234_CLK_AZA_BIT 458U
+
+#endif
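For orientation: these Tegra234 IDs are clock specifiers for the BPMP, which acts as the clock provider on this SoC. A minimal consumer sketch, assuming a &bpmp phandle as in the upstream tegra234.dtsi (the unit addresses and clock-names are illustrative):

    #include <dt-bindings/clock/tegra234-clock.h>

    i2c@3160000 {
            /* controller clock supplied by the BPMP, ID from this header */
            clocks = <&bpmp TEGRA234_CLK_I2C1>;
            clock-names = "div-clk";
    };

    memory-controller@2c00000 {
            /* clk_set_rate() on this ID rebalances memory throughput vs. power */
            clocks = <&bpmp TEGRA234_CLK_EMC>;
    };
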
diff --git a/include/dt-bindings/clock/tegra30-car.h b/include/dt-bindings/clock/tegra30-car.h
index 3c90f1535551..f193663e6f28 100644
--- a/include/dt-bindings/clock/tegra30-car.h
+++ b/include/dt-bindings/clock/tegra30-car.h
@@ -196,6 +196,8 @@
#define TEGRA30_CLK_CLK_M 171
#define TEGRA30_CLK_CLK_M_DIV2 172
#define TEGRA30_CLK_CLK_M_DIV4 173
+#define TEGRA30_CLK_OSC_DIV2 172
+#define TEGRA30_CLK_OSC_DIV4 173
#define TEGRA30_CLK_PLL_REF 174
#define TEGRA30_CLK_PLL_C 175
#define TEGRA30_CLK_PLL_C_OUT1 176
@@ -230,11 +232,11 @@
#define TEGRA30_CLK_AUDIO3 204
#define TEGRA30_CLK_AUDIO4 205
#define TEGRA30_CLK_SPDIF 206
-#define TEGRA30_CLK_CLK_OUT_1 207 /* (extern1) */
-#define TEGRA30_CLK_CLK_OUT_2 208 /* (extern2) */
-#define TEGRA30_CLK_CLK_OUT_3 209 /* (extern3) */
+/* 207 */
+/* 208 */
+/* 209 */
#define TEGRA30_CLK_SCLK 210
-#define TEGRA30_CLK_BLINK 211
+/* 211 */
#define TEGRA30_CLK_CCLK_G 212
#define TEGRA30_CLK_CCLK_LP 213
#define TEGRA30_CLK_TWD 214
@@ -243,7 +245,7 @@
#define TEGRA30_CLK_HCLK 217
#define TEGRA30_CLK_PCLK 218
/* 219 */
-/* 220 */
+#define TEGRA30_CLK_OSC 220
/* 221 */
/* 222 */
/* 223 */
@@ -260,9 +262,9 @@
/* 297 */
/* 298 */
/* 299 */
-#define TEGRA30_CLK_CLK_OUT_1_MUX 300
-#define TEGRA30_CLK_CLK_OUT_2_MUX 301
-#define TEGRA30_CLK_CLK_OUT_3_MUX 302
+/* 300 */
+/* 301 */
+/* 302 */
#define TEGRA30_CLK_AUDIO0_MUX 303
#define TEGRA30_CLK_AUDIO1_MUX 304
#define TEGRA30_CLK_AUDIO2_MUX 305
diff --git a/include/dt-bindings/clock/ti-dra7-atl.h b/include/dt-bindings/clock/ti-dra7-atl.h
index 42dd4164f6f4..b0e71e3cce95 100644
--- a/include/dt-bindings/clock/ti-dra7-atl.h
+++ b/include/dt-bindings/clock/ti-dra7-atl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* This header provides constants for DRA7 ATL (Audio Tracking Logic)
*
@@ -6,15 +7,6 @@
* Copyright (C) 2013 Texas Instruments, Inc.
*
* Peter Ujfalusi <peter.ujfalusi@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DT_BINDINGS_CLK_DRA7_ATL_H
diff --git a/include/dt-bindings/clock/toshiba,tmpv770x.h b/include/dt-bindings/clock/toshiba,tmpv770x.h
new file mode 100644
index 000000000000..5fce713001fd
--- /dev/null
+++ b/include/dt-bindings/clock/toshiba,tmpv770x.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLOCK_TOSHIBA_TMPV770X_H_
+#define _DT_BINDINGS_CLOCK_TOSHIBA_TMPV770X_H_
+
+/* PLL */
+#define TMPV770X_PLL_PIPLL0 0
+#define TMPV770X_PLL_PIPLL1 1
+#define TMPV770X_PLL_PIDNNPLL 2
+#define TMPV770X_PLL_PIETHERPLL 3
+#define TMPV770X_PLL_PIDDRCPLL 4
+#define TMPV770X_PLL_PIVOIFPLL 5
+#define TMPV770X_PLL_PIIMGERPLL 6
+#define TMPV770X_NR_PLL 7
+
+/* Clocks */
+#define TMPV770X_CLK_PIPLL1_DIV1 0
+#define TMPV770X_CLK_PIPLL1_DIV2 1
+#define TMPV770X_CLK_PIPLL1_DIV4 2
+#define TMPV770X_CLK_PIDNNPLL_DIV1 3
+#define TMPV770X_CLK_DDRC_PHY_PLL0 4
+#define TMPV770X_CLK_DDRC_PHY_PLL1 5
+#define TMPV770X_CLK_D_PHYPLL 6
+#define TMPV770X_CLK_PHY_PCIEPLL 7
+#define TMPV770X_CLK_CA53CL0 8
+#define TMPV770X_CLK_CA53CL1 9
+#define TMPV770X_CLK_PISDMAC 10
+#define TMPV770X_CLK_PIPDMAC0 11
+#define TMPV770X_CLK_PIPDMAC1 12
+#define TMPV770X_CLK_PIWRAM 13
+#define TMPV770X_CLK_DDRC0 14
+#define TMPV770X_CLK_DDRC0_SCLK 15
+#define TMPV770X_CLK_DDRC0_NCLK 16
+#define TMPV770X_CLK_DDRC0_MCLK 17
+#define TMPV770X_CLK_DDRC0_APBCLK 18
+#define TMPV770X_CLK_DDRC1 19
+#define TMPV770X_CLK_DDRC1_SCLK 20
+#define TMPV770X_CLK_DDRC1_NCLK 21
+#define TMPV770X_CLK_DDRC1_MCLK 22
+#define TMPV770X_CLK_DDRC1_APBCLK 23
+#define TMPV770X_CLK_HOX 24
+#define TMPV770X_CLK_PCIE_MSTR 25
+#define TMPV770X_CLK_PCIE_AUX 26
+#define TMPV770X_CLK_PIINTC 27
+#define TMPV770X_CLK_PIETHER_BUS 28
+#define TMPV770X_CLK_PISPI0 29
+#define TMPV770X_CLK_PISPI1 30
+#define TMPV770X_CLK_PISPI2 31
+#define TMPV770X_CLK_PISPI3 32
+#define TMPV770X_CLK_PISPI4 33
+#define TMPV770X_CLK_PISPI5 34
+#define TMPV770X_CLK_PISPI6 35
+#define TMPV770X_CLK_PIUART0 36
+#define TMPV770X_CLK_PIUART1 37
+#define TMPV770X_CLK_PIUART2 38
+#define TMPV770X_CLK_PIUART3 39
+#define TMPV770X_CLK_PII2C0 40
+#define TMPV770X_CLK_PII2C1 41
+#define TMPV770X_CLK_PII2C2 42
+#define TMPV770X_CLK_PII2C3 43
+#define TMPV770X_CLK_PII2C4 44
+#define TMPV770X_CLK_PII2C5 45
+#define TMPV770X_CLK_PII2C6 46
+#define TMPV770X_CLK_PII2C7 47
+#define TMPV770X_CLK_PII2C8 48
+#define TMPV770X_CLK_PIGPIO 49
+#define TMPV770X_CLK_PIPGM 50
+#define TMPV770X_CLK_PIPCMIF 51
+#define TMPV770X_CLK_PIPCMIF_AUDIO_O 52
+#define TMPV770X_CLK_PIPCMIF_AUDIO_I 53
+#define TMPV770X_CLK_PICMPT0 54
+#define TMPV770X_CLK_PICMPT1 55
+#define TMPV770X_CLK_PITSC 56
+#define TMPV770X_CLK_PIUWDT 57
+#define TMPV770X_CLK_PISWDT 58
+#define TMPV770X_CLK_WDTCLK 59
+#define TMPV770X_CLK_PISUBUS_150M 60
+#define TMPV770X_CLK_PISUBUS_300M 61
+#define TMPV770X_CLK_PIPMU 62
+#define TMPV770X_CLK_PIGPMU 63
+#define TMPV770X_CLK_PITMU 64
+#define TMPV770X_CLK_WRCK 65
+#define TMPV770X_CLK_PIEMM 66
+#define TMPV770X_CLK_PIMISC 67
+#define TMPV770X_CLK_PIGCOMM 68
+#define TMPV770X_CLK_PIDCOMM 69
+#define TMPV770X_CLK_PICKMON 70
+#define TMPV770X_CLK_PIMBUS 71
+#define TMPV770X_CLK_SBUSCLK 72
+#define TMPV770X_CLK_DDR0_APBCLKCLK 73
+#define TMPV770X_CLK_DDR1_APBCLKCLK 74
+#define TMPV770X_CLK_DSP0_PBCLK 75
+#define TMPV770X_CLK_DSP1_PBCLK 76
+#define TMPV770X_CLK_DSP2_PBCLK 77
+#define TMPV770X_CLK_DSP3_PBCLK 78
+#define TMPV770X_CLK_DSVIIF0_APBCLK 79
+#define TMPV770X_CLK_VIIF0_APBCLK 80
+#define TMPV770X_CLK_VIIF0_CFGCLK 81
+#define TMPV770X_CLK_VIIF1_APBCLK 82
+#define TMPV770X_CLK_VIIF1_CFGCLK 83
+#define TMPV770X_CLK_VIIF2_APBCLK 84
+#define TMPV770X_CLK_VIIF2_CFGCLK 85
+#define TMPV770X_CLK_VIIF3_APBCLK 86
+#define TMPV770X_CLK_VIIF3_CFGCLK 87
+#define TMPV770X_CLK_VIIF4_APBCLK 88
+#define TMPV770X_CLK_VIIF4_CFGCLK 89
+#define TMPV770X_CLK_VIIF5_APBCLK 90
+#define TMPV770X_CLK_VIIF5_CFGCLK 91
+#define TMPV770X_CLK_VOIF_SBUSCLK 92
+#define TMPV770X_CLK_VOIF_PROCCLK 93
+#define TMPV770X_CLK_VOIF_DPHYCFGCLK 94
+#define TMPV770X_CLK_DNN0 95
+#define TMPV770X_CLK_STMAT 96
+#define TMPV770X_CLK_HWA0 97
+#define TMPV770X_CLK_AFFINE0 98
+#define TMPV770X_CLK_HAMAT 99
+#define TMPV770X_CLK_SMLDB 100
+#define TMPV770X_CLK_HWA0_ASYNC 101
+#define TMPV770X_CLK_HWA2 102
+#define TMPV770X_CLK_FLMAT 103
+#define TMPV770X_CLK_PYRAMID 104
+#define TMPV770X_CLK_HWA2_ASYNC 105
+#define TMPV770X_CLK_DSP0 106
+#define TMPV770X_CLK_VIIFBS0 107
+#define TMPV770X_CLK_VIIFBS0_L2ISP 108
+#define TMPV770X_CLK_VIIFBS0_L1ISP 109
+#define TMPV770X_CLK_VIIFBS0_PROC 110
+#define TMPV770X_CLK_VIIFBS1 111
+#define TMPV770X_CLK_VIIFBS2 112
+#define TMPV770X_CLK_VIIFOP_MBUS 113
+#define TMPV770X_CLK_VIIFOP0_PROC 114
+#define TMPV770X_CLK_PIETHER_2P5M 115
+#define TMPV770X_CLK_PIETHER_25M 116
+#define TMPV770X_CLK_PIETHER_50M 117
+#define TMPV770X_CLK_PIETHER_125M 118
+#define TMPV770X_CLK_VOIF0_DPHYCFG 119
+#define TMPV770X_CLK_VOIF0_PROC 120
+#define TMPV770X_CLK_VOIF0_SBUS 121
+#define TMPV770X_CLK_VOIF0_DSIREF 122
+#define TMPV770X_CLK_VOIF0_PIXEL 123
+#define TMPV770X_CLK_PIREFCLK 124
+#define TMPV770X_CLK_SBUS 125
+#define TMPV770X_CLK_BUSLCK 126
+#define TMPV770X_NR_CLK 127
+
+/* Reset */
+#define TMPV770X_RESET_PIETHER_2P5M 0
+#define TMPV770X_RESET_PIETHER_25M 1
+#define TMPV770X_RESET_PIETHER_50M 2
+#define TMPV770X_RESET_PIETHER_125M 3
+#define TMPV770X_RESET_HOX 4
+#define TMPV770X_RESET_PCIE_MSTR 5
+#define TMPV770X_RESET_PCIE_AUX 6
+#define TMPV770X_RESET_PIINTC 7
+#define TMPV770X_RESET_PIETHER_BUS 8
+#define TMPV770X_RESET_PISPI0 9
+#define TMPV770X_RESET_PISPI1 10
+#define TMPV770X_RESET_PISPI2 11
+#define TMPV770X_RESET_PISPI3 12
+#define TMPV770X_RESET_PISPI4 13
+#define TMPV770X_RESET_PISPI5 14
+#define TMPV770X_RESET_PISPI6 15
+#define TMPV770X_RESET_PIUART0 16
+#define TMPV770X_RESET_PIUART1 17
+#define TMPV770X_RESET_PIUART2 18
+#define TMPV770X_RESET_PIUART3 19
+#define TMPV770X_RESET_PII2C0 20
+#define TMPV770X_RESET_PII2C1 21
+#define TMPV770X_RESET_PII2C2 22
+#define TMPV770X_RESET_PII2C3 23
+#define TMPV770X_RESET_PII2C4 24
+#define TMPV770X_RESET_PII2C5 25
+#define TMPV770X_RESET_PII2C6 26
+#define TMPV770X_RESET_PII2C7 27
+#define TMPV770X_RESET_PII2C8 28
+#define TMPV770X_RESET_PIPCMIF 29
+#define TMPV770X_RESET_PICKMON 30
+#define TMPV770X_RESET_SBUSCLK 31
+#define TMPV770X_NR_RESET 32
+
+#endif /* _DT_BINDINGS_CLOCK_TOSHIBA_TMPV770X_H_ */
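A consumer sketch for the IDs above, assuming the PISMU system controller exposes them with #clock-cells = <1> and #reset-cells = <1> as on TMPV7708 boards (the &pismu label and the uart unit address are assumptions):

    #include <dt-bindings/clock/toshiba,tmpv770x.h>

    serial@28200000 {
            clocks = <&pismu TMPV770X_CLK_PIUART0>;
            resets = <&pismu TMPV770X_RESET_PIUART0>;
    };
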
diff --git a/include/dt-bindings/clock/versaclock.h b/include/dt-bindings/clock/versaclock.h
new file mode 100644
index 000000000000..c6a6a0946564
--- /dev/null
+++ b/include/dt-bindings/clock/versaclock.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This file defines the field values used by the VersaClock 6 family
+ * to select the output type.
+ */
+
+#define VC5_LVPECL 0
+#define VC5_CMOS 1
+#define VC5_HCSL33 2
+#define VC5_LVDS 3
+#define VC5_CMOS2 4
+#define VC5_CMOSD 5
+#define VC5_HCSL25 6
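These values pick the electrical mode of one VersaClock 6 output. A hedged sketch, assuming the per-output idt,mode property of the idt,versaclock5 binding (the I2C address and chip variant are illustrative):

    #include <dt-bindings/clock/versaclock.h>

    clock-controller@6a {
            compatible = "idt,5p49v6965";
            reg = <0x6a>;
            #clock-cells = <1>;

            OUT1 {
                    /* drive this output as LVDS */
                    idt,mode = <VC5_LVDS>;
            };
    };
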
diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h
index 95394f35a74a..373644e46747 100644
--- a/include/dt-bindings/clock/vf610-clock.h
+++ b/include/dt-bindings/clock/vf610-clock.h
@@ -195,6 +195,8 @@
#define VF610_CLK_WKPU 186
#define VF610_CLK_TCON0 187
#define VF610_CLK_TCON1 188
-#define VF610_CLK_END 189
+#define VF610_CLK_CAAM 189
+#define VF610_CLK_CRC 190
+#define VF610_CLK_END 191
#endif /* __DT_BINDINGS_CLOCK_VF610_H */
diff --git a/include/dt-bindings/clock/x1000-cgu.h b/include/dt-bindings/clock/x1000-cgu.h
deleted file mode 100644
index bbaebaf7adb9..000000000000
--- a/include/dt-bindings/clock/x1000-cgu.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This header provides clock numbers for the ingenic,x1000-cgu DT binding.
- *
- * They are roughly ordered as:
- * - external clocks
- * - PLLs
- * - muxes/dividers in the order they appear in the x1000 programmers manual
- * - gates in order of their bit in the CLKGR* registers
- */
-
-#ifndef __DT_BINDINGS_CLOCK_X1000_CGU_H__
-#define __DT_BINDINGS_CLOCK_X1000_CGU_H__
-
-#define X1000_CLK_EXCLK 0
-#define X1000_CLK_RTCLK 1
-#define X1000_CLK_APLL 2
-#define X1000_CLK_MPLL 3
-#define X1000_CLK_SCLKA 4
-#define X1000_CLK_CPUMUX 5
-#define X1000_CLK_CPU 6
-#define X1000_CLK_L2CACHE 7
-#define X1000_CLK_AHB0 8
-#define X1000_CLK_AHB2PMUX 9
-#define X1000_CLK_AHB2 10
-#define X1000_CLK_PCLK 11
-#define X1000_CLK_DDR 12
-#define X1000_CLK_MAC 13
-#define X1000_CLK_MSCMUX 14
-#define X1000_CLK_MSC0 15
-#define X1000_CLK_MSC1 16
-#define X1000_CLK_SSIPLL 17
-#define X1000_CLK_SSIMUX 18
-#define X1000_CLK_SFC 19
-#define X1000_CLK_I2C0 20
-#define X1000_CLK_I2C1 21
-#define X1000_CLK_I2C2 22
-#define X1000_CLK_UART0 23
-#define X1000_CLK_UART1 24
-#define X1000_CLK_UART2 25
-#define X1000_CLK_SSI 26
-#define X1000_CLK_PDMA 27
-
-#endif /* __DT_BINDINGS_CLOCK_X1000_CGU_H__ */
diff --git a/include/dt-bindings/clock/xlnx-vcu.h b/include/dt-bindings/clock/xlnx-vcu.h
new file mode 100644
index 000000000000..1ed76b9563b6
--- /dev/null
+++ b/include/dt-bindings/clock/xlnx-vcu.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Pengutronix, Michael Tretter <kernel@pengutronix.de>
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_XLNX_VCU_H
+#define _DT_BINDINGS_CLOCK_XLNX_VCU_H
+
+#define CLK_XVCU_ENC_CORE 0
+#define CLK_XVCU_ENC_MCU 1
+#define CLK_XVCU_DEC_CORE 2
+#define CLK_XVCU_DEC_MCU 3
+#define CLK_XVCU_NUM_CLOCKS 4
+
+#endif /* _DT_BINDINGS_CLOCK_XLNX_VCU_H */
diff --git a/include/dt-bindings/clock/zx296702-clock.h b/include/dt-bindings/clock/zx296702-clock.h
deleted file mode 100644
index e04126111aae..000000000000
--- a/include/dt-bindings/clock/zx296702-clock.h
+++ /dev/null
@@ -1,180 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2014 Linaro Ltd.
- * Copyright (C) 2014 ZTE Corporation.
- */
-
-#ifndef __DT_BINDINGS_CLOCK_ZX296702_H
-#define __DT_BINDINGS_CLOCK_ZX296702_H
-
-#define ZX296702_OSC 0
-#define ZX296702_PLL_A9 1
-#define ZX296702_PLL_A9_350M 2
-#define ZX296702_PLL_MAC_1000M 3
-#define ZX296702_PLL_MAC_333M 4
-#define ZX296702_PLL_MM0_1188M 5
-#define ZX296702_PLL_MM0_396M 6
-#define ZX296702_PLL_MM0_198M 7
-#define ZX296702_PLL_MM1_108M 8
-#define ZX296702_PLL_MM1_72M 9
-#define ZX296702_PLL_MM1_54M 10
-#define ZX296702_PLL_LSP_104M 11
-#define ZX296702_PLL_LSP_26M 12
-#define ZX296702_PLL_AUDIO_294M912 13
-#define ZX296702_PLL_DDR_266M 14
-#define ZX296702_CLK_148M5 15
-#define ZX296702_MATRIX_ACLK 16
-#define ZX296702_MAIN_HCLK 17
-#define ZX296702_MAIN_PCLK 18
-#define ZX296702_CLK_500 19
-#define ZX296702_CLK_250 20
-#define ZX296702_CLK_125 21
-#define ZX296702_CLK_74M25 22
-#define ZX296702_A9_WCLK 23
-#define ZX296702_A9_AS1_ACLK_MUX 24
-#define ZX296702_A9_TRACE_CLKIN_MUX 25
-#define ZX296702_A9_AS1_ACLK_DIV 26
-#define ZX296702_CLK_2 27
-#define ZX296702_CLK_27 28
-#define ZX296702_DECPPU_ACLK_MUX 29
-#define ZX296702_PPU_ACLK_MUX 30
-#define ZX296702_MALI400_ACLK_MUX 31
-#define ZX296702_VOU_ACLK_MUX 32
-#define ZX296702_VOU_MAIN_WCLK_MUX 33
-#define ZX296702_VOU_AUX_WCLK_MUX 34
-#define ZX296702_VOU_SCALER_WCLK_MUX 35
-#define ZX296702_R2D_ACLK_MUX 36
-#define ZX296702_R2D_WCLK_MUX 37
-#define ZX296702_CLK_50 38
-#define ZX296702_CLK_25 39
-#define ZX296702_CLK_12 40
-#define ZX296702_CLK_16M384 41
-#define ZX296702_CLK_32K768 42
-#define ZX296702_SEC_WCLK_DIV 43
-#define ZX296702_DDR_WCLK_MUX 44
-#define ZX296702_NAND_WCLK_MUX 45
-#define ZX296702_LSP_26_WCLK_MUX 46
-#define ZX296702_A9_AS0_ACLK 47
-#define ZX296702_A9_AS1_ACLK 48
-#define ZX296702_A9_TRACE_CLKIN 49
-#define ZX296702_DECPPU_AXI_M_ACLK 50
-#define ZX296702_DECPPU_AHB_S_HCLK 51
-#define ZX296702_PPU_AXI_M_ACLK 52
-#define ZX296702_PPU_AHB_S_HCLK 53
-#define ZX296702_VOU_AXI_M_ACLK 54
-#define ZX296702_VOU_APB_PCLK 55
-#define ZX296702_VOU_MAIN_CHANNEL_WCLK 56
-#define ZX296702_VOU_AUX_CHANNEL_WCLK 57
-#define ZX296702_VOU_HDMI_OSCLK_CEC 58
-#define ZX296702_VOU_SCALER_WCLK 59
-#define ZX296702_MALI400_AXI_M_ACLK 60
-#define ZX296702_MALI400_APB_PCLK 61
-#define ZX296702_R2D_WCLK 62
-#define ZX296702_R2D_AXI_M_ACLK 63
-#define ZX296702_R2D_AHB_HCLK 64
-#define ZX296702_DDR3_AXI_S0_ACLK 65
-#define ZX296702_DDR3_APB_PCLK 66
-#define ZX296702_DDR3_WCLK 67
-#define ZX296702_USB20_0_AHB_HCLK 68
-#define ZX296702_USB20_0_EXTREFCLK 69
-#define ZX296702_USB20_1_AHB_HCLK 70
-#define ZX296702_USB20_1_EXTREFCLK 71
-#define ZX296702_USB20_2_AHB_HCLK 72
-#define ZX296702_USB20_2_EXTREFCLK 73
-#define ZX296702_GMAC_AXI_M_ACLK 74
-#define ZX296702_GMAC_APB_PCLK 75
-#define ZX296702_GMAC_125_CLKIN 76
-#define ZX296702_GMAC_RMII_CLKIN 77
-#define ZX296702_GMAC_25M_CLK 78
-#define ZX296702_NANDFLASH_AHB_HCLK 79
-#define ZX296702_NANDFLASH_WCLK 80
-#define ZX296702_LSP0_APB_PCLK 81
-#define ZX296702_LSP0_AHB_HCLK 82
-#define ZX296702_LSP0_26M_WCLK 83
-#define ZX296702_LSP0_104M_WCLK 84
-#define ZX296702_LSP0_16M384_WCLK 85
-#define ZX296702_LSP1_APB_PCLK 86
-#define ZX296702_LSP1_26M_WCLK 87
-#define ZX296702_LSP1_104M_WCLK 88
-#define ZX296702_LSP1_32K_CLK 89
-#define ZX296702_AON_HCLK 90
-#define ZX296702_SYS_CTRL_PCLK 91
-#define ZX296702_DMA_PCLK 92
-#define ZX296702_DMA_ACLK 93
-#define ZX296702_SEC_HCLK 94
-#define ZX296702_AES_WCLK 95
-#define ZX296702_DES_WCLK 96
-#define ZX296702_IRAM_ACLK 97
-#define ZX296702_IROM_ACLK 98
-#define ZX296702_BOOT_CTRL_HCLK 99
-#define ZX296702_EFUSE_CLK_30 100
-#define ZX296702_VOU_MAIN_CHANNEL_DIV 101
-#define ZX296702_VOU_AUX_CHANNEL_DIV 102
-#define ZX296702_VOU_TV_ENC_HD_DIV 103
-#define ZX296702_VOU_TV_ENC_SD_DIV 104
-#define ZX296702_VL0_MUX 105
-#define ZX296702_VL1_MUX 106
-#define ZX296702_VL2_MUX 107
-#define ZX296702_GL0_MUX 108
-#define ZX296702_GL1_MUX 109
-#define ZX296702_GL2_MUX 110
-#define ZX296702_WB_MUX 111
-#define ZX296702_HDMI_MUX 112
-#define ZX296702_VOU_TV_ENC_HD_MUX 113
-#define ZX296702_VOU_TV_ENC_SD_MUX 114
-#define ZX296702_VL0_CLK 115
-#define ZX296702_VL1_CLK 116
-#define ZX296702_VL2_CLK 117
-#define ZX296702_GL0_CLK 118
-#define ZX296702_GL1_CLK 119
-#define ZX296702_GL2_CLK 120
-#define ZX296702_WB_CLK 121
-#define ZX296702_CL_CLK 122
-#define ZX296702_MAIN_MIX_CLK 123
-#define ZX296702_AUX_MIX_CLK 124
-#define ZX296702_HDMI_CLK 125
-#define ZX296702_VOU_TV_ENC_HD_DAC_CLK 126
-#define ZX296702_VOU_TV_ENC_SD_DAC_CLK 127
-#define ZX296702_A9_PERIPHCLK 128
-#define ZX296702_TOPCLK_END 129
-
-#define ZX296702_SDMMC1_WCLK_MUX 0
-#define ZX296702_SDMMC1_WCLK_DIV 1
-#define ZX296702_SDMMC1_WCLK 2
-#define ZX296702_SDMMC1_PCLK 3
-#define ZX296702_SPDIF0_WCLK_MUX 4
-#define ZX296702_SPDIF0_WCLK 5
-#define ZX296702_SPDIF0_PCLK 6
-#define ZX296702_SPDIF0_DIV 7
-#define ZX296702_I2S0_WCLK_MUX 8
-#define ZX296702_I2S0_WCLK 9
-#define ZX296702_I2S0_PCLK 10
-#define ZX296702_I2S0_DIV 11
-#define ZX296702_I2S1_WCLK_MUX 12
-#define ZX296702_I2S1_WCLK 13
-#define ZX296702_I2S1_PCLK 14
-#define ZX296702_I2S1_DIV 15
-#define ZX296702_I2S2_WCLK_MUX 16
-#define ZX296702_I2S2_WCLK 17
-#define ZX296702_I2S2_PCLK 18
-#define ZX296702_I2S2_DIV 19
-#define ZX296702_GPIO_CLK 20
-#define ZX296702_LSP0CLK_END 21
-
-#define ZX296702_UART0_WCLK_MUX 0
-#define ZX296702_UART0_WCLK 1
-#define ZX296702_UART0_PCLK 2
-#define ZX296702_UART1_WCLK_MUX 3
-#define ZX296702_UART1_WCLK 4
-#define ZX296702_UART1_PCLK 5
-#define ZX296702_SDMMC0_WCLK_MUX 6
-#define ZX296702_SDMMC0_WCLK_DIV 7
-#define ZX296702_SDMMC0_WCLK 8
-#define ZX296702_SDMMC0_PCLK 9
-#define ZX296702_SPDIF1_WCLK_MUX 10
-#define ZX296702_SPDIF1_WCLK 11
-#define ZX296702_SPDIF1_PCLK 12
-#define ZX296702_SPDIF1_DIV 13
-#define ZX296702_LSP1CLK_END 14
-
-#endif /* __DT_BINDINGS_CLOCK_ZX296702_H */
diff --git a/include/dt-bindings/clock/zx296718-clock.h b/include/dt-bindings/clock/zx296718-clock.h
deleted file mode 100644
index bf2ff6d2ee23..000000000000
--- a/include/dt-bindings/clock/zx296718-clock.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2015 - 2016 ZTE Corporation.
- */
-#ifndef __DT_BINDINGS_CLOCK_ZX296718_H
-#define __DT_BINDINGS_CLOCK_ZX296718_H
-
-/* PLL */
-#define ZX296718_PLL_CPU 1
-#define ZX296718_PLL_MAC 2
-#define ZX296718_PLL_MM0 3
-#define ZX296718_PLL_MM1 4
-#define ZX296718_PLL_VGA 5
-#define ZX296718_PLL_DDR 6
-#define ZX296718_PLL_AUDIO 7
-#define ZX296718_PLL_HSIC 8
-#define CPU_DBG_GATE 9
-#define A72_GATE 10
-#define CPU_PERI_GATE 11
-#define A53_GATE 12
-#define DDR1_GATE 13
-#define DDR0_GATE 14
-#define SD1_WCLK 15
-#define SD1_AHB 16
-#define SD0_WCLK 17
-#define SD0_AHB 18
-#define EMMC_WCLK 19
-#define EMMC_NAND_AXI 20
-#define NAND_WCLK 21
-#define EMMC_NAND_AHB 22
-#define LSP1_148M5 23
-#define LSP1_99M 24
-#define LSP1_24M 25
-#define LSP0_74M25 26
-#define LSP0_32K 27
-#define LSP0_148M5 28
-#define LSP0_99M 29
-#define LSP0_24M 30
-#define DEMUX_AXI 31
-#define DEMUX_APB 32
-#define DEMUX_148M5 33
-#define DEMUX_108M 34
-#define AUDIO_APB 35
-#define AUDIO_99M 36
-#define AUDIO_24M 37
-#define AUDIO_16M384 38
-#define AUDIO_32K 39
-#define WDT_WCLK 40
-#define TIMER_WCLK 41
-#define VDE_ACLK 42
-#define VCE_ACLK 43
-#define HDE_ACLK 44
-#define GPU_ACLK 45
-#define SAPPU_ACLK 46
-#define SAPPU_WCLK 47
-#define VOU_ACLK 48
-#define VOU_MAIN_WCLK 49
-#define VOU_AUX_WCLK 50
-#define VOU_PPU_WCLK 51
-#define MIPI_CFG_CLK 52
-#define VGA_I2C_WCLK 53
-#define MIPI_REF_CLK 54
-#define HDMI_OSC_CEC 55
-#define HDMI_OSC_CLK 56
-#define HDMI_XCLK 57
-#define VIU_M0_ACLK 58
-#define VIU_M1_ACLK 59
-#define VIU_WCLK 60
-#define VIU_JPEG_WCLK 61
-#define VIU_CFG_CLK 62
-#define TS_SYS_WCLK 63
-#define TS_SYS_108M 64
-#define USB20_HCLK 65
-#define USB20_PHY_CLK 66
-#define USB21_HCLK 67
-#define USB21_PHY_CLK 68
-#define GMAC_RMIICLK 69
-#define GMAC_PCLK 70
-#define GMAC_ACLK 71
-#define GMAC_RFCLK 72
-#define TEMPSENSOR_GATE 73
-
-#define TOP_NR_CLKS 74
-
-
-#define LSP0_TIMER3_PCLK 1
-#define LSP0_TIMER3_WCLK 2
-#define LSP0_TIMER4_PCLK 3
-#define LSP0_TIMER4_WCLK 4
-#define LSP0_TIMER5_PCLK 5
-#define LSP0_TIMER5_WCLK 6
-#define LSP0_UART3_PCLK 7
-#define LSP0_UART3_WCLK 8
-#define LSP0_UART1_PCLK 9
-#define LSP0_UART1_WCLK 10
-#define LSP0_UART2_PCLK 11
-#define LSP0_UART2_WCLK 12
-#define LSP0_SPIFC0_PCLK 13
-#define LSP0_SPIFC0_WCLK 14
-#define LSP0_I2C4_PCLK 15
-#define LSP0_I2C4_WCLK 16
-#define LSP0_I2C5_PCLK 17
-#define LSP0_I2C5_WCLK 18
-#define LSP0_SSP0_PCLK 19
-#define LSP0_SSP0_WCLK 20
-#define LSP0_SSP1_PCLK 21
-#define LSP0_SSP1_WCLK 22
-#define LSP0_USIM_PCLK 23
-#define LSP0_USIM_WCLK 24
-#define LSP0_GPIO_PCLK 25
-#define LSP0_GPIO_WCLK 26
-#define LSP0_I2C3_PCLK 27
-#define LSP0_I2C3_WCLK 28
-
-#define LSP0_NR_CLKS 29
-
-
-#define LSP1_UART4_PCLK 1
-#define LSP1_UART4_WCLK 2
-#define LSP1_UART5_PCLK 3
-#define LSP1_UART5_WCLK 4
-#define LSP1_PWM_PCLK 5
-#define LSP1_PWM_WCLK 6
-#define LSP1_I2C2_PCLK 7
-#define LSP1_I2C2_WCLK 8
-#define LSP1_SSP2_PCLK 9
-#define LSP1_SSP2_WCLK 10
-#define LSP1_SSP3_PCLK 11
-#define LSP1_SSP3_WCLK 12
-#define LSP1_SSP4_PCLK 13
-#define LSP1_SSP4_WCLK 14
-#define LSP1_USIM1_PCLK 15
-#define LSP1_USIM1_WCLK 16
-
-#define LSP1_NR_CLKS 17
-
-
-#define AUDIO_I2S0_WCLK 1
-#define AUDIO_I2S0_PCLK 2
-#define AUDIO_I2S1_WCLK 3
-#define AUDIO_I2S1_PCLK 4
-#define AUDIO_I2S2_WCLK 5
-#define AUDIO_I2S2_PCLK 6
-#define AUDIO_I2S3_WCLK 7
-#define AUDIO_I2S3_PCLK 8
-#define AUDIO_I2C0_WCLK 9
-#define AUDIO_I2C0_PCLK 10
-#define AUDIO_SPDIF0_WCLK 11
-#define AUDIO_SPDIF0_PCLK 12
-#define AUDIO_SPDIF1_WCLK 13
-#define AUDIO_SPDIF1_PCLK 14
-#define AUDIO_TIMER_WCLK 15
-#define AUDIO_TIMER_PCLK 16
-#define AUDIO_TDM_WCLK 17
-#define AUDIO_TDM_PCLK 18
-#define AUDIO_TS_PCLK 19
-#define I2S0_WCLK_MUX 20
-#define I2S1_WCLK_MUX 21
-#define I2S2_WCLK_MUX 22
-#define I2S3_WCLK_MUX 23
-
-#define AUDIO_NR_CLKS 24
-
-#endif
diff --git a/include/dt-bindings/display/sdtv-standards.h b/include/dt-bindings/display/sdtv-standards.h
new file mode 100644
index 000000000000..fbc1a3db2ea7
--- /dev/null
+++ b/include/dt-bindings/display/sdtv-standards.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR X11 */
+/*
+ * Copyright 2019 Pengutronix, Marco Felsch <kernel@pengutronix.de>
+ */
+
+#ifndef _DT_BINDINGS_DISPLAY_SDTV_STDS_H
+#define _DT_BINDINGS_DISPLAY_SDTV_STDS_H
+
+/*
+ * Attention: Keep the SDTV_STD_* bit definitions in sync with
+ * include/uapi/linux/videodev2.h V4L2_STD_* bit definitions.
+ */
+/* One bit for each standard */
+#define SDTV_STD_PAL_B 0x00000001
+#define SDTV_STD_PAL_B1 0x00000002
+#define SDTV_STD_PAL_G 0x00000004
+#define SDTV_STD_PAL_H 0x00000008
+#define SDTV_STD_PAL_I 0x00000010
+#define SDTV_STD_PAL_D 0x00000020
+#define SDTV_STD_PAL_D1 0x00000040
+#define SDTV_STD_PAL_K 0x00000080
+
+#define SDTV_STD_PAL (SDTV_STD_PAL_B | \
+ SDTV_STD_PAL_B1 | \
+ SDTV_STD_PAL_G | \
+ SDTV_STD_PAL_H | \
+ SDTV_STD_PAL_I | \
+ SDTV_STD_PAL_D | \
+ SDTV_STD_PAL_D1 | \
+ SDTV_STD_PAL_K)
+
+#define SDTV_STD_PAL_M 0x00000100
+#define SDTV_STD_PAL_N 0x00000200
+#define SDTV_STD_PAL_Nc 0x00000400
+#define SDTV_STD_PAL_60 0x00000800
+
+#define SDTV_STD_NTSC_M 0x00001000 /* BTSC */
+#define SDTV_STD_NTSC_M_JP 0x00002000 /* EIA-J */
+#define SDTV_STD_NTSC_443 0x00004000
+#define SDTV_STD_NTSC_M_KR 0x00008000 /* FM A2 */
+
+#define SDTV_STD_NTSC (SDTV_STD_NTSC_M | \
+ SDTV_STD_NTSC_M_JP | \
+ SDTV_STD_NTSC_M_KR)
+
+#define SDTV_STD_SECAM_B 0x00010000
+#define SDTV_STD_SECAM_D 0x00020000
+#define SDTV_STD_SECAM_G 0x00040000
+#define SDTV_STD_SECAM_H 0x00080000
+#define SDTV_STD_SECAM_K 0x00100000
+#define SDTV_STD_SECAM_K1 0x00200000
+#define SDTV_STD_SECAM_L 0x00400000
+#define SDTV_STD_SECAM_LC 0x00800000
+
+#define SDTV_STD_SECAM (SDTV_STD_SECAM_B | \
+ SDTV_STD_SECAM_D | \
+ SDTV_STD_SECAM_G | \
+ SDTV_STD_SECAM_H | \
+ SDTV_STD_SECAM_K | \
+ SDTV_STD_SECAM_K1 | \
+ SDTV_STD_SECAM_L | \
+ SDTV_STD_SECAM_LC)
+
+/* Standards for countries with 60 Hz line frequency */
+#define SDTV_STD_525_60 (SDTV_STD_PAL_M | \
+ SDTV_STD_PAL_60 | \
+ SDTV_STD_NTSC | \
+ SDTV_STD_NTSC_443)
+
+/* Standards for countries with 50 Hz line frequency */
+#define SDTV_STD_625_50 (SDTV_STD_PAL | \
+ SDTV_STD_PAL_N | \
+ SDTV_STD_PAL_Nc | \
+ SDTV_STD_SECAM)
+
+#endif /* _DT_BINDINGS_DISPLAY_SDTV_STDS_H */
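Since each standard owns one bit, the groups compose with bitwise OR. A sketch assuming the sdtv-standards property of the analog-TV connector binding:

    #include <dt-bindings/display/sdtv-standards.h>

    connector {
            compatible = "composite-video-connector";
            /* limit this connector to the PAL and NTSC families */
            sdtv-standards = <(SDTV_STD_PAL | SDTV_STD_NTSC)>;
    };
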
diff --git a/include/dt-bindings/dma/jz4775-dma.h b/include/dt-bindings/dma/jz4775-dma.h
new file mode 100644
index 000000000000..8d27e2c69dca
--- /dev/null
+++ b/include/dt-bindings/dma/jz4775-dma.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * This header provides macros for JZ4775 DMA bindings.
+ *
+ * Copyright (c) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+ */
+
+#ifndef __DT_BINDINGS_DMA_JZ4775_DMA_H__
+#define __DT_BINDINGS_DMA_JZ4775_DMA_H__
+
+/*
+ * Request type numbers for the JZ4775 DMA controller (written to the DRTn
+ * register for the channel).
+ */
+#define JZ4775_DMA_I2S0_TX 0x6
+#define JZ4775_DMA_I2S0_RX 0x7
+#define JZ4775_DMA_AUTO 0x8
+#define JZ4775_DMA_SADC_RX 0x9
+#define JZ4775_DMA_UART3_TX 0x0e
+#define JZ4775_DMA_UART3_RX 0x0f
+#define JZ4775_DMA_UART2_TX 0x10
+#define JZ4775_DMA_UART2_RX 0x11
+#define JZ4775_DMA_UART1_TX 0x12
+#define JZ4775_DMA_UART1_RX 0x13
+#define JZ4775_DMA_UART0_TX 0x14
+#define JZ4775_DMA_UART0_RX 0x15
+#define JZ4775_DMA_SSI0_TX 0x16
+#define JZ4775_DMA_SSI0_RX 0x17
+#define JZ4775_DMA_MSC0_TX 0x1a
+#define JZ4775_DMA_MSC0_RX 0x1b
+#define JZ4775_DMA_MSC1_TX 0x1c
+#define JZ4775_DMA_MSC1_RX 0x1d
+#define JZ4775_DMA_MSC2_TX 0x1e
+#define JZ4775_DMA_MSC2_RX 0x1f
+#define JZ4775_DMA_PCM0_TX 0x20
+#define JZ4775_DMA_PCM0_RX 0x21
+#define JZ4775_DMA_SMB0_TX 0x24
+#define JZ4775_DMA_SMB0_RX 0x25
+#define JZ4775_DMA_SMB1_TX 0x26
+#define JZ4775_DMA_SMB1_RX 0x27
+#define JZ4775_DMA_SMB2_TX 0x28
+#define JZ4775_DMA_SMB2_RX 0x29
+
+#endif /* __DT_BINDINGS_DMA_JZ4775_DMA_H__ */
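These request types go in the first cell of a dmas specifier for the Ingenic DMA controller; by analogy with other JZ47xx parts, the second cell is a bitmask of the channels the transfer may use. A sketch with assumed labels and unit address:

    #include <dt-bindings/dma/jz4775-dma.h>

    serial@10030000 {
            /* 0xffffffff lets the controller pick any free channel */
            dmas = <&dma JZ4775_DMA_UART0_RX 0xffffffff>,
                   <&dma JZ4775_DMA_UART0_TX 0xffffffff>;
            dma-names = "rx", "tx";
    };
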
diff --git a/include/dt-bindings/dma/qcom-gpi.h b/include/dt-bindings/dma/qcom-gpi.h
new file mode 100644
index 000000000000..ebda2a37f52a
--- /dev/null
+++ b/include/dt-bindings/dma/qcom-gpi.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/* Copyright (c) 2020, Linaro Ltd. */
+
+#ifndef __DT_BINDINGS_DMA_QCOM_GPI_H__
+#define __DT_BINDINGS_DMA_QCOM_GPI_H__
+
+#define QCOM_GPI_SPI 1
+#define QCOM_GPI_UART 2
+#define QCOM_GPI_I2C 3
+
+#endif /* __DT_BINDINGS_DMA_QCOM_GPI_H__ */
diff --git a/include/dt-bindings/dma/x2000-dma.h b/include/dt-bindings/dma/x2000-dma.h
new file mode 100644
index 000000000000..db2cd4830b00
--- /dev/null
+++ b/include/dt-bindings/dma/x2000-dma.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * This header provides macros for X2000 DMA bindings.
+ *
+ * Copyright (c) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+ */
+
+#ifndef __DT_BINDINGS_DMA_X2000_DMA_H__
+#define __DT_BINDINGS_DMA_X2000_DMA_H__
+
+/*
+ * Request type numbers for the X2000 DMA controller (written to the DRTn
+ * register for the channel).
+ */
+#define X2000_DMA_AUTO 0x8
+#define X2000_DMA_UART5_TX 0xa
+#define X2000_DMA_UART5_RX 0xb
+#define X2000_DMA_UART4_TX 0xc
+#define X2000_DMA_UART4_RX 0xd
+#define X2000_DMA_UART3_TX 0xe
+#define X2000_DMA_UART3_RX 0xf
+#define X2000_DMA_UART2_TX 0x10
+#define X2000_DMA_UART2_RX 0x11
+#define X2000_DMA_UART1_TX 0x12
+#define X2000_DMA_UART1_RX 0x13
+#define X2000_DMA_UART0_TX 0x14
+#define X2000_DMA_UART0_RX 0x15
+#define X2000_DMA_SSI0_TX 0x16
+#define X2000_DMA_SSI0_RX 0x17
+#define X2000_DMA_SSI1_TX 0x18
+#define X2000_DMA_SSI1_RX 0x19
+#define X2000_DMA_I2C0_TX 0x24
+#define X2000_DMA_I2C0_RX 0x25
+#define X2000_DMA_I2C1_TX 0x26
+#define X2000_DMA_I2C1_RX 0x27
+#define X2000_DMA_I2C2_TX 0x28
+#define X2000_DMA_I2C2_RX 0x29
+#define X2000_DMA_I2C3_TX 0x2a
+#define X2000_DMA_I2C3_RX 0x2b
+#define X2000_DMA_I2C4_TX 0x2c
+#define X2000_DMA_I2C4_RX 0x2d
+#define X2000_DMA_I2C5_TX 0x2e
+#define X2000_DMA_I2C5_RX 0x2f
+#define X2000_DMA_UART6_TX 0x30
+#define X2000_DMA_UART6_RX 0x31
+#define X2000_DMA_UART7_TX 0x32
+#define X2000_DMA_UART7_RX 0x33
+#define X2000_DMA_UART8_TX 0x34
+#define X2000_DMA_UART8_RX 0x35
+#define X2000_DMA_UART9_TX 0x36
+#define X2000_DMA_UART9_RX 0x37
+#define X2000_DMA_SADC_RX 0x38
+
+#endif /* __DT_BINDINGS_DMA_X2000_DMA_H__ */
diff --git a/include/dt-bindings/dma/xlnx-zynqmp-dpdma.h b/include/dt-bindings/dma/xlnx-zynqmp-dpdma.h
new file mode 100644
index 000000000000..3719cda5679d
--- /dev/null
+++ b/include/dt-bindings/dma/xlnx-zynqmp-dpdma.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#ifndef __DT_BINDINGS_DMA_XLNX_ZYNQMP_DPDMA_H__
+#define __DT_BINDINGS_DMA_XLNX_ZYNQMP_DPDMA_H__
+
+#define ZYNQMP_DPDMA_VIDEO0 0
+#define ZYNQMP_DPDMA_VIDEO1 1
+#define ZYNQMP_DPDMA_VIDEO2 2
+#define ZYNQMP_DPDMA_GRAPHICS 3
+#define ZYNQMP_DPDMA_AUDIO0 4
+#define ZYNQMP_DPDMA_AUDIO1 5
+
+#endif /* __DT_BINDINGS_DMA_XLNX_ZYNQMP_DPDMA_H__ */
diff --git a/include/dt-bindings/firmware/imx/rsrc.h b/include/dt-bindings/firmware/imx/rsrc.h
index 4e61f6485097..1675de05ad33 100644
--- a/include/dt-bindings/firmware/imx/rsrc.h
+++ b/include/dt-bindings/firmware/imx/rsrc.h
@@ -37,10 +37,14 @@
#define IMX_SC_R_DC_0_BLIT2 21
#define IMX_SC_R_DC_0_BLIT_OUT 22
#define IMX_SC_R_PERF 23
+#define IMX_SC_R_USB_1_PHY 24
#define IMX_SC_R_DC_0_WARP 25
+#define IMX_SC_R_V2X_MU_0 26
+#define IMX_SC_R_V2X_MU_1 27
#define IMX_SC_R_DC_0_VIDEO0 28
#define IMX_SC_R_DC_0_VIDEO1 29
#define IMX_SC_R_DC_0_FRAC0 30
+#define IMX_SC_R_V2X_MU_2 31
#define IMX_SC_R_DC_0 32
#define IMX_SC_R_GPU_2_PID0 33
#define IMX_SC_R_DC_0_PLL_0 34
@@ -49,7 +53,10 @@
#define IMX_SC_R_DC_1_BLIT1 37
#define IMX_SC_R_DC_1_BLIT2 38
#define IMX_SC_R_DC_1_BLIT_OUT 39
+#define IMX_SC_R_V2X_MU_3 40
+#define IMX_SC_R_V2X_MU_4 41
#define IMX_SC_R_DC_1_WARP 42
+#define IMX_SC_R_SECVIO 44
#define IMX_SC_R_DC_1_VIDEO0 45
#define IMX_SC_R_DC_1_VIDEO1 46
#define IMX_SC_R_DC_1_FRAC0 47
@@ -111,6 +118,7 @@
#define IMX_SC_R_CAN_0 105
#define IMX_SC_R_CAN_1 106
#define IMX_SC_R_CAN_2 107
+#define IMX_SC_R_CAN(x) (IMX_SC_R_CAN_0 + (x))
#define IMX_SC_R_DMA_1_CH0 108
#define IMX_SC_R_DMA_1_CH1 109
#define IMX_SC_R_DMA_1_CH2 110
@@ -547,4 +555,88 @@
#define IMX_SC_R_ATTESTATION 545
#define IMX_SC_R_LAST 546
+/*
+ * Defines for SC PM CLK
+ */
+#define IMX_SC_PM_CLK_SLV_BUS 0 /* Slave bus clock */
+#define IMX_SC_PM_CLK_MST_BUS 1 /* Master bus clock */
+#define IMX_SC_PM_CLK_PER 2 /* Peripheral clock */
+#define IMX_SC_PM_CLK_PHY 3 /* Phy clock */
+#define IMX_SC_PM_CLK_MISC 4 /* Misc clock */
+#define IMX_SC_PM_CLK_MISC0 0 /* Misc 0 clock */
+#define IMX_SC_PM_CLK_MISC1 1 /* Misc 1 clock */
+#define IMX_SC_PM_CLK_MISC2 2 /* Misc 2 clock */
+#define IMX_SC_PM_CLK_MISC3 3 /* Misc 3 clock */
+#define IMX_SC_PM_CLK_MISC4 4 /* Misc 4 clock */
+#define IMX_SC_PM_CLK_CPU 2 /* CPU clock */
+#define IMX_SC_PM_CLK_PLL 4 /* PLL */
+#define IMX_SC_PM_CLK_BYPASS 4 /* Bypass clock */
+
+/*
+ * Defines for SC CONTROL
+ */
+#define IMX_SC_C_TEMP 0
+#define IMX_SC_C_TEMP_HI 1
+#define IMX_SC_C_TEMP_LOW 2
+#define IMX_SC_C_PXL_LINK_MST1_ADDR 3
+#define IMX_SC_C_PXL_LINK_MST2_ADDR 4
+#define IMX_SC_C_PXL_LINK_MST_ENB 5
+#define IMX_SC_C_PXL_LINK_MST1_ENB 6
+#define IMX_SC_C_PXL_LINK_MST2_ENB 7
+#define IMX_SC_C_PXL_LINK_SLV1_ADDR 8
+#define IMX_SC_C_PXL_LINK_SLV2_ADDR 9
+#define IMX_SC_C_PXL_LINK_MST_VLD 10
+#define IMX_SC_C_PXL_LINK_MST1_VLD 11
+#define IMX_SC_C_PXL_LINK_MST2_VLD 12
+#define IMX_SC_C_SINGLE_MODE 13
+#define IMX_SC_C_ID 14
+#define IMX_SC_C_PXL_CLK_POLARITY 15
+#define IMX_SC_C_LINESTATE 16
+#define IMX_SC_C_PCIE_G_RST 17
+#define IMX_SC_C_PCIE_BUTTON_RST 18
+#define IMX_SC_C_PCIE_PERST 19
+#define IMX_SC_C_PHY_RESET 20
+#define IMX_SC_C_PXL_LINK_RATE_CORRECTION 21
+#define IMX_SC_C_PANIC 22
+#define IMX_SC_C_PRIORITY_GROUP 23
+#define IMX_SC_C_TXCLK 24
+#define IMX_SC_C_CLKDIV 25
+#define IMX_SC_C_DISABLE_50 26
+#define IMX_SC_C_DISABLE_125 27
+#define IMX_SC_C_SEL_125 28
+#define IMX_SC_C_MODE 29
+#define IMX_SC_C_SYNC_CTRL0 30
+#define IMX_SC_C_KACHUNK_CNT 31
+#define IMX_SC_C_KACHUNK_SEL 32
+#define IMX_SC_C_SYNC_CTRL1 33
+#define IMX_SC_C_DPI_RESET 34
+#define IMX_SC_C_MIPI_RESET 35
+#define IMX_SC_C_DUAL_MODE 36
+#define IMX_SC_C_VOLTAGE 37
+#define IMX_SC_C_PXL_LINK_SEL 38
+#define IMX_SC_C_OFS_SEL 39
+#define IMX_SC_C_OFS_AUDIO 40
+#define IMX_SC_C_OFS_PERIPH 41
+#define IMX_SC_C_OFS_IRQ 42
+#define IMX_SC_C_RST0 43
+#define IMX_SC_C_RST1 44
+#define IMX_SC_C_SEL0 45
+#define IMX_SC_C_CALIB0 46
+#define IMX_SC_C_CALIB1 47
+#define IMX_SC_C_CALIB2 48
+#define IMX_SC_C_IPG_DEBUG 49
+#define IMX_SC_C_IPG_DOZE 50
+#define IMX_SC_C_IPG_WAIT 51
+#define IMX_SC_C_IPG_STOP 52
+#define IMX_SC_C_IPG_STOP_MODE 53
+#define IMX_SC_C_IPG_STOP_ACK 54
+#define IMX_SC_C_SYNC_CTRL 55
+#define IMX_SC_C_OFS_AUDIO_ALT 56
+#define IMX_SC_C_DSP_BYP 57
+#define IMX_SC_C_CLK_GEN_EN 58
+#define IMX_SC_C_INTF_SEL 59
+#define IMX_SC_C_RXC_DLY 60
+#define IMX_SC_C_TIMER_SEL 61
+#define IMX_SC_C_LAST 62
+
#endif /* __DT_BINDINGS_RSCRC_IMX_H */
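The new IMX_SC_R_CAN(x) helper offsets from IMX_SC_R_CAN_0, so IMX_SC_R_CAN(1) expands to 106. A power-domain consumer sketch, assuming the &pd SCU power-domain provider used on i.MX8 parts (the flexcan unit address is illustrative):

    #include <dt-bindings/firmware/imx/rsrc.h>

    can@5a8d0000 {
            /* same resource as IMX_SC_R_CAN_0 (105) */
            power-domains = <&pd IMX_SC_R_CAN(0)>;
    };
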
diff --git a/include/dt-bindings/gce/mt6779-gce.h b/include/dt-bindings/gce/mt6779-gce.h
new file mode 100644
index 000000000000..06101316ace4
--- /dev/null
+++ b/include/dt-bindings/gce/mt6779-gce.h
@@ -0,0 +1,222 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Dennis-YC Hsieh <dennis-yc.hsieh@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_GCE_MT6779_H
+#define _DT_BINDINGS_GCE_MT6779_H
+
+#define CMDQ_NO_TIMEOUT 0xffffffff
+
+/* GCE HW thread priority */
+#define CMDQ_THR_PRIO_LOWEST 0
+#define CMDQ_THR_PRIO_1 1
+#define CMDQ_THR_PRIO_2 2
+#define CMDQ_THR_PRIO_3 3
+#define CMDQ_THR_PRIO_4 4
+#define CMDQ_THR_PRIO_5 5
+#define CMDQ_THR_PRIO_6 6
+#define CMDQ_THR_PRIO_HIGHEST 7
+
+/* GCE subsys table */
+#define SUBSYS_1300XXXX 0
+#define SUBSYS_1400XXXX 1
+#define SUBSYS_1401XXXX 2
+#define SUBSYS_1402XXXX 3
+#define SUBSYS_1502XXXX 4
+#define SUBSYS_1880XXXX 5
+#define SUBSYS_1881XXXX 6
+#define SUBSYS_1882XXXX 7
+#define SUBSYS_1883XXXX 8
+#define SUBSYS_1884XXXX 9
+#define SUBSYS_1000XXXX 10
+#define SUBSYS_1001XXXX 11
+#define SUBSYS_1002XXXX 12
+#define SUBSYS_1003XXXX 13
+#define SUBSYS_1004XXXX 14
+#define SUBSYS_1005XXXX 15
+#define SUBSYS_1020XXXX 16
+#define SUBSYS_1028XXXX 17
+#define SUBSYS_1700XXXX 18
+#define SUBSYS_1701XXXX 19
+#define SUBSYS_1702XXXX 20
+#define SUBSYS_1703XXXX 21
+#define SUBSYS_1800XXXX 22
+#define SUBSYS_1801XXXX 23
+#define SUBSYS_1802XXXX 24
+#define SUBSYS_1804XXXX 25
+#define SUBSYS_1805XXXX 26
+#define SUBSYS_1808XXXX 27
+#define SUBSYS_180aXXXX 28
+#define SUBSYS_180bXXXX 29
+#define CMDQ_SUBSYS_OFF 32
+
+/* GCE hardware events */
+#define CMDQ_EVENT_DISP_RDMA0_SOF 0
+#define CMDQ_EVENT_DISP_RDMA1_SOF 1
+#define CMDQ_EVENT_MDP_RDMA0_SOF 2
+#define CMDQ_EVENT_MDP_RDMA1_SOF 3
+#define CMDQ_EVENT_MDP_RSZ0_SOF 4
+#define CMDQ_EVENT_MDP_RSZ1_SOF 5
+#define CMDQ_EVENT_MDP_TDSHP_SOF 6
+#define CMDQ_EVENT_MDP_WROT0_SOF 7
+#define CMDQ_EVENT_MDP_WROT1_SOF 8
+#define CMDQ_EVENT_DISP_OVL0_SOF 9
+#define CMDQ_EVENT_DISP_2L_OVL0_SOF 10
+#define CMDQ_EVENT_DISP_2L_OVL1_SOF 11
+#define CMDQ_EVENT_DISP_WDMA0_SOF 12
+#define CMDQ_EVENT_DISP_COLOR0_SOF 13
+#define CMDQ_EVENT_DISP_CCORR0_SOF 14
+#define CMDQ_EVENT_DISP_AAL0_SOF 15
+#define CMDQ_EVENT_DISP_GAMMA0_SOF 16
+#define CMDQ_EVENT_DISP_DITHER0_SOF 17
+#define CMDQ_EVENT_DISP_PWM0_SOF 18
+#define CMDQ_EVENT_DISP_DSI0_SOF 19
+#define CMDQ_EVENT_DISP_DPI0_SOF 20
+#define CMDQ_EVENT_DISP_POSTMASK0_SOF 21
+#define CMDQ_EVENT_DISP_RSZ0_SOF 22
+#define CMDQ_EVENT_MDP_AAL_SOF 23
+#define CMDQ_EVENT_MDP_CCORR_SOF 24
+#define CMDQ_EVENT_DISP_DBI0_SOF 25
+#define CMDQ_EVENT_ISP_RELAY_SOF 26
+#define CMDQ_EVENT_IPU_RELAY_SOF 27
+#define CMDQ_EVENT_DISP_RDMA0_EOF 28
+#define CMDQ_EVENT_DISP_RDMA1_EOF 29
+#define CMDQ_EVENT_MDP_RDMA0_EOF 30
+#define CMDQ_EVENT_MDP_RDMA1_EOF 31
+#define CMDQ_EVENT_MDP_RSZ0_EOF 32
+#define CMDQ_EVENT_MDP_RSZ1_EOF 33
+#define CMDQ_EVENT_MDP_TDSHP_EOF 34
+#define CMDQ_EVENT_MDP_WROT0_W_EOF 35
+#define CMDQ_EVENT_MDP_WROT1_W_EOF 36
+#define CMDQ_EVENT_DISP_OVL0_EOF 37
+#define CMDQ_EVENT_DISP_2L_OVL0_EOF 38
+#define CMDQ_EVENT_DISP_2L_OVL1_EOF 39
+#define CMDQ_EVENT_DISP_WDMA0_EOF 40
+#define CMDQ_EVENT_DISP_COLOR0_EOF 41
+#define CMDQ_EVENT_DISP_CCORR0_EOF 42
+#define CMDQ_EVENT_DISP_AAL0_EOF 43
+#define CMDQ_EVENT_DISP_GAMMA0_EOF 44
+#define CMDQ_EVENT_DISP_DITHER0_EOF 45
+#define CMDQ_EVENT_DISP_DSI0_EOF 46
+#define CMDQ_EVENT_DISP_DPI0_EOF 47
+#define CMDQ_EVENT_DISP_RSZ0_EOF 49
+#define CMDQ_EVENT_MDP_AAL_FRAME_DONE 50
+#define CMDQ_EVENT_MDP_CCORR_FRAME_DONE 51
+#define CMDQ_EVENT_DISP_POSTMASK0_FRAME_DONE 52
+#define CMDQ_EVENT_MUTEX0_STREAM_EOF 130
+#define CMDQ_EVENT_MUTEX1_STREAM_EOF 131
+#define CMDQ_EVENT_MUTEX2_STREAM_EOF 132
+#define CMDQ_EVENT_MUTEX3_STREAM_EOF 133
+#define CMDQ_EVENT_MUTEX4_STREAM_EOF 134
+#define CMDQ_EVENT_MUTEX5_STREAM_EOF 135
+#define CMDQ_EVENT_MUTEX6_STREAM_EOF 136
+#define CMDQ_EVENT_MUTEX7_STREAM_EOF 137
+#define CMDQ_EVENT_MUTEX8_STREAM_EOF 138
+#define CMDQ_EVENT_MUTEX9_STREAM_EOF 139
+#define CMDQ_EVENT_MUTEX10_STREAM_EOF 140
+#define CMDQ_EVENT_MUTEX11_STREAM_EOF 141
+#define CMDQ_EVENT_DISP_RDMA0_UNDERRUN 142
+#define CMDQ_EVENT_DISP_RDMA1_UNDERRUN 143
+#define CMDQ_EVENT_DISP_RDMA2_UNDERRUN 144
+#define CMDQ_EVENT_DISP_RDMA3_UNDERRUN 145
+#define CMDQ_EVENT_DSI0_TE 146
+#define CMDQ_EVENT_DSI0_IRQ_EVENT 147
+#define CMDQ_EVENT_DSI0_DONE_EVENT 148
+#define CMDQ_EVENT_DISP_POSTMASK0_RST_DONE 150
+#define CMDQ_EVENT_DISP_WDMA0_RST_DONE 151
+#define CMDQ_EVENT_MDP_WROT0_RST_DONE 153
+#define CMDQ_EVENT_MDP_RDMA0_RST_DONE 154
+#define CMDQ_EVENT_DISP_OVL0_RST_DONE 155
+#define CMDQ_EVENT_DISP_OVL0_2L_RST_DONE 156
+#define CMDQ_EVENT_DISP_OVL1_2L_RST_DONE 157
+#define CMDQ_EVENT_DIP_CQ_THREAD0_EOF 257
+#define CMDQ_EVENT_DIP_CQ_THREAD1_EOF 258
+#define CMDQ_EVENT_DIP_CQ_THREAD2_EOF 259
+#define CMDQ_EVENT_DIP_CQ_THREAD3_EOF 260
+#define CMDQ_EVENT_DIP_CQ_THREAD4_EOF 261
+#define CMDQ_EVENT_DIP_CQ_THREAD5_EOF 262
+#define CMDQ_EVENT_DIP_CQ_THREAD6_EOF 263
+#define CMDQ_EVENT_DIP_CQ_THREAD7_EOF 264
+#define CMDQ_EVENT_DIP_CQ_THREAD8_EOF 265
+#define CMDQ_EVENT_DIP_CQ_THREAD9_EOF 266
+#define CMDQ_EVENT_DIP_CQ_THREAD10_EOF 267
+#define CMDQ_EVENT_DIP_CQ_THREAD11_EOF 268
+#define CMDQ_EVENT_DIP_CQ_THREAD12_EOF 269
+#define CMDQ_EVENT_DIP_CQ_THREAD13_EOF 270
+#define CMDQ_EVENT_DIP_CQ_THREAD14_EOF 271
+#define CMDQ_EVENT_DIP_CQ_THREAD15_EOF 272
+#define CMDQ_EVENT_DIP_CQ_THREAD16_EOF 273
+#define CMDQ_EVENT_DIP_CQ_THREAD17_EOF 274
+#define CMDQ_EVENT_DIP_CQ_THREAD18_EOF 275
+#define CMDQ_EVENT_DIP_DMA_ERR_EVENT 276
+#define CMDQ_EVENT_AMD_FRAME_DONE 277
+#define CMDQ_EVENT_MFB_DONE 278
+#define CMDQ_EVENT_WPE_A_EOF 279
+#define CMDQ_EVENT_VENC_EOF 289
+#define CMDQ_EVENT_VENC_CMDQ_PAUSE_DONE 290
+#define CMDQ_EVENT_JPEG_ENC_EOF 291
+#define CMDQ_EVENT_VENC_MB_DONE 292
+#define CMDQ_EVENT_VENC_128BYTE_CNT_DONE 293
+#define CMDQ_EVENT_ISP_FRAME_DONE_A 321
+#define CMDQ_EVENT_ISP_FRAME_DONE_B 322
+#define CMDQ_EVENT_ISP_FRAME_DONE_C 323
+#define CMDQ_EVENT_ISP_CAMSV_0_PASS1_DONE 324
+#define CMDQ_EVENT_ISP_CAMSV_0_2_PASS1_DONE 325
+#define CMDQ_EVENT_ISP_CAMSV_1_PASS1_DONE 326
+#define CMDQ_EVENT_ISP_CAMSV_2_PASS1_DONE 327
+#define CMDQ_EVENT_ISP_CAMSV_3_PASS1_DONE 328
+#define CMDQ_EVENT_ISP_TSF_DONE 329
+#define CMDQ_EVENT_SENINF_0_FIFO_FULL 330
+#define CMDQ_EVENT_SENINF_1_FIFO_FULL 331
+#define CMDQ_EVENT_SENINF_2_FIFO_FULL 332
+#define CMDQ_EVENT_SENINF_3_FIFO_FULL 333
+#define CMDQ_EVENT_SENINF_4_FIFO_FULL 334
+#define CMDQ_EVENT_SENINF_5_FIFO_FULL 335
+#define CMDQ_EVENT_SENINF_6_FIFO_FULL 336
+#define CMDQ_EVENT_SENINF_7_FIFO_FULL 337
+#define CMDQ_EVENT_TG_OVRUN_A_INT_DLY 338
+#define CMDQ_EVENT_TG_OVRUN_B_INT_DLY 339
+#define CMDQ_EVENT_TG_OVRUN_C_INT 340
+#define CMDQ_EVENT_TG_GRABERR_A_INT_DLY 341
+#define CMDQ_EVENT_TG_GRABERR_B_INT_DLY 342
+#define CMDQ_EVENT_TG_GRABERR_C_INT 343
+#define CMDQ_EVENT_CQ_VR_SNAP_A_INT_DLY 344
+#define CMDQ_EVENT_CQ_VR_SNAP_B_INT_DLY 345
+#define CMDQ_EVENT_CQ_VR_SNAP_C_INT 346
+#define CMDQ_EVENT_DMA_R1_ERROR_A_INT_DLY 347
+#define CMDQ_EVENT_DMA_R1_ERROR_B_INT_DLY 348
+#define CMDQ_EVENT_DMA_R1_ERROR_C_INT 349
+#define CMDQ_EVENT_APU_GCE_CORE0_EVENT_0 353
+#define CMDQ_EVENT_APU_GCE_CORE0_EVENT_1 354
+#define CMDQ_EVENT_APU_GCE_CORE0_EVENT_2 355
+#define CMDQ_EVENT_APU_GCE_CORE0_EVENT_3 356
+#define CMDQ_EVENT_APU_GCE_CORE1_EVENT_0 385
+#define CMDQ_EVENT_APU_GCE_CORE1_EVENT_1 386
+#define CMDQ_EVENT_APU_GCE_CORE1_EVENT_2 387
+#define CMDQ_EVENT_APU_GCE_CORE1_EVENT_3 388
+#define CMDQ_EVENT_VDEC_EVENT_0 416
+#define CMDQ_EVENT_VDEC_EVENT_1 417
+#define CMDQ_EVENT_VDEC_EVENT_2 418
+#define CMDQ_EVENT_VDEC_EVENT_3 419
+#define CMDQ_EVENT_VDEC_EVENT_4 420
+#define CMDQ_EVENT_VDEC_EVENT_5 421
+#define CMDQ_EVENT_VDEC_EVENT_6 422
+#define CMDQ_EVENT_VDEC_EVENT_7 423
+#define CMDQ_EVENT_VDEC_EVENT_8 424
+#define CMDQ_EVENT_VDEC_EVENT_9 425
+#define CMDQ_EVENT_VDEC_EVENT_10 426
+#define CMDQ_EVENT_VDEC_EVENT_11 427
+#define CMDQ_EVENT_VDEC_EVENT_12 428
+#define CMDQ_EVENT_VDEC_EVENT_13 429
+#define CMDQ_EVENT_VDEC_EVENT_14 430
+#define CMDQ_EVENT_VDEC_EVENT_15 431
+#define CMDQ_EVENT_FDVT_DONE 449
+#define CMDQ_EVENT_FE_DONE 450
+#define CMDQ_EVENT_RSC_EOF 451
+#define CMDQ_EVENT_DVS_DONE_ASYNC_SHOT 452
+#define CMDQ_EVENT_DVP_DONE_ASYNC_SHOT 453
+#define CMDQ_EVENT_DSI0_TE_INFRA 898
+
+#endif
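GCE clients take a mailbox channel at one of the thread priorities above and map their registers through the subsys table. A sketch following the scheme used by other MediaTek SoCs (the node, offset, and size are assumptions):

    #include <dt-bindings/gce/mt6779-gce.h>

    dsi@14014000 {
            mboxes = <&gce 0 CMDQ_THR_PRIO_HIGHEST>;
            mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x4000 0x1000>;
    };
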
diff --git a/include/dt-bindings/gce/mt8186-gce.h b/include/dt-bindings/gce/mt8186-gce.h
new file mode 100644
index 000000000000..f12e3cb586ce
--- /dev/null
+++ b/include/dt-bindings/gce/mt8186-gce.h
@@ -0,0 +1,421 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ * Author: Yongqiang Niu <yongqiang.niu@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_GCE_MT8186_H
+#define _DT_BINDINGS_GCE_MT8186_H
+
+/* assigning a timeout of 0 also selects the default */
+#define CMDQ_NO_TIMEOUT 0xffffffff
+#define CMDQ_TIMEOUT_DEFAULT 1000
+
+/* GCE thread priority */
+#define CMDQ_THR_PRIO_LOWEST 0
+#define CMDQ_THR_PRIO_1 1
+#define CMDQ_THR_PRIO_2 2
+#define CMDQ_THR_PRIO_3 3
+#define CMDQ_THR_PRIO_4 4
+#define CMDQ_THR_PRIO_5 5
+#define CMDQ_THR_PRIO_6 6
+#define CMDQ_THR_PRIO_HIGHEST 7
+
+/* number of 32-bit CPR registers */
+#define GCE_CPR_COUNT 1312
+
+/* GCE subsys table */
+#define SUBSYS_1300XXXX 0
+#define SUBSYS_1400XXXX 1
+#define SUBSYS_1401XXXX 2
+#define SUBSYS_1402XXXX 3
+#define SUBSYS_1502XXXX 4
+#define SUBSYS_1582XXXX 5
+#define SUBSYS_1B00XXXX 6
+#define SUBSYS_1C00XXXX 7
+#define SUBSYS_1C10XXXX 8
+#define SUBSYS_1000XXXX 9
+#define SUBSYS_1001XXXX 10
+#define SUBSYS_1020XXXX 11
+#define SUBSYS_1021XXXX 12
+#define SUBSYS_1022XXXX 13
+#define SUBSYS_1023XXXX 14
+#define SUBSYS_1060XXXX 15
+#define SUBSYS_1602XXXX 16
+#define SUBSYS_1608XXXX 17
+#define SUBSYS_1700XXXX 18
+#define SUBSYS_1701XXXX 19
+#define SUBSYS_1702XXXX 20
+#define SUBSYS_1703XXXX 21
+#define SUBSYS_1706XXXX 22
+#define SUBSYS_1A00XXXX 23
+#define SUBSYS_1A01XXXX 24
+#define SUBSYS_1A02XXXX 25
+#define SUBSYS_1A03XXXX 26
+#define SUBSYS_1A04XXXX 27
+#define SUBSYS_1A05XXXX 28
+#define SUBSYS_1A06XXXX 29
+#define SUBSYS_NO_SUPPORT 99
+
+/*
+ * GCE General Purpose Register (GPR) support.
+ * Notes on the intended usage scenario are kept next to each register below.
+ */
+/* GCE: write mask */
+#define GCE_GPR_R00 0x00
+#define GCE_GPR_R01 0x01
+/* MDP: P1: JPEG dest */
+#define GCE_GPR_R02 0x02
+#define GCE_GPR_R03 0x03
+/* MDP: PQ color */
+#define GCE_GPR_R04 0x04
+/* MDP: 2D sharpness */
+#define GCE_GPR_R05 0x05
+/* DISP: poll esd */
+#define GCE_GPR_R06 0x06
+#define GCE_GPR_R07 0x07
+/* MDP: P4: 2D sharpness dst */
+#define GCE_GPR_R08 0x08
+#define GCE_GPR_R09 0x09
+/* VCU: poll with timeout for GPR timer */
+#define GCE_GPR_R10 0x0A
+#define GCE_GPR_R11 0x0B
+/* CMDQ: debug */
+#define GCE_GPR_R12 0x0C
+#define GCE_GPR_R13 0x0D
+/* CMDQ: P7: debug */
+#define GCE_GPR_R14 0x0E
+#define GCE_GPR_R15 0x0F
+
+/* GCE hardware events */
+/* VDEC */
+#define CMDQ_EVENT_LINE_COUNT_THRESHOLD_INTERRUPT 0
+#define CMDQ_EVENT_VDEC_INT 1
+#define CMDQ_EVENT_VDEC_PAUSE 2
+#define CMDQ_EVENT_VDEC_DEC_ERROR 3
+#define CMDQ_EVENT_MDEC_TIMEOUT 4
+#define CMDQ_EVENT_DRAM_ACCESS_DONE 5
+#define CMDQ_EVENT_INI_FETCH_RDY 6
+#define CMDQ_EVENT_PROCESS_FLAG 7
+#define CMDQ_EVENT_SEARCH_START_CODE_DONE 8
+#define CMDQ_EVENT_REF_REORDER_DONE 9
+#define CMDQ_EVENT_WP_TBLE_DONE 10
+#define CMDQ_EVENT_COUNT_SRAM_CLR_DONE 11
+#define CMDQ_EVENT_GCE_CNT_OP_THRESHOLD 15
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_0 16
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_1 17
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_2 18
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_3 19
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_4 20
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_5 21
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_6 22
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_7 23
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_8 24
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_9 25
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_10 26
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_11 27
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_12 28
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_13 29
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_14 30
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_15 31
+#define CMDQ_EVENT_WPE_GCE_FRAME_DONE 32
+
+/* CAM */
+#define CMDQ_EVENT_ISP_FRAME_DONE_A 65
+#define CMDQ_EVENT_ISP_FRAME_DONE_B 66
+#define CMDQ_EVENT_CAMSV1_PASS1_DONE 70
+#define CMDQ_EVENT_CAMSV2_PASS1_DONE 71
+#define CMDQ_EVENT_CAMSV3_PASS1_DONE 72
+#define CMDQ_EVENT_MRAW_0_PASS1_DONE 73
+#define CMDQ_EVENT_SENINF_CAM0_FIFO_FULL 75
+#define CMDQ_EVENT_SENINF_CAM1_FIFO_FULL 76
+#define CMDQ_EVENT_SENINF_CAM2_FIFO_FULL 77
+#define CMDQ_EVENT_SENINF_CAM3_FIFO_FULL 78
+#define CMDQ_EVENT_SENINF_CAM4_FIFO_FULL 79
+#define CMDQ_EVENT_SENINF_CAM5_FIFO_FULL 80
+#define CMDQ_EVENT_SENINF_CAM6_FIFO_FULL 81
+#define CMDQ_EVENT_SENINF_CAM7_FIFO_FULL 82
+#define CMDQ_EVENT_SENINF_CAM8_FIFO_FULL 83
+#define CMDQ_EVENT_SENINF_CAM9_FIFO_FULL 84
+#define CMDQ_EVENT_SENINF_CAM10_FIFO_FULL 85
+#define CMDQ_EVENT_SENINF_CAM11_FIFO_FULL 86
+#define CMDQ_EVENT_SENINF_CAM12_FIFO_FULL 87
+#define CMDQ_EVENT_TG_OVRUN_A_INT 88
+#define CMDQ_EVENT_DMA_R1_ERROR_A_INT 89
+#define CMDQ_EVENT_TG_OVRUN_B_INT 90
+#define CMDQ_EVENT_DMA_R1_ERROR_B_INT 91
+#define CMDQ_EVENT_TG_OVRUN_M0_INT 94
+#define CMDQ_EVENT_R1_ERROR_M0_INT 95
+#define CMDQ_EVENT_TG_GRABERR_M0_INT 96
+#define CMDQ_EVENT_TG_GRABERR_A_INT 98
+#define CMDQ_EVENT_CQ_VR_SNAP_A_INT 99
+#define CMDQ_EVENT_TG_GRABERR_B_INT 100
+#define CMDQ_EVENT_CQ_VR_SNAP_B_INT 101
+/* VENC */
+#define CMDQ_EVENT_VENC_CMDQ_FRAME_DONE 129
+#define CMDQ_EVENT_VENC_CMDQ_PAUSE_DONE 130
+#define CMDQ_EVENT_JPGENC_CMDQ_DONE 131
+#define CMDQ_EVENT_VENC_CMDQ_MB_DONE 132
+#define CMDQ_EVENT_VENC_CMDQ_128BYTE_CNT_DONE 133
+#define CMDQ_EVENT_VENC_CMDQ_PPS_DONE 136
+#define CMDQ_EVENT_VENC_CMDQ_SPS_DONE 137
+#define CMDQ_EVENT_VENC_CMDQ_VPS_DONE 138
+/* IPE */
+#define CMDQ_EVENT_FDVT_DONE 161
+#define CMDQ_EVENT_FE_DONE 162
+#define CMDQ_EVENT_RSC_DONE 163
+#define CMDQ_EVENT_DVS_DONE_ASYNC_SHOT 164
+#define CMDQ_EVENT_DVP_DONE_ASYNC_SHOT 165
+/* IMG2 */
+#define CMDQ_EVENT_GCE_IMG2_EVENT0 193
+#define CMDQ_EVENT_GCE_IMG2_EVENT1 194
+#define CMDQ_EVENT_GCE_IMG2_EVENT2 195
+#define CMDQ_EVENT_GCE_IMG2_EVENT3 196
+#define CMDQ_EVENT_GCE_IMG2_EVENT4 197
+#define CMDQ_EVENT_GCE_IMG2_EVENT5 198
+#define CMDQ_EVENT_GCE_IMG2_EVENT6 199
+#define CMDQ_EVENT_GCE_IMG2_EVENT7 200
+#define CMDQ_EVENT_GCE_IMG2_EVENT8 201
+#define CMDQ_EVENT_GCE_IMG2_EVENT9 202
+#define CMDQ_EVENT_GCE_IMG2_EVENT10 203
+#define CMDQ_EVENT_GCE_IMG2_EVENT11 204
+#define CMDQ_EVENT_GCE_IMG2_EVENT12 205
+#define CMDQ_EVENT_GCE_IMG2_EVENT13 206
+#define CMDQ_EVENT_GCE_IMG2_EVENT14 207
+#define CMDQ_EVENT_GCE_IMG2_EVENT15 208
+#define CMDQ_EVENT_GCE_IMG2_EVENT16 209
+#define CMDQ_EVENT_GCE_IMG2_EVENT17 210
+#define CMDQ_EVENT_GCE_IMG2_EVENT18 211
+#define CMDQ_EVENT_GCE_IMG2_EVENT19 212
+#define CMDQ_EVENT_GCE_IMG2_EVENT20 213
+#define CMDQ_EVENT_GCE_IMG2_EVENT21 214
+#define CMDQ_EVENT_GCE_IMG2_EVENT22 215
+#define CMDQ_EVENT_GCE_IMG2_EVENT23 216
+/* IMG1 */
+#define CMDQ_EVENT_GCE_IMG1_EVENT0 225
+#define CMDQ_EVENT_GCE_IMG1_EVENT1 226
+#define CMDQ_EVENT_GCE_IMG1_EVENT2 227
+#define CMDQ_EVENT_GCE_IMG1_EVENT3 228
+#define CMDQ_EVENT_GCE_IMG1_EVENT4 229
+#define CMDQ_EVENT_GCE_IMG1_EVENT5 230
+#define CMDQ_EVENT_GCE_IMG1_EVENT6 231
+#define CMDQ_EVENT_GCE_IMG1_EVENT7 232
+#define CMDQ_EVENT_GCE_IMG1_EVENT8 233
+#define CMDQ_EVENT_GCE_IMG1_EVENT9 234
+#define CMDQ_EVENT_GCE_IMG1_EVENT10 235
+#define CMDQ_EVENT_GCE_IMG1_EVENT11 236
+#define CMDQ_EVENT_GCE_IMG1_EVENT12 237
+#define CMDQ_EVENT_GCE_IMG1_EVENT13 238
+#define CMDQ_EVENT_GCE_IMG1_EVENT14 239
+#define CMDQ_EVENT_GCE_IMG1_EVENT15 240
+#define CMDQ_EVENT_GCE_IMG1_EVENT16 241
+#define CMDQ_EVENT_GCE_IMG1_EVENT17 242
+#define CMDQ_EVENT_GCE_IMG1_EVENT18 243
+#define CMDQ_EVENT_GCE_IMG1_EVENT19 244
+#define CMDQ_EVENT_GCE_IMG1_EVENT20 245
+#define CMDQ_EVENT_GCE_IMG1_EVENT21 246
+#define CMDQ_EVENT_GCE_IMG1_EVENT22 247
+#define CMDQ_EVENT_GCE_IMG1_EVENT23 248
+/* MDP */
+#define CMDQ_EVENT_MDP_RDMA0_SOF 256
+#define CMDQ_EVENT_MDP_RDMA1_SOF 257
+#define CMDQ_EVENT_MDP_AAL0_SOF 258
+#define CMDQ_EVENT_MDP_AAL1_SOF 259
+#define CMDQ_EVENT_MDP_HDR0_SOF 260
+#define CMDQ_EVENT_MDP_RSZ0_SOF 261
+#define CMDQ_EVENT_MDP_RSZ1_SOF 262
+#define CMDQ_EVENT_MDP_WROT0_SOF 263
+#define CMDQ_EVENT_MDP_WROT1_SOF 264
+#define CMDQ_EVENT_MDP_TDSHP0_SOF 265
+#define CMDQ_EVENT_MDP_TDSHP1_SOF 266
+#define CMDQ_EVENT_IMG_DL_RELAY0_SOF 267
+#define CMDQ_EVENT_IMG_DL_RELAY1_SOF 268
+#define CMDQ_EVENT_MDP_COLOR0_SOF 269
+#define CMDQ_EVENT_MDP_WROT3_FRAME_DONE 288
+#define CMDQ_EVENT_MDP_WROT2_FRAME_DONE 289
+#define CMDQ_EVENT_MDP_WROT1_FRAME_DONE 290
+#define CMDQ_EVENT_MDP_WROT0_FRAME_DONE 291
+#define CMDQ_EVENT_MDP_TDSHP3_FRAME_DONE 292
+#define CMDQ_EVENT_MDP_TDSHP2_FRAME_DONE 293
+#define CMDQ_EVENT_MDP_TDSHP1_FRAME_DONE 294
+#define CMDQ_EVENT_MDP_TDSHP0_FRAME_DONE 295
+#define CMDQ_EVENT_MDP_RSZ3_FRAME_DONE 296
+#define CMDQ_EVENT_MDP_RSZ2_FRAME_DONE 297
+#define CMDQ_EVENT_MDP_RSZ1_FRAME_DONE 298
+#define CMDQ_EVENT_MDP_RSZ0_FRAME_DONE 299
+#define CMDQ_EVENT_MDP_RDMA3_FRAME_DONE 300
+#define CMDQ_EVENT_MDP_RDMA2_FRAME_DONE 301
+#define CMDQ_EVENT_MDP_RDMA1_FRAME_DONE 302
+#define CMDQ_EVENT_MDP_RDMA0_FRAME_DONE 303
+#define CMDQ_EVENT_MDP_HDR1_FRAME_DONE 304
+#define CMDQ_EVENT_MDP_HDR0_FRAME_DONE 305
+#define CMDQ_EVENT_MDP_COLOR0_FRAME_DONE 306
+#define CMDQ_EVENT_MDP_AAL3_FRAME_DONE 307
+#define CMDQ_EVENT_MDP_AAL2_FRAME_DONE 308
+#define CMDQ_EVENT_MDP_AAL1_FRAME_DONE 309
+#define CMDQ_EVENT_MDP_AAL0_FRAME_DONE 310
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_0 320
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_1 321
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_2 322
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_3 323
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_4 324
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_5 325
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_6 326
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_7 327
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_8 328
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_9 329
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_10 330
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_11 331
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_12 332
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_13 333
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_14 334
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_15 335
+#define CMDQ_EVENT_MDP_WROT3_SW_RST_DONE_ENG_EVENT 336
+#define CMDQ_EVENT_MDP_WROT2_SW_RST_DONE_ENG_EVENT 337
+#define CMDQ_EVENT_MDP_WROT1_SW_RST_DONE_ENG_EVENT 338
+#define CMDQ_EVENT_MDP_WROT0_SW_RST_DONE_ENG_EVENT 339
+#define CMDQ_EVENT_MDP_RDMA3_SW_RST_DONE_ENG_EVENT 340
+#define CMDQ_EVENT_MDP_RDMA2_SW_RST_DONE_ENG_EVENT 341
+#define CMDQ_EVENT_MDP_RDMA1_SW_RST_DONE_ENG_EVENT 342
+#define CMDQ_EVENT_MDP_RDMA0_SW_RST_DONE_ENG_EVENT 343
+/* DISP */
+#define CMDQ_EVENT_DISP_OVL0_SOF 384
+#define CMDQ_EVENT_DISP_OVL0_2L_SOF 385
+#define CMDQ_EVENT_DISP_RDMA0_SOF 386
+#define CMDQ_EVENT_DISP_RSZ0_SOF 387
+#define CMDQ_EVENT_DISP_COLOR0_SOF 388
+#define CMDQ_EVENT_DISP_CCORR0_SOF 389
+#define CMDQ_EVENT_DISP_CCORR1_SOF 390
+#define CMDQ_EVENT_DISP_AAL0_SOF 391
+#define CMDQ_EVENT_DISP_GAMMA0_SOF 392
+#define CMDQ_EVENT_DISP_POSTMASK0_SOF 393
+#define CMDQ_EVENT_DISP_DITHER0_SOF 394
+#define CMDQ_EVENT_DISP_CM0_SOF 395
+#define CMDQ_EVENT_DISP_SPR0_SOF 396
+#define CMDQ_EVENT_DISP_DSC_WRAP0_SOF 397
+#define CMDQ_EVENT_DSI0_SOF 398
+#define CMDQ_EVENT_DISP_WDMA0_SOF 399
+#define CMDQ_EVENT_DISP_PWM0_SOF 400
+#define CMDQ_EVENT_DSI0_FRAME_DONE 410
+#define CMDQ_EVENT_DISP_WDMA0_FRAME_DONE 411
+#define CMDQ_EVENT_DISP_SPR0_FRAME_DONE 412
+#define CMDQ_EVENT_DISP_RSZ0_FRAME_DONE 413
+#define CMDQ_EVENT_DISP_RDMA0_FRAME_DONE 414
+#define CMDQ_EVENT_DISP_POSTMASK0_FRAME_DONE 415
+#define CMDQ_EVENT_DISP_OVL0_FRAME_DONE 416
+#define CMDQ_EVENT_DISP_OVL0_2L_FRAME_DONE 417
+#define CMDQ_EVENT_DISP_GAMMA0_FRAME_DONE 418
+#define CMDQ_EVENT_DISP_DSC_WRAP0_CORE0_FRAME_DONE 420
+#define CMDQ_EVENT_DISP_DITHER0_FRAME_DONE 421
+#define CMDQ_EVENT_DISP_COLOR0_FRAME_DONE 422
+#define CMDQ_EVENT_DISP_CM0_FRAME_DONE 423
+#define CMDQ_EVENT_DISP_CCORR1_FRAME_DONE 424
+#define CMDQ_EVENT_DISP_CCORR0_FRAME_DONE 425
+#define CMDQ_EVENT_DISP_AAL0_FRAME_DONE 426
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_0 434
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_1 435
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_2 436
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_3 437
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_4 438
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_5 439
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_6 440
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_7 441
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_8 442
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_9 443
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_10 444
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_11 445
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_12 446
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_13 447
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_14 448
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_15 449
+#define CMDQ_EVENT_DSI0_TE_ENG_EVENT 450
+#define CMDQ_EVENT_DSI0_IRQ_ENG_EVENT 451
+#define CMDQ_EVENT_DSI0_DONE_ENG_EVENT 452
+#define CMDQ_EVENT_DISP_WDMA0_SW_RST_DONE_ENG_EVENT 453
+#define CMDQ_EVENT_DISP_SMIASSERT_ENG_EVENT 454
+#define CMDQ_EVENT_DISP_POSTMASK0_RST_DONE_ENG_EVENT 455
+#define CMDQ_EVENT_DISP_OVL0_RST_DONE_ENG_EVENT 456
+#define CMDQ_EVENT_DISP_OVL0_2L_RST_DONE_ENG_EVENT 457
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_0 458
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_1 459
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_2 460
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_3 461
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_4 462
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_5 463
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_6 464
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_7 465
+#define CMDQ_EVENT_OUT_EVENT_0 898
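
The hardware event IDs above are what a GCE client waits on from inside
a command packet. A minimal sketch, assuming the upstream
cmdq_pkt_wfe() (wait-for-event) helper of this kernel era and an
already-created packet:

    #include <linux/soc/mediatek/mtk-cmdq.h>
    #include <dt-bindings/gce/mt8186-gce.h>

    /* Block the GCE thread until DISP RDMA0 reports frame done, then
     * clear the event so the next wait sees a fresh signal.
     */
    static int example_wait_frame_done(struct cmdq_pkt *pkt)
    {
            return cmdq_pkt_wfe(pkt, CMDQ_EVENT_DISP_RDMA0_FRAME_DONE, true);
    }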
+
+/* CMDQ SW tokens
+ * The following definitions are GCE SW tokens which may be used by
+ * clients through the event operation API.
+ * Note that tokens 512 to 639 may be set secure.
+ */
+
+/* end of hw event and begin of sw token */
+#define CMDQ_MAX_HW_EVENT 512
+
+/* Config thread notifies the trigger thread */
+#define CMDQ_SYNC_TOKEN_CONFIG_DIRTY 640
+/* Trigger thread notifies the config thread */
+#define CMDQ_SYNC_TOKEN_STREAM_EOF 641
+/* Block Trigger thread until the ESD check finishes. */
+#define CMDQ_SYNC_TOKEN_ESD_EOF 642
+#define CMDQ_SYNC_TOKEN_STREAM_BLOCK 643
+/* Check that CABC setup has finished */
+#define CMDQ_SYNC_TOKEN_CABC_EOF 644
+
+/* Notify the normal CMDQ that some secure tasks are done.
+ * MUST NOT CHANGE: this token is synced with the secure world.
+ */
+#define CMDQ_SYNC_SECURE_THR_EOF 647
+
+/* SW tokens used by the CMDQ driver itself */
+#define CMDQ_SYNC_TOKEN_USER_0 649
+#define CMDQ_SYNC_TOKEN_USER_1 650
+#define CMDQ_SYNC_TOKEN_POLL_MONITOR 651
+#define CMDQ_SYNC_TOKEN_TPR_LOCK 652
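
Unlike hardware events, SW tokens are only ever raised and cleared by
software, so two packets can use one as a semaphore. A minimal sketch,
assuming the upstream cmdq_pkt_set_event() and cmdq_pkt_wfe() helpers;
which side signals and which side waits is the client's choice:

    #include <linux/soc/mediatek/mtk-cmdq.h>
    #include <dt-bindings/gce/mt8186-gce.h>

    /* Producer side: raise the user token once its work is queued. */
    static int example_signal(struct cmdq_pkt *pkt)
    {
            return cmdq_pkt_set_event(pkt, CMDQ_SYNC_TOKEN_USER_0);
    }

    /* Consumer side: wait for the token, clearing it for reuse. */
    static int example_wait(struct cmdq_pkt *pkt)
    {
            return cmdq_pkt_wfe(pkt, CMDQ_SYNC_TOKEN_USER_0, true);
    }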
+
+/* ISP sw token */
+#define CMDQ_SYNC_TOKEN_MSS 665
+#define CMDQ_SYNC_TOKEN_MSF 666
+
+/* DISP sw token */
+#define CMDQ_SYNC_TOKEN_SODI 671
+
+/* GPR access tokens (for register backup).
+ * There are 15 32-bit GPRs; every 3 GPRs form a set
+ * (64 bits for the address, 32 bits for the value).
+ * MUST NOT CHANGE: these tokens are synced with MDP.
+ */
+#define CMDQ_SYNC_TOKEN_GPR_SET_0 700
+#define CMDQ_SYNC_TOKEN_GPR_SET_1 701
+#define CMDQ_SYNC_TOKEN_GPR_SET_2 702
+#define CMDQ_SYNC_TOKEN_GPR_SET_3 703
+#define CMDQ_SYNC_TOKEN_GPR_SET_4 704
+
+/* Resource lock events to control resources between GCE threads */
+#define CMDQ_SYNC_RESOURCE_WROT0 710
+#define CMDQ_SYNC_RESOURCE_WROT1 711
+
+/* Events for the GPR timer, used for sleep and poll with timeout */
+#define CMDQ_TOKEN_GPR_TIMER_R0 994
+#define CMDQ_TOKEN_GPR_TIMER_R1 995
+#define CMDQ_TOKEN_GPR_TIMER_R2 996
+#define CMDQ_TOKEN_GPR_TIMER_R3 997
+#define CMDQ_TOKEN_GPR_TIMER_R4 998
+#define CMDQ_TOKEN_GPR_TIMER_R5 999
+#define CMDQ_TOKEN_GPR_TIMER_R6 1000
+#define CMDQ_TOKEN_GPR_TIMER_R7 1001
+#define CMDQ_TOKEN_GPR_TIMER_R8 1002
+#define CMDQ_TOKEN_GPR_TIMER_R9 1003
+#define CMDQ_TOKEN_GPR_TIMER_R10 1004
+#define CMDQ_TOKEN_GPR_TIMER_R11 1005
+#define CMDQ_TOKEN_GPR_TIMER_R12 1006
+#define CMDQ_TOKEN_GPR_TIMER_R13 1007
+#define CMDQ_TOKEN_GPR_TIMER_R14 1008
+#define CMDQ_TOKEN_GPR_TIMER_R15 1009
+
+#define CMDQ_EVENT_MAX 0x3FF
+/* CMDQ sw tokens END */
+
+#endif
diff --git a/include/dt-bindings/gce/mt8192-gce.h b/include/dt-bindings/gce/mt8192-gce.h
new file mode 100644
index 000000000000..9e5a0eb040a0
--- /dev/null
+++ b/include/dt-bindings/gce/mt8192-gce.h
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author: Yongqiang Niu <yongqiang.niu@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_GCE_MT8192_H
+#define _DT_BINDINGS_GCE_MT8192_H
+
+/* assigning a timeout of 0 also selects the default */
+#define CMDQ_NO_TIMEOUT 0xffffffff
+#define CMDQ_TIMEOUT_DEFAULT 1000
+
+/* GCE thread priority */
+#define CMDQ_THR_PRIO_LOWEST 0
+#define CMDQ_THR_PRIO_1 1
+#define CMDQ_THR_PRIO_2 2
+#define CMDQ_THR_PRIO_3 3
+#define CMDQ_THR_PRIO_4 4
+#define CMDQ_THR_PRIO_5 5
+#define CMDQ_THR_PRIO_6 6
+#define CMDQ_THR_PRIO_HIGHEST 7
+
+/* CPR count in 32-bit registers */
+#define GCE_CPR_COUNT 1312
+
+/* GCE subsys table */
+#define SUBSYS_1300XXXX 0
+#define SUBSYS_1400XXXX 1
+#define SUBSYS_1401XXXX 2
+#define SUBSYS_1402XXXX 3
+#define SUBSYS_1502XXXX 4
+#define SUBSYS_1880XXXX 5
+#define SUBSYS_1881XXXX 6
+#define SUBSYS_1882XXXX 7
+#define SUBSYS_1883XXXX 8
+#define SUBSYS_1884XXXX 9
+#define SUBSYS_1000XXXX 10
+#define SUBSYS_1001XXXX 11
+#define SUBSYS_1002XXXX 12
+#define SUBSYS_1003XXXX 13
+#define SUBSYS_1004XXXX 14
+#define SUBSYS_1005XXXX 15
+#define SUBSYS_1020XXXX 16
+#define SUBSYS_1028XXXX 17
+#define SUBSYS_1700XXXX 18
+#define SUBSYS_1701XXXX 19
+#define SUBSYS_1702XXXX 20
+#define SUBSYS_1703XXXX 21
+#define SUBSYS_1800XXXX 22
+#define SUBSYS_1801XXXX 23
+#define SUBSYS_1802XXXX 24
+#define SUBSYS_1804XXXX 25
+#define SUBSYS_1805XXXX 26
+#define SUBSYS_1808XXXX 27
+#define SUBSYS_180aXXXX 28
+#define SUBSYS_180bXXXX 29
+
+#define CMDQ_EVENT_VDEC_LAT_SOF_0 0
+#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_0 1
+#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_1 2
+#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_2 3
+#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_3 4
+#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_4 5
+#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_5 6
+#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_6 7
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_0 8
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_1 9
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_2 10
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_3 11
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_4 12
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_5 13
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_6 14
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_7 15
+
+#define CMDQ_EVENT_ISP_FRAME_DONE_A 65
+#define CMDQ_EVENT_ISP_FRAME_DONE_B 66
+#define CMDQ_EVENT_ISP_FRAME_DONE_C 67
+#define CMDQ_EVENT_CAMSV0_PASS1_DONE 68
+#define CMDQ_EVENT_CAMSV02_PASS1_DONE 69
+#define CMDQ_EVENT_CAMSV1_PASS1_DONE 70
+#define CMDQ_EVENT_CAMSV2_PASS1_DONE 71
+#define CMDQ_EVENT_CAMSV3_PASS1_DONE 72
+#define CMDQ_EVENT_MRAW_0_PASS1_DONE 73
+#define CMDQ_EVENT_MRAW_1_PASS1_DONE 74
+#define CMDQ_EVENT_SENINF_CAM0_FIFO_FULL 75
+#define CMDQ_EVENT_SENINF_CAM1_FIFO_FULL 76
+#define CMDQ_EVENT_SENINF_CAM2_FIFO_FULL 77
+#define CMDQ_EVENT_SENINF_CAM3_FIFO_FULL 78
+#define CMDQ_EVENT_SENINF_CAM4_FIFO_FULL 79
+#define CMDQ_EVENT_SENINF_CAM5_FIFO_FULL 80
+#define CMDQ_EVENT_SENINF_CAM6_FIFO_FULL 81
+#define CMDQ_EVENT_SENINF_CAM7_FIFO_FULL 82
+#define CMDQ_EVENT_SENINF_CAM8_FIFO_FULL 83
+#define CMDQ_EVENT_SENINF_CAM9_FIFO_FULL 84
+#define CMDQ_EVENT_SENINF_CAM10_FIFO_FULL 85
+#define CMDQ_EVENT_SENINF_CAM11_FIFO_FULL 86
+#define CMDQ_EVENT_SENINF_CAM12_FIFO_FULL 87
+#define CMDQ_EVENT_TG_OVRUN_A_INT 88
+#define CMDQ_EVENT_DMA_R1_ERROR_A_INT 89
+#define CMDQ_EVENT_TG_OVRUN_B_INT 90
+#define CMDQ_EVENT_DMA_R1_ERROR_B_INT 91
+#define CMDQ_EVENT_TG_OVRUN_C_INT 92
+#define CMDQ_EVENT_DMA_R1_ERROR_C_INT 93
+#define CMDQ_EVENT_TG_OVRUN_M0_INT 94
+#define CMDQ_EVENT_DMA_R1_ERROR_M0_INT 95
+#define CMDQ_EVENT_TG_GRABERR_M0_INT 96
+#define CMDQ_EVENT_TG_GRABERR_M1_INT 97
+#define CMDQ_EVENT_TG_GRABERR_A_INT 98
+#define CMDQ_EVENT_CQ_VR_SNAP_A_INT 99
+#define CMDQ_EVENT_TG_GRABERR_B_INT 100
+#define CMDQ_EVENT_CQ_VR_SNAP_B_INT 101
+#define CMDQ_EVENT_TG_GRABERR_C_INT 102
+#define CMDQ_EVENT_CQ_VR_SNAP_C_INT 103
+
+#define CMDQ_EVENT_VENC_CMDQ_FRAME_DONE 129
+#define CMDQ_EVENT_VENC_CMDQ_PAUSE_DONE 130
+#define CMDQ_EVENT_JPGENC_CMDQ_DONE 131
+#define CMDQ_EVENT_VENC_CMDQ_MB_DONE 132
+#define CMDQ_EVENT_VENC_CMDQ_128BYTE_CNT_DONE 133
+#define CMDQ_EVENT_VENC_C0_CMDQ_WP_2ND_STAGE_DONE 134
+#define CMDQ_EVENT_VENC_C0_CMDQ_WP_3RD_STAGE_DONE 135
+#define CMDQ_EVENT_VENC_CMDQ_PPS_DONE 136
+#define CMDQ_EVENT_VENC_CMDQ_SPS_DONE 137
+#define CMDQ_EVENT_VENC_CMDQ_VPS_DONE 138
+
+#define CMDQ_EVENT_VDEC_CORE0_SOF_0 160
+#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_0 161
+#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_1 162
+#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_2 163
+#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_3 164
+#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_4 165
+#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_5 166
+#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_6 167
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_0 168
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_1 169
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_2 170
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_3 171
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_4 172
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_5 173
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_6 174
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_7 175
+#define CMDQ_EVENT_FDVT_DONE 177
+#define CMDQ_EVENT_FE_DONE 178
+#define CMDQ_EVENT_RSC_DONE 179
+#define CMDQ_EVENT_DVS_DONE_ASYNC_SHOT 180
+#define CMDQ_EVENT_DVP_DONE_ASYNC_SHOT 181
+
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_0 193
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_1 194
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_2 195
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_3 196
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_4 197
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_5 198
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_6 199
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_7 200
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_8 201
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_9 202
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_10 203
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_11 204
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_12 205
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_13 206
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_14 207
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_15 208
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_16 209
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_17 210
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_18 211
+#define CMDQ_EVENT_IMG2_DIP_DMA_ERR_EVENT 212
+#define CMDQ_EVENT_IMG2_AMD_FRAME_DONE 213
+#define CMDQ_EVENT_IMG2_MFB_DONE_LINK_MISC 214
+#define CMDQ_EVENT_IMG2_WPE_A_DONE_LINK_MISC 215
+#define CMDQ_EVENT_IMG2_MSS_DONE_LINK_MISC 216
+
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_0 225
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_1 226
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_2 227
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_3 228
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_4 229
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_5 230
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_6 231
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_7 232
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_8 233
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_9 234
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_10 235
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_11 236
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_12 237
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_13 238
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_14 239
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_15 240
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_16 241
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_17 242
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_18 243
+#define CMDQ_EVENT_IMG1_DIP_DMA_ERR_EVENT 244
+#define CMDQ_EVENT_IMG1_AMD_FRAME_DONE 245
+#define CMDQ_EVENT_IMG1_MFB_DONE_LINK_MISC 246
+#define CMDQ_EVENT_IMG1_WPE_A_DONE_LINK_MISC 247
+#define CMDQ_EVENT_IMG1_MSS_DONE_LINK_MISC 248
+
+#define CMDQ_EVENT_MDP_RDMA0_SOF 256
+#define CMDQ_EVENT_MDP_RDMA1_SOF 257
+#define CMDQ_EVENT_MDP_AAL0_SOF 258
+#define CMDQ_EVENT_MDP_AAL1_SOF 259
+#define CMDQ_EVENT_MDP_HDR0_SOF 260
+#define CMDQ_EVENT_MDP_HDR1_SOF 261
+#define CMDQ_EVENT_MDP_RSZ0_SOF 262
+#define CMDQ_EVENT_MDP_RSZ1_SOF 263
+#define CMDQ_EVENT_MDP_WROT0_SOF 264
+#define CMDQ_EVENT_MDP_WROT1_SOF 265
+#define CMDQ_EVENT_MDP_TDSHP0_SOF 266
+#define CMDQ_EVENT_MDP_TDSHP1_SOF 267
+#define CMDQ_EVENT_IMG_DL_RELAY0_SOF 268
+#define CMDQ_EVENT_IMG_DL_RELAY1_SOF 269
+#define CMDQ_EVENT_MDP_COLOR0_SOF 270
+#define CMDQ_EVENT_MDP_COLOR1_SOF 271
+#define CMDQ_EVENT_MDP_WROT1_FRAME_DONE 290
+#define CMDQ_EVENT_MDP_WROT0_FRAME_DONE 291
+#define CMDQ_EVENT_MDP_TDSHP1_FRAME_DONE 294
+#define CMDQ_EVENT_MDP_TDSHP0_FRAME_DONE 295
+#define CMDQ_EVENT_MDP_RSZ1_FRAME_DONE 302
+#define CMDQ_EVENT_MDP_RSZ0_FRAME_DONE 303
+#define CMDQ_EVENT_MDP_RDMA1_FRAME_DONE 306
+#define CMDQ_EVENT_MDP_RDMA0_FRAME_DONE 307
+#define CMDQ_EVENT_MDP_HDR1_FRAME_DONE 308
+#define CMDQ_EVENT_MDP_HDR0_FRAME_DONE 309
+#define CMDQ_EVENT_MDP_COLOR1_FRAME_DONE 312
+#define CMDQ_EVENT_MDP_COLOR0_FRAME_DONE 313
+#define CMDQ_EVENT_MDP_AAL1_FRAME_DONE 316
+#define CMDQ_EVENT_MDP_AAL0_FRAME_DONE 317
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_0 320
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_1 321
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_2 322
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_3 323
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_4 324
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_5 325
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_6 326
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_7 327
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_8 328
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_9 329
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_10 330
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_11 331
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_12 332
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_13 333
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_14 334
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_15 335
+#define CMDQ_EVENT_MDP_WROT1_SW_RST_DONE_ENG_EVENT 338
+#define CMDQ_EVENT_MDP_WROT0_SW_RST_DONE_ENG_EVENT 339
+#define CMDQ_EVENT_MDP_RDMA1_SW_RST_DONE_ENG_EVENT 342
+#define CMDQ_EVENT_MDP_RDMA0_SW_RST_DONE_ENG_EVENT 343
+
+#define CMDQ_EVENT_DISP_OVL0_SOF 384
+#define CMDQ_EVENT_DISP_OVL0_2L_SOF 385
+#define CMDQ_EVENT_DISP_RDMA0_SOF 386
+#define CMDQ_EVENT_DISP_RSZ0_SOF 387
+#define CMDQ_EVENT_DISP_COLOR0_SOF 388
+#define CMDQ_EVENT_DISP_CCORR0_SOF 389
+#define CMDQ_EVENT_DISP_AAL0_SOF 390
+#define CMDQ_EVENT_DISP_GAMMA0_SOF 391
+#define CMDQ_EVENT_DISP_POSTMASK0_SOF 392
+#define CMDQ_EVENT_DISP_DITHER0_SOF 393
+#define CMDQ_EVENT_DISP_DSC_WRAP0_CORE0_SOF 394
+#define CMDQ_EVENT_DISP_DSC_WRAP0_CORE1_SOF 395
+#define CMDQ_EVENT_DSI0_SOF 396
+#define CMDQ_EVENT_DISP_WDMA0_SOF 397
+#define CMDQ_EVENT_DISP_UFBC_WDMA0_SOF 398
+#define CMDQ_EVENT_DISP_PWM0_SOF 399
+#define CMDQ_EVENT_DISP_OVL2_2L_SOF 400
+#define CMDQ_EVENT_DISP_RDMA4_SOF 401
+#define CMDQ_EVENT_DISP_DPI0_SOF 402
+#define CMDQ_EVENT_MDP_RDMA4_SOF 403
+#define CMDQ_EVENT_MDP_HDR4_SOF 404
+#define CMDQ_EVENT_MDP_RSZ4_SOF 405
+#define CMDQ_EVENT_MDP_AAL4_SOF 406
+#define CMDQ_EVENT_MDP_TDSHP4_SOF 407
+#define CMDQ_EVENT_MDP_COLOR4_SOF 408
+#define CMDQ_EVENT_DISP_Y2R0_SOF 409
+#define CMDQ_EVENT_MDP_TDSHP4_FRAME_DONE 410
+#define CMDQ_EVENT_MDP_RSZ4_FRAME_DONE 411
+#define CMDQ_EVENT_MDP_RDMA4_FRAME_DONE 412
+#define CMDQ_EVENT_MDP_HDR4_FRAME_DONE 413
+#define CMDQ_EVENT_MDP_COLOR4_FRAME_DONE 414
+#define CMDQ_EVENT_MDP_AAL4_FRAME_DONE 415
+#define CMDQ_EVENT_DSI0_FRAME_DONE 416
+#define CMDQ_EVENT_DISP_WDMA0_FRAME_DONE 417
+#define CMDQ_EVENT_DISP_UFBC_WDMA0_FRAME_DONE 418
+#define CMDQ_EVENT_DISP_RSZ0_FRAME_DONE 419
+#define CMDQ_EVENT_DISP_RDMA4_FRAME_DONE 420
+#define CMDQ_EVENT_DISP_RDMA0_FRAME_DONE 421
+#define CMDQ_EVENT_DISP_POSTMASK0_FRAME_DONE 422
+#define CMDQ_EVENT_DISP_OVL2_2L_FRAME_DONE 423
+#define CMDQ_EVENT_DISP_OVL0_FRAME_DONE 424
+#define CMDQ_EVENT_DISP_OVL0_2L_FRAME_DONE 425
+#define CMDQ_EVENT_DISP_GAMMA0_FRAME_DONE 426
+#define CMDQ_EVENT_DISP_DSC_WRAP0_CORE1_FRAME_DONE 427
+#define CMDQ_EVENT_DISP_DSC_WRAP0_CORE0_FRAME_DONE 428
+#define CMDQ_EVENT_DISP_DPI0_FRAME_DONE 429
+#define CMDQ_EVENT_DISP_DITHER0_FRAME_DONE 430
+#define CMDQ_EVENT_DISP_COLOR0_FRAME_DONE 431
+#define CMDQ_EVENT_DISP_CCORR0_FRAME_DONE 432
+#define CMDQ_EVENT_DISP_AAL0_FRAME_DONE 433
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_0 434
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_1 435
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_2 436
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_3 437
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_4 438
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_5 439
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_6 440
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_7 441
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_8 442
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_9 443
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_10 444
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_11 445
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_12 446
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_13 447
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_14 448
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_15 449
+#define CMDQ_EVENT_DSI0_TE_ENG_EVENT 450
+#define CMDQ_EVENT_DSI0_IRQ_ENG_EVENT 451
+#define CMDQ_EVENT_DSI0_DONE_ENG_EVENT 452
+#define CMDQ_EVENT_DISP_WDMA0_SW_RST_DONE_ENG_EVENT 453
+#define CMDQ_EVENT_DISP_SMIASSERT_ENG_EVENT 454
+#define CMDQ_EVENT_DISP_POSTMASK0_RST_DONE_ENG_EVENT 455
+#define CMDQ_EVENT_DISP_OVL2_2L_RST_DONE_ENG_EVENT 456
+#define CMDQ_EVENT_DISP_OVL0_RST_DONE_ENG_EVENT 457
+#define CMDQ_EVENT_DISP_OVL0_2L_RST_DONE_ENG_EVENT 458
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_0 459
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_1 460
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_2 461
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_3 462
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_4 463
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_5 464
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_6 465
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_7 466
+#define CMDQ_MAX_HW_EVENT 512
+
+#endif
diff --git a/include/dt-bindings/gce/mt8195-gce.h b/include/dt-bindings/gce/mt8195-gce.h
new file mode 100644
index 000000000000..dcfb302b8a5b
--- /dev/null
+++ b/include/dt-bindings/gce/mt8195-gce.h
@@ -0,0 +1,812 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Jason-JH Lin <jason0jh.lin@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_GCE_MT8195_H
+#define _DT_BINDINGS_GCE_MT8195_H
+
+/* assigning a timeout of 0 also selects the default */
+#define CMDQ_NO_TIMEOUT 0xffffffff
+#define CMDQ_TIMEOUT_DEFAULT 1000
+
+/* GCE thread priority */
+#define CMDQ_THR_PRIO_LOWEST 0
+#define CMDQ_THR_PRIO_1 1
+#define CMDQ_THR_PRIO_2 2
+#define CMDQ_THR_PRIO_3 3
+#define CMDQ_THR_PRIO_4 4
+#define CMDQ_THR_PRIO_5 5
+#define CMDQ_THR_PRIO_6 6
+#define CMDQ_THR_PRIO_HIGHEST 7
+
+/* CPR count in 32-bit registers */
+#define GCE_CPR_COUNT 1312
+
+/* GCE subsys table */
+#define SUBSYS_1400XXXX 0
+#define SUBSYS_1401XXXX 1
+#define SUBSYS_1402XXXX 2
+#define SUBSYS_1c00XXXX 3
+#define SUBSYS_1c01XXXX 4
+#define SUBSYS_1c02XXXX 5
+#define SUBSYS_1c10XXXX 6
+#define SUBSYS_1c11XXXX 7
+#define SUBSYS_1c12XXXX 8
+#define SUBSYS_14f0XXXX 9
+#define SUBSYS_14f1XXXX 10
+#define SUBSYS_14f2XXXX 11
+#define SUBSYS_1800XXXX 12
+#define SUBSYS_1801XXXX 13
+#define SUBSYS_1802XXXX 14
+#define SUBSYS_1803XXXX 15
+#define SUBSYS_1032XXXX 16
+#define SUBSYS_1033XXXX 17
+#define SUBSYS_1600XXXX 18
+#define SUBSYS_1601XXXX 19
+#define SUBSYS_14e0XXXX 20
+#define SUBSYS_1c20XXXX 21
+#define SUBSYS_1c30XXXX 22
+#define SUBSYS_1c40XXXX 23
+#define SUBSYS_1c50XXXX 24
+#define SUBSYS_1c60XXXX 25
+
+/* GCE General Purpose Register (GPR) support */
+#define GCE_GPR_R00 0x0
+#define GCE_GPR_R01 0x1
+#define GCE_GPR_R02 0x2
+#define GCE_GPR_R03 0x3
+#define GCE_GPR_R04 0x4
+#define GCE_GPR_R05 0x5
+#define GCE_GPR_R06 0x6
+#define GCE_GPR_R07 0x7
+#define GCE_GPR_R08 0x8
+#define GCE_GPR_R09 0x9
+#define GCE_GPR_R10 0xa
+#define GCE_GPR_R11 0xb
+#define GCE_GPR_R12 0xc
+#define GCE_GPR_R13 0xd
+#define GCE_GPR_R14 0xe
+#define GCE_GPR_R15 0xf
+
+/* GCE HW event IDs */
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_0 1
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_1 2
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_2 3
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_3 4
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_4 5
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_5 6
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_6 7
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_7 8
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_8 9
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_9 10
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_10 11
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_11 12
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_12 13
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_13 14
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_14 15
+#define CMDQ_EVENT_TRAW0_DMA_ERROR_INT 16
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_0 17
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_1 18
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_2 19
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_3 20
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_4 21
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_5 22
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_6 23
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_7 24
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_8 25
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_9 26
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_10 27
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_11 28
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_12 29
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_13 30
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_14 31
+#define CMDQ_EVENT_TRAW1_DMA_ERROR_INT 32
+
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_0 65
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_1 66
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_2 67
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_3 68
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_4 69
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_5 70
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_6 71
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_7 72
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_8 73
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_9 74
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_10 75
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_11 76
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_12 77
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_13 78
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_14 79
+#define CMDQ_EVENT_DIP0_DMA_ERR 80
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_0 81
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_1 82
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_2 83
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_3 84
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_4 85
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_5 86
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_6 87
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_7 88
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_8 89
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_9 90
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_10 91
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_11 92
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_12 93
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_13 94
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_14 95
+#define CMDQ_EVENT_PQA0_DMA_ERR 96
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_0 97
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_1 98
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_2 99
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_3 100
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_4 101
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_5 102
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_6 103
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_7 104
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_8 105
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_9 106
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_10 107
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_11 108
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_12 109
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_13 110
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_14 111
+#define CMDQ_EVENT_PQB0_DMA_ERR 112
+#define CMDQ_EVENT_DIP0_DUMMY_0 113
+#define CMDQ_EVENT_DIP0_DUMMY_1 114
+#define CMDQ_EVENT_DIP0_DUMMY_2 115
+#define CMDQ_EVENT_DIP0_DUMMY_3 116
+#define CMDQ_EVENT_WPE0_EIS_GCE_FRAME_DONE 117
+#define CMDQ_EVENT_WPE0_EIS_DONE_SYNC_OUT 118
+#define CMDQ_EVENT_WPE0_TNR_GCE_FRAME_DONE 119
+#define CMDQ_EVENT_WPE0_TNR_DONE_SYNC_OUT 120
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_0 121
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_1 122
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_2 123
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_3 124
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_4 125
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_5 126
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_6 127
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_7 128
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_8 129
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_9 130
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_10 131
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_11 132
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_12 133
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_13 134
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_14 135
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_0 136
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_1 137
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_2 138
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_3 139
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_4 140
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_5 141
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_6 142
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_7 143
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_8 144
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_9 145
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_10 146
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_11 147
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_12 148
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_13 149
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_14 150
+#define CMDQ_EVENT_WPE0_DUMMY_0 151
+#define CMDQ_EVENT_IMGSYS_IPE_DUMMY 152
+#define CMDQ_EVENT_IMGSYS_IPE_FDVT_DONE 153
+#define CMDQ_EVENT_IMGSYS_IPE_ME_DONE 154
+#define CMDQ_EVENT_IMGSYS_IPE_DVS_DONE 155
+#define CMDQ_EVENT_IMGSYS_IPE_DVP_DONE 156
+
+#define CMDQ_EVENT_TPR_0 194
+#define CMDQ_EVENT_TPR_1 195
+#define CMDQ_EVENT_TPR_2 196
+#define CMDQ_EVENT_TPR_3 197
+#define CMDQ_EVENT_TPR_4 198
+#define CMDQ_EVENT_TPR_5 199
+#define CMDQ_EVENT_TPR_6 200
+#define CMDQ_EVENT_TPR_7 201
+#define CMDQ_EVENT_TPR_8 202
+#define CMDQ_EVENT_TPR_9 203
+#define CMDQ_EVENT_TPR_10 204
+#define CMDQ_EVENT_TPR_11 205
+#define CMDQ_EVENT_TPR_12 206
+#define CMDQ_EVENT_TPR_13 207
+#define CMDQ_EVENT_TPR_14 208
+#define CMDQ_EVENT_TPR_15 209
+#define CMDQ_EVENT_TPR_16 210
+#define CMDQ_EVENT_TPR_17 211
+#define CMDQ_EVENT_TPR_18 212
+#define CMDQ_EVENT_TPR_19 213
+#define CMDQ_EVENT_TPR_20 214
+#define CMDQ_EVENT_TPR_21 215
+#define CMDQ_EVENT_TPR_22 216
+#define CMDQ_EVENT_TPR_23 217
+#define CMDQ_EVENT_TPR_24 218
+#define CMDQ_EVENT_TPR_25 219
+#define CMDQ_EVENT_TPR_26 220
+#define CMDQ_EVENT_TPR_27 221
+#define CMDQ_EVENT_TPR_28 222
+#define CMDQ_EVENT_TPR_29 223
+#define CMDQ_EVENT_TPR_30 224
+#define CMDQ_EVENT_TPR_31 225
+#define CMDQ_EVENT_TPR_TIMEOUT_0 226
+#define CMDQ_EVENT_TPR_TIMEOUT_1 227
+#define CMDQ_EVENT_TPR_TIMEOUT_2 228
+#define CMDQ_EVENT_TPR_TIMEOUT_3 229
+#define CMDQ_EVENT_TPR_TIMEOUT_4 230
+#define CMDQ_EVENT_TPR_TIMEOUT_5 231
+#define CMDQ_EVENT_TPR_TIMEOUT_6 232
+#define CMDQ_EVENT_TPR_TIMEOUT_7 233
+#define CMDQ_EVENT_TPR_TIMEOUT_8 234
+#define CMDQ_EVENT_TPR_TIMEOUT_9 235
+#define CMDQ_EVENT_TPR_TIMEOUT_10 236
+#define CMDQ_EVENT_TPR_TIMEOUT_11 237
+#define CMDQ_EVENT_TPR_TIMEOUT_12 238
+#define CMDQ_EVENT_TPR_TIMEOUT_13 239
+#define CMDQ_EVENT_TPR_TIMEOUT_14 240
+#define CMDQ_EVENT_TPR_TIMEOUT_15 241
+
+#define CMDQ_EVENT_VPP0_MDP_RDMA_SOF 256
+#define CMDQ_EVENT_VPP0_MDP_FG_SOF 257
+#define CMDQ_EVENT_VPP0_STITCH_SOF 258
+#define CMDQ_EVENT_VPP0_MDP_HDR_SOF 259
+#define CMDQ_EVENT_VPP0_MDP_AAL_SOF 260
+#define CMDQ_EVENT_VPP0_MDP_RSZ_IN_RSZ_SOF 261
+#define CMDQ_EVENT_VPP0_MDP_TDSHP_SOF 262
+#define CMDQ_EVENT_VPP0_DISP_COLOR_SOF 263
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_SOF 264
+#define CMDQ_EVENT_VPP0_VPP_PADDING_IN_PADDING_SOF 265
+#define CMDQ_EVENT_VPP0_MDP_TCC_IN_SOF 266
+#define CMDQ_EVENT_VPP0_MDP_WROT_SOF 267
+
+#define CMDQ_EVENT_VPP0_WARP0_MMSYS_TOP_RELAY_SOF_PRE 269
+#define CMDQ_EVENT_VPP0_WARP1_MMSYS_TOP_RELAY_SOF_PRE 270
+#define CMDQ_EVENT_VPP0_VPP1_MMSYS_TOP_RELAY_SOF 271
+#define CMDQ_EVENT_VPP0_VPP1_IN_MMSYS_TOP_RELAY_SOF_PRE 272
+
+#define CMDQ_EVENT_VPP0_MDP_RDMA_FRAME_DONE 288
+#define CMDQ_EVENT_VPP0_MDP_FG_TILE_DONE 289
+#define CMDQ_EVENT_VPP0_STITCH_FRAME_DONE 290
+#define CMDQ_EVENT_VPP0_MDP_HDR_FRAME_DONE 291
+#define CMDQ_EVENT_VPP0_MDP_AAL_FRAME_DONE 292
+#define CMDQ_EVENT_VPP0_MDP_RSZ_FRAME_DONE 293
+#define CMDQ_EVENT_VPP0_MDP_TDSHP_FRAME_DONE 294
+#define CMDQ_EVENT_VPP0_DISP_COLOR_FRAME_DONE 295
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_FRAME_DONE 296
+#define CMDQ_EVENT_VPP0_VPP_PADDING_IN_PADDING_FRAME_DONE 297
+#define CMDQ_EVENT_VPP0_MDP_TCC_TCC_FRAME_DONE 298
+#define CMDQ_EVENT_VPP0_MDP_WROT_VIDO_WDONE 299
+
+#define CMDQ_EVENT_VPP0_STREAM_DONE_0 320
+#define CMDQ_EVENT_VPP0_STREAM_DONE_1 321
+#define CMDQ_EVENT_VPP0_STREAM_DONE_2 322
+#define CMDQ_EVENT_VPP0_STREAM_DONE_3 323
+#define CMDQ_EVENT_VPP0_STREAM_DONE_4 324
+#define CMDQ_EVENT_VPP0_STREAM_DONE_5 325
+#define CMDQ_EVENT_VPP0_STREAM_DONE_6 326
+#define CMDQ_EVENT_VPP0_STREAM_DONE_7 327
+#define CMDQ_EVENT_VPP0_STREAM_DONE_8 328
+#define CMDQ_EVENT_VPP0_STREAM_DONE_9 329
+#define CMDQ_EVENT_VPP0_STREAM_DONE_10 330
+#define CMDQ_EVENT_VPP0_STREAM_DONE_11 331
+#define CMDQ_EVENT_VPP0_STREAM_DONE_12 332
+#define CMDQ_EVENT_VPP0_STREAM_DONE_13 333
+#define CMDQ_EVENT_VPP0_STREAM_DONE_14 334
+#define CMDQ_EVENT_VPP0_STREAM_DONE_15 335
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_0 336
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_1 337
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_2 338
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_3 339
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_4 340
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_5 341
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_6 342
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_7 343
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_8 344
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_9 345
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_10 346
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_11 347
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_12 348
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_13 349
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_14 350
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_15 351
+#define CMDQ_EVENT_VPP0_MDP_RDMA_SW_RST_DONE 352
+#define CMDQ_EVENT_VPP0_MDP_RDMA_PM_VALID 353
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_FRAME_RESET_DONE_PULSE 354
+#define CMDQ_EVENT_VPP0_MDP_WROT_SW_RST_DONE 355
+
+#define CMDQ_EVENT_VPP1_HDMI_META_SOF 384
+#define CMDQ_EVENT_VPP1_DGI_SOF 385
+#define CMDQ_EVENT_VPP1_VPP_SPLIT_SOF 386
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_TCC_SOF 387
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_RDMA_SOF 388
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_RDMA_SOF 389
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_RDMA_SOF 390
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_FG_SOF 391
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_FG_SOF 392
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_FG_SOF 393
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_HDR_SOF 394
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_HDR_SOF 395
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_HDR_SOF 396
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_AAL_SOF 397
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_AAL_SOF 398
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_AAL_SOF 399
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_RSZ_SOF 400
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_RSZ_SOF 401
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_RSZ_SOF 402
+#define CMDQ_EVENT_VPP1_SVPP1_TDSHP_SOF 403
+#define CMDQ_EVENT_VPP1_SVPP2_TDSHP_SOF 404
+#define CMDQ_EVENT_VPP1_SVPP3_TDSHP_SOF 405
+#define CMDQ_EVENT_VPP1_SVPP2_VPP_MERGE_SOF 406
+#define CMDQ_EVENT_VPP1_SVPP3_VPP_MERGE_SOF 407
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_COLOR_SOF 408
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_COLOR_SOF 409
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_COLOR_SOF 410
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_OVL_SOF 411
+#define CMDQ_EVENT_VPP1_SVPP1_VPP_PAD_SOF 412
+#define CMDQ_EVENT_VPP1_SVPP2_VPP_PAD_SOF 413
+#define CMDQ_EVENT_VPP1_SVPP3_VPP_PAD_SOF 414
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_WROT_SOF 415
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_WROT_SOF 416
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_WROT_SOF 417
+#define CMDQ_EVENT_VPP1_VPP0_DL_IRLY_SOF 418
+#define CMDQ_EVENT_VPP1_VPP0_DL_ORLY_SOF 419
+#define CMDQ_EVENT_VPP1_VDO0_DL_ORLY_0_SOF 420
+#define CMDQ_EVENT_VPP1_VDO0_DL_ORLY_1_SOF 421
+#define CMDQ_EVENT_VPP1_VDO1_DL_ORLY_0_SOF 422
+#define CMDQ_EVENT_VPP1_VDO1_DL_ORLY_1_SOF 423
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_RDMA_FRAME_DONE 424
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_RDMA_FRAME_DONE 425
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_RDMA_FRAME_DONE 426
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_WROT_FRAME_DONE 427
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_WROT_FRAME_DONE 428
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_WROT_FRAME_DONE 429
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_OVL_FRAME_DONE 430
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_RSZ_FRAME_DONE 431
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_RSZ_FRAME_DONE 432
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_RSZ_FRAME_DONE 433
+#define CMDQ_EVENT_VPP1_FRAME_DONE_10 434
+#define CMDQ_EVENT_VPP1_FRAME_DONE_11 435
+#define CMDQ_EVENT_VPP1_FRAME_DONE_12 436
+#define CMDQ_EVENT_VPP1_FRAME_DONE_13 437
+#define CMDQ_EVENT_VPP1_FRAME_DONE_14 438
+#define CMDQ_EVENT_VPP1_STREAM_DONE_0 439
+#define CMDQ_EVENT_VPP1_STREAM_DONE_1 440
+#define CMDQ_EVENT_VPP1_STREAM_DONE_2 441
+#define CMDQ_EVENT_VPP1_STREAM_DONE_3 442
+#define CMDQ_EVENT_VPP1_STREAM_DONE_4 443
+#define CMDQ_EVENT_VPP1_STREAM_DONE_5 444
+#define CMDQ_EVENT_VPP1_STREAM_DONE_6 445
+#define CMDQ_EVENT_VPP1_STREAM_DONE_7 446
+#define CMDQ_EVENT_VPP1_STREAM_DONE_8 447
+#define CMDQ_EVENT_VPP1_STREAM_DONE_9 448
+#define CMDQ_EVENT_VPP1_STREAM_DONE_10 449
+#define CMDQ_EVENT_VPP1_STREAM_DONE_11 450
+#define CMDQ_EVENT_VPP1_STREAM_DONE_12 451
+#define CMDQ_EVENT_VPP1_STREAM_DONE_13 452
+#define CMDQ_EVENT_VPP1_STREAM_DONE_14 453
+#define CMDQ_EVENT_VPP1_STREAM_DONE_15 454
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_0 455
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_1 456
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_2 457
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_3 458
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_4 459
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_5 460
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_6 461
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_7 462
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_8 463
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_9 464
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_10 465
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_11 466
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_12 467
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_13 468
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_14 469
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_15 470
+#define CMDQ_EVENT_VPP1_DGI_0 471
+#define CMDQ_EVENT_VPP1_DGI_1 472
+#define CMDQ_EVENT_VPP1_DGI_2 473
+#define CMDQ_EVENT_VPP1_DGI_3 474
+#define CMDQ_EVENT_VPP1_DGI_4 475
+#define CMDQ_EVENT_VPP1_DGI_5 476
+#define CMDQ_EVENT_VPP1_DGI_6 477
+#define CMDQ_EVENT_VPP1_DGI_7 478
+#define CMDQ_EVENT_VPP1_DGI_8 479
+#define CMDQ_EVENT_VPP1_DGI_9 480
+#define CMDQ_EVENT_VPP1_DGI_10 481
+#define CMDQ_EVENT_VPP1_DGI_11 482
+#define CMDQ_EVENT_VPP1_DGI_12 483
+#define CMDQ_EVENT_VPP1_DGI_13 484
+#define CMDQ_EVENT_VPP1_SVPP3_VPP_MERGE 485
+#define CMDQ_EVENT_VPP1_SVPP2_VPP_MERGE 486
+#define CMDQ_EVENT_VPP1_MDP_OVL_FRAME_RESET_DONE_PULSE 487
+#define CMDQ_EVENT_VPP1_VPP_SPLIT_DGI 488
+#define CMDQ_EVENT_VPP1_VPP_SPLIT_HDMI 489
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_WROT_SW_RST_DONE 490
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_WROT_SW_RST_DONE 491
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_WROT_SW_RST_DONE 492
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_FG_TILE_DONE 493
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_FG_TILE_DONE 494
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_FG_TILE_DONE 495
+
+#define CMDQ_EVENT_VDO0_DISP_OVL0_SOF 512
+#define CMDQ_EVENT_VDO0_DISP_WDMA0_SOF 513
+#define CMDQ_EVENT_VDO0_DISP_RDMA0_SOF 514
+#define CMDQ_EVENT_VDO0_DISP_COLOR0_SOF 515
+#define CMDQ_EVENT_VDO0_DISP_CCORR0_SOF 516
+#define CMDQ_EVENT_VDO0_DISP_AAL0_SOF 517
+#define CMDQ_EVENT_VDO0_DISP_GAMMA0_SOF 518
+#define CMDQ_EVENT_VDO0_DISP_DITHER0_SOF 519
+#define CMDQ_EVENT_VDO0_DSI0_SOF 520
+#define CMDQ_EVENT_VDO0_DSC_WRAP0C0_SOF 521
+#define CMDQ_EVENT_VDO0_DISP_OVL1_SOF 522
+#define CMDQ_EVENT_VDO0_DISP_WDMA1_SOF 523
+#define CMDQ_EVENT_VDO0_DISP_RDMA1_SOF 524
+#define CMDQ_EVENT_VDO0_DISP_COLOR1_SOF 525
+#define CMDQ_EVENT_VDO0_DISP_CCORR1_SOF 526
+#define CMDQ_EVENT_VDO0_DISP_AAL1_SOF 527
+#define CMDQ_EVENT_VDO0_DISP_GAMMA1_SOF 528
+#define CMDQ_EVENT_VDO0_DISP_DITHER1_SOF 529
+#define CMDQ_EVENT_VDO0_DSI1_SOF 530
+#define CMDQ_EVENT_VDO0_DSC_WRAP0C1_SOF 531
+#define CMDQ_EVENT_VDO0_VPP_MERGE0_SOF 532
+#define CMDQ_EVENT_VDO0_DP_INTF0_SOF 533
+#define CMDQ_EVENT_VDO0_VPP1_DL_RELAY0_SOF 534
+#define CMDQ_EVENT_VDO0_VPP1_DL_RELAY1_SOF 535
+#define CMDQ_EVENT_VDO0_VDO1_DL_RELAY2_SOF 536
+#define CMDQ_EVENT_VDO0_VDO0_DL_RELAY3_SOF 537
+#define CMDQ_EVENT_VDO0_VDO0_DL_RELAY4_SOF 538
+#define CMDQ_EVENT_VDO0_DISP_PWM0_SOF 539
+#define CMDQ_EVENT_VDO0_DISP_PWM1_SOF 540
+
+#define CMDQ_EVENT_VDO0_DISP_OVL0_FRAME_DONE 544
+#define CMDQ_EVENT_VDO0_DISP_WDMA0_FRAME_DONE 545
+#define CMDQ_EVENT_VDO0_DISP_RDMA0_FRAME_DONE 546
+#define CMDQ_EVENT_VDO0_DISP_COLOR0_FRAME_DONE 547
+#define CMDQ_EVENT_VDO0_DISP_CCORR0_FRAME_DONE 548
+#define CMDQ_EVENT_VDO0_DISP_AAL0_FRAME_DONE 549
+#define CMDQ_EVENT_VDO0_DISP_GAMMA0_FRAME_DONE 550
+#define CMDQ_EVENT_VDO0_DISP_DITHER0_FRAME_DONE 551
+#define CMDQ_EVENT_VDO0_DSI0_FRAME_DONE 552
+#define CMDQ_EVENT_VDO0_DSC_WRAP0C0_FRAME_DONE 553
+#define CMDQ_EVENT_VDO0_DISP_OVL1_FRAME_DONE 554
+#define CMDQ_EVENT_VDO0_DISP_WDMA1_FRAME_DONE 555
+#define CMDQ_EVENT_VDO0_DISP_RDMA1_FRAME_DONE 556
+#define CMDQ_EVENT_VDO0_DISP_COLOR1_FRAME_DONE 557
+#define CMDQ_EVENT_VDO0_DISP_CCORR1_FRAME_DONE 558
+#define CMDQ_EVENT_VDO0_DISP_AAL1_FRAME_DONE 559
+#define CMDQ_EVENT_VDO0_DISP_GAMMA1_FRAME_DONE 560
+#define CMDQ_EVENT_VDO0_DISP_DITHER1_FRAME_DONE 561
+#define CMDQ_EVENT_VDO0_DSI1_FRAME_DONE 562
+#define CMDQ_EVENT_VDO0_DSC_WRAP0C1_FRAME_DONE 563
+
+#define CMDQ_EVENT_VDO0_DP_INTF0_FRAME_DONE 565
+
+#define CMDQ_EVENT_VDO0_DISP_SMIASSERT_ENG 576
+#define CMDQ_EVENT_VDO0_DSI0_IRQ_ENG_EVENT_MM 577
+#define CMDQ_EVENT_VDO0_DSI0_TE_ENG_EVENT_MM 578
+#define CMDQ_EVENT_VDO0_DSI0_DONE_ENG_EVENT_MM 579
+#define CMDQ_EVENT_VDO0_DSI0_SOF_ENG_EVENT_MM 580
+#define CMDQ_EVENT_VDO0_DSI0_VACTL_ENG_EVENT_MM 581
+#define CMDQ_EVENT_VDO0_DSI1_IRQ_ENG_EVENT_MM 582
+#define CMDQ_EVENT_VDO0_DSI1_TE_ENG_EVENT_MM 583
+#define CMDQ_EVENT_VDO0_DSI1_DONE_ENG_EVENT_MM 584
+#define CMDQ_EVENT_VDO0_DSI1_SOF_ENG_EVENT_MM 585
+#define CMDQ_EVENT_VDO0_DSI1_VACTL_ENG_EVENT_MM 586
+#define CMDQ_EVENT_VDO0_DISP_WDMA0_SW_RST_DONE_ENG 587
+#define CMDQ_EVENT_VDO0_DISP_WDMA1_SW_RST_DONE_ENG 588
+#define CMDQ_EVENT_VDO0_DISP_OVL0_RST_DONE_ENG 589
+#define CMDQ_EVENT_VDO0_DISP_OVL1_RST_DONE_ENG 590
+#define CMDQ_EVENT_VDO0_DP_INTF0_VSYNC_START_ENG_EVENT_MM 591
+#define CMDQ_EVENT_VDO0_DP_INTF0_VSYNC_END_ENG_EVENT_MM 592
+#define CMDQ_EVENT_VDO0_DP_INTF0_VDE_START_ENG_EVENT_MM 593
+#define CMDQ_EVENT_VDO0_DP_INTF0_VDE_END_ENG_EVENT_MM 594
+#define CMDQ_EVENT_VDO0_DP_INTF0_TARGET_LINE_ENG_EVENT_MM 595
+#define CMDQ_EVENT_VDO0_VPP_MERGE0_ENG 596
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_0 597
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_1 598
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_2 599
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_3 600
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_4 601
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_5 602
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_6 603
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_7 604
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_8 605
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_9 606
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_10 607
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_11 608
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_12 609
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_13 610
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_14 611
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_15 612
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_0 613
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_1 614
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_2 615
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_3 616
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_4 617
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_5 618
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_6 619
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_7 620
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_8 621
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_9 622
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_10 623
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_11 624
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_12 625
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_13 626
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_14 627
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_15 628
+
+#define CMDQ_EVENT_VDO1_MDP_RDMA0_SOF 640
+#define CMDQ_EVENT_VDO1_MDP_RDMA1_SOF 641
+#define CMDQ_EVENT_VDO1_MDP_RDMA2_SOF 642
+#define CMDQ_EVENT_VDO1_MDP_RDMA3_SOF 643
+#define CMDQ_EVENT_VDO1_MDP_RDMA4_SOF 644
+#define CMDQ_EVENT_VDO1_MDP_RDMA5_SOF 645
+#define CMDQ_EVENT_VDO1_MDP_RDMA6_SOF 646
+#define CMDQ_EVENT_VDO1_MDP_RDMA7_SOF 647
+#define CMDQ_EVENT_VDO1_VPP_MERGE0_SOF 648
+#define CMDQ_EVENT_VDO1_VPP_MERGE1_SOF 649
+#define CMDQ_EVENT_VDO1_VPP_MERGE2_SOF 650
+#define CMDQ_EVENT_VDO1_VPP_MERGE3_SOF 651
+#define CMDQ_EVENT_VDO1_VPP_MERGE4_SOF 652
+#define CMDQ_EVENT_VDO1_VPP2_DL_RELAY_SOF 653
+#define CMDQ_EVENT_VDO1_VPP3_DL_RELAY_SOF 654
+#define CMDQ_EVENT_VDO1_VDO0_DSC_DL_ASYNC_SOF 655
+#define CMDQ_EVENT_VDO1_VDO0_MERGE_DL_ASYNC_SOF 656
+#define CMDQ_EVENT_VDO1_OUT_DL_RELAY_SOF 657
+#define CMDQ_EVENT_VDO1_DISP_MIXER_SOF 658
+#define CMDQ_EVENT_VDO1_HDR_VDO_FE0_SOF 659
+#define CMDQ_EVENT_VDO1_HDR_VDO_FE1_SOF 660
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE0_SOF 661
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE1_SOF 662
+#define CMDQ_EVENT_VDO1_HDR_VDO_BE0_SOF 663
+#define CMDQ_EVENT_VDO1_HDR_MLOAD_SOF 664
+
+#define CMDQ_EVENT_VDO1_MDP_RDMA0_FRAME_DONE 672
+#define CMDQ_EVENT_VDO1_MDP_RDMA1_FRAME_DONE 673
+#define CMDQ_EVENT_VDO1_MDP_RDMA2_FRAME_DONE 674
+#define CMDQ_EVENT_VDO1_MDP_RDMA3_FRAME_DONE 675
+#define CMDQ_EVENT_VDO1_MDP_RDMA4_FRAME_DONE 676
+#define CMDQ_EVENT_VDO1_MDP_RDMA5_FRAME_DONE 677
+#define CMDQ_EVENT_VDO1_MDP_RDMA6_FRAME_DONE 678
+#define CMDQ_EVENT_VDO1_MDP_RDMA7_FRAME_DONE 679
+#define CMDQ_EVENT_VDO1_VPP_MERGE0_FRAME_DONE 680
+#define CMDQ_EVENT_VDO1_VPP_MERGE1_FRAME_DONE 681
+#define CMDQ_EVENT_VDO1_VPP_MERGE2_FRAME_DONE 682
+#define CMDQ_EVENT_VDO1_VPP_MERGE3_FRAME_DONE 683
+#define CMDQ_EVENT_VDO1_VPP_MERGE4_FRAME_DONE 684
+#define CMDQ_EVENT_VDO1_DPI0_FRAME_DONE 685
+#define CMDQ_EVENT_VDO1_DPI1_FRAME_DONE 686
+#define CMDQ_EVENT_VDO1_DP_INTF0_FRAME_DONE 687
+#define CMDQ_EVENT_VDO1_DISP_MIXER_FRAME_DONE_MM 688
+
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_0 704
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_1 705
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_2 706
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_3 707
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_4 708
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_5 709
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_6 710
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_7 711
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_8 712
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_9 713
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_10 714
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_11 715
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_12 716
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_13 717
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_14 718
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_15 719
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_0 720
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_1 721
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_2 722
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_3 723
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_4 724
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_5 725
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_6 726
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_7 727
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_8 728
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_9 729
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_10 730
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_11 731
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_12 732
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_13 733
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_14 734
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_15 735
+#define CMDQ_EVENT_VDO1_MDP_RDMA0_SW_RST_DONE 736
+#define CMDQ_EVENT_VDO1_MDP_RDMA1_SW_RST_DONE 737
+#define CMDQ_EVENT_VDO1_MDP_RDMA2_SW_RST_DONE 738
+#define CMDQ_EVENT_VDO1_MDP_RDMA3_SW_RST_DONE 739
+#define CMDQ_EVENT_VDO1_MDP_RDMA4_SW_RST_DONE 740
+#define CMDQ_EVENT_VDO1_MDP_RDMA5_SW_RST_DONE 741
+#define CMDQ_EVENT_VDO1_MDP_RDMA6_SW_RST_DONE 742
+#define CMDQ_EVENT_VDO1_MDP_RDMA7_SW_RST_DONE 743
+
+#define CMDQ_EVENT_VDO1_DP0_VDE_END_ENG_EVENT_MM 745
+#define CMDQ_EVENT_VDO1_DP0_VDE_START_ENG_EVENT_MM 746
+#define CMDQ_EVENT_VDO1_DP0_VSYNC_END_ENG_EVENT_MM 747
+#define CMDQ_EVENT_VDO1_DP0_VSYNC_START_ENG_EVENT_MM 748
+#define CMDQ_EVENT_VDO1_DP0_TARGET_LINE_ENG_EVENT_MM 749
+#define CMDQ_EVENT_VDO1_VPP_MERGE0 750
+#define CMDQ_EVENT_VDO1_VPP_MERGE1 751
+#define CMDQ_EVENT_VDO1_VPP_MERGE2 752
+#define CMDQ_EVENT_VDO1_VPP_MERGE3 753
+#define CMDQ_EVENT_VDO1_VPP_MERGE4 754
+#define CMDQ_EVENT_VDO1_HDMITX 755
+#define CMDQ_EVENT_VDO1_HDR_VDO_BE0_ADL_TRIG_EVENT_MM 756
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE1_THDR_ADL_TRIG_EVENT_MM 757
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE1_DM_ADL_TRIG_EVENT_MM 758
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE0_THDR_ADL_TRIG_EVENT_MM 759
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE0_DM_ADL_TRIG_EVENT_MM 760
+#define CMDQ_EVENT_VDO1_HDR_VDO_FE1_ADL_TRIG_EVENT_MM 761
+#define CMDQ_EVENT_VDO1_HDR_VDO_FE1_AD0_TRIG_EVENT_MM 762
+
+#define CMDQ_EVENT_CAM_A_PASS1_DONE 769
+#define CMDQ_EVENT_CAM_B_PASS1_DONE 770
+#define CMDQ_EVENT_GCAMSV_A_PASS1_DONE 771
+#define CMDQ_EVENT_GCAMSV_B_PASS1_DONE 772
+#define CMDQ_EVENT_MRAW_0_PASS1_DONE 773
+#define CMDQ_EVENT_MRAW_1_PASS1_DONE 774
+#define CMDQ_EVENT_MRAW_2_PASS1_DONE 775
+#define CMDQ_EVENT_MRAW_3_PASS1_DONE 776
+#define CMDQ_EVENT_SENINF_CAM0_FIFO_FULL_X 777
+#define CMDQ_EVENT_SENINF_CAM1_FIFO_FULL_X 778
+#define CMDQ_EVENT_SENINF_CAM2_FIFO_FULL 779
+#define CMDQ_EVENT_SENINF_CAM3_FIFO_FULL 780
+#define CMDQ_EVENT_SENINF_CAM4_FIFO_FULL 781
+#define CMDQ_EVENT_SENINF_CAM5_FIFO_FULL 782
+#define CMDQ_EVENT_SENINF_CAM6_FIFO_FULL 783
+#define CMDQ_EVENT_SENINF_CAM7_FIFO_FULL 784
+#define CMDQ_EVENT_SENINF_CAM8_FIFO_FULL 785
+#define CMDQ_EVENT_SENINF_CAM9_FIFO_FULL 786
+#define CMDQ_EVENT_SENINF_CAM10_FIFO_FULL_X 787
+#define CMDQ_EVENT_SENINF_CAM11_FIFO_FULL_X 788
+#define CMDQ_EVENT_SENINF_CAM12_FIFO_FULL_X 789
+#define CMDQ_EVENT_SENINF_CAM13_FIFO_FULL_X 790
+#define CMDQ_EVENT_TG_OVRUN_MRAW0_INT_X0 791
+#define CMDQ_EVENT_TG_OVRUN_MRAW1_INT_X0 792
+#define CMDQ_EVENT_TG_OVRUN_MRAW2_INT 793
+#define CMDQ_EVENT_TG_OVRUN_MRAW3_INT 794
+#define CMDQ_EVENT_DMA_R1_ERROR_MRAW0_INT 795
+#define CMDQ_EVENT_DMA_R1_ERROR_MRAW1_INT 796
+#define CMDQ_EVENT_DMA_R1_ERROR_MRAW2_INT 797
+#define CMDQ_EVENT_DMA_R1_ERROR_MRAW3_INT 798
+#define CMDQ_EVENT_U_CAMSYS_PDA_IRQO_EVENT_DONE_D1 799
+#define CMDQ_EVENT_SUBB_TG_INT4 800
+#define CMDQ_EVENT_SUBB_TG_INT3 801
+#define CMDQ_EVENT_SUBB_TG_INT2 802
+#define CMDQ_EVENT_SUBB_TG_INT1 803
+#define CMDQ_EVENT_SUBA_TG_INT4 804
+#define CMDQ_EVENT_SUBA_TG_INT3 805
+#define CMDQ_EVENT_SUBA_TG_INT2 806
+#define CMDQ_EVENT_SUBA_TG_INT1 807
+#define CMDQ_EVENT_SUBB_DRZS4NO_R1_LOW_LATENCY_LINE_CNT_INT 808
+#define CMDQ_EVENT_SUBB_YUVO_R3_LOW_LATENCY_LINE_CNT_INT 809
+#define CMDQ_EVENT_SUBB_YUVO_R1_LOW_LATENCY_LINE_CNT_INT 810
+#define CMDQ_EVENT_SUBB_IMGO_R1_LOW_LATENCY_LINE_CNT_INT 811
+#define CMDQ_EVENT_SUBA_DRZS4NO_R1_LOW_LATENCY_LINE_CNT_INT 812
+#define CMDQ_EVENT_SUBA_YUVO_R3_LOW_LATENCY_LINE_CNT_INT 813
+#define CMDQ_EVENT_SUBA_YUVO_R1_LOW_LATENCY_LINE_CNT_INT 814
+#define CMDQ_EVENT_SUBA_IMGO_R1_LOW_LATENCY_LINE_CNT_INT 815
+#define CMDQ_EVENT_GCE1_SOF_0 816
+#define CMDQ_EVENT_GCE1_SOF_1 817
+#define CMDQ_EVENT_GCE1_SOF_2 818
+#define CMDQ_EVENT_GCE1_SOF_3 819
+#define CMDQ_EVENT_GCE1_SOF_4 820
+#define CMDQ_EVENT_GCE1_SOF_5 821
+#define CMDQ_EVENT_GCE1_SOF_6 822
+#define CMDQ_EVENT_GCE1_SOF_7 823
+#define CMDQ_EVENT_GCE1_SOF_8 824
+#define CMDQ_EVENT_GCE1_SOF_9 825
+#define CMDQ_EVENT_GCE1_SOF_10 826
+#define CMDQ_EVENT_GCE1_SOF_11 827
+#define CMDQ_EVENT_GCE1_SOF_12 828
+#define CMDQ_EVENT_GCE1_SOF_13 829
+#define CMDQ_EVENT_GCE1_SOF_14 830
+#define CMDQ_EVENT_GCE1_SOF_15 831
+
+#define CMDQ_EVENT_VDEC_LAT_LINE_COUNT_THRESHOLD_INTERRUPT 832
+#define CMDQ_EVENT_VDEC_LAT_VDEC_INT 833
+#define CMDQ_EVENT_VDEC_LAT_VDEC_PAUSE 834
+#define CMDQ_EVENT_VDEC_LAT_VDEC_DEC_ERROR 835
+#define CMDQ_EVENT_VDEC_LAT_MC_BUSY_OVERFLOW_MDEC_TIMEOUT 836
+#define CMDQ_EVENT_VDEC_LAT_VDEC_FRAME_DONE 837
+#define CMDQ_EVENT_VDEC_LAT_INI_FETCH_RDY 838
+#define CMDQ_EVENT_VDEC_LAT_PROCESS_FLAG 839
+#define CMDQ_EVENT_VDEC_LAT_SEARCH_START_CODE_DONE 840
+#define CMDQ_EVENT_VDEC_LAT_REF_REORDER_DONE 841
+#define CMDQ_EVENT_VDEC_LAT_WP_TBLE_DONE 842
+#define CMDQ_EVENT_VDEC_LAT_COUNT_SRAM_CLR_DONE_AND_CTX_SRAM_CLR_DONE 843
+#define CMDQ_EVENT_VDEC_LAT_GCE_CNT_OP_THRESHOLD 847
+
+#define CMDQ_EVENT_VDEC_LAT1_LINE_COUNT_THRESHOLD_INTERRUPT 848
+#define CMDQ_EVENT_VDEC_LAT1_VDEC_INT 849
+#define CMDQ_EVENT_VDEC_LAT1_VDEC_PAUSE 850
+#define CMDQ_EVENT_VDEC_LAT1_VDEC_DEC_ERROR 851
+#define CMDQ_EVENT_VDEC_LAT1_MC_BUSY_OVERFLOW_MDEC_TIMEOUT 852
+#define CMDQ_EVENT_VDEC_LAT1_VDEC_FRAME_DONE 853
+#define CMDQ_EVENT_VDEC_LAT1_INI_FETCH_RDY 854
+#define CMDQ_EVENT_VDEC_LAT1_PROCESS_FLAG 855
+#define CMDQ_EVENT_VDEC_LAT1_SEARCH_START_CODE_DONE 856
+#define CMDQ_EVENT_VDEC_LAT1_REF_REORDER_DONE 857
+#define CMDQ_EVENT_VDEC_LAT1_WP_TBLE_DONE 858
+#define CMDQ_EVENT_VDEC_LAT1_COUNT_SRAM_CLR_DONE_AND_CTX_SRAM_CLR_DONE 859
+#define CMDQ_EVENT_VDEC_LAT1_GCE_CNT_OP_THRESHOLD 863
+
+#define CMDQ_EVENT_VDEC_SOC_GLOBAL_CON_250_0 864
+#define CMDQ_EVENT_VDEC_SOC_GLOBAL_CON_250_1 865
+
+#define CMDQ_EVENT_VDEC_SOC_GLOBAL_CON_250_8 872
+#define CMDQ_EVENT_VDEC_SOC_GLOBAL_CON_250_9 873
+
+#define CMDQ_EVENT_VDEC_CORE_LINE_COUNT_THRESHOLD_INTERRUPT 896
+#define CMDQ_EVENT_VDEC_CORE_VDEC_INT 897
+#define CMDQ_EVENT_VDEC_CORE_VDEC_PAUSE 898
+#define CMDQ_EVENT_VDEC_CORE_VDEC_DEC_ERROR 899
+#define CMDQ_EVENT_VDEC_CORE_MC_BUSY_OVERFLOW_MDEC_TIMEOUT 900
+#define CMDQ_EVENT_VDEC_CORE_VDEC_FRAME_DONE 901
+#define CMDQ_EVENT_VDEC_CORE_INI_FETCH_RDY 902
+#define CMDQ_EVENT_VDEC_CORE_PROCESS_FLAG 903
+#define CMDQ_EVENT_VDEC_CORE_SEARCH_START_CODE_DONE 904
+#define CMDQ_EVENT_VDEC_CORE_REF_REORDER_DONE 905
+#define CMDQ_EVENT_VDEC_CORE_WP_TBLE_DONE 906
+#define CMDQ_EVENT_VDEC_CORE_COUNT_SRAM_CLR_DONE_AND_CTX_SRAM_CLR_DONE 907
+#define CMDQ_EVENT_VDEC_CORE_GCE_CNT_OP_THRESHOLD 911
+
+#define CMDQ_EVENT_VDEC_CORE1_LINE_COUNT_THRESHOLD_INTERRUPT 912
+#define CMDQ_EVENT_VDEC_CORE1_VDEC_INT 913
+#define CMDQ_EVENT_VDEC_CORE1_VDEC_PAUSE 914
+#define CMDQ_EVENT_VDEC_CORE1_VDEC_DEC_ERROR 915
+#define CMDQ_EVENT_VDEC_CORE1_MC_BUSY_OVERFLOW_MDEC_TIMEOUT 916
+#define CMDQ_EVENT_VDEC_CORE1_VDEC_FRAME_DONE 917
+#define CMDQ_EVENT_VDEC_CORE1_INI_FETCH_RDY 918
+#define CMDQ_EVENT_VDEC_CORE1_PROCESS_FLAG 919
+#define CMDQ_EVENT_VDEC_CORE1_SEARCH_START_CODE_DONE 920
+#define CMDQ_EVENT_VDEC_CORE1_REF_REORDER_DONE 921
+#define CMDQ_EVENT_VDEC_CORE1_WP_TBLE_DONE 922
+#define CMDQ_EVENT_VDEC_CORE1_COUNT_SRAM_CLR_DONE_AND_CTX_SRAM_CLR_DONE 923
+#define CMDQ_EVENT_VDEC_CORE1_CNT_OP_THRESHOLD 927
+
+#define CMDQ_EVENT_VENC_TOP_FRAME_DONE 929
+#define CMDQ_EVENT_VENC_TOP_PAUSE_DONE 930
+#define CMDQ_EVENT_VENC_TOP_JPGENC_DONE 931
+#define CMDQ_EVENT_VENC_TOP_MB_DONE 932
+#define CMDQ_EVENT_VENC_TOP_128BYTE_DONE 933
+#define CMDQ_EVENT_VENC_TOP_JPGDEC_DONE 934
+#define CMDQ_EVENT_VENC_TOP_JPGDEC_C1_DONE 935
+#define CMDQ_EVENT_VENC_TOP_JPGDEC_INSUFF_DONE 936
+#define CMDQ_EVENT_VENC_TOP_JPGDEC_C1_INSUFF_DONE 937
+#define CMDQ_EVENT_VENC_TOP_WP_2ND_STAGE_DONE 938
+#define CMDQ_EVENT_VENC_TOP_WP_3RD_STAGE_DONE 939
+#define CMDQ_EVENT_VENC_TOP_PPS_HEADER_DONE 940
+#define CMDQ_EVENT_VENC_TOP_SPS_HEADER_DONE 941
+#define CMDQ_EVENT_VENC_TOP_VPS_HEADER_DONE 942
+
+#define CMDQ_EVENT_VENC_CORE1_TOP_FRAME_DONE 945
+#define CMDQ_EVENT_VENC_CORE1_TOP_PAUSE_DONE 946
+#define CMDQ_EVENT_VENC_CORE1_TOP_JPGENC_DONE 947
+#define CMDQ_EVENT_VENC_CORE1_TOP_MB_DONE 948
+#define CMDQ_EVENT_VENC_CORE1_TOP_128BYTE_DONE 949
+#define CMDQ_EVENT_VENC_CORE1_TOP_JPGDEC_DONE 950
+#define CMDQ_EVENT_VENC_CORE1_TOP_JPGDEC_C1_DONE 951
+#define CMDQ_EVENT_VENC_CORE1_TOP_JPGDEC_INSUFF_DONE 952
+#define CMDQ_EVENT_VENC_CORE1_TOP_JPGDEC_C1_INSUFF_DONE 953
+#define CMDQ_EVENT_VENC_CORE1_TOP_WP_2ND_STAGE_DONE 954
+#define CMDQ_EVENT_VENC_CORE1_TOP_WP_3RD_STAGE_DONE 955
+#define CMDQ_EVENT_VENC_CORE1_TOP_PPS_HEADER_DONE 956
+#define CMDQ_EVENT_VENC_CORE1_TOP_SPS_HEADER_DONE 957
+#define CMDQ_EVENT_VENC_CORE1_TOP_VPS_HEADER_DONE 958
+
+#define CMDQ_EVENT_WPE_VPP0_WPE_GCE_FRAME_DONE 962
+#define CMDQ_EVENT_WPE_VPP0_WPE_DONE_SYNC_OUT 963
+
+#define CMDQ_EVENT_WPE_VPP1_WPE_GCE_FRAME_DONE 969
+#define CMDQ_EVENT_WPE_VPP1_WPE_DONE_SYNC_OUT 970
+
+#define CMDQ_EVENT_DP_TX_VBLANK_FALLING 994
+#define CMDQ_EVENT_DP_TX_VSC_FINISH 995
+
+#define CMDQ_EVENT_OUTPIN_0 1018
+#define CMDQ_EVENT_OUTPIN_1 1019
+
+/* end of hw event */
+#define CMDQ_MAX_HW_EVENT 1019
+
+#endif
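These GCE hardware event IDs are referenced from the device tree by CMDQ mailbox clients, most commonly through the mediatek,gce-events property of a consumer node. A minimal sketch, assuming this header is included by the DTS; the node name, compatible and address below are placeholders, not taken from this patch:

	vpp-merge@1c100000 {
		compatible = "mediatek,mt8195-disp-merge";
		reg = <0x1c100000 0x1000>;
		mediatek,gce-events = <CMDQ_EVENT_VDO1_VPP_MERGE0>;
	};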
diff --git a/include/dt-bindings/gpio/gpio.h b/include/dt-bindings/gpio/gpio.h
index c029467e828b..5566e58196a2 100644
--- a/include/dt-bindings/gpio/gpio.h
+++ b/include/dt-bindings/gpio/gpio.h
@@ -39,4 +39,7 @@
/* Bit 5 express pull down */
#define GPIO_PULL_DOWN 32
+/* Bit 6 express pull disable */
+#define GPIO_PULL_DISABLE 64
+
#endif
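GPIO_PULL_DISABLE occupies its own bit (bit 6), so it combines with the other flag values in the second cell of a GPIO specifier, just as GPIO_PULL_UP and GPIO_PULL_DOWN do. A minimal sketch, assuming a hypothetical controller label and consumer property; note DTS requires the parentheses around the ORed expression:

	enable-gpios = <&gpio0 5 (GPIO_ACTIVE_LOW | GPIO_PULL_DISABLE)>;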
diff --git a/include/dt-bindings/gpio/meson-s4-gpio.h b/include/dt-bindings/gpio/meson-s4-gpio.h
new file mode 100644
index 000000000000..35aee21b94f1
--- /dev/null
+++ b/include/dt-bindings/gpio/meson-s4-gpio.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2021 Amlogic, Inc. All rights reserved.
+ * Author: Qianggui Song <qianggui.song@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_MESON_S4_GPIO_H
+#define _DT_BINDINGS_MESON_S4_GPIO_H
+
+#define GPIOB_0 0
+#define GPIOB_1 1
+#define GPIOB_2 2
+#define GPIOB_3 3
+#define GPIOB_4 4
+#define GPIOB_5 5
+#define GPIOB_6 6
+#define GPIOB_7 7
+#define GPIOB_8 8
+#define GPIOB_9 9
+#define GPIOB_10 10
+#define GPIOB_11 11
+#define GPIOB_12 12
+#define GPIOB_13 13
+
+#define GPIOC_0 14
+#define GPIOC_1 15
+#define GPIOC_2 16
+#define GPIOC_3 17
+#define GPIOC_4 18
+#define GPIOC_5 19
+#define GPIOC_6 20
+#define GPIOC_7 21
+
+#define GPIOE_0 22
+#define GPIOE_1 23
+
+#define GPIOD_0 24
+#define GPIOD_1 25
+#define GPIOD_2 26
+#define GPIOD_3 27
+#define GPIOD_4 28
+#define GPIOD_5 29
+#define GPIOD_6 30
+#define GPIOD_7 31
+#define GPIOD_8 32
+#define GPIOD_9 33
+#define GPIOD_10 34
+#define GPIOD_11 35
+
+#define GPIOH_0 36
+#define GPIOH_1 37
+#define GPIOH_2 38
+#define GPIOH_3 39
+#define GPIOH_4 40
+#define GPIOH_5 41
+#define GPIOH_6 42
+#define GPIOH_7 43
+#define GPIOH_8 44
+#define GPIOH_9 45
+#define GPIOH_10 46
+#define GPIOH_11 47
+
+#define GPIOX_0 48
+#define GPIOX_1 49
+#define GPIOX_2 50
+#define GPIOX_3 51
+#define GPIOX_4 52
+#define GPIOX_5 53
+#define GPIOX_6 54
+#define GPIOX_7 55
+#define GPIOX_8 56
+#define GPIOX_9 57
+#define GPIOX_10 58
+#define GPIOX_11 59
+#define GPIOX_12 60
+#define GPIOX_13 61
+#define GPIOX_14 62
+#define GPIOX_15 63
+#define GPIOX_16 64
+#define GPIOX_17 65
+#define GPIOX_18 66
+#define GPIOX_19 67
+
+#define GPIOZ_0 68
+#define GPIOZ_1 69
+#define GPIOZ_2 70
+#define GPIOZ_3 71
+#define GPIOZ_4 72
+#define GPIOZ_5 73
+#define GPIOZ_6 74
+#define GPIOZ_7 75
+#define GPIOZ_8 76
+#define GPIOZ_9 77
+#define GPIOZ_10 78
+#define GPIOZ_11 79
+#define GPIOZ_12 80
+
+#define GPIO_TEST_N 81
+#endif /* _DT_BINDINGS_MESON_S4_GPIO_H */
diff --git a/include/dt-bindings/gpio/msc313-gpio.h b/include/dt-bindings/gpio/msc313-gpio.h
new file mode 100644
index 000000000000..5458c6580a02
--- /dev/null
+++ b/include/dt-bindings/gpio/msc313-gpio.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * GPIO definitions for MStar/SigmaStar MSC313 and later SoCs
+ *
+ * Copyright (C) 2020 Daniel Palmer <daniel@thingy.jp>
+ */
+
+#ifndef _DT_BINDINGS_MSC313_GPIO_H
+#define _DT_BINDINGS_MSC313_GPIO_H
+
+#define MSC313_GPIO_FUART 0
+#define MSC313_GPIO_FUART_RX (MSC313_GPIO_FUART + 0)
+#define MSC313_GPIO_FUART_TX (MSC313_GPIO_FUART + 1)
+#define MSC313_GPIO_FUART_CTS (MSC313_GPIO_FUART + 2)
+#define MSC313_GPIO_FUART_RTS (MSC313_GPIO_FUART + 3)
+
+#define MSC313_GPIO_SR (MSC313_GPIO_FUART_RTS + 1)
+#define MSC313_GPIO_SR_IO2 (MSC313_GPIO_SR + 0)
+#define MSC313_GPIO_SR_IO3 (MSC313_GPIO_SR + 1)
+#define MSC313_GPIO_SR_IO4 (MSC313_GPIO_SR + 2)
+#define MSC313_GPIO_SR_IO5 (MSC313_GPIO_SR + 3)
+#define MSC313_GPIO_SR_IO6 (MSC313_GPIO_SR + 4)
+#define MSC313_GPIO_SR_IO7 (MSC313_GPIO_SR + 5)
+#define MSC313_GPIO_SR_IO8 (MSC313_GPIO_SR + 6)
+#define MSC313_GPIO_SR_IO9 (MSC313_GPIO_SR + 7)
+#define MSC313_GPIO_SR_IO10 (MSC313_GPIO_SR + 8)
+#define MSC313_GPIO_SR_IO11 (MSC313_GPIO_SR + 9)
+#define MSC313_GPIO_SR_IO12 (MSC313_GPIO_SR + 10)
+#define MSC313_GPIO_SR_IO13 (MSC313_GPIO_SR + 11)
+#define MSC313_GPIO_SR_IO14 (MSC313_GPIO_SR + 12)
+#define MSC313_GPIO_SR_IO15 (MSC313_GPIO_SR + 13)
+#define MSC313_GPIO_SR_IO16 (MSC313_GPIO_SR + 14)
+#define MSC313_GPIO_SR_IO17 (MSC313_GPIO_SR + 15)
+
+#define MSC313_GPIO_SD (MSC313_GPIO_SR_IO17 + 1)
+#define MSC313_GPIO_SD_CLK (MSC313_GPIO_SD + 0)
+#define MSC313_GPIO_SD_CMD (MSC313_GPIO_SD + 1)
+#define MSC313_GPIO_SD_D0 (MSC313_GPIO_SD + 2)
+#define MSC313_GPIO_SD_D1 (MSC313_GPIO_SD + 3)
+#define MSC313_GPIO_SD_D2 (MSC313_GPIO_SD + 4)
+#define MSC313_GPIO_SD_D3 (MSC313_GPIO_SD + 5)
+
+#define MSC313_GPIO_I2C1 (MSC313_GPIO_SD_D3 + 1)
+#define MSC313_GPIO_I2C1_SCL (MSC313_GPIO_I2C1 + 0)
+#define MSC313_GPIO_I2C1_SDA (MSC313_GPIO_I2C1 + 1)
+
+#define MSC313_GPIO_SPI0 (MSC313_GPIO_I2C1_SDA + 1)
+#define MSC313_GPIO_SPI0_CZ (MSC313_GPIO_SPI0 + 0)
+#define MSC313_GPIO_SPI0_CK (MSC313_GPIO_SPI0 + 1)
+#define MSC313_GPIO_SPI0_DI (MSC313_GPIO_SPI0 + 2)
+#define MSC313_GPIO_SPI0_DO (MSC313_GPIO_SPI0 + 3)
+
+/* SSD20x */
+#define SSD20XD_GPIO_FUART 0
+#define SSD20XD_GPIO_FUART_RX (SSD20XD_GPIO_FUART + 0)
+#define SSD20XD_GPIO_FUART_TX (SSD20XD_GPIO_FUART + 1)
+#define SSD20XD_GPIO_FUART_CTS (SSD20XD_GPIO_FUART + 2)
+#define SSD20XD_GPIO_FUART_RTS (SSD20XD_GPIO_FUART + 3)
+
+#define SSD20XD_GPIO_SD (SSD20XD_GPIO_FUART_RTS + 1)
+#define SSD20XD_GPIO_SD_CLK (SSD20XD_GPIO_SD + 0)
+#define SSD20XD_GPIO_SD_CMD (SSD20XD_GPIO_SD + 1)
+#define SSD20XD_GPIO_SD_D0 (SSD20XD_GPIO_SD + 2)
+#define SSD20XD_GPIO_SD_D1 (SSD20XD_GPIO_SD + 3)
+#define SSD20XD_GPIO_SD_D2 (SSD20XD_GPIO_SD + 4)
+#define SSD20XD_GPIO_SD_D3 (SSD20XD_GPIO_SD + 5)
+
+#define SSD20XD_GPIO_UART0 (SSD20XD_GPIO_SD_D3 + 1)
+#define SSD20XD_GPIO_UART0_RX (SSD20XD_GPIO_UART0 + 0)
+#define SSD20XD_GPIO_UART0_TX (SSD20XD_GPIO_UART0 + 1)
+
+#define SSD20XD_GPIO_UART1 (SSD20XD_GPIO_UART0_TX + 1)
+#define SSD20XD_GPIO_UART1_RX (SSD20XD_GPIO_UART1 + 0)
+#define SSD20XD_GPIO_UART1_TX (SSD20XD_GPIO_UART1 + 1)
+
+#define SSD20XD_GPIO_TTL (SSD20XD_GPIO_UART1_TX + 1)
+#define SSD20XD_GPIO_TTL0 (SSD20XD_GPIO_TTL + 0)
+#define SSD20XD_GPIO_TTL1 (SSD20XD_GPIO_TTL + 1)
+#define SSD20XD_GPIO_TTL2 (SSD20XD_GPIO_TTL + 2)
+#define SSD20XD_GPIO_TTL3 (SSD20XD_GPIO_TTL + 3)
+#define SSD20XD_GPIO_TTL4 (SSD20XD_GPIO_TTL + 4)
+#define SSD20XD_GPIO_TTL5 (SSD20XD_GPIO_TTL + 5)
+#define SSD20XD_GPIO_TTL6 (SSD20XD_GPIO_TTL + 6)
+#define SSD20XD_GPIO_TTL7 (SSD20XD_GPIO_TTL + 7)
+#define SSD20XD_GPIO_TTL8 (SSD20XD_GPIO_TTL + 8)
+#define SSD20XD_GPIO_TTL9 (SSD20XD_GPIO_TTL + 9)
+#define SSD20XD_GPIO_TTL10 (SSD20XD_GPIO_TTL + 10)
+#define SSD20XD_GPIO_TTL11 (SSD20XD_GPIO_TTL + 11)
+#define SSD20XD_GPIO_TTL12 (SSD20XD_GPIO_TTL + 12)
+#define SSD20XD_GPIO_TTL13 (SSD20XD_GPIO_TTL + 13)
+#define SSD20XD_GPIO_TTL14 (SSD20XD_GPIO_TTL + 14)
+#define SSD20XD_GPIO_TTL15 (SSD20XD_GPIO_TTL + 15)
+#define SSD20XD_GPIO_TTL16 (SSD20XD_GPIO_TTL + 16)
+#define SSD20XD_GPIO_TTL17 (SSD20XD_GPIO_TTL + 17)
+#define SSD20XD_GPIO_TTL18 (SSD20XD_GPIO_TTL + 18)
+#define SSD20XD_GPIO_TTL19 (SSD20XD_GPIO_TTL + 19)
+#define SSD20XD_GPIO_TTL20 (SSD20XD_GPIO_TTL + 20)
+#define SSD20XD_GPIO_TTL21 (SSD20XD_GPIO_TTL + 21)
+#define SSD20XD_GPIO_TTL22 (SSD20XD_GPIO_TTL + 22)
+#define SSD20XD_GPIO_TTL23 (SSD20XD_GPIO_TTL + 23)
+#define SSD20XD_GPIO_TTL24 (SSD20XD_GPIO_TTL + 24)
+#define SSD20XD_GPIO_TTL25 (SSD20XD_GPIO_TTL + 25)
+#define SSD20XD_GPIO_TTL26 (SSD20XD_GPIO_TTL + 26)
+#define SSD20XD_GPIO_TTL27 (SSD20XD_GPIO_TTL + 27)
+
+#define SSD20XD_GPIO_GPIO (SSD20XD_GPIO_TTL27 + 1)
+#define SSD20XD_GPIO_GPIO0 (SSD20XD_GPIO_GPIO + 0)
+#define SSD20XD_GPIO_GPIO1 (SSD20XD_GPIO_GPIO + 1)
+#define SSD20XD_GPIO_GPIO2 (SSD20XD_GPIO_GPIO + 2)
+#define SSD20XD_GPIO_GPIO3 (SSD20XD_GPIO_GPIO + 3)
+#define SSD20XD_GPIO_GPIO4 (SSD20XD_GPIO_GPIO + 4)
+#define SSD20XD_GPIO_GPIO5 (SSD20XD_GPIO_GPIO + 5)
+#define SSD20XD_GPIO_GPIO6 (SSD20XD_GPIO_GPIO + 6)
+#define SSD20XD_GPIO_GPIO7 (SSD20XD_GPIO_GPIO + 7)
+#define SSD20XD_GPIO_GPIO10 (SSD20XD_GPIO_GPIO + 8)
+#define SSD20XD_GPIO_GPIO11 (SSD20XD_GPIO_GPIO + 9)
+#define SSD20XD_GPIO_GPIO12 (SSD20XD_GPIO_GPIO + 10)
+#define SSD20XD_GPIO_GPIO13 (SSD20XD_GPIO_GPIO + 11)
+#define SSD20XD_GPIO_GPIO14 (SSD20XD_GPIO_GPIO + 12)
+#define SSD20XD_GPIO_GPIO85 (SSD20XD_GPIO_GPIO + 13)
+#define SSD20XD_GPIO_GPIO86 (SSD20XD_GPIO_GPIO + 14)
+#define SSD20XD_GPIO_GPIO90 (SSD20XD_GPIO_GPIO + 15)
+
+#endif /* _DT_BINDINGS_MSC313_GPIO_H */
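Because each MSC313/SSD20XD group above is defined relative to the last pin of the previous group, the chained offsets resolve to stable absolute indices, and a consumer uses the resulting constant like any other GPIO number. A minimal sketch with a hypothetical controller label:

	cs-gpios = <&gpio MSC313_GPIO_SPI0_CZ GPIO_ACTIVE_LOW>;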
diff --git a/include/dt-bindings/gpio/tegra186-gpio.h b/include/dt-bindings/gpio/tegra186-gpio.h
index 0782b05e2775..af0d9583be70 100644
--- a/include/dt-bindings/gpio/tegra186-gpio.h
+++ b/include/dt-bindings/gpio/tegra186-gpio.h
@@ -8,8 +8,8 @@
* The second cell contains standard flag values specified in gpio.h.
*/
-#ifndef _DT_BINDINGS_GPIO_TEGRA_GPIO_H
-#define _DT_BINDINGS_GPIO_TEGRA_GPIO_H
+#ifndef _DT_BINDINGS_GPIO_TEGRA186_GPIO_H
+#define _DT_BINDINGS_GPIO_TEGRA186_GPIO_H
#include <dt-bindings/gpio/gpio.h>
diff --git a/include/dt-bindings/gpio/tegra234-gpio.h b/include/dt-bindings/gpio/tegra234-gpio.h
new file mode 100644
index 000000000000..d7a1f2e298e8
--- /dev/null
+++ b/include/dt-bindings/gpio/tegra234-gpio.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. */
+
+/*
+ * This header provides constants for binding nvidia,tegra234-gpio*.
+ *
+ * The first cell in Tegra's GPIO specifier is the GPIO ID. The macros below
+ * provide names for this.
+ *
+ * The second cell contains standard flag values specified in gpio.h.
+ */
+
+#ifndef _DT_BINDINGS_GPIO_TEGRA234_GPIO_H
+#define _DT_BINDINGS_GPIO_TEGRA234_GPIO_H
+
+#include <dt-bindings/gpio/gpio.h>
+
+/* GPIOs implemented by main GPIO controller */
+#define TEGRA234_MAIN_GPIO_PORT_A 0
+#define TEGRA234_MAIN_GPIO_PORT_B 1
+#define TEGRA234_MAIN_GPIO_PORT_C 2
+#define TEGRA234_MAIN_GPIO_PORT_D 3
+#define TEGRA234_MAIN_GPIO_PORT_E 4
+#define TEGRA234_MAIN_GPIO_PORT_F 5
+#define TEGRA234_MAIN_GPIO_PORT_G 6
+#define TEGRA234_MAIN_GPIO_PORT_H 7
+#define TEGRA234_MAIN_GPIO_PORT_I 8
+#define TEGRA234_MAIN_GPIO_PORT_J 9
+#define TEGRA234_MAIN_GPIO_PORT_K 10
+#define TEGRA234_MAIN_GPIO_PORT_L 11
+#define TEGRA234_MAIN_GPIO_PORT_M 12
+#define TEGRA234_MAIN_GPIO_PORT_N 13
+#define TEGRA234_MAIN_GPIO_PORT_P 14
+#define TEGRA234_MAIN_GPIO_PORT_Q 15
+#define TEGRA234_MAIN_GPIO_PORT_R 16
+#define TEGRA234_MAIN_GPIO_PORT_S 17
+#define TEGRA234_MAIN_GPIO_PORT_T 18
+#define TEGRA234_MAIN_GPIO_PORT_U 19
+#define TEGRA234_MAIN_GPIO_PORT_V 20
+#define TEGRA234_MAIN_GPIO_PORT_X 21
+#define TEGRA234_MAIN_GPIO_PORT_Y 22
+#define TEGRA234_MAIN_GPIO_PORT_Z 23
+#define TEGRA234_MAIN_GPIO_PORT_AC 24
+#define TEGRA234_MAIN_GPIO_PORT_AD 25
+#define TEGRA234_MAIN_GPIO_PORT_AE 26
+#define TEGRA234_MAIN_GPIO_PORT_AF 27
+#define TEGRA234_MAIN_GPIO_PORT_AG 28
+
+#define TEGRA234_MAIN_GPIO(port, offset) \
+	((TEGRA234_MAIN_GPIO_PORT_##port * 8) + (offset))
+
+/* GPIOs implemented by AON GPIO controller */
+#define TEGRA234_AON_GPIO_PORT_AA 0
+#define TEGRA234_AON_GPIO_PORT_BB 1
+#define TEGRA234_AON_GPIO_PORT_CC 2
+#define TEGRA234_AON_GPIO_PORT_DD 3
+#define TEGRA234_AON_GPIO_PORT_EE 4
+#define TEGRA234_AON_GPIO_PORT_GG 5
+
+#define TEGRA234_AON_GPIO(port, offset) \
+	((TEGRA234_AON_GPIO_PORT_##port * 8) + (offset))
+
+#endif
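TEGRA234_MAIN_GPIO() and TEGRA234_AON_GPIO() encode a pin as port * 8 + offset, so TEGRA234_MAIN_GPIO(H, 3) evaluates to 7 * 8 + 3 = 59. A minimal usage sketch, assuming hypothetical controller labels:

	gpios = <&gpio TEGRA234_MAIN_GPIO(H, 3) GPIO_ACTIVE_LOW>;
	wake-gpios = <&gpio_aon TEGRA234_AON_GPIO(AA, 0) GPIO_ACTIVE_HIGH>;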
diff --git a/include/dt-bindings/gpio/tegra241-gpio.h b/include/dt-bindings/gpio/tegra241-gpio.h
new file mode 100644
index 000000000000..80cee3016be6
--- /dev/null
+++ b/include/dt-bindings/gpio/tegra241-gpio.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. */
+
+/*
+ * This header provides constants for the nvidia,tegra241-gpio DT binding.
+ *
+ * The first cell in Tegra's GPIO specifier is the GPIO ID. The macros below
+ * provide names for this.
+ *
+ * The second cell contains standard flag values specified in gpio.h.
+ */
+
+#ifndef _DT_BINDINGS_GPIO_TEGRA241_GPIO_H
+#define _DT_BINDINGS_GPIO_TEGRA241_GPIO_H
+
+#include <dt-bindings/gpio/gpio.h>
+
+/* GPIOs implemented by main GPIO controller */
+#define TEGRA241_MAIN_GPIO_PORT_A 0
+#define TEGRA241_MAIN_GPIO_PORT_B 1
+#define TEGRA241_MAIN_GPIO_PORT_C 2
+#define TEGRA241_MAIN_GPIO_PORT_D 3
+#define TEGRA241_MAIN_GPIO_PORT_E 4
+#define TEGRA241_MAIN_GPIO_PORT_F 5
+#define TEGRA241_MAIN_GPIO_PORT_G 6
+#define TEGRA241_MAIN_GPIO_PORT_H 7
+#define TEGRA241_MAIN_GPIO_PORT_I 8
+#define TEGRA241_MAIN_GPIO_PORT_J 9
+#define TEGRA241_MAIN_GPIO_PORT_K 10
+#define TEGRA241_MAIN_GPIO_PORT_L 11
+
+#define TEGRA241_MAIN_GPIO(port, offset) \
+ ((TEGRA241_MAIN_GPIO_PORT_##port * 8) + (offset))
+
+/* GPIOs implemented by AON GPIO controller */
+#define TEGRA241_AON_GPIO_PORT_AA 0
+#define TEGRA241_AON_GPIO_PORT_BB 1
+
+#define TEGRA241_AON_GPIO(port, offset) \
+ ((TEGRA241_AON_GPIO_PORT_##port * 8) + (offset))
+
+#endif
diff --git a/include/dt-bindings/iio/adc/at91-sama5d2_adc.h b/include/dt-bindings/iio/adc/at91-sama5d2_adc.h
index 70f99dbdbb42..866d36530583 100644
--- a/include/dt-bindings/iio/adc/at91-sama5d2_adc.h
+++ b/include/dt-bindings/iio/adc/at91-sama5d2_adc.h
@@ -13,4 +13,7 @@
/* pressure channel index */
#define AT91_SAMA5D2_ADC_P_CHANNEL 26
+/* SAMA7G5 temperature sensor channel index */
+#define AT91_SAMA7G5_ADC_TEMP_CHANNEL 31
+
#endif
diff --git a/include/dt-bindings/iio/adc/ingenic,adc.h b/include/dt-bindings/iio/adc/ingenic,adc.h
index 42f871ab3272..a6ccc031635b 100644
--- a/include/dt-bindings/iio/adc/ingenic,adc.h
+++ b/include/dt-bindings/iio/adc/ingenic,adc.h
@@ -7,5 +7,12 @@
#define INGENIC_ADC_AUX 0
#define INGENIC_ADC_BATTERY 1
#define INGENIC_ADC_AUX2 2
+#define INGENIC_ADC_TOUCH_XP 3
+#define INGENIC_ADC_TOUCH_YP 4
+#define INGENIC_ADC_TOUCH_XN 5
+#define INGENIC_ADC_TOUCH_YN 6
+#define INGENIC_ADC_TOUCH_XD 7
+#define INGENIC_ADC_TOUCH_YD 8
+#define INGENIC_ADC_AUX0 9
#endif
diff --git a/include/dt-bindings/iio/adc/mediatek,mt6370_adc.h b/include/dt-bindings/iio/adc/mediatek,mt6370_adc.h
new file mode 100644
index 000000000000..6ee725547763
--- /dev/null
+++ b/include/dt-bindings/iio/adc/mediatek,mt6370_adc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef __DT_BINDINGS_MEDIATEK_MT6370_ADC_H__
+#define __DT_BINDINGS_MEDIATEK_MT6370_ADC_H__
+
+/* ADC Channel Index */
+#define MT6370_CHAN_VBUSDIV5 0
+#define MT6370_CHAN_VBUSDIV2 1
+#define MT6370_CHAN_VSYS 2
+#define MT6370_CHAN_VBAT 3
+#define MT6370_CHAN_TS_BAT 4
+#define MT6370_CHAN_IBUS 5
+#define MT6370_CHAN_IBAT 6
+#define MT6370_CHAN_CHG_VDDP 7
+#define MT6370_CHAN_TEMP_JC 8
+#define MT6370_CHAN_MAX 9
+
+#endif
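These channel indices are used as the cell value in ADC consumer references to the MT6370 ADC. A minimal sketch, assuming a hypothetical provider label and consumer node:

	charger {
		io-channels = <&mt6370_adc MT6370_CHAN_VBAT>,
			      <&mt6370_adc MT6370_CHAN_IBAT>;
		io-channel-names = "vbat", "ibat";
	};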
diff --git a/include/dt-bindings/iio/addac/adi,ad74413r.h b/include/dt-bindings/iio/addac/adi,ad74413r.h
new file mode 100644
index 000000000000..204f92bbd79f
--- /dev/null
+++ b/include/dt-bindings/iio/addac/adi,ad74413r.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_ADI_AD74413R_H
+#define _DT_BINDINGS_ADI_AD74413R_H
+
+#define CH_FUNC_HIGH_IMPEDANCE 0x0
+#define CH_FUNC_VOLTAGE_OUTPUT 0x1
+#define CH_FUNC_CURRENT_OUTPUT 0x2
+#define CH_FUNC_VOLTAGE_INPUT 0x3
+#define CH_FUNC_CURRENT_INPUT_EXT_POWER 0x4
+#define CH_FUNC_CURRENT_INPUT_LOOP_POWER 0x5
+#define CH_FUNC_RESISTANCE_INPUT 0x6
+#define CH_FUNC_DIGITAL_INPUT_LOGIC 0x7
+#define CH_FUNC_DIGITAL_INPUT_LOOP_POWER 0x8
+#define CH_FUNC_CURRENT_INPUT_EXT_POWER_HART 0x9
+#define CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART 0xA
+
+#define CH_FUNC_MIN CH_FUNC_HIGH_IMPEDANCE
+#define CH_FUNC_MAX CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART
+
+#endif /* _DT_BINDINGS_ADI_AD74413R_H */
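Each AD74413R channel is assigned one of these functions per channel in the device tree. A minimal sketch, assuming the binding's per-channel function property is adi,ch-func (treat the property name as an assumption here):

	channel@0 {
		reg = <0>;
		adi,ch-func = <CH_FUNC_VOLTAGE_OUTPUT>;
	};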
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h
new file mode 100644
index 000000000000..9426f27a1946
--- /dev/null
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H
+#define _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H
+
+#ifndef PM8350_SID
+#define PM8350_SID 1
+#endif
+
+/* ADC channels for PM8350_ADC for PMIC7 */
+#define PM8350_ADC7_REF_GND (PM8350_SID << 8 | 0x0)
+#define PM8350_ADC7_1P25VREF (PM8350_SID << 8 | 0x01)
+#define PM8350_ADC7_VREF_VADC (PM8350_SID << 8 | 0x02)
+#define PM8350_ADC7_DIE_TEMP (PM8350_SID << 8 | 0x03)
+
+#define PM8350_ADC7_AMUX_THM1 (PM8350_SID << 8 | 0x04)
+#define PM8350_ADC7_AMUX_THM2 (PM8350_SID << 8 | 0x05)
+#define PM8350_ADC7_AMUX_THM3 (PM8350_SID << 8 | 0x06)
+#define PM8350_ADC7_AMUX_THM4 (PM8350_SID << 8 | 0x07)
+#define PM8350_ADC7_AMUX_THM5 (PM8350_SID << 8 | 0x08)
+#define PM8350_ADC7_GPIO1 (PM8350_SID << 8 | 0x0a)
+#define PM8350_ADC7_GPIO2 (PM8350_SID << 8 | 0x0b)
+#define PM8350_ADC7_GPIO3 (PM8350_SID << 8 | 0x0c)
+#define PM8350_ADC7_GPIO4 (PM8350_SID << 8 | 0x0d)
+
+/* 30k pull-up1 */
+#define PM8350_ADC7_AMUX_THM1_30K_PU (PM8350_SID << 8 | 0x24)
+#define PM8350_ADC7_AMUX_THM2_30K_PU (PM8350_SID << 8 | 0x25)
+#define PM8350_ADC7_AMUX_THM3_30K_PU (PM8350_SID << 8 | 0x26)
+#define PM8350_ADC7_AMUX_THM4_30K_PU (PM8350_SID << 8 | 0x27)
+#define PM8350_ADC7_AMUX_THM5_30K_PU (PM8350_SID << 8 | 0x28)
+#define PM8350_ADC7_GPIO1_30K_PU (PM8350_SID << 8 | 0x2a)
+#define PM8350_ADC7_GPIO2_30K_PU (PM8350_SID << 8 | 0x2b)
+#define PM8350_ADC7_GPIO3_30K_PU (PM8350_SID << 8 | 0x2c)
+#define PM8350_ADC7_GPIO4_30K_PU (PM8350_SID << 8 | 0x2d)
+
+/* 100k pull-up2 */
+#define PM8350_ADC7_AMUX_THM1_100K_PU (PM8350_SID << 8 | 0x44)
+#define PM8350_ADC7_AMUX_THM2_100K_PU (PM8350_SID << 8 | 0x45)
+#define PM8350_ADC7_AMUX_THM3_100K_PU (PM8350_SID << 8 | 0x46)
+#define PM8350_ADC7_AMUX_THM4_100K_PU (PM8350_SID << 8 | 0x47)
+#define PM8350_ADC7_AMUX_THM5_100K_PU (PM8350_SID << 8 | 0x48)
+#define PM8350_ADC7_GPIO1_100K_PU (PM8350_SID << 8 | 0x4a)
+#define PM8350_ADC7_GPIO2_100K_PU (PM8350_SID << 8 | 0x4b)
+#define PM8350_ADC7_GPIO3_100K_PU (PM8350_SID << 8 | 0x4c)
+#define PM8350_ADC7_GPIO4_100K_PU (PM8350_SID << 8 | 0x4d)
+
+/* 400k pull-up3 */
+#define PM8350_ADC7_AMUX_THM1_400K_PU (PM8350_SID << 8 | 0x64)
+#define PM8350_ADC7_AMUX_THM2_400K_PU (PM8350_SID << 8 | 0x65)
+#define PM8350_ADC7_AMUX_THM3_400K_PU (PM8350_SID << 8 | 0x66)
+#define PM8350_ADC7_AMUX_THM4_400K_PU (PM8350_SID << 8 | 0x67)
+#define PM8350_ADC7_AMUX_THM5_400K_PU (PM8350_SID << 8 | 0x68)
+#define PM8350_ADC7_GPIO1_400K_PU (PM8350_SID << 8 | 0x6a)
+#define PM8350_ADC7_GPIO2_400K_PU (PM8350_SID << 8 | 0x6b)
+#define PM8350_ADC7_GPIO3_400K_PU (PM8350_SID << 8 | 0x6c)
+#define PM8350_ADC7_GPIO4_400K_PU (PM8350_SID << 8 | 0x6d)
+
+/* 1/3 Divider */
+#define PM8350_ADC7_GPIO4_DIV3 (PM8350_SID << 8 | 0x8d)
+
+#define PM8350_ADC7_VPH_PWR (PM8350_SID << 8 | 0x8e)
+
+#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H */
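Every channel value embeds PM8350_SID in the upper byte, so a board where the PMIC answers at a different SID can override the default before including the header and all channel constants shift accordingly; for SID 2, 2 << 8 | 0x44 = 0x244. A minimal sketch, with the node itself hypothetical:

	#define PM8350_SID 2
	#include <dt-bindings/iio/qcom,spmi-adc7-pm8350.h>

	channel@244 {
		reg = <PM8350_ADC7_AMUX_THM1_100K_PU>;
	};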
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h
new file mode 100644
index 000000000000..dc2497c27e16
--- /dev/null
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PM8350B_H
+#define _DT_BINDINGS_QCOM_SPMI_VADC_PM8350B_H
+
+#ifndef PM8350B_SID
+#define PM8350B_SID 3
+#endif
+
+/* ADC channels for PM8350B_ADC for PMIC7 */
+#define PM8350B_ADC7_REF_GND (PM8350B_SID << 8 | 0x0)
+#define PM8350B_ADC7_1P25VREF (PM8350B_SID << 8 | 0x01)
+#define PM8350B_ADC7_VREF_VADC (PM8350B_SID << 8 | 0x02)
+#define PM8350B_ADC7_DIE_TEMP (PM8350B_SID << 8 | 0x03)
+
+#define PM8350B_ADC7_AMUX_THM1 (PM8350B_SID << 8 | 0x04)
+#define PM8350B_ADC7_AMUX_THM2 (PM8350B_SID << 8 | 0x05)
+#define PM8350B_ADC7_AMUX_THM3 (PM8350B_SID << 8 | 0x06)
+#define PM8350B_ADC7_AMUX_THM4 (PM8350B_SID << 8 | 0x07)
+#define PM8350B_ADC7_AMUX_THM5 (PM8350B_SID << 8 | 0x08)
+#define PM8350B_ADC7_AMUX_THM6 (PM8350B_SID << 8 | 0x09)
+#define PM8350B_ADC7_GPIO1 (PM8350B_SID << 8 | 0x0a)
+#define PM8350B_ADC7_GPIO2 (PM8350B_SID << 8 | 0x0b)
+#define PM8350B_ADC7_GPIO3 (PM8350B_SID << 8 | 0x0c)
+#define PM8350B_ADC7_GPIO4 (PM8350B_SID << 8 | 0x0d)
+
+#define PM8350B_ADC7_CHG_TEMP (PM8350B_SID << 8 | 0x10)
+#define PM8350B_ADC7_USB_IN_V_16 (PM8350B_SID << 8 | 0x11)
+#define PM8350B_ADC7_VDC_16 (PM8350B_SID << 8 | 0x12)
+#define PM8350B_ADC7_CC1_ID (PM8350B_SID << 8 | 0x13)
+#define PM8350B_ADC7_VREF_BAT_THERM (PM8350B_SID << 8 | 0x15)
+#define PM8350B_ADC7_IIN_FB (PM8350B_SID << 8 | 0x17)
+
+/* 30k pull-up1 */
+#define PM8350B_ADC7_AMUX_THM1_30K_PU (PM8350B_SID << 8 | 0x24)
+#define PM8350B_ADC7_AMUX_THM2_30K_PU (PM8350B_SID << 8 | 0x25)
+#define PM8350B_ADC7_AMUX_THM3_30K_PU (PM8350B_SID << 8 | 0x26)
+#define PM8350B_ADC7_AMUX_THM4_30K_PU (PM8350B_SID << 8 | 0x27)
+#define PM8350B_ADC7_AMUX_THM5_30K_PU (PM8350B_SID << 8 | 0x28)
+#define PM8350B_ADC7_AMUX_THM6_30K_PU (PM8350B_SID << 8 | 0x29)
+#define PM8350B_ADC7_GPIO1_30K_PU (PM8350B_SID << 8 | 0x2a)
+#define PM8350B_ADC7_GPIO2_30K_PU (PM8350B_SID << 8 | 0x2b)
+#define PM8350B_ADC7_GPIO3_30K_PU (PM8350B_SID << 8 | 0x2c)
+#define PM8350B_ADC7_GPIO4_30K_PU (PM8350B_SID << 8 | 0x2d)
+#define PM8350B_ADC7_CC1_ID_30K_PU (PM8350B_SID << 8 | 0x33)
+
+/* 100k pull-up2 */
+#define PM8350B_ADC7_AMUX_THM1_100K_PU (PM8350B_SID << 8 | 0x44)
+#define PM8350B_ADC7_AMUX_THM2_100K_PU (PM8350B_SID << 8 | 0x45)
+#define PM8350B_ADC7_AMUX_THM3_100K_PU (PM8350B_SID << 8 | 0x46)
+#define PM8350B_ADC7_AMUX_THM4_100K_PU (PM8350B_SID << 8 | 0x47)
+#define PM8350B_ADC7_AMUX_THM5_100K_PU (PM8350B_SID << 8 | 0x48)
+#define PM8350B_ADC7_AMUX_THM6_100K_PU (PM8350B_SID << 8 | 0x49)
+#define PM8350B_ADC7_GPIO1_100K_PU (PM8350B_SID << 8 | 0x4a)
+#define PM8350B_ADC7_GPIO2_100K_PU (PM8350B_SID << 8 | 0x4b)
+#define PM8350B_ADC7_GPIO3_100K_PU (PM8350B_SID << 8 | 0x4c)
+#define PM8350B_ADC7_GPIO4_100K_PU (PM8350B_SID << 8 | 0x4d)
+#define PM8350B_ADC7_CC1_ID_100K_PU (PM8350B_SID << 8 | 0x53)
+
+/* 400k pull-up3 */
+#define PM8350B_ADC7_AMUX_THM1_400K_PU (PM8350B_SID << 8 | 0x64)
+#define PM8350B_ADC7_AMUX_THM2_400K_PU (PM8350B_SID << 8 | 0x65)
+#define PM8350B_ADC7_AMUX_THM3_400K_PU (PM8350B_SID << 8 | 0x66)
+#define PM8350B_ADC7_AMUX_THM4_400K_PU (PM8350B_SID << 8 | 0x67)
+#define PM8350B_ADC7_AMUX_THM5_400K_PU (PM8350B_SID << 8 | 0x68)
+#define PM8350B_ADC7_AMUX_THM6_400K_PU (PM8350B_SID << 8 | 0x69)
+#define PM8350B_ADC7_GPIO1_400K_PU (PM8350B_SID << 8 | 0x6a)
+#define PM8350B_ADC7_GPIO2_400K_PU (PM8350B_SID << 8 | 0x6b)
+#define PM8350B_ADC7_GPIO3_400K_PU (PM8350B_SID << 8 | 0x6c)
+#define PM8350B_ADC7_GPIO4_400K_PU (PM8350B_SID << 8 | 0x6d)
+#define PM8350B_ADC7_CC1_ID_400K_PU (PM8350B_SID << 8 | 0x73)
+
+/* 1/3 Divider */
+#define PM8350B_ADC7_GPIO1_DIV3 (PM8350B_SID << 8 | 0x8a)
+#define PM8350B_ADC7_GPIO2_DIV3 (PM8350B_SID << 8 | 0x8b)
+#define PM8350B_ADC7_GPIO3_DIV3 (PM8350B_SID << 8 | 0x8c)
+#define PM8350B_ADC7_GPIO4_DIV3 (PM8350B_SID << 8 | 0x8d)
+
+#define PM8350B_ADC7_VPH_PWR (PM8350B_SID << 8 | 0x8e)
+#define PM8350B_ADC7_VBAT_SNS (PM8350B_SID << 8 | 0x8f)
+
+#define PM8350B_ADC7_SBUx (PM8350B_SID << 8 | 0x94)
+#define PM8350B_ADC7_VBAT_2S_MID (PM8350B_SID << 8 | 0x96)
+
+#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM8350B_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h
new file mode 100644
index 000000000000..6c296870e95b
--- /dev/null
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PMK8350_H
+#define _DT_BINDINGS_QCOM_SPMI_VADC_PMK8350_H
+
+#ifndef PMK8350_SID
+#define PMK8350_SID 0
+#endif
+
+/* ADC channels for PMK8350_ADC for PMIC7 */
+#define PMK8350_ADC7_REF_GND (PMK8350_SID << 8 | 0x0)
+#define PMK8350_ADC7_1P25VREF (PMK8350_SID << 8 | 0x01)
+#define PMK8350_ADC7_VREF_VADC (PMK8350_SID << 8 | 0x02)
+#define PMK8350_ADC7_DIE_TEMP (PMK8350_SID << 8 | 0x03)
+
+#define PMK8350_ADC7_AMUX_THM1 (PMK8350_SID << 8 | 0x04)
+#define PMK8350_ADC7_AMUX_THM2 (PMK8350_SID << 8 | 0x05)
+#define PMK8350_ADC7_AMUX_THM3 (PMK8350_SID << 8 | 0x06)
+#define PMK8350_ADC7_AMUX_THM4 (PMK8350_SID << 8 | 0x07)
+#define PMK8350_ADC7_AMUX_THM5 (PMK8350_SID << 8 | 0x08)
+
+/* 30k pull-up1 */
+#define PMK8350_ADC7_AMUX_THM1_30K_PU (PMK8350_SID << 8 | 0x24)
+#define PMK8350_ADC7_AMUX_THM2_30K_PU (PMK8350_SID << 8 | 0x25)
+#define PMK8350_ADC7_AMUX_THM3_30K_PU (PMK8350_SID << 8 | 0x26)
+#define PMK8350_ADC7_AMUX_THM4_30K_PU (PMK8350_SID << 8 | 0x27)
+#define PMK8350_ADC7_AMUX_THM5_30K_PU (PMK8350_SID << 8 | 0x28)
+
+/* 100k pull-up2 */
+#define PMK8350_ADC7_AMUX_THM1_100K_PU (PMK8350_SID << 8 | 0x44)
+#define PMK8350_ADC7_AMUX_THM2_100K_PU (PMK8350_SID << 8 | 0x45)
+#define PMK8350_ADC7_AMUX_THM3_100K_PU (PMK8350_SID << 8 | 0x46)
+#define PMK8350_ADC7_AMUX_THM4_100K_PU (PMK8350_SID << 8 | 0x47)
+#define PMK8350_ADC7_AMUX_THM5_100K_PU (PMK8350_SID << 8 | 0x48)
+
+/* 400k pull-up3 */
+#define PMK8350_ADC7_AMUX_THM1_400K_PU (PMK8350_SID << 8 | 0x64)
+#define PMK8350_ADC7_AMUX_THM2_400K_PU (PMK8350_SID << 8 | 0x65)
+#define PMK8350_ADC7_AMUX_THM3_400K_PU (PMK8350_SID << 8 | 0x66)
+#define PMK8350_ADC7_AMUX_THM4_400K_PU (PMK8350_SID << 8 | 0x67)
+#define PMK8350_ADC7_AMUX_THM5_400K_PU (PMK8350_SID << 8 | 0x68)
+
+#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMK8350_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h
new file mode 100644
index 000000000000..d6df1b19e5ff
--- /dev/null
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PMR735A_H
+#define _DT_BINDINGS_QCOM_SPMI_VADC_PMR735A_H
+
+#ifndef PMR735A_SID
+#define PMR735A_SID 4
+#endif
+
+/* ADC channels for PMR735A_ADC for PMIC7 */
+#define PMR735A_ADC7_REF_GND (PMR735A_SID << 8 | 0x0)
+#define PMR735A_ADC7_1P25VREF (PMR735A_SID << 8 | 0x01)
+#define PMR735A_ADC7_VREF_VADC (PMR735A_SID << 8 | 0x02)
+#define PMR735A_ADC7_DIE_TEMP (PMR735A_SID << 8 | 0x03)
+
+#define PMR735A_ADC7_GPIO1 (PMR735A_SID << 8 | 0x0a)
+#define PMR735A_ADC7_GPIO2 (PMR735A_SID << 8 | 0x0b)
+#define PMR735A_ADC7_GPIO3 (PMR735A_SID << 8 | 0x0c)
+
+/* 100k pull-up2 */
+#define PMR735A_ADC7_GPIO1_100K_PU (PMR735A_SID << 8 | 0x4a)
+#define PMR735A_ADC7_GPIO2_100K_PU (PMR735A_SID << 8 | 0x4b)
+#define PMR735A_ADC7_GPIO3_100K_PU (PMR735A_SID << 8 | 0x4c)
+
+#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMR735A_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h
new file mode 100644
index 000000000000..8da0e7dab315
--- /dev/null
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PMR735B_H
+#define _DT_BINDINGS_QCOM_SPMI_VADC_PMR735B_H
+
+#ifndef PMR735B_SID
+#define PMR735B_SID 5
+#endif
+
+/* ADC channels for PMR735B_ADC for PMIC7 */
+#define PMR735B_ADC7_REF_GND (PMR735B_SID << 8 | 0x0)
+#define PMR735B_ADC7_1P25VREF (PMR735B_SID << 8 | 0x01)
+#define PMR735B_ADC7_VREF_VADC (PMR735B_SID << 8 | 0x02)
+#define PMR735B_ADC7_DIE_TEMP (PMR735B_SID << 8 | 0x03)
+
+#define PMR735B_ADC7_GPIO1 (PMR735B_SID << 8 | 0x0a)
+#define PMR735B_ADC7_GPIO2 (PMR735B_SID << 8 | 0x0b)
+#define PMR735B_ADC7_GPIO3 (PMR735B_SID << 8 | 0x0c)
+
+/* 100k pull-up2 */
+#define PMR735B_ADC7_GPIO1_100K_PU (PMR735B_SID << 8 | 0x4a)
+#define PMR735B_ADC7_GPIO2_100K_PU (PMR735B_SID << 8 | 0x4b)
+#define PMR735B_ADC7_GPIO3_100K_PU (PMR735B_SID << 8 | 0x4c)
+
+#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMR735B_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-vadc.h b/include/dt-bindings/iio/qcom,spmi-vadc.h
index 61d556db1542..08adfe25964c 100644
--- a/include/dt-bindings/iio/qcom,spmi-vadc.h
+++ b/include/dt-bindings/iio/qcom,spmi-vadc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2012-2014,2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014,2018,2020 The Linux Foundation. All rights reserved.
*/
#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_H
@@ -221,4 +221,80 @@
#define ADC5_MAX_CHANNEL 0xc0
+/* ADC channels for the PMIC7 ADC */
+
+#define ADC7_REF_GND 0x00
+#define ADC7_1P25VREF 0x01
+#define ADC7_VREF_VADC 0x02
+#define ADC7_DIE_TEMP 0x03
+
+#define ADC7_AMUX_THM1 0x04
+#define ADC7_AMUX_THM2 0x05
+#define ADC7_AMUX_THM3 0x06
+#define ADC7_AMUX_THM4 0x07
+#define ADC7_AMUX_THM5 0x08
+#define ADC7_AMUX_THM6 0x09
+#define ADC7_GPIO1 0x0a
+#define ADC7_GPIO2 0x0b
+#define ADC7_GPIO3 0x0c
+#define ADC7_GPIO4 0x0d
+
+#define ADC7_CHG_TEMP 0x10
+#define ADC7_USB_IN_V_16 0x11
+#define ADC7_VDC_16 0x12
+#define ADC7_CC1_ID 0x13
+#define ADC7_VREF_BAT_THERM 0x15
+#define ADC7_IIN_FB 0x17
+
+/* 30k pull-up1 */
+#define ADC7_AMUX_THM1_30K_PU 0x24
+#define ADC7_AMUX_THM2_30K_PU 0x25
+#define ADC7_AMUX_THM3_30K_PU 0x26
+#define ADC7_AMUX_THM4_30K_PU 0x27
+#define ADC7_AMUX_THM5_30K_PU 0x28
+#define ADC7_AMUX_THM6_30K_PU 0x29
+#define ADC7_GPIO1_30K_PU 0x2a
+#define ADC7_GPIO2_30K_PU 0x2b
+#define ADC7_GPIO3_30K_PU 0x2c
+#define ADC7_GPIO4_30K_PU 0x2d
+#define ADC7_CC1_ID_30K_PU 0x33
+
+/* 100k pull-up2 */
+#define ADC7_AMUX_THM1_100K_PU 0x44
+#define ADC7_AMUX_THM2_100K_PU 0x45
+#define ADC7_AMUX_THM3_100K_PU 0x46
+#define ADC7_AMUX_THM4_100K_PU 0x47
+#define ADC7_AMUX_THM5_100K_PU 0x48
+#define ADC7_AMUX_THM6_100K_PU 0x49
+#define ADC7_GPIO1_100K_PU 0x4a
+#define ADC7_GPIO2_100K_PU 0x4b
+#define ADC7_GPIO3_100K_PU 0x4c
+#define ADC7_GPIO4_100K_PU 0x4d
+#define ADC7_CC1_ID_100K_PU 0x53
+
+/* 400k pull-up3 */
+#define ADC7_AMUX_THM1_400K_PU 0x64
+#define ADC7_AMUX_THM2_400K_PU 0x65
+#define ADC7_AMUX_THM3_400K_PU 0x66
+#define ADC7_AMUX_THM4_400K_PU 0x67
+#define ADC7_AMUX_THM5_400K_PU 0x68
+#define ADC7_AMUX_THM6_400K_PU 0x69
+#define ADC7_GPIO1_400K_PU 0x6a
+#define ADC7_GPIO2_400K_PU 0x6b
+#define ADC7_GPIO3_400K_PU 0x6c
+#define ADC7_GPIO4_400K_PU 0x6d
+#define ADC7_CC1_ID_400K_PU 0x73
+
+/* 1/3 Divider */
+#define ADC7_GPIO1_DIV3 0x8a
+#define ADC7_GPIO2_DIV3 0x8b
+#define ADC7_GPIO3_DIV3 0x8c
+#define ADC7_GPIO4_DIV3 0x8d
+
+#define ADC7_VPH_PWR 0x8e
+#define ADC7_VBAT_SNS 0x8f
+
+#define ADC7_SBUx 0x94
+#define ADC7_VBAT_2S_MID 0x96
+
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_H */
diff --git a/include/dt-bindings/input/atmel-maxtouch.h b/include/dt-bindings/input/atmel-maxtouch.h
new file mode 100644
index 000000000000..7345ab32224d
--- /dev/null
+++ b/include/dt-bindings/input/atmel-maxtouch.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _DT_BINDINGS_ATMEL_MAXTOUCH_H
+#define _DT_BINDINGS_ATMEL_MAXTOUCH_H
+
+#define ATMEL_MXT_WAKEUP_NONE 0
+#define ATMEL_MXT_WAKEUP_I2C_SCL 1
+#define ATMEL_MXT_WAKEUP_GPIO 2
+
+#endif /* _DT_BINDINGS_ATMEL_MAXTOUCH_H */
diff --git a/include/dt-bindings/input/cros-ec-keyboard.h b/include/dt-bindings/input/cros-ec-keyboard.h
new file mode 100644
index 000000000000..f0ae03634a96
--- /dev/null
+++ b/include/dt-bindings/input/cros-ec-keyboard.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides the constants of the standard Chrome OS key matrix
+ * for cros-ec keyboard-controller bindings.
+ *
+ * Copyright (c) 2021 Google, Inc
+ */
+
+#ifndef _CROS_EC_KEYBOARD_H
+#define _CROS_EC_KEYBOARD_H
+
+#define CROS_STD_TOP_ROW_KEYMAP \
+ MATRIX_KEY(0x00, 0x02, KEY_F1) \
+ MATRIX_KEY(0x03, 0x02, KEY_F2) \
+ MATRIX_KEY(0x02, 0x02, KEY_F3) \
+ MATRIX_KEY(0x01, 0x02, KEY_F4) \
+ MATRIX_KEY(0x03, 0x04, KEY_F5) \
+ MATRIX_KEY(0x02, 0x04, KEY_F6) \
+ MATRIX_KEY(0x01, 0x04, KEY_F7) \
+ MATRIX_KEY(0x02, 0x09, KEY_F8) \
+ MATRIX_KEY(0x01, 0x09, KEY_F9) \
+ MATRIX_KEY(0x00, 0x04, KEY_F10)
+
+#define CROS_STD_MAIN_KEYMAP \
+ MATRIX_KEY(0x00, 0x01, KEY_LEFTMETA) \
+ MATRIX_KEY(0x00, 0x03, KEY_B) \
+ MATRIX_KEY(0x00, 0x05, KEY_RO) \
+ MATRIX_KEY(0x00, 0x06, KEY_N) \
+ MATRIX_KEY(0x00, 0x08, KEY_EQUAL) \
+ MATRIX_KEY(0x00, 0x0a, KEY_RIGHTALT) \
+ MATRIX_KEY(0x01, 0x01, KEY_ESC) \
+ MATRIX_KEY(0x01, 0x03, KEY_G) \
+ MATRIX_KEY(0x01, 0x06, KEY_H) \
+ MATRIX_KEY(0x01, 0x08, KEY_APOSTROPHE) \
+ MATRIX_KEY(0x01, 0x0b, KEY_BACKSPACE) \
+ MATRIX_KEY(0x01, 0x0c, KEY_HENKAN) \
+ \
+ MATRIX_KEY(0x02, 0x00, KEY_LEFTCTRL) \
+ MATRIX_KEY(0x02, 0x01, KEY_TAB) \
+ MATRIX_KEY(0x02, 0x03, KEY_T) \
+ MATRIX_KEY(0x02, 0x05, KEY_RIGHTBRACE) \
+ MATRIX_KEY(0x02, 0x06, KEY_Y) \
+ MATRIX_KEY(0x02, 0x07, KEY_102ND) \
+ MATRIX_KEY(0x02, 0x08, KEY_LEFTBRACE) \
+ MATRIX_KEY(0x02, 0x0a, KEY_YEN) \
+ \
+ MATRIX_KEY(0x03, 0x00, KEY_LEFTMETA) \
+ MATRIX_KEY(0x03, 0x01, KEY_GRAVE) \
+ MATRIX_KEY(0x03, 0x03, KEY_5) \
+ MATRIX_KEY(0x03, 0x06, KEY_6) \
+ MATRIX_KEY(0x03, 0x08, KEY_MINUS) \
+ MATRIX_KEY(0x03, 0x09, KEY_SLEEP) \
+ MATRIX_KEY(0x03, 0x0b, KEY_BACKSLASH) \
+ MATRIX_KEY(0x03, 0x0c, KEY_MUHENKAN) \
+ \
+ MATRIX_KEY(0x04, 0x00, KEY_RIGHTCTRL) \
+ MATRIX_KEY(0x04, 0x01, KEY_A) \
+ MATRIX_KEY(0x04, 0x02, KEY_D) \
+ MATRIX_KEY(0x04, 0x03, KEY_F) \
+ MATRIX_KEY(0x04, 0x04, KEY_S) \
+ MATRIX_KEY(0x04, 0x05, KEY_K) \
+ MATRIX_KEY(0x04, 0x06, KEY_J) \
+ MATRIX_KEY(0x04, 0x08, KEY_SEMICOLON) \
+ MATRIX_KEY(0x04, 0x09, KEY_L) \
+ MATRIX_KEY(0x04, 0x0a, KEY_BACKSLASH) \
+ MATRIX_KEY(0x04, 0x0b, KEY_ENTER) \
+ \
+ MATRIX_KEY(0x05, 0x01, KEY_Z) \
+ MATRIX_KEY(0x05, 0x02, KEY_C) \
+ MATRIX_KEY(0x05, 0x03, KEY_V) \
+ MATRIX_KEY(0x05, 0x04, KEY_X) \
+ MATRIX_KEY(0x05, 0x05, KEY_COMMA) \
+ MATRIX_KEY(0x05, 0x06, KEY_M) \
+ MATRIX_KEY(0x05, 0x07, KEY_LEFTSHIFT) \
+ MATRIX_KEY(0x05, 0x08, KEY_SLASH) \
+ MATRIX_KEY(0x05, 0x09, KEY_DOT) \
+ MATRIX_KEY(0x05, 0x0b, KEY_SPACE) \
+ \
+ MATRIX_KEY(0x06, 0x01, KEY_1) \
+ MATRIX_KEY(0x06, 0x02, KEY_3) \
+ MATRIX_KEY(0x06, 0x03, KEY_4) \
+ MATRIX_KEY(0x06, 0x04, KEY_2) \
+ MATRIX_KEY(0x06, 0x05, KEY_8) \
+ MATRIX_KEY(0x06, 0x06, KEY_7) \
+ MATRIX_KEY(0x06, 0x08, KEY_0) \
+ MATRIX_KEY(0x06, 0x09, KEY_9) \
+ MATRIX_KEY(0x06, 0x0a, KEY_LEFTALT) \
+ MATRIX_KEY(0x06, 0x0b, KEY_DOWN) \
+ MATRIX_KEY(0x06, 0x0c, KEY_RIGHT) \
+ \
+ MATRIX_KEY(0x07, 0x01, KEY_Q) \
+ MATRIX_KEY(0x07, 0x02, KEY_E) \
+ MATRIX_KEY(0x07, 0x03, KEY_R) \
+ MATRIX_KEY(0x07, 0x04, KEY_W) \
+ MATRIX_KEY(0x07, 0x05, KEY_I) \
+ MATRIX_KEY(0x07, 0x06, KEY_U) \
+ MATRIX_KEY(0x07, 0x07, KEY_RIGHTSHIFT) \
+ MATRIX_KEY(0x07, 0x08, KEY_P) \
+ MATRIX_KEY(0x07, 0x09, KEY_O) \
+ MATRIX_KEY(0x07, 0x0b, KEY_UP) \
+ MATRIX_KEY(0x07, 0x0c, KEY_LEFT)
+
+#endif /* _CROS_EC_KEYBOARD_H */
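The two keymap macros expand to lists of MATRIX_KEY() entries (MATRIX_KEY() and the KEY_* codes come from dt-bindings/input/input.h and the event-code header it pulls in), so a board can splice them into linux,keymap and append its own keys. A minimal sketch; the surrounding matrix-size properties are illustrative:

	keyboard-controller {
		compatible = "google,cros-ec-keyb";
		keypad,num-rows = <8>;
		keypad,num-columns = <13>;
		linux,keymap = <CROS_STD_TOP_ROW_KEYMAP
				CROS_STD_MAIN_KEYMAP>;
	};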
diff --git a/include/dt-bindings/interconnect/fsl,imx8mp.h b/include/dt-bindings/interconnect/fsl,imx8mp.h
new file mode 100644
index 000000000000..7357d417529a
--- /dev/null
+++ b/include/dt-bindings/interconnect/fsl,imx8mp.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Interconnect framework driver for i.MX SoC
+ *
+ * Copyright 2022 NXP
+ * Peng Fan <peng.fan@nxp.com>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_IMX8MP_H
+#define __DT_BINDINGS_INTERCONNECT_IMX8MP_H
+
+#define IMX8MP_ICN_NOC 0
+#define IMX8MP_ICN_MAIN 1
+#define IMX8MP_ICS_DRAM 2
+#define IMX8MP_ICS_OCRAM 3
+#define IMX8MP_ICM_A53 4
+#define IMX8MP_ICM_SUPERMIX 5
+#define IMX8MP_ICM_GIC 6
+#define IMX8MP_ICM_MLMIX 7
+
+#define IMX8MP_ICN_AUDIO 8
+#define IMX8MP_ICM_DSP 9
+#define IMX8MP_ICM_SDMA2PER 10
+#define IMX8MP_ICM_SDMA2BURST 11
+#define IMX8MP_ICM_SDMA3PER 12
+#define IMX8MP_ICM_SDMA3BURST 13
+#define IMX8MP_ICM_EDMA 14
+
+#define IMX8MP_ICN_GPU 15
+#define IMX8MP_ICM_GPU2D 16
+#define IMX8MP_ICM_GPU3D 17
+
+#define IMX8MP_ICN_HDMI 18
+#define IMX8MP_ICM_HRV 19
+#define IMX8MP_ICM_LCDIF_HDMI 20
+#define IMX8MP_ICM_HDCP 21
+
+#define IMX8MP_ICN_HSIO 22
+#define IMX8MP_ICM_NOC_PCIE 23
+#define IMX8MP_ICM_USB1 24
+#define IMX8MP_ICM_USB2 25
+#define IMX8MP_ICM_PCIE 26
+
+#define IMX8MP_ICN_MEDIA 27
+#define IMX8MP_ICM_LCDIF_RD 28
+#define IMX8MP_ICM_LCDIF_WR 29
+#define IMX8MP_ICM_ISI0 30
+#define IMX8MP_ICM_ISI1 31
+#define IMX8MP_ICM_ISI2 32
+#define IMX8MP_ICM_ISP0 33
+#define IMX8MP_ICM_ISP1 34
+#define IMX8MP_ICM_DWE 35
+
+#define IMX8MP_ICN_VIDEO 36
+#define IMX8MP_ICM_VPU_G1 37
+#define IMX8MP_ICM_VPU_G2 38
+#define IMX8MP_ICM_VPU_H1 39
+
+#endif /* __DT_BINDINGS_INTERCONNECT_IMX8MP_H */
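Consumers vote for bandwidth by naming a master/slave pair from this table in their interconnects property. A minimal sketch, assuming a hypothetical NoC provider label and consumer node:

	lcdif {
		interconnects = <&noc IMX8MP_ICM_LCDIF_RD &noc IMX8MP_ICS_DRAM>;
		interconnect-names = "lcdif-rd";
	};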
diff --git a/include/dt-bindings/interconnect/imx8mm.h b/include/dt-bindings/interconnect/imx8mm.h
new file mode 100644
index 000000000000..8f10bb06cb59
--- /dev/null
+++ b/include/dt-bindings/interconnect/imx8mm.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interconnect framework driver for i.MX SoC
+ *
+ * Copyright (c) 2019, BayLibre
+ * Copyright (c) 2019-2020, NXP
+ * Author: Alexandre Bailon <abailon@baylibre.com>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_IMX8MM_H
+#define __DT_BINDINGS_INTERCONNECT_IMX8MM_H
+
+#define IMX8MM_ICN_NOC 1
+#define IMX8MM_ICS_DRAM 2
+#define IMX8MM_ICS_OCRAM 3
+#define IMX8MM_ICM_A53 4
+
+#define IMX8MM_ICM_VPU_H1 5
+#define IMX8MM_ICM_VPU_G1 6
+#define IMX8MM_ICM_VPU_G2 7
+#define IMX8MM_ICN_VIDEO 8
+
+#define IMX8MM_ICM_GPU2D 9
+#define IMX8MM_ICM_GPU3D 10
+#define IMX8MM_ICN_GPU 11
+
+#define IMX8MM_ICM_CSI 12
+#define IMX8MM_ICM_LCDIF 13
+#define IMX8MM_ICN_MIPI 14
+
+#define IMX8MM_ICM_USB1 15
+#define IMX8MM_ICM_USB2 16
+#define IMX8MM_ICM_PCIE 17
+#define IMX8MM_ICN_HSIO 18
+
+#define IMX8MM_ICM_SDMA2 19
+#define IMX8MM_ICM_SDMA3 20
+#define IMX8MM_ICN_AUDIO 21
+
+#define IMX8MM_ICN_ENET 22
+#define IMX8MM_ICM_ENET 23
+
+#define IMX8MM_ICN_MAIN 24
+#define IMX8MM_ICM_NAND 25
+#define IMX8MM_ICM_SDMA1 26
+#define IMX8MM_ICM_USDHC1 27
+#define IMX8MM_ICM_USDHC2 28
+#define IMX8MM_ICM_USDHC3 29
+
+#endif /* __DT_BINDINGS_INTERCONNECT_IMX8MM_H */
diff --git a/include/dt-bindings/interconnect/imx8mn.h b/include/dt-bindings/interconnect/imx8mn.h
new file mode 100644
index 000000000000..307b977100b6
--- /dev/null
+++ b/include/dt-bindings/interconnect/imx8mn.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interconnect framework driver for i.MX SoC
+ *
+ * Copyright (c) 2019-2020, NXP
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_IMX8MN_H
+#define __DT_BINDINGS_INTERCONNECT_IMX8MN_H
+
+#define IMX8MN_ICN_NOC 1
+#define IMX8MN_ICS_DRAM 2
+#define IMX8MN_ICS_OCRAM 3
+#define IMX8MN_ICM_A53 4
+
+#define IMX8MN_ICM_GPU 5
+#define IMX8MN_ICN_GPU 6
+
+#define IMX8MN_ICM_CSI1 7
+#define IMX8MN_ICM_CSI2 8
+#define IMX8MN_ICM_ISI 9
+#define IMX8MN_ICM_LCDIF 10
+#define IMX8MN_ICN_MIPI 11
+
+#define IMX8MN_ICM_USB 12
+
+#define IMX8MN_ICM_SDMA2 13
+#define IMX8MN_ICM_SDMA3 14
+#define IMX8MN_ICN_AUDIO 15
+
+#define IMX8MN_ICN_ENET 16
+#define IMX8MN_ICM_ENET 17
+
+#define IMX8MN_ICM_NAND 18
+#define IMX8MN_ICM_SDMA1 19
+#define IMX8MN_ICM_USDHC1 20
+#define IMX8MN_ICM_USDHC2 21
+#define IMX8MN_ICM_USDHC3 22
+#define IMX8MN_ICN_MAIN 23
+
+#endif /* __DT_BINDINGS_INTERCONNECT_IMX8MN_H */
diff --git a/include/dt-bindings/interconnect/imx8mq.h b/include/dt-bindings/interconnect/imx8mq.h
new file mode 100644
index 000000000000..1a4cae7f8be2
--- /dev/null
+++ b/include/dt-bindings/interconnect/imx8mq.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interconnect framework driver for i.MX SoC
+ *
+ * Copyright (c) 2019-2020, NXP
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_IMX8MQ_H
+#define __DT_BINDINGS_INTERCONNECT_IMX8MQ_H
+
+#define IMX8MQ_ICN_NOC 1
+#define IMX8MQ_ICS_DRAM 2
+#define IMX8MQ_ICS_OCRAM 3
+#define IMX8MQ_ICM_A53 4
+
+#define IMX8MQ_ICM_VPU 5
+#define IMX8MQ_ICN_VIDEO 6
+
+#define IMX8MQ_ICM_GPU 7
+#define IMX8MQ_ICN_GPU 8
+
+#define IMX8MQ_ICM_DCSS 9
+#define IMX8MQ_ICN_DCSS 10
+
+#define IMX8MQ_ICM_USB1 11
+#define IMX8MQ_ICM_USB2 12
+#define IMX8MQ_ICN_USB 13
+
+#define IMX8MQ_ICM_CSI1 14
+#define IMX8MQ_ICM_CSI2 15
+#define IMX8MQ_ICM_LCDIF 16
+#define IMX8MQ_ICN_DISPLAY 17
+
+#define IMX8MQ_ICM_SDMA2 18
+#define IMX8MQ_ICN_AUDIO 19
+
+#define IMX8MQ_ICN_ENET 20
+#define IMX8MQ_ICM_ENET 21
+
+#define IMX8MQ_ICM_SDMA1 22
+#define IMX8MQ_ICM_NAND 23
+#define IMX8MQ_ICM_USDHC1 24
+#define IMX8MQ_ICM_USDHC2 25
+#define IMX8MQ_ICM_PCIE1 26
+#define IMX8MQ_ICM_PCIE2 27
+#define IMX8MQ_ICN_MAIN 28
+
+#endif /* __DT_BINDINGS_INTERCONNECT_IMX8MQ_H */
diff --git a/include/dt-bindings/interconnect/qcom,icc.h b/include/dt-bindings/interconnect/qcom,icc.h
new file mode 100644
index 000000000000..cd34f36daaaa
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,icc.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_ICC_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_ICC_H
+
+/*
+ * The AMC bucket denotes constraints that are applied to hardware when
+ * icc_set_bw() completes, whereas the WAKE and SLEEP constraints are applied
+ * when the execution environment transitions between active and low power mode.
+ */
+#define QCOM_ICC_BUCKET_AMC 0
+#define QCOM_ICC_BUCKET_WAKE 1
+#define QCOM_ICC_BUCKET_SLEEP 2
+#define QCOM_ICC_NUM_BUCKETS 3
+
+#define QCOM_ICC_TAG_AMC (1 << QCOM_ICC_BUCKET_AMC)
+#define QCOM_ICC_TAG_WAKE (1 << QCOM_ICC_BUCKET_WAKE)
+#define QCOM_ICC_TAG_SLEEP (1 << QCOM_ICC_BUCKET_SLEEP)
+#define QCOM_ICC_TAG_ACTIVE_ONLY (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE)
+#define QCOM_ICC_TAG_ALWAYS (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE |\
+ QCOM_ICC_TAG_SLEEP)
+
+#endif
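With a provider that declares #interconnect-cells = <2>, the tag rides in the second cell of each path endpoint and selects which buckets a bandwidth vote lands in. A minimal consumer sketch, with hypothetical provider labels and endpoint IDs:

	interconnects = <&aggre1_noc MASTER_UFS_MEM QCOM_ICC_TAG_ALWAYS
			 &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;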
diff --git a/include/dt-bindings/interconnect/qcom,msm8939.h b/include/dt-bindings/interconnect/qcom,msm8939.h
new file mode 100644
index 000000000000..c22369a4b9f5
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8939.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm interconnect IDs
+ *
+ * Copyright (c) 2020, Linaro Ltd.
+ * Author: Jun Nie <jun.nie@linaro.org>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8939_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8939_H
+
+#define BIMC_SNOC_SLV 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_QDSS_ETR 2
+#define MASTER_SNOC_CFG 3
+#define PCNOC_SNOC_SLV 4
+#define SLAVE_APSS 5
+#define SLAVE_CATS_128 6
+#define SLAVE_OCMEM_64 7
+#define SLAVE_IMEM 8
+#define SLAVE_QDSS_STM 9
+#define SLAVE_SRVC_SNOC 10
+#define SNOC_BIMC_0_MAS 11
+#define SNOC_BIMC_1_MAS 12
+#define SNOC_BIMC_2_MAS 13
+#define SNOC_INT_0 14
+#define SNOC_INT_1 15
+#define SNOC_INT_BIMC 16
+#define SNOC_PCNOC_MAS 17
+#define SNOC_QDSS_INT 18
+
+#define MASTER_VIDEO_P0 0
+#define MASTER_JPEG 1
+#define MASTER_VFE 2
+#define MASTER_MDP_PORT0 3
+#define MASTER_MDP_PORT1 4
+#define MASTER_CPP 5
+#define SNOC_MM_INT_0 6
+#define SNOC_MM_INT_1 7
+#define SNOC_MM_INT_2 8
+
+#define BIMC_SNOC_MAS 0
+#define MASTER_AMPSS_M0 1
+#define MASTER_GRAPHICS_3D 2
+#define MASTER_TCU0 3
+#define SLAVE_AMPSS_L2 4
+#define SLAVE_EBI_CH0 5
+#define SNOC_BIMC_0_SLV 6
+#define SNOC_BIMC_1_SLV 7
+#define SNOC_BIMC_2_SLV 8
+
+#define MASTER_BLSP_1 0
+#define MASTER_DEHR 1
+#define MASTER_LPASS 2
+#define MASTER_CRYPTO_CORE0 3
+#define MASTER_SDCC_1 4
+#define MASTER_SDCC_2 5
+#define MASTER_SPDM 6
+#define MASTER_USB_HS1 7
+#define MASTER_USB_HS2 8
+#define PCNOC_INT_0 9
+#define PCNOC_INT_1 10
+#define PCNOC_MAS_0 11
+#define PCNOC_MAS_1 12
+#define PCNOC_SLV_0 13
+#define PCNOC_SLV_1 14
+#define PCNOC_SLV_2 15
+#define PCNOC_SLV_3 16
+#define PCNOC_SLV_4 17
+#define PCNOC_SLV_8 18
+#define PCNOC_SLV_9 19
+#define PCNOC_SNOC_MAS 20
+#define SLAVE_BIMC_CFG 21
+#define SLAVE_BLSP_1 22
+#define SLAVE_BOOT_ROM 23
+#define SLAVE_CAMERA_CFG 24
+#define SLAVE_CLK_CTL 25
+#define SLAVE_CRYPTO_0_CFG 26
+#define SLAVE_DEHR_CFG 27
+#define SLAVE_DISPLAY_CFG 28
+#define SLAVE_GRAPHICS_3D_CFG 29
+#define SLAVE_IMEM_CFG 30
+#define SLAVE_LPASS 31
+#define SLAVE_MPM 32
+#define SLAVE_MSG_RAM 33
+#define SLAVE_MSS 34
+#define SLAVE_PDM 35
+#define SLAVE_PMIC_ARB 36
+#define SLAVE_PCNOC_CFG 37
+#define SLAVE_PRNG 38
+#define SLAVE_QDSS_CFG 39
+#define SLAVE_RBCPR_CFG 40
+#define SLAVE_SDCC_1 41
+#define SLAVE_SDCC_2 42
+#define SLAVE_SECURITY 43
+#define SLAVE_SNOC_CFG 44
+#define SLAVE_SPDM 45
+#define SLAVE_TCSR 46
+#define SLAVE_TLMM 47
+#define SLAVE_USB_HS1 48
+#define SLAVE_USB_HS2 49
+#define SLAVE_VENUS_CFG 50
+#define SNOC_PCNOC_SLV 51
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,msm8996.h b/include/dt-bindings/interconnect/qcom,msm8996.h
new file mode 100644
index 000000000000..a0b7c0ec7bed
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8996.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * Qualcomm MSM8996 interconnect IDs
+ *
+ * Copyright (c) 2021 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8996_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8996_H
+
+/* A0NOC */
+#define MASTER_PCIE_0 0
+#define MASTER_PCIE_1 1
+#define MASTER_PCIE_2 2
+
+/* A1NOC */
+#define MASTER_CNOC_A1NOC 0
+#define MASTER_CRYPTO_CORE0 1
+#define MASTER_PNOC_A1NOC 2
+
+/* A2NOC */
+#define MASTER_USB3 0
+#define MASTER_IPA 1
+#define MASTER_UFS 2
+
+/* BIMC */
+#define MASTER_AMPSS_M0 0
+#define MASTER_GRAPHICS_3D 1
+#define MASTER_MNOC_BIMC 2
+#define MASTER_SNOC_BIMC 3
+#define SLAVE_EBI_CH0 4
+#define SLAVE_HMSS_L3 5
+#define SLAVE_BIMC_SNOC_0 6
+#define SLAVE_BIMC_SNOC_1 7
+
+/* CNOC */
+#define MASTER_SNOC_CNOC 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_CNOC_A1NOC 2
+#define SLAVE_CLK_CTL 3
+#define SLAVE_TCSR 4
+#define SLAVE_TLMM 5
+#define SLAVE_CRYPTO_0_CFG 6
+#define SLAVE_MPM 7
+#define SLAVE_PIMEM_CFG 8
+#define SLAVE_IMEM_CFG 9
+#define SLAVE_MESSAGE_RAM 10
+#define SLAVE_BIMC_CFG 11
+#define SLAVE_PMIC_ARB 12
+#define SLAVE_PRNG 13
+#define SLAVE_DCC_CFG 14
+#define SLAVE_RBCPR_MX 15
+#define SLAVE_QDSS_CFG 16
+#define SLAVE_RBCPR_CX 17
+#define SLAVE_QDSS_RBCPR_APU 18
+#define SLAVE_CNOC_MNOC_CFG 19
+#define SLAVE_SNOC_CFG 20
+#define SLAVE_SNOC_MPU_CFG 21
+#define SLAVE_EBI1_PHY_CFG 22
+#define SLAVE_A0NOC_CFG 23
+#define SLAVE_PCIE_1_CFG 24
+#define SLAVE_PCIE_2_CFG 25
+#define SLAVE_PCIE_0_CFG 26
+#define SLAVE_PCIE20_AHB2PHY 27
+#define SLAVE_A0NOC_MPU_CFG 28
+#define SLAVE_UFS_CFG 29
+#define SLAVE_A1NOC_CFG 30
+#define SLAVE_A1NOC_MPU_CFG 31
+#define SLAVE_A2NOC_CFG 32
+#define SLAVE_A2NOC_MPU_CFG 33
+#define SLAVE_SSC_CFG 34
+#define SLAVE_A0NOC_SMMU_CFG 35
+#define SLAVE_A1NOC_SMMU_CFG 36
+#define SLAVE_A2NOC_SMMU_CFG 37
+#define SLAVE_LPASS_SMMU_CFG 38
+#define SLAVE_CNOC_MNOC_MMSS_CFG 39
+
+/* MNOC */
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_CPP 1
+#define MASTER_JPEG 2
+#define MASTER_MDP_PORT0 3
+#define MASTER_MDP_PORT1 4
+#define MASTER_ROTATOR 5
+#define MASTER_VIDEO_P0 6
+#define MASTER_VFE 7
+#define MASTER_SNOC_VMEM 8
+#define MASTER_VIDEO_P0_OCMEM 9
+#define MASTER_CNOC_MNOC_MMSS_CFG 10
+#define SLAVE_MNOC_BIMC 11
+#define SLAVE_VMEM 12
+#define SLAVE_SERVICE_MNOC 13
+#define SLAVE_MMAGIC_CFG 14
+#define SLAVE_CPR_CFG 15
+#define SLAVE_MISC_CFG 16
+#define SLAVE_VENUS_THROTTLE_CFG 17
+#define SLAVE_VENUS_CFG 18
+#define SLAVE_VMEM_CFG 19
+#define SLAVE_DSA_CFG 20
+#define SLAVE_MMSS_CLK_CFG 21
+#define SLAVE_DSA_MPU_CFG 22
+#define SLAVE_MNOC_MPU_CFG 23
+#define SLAVE_DISPLAY_CFG 24
+#define SLAVE_DISPLAY_THROTTLE_CFG 25
+#define SLAVE_CAMERA_CFG 26
+#define SLAVE_CAMERA_THROTTLE_CFG 27
+#define SLAVE_GRAPHICS_3D_CFG 28
+#define SLAVE_SMMU_MDP_CFG 29
+#define SLAVE_SMMU_ROT_CFG 30
+#define SLAVE_SMMU_VENUS_CFG 31
+#define SLAVE_SMMU_CPP_CFG 32
+#define SLAVE_SMMU_JPEG_CFG 33
+#define SLAVE_SMMU_VFE_CFG 34
+
+/* PNOC */
+#define MASTER_SNOC_PNOC 0
+#define MASTER_SDCC_1 1
+#define MASTER_SDCC_2 2
+#define MASTER_SDCC_4 3
+#define MASTER_USB_HS 4
+#define MASTER_BLSP_1 5
+#define MASTER_BLSP_2 6
+#define MASTER_TSIF 7
+#define SLAVE_PNOC_A1NOC 8
+#define SLAVE_USB_HS 9
+#define SLAVE_SDCC_2 10
+#define SLAVE_SDCC_4 11
+#define SLAVE_TSIF 12
+#define SLAVE_BLSP_2 13
+#define SLAVE_SDCC_1 14
+#define SLAVE_BLSP_1 15
+#define SLAVE_PDM 16
+#define SLAVE_AHB2PHY 17
+
+/* SNOC */
+#define MASTER_HMSS 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_SNOC_CFG 2
+#define MASTER_BIMC_SNOC_0 3
+#define MASTER_BIMC_SNOC_1 4
+#define MASTER_A0NOC_SNOC 5
+#define MASTER_A1NOC_SNOC 6
+#define MASTER_A2NOC_SNOC 7
+#define MASTER_QDSS_ETR 8
+#define SLAVE_A0NOC_SNOC 9
+#define SLAVE_A1NOC_SNOC 10
+#define SLAVE_A2NOC_SNOC 11
+#define SLAVE_HMSS 12
+#define SLAVE_LPASS 13
+#define SLAVE_USB3 14
+#define SLAVE_SNOC_BIMC 15
+#define SLAVE_SNOC_CNOC 16
+#define SLAVE_IMEM 17
+#define SLAVE_PIMEM 18
+#define SLAVE_SNOC_VMEM 19
+#define SLAVE_SNOC_PNOC 20
+#define SLAVE_QDSS_STM 21
+#define SLAVE_PCIE_0 22
+#define SLAVE_PCIE_1 23
+#define SLAVE_PCIE_2 24
+#define SLAVE_SERVICE_SNOC 25
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,osm-l3.h b/include/dt-bindings/interconnect/qcom,osm-l3.h
new file mode 100644
index 000000000000..61ef649ae565
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,osm-l3.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_OSM_L3_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_OSM_L3_H
+
+#define MASTER_OSM_L3_APPS 0
+#define SLAVE_OSM_L3 1
+
+#define MASTER_EPSS_L3_APPS 0
+#define SLAVE_EPSS_L3_SHARED 1
+
+#endif
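The OSM/EPSS L3 master/slave pairs are typically voted on from CPU nodes so that L3 cache frequency scales with CPU demand. A minimal sketch, assuming a hypothetical provider label:

	cpu@0 {
		interconnects = <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>;
	};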
diff --git a/include/dt-bindings/interconnect/qcom,qcm2290.h b/include/dt-bindings/interconnect/qcom,qcm2290.h
new file mode 100644
index 000000000000..6cbbb7fe0bd3
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,qcm2290.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* QCM2290 interconnect IDs */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_QCM2290_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_QCM2290_H
+
+/* BIMC */
+#define MASTER_APPSS_PROC 0
+#define MASTER_SNOC_BIMC_RT 1
+#define MASTER_SNOC_BIMC_NRT 2
+#define MASTER_SNOC_BIMC 3
+#define MASTER_TCU_0 4
+#define MASTER_GFX3D 5
+#define SLAVE_EBI1 6
+#define SLAVE_BIMC_SNOC 7
+
+/* CNOC */
+#define MASTER_SNOC_CNOC 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_BIMC_CFG 2
+#define SLAVE_CAMERA_NRT_THROTTLE_CFG 3
+#define SLAVE_CAMERA_RT_THROTTLE_CFG 4
+#define SLAVE_CAMERA_CFG 5
+#define SLAVE_CLK_CTL 6
+#define SLAVE_CRYPTO_0_CFG 7
+#define SLAVE_DISPLAY_CFG 8
+#define SLAVE_DISPLAY_THROTTLE_CFG 9
+#define SLAVE_GPU_CFG 10
+#define SLAVE_HWKM 11
+#define SLAVE_IMEM_CFG 12
+#define SLAVE_IPA_CFG 13
+#define SLAVE_LPASS 14
+#define SLAVE_MESSAGE_RAM 15
+#define SLAVE_PDM 16
+#define SLAVE_PIMEM_CFG 17
+#define SLAVE_PKA_WRAPPER 18
+#define SLAVE_PMIC_ARB 19
+#define SLAVE_PRNG 20
+#define SLAVE_QDSS_CFG 21
+#define SLAVE_QM_CFG 22
+#define SLAVE_QM_MPU_CFG 23
+#define SLAVE_QPIC 24
+#define SLAVE_QUP_0 25
+#define SLAVE_SDCC_1 26
+#define SLAVE_SDCC_2 27
+#define SLAVE_SNOC_CFG 28
+#define SLAVE_TCSR 29
+#define SLAVE_USB3 30
+#define SLAVE_VENUS_CFG 31
+#define SLAVE_VENUS_THROTTLE_CFG 32
+#define SLAVE_VSENSE_CTRL_CFG 33
+#define SLAVE_SERVICE_CNOC 34
+
+/* SNOC */
+#define MASTER_CRYPTO_CORE0 0
+#define MASTER_SNOC_CFG 1
+#define MASTER_TIC 2
+#define MASTER_ANOC_SNOC 3
+#define MASTER_BIMC_SNOC 4
+#define MASTER_PIMEM 5
+#define MASTER_QDSS_BAM 6
+#define MASTER_QUP_0 7
+#define MASTER_IPA 8
+#define MASTER_QDSS_ETR 9
+#define MASTER_SDCC_1 10
+#define MASTER_SDCC_2 11
+#define MASTER_QPIC 12
+#define MASTER_USB3_0 13
+#define SLAVE_APPSS 14
+#define SLAVE_SNOC_CNOC 15
+#define SLAVE_IMEM 16
+#define SLAVE_PIMEM 17
+#define SLAVE_SNOC_BIMC 18
+#define SLAVE_SERVICE_SNOC 19
+#define SLAVE_QDSS_STM 20
+#define SLAVE_TCU 21
+#define SLAVE_ANOC_SNOC 22
+
+/* QUP Virtual */
+#define MASTER_QUP_CORE_0 0
+#define SLAVE_QUP_CORE_0 1
+
+/* MMNRT Virtual */
+#define MASTER_CAMNOC_SF 0
+#define MASTER_VIDEO_P0 1
+#define MASTER_VIDEO_PROC 2
+#define SLAVE_SNOC_BIMC_NRT 3
+
+/* MMRT Virtual */
+#define MASTER_CAMNOC_HF 0
+#define MASTER_MDP0 1
+#define SLAVE_SNOC_BIMC_RT 2
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sc7180.h b/include/dt-bindings/interconnect/qcom,sc7180.h
new file mode 100644
index 000000000000..f9970f6032eb
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sc7180.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm SC7180 interconnect IDs
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SC7180_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SC7180_H
+
+#define MASTER_A1NOC_CFG 0
+#define MASTER_QSPI 1
+#define MASTER_QUP_0 2
+#define MASTER_SDCC_2 3
+#define MASTER_EMMC 4
+#define MASTER_UFS_MEM 5
+#define SLAVE_A1NOC_SNOC 6
+#define SLAVE_SERVICE_A1NOC 7
+
+#define MASTER_A2NOC_CFG 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_QUP_1 2
+#define MASTER_USB3 3
+#define MASTER_CRYPTO 4
+#define MASTER_IPA 5
+#define MASTER_QDSS_ETR 6
+#define SLAVE_A2NOC_SNOC 7
+#define SLAVE_SERVICE_A2NOC 8
+
+#define MASTER_CAMNOC_HF0_UNCOMP 0
+#define MASTER_CAMNOC_HF1_UNCOMP 1
+#define MASTER_CAMNOC_SF_UNCOMP 2
+#define SLAVE_CAMNOC_UNCOMP 3
+
+#define MASTER_NPU 0
+#define MASTER_NPU_PROC 1
+#define SLAVE_CDSP_GEM_NOC 2
+
+#define MASTER_SNOC_CNOC 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_A1NOC_CFG 2
+#define SLAVE_A2NOC_CFG 3
+#define SLAVE_AHB2PHY_SOUTH 4
+#define SLAVE_AHB2PHY_CENTER 5
+#define SLAVE_AOP 6
+#define SLAVE_AOSS 7
+#define SLAVE_BOOT_ROM 8
+#define SLAVE_CAMERA_CFG 9
+#define SLAVE_CAMERA_NRT_THROTTLE_CFG 10
+#define SLAVE_CAMERA_RT_THROTTLE_CFG 11
+#define SLAVE_CLK_CTL 12
+#define SLAVE_RBCPR_CX_CFG 13
+#define SLAVE_RBCPR_MX_CFG 14
+#define SLAVE_CRYPTO_0_CFG 15
+#define SLAVE_DCC_CFG 16
+#define SLAVE_CNOC_DDRSS 17
+#define SLAVE_DISPLAY_CFG 18
+#define SLAVE_DISPLAY_RT_THROTTLE_CFG 19
+#define SLAVE_DISPLAY_THROTTLE_CFG 20
+#define SLAVE_EMMC_CFG 21
+#define SLAVE_GLM 22
+#define SLAVE_GFX3D_CFG 23
+#define SLAVE_IMEM_CFG 24
+#define SLAVE_IPA_CFG 25
+#define SLAVE_CNOC_MNOC_CFG 26
+#define SLAVE_CNOC_MSS 27
+#define SLAVE_NPU_CFG 28
+#define SLAVE_NPU_DMA_BWMON_CFG 29
+#define SLAVE_NPU_PROC_BWMON_CFG 30
+#define SLAVE_PDM 31
+#define SLAVE_PIMEM_CFG 32
+#define SLAVE_PRNG 33
+#define SLAVE_QDSS_CFG 34
+#define SLAVE_QM_CFG 35
+#define SLAVE_QM_MPU_CFG 36
+#define SLAVE_QSPI_0 37
+#define SLAVE_QUP_0 38
+#define SLAVE_QUP_1 39
+#define SLAVE_SDCC_2 40
+#define SLAVE_SECURITY 41
+#define SLAVE_SNOC_CFG 42
+#define SLAVE_TCSR 43
+#define SLAVE_TLMM_WEST 44
+#define SLAVE_TLMM_NORTH 45
+#define SLAVE_TLMM_SOUTH 46
+#define SLAVE_UFS_MEM_CFG 47
+#define SLAVE_USB3 48
+#define SLAVE_VENUS_CFG 49
+#define SLAVE_VENUS_THROTTLE_CFG 50
+#define SLAVE_VSENSE_CTRL_CFG 51
+#define SLAVE_SERVICE_CNOC 52
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_GEM_NOC_CFG 1
+#define SLAVE_LLCC_CFG 2
+
+#define MASTER_APPSS_PROC 0
+#define MASTER_SYS_TCU 1
+#define MASTER_GEM_NOC_CFG 2
+#define MASTER_COMPUTE_NOC 3
+#define MASTER_MNOC_HF_MEM_NOC 4
+#define MASTER_MNOC_SF_MEM_NOC 5
+#define MASTER_SNOC_GC_MEM_NOC 6
+#define MASTER_SNOC_SF_MEM_NOC 7
+#define MASTER_GFX3D 8
+#define SLAVE_MSS_PROC_MS_MPU_CFG 9
+#define SLAVE_GEM_NOC_SNOC 10
+#define SLAVE_LLCC 11
+#define SLAVE_SERVICE_GEM_NOC 12
+
+#define MASTER_IPA_CORE 0
+#define SLAVE_IPA_CORE 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_CAMNOC_HF0 1
+#define MASTER_CAMNOC_HF1 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_MDP0 4
+#define MASTER_ROTATOR 5
+#define MASTER_VIDEO_P0 6
+#define MASTER_VIDEO_PROC 7
+#define SLAVE_MNOC_HF_MEM_NOC 8
+#define SLAVE_MNOC_SF_MEM_NOC 9
+#define SLAVE_SERVICE_MNOC 10
+
+#define MASTER_NPU_SYS 0
+#define MASTER_NPU_NOC_CFG 1
+#define SLAVE_NPU_CAL_DP0 2
+#define SLAVE_NPU_CP 3
+#define SLAVE_NPU_INT_DMA_BWMON_CFG 4
+#define SLAVE_NPU_DPM 5
+#define SLAVE_ISENSE_CFG 6
+#define SLAVE_NPU_LLM_CFG 7
+#define SLAVE_NPU_TCM 8
+#define SLAVE_NPU_COMPUTE_NOC 9
+#define SLAVE_SERVICE_NPU_NOC 10
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define SLAVE_QUP_CORE_0 2
+#define SLAVE_QUP_CORE_1 3
+
+#define MASTER_SNOC_CFG 0
+#define MASTER_A1NOC_SNOC 1
+#define MASTER_A2NOC_SNOC 2
+#define MASTER_GEM_NOC_SNOC 3
+#define MASTER_PIMEM 4
+#define SLAVE_APPSS 5
+#define SLAVE_SNOC_CNOC 6
+#define SLAVE_SNOC_GEM_NOC_GC 7
+#define SLAVE_SNOC_GEM_NOC_SF 8
+#define SLAVE_IMEM 9
+#define SLAVE_PIMEM 10
+#define SLAVE_SERVICE_SNOC 11
+#define SLAVE_QDSS_STM 12
+#define SLAVE_TCU 13
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sc7280.h b/include/dt-bindings/interconnect/qcom,sc7280.h
new file mode 100644
index 000000000000..21b000443999
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sc7280.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm SC7280 interconnect IDs
+ *
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SC7280_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SC7280_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_0 1
+#define MASTER_QUP_1 2
+#define MASTER_A1NOC_CFG 3
+#define MASTER_PCIE_0 4
+#define MASTER_PCIE_1 5
+#define MASTER_SDCC_1 6
+#define MASTER_SDCC_2 7
+#define MASTER_SDCC_4 8
+#define MASTER_UFS_MEM 9
+#define MASTER_USB2 10
+#define MASTER_USB3_0 11
+#define SLAVE_A1NOC_SNOC 12
+#define SLAVE_ANOC_PCIE_GEM_NOC 13
+#define SLAVE_SERVICE_A1NOC 14
+
+#define MASTER_QDSS_BAM 0
+#define MASTER_A2NOC_CFG 1
+#define MASTER_CNOC_A2NOC 2
+#define MASTER_CRYPTO 3
+#define MASTER_IPA 4
+#define MASTER_QDSS_ETR 5
+#define SLAVE_A2NOC_SNOC 6
+#define SLAVE_SERVICE_A2NOC 7
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define SLAVE_QUP_CORE_0 2
+#define SLAVE_QUP_CORE_1 3
+
+#define MASTER_CNOC3_CNOC2 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_AHB2PHY_SOUTH 2
+#define SLAVE_AHB2PHY_NORTH 3
+#define SLAVE_CAMERA_CFG 4
+#define SLAVE_CLK_CTL 5
+#define SLAVE_CDSP_CFG 6
+#define SLAVE_RBCPR_CX_CFG 7
+#define SLAVE_RBCPR_MX_CFG 8
+#define SLAVE_CRYPTO_0_CFG 9
+#define SLAVE_CX_RDPM 10
+#define SLAVE_DCC_CFG 11
+#define SLAVE_DISPLAY_CFG 12
+#define SLAVE_GFX3D_CFG 13
+#define SLAVE_HWKM 14
+#define SLAVE_IMEM_CFG 15
+#define SLAVE_IPA_CFG 16
+#define SLAVE_IPC_ROUTER_CFG 17
+#define SLAVE_LPASS 18
+#define SLAVE_CNOC_MSS 19
+#define SLAVE_MX_RDPM 20
+#define SLAVE_PCIE_0_CFG 21
+#define SLAVE_PCIE_1_CFG 22
+#define SLAVE_PDM 23
+#define SLAVE_PIMEM_CFG 24
+#define SLAVE_PKA_WRAPPER_CFG 25
+#define SLAVE_PMU_WRAPPER_CFG 26
+#define SLAVE_QDSS_CFG 27
+#define SLAVE_QSPI_0 28
+#define SLAVE_QUP_0 29
+#define SLAVE_QUP_1 30
+#define SLAVE_SDCC_1 31
+#define SLAVE_SDCC_2 32
+#define SLAVE_SDCC_4 33
+#define SLAVE_SECURITY 34
+#define SLAVE_TCSR 35
+#define SLAVE_TLMM 36
+#define SLAVE_UFS_MEM_CFG 37
+#define SLAVE_USB2 38
+#define SLAVE_USB3_0 39
+#define SLAVE_VENUS_CFG 40
+#define SLAVE_VSENSE_CTRL_CFG 41
+#define SLAVE_A1NOC_CFG 42
+#define SLAVE_A2NOC_CFG 43
+#define SLAVE_CNOC2_CNOC3 44
+#define SLAVE_CNOC_MNOC_CFG 45
+#define SLAVE_SNOC_CFG 46
+
+#define MASTER_CNOC2_CNOC3 0
+#define MASTER_GEM_NOC_CNOC 1
+#define MASTER_GEM_NOC_PCIE_SNOC 2
+#define SLAVE_AOSS 3
+#define SLAVE_APPSS 4
+#define SLAVE_CNOC3_CNOC2 5
+#define SLAVE_CNOC_A2NOC 6
+#define SLAVE_DDRSS_CFG 7
+#define SLAVE_BOOT_IMEM 8
+#define SLAVE_IMEM 9
+#define SLAVE_PIMEM 10
+#define SLAVE_PCIE_0 11
+#define SLAVE_PCIE_1 12
+#define SLAVE_QDSS_STM 13
+#define SLAVE_TCU 14
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_GEM_NOC_CFG 2
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_APPSS_PROC 2
+#define MASTER_COMPUTE_NOC 3
+#define MASTER_GEM_NOC_CFG 4
+#define MASTER_GFX3D 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_ANOC_PCIE_GEM_NOC 8
+#define MASTER_SNOC_GC_MEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define SLAVE_MSS_PROC_MS_MPU_CFG 11
+#define SLAVE_MCDMA_MS_MPU_CFG 12
+#define SLAVE_GEM_NOC_CNOC 13
+#define SLAVE_LLCC 14
+#define SLAVE_MEM_NOC_PCIE_SNOC 15
+#define SLAVE_SERVICE_GEM_NOC_1 16
+#define SLAVE_SERVICE_GEM_NOC_2 17
+#define SLAVE_SERVICE_GEM_NOC 18
+
+#define MASTER_CNOC_LPASS_AG_NOC 0
+#define SLAVE_LPASS_CORE_CFG 1
+#define SLAVE_LPASS_LPI_CFG 2
+#define SLAVE_LPASS_MPU_CFG 3
+#define SLAVE_LPASS_TOP_CFG 4
+#define SLAVE_SERVICES_LPASS_AML_NOC 5
+#define SLAVE_SERVICE_LPASS_AG_NOC 6
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_VIDEO_P0 1
+#define MASTER_VIDEO_PROC 2
+#define MASTER_CAMNOC_HF 3
+#define MASTER_CAMNOC_ICP 4
+#define MASTER_CAMNOC_SF 5
+#define MASTER_MDP0 6
+#define SLAVE_MNOC_HF_MEM_NOC 7
+#define SLAVE_MNOC_SF_MEM_NOC 8
+#define SLAVE_SERVICE_MNOC 9
+
+#define MASTER_CDSP_NOC_CFG 0
+#define MASTER_CDSP_PROC 1
+#define SLAVE_CDSP_MEM_NOC 2
+#define SLAVE_SERVICE_NSP_NOC 3
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define MASTER_SNOC_CFG 2
+#define MASTER_PIMEM 3
+#define MASTER_GIC 4
+#define SLAVE_SNOC_GEM_NOC_GC 5
+#define SLAVE_SNOC_GEM_NOC_SF 6
+#define SLAVE_SERVICE_SNOC 7
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sc8180x.h b/include/dt-bindings/interconnect/qcom,sc8180x.h
new file mode 100644
index 000000000000..e84cfec5afdd
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sc8180x.h
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm SC8180x interconnect IDs
+ *
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SC8180X_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SC8180X_H
+
+#define MASTER_A1NOC_CFG 0
+#define MASTER_UFS_CARD 1
+#define MASTER_UFS_GEN4 2
+#define MASTER_UFS_MEM 3
+#define MASTER_USB3 4
+#define MASTER_USB3_1 5
+#define MASTER_USB3_2 6
+#define A1NOC_SNOC_SLV 7
+#define SLAVE_SERVICE_A1NOC 8
+
+#define MASTER_A2NOC_CFG 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_QSPI_0 2
+#define MASTER_QSPI_1 3
+#define MASTER_QUP_0 4
+#define MASTER_QUP_1 5
+#define MASTER_QUP_2 6
+#define MASTER_SENSORS_AHB 7
+#define MASTER_CRYPTO_CORE_0 8
+#define MASTER_IPA 9
+#define MASTER_EMAC 10
+#define MASTER_PCIE 11
+#define MASTER_PCIE_1 12
+#define MASTER_PCIE_2 13
+#define MASTER_PCIE_3 14
+#define MASTER_QDSS_ETR 15
+#define MASTER_SDCC_2 16
+#define MASTER_SDCC_4 17
+#define A2NOC_SNOC_SLV 18
+#define SLAVE_ANOC_PCIE_GEM_NOC 19
+#define SLAVE_SERVICE_A2NOC 20
+
+#define MASTER_CAMNOC_HF0_UNCOMP 0
+#define MASTER_CAMNOC_HF1_UNCOMP 1
+#define MASTER_CAMNOC_SF_UNCOMP 2
+#define SLAVE_CAMNOC_UNCOMP 3
+
+#define MASTER_NPU 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define SNOC_CNOC_MAS 0
+#define SLAVE_A1NOC_CFG 1
+#define SLAVE_A2NOC_CFG 2
+#define SLAVE_AHB2PHY_CENTER 3
+#define SLAVE_AHB2PHY_EAST 4
+#define SLAVE_AHB2PHY_WEST 5
+#define SLAVE_AHB2PHY_SOUTH 6
+#define SLAVE_AOP 7
+#define SLAVE_AOSS 8
+#define SLAVE_CAMERA_CFG 9
+#define SLAVE_CLK_CTL 10
+#define SLAVE_CDSP_CFG 11
+#define SLAVE_RBCPR_CX_CFG 12
+#define SLAVE_RBCPR_MMCX_CFG 13
+#define SLAVE_RBCPR_MX_CFG 14
+#define SLAVE_CRYPTO_0_CFG 15
+#define SLAVE_CNOC_DDRSS 16
+#define SLAVE_DISPLAY_CFG 17
+#define SLAVE_EMAC_CFG 18
+#define SLAVE_GLM 19
+#define SLAVE_GRAPHICS_3D_CFG 20
+#define SLAVE_IMEM_CFG 21
+#define SLAVE_IPA_CFG 22
+#define SLAVE_CNOC_MNOC_CFG 23
+#define SLAVE_NPU_CFG 24
+#define SLAVE_PCIE_0_CFG 25
+#define SLAVE_PCIE_1_CFG 26
+#define SLAVE_PCIE_2_CFG 27
+#define SLAVE_PCIE_3_CFG 28
+#define SLAVE_PDM 29
+#define SLAVE_PIMEM_CFG 30
+#define SLAVE_PRNG 31
+#define SLAVE_QDSS_CFG 32
+#define SLAVE_QSPI_0 33
+#define SLAVE_QSPI_1 34
+#define SLAVE_QUP_1 35
+#define SLAVE_QUP_2 36
+#define SLAVE_QUP_0 37
+#define SLAVE_SDCC_2 38
+#define SLAVE_SDCC_4 39
+#define SLAVE_SECURITY 40
+#define SLAVE_SNOC_CFG 41
+#define SLAVE_SPSS_CFG 42
+#define SLAVE_TCSR 43
+#define SLAVE_TLMM_EAST 44
+#define SLAVE_TLMM_SOUTH 45
+#define SLAVE_TLMM_WEST 46
+#define SLAVE_TSIF 47
+#define SLAVE_UFS_CARD_CFG 48
+#define SLAVE_UFS_MEM_0_CFG 49
+#define SLAVE_UFS_MEM_1_CFG 50
+#define SLAVE_USB3 51
+#define SLAVE_USB3_1 52
+#define SLAVE_USB3_2 53
+#define SLAVE_VENUS_CFG 54
+#define SLAVE_VSENSE_CTRL_CFG 55
+#define SLAVE_SERVICE_CNOC 56
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_GEM_NOC_CFG 1
+#define SLAVE_LLCC_CFG 2
+
+#define MASTER_AMPSS_M0 0
+#define MASTER_GPU_TCU 1
+#define MASTER_SYS_TCU 2
+#define MASTER_GEM_NOC_CFG 3
+#define MASTER_COMPUTE_NOC 4
+#define MASTER_GRAPHICS_3D 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_GEM_NOC_PCIE_SNOC 8
+#define MASTER_SNOC_GC_MEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define MASTER_ECC 11
+#define SLAVE_MSS_PROC_MS_MPU_CFG 12
+#define SLAVE_ECC 13
+#define SLAVE_GEM_NOC_SNOC 14
+#define SLAVE_LLCC 15
+#define SLAVE_SERVICE_GEM_NOC 16
+#define SLAVE_SERVICE_GEM_NOC_1 17
+
+#define MASTER_IPA_CORE 0
+#define SLAVE_IPA_CORE 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI_CH0 1
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_CAMNOC_HF0 1
+#define MASTER_CAMNOC_HF1 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_MDP_PORT0 4
+#define MASTER_MDP_PORT1 5
+#define MASTER_ROTATOR 6
+#define MASTER_VIDEO_P0 7
+#define MASTER_VIDEO_P1 8
+#define MASTER_VIDEO_PROC 9
+#define SLAVE_MNOC_SF_MEM_NOC 10
+#define SLAVE_MNOC_HF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+
+#define MASTER_SNOC_CFG 0
+#define A1NOC_SNOC_MAS 1
+#define A2NOC_SNOC_MAS 2
+#define MASTER_GEM_NOC_SNOC 3
+#define MASTER_PIMEM 4
+#define MASTER_GIC 5
+#define SLAVE_APPSS 6
+#define SNOC_CNOC_SLV 7
+#define SLAVE_SNOC_GEM_NOC_GC 8
+#define SLAVE_SNOC_GEM_NOC_SF 9
+#define SLAVE_OCIMEM 10
+#define SLAVE_PIMEM 11
+#define SLAVE_SERVICE_SNOC 12
+#define SLAVE_PCIE_0 13
+#define SLAVE_PCIE_1 14
+#define SLAVE_PCIE_2 15
+#define SLAVE_PCIE_3 16
+#define SLAVE_QDSS_STM 17
+#define SLAVE_TCU 18
+
+#define MASTER_MNOC_HF_MEM_NOC_DISPLAY 0
+#define MASTER_MNOC_SF_MEM_NOC_DISPLAY 1
+#define SLAVE_LLCC_DISPLAY 2
+
+#define MASTER_LLCC_DISPLAY 0
+#define SLAVE_EBI_CH0_DISPLAY 1
+
+#define MASTER_MDP_PORT0_DISPLAY 0
+#define MASTER_MDP_PORT1_DISPLAY 1
+#define MASTER_ROTATOR_DISPLAY 2
+#define SLAVE_MNOC_SF_MEM_NOC_DISPLAY 3
+#define SLAVE_MNOC_HF_MEM_NOC_DISPLAY 4
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sc8280xp.h b/include/dt-bindings/interconnect/qcom,sc8280xp.h
new file mode 100644
index 000000000000..a3e5fda7c127
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sc8280xp.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Ltd.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SC8280XP_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SC8280XP_H
+
+/* aggre1_noc */
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_QUP_2 2
+#define MASTER_A1NOC_CFG 3
+#define MASTER_IPA 4
+#define MASTER_EMAC_1 5
+#define MASTER_SDCC_4 6
+#define MASTER_UFS_MEM 7
+#define MASTER_USB3_0 8
+#define MASTER_USB3_1 9
+#define MASTER_USB3_MP 10
+#define MASTER_USB4_0 11
+#define MASTER_USB4_1 12
+#define SLAVE_A1NOC_SNOC 13
+#define SLAVE_USB_NOC_SNOC 14
+#define SLAVE_SERVICE_A1NOC 15
+
+/* aggre2_noc */
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_0 1
+#define MASTER_A2NOC_CFG 2
+#define MASTER_CRYPTO 3
+#define MASTER_SENSORS_PROC 4
+#define MASTER_SP 5
+#define MASTER_EMAC 6
+#define MASTER_PCIE_0 7
+#define MASTER_PCIE_1 8
+#define MASTER_PCIE_2A 9
+#define MASTER_PCIE_2B 10
+#define MASTER_PCIE_3A 11
+#define MASTER_PCIE_3B 12
+#define MASTER_PCIE_4 13
+#define MASTER_QDSS_ETR 14
+#define MASTER_SDCC_2 15
+#define MASTER_UFS_CARD 16
+#define SLAVE_A2NOC_SNOC 17
+#define SLAVE_ANOC_PCIE_GEM_NOC 18
+#define SLAVE_SERVICE_A2NOC 19
+
+/* clk_virt */
+#define MASTER_IPA_CORE 0
+#define MASTER_QUP_CORE_0 1
+#define MASTER_QUP_CORE_1 2
+#define MASTER_QUP_CORE_2 3
+#define SLAVE_IPA_CORE 4
+#define SLAVE_QUP_CORE_0 5
+#define SLAVE_QUP_CORE_1 6
+#define SLAVE_QUP_CORE_2 7
+
+/* config_noc */
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AHB2PHY_0 2
+#define SLAVE_AHB2PHY_1 3
+#define SLAVE_AHB2PHY_2 4
+#define SLAVE_AOSS 5
+#define SLAVE_APPSS 6
+#define SLAVE_CAMERA_CFG 7
+#define SLAVE_CLK_CTL 8
+#define SLAVE_CDSP_CFG 9
+#define SLAVE_CDSP1_CFG 10
+#define SLAVE_RBCPR_CX_CFG 11
+#define SLAVE_RBCPR_MMCX_CFG 12
+#define SLAVE_RBCPR_MX_CFG 13
+#define SLAVE_CPR_NSPCX 14
+#define SLAVE_CRYPTO_0_CFG 15
+#define SLAVE_CX_RDPM 16
+#define SLAVE_DCC_CFG 17
+#define SLAVE_DISPLAY_CFG 18
+#define SLAVE_DISPLAY1_CFG 19
+#define SLAVE_EMAC_CFG 20
+#define SLAVE_EMAC1_CFG 21
+#define SLAVE_GFX3D_CFG 22
+#define SLAVE_HWKM 23
+#define SLAVE_IMEM_CFG 24
+#define SLAVE_IPA_CFG 25
+#define SLAVE_IPC_ROUTER_CFG 26
+#define SLAVE_LPASS 27
+#define SLAVE_MX_RDPM 28
+#define SLAVE_MXC_RDPM 29
+#define SLAVE_PCIE_0_CFG 30
+#define SLAVE_PCIE_1_CFG 31
+#define SLAVE_PCIE_2A_CFG 32
+#define SLAVE_PCIE_2B_CFG 33
+#define SLAVE_PCIE_3A_CFG 34
+#define SLAVE_PCIE_3B_CFG 35
+#define SLAVE_PCIE_4_CFG 36
+#define SLAVE_PCIE_RSC_CFG 37
+#define SLAVE_PDM 38
+#define SLAVE_PIMEM_CFG 39
+#define SLAVE_PKA_WRAPPER_CFG 40
+#define SLAVE_PMU_WRAPPER_CFG 41
+#define SLAVE_QDSS_CFG 42
+#define SLAVE_QSPI_0 43
+#define SLAVE_QUP_0 44
+#define SLAVE_QUP_1 45
+#define SLAVE_QUP_2 46
+#define SLAVE_SDCC_2 47
+#define SLAVE_SDCC_4 48
+#define SLAVE_SECURITY 49
+#define SLAVE_SMMUV3_CFG 50
+#define SLAVE_SMSS_CFG 51
+#define SLAVE_SPSS_CFG 52
+#define SLAVE_TCSR 53
+#define SLAVE_TLMM 54
+#define SLAVE_UFS_CARD_CFG 55
+#define SLAVE_UFS_MEM_CFG 56
+#define SLAVE_USB3_0 57
+#define SLAVE_USB3_1 58
+#define SLAVE_USB3_MP 59
+#define SLAVE_USB4_0 60
+#define SLAVE_USB4_1 61
+#define SLAVE_VENUS_CFG 62
+#define SLAVE_VSENSE_CTRL_CFG 63
+#define SLAVE_VSENSE_CTRL_R_CFG 64
+#define SLAVE_A1NOC_CFG 65
+#define SLAVE_A2NOC_CFG 66
+#define SLAVE_ANOC_PCIE_BRIDGE_CFG 67
+#define SLAVE_DDRSS_CFG 68
+#define SLAVE_CNOC_MNOC_CFG 69
+#define SLAVE_SNOC_CFG 70
+#define SLAVE_SNOC_SF_BRIDGE_CFG 71
+#define SLAVE_IMEM 72
+#define SLAVE_PIMEM 73
+#define SLAVE_SERVICE_CNOC 74
+#define SLAVE_PCIE_0 75
+#define SLAVE_PCIE_1 76
+#define SLAVE_PCIE_2A 77
+#define SLAVE_PCIE_2B 78
+#define SLAVE_PCIE_3A 79
+#define SLAVE_PCIE_3B 80
+#define SLAVE_PCIE_4 81
+#define SLAVE_QDSS_STM 82
+#define SLAVE_SMSS 83
+#define SLAVE_TCU 84
+
+/* dc_noc */
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_GEM_NOC_CFG 2
+
+/* gem_noc */
+#define MASTER_GPU_TCU 0
+#define MASTER_PCIE_TCU 1
+#define MASTER_SYS_TCU 2
+#define MASTER_APPSS_PROC 3
+#define MASTER_COMPUTE_NOC 4
+#define MASTER_COMPUTE_NOC_1 5
+#define MASTER_GEM_NOC_CFG 6
+#define MASTER_GFX3D 7
+#define MASTER_MNOC_HF_MEM_NOC 8
+#define MASTER_MNOC_SF_MEM_NOC 9
+#define MASTER_ANOC_PCIE_GEM_NOC 10
+#define MASTER_SNOC_GC_MEM_NOC 11
+#define MASTER_SNOC_SF_MEM_NOC 12
+#define SLAVE_GEM_NOC_CNOC 13
+#define SLAVE_LLCC 14
+#define SLAVE_GEM_NOC_PCIE_CNOC 15
+#define SLAVE_SERVICE_GEM_NOC_1 16
+#define SLAVE_SERVICE_GEM_NOC_2 17
+#define SLAVE_SERVICE_GEM_NOC 18
+
+/* lpass_ag_noc */
+#define MASTER_CNOC_LPASS_AG_NOC 0
+#define MASTER_LPASS_PROC 1
+#define SLAVE_LPASS_CORE_CFG 2
+#define SLAVE_LPASS_LPI_CFG 3
+#define SLAVE_LPASS_MPU_CFG 4
+#define SLAVE_LPASS_TOP_CFG 5
+#define SLAVE_LPASS_SNOC 6
+#define SLAVE_SERVICES_LPASS_AML_NOC 7
+#define SLAVE_SERVICE_LPASS_AG_NOC 8
+
+/* mc_virt */
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+/* mmss_noc */
+#define MASTER_CAMNOC_HF 0
+#define MASTER_MDP0 1
+#define MASTER_MDP1 2
+#define MASTER_MDP_CORE1_0 3
+#define MASTER_MDP_CORE1_1 4
+#define MASTER_CNOC_MNOC_CFG 5
+#define MASTER_ROTATOR 6
+#define MASTER_ROTATOR_1 7
+#define MASTER_VIDEO_P0 8
+#define MASTER_VIDEO_P1 9
+#define MASTER_VIDEO_PROC 10
+#define MASTER_CAMNOC_ICP 11
+#define MASTER_CAMNOC_SF 12
+#define SLAVE_MNOC_HF_MEM_NOC 13
+#define SLAVE_MNOC_SF_MEM_NOC 14
+#define SLAVE_SERVICE_MNOC 15
+
+/* nspa_noc */
+#define MASTER_CDSP_NOC_CFG 0
+#define MASTER_CDSP_PROC 1
+#define SLAVE_CDSP_MEM_NOC 2
+#define SLAVE_NSP_XFR 3
+#define SLAVE_SERVICE_NSP_NOC 4
+
+/* nspb_noc */
+#define MASTER_CDSPB_NOC_CFG 0
+#define MASTER_CDSP_PROC_B 1
+#define SLAVE_CDSPB_MEM_NOC 2
+#define SLAVE_NSPB_XFR 3
+#define SLAVE_SERVICE_NSPB_NOC 4
+
+/* system_noc */
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define MASTER_USB_NOC_SNOC 2
+#define MASTER_LPASS_ANOC 3
+#define MASTER_SNOC_CFG 4
+#define MASTER_PIMEM 5
+#define MASTER_GIC 6
+#define SLAVE_SNOC_GEM_NOC_GC 7
+#define SLAVE_SNOC_GEM_NOC_SF 8
+#define SLAVE_SERVICE_SNOC 9
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sdm660.h b/include/dt-bindings/interconnect/qcom,sdm660.h
new file mode 100644
index 000000000000..62e8d8670d5e
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sdm660.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* SDM660 interconnect IDs */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDM660_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SDM660_H
+
+/* A2NOC */
+#define MASTER_IPA 0
+#define MASTER_CNOC_A2NOC 1
+#define MASTER_SDCC_1 2
+#define MASTER_SDCC_2 3
+#define MASTER_BLSP_1 4
+#define MASTER_BLSP_2 5
+#define MASTER_UFS 6
+#define MASTER_USB_HS 7
+#define MASTER_USB3 8
+#define MASTER_CRYPTO_C0 9
+#define SLAVE_A2NOC_SNOC 10
+
+/* BIMC */
+#define MASTER_GNOC_BIMC 0
+#define MASTER_OXILI 1
+#define MASTER_MNOC_BIMC 2
+#define MASTER_SNOC_BIMC 3
+#define MASTER_PIMEM 4
+#define SLAVE_EBI 5
+#define SLAVE_HMSS_L3 6
+#define SLAVE_BIMC_SNOC 7
+
+/* CNOC */
+#define MASTER_SNOC_CNOC 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_CNOC_A2NOC 2
+#define SLAVE_MPM 3
+#define SLAVE_PMIC_ARB 4
+#define SLAVE_TLMM_NORTH 5
+#define SLAVE_TCSR 6
+#define SLAVE_PIMEM_CFG 7
+#define SLAVE_IMEM_CFG 8
+#define SLAVE_MESSAGE_RAM 9
+#define SLAVE_GLM 10
+#define SLAVE_BIMC_CFG 11
+#define SLAVE_PRNG 12
+#define SLAVE_SPDM 13
+#define SLAVE_QDSS_CFG 14
+#define SLAVE_CNOC_MNOC_CFG 15
+#define SLAVE_SNOC_CFG 16
+#define SLAVE_QM_CFG 17
+#define SLAVE_CLK_CTL 18
+#define SLAVE_MSS_CFG 19
+#define SLAVE_TLMM_SOUTH 20
+#define SLAVE_UFS_CFG 21
+#define SLAVE_A2NOC_CFG 22
+#define SLAVE_A2NOC_SMMU_CFG 23
+#define SLAVE_GPUSS_CFG 24
+#define SLAVE_AHB2PHY 25
+#define SLAVE_BLSP_1 26
+#define SLAVE_SDCC_1 27
+#define SLAVE_SDCC_2 28
+#define SLAVE_TLMM_CENTER 29
+#define SLAVE_BLSP_2 30
+#define SLAVE_PDM 31
+#define SLAVE_CNOC_MNOC_MMSS_CFG 32
+#define SLAVE_USB_HS 33
+#define SLAVE_USB3_0 34
+#define SLAVE_SRVC_CNOC 35
+
+/* GNOC */
+#define MASTER_APSS_PROC 0
+#define SLAVE_GNOC_BIMC 1
+#define SLAVE_GNOC_SNOC 2
+
+/* MNOC */
+#define MASTER_CPP 0
+#define MASTER_JPEG 1
+#define MASTER_MDP_P0 2
+#define MASTER_MDP_P1 3
+#define MASTER_VENUS 4
+#define MASTER_VFE 5
+#define SLAVE_MNOC_BIMC 6
+#define MASTER_CNOC_MNOC_MMSS_CFG 7
+#define MASTER_CNOC_MNOC_CFG 8
+#define SLAVE_CAMERA_CFG 9
+#define SLAVE_CAMERA_THROTTLE_CFG 10
+#define SLAVE_MISC_CFG 11
+#define SLAVE_VENUS_THROTTLE_CFG 12
+#define SLAVE_VENUS_CFG 13
+#define SLAVE_MMSS_CLK_XPU_CFG 14
+#define SLAVE_MMSS_CLK_CFG 15
+#define SLAVE_MNOC_MPU_CFG 16
+#define SLAVE_DISPLAY_CFG 17
+#define SLAVE_CSI_PHY_CFG 18
+#define SLAVE_DISPLAY_THROTTLE_CFG 19
+#define SLAVE_SMMU_CFG 20
+#define SLAVE_SRVC_MNOC 21
+
+/* SNOC */
+#define MASTER_QDSS_ETR 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_SNOC_CFG 2
+#define MASTER_BIMC_SNOC 3
+#define MASTER_A2NOC_SNOC 4
+#define MASTER_GNOC_SNOC 5
+#define SLAVE_HMSS 6
+#define SLAVE_LPASS 7
+#define SLAVE_WLAN 8
+#define SLAVE_CDSP 9
+#define SLAVE_IPA 10
+#define SLAVE_SNOC_BIMC 11
+#define SLAVE_SNOC_CNOC 12
+#define SLAVE_IMEM 13
+#define SLAVE_PIMEM 14
+#define SLAVE_QDSS_STM 15
+#define SLAVE_SRVC_SNOC 16
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sdm845.h b/include/dt-bindings/interconnect/qcom,sdm845.h
index 7b2393be7361..67b500e24915 100644
--- a/include/dt-bindings/interconnect/qcom,sdm845.h
+++ b/include/dt-bindings/interconnect/qcom,sdm845.h
@@ -10,134 +10,141 @@
#define __DT_BINDINGS_INTERCONNECT_QCOM_SDM845_H
#define MASTER_A1NOC_CFG 0
-#define MASTER_BLSP_1 1
-#define MASTER_TSIF 2
-#define MASTER_SDCC_2 3
-#define MASTER_SDCC_4 4
-#define MASTER_UFS_CARD 5
-#define MASTER_UFS_MEM 6
-#define MASTER_PCIE_0 7
-#define MASTER_A2NOC_CFG 8
-#define MASTER_QDSS_BAM 9
-#define MASTER_BLSP_2 10
-#define MASTER_CNOC_A2NOC 11
-#define MASTER_CRYPTO 12
-#define MASTER_IPA 13
-#define MASTER_PCIE_1 14
-#define MASTER_QDSS_ETR 15
-#define MASTER_USB3_0 16
-#define MASTER_USB3_1 17
-#define MASTER_CAMNOC_HF0_UNCOMP 18
-#define MASTER_CAMNOC_HF1_UNCOMP 19
-#define MASTER_CAMNOC_SF_UNCOMP 20
-#define MASTER_SPDM 21
-#define MASTER_TIC 22
-#define MASTER_SNOC_CNOC 23
-#define MASTER_QDSS_DAP 24
-#define MASTER_CNOC_DC_NOC 25
-#define MASTER_APPSS_PROC 26
-#define MASTER_GNOC_CFG 27
-#define MASTER_LLCC 28
-#define MASTER_TCU_0 29
-#define MASTER_MEM_NOC_CFG 30
-#define MASTER_GNOC_MEM_NOC 31
-#define MASTER_MNOC_HF_MEM_NOC 32
-#define MASTER_MNOC_SF_MEM_NOC 33
-#define MASTER_SNOC_GC_MEM_NOC 34
-#define MASTER_SNOC_SF_MEM_NOC 35
-#define MASTER_GFX3D 36
-#define MASTER_CNOC_MNOC_CFG 37
-#define MASTER_CAMNOC_HF0 38
-#define MASTER_CAMNOC_HF1 39
-#define MASTER_CAMNOC_SF 40
-#define MASTER_MDP0 41
-#define MASTER_MDP1 42
-#define MASTER_ROTATOR 43
-#define MASTER_VIDEO_P0 44
-#define MASTER_VIDEO_P1 45
-#define MASTER_VIDEO_PROC 46
-#define MASTER_SNOC_CFG 47
-#define MASTER_A1NOC_SNOC 48
-#define MASTER_A2NOC_SNOC 49
-#define MASTER_GNOC_SNOC 50
-#define MASTER_MEM_NOC_SNOC 51
-#define MASTER_ANOC_PCIE_SNOC 52
-#define MASTER_PIMEM 53
-#define MASTER_GIC 54
-#define SLAVE_A1NOC_SNOC 55
-#define SLAVE_SERVICE_A1NOC 56
-#define SLAVE_ANOC_PCIE_A1NOC_SNOC 57
-#define SLAVE_A2NOC_SNOC 58
-#define SLAVE_ANOC_PCIE_SNOC 59
-#define SLAVE_SERVICE_A2NOC 60
-#define SLAVE_CAMNOC_UNCOMP 61
-#define SLAVE_A1NOC_CFG 62
-#define SLAVE_A2NOC_CFG 63
-#define SLAVE_AOP 64
-#define SLAVE_AOSS 65
-#define SLAVE_CAMERA_CFG 66
-#define SLAVE_CLK_CTL 67
-#define SLAVE_CDSP_CFG 68
-#define SLAVE_RBCPR_CX_CFG 69
-#define SLAVE_CRYPTO_0_CFG 70
-#define SLAVE_DCC_CFG 71
-#define SLAVE_CNOC_DDRSS 72
-#define SLAVE_DISPLAY_CFG 73
-#define SLAVE_GLM 74
-#define SLAVE_GFX3D_CFG 75
-#define SLAVE_IMEM_CFG 76
-#define SLAVE_IPA_CFG 77
-#define SLAVE_CNOC_MNOC_CFG 78
-#define SLAVE_PCIE_0_CFG 79
-#define SLAVE_PCIE_1_CFG 80
-#define SLAVE_PDM 81
-#define SLAVE_SOUTH_PHY_CFG 82
-#define SLAVE_PIMEM_CFG 83
-#define SLAVE_PRNG 84
-#define SLAVE_QDSS_CFG 85
-#define SLAVE_BLSP_2 86
-#define SLAVE_BLSP_1 87
-#define SLAVE_SDCC_2 88
-#define SLAVE_SDCC_4 89
-#define SLAVE_SNOC_CFG 90
-#define SLAVE_SPDM_WRAPPER 91
-#define SLAVE_SPSS_CFG 92
-#define SLAVE_TCSR 93
-#define SLAVE_TLMM_NORTH 94
-#define SLAVE_TLMM_SOUTH 95
-#define SLAVE_TSIF 96
-#define SLAVE_UFS_CARD_CFG 97
-#define SLAVE_UFS_MEM_CFG 98
-#define SLAVE_USB3_0 99
-#define SLAVE_USB3_1 100
-#define SLAVE_VENUS_CFG 101
-#define SLAVE_VSENSE_CTRL_CFG 102
-#define SLAVE_CNOC_A2NOC 103
-#define SLAVE_SERVICE_CNOC 104
-#define SLAVE_LLCC_CFG 105
-#define SLAVE_MEM_NOC_CFG 106
-#define SLAVE_GNOC_SNOC 107
-#define SLAVE_GNOC_MEM_NOC 108
-#define SLAVE_SERVICE_GNOC 109
-#define SLAVE_EBI1 110
-#define SLAVE_MSS_PROC_MS_MPU_CFG 111
-#define SLAVE_MEM_NOC_GNOC 112
-#define SLAVE_LLCC 113
-#define SLAVE_MEM_NOC_SNOC 114
-#define SLAVE_SERVICE_MEM_NOC 115
-#define SLAVE_MNOC_SF_MEM_NOC 116
-#define SLAVE_MNOC_HF_MEM_NOC 117
-#define SLAVE_SERVICE_MNOC 118
-#define SLAVE_APPSS 119
-#define SLAVE_SNOC_CNOC 120
-#define SLAVE_SNOC_MEM_NOC_GC 121
-#define SLAVE_SNOC_MEM_NOC_SF 122
-#define SLAVE_IMEM 123
-#define SLAVE_PCIE_0 124
-#define SLAVE_PCIE_1 125
-#define SLAVE_PIMEM 126
-#define SLAVE_SERVICE_SNOC 127
-#define SLAVE_QDSS_STM 128
-#define SLAVE_TCU 129
+#define MASTER_TSIF 1
+#define MASTER_SDCC_2 2
+#define MASTER_SDCC_4 3
+#define MASTER_UFS_CARD 4
+#define MASTER_UFS_MEM 5
+#define MASTER_PCIE_0 6
+#define SLAVE_A1NOC_SNOC 7
+#define SLAVE_SERVICE_A1NOC 8
+#define SLAVE_ANOC_PCIE_A1NOC_SNOC 9
+#define MASTER_QUP_1 10
+
+#define MASTER_A2NOC_CFG 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_CNOC_A2NOC 2
+#define MASTER_CRYPTO 3
+#define MASTER_IPA 4
+#define MASTER_PCIE_1 5
+#define MASTER_QDSS_ETR 6
+#define MASTER_USB3_0 7
+#define MASTER_USB3_1 8
+#define SLAVE_A2NOC_SNOC 9
+#define SLAVE_ANOC_PCIE_SNOC 10
+#define SLAVE_SERVICE_A2NOC 11
+#define MASTER_QUP_2 12
+
+#define MASTER_SPDM 0
+#define MASTER_TIC 1
+#define MASTER_SNOC_CNOC 2
+#define MASTER_QDSS_DAP 3
+#define SLAVE_A1NOC_CFG 4
+#define SLAVE_A2NOC_CFG 5
+#define SLAVE_AOP 6
+#define SLAVE_AOSS 7
+#define SLAVE_CAMERA_CFG 8
+#define SLAVE_CLK_CTL 9
+#define SLAVE_CDSP_CFG 10
+#define SLAVE_RBCPR_CX_CFG 11
+#define SLAVE_CRYPTO_0_CFG 12
+#define SLAVE_DCC_CFG 13
+#define SLAVE_CNOC_DDRSS 14
+#define SLAVE_DISPLAY_CFG 15
+#define SLAVE_GLM 16
+#define SLAVE_GFX3D_CFG 17
+#define SLAVE_IMEM_CFG 18
+#define SLAVE_IPA_CFG 19
+#define SLAVE_CNOC_MNOC_CFG 20
+#define SLAVE_PCIE_0_CFG 21
+#define SLAVE_PCIE_1_CFG 22
+#define SLAVE_PDM 23
+#define SLAVE_SOUTH_PHY_CFG 24
+#define SLAVE_PIMEM_CFG 25
+#define SLAVE_PRNG 26
+#define SLAVE_QDSS_CFG 27
+#define SLAVE_BLSP_2 28
+#define SLAVE_BLSP_1 29
+#define SLAVE_SDCC_2 30
+#define SLAVE_SDCC_4 31
+#define SLAVE_SNOC_CFG 32
+#define SLAVE_SPDM_WRAPPER 33
+#define SLAVE_SPSS_CFG 34
+#define SLAVE_TCSR 35
+#define SLAVE_TLMM_NORTH 36
+#define SLAVE_TLMM_SOUTH 37
+#define SLAVE_TSIF 38
+#define SLAVE_UFS_CARD_CFG 39
+#define SLAVE_UFS_MEM_CFG 40
+#define SLAVE_USB3_0 41
+#define SLAVE_USB3_1 42
+#define SLAVE_VENUS_CFG 43
+#define SLAVE_VSENSE_CTRL_CFG 44
+#define SLAVE_CNOC_A2NOC 45
+#define SLAVE_SERVICE_CNOC 46
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_MEM_NOC_CFG 2
+
+#define MASTER_APPSS_PROC 0
+#define MASTER_GNOC_CFG 1
+#define SLAVE_GNOC_SNOC 2
+#define SLAVE_GNOC_MEM_NOC 3
+#define SLAVE_SERVICE_GNOC 4
+
+#define MASTER_TCU_0 0
+#define MASTER_MEM_NOC_CFG 1
+#define MASTER_GNOC_MEM_NOC 2
+#define MASTER_MNOC_HF_MEM_NOC 3
+#define MASTER_MNOC_SF_MEM_NOC 4
+#define MASTER_SNOC_GC_MEM_NOC 5
+#define MASTER_SNOC_SF_MEM_NOC 6
+#define MASTER_GFX3D 7
+#define SLAVE_MSS_PROC_MS_MPU_CFG 8
+#define SLAVE_MEM_NOC_GNOC 9
+#define SLAVE_LLCC 10
+#define SLAVE_MEM_NOC_SNOC 11
+#define SLAVE_SERVICE_MEM_NOC 12
+#define MASTER_LLCC 13
+#define SLAVE_EBI1 14
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_CAMNOC_HF0 1
+#define MASTER_CAMNOC_HF1 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_MDP0 4
+#define MASTER_MDP1 5
+#define MASTER_ROTATOR 6
+#define MASTER_VIDEO_P0 7
+#define MASTER_VIDEO_P1 8
+#define MASTER_VIDEO_PROC 9
+#define SLAVE_MNOC_SF_MEM_NOC 10
+#define SLAVE_MNOC_HF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+#define MASTER_CAMNOC_HF0_UNCOMP 13
+#define MASTER_CAMNOC_HF1_UNCOMP 14
+#define MASTER_CAMNOC_SF_UNCOMP 15
+#define SLAVE_CAMNOC_UNCOMP 16
+
+#define MASTER_SNOC_CFG 0
+#define MASTER_A1NOC_SNOC 1
+#define MASTER_A2NOC_SNOC 2
+#define MASTER_GNOC_SNOC 3
+#define MASTER_MEM_NOC_SNOC 4
+#define MASTER_ANOC_PCIE_SNOC 5
+#define MASTER_PIMEM 6
+#define MASTER_GIC 7
+#define SLAVE_APPSS 8
+#define SLAVE_SNOC_CNOC 9
+#define SLAVE_SNOC_MEM_NOC_GC 10
+#define SLAVE_SNOC_MEM_NOC_SF 11
+#define SLAVE_IMEM 12
+#define SLAVE_PCIE_0 13
+#define SLAVE_PCIE_1 14
+#define SLAVE_PIMEM 15
+#define SLAVE_SERVICE_SNOC 16
+#define SLAVE_QDSS_STM 17
+#define SLAVE_TCU 18
#endif
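
Editor's note: the sdm845 change above replaces one flat ID space with per-NoC numbering, so the same small integers repeat across blocks and a consumer names each endpoint relative to a specific provider node. A minimal consumer sketch, assuming per-NoC providers with #interconnect-cells = <1> (the mem_noc and gpu labels are illustrative, not taken from this commit):

    #include <dt-bindings/interconnect/qcom,sdm845.h>

    &gpu {
            interconnects = <&mem_noc MASTER_GFX3D &mem_noc SLAVE_EBI1>;
            interconnect-names = "gfx-mem";
    };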
diff --git a/include/dt-bindings/interconnect/qcom,sdx55.h b/include/dt-bindings/interconnect/qcom,sdx55.h
new file mode 100644
index 000000000000..bfb6524a2d90
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sdx55.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm SDX55 interconnect IDs
+ *
+ * Copyright (c) 2021, Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDX55_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SDX55_H
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI_CH0 1
+
+#define MASTER_TCU_0 0
+#define MASTER_SNOC_GC_MEM_NOC 1
+#define MASTER_AMPSS_M0 2
+#define SLAVE_LLCC 3
+#define SLAVE_MEM_NOC_SNOC 4
+#define SLAVE_MEM_NOC_PCIE_SNOC 5
+
+#define MASTER_AUDIO 0
+#define MASTER_BLSP_1 1
+#define MASTER_QDSS_BAM 2
+#define MASTER_QPIC 3
+#define MASTER_SNOC_CFG 4
+#define MASTER_SPMI_FETCHER 5
+#define MASTER_ANOC_SNOC 6
+#define MASTER_IPA 7
+#define MASTER_MEM_NOC_SNOC 8
+#define MASTER_MEM_NOC_PCIE_SNOC 9
+#define MASTER_CRYPTO_CORE_0 10
+#define MASTER_EMAC 11
+#define MASTER_IPA_PCIE 12
+#define MASTER_PCIE 13
+#define MASTER_QDSS_ETR 14
+#define MASTER_SDCC_1 15
+#define MASTER_USB3 16
+#define SLAVE_AOP 17
+#define SLAVE_AOSS 18
+#define SLAVE_APPSS 19
+#define SLAVE_AUDIO 20
+#define SLAVE_BLSP_1 21
+#define SLAVE_CLK_CTL 22
+#define SLAVE_CRYPTO_0_CFG 23
+#define SLAVE_CNOC_DDRSS 24
+#define SLAVE_ECC_CFG 25
+#define SLAVE_EMAC_CFG 26
+#define SLAVE_IMEM_CFG 27
+#define SLAVE_IPA_CFG 28
+#define SLAVE_CNOC_MSS 29
+#define SLAVE_PCIE_PARF 30
+#define SLAVE_PDM 31
+#define SLAVE_PRNG 32
+#define SLAVE_QDSS_CFG 33
+#define SLAVE_QPIC 34
+#define SLAVE_SDCC_1 35
+#define SLAVE_SNOC_CFG 36
+#define SLAVE_SPMI_FETCHER 37
+#define SLAVE_SPMI_VGI_COEX 38
+#define SLAVE_TCSR 39
+#define SLAVE_TLMM 40
+#define SLAVE_USB3 41
+#define SLAVE_USB3_PHY_CFG 42
+#define SLAVE_ANOC_SNOC 43
+#define SLAVE_SNOC_MEM_NOC_GC 44
+#define SLAVE_OCIMEM 45
+#define SLAVE_SERVICE_SNOC 46
+#define SLAVE_PCIE_0 47
+#define SLAVE_QDSS_STM 48
+#define SLAVE_TCU 49
+
+#define MASTER_IPA_CORE 0
+#define SLAVE_IPA_CORE 1
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sdx65.h b/include/dt-bindings/interconnect/qcom,sdx65.h
new file mode 100644
index 000000000000..b25288aa7d74
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sdx65.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDX65_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SDX65_H
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_TCU_0 0
+#define MASTER_SNOC_GC_MEM_NOC 1
+#define MASTER_APPSS_PROC 2
+#define SLAVE_LLCC 3
+#define SLAVE_MEM_NOC_SNOC 4
+#define SLAVE_MEM_NOC_PCIE_SNOC 5
+
+#define MASTER_AUDIO 0
+#define MASTER_BLSP_1 1
+#define MASTER_QDSS_BAM 2
+#define MASTER_QPIC 3
+#define MASTER_SNOC_CFG 4
+#define MASTER_SPMI_FETCHER 5
+#define MASTER_ANOC_SNOC 6
+#define MASTER_IPA 7
+#define MASTER_MEM_NOC_SNOC 8
+#define MASTER_MEM_NOC_PCIE_SNOC 9
+#define MASTER_CRYPTO 10
+#define MASTER_IPA_PCIE 11
+#define MASTER_PCIE_0 12
+#define MASTER_QDSS_ETR 13
+#define MASTER_SDCC_1 14
+#define MASTER_USB3 15
+#define SLAVE_AOSS 16
+#define SLAVE_APPSS 17
+#define SLAVE_AUDIO 18
+#define SLAVE_BLSP_1 19
+#define SLAVE_CLK_CTL 20
+#define SLAVE_CRYPTO_0_CFG 21
+#define SLAVE_CNOC_DDRSS 22
+#define SLAVE_ECC_CFG 23
+#define SLAVE_IMEM_CFG 24
+#define SLAVE_IPA_CFG 25
+#define SLAVE_CNOC_MSS 26
+#define SLAVE_PCIE_PARF 27
+#define SLAVE_PDM 28
+#define SLAVE_PRNG 29
+#define SLAVE_QDSS_CFG 30
+#define SLAVE_QPIC 31
+#define SLAVE_SDCC_1 32
+#define SLAVE_SNOC_CFG 33
+#define SLAVE_SPMI_FETCHER 34
+#define SLAVE_SPMI_VGI_COEX 35
+#define SLAVE_TCSR 36
+#define SLAVE_TLMM 37
+#define SLAVE_USB3 38
+#define SLAVE_USB3_PHY_CFG 39
+#define SLAVE_ANOC_SNOC 40
+#define SLAVE_SNOC_MEM_NOC_GC 41
+#define SLAVE_IMEM 42
+#define SLAVE_SERVICE_SNOC 43
+#define SLAVE_PCIE_0 44
+#define SLAVE_QDSS_STM 45
+#define SLAVE_TCU 46
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm6350.h b/include/dt-bindings/interconnect/qcom,sm6350.h
new file mode 100644
index 000000000000..e662cede9aaa
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm6350.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Qualcomm SM6350 interconnect IDs
+ *
+ * Copyright (C) 2022 Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM6350_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM6350_H
+
+#define MASTER_A1NOC_CFG 0
+#define MASTER_QUP_0 1
+#define MASTER_EMMC 2
+#define MASTER_UFS_MEM 3
+#define A1NOC_SNOC_SLV 4
+#define SLAVE_SERVICE_A1NOC 5
+
+#define MASTER_A2NOC_CFG 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_QUP_1 2
+#define MASTER_CRYPTO_CORE_0 3
+#define MASTER_IPA 4
+#define MASTER_QDSS_ETR 5
+#define MASTER_SDCC_2 6
+#define MASTER_USB3 7
+#define A2NOC_SNOC_SLV 8
+#define SLAVE_SERVICE_A2NOC 9
+
+#define MASTER_CAMNOC_HF0_UNCOMP 0
+#define MASTER_CAMNOC_ICP_UNCOMP 1
+#define MASTER_CAMNOC_SF_UNCOMP 2
+#define MASTER_QUP_CORE_0 3
+#define MASTER_QUP_CORE_1 4
+#define MASTER_LLCC 5
+#define SLAVE_CAMNOC_UNCOMP 6
+#define SLAVE_QUP_CORE_0 7
+#define SLAVE_QUP_CORE_1 8
+#define SLAVE_EBI_CH0 9
+
+#define MASTER_NPU 0
+#define MASTER_NPU_PROC 1
+#define SLAVE_CDSP_GEM_NOC 2
+
+#define SNOC_CNOC_MAS 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_A1NOC_CFG 2
+#define SLAVE_A2NOC_CFG 3
+#define SLAVE_AHB2PHY 4
+#define SLAVE_AHB2PHY_2 5
+#define SLAVE_AOSS 6
+#define SLAVE_BOOT_ROM 7
+#define SLAVE_CAMERA_CFG 8
+#define SLAVE_CAMERA_NRT_THROTTLE_CFG 9
+#define SLAVE_CAMERA_RT_THROTTLE_CFG 10
+#define SLAVE_CLK_CTL 11
+#define SLAVE_RBCPR_CX_CFG 12
+#define SLAVE_RBCPR_MX_CFG 13
+#define SLAVE_CRYPTO_0_CFG 14
+#define SLAVE_DCC_CFG 15
+#define SLAVE_CNOC_DDRSS 16
+#define SLAVE_DISPLAY_CFG 17
+#define SLAVE_DISPLAY_THROTTLE_CFG 18
+#define SLAVE_EMMC_CFG 19
+#define SLAVE_GLM 20
+#define SLAVE_GRAPHICS_3D_CFG 21
+#define SLAVE_IMEM_CFG 22
+#define SLAVE_IPA_CFG 23
+#define SLAVE_CNOC_MNOC_CFG 24
+#define SLAVE_CNOC_MSS 25
+#define SLAVE_NPU_CFG 26
+#define SLAVE_PDM 27
+#define SLAVE_PIMEM_CFG 28
+#define SLAVE_PRNG 29
+#define SLAVE_QDSS_CFG 30
+#define SLAVE_QM_CFG 31
+#define SLAVE_QM_MPU_CFG 32
+#define SLAVE_QUP_0 33
+#define SLAVE_QUP_1 34
+#define SLAVE_SDCC_2 35
+#define SLAVE_SECURITY 36
+#define SLAVE_SNOC_CFG 37
+#define SLAVE_TCSR 38
+#define SLAVE_UFS_MEM_CFG 39
+#define SLAVE_USB3 40
+#define SLAVE_VENUS_CFG 41
+#define SLAVE_VENUS_THROTTLE_CFG 42
+#define SLAVE_VSENSE_CTRL_CFG 43
+#define SLAVE_SERVICE_CNOC 44
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_GEM_NOC_CFG 1
+#define SLAVE_LLCC_CFG 2
+
+#define MASTER_AMPSS_M0 0
+#define MASTER_SYS_TCU 1
+#define MASTER_GEM_NOC_CFG 2
+#define MASTER_COMPUTE_NOC 3
+#define MASTER_MNOC_HF_MEM_NOC 4
+#define MASTER_MNOC_SF_MEM_NOC 5
+#define MASTER_SNOC_GC_MEM_NOC 6
+#define MASTER_SNOC_SF_MEM_NOC 7
+#define MASTER_GRAPHICS_3D 8
+#define SLAVE_MCDMA_MS_MPU_CFG 9
+#define SLAVE_MSS_PROC_MS_MPU_CFG 10
+#define SLAVE_GEM_NOC_SNOC 11
+#define SLAVE_LLCC 12
+#define SLAVE_SERVICE_GEM_NOC 13
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_VIDEO_P0 1
+#define MASTER_VIDEO_PROC 2
+#define MASTER_CAMNOC_HF 3
+#define MASTER_CAMNOC_ICP 4
+#define MASTER_CAMNOC_SF 5
+#define MASTER_MDP_PORT0 6
+#define SLAVE_MNOC_HF_MEM_NOC 7
+#define SLAVE_MNOC_SF_MEM_NOC 8
+#define SLAVE_SERVICE_MNOC 9
+
+#define MASTER_NPU_SYS 0
+#define MASTER_NPU_NOC_CFG 1
+#define SLAVE_NPU_CAL_DP0 2
+#define SLAVE_NPU_CP 3
+#define SLAVE_NPU_INT_DMA_BWMON_CFG 4
+#define SLAVE_NPU_DPM 5
+#define SLAVE_ISENSE_CFG 6
+#define SLAVE_NPU_LLM_CFG 7
+#define SLAVE_NPU_TCM 8
+#define SLAVE_NPU_COMPUTE_NOC 9
+#define SLAVE_SERVICE_NPU_NOC 10
+
+#define MASTER_SNOC_CFG 0
+#define A1NOC_SNOC_MAS 1
+#define A2NOC_SNOC_MAS 2
+#define MASTER_GEM_NOC_SNOC 3
+#define MASTER_PIMEM 4
+#define MASTER_GIC 5
+#define SLAVE_APPSS 6
+#define SNOC_CNOC_SLV 7
+#define SLAVE_SNOC_GEM_NOC_GC 8
+#define SLAVE_SNOC_GEM_NOC_SF 9
+#define SLAVE_OCIMEM 10
+#define SLAVE_PIMEM 11
+#define SLAVE_SERVICE_SNOC 12
+#define SLAVE_QDSS_STM 13
+#define SLAVE_TCU 14
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm8150.h b/include/dt-bindings/interconnect/qcom,sm8150.h
new file mode 100644
index 000000000000..a25684680c42
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm8150.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm SM8150 interconnect IDs
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM8150_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM8150_H
+
+#define MASTER_A1NOC_CFG 0
+#define MASTER_QUP_0 1
+#define MASTER_EMAC 2
+#define MASTER_UFS_MEM 3
+#define MASTER_USB3 4
+#define MASTER_USB3_1 5
+#define A1NOC_SNOC_SLV 6
+#define SLAVE_SERVICE_A1NOC 7
+
+#define MASTER_A2NOC_CFG 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_QSPI 2
+#define MASTER_QUP_1 3
+#define MASTER_QUP_2 4
+#define MASTER_SENSORS_AHB 5
+#define MASTER_TSIF 6
+#define MASTER_CNOC_A2NOC 7
+#define MASTER_CRYPTO_CORE_0 8
+#define MASTER_IPA 9
+#define MASTER_PCIE 10
+#define MASTER_PCIE_1 11
+#define MASTER_QDSS_ETR 12
+#define MASTER_SDCC_2 13
+#define MASTER_SDCC_4 14
+#define A2NOC_SNOC_SLV 15
+#define SLAVE_ANOC_PCIE_GEM_NOC 16
+#define SLAVE_SERVICE_A2NOC 17
+
+#define MASTER_CAMNOC_HF0_UNCOMP 0
+#define MASTER_CAMNOC_HF1_UNCOMP 1
+#define MASTER_CAMNOC_SF_UNCOMP 2
+#define SLAVE_CAMNOC_UNCOMP 3
+
+#define MASTER_NPU 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define MASTER_SPDM 0
+#define SNOC_CNOC_MAS 1
+#define MASTER_QDSS_DAP 2
+#define SLAVE_A1NOC_CFG 3
+#define SLAVE_A2NOC_CFG 4
+#define SLAVE_AHB2PHY_SOUTH 5
+#define SLAVE_AOP 6
+#define SLAVE_AOSS 7
+#define SLAVE_CAMERA_CFG 8
+#define SLAVE_CLK_CTL 9
+#define SLAVE_CDSP_CFG 10
+#define SLAVE_RBCPR_CX_CFG 11
+#define SLAVE_RBCPR_MMCX_CFG 12
+#define SLAVE_RBCPR_MX_CFG 13
+#define SLAVE_CRYPTO_0_CFG 14
+#define SLAVE_CNOC_DDRSS 15
+#define SLAVE_DISPLAY_CFG 16
+#define SLAVE_EMAC_CFG 17
+#define SLAVE_GLM 18
+#define SLAVE_GRAPHICS_3D_CFG 19
+#define SLAVE_IMEM_CFG 20
+#define SLAVE_IPA_CFG 21
+#define SLAVE_CNOC_MNOC_CFG 22
+#define SLAVE_NPU_CFG 23
+#define SLAVE_PCIE_0_CFG 24
+#define SLAVE_PCIE_1_CFG 25
+#define SLAVE_NORTH_PHY_CFG 26
+#define SLAVE_PIMEM_CFG 27
+#define SLAVE_PRNG 28
+#define SLAVE_QDSS_CFG 29
+#define SLAVE_QSPI 30
+#define SLAVE_QUP_2 31
+#define SLAVE_QUP_1 32
+#define SLAVE_QUP_0 33
+#define SLAVE_SDCC_2 34
+#define SLAVE_SDCC_4 35
+#define SLAVE_SNOC_CFG 36
+#define SLAVE_SPDM_WRAPPER 37
+#define SLAVE_SPSS_CFG 38
+#define SLAVE_SSC_CFG 39
+#define SLAVE_TCSR 40
+#define SLAVE_TLMM_EAST 41
+#define SLAVE_TLMM_NORTH 42
+#define SLAVE_TLMM_SOUTH 43
+#define SLAVE_TLMM_WEST 44
+#define SLAVE_TSIF 45
+#define SLAVE_UFS_CARD_CFG 46
+#define SLAVE_UFS_MEM_CFG 47
+#define SLAVE_USB3 48
+#define SLAVE_USB3_1 49
+#define SLAVE_VENUS_CFG 50
+#define SLAVE_VSENSE_CTRL_CFG 51
+#define SLAVE_CNOC_A2NOC 52
+#define SLAVE_SERVICE_CNOC 53
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_GEM_NOC_CFG 2
+
+#define MASTER_AMPSS_M0 0
+#define MASTER_GPU_TCU 1
+#define MASTER_SYS_TCU 2
+#define MASTER_GEM_NOC_CFG 3
+#define MASTER_COMPUTE_NOC 4
+#define MASTER_GRAPHICS_3D 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_GEM_NOC_PCIE_SNOC 8
+#define MASTER_SNOC_GC_MEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define MASTER_ECC 11
+#define SLAVE_MSS_PROC_MS_MPU_CFG 12
+#define SLAVE_ECC 13
+#define SLAVE_GEM_NOC_SNOC 14
+#define SLAVE_LLCC 15
+#define SLAVE_SERVICE_GEM_NOC 16
+
+#define MASTER_IPA_CORE 0
+#define SLAVE_IPA_CORE 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI_CH0 1
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_CAMNOC_HF0 1
+#define MASTER_CAMNOC_HF1 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_MDP_PORT0 4
+#define MASTER_MDP_PORT1 5
+#define MASTER_ROTATOR 6
+#define MASTER_VIDEO_P0 7
+#define MASTER_VIDEO_P1 8
+#define MASTER_VIDEO_PROC 9
+#define SLAVE_MNOC_SF_MEM_NOC 10
+#define SLAVE_MNOC_HF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+
+#define MASTER_SNOC_CFG 0
+#define A1NOC_SNOC_MAS 1
+#define A2NOC_SNOC_MAS 2
+#define MASTER_GEM_NOC_SNOC 3
+#define MASTER_PIMEM 4
+#define MASTER_GIC 5
+#define SLAVE_APPSS 6
+#define SNOC_CNOC_SLV 7
+#define SLAVE_SNOC_GEM_NOC_GC 8
+#define SLAVE_SNOC_GEM_NOC_SF 9
+#define SLAVE_OCIMEM 10
+#define SLAVE_PIMEM 11
+#define SLAVE_SERVICE_SNOC 12
+#define SLAVE_PCIE_0 13
+#define SLAVE_PCIE_1 14
+#define SLAVE_QDSS_STM 15
+#define SLAVE_TCU 16
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm8250.h b/include/dt-bindings/interconnect/qcom,sm8250.h
new file mode 100644
index 000000000000..1b4d9fbe888d
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm8250.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm SM8250 interconnect IDs
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM8250_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM8250_H
+
+#define MASTER_A1NOC_CFG 0
+#define MASTER_QSPI_0 1
+#define MASTER_QUP_1 2
+#define MASTER_QUP_2 3
+#define MASTER_TSIF 4
+#define MASTER_PCIE_2 5
+#define MASTER_SDCC_4 6
+#define MASTER_UFS_MEM 7
+#define MASTER_USB3 8
+#define MASTER_USB3_1 9
+#define A1NOC_SNOC_SLV 10
+#define SLAVE_ANOC_PCIE_GEM_NOC_1 11
+#define SLAVE_SERVICE_A1NOC 12
+
+#define MASTER_A2NOC_CFG 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_QUP_0 2
+#define MASTER_CNOC_A2NOC 3
+#define MASTER_CRYPTO_CORE_0 4
+#define MASTER_IPA 5
+#define MASTER_PCIE 6
+#define MASTER_PCIE_1 7
+#define MASTER_QDSS_ETR 8
+#define MASTER_SDCC_2 9
+#define MASTER_UFS_CARD 10
+#define A2NOC_SNOC_SLV 11
+#define SLAVE_ANOC_PCIE_GEM_NOC 12
+#define SLAVE_SERVICE_A2NOC 13
+
+#define MASTER_NPU 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define SNOC_CNOC_MAS 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_A1NOC_CFG 2
+#define SLAVE_A2NOC_CFG 3
+#define SLAVE_AHB2PHY_SOUTH 4
+#define SLAVE_AHB2PHY_NORTH 5
+#define SLAVE_AOSS 6
+#define SLAVE_CAMERA_CFG 7
+#define SLAVE_CLK_CTL 8
+#define SLAVE_CDSP_CFG 9
+#define SLAVE_RBCPR_CX_CFG 10
+#define SLAVE_RBCPR_MMCX_CFG 11
+#define SLAVE_RBCPR_MX_CFG 12
+#define SLAVE_CRYPTO_0_CFG 13
+#define SLAVE_CX_RDPM 14
+#define SLAVE_DCC_CFG 15
+#define SLAVE_CNOC_DDRSS 16
+#define SLAVE_DISPLAY_CFG 17
+#define SLAVE_GRAPHICS_3D_CFG 18
+#define SLAVE_IMEM_CFG 19
+#define SLAVE_IPA_CFG 20
+#define SLAVE_IPC_ROUTER_CFG 21
+#define SLAVE_LPASS 22
+#define SLAVE_CNOC_MNOC_CFG 23
+#define SLAVE_NPU_CFG 24
+#define SLAVE_PCIE_0_CFG 25
+#define SLAVE_PCIE_1_CFG 26
+#define SLAVE_PCIE_2_CFG 27
+#define SLAVE_PDM 28
+#define SLAVE_PIMEM_CFG 29
+#define SLAVE_PRNG 30
+#define SLAVE_QDSS_CFG 31
+#define SLAVE_QSPI_0 32
+#define SLAVE_QUP_0 33
+#define SLAVE_QUP_1 34
+#define SLAVE_QUP_2 35
+#define SLAVE_SDCC_2 36
+#define SLAVE_SDCC_4 37
+#define SLAVE_SNOC_CFG 38
+#define SLAVE_TCSR 39
+#define SLAVE_TLMM_NORTH 40
+#define SLAVE_TLMM_SOUTH 41
+#define SLAVE_TLMM_WEST 42
+#define SLAVE_TSIF 43
+#define SLAVE_UFS_CARD_CFG 44
+#define SLAVE_UFS_MEM_CFG 45
+#define SLAVE_USB3 46
+#define SLAVE_USB3_1 47
+#define SLAVE_VENUS_CFG 48
+#define SLAVE_VSENSE_CTRL_CFG 49
+#define SLAVE_CNOC_A2NOC 50
+#define SLAVE_SERVICE_CNOC 51
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_GEM_NOC_CFG 2
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_AMPSS_M0 2
+#define MASTER_GEM_NOC_CFG 3
+#define MASTER_COMPUTE_NOC 4
+#define MASTER_GRAPHICS_3D 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_ANOC_PCIE_GEM_NOC 8
+#define MASTER_SNOC_GC_MEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define SLAVE_GEM_NOC_SNOC 11
+#define SLAVE_LLCC 12
+#define SLAVE_MEM_NOC_PCIE_SNOC 13
+#define SLAVE_SERVICE_GEM_NOC_1 14
+#define SLAVE_SERVICE_GEM_NOC_2 15
+#define SLAVE_SERVICE_GEM_NOC 16
+
+#define MASTER_IPA_CORE 0
+#define SLAVE_IPA_CORE 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI_CH0 1
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_CAMNOC_HF 1
+#define MASTER_CAMNOC_ICP 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_VIDEO_P0 4
+#define MASTER_VIDEO_P1 5
+#define MASTER_VIDEO_PROC 6
+#define MASTER_MDP_PORT0 7
+#define MASTER_MDP_PORT1 8
+#define MASTER_ROTATOR 9
+#define SLAVE_MNOC_HF_MEM_NOC 10
+#define SLAVE_MNOC_SF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+
+#define MASTER_NPU_SYS 0
+#define MASTER_NPU_CDP 1
+#define MASTER_NPU_NOC_CFG 2
+#define SLAVE_NPU_CAL_DP0 3
+#define SLAVE_NPU_CAL_DP1 4
+#define SLAVE_NPU_CP 5
+#define SLAVE_NPU_INT_DMA_BWMON_CFG 6
+#define SLAVE_NPU_DPM 7
+#define SLAVE_ISENSE_CFG 8
+#define SLAVE_NPU_LLM_CFG 9
+#define SLAVE_NPU_TCM 10
+#define SLAVE_NPU_COMPUTE_NOC 11
+#define SLAVE_SERVICE_NPU_NOC 12
+
+#define MASTER_SNOC_CFG 0
+#define A1NOC_SNOC_MAS 1
+#define A2NOC_SNOC_MAS 2
+#define MASTER_GEM_NOC_SNOC 3
+#define MASTER_GEM_NOC_PCIE_SNOC 4
+#define MASTER_PIMEM 5
+#define MASTER_GIC 6
+#define SLAVE_APPSS 7
+#define SNOC_CNOC_SLV 8
+#define SLAVE_SNOC_GEM_NOC_GC 9
+#define SLAVE_SNOC_GEM_NOC_SF 10
+#define SLAVE_OCIMEM 11
+#define SLAVE_PIMEM 12
+#define SLAVE_SERVICE_SNOC 13
+#define SLAVE_PCIE_0 14
+#define SLAVE_PCIE_1 15
+#define SLAVE_PCIE_2 16
+#define SLAVE_QDSS_STM 17
+#define SLAVE_TCU 18
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm8350.h b/include/dt-bindings/interconnect/qcom,sm8350.h
new file mode 100644
index 000000000000..c7f7ed315aeb
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm8350.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm SM8350 interconnect IDs
+ *
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM8350_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM8350_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_A1NOC_CFG 2
+#define MASTER_SDCC_4 3
+#define MASTER_UFS_MEM 4
+#define MASTER_USB3_0 5
+#define MASTER_USB3_1 6
+#define SLAVE_A1NOC_SNOC 7
+#define SLAVE_SERVICE_A1NOC 8
+
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_0 1
+#define MASTER_QUP_2 2
+#define MASTER_A2NOC_CFG 3
+#define MASTER_CRYPTO 4
+#define MASTER_IPA 5
+#define MASTER_PCIE_0 6
+#define MASTER_PCIE_1 7
+#define MASTER_QDSS_ETR 8
+#define MASTER_SDCC_2 9
+#define MASTER_UFS_CARD 10
+#define SLAVE_A2NOC_SNOC 11
+#define SLAVE_ANOC_PCIE_GEM_NOC 12
+#define SLAVE_SERVICE_A2NOC 13
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define MASTER_QDSS_DAP 2
+#define SLAVE_AHB2PHY_SOUTH 3
+#define SLAVE_AHB2PHY_NORTH 4
+#define SLAVE_AOSS 5
+#define SLAVE_APPSS 6
+#define SLAVE_CAMERA_CFG 7
+#define SLAVE_CLK_CTL 8
+#define SLAVE_CDSP_CFG 9
+#define SLAVE_RBCPR_CX_CFG 10
+#define SLAVE_RBCPR_MMCX_CFG 11
+#define SLAVE_RBCPR_MX_CFG 12
+#define SLAVE_CRYPTO_0_CFG 13
+#define SLAVE_CX_RDPM 14
+#define SLAVE_DCC_CFG 15
+#define SLAVE_DISPLAY_CFG 16
+#define SLAVE_GFX3D_CFG 17
+#define SLAVE_HWKM 18
+#define SLAVE_IMEM_CFG 19
+#define SLAVE_IPA_CFG 20
+#define SLAVE_IPC_ROUTER_CFG 21
+#define SLAVE_LPASS 22
+#define SLAVE_CNOC_MSS 23
+#define SLAVE_MX_RDPM 24
+#define SLAVE_PCIE_0_CFG 25
+#define SLAVE_PCIE_1_CFG 26
+#define SLAVE_PDM 27
+#define SLAVE_PIMEM_CFG 28
+#define SLAVE_PKA_WRAPPER_CFG 29
+#define SLAVE_PMU_WRAPPER_CFG 30
+#define SLAVE_QDSS_CFG 31
+#define SLAVE_QSPI_0 32
+#define SLAVE_QUP_0 33
+#define SLAVE_QUP_1 34
+#define SLAVE_QUP_2 35
+#define SLAVE_SDCC_2 36
+#define SLAVE_SDCC_4 37
+#define SLAVE_SECURITY 38
+#define SLAVE_SPSS_CFG 39
+#define SLAVE_TCSR 40
+#define SLAVE_TLMM 41
+#define SLAVE_UFS_CARD_CFG 42
+#define SLAVE_UFS_MEM_CFG 43
+#define SLAVE_USB3_0 44
+#define SLAVE_USB3_1 45
+#define SLAVE_VENUS_CFG 46
+#define SLAVE_VSENSE_CTRL_CFG 47
+#define SLAVE_A1NOC_CFG 48
+#define SLAVE_A2NOC_CFG 49
+#define SLAVE_DDRSS_CFG 50
+#define SLAVE_CNOC_MNOC_CFG 51
+#define SLAVE_SNOC_CFG 52
+#define SLAVE_BOOT_IMEM 53
+#define SLAVE_IMEM 54
+#define SLAVE_PIMEM 55
+#define SLAVE_SERVICE_CNOC 56
+#define SLAVE_PCIE_0 57
+#define SLAVE_PCIE_1 58
+#define SLAVE_QDSS_STM 59
+#define SLAVE_TCU 60
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_GEM_NOC_CFG 2
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_APPSS_PROC 2
+#define MASTER_COMPUTE_NOC 3
+#define MASTER_GEM_NOC_CFG 4
+#define MASTER_GFX3D 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_ANOC_PCIE_GEM_NOC 8
+#define MASTER_SNOC_GC_MEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define SLAVE_MSS_PROC_MS_MPU_CFG 11
+#define SLAVE_MCDMA_MS_MPU_CFG 12
+#define SLAVE_GEM_NOC_CNOC 13
+#define SLAVE_LLCC 14
+#define SLAVE_MEM_NOC_PCIE_SNOC 15
+#define SLAVE_SERVICE_GEM_NOC_1 16
+#define SLAVE_SERVICE_GEM_NOC_2 17
+#define SLAVE_SERVICE_GEM_NOC 18
+#define MASTER_MNOC_HF_MEM_NOC_DISP 19
+#define MASTER_MNOC_SF_MEM_NOC_DISP 20
+#define SLAVE_LLCC_DISP 21
+
+#define MASTER_CNOC_LPASS_AG_NOC 0
+#define SLAVE_LPASS_CORE_CFG 1
+#define SLAVE_LPASS_LPI_CFG 2
+#define SLAVE_LPASS_MPU_CFG 3
+#define SLAVE_LPASS_TOP_CFG 4
+#define SLAVE_SERVICES_LPASS_AML_NOC 5
+#define SLAVE_SERVICE_LPASS_AG_NOC 6
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+#define MASTER_LLCC_DISP 2
+#define SLAVE_EBI1_DISP 3
+
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_ICP 1
+#define MASTER_CAMNOC_SF 2
+#define MASTER_CNOC_MNOC_CFG 3
+#define MASTER_VIDEO_P0 4
+#define MASTER_VIDEO_P1 5
+#define MASTER_VIDEO_PROC 6
+#define MASTER_MDP0 7
+#define MASTER_MDP1 8
+#define MASTER_ROTATOR 9
+#define SLAVE_MNOC_HF_MEM_NOC 10
+#define SLAVE_MNOC_SF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+#define MASTER_MDP0_DISP 13
+#define MASTER_MDP1_DISP 14
+#define MASTER_ROTATOR_DISP 15
+#define SLAVE_MNOC_HF_MEM_NOC_DISP 16
+#define SLAVE_MNOC_SF_MEM_NOC_DISP 17
+
+#define MASTER_CDSP_NOC_CFG 0
+#define MASTER_CDSP_PROC 1
+#define SLAVE_CDSP_MEM_NOC 2
+#define SLAVE_SERVICE_NSP_NOC 3
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define MASTER_SNOC_CFG 2
+#define MASTER_PIMEM 3
+#define MASTER_GIC 4
+#define SLAVE_SNOC_GEM_NOC_GC 5
+#define SLAVE_SNOC_GEM_NOC_SF 6
+#define SLAVE_SERVICE_SNOC 7
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm8450.h b/include/dt-bindings/interconnect/qcom,sm8450.h
new file mode 100644
index 000000000000..8f3c5e1fb4c4
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm8450.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM8450_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM8450_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_A1NOC_CFG 2
+#define MASTER_SDCC_4 3
+#define MASTER_UFS_MEM 4
+#define MASTER_USB3_0 5
+#define SLAVE_A1NOC_SNOC 6
+#define SLAVE_SERVICE_A1NOC 7
+
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_0 1
+#define MASTER_QUP_2 2
+#define MASTER_A2NOC_CFG 3
+#define MASTER_CRYPTO 4
+#define MASTER_IPA 5
+#define MASTER_SENSORS_PROC 6
+#define MASTER_SP 7
+#define MASTER_QDSS_ETR 8
+#define MASTER_QDSS_ETR_1 9
+#define MASTER_SDCC_2 10
+#define SLAVE_A2NOC_SNOC 11
+#define SLAVE_SERVICE_A2NOC 12
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AHB2PHY_SOUTH 2
+#define SLAVE_AHB2PHY_NORTH 3
+#define SLAVE_AOSS 4
+#define SLAVE_CAMERA_CFG 5
+#define SLAVE_CLK_CTL 6
+#define SLAVE_CDSP_CFG 7
+#define SLAVE_RBCPR_CX_CFG 8
+#define SLAVE_RBCPR_MMCX_CFG 9
+#define SLAVE_RBCPR_MXA_CFG 10
+#define SLAVE_RBCPR_MXC_CFG 11
+#define SLAVE_CRYPTO_0_CFG 12
+#define SLAVE_CX_RDPM 13
+#define SLAVE_DISPLAY_CFG 14
+#define SLAVE_GFX3D_CFG 15
+#define SLAVE_IMEM_CFG 16
+#define SLAVE_IPA_CFG 17
+#define SLAVE_IPC_ROUTER_CFG 18
+#define SLAVE_LPASS 19
+#define SLAVE_CNOC_MSS 20
+#define SLAVE_MX_RDPM 21
+#define SLAVE_PCIE_0_CFG 22
+#define SLAVE_PCIE_1_CFG 23
+#define SLAVE_PDM 24
+#define SLAVE_PIMEM_CFG 25
+#define SLAVE_PRNG 26
+#define SLAVE_QDSS_CFG 27
+#define SLAVE_QSPI_0 28
+#define SLAVE_QUP_0 29
+#define SLAVE_QUP_1 30
+#define SLAVE_QUP_2 31
+#define SLAVE_SDCC_2 32
+#define SLAVE_SDCC_4 33
+#define SLAVE_SPSS_CFG 34
+#define SLAVE_TCSR 35
+#define SLAVE_TLMM 36
+#define SLAVE_TME_CFG 37
+#define SLAVE_UFS_MEM_CFG 38
+#define SLAVE_USB3_0 39
+#define SLAVE_VENUS_CFG 40
+#define SLAVE_VSENSE_CTRL_CFG 41
+#define SLAVE_A1NOC_CFG 42
+#define SLAVE_A2NOC_CFG 43
+#define SLAVE_DDRSS_CFG 44
+#define SLAVE_CNOC_MNOC_CFG 45
+#define SLAVE_PCIE_ANOC_CFG 46
+#define SLAVE_SNOC_CFG 47
+#define SLAVE_IMEM 48
+#define SLAVE_PIMEM 49
+#define SLAVE_SERVICE_CNOC 50
+#define SLAVE_PCIE_0 51
+#define SLAVE_PCIE_1 52
+#define SLAVE_QDSS_STM 53
+#define SLAVE_TCU 54
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_APPSS_PROC 2
+#define MASTER_GFX3D 3
+#define MASTER_MSS_PROC 4
+#define MASTER_MNOC_HF_MEM_NOC 5
+#define MASTER_MNOC_SF_MEM_NOC 6
+#define MASTER_COMPUTE_NOC 7
+#define MASTER_ANOC_PCIE_GEM_NOC 8
+#define MASTER_SNOC_GC_MEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define SLAVE_GEM_NOC_CNOC 11
+#define SLAVE_LLCC 12
+#define SLAVE_MEM_NOC_PCIE_SNOC 13
+#define MASTER_MNOC_HF_MEM_NOC_DISP 14
+#define MASTER_MNOC_SF_MEM_NOC_DISP 15
+#define MASTER_ANOC_PCIE_GEM_NOC_DISP 16
+#define SLAVE_LLCC_DISP 17
+
+#define MASTER_CNOC_LPASS_AG_NOC 0
+#define MASTER_LPASS_PROC 1
+#define SLAVE_LPASS_CORE_CFG 2
+#define SLAVE_LPASS_LPI_CFG 3
+#define SLAVE_LPASS_MPU_CFG 4
+#define SLAVE_LPASS_TOP_CFG 5
+#define SLAVE_LPASS_SNOC 6
+#define SLAVE_SERVICES_LPASS_AML_NOC 7
+#define SLAVE_SERVICE_LPASS_AG_NOC 8
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+#define MASTER_LLCC_DISP 2
+#define SLAVE_EBI1_DISP 3
+
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_ICP 1
+#define MASTER_CAMNOC_SF 2
+#define MASTER_MDP 3
+#define MASTER_CNOC_MNOC_CFG 4
+#define MASTER_ROTATOR 5
+#define MASTER_CDSP_HCP 6
+#define MASTER_VIDEO 7
+#define MASTER_VIDEO_CV_PROC 8
+#define MASTER_VIDEO_PROC 9
+#define MASTER_VIDEO_V_PROC 10
+#define SLAVE_MNOC_HF_MEM_NOC 11
+#define SLAVE_MNOC_SF_MEM_NOC 12
+#define SLAVE_SERVICE_MNOC 13
+#define MASTER_MDP_DISP 14
+#define MASTER_ROTATOR_DISP 15
+#define SLAVE_MNOC_HF_MEM_NOC_DISP 16
+#define SLAVE_MNOC_SF_MEM_NOC_DISP 17
+
+#define MASTER_CDSP_NOC_CFG 0
+#define MASTER_CDSP_PROC 1
+#define SLAVE_CDSP_MEM_NOC 2
+#define SLAVE_SERVICE_NSP_NOC 3
+
+#define MASTER_PCIE_ANOC_CFG 0
+#define MASTER_PCIE_0 1
+#define MASTER_PCIE_1 2
+#define SLAVE_ANOC_PCIE_GEM_NOC 3
+#define SLAVE_SERVICE_PCIE_ANOC 4
+
+#define MASTER_GIC_AHB 0
+#define MASTER_A1NOC_SNOC 1
+#define MASTER_A2NOC_SNOC 2
+#define MASTER_LPASS_ANOC 3
+#define MASTER_SNOC_CFG 4
+#define MASTER_PIMEM 5
+#define MASTER_GIC 6
+#define SLAVE_SNOC_GEM_NOC_GC 7
+#define SLAVE_SNOC_GEM_NOC_SF 8
+#define SLAVE_SERVICE_SNOC 9
+
+#endif
diff --git a/include/dt-bindings/interrupt-controller/apple-aic.h b/include/dt-bindings/interrupt-controller/apple-aic.h
new file mode 100644
index 000000000000..bf3aac0e5491
--- /dev/null
+++ b/include/dt-bindings/interrupt-controller/apple-aic.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
+#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_APPLE_AIC_H
+#define _DT_BINDINGS_INTERRUPT_CONTROLLER_APPLE_AIC_H
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+#define AIC_IRQ 0
+#define AIC_FIQ 1
+
+#define AIC_TMR_HV_PHYS 0
+#define AIC_TMR_HV_VIRT 1
+#define AIC_TMR_GUEST_PHYS 2
+#define AIC_TMR_GUEST_VIRT 3
+#define AIC_CPU_PMU_E 4
+#define AIC_CPU_PMU_P 5
+
+#endif
diff --git a/include/dt-bindings/interrupt-controller/irqc-rzg2l.h b/include/dt-bindings/interrupt-controller/irqc-rzg2l.h
new file mode 100644
index 000000000000..34ce778885a1
--- /dev/null
+++ b/include/dt-bindings/interrupt-controller/irqc-rzg2l.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for Renesas RZ/G2L family IRQC bindings.
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ *
+ */
+
+#ifndef __DT_BINDINGS_IRQC_RZG2L_H
+#define __DT_BINDINGS_IRQC_RZG2L_H
+
+/* NMI maps to SPI0 */
+#define RZG2L_NMI 0
+
+/* IRQ0-7 map to SPI1-8 */
+#define RZG2L_IRQ0 1
+#define RZG2L_IRQ1 2
+#define RZG2L_IRQ2 3
+#define RZG2L_IRQ3 4
+#define RZG2L_IRQ4 5
+#define RZG2L_IRQ5 6
+#define RZG2L_IRQ6 7
+#define RZG2L_IRQ7 8
+
+#endif /* __DT_BINDINGS_IRQC_RZG2L_H */
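Editor's note: a minimal sketch, not part of the patch, checking the property the two comments above describe — each constant's value is also its SPI number, since RZG2L_NMI maps to SPI0 and RZG2L_IRQn maps to SPI(n + 1). The assertion text is our own.

#include <dt-bindings/interrupt-controller/irqc-rzg2l.h>

_Static_assert(RZG2L_NMI == 0, "NMI is SPI0");
_Static_assert(RZG2L_IRQ0 == 1 && RZG2L_IRQ7 == 8,
	       "IRQn is SPI(n + 1), so the binding value doubles as the SPI index");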
diff --git a/include/dt-bindings/leds/common.h b/include/dt-bindings/leds/common.h
index 9e1256a7c1bf..9a0d33d027ff 100644
--- a/include/dt-bindings/leds/common.h
+++ b/include/dt-bindings/leds/common.h
@@ -6,6 +6,7 @@
* Author: Jacek Anaszewski <j.anaszewski@samsung.com>
*
* Copyright (C) 2019 Jacek Anaszewski <jacek.anaszewski@gmail.com>
+ * Copyright (C) 2020 Pavel Machek <pavel@ucw.cz>
*/
#ifndef __DT_BINDINGS_LEDS_H
@@ -29,19 +30,56 @@
#define LED_COLOR_ID_VIOLET 5
#define LED_COLOR_ID_YELLOW 6
#define LED_COLOR_ID_IR 7
-#define LED_COLOR_ID_MAX 8
+#define LED_COLOR_ID_MULTI 8 /* For multicolor LEDs */
+#define LED_COLOR_ID_RGB 9 /* For multicolor LEDs that can do arbitrary color,
+ so this would include RGBW and similar */
+#define LED_COLOR_ID_PURPLE 10
+#define LED_COLOR_ID_ORANGE 11
+#define LED_COLOR_ID_PINK 12
+#define LED_COLOR_ID_CYAN 13
+#define LED_COLOR_ID_LIME 14
+#define LED_COLOR_ID_MAX 15
/* Standard LED functions */
+/* Keyboard LEDs, usually named like "input4::capslock" etc. */
+/* Obsolete equivalent: "shift-key-light" */
+#define LED_FUNCTION_CAPSLOCK "capslock"
+#define LED_FUNCTION_SCROLLLOCK "scrolllock"
+#define LED_FUNCTION_NUMLOCK "numlock"
+/* Obsolete equivalents: "tpacpi::thinklight" (IBM/Lenovo Thinkpads),
+ "lp5523:kb{1,2,3,4,5,6}" (Nokia N900) */
+#define LED_FUNCTION_KBD_BACKLIGHT "kbd_backlight"
+
+/* System LEDs, usually found on the system body.
+ "platform::mute" (etc.) is sometimes seen; ":mute" would be better */
+#define LED_FUNCTION_POWER "power"
+#define LED_FUNCTION_DISK "disk"
+
+/* Obsolete: "platform:*:charging" (allwinner sun50i) */
+#define LED_FUNCTION_CHARGING "charging"
+/* Used for RGB notification LEDs common on phones.
+ Obsolete equivalents: "status-led:{red,green,blue}" (Motorola Droid 4),
+ "lp5523:{r,g,b}" (Nokia N900) */
+#define LED_FUNCTION_STATUS "status"
+
+#define LED_FUNCTION_MICMUTE "micmute"
+#define LED_FUNCTION_MUTE "mute"
+
+/* Used for player LEDs as found on game controllers from e.g. Nintendo, Sony. */
+#define LED_FUNCTION_PLAYER1 "player-1"
+#define LED_FUNCTION_PLAYER2 "player-2"
+#define LED_FUNCTION_PLAYER3 "player-3"
+#define LED_FUNCTION_PLAYER4 "player-4"
+#define LED_FUNCTION_PLAYER5 "player-5"
+
+/* Miscellaneous functions. Use the functions above if you can. */
#define LED_FUNCTION_ACTIVITY "activity"
#define LED_FUNCTION_ALARM "alarm"
#define LED_FUNCTION_BACKLIGHT "backlight"
#define LED_FUNCTION_BLUETOOTH "bluetooth"
#define LED_FUNCTION_BOOT "boot"
#define LED_FUNCTION_CPU "cpu"
-#define LED_FUNCTION_CAPSLOCK "capslock"
-#define LED_FUNCTION_CHARGING "charging"
#define LED_FUNCTION_DEBUG "debug"
-#define LED_FUNCTION_DISK "disk"
#define LED_FUNCTION_DISK_ACTIVITY "disk-activity"
#define LED_FUNCTION_DISK_ERR "disk-err"
#define LED_FUNCTION_DISK_READ "disk-read"
@@ -50,21 +88,14 @@
#define LED_FUNCTION_FLASH "flash"
#define LED_FUNCTION_HEARTBEAT "heartbeat"
#define LED_FUNCTION_INDICATOR "indicator"
-#define LED_FUNCTION_KBD_BACKLIGHT "kbd_backlight"
#define LED_FUNCTION_LAN "lan"
#define LED_FUNCTION_MAIL "mail"
#define LED_FUNCTION_MTD "mtd"
-#define LED_FUNCTION_MICMUTE "micmute"
-#define LED_FUNCTION_MUTE "mute"
-#define LED_FUNCTION_NUMLOCK "numlock"
#define LED_FUNCTION_PANIC "panic"
#define LED_FUNCTION_PROGRAMMING "programming"
-#define LED_FUNCTION_POWER "power"
#define LED_FUNCTION_RX "rx"
#define LED_FUNCTION_SD "sd"
-#define LED_FUNCTION_SCROLLLOCK "scrolllock"
#define LED_FUNCTION_STANDBY "standby"
-#define LED_FUNCTION_STATUS "status"
#define LED_FUNCTION_TORCH "torch"
#define LED_FUNCTION_TX "tx"
#define LED_FUNCTION_USB "usb"
diff --git a/include/dt-bindings/leds/rt4831-backlight.h b/include/dt-bindings/leds/rt4831-backlight.h
new file mode 100644
index 000000000000..125c6351bba0
--- /dev/null
+++ b/include/dt-bindings/leds/rt4831-backlight.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for rt4831 backlight bindings.
+ *
+ * Copyright (C) 2020, Richtek Technology Corp.
+ * Author: ChiYuan Huang <cy_huang@richtek.com>
+ */
+
+#ifndef _DT_BINDINGS_RT4831_BACKLIGHT_H
+#define _DT_BINDINGS_RT4831_BACKLIGHT_H
+
+#define RT4831_BLOVPLVL_17V 0
+#define RT4831_BLOVPLVL_21V 1
+#define RT4831_BLOVPLVL_25V 2
+#define RT4831_BLOVPLVL_29V 3
+
+#define RT4831_BLED_CH1EN (1 << 0)
+#define RT4831_BLED_CH2EN (1 << 1)
+#define RT4831_BLED_CH3EN (1 << 2)
+#define RT4831_BLED_CH4EN (1 << 3)
+#define RT4831_BLED_ALLCHEN ((1 << 4) - 1)
+
+#endif /* _DT_BINDINGS_RT4831_BACKLIGHT_H */
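Editor's note: a minimal sketch (hypothetical test, not part of the patch) confirming the arithmetic behind RT4831_BLED_ALLCHEN: (1 << 4) - 1 is 0xF, i.e. exactly the OR of the four per-channel enable bits defined above.

#include <dt-bindings/leds/rt4831-backlight.h>

_Static_assert(RT4831_BLED_ALLCHEN ==
	       (RT4831_BLED_CH1EN | RT4831_BLED_CH2EN |
		RT4831_BLED_CH3EN | RT4831_BLED_CH4EN),
	       "ALLCHEN must cover exactly channels 1-4");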
diff --git a/include/dt-bindings/mailbox/qcom-ipcc.h b/include/dt-bindings/mailbox/qcom-ipcc.h
new file mode 100644
index 000000000000..fbfa3febc66d
--- /dev/null
+++ b/include/dt-bindings/mailbox/qcom-ipcc.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_MAILBOX_IPCC_H
+#define __DT_BINDINGS_MAILBOX_IPCC_H
+
+/* Signal IDs for MPROC protocol */
+#define IPCC_MPROC_SIGNAL_GLINK_QMP 0
+#define IPCC_MPROC_SIGNAL_TZ 1
+#define IPCC_MPROC_SIGNAL_SMP2P 2
+#define IPCC_MPROC_SIGNAL_PING 3
+
+/* Client IDs */
+#define IPCC_CLIENT_AOP 0
+#define IPCC_CLIENT_TZ 1
+#define IPCC_CLIENT_MPSS 2
+#define IPCC_CLIENT_LPASS 3
+#define IPCC_CLIENT_SLPI 4
+#define IPCC_CLIENT_SDC 5
+#define IPCC_CLIENT_CDSP 6
+#define IPCC_CLIENT_NPU 7
+#define IPCC_CLIENT_APSS 8
+#define IPCC_CLIENT_GPU 9
+#define IPCC_CLIENT_CVP 10
+#define IPCC_CLIENT_CAM 11
+#define IPCC_CLIENT_VPU 12
+#define IPCC_CLIENT_PCIE0 13
+#define IPCC_CLIENT_PCIE1 14
+#define IPCC_CLIENT_PCIE2 15
+#define IPCC_CLIENT_SPSS 16
+#define IPCC_CLIENT_NSP1 18
+#define IPCC_CLIENT_TME 23
+#define IPCC_CLIENT_WPSS 24
+
+#endif
diff --git a/include/dt-bindings/mailbox/tegra186-hsp.h b/include/dt-bindings/mailbox/tegra186-hsp.h
index 3bdec7a84d35..b9ccae2aa9e2 100644
--- a/include/dt-bindings/mailbox/tegra186-hsp.h
+++ b/include/dt-bindings/mailbox/tegra186-hsp.h
@@ -16,6 +16,11 @@
#define TEGRA_HSP_MBOX_TYPE_AS 0x3
/*
+ * These defines represent the types of shared mailbox supported, based on data size.
+ */
+#define TEGRA_HSP_MBOX_TYPE_SM_128BIT (1 << 8)
+
+/*
* These defines represent the bit associated with the given master ID in the
* doorbell registers.
*/
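Editor's note: the new 128-bit flag lives in bit 8, above the low byte that carries the base mailbox type, so a consumer ORs it into the existing shared-mailbox type rather than using it alone. A sketch, assuming the header's pre-existing TEGRA_HSP_MBOX_TYPE_SM constant; the EXAMPLE_* name is hypothetical.

#include <dt-bindings/mailbox/tegra186-hsp.h>

/* Hypothetical combined specifier: base shared-mailbox type plus the
 * 128-bit data-size modifier in bit 8. */
#define EXAMPLE_SM_128BIT \
	(TEGRA_HSP_MBOX_TYPE_SM | TEGRA_HSP_MBOX_TYPE_SM_128BIT)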
diff --git a/include/dt-bindings/media/tvp5150.h b/include/dt-bindings/media/tvp5150.h
index 01eedf4985b8..dda00c038530 100644
--- a/include/dt-bindings/media/tvp5150.h
+++ b/include/dt-bindings/media/tvp5150.h
@@ -14,8 +14,6 @@
#define TVP5150_COMPOSITE1 1
#define TVP5150_SVIDEO 2
-#define TVP5150_INPUT_NUM 3
-
/* TVP5150 HW outputs */
#define TVP5150_NORMAL 0
#define TVP5150_BLACK_SCREEN 1
diff --git a/include/dt-bindings/memory/mt2701-larb-port.h b/include/dt-bindings/memory/mt2701-larb-port.h
index 2d85c2ec6cfd..25d03526f142 100644
--- a/include/dt-bindings/memory/mt2701-larb-port.h
+++ b/include/dt-bindings/memory/mt2701-larb-port.h
@@ -4,8 +4,8 @@
* Author: Honghui Zhang <honghui.zhang@mediatek.com>
*/
-#ifndef _MT2701_LARB_PORT_H_
-#define _MT2701_LARB_PORT_H_
+#ifndef _DT_BINDINGS_MEMORY_MT2701_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT2701_LARB_PORT_H_
/*
* Mediatek m4u generation 1 such as mt2701 has flat m4u port numbers,
diff --git a/include/dt-bindings/memory/mt2712-larb-port.h b/include/dt-bindings/memory/mt2712-larb-port.h
index 6f9aa7349cef..e41a2841bcff 100644
--- a/include/dt-bindings/memory/mt2712-larb-port.h
+++ b/include/dt-bindings/memory/mt2712-larb-port.h
@@ -3,10 +3,10 @@
* Copyright (c) 2017 MediaTek Inc.
* Author: Yong Wu <yong.wu@mediatek.com>
*/
-#ifndef __DTS_IOMMU_PORT_MT2712_H
-#define __DTS_IOMMU_PORT_MT2712_H
+#ifndef _DT_BINDINGS_MEMORY_MT2712_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT2712_LARB_PORT_H_
-#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port))
+#include <dt-bindings/memory/mtk-memory-port.h>
#define M4U_LARB0_ID 0
#define M4U_LARB1_ID 1
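Editor's note: the per-SoC copies of MTK_M4U_ID are being folded into the shared mtk-memory-port.h, which keeps the same (larb << 5) | port packing shown in the removed line. A sketch of the encoding and its inverse, using hypothetical SKETCH_* names; the 5-bit decode masks follow from the shift width.

/* Hypothetical re-statement of the packing, plus decoders. */
#define SKETCH_M4U_ID(larb, port)	(((larb) << 5) | (port))
#define SKETCH_M4U_TO_LARB(id)		(((id) >> 5) & 0x1f)
#define SKETCH_M4U_TO_PORT(id)		((id) & 0x1f)

_Static_assert(SKETCH_M4U_ID(2, 3) == 0x43, "larb 2, port 3 packs to 0x43");
_Static_assert(SKETCH_M4U_TO_LARB(0x43) == 2 && SKETCH_M4U_TO_PORT(0x43) == 3,
	       "and unpacks again");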
diff --git a/include/dt-bindings/memory/mt6779-larb-port.h b/include/dt-bindings/memory/mt6779-larb-port.h
new file mode 100644
index 000000000000..3fb438a96e35
--- /dev/null
+++ b/include/dt-bindings/memory/mt6779-larb-port.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Chao Hao <chao.hao@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_MEMORY_MT6779_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT6779_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+#define M4U_LARB0_ID 0
+#define M4U_LARB1_ID 1
+#define M4U_LARB2_ID 2
+#define M4U_LARB3_ID 3
+#define M4U_LARB4_ID 4
+#define M4U_LARB5_ID 5
+#define M4U_LARB6_ID 6
+#define M4U_LARB7_ID 7
+#define M4U_LARB8_ID 8
+#define M4U_LARB9_ID 9
+#define M4U_LARB10_ID 10
+#define M4U_LARB11_ID 11
+
+/* larb0 */
+#define M4U_PORT_DISP_POSTMASK0 MTK_M4U_ID(M4U_LARB0_ID, 0)
+#define M4U_PORT_DISP_OVL0_HDR MTK_M4U_ID(M4U_LARB0_ID, 1)
+#define M4U_PORT_DISP_OVL1_HDR MTK_M4U_ID(M4U_LARB0_ID, 2)
+#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 3)
+#define M4U_PORT_DISP_OVL1 MTK_M4U_ID(M4U_LARB0_ID, 4)
+#define M4U_PORT_DISP_PVRIC0 MTK_M4U_ID(M4U_LARB0_ID, 5)
+#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 6)
+#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 7)
+#define M4U_PORT_DISP_FAKE0 MTK_M4U_ID(M4U_LARB0_ID, 8)
+
+/* larb1 */
+#define M4U_PORT_DISP_OVL0_2L_HDR MTK_M4U_ID(M4U_LARB1_ID, 0)
+#define M4U_PORT_DISP_OVL1_2L_HDR MTK_M4U_ID(M4U_LARB1_ID, 1)
+#define M4U_PORT_DISP_OVL0_2L MTK_M4U_ID(M4U_LARB1_ID, 2)
+#define M4U_PORT_DISP_OVL1_2L MTK_M4U_ID(M4U_LARB1_ID, 3)
+#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB1_ID, 4)
+#define M4U_PORT_MDP_PVRIC0 MTK_M4U_ID(M4U_LARB1_ID, 5)
+#define M4U_PORT_MDP_PVRIC1 MTK_M4U_ID(M4U_LARB1_ID, 6)
+#define M4U_PORT_MDP_RDMA0 MTK_M4U_ID(M4U_LARB1_ID, 7)
+#define M4U_PORT_MDP_RDMA1 MTK_M4U_ID(M4U_LARB1_ID, 8)
+#define M4U_PORT_MDP_WROT0_R MTK_M4U_ID(M4U_LARB1_ID, 9)
+#define M4U_PORT_MDP_WROT0_W MTK_M4U_ID(M4U_LARB1_ID, 10)
+#define M4U_PORT_MDP_WROT1_R MTK_M4U_ID(M4U_LARB1_ID, 11)
+#define M4U_PORT_MDP_WROT1_W MTK_M4U_ID(M4U_LARB1_ID, 12)
+#define M4U_PORT_DISP_FAKE1 MTK_M4U_ID(M4U_LARB1_ID, 13)
+
+/* larb2-VDEC */
+#define M4U_PORT_HW_VDEC_MC_EXT MTK_M4U_ID(M4U_LARB2_ID, 0)
+#define M4U_PORT_HW_VDEC_UFO_EXT MTK_M4U_ID(M4U_LARB2_ID, 1)
+#define M4U_PORT_HW_VDEC_PP_EXT MTK_M4U_ID(M4U_LARB2_ID, 2)
+#define M4U_PORT_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(M4U_LARB2_ID, 3)
+#define M4U_PORT_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(M4U_LARB2_ID, 4)
+#define M4U_PORT_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(M4U_LARB2_ID, 5)
+#define M4U_PORT_HW_VDEC_TILE_EXT MTK_M4U_ID(M4U_LARB2_ID, 6)
+#define M4U_PORT_HW_VDEC_VLD_EXT MTK_M4U_ID(M4U_LARB2_ID, 7)
+#define M4U_PORT_HW_VDEC_VLD2_EXT MTK_M4U_ID(M4U_LARB2_ID, 8)
+#define M4U_PORT_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(M4U_LARB2_ID, 9)
+#define M4U_PORT_HW_VDEC_UFO_ENC_EXT MTK_M4U_ID(M4U_LARB2_ID, 10)
+#define M4U_PORT_HW_VDEC_RG_CTRL_DMA_EXT MTK_M4U_ID(M4U_LARB2_ID, 11)
+
+/* larb3-VENC */
+#define M4U_PORT_VENC_RCPU MTK_M4U_ID(M4U_LARB3_ID, 0)
+#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB3_ID, 1)
+#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 2)
+#define M4U_PORT_VENC_SV_COMV MTK_M4U_ID(M4U_LARB3_ID, 3)
+#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB3_ID, 4)
+#define M4U_PORT_VENC_NBM_RDMA MTK_M4U_ID(M4U_LARB3_ID, 5)
+#define M4U_PORT_VENC_NBM_RDMA_LITE MTK_M4U_ID(M4U_LARB3_ID, 6)
+#define M4U_PORT_JPGENC_Y_RDMA MTK_M4U_ID(M4U_LARB3_ID, 7)
+#define M4U_PORT_JPGENC_C_RDMA MTK_M4U_ID(M4U_LARB3_ID, 8)
+#define M4U_PORT_JPGENC_Q_TABLE MTK_M4U_ID(M4U_LARB3_ID, 9)
+#define M4U_PORT_JPGENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 10)
+#define M4U_PORT_JPGDEC_WDMA MTK_M4U_ID(M4U_LARB3_ID, 11)
+#define M4U_PORT_JPGDEC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 12)
+#define M4U_PORT_VENC_NBM_WDMA MTK_M4U_ID(M4U_LARB3_ID, 13)
+#define M4U_PORT_VENC_NBM_WDMA_LITE MTK_M4U_ID(M4U_LARB3_ID, 14)
+#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB3_ID, 15)
+#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 16)
+#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB3_ID, 17)
+#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 18)
+
+/* larb4-dummy */
+
+/* larb5-IMG */
+#define M4U_PORT_IMGI_D1 MTK_M4U_ID(M4U_LARB5_ID, 0)
+#define M4U_PORT_IMGBI_D1 MTK_M4U_ID(M4U_LARB5_ID, 1)
+#define M4U_PORT_DMGI_D1 MTK_M4U_ID(M4U_LARB5_ID, 2)
+#define M4U_PORT_DEPI_D1 MTK_M4U_ID(M4U_LARB5_ID, 3)
+#define M4U_PORT_LCEI_D1 MTK_M4U_ID(M4U_LARB5_ID, 4)
+#define M4U_PORT_SMTI_D1 MTK_M4U_ID(M4U_LARB5_ID, 5)
+#define M4U_PORT_SMTO_D2 MTK_M4U_ID(M4U_LARB5_ID, 6)
+#define M4U_PORT_SMTO_D1 MTK_M4U_ID(M4U_LARB5_ID, 7)
+#define M4U_PORT_CRZO_D1 MTK_M4U_ID(M4U_LARB5_ID, 8)
+#define M4U_PORT_IMG3O_D1 MTK_M4U_ID(M4U_LARB5_ID, 9)
+#define M4U_PORT_VIPI_D1 MTK_M4U_ID(M4U_LARB5_ID, 10)
+#define M4U_PORT_WPE_RDMA1 MTK_M4U_ID(M4U_LARB5_ID, 11)
+#define M4U_PORT_WPE_RDMA0 MTK_M4U_ID(M4U_LARB5_ID, 12)
+#define M4U_PORT_WPE_WDMA MTK_M4U_ID(M4U_LARB5_ID, 13)
+#define M4U_PORT_TIMGO_D1 MTK_M4U_ID(M4U_LARB5_ID, 14)
+#define M4U_PORT_MFB_RDMA0 MTK_M4U_ID(M4U_LARB5_ID, 15)
+#define M4U_PORT_MFB_RDMA1 MTK_M4U_ID(M4U_LARB5_ID, 16)
+#define M4U_PORT_MFB_RDMA2 MTK_M4U_ID(M4U_LARB5_ID, 17)
+#define M4U_PORT_MFB_RDMA3 MTK_M4U_ID(M4U_LARB5_ID, 18)
+#define M4U_PORT_MFB_WDMA MTK_M4U_ID(M4U_LARB5_ID, 19)
+#define M4U_PORT_RESERVE1 MTK_M4U_ID(M4U_LARB5_ID, 20)
+#define M4U_PORT_RESERVE2 MTK_M4U_ID(M4U_LARB5_ID, 21)
+#define M4U_PORT_RESERVE3 MTK_M4U_ID(M4U_LARB5_ID, 22)
+#define M4U_PORT_RESERVE4 MTK_M4U_ID(M4U_LARB5_ID, 23)
+#define M4U_PORT_RESERVE5 MTK_M4U_ID(M4U_LARB5_ID, 24)
+#define M4U_PORT_RESERVE6 MTK_M4U_ID(M4U_LARB5_ID, 25)
+
+/* larb6-IMG-VPU */
+#define M4U_PORT_IMG_IPUO MTK_M4U_ID(M4U_LARB6_ID, 0)
+#define M4U_PORT_IMG_IPU3O MTK_M4U_ID(M4U_LARB6_ID, 1)
+#define M4U_PORT_IMG_IPUI MTK_M4U_ID(M4U_LARB6_ID, 2)
+
+/* larb7-DVS */
+#define M4U_PORT_DVS_RDMA MTK_M4U_ID(M4U_LARB7_ID, 0)
+#define M4U_PORT_DVS_WDMA MTK_M4U_ID(M4U_LARB7_ID, 1)
+#define M4U_PORT_DVP_RDMA MTK_M4U_ID(M4U_LARB7_ID, 2)
+#define M4U_PORT_DVP_WDMA MTK_M4U_ID(M4U_LARB7_ID, 3)
+
+/* larb8-IPESYS */
+#define M4U_PORT_FDVT_RDA MTK_M4U_ID(M4U_LARB8_ID, 0)
+#define M4U_PORT_FDVT_RDB MTK_M4U_ID(M4U_LARB8_ID, 1)
+#define M4U_PORT_FDVT_WRA MTK_M4U_ID(M4U_LARB8_ID, 2)
+#define M4U_PORT_FDVT_WRB MTK_M4U_ID(M4U_LARB8_ID, 3)
+#define M4U_PORT_FE_RD0 MTK_M4U_ID(M4U_LARB8_ID, 4)
+#define M4U_PORT_FE_RD1 MTK_M4U_ID(M4U_LARB8_ID, 5)
+#define M4U_PORT_FE_WR0 MTK_M4U_ID(M4U_LARB8_ID, 6)
+#define M4U_PORT_FE_WR1 MTK_M4U_ID(M4U_LARB8_ID, 7)
+#define M4U_PORT_RSC_RDMA0 MTK_M4U_ID(M4U_LARB8_ID, 8)
+#define M4U_PORT_RSC_WDMA MTK_M4U_ID(M4U_LARB8_ID, 9)
+
+/* larb9-CAM */
+#define M4U_PORT_CAM_IMGO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 0)
+#define M4U_PORT_CAM_RRZO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 1)
+#define M4U_PORT_CAM_LSCI_R1_C MTK_M4U_ID(M4U_LARB9_ID, 2)
+#define M4U_PORT_CAM_BPCI_R1_C MTK_M4U_ID(M4U_LARB9_ID, 3)
+#define M4U_PORT_CAM_YUVO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 4)
+#define M4U_PORT_CAM_UFDI_R2_C MTK_M4U_ID(M4U_LARB9_ID, 5)
+#define M4U_PORT_CAM_RAWI_R2_C MTK_M4U_ID(M4U_LARB9_ID, 6)
+#define M4U_PORT_CAM_RAWI_R5_C MTK_M4U_ID(M4U_LARB9_ID, 7)
+#define M4U_PORT_CAM_CAMSV_1 MTK_M4U_ID(M4U_LARB9_ID, 8)
+#define M4U_PORT_CAM_CAMSV_2 MTK_M4U_ID(M4U_LARB9_ID, 9)
+#define M4U_PORT_CAM_CAMSV_3 MTK_M4U_ID(M4U_LARB9_ID, 10)
+#define M4U_PORT_CAM_CAMSV_4 MTK_M4U_ID(M4U_LARB9_ID, 11)
+#define M4U_PORT_CAM_CAMSV_5 MTK_M4U_ID(M4U_LARB9_ID, 12)
+#define M4U_PORT_CAM_CAMSV_6 MTK_M4U_ID(M4U_LARB9_ID, 13)
+#define M4U_PORT_CAM_AAO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 14)
+#define M4U_PORT_CAM_AFO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 15)
+#define M4U_PORT_CAM_FLKO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 16)
+#define M4U_PORT_CAM_LCESO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 17)
+#define M4U_PORT_CAM_CRZO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 18)
+#define M4U_PORT_CAM_LTMSO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 19)
+#define M4U_PORT_CAM_RSSO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 20)
+#define M4U_PORT_CAM_CCUI MTK_M4U_ID(M4U_LARB9_ID, 21)
+#define M4U_PORT_CAM_CCUO MTK_M4U_ID(M4U_LARB9_ID, 22)
+#define M4U_PORT_CAM_FAKE MTK_M4U_ID(M4U_LARB9_ID, 23)
+
+/* larb10-CAM_A */
+#define M4U_PORT_CAM_IMGO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 0)
+#define M4U_PORT_CAM_RRZO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 1)
+#define M4U_PORT_CAM_LSCI_R1_A MTK_M4U_ID(M4U_LARB10_ID, 2)
+#define M4U_PORT_CAM_BPCI_R1_A MTK_M4U_ID(M4U_LARB10_ID, 3)
+#define M4U_PORT_CAM_YUVO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 4)
+#define M4U_PORT_CAM_UFDI_R2_A MTK_M4U_ID(M4U_LARB10_ID, 5)
+#define M4U_PORT_CAM_RAWI_R2_A MTK_M4U_ID(M4U_LARB10_ID, 6)
+#define M4U_PORT_CAM_RAWI_R5_A MTK_M4U_ID(M4U_LARB10_ID, 7)
+#define M4U_PORT_CAM_IMGO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 8)
+#define M4U_PORT_CAM_RRZO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 9)
+#define M4U_PORT_CAM_LSCI_R1_B MTK_M4U_ID(M4U_LARB10_ID, 10)
+#define M4U_PORT_CAM_BPCI_R1_B MTK_M4U_ID(M4U_LARB10_ID, 11)
+#define M4U_PORT_CAM_YUVO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 12)
+#define M4U_PORT_CAM_UFDI_R2_B MTK_M4U_ID(M4U_LARB10_ID, 13)
+#define M4U_PORT_CAM_RAWI_R2_B MTK_M4U_ID(M4U_LARB10_ID, 14)
+#define M4U_PORT_CAM_RAWI_R5_B MTK_M4U_ID(M4U_LARB10_ID, 15)
+#define M4U_PORT_CAM_CAMSV_0 MTK_M4U_ID(M4U_LARB10_ID, 16)
+#define M4U_PORT_CAM_AAO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 17)
+#define M4U_PORT_CAM_AFO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 18)
+#define M4U_PORT_CAM_FLKO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 19)
+#define M4U_PORT_CAM_LCESO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 20)
+#define M4U_PORT_CAM_CRZO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 21)
+#define M4U_PORT_CAM_AAO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 22)
+#define M4U_PORT_CAM_AFO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 23)
+#define M4U_PORT_CAM_FLKO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 24)
+#define M4U_PORT_CAM_LCESO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 25)
+#define M4U_PORT_CAM_CRZO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 26)
+#define M4U_PORT_CAM_LTMSO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 27)
+#define M4U_PORT_CAM_RSSO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 28)
+#define M4U_PORT_CAM_LTMSO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 29)
+#define M4U_PORT_CAM_RSSO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 30)
+
+/* larb11-CAM-VPU */
+#define M4U_PORT_CAM_IPUO MTK_M4U_ID(M4U_LARB11_ID, 0)
+#define M4U_PORT_CAM_IPU2O MTK_M4U_ID(M4U_LARB11_ID, 1)
+#define M4U_PORT_CAM_IPU3O MTK_M4U_ID(M4U_LARB11_ID, 2)
+#define M4U_PORT_CAM_IPUI MTK_M4U_ID(M4U_LARB11_ID, 3)
+#define M4U_PORT_CAM_IPU2I MTK_M4U_ID(M4U_LARB11_ID, 4)
+
+#endif
diff --git a/include/dt-bindings/memory/mt6795-larb-port.h b/include/dt-bindings/memory/mt6795-larb-port.h
new file mode 100644
index 000000000000..58cf6a6b6372
--- /dev/null
+++ b/include/dt-bindings/memory/mt6795-larb-port.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_MEMORY_MT6795_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT6795_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+#define M4U_LARB0_ID 0
+#define M4U_LARB1_ID 1
+#define M4U_LARB2_ID 2
+#define M4U_LARB3_ID 3
+#define M4U_LARB4_ID 4
+
+/* larb0 */
+#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 0)
+#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 1)
+#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB0_ID, 2)
+#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 3)
+#define M4U_PORT_DISP_OVL1 MTK_M4U_ID(M4U_LARB0_ID, 4)
+#define M4U_PORT_DISP_RDMA2 MTK_M4U_ID(M4U_LARB0_ID, 5)
+#define M4U_PORT_DISP_WDMA1 MTK_M4U_ID(M4U_LARB0_ID, 6)
+#define M4U_PORT_DISP_OD_R MTK_M4U_ID(M4U_LARB0_ID, 7)
+#define M4U_PORT_DISP_OD_W MTK_M4U_ID(M4U_LARB0_ID, 8)
+#define M4U_PORT_MDP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 9)
+#define M4U_PORT_MDP_RDMA1 MTK_M4U_ID(M4U_LARB0_ID, 10)
+#define M4U_PORT_MDP_WDMA MTK_M4U_ID(M4U_LARB0_ID, 11)
+#define M4U_PORT_MDP_WROT0 MTK_M4U_ID(M4U_LARB0_ID, 12)
+#define M4U_PORT_MDP_WROT1 MTK_M4U_ID(M4U_LARB0_ID, 13)
+
+/* larb1 */
+#define M4U_PORT_VDEC_MC MTK_M4U_ID(M4U_LARB1_ID, 0)
+#define M4U_PORT_VDEC_PP MTK_M4U_ID(M4U_LARB1_ID, 1)
+#define M4U_PORT_VDEC_UFO MTK_M4U_ID(M4U_LARB1_ID, 2)
+#define M4U_PORT_VDEC_VLD MTK_M4U_ID(M4U_LARB1_ID, 3)
+#define M4U_PORT_VDEC_VLD2 MTK_M4U_ID(M4U_LARB1_ID, 4)
+#define M4U_PORT_VDEC_AVC_MV MTK_M4U_ID(M4U_LARB1_ID, 5)
+#define M4U_PORT_VDEC_PRED_RD MTK_M4U_ID(M4U_LARB1_ID, 6)
+#define M4U_PORT_VDEC_PRED_WR MTK_M4U_ID(M4U_LARB1_ID, 7)
+#define M4U_PORT_VDEC_PPWRAP MTK_M4U_ID(M4U_LARB1_ID, 8)
+
+/* larb2 */
+#define M4U_PORT_CAM_IMGO MTK_M4U_ID(M4U_LARB2_ID, 0)
+#define M4U_PORT_CAM_RRZO MTK_M4U_ID(M4U_LARB2_ID, 1)
+#define M4U_PORT_CAM_AAO MTK_M4U_ID(M4U_LARB2_ID, 2)
+#define M4U_PORT_CAM_LCSO MTK_M4U_ID(M4U_LARB2_ID, 3)
+#define M4U_PORT_CAM_ESFKO MTK_M4U_ID(M4U_LARB2_ID, 4)
+#define M4U_PORT_CAM_IMGO_S MTK_M4U_ID(M4U_LARB2_ID, 5)
+#define M4U_PORT_CAM_LSCI MTK_M4U_ID(M4U_LARB2_ID, 6)
+#define M4U_PORT_CAM_LSCI_D MTK_M4U_ID(M4U_LARB2_ID, 7)
+#define M4U_PORT_CAM_BPCI MTK_M4U_ID(M4U_LARB2_ID, 8)
+#define M4U_PORT_CAM_BPCI_D MTK_M4U_ID(M4U_LARB2_ID, 9)
+#define M4U_PORT_CAM_UFDI MTK_M4U_ID(M4U_LARB2_ID, 10)
+#define M4U_PORT_CAM_IMGI MTK_M4U_ID(M4U_LARB2_ID, 11)
+#define M4U_PORT_CAM_IMG2O MTK_M4U_ID(M4U_LARB2_ID, 12)
+#define M4U_PORT_CAM_IMG3O MTK_M4U_ID(M4U_LARB2_ID, 13)
+#define M4U_PORT_CAM_VIPI MTK_M4U_ID(M4U_LARB2_ID, 14)
+#define M4U_PORT_CAM_VIP2I MTK_M4U_ID(M4U_LARB2_ID, 15)
+#define M4U_PORT_CAM_VIP3I MTK_M4U_ID(M4U_LARB2_ID, 16)
+#define M4U_PORT_CAM_LCEI MTK_M4U_ID(M4U_LARB2_ID, 17)
+#define M4U_PORT_CAM_RB MTK_M4U_ID(M4U_LARB2_ID, 18)
+#define M4U_PORT_CAM_RP MTK_M4U_ID(M4U_LARB2_ID, 19)
+#define M4U_PORT_CAM_WR MTK_M4U_ID(M4U_LARB2_ID, 20)
+
+/* larb3 */
+#define M4U_PORT_VENC_RCPU MTK_M4U_ID(M4U_LARB3_ID, 0)
+#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB3_ID, 1)
+#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 2)
+#define M4U_PORT_VENC_SV_COMV MTK_M4U_ID(M4U_LARB3_ID, 3)
+#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB3_ID, 4)
+#define M4U_PORT_JPGENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 5)
+#define M4U_PORT_REMDC_SDMA MTK_M4U_ID(M4U_LARB3_ID, 6)
+#define M4U_PORT_REMDC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 7)
+#define M4U_PORT_JPGENC_RDMA MTK_M4U_ID(M4U_LARB3_ID, 8)
+#define M4U_PORT_JPGENC_SDMA MTK_M4U_ID(M4U_LARB3_ID, 9)
+#define M4U_PORT_JPGDEC_WDMA MTK_M4U_ID(M4U_LARB3_ID, 10)
+#define M4U_PORT_JPGDEC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 11)
+#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB3_ID, 12)
+#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 13)
+#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB3_ID, 14)
+#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 15)
+#define M4U_PORT_REMDC_WDMA MTK_M4U_ID(M4U_LARB3_ID, 16)
+#define M4U_PORT_VENC_NBM_RDMA MTK_M4U_ID(M4U_LARB3_ID, 17)
+#define M4U_PORT_VENC_NBM_WDMA MTK_M4U_ID(M4U_LARB3_ID, 18)
+
+/* larb4 */
+#define M4U_PORT_MJC_MV_RD MTK_M4U_ID(M4U_LARB4_ID, 0)
+#define M4U_PORT_MJC_MV_WR MTK_M4U_ID(M4U_LARB4_ID, 1)
+#define M4U_PORT_MJC_DMA_RD MTK_M4U_ID(M4U_LARB4_ID, 2)
+#define M4U_PORT_MJC_DMA_WR MTK_M4U_ID(M4U_LARB4_ID, 3)
+
+#endif
diff --git a/include/dt-bindings/memory/mt8167-larb-port.h b/include/dt-bindings/memory/mt8167-larb-port.h
new file mode 100644
index 000000000000..aae57d4824ca
--- /dev/null
+++ b/include/dt-bindings/memory/mt8167-larb-port.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Copyright (c) 2020 BayLibre, SAS
+ * Author: Honghui Zhang <honghui.zhang@mediatek.com>
+ * Author: Fabien Parent <fparent@baylibre.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MT8167_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT8167_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+#define M4U_LARB0_ID 0
+#define M4U_LARB1_ID 1
+#define M4U_LARB2_ID 2
+
+/* larb0 */
+#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 0)
+#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 1)
+#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 2)
+#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB0_ID, 3)
+#define M4U_PORT_MDP_RDMA MTK_M4U_ID(M4U_LARB0_ID, 4)
+#define M4U_PORT_MDP_WDMA MTK_M4U_ID(M4U_LARB0_ID, 5)
+#define M4U_PORT_MDP_WROT MTK_M4U_ID(M4U_LARB0_ID, 6)
+#define M4U_PORT_DISP_FAKE MTK_M4U_ID(M4U_LARB0_ID, 7)
+
+/* larb1*/
+#define M4U_PORT_CAM_IMGO MTK_M4U_ID(M4U_LARB1_ID, 0)
+#define M4U_PORT_CAM_IMG2O MTK_M4U_ID(M4U_LARB1_ID, 1)
+#define M4U_PORT_CAM_LSCI MTK_M4U_ID(M4U_LARB1_ID, 2)
+#define M4U_PORT_CAM_ESFKO MTK_M4U_ID(M4U_LARB1_ID, 3)
+#define M4U_PORT_CAM_AAO MTK_M4U_ID(M4U_LARB1_ID, 4)
+#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB1_ID, 5)
+#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB1_ID, 6)
+#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB1_ID, 7)
+#define M4U_PORT_CAM_IMGI MTK_M4U_ID(M4U_LARB1_ID, 8)
+#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB1_ID, 9)
+#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB1_ID, 10)
+#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB1_ID, 11)
+#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB1_ID, 12)
+
+/* larb2*/
+#define M4U_PORT_HW_VDEC_MC_EXT MTK_M4U_ID(M4U_LARB2_ID, 0)
+#define M4U_PORT_HW_VDEC_PP_EXT MTK_M4U_ID(M4U_LARB2_ID, 1)
+#define M4U_PORT_HW_VDEC_VLD_EXT MTK_M4U_ID(M4U_LARB2_ID, 2)
+#define M4U_PORT_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(M4U_LARB2_ID, 3)
+#define M4U_PORT_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(M4U_LARB2_ID, 4)
+#define M4U_PORT_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(M4U_LARB2_ID, 5)
+#define M4U_PORT_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(M4U_LARB2_ID, 6)
+
+#endif
diff --git a/include/dt-bindings/memory/mt8173-larb-port.h b/include/dt-bindings/memory/mt8173-larb-port.h
index 9f31ccfeca21..167a7fc51868 100644
--- a/include/dt-bindings/memory/mt8173-larb-port.h
+++ b/include/dt-bindings/memory/mt8173-larb-port.h
@@ -3,10 +3,10 @@
* Copyright (c) 2015-2016 MediaTek Inc.
* Author: Yong Wu <yong.wu@mediatek.com>
*/
-#ifndef __DTS_IOMMU_PORT_MT8173_H
-#define __DTS_IOMMU_PORT_MT8173_H
+#ifndef _DT_BINDINGS_MEMORY_MT8173_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT8173_LARB_PORT_H_
-#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port))
+#include <dt-bindings/memory/mtk-memory-port.h>
#define M4U_LARB0_ID 0
#define M4U_LARB1_ID 1
diff --git a/include/dt-bindings/memory/mt8183-larb-port.h b/include/dt-bindings/memory/mt8183-larb-port.h
index 2c579f305162..36abdf0ce5a2 100644
--- a/include/dt-bindings/memory/mt8183-larb-port.h
+++ b/include/dt-bindings/memory/mt8183-larb-port.h
@@ -3,10 +3,10 @@
* Copyright (c) 2018 MediaTek Inc.
* Author: Yong Wu <yong.wu@mediatek.com>
*/
-#ifndef __DTS_IOMMU_PORT_MT8183_H
-#define __DTS_IOMMU_PORT_MT8183_H
+#ifndef _DT_BINDINGS_MEMORY_MT8183_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT8183_LARB_PORT_H_
-#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port))
+#include <dt-bindings/memory/mtk-memory-port.h>
#define M4U_LARB0_ID 0
#define M4U_LARB1_ID 1
diff --git a/include/dt-bindings/memory/mt8186-memory-port.h b/include/dt-bindings/memory/mt8186-memory-port.h
new file mode 100644
index 000000000000..2bc6e4433048
--- /dev/null
+++ b/include/dt-bindings/memory/mt8186-memory-port.h
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ *
+ * Author: Anan Sun <anan.sun@mediatek.com>
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MT8186_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT8186_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+/*
+ * The MM IOMMU supports a 16GB dma address space, split into four
+ * ranges: 0 ~ 4G, 4G ~ 8G, 8G ~ 12G and 12G ~ 16G. These masters can
+ * be placed in any one of the regions, BUT:
+ * a) Make sure all the ports inside a larb are in one range.
+ * b) The iova of any master can NOT cross the 4G/8G/12G boundary.
+ *
+ * This is the suggested mapping for this SoC:
+ *
+ * modules dma-address-region larbs-ports
+ * disp 0 ~ 4G larb0/1/2
+ * vcodec 4G ~ 8G larb4/7
+ * cam/mdp 8G ~ 12G the other larbs.
+ * N/A 12G ~ 16G
+ * CCU0 0x24000_0000 ~ 0x243ff_ffff larb13: port 9/10
+ * CCU1 0x24400_0000 ~ 0x247ff_ffff larb14: port 4/5
+ */
+
+/* MM IOMMU ports */
+/* LARB 0 -- MMSYS */
+#define IOMMU_PORT_L0_DISP_POSTMASK0 MTK_M4U_ID(0, 0)
+#define IOMMU_PORT_L0_REVERSED MTK_M4U_ID(0, 1)
+#define IOMMU_PORT_L0_OVL_RDMA0 MTK_M4U_ID(0, 2)
+#define IOMMU_PORT_L0_DISP_FAKE0 MTK_M4U_ID(0, 3)
+
+/* LARB 1 -- MMSYS */
+#define IOMMU_PORT_L1_DISP_RDMA1 MTK_M4U_ID(1, 0)
+#define IOMMU_PORT_L1_OVL_2L_RDMA0 MTK_M4U_ID(1, 1)
+#define IOMMU_PORT_L1_DISP_RDMA0 MTK_M4U_ID(1, 2)
+#define IOMMU_PORT_L1_DISP_WDMA0 MTK_M4U_ID(1, 3)
+#define IOMMU_PORT_L1_DISP_FAKE1 MTK_M4U_ID(1, 4)
+
+/* LARB 2 -- MMSYS */
+#define IOMMU_PORT_L2_MDP_RDMA0 MTK_M4U_ID(2, 0)
+#define IOMMU_PORT_L2_MDP_RDMA1 MTK_M4U_ID(2, 1)
+#define IOMMU_PORT_L2_MDP_WROT0 MTK_M4U_ID(2, 2)
+#define IOMMU_PORT_L2_MDP_WROT1 MTK_M4U_ID(2, 3)
+#define IOMMU_PORT_L2_DISP_FAKE0 MTK_M4U_ID(2, 4)
+
+/* LARB 4 -- VDEC */
+#define IOMMU_PORT_L4_HW_VDEC_MC_EXT MTK_M4U_ID(4, 0)
+#define IOMMU_PORT_L4_HW_VDEC_UFO_EXT MTK_M4U_ID(4, 1)
+#define IOMMU_PORT_L4_HW_VDEC_PP_EXT MTK_M4U_ID(4, 2)
+#define IOMMU_PORT_L4_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(4, 3)
+#define IOMMU_PORT_L4_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(4, 4)
+#define IOMMU_PORT_L4_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(4, 5)
+#define IOMMU_PORT_L4_HW_VDEC_TILE_EXT MTK_M4U_ID(4, 6)
+#define IOMMU_PORT_L4_HW_VDEC_VLD_EXT MTK_M4U_ID(4, 7)
+#define IOMMU_PORT_L4_HW_VDEC_VLD2_EXT MTK_M4U_ID(4, 8)
+#define IOMMU_PORT_L4_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(4, 9)
+#define IOMMU_PORT_L4_HW_VDEC_UFO_ENC_EXT MTK_M4U_ID(4, 10)
+#define IOMMU_PORT_L4_HW_VDEC_RG_CTRL_DMA_EXT MTK_M4U_ID(4, 11)
+#define IOMMU_PORT_L4_HW_MINI_MDP_R0_EXT MTK_M4U_ID(4, 12)
+#define IOMMU_PORT_L4_HW_MINI_MDP_W0_EXT MTK_M4U_ID(4, 13)
+
+/* LARB 7 -- VENC */
+#define IOMMU_PORT_L7_VENC_RCPU MTK_M4U_ID(7, 0)
+#define IOMMU_PORT_L7_VENC_REC MTK_M4U_ID(7, 1)
+#define IOMMU_PORT_L7_VENC_BSDMA MTK_M4U_ID(7, 2)
+#define IOMMU_PORT_L7_VENC_SV_COMV MTK_M4U_ID(7, 3)
+#define IOMMU_PORT_L7_VENC_RD_COMV MTK_M4U_ID(7, 4)
+#define IOMMU_PORT_L7_VENC_CUR_LUMA MTK_M4U_ID(7, 5)
+#define IOMMU_PORT_L7_VENC_CUR_CHROMA MTK_M4U_ID(7, 6)
+#define IOMMU_PORT_L7_VENC_REF_LUMA MTK_M4U_ID(7, 7)
+#define IOMMU_PORT_L7_VENC_REF_CHROMA MTK_M4U_ID(7, 8)
+#define IOMMU_PORT_L7_JPGENC_Y_RDMA MTK_M4U_ID(7, 9)
+#define IOMMU_PORT_L7_JPGENC_C_RDMA MTK_M4U_ID(7, 10)
+#define IOMMU_PORT_L7_JPGENC_Q_TABLE MTK_M4U_ID(7, 11)
+#define IOMMU_PORT_L7_JPGENC_BSDMA MTK_M4U_ID(7, 12)
+
+/* LARB 8 -- WPE */
+#define IOMMU_PORT_L8_WPE_RDMA_0 MTK_M4U_ID(8, 0)
+#define IOMMU_PORT_L8_WPE_RDMA_1 MTK_M4U_ID(8, 1)
+#define IOMMU_PORT_L8_WPE_WDMA_0 MTK_M4U_ID(8, 2)
+
+/* LARB 9 -- IMG-1 */
+#define IOMMU_PORT_L9_IMG_IMGI_D1 MTK_M4U_ID(9, 0)
+#define IOMMU_PORT_L9_IMG_IMGBI_D1 MTK_M4U_ID(9, 1)
+#define IOMMU_PORT_L9_IMG_DMGI_D1 MTK_M4U_ID(9, 2)
+#define IOMMU_PORT_L9_IMG_DEPI_D1 MTK_M4U_ID(9, 3)
+#define IOMMU_PORT_L9_IMG_LCE_D1 MTK_M4U_ID(9, 4)
+#define IOMMU_PORT_L9_IMG_SMTI_D1 MTK_M4U_ID(9, 5)
+#define IOMMU_PORT_L9_IMG_SMTO_D2 MTK_M4U_ID(9, 6)
+#define IOMMU_PORT_L9_IMG_SMTO_D1 MTK_M4U_ID(9, 7)
+#define IOMMU_PORT_L9_IMG_CRZO_D1 MTK_M4U_ID(9, 8)
+#define IOMMU_PORT_L9_IMG_IMG3O_D1 MTK_M4U_ID(9, 9)
+#define IOMMU_PORT_L9_IMG_VIPI_D1 MTK_M4U_ID(9, 10)
+#define IOMMU_PORT_L9_IMG_SMTI_D5 MTK_M4U_ID(9, 11)
+#define IOMMU_PORT_L9_IMG_TIMGO_D1 MTK_M4U_ID(9, 12)
+#define IOMMU_PORT_L9_IMG_UFBC_W0 MTK_M4U_ID(9, 13)
+#define IOMMU_PORT_L9_IMG_UFBC_R0 MTK_M4U_ID(9, 14)
+#define IOMMU_PORT_L9_IMG_WPE_RDMA1 MTK_M4U_ID(9, 15)
+#define IOMMU_PORT_L9_IMG_WPE_RDMA0 MTK_M4U_ID(9, 16)
+#define IOMMU_PORT_L9_IMG_WPE_WDMA MTK_M4U_ID(9, 17)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA0 MTK_M4U_ID(9, 18)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA1 MTK_M4U_ID(9, 19)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA2 MTK_M4U_ID(9, 20)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA3 MTK_M4U_ID(9, 21)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA4 MTK_M4U_ID(9, 22)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA5 MTK_M4U_ID(9, 23)
+#define IOMMU_PORT_L9_IMG_MFB_WDMA0 MTK_M4U_ID(9, 24)
+#define IOMMU_PORT_L9_IMG_MFB_WDMA1 MTK_M4U_ID(9, 25)
+#define IOMMU_PORT_L9_IMG_RESERVE6 MTK_M4U_ID(9, 26)
+#define IOMMU_PORT_L9_IMG_RESERVE7 MTK_M4U_ID(9, 27)
+#define IOMMU_PORT_L9_IMG_RESERVE8 MTK_M4U_ID(9, 28)
+
+/* LARB 11 -- IMG-2 */
+#define IOMMU_PORT_L11_IMG_IMGI_D1 MTK_M4U_ID(11, 0)
+#define IOMMU_PORT_L11_IMG_IMGBI_D1 MTK_M4U_ID(11, 1)
+#define IOMMU_PORT_L11_IMG_DMGI_D1 MTK_M4U_ID(11, 2)
+#define IOMMU_PORT_L11_IMG_DEPI_D1 MTK_M4U_ID(11, 3)
+#define IOMMU_PORT_L11_IMG_LCE_D1 MTK_M4U_ID(11, 4)
+#define IOMMU_PORT_L11_IMG_SMTI_D1 MTK_M4U_ID(11, 5)
+#define IOMMU_PORT_L11_IMG_SMTO_D2 MTK_M4U_ID(11, 6)
+#define IOMMU_PORT_L11_IMG_SMTO_D1 MTK_M4U_ID(11, 7)
+#define IOMMU_PORT_L11_IMG_CRZO_D1 MTK_M4U_ID(11, 8)
+#define IOMMU_PORT_L11_IMG_IMG3O_D1 MTK_M4U_ID(11, 9)
+#define IOMMU_PORT_L11_IMG_VIPI_D1 MTK_M4U_ID(11, 10)
+#define IOMMU_PORT_L11_IMG_SMTI_D5 MTK_M4U_ID(11, 11)
+#define IOMMU_PORT_L11_IMG_TIMGO_D1 MTK_M4U_ID(11, 12)
+#define IOMMU_PORT_L11_IMG_UFBC_W0 MTK_M4U_ID(11, 13)
+#define IOMMU_PORT_L11_IMG_UFBC_R0 MTK_M4U_ID(11, 14)
+#define IOMMU_PORT_L11_IMG_WPE_RDMA1 MTK_M4U_ID(11, 15)
+#define IOMMU_PORT_L11_IMG_WPE_RDMA0 MTK_M4U_ID(11, 16)
+#define IOMMU_PORT_L11_IMG_WPE_WDMA MTK_M4U_ID(11, 17)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA0 MTK_M4U_ID(11, 18)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA1 MTK_M4U_ID(11, 19)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA2 MTK_M4U_ID(11, 20)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA3 MTK_M4U_ID(11, 21)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA4 MTK_M4U_ID(11, 22)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA5 MTK_M4U_ID(11, 23)
+#define IOMMU_PORT_L11_IMG_MFB_WDMA0 MTK_M4U_ID(11, 24)
+#define IOMMU_PORT_L11_IMG_MFB_WDMA1 MTK_M4U_ID(11, 25)
+#define IOMMU_PORT_L11_IMG_RESERVE6 MTK_M4U_ID(11, 26)
+#define IOMMU_PORT_L11_IMG_RESERVE7 MTK_M4U_ID(11, 27)
+#define IOMMU_PORT_L11_IMG_RESERVE8 MTK_M4U_ID(11, 28)
+
+/* LARB 13 -- CAM */
+#define IOMMU_PORT_L13_CAM_MRAWI MTK_M4U_ID(13, 0)
+#define IOMMU_PORT_L13_CAM_MRAWO_0 MTK_M4U_ID(13, 1)
+#define IOMMU_PORT_L13_CAM_MRAWO_1 MTK_M4U_ID(13, 2)
+#define IOMMU_PORT_L13_CAM_CAMSV_4 MTK_M4U_ID(13, 6)
+#define IOMMU_PORT_L13_CAM_CAMSV_5 MTK_M4U_ID(13, 7)
+#define IOMMU_PORT_L13_CAM_CAMSV_6 MTK_M4U_ID(13, 8)
+#define IOMMU_PORT_L13_CAM_CCUI MTK_M4U_ID(13, 9)
+#define IOMMU_PORT_L13_CAM_CCUO MTK_M4U_ID(13, 10)
+#define IOMMU_PORT_L13_CAM_FAKE MTK_M4U_ID(13, 11)
+
+/* LARB 14 -- CAM */
+#define IOMMU_PORT_L14_CAM_CCUI MTK_M4U_ID(14, 4)
+#define IOMMU_PORT_L14_CAM_CCUO MTK_M4U_ID(14, 5)
+
+/* LARB 16 -- RAW-A */
+#define IOMMU_PORT_L16_CAM_IMGO_R1_A MTK_M4U_ID(16, 0)
+#define IOMMU_PORT_L16_CAM_RRZO_R1_A MTK_M4U_ID(16, 1)
+#define IOMMU_PORT_L16_CAM_CQI_R1_A MTK_M4U_ID(16, 2)
+#define IOMMU_PORT_L16_CAM_BPCI_R1_A MTK_M4U_ID(16, 3)
+#define IOMMU_PORT_L16_CAM_YUVO_R1_A MTK_M4U_ID(16, 4)
+#define IOMMU_PORT_L16_CAM_UFDI_R2_A MTK_M4U_ID(16, 5)
+#define IOMMU_PORT_L16_CAM_RAWI_R2_A MTK_M4U_ID(16, 6)
+#define IOMMU_PORT_L16_CAM_RAWI_R3_A MTK_M4U_ID(16, 7)
+#define IOMMU_PORT_L16_CAM_AAO_R1_A MTK_M4U_ID(16, 8)
+#define IOMMU_PORT_L16_CAM_AFO_R1_A MTK_M4U_ID(16, 9)
+#define IOMMU_PORT_L16_CAM_FLKO_R1_A MTK_M4U_ID(16, 10)
+#define IOMMU_PORT_L16_CAM_LCESO_R1_A MTK_M4U_ID(16, 11)
+#define IOMMU_PORT_L16_CAM_CRZO_R1_A MTK_M4U_ID(16, 12)
+#define IOMMU_PORT_L16_CAM_LTMSO_R1_A MTK_M4U_ID(16, 13)
+#define IOMMU_PORT_L16_CAM_RSSO_R1_A MTK_M4U_ID(16, 14)
+#define IOMMU_PORT_L16_CAM_AAHO_R1_A MTK_M4U_ID(16, 15)
+#define IOMMU_PORT_L16_CAM_LSCI_R1_A MTK_M4U_ID(16, 16)
+
+/* LARB 17 -- RAW-B */
+#define IOMMU_PORT_L17_CAM_IMGO_R1_B MTK_M4U_ID(17, 0)
+#define IOMMU_PORT_L17_CAM_RRZO_R1_B MTK_M4U_ID(17, 1)
+#define IOMMU_PORT_L17_CAM_CQI_R1_B MTK_M4U_ID(17, 2)
+#define IOMMU_PORT_L17_CAM_BPCI_R1_B MTK_M4U_ID(17, 3)
+#define IOMMU_PORT_L17_CAM_YUVO_R1_B MTK_M4U_ID(17, 4)
+#define IOMMU_PORT_L17_CAM_UFDI_R2_B MTK_M4U_ID(17, 5)
+#define IOMMU_PORT_L17_CAM_RAWI_R2_B MTK_M4U_ID(17, 6)
+#define IOMMU_PORT_L17_CAM_RAWI_R3_B MTK_M4U_ID(17, 7)
+#define IOMMU_PORT_L17_CAM_AAO_R1_B MTK_M4U_ID(17, 8)
+#define IOMMU_PORT_L17_CAM_AFO_R1_B MTK_M4U_ID(17, 9)
+#define IOMMU_PORT_L17_CAM_FLKO_R1_B MTK_M4U_ID(17, 10)
+#define IOMMU_PORT_L17_CAM_LCESO_R1_B MTK_M4U_ID(17, 11)
+#define IOMMU_PORT_L17_CAM_CRZO_R1_B MTK_M4U_ID(17, 12)
+#define IOMMU_PORT_L17_CAM_LTMSO_R1_B MTK_M4U_ID(17, 13)
+#define IOMMU_PORT_L17_CAM_RSSO_R1_B MTK_M4U_ID(17, 14)
+#define IOMMU_PORT_L17_CAM_AAHO_R1_B MTK_M4U_ID(17, 15)
+#define IOMMU_PORT_L17_CAM_LSCI_R1_B MTK_M4U_ID(17, 16)
+
+/* LARB 19 -- IPE */
+#define IOMMU_PORT_L19_IPE_DVS_RDMA MTK_M4U_ID(19, 0)
+#define IOMMU_PORT_L19_IPE_DVS_WDMA MTK_M4U_ID(19, 1)
+#define IOMMU_PORT_L19_IPE_DVP_RDMA MTK_M4U_ID(19, 2)
+#define IOMMU_PORT_L19_IPE_DVP_WDMA MTK_M4U_ID(19, 3)
+
+/* LARB 20 -- IPE */
+#define IOMMU_PORT_L20_IPE_FDVT_RDA MTK_M4U_ID(20, 0)
+#define IOMMU_PORT_L20_IPE_FDVT_RDB MTK_M4U_ID(20, 1)
+#define IOMMU_PORT_L20_IPE_FDVT_WRA MTK_M4U_ID(20, 2)
+#define IOMMU_PORT_L20_IPE_FDVT_WRB MTK_M4U_ID(20, 3)
+#define IOMMU_PORT_L20_IPE_RSC_RDMA0 MTK_M4U_ID(20, 4)
+#define IOMMU_PORT_L20_IPE_RSC_WDMA MTK_M4U_ID(20, 5)
+
+#endif
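Editor's note: constraint (b) in the comment above — no master's iova may cross a 4G/8G/12G boundary — amounts to requiring that a buffer's first and last byte land in the same 4GB region. A minimal sketch with hypothetical helpers, not part of the patch.

#include <stdbool.h>
#include <stdint.h>

/* 4GB regions: 0 ~ 4G is region 0, 4G ~ 8G is region 1, and so on. */
static inline unsigned int sketch_iova_region(uint64_t iova)
{
	return (unsigned int)(iova >> 32);
}

/* True iff [iova, iova + size) stays inside one 4GB region. */
static inline bool sketch_iova_range_ok(uint64_t iova, uint64_t size)
{
	return size != 0 &&
	       sketch_iova_region(iova) == sketch_iova_region(iova + size - 1);
}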
diff --git a/include/dt-bindings/memory/mt8192-larb-port.h b/include/dt-bindings/memory/mt8192-larb-port.h
new file mode 100644
index 000000000000..23035a52c675
--- /dev/null
+++ b/include/dt-bindings/memory/mt8192-larb-port.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ *
+ * Author: Chao Hao <chao.hao@mediatek.com>
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MT8192_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT8192_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+/*
+ * The MM IOMMU supports a 16GB dma address space.
+ *
+ * The addresses are preassigned like this:
+ *
+ * modules dma-address-region larbs-ports
+ * disp 0 ~ 4G larb0/1
+ * vcodec 4G ~ 8G larb4/5/7
+ * cam/mdp 8G ~ 12G larb2/9/11/13/14/16/17/18/19/20
+ * CCU0 0x4000_0000 ~ 0x43ff_ffff larb13: port 9/10
+ * CCU1 0x4400_0000 ~ 0x47ff_ffff larb14: port 4/5
+ *
+ * larb3/6/8/10/12/15 are null.
+ */
+
+/* larb0 */
+#define M4U_PORT_L0_DISP_POSTMASK0 MTK_M4U_ID(0, 0)
+#define M4U_PORT_L0_OVL_RDMA0_HDR MTK_M4U_ID(0, 1)
+#define M4U_PORT_L0_OVL_RDMA0 MTK_M4U_ID(0, 2)
+#define M4U_PORT_L0_DISP_RDMA0 MTK_M4U_ID(0, 3)
+#define M4U_PORT_L0_DISP_WDMA0 MTK_M4U_ID(0, 4)
+#define M4U_PORT_L0_DISP_FAKE0 MTK_M4U_ID(0, 5)
+
+/* larb1 */
+#define M4U_PORT_L1_OVL_2L_RDMA0_HDR MTK_M4U_ID(1, 0)
+#define M4U_PORT_L1_OVL_2L_RDMA2_HDR MTK_M4U_ID(1, 1)
+#define M4U_PORT_L1_OVL_2L_RDMA0 MTK_M4U_ID(1, 2)
+#define M4U_PORT_L1_OVL_2L_RDMA2 MTK_M4U_ID(1, 3)
+#define M4U_PORT_L1_DISP_MDP_RDMA4 MTK_M4U_ID(1, 4)
+#define M4U_PORT_L1_DISP_RDMA4 MTK_M4U_ID(1, 5)
+#define M4U_PORT_L1_DISP_UFBC_WDMA0 MTK_M4U_ID(1, 6)
+#define M4U_PORT_L1_DISP_FAKE1 MTK_M4U_ID(1, 7)
+
+/* larb2 */
+#define M4U_PORT_L2_MDP_RDMA0 MTK_M4U_ID(2, 0)
+#define M4U_PORT_L2_MDP_RDMA1 MTK_M4U_ID(2, 1)
+#define M4U_PORT_L2_MDP_WROT0 MTK_M4U_ID(2, 2)
+#define M4U_PORT_L2_MDP_WROT1 MTK_M4U_ID(2, 3)
+#define M4U_PORT_L2_MDP_DISP_FAKE0 MTK_M4U_ID(2, 4)
+
+/* larb3: null */
+
+/* larb4 */
+#define M4U_PORT_L4_VDEC_MC_EXT MTK_M4U_ID(4, 0)
+#define M4U_PORT_L4_VDEC_UFO_EXT MTK_M4U_ID(4, 1)
+#define M4U_PORT_L4_VDEC_PP_EXT MTK_M4U_ID(4, 2)
+#define M4U_PORT_L4_VDEC_PRED_RD_EXT MTK_M4U_ID(4, 3)
+#define M4U_PORT_L4_VDEC_PRED_WR_EXT MTK_M4U_ID(4, 4)
+#define M4U_PORT_L4_VDEC_PPWRAP_EXT MTK_M4U_ID(4, 5)
+#define M4U_PORT_L4_VDEC_TILE_EXT MTK_M4U_ID(4, 6)
+#define M4U_PORT_L4_VDEC_VLD_EXT MTK_M4U_ID(4, 7)
+#define M4U_PORT_L4_VDEC_VLD2_EXT MTK_M4U_ID(4, 8)
+#define M4U_PORT_L4_VDEC_AVC_MV_EXT MTK_M4U_ID(4, 9)
+#define M4U_PORT_L4_VDEC_RG_CTRL_DMA_EXT MTK_M4U_ID(4, 10)
+
+/* larb5 */
+#define M4U_PORT_L5_VDEC_LAT0_VLD_EXT MTK_M4U_ID(5, 0)
+#define M4U_PORT_L5_VDEC_LAT0_VLD2_EXT MTK_M4U_ID(5, 1)
+#define M4U_PORT_L5_VDEC_LAT0_AVC_MV_EXT MTK_M4U_ID(5, 2)
+#define M4U_PORT_L5_VDEC_LAT0_PRED_RD_EXT MTK_M4U_ID(5, 3)
+#define M4U_PORT_L5_VDEC_LAT0_TILE_EXT MTK_M4U_ID(5, 4)
+#define M4U_PORT_L5_VDEC_LAT0_WDMA_EXT MTK_M4U_ID(5, 5)
+#define M4U_PORT_L5_VDEC_LAT0_RG_CTRL_DMA_EXT MTK_M4U_ID(5, 6)
+#define M4U_PORT_L5_VDEC_UFO_ENC_EXT MTK_M4U_ID(5, 7)
+
+/* larb6: null */
+
+/* larb7 */
+#define M4U_PORT_L7_VENC_RCPU MTK_M4U_ID(7, 0)
+#define M4U_PORT_L7_VENC_REC MTK_M4U_ID(7, 1)
+#define M4U_PORT_L7_VENC_BSDMA MTK_M4U_ID(7, 2)
+#define M4U_PORT_L7_VENC_SV_COMV MTK_M4U_ID(7, 3)
+#define M4U_PORT_L7_VENC_RD_COMV MTK_M4U_ID(7, 4)
+#define M4U_PORT_L7_VENC_CUR_LUMA MTK_M4U_ID(7, 5)
+#define M4U_PORT_L7_VENC_CUR_CHROMA MTK_M4U_ID(7, 6)
+#define M4U_PORT_L7_VENC_REF_LUMA MTK_M4U_ID(7, 7)
+#define M4U_PORT_L7_VENC_REF_CHROMA MTK_M4U_ID(7, 8)
+#define M4U_PORT_L7_JPGENC_Y_RDMA MTK_M4U_ID(7, 9)
+#define M4U_PORT_L7_JPGENC_Q_RDMA MTK_M4U_ID(7, 10)
+#define M4U_PORT_L7_JPGENC_C_TABLE MTK_M4U_ID(7, 11)
+#define M4U_PORT_L7_JPGENC_BSDMA MTK_M4U_ID(7, 12)
+#define M4U_PORT_L7_VENC_SUB_R_LUMA MTK_M4U_ID(7, 13)
+#define M4U_PORT_L7_VENC_SUB_W_LUMA MTK_M4U_ID(7, 14)
+
+/* larb8: null */
+
+/* larb9 */
+#define M4U_PORT_L9_IMG_IMGI_D1 MTK_M4U_ID(9, 0)
+#define M4U_PORT_L9_IMG_IMGBI_D1 MTK_M4U_ID(9, 1)
+#define M4U_PORT_L9_IMG_DMGI_D1 MTK_M4U_ID(9, 2)
+#define M4U_PORT_L9_IMG_DEPI_D1 MTK_M4U_ID(9, 3)
+#define M4U_PORT_L9_IMG_ICE_D1 MTK_M4U_ID(9, 4)
+#define M4U_PORT_L9_IMG_SMTI_D1 MTK_M4U_ID(9, 5)
+#define M4U_PORT_L9_IMG_SMTO_D2 MTK_M4U_ID(9, 6)
+#define M4U_PORT_L9_IMG_SMTO_D1 MTK_M4U_ID(9, 7)
+#define M4U_PORT_L9_IMG_CRZO_D1 MTK_M4U_ID(9, 8)
+#define M4U_PORT_L9_IMG_IMG3O_D1 MTK_M4U_ID(9, 9)
+#define M4U_PORT_L9_IMG_VIPI_D1 MTK_M4U_ID(9, 10)
+#define M4U_PORT_L9_IMG_SMTI_D5 MTK_M4U_ID(9, 11)
+#define M4U_PORT_L9_IMG_TIMGO_D1 MTK_M4U_ID(9, 12)
+#define M4U_PORT_L9_IMG_UFBC_W0 MTK_M4U_ID(9, 13)
+#define M4U_PORT_L9_IMG_UFBC_R0 MTK_M4U_ID(9, 14)
+
+/* larb10: null */
+
+/* larb11 */
+#define M4U_PORT_L11_IMG_IMGI_D1 MTK_M4U_ID(11, 0)
+#define M4U_PORT_L11_IMG_IMGBI_D1 MTK_M4U_ID(11, 1)
+#define M4U_PORT_L11_IMG_DMGI_D1 MTK_M4U_ID(11, 2)
+#define M4U_PORT_L11_IMG_DEPI_D1 MTK_M4U_ID(11, 3)
+#define M4U_PORT_L11_IMG_ICE_D1 MTK_M4U_ID(11, 4)
+#define M4U_PORT_L11_IMG_SMTI_D1 MTK_M4U_ID(11, 5)
+#define M4U_PORT_L11_IMG_SMTO_D2 MTK_M4U_ID(11, 6)
+#define M4U_PORT_L11_IMG_SMTO_D1 MTK_M4U_ID(11, 7)
+#define M4U_PORT_L11_IMG_CRZO_D1 MTK_M4U_ID(11, 8)
+#define M4U_PORT_L11_IMG_IMG3O_D1 MTK_M4U_ID(11, 9)
+#define M4U_PORT_L11_IMG_VIPI_D1 MTK_M4U_ID(11, 10)
+#define M4U_PORT_L11_IMG_SMTI_D5 MTK_M4U_ID(11, 11)
+#define M4U_PORT_L11_IMG_TIMGO_D1 MTK_M4U_ID(11, 12)
+#define M4U_PORT_L11_IMG_UFBC_W0 MTK_M4U_ID(11, 13)
+#define M4U_PORT_L11_IMG_UFBC_R0 MTK_M4U_ID(11, 14)
+#define M4U_PORT_L11_IMG_WPE_RDMA1 MTK_M4U_ID(11, 15)
+#define M4U_PORT_L11_IMG_WPE_RDMA0 MTK_M4U_ID(11, 16)
+#define M4U_PORT_L11_IMG_WPE_WDMA MTK_M4U_ID(11, 17)
+#define M4U_PORT_L11_IMG_MFB_RDMA0 MTK_M4U_ID(11, 18)
+#define M4U_PORT_L11_IMG_MFB_RDMA1 MTK_M4U_ID(11, 19)
+#define M4U_PORT_L11_IMG_MFB_RDMA2 MTK_M4U_ID(11, 20)
+#define M4U_PORT_L11_IMG_MFB_RDMA3 MTK_M4U_ID(11, 21)
+#define M4U_PORT_L11_IMG_MFB_RDMA4 MTK_M4U_ID(11, 22)
+#define M4U_PORT_L11_IMG_MFB_RDMA5 MTK_M4U_ID(11, 23)
+#define M4U_PORT_L11_IMG_MFB_WDMA0 MTK_M4U_ID(11, 24)
+#define M4U_PORT_L11_IMG_MFB_WDMA1 MTK_M4U_ID(11, 25)
+
+/* larb12: null */
+
+/* larb13 */
+#define M4U_PORT_L13_CAM_MRAWI MTK_M4U_ID(13, 0)
+#define M4U_PORT_L13_CAM_MRAWO0 MTK_M4U_ID(13, 1)
+#define M4U_PORT_L13_CAM_MRAWO1 MTK_M4U_ID(13, 2)
+#define M4U_PORT_L13_CAM_CAMSV1 MTK_M4U_ID(13, 3)
+#define M4U_PORT_L13_CAM_CAMSV2 MTK_M4U_ID(13, 4)
+#define M4U_PORT_L13_CAM_CAMSV3 MTK_M4U_ID(13, 5)
+#define M4U_PORT_L13_CAM_CAMSV4 MTK_M4U_ID(13, 6)
+#define M4U_PORT_L13_CAM_CAMSV5 MTK_M4U_ID(13, 7)
+#define M4U_PORT_L13_CAM_CAMSV6 MTK_M4U_ID(13, 8)
+#define M4U_PORT_L13_CAM_CCUI MTK_M4U_ID(13, 9)
+#define M4U_PORT_L13_CAM_CCUO MTK_M4U_ID(13, 10)
+#define M4U_PORT_L13_CAM_FAKE MTK_M4U_ID(13, 11)
+
+/* larb14 */
+#define M4U_PORT_L14_CAM_RESERVE1 MTK_M4U_ID(14, 0)
+#define M4U_PORT_L14_CAM_RESERVE2 MTK_M4U_ID(14, 1)
+#define M4U_PORT_L14_CAM_RESERVE3 MTK_M4U_ID(14, 2)
+#define M4U_PORT_L14_CAM_CAMSV0 MTK_M4U_ID(14, 3)
+#define M4U_PORT_L14_CAM_CCUI MTK_M4U_ID(14, 4)
+#define M4U_PORT_L14_CAM_CCUO MTK_M4U_ID(14, 5)
+
+/* larb15: null */
+
+/* larb16 */
+#define M4U_PORT_L16_CAM_IMGO_R1_A MTK_M4U_ID(16, 0)
+#define M4U_PORT_L16_CAM_RRZO_R1_A MTK_M4U_ID(16, 1)
+#define M4U_PORT_L16_CAM_CQI_R1_A MTK_M4U_ID(16, 2)
+#define M4U_PORT_L16_CAM_BPCI_R1_A MTK_M4U_ID(16, 3)
+#define M4U_PORT_L16_CAM_YUVO_R1_A MTK_M4U_ID(16, 4)
+#define M4U_PORT_L16_CAM_UFDI_R2_A MTK_M4U_ID(16, 5)
+#define M4U_PORT_L16_CAM_RAWI_R2_A MTK_M4U_ID(16, 6)
+#define M4U_PORT_L16_CAM_RAWI_R3_A MTK_M4U_ID(16, 7)
+#define M4U_PORT_L16_CAM_AAO_R1_A MTK_M4U_ID(16, 8)
+#define M4U_PORT_L16_CAM_AFO_R1_A MTK_M4U_ID(16, 9)
+#define M4U_PORT_L16_CAM_FLKO_R1_A MTK_M4U_ID(16, 10)
+#define M4U_PORT_L16_CAM_LCESO_R1_A MTK_M4U_ID(16, 11)
+#define M4U_PORT_L16_CAM_CRZO_R1_A MTK_M4U_ID(16, 12)
+#define M4U_PORT_L16_CAM_LTMSO_R1_A MTK_M4U_ID(16, 13)
+#define M4U_PORT_L16_CAM_RSSO_R1_A MTK_M4U_ID(16, 14)
+#define M4U_PORT_L16_CAM_AAHO_R1_A MTK_M4U_ID(16, 15)
+#define M4U_PORT_L16_CAM_LSCI_R1_A MTK_M4U_ID(16, 16)
+
+/* larb17 */
+#define M4U_PORT_L17_CAM_IMGO_R1_B MTK_M4U_ID(17, 0)
+#define M4U_PORT_L17_CAM_RRZO_R1_B MTK_M4U_ID(17, 1)
+#define M4U_PORT_L17_CAM_CQI_R1_B MTK_M4U_ID(17, 2)
+#define M4U_PORT_L17_CAM_BPCI_R1_B MTK_M4U_ID(17, 3)
+#define M4U_PORT_L17_CAM_YUVO_R1_B MTK_M4U_ID(17, 4)
+#define M4U_PORT_L17_CAM_UFDI_R2_B MTK_M4U_ID(17, 5)
+#define M4U_PORT_L17_CAM_RAWI_R2_B MTK_M4U_ID(17, 6)
+#define M4U_PORT_L17_CAM_RAWI_R3_B MTK_M4U_ID(17, 7)
+#define M4U_PORT_L17_CAM_AAO_R1_B MTK_M4U_ID(17, 8)
+#define M4U_PORT_L17_CAM_AFO_R1_B MTK_M4U_ID(17, 9)
+#define M4U_PORT_L17_CAM_FLKO_R1_B MTK_M4U_ID(17, 10)
+#define M4U_PORT_L17_CAM_LCESO_R1_B MTK_M4U_ID(17, 11)
+#define M4U_PORT_L17_CAM_CRZO_R1_B MTK_M4U_ID(17, 12)
+#define M4U_PORT_L17_CAM_LTMSO_R1_B MTK_M4U_ID(17, 13)
+#define M4U_PORT_L17_CAM_RSSO_R1_B MTK_M4U_ID(17, 14)
+#define M4U_PORT_L17_CAM_AAHO_R1_B MTK_M4U_ID(17, 15)
+#define M4U_PORT_L17_CAM_LSCI_R1_B MTK_M4U_ID(17, 16)
+
+/* larb18 */
+#define M4U_PORT_L18_CAM_IMGO_R1_C MTK_M4U_ID(18, 0)
+#define M4U_PORT_L18_CAM_RRZO_R1_C MTK_M4U_ID(18, 1)
+#define M4U_PORT_L18_CAM_CQI_R1_C MTK_M4U_ID(18, 2)
+#define M4U_PORT_L18_CAM_BPCI_R1_C MTK_M4U_ID(18, 3)
+#define M4U_PORT_L18_CAM_YUVO_R1_C MTK_M4U_ID(18, 4)
+#define M4U_PORT_L18_CAM_UFDI_R2_C MTK_M4U_ID(18, 5)
+#define M4U_PORT_L18_CAM_RAWI_R2_C MTK_M4U_ID(18, 6)
+#define M4U_PORT_L18_CAM_RAWI_R3_C MTK_M4U_ID(18, 7)
+#define M4U_PORT_L18_CAM_AAO_R1_C MTK_M4U_ID(18, 8)
+#define M4U_PORT_L18_CAM_AFO_R1_C MTK_M4U_ID(18, 9)
+#define M4U_PORT_L18_CAM_FLKO_R1_C MTK_M4U_ID(18, 10)
+#define M4U_PORT_L18_CAM_LCESO_R1_C MTK_M4U_ID(18, 11)
+#define M4U_PORT_L18_CAM_CRZO_R1_C MTK_M4U_ID(18, 12)
+#define M4U_PORT_L18_CAM_LTMSO_R1_C MTK_M4U_ID(18, 13)
+#define M4U_PORT_L18_CAM_RSSO_R1_C MTK_M4U_ID(18, 14)
+#define M4U_PORT_L18_CAM_AAHO_R1_C MTK_M4U_ID(18, 15)
+#define M4U_PORT_L18_CAM_LSCI_R1_C MTK_M4U_ID(18, 16)
+
+/* larb19 */
+#define M4U_PORT_L19_IPE_DVS_RDMA MTK_M4U_ID(19, 0)
+#define M4U_PORT_L19_IPE_DVS_WDMA MTK_M4U_ID(19, 1)
+#define M4U_PORT_L19_IPE_DVP_RDMA MTK_M4U_ID(19, 2)
+#define M4U_PORT_L19_IPE_DVP_WDMA MTK_M4U_ID(19, 3)
+
+/* larb20 */
+#define M4U_PORT_L20_IPE_FDVT_RDA MTK_M4U_ID(20, 0)
+#define M4U_PORT_L20_IPE_FDVT_RDB MTK_M4U_ID(20, 1)
+#define M4U_PORT_L20_IPE_FDVT_WRA MTK_M4U_ID(20, 2)
+#define M4U_PORT_L20_IPE_FDVT_WRB MTK_M4U_ID(20, 3)
+#define M4U_PORT_L20_IPE_RSC_RDMA0 MTK_M4U_ID(20, 4)
+#define M4U_PORT_L20_IPE_RSC_WDMA MTK_M4U_ID(20, 5)
+
+#endif
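Editor's note: the fixed CCU windows quoted in the comment above are each 64MB wide (0x43ff_ffff - 0x4000_0000 + 1 = 0x0400_0000). A sketch with hypothetical names, not part of the patch, checking whether an iova falls in the CCU0 window.

#include <stdbool.h>

#define SKETCH_CCU0_BASE	0x40000000UL
#define SKETCH_CCU0_SIZE	0x04000000UL	/* 64MB */

static inline bool sketch_in_ccu0_window(unsigned long iova)
{
	return iova >= SKETCH_CCU0_BASE &&
	       iova - SKETCH_CCU0_BASE < SKETCH_CCU0_SIZE;
}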
diff --git a/include/dt-bindings/memory/mt8195-memory-port.h b/include/dt-bindings/memory/mt8195-memory-port.h
new file mode 100644
index 000000000000..70ba9f498eeb
--- /dev/null
+++ b/include/dt-bindings/memory/mt8195-memory-port.h
@@ -0,0 +1,408 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MT8195_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT8195_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+/*
+ * The MM IOMMU supports a 16GB dma address space, split into four
+ * ranges: 0 ~ 4G, 4G ~ 8G, 8G ~ 12G and 12G ~ 16G. These masters can
+ * be placed in any one of the regions, BUT:
+ * a) Make sure all the ports inside a larb are in one range.
+ * b) The iova of any master can NOT cross the 4G/8G/12G boundary.
+ *
+ * This is the suggested mapping for this SoC:
+ *
+ * modules dma-address-region larbs-ports
+ * disp 0 ~ 4G larb0/1/2/3
+ * vcodec 4G ~ 8G larb19/20/21/22/23/24
+ * cam/mdp 8G ~ 12G the other larbs.
+ * N/A 12G ~ 16G
+ * CCU0 0x24000_0000 ~ 0x243ff_ffff larb18: port 0/1
+ * CCU1 0x24400_0000 ~ 0x247ff_ffff larb18: port 2/3
+ *
+ * This SoC has two IOMMU HWs; this is the detailed connection information:
+ * iommu-vdo: larb0/2/5/7/9/10/11/13/17/19/21/24/25/28
+ * iommu-vpp: larb1/3/4/6/8/12/14/16/18/20/22/23/26/27
+ */
+
+/* MM IOMMU ports */
+/* larb0 */
+#define M4U_PORT_L0_DISP_RDMA0 MTK_M4U_ID(0, 0)
+#define M4U_PORT_L0_DISP_WDMA0 MTK_M4U_ID(0, 1)
+#define M4U_PORT_L0_DISP_OVL0_RDMA0 MTK_M4U_ID(0, 2)
+#define M4U_PORT_L0_DISP_OVL0_RDMA1 MTK_M4U_ID(0, 3)
+#define M4U_PORT_L0_DISP_OVL0_HDR MTK_M4U_ID(0, 4)
+#define M4U_PORT_L0_DISP_FAKE0 MTK_M4U_ID(0, 5)
+
+/* larb1 */
+#define M4U_PORT_L1_DISP_RDMA0 MTK_M4U_ID(1, 0)
+#define M4U_PORT_L1_DISP_WDMA0 MTK_M4U_ID(1, 1)
+#define M4U_PORT_L1_DISP_OVL0_RDMA0 MTK_M4U_ID(1, 2)
+#define M4U_PORT_L1_DISP_OVL0_RDMA1 MTK_M4U_ID(1, 3)
+#define M4U_PORT_L1_DISP_OVL0_HDR MTK_M4U_ID(1, 4)
+#define M4U_PORT_L1_DISP_FAKE0 MTK_M4U_ID(1, 5)
+
+/* larb2 */
+#define M4U_PORT_L2_MDP_RDMA0 MTK_M4U_ID(2, 0)
+#define M4U_PORT_L2_MDP_RDMA2 MTK_M4U_ID(2, 1)
+#define M4U_PORT_L2_MDP_RDMA4 MTK_M4U_ID(2, 2)
+#define M4U_PORT_L2_MDP_RDMA6 MTK_M4U_ID(2, 3)
+#define M4U_PORT_L2_DISP_FAKE1 MTK_M4U_ID(2, 4)
+
+/* larb3 */
+#define M4U_PORT_L3_MDP_RDMA1 MTK_M4U_ID(3, 0)
+#define M4U_PORT_L3_MDP_RDMA3 MTK_M4U_ID(3, 1)
+#define M4U_PORT_L3_MDP_RDMA5 MTK_M4U_ID(3, 2)
+#define M4U_PORT_L3_MDP_RDMA7 MTK_M4U_ID(3, 3)
+#define M4U_PORT_L3_HDR_DS MTK_M4U_ID(3, 4)
+#define M4U_PORT_L3_HDR_ADL MTK_M4U_ID(3, 5)
+#define M4U_PORT_L3_DISP_FAKE1 MTK_M4U_ID(3, 6)
+
+/* larb4 */
+#define M4U_PORT_L4_MDP_RDMA MTK_M4U_ID(4, 0)
+#define M4U_PORT_L4_MDP_FG MTK_M4U_ID(4, 1)
+#define M4U_PORT_L4_MDP_OVL MTK_M4U_ID(4, 2)
+#define M4U_PORT_L4_MDP_WROT MTK_M4U_ID(4, 3)
+#define M4U_PORT_L4_FAKE MTK_M4U_ID(4, 4)
+
+/* larb5 */
+#define M4U_PORT_L5_SVPP1_MDP_RDMA MTK_M4U_ID(5, 0)
+#define M4U_PORT_L5_SVPP1_MDP_FG MTK_M4U_ID(5, 1)
+#define M4U_PORT_L5_SVPP1_MDP_OVL MTK_M4U_ID(5, 2)
+#define M4U_PORT_L5_SVPP1_MDP_WROT MTK_M4U_ID(5, 3)
+#define M4U_PORT_L5_SVPP2_MDP_RDMA MTK_M4U_ID(5, 4)
+#define M4U_PORT_L5_SVPP2_MDP_FG MTK_M4U_ID(5, 5)
+#define M4U_PORT_L5_SVPP2_MDP_WROT MTK_M4U_ID(5, 6)
+#define M4U_PORT_L5_FAKE MTK_M4U_ID(5, 7)
+
+/* larb6 */
+#define M4U_PORT_L6_SVPP3_MDP_RDMA MTK_M4U_ID(6, 0)
+#define M4U_PORT_L6_SVPP3_MDP_FG MTK_M4U_ID(6, 1)
+#define M4U_PORT_L6_SVPP3_MDP_WROT MTK_M4U_ID(6, 2)
+#define M4U_PORT_L6_FAKE MTK_M4U_ID(6, 3)
+
+/* larb7 */
+#define M4U_PORT_L7_IMG_WPE_RDMA0 MTK_M4U_ID(7, 0)
+#define M4U_PORT_L7_IMG_WPE_RDMA1 MTK_M4U_ID(7, 1)
+#define M4U_PORT_L7_IMG_WPE_WDMA0 MTK_M4U_ID(7, 2)
+
+/* larb8 */
+#define M4U_PORT_L8_IMG_WPE_RDMA0 MTK_M4U_ID(8, 0)
+#define M4U_PORT_L8_IMG_WPE_RDMA1 MTK_M4U_ID(8, 1)
+#define M4U_PORT_L8_IMG_WPE_WDMA0 MTK_M4U_ID(8, 2)
+
+/* larb9 */
+#define M4U_PORT_L9_IMG_IMGI_T1_A MTK_M4U_ID(9, 0)
+#define M4U_PORT_L9_IMG_IMGBI_T1_A MTK_M4U_ID(9, 1)
+#define M4U_PORT_L9_IMG_IMGCI_T1_A MTK_M4U_ID(9, 2)
+#define M4U_PORT_L9_IMG_SMTI_T1_A MTK_M4U_ID(9, 3)
+#define M4U_PORT_L9_IMG_TNCSTI_T1_A MTK_M4U_ID(9, 4)
+#define M4U_PORT_L9_IMG_TNCSTI_T4_A MTK_M4U_ID(9, 5)
+#define M4U_PORT_L9_IMG_YUVO_T1_A MTK_M4U_ID(9, 6)
+#define M4U_PORT_L9_IMG_TIMGO_T1_A MTK_M4U_ID(9, 7)
+#define M4U_PORT_L9_IMG_YUVO_T2_A MTK_M4U_ID(9, 8)
+#define M4U_PORT_L9_IMG_IMGI_T1_B MTK_M4U_ID(9, 9)
+#define M4U_PORT_L9_IMG_IMGBI_T1_B MTK_M4U_ID(9, 10)
+#define M4U_PORT_L9_IMG_IMGCI_T1_B MTK_M4U_ID(9, 11)
+#define M4U_PORT_L9_IMG_YUVO_T5_A MTK_M4U_ID(9, 12)
+#define M4U_PORT_L9_IMG_SMTI_T1_B MTK_M4U_ID(9, 13)
+#define M4U_PORT_L9_IMG_TNCSO_T1_A MTK_M4U_ID(9, 14)
+#define M4U_PORT_L9_IMG_SMTO_T1_A MTK_M4U_ID(9, 15)
+#define M4U_PORT_L9_IMG_TNCSTO_T1_A MTK_M4U_ID(9, 16)
+#define M4U_PORT_L9_IMG_YUVO_T2_B MTK_M4U_ID(9, 17)
+#define M4U_PORT_L9_IMG_YUVO_T5_B MTK_M4U_ID(9, 18)
+#define M4U_PORT_L9_IMG_SMTO_T1_B MTK_M4U_ID(9, 19)
+
+/* larb10 */
+#define M4U_PORT_L10_IMG_IMGI_D1_A MTK_M4U_ID(10, 0)
+#define M4U_PORT_L10_IMG_IMGCI_D1_A MTK_M4U_ID(10, 1)
+#define M4U_PORT_L10_IMG_DEPI_D1_A MTK_M4U_ID(10, 2)
+#define M4U_PORT_L10_IMG_DMGI_D1_A MTK_M4U_ID(10, 3)
+#define M4U_PORT_L10_IMG_VIPI_D1_A MTK_M4U_ID(10, 4)
+#define M4U_PORT_L10_IMG_TNRWI_D1_A MTK_M4U_ID(10, 5)
+#define M4U_PORT_L10_IMG_RECI_D1_A MTK_M4U_ID(10, 6)
+#define M4U_PORT_L10_IMG_SMTI_D1_A MTK_M4U_ID(10, 7)
+#define M4U_PORT_L10_IMG_SMTI_D6_A MTK_M4U_ID(10, 8)
+#define M4U_PORT_L10_IMG_PIMGI_P1_A MTK_M4U_ID(10, 9)
+#define M4U_PORT_L10_IMG_PIMGBI_P1_A MTK_M4U_ID(10, 10)
+#define M4U_PORT_L10_IMG_PIMGCI_P1_A MTK_M4U_ID(10, 11)
+#define M4U_PORT_L10_IMG_PIMGI_P1_B MTK_M4U_ID(10, 12)
+#define M4U_PORT_L10_IMG_PIMGBI_P1_B MTK_M4U_ID(10, 13)
+#define M4U_PORT_L10_IMG_PIMGCI_P1_B MTK_M4U_ID(10, 14)
+#define M4U_PORT_L10_IMG_IMG3O_D1_A MTK_M4U_ID(10, 15)
+#define M4U_PORT_L10_IMG_IMG4O_D1_A MTK_M4U_ID(10, 16)
+#define M4U_PORT_L10_IMG_IMG3CO_D1_A MTK_M4U_ID(10, 17)
+#define M4U_PORT_L10_IMG_FEO_D1_A MTK_M4U_ID(10, 18)
+#define M4U_PORT_L10_IMG_IMG2O_D1_A MTK_M4U_ID(10, 19)
+#define M4U_PORT_L10_IMG_TNRWO_D1_A MTK_M4U_ID(10, 20)
+#define M4U_PORT_L10_IMG_SMTO_D1_A MTK_M4U_ID(10, 21)
+#define M4U_PORT_L10_IMG_WROT_P1_A MTK_M4U_ID(10, 22)
+#define M4U_PORT_L10_IMG_WROT_P1_B MTK_M4U_ID(10, 23)
+
+/* larb11 */
+#define M4U_PORT_L11_IMG_WPE_EIS_RDMA0_A MTK_M4U_ID(11, 0)
+#define M4U_PORT_L11_IMG_WPE_EIS_RDMA1_A MTK_M4U_ID(11, 1)
+#define M4U_PORT_L11_IMG_WPE_EIS_WDMA0_A MTK_M4U_ID(11, 2)
+#define M4U_PORT_L11_IMG_WPE_TNR_RDMA0_A MTK_M4U_ID(11, 3)
+#define M4U_PORT_L11_IMG_WPE_TNR_RDMA1_A MTK_M4U_ID(11, 4)
+#define M4U_PORT_L11_IMG_WPE_TNR_WDMA0_A MTK_M4U_ID(11, 5)
+#define M4U_PORT_L11_IMG_WPE_EIS_CQ0_A MTK_M4U_ID(11, 6)
+#define M4U_PORT_L11_IMG_WPE_EIS_CQ1_A MTK_M4U_ID(11, 7)
+#define M4U_PORT_L11_IMG_WPE_TNR_CQ0_A MTK_M4U_ID(11, 8)
+#define M4U_PORT_L11_IMG_WPE_TNR_CQ1_A MTK_M4U_ID(11, 9)
+
+/* larb12 */
+#define M4U_PORT_L12_IMG_FDVT_RDA MTK_M4U_ID(12, 0)
+#define M4U_PORT_L12_IMG_FDVT_RDB MTK_M4U_ID(12, 1)
+#define M4U_PORT_L12_IMG_FDVT_WRA MTK_M4U_ID(12, 2)
+#define M4U_PORT_L12_IMG_FDVT_WRB MTK_M4U_ID(12, 3)
+#define M4U_PORT_L12_IMG_ME_RDMA MTK_M4U_ID(12, 4)
+#define M4U_PORT_L12_IMG_ME_WDMA MTK_M4U_ID(12, 5)
+#define M4U_PORT_L12_IMG_DVS_RDMA MTK_M4U_ID(12, 6)
+#define M4U_PORT_L12_IMG_DVS_WDMA MTK_M4U_ID(12, 7)
+#define M4U_PORT_L12_IMG_DVP_RDMA MTK_M4U_ID(12, 8)
+#define M4U_PORT_L12_IMG_DVP_WDMA MTK_M4U_ID(12, 9)
+
+/* larb13 */
+#define M4U_PORT_L13_CAM_CAMSV_CQI_E1 MTK_M4U_ID(13, 0)
+#define M4U_PORT_L13_CAM_CAMSV_CQI_E2 MTK_M4U_ID(13, 1)
+#define M4U_PORT_L13_CAM_GCAMSV_A_IMGO_0 MTK_M4U_ID(13, 2)
+#define M4U_PORT_L13_CAM_SCAMSV_A_IMGO_0 MTK_M4U_ID(13, 3)
+#define M4U_PORT_L13_CAM_GCAMSV_B_IMGO_0 MTK_M4U_ID(13, 4)
+#define M4U_PORT_L13_CAM_GCAMSV_B_IMGO_1 MTK_M4U_ID(13, 5)
+#define M4U_PORT_L13_CAM_GCAMSV_A_UFEO_0 MTK_M4U_ID(13, 6)
+#define M4U_PORT_L13_CAM_GCAMSV_B_UFEO_0 MTK_M4U_ID(13, 7)
+#define M4U_PORT_L13_CAM_PDAI_0 MTK_M4U_ID(13, 8)
+#define M4U_PORT_L13_CAM_FAKE MTK_M4U_ID(13, 9)
+
+/* larb14 */
+#define M4U_PORT_L14_CAM_GCAMSV_A_IMGO_1 MTK_M4U_ID(14, 0)
+#define M4U_PORT_L14_CAM_SCAMSV_A_IMGO_1 MTK_M4U_ID(14, 1)
+#define M4U_PORT_L14_CAM_GCAMSV_B_IMGO_0 MTK_M4U_ID(14, 2)
+#define M4U_PORT_L14_CAM_GCAMSV_B_IMGO_1 MTK_M4U_ID(14, 3)
+#define M4U_PORT_L14_CAM_SCAMSV_B_IMGO_0 MTK_M4U_ID(14, 4)
+#define M4U_PORT_L14_CAM_SCAMSV_B_IMGO_1 MTK_M4U_ID(14, 5)
+#define M4U_PORT_L14_CAM_IPUI MTK_M4U_ID(14, 6)
+#define M4U_PORT_L14_CAM_IPU2I MTK_M4U_ID(14, 7)
+#define M4U_PORT_L14_CAM_IPUO MTK_M4U_ID(14, 8)
+#define M4U_PORT_L14_CAM_IPU2O MTK_M4U_ID(14, 9)
+#define M4U_PORT_L14_CAM_IPU3O MTK_M4U_ID(14, 10)
+#define M4U_PORT_L14_CAM_GCAMSV_A_UFEO_1 MTK_M4U_ID(14, 11)
+#define M4U_PORT_L14_CAM_GCAMSV_B_UFEO_1 MTK_M4U_ID(14, 12)
+#define M4U_PORT_L14_CAM_PDAI_1 MTK_M4U_ID(14, 13)
+#define M4U_PORT_L14_CAM_PDAO MTK_M4U_ID(14, 14)
+
+/* larb15: null */
+
+/* larb16 */
+#define M4U_PORT_L16_CAM_IMGO_R1 MTK_M4U_ID(16, 0)
+#define M4U_PORT_L16_CAM_CQI_R1 MTK_M4U_ID(16, 1)
+#define M4U_PORT_L16_CAM_CQI_R2 MTK_M4U_ID(16, 2)
+#define M4U_PORT_L16_CAM_BPCI_R1 MTK_M4U_ID(16, 3)
+#define M4U_PORT_L16_CAM_LSCI_R1 MTK_M4U_ID(16, 4)
+#define M4U_PORT_L16_CAM_RAWI_R2 MTK_M4U_ID(16, 5)
+#define M4U_PORT_L16_CAM_RAWI_R3 MTK_M4U_ID(16, 6)
+#define M4U_PORT_L16_CAM_UFDI_R2 MTK_M4U_ID(16, 7)
+#define M4U_PORT_L16_CAM_UFDI_R3 MTK_M4U_ID(16, 8)
+#define M4U_PORT_L16_CAM_RAWI_R4 MTK_M4U_ID(16, 9)
+#define M4U_PORT_L16_CAM_RAWI_R5 MTK_M4U_ID(16, 10)
+#define M4U_PORT_L16_CAM_AAI_R1 MTK_M4U_ID(16, 11)
+#define M4U_PORT_L16_CAM_FHO_R1 MTK_M4U_ID(16, 12)
+#define M4U_PORT_L16_CAM_AAO_R1 MTK_M4U_ID(16, 13)
+#define M4U_PORT_L16_CAM_TSFSO_R1 MTK_M4U_ID(16, 14)
+#define M4U_PORT_L16_CAM_FLKO_R1 MTK_M4U_ID(16, 15)
+
+/* larb17 */
+#define M4U_PORT_L17_CAM_YUVO_R1 MTK_M4U_ID(17, 0)
+#define M4U_PORT_L17_CAM_YUVO_R3 MTK_M4U_ID(17, 1)
+#define M4U_PORT_L17_CAM_YUVCO_R1 MTK_M4U_ID(17, 2)
+#define M4U_PORT_L17_CAM_YUVO_R2 MTK_M4U_ID(17, 3)
+#define M4U_PORT_L17_CAM_RZH1N2TO_R1 MTK_M4U_ID(17, 4)
+#define M4U_PORT_L17_CAM_DRZS4NO_R1 MTK_M4U_ID(17, 5)
+#define M4U_PORT_L17_CAM_TNCSO_R1 MTK_M4U_ID(17, 6)
+
+/* larb18 */
+#define M4U_PORT_L18_CAM_CCUI MTK_M4U_ID(18, 0)
+#define M4U_PORT_L18_CAM_CCUO MTK_M4U_ID(18, 1)
+#define M4U_PORT_L18_CAM_CCUI2 MTK_M4U_ID(18, 2)
+#define M4U_PORT_L18_CAM_CCUO2 MTK_M4U_ID(18, 3)
+
+/* larb19 */
+#define M4U_PORT_L19_VENC_RCPU MTK_M4U_ID(19, 0)
+#define M4U_PORT_L19_VENC_REC MTK_M4U_ID(19, 1)
+#define M4U_PORT_L19_VENC_BSDMA MTK_M4U_ID(19, 2)
+#define M4U_PORT_L19_VENC_SV_COMV MTK_M4U_ID(19, 3)
+#define M4U_PORT_L19_VENC_RD_COMV MTK_M4U_ID(19, 4)
+#define M4U_PORT_L19_VENC_NBM_RDMA MTK_M4U_ID(19, 5)
+#define M4U_PORT_L19_VENC_NBM_RDMA_LITE MTK_M4U_ID(19, 6)
+#define M4U_PORT_L19_JPGENC_Y_RDMA MTK_M4U_ID(19, 7)
+#define M4U_PORT_L19_JPGENC_C_RDMA MTK_M4U_ID(19, 8)
+#define M4U_PORT_L19_JPGENC_Q_TABLE MTK_M4U_ID(19, 9)
+#define M4U_PORT_L19_VENC_SUB_W_LUMA MTK_M4U_ID(19, 10)
+#define M4U_PORT_L19_VENC_FCS_NBM_RDMA MTK_M4U_ID(19, 11)
+#define M4U_PORT_L19_JPGENC_BSDMA MTK_M4U_ID(19, 12)
+#define M4U_PORT_L19_JPGDEC_WDMA0 MTK_M4U_ID(19, 13)
+#define M4U_PORT_L19_JPGDEC_BSDMA0 MTK_M4U_ID(19, 14)
+#define M4U_PORT_L19_VENC_NBM_WDMA MTK_M4U_ID(19, 15)
+#define M4U_PORT_L19_VENC_NBM_WDMA_LITE MTK_M4U_ID(19, 16)
+#define M4U_PORT_L19_VENC_FCS_NBM_WDMA MTK_M4U_ID(19, 17)
+#define M4U_PORT_L19_JPGDEC_WDMA1 MTK_M4U_ID(19, 18)
+#define M4U_PORT_L19_JPGDEC_BSDMA1 MTK_M4U_ID(19, 19)
+#define M4U_PORT_L19_JPGDEC_BUFF_OFFSET1 MTK_M4U_ID(19, 20)
+#define M4U_PORT_L19_JPGDEC_BUFF_OFFSET0 MTK_M4U_ID(19, 21)
+#define M4U_PORT_L19_VENC_CUR_LUMA MTK_M4U_ID(19, 22)
+#define M4U_PORT_L19_VENC_CUR_CHROMA MTK_M4U_ID(19, 23)
+#define M4U_PORT_L19_VENC_REF_LUMA MTK_M4U_ID(19, 24)
+#define M4U_PORT_L19_VENC_REF_CHROMA MTK_M4U_ID(19, 25)
+#define M4U_PORT_L19_VENC_SUB_R_CHROMA MTK_M4U_ID(19, 26)
+
+/* larb20 */
+#define M4U_PORT_L20_VENC_RCPU MTK_M4U_ID(20, 0)
+#define M4U_PORT_L20_VENC_REC MTK_M4U_ID(20, 1)
+#define M4U_PORT_L20_VENC_BSDMA MTK_M4U_ID(20, 2)
+#define M4U_PORT_L20_VENC_SV_COMV MTK_M4U_ID(20, 3)
+#define M4U_PORT_L20_VENC_RD_COMV MTK_M4U_ID(20, 4)
+#define M4U_PORT_L20_VENC_NBM_RDMA MTK_M4U_ID(20, 5)
+#define M4U_PORT_L20_VENC_NBM_RDMA_LITE MTK_M4U_ID(20, 6)
+#define M4U_PORT_L20_JPGENC_Y_RDMA MTK_M4U_ID(20, 7)
+#define M4U_PORT_L20_JPGENC_C_RDMA MTK_M4U_ID(20, 8)
+#define M4U_PORT_L20_JPGENC_Q_TABLE MTK_M4U_ID(20, 9)
+#define M4U_PORT_L20_VENC_SUB_W_LUMA MTK_M4U_ID(20, 10)
+#define M4U_PORT_L20_VENC_FCS_NBM_RDMA MTK_M4U_ID(20, 11)
+#define M4U_PORT_L20_JPGENC_BSDMA MTK_M4U_ID(20, 12)
+#define M4U_PORT_L20_JPGDEC_WDMA0 MTK_M4U_ID(20, 13)
+#define M4U_PORT_L20_JPGDEC_BSDMA0 MTK_M4U_ID(20, 14)
+#define M4U_PORT_L20_VENC_NBM_WDMA MTK_M4U_ID(20, 15)
+#define M4U_PORT_L20_VENC_NBM_WDMA_LITE MTK_M4U_ID(20, 16)
+#define M4U_PORT_L20_VENC_FCS_NBM_WDMA MTK_M4U_ID(20, 17)
+#define M4U_PORT_L20_JPGDEC_WDMA1 MTK_M4U_ID(20, 18)
+#define M4U_PORT_L20_JPGDEC_BSDMA1 MTK_M4U_ID(20, 19)
+#define M4U_PORT_L20_JPGDEC_BUFF_OFFSET1 MTK_M4U_ID(20, 20)
+#define M4U_PORT_L20_JPGDEC_BUFF_OFFSET0 MTK_M4U_ID(20, 21)
+#define M4U_PORT_L20_VENC_CUR_LUMA MTK_M4U_ID(20, 22)
+#define M4U_PORT_L20_VENC_CUR_CHROMA MTK_M4U_ID(20, 23)
+#define M4U_PORT_L20_VENC_REF_LUMA MTK_M4U_ID(20, 24)
+#define M4U_PORT_L20_VENC_REF_CHROMA MTK_M4U_ID(20, 25)
+#define M4U_PORT_L20_VENC_SUB_R_CHROMA MTK_M4U_ID(20, 26)
+
+/* larb21 */
+#define M4U_PORT_L21_VDEC_MC_EXT MTK_M4U_ID(21, 0)
+#define M4U_PORT_L21_VDEC_UFO_EXT MTK_M4U_ID(21, 1)
+#define M4U_PORT_L21_VDEC_PP_EXT MTK_M4U_ID(21, 2)
+#define M4U_PORT_L21_VDEC_PRED_RD_EXT MTK_M4U_ID(21, 3)
+#define M4U_PORT_L21_VDEC_PRED_WR_EXT MTK_M4U_ID(21, 4)
+#define M4U_PORT_L21_VDEC_PPWRAP_EXT MTK_M4U_ID(21, 5)
+#define M4U_PORT_L21_VDEC_TILE_EXT MTK_M4U_ID(21, 6)
+#define M4U_PORT_L21_VDEC_VLD_EXT MTK_M4U_ID(21, 7)
+#define M4U_PORT_L21_VDEC_VLD2_EXT MTK_M4U_ID(21, 8)
+#define M4U_PORT_L21_VDEC_AVC_MV_EXT MTK_M4U_ID(21, 9)
+
+/* larb22 */
+#define M4U_PORT_L22_VDEC_MC_EXT MTK_M4U_ID(22, 0)
+#define M4U_PORT_L22_VDEC_UFO_EXT MTK_M4U_ID(22, 1)
+#define M4U_PORT_L22_VDEC_PP_EXT MTK_M4U_ID(22, 2)
+#define M4U_PORT_L22_VDEC_PRED_RD_EXT MTK_M4U_ID(22, 3)
+#define M4U_PORT_L22_VDEC_PRED_WR_EXT MTK_M4U_ID(22, 4)
+#define M4U_PORT_L22_VDEC_PPWRAP_EXT MTK_M4U_ID(22, 5)
+#define M4U_PORT_L22_VDEC_TILE_EXT MTK_M4U_ID(22, 6)
+#define M4U_PORT_L22_VDEC_VLD_EXT MTK_M4U_ID(22, 7)
+#define M4U_PORT_L22_VDEC_VLD2_EXT MTK_M4U_ID(22, 8)
+#define M4U_PORT_L22_VDEC_AVC_MV_EXT MTK_M4U_ID(22, 9)
+
+/* larb23 */
+#define M4U_PORT_L23_VDEC_UFO_ENC_EXT MTK_M4U_ID(23, 0)
+#define M4U_PORT_L23_VDEC_RDMA_EXT MTK_M4U_ID(23, 1)
+
+/* larb24 */
+#define M4U_PORT_L24_VDEC_LAT0_VLD_EXT MTK_M4U_ID(24, 0)
+#define M4U_PORT_L24_VDEC_LAT0_VLD2_EXT MTK_M4U_ID(24, 1)
+#define M4U_PORT_L24_VDEC_LAT0_AVC_MC_EXT MTK_M4U_ID(24, 2)
+#define M4U_PORT_L24_VDEC_LAT0_PRED_RD_EXT MTK_M4U_ID(24, 3)
+#define M4U_PORT_L24_VDEC_LAT0_TILE_EXT MTK_M4U_ID(24, 4)
+#define M4U_PORT_L24_VDEC_LAT0_WDMA_EXT MTK_M4U_ID(24, 5)
+#define M4U_PORT_L24_VDEC_LAT1_VLD_EXT MTK_M4U_ID(24, 6)
+#define M4U_PORT_L24_VDEC_LAT1_VLD2_EXT MTK_M4U_ID(24, 7)
+#define M4U_PORT_L24_VDEC_LAT1_AVC_MC_EXT MTK_M4U_ID(24, 8)
+#define M4U_PORT_L24_VDEC_LAT1_PRED_RD_EXT MTK_M4U_ID(24, 9)
+#define M4U_PORT_L24_VDEC_LAT1_TILE_EXT MTK_M4U_ID(24, 10)
+#define M4U_PORT_L24_VDEC_LAT1_WDMA_EXT MTK_M4U_ID(24, 11)
+
+/* larb25 */
+#define M4U_PORT_L25_CAM_MRAW0_LSCI_M1 MTK_M4U_ID(25, 0)
+#define M4U_PORT_L25_CAM_MRAW0_CQI_M1 MTK_M4U_ID(25, 1)
+#define M4U_PORT_L25_CAM_MRAW0_CQI_M2 MTK_M4U_ID(25, 2)
+#define M4U_PORT_L25_CAM_MRAW0_IMGO_M1 MTK_M4U_ID(25, 3)
+#define M4U_PORT_L25_CAM_MRAW0_IMGBO_M1 MTK_M4U_ID(25, 4)
+#define M4U_PORT_L25_CAM_MRAW2_LSCI_M1 MTK_M4U_ID(25, 5)
+#define M4U_PORT_L25_CAM_MRAW2_CQI_M1 MTK_M4U_ID(25, 6)
+#define M4U_PORT_L25_CAM_MRAW2_CQI_M2 MTK_M4U_ID(25, 7)
+#define M4U_PORT_L25_CAM_MRAW2_IMGO_M1 MTK_M4U_ID(25, 8)
+#define M4U_PORT_L25_CAM_MRAW2_IMGBO_M1 MTK_M4U_ID(25, 9)
+#define M4U_PORT_L25_CAM_MRAW0_AFO_M1 MTK_M4U_ID(25, 10)
+#define M4U_PORT_L25_CAM_MRAW2_AFO_M1 MTK_M4U_ID(25, 11)
+
+/* larb26 */
+#define M4U_PORT_L26_CAM_MRAW1_LSCI_M1 MTK_M4U_ID(26, 0)
+#define M4U_PORT_L26_CAM_MRAW1_CQI_M1 MTK_M4U_ID(26, 1)
+#define M4U_PORT_L26_CAM_MRAW1_CQI_M2 MTK_M4U_ID(26, 2)
+#define M4U_PORT_L26_CAM_MRAW1_IMGO_M1 MTK_M4U_ID(26, 3)
+#define M4U_PORT_L26_CAM_MRAW1_IMGBO_M1 MTK_M4U_ID(26, 4)
+#define M4U_PORT_L26_CAM_MRAW3_LSCI_M1 MTK_M4U_ID(26, 5)
+#define M4U_PORT_L26_CAM_MRAW3_CQI_M1 MTK_M4U_ID(26, 6)
+#define M4U_PORT_L26_CAM_MRAW3_CQI_M2 MTK_M4U_ID(26, 7)
+#define M4U_PORT_L26_CAM_MRAW3_IMGO_M1 MTK_M4U_ID(26, 8)
+#define M4U_PORT_L26_CAM_MRAW3_IMGBO_M1 MTK_M4U_ID(26, 9)
+#define M4U_PORT_L26_CAM_MRAW1_AFO_M1 MTK_M4U_ID(26, 10)
+#define M4U_PORT_L26_CAM_MRAW3_AFO_M1 MTK_M4U_ID(26, 11)
+
+/* larb27 */
+#define M4U_PORT_L27_CAM_IMGO_R1 MTK_M4U_ID(27, 0)
+#define M4U_PORT_L27_CAM_CQI_R1 MTK_M4U_ID(27, 1)
+#define M4U_PORT_L27_CAM_CQI_R2 MTK_M4U_ID(27, 2)
+#define M4U_PORT_L27_CAM_BPCI_R1 MTK_M4U_ID(27, 3)
+#define M4U_PORT_L27_CAM_LSCI_R1 MTK_M4U_ID(27, 4)
+#define M4U_PORT_L27_CAM_RAWI_R2 MTK_M4U_ID(27, 5)
+#define M4U_PORT_L27_CAM_RAWI_R3 MTK_M4U_ID(27, 6)
+#define M4U_PORT_L27_CAM_UFDI_R2 MTK_M4U_ID(27, 7)
+#define M4U_PORT_L27_CAM_UFDI_R3 MTK_M4U_ID(27, 8)
+#define M4U_PORT_L27_CAM_RAWI_R4 MTK_M4U_ID(27, 9)
+#define M4U_PORT_L27_CAM_RAWI_R5 MTK_M4U_ID(27, 10)
+#define M4U_PORT_L27_CAM_AAI_R1 MTK_M4U_ID(27, 11)
+#define M4U_PORT_L27_CAM_FHO_R1 MTK_M4U_ID(27, 12)
+#define M4U_PORT_L27_CAM_AAO_R1 MTK_M4U_ID(27, 13)
+#define M4U_PORT_L27_CAM_TSFSO_R1 MTK_M4U_ID(27, 14)
+#define M4U_PORT_L27_CAM_FLKO_R1 MTK_M4U_ID(27, 15)
+
+/* larb28 */
+#define M4U_PORT_L28_CAM_YUVO_R1 MTK_M4U_ID(28, 0)
+#define M4U_PORT_L28_CAM_YUVO_R3 MTK_M4U_ID(28, 1)
+#define M4U_PORT_L28_CAM_YUVCO_R1 MTK_M4U_ID(28, 2)
+#define M4U_PORT_L28_CAM_YUVO_R2 MTK_M4U_ID(28, 3)
+#define M4U_PORT_L28_CAM_RZH1N2TO_R1 MTK_M4U_ID(28, 4)
+#define M4U_PORT_L28_CAM_DRZS4NO_R1 MTK_M4U_ID(28, 5)
+#define M4U_PORT_L28_CAM_TNCSO_R1 MTK_M4U_ID(28, 6)
+
+/* Infra iommu ports */
+/* PCIe1: read: BIT16; write: BIT17. */
+#define IOMMU_PORT_INFRA_PCIE1 MTK_IFAIOMMU_PERI_ID(16)
+/* PCIe0: read: BIT18; write: BIT19. */
+#define IOMMU_PORT_INFRA_PCIE0 MTK_IFAIOMMU_PERI_ID(18)
+#define IOMMU_PORT_INFRA_SSUSB_P3_R MTK_IFAIOMMU_PERI_ID(20)
+#define IOMMU_PORT_INFRA_SSUSB_P3_W MTK_IFAIOMMU_PERI_ID(21)
+#define IOMMU_PORT_INFRA_SSUSB_P2_R MTK_IFAIOMMU_PERI_ID(22)
+#define IOMMU_PORT_INFRA_SSUSB_P2_W MTK_IFAIOMMU_PERI_ID(23)
+#define IOMMU_PORT_INFRA_SSUSB_P1_1_R MTK_IFAIOMMU_PERI_ID(24)
+#define IOMMU_PORT_INFRA_SSUSB_P1_1_W MTK_IFAIOMMU_PERI_ID(25)
+#define IOMMU_PORT_INFRA_SSUSB_P1_0_R MTK_IFAIOMMU_PERI_ID(26)
+#define IOMMU_PORT_INFRA_SSUSB_P1_0_W MTK_IFAIOMMU_PERI_ID(27)
+#define IOMMU_PORT_INFRA_SSUSB2_R MTK_IFAIOMMU_PERI_ID(28)
+#define IOMMU_PORT_INFRA_SSUSB2_W MTK_IFAIOMMU_PERI_ID(29)
+#define IOMMU_PORT_INFRA_SSUSB_R MTK_IFAIOMMU_PERI_ID(30)
+#define IOMMU_PORT_INFRA_SSUSB_W MTK_IFAIOMMU_PERI_ID(31)
+
+#endif
diff --git a/include/dt-bindings/memory/mtk-memory-port.h b/include/dt-bindings/memory/mtk-memory-port.h
new file mode 100644
index 000000000000..2f68a0511a25
--- /dev/null
+++ b/include/dt-bindings/memory/mtk-memory-port.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ */
+#ifndef __DT_BINDINGS_MEMORY_MTK_MEMORY_PORT_H_
+#define __DT_BINDINGS_MEMORY_MTK_MEMORY_PORT_H_
+
+#define MTK_LARB_NR_MAX 32
+
+#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port))
+#define MTK_M4U_TO_LARB(id) (((id) >> 5) & 0x1f)
+#define MTK_M4U_TO_PORT(id) ((id) & 0x1f)
+
+#define MTK_IFAIOMMU_PERI_ID(port) MTK_M4U_ID(0, port)
+
+#endif
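
The MTK_M4U_ID() encoding above packs the local arbiter (larb) number into bits 9:5 and the port number into bits 4:0, which is why MTK_LARB_NR_MAX is 32 and why the infra IOMMU ports simply reuse larb 0 through MTK_IFAIOMMU_PERI_ID(). A minimal C sketch of the round trip, with the macros copied from the header and arbitrary sample values:

#include <assert.h>
#include <stdio.h>

/* Copied from include/dt-bindings/memory/mtk-memory-port.h */
#define MTK_M4U_ID(larb, port)		(((larb) << 5) | (port))
#define MTK_M4U_TO_LARB(id)		(((id) >> 5) & 0x1f)
#define MTK_M4U_TO_PORT(id)		((id) & 0x1f)
#define MTK_IFAIOMMU_PERI_ID(port)	MTK_M4U_ID(0, port)

int main(void)
{
	/* e.g. M4U_PORT_L19_VENC_REC above is larb 19, port 1 */
	unsigned int id = MTK_M4U_ID(19, 1);

	assert(MTK_M4U_TO_LARB(id) == 19);
	assert(MTK_M4U_TO_PORT(id) == 1);

	/* Infra ports sit on "larb" 0, so the ID is just the bit index */
	printf("IOMMU_PORT_INFRA_PCIE1 = %u\n", MTK_IFAIOMMU_PERI_ID(16));
	return 0;
}
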
diff --git a/include/dt-bindings/memory/tegra124-mc.h b/include/dt-bindings/memory/tegra124-mc.h
index 186e6b7e9b35..7e73bb400eca 100644
--- a/include/dt-bindings/memory/tegra124-mc.h
+++ b/include/dt-bindings/memory/tegra124-mc.h
@@ -54,4 +54,72 @@
#define TEGRA124_MC_RESET_ISP2B 22
#define TEGRA124_MC_RESET_GPU 23

+#define TEGRA124_MC_PTCR 0
+#define TEGRA124_MC_DISPLAY0A 1
+#define TEGRA124_MC_DISPLAY0AB 2
+#define TEGRA124_MC_DISPLAY0B 3
+#define TEGRA124_MC_DISPLAY0BB 4
+#define TEGRA124_MC_DISPLAY0C 5
+#define TEGRA124_MC_DISPLAY0CB 6
+#define TEGRA124_MC_AFIR 14
+#define TEGRA124_MC_AVPCARM7R 15
+#define TEGRA124_MC_DISPLAYHC 16
+#define TEGRA124_MC_DISPLAYHCB 17
+#define TEGRA124_MC_HDAR 21
+#define TEGRA124_MC_HOST1XDMAR 22
+#define TEGRA124_MC_HOST1XR 23
+#define TEGRA124_MC_MSENCSRD 28
+#define TEGRA124_MC_PPCSAHBDMAR 29
+#define TEGRA124_MC_PPCSAHBSLVR 30
+#define TEGRA124_MC_SATAR 31
+#define TEGRA124_MC_VDEBSEVR 34
+#define TEGRA124_MC_VDEMBER 35
+#define TEGRA124_MC_VDEMCER 36
+#define TEGRA124_MC_VDETPER 37
+#define TEGRA124_MC_MPCORELPR 38
+#define TEGRA124_MC_MPCORER 39
+#define TEGRA124_MC_MSENCSWR 43
+#define TEGRA124_MC_AFIW 49
+#define TEGRA124_MC_AVPCARM7W 50
+#define TEGRA124_MC_HDAW 53
+#define TEGRA124_MC_HOST1XW 54
+#define TEGRA124_MC_MPCORELPW 56
+#define TEGRA124_MC_MPCOREW 57
+#define TEGRA124_MC_PPCSAHBDMAW 59
+#define TEGRA124_MC_PPCSAHBSLVW 60
+#define TEGRA124_MC_SATAW 61
+#define TEGRA124_MC_VDEBSEVW 62
+#define TEGRA124_MC_VDEDBGW 63
+#define TEGRA124_MC_VDEMBEW 64
+#define TEGRA124_MC_VDETPMW 65
+#define TEGRA124_MC_ISPRA 68
+#define TEGRA124_MC_ISPWA 70
+#define TEGRA124_MC_ISPWB 71
+#define TEGRA124_MC_XUSB_HOSTR 74
+#define TEGRA124_MC_XUSB_HOSTW 75
+#define TEGRA124_MC_XUSB_DEVR 76
+#define TEGRA124_MC_XUSB_DEVW 77
+#define TEGRA124_MC_ISPRAB 78
+#define TEGRA124_MC_ISPWAB 80
+#define TEGRA124_MC_ISPWBB 81
+#define TEGRA124_MC_TSECSRD 84
+#define TEGRA124_MC_TSECSWR 85
+#define TEGRA124_MC_A9AVPSCR 86
+#define TEGRA124_MC_A9AVPSCW 87
+#define TEGRA124_MC_GPUSRD 88
+#define TEGRA124_MC_GPUSWR 89
+#define TEGRA124_MC_DISPLAYT 90
+#define TEGRA124_MC_SDMMCRA 96
+#define TEGRA124_MC_SDMMCRAA 97
+#define TEGRA124_MC_SDMMCR 98
+#define TEGRA124_MC_SDMMCRAB 99
+#define TEGRA124_MC_SDMMCWA 100
+#define TEGRA124_MC_SDMMCWAA 101
+#define TEGRA124_MC_SDMMCW 102
+#define TEGRA124_MC_SDMMCWAB 103
+#define TEGRA124_MC_VICSRD 108
+#define TEGRA124_MC_VICSWR 109
+#define TEGRA124_MC_VIW 114
+#define TEGRA124_MC_DISPLAYD 115
+
#endif
diff --git a/include/dt-bindings/memory/tegra20-mc.h b/include/dt-bindings/memory/tegra20-mc.h
index 35e131eee198..6f8829508ad0 100644
--- a/include/dt-bindings/memory/tegra20-mc.h
+++ b/include/dt-bindings/memory/tegra20-mc.h
@@ -18,4 +18,57 @@
#define TEGRA20_MC_RESET_VDE 13
#define TEGRA20_MC_RESET_VI 14

+#define TEGRA20_MC_DISPLAY0A 0
+#define TEGRA20_MC_DISPLAY0AB 1
+#define TEGRA20_MC_DISPLAY0B 2
+#define TEGRA20_MC_DISPLAY0BB 3
+#define TEGRA20_MC_DISPLAY0C 4
+#define TEGRA20_MC_DISPLAY0CB 5
+#define TEGRA20_MC_DISPLAY1B 6
+#define TEGRA20_MC_DISPLAY1BB 7
+#define TEGRA20_MC_EPPUP 8
+#define TEGRA20_MC_G2PR 9
+#define TEGRA20_MC_G2SR 10
+#define TEGRA20_MC_MPEUNIFBR 11
+#define TEGRA20_MC_VIRUV 12
+#define TEGRA20_MC_AVPCARM7R 13
+#define TEGRA20_MC_DISPLAYHC 14
+#define TEGRA20_MC_DISPLAYHCB 15
+#define TEGRA20_MC_FDCDRD 16
+#define TEGRA20_MC_G2DR 17
+#define TEGRA20_MC_HOST1XDMAR 18
+#define TEGRA20_MC_HOST1XR 19
+#define TEGRA20_MC_IDXSRD 20
+#define TEGRA20_MC_MPCORER 21
+#define TEGRA20_MC_MPE_IPRED 22
+#define TEGRA20_MC_MPEAMEMRD 23
+#define TEGRA20_MC_MPECSRD 24
+#define TEGRA20_MC_PPCSAHBDMAR 25
+#define TEGRA20_MC_PPCSAHBSLVR 26
+#define TEGRA20_MC_TEXSRD 27
+#define TEGRA20_MC_VDEBSEVR 28
+#define TEGRA20_MC_VDEMBER 29
+#define TEGRA20_MC_VDEMCER 30
+#define TEGRA20_MC_VDETPER 31
+#define TEGRA20_MC_EPPU 32
+#define TEGRA20_MC_EPPV 33
+#define TEGRA20_MC_EPPY 34
+#define TEGRA20_MC_MPEUNIFBW 35
+#define TEGRA20_MC_VIWSB 36
+#define TEGRA20_MC_VIWU 37
+#define TEGRA20_MC_VIWV 38
+#define TEGRA20_MC_VIWY 39
+#define TEGRA20_MC_G2DW 40
+#define TEGRA20_MC_AVPCARM7W 41
+#define TEGRA20_MC_FDCDWR 42
+#define TEGRA20_MC_HOST1XW 43
+#define TEGRA20_MC_ISPW 44
+#define TEGRA20_MC_MPCOREW 45
+#define TEGRA20_MC_MPECSWR 46
+#define TEGRA20_MC_PPCSAHBDMAW 47
+#define TEGRA20_MC_PPCSAHBSLVW 48
+#define TEGRA20_MC_VDEBSEVW 49
+#define TEGRA20_MC_VDEMBEW 50
+#define TEGRA20_MC_VDETPMW 51
+
#endif
diff --git a/include/dt-bindings/memory/tegra210-mc.h b/include/dt-bindings/memory/tegra210-mc.h
index cacf05617e03..5e082547f179 100644
--- a/include/dt-bindings/memory/tegra210-mc.h
+++ b/include/dt-bindings/memory/tegra210-mc.h
@@ -33,6 +33,16 @@
#define TEGRA_SWGROUP_AXIAP 28
#define TEGRA_SWGROUP_ETR 29
#define TEGRA_SWGROUP_TSECB 30
+#define TEGRA_SWGROUP_NV 31
+#define TEGRA_SWGROUP_NV2 32
+#define TEGRA_SWGROUP_PPCS1 33
+#define TEGRA_SWGROUP_DC1 34
+#define TEGRA_SWGROUP_PPCS2 35
+#define TEGRA_SWGROUP_HC1 36
+#define TEGRA_SWGROUP_SE1 37
+#define TEGRA_SWGROUP_TSEC1 38
+#define TEGRA_SWGROUP_TSECB1 39
+#define TEGRA_SWGROUP_NVDEC1 40

#define TEGRA210_MC_RESET_AFI 0
#define TEGRA210_MC_RESET_AVPC 1
diff --git a/include/dt-bindings/memory/tegra234-mc.h b/include/dt-bindings/memory/tegra234-mc.h
new file mode 100644
index 000000000000..bd71cc1d7990
--- /dev/null
+++ b/include/dt-bindings/memory/tegra234-mc.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef DT_BINDINGS_MEMORY_TEGRA234_MC_H
+#define DT_BINDINGS_MEMORY_TEGRA234_MC_H
+
+/* special clients */
+#define TEGRA234_SID_INVALID 0x00
+#define TEGRA234_SID_PASSTHROUGH 0x7f
+
+/* NISO0 stream IDs */
+#define TEGRA234_SID_APE 0x02
+#define TEGRA234_SID_HDA 0x03
+#define TEGRA234_SID_GPCDMA 0x04
+#define TEGRA234_SID_MGBE 0x06
+#define TEGRA234_SID_PCIE0 0x12
+#define TEGRA234_SID_PCIE4 0x13
+#define TEGRA234_SID_PCIE5 0x14
+#define TEGRA234_SID_PCIE6 0x15
+#define TEGRA234_SID_PCIE9 0x1f
+#define TEGRA234_SID_MGBE_VF1 0x49
+#define TEGRA234_SID_MGBE_VF2 0x4a
+#define TEGRA234_SID_MGBE_VF3 0x4b
+
+/* NISO1 stream IDs */
+#define TEGRA234_SID_SDMMC4 0x02
+#define TEGRA234_SID_PCIE1 0x05
+#define TEGRA234_SID_PCIE2 0x06
+#define TEGRA234_SID_PCIE3 0x07
+#define TEGRA234_SID_PCIE7 0x08
+#define TEGRA234_SID_PCIE8 0x09
+#define TEGRA234_SID_PCIE10 0x0b
+#define TEGRA234_SID_BPMP 0x10
+#define TEGRA234_SID_HOST1X 0x27
+#define TEGRA234_SID_VIC 0x34
+
+/* Shared stream IDs */
+#define TEGRA234_SID_HOST1X_CTX0 0x35
+#define TEGRA234_SID_HOST1X_CTX1 0x36
+#define TEGRA234_SID_HOST1X_CTX2 0x37
+#define TEGRA234_SID_HOST1X_CTX3 0x38
+#define TEGRA234_SID_HOST1X_CTX4 0x39
+#define TEGRA234_SID_HOST1X_CTX5 0x3a
+#define TEGRA234_SID_HOST1X_CTX6 0x3b
+#define TEGRA234_SID_HOST1X_CTX7 0x3c
+
+/*
+ * memory client IDs
+ */
+
+/* High-definition audio (HDA) read clients */
+#define TEGRA234_MEMORY_CLIENT_HDAR 0x15
+#define TEGRA234_MEMORY_CLIENT_HOST1XDMAR 0x16
+/* PCIE6 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE6AR 0x28
+/* PCIE6 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE6AW 0x29
+/* PCIE7 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE7AR 0x2a
+/* PCIE7 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE7AW 0x30
+/* PCIE8 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE8AR 0x32
+/* High-definition audio (HDA) write clients */
+#define TEGRA234_MEMORY_CLIENT_HDAW 0x35
+/* PCIE8 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE8AW 0x3b
+/* PCIE9 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE9AR 0x3c
+/* PCIE6r1 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE6AR1 0x3d
+/* PCIE9 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE9AW 0x3e
+/* PCIE10 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE10AR 0x3f
+/* PCIE10 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE10AW 0x40
+/* PCIE10r1 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE10AR1 0x48
+/* PCIE7r1 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE7AR1 0x49
+/* MGBE0 read client */
+#define TEGRA234_MEMORY_CLIENT_MGBEARD 0x58
+/* MGBEB read client */
+#define TEGRA234_MEMORY_CLIENT_MGBEBRD 0x59
+/* MGBEC read client */
+#define TEGRA234_MEMORY_CLIENT_MGBECRD 0x5a
+/* MGBED read client */
+#define TEGRA234_MEMORY_CLIENT_MGBEDRD 0x5b
+/* MGBE0 write client */
+#define TEGRA234_MEMORY_CLIENT_MGBEAWR 0x5c
+/* MGBEB write client */
+#define TEGRA234_MEMORY_CLIENT_MGBEBWR 0x5f
+/* MGBEC write client */
+#define TEGRA234_MEMORY_CLIENT_MGBECWR 0x61
+/* sdmmcd memory read client */
+#define TEGRA234_MEMORY_CLIENT_SDMMCRAB 0x63
+/* MGBED write client */
+#define TEGRA234_MEMORY_CLIENT_MGBEDWR 0x65
+/* sdmmcd memory write client */
+#define TEGRA234_MEMORY_CLIENT_SDMMCWAB 0x67
+#define TEGRA234_MEMORY_CLIENT_VICSRD 0x6c
+#define TEGRA234_MEMORY_CLIENT_VICSWR 0x6d
+/* BPMP read client */
+#define TEGRA234_MEMORY_CLIENT_BPMPR 0x93
+/* BPMP write client */
+#define TEGRA234_MEMORY_CLIENT_BPMPW 0x94
+/* BPMPDMA read client */
+#define TEGRA234_MEMORY_CLIENT_BPMPDMAR 0x95
+/* BPMPDMA write client */
+#define TEGRA234_MEMORY_CLIENT_BPMPDMAW 0x96
+/* APEDMA read client */
+#define TEGRA234_MEMORY_CLIENT_APEDMAR 0x9f
+/* APEDMA write client */
+#define TEGRA234_MEMORY_CLIENT_APEDMAW 0xa0
+/* PCIE0 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE0R 0xd8
+/* PCIE0 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE0W 0xd9
+/* PCIE1 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE1R 0xda
+/* PCIE1 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE1W 0xdb
+/* PCIE2 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE2AR 0xdc
+/* PCIE2 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE2AW 0xdd
+/* PCIE3 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE3R 0xde
+/* PCIE3 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE3W 0xdf
+/* PCIE4 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE4R 0xe0
+/* PCIE4 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE4W 0xe1
+/* PCIE5 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE5R 0xe2
+/* PCIE5 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE5W 0xe3
+/* PCIE5r1 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE5R1 0xef
+
+#endif
diff --git a/include/dt-bindings/memory/tegra30-mc.h b/include/dt-bindings/memory/tegra30-mc.h
index 169f005fbc78..930f708aca17 100644
--- a/include/dt-bindings/memory/tegra30-mc.h
+++ b/include/dt-bindings/memory/tegra30-mc.h
@@ -41,4 +41,71 @@
#define TEGRA30_MC_RESET_VDE 16
#define TEGRA30_MC_RESET_VI 17

+#define TEGRA30_MC_PTCR 0
+#define TEGRA30_MC_DISPLAY0A 1
+#define TEGRA30_MC_DISPLAY0AB 2
+#define TEGRA30_MC_DISPLAY0B 3
+#define TEGRA30_MC_DISPLAY0BB 4
+#define TEGRA30_MC_DISPLAY0C 5
+#define TEGRA30_MC_DISPLAY0CB 6
+#define TEGRA30_MC_DISPLAY1B 7
+#define TEGRA30_MC_DISPLAY1BB 8
+#define TEGRA30_MC_EPPUP 9
+#define TEGRA30_MC_G2PR 10
+#define TEGRA30_MC_G2SR 11
+#define TEGRA30_MC_MPEUNIFBR 12
+#define TEGRA30_MC_VIRUV 13
+#define TEGRA30_MC_AFIR 14
+#define TEGRA30_MC_AVPCARM7R 15
+#define TEGRA30_MC_DISPLAYHC 16
+#define TEGRA30_MC_DISPLAYHCB 17
+#define TEGRA30_MC_FDCDRD 18
+#define TEGRA30_MC_FDCDRD2 19
+#define TEGRA30_MC_G2DR 20
+#define TEGRA30_MC_HDAR 21
+#define TEGRA30_MC_HOST1XDMAR 22
+#define TEGRA30_MC_HOST1XR 23
+#define TEGRA30_MC_IDXSRD 24
+#define TEGRA30_MC_IDXSRD2 25
+#define TEGRA30_MC_MPE_IPRED 26
+#define TEGRA30_MC_MPEAMEMRD 27
+#define TEGRA30_MC_MPECSRD 28
+#define TEGRA30_MC_PPCSAHBDMAR 29
+#define TEGRA30_MC_PPCSAHBSLVR 30
+#define TEGRA30_MC_SATAR 31
+#define TEGRA30_MC_TEXSRD 32
+#define TEGRA30_MC_TEXSRD2 33
+#define TEGRA30_MC_VDEBSEVR 34
+#define TEGRA30_MC_VDEMBER 35
+#define TEGRA30_MC_VDEMCER 36
+#define TEGRA30_MC_VDETPER 37
+#define TEGRA30_MC_MPCORELPR 38
+#define TEGRA30_MC_MPCORER 39
+#define TEGRA30_MC_EPPU 40
+#define TEGRA30_MC_EPPV 41
+#define TEGRA30_MC_EPPY 42
+#define TEGRA30_MC_MPEUNIFBW 43
+#define TEGRA30_MC_VIWSB 44
+#define TEGRA30_MC_VIWU 45
+#define TEGRA30_MC_VIWV 46
+#define TEGRA30_MC_VIWY 47
+#define TEGRA30_MC_G2DW 48
+#define TEGRA30_MC_AFIW 49
+#define TEGRA30_MC_AVPCARM7W 50
+#define TEGRA30_MC_FDCDWR 51
+#define TEGRA30_MC_FDCDWR2 52
+#define TEGRA30_MC_HDAW 53
+#define TEGRA30_MC_HOST1XW 54
+#define TEGRA30_MC_ISPW 55
+#define TEGRA30_MC_MPCORELPW 56
+#define TEGRA30_MC_MPCOREW 57
+#define TEGRA30_MC_MPECSWR 58
+#define TEGRA30_MC_PPCSAHBDMAW 59
+#define TEGRA30_MC_PPCSAHBSLVW 60
+#define TEGRA30_MC_SATAW 61
+#define TEGRA30_MC_VDEBSEVW 62
+#define TEGRA30_MC_VDEDBGW 63
+#define TEGRA30_MC_VDEMBEW 64
+#define TEGRA30_MC_VDETPMW 65
+
#endif
diff --git a/include/dt-bindings/mfd/cros_ec.h b/include/dt-bindings/mfd/cros_ec.h
new file mode 100644
index 000000000000..3b29cd049578
--- /dev/null
+++ b/include/dt-bindings/mfd/cros_ec.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * DTS binding definitions used for the Chromium OS Embedded Controller.
+ *
+ * Copyright (c) 2022 The Chromium OS Authors. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_MFD_CROS_EC_H
+#define _DT_BINDINGS_MFD_CROS_EC_H
+
+/* Typed channel for keyboard backlight. */
+#define CROS_EC_PWM_DT_KB_LIGHT 0
+/* Typed channel for display backlight. */
+#define CROS_EC_PWM_DT_DISPLAY_LIGHT 1
+/* Number of typed channels. */
+#define CROS_EC_PWM_DT_COUNT 2
+
+#endif
diff --git a/include/dt-bindings/mfd/qcom-pm8008.h b/include/dt-bindings/mfd/qcom-pm8008.h
new file mode 100644
index 000000000000..eca9448df228
--- /dev/null
+++ b/include/dt-bindings/mfd/qcom-pm8008.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_MFD_QCOM_PM8008_H
+#define __DT_BINDINGS_MFD_QCOM_PM8008_H
+
+/* PM8008 IRQ numbers */
+#define PM8008_IRQ_MISC_UVLO 0
+#define PM8008_IRQ_MISC_OVLO 1
+#define PM8008_IRQ_MISC_OTST2 2
+#define PM8008_IRQ_MISC_OTST3 3
+#define PM8008_IRQ_MISC_LDO_OCP 4
+#define PM8008_IRQ_TEMP_ALARM 5
+#define PM8008_IRQ_GPIO1 6
+#define PM8008_IRQ_GPIO2 7
+
+#endif
diff --git a/include/dt-bindings/mux/mux.h b/include/dt-bindings/mux/mux.h
index 042719218dbf..0b9d654506ef 100644
--- a/include/dt-bindings/mux/mux.h
+++ b/include/dt-bindings/mux/mux.h
@@ -3,7 +3,7 @@
* This header provides constants for most Multiplexer bindings.
*
* Most Multiplexer bindings specify an idle state. In most cases, the
- * the multiplexer can be left as is when idle, and in some cases it can
+ * multiplexer can be left as is when idle, and in some cases it can
* disconnect the input/output and leave the multiplexer in a high
* impedance state.
*/
diff --git a/include/dt-bindings/mux/ti-serdes.h b/include/dt-bindings/mux/ti-serdes.h
new file mode 100644
index 000000000000..d3116c52ab72
--- /dev/null
+++ b/include/dt-bindings/mux/ti-serdes.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for the SERDES MUX on TI SoCs.
+ */
+
+#ifndef _DT_BINDINGS_MUX_TI_SERDES
+#define _DT_BINDINGS_MUX_TI_SERDES
+
+/* J721E */
+
+#define J721E_SERDES0_LANE0_QSGMII_LANE1 0x0
+#define J721E_SERDES0_LANE0_PCIE0_LANE0 0x1
+#define J721E_SERDES0_LANE0_USB3_0_SWAP 0x2
+#define J721E_SERDES0_LANE0_IP4_UNUSED 0x3
+
+#define J721E_SERDES0_LANE1_QSGMII_LANE2 0x0
+#define J721E_SERDES0_LANE1_PCIE0_LANE1 0x1
+#define J721E_SERDES0_LANE1_USB3_0 0x2
+#define J721E_SERDES0_LANE1_IP4_UNUSED 0x3
+
+#define J721E_SERDES1_LANE0_QSGMII_LANE3 0x0
+#define J721E_SERDES1_LANE0_PCIE1_LANE0 0x1
+#define J721E_SERDES1_LANE0_USB3_1_SWAP 0x2
+#define J721E_SERDES1_LANE0_SGMII_LANE0 0x3
+
+#define J721E_SERDES1_LANE1_QSGMII_LANE4 0x0
+#define J721E_SERDES1_LANE1_PCIE1_LANE1 0x1
+#define J721E_SERDES1_LANE1_USB3_1 0x2
+#define J721E_SERDES1_LANE1_SGMII_LANE1 0x3
+
+#define J721E_SERDES2_LANE0_IP1_UNUSED 0x0
+#define J721E_SERDES2_LANE0_PCIE2_LANE0 0x1
+#define J721E_SERDES2_LANE0_USB3_1_SWAP 0x2
+#define J721E_SERDES2_LANE0_SGMII_LANE0 0x3
+
+#define J721E_SERDES2_LANE1_IP1_UNUSED 0x0
+#define J721E_SERDES2_LANE1_PCIE2_LANE1 0x1
+#define J721E_SERDES2_LANE1_USB3_1 0x2
+#define J721E_SERDES2_LANE1_SGMII_LANE1 0x3
+
+#define J721E_SERDES3_LANE0_IP1_UNUSED 0x0
+#define J721E_SERDES3_LANE0_PCIE3_LANE0 0x1
+#define J721E_SERDES3_LANE0_USB3_0_SWAP 0x2
+#define J721E_SERDES3_LANE0_IP4_UNUSED 0x3
+
+#define J721E_SERDES3_LANE1_IP1_UNUSED 0x0
+#define J721E_SERDES3_LANE1_PCIE3_LANE1 0x1
+#define J721E_SERDES3_LANE1_USB3_0 0x2
+#define J721E_SERDES3_LANE1_IP4_UNUSED 0x3
+
+#define J721E_SERDES4_LANE0_EDP_LANE0 0x0
+#define J721E_SERDES4_LANE0_IP2_UNUSED 0x1
+#define J721E_SERDES4_LANE0_QSGMII_LANE5 0x2
+#define J721E_SERDES4_LANE0_IP4_UNUSED 0x3
+
+#define J721E_SERDES4_LANE1_EDP_LANE1 0x0
+#define J721E_SERDES4_LANE1_IP2_UNUSED 0x1
+#define J721E_SERDES4_LANE1_QSGMII_LANE6 0x2
+#define J721E_SERDES4_LANE1_IP4_UNUSED 0x3
+
+#define J721E_SERDES4_LANE2_EDP_LANE2 0x0
+#define J721E_SERDES4_LANE2_IP2_UNUSED 0x1
+#define J721E_SERDES4_LANE2_QSGMII_LANE7 0x2
+#define J721E_SERDES4_LANE2_IP4_UNUSED 0x3
+
+#define J721E_SERDES4_LANE3_EDP_LANE3 0x0
+#define J721E_SERDES4_LANE3_IP2_UNUSED 0x1
+#define J721E_SERDES4_LANE3_QSGMII_LANE8 0x2
+#define J721E_SERDES4_LANE3_IP4_UNUSED 0x3
+
+/* J7200 */
+
+#define J7200_SERDES0_LANE0_QSGMII_LANE3 0x0
+#define J7200_SERDES0_LANE0_PCIE1_LANE0 0x1
+#define J7200_SERDES0_LANE0_IP3_UNUSED 0x2
+#define J7200_SERDES0_LANE0_IP4_UNUSED 0x3
+
+#define J7200_SERDES0_LANE1_QSGMII_LANE4 0x0
+#define J7200_SERDES0_LANE1_PCIE1_LANE1 0x1
+#define J7200_SERDES0_LANE1_IP3_UNUSED 0x2
+#define J7200_SERDES0_LANE1_IP4_UNUSED 0x3
+
+#define J7200_SERDES0_LANE2_QSGMII_LANE1 0x0
+#define J7200_SERDES0_LANE2_PCIE1_LANE2 0x1
+#define J7200_SERDES0_LANE2_IP3_UNUSED 0x2
+#define J7200_SERDES0_LANE2_IP4_UNUSED 0x3
+
+#define J7200_SERDES0_LANE3_QSGMII_LANE2 0x0
+#define J7200_SERDES0_LANE3_PCIE1_LANE3 0x1
+#define J7200_SERDES0_LANE3_USB 0x2
+#define J7200_SERDES0_LANE3_IP4_UNUSED 0x3
+
+/* AM64 */
+
+#define AM64_SERDES0_LANE0_PCIE0 0x0
+#define AM64_SERDES0_LANE0_USB 0x1
+
+/* J721S2 */
+
+#define J721S2_SERDES0_LANE0_EDP_LANE0 0x0
+#define J721S2_SERDES0_LANE0_PCIE1_LANE0 0x1
+#define J721S2_SERDES0_LANE0_IP3_UNUSED 0x2
+#define J721S2_SERDES0_LANE0_IP4_UNUSED 0x3
+
+#define J721S2_SERDES0_LANE1_EDP_LANE1 0x0
+#define J721S2_SERDES0_LANE1_PCIE1_LANE1 0x1
+#define J721S2_SERDES0_LANE1_USB 0x2
+#define J721S2_SERDES0_LANE1_IP4_UNUSED 0x3
+
+#define J721S2_SERDES0_LANE2_EDP_LANE2 0x0
+#define J721S2_SERDES0_LANE2_PCIE1_LANE2 0x1
+#define J721S2_SERDES0_LANE2_IP3_UNUSED 0x2
+#define J721S2_SERDES0_LANE2_IP4_UNUSED 0x3
+
+#define J721S2_SERDES0_LANE3_EDP_LANE3 0x0
+#define J721S2_SERDES0_LANE3_PCIE1_LANE3 0x1
+#define J721S2_SERDES0_LANE3_USB 0x2
+#define J721S2_SERDES0_LANE3_IP4_UNUSED 0x3
+
+#endif /* _DT_BINDINGS_MUX_TI_SERDES */
diff --git a/include/dt-bindings/net/pcs-rzn1-miic.h b/include/dt-bindings/net/pcs-rzn1-miic.h
new file mode 100644
index 000000000000..784782eaec9e
--- /dev/null
+++ b/include/dt-bindings/net/pcs-rzn1-miic.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 Schneider-Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#ifndef _DT_BINDINGS_PCS_RZN1_MIIC
+#define _DT_BINDINGS_PCS_RZN1_MIIC
+
+/*
+ * Refer to the datasheet [1], section 8.2.1 (Internal Connection of Ethernet
+ * Ports), to check the available combinations.
+ *
+ * [1] REN_r01uh0750ej0140-rzn1-introduction_MAT_20210228.pdf
+ */
+
+#define MIIC_GMAC1_PORT 0
+#define MIIC_GMAC2_PORT 1
+#define MIIC_RTOS_PORT 2
+#define MIIC_SERCOS_PORTA 3
+#define MIIC_SERCOS_PORTB 4
+#define MIIC_ETHERCAT_PORTA 5
+#define MIIC_ETHERCAT_PORTB 6
+#define MIIC_ETHERCAT_PORTC 7
+#define MIIC_SWITCH_PORTA 8
+#define MIIC_SWITCH_PORTB 9
+#define MIIC_SWITCH_PORTC 10
+#define MIIC_SWITCH_PORTD 11
+#define MIIC_HSR_PORTA 12
+#define MIIC_HSR_PORTB 13
+
+#endif
diff --git a/include/dt-bindings/nvmem/microchip,sama7g5-otpc.h b/include/dt-bindings/nvmem/microchip,sama7g5-otpc.h
new file mode 100644
index 000000000000..f570b23165a2
--- /dev/null
+++ b/include/dt-bindings/nvmem/microchip,sama7g5-otpc.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+
+#ifndef _DT_BINDINGS_NVMEM_MICROCHIP_OTPC_H
+#define _DT_BINDINGS_NVMEM_MICROCHIP_OTPC_H
+
+/*
+ * The packet offset must be a multiple of 4, as the NVMEM memory is
+ * registered with stride = 4.
+ */
+#define OTP_PKT(id) ((id) * 4)
+
+#endif
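
Since the NVMEM device behind this controller is registered with stride = 4, OTP_PKT() turns a packet index into a byte offset that is always stride-aligned. A compile-time sanity check of the arithmetic, as a sketch:

/* Copied from include/dt-bindings/nvmem/microchip,sama7g5-otpc.h */
#define OTP_PKT(id)	((id) * 4)

_Static_assert(OTP_PKT(0) == 0, "packet 0 starts at offset 0");
_Static_assert(OTP_PKT(3) == 12, "packet 3 starts at offset 12");
_Static_assert(OTP_PKT(3) % 4 == 0, "every offset honours stride = 4");
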
diff --git a/include/dt-bindings/phy/phy-cadence.h b/include/dt-bindings/phy/phy-cadence.h
new file mode 100644
index 000000000000..0671991208fc
--- /dev/null
+++ b/include/dt-bindings/phy/phy-cadence.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for Cadence SERDES.
+ */
+
+#ifndef _DT_BINDINGS_CADENCE_SERDES_H
+#define _DT_BINDINGS_CADENCE_SERDES_H
+
+#define CDNS_SERDES_NO_SSC 0
+#define CDNS_SERDES_EXTERNAL_SSC 1
+#define CDNS_SERDES_INTERNAL_SSC 2
+
+/* Torrent */
+#define CDNS_TORRENT_REFCLK_DRIVER 0
+#define CDNS_TORRENT_DERIVED_REFCLK 1
+#define CDNS_TORRENT_RECEIVED_REFCLK 2
+
+/* Sierra */
+#define CDNS_SIERRA_PLL_CMNLC 0
+#define CDNS_SIERRA_PLL_CMNLC1 1
+#define CDNS_SIERRA_DERIVED_REFCLK 2
+
+#endif /* _DT_BINDINGS_CADENCE_SERDES_H */
diff --git a/include/dt-bindings/phy/phy-imx8-pcie.h b/include/dt-bindings/phy/phy-imx8-pcie.h
new file mode 100644
index 000000000000..8bbe2d6538d8
--- /dev/null
+++ b/include/dt-bindings/phy/phy-imx8-pcie.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * This header provides constants for i.MX8 PCIe.
+ */
+
+#ifndef _DT_BINDINGS_IMX8_PCIE_H
+#define _DT_BINDINGS_IMX8_PCIE_H
+
+/* Reference clock PAD mode */
+#define IMX8_PCIE_REFCLK_PAD_UNUSED 0
+#define IMX8_PCIE_REFCLK_PAD_INPUT 1
+#define IMX8_PCIE_REFCLK_PAD_OUTPUT 2
+
+#endif /* _DT_BINDINGS_IMX8_PCIE_H */
diff --git a/include/dt-bindings/phy/phy-lan966x-serdes.h b/include/dt-bindings/phy/phy-lan966x-serdes.h
new file mode 100644
index 000000000000..4330269a901e
--- /dev/null
+++ b/include/dt-bindings/phy/phy-lan966x-serdes.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+
+#ifndef __PHY_LAN966X_SERDES_H__
+#define __PHY_LAN966X_SERDES_H__
+
+#define CU(x) (x)
+#define CU_MAX CU(2)
+#define SERDES6G(x) (CU_MAX + 1 + (x))
+#define SERDES6G_MAX SERDES6G(3)
+#define RGMII(x) (SERDES6G_MAX + 1 + (x))
+#define RGMII_MAX RGMII(2)
+#define SERDES_MAX (RGMII_MAX + 1)
+
+#endif
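
These macros lay the copper PHYs, 6G serdes lanes and RGMII interfaces out as one flat index space: CU(0)..CU(2) map to 0..2, SERDES6G(0)..SERDES6G(3) to 3..6, RGMII(0)..RGMII(2) to 7..9, and SERDES_MAX (10) is the total count. A small C sketch that checks the ranges, with the macros copied from the header:

/* Copied from include/dt-bindings/phy/phy-lan966x-serdes.h */
#define CU(x)		(x)
#define CU_MAX		CU(2)
#define SERDES6G(x)	(CU_MAX + 1 + (x))
#define SERDES6G_MAX	SERDES6G(3)
#define RGMII(x)	(SERDES6G_MAX + 1 + (x))
#define RGMII_MAX	RGMII(2)
#define SERDES_MAX	(RGMII_MAX + 1)

_Static_assert(CU(0) == 0 && CU_MAX == 2, "copper PHYs: 0..2");
_Static_assert(SERDES6G(0) == 3 && SERDES6G_MAX == 6, "6G serdes: 3..6");
_Static_assert(RGMII(0) == 7 && RGMII_MAX == 9, "RGMII: 7..9");
_Static_assert(SERDES_MAX == 10, "ten muxable interfaces in total");
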
diff --git a/include/dt-bindings/phy/phy-ti.h b/include/dt-bindings/phy/phy-ti.h
new file mode 100644
index 000000000000..ad955d3a56b4
--- /dev/null
+++ b/include/dt-bindings/phy/phy-ti.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for TI SERDES.
+ */
+
+#ifndef _DT_BINDINGS_TI_SERDES
+#define _DT_BINDINGS_TI_SERDES
+
+/* Clock index for output clocks from WIZ */
+
+/* MUX Clocks */
+#define TI_WIZ_PLL0_REFCLK 0
+#define TI_WIZ_PLL1_REFCLK 1
+#define TI_WIZ_REFCLK_DIG 2
+
+/* Reserve index here for future additions */
+
+/* MISC Clocks */
+#define TI_WIZ_PHY_EN_REFCLK 16
+
+#endif /* _DT_BINDINGS_TI_SERDES */
diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h
index 1f3f866fae7b..6b901b342348 100644
--- a/include/dt-bindings/phy/phy.h
+++ b/include/dt-bindings/phy/phy.h
@@ -17,5 +17,11 @@
#define PHY_TYPE_USB3 4
#define PHY_TYPE_UFS 5
#define PHY_TYPE_DP 6
+#define PHY_TYPE_XPCS 7
+#define PHY_TYPE_SGMII 8
+#define PHY_TYPE_QSGMII 9
+#define PHY_TYPE_DPHY 10
+#define PHY_TYPE_CPHY 11
+#define PHY_TYPE_USXGMII 12

#endif /* _DT_BINDINGS_PHY */
diff --git a/include/dt-bindings/pinctrl/apple.h b/include/dt-bindings/pinctrl/apple.h
new file mode 100644
index 000000000000..ea0a6f466592
--- /dev/null
+++ b/include/dt-bindings/pinctrl/apple.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
+/*
+ * This header provides constants for Apple pinctrl bindings.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_APPLE_H
+#define _DT_BINDINGS_PINCTRL_APPLE_H
+
+#define APPLE_PINMUX(pin, func) ((pin) | ((func) << 16))
+#define APPLE_PIN(pinmux) ((pinmux) & 0xffff)
+#define APPLE_FUNC(pinmux) ((pinmux) >> 16)
+
+#endif /* _DT_BINDINGS_PINCTRL_APPLE_H */
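
APPLE_PINMUX() packs the pin number into the low 16 bits and the function selector into the bits above, and the two accessor macros undo the split. A quick compile-time round-trip check in C, with the macros copied from the header; pin 47 and function 1 are arbitrary sample values:

/* Copied from include/dt-bindings/pinctrl/apple.h */
#define APPLE_PINMUX(pin, func)	((pin) | ((func) << 16))
#define APPLE_PIN(pinmux)	((pinmux) & 0xffff)
#define APPLE_FUNC(pinmux)	((pinmux) >> 16)

_Static_assert(APPLE_PIN(APPLE_PINMUX(47, 1)) == 47, "pin survives the pack");
_Static_assert(APPLE_FUNC(APPLE_PINMUX(47, 1)) == 1, "func survives the pack");
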
diff --git a/include/dt-bindings/pinctrl/hisi.h b/include/dt-bindings/pinctrl/hisi.h
index 0359bfdc9119..2175ec89c82f 100644
--- a/include/dt-bindings/pinctrl/hisi.h
+++ b/include/dt-bindings/pinctrl/hisi.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* This header provides constants for hisilicon pinctrl bindings.
*
- * Copyright (c) 2015 Hisilicon Limited.
+ * Copyright (c) 2015 HiSilicon Limited.
* Copyright (c) 2015 Linaro Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
 */

#ifndef _DT_BINDINGS_PINCTRL_HISI_H
diff --git a/include/dt-bindings/pinctrl/k210-fpioa.h b/include/dt-bindings/pinctrl/k210-fpioa.h
new file mode 100644
index 000000000000..314285eab3a1
--- /dev/null
+++ b/include/dt-bindings/pinctrl/k210-fpioa.h
@@ -0,0 +1,276 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2020 Sean Anderson <seanga2@gmail.com>
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ */
+#ifndef PINCTRL_K210_FPIOA_H
+#define PINCTRL_K210_FPIOA_H
+
+/*
+ * Full list of FPIOA functions from
+ * kendryte-standalone-sdk/lib/drivers/include/fpioa.h
+ */
+#define K210_PCF_MASK GENMASK(7, 0)
+#define K210_PCF_JTAG_TCLK 0 /* JTAG Test Clock */
+#define K210_PCF_JTAG_TDI 1 /* JTAG Test Data In */
+#define K210_PCF_JTAG_TMS 2 /* JTAG Test Mode Select */
+#define K210_PCF_JTAG_TDO 3 /* JTAG Test Data Out */
+#define K210_PCF_SPI0_D0 4 /* SPI0 Data 0 */
+#define K210_PCF_SPI0_D1 5 /* SPI0 Data 1 */
+#define K210_PCF_SPI0_D2 6 /* SPI0 Data 2 */
+#define K210_PCF_SPI0_D3 7 /* SPI0 Data 3 */
+#define K210_PCF_SPI0_D4 8 /* SPI0 Data 4 */
+#define K210_PCF_SPI0_D5 9 /* SPI0 Data 5 */
+#define K210_PCF_SPI0_D6 10 /* SPI0 Data 6 */
+#define K210_PCF_SPI0_D7 11 /* SPI0 Data 7 */
+#define K210_PCF_SPI0_SS0 12 /* SPI0 Chip Select 0 */
+#define K210_PCF_SPI0_SS1 13 /* SPI0 Chip Select 1 */
+#define K210_PCF_SPI0_SS2 14 /* SPI0 Chip Select 2 */
+#define K210_PCF_SPI0_SS3 15 /* SPI0 Chip Select 3 */
+#define K210_PCF_SPI0_ARB 16 /* SPI0 Arbitration */
+#define K210_PCF_SPI0_SCLK 17 /* SPI0 Serial Clock */
+#define K210_PCF_UARTHS_RX 18 /* UART High speed Receiver */
+#define K210_PCF_UARTHS_TX 19 /* UART High speed Transmitter */
+#define K210_PCF_RESV6 20 /* Reserved function */
+#define K210_PCF_RESV7 21 /* Reserved function */
+#define K210_PCF_CLK_SPI1 22 /* Clock SPI1 */
+#define K210_PCF_CLK_I2C1 23 /* Clock I2C1 */
+#define K210_PCF_GPIOHS0 24 /* GPIO High speed 0 */
+#define K210_PCF_GPIOHS1 25 /* GPIO High speed 1 */
+#define K210_PCF_GPIOHS2 26 /* GPIO High speed 2 */
+#define K210_PCF_GPIOHS3 27 /* GPIO High speed 3 */
+#define K210_PCF_GPIOHS4 28 /* GPIO High speed 4 */
+#define K210_PCF_GPIOHS5 29 /* GPIO High speed 5 */
+#define K210_PCF_GPIOHS6 30 /* GPIO High speed 6 */
+#define K210_PCF_GPIOHS7 31 /* GPIO High speed 7 */
+#define K210_PCF_GPIOHS8 32 /* GPIO High speed 8 */
+#define K210_PCF_GPIOHS9 33 /* GPIO High speed 9 */
+#define K210_PCF_GPIOHS10 34 /* GPIO High speed 10 */
+#define K210_PCF_GPIOHS11 35 /* GPIO High speed 11 */
+#define K210_PCF_GPIOHS12 36 /* GPIO High speed 12 */
+#define K210_PCF_GPIOHS13 37 /* GPIO High speed 13 */
+#define K210_PCF_GPIOHS14 38 /* GPIO High speed 14 */
+#define K210_PCF_GPIOHS15 39 /* GPIO High speed 15 */
+#define K210_PCF_GPIOHS16 40 /* GPIO High speed 16 */
+#define K210_PCF_GPIOHS17 41 /* GPIO High speed 17 */
+#define K210_PCF_GPIOHS18 42 /* GPIO High speed 18 */
+#define K210_PCF_GPIOHS19 43 /* GPIO High speed 19 */
+#define K210_PCF_GPIOHS20 44 /* GPIO High speed 20 */
+#define K210_PCF_GPIOHS21 45 /* GPIO High speed 21 */
+#define K210_PCF_GPIOHS22 46 /* GPIO High speed 22 */
+#define K210_PCF_GPIOHS23 47 /* GPIO High speed 23 */
+#define K210_PCF_GPIOHS24 48 /* GPIO High speed 24 */
+#define K210_PCF_GPIOHS25 49 /* GPIO High speed 25 */
+#define K210_PCF_GPIOHS26 50 /* GPIO High speed 26 */
+#define K210_PCF_GPIOHS27 51 /* GPIO High speed 27 */
+#define K210_PCF_GPIOHS28 52 /* GPIO High speed 28 */
+#define K210_PCF_GPIOHS29 53 /* GPIO High speed 29 */
+#define K210_PCF_GPIOHS30 54 /* GPIO High speed 30 */
+#define K210_PCF_GPIOHS31 55 /* GPIO High speed 31 */
+#define K210_PCF_GPIO0 56 /* GPIO pin 0 */
+#define K210_PCF_GPIO1 57 /* GPIO pin 1 */
+#define K210_PCF_GPIO2 58 /* GPIO pin 2 */
+#define K210_PCF_GPIO3 59 /* GPIO pin 3 */
+#define K210_PCF_GPIO4 60 /* GPIO pin 4 */
+#define K210_PCF_GPIO5 61 /* GPIO pin 5 */
+#define K210_PCF_GPIO6 62 /* GPIO pin 6 */
+#define K210_PCF_GPIO7 63 /* GPIO pin 7 */
+#define K210_PCF_UART1_RX 64 /* UART1 Receiver */
+#define K210_PCF_UART1_TX 65 /* UART1 Transmitter */
+#define K210_PCF_UART2_RX 66 /* UART2 Receiver */
+#define K210_PCF_UART2_TX 67 /* UART2 Transmitter */
+#define K210_PCF_UART3_RX 68 /* UART3 Receiver */
+#define K210_PCF_UART3_TX 69 /* UART3 Transmitter */
+#define K210_PCF_SPI1_D0 70 /* SPI1 Data 0 */
+#define K210_PCF_SPI1_D1 71 /* SPI1 Data 1 */
+#define K210_PCF_SPI1_D2 72 /* SPI1 Data 2 */
+#define K210_PCF_SPI1_D3 73 /* SPI1 Data 3 */
+#define K210_PCF_SPI1_D4 74 /* SPI1 Data 4 */
+#define K210_PCF_SPI1_D5 75 /* SPI1 Data 5 */
+#define K210_PCF_SPI1_D6 76 /* SPI1 Data 6 */
+#define K210_PCF_SPI1_D7 77 /* SPI1 Data 7 */
+#define K210_PCF_SPI1_SS0 78 /* SPI1 Chip Select 0 */
+#define K210_PCF_SPI1_SS1 79 /* SPI1 Chip Select 1 */
+#define K210_PCF_SPI1_SS2 80 /* SPI1 Chip Select 2 */
+#define K210_PCF_SPI1_SS3 81 /* SPI1 Chip Select 3 */
+#define K210_PCF_SPI1_ARB 82 /* SPI1 Arbitration */
+#define K210_PCF_SPI1_SCLK 83 /* SPI1 Serial Clock */
+#define K210_PCF_SPI2_D0 84 /* SPI2 Data 0 */
+#define K210_PCF_SPI2_SS 85 /* SPI2 Select */
+#define K210_PCF_SPI2_SCLK 86 /* SPI2 Serial Clock */
+#define K210_PCF_I2S0_MCLK 87 /* I2S0 Master Clock */
+#define K210_PCF_I2S0_SCLK 88 /* I2S0 Serial Clock (BCLK) */
+#define K210_PCF_I2S0_WS 89 /* I2S0 Word Select (LRCLK) */
+#define K210_PCF_I2S0_IN_D0 90 /* I2S0 Serial Data Input 0 */
+#define K210_PCF_I2S0_IN_D1 91 /* I2S0 Serial Data Input 1 */
+#define K210_PCF_I2S0_IN_D2 92 /* I2S0 Serial Data Input 2 */
+#define K210_PCF_I2S0_IN_D3 93 /* I2S0 Serial Data Input 3 */
+#define K210_PCF_I2S0_OUT_D0 94 /* I2S0 Serial Data Output 0 */
+#define K210_PCF_I2S0_OUT_D1 95 /* I2S0 Serial Data Output 1 */
+#define K210_PCF_I2S0_OUT_D2 96 /* I2S0 Serial Data Output 2 */
+#define K210_PCF_I2S0_OUT_D3 97 /* I2S0 Serial Data Output 3 */
+#define K210_PCF_I2S1_MCLK 98 /* I2S1 Master Clock */
+#define K210_PCF_I2S1_SCLK 99 /* I2S1 Serial Clock (BCLK) */
+#define K210_PCF_I2S1_WS 100 /* I2S1 Word Select (LRCLK) */
+#define K210_PCF_I2S1_IN_D0 101 /* I2S1 Serial Data Input 0 */
+#define K210_PCF_I2S1_IN_D1 102 /* I2S1 Serial Data Input 1 */
+#define K210_PCF_I2S1_IN_D2 103 /* I2S1 Serial Data Input 2 */
+#define K210_PCF_I2S1_IN_D3 104 /* I2S1 Serial Data Input 3 */
+#define K210_PCF_I2S1_OUT_D0 105 /* I2S1 Serial Data Output 0 */
+#define K210_PCF_I2S1_OUT_D1 106 /* I2S1 Serial Data Output 1 */
+#define K210_PCF_I2S1_OUT_D2 107 /* I2S1 Serial Data Output 2 */
+#define K210_PCF_I2S1_OUT_D3 108 /* I2S1 Serial Data Output 3 */
+#define K210_PCF_I2S2_MCLK 109 /* I2S2 Master Clock */
+#define K210_PCF_I2S2_SCLK 110 /* I2S2 Serial Clock (BCLK) */
+#define K210_PCF_I2S2_WS 111 /* I2S2 Word Select (LRCLK) */
+#define K210_PCF_I2S2_IN_D0 112 /* I2S2 Serial Data Input 0 */
+#define K210_PCF_I2S2_IN_D1 113 /* I2S2 Serial Data Input 1 */
+#define K210_PCF_I2S2_IN_D2 114 /* I2S2 Serial Data Input 2 */
+#define K210_PCF_I2S2_IN_D3 115 /* I2S2 Serial Data Input 3 */
+#define K210_PCF_I2S2_OUT_D0 116 /* I2S2 Serial Data Output 0 */
+#define K210_PCF_I2S2_OUT_D1 117 /* I2S2 Serial Data Output 1 */
+#define K210_PCF_I2S2_OUT_D2 118 /* I2S2 Serial Data Output 2 */
+#define K210_PCF_I2S2_OUT_D3 119 /* I2S2 Serial Data Output 3 */
+#define K210_PCF_RESV0 120 /* Reserved function */
+#define K210_PCF_RESV1 121 /* Reserved function */
+#define K210_PCF_RESV2 122 /* Reserved function */
+#define K210_PCF_RESV3 123 /* Reserved function */
+#define K210_PCF_RESV4 124 /* Reserved function */
+#define K210_PCF_RESV5 125 /* Reserved function */
+#define K210_PCF_I2C0_SCLK 126 /* I2C0 Serial Clock */
+#define K210_PCF_I2C0_SDA 127 /* I2C0 Serial Data */
+#define K210_PCF_I2C1_SCLK 128 /* I2C1 Serial Clock */
+#define K210_PCF_I2C1_SDA 129 /* I2C1 Serial Data */
+#define K210_PCF_I2C2_SCLK 130 /* I2C2 Serial Clock */
+#define K210_PCF_I2C2_SDA 131 /* I2C2 Serial Data */
+#define K210_PCF_DVP_XCLK 132 /* DVP System Clock */
+#define K210_PCF_DVP_RST 133 /* DVP System Reset */
+#define K210_PCF_DVP_PWDN 134 /* DVP Power Down Mode */
+#define K210_PCF_DVP_VSYNC 135 /* DVP Vertical Sync */
+#define K210_PCF_DVP_HSYNC 136 /* DVP Horizontal Sync */
+#define K210_PCF_DVP_PCLK 137 /* DVP Pixel Clock */
+#define K210_PCF_DVP_D0 138 /* DVP Data Bit 0 */
+#define K210_PCF_DVP_D1 139 /* DVP Data Bit 1 */
+#define K210_PCF_DVP_D2 140 /* DVP Data Bit 2 */
+#define K210_PCF_DVP_D3 141 /* DVP Data Bit 3 */
+#define K210_PCF_DVP_D4 142 /* DVP Data Bit 4 */
+#define K210_PCF_DVP_D5 143 /* DVP Data Bit 5 */
+#define K210_PCF_DVP_D6 144 /* DVP Data Bit 6 */
+#define K210_PCF_DVP_D7 145 /* DVP Data Bit 7 */
+#define K210_PCF_SCCB_SCLK 146 /* Serial Camera Control Bus Clock */
+#define K210_PCF_SCCB_SDA 147 /* Serial Camera Control Bus Data */
+#define K210_PCF_UART1_CTS 148 /* UART1 Clear To Send */
+#define K210_PCF_UART1_DSR 149 /* UART1 Data Set Ready */
+#define K210_PCF_UART1_DCD 150 /* UART1 Data Carrier Detect */
+#define K210_PCF_UART1_RI 151 /* UART1 Ring Indicator */
+#define K210_PCF_UART1_SIR_IN 152 /* UART1 Serial Infrared Input */
+#define K210_PCF_UART1_DTR 153 /* UART1 Data Terminal Ready */
+#define K210_PCF_UART1_RTS 154 /* UART1 Request To Send */
+#define K210_PCF_UART1_OUT2 155 /* UART1 User-designated Output 2 */
+#define K210_PCF_UART1_OUT1 156 /* UART1 User-designated Output 1 */
+#define K210_PCF_UART1_SIR_OUT 157 /* UART1 Serial Infrared Output */
+#define K210_PCF_UART1_BAUD 158 /* UART1 Transmit Clock Output */
+#define K210_PCF_UART1_RE 159 /* UART1 Receiver Output Enable */
+#define K210_PCF_UART1_DE 160 /* UART1 Driver Output Enable */
+#define K210_PCF_UART1_RS485_EN 161 /* UART1 RS485 Enable */
+#define K210_PCF_UART2_CTS 162 /* UART2 Clear To Send */
+#define K210_PCF_UART2_DSR 163 /* UART2 Data Set Ready */
+#define K210_PCF_UART2_DCD 164 /* UART2 Data Carrier Detect */
+#define K210_PCF_UART2_RI 165 /* UART2 Ring Indicator */
+#define K210_PCF_UART2_SIR_IN 166 /* UART2 Serial Infrared Input */
+#define K210_PCF_UART2_DTR 167 /* UART2 Data Terminal Ready */
+#define K210_PCF_UART2_RTS 168 /* UART2 Request To Send */
+#define K210_PCF_UART2_OUT2 169 /* UART2 User-designated Output 2 */
+#define K210_PCF_UART2_OUT1 170 /* UART2 User-designated Output 1 */
+#define K210_PCF_UART2_SIR_OUT 171 /* UART2 Serial Infrared Output */
+#define K210_PCF_UART2_BAUD 172 /* UART2 Transmit Clock Output */
+#define K210_PCF_UART2_RE 173 /* UART2 Receiver Output Enable */
+#define K210_PCF_UART2_DE 174 /* UART2 Driver Output Enable */
+#define K210_PCF_UART2_RS485_EN 175 /* UART2 RS485 Enable */
+#define K210_PCF_UART3_CTS 176 /* UART3 Clear To Send */
+#define K210_PCF_UART3_DSR 177 /* UART3 Data Set Ready */
+#define K210_PCF_UART3_DCD 178 /* UART3 Data Carrier Detect */
+#define K210_PCF_UART3_RI 179 /* UART3 Ring Indicator */
+#define K210_PCF_UART3_SIR_IN 180 /* UART3 Serial Infrared Input */
+#define K210_PCF_UART3_DTR 181 /* UART3 Data Terminal Ready */
+#define K210_PCF_UART3_RTS 182 /* UART3 Request To Send */
+#define K210_PCF_UART3_OUT2 183 /* UART3 User-designated Output 2 */
+#define K210_PCF_UART3_OUT1 184 /* UART3 User-designated Output 1 */
+#define K210_PCF_UART3_SIR_OUT 185 /* UART3 Serial Infrared Output */
+#define K210_PCF_UART3_BAUD 186 /* UART3 Transmit Clock Output */
+#define K210_PCF_UART3_RE 187 /* UART3 Receiver Output Enable */
+#define K210_PCF_UART3_DE 188 /* UART3 Driver Output Enable */
+#define K210_PCF_UART3_RS485_EN 189 /* UART3 RS485 Enable */
+#define K210_PCF_TIMER0_TOGGLE1 190 /* TIMER0 Toggle Output 1 */
+#define K210_PCF_TIMER0_TOGGLE2 191 /* TIMER0 Toggle Output 2 */
+#define K210_PCF_TIMER0_TOGGLE3 192 /* TIMER0 Toggle Output 3 */
+#define K210_PCF_TIMER0_TOGGLE4 193 /* TIMER0 Toggle Output 4 */
+#define K210_PCF_TIMER1_TOGGLE1 194 /* TIMER1 Toggle Output 1 */
+#define K210_PCF_TIMER1_TOGGLE2 195 /* TIMER1 Toggle Output 2 */
+#define K210_PCF_TIMER1_TOGGLE3 196 /* TIMER1 Toggle Output 3 */
+#define K210_PCF_TIMER1_TOGGLE4 197 /* TIMER1 Toggle Output 4 */
+#define K210_PCF_TIMER2_TOGGLE1 198 /* TIMER2 Toggle Output 1 */
+#define K210_PCF_TIMER2_TOGGLE2 199 /* TIMER2 Toggle Output 2 */
+#define K210_PCF_TIMER2_TOGGLE3 200 /* TIMER2 Toggle Output 3 */
+#define K210_PCF_TIMER2_TOGGLE4 201 /* TIMER2 Toggle Output 4 */
+#define K210_PCF_CLK_SPI2 202 /* Clock SPI2 */
+#define K210_PCF_CLK_I2C2 203 /* Clock I2C2 */
+#define K210_PCF_INTERNAL0 204 /* Internal function signal 0 */
+#define K210_PCF_INTERNAL1 205 /* Internal function signal 1 */
+#define K210_PCF_INTERNAL2 206 /* Internal function signal 2 */
+#define K210_PCF_INTERNAL3 207 /* Internal function signal 3 */
+#define K210_PCF_INTERNAL4 208 /* Internal function signal 4 */
+#define K210_PCF_INTERNAL5 209 /* Internal function signal 5 */
+#define K210_PCF_INTERNAL6 210 /* Internal function signal 6 */
+#define K210_PCF_INTERNAL7 211 /* Internal function signal 7 */
+#define K210_PCF_INTERNAL8 212 /* Internal function signal 8 */
+#define K210_PCF_INTERNAL9 213 /* Internal function signal 9 */
+#define K210_PCF_INTERNAL10 214 /* Internal function signal 10 */
+#define K210_PCF_INTERNAL11 215 /* Internal function signal 11 */
+#define K210_PCF_INTERNAL12 216 /* Internal function signal 12 */
+#define K210_PCF_INTERNAL13 217 /* Internal function signal 13 */
+#define K210_PCF_INTERNAL14 218 /* Internal function signal 14 */
+#define K210_PCF_INTERNAL15 219 /* Internal function signal 15 */
+#define K210_PCF_INTERNAL16 220 /* Internal function signal 16 */
+#define K210_PCF_INTERNAL17 221 /* Internal function signal 17 */
+#define K210_PCF_CONSTANT 222 /* Constant function */
+#define K210_PCF_INTERNAL18 223 /* Internal function signal 18 */
+#define K210_PCF_DEBUG0 224 /* Debug function 0 */
+#define K210_PCF_DEBUG1 225 /* Debug function 1 */
+#define K210_PCF_DEBUG2 226 /* Debug function 2 */
+#define K210_PCF_DEBUG3 227 /* Debug function 3 */
+#define K210_PCF_DEBUG4 228 /* Debug function 4 */
+#define K210_PCF_DEBUG5 229 /* Debug function 5 */
+#define K210_PCF_DEBUG6 230 /* Debug function 6 */
+#define K210_PCF_DEBUG7 231 /* Debug function 7 */
+#define K210_PCF_DEBUG8 232 /* Debug function 8 */
+#define K210_PCF_DEBUG9 233 /* Debug function 9 */
+#define K210_PCF_DEBUG10 234 /* Debug function 10 */
+#define K210_PCF_DEBUG11 235 /* Debug function 11 */
+#define K210_PCF_DEBUG12 236 /* Debug function 12 */
+#define K210_PCF_DEBUG13 237 /* Debug function 13 */
+#define K210_PCF_DEBUG14 238 /* Debug function 14 */
+#define K210_PCF_DEBUG15 239 /* Debug function 15 */
+#define K210_PCF_DEBUG16 240 /* Debug function 16 */
+#define K210_PCF_DEBUG17 241 /* Debug function 17 */
+#define K210_PCF_DEBUG18 242 /* Debug function 18 */
+#define K210_PCF_DEBUG19 243 /* Debug function 19 */
+#define K210_PCF_DEBUG20 244 /* Debug function 20 */
+#define K210_PCF_DEBUG21 245 /* Debug function 21 */
+#define K210_PCF_DEBUG22 246 /* Debug function 22 */
+#define K210_PCF_DEBUG23 247 /* Debug function 23 */
+#define K210_PCF_DEBUG24 248 /* Debug function 24 */
+#define K210_PCF_DEBUG25 249 /* Debug function 25 */
+#define K210_PCF_DEBUG26 250 /* Debug function 26 */
+#define K210_PCF_DEBUG27 251 /* Debug function 27 */
+#define K210_PCF_DEBUG28 252 /* Debug function 28 */
+#define K210_PCF_DEBUG29 253 /* Debug function 29 */
+#define K210_PCF_DEBUG30 254 /* Debug function 30 */
+#define K210_PCF_DEBUG31 255 /* Debug function 31 */
+
+#define K210_FPIOA(pin, func) (((pin) << 16) | (func))
+
+#define K210_PC_POWER_3V3 0
+#define K210_PC_POWER_1V8 1
+
+#endif /* PINCTRL_K210_FPIOA_H */
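
K210_FPIOA() uses the inverse of the APPLE_PINMUX() layout above: the pin index sits in the high half-word and the 8-bit function code from the K210_PCF_* list in the low byte. A short C sketch that routes UART1 TX to pin 5 and checks the packing; 0xff stands in for K210_PCF_MASK, since GENMASK(7, 0) needs the kernel's bits.h:

/* Copied from include/dt-bindings/pinctrl/k210-fpioa.h */
#define K210_PCF_UART1_TX	65
#define K210_FPIOA(pin, func)	(((pin) << 16) | (func))

_Static_assert((K210_FPIOA(5, K210_PCF_UART1_TX) & 0xff) == 65,
	       "function code lands in bits 7:0");
_Static_assert((K210_FPIOA(5, K210_PCF_UART1_TX) >> 16) == 5,
	       "pin index lands in the high half-word");
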
diff --git a/include/dt-bindings/pinctrl/k3.h b/include/dt-bindings/pinctrl/k3.h
index 499de6216581..54df633f9bfe 100644
--- a/include/dt-bindings/pinctrl/k3.h
+++ b/include/dt-bindings/pinctrl/k3.h
@@ -3,7 +3,7 @@
* This header provides constants for pinctrl bindings for TI's K3 SoC
* family.
*
- * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2018-2021 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef _DT_BINDINGS_PINCTRL_TI_K3_H
#define _DT_BINDINGS_PINCTRL_TI_K3_H
@@ -29,10 +29,22 @@
#define PIN_INPUT_PULLUP (INPUT_EN | PULL_UP)
#define PIN_INPUT_PULLDOWN (INPUT_EN | PULL_DOWN)

+#define AM62AX_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
+#define AM62AX_MCU_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
+
+#define AM62X_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
+#define AM62X_MCU_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
+
+#define AM64X_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
+#define AM64X_MCU_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
+
#define AM65X_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
#define AM65X_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))

#define J721E_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
#define J721E_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))

+#define J721S2_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
+#define J721S2_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
+
#endif
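
Note the unusual shape of these IOPAD macros: each one expands to two adjacent parenthesized values rather than a single expression, because a pinctrl-single entry in the device tree consumes two cells, the pad register offset and the conf/mux value. That juxtaposition is not evaluable as plain C, so the sketch below computes the two cells separately; the offset 0x0d8, conf value 0x40000 and mux mode 7 are arbitrary samples, not taken from any board file:

#include <stdio.h>

/* The two cells a K3 *_IOPAD() macro emits into pinctrl-single,pins */
static unsigned int iopad_cell0(unsigned int pa)
{
	return pa & 0x1fff;	/* pad register offset within the domain */
}

static unsigned int iopad_cell1(unsigned int val, unsigned int muxmode)
{
	return val | muxmode;	/* pin configuration ORed with mux mode */
}

int main(void)
{
	/* Mirrors a DTS entry such as J721S2_IOPAD(0x0d8, <conf>, 7) */
	printf("<0x%x 0x%x>\n", iopad_cell0(0x0d8), iopad_cell1(0x40000, 7));
	return 0;
}
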
diff --git a/include/dt-bindings/pinctrl/keystone.h b/include/dt-bindings/pinctrl/keystone.h
index 7f97d776a8ff..66f8aecada53 100644
--- a/include/dt-bindings/pinctrl/keystone.h
+++ b/include/dt-bindings/pinctrl/keystone.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* This header provides constants for Keystone pinctrl bindings.
*
* Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/

#ifndef _DT_BINDINGS_PINCTRL_KEYSTONE_H
diff --git a/include/dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h b/include/dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h
new file mode 100644
index 000000000000..2688da2f621f
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h
@@ -0,0 +1,1280 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ * Author: Hui Liu <hui.liu@mediatek.com>
+ */
+
+#ifndef __MEDIATEK_MT8188_PINFUNC_H
+#define __MEDIATEK_MT8188_PINFUNC_H
+
+#include "mt65xx.h"
+
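Every constant in this generated header follows one encoding: MTK_PIN_NO() from the included mt65xx.h carries the GPIO number in the upper bits (it is ((x) << 8) in current kernels), and the low bits hold the mux mode, 0 through 7. A minimal sketch of packing and unpacking one of the constants defined below; the MTK_GET_* helpers are defined locally for illustration (current mt65xx.h provides equivalents):

	/* Sketch: splitting a PINMUX_* constant back into <pin, function>. */
	#include <stdio.h>

	#define MTK_PIN_NO(x)		((x) << 8)
	#define MTK_GET_PIN_NO(x)	((x) >> 8)
	#define MTK_GET_PIN_FUNC(x)	((x) & 0xf)

	#define PINMUX_GPIO1__FUNC_O_SPIM5_CLK	(MTK_PIN_NO(1) | 2)

	int main(void)
	{
		unsigned int v = PINMUX_GPIO1__FUNC_O_SPIM5_CLK;

		/* Prints: pin=1 func=2 */
		printf("pin=%u func=%u\n", MTK_GET_PIN_NO(v), MTK_GET_PIN_FUNC(v));
		return 0;
	}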
+#define PINMUX_GPIO0__FUNC_B_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_B0_TP_GPIO0_AO (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_O_SPIM5_CSB (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_O_UTXD1 (MTK_PIN_NO(0) | 3)
+#define PINMUX_GPIO0__FUNC_O_DMIC3_CLK (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_B0_I2SIN_MCK (MTK_PIN_NO(0) | 5)
+#define PINMUX_GPIO0__FUNC_O_I2SO2_MCK (MTK_PIN_NO(0) | 6)
+#define PINMUX_GPIO0__FUNC_B0_DBG_MON_A0 (MTK_PIN_NO(0) | 7)
+
+#define PINMUX_GPIO1__FUNC_B_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_B0_TP_GPIO1_AO (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_O_SPIM5_CLK (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_I1_URXD1 (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_I0_DMIC3_DAT (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_B0_I2SIN_BCK (MTK_PIN_NO(1) | 5)
+#define PINMUX_GPIO1__FUNC_B0_I2SO2_BCK (MTK_PIN_NO(1) | 6)
+#define PINMUX_GPIO1__FUNC_B0_DBG_MON_A1 (MTK_PIN_NO(1) | 7)
+
+#define PINMUX_GPIO2__FUNC_B_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_B0_TP_GPIO2_AO (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_B0_SPIM5_MOSI (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_O_URTS1 (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_I0_DMIC3_DAT_R (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_B0_I2SIN_WS (MTK_PIN_NO(2) | 5)
+#define PINMUX_GPIO2__FUNC_B0_I2SO2_WS (MTK_PIN_NO(2) | 6)
+#define PINMUX_GPIO2__FUNC_B0_DBG_MON_A2 (MTK_PIN_NO(2) | 7)
+
+#define PINMUX_GPIO3__FUNC_B_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_B0_TP_GPIO3_AO (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_B0_SPIM5_MISO (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_I1_UCTS1 (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_O_DMIC4_CLK (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_I0_I2SIN_D0 (MTK_PIN_NO(3) | 5)
+#define PINMUX_GPIO3__FUNC_O_I2SO2_D0 (MTK_PIN_NO(3) | 6)
+#define PINMUX_GPIO3__FUNC_B0_DBG_MON_A3 (MTK_PIN_NO(3) | 7)
+
+#define PINMUX_GPIO4__FUNC_B_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_B0_TP_GPIO4_AO (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_I0_SPDIF_IN2 (MTK_PIN_NO(4) | 2)
+#define PINMUX_GPIO4__FUNC_O_I2SO1_MCK (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_I0_DMIC4_DAT (MTK_PIN_NO(4) | 4)
+#define PINMUX_GPIO4__FUNC_I0_I2SIN_D1 (MTK_PIN_NO(4) | 5)
+#define PINMUX_GPIO4__FUNC_O_I2SO2_D1 (MTK_PIN_NO(4) | 6)
+#define PINMUX_GPIO4__FUNC_B0_DBG_MON_A4 (MTK_PIN_NO(4) | 7)
+
+#define PINMUX_GPIO5__FUNC_B_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_B0_TP_GPIO5_AO (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_I0_SPDIF_IN1 (MTK_PIN_NO(5) | 2)
+#define PINMUX_GPIO5__FUNC_O_I2SO1_BCK (MTK_PIN_NO(5) | 3)
+#define PINMUX_GPIO5__FUNC_I0_DMIC4_DAT_R (MTK_PIN_NO(5) | 4)
+#define PINMUX_GPIO5__FUNC_I0_I2SIN_D2 (MTK_PIN_NO(5) | 5)
+#define PINMUX_GPIO5__FUNC_O_I2SO2_D2 (MTK_PIN_NO(5) | 6)
+#define PINMUX_GPIO5__FUNC_B0_DBG_MON_A5 (MTK_PIN_NO(5) | 7)
+
+#define PINMUX_GPIO6__FUNC_B_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_B0_TP_GPIO6_AO (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_I0_SPDIF_IN0 (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_O_I2SO1_WS (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_O_DMIC1_CLK (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_I0_I2SIN_D3 (MTK_PIN_NO(6) | 5)
+#define PINMUX_GPIO6__FUNC_O_I2SO2_D3 (MTK_PIN_NO(6) | 6)
+#define PINMUX_GPIO6__FUNC_B0_MD32_0_GPIO0 (MTK_PIN_NO(6) | 7)
+
+#define PINMUX_GPIO7__FUNC_B_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_B0_TP_GPIO7_AO (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_O_SPIM3_CSB (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_I0_DMIC1_DAT (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_O_CMVREF0 (MTK_PIN_NO(7) | 5)
+#define PINMUX_GPIO7__FUNC_O_CLKM0 (MTK_PIN_NO(7) | 6)
+#define PINMUX_GPIO7__FUNC_B0_DBG_MON_A6 (MTK_PIN_NO(7) | 7)
+
+#define PINMUX_GPIO8__FUNC_B_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_B0_TP_GPIO0_AO (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_O_SPIM3_CLK (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(8) | 3)
+#define PINMUX_GPIO8__FUNC_I0_DMIC1_DAT_R (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_O_CMVREF1 (MTK_PIN_NO(8) | 5)
+#define PINMUX_GPIO8__FUNC_O_CLKM1 (MTK_PIN_NO(8) | 6)
+#define PINMUX_GPIO8__FUNC_B0_DBG_MON_A7 (MTK_PIN_NO(8) | 7)
+
+#define PINMUX_GPIO9__FUNC_B_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_B0_TP_GPIO1_AO (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_B0_SPIM3_MOSI (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(9) | 3)
+#define PINMUX_GPIO9__FUNC_O_DMIC2_CLK (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_O_CMFLASH0 (MTK_PIN_NO(9) | 5)
+#define PINMUX_GPIO9__FUNC_O_PWM_0 (MTK_PIN_NO(9) | 6)
+#define PINMUX_GPIO9__FUNC_B0_DBG_MON_A8 (MTK_PIN_NO(9) | 7)
+
+#define PINMUX_GPIO10__FUNC_B_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_B0_TP_GPIO2_AO (MTK_PIN_NO(10) | 1)
+#define PINMUX_GPIO10__FUNC_B0_SPIM3_MISO (MTK_PIN_NO(10) | 2)
+#define PINMUX_GPIO10__FUNC_I0_TDMIN_DI (MTK_PIN_NO(10) | 3)
+#define PINMUX_GPIO10__FUNC_I0_DMIC2_DAT (MTK_PIN_NO(10) | 4)
+#define PINMUX_GPIO10__FUNC_O_CMFLASH1 (MTK_PIN_NO(10) | 5)
+#define PINMUX_GPIO10__FUNC_O_PWM_1 (MTK_PIN_NO(10) | 6)
+#define PINMUX_GPIO10__FUNC_B0_DBG_MON_A9 (MTK_PIN_NO(10) | 7)
+
+#define PINMUX_GPIO11__FUNC_B_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_B0_TP_GPIO3_AO (MTK_PIN_NO(11) | 1)
+#define PINMUX_GPIO11__FUNC_O_SPDIF_OUT (MTK_PIN_NO(11) | 2)
+#define PINMUX_GPIO11__FUNC_O_I2SO1_D0 (MTK_PIN_NO(11) | 3)
+#define PINMUX_GPIO11__FUNC_I0_DMIC2_DAT_R (MTK_PIN_NO(11) | 4)
+#define PINMUX_GPIO11__FUNC_I0_DVFSRC_EXT_REQ (MTK_PIN_NO(11) | 5)
+#define PINMUX_GPIO11__FUNC_O_CMVREF6 (MTK_PIN_NO(11) | 6)
+#define PINMUX_GPIO11__FUNC_B0_DBG_MON_A10 (MTK_PIN_NO(11) | 7)
+
+#define PINMUX_GPIO12__FUNC_B_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_B0_TP_GPIO4_AO (MTK_PIN_NO(12) | 1)
+#define PINMUX_GPIO12__FUNC_O_SPIM4_CSB (MTK_PIN_NO(12) | 2)
+#define PINMUX_GPIO12__FUNC_B1_JTMS_SEL3 (MTK_PIN_NO(12) | 3)
+#define PINMUX_GPIO12__FUNC_B1_APU_JTAG_TMS (MTK_PIN_NO(12) | 4)
+#define PINMUX_GPIO12__FUNC_I0_VPU_UDI_TMS (MTK_PIN_NO(12) | 5)
+#define PINMUX_GPIO12__FUNC_I0_IPU_JTAG_TMS (MTK_PIN_NO(12) | 6)
+#define PINMUX_GPIO12__FUNC_I0_HDMITX20_HTPLG (MTK_PIN_NO(12) | 7)
+
+#define PINMUX_GPIO13__FUNC_B_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_B0_TP_GPIO5_AO (MTK_PIN_NO(13) | 1)
+#define PINMUX_GPIO13__FUNC_O_SPIM4_CLK (MTK_PIN_NO(13) | 2)
+#define PINMUX_GPIO13__FUNC_I0_JTCK_SEL3 (MTK_PIN_NO(13) | 3)
+#define PINMUX_GPIO13__FUNC_I0_APU_JTAG_TCK (MTK_PIN_NO(13) | 4)
+#define PINMUX_GPIO13__FUNC_I0_VPU_UDI_TCK (MTK_PIN_NO(13) | 5)
+#define PINMUX_GPIO13__FUNC_I0_IPU_JTAG_TCK (MTK_PIN_NO(13) | 6)
+#define PINMUX_GPIO13__FUNC_B1_HDMITX20_CEC (MTK_PIN_NO(13) | 7)
+
+#define PINMUX_GPIO14__FUNC_B_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_B0_TP_GPIO6_AO (MTK_PIN_NO(14) | 1)
+#define PINMUX_GPIO14__FUNC_B0_SPIM4_MOSI (MTK_PIN_NO(14) | 2)
+#define PINMUX_GPIO14__FUNC_I1_JTDI_SEL3 (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_I1_APU_JTAG_TDI (MTK_PIN_NO(14) | 4)
+#define PINMUX_GPIO14__FUNC_I0_VPU_UDI_TDI (MTK_PIN_NO(14) | 5)
+#define PINMUX_GPIO14__FUNC_I0_IPU_JTAG_TDI (MTK_PIN_NO(14) | 6)
+#define PINMUX_GPIO14__FUNC_B1_HDMITX20_SCL (MTK_PIN_NO(14) | 7)
+
+#define PINMUX_GPIO15__FUNC_B_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_B0_TP_GPIO7_AO (MTK_PIN_NO(15) | 1)
+#define PINMUX_GPIO15__FUNC_B0_SPIM4_MISO (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_O_JTDO_SEL3 (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_O_APU_JTAG_TDO (MTK_PIN_NO(15) | 4)
+#define PINMUX_GPIO15__FUNC_O_VPU_UDI_TDO (MTK_PIN_NO(15) | 5)
+#define PINMUX_GPIO15__FUNC_O_IPU_JTAG_TDO (MTK_PIN_NO(15) | 6)
+#define PINMUX_GPIO15__FUNC_B1_HDMITX20_SDA (MTK_PIN_NO(15) | 7)
+
+#define PINMUX_GPIO16__FUNC_B_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_B0_TP_GPIO0_AO (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_O_UTXD3 (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_I1_JTRSTn_SEL3 (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_I0_APU_JTAG_TRST (MTK_PIN_NO(16) | 4)
+#define PINMUX_GPIO16__FUNC_I0_VPU_UDI_NTRST (MTK_PIN_NO(16) | 5)
+#define PINMUX_GPIO16__FUNC_I0_IPU_JTAG_TRST (MTK_PIN_NO(16) | 6)
+#define PINMUX_GPIO16__FUNC_O_HDMITX20_PWR5V (MTK_PIN_NO(16) | 7)
+
+#define PINMUX_GPIO17__FUNC_B_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_B0_TP_GPIO1_AO (MTK_PIN_NO(17) | 1)
+#define PINMUX_GPIO17__FUNC_I1_URXD3 (MTK_PIN_NO(17) | 2)
+#define PINMUX_GPIO17__FUNC_O_CMFLASH2 (MTK_PIN_NO(17) | 3)
+#define PINMUX_GPIO17__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(17) | 4)
+#define PINMUX_GPIO17__FUNC_I0_DVFSRC_EXT_REQ (MTK_PIN_NO(17) | 5)
+#define PINMUX_GPIO17__FUNC_O_CMVREF7 (MTK_PIN_NO(17) | 6)
+#define PINMUX_GPIO17__FUNC_B0_MD32_0_GPIO1 (MTK_PIN_NO(17) | 7)
+
+#define PINMUX_GPIO18__FUNC_B_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_B0_TP_GPIO2_AO (MTK_PIN_NO(18) | 1)
+#define PINMUX_GPIO18__FUNC_O_CMFLASH0 (MTK_PIN_NO(18) | 2)
+#define PINMUX_GPIO18__FUNC_O_CMVREF4 (MTK_PIN_NO(18) | 3)
+#define PINMUX_GPIO18__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(18) | 4)
+#define PINMUX_GPIO18__FUNC_O_UTXD1 (MTK_PIN_NO(18) | 5)
+#define PINMUX_GPIO18__FUNC_O_TP_UTXD1_AO (MTK_PIN_NO(18) | 6)
+#define PINMUX_GPIO18__FUNC_B0_DBG_MON_A11 (MTK_PIN_NO(18) | 7)
+
+#define PINMUX_GPIO19__FUNC_B_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_B0_TP_GPIO3_AO (MTK_PIN_NO(19) | 1)
+#define PINMUX_GPIO19__FUNC_O_CMFLASH1 (MTK_PIN_NO(19) | 2)
+#define PINMUX_GPIO19__FUNC_O_CMVREF5 (MTK_PIN_NO(19) | 3)
+#define PINMUX_GPIO19__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(19) | 4)
+#define PINMUX_GPIO19__FUNC_I1_URXD1 (MTK_PIN_NO(19) | 5)
+#define PINMUX_GPIO19__FUNC_I1_TP_URXD1_AO (MTK_PIN_NO(19) | 6)
+#define PINMUX_GPIO19__FUNC_B0_DBG_MON_A12 (MTK_PIN_NO(19) | 7)
+
+#define PINMUX_GPIO20__FUNC_B_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_B0_TP_GPIO4_AO (MTK_PIN_NO(20) | 1)
+#define PINMUX_GPIO20__FUNC_O_CMFLASH2 (MTK_PIN_NO(20) | 2)
+#define PINMUX_GPIO20__FUNC_O_CLKM2 (MTK_PIN_NO(20) | 3)
+#define PINMUX_GPIO20__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(20) | 4)
+#define PINMUX_GPIO20__FUNC_O_URTS1 (MTK_PIN_NO(20) | 5)
+#define PINMUX_GPIO20__FUNC_O_TP_URTS1_AO (MTK_PIN_NO(20) | 6)
+#define PINMUX_GPIO20__FUNC_B0_DBG_MON_A13 (MTK_PIN_NO(20) | 7)
+
+#define PINMUX_GPIO21__FUNC_B_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_B0_TP_GPIO5_AO (MTK_PIN_NO(21) | 1)
+#define PINMUX_GPIO21__FUNC_O_CMFLASH3 (MTK_PIN_NO(21) | 2)
+#define PINMUX_GPIO21__FUNC_O_CLKM3 (MTK_PIN_NO(21) | 3)
+#define PINMUX_GPIO21__FUNC_I0_TDMIN_DI (MTK_PIN_NO(21) | 4)
+#define PINMUX_GPIO21__FUNC_I1_UCTS1 (MTK_PIN_NO(21) | 5)
+#define PINMUX_GPIO21__FUNC_I1_TP_UCTS1_AO (MTK_PIN_NO(21) | 6)
+#define PINMUX_GPIO21__FUNC_B0_DBG_MON_A14 (MTK_PIN_NO(21) | 7)
+
+#define PINMUX_GPIO22__FUNC_B_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_O_CMMCLK0 (MTK_PIN_NO(22) | 1)
+#define PINMUX_GPIO22__FUNC_B0_TP_GPIO6_AO (MTK_PIN_NO(22) | 5)
+#define PINMUX_GPIO22__FUNC_B0_DBG_MON_A15 (MTK_PIN_NO(22) | 7)
+
+#define PINMUX_GPIO23__FUNC_B_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_O_CMMCLK1 (MTK_PIN_NO(23) | 1)
+#define PINMUX_GPIO23__FUNC_O_PWM_2 (MTK_PIN_NO(23) | 3)
+#define PINMUX_GPIO23__FUNC_B1_PCIE_PHY_I2C_SCL (MTK_PIN_NO(23) | 4)
+#define PINMUX_GPIO23__FUNC_B0_TP_GPIO7_AO (MTK_PIN_NO(23) | 5)
+#define PINMUX_GPIO23__FUNC_I0_DP_TX_HPD (MTK_PIN_NO(23) | 6)
+#define PINMUX_GPIO23__FUNC_B0_DBG_MON_A16 (MTK_PIN_NO(23) | 7)
+
+#define PINMUX_GPIO24__FUNC_B_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_O_CMMCLK2 (MTK_PIN_NO(24) | 1)
+#define PINMUX_GPIO24__FUNC_O_PWM_3 (MTK_PIN_NO(24) | 3)
+#define PINMUX_GPIO24__FUNC_B1_PCIE_PHY_I2C_SDA (MTK_PIN_NO(24) | 4)
+#define PINMUX_GPIO24__FUNC_I0_DVFSRC_EXT_REQ (MTK_PIN_NO(24) | 5)
+#define PINMUX_GPIO24__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(24) | 6)
+#define PINMUX_GPIO24__FUNC_B0_MD32_0_GPIO2 (MTK_PIN_NO(24) | 7)
+
+#define PINMUX_GPIO25__FUNC_B_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_O_LCM_RST (MTK_PIN_NO(25) | 1)
+#define PINMUX_GPIO25__FUNC_O_LCM1_RST (MTK_PIN_NO(25) | 2)
+#define PINMUX_GPIO25__FUNC_I0_DP_TX_HPD (MTK_PIN_NO(25) | 3)
+
+#define PINMUX_GPIO26__FUNC_B_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_I0_DSI_TE (MTK_PIN_NO(26) | 1)
+#define PINMUX_GPIO26__FUNC_I0_DSI1_TE (MTK_PIN_NO(26) | 2)
+#define PINMUX_GPIO26__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(26) | 3)
+
+#define PINMUX_GPIO27__FUNC_B_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_O_LCM1_RST (MTK_PIN_NO(27) | 1)
+#define PINMUX_GPIO27__FUNC_O_LCM_RST (MTK_PIN_NO(27) | 2)
+#define PINMUX_GPIO27__FUNC_I0_DP_TX_HPD (MTK_PIN_NO(27) | 3)
+#define PINMUX_GPIO27__FUNC_O_CMVREF2 (MTK_PIN_NO(27) | 4)
+#define PINMUX_GPIO27__FUNC_O_mbistwriteen_trigger (MTK_PIN_NO(27) | 5)
+#define PINMUX_GPIO27__FUNC_O_PWM_2 (MTK_PIN_NO(27) | 6)
+#define PINMUX_GPIO27__FUNC_B0_DBG_MON_A17 (MTK_PIN_NO(27) | 7)
+
+#define PINMUX_GPIO28__FUNC_B_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_I0_DSI1_TE (MTK_PIN_NO(28) | 1)
+#define PINMUX_GPIO28__FUNC_I0_DSI_TE (MTK_PIN_NO(28) | 2)
+#define PINMUX_GPIO28__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(28) | 3)
+#define PINMUX_GPIO28__FUNC_O_CMVREF3 (MTK_PIN_NO(28) | 4)
+#define PINMUX_GPIO28__FUNC_O_mbistreaden_trigger (MTK_PIN_NO(28) | 5)
+#define PINMUX_GPIO28__FUNC_O_PWM_3 (MTK_PIN_NO(28) | 6)
+#define PINMUX_GPIO28__FUNC_B0_DBG_MON_A18 (MTK_PIN_NO(28) | 7)
+
+#define PINMUX_GPIO29__FUNC_B_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_O_DISP_PWM0 (MTK_PIN_NO(29) | 1)
+#define PINMUX_GPIO29__FUNC_O_DISP_PWM1 (MTK_PIN_NO(29) | 2)
+
+#define PINMUX_GPIO30__FUNC_B_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_O_DISP_PWM1 (MTK_PIN_NO(30) | 1)
+#define PINMUX_GPIO30__FUNC_O_DISP_PWM0 (MTK_PIN_NO(30) | 2)
+#define PINMUX_GPIO30__FUNC_O_CMFLASH3 (MTK_PIN_NO(30) | 3)
+#define PINMUX_GPIO30__FUNC_O_PWM_1 (MTK_PIN_NO(30) | 4)
+#define PINMUX_GPIO30__FUNC_B0_DBG_MON_A19 (MTK_PIN_NO(30) | 7)
+
+#define PINMUX_GPIO31__FUNC_B_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_O_UTXD0 (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_O_TP_UTXD1_AO (MTK_PIN_NO(31) | 2)
+#define PINMUX_GPIO31__FUNC_O_ADSP_UTXD0 (MTK_PIN_NO(31) | 3)
+#define PINMUX_GPIO31__FUNC_O_TP_UTXD2_AO (MTK_PIN_NO(31) | 4)
+#define PINMUX_GPIO31__FUNC_O_MD32_0_TXD (MTK_PIN_NO(31) | 5)
+#define PINMUX_GPIO31__FUNC_O_MD32_1_TXD (MTK_PIN_NO(31) | 6)
+#define PINMUX_GPIO31__FUNC_O_SSPM_UTXD_AO (MTK_PIN_NO(31) | 7)
+
+#define PINMUX_GPIO32__FUNC_B_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_I1_URXD0 (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_I1_TP_URXD1_AO (MTK_PIN_NO(32) | 2)
+#define PINMUX_GPIO32__FUNC_I1_ADSP_URXD0 (MTK_PIN_NO(32) | 3)
+#define PINMUX_GPIO32__FUNC_I1_TP_URXD2_AO (MTK_PIN_NO(32) | 4)
+#define PINMUX_GPIO32__FUNC_I1_MD32_0_RXD (MTK_PIN_NO(32) | 5)
+#define PINMUX_GPIO32__FUNC_I1_MD32_1_RXD (MTK_PIN_NO(32) | 6)
+#define PINMUX_GPIO32__FUNC_I1_SSPM_URXD_AO (MTK_PIN_NO(32) | 7)
+
+#define PINMUX_GPIO33__FUNC_B_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_O_UTXD1 (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_O_URTS2 (MTK_PIN_NO(33) | 2)
+#define PINMUX_GPIO33__FUNC_O_ADSP_UTXD0 (MTK_PIN_NO(33) | 3)
+#define PINMUX_GPIO33__FUNC_O_TP_UTXD1_AO (MTK_PIN_NO(33) | 4)
+#define PINMUX_GPIO33__FUNC_O_mbistwriteen_trigger (MTK_PIN_NO(33) | 5)
+#define PINMUX_GPIO33__FUNC_O_MD32_0_TXD (MTK_PIN_NO(33) | 6)
+#define PINMUX_GPIO33__FUNC_O_SSPM_UTXD_AO (MTK_PIN_NO(33) | 7)
+
+#define PINMUX_GPIO34__FUNC_B_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_I1_URXD1 (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_I1_UCTS2 (MTK_PIN_NO(34) | 2)
+#define PINMUX_GPIO34__FUNC_I1_ADSP_URXD0 (MTK_PIN_NO(34) | 3)
+#define PINMUX_GPIO34__FUNC_I1_TP_URXD1_AO (MTK_PIN_NO(34) | 4)
+#define PINMUX_GPIO34__FUNC_O_mbistreaden_trigger (MTK_PIN_NO(34) | 5)
+#define PINMUX_GPIO34__FUNC_I1_MD32_0_RXD (MTK_PIN_NO(34) | 6)
+#define PINMUX_GPIO34__FUNC_I1_SSPM_URXD_AO (MTK_PIN_NO(34) | 7)
+
+#define PINMUX_GPIO35__FUNC_B_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_O_UTXD2 (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_O_URTS1 (MTK_PIN_NO(35) | 2)
+#define PINMUX_GPIO35__FUNC_O_ADSP_UTXD0 (MTK_PIN_NO(35) | 3)
+#define PINMUX_GPIO35__FUNC_O_TP_URTS1_AO (MTK_PIN_NO(35) | 4)
+#define PINMUX_GPIO35__FUNC_O_TP_UTXD2_AO (MTK_PIN_NO(35) | 5)
+#define PINMUX_GPIO35__FUNC_O_MD32_1_TXD (MTK_PIN_NO(35) | 6)
+#define PINMUX_GPIO35__FUNC_B0_DBG_MON_A20 (MTK_PIN_NO(35) | 7)
+
+#define PINMUX_GPIO36__FUNC_B_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_I1_URXD2 (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_I1_UCTS1 (MTK_PIN_NO(36) | 2)
+#define PINMUX_GPIO36__FUNC_I1_ADSP_URXD0 (MTK_PIN_NO(36) | 3)
+#define PINMUX_GPIO36__FUNC_I1_TP_UCTS1_AO (MTK_PIN_NO(36) | 4)
+#define PINMUX_GPIO36__FUNC_I1_TP_URXD2_AO (MTK_PIN_NO(36) | 5)
+#define PINMUX_GPIO36__FUNC_I1_MD32_1_RXD (MTK_PIN_NO(36) | 6)
+#define PINMUX_GPIO36__FUNC_B0_DBG_MON_A21 (MTK_PIN_NO(36) | 7)
+
+#define PINMUX_GPIO37__FUNC_B_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_B1_JTMS_SEL1 (MTK_PIN_NO(37) | 1)
+#define PINMUX_GPIO37__FUNC_I0_UDI_TMS (MTK_PIN_NO(37) | 2)
+#define PINMUX_GPIO37__FUNC_I1_SPM_JTAG_TMS (MTK_PIN_NO(37) | 3)
+#define PINMUX_GPIO37__FUNC_I1_ADSP_JTAG0_TMS (MTK_PIN_NO(37) | 4)
+#define PINMUX_GPIO37__FUNC_I1_SCP_JTAG0_TMS (MTK_PIN_NO(37) | 5)
+#define PINMUX_GPIO37__FUNC_I1_CCU0_JTAG_TMS (MTK_PIN_NO(37) | 6)
+#define PINMUX_GPIO37__FUNC_I1_MCUPM_JTAG_TMS (MTK_PIN_NO(37) | 7)
+
+#define PINMUX_GPIO38__FUNC_B_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_I0_JTCK_SEL1 (MTK_PIN_NO(38) | 1)
+#define PINMUX_GPIO38__FUNC_I0_UDI_TCK (MTK_PIN_NO(38) | 2)
+#define PINMUX_GPIO38__FUNC_I1_SPM_JTAG_TCK (MTK_PIN_NO(38) | 3)
+#define PINMUX_GPIO38__FUNC_I0_ADSP_JTAG0_TCK (MTK_PIN_NO(38) | 4)
+#define PINMUX_GPIO38__FUNC_I1_SCP_JTAG0_TCK (MTK_PIN_NO(38) | 5)
+#define PINMUX_GPIO38__FUNC_I1_CCU0_JTAG_TCK (MTK_PIN_NO(38) | 6)
+#define PINMUX_GPIO38__FUNC_I1_MCUPM_JTAG_TCK (MTK_PIN_NO(38) | 7)
+
+#define PINMUX_GPIO39__FUNC_B_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_I1_JTDI_SEL1 (MTK_PIN_NO(39) | 1)
+#define PINMUX_GPIO39__FUNC_I0_UDI_TDI (MTK_PIN_NO(39) | 2)
+#define PINMUX_GPIO39__FUNC_I1_SPM_JTAG_TDI (MTK_PIN_NO(39) | 3)
+#define PINMUX_GPIO39__FUNC_I1_ADSP_JTAG0_TDI (MTK_PIN_NO(39) | 4)
+#define PINMUX_GPIO39__FUNC_I1_SCP_JTAG0_TDI (MTK_PIN_NO(39) | 5)
+#define PINMUX_GPIO39__FUNC_I1_CCU0_JTAG_TDI (MTK_PIN_NO(39) | 6)
+#define PINMUX_GPIO39__FUNC_I1_MCUPM_JTAG_TDI (MTK_PIN_NO(39) | 7)
+
+#define PINMUX_GPIO40__FUNC_B_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_O_JTDO_SEL1 (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_O_UDI_TDO (MTK_PIN_NO(40) | 2)
+#define PINMUX_GPIO40__FUNC_O_SPM_JTAG_TDO (MTK_PIN_NO(40) | 3)
+#define PINMUX_GPIO40__FUNC_O_ADSP_JTAG0_TDO (MTK_PIN_NO(40) | 4)
+#define PINMUX_GPIO40__FUNC_O_SCP_JTAG0_TDO (MTK_PIN_NO(40) | 5)
+#define PINMUX_GPIO40__FUNC_O_CCU0_JTAG_TDO (MTK_PIN_NO(40) | 6)
+#define PINMUX_GPIO40__FUNC_O_MCUPM_JTAG_TDO (MTK_PIN_NO(40) | 7)
+
+#define PINMUX_GPIO41__FUNC_B_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_I1_JTRSTn_SEL1 (MTK_PIN_NO(41) | 1)
+#define PINMUX_GPIO41__FUNC_I0_UDI_NTRST (MTK_PIN_NO(41) | 2)
+#define PINMUX_GPIO41__FUNC_I0_SPM_JTAG_TRSTN (MTK_PIN_NO(41) | 3)
+#define PINMUX_GPIO41__FUNC_I1_ADSP_JTAG0_TRSTN (MTK_PIN_NO(41) | 4)
+#define PINMUX_GPIO41__FUNC_I0_SCP_JTAG0_TRSTN (MTK_PIN_NO(41) | 5)
+#define PINMUX_GPIO41__FUNC_I1_CCU0_JTAG_TRST (MTK_PIN_NO(41) | 6)
+#define PINMUX_GPIO41__FUNC_I0_MCUPM_JTAG_TRSTN (MTK_PIN_NO(41) | 7)
+
+#define PINMUX_GPIO42__FUNC_B_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_B1_KPCOL0 (MTK_PIN_NO(42) | 1)
+
+#define PINMUX_GPIO43__FUNC_B_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_B1_KPCOL1 (MTK_PIN_NO(43) | 1)
+#define PINMUX_GPIO43__FUNC_I0_DP_TX_HPD (MTK_PIN_NO(43) | 2)
+#define PINMUX_GPIO43__FUNC_O_CMFLASH2 (MTK_PIN_NO(43) | 3)
+#define PINMUX_GPIO43__FUNC_I0_DVFSRC_EXT_REQ (MTK_PIN_NO(43) | 4)
+#define PINMUX_GPIO43__FUNC_O_mbistwriteen_trigger (MTK_PIN_NO(43) | 7)
+
+#define PINMUX_GPIO44__FUNC_B_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_B1_KPROW0 (MTK_PIN_NO(44) | 1)
+
+#define PINMUX_GPIO45__FUNC_B_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_B1_KPROW1 (MTK_PIN_NO(45) | 1)
+#define PINMUX_GPIO45__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(45) | 2)
+#define PINMUX_GPIO45__FUNC_O_CMFLASH3 (MTK_PIN_NO(45) | 3)
+#define PINMUX_GPIO45__FUNC_B0_I2SIN_MCK (MTK_PIN_NO(45) | 4)
+#define PINMUX_GPIO45__FUNC_O_mbistreaden_trigger (MTK_PIN_NO(45) | 7)
+
+#define PINMUX_GPIO46__FUNC_B_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_I0_DP_TX_HPD (MTK_PIN_NO(46) | 1)
+#define PINMUX_GPIO46__FUNC_O_PWM_0 (MTK_PIN_NO(46) | 2)
+#define PINMUX_GPIO46__FUNC_I0_VBUSVALID_2P (MTK_PIN_NO(46) | 3)
+#define PINMUX_GPIO46__FUNC_B0_DBG_MON_A22 (MTK_PIN_NO(46) | 7)
+
+#define PINMUX_GPIO47__FUNC_B_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_I1_WAKEN (MTK_PIN_NO(47) | 1)
+#define PINMUX_GPIO47__FUNC_O_GDU_TROOPS_DET0 (MTK_PIN_NO(47) | 6)
+
+#define PINMUX_GPIO48__FUNC_B_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_O_PERSTN (MTK_PIN_NO(48) | 1)
+#define PINMUX_GPIO48__FUNC_O_GDU_TROOPS_DET1 (MTK_PIN_NO(48) | 6)
+
+#define PINMUX_GPIO49__FUNC_B_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_B1_CLKREQN (MTK_PIN_NO(49) | 1)
+#define PINMUX_GPIO49__FUNC_O_GDU_TROOPS_DET2 (MTK_PIN_NO(49) | 6)
+
+#define PINMUX_GPIO50__FUNC_B_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_O_HDMITX20_PWR5V (MTK_PIN_NO(50) | 1)
+#define PINMUX_GPIO50__FUNC_I1_IDDIG_1P (MTK_PIN_NO(50) | 3)
+#define PINMUX_GPIO50__FUNC_I1_SCP_JTAG1_TMS (MTK_PIN_NO(50) | 4)
+#define PINMUX_GPIO50__FUNC_I1_SSPM_JTAG_TMS (MTK_PIN_NO(50) | 5)
+#define PINMUX_GPIO50__FUNC_I1_MD32_0_JTAG_TMS (MTK_PIN_NO(50) | 6)
+#define PINMUX_GPIO50__FUNC_I1_MD32_1_JTAG_TMS (MTK_PIN_NO(50) | 7)
+
+#define PINMUX_GPIO51__FUNC_B_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_I0_HDMITX20_HTPLG (MTK_PIN_NO(51) | 1)
+#define PINMUX_GPIO51__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(51) | 2)
+#define PINMUX_GPIO51__FUNC_O_USB_DRVVBUS_1P (MTK_PIN_NO(51) | 3)
+#define PINMUX_GPIO51__FUNC_I1_SCP_JTAG1_TCK (MTK_PIN_NO(51) | 4)
+#define PINMUX_GPIO51__FUNC_I1_SSPM_JTAG_TCK (MTK_PIN_NO(51) | 5)
+#define PINMUX_GPIO51__FUNC_I1_MD32_0_JTAG_TCK (MTK_PIN_NO(51) | 6)
+#define PINMUX_GPIO51__FUNC_I1_MD32_1_JTAG_TCK (MTK_PIN_NO(51) | 7)
+
+#define PINMUX_GPIO52__FUNC_B_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_B1_HDMITX20_CEC (MTK_PIN_NO(52) | 1)
+#define PINMUX_GPIO52__FUNC_I0_VBUSVALID_1P (MTK_PIN_NO(52) | 3)
+#define PINMUX_GPIO52__FUNC_I1_SCP_JTAG1_TDI (MTK_PIN_NO(52) | 4)
+#define PINMUX_GPIO52__FUNC_I1_SSPM_JTAG_TDI (MTK_PIN_NO(52) | 5)
+#define PINMUX_GPIO52__FUNC_I1_MD32_0_JTAG_TDI (MTK_PIN_NO(52) | 6)
+#define PINMUX_GPIO52__FUNC_I1_MD32_1_JTAG_TDI (MTK_PIN_NO(52) | 7)
+
+#define PINMUX_GPIO53__FUNC_B_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_B1_HDMITX20_SCL (MTK_PIN_NO(53) | 1)
+#define PINMUX_GPIO53__FUNC_I1_IDDIG_2P (MTK_PIN_NO(53) | 3)
+#define PINMUX_GPIO53__FUNC_O_SCP_JTAG1_TDO (MTK_PIN_NO(53) | 4)
+#define PINMUX_GPIO53__FUNC_O_SSPM_JTAG_TDO (MTK_PIN_NO(53) | 5)
+#define PINMUX_GPIO53__FUNC_O_MD32_0_JTAG_TDO (MTK_PIN_NO(53) | 6)
+#define PINMUX_GPIO53__FUNC_O_MD32_1_JTAG_TDO (MTK_PIN_NO(53) | 7)
+
+#define PINMUX_GPIO54__FUNC_B_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_B1_HDMITX20_SDA (MTK_PIN_NO(54) | 1)
+#define PINMUX_GPIO54__FUNC_O_USB_DRVVBUS_2P (MTK_PIN_NO(54) | 3)
+#define PINMUX_GPIO54__FUNC_I0_SCP_JTAG1_TRSTN (MTK_PIN_NO(54) | 4)
+#define PINMUX_GPIO54__FUNC_I0_SSPM_JTAG_TRSTN (MTK_PIN_NO(54) | 5)
+#define PINMUX_GPIO54__FUNC_I1_MD32_0_JTAG_TRST (MTK_PIN_NO(54) | 6)
+#define PINMUX_GPIO54__FUNC_I1_MD32_1_JTAG_TRST (MTK_PIN_NO(54) | 7)
+
+#define PINMUX_GPIO55__FUNC_B_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_B1_SCL0 (MTK_PIN_NO(55) | 1)
+#define PINMUX_GPIO55__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(55) | 2)
+#define PINMUX_GPIO55__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(55) | 3)
+#define PINMUX_GPIO55__FUNC_B1_PCIE_PHY_I2C_SCL (MTK_PIN_NO(55) | 4)
+
+#define PINMUX_GPIO56__FUNC_B_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_B1_SDA0 (MTK_PIN_NO(56) | 1)
+#define PINMUX_GPIO56__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(56) | 2)
+#define PINMUX_GPIO56__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(56) | 3)
+#define PINMUX_GPIO56__FUNC_B1_PCIE_PHY_I2C_SDA (MTK_PIN_NO(56) | 4)
+
+#define PINMUX_GPIO57__FUNC_B_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_B1_SCL1 (MTK_PIN_NO(57) | 1)
+
+#define PINMUX_GPIO58__FUNC_B_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_B1_SDA1 (MTK_PIN_NO(58) | 1)
+
+#define PINMUX_GPIO59__FUNC_B_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_B1_SCL2 (MTK_PIN_NO(59) | 1)
+#define PINMUX_GPIO59__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(59) | 2)
+#define PINMUX_GPIO59__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(59) | 3)
+
+#define PINMUX_GPIO60__FUNC_B_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_B1_SDA2 (MTK_PIN_NO(60) | 1)
+#define PINMUX_GPIO60__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(60) | 2)
+#define PINMUX_GPIO60__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(60) | 3)
+
+#define PINMUX_GPIO61__FUNC_B_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_B1_SCL3 (MTK_PIN_NO(61) | 1)
+#define PINMUX_GPIO61__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(61) | 2)
+#define PINMUX_GPIO61__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(61) | 3)
+#define PINMUX_GPIO61__FUNC_B1_PCIE_PHY_I2C_SCL (MTK_PIN_NO(61) | 4)
+
+#define PINMUX_GPIO62__FUNC_B_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_B1_SDA3 (MTK_PIN_NO(62) | 1)
+#define PINMUX_GPIO62__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(62) | 2)
+#define PINMUX_GPIO62__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(62) | 3)
+#define PINMUX_GPIO62__FUNC_B1_PCIE_PHY_I2C_SDA (MTK_PIN_NO(62) | 4)
+
+#define PINMUX_GPIO63__FUNC_B_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_B1_SCL4 (MTK_PIN_NO(63) | 1)
+
+#define PINMUX_GPIO64__FUNC_B_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_B1_SDA4 (MTK_PIN_NO(64) | 1)
+
+#define PINMUX_GPIO65__FUNC_B_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_B1_SCL5 (MTK_PIN_NO(65) | 1)
+#define PINMUX_GPIO65__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(65) | 2)
+#define PINMUX_GPIO65__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(65) | 3)
+
+#define PINMUX_GPIO66__FUNC_B_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_B1_SDA5 (MTK_PIN_NO(66) | 1)
+#define PINMUX_GPIO66__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(66) | 2)
+#define PINMUX_GPIO66__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(66) | 3)
+
+#define PINMUX_GPIO67__FUNC_B_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_B1_SCL6 (MTK_PIN_NO(67) | 1)
+#define PINMUX_GPIO67__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(67) | 2)
+#define PINMUX_GPIO67__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(67) | 3)
+#define PINMUX_GPIO67__FUNC_B1_PCIE_PHY_I2C_SCL (MTK_PIN_NO(67) | 4)
+
+#define PINMUX_GPIO68__FUNC_B_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_B1_SDA6 (MTK_PIN_NO(68) | 1)
+#define PINMUX_GPIO68__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(68) | 2)
+#define PINMUX_GPIO68__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(68) | 3)
+#define PINMUX_GPIO68__FUNC_B1_PCIE_PHY_I2C_SDA (MTK_PIN_NO(68) | 4)
+
+#define PINMUX_GPIO69__FUNC_B_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_O_SPIM0_CSB (MTK_PIN_NO(69) | 1)
+#define PINMUX_GPIO69__FUNC_O_SCP_SPI0_CS (MTK_PIN_NO(69) | 2)
+#define PINMUX_GPIO69__FUNC_O_DMIC3_CLK (MTK_PIN_NO(69) | 3)
+#define PINMUX_GPIO69__FUNC_B0_MD32_1_GPIO0 (MTK_PIN_NO(69) | 4)
+#define PINMUX_GPIO69__FUNC_O_CMVREF0 (MTK_PIN_NO(69) | 5)
+#define PINMUX_GPIO69__FUNC_O_GDU_SUM_TROOP0_0 (MTK_PIN_NO(69) | 6)
+#define PINMUX_GPIO69__FUNC_B0_DBG_MON_A23 (MTK_PIN_NO(69) | 7)
+
+#define PINMUX_GPIO70__FUNC_B_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_O_SPIM0_CLK (MTK_PIN_NO(70) | 1)
+#define PINMUX_GPIO70__FUNC_O_SCP_SPI0_CK (MTK_PIN_NO(70) | 2)
+#define PINMUX_GPIO70__FUNC_I0_DMIC3_DAT (MTK_PIN_NO(70) | 3)
+#define PINMUX_GPIO70__FUNC_B0_MD32_1_GPIO1 (MTK_PIN_NO(70) | 4)
+#define PINMUX_GPIO70__FUNC_O_CMVREF1 (MTK_PIN_NO(70) | 5)
+#define PINMUX_GPIO70__FUNC_O_GDU_SUM_TROOP0_1 (MTK_PIN_NO(70) | 6)
+#define PINMUX_GPIO70__FUNC_B0_DBG_MON_A24 (MTK_PIN_NO(70) | 7)
+
+#define PINMUX_GPIO71__FUNC_B_GPIO71 (MTK_PIN_NO(71) | 0)
+#define PINMUX_GPIO71__FUNC_B0_SPIM0_MOSI (MTK_PIN_NO(71) | 1)
+#define PINMUX_GPIO71__FUNC_O_SCP_SPI0_MO (MTK_PIN_NO(71) | 2)
+#define PINMUX_GPIO71__FUNC_I0_DMIC3_DAT_R (MTK_PIN_NO(71) | 3)
+#define PINMUX_GPIO71__FUNC_B0_MD32_1_GPIO2 (MTK_PIN_NO(71) | 4)
+#define PINMUX_GPIO71__FUNC_O_CMVREF2 (MTK_PIN_NO(71) | 5)
+#define PINMUX_GPIO71__FUNC_O_GDU_SUM_TROOP0_2 (MTK_PIN_NO(71) | 6)
+#define PINMUX_GPIO71__FUNC_B0_DBG_MON_A25 (MTK_PIN_NO(71) | 7)
+
+#define PINMUX_GPIO72__FUNC_B_GPIO72 (MTK_PIN_NO(72) | 0)
+#define PINMUX_GPIO72__FUNC_B0_SPIM0_MISO (MTK_PIN_NO(72) | 1)
+#define PINMUX_GPIO72__FUNC_I0_SCP_SPI0_MI (MTK_PIN_NO(72) | 2)
+#define PINMUX_GPIO72__FUNC_O_DMIC4_CLK (MTK_PIN_NO(72) | 3)
+#define PINMUX_GPIO72__FUNC_O_CMVREF3 (MTK_PIN_NO(72) | 5)
+#define PINMUX_GPIO72__FUNC_O_GDU_SUM_TROOP1_0 (MTK_PIN_NO(72) | 6)
+#define PINMUX_GPIO72__FUNC_B0_DBG_MON_A26 (MTK_PIN_NO(72) | 7)
+
+#define PINMUX_GPIO73__FUNC_B_GPIO73 (MTK_PIN_NO(73) | 0)
+#define PINMUX_GPIO73__FUNC_B0_SPIM0_MIO2 (MTK_PIN_NO(73) | 1)
+#define PINMUX_GPIO73__FUNC_O_UTXD3 (MTK_PIN_NO(73) | 2)
+#define PINMUX_GPIO73__FUNC_I0_DMIC4_DAT (MTK_PIN_NO(73) | 3)
+#define PINMUX_GPIO73__FUNC_O_CLKM0 (MTK_PIN_NO(73) | 4)
+#define PINMUX_GPIO73__FUNC_O_CMVREF4 (MTK_PIN_NO(73) | 5)
+#define PINMUX_GPIO73__FUNC_O_GDU_SUM_TROOP1_1 (MTK_PIN_NO(73) | 6)
+#define PINMUX_GPIO73__FUNC_B0_DBG_MON_A27 (MTK_PIN_NO(73) | 7)
+
+#define PINMUX_GPIO74__FUNC_B_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_B0_SPIM0_MIO3 (MTK_PIN_NO(74) | 1)
+#define PINMUX_GPIO74__FUNC_I1_URXD3 (MTK_PIN_NO(74) | 2)
+#define PINMUX_GPIO74__FUNC_I0_DMIC4_DAT_R (MTK_PIN_NO(74) | 3)
+#define PINMUX_GPIO74__FUNC_O_CLKM1 (MTK_PIN_NO(74) | 4)
+#define PINMUX_GPIO74__FUNC_O_CMVREF5 (MTK_PIN_NO(74) | 5)
+#define PINMUX_GPIO74__FUNC_O_GDU_SUM_TROOP1_2 (MTK_PIN_NO(74) | 6)
+#define PINMUX_GPIO74__FUNC_B0_DBG_MON_A28 (MTK_PIN_NO(74) | 7)
+
+#define PINMUX_GPIO75__FUNC_B_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_O_SPIM1_CSB (MTK_PIN_NO(75) | 1)
+#define PINMUX_GPIO75__FUNC_O_SCP_SPI1_A_CS (MTK_PIN_NO(75) | 2)
+#define PINMUX_GPIO75__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(75) | 3)
+#define PINMUX_GPIO75__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(75) | 4)
+#define PINMUX_GPIO75__FUNC_O_CMVREF6 (MTK_PIN_NO(75) | 5)
+#define PINMUX_GPIO75__FUNC_O_GDU_SUM_TROOP2_0 (MTK_PIN_NO(75) | 6)
+#define PINMUX_GPIO75__FUNC_B0_DBG_MON_A29 (MTK_PIN_NO(75) | 7)
+
+#define PINMUX_GPIO76__FUNC_B_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_O_SPIM1_CLK (MTK_PIN_NO(76) | 1)
+#define PINMUX_GPIO76__FUNC_O_SCP_SPI1_A_CK (MTK_PIN_NO(76) | 2)
+#define PINMUX_GPIO76__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(76) | 3)
+#define PINMUX_GPIO76__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(76) | 4)
+#define PINMUX_GPIO76__FUNC_O_CMVREF7 (MTK_PIN_NO(76) | 5)
+#define PINMUX_GPIO76__FUNC_O_GDU_SUM_TROOP2_1 (MTK_PIN_NO(76) | 6)
+#define PINMUX_GPIO76__FUNC_B0_DBG_MON_A30 (MTK_PIN_NO(76) | 7)
+
+#define PINMUX_GPIO77__FUNC_B_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_B0_SPIM1_MOSI (MTK_PIN_NO(77) | 1)
+#define PINMUX_GPIO77__FUNC_O_SCP_SPI1_A_MO (MTK_PIN_NO(77) | 2)
+#define PINMUX_GPIO77__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(77) | 3)
+#define PINMUX_GPIO77__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(77) | 4)
+#define PINMUX_GPIO77__FUNC_O_GDU_SUM_TROOP2_2 (MTK_PIN_NO(77) | 6)
+#define PINMUX_GPIO77__FUNC_B0_DBG_MON_A31 (MTK_PIN_NO(77) | 7)
+
+#define PINMUX_GPIO78__FUNC_B_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_B0_SPIM1_MISO (MTK_PIN_NO(78) | 1)
+#define PINMUX_GPIO78__FUNC_I0_SCP_SPI1_A_MI (MTK_PIN_NO(78) | 2)
+#define PINMUX_GPIO78__FUNC_I0_TDMIN_DI (MTK_PIN_NO(78) | 3)
+#define PINMUX_GPIO78__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(78) | 4)
+#define PINMUX_GPIO78__FUNC_B0_DBG_MON_A32 (MTK_PIN_NO(78) | 7)
+
+#define PINMUX_GPIO79__FUNC_B_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_O_SPIM2_CSB (MTK_PIN_NO(79) | 1)
+#define PINMUX_GPIO79__FUNC_O_SCP_SPI2_CS (MTK_PIN_NO(79) | 2)
+#define PINMUX_GPIO79__FUNC_O_I2SO1_MCK (MTK_PIN_NO(79) | 3)
+#define PINMUX_GPIO79__FUNC_O_UTXD2 (MTK_PIN_NO(79) | 4)
+#define PINMUX_GPIO79__FUNC_O_TP_UTXD2_AO (MTK_PIN_NO(79) | 5)
+#define PINMUX_GPIO79__FUNC_B0_PCM_SYNC (MTK_PIN_NO(79) | 6)
+#define PINMUX_GPIO79__FUNC_B0_DBG_MON_B0 (MTK_PIN_NO(79) | 7)
+
+#define PINMUX_GPIO80__FUNC_B_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_O_SPIM2_CLK (MTK_PIN_NO(80) | 1)
+#define PINMUX_GPIO80__FUNC_O_SCP_SPI2_CK (MTK_PIN_NO(80) | 2)
+#define PINMUX_GPIO80__FUNC_O_I2SO1_BCK (MTK_PIN_NO(80) | 3)
+#define PINMUX_GPIO80__FUNC_I1_URXD2 (MTK_PIN_NO(80) | 4)
+#define PINMUX_GPIO80__FUNC_I1_TP_URXD2_AO (MTK_PIN_NO(80) | 5)
+#define PINMUX_GPIO80__FUNC_B0_PCM_CLK (MTK_PIN_NO(80) | 6)
+#define PINMUX_GPIO80__FUNC_B0_DBG_MON_B1 (MTK_PIN_NO(80) | 7)
+
+#define PINMUX_GPIO81__FUNC_B_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_B0_SPIM2_MOSI (MTK_PIN_NO(81) | 1)
+#define PINMUX_GPIO81__FUNC_O_SCP_SPI2_MO (MTK_PIN_NO(81) | 2)
+#define PINMUX_GPIO81__FUNC_O_I2SO1_WS (MTK_PIN_NO(81) | 3)
+#define PINMUX_GPIO81__FUNC_O_URTS2 (MTK_PIN_NO(81) | 4)
+#define PINMUX_GPIO81__FUNC_O_TP_URTS2_AO (MTK_PIN_NO(81) | 5)
+#define PINMUX_GPIO81__FUNC_O_PCM_DO (MTK_PIN_NO(81) | 6)
+#define PINMUX_GPIO81__FUNC_B0_DBG_MON_B2 (MTK_PIN_NO(81) | 7)
+
+#define PINMUX_GPIO82__FUNC_B_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_B0_SPIM2_MISO (MTK_PIN_NO(82) | 1)
+#define PINMUX_GPIO82__FUNC_I0_SCP_SPI2_MI (MTK_PIN_NO(82) | 2)
+#define PINMUX_GPIO82__FUNC_O_I2SO1_D0 (MTK_PIN_NO(82) | 3)
+#define PINMUX_GPIO82__FUNC_I1_UCTS2 (MTK_PIN_NO(82) | 4)
+#define PINMUX_GPIO82__FUNC_I1_TP_UCTS2_AO (MTK_PIN_NO(82) | 5)
+#define PINMUX_GPIO82__FUNC_I0_PCM_DI (MTK_PIN_NO(82) | 6)
+#define PINMUX_GPIO82__FUNC_B0_DBG_MON_B3 (MTK_PIN_NO(82) | 7)
+
+#define PINMUX_GPIO83__FUNC_B_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_I1_IDDIG (MTK_PIN_NO(83) | 1)
+
+#define PINMUX_GPIO84__FUNC_B_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_O_USB_DRVVBUS (MTK_PIN_NO(84) | 1)
+
+#define PINMUX_GPIO85__FUNC_B_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_I0_VBUSVALID (MTK_PIN_NO(85) | 1)
+
+#define PINMUX_GPIO86__FUNC_B_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_I1_IDDIG_1P (MTK_PIN_NO(86) | 1)
+#define PINMUX_GPIO86__FUNC_O_UTXD1 (MTK_PIN_NO(86) | 2)
+#define PINMUX_GPIO86__FUNC_O_URTS2 (MTK_PIN_NO(86) | 3)
+#define PINMUX_GPIO86__FUNC_O_PWM_2 (MTK_PIN_NO(86) | 4)
+#define PINMUX_GPIO86__FUNC_B0_TP_GPIO4_AO (MTK_PIN_NO(86) | 5)
+#define PINMUX_GPIO86__FUNC_O_AUXIF_ST0 (MTK_PIN_NO(86) | 6)
+#define PINMUX_GPIO86__FUNC_B0_DBG_MON_B4 (MTK_PIN_NO(86) | 7)
+
+#define PINMUX_GPIO87__FUNC_B_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_O_USB_DRVVBUS_1P (MTK_PIN_NO(87) | 1)
+#define PINMUX_GPIO87__FUNC_I1_URXD1 (MTK_PIN_NO(87) | 2)
+#define PINMUX_GPIO87__FUNC_I1_UCTS2 (MTK_PIN_NO(87) | 3)
+#define PINMUX_GPIO87__FUNC_O_PWM_3 (MTK_PIN_NO(87) | 4)
+#define PINMUX_GPIO87__FUNC_B0_TP_GPIO5_AO (MTK_PIN_NO(87) | 5)
+#define PINMUX_GPIO87__FUNC_O_AUXIF_CLK0 (MTK_PIN_NO(87) | 6)
+#define PINMUX_GPIO87__FUNC_B0_DBG_MON_B5 (MTK_PIN_NO(87) | 7)
+
+#define PINMUX_GPIO88__FUNC_B_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_I0_VBUSVALID_1P (MTK_PIN_NO(88) | 1)
+#define PINMUX_GPIO88__FUNC_O_UTXD2 (MTK_PIN_NO(88) | 2)
+#define PINMUX_GPIO88__FUNC_O_URTS1 (MTK_PIN_NO(88) | 3)
+#define PINMUX_GPIO88__FUNC_O_CLKM2 (MTK_PIN_NO(88) | 4)
+#define PINMUX_GPIO88__FUNC_B0_TP_GPIO6_AO (MTK_PIN_NO(88) | 5)
+#define PINMUX_GPIO88__FUNC_O_AUXIF_ST1 (MTK_PIN_NO(88) | 6)
+#define PINMUX_GPIO88__FUNC_B0_DBG_MON_B6 (MTK_PIN_NO(88) | 7)
+
+#define PINMUX_GPIO89__FUNC_B_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_I1_IDDIG_2P (MTK_PIN_NO(89) | 1)
+#define PINMUX_GPIO89__FUNC_I1_URXD2 (MTK_PIN_NO(89) | 2)
+#define PINMUX_GPIO89__FUNC_I1_UCTS1 (MTK_PIN_NO(89) | 3)
+#define PINMUX_GPIO89__FUNC_O_CLKM3 (MTK_PIN_NO(89) | 4)
+#define PINMUX_GPIO89__FUNC_B0_TP_GPIO7_AO (MTK_PIN_NO(89) | 5)
+#define PINMUX_GPIO89__FUNC_O_AUXIF_CLK1 (MTK_PIN_NO(89) | 6)
+#define PINMUX_GPIO89__FUNC_B0_DBG_MON_B7 (MTK_PIN_NO(89) | 7)
+
+#define PINMUX_GPIO90__FUNC_B_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_O_USB_DRVVBUS_2P (MTK_PIN_NO(90) | 1)
+#define PINMUX_GPIO90__FUNC_O_UTXD3 (MTK_PIN_NO(90) | 2)
+#define PINMUX_GPIO90__FUNC_O_ADSP_UTXD0 (MTK_PIN_NO(90) | 3)
+#define PINMUX_GPIO90__FUNC_O_SSPM_UTXD_AO (MTK_PIN_NO(90) | 4)
+#define PINMUX_GPIO90__FUNC_O_MD32_0_TXD (MTK_PIN_NO(90) | 5)
+#define PINMUX_GPIO90__FUNC_O_MD32_1_TXD (MTK_PIN_NO(90) | 6)
+#define PINMUX_GPIO90__FUNC_B0_DBG_MON_B8 (MTK_PIN_NO(90) | 7)
+
+#define PINMUX_GPIO91__FUNC_B_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_I0_VBUSVALID_2P (MTK_PIN_NO(91) | 1)
+#define PINMUX_GPIO91__FUNC_I1_URXD3 (MTK_PIN_NO(91) | 2)
+#define PINMUX_GPIO91__FUNC_I1_ADSP_URXD0 (MTK_PIN_NO(91) | 3)
+#define PINMUX_GPIO91__FUNC_I1_SSPM_URXD_AO (MTK_PIN_NO(91) | 4)
+#define PINMUX_GPIO91__FUNC_I1_MD32_0_RXD (MTK_PIN_NO(91) | 5)
+#define PINMUX_GPIO91__FUNC_I1_MD32_1_RXD (MTK_PIN_NO(91) | 6)
+#define PINMUX_GPIO91__FUNC_B0_DBG_MON_B9 (MTK_PIN_NO(91) | 7)
+
+#define PINMUX_GPIO92__FUNC_B_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_O_PWRAP_SPI0_CSN (MTK_PIN_NO(92) | 1)
+
+#define PINMUX_GPIO93__FUNC_B_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_O_PWRAP_SPI0_CK (MTK_PIN_NO(93) | 1)
+
+#define PINMUX_GPIO94__FUNC_B_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_B0_PWRAP_SPI0_MO (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_B0_PWRAP_SPI0_MI (MTK_PIN_NO(94) | 2)
+
+#define PINMUX_GPIO95__FUNC_B_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_B0_PWRAP_SPI0_MI (MTK_PIN_NO(95) | 1)
+#define PINMUX_GPIO95__FUNC_B0_PWRAP_SPI0_MO (MTK_PIN_NO(95) | 2)
+
+#define PINMUX_GPIO96__FUNC_B_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_O_SRCLKENA0 (MTK_PIN_NO(96) | 1)
+
+#define PINMUX_GPIO97__FUNC_B_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_O_SRCLKENA1 (MTK_PIN_NO(97) | 1)
+
+#define PINMUX_GPIO98__FUNC_B_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_O_SCP_VREQ_VAO (MTK_PIN_NO(98) | 1)
+#define PINMUX_GPIO98__FUNC_I0_DVFSRC_EXT_REQ (MTK_PIN_NO(98) | 2)
+
+#define PINMUX_GPIO99__FUNC_B_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_I0_RTC32K_CK (MTK_PIN_NO(99) | 1)
+
+#define PINMUX_GPIO100__FUNC_B_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_O_WATCHDOG (MTK_PIN_NO(100) | 1)
+
+#define PINMUX_GPIO101__FUNC_B_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_O_AUD_CLK_MOSI (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_O_I2SO1_MCK (MTK_PIN_NO(101) | 2)
+#define PINMUX_GPIO101__FUNC_B0_I2SIN_BCK (MTK_PIN_NO(101) | 3)
+
+#define PINMUX_GPIO102__FUNC_B_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_O_AUD_SYNC_MOSI (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_O_I2SO1_BCK (MTK_PIN_NO(102) | 2)
+#define PINMUX_GPIO102__FUNC_B0_I2SIN_WS (MTK_PIN_NO(102) | 3)
+
+#define PINMUX_GPIO103__FUNC_B_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_O_AUD_DAT_MOSI0 (MTK_PIN_NO(103) | 1)
+#define PINMUX_GPIO103__FUNC_O_I2SO1_WS (MTK_PIN_NO(103) | 2)
+#define PINMUX_GPIO103__FUNC_I0_I2SIN_D0 (MTK_PIN_NO(103) | 3)
+
+#define PINMUX_GPIO104__FUNC_B_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_O_AUD_DAT_MOSI1 (MTK_PIN_NO(104) | 1)
+#define PINMUX_GPIO104__FUNC_O_I2SO1_D0 (MTK_PIN_NO(104) | 2)
+#define PINMUX_GPIO104__FUNC_I0_I2SIN_D1 (MTK_PIN_NO(104) | 3)
+
+#define PINMUX_GPIO105__FUNC_B_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_I0_AUD_DAT_MISO0 (MTK_PIN_NO(105) | 1)
+#define PINMUX_GPIO105__FUNC_I0_VOW_DAT_MISO (MTK_PIN_NO(105) | 2)
+#define PINMUX_GPIO105__FUNC_I0_I2SIN_D2 (MTK_PIN_NO(105) | 3)
+
+#define PINMUX_GPIO106__FUNC_B_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_I0_AUD_DAT_MISO1 (MTK_PIN_NO(106) | 1)
+#define PINMUX_GPIO106__FUNC_I0_VOW_CLK_MISO (MTK_PIN_NO(106) | 2)
+#define PINMUX_GPIO106__FUNC_I0_I2SIN_D3 (MTK_PIN_NO(106) | 3)
+
+#define PINMUX_GPIO107__FUNC_B_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_B0_I2SIN_MCK (MTK_PIN_NO(107) | 1)
+#define PINMUX_GPIO107__FUNC_I0_SPLIN_MCK (MTK_PIN_NO(107) | 2)
+#define PINMUX_GPIO107__FUNC_I0_SPDIF_IN0 (MTK_PIN_NO(107) | 3)
+#define PINMUX_GPIO107__FUNC_O_CMVREF4 (MTK_PIN_NO(107) | 4)
+#define PINMUX_GPIO107__FUNC_O_AUXIF_ST0 (MTK_PIN_NO(107) | 5)
+#define PINMUX_GPIO107__FUNC_O_PGD_LV_LSC_PWR0 (MTK_PIN_NO(107) | 6)
+
+#define PINMUX_GPIO108__FUNC_B_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_B0_I2SIN_BCK (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_I0_SPLIN_LRCK (MTK_PIN_NO(108) | 2)
+#define PINMUX_GPIO108__FUNC_O_DMIC4_CLK (MTK_PIN_NO(108) | 3)
+#define PINMUX_GPIO108__FUNC_O_CMVREF5 (MTK_PIN_NO(108) | 4)
+#define PINMUX_GPIO108__FUNC_O_AUXIF_CLK0 (MTK_PIN_NO(108) | 5)
+#define PINMUX_GPIO108__FUNC_O_PGD_LV_LSC_PWR1 (MTK_PIN_NO(108) | 6)
+#define PINMUX_GPIO108__FUNC_B0_DBG_MON_B10 (MTK_PIN_NO(108) | 7)
+
+#define PINMUX_GPIO109__FUNC_B_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_B0_I2SIN_WS (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_I0_SPLIN_BCK (MTK_PIN_NO(109) | 2)
+#define PINMUX_GPIO109__FUNC_I0_DMIC4_DAT (MTK_PIN_NO(109) | 3)
+#define PINMUX_GPIO109__FUNC_O_CMVREF6 (MTK_PIN_NO(109) | 4)
+#define PINMUX_GPIO109__FUNC_O_AUXIF_ST1 (MTK_PIN_NO(109) | 5)
+#define PINMUX_GPIO109__FUNC_O_PGD_LV_LSC_PWR2 (MTK_PIN_NO(109) | 6)
+#define PINMUX_GPIO109__FUNC_B0_DBG_MON_B11 (MTK_PIN_NO(109) | 7)
+
+#define PINMUX_GPIO110__FUNC_B_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_I0_I2SIN_D0 (MTK_PIN_NO(110) | 1)
+#define PINMUX_GPIO110__FUNC_I0_SPLIN_D0 (MTK_PIN_NO(110) | 2)
+#define PINMUX_GPIO110__FUNC_I0_DMIC4_DAT_R (MTK_PIN_NO(110) | 3)
+#define PINMUX_GPIO110__FUNC_O_CMVREF7 (MTK_PIN_NO(110) | 4)
+#define PINMUX_GPIO110__FUNC_O_AUXIF_CLK1 (MTK_PIN_NO(110) | 5)
+#define PINMUX_GPIO110__FUNC_O_PGD_LV_LSC_PWR3 (MTK_PIN_NO(110) | 6)
+#define PINMUX_GPIO110__FUNC_B0_DBG_MON_B12 (MTK_PIN_NO(110) | 7)
+
+#define PINMUX_GPIO111__FUNC_B_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_I0_I2SIN_D1 (MTK_PIN_NO(111) | 1)
+#define PINMUX_GPIO111__FUNC_I0_SPLIN_D1 (MTK_PIN_NO(111) | 2)
+#define PINMUX_GPIO111__FUNC_O_DMIC3_CLK (MTK_PIN_NO(111) | 3)
+#define PINMUX_GPIO111__FUNC_O_SPDIF_OUT (MTK_PIN_NO(111) | 4)
+#define PINMUX_GPIO111__FUNC_O_PGD_LV_LSC_PWR4 (MTK_PIN_NO(111) | 6)
+#define PINMUX_GPIO111__FUNC_B0_DBG_MON_B13 (MTK_PIN_NO(111) | 7)
+
+#define PINMUX_GPIO112__FUNC_B_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_I0_I2SIN_D2 (MTK_PIN_NO(112) | 1)
+#define PINMUX_GPIO112__FUNC_I0_SPLIN_D2 (MTK_PIN_NO(112) | 2)
+#define PINMUX_GPIO112__FUNC_I0_DMIC3_DAT (MTK_PIN_NO(112) | 3)
+#define PINMUX_GPIO112__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(112) | 4)
+#define PINMUX_GPIO112__FUNC_O_I2SO1_WS (MTK_PIN_NO(112) | 5)
+#define PINMUX_GPIO112__FUNC_O_PGD_LV_LSC_PWR5 (MTK_PIN_NO(112) | 6)
+#define PINMUX_GPIO112__FUNC_B0_DBG_MON_B14 (MTK_PIN_NO(112) | 7)
+
+#define PINMUX_GPIO113__FUNC_B_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_I0_I2SIN_D3 (MTK_PIN_NO(113) | 1)
+#define PINMUX_GPIO113__FUNC_I0_SPLIN_D3 (MTK_PIN_NO(113) | 2)
+#define PINMUX_GPIO113__FUNC_I0_DMIC3_DAT_R (MTK_PIN_NO(113) | 3)
+#define PINMUX_GPIO113__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(113) | 4)
+#define PINMUX_GPIO113__FUNC_O_I2SO1_D0 (MTK_PIN_NO(113) | 5)
+#define PINMUX_GPIO113__FUNC_B0_DBG_MON_B15 (MTK_PIN_NO(113) | 7)
+
+#define PINMUX_GPIO114__FUNC_B_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_O_I2SO2_MCK (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_B0_I2SIN_MCK (MTK_PIN_NO(114) | 2)
+#define PINMUX_GPIO114__FUNC_I1_MCUPM_JTAG_TMS (MTK_PIN_NO(114) | 3)
+#define PINMUX_GPIO114__FUNC_B1_APU_JTAG_TMS (MTK_PIN_NO(114) | 4)
+#define PINMUX_GPIO114__FUNC_I1_SCP_JTAG1_TMS (MTK_PIN_NO(114) | 5)
+#define PINMUX_GPIO114__FUNC_I1_SPM_JTAG_TMS (MTK_PIN_NO(114) | 6)
+#define PINMUX_GPIO114__FUNC_B0_DBG_MON_B16 (MTK_PIN_NO(114) | 7)
+
+#define PINMUX_GPIO115__FUNC_B_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_B0_I2SO2_BCK (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_B0_I2SIN_BCK (MTK_PIN_NO(115) | 2)
+#define PINMUX_GPIO115__FUNC_I1_MCUPM_JTAG_TCK (MTK_PIN_NO(115) | 3)
+#define PINMUX_GPIO115__FUNC_I0_APU_JTAG_TCK (MTK_PIN_NO(115) | 4)
+#define PINMUX_GPIO115__FUNC_I1_SCP_JTAG1_TCK (MTK_PIN_NO(115) | 5)
+#define PINMUX_GPIO115__FUNC_I1_SPM_JTAG_TCK (MTK_PIN_NO(115) | 6)
+#define PINMUX_GPIO115__FUNC_B0_DBG_MON_B17 (MTK_PIN_NO(115) | 7)
+
+#define PINMUX_GPIO116__FUNC_B_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_B0_I2SO2_WS (MTK_PIN_NO(116) | 1)
+#define PINMUX_GPIO116__FUNC_B0_I2SIN_WS (MTK_PIN_NO(116) | 2)
+#define PINMUX_GPIO116__FUNC_I1_MCUPM_JTAG_TDI (MTK_PIN_NO(116) | 3)
+#define PINMUX_GPIO116__FUNC_I1_APU_JTAG_TDI (MTK_PIN_NO(116) | 4)
+#define PINMUX_GPIO116__FUNC_I1_SCP_JTAG1_TDI (MTK_PIN_NO(116) | 5)
+#define PINMUX_GPIO116__FUNC_I1_SPM_JTAG_TDI (MTK_PIN_NO(116) | 6)
+#define PINMUX_GPIO116__FUNC_B0_DBG_MON_B18 (MTK_PIN_NO(116) | 7)
+
+#define PINMUX_GPIO117__FUNC_B_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_O_I2SO2_D0 (MTK_PIN_NO(117) | 1)
+#define PINMUX_GPIO117__FUNC_I0_I2SIN_D0 (MTK_PIN_NO(117) | 2)
+#define PINMUX_GPIO117__FUNC_O_MCUPM_JTAG_TDO (MTK_PIN_NO(117) | 3)
+#define PINMUX_GPIO117__FUNC_O_APU_JTAG_TDO (MTK_PIN_NO(117) | 4)
+#define PINMUX_GPIO117__FUNC_O_SCP_JTAG1_TDO (MTK_PIN_NO(117) | 5)
+#define PINMUX_GPIO117__FUNC_O_SPM_JTAG_TDO (MTK_PIN_NO(117) | 6)
+#define PINMUX_GPIO117__FUNC_B0_DBG_MON_B19 (MTK_PIN_NO(117) | 7)
+
+#define PINMUX_GPIO118__FUNC_B_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_O_I2SO2_D1 (MTK_PIN_NO(118) | 1)
+#define PINMUX_GPIO118__FUNC_I0_I2SIN_D1 (MTK_PIN_NO(118) | 2)
+#define PINMUX_GPIO118__FUNC_I0_MCUPM_JTAG_TRSTN (MTK_PIN_NO(118) | 3)
+#define PINMUX_GPIO118__FUNC_I0_APU_JTAG_TRST (MTK_PIN_NO(118) | 4)
+#define PINMUX_GPIO118__FUNC_I0_SCP_JTAG1_TRSTN (MTK_PIN_NO(118) | 5)
+#define PINMUX_GPIO118__FUNC_I0_SPM_JTAG_TRSTN (MTK_PIN_NO(118) | 6)
+#define PINMUX_GPIO118__FUNC_B0_DBG_MON_B20 (MTK_PIN_NO(118) | 7)
+
+#define PINMUX_GPIO119__FUNC_B_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_O_I2SO2_D2 (MTK_PIN_NO(119) | 1)
+#define PINMUX_GPIO119__FUNC_I0_I2SIN_D2 (MTK_PIN_NO(119) | 2)
+#define PINMUX_GPIO119__FUNC_O_UTXD3 (MTK_PIN_NO(119) | 3)
+#define PINMUX_GPIO119__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(119) | 4)
+#define PINMUX_GPIO119__FUNC_O_I2SO1_MCK (MTK_PIN_NO(119) | 5)
+#define PINMUX_GPIO119__FUNC_O_SSPM_UTXD_AO (MTK_PIN_NO(119) | 6)
+#define PINMUX_GPIO119__FUNC_B0_DBG_MON_B21 (MTK_PIN_NO(119) | 7)
+
+#define PINMUX_GPIO120__FUNC_B_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_O_I2SO2_D3 (MTK_PIN_NO(120) | 1)
+#define PINMUX_GPIO120__FUNC_I0_I2SIN_D3 (MTK_PIN_NO(120) | 2)
+#define PINMUX_GPIO120__FUNC_I1_URXD3 (MTK_PIN_NO(120) | 3)
+#define PINMUX_GPIO120__FUNC_I0_TDMIN_DI (MTK_PIN_NO(120) | 4)
+#define PINMUX_GPIO120__FUNC_O_I2SO1_BCK (MTK_PIN_NO(120) | 5)
+#define PINMUX_GPIO120__FUNC_I1_SSPM_URXD_AO (MTK_PIN_NO(120) | 6)
+#define PINMUX_GPIO120__FUNC_B0_DBG_MON_B22 (MTK_PIN_NO(120) | 7)
+
+#define PINMUX_GPIO121__FUNC_B_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_B0_PCM_CLK (MTK_PIN_NO(121) | 1)
+#define PINMUX_GPIO121__FUNC_O_SPIM4_CSB (MTK_PIN_NO(121) | 2)
+#define PINMUX_GPIO121__FUNC_O_SCP_SPI1_B_CS (MTK_PIN_NO(121) | 3)
+#define PINMUX_GPIO121__FUNC_O_TP_UTXD2_AO (MTK_PIN_NO(121) | 4)
+#define PINMUX_GPIO121__FUNC_O_AUXIF_ST0 (MTK_PIN_NO(121) | 5)
+#define PINMUX_GPIO121__FUNC_O_PGD_DA_EFUSE_RDY (MTK_PIN_NO(121) | 6)
+#define PINMUX_GPIO121__FUNC_B0_DBG_MON_B23 (MTK_PIN_NO(121) | 7)
+
+#define PINMUX_GPIO122__FUNC_B_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_B0_PCM_SYNC (MTK_PIN_NO(122) | 1)
+#define PINMUX_GPIO122__FUNC_O_SPIM4_CLK (MTK_PIN_NO(122) | 2)
+#define PINMUX_GPIO122__FUNC_O_SCP_SPI1_B_CK (MTK_PIN_NO(122) | 3)
+#define PINMUX_GPIO122__FUNC_I1_TP_URXD2_AO (MTK_PIN_NO(122) | 4)
+#define PINMUX_GPIO122__FUNC_O_AUXIF_CLK0 (MTK_PIN_NO(122) | 5)
+#define PINMUX_GPIO122__FUNC_O_PGD_DA_EFUSE_RDY_PRE (MTK_PIN_NO(122) | 6)
+#define PINMUX_GPIO122__FUNC_B0_DBG_MON_B24 (MTK_PIN_NO(122) | 7)
+
+#define PINMUX_GPIO123__FUNC_B_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_O_PCM_DO (MTK_PIN_NO(123) | 1)
+#define PINMUX_GPIO123__FUNC_B0_SPIM4_MOSI (MTK_PIN_NO(123) | 2)
+#define PINMUX_GPIO123__FUNC_O_SCP_SPI1_B_MO (MTK_PIN_NO(123) | 3)
+#define PINMUX_GPIO123__FUNC_O_TP_URTS2_AO (MTK_PIN_NO(123) | 4)
+#define PINMUX_GPIO123__FUNC_O_AUXIF_ST1 (MTK_PIN_NO(123) | 5)
+#define PINMUX_GPIO123__FUNC_O_PGD_DA_PWRGD_RESET (MTK_PIN_NO(123) | 6)
+#define PINMUX_GPIO123__FUNC_B0_DBG_MON_B25 (MTK_PIN_NO(123) | 7)
+
+#define PINMUX_GPIO124__FUNC_B_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_I0_PCM_DI (MTK_PIN_NO(124) | 1)
+#define PINMUX_GPIO124__FUNC_B0_SPIM4_MISO (MTK_PIN_NO(124) | 2)
+#define PINMUX_GPIO124__FUNC_I0_SCP_SPI1_B_MI (MTK_PIN_NO(124) | 3)
+#define PINMUX_GPIO124__FUNC_I1_TP_UCTS2_AO (MTK_PIN_NO(124) | 4)
+#define PINMUX_GPIO124__FUNC_O_AUXIF_CLK1 (MTK_PIN_NO(124) | 5)
+#define PINMUX_GPIO124__FUNC_O_PGD_DA_PWRGD_ENB (MTK_PIN_NO(124) | 6)
+#define PINMUX_GPIO124__FUNC_B0_DBG_MON_B26 (MTK_PIN_NO(124) | 7)
+
+#define PINMUX_GPIO125__FUNC_B_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_O_DMIC1_CLK (MTK_PIN_NO(125) | 1)
+#define PINMUX_GPIO125__FUNC_O_SPINOR_CK (MTK_PIN_NO(125) | 2)
+#define PINMUX_GPIO125__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(125) | 3)
+#define PINMUX_GPIO125__FUNC_O_LVTS_FOUT (MTK_PIN_NO(125) | 6)
+#define PINMUX_GPIO125__FUNC_B0_DBG_MON_B27 (MTK_PIN_NO(125) | 7)
+
+#define PINMUX_GPIO126__FUNC_B_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_I0_DMIC1_DAT (MTK_PIN_NO(126) | 1)
+#define PINMUX_GPIO126__FUNC_O_SPINOR_CS (MTK_PIN_NO(126) | 2)
+#define PINMUX_GPIO126__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(126) | 3)
+#define PINMUX_GPIO126__FUNC_O_LVTS_SDO (MTK_PIN_NO(126) | 6)
+#define PINMUX_GPIO126__FUNC_B0_DBG_MON_B28 (MTK_PIN_NO(126) | 7)
+
+#define PINMUX_GPIO127__FUNC_B_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_I0_DMIC1_DAT_R (MTK_PIN_NO(127) | 1)
+#define PINMUX_GPIO127__FUNC_B0_SPINOR_IO0 (MTK_PIN_NO(127) | 2)
+#define PINMUX_GPIO127__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(127) | 3)
+#define PINMUX_GPIO127__FUNC_I0_LVTS_26M (MTK_PIN_NO(127) | 6)
+#define PINMUX_GPIO127__FUNC_B0_DBG_MON_B29 (MTK_PIN_NO(127) | 7)
+
+#define PINMUX_GPIO128__FUNC_B_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_O_DMIC2_CLK (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_B0_SPINOR_IO1 (MTK_PIN_NO(128) | 2)
+#define PINMUX_GPIO128__FUNC_I0_TDMIN_DI (MTK_PIN_NO(128) | 3)
+#define PINMUX_GPIO128__FUNC_I0_LVTS_SCF (MTK_PIN_NO(128) | 6)
+#define PINMUX_GPIO128__FUNC_B0_DBG_MON_B30 (MTK_PIN_NO(128) | 7)
+
+#define PINMUX_GPIO129__FUNC_B_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_I0_DMIC2_DAT (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_B0_SPINOR_IO2 (MTK_PIN_NO(129) | 2)
+#define PINMUX_GPIO129__FUNC_I0_SPDIF_IN1 (MTK_PIN_NO(129) | 3)
+#define PINMUX_GPIO129__FUNC_I0_LVTS_SCK (MTK_PIN_NO(129) | 6)
+#define PINMUX_GPIO129__FUNC_B0_DBG_MON_B31 (MTK_PIN_NO(129) | 7)
+
+#define PINMUX_GPIO130__FUNC_B_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_I0_DMIC2_DAT_R (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_B0_SPINOR_IO3 (MTK_PIN_NO(130) | 2)
+#define PINMUX_GPIO130__FUNC_I0_SPDIF_IN2 (MTK_PIN_NO(130) | 3)
+#define PINMUX_GPIO130__FUNC_I0_LVTS_SDI (MTK_PIN_NO(130) | 6)
+#define PINMUX_GPIO130__FUNC_B0_DBG_MON_B32 (MTK_PIN_NO(130) | 7)
+
+#define PINMUX_GPIO131__FUNC_B_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_O_DPI_D0 (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_O_GBE_TXD3 (MTK_PIN_NO(131) | 2)
+#define PINMUX_GPIO131__FUNC_O_DMIC1_CLK (MTK_PIN_NO(131) | 3)
+#define PINMUX_GPIO131__FUNC_O_I2SO2_MCK (MTK_PIN_NO(131) | 4)
+#define PINMUX_GPIO131__FUNC_B0_TP_GPIO0_AO (MTK_PIN_NO(131) | 5)
+#define PINMUX_GPIO131__FUNC_O_SPIM5_CSB (MTK_PIN_NO(131) | 6)
+#define PINMUX_GPIO131__FUNC_O_PGD_LV_HSC_PWR0 (MTK_PIN_NO(131) | 7)
+
+#define PINMUX_GPIO132__FUNC_B_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_O_DPI_D1 (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_O_GBE_TXD2 (MTK_PIN_NO(132) | 2)
+#define PINMUX_GPIO132__FUNC_I0_DMIC1_DAT (MTK_PIN_NO(132) | 3)
+#define PINMUX_GPIO132__FUNC_B0_I2SO2_BCK (MTK_PIN_NO(132) | 4)
+#define PINMUX_GPIO132__FUNC_B0_TP_GPIO1_AO (MTK_PIN_NO(132) | 5)
+#define PINMUX_GPIO132__FUNC_O_SPIM5_CLK (MTK_PIN_NO(132) | 6)
+#define PINMUX_GPIO132__FUNC_O_PGD_LV_HSC_PWR1 (MTK_PIN_NO(132) | 7)
+
+#define PINMUX_GPIO133__FUNC_B_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_O_DPI_D2 (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_O_GBE_TXD1 (MTK_PIN_NO(133) | 2)
+#define PINMUX_GPIO133__FUNC_I0_DMIC1_DAT_R (MTK_PIN_NO(133) | 3)
+#define PINMUX_GPIO133__FUNC_B0_I2SO2_WS (MTK_PIN_NO(133) | 4)
+#define PINMUX_GPIO133__FUNC_B0_TP_GPIO2_AO (MTK_PIN_NO(133) | 5)
+#define PINMUX_GPIO133__FUNC_B0_SPIM5_MOSI (MTK_PIN_NO(133) | 6)
+#define PINMUX_GPIO133__FUNC_O_PGD_LV_HSC_PWR2 (MTK_PIN_NO(133) | 7)
+
+#define PINMUX_GPIO134__FUNC_B_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_O_DPI_D3 (MTK_PIN_NO(134) | 1)
+#define PINMUX_GPIO134__FUNC_O_GBE_TXD0 (MTK_PIN_NO(134) | 2)
+#define PINMUX_GPIO134__FUNC_O_DMIC2_CLK (MTK_PIN_NO(134) | 3)
+#define PINMUX_GPIO134__FUNC_O_I2SO2_D0 (MTK_PIN_NO(134) | 4)
+#define PINMUX_GPIO134__FUNC_B0_TP_GPIO3_AO (MTK_PIN_NO(134) | 5)
+#define PINMUX_GPIO134__FUNC_B0_SPIM5_MISO (MTK_PIN_NO(134) | 6)
+#define PINMUX_GPIO134__FUNC_O_PGD_LV_HSC_PWR3 (MTK_PIN_NO(134) | 7)
+
+#define PINMUX_GPIO135__FUNC_B_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_O_DPI_D4 (MTK_PIN_NO(135) | 1)
+#define PINMUX_GPIO135__FUNC_I0_GBE_RXD3 (MTK_PIN_NO(135) | 2)
+#define PINMUX_GPIO135__FUNC_I0_DMIC2_DAT (MTK_PIN_NO(135) | 3)
+#define PINMUX_GPIO135__FUNC_O_I2SO2_D1 (MTK_PIN_NO(135) | 4)
+#define PINMUX_GPIO135__FUNC_B0_TP_GPIO4_AO (MTK_PIN_NO(135) | 5)
+#define PINMUX_GPIO135__FUNC_I1_WAKEN (MTK_PIN_NO(135) | 6)
+#define PINMUX_GPIO135__FUNC_O_PGD_LV_HSC_PWR4 (MTK_PIN_NO(135) | 7)
+
+#define PINMUX_GPIO136__FUNC_B_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_O_DPI_D5 (MTK_PIN_NO(136) | 1)
+#define PINMUX_GPIO136__FUNC_I0_GBE_RXD2 (MTK_PIN_NO(136) | 2)
+#define PINMUX_GPIO136__FUNC_I0_DMIC2_DAT_R (MTK_PIN_NO(136) | 3)
+#define PINMUX_GPIO136__FUNC_O_I2SO2_D2 (MTK_PIN_NO(136) | 4)
+#define PINMUX_GPIO136__FUNC_B0_TP_GPIO5_AO (MTK_PIN_NO(136) | 5)
+#define PINMUX_GPIO136__FUNC_O_PERSTN (MTK_PIN_NO(136) | 6)
+#define PINMUX_GPIO136__FUNC_O_PGD_LV_HSC_PWR5 (MTK_PIN_NO(136) | 7)
+
+#define PINMUX_GPIO137__FUNC_B_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_O_DPI_D6 (MTK_PIN_NO(137) | 1)
+#define PINMUX_GPIO137__FUNC_I0_GBE_RXD1 (MTK_PIN_NO(137) | 2)
+#define PINMUX_GPIO137__FUNC_O_DMIC3_CLK (MTK_PIN_NO(137) | 3)
+#define PINMUX_GPIO137__FUNC_O_I2SO2_D3 (MTK_PIN_NO(137) | 4)
+#define PINMUX_GPIO137__FUNC_B0_TP_GPIO6_AO (MTK_PIN_NO(137) | 5)
+#define PINMUX_GPIO137__FUNC_B1_CLKREQN (MTK_PIN_NO(137) | 6)
+#define PINMUX_GPIO137__FUNC_O_PWM_0 (MTK_PIN_NO(137) | 7)
+
+#define PINMUX_GPIO138__FUNC_B_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_O_DPI_D7 (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_I0_GBE_RXD0 (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_I0_DMIC3_DAT (MTK_PIN_NO(138) | 3)
+#define PINMUX_GPIO138__FUNC_O_CLKM2 (MTK_PIN_NO(138) | 4)
+#define PINMUX_GPIO138__FUNC_B0_TP_GPIO7_AO (MTK_PIN_NO(138) | 5)
+#define PINMUX_GPIO138__FUNC_B0_MD32_0_GPIO0 (MTK_PIN_NO(138) | 7)
+
+#define PINMUX_GPIO139__FUNC_B_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_O_DPI_D8 (MTK_PIN_NO(139) | 1)
+#define PINMUX_GPIO139__FUNC_B0_GBE_TXC (MTK_PIN_NO(139) | 2)
+#define PINMUX_GPIO139__FUNC_I0_DMIC3_DAT_R (MTK_PIN_NO(139) | 3)
+#define PINMUX_GPIO139__FUNC_O_CLKM3 (MTK_PIN_NO(139) | 4)
+#define PINMUX_GPIO139__FUNC_O_TP_UTXD2_AO (MTK_PIN_NO(139) | 5)
+#define PINMUX_GPIO139__FUNC_O_UTXD2 (MTK_PIN_NO(139) | 6)
+#define PINMUX_GPIO139__FUNC_B0_MD32_0_GPIO1 (MTK_PIN_NO(139) | 7)
+
+#define PINMUX_GPIO140__FUNC_B_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_O_DPI_D9 (MTK_PIN_NO(140) | 1)
+#define PINMUX_GPIO140__FUNC_I0_GBE_RXC (MTK_PIN_NO(140) | 2)
+#define PINMUX_GPIO140__FUNC_O_DMIC4_CLK (MTK_PIN_NO(140) | 3)
+#define PINMUX_GPIO140__FUNC_O_PWM_2 (MTK_PIN_NO(140) | 4)
+#define PINMUX_GPIO140__FUNC_I1_TP_URXD2_AO (MTK_PIN_NO(140) | 5)
+#define PINMUX_GPIO140__FUNC_I1_URXD2 (MTK_PIN_NO(140) | 6)
+#define PINMUX_GPIO140__FUNC_B0_MD32_0_GPIO2 (MTK_PIN_NO(140) | 7)
+
+#define PINMUX_GPIO141__FUNC_B_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_O_DPI_D10 (MTK_PIN_NO(141) | 1)
+#define PINMUX_GPIO141__FUNC_I0_GBE_RXDV (MTK_PIN_NO(141) | 2)
+#define PINMUX_GPIO141__FUNC_I0_DMIC4_DAT (MTK_PIN_NO(141) | 3)
+#define PINMUX_GPIO141__FUNC_O_PWM_3 (MTK_PIN_NO(141) | 4)
+#define PINMUX_GPIO141__FUNC_O_TP_URTS2_AO (MTK_PIN_NO(141) | 5)
+#define PINMUX_GPIO141__FUNC_O_URTS2 (MTK_PIN_NO(141) | 6)
+#define PINMUX_GPIO141__FUNC_B0_MD32_1_GPIO0 (MTK_PIN_NO(141) | 7)
+
+#define PINMUX_GPIO142__FUNC_B_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_O_DPI_D11 (MTK_PIN_NO(142) | 1)
+#define PINMUX_GPIO142__FUNC_O_GBE_TXEN (MTK_PIN_NO(142) | 2)
+#define PINMUX_GPIO142__FUNC_I0_DMIC4_DAT_R (MTK_PIN_NO(142) | 3)
+#define PINMUX_GPIO142__FUNC_O_PWM_1 (MTK_PIN_NO(142) | 4)
+#define PINMUX_GPIO142__FUNC_I1_TP_UCTS2_AO (MTK_PIN_NO(142) | 5)
+#define PINMUX_GPIO142__FUNC_I1_UCTS2 (MTK_PIN_NO(142) | 6)
+#define PINMUX_GPIO142__FUNC_B0_MD32_1_GPIO1 (MTK_PIN_NO(142) | 7)
+
+#define PINMUX_GPIO143__FUNC_B_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_O_DPI_D12 (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_O_GBE_MDC (MTK_PIN_NO(143) | 2)
+#define PINMUX_GPIO143__FUNC_B0_MD32_0_GPIO0 (MTK_PIN_NO(143) | 3)
+#define PINMUX_GPIO143__FUNC_O_CLKM0 (MTK_PIN_NO(143) | 4)
+#define PINMUX_GPIO143__FUNC_O_SPIM3_CSB (MTK_PIN_NO(143) | 5)
+#define PINMUX_GPIO143__FUNC_O_UTXD1 (MTK_PIN_NO(143) | 6)
+#define PINMUX_GPIO143__FUNC_B0_MD32_1_GPIO2 (MTK_PIN_NO(143) | 7)
+
+#define PINMUX_GPIO144__FUNC_B_GPIO144 (MTK_PIN_NO(144) | 0)
+#define PINMUX_GPIO144__FUNC_O_DPI_D13 (MTK_PIN_NO(144) | 1)
+#define PINMUX_GPIO144__FUNC_B1_GBE_MDIO (MTK_PIN_NO(144) | 2)
+#define PINMUX_GPIO144__FUNC_B0_MD32_0_GPIO1 (MTK_PIN_NO(144) | 3)
+#define PINMUX_GPIO144__FUNC_O_CLKM1 (MTK_PIN_NO(144) | 4)
+#define PINMUX_GPIO144__FUNC_O_SPIM3_CLK (MTK_PIN_NO(144) | 5)
+#define PINMUX_GPIO144__FUNC_I1_URXD1 (MTK_PIN_NO(144) | 6)
+#define PINMUX_GPIO144__FUNC_O_PGD_HV_HSC_PWR0 (MTK_PIN_NO(144) | 7)
+
+#define PINMUX_GPIO145__FUNC_B_GPIO145 (MTK_PIN_NO(145) | 0)
+#define PINMUX_GPIO145__FUNC_O_DPI_D14 (MTK_PIN_NO(145) | 1)
+#define PINMUX_GPIO145__FUNC_O_GBE_TXER (MTK_PIN_NO(145) | 2)
+#define PINMUX_GPIO145__FUNC_B0_MD32_1_GPIO0 (MTK_PIN_NO(145) | 3)
+#define PINMUX_GPIO145__FUNC_O_CMFLASH0 (MTK_PIN_NO(145) | 4)
+#define PINMUX_GPIO145__FUNC_B0_SPIM3_MOSI (MTK_PIN_NO(145) | 5)
+#define PINMUX_GPIO145__FUNC_B0_GBE_AUX_PPS2 (MTK_PIN_NO(145) | 6)
+#define PINMUX_GPIO145__FUNC_O_PGD_HV_HSC_PWR1 (MTK_PIN_NO(145) | 7)
+
+#define PINMUX_GPIO146__FUNC_B_GPIO146 (MTK_PIN_NO(146) | 0)
+#define PINMUX_GPIO146__FUNC_O_DPI_D15 (MTK_PIN_NO(146) | 1)
+#define PINMUX_GPIO146__FUNC_I0_GBE_RXER (MTK_PIN_NO(146) | 2)
+#define PINMUX_GPIO146__FUNC_B0_MD32_1_GPIO1 (MTK_PIN_NO(146) | 3)
+#define PINMUX_GPIO146__FUNC_O_CMFLASH1 (MTK_PIN_NO(146) | 4)
+#define PINMUX_GPIO146__FUNC_B0_SPIM3_MISO (MTK_PIN_NO(146) | 5)
+#define PINMUX_GPIO146__FUNC_B0_GBE_AUX_PPS3 (MTK_PIN_NO(146) | 6)
+#define PINMUX_GPIO146__FUNC_O_PGD_HV_HSC_PWR2 (MTK_PIN_NO(146) | 7)
+
+#define PINMUX_GPIO147__FUNC_B_GPIO147 (MTK_PIN_NO(147) | 0)
+#define PINMUX_GPIO147__FUNC_O_DPI_HSYNC (MTK_PIN_NO(147) | 1)
+#define PINMUX_GPIO147__FUNC_I0_GBE_COL (MTK_PIN_NO(147) | 2)
+#define PINMUX_GPIO147__FUNC_O_I2SO1_MCK (MTK_PIN_NO(147) | 3)
+#define PINMUX_GPIO147__FUNC_O_CMVREF0 (MTK_PIN_NO(147) | 4)
+#define PINMUX_GPIO147__FUNC_O_SPDIF_OUT (MTK_PIN_NO(147) | 5)
+#define PINMUX_GPIO147__FUNC_O_URTS1 (MTK_PIN_NO(147) | 6)
+#define PINMUX_GPIO147__FUNC_O_PGD_HV_HSC_PWR3 (MTK_PIN_NO(147) | 7)
+
+#define PINMUX_GPIO148__FUNC_B_GPIO148 (MTK_PIN_NO(148) | 0)
+#define PINMUX_GPIO148__FUNC_O_DPI_VSYNC (MTK_PIN_NO(148) | 1)
+#define PINMUX_GPIO148__FUNC_I0_GBE_INTR (MTK_PIN_NO(148) | 2)
+#define PINMUX_GPIO148__FUNC_O_I2SO1_BCK (MTK_PIN_NO(148) | 3)
+#define PINMUX_GPIO148__FUNC_O_CMVREF1 (MTK_PIN_NO(148) | 4)
+#define PINMUX_GPIO148__FUNC_I0_SPDIF_IN0 (MTK_PIN_NO(148) | 5)
+#define PINMUX_GPIO148__FUNC_I1_UCTS1 (MTK_PIN_NO(148) | 6)
+#define PINMUX_GPIO148__FUNC_O_PGD_HV_HSC_PWR4 (MTK_PIN_NO(148) | 7)
+
+#define PINMUX_GPIO149__FUNC_B_GPIO149 (MTK_PIN_NO(149) | 0)
+#define PINMUX_GPIO149__FUNC_O_DPI_DE (MTK_PIN_NO(149) | 1)
+#define PINMUX_GPIO149__FUNC_B0_GBE_AUX_PPS0 (MTK_PIN_NO(149) | 2)
+#define PINMUX_GPIO149__FUNC_O_I2SO1_WS (MTK_PIN_NO(149) | 3)
+#define PINMUX_GPIO149__FUNC_O_CMVREF2 (MTK_PIN_NO(149) | 4)
+#define PINMUX_GPIO149__FUNC_I0_SPDIF_IN1 (MTK_PIN_NO(149) | 5)
+#define PINMUX_GPIO149__FUNC_O_UTXD3 (MTK_PIN_NO(149) | 6)
+#define PINMUX_GPIO149__FUNC_O_PGD_HV_HSC_PWR5 (MTK_PIN_NO(149) | 7)
+
+#define PINMUX_GPIO150__FUNC_B_GPIO150 (MTK_PIN_NO(150) | 0)
+#define PINMUX_GPIO150__FUNC_O_DPI_CK (MTK_PIN_NO(150) | 1)
+#define PINMUX_GPIO150__FUNC_B0_GBE_AUX_PPS1 (MTK_PIN_NO(150) | 2)
+#define PINMUX_GPIO150__FUNC_O_I2SO1_D0 (MTK_PIN_NO(150) | 3)
+#define PINMUX_GPIO150__FUNC_O_CMVREF3 (MTK_PIN_NO(150) | 4)
+#define PINMUX_GPIO150__FUNC_I0_SPDIF_IN2 (MTK_PIN_NO(150) | 5)
+#define PINMUX_GPIO150__FUNC_I1_URXD3 (MTK_PIN_NO(150) | 6)
+
+#define PINMUX_GPIO151__FUNC_B_GPIO151 (MTK_PIN_NO(151) | 0)
+#define PINMUX_GPIO151__FUNC_B1_MSDC0_DAT7 (MTK_PIN_NO(151) | 1)
+
+#define PINMUX_GPIO152__FUNC_B_GPIO152 (MTK_PIN_NO(152) | 0)
+#define PINMUX_GPIO152__FUNC_B1_MSDC0_DAT6 (MTK_PIN_NO(152) | 1)
+
+#define PINMUX_GPIO153__FUNC_B_GPIO153 (MTK_PIN_NO(153) | 0)
+#define PINMUX_GPIO153__FUNC_B1_MSDC0_DAT5 (MTK_PIN_NO(153) | 1)
+
+#define PINMUX_GPIO154__FUNC_B_GPIO154 (MTK_PIN_NO(154) | 0)
+#define PINMUX_GPIO154__FUNC_B1_MSDC0_DAT4 (MTK_PIN_NO(154) | 1)
+
+#define PINMUX_GPIO155__FUNC_B_GPIO155 (MTK_PIN_NO(155) | 0)
+#define PINMUX_GPIO155__FUNC_O_MSDC0_RSTB (MTK_PIN_NO(155) | 1)
+
+#define PINMUX_GPIO156__FUNC_B_GPIO156 (MTK_PIN_NO(156) | 0)
+#define PINMUX_GPIO156__FUNC_B1_MSDC0_CMD (MTK_PIN_NO(156) | 1)
+
+#define PINMUX_GPIO157__FUNC_B_GPIO157 (MTK_PIN_NO(157) | 0)
+#define PINMUX_GPIO157__FUNC_B1_MSDC0_CLK (MTK_PIN_NO(157) | 1)
+
+#define PINMUX_GPIO158__FUNC_B_GPIO158 (MTK_PIN_NO(158) | 0)
+#define PINMUX_GPIO158__FUNC_B1_MSDC0_DAT3 (MTK_PIN_NO(158) | 1)
+
+#define PINMUX_GPIO159__FUNC_B_GPIO159 (MTK_PIN_NO(159) | 0)
+#define PINMUX_GPIO159__FUNC_B1_MSDC0_DAT2 (MTK_PIN_NO(159) | 1)
+
+#define PINMUX_GPIO160__FUNC_B_GPIO160 (MTK_PIN_NO(160) | 0)
+#define PINMUX_GPIO160__FUNC_B1_MSDC0_DAT1 (MTK_PIN_NO(160) | 1)
+
+#define PINMUX_GPIO161__FUNC_B_GPIO161 (MTK_PIN_NO(161) | 0)
+#define PINMUX_GPIO161__FUNC_B1_MSDC0_DAT0 (MTK_PIN_NO(161) | 1)
+
+#define PINMUX_GPIO162__FUNC_B_GPIO162 (MTK_PIN_NO(162) | 0)
+#define PINMUX_GPIO162__FUNC_B0_MSDC0_DSL (MTK_PIN_NO(162) | 1)
+
+#define PINMUX_GPIO163__FUNC_B_GPIO163 (MTK_PIN_NO(163) | 0)
+#define PINMUX_GPIO163__FUNC_B1_MSDC1_CMD (MTK_PIN_NO(163) | 1)
+#define PINMUX_GPIO163__FUNC_O_SPDIF_OUT (MTK_PIN_NO(163) | 2)
+#define PINMUX_GPIO163__FUNC_I1_MD32_0_JTAG_TMS (MTK_PIN_NO(163) | 3)
+#define PINMUX_GPIO163__FUNC_I1_ADSP_JTAG0_TMS (MTK_PIN_NO(163) | 4)
+#define PINMUX_GPIO163__FUNC_I1_SCP_JTAG0_TMS (MTK_PIN_NO(163) | 5)
+#define PINMUX_GPIO163__FUNC_I1_CCU0_JTAG_TMS (MTK_PIN_NO(163) | 6)
+#define PINMUX_GPIO163__FUNC_I0_IPU_JTAG_TMS (MTK_PIN_NO(163) | 7)
+
+#define PINMUX_GPIO164__FUNC_B_GPIO164 (MTK_PIN_NO(164) | 0)
+#define PINMUX_GPIO164__FUNC_B1_MSDC1_CLK (MTK_PIN_NO(164) | 1)
+#define PINMUX_GPIO164__FUNC_I0_SPDIF_IN0 (MTK_PIN_NO(164) | 2)
+#define PINMUX_GPIO164__FUNC_I1_MD32_0_JTAG_TCK (MTK_PIN_NO(164) | 3)
+#define PINMUX_GPIO164__FUNC_I0_ADSP_JTAG0_TCK (MTK_PIN_NO(164) | 4)
+#define PINMUX_GPIO164__FUNC_I1_SCP_JTAG0_TCK (MTK_PIN_NO(164) | 5)
+#define PINMUX_GPIO164__FUNC_I1_CCU0_JTAG_TCK (MTK_PIN_NO(164) | 6)
+#define PINMUX_GPIO164__FUNC_I0_IPU_JTAG_TCK (MTK_PIN_NO(164) | 7)
+
+#define PINMUX_GPIO165__FUNC_B_GPIO165 (MTK_PIN_NO(165) | 0)
+#define PINMUX_GPIO165__FUNC_B1_MSDC1_DAT0 (MTK_PIN_NO(165) | 1)
+#define PINMUX_GPIO165__FUNC_I0_SPDIF_IN1 (MTK_PIN_NO(165) | 2)
+#define PINMUX_GPIO165__FUNC_I1_MD32_0_JTAG_TDI (MTK_PIN_NO(165) | 3)
+#define PINMUX_GPIO165__FUNC_I1_ADSP_JTAG0_TDI (MTK_PIN_NO(165) | 4)
+#define PINMUX_GPIO165__FUNC_I1_SCP_JTAG0_TDI (MTK_PIN_NO(165) | 5)
+#define PINMUX_GPIO165__FUNC_I1_CCU0_JTAG_TDI (MTK_PIN_NO(165) | 6)
+#define PINMUX_GPIO165__FUNC_I0_IPU_JTAG_TDI (MTK_PIN_NO(165) | 7)
+
+#define PINMUX_GPIO166__FUNC_B_GPIO166 (MTK_PIN_NO(166) | 0)
+#define PINMUX_GPIO166__FUNC_B1_MSDC1_DAT1 (MTK_PIN_NO(166) | 1)
+#define PINMUX_GPIO166__FUNC_I0_SPDIF_IN2 (MTK_PIN_NO(166) | 2)
+#define PINMUX_GPIO166__FUNC_O_MD32_0_JTAG_TDO (MTK_PIN_NO(166) | 3)
+#define PINMUX_GPIO166__FUNC_O_ADSP_JTAG0_TDO (MTK_PIN_NO(166) | 4)
+#define PINMUX_GPIO166__FUNC_O_SCP_JTAG0_TDO (MTK_PIN_NO(166) | 5)
+#define PINMUX_GPIO166__FUNC_O_CCU0_JTAG_TDO (MTK_PIN_NO(166) | 6)
+#define PINMUX_GPIO166__FUNC_O_IPU_JTAG_TDO (MTK_PIN_NO(166) | 7)
+
+#define PINMUX_GPIO167__FUNC_B_GPIO167 (MTK_PIN_NO(167) | 0)
+#define PINMUX_GPIO167__FUNC_B1_MSDC1_DAT2 (MTK_PIN_NO(167) | 1)
+#define PINMUX_GPIO167__FUNC_O_PWM_0 (MTK_PIN_NO(167) | 2)
+#define PINMUX_GPIO167__FUNC_I1_MD32_0_JTAG_TRST (MTK_PIN_NO(167) | 3)
+#define PINMUX_GPIO167__FUNC_I1_ADSP_JTAG0_TRSTN (MTK_PIN_NO(167) | 4)
+#define PINMUX_GPIO167__FUNC_I0_SCP_JTAG0_TRSTN (MTK_PIN_NO(167) | 5)
+#define PINMUX_GPIO167__FUNC_I1_CCU0_JTAG_TRST (MTK_PIN_NO(167) | 6)
+#define PINMUX_GPIO167__FUNC_I0_IPU_JTAG_TRST (MTK_PIN_NO(167) | 7)
+
+#define PINMUX_GPIO168__FUNC_B_GPIO168 (MTK_PIN_NO(168) | 0)
+#define PINMUX_GPIO168__FUNC_B1_MSDC1_DAT3 (MTK_PIN_NO(168) | 1)
+#define PINMUX_GPIO168__FUNC_O_PWM_1 (MTK_PIN_NO(168) | 2)
+#define PINMUX_GPIO168__FUNC_O_CLKM0 (MTK_PIN_NO(168) | 3)
+
+#define PINMUX_GPIO169__FUNC_B_GPIO169 (MTK_PIN_NO(169) | 0)
+#define PINMUX_GPIO169__FUNC_B1_MSDC2_CMD (MTK_PIN_NO(169) | 1)
+#define PINMUX_GPIO169__FUNC_O_LVTS_FOUT (MTK_PIN_NO(169) | 2)
+#define PINMUX_GPIO169__FUNC_I1_MD32_1_JTAG_TMS (MTK_PIN_NO(169) | 3)
+#define PINMUX_GPIO169__FUNC_I0_UDI_TMS (MTK_PIN_NO(169) | 4)
+#define PINMUX_GPIO169__FUNC_I0_VPU_UDI_TMS (MTK_PIN_NO(169) | 5)
+#define PINMUX_GPIO169__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(169) | 6)
+#define PINMUX_GPIO169__FUNC_I1_SSPM_JTAG_TMS (MTK_PIN_NO(169) | 7)
+
+#define PINMUX_GPIO170__FUNC_B_GPIO170 (MTK_PIN_NO(170) | 0)
+#define PINMUX_GPIO170__FUNC_B1_MSDC2_CLK (MTK_PIN_NO(170) | 1)
+#define PINMUX_GPIO170__FUNC_O_LVTS_SDO (MTK_PIN_NO(170) | 2)
+#define PINMUX_GPIO170__FUNC_I1_MD32_1_JTAG_TCK (MTK_PIN_NO(170) | 3)
+#define PINMUX_GPIO170__FUNC_I0_UDI_TCK (MTK_PIN_NO(170) | 4)
+#define PINMUX_GPIO170__FUNC_I0_VPU_UDI_TCK (MTK_PIN_NO(170) | 5)
+#define PINMUX_GPIO170__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(170) | 6)
+#define PINMUX_GPIO170__FUNC_I1_SSPM_JTAG_TCK (MTK_PIN_NO(170) | 7)
+
+#define PINMUX_GPIO171__FUNC_B_GPIO171 (MTK_PIN_NO(171) | 0)
+#define PINMUX_GPIO171__FUNC_B1_MSDC2_DAT0 (MTK_PIN_NO(171) | 1)
+#define PINMUX_GPIO171__FUNC_I0_LVTS_26M (MTK_PIN_NO(171) | 2)
+#define PINMUX_GPIO171__FUNC_I1_MD32_1_JTAG_TDI (MTK_PIN_NO(171) | 3)
+#define PINMUX_GPIO171__FUNC_I0_UDI_TDI (MTK_PIN_NO(171) | 4)
+#define PINMUX_GPIO171__FUNC_I0_VPU_UDI_TDI (MTK_PIN_NO(171) | 5)
+#define PINMUX_GPIO171__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(171) | 6)
+#define PINMUX_GPIO171__FUNC_I1_SSPM_JTAG_TDI (MTK_PIN_NO(171) | 7)
+
+#define PINMUX_GPIO172__FUNC_B_GPIO172 (MTK_PIN_NO(172) | 0)
+#define PINMUX_GPIO172__FUNC_B1_MSDC2_DAT1 (MTK_PIN_NO(172) | 1)
+#define PINMUX_GPIO172__FUNC_I0_LVTS_SCF (MTK_PIN_NO(172) | 2)
+#define PINMUX_GPIO172__FUNC_O_MD32_1_JTAG_TDO (MTK_PIN_NO(172) | 3)
+#define PINMUX_GPIO172__FUNC_O_UDI_TDO (MTK_PIN_NO(172) | 4)
+#define PINMUX_GPIO172__FUNC_O_VPU_UDI_TDO (MTK_PIN_NO(172) | 5)
+#define PINMUX_GPIO172__FUNC_I0_TDMIN_DI (MTK_PIN_NO(172) | 6)
+#define PINMUX_GPIO172__FUNC_O_SSPM_JTAG_TDO (MTK_PIN_NO(172) | 7)
+
+#define PINMUX_GPIO173__FUNC_B_GPIO173 (MTK_PIN_NO(173) | 0)
+#define PINMUX_GPIO173__FUNC_B1_MSDC2_DAT2 (MTK_PIN_NO(173) | 1)
+#define PINMUX_GPIO173__FUNC_I0_LVTS_SCK (MTK_PIN_NO(173) | 2)
+#define PINMUX_GPIO173__FUNC_I1_MD32_1_JTAG_TRST (MTK_PIN_NO(173) | 3)
+#define PINMUX_GPIO173__FUNC_I0_UDI_NTRST (MTK_PIN_NO(173) | 4)
+#define PINMUX_GPIO173__FUNC_I0_VPU_UDI_NTRST (MTK_PIN_NO(173) | 5)
+#define PINMUX_GPIO173__FUNC_I0_SSPM_JTAG_TRSTN (MTK_PIN_NO(173) | 7)
+
+#define PINMUX_GPIO174__FUNC_B_GPIO174 (MTK_PIN_NO(174) | 0)
+#define PINMUX_GPIO174__FUNC_B1_MSDC2_DAT3 (MTK_PIN_NO(174) | 1)
+#define PINMUX_GPIO174__FUNC_I0_LVTS_SDI (MTK_PIN_NO(174) | 2)
+
+#define PINMUX_GPIO175__FUNC_B_GPIO175 (MTK_PIN_NO(175) | 0)
+#define PINMUX_GPIO175__FUNC_B0_SPMI_M_SCL (MTK_PIN_NO(175) | 1)
+
+#define PINMUX_GPIO176__FUNC_B_GPIO176 (MTK_PIN_NO(176) | 0)
+#define PINMUX_GPIO176__FUNC_B0_SPMI_M_SDA (MTK_PIN_NO(176) | 1)
+
+#endif /* __MEDIATEK_MT8188_PINFUNC_H */
diff --git a/include/dt-bindings/pinctrl/mt65xx.h b/include/dt-bindings/pinctrl/mt65xx.h
index 7e16e58fe1f7..f5934abcd1bd 100644
--- a/include/dt-bindings/pinctrl/mt65xx.h
+++ b/include/dt-bindings/pinctrl/mt65xx.h
@@ -16,6 +16,15 @@
#define MTK_PUPD_SET_R1R0_10 102
#define MTK_PUPD_SET_R1R0_11 103
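+/*
+ * Illustrative note: MTK_PULL_SET_RSEL_000..111 (200-207) select one of
+ * eight resistor values on pins wired with RSEL-type pull structures;
+ * boards pass them as the argument of bias-pull-up/bias-pull-down.
+ */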
+#define MTK_PULL_SET_RSEL_000 200
+#define MTK_PULL_SET_RSEL_001 201
+#define MTK_PULL_SET_RSEL_010 202
+#define MTK_PULL_SET_RSEL_011 203
+#define MTK_PULL_SET_RSEL_100 204
+#define MTK_PULL_SET_RSEL_101 205
+#define MTK_PULL_SET_RSEL_110 206
+#define MTK_PULL_SET_RSEL_111 207
+
#define MTK_DRIVE_2mA 2
#define MTK_DRIVE_4mA 4
#define MTK_DRIVE_6mA 6
diff --git a/include/dt-bindings/pinctrl/mt6779-pinfunc.h b/include/dt-bindings/pinctrl/mt6779-pinfunc.h
new file mode 100644
index 000000000000..87fdc4310936
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt6779-pinfunc.h
@@ -0,0 +1,1242 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ * Author: Andy Teng <andy.teng@mediatek.com>
+ *
+ */
+
+#ifndef __MT6779_PINFUNC_H
+#define __MT6779_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
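+/*
+ * Each PINMUX_GPIOn__FUNC_x value packs the pin number, via MTK_PIN_NO()
+ * from mt65xx.h, together with a mux selector in the low bits; selector 0
+ * is always plain GPIO mode. A board device tree picks a function with,
+ * e.g. (illustrative only):
+ *
+ *	pinmux = <PINMUX_GPIO0__FUNC_SPI6_MI>,
+ *		 <PINMUX_GPIO3__FUNC_SPI6_CLK>;
+ */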
+#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_SPI6_MI (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_I2S5_LRCK (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_TDM_LRCK_2ND (MTK_PIN_NO(0) | 3)
+#define PINMUX_GPIO0__FUNC_PCM1_SYNC (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_SCL_6306 (MTK_PIN_NO(0) | 5)
+#define PINMUX_GPIO0__FUNC_TP_GPIO0_AO (MTK_PIN_NO(0) | 6)
+#define PINMUX_GPIO0__FUNC_PTA_RXD (MTK_PIN_NO(0) | 7)
+
+#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_SPI6_CSB (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_I2S5_DO (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_TDM_DATA0_2ND (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_PCM1_DO0 (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_SDA_6306 (MTK_PIN_NO(1) | 5)
+#define PINMUX_GPIO1__FUNC_TP_GPIO1_AO (MTK_PIN_NO(1) | 6)
+#define PINMUX_GPIO1__FUNC_PTA_TXD (MTK_PIN_NO(1) | 7)
+
+#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_SPI6_MO (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_I2S5_BCK (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_TDM_BCK_2ND (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_PCM1_CLK (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(2) | 5)
+#define PINMUX_GPIO2__FUNC_TP_GPIO2_AO (MTK_PIN_NO(2) | 6)
+
+#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_SPI6_CLK (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_I2S5_MCK (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_TDM_MCK_2ND (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(3) | 5)
+#define PINMUX_GPIO3__FUNC_TP_GPIO3_AO (MTK_PIN_NO(3) | 6)
+
+#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_SPI7_MI (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_I2S0_MCK (MTK_PIN_NO(4) | 2)
+#define PINMUX_GPIO4__FUNC_TDM_DATA1_2ND (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_PCM1_DO1 (MTK_PIN_NO(4) | 4)
+#define PINMUX_GPIO4__FUNC_DMIC1_CLK (MTK_PIN_NO(4) | 5)
+#define PINMUX_GPIO4__FUNC_TP_GPIO4_AO (MTK_PIN_NO(4) | 6)
+#define PINMUX_GPIO4__FUNC_SCL8 (MTK_PIN_NO(4) | 7)
+
+#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_SPI7_CSB (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_I2S0_BCK (MTK_PIN_NO(5) | 2)
+#define PINMUX_GPIO5__FUNC_TDM_DATA2_2ND (MTK_PIN_NO(5) | 3)
+#define PINMUX_GPIO5__FUNC_PCM1_DO2 (MTK_PIN_NO(5) | 4)
+#define PINMUX_GPIO5__FUNC_DMIC1_DAT (MTK_PIN_NO(5) | 5)
+#define PINMUX_GPIO5__FUNC_TP_GPIO5_AO (MTK_PIN_NO(5) | 6)
+#define PINMUX_GPIO5__FUNC_SDA8 (MTK_PIN_NO(5) | 7)
+
+#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_SPI7_MO (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_I2S0_LRCK (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_TDM_DATA3_2ND (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_PCM1_DI (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_DMIC_CLK (MTK_PIN_NO(6) | 5)
+#define PINMUX_GPIO6__FUNC_TP_GPIO6_AO (MTK_PIN_NO(6) | 6)
+#define PINMUX_GPIO6__FUNC_SCL9 (MTK_PIN_NO(6) | 7)
+
+#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_SPI7_CLK (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_I2S0_DI (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_SRCLKENAI1 (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_DMIC_DAT (MTK_PIN_NO(7) | 5)
+#define PINMUX_GPIO7__FUNC_TP_GPIO7_AO (MTK_PIN_NO(7) | 6)
+#define PINMUX_GPIO7__FUNC_SDA9 (MTK_PIN_NO(7) | 7)
+
+#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_PWM_0 (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_I2S2_DI2 (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_SRCLKENAI0 (MTK_PIN_NO(8) | 3)
+#define PINMUX_GPIO8__FUNC_URXD1 (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_I2S0_MCK (MTK_PIN_NO(8) | 5)
+#define PINMUX_GPIO8__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(8) | 6)
+#define PINMUX_GPIO8__FUNC_IDDIG (MTK_PIN_NO(8) | 7)
+
+#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_PWM_3 (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_MD_INT0 (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_SRCLKENAI1 (MTK_PIN_NO(9) | 3)
+#define PINMUX_GPIO9__FUNC_UTXD1 (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_I2S0_BCK (MTK_PIN_NO(9) | 5)
+#define PINMUX_GPIO9__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(9) | 6)
+#define PINMUX_GPIO9__FUNC_USB_DRVVBUS (MTK_PIN_NO(9) | 7)
+
+#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_MSDC1_CLK_A (MTK_PIN_NO(10) | 1)
+#define PINMUX_GPIO10__FUNC_TP_URXD1_AO (MTK_PIN_NO(10) | 2)
+#define PINMUX_GPIO10__FUNC_I2S1_LRCK (MTK_PIN_NO(10) | 3)
+#define PINMUX_GPIO10__FUNC_UCTS0 (MTK_PIN_NO(10) | 4)
+#define PINMUX_GPIO10__FUNC_DMIC1_CLK (MTK_PIN_NO(10) | 5)
+#define PINMUX_GPIO10__FUNC_KPCOL2 (MTK_PIN_NO(10) | 6)
+#define PINMUX_GPIO10__FUNC_SCL8 (MTK_PIN_NO(10) | 7)
+
+#define PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_MSDC1_CMD_A (MTK_PIN_NO(11) | 1)
+#define PINMUX_GPIO11__FUNC_TP_UTXD1_AO (MTK_PIN_NO(11) | 2)
+#define PINMUX_GPIO11__FUNC_I2S1_DO (MTK_PIN_NO(11) | 3)
+#define PINMUX_GPIO11__FUNC_URTS0 (MTK_PIN_NO(11) | 4)
+#define PINMUX_GPIO11__FUNC_DMIC1_DAT (MTK_PIN_NO(11) | 5)
+#define PINMUX_GPIO11__FUNC_KPROW2 (MTK_PIN_NO(11) | 6)
+#define PINMUX_GPIO11__FUNC_SDA8 (MTK_PIN_NO(11) | 7)
+
+#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_MSDC1_DAT3_A (MTK_PIN_NO(12) | 1)
+#define PINMUX_GPIO12__FUNC_TP_URXD2_AO (MTK_PIN_NO(12) | 2)
+#define PINMUX_GPIO12__FUNC_I2S1_MCK (MTK_PIN_NO(12) | 3)
+#define PINMUX_GPIO12__FUNC_UCTS1 (MTK_PIN_NO(12) | 4)
+#define PINMUX_GPIO12__FUNC_DMIC_CLK (MTK_PIN_NO(12) | 5)
+#define PINMUX_GPIO12__FUNC_ANT_SEL9 (MTK_PIN_NO(12) | 6)
+#define PINMUX_GPIO12__FUNC_SCL9 (MTK_PIN_NO(12) | 7)
+
+#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_MSDC1_DAT0_A (MTK_PIN_NO(13) | 1)
+#define PINMUX_GPIO13__FUNC_TP_UTXD2_AO (MTK_PIN_NO(13) | 2)
+#define PINMUX_GPIO13__FUNC_I2S1_BCK (MTK_PIN_NO(13) | 3)
+#define PINMUX_GPIO13__FUNC_URTS1 (MTK_PIN_NO(13) | 4)
+#define PINMUX_GPIO13__FUNC_DMIC_DAT (MTK_PIN_NO(13) | 5)
+#define PINMUX_GPIO13__FUNC_ANT_SEL10 (MTK_PIN_NO(13) | 6)
+#define PINMUX_GPIO13__FUNC_SDA9 (MTK_PIN_NO(13) | 7)
+
+#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_MSDC1_DAT2_A (MTK_PIN_NO(14) | 1)
+#define PINMUX_GPIO14__FUNC_PWM_3 (MTK_PIN_NO(14) | 2)
+#define PINMUX_GPIO14__FUNC_IDDIG (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_MD_INT0 (MTK_PIN_NO(14) | 4)
+#define PINMUX_GPIO14__FUNC_PTA_RXD (MTK_PIN_NO(14) | 5)
+#define PINMUX_GPIO14__FUNC_ANT_SEL11 (MTK_PIN_NO(14) | 6)
+
+#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_MSDC1_DAT1_A (MTK_PIN_NO(15) | 1)
+#define PINMUX_GPIO15__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_USB_DRVVBUS (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(15) | 4)
+#define PINMUX_GPIO15__FUNC_PTA_TXD (MTK_PIN_NO(15) | 5)
+#define PINMUX_GPIO15__FUNC_ANT_SEL12 (MTK_PIN_NO(15) | 6)
+
+#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_SRCLKENAI0 (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_MFG_EJTAG_TRSTN (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(16) | 4)
+#define PINMUX_GPIO16__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(16) | 5)
+#define PINMUX_GPIO16__FUNC_PWM_2 (MTK_PIN_NO(16) | 6)
+#define PINMUX_GPIO16__FUNC_JTRSTN_SEL1 (MTK_PIN_NO(16) | 7)
+
+#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_SPI0_A_MI (MTK_PIN_NO(17) | 1)
+#define PINMUX_GPIO17__FUNC_SCP_SPI0_MI (MTK_PIN_NO(17) | 2)
+#define PINMUX_GPIO17__FUNC_MFG_EJTAG_TDO (MTK_PIN_NO(17) | 3)
+#define PINMUX_GPIO17__FUNC_DPI_HSYNC (MTK_PIN_NO(17) | 4)
+#define PINMUX_GPIO17__FUNC_MFG_DFD_JTAG_TDO (MTK_PIN_NO(17) | 5)
+#define PINMUX_GPIO17__FUNC_DFD_TDO (MTK_PIN_NO(17) | 6)
+#define PINMUX_GPIO17__FUNC_JTDO_SEL1 (MTK_PIN_NO(17) | 7)
+
+#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_SPI0_A_MO (MTK_PIN_NO(18) | 1)
+#define PINMUX_GPIO18__FUNC_SCP_SPI0_MO (MTK_PIN_NO(18) | 2)
+#define PINMUX_GPIO18__FUNC_MFG_EJTAG_TDI (MTK_PIN_NO(18) | 3)
+#define PINMUX_GPIO18__FUNC_DPI_VSYNC (MTK_PIN_NO(18) | 4)
+#define PINMUX_GPIO18__FUNC_MFG_DFD_JTAG_TDI (MTK_PIN_NO(18) | 5)
+#define PINMUX_GPIO18__FUNC_DFD_TDI (MTK_PIN_NO(18) | 6)
+#define PINMUX_GPIO18__FUNC_JTDI_SEL1 (MTK_PIN_NO(18) | 7)
+
+#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_SPI0_A_CSB (MTK_PIN_NO(19) | 1)
+#define PINMUX_GPIO19__FUNC_SCP_SPI0_CS (MTK_PIN_NO(19) | 2)
+#define PINMUX_GPIO19__FUNC_MFG_EJTAG_TMS (MTK_PIN_NO(19) | 3)
+#define PINMUX_GPIO19__FUNC_DPI_DE (MTK_PIN_NO(19) | 4)
+#define PINMUX_GPIO19__FUNC_MFG_DFD_JTAG_TMS (MTK_PIN_NO(19) | 5)
+#define PINMUX_GPIO19__FUNC_DFD_TMS (MTK_PIN_NO(19) | 6)
+#define PINMUX_GPIO19__FUNC_JTMS_SEL1 (MTK_PIN_NO(19) | 7)
+
+#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_SPI0_A_CLK (MTK_PIN_NO(20) | 1)
+#define PINMUX_GPIO20__FUNC_SCP_SPI0_CK (MTK_PIN_NO(20) | 2)
+#define PINMUX_GPIO20__FUNC_MFG_EJTAG_TCK (MTK_PIN_NO(20) | 3)
+#define PINMUX_GPIO20__FUNC_DPI_CK (MTK_PIN_NO(20) | 4)
+#define PINMUX_GPIO20__FUNC_MFG_DFD_JTAG_TCK (MTK_PIN_NO(20) | 5)
+#define PINMUX_GPIO20__FUNC_DFD_TCK_XI (MTK_PIN_NO(20) | 6)
+#define PINMUX_GPIO20__FUNC_JTCK_SEL1 (MTK_PIN_NO(20) | 7)
+
+#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_PWM_0 (MTK_PIN_NO(21) | 1)
+#define PINMUX_GPIO21__FUNC_CMFLASH0 (MTK_PIN_NO(21) | 2)
+#define PINMUX_GPIO21__FUNC_CMVREF2 (MTK_PIN_NO(21) | 3)
+#define PINMUX_GPIO21__FUNC_CLKM0 (MTK_PIN_NO(21) | 4)
+#define PINMUX_GPIO21__FUNC_ANT_SEL9 (MTK_PIN_NO(21) | 5)
+#define PINMUX_GPIO21__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(21) | 6)
+#define PINMUX_GPIO21__FUNC_DBG_MON_A27 (MTK_PIN_NO(21) | 7)
+
+#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_PWM_1 (MTK_PIN_NO(22) | 1)
+#define PINMUX_GPIO22__FUNC_CMFLASH1 (MTK_PIN_NO(22) | 2)
+#define PINMUX_GPIO22__FUNC_CMVREF3 (MTK_PIN_NO(22) | 3)
+#define PINMUX_GPIO22__FUNC_CLKM1 (MTK_PIN_NO(22) | 4)
+#define PINMUX_GPIO22__FUNC_ANT_SEL10 (MTK_PIN_NO(22) | 5)
+#define PINMUX_GPIO22__FUNC_DBG_MON_A28 (MTK_PIN_NO(22) | 7)
+
+#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_PWM_2 (MTK_PIN_NO(23) | 1)
+#define PINMUX_GPIO23__FUNC_CMFLASH2 (MTK_PIN_NO(23) | 2)
+#define PINMUX_GPIO23__FUNC_CMVREF0 (MTK_PIN_NO(23) | 3)
+#define PINMUX_GPIO23__FUNC_CLKM2 (MTK_PIN_NO(23) | 4)
+#define PINMUX_GPIO23__FUNC_ANT_SEL11 (MTK_PIN_NO(23) | 5)
+#define PINMUX_GPIO23__FUNC_DBG_MON_A29 (MTK_PIN_NO(23) | 7)
+
+#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_PWM_0 (MTK_PIN_NO(24) | 1)
+#define PINMUX_GPIO24__FUNC_CMFLASH3 (MTK_PIN_NO(24) | 2)
+#define PINMUX_GPIO24__FUNC_CMVREF1 (MTK_PIN_NO(24) | 3)
+#define PINMUX_GPIO24__FUNC_CLKM3 (MTK_PIN_NO(24) | 4)
+#define PINMUX_GPIO24__FUNC_ANT_SEL12 (MTK_PIN_NO(24) | 5)
+#define PINMUX_GPIO24__FUNC_DBG_MON_A30 (MTK_PIN_NO(24) | 7)
+
+#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_SRCLKENAI0 (MTK_PIN_NO(25) | 1)
+#define PINMUX_GPIO25__FUNC_UCTS0 (MTK_PIN_NO(25) | 2)
+#define PINMUX_GPIO25__FUNC_SCL8 (MTK_PIN_NO(25) | 3)
+#define PINMUX_GPIO25__FUNC_CMVREF4 (MTK_PIN_NO(25) | 4)
+#define PINMUX_GPIO25__FUNC_I2S0_LRCK (MTK_PIN_NO(25) | 5)
+#define PINMUX_GPIO25__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(25) | 6)
+#define PINMUX_GPIO25__FUNC_DBG_MON_A31 (MTK_PIN_NO(25) | 7)
+
+#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_PWM_0 (MTK_PIN_NO(26) | 1)
+#define PINMUX_GPIO26__FUNC_URTS0 (MTK_PIN_NO(26) | 2)
+#define PINMUX_GPIO26__FUNC_SDA8 (MTK_PIN_NO(26) | 3)
+#define PINMUX_GPIO26__FUNC_CLKM0 (MTK_PIN_NO(26) | 4)
+#define PINMUX_GPIO26__FUNC_I2S0_DI (MTK_PIN_NO(26) | 5)
+#define PINMUX_GPIO26__FUNC_AGPS_SYNC (MTK_PIN_NO(26) | 6)
+#define PINMUX_GPIO26__FUNC_DBG_MON_A32 (MTK_PIN_NO(26) | 7)
+
+#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_AP_GOOD (MTK_PIN_NO(27) | 1)
+
+#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_SCL5 (MTK_PIN_NO(28) | 1)
+
+#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_SDA5 (MTK_PIN_NO(29) | 1)
+
+#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_I2S1_MCK (MTK_PIN_NO(30) | 1)
+#define PINMUX_GPIO30__FUNC_I2S3_MCK (MTK_PIN_NO(30) | 2)
+#define PINMUX_GPIO30__FUNC_I2S2_MCK (MTK_PIN_NO(30) | 3)
+#define PINMUX_GPIO30__FUNC_DPI_D0 (MTK_PIN_NO(30) | 4)
+#define PINMUX_GPIO30__FUNC_SPI4_MI (MTK_PIN_NO(30) | 5)
+#define PINMUX_GPIO30__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(30) | 6)
+
+#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_I2S1_BCK (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_I2S3_BCK (MTK_PIN_NO(31) | 2)
+#define PINMUX_GPIO31__FUNC_I2S2_BCK (MTK_PIN_NO(31) | 3)
+#define PINMUX_GPIO31__FUNC_DPI_D1 (MTK_PIN_NO(31) | 4)
+#define PINMUX_GPIO31__FUNC_SPI4_CSB (MTK_PIN_NO(31) | 5)
+#define PINMUX_GPIO31__FUNC_CONN_MCU_TDO (MTK_PIN_NO(31) | 6)
+
+#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_I2S1_LRCK (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_I2S3_LRCK (MTK_PIN_NO(32) | 2)
+#define PINMUX_GPIO32__FUNC_I2S2_LRCK (MTK_PIN_NO(32) | 3)
+#define PINMUX_GPIO32__FUNC_DPI_D2 (MTK_PIN_NO(32) | 4)
+#define PINMUX_GPIO32__FUNC_SPI4_MO (MTK_PIN_NO(32) | 5)
+#define PINMUX_GPIO32__FUNC_CONN_MCU_TDI (MTK_PIN_NO(32) | 6)
+
+#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_I2S2_DI (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_I2S0_DI (MTK_PIN_NO(33) | 2)
+#define PINMUX_GPIO33__FUNC_I2S5_DO (MTK_PIN_NO(33) | 3)
+#define PINMUX_GPIO33__FUNC_DPI_D3 (MTK_PIN_NO(33) | 4)
+#define PINMUX_GPIO33__FUNC_SPI4_CLK (MTK_PIN_NO(33) | 5)
+#define PINMUX_GPIO33__FUNC_CONN_MCU_TMS (MTK_PIN_NO(33) | 6)
+
+#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_I2S1_DO (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_I2S3_DO (MTK_PIN_NO(34) | 2)
+#define PINMUX_GPIO34__FUNC_I2S2_DI2 (MTK_PIN_NO(34) | 3)
+#define PINMUX_GPIO34__FUNC_DPI_D4 (MTK_PIN_NO(34) | 4)
+#define PINMUX_GPIO34__FUNC_AGPS_SYNC (MTK_PIN_NO(34) | 5)
+#define PINMUX_GPIO34__FUNC_CONN_MCU_TCK (MTK_PIN_NO(34) | 6)
+
+#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_TDM_LRCK (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_I2S1_LRCK (MTK_PIN_NO(35) | 2)
+#define PINMUX_GPIO35__FUNC_I2S5_LRCK (MTK_PIN_NO(35) | 3)
+#define PINMUX_GPIO35__FUNC_DPI_D5 (MTK_PIN_NO(35) | 4)
+#define PINMUX_GPIO35__FUNC_SPI5_A_MO (MTK_PIN_NO(35) | 5)
+#define PINMUX_GPIO35__FUNC_IO_JTAG_TDI (MTK_PIN_NO(35) | 6)
+#define PINMUX_GPIO35__FUNC_PWM_2 (MTK_PIN_NO(35) | 7)
+
+#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_TDM_BCK (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_I2S1_BCK (MTK_PIN_NO(36) | 2)
+#define PINMUX_GPIO36__FUNC_I2S5_BCK (MTK_PIN_NO(36) | 3)
+#define PINMUX_GPIO36__FUNC_DPI_D6 (MTK_PIN_NO(36) | 4)
+#define PINMUX_GPIO36__FUNC_SPI5_A_CSB (MTK_PIN_NO(36) | 5)
+#define PINMUX_GPIO36__FUNC_IO_JTAG_TRSTN (MTK_PIN_NO(36) | 6)
+#define PINMUX_GPIO36__FUNC_SRCLKENAI1 (MTK_PIN_NO(36) | 7)
+
+#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_TDM_MCK (MTK_PIN_NO(37) | 1)
+#define PINMUX_GPIO37__FUNC_I2S1_MCK (MTK_PIN_NO(37) | 2)
+#define PINMUX_GPIO37__FUNC_I2S5_MCK (MTK_PIN_NO(37) | 3)
+#define PINMUX_GPIO37__FUNC_DPI_D7 (MTK_PIN_NO(37) | 4)
+#define PINMUX_GPIO37__FUNC_SPI5_A_MI (MTK_PIN_NO(37) | 5)
+#define PINMUX_GPIO37__FUNC_IO_JTAG_TCK (MTK_PIN_NO(37) | 6)
+#define PINMUX_GPIO37__FUNC_SRCLKENAI0 (MTK_PIN_NO(37) | 7)
+
+#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_TDM_DATA0 (MTK_PIN_NO(38) | 1)
+#define PINMUX_GPIO38__FUNC_I2S2_DI (MTK_PIN_NO(38) | 2)
+#define PINMUX_GPIO38__FUNC_I2S5_DO (MTK_PIN_NO(38) | 3)
+#define PINMUX_GPIO38__FUNC_DPI_D8 (MTK_PIN_NO(38) | 4)
+#define PINMUX_GPIO38__FUNC_SPI5_A_CLK (MTK_PIN_NO(38) | 5)
+#define PINMUX_GPIO38__FUNC_IO_JTAG_TDO (MTK_PIN_NO(38) | 6)
+#define PINMUX_GPIO38__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(38) | 7)
+
+#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_TDM_DATA1 (MTK_PIN_NO(39) | 1)
+#define PINMUX_GPIO39__FUNC_I2S1_DO (MTK_PIN_NO(39) | 2)
+#define PINMUX_GPIO39__FUNC_I2S2_DI2 (MTK_PIN_NO(39) | 3)
+#define PINMUX_GPIO39__FUNC_DPI_D9 (MTK_PIN_NO(39) | 4)
+#define PINMUX_GPIO39__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(39) | 5)
+#define PINMUX_GPIO39__FUNC_IO_JTAG_TMS (MTK_PIN_NO(39) | 6)
+#define PINMUX_GPIO39__FUNC_IDDIG (MTK_PIN_NO(39) | 7)
+
+#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_TDM_DATA2 (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_SCL9 (MTK_PIN_NO(40) | 2)
+#define PINMUX_GPIO40__FUNC_PWM_3 (MTK_PIN_NO(40) | 3)
+#define PINMUX_GPIO40__FUNC_DPI_D10 (MTK_PIN_NO(40) | 4)
+#define PINMUX_GPIO40__FUNC_SRCLKENAI0 (MTK_PIN_NO(40) | 5)
+#define PINMUX_GPIO40__FUNC_DAP_MD32_SWD (MTK_PIN_NO(40) | 6)
+#define PINMUX_GPIO40__FUNC_USB_DRVVBUS (MTK_PIN_NO(40) | 7)
+
+#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_TDM_DATA3 (MTK_PIN_NO(41) | 1)
+#define PINMUX_GPIO41__FUNC_SDA9 (MTK_PIN_NO(41) | 2)
+#define PINMUX_GPIO41__FUNC_PWM_1 (MTK_PIN_NO(41) | 3)
+#define PINMUX_GPIO41__FUNC_DPI_D11 (MTK_PIN_NO(41) | 4)
+#define PINMUX_GPIO41__FUNC_CLKM1 (MTK_PIN_NO(41) | 5)
+#define PINMUX_GPIO41__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(41) | 6)
+
+#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_DISP_PWM (MTK_PIN_NO(42) | 1)
+
+#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_DSI_TE (MTK_PIN_NO(43) | 1)
+
+#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_LCM_RST (MTK_PIN_NO(44) | 1)
+
+#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_SCL6 (MTK_PIN_NO(45) | 1)
+#define PINMUX_GPIO45__FUNC_SCP_SCL0 (MTK_PIN_NO(45) | 2)
+#define PINMUX_GPIO45__FUNC_SCP_SCL1 (MTK_PIN_NO(45) | 3)
+#define PINMUX_GPIO45__FUNC_SCL_6306 (MTK_PIN_NO(45) | 4)
+
+#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_SDA6 (MTK_PIN_NO(46) | 1)
+#define PINMUX_GPIO46__FUNC_SCP_SDA0 (MTK_PIN_NO(46) | 2)
+#define PINMUX_GPIO46__FUNC_SCP_SDA1 (MTK_PIN_NO(46) | 3)
+#define PINMUX_GPIO46__FUNC_SDA_6306 (MTK_PIN_NO(46) | 4)
+
+#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_SPI1_A_MI (MTK_PIN_NO(47) | 1)
+#define PINMUX_GPIO47__FUNC_SCP_SPI1_A_MI (MTK_PIN_NO(47) | 2)
+#define PINMUX_GPIO47__FUNC_KPCOL2 (MTK_PIN_NO(47) | 3)
+#define PINMUX_GPIO47__FUNC_MD_URXD0 (MTK_PIN_NO(47) | 4)
+#define PINMUX_GPIO47__FUNC_CONN_UART0_RXD (MTK_PIN_NO(47) | 5)
+#define PINMUX_GPIO47__FUNC_SSPM_URXD_AO (MTK_PIN_NO(47) | 6)
+#define PINMUX_GPIO47__FUNC_DBG_MON_B32 (MTK_PIN_NO(47) | 7)
+
+#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_SPI1_A_CSB (MTK_PIN_NO(48) | 1)
+#define PINMUX_GPIO48__FUNC_SCP_SPI1_A_CS (MTK_PIN_NO(48) | 2)
+#define PINMUX_GPIO48__FUNC_KPROW2 (MTK_PIN_NO(48) | 3)
+#define PINMUX_GPIO48__FUNC_MD_UTXD0 (MTK_PIN_NO(48) | 4)
+#define PINMUX_GPIO48__FUNC_CONN_UART0_TXD (MTK_PIN_NO(48) | 5)
+#define PINMUX_GPIO48__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(48) | 6)
+#define PINMUX_GPIO48__FUNC_DBG_MON_B31 (MTK_PIN_NO(48) | 7)
+
+#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_SPI1_A_MO (MTK_PIN_NO(49) | 1)
+#define PINMUX_GPIO49__FUNC_SCP_SPI1_A_MO (MTK_PIN_NO(49) | 2)
+#define PINMUX_GPIO49__FUNC_UCTS0 (MTK_PIN_NO(49) | 3)
+#define PINMUX_GPIO49__FUNC_MD_URXD1 (MTK_PIN_NO(49) | 4)
+#define PINMUX_GPIO49__FUNC_PWM_1 (MTK_PIN_NO(49) | 5)
+#define PINMUX_GPIO49__FUNC_TP_URXD2_AO (MTK_PIN_NO(49) | 6)
+#define PINMUX_GPIO49__FUNC_DBG_MON_B30 (MTK_PIN_NO(49) | 7)
+
+#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_SPI1_A_CLK (MTK_PIN_NO(50) | 1)
+#define PINMUX_GPIO50__FUNC_SCP_SPI1_A_CK (MTK_PIN_NO(50) | 2)
+#define PINMUX_GPIO50__FUNC_URTS0 (MTK_PIN_NO(50) | 3)
+#define PINMUX_GPIO50__FUNC_MD_UTXD1 (MTK_PIN_NO(50) | 4)
+#define PINMUX_GPIO50__FUNC_WIFI_TXD (MTK_PIN_NO(50) | 5)
+#define PINMUX_GPIO50__FUNC_TP_UTXD2_AO (MTK_PIN_NO(50) | 6)
+#define PINMUX_GPIO50__FUNC_DBG_MON_B29 (MTK_PIN_NO(50) | 7)
+
+#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_SCL0 (MTK_PIN_NO(51) | 1)
+
+#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_SDA0 (MTK_PIN_NO(52) | 1)
+
+#define PINMUX_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_URXD0 (MTK_PIN_NO(53) | 1)
+#define PINMUX_GPIO53__FUNC_UTXD0 (MTK_PIN_NO(53) | 2)
+#define PINMUX_GPIO53__FUNC_MD_URXD0 (MTK_PIN_NO(53) | 3)
+#define PINMUX_GPIO53__FUNC_MD_URXD1 (MTK_PIN_NO(53) | 4)
+#define PINMUX_GPIO53__FUNC_SSPM_URXD_AO (MTK_PIN_NO(53) | 5)
+#define PINMUX_GPIO53__FUNC_CONN_UART0_RXD (MTK_PIN_NO(53) | 7)
+
+#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_UTXD0 (MTK_PIN_NO(54) | 1)
+#define PINMUX_GPIO54__FUNC_URXD0 (MTK_PIN_NO(54) | 2)
+#define PINMUX_GPIO54__FUNC_MD_UTXD0 (MTK_PIN_NO(54) | 3)
+#define PINMUX_GPIO54__FUNC_MD_UTXD1 (MTK_PIN_NO(54) | 4)
+#define PINMUX_GPIO54__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(54) | 5)
+#define PINMUX_GPIO54__FUNC_WIFI_TXD (MTK_PIN_NO(54) | 6)
+#define PINMUX_GPIO54__FUNC_CONN_UART0_TXD (MTK_PIN_NO(54) | 7)
+
+#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_SCL3 (MTK_PIN_NO(55) | 1)
+#define PINMUX_GPIO55__FUNC_SCP_SCL0 (MTK_PIN_NO(55) | 2)
+#define PINMUX_GPIO55__FUNC_SCP_SCL1 (MTK_PIN_NO(55) | 3)
+#define PINMUX_GPIO55__FUNC_SCL_6306 (MTK_PIN_NO(55) | 4)
+
+#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_SDA3 (MTK_PIN_NO(56) | 1)
+#define PINMUX_GPIO56__FUNC_SCP_SDA0 (MTK_PIN_NO(56) | 2)
+#define PINMUX_GPIO56__FUNC_SCP_SDA1 (MTK_PIN_NO(56) | 3)
+#define PINMUX_GPIO56__FUNC_SDA_6306 (MTK_PIN_NO(56) | 4)
+
+#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_KPROW1 (MTK_PIN_NO(57) | 1)
+#define PINMUX_GPIO57__FUNC_PWM_1 (MTK_PIN_NO(57) | 2)
+#define PINMUX_GPIO57__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(57) | 3)
+#define PINMUX_GPIO57__FUNC_CLKM1 (MTK_PIN_NO(57) | 4)
+#define PINMUX_GPIO57__FUNC_IDDIG (MTK_PIN_NO(57) | 5)
+#define PINMUX_GPIO57__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(57) | 6)
+#define PINMUX_GPIO57__FUNC_MBISTREADEN_TRIGGER (MTK_PIN_NO(57) | 7)
+
+#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_KPROW0 (MTK_PIN_NO(58) | 1)
+#define PINMUX_GPIO58__FUNC_DBG_MON_B28 (MTK_PIN_NO(58) | 7)
+
+#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_KPCOL0 (MTK_PIN_NO(59) | 1)
+#define PINMUX_GPIO59__FUNC_DBG_MON_B27 (MTK_PIN_NO(59) | 7)
+
+#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_KPCOL1 (MTK_PIN_NO(60) | 1)
+#define PINMUX_GPIO60__FUNC_PWM_2 (MTK_PIN_NO(60) | 2)
+#define PINMUX_GPIO60__FUNC_UCTS1 (MTK_PIN_NO(60) | 3)
+#define PINMUX_GPIO60__FUNC_CLKM2 (MTK_PIN_NO(60) | 4)
+#define PINMUX_GPIO60__FUNC_USB_DRVVBUS (MTK_PIN_NO(60) | 5)
+#define PINMUX_GPIO60__FUNC_MBISTWRITEEN_TRIGGER (MTK_PIN_NO(60) | 7)
+
+#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_SCL1 (MTK_PIN_NO(61) | 1)
+#define PINMUX_GPIO61__FUNC_SCP_SCL0 (MTK_PIN_NO(61) | 2)
+#define PINMUX_GPIO61__FUNC_SCP_SCL1 (MTK_PIN_NO(61) | 3)
+
+#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_SDA1 (MTK_PIN_NO(62) | 1)
+#define PINMUX_GPIO62__FUNC_SCP_SDA0 (MTK_PIN_NO(62) | 2)
+#define PINMUX_GPIO62__FUNC_SCP_SDA1 (MTK_PIN_NO(62) | 3)
+
+#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_SPI2_MI (MTK_PIN_NO(63) | 1)
+#define PINMUX_GPIO63__FUNC_SCP_SPI2_MI (MTK_PIN_NO(63) | 2)
+#define PINMUX_GPIO63__FUNC_KPCOL2 (MTK_PIN_NO(63) | 3)
+#define PINMUX_GPIO63__FUNC_MRG_DI (MTK_PIN_NO(63) | 4)
+#define PINMUX_GPIO63__FUNC_MD_URXD0 (MTK_PIN_NO(63) | 5)
+#define PINMUX_GPIO63__FUNC_CONN_UART0_RXD (MTK_PIN_NO(63) | 6)
+#define PINMUX_GPIO63__FUNC_DBG_MON_B26 (MTK_PIN_NO(63) | 7)
+
+#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_SPI2_CSB (MTK_PIN_NO(64) | 1)
+#define PINMUX_GPIO64__FUNC_SCP_SPI2_CS (MTK_PIN_NO(64) | 2)
+#define PINMUX_GPIO64__FUNC_KPROW2 (MTK_PIN_NO(64) | 3)
+#define PINMUX_GPIO64__FUNC_MRG_SYNC (MTK_PIN_NO(64) | 4)
+#define PINMUX_GPIO64__FUNC_MD_UTXD0 (MTK_PIN_NO(64) | 5)
+#define PINMUX_GPIO64__FUNC_CONN_UART0_TXD (MTK_PIN_NO(64) | 6)
+#define PINMUX_GPIO64__FUNC_DBG_MON_B25 (MTK_PIN_NO(64) | 7)
+
+#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_SPI2_MO (MTK_PIN_NO(65) | 1)
+#define PINMUX_GPIO65__FUNC_SCP_SPI2_MO (MTK_PIN_NO(65) | 2)
+#define PINMUX_GPIO65__FUNC_SCP_SDA1 (MTK_PIN_NO(65) | 3)
+#define PINMUX_GPIO65__FUNC_MRG_DO (MTK_PIN_NO(65) | 4)
+#define PINMUX_GPIO65__FUNC_MD_URXD1 (MTK_PIN_NO(65) | 5)
+#define PINMUX_GPIO65__FUNC_PWM_3 (MTK_PIN_NO(65) | 6)
+
+#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_SPI2_CLK (MTK_PIN_NO(66) | 1)
+#define PINMUX_GPIO66__FUNC_SCP_SPI2_CK (MTK_PIN_NO(66) | 2)
+#define PINMUX_GPIO66__FUNC_SCP_SCL1 (MTK_PIN_NO(66) | 3)
+#define PINMUX_GPIO66__FUNC_MRG_CLK (MTK_PIN_NO(66) | 4)
+#define PINMUX_GPIO66__FUNC_MD_UTXD1 (MTK_PIN_NO(66) | 5)
+#define PINMUX_GPIO66__FUNC_WIFI_TXD (MTK_PIN_NO(66) | 6)
+
+#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_I2S3_LRCK (MTK_PIN_NO(67) | 1)
+#define PINMUX_GPIO67__FUNC_I2S1_LRCK (MTK_PIN_NO(67) | 2)
+#define PINMUX_GPIO67__FUNC_URXD1 (MTK_PIN_NO(67) | 3)
+#define PINMUX_GPIO67__FUNC_PCM0_SYNC (MTK_PIN_NO(67) | 4)
+#define PINMUX_GPIO67__FUNC_I2S5_LRCK (MTK_PIN_NO(67) | 5)
+#define PINMUX_GPIO67__FUNC_ANT_SEL9 (MTK_PIN_NO(67) | 6)
+#define PINMUX_GPIO67__FUNC_DBG_MON_B10 (MTK_PIN_NO(67) | 7)
+
+#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_I2S3_DO (MTK_PIN_NO(68) | 1)
+#define PINMUX_GPIO68__FUNC_I2S1_DO (MTK_PIN_NO(68) | 2)
+#define PINMUX_GPIO68__FUNC_UTXD1 (MTK_PIN_NO(68) | 3)
+#define PINMUX_GPIO68__FUNC_PCM0_DO (MTK_PIN_NO(68) | 4)
+#define PINMUX_GPIO68__FUNC_I2S5_DO (MTK_PIN_NO(68) | 5)
+#define PINMUX_GPIO68__FUNC_ANT_SEL10 (MTK_PIN_NO(68) | 6)
+#define PINMUX_GPIO68__FUNC_DBG_MON_B9 (MTK_PIN_NO(68) | 7)
+
+#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_I2S3_MCK (MTK_PIN_NO(69) | 1)
+#define PINMUX_GPIO69__FUNC_I2S1_MCK (MTK_PIN_NO(69) | 2)
+#define PINMUX_GPIO69__FUNC_URTS1 (MTK_PIN_NO(69) | 3)
+#define PINMUX_GPIO69__FUNC_AGPS_SYNC (MTK_PIN_NO(69) | 4)
+#define PINMUX_GPIO69__FUNC_I2S5_MCK (MTK_PIN_NO(69) | 5)
+#define PINMUX_GPIO69__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(69) | 6)
+#define PINMUX_GPIO69__FUNC_DBG_MON_B8 (MTK_PIN_NO(69) | 7)
+
+#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_I2S0_DI (MTK_PIN_NO(70) | 1)
+#define PINMUX_GPIO70__FUNC_I2S2_DI (MTK_PIN_NO(70) | 2)
+#define PINMUX_GPIO70__FUNC_KPCOL2 (MTK_PIN_NO(70) | 3)
+#define PINMUX_GPIO70__FUNC_PCM0_DI (MTK_PIN_NO(70) | 4)
+#define PINMUX_GPIO70__FUNC_I2S2_DI2 (MTK_PIN_NO(70) | 5)
+#define PINMUX_GPIO70__FUNC_ANT_SEL11 (MTK_PIN_NO(70) | 6)
+#define PINMUX_GPIO70__FUNC_DBG_MON_B7 (MTK_PIN_NO(70) | 7)
+
+#define PINMUX_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define PINMUX_GPIO71__FUNC_I2S3_BCK (MTK_PIN_NO(71) | 1)
+#define PINMUX_GPIO71__FUNC_I2S1_BCK (MTK_PIN_NO(71) | 2)
+#define PINMUX_GPIO71__FUNC_KPROW2 (MTK_PIN_NO(71) | 3)
+#define PINMUX_GPIO71__FUNC_PCM0_CLK (MTK_PIN_NO(71) | 4)
+#define PINMUX_GPIO71__FUNC_I2S5_BCK (MTK_PIN_NO(71) | 5)
+#define PINMUX_GPIO71__FUNC_ANT_SEL12 (MTK_PIN_NO(71) | 6)
+#define PINMUX_GPIO71__FUNC_DBG_MON_B6 (MTK_PIN_NO(71) | 7)
+
+#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define PINMUX_GPIO72__FUNC_BPI_BUS19_OLAT0 (MTK_PIN_NO(72) | 1)
+#define PINMUX_GPIO72__FUNC_CONN_BPI_BUS19_OLAT0 (MTK_PIN_NO(72) | 2)
+
+#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define PINMUX_GPIO73__FUNC_BPI_BUS18_PA_VM1 (MTK_PIN_NO(73) | 1)
+#define PINMUX_GPIO73__FUNC_CONN_MIPI5_SCLK (MTK_PIN_NO(73) | 2)
+#define PINMUX_GPIO73__FUNC_MIPI5_SCLK (MTK_PIN_NO(73) | 3)
+
+#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_BPI_BUS17_PA_VM0 (MTK_PIN_NO(74) | 1)
+#define PINMUX_GPIO74__FUNC_CONN_MIPI5_SDATA (MTK_PIN_NO(74) | 2)
+#define PINMUX_GPIO74__FUNC_MIPI5_SDATA (MTK_PIN_NO(74) | 3)
+
+#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_BPI_BUS20_OLAT1 (MTK_PIN_NO(75) | 1)
+#define PINMUX_GPIO75__FUNC_CONN_BPI_BUS20_OLAT1 (MTK_PIN_NO(75) | 2)
+#define PINMUX_GPIO75__FUNC_RFIC0_BSI_D2 (MTK_PIN_NO(75) | 3)
+
+#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_RFIC0_BSI_D1 (MTK_PIN_NO(76) | 1)
+
+#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_RFIC0_BSI_D0 (MTK_PIN_NO(77) | 1)
+
+#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_BPI_BUS7 (MTK_PIN_NO(78) | 1)
+#define PINMUX_GPIO78__FUNC_DBG_MON_B24 (MTK_PIN_NO(78) | 7)
+
+#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_BPI_BUS6 (MTK_PIN_NO(79) | 1)
+#define PINMUX_GPIO79__FUNC_DBG_MON_B23 (MTK_PIN_NO(79) | 7)
+
+#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_BPI_BUS8 (MTK_PIN_NO(80) | 1)
+#define PINMUX_GPIO80__FUNC_DBG_MON_B22 (MTK_PIN_NO(80) | 7)
+
+#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_BPI_BUS9 (MTK_PIN_NO(81) | 1)
+#define PINMUX_GPIO81__FUNC_DBG_MON_B21 (MTK_PIN_NO(81) | 7)
+
+#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_BPI_BUS10 (MTK_PIN_NO(82) | 1)
+#define PINMUX_GPIO82__FUNC_DBG_MON_B20 (MTK_PIN_NO(82) | 7)
+
+#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_BPI_BUS11 (MTK_PIN_NO(83) | 1)
+#define PINMUX_GPIO83__FUNC_DBG_MON_B19 (MTK_PIN_NO(83) | 7)
+
+#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_BPI_BUS12 (MTK_PIN_NO(84) | 1)
+#define PINMUX_GPIO84__FUNC_CONN_BPI_BUS12 (MTK_PIN_NO(84) | 2)
+
+#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_BPI_BUS13 (MTK_PIN_NO(85) | 1)
+#define PINMUX_GPIO85__FUNC_CONN_BPI_BUS13 (MTK_PIN_NO(85) | 2)
+
+#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_BPI_BUS14 (MTK_PIN_NO(86) | 1)
+#define PINMUX_GPIO86__FUNC_CONN_BPI_BUS14 (MTK_PIN_NO(86) | 2)
+
+#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_BPI_BUS15 (MTK_PIN_NO(87) | 1)
+#define PINMUX_GPIO87__FUNC_CONN_BPI_BUS15 (MTK_PIN_NO(87) | 2)
+
+#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_BPI_BUS16 (MTK_PIN_NO(88) | 1)
+#define PINMUX_GPIO88__FUNC_CONN_BPI_BUS16 (MTK_PIN_NO(88) | 2)
+
+#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_BPI_BUS5 (MTK_PIN_NO(89) | 1)
+#define PINMUX_GPIO89__FUNC_DBG_MON_B18 (MTK_PIN_NO(89) | 7)
+
+#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_BPI_BUS4 (MTK_PIN_NO(90) | 1)
+#define PINMUX_GPIO90__FUNC_DBG_MON_B17 (MTK_PIN_NO(90) | 7)
+
+#define PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_BPI_BUS3 (MTK_PIN_NO(91) | 1)
+
+#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_BPI_BUS2 (MTK_PIN_NO(92) | 1)
+#define PINMUX_GPIO92__FUNC_DBG_MON_B16 (MTK_PIN_NO(92) | 7)
+
+#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_BPI_BUS1 (MTK_PIN_NO(93) | 1)
+
+#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_BPI_BUS0 (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_DBG_MON_B15 (MTK_PIN_NO(94) | 7)
+
+#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_MIPI0_SDATA (MTK_PIN_NO(95) | 1)
+
+#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_MIPI0_SCLK (MTK_PIN_NO(96) | 1)
+
+#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_MIPI1_SDATA (MTK_PIN_NO(97) | 1)
+
+#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_MIPI1_SCLK (MTK_PIN_NO(98) | 1)
+
+#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_MIPI2_SCLK (MTK_PIN_NO(99) | 1)
+#define PINMUX_GPIO99__FUNC_DBG_MON_B14 (MTK_PIN_NO(99) | 7)
+
+#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_MIPI2_SDATA (MTK_PIN_NO(100) | 1)
+#define PINMUX_GPIO100__FUNC_DBG_MON_B13 (MTK_PIN_NO(100) | 7)
+
+#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_MIPI3_SCLK (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_DBG_MON_B12 (MTK_PIN_NO(101) | 7)
+
+#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_MIPI3_SDATA (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_DBG_MON_B11 (MTK_PIN_NO(102) | 7)
+
+#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_MIPI4_SCLK (MTK_PIN_NO(103) | 1)
+#define PINMUX_GPIO103__FUNC_CONN_MIPI4_SCLK (MTK_PIN_NO(103) | 2)
+
+#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_MIPI4_SDATA (MTK_PIN_NO(104) | 1)
+#define PINMUX_GPIO104__FUNC_CONN_MIPI4_SDATA (MTK_PIN_NO(104) | 2)
+
+#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_BPI_BUS22_OLAT3 (MTK_PIN_NO(105) | 1)
+#define PINMUX_GPIO105__FUNC_CONN_BPI_BUS22_OLAT3 (MTK_PIN_NO(105) | 2)
+
+#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_BPI_BUS21_OLAT2 (MTK_PIN_NO(106) | 1)
+#define PINMUX_GPIO106__FUNC_CONN_BPI_BUS21_OLAT2 (MTK_PIN_NO(106) | 2)
+
+#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_BPI_BUS24_ANT1 (MTK_PIN_NO(107) | 1)
+#define PINMUX_GPIO107__FUNC_CONN_BPI_BUS24_ANT1 (MTK_PIN_NO(107) | 2)
+
+#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_BPI_BUS25_ANT2 (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_CONN_BPI_BUS25_ANT2 (MTK_PIN_NO(108) | 2)
+
+#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_BPI_BUS23_ANT0 (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_CONN_BPI_BUS23_ANT0 (MTK_PIN_NO(109) | 2)
+
+#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_SCL4 (MTK_PIN_NO(110) | 1)
+
+#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_SDA4 (MTK_PIN_NO(111) | 1)
+
+#define PINMUX_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_SCL2 (MTK_PIN_NO(112) | 1)
+
+#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_SDA2 (MTK_PIN_NO(113) | 1)
+
+#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_CLKM0 (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_SPI3_MI (MTK_PIN_NO(114) | 2)
+#define PINMUX_GPIO114__FUNC_DBG_MON_B5 (MTK_PIN_NO(114) | 7)
+
+#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_CLKM1 (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_SPI3_CSB (MTK_PIN_NO(115) | 2)
+#define PINMUX_GPIO115__FUNC_DBG_MON_B4 (MTK_PIN_NO(115) | 7)
+
+#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_CMMCLK0 (MTK_PIN_NO(116) | 1)
+#define PINMUX_GPIO116__FUNC_DBG_MON_B3 (MTK_PIN_NO(116) | 7)
+
+#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_CMMCLK1 (MTK_PIN_NO(117) | 1)
+#define PINMUX_GPIO117__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(117) | 2)
+#define PINMUX_GPIO117__FUNC_DBG_MON_B2 (MTK_PIN_NO(117) | 7)
+
+#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_CLKM2 (MTK_PIN_NO(118) | 1)
+#define PINMUX_GPIO118__FUNC_SPI3_MO (MTK_PIN_NO(118) | 2)
+#define PINMUX_GPIO118__FUNC_DBG_MON_B1 (MTK_PIN_NO(118) | 7)
+
+#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_CLKM3 (MTK_PIN_NO(119) | 1)
+#define PINMUX_GPIO119__FUNC_SPI3_CLK (MTK_PIN_NO(119) | 2)
+#define PINMUX_GPIO119__FUNC_DBG_MON_B0 (MTK_PIN_NO(119) | 7)
+
+#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_CMMCLK2 (MTK_PIN_NO(120) | 1)
+#define PINMUX_GPIO120__FUNC_CLKM2 (MTK_PIN_NO(120) | 2)
+#define PINMUX_GPIO120__FUNC_ANT_SEL12 (MTK_PIN_NO(120) | 6)
+#define PINMUX_GPIO120__FUNC_TP_UCTS2_AO (MTK_PIN_NO(120) | 7)
+
+#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_CMMCLK3 (MTK_PIN_NO(121) | 1)
+#define PINMUX_GPIO121__FUNC_CLKM3 (MTK_PIN_NO(121) | 2)
+#define PINMUX_GPIO121__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(121) | 3)
+#define PINMUX_GPIO121__FUNC_ANT_SEL11 (MTK_PIN_NO(121) | 6)
+#define PINMUX_GPIO121__FUNC_TP_URTS2_AO (MTK_PIN_NO(121) | 7)
+
+#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_CMVREF1 (MTK_PIN_NO(122) | 1)
+#define PINMUX_GPIO122__FUNC_PCM0_SYNC (MTK_PIN_NO(122) | 2)
+#define PINMUX_GPIO122__FUNC_SRCLKENAI1 (MTK_PIN_NO(122) | 3)
+#define PINMUX_GPIO122__FUNC_AGPS_SYNC (MTK_PIN_NO(122) | 4)
+#define PINMUX_GPIO122__FUNC_PWM_1 (MTK_PIN_NO(122) | 5)
+#define PINMUX_GPIO122__FUNC_ANT_SEL9 (MTK_PIN_NO(122) | 6)
+#define PINMUX_GPIO122__FUNC_TP_UCTS1_AO (MTK_PIN_NO(122) | 7)
+
+#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_PCM0_DI (MTK_PIN_NO(123) | 2)
+#define PINMUX_GPIO123__FUNC_ADSP_JTAG_TRSTN (MTK_PIN_NO(123) | 3)
+#define PINMUX_GPIO123__FUNC_VPU_UDI_NTRST (MTK_PIN_NO(123) | 4)
+#define PINMUX_GPIO123__FUNC_SPM_JTAG_TRSTN (MTK_PIN_NO(123) | 5)
+#define PINMUX_GPIO123__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(123) | 6)
+
+#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_CMVREF2 (MTK_PIN_NO(124) | 1)
+#define PINMUX_GPIO124__FUNC_PCM0_CLK (MTK_PIN_NO(124) | 2)
+#define PINMUX_GPIO124__FUNC_MD_INT0 (MTK_PIN_NO(124) | 3)
+#define PINMUX_GPIO124__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(124) | 4)
+#define PINMUX_GPIO124__FUNC_PWM_2 (MTK_PIN_NO(124) | 5)
+#define PINMUX_GPIO124__FUNC_ANT_SEL10 (MTK_PIN_NO(124) | 6)
+#define PINMUX_GPIO124__FUNC_TP_URTS1_AO (MTK_PIN_NO(124) | 7)
+
+#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_CMVREF3 (MTK_PIN_NO(125) | 1)
+#define PINMUX_GPIO125__FUNC_PCM0_DO (MTK_PIN_NO(125) | 2)
+#define PINMUX_GPIO125__FUNC_ADSP_JTAG_TMS (MTK_PIN_NO(125) | 3)
+#define PINMUX_GPIO125__FUNC_VPU_UDI_TMS (MTK_PIN_NO(125) | 4)
+#define PINMUX_GPIO125__FUNC_SPM_JTAG_TMS (MTK_PIN_NO(125) | 5)
+#define PINMUX_GPIO125__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(125) | 6)
+
+#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_CMVREF4 (MTK_PIN_NO(126) | 1)
+#define PINMUX_GPIO126__FUNC_CMFLASH0 (MTK_PIN_NO(126) | 2)
+#define PINMUX_GPIO126__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(126) | 6)
+
+#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_CMVREF0 (MTK_PIN_NO(127) | 1)
+#define PINMUX_GPIO127__FUNC_CMFLASH1 (MTK_PIN_NO(127) | 2)
+#define PINMUX_GPIO127__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(127) | 6)
+
+#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(128) | 2)
+#define PINMUX_GPIO128__FUNC_CCU_JTAG_TRST (MTK_PIN_NO(128) | 3)
+#define PINMUX_GPIO128__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(128) | 4)
+#define PINMUX_GPIO128__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(128) | 5)
+#define PINMUX_GPIO128__FUNC_LVTS_FOUT (MTK_PIN_NO(128) | 6)
+#define PINMUX_GPIO128__FUNC_DBG_MON_A3 (MTK_PIN_NO(128) | 7)
+
+#define PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(129) | 2)
+#define PINMUX_GPIO129__FUNC_CCU_JTAG_TCK (MTK_PIN_NO(129) | 3)
+#define PINMUX_GPIO129__FUNC_CONN_DSP_JCK (MTK_PIN_NO(129) | 4)
+#define PINMUX_GPIO129__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(129) | 5)
+#define PINMUX_GPIO129__FUNC_LVTS_SDO (MTK_PIN_NO(129) | 6)
+#define PINMUX_GPIO129__FUNC_DBG_MON_A4 (MTK_PIN_NO(129) | 7)
+
+#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(130) | 2)
+#define PINMUX_GPIO130__FUNC_LVTS_26M (MTK_PIN_NO(130) | 6)
+#define PINMUX_GPIO130__FUNC_DBG_MON_A5 (MTK_PIN_NO(130) | 7)
+
+#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(131) | 2)
+#define PINMUX_GPIO131__FUNC_CCU_JTAG_TDI (MTK_PIN_NO(131) | 3)
+#define PINMUX_GPIO131__FUNC_CONN_DSP_JDI (MTK_PIN_NO(131) | 4)
+#define PINMUX_GPIO131__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(131) | 5)
+#define PINMUX_GPIO131__FUNC_LVTS_SCK (MTK_PIN_NO(131) | 6)
+#define PINMUX_GPIO131__FUNC_DBG_MON_A0 (MTK_PIN_NO(131) | 7)
+
+#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(132) | 2)
+#define PINMUX_GPIO132__FUNC_CCU_JTAG_TMS (MTK_PIN_NO(132) | 3)
+#define PINMUX_GPIO132__FUNC_CONN_DSP_JMS (MTK_PIN_NO(132) | 4)
+#define PINMUX_GPIO132__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(132) | 5)
+#define PINMUX_GPIO132__FUNC_LVTS_SDI (MTK_PIN_NO(132) | 6)
+#define PINMUX_GPIO132__FUNC_DBG_MON_A1 (MTK_PIN_NO(132) | 7)
+
+#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(133) | 2)
+#define PINMUX_GPIO133__FUNC_CCU_JTAG_TDO (MTK_PIN_NO(133) | 3)
+#define PINMUX_GPIO133__FUNC_CONN_DSP_JDO (MTK_PIN_NO(133) | 4)
+#define PINMUX_GPIO133__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(133) | 5)
+#define PINMUX_GPIO133__FUNC_LVTS_SCF (MTK_PIN_NO(133) | 6)
+#define PINMUX_GPIO133__FUNC_DBG_MON_A2 (MTK_PIN_NO(133) | 7)
+
+#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_MSDC1_CLK (MTK_PIN_NO(134) | 1)
+#define PINMUX_GPIO134__FUNC_PCM1_CLK (MTK_PIN_NO(134) | 2)
+#define PINMUX_GPIO134__FUNC_SPI5_B_MI (MTK_PIN_NO(134) | 3)
+#define PINMUX_GPIO134__FUNC_UDI_TCK (MTK_PIN_NO(134) | 4)
+#define PINMUX_GPIO134__FUNC_CONN_DSP_JCK (MTK_PIN_NO(134) | 5)
+#define PINMUX_GPIO134__FUNC_IPU_JTAG_TCK (MTK_PIN_NO(134) | 6)
+#define PINMUX_GPIO134__FUNC_JTCK_SEL3 (MTK_PIN_NO(134) | 7)
+
+#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_MSDC1_CMD (MTK_PIN_NO(135) | 1)
+#define PINMUX_GPIO135__FUNC_PCM1_SYNC (MTK_PIN_NO(135) | 2)
+#define PINMUX_GPIO135__FUNC_SPI5_B_CSB (MTK_PIN_NO(135) | 3)
+#define PINMUX_GPIO135__FUNC_UDI_TMS (MTK_PIN_NO(135) | 4)
+#define PINMUX_GPIO135__FUNC_CONN_DSP_JMS (MTK_PIN_NO(135) | 5)
+#define PINMUX_GPIO135__FUNC_IPU_JTAG_TMS (MTK_PIN_NO(135) | 6)
+#define PINMUX_GPIO135__FUNC_JTMS_SEL3 (MTK_PIN_NO(135) | 7)
+
+#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_MSDC1_DAT3 (MTK_PIN_NO(136) | 1)
+#define PINMUX_GPIO136__FUNC_PCM1_DI (MTK_PIN_NO(136) | 2)
+#define PINMUX_GPIO136__FUNC_SPI5_B_MO (MTK_PIN_NO(136) | 3)
+#define PINMUX_GPIO136__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(136) | 4)
+#define PINMUX_GPIO136__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(136) | 5)
+#define PINMUX_GPIO136__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(136) | 6)
+
+#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_MSDC1_DAT0 (MTK_PIN_NO(137) | 1)
+#define PINMUX_GPIO137__FUNC_PCM1_DO0 (MTK_PIN_NO(137) | 2)
+#define PINMUX_GPIO137__FUNC_SPI5_B_CLK (MTK_PIN_NO(137) | 3)
+#define PINMUX_GPIO137__FUNC_UDI_TDI (MTK_PIN_NO(137) | 4)
+#define PINMUX_GPIO137__FUNC_CONN_DSP_JDI (MTK_PIN_NO(137) | 5)
+#define PINMUX_GPIO137__FUNC_IPU_JTAG_TDI (MTK_PIN_NO(137) | 6)
+#define PINMUX_GPIO137__FUNC_JTDI_SEL3 (MTK_PIN_NO(137) | 7)
+
+#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_MSDC1_DAT2 (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_PCM1_DO2 (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_ANT_SEL11 (MTK_PIN_NO(138) | 3)
+#define PINMUX_GPIO138__FUNC_UDI_NTRST (MTK_PIN_NO(138) | 4)
+#define PINMUX_GPIO138__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(138) | 5)
+#define PINMUX_GPIO138__FUNC_IPU_JTAG_TRST (MTK_PIN_NO(138) | 6)
+#define PINMUX_GPIO138__FUNC_JTRSTN_SEL3 (MTK_PIN_NO(138) | 7)
+
+#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_MSDC1_DAT1 (MTK_PIN_NO(139) | 1)
+#define PINMUX_GPIO139__FUNC_PCM1_DO1 (MTK_PIN_NO(139) | 2)
+#define PINMUX_GPIO139__FUNC_ANT_SEL12 (MTK_PIN_NO(139) | 3)
+#define PINMUX_GPIO139__FUNC_UDI_TDO (MTK_PIN_NO(139) | 4)
+#define PINMUX_GPIO139__FUNC_CONN_DSP_JDO (MTK_PIN_NO(139) | 5)
+#define PINMUX_GPIO139__FUNC_IPU_JTAG_TDO (MTK_PIN_NO(139) | 6)
+#define PINMUX_GPIO139__FUNC_JTDO_SEL3 (MTK_PIN_NO(139) | 7)
+
+#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(140) | 1)
+#define PINMUX_GPIO140__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(140) | 2)
+#define PINMUX_GPIO140__FUNC_ADSP_URXD0 (MTK_PIN_NO(140) | 3)
+#define PINMUX_GPIO140__FUNC_SCL_6306 (MTK_PIN_NO(140) | 4)
+#define PINMUX_GPIO140__FUNC_PTA_RXD (MTK_PIN_NO(140) | 5)
+#define PINMUX_GPIO140__FUNC_SSPM_URXD_AO (MTK_PIN_NO(140) | 6)
+
+#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(141) | 1)
+#define PINMUX_GPIO141__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(141) | 2)
+#define PINMUX_GPIO141__FUNC_ADSP_UTXD0 (MTK_PIN_NO(141) | 3)
+#define PINMUX_GPIO141__FUNC_SDA_6306 (MTK_PIN_NO(141) | 4)
+#define PINMUX_GPIO141__FUNC_PTA_TXD (MTK_PIN_NO(141) | 5)
+#define PINMUX_GPIO141__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(141) | 6)
+
+#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(142) | 1)
+#define PINMUX_GPIO142__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(142) | 2)
+
+#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_AUD_DAT_MOSI2 (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_DBG_MON_A9 (MTK_PIN_NO(143) | 7)
+
+#define PINMUX_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define PINMUX_GPIO144__FUNC_AUD_NLE_MOSI1 (MTK_PIN_NO(144) | 1)
+#define PINMUX_GPIO144__FUNC_AUD_CLK_MISO (MTK_PIN_NO(144) | 2)
+#define PINMUX_GPIO144__FUNC_I2S2_MCK (MTK_PIN_NO(144) | 3)
+#define PINMUX_GPIO144__FUNC_UDI_TCK (MTK_PIN_NO(144) | 5)
+#define PINMUX_GPIO144__FUNC_UFS_UNIPRO_SDA (MTK_PIN_NO(144) | 6)
+#define PINMUX_GPIO144__FUNC_DBG_MON_A10 (MTK_PIN_NO(144) | 7)
+
+#define PINMUX_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0)
+#define PINMUX_GPIO145__FUNC_AUD_NLE_MOSI0 (MTK_PIN_NO(145) | 1)
+#define PINMUX_GPIO145__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(145) | 2)
+#define PINMUX_GPIO145__FUNC_I2S2_BCK (MTK_PIN_NO(145) | 3)
+#define PINMUX_GPIO145__FUNC_UDI_TMS (MTK_PIN_NO(145) | 5)
+#define PINMUX_GPIO145__FUNC_DBG_MON_A11 (MTK_PIN_NO(145) | 7)
+
+#define PINMUX_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0)
+#define PINMUX_GPIO146__FUNC_AUD_DAT_MISO2 (MTK_PIN_NO(146) | 1)
+#define PINMUX_GPIO146__FUNC_I2S2_DI2 (MTK_PIN_NO(146) | 3)
+#define PINMUX_GPIO146__FUNC_UDI_TDO (MTK_PIN_NO(146) | 5)
+#define PINMUX_GPIO146__FUNC_DBG_MON_A14 (MTK_PIN_NO(146) | 7)
+
+#define PINMUX_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0)
+#define PINMUX_GPIO147__FUNC_ANT_SEL0 (MTK_PIN_NO(147) | 1)
+#define PINMUX_GPIO147__FUNC_PWM_3 (MTK_PIN_NO(147) | 2)
+
+#define PINMUX_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0)
+#define PINMUX_GPIO148__FUNC_ANT_SEL1 (MTK_PIN_NO(148) | 1)
+#define PINMUX_GPIO148__FUNC_SPI0_B_MI (MTK_PIN_NO(148) | 2)
+#define PINMUX_GPIO148__FUNC_SSPM_URXD_AO (MTK_PIN_NO(148) | 3)
+#define PINMUX_GPIO148__FUNC_TP_UCTS2_AO (MTK_PIN_NO(148) | 5)
+#define PINMUX_GPIO148__FUNC_CLKM0 (MTK_PIN_NO(148) | 6)
+
+#define PINMUX_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0)
+#define PINMUX_GPIO149__FUNC_ANT_SEL2 (MTK_PIN_NO(149) | 1)
+#define PINMUX_GPIO149__FUNC_SPI0_B_CSB (MTK_PIN_NO(149) | 2)
+#define PINMUX_GPIO149__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(149) | 3)
+#define PINMUX_GPIO149__FUNC_TP_URTS2_AO (MTK_PIN_NO(149) | 5)
+#define PINMUX_GPIO149__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(149) | 6)
+
+#define PINMUX_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0)
+#define PINMUX_GPIO150__FUNC_ANT_SEL3 (MTK_PIN_NO(150) | 1)
+#define PINMUX_GPIO150__FUNC_SPI0_B_MO (MTK_PIN_NO(150) | 2)
+#define PINMUX_GPIO150__FUNC_UCTS1 (MTK_PIN_NO(150) | 3)
+#define PINMUX_GPIO150__FUNC_TP_UCTS1_AO (MTK_PIN_NO(150) | 5)
+#define PINMUX_GPIO150__FUNC_IDDIG (MTK_PIN_NO(150) | 6)
+#define PINMUX_GPIO150__FUNC_SCL9 (MTK_PIN_NO(150) | 7)
+
+#define PINMUX_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0)
+#define PINMUX_GPIO151__FUNC_ANT_SEL4 (MTK_PIN_NO(151) | 1)
+#define PINMUX_GPIO151__FUNC_SPI0_B_CLK (MTK_PIN_NO(151) | 2)
+#define PINMUX_GPIO151__FUNC_URTS1 (MTK_PIN_NO(151) | 3)
+#define PINMUX_GPIO151__FUNC_TP_URTS1_AO (MTK_PIN_NO(151) | 5)
+#define PINMUX_GPIO151__FUNC_USB_DRVVBUS (MTK_PIN_NO(151) | 6)
+#define PINMUX_GPIO151__FUNC_SDA9 (MTK_PIN_NO(151) | 7)
+
+#define PINMUX_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0)
+#define PINMUX_GPIO152__FUNC_ANT_SEL5 (MTK_PIN_NO(152) | 1)
+#define PINMUX_GPIO152__FUNC_SPI1_B_MI (MTK_PIN_NO(152) | 2)
+#define PINMUX_GPIO152__FUNC_CLKM3 (MTK_PIN_NO(152) | 3)
+#define PINMUX_GPIO152__FUNC_TP_URXD1_AO (MTK_PIN_NO(152) | 5)
+#define PINMUX_GPIO152__FUNC_SCP_SPI1_B_MI (MTK_PIN_NO(152) | 6)
+#define PINMUX_GPIO152__FUNC_SCL8 (MTK_PIN_NO(152) | 7)
+
+#define PINMUX_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0)
+#define PINMUX_GPIO153__FUNC_ANT_SEL6 (MTK_PIN_NO(153) | 1)
+#define PINMUX_GPIO153__FUNC_SPI1_B_CSB (MTK_PIN_NO(153) | 2)
+#define PINMUX_GPIO153__FUNC_SRCLKENAI0 (MTK_PIN_NO(153) | 3)
+#define PINMUX_GPIO153__FUNC_PWM_0 (MTK_PIN_NO(153) | 4)
+#define PINMUX_GPIO153__FUNC_TP_UTXD1_AO (MTK_PIN_NO(153) | 5)
+#define PINMUX_GPIO153__FUNC_SCP_SPI1_B_CS (MTK_PIN_NO(153) | 6)
+#define PINMUX_GPIO153__FUNC_SDA8 (MTK_PIN_NO(153) | 7)
+
+#define PINMUX_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0)
+#define PINMUX_GPIO154__FUNC_ANT_SEL7 (MTK_PIN_NO(154) | 1)
+#define PINMUX_GPIO154__FUNC_SPI1_B_MO (MTK_PIN_NO(154) | 2)
+#define PINMUX_GPIO154__FUNC_SRCLKENAI1 (MTK_PIN_NO(154) | 3)
+#define PINMUX_GPIO154__FUNC_TP_URXD2_AO (MTK_PIN_NO(154) | 5)
+#define PINMUX_GPIO154__FUNC_SCP_SPI1_B_MO (MTK_PIN_NO(154) | 6)
+
+#define PINMUX_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0)
+#define PINMUX_GPIO155__FUNC_ANT_SEL8 (MTK_PIN_NO(155) | 1)
+#define PINMUX_GPIO155__FUNC_SPI1_B_CLK (MTK_PIN_NO(155) | 2)
+#define PINMUX_GPIO155__FUNC_MD_INT0 (MTK_PIN_NO(155) | 3)
+#define PINMUX_GPIO155__FUNC_TP_UTXD2_AO (MTK_PIN_NO(155) | 5)
+#define PINMUX_GPIO155__FUNC_SCP_SPI1_B_CK (MTK_PIN_NO(155) | 6)
+#define PINMUX_GPIO155__FUNC_DBG_MON_A15 (MTK_PIN_NO(155) | 7)
+
+#define PINMUX_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0)
+#define PINMUX_GPIO156__FUNC_CONN_TOP_CLK (MTK_PIN_NO(156) | 1)
+#define PINMUX_GPIO156__FUNC_AUXIF_CLK0 (MTK_PIN_NO(156) | 2)
+#define PINMUX_GPIO156__FUNC_DBG_MON_A16 (MTK_PIN_NO(156) | 7)
+
+#define PINMUX_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0)
+#define PINMUX_GPIO157__FUNC_CONN_TOP_DATA (MTK_PIN_NO(157) | 1)
+#define PINMUX_GPIO157__FUNC_AUXIF_ST0 (MTK_PIN_NO(157) | 2)
+#define PINMUX_GPIO157__FUNC_DBG_MON_A17 (MTK_PIN_NO(157) | 7)
+
+#define PINMUX_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0)
+#define PINMUX_GPIO158__FUNC_CONN_HRST_B (MTK_PIN_NO(158) | 1)
+#define PINMUX_GPIO158__FUNC_DBG_MON_A18 (MTK_PIN_NO(158) | 7)
+
+#define PINMUX_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0)
+#define PINMUX_GPIO159__FUNC_CONN_WB_PTA (MTK_PIN_NO(159) | 1)
+#define PINMUX_GPIO159__FUNC_DBG_MON_A19 (MTK_PIN_NO(159) | 7)
+
+#define PINMUX_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0)
+#define PINMUX_GPIO160__FUNC_CONN_BT_CLK (MTK_PIN_NO(160) | 1)
+#define PINMUX_GPIO160__FUNC_AUXIF_CLK1 (MTK_PIN_NO(160) | 2)
+#define PINMUX_GPIO160__FUNC_DBG_MON_A20 (MTK_PIN_NO(160) | 7)
+
+#define PINMUX_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0)
+#define PINMUX_GPIO161__FUNC_CONN_BT_DATA (MTK_PIN_NO(161) | 1)
+#define PINMUX_GPIO161__FUNC_AUXIF_ST1 (MTK_PIN_NO(161) | 2)
+#define PINMUX_GPIO161__FUNC_DBG_MON_A21 (MTK_PIN_NO(161) | 7)
+
+#define PINMUX_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0)
+#define PINMUX_GPIO162__FUNC_CONN_WF_CTRL0 (MTK_PIN_NO(162) | 1)
+#define PINMUX_GPIO162__FUNC_DBG_MON_A22 (MTK_PIN_NO(162) | 7)
+
+#define PINMUX_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0)
+#define PINMUX_GPIO163__FUNC_CONN_WF_CTRL1 (MTK_PIN_NO(163) | 1)
+#define PINMUX_GPIO163__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(163) | 2)
+#define PINMUX_GPIO163__FUNC_DBG_MON_A23 (MTK_PIN_NO(163) | 7)
+
+#define PINMUX_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0)
+#define PINMUX_GPIO164__FUNC_CONN_WF_CTRL2 (MTK_PIN_NO(164) | 1)
+#define PINMUX_GPIO164__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(164) | 2)
+#define PINMUX_GPIO164__FUNC_DBG_MON_A24 (MTK_PIN_NO(164) | 7)
+
+#define PINMUX_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0)
+#define PINMUX_GPIO165__FUNC_CONN_WF_CTRL3 (MTK_PIN_NO(165) | 1)
+#define PINMUX_GPIO165__FUNC_UFS_UNIPRO_SDA (MTK_PIN_NO(165) | 2)
+#define PINMUX_GPIO165__FUNC_DBG_MON_A25 (MTK_PIN_NO(165) | 7)
+
+#define PINMUX_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0)
+#define PINMUX_GPIO166__FUNC_CONN_WF_CTRL4 (MTK_PIN_NO(166) | 1)
+#define PINMUX_GPIO166__FUNC_UFS_UNIPRO_SCL (MTK_PIN_NO(166) | 2)
+#define PINMUX_GPIO166__FUNC_DBG_MON_A26 (MTK_PIN_NO(166) | 7)
+
+#define PINMUX_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0)
+#define PINMUX_GPIO167__FUNC_MSDC0_CMD (MTK_PIN_NO(167) | 1)
+
+#define PINMUX_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0)
+#define PINMUX_GPIO168__FUNC_MSDC0_DAT0 (MTK_PIN_NO(168) | 1)
+
+#define PINMUX_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0)
+#define PINMUX_GPIO169__FUNC_MSDC0_DAT2 (MTK_PIN_NO(169) | 1)
+
+#define PINMUX_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0)
+#define PINMUX_GPIO170__FUNC_MSDC0_DAT4 (MTK_PIN_NO(170) | 1)
+
+#define PINMUX_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0)
+#define PINMUX_GPIO171__FUNC_MSDC0_DAT6 (MTK_PIN_NO(171) | 1)
+
+#define PINMUX_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0)
+#define PINMUX_GPIO172__FUNC_MSDC0_DAT1 (MTK_PIN_NO(172) | 1)
+
+#define PINMUX_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0)
+#define PINMUX_GPIO173__FUNC_MSDC0_DAT5 (MTK_PIN_NO(173) | 1)
+
+#define PINMUX_GPIO174__FUNC_GPIO174 (MTK_PIN_NO(174) | 0)
+#define PINMUX_GPIO174__FUNC_MSDC0_DAT7 (MTK_PIN_NO(174) | 1)
+
+#define PINMUX_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0)
+#define PINMUX_GPIO175__FUNC_MSDC0_DSL (MTK_PIN_NO(175) | 1)
+#define PINMUX_GPIO175__FUNC_ANT_SEL9 (MTK_PIN_NO(175) | 2)
+
+#define PINMUX_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0)
+#define PINMUX_GPIO176__FUNC_MSDC0_CLK (MTK_PIN_NO(176) | 1)
+#define PINMUX_GPIO176__FUNC_ANT_SEL10 (MTK_PIN_NO(176) | 2)
+
+#define PINMUX_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0)
+#define PINMUX_GPIO177__FUNC_MSDC0_DAT3 (MTK_PIN_NO(177) | 1)
+
+#define PINMUX_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0)
+#define PINMUX_GPIO178__FUNC_MSDC0_RSTB (MTK_PIN_NO(178) | 1)
+
+#define PINMUX_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0)
+#define PINMUX_GPIO179__FUNC_RFIC0_BSI_EN (MTK_PIN_NO(179) | 1)
+
+#define PINMUX_GPIO180__FUNC_GPIO180 (MTK_PIN_NO(180) | 0)
+#define PINMUX_GPIO180__FUNC_RFIC0_BSI_CK (MTK_PIN_NO(180) | 1)
+
+#define PINMUX_GPIO181__FUNC_GPIO181 (MTK_PIN_NO(181) | 0)
+#define PINMUX_GPIO181__FUNC_SRCLKENA0 (MTK_PIN_NO(181) | 1)
+
+#define PINMUX_GPIO182__FUNC_GPIO182 (MTK_PIN_NO(182) | 0)
+#define PINMUX_GPIO182__FUNC_SRCLKENA1 (MTK_PIN_NO(182) | 1)
+
+#define PINMUX_GPIO183__FUNC_GPIO183 (MTK_PIN_NO(183) | 0)
+#define PINMUX_GPIO183__FUNC_WATCHDOG (MTK_PIN_NO(183) | 1)
+
+#define PINMUX_GPIO184__FUNC_GPIO184 (MTK_PIN_NO(184) | 0)
+#define PINMUX_GPIO184__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(184) | 1)
+#define PINMUX_GPIO184__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(184) | 2)
+
+#define PINMUX_GPIO185__FUNC_GPIO185 (MTK_PIN_NO(185) | 0)
+#define PINMUX_GPIO185__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(185) | 1)
+
+#define PINMUX_GPIO186__FUNC_GPIO186 (MTK_PIN_NO(186) | 0)
+#define PINMUX_GPIO186__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(186) | 1)
+#define PINMUX_GPIO186__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(186) | 2)
+
+#define PINMUX_GPIO187__FUNC_GPIO187 (MTK_PIN_NO(187) | 0)
+#define PINMUX_GPIO187__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(187) | 1)
+
+#define PINMUX_GPIO188__FUNC_GPIO188 (MTK_PIN_NO(188) | 0)
+#define PINMUX_GPIO188__FUNC_RTC32K_CK (MTK_PIN_NO(188) | 1)
+
+#define PINMUX_GPIO189__FUNC_GPIO189 (MTK_PIN_NO(189) | 0)
+#define PINMUX_GPIO189__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(189) | 1)
+#define PINMUX_GPIO189__FUNC_I2S1_MCK (MTK_PIN_NO(189) | 3)
+#define PINMUX_GPIO189__FUNC_UFS_UNIPRO_SCL (MTK_PIN_NO(189) | 6)
+
+#define PINMUX_GPIO190__FUNC_GPIO190 (MTK_PIN_NO(190) | 0)
+#define PINMUX_GPIO190__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(190) | 1)
+#define PINMUX_GPIO190__FUNC_I2S1_BCK (MTK_PIN_NO(190) | 3)
+#define PINMUX_GPIO190__FUNC_DBG_MON_A6 (MTK_PIN_NO(190) | 7)
+
+#define PINMUX_GPIO191__FUNC_GPIO191 (MTK_PIN_NO(191) | 0)
+#define PINMUX_GPIO191__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(191) | 1)
+#define PINMUX_GPIO191__FUNC_I2S1_LRCK (MTK_PIN_NO(191) | 3)
+#define PINMUX_GPIO191__FUNC_DBG_MON_A7 (MTK_PIN_NO(191) | 7)
+
+#define PINMUX_GPIO192__FUNC_GPIO192 (MTK_PIN_NO(192) | 0)
+#define PINMUX_GPIO192__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(192) | 1)
+#define PINMUX_GPIO192__FUNC_I2S1_DO (MTK_PIN_NO(192) | 3)
+#define PINMUX_GPIO192__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(192) | 6)
+#define PINMUX_GPIO192__FUNC_DBG_MON_A8 (MTK_PIN_NO(192) | 7)
+
+#define PINMUX_GPIO193__FUNC_GPIO193 (MTK_PIN_NO(193) | 0)
+#define PINMUX_GPIO193__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(193) | 1)
+#define PINMUX_GPIO193__FUNC_VOW_DAT_MISO (MTK_PIN_NO(193) | 2)
+#define PINMUX_GPIO193__FUNC_I2S2_LRCK (MTK_PIN_NO(193) | 3)
+#define PINMUX_GPIO193__FUNC_UDI_TDI (MTK_PIN_NO(193) | 5)
+#define PINMUX_GPIO193__FUNC_DBG_MON_A12 (MTK_PIN_NO(193) | 7)
+
+#define PINMUX_GPIO194__FUNC_GPIO194 (MTK_PIN_NO(194) | 0)
+#define PINMUX_GPIO194__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(194) | 1)
+#define PINMUX_GPIO194__FUNC_VOW_CLK_MISO (MTK_PIN_NO(194) | 2)
+#define PINMUX_GPIO194__FUNC_I2S2_DI (MTK_PIN_NO(194) | 3)
+#define PINMUX_GPIO194__FUNC_UDI_NTRST (MTK_PIN_NO(194) | 5)
+#define PINMUX_GPIO194__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(194) | 6)
+#define PINMUX_GPIO194__FUNC_DBG_MON_A13 (MTK_PIN_NO(194) | 7)
+
+#define PINMUX_GPIO195__FUNC_GPIO195 (MTK_PIN_NO(195) | 0)
+#define PINMUX_GPIO195__FUNC_ADSP_JTAG_TCK (MTK_PIN_NO(195) | 3)
+#define PINMUX_GPIO195__FUNC_VPU_UDI_TCK (MTK_PIN_NO(195) | 4)
+#define PINMUX_GPIO195__FUNC_SPM_JTAG_TCK (MTK_PIN_NO(195) | 5)
+#define PINMUX_GPIO195__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(195) | 6)
+
+#define PINMUX_GPIO196__FUNC_GPIO196 (MTK_PIN_NO(196) | 0)
+#define PINMUX_GPIO196__FUNC_CMMCLK4 (MTK_PIN_NO(196) | 1)
+#define PINMUX_GPIO196__FUNC_ADSP_JTAG_TDI (MTK_PIN_NO(196) | 3)
+#define PINMUX_GPIO196__FUNC_VPU_UDI_TDI (MTK_PIN_NO(196) | 4)
+#define PINMUX_GPIO196__FUNC_SPM_JTAG_TDI (MTK_PIN_NO(196) | 5)
+#define PINMUX_GPIO196__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(196) | 6)
+
+#define PINMUX_GPIO197__FUNC_GPIO197 (MTK_PIN_NO(197) | 0)
+#define PINMUX_GPIO197__FUNC_ADSP_JTAG_TDO (MTK_PIN_NO(197) | 3)
+#define PINMUX_GPIO197__FUNC_VPU_UDI_TDO (MTK_PIN_NO(197) | 4)
+#define PINMUX_GPIO197__FUNC_SPM_JTAG_TDO (MTK_PIN_NO(197) | 5)
+#define PINMUX_GPIO197__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(197) | 6)
+
+#define PINMUX_GPIO198__FUNC_GPIO198 (MTK_PIN_NO(198) | 0)
+#define PINMUX_GPIO198__FUNC_SCL7 (MTK_PIN_NO(198) | 1)
+
+#define PINMUX_GPIO199__FUNC_GPIO199 (MTK_PIN_NO(199) | 0)
+#define PINMUX_GPIO199__FUNC_SDA7 (MTK_PIN_NO(199) | 1)
+
+#define PINMUX_GPIO200__FUNC_GPIO200 (MTK_PIN_NO(200) | 0)
+#define PINMUX_GPIO200__FUNC_URXD1 (MTK_PIN_NO(200) | 1)
+#define PINMUX_GPIO200__FUNC_ADSP_URXD0 (MTK_PIN_NO(200) | 2)
+#define PINMUX_GPIO200__FUNC_TP_URXD1_AO (MTK_PIN_NO(200) | 3)
+#define PINMUX_GPIO200__FUNC_SSPM_URXD_AO (MTK_PIN_NO(200) | 4)
+#define PINMUX_GPIO200__FUNC_TP_URXD2_AO (MTK_PIN_NO(200) | 5)
+#define PINMUX_GPIO200__FUNC_MBISTREADEN_TRIGGER (MTK_PIN_NO(200) | 6)
+
+#define PINMUX_GPIO201__FUNC_GPIO201 (MTK_PIN_NO(201) | 0)
+#define PINMUX_GPIO201__FUNC_UTXD1 (MTK_PIN_NO(201) | 1)
+#define PINMUX_GPIO201__FUNC_ADSP_UTXD0 (MTK_PIN_NO(201) | 2)
+#define PINMUX_GPIO201__FUNC_TP_UTXD1_AO (MTK_PIN_NO(201) | 3)
+#define PINMUX_GPIO201__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(201) | 4)
+#define PINMUX_GPIO201__FUNC_TP_UTXD2_AO (MTK_PIN_NO(201) | 5)
+#define PINMUX_GPIO201__FUNC_MBISTWRITEEN_TRIGGER (MTK_PIN_NO(201) | 6)
+
+#define PINMUX_GPIO202__FUNC_GPIO202 (MTK_PIN_NO(202) | 0)
+#define PINMUX_GPIO202__FUNC_PWM_3 (MTK_PIN_NO(202) | 1)
+#define PINMUX_GPIO202__FUNC_CLKM3 (MTK_PIN_NO(202) | 2)
+
+#define PINMUX_GPIO203__FUNC_GPIO203 (MTK_PIN_NO(203) | 0)
+
+#define PINMUX_GPIO204__FUNC_GPIO204 (MTK_PIN_NO(204) | 0)
+
+#define PINMUX_GPIO205__FUNC_GPIO205 (MTK_PIN_NO(205) | 0)
+
+#define PINMUX_GPIO206__FUNC_GPIO206 (MTK_PIN_NO(206) | 0)
+
+#define PINMUX_GPIO207__FUNC_GPIO207 (MTK_PIN_NO(207) | 0)
+
+#define PINMUX_GPIO208__FUNC_GPIO208 (MTK_PIN_NO(208) | 0)
+
+#define PINMUX_GPIO209__FUNC_GPIO209 (MTK_PIN_NO(209) | 0)
+
+#endif /* __MT6779_PINFUNC_H */
diff --git a/include/dt-bindings/pinctrl/mt6795-pinfunc.h b/include/dt-bindings/pinctrl/mt6795-pinfunc.h
new file mode 100644
index 000000000000..bd1c5a9fad06
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt6795-pinfunc.h
@@ -0,0 +1,908 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __DTS_MT6795_PINFUNC_H
+#define __DTS_MT6795_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
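+/*
+ * Each PINMUX_* macro below packs a pin number and a mux function index
+ * into a single cell: MTK_PIN_NO(x) (from mt65xx.h, assumed here to be
+ * ((x) << 8)) carries the pin in the upper bits, while the low bits pick
+ * function 0-7, with function 0 always being plain GPIO. A board device
+ * tree consumes these constants directly; a minimal sketch with
+ * illustrative node names:
+ *
+ *	uart0_pins: uart0-pins {
+ *		pins {
+ *			pinmux = <PINMUX_GPIO0__FUNC_UTXD0>,
+ *				 <PINMUX_GPIO1__FUNC_URXD0>;
+ *		};
+ *	};
+ */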
+#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_IRDA_PDN (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_I2S1_WS (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_TDD_TMS (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_UTXD0 (MTK_PIN_NO(0) | 5)
+
+#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_IRDA_RXD (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_I2S1_BCK (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_SDA4 (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_TDD_TCK (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_URXD0 (MTK_PIN_NO(1) | 5)
+
+#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_IRDA_TXD (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_I2S1_MCK (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_SCL4 (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_TDD_TDI (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_UTXD3 (MTK_PIN_NO(2) | 5)
+
+#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_DSI1_TE (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_I2S1_DO_1 (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_SDA3 (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_TDD_TDO (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_URXD3 (MTK_PIN_NO(3) | 5)
+
+#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_DISP_PWM1 (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_I2S1_DO_2 (MTK_PIN_NO(4) | 2)
+#define PINMUX_GPIO4__FUNC_SCL3 (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_TDD_TRSTN (MTK_PIN_NO(4) | 4)
+
+#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_PCM1_CLK (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_I2S2_WS (MTK_PIN_NO(5) | 2)
+#define PINMUX_GPIO5__FUNC_SPI_CK_3 (MTK_PIN_NO(5) | 3)
+#define PINMUX_GPIO5__FUNC_LTE_MD32_JTAG_TMS (MTK_PIN_NO(5) | 4)
+#define PINMUX_GPIO5__FUNC_AP_MD32_JTAG_TMS (MTK_PIN_NO(5) | 5)
+
+#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_PCM1_SYNC (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_I2S2_BCK (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_SPI_MI_3 (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_LTE_MD32_JTAG_TCK (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_AP_MD32_JTAG_TCK (MTK_PIN_NO(6) | 5)
+
+#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_PCM1_DI (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_I2S2_DI_1 (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_SPI_MO_3 (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_LTE_MD32_JTAG_TDI (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_AP_MD32_JTAG_TDI (MTK_PIN_NO(7) | 5)
+
+#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_PCM1_DO (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_I2S2_DI_2 (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_SPI_CS_3 (MTK_PIN_NO(8) | 3)
+#define PINMUX_GPIO8__FUNC_LTE_MD32_JTAG_TDO (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_AP_MD32_JTAG_TDO (MTK_PIN_NO(8) | 5)
+
+#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_USB_DRVVBUS (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_I2S2_MCK (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_LTE_MD32_JTAG_TRST (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_AP_MD32_JTAG_TRST (MTK_PIN_NO(9) | 5)
+
+#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_I2S0_WS (MTK_PIN_NO(10) | 2)
+
+#define PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_I2S0_BCK (MTK_PIN_NO(11) | 2)
+
+#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_I2S0_MCK (MTK_PIN_NO(12) | 2)
+
+#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_I2S0_DO (MTK_PIN_NO(13) | 2)
+
+#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_I2S0_DI (MTK_PIN_NO(14) | 2)
+#define PINMUX_GPIO14__FUNC_DISP_PWM1 (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_PWM4 (MTK_PIN_NO(14) | 4)
+#define PINMUX_GPIO14__FUNC_IRDA_RXD (MTK_PIN_NO(14) | 5)
+#define PINMUX_GPIO14__FUNC_I2S1_BCK (MTK_PIN_NO(14) | 6)
+
+#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_DSI1_TE (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_USB_DRVVBUS (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_PWM5 (MTK_PIN_NO(15) | 4)
+#define PINMUX_GPIO15__FUNC_IRDA_TXD (MTK_PIN_NO(15) | 5)
+#define PINMUX_GPIO15__FUNC_I2S1_MCK (MTK_PIN_NO(15) | 6)
+
+#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_IDDIG (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_FLASH (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_PWM5 (MTK_PIN_NO(16) | 4)
+
+#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_SIM1_SCLK (MTK_PIN_NO(17) | 1)
+#define PINMUX_GPIO17__FUNC_SIM2_SCLK (MTK_PIN_NO(17) | 2)
+
+#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_SIM1_SRST (MTK_PIN_NO(18) | 1)
+#define PINMUX_GPIO18__FUNC_SIM2_SRST (MTK_PIN_NO(18) | 2)
+
+#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_SIM1_SDAT (MTK_PIN_NO(19) | 1)
+#define PINMUX_GPIO19__FUNC_SIM2_SDAT (MTK_PIN_NO(19) | 2)
+
+#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_SIM2_SCLK (MTK_PIN_NO(20) | 1)
+#define PINMUX_GPIO20__FUNC_SIM1_SCLK (MTK_PIN_NO(20) | 2)
+
+#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_SIM2_SRST (MTK_PIN_NO(21) | 1)
+#define PINMUX_GPIO21__FUNC_SIM1_SRST (MTK_PIN_NO(21) | 2)
+
+#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_SIM2_SDAT (MTK_PIN_NO(22) | 1)
+#define PINMUX_GPIO22__FUNC_SIM1_SDAT (MTK_PIN_NO(22) | 2)
+
+#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_MSDC3_DAT0 (MTK_PIN_NO(23) | 1)
+
+#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_MSDC3_DAT1 (MTK_PIN_NO(24) | 1)
+
+#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_MSDC3_DAT2 (MTK_PIN_NO(25) | 1)
+
+#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_MSDC3_DAT3 (MTK_PIN_NO(26) | 1)
+
+#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_MSDC3_CLK (MTK_PIN_NO(27) | 1)
+
+#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_MSDC3_CMD (MTK_PIN_NO(28) | 1)
+
+#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_PTA_RXD (MTK_PIN_NO(29) | 1)
+#define PINMUX_GPIO29__FUNC_UCTS2 (MTK_PIN_NO(29) | 2)
+
+#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_PTA_TXD (MTK_PIN_NO(30) | 1)
+#define PINMUX_GPIO30__FUNC_URTS2 (MTK_PIN_NO(30) | 2)
+
+#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_URXD2 (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_UTXD2 (MTK_PIN_NO(31) | 2)
+
+#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_UTXD2 (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_URXD2 (MTK_PIN_NO(32) | 2)
+
+#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_MRG_CLK (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_PCM0_CLK (MTK_PIN_NO(33) | 2)
+
+#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_MRG_DI (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_PCM0_DI (MTK_PIN_NO(34) | 2)
+
+#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_MRG_DO (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_PCM0_DO (MTK_PIN_NO(35) | 2)
+
+#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_MRG_SYNC (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_PCM0_SYNC (MTK_PIN_NO(36) | 2)
+
+#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_GPS_SYNC (MTK_PIN_NO(37) | 1)
+
+#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_DAIRSTB (MTK_PIN_NO(38) | 1)
+
+#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_CM2MCLK (MTK_PIN_NO(39) | 1)
+
+#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_CM3MCLK (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_IRDA_PDN (MTK_PIN_NO(40) | 2)
+#define PINMUX_GPIO40__FUNC_PWM6 (MTK_PIN_NO(40) | 3)
+#define PINMUX_GPIO40__FUNC_I2S1_WS (MTK_PIN_NO(40) | 4)
+
+#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_CMPCLK (MTK_PIN_NO(41) | 1)
+#define PINMUX_GPIO41__FUNC_CMCSK (MTK_PIN_NO(41) | 2)
+#define PINMUX_GPIO41__FUNC_FLASH (MTK_PIN_NO(41) | 3)
+
+#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_CMMCLK (MTK_PIN_NO(42) | 1)
+
+#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_SDA2 (MTK_PIN_NO(43) | 1)
+
+#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_SCL2 (MTK_PIN_NO(44) | 1)
+
+#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_SDA0 (MTK_PIN_NO(45) | 1)
+
+#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_SCL0 (MTK_PIN_NO(46) | 1)
+
+#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_BPI_BUS0 (MTK_PIN_NO(47) | 1)
+
+#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_BPI_BUS1 (MTK_PIN_NO(48) | 1)
+
+#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_BPI_BUS2 (MTK_PIN_NO(49) | 1)
+
+#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_BPI_BUS3 (MTK_PIN_NO(50) | 1)
+
+#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_BPI_BUS4 (MTK_PIN_NO(51) | 1)
+
+#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_BPI_BUS5 (MTK_PIN_NO(52) | 1)
+
+#define PINMUX_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_BPI_BUS6 (MTK_PIN_NO(53) | 1)
+
+#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_BPI_BUS7 (MTK_PIN_NO(54) | 1)
+
+#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_BPI_BUS8 (MTK_PIN_NO(55) | 1)
+
+#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_BPI_BUS9 (MTK_PIN_NO(56) | 1)
+
+#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_BPI_BUS10 (MTK_PIN_NO(57) | 1)
+
+#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_BPI_BUS11 (MTK_PIN_NO(58) | 1)
+
+#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_BPI_BUS12 (MTK_PIN_NO(59) | 1)
+
+#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_BPI_BUS13 (MTK_PIN_NO(60) | 1)
+
+#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_BPI_BUS14 (MTK_PIN_NO(61) | 1)
+
+#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_RFIC1_BSI_CK (MTK_PIN_NO(62) | 1)
+
+#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_RFIC1_BSI_D0 (MTK_PIN_NO(63) | 1)
+
+#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_RFIC1_BSI_D1 (MTK_PIN_NO(64) | 1)
+
+#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_RFIC1_BSI_D2 (MTK_PIN_NO(65) | 1)
+
+#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_RFIC1_BSI_CS (MTK_PIN_NO(66) | 1)
+
+#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_TD_TXBPI (MTK_PIN_NO(67) | 1)
+
+#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_RFIC0_BSI_CK (MTK_PIN_NO(68) | 1)
+
+#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_RFIC0_BSI_D0 (MTK_PIN_NO(69) | 1)
+
+#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_RFIC0_BSI_D1 (MTK_PIN_NO(70) | 1)
+
+#define PINMUX_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define PINMUX_GPIO71__FUNC_RFIC0_BSI_D2 (MTK_PIN_NO(71) | 1)
+
+#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define PINMUX_GPIO72__FUNC_RFIC0_BSI_CS (MTK_PIN_NO(72) | 1)
+
+#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define PINMUX_GPIO73__FUNC_MISC_BSI_DO (MTK_PIN_NO(73) | 1)
+
+#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_MISC_BSI_CK (MTK_PIN_NO(74) | 1)
+
+#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_MISC_BSI_CS0B (MTK_PIN_NO(75) | 1)
+#define PINMUX_GPIO75__FUNC_MIPI1_SCLK (MTK_PIN_NO(75) | 2)
+
+#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_MISC_BSI_CS1B (MTK_PIN_NO(76) | 1)
+
+#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_MISC_BSI_DI (MTK_PIN_NO(77) | 1)
+#define PINMUX_GPIO77__FUNC_MIPI1_SDATA (MTK_PIN_NO(77) | 2)
+
+#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_LTE_TXBPI (MTK_PIN_NO(78) | 1)
+
+#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_BPI_BUS15 (MTK_PIN_NO(79) | 1)
+
+#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_BPI_BUS16 (MTK_PIN_NO(80) | 1)
+
+#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_BPI_BUS17 (MTK_PIN_NO(81) | 1)
+
+#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_BPI_BUS18 (MTK_PIN_NO(82) | 1)
+
+#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_BPI_BUS19 (MTK_PIN_NO(83) | 1)
+
+#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_BPI_BUS20 (MTK_PIN_NO(84) | 1)
+
+#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_BPI_BUS21 (MTK_PIN_NO(85) | 1)
+
+#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_BPI_BUS22 (MTK_PIN_NO(86) | 1)
+
+#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_BPI_BUS23 (MTK_PIN_NO(87) | 1)
+
+#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_BPI_BUS24 (MTK_PIN_NO(88) | 1)
+
+#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_BPI_BUS25 (MTK_PIN_NO(89) | 1)
+
+#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_BPI_BUS26 (MTK_PIN_NO(90) | 1)
+
+#define PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_BPI_BUS27 (MTK_PIN_NO(91) | 1)
+
+#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_PCM1_CLK (MTK_PIN_NO(92) | 1)
+#define PINMUX_GPIO92__FUNC_I2S0_BCK (MTK_PIN_NO(92) | 2)
+#define PINMUX_GPIO92__FUNC_NLD6 (MTK_PIN_NO(92) | 3)
+
+#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_PCM1_SYNC (MTK_PIN_NO(93) | 1)
+#define PINMUX_GPIO93__FUNC_I2S0_WS (MTK_PIN_NO(93) | 2)
+#define PINMUX_GPIO93__FUNC_NLD7 (MTK_PIN_NO(93) | 3)
+
+#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_PCM1_DI (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_I2S0_DI (MTK_PIN_NO(94) | 2)
+#define PINMUX_GPIO94__FUNC_NREB (MTK_PIN_NO(94) | 3)
+
+#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_PCM1_DO (MTK_PIN_NO(95) | 1)
+#define PINMUX_GPIO95__FUNC_I2S0_DO (MTK_PIN_NO(95) | 2)
+#define PINMUX_GPIO95__FUNC_NRNB0 (MTK_PIN_NO(95) | 3)
+
+#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_URXD1 (MTK_PIN_NO(96) | 1)
+#define PINMUX_GPIO96__FUNC_UTXD1 (MTK_PIN_NO(96) | 2)
+#define PINMUX_GPIO96__FUNC_NWEB (MTK_PIN_NO(96) | 3)
+
+#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_UTXD1 (MTK_PIN_NO(97) | 1)
+#define PINMUX_GPIO97__FUNC_URXD1 (MTK_PIN_NO(97) | 2)
+#define PINMUX_GPIO97__FUNC_NCEB0 (MTK_PIN_NO(97) | 3)
+
+#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_URTS1 (MTK_PIN_NO(98) | 1)
+#define PINMUX_GPIO98__FUNC_UCTS1 (MTK_PIN_NO(98) | 2)
+#define PINMUX_GPIO98__FUNC_NALE (MTK_PIN_NO(98) | 3)
+
+#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_UCTS1 (MTK_PIN_NO(99) | 1)
+#define PINMUX_GPIO99__FUNC_URTS1 (MTK_PIN_NO(99) | 2)
+#define PINMUX_GPIO99__FUNC_NCLE (MTK_PIN_NO(99) | 3)
+
+#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_MSDC2_DAT0 (MTK_PIN_NO(100) | 1)
+#define PINMUX_GPIO100__FUNC_URXD1 (MTK_PIN_NO(100) | 2)
+#define PINMUX_GPIO100__FUNC_USB_DRVVBUS (MTK_PIN_NO(100) | 3)
+#define PINMUX_GPIO100__FUNC_SDA4 (MTK_PIN_NO(100) | 4)
+
+#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_MSDC2_DAT1 (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_UTXD1 (MTK_PIN_NO(101) | 2)
+#define PINMUX_GPIO101__FUNC_SCL4 (MTK_PIN_NO(101) | 4)
+
+#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_MSDC2_DAT2 (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_URTS1 (MTK_PIN_NO(102) | 2)
+#define PINMUX_GPIO102__FUNC_UTXD0 (MTK_PIN_NO(102) | 3)
+#define PINMUX_GPIO102__FUNC_PWM0 (MTK_PIN_NO(102) | 5)
+#define PINMUX_GPIO102__FUNC_SPI_CK_1 (MTK_PIN_NO(102) | 6)
+
+#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_MSDC2_DAT3 (MTK_PIN_NO(103) | 1)
+#define PINMUX_GPIO103__FUNC_UCTS1 (MTK_PIN_NO(103) | 2)
+#define PINMUX_GPIO103__FUNC_URXD0 (MTK_PIN_NO(103) | 3)
+#define PINMUX_GPIO103__FUNC_PWM1 (MTK_PIN_NO(103) | 5)
+#define PINMUX_GPIO103__FUNC_SPI_MI_1 (MTK_PIN_NO(103) | 6)
+
+#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_MSDC2_CLK (MTK_PIN_NO(104) | 1)
+#define PINMUX_GPIO104__FUNC_NLD4 (MTK_PIN_NO(104) | 2)
+#define PINMUX_GPIO104__FUNC_UTXD3 (MTK_PIN_NO(104) | 3)
+#define PINMUX_GPIO104__FUNC_SDA3 (MTK_PIN_NO(104) | 4)
+#define PINMUX_GPIO104__FUNC_PWM2 (MTK_PIN_NO(104) | 5)
+#define PINMUX_GPIO104__FUNC_SPI_MO_1 (MTK_PIN_NO(104) | 6)
+
+#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_MSDC2_CMD (MTK_PIN_NO(105) | 1)
+#define PINMUX_GPIO105__FUNC_NLD5 (MTK_PIN_NO(105) | 2)
+#define PINMUX_GPIO105__FUNC_URXD3 (MTK_PIN_NO(105) | 3)
+#define PINMUX_GPIO105__FUNC_SCL3 (MTK_PIN_NO(105) | 4)
+#define PINMUX_GPIO105__FUNC_PWM3 (MTK_PIN_NO(105) | 5)
+#define PINMUX_GPIO105__FUNC_SPI_CS_1 (MTK_PIN_NO(105) | 6)
+
+#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_LCM_RST (MTK_PIN_NO(106) | 1)
+
+#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_DSI_TE (MTK_PIN_NO(107) | 1)
+
+#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_JTMS (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_MFG_JTAG_TMS (MTK_PIN_NO(108) | 2)
+#define PINMUX_GPIO108__FUNC_TDD_TMS (MTK_PIN_NO(108) | 3)
+#define PINMUX_GPIO108__FUNC_LTE_MD32_JTAG_TMS (MTK_PIN_NO(108) | 4)
+#define PINMUX_GPIO108__FUNC_AP_MD32_JTAG_TMS (MTK_PIN_NO(108) | 5)
+#define PINMUX_GPIO108__FUNC_DFD_TMS (MTK_PIN_NO(108) | 6)
+
+#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_JTCK (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_MFG_JTAG_TCK (MTK_PIN_NO(109) | 2)
+#define PINMUX_GPIO109__FUNC_TDD_TCK (MTK_PIN_NO(109) | 3)
+#define PINMUX_GPIO109__FUNC_LTE_MD32_JTAG_TCK (MTK_PIN_NO(109) | 4)
+#define PINMUX_GPIO109__FUNC_AP_MD32_JTAG_TCK (MTK_PIN_NO(109) | 5)
+#define PINMUX_GPIO109__FUNC_DFD_TCK (MTK_PIN_NO(109) | 6)
+
+#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_JTDI (MTK_PIN_NO(110) | 1)
+#define PINMUX_GPIO110__FUNC_MFG_JTAG_TDI (MTK_PIN_NO(110) | 2)
+#define PINMUX_GPIO110__FUNC_TDD_TDI (MTK_PIN_NO(110) | 3)
+#define PINMUX_GPIO110__FUNC_LTE_MD32_JTAG_TDI (MTK_PIN_NO(110) | 4)
+#define PINMUX_GPIO110__FUNC_AP_MD32_JTAG_TDI (MTK_PIN_NO(110) | 5)
+#define PINMUX_GPIO110__FUNC_DFD_TDI (MTK_PIN_NO(110) | 6)
+
+#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_JTDO (MTK_PIN_NO(111) | 1)
+#define PINMUX_GPIO111__FUNC_MFG_JTAG_TDO (MTK_PIN_NO(111) | 2)
+#define PINMUX_GPIO111__FUNC_TDD_TDO (MTK_PIN_NO(111) | 3)
+#define PINMUX_GPIO111__FUNC_LTE_MD32_JTAG_TDO (MTK_PIN_NO(111) | 4)
+#define PINMUX_GPIO111__FUNC_AP_MD32_JTAG_TDO (MTK_PIN_NO(111) | 5)
+#define PINMUX_GPIO111__FUNC_DFD_TDO (MTK_PIN_NO(111) | 6)
+
+#define PINMUX_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_JTRST_B (MTK_PIN_NO(112) | 1)
+#define PINMUX_GPIO112__FUNC_MFG_JTAG_TRSTN (MTK_PIN_NO(112) | 2)
+#define PINMUX_GPIO112__FUNC_TDD_TRSTN (MTK_PIN_NO(112) | 3)
+#define PINMUX_GPIO112__FUNC_LTE_MD32_JTAG_TRST (MTK_PIN_NO(112) | 4)
+#define PINMUX_GPIO112__FUNC_AP_MD32_JTAG_TRST (MTK_PIN_NO(112) | 5)
+#define PINMUX_GPIO112__FUNC_DFD_NTRST (MTK_PIN_NO(112) | 6)
+
+#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_URXD0 (MTK_PIN_NO(113) | 1)
+#define PINMUX_GPIO113__FUNC_UTXD0 (MTK_PIN_NO(113) | 2)
+#define PINMUX_GPIO113__FUNC_MD_URXD (MTK_PIN_NO(113) | 3)
+#define PINMUX_GPIO113__FUNC_LTE_URXD (MTK_PIN_NO(113) | 4)
+#define PINMUX_GPIO113__FUNC_TDD_TXD (MTK_PIN_NO(113) | 5)
+#define PINMUX_GPIO113__FUNC_I2S2_WS (MTK_PIN_NO(113) | 6)
+
+#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_UTXD0 (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_URXD0 (MTK_PIN_NO(114) | 2)
+#define PINMUX_GPIO114__FUNC_MD_UTXD (MTK_PIN_NO(114) | 3)
+#define PINMUX_GPIO114__FUNC_LTE_UTXD (MTK_PIN_NO(114) | 4)
+#define PINMUX_GPIO114__FUNC_TDD_TXD (MTK_PIN_NO(114) | 5)
+#define PINMUX_GPIO114__FUNC_I2S2_BCK (MTK_PIN_NO(114) | 6)
+
+#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_URTS0 (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_UCTS0 (MTK_PIN_NO(115) | 2)
+#define PINMUX_GPIO115__FUNC_MD_URXD (MTK_PIN_NO(115) | 3)
+#define PINMUX_GPIO115__FUNC_LTE_URXD (MTK_PIN_NO(115) | 4)
+#define PINMUX_GPIO115__FUNC_TDD_TXD (MTK_PIN_NO(115) | 5)
+#define PINMUX_GPIO115__FUNC_I2S2_MCK (MTK_PIN_NO(115) | 6)
+
+#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_UCTS0 (MTK_PIN_NO(116) | 1)
+#define PINMUX_GPIO116__FUNC_URTS0 (MTK_PIN_NO(116) | 2)
+#define PINMUX_GPIO116__FUNC_MD_UTXD (MTK_PIN_NO(116) | 3)
+#define PINMUX_GPIO116__FUNC_LTE_UTXD (MTK_PIN_NO(116) | 4)
+#define PINMUX_GPIO116__FUNC_TDD_TXD (MTK_PIN_NO(116) | 5)
+#define PINMUX_GPIO116__FUNC_I2S2_DI_1 (MTK_PIN_NO(116) | 6)
+
+#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_URXD3 (MTK_PIN_NO(117) | 1)
+#define PINMUX_GPIO117__FUNC_UTXD3 (MTK_PIN_NO(117) | 2)
+#define PINMUX_GPIO117__FUNC_MD_URXD (MTK_PIN_NO(117) | 3)
+#define PINMUX_GPIO117__FUNC_LTE_URXD (MTK_PIN_NO(117) | 4)
+#define PINMUX_GPIO117__FUNC_TDD_TXD (MTK_PIN_NO(117) | 5)
+
+#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_UTXD3 (MTK_PIN_NO(118) | 1)
+#define PINMUX_GPIO118__FUNC_URXD3 (MTK_PIN_NO(118) | 2)
+#define PINMUX_GPIO118__FUNC_MD_UTXD (MTK_PIN_NO(118) | 3)
+#define PINMUX_GPIO118__FUNC_LTE_UTXD (MTK_PIN_NO(118) | 4)
+#define PINMUX_GPIO118__FUNC_TDD_TXD (MTK_PIN_NO(118) | 5)
+
+#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_KROW0 (MTK_PIN_NO(119) | 1)
+
+#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_KROW1 (MTK_PIN_NO(120) | 1)
+#define PINMUX_GPIO120__FUNC_PWM6 (MTK_PIN_NO(120) | 3)
+
+#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_KROW2 (MTK_PIN_NO(121) | 1)
+#define PINMUX_GPIO121__FUNC_IRDA_PDN (MTK_PIN_NO(121) | 2)
+#define PINMUX_GPIO121__FUNC_I2S1_DO_1 (MTK_PIN_NO(121) | 3)
+#define PINMUX_GPIO121__FUNC_USB_DRVVBUS (MTK_PIN_NO(121) | 4)
+#define PINMUX_GPIO121__FUNC_SPI_CK_2 (MTK_PIN_NO(121) | 5)
+#define PINMUX_GPIO121__FUNC_PWM4 (MTK_PIN_NO(121) | 6)
+
+#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_KCOL0 (MTK_PIN_NO(122) | 1)
+
+#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_KCOL1 (MTK_PIN_NO(123) | 1)
+#define PINMUX_GPIO123__FUNC_IRDA_RXD (MTK_PIN_NO(123) | 2)
+#define PINMUX_GPIO123__FUNC_I2S2_DI_2 (MTK_PIN_NO(123) | 3)
+#define PINMUX_GPIO123__FUNC_PWM5 (MTK_PIN_NO(123) | 4)
+
+#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_KCOL2 (MTK_PIN_NO(124) | 1)
+#define PINMUX_GPIO124__FUNC_IRDA_TXD (MTK_PIN_NO(124) | 2)
+#define PINMUX_GPIO124__FUNC_I2S1_DO_2 (MTK_PIN_NO(124) | 3)
+#define PINMUX_GPIO124__FUNC_USB_DRVVBUS (MTK_PIN_NO(124) | 4)
+#define PINMUX_GPIO124__FUNC_SPI_MI_2 (MTK_PIN_NO(124) | 5)
+#define PINMUX_GPIO124__FUNC_PWM3 (MTK_PIN_NO(124) | 6)
+
+#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_SDA1 (MTK_PIN_NO(125) | 1)
+
+#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_SCL1 (MTK_PIN_NO(126) | 1)
+
+#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_MD_EINT1 (MTK_PIN_NO(127) | 1)
+#define PINMUX_GPIO127__FUNC_DISP_PWM1 (MTK_PIN_NO(127) | 2)
+#define PINMUX_GPIO127__FUNC_SPI_MO_2 (MTK_PIN_NO(127) | 3)
+
+#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_MD_EINT2 (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_DSI1_TE (MTK_PIN_NO(128) | 2)
+#define PINMUX_GPIO128__FUNC_SPI_CS_2 (MTK_PIN_NO(128) | 3)
+
+#define PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_I2S3_WS (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_I2S2_WS (MTK_PIN_NO(129) | 2)
+#define PINMUX_GPIO129__FUNC_PWM0 (MTK_PIN_NO(129) | 3)
+
+#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_I2S3_BCK (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_I2S2_BCK (MTK_PIN_NO(130) | 2)
+#define PINMUX_GPIO130__FUNC_PWM1 (MTK_PIN_NO(130) | 3)
+
+#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_I2S3_MCK (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_I2S2_MCK (MTK_PIN_NO(131) | 2)
+#define PINMUX_GPIO131__FUNC_PWM2 (MTK_PIN_NO(131) | 3)
+
+#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_I2S3_DO_1 (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_I2S2_DI_1 (MTK_PIN_NO(132) | 2)
+#define PINMUX_GPIO132__FUNC_PWM3 (MTK_PIN_NO(132) | 3)
+
+#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_I2S3_DO_2 (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_I2S2_DI_2 (MTK_PIN_NO(133) | 2)
+#define PINMUX_GPIO133__FUNC_PWM4 (MTK_PIN_NO(133) | 3)
+
+#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_I2S3_DO_3 (MTK_PIN_NO(134) | 1)
+#define PINMUX_GPIO134__FUNC_DISP_PWM1 (MTK_PIN_NO(134) | 2)
+#define PINMUX_GPIO134__FUNC_I2S1_DO_1 (MTK_PIN_NO(134) | 3)
+#define PINMUX_GPIO134__FUNC_PWM5 (MTK_PIN_NO(134) | 4)
+
+#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_I2S3_DO_4 (MTK_PIN_NO(135) | 1)
+#define PINMUX_GPIO135__FUNC_DSI1_TE (MTK_PIN_NO(135) | 2)
+#define PINMUX_GPIO135__FUNC_I2S1_DO_2 (MTK_PIN_NO(135) | 3)
+#define PINMUX_GPIO135__FUNC_PWM6 (MTK_PIN_NO(135) | 4)
+
+#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_SDA3 (MTK_PIN_NO(136) | 1)
+
+#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_SCL3 (MTK_PIN_NO(137) | 1)
+
+#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_DPI_CK (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_NLD6 (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_UTXD0 (MTK_PIN_NO(138) | 3)
+#define PINMUX_GPIO138__FUNC_USB_DRVVBUS (MTK_PIN_NO(138) | 4)
+#define PINMUX_GPIO138__FUNC_IRDA_PDN (MTK_PIN_NO(138) | 5)
+
+#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_DPI_DE (MTK_PIN_NO(139) | 1)
+#define PINMUX_GPIO139__FUNC_NLD7 (MTK_PIN_NO(139) | 2)
+#define PINMUX_GPIO139__FUNC_URXD0 (MTK_PIN_NO(139) | 3)
+#define PINMUX_GPIO139__FUNC_MD_UTXD (MTK_PIN_NO(139) | 4)
+#define PINMUX_GPIO139__FUNC_IRDA_RXD (MTK_PIN_NO(139) | 5)
+
+#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_DPI_D0 (MTK_PIN_NO(140) | 1)
+#define PINMUX_GPIO140__FUNC_NREB (MTK_PIN_NO(140) | 2)
+#define PINMUX_GPIO140__FUNC_UCTS0 (MTK_PIN_NO(140) | 3)
+#define PINMUX_GPIO140__FUNC_MD_URXD (MTK_PIN_NO(140) | 4)
+#define PINMUX_GPIO140__FUNC_IRDA_TXD (MTK_PIN_NO(140) | 5)
+
+#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_DPI_D1 (MTK_PIN_NO(141) | 1)
+#define PINMUX_GPIO141__FUNC_NRNB0 (MTK_PIN_NO(141) | 2)
+#define PINMUX_GPIO141__FUNC_URTS0 (MTK_PIN_NO(141) | 3)
+#define PINMUX_GPIO141__FUNC_LTE_UTXD (MTK_PIN_NO(141) | 4)
+#define PINMUX_GPIO141__FUNC_I2S2_WS (MTK_PIN_NO(141) | 5)
+
+#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_DPI_D2 (MTK_PIN_NO(142) | 1)
+#define PINMUX_GPIO142__FUNC_NWEB (MTK_PIN_NO(142) | 2)
+#define PINMUX_GPIO142__FUNC_UTXD1 (MTK_PIN_NO(142) | 3)
+#define PINMUX_GPIO142__FUNC_LTE_URXD (MTK_PIN_NO(142) | 4)
+#define PINMUX_GPIO142__FUNC_I2S2_BCK (MTK_PIN_NO(142) | 5)
+
+#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_DPI_D3 (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_NCEB0 (MTK_PIN_NO(143) | 2)
+#define PINMUX_GPIO143__FUNC_URXD1 (MTK_PIN_NO(143) | 3)
+#define PINMUX_GPIO143__FUNC_TDD_TXD (MTK_PIN_NO(143) | 4)
+#define PINMUX_GPIO143__FUNC_I2S2_MCK (MTK_PIN_NO(143) | 5)
+
+#define PINMUX_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define PINMUX_GPIO144__FUNC_DPI_D4 (MTK_PIN_NO(144) | 1)
+#define PINMUX_GPIO144__FUNC_NALE (MTK_PIN_NO(144) | 2)
+#define PINMUX_GPIO144__FUNC_UCTS1 (MTK_PIN_NO(144) | 3)
+#define PINMUX_GPIO144__FUNC_TDD_TMS (MTK_PIN_NO(144) | 4)
+#define PINMUX_GPIO144__FUNC_I2S2_DI_1 (MTK_PIN_NO(144) | 5)
+
+#define PINMUX_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0)
+#define PINMUX_GPIO145__FUNC_DPI_D5 (MTK_PIN_NO(145) | 1)
+#define PINMUX_GPIO145__FUNC_NCLE (MTK_PIN_NO(145) | 2)
+#define PINMUX_GPIO145__FUNC_URTS1 (MTK_PIN_NO(145) | 3)
+#define PINMUX_GPIO145__FUNC_TDD_TCK (MTK_PIN_NO(145) | 4)
+#define PINMUX_GPIO145__FUNC_I2S2_DI_2 (MTK_PIN_NO(145) | 5)
+
+#define PINMUX_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0)
+#define PINMUX_GPIO146__FUNC_DPI_D6 (MTK_PIN_NO(146) | 1)
+#define PINMUX_GPIO146__FUNC_NLD8 (MTK_PIN_NO(146) | 2)
+#define PINMUX_GPIO146__FUNC_UTXD2 (MTK_PIN_NO(146) | 3)
+#define PINMUX_GPIO146__FUNC_TDD_TDI (MTK_PIN_NO(146) | 4)
+
+#define PINMUX_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0)
+#define PINMUX_GPIO147__FUNC_DPI_D7 (MTK_PIN_NO(147) | 1)
+#define PINMUX_GPIO147__FUNC_NLD9 (MTK_PIN_NO(147) | 2)
+#define PINMUX_GPIO147__FUNC_URXD2 (MTK_PIN_NO(147) | 3)
+#define PINMUX_GPIO147__FUNC_TDD_TDO (MTK_PIN_NO(147) | 4)
+#define PINMUX_GPIO147__FUNC_I2S1_WS (MTK_PIN_NO(147) | 5)
+
+#define PINMUX_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0)
+#define PINMUX_GPIO148__FUNC_DPI_D8 (MTK_PIN_NO(148) | 1)
+#define PINMUX_GPIO148__FUNC_NLD10 (MTK_PIN_NO(148) | 2)
+#define PINMUX_GPIO148__FUNC_UCTS2 (MTK_PIN_NO(148) | 3)
+#define PINMUX_GPIO148__FUNC_TDD_TRSTN (MTK_PIN_NO(148) | 4)
+#define PINMUX_GPIO148__FUNC_I2S1_BCK (MTK_PIN_NO(148) | 5)
+
+#define PINMUX_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0)
+#define PINMUX_GPIO149__FUNC_DPI_D9 (MTK_PIN_NO(149) | 1)
+#define PINMUX_GPIO149__FUNC_NLD11 (MTK_PIN_NO(149) | 2)
+#define PINMUX_GPIO149__FUNC_URTS2 (MTK_PIN_NO(149) | 3)
+#define PINMUX_GPIO149__FUNC_LTE_MD32_JTAG_TMS (MTK_PIN_NO(149) | 4)
+#define PINMUX_GPIO149__FUNC_I2S1_MCK (MTK_PIN_NO(149) | 5)
+
+#define PINMUX_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0)
+#define PINMUX_GPIO150__FUNC_DPI_D10 (MTK_PIN_NO(150) | 1)
+#define PINMUX_GPIO150__FUNC_NLD12 (MTK_PIN_NO(150) | 2)
+#define PINMUX_GPIO150__FUNC_UTXD3 (MTK_PIN_NO(150) | 3)
+#define PINMUX_GPIO150__FUNC_LTE_MD32_JTAG_TCK (MTK_PIN_NO(150) | 4)
+#define PINMUX_GPIO150__FUNC_I2S1_DO_1 (MTK_PIN_NO(150) | 5)
+
+#define PINMUX_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0)
+#define PINMUX_GPIO151__FUNC_DPI_D11 (MTK_PIN_NO(151) | 1)
+#define PINMUX_GPIO151__FUNC_NLD13 (MTK_PIN_NO(151) | 2)
+#define PINMUX_GPIO151__FUNC_URXD3 (MTK_PIN_NO(151) | 3)
+#define PINMUX_GPIO151__FUNC_LTE_MD32_JTAG_TDI (MTK_PIN_NO(151) | 4)
+#define PINMUX_GPIO151__FUNC_I2S1_DO_2 (MTK_PIN_NO(151) | 5)
+
+#define PINMUX_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0)
+#define PINMUX_GPIO152__FUNC_DPI_HSYNC (MTK_PIN_NO(152) | 1)
+#define PINMUX_GPIO152__FUNC_NLD14 (MTK_PIN_NO(152) | 2)
+#define PINMUX_GPIO152__FUNC_UCTS3 (MTK_PIN_NO(152) | 3)
+#define PINMUX_GPIO152__FUNC_LTE_MD32_JTAG_TDO (MTK_PIN_NO(152) | 4)
+#define PINMUX_GPIO152__FUNC_DSI1_TE (MTK_PIN_NO(152) | 5)
+
+#define PINMUX_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0)
+#define PINMUX_GPIO153__FUNC_DPI_VSYNC (MTK_PIN_NO(153) | 1)
+#define PINMUX_GPIO153__FUNC_NLD15 (MTK_PIN_NO(153) | 2)
+#define PINMUX_GPIO153__FUNC_URTS3 (MTK_PIN_NO(153) | 3)
+#define PINMUX_GPIO153__FUNC_LTE_MD32_JTAG_TRST (MTK_PIN_NO(153) | 4)
+#define PINMUX_GPIO153__FUNC_DISP_PWM1 (MTK_PIN_NO(153) | 5)
+
+#define PINMUX_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0)
+#define PINMUX_GPIO154__FUNC_MSDC0_DAT0 (MTK_PIN_NO(154) | 1)
+#define PINMUX_GPIO154__FUNC_NLD8 (MTK_PIN_NO(154) | 2)
+
+#define PINMUX_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0)
+#define PINMUX_GPIO155__FUNC_MSDC0_DAT1 (MTK_PIN_NO(155) | 1)
+#define PINMUX_GPIO155__FUNC_NLD9 (MTK_PIN_NO(155) | 2)
+
+#define PINMUX_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0)
+#define PINMUX_GPIO156__FUNC_MSDC0_DAT2 (MTK_PIN_NO(156) | 1)
+#define PINMUX_GPIO156__FUNC_NLD10 (MTK_PIN_NO(156) | 2)
+
+#define PINMUX_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0)
+#define PINMUX_GPIO157__FUNC_MSDC0_DAT3 (MTK_PIN_NO(157) | 1)
+#define PINMUX_GPIO157__FUNC_NLD11 (MTK_PIN_NO(157) | 2)
+
+#define PINMUX_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0)
+#define PINMUX_GPIO158__FUNC_MSDC0_DAT4 (MTK_PIN_NO(158) | 1)
+#define PINMUX_GPIO158__FUNC_NLD12 (MTK_PIN_NO(158) | 2)
+
+#define PINMUX_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0)
+#define PINMUX_GPIO159__FUNC_MSDC0_DAT5 (MTK_PIN_NO(159) | 1)
+#define PINMUX_GPIO159__FUNC_NLD13 (MTK_PIN_NO(159) | 2)
+
+#define PINMUX_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0)
+#define PINMUX_GPIO160__FUNC_MSDC0_DAT6 (MTK_PIN_NO(160) | 1)
+#define PINMUX_GPIO160__FUNC_NLD14 (MTK_PIN_NO(160) | 2)
+
+#define PINMUX_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0)
+#define PINMUX_GPIO161__FUNC_MSDC0_DAT7 (MTK_PIN_NO(161) | 1)
+#define PINMUX_GPIO161__FUNC_NLD15 (MTK_PIN_NO(161) | 2)
+
+#define PINMUX_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0)
+#define PINMUX_GPIO162__FUNC_MSDC0_CMD (MTK_PIN_NO(162) | 1)
+
+#define PINMUX_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0)
+#define PINMUX_GPIO163__FUNC_MSDC0_CLK (MTK_PIN_NO(163) | 1)
+
+#define PINMUX_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0)
+#define PINMUX_GPIO164__FUNC_MSDC0_DSL (MTK_PIN_NO(164) | 1)
+
+#define PINMUX_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0)
+#define PINMUX_GPIO165__FUNC_MSDC0_RSTB (MTK_PIN_NO(165) | 1)
+
+#define PINMUX_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0)
+#define PINMUX_GPIO166__FUNC_SPI_CK_0 (MTK_PIN_NO(166) | 1)
+#define PINMUX_GPIO166__FUNC_PWM0 (MTK_PIN_NO(166) | 3)
+
+#define PINMUX_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0)
+#define PINMUX_GPIO167__FUNC_SPI_MI_0 (MTK_PIN_NO(167) | 1)
+#define PINMUX_GPIO167__FUNC_PWM1 (MTK_PIN_NO(167) | 3)
+#define PINMUX_GPIO167__FUNC_SPI_MO_0 (MTK_PIN_NO(167) | 4)
+
+#define PINMUX_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0)
+#define PINMUX_GPIO168__FUNC_SPI_MO_0 (MTK_PIN_NO(168) | 1)
+#define PINMUX_GPIO168__FUNC_MD_EINT3 (MTK_PIN_NO(168) | 2)
+#define PINMUX_GPIO168__FUNC_PWM2 (MTK_PIN_NO(168) | 3)
+#define PINMUX_GPIO168__FUNC_SPI_MI_0 (MTK_PIN_NO(168) | 4)
+
+#define PINMUX_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0)
+#define PINMUX_GPIO169__FUNC_SPI_CS_0 (MTK_PIN_NO(169) | 1)
+#define PINMUX_GPIO169__FUNC_MD_EINT4 (MTK_PIN_NO(169) | 2)
+#define PINMUX_GPIO169__FUNC_PWM3 (MTK_PIN_NO(169) | 3)
+
+#define PINMUX_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0)
+#define PINMUX_GPIO170__FUNC_MSDC1_CMD (MTK_PIN_NO(170) | 1)
+
+#define PINMUX_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0)
+#define PINMUX_GPIO171__FUNC_MSDC1_DAT0 (MTK_PIN_NO(171) | 1)
+
+#define PINMUX_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0)
+#define PINMUX_GPIO172__FUNC_MSDC1_DAT1 (MTK_PIN_NO(172) | 1)
+
+#define PINMUX_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0)
+#define PINMUX_GPIO173__FUNC_MSDC1_DAT2 (MTK_PIN_NO(173) | 1)
+
+#define PINMUX_GPIO174__FUNC_GPIO174 (MTK_PIN_NO(174) | 0)
+#define PINMUX_GPIO174__FUNC_MSDC1_DAT3 (MTK_PIN_NO(174) | 1)
+
+#define PINMUX_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0)
+#define PINMUX_GPIO175__FUNC_MSDC1_CLK (MTK_PIN_NO(175) | 1)
+
+#define PINMUX_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0)
+#define PINMUX_GPIO176__FUNC_PWRAP_SPIMI (MTK_PIN_NO(176) | 1)
+#define PINMUX_GPIO176__FUNC_PWRAP_SPIMO (MTK_PIN_NO(176) | 2)
+
+#define PINMUX_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0)
+#define PINMUX_GPIO177__FUNC_PWRAP_SPIMO (MTK_PIN_NO(177) | 1)
+#define PINMUX_GPIO177__FUNC_PWRAP_SPIMI (MTK_PIN_NO(177) | 2)
+
+#define PINMUX_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0)
+#define PINMUX_GPIO178__FUNC_PWRAP_SPICK (MTK_PIN_NO(178) | 1)
+
+#define PINMUX_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0)
+#define PINMUX_GPIO179__FUNC_PWRAP_SPICS (MTK_PIN_NO(179) | 1)
+
+#define PINMUX_GPIO180__FUNC_GPIO180 (MTK_PIN_NO(180) | 0)
+#define PINMUX_GPIO180__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(180) | 1)
+#define PINMUX_GPIO180__FUNC_I2S1_WS (MTK_PIN_NO(180) | 2)
+#define PINMUX_GPIO180__FUNC_I2S2_WS (MTK_PIN_NO(180) | 3)
+#define PINMUX_GPIO180__FUNC_I2S0_WS (MTK_PIN_NO(180) | 4)
+
+#define PINMUX_GPIO181__FUNC_GPIO181 (MTK_PIN_NO(181) | 0)
+#define PINMUX_GPIO181__FUNC_AUD_DAT_MISO_1 (MTK_PIN_NO(181) | 1)
+#define PINMUX_GPIO181__FUNC_I2S1_BCK (MTK_PIN_NO(181) | 2)
+#define PINMUX_GPIO181__FUNC_I2S2_BCK (MTK_PIN_NO(181) | 3)
+#define PINMUX_GPIO181__FUNC_I2S0_BCK (MTK_PIN_NO(181) | 4)
+
+#define PINMUX_GPIO182__FUNC_GPIO182 (MTK_PIN_NO(182) | 0)
+#define PINMUX_GPIO182__FUNC_AUD_DAT_MOSI_1 (MTK_PIN_NO(182) | 1)
+#define PINMUX_GPIO182__FUNC_I2S1_MCK (MTK_PIN_NO(182) | 2)
+#define PINMUX_GPIO182__FUNC_I2S2_MCK (MTK_PIN_NO(182) | 3)
+#define PINMUX_GPIO182__FUNC_I2S0_MCK (MTK_PIN_NO(182) | 4)
+
+#define PINMUX_GPIO183__FUNC_GPIO183 (MTK_PIN_NO(183) | 0)
+#define PINMUX_GPIO183__FUNC_AUD_DAT_MISO_2 (MTK_PIN_NO(183) | 1)
+#define PINMUX_GPIO183__FUNC_I2S1_DO_1 (MTK_PIN_NO(183) | 2)
+#define PINMUX_GPIO183__FUNC_I2S2_DI_1 (MTK_PIN_NO(183) | 3)
+#define PINMUX_GPIO183__FUNC_I2S0_DO (MTK_PIN_NO(183) | 4)
+
+#define PINMUX_GPIO184__FUNC_GPIO184 (MTK_PIN_NO(184) | 0)
+#define PINMUX_GPIO184__FUNC_AUD_DAT_MOSI_2 (MTK_PIN_NO(184) | 1)
+#define PINMUX_GPIO184__FUNC_I2S1_DO_2 (MTK_PIN_NO(184) | 2)
+#define PINMUX_GPIO184__FUNC_I2S2_DI_2 (MTK_PIN_NO(184) | 3)
+#define PINMUX_GPIO184__FUNC_I2S0_DI (MTK_PIN_NO(184) | 4)
+
+#define PINMUX_GPIO185__FUNC_GPIO185 (MTK_PIN_NO(185) | 0)
+#define PINMUX_GPIO185__FUNC_RTC32K_CK (MTK_PIN_NO(185) | 1)
+
+#define PINMUX_GPIO186__FUNC_GPIO186 (MTK_PIN_NO(186) | 0)
+#define PINMUX_GPIO186__FUNC_DISP_PWM0 (MTK_PIN_NO(186) | 1)
+#define PINMUX_GPIO186__FUNC_DISP_PWM1 (MTK_PIN_NO(186) | 2)
+
+#define PINMUX_GPIO187__FUNC_GPIO187 (MTK_PIN_NO(187) | 0)
+#define PINMUX_GPIO187__FUNC_SRCLKENAI (MTK_PIN_NO(187) | 1)
+
+#define PINMUX_GPIO188__FUNC_GPIO188 (MTK_PIN_NO(188) | 0)
+#define PINMUX_GPIO188__FUNC_SRCLKENAI2 (MTK_PIN_NO(188) | 1)
+
+#define PINMUX_GPIO189__FUNC_GPIO189 (MTK_PIN_NO(189) | 0)
+#define PINMUX_GPIO189__FUNC_SRCLKENA0 (MTK_PIN_NO(189) | 1)
+
+#define PINMUX_GPIO190__FUNC_GPIO190 (MTK_PIN_NO(190) | 0)
+#define PINMUX_GPIO190__FUNC_SRCLKENA1 (MTK_PIN_NO(190) | 1)
+
+#define PINMUX_GPIO191__FUNC_GPIO191 (MTK_PIN_NO(191) | 0)
+#define PINMUX_GPIO191__FUNC_WATCHDOG_AO (MTK_PIN_NO(191) | 1)
+
+#define PINMUX_GPIO192__FUNC_GPIO192 (MTK_PIN_NO(192) | 0)
+#define PINMUX_GPIO192__FUNC_I2S0_WS (MTK_PIN_NO(192) | 1)
+#define PINMUX_GPIO192__FUNC_I2S1_WS (MTK_PIN_NO(192) | 2)
+#define PINMUX_GPIO192__FUNC_I2S2_WS (MTK_PIN_NO(192) | 3)
+#define PINMUX_GPIO192__FUNC_NCEB1 (MTK_PIN_NO(192) | 4)
+
+#define PINMUX_GPIO193__FUNC_GPIO193 (MTK_PIN_NO(193) | 0)
+#define PINMUX_GPIO193__FUNC_I2S0_BCK (MTK_PIN_NO(193) | 1)
+#define PINMUX_GPIO193__FUNC_I2S1_BCK (MTK_PIN_NO(193) | 2)
+#define PINMUX_GPIO193__FUNC_I2S2_BCK (MTK_PIN_NO(193) | 3)
+#define PINMUX_GPIO193__FUNC_NRNB1 (MTK_PIN_NO(193) | 4)
+
+#define PINMUX_GPIO194__FUNC_GPIO194 (MTK_PIN_NO(194) | 0)
+#define PINMUX_GPIO194__FUNC_I2S0_MCK (MTK_PIN_NO(194) | 1)
+#define PINMUX_GPIO194__FUNC_I2S1_MCK (MTK_PIN_NO(194) | 2)
+#define PINMUX_GPIO194__FUNC_I2S2_MCK (MTK_PIN_NO(194) | 3)
+
+#define PINMUX_GPIO195__FUNC_GPIO195 (MTK_PIN_NO(195) | 0)
+#define PINMUX_GPIO195__FUNC_I2S0_DO (MTK_PIN_NO(195) | 1)
+#define PINMUX_GPIO195__FUNC_I2S1_DO_1 (MTK_PIN_NO(195) | 2)
+#define PINMUX_GPIO195__FUNC_I2S2_DI_1 (MTK_PIN_NO(195) | 3)
+
+#define PINMUX_GPIO196__FUNC_GPIO196 (MTK_PIN_NO(196) | 0)
+#define PINMUX_GPIO196__FUNC_I2S0_DI (MTK_PIN_NO(196) | 1)
+#define PINMUX_GPIO196__FUNC_I2S1_DO_2 (MTK_PIN_NO(196) | 2)
+#define PINMUX_GPIO196__FUNC_I2S2_DI_2 (MTK_PIN_NO(196) | 3)
+
+#endif
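
Note: a board device tree selects a pin's mode by referencing one of these constants through the generic "pinmux" property of the SoC's pin controller node. A minimal sketch (the node names and labels are illustrative only, not taken from any in-tree board file), routing MSDC1 onto pins 170, 171 and 175 from the header above:

	&pio {
		mmc1_pins: mmc1-pins {
			pins-cmd-dat {
				pinmux = <PINMUX_GPIO170__FUNC_MSDC1_CMD>,
					 <PINMUX_GPIO171__FUNC_MSDC1_DAT0>;
			};
			pins-clk {
				pinmux = <PINMUX_GPIO175__FUNC_MSDC1_CLK>;
			};
		};
	};

The exact subnode layout is defined by the pinctrl driver's binding; only the PINMUX_* constants themselves come from this header.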
diff --git a/arch/arm/boot/dts/mt8135-pinfunc.h b/include/dt-bindings/pinctrl/mt8135-pinfunc.h
index ce0cb5a440eb..ce0cb5a440eb 100644
--- a/arch/arm/boot/dts/mt8135-pinfunc.h
+++ b/include/dt-bindings/pinctrl/mt8135-pinfunc.h
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-pinfunc.h b/include/dt-bindings/pinctrl/mt8183-pinfunc.h
index 6221cd712718..6221cd712718 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-pinfunc.h
+++ b/include/dt-bindings/pinctrl/mt8183-pinfunc.h
diff --git a/include/dt-bindings/pinctrl/mt8186-pinfunc.h b/include/dt-bindings/pinctrl/mt8186-pinfunc.h
new file mode 100644
index 000000000000..18d6683c6f65
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt8186-pinfunc.h
@@ -0,0 +1,1174 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2021 MediaTek Inc.
+ * Author: Guodong Liu <Guodong.Liu@mediatek.com>
+ *
+ */
+
+#ifndef __MT8186_PINFUNC_H
+#define __MT8186_PINFUNC_H
+
+#include "mt65xx.h"
+
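+/*
+ * Each PINMUX_GPIOn__FUNC_* value below packs the pin number and the
+ * mux function index into a single constant: mt65xx.h defines
+ * MTK_PIN_NO(x) as ((x) << 8), and the low bits select the function
+ * (0 selects GPIO mode, 1-7 select the alternate functions).
+ */
+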
+#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_I2S0_MCK (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_SPI0_CLK_B (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_I2S2_MCK (MTK_PIN_NO(0) | 3)
+#define PINMUX_GPIO0__FUNC_CMFLASH0 (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_SCP_SPI0_CK (MTK_PIN_NO(0) | 5)
+#define PINMUX_GPIO0__FUNC_TP_GPIO0_AO (MTK_PIN_NO(0) | 6)
+#define PINMUX_GPIO0__FUNC_DBG_MON_A0 (MTK_PIN_NO(0) | 7)
+
+#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_I2S0_BCK (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_SPI0_CSB_B (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_I2S2_BCK (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_CMFLASH1 (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_SCP_SPI0_CS (MTK_PIN_NO(1) | 5)
+#define PINMUX_GPIO1__FUNC_TP_GPIO1_AO (MTK_PIN_NO(1) | 6)
+
+#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_I2S0_LRCK (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_SPI0_MO_B (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_I2S2_LRCK (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_CMFLASH2 (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_SCP_SPI0_MO (MTK_PIN_NO(2) | 5)
+#define PINMUX_GPIO2__FUNC_TP_GPIO2_AO (MTK_PIN_NO(2) | 6)
+
+#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_I2S0_DI (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_SPI0_MI_B (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_I2S2_DI (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_SRCLKENAI1 (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_SCP_SPI0_MI (MTK_PIN_NO(3) | 5)
+#define PINMUX_GPIO3__FUNC_TP_GPIO3_AO (MTK_PIN_NO(3) | 6)
+
+#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_I2S3_DO (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_I2S1_DO (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_TP_GPIO4_AO (MTK_PIN_NO(4) | 6)
+
+#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_TP_GPIO5_AO (MTK_PIN_NO(5) | 6)
+
+#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_I2S3_MCK (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_SPI1_CLK_B (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_I2S1_MCK (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_DPI_DATA22 (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_TP_GPIO6_AO (MTK_PIN_NO(6) | 6)
+
+#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_I2S3_BCK (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_SPI1_CSB_B (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_I2S1_BCK (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_DPI_DATA23 (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_TP_GPIO7_AO (MTK_PIN_NO(7) | 6)
+
+#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_I2S3_LRCK (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_SPI1_MO_B (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_I2S1_LRCK (MTK_PIN_NO(8) | 3)
+#define PINMUX_GPIO8__FUNC_CONN_UART0_RXD (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_SSPM_URXD_AO (MTK_PIN_NO(8) | 5)
+#define PINMUX_GPIO8__FUNC_ADSP_UART_RX (MTK_PIN_NO(8) | 6)
+#define PINMUX_GPIO8__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(8) | 7)
+
+#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_I2S3_DO (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_SPI1_MI_B (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_I2S1_DO (MTK_PIN_NO(9) | 3)
+#define PINMUX_GPIO9__FUNC_CONN_UART0_TXD (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(9) | 5)
+#define PINMUX_GPIO9__FUNC_ADSP_UART_TX (MTK_PIN_NO(9) | 6)
+#define PINMUX_GPIO9__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(9) | 7)
+
+#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_I2S0_MCK (MTK_PIN_NO(10) | 1)
+#define PINMUX_GPIO10__FUNC_SPI4_CLK_A (MTK_PIN_NO(10) | 2)
+#define PINMUX_GPIO10__FUNC_I2S2_MCK (MTK_PIN_NO(10) | 3)
+#define PINMUX_GPIO10__FUNC_SPM_JTAG_TDI (MTK_PIN_NO(10) | 4)
+#define PINMUX_GPIO10__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(10) | 5)
+#define PINMUX_GPIO10__FUNC_ADSP_JTAG_TDI (MTK_PIN_NO(10) | 6)
+#define PINMUX_GPIO10__FUNC_CONN_MCU_TDI (MTK_PIN_NO(10) | 7)
+
+#define PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_I2S0_BCK (MTK_PIN_NO(11) | 1)
+#define PINMUX_GPIO11__FUNC_SPI4_CSB_A (MTK_PIN_NO(11) | 2)
+#define PINMUX_GPIO11__FUNC_I2S2_BCK (MTK_PIN_NO(11) | 3)
+#define PINMUX_GPIO11__FUNC_SPM_JTAG_TRSTN (MTK_PIN_NO(11) | 4)
+#define PINMUX_GPIO11__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(11) | 5)
+#define PINMUX_GPIO11__FUNC_ADSP_JTAG_TRSTN (MTK_PIN_NO(11) | 6)
+#define PINMUX_GPIO11__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(11) | 7)
+
+#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_I2S0_LRCK (MTK_PIN_NO(12) | 1)
+#define PINMUX_GPIO12__FUNC_SPI4_MO_A (MTK_PIN_NO(12) | 2)
+#define PINMUX_GPIO12__FUNC_I2S2_LRCK (MTK_PIN_NO(12) | 3)
+#define PINMUX_GPIO12__FUNC_SPM_JTAG_TCK (MTK_PIN_NO(12) | 4)
+#define PINMUX_GPIO12__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(12) | 5)
+#define PINMUX_GPIO12__FUNC_ADSP_JTAG_TCK (MTK_PIN_NO(12) | 6)
+#define PINMUX_GPIO12__FUNC_CONN_MCU_TCK (MTK_PIN_NO(12) | 7)
+
+#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_I2S0_DI (MTK_PIN_NO(13) | 1)
+#define PINMUX_GPIO13__FUNC_SPI4_MI_A (MTK_PIN_NO(13) | 2)
+#define PINMUX_GPIO13__FUNC_I2S2_DI (MTK_PIN_NO(13) | 3)
+#define PINMUX_GPIO13__FUNC_SPM_JTAG_TDO (MTK_PIN_NO(13) | 4)
+#define PINMUX_GPIO13__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(13) | 5)
+#define PINMUX_GPIO13__FUNC_ADSP_JTAG_TDO (MTK_PIN_NO(13) | 6)
+#define PINMUX_GPIO13__FUNC_CONN_MCU_TDO (MTK_PIN_NO(13) | 7)
+
+#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_CLKM0 (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_SPM_JTAG_TMS (MTK_PIN_NO(14) | 4)
+#define PINMUX_GPIO14__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(14) | 5)
+#define PINMUX_GPIO14__FUNC_ADSP_JTAG_TMS (MTK_PIN_NO(14) | 6)
+#define PINMUX_GPIO14__FUNC_CONN_MCU_TMS (MTK_PIN_NO(14) | 7)
+
+#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(15) | 1)
+#define PINMUX_GPIO15__FUNC_SRCLKENAI1 (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_CLKM1 (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_PWM0 (MTK_PIN_NO(15) | 4)
+
+#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_CONN_WIFI_TXD (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_SRCLKENAI0 (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_CLKM2 (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_PWM1 (MTK_PIN_NO(16) | 4)
+
+#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_CLKM3 (MTK_PIN_NO(17) | 3)
+#define PINMUX_GPIO17__FUNC_PWM2 (MTK_PIN_NO(17) | 4)
+#define PINMUX_GPIO17__FUNC_DBG_MON_A32 (MTK_PIN_NO(17) | 7)
+
+#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_CMVREF0 (MTK_PIN_NO(18) | 2)
+#define PINMUX_GPIO18__FUNC_SPI2_CLK_B (MTK_PIN_NO(18) | 6)
+#define PINMUX_GPIO18__FUNC_DBG_MON_A26 (MTK_PIN_NO(18) | 7)
+
+#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_CMVREF1 (MTK_PIN_NO(19) | 2)
+#define PINMUX_GPIO19__FUNC_ANT_SEL3 (MTK_PIN_NO(19) | 5)
+#define PINMUX_GPIO19__FUNC_SPI2_CSB_B (MTK_PIN_NO(19) | 6)
+#define PINMUX_GPIO19__FUNC_DBG_MON_A2 (MTK_PIN_NO(19) | 7)
+
+#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_CMVREF2 (MTK_PIN_NO(20) | 2)
+#define PINMUX_GPIO20__FUNC_ANT_SEL4 (MTK_PIN_NO(20) | 5)
+#define PINMUX_GPIO20__FUNC_SPI2_MO_B (MTK_PIN_NO(20) | 6)
+#define PINMUX_GPIO20__FUNC_DBG_MON_A3 (MTK_PIN_NO(20) | 7)
+
+#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_I2S0_MCK (MTK_PIN_NO(21) | 1)
+#define PINMUX_GPIO21__FUNC_I2S1_MCK (MTK_PIN_NO(21) | 2)
+#define PINMUX_GPIO21__FUNC_I2S3_MCK (MTK_PIN_NO(21) | 3)
+#define PINMUX_GPIO21__FUNC_ANT_SEL5 (MTK_PIN_NO(21) | 5)
+#define PINMUX_GPIO21__FUNC_SPI2_MI_B (MTK_PIN_NO(21) | 6)
+#define PINMUX_GPIO21__FUNC_DBG_MON_A4 (MTK_PIN_NO(21) | 7)
+
+#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_I2S0_BCK (MTK_PIN_NO(22) | 1)
+#define PINMUX_GPIO22__FUNC_I2S1_BCK (MTK_PIN_NO(22) | 2)
+#define PINMUX_GPIO22__FUNC_I2S3_BCK (MTK_PIN_NO(22) | 3)
+#define PINMUX_GPIO22__FUNC_TDM_RX_LRCK (MTK_PIN_NO(22) | 4)
+#define PINMUX_GPIO22__FUNC_ANT_SEL6 (MTK_PIN_NO(22) | 5)
+#define PINMUX_GPIO22__FUNC_DBG_MON_A5 (MTK_PIN_NO(22) | 7)
+
+#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_I2S0_LRCK (MTK_PIN_NO(23) | 1)
+#define PINMUX_GPIO23__FUNC_I2S1_LRCK (MTK_PIN_NO(23) | 2)
+#define PINMUX_GPIO23__FUNC_I2S3_LRCK (MTK_PIN_NO(23) | 3)
+#define PINMUX_GPIO23__FUNC_TDM_RX_BCK (MTK_PIN_NO(23) | 4)
+#define PINMUX_GPIO23__FUNC_ANT_SEL7 (MTK_PIN_NO(23) | 5)
+#define PINMUX_GPIO23__FUNC_DBG_MON_A6 (MTK_PIN_NO(23) | 7)
+
+#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_I2S0_DI (MTK_PIN_NO(24) | 1)
+#define PINMUX_GPIO24__FUNC_I2S1_DO (MTK_PIN_NO(24) | 2)
+#define PINMUX_GPIO24__FUNC_I2S3_DO (MTK_PIN_NO(24) | 3)
+#define PINMUX_GPIO24__FUNC_TDM_RX_MCK (MTK_PIN_NO(24) | 4)
+#define PINMUX_GPIO24__FUNC_DBG_MON_A7 (MTK_PIN_NO(24) | 7)
+
+#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_I2S2_MCK (MTK_PIN_NO(25) | 1)
+#define PINMUX_GPIO25__FUNC_PCM_CLK (MTK_PIN_NO(25) | 2)
+#define PINMUX_GPIO25__FUNC_SPI4_CLK_B (MTK_PIN_NO(25) | 3)
+#define PINMUX_GPIO25__FUNC_TDM_RX_DATA0 (MTK_PIN_NO(25) | 4)
+#define PINMUX_GPIO25__FUNC_DBG_MON_A8 (MTK_PIN_NO(25) | 7)
+
+#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_I2S2_BCK (MTK_PIN_NO(26) | 1)
+#define PINMUX_GPIO26__FUNC_PCM_SYNC (MTK_PIN_NO(26) | 2)
+#define PINMUX_GPIO26__FUNC_SPI4_CSB_B (MTK_PIN_NO(26) | 3)
+#define PINMUX_GPIO26__FUNC_TDM_RX_DATA1 (MTK_PIN_NO(26) | 4)
+#define PINMUX_GPIO26__FUNC_DBG_MON_A9 (MTK_PIN_NO(26) | 7)
+
+#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_I2S2_LRCK (MTK_PIN_NO(27) | 1)
+#define PINMUX_GPIO27__FUNC_PCM_DI (MTK_PIN_NO(27) | 2)
+#define PINMUX_GPIO27__FUNC_SPI4_MO_B (MTK_PIN_NO(27) | 3)
+#define PINMUX_GPIO27__FUNC_TDM_RX_DATA2 (MTK_PIN_NO(27) | 4)
+#define PINMUX_GPIO27__FUNC_DBG_MON_A10 (MTK_PIN_NO(27) | 7)
+
+#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_I2S2_DI (MTK_PIN_NO(28) | 1)
+#define PINMUX_GPIO28__FUNC_PCM_DO (MTK_PIN_NO(28) | 2)
+#define PINMUX_GPIO28__FUNC_SPI4_MI_B (MTK_PIN_NO(28) | 3)
+#define PINMUX_GPIO28__FUNC_TDM_RX_DATA3 (MTK_PIN_NO(28) | 4)
+
+#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_ANT_SEL0 (MTK_PIN_NO(29) | 1)
+#define PINMUX_GPIO29__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(29) | 2)
+
+#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_ANT_SEL1 (MTK_PIN_NO(30) | 1)
+
+#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_ANT_SEL2 (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(31) | 2)
+#define PINMUX_GPIO31__FUNC_SRCLKENAI1 (MTK_PIN_NO(31) | 3)
+
+#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_URXD0 (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_UTXD0 (MTK_PIN_NO(32) | 2)
+#define PINMUX_GPIO32__FUNC_ADSP_UART_RX (MTK_PIN_NO(32) | 3)
+#define PINMUX_GPIO32__FUNC_TP_URXD1_AO (MTK_PIN_NO(32) | 4)
+
+#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_UTXD0 (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_URXD0 (MTK_PIN_NO(33) | 2)
+#define PINMUX_GPIO33__FUNC_ADSP_UART_TX (MTK_PIN_NO(33) | 3)
+#define PINMUX_GPIO33__FUNC_TP_UTXD1_AO (MTK_PIN_NO(33) | 4)
+
+#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_URXD1 (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_TP_URXD2_AO (MTK_PIN_NO(34) | 2)
+#define PINMUX_GPIO34__FUNC_SSPM_URXD_AO (MTK_PIN_NO(34) | 3)
+#define PINMUX_GPIO34__FUNC_ADSP_UART_RX (MTK_PIN_NO(34) | 4)
+#define PINMUX_GPIO34__FUNC_CONN_UART0_RXD (MTK_PIN_NO(34) | 5)
+
+#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_UTXD1 (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_TP_UTXD2_AO (MTK_PIN_NO(35) | 2)
+#define PINMUX_GPIO35__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(35) | 3)
+#define PINMUX_GPIO35__FUNC_ADSP_UART_TX (MTK_PIN_NO(35) | 4)
+#define PINMUX_GPIO35__FUNC_CONN_UART0_TXD (MTK_PIN_NO(35) | 5)
+#define PINMUX_GPIO35__FUNC_CONN_WIFI_TXD (MTK_PIN_NO(35) | 6)
+
+#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_SPI0_CLK_A (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_CLKM0 (MTK_PIN_NO(36) | 2)
+#define PINMUX_GPIO36__FUNC_SCP_SPI0_CK (MTK_PIN_NO(36) | 4)
+#define PINMUX_GPIO36__FUNC_SPINOR_CK (MTK_PIN_NO(36) | 5)
+#define PINMUX_GPIO36__FUNC_DBG_MON_A11 (MTK_PIN_NO(36) | 7)
+
+#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_SPI0_CSB_A (MTK_PIN_NO(37) | 1)
+#define PINMUX_GPIO37__FUNC_CLKM1 (MTK_PIN_NO(37) | 2)
+#define PINMUX_GPIO37__FUNC_PWM0 (MTK_PIN_NO(37) | 3)
+#define PINMUX_GPIO37__FUNC_SCP_SPI0_CS (MTK_PIN_NO(37) | 4)
+#define PINMUX_GPIO37__FUNC_SPINOR_CS (MTK_PIN_NO(37) | 5)
+#define PINMUX_GPIO37__FUNC_DBG_MON_A12 (MTK_PIN_NO(37) | 7)
+
+#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_SPI0_MO_A (MTK_PIN_NO(38) | 1)
+#define PINMUX_GPIO38__FUNC_CLKM2 (MTK_PIN_NO(38) | 2)
+#define PINMUX_GPIO38__FUNC_PWM1 (MTK_PIN_NO(38) | 3)
+#define PINMUX_GPIO38__FUNC_SCP_SPI0_MO (MTK_PIN_NO(38) | 4)
+#define PINMUX_GPIO38__FUNC_SPINOR_IO0 (MTK_PIN_NO(38) | 5)
+#define PINMUX_GPIO38__FUNC_DBG_MON_A13 (MTK_PIN_NO(38) | 7)
+
+#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_SPI0_MI_A (MTK_PIN_NO(39) | 1)
+#define PINMUX_GPIO39__FUNC_CLKM3 (MTK_PIN_NO(39) | 2)
+#define PINMUX_GPIO39__FUNC_PWM2 (MTK_PIN_NO(39) | 3)
+#define PINMUX_GPIO39__FUNC_SCP_SPI0_MI (MTK_PIN_NO(39) | 4)
+#define PINMUX_GPIO39__FUNC_SPINOR_IO1 (MTK_PIN_NO(39) | 5)
+#define PINMUX_GPIO39__FUNC_DBG_MON_A14 (MTK_PIN_NO(39) | 7)
+
+#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_SPI1_CLK_A (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_SCP_SPI1_CK (MTK_PIN_NO(40) | 2)
+#define PINMUX_GPIO40__FUNC_UCTS0 (MTK_PIN_NO(40) | 4)
+#define PINMUX_GPIO40__FUNC_SPINOR_IO2 (MTK_PIN_NO(40) | 5)
+#define PINMUX_GPIO40__FUNC_TP_UCTS1_AO (MTK_PIN_NO(40) | 6)
+#define PINMUX_GPIO40__FUNC_DBG_MON_A15 (MTK_PIN_NO(40) | 7)
+
+#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_SPI1_CSB_A (MTK_PIN_NO(41) | 1)
+#define PINMUX_GPIO41__FUNC_SCP_SPI1_CS (MTK_PIN_NO(41) | 2)
+#define PINMUX_GPIO41__FUNC_PWM0 (MTK_PIN_NO(41) | 3)
+#define PINMUX_GPIO41__FUNC_URTS0 (MTK_PIN_NO(41) | 4)
+#define PINMUX_GPIO41__FUNC_SPINOR_IO3 (MTK_PIN_NO(41) | 5)
+#define PINMUX_GPIO41__FUNC_TP_URTS1_AO (MTK_PIN_NO(41) | 6)
+#define PINMUX_GPIO41__FUNC_DBG_MON_A16 (MTK_PIN_NO(41) | 7)
+
+#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_SPI1_MO_A (MTK_PIN_NO(42) | 1)
+#define PINMUX_GPIO42__FUNC_SCP_SPI1_MO (MTK_PIN_NO(42) | 2)
+#define PINMUX_GPIO42__FUNC_PWM1 (MTK_PIN_NO(42) | 3)
+#define PINMUX_GPIO42__FUNC_UCTS1 (MTK_PIN_NO(42) | 4)
+#define PINMUX_GPIO42__FUNC_TP_UCTS2_AO (MTK_PIN_NO(42) | 6)
+#define PINMUX_GPIO42__FUNC_DBG_MON_A17 (MTK_PIN_NO(42) | 7)
+
+#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_SPI1_MI_A (MTK_PIN_NO(43) | 1)
+#define PINMUX_GPIO43__FUNC_SCP_SPI1_MI (MTK_PIN_NO(43) | 2)
+#define PINMUX_GPIO43__FUNC_PWM2 (MTK_PIN_NO(43) | 3)
+#define PINMUX_GPIO43__FUNC_URTS1 (MTK_PIN_NO(43) | 4)
+#define PINMUX_GPIO43__FUNC_TP_URTS2_AO (MTK_PIN_NO(43) | 6)
+#define PINMUX_GPIO43__FUNC_DBG_MON_A18 (MTK_PIN_NO(43) | 7)
+
+#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_SPI2_CLK_A (MTK_PIN_NO(44) | 1)
+#define PINMUX_GPIO44__FUNC_SCP_SPI0_CK (MTK_PIN_NO(44) | 2)
+#define PINMUX_GPIO44__FUNC_DBG_MON_A19 (MTK_PIN_NO(44) | 7)
+
+#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_SPI2_CSB_A (MTK_PIN_NO(45) | 1)
+#define PINMUX_GPIO45__FUNC_SCP_SPI0_CS (MTK_PIN_NO(45) | 2)
+#define PINMUX_GPIO45__FUNC_DBG_MON_A20 (MTK_PIN_NO(45) | 7)
+
+#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_SPI2_MO_A (MTK_PIN_NO(46) | 1)
+#define PINMUX_GPIO46__FUNC_SCP_SPI0_MO (MTK_PIN_NO(46) | 2)
+#define PINMUX_GPIO46__FUNC_DBG_MON_A21 (MTK_PIN_NO(46) | 7)
+
+#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_SPI2_MI_A (MTK_PIN_NO(47) | 1)
+#define PINMUX_GPIO47__FUNC_SCP_SPI0_MI (MTK_PIN_NO(47) | 2)
+#define PINMUX_GPIO47__FUNC_DBG_MON_A22 (MTK_PIN_NO(47) | 7)
+
+#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_SPI3_CLK (MTK_PIN_NO(48) | 1)
+#define PINMUX_GPIO48__FUNC_TP_URXD1_AO (MTK_PIN_NO(48) | 2)
+#define PINMUX_GPIO48__FUNC_TP_URXD2_AO (MTK_PIN_NO(48) | 3)
+#define PINMUX_GPIO48__FUNC_URXD1 (MTK_PIN_NO(48) | 4)
+#define PINMUX_GPIO48__FUNC_I2S2_MCK (MTK_PIN_NO(48) | 5)
+#define PINMUX_GPIO48__FUNC_SCP_SPI0_CK (MTK_PIN_NO(48) | 6)
+
+#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_SPI3_CSB (MTK_PIN_NO(49) | 1)
+#define PINMUX_GPIO49__FUNC_TP_UTXD1_AO (MTK_PIN_NO(49) | 2)
+#define PINMUX_GPIO49__FUNC_TP_UTXD2_AO (MTK_PIN_NO(49) | 3)
+#define PINMUX_GPIO49__FUNC_UTXD1 (MTK_PIN_NO(49) | 4)
+#define PINMUX_GPIO49__FUNC_I2S2_BCK (MTK_PIN_NO(49) | 5)
+#define PINMUX_GPIO49__FUNC_SCP_SPI0_CS (MTK_PIN_NO(49) | 6)
+
+#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_SPI3_MO (MTK_PIN_NO(50) | 1)
+#define PINMUX_GPIO50__FUNC_I2S2_LRCK (MTK_PIN_NO(50) | 5)
+#define PINMUX_GPIO50__FUNC_SCP_SPI0_MO (MTK_PIN_NO(50) | 6)
+
+#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_SPI3_MI (MTK_PIN_NO(51) | 1)
+#define PINMUX_GPIO51__FUNC_I2S2_DI (MTK_PIN_NO(51) | 5)
+#define PINMUX_GPIO51__FUNC_SCP_SPI0_MI (MTK_PIN_NO(51) | 6)
+
+#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_SPI5_CLK (MTK_PIN_NO(52) | 1)
+#define PINMUX_GPIO52__FUNC_I2S2_MCK (MTK_PIN_NO(52) | 2)
+#define PINMUX_GPIO52__FUNC_I2S1_MCK (MTK_PIN_NO(52) | 3)
+#define PINMUX_GPIO52__FUNC_SCP_SPI1_CK (MTK_PIN_NO(52) | 4)
+#define PINMUX_GPIO52__FUNC_LVTS_26M (MTK_PIN_NO(52) | 5)
+#define PINMUX_GPIO52__FUNC_DFD_TCK_XI (MTK_PIN_NO(52) | 6)
+#define PINMUX_GPIO52__FUNC_DBG_MON_B30 (MTK_PIN_NO(52) | 7)
+
+#define PINMUX_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_SPI5_CSB (MTK_PIN_NO(53) | 1)
+#define PINMUX_GPIO53__FUNC_I2S2_BCK (MTK_PIN_NO(53) | 2)
+#define PINMUX_GPIO53__FUNC_I2S1_BCK (MTK_PIN_NO(53) | 3)
+#define PINMUX_GPIO53__FUNC_SCP_SPI1_CS (MTK_PIN_NO(53) | 4)
+#define PINMUX_GPIO53__FUNC_LVTS_FOUT (MTK_PIN_NO(53) | 5)
+#define PINMUX_GPIO53__FUNC_DFD_TDI (MTK_PIN_NO(53) | 6)
+#define PINMUX_GPIO53__FUNC_DBG_MON_B31 (MTK_PIN_NO(53) | 7)
+
+#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_SPI5_MO (MTK_PIN_NO(54) | 1)
+#define PINMUX_GPIO54__FUNC_I2S2_LRCK (MTK_PIN_NO(54) | 2)
+#define PINMUX_GPIO54__FUNC_I2S1_LRCK (MTK_PIN_NO(54) | 3)
+#define PINMUX_GPIO54__FUNC_SCP_SPI1_MO (MTK_PIN_NO(54) | 4)
+#define PINMUX_GPIO54__FUNC_LVTS_SCK (MTK_PIN_NO(54) | 5)
+#define PINMUX_GPIO54__FUNC_DFD_TDO (MTK_PIN_NO(54) | 6)
+#define PINMUX_GPIO54__FUNC_DBG_MON_A1 (MTK_PIN_NO(54) | 7)
+
+#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_SPI5_MI (MTK_PIN_NO(55) | 1)
+#define PINMUX_GPIO55__FUNC_I2S2_DI (MTK_PIN_NO(55) | 2)
+#define PINMUX_GPIO55__FUNC_I2S1_DO (MTK_PIN_NO(55) | 3)
+#define PINMUX_GPIO55__FUNC_SCP_SPI1_MI (MTK_PIN_NO(55) | 4)
+#define PINMUX_GPIO55__FUNC_LVTS_SDO (MTK_PIN_NO(55) | 5)
+#define PINMUX_GPIO55__FUNC_DFD_TMS (MTK_PIN_NO(55) | 6)
+#define PINMUX_GPIO55__FUNC_DBG_MON_B32 (MTK_PIN_NO(55) | 7)
+
+#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_I2S1_DO (MTK_PIN_NO(56) | 1)
+#define PINMUX_GPIO56__FUNC_I2S3_DO (MTK_PIN_NO(56) | 2)
+#define PINMUX_GPIO56__FUNC_DBG_MON_A23 (MTK_PIN_NO(56) | 7)
+
+#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_I2S1_BCK (MTK_PIN_NO(57) | 1)
+#define PINMUX_GPIO57__FUNC_I2S3_BCK (MTK_PIN_NO(57) | 2)
+#define PINMUX_GPIO57__FUNC_DBG_MON_A24 (MTK_PIN_NO(57) | 7)
+
+#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_I2S1_LRCK (MTK_PIN_NO(58) | 1)
+#define PINMUX_GPIO58__FUNC_I2S3_LRCK (MTK_PIN_NO(58) | 2)
+#define PINMUX_GPIO58__FUNC_DBG_MON_A25 (MTK_PIN_NO(58) | 7)
+
+#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_I2S1_MCK (MTK_PIN_NO(59) | 1)
+#define PINMUX_GPIO59__FUNC_I2S3_MCK (MTK_PIN_NO(59) | 2)
+#define PINMUX_GPIO59__FUNC_DBG_MON_A27 (MTK_PIN_NO(59) | 7)
+
+#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_TDM_RX_LRCK (MTK_PIN_NO(60) | 1)
+#define PINMUX_GPIO60__FUNC_ANT_SEL3 (MTK_PIN_NO(60) | 2)
+#define PINMUX_GPIO60__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(60) | 5)
+
+#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_TDM_RX_BCK (MTK_PIN_NO(61) | 1)
+#define PINMUX_GPIO61__FUNC_ANT_SEL4 (MTK_PIN_NO(61) | 2)
+#define PINMUX_GPIO61__FUNC_SPINOR_CK (MTK_PIN_NO(61) | 4)
+#define PINMUX_GPIO61__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(61) | 5)
+
+#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_TDM_RX_MCK (MTK_PIN_NO(62) | 1)
+#define PINMUX_GPIO62__FUNC_ANT_SEL5 (MTK_PIN_NO(62) | 2)
+#define PINMUX_GPIO62__FUNC_SPINOR_CS (MTK_PIN_NO(62) | 4)
+#define PINMUX_GPIO62__FUNC_CONN_MCU_TDI (MTK_PIN_NO(62) | 5)
+
+#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_TDM_RX_DATA0 (MTK_PIN_NO(63) | 1)
+#define PINMUX_GPIO63__FUNC_ANT_SEL6 (MTK_PIN_NO(63) | 2)
+#define PINMUX_GPIO63__FUNC_SPINOR_IO0 (MTK_PIN_NO(63) | 4)
+#define PINMUX_GPIO63__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(63) | 5)
+
+#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_TDM_RX_DATA1 (MTK_PIN_NO(64) | 1)
+#define PINMUX_GPIO64__FUNC_ANT_SEL7 (MTK_PIN_NO(64) | 2)
+#define PINMUX_GPIO64__FUNC_PWM0 (MTK_PIN_NO(64) | 3)
+#define PINMUX_GPIO64__FUNC_SPINOR_IO1 (MTK_PIN_NO(64) | 4)
+#define PINMUX_GPIO64__FUNC_CONN_MCU_TCK (MTK_PIN_NO(64) | 5)
+
+#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_TDM_RX_DATA2 (MTK_PIN_NO(65) | 1)
+#define PINMUX_GPIO65__FUNC_UCTS0 (MTK_PIN_NO(65) | 2)
+#define PINMUX_GPIO65__FUNC_PWM1 (MTK_PIN_NO(65) | 3)
+#define PINMUX_GPIO65__FUNC_SPINOR_IO2 (MTK_PIN_NO(65) | 4)
+#define PINMUX_GPIO65__FUNC_CONN_MCU_TDO (MTK_PIN_NO(65) | 5)
+#define PINMUX_GPIO65__FUNC_TP_UCTS1_AO (MTK_PIN_NO(65) | 6)
+#define PINMUX_GPIO65__FUNC_TP_UCTS2_AO (MTK_PIN_NO(65) | 7)
+
+#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_TDM_RX_DATA3 (MTK_PIN_NO(66) | 1)
+#define PINMUX_GPIO66__FUNC_URTS0 (MTK_PIN_NO(66) | 2)
+#define PINMUX_GPIO66__FUNC_PWM2 (MTK_PIN_NO(66) | 3)
+#define PINMUX_GPIO66__FUNC_SPINOR_IO3 (MTK_PIN_NO(66) | 4)
+#define PINMUX_GPIO66__FUNC_CONN_MCU_TMS (MTK_PIN_NO(66) | 5)
+#define PINMUX_GPIO66__FUNC_TP_URTS1_AO (MTK_PIN_NO(66) | 6)
+#define PINMUX_GPIO66__FUNC_TP_URTS2_AO (MTK_PIN_NO(66) | 7)
+
+#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_MSDC0_DSL (MTK_PIN_NO(67) | 1)
+
+#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_MSDC0_CLK (MTK_PIN_NO(68) | 1)
+
+#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_MSDC0_CMD (MTK_PIN_NO(69) | 1)
+
+#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_MSDC0_RSTB (MTK_PIN_NO(70) | 1)
+
+#define PINMUX_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define PINMUX_GPIO71__FUNC_MSDC0_DAT0 (MTK_PIN_NO(71) | 1)
+
+#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define PINMUX_GPIO72__FUNC_MSDC0_DAT1 (MTK_PIN_NO(72) | 1)
+
+#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define PINMUX_GPIO73__FUNC_MSDC0_DAT2 (MTK_PIN_NO(73) | 1)
+
+#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_MSDC0_DAT3 (MTK_PIN_NO(74) | 1)
+
+#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_MSDC0_DAT4 (MTK_PIN_NO(75) | 1)
+
+#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_MSDC0_DAT5 (MTK_PIN_NO(76) | 1)
+
+#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_MSDC0_DAT6 (MTK_PIN_NO(77) | 1)
+
+#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_MSDC0_DAT7 (MTK_PIN_NO(78) | 1)
+
+#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_KPCOL0 (MTK_PIN_NO(79) | 1)
+
+#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_KPCOL1 (MTK_PIN_NO(80) | 1)
+#define PINMUX_GPIO80__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(80) | 2)
+#define PINMUX_GPIO80__FUNC_PWM0 (MTK_PIN_NO(80) | 3)
+#define PINMUX_GPIO80__FUNC_CLKM0 (MTK_PIN_NO(80) | 4)
+
+#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_KPROW0 (MTK_PIN_NO(81) | 1)
+#define PINMUX_GPIO81__FUNC_PWM1 (MTK_PIN_NO(81) | 3)
+#define PINMUX_GPIO81__FUNC_CLKM1 (MTK_PIN_NO(81) | 4)
+
+#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_KPROW1 (MTK_PIN_NO(82) | 1)
+#define PINMUX_GPIO82__FUNC_PWM2 (MTK_PIN_NO(82) | 3)
+#define PINMUX_GPIO82__FUNC_CLKM2 (MTK_PIN_NO(82) | 4)
+
+#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_AP_GOOD (MTK_PIN_NO(83) | 1)
+#define PINMUX_GPIO83__FUNC_GPS_PPS (MTK_PIN_NO(83) | 2)
+#define PINMUX_GPIO83__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(83) | 4)
+#define PINMUX_GPIO83__FUNC_DBG_MON_A28 (MTK_PIN_NO(83) | 7)
+
+#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_MSDC1_CLK (MTK_PIN_NO(84) | 1)
+#define PINMUX_GPIO84__FUNC_ADSP_JTAG_TCK (MTK_PIN_NO(84) | 2)
+#define PINMUX_GPIO84__FUNC_UDI_TCK (MTK_PIN_NO(84) | 4)
+#define PINMUX_GPIO84__FUNC_CONN_DSP_JCK (MTK_PIN_NO(84) | 5)
+#define PINMUX_GPIO84__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(84) | 6)
+#define PINMUX_GPIO84__FUNC_DFD_TCK_XI (MTK_PIN_NO(84) | 7)
+
+#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_MSDC1_CMD (MTK_PIN_NO(85) | 1)
+#define PINMUX_GPIO85__FUNC_ADSP_JTAG_TMS (MTK_PIN_NO(85) | 2)
+#define PINMUX_GPIO85__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(85) | 3)
+#define PINMUX_GPIO85__FUNC_UDI_TMS (MTK_PIN_NO(85) | 4)
+#define PINMUX_GPIO85__FUNC_CONN_DSP_JMS (MTK_PIN_NO(85) | 5)
+#define PINMUX_GPIO85__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(85) | 6)
+#define PINMUX_GPIO85__FUNC_DFD_TMS (MTK_PIN_NO(85) | 7)
+
+#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_MSDC1_DAT0 (MTK_PIN_NO(86) | 1)
+#define PINMUX_GPIO86__FUNC_ADSP_JTAG_TDI (MTK_PIN_NO(86) | 2)
+#define PINMUX_GPIO86__FUNC_UDI_TDI (MTK_PIN_NO(86) | 4)
+#define PINMUX_GPIO86__FUNC_CONN_DSP_JDI (MTK_PIN_NO(86) | 5)
+#define PINMUX_GPIO86__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(86) | 6)
+#define PINMUX_GPIO86__FUNC_DFD_TDI (MTK_PIN_NO(86) | 7)
+
+#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_MSDC1_DAT1 (MTK_PIN_NO(87) | 1)
+#define PINMUX_GPIO87__FUNC_ADSP_JTAG_TDO (MTK_PIN_NO(87) | 2)
+#define PINMUX_GPIO87__FUNC_UDI_TDO (MTK_PIN_NO(87) | 4)
+#define PINMUX_GPIO87__FUNC_CONN_DSP_JDO (MTK_PIN_NO(87) | 5)
+#define PINMUX_GPIO87__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(87) | 6)
+#define PINMUX_GPIO87__FUNC_DFD_TDO (MTK_PIN_NO(87) | 7)
+
+#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_MSDC1_DAT2 (MTK_PIN_NO(88) | 1)
+#define PINMUX_GPIO88__FUNC_ADSP_JTAG_TRSTN (MTK_PIN_NO(88) | 2)
+#define PINMUX_GPIO88__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(88) | 3)
+#define PINMUX_GPIO88__FUNC_UDI_NTRST (MTK_PIN_NO(88) | 4)
+#define PINMUX_GPIO88__FUNC_CONN_WIFI_TXD (MTK_PIN_NO(88) | 5)
+#define PINMUX_GPIO88__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(88) | 6)
+
+#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_MSDC1_DAT3 (MTK_PIN_NO(89) | 1)
+#define PINMUX_GPIO89__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(89) | 5)
+
+#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_IDDIG_P0 (MTK_PIN_NO(90) | 1)
+#define PINMUX_GPIO90__FUNC_PGD_HV_HSC_PWR4 (MTK_PIN_NO(90) | 4)
+#define PINMUX_GPIO90__FUNC_GDU_SUM_TROOP2_2 (MTK_PIN_NO(90) | 5)
+
+#define PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_USB_DRVVBUS_P0 (MTK_PIN_NO(91) | 1)
+#define PINMUX_GPIO91__FUNC_PGD_HV_HSC_PWR5 (MTK_PIN_NO(91) | 4)
+#define PINMUX_GPIO91__FUNC_GDU_TROOPS_DET0 (MTK_PIN_NO(91) | 5)
+
+#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_VBUS_VALID_P0 (MTK_PIN_NO(92) | 1)
+#define PINMUX_GPIO92__FUNC_PGD_DA_EFUSE_RDY (MTK_PIN_NO(92) | 4)
+#define PINMUX_GPIO92__FUNC_GDU_TROOPS_DET1 (MTK_PIN_NO(92) | 5)
+
+#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_IDDIG_P1 (MTK_PIN_NO(93) | 1)
+#define PINMUX_GPIO93__FUNC_PWM0 (MTK_PIN_NO(93) | 2)
+#define PINMUX_GPIO93__FUNC_CLKM0 (MTK_PIN_NO(93) | 3)
+#define PINMUX_GPIO93__FUNC_PGD_DA_EFUSE_RDY_PRE (MTK_PIN_NO(93) | 4)
+#define PINMUX_GPIO93__FUNC_GDU_TROOPS_DET2 (MTK_PIN_NO(93) | 5)
+
+#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_USB_DRVVBUS_P1 (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_PWM1 (MTK_PIN_NO(94) | 2)
+#define PINMUX_GPIO94__FUNC_CLKM1 (MTK_PIN_NO(94) | 3)
+#define PINMUX_GPIO94__FUNC_PGD_DA_PWRGD_RESET (MTK_PIN_NO(94) | 4)
+
+#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_VBUS_VALID_P1 (MTK_PIN_NO(95) | 1)
+#define PINMUX_GPIO95__FUNC_PWM2 (MTK_PIN_NO(95) | 2)
+#define PINMUX_GPIO95__FUNC_CLKM2 (MTK_PIN_NO(95) | 3)
+#define PINMUX_GPIO95__FUNC_PGD_DA_PWRGD_ENB (MTK_PIN_NO(95) | 4)
+
+#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_DSI_TE (MTK_PIN_NO(96) | 1)
+#define PINMUX_GPIO96__FUNC_DBG_MON_A29 (MTK_PIN_NO(96) | 7)
+
+#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_DISP_PWM (MTK_PIN_NO(97) | 1)
+#define PINMUX_GPIO97__FUNC_DBG_MON_A30 (MTK_PIN_NO(97) | 7)
+
+#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_LCM_RST (MTK_PIN_NO(98) | 1)
+
+#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_DPI_PCLK (MTK_PIN_NO(99) | 1)
+#define PINMUX_GPIO99__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(99) | 2)
+#define PINMUX_GPIO99__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(99) | 3)
+#define PINMUX_GPIO99__FUNC_ANT_SEL0 (MTK_PIN_NO(99) | 5)
+#define PINMUX_GPIO99__FUNC_TP_GPIO0_AO (MTK_PIN_NO(99) | 6)
+#define PINMUX_GPIO99__FUNC_PGD_LV_LSC_PWR0 (MTK_PIN_NO(99) | 7)
+
+#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_DPI_VSYNC (MTK_PIN_NO(100) | 1)
+#define PINMUX_GPIO100__FUNC_KPCOL2 (MTK_PIN_NO(100) | 2)
+#define PINMUX_GPIO100__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(100) | 3)
+#define PINMUX_GPIO100__FUNC_ANT_SEL1 (MTK_PIN_NO(100) | 5)
+#define PINMUX_GPIO100__FUNC_TP_GPIO1_AO (MTK_PIN_NO(100) | 6)
+#define PINMUX_GPIO100__FUNC_PGD_LV_LSC_PWR1 (MTK_PIN_NO(100) | 7)
+
+#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_DPI_HSYNC (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_KPROW2 (MTK_PIN_NO(101) | 2)
+#define PINMUX_GPIO101__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(101) | 3)
+#define PINMUX_GPIO101__FUNC_ANT_SEL2 (MTK_PIN_NO(101) | 5)
+#define PINMUX_GPIO101__FUNC_TP_GPIO2_AO (MTK_PIN_NO(101) | 6)
+#define PINMUX_GPIO101__FUNC_PGD_LV_LSC_PWR2 (MTK_PIN_NO(101) | 7)
+
+#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_DPI_DE (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(102) | 3)
+#define PINMUX_GPIO102__FUNC_ANT_SEL3 (MTK_PIN_NO(102) | 5)
+#define PINMUX_GPIO102__FUNC_TP_GPIO3_AO (MTK_PIN_NO(102) | 6)
+#define PINMUX_GPIO102__FUNC_PGD_LV_LSC_PWR3 (MTK_PIN_NO(102) | 7)
+
+#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_DPI_DATA0 (MTK_PIN_NO(103) | 1)
+#define PINMUX_GPIO103__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(103) | 3)
+#define PINMUX_GPIO103__FUNC_CLKM0 (MTK_PIN_NO(103) | 4)
+#define PINMUX_GPIO103__FUNC_ANT_SEL4 (MTK_PIN_NO(103) | 5)
+#define PINMUX_GPIO103__FUNC_TP_GPIO4_AO (MTK_PIN_NO(103) | 6)
+#define PINMUX_GPIO103__FUNC_PGD_LV_LSC_PWR4 (MTK_PIN_NO(103) | 7)
+
+#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_DPI_DATA1 (MTK_PIN_NO(104) | 1)
+#define PINMUX_GPIO104__FUNC_GPS_PPS (MTK_PIN_NO(104) | 2)
+#define PINMUX_GPIO104__FUNC_UCTS2 (MTK_PIN_NO(104) | 3)
+#define PINMUX_GPIO104__FUNC_CLKM1 (MTK_PIN_NO(104) | 4)
+#define PINMUX_GPIO104__FUNC_ANT_SEL5 (MTK_PIN_NO(104) | 5)
+#define PINMUX_GPIO104__FUNC_TP_GPIO5_AO (MTK_PIN_NO(104) | 6)
+#define PINMUX_GPIO104__FUNC_PGD_LV_LSC_PWR5 (MTK_PIN_NO(104) | 7)
+
+#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_DPI_DATA2 (MTK_PIN_NO(105) | 1)
+#define PINMUX_GPIO105__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(105) | 2)
+#define PINMUX_GPIO105__FUNC_URTS2 (MTK_PIN_NO(105) | 3)
+#define PINMUX_GPIO105__FUNC_CLKM2 (MTK_PIN_NO(105) | 4)
+#define PINMUX_GPIO105__FUNC_ANT_SEL6 (MTK_PIN_NO(105) | 5)
+#define PINMUX_GPIO105__FUNC_TP_GPIO6_AO (MTK_PIN_NO(105) | 6)
+#define PINMUX_GPIO105__FUNC_PGD_LV_HSC_PWR0 (MTK_PIN_NO(105) | 7)
+
+#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_DPI_DATA3 (MTK_PIN_NO(106) | 1)
+#define PINMUX_GPIO106__FUNC_TP_UTXD1_AO (MTK_PIN_NO(106) | 2)
+#define PINMUX_GPIO106__FUNC_UTXD2 (MTK_PIN_NO(106) | 3)
+#define PINMUX_GPIO106__FUNC_PWM0 (MTK_PIN_NO(106) | 4)
+#define PINMUX_GPIO106__FUNC_ANT_SEL7 (MTK_PIN_NO(106) | 5)
+#define PINMUX_GPIO106__FUNC_TP_GPIO7_AO (MTK_PIN_NO(106) | 6)
+#define PINMUX_GPIO106__FUNC_PGD_LV_HSC_PWR1 (MTK_PIN_NO(106) | 7)
+
+#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_DPI_DATA4 (MTK_PIN_NO(107) | 1)
+#define PINMUX_GPIO107__FUNC_TP_URXD1_AO (MTK_PIN_NO(107) | 2)
+#define PINMUX_GPIO107__FUNC_URXD2 (MTK_PIN_NO(107) | 3)
+#define PINMUX_GPIO107__FUNC_PWM1 (MTK_PIN_NO(107) | 4)
+#define PINMUX_GPIO107__FUNC_GDU_SUM_TROOP0_0 (MTK_PIN_NO(107) | 6)
+#define PINMUX_GPIO107__FUNC_PGD_LV_HSC_PWR2 (MTK_PIN_NO(107) | 7)
+
+#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_DPI_DATA5 (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_TP_UCTS1_AO (MTK_PIN_NO(108) | 2)
+#define PINMUX_GPIO108__FUNC_UCTS0 (MTK_PIN_NO(108) | 3)
+#define PINMUX_GPIO108__FUNC_PWM2 (MTK_PIN_NO(108) | 4)
+#define PINMUX_GPIO108__FUNC_GDU_SUM_TROOP0_1 (MTK_PIN_NO(108) | 6)
+#define PINMUX_GPIO108__FUNC_PGD_LV_HSC_PWR3 (MTK_PIN_NO(108) | 7)
+
+#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_DPI_DATA6 (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_TP_URTS1_AO (MTK_PIN_NO(109) | 2)
+#define PINMUX_GPIO109__FUNC_URTS0 (MTK_PIN_NO(109) | 3)
+#define PINMUX_GPIO109__FUNC_I2S0_DI (MTK_PIN_NO(109) | 4)
+#define PINMUX_GPIO109__FUNC_I2S2_DI (MTK_PIN_NO(109) | 5)
+#define PINMUX_GPIO109__FUNC_GDU_SUM_TROOP0_2 (MTK_PIN_NO(109) | 6)
+#define PINMUX_GPIO109__FUNC_PGD_LV_HSC_PWR4 (MTK_PIN_NO(109) | 7)
+
+#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_DPI_DATA7 (MTK_PIN_NO(110) | 1)
+#define PINMUX_GPIO110__FUNC_TP_UCTS2_AO (MTK_PIN_NO(110) | 2)
+#define PINMUX_GPIO110__FUNC_UCTS1 (MTK_PIN_NO(110) | 3)
+#define PINMUX_GPIO110__FUNC_I2S3_BCK (MTK_PIN_NO(110) | 4)
+#define PINMUX_GPIO110__FUNC_I2S1_BCK (MTK_PIN_NO(110) | 5)
+#define PINMUX_GPIO110__FUNC_GDU_SUM_TROOP1_0 (MTK_PIN_NO(110) | 6)
+#define PINMUX_GPIO110__FUNC_PGD_LV_HSC_PWR5 (MTK_PIN_NO(110) | 7)
+
+#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_DPI_DATA8 (MTK_PIN_NO(111) | 1)
+#define PINMUX_GPIO111__FUNC_TP_URTS2_AO (MTK_PIN_NO(111) | 2)
+#define PINMUX_GPIO111__FUNC_URTS1 (MTK_PIN_NO(111) | 3)
+#define PINMUX_GPIO111__FUNC_I2S3_MCK (MTK_PIN_NO(111) | 4)
+#define PINMUX_GPIO111__FUNC_I2S1_MCK (MTK_PIN_NO(111) | 5)
+#define PINMUX_GPIO111__FUNC_GDU_SUM_TROOP1_1 (MTK_PIN_NO(111) | 6)
+#define PINMUX_GPIO111__FUNC_PGD_HV_HSC_PWR0 (MTK_PIN_NO(111) | 7)
+
+#define PINMUX_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_DPI_DATA9 (MTK_PIN_NO(112) | 1)
+#define PINMUX_GPIO112__FUNC_TP_URXD2_AO (MTK_PIN_NO(112) | 2)
+#define PINMUX_GPIO112__FUNC_URXD1 (MTK_PIN_NO(112) | 3)
+#define PINMUX_GPIO112__FUNC_I2S3_LRCK (MTK_PIN_NO(112) | 4)
+#define PINMUX_GPIO112__FUNC_I2S1_LRCK (MTK_PIN_NO(112) | 5)
+#define PINMUX_GPIO112__FUNC_GDU_SUM_TROOP1_2 (MTK_PIN_NO(112) | 6)
+#define PINMUX_GPIO112__FUNC_PGD_HV_HSC_PWR1 (MTK_PIN_NO(112) | 7)
+
+#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_DPI_DATA10 (MTK_PIN_NO(113) | 1)
+#define PINMUX_GPIO113__FUNC_TP_UTXD2_AO (MTK_PIN_NO(113) | 2)
+#define PINMUX_GPIO113__FUNC_UTXD1 (MTK_PIN_NO(113) | 3)
+#define PINMUX_GPIO113__FUNC_I2S3_DO (MTK_PIN_NO(113) | 4)
+#define PINMUX_GPIO113__FUNC_I2S1_DO (MTK_PIN_NO(113) | 5)
+#define PINMUX_GPIO113__FUNC_GDU_SUM_TROOP2_0 (MTK_PIN_NO(113) | 6)
+#define PINMUX_GPIO113__FUNC_PGD_HV_HSC_PWR2 (MTK_PIN_NO(113) | 7)
+
+#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_DPI_DATA11 (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_GDU_SUM_TROOP2_1 (MTK_PIN_NO(114) | 6)
+#define PINMUX_GPIO114__FUNC_PGD_HV_HSC_PWR3 (MTK_PIN_NO(114) | 7)
+
+#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_PCM_CLK (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_I2S0_BCK (MTK_PIN_NO(115) | 2)
+#define PINMUX_GPIO115__FUNC_I2S2_BCK (MTK_PIN_NO(115) | 3)
+
+#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_PCM_SYNC (MTK_PIN_NO(116) | 1)
+#define PINMUX_GPIO116__FUNC_I2S0_LRCK (MTK_PIN_NO(116) | 2)
+#define PINMUX_GPIO116__FUNC_I2S2_LRCK (MTK_PIN_NO(116) | 3)
+
+#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_PCM_DI (MTK_PIN_NO(117) | 1)
+#define PINMUX_GPIO117__FUNC_I2S0_DI (MTK_PIN_NO(117) | 2)
+#define PINMUX_GPIO117__FUNC_I2S2_DI (MTK_PIN_NO(117) | 3)
+
+#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_PCM_DO (MTK_PIN_NO(118) | 1)
+#define PINMUX_GPIO118__FUNC_I2S0_MCK (MTK_PIN_NO(118) | 2)
+#define PINMUX_GPIO118__FUNC_I2S2_MCK (MTK_PIN_NO(118) | 3)
+#define PINMUX_GPIO118__FUNC_I2S3_DO (MTK_PIN_NO(118) | 4)
+#define PINMUX_GPIO118__FUNC_I2S1_DO (MTK_PIN_NO(118) | 5)
+
+#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_JTMS_SEL1 (MTK_PIN_NO(119) | 1)
+#define PINMUX_GPIO119__FUNC_UDI_TMS (MTK_PIN_NO(119) | 2)
+#define PINMUX_GPIO119__FUNC_DFD_TMS (MTK_PIN_NO(119) | 3)
+#define PINMUX_GPIO119__FUNC_SPM_JTAG_TMS (MTK_PIN_NO(119) | 4)
+#define PINMUX_GPIO119__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(119) | 5)
+#define PINMUX_GPIO119__FUNC_ADSP_JTAG_TMS (MTK_PIN_NO(119) | 6)
+
+#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_JTCK_SEL1 (MTK_PIN_NO(120) | 1)
+#define PINMUX_GPIO120__FUNC_UDI_TCK (MTK_PIN_NO(120) | 2)
+#define PINMUX_GPIO120__FUNC_DFD_TCK_XI (MTK_PIN_NO(120) | 3)
+#define PINMUX_GPIO120__FUNC_SPM_JTAG_TCK (MTK_PIN_NO(120) | 4)
+#define PINMUX_GPIO120__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(120) | 5)
+#define PINMUX_GPIO120__FUNC_ADSP_JTAG_TCK (MTK_PIN_NO(120) | 6)
+
+#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_JTDI_SEL1 (MTK_PIN_NO(121) | 1)
+#define PINMUX_GPIO121__FUNC_UDI_TDI (MTK_PIN_NO(121) | 2)
+#define PINMUX_GPIO121__FUNC_DFD_TDI (MTK_PIN_NO(121) | 3)
+#define PINMUX_GPIO121__FUNC_SPM_JTAG_TDI (MTK_PIN_NO(121) | 4)
+#define PINMUX_GPIO121__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(121) | 5)
+#define PINMUX_GPIO121__FUNC_ADSP_JTAG_TDI (MTK_PIN_NO(121) | 6)
+
+#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_JTDO_SEL1 (MTK_PIN_NO(122) | 1)
+#define PINMUX_GPIO122__FUNC_UDI_TDO (MTK_PIN_NO(122) | 2)
+#define PINMUX_GPIO122__FUNC_DFD_TDO (MTK_PIN_NO(122) | 3)
+#define PINMUX_GPIO122__FUNC_SPM_JTAG_TDO (MTK_PIN_NO(122) | 4)
+#define PINMUX_GPIO122__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(122) | 5)
+#define PINMUX_GPIO122__FUNC_ADSP_JTAG_TDO (MTK_PIN_NO(122) | 6)
+
+#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_JTRSTN_SEL1 (MTK_PIN_NO(123) | 1)
+#define PINMUX_GPIO123__FUNC_UDI_NTRST (MTK_PIN_NO(123) | 2)
+#define PINMUX_GPIO123__FUNC_SPM_JTAG_TRSTN (MTK_PIN_NO(123) | 4)
+#define PINMUX_GPIO123__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(123) | 5)
+#define PINMUX_GPIO123__FUNC_ADSP_JTAG_TRSTN (MTK_PIN_NO(123) | 6)
+
+#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_CMMCLK0 (MTK_PIN_NO(124) | 1)
+#define PINMUX_GPIO124__FUNC_CLKM0 (MTK_PIN_NO(124) | 2)
+#define PINMUX_GPIO124__FUNC_PWM0 (MTK_PIN_NO(124) | 3)
+
+#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_CMMCLK1 (MTK_PIN_NO(125) | 1)
+#define PINMUX_GPIO125__FUNC_CLKM1 (MTK_PIN_NO(125) | 2)
+#define PINMUX_GPIO125__FUNC_PWM1 (MTK_PIN_NO(125) | 3)
+#define PINMUX_GPIO125__FUNC_DBG_MON_B0 (MTK_PIN_NO(125) | 7)
+
+#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_CMMCLK2 (MTK_PIN_NO(126) | 1)
+#define PINMUX_GPIO126__FUNC_CLKM2 (MTK_PIN_NO(126) | 2)
+#define PINMUX_GPIO126__FUNC_PWM2 (MTK_PIN_NO(126) | 3)
+#define PINMUX_GPIO126__FUNC_DBG_MON_B1 (MTK_PIN_NO(126) | 7)
+
+#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_SCL0 (MTK_PIN_NO(127) | 1)
+#define PINMUX_GPIO127__FUNC_SCP_SCL0 (MTK_PIN_NO(127) | 4)
+#define PINMUX_GPIO127__FUNC_SCP_SCL1 (MTK_PIN_NO(127) | 5)
+
+#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_SDA0 (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_SCP_SDA0 (MTK_PIN_NO(128) | 4)
+#define PINMUX_GPIO128__FUNC_SCP_SDA1 (MTK_PIN_NO(128) | 5)
+
+#define PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_SCL1 (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_SCP_SCL0 (MTK_PIN_NO(129) | 4)
+#define PINMUX_GPIO129__FUNC_SCP_SCL1 (MTK_PIN_NO(129) | 5)
+#define PINMUX_GPIO129__FUNC_DBG_MON_B4 (MTK_PIN_NO(129) | 7)
+
+#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_SDA1 (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_SCP_SDA0 (MTK_PIN_NO(130) | 4)
+#define PINMUX_GPIO130__FUNC_SCP_SDA1 (MTK_PIN_NO(130) | 5)
+#define PINMUX_GPIO130__FUNC_DBG_MON_B5 (MTK_PIN_NO(130) | 7)
+
+#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_SCL2 (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(131) | 2)
+#define PINMUX_GPIO131__FUNC_CONN_UART0_TXD (MTK_PIN_NO(131) | 3)
+#define PINMUX_GPIO131__FUNC_SCP_SCL0 (MTK_PIN_NO(131) | 4)
+#define PINMUX_GPIO131__FUNC_SCP_SCL1 (MTK_PIN_NO(131) | 5)
+#define PINMUX_GPIO131__FUNC_DBG_MON_B6 (MTK_PIN_NO(131) | 7)
+
+#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_SDA2 (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_SSPM_URXD_AO (MTK_PIN_NO(132) | 2)
+#define PINMUX_GPIO132__FUNC_CONN_UART0_RXD (MTK_PIN_NO(132) | 3)
+#define PINMUX_GPIO132__FUNC_SCP_SDA0 (MTK_PIN_NO(132) | 4)
+#define PINMUX_GPIO132__FUNC_SCP_SDA1 (MTK_PIN_NO(132) | 5)
+#define PINMUX_GPIO132__FUNC_DBG_MON_B7 (MTK_PIN_NO(132) | 7)
+
+#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_SCL3 (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_SCP_SCL0 (MTK_PIN_NO(133) | 4)
+#define PINMUX_GPIO133__FUNC_SCP_SCL1 (MTK_PIN_NO(133) | 5)
+#define PINMUX_GPIO133__FUNC_DBG_MON_B8 (MTK_PIN_NO(133) | 7)
+
+#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_SDA3 (MTK_PIN_NO(134) | 1)
+#define PINMUX_GPIO134__FUNC_GPS_PPS (MTK_PIN_NO(134) | 3)
+#define PINMUX_GPIO134__FUNC_SCP_SDA0 (MTK_PIN_NO(134) | 4)
+#define PINMUX_GPIO134__FUNC_SCP_SDA1 (MTK_PIN_NO(134) | 5)
+#define PINMUX_GPIO134__FUNC_DBG_MON_B9 (MTK_PIN_NO(134) | 7)
+
+#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_SCL4 (MTK_PIN_NO(135) | 1)
+#define PINMUX_GPIO135__FUNC_TP_UTXD1_AO (MTK_PIN_NO(135) | 2)
+#define PINMUX_GPIO135__FUNC_UTXD1 (MTK_PIN_NO(135) | 3)
+#define PINMUX_GPIO135__FUNC_SCP_SCL0 (MTK_PIN_NO(135) | 4)
+#define PINMUX_GPIO135__FUNC_SCP_SCL1 (MTK_PIN_NO(135) | 5)
+#define PINMUX_GPIO135__FUNC_DBG_MON_B10 (MTK_PIN_NO(135) | 7)
+
+#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_SDA4 (MTK_PIN_NO(136) | 1)
+#define PINMUX_GPIO136__FUNC_TP_URXD1_AO (MTK_PIN_NO(136) | 2)
+#define PINMUX_GPIO136__FUNC_URXD1 (MTK_PIN_NO(136) | 3)
+#define PINMUX_GPIO136__FUNC_SCP_SDA0 (MTK_PIN_NO(136) | 4)
+#define PINMUX_GPIO136__FUNC_SCP_SDA1 (MTK_PIN_NO(136) | 5)
+#define PINMUX_GPIO136__FUNC_DBG_MON_B11 (MTK_PIN_NO(136) | 7)
+
+#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_SCL5 (MTK_PIN_NO(137) | 1)
+#define PINMUX_GPIO137__FUNC_UTXD2 (MTK_PIN_NO(137) | 2)
+#define PINMUX_GPIO137__FUNC_UCTS1 (MTK_PIN_NO(137) | 3)
+#define PINMUX_GPIO137__FUNC_SCP_SCL0 (MTK_PIN_NO(137) | 4)
+#define PINMUX_GPIO137__FUNC_SCP_SCL1 (MTK_PIN_NO(137) | 5)
+
+#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_SDA5 (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_URXD2 (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_URTS1 (MTK_PIN_NO(138) | 3)
+#define PINMUX_GPIO138__FUNC_SCP_SDA0 (MTK_PIN_NO(138) | 4)
+#define PINMUX_GPIO138__FUNC_SCP_SDA1 (MTK_PIN_NO(138) | 5)
+
+#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_SCL6 (MTK_PIN_NO(139) | 1)
+#define PINMUX_GPIO139__FUNC_UTXD1 (MTK_PIN_NO(139) | 2)
+#define PINMUX_GPIO139__FUNC_TP_UTXD1_AO (MTK_PIN_NO(139) | 3)
+#define PINMUX_GPIO139__FUNC_SCP_SCL0 (MTK_PIN_NO(139) | 4)
+#define PINMUX_GPIO139__FUNC_SCP_SCL1 (MTK_PIN_NO(139) | 5)
+#define PINMUX_GPIO139__FUNC_DBG_MON_B12 (MTK_PIN_NO(139) | 7)
+
+#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_SDA6 (MTK_PIN_NO(140) | 1)
+#define PINMUX_GPIO140__FUNC_URXD1 (MTK_PIN_NO(140) | 2)
+#define PINMUX_GPIO140__FUNC_TP_URXD1_AO (MTK_PIN_NO(140) | 3)
+#define PINMUX_GPIO140__FUNC_SCP_SDA0 (MTK_PIN_NO(140) | 4)
+#define PINMUX_GPIO140__FUNC_SCP_SDA1 (MTK_PIN_NO(140) | 5)
+#define PINMUX_GPIO140__FUNC_DBG_MON_B13 (MTK_PIN_NO(140) | 7)
+
+#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_SCL7 (MTK_PIN_NO(141) | 1)
+#define PINMUX_GPIO141__FUNC_URTS0 (MTK_PIN_NO(141) | 2)
+#define PINMUX_GPIO141__FUNC_TP_URTS1_AO (MTK_PIN_NO(141) | 3)
+#define PINMUX_GPIO141__FUNC_SCP_SCL0 (MTK_PIN_NO(141) | 4)
+#define PINMUX_GPIO141__FUNC_SCP_SCL1 (MTK_PIN_NO(141) | 5)
+#define PINMUX_GPIO141__FUNC_UDI_TCK (MTK_PIN_NO(141) | 6)
+#define PINMUX_GPIO141__FUNC_DBG_MON_B14 (MTK_PIN_NO(141) | 7)
+
+#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_SDA7 (MTK_PIN_NO(142) | 1)
+#define PINMUX_GPIO142__FUNC_UCTS0 (MTK_PIN_NO(142) | 2)
+#define PINMUX_GPIO142__FUNC_TP_UCTS1_AO (MTK_PIN_NO(142) | 3)
+#define PINMUX_GPIO142__FUNC_SCP_SDA0 (MTK_PIN_NO(142) | 4)
+#define PINMUX_GPIO142__FUNC_SCP_SDA1 (MTK_PIN_NO(142) | 5)
+
+#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_SCL8 (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_SCP_SCL0 (MTK_PIN_NO(143) | 4)
+#define PINMUX_GPIO143__FUNC_SCP_SCL1 (MTK_PIN_NO(143) | 5)
+#define PINMUX_GPIO143__FUNC_DBG_MON_B16 (MTK_PIN_NO(143) | 7)
+
+#define PINMUX_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define PINMUX_GPIO144__FUNC_SDA8 (MTK_PIN_NO(144) | 1)
+#define PINMUX_GPIO144__FUNC_SCP_SDA0 (MTK_PIN_NO(144) | 4)
+#define PINMUX_GPIO144__FUNC_SCP_SDA1 (MTK_PIN_NO(144) | 5)
+#define PINMUX_GPIO144__FUNC_DBG_MON_B17 (MTK_PIN_NO(144) | 7)
+
+#define PINMUX_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0)
+#define PINMUX_GPIO145__FUNC_SCL9 (MTK_PIN_NO(145) | 1)
+#define PINMUX_GPIO145__FUNC_CMVREF1 (MTK_PIN_NO(145) | 2)
+#define PINMUX_GPIO145__FUNC_GPS_PPS (MTK_PIN_NO(145) | 3)
+#define PINMUX_GPIO145__FUNC_SCP_SCL0 (MTK_PIN_NO(145) | 4)
+#define PINMUX_GPIO145__FUNC_SCP_SCL1 (MTK_PIN_NO(145) | 5)
+#define PINMUX_GPIO145__FUNC_DBG_MON_B18 (MTK_PIN_NO(145) | 7)
+
+#define PINMUX_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0)
+#define PINMUX_GPIO146__FUNC_SDA9 (MTK_PIN_NO(146) | 1)
+#define PINMUX_GPIO146__FUNC_CMVREF0 (MTK_PIN_NO(146) | 2)
+#define PINMUX_GPIO146__FUNC_SCP_SDA0 (MTK_PIN_NO(146) | 4)
+#define PINMUX_GPIO146__FUNC_SCP_SDA1 (MTK_PIN_NO(146) | 5)
+#define PINMUX_GPIO146__FUNC_DBG_MON_B19 (MTK_PIN_NO(146) | 7)
+
+#define PINMUX_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0)
+#define PINMUX_GPIO147__FUNC_CMFLASH0 (MTK_PIN_NO(147) | 1)
+#define PINMUX_GPIO147__FUNC_LVTS_SDI (MTK_PIN_NO(147) | 2)
+#define PINMUX_GPIO147__FUNC_DPI_DATA12 (MTK_PIN_NO(147) | 3)
+#define PINMUX_GPIO147__FUNC_TP_GPIO0_AO (MTK_PIN_NO(147) | 4)
+#define PINMUX_GPIO147__FUNC_ANT_SEL3 (MTK_PIN_NO(147) | 5)
+#define PINMUX_GPIO147__FUNC_DFD_TCK_XI (MTK_PIN_NO(147) | 6)
+#define PINMUX_GPIO147__FUNC_DBG_MON_B20 (MTK_PIN_NO(147) | 7)
+
+#define PINMUX_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0)
+#define PINMUX_GPIO148__FUNC_CMFLASH1 (MTK_PIN_NO(148) | 1)
+#define PINMUX_GPIO148__FUNC_LVTS_SCF (MTK_PIN_NO(148) | 2)
+#define PINMUX_GPIO148__FUNC_DPI_DATA13 (MTK_PIN_NO(148) | 3)
+#define PINMUX_GPIO148__FUNC_TP_GPIO1_AO (MTK_PIN_NO(148) | 4)
+#define PINMUX_GPIO148__FUNC_ANT_SEL4 (MTK_PIN_NO(148) | 5)
+#define PINMUX_GPIO148__FUNC_DFD_TMS (MTK_PIN_NO(148) | 6)
+#define PINMUX_GPIO148__FUNC_DBG_MON_B21 (MTK_PIN_NO(148) | 7)
+
+#define PINMUX_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0)
+#define PINMUX_GPIO149__FUNC_CMFLASH2 (MTK_PIN_NO(149) | 1)
+#define PINMUX_GPIO149__FUNC_CLKM0 (MTK_PIN_NO(149) | 2)
+#define PINMUX_GPIO149__FUNC_DPI_DATA14 (MTK_PIN_NO(149) | 3)
+#define PINMUX_GPIO149__FUNC_TP_GPIO2_AO (MTK_PIN_NO(149) | 4)
+#define PINMUX_GPIO149__FUNC_ANT_SEL5 (MTK_PIN_NO(149) | 5)
+#define PINMUX_GPIO149__FUNC_DFD_TDI (MTK_PIN_NO(149) | 6)
+#define PINMUX_GPIO149__FUNC_DBG_MON_B22 (MTK_PIN_NO(149) | 7)
+
+#define PINMUX_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0)
+#define PINMUX_GPIO150__FUNC_CLKM1 (MTK_PIN_NO(150) | 2)
+#define PINMUX_GPIO150__FUNC_DPI_DATA15 (MTK_PIN_NO(150) | 3)
+#define PINMUX_GPIO150__FUNC_TP_GPIO3_AO (MTK_PIN_NO(150) | 4)
+#define PINMUX_GPIO150__FUNC_ANT_SEL6 (MTK_PIN_NO(150) | 5)
+#define PINMUX_GPIO150__FUNC_DFD_TDO (MTK_PIN_NO(150) | 6)
+#define PINMUX_GPIO150__FUNC_DBG_MON_B23 (MTK_PIN_NO(150) | 7)
+
+#define PINMUX_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0)
+#define PINMUX_GPIO151__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(151) | 1)
+#define PINMUX_GPIO151__FUNC_CLKM2 (MTK_PIN_NO(151) | 2)
+#define PINMUX_GPIO151__FUNC_DPI_DATA16 (MTK_PIN_NO(151) | 3)
+#define PINMUX_GPIO151__FUNC_TP_GPIO4_AO (MTK_PIN_NO(151) | 4)
+#define PINMUX_GPIO151__FUNC_ANT_SEL7 (MTK_PIN_NO(151) | 5)
+#define PINMUX_GPIO151__FUNC_UDI_TMS (MTK_PIN_NO(151) | 6)
+#define PINMUX_GPIO151__FUNC_DBG_MON_B24 (MTK_PIN_NO(151) | 7)
+
+#define PINMUX_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0)
+#define PINMUX_GPIO152__FUNC_CLKM3 (MTK_PIN_NO(152) | 2)
+#define PINMUX_GPIO152__FUNC_DPI_DATA17 (MTK_PIN_NO(152) | 3)
+#define PINMUX_GPIO152__FUNC_TP_GPIO5_AO (MTK_PIN_NO(152) | 4)
+
+#define PINMUX_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0)
+#define PINMUX_GPIO153__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(153) | 1)
+#define PINMUX_GPIO153__FUNC_DPI_DATA18 (MTK_PIN_NO(153) | 3)
+#define PINMUX_GPIO153__FUNC_TP_GPIO6_AO (MTK_PIN_NO(153) | 4)
+#define PINMUX_GPIO153__FUNC_UDI_TDI (MTK_PIN_NO(153) | 6)
+#define PINMUX_GPIO153__FUNC_DBG_MON_B26 (MTK_PIN_NO(153) | 7)
+
+#define PINMUX_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0)
+#define PINMUX_GPIO154__FUNC_PWM0 (MTK_PIN_NO(154) | 1)
+#define PINMUX_GPIO154__FUNC_CMVREF2 (MTK_PIN_NO(154) | 2)
+#define PINMUX_GPIO154__FUNC_DPI_DATA19 (MTK_PIN_NO(154) | 3)
+#define PINMUX_GPIO154__FUNC_TP_GPIO7_AO (MTK_PIN_NO(154) | 4)
+#define PINMUX_GPIO154__FUNC_UDI_TDO (MTK_PIN_NO(154) | 6)
+#define PINMUX_GPIO154__FUNC_DBG_MON_B27 (MTK_PIN_NO(154) | 7)
+
+#define PINMUX_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0)
+#define PINMUX_GPIO155__FUNC_PWM1 (MTK_PIN_NO(155) | 1)
+#define PINMUX_GPIO155__FUNC_CMVREF1 (MTK_PIN_NO(155) | 2)
+#define PINMUX_GPIO155__FUNC_DPI_DATA20 (MTK_PIN_NO(155) | 3)
+#define PINMUX_GPIO155__FUNC_UDI_NTRST (MTK_PIN_NO(155) | 6)
+#define PINMUX_GPIO155__FUNC_DBG_MON_B28 (MTK_PIN_NO(155) | 7)
+
+#define PINMUX_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0)
+#define PINMUX_GPIO156__FUNC_PWM2 (MTK_PIN_NO(156) | 1)
+#define PINMUX_GPIO156__FUNC_CMVREF0 (MTK_PIN_NO(156) | 2)
+#define PINMUX_GPIO156__FUNC_DPI_DATA21 (MTK_PIN_NO(156) | 3)
+
+#define PINMUX_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0)
+#define PINMUX_GPIO157__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(157) | 1)
+
+#define PINMUX_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0)
+#define PINMUX_GPIO158__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(158) | 1)
+
+#define PINMUX_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0)
+#define PINMUX_GPIO159__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(159) | 1)
+#define PINMUX_GPIO159__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(159) | 2)
+
+#define PINMUX_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0)
+#define PINMUX_GPIO160__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(160) | 1)
+#define PINMUX_GPIO160__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(160) | 2)
+
+#define PINMUX_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0)
+#define PINMUX_GPIO161__FUNC_SRCLKENA0 (MTK_PIN_NO(161) | 1)
+
+#define PINMUX_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0)
+#define PINMUX_GPIO162__FUNC_SRCLKENA1 (MTK_PIN_NO(162) | 1)
+#define PINMUX_GPIO162__FUNC_DBG_MON_A31 (MTK_PIN_NO(162) | 7)
+
+#define PINMUX_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0)
+#define PINMUX_GPIO163__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(163) | 1)
+#define PINMUX_GPIO163__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(163) | 2)
+
+#define PINMUX_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0)
+#define PINMUX_GPIO164__FUNC_RTC32K_CK (MTK_PIN_NO(164) | 1)
+
+#define PINMUX_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0)
+#define PINMUX_GPIO165__FUNC_WATCHDOG (MTK_PIN_NO(165) | 1)
+
+#define PINMUX_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0)
+#define PINMUX_GPIO166__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(166) | 1)
+#define PINMUX_GPIO166__FUNC_AUD_CLK_MISO (MTK_PIN_NO(166) | 2)
+#define PINMUX_GPIO166__FUNC_I2S1_MCK (MTK_PIN_NO(166) | 3)
+
+#define PINMUX_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0)
+#define PINMUX_GPIO167__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(167) | 1)
+#define PINMUX_GPIO167__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(167) | 2)
+#define PINMUX_GPIO167__FUNC_I2S1_BCK (MTK_PIN_NO(167) | 3)
+
+#define PINMUX_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0)
+#define PINMUX_GPIO168__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(168) | 1)
+#define PINMUX_GPIO168__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(168) | 2)
+#define PINMUX_GPIO168__FUNC_I2S1_LRCK (MTK_PIN_NO(168) | 3)
+
+#define PINMUX_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0)
+#define PINMUX_GPIO169__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(169) | 1)
+#define PINMUX_GPIO169__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(169) | 2)
+#define PINMUX_GPIO169__FUNC_I2S1_DO (MTK_PIN_NO(169) | 3)
+
+#define PINMUX_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0)
+#define PINMUX_GPIO170__FUNC_AUD_CLK_MISO (MTK_PIN_NO(170) | 1)
+#define PINMUX_GPIO170__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(170) | 2)
+#define PINMUX_GPIO170__FUNC_I2S2_MCK (MTK_PIN_NO(170) | 3)
+
+#define PINMUX_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0)
+#define PINMUX_GPIO171__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(171) | 1)
+#define PINMUX_GPIO171__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(171) | 2)
+#define PINMUX_GPIO171__FUNC_I2S2_BCK (MTK_PIN_NO(171) | 3)
+
+#define PINMUX_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0)
+#define PINMUX_GPIO172__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(172) | 1)
+#define PINMUX_GPIO172__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(172) | 2)
+#define PINMUX_GPIO172__FUNC_I2S2_LRCK (MTK_PIN_NO(172) | 3)
+#define PINMUX_GPIO172__FUNC_VOW_DAT_MISO (MTK_PIN_NO(172) | 4)
+
+#define PINMUX_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0)
+#define PINMUX_GPIO173__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(173) | 1)
+#define PINMUX_GPIO173__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(173) | 2)
+#define PINMUX_GPIO173__FUNC_I2S2_DI (MTK_PIN_NO(173) | 3)
+#define PINMUX_GPIO173__FUNC_VOW_CLK_MISO (MTK_PIN_NO(173) | 4)
+
+#define PINMUX_GPIO174__FUNC_GPIO174 (MTK_PIN_NO(174) | 0)
+#define PINMUX_GPIO174__FUNC_CONN_TOP_CLK (MTK_PIN_NO(174) | 1)
+#define PINMUX_GPIO174__FUNC_AUXIF_CLK (MTK_PIN_NO(174) | 2)
+#define PINMUX_GPIO174__FUNC_DFD_TCK_XI (MTK_PIN_NO(174) | 3)
+#define PINMUX_GPIO174__FUNC_DBG_MON_B3 (MTK_PIN_NO(174) | 7)
+
+#define PINMUX_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0)
+#define PINMUX_GPIO175__FUNC_CONN_TOP_DATA (MTK_PIN_NO(175) | 1)
+#define PINMUX_GPIO175__FUNC_AUXIF_ST (MTK_PIN_NO(175) | 2)
+#define PINMUX_GPIO175__FUNC_DFD_TMS (MTK_PIN_NO(175) | 3)
+#define PINMUX_GPIO175__FUNC_DBG_MON_B15 (MTK_PIN_NO(175) | 7)
+
+#define PINMUX_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0)
+#define PINMUX_GPIO176__FUNC_CONN_BT_CLK (MTK_PIN_NO(176) | 1)
+#define PINMUX_GPIO176__FUNC_DFD_TDI (MTK_PIN_NO(176) | 3)
+#define PINMUX_GPIO176__FUNC_DBG_MON_B2 (MTK_PIN_NO(176) | 7)
+
+#define PINMUX_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0)
+#define PINMUX_GPIO177__FUNC_CONN_BT_DATA (MTK_PIN_NO(177) | 1)
+#define PINMUX_GPIO177__FUNC_DFD_TDO (MTK_PIN_NO(177) | 3)
+
+#define PINMUX_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0)
+#define PINMUX_GPIO178__FUNC_CONN_HRST_B (MTK_PIN_NO(178) | 1)
+#define PINMUX_GPIO178__FUNC_UDI_TMS (MTK_PIN_NO(178) | 3)
+#define PINMUX_GPIO178__FUNC_DBG_MON_B25 (MTK_PIN_NO(178) | 7)
+
+#define PINMUX_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0)
+#define PINMUX_GPIO179__FUNC_CONN_WB_PTA (MTK_PIN_NO(179) | 1)
+#define PINMUX_GPIO179__FUNC_UDI_TCK (MTK_PIN_NO(179) | 3)
+#define PINMUX_GPIO179__FUNC_DBG_MON_B29 (MTK_PIN_NO(179) | 7)
+
+#define PINMUX_GPIO180__FUNC_GPIO180 (MTK_PIN_NO(180) | 0)
+#define PINMUX_GPIO180__FUNC_CONN_WF_CTRL0 (MTK_PIN_NO(180) | 1)
+#define PINMUX_GPIO180__FUNC_UDI_TDI (MTK_PIN_NO(180) | 3)
+
+#define PINMUX_GPIO181__FUNC_GPIO181 (MTK_PIN_NO(181) | 0)
+#define PINMUX_GPIO181__FUNC_CONN_WF_CTRL1 (MTK_PIN_NO(181) | 1)
+#define PINMUX_GPIO181__FUNC_UDI_TDO (MTK_PIN_NO(181) | 3)
+
+#define PINMUX_GPIO182__FUNC_GPIO182 (MTK_PIN_NO(182) | 0)
+#define PINMUX_GPIO182__FUNC_CONN_WF_CTRL2 (MTK_PIN_NO(182) | 1)
+#define PINMUX_GPIO182__FUNC_UDI_NTRST (MTK_PIN_NO(182) | 3)
+
+#define PINMUX_GPIO183__FUNC_GPIO183 (MTK_PIN_NO(183) | 0)
+#define PINMUX_GPIO183__FUNC_SPMI_SCL (MTK_PIN_NO(183) | 1)
+
+#define PINMUX_GPIO184__FUNC_GPIO184 (MTK_PIN_NO(184) | 0)
+#define PINMUX_GPIO184__FUNC_SPMI_SDA (MTK_PIN_NO(184) | 1)
+
+#endif /* __MT8186_PINFUNC_H */
diff --git a/include/dt-bindings/pinctrl/mt8192-pinfunc.h b/include/dt-bindings/pinctrl/mt8192-pinfunc.h
new file mode 100644
index 000000000000..71ffe3a52578
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt8192-pinfunc.h
@@ -0,0 +1,1344 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ * Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
+ *
+ */
+
+#ifndef __MT8192_PINFUNC_H
+#define __MT8192_PINFUNC_H
+
+#include "mt65xx.h"
+
+#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_SPI6_CLK (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_I2S5_MCK (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_PWM_0 (MTK_PIN_NO(0) | 3)
+#define PINMUX_GPIO0__FUNC_TDM_LRCK (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_TP_GPIO0_AO (MTK_PIN_NO(0) | 5)
+#define PINMUX_GPIO0__FUNC_MD_INT0 (MTK_PIN_NO(0) | 6)
+
+#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_SPI6_CSB (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_I2S5_BCK (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_PWM_1 (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_TDM_BCK (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_TP_GPIO1_AO (MTK_PIN_NO(1) | 5)
+#define PINMUX_GPIO1__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(1) | 6)
+#define PINMUX_GPIO1__FUNC_DBG_MON_A9 (MTK_PIN_NO(1) | 7)
+
+#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_SPI6_MI (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_I2S5_LRCK (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_PWM_2 (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_TDM_MCK (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_TP_GPIO2_AO (MTK_PIN_NO(2) | 5)
+#define PINMUX_GPIO2__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(2) | 6)
+#define PINMUX_GPIO2__FUNC_DBG_MON_A10 (MTK_PIN_NO(2) | 7)
+
+#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_SPI6_MO (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_I2S5_DO (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_PWM_3 (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_TDM_DATA0 (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_TP_GPIO3_AO (MTK_PIN_NO(3) | 5)
+#define PINMUX_GPIO3__FUNC_CLKM0 (MTK_PIN_NO(3) | 6)
+#define PINMUX_GPIO3__FUNC_DBG_MON_A11 (MTK_PIN_NO(3) | 7)
+
+#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_SPI4_A_CLK (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_I2S2_MCK (MTK_PIN_NO(4) | 2)
+#define PINMUX_GPIO4__FUNC_DMIC1_CLK (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_TDM_DATA1 (MTK_PIN_NO(4) | 4)
+#define PINMUX_GPIO4__FUNC_TP_GPIO4_AO (MTK_PIN_NO(4) | 5)
+#define PINMUX_GPIO4__FUNC_PCM1_DI (MTK_PIN_NO(4) | 6)
+#define PINMUX_GPIO4__FUNC_IDDIG (MTK_PIN_NO(4) | 7)
+
+#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_SPI4_A_CSB (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_I2S2_BCK (MTK_PIN_NO(5) | 2)
+#define PINMUX_GPIO5__FUNC_DMIC1_DAT (MTK_PIN_NO(5) | 3)
+#define PINMUX_GPIO5__FUNC_TDM_DATA2 (MTK_PIN_NO(5) | 4)
+#define PINMUX_GPIO5__FUNC_TP_GPIO5_AO (MTK_PIN_NO(5) | 5)
+#define PINMUX_GPIO5__FUNC_PCM1_CLK (MTK_PIN_NO(5) | 6)
+#define PINMUX_GPIO5__FUNC_USB_DRVVBUS (MTK_PIN_NO(5) | 7)
+
+#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_SPI4_A_MI (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_I2S2_LRCK (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_DMIC_CLK (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_TDM_DATA3 (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_TP_GPIO6_AO (MTK_PIN_NO(6) | 5)
+#define PINMUX_GPIO6__FUNC_PCM1_SYNC (MTK_PIN_NO(6) | 6)
+
+#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_SPI4_A_MO (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_I2S2_DI (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_DMIC_DAT (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_WIFI_TXD (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_TP_GPIO7_AO (MTK_PIN_NO(7) | 5)
+#define PINMUX_GPIO7__FUNC_PCM1_DO0 (MTK_PIN_NO(7) | 6)
+
+#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_SRCLKENAI1 (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_I2S2_DI2 (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_KPCOL2 (MTK_PIN_NO(8) | 3)
+#define PINMUX_GPIO8__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_CLKM1 (MTK_PIN_NO(8) | 5)
+#define PINMUX_GPIO8__FUNC_PCM1_DO1 (MTK_PIN_NO(8) | 6)
+#define PINMUX_GPIO8__FUNC_DBG_MON_A12 (MTK_PIN_NO(8) | 7)
+
+#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_SRCLKENAI0 (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_KPROW2 (MTK_PIN_NO(9) | 3)
+#define PINMUX_GPIO9__FUNC_CMMCLK4 (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_CLKM3 (MTK_PIN_NO(9) | 5)
+#define PINMUX_GPIO9__FUNC_PCM1_DO2 (MTK_PIN_NO(9) | 6)
+#define PINMUX_GPIO9__FUNC_DBG_MON_A13 (MTK_PIN_NO(9) | 7)
+
+#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_MSDC2_CLK (MTK_PIN_NO(10) | 1)
+#define PINMUX_GPIO10__FUNC_SPI4_B_CLK (MTK_PIN_NO(10) | 2)
+#define PINMUX_GPIO10__FUNC_I2S8_MCK (MTK_PIN_NO(10) | 3)
+#define PINMUX_GPIO10__FUNC_MD_INT0 (MTK_PIN_NO(10) | 5)
+#define PINMUX_GPIO10__FUNC_TP_GPIO8_AO (MTK_PIN_NO(10) | 6)
+
+#define PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_MSDC2_CMD (MTK_PIN_NO(11) | 1)
+#define PINMUX_GPIO11__FUNC_SPI4_B_CSB (MTK_PIN_NO(11) | 2)
+#define PINMUX_GPIO11__FUNC_I2S8_BCK (MTK_PIN_NO(11) | 3)
+#define PINMUX_GPIO11__FUNC_PCIE_CLKREQ_N (MTK_PIN_NO(11) | 4)
+#define PINMUX_GPIO11__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(11) | 5)
+#define PINMUX_GPIO11__FUNC_TP_GPIO9_AO (MTK_PIN_NO(11) | 6)
+
+#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_MSDC2_DAT3 (MTK_PIN_NO(12) | 1)
+#define PINMUX_GPIO12__FUNC_SPI4_B_MI (MTK_PIN_NO(12) | 2)
+#define PINMUX_GPIO12__FUNC_I2S8_LRCK (MTK_PIN_NO(12) | 3)
+#define PINMUX_GPIO12__FUNC_DMIC1_CLK (MTK_PIN_NO(12) | 4)
+#define PINMUX_GPIO12__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(12) | 5)
+#define PINMUX_GPIO12__FUNC_TP_GPIO10_AO (MTK_PIN_NO(12) | 6)
+
+#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_MSDC2_DAT0 (MTK_PIN_NO(13) | 1)
+#define PINMUX_GPIO13__FUNC_SPI4_B_MO (MTK_PIN_NO(13) | 2)
+#define PINMUX_GPIO13__FUNC_I2S8_DI (MTK_PIN_NO(13) | 3)
+#define PINMUX_GPIO13__FUNC_DMIC1_DAT (MTK_PIN_NO(13) | 4)
+#define PINMUX_GPIO13__FUNC_ANT_SEL10 (MTK_PIN_NO(13) | 5)
+#define PINMUX_GPIO13__FUNC_TP_GPIO11_AO (MTK_PIN_NO(13) | 6)
+
+#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_MSDC2_DAT2 (MTK_PIN_NO(14) | 1)
+#define PINMUX_GPIO14__FUNC_IDDIG (MTK_PIN_NO(14) | 2)
+#define PINMUX_GPIO14__FUNC_SCL_6306 (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_PCIE_PERESET_N (MTK_PIN_NO(14) | 4)
+#define PINMUX_GPIO14__FUNC_ANT_SEL11 (MTK_PIN_NO(14) | 5)
+#define PINMUX_GPIO14__FUNC_TP_GPIO12_AO (MTK_PIN_NO(14) | 6)
+
+#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_MSDC2_DAT1 (MTK_PIN_NO(15) | 1)
+#define PINMUX_GPIO15__FUNC_USB_DRVVBUS (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_SDA_6306 (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_PCIE_WAKE_N (MTK_PIN_NO(15) | 4)
+#define PINMUX_GPIO15__FUNC_ANT_SEL12 (MTK_PIN_NO(15) | 5)
+#define PINMUX_GPIO15__FUNC_TP_GPIO13_AO (MTK_PIN_NO(15) | 6)
+
+#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_SRCLKENAI1 (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_IDDIG (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_TP_GPIO14_AO (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_KPCOL2 (MTK_PIN_NO(16) | 4)
+#define PINMUX_GPIO16__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(16) | 5)
+#define PINMUX_GPIO16__FUNC_SPI7_A_MI (MTK_PIN_NO(16) | 6)
+#define PINMUX_GPIO16__FUNC_DBG_MON_A0 (MTK_PIN_NO(16) | 7)
+
+#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_SRCLKENAI0 (MTK_PIN_NO(17) | 1)
+#define PINMUX_GPIO17__FUNC_USB_DRVVBUS (MTK_PIN_NO(17) | 2)
+#define PINMUX_GPIO17__FUNC_TP_GPIO15_AO (MTK_PIN_NO(17) | 3)
+#define PINMUX_GPIO17__FUNC_KPROW2 (MTK_PIN_NO(17) | 4)
+#define PINMUX_GPIO17__FUNC_SPI7_A_MO (MTK_PIN_NO(17) | 6)
+#define PINMUX_GPIO17__FUNC_DBG_MON_A1 (MTK_PIN_NO(17) | 7)
+
+#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_SRCLKENAI0 (MTK_PIN_NO(18) | 1)
+#define PINMUX_GPIO18__FUNC_SPI4_C_MI (MTK_PIN_NO(18) | 2)
+#define PINMUX_GPIO18__FUNC_SPI1_B_MI (MTK_PIN_NO(18) | 3)
+#define PINMUX_GPIO18__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(18) | 4)
+#define PINMUX_GPIO18__FUNC_ANT_SEL10 (MTK_PIN_NO(18) | 5)
+#define PINMUX_GPIO18__FUNC_MD_INT0 (MTK_PIN_NO(18) | 6)
+#define PINMUX_GPIO18__FUNC_DBG_MON_B2 (MTK_PIN_NO(18) | 7)
+
+#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_SRCLKENAI1 (MTK_PIN_NO(19) | 1)
+#define PINMUX_GPIO19__FUNC_SPI4_C_MO (MTK_PIN_NO(19) | 2)
+#define PINMUX_GPIO19__FUNC_SPI1_B_MO (MTK_PIN_NO(19) | 3)
+#define PINMUX_GPIO19__FUNC_ANT_SEL11 (MTK_PIN_NO(19) | 5)
+#define PINMUX_GPIO19__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(19) | 6)
+#define PINMUX_GPIO19__FUNC_DBG_MON_B3 (MTK_PIN_NO(19) | 7)
+
+#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_SRCLKENAI0 (MTK_PIN_NO(20) | 1)
+#define PINMUX_GPIO20__FUNC_SPI4_C_CLK (MTK_PIN_NO(20) | 2)
+#define PINMUX_GPIO20__FUNC_SPI1_B_CLK (MTK_PIN_NO(20) | 3)
+#define PINMUX_GPIO20__FUNC_PWM_3 (MTK_PIN_NO(20) | 4)
+#define PINMUX_GPIO20__FUNC_ANT_SEL12 (MTK_PIN_NO(20) | 5)
+#define PINMUX_GPIO20__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(20) | 6)
+#define PINMUX_GPIO20__FUNC_DBG_MON_B4 (MTK_PIN_NO(20) | 7)
+
+#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_SPI4_C_CSB (MTK_PIN_NO(21) | 2)
+#define PINMUX_GPIO21__FUNC_SPI1_B_CSB (MTK_PIN_NO(21) | 3)
+#define PINMUX_GPIO21__FUNC_IDDIG (MTK_PIN_NO(21) | 6)
+#define PINMUX_GPIO21__FUNC_DBG_MON_B5 (MTK_PIN_NO(21) | 7)
+
+#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_SPI0_C_CLK (MTK_PIN_NO(22) | 2)
+#define PINMUX_GPIO22__FUNC_SPI7_B_CLK (MTK_PIN_NO(22) | 3)
+#define PINMUX_GPIO22__FUNC_I2S7_BCK (MTK_PIN_NO(22) | 4)
+#define PINMUX_GPIO22__FUNC_I2S9_BCK (MTK_PIN_NO(22) | 5)
+#define PINMUX_GPIO22__FUNC_SCL_6306 (MTK_PIN_NO(22) | 6)
+
+#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_SPI0_C_CSB (MTK_PIN_NO(23) | 2)
+#define PINMUX_GPIO23__FUNC_SPI7_B_CSB (MTK_PIN_NO(23) | 3)
+#define PINMUX_GPIO23__FUNC_I2S7_LRCK (MTK_PIN_NO(23) | 4)
+#define PINMUX_GPIO23__FUNC_I2S9_LRCK (MTK_PIN_NO(23) | 5)
+#define PINMUX_GPIO23__FUNC_SDA_6306 (MTK_PIN_NO(23) | 6)
+
+#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_SRCLKENAI1 (MTK_PIN_NO(24) | 1)
+#define PINMUX_GPIO24__FUNC_SPI0_C_MI (MTK_PIN_NO(24) | 2)
+#define PINMUX_GPIO24__FUNC_SPI7_B_MI (MTK_PIN_NO(24) | 3)
+#define PINMUX_GPIO24__FUNC_I2S6_DI (MTK_PIN_NO(24) | 4)
+#define PINMUX_GPIO24__FUNC_I2S8_DI (MTK_PIN_NO(24) | 5)
+#define PINMUX_GPIO24__FUNC_SPINOR_CS (MTK_PIN_NO(24) | 6)
+
+#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_SRCLKENAI0 (MTK_PIN_NO(25) | 1)
+#define PINMUX_GPIO25__FUNC_SPI0_C_MO (MTK_PIN_NO(25) | 2)
+#define PINMUX_GPIO25__FUNC_SPI7_B_MO (MTK_PIN_NO(25) | 3)
+#define PINMUX_GPIO25__FUNC_I2S7_DO (MTK_PIN_NO(25) | 4)
+#define PINMUX_GPIO25__FUNC_I2S9_DO (MTK_PIN_NO(25) | 5)
+#define PINMUX_GPIO25__FUNC_SPINOR_CK (MTK_PIN_NO(25) | 6)
+
+#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_PWM_2 (MTK_PIN_NO(26) | 1)
+#define PINMUX_GPIO26__FUNC_CLKM0 (MTK_PIN_NO(26) | 2)
+#define PINMUX_GPIO26__FUNC_USB_DRVVBUS (MTK_PIN_NO(26) | 3)
+#define PINMUX_GPIO26__FUNC_SPI5_C_MI (MTK_PIN_NO(26) | 4)
+#define PINMUX_GPIO26__FUNC_I2S9_BCK (MTK_PIN_NO(26) | 5)
+
+#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_PWM_3 (MTK_PIN_NO(27) | 1)
+#define PINMUX_GPIO27__FUNC_CLKM1 (MTK_PIN_NO(27) | 2)
+#define PINMUX_GPIO27__FUNC_SPI5_C_MO (MTK_PIN_NO(27) | 4)
+#define PINMUX_GPIO27__FUNC_I2S9_LRCK (MTK_PIN_NO(27) | 5)
+#define PINMUX_GPIO27__FUNC_SPINOR_IO0 (MTK_PIN_NO(27) | 6)
+
+#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_PWM_0 (MTK_PIN_NO(28) | 1)
+#define PINMUX_GPIO28__FUNC_CLKM2 (MTK_PIN_NO(28) | 2)
+#define PINMUX_GPIO28__FUNC_SPI5_C_CSB (MTK_PIN_NO(28) | 4)
+#define PINMUX_GPIO28__FUNC_I2S9_MCK (MTK_PIN_NO(28) | 5)
+#define PINMUX_GPIO28__FUNC_SPINOR_IO1 (MTK_PIN_NO(28) | 6)
+
+#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_PWM_1 (MTK_PIN_NO(29) | 1)
+#define PINMUX_GPIO29__FUNC_CLKM3 (MTK_PIN_NO(29) | 2)
+#define PINMUX_GPIO29__FUNC_SPI5_C_CLK (MTK_PIN_NO(29) | 4)
+#define PINMUX_GPIO29__FUNC_I2S9_DO (MTK_PIN_NO(29) | 5)
+#define PINMUX_GPIO29__FUNC_SPINOR_IO2 (MTK_PIN_NO(29) | 6)
+
+#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_PWM_2 (MTK_PIN_NO(30) | 1)
+#define PINMUX_GPIO30__FUNC_CLKM0 (MTK_PIN_NO(30) | 2)
+#define PINMUX_GPIO30__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(30) | 3)
+#define PINMUX_GPIO30__FUNC_I2S7_MCK (MTK_PIN_NO(30) | 4)
+#define PINMUX_GPIO30__FUNC_I2S9_MCK (MTK_PIN_NO(30) | 5)
+#define PINMUX_GPIO30__FUNC_SPINOR_IO3 (MTK_PIN_NO(30) | 6)
+
+#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_I2S3_MCK (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_I2S1_MCK (MTK_PIN_NO(31) | 2)
+#define PINMUX_GPIO31__FUNC_I2S5_MCK (MTK_PIN_NO(31) | 3)
+#define PINMUX_GPIO31__FUNC_SRCLKENAI0 (MTK_PIN_NO(31) | 4)
+#define PINMUX_GPIO31__FUNC_I2S0_MCK (MTK_PIN_NO(31) | 5)
+
+#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_I2S3_BCK (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_I2S1_BCK (MTK_PIN_NO(32) | 2)
+#define PINMUX_GPIO32__FUNC_I2S5_BCK (MTK_PIN_NO(32) | 3)
+#define PINMUX_GPIO32__FUNC_PCM0_CLK (MTK_PIN_NO(32) | 4)
+#define PINMUX_GPIO32__FUNC_I2S0_BCK (MTK_PIN_NO(32) | 5)
+
+#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_I2S3_LRCK (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_I2S1_LRCK (MTK_PIN_NO(33) | 2)
+#define PINMUX_GPIO33__FUNC_I2S5_LRCK (MTK_PIN_NO(33) | 3)
+#define PINMUX_GPIO33__FUNC_PCM0_SYNC (MTK_PIN_NO(33) | 4)
+#define PINMUX_GPIO33__FUNC_I2S0_LRCK (MTK_PIN_NO(33) | 5)
+
+#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_I2S0_DI (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_I2S2_DI (MTK_PIN_NO(34) | 2)
+#define PINMUX_GPIO34__FUNC_I2S2_DI2 (MTK_PIN_NO(34) | 3)
+#define PINMUX_GPIO34__FUNC_PCM0_DI (MTK_PIN_NO(34) | 4)
+#define PINMUX_GPIO34__FUNC_I2S0_DI_A (MTK_PIN_NO(34) | 5)
+
+#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_I2S3_DO (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_I2S1_DO (MTK_PIN_NO(35) | 2)
+#define PINMUX_GPIO35__FUNC_I2S5_DO (MTK_PIN_NO(35) | 3)
+#define PINMUX_GPIO35__FUNC_PCM0_DO (MTK_PIN_NO(35) | 4)
+
+#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_SPI5_A_CLK (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_DMIC1_CLK (MTK_PIN_NO(36) | 2)
+#define PINMUX_GPIO36__FUNC_MD_URXD0 (MTK_PIN_NO(36) | 4)
+#define PINMUX_GPIO36__FUNC_UCTS0 (MTK_PIN_NO(36) | 5)
+#define PINMUX_GPIO36__FUNC_URXD1 (MTK_PIN_NO(36) | 6)
+
+#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_SPI5_A_CSB (MTK_PIN_NO(37) | 1)
+#define PINMUX_GPIO37__FUNC_DMIC1_DAT (MTK_PIN_NO(37) | 2)
+#define PINMUX_GPIO37__FUNC_MD_UTXD0 (MTK_PIN_NO(37) | 4)
+#define PINMUX_GPIO37__FUNC_URTS0 (MTK_PIN_NO(37) | 5)
+#define PINMUX_GPIO37__FUNC_UTXD1 (MTK_PIN_NO(37) | 6)
+
+#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_SPI5_A_MI (MTK_PIN_NO(38) | 1)
+#define PINMUX_GPIO38__FUNC_DMIC_CLK (MTK_PIN_NO(38) | 2)
+#define PINMUX_GPIO38__FUNC_MD_URXD1 (MTK_PIN_NO(38) | 4)
+#define PINMUX_GPIO38__FUNC_URXD0 (MTK_PIN_NO(38) | 5)
+#define PINMUX_GPIO38__FUNC_UCTS1 (MTK_PIN_NO(38) | 6)
+
+#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_SPI5_A_MO (MTK_PIN_NO(39) | 1)
+#define PINMUX_GPIO39__FUNC_DMIC_DAT (MTK_PIN_NO(39) | 2)
+#define PINMUX_GPIO39__FUNC_MD_UTXD1 (MTK_PIN_NO(39) | 4)
+#define PINMUX_GPIO39__FUNC_UTXD0 (MTK_PIN_NO(39) | 5)
+#define PINMUX_GPIO39__FUNC_URTS1 (MTK_PIN_NO(39) | 6)
+
+#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_DISP_PWM (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_DBG_MON_A6 (MTK_PIN_NO(40) | 7)
+
+#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_DSI_TE (MTK_PIN_NO(41) | 1)
+#define PINMUX_GPIO41__FUNC_DBG_MON_A7 (MTK_PIN_NO(41) | 7)
+
+#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_LCM_RST (MTK_PIN_NO(42) | 1)
+#define PINMUX_GPIO42__FUNC_DBG_MON_A8 (MTK_PIN_NO(42) | 7)
+
+#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(43) | 1)
+#define PINMUX_GPIO43__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(43) | 2)
+#define PINMUX_GPIO43__FUNC_SCL_6306 (MTK_PIN_NO(43) | 3)
+#define PINMUX_GPIO43__FUNC_ADSP_URXD0 (MTK_PIN_NO(43) | 4)
+#define PINMUX_GPIO43__FUNC_PTA_RXD (MTK_PIN_NO(43) | 5)
+#define PINMUX_GPIO43__FUNC_SSPM_URXD_AO (MTK_PIN_NO(43) | 6)
+#define PINMUX_GPIO43__FUNC_DBG_MON_B0 (MTK_PIN_NO(43) | 7)
+
+#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(44) | 1)
+#define PINMUX_GPIO44__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(44) | 2)
+#define PINMUX_GPIO44__FUNC_SDA_6306 (MTK_PIN_NO(44) | 3)
+#define PINMUX_GPIO44__FUNC_ADSP_UTXD0 (MTK_PIN_NO(44) | 4)
+#define PINMUX_GPIO44__FUNC_PTA_TXD (MTK_PIN_NO(44) | 5)
+#define PINMUX_GPIO44__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(44) | 6)
+#define PINMUX_GPIO44__FUNC_DBG_MON_B1 (MTK_PIN_NO(44) | 7)
+
+#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(45) | 1)
+#define PINMUX_GPIO45__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(45) | 2)
+#define PINMUX_GPIO45__FUNC_MCUPM_JTAG_TDI (MTK_PIN_NO(45) | 3)
+#define PINMUX_GPIO45__FUNC_APU_JTAG_TDI (MTK_PIN_NO(45) | 4)
+#define PINMUX_GPIO45__FUNC_CCU_JTAG_TDI (MTK_PIN_NO(45) | 5)
+#define PINMUX_GPIO45__FUNC_LVTS_SCK (MTK_PIN_NO(45) | 6)
+#define PINMUX_GPIO45__FUNC_CONN_DSP_JDI (MTK_PIN_NO(45) | 7)
+
+#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(46) | 1)
+#define PINMUX_GPIO46__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(46) | 2)
+#define PINMUX_GPIO46__FUNC_MCUPM_JTAG_TMS (MTK_PIN_NO(46) | 3)
+#define PINMUX_GPIO46__FUNC_APU_JTAG_TMS (MTK_PIN_NO(46) | 4)
+#define PINMUX_GPIO46__FUNC_CCU_JTAG_TMS (MTK_PIN_NO(46) | 5)
+#define PINMUX_GPIO46__FUNC_LVTS_SDI (MTK_PIN_NO(46) | 6)
+#define PINMUX_GPIO46__FUNC_CONN_DSP_JMS (MTK_PIN_NO(46) | 7)
+
+#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(47) | 1)
+#define PINMUX_GPIO47__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(47) | 2)
+#define PINMUX_GPIO47__FUNC_MCUPM_JTAG_TDO (MTK_PIN_NO(47) | 3)
+#define PINMUX_GPIO47__FUNC_APU_JTAG_TDO (MTK_PIN_NO(47) | 4)
+#define PINMUX_GPIO47__FUNC_CCU_JTAG_TDO (MTK_PIN_NO(47) | 5)
+#define PINMUX_GPIO47__FUNC_LVTS_SCF (MTK_PIN_NO(47) | 6)
+#define PINMUX_GPIO47__FUNC_CONN_DSP_JDO (MTK_PIN_NO(47) | 7)
+
+#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(48) | 1)
+#define PINMUX_GPIO48__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(48) | 2)
+#define PINMUX_GPIO48__FUNC_MCUPM_JTAG_TRSTN (MTK_PIN_NO(48) | 3)
+#define PINMUX_GPIO48__FUNC_APU_JTAG_TRST (MTK_PIN_NO(48) | 4)
+#define PINMUX_GPIO48__FUNC_CCU_JTAG_TRST (MTK_PIN_NO(48) | 5)
+#define PINMUX_GPIO48__FUNC_LVTS_FOUT (MTK_PIN_NO(48) | 6)
+#define PINMUX_GPIO48__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(48) | 7)
+
+#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(49) | 1)
+#define PINMUX_GPIO49__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(49) | 2)
+#define PINMUX_GPIO49__FUNC_MCUPM_JTAG_TCK (MTK_PIN_NO(49) | 3)
+#define PINMUX_GPIO49__FUNC_APU_JTAG_TCK (MTK_PIN_NO(49) | 4)
+#define PINMUX_GPIO49__FUNC_CCU_JTAG_TCK (MTK_PIN_NO(49) | 5)
+#define PINMUX_GPIO49__FUNC_LVTS_SDO (MTK_PIN_NO(49) | 6)
+#define PINMUX_GPIO49__FUNC_CONN_DSP_JCK (MTK_PIN_NO(49) | 7)
+
+#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(50) | 1)
+#define PINMUX_GPIO50__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(50) | 2)
+#define PINMUX_GPIO50__FUNC_LVTS_26M (MTK_PIN_NO(50) | 6)
+
+#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_MSDC1_CLK (MTK_PIN_NO(51) | 1)
+#define PINMUX_GPIO51__FUNC_PCM1_CLK (MTK_PIN_NO(51) | 2)
+#define PINMUX_GPIO51__FUNC_CONN_DSP_JCK (MTK_PIN_NO(51) | 3)
+#define PINMUX_GPIO51__FUNC_UDI_TCK (MTK_PIN_NO(51) | 4)
+#define PINMUX_GPIO51__FUNC_IPU_JTAG_TCK (MTK_PIN_NO(51) | 5)
+#define PINMUX_GPIO51__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(51) | 6)
+#define PINMUX_GPIO51__FUNC_JTCK_SEL3 (MTK_PIN_NO(51) | 7)
+
+#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_MSDC1_CMD (MTK_PIN_NO(52) | 1)
+#define PINMUX_GPIO52__FUNC_PCM1_SYNC (MTK_PIN_NO(52) | 2)
+#define PINMUX_GPIO52__FUNC_CONN_DSP_JMS (MTK_PIN_NO(52) | 3)
+#define PINMUX_GPIO52__FUNC_UDI_TMS (MTK_PIN_NO(52) | 4)
+#define PINMUX_GPIO52__FUNC_IPU_JTAG_TMS (MTK_PIN_NO(52) | 5)
+#define PINMUX_GPIO52__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(52) | 6)
+#define PINMUX_GPIO52__FUNC_JTMS_SEL3 (MTK_PIN_NO(52) | 7)
+
+#define PINMUX_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_MSDC1_DAT3 (MTK_PIN_NO(53) | 1)
+#define PINMUX_GPIO53__FUNC_PCM1_DI (MTK_PIN_NO(53) | 2)
+#define PINMUX_GPIO53__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(53) | 3)
+#define PINMUX_GPIO53__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(53) | 4)
+
+#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_MSDC1_DAT0 (MTK_PIN_NO(54) | 1)
+#define PINMUX_GPIO54__FUNC_PCM1_DO0 (MTK_PIN_NO(54) | 2)
+#define PINMUX_GPIO54__FUNC_CONN_DSP_JDI (MTK_PIN_NO(54) | 3)
+#define PINMUX_GPIO54__FUNC_UDI_TDI (MTK_PIN_NO(54) | 4)
+#define PINMUX_GPIO54__FUNC_IPU_JTAG_TDI (MTK_PIN_NO(54) | 5)
+#define PINMUX_GPIO54__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(54) | 6)
+#define PINMUX_GPIO54__FUNC_JTDI_SEL3 (MTK_PIN_NO(54) | 7)
+
+#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_MSDC1_DAT2 (MTK_PIN_NO(55) | 1)
+#define PINMUX_GPIO55__FUNC_PCM1_DO2 (MTK_PIN_NO(55) | 2)
+#define PINMUX_GPIO55__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(55) | 3)
+#define PINMUX_GPIO55__FUNC_UDI_NTRST (MTK_PIN_NO(55) | 4)
+#define PINMUX_GPIO55__FUNC_IPU_JTAG_TRST (MTK_PIN_NO(55) | 5)
+#define PINMUX_GPIO55__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(55) | 6)
+#define PINMUX_GPIO55__FUNC_JTRSTN_SEL3 (MTK_PIN_NO(55) | 7)
+
+#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_MSDC1_DAT1 (MTK_PIN_NO(56) | 1)
+#define PINMUX_GPIO56__FUNC_PCM1_DO1 (MTK_PIN_NO(56) | 2)
+#define PINMUX_GPIO56__FUNC_CONN_DSP_JDO (MTK_PIN_NO(56) | 3)
+#define PINMUX_GPIO56__FUNC_UDI_TDO (MTK_PIN_NO(56) | 4)
+#define PINMUX_GPIO56__FUNC_IPU_JTAG_TDO (MTK_PIN_NO(56) | 5)
+#define PINMUX_GPIO56__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(56) | 6)
+#define PINMUX_GPIO56__FUNC_JTDO_SEL3 (MTK_PIN_NO(56) | 7)
+
+#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_MIPI2_D_SCLK (MTK_PIN_NO(57) | 1)
+
+#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_MIPI2_D_SDATA (MTK_PIN_NO(58) | 1)
+
+#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_MIPI_M_SCLK (MTK_PIN_NO(59) | 1)
+
+#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_MIPI_M_SDATA (MTK_PIN_NO(60) | 1)
+
+#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_MD_UCNT_A_TGL (MTK_PIN_NO(61) | 1)
+
+#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_DIGRF_IRQ (MTK_PIN_NO(62) | 1)
+
+#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_BPI_BUS0 (MTK_PIN_NO(63) | 1)
+#define PINMUX_GPIO63__FUNC_PCIE_WAKE_N (MTK_PIN_NO(63) | 3)
+
+#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_BPI_BUS1 (MTK_PIN_NO(64) | 1)
+#define PINMUX_GPIO64__FUNC_PCIE_PERESET_N (MTK_PIN_NO(64) | 3)
+
+#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_BPI_BUS2 (MTK_PIN_NO(65) | 1)
+#define PINMUX_GPIO65__FUNC_PCIE_CLKREQ_N (MTK_PIN_NO(65) | 3)
+
+#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_BPI_BUS3 (MTK_PIN_NO(66) | 1)
+
+#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_BPI_BUS4 (MTK_PIN_NO(67) | 1)
+
+#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_BPI_BUS5 (MTK_PIN_NO(68) | 1)
+
+#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_BPI_BUS6 (MTK_PIN_NO(69) | 1)
+#define PINMUX_GPIO69__FUNC_CONN_BPI_BUS6 (MTK_PIN_NO(69) | 2)
+
+#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_BPI_BUS7 (MTK_PIN_NO(70) | 1)
+#define PINMUX_GPIO70__FUNC_CONN_BPI_BUS7 (MTK_PIN_NO(70) | 2)
+
+#define PINMUX_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define PINMUX_GPIO71__FUNC_BPI_BUS8 (MTK_PIN_NO(71) | 1)
+#define PINMUX_GPIO71__FUNC_CONN_BPI_BUS8 (MTK_PIN_NO(71) | 2)
+
+#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define PINMUX_GPIO72__FUNC_BPI_BUS9 (MTK_PIN_NO(72) | 1)
+#define PINMUX_GPIO72__FUNC_CONN_BPI_BUS9 (MTK_PIN_NO(72) | 2)
+
+#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define PINMUX_GPIO73__FUNC_BPI_BUS10 (MTK_PIN_NO(73) | 1)
+#define PINMUX_GPIO73__FUNC_CONN_BPI_BUS10 (MTK_PIN_NO(73) | 2)
+
+#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_BPI_BUS11_OLAT0 (MTK_PIN_NO(74) | 1)
+#define PINMUX_GPIO74__FUNC_CONN_BPI_BUS11_OLAT0 (MTK_PIN_NO(74) | 2)
+
+#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_BPI_BUS12_OLAT1 (MTK_PIN_NO(75) | 1)
+#define PINMUX_GPIO75__FUNC_CONN_BPI_BUS12_OLAT1 (MTK_PIN_NO(75) | 2)
+
+#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_BPI_BUS13_OLAT2 (MTK_PIN_NO(76) | 1)
+#define PINMUX_GPIO76__FUNC_CONN_BPI_BUS13_OLAT2 (MTK_PIN_NO(76) | 2)
+
+#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_BPI_BUS14_OLAT3 (MTK_PIN_NO(77) | 1)
+#define PINMUX_GPIO77__FUNC_CONN_BPI_BUS14_OLAT3 (MTK_PIN_NO(77) | 2)
+
+#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_BPI_BUS15_OLAT4 (MTK_PIN_NO(78) | 1)
+#define PINMUX_GPIO78__FUNC_CONN_BPI_BUS15_OLAT4 (MTK_PIN_NO(78) | 2)
+
+#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_BPI_BUS16_OLAT5 (MTK_PIN_NO(79) | 1)
+#define PINMUX_GPIO79__FUNC_CONN_BPI_BUS16_OLAT5 (MTK_PIN_NO(79) | 2)
+
+#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_BPI_BUS17_ANT0 (MTK_PIN_NO(80) | 1)
+#define PINMUX_GPIO80__FUNC_CONN_BPI_BUS17_ANT0 (MTK_PIN_NO(80) | 2)
+#define PINMUX_GPIO80__FUNC_PCIE_WAKE_N (MTK_PIN_NO(80) | 3)
+
+#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_BPI_BUS18_ANT1 (MTK_PIN_NO(81) | 1)
+#define PINMUX_GPIO81__FUNC_CONN_BPI_BUS18_ANT1 (MTK_PIN_NO(81) | 2)
+#define PINMUX_GPIO81__FUNC_PCIE_PERESET_N (MTK_PIN_NO(81) | 3)
+
+#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_BPI_BUS19_ANT2 (MTK_PIN_NO(82) | 1)
+#define PINMUX_GPIO82__FUNC_CONN_BPI_BUS19_ANT2 (MTK_PIN_NO(82) | 2)
+#define PINMUX_GPIO82__FUNC_PCIE_CLKREQ_N (MTK_PIN_NO(82) | 3)
+
+#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_BPI_BUS20_ANT3 (MTK_PIN_NO(83) | 1)
+#define PINMUX_GPIO83__FUNC_CONN_BPI_BUS20_ANT3 (MTK_PIN_NO(83) | 2)
+
+#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_BPI_BUS21_ANT4 (MTK_PIN_NO(84) | 1)
+#define PINMUX_GPIO84__FUNC_CONN_BPI_BUS21_ANT4 (MTK_PIN_NO(84) | 2)
+
+#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_MIPI1_D_SCLK (MTK_PIN_NO(85) | 1)
+#define PINMUX_GPIO85__FUNC_CONN_MIPI1_SCLK (MTK_PIN_NO(85) | 2)
+
+#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_MIPI1_D_SDATA (MTK_PIN_NO(86) | 1)
+#define PINMUX_GPIO86__FUNC_CONN_MIPI1_SDATA (MTK_PIN_NO(86) | 2)
+
+#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_MIPI0_D_SCLK (MTK_PIN_NO(87) | 1)
+#define PINMUX_GPIO87__FUNC_CONN_MIPI0_SCLK (MTK_PIN_NO(87) | 2)
+
+#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_MIPI0_D_SDATA (MTK_PIN_NO(88) | 1)
+#define PINMUX_GPIO88__FUNC_CONN_MIPI0_SDATA (MTK_PIN_NO(88) | 2)
+
+#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_SPMI_SCL (MTK_PIN_NO(89) | 1)
+#define PINMUX_GPIO89__FUNC_SCL10 (MTK_PIN_NO(89) | 2)
+
+#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_SPMI_SDA (MTK_PIN_NO(90) | 1)
+#define PINMUX_GPIO90__FUNC_SDA10 (MTK_PIN_NO(90) | 2)
+
+#define PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_AP_GOOD (MTK_PIN_NO(91) | 1)
+
+#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_URXD0 (MTK_PIN_NO(92) | 1)
+#define PINMUX_GPIO92__FUNC_MD_URXD0 (MTK_PIN_NO(92) | 2)
+#define PINMUX_GPIO92__FUNC_MD_URXD1 (MTK_PIN_NO(92) | 3)
+#define PINMUX_GPIO92__FUNC_SSPM_URXD_AO (MTK_PIN_NO(92) | 4)
+#define PINMUX_GPIO92__FUNC_CONN_UART0_RXD (MTK_PIN_NO(92) | 5)
+
+#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_UTXD0 (MTK_PIN_NO(93) | 1)
+#define PINMUX_GPIO93__FUNC_MD_UTXD0 (MTK_PIN_NO(93) | 2)
+#define PINMUX_GPIO93__FUNC_MD_UTXD1 (MTK_PIN_NO(93) | 3)
+#define PINMUX_GPIO93__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(93) | 4)
+#define PINMUX_GPIO93__FUNC_CONN_UART0_TXD (MTK_PIN_NO(93) | 5)
+#define PINMUX_GPIO93__FUNC_WIFI_TXD (MTK_PIN_NO(93) | 6)
+
+#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_URXD1 (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_ADSP_URXD0 (MTK_PIN_NO(94) | 2)
+#define PINMUX_GPIO94__FUNC_MD32_0_RXD (MTK_PIN_NO(94) | 3)
+#define PINMUX_GPIO94__FUNC_SSPM_URXD_AO (MTK_PIN_NO(94) | 4)
+#define PINMUX_GPIO94__FUNC_TP_URXD1_AO (MTK_PIN_NO(94) | 5)
+#define PINMUX_GPIO94__FUNC_TP_URXD2_AO (MTK_PIN_NO(94) | 6)
+#define PINMUX_GPIO94__FUNC_MBISTREADEN_TRIGGER (MTK_PIN_NO(94) | 7)
+
+#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_UTXD1 (MTK_PIN_NO(95) | 1)
+#define PINMUX_GPIO95__FUNC_ADSP_UTXD0 (MTK_PIN_NO(95) | 2)
+#define PINMUX_GPIO95__FUNC_MD32_0_TXD (MTK_PIN_NO(95) | 3)
+#define PINMUX_GPIO95__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(95) | 4)
+#define PINMUX_GPIO95__FUNC_TP_UTXD1_AO (MTK_PIN_NO(95) | 5)
+#define PINMUX_GPIO95__FUNC_TP_UTXD2_AO (MTK_PIN_NO(95) | 6)
+#define PINMUX_GPIO95__FUNC_MBISTWRITEEN_TRIGGER (MTK_PIN_NO(95) | 7)
+
+#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_TDM_LRCK (MTK_PIN_NO(96) | 1)
+#define PINMUX_GPIO96__FUNC_I2S7_LRCK (MTK_PIN_NO(96) | 2)
+#define PINMUX_GPIO96__FUNC_I2S9_LRCK (MTK_PIN_NO(96) | 3)
+#define PINMUX_GPIO96__FUNC_DPI_D0 (MTK_PIN_NO(96) | 4)
+#define PINMUX_GPIO96__FUNC_ADSP_JTAG0_TDI (MTK_PIN_NO(96) | 5)
+#define PINMUX_GPIO96__FUNC_IO_JTAG_TDI (MTK_PIN_NO(96) | 7)
+
+#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_TDM_BCK (MTK_PIN_NO(97) | 1)
+#define PINMUX_GPIO97__FUNC_I2S7_BCK (MTK_PIN_NO(97) | 2)
+#define PINMUX_GPIO97__FUNC_I2S9_BCK (MTK_PIN_NO(97) | 3)
+#define PINMUX_GPIO97__FUNC_DPI_D1 (MTK_PIN_NO(97) | 4)
+#define PINMUX_GPIO97__FUNC_ADSP_JTAG0_TRSTN (MTK_PIN_NO(97) | 5)
+#define PINMUX_GPIO97__FUNC_IO_JTAG_TRSTN (MTK_PIN_NO(97) | 7)
+
+#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_TDM_MCK (MTK_PIN_NO(98) | 1)
+#define PINMUX_GPIO98__FUNC_I2S7_MCK (MTK_PIN_NO(98) | 2)
+#define PINMUX_GPIO98__FUNC_I2S9_MCK (MTK_PIN_NO(98) | 3)
+#define PINMUX_GPIO98__FUNC_DPI_D2 (MTK_PIN_NO(98) | 4)
+#define PINMUX_GPIO98__FUNC_ADSP_JTAG0_TCK (MTK_PIN_NO(98) | 5)
+#define PINMUX_GPIO98__FUNC_IO_JTAG_TCK (MTK_PIN_NO(98) | 7)
+
+#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_TDM_DATA0 (MTK_PIN_NO(99) | 1)
+#define PINMUX_GPIO99__FUNC_I2S6_DI (MTK_PIN_NO(99) | 2)
+#define PINMUX_GPIO99__FUNC_I2S8_DI (MTK_PIN_NO(99) | 3)
+#define PINMUX_GPIO99__FUNC_DPI_D3 (MTK_PIN_NO(99) | 4)
+#define PINMUX_GPIO99__FUNC_ADSP_JTAG0_TDO (MTK_PIN_NO(99) | 5)
+#define PINMUX_GPIO99__FUNC_IO_JTAG_TDO (MTK_PIN_NO(99) | 7)
+
+#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_TDM_DATA1 (MTK_PIN_NO(100) | 1)
+#define PINMUX_GPIO100__FUNC_I2S7_DO (MTK_PIN_NO(100) | 2)
+#define PINMUX_GPIO100__FUNC_I2S9_DO (MTK_PIN_NO(100) | 3)
+#define PINMUX_GPIO100__FUNC_DPI_D4 (MTK_PIN_NO(100) | 4)
+#define PINMUX_GPIO100__FUNC_ADSP_JTAG0_TMS (MTK_PIN_NO(100) | 5)
+#define PINMUX_GPIO100__FUNC_IO_JTAG_TMS (MTK_PIN_NO(100) | 7)
+
+#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_TDM_DATA2 (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_DMIC1_CLK (MTK_PIN_NO(101) | 2)
+#define PINMUX_GPIO101__FUNC_SRCLKENAI0 (MTK_PIN_NO(101) | 3)
+#define PINMUX_GPIO101__FUNC_DPI_D5 (MTK_PIN_NO(101) | 4)
+#define PINMUX_GPIO101__FUNC_CLKM0 (MTK_PIN_NO(101) | 5)
+#define PINMUX_GPIO101__FUNC_DAP_MD32_SWD (MTK_PIN_NO(101) | 7)
+
+#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_TDM_DATA3 (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_DMIC1_DAT (MTK_PIN_NO(102) | 2)
+#define PINMUX_GPIO102__FUNC_SRCLKENAI1 (MTK_PIN_NO(102) | 3)
+#define PINMUX_GPIO102__FUNC_DPI_D6 (MTK_PIN_NO(102) | 4)
+#define PINMUX_GPIO102__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(102) | 6)
+#define PINMUX_GPIO102__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(102) | 7)
+
+#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_SPI0_A_MI (MTK_PIN_NO(103) | 1)
+#define PINMUX_GPIO103__FUNC_SCP_SPI0_MI (MTK_PIN_NO(103) | 2)
+#define PINMUX_GPIO103__FUNC_DPI_D7 (MTK_PIN_NO(103) | 4)
+#define PINMUX_GPIO103__FUNC_DFD_TDO (MTK_PIN_NO(103) | 5)
+#define PINMUX_GPIO103__FUNC_SPM_JTAG_TDO (MTK_PIN_NO(103) | 6)
+#define PINMUX_GPIO103__FUNC_JTDO_SEL1 (MTK_PIN_NO(103) | 7)
+
+#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_SPI0_A_CSB (MTK_PIN_NO(104) | 1)
+#define PINMUX_GPIO104__FUNC_SCP_SPI0_CS (MTK_PIN_NO(104) | 2)
+#define PINMUX_GPIO104__FUNC_DPI_D8 (MTK_PIN_NO(104) | 4)
+#define PINMUX_GPIO104__FUNC_DFD_TMS (MTK_PIN_NO(104) | 5)
+#define PINMUX_GPIO104__FUNC_SPM_JTAG_TMS (MTK_PIN_NO(104) | 6)
+#define PINMUX_GPIO104__FUNC_JTMS_SEL1 (MTK_PIN_NO(104) | 7)
+
+#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_SPI0_A_MO (MTK_PIN_NO(105) | 1)
+#define PINMUX_GPIO105__FUNC_SCP_SPI0_MO (MTK_PIN_NO(105) | 2)
+#define PINMUX_GPIO105__FUNC_SCP_SDA0 (MTK_PIN_NO(105) | 3)
+#define PINMUX_GPIO105__FUNC_DPI_D9 (MTK_PIN_NO(105) | 4)
+#define PINMUX_GPIO105__FUNC_DFD_TDI (MTK_PIN_NO(105) | 5)
+#define PINMUX_GPIO105__FUNC_SPM_JTAG_TDI (MTK_PIN_NO(105) | 6)
+#define PINMUX_GPIO105__FUNC_JTDI_SEL1 (MTK_PIN_NO(105) | 7)
+
+#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_SPI0_A_CLK (MTK_PIN_NO(106) | 1)
+#define PINMUX_GPIO106__FUNC_SCP_SPI0_CK (MTK_PIN_NO(106) | 2)
+#define PINMUX_GPIO106__FUNC_SCP_SCL0 (MTK_PIN_NO(106) | 3)
+#define PINMUX_GPIO106__FUNC_DPI_D10 (MTK_PIN_NO(106) | 4)
+#define PINMUX_GPIO106__FUNC_DFD_TCK_XI (MTK_PIN_NO(106) | 5)
+#define PINMUX_GPIO106__FUNC_SPM_JTAG_TCK (MTK_PIN_NO(106) | 6)
+#define PINMUX_GPIO106__FUNC_JTCK_SEL1 (MTK_PIN_NO(106) | 7)
+
+#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_DMIC_CLK (MTK_PIN_NO(107) | 1)
+#define PINMUX_GPIO107__FUNC_PWM_0 (MTK_PIN_NO(107) | 2)
+#define PINMUX_GPIO107__FUNC_CLKM2 (MTK_PIN_NO(107) | 3)
+#define PINMUX_GPIO107__FUNC_SPM_JTAG_TRSTN (MTK_PIN_NO(107) | 6)
+#define PINMUX_GPIO107__FUNC_JTRSTN_SEL1 (MTK_PIN_NO(107) | 7)
+
+#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_DMIC_DAT (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_PWM_1 (MTK_PIN_NO(108) | 2)
+#define PINMUX_GPIO108__FUNC_CLKM3 (MTK_PIN_NO(108) | 3)
+#define PINMUX_GPIO108__FUNC_DAP_SONIC_SWD (MTK_PIN_NO(108) | 7)
+
+#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_I2S1_MCK (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_I2S3_MCK (MTK_PIN_NO(109) | 2)
+#define PINMUX_GPIO109__FUNC_I2S2_MCK (MTK_PIN_NO(109) | 3)
+#define PINMUX_GPIO109__FUNC_DPI_DE (MTK_PIN_NO(109) | 4)
+#define PINMUX_GPIO109__FUNC_I2S2_MCK_A (MTK_PIN_NO(109) | 5)
+#define PINMUX_GPIO109__FUNC_SRCLKENAI0 (MTK_PIN_NO(109) | 6)
+#define PINMUX_GPIO109__FUNC_DAP_SONIC_SWCK (MTK_PIN_NO(109) | 7)
+
+#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_I2S1_BCK (MTK_PIN_NO(110) | 1)
+#define PINMUX_GPIO110__FUNC_I2S3_BCK (MTK_PIN_NO(110) | 2)
+#define PINMUX_GPIO110__FUNC_I2S2_BCK (MTK_PIN_NO(110) | 3)
+#define PINMUX_GPIO110__FUNC_DPI_D11 (MTK_PIN_NO(110) | 4)
+#define PINMUX_GPIO110__FUNC_I2S2_BCK_A (MTK_PIN_NO(110) | 5)
+#define PINMUX_GPIO110__FUNC_CONN_MCU_TDO (MTK_PIN_NO(110) | 6)
+
+#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_I2S1_LRCK (MTK_PIN_NO(111) | 1)
+#define PINMUX_GPIO111__FUNC_I2S3_LRCK (MTK_PIN_NO(111) | 2)
+#define PINMUX_GPIO111__FUNC_I2S2_LRCK (MTK_PIN_NO(111) | 3)
+#define PINMUX_GPIO111__FUNC_DPI_VSYNC (MTK_PIN_NO(111) | 4)
+#define PINMUX_GPIO111__FUNC_I2S2_LRCK_A (MTK_PIN_NO(111) | 5)
+#define PINMUX_GPIO111__FUNC_CONN_MCU_TDI (MTK_PIN_NO(111) | 6)
+
+#define PINMUX_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_I2S2_DI (MTK_PIN_NO(112) | 1)
+#define PINMUX_GPIO112__FUNC_I2S0_DI (MTK_PIN_NO(112) | 2)
+#define PINMUX_GPIO112__FUNC_I2S2_DI2 (MTK_PIN_NO(112) | 3)
+#define PINMUX_GPIO112__FUNC_DPI_CK (MTK_PIN_NO(112) | 4)
+#define PINMUX_GPIO112__FUNC_I2S2_DI_A (MTK_PIN_NO(112) | 5)
+#define PINMUX_GPIO112__FUNC_CONN_MCU_TMS (MTK_PIN_NO(112) | 6)
+
+#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_I2S1_DO (MTK_PIN_NO(113) | 1)
+#define PINMUX_GPIO113__FUNC_I2S3_DO (MTK_PIN_NO(113) | 2)
+#define PINMUX_GPIO113__FUNC_I2S5_DO (MTK_PIN_NO(113) | 3)
+#define PINMUX_GPIO113__FUNC_DPI_HSYNC (MTK_PIN_NO(113) | 4)
+#define PINMUX_GPIO113__FUNC_I2S2_DI2 (MTK_PIN_NO(113) | 5)
+#define PINMUX_GPIO113__FUNC_CONN_MCU_TCK (MTK_PIN_NO(113) | 6)
+
+#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_SPI2_MI (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_SCP_SPI2_MI (MTK_PIN_NO(114) | 2)
+#define PINMUX_GPIO114__FUNC_PCM0_DI (MTK_PIN_NO(114) | 4)
+#define PINMUX_GPIO114__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(114) | 6)
+
+#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_SPI2_CSB (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_SCP_SPI2_CS (MTK_PIN_NO(115) | 2)
+#define PINMUX_GPIO115__FUNC_PCM0_SYNC (MTK_PIN_NO(115) | 4)
+#define PINMUX_GPIO115__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(115) | 6)
+
+#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_SPI2_MO (MTK_PIN_NO(116) | 1)
+#define PINMUX_GPIO116__FUNC_SCP_SPI2_MO (MTK_PIN_NO(116) | 2)
+#define PINMUX_GPIO116__FUNC_SCP_SDA1 (MTK_PIN_NO(116) | 3)
+#define PINMUX_GPIO116__FUNC_PCM0_DO (MTK_PIN_NO(116) | 4)
+#define PINMUX_GPIO116__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(116) | 6)
+
+#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_SPI2_CLK (MTK_PIN_NO(117) | 1)
+#define PINMUX_GPIO117__FUNC_SCP_SPI2_CK (MTK_PIN_NO(117) | 2)
+#define PINMUX_GPIO117__FUNC_SCP_SCL1 (MTK_PIN_NO(117) | 3)
+#define PINMUX_GPIO117__FUNC_PCM0_CLK (MTK_PIN_NO(117) | 4)
+
+#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_SCL1 (MTK_PIN_NO(118) | 1)
+#define PINMUX_GPIO118__FUNC_SCP_SCL0 (MTK_PIN_NO(118) | 2)
+#define PINMUX_GPIO118__FUNC_SCP_SCL1 (MTK_PIN_NO(118) | 3)
+
+#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_SDA1 (MTK_PIN_NO(119) | 1)
+#define PINMUX_GPIO119__FUNC_SCP_SDA0 (MTK_PIN_NO(119) | 2)
+#define PINMUX_GPIO119__FUNC_SCP_SDA1 (MTK_PIN_NO(119) | 3)
+
+#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_SCL9 (MTK_PIN_NO(120) | 1)
+#define PINMUX_GPIO120__FUNC_SCP_SCL0 (MTK_PIN_NO(120) | 2)
+
+#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_SDA9 (MTK_PIN_NO(121) | 1)
+#define PINMUX_GPIO121__FUNC_SCP_SDA0 (MTK_PIN_NO(121) | 2)
+
+#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_SCL8 (MTK_PIN_NO(122) | 1)
+#define PINMUX_GPIO122__FUNC_SCP_SDA0 (MTK_PIN_NO(122) | 2)
+
+#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_SDA8 (MTK_PIN_NO(123) | 1)
+#define PINMUX_GPIO123__FUNC_SCP_SCL0 (MTK_PIN_NO(123) | 2)
+
+#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_SCL7 (MTK_PIN_NO(124) | 1)
+#define PINMUX_GPIO124__FUNC_DMIC1_CLK (MTK_PIN_NO(124) | 2)
+
+#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_SDA7 (MTK_PIN_NO(125) | 1)
+#define PINMUX_GPIO125__FUNC_DMIC1_DAT (MTK_PIN_NO(125) | 2)
+
+#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_CMFLASH0 (MTK_PIN_NO(126) | 1)
+#define PINMUX_GPIO126__FUNC_PWM_2 (MTK_PIN_NO(126) | 2)
+#define PINMUX_GPIO126__FUNC_TP_UCTS1_AO (MTK_PIN_NO(126) | 3)
+#define PINMUX_GPIO126__FUNC_UCTS0 (MTK_PIN_NO(126) | 4)
+#define PINMUX_GPIO126__FUNC_SCL11 (MTK_PIN_NO(126) | 5)
+#define PINMUX_GPIO126__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(126) | 6)
+#define PINMUX_GPIO126__FUNC_DBG_MON_A14 (MTK_PIN_NO(126) | 7)
+
+#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_CMFLASH1 (MTK_PIN_NO(127) | 1)
+#define PINMUX_GPIO127__FUNC_PWM_3 (MTK_PIN_NO(127) | 2)
+#define PINMUX_GPIO127__FUNC_TP_URTS1_AO (MTK_PIN_NO(127) | 3)
+#define PINMUX_GPIO127__FUNC_URTS0 (MTK_PIN_NO(127) | 4)
+#define PINMUX_GPIO127__FUNC_SDA11 (MTK_PIN_NO(127) | 5)
+#define PINMUX_GPIO127__FUNC_DBG_MON_A15 (MTK_PIN_NO(127) | 7)
+
+#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_CMFLASH2 (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_PWM_0 (MTK_PIN_NO(128) | 2)
+#define PINMUX_GPIO128__FUNC_TP_UCTS2_AO (MTK_PIN_NO(128) | 3)
+#define PINMUX_GPIO128__FUNC_UCTS1 (MTK_PIN_NO(128) | 4)
+#define PINMUX_GPIO128__FUNC_SCL_6306 (MTK_PIN_NO(128) | 5)
+#define PINMUX_GPIO128__FUNC_DBG_MON_A16 (MTK_PIN_NO(128) | 7)
+
+#define PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_CMFLASH3 (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_PWM_1 (MTK_PIN_NO(129) | 2)
+#define PINMUX_GPIO129__FUNC_TP_URTS2_AO (MTK_PIN_NO(129) | 3)
+#define PINMUX_GPIO129__FUNC_URTS1 (MTK_PIN_NO(129) | 4)
+#define PINMUX_GPIO129__FUNC_SDA_6306 (MTK_PIN_NO(129) | 5)
+#define PINMUX_GPIO129__FUNC_DBG_MON_A17 (MTK_PIN_NO(129) | 7)
+
+#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_CMVREF0 (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_ANT_SEL10 (MTK_PIN_NO(130) | 2)
+#define PINMUX_GPIO130__FUNC_SCP_JTAG0_TDO (MTK_PIN_NO(130) | 3)
+#define PINMUX_GPIO130__FUNC_MD32_0_JTAG_TDO (MTK_PIN_NO(130) | 4)
+#define PINMUX_GPIO130__FUNC_SCL11 (MTK_PIN_NO(130) | 5)
+#define PINMUX_GPIO130__FUNC_SPI5_B_CLK (MTK_PIN_NO(130) | 6)
+#define PINMUX_GPIO130__FUNC_DBG_MON_A22 (MTK_PIN_NO(130) | 7)
+
+#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_CMVREF1 (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_ANT_SEL11 (MTK_PIN_NO(131) | 2)
+#define PINMUX_GPIO131__FUNC_SCP_JTAG0_TDI (MTK_PIN_NO(131) | 3)
+#define PINMUX_GPIO131__FUNC_MD32_0_JTAG_TDI (MTK_PIN_NO(131) | 4)
+#define PINMUX_GPIO131__FUNC_SDA11 (MTK_PIN_NO(131) | 5)
+#define PINMUX_GPIO131__FUNC_SPI5_B_MO (MTK_PIN_NO(131) | 6)
+#define PINMUX_GPIO131__FUNC_DBG_MON_A25 (MTK_PIN_NO(131) | 7)
+
+#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_CMVREF2 (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_ANT_SEL12 (MTK_PIN_NO(132) | 2)
+#define PINMUX_GPIO132__FUNC_SCP_JTAG0_TMS (MTK_PIN_NO(132) | 3)
+#define PINMUX_GPIO132__FUNC_MD32_0_JTAG_TMS (MTK_PIN_NO(132) | 4)
+#define PINMUX_GPIO132__FUNC_DBG_MON_A28 (MTK_PIN_NO(132) | 7)
+
+#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_CMVREF3 (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(133) | 2)
+#define PINMUX_GPIO133__FUNC_SCP_JTAG0_TCK (MTK_PIN_NO(133) | 3)
+#define PINMUX_GPIO133__FUNC_MD32_0_JTAG_TCK (MTK_PIN_NO(133) | 4)
+#define PINMUX_GPIO133__FUNC_SPI5_B_CSB (MTK_PIN_NO(133) | 6)
+#define PINMUX_GPIO133__FUNC_DBG_MON_A23 (MTK_PIN_NO(133) | 7)
+
+#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_CMVREF4 (MTK_PIN_NO(134) | 1)
+#define PINMUX_GPIO134__FUNC_SCP_JTAG0_TRSTN (MTK_PIN_NO(134) | 3)
+#define PINMUX_GPIO134__FUNC_MD32_0_JTAG_TRST (MTK_PIN_NO(134) | 4)
+#define PINMUX_GPIO134__FUNC_DBG_MON_A26 (MTK_PIN_NO(134) | 7)
+
+#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_PWM_0 (MTK_PIN_NO(135) | 1)
+#define PINMUX_GPIO135__FUNC_SRCLKENAI1 (MTK_PIN_NO(135) | 2)
+#define PINMUX_GPIO135__FUNC_MD_URXD0 (MTK_PIN_NO(135) | 3)
+#define PINMUX_GPIO135__FUNC_MD32_0_RXD (MTK_PIN_NO(135) | 4)
+#define PINMUX_GPIO135__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(135) | 5)
+#define PINMUX_GPIO135__FUNC_DBG_MON_A29 (MTK_PIN_NO(135) | 7)
+
+#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_CMMCLK3 (MTK_PIN_NO(136) | 1)
+#define PINMUX_GPIO136__FUNC_CLKM1 (MTK_PIN_NO(136) | 2)
+#define PINMUX_GPIO136__FUNC_MD_UTXD0 (MTK_PIN_NO(136) | 3)
+#define PINMUX_GPIO136__FUNC_MD32_0_TXD (MTK_PIN_NO(136) | 4)
+#define PINMUX_GPIO136__FUNC_SPI5_B_MI (MTK_PIN_NO(136) | 6)
+#define PINMUX_GPIO136__FUNC_DBG_MON_A24 (MTK_PIN_NO(136) | 7)
+
+#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_CMMCLK4 (MTK_PIN_NO(137) | 1)
+#define PINMUX_GPIO137__FUNC_CLKM2 (MTK_PIN_NO(137) | 2)
+#define PINMUX_GPIO137__FUNC_MD_URXD1 (MTK_PIN_NO(137) | 3)
+#define PINMUX_GPIO137__FUNC_CONN_UART0_RXD (MTK_PIN_NO(137) | 6)
+#define PINMUX_GPIO137__FUNC_DBG_MON_A27 (MTK_PIN_NO(137) | 7)
+
+#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_CMMCLK5 (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_CLKM3 (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_MD_UTXD1 (MTK_PIN_NO(138) | 3)
+#define PINMUX_GPIO138__FUNC_CONN_UART0_TXD (MTK_PIN_NO(138) | 6)
+#define PINMUX_GPIO138__FUNC_DBG_MON_A30 (MTK_PIN_NO(138) | 7)
+
+#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_SCL4 (MTK_PIN_NO(139) | 1)
+#define PINMUX_GPIO139__FUNC_DBG_MON_A21 (MTK_PIN_NO(139) | 7)
+
+#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_SDA4 (MTK_PIN_NO(140) | 1)
+#define PINMUX_GPIO140__FUNC_DBG_MON_A20 (MTK_PIN_NO(140) | 7)
+
+#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_SCL2 (MTK_PIN_NO(141) | 1)
+#define PINMUX_GPIO141__FUNC_DBG_MON_A18 (MTK_PIN_NO(141) | 7)
+
+#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_SDA2 (MTK_PIN_NO(142) | 1)
+#define PINMUX_GPIO142__FUNC_DBG_MON_A19 (MTK_PIN_NO(142) | 7)
+
+#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_CMVREF0 (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_SPI3_CLK (MTK_PIN_NO(143) | 2)
+#define PINMUX_GPIO143__FUNC_ADSP_JTAG1_TDO (MTK_PIN_NO(143) | 3)
+#define PINMUX_GPIO143__FUNC_SCP_JTAG1_TDO (MTK_PIN_NO(143) | 4)
+#define PINMUX_GPIO143__FUNC_DBG_MON_A31 (MTK_PIN_NO(143) | 7)
+
+#define PINMUX_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define PINMUX_GPIO144__FUNC_CMVREF1 (MTK_PIN_NO(144) | 1)
+#define PINMUX_GPIO144__FUNC_SPI3_CSB (MTK_PIN_NO(144) | 2)
+#define PINMUX_GPIO144__FUNC_ADSP_JTAG1_TDI (MTK_PIN_NO(144) | 3)
+#define PINMUX_GPIO144__FUNC_SCP_JTAG1_TDI (MTK_PIN_NO(144) | 4)
+
+#define PINMUX_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0)
+#define PINMUX_GPIO145__FUNC_CMVREF2 (MTK_PIN_NO(145) | 1)
+#define PINMUX_GPIO145__FUNC_SPI3_MI (MTK_PIN_NO(145) | 2)
+#define PINMUX_GPIO145__FUNC_ADSP_JTAG1_TMS (MTK_PIN_NO(145) | 3)
+#define PINMUX_GPIO145__FUNC_SCP_JTAG1_TMS (MTK_PIN_NO(145) | 4)
+
+#define PINMUX_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0)
+#define PINMUX_GPIO146__FUNC_CMVREF3 (MTK_PIN_NO(146) | 1)
+#define PINMUX_GPIO146__FUNC_SPI3_MO (MTK_PIN_NO(146) | 2)
+#define PINMUX_GPIO146__FUNC_ADSP_JTAG1_TCK (MTK_PIN_NO(146) | 3)
+#define PINMUX_GPIO146__FUNC_SCP_JTAG1_TCK (MTK_PIN_NO(146) | 4)
+#define PINMUX_GPIO146__FUNC_DBG_MON_A32 (MTK_PIN_NO(146) | 7)
+
+#define PINMUX_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0)
+#define PINMUX_GPIO147__FUNC_CMVREF4 (MTK_PIN_NO(147) | 1)
+#define PINMUX_GPIO147__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(147) | 2)
+#define PINMUX_GPIO147__FUNC_ADSP_JTAG1_TRSTN (MTK_PIN_NO(147) | 3)
+#define PINMUX_GPIO147__FUNC_SCP_JTAG1_TRSTN (MTK_PIN_NO(147) | 4)
+
+#define PINMUX_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0)
+#define PINMUX_GPIO148__FUNC_PWM_1 (MTK_PIN_NO(148) | 1)
+#define PINMUX_GPIO148__FUNC_AGPS_SYNC (MTK_PIN_NO(148) | 2)
+#define PINMUX_GPIO148__FUNC_CMMCLK5 (MTK_PIN_NO(148) | 3)
+
+#define PINMUX_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0)
+#define PINMUX_GPIO149__FUNC_CMMCLK0 (MTK_PIN_NO(149) | 1)
+#define PINMUX_GPIO149__FUNC_CLKM0 (MTK_PIN_NO(149) | 2)
+#define PINMUX_GPIO149__FUNC_MD32_0_GPIO0 (MTK_PIN_NO(149) | 3)
+
+#define PINMUX_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0)
+#define PINMUX_GPIO150__FUNC_CMMCLK1 (MTK_PIN_NO(150) | 1)
+#define PINMUX_GPIO150__FUNC_CLKM1 (MTK_PIN_NO(150) | 2)
+#define PINMUX_GPIO150__FUNC_MD32_0_GPIO1 (MTK_PIN_NO(150) | 3)
+#define PINMUX_GPIO150__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(150) | 7)
+
+#define PINMUX_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0)
+#define PINMUX_GPIO151__FUNC_CMMCLK2 (MTK_PIN_NO(151) | 1)
+#define PINMUX_GPIO151__FUNC_CLKM2 (MTK_PIN_NO(151) | 2)
+#define PINMUX_GPIO151__FUNC_MD32_0_GPIO2 (MTK_PIN_NO(151) | 3)
+#define PINMUX_GPIO151__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(151) | 7)
+
+#define PINMUX_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0)
+#define PINMUX_GPIO152__FUNC_KPROW1 (MTK_PIN_NO(152) | 1)
+#define PINMUX_GPIO152__FUNC_PWM_2 (MTK_PIN_NO(152) | 2)
+#define PINMUX_GPIO152__FUNC_IDDIG (MTK_PIN_NO(152) | 3)
+#define PINMUX_GPIO152__FUNC_MBISTREADEN_TRIGGER (MTK_PIN_NO(152) | 6)
+#define PINMUX_GPIO152__FUNC_DBG_MON_B9 (MTK_PIN_NO(152) | 7)
+
+#define PINMUX_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0)
+#define PINMUX_GPIO153__FUNC_KPROW0 (MTK_PIN_NO(153) | 1)
+#define PINMUX_GPIO153__FUNC_DBG_MON_B8 (MTK_PIN_NO(153) | 7)
+
+#define PINMUX_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0)
+#define PINMUX_GPIO154__FUNC_KPCOL0 (MTK_PIN_NO(154) | 1)
+#define PINMUX_GPIO154__FUNC_DBG_MON_B6 (MTK_PIN_NO(154) | 7)
+
+#define PINMUX_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0)
+#define PINMUX_GPIO155__FUNC_KPCOL1 (MTK_PIN_NO(155) | 1)
+#define PINMUX_GPIO155__FUNC_PWM_3 (MTK_PIN_NO(155) | 2)
+#define PINMUX_GPIO155__FUNC_USB_DRVVBUS (MTK_PIN_NO(155) | 3)
+#define PINMUX_GPIO155__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(155) | 4)
+#define PINMUX_GPIO155__FUNC_MBISTWRITEEN_TRIGGER (MTK_PIN_NO(155) | 6)
+#define PINMUX_GPIO155__FUNC_DBG_MON_B7 (MTK_PIN_NO(155) | 7)
+
+#define PINMUX_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0)
+#define PINMUX_GPIO156__FUNC_SPI1_A_CLK (MTK_PIN_NO(156) | 1)
+#define PINMUX_GPIO156__FUNC_SCP_SPI1_A_CK (MTK_PIN_NO(156) | 2)
+#define PINMUX_GPIO156__FUNC_MRG_CLK (MTK_PIN_NO(156) | 3)
+#define PINMUX_GPIO156__FUNC_AGPS_SYNC (MTK_PIN_NO(156) | 4)
+#define PINMUX_GPIO156__FUNC_MD_URXD0 (MTK_PIN_NO(156) | 5)
+#define PINMUX_GPIO156__FUNC_UDI_TMS (MTK_PIN_NO(156) | 6)
+#define PINMUX_GPIO156__FUNC_DBG_MON_B10 (MTK_PIN_NO(156) | 7)
+
+#define PINMUX_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0)
+#define PINMUX_GPIO157__FUNC_SPI1_A_CSB (MTK_PIN_NO(157) | 1)
+#define PINMUX_GPIO157__FUNC_SCP_SPI1_A_CS (MTK_PIN_NO(157) | 2)
+#define PINMUX_GPIO157__FUNC_MRG_SYNC (MTK_PIN_NO(157) | 3)
+#define PINMUX_GPIO157__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(157) | 4)
+#define PINMUX_GPIO157__FUNC_MD_UTXD0 (MTK_PIN_NO(157) | 5)
+#define PINMUX_GPIO157__FUNC_UDI_TCK (MTK_PIN_NO(157) | 6)
+#define PINMUX_GPIO157__FUNC_DBG_MON_B11 (MTK_PIN_NO(157) | 7)
+
+#define PINMUX_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0)
+#define PINMUX_GPIO158__FUNC_SPI1_A_MI (MTK_PIN_NO(158) | 1)
+#define PINMUX_GPIO158__FUNC_SCP_SPI1_A_MI (MTK_PIN_NO(158) | 2)
+#define PINMUX_GPIO158__FUNC_MRG_DI (MTK_PIN_NO(158) | 3)
+#define PINMUX_GPIO158__FUNC_PTA_RXD (MTK_PIN_NO(158) | 4)
+#define PINMUX_GPIO158__FUNC_MD_URXD1 (MTK_PIN_NO(158) | 5)
+#define PINMUX_GPIO158__FUNC_UDI_TDO (MTK_PIN_NO(158) | 6)
+#define PINMUX_GPIO158__FUNC_DBG_MON_B12 (MTK_PIN_NO(158) | 7)
+
+#define PINMUX_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0)
+#define PINMUX_GPIO159__FUNC_SPI1_A_MO (MTK_PIN_NO(159) | 1)
+#define PINMUX_GPIO159__FUNC_SCP_SPI1_A_MO (MTK_PIN_NO(159) | 2)
+#define PINMUX_GPIO159__FUNC_MRG_DO (MTK_PIN_NO(159) | 3)
+#define PINMUX_GPIO159__FUNC_PTA_TXD (MTK_PIN_NO(159) | 4)
+#define PINMUX_GPIO159__FUNC_MD_UTXD1 (MTK_PIN_NO(159) | 5)
+#define PINMUX_GPIO159__FUNC_UDI_NTRST (MTK_PIN_NO(159) | 6)
+#define PINMUX_GPIO159__FUNC_DBG_MON_B13 (MTK_PIN_NO(159) | 7)
+
+#define PINMUX_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0)
+#define PINMUX_GPIO160__FUNC_SCL3 (MTK_PIN_NO(160) | 1)
+#define PINMUX_GPIO160__FUNC_SCP_SCL1 (MTK_PIN_NO(160) | 3)
+#define PINMUX_GPIO160__FUNC_DBG_MON_B14 (MTK_PIN_NO(160) | 7)
+
+#define PINMUX_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0)
+#define PINMUX_GPIO161__FUNC_SDA3 (MTK_PIN_NO(161) | 1)
+#define PINMUX_GPIO161__FUNC_SCP_SDA1 (MTK_PIN_NO(161) | 3)
+#define PINMUX_GPIO161__FUNC_DBG_MON_B15 (MTK_PIN_NO(161) | 7)
+
+#define PINMUX_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0)
+#define PINMUX_GPIO162__FUNC_ANT_SEL0 (MTK_PIN_NO(162) | 1)
+#define PINMUX_GPIO162__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(162) | 2)
+#define PINMUX_GPIO162__FUNC_UDI_TDI (MTK_PIN_NO(162) | 6)
+#define PINMUX_GPIO162__FUNC_DBG_MON_B16 (MTK_PIN_NO(162) | 7)
+
+#define PINMUX_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0)
+#define PINMUX_GPIO163__FUNC_ANT_SEL1 (MTK_PIN_NO(163) | 1)
+#define PINMUX_GPIO163__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(163) | 2)
+#define PINMUX_GPIO163__FUNC_DBG_MON_B17 (MTK_PIN_NO(163) | 7)
+
+#define PINMUX_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0)
+#define PINMUX_GPIO164__FUNC_ANT_SEL2 (MTK_PIN_NO(164) | 1)
+#define PINMUX_GPIO164__FUNC_SCP_SPI1_B_CK (MTK_PIN_NO(164) | 2)
+#define PINMUX_GPIO164__FUNC_TP_URXD1_AO (MTK_PIN_NO(164) | 3)
+#define PINMUX_GPIO164__FUNC_UCTS0 (MTK_PIN_NO(164) | 5)
+#define PINMUX_GPIO164__FUNC_DBG_MON_B18 (MTK_PIN_NO(164) | 7)
+
+#define PINMUX_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0)
+#define PINMUX_GPIO165__FUNC_ANT_SEL3 (MTK_PIN_NO(165) | 1)
+#define PINMUX_GPIO165__FUNC_SCP_SPI1_B_CS (MTK_PIN_NO(165) | 2)
+#define PINMUX_GPIO165__FUNC_TP_UTXD1_AO (MTK_PIN_NO(165) | 3)
+#define PINMUX_GPIO165__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(165) | 4)
+#define PINMUX_GPIO165__FUNC_URTS0 (MTK_PIN_NO(165) | 5)
+#define PINMUX_GPIO165__FUNC_DBG_MON_B19 (MTK_PIN_NO(165) | 7)
+
+#define PINMUX_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0)
+#define PINMUX_GPIO166__FUNC_ANT_SEL4 (MTK_PIN_NO(166) | 1)
+#define PINMUX_GPIO166__FUNC_SCP_SPI1_B_MI (MTK_PIN_NO(166) | 2)
+#define PINMUX_GPIO166__FUNC_TP_URXD2_AO (MTK_PIN_NO(166) | 3)
+#define PINMUX_GPIO166__FUNC_SRCLKENAI1 (MTK_PIN_NO(166) | 4)
+#define PINMUX_GPIO166__FUNC_UCTS1 (MTK_PIN_NO(166) | 5)
+#define PINMUX_GPIO166__FUNC_DBG_MON_B20 (MTK_PIN_NO(166) | 7)
+
+#define PINMUX_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0)
+#define PINMUX_GPIO167__FUNC_ANT_SEL5 (MTK_PIN_NO(167) | 1)
+#define PINMUX_GPIO167__FUNC_SCP_SPI1_B_MO (MTK_PIN_NO(167) | 2)
+#define PINMUX_GPIO167__FUNC_TP_UTXD2_AO (MTK_PIN_NO(167) | 3)
+#define PINMUX_GPIO167__FUNC_SRCLKENAI0 (MTK_PIN_NO(167) | 4)
+#define PINMUX_GPIO167__FUNC_URTS1 (MTK_PIN_NO(167) | 5)
+#define PINMUX_GPIO167__FUNC_DBG_MON_B21 (MTK_PIN_NO(167) | 7)
+
+#define PINMUX_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0)
+#define PINMUX_GPIO168__FUNC_ANT_SEL6 (MTK_PIN_NO(168) | 1)
+#define PINMUX_GPIO168__FUNC_SPI0_B_CLK (MTK_PIN_NO(168) | 2)
+#define PINMUX_GPIO168__FUNC_TP_UCTS1_AO (MTK_PIN_NO(168) | 3)
+#define PINMUX_GPIO168__FUNC_KPCOL2 (MTK_PIN_NO(168) | 4)
+#define PINMUX_GPIO168__FUNC_MD_UCTS0 (MTK_PIN_NO(168) | 5)
+#define PINMUX_GPIO168__FUNC_SCL11 (MTK_PIN_NO(168) | 6)
+#define PINMUX_GPIO168__FUNC_DBG_MON_B22 (MTK_PIN_NO(168) | 7)
+
+#define PINMUX_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0)
+#define PINMUX_GPIO169__FUNC_ANT_SEL7 (MTK_PIN_NO(169) | 1)
+#define PINMUX_GPIO169__FUNC_SPI0_B_CSB (MTK_PIN_NO(169) | 2)
+#define PINMUX_GPIO169__FUNC_TP_URTS1_AO (MTK_PIN_NO(169) | 3)
+#define PINMUX_GPIO169__FUNC_KPROW2 (MTK_PIN_NO(169) | 4)
+#define PINMUX_GPIO169__FUNC_MD_URTS0 (MTK_PIN_NO(169) | 5)
+#define PINMUX_GPIO169__FUNC_SDA11 (MTK_PIN_NO(169) | 6)
+#define PINMUX_GPIO169__FUNC_DBG_MON_B23 (MTK_PIN_NO(169) | 7)
+
+#define PINMUX_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0)
+#define PINMUX_GPIO170__FUNC_ANT_SEL8 (MTK_PIN_NO(170) | 1)
+#define PINMUX_GPIO170__FUNC_SPI0_B_MI (MTK_PIN_NO(170) | 2)
+#define PINMUX_GPIO170__FUNC_TP_UCTS2_AO (MTK_PIN_NO(170) | 3)
+#define PINMUX_GPIO170__FUNC_SRCLKENAI1 (MTK_PIN_NO(170) | 4)
+#define PINMUX_GPIO170__FUNC_MD_UCTS1 (MTK_PIN_NO(170) | 5)
+#define PINMUX_GPIO170__FUNC_DBG_MON_B24 (MTK_PIN_NO(170) | 7)
+
+#define PINMUX_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0)
+#define PINMUX_GPIO171__FUNC_ANT_SEL9 (MTK_PIN_NO(171) | 1)
+#define PINMUX_GPIO171__FUNC_SPI0_B_MO (MTK_PIN_NO(171) | 2)
+#define PINMUX_GPIO171__FUNC_TP_URTS2_AO (MTK_PIN_NO(171) | 3)
+#define PINMUX_GPIO171__FUNC_SRCLKENAI0 (MTK_PIN_NO(171) | 4)
+#define PINMUX_GPIO171__FUNC_MD_URTS1 (MTK_PIN_NO(171) | 5)
+#define PINMUX_GPIO171__FUNC_DBG_MON_B25 (MTK_PIN_NO(171) | 7)
+
+#define PINMUX_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0)
+#define PINMUX_GPIO172__FUNC_CONN_TOP_CLK (MTK_PIN_NO(172) | 1)
+#define PINMUX_GPIO172__FUNC_AUXIF_CLK0 (MTK_PIN_NO(172) | 2)
+#define PINMUX_GPIO172__FUNC_DBG_MON_B29 (MTK_PIN_NO(172) | 7)
+
+#define PINMUX_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0)
+#define PINMUX_GPIO173__FUNC_CONN_TOP_DATA (MTK_PIN_NO(173) | 1)
+#define PINMUX_GPIO173__FUNC_AUXIF_ST0 (MTK_PIN_NO(173) | 2)
+#define PINMUX_GPIO173__FUNC_DBG_MON_B30 (MTK_PIN_NO(173) | 7)
+
+#define PINMUX_GPIO174__FUNC_GPIO174 (MTK_PIN_NO(174) | 0)
+#define PINMUX_GPIO174__FUNC_CONN_HRST_B (MTK_PIN_NO(174) | 1)
+#define PINMUX_GPIO174__FUNC_DBG_MON_B28 (MTK_PIN_NO(174) | 7)
+
+#define PINMUX_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0)
+#define PINMUX_GPIO175__FUNC_CONN_WB_PTA (MTK_PIN_NO(175) | 1)
+#define PINMUX_GPIO175__FUNC_DBG_MON_B31 (MTK_PIN_NO(175) | 7)
+
+#define PINMUX_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0)
+#define PINMUX_GPIO176__FUNC_CONN_BT_CLK (MTK_PIN_NO(176) | 1)
+#define PINMUX_GPIO176__FUNC_AUXIF_CLK1 (MTK_PIN_NO(176) | 2)
+#define PINMUX_GPIO176__FUNC_DBG_MON_B26 (MTK_PIN_NO(176) | 7)
+
+#define PINMUX_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0)
+#define PINMUX_GPIO177__FUNC_CONN_BT_DATA (MTK_PIN_NO(177) | 1)
+#define PINMUX_GPIO177__FUNC_AUXIF_ST1 (MTK_PIN_NO(177) | 2)
+#define PINMUX_GPIO177__FUNC_DBG_MON_B27 (MTK_PIN_NO(177) | 7)
+
+#define PINMUX_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0)
+#define PINMUX_GPIO178__FUNC_CONN_WF_CTRL0 (MTK_PIN_NO(178) | 1)
+
+#define PINMUX_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0)
+#define PINMUX_GPIO179__FUNC_CONN_WF_CTRL1 (MTK_PIN_NO(179) | 1)
+#define PINMUX_GPIO179__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(179) | 2)
+
+#define PINMUX_GPIO180__FUNC_GPIO180 (MTK_PIN_NO(180) | 0)
+#define PINMUX_GPIO180__FUNC_CONN_WF_CTRL2 (MTK_PIN_NO(180) | 1)
+#define PINMUX_GPIO180__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(180) | 2)
+
+#define PINMUX_GPIO181__FUNC_GPIO181 (MTK_PIN_NO(181) | 0)
+#define PINMUX_GPIO181__FUNC_CONN_WF_CTRL3 (MTK_PIN_NO(181) | 1)
+
+#define PINMUX_GPIO182__FUNC_GPIO182 (MTK_PIN_NO(182) | 0)
+#define PINMUX_GPIO182__FUNC_CONN_WF_CTRL4 (MTK_PIN_NO(182) | 1)
+
+#define PINMUX_GPIO183__FUNC_GPIO183 (MTK_PIN_NO(183) | 0)
+#define PINMUX_GPIO183__FUNC_MSDC0_CMD (MTK_PIN_NO(183) | 1)
+
+#define PINMUX_GPIO184__FUNC_GPIO184 (MTK_PIN_NO(184) | 0)
+#define PINMUX_GPIO184__FUNC_MSDC0_DAT0 (MTK_PIN_NO(184) | 1)
+
+#define PINMUX_GPIO185__FUNC_GPIO185 (MTK_PIN_NO(185) | 0)
+#define PINMUX_GPIO185__FUNC_MSDC0_DAT2 (MTK_PIN_NO(185) | 1)
+
+#define PINMUX_GPIO186__FUNC_GPIO186 (MTK_PIN_NO(186) | 0)
+#define PINMUX_GPIO186__FUNC_MSDC0_DAT4 (MTK_PIN_NO(186) | 1)
+
+#define PINMUX_GPIO187__FUNC_GPIO187 (MTK_PIN_NO(187) | 0)
+#define PINMUX_GPIO187__FUNC_MSDC0_DAT6 (MTK_PIN_NO(187) | 1)
+
+#define PINMUX_GPIO188__FUNC_GPIO188 (MTK_PIN_NO(188) | 0)
+#define PINMUX_GPIO188__FUNC_MSDC0_DAT1 (MTK_PIN_NO(188) | 1)
+
+#define PINMUX_GPIO189__FUNC_GPIO189 (MTK_PIN_NO(189) | 0)
+#define PINMUX_GPIO189__FUNC_MSDC0_DAT5 (MTK_PIN_NO(189) | 1)
+
+#define PINMUX_GPIO190__FUNC_GPIO190 (MTK_PIN_NO(190) | 0)
+#define PINMUX_GPIO190__FUNC_MSDC0_DAT7 (MTK_PIN_NO(190) | 1)
+
+#define PINMUX_GPIO191__FUNC_GPIO191 (MTK_PIN_NO(191) | 0)
+#define PINMUX_GPIO191__FUNC_MSDC0_DSL (MTK_PIN_NO(191) | 1)
+#define PINMUX_GPIO191__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(191) | 2)
+#define PINMUX_GPIO191__FUNC_IDDIG (MTK_PIN_NO(191) | 3)
+#define PINMUX_GPIO191__FUNC_DMIC_CLK (MTK_PIN_NO(191) | 4)
+
+#define PINMUX_GPIO192__FUNC_GPIO192 (MTK_PIN_NO(192) | 0)
+#define PINMUX_GPIO192__FUNC_MSDC0_CLK (MTK_PIN_NO(192) | 1)
+#define PINMUX_GPIO192__FUNC_USB_DRVVBUS (MTK_PIN_NO(192) | 3)
+#define PINMUX_GPIO192__FUNC_DMIC_DAT (MTK_PIN_NO(192) | 4)
+
+#define PINMUX_GPIO193__FUNC_GPIO193 (MTK_PIN_NO(193) | 0)
+#define PINMUX_GPIO193__FUNC_MSDC0_DAT3 (MTK_PIN_NO(193) | 1)
+
+#define PINMUX_GPIO194__FUNC_GPIO194 (MTK_PIN_NO(194) | 0)
+#define PINMUX_GPIO194__FUNC_MSDC0_RSTB (MTK_PIN_NO(194) | 1)
+
+#define PINMUX_GPIO195__FUNC_GPIO195 (MTK_PIN_NO(195) | 0)
+#define PINMUX_GPIO195__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(195) | 1)
+#define PINMUX_GPIO195__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(195) | 2)
+
+#define PINMUX_GPIO196__FUNC_GPIO196 (MTK_PIN_NO(196) | 0)
+#define PINMUX_GPIO196__FUNC_AUD_DAT_MOSI2 (MTK_PIN_NO(196) | 1)
+
+#define PINMUX_GPIO197__FUNC_GPIO197 (MTK_PIN_NO(197) | 0)
+#define PINMUX_GPIO197__FUNC_AUD_NLE_MOSI1 (MTK_PIN_NO(197) | 1)
+#define PINMUX_GPIO197__FUNC_AUD_CLK_MISO (MTK_PIN_NO(197) | 2)
+#define PINMUX_GPIO197__FUNC_I2S2_MCK (MTK_PIN_NO(197) | 3)
+#define PINMUX_GPIO197__FUNC_I2S6_MCK (MTK_PIN_NO(197) | 4)
+#define PINMUX_GPIO197__FUNC_I2S8_MCK (MTK_PIN_NO(197) | 5)
+
+#define PINMUX_GPIO198__FUNC_GPIO198 (MTK_PIN_NO(198) | 0)
+#define PINMUX_GPIO198__FUNC_AUD_NLE_MOSI0 (MTK_PIN_NO(198) | 1)
+#define PINMUX_GPIO198__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(198) | 2)
+#define PINMUX_GPIO198__FUNC_I2S2_BCK (MTK_PIN_NO(198) | 3)
+#define PINMUX_GPIO198__FUNC_I2S6_BCK (MTK_PIN_NO(198) | 4)
+#define PINMUX_GPIO198__FUNC_I2S8_BCK (MTK_PIN_NO(198) | 5)
+
+#define PINMUX_GPIO199__FUNC_GPIO199 (MTK_PIN_NO(199) | 0)
+#define PINMUX_GPIO199__FUNC_AUD_DAT_MISO2 (MTK_PIN_NO(199) | 1)
+#define PINMUX_GPIO199__FUNC_I2S2_DI2 (MTK_PIN_NO(199) | 3)
+
+#define PINMUX_GPIO200__FUNC_GPIO200 (MTK_PIN_NO(200) | 0)
+#define PINMUX_GPIO200__FUNC_SCL6 (MTK_PIN_NO(200) | 1)
+#define PINMUX_GPIO200__FUNC_SCP_SCL1 (MTK_PIN_NO(200) | 3)
+#define PINMUX_GPIO200__FUNC_SCL_6306 (MTK_PIN_NO(200) | 4)
+#define PINMUX_GPIO200__FUNC_DBG_MON_A4 (MTK_PIN_NO(200) | 7)
+
+#define PINMUX_GPIO201__FUNC_GPIO201 (MTK_PIN_NO(201) | 0)
+#define PINMUX_GPIO201__FUNC_SDA6 (MTK_PIN_NO(201) | 1)
+#define PINMUX_GPIO201__FUNC_SCP_SDA1 (MTK_PIN_NO(201) | 3)
+#define PINMUX_GPIO201__FUNC_SDA_6306 (MTK_PIN_NO(201) | 4)
+#define PINMUX_GPIO201__FUNC_DBG_MON_A5 (MTK_PIN_NO(201) | 7)
+
+#define PINMUX_GPIO202__FUNC_GPIO202 (MTK_PIN_NO(202) | 0)
+#define PINMUX_GPIO202__FUNC_SCL5 (MTK_PIN_NO(202) | 1)
+
+#define PINMUX_GPIO203__FUNC_GPIO203 (MTK_PIN_NO(203) | 0)
+#define PINMUX_GPIO203__FUNC_SDA5 (MTK_PIN_NO(203) | 1)
+
+#define PINMUX_GPIO204__FUNC_GPIO204 (MTK_PIN_NO(204) | 0)
+#define PINMUX_GPIO204__FUNC_SCL0 (MTK_PIN_NO(204) | 1)
+#define PINMUX_GPIO204__FUNC_SPI7_A_CLK (MTK_PIN_NO(204) | 6)
+#define PINMUX_GPIO204__FUNC_DBG_MON_A2 (MTK_PIN_NO(204) | 7)
+
+#define PINMUX_GPIO205__FUNC_GPIO205 (MTK_PIN_NO(205) | 0)
+#define PINMUX_GPIO205__FUNC_SDA0 (MTK_PIN_NO(205) | 1)
+#define PINMUX_GPIO205__FUNC_SPI7_A_CSB (MTK_PIN_NO(205) | 6)
+#define PINMUX_GPIO205__FUNC_DBG_MON_A3 (MTK_PIN_NO(205) | 7)
+
+#define PINMUX_GPIO206__FUNC_GPIO206 (MTK_PIN_NO(206) | 0)
+#define PINMUX_GPIO206__FUNC_SRCLKENA0 (MTK_PIN_NO(206) | 1)
+
+#define PINMUX_GPIO207__FUNC_GPIO207 (MTK_PIN_NO(207) | 0)
+#define PINMUX_GPIO207__FUNC_SRCLKENA1 (MTK_PIN_NO(207) | 1)
+
+#define PINMUX_GPIO208__FUNC_GPIO208 (MTK_PIN_NO(208) | 0)
+#define PINMUX_GPIO208__FUNC_WATCHDOG (MTK_PIN_NO(208) | 1)
+
+#define PINMUX_GPIO209__FUNC_GPIO209 (MTK_PIN_NO(209) | 0)
+#define PINMUX_GPIO209__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(209) | 1)
+#define PINMUX_GPIO209__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(209) | 2)
+
+#define PINMUX_GPIO210__FUNC_GPIO210 (MTK_PIN_NO(210) | 0)
+#define PINMUX_GPIO210__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(210) | 1)
+
+#define PINMUX_GPIO211__FUNC_GPIO211 (MTK_PIN_NO(211) | 0)
+#define PINMUX_GPIO211__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(211) | 1)
+#define PINMUX_GPIO211__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(211) | 2)
+
+#define PINMUX_GPIO212__FUNC_GPIO212 (MTK_PIN_NO(212) | 0)
+#define PINMUX_GPIO212__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(212) | 1)
+
+#define PINMUX_GPIO213__FUNC_GPIO213 (MTK_PIN_NO(213) | 0)
+#define PINMUX_GPIO213__FUNC_RTC32K_CK (MTK_PIN_NO(213) | 1)
+
+#define PINMUX_GPIO214__FUNC_GPIO214 (MTK_PIN_NO(214) | 0)
+#define PINMUX_GPIO214__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(214) | 1)
+#define PINMUX_GPIO214__FUNC_I2S1_MCK (MTK_PIN_NO(214) | 3)
+#define PINMUX_GPIO214__FUNC_I2S7_MCK (MTK_PIN_NO(214) | 4)
+#define PINMUX_GPIO214__FUNC_I2S9_MCK (MTK_PIN_NO(214) | 5)
+
+#define PINMUX_GPIO215__FUNC_GPIO215 (MTK_PIN_NO(215) | 0)
+#define PINMUX_GPIO215__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(215) | 1)
+#define PINMUX_GPIO215__FUNC_I2S1_BCK (MTK_PIN_NO(215) | 3)
+#define PINMUX_GPIO215__FUNC_I2S7_BCK (MTK_PIN_NO(215) | 4)
+#define PINMUX_GPIO215__FUNC_I2S9_BCK (MTK_PIN_NO(215) | 5)
+
+#define PINMUX_GPIO216__FUNC_GPIO216 (MTK_PIN_NO(216) | 0)
+#define PINMUX_GPIO216__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(216) | 1)
+#define PINMUX_GPIO216__FUNC_I2S1_LRCK (MTK_PIN_NO(216) | 3)
+#define PINMUX_GPIO216__FUNC_I2S7_LRCK (MTK_PIN_NO(216) | 4)
+#define PINMUX_GPIO216__FUNC_I2S9_LRCK (MTK_PIN_NO(216) | 5)
+
+#define PINMUX_GPIO217__FUNC_GPIO217 (MTK_PIN_NO(217) | 0)
+#define PINMUX_GPIO217__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(217) | 1)
+#define PINMUX_GPIO217__FUNC_I2S1_DO (MTK_PIN_NO(217) | 3)
+#define PINMUX_GPIO217__FUNC_I2S7_DO (MTK_PIN_NO(217) | 4)
+#define PINMUX_GPIO217__FUNC_I2S9_DO (MTK_PIN_NO(217) | 5)
+
+#define PINMUX_GPIO218__FUNC_GPIO218 (MTK_PIN_NO(218) | 0)
+#define PINMUX_GPIO218__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(218) | 1)
+#define PINMUX_GPIO218__FUNC_VOW_DAT_MISO (MTK_PIN_NO(218) | 2)
+#define PINMUX_GPIO218__FUNC_I2S2_LRCK (MTK_PIN_NO(218) | 3)
+#define PINMUX_GPIO218__FUNC_I2S6_LRCK (MTK_PIN_NO(218) | 4)
+#define PINMUX_GPIO218__FUNC_I2S8_LRCK (MTK_PIN_NO(218) | 5)
+
+#define PINMUX_GPIO219__FUNC_GPIO219 (MTK_PIN_NO(219) | 0)
+#define PINMUX_GPIO219__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(219) | 1)
+#define PINMUX_GPIO219__FUNC_VOW_CLK_MISO (MTK_PIN_NO(219) | 2)
+#define PINMUX_GPIO219__FUNC_I2S2_DI (MTK_PIN_NO(219) | 3)
+#define PINMUX_GPIO219__FUNC_I2S6_DI (MTK_PIN_NO(219) | 4)
+#define PINMUX_GPIO219__FUNC_I2S8_DI (MTK_PIN_NO(219) | 5)
+
+#endif /* __MT8192_PINFUNC_H */
diff --git a/include/dt-bindings/pinctrl/mt8195-pinfunc.h b/include/dt-bindings/pinctrl/mt8195-pinfunc.h
new file mode 100644
index 000000000000..666331bb9b40
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt8195-pinfunc.h
@@ -0,0 +1,962 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 MediaTek Inc.
+ * Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
+ */
+
+#ifndef __MT8195_PINFUNC_H
+#define __MT8195_PINFUNC_H
+
+#include "mt65xx.h"
+
+#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_TP_GPIO0_AO (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_MSDC2_CMD (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_TDMIN_MCK (MTK_PIN_NO(0) | 3)
+#define PINMUX_GPIO0__FUNC_CLKM0 (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_PERSTN_1 (MTK_PIN_NO(0) | 5)
+#define PINMUX_GPIO0__FUNC_IDDIG_1P (MTK_PIN_NO(0) | 6)
+#define PINMUX_GPIO0__FUNC_DMIC4_CLK (MTK_PIN_NO(0) | 7)
+
+#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_TP_GPIO1_AO (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_MSDC2_CLK (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_TDMIN_DI (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_CLKM1 (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_CLKREQN_1 (MTK_PIN_NO(1) | 5)
+#define PINMUX_GPIO1__FUNC_USB_DRVVBUS_1P (MTK_PIN_NO(1) | 6)
+#define PINMUX_GPIO1__FUNC_DMIC4_DAT (MTK_PIN_NO(1) | 7)
+
+#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_TP_GPIO2_AO (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_MSDC2_DAT3 (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_TDMIN_LRCK (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_CLKM2 (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_WAKEN_1 (MTK_PIN_NO(2) | 5)
+#define PINMUX_GPIO2__FUNC_DMIC2_CLK (MTK_PIN_NO(2) | 7)
+
+#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_TP_GPIO3_AO (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_MSDC2_DAT0 (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_TDMIN_BCK (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_CLKM3 (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_DMIC2_DAT (MTK_PIN_NO(3) | 7)
+
+#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_TP_GPIO4_AO (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_MSDC2_DAT2 (MTK_PIN_NO(4) | 2)
+#define PINMUX_GPIO4__FUNC_SPDIF_IN1 (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_UTXD3 (MTK_PIN_NO(4) | 4)
+#define PINMUX_GPIO4__FUNC_SDA2 (MTK_PIN_NO(4) | 5)
+#define PINMUX_GPIO4__FUNC_IDDIG_2P (MTK_PIN_NO(4) | 7)
+
+#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_TP_GPIO5_AO (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_MSDC2_DAT1 (MTK_PIN_NO(5) | 2)
+#define PINMUX_GPIO5__FUNC_SPDIF_IN0 (MTK_PIN_NO(5) | 3)
+#define PINMUX_GPIO5__FUNC_URXD3 (MTK_PIN_NO(5) | 4)
+#define PINMUX_GPIO5__FUNC_SCL2 (MTK_PIN_NO(5) | 5)
+#define PINMUX_GPIO5__FUNC_USB_DRVVBUS_2P (MTK_PIN_NO(5) | 7)
+
+#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_TP_GPIO6_AO (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_DP_TX_HPD (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_I2SO1_D4 (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_UTXD4 (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_CMVREF3 (MTK_PIN_NO(6) | 5)
+#define PINMUX_GPIO6__FUNC_DMIC3_CLK (MTK_PIN_NO(6) | 7)
+
+#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_TP_GPIO7_AO (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_EDP_TX_HPD (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_I2SO1_D5 (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_URXD4 (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_CMVREF4 (MTK_PIN_NO(7) | 5)
+#define PINMUX_GPIO7__FUNC_DMIC3_DAT (MTK_PIN_NO(7) | 7)
+
+#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_SDA0 (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_PWM_0 (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_SPDIF_OUT (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_LVTS_FOUT (MTK_PIN_NO(8) | 6)
+#define PINMUX_GPIO8__FUNC_DBG_MON_A0 (MTK_PIN_NO(8) | 7)
+
+#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_SCL0 (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_PWM_1 (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_IR_IN (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_LVTS_SDO (MTK_PIN_NO(9) | 6)
+#define PINMUX_GPIO9__FUNC_DBG_MON_A1 (MTK_PIN_NO(9) | 7)
+
+#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_SDA1 (MTK_PIN_NO(10) | 1)
+#define PINMUX_GPIO10__FUNC_PWM_2 (MTK_PIN_NO(10) | 2)
+#define PINMUX_GPIO10__FUNC_ADSP_URXD0 (MTK_PIN_NO(10) | 3)
+#define PINMUX_GPIO10__FUNC_SPDIF_IN1 (MTK_PIN_NO(10) | 4)
+#define PINMUX_GPIO10__FUNC_LVTS_SCF (MTK_PIN_NO(10) | 6)
+#define PINMUX_GPIO10__FUNC_DBG_MON_A2 (MTK_PIN_NO(10) | 7)
+
+#define PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_SCL1 (MTK_PIN_NO(11) | 1)
+#define PINMUX_GPIO11__FUNC_PWM_3 (MTK_PIN_NO(11) | 2)
+#define PINMUX_GPIO11__FUNC_ADSP_UTXD0 (MTK_PIN_NO(11) | 3)
+#define PINMUX_GPIO11__FUNC_SPDIF_IN0 (MTK_PIN_NO(11) | 4)
+#define PINMUX_GPIO11__FUNC_LVTS_SCK (MTK_PIN_NO(11) | 6)
+#define PINMUX_GPIO11__FUNC_DBG_MON_A3 (MTK_PIN_NO(11) | 7)
+
+#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_SDA2 (MTK_PIN_NO(12) | 1)
+#define PINMUX_GPIO12__FUNC_DMIC3_DAT_R (MTK_PIN_NO(12) | 2)
+#define PINMUX_GPIO12__FUNC_I2SO1_D6 (MTK_PIN_NO(12) | 3)
+#define PINMUX_GPIO12__FUNC_LVTS_SDI (MTK_PIN_NO(12) | 6)
+#define PINMUX_GPIO12__FUNC_DBG_MON_A4 (MTK_PIN_NO(12) | 7)
+
+#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_SCL2 (MTK_PIN_NO(13) | 1)
+#define PINMUX_GPIO13__FUNC_DMIC4_DAT_R (MTK_PIN_NO(13) | 2)
+#define PINMUX_GPIO13__FUNC_I2SO1_D7 (MTK_PIN_NO(13) | 3)
+#define PINMUX_GPIO13__FUNC_DBG_MON_A5 (MTK_PIN_NO(13) | 7)
+
+#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_SDA3 (MTK_PIN_NO(14) | 1)
+#define PINMUX_GPIO14__FUNC_DMIC3_DAT (MTK_PIN_NO(14) | 2)
+#define PINMUX_GPIO14__FUNC_TDMIN_MCK (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_DBG_MON_A6 (MTK_PIN_NO(14) | 7)
+
+#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_SCL3 (MTK_PIN_NO(15) | 1)
+#define PINMUX_GPIO15__FUNC_DMIC3_CLK (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_TDMIN_DI (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_DBG_MON_A7 (MTK_PIN_NO(15) | 7)
+
+#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_SDA4 (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_DMIC4_DAT (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_TDMIN_LRCK (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_DBG_MON_A8 (MTK_PIN_NO(16) | 7)
+
+#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_SCL4 (MTK_PIN_NO(17) | 1)
+#define PINMUX_GPIO17__FUNC_DMIC4_CLK (MTK_PIN_NO(17) | 2)
+#define PINMUX_GPIO17__FUNC_TDMIN_BCK (MTK_PIN_NO(17) | 3)
+#define PINMUX_GPIO17__FUNC_DBG_MON_A9 (MTK_PIN_NO(17) | 7)
+
+#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_DP_TX_HPD (MTK_PIN_NO(18) | 1)
+
+#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_WAKEN (MTK_PIN_NO(19) | 1)
+#define PINMUX_GPIO19__FUNC_SCP_SDA1 (MTK_PIN_NO(19) | 2)
+#define PINMUX_GPIO19__FUNC_MD32_0_JTAG_TCK (MTK_PIN_NO(19) | 3)
+#define PINMUX_GPIO19__FUNC_ADSP_JTAG0_TCK (MTK_PIN_NO(19) | 4)
+#define PINMUX_GPIO19__FUNC_SDA6 (MTK_PIN_NO(19) | 5)
+
+#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_PERSTN (MTK_PIN_NO(20) | 1)
+#define PINMUX_GPIO20__FUNC_SCP_SCL1 (MTK_PIN_NO(20) | 2)
+#define PINMUX_GPIO20__FUNC_MD32_0_JTAG_TMS (MTK_PIN_NO(20) | 3)
+#define PINMUX_GPIO20__FUNC_ADSP_JTAG0_TMS (MTK_PIN_NO(20) | 4)
+#define PINMUX_GPIO20__FUNC_SCL6 (MTK_PIN_NO(20) | 5)
+
+#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_CLKREQN (MTK_PIN_NO(21) | 1)
+#define PINMUX_GPIO21__FUNC_MD32_0_JTAG_TDI (MTK_PIN_NO(21) | 3)
+#define PINMUX_GPIO21__FUNC_ADSP_JTAG0_TDI (MTK_PIN_NO(21) | 4)
+#define PINMUX_GPIO21__FUNC_SCP_SDA1 (MTK_PIN_NO(21) | 5)
+
+#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_CMMCLK0 (MTK_PIN_NO(22) | 1)
+#define PINMUX_GPIO22__FUNC_PERSTN_1 (MTK_PIN_NO(22) | 2)
+#define PINMUX_GPIO22__FUNC_SCP_SCL1 (MTK_PIN_NO(22) | 5)
+#define PINMUX_GPIO22__FUNC_MD32_0_GPIO0 (MTK_PIN_NO(22) | 7)
+
+#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_CMMCLK1 (MTK_PIN_NO(23) | 1)
+#define PINMUX_GPIO23__FUNC_CLKREQN_1 (MTK_PIN_NO(23) | 2)
+#define PINMUX_GPIO23__FUNC_SDA4 (MTK_PIN_NO(23) | 3)
+#define PINMUX_GPIO23__FUNC_DMIC1_CLK (MTK_PIN_NO(23) | 4)
+#define PINMUX_GPIO23__FUNC_SCP_SDA0 (MTK_PIN_NO(23) | 5)
+#define PINMUX_GPIO23__FUNC_MD32_0_GPIO1 (MTK_PIN_NO(23) | 7)
+
+#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_CMMCLK2 (MTK_PIN_NO(24) | 1)
+#define PINMUX_GPIO24__FUNC_WAKEN_1 (MTK_PIN_NO(24) | 2)
+#define PINMUX_GPIO24__FUNC_SCL4 (MTK_PIN_NO(24) | 3)
+#define PINMUX_GPIO24__FUNC_DMIC1_DAT (MTK_PIN_NO(24) | 4)
+#define PINMUX_GPIO24__FUNC_SCP_SCL0 (MTK_PIN_NO(24) | 5)
+#define PINMUX_GPIO24__FUNC_LVTS_26M (MTK_PIN_NO(24) | 6)
+#define PINMUX_GPIO24__FUNC_MD32_0_GPIO2 (MTK_PIN_NO(24) | 7)
+
+#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_CMMRST (MTK_PIN_NO(25) | 1)
+#define PINMUX_GPIO25__FUNC_CMMCLK3 (MTK_PIN_NO(25) | 2)
+#define PINMUX_GPIO25__FUNC_SPDIF_OUT (MTK_PIN_NO(25) | 3)
+#define PINMUX_GPIO25__FUNC_SDA6 (MTK_PIN_NO(25) | 4)
+#define PINMUX_GPIO25__FUNC_ADSP_JTAG0_TRSTN (MTK_PIN_NO(25) | 5)
+#define PINMUX_GPIO25__FUNC_MD32_0_JTAG_TRST (MTK_PIN_NO(25) | 6)
+
+#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_CMMPDN (MTK_PIN_NO(26) | 1)
+#define PINMUX_GPIO26__FUNC_CMMCLK4 (MTK_PIN_NO(26) | 2)
+#define PINMUX_GPIO26__FUNC_IR_IN (MTK_PIN_NO(26) | 3)
+#define PINMUX_GPIO26__FUNC_SCL6 (MTK_PIN_NO(26) | 4)
+#define PINMUX_GPIO26__FUNC_ADSP_JTAG0_TDO (MTK_PIN_NO(26) | 5)
+#define PINMUX_GPIO26__FUNC_MD32_0_JTAG_TDO (MTK_PIN_NO(26) | 6)
+
+#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_HDMIRX20_HTPLG (MTK_PIN_NO(27) | 1)
+#define PINMUX_GPIO27__FUNC_CMFLASH0 (MTK_PIN_NO(27) | 2)
+#define PINMUX_GPIO27__FUNC_MD32_0_TXD (MTK_PIN_NO(27) | 3)
+#define PINMUX_GPIO27__FUNC_TP_UTXD2_AO (MTK_PIN_NO(27) | 4)
+#define PINMUX_GPIO27__FUNC_SCL7 (MTK_PIN_NO(27) | 5)
+#define PINMUX_GPIO27__FUNC_UCTS2 (MTK_PIN_NO(27) | 6)
+#define PINMUX_GPIO27__FUNC_DBG_MON_A18 (MTK_PIN_NO(27) | 7)
+
+#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_HDMIRX20_PWR5V (MTK_PIN_NO(28) | 1)
+#define PINMUX_GPIO28__FUNC_CMFLASH1 (MTK_PIN_NO(28) | 2)
+#define PINMUX_GPIO28__FUNC_MD32_0_RXD (MTK_PIN_NO(28) | 3)
+#define PINMUX_GPIO28__FUNC_TP_URXD2_AO (MTK_PIN_NO(28) | 4)
+#define PINMUX_GPIO28__FUNC_SDA7 (MTK_PIN_NO(28) | 5)
+#define PINMUX_GPIO28__FUNC_URTS2 (MTK_PIN_NO(28) | 6)
+#define PINMUX_GPIO28__FUNC_DBG_MON_A19 (MTK_PIN_NO(28) | 7)
+
+#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_HDMIRX20_SCL (MTK_PIN_NO(29) | 1)
+#define PINMUX_GPIO29__FUNC_CMFLASH2 (MTK_PIN_NO(29) | 2)
+#define PINMUX_GPIO29__FUNC_SCL5 (MTK_PIN_NO(29) | 3)
+#define PINMUX_GPIO29__FUNC_TP_URTS2_AO (MTK_PIN_NO(29) | 4)
+#define PINMUX_GPIO29__FUNC_UTXD2 (MTK_PIN_NO(29) | 6)
+#define PINMUX_GPIO29__FUNC_DBG_MON_A20 (MTK_PIN_NO(29) | 7)
+
+#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_HDMIRX20_SDA (MTK_PIN_NO(30) | 1)
+#define PINMUX_GPIO30__FUNC_CMFLASH3 (MTK_PIN_NO(30) | 2)
+#define PINMUX_GPIO30__FUNC_SDA5 (MTK_PIN_NO(30) | 3)
+#define PINMUX_GPIO30__FUNC_TP_UCTS2_AO (MTK_PIN_NO(30) | 4)
+#define PINMUX_GPIO30__FUNC_URXD2 (MTK_PIN_NO(30) | 6)
+#define PINMUX_GPIO30__FUNC_DBG_MON_A21 (MTK_PIN_NO(30) | 7)
+
+#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_HDMITX20_PWR5V (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_DMIC1_DAT_R (MTK_PIN_NO(31) | 2)
+#define PINMUX_GPIO31__FUNC_PERSTN (MTK_PIN_NO(31) | 3)
+#define PINMUX_GPIO31__FUNC_DBG_MON_A22 (MTK_PIN_NO(31) | 7)
+
+#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_HDMITX20_HTPLG (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_CLKREQN (MTK_PIN_NO(32) | 3)
+#define PINMUX_GPIO32__FUNC_DBG_MON_A23 (MTK_PIN_NO(32) | 7)
+
+#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_HDMITX20_CEC (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_CMVREF0 (MTK_PIN_NO(33) | 2)
+#define PINMUX_GPIO33__FUNC_WAKEN (MTK_PIN_NO(33) | 3)
+
+#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_HDMITX20_SCL (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_CMVREF1 (MTK_PIN_NO(34) | 2)
+#define PINMUX_GPIO34__FUNC_SCL7 (MTK_PIN_NO(34) | 3)
+#define PINMUX_GPIO34__FUNC_SCL6 (MTK_PIN_NO(34) | 4)
+#define PINMUX_GPIO34__FUNC_DBG_MON_A24 (MTK_PIN_NO(34) | 7)
+
+#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_HDMITX20_SDA (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_CMVREF2 (MTK_PIN_NO(35) | 2)
+#define PINMUX_GPIO35__FUNC_SDA7 (MTK_PIN_NO(35) | 3)
+#define PINMUX_GPIO35__FUNC_SDA6 (MTK_PIN_NO(35) | 4)
+#define PINMUX_GPIO35__FUNC_DBG_MON_A25 (MTK_PIN_NO(35) | 7)
+
+#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_RTC32K_CK (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_DBG_MON_A27 (MTK_PIN_NO(36) | 7)
+
+#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_WATCHDOG (MTK_PIN_NO(37) | 1)
+#define PINMUX_GPIO37__FUNC_DBG_MON_A28 (MTK_PIN_NO(37) | 7)
+
+#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_SRCLKENA0 (MTK_PIN_NO(38) | 1)
+#define PINMUX_GPIO38__FUNC_DBG_MON_A29 (MTK_PIN_NO(38) | 7)
+
+#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_SRCLKENA1 (MTK_PIN_NO(39) | 1)
+#define PINMUX_GPIO39__FUNC_DMIC2_DAT_R (MTK_PIN_NO(39) | 2)
+#define PINMUX_GPIO39__FUNC_DBG_MON_A30 (MTK_PIN_NO(39) | 7)
+
+#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_SPIM3_CSB (MTK_PIN_NO(40) | 3)
+#define PINMUX_GPIO40__FUNC_DBG_MON_A31 (MTK_PIN_NO(40) | 7)
+
+#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(41) | 1)
+#define PINMUX_GPIO41__FUNC_SPIM3_CLK (MTK_PIN_NO(41) | 3)
+#define PINMUX_GPIO41__FUNC_DBG_MON_A32 (MTK_PIN_NO(41) | 7)
+
+#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(42) | 1)
+#define PINMUX_GPIO42__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(42) | 2)
+#define PINMUX_GPIO42__FUNC_SPIM3_MO (MTK_PIN_NO(42) | 3)
+#define PINMUX_GPIO42__FUNC_DBG_MON_B0 (MTK_PIN_NO(42) | 7)
+
+#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(43) | 1)
+#define PINMUX_GPIO43__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(43) | 2)
+#define PINMUX_GPIO43__FUNC_SPIM3_MI (MTK_PIN_NO(43) | 3)
+#define PINMUX_GPIO43__FUNC_DBG_MON_B1 (MTK_PIN_NO(43) | 7)
+
+#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_SPMI_M_SCL (MTK_PIN_NO(44) | 1)
+#define PINMUX_GPIO44__FUNC_I2SI00_DATA1 (MTK_PIN_NO(44) | 2)
+#define PINMUX_GPIO44__FUNC_SCL5 (MTK_PIN_NO(44) | 3)
+#define PINMUX_GPIO44__FUNC_UTXD5 (MTK_PIN_NO(44) | 4)
+#define PINMUX_GPIO44__FUNC_DBG_MON_B2 (MTK_PIN_NO(44) | 7)
+
+#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_SPMI_M_SDA (MTK_PIN_NO(45) | 1)
+#define PINMUX_GPIO45__FUNC_I2SI00_DATA2 (MTK_PIN_NO(45) | 2)
+#define PINMUX_GPIO45__FUNC_SDA5 (MTK_PIN_NO(45) | 3)
+#define PINMUX_GPIO45__FUNC_URXD5 (MTK_PIN_NO(45) | 4)
+#define PINMUX_GPIO45__FUNC_DBG_MON_B3 (MTK_PIN_NO(45) | 7)
+
+#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_I2SIN_MCK (MTK_PIN_NO(46) | 1)
+#define PINMUX_GPIO46__FUNC_I2SI00_DATA3 (MTK_PIN_NO(46) | 2)
+#define PINMUX_GPIO46__FUNC_SPLIN_MCK (MTK_PIN_NO(46) | 3)
+#define PINMUX_GPIO46__FUNC_DBG_MON_B4 (MTK_PIN_NO(46) | 7)
+
+#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_I2SIN_BCK (MTK_PIN_NO(47) | 1)
+#define PINMUX_GPIO47__FUNC_I2SIN0_BCK (MTK_PIN_NO(47) | 2)
+#define PINMUX_GPIO47__FUNC_SPLIN_LRCK (MTK_PIN_NO(47) | 3)
+#define PINMUX_GPIO47__FUNC_DBG_MON_B5 (MTK_PIN_NO(47) | 7)
+
+#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_I2SIN_WS (MTK_PIN_NO(48) | 1)
+#define PINMUX_GPIO48__FUNC_I2SIN0_LRCK (MTK_PIN_NO(48) | 2)
+#define PINMUX_GPIO48__FUNC_SPLIN_BCK (MTK_PIN_NO(48) | 3)
+#define PINMUX_GPIO48__FUNC_DBG_MON_B6 (MTK_PIN_NO(48) | 7)
+
+#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_I2SIN_D0 (MTK_PIN_NO(49) | 1)
+#define PINMUX_GPIO49__FUNC_I2SI00_DATA0 (MTK_PIN_NO(49) | 2)
+#define PINMUX_GPIO49__FUNC_SPLIN_D0 (MTK_PIN_NO(49) | 3)
+#define PINMUX_GPIO49__FUNC_DBG_MON_B7 (MTK_PIN_NO(49) | 7)
+
+#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_I2SO1_MCK (MTK_PIN_NO(50) | 1)
+#define PINMUX_GPIO50__FUNC_I2SI5_D0 (MTK_PIN_NO(50) | 2)
+#define PINMUX_GPIO50__FUNC_I2SO4_MCK (MTK_PIN_NO(50) | 4)
+#define PINMUX_GPIO50__FUNC_DBG_MON_B8 (MTK_PIN_NO(50) | 7)
+
+#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_I2SO1_BCK (MTK_PIN_NO(51) | 1)
+#define PINMUX_GPIO51__FUNC_I2SI5_BCK (MTK_PIN_NO(51) | 2)
+#define PINMUX_GPIO51__FUNC_DBG_MON_B9 (MTK_PIN_NO(51) | 7)
+
+#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_I2SO1_WS (MTK_PIN_NO(52) | 1)
+#define PINMUX_GPIO52__FUNC_I2SI5_WS (MTK_PIN_NO(52) | 2)
+#define PINMUX_GPIO52__FUNC_DBG_MON_B10 (MTK_PIN_NO(52) | 7)
+
+#define PINMUX_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_I2SO1_D0 (MTK_PIN_NO(53) | 1)
+#define PINMUX_GPIO53__FUNC_I2SI5_MCK (MTK_PIN_NO(53) | 2)
+#define PINMUX_GPIO53__FUNC_DBG_MON_B11 (MTK_PIN_NO(53) | 7)
+
+#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_I2SO1_D1 (MTK_PIN_NO(54) | 1)
+#define PINMUX_GPIO54__FUNC_I2SI01_DATA1 (MTK_PIN_NO(54) | 2)
+#define PINMUX_GPIO54__FUNC_SPLIN_D1 (MTK_PIN_NO(54) | 3)
+#define PINMUX_GPIO54__FUNC_I2SO4_BCK (MTK_PIN_NO(54) | 4)
+#define PINMUX_GPIO54__FUNC_DBG_MON_B12 (MTK_PIN_NO(54) | 7)
+
+#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_I2SO1_D2 (MTK_PIN_NO(55) | 1)
+#define PINMUX_GPIO55__FUNC_I2SI01_DATA2 (MTK_PIN_NO(55) | 2)
+#define PINMUX_GPIO55__FUNC_SPLIN_D2 (MTK_PIN_NO(55) | 3)
+#define PINMUX_GPIO55__FUNC_I2SO4_WS (MTK_PIN_NO(55) | 4)
+#define PINMUX_GPIO55__FUNC_DBG_MON_B13 (MTK_PIN_NO(55) | 7)
+
+#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_I2SO1_D3 (MTK_PIN_NO(56) | 1)
+#define PINMUX_GPIO56__FUNC_I2SI01_DATA3 (MTK_PIN_NO(56) | 2)
+#define PINMUX_GPIO56__FUNC_SPLIN_D3 (MTK_PIN_NO(56) | 3)
+#define PINMUX_GPIO56__FUNC_I2SO4_D0 (MTK_PIN_NO(56) | 4)
+#define PINMUX_GPIO56__FUNC_DBG_MON_B14 (MTK_PIN_NO(56) | 7)
+
+#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_I2SO2_MCK (MTK_PIN_NO(57) | 1)
+#define PINMUX_GPIO57__FUNC_I2SO1_D12 (MTK_PIN_NO(57) | 2)
+#define PINMUX_GPIO57__FUNC_LCM1_RST (MTK_PIN_NO(57) | 3)
+#define PINMUX_GPIO57__FUNC_DBG_MON_B15 (MTK_PIN_NO(57) | 7)
+
+#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_I2SO2_BCK (MTK_PIN_NO(58) | 1)
+#define PINMUX_GPIO58__FUNC_I2SO1_D13 (MTK_PIN_NO(58) | 2)
+#define PINMUX_GPIO58__FUNC_I2SIN1_BCK (MTK_PIN_NO(58) | 3)
+#define PINMUX_GPIO58__FUNC_DBG_MON_B16 (MTK_PIN_NO(58) | 7)
+
+#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_I2SO2_WS (MTK_PIN_NO(59) | 1)
+#define PINMUX_GPIO59__FUNC_I2SO1_D14 (MTK_PIN_NO(59) | 2)
+#define PINMUX_GPIO59__FUNC_I2SIN1_LRCK (MTK_PIN_NO(59) | 3)
+#define PINMUX_GPIO59__FUNC_DBG_MON_B17 (MTK_PIN_NO(59) | 7)
+
+#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_I2SO2_D0 (MTK_PIN_NO(60) | 1)
+#define PINMUX_GPIO60__FUNC_I2SO1_D15 (MTK_PIN_NO(60) | 2)
+#define PINMUX_GPIO60__FUNC_I2SI01_DATA0 (MTK_PIN_NO(60) | 3)
+#define PINMUX_GPIO60__FUNC_DBG_MON_B18 (MTK_PIN_NO(60) | 7)
+
+#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_DMIC1_CLK (MTK_PIN_NO(61) | 1)
+#define PINMUX_GPIO61__FUNC_I2SO2_BCK (MTK_PIN_NO(61) | 2)
+#define PINMUX_GPIO61__FUNC_SCP_SPI2_CK (MTK_PIN_NO(61) | 3)
+#define PINMUX_GPIO61__FUNC_DBG_MON_B19 (MTK_PIN_NO(61) | 7)
+
+#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_DMIC1_DAT (MTK_PIN_NO(62) | 1)
+#define PINMUX_GPIO62__FUNC_I2SO2_WS (MTK_PIN_NO(62) | 2)
+#define PINMUX_GPIO62__FUNC_SCP_SPI2_MI (MTK_PIN_NO(62) | 3)
+#define PINMUX_GPIO62__FUNC_DBG_MON_B20 (MTK_PIN_NO(62) | 7)
+
+#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_DMIC2_CLK (MTK_PIN_NO(63) | 1)
+#define PINMUX_GPIO63__FUNC_VBUSVALID (MTK_PIN_NO(63) | 2)
+#define PINMUX_GPIO63__FUNC_SCP_SPI2_MO (MTK_PIN_NO(63) | 3)
+#define PINMUX_GPIO63__FUNC_SCP_SCL2 (MTK_PIN_NO(63) | 4)
+#define PINMUX_GPIO63__FUNC_SCP_JTAG1_TDO (MTK_PIN_NO(63) | 5)
+#define PINMUX_GPIO63__FUNC_JTDO_SEL1 (MTK_PIN_NO(63) | 6)
+#define PINMUX_GPIO63__FUNC_DBG_MON_B21 (MTK_PIN_NO(63) | 7)
+
+#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_DMIC2_DAT (MTK_PIN_NO(64) | 1)
+#define PINMUX_GPIO64__FUNC_VBUSVALID_1P (MTK_PIN_NO(64) | 2)
+#define PINMUX_GPIO64__FUNC_SCP_SPI2_CS (MTK_PIN_NO(64) | 3)
+#define PINMUX_GPIO64__FUNC_SCP_SDA2 (MTK_PIN_NO(64) | 4)
+#define PINMUX_GPIO64__FUNC_DBG_MON_B22 (MTK_PIN_NO(64) | 7)
+
+#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_PCM_DO (MTK_PIN_NO(65) | 1)
+#define PINMUX_GPIO65__FUNC_AUXIF_ST0 (MTK_PIN_NO(65) | 2)
+#define PINMUX_GPIO65__FUNC_UCTS2 (MTK_PIN_NO(65) | 3)
+#define PINMUX_GPIO65__FUNC_SCP_JTAG1_TMS (MTK_PIN_NO(65) | 5)
+#define PINMUX_GPIO65__FUNC_JTMS_SEL1 (MTK_PIN_NO(65) | 6)
+#define PINMUX_GPIO65__FUNC_DBG_MON_B23 (MTK_PIN_NO(65) | 7)
+
+#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_PCM_CLK (MTK_PIN_NO(66) | 1)
+#define PINMUX_GPIO66__FUNC_AUXIF_CLK0 (MTK_PIN_NO(66) | 2)
+#define PINMUX_GPIO66__FUNC_URTS2 (MTK_PIN_NO(66) | 3)
+#define PINMUX_GPIO66__FUNC_SCP_JTAG1_TCK (MTK_PIN_NO(66) | 5)
+#define PINMUX_GPIO66__FUNC_JTCK_SEL1 (MTK_PIN_NO(66) | 6)
+#define PINMUX_GPIO66__FUNC_DBG_MON_B24 (MTK_PIN_NO(66) | 7)
+
+#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_PCM_DI (MTK_PIN_NO(67) | 1)
+#define PINMUX_GPIO67__FUNC_AUXIF_ST1 (MTK_PIN_NO(67) | 2)
+#define PINMUX_GPIO67__FUNC_UTXD2 (MTK_PIN_NO(67) | 3)
+#define PINMUX_GPIO67__FUNC_SCP_JTAG1_TRSTN (MTK_PIN_NO(67) | 5)
+#define PINMUX_GPIO67__FUNC_JTRSTn_SEL1 (MTK_PIN_NO(67) | 6)
+#define PINMUX_GPIO67__FUNC_DBG_MON_B25 (MTK_PIN_NO(67) | 7)
+
+#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_PCM_SYNC (MTK_PIN_NO(68) | 1)
+#define PINMUX_GPIO68__FUNC_AUXIF_CLK1 (MTK_PIN_NO(68) | 2)
+#define PINMUX_GPIO68__FUNC_URXD2 (MTK_PIN_NO(68) | 3)
+#define PINMUX_GPIO68__FUNC_SCP_JTAG1_TDI (MTK_PIN_NO(68) | 5)
+#define PINMUX_GPIO68__FUNC_JTDI_SEL1 (MTK_PIN_NO(68) | 6)
+#define PINMUX_GPIO68__FUNC_DBG_MON_B26 (MTK_PIN_NO(68) | 7)
+
+#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(69) | 1)
+#define PINMUX_GPIO69__FUNC_I2SIN2_BCK (MTK_PIN_NO(69) | 2)
+#define PINMUX_GPIO69__FUNC_PWM_0 (MTK_PIN_NO(69) | 3)
+#define PINMUX_GPIO69__FUNC_WAKEN (MTK_PIN_NO(69) | 4)
+#define PINMUX_GPIO69__FUNC_DBG_MON_B27 (MTK_PIN_NO(69) | 7)
+
+#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(70) | 1)
+#define PINMUX_GPIO70__FUNC_I2SIN2_LRCK (MTK_PIN_NO(70) | 2)
+#define PINMUX_GPIO70__FUNC_PWM_1 (MTK_PIN_NO(70) | 3)
+#define PINMUX_GPIO70__FUNC_PERSTN (MTK_PIN_NO(70) | 4)
+#define PINMUX_GPIO70__FUNC_DBG_MON_B28 (MTK_PIN_NO(70) | 7)
+
+#define PINMUX_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define PINMUX_GPIO71__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(71) | 1)
+#define PINMUX_GPIO71__FUNC_IDDIG_2P (MTK_PIN_NO(71) | 2)
+#define PINMUX_GPIO71__FUNC_PWM_2 (MTK_PIN_NO(71) | 3)
+#define PINMUX_GPIO71__FUNC_CLKREQN (MTK_PIN_NO(71) | 4)
+#define PINMUX_GPIO71__FUNC_DBG_MON_B29 (MTK_PIN_NO(71) | 7)
+
+#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define PINMUX_GPIO72__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(72) | 1)
+#define PINMUX_GPIO72__FUNC_USB_DRVVBUS_2P (MTK_PIN_NO(72) | 2)
+#define PINMUX_GPIO72__FUNC_PWM_3 (MTK_PIN_NO(72) | 3)
+#define PINMUX_GPIO72__FUNC_PERSTN_1 (MTK_PIN_NO(72) | 4)
+#define PINMUX_GPIO72__FUNC_DBG_MON_B30 (MTK_PIN_NO(72) | 7)
+
+#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define PINMUX_GPIO73__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(73) | 1)
+#define PINMUX_GPIO73__FUNC_I2SI02_DATA0 (MTK_PIN_NO(73) | 2)
+#define PINMUX_GPIO73__FUNC_CLKREQN_1 (MTK_PIN_NO(73) | 4)
+#define PINMUX_GPIO73__FUNC_VOW_DAT_MISO (MTK_PIN_NO(73) | 5)
+#define PINMUX_GPIO73__FUNC_DBG_MON_B31 (MTK_PIN_NO(73) | 7)
+
+#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(74) | 1)
+#define PINMUX_GPIO74__FUNC_I2SI02_DATA1 (MTK_PIN_NO(74) | 2)
+#define PINMUX_GPIO74__FUNC_WAKEN_1 (MTK_PIN_NO(74) | 4)
+#define PINMUX_GPIO74__FUNC_VOW_CLK_MISO (MTK_PIN_NO(74) | 5)
+#define PINMUX_GPIO74__FUNC_DBG_MON_B32 (MTK_PIN_NO(74) | 7)
+
+#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_AUD_DAT_MISO2 (MTK_PIN_NO(75) | 1)
+#define PINMUX_GPIO75__FUNC_I2SI02_DATA2 (MTK_PIN_NO(75) | 2)
+
+#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(76) | 1)
+#define PINMUX_GPIO76__FUNC_I2SI02_DATA3 (MTK_PIN_NO(76) | 2)
+#define PINMUX_GPIO76__FUNC_DBG_MON_A26 (MTK_PIN_NO(76) | 7)
+
+#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_DGI_D0 (MTK_PIN_NO(77) | 1)
+#define PINMUX_GPIO77__FUNC_DPI_D0 (MTK_PIN_NO(77) | 2)
+#define PINMUX_GPIO77__FUNC_I2SI4_MCK (MTK_PIN_NO(77) | 3)
+#define PINMUX_GPIO77__FUNC_SPIM4_CLK (MTK_PIN_NO(77) | 4)
+#define PINMUX_GPIO77__FUNC_GBE_TXD3 (MTK_PIN_NO(77) | 5)
+#define PINMUX_GPIO77__FUNC_SPM_JTAG_TCK (MTK_PIN_NO(77) | 6)
+
+#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_DGI_D1 (MTK_PIN_NO(78) | 1)
+#define PINMUX_GPIO78__FUNC_DPI_D1 (MTK_PIN_NO(78) | 2)
+#define PINMUX_GPIO78__FUNC_I2SI4_BCK (MTK_PIN_NO(78) | 3)
+#define PINMUX_GPIO78__FUNC_SPIM4_MO (MTK_PIN_NO(78) | 4)
+#define PINMUX_GPIO78__FUNC_GBE_TXD2 (MTK_PIN_NO(78) | 5)
+#define PINMUX_GPIO78__FUNC_SPM_JTAG_TMS (MTK_PIN_NO(78) | 6)
+
+#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_DGI_D2 (MTK_PIN_NO(79) | 1)
+#define PINMUX_GPIO79__FUNC_DPI_D2 (MTK_PIN_NO(79) | 2)
+#define PINMUX_GPIO79__FUNC_I2SI4_WS (MTK_PIN_NO(79) | 3)
+#define PINMUX_GPIO79__FUNC_SPIM4_CSB (MTK_PIN_NO(79) | 4)
+#define PINMUX_GPIO79__FUNC_GBE_TXD1 (MTK_PIN_NO(79) | 5)
+#define PINMUX_GPIO79__FUNC_SPM_JTAG_TDI (MTK_PIN_NO(79) | 6)
+
+#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_DGI_D3 (MTK_PIN_NO(80) | 1)
+#define PINMUX_GPIO80__FUNC_DPI_D3 (MTK_PIN_NO(80) | 2)
+#define PINMUX_GPIO80__FUNC_I2SI4_D0 (MTK_PIN_NO(80) | 3)
+#define PINMUX_GPIO80__FUNC_SPIM4_MI (MTK_PIN_NO(80) | 4)
+#define PINMUX_GPIO80__FUNC_GBE_TXD0 (MTK_PIN_NO(80) | 5)
+#define PINMUX_GPIO80__FUNC_SPM_JTAG_TDO (MTK_PIN_NO(80) | 6)
+
+#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_DGI_D4 (MTK_PIN_NO(81) | 1)
+#define PINMUX_GPIO81__FUNC_DPI_D4 (MTK_PIN_NO(81) | 2)
+#define PINMUX_GPIO81__FUNC_I2SI5_MCK (MTK_PIN_NO(81) | 3)
+#define PINMUX_GPIO81__FUNC_SPIM5_CLK (MTK_PIN_NO(81) | 4)
+#define PINMUX_GPIO81__FUNC_GBE_RXD3 (MTK_PIN_NO(81) | 5)
+#define PINMUX_GPIO81__FUNC_SPM_JTAG_TRSTN (MTK_PIN_NO(81) | 6)
+
+#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_DGI_D5 (MTK_PIN_NO(82) | 1)
+#define PINMUX_GPIO82__FUNC_DPI_D5 (MTK_PIN_NO(82) | 2)
+#define PINMUX_GPIO82__FUNC_I2SI5_BCK (MTK_PIN_NO(82) | 3)
+#define PINMUX_GPIO82__FUNC_SPIM5_MO (MTK_PIN_NO(82) | 4)
+#define PINMUX_GPIO82__FUNC_GBE_RXD2 (MTK_PIN_NO(82) | 5)
+#define PINMUX_GPIO82__FUNC_MCUPM_JTAG_TDO (MTK_PIN_NO(82) | 6)
+
+#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_DGI_D6 (MTK_PIN_NO(83) | 1)
+#define PINMUX_GPIO83__FUNC_DPI_D6 (MTK_PIN_NO(83) | 2)
+#define PINMUX_GPIO83__FUNC_I2SI5_WS (MTK_PIN_NO(83) | 3)
+#define PINMUX_GPIO83__FUNC_SPIM5_CSB (MTK_PIN_NO(83) | 4)
+#define PINMUX_GPIO83__FUNC_GBE_RXD1 (MTK_PIN_NO(83) | 5)
+#define PINMUX_GPIO83__FUNC_MCUPM_JTAG_TMS (MTK_PIN_NO(83) | 6)
+
+#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_DGI_D7 (MTK_PIN_NO(84) | 1)
+#define PINMUX_GPIO84__FUNC_DPI_D7 (MTK_PIN_NO(84) | 2)
+#define PINMUX_GPIO84__FUNC_I2SI5_D0 (MTK_PIN_NO(84) | 3)
+#define PINMUX_GPIO84__FUNC_SPIM5_MI (MTK_PIN_NO(84) | 4)
+#define PINMUX_GPIO84__FUNC_GBE_RXD0 (MTK_PIN_NO(84) | 5)
+#define PINMUX_GPIO84__FUNC_MCUPM_JTAG_TCK (MTK_PIN_NO(84) | 6)
+
+#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_DGI_D8 (MTK_PIN_NO(85) | 1)
+#define PINMUX_GPIO85__FUNC_DPI_D8 (MTK_PIN_NO(85) | 2)
+#define PINMUX_GPIO85__FUNC_I2SO4_MCK (MTK_PIN_NO(85) | 3)
+#define PINMUX_GPIO85__FUNC_SCP_SPI1_B_CK (MTK_PIN_NO(85) | 4)
+#define PINMUX_GPIO85__FUNC_GBE_TXC (MTK_PIN_NO(85) | 5)
+#define PINMUX_GPIO85__FUNC_MCUPM_JTAG_TDI (MTK_PIN_NO(85) | 6)
+
+#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_DGI_D9 (MTK_PIN_NO(86) | 1)
+#define PINMUX_GPIO86__FUNC_DPI_D9 (MTK_PIN_NO(86) | 2)
+#define PINMUX_GPIO86__FUNC_I2SO4_BCK (MTK_PIN_NO(86) | 3)
+#define PINMUX_GPIO86__FUNC_SCP_SPI1_B_MI (MTK_PIN_NO(86) | 4)
+#define PINMUX_GPIO86__FUNC_GBE_RXC (MTK_PIN_NO(86) | 5)
+#define PINMUX_GPIO86__FUNC_MCUPM_JTAG_TRSTN (MTK_PIN_NO(86) | 6)
+
+#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_DGI_D10 (MTK_PIN_NO(87) | 1)
+#define PINMUX_GPIO87__FUNC_DPI_D10 (MTK_PIN_NO(87) | 2)
+#define PINMUX_GPIO87__FUNC_I2SO4_WS (MTK_PIN_NO(87) | 3)
+#define PINMUX_GPIO87__FUNC_SCP_SPI1_B_CS (MTK_PIN_NO(87) | 4)
+#define PINMUX_GPIO87__FUNC_GBE_RXDV (MTK_PIN_NO(87) | 5)
+#define PINMUX_GPIO87__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(87) | 6)
+
+#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_DGI_D11 (MTK_PIN_NO(88) | 1)
+#define PINMUX_GPIO88__FUNC_DPI_D11 (MTK_PIN_NO(88) | 2)
+#define PINMUX_GPIO88__FUNC_I2SO4_D0 (MTK_PIN_NO(88) | 3)
+#define PINMUX_GPIO88__FUNC_SCP_SPI1_B_MO (MTK_PIN_NO(88) | 4)
+#define PINMUX_GPIO88__FUNC_GBE_TXEN (MTK_PIN_NO(88) | 5)
+#define PINMUX_GPIO88__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(88) | 6)
+
+#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_DGI_D12 (MTK_PIN_NO(89) | 1)
+#define PINMUX_GPIO89__FUNC_DPI_D12 (MTK_PIN_NO(89) | 2)
+#define PINMUX_GPIO89__FUNC_MSDC2_CMD_A (MTK_PIN_NO(89) | 3)
+#define PINMUX_GPIO89__FUNC_I2SO5_BCK (MTK_PIN_NO(89) | 4)
+#define PINMUX_GPIO89__FUNC_GBE_MDC (MTK_PIN_NO(89) | 5)
+#define PINMUX_GPIO89__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(89) | 6)
+
+#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_DGI_D13 (MTK_PIN_NO(90) | 1)
+#define PINMUX_GPIO90__FUNC_DPI_D13 (MTK_PIN_NO(90) | 2)
+#define PINMUX_GPIO90__FUNC_MSDC2_CLK_A (MTK_PIN_NO(90) | 3)
+#define PINMUX_GPIO90__FUNC_I2SO5_WS (MTK_PIN_NO(90) | 4)
+#define PINMUX_GPIO90__FUNC_GBE_MDIO (MTK_PIN_NO(90) | 5)
+#define PINMUX_GPIO90__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(90) | 6)
+
+#define PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_DGI_D14 (MTK_PIN_NO(91) | 1)
+#define PINMUX_GPIO91__FUNC_DPI_D14 (MTK_PIN_NO(91) | 2)
+#define PINMUX_GPIO91__FUNC_MSDC2_DAT3_A (MTK_PIN_NO(91) | 3)
+#define PINMUX_GPIO91__FUNC_I2SO5_D0 (MTK_PIN_NO(91) | 4)
+#define PINMUX_GPIO91__FUNC_GBE_TXER (MTK_PIN_NO(91) | 5)
+#define PINMUX_GPIO91__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(91) | 6)
+
+#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_DGI_D15 (MTK_PIN_NO(92) | 1)
+#define PINMUX_GPIO92__FUNC_DPI_D15 (MTK_PIN_NO(92) | 2)
+#define PINMUX_GPIO92__FUNC_MSDC2_DAT0_A (MTK_PIN_NO(92) | 3)
+#define PINMUX_GPIO92__FUNC_I2SO2_D1 (MTK_PIN_NO(92) | 4)
+#define PINMUX_GPIO92__FUNC_GBE_RXER (MTK_PIN_NO(92) | 5)
+#define PINMUX_GPIO92__FUNC_CCU0_JTAG_TDO (MTK_PIN_NO(92) | 6)
+
+#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_DGI_HSYNC (MTK_PIN_NO(93) | 1)
+#define PINMUX_GPIO93__FUNC_DPI_HSYNC (MTK_PIN_NO(93) | 2)
+#define PINMUX_GPIO93__FUNC_MSDC2_DAT2_A (MTK_PIN_NO(93) | 3)
+#define PINMUX_GPIO93__FUNC_I2SO2_D2 (MTK_PIN_NO(93) | 4)
+#define PINMUX_GPIO93__FUNC_GBE_COL (MTK_PIN_NO(93) | 5)
+#define PINMUX_GPIO93__FUNC_CCU0_JTAG_TMS (MTK_PIN_NO(93) | 6)
+
+#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_DGI_VSYNC (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_DPI_VSYNC (MTK_PIN_NO(94) | 2)
+#define PINMUX_GPIO94__FUNC_MSDC2_DAT1_A (MTK_PIN_NO(94) | 3)
+#define PINMUX_GPIO94__FUNC_I2SO2_D3 (MTK_PIN_NO(94) | 4)
+#define PINMUX_GPIO94__FUNC_GBE_INTR (MTK_PIN_NO(94) | 5)
+#define PINMUX_GPIO94__FUNC_CCU0_JTAG_TDI (MTK_PIN_NO(94) | 6)
+
+#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_DGI_DE (MTK_PIN_NO(95) | 1)
+#define PINMUX_GPIO95__FUNC_DPI_DE (MTK_PIN_NO(95) | 2)
+#define PINMUX_GPIO95__FUNC_UTXD2 (MTK_PIN_NO(95) | 3)
+#define PINMUX_GPIO95__FUNC_I2SIN_D1 (MTK_PIN_NO(95) | 5)
+#define PINMUX_GPIO95__FUNC_CCU0_JTAG_TCK (MTK_PIN_NO(95) | 6)
+
+#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_DGI_CK (MTK_PIN_NO(96) | 1)
+#define PINMUX_GPIO96__FUNC_DPI_CK (MTK_PIN_NO(96) | 2)
+#define PINMUX_GPIO96__FUNC_URXD2 (MTK_PIN_NO(96) | 3)
+#define PINMUX_GPIO96__FUNC_I2SO5_MCK (MTK_PIN_NO(96) | 4)
+#define PINMUX_GPIO96__FUNC_I2SIN_D2 (MTK_PIN_NO(96) | 5)
+#define PINMUX_GPIO96__FUNC_CCU0_JTAG_TRST (MTK_PIN_NO(96) | 6)
+
+#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_DISP_PWM0 (MTK_PIN_NO(97) | 1)
+#define PINMUX_GPIO97__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(97) | 2)
+
+#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_UTXD0 (MTK_PIN_NO(98) | 1)
+
+#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_URXD0 (MTK_PIN_NO(99) | 1)
+
+#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_URTS1 (MTK_PIN_NO(100) | 1)
+#define PINMUX_GPIO100__FUNC_DSI_TE (MTK_PIN_NO(100) | 2)
+#define PINMUX_GPIO100__FUNC_I2SO1_D8 (MTK_PIN_NO(100) | 3)
+#define PINMUX_GPIO100__FUNC_KPROW2 (MTK_PIN_NO(100) | 4)
+#define PINMUX_GPIO100__FUNC_PWM_0 (MTK_PIN_NO(100) | 5)
+#define PINMUX_GPIO100__FUNC_TP_URTS1_AO (MTK_PIN_NO(100) | 6)
+#define PINMUX_GPIO100__FUNC_I2SIN_D0 (MTK_PIN_NO(100) | 7)
+
+#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_UCTS1 (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_DSI1_TE (MTK_PIN_NO(101) | 2)
+#define PINMUX_GPIO101__FUNC_I2SO1_D9 (MTK_PIN_NO(101) | 3)
+#define PINMUX_GPIO101__FUNC_KPCOL2 (MTK_PIN_NO(101) | 4)
+#define PINMUX_GPIO101__FUNC_PWM_1 (MTK_PIN_NO(101) | 5)
+#define PINMUX_GPIO101__FUNC_TP_UCTS1_AO (MTK_PIN_NO(101) | 6)
+#define PINMUX_GPIO101__FUNC_I2SIN_D1 (MTK_PIN_NO(101) | 7)
+
+#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_UTXD1 (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_VBUSVALID_2P (MTK_PIN_NO(102) | 2)
+#define PINMUX_GPIO102__FUNC_I2SO1_D10 (MTK_PIN_NO(102) | 3)
+#define PINMUX_GPIO102__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(102) | 4)
+#define PINMUX_GPIO102__FUNC_TP_UTXD1_AO (MTK_PIN_NO(102) | 5)
+#define PINMUX_GPIO102__FUNC_MD32_1_TXD (MTK_PIN_NO(102) | 6)
+#define PINMUX_GPIO102__FUNC_I2SIN_D2 (MTK_PIN_NO(102) | 7)
+
+#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_URXD1 (MTK_PIN_NO(103) | 1)
+#define PINMUX_GPIO103__FUNC_VBUSVALID_3P (MTK_PIN_NO(103) | 2)
+#define PINMUX_GPIO103__FUNC_I2SO1_D11 (MTK_PIN_NO(103) | 3)
+#define PINMUX_GPIO103__FUNC_SSPM_URXD_AO (MTK_PIN_NO(103) | 4)
+#define PINMUX_GPIO103__FUNC_TP_URXD1_AO (MTK_PIN_NO(103) | 5)
+#define PINMUX_GPIO103__FUNC_MD32_1_RXD (MTK_PIN_NO(103) | 6)
+#define PINMUX_GPIO103__FUNC_I2SIN_D3 (MTK_PIN_NO(103) | 7)
+
+#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_KPROW0 (MTK_PIN_NO(104) | 1)
+#define PINMUX_GPIO104__FUNC_DISP_PWM1 (MTK_PIN_NO(104) | 2)
+
+#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_KPROW1 (MTK_PIN_NO(105) | 1)
+#define PINMUX_GPIO105__FUNC_EDP_TX_HPD (MTK_PIN_NO(105) | 2)
+#define PINMUX_GPIO105__FUNC_PWM_2 (MTK_PIN_NO(105) | 3)
+
+#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_KPCOL0 (MTK_PIN_NO(106) | 1)
+
+#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_KPCOL1 (MTK_PIN_NO(107) | 1)
+#define PINMUX_GPIO107__FUNC_DSI1_TE (MTK_PIN_NO(107) | 2)
+#define PINMUX_GPIO107__FUNC_PWM_3 (MTK_PIN_NO(107) | 3)
+#define PINMUX_GPIO107__FUNC_SCP_SCL3 (MTK_PIN_NO(107) | 4)
+#define PINMUX_GPIO107__FUNC_I2SIN_MCK (MTK_PIN_NO(107) | 5)
+
+#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_LCM_RST (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_KPCOL1 (MTK_PIN_NO(108) | 2)
+#define PINMUX_GPIO108__FUNC_SCP_SDA3 (MTK_PIN_NO(108) | 4)
+#define PINMUX_GPIO108__FUNC_I2SIN_BCK (MTK_PIN_NO(108) | 5)
+
+#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_DSI_TE (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_I2SIN_D3 (MTK_PIN_NO(109) | 2)
+#define PINMUX_GPIO109__FUNC_I2SIN_WS (MTK_PIN_NO(109) | 5)
+
+#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_MSDC1_CMD (MTK_PIN_NO(110) | 1)
+#define PINMUX_GPIO110__FUNC_JTMS_SEL3 (MTK_PIN_NO(110) | 2)
+#define PINMUX_GPIO110__FUNC_UDI_TMS (MTK_PIN_NO(110) | 3)
+#define PINMUX_GPIO110__FUNC_CCU1_JTAG_TMS (MTK_PIN_NO(110) | 5)
+#define PINMUX_GPIO110__FUNC_IPU_JTAG_TMS (MTK_PIN_NO(110) | 6)
+
+#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_MSDC1_CLK (MTK_PIN_NO(111) | 1)
+#define PINMUX_GPIO111__FUNC_JTCK_SEL3 (MTK_PIN_NO(111) | 2)
+#define PINMUX_GPIO111__FUNC_UDI_TCK (MTK_PIN_NO(111) | 3)
+#define PINMUX_GPIO111__FUNC_CCU1_JTAG_TCK (MTK_PIN_NO(111) | 5)
+#define PINMUX_GPIO111__FUNC_IPU_JTAG_TCK (MTK_PIN_NO(111) | 6)
+
+#define PINMUX_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_MSDC1_DAT0 (MTK_PIN_NO(112) | 1)
+#define PINMUX_GPIO112__FUNC_JTDI_SEL3 (MTK_PIN_NO(112) | 2)
+#define PINMUX_GPIO112__FUNC_UDI_TDI (MTK_PIN_NO(112) | 3)
+#define PINMUX_GPIO112__FUNC_I2SO2_D0 (MTK_PIN_NO(112) | 4)
+#define PINMUX_GPIO112__FUNC_CCU1_JTAG_TDI (MTK_PIN_NO(112) | 5)
+#define PINMUX_GPIO112__FUNC_IPU_JTAG_TDI (MTK_PIN_NO(112) | 6)
+
+#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_MSDC1_DAT1 (MTK_PIN_NO(113) | 1)
+#define PINMUX_GPIO113__FUNC_JTDO_SEL3 (MTK_PIN_NO(113) | 2)
+#define PINMUX_GPIO113__FUNC_UDI_TDO (MTK_PIN_NO(113) | 3)
+#define PINMUX_GPIO113__FUNC_I2SO2_D1 (MTK_PIN_NO(113) | 4)
+#define PINMUX_GPIO113__FUNC_CCU1_JTAG_TDO (MTK_PIN_NO(113) | 5)
+#define PINMUX_GPIO113__FUNC_IPU_JTAG_TDO (MTK_PIN_NO(113) | 6)
+
+#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_MSDC1_DAT2 (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_JTRSTn_SEL3 (MTK_PIN_NO(114) | 2)
+#define PINMUX_GPIO114__FUNC_UDI_NTRST (MTK_PIN_NO(114) | 3)
+#define PINMUX_GPIO114__FUNC_I2SO2_D2 (MTK_PIN_NO(114) | 4)
+#define PINMUX_GPIO114__FUNC_CCU1_JTAG_TRST (MTK_PIN_NO(114) | 5)
+#define PINMUX_GPIO114__FUNC_IPU_JTAG_TRST (MTK_PIN_NO(114) | 6)
+
+#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_MSDC1_DAT3 (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_I2SO2_D3 (MTK_PIN_NO(115) | 4)
+#define PINMUX_GPIO115__FUNC_MD32_1_GPIO2 (MTK_PIN_NO(115) | 6)
+
+#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_MSDC0_DAT7 (MTK_PIN_NO(116) | 1)
+
+#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_MSDC0_DAT6 (MTK_PIN_NO(117) | 1)
+
+#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_MSDC0_DAT5 (MTK_PIN_NO(118) | 1)
+
+#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_MSDC0_DAT4 (MTK_PIN_NO(119) | 1)
+
+#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_MSDC0_RSTB (MTK_PIN_NO(120) | 1)
+
+#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_MSDC0_CMD (MTK_PIN_NO(121) | 1)
+
+#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_MSDC0_CLK (MTK_PIN_NO(122) | 1)
+
+#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_MSDC0_DAT3 (MTK_PIN_NO(123) | 1)
+
+#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_MSDC0_DAT2 (MTK_PIN_NO(124) | 1)
+
+#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_MSDC0_DAT1 (MTK_PIN_NO(125) | 1)
+
+#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_MSDC0_DAT0 (MTK_PIN_NO(126) | 1)
+
+#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_MSDC0_DSL (MTK_PIN_NO(127) | 1)
+
+#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_IDDIG (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_UCTS2 (MTK_PIN_NO(128) | 2)
+#define PINMUX_GPIO128__FUNC_UTXD5 (MTK_PIN_NO(128) | 3)
+#define PINMUX_GPIO128__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(128) | 4)
+#define PINMUX_GPIO128__FUNC_mbistreaden_trigger (MTK_PIN_NO(128) | 5)
+#define PINMUX_GPIO128__FUNC_MD32_1_GPIO0 (MTK_PIN_NO(128) | 6)
+#define PINMUX_GPIO128__FUNC_SCP_SCL2 (MTK_PIN_NO(128) | 7)
+
+#define PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_USB_DRVVBUS (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_URTS2 (MTK_PIN_NO(129) | 2)
+#define PINMUX_GPIO129__FUNC_URXD5 (MTK_PIN_NO(129) | 3)
+#define PINMUX_GPIO129__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(129) | 4)
+#define PINMUX_GPIO129__FUNC_mbistwriteen_trigger (MTK_PIN_NO(129) | 5)
+#define PINMUX_GPIO129__FUNC_MD32_1_GPIO1 (MTK_PIN_NO(129) | 6)
+#define PINMUX_GPIO129__FUNC_SCP_SDA2 (MTK_PIN_NO(129) | 7)
+
+#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_IDDIG_1P (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_SPINOR_IO2 (MTK_PIN_NO(130) | 2)
+#define PINMUX_GPIO130__FUNC_SNFI_WP (MTK_PIN_NO(130) | 3)
+#define PINMUX_GPIO130__FUNC_VPU_UDI_NTRST (MTK_PIN_NO(130) | 4)
+
+#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_USB_DRVVBUS_1P (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_SPINOR_IO3 (MTK_PIN_NO(131) | 2)
+#define PINMUX_GPIO131__FUNC_SNFI_HOLD (MTK_PIN_NO(131) | 3)
+#define PINMUX_GPIO131__FUNC_MD32_1_JTAG_TRST (MTK_PIN_NO(131) | 4)
+#define PINMUX_GPIO131__FUNC_SCP_JTAG0_TRSTN (MTK_PIN_NO(131) | 5)
+#define PINMUX_GPIO131__FUNC_APU_JTAG_TRST (MTK_PIN_NO(131) | 6)
+
+#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_SPIM0_CSB (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_SCP_SPI0_CS (MTK_PIN_NO(132) | 2)
+#define PINMUX_GPIO132__FUNC_SPIS0_CSB (MTK_PIN_NO(132) | 3)
+#define PINMUX_GPIO132__FUNC_VPU_UDI_TMS (MTK_PIN_NO(132) | 4)
+#define PINMUX_GPIO132__FUNC_I2SO5_D0 (MTK_PIN_NO(132) | 6)
+
+#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_SPIM0_CLK (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_SCP_SPI0_CK (MTK_PIN_NO(133) | 2)
+#define PINMUX_GPIO133__FUNC_SPIS0_CLK (MTK_PIN_NO(133) | 3)
+#define PINMUX_GPIO133__FUNC_VPU_UDI_TCK (MTK_PIN_NO(133) | 4)
+#define PINMUX_GPIO133__FUNC_I2SO5_BCK (MTK_PIN_NO(133) | 6)
+
+#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_SPIM0_MO (MTK_PIN_NO(134) | 1)
+#define PINMUX_GPIO134__FUNC_SCP_SPI0_MO (MTK_PIN_NO(134) | 2)
+#define PINMUX_GPIO134__FUNC_SPIS0_SI (MTK_PIN_NO(134) | 3)
+#define PINMUX_GPIO134__FUNC_VPU_UDI_TDO (MTK_PIN_NO(134) | 4)
+#define PINMUX_GPIO134__FUNC_I2SO5_WS (MTK_PIN_NO(134) | 6)
+
+#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_SPIM0_MI (MTK_PIN_NO(135) | 1)
+#define PINMUX_GPIO135__FUNC_SCP_SPI0_MI (MTK_PIN_NO(135) | 2)
+#define PINMUX_GPIO135__FUNC_SPIS0_SO (MTK_PIN_NO(135) | 3)
+#define PINMUX_GPIO135__FUNC_VPU_UDI_TDI (MTK_PIN_NO(135) | 4)
+#define PINMUX_GPIO135__FUNC_I2SO5_MCK (MTK_PIN_NO(135) | 6)
+
+#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_SPIM1_CSB (MTK_PIN_NO(136) | 1)
+#define PINMUX_GPIO136__FUNC_SCP_SPI1_A_CS (MTK_PIN_NO(136) | 2)
+#define PINMUX_GPIO136__FUNC_SPIS1_CSB (MTK_PIN_NO(136) | 3)
+#define PINMUX_GPIO136__FUNC_MD32_1_JTAG_TMS (MTK_PIN_NO(136) | 4)
+#define PINMUX_GPIO136__FUNC_SCP_JTAG0_TMS (MTK_PIN_NO(136) | 5)
+#define PINMUX_GPIO136__FUNC_APU_JTAG_TMS (MTK_PIN_NO(136) | 6)
+#define PINMUX_GPIO136__FUNC_DBG_MON_A15 (MTK_PIN_NO(136) | 7)
+
+#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_SPIM1_CLK (MTK_PIN_NO(137) | 1)
+#define PINMUX_GPIO137__FUNC_SCP_SPI1_A_CK (MTK_PIN_NO(137) | 2)
+#define PINMUX_GPIO137__FUNC_SPIS1_CLK (MTK_PIN_NO(137) | 3)
+#define PINMUX_GPIO137__FUNC_MD32_1_JTAG_TCK (MTK_PIN_NO(137) | 4)
+#define PINMUX_GPIO137__FUNC_SCP_JTAG0_TCK (MTK_PIN_NO(137) | 5)
+#define PINMUX_GPIO137__FUNC_APU_JTAG_TCK (MTK_PIN_NO(137) | 6)
+#define PINMUX_GPIO137__FUNC_DBG_MON_A14 (MTK_PIN_NO(137) | 7)
+
+#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_SPIM1_MO (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_SCP_SPI1_A_MO (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_SPIS1_SI (MTK_PIN_NO(138) | 3)
+#define PINMUX_GPIO138__FUNC_MD32_1_JTAG_TDO (MTK_PIN_NO(138) | 4)
+#define PINMUX_GPIO138__FUNC_SCP_JTAG0_TDO (MTK_PIN_NO(138) | 5)
+#define PINMUX_GPIO138__FUNC_APU_JTAG_TDO (MTK_PIN_NO(138) | 6)
+#define PINMUX_GPIO138__FUNC_DBG_MON_A16 (MTK_PIN_NO(138) | 7)
+
+#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_SPIM1_MI (MTK_PIN_NO(139) | 1)
+#define PINMUX_GPIO139__FUNC_SCP_SPI1_A_MI (MTK_PIN_NO(139) | 2)
+#define PINMUX_GPIO139__FUNC_SPIS1_SO (MTK_PIN_NO(139) | 3)
+#define PINMUX_GPIO139__FUNC_MD32_1_JTAG_TDI (MTK_PIN_NO(139) | 4)
+#define PINMUX_GPIO139__FUNC_SCP_JTAG0_TDI (MTK_PIN_NO(139) | 5)
+#define PINMUX_GPIO139__FUNC_APU_JTAG_TDI (MTK_PIN_NO(139) | 6)
+#define PINMUX_GPIO139__FUNC_DBG_MON_A17 (MTK_PIN_NO(139) | 7)
+
+#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_SPIM2_CSB (MTK_PIN_NO(140) | 1)
+#define PINMUX_GPIO140__FUNC_SPINOR_CS (MTK_PIN_NO(140) | 2)
+#define PINMUX_GPIO140__FUNC_SNFI_CS (MTK_PIN_NO(140) | 3)
+#define PINMUX_GPIO140__FUNC_DMIC3_DAT (MTK_PIN_NO(140) | 4)
+#define PINMUX_GPIO140__FUNC_DBG_MON_A11 (MTK_PIN_NO(140) | 7)
+
+#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_SPIM2_CLK (MTK_PIN_NO(141) | 1)
+#define PINMUX_GPIO141__FUNC_SPINOR_CK (MTK_PIN_NO(141) | 2)
+#define PINMUX_GPIO141__FUNC_SNFI_CLK (MTK_PIN_NO(141) | 3)
+#define PINMUX_GPIO141__FUNC_DMIC3_CLK (MTK_PIN_NO(141) | 4)
+#define PINMUX_GPIO141__FUNC_DBG_MON_A10 (MTK_PIN_NO(141) | 7)
+
+#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_SPIM2_MO (MTK_PIN_NO(142) | 1)
+#define PINMUX_GPIO142__FUNC_SPINOR_IO0 (MTK_PIN_NO(142) | 2)
+#define PINMUX_GPIO142__FUNC_SNFI_MOSI (MTK_PIN_NO(142) | 3)
+#define PINMUX_GPIO142__FUNC_DMIC4_DAT (MTK_PIN_NO(142) | 4)
+#define PINMUX_GPIO142__FUNC_DBG_MON_A12 (MTK_PIN_NO(142) | 7)
+
+#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_SPIM2_MI (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_SPINOR_IO1 (MTK_PIN_NO(143) | 2)
+#define PINMUX_GPIO143__FUNC_SNFI_MISO (MTK_PIN_NO(143) | 3)
+#define PINMUX_GPIO143__FUNC_DMIC4_CLK (MTK_PIN_NO(143) | 4)
+#define PINMUX_GPIO143__FUNC_DBG_MON_A13 (MTK_PIN_NO(143) | 7)
+
+#endif /* __MT8195_PINFUNC_H */
diff --git a/include/dt-bindings/pinctrl/mt8365-pinfunc.h b/include/dt-bindings/pinctrl/mt8365-pinfunc.h
new file mode 100644
index 000000000000..e2ec8af57dcf
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt8365-pinfunc.h
@@ -0,0 +1,858 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 MediaTek Inc.
+ */
+#ifndef __MT8365_PINFUNC_H
+#define __MT8365_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
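+/*
+ * Each MT8365_PIN_* constant below packs a pin number and a mux function
+ * selector into one value: MTK_PIN_NO(x) from <dt-bindings/pinctrl/mt65xx.h>
+ * places the pin number in the upper bits ((x) << 8), while the low bits
+ * select the function mode (0 = GPIO, 1..7 = alternate functions).
+ */
+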
+#define MT8365_PIN_0_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define MT8365_PIN_0_GPIO0__FUNC_DPI_D0 (MTK_PIN_NO(0) | 1)
+#define MT8365_PIN_0_GPIO0__FUNC_PWM_A (MTK_PIN_NO(0) | 2)
+#define MT8365_PIN_0_GPIO0__FUNC_I2S2_BCK (MTK_PIN_NO(0) | 3)
+#define MT8365_PIN_0_GPIO0__FUNC_EXT_TXD0 (MTK_PIN_NO(0) | 4)
+#define MT8365_PIN_0_GPIO0__FUNC_CONN_MCU_TDO (MTK_PIN_NO(0) | 5)
+#define MT8365_PIN_0_GPIO0__FUNC_DBG_MON_A0 (MTK_PIN_NO(0) | 7)
+
+#define MT8365_PIN_1_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define MT8365_PIN_1_GPIO1__FUNC_DPI_D1 (MTK_PIN_NO(1) | 1)
+#define MT8365_PIN_1_GPIO1__FUNC_PWM_B (MTK_PIN_NO(1) | 2)
+#define MT8365_PIN_1_GPIO1__FUNC_I2S2_LRCK (MTK_PIN_NO(1) | 3)
+#define MT8365_PIN_1_GPIO1__FUNC_EXT_TXD1 (MTK_PIN_NO(1) | 4)
+#define MT8365_PIN_1_GPIO1__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(1) | 5)
+#define MT8365_PIN_1_GPIO1__FUNC_DBG_MON_A1 (MTK_PIN_NO(1) | 7)
+
+#define MT8365_PIN_2_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define MT8365_PIN_2_GPIO2__FUNC_DPI_D2 (MTK_PIN_NO(2) | 1)
+#define MT8365_PIN_2_GPIO2__FUNC_PWM_C (MTK_PIN_NO(2) | 2)
+#define MT8365_PIN_2_GPIO2__FUNC_I2S2_MCK (MTK_PIN_NO(2) | 3)
+#define MT8365_PIN_2_GPIO2__FUNC_EXT_TXD2 (MTK_PIN_NO(2) | 4)
+#define MT8365_PIN_2_GPIO2__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(2) | 5)
+#define MT8365_PIN_2_GPIO2__FUNC_DBG_MON_A2 (MTK_PIN_NO(2) | 7)
+
+#define MT8365_PIN_3_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define MT8365_PIN_3_GPIO3__FUNC_DPI_D3 (MTK_PIN_NO(3) | 1)
+#define MT8365_PIN_3_GPIO3__FUNC_CLKM0 (MTK_PIN_NO(3) | 2)
+#define MT8365_PIN_3_GPIO3__FUNC_I2S2_DI (MTK_PIN_NO(3) | 3)
+#define MT8365_PIN_3_GPIO3__FUNC_EXT_TXD3 (MTK_PIN_NO(3) | 4)
+#define MT8365_PIN_3_GPIO3__FUNC_CONN_MCU_TCK (MTK_PIN_NO(3) | 5)
+#define MT8365_PIN_3_GPIO3__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(3) | 6)
+#define MT8365_PIN_3_GPIO3__FUNC_DBG_MON_A3 (MTK_PIN_NO(3) | 7)
+
+#define MT8365_PIN_4_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define MT8365_PIN_4_GPIO4__FUNC_DPI_D4 (MTK_PIN_NO(4) | 1)
+#define MT8365_PIN_4_GPIO4__FUNC_CLKM1 (MTK_PIN_NO(4) | 2)
+#define MT8365_PIN_4_GPIO4__FUNC_I2S1_BCK (MTK_PIN_NO(4) | 3)
+#define MT8365_PIN_4_GPIO4__FUNC_EXT_TXC (MTK_PIN_NO(4) | 4)
+#define MT8365_PIN_4_GPIO4__FUNC_CONN_MCU_TDI (MTK_PIN_NO(4) | 5)
+#define MT8365_PIN_4_GPIO4__FUNC_VDEC_TEST_CK (MTK_PIN_NO(4) | 6)
+#define MT8365_PIN_4_GPIO4__FUNC_DBG_MON_A4 (MTK_PIN_NO(4) | 7)
+
+#define MT8365_PIN_5_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define MT8365_PIN_5_GPIO5__FUNC_DPI_D5 (MTK_PIN_NO(5) | 1)
+#define MT8365_PIN_5_GPIO5__FUNC_CLKM2 (MTK_PIN_NO(5) | 2)
+#define MT8365_PIN_5_GPIO5__FUNC_I2S1_LRCK (MTK_PIN_NO(5) | 3)
+#define MT8365_PIN_5_GPIO5__FUNC_EXT_RXER (MTK_PIN_NO(5) | 4)
+#define MT8365_PIN_5_GPIO5__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(5) | 5)
+#define MT8365_PIN_5_GPIO5__FUNC_MM_TEST_CK (MTK_PIN_NO(5) | 6)
+#define MT8365_PIN_5_GPIO5__FUNC_DBG_MON_A5 (MTK_PIN_NO(5) | 7)
+
+#define MT8365_PIN_6_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define MT8365_PIN_6_GPIO6__FUNC_DPI_D6 (MTK_PIN_NO(6) | 1)
+#define MT8365_PIN_6_GPIO6__FUNC_CLKM3 (MTK_PIN_NO(6) | 2)
+#define MT8365_PIN_6_GPIO6__FUNC_I2S1_MCK (MTK_PIN_NO(6) | 3)
+#define MT8365_PIN_6_GPIO6__FUNC_EXT_RXC (MTK_PIN_NO(6) | 4)
+#define MT8365_PIN_6_GPIO6__FUNC_CONN_MCU_TMS (MTK_PIN_NO(6) | 5)
+#define MT8365_PIN_6_GPIO6__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(6) | 6)
+#define MT8365_PIN_6_GPIO6__FUNC_DBG_MON_A6 (MTK_PIN_NO(6) | 7)
+
+#define MT8365_PIN_7_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define MT8365_PIN_7_GPIO7__FUNC_DPI_D7 (MTK_PIN_NO(7) | 1)
+#define MT8365_PIN_7_GPIO7__FUNC_I2S1_DO (MTK_PIN_NO(7) | 3)
+#define MT8365_PIN_7_GPIO7__FUNC_EXT_RXDV (MTK_PIN_NO(7) | 4)
+#define MT8365_PIN_7_GPIO7__FUNC_CONN_DSP_JCK (MTK_PIN_NO(7) | 5)
+#define MT8365_PIN_7_GPIO7__FUNC_DBG_MON_A7 (MTK_PIN_NO(7) | 7)
+
+#define MT8365_PIN_8_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define MT8365_PIN_8_GPIO8__FUNC_DPI_D8 (MTK_PIN_NO(8) | 1)
+#define MT8365_PIN_8_GPIO8__FUNC_SPI_CLK (MTK_PIN_NO(8) | 2)
+#define MT8365_PIN_8_GPIO8__FUNC_I2S0_BCK (MTK_PIN_NO(8) | 3)
+#define MT8365_PIN_8_GPIO8__FUNC_EXT_RXD0 (MTK_PIN_NO(8) | 4)
+#define MT8365_PIN_8_GPIO8__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(8) | 5)
+#define MT8365_PIN_8_GPIO8__FUNC_DBG_MON_A8 (MTK_PIN_NO(8) | 7)
+
+#define MT8365_PIN_9_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define MT8365_PIN_9_GPIO9__FUNC_DPI_D9 (MTK_PIN_NO(9) | 1)
+#define MT8365_PIN_9_GPIO9__FUNC_SPI_CSB (MTK_PIN_NO(9) | 2)
+#define MT8365_PIN_9_GPIO9__FUNC_I2S0_LRCK (MTK_PIN_NO(9) | 3)
+#define MT8365_PIN_9_GPIO9__FUNC_EXT_RXD1 (MTK_PIN_NO(9) | 4)
+#define MT8365_PIN_9_GPIO9__FUNC_CONN_DSP_JDI (MTK_PIN_NO(9) | 5)
+#define MT8365_PIN_9_GPIO9__FUNC_DBG_MON_A9 (MTK_PIN_NO(9) | 7)
+
+#define MT8365_PIN_10_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define MT8365_PIN_10_GPIO10__FUNC_DPI_D10 (MTK_PIN_NO(10) | 1)
+#define MT8365_PIN_10_GPIO10__FUNC_SPI_MI (MTK_PIN_NO(10) | 2)
+#define MT8365_PIN_10_GPIO10__FUNC_I2S0_MCK (MTK_PIN_NO(10) | 3)
+#define MT8365_PIN_10_GPIO10__FUNC_EXT_RXD2 (MTK_PIN_NO(10) | 4)
+#define MT8365_PIN_10_GPIO10__FUNC_CONN_DSP_JMS (MTK_PIN_NO(10) | 5)
+#define MT8365_PIN_10_GPIO10__FUNC_DBG_MON_A10 (MTK_PIN_NO(10) | 7)
+
+#define MT8365_PIN_11_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define MT8365_PIN_11_GPIO11__FUNC_DPI_D11 (MTK_PIN_NO(11) | 1)
+#define MT8365_PIN_11_GPIO11__FUNC_SPI_MO (MTK_PIN_NO(11) | 2)
+#define MT8365_PIN_11_GPIO11__FUNC_I2S0_DI (MTK_PIN_NO(11) | 3)
+#define MT8365_PIN_11_GPIO11__FUNC_EXT_RXD3 (MTK_PIN_NO(11) | 4)
+#define MT8365_PIN_11_GPIO11__FUNC_CONN_DSP_JDO (MTK_PIN_NO(11) | 5)
+#define MT8365_PIN_11_GPIO11__FUNC_DBG_MON_A11 (MTK_PIN_NO(11) | 7)
+
+#define MT8365_PIN_12_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define MT8365_PIN_12_GPIO12__FUNC_DPI_DE (MTK_PIN_NO(12) | 1)
+#define MT8365_PIN_12_GPIO12__FUNC_UCTS1 (MTK_PIN_NO(12) | 2)
+#define MT8365_PIN_12_GPIO12__FUNC_I2S3_BCK (MTK_PIN_NO(12) | 3)
+#define MT8365_PIN_12_GPIO12__FUNC_EXT_TXEN (MTK_PIN_NO(12) | 4)
+#define MT8365_PIN_12_GPIO12__FUNC_O_WIFI_TXD (MTK_PIN_NO(12) | 5)
+#define MT8365_PIN_12_GPIO12__FUNC_DBG_MON_A12 (MTK_PIN_NO(12) | 7)
+
+#define MT8365_PIN_13_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define MT8365_PIN_13_GPIO13__FUNC_DPI_VSYNC (MTK_PIN_NO(13) | 1)
+#define MT8365_PIN_13_GPIO13__FUNC_URTS1 (MTK_PIN_NO(13) | 2)
+#define MT8365_PIN_13_GPIO13__FUNC_I2S3_LRCK (MTK_PIN_NO(13) | 3)
+#define MT8365_PIN_13_GPIO13__FUNC_EXT_COL (MTK_PIN_NO(13) | 4)
+#define MT8365_PIN_13_GPIO13__FUNC_SPDIF_IN (MTK_PIN_NO(13) | 5)
+#define MT8365_PIN_13_GPIO13__FUNC_DBG_MON_A13 (MTK_PIN_NO(13) | 7)
+
+#define MT8365_PIN_14_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define MT8365_PIN_14_GPIO14__FUNC_DPI_CK (MTK_PIN_NO(14) | 1)
+#define MT8365_PIN_14_GPIO14__FUNC_UCTS2 (MTK_PIN_NO(14) | 2)
+#define MT8365_PIN_14_GPIO14__FUNC_I2S3_MCK (MTK_PIN_NO(14) | 3)
+#define MT8365_PIN_14_GPIO14__FUNC_EXT_MDIO (MTK_PIN_NO(14) | 4)
+#define MT8365_PIN_14_GPIO14__FUNC_SPDIF_OUT (MTK_PIN_NO(14) | 5)
+#define MT8365_PIN_14_GPIO14__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(14) | 6)
+#define MT8365_PIN_14_GPIO14__FUNC_DBG_MON_A14 (MTK_PIN_NO(14) | 7)
+
+#define MT8365_PIN_15_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define MT8365_PIN_15_GPIO15__FUNC_DPI_HSYNC (MTK_PIN_NO(15) | 1)
+#define MT8365_PIN_15_GPIO15__FUNC_URTS2 (MTK_PIN_NO(15) | 2)
+#define MT8365_PIN_15_GPIO15__FUNC_I2S3_DO (MTK_PIN_NO(15) | 3)
+#define MT8365_PIN_15_GPIO15__FUNC_EXT_MDC (MTK_PIN_NO(15) | 4)
+#define MT8365_PIN_15_GPIO15__FUNC_IRRX (MTK_PIN_NO(15) | 5)
+#define MT8365_PIN_15_GPIO15__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(15) | 6)
+#define MT8365_PIN_15_GPIO15__FUNC_DBG_MON_A15 (MTK_PIN_NO(15) | 7)
+
+#define MT8365_PIN_16_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define MT8365_PIN_16_GPIO16__FUNC_DPI_D12 (MTK_PIN_NO(16) | 1)
+#define MT8365_PIN_16_GPIO16__FUNC_USB_DRVVBUS (MTK_PIN_NO(16) | 2)
+#define MT8365_PIN_16_GPIO16__FUNC_PWM_A (MTK_PIN_NO(16) | 3)
+#define MT8365_PIN_16_GPIO16__FUNC_CLKM0 (MTK_PIN_NO(16) | 4)
+#define MT8365_PIN_16_GPIO16__FUNC_ANT_SEL0 (MTK_PIN_NO(16) | 5)
+#define MT8365_PIN_16_GPIO16__FUNC_TSF_IN (MTK_PIN_NO(16) | 6)
+#define MT8365_PIN_16_GPIO16__FUNC_DBG_MON_A16 (MTK_PIN_NO(16) | 7)
+
+#define MT8365_PIN_17_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define MT8365_PIN_17_GPIO17__FUNC_DPI_D13 (MTK_PIN_NO(17) | 1)
+#define MT8365_PIN_17_GPIO17__FUNC_IDDIG (MTK_PIN_NO(17) | 2)
+#define MT8365_PIN_17_GPIO17__FUNC_PWM_B (MTK_PIN_NO(17) | 3)
+#define MT8365_PIN_17_GPIO17__FUNC_CLKM1 (MTK_PIN_NO(17) | 4)
+#define MT8365_PIN_17_GPIO17__FUNC_ANT_SEL1 (MTK_PIN_NO(17) | 5)
+#define MT8365_PIN_17_GPIO17__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(17) | 6)
+#define MT8365_PIN_17_GPIO17__FUNC_DBG_MON_A17 (MTK_PIN_NO(17) | 7)
+
+#define MT8365_PIN_18_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define MT8365_PIN_18_GPIO18__FUNC_DPI_D14 (MTK_PIN_NO(18) | 1)
+#define MT8365_PIN_18_GPIO18__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(18) | 2)
+#define MT8365_PIN_18_GPIO18__FUNC_PWM_C (MTK_PIN_NO(18) | 3)
+#define MT8365_PIN_18_GPIO18__FUNC_CLKM2 (MTK_PIN_NO(18) | 4)
+#define MT8365_PIN_18_GPIO18__FUNC_ANT_SEL2 (MTK_PIN_NO(18) | 5)
+#define MT8365_PIN_18_GPIO18__FUNC_MFG_TEST_CK (MTK_PIN_NO(18) | 6)
+#define MT8365_PIN_18_GPIO18__FUNC_DBG_MON_A18 (MTK_PIN_NO(18) | 7)
+
+#define MT8365_PIN_19_DISP_PWM__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define MT8365_PIN_19_DISP_PWM__FUNC_DISP_PWM (MTK_PIN_NO(19) | 1)
+#define MT8365_PIN_19_DISP_PWM__FUNC_PWM_A (MTK_PIN_NO(19) | 2)
+#define MT8365_PIN_19_DISP_PWM__FUNC_DBG_MON_A19 (MTK_PIN_NO(19) | 7)
+
+#define MT8365_PIN_20_LCM_RST__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define MT8365_PIN_20_LCM_RST__FUNC_LCM_RST (MTK_PIN_NO(20) | 1)
+#define MT8365_PIN_20_LCM_RST__FUNC_PWM_B (MTK_PIN_NO(20) | 2)
+#define MT8365_PIN_20_LCM_RST__FUNC_DBG_MON_A20 (MTK_PIN_NO(20) | 7)
+
+#define MT8365_PIN_21_DSI_TE__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define MT8365_PIN_21_DSI_TE__FUNC_DSI_TE (MTK_PIN_NO(21) | 1)
+#define MT8365_PIN_21_DSI_TE__FUNC_PWM_C (MTK_PIN_NO(21) | 2)
+#define MT8365_PIN_21_DSI_TE__FUNC_ANT_SEL0 (MTK_PIN_NO(21) | 3)
+#define MT8365_PIN_21_DSI_TE__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(21) | 4)
+#define MT8365_PIN_21_DSI_TE__FUNC_DBG_MON_A21 (MTK_PIN_NO(21) | 7)
+
+#define MT8365_PIN_22_KPROW0__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define MT8365_PIN_22_KPROW0__FUNC_KPROW0 (MTK_PIN_NO(22) | 1)
+#define MT8365_PIN_22_KPROW0__FUNC_DBG_MON_A22 (MTK_PIN_NO(22) | 7)
+
+#define MT8365_PIN_23_KPROW1__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define MT8365_PIN_23_KPROW1__FUNC_KPROW1 (MTK_PIN_NO(23) | 1)
+#define MT8365_PIN_23_KPROW1__FUNC_IDDIG (MTK_PIN_NO(23) | 2)
+#define MT8365_PIN_23_KPROW1__FUNC_WIFI_TXD (MTK_PIN_NO(23) | 3)
+#define MT8365_PIN_23_KPROW1__FUNC_CLKM3 (MTK_PIN_NO(23) | 4)
+#define MT8365_PIN_23_KPROW1__FUNC_ANT_SEL1 (MTK_PIN_NO(23) | 5)
+#define MT8365_PIN_23_KPROW1__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(23) | 6)
+#define MT8365_PIN_23_KPROW1__FUNC_DBG_MON_B0 (MTK_PIN_NO(23) | 7)
+
+#define MT8365_PIN_24_KPCOL0__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define MT8365_PIN_24_KPCOL0__FUNC_KPCOL0 (MTK_PIN_NO(24) | 1)
+#define MT8365_PIN_24_KPCOL0__FUNC_DBG_MON_A23 (MTK_PIN_NO(24) | 7)
+
+#define MT8365_PIN_25_KPCOL1__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define MT8365_PIN_25_KPCOL1__FUNC_KPCOL1 (MTK_PIN_NO(25) | 1)
+#define MT8365_PIN_25_KPCOL1__FUNC_USB_DRVVBUS (MTK_PIN_NO(25) | 2)
+#define MT8365_PIN_25_KPCOL1__FUNC_APU_JTAG_TRST (MTK_PIN_NO(25) | 3)
+#define MT8365_PIN_25_KPCOL1__FUNC_UDI_NTRST_XI (MTK_PIN_NO(25) | 4)
+#define MT8365_PIN_25_KPCOL1__FUNC_DFD_NTRST_XI (MTK_PIN_NO(25) | 5)
+#define MT8365_PIN_25_KPCOL1__FUNC_CONN_TEST_CK (MTK_PIN_NO(25) | 6)
+#define MT8365_PIN_25_KPCOL1__FUNC_DBG_MON_B1 (MTK_PIN_NO(25) | 7)
+
+#define MT8365_PIN_26_SPI_CS__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define MT8365_PIN_26_SPI_CS__FUNC_SPI_CSB (MTK_PIN_NO(26) | 1)
+#define MT8365_PIN_26_SPI_CS__FUNC_APU_JTAG_TMS (MTK_PIN_NO(26) | 3)
+#define MT8365_PIN_26_SPI_CS__FUNC_UDI_TMS_XI (MTK_PIN_NO(26) | 4)
+#define MT8365_PIN_26_SPI_CS__FUNC_DFD_TMS_XI (MTK_PIN_NO(26) | 5)
+#define MT8365_PIN_26_SPI_CS__FUNC_CONN_TEST_CK (MTK_PIN_NO(26) | 6)
+#define MT8365_PIN_26_SPI_CS__FUNC_DBG_MON_A24 (MTK_PIN_NO(26) | 7)
+
+#define MT8365_PIN_27_SPI_CK__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define MT8365_PIN_27_SPI_CK__FUNC_SPI_CLK (MTK_PIN_NO(27) | 1)
+#define MT8365_PIN_27_SPI_CK__FUNC_APU_JTAG_TCK (MTK_PIN_NO(27) | 3)
+#define MT8365_PIN_27_SPI_CK__FUNC_UDI_TCK_XI (MTK_PIN_NO(27) | 4)
+#define MT8365_PIN_27_SPI_CK__FUNC_DFD_TCK_XI (MTK_PIN_NO(27) | 5)
+#define MT8365_PIN_27_SPI_CK__FUNC_APU_TEST_CK (MTK_PIN_NO(27) | 6)
+#define MT8365_PIN_27_SPI_CK__FUNC_DBG_MON_A25 (MTK_PIN_NO(27) | 7)
+
+#define MT8365_PIN_28_SPI_MI__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define MT8365_PIN_28_SPI_MI__FUNC_SPI_MI (MTK_PIN_NO(28) | 1)
+#define MT8365_PIN_28_SPI_MI__FUNC_SPI_MO (MTK_PIN_NO(28) | 2)
+#define MT8365_PIN_28_SPI_MI__FUNC_APU_JTAG_TDI (MTK_PIN_NO(28) | 3)
+#define MT8365_PIN_28_SPI_MI__FUNC_UDI_TDI_XI (MTK_PIN_NO(28) | 4)
+#define MT8365_PIN_28_SPI_MI__FUNC_DFD_TDI_XI (MTK_PIN_NO(28) | 5)
+#define MT8365_PIN_28_SPI_MI__FUNC_DSP_TEST_CK (MTK_PIN_NO(28) | 6)
+#define MT8365_PIN_28_SPI_MI__FUNC_DBG_MON_A26 (MTK_PIN_NO(28) | 7)
+
+#define MT8365_PIN_29_SPI_MO__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define MT8365_PIN_29_SPI_MO__FUNC_SPI_MO (MTK_PIN_NO(29) | 1)
+#define MT8365_PIN_29_SPI_MO__FUNC_SPI_MI (MTK_PIN_NO(29) | 2)
+#define MT8365_PIN_29_SPI_MO__FUNC_APU_JTAG_TDO (MTK_PIN_NO(29) | 3)
+#define MT8365_PIN_29_SPI_MO__FUNC_UDI_TDO (MTK_PIN_NO(29) | 4)
+#define MT8365_PIN_29_SPI_MO__FUNC_DFD_TDO (MTK_PIN_NO(29) | 5)
+#define MT8365_PIN_29_SPI_MO__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(29) | 6)
+#define MT8365_PIN_29_SPI_MO__FUNC_DBG_MON_A27 (MTK_PIN_NO(29) | 7)
+
+#define MT8365_PIN_30_JTMS__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define MT8365_PIN_30_JTMS__FUNC_JTMS (MTK_PIN_NO(30) | 1)
+#define MT8365_PIN_30_JTMS__FUNC_DFD_TMS_XI (MTK_PIN_NO(30) | 2)
+#define MT8365_PIN_30_JTMS__FUNC_UDI_TMS_XI (MTK_PIN_NO(30) | 3)
+#define MT8365_PIN_30_JTMS__FUNC_MCU_SPM_TMS (MTK_PIN_NO(30) | 4)
+#define MT8365_PIN_30_JTMS__FUNC_CONN_MCU_TMS (MTK_PIN_NO(30) | 5)
+#define MT8365_PIN_30_JTMS__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(30) | 6)
+
+#define MT8365_PIN_31_JTCK__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define MT8365_PIN_31_JTCK__FUNC_JTCK (MTK_PIN_NO(31) | 1)
+#define MT8365_PIN_31_JTCK__FUNC_DFD_TCK_XI (MTK_PIN_NO(31) | 2)
+#define MT8365_PIN_31_JTCK__FUNC_UDI_TCK_XI (MTK_PIN_NO(31) | 3)
+#define MT8365_PIN_31_JTCK__FUNC_MCU_SPM_TCK (MTK_PIN_NO(31) | 4)
+#define MT8365_PIN_31_JTCK__FUNC_CONN_MCU_TCK (MTK_PIN_NO(31) | 5)
+#define MT8365_PIN_31_JTCK__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(31) | 6)
+
+#define MT8365_PIN_32_JTDI__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define MT8365_PIN_32_JTDI__FUNC_JTDI (MTK_PIN_NO(32) | 1)
+#define MT8365_PIN_32_JTDI__FUNC_DFD_TDI_XI (MTK_PIN_NO(32) | 2)
+#define MT8365_PIN_32_JTDI__FUNC_UDI_TDI_XI (MTK_PIN_NO(32) | 3)
+#define MT8365_PIN_32_JTDI__FUNC_MCU_SPM_TDI (MTK_PIN_NO(32) | 4)
+#define MT8365_PIN_32_JTDI__FUNC_CONN_MCU_TDI (MTK_PIN_NO(32) | 5)
+
+#define MT8365_PIN_33_JTDO__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define MT8365_PIN_33_JTDO__FUNC_JTDO (MTK_PIN_NO(33) | 1)
+#define MT8365_PIN_33_JTDO__FUNC_DFD_TDO (MTK_PIN_NO(33) | 2)
+#define MT8365_PIN_33_JTDO__FUNC_UDI_TDO (MTK_PIN_NO(33) | 3)
+#define MT8365_PIN_33_JTDO__FUNC_MCU_SPM_TDO (MTK_PIN_NO(33) | 4)
+#define MT8365_PIN_33_JTDO__FUNC_CONN_MCU_TDO (MTK_PIN_NO(33) | 5)
+
+#define MT8365_PIN_34_JTRST__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define MT8365_PIN_34_JTRST__FUNC_JTRST (MTK_PIN_NO(34) | 1)
+#define MT8365_PIN_34_JTRST__FUNC_DFD_NTRST_XI (MTK_PIN_NO(34) | 2)
+#define MT8365_PIN_34_JTRST__FUNC_UDI_NTRST_XI (MTK_PIN_NO(34) | 3)
+#define MT8365_PIN_34_JTRST__FUNC_MCU_SPM_NTRST (MTK_PIN_NO(34) | 4)
+#define MT8365_PIN_34_JTRST__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(34) | 5)
+
+#define MT8365_PIN_35_URXD0__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define MT8365_PIN_35_URXD0__FUNC_URXD0 (MTK_PIN_NO(35) | 1)
+#define MT8365_PIN_35_URXD0__FUNC_UTXD0 (MTK_PIN_NO(35) | 2)
+#define MT8365_PIN_35_URXD0__FUNC_DSP_URXD0 (MTK_PIN_NO(35) | 7)
+
+#define MT8365_PIN_36_UTXD0__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define MT8365_PIN_36_UTXD0__FUNC_UTXD0 (MTK_PIN_NO(36) | 1)
+#define MT8365_PIN_36_UTXD0__FUNC_URXD0 (MTK_PIN_NO(36) | 2)
+#define MT8365_PIN_36_UTXD0__FUNC_DSP_UTXD0 (MTK_PIN_NO(36) | 7)
+
+#define MT8365_PIN_37_URXD1__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define MT8365_PIN_37_URXD1__FUNC_URXD1 (MTK_PIN_NO(37) | 1)
+#define MT8365_PIN_37_URXD1__FUNC_UTXD1 (MTK_PIN_NO(37) | 2)
+#define MT8365_PIN_37_URXD1__FUNC_UCTS2 (MTK_PIN_NO(37) | 3)
+#define MT8365_PIN_37_URXD1__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(37) | 4)
+#define MT8365_PIN_37_URXD1__FUNC_CONN_UART0_RXD (MTK_PIN_NO(37) | 5)
+#define MT8365_PIN_37_URXD1__FUNC_I2S0_MCK (MTK_PIN_NO(37) | 6)
+#define MT8365_PIN_37_URXD1__FUNC_DSP_URXD0 (MTK_PIN_NO(37) | 7)
+
+#define MT8365_PIN_38_UTXD1__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define MT8365_PIN_38_UTXD1__FUNC_UTXD1 (MTK_PIN_NO(38) | 1)
+#define MT8365_PIN_38_UTXD1__FUNC_URXD1 (MTK_PIN_NO(38) | 2)
+#define MT8365_PIN_38_UTXD1__FUNC_URTS2 (MTK_PIN_NO(38) | 3)
+#define MT8365_PIN_38_UTXD1__FUNC_ANT_SEL2 (MTK_PIN_NO(38) | 4)
+#define MT8365_PIN_38_UTXD1__FUNC_CONN_UART0_TXD (MTK_PIN_NO(38) | 5)
+#define MT8365_PIN_38_UTXD1__FUNC_I2S1_MCK (MTK_PIN_NO(38) | 6)
+#define MT8365_PIN_38_UTXD1__FUNC_DSP_UTXD0 (MTK_PIN_NO(38) | 7)
+
+#define MT8365_PIN_39_URXD2__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define MT8365_PIN_39_URXD2__FUNC_URXD2 (MTK_PIN_NO(39) | 1)
+#define MT8365_PIN_39_URXD2__FUNC_UTXD2 (MTK_PIN_NO(39) | 2)
+#define MT8365_PIN_39_URXD2__FUNC_UCTS1 (MTK_PIN_NO(39) | 3)
+#define MT8365_PIN_39_URXD2__FUNC_IDDIG (MTK_PIN_NO(39) | 4)
+#define MT8365_PIN_39_URXD2__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(39) | 5)
+#define MT8365_PIN_39_URXD2__FUNC_I2S2_MCK (MTK_PIN_NO(39) | 6)
+#define MT8365_PIN_39_URXD2__FUNC_DSP_URXD0 (MTK_PIN_NO(39) | 7)
+
+#define MT8365_PIN_40_UTXD2__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define MT8365_PIN_40_UTXD2__FUNC_UTXD2 (MTK_PIN_NO(40) | 1)
+#define MT8365_PIN_40_UTXD2__FUNC_URXD2 (MTK_PIN_NO(40) | 2)
+#define MT8365_PIN_40_UTXD2__FUNC_URTS1 (MTK_PIN_NO(40) | 3)
+#define MT8365_PIN_40_UTXD2__FUNC_USB_DRVVBUS (MTK_PIN_NO(40) | 4)
+#define MT8365_PIN_40_UTXD2__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(40) | 5)
+#define MT8365_PIN_40_UTXD2__FUNC_I2S3_MCK (MTK_PIN_NO(40) | 6)
+#define MT8365_PIN_40_UTXD2__FUNC_DSP_UTXD0 (MTK_PIN_NO(40) | 7)
+
+#define MT8365_PIN_41_PWRAP_SPI0_MI__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define MT8365_PIN_41_PWRAP_SPI0_MI__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(41) | 1)
+#define MT8365_PIN_41_PWRAP_SPI0_MI__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(41) | 2)
+
+#define MT8365_PIN_42_PWRAP_SPI0_MO__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define MT8365_PIN_42_PWRAP_SPI0_MO__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(42) | 1)
+#define MT8365_PIN_42_PWRAP_SPI0_MO__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(42) | 2)
+
+#define MT8365_PIN_43_PWRAP_SPI0_CK__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define MT8365_PIN_43_PWRAP_SPI0_CK__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(43) | 1)
+
+#define MT8365_PIN_44_PWRAP_SPI0_CSN__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define MT8365_PIN_44_PWRAP_SPI0_CSN__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(44) | 1)
+
+#define MT8365_PIN_45_RTC32K_CK__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define MT8365_PIN_45_RTC32K_CK__FUNC_RTC32K_CK (MTK_PIN_NO(45) | 1)
+
+#define MT8365_PIN_46_WATCHDOG__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define MT8365_PIN_46_WATCHDOG__FUNC_WATCHDOG (MTK_PIN_NO(46) | 1)
+
+#define MT8365_PIN_47_SRCLKENA0__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define MT8365_PIN_47_SRCLKENA0__FUNC_SRCLKENA0 (MTK_PIN_NO(47) | 1)
+#define MT8365_PIN_47_SRCLKENA0__FUNC_SRCLKENA1 (MTK_PIN_NO(47) | 2)
+
+#define MT8365_PIN_48_SRCLKENA1__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define MT8365_PIN_48_SRCLKENA1__FUNC_SRCLKENA1 (MTK_PIN_NO(48) | 1)
+
+#define MT8365_PIN_49_AUD_CLK_MOSI__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define MT8365_PIN_49_AUD_CLK_MOSI__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(49) | 1)
+#define MT8365_PIN_49_AUD_CLK_MOSI__FUNC_AUD_CLK_MISO (MTK_PIN_NO(49) | 2)
+#define MT8365_PIN_49_AUD_CLK_MOSI__FUNC_I2S1_MCK (MTK_PIN_NO(49) | 3)
+
+#define MT8365_PIN_50_AUD_SYNC_MOSI__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define MT8365_PIN_50_AUD_SYNC_MOSI__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(50) | 1)
+#define MT8365_PIN_50_AUD_SYNC_MOSI__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(50) | 2)
+#define MT8365_PIN_50_AUD_SYNC_MOSI__FUNC_I2S1_BCK (MTK_PIN_NO(50) | 3)
+
+#define MT8365_PIN_51_AUD_DAT_MOSI0__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define MT8365_PIN_51_AUD_DAT_MOSI0__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(51) | 1)
+#define MT8365_PIN_51_AUD_DAT_MOSI0__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(51) | 2)
+#define MT8365_PIN_51_AUD_DAT_MOSI0__FUNC_I2S1_LRCK (MTK_PIN_NO(51) | 3)
+
+#define MT8365_PIN_52_AUD_DAT_MOSI1__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define MT8365_PIN_52_AUD_DAT_MOSI1__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(52) | 1)
+#define MT8365_PIN_52_AUD_DAT_MOSI1__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(52) | 2)
+#define MT8365_PIN_52_AUD_DAT_MOSI1__FUNC_I2S1_DO (MTK_PIN_NO(52) | 3)
+
+#define MT8365_PIN_53_AUD_CLK_MISO__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define MT8365_PIN_53_AUD_CLK_MISO__FUNC_AUD_CLK_MISO (MTK_PIN_NO(53) | 1)
+#define MT8365_PIN_53_AUD_CLK_MISO__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(53) | 2)
+#define MT8365_PIN_53_AUD_CLK_MISO__FUNC_I2S2_MCK (MTK_PIN_NO(53) | 3)
+
+#define MT8365_PIN_54_AUD_SYNC_MISO__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define MT8365_PIN_54_AUD_SYNC_MISO__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(54) | 1)
+#define MT8365_PIN_54_AUD_SYNC_MISO__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(54) | 2)
+#define MT8365_PIN_54_AUD_SYNC_MISO__FUNC_I2S2_BCK (MTK_PIN_NO(54) | 3)
+
+#define MT8365_PIN_55_AUD_DAT_MISO0__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define MT8365_PIN_55_AUD_DAT_MISO0__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(55) | 1)
+#define MT8365_PIN_55_AUD_DAT_MISO0__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(55) | 2)
+#define MT8365_PIN_55_AUD_DAT_MISO0__FUNC_I2S2_LRCK (MTK_PIN_NO(55) | 3)
+
+#define MT8365_PIN_56_AUD_DAT_MISO1__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define MT8365_PIN_56_AUD_DAT_MISO1__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(56) | 1)
+#define MT8365_PIN_56_AUD_DAT_MISO1__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(56) | 2)
+#define MT8365_PIN_56_AUD_DAT_MISO1__FUNC_I2S2_DI (MTK_PIN_NO(56) | 3)
+
+#define MT8365_PIN_57_SDA0__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define MT8365_PIN_57_SDA0__FUNC_SDA0_0 (MTK_PIN_NO(57) | 1)
+
+#define MT8365_PIN_58_SCL0__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define MT8365_PIN_58_SCL0__FUNC_SCL0_0 (MTK_PIN_NO(58) | 1)
+
+#define MT8365_PIN_59_SDA1__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define MT8365_PIN_59_SDA1__FUNC_SDA1_0 (MTK_PIN_NO(59) | 1)
+#define MT8365_PIN_59_SDA1__FUNC_USB_SDA (MTK_PIN_NO(59) | 6)
+#define MT8365_PIN_59_SDA1__FUNC_DBG_SDA (MTK_PIN_NO(59) | 7)
+
+#define MT8365_PIN_60_SCL1__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define MT8365_PIN_60_SCL1__FUNC_SCL1_0 (MTK_PIN_NO(60) | 1)
+#define MT8365_PIN_60_SCL1__FUNC_USB_SCL (MTK_PIN_NO(60) | 6)
+#define MT8365_PIN_60_SCL1__FUNC_DBG_SCL (MTK_PIN_NO(60) | 7)
+
+#define MT8365_PIN_61_SDA2__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define MT8365_PIN_61_SDA2__FUNC_SDA2_0 (MTK_PIN_NO(61) | 1)
+
+#define MT8365_PIN_62_SCL2__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define MT8365_PIN_62_SCL2__FUNC_SCL2_0 (MTK_PIN_NO(62) | 1)
+
+#define MT8365_PIN_63_SDA3__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define MT8365_PIN_63_SDA3__FUNC_SDA3_0 (MTK_PIN_NO(63) | 1)
+
+#define MT8365_PIN_64_SCL3__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define MT8365_PIN_64_SCL3__FUNC_SCL3_0 (MTK_PIN_NO(64) | 1)
+
+#define MT8365_PIN_65_CMMCLK0__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define MT8365_PIN_65_CMMCLK0__FUNC_CMMCLK0 (MTK_PIN_NO(65) | 1)
+#define MT8365_PIN_65_CMMCLK0__FUNC_CMMCLK1 (MTK_PIN_NO(65) | 2)
+#define MT8365_PIN_65_CMMCLK0__FUNC_DBG_MON_A28 (MTK_PIN_NO(65) | 7)
+
+#define MT8365_PIN_66_CMMCLK1__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define MT8365_PIN_66_CMMCLK1__FUNC_CMMCLK1 (MTK_PIN_NO(66) | 1)
+#define MT8365_PIN_66_CMMCLK1__FUNC_CMMCLK0 (MTK_PIN_NO(66) | 2)
+#define MT8365_PIN_66_CMMCLK1__FUNC_DBG_MON_B2 (MTK_PIN_NO(66) | 7)
+
+#define MT8365_PIN_67_CMPCLK__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define MT8365_PIN_67_CMPCLK__FUNC_CMPCLK (MTK_PIN_NO(67) | 1)
+#define MT8365_PIN_67_CMPCLK__FUNC_ANT_SEL0 (MTK_PIN_NO(67) | 2)
+#define MT8365_PIN_67_CMPCLK__FUNC_TDM_RX_BCK (MTK_PIN_NO(67) | 4)
+#define MT8365_PIN_67_CMPCLK__FUNC_I2S0_BCK (MTK_PIN_NO(67) | 5)
+#define MT8365_PIN_67_CMPCLK__FUNC_DBG_MON_B3 (MTK_PIN_NO(67) | 7)
+
+#define MT8365_PIN_68_CMDAT0__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define MT8365_PIN_68_CMDAT0__FUNC_CMDAT0 (MTK_PIN_NO(68) | 1)
+#define MT8365_PIN_68_CMDAT0__FUNC_ANT_SEL1 (MTK_PIN_NO(68) | 2)
+#define MT8365_PIN_68_CMDAT0__FUNC_TDM_RX_LRCK (MTK_PIN_NO(68) | 4)
+#define MT8365_PIN_68_CMDAT0__FUNC_I2S0_LRCK (MTK_PIN_NO(68) | 5)
+#define MT8365_PIN_68_CMDAT0__FUNC_DBG_MON_B4 (MTK_PIN_NO(68) | 7)
+
+#define MT8365_PIN_69_CMDAT1__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define MT8365_PIN_69_CMDAT1__FUNC_CMDAT1 (MTK_PIN_NO(69) | 1)
+#define MT8365_PIN_69_CMDAT1__FUNC_ANT_SEL2 (MTK_PIN_NO(69) | 2)
+#define MT8365_PIN_69_CMDAT1__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(69) | 3)
+#define MT8365_PIN_69_CMDAT1__FUNC_TDM_RX_MCK (MTK_PIN_NO(69) | 4)
+#define MT8365_PIN_69_CMDAT1__FUNC_I2S0_MCK (MTK_PIN_NO(69) | 5)
+#define MT8365_PIN_69_CMDAT1__FUNC_DBG_MON_B5 (MTK_PIN_NO(69) | 7)
+
+#define MT8365_PIN_70_CMDAT2__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define MT8365_PIN_70_CMDAT2__FUNC_CMDAT2 (MTK_PIN_NO(70) | 1)
+#define MT8365_PIN_70_CMDAT2__FUNC_ANT_SEL3 (MTK_PIN_NO(70) | 2)
+#define MT8365_PIN_70_CMDAT2__FUNC_TDM_RX_DI (MTK_PIN_NO(70) | 4)
+#define MT8365_PIN_70_CMDAT2__FUNC_I2S0_DI (MTK_PIN_NO(70) | 5)
+#define MT8365_PIN_70_CMDAT2__FUNC_DBG_MON_B6 (MTK_PIN_NO(70) | 7)
+
+#define MT8365_PIN_71_CMDAT3__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define MT8365_PIN_71_CMDAT3__FUNC_CMDAT3 (MTK_PIN_NO(71) | 1)
+#define MT8365_PIN_71_CMDAT3__FUNC_ANT_SEL4 (MTK_PIN_NO(71) | 2)
+#define MT8365_PIN_71_CMDAT3__FUNC_DBG_MON_B7 (MTK_PIN_NO(71) | 7)
+
+#define MT8365_PIN_72_CMDAT4__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define MT8365_PIN_72_CMDAT4__FUNC_CMDAT4 (MTK_PIN_NO(72) | 1)
+#define MT8365_PIN_72_CMDAT4__FUNC_ANT_SEL5 (MTK_PIN_NO(72) | 2)
+#define MT8365_PIN_72_CMDAT4__FUNC_I2S3_BCK (MTK_PIN_NO(72) | 5)
+#define MT8365_PIN_72_CMDAT4__FUNC_DBG_MON_B8 (MTK_PIN_NO(72) | 7)
+
+#define MT8365_PIN_73_CMDAT5__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define MT8365_PIN_73_CMDAT5__FUNC_CMDAT5 (MTK_PIN_NO(73) | 1)
+#define MT8365_PIN_73_CMDAT5__FUNC_ANT_SEL6 (MTK_PIN_NO(73) | 2)
+#define MT8365_PIN_73_CMDAT5__FUNC_I2S3_LRCK (MTK_PIN_NO(73) | 5)
+#define MT8365_PIN_73_CMDAT5__FUNC_DBG_MON_B9 (MTK_PIN_NO(73) | 7)
+
+#define MT8365_PIN_74_CMDAT6__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define MT8365_PIN_74_CMDAT6__FUNC_CMDAT6 (MTK_PIN_NO(74) | 1)
+#define MT8365_PIN_74_CMDAT6__FUNC_ANT_SEL7 (MTK_PIN_NO(74) | 2)
+#define MT8365_PIN_74_CMDAT6__FUNC_I2S3_MCK (MTK_PIN_NO(74) | 5)
+#define MT8365_PIN_74_CMDAT6__FUNC_DBG_MON_B10 (MTK_PIN_NO(74) | 7)
+
+#define MT8365_PIN_75_CMDAT7__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define MT8365_PIN_75_CMDAT7__FUNC_CMDAT7 (MTK_PIN_NO(75) | 1)
+#define MT8365_PIN_75_CMDAT7__FUNC_I2S3_DO (MTK_PIN_NO(75) | 5)
+#define MT8365_PIN_75_CMDAT7__FUNC_DBG_MON_B11 (MTK_PIN_NO(75) | 7)
+
+#define MT8365_PIN_76_CMDAT8__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define MT8365_PIN_76_CMDAT8__FUNC_CMDAT8 (MTK_PIN_NO(76) | 1)
+#define MT8365_PIN_76_CMDAT8__FUNC_PCM_CLK (MTK_PIN_NO(76) | 5)
+#define MT8365_PIN_76_CMDAT8__FUNC_DBG_MON_A29 (MTK_PIN_NO(76) | 7)
+
+#define MT8365_PIN_77_CMDAT9__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define MT8365_PIN_77_CMDAT9__FUNC_CMDAT9 (MTK_PIN_NO(77) | 1)
+#define MT8365_PIN_77_CMDAT9__FUNC_PCM_SYNC (MTK_PIN_NO(77) | 5)
+#define MT8365_PIN_77_CMDAT9__FUNC_DBG_MON_A30 (MTK_PIN_NO(77) | 7)
+
+#define MT8365_PIN_78_CMHSYNC__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define MT8365_PIN_78_CMHSYNC__FUNC_CMHSYNC (MTK_PIN_NO(78) | 1)
+#define MT8365_PIN_78_CMHSYNC__FUNC_PCM_RX (MTK_PIN_NO(78) | 5)
+#define MT8365_PIN_78_CMHSYNC__FUNC_DBG_MON_A31 (MTK_PIN_NO(78) | 7)
+
+#define MT8365_PIN_79_CMVSYNC__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define MT8365_PIN_79_CMVSYNC__FUNC_CMVSYNC (MTK_PIN_NO(79) | 1)
+#define MT8365_PIN_79_CMVSYNC__FUNC_PCM_TX (MTK_PIN_NO(79) | 5)
+#define MT8365_PIN_79_CMVSYNC__FUNC_DBG_MON_A32 (MTK_PIN_NO(79) | 7)
+
+#define MT8365_PIN_80_MSDC2_CMD__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define MT8365_PIN_80_MSDC2_CMD__FUNC_MSDC2_CMD (MTK_PIN_NO(80) | 1)
+#define MT8365_PIN_80_MSDC2_CMD__FUNC_TDM_TX_LRCK (MTK_PIN_NO(80) | 2)
+#define MT8365_PIN_80_MSDC2_CMD__FUNC_UTXD1 (MTK_PIN_NO(80) | 3)
+#define MT8365_PIN_80_MSDC2_CMD__FUNC_DPI_D19 (MTK_PIN_NO(80) | 4)
+#define MT8365_PIN_80_MSDC2_CMD__FUNC_UDI_TMS_XI (MTK_PIN_NO(80) | 5)
+#define MT8365_PIN_80_MSDC2_CMD__FUNC_ADSP_JTAG_TMS (MTK_PIN_NO(80) | 6)
+
+#define MT8365_PIN_81_MSDC2_CLK__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define MT8365_PIN_81_MSDC2_CLK__FUNC_MSDC2_CLK (MTK_PIN_NO(81) | 1)
+#define MT8365_PIN_81_MSDC2_CLK__FUNC_TDM_TX_BCK (MTK_PIN_NO(81) | 2)
+#define MT8365_PIN_81_MSDC2_CLK__FUNC_URXD1 (MTK_PIN_NO(81) | 3)
+#define MT8365_PIN_81_MSDC2_CLK__FUNC_DPI_D20 (MTK_PIN_NO(81) | 4)
+#define MT8365_PIN_81_MSDC2_CLK__FUNC_UDI_TCK_XI (MTK_PIN_NO(81) | 5)
+#define MT8365_PIN_81_MSDC2_CLK__FUNC_ADSP_JTAG_TCK (MTK_PIN_NO(81) | 6)
+
+#define MT8365_PIN_82_MSDC2_DAT0__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define MT8365_PIN_82_MSDC2_DAT0__FUNC_MSDC2_DAT0 (MTK_PIN_NO(82) | 1)
+#define MT8365_PIN_82_MSDC2_DAT0__FUNC_TDM_TX_DATA0 (MTK_PIN_NO(82) | 2)
+#define MT8365_PIN_82_MSDC2_DAT0__FUNC_UTXD2 (MTK_PIN_NO(82) | 3)
+#define MT8365_PIN_82_MSDC2_DAT0__FUNC_DPI_D21 (MTK_PIN_NO(82) | 4)
+#define MT8365_PIN_82_MSDC2_DAT0__FUNC_UDI_TDI_XI (MTK_PIN_NO(82) | 5)
+#define MT8365_PIN_82_MSDC2_DAT0__FUNC_ADSP_JTAG_TDI (MTK_PIN_NO(82) | 6)
+
+#define MT8365_PIN_83_MSDC2_DAT1__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define MT8365_PIN_83_MSDC2_DAT1__FUNC_MSDC2_DAT1 (MTK_PIN_NO(83) | 1)
+#define MT8365_PIN_83_MSDC2_DAT1__FUNC_TDM_TX_DATA1 (MTK_PIN_NO(83) | 2)
+#define MT8365_PIN_83_MSDC2_DAT1__FUNC_URXD2 (MTK_PIN_NO(83) | 3)
+#define MT8365_PIN_83_MSDC2_DAT1__FUNC_DPI_D22 (MTK_PIN_NO(83) | 4)
+#define MT8365_PIN_83_MSDC2_DAT1__FUNC_UDI_TDO (MTK_PIN_NO(83) | 5)
+#define MT8365_PIN_83_MSDC2_DAT1__FUNC_ADSP_JTAG_TDO (MTK_PIN_NO(83) | 6)
+
+#define MT8365_PIN_84_MSDC2_DAT2__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define MT8365_PIN_84_MSDC2_DAT2__FUNC_MSDC2_DAT2 (MTK_PIN_NO(84) | 1)
+#define MT8365_PIN_84_MSDC2_DAT2__FUNC_TDM_TX_DATA2 (MTK_PIN_NO(84) | 2)
+#define MT8365_PIN_84_MSDC2_DAT2__FUNC_PWM_A (MTK_PIN_NO(84) | 3)
+#define MT8365_PIN_84_MSDC2_DAT2__FUNC_DPI_D23 (MTK_PIN_NO(84) | 4)
+#define MT8365_PIN_84_MSDC2_DAT2__FUNC_UDI_NTRST_XI (MTK_PIN_NO(84) | 5)
+#define MT8365_PIN_84_MSDC2_DAT2__FUNC_ADSP_JTAG_TRST (MTK_PIN_NO(84) | 6)
+
+#define MT8365_PIN_85_MSDC2_DAT3__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define MT8365_PIN_85_MSDC2_DAT3__FUNC_MSDC2_DAT3 (MTK_PIN_NO(85) | 1)
+#define MT8365_PIN_85_MSDC2_DAT3__FUNC_TDM_TX_DATA3 (MTK_PIN_NO(85) | 2)
+#define MT8365_PIN_85_MSDC2_DAT3__FUNC_PWM_B (MTK_PIN_NO(85) | 3)
+#define MT8365_PIN_85_MSDC2_DAT3__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(85) | 5)
+
+#define MT8365_PIN_86_MSDC2_DSL__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define MT8365_PIN_86_MSDC2_DSL__FUNC_MSDC2_DSL (MTK_PIN_NO(86) | 1)
+#define MT8365_PIN_86_MSDC2_DSL__FUNC_TDM_TX_MCK (MTK_PIN_NO(86) | 2)
+#define MT8365_PIN_86_MSDC2_DSL__FUNC_PWM_C (MTK_PIN_NO(86) | 3)
+
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_MSDC1_CMD (MTK_PIN_NO(87) | 1)
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(87) | 2)
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_DFD_TMS_XI (MTK_PIN_NO(87) | 3)
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_APU_JTAG_TMS (MTK_PIN_NO(87) | 4)
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_MCU_SPM_TMS (MTK_PIN_NO(87) | 5)
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_CONN_DSP_JMS (MTK_PIN_NO(87) | 6)
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_ADSP_JTAG_TMS (MTK_PIN_NO(87) | 7)
+
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_MSDC1_CLK (MTK_PIN_NO(88) | 1)
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(88) | 2)
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_DFD_TCK_XI (MTK_PIN_NO(88) | 3)
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_APU_JTAG_TCK (MTK_PIN_NO(88) | 4)
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_MCU_SPM_TCK (MTK_PIN_NO(88) | 5)
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_CONN_DSP_JCK (MTK_PIN_NO(88) | 6)
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_ADSP_JTAG_TCK (MTK_PIN_NO(88) | 7)
+
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_MSDC1_DAT0 (MTK_PIN_NO(89) | 1)
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_PWM_C (MTK_PIN_NO(89) | 2)
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_DFD_TDI_XI (MTK_PIN_NO(89) | 3)
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_APU_JTAG_TDI (MTK_PIN_NO(89) | 4)
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_MCU_SPM_TDI (MTK_PIN_NO(89) | 5)
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_CONN_DSP_JDI (MTK_PIN_NO(89) | 6)
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_ADSP_JTAG_TDI (MTK_PIN_NO(89) | 7)
+
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_MSDC1_DAT1 (MTK_PIN_NO(90) | 1)
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_SPDIF_IN (MTK_PIN_NO(90) | 2)
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_DFD_TDO (MTK_PIN_NO(90) | 3)
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_APU_JTAG_TDO (MTK_PIN_NO(90) | 4)
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_MCU_SPM_TDO (MTK_PIN_NO(90) | 5)
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_CONN_DSP_JDO (MTK_PIN_NO(90) | 6)
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_ADSP_JTAG_TDO (MTK_PIN_NO(90) | 7)
+
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_MSDC1_DAT2 (MTK_PIN_NO(91) | 1)
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_SPDIF_OUT (MTK_PIN_NO(91) | 2)
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_DFD_NTRST_XI (MTK_PIN_NO(91) | 3)
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_APU_JTAG_TRST (MTK_PIN_NO(91) | 4)
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_MCU_SPM_NTRST (MTK_PIN_NO(91) | 5)
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(91) | 6)
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_ADSP_JTAG_TRST (MTK_PIN_NO(91) | 7)
+
+#define MT8365_PIN_92_MSDC1_DAT3__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define MT8365_PIN_92_MSDC1_DAT3__FUNC_MSDC1_DAT3 (MTK_PIN_NO(92) | 1)
+#define MT8365_PIN_92_MSDC1_DAT3__FUNC_IRRX (MTK_PIN_NO(92) | 2)
+#define MT8365_PIN_92_MSDC1_DAT3__FUNC_PWM_A (MTK_PIN_NO(92) | 3)
+
+#define MT8365_PIN_93_MSDC0_DAT7__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define MT8365_PIN_93_MSDC0_DAT7__FUNC_MSDC0_DAT7 (MTK_PIN_NO(93) | 1)
+#define MT8365_PIN_93_MSDC0_DAT7__FUNC_NLD7 (MTK_PIN_NO(93) | 2)
+
+#define MT8365_PIN_94_MSDC0_DAT6__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define MT8365_PIN_94_MSDC0_DAT6__FUNC_MSDC0_DAT6 (MTK_PIN_NO(94) | 1)
+#define MT8365_PIN_94_MSDC0_DAT6__FUNC_NLD6 (MTK_PIN_NO(94) | 2)
+
+#define MT8365_PIN_95_MSDC0_DAT5__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define MT8365_PIN_95_MSDC0_DAT5__FUNC_MSDC0_DAT5 (MTK_PIN_NO(95) | 1)
+#define MT8365_PIN_95_MSDC0_DAT5__FUNC_NLD4 (MTK_PIN_NO(95) | 2)
+
+#define MT8365_PIN_96_MSDC0_DAT4__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define MT8365_PIN_96_MSDC0_DAT4__FUNC_MSDC0_DAT4 (MTK_PIN_NO(96) | 1)
+#define MT8365_PIN_96_MSDC0_DAT4__FUNC_NLD3 (MTK_PIN_NO(96) | 2)
+
+#define MT8365_PIN_97_MSDC0_RSTB__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define MT8365_PIN_97_MSDC0_RSTB__FUNC_MSDC0_RSTB (MTK_PIN_NO(97) | 1)
+#define MT8365_PIN_97_MSDC0_RSTB__FUNC_NLD0 (MTK_PIN_NO(97) | 2)
+
+#define MT8365_PIN_98_MSDC0_CMD__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define MT8365_PIN_98_MSDC0_CMD__FUNC_MSDC0_CMD (MTK_PIN_NO(98) | 1)
+#define MT8365_PIN_98_MSDC0_CMD__FUNC_NALE (MTK_PIN_NO(98) | 2)
+
+#define MT8365_PIN_99_MSDC0_CLK__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define MT8365_PIN_99_MSDC0_CLK__FUNC_MSDC0_CLK (MTK_PIN_NO(99) | 1)
+#define MT8365_PIN_99_MSDC0_CLK__FUNC_NWEB (MTK_PIN_NO(99) | 2)
+
+#define MT8365_PIN_100_MSDC0_DAT3__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define MT8365_PIN_100_MSDC0_DAT3__FUNC_MSDC0_DAT3 (MTK_PIN_NO(100) | 1)
+#define MT8365_PIN_100_MSDC0_DAT3__FUNC_NLD1 (MTK_PIN_NO(100) | 2)
+
+#define MT8365_PIN_101_MSDC0_DAT2__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define MT8365_PIN_101_MSDC0_DAT2__FUNC_MSDC0_DAT2 (MTK_PIN_NO(101) | 1)
+#define MT8365_PIN_101_MSDC0_DAT2__FUNC_NLD5 (MTK_PIN_NO(101) | 2)
+
+#define MT8365_PIN_102_MSDC0_DAT1__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define MT8365_PIN_102_MSDC0_DAT1__FUNC_MSDC0_DAT1 (MTK_PIN_NO(102) | 1)
+#define MT8365_PIN_102_MSDC0_DAT1__FUNC_NDQS (MTK_PIN_NO(102) | 2)
+
+#define MT8365_PIN_103_MSDC0_DAT0__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define MT8365_PIN_103_MSDC0_DAT0__FUNC_MSDC0_DAT0 (MTK_PIN_NO(103) | 1)
+#define MT8365_PIN_103_MSDC0_DAT0__FUNC_NLD2 (MTK_PIN_NO(103) | 2)
+
+#define MT8365_PIN_104_MSDC0_DSL__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define MT8365_PIN_104_MSDC0_DSL__FUNC_MSDC0_DSL (MTK_PIN_NO(104) | 1)
+
+#define MT8365_PIN_105_NCLE__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define MT8365_PIN_105_NCLE__FUNC_NCLE (MTK_PIN_NO(105) | 1)
+#define MT8365_PIN_105_NCLE__FUNC_TDM_RX_MCK (MTK_PIN_NO(105) | 2)
+#define MT8365_PIN_105_NCLE__FUNC_DBG_MON_B12 (MTK_PIN_NO(105) | 7)
+
+#define MT8365_PIN_106_NCEB1__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define MT8365_PIN_106_NCEB1__FUNC_NCEB1 (MTK_PIN_NO(106) | 1)
+#define MT8365_PIN_106_NCEB1__FUNC_TDM_RX_BCK (MTK_PIN_NO(106) | 2)
+#define MT8365_PIN_106_NCEB1__FUNC_DBG_MON_B13 (MTK_PIN_NO(106) | 7)
+
+#define MT8365_PIN_107_NCEB0__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define MT8365_PIN_107_NCEB0__FUNC_NCEB0 (MTK_PIN_NO(107) | 1)
+#define MT8365_PIN_107_NCEB0__FUNC_TDM_RX_LRCK (MTK_PIN_NO(107) | 2)
+#define MT8365_PIN_107_NCEB0__FUNC_DBG_MON_B14 (MTK_PIN_NO(107) | 7)
+
+#define MT8365_PIN_108_NREB__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define MT8365_PIN_108_NREB__FUNC_NREB (MTK_PIN_NO(108) | 1)
+#define MT8365_PIN_108_NREB__FUNC_TDM_RX_DI (MTK_PIN_NO(108) | 2)
+#define MT8365_PIN_108_NREB__FUNC_DBG_MON_B15 (MTK_PIN_NO(108) | 7)
+
+#define MT8365_PIN_109_NRNB__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define MT8365_PIN_109_NRNB__FUNC_NRNB (MTK_PIN_NO(109) | 1)
+#define MT8365_PIN_109_NRNB__FUNC_TSF_IN (MTK_PIN_NO(109) | 2)
+#define MT8365_PIN_109_NRNB__FUNC_DBG_MON_B16 (MTK_PIN_NO(109) | 7)
+
+#define MT8365_PIN_110_PCM_CLK__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define MT8365_PIN_110_PCM_CLK__FUNC_PCM_CLK (MTK_PIN_NO(110) | 1)
+#define MT8365_PIN_110_PCM_CLK__FUNC_I2S0_BCK (MTK_PIN_NO(110) | 2)
+#define MT8365_PIN_110_PCM_CLK__FUNC_I2S3_BCK (MTK_PIN_NO(110) | 3)
+#define MT8365_PIN_110_PCM_CLK__FUNC_SPDIF_IN (MTK_PIN_NO(110) | 4)
+#define MT8365_PIN_110_PCM_CLK__FUNC_DPI_D15 (MTK_PIN_NO(110) | 5)
+
+#define MT8365_PIN_111_PCM_SYNC__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define MT8365_PIN_111_PCM_SYNC__FUNC_PCM_SYNC (MTK_PIN_NO(111) | 1)
+#define MT8365_PIN_111_PCM_SYNC__FUNC_I2S0_LRCK (MTK_PIN_NO(111) | 2)
+#define MT8365_PIN_111_PCM_SYNC__FUNC_I2S3_LRCK (MTK_PIN_NO(111) | 3)
+#define MT8365_PIN_111_PCM_SYNC__FUNC_SPDIF_OUT (MTK_PIN_NO(111) | 4)
+#define MT8365_PIN_111_PCM_SYNC__FUNC_DPI_D16 (MTK_PIN_NO(111) | 5)
+
+#define MT8365_PIN_112_PCM_RX__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define MT8365_PIN_112_PCM_RX__FUNC_PCM_RX (MTK_PIN_NO(112) | 1)
+#define MT8365_PIN_112_PCM_RX__FUNC_I2S0_DI (MTK_PIN_NO(112) | 2)
+#define MT8365_PIN_112_PCM_RX__FUNC_I2S3_MCK (MTK_PIN_NO(112) | 3)
+#define MT8365_PIN_112_PCM_RX__FUNC_IRRX (MTK_PIN_NO(112) | 4)
+#define MT8365_PIN_112_PCM_RX__FUNC_DPI_D17 (MTK_PIN_NO(112) | 5)
+
+#define MT8365_PIN_113_PCM_TX__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define MT8365_PIN_113_PCM_TX__FUNC_PCM_TX (MTK_PIN_NO(113) | 1)
+#define MT8365_PIN_113_PCM_TX__FUNC_I2S0_MCK (MTK_PIN_NO(113) | 2)
+#define MT8365_PIN_113_PCM_TX__FUNC_I2S3_DO (MTK_PIN_NO(113) | 3)
+#define MT8365_PIN_113_PCM_TX__FUNC_PWM_B (MTK_PIN_NO(113) | 4)
+#define MT8365_PIN_113_PCM_TX__FUNC_DPI_D18 (MTK_PIN_NO(113) | 5)
+
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_I2S0_DI (MTK_PIN_NO(114) | 1)
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_I2S1_DO (MTK_PIN_NO(114) | 2)
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_I2S2_DI (MTK_PIN_NO(114) | 3)
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_I2S3_DO (MTK_PIN_NO(114) | 4)
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_PWM_A (MTK_PIN_NO(114) | 5)
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_SPDIF_IN (MTK_PIN_NO(114) | 6)
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_DBG_MON_B17 (MTK_PIN_NO(114) | 7)
+
+#define MT8365_PIN_115_I2S_LRCK__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define MT8365_PIN_115_I2S_LRCK__FUNC_I2S0_LRCK (MTK_PIN_NO(115) | 1)
+#define MT8365_PIN_115_I2S_LRCK__FUNC_I2S1_LRCK (MTK_PIN_NO(115) | 2)
+#define MT8365_PIN_115_I2S_LRCK__FUNC_I2S2_LRCK (MTK_PIN_NO(115) | 3)
+#define MT8365_PIN_115_I2S_LRCK__FUNC_I2S3_LRCK (MTK_PIN_NO(115) | 4)
+#define MT8365_PIN_115_I2S_LRCK__FUNC_PWM_B (MTK_PIN_NO(115) | 5)
+#define MT8365_PIN_115_I2S_LRCK__FUNC_SPDIF_OUT (MTK_PIN_NO(115) | 6)
+#define MT8365_PIN_115_I2S_LRCK__FUNC_DBG_MON_B18 (MTK_PIN_NO(115) | 7)
+
+#define MT8365_PIN_116_I2S_BCK__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define MT8365_PIN_116_I2S_BCK__FUNC_I2S0_BCK (MTK_PIN_NO(116) | 1)
+#define MT8365_PIN_116_I2S_BCK__FUNC_I2S1_BCK (MTK_PIN_NO(116) | 2)
+#define MT8365_PIN_116_I2S_BCK__FUNC_I2S2_BCK (MTK_PIN_NO(116) | 3)
+#define MT8365_PIN_116_I2S_BCK__FUNC_I2S3_BCK (MTK_PIN_NO(116) | 4)
+#define MT8365_PIN_116_I2S_BCK__FUNC_PWM_C (MTK_PIN_NO(116) | 5)
+#define MT8365_PIN_116_I2S_BCK__FUNC_IRRX (MTK_PIN_NO(116) | 6)
+#define MT8365_PIN_116_I2S_BCK__FUNC_DBG_MON_B19 (MTK_PIN_NO(116) | 7)
+
+#define MT8365_PIN_117_DMIC0_CLK__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define MT8365_PIN_117_DMIC0_CLK__FUNC_DMIC0_CLK (MTK_PIN_NO(117) | 1)
+#define MT8365_PIN_117_DMIC0_CLK__FUNC_I2S2_BCK (MTK_PIN_NO(117) | 2)
+#define MT8365_PIN_117_DMIC0_CLK__FUNC_DBG_MON_B20 (MTK_PIN_NO(117) | 7)
+
+#define MT8365_PIN_118_DMIC0_DAT0__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define MT8365_PIN_118_DMIC0_DAT0__FUNC_DMIC0_DAT0 (MTK_PIN_NO(118) | 1)
+#define MT8365_PIN_118_DMIC0_DAT0__FUNC_I2S2_DI (MTK_PIN_NO(118) | 2)
+#define MT8365_PIN_118_DMIC0_DAT0__FUNC_DBG_MON_B21 (MTK_PIN_NO(118) | 7)
+
+#define MT8365_PIN_119_DMIC0_DAT1__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define MT8365_PIN_119_DMIC0_DAT1__FUNC_DMIC0_DAT1 (MTK_PIN_NO(119) | 1)
+#define MT8365_PIN_119_DMIC0_DAT1__FUNC_I2S2_LRCK (MTK_PIN_NO(119) | 2)
+#define MT8365_PIN_119_DMIC0_DAT1__FUNC_DBG_MON_B22 (MTK_PIN_NO(119) | 7)
+
+#define MT8365_PIN_120_DMIC1_CLK__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define MT8365_PIN_120_DMIC1_CLK__FUNC_DMIC1_CLK (MTK_PIN_NO(120) | 1)
+#define MT8365_PIN_120_DMIC1_CLK__FUNC_I2S2_MCK (MTK_PIN_NO(120) | 2)
+#define MT8365_PIN_120_DMIC1_CLK__FUNC_DBG_MON_B23 (MTK_PIN_NO(120) | 7)
+
+#define MT8365_PIN_121_DMIC1_DAT0__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define MT8365_PIN_121_DMIC1_DAT0__FUNC_DMIC1_DAT0 (MTK_PIN_NO(121) | 1)
+#define MT8365_PIN_121_DMIC1_DAT0__FUNC_I2S1_BCK (MTK_PIN_NO(121) | 2)
+#define MT8365_PIN_121_DMIC1_DAT0__FUNC_DBG_MON_B24 (MTK_PIN_NO(121) | 7)
+
+#define MT8365_PIN_122_DMIC1_DAT1__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define MT8365_PIN_122_DMIC1_DAT1__FUNC_DMIC1_DAT1 (MTK_PIN_NO(122) | 1)
+#define MT8365_PIN_122_DMIC1_DAT1__FUNC_I2S1_LRCK (MTK_PIN_NO(122) | 2)
+#define MT8365_PIN_122_DMIC1_DAT1__FUNC_DBG_MON_B25 (MTK_PIN_NO(122) | 7)
+
+#define MT8365_PIN_123_DMIC2_CLK__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define MT8365_PIN_123_DMIC2_CLK__FUNC_DMIC2_CLK (MTK_PIN_NO(123) | 1)
+#define MT8365_PIN_123_DMIC2_CLK__FUNC_I2S1_MCK (MTK_PIN_NO(123) | 2)
+#define MT8365_PIN_123_DMIC2_CLK__FUNC_DBG_MON_B26 (MTK_PIN_NO(123) | 7)
+
+#define MT8365_PIN_124_DMIC2_DAT0__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define MT8365_PIN_124_DMIC2_DAT0__FUNC_DMIC2_DAT0 (MTK_PIN_NO(124) | 1)
+#define MT8365_PIN_124_DMIC2_DAT0__FUNC_I2S1_DO (MTK_PIN_NO(124) | 2)
+#define MT8365_PIN_124_DMIC2_DAT0__FUNC_DBG_MON_B27 (MTK_PIN_NO(124) | 7)
+
+#define MT8365_PIN_125_DMIC2_DAT1__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define MT8365_PIN_125_DMIC2_DAT1__FUNC_DMIC2_DAT1 (MTK_PIN_NO(125) | 1)
+#define MT8365_PIN_125_DMIC2_DAT1__FUNC_TDM_RX_BCK (MTK_PIN_NO(125) | 2)
+#define MT8365_PIN_125_DMIC2_DAT1__FUNC_DBG_MON_B28 (MTK_PIN_NO(125) | 7)
+
+#define MT8365_PIN_126_DMIC3_CLK__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define MT8365_PIN_126_DMIC3_CLK__FUNC_DMIC3_CLK (MTK_PIN_NO(126) | 1)
+#define MT8365_PIN_126_DMIC3_CLK__FUNC_TDM_RX_LRCK (MTK_PIN_NO(126) | 2)
+
+#define MT8365_PIN_127_DMIC3_DAT0__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define MT8365_PIN_127_DMIC3_DAT0__FUNC_DMIC3_DAT0 (MTK_PIN_NO(127) | 1)
+#define MT8365_PIN_127_DMIC3_DAT0__FUNC_TDM_RX_DI (MTK_PIN_NO(127) | 2)
+
+#define MT8365_PIN_128_DMIC3_DAT1__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define MT8365_PIN_128_DMIC3_DAT1__FUNC_DMIC3_DAT1 (MTK_PIN_NO(128) | 1)
+#define MT8365_PIN_128_DMIC3_DAT1__FUNC_TDM_RX_MCK (MTK_PIN_NO(128) | 2)
+#define MT8365_PIN_128_DMIC3_DAT1__FUNC_VAD_CLK (MTK_PIN_NO(128) | 3)
+
+#define MT8365_PIN_129_TDM_TX_BCK__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define MT8365_PIN_129_TDM_TX_BCK__FUNC_TDM_TX_BCK (MTK_PIN_NO(129) | 1)
+#define MT8365_PIN_129_TDM_TX_BCK__FUNC_I2S3_BCK (MTK_PIN_NO(129) | 2)
+#define MT8365_PIN_129_TDM_TX_BCK__FUNC_ckmon1_ck (MTK_PIN_NO(129) | 3)
+
+#define MT8365_PIN_130_TDM_TX_LRCK__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define MT8365_PIN_130_TDM_TX_LRCK__FUNC_TDM_TX_LRCK (MTK_PIN_NO(130) | 1)
+#define MT8365_PIN_130_TDM_TX_LRCK__FUNC_I2S3_LRCK (MTK_PIN_NO(130) | 2)
+#define MT8365_PIN_130_TDM_TX_LRCK__FUNC_ckmon2_ck (MTK_PIN_NO(130) | 3)
+
+#define MT8365_PIN_131_TDM_TX_MCK__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define MT8365_PIN_131_TDM_TX_MCK__FUNC_TDM_TX_MCK (MTK_PIN_NO(131) | 1)
+#define MT8365_PIN_131_TDM_TX_MCK__FUNC_I2S3_MCK (MTK_PIN_NO(131) | 2)
+#define MT8365_PIN_131_TDM_TX_MCK__FUNC_ckmon3_ck (MTK_PIN_NO(131) | 3)
+
+#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_TDM_TX_DATA0 (MTK_PIN_NO(132) | 1)
+#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_I2S3_DO (MTK_PIN_NO(132) | 2)
+#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_ckmon4_ck (MTK_PIN_NO(132) | 3)
+#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_DBG_MON_B29 (MTK_PIN_NO(132) | 7)
+
+#define MT8365_PIN_133_TDM_TX_DATA1__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define MT8365_PIN_133_TDM_TX_DATA1__FUNC_TDM_TX_DATA1 (MTK_PIN_NO(133) | 1)
+#define MT8365_PIN_133_TDM_TX_DATA1__FUNC_DBG_MON_B30 (MTK_PIN_NO(133) | 7)
+
+#define MT8365_PIN_134_TDM_TX_DATA2__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define MT8365_PIN_134_TDM_TX_DATA2__FUNC_TDM_TX_DATA2 (MTK_PIN_NO(134) | 1)
+#define MT8365_PIN_134_TDM_TX_DATA2__FUNC_DBG_MON_B31 (MTK_PIN_NO(134) | 7)
+
+#define MT8365_PIN_135_TDM_TX_DATA3__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define MT8365_PIN_135_TDM_TX_DATA3__FUNC_TDM_TX_DATA3 (MTK_PIN_NO(135) | 1)
+#define MT8365_PIN_135_TDM_TX_DATA3__FUNC_DBG_MON_B32 (MTK_PIN_NO(135) | 7)
+
+#define MT8365_PIN_136_CONN_TOP_CLK__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define MT8365_PIN_136_CONN_TOP_CLK__FUNC_CONN_TOP_CLK (MTK_PIN_NO(136) | 1)
+
+#define MT8365_PIN_137_CONN_TOP_DATA__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define MT8365_PIN_137_CONN_TOP_DATA__FUNC_CONN_TOP_DATA (MTK_PIN_NO(137) | 1)
+
+#define MT8365_PIN_138_CONN_HRST_B__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define MT8365_PIN_138_CONN_HRST_B__FUNC_CONN_HRST_B (MTK_PIN_NO(138) | 1)
+
+#define MT8365_PIN_139_CONN_WB_PTA__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define MT8365_PIN_139_CONN_WB_PTA__FUNC_CONN_WB_PTA (MTK_PIN_NO(139) | 1)
+
+#define MT8365_PIN_140_CONN_BT_CLK__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define MT8365_PIN_140_CONN_BT_CLK__FUNC_CONN_BT_CLK (MTK_PIN_NO(140) | 1)
+
+#define MT8365_PIN_141_CONN_BT_DATA__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define MT8365_PIN_141_CONN_BT_DATA__FUNC_CONN_BT_DATA (MTK_PIN_NO(141) | 1)
+
+#define MT8365_PIN_142_CONN_WF_CTRL0__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define MT8365_PIN_142_CONN_WF_CTRL0__FUNC_CONN_WF_CTRL0 (MTK_PIN_NO(142) | 1)
+
+#define MT8365_PIN_143_CONN_WF_CTRL1__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define MT8365_PIN_143_CONN_WF_CTRL1__FUNC_CONN_WF_CTRL1 (MTK_PIN_NO(143) | 1)
+
+#define MT8365_PIN_144_CONN_WF_CTRL2__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define MT8365_PIN_144_CONN_WF_CTRL2__FUNC_CONN_WF_CTRL2 (MTK_PIN_NO(144) | 1)
+
+#endif /* __MT8365_PINFUNC_H */
diff --git a/include/dt-bindings/pinctrl/omap.h b/include/dt-bindings/pinctrl/omap.h
index 625718042413..f48245ff87e5 100644
--- a/include/dt-bindings/pinctrl/omap.h
+++ b/include/dt-bindings/pinctrl/omap.h
@@ -64,8 +64,8 @@
#define OMAP3_WKUP_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x2a00) (val)
#define DM814X_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val)
#define DM816X_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val)
-#define AM33XX_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val)
-#define AM33XX_PADCONF(pa, dir, mux) OMAP_IOPAD_OFFSET((pa), 0x0800) ((dir) | (mux))
+#define AM33XX_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val) (0)
+#define AM33XX_PADCONF(pa, conf, mux) OMAP_IOPAD_OFFSET((pa), 0x0800) (conf) (mux)
/*
* Macros to allow using the offset from the padconf physical address
diff --git a/include/dt-bindings/pinctrl/pads-imx8dxl.h b/include/dt-bindings/pinctrl/pads-imx8dxl.h
new file mode 100644
index 000000000000..b1d7b84c3e0a
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pads-imx8dxl.h
@@ -0,0 +1,639 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2019~2020 NXP
+ */
+
+#ifndef _IMX8DXL_PADS_H
+#define _IMX8DXL_PADS_H
+
+/* pin id */
+#define IMX8DXL_PCIE_CTRL0_PERST_B 0
+#define IMX8DXL_PCIE_CTRL0_CLKREQ_B 1
+#define IMX8DXL_PCIE_CTRL0_WAKE_B 2
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_PCIESEP 3
+#define IMX8DXL_USB_SS3_TC0 4
+#define IMX8DXL_USB_SS3_TC1 5
+#define IMX8DXL_USB_SS3_TC2 6
+#define IMX8DXL_USB_SS3_TC3 7
+#define IMX8DXL_COMP_CTL_GPIO_3V3_USB3IO 8
+#define IMX8DXL_EMMC0_CLK 9
+#define IMX8DXL_EMMC0_CMD 10
+#define IMX8DXL_EMMC0_DATA0 11
+#define IMX8DXL_EMMC0_DATA1 12
+#define IMX8DXL_EMMC0_DATA2 13
+#define IMX8DXL_EMMC0_DATA3 14
+#define IMX8DXL_EMMC0_DATA4 15
+#define IMX8DXL_EMMC0_DATA5 16
+#define IMX8DXL_EMMC0_DATA6 17
+#define IMX8DXL_EMMC0_DATA7 18
+#define IMX8DXL_EMMC0_STROBE 19
+#define IMX8DXL_EMMC0_RESET_B 20
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_SD1FIX0 21
+#define IMX8DXL_USDHC1_RESET_B 22
+#define IMX8DXL_USDHC1_VSELECT 23
+#define IMX8DXL_CTL_NAND_RE_P_N 24
+#define IMX8DXL_USDHC1_WP 25
+#define IMX8DXL_USDHC1_CD_B 26
+#define IMX8DXL_CTL_NAND_DQS_P_N 27
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_VSELSEP 28
+#define IMX8DXL_ENET0_RGMII_TXC 29
+#define IMX8DXL_ENET0_RGMII_TX_CTL 30
+#define IMX8DXL_ENET0_RGMII_TXD0 31
+#define IMX8DXL_ENET0_RGMII_TXD1 32
+#define IMX8DXL_ENET0_RGMII_TXD2 33
+#define IMX8DXL_ENET0_RGMII_TXD3 34
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB0 35
+#define IMX8DXL_ENET0_RGMII_RXC 36
+#define IMX8DXL_ENET0_RGMII_RX_CTL 37
+#define IMX8DXL_ENET0_RGMII_RXD0 38
+#define IMX8DXL_ENET0_RGMII_RXD1 39
+#define IMX8DXL_ENET0_RGMII_RXD2 40
+#define IMX8DXL_ENET0_RGMII_RXD3 41
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB1 42
+#define IMX8DXL_ENET0_REFCLK_125M_25M 43
+#define IMX8DXL_ENET0_MDIO 44
+#define IMX8DXL_ENET0_MDC 45
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIOCT 46
+#define IMX8DXL_ENET1_RGMII_TXC 47
+#define IMX8DXL_ENET1_RGMII_TXD2 48
+#define IMX8DXL_ENET1_RGMII_TX_CTL 49
+#define IMX8DXL_ENET1_RGMII_TXD3 50
+#define IMX8DXL_ENET1_RGMII_RXC 51
+#define IMX8DXL_ENET1_RGMII_RXD3 52
+#define IMX8DXL_ENET1_RGMII_RXD2 53
+#define IMX8DXL_ENET1_RGMII_RXD1 54
+#define IMX8DXL_ENET1_RGMII_TXD0 55
+#define IMX8DXL_ENET1_RGMII_TXD1 56
+#define IMX8DXL_ENET1_RGMII_RXD0 57
+#define IMX8DXL_ENET1_RGMII_RX_CTL 58
+#define IMX8DXL_ENET1_REFCLK_125M_25M 59
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHB 60
+#define IMX8DXL_SPI3_SCK 61
+#define IMX8DXL_SPI3_SDO 62
+#define IMX8DXL_SPI3_SDI 63
+#define IMX8DXL_SPI3_CS0 64
+#define IMX8DXL_SPI3_CS1 65
+#define IMX8DXL_MCLK_IN1 66
+#define IMX8DXL_MCLK_IN0 67
+#define IMX8DXL_MCLK_OUT0 68
+#define IMX8DXL_UART1_TX 69
+#define IMX8DXL_UART1_RX 70
+#define IMX8DXL_UART1_RTS_B 71
+#define IMX8DXL_UART1_CTS_B 72
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHK 73
+#define IMX8DXL_SPI0_SCK 74
+#define IMX8DXL_SPI0_SDI 75
+#define IMX8DXL_SPI0_SDO 76
+#define IMX8DXL_SPI0_CS1 77
+#define IMX8DXL_SPI0_CS0 78
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHT 79
+#define IMX8DXL_ADC_IN1 80
+#define IMX8DXL_ADC_IN0 81
+#define IMX8DXL_ADC_IN3 82
+#define IMX8DXL_ADC_IN2 83
+#define IMX8DXL_ADC_IN5 84
+#define IMX8DXL_ADC_IN4 85
+#define IMX8DXL_FLEXCAN0_RX 86
+#define IMX8DXL_FLEXCAN0_TX 87
+#define IMX8DXL_FLEXCAN1_RX 88
+#define IMX8DXL_FLEXCAN1_TX 89
+#define IMX8DXL_FLEXCAN2_RX 90
+#define IMX8DXL_FLEXCAN2_TX 91
+#define IMX8DXL_UART0_RX 92
+#define IMX8DXL_UART0_TX 93
+#define IMX8DXL_UART2_TX 94
+#define IMX8DXL_UART2_RX 95
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIOLH 96
+#define IMX8DXL_JTAG_TRST_B 97
+#define IMX8DXL_PMIC_I2C_SCL 98
+#define IMX8DXL_PMIC_I2C_SDA 99
+#define IMX8DXL_PMIC_INT_B 100
+#define IMX8DXL_SCU_GPIO0_00 101
+#define IMX8DXL_SCU_GPIO0_01 102
+#define IMX8DXL_SCU_PMIC_STANDBY 103
+#define IMX8DXL_SCU_BOOT_MODE1 104
+#define IMX8DXL_SCU_BOOT_MODE0 105
+#define IMX8DXL_SCU_BOOT_MODE2 106
+#define IMX8DXL_SNVS_TAMPER_OUT1 107
+#define IMX8DXL_SNVS_TAMPER_OUT2 108
+#define IMX8DXL_SNVS_TAMPER_OUT3 109
+#define IMX8DXL_SNVS_TAMPER_OUT4 110
+#define IMX8DXL_SNVS_TAMPER_IN0 111
+#define IMX8DXL_SNVS_TAMPER_IN1 112
+#define IMX8DXL_SNVS_TAMPER_IN2 113
+#define IMX8DXL_SNVS_TAMPER_IN3 114
+#define IMX8DXL_SPI1_SCK 115
+#define IMX8DXL_SPI1_SDO 116
+#define IMX8DXL_SPI1_SDI 117
+#define IMX8DXL_SPI1_CS0 118
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHD 119
+#define IMX8DXL_QSPI0A_DATA1 120
+#define IMX8DXL_QSPI0A_DATA0 121
+#define IMX8DXL_QSPI0A_DATA3 122
+#define IMX8DXL_QSPI0A_DATA2 123
+#define IMX8DXL_QSPI0A_SS0_B 124
+#define IMX8DXL_QSPI0A_DQS 125
+#define IMX8DXL_QSPI0A_SCLK 126
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_QSPI0A 127
+#define IMX8DXL_QSPI0B_SCLK 128
+#define IMX8DXL_QSPI0B_DQS 129
+#define IMX8DXL_QSPI0B_DATA1 130
+#define IMX8DXL_QSPI0B_DATA0 131
+#define IMX8DXL_QSPI0B_DATA3 132
+#define IMX8DXL_QSPI0B_DATA2 133
+#define IMX8DXL_QSPI0B_SS0_B 134
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_QSPI0B 135
+
+/* format: <pin_id mux_mode> */
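+/*
+ * Illustrative example (not part of the original header): a board device
+ * tree selects the UART function on the UART0_RX pad with
+ * IMX8DXL_UART0_RX_ADMA_UART0_RX, which expands to the two cells "92 0"
+ * (pin id 92, mux mode 0).
+ */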
+#define IMX8DXL_PCIE_CTRL0_PERST_B_HSIO_PCIE0_PERST_B IMX8DXL_PCIE_CTRL0_PERST_B 0
+#define IMX8DXL_PCIE_CTRL0_PERST_B_LSIO_GPIO4_IO00 IMX8DXL_PCIE_CTRL0_PERST_B 4
+#define IMX8DXL_PCIE_CTRL0_PERST_B_LSIO_GPIO7_IO00 IMX8DXL_PCIE_CTRL0_PERST_B 5
+#define IMX8DXL_PCIE_CTRL0_CLKREQ_B_HSIO_PCIE0_CLKREQ_B IMX8DXL_PCIE_CTRL0_CLKREQ_B 0
+#define IMX8DXL_PCIE_CTRL0_CLKREQ_B_LSIO_GPIO4_IO01 IMX8DXL_PCIE_CTRL0_CLKREQ_B 4
+#define IMX8DXL_PCIE_CTRL0_CLKREQ_B_LSIO_GPIO7_IO01 IMX8DXL_PCIE_CTRL0_CLKREQ_B 5
+#define IMX8DXL_PCIE_CTRL0_WAKE_B_HSIO_PCIE0_WAKE_B IMX8DXL_PCIE_CTRL0_WAKE_B 0
+#define IMX8DXL_PCIE_CTRL0_WAKE_B_LSIO_GPIO4_IO02 IMX8DXL_PCIE_CTRL0_WAKE_B 4
+#define IMX8DXL_PCIE_CTRL0_WAKE_B_LSIO_GPIO7_IO02 IMX8DXL_PCIE_CTRL0_WAKE_B 5
+#define IMX8DXL_USB_SS3_TC0_ADMA_I2C1_SCL IMX8DXL_USB_SS3_TC0 0
+#define IMX8DXL_USB_SS3_TC0_CONN_USB_OTG1_PWR IMX8DXL_USB_SS3_TC0 1
+#define IMX8DXL_USB_SS3_TC0_CONN_USB_OTG2_PWR IMX8DXL_USB_SS3_TC0 2
+#define IMX8DXL_USB_SS3_TC0_LSIO_GPIO4_IO03 IMX8DXL_USB_SS3_TC0 4
+#define IMX8DXL_USB_SS3_TC0_LSIO_GPIO7_IO03 IMX8DXL_USB_SS3_TC0 5
+#define IMX8DXL_USB_SS3_TC1_ADMA_I2C1_SCL IMX8DXL_USB_SS3_TC1 0
+#define IMX8DXL_USB_SS3_TC1_CONN_USB_OTG2_PWR IMX8DXL_USB_SS3_TC1 1
+#define IMX8DXL_USB_SS3_TC1_LSIO_GPIO4_IO04 IMX8DXL_USB_SS3_TC1 4
+#define IMX8DXL_USB_SS3_TC1_LSIO_GPIO7_IO04 IMX8DXL_USB_SS3_TC1 5
+#define IMX8DXL_USB_SS3_TC2_ADMA_I2C1_SDA IMX8DXL_USB_SS3_TC2 0
+#define IMX8DXL_USB_SS3_TC2_CONN_USB_OTG1_OC IMX8DXL_USB_SS3_TC2 1
+#define IMX8DXL_USB_SS3_TC2_CONN_USB_OTG2_OC IMX8DXL_USB_SS3_TC2 2
+#define IMX8DXL_USB_SS3_TC2_LSIO_GPIO4_IO05 IMX8DXL_USB_SS3_TC2 4
+#define IMX8DXL_USB_SS3_TC2_LSIO_GPIO7_IO05 IMX8DXL_USB_SS3_TC2 5
+#define IMX8DXL_USB_SS3_TC3_ADMA_I2C1_SDA IMX8DXL_USB_SS3_TC3 0
+#define IMX8DXL_USB_SS3_TC3_CONN_USB_OTG2_OC IMX8DXL_USB_SS3_TC3 1
+#define IMX8DXL_USB_SS3_TC3_LSIO_GPIO4_IO06 IMX8DXL_USB_SS3_TC3 4
+#define IMX8DXL_USB_SS3_TC3_LSIO_GPIO7_IO06 IMX8DXL_USB_SS3_TC3 5
+#define IMX8DXL_EMMC0_CLK_CONN_EMMC0_CLK IMX8DXL_EMMC0_CLK 0
+#define IMX8DXL_EMMC0_CLK_CONN_NAND_READY_B IMX8DXL_EMMC0_CLK 1
+#define IMX8DXL_EMMC0_CLK_LSIO_GPIO4_IO07 IMX8DXL_EMMC0_CLK 4
+#define IMX8DXL_EMMC0_CMD_CONN_EMMC0_CMD IMX8DXL_EMMC0_CMD 0
+#define IMX8DXL_EMMC0_CMD_CONN_NAND_DQS IMX8DXL_EMMC0_CMD 1
+#define IMX8DXL_EMMC0_CMD_LSIO_GPIO4_IO08 IMX8DXL_EMMC0_CMD 4
+#define IMX8DXL_EMMC0_DATA0_CONN_EMMC0_DATA0 IMX8DXL_EMMC0_DATA0 0
+#define IMX8DXL_EMMC0_DATA0_CONN_NAND_DATA00 IMX8DXL_EMMC0_DATA0 1
+#define IMX8DXL_EMMC0_DATA0_LSIO_GPIO4_IO09 IMX8DXL_EMMC0_DATA0 4
+#define IMX8DXL_EMMC0_DATA1_CONN_EMMC0_DATA1 IMX8DXL_EMMC0_DATA1 0
+#define IMX8DXL_EMMC0_DATA1_CONN_NAND_DATA01 IMX8DXL_EMMC0_DATA1 1
+#define IMX8DXL_EMMC0_DATA1_LSIO_GPIO4_IO10 IMX8DXL_EMMC0_DATA1 4
+#define IMX8DXL_EMMC0_DATA2_CONN_EMMC0_DATA2 IMX8DXL_EMMC0_DATA2 0
+#define IMX8DXL_EMMC0_DATA2_CONN_NAND_DATA02 IMX8DXL_EMMC0_DATA2 1
+#define IMX8DXL_EMMC0_DATA2_LSIO_GPIO4_IO11 IMX8DXL_EMMC0_DATA2 4
+#define IMX8DXL_EMMC0_DATA3_CONN_EMMC0_DATA3 IMX8DXL_EMMC0_DATA3 0
+#define IMX8DXL_EMMC0_DATA3_CONN_NAND_DATA03 IMX8DXL_EMMC0_DATA3 1
+#define IMX8DXL_EMMC0_DATA3_LSIO_GPIO4_IO12 IMX8DXL_EMMC0_DATA3 4
+#define IMX8DXL_EMMC0_DATA4_CONN_EMMC0_DATA4 IMX8DXL_EMMC0_DATA4 0
+#define IMX8DXL_EMMC0_DATA4_CONN_NAND_DATA04 IMX8DXL_EMMC0_DATA4 1
+#define IMX8DXL_EMMC0_DATA4_LSIO_GPIO4_IO13 IMX8DXL_EMMC0_DATA4 4
+#define IMX8DXL_EMMC0_DATA5_CONN_EMMC0_DATA5 IMX8DXL_EMMC0_DATA5 0
+#define IMX8DXL_EMMC0_DATA5_CONN_NAND_DATA05 IMX8DXL_EMMC0_DATA5 1
+#define IMX8DXL_EMMC0_DATA5_LSIO_GPIO4_IO14 IMX8DXL_EMMC0_DATA5 4
+#define IMX8DXL_EMMC0_DATA6_CONN_EMMC0_DATA6 IMX8DXL_EMMC0_DATA6 0
+#define IMX8DXL_EMMC0_DATA6_CONN_NAND_DATA06 IMX8DXL_EMMC0_DATA6 1
+#define IMX8DXL_EMMC0_DATA6_LSIO_GPIO4_IO15 IMX8DXL_EMMC0_DATA6 4
+#define IMX8DXL_EMMC0_DATA7_CONN_EMMC0_DATA7 IMX8DXL_EMMC0_DATA7 0
+#define IMX8DXL_EMMC0_DATA7_CONN_NAND_DATA07 IMX8DXL_EMMC0_DATA7 1
+#define IMX8DXL_EMMC0_DATA7_LSIO_GPIO4_IO16 IMX8DXL_EMMC0_DATA7 4
+#define IMX8DXL_EMMC0_STROBE_CONN_EMMC0_STROBE IMX8DXL_EMMC0_STROBE 0
+#define IMX8DXL_EMMC0_STROBE_CONN_NAND_CLE IMX8DXL_EMMC0_STROBE 1
+#define IMX8DXL_EMMC0_STROBE_LSIO_GPIO4_IO17 IMX8DXL_EMMC0_STROBE 4
+#define IMX8DXL_EMMC0_RESET_B_CONN_EMMC0_RESET_B IMX8DXL_EMMC0_RESET_B 0
+#define IMX8DXL_EMMC0_RESET_B_CONN_NAND_WP_B IMX8DXL_EMMC0_RESET_B 1
+#define IMX8DXL_EMMC0_RESET_B_LSIO_GPIO4_IO18 IMX8DXL_EMMC0_RESET_B 4
+#define IMX8DXL_USDHC1_RESET_B_CONN_USDHC1_RESET_B IMX8DXL_USDHC1_RESET_B 0
+#define IMX8DXL_USDHC1_RESET_B_CONN_NAND_RE_N IMX8DXL_USDHC1_RESET_B 1
+#define IMX8DXL_USDHC1_RESET_B_ADMA_SPI2_SCK IMX8DXL_USDHC1_RESET_B 2
+#define IMX8DXL_USDHC1_RESET_B_CONN_NAND_WE_B IMX8DXL_USDHC1_RESET_B 3
+#define IMX8DXL_USDHC1_RESET_B_LSIO_GPIO4_IO19 IMX8DXL_USDHC1_RESET_B 4
+#define IMX8DXL_USDHC1_RESET_B_LSIO_GPIO7_IO08 IMX8DXL_USDHC1_RESET_B 5
+#define IMX8DXL_USDHC1_VSELECT_CONN_USDHC1_VSELECT IMX8DXL_USDHC1_VSELECT 0
+#define IMX8DXL_USDHC1_VSELECT_CONN_NAND_RE_P IMX8DXL_USDHC1_VSELECT 1
+#define IMX8DXL_USDHC1_VSELECT_ADMA_SPI2_SDO IMX8DXL_USDHC1_VSELECT 2
+#define IMX8DXL_USDHC1_VSELECT_CONN_NAND_RE_B IMX8DXL_USDHC1_VSELECT 3
+#define IMX8DXL_USDHC1_VSELECT_LSIO_GPIO4_IO20 IMX8DXL_USDHC1_VSELECT 4
+#define IMX8DXL_USDHC1_VSELECT_LSIO_GPIO7_IO09 IMX8DXL_USDHC1_VSELECT 5
+#define IMX8DXL_USDHC1_WP_CONN_USDHC1_WP IMX8DXL_USDHC1_WP 0
+#define IMX8DXL_USDHC1_WP_CONN_NAND_DQS_N IMX8DXL_USDHC1_WP 1
+#define IMX8DXL_USDHC1_WP_ADMA_SPI2_SDI IMX8DXL_USDHC1_WP 2
+#define IMX8DXL_USDHC1_WP_CONN_NAND_ALE IMX8DXL_USDHC1_WP 3
+#define IMX8DXL_USDHC1_WP_LSIO_GPIO4_IO21 IMX8DXL_USDHC1_WP 4
+#define IMX8DXL_USDHC1_WP_LSIO_GPIO7_IO10 IMX8DXL_USDHC1_WP 5
+#define IMX8DXL_USDHC1_CD_B_CONN_USDHC1_CD_B IMX8DXL_USDHC1_CD_B 0
+#define IMX8DXL_USDHC1_CD_B_CONN_NAND_DQS_P IMX8DXL_USDHC1_CD_B 1
+#define IMX8DXL_USDHC1_CD_B_ADMA_SPI2_CS0 IMX8DXL_USDHC1_CD_B 2
+#define IMX8DXL_USDHC1_CD_B_CONN_NAND_DQS IMX8DXL_USDHC1_CD_B 3
+#define IMX8DXL_USDHC1_CD_B_LSIO_GPIO4_IO22 IMX8DXL_USDHC1_CD_B 4
+#define IMX8DXL_USDHC1_CD_B_LSIO_GPIO7_IO11 IMX8DXL_USDHC1_CD_B 5
+#define IMX8DXL_ENET0_RGMII_TXC_CONN_ENET0_RGMII_TXC IMX8DXL_ENET0_RGMII_TXC 0
+#define IMX8DXL_ENET0_RGMII_TXC_CONN_ENET0_RCLK50M_OUT IMX8DXL_ENET0_RGMII_TXC 1
+#define IMX8DXL_ENET0_RGMII_TXC_CONN_ENET0_RCLK50M_IN IMX8DXL_ENET0_RGMII_TXC 2
+#define IMX8DXL_ENET0_RGMII_TXC_CONN_NAND_CE1_B IMX8DXL_ENET0_RGMII_TXC 3
+#define IMX8DXL_ENET0_RGMII_TXC_LSIO_GPIO4_IO29 IMX8DXL_ENET0_RGMII_TXC 4
+#define IMX8DXL_ENET0_RGMII_TXC_CONN_USDHC2_CLK IMX8DXL_ENET0_RGMII_TXC 5
+#define IMX8DXL_ENET0_RGMII_TX_CTL_CONN_ENET0_RGMII_TX_CTL IMX8DXL_ENET0_RGMII_TX_CTL 0
+#define IMX8DXL_ENET0_RGMII_TX_CTL_CONN_USDHC1_RESET_B IMX8DXL_ENET0_RGMII_TX_CTL 3
+#define IMX8DXL_ENET0_RGMII_TX_CTL_LSIO_GPIO4_IO30 IMX8DXL_ENET0_RGMII_TX_CTL 4
+#define IMX8DXL_ENET0_RGMII_TX_CTL_CONN_USDHC2_CMD IMX8DXL_ENET0_RGMII_TX_CTL 5
+#define IMX8DXL_ENET0_RGMII_TXD0_CONN_ENET0_RGMII_TXD0 IMX8DXL_ENET0_RGMII_TXD0 0
+#define IMX8DXL_ENET0_RGMII_TXD0_CONN_USDHC1_VSELECT IMX8DXL_ENET0_RGMII_TXD0 3
+#define IMX8DXL_ENET0_RGMII_TXD0_LSIO_GPIO4_IO31 IMX8DXL_ENET0_RGMII_TXD0 4
+#define IMX8DXL_ENET0_RGMII_TXD0_CONN_USDHC2_DATA0 IMX8DXL_ENET0_RGMII_TXD0 5
+#define IMX8DXL_ENET0_RGMII_TXD1_CONN_ENET0_RGMII_TXD1 IMX8DXL_ENET0_RGMII_TXD1 0
+#define IMX8DXL_ENET0_RGMII_TXD1_CONN_USDHC1_WP IMX8DXL_ENET0_RGMII_TXD1 3
+#define IMX8DXL_ENET0_RGMII_TXD1_LSIO_GPIO5_IO00 IMX8DXL_ENET0_RGMII_TXD1 4
+#define IMX8DXL_ENET0_RGMII_TXD1_CONN_USDHC2_DATA1 IMX8DXL_ENET0_RGMII_TXD1 5
+#define IMX8DXL_ENET0_RGMII_TXD2_CONN_ENET0_RGMII_TXD2 IMX8DXL_ENET0_RGMII_TXD2 0
+#define IMX8DXL_ENET0_RGMII_TXD2_CONN_NAND_CE0_B IMX8DXL_ENET0_RGMII_TXD2 2
+#define IMX8DXL_ENET0_RGMII_TXD2_CONN_USDHC1_CD_B IMX8DXL_ENET0_RGMII_TXD2 3
+#define IMX8DXL_ENET0_RGMII_TXD2_LSIO_GPIO5_IO01 IMX8DXL_ENET0_RGMII_TXD2 4
+#define IMX8DXL_ENET0_RGMII_TXD2_CONN_USDHC2_DATA2 IMX8DXL_ENET0_RGMII_TXD2 5
+#define IMX8DXL_ENET0_RGMII_TXD3_CONN_ENET0_RGMII_TXD3 IMX8DXL_ENET0_RGMII_TXD3 0
+#define IMX8DXL_ENET0_RGMII_TXD3_CONN_NAND_RE_B IMX8DXL_ENET0_RGMII_TXD3 2
+#define IMX8DXL_ENET0_RGMII_TXD3_LSIO_GPIO5_IO02 IMX8DXL_ENET0_RGMII_TXD3 4
+#define IMX8DXL_ENET0_RGMII_TXD3_CONN_USDHC2_DATA3 IMX8DXL_ENET0_RGMII_TXD3 5
+#define IMX8DXL_ENET0_RGMII_RXC_CONN_ENET0_RGMII_RXC IMX8DXL_ENET0_RGMII_RXC 0
+#define IMX8DXL_ENET0_RGMII_RXC_CONN_NAND_WE_B IMX8DXL_ENET0_RGMII_RXC 2
+#define IMX8DXL_ENET0_RGMII_RXC_CONN_USDHC1_CLK IMX8DXL_ENET0_RGMII_RXC 3
+#define IMX8DXL_ENET0_RGMII_RXC_LSIO_GPIO5_IO03 IMX8DXL_ENET0_RGMII_RXC 4
+#define IMX8DXL_ENET0_RGMII_RX_CTL_CONN_ENET0_RGMII_RX_CTL IMX8DXL_ENET0_RGMII_RX_CTL 0
+#define IMX8DXL_ENET0_RGMII_RX_CTL_CONN_USDHC1_CMD IMX8DXL_ENET0_RGMII_RX_CTL 3
+#define IMX8DXL_ENET0_RGMII_RX_CTL_LSIO_GPIO5_IO04 IMX8DXL_ENET0_RGMII_RX_CTL 4
+#define IMX8DXL_ENET0_RGMII_RXD0_CONN_ENET0_RGMII_RXD0 IMX8DXL_ENET0_RGMII_RXD0 0
+#define IMX8DXL_ENET0_RGMII_RXD0_CONN_USDHC1_DATA0 IMX8DXL_ENET0_RGMII_RXD0 3
+#define IMX8DXL_ENET0_RGMII_RXD0_LSIO_GPIO5_IO05 IMX8DXL_ENET0_RGMII_RXD0 4
+#define IMX8DXL_ENET0_RGMII_RXD1_CONN_ENET0_RGMII_RXD1 IMX8DXL_ENET0_RGMII_RXD1 0
+#define IMX8DXL_ENET0_RGMII_RXD1_CONN_USDHC1_DATA1 IMX8DXL_ENET0_RGMII_RXD1 3
+#define IMX8DXL_ENET0_RGMII_RXD1_LSIO_GPIO5_IO06 IMX8DXL_ENET0_RGMII_RXD1 4
+#define IMX8DXL_ENET0_RGMII_RXD2_CONN_ENET0_RGMII_RXD2 IMX8DXL_ENET0_RGMII_RXD2 0
+#define IMX8DXL_ENET0_RGMII_RXD2_CONN_ENET0_RMII_RX_ER IMX8DXL_ENET0_RGMII_RXD2 1
+#define IMX8DXL_ENET0_RGMII_RXD2_CONN_USDHC1_DATA2 IMX8DXL_ENET0_RGMII_RXD2 3
+#define IMX8DXL_ENET0_RGMII_RXD2_LSIO_GPIO5_IO07 IMX8DXL_ENET0_RGMII_RXD2 4
+#define IMX8DXL_ENET0_RGMII_RXD3_CONN_ENET0_RGMII_RXD3 IMX8DXL_ENET0_RGMII_RXD3 0
+#define IMX8DXL_ENET0_RGMII_RXD3_CONN_NAND_ALE IMX8DXL_ENET0_RGMII_RXD3 2
+#define IMX8DXL_ENET0_RGMII_RXD3_CONN_USDHC1_DATA3 IMX8DXL_ENET0_RGMII_RXD3 3
+#define IMX8DXL_ENET0_RGMII_RXD3_LSIO_GPIO5_IO08 IMX8DXL_ENET0_RGMII_RXD3 4
+#define IMX8DXL_ENET0_REFCLK_125M_25M_CONN_ENET0_REFCLK_125M_25M IMX8DXL_ENET0_REFCLK_125M_25M 0
+#define IMX8DXL_ENET0_REFCLK_125M_25M_CONN_ENET0_PPS IMX8DXL_ENET0_REFCLK_125M_25M 1
+#define IMX8DXL_ENET0_REFCLK_125M_25M_CONN_EQOS_PPS_IN IMX8DXL_ENET0_REFCLK_125M_25M 2
+#define IMX8DXL_ENET0_REFCLK_125M_25M_CONN_EQOS_PPS_OUT IMX8DXL_ENET0_REFCLK_125M_25M 3
+#define IMX8DXL_ENET0_REFCLK_125M_25M_LSIO_GPIO5_IO09 IMX8DXL_ENET0_REFCLK_125M_25M 4
+#define IMX8DXL_ENET0_MDIO_CONN_ENET0_MDIO IMX8DXL_ENET0_MDIO 0
+#define IMX8DXL_ENET0_MDIO_ADMA_I2C3_SDA IMX8DXL_ENET0_MDIO 1
+#define IMX8DXL_ENET0_MDIO_CONN_EQOS_MDIO IMX8DXL_ENET0_MDIO 2
+#define IMX8DXL_ENET0_MDIO_LSIO_GPIO5_IO10 IMX8DXL_ENET0_MDIO 4
+#define IMX8DXL_ENET0_MDIO_LSIO_GPIO7_IO16 IMX8DXL_ENET0_MDIO 5
+#define IMX8DXL_ENET0_MDC_CONN_ENET0_MDC IMX8DXL_ENET0_MDC 0
+#define IMX8DXL_ENET0_MDC_ADMA_I2C3_SCL IMX8DXL_ENET0_MDC 1
+#define IMX8DXL_ENET0_MDC_CONN_EQOS_MDC IMX8DXL_ENET0_MDC 2
+#define IMX8DXL_ENET0_MDC_LSIO_GPIO5_IO11 IMX8DXL_ENET0_MDC 4
+#define IMX8DXL_ENET0_MDC_LSIO_GPIO7_IO17 IMX8DXL_ENET0_MDC 5
+#define IMX8DXL_ENET1_RGMII_TXC_LSIO_GPIO0_IO00 IMX8DXL_ENET1_RGMII_TXC 0
+#define IMX8DXL_ENET1_RGMII_TXC_CONN_EQOS_RCLK50M_OUT IMX8DXL_ENET1_RGMII_TXC 1
+#define IMX8DXL_ENET1_RGMII_TXC_ADMA_LCDIF_D00 IMX8DXL_ENET1_RGMII_TXC 2
+#define IMX8DXL_ENET1_RGMII_TXC_CONN_EQOS_RGMII_TXC IMX8DXL_ENET1_RGMII_TXC 3
+#define IMX8DXL_ENET1_RGMII_TXC_CONN_EQOS_RCLK50M_IN IMX8DXL_ENET1_RGMII_TXC 4
+#define IMX8DXL_ENET1_RGMII_TXD2_ADMA_LCDIF_D01 IMX8DXL_ENET1_RGMII_TXD2 2
+#define IMX8DXL_ENET1_RGMII_TXD2_CONN_EQOS_RGMII_TXD2 IMX8DXL_ENET1_RGMII_TXD2 3
+#define IMX8DXL_ENET1_RGMII_TXD2_LSIO_GPIO0_IO01 IMX8DXL_ENET1_RGMII_TXD2 4
+#define IMX8DXL_ENET1_RGMII_TX_CTL_ADMA_LCDIF_D02 IMX8DXL_ENET1_RGMII_TX_CTL 2
+#define IMX8DXL_ENET1_RGMII_TX_CTL_CONN_EQOS_RGMII_TX_CTL IMX8DXL_ENET1_RGMII_TX_CTL 3
+#define IMX8DXL_ENET1_RGMII_TX_CTL_LSIO_GPIO0_IO02 IMX8DXL_ENET1_RGMII_TX_CTL 4
+#define IMX8DXL_ENET1_RGMII_TXD3_ADMA_LCDIF_D03 IMX8DXL_ENET1_RGMII_TXD3 2
+#define IMX8DXL_ENET1_RGMII_TXD3_CONN_EQOS_RGMII_TXD3 IMX8DXL_ENET1_RGMII_TXD3 3
+#define IMX8DXL_ENET1_RGMII_TXD3_LSIO_GPIO0_IO03 IMX8DXL_ENET1_RGMII_TXD3 4
+#define IMX8DXL_ENET1_RGMII_RXC_ADMA_LCDIF_D04 IMX8DXL_ENET1_RGMII_RXC 2
+#define IMX8DXL_ENET1_RGMII_RXC_CONN_EQOS_RGMII_RXC IMX8DXL_ENET1_RGMII_RXC 3
+#define IMX8DXL_ENET1_RGMII_RXC_LSIO_GPIO0_IO04 IMX8DXL_ENET1_RGMII_RXC 4
+#define IMX8DXL_ENET1_RGMII_RXD3_ADMA_LCDIF_D05 IMX8DXL_ENET1_RGMII_RXD3 2
+#define IMX8DXL_ENET1_RGMII_RXD3_CONN_EQOS_RGMII_RXD3 IMX8DXL_ENET1_RGMII_RXD3 3
+#define IMX8DXL_ENET1_RGMII_RXD3_LSIO_GPIO0_IO05 IMX8DXL_ENET1_RGMII_RXD3 4
+#define IMX8DXL_ENET1_RGMII_RXD2_ADMA_LCDIF_D06 IMX8DXL_ENET1_RGMII_RXD2 2
+#define IMX8DXL_ENET1_RGMII_RXD2_CONN_EQOS_RGMII_RXD2 IMX8DXL_ENET1_RGMII_RXD2 3
+#define IMX8DXL_ENET1_RGMII_RXD2_LSIO_GPIO0_IO06 IMX8DXL_ENET1_RGMII_RXD2 4
+#define IMX8DXL_ENET1_RGMII_RXD2_LSIO_GPIO6_IO00 IMX8DXL_ENET1_RGMII_RXD2 5
+#define IMX8DXL_ENET1_RGMII_RXD1_ADMA_LCDIF_D07 IMX8DXL_ENET1_RGMII_RXD1 2
+#define IMX8DXL_ENET1_RGMII_RXD1_CONN_EQOS_RGMII_RXD1 IMX8DXL_ENET1_RGMII_RXD1 3
+#define IMX8DXL_ENET1_RGMII_RXD1_LSIO_GPIO0_IO07 IMX8DXL_ENET1_RGMII_RXD1 4
+#define IMX8DXL_ENET1_RGMII_RXD1_LSIO_GPIO6_IO01 IMX8DXL_ENET1_RGMII_RXD1 5
+#define IMX8DXL_ENET1_RGMII_TXD0_ADMA_LCDIF_D08 IMX8DXL_ENET1_RGMII_TXD0 2
+#define IMX8DXL_ENET1_RGMII_TXD0_CONN_EQOS_RGMII_TXD0 IMX8DXL_ENET1_RGMII_TXD0 3
+#define IMX8DXL_ENET1_RGMII_TXD0_LSIO_GPIO0_IO08 IMX8DXL_ENET1_RGMII_TXD0 4
+#define IMX8DXL_ENET1_RGMII_TXD0_LSIO_GPIO6_IO02 IMX8DXL_ENET1_RGMII_TXD0 5
+#define IMX8DXL_ENET1_RGMII_TXD1_ADMA_LCDIF_D09 IMX8DXL_ENET1_RGMII_TXD1 2
+#define IMX8DXL_ENET1_RGMII_TXD1_CONN_EQOS_RGMII_TXD1 IMX8DXL_ENET1_RGMII_TXD1 3
+#define IMX8DXL_ENET1_RGMII_TXD1_LSIO_GPIO0_IO09 IMX8DXL_ENET1_RGMII_TXD1 4
+#define IMX8DXL_ENET1_RGMII_TXD1_LSIO_GPIO6_IO03 IMX8DXL_ENET1_RGMII_TXD1 5
+#define IMX8DXL_ENET1_RGMII_RXD0_ADMA_SPDIF0_RX IMX8DXL_ENET1_RGMII_RXD0 0
+#define IMX8DXL_ENET1_RGMII_RXD0_ADMA_MQS_R IMX8DXL_ENET1_RGMII_RXD0 1
+#define IMX8DXL_ENET1_RGMII_RXD0_ADMA_LCDIF_D10 IMX8DXL_ENET1_RGMII_RXD0 2
+#define IMX8DXL_ENET1_RGMII_RXD0_CONN_EQOS_RGMII_RXD0 IMX8DXL_ENET1_RGMII_RXD0 3
+#define IMX8DXL_ENET1_RGMII_RXD0_LSIO_GPIO0_IO10 IMX8DXL_ENET1_RGMII_RXD0 4
+#define IMX8DXL_ENET1_RGMII_RXD0_LSIO_GPIO6_IO04 IMX8DXL_ENET1_RGMII_RXD0 5
+#define IMX8DXL_ENET1_RGMII_RX_CTL_ADMA_SPDIF0_TX IMX8DXL_ENET1_RGMII_RX_CTL 0
+#define IMX8DXL_ENET1_RGMII_RX_CTL_ADMA_MQS_L IMX8DXL_ENET1_RGMII_RX_CTL 1
+#define IMX8DXL_ENET1_RGMII_RX_CTL_ADMA_LCDIF_D11 IMX8DXL_ENET1_RGMII_RX_CTL 2
+#define IMX8DXL_ENET1_RGMII_RX_CTL_CONN_EQOS_RGMII_RX_CTL IMX8DXL_ENET1_RGMII_RX_CTL 3
+#define IMX8DXL_ENET1_RGMII_RX_CTL_LSIO_GPIO0_IO11 IMX8DXL_ENET1_RGMII_RX_CTL 4
+#define IMX8DXL_ENET1_RGMII_RX_CTL_LSIO_GPIO6_IO05 IMX8DXL_ENET1_RGMII_RX_CTL 5
+#define IMX8DXL_ENET1_REFCLK_125M_25M_ADMA_SPDIF0_EXT_CLK IMX8DXL_ENET1_REFCLK_125M_25M 0
+#define IMX8DXL_ENET1_REFCLK_125M_25M_ADMA_LCDIF_D12 IMX8DXL_ENET1_REFCLK_125M_25M 2
+#define IMX8DXL_ENET1_REFCLK_125M_25M_CONN_EQOS_REFCLK_125M_25M IMX8DXL_ENET1_REFCLK_125M_25M 3
+#define IMX8DXL_ENET1_REFCLK_125M_25M_LSIO_GPIO0_IO12 IMX8DXL_ENET1_REFCLK_125M_25M 4
+#define IMX8DXL_ENET1_REFCLK_125M_25M_LSIO_GPIO6_IO06 IMX8DXL_ENET1_REFCLK_125M_25M 5
+#define IMX8DXL_SPI3_SCK_ADMA_SPI3_SCK IMX8DXL_SPI3_SCK 0
+#define IMX8DXL_SPI3_SCK_ADMA_LCDIF_D13 IMX8DXL_SPI3_SCK 2
+#define IMX8DXL_SPI3_SCK_LSIO_GPIO0_IO13 IMX8DXL_SPI3_SCK 4
+#define IMX8DXL_SPI3_SCK_ADMA_LCDIF_D00 IMX8DXL_SPI3_SCK 5
+#define IMX8DXL_SPI3_SDO_ADMA_SPI3_SDO IMX8DXL_SPI3_SDO 0
+#define IMX8DXL_SPI3_SDO_ADMA_LCDIF_D14 IMX8DXL_SPI3_SDO 2
+#define IMX8DXL_SPI3_SDO_LSIO_GPIO0_IO14 IMX8DXL_SPI3_SDO 4
+#define IMX8DXL_SPI3_SDO_ADMA_LCDIF_D01 IMX8DXL_SPI3_SDO 5
+#define IMX8DXL_SPI3_SDI_ADMA_SPI3_SDI IMX8DXL_SPI3_SDI 0
+#define IMX8DXL_SPI3_SDI_ADMA_LCDIF_D15 IMX8DXL_SPI3_SDI 2
+#define IMX8DXL_SPI3_SDI_LSIO_GPIO0_IO15 IMX8DXL_SPI3_SDI 4
+#define IMX8DXL_SPI3_SDI_ADMA_LCDIF_D02 IMX8DXL_SPI3_SDI 5
+#define IMX8DXL_SPI3_CS0_ADMA_SPI3_CS0 IMX8DXL_SPI3_CS0 0
+#define IMX8DXL_SPI3_CS0_ADMA_ACM_MCLK_OUT1 IMX8DXL_SPI3_CS0 1
+#define IMX8DXL_SPI3_CS0_ADMA_LCDIF_HSYNC IMX8DXL_SPI3_CS0 2
+#define IMX8DXL_SPI3_CS0_LSIO_GPIO0_IO16 IMX8DXL_SPI3_CS0 4
+#define IMX8DXL_SPI3_CS0_ADMA_LCDIF_CS IMX8DXL_SPI3_CS0 5
+#define IMX8DXL_SPI3_CS1_ADMA_SPI3_CS1 IMX8DXL_SPI3_CS1 0
+#define IMX8DXL_SPI3_CS1_ADMA_I2C3_SCL IMX8DXL_SPI3_CS1 1
+#define IMX8DXL_SPI3_CS1_ADMA_LCDIF_RESET IMX8DXL_SPI3_CS1 2
+#define IMX8DXL_SPI3_CS1_ADMA_SPI2_CS0 IMX8DXL_SPI3_CS1 3
+#define IMX8DXL_SPI3_CS1_ADMA_LCDIF_D16 IMX8DXL_SPI3_CS1 4
+#define IMX8DXL_SPI3_CS1_ADMA_LCDIF_RD_E IMX8DXL_SPI3_CS1 5
+#define IMX8DXL_MCLK_IN1_ADMA_ACM_MCLK_IN1 IMX8DXL_MCLK_IN1 0
+#define IMX8DXL_MCLK_IN1_ADMA_I2C3_SDA IMX8DXL_MCLK_IN1 1
+#define IMX8DXL_MCLK_IN1_ADMA_LCDIF_EN IMX8DXL_MCLK_IN1 2
+#define IMX8DXL_MCLK_IN1_ADMA_SPI2_SCK IMX8DXL_MCLK_IN1 3
+#define IMX8DXL_MCLK_IN1_ADMA_LCDIF_D17 IMX8DXL_MCLK_IN1 4
+#define IMX8DXL_MCLK_IN1_ADMA_LCDIF_D03 IMX8DXL_MCLK_IN1 5
+#define IMX8DXL_MCLK_IN0_ADMA_ACM_MCLK_IN0 IMX8DXL_MCLK_IN0 0
+#define IMX8DXL_MCLK_IN0_ADMA_LCDIF_VSYNC IMX8DXL_MCLK_IN0 2
+#define IMX8DXL_MCLK_IN0_ADMA_SPI2_SDI IMX8DXL_MCLK_IN0 3
+#define IMX8DXL_MCLK_IN0_LSIO_GPIO0_IO19 IMX8DXL_MCLK_IN0 4
+#define IMX8DXL_MCLK_IN0_ADMA_LCDIF_RS IMX8DXL_MCLK_IN0 5
+#define IMX8DXL_MCLK_OUT0_ADMA_ACM_MCLK_OUT0 IMX8DXL_MCLK_OUT0 0
+#define IMX8DXL_MCLK_OUT0_ADMA_LCDIF_CLK IMX8DXL_MCLK_OUT0 2
+#define IMX8DXL_MCLK_OUT0_ADMA_SPI2_SDO IMX8DXL_MCLK_OUT0 3
+#define IMX8DXL_MCLK_OUT0_LSIO_GPIO0_IO20 IMX8DXL_MCLK_OUT0 4
+#define IMX8DXL_MCLK_OUT0_ADMA_LCDIF_WR_RWN IMX8DXL_MCLK_OUT0 5
+#define IMX8DXL_UART1_TX_ADMA_UART1_TX IMX8DXL_UART1_TX 0
+#define IMX8DXL_UART1_TX_LSIO_PWM0_OUT IMX8DXL_UART1_TX 1
+#define IMX8DXL_UART1_TX_LSIO_GPT0_CAPTURE IMX8DXL_UART1_TX 2
+#define IMX8DXL_UART1_TX_LSIO_GPIO0_IO21 IMX8DXL_UART1_TX 4
+#define IMX8DXL_UART1_TX_ADMA_LCDIF_D04 IMX8DXL_UART1_TX 5
+#define IMX8DXL_UART1_RX_ADMA_UART1_RX IMX8DXL_UART1_RX 0
+#define IMX8DXL_UART1_RX_LSIO_PWM1_OUT IMX8DXL_UART1_RX 1
+#define IMX8DXL_UART1_RX_LSIO_GPT0_COMPARE IMX8DXL_UART1_RX 2
+#define IMX8DXL_UART1_RX_LSIO_GPT1_CLK IMX8DXL_UART1_RX 3
+#define IMX8DXL_UART1_RX_LSIO_GPIO0_IO22 IMX8DXL_UART1_RX 4
+#define IMX8DXL_UART1_RX_ADMA_LCDIF_D05 IMX8DXL_UART1_RX 5
+#define IMX8DXL_UART1_RTS_B_ADMA_UART1_RTS_B IMX8DXL_UART1_RTS_B 0
+#define IMX8DXL_UART1_RTS_B_LSIO_PWM2_OUT IMX8DXL_UART1_RTS_B 1
+#define IMX8DXL_UART1_RTS_B_ADMA_LCDIF_D16 IMX8DXL_UART1_RTS_B 2
+#define IMX8DXL_UART1_RTS_B_LSIO_GPT1_CAPTURE IMX8DXL_UART1_RTS_B 3
+#define IMX8DXL_UART1_RTS_B_LSIO_GPT0_CLK IMX8DXL_UART1_RTS_B 4
+#define IMX8DXL_UART1_RTS_B_ADMA_LCDIF_D06 IMX8DXL_UART1_RTS_B 5
+#define IMX8DXL_UART1_CTS_B_ADMA_UART1_CTS_B IMX8DXL_UART1_CTS_B 0
+#define IMX8DXL_UART1_CTS_B_LSIO_PWM3_OUT IMX8DXL_UART1_CTS_B 1
+#define IMX8DXL_UART1_CTS_B_ADMA_LCDIF_D17 IMX8DXL_UART1_CTS_B 2
+#define IMX8DXL_UART1_CTS_B_LSIO_GPT1_COMPARE IMX8DXL_UART1_CTS_B 3
+#define IMX8DXL_UART1_CTS_B_LSIO_GPIO0_IO24 IMX8DXL_UART1_CTS_B 4
+#define IMX8DXL_UART1_CTS_B_ADMA_LCDIF_D07 IMX8DXL_UART1_CTS_B 5
+#define IMX8DXL_SPI0_SCK_ADMA_SPI0_SCK IMX8DXL_SPI0_SCK 0
+#define IMX8DXL_SPI0_SCK_ADMA_SAI0_TXC IMX8DXL_SPI0_SCK 1
+#define IMX8DXL_SPI0_SCK_M40_I2C0_SCL IMX8DXL_SPI0_SCK 2
+#define IMX8DXL_SPI0_SCK_M40_GPIO0_IO00 IMX8DXL_SPI0_SCK 3
+#define IMX8DXL_SPI0_SCK_LSIO_GPIO1_IO04 IMX8DXL_SPI0_SCK 4
+#define IMX8DXL_SPI0_SCK_ADMA_LCDIF_D08 IMX8DXL_SPI0_SCK 5
+#define IMX8DXL_SPI0_SDI_ADMA_SPI0_SDI IMX8DXL_SPI0_SDI 0
+#define IMX8DXL_SPI0_SDI_ADMA_SAI0_TXD IMX8DXL_SPI0_SDI 1
+#define IMX8DXL_SPI0_SDI_M40_TPM0_CH0 IMX8DXL_SPI0_SDI 2
+#define IMX8DXL_SPI0_SDI_M40_GPIO0_IO02 IMX8DXL_SPI0_SDI 3
+#define IMX8DXL_SPI0_SDI_LSIO_GPIO1_IO05 IMX8DXL_SPI0_SDI 4
+#define IMX8DXL_SPI0_SDI_ADMA_LCDIF_D09 IMX8DXL_SPI0_SDI 5
+#define IMX8DXL_SPI0_SDO_ADMA_SPI0_SDO IMX8DXL_SPI0_SDO 0
+#define IMX8DXL_SPI0_SDO_ADMA_SAI0_TXFS IMX8DXL_SPI0_SDO 1
+#define IMX8DXL_SPI0_SDO_M40_I2C0_SDA IMX8DXL_SPI0_SDO 2
+#define IMX8DXL_SPI0_SDO_M40_GPIO0_IO01 IMX8DXL_SPI0_SDO 3
+#define IMX8DXL_SPI0_SDO_LSIO_GPIO1_IO06 IMX8DXL_SPI0_SDO 4
+#define IMX8DXL_SPI0_SDO_ADMA_LCDIF_D10 IMX8DXL_SPI0_SDO 5
+#define IMX8DXL_SPI0_CS1_ADMA_SPI0_CS1 IMX8DXL_SPI0_CS1 0
+#define IMX8DXL_SPI0_CS1_ADMA_SAI0_RXC IMX8DXL_SPI0_CS1 1
+#define IMX8DXL_SPI0_CS1_ADMA_SAI1_TXD IMX8DXL_SPI0_CS1 2
+#define IMX8DXL_SPI0_CS1_ADMA_LCD_PWM0_OUT IMX8DXL_SPI0_CS1 3
+#define IMX8DXL_SPI0_CS1_LSIO_GPIO1_IO07 IMX8DXL_SPI0_CS1 4
+#define IMX8DXL_SPI0_CS1_ADMA_LCDIF_D11 IMX8DXL_SPI0_CS1 5
+#define IMX8DXL_SPI0_CS0_ADMA_SPI0_CS0 IMX8DXL_SPI0_CS0 0
+#define IMX8DXL_SPI0_CS0_ADMA_SAI0_RXD IMX8DXL_SPI0_CS0 1
+#define IMX8DXL_SPI0_CS0_M40_TPM0_CH1 IMX8DXL_SPI0_CS0 2
+#define IMX8DXL_SPI0_CS0_M40_GPIO0_IO03 IMX8DXL_SPI0_CS0 3
+#define IMX8DXL_SPI0_CS0_LSIO_GPIO1_IO08 IMX8DXL_SPI0_CS0 4
+#define IMX8DXL_SPI0_CS0_ADMA_LCDIF_D12 IMX8DXL_SPI0_CS0 5
+#define IMX8DXL_ADC_IN1_ADMA_ADC_IN1 IMX8DXL_ADC_IN1 0
+#define IMX8DXL_ADC_IN1_M40_I2C0_SDA IMX8DXL_ADC_IN1 1
+#define IMX8DXL_ADC_IN1_M40_GPIO0_IO01 IMX8DXL_ADC_IN1 2
+#define IMX8DXL_ADC_IN1_ADMA_I2C0_SDA IMX8DXL_ADC_IN1 3
+#define IMX8DXL_ADC_IN1_LSIO_GPIO1_IO09 IMX8DXL_ADC_IN1 4
+#define IMX8DXL_ADC_IN1_ADMA_LCDIF_D13 IMX8DXL_ADC_IN1 5
+#define IMX8DXL_ADC_IN0_ADMA_ADC_IN0 IMX8DXL_ADC_IN0 0
+#define IMX8DXL_ADC_IN0_M40_I2C0_SCL IMX8DXL_ADC_IN0 1
+#define IMX8DXL_ADC_IN0_M40_GPIO0_IO00 IMX8DXL_ADC_IN0 2
+#define IMX8DXL_ADC_IN0_ADMA_I2C0_SCL IMX8DXL_ADC_IN0 3
+#define IMX8DXL_ADC_IN0_LSIO_GPIO1_IO10 IMX8DXL_ADC_IN0 4
+#define IMX8DXL_ADC_IN0_ADMA_LCDIF_D14 IMX8DXL_ADC_IN0 5
+#define IMX8DXL_ADC_IN3_ADMA_ADC_IN3 IMX8DXL_ADC_IN3 0
+#define IMX8DXL_ADC_IN3_M40_UART0_TX IMX8DXL_ADC_IN3 1
+#define IMX8DXL_ADC_IN3_M40_GPIO0_IO03 IMX8DXL_ADC_IN3 2
+#define IMX8DXL_ADC_IN3_ADMA_ACM_MCLK_OUT0 IMX8DXL_ADC_IN3 3
+#define IMX8DXL_ADC_IN3_LSIO_GPIO1_IO11 IMX8DXL_ADC_IN3 4
+#define IMX8DXL_ADC_IN3_ADMA_LCDIF_D15 IMX8DXL_ADC_IN3 5
+#define IMX8DXL_ADC_IN2_ADMA_ADC_IN2 IMX8DXL_ADC_IN2 0
+#define IMX8DXL_ADC_IN2_M40_UART0_RX IMX8DXL_ADC_IN2 1
+#define IMX8DXL_ADC_IN2_M40_GPIO0_IO02 IMX8DXL_ADC_IN2 2
+#define IMX8DXL_ADC_IN2_ADMA_ACM_MCLK_IN0 IMX8DXL_ADC_IN2 3
+#define IMX8DXL_ADC_IN2_LSIO_GPIO1_IO12 IMX8DXL_ADC_IN2 4
+#define IMX8DXL_ADC_IN2_ADMA_LCDIF_D16 IMX8DXL_ADC_IN2 5
+#define IMX8DXL_ADC_IN5_ADMA_ADC_IN5 IMX8DXL_ADC_IN5 0
+#define IMX8DXL_ADC_IN5_M40_TPM0_CH1 IMX8DXL_ADC_IN5 1
+#define IMX8DXL_ADC_IN5_M40_GPIO0_IO05 IMX8DXL_ADC_IN5 2
+#define IMX8DXL_ADC_IN5_ADMA_LCDIF_LCDBUSY IMX8DXL_ADC_IN5 3
+#define IMX8DXL_ADC_IN5_LSIO_GPIO1_IO13 IMX8DXL_ADC_IN5 4
+#define IMX8DXL_ADC_IN5_ADMA_LCDIF_D17 IMX8DXL_ADC_IN5 5
+#define IMX8DXL_ADC_IN4_ADMA_ADC_IN4 IMX8DXL_ADC_IN4 0
+#define IMX8DXL_ADC_IN4_M40_TPM0_CH0 IMX8DXL_ADC_IN4 1
+#define IMX8DXL_ADC_IN4_M40_GPIO0_IO04 IMX8DXL_ADC_IN4 2
+#define IMX8DXL_ADC_IN4_ADMA_LCDIF_LCDRESET IMX8DXL_ADC_IN4 3
+#define IMX8DXL_ADC_IN4_LSIO_GPIO1_IO14 IMX8DXL_ADC_IN4 4
+#define IMX8DXL_FLEXCAN0_RX_ADMA_FLEXCAN0_RX IMX8DXL_FLEXCAN0_RX 0
+#define IMX8DXL_FLEXCAN0_RX_ADMA_SAI2_RXC IMX8DXL_FLEXCAN0_RX 1
+#define IMX8DXL_FLEXCAN0_RX_ADMA_UART0_RTS_B IMX8DXL_FLEXCAN0_RX 2
+#define IMX8DXL_FLEXCAN0_RX_ADMA_SAI1_TXC IMX8DXL_FLEXCAN0_RX 3
+#define IMX8DXL_FLEXCAN0_RX_LSIO_GPIO1_IO15 IMX8DXL_FLEXCAN0_RX 4
+#define IMX8DXL_FLEXCAN0_RX_LSIO_GPIO6_IO08 IMX8DXL_FLEXCAN0_RX 5
+#define IMX8DXL_FLEXCAN0_TX_ADMA_FLEXCAN0_TX IMX8DXL_FLEXCAN0_TX 0
+#define IMX8DXL_FLEXCAN0_TX_ADMA_SAI2_RXD IMX8DXL_FLEXCAN0_TX 1
+#define IMX8DXL_FLEXCAN0_TX_ADMA_UART0_CTS_B IMX8DXL_FLEXCAN0_TX 2
+#define IMX8DXL_FLEXCAN0_TX_ADMA_SAI1_TXFS IMX8DXL_FLEXCAN0_TX 3
+#define IMX8DXL_FLEXCAN0_TX_LSIO_GPIO1_IO16 IMX8DXL_FLEXCAN0_TX 4
+#define IMX8DXL_FLEXCAN0_TX_LSIO_GPIO6_IO09 IMX8DXL_FLEXCAN0_TX 5
+#define IMX8DXL_FLEXCAN1_RX_ADMA_FLEXCAN1_RX IMX8DXL_FLEXCAN1_RX 0
+#define IMX8DXL_FLEXCAN1_RX_ADMA_SAI2_RXFS IMX8DXL_FLEXCAN1_RX 1
+#define IMX8DXL_FLEXCAN1_RX_ADMA_FTM_CH2 IMX8DXL_FLEXCAN1_RX 2
+#define IMX8DXL_FLEXCAN1_RX_ADMA_SAI1_TXD IMX8DXL_FLEXCAN1_RX 3
+#define IMX8DXL_FLEXCAN1_RX_LSIO_GPIO1_IO17 IMX8DXL_FLEXCAN1_RX 4
+#define IMX8DXL_FLEXCAN1_RX_LSIO_GPIO6_IO10 IMX8DXL_FLEXCAN1_RX 5
+#define IMX8DXL_FLEXCAN1_TX_ADMA_FLEXCAN1_TX IMX8DXL_FLEXCAN1_TX 0
+#define IMX8DXL_FLEXCAN1_TX_ADMA_SAI3_RXC IMX8DXL_FLEXCAN1_TX 1
+#define IMX8DXL_FLEXCAN1_TX_ADMA_DMA0_REQ_IN0 IMX8DXL_FLEXCAN1_TX 2
+#define IMX8DXL_FLEXCAN1_TX_ADMA_SAI1_RXD IMX8DXL_FLEXCAN1_TX 3
+#define IMX8DXL_FLEXCAN1_TX_LSIO_GPIO1_IO18 IMX8DXL_FLEXCAN1_TX 4
+#define IMX8DXL_FLEXCAN1_TX_LSIO_GPIO6_IO11 IMX8DXL_FLEXCAN1_TX 5
+#define IMX8DXL_FLEXCAN2_RX_ADMA_FLEXCAN2_RX IMX8DXL_FLEXCAN2_RX 0
+#define IMX8DXL_FLEXCAN2_RX_ADMA_SAI3_RXD IMX8DXL_FLEXCAN2_RX 1
+#define IMX8DXL_FLEXCAN2_RX_ADMA_UART3_RX IMX8DXL_FLEXCAN2_RX 2
+#define IMX8DXL_FLEXCAN2_RX_ADMA_SAI1_RXFS IMX8DXL_FLEXCAN2_RX 3
+#define IMX8DXL_FLEXCAN2_RX_LSIO_GPIO1_IO19 IMX8DXL_FLEXCAN2_RX 4
+#define IMX8DXL_FLEXCAN2_RX_LSIO_GPIO6_IO12 IMX8DXL_FLEXCAN2_RX 5
+#define IMX8DXL_FLEXCAN2_TX_ADMA_FLEXCAN2_TX IMX8DXL_FLEXCAN2_TX 0
+#define IMX8DXL_FLEXCAN2_TX_ADMA_SAI3_RXFS IMX8DXL_FLEXCAN2_TX 1
+#define IMX8DXL_FLEXCAN2_TX_ADMA_UART3_TX IMX8DXL_FLEXCAN2_TX 2
+#define IMX8DXL_FLEXCAN2_TX_ADMA_SAI1_RXC IMX8DXL_FLEXCAN2_TX 3
+#define IMX8DXL_FLEXCAN2_TX_LSIO_GPIO1_IO20 IMX8DXL_FLEXCAN2_TX 4
+#define IMX8DXL_FLEXCAN2_TX_LSIO_GPIO6_IO13 IMX8DXL_FLEXCAN2_TX 5
+#define IMX8DXL_UART0_RX_ADMA_UART0_RX IMX8DXL_UART0_RX 0
+#define IMX8DXL_UART0_RX_ADMA_MQS_R IMX8DXL_UART0_RX 1
+#define IMX8DXL_UART0_RX_ADMA_FLEXCAN0_RX IMX8DXL_UART0_RX 2
+#define IMX8DXL_UART0_RX_SCU_UART0_RX IMX8DXL_UART0_RX 3
+#define IMX8DXL_UART0_RX_LSIO_GPIO1_IO21 IMX8DXL_UART0_RX 4
+#define IMX8DXL_UART0_RX_LSIO_GPIO6_IO14 IMX8DXL_UART0_RX 5
+#define IMX8DXL_UART0_TX_ADMA_UART0_TX IMX8DXL_UART0_TX 0
+#define IMX8DXL_UART0_TX_ADMA_MQS_L IMX8DXL_UART0_TX 1
+#define IMX8DXL_UART0_TX_ADMA_FLEXCAN0_TX IMX8DXL_UART0_TX 2
+#define IMX8DXL_UART0_TX_SCU_UART0_TX IMX8DXL_UART0_TX 3
+#define IMX8DXL_UART0_TX_LSIO_GPIO1_IO22 IMX8DXL_UART0_TX 4
+#define IMX8DXL_UART0_TX_LSIO_GPIO6_IO15 IMX8DXL_UART0_TX 5
+#define IMX8DXL_UART2_TX_ADMA_UART2_TX IMX8DXL_UART2_TX 0
+#define IMX8DXL_UART2_TX_ADMA_FTM_CH1 IMX8DXL_UART2_TX 1
+#define IMX8DXL_UART2_TX_ADMA_FLEXCAN1_TX IMX8DXL_UART2_TX 2
+#define IMX8DXL_UART2_TX_LSIO_GPIO1_IO23 IMX8DXL_UART2_TX 4
+#define IMX8DXL_UART2_TX_LSIO_GPIO6_IO16 IMX8DXL_UART2_TX 5
+#define IMX8DXL_UART2_RX_ADMA_UART2_RX IMX8DXL_UART2_RX 0
+#define IMX8DXL_UART2_RX_ADMA_FTM_CH0 IMX8DXL_UART2_RX 1
+#define IMX8DXL_UART2_RX_ADMA_FLEXCAN1_RX IMX8DXL_UART2_RX 2
+#define IMX8DXL_UART2_RX_LSIO_GPIO1_IO24 IMX8DXL_UART2_RX 4
+#define IMX8DXL_UART2_RX_LSIO_GPIO6_IO17 IMX8DXL_UART2_RX 5
+#define IMX8DXL_JTAG_TRST_B_SCU_JTAG_TRST_B IMX8DXL_JTAG_TRST_B 0
+#define IMX8DXL_JTAG_TRST_B_SCU_WDOG0_WDOG_OUT IMX8DXL_JTAG_TRST_B 1
+#define IMX8DXL_PMIC_I2C_SCL_SCU_PMIC_I2C_SCL IMX8DXL_PMIC_I2C_SCL 0
+#define IMX8DXL_PMIC_I2C_SCL_SCU_GPIO0_IOXX_PMIC_A35_ON IMX8DXL_PMIC_I2C_SCL 1
+#define IMX8DXL_PMIC_I2C_SCL_LSIO_GPIO2_IO01 IMX8DXL_PMIC_I2C_SCL 4
+#define IMX8DXL_PMIC_I2C_SDA_SCU_PMIC_I2C_SDA IMX8DXL_PMIC_I2C_SDA 0
+#define IMX8DXL_PMIC_I2C_SDA_SCU_GPIO0_IOXX_PMIC_GPU_ON IMX8DXL_PMIC_I2C_SDA 1
+#define IMX8DXL_PMIC_I2C_SDA_LSIO_GPIO2_IO02 IMX8DXL_PMIC_I2C_SDA 4
+#define IMX8DXL_PMIC_INT_B_SCU_DSC_PMIC_INT_B IMX8DXL_PMIC_INT_B 0
+#define IMX8DXL_SCU_GPIO0_00_SCU_GPIO0_IO00 IMX8DXL_SCU_GPIO0_00 0
+#define IMX8DXL_SCU_GPIO0_00_SCU_UART0_RX IMX8DXL_SCU_GPIO0_00 1
+#define IMX8DXL_SCU_GPIO0_00_M40_UART0_RX IMX8DXL_SCU_GPIO0_00 2
+#define IMX8DXL_SCU_GPIO0_00_ADMA_UART3_RX IMX8DXL_SCU_GPIO0_00 3
+#define IMX8DXL_SCU_GPIO0_00_LSIO_GPIO2_IO03 IMX8DXL_SCU_GPIO0_00 4
+#define IMX8DXL_SCU_GPIO0_01_SCU_GPIO0_IO01 IMX8DXL_SCU_GPIO0_01 0
+#define IMX8DXL_SCU_GPIO0_01_SCU_UART0_TX IMX8DXL_SCU_GPIO0_01 1
+#define IMX8DXL_SCU_GPIO0_01_M40_UART0_TX IMX8DXL_SCU_GPIO0_01 2
+#define IMX8DXL_SCU_GPIO0_01_ADMA_UART3_TX IMX8DXL_SCU_GPIO0_01 3
+#define IMX8DXL_SCU_GPIO0_01_SCU_WDOG0_WDOG_OUT IMX8DXL_SCU_GPIO0_01 4
+#define IMX8DXL_SCU_PMIC_STANDBY_SCU_DSC_PMIC_STANDBY IMX8DXL_SCU_PMIC_STANDBY 0
+#define IMX8DXL_SCU_BOOT_MODE1_SCU_DSC_BOOT_MODE1 IMX8DXL_SCU_BOOT_MODE1 0
+#define IMX8DXL_SCU_BOOT_MODE0_SCU_DSC_BOOT_MODE0 IMX8DXL_SCU_BOOT_MODE0 0
+#define IMX8DXL_SCU_BOOT_MODE2_SCU_DSC_BOOT_MODE2 IMX8DXL_SCU_BOOT_MODE2 0
+#define IMX8DXL_SCU_BOOT_MODE2_SCU_DSC_RTC_CLOCK_OUTPUT_32K IMX8DXL_SCU_BOOT_MODE2 1
+#define IMX8DXL_SNVS_TAMPER_OUT1_LSIO_GPIO2_IO05_IN IMX8DXL_SNVS_TAMPER_OUT1 4
+#define IMX8DXL_SNVS_TAMPER_OUT1_LSIO_GPIO6_IO19_IN IMX8DXL_SNVS_TAMPER_OUT1 5
+#define IMX8DXL_SNVS_TAMPER_OUT2_LSIO_GPIO2_IO06_IN IMX8DXL_SNVS_TAMPER_OUT2 4
+#define IMX8DXL_SNVS_TAMPER_OUT2_LSIO_GPIO6_IO20_IN IMX8DXL_SNVS_TAMPER_OUT2 5
+#define IMX8DXL_SNVS_TAMPER_OUT3_ADMA_SAI2_RXC IMX8DXL_SNVS_TAMPER_OUT3 2
+#define IMX8DXL_SNVS_TAMPER_OUT3_LSIO_GPIO2_IO07_IN IMX8DXL_SNVS_TAMPER_OUT3 4
+#define IMX8DXL_SNVS_TAMPER_OUT3_LSIO_GPIO6_IO21_IN IMX8DXL_SNVS_TAMPER_OUT3 5
+#define IMX8DXL_SNVS_TAMPER_OUT4_ADMA_SAI2_RXD IMX8DXL_SNVS_TAMPER_OUT4 2
+#define IMX8DXL_SNVS_TAMPER_OUT4_LSIO_GPIO2_IO08_IN IMX8DXL_SNVS_TAMPER_OUT4 4
+#define IMX8DXL_SNVS_TAMPER_OUT4_LSIO_GPIO6_IO22_IN IMX8DXL_SNVS_TAMPER_OUT4 5
+#define IMX8DXL_SNVS_TAMPER_IN0_ADMA_SAI2_RXFS IMX8DXL_SNVS_TAMPER_IN0 2
+#define IMX8DXL_SNVS_TAMPER_IN0_LSIO_GPIO2_IO09_IN IMX8DXL_SNVS_TAMPER_IN0 4
+#define IMX8DXL_SNVS_TAMPER_IN0_LSIO_GPIO6_IO23_IN IMX8DXL_SNVS_TAMPER_IN0 5
+#define IMX8DXL_SNVS_TAMPER_IN1_ADMA_SAI3_RXC IMX8DXL_SNVS_TAMPER_IN1 2
+#define IMX8DXL_SNVS_TAMPER_IN1_LSIO_GPIO2_IO10_IN IMX8DXL_SNVS_TAMPER_IN1 4
+#define IMX8DXL_SNVS_TAMPER_IN1_LSIO_GPIO6_IO24_IN IMX8DXL_SNVS_TAMPER_IN1 5
+#define IMX8DXL_SNVS_TAMPER_IN2_ADMA_SAI3_RXD IMX8DXL_SNVS_TAMPER_IN2 2
+#define IMX8DXL_SNVS_TAMPER_IN2_LSIO_GPIO2_IO11_IN IMX8DXL_SNVS_TAMPER_IN2 4
+#define IMX8DXL_SNVS_TAMPER_IN2_LSIO_GPIO6_IO25_IN IMX8DXL_SNVS_TAMPER_IN2 5
+#define IMX8DXL_SNVS_TAMPER_IN3_ADMA_SAI3_RXFS IMX8DXL_SNVS_TAMPER_IN3 2
+#define IMX8DXL_SNVS_TAMPER_IN3_LSIO_GPIO2_IO12_IN IMX8DXL_SNVS_TAMPER_IN3 4
+#define IMX8DXL_SNVS_TAMPER_IN3_LSIO_GPIO6_IO26_IN IMX8DXL_SNVS_TAMPER_IN3 5
+#define IMX8DXL_SPI1_SCK_ADMA_I2C2_SDA IMX8DXL_SPI1_SCK 2
+#define IMX8DXL_SPI1_SCK_ADMA_SPI1_SCK IMX8DXL_SPI1_SCK 3
+#define IMX8DXL_SPI1_SCK_LSIO_GPIO3_IO00 IMX8DXL_SPI1_SCK 4
+#define IMX8DXL_SPI1_SDO_ADMA_I2C2_SCL IMX8DXL_SPI1_SDO 2
+#define IMX8DXL_SPI1_SDO_ADMA_SPI1_SDO IMX8DXL_SPI1_SDO 3
+#define IMX8DXL_SPI1_SDO_LSIO_GPIO3_IO01 IMX8DXL_SPI1_SDO 4
+#define IMX8DXL_SPI1_SDI_ADMA_I2C3_SCL IMX8DXL_SPI1_SDI 2
+#define IMX8DXL_SPI1_SDI_ADMA_SPI1_SDI IMX8DXL_SPI1_SDI 3
+#define IMX8DXL_SPI1_SDI_LSIO_GPIO3_IO02 IMX8DXL_SPI1_SDI 4
+#define IMX8DXL_SPI1_CS0_ADMA_I2C3_SDA IMX8DXL_SPI1_CS0 2
+#define IMX8DXL_SPI1_CS0_ADMA_SPI1_CS0 IMX8DXL_SPI1_CS0 3
+#define IMX8DXL_SPI1_CS0_LSIO_GPIO3_IO03 IMX8DXL_SPI1_CS0 4
+#define IMX8DXL_QSPI0A_DATA1_LSIO_QSPI0A_DATA1 IMX8DXL_QSPI0A_DATA1 0
+#define IMX8DXL_QSPI0A_DATA1_LSIO_GPIO3_IO10 IMX8DXL_QSPI0A_DATA1 4
+#define IMX8DXL_QSPI0A_DATA0_LSIO_QSPI0A_DATA0 IMX8DXL_QSPI0A_DATA0 0
+#define IMX8DXL_QSPI0A_DATA0_LSIO_GPIO3_IO09 IMX8DXL_QSPI0A_DATA0 4
+#define IMX8DXL_QSPI0A_DATA3_LSIO_QSPI0A_DATA3 IMX8DXL_QSPI0A_DATA3 0
+#define IMX8DXL_QSPI0A_DATA3_LSIO_GPIO3_IO12 IMX8DXL_QSPI0A_DATA3 4
+#define IMX8DXL_QSPI0A_DATA2_LSIO_QSPI0A_DATA2 IMX8DXL_QSPI0A_DATA2 0
+#define IMX8DXL_QSPI0A_DATA2_LSIO_GPIO3_IO11 IMX8DXL_QSPI0A_DATA2 4
+#define IMX8DXL_QSPI0A_SS0_B_LSIO_QSPI0A_SS0_B IMX8DXL_QSPI0A_SS0_B 0
+#define IMX8DXL_QSPI0A_SS0_B_LSIO_GPIO3_IO14 IMX8DXL_QSPI0A_SS0_B 4
+#define IMX8DXL_QSPI0A_DQS_LSIO_QSPI0A_DQS IMX8DXL_QSPI0A_DQS 0
+#define IMX8DXL_QSPI0A_DQS_LSIO_GPIO3_IO13 IMX8DXL_QSPI0A_DQS 4
+#define IMX8DXL_QSPI0A_SCLK_LSIO_QSPI0A_SCLK IMX8DXL_QSPI0A_SCLK 0
+#define IMX8DXL_QSPI0A_SCLK_LSIO_GPIO3_IO16 IMX8DXL_QSPI0A_SCLK 4
+#define IMX8DXL_QSPI0B_SCLK_LSIO_QSPI0B_SCLK IMX8DXL_QSPI0B_SCLK 0
+#define IMX8DXL_QSPI0B_SCLK_LSIO_GPIO3_IO17 IMX8DXL_QSPI0B_SCLK 4
+#define IMX8DXL_QSPI0B_DQS_LSIO_QSPI0B_DQS IMX8DXL_QSPI0B_DQS 0
+#define IMX8DXL_QSPI0B_DQS_LSIO_GPIO3_IO22 IMX8DXL_QSPI0B_DQS 4
+#define IMX8DXL_QSPI0B_DATA1_LSIO_QSPI0B_DATA1 IMX8DXL_QSPI0B_DATA1 0
+#define IMX8DXL_QSPI0B_DATA1_LSIO_GPIO3_IO19 IMX8DXL_QSPI0B_DATA1 4
+#define IMX8DXL_QSPI0B_DATA0_LSIO_QSPI0B_DATA0 IMX8DXL_QSPI0B_DATA0 0
+#define IMX8DXL_QSPI0B_DATA0_LSIO_GPIO3_IO18 IMX8DXL_QSPI0B_DATA0 4
+#define IMX8DXL_QSPI0B_DATA3_LSIO_QSPI0B_DATA3 IMX8DXL_QSPI0B_DATA3 0
+#define IMX8DXL_QSPI0B_DATA3_LSIO_GPIO3_IO21 IMX8DXL_QSPI0B_DATA3 4
+#define IMX8DXL_QSPI0B_DATA2_LSIO_QSPI0B_DATA2 IMX8DXL_QSPI0B_DATA2 0
+#define IMX8DXL_QSPI0B_DATA2_LSIO_GPIO3_IO20 IMX8DXL_QSPI0B_DATA2 4
+#define IMX8DXL_QSPI0B_SS0_B_LSIO_QSPI0B_SS0_B IMX8DXL_QSPI0B_SS0_B 0
+#define IMX8DXL_QSPI0B_SS0_B_LSIO_GPIO3_IO23 IMX8DXL_QSPI0B_SS0_B 4
+#define IMX8DXL_QSPI0B_SS0_B_LSIO_QSPI0A_SS1_B IMX8DXL_QSPI0B_SS0_B 5
+
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_PCIESEP_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_PCIESEP 0
+#define IMX8DXL_COMP_CTL_GPIO_3V3_USB3IO_PAD IMX8DXL_COMP_CTL_GPIO_3V3_USB3IO 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_SD1FIX0_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_SD1FIX0 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_VSELSEP_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_VSELSEP 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB0_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB0 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB1_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB1 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIOCT_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIOCT 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHB_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHB 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHK_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHK 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHT_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHT 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIOLH_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIOLH 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHD_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHD 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_QSPI0A_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_QSPI0A 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_QSPI0B_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_QSPI0B 0
+
+#endif
diff --git a/include/dt-bindings/pinctrl/pinctrl-starfive-jh7100.h b/include/dt-bindings/pinctrl/pinctrl-starfive-jh7100.h
new file mode 100644
index 000000000000..a200f546d078
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-starfive-jh7100.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2021 Emil Renner Berthing <kernel@esmil.dk>
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_STARFIVE_JH7100_H__
+#define __DT_BINDINGS_PINCTRL_STARFIVE_JH7100_H__
+
+#define PAD_GPIO_OFFSET 0
+#define PAD_FUNC_SHARE_OFFSET 64
+#define PAD_GPIO(x) (PAD_GPIO_OFFSET + (x))
+#define PAD_FUNC_SHARE(x) (PAD_FUNC_SHARE_OFFSET + (x))
+
+/*
+ * GPIOMUX bits:
+ * | 31 - 24 | 23 - 16 | 15 - 8 | 7 | 6 | 5 - 0 |
+ * | dout | doen | din | dout rev | doen rev | gpio nr |
+ *
+ * dout: output signal
+ * doen: output enable signal
+ * din: optional input signal, 0xff = none
+ * dout rev: output signal reverse bit
+ * doen rev: output enable signal reverse bit
+ * gpio nr: gpio number, 0 - 63
+ */
+#define GPIOMUX(n, dout, doen, din) ( \
+ (((dout) & 0x80000000) >> (31 - 7)) | (((dout) & 0xff) << 24) | \
+ (((doen) & 0x80000000) >> (31 - 6)) | (((doen) & 0xff) << 16) | \
+ (((din) & 0xff) << 8) | \
+ ((n) & 0x3f))
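+
+/*
+ * Worked example (illustrative, not part of the original header): routing
+ * UART0 SOUT to GPIO14 with the output permanently enabled and no input
+ * signal would be
+ *
+ *   GPIOMUX(14, GPO_UART0_PAD_SOUT, GPO_ENABLE, GPI_NONE)
+ *     = (127 << 24) | (0 << 16) | (0xff << 8) | 14
+ *     = 0x7f00ff0e
+ *
+ * Neither reverse bit is set because neither signal id carries GPO_REVERSE
+ * (bit 31).
+ */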
+
+#define GPO_REVERSE 0x80000000
+
+#define GPO_LOW 0
+#define GPO_HIGH 1
+#define GPO_ENABLE 0
+#define GPO_DISABLE 1
+#define GPO_CLK_GMAC_PAPHYREF 2
+#define GPO_JTAG_TDO 3
+#define GPO_JTAG_TDO_OEN 4
+#define GPO_DMIC_CLK_OUT 5
+#define GPO_DSP_JTDOEN_PAD 6
+#define GPO_DSP_JTDO_PAD 7
+#define GPO_I2C0_PAD_SCK_OE 8
+#define GPO_I2C0_PAD_SCK_OEN (GPO_I2C0_PAD_SCK_OE | GPO_REVERSE)
+#define GPO_I2C0_PAD_SDA_OE 9
+#define GPO_I2C0_PAD_SDA_OEN (GPO_I2C0_PAD_SDA_OE | GPO_REVERSE)
+#define GPO_I2C1_PAD_SCK_OE 10
+#define GPO_I2C1_PAD_SCK_OEN (GPO_I2C1_PAD_SCK_OE | GPO_REVERSE)
+#define GPO_I2C1_PAD_SDA_OE 11
+#define GPO_I2C1_PAD_SDA_OEN (GPO_I2C1_PAD_SDA_OE | GPO_REVERSE)
+#define GPO_I2C2_PAD_SCK_OE 12
+#define GPO_I2C2_PAD_SCK_OEN (GPO_I2C2_PAD_SCK_OE | GPO_REVERSE)
+#define GPO_I2C2_PAD_SDA_OE 13
+#define GPO_I2C2_PAD_SDA_OEN (GPO_I2C2_PAD_SDA_OE | GPO_REVERSE)
+#define GPO_I2C3_PAD_SCK_OE 14
+#define GPO_I2C3_PAD_SCK_OEN (GPO_I2C3_PAD_SCK_OE | GPO_REVERSE)
+#define GPO_I2C3_PAD_SDA_OE 15
+#define GPO_I2C3_PAD_SDA_OEN (GPO_I2C3_PAD_SDA_OE | GPO_REVERSE)
+#define GPO_I2SRX_BCLK_OUT 16
+#define GPO_I2SRX_BCLK_OUT_OEN 17
+#define GPO_I2SRX_LRCK_OUT 18
+#define GPO_I2SRX_LRCK_OUT_OEN 19
+#define GPO_I2SRX_MCLK_OUT 20
+#define GPO_I2STX_BCLK_OUT 21
+#define GPO_I2STX_BCLK_OUT_OEN 22
+#define GPO_I2STX_LRCK_OUT 23
+#define GPO_I2STX_LRCK_OUT_OEN 24
+#define GPO_I2STX_MCLK_OUT 25
+#define GPO_I2STX_SDOUT0 26
+#define GPO_I2STX_SDOUT1 27
+#define GPO_LCD_PAD_CSM_N 28
+#define GPO_PWM_PAD_OE_N_BIT0 29
+#define GPO_PWM_PAD_OE_N_BIT1 30
+#define GPO_PWM_PAD_OE_N_BIT2 31
+#define GPO_PWM_PAD_OE_N_BIT3 32
+#define GPO_PWM_PAD_OE_N_BIT4 33
+#define GPO_PWM_PAD_OE_N_BIT5 34
+#define GPO_PWM_PAD_OE_N_BIT6 35
+#define GPO_PWM_PAD_OE_N_BIT7 36
+#define GPO_PWM_PAD_OUT_BIT0 37
+#define GPO_PWM_PAD_OUT_BIT1 38
+#define GPO_PWM_PAD_OUT_BIT2 39
+#define GPO_PWM_PAD_OUT_BIT3 40
+#define GPO_PWM_PAD_OUT_BIT4 41
+#define GPO_PWM_PAD_OUT_BIT5 42
+#define GPO_PWM_PAD_OUT_BIT6 43
+#define GPO_PWM_PAD_OUT_BIT7 44
+#define GPO_PWMDAC_LEFT_OUT 45
+#define GPO_PWMDAC_RIGHT_OUT 46
+#define GPO_QSPI_CSN1_OUT 47
+#define GPO_QSPI_CSN2_OUT 48
+#define GPO_QSPI_CSN3_OUT 49
+#define GPO_REGISTER23_SCFG_CMSENSOR_RST0 50
+#define GPO_REGISTER23_SCFG_CMSENSOR_RST1 51
+#define GPO_REGISTER32_SCFG_GMAC_PHY_RSTN 52
+#define GPO_SDIO0_PAD_CARD_POWER_EN 53
+#define GPO_SDIO0_PAD_CCLK_OUT 54
+#define GPO_SDIO0_PAD_CCMD_OE 55
+#define GPO_SDIO0_PAD_CCMD_OEN (GPO_SDIO0_PAD_CCMD_OE | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CCMD_OUT 56
+#define GPO_SDIO0_PAD_CDATA_OE_BIT0 57
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT0 (GPO_SDIO0_PAD_CDATA_OE_BIT0 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OE_BIT1 58
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT1 (GPO_SDIO0_PAD_CDATA_OE_BIT1 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OE_BIT2 59
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT2 (GPO_SDIO0_PAD_CDATA_OE_BIT2 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OE_BIT3 60
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT3 (GPO_SDIO0_PAD_CDATA_OE_BIT3 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OE_BIT4 61
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT4 (GPO_SDIO0_PAD_CDATA_OE_BIT4 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OE_BIT5 62
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT5 (GPO_SDIO0_PAD_CDATA_OE_BIT5 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OE_BIT6 63
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT6 (GPO_SDIO0_PAD_CDATA_OE_BIT6 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OE_BIT7 64
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT7 (GPO_SDIO0_PAD_CDATA_OE_BIT7 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT0 65
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT1 66
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT2 67
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT3 68
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT4 69
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT5 70
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT6 71
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT7 72
+#define GPO_SDIO0_PAD_RST_N 73
+#define GPO_SDIO1_PAD_CARD_POWER_EN 74
+#define GPO_SDIO1_PAD_CCLK_OUT 75
+#define GPO_SDIO1_PAD_CCMD_OE 76
+#define GPO_SDIO1_PAD_CCMD_OEN (GPO_SDIO1_PAD_CCMD_OE | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CCMD_OUT 77
+#define GPO_SDIO1_PAD_CDATA_OE_BIT0 78
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT0 (GPO_SDIO1_PAD_CDATA_OE_BIT0 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OE_BIT1 79
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT1 (GPO_SDIO1_PAD_CDATA_OE_BIT1 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OE_BIT2 80
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT2 (GPO_SDIO1_PAD_CDATA_OE_BIT2 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OE_BIT3 81
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT3 (GPO_SDIO1_PAD_CDATA_OE_BIT3 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OE_BIT4 82
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT4 (GPO_SDIO1_PAD_CDATA_OE_BIT4 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OE_BIT5 83
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT5 (GPO_SDIO1_PAD_CDATA_OE_BIT5 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OE_BIT6 84
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT6 (GPO_SDIO1_PAD_CDATA_OE_BIT6 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OE_BIT7 85
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT7 (GPO_SDIO1_PAD_CDATA_OE_BIT7 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT0 86
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT1 87
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT2 88
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT3 89
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT4 90
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT5 91
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT6 92
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT7 93
+#define GPO_SDIO1_PAD_RST_N 94
+#define GPO_SPDIF_TX_SDOUT 95
+#define GPO_SPDIF_TX_SDOUT_OEN 96
+#define GPO_SPI0_PAD_OE_N 97
+#define GPO_SPI0_PAD_SCK_OUT 98
+#define GPO_SPI0_PAD_SS_0_N 99
+#define GPO_SPI0_PAD_SS_1_N 100
+#define GPO_SPI0_PAD_TXD 101
+#define GPO_SPI1_PAD_OE_N 102
+#define GPO_SPI1_PAD_SCK_OUT 103
+#define GPO_SPI1_PAD_SS_0_N 104
+#define GPO_SPI1_PAD_SS_1_N 105
+#define GPO_SPI1_PAD_TXD 106
+#define GPO_SPI2_PAD_OE_N 107
+#define GPO_SPI2_PAD_SCK_OUT 108
+#define GPO_SPI2_PAD_SS_0_N 109
+#define GPO_SPI2_PAD_SS_1_N 110
+#define GPO_SPI2_PAD_TXD 111
+#define GPO_SPI2AHB_PAD_OE_N_BIT0 112
+#define GPO_SPI2AHB_PAD_OE_N_BIT1 113
+#define GPO_SPI2AHB_PAD_OE_N_BIT2 114
+#define GPO_SPI2AHB_PAD_OE_N_BIT3 115
+#define GPO_SPI2AHB_PAD_TXD_BIT0 116
+#define GPO_SPI2AHB_PAD_TXD_BIT1 117
+#define GPO_SPI2AHB_PAD_TXD_BIT2 118
+#define GPO_SPI2AHB_PAD_TXD_BIT3 119
+#define GPO_SPI3_PAD_OE_N 120
+#define GPO_SPI3_PAD_SCK_OUT 121
+#define GPO_SPI3_PAD_SS_0_N 122
+#define GPO_SPI3_PAD_SS_1_N 123
+#define GPO_SPI3_PAD_TXD 124
+#define GPO_UART0_PAD_DTRN 125
+#define GPO_UART0_PAD_RTSN 126
+#define GPO_UART0_PAD_SOUT 127
+#define GPO_UART1_PAD_SOUT 128
+#define GPO_UART2_PAD_DTR_N 129
+#define GPO_UART2_PAD_RTS_N 130
+#define GPO_UART2_PAD_SOUT 131
+#define GPO_UART3_PAD_SOUT 132
+#define GPO_USB_DRV_BUS 133
+
+#define GPI_CPU_JTAG_TCK 0
+#define GPI_CPU_JTAG_TDI 1
+#define GPI_CPU_JTAG_TMS 2
+#define GPI_CPU_JTAG_TRST 3
+#define GPI_DMIC_SDIN_BIT0 4
+#define GPI_DMIC_SDIN_BIT1 5
+#define GPI_DSP_JTCK_PAD 6
+#define GPI_DSP_JTDI_PAD 7
+#define GPI_DSP_JTMS_PAD 8
+#define GPI_DSP_TRST_PAD 9
+#define GPI_I2C0_PAD_SCK_IN 10
+#define GPI_I2C0_PAD_SDA_IN 11
+#define GPI_I2C1_PAD_SCK_IN 12
+#define GPI_I2C1_PAD_SDA_IN 13
+#define GPI_I2C2_PAD_SCK_IN 14
+#define GPI_I2C2_PAD_SDA_IN 15
+#define GPI_I2C3_PAD_SCK_IN 16
+#define GPI_I2C3_PAD_SDA_IN 17
+#define GPI_I2SRX_BCLK_IN 18
+#define GPI_I2SRX_LRCK_IN 19
+#define GPI_I2SRX_SDIN_BIT0 20
+#define GPI_I2SRX_SDIN_BIT1 21
+#define GPI_I2SRX_SDIN_BIT2 22
+#define GPI_I2STX_BCLK_IN 23
+#define GPI_I2STX_LRCK_IN 24
+#define GPI_SDIO0_PAD_CARD_DETECT_N 25
+#define GPI_SDIO0_PAD_CARD_WRITE_PRT 26
+#define GPI_SDIO0_PAD_CCMD_IN 27
+#define GPI_SDIO0_PAD_CDATA_IN_BIT0 28
+#define GPI_SDIO0_PAD_CDATA_IN_BIT1 29
+#define GPI_SDIO0_PAD_CDATA_IN_BIT2 30
+#define GPI_SDIO0_PAD_CDATA_IN_BIT3 31
+#define GPI_SDIO0_PAD_CDATA_IN_BIT4 32
+#define GPI_SDIO0_PAD_CDATA_IN_BIT5 33
+#define GPI_SDIO0_PAD_CDATA_IN_BIT6 34
+#define GPI_SDIO0_PAD_CDATA_IN_BIT7 35
+#define GPI_SDIO1_PAD_CARD_DETECT_N 36
+#define GPI_SDIO1_PAD_CARD_WRITE_PRT 37
+#define GPI_SDIO1_PAD_CCMD_IN 38
+#define GPI_SDIO1_PAD_CDATA_IN_BIT0 39
+#define GPI_SDIO1_PAD_CDATA_IN_BIT1 40
+#define GPI_SDIO1_PAD_CDATA_IN_BIT2 41
+#define GPI_SDIO1_PAD_CDATA_IN_BIT3 42
+#define GPI_SDIO1_PAD_CDATA_IN_BIT4 43
+#define GPI_SDIO1_PAD_CDATA_IN_BIT5 44
+#define GPI_SDIO1_PAD_CDATA_IN_BIT6 45
+#define GPI_SDIO1_PAD_CDATA_IN_BIT7 46
+#define GPI_SPDIF_RX_SDIN 47
+#define GPI_SPI0_PAD_RXD 48
+#define GPI_SPI0_PAD_SS_IN_N 49
+#define GPI_SPI1_PAD_RXD 50
+#define GPI_SPI1_PAD_SS_IN_N 51
+#define GPI_SPI2_PAD_RXD 52
+#define GPI_SPI2_PAD_SS_IN_N 53
+#define GPI_SPI2AHB_PAD_RXD_BIT0 54
+#define GPI_SPI2AHB_PAD_RXD_BIT1 55
+#define GPI_SPI2AHB_PAD_RXD_BIT2 56
+#define GPI_SPI2AHB_PAD_RXD_BIT3 57
+#define GPI_SPI2AHB_PAD_SS_N 58
+#define GPI_SPI2AHB_SLV_SCLKIN 59
+#define GPI_SPI3_PAD_RXD 60
+#define GPI_SPI3_PAD_SS_IN_N 61
+#define GPI_UART0_PAD_CTSN 62
+#define GPI_UART0_PAD_DCDN 63
+#define GPI_UART0_PAD_DSRN 64
+#define GPI_UART0_PAD_RIN 65
+#define GPI_UART0_PAD_SIN 66
+#define GPI_UART1_PAD_SIN 67
+#define GPI_UART2_PAD_CTS_N 68
+#define GPI_UART2_PAD_DCD_N 69
+#define GPI_UART2_PAD_DSR_N 70
+#define GPI_UART2_PAD_RI_N 71
+#define GPI_UART2_PAD_SIN 72
+#define GPI_UART3_PAD_SIN 73
+#define GPI_USB_OVER_CURRENT 74
+
+#define GPI_NONE 0xff
+
+#endif /* __DT_BINDINGS_PINCTRL_STARFIVE_JH7100_H__ */
diff --git a/include/dt-bindings/pinctrl/pinctrl-zynq.h b/include/dt-bindings/pinctrl/pinctrl-zynq.h
new file mode 100644
index 000000000000..bbfc345f017d
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-zynq.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MIO pin configuration defines for Xilinx Zynq
+ *
+ * Copyright (C) 2021 Xilinx, Inc.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_ZYNQ_H
+#define _DT_BINDINGS_PINCTRL_ZYNQ_H
+
+/* Configuration options for different power supplies */
+#define IO_STANDARD_LVCMOS18 1
+#define IO_STANDARD_LVCMOS25 2
+#define IO_STANDARD_LVCMOS33 3
+#define IO_STANDARD_HSTL 4
+
+#endif /* _DT_BINDINGS_PINCTRL_ZYNQ_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-zynqmp.h b/include/dt-bindings/pinctrl/pinctrl-zynqmp.h
new file mode 100644
index 000000000000..cdb215734bdf
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-zynqmp.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MIO pin configuration defines for Xilinx ZynqMP
+ *
+ * Copyright (C) 2020 Xilinx, Inc.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_ZYNQMP_H
+#define _DT_BINDINGS_PINCTRL_ZYNQMP_H
+
+/* Bit values for different voltage levels */
+#define IO_STANDARD_LVCMOS33 0
+#define IO_STANDARD_LVCMOS18 1
+
+/* Bit values for Slew Rates */
+#define SLEW_RATE_FAST 0
+#define SLEW_RATE_SLOW 1
+
+#endif /* _DT_BINDINGS_PINCTRL_ZYNQMP_H */
diff --git a/include/dt-bindings/pinctrl/r7s9210-pinctrl.h b/include/dt-bindings/pinctrl/r7s9210-pinctrl.h
index 2d0c23e5d3a7..8736ce038eca 100644
--- a/include/dt-bindings/pinctrl/r7s9210-pinctrl.h
+++ b/include/dt-bindings/pinctrl/r7s9210-pinctrl.h
@@ -42,6 +42,6 @@
/*
* Convert a port and pin label to its global pin index
*/
- #define RZA2_PIN(port, pin) ((port) * RZA2_PINS_PER_PORT + (pin))
+#define RZA2_PIN(port, pin) ((port) * RZA2_PINS_PER_PORT + (pin))
#endif /* __DT_BINDINGS_PINCTRL_RENESAS_RZA2_H */
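The RZA2_PIN() macro whose stray leading space is fixed above linearizes a (port, pin) pair into a single global pin index. A minimal, self-contained C sketch of the expansion follows; RZA2_PINS_PER_PORT is not visible in this hunk and is assumed here to be 8, per the unchanged part of the header:

#include <stdio.h>

#define RZA2_PINS_PER_PORT 8	/* assumed value; defined in the unchanged part of the header */
#define RZA2_PIN(port, pin)	((port) * RZA2_PINS_PER_PORT + (pin))

int main(void)
{
	/* Pin 3 on port 4 maps to global index 4 * 8 + 3 = 35 */
	printf("%d\n", RZA2_PIN(4, 3));
	return 0;
}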
diff --git a/include/dt-bindings/pinctrl/rockchip.h b/include/dt-bindings/pinctrl/rockchip.h
index 6d6bac1c26d7..5f291045e8fd 100644
--- a/include/dt-bindings/pinctrl/rockchip.h
+++ b/include/dt-bindings/pinctrl/rockchip.h
@@ -9,13 +9,6 @@
#ifndef __DT_BINDINGS_ROCKCHIP_PINCTRL_H__
#define __DT_BINDINGS_ROCKCHIP_PINCTRL_H__
-#define RK_GPIO0 0
-#define RK_GPIO1 1
-#define RK_GPIO2 2
-#define RK_GPIO3 3
-#define RK_GPIO4 4
-#define RK_GPIO6 6
-
#define RK_PA0 0
#define RK_PA1 1
#define RK_PA2 2
@@ -50,9 +43,5 @@
#define RK_PD7 31
#define RK_FUNC_GPIO 0
-#define RK_FUNC_1 1 /* deprecated */
-#define RK_FUNC_2 2 /* deprecated */
-#define RK_FUNC_3 3 /* deprecated */
-#define RK_FUNC_4 4 /* deprecated */
#endif
diff --git a/include/dt-bindings/pinctrl/rzg2l-pinctrl.h b/include/dt-bindings/pinctrl/rzg2l-pinctrl.h
new file mode 100644
index 000000000000..c78ed5e5efb7
--- /dev/null
+++ b/include/dt-bindings/pinctrl/rzg2l-pinctrl.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for Renesas RZ/G2L family pinctrl bindings.
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ *
+ */
+
+#ifndef __DT_BINDINGS_RZG2L_PINCTRL_H
+#define __DT_BINDINGS_RZG2L_PINCTRL_H
+
+#define RZG2L_PINS_PER_PORT 8
+
+/*
+ * Create the pin index from its bank and position numbers, and store
+ * the alternate function identifier in the upper 16 bits
+ */
+#define RZG2L_PORT_PINMUX(b, p, f) ((b) * RZG2L_PINS_PER_PORT + (p) | ((f) << 16))
+
+/* Convert a port and pin label to its global pin index */
+#define RZG2L_GPIO(port, pin) ((port) * RZG2L_PINS_PER_PORT + (pin))
+
+#endif /* __DT_BINDINGS_RZG2L_PINCTRL_H */
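RZG2L_PORT_PINMUX() packs two fields into one cell: the low half carries the linear pin index and the upper 16 bits carry the alternate function. Because '+' binds tighter than '|' in C, the unparenthesized sum still groups as intended. A sketch (not kernel code) that unpacks a value the way a consumer would; the RZV2M variant in the next file is identical except for 16 pins per port:

#include <stdio.h>

#define RZG2L_PINS_PER_PORT	8
#define RZG2L_PORT_PINMUX(b, p, f) ((b) * RZG2L_PINS_PER_PORT + (p) | ((f) << 16))

int main(void)
{
	/* Port 43, pin 2, alternate function 1:
	 * low 16 bits = 43 * 8 + 2 = 346, upper 16 bits = 1
	 */
	unsigned int v = RZG2L_PORT_PINMUX(43, 2, 1);

	printf("pin=%u func=%u\n", v & 0xffff, v >> 16);
	return 0;
}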
diff --git a/include/dt-bindings/pinctrl/rzv2m-pinctrl.h b/include/dt-bindings/pinctrl/rzv2m-pinctrl.h
new file mode 100644
index 000000000000..525532cd15da
--- /dev/null
+++ b/include/dt-bindings/pinctrl/rzv2m-pinctrl.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for Renesas RZ/V2M pinctrl bindings.
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ *
+ */
+
+#ifndef __DT_BINDINGS_RZV2M_PINCTRL_H
+#define __DT_BINDINGS_RZV2M_PINCTRL_H
+
+#define RZV2M_PINS_PER_PORT 16
+
+/*
+ * Create the pin index from its bank and position numbers, and store
+ * the alternate function identifier in the upper 16 bits
+ */
+#define RZV2M_PORT_PINMUX(b, p, f) ((b) * RZV2M_PINS_PER_PORT + (p) | ((f) << 16))
+
+/* Convert a port and pin label to its global pin index */
+#define RZV2M_GPIO(port, pin) ((port) * RZV2M_PINS_PER_PORT + (pin))
+
+#endif /* __DT_BINDINGS_RZV2M_PINCTRL_H */
diff --git a/include/dt-bindings/pinctrl/samsung.h b/include/dt-bindings/pinctrl/samsung.h
index b1832506b923..d1da5ff68d0c 100644
--- a/include/dt-bindings/pinctrl/samsung.h
+++ b/include/dt-bindings/pinctrl/samsung.h
@@ -10,6 +10,13 @@
#ifndef __DT_BINDINGS_PINCTRL_SAMSUNG_H__
#define __DT_BINDINGS_PINCTRL_SAMSUNG_H__
+/*
+ * These bindings are deprecated because they do not match the actual
+ * concept of bindings but rather contain pure register values.
+ * Instead include the header in the DTS source directory.
+ */
+#warning "These bindings are deprecated. Instead use the header in the DTS source directory."
+
#define EXYNOS_PIN_PULL_NONE 0
#define EXYNOS_PIN_PULL_DOWN 1
#define EXYNOS_PIN_PULL_UP 3
@@ -36,7 +43,10 @@
#define EXYNOS5260_PIN_DRV_LV4 2
#define EXYNOS5260_PIN_DRV_LV6 3
-/* Drive strengths for Exynos5410, Exynos542x and Exynos5800 */
+/*
+ * Drive strengths for Exynos5410, Exynos542x, Exynos5800 and Exynos850 (except
+ * GPIO_HSI block)
+ */
#define EXYNOS5420_PIN_DRV_LV1 0
#define EXYNOS5420_PIN_DRV_LV2 1
#define EXYNOS5420_PIN_DRV_LV3 2
@@ -56,6 +66,14 @@
#define EXYNOS5433_PIN_DRV_SLOW_SR5 0xc
#define EXYNOS5433_PIN_DRV_SLOW_SR6 0xf
+/* Drive strengths for Exynos850 GPIO_HSI block */
+#define EXYNOS850_HSI_PIN_DRV_LV1 0 /* 1x */
+#define EXYNOS850_HSI_PIN_DRV_LV1_5 1 /* 1.5x */
+#define EXYNOS850_HSI_PIN_DRV_LV2 2 /* 2x */
+#define EXYNOS850_HSI_PIN_DRV_LV2_5 3 /* 2.5x */
+#define EXYNOS850_HSI_PIN_DRV_LV3 4 /* 3x */
+#define EXYNOS850_HSI_PIN_DRV_LV4 5 /* 4x */
+
#define EXYNOS_PIN_FUNC_INPUT 0
#define EXYNOS_PIN_FUNC_OUTPUT 1
#define EXYNOS_PIN_FUNC_2 2
diff --git a/include/dt-bindings/pinctrl/sppctl-sp7021.h b/include/dt-bindings/pinctrl/sppctl-sp7021.h
new file mode 100644
index 000000000000..629aa9b5ffbc
--- /dev/null
+++ b/include/dt-bindings/pinctrl/sppctl-sp7021.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Sunplus SP7021 dt-bindings Pinctrl header file
+ * Copyright (C) Sunplus Tech/Tibbo Tech.
+ * Author: Dvorkin Dmitry <dvorkin@tibbo.com>
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_SPPCTL_SP7021_H__
+#define __DT_BINDINGS_PINCTRL_SPPCTL_SP7021_H__
+
+#include <dt-bindings/pinctrl/sppctl.h>
+
+/*
+ * Please don't change the order of the following defines.
+ * They are based on the order of the 'hardware' control fields
+ * defined in the MOON2 ~ MOON3 registers.
+ */
+#define MUXF_GPIO 0
+#define MUXF_IOP 1
+#define MUXF_L2SW_CLK_OUT 2
+#define MUXF_L2SW_MAC_SMI_MDC 3
+#define MUXF_L2SW_LED_FLASH0 4
+#define MUXF_L2SW_LED_FLASH1 5
+#define MUXF_L2SW_LED_ON0 6
+#define MUXF_L2SW_LED_ON1 7
+#define MUXF_L2SW_MAC_SMI_MDIO 8
+#define MUXF_L2SW_P0_MAC_RMII_TXEN 9
+#define MUXF_L2SW_P0_MAC_RMII_TXD0 10
+#define MUXF_L2SW_P0_MAC_RMII_TXD1 11
+#define MUXF_L2SW_P0_MAC_RMII_CRSDV 12
+#define MUXF_L2SW_P0_MAC_RMII_RXD0 13
+#define MUXF_L2SW_P0_MAC_RMII_RXD1 14
+#define MUXF_L2SW_P0_MAC_RMII_RXER 15
+#define MUXF_L2SW_P1_MAC_RMII_TXEN 16
+#define MUXF_L2SW_P1_MAC_RMII_TXD0 17
+#define MUXF_L2SW_P1_MAC_RMII_TXD1 18
+#define MUXF_L2SW_P1_MAC_RMII_CRSDV 19
+#define MUXF_L2SW_P1_MAC_RMII_RXD0 20
+#define MUXF_L2SW_P1_MAC_RMII_RXD1 21
+#define MUXF_L2SW_P1_MAC_RMII_RXER 22
+#define MUXF_DAISY_MODE 23
+#define MUXF_SDIO_CLK 24
+#define MUXF_SDIO_CMD 25
+#define MUXF_SDIO_D0 26
+#define MUXF_SDIO_D1 27
+#define MUXF_SDIO_D2 28
+#define MUXF_SDIO_D3 29
+#define MUXF_PWM0 30
+#define MUXF_PWM1 31
+#define MUXF_PWM2 32
+#define MUXF_PWM3 33
+#define MUXF_PWM4 34
+#define MUXF_PWM5 35
+#define MUXF_PWM6 36
+#define MUXF_PWM7 37
+#define MUXF_ICM0_D 38
+#define MUXF_ICM1_D 39
+#define MUXF_ICM2_D 40
+#define MUXF_ICM3_D 41
+#define MUXF_ICM0_CLK 42
+#define MUXF_ICM1_CLK 43
+#define MUXF_ICM2_CLK 44
+#define MUXF_ICM3_CLK 45
+#define MUXF_SPIM0_INT 46
+#define MUXF_SPIM0_CLK 47
+#define MUXF_SPIM0_EN 48
+#define MUXF_SPIM0_DO 49
+#define MUXF_SPIM0_DI 50
+#define MUXF_SPIM1_INT 51
+#define MUXF_SPIM1_CLK 52
+#define MUXF_SPIM1_EN 53
+#define MUXF_SPIM1_DO 54
+#define MUXF_SPIM1_DI 55
+#define MUXF_SPIM2_INT 56
+#define MUXF_SPIM2_CLK 57
+#define MUXF_SPIM2_EN 58
+#define MUXF_SPIM2_DO 59
+#define MUXF_SPIM2_DI 60
+#define MUXF_SPIM3_INT 61
+#define MUXF_SPIM3_CLK 62
+#define MUXF_SPIM3_EN 63
+#define MUXF_SPIM3_DO 64
+#define MUXF_SPIM3_DI 65
+#define MUXF_SPI0S_INT 66
+#define MUXF_SPI0S_CLK 67
+#define MUXF_SPI0S_EN 68
+#define MUXF_SPI0S_DO 69
+#define MUXF_SPI0S_DI 70
+#define MUXF_SPI1S_INT 71
+#define MUXF_SPI1S_CLK 72
+#define MUXF_SPI1S_EN 73
+#define MUXF_SPI1S_DO 74
+#define MUXF_SPI1S_DI 75
+#define MUXF_SPI2S_INT 76
+#define MUXF_SPI2S_CLK 77
+#define MUXF_SPI2S_EN 78
+#define MUXF_SPI2S_DO 79
+#define MUXF_SPI2S_DI 80
+#define MUXF_SPI3S_INT 81
+#define MUXF_SPI3S_CLK 82
+#define MUXF_SPI3S_EN 83
+#define MUXF_SPI3S_DO 84
+#define MUXF_SPI3S_DI 85
+#define MUXF_I2CM0_CLK 86
+#define MUXF_I2CM0_DAT 87
+#define MUXF_I2CM1_CLK 88
+#define MUXF_I2CM1_DAT 89
+#define MUXF_I2CM2_CLK 90
+#define MUXF_I2CM2_DAT 91
+#define MUXF_I2CM3_CLK 92
+#define MUXF_I2CM3_DAT 93
+#define MUXF_UA1_TX 94
+#define MUXF_UA1_RX 95
+#define MUXF_UA1_CTS 96
+#define MUXF_UA1_RTS 97
+#define MUXF_UA2_TX 98
+#define MUXF_UA2_RX 99
+#define MUXF_UA2_CTS 100
+#define MUXF_UA2_RTS 101
+#define MUXF_UA3_TX 102
+#define MUXF_UA3_RX 103
+#define MUXF_UA3_CTS 104
+#define MUXF_UA3_RTS 105
+#define MUXF_UA4_TX 106
+#define MUXF_UA4_RX 107
+#define MUXF_UA4_CTS 108
+#define MUXF_UA4_RTS 109
+#define MUXF_TIMER0_INT 110
+#define MUXF_TIMER1_INT 111
+#define MUXF_TIMER2_INT 112
+#define MUXF_TIMER3_INT 113
+#define MUXF_GPIO_INT0 114
+#define MUXF_GPIO_INT1 115
+#define MUXF_GPIO_INT2 116
+#define MUXF_GPIO_INT3 117
+#define MUXF_GPIO_INT4 118
+#define MUXF_GPIO_INT5 119
+#define MUXF_GPIO_INT6 120
+#define MUXF_GPIO_INT7 121
+
+/*
+ * Please don't change the order of the following defines.
+ * They are based on the order of the items in the 'sppctl_list_funcs'
+ * array in the Sunplus pinctrl driver.
+ */
+#define GROP_SPI_FLASH 122
+#define GROP_SPI_FLASH_4BIT 123
+#define GROP_SPI_NAND 124
+#define GROP_CARD0_EMMC 125
+#define GROP_SD_CARD 126
+#define GROP_UA0 127
+#define GROP_ACHIP_DEBUG 128
+#define GROP_ACHIP_UA2AXI 129
+#define GROP_FPGA_IFX 130
+#define GROP_HDMI_TX 131
+#define GROP_AUD_EXT_ADC_IFX0 132
+#define GROP_AUD_EXT_DAC_IFX0 133
+#define GROP_SPDIF_RX 134
+#define GROP_SPDIF_TX 135
+#define GROP_TDMTX_IFX0 136
+#define GROP_TDMRX_IFX0 137
+#define GROP_PDMRX_IFX0 138
+#define GROP_PCM_IEC_TX 139
+#define GROP_LCDIF 140
+#define GROP_DVD_DSP_DEBUG 141
+#define GROP_I2C_DEBUG 142
+#define GROP_I2C_SLAVE 143
+#define GROP_WAKEUP 144
+#define GROP_UART2AXI 145
+#define GROP_USB0_I2C 146
+#define GROP_USB1_I2C 147
+#define GROP_USB0_OTG 148
+#define GROP_USB1_OTG 149
+#define GROP_UPHY0_DEBUG 150
+#define GROP_UPHY1_DEBUG 151
+#define GROP_UPHY0_EXT 152
+#define GROP_PROBE_PORT 153
+
+#endif
diff --git a/include/dt-bindings/pinctrl/sppctl.h b/include/dt-bindings/pinctrl/sppctl.h
new file mode 100644
index 000000000000..50557265dbfc
--- /dev/null
+++ b/include/dt-bindings/pinctrl/sppctl.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Sunplus dt-bindings Pinctrl header file
+ * Copyright (C) Sunplus Tech / Tibbo Tech.
+ * Author: Dvorkin Dmitry <dvorkin@tibbo.com>
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_SPPCTL_H__
+#define __DT_BINDINGS_PINCTRL_SPPCTL_H__
+
+#define IOP_G_MASTE (0x01 << 0)
+#define IOP_G_FIRST (0x01 << 1)
+
+#define SPPCTL_PCTL_G_PMUX (0x00 | IOP_G_MASTE)
+#define SPPCTL_PCTL_G_GPIO (IOP_G_FIRST | IOP_G_MASTE)
+#define SPPCTL_PCTL_G_IOPP (IOP_G_FIRST | 0x00)
+
+#define SPPCTL_PCTL_L_OUT (0x01 << 0) /* Output LOW */
+#define SPPCTL_PCTL_L_OU1 (0x01 << 1) /* Output HIGH */
+#define SPPCTL_PCTL_L_INV (0x01 << 2) /* Input Invert */
+#define SPPCTL_PCTL_L_ONV (0x01 << 3) /* Output Invert */
+#define SPPCTL_PCTL_L_ODR (0x01 << 4) /* Output Open Drain */
+
+/*
+ * pack into 32-bit value:
+ * pin# (8bit), typ (8bit), function (8bit), flag (8bit)
+ */
+#define SPPCTL_IOPAD(pin, typ, fun, flg) (((pin) << 24) | ((typ) << 16) | \
+ ((fun) << 8) | (flg))
+
+#endif
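SPPCTL_IOPAD() above packs four byte-wide fields into one 32-bit cell, pin number in the top byte down to flags in the bottom byte. A small C sketch using constants that do appear in these two Sunplus headers (SPPCTL_PCTL_G_GPIO = 3, MUXF_GPIO = 0, SPPCTL_PCTL_L_OUT = 1); the field combination chosen is only illustrative:

#include <stdio.h>

#define SPPCTL_IOPAD(pin, typ, fun, flg) (((pin) << 24) | ((typ) << 16) | \
					  ((fun) << 8) | (flg))

int main(void)
{
	/* Illustrative combination: pin 11 as a plain GPIO driven low,
	 * i.e. typ = SPPCTL_PCTL_G_GPIO (3), fun = MUXF_GPIO (0),
	 * flg = SPPCTL_PCTL_L_OUT (1).
	 */
	unsigned int v = SPPCTL_IOPAD(11, 3, 0, 1);

	printf("pin=%u typ=%u fun=%u flg=%u\n",
	       v >> 24, (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);
	return 0;
}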
diff --git a/include/dt-bindings/power/fsl,imx93-power.h b/include/dt-bindings/power/fsl,imx93-power.h
new file mode 100644
index 000000000000..17f9f015bf7d
--- /dev/null
+++ b/include/dt-bindings/power/fsl,imx93-power.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright 2022 NXP
+ */
+
+#ifndef __DT_BINDINGS_IMX93_POWER_H__
+#define __DT_BINDINGS_IMX93_POWER_H__
+
+#define IMX93_MEDIABLK_PD_MIPI_DSI 0
+#define IMX93_MEDIABLK_PD_MIPI_CSI 1
+#define IMX93_MEDIABLK_PD_PXP 2
+#define IMX93_MEDIABLK_PD_LCDIF 3
+#define IMX93_MEDIABLK_PD_ISI 4
+
+#endif
diff --git a/include/dt-bindings/power/imx8mm-power.h b/include/dt-bindings/power/imx8mm-power.h
new file mode 100644
index 000000000000..648938f24c8e
--- /dev/null
+++ b/include/dt-bindings/power/imx8mm-power.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (C) 2020 Pengutronix, Lucas Stach <kernel@pengutronix.de>
+ */
+
+#ifndef __DT_BINDINGS_IMX8MM_POWER_H__
+#define __DT_BINDINGS_IMX8MM_POWER_H__
+
+#define IMX8MM_POWER_DOMAIN_HSIOMIX 0
+#define IMX8MM_POWER_DOMAIN_PCIE 1
+#define IMX8MM_POWER_DOMAIN_OTG1 2
+#define IMX8MM_POWER_DOMAIN_OTG2 3
+#define IMX8MM_POWER_DOMAIN_GPUMIX 4
+#define IMX8MM_POWER_DOMAIN_GPU 5
+#define IMX8MM_POWER_DOMAIN_VPUMIX 6
+#define IMX8MM_POWER_DOMAIN_VPUG1 7
+#define IMX8MM_POWER_DOMAIN_VPUG2 8
+#define IMX8MM_POWER_DOMAIN_VPUH1 9
+#define IMX8MM_POWER_DOMAIN_DISPMIX 10
+#define IMX8MM_POWER_DOMAIN_MIPI 11
+
+#define IMX8MM_VPUBLK_PD_G1 0
+#define IMX8MM_VPUBLK_PD_G2 1
+#define IMX8MM_VPUBLK_PD_H1 2
+
+#define IMX8MM_DISPBLK_PD_CSI_BRIDGE 0
+#define IMX8MM_DISPBLK_PD_LCDIF 1
+#define IMX8MM_DISPBLK_PD_MIPI_DSI 2
+#define IMX8MM_DISPBLK_PD_MIPI_CSI 3
+
+#endif
diff --git a/include/dt-bindings/power/imx8mn-power.h b/include/dt-bindings/power/imx8mn-power.h
new file mode 100644
index 000000000000..eedd0e581939
--- /dev/null
+++ b/include/dt-bindings/power/imx8mn-power.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (C) 2020 Compass Electronics Group, LLC
+ */
+
+#ifndef __DT_BINDINGS_IMX8MN_POWER_H__
+#define __DT_BINDINGS_IMX8MN_POWER_H__
+
+#define IMX8MN_POWER_DOMAIN_HSIOMIX 0
+#define IMX8MN_POWER_DOMAIN_OTG1 1
+#define IMX8MN_POWER_DOMAIN_GPUMIX 2
+#define IMX8MN_POWER_DOMAIN_DISPMIX 3
+#define IMX8MN_POWER_DOMAIN_MIPI 4
+
+#define IMX8MN_DISPBLK_PD_MIPI_DSI 0
+#define IMX8MN_DISPBLK_PD_MIPI_CSI 1
+#define IMX8MN_DISPBLK_PD_LCDIF 2
+#define IMX8MN_DISPBLK_PD_ISI 3
+
+#endif
diff --git a/include/dt-bindings/power/imx8mp-power.h b/include/dt-bindings/power/imx8mp-power.h
new file mode 100644
index 000000000000..2fe3c2abad13
--- /dev/null
+++ b/include/dt-bindings/power/imx8mp-power.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (C) 2020 Pengutronix, Sascha Hauer <kernel@pengutronix.de>
+ */
+
+#ifndef __DT_BINDINGS_IMX8MP_POWER_DOMAIN_POWER_H__
+#define __DT_BINDINGS_IMX8MP_POWER_DOMAIN_POWER_H__
+
+#define IMX8MP_POWER_DOMAIN_MIPI_PHY1 0
+#define IMX8MP_POWER_DOMAIN_PCIE_PHY 1
+#define IMX8MP_POWER_DOMAIN_USB1_PHY 2
+#define IMX8MP_POWER_DOMAIN_USB2_PHY 3
+#define IMX8MP_POWER_DOMAIN_MLMIX 4
+#define IMX8MP_POWER_DOMAIN_AUDIOMIX 5
+#define IMX8MP_POWER_DOMAIN_GPU2D 6
+#define IMX8MP_POWER_DOMAIN_GPUMIX 7
+#define IMX8MP_POWER_DOMAIN_VPUMIX 8
+#define IMX8MP_POWER_DOMAIN_GPU3D 9
+#define IMX8MP_POWER_DOMAIN_MEDIAMIX 10
+#define IMX8MP_POWER_DOMAIN_VPU_G1 11
+#define IMX8MP_POWER_DOMAIN_VPU_G2 12
+#define IMX8MP_POWER_DOMAIN_VPU_VC8000E 13
+#define IMX8MP_POWER_DOMAIN_HDMIMIX 14
+#define IMX8MP_POWER_DOMAIN_HDMI_PHY 15
+#define IMX8MP_POWER_DOMAIN_MIPI_PHY2 16
+#define IMX8MP_POWER_DOMAIN_HSIOMIX 17
+#define IMX8MP_POWER_DOMAIN_MEDIAMIX_ISPDWP 18
+
+#define IMX8MP_HSIOBLK_PD_USB 0
+#define IMX8MP_HSIOBLK_PD_USB_PHY1 1
+#define IMX8MP_HSIOBLK_PD_USB_PHY2 2
+#define IMX8MP_HSIOBLK_PD_PCIE 3
+#define IMX8MP_HSIOBLK_PD_PCIE_PHY 4
+
+#define IMX8MP_MEDIABLK_PD_MIPI_DSI_1 0
+#define IMX8MP_MEDIABLK_PD_MIPI_CSI2_1 1
+#define IMX8MP_MEDIABLK_PD_LCDIF_1 2
+#define IMX8MP_MEDIABLK_PD_ISI 3
+#define IMX8MP_MEDIABLK_PD_MIPI_CSI2_2 4
+#define IMX8MP_MEDIABLK_PD_LCDIF_2 5
+#define IMX8MP_MEDIABLK_PD_ISP 6
+#define IMX8MP_MEDIABLK_PD_DWE 7
+#define IMX8MP_MEDIABLK_PD_MIPI_DSI_2 8
+
+#define IMX8MP_HDMIBLK_PD_IRQSTEER 0
+#define IMX8MP_HDMIBLK_PD_LCDIF 1
+#define IMX8MP_HDMIBLK_PD_PAI 2
+#define IMX8MP_HDMIBLK_PD_PVI 3
+#define IMX8MP_HDMIBLK_PD_TRNG 4
+#define IMX8MP_HDMIBLK_PD_HDMI_TX 5
+#define IMX8MP_HDMIBLK_PD_HDMI_TX_PHY 6
+#define IMX8MP_HDMIBLK_PD_HDCP 7
+#define IMX8MP_HDMIBLK_PD_HRV 8
+
+#define IMX8MP_VPUBLK_PD_G1 0
+#define IMX8MP_VPUBLK_PD_G2 1
+#define IMX8MP_VPUBLK_PD_VC8000E 2
+
+#endif
diff --git a/include/dt-bindings/power/imx8mq-power.h b/include/dt-bindings/power/imx8mq-power.h
index 8a513bd9166e..9f7d0f1e7c32 100644
--- a/include/dt-bindings/power/imx8mq-power.h
+++ b/include/dt-bindings/power/imx8mq-power.h
@@ -18,4 +18,7 @@
#define IMX8M_POWER_DOMAIN_MIPI_CSI2 9
#define IMX8M_POWER_DOMAIN_PCIE2 10
+#define IMX8MQ_VPUBLK_PD_G1 0
+#define IMX8MQ_VPUBLK_PD_G2 1
+
#endif
diff --git a/include/dt-bindings/power/imx8ulp-power.h b/include/dt-bindings/power/imx8ulp-power.h
new file mode 100644
index 000000000000..a556b2e96df1
--- /dev/null
+++ b/include/dt-bindings/power/imx8ulp-power.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright 2021 NXP
+ */
+
+#ifndef __DT_BINDINGS_IMX8ULP_POWER_H__
+#define __DT_BINDINGS_IMX8ULP_POWER_H__
+
+#define IMX8ULP_PD_DMA1 0
+#define IMX8ULP_PD_FLEXSPI2 1
+#define IMX8ULP_PD_USB0 2
+#define IMX8ULP_PD_USDHC0 3
+#define IMX8ULP_PD_USDHC1 4
+#define IMX8ULP_PD_USDHC2_USB1 5
+#define IMX8ULP_PD_DCNANO 6
+#define IMX8ULP_PD_EPDC 7
+#define IMX8ULP_PD_DMA2 8
+#define IMX8ULP_PD_GPU2D 9
+#define IMX8ULP_PD_GPU3D 10
+#define IMX8ULP_PD_HIFI4 11
+#define IMX8ULP_PD_ISI 12
+#define IMX8ULP_PD_MIPI_CSI 13
+#define IMX8ULP_PD_MIPI_DSI 14
+#define IMX8ULP_PD_PXP 15
+
+#endif
diff --git a/include/dt-bindings/power/marvell,mmp2.h b/include/dt-bindings/power/marvell,mmp2.h
new file mode 100644
index 000000000000..c53d2b3e1057
--- /dev/null
+++ b/include/dt-bindings/power/marvell,mmp2.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DTS_MARVELL_MMP2_POWER_H
+#define __DTS_MARVELL_MMP2_POWER_H
+
+#define MMP2_POWER_DOMAIN_GPU 0
+#define MMP2_POWER_DOMAIN_AUDIO 1
+#define MMP3_POWER_DOMAIN_CAMERA 2
+
+#define MMP2_NR_POWER_DOMAINS 3
+
+#endif
diff --git a/include/dt-bindings/power/meson-a1-power.h b/include/dt-bindings/power/meson-a1-power.h
new file mode 100644
index 000000000000..6cf50bfb8ccf
--- /dev/null
+++ b/include/dt-bindings/power/meson-a1-power.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2019 Amlogic, Inc.
+ * Author: Jianxin Pan <jianxin.pan@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_MESON_A1_POWER_H
+#define _DT_BINDINGS_MESON_A1_POWER_H
+
+#define PWRC_DSPA_ID 8
+#define PWRC_DSPB_ID 9
+#define PWRC_UART_ID 10
+#define PWRC_DMC_ID 11
+#define PWRC_I2C_ID 12
+#define PWRC_PSRAM_ID 13
+#define PWRC_ACODEC_ID 14
+#define PWRC_AUDIO_ID 15
+#define PWRC_OTP_ID 16
+#define PWRC_DMA_ID 17
+#define PWRC_SD_EMMC_ID 18
+#define PWRC_RAMA_ID 19
+#define PWRC_RAMB_ID 20
+#define PWRC_IR_ID 21
+#define PWRC_SPICC_ID 22
+#define PWRC_SPIFC_ID 23
+#define PWRC_USB_ID 24
+#define PWRC_NIC_ID 25
+#define PWRC_PDMIN_ID 26
+#define PWRC_RSA_ID 27
+#define PWRC_MAX_ID 28
+
+#endif
diff --git a/include/dt-bindings/power/meson-axg-power.h b/include/dt-bindings/power/meson-axg-power.h
new file mode 100644
index 000000000000..e5243884b249
--- /dev/null
+++ b/include/dt-bindings/power/meson-axg-power.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2020 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef _DT_BINDINGS_MESON_AXG_POWER_H
+#define _DT_BINDINGS_MESON_AXG_POWER_H
+
+#define PWRC_AXG_VPU_ID 0
+#define PWRC_AXG_ETHERNET_MEM_ID 1
+#define PWRC_AXG_AUDIO_ID 2
+
+#endif
diff --git a/include/dt-bindings/power/meson-gxbb-power.h b/include/dt-bindings/power/meson-gxbb-power.h
new file mode 100644
index 000000000000..1262dac696c0
--- /dev/null
+++ b/include/dt-bindings/power/meson-gxbb-power.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef _DT_BINDINGS_MESON_GXBB_POWER_H
+#define _DT_BINDINGS_MESON_GXBB_POWER_H
+
+#define PWRC_GXBB_VPU_ID 0
+#define PWRC_GXBB_ETHERNET_MEM_ID 1
+
+#endif
diff --git a/include/dt-bindings/power/meson-s4-power.h b/include/dt-bindings/power/meson-s4-power.h
new file mode 100644
index 000000000000..462dd2cb938b
--- /dev/null
+++ b/include/dt-bindings/power/meson-s4-power.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2021 Amlogic, Inc.
+ * Author: Shunzhou Jiang <shunzhou.jiang@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_MESON_S4_POWER_H
+#define _DT_BINDINGS_MESON_S4_POWER_H
+
+#define PWRC_S4_DOS_HEVC_ID 0
+#define PWRC_S4_DOS_VDEC_ID 1
+#define PWRC_S4_VPU_HDMI_ID 2
+#define PWRC_S4_USB_COMB_ID 3
+#define PWRC_S4_GE2D_ID 4
+#define PWRC_S4_ETH_ID 5
+#define PWRC_S4_DEMOD_ID 6
+#define PWRC_S4_AUDIO_ID 7
+
+#endif
diff --git a/include/dt-bindings/power/meson8-power.h b/include/dt-bindings/power/meson8-power.h
new file mode 100644
index 000000000000..dd8b2ddb82a7
--- /dev/null
+++ b/include/dt-bindings/power/meson8-power.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2019 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ */
+
+#ifndef _DT_BINDINGS_MESON8_POWER_H
+#define _DT_BINDINGS_MESON8_POWER_H
+
+#define PWRC_MESON8_VPU_ID 0
+#define PWRC_MESON8_ETHERNET_MEM_ID 1
+#define PWRC_MESON8_AUDIO_DSP_MEM_ID 2
+
+#endif /* _DT_BINDINGS_MESON8_POWER_H */
diff --git a/include/dt-bindings/power/mt6795-power.h b/include/dt-bindings/power/mt6795-power.h
new file mode 100644
index 000000000000..b0fc26cb1da4
--- /dev/null
+++ b/include/dt-bindings/power/mt6795-power.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+#ifndef _DT_BINDINGS_POWER_MT6795_POWER_H
+#define _DT_BINDINGS_POWER_MT6795_POWER_H
+
+#define MT6795_POWER_DOMAIN_MM 0
+#define MT6795_POWER_DOMAIN_VDEC 1
+#define MT6795_POWER_DOMAIN_VENC 2
+#define MT6795_POWER_DOMAIN_ISP 3
+#define MT6795_POWER_DOMAIN_MJC 4
+#define MT6795_POWER_DOMAIN_AUDIO 5
+#define MT6795_POWER_DOMAIN_MFG_ASYNC 6
+#define MT6795_POWER_DOMAIN_MFG_2D 7
+#define MT6795_POWER_DOMAIN_MFG 8
+#define MT6795_POWER_DOMAIN_MODEM 9
+
+#endif /* _DT_BINDINGS_POWER_MT6795_POWER_H */
diff --git a/include/dt-bindings/power/mt6797-power.h b/include/dt-bindings/power/mt6797-power.h
index a60c1d81cf75..bd451d860e6a 100644
--- a/include/dt-bindings/power/mt6797-power.h
+++ b/include/dt-bindings/power/mt6797-power.h
@@ -1,14 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017 MediaTek Inc.
* Author: Mars.C <mars.cheng@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DT_BINDINGS_POWER_MT6797_POWER_H
diff --git a/include/dt-bindings/power/mt8167-power.h b/include/dt-bindings/power/mt8167-power.h
new file mode 100644
index 000000000000..c8ec9983a4bc
--- /dev/null
+++ b/include/dt-bindings/power/mt8167-power.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8167_POWER_H
+#define _DT_BINDINGS_POWER_MT8167_POWER_H
+
+#define MT8167_POWER_DOMAIN_MM 0
+#define MT8167_POWER_DOMAIN_VDEC 1
+#define MT8167_POWER_DOMAIN_ISP 2
+#define MT8167_POWER_DOMAIN_CONN 3
+#define MT8167_POWER_DOMAIN_MFG_ASYNC 4
+#define MT8167_POWER_DOMAIN_MFG_2D 5
+#define MT8167_POWER_DOMAIN_MFG 6
+
+#endif /* _DT_BINDINGS_POWER_MT8167_POWER_H */
diff --git a/include/dt-bindings/power/mt8183-power.h b/include/dt-bindings/power/mt8183-power.h
new file mode 100644
index 000000000000..d1ab387ba8c7
--- /dev/null
+++ b/include/dt-bindings/power/mt8183-power.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8183_POWER_H
+#define _DT_BINDINGS_POWER_MT8183_POWER_H
+
+#define MT8183_POWER_DOMAIN_AUDIO 0
+#define MT8183_POWER_DOMAIN_CONN 1
+#define MT8183_POWER_DOMAIN_MFG_ASYNC 2
+#define MT8183_POWER_DOMAIN_MFG 3
+#define MT8183_POWER_DOMAIN_MFG_CORE0 4
+#define MT8183_POWER_DOMAIN_MFG_CORE1 5
+#define MT8183_POWER_DOMAIN_MFG_2D 6
+#define MT8183_POWER_DOMAIN_DISP 7
+#define MT8183_POWER_DOMAIN_CAM 8
+#define MT8183_POWER_DOMAIN_ISP 9
+#define MT8183_POWER_DOMAIN_VDEC 10
+#define MT8183_POWER_DOMAIN_VENC 11
+#define MT8183_POWER_DOMAIN_VPU_TOP 12
+#define MT8183_POWER_DOMAIN_VPU_CORE0 13
+#define MT8183_POWER_DOMAIN_VPU_CORE1 14
+
+#endif /* _DT_BINDINGS_POWER_MT8183_POWER_H */
diff --git a/include/dt-bindings/power/mt8186-power.h b/include/dt-bindings/power/mt8186-power.h
new file mode 100644
index 000000000000..429f7197f6b6
--- /dev/null
+++ b/include/dt-bindings/power/mt8186-power.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8186_POWER_H
+#define _DT_BINDINGS_POWER_MT8186_POWER_H
+
+#define MT8186_POWER_DOMAIN_MFG0 0
+#define MT8186_POWER_DOMAIN_MFG1 1
+#define MT8186_POWER_DOMAIN_MFG2 2
+#define MT8186_POWER_DOMAIN_MFG3 3
+#define MT8186_POWER_DOMAIN_SSUSB 4
+#define MT8186_POWER_DOMAIN_SSUSB_P1 5
+#define MT8186_POWER_DOMAIN_DIS 6
+#define MT8186_POWER_DOMAIN_IMG 7
+#define MT8186_POWER_DOMAIN_IMG2 8
+#define MT8186_POWER_DOMAIN_IPE 9
+#define MT8186_POWER_DOMAIN_CAM 10
+#define MT8186_POWER_DOMAIN_CAM_RAWA 11
+#define MT8186_POWER_DOMAIN_CAM_RAWB 12
+#define MT8186_POWER_DOMAIN_VENC 13
+#define MT8186_POWER_DOMAIN_VDEC 14
+#define MT8186_POWER_DOMAIN_WPE 15
+#define MT8186_POWER_DOMAIN_CONN_ON 16
+#define MT8186_POWER_DOMAIN_CSIRX_TOP 17
+#define MT8186_POWER_DOMAIN_ADSP_AO 18
+#define MT8186_POWER_DOMAIN_ADSP_INFRA 19
+#define MT8186_POWER_DOMAIN_ADSP_TOP 20
+
+#endif /* _DT_BINDINGS_POWER_MT8186_POWER_H */
diff --git a/include/dt-bindings/power/mt8192-power.h b/include/dt-bindings/power/mt8192-power.h
new file mode 100644
index 000000000000..4eaa53d7270a
--- /dev/null
+++ b/include/dt-bindings/power/mt8192-power.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8192_POWER_H
+#define _DT_BINDINGS_POWER_MT8192_POWER_H
+
+#define MT8192_POWER_DOMAIN_AUDIO 0
+#define MT8192_POWER_DOMAIN_CONN 1
+#define MT8192_POWER_DOMAIN_MFG0 2
+#define MT8192_POWER_DOMAIN_MFG1 3
+#define MT8192_POWER_DOMAIN_MFG2 4
+#define MT8192_POWER_DOMAIN_MFG3 5
+#define MT8192_POWER_DOMAIN_MFG4 6
+#define MT8192_POWER_DOMAIN_MFG5 7
+#define MT8192_POWER_DOMAIN_MFG6 8
+#define MT8192_POWER_DOMAIN_DISP 9
+#define MT8192_POWER_DOMAIN_IPE 10
+#define MT8192_POWER_DOMAIN_ISP 11
+#define MT8192_POWER_DOMAIN_ISP2 12
+#define MT8192_POWER_DOMAIN_MDP 13
+#define MT8192_POWER_DOMAIN_VENC 14
+#define MT8192_POWER_DOMAIN_VDEC 15
+#define MT8192_POWER_DOMAIN_VDEC2 16
+#define MT8192_POWER_DOMAIN_CAM 17
+#define MT8192_POWER_DOMAIN_CAM_RAWA 18
+#define MT8192_POWER_DOMAIN_CAM_RAWB 19
+#define MT8192_POWER_DOMAIN_CAM_RAWC 20
+
+#endif /* _DT_BINDINGS_POWER_MT8192_POWER_H */
diff --git a/include/dt-bindings/power/mt8195-power.h b/include/dt-bindings/power/mt8195-power.h
new file mode 100644
index 000000000000..b20ca4b3e3a8
--- /dev/null
+++ b/include/dt-bindings/power/mt8195-power.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8195_POWER_H
+#define _DT_BINDINGS_POWER_MT8195_POWER_H
+
+#define MT8195_POWER_DOMAIN_PCIE_MAC_P0 0
+#define MT8195_POWER_DOMAIN_PCIE_MAC_P1 1
+#define MT8195_POWER_DOMAIN_PCIE_PHY 2
+#define MT8195_POWER_DOMAIN_SSUSB_PCIE_PHY 3
+#define MT8195_POWER_DOMAIN_CSI_RX_TOP 4
+#define MT8195_POWER_DOMAIN_ETHER 5
+#define MT8195_POWER_DOMAIN_ADSP 6
+#define MT8195_POWER_DOMAIN_AUDIO 7
+#define MT8195_POWER_DOMAIN_MFG0 8
+#define MT8195_POWER_DOMAIN_MFG1 9
+#define MT8195_POWER_DOMAIN_MFG2 10
+#define MT8195_POWER_DOMAIN_MFG3 11
+#define MT8195_POWER_DOMAIN_MFG4 12
+#define MT8195_POWER_DOMAIN_MFG5 13
+#define MT8195_POWER_DOMAIN_MFG6 14
+#define MT8195_POWER_DOMAIN_VPPSYS0 15
+#define MT8195_POWER_DOMAIN_VDOSYS0 16
+#define MT8195_POWER_DOMAIN_VPPSYS1 17
+#define MT8195_POWER_DOMAIN_VDOSYS1 18
+#define MT8195_POWER_DOMAIN_DP_TX 19
+#define MT8195_POWER_DOMAIN_EPD_TX 20
+#define MT8195_POWER_DOMAIN_HDMI_TX 21
+#define MT8195_POWER_DOMAIN_WPESYS 22
+#define MT8195_POWER_DOMAIN_VDEC0 23
+#define MT8195_POWER_DOMAIN_VDEC1 24
+#define MT8195_POWER_DOMAIN_VDEC2 25
+#define MT8195_POWER_DOMAIN_VENC 26
+#define MT8195_POWER_DOMAIN_VENC_CORE1 27
+#define MT8195_POWER_DOMAIN_IMG 28
+#define MT8195_POWER_DOMAIN_DIP 29
+#define MT8195_POWER_DOMAIN_IPE 30
+#define MT8195_POWER_DOMAIN_CAM 31
+#define MT8195_POWER_DOMAIN_CAM_RAWA 32
+#define MT8195_POWER_DOMAIN_CAM_RAWB 33
+#define MT8195_POWER_DOMAIN_CAM_MRAW 34
+
+#endif /* _DT_BINDINGS_POWER_MT8195_POWER_H */
diff --git a/include/dt-bindings/power/qcom-aoss-qmp.h b/include/dt-bindings/power/qcom-aoss-qmp.h
deleted file mode 100644
index ec336d31dee4..000000000000
--- a/include/dt-bindings/power/qcom-aoss-qmp.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, Linaro Ltd. */
-
-#ifndef __DT_BINDINGS_POWER_QCOM_AOSS_QMP_H
-#define __DT_BINDINGS_POWER_QCOM_AOSS_QMP_H
-
-#define AOSS_QMP_LS_CDSP 0
-#define AOSS_QMP_LS_LPASS 1
-#define AOSS_QMP_LS_MODEM 2
-#define AOSS_QMP_LS_SLPI 3
-#define AOSS_QMP_LS_SPSS 4
-#define AOSS_QMP_LS_VENUS 5
-
-#endif
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
index 3f74096d5a7c..f5f82dde7399 100644
--- a/include/dt-bindings/power/qcom-rpmpd.h
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -15,6 +15,39 @@
#define SDM845_GFX 7
#define SDM845_MSS 8
+/* SDX55 Power Domain Indexes */
+#define SDX55_MSS 0
+#define SDX55_MX 1
+#define SDX55_CX 2
+
+/* SDX65 Power Domain Indexes */
+#define SDX65_MSS 0
+#define SDX65_MX 1
+#define SDX65_MX_AO 2
+#define SDX65_CX 3
+#define SDX65_CX_AO 4
+#define SDX65_MXC 5
+
+/* SM6350 Power Domain Indexes */
+#define SM6350_CX 0
+#define SM6350_GFX 1
+#define SM6350_LCX 2
+#define SM6350_LMX 3
+#define SM6350_MSS 4
+#define SM6350_MX 5
+
+/* SM6375 Power Domain Indexes */
+#define SM6375_VDDCX 0
+#define SM6375_VDDCX_AO 1
+#define SM6375_VDDCX_VFL 2
+#define SM6375_VDDMX 3
+#define SM6375_VDDMX_AO 4
+#define SM6375_VDDMX_VFL 5
+#define SM6375_VDDGX 6
+#define SM6375_VDDGX_AO 7
+#define SM6375_VDD_LPI_CX 8
+#define SM6375_VDD_LPI_MX 9
+
/* SM8150 Power Domain Indexes */
#define SM8150_MSS 0
#define SM8150_EBI 1
@@ -28,6 +61,48 @@
#define SM8150_MMCX 9
#define SM8150_MMCX_AO 10
+/* SM8250 Power Domain Indexes */
+#define SM8250_CX 0
+#define SM8250_CX_AO 1
+#define SM8250_EBI 2
+#define SM8250_GFX 3
+#define SM8250_LCX 4
+#define SM8250_LMX 5
+#define SM8250_MMCX 6
+#define SM8250_MMCX_AO 7
+#define SM8250_MX 8
+#define SM8250_MX_AO 9
+
+/* SM8350 Power Domain Indexes */
+#define SM8350_CX 0
+#define SM8350_CX_AO 1
+#define SM8350_EBI 2
+#define SM8350_GFX 3
+#define SM8350_LCX 4
+#define SM8350_LMX 5
+#define SM8350_MMCX 6
+#define SM8350_MMCX_AO 7
+#define SM8350_MX 8
+#define SM8350_MX_AO 9
+#define SM8350_MXC 10
+#define SM8350_MXC_AO 11
+#define SM8350_MSS 12
+
+/* SM8450 Power Domain Indexes */
+#define SM8450_CX 0
+#define SM8450_CX_AO 1
+#define SM8450_EBI 2
+#define SM8450_GFX 3
+#define SM8450_LCX 4
+#define SM8450_LMX 5
+#define SM8450_MMCX 6
+#define SM8450_MMCX_AO 7
+#define SM8450_MX 8
+#define SM8450_MX_AO 9
+#define SM8450_MXC 10
+#define SM8450_MXC_AO 11
+#define SM8450_MSS 12
+
/* SC7180 Power Domain Indexes */
#define SC7180_CX 0
#define SC7180_CX_AO 1
@@ -38,11 +113,54 @@
#define SC7180_LCX 6
#define SC7180_MSS 7
+/* SC7280 Power Domain Indexes */
+#define SC7280_CX 0
+#define SC7280_CX_AO 1
+#define SC7280_EBI 2
+#define SC7280_GFX 3
+#define SC7280_MX 4
+#define SC7280_MX_AO 5
+#define SC7280_LMX 6
+#define SC7280_LCX 7
+#define SC7280_MSS 8
+
+/* SC8180X Power Domain Indexes */
+#define SC8180X_CX 0
+#define SC8180X_CX_AO 1
+#define SC8180X_EBI 2
+#define SC8180X_GFX 3
+#define SC8180X_LCX 4
+#define SC8180X_LMX 5
+#define SC8180X_MMCX 6
+#define SC8180X_MMCX_AO 7
+#define SC8180X_MSS 8
+#define SC8180X_MX 9
+#define SC8180X_MX_AO 10
+
+/* SC8280XP Power Domain Indexes */
+#define SC8280XP_CX 0
+#define SC8280XP_CX_AO 1
+#define SC8280XP_DDR 2
+#define SC8280XP_EBI 3
+#define SC8280XP_GFX 4
+#define SC8280XP_LCX 5
+#define SC8280XP_LMX 6
+#define SC8280XP_MMCX 7
+#define SC8280XP_MMCX_AO 8
+#define SC8280XP_MSS 9
+#define SC8280XP_MX 10
+#define SC8280XP_MXC 12
+#define SC8280XP_MX_AO 11
+#define SC8280XP_NSP 13
+#define SC8280XP_QPHY 14
+#define SC8280XP_XO 15
+
/* SDM845 Power Domain performance levels */
#define RPMH_REGULATOR_LEVEL_RETENTION 16
#define RPMH_REGULATOR_LEVEL_MIN_SVS 48
#define RPMH_REGULATOR_LEVEL_LOW_SVS 64
#define RPMH_REGULATOR_LEVEL_SVS 128
+#define RPMH_REGULATOR_LEVEL_SVS_L0 144
#define RPMH_REGULATOR_LEVEL_SVS_L1 192
#define RPMH_REGULATOR_LEVEL_SVS_L2 224
#define RPMH_REGULATOR_LEVEL_NOM 256
@@ -51,6 +169,52 @@
#define RPMH_REGULATOR_LEVEL_TURBO 384
#define RPMH_REGULATOR_LEVEL_TURBO_L1 416
+/* MDM9607 Power Domains */
+#define MDM9607_VDDCX 0
+#define MDM9607_VDDCX_AO 1
+#define MDM9607_VDDCX_VFL 2
+#define MDM9607_VDDMX 3
+#define MDM9607_VDDMX_AO 4
+#define MDM9607_VDDMX_VFL 5
+
+/* MSM8226 Power Domain Indexes */
+#define MSM8226_VDDCX 0
+#define MSM8226_VDDCX_AO 1
+#define MSM8226_VDDCX_VFC 2
+
+/* MSM8939 Power Domains */
+#define MSM8939_VDDMDCX 0
+#define MSM8939_VDDMDCX_AO 1
+#define MSM8939_VDDMDCX_VFC 2
+#define MSM8939_VDDCX 3
+#define MSM8939_VDDCX_AO 4
+#define MSM8939_VDDCX_VFC 5
+#define MSM8939_VDDMX 6
+#define MSM8939_VDDMX_AO 7
+
+/* MSM8916 Power Domain Indexes */
+#define MSM8916_VDDCX 0
+#define MSM8916_VDDCX_AO 1
+#define MSM8916_VDDCX_VFC 2
+#define MSM8916_VDDMX 3
+#define MSM8916_VDDMX_AO 4
+
+/* MSM8909 Power Domain Indexes */
+#define MSM8909_VDDCX MSM8916_VDDCX
+#define MSM8909_VDDCX_AO MSM8916_VDDCX_AO
+#define MSM8909_VDDCX_VFC MSM8916_VDDCX_VFC
+#define MSM8909_VDDMX MSM8916_VDDMX
+#define MSM8909_VDDMX_AO MSM8916_VDDMX_AO
+
+/* MSM8953 Power Domain Indexes */
+#define MSM8953_VDDMD 0
+#define MSM8953_VDDMD_AO 1
+#define MSM8953_VDDCX 2
+#define MSM8953_VDDCX_AO 3
+#define MSM8953_VDDCX_VFL 4
+#define MSM8953_VDDMX 5
+#define MSM8953_VDDMX_AO 6
+
/* MSM8976 Power Domain Indexes */
#define MSM8976_VDDCX 0
#define MSM8976_VDDCX_AO 1
@@ -59,6 +223,15 @@
#define MSM8976_VDDMX_AO 4
#define MSM8976_VDDMX_VFL 5
+/* MSM8994 Power Domain Indexes */
+#define MSM8994_VDDCX 0
+#define MSM8994_VDDCX_AO 1
+#define MSM8994_VDDCX_VFC 2
+#define MSM8994_VDDMX 3
+#define MSM8994_VDDMX_AO 4
+#define MSM8994_VDDGFX 5
+#define MSM8994_VDDGFX_VFC 6
+
/* MSM8996 Power Domain Indexes */
#define MSM8996_VDDCX 0
#define MSM8996_VDDCX_AO 1
@@ -89,6 +262,46 @@
#define QCS404_LPIMX 5
#define QCS404_LPIMX_VFL 6
+/* SDM660 Power Domains */
+#define SDM660_VDDCX 0
+#define SDM660_VDDCX_AO 1
+#define SDM660_VDDCX_VFL 2
+#define SDM660_VDDMX 3
+#define SDM660_VDDMX_AO 4
+#define SDM660_VDDMX_VFL 5
+#define SDM660_SSCCX 6
+#define SDM660_SSCCX_VFL 7
+#define SDM660_SSCMX 8
+#define SDM660_SSCMX_VFL 9
+
+/* SM6115 Power Domains */
+#define SM6115_VDDCX 0
+#define SM6115_VDDCX_AO 1
+#define SM6115_VDDCX_VFL 2
+#define SM6115_VDDMX 3
+#define SM6115_VDDMX_AO 4
+#define SM6115_VDDMX_VFL 5
+#define SM6115_VDD_LPI_CX 6
+#define SM6115_VDD_LPI_MX 7
+
+/* SM6125 Power Domains */
+#define SM6125_VDDCX 0
+#define SM6125_VDDCX_AO 1
+#define SM6125_VDDCX_VFL 2
+#define SM6125_VDDMX 3
+#define SM6125_VDDMX_AO 4
+#define SM6125_VDDMX_VFL 5
+
+/* QCM2290 Power Domains */
+#define QCM2290_VDDCX 0
+#define QCM2290_VDDCX_AO 1
+#define QCM2290_VDDCX_VFL 2
+#define QCM2290_VDDMX 3
+#define QCM2290_VDDMX_AO 4
+#define QCM2290_VDDMX_VFL 5
+#define QCM2290_VDD_LPI_CX 6
+#define QCM2290_VDD_LPI_MX 7
+
/* RPM SMD Power Domain performance levels */
#define RPM_SMD_LEVEL_RETENTION 16
#define RPM_SMD_LEVEL_RETENTION_PLUS 32
diff --git a/include/dt-bindings/power/r8a7742-sysc.h b/include/dt-bindings/power/r8a7742-sysc.h
new file mode 100644
index 000000000000..1b1bd3cf95db
--- /dev/null
+++ b/include/dt-bindings/power/r8a7742-sysc.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7742_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7742_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7742_PD_CA15_CPU0 0
+#define R8A7742_PD_CA15_CPU1 1
+#define R8A7742_PD_CA15_CPU2 2
+#define R8A7742_PD_CA15_CPU3 3
+#define R8A7742_PD_CA7_CPU0 5
+#define R8A7742_PD_CA7_CPU1 6
+#define R8A7742_PD_CA7_CPU2 7
+#define R8A7742_PD_CA7_CPU3 8
+#define R8A7742_PD_CA15_SCU 12
+#define R8A7742_PD_RGX 20
+#define R8A7742_PD_CA7_SCU 21
+
+/* Always-on power area */
+#define R8A7742_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A7742_SYSC_H__ */
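Per the comment in r8a7742-sysc.h, these indices double as bit positions in the SYSC interrupt registers, so a status word can be tested with a plain shift. A sketch under that assumption; the sample status word is hypothetical, only the index-to-bit mapping comes from the header (note the always-on area, index 32, lies outside a 32-bit status word):

#include <stdio.h>

#define R8A7742_PD_CA15_CPU0	0
#define R8A7742_PD_RGX		20

/* Test whether a power area's bit is set in a SYSC status word.
 * 'syscisr' here is a made-up sample value, not a real register read.
 */
static int area_is_flagged(unsigned int syscisr, unsigned int pd)
{
	return (syscisr >> pd) & 1;
}

int main(void)
{
	unsigned int syscisr = 1u << R8A7742_PD_RGX;

	printf("RGX flagged: %d\n", area_is_flagged(syscisr, R8A7742_PD_RGX));
	return 0;
}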
diff --git a/include/dt-bindings/power/r8a774e1-sysc.h b/include/dt-bindings/power/r8a774e1-sysc.h
new file mode 100644
index 000000000000..7edb8161db36
--- /dev/null
+++ b/include/dt-bindings/power/r8a774e1-sysc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A774E1_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A774E1_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A774E1_PD_CA57_CPU0 0
+#define R8A774E1_PD_CA57_CPU1 1
+#define R8A774E1_PD_CA57_CPU2 2
+#define R8A774E1_PD_CA57_CPU3 3
+#define R8A774E1_PD_CA53_CPU0 5
+#define R8A774E1_PD_CA53_CPU1 6
+#define R8A774E1_PD_CA53_CPU2 7
+#define R8A774E1_PD_CA53_CPU3 8
+#define R8A774E1_PD_A3VP 9
+#define R8A774E1_PD_CA57_SCU 12
+#define R8A774E1_PD_A3VC 14
+#define R8A774E1_PD_3DG_A 17
+#define R8A774E1_PD_3DG_B 18
+#define R8A774E1_PD_3DG_C 19
+#define R8A774E1_PD_3DG_D 20
+#define R8A774E1_PD_CA53_SCU 21
+#define R8A774E1_PD_3DG_E 22
+#define R8A774E1_PD_A2VC1 26
+
+/* Always-on power area */
+#define R8A774E1_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A774E1_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a779a0-sysc.h b/include/dt-bindings/power/r8a779a0-sysc.h
new file mode 100644
index 000000000000..57929e459a67
--- /dev/null
+++ b/include/dt-bindings/power/r8a779a0-sysc.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A779A0_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A779A0_SYSC_H__
+
+/*
+ * These power domain indices match the Power Domain Register Numbers (PDR)
+ */
+
+#define R8A779A0_PD_A1E0D0C0 0
+#define R8A779A0_PD_A1E0D0C1 1
+#define R8A779A0_PD_A1E0D1C0 2
+#define R8A779A0_PD_A1E0D1C1 3
+#define R8A779A0_PD_A1E1D0C0 4
+#define R8A779A0_PD_A1E1D0C1 5
+#define R8A779A0_PD_A1E1D1C0 6
+#define R8A779A0_PD_A1E1D1C1 7
+#define R8A779A0_PD_A2E0D0 16
+#define R8A779A0_PD_A2E0D1 17
+#define R8A779A0_PD_A2E1D0 18
+#define R8A779A0_PD_A2E1D1 19
+#define R8A779A0_PD_A3E0 20
+#define R8A779A0_PD_A3E1 21
+#define R8A779A0_PD_3DG_A 24
+#define R8A779A0_PD_3DG_B 25
+#define R8A779A0_PD_A1CNN2 32
+#define R8A779A0_PD_A1DSP0 33
+#define R8A779A0_PD_A2IMP01 34
+#define R8A779A0_PD_A2DP0 35
+#define R8A779A0_PD_A2CV0 36
+#define R8A779A0_PD_A2CV1 37
+#define R8A779A0_PD_A2CV4 38
+#define R8A779A0_PD_A2CV6 39
+#define R8A779A0_PD_A2CN2 40
+#define R8A779A0_PD_A1CNN0 41
+#define R8A779A0_PD_A2CN0 42
+#define R8A779A0_PD_A3IR 43
+#define R8A779A0_PD_A1CNN1 44
+#define R8A779A0_PD_A1DSP1 45
+#define R8A779A0_PD_A2IMP23 46
+#define R8A779A0_PD_A2DP1 47
+#define R8A779A0_PD_A2CV2 48
+#define R8A779A0_PD_A2CV3 49
+#define R8A779A0_PD_A2CV5 50
+#define R8A779A0_PD_A2CV7 51
+#define R8A779A0_PD_A2CN1 52
+#define R8A779A0_PD_A3VIP0 56
+#define R8A779A0_PD_A3VIP1 57
+#define R8A779A0_PD_A3VIP2 58
+#define R8A779A0_PD_A3VIP3 59
+#define R8A779A0_PD_A3ISP01 60
+#define R8A779A0_PD_A3ISP23 61
+
+/* Always-on power area */
+#define R8A779A0_PD_ALWAYS_ON 64
+
+#endif /* __DT_BINDINGS_POWER_R8A779A0_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a779f0-sysc.h b/include/dt-bindings/power/r8a779f0-sysc.h
new file mode 100644
index 000000000000..0ec8ad727ed9
--- /dev/null
+++ b/include/dt-bindings/power/r8a779f0-sysc.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A779F0_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A779F0_SYSC_H__
+
+/*
+ * These power domain indices match the Power Domain Register Numbers (PDR)
+ */
+
+#define R8A779F0_PD_A1E0D0C0 0
+#define R8A779F0_PD_A1E0D0C1 1
+#define R8A779F0_PD_A1E0D1C0 2
+#define R8A779F0_PD_A1E0D1C1 3
+#define R8A779F0_PD_A1E1D0C0 4
+#define R8A779F0_PD_A1E1D0C1 5
+#define R8A779F0_PD_A1E1D1C0 6
+#define R8A779F0_PD_A1E1D1C1 7
+#define R8A779F0_PD_A2E0D0 16
+#define R8A779F0_PD_A2E0D1 17
+#define R8A779F0_PD_A2E1D0 18
+#define R8A779F0_PD_A2E1D1 19
+#define R8A779F0_PD_A3E0 20
+#define R8A779F0_PD_A3E1 21
+
+/* Always-on power area */
+#define R8A779F0_PD_ALWAYS_ON 64
+
+#endif /* __DT_BINDINGS_POWER_R8A779F0_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a779g0-sysc.h b/include/dt-bindings/power/r8a779g0-sysc.h
new file mode 100644
index 000000000000..7daa70f1814e
--- /dev/null
+++ b/include/dt-bindings/power/r8a779g0-sysc.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A779G0_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A779G0_SYSC_H__
+
+/*
+ * These power domain indices match the Power Domain Register Numbers (PDR)
+ */
+
+#define R8A779G0_PD_A1E0D0C0 0
+#define R8A779G0_PD_A1E0D0C1 1
+#define R8A779G0_PD_A1E0D1C0 2
+#define R8A779G0_PD_A1E0D1C1 3
+#define R8A779G0_PD_A2E0D0 16
+#define R8A779G0_PD_A2E0D1 17
+#define R8A779G0_PD_A3E0 20
+#define R8A779G0_PD_A33DGA 24
+#define R8A779G0_PD_A23DGB 25
+#define R8A779G0_PD_A1DSP0 33
+#define R8A779G0_PD_A2IMP01 34
+#define R8A779G0_PD_A2PSC 35
+#define R8A779G0_PD_A2CV0 36
+#define R8A779G0_PD_A2CV1 37
+#define R8A779G0_PD_A1CNN0 41
+#define R8A779G0_PD_A2CN0 42
+#define R8A779G0_PD_A3IR 43
+#define R8A779G0_PD_A1DSP1 45
+#define R8A779G0_PD_A2IMP23 46
+#define R8A779G0_PD_A2DMA 47
+#define R8A779G0_PD_A2CV2 48
+#define R8A779G0_PD_A2CV3 49
+#define R8A779G0_PD_A1DSP2 53
+#define R8A779G0_PD_A1DSP3 54
+#define R8A779G0_PD_A3VIP0 56
+#define R8A779G0_PD_A3VIP1 57
+#define R8A779G0_PD_A3VIP2 58
+#define R8A779G0_PD_A3ISP0 60
+#define R8A779G0_PD_A3ISP1 61
+
+/* Always-on power area */
+#define R8A779G0_PD_ALWAYS_ON 64
+
+#endif /* __DT_BINDINGS_POWER_R8A779G0_SYSC_H__ */
diff --git a/include/dt-bindings/power/rk3568-power.h b/include/dt-bindings/power/rk3568-power.h
new file mode 100644
index 000000000000..6cc1af1a9d26
--- /dev/null
+++ b/include/dt-bindings/power/rk3568-power.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_POWER_RK3568_POWER_H__
+#define __DT_BINDINGS_POWER_RK3568_POWER_H__
+
+/* VD_CORE */
+#define RK3568_PD_CPU_0 0
+#define RK3568_PD_CPU_1 1
+#define RK3568_PD_CPU_2 2
+#define RK3568_PD_CPU_3 3
+#define RK3568_PD_CORE_ALIVE 4
+
+/* VD_PMU */
+#define RK3568_PD_PMU 5
+
+/* VD_NPU */
+#define RK3568_PD_NPU 6
+
+/* VD_GPU */
+#define RK3568_PD_GPU 7
+
+/* VD_LOGIC */
+#define RK3568_PD_VI 8
+#define RK3568_PD_VO 9
+#define RK3568_PD_RGA 10
+#define RK3568_PD_VPU 11
+#define RK3568_PD_CENTER 12
+#define RK3568_PD_RKVDEC 13
+#define RK3568_PD_RKVENC 14
+#define RK3568_PD_PIPE 15
+#define RK3568_PD_LOGIC_ALIVE 16
+
+#endif
diff --git a/include/dt-bindings/power/rk3588-power.h b/include/dt-bindings/power/rk3588-power.h
new file mode 100644
index 000000000000..1b92fec013cb
--- /dev/null
+++ b/include/dt-bindings/power/rk3588-power.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+#ifndef __DT_BINDINGS_POWER_RK3588_POWER_H__
+#define __DT_BINDINGS_POWER_RK3588_POWER_H__
+
+/* VD_LITDSU */
+#define RK3588_PD_CPU_0 0
+#define RK3588_PD_CPU_1 1
+#define RK3588_PD_CPU_2 2
+#define RK3588_PD_CPU_3 3
+
+/* VD_BIGCORE0 */
+#define RK3588_PD_CPU_4 4
+#define RK3588_PD_CPU_5 5
+
+/* VD_BIGCORE1 */
+#define RK3588_PD_CPU_6 6
+#define RK3588_PD_CPU_7 7
+
+/* VD_NPU */
+#define RK3588_PD_NPU 8
+#define RK3588_PD_NPUTOP 9
+#define RK3588_PD_NPU1 10
+#define RK3588_PD_NPU2 11
+
+/* VD_GPU */
+#define RK3588_PD_GPU 12
+
+/* VD_VCODEC */
+#define RK3588_PD_VCODEC 13
+#define RK3588_PD_RKVDEC0 14
+#define RK3588_PD_RKVDEC1 15
+#define RK3588_PD_VENC0 16
+#define RK3588_PD_VENC1 17
+
+/* VD_DD01 */
+#define RK3588_PD_DDR01 18
+
+/* VD_DD23 */
+#define RK3588_PD_DDR23 19
+
+/* VD_LOGIC */
+#define RK3588_PD_CENTER 20
+#define RK3588_PD_VDPU 21
+#define RK3588_PD_RGA30 22
+#define RK3588_PD_AV1 23
+#define RK3588_PD_VOP 24
+#define RK3588_PD_VO0 25
+#define RK3588_PD_VO1 26
+#define RK3588_PD_VI 27
+#define RK3588_PD_ISP1 28
+#define RK3588_PD_FEC 29
+#define RK3588_PD_RGA31 30
+#define RK3588_PD_USB 31
+#define RK3588_PD_PHP 32
+#define RK3588_PD_GMAC 33
+#define RK3588_PD_PCIE 34
+#define RK3588_PD_NVM 35
+#define RK3588_PD_NVM0 36
+#define RK3588_PD_SDIO 37
+#define RK3588_PD_AUDIO 38
+#define RK3588_PD_SECURE 39
+#define RK3588_PD_SDMMC 40
+#define RK3588_PD_CRYPTO 41
+#define RK3588_PD_BUS 42
+
+/* VD_PMU */
+#define RK3588_PD_PMU1 43
+
+#endif
diff --git a/include/dt-bindings/power/rockchip,rv1126-power.h b/include/dt-bindings/power/rockchip,rv1126-power.h
new file mode 100644
index 000000000000..38a68e000d38
--- /dev/null
+++ b/include/dt-bindings/power/rockchip,rv1126-power.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __DT_BINDINGS_POWER_RV1126_POWER_H__
+#define __DT_BINDINGS_POWER_RV1126_POWER_H__
+
+/* VD_CORE */
+#define RV1126_PD_CPU_0 0
+#define RV1126_PD_CPU_1 1
+#define RV1126_PD_CPU_2 2
+#define RV1126_PD_CPU_3 3
+#define RV1126_PD_CORE_ALIVE 4
+
+/* VD_PMU */
+#define RV1126_PD_PMU 5
+#define RV1126_PD_PMU_ALIVE 6
+
+/* VD_NPU */
+#define RV1126_PD_NPU 7
+
+/* VD_VEPU */
+#define RV1126_PD_VEPU 8
+
+/* VD_LOGIC */
+#define RV1126_PD_VI 9
+#define RV1126_PD_VO 10
+#define RV1126_PD_ISPP 11
+#define RV1126_PD_VDPU 12
+#define RV1126_PD_CRYPTO 13
+#define RV1126_PD_DDR 14
+#define RV1126_PD_NVM 15
+#define RV1126_PD_SDIO 16
+#define RV1126_PD_USB 17
+#define RV1126_PD_LOGIC_ALIVE 18
+
+#endif
diff --git a/include/dt-bindings/power/summit,smb347-charger.h b/include/dt-bindings/power/summit,smb347-charger.h
new file mode 100644
index 000000000000..3205699b5e41
--- /dev/null
+++ b/include/dt-bindings/power/summit,smb347-charger.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR MIT) */
+/*
+ * Author: David Heidelberg <david@ixit.cz>
+ */
+
+#ifndef _DT_BINDINGS_SMB347_CHARGER_H
+#define _DT_BINDINGS_SMB347_CHARGER_H
+
+/* Charging compensation method */
+#define SMB3XX_SOFT_TEMP_COMPENSATE_NONE 0
+#define SMB3XX_SOFT_TEMP_COMPENSATE_CURRENT 1
+#define SMB3XX_SOFT_TEMP_COMPENSATE_VOLTAGE 2
+
+/* Charging enable control */
+#define SMB3XX_CHG_ENABLE_SW 0
+#define SMB3XX_CHG_ENABLE_PIN_ACTIVE_LOW 1
+#define SMB3XX_CHG_ENABLE_PIN_ACTIVE_HIGH 2
+
+/* Polarity of INOK signal */
+#define SMB3XX_SYSOK_INOK_ACTIVE_LOW 0
+#define SMB3XX_SYSOK_INOK_ACTIVE_HIGH 1
+
+#endif
diff --git a/include/dt-bindings/power/tegra234-powergate.h b/include/dt-bindings/power/tegra234-powergate.h
new file mode 100644
index 000000000000..ae9286cef85c
--- /dev/null
+++ b/include/dt-bindings/power/tegra234-powergate.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __ABI_MACH_T234_POWERGATE_T234_H_
+#define __ABI_MACH_T234_POWERGATE_T234_H_
+
+#define TEGRA234_POWER_DOMAIN_AUD 2U
+#define TEGRA234_POWER_DOMAIN_DISP 3U
+#define TEGRA234_POWER_DOMAIN_PCIEX8A 5U
+#define TEGRA234_POWER_DOMAIN_PCIEX4A 6U
+#define TEGRA234_POWER_DOMAIN_PCIEX4BA 7U
+#define TEGRA234_POWER_DOMAIN_PCIEX4BB 8U
+#define TEGRA234_POWER_DOMAIN_PCIEX1A 9U
+#define TEGRA234_POWER_DOMAIN_PCIEX4CA 13U
+#define TEGRA234_POWER_DOMAIN_PCIEX4CB 14U
+#define TEGRA234_POWER_DOMAIN_PCIEX4CC 15U
+#define TEGRA234_POWER_DOMAIN_PCIEX8B 16U
+#define TEGRA234_POWER_DOMAIN_MGBEA 17U
+#define TEGRA234_POWER_DOMAIN_MGBEB 18U
+#define TEGRA234_POWER_DOMAIN_MGBEC 19U
+#define TEGRA234_POWER_DOMAIN_MGBED 20U
+#define TEGRA234_POWER_DOMAIN_VIC 29U
+
+#endif
diff --git a/include/dt-bindings/pwm/raspberrypi,firmware-poe-pwm.h b/include/dt-bindings/pwm/raspberrypi,firmware-poe-pwm.h
new file mode 100644
index 000000000000..27c5ce68847b
--- /dev/null
+++ b/include/dt-bindings/pwm/raspberrypi,firmware-poe-pwm.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Nicolas Saenz Julienne
+ * Author: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
+ */
+
+#ifndef _DT_BINDINGS_RASPBERRYPI_FIRMWARE_PWM_H
+#define _DT_BINDINGS_RASPBERRYPI_FIRMWARE_PWM_H
+
+#define RASPBERRYPI_FIRMWARE_PWM_POE 0
+#define RASPBERRYPI_FIRMWARE_PWM_NUM 1
+
+#endif
diff --git a/include/dt-bindings/regulator/dlg,da9121-regulator.h b/include/dt-bindings/regulator/dlg,da9121-regulator.h
new file mode 100644
index 000000000000..954edf633ce7
--- /dev/null
+++ b/include/dt-bindings/regulator/dlg,da9121-regulator.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _DT_BINDINGS_REGULATOR_DLG_DA9121_H
+#define _DT_BINDINGS_REGULATOR_DLG_DA9121_H
+
+/*
+ * These buck mode constants may be used to specify values in device tree
+ * properties (e.g. regulator-initial-mode).
+ * A description of the following modes is in the manufacturer's datasheet.
+ */
+
+#define DA9121_BUCK_MODE_FORCE_PFM 0
+#define DA9121_BUCK_MODE_FORCE_PWM 1
+#define DA9121_BUCK_MODE_FORCE_PWM_SHEDDING 2
+#define DA9121_BUCK_MODE_AUTO 3
+
+#define DA9121_BUCK_RIPPLE_CANCEL_NONE 0
+#define DA9121_BUCK_RIPPLE_CANCEL_SMALL 1
+#define DA9121_BUCK_RIPPLE_CANCEL_MID 2
+#define DA9121_BUCK_RIPPLE_CANCEL_LARGE 3
+
+#endif
diff --git a/include/dt-bindings/regulator/dlg,da9211-regulator.h b/include/dt-bindings/regulator/dlg,da9211-regulator.h
new file mode 100644
index 000000000000..cdce2d54c8ba
--- /dev/null
+++ b/include/dt-bindings/regulator/dlg,da9211-regulator.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_REGULATOR_DLG_DA9211_H
+#define _DT_BINDINGS_REGULATOR_DLG_DA9211_H
+
+/*
+ * These buck mode constants may be used to specify values in device tree
+ * properties (e.g. regulator-initial-mode, regulator-allowed-modes).
+ * A description of the following modes is in the manufacturer's datasheet.
+ */
+
+#define DA9211_BUCK_MODE_SLEEP 1
+#define DA9211_BUCK_MODE_SYNC 2
+#define DA9211_BUCK_MODE_AUTO 3
+
+#endif
diff --git a/include/dt-bindings/regulator/mediatek,mt6360-regulator.h b/include/dt-bindings/regulator/mediatek,mt6360-regulator.h
new file mode 100644
index 000000000000..21c75de700c0
--- /dev/null
+++ b/include/dt-bindings/regulator/mediatek,mt6360-regulator.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __DT_BINDINGS_MEDIATEK_MT6360_REGULATOR_H__
+#define __DT_BINDINGS_MEDIATEK_MT6360_REGULATOR_H__
+
+/*
+ * BUCK/LDO mode constants which may be used in devicetree properties
+ * (e.g. regulator-allowed-modes).
+ * See the manufacturer's datasheet for more information on these modes.
+ */
+
+#define MT6360_OPMODE_LP 2
+#define MT6360_OPMODE_ULP 3
+#define MT6360_OPMODE_NORMAL 0
+
+#endif
diff --git a/include/dt-bindings/regulator/mediatek,mt6397-regulator.h b/include/dt-bindings/regulator/mediatek,mt6397-regulator.h
new file mode 100644
index 000000000000..99869a8665cf
--- /dev/null
+++ b/include/dt-bindings/regulator/mediatek,mt6397-regulator.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_REGULATOR_MEDIATEK_MT6397_H_
+#define _DT_BINDINGS_REGULATOR_MEDIATEK_MT6397_H_
+
+/*
+ * Buck mode constants which may be used in devicetree properties (e.g.
+ * regulator-initial-mode, regulator-allowed-modes).
+ * See the manufacturer's datasheet for more information on these modes.
+ */
+
+#define MT6397_BUCK_MODE_AUTO 0
+#define MT6397_BUCK_MODE_FORCE_PWM 1
+
+#endif
diff --git a/include/dt-bindings/regulator/richtek,rt5190a-regulator.h b/include/dt-bindings/regulator/richtek,rt5190a-regulator.h
new file mode 100644
index 000000000000..63f99d4c1cb3
--- /dev/null
+++ b/include/dt-bindings/regulator/richtek,rt5190a-regulator.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __DT_BINDINGS_RICHTEK_RT5190A_REGULATOR_H__
+#define __DT_BINDINGS_RICHTEK_RT5190A_REGULATOR_H__
+
+/*
+ * BUCK/LDO mode constants which may be used in devicetree properties
+ * (e.g. regulator-allowed-modes).
+ * See the manufacturer's datasheet for more information on these modes.
+ */
+
+#define RT5190A_OPMODE_AUTO 0
+#define RT5190A_OPMODE_FPWM 1
+
+#endif
diff --git a/include/dt-bindings/regulator/ti,tps62864.h b/include/dt-bindings/regulator/ti,tps62864.h
new file mode 100644
index 000000000000..8db31f23d956
--- /dev/null
+++ b/include/dt-bindings/regulator/ti,tps62864.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_REGULATOR_TI_TPS62864_H
+#define _DT_BINDINGS_REGULATOR_TI_TPS62864_H
+
+#define TPS62864_MODE_NORMAL 0
+#define TPS62864_MODE_FPWM 1
+
+#endif
diff --git a/include/dt-bindings/reset/actions,s500-reset.h b/include/dt-bindings/reset/actions,s500-reset.h
new file mode 100644
index 000000000000..f5d94176d10b
--- /dev/null
+++ b/include/dt-bindings/reset/actions,s500-reset.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Device Tree binding constants for Actions Semi S500 Reset Management Unit
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Copyright (c) 2020 Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
+ */
+
+#ifndef __DT_BINDINGS_ACTIONS_S500_RESET_H
+#define __DT_BINDINGS_ACTIONS_S500_RESET_H
+
+#define RESET_DMAC 0
+#define RESET_NORIF 1
+#define RESET_DDR 2
+#define RESET_NANDC 3
+#define RESET_SD0 4
+#define RESET_SD1 5
+#define RESET_PCM1 6
+#define RESET_DE 7
+#define RESET_LCD 8
+#define RESET_SD2 9
+#define RESET_DSI 10
+#define RESET_CSI 11
+#define RESET_BISP 12
+#define RESET_KEY 13
+#define RESET_GPIO 14
+#define RESET_AUDIO 15
+#define RESET_PCM0 16
+#define RESET_VDE 17
+#define RESET_VCE 18
+#define RESET_GPU3D 19
+#define RESET_NIC301 20
+#define RESET_LENS 21
+#define RESET_PERIPHRESET 22
+#define RESET_USB2_0 23
+#define RESET_TVOUT 24
+#define RESET_HDMI 25
+#define RESET_HDCP2TX 26
+#define RESET_UART6 27
+#define RESET_UART0 28
+#define RESET_UART1 29
+#define RESET_UART2 30
+#define RESET_SPI0 31
+#define RESET_SPI1 32
+#define RESET_SPI2 33
+#define RESET_SPI3 34
+#define RESET_I2C0 35
+#define RESET_I2C1 36
+#define RESET_USB3 37
+#define RESET_UART3 38
+#define RESET_UART4 39
+#define RESET_UART5 40
+#define RESET_I2C2 41
+#define RESET_I2C3 42
+#define RESET_ETHERNET 43
+#define RESET_CHIPID 44
+#define RESET_USB2_1 45
+#define RESET_WD0RESET 46
+#define RESET_WD1RESET 47
+#define RESET_WD2RESET 48
+#define RESET_WD3RESET 49
+#define RESET_DBG0RESET 50
+#define RESET_DBG1RESET 51
+#define RESET_DBG2RESET 52
+#define RESET_DBG3RESET 53
+
+#endif /* __DT_BINDINGS_ACTIONS_S500_RESET_H */
diff --git a/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h b/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h
index ea5058618863..883bfd3bcbad 100644
--- a/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h
+++ b/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h
@@ -69,7 +69,7 @@
#define RESET_SYS_CPU_L2 58
#define RESET_SYS_CPU_P 59
#define RESET_SYS_CPU_MBIST 60
-/* 61 */
+#define RESET_ACODEC 61
/* 62 */
/* 63 */
/* RESET2 */
diff --git a/include/dt-bindings/reset/amlogic,meson-s4-reset.h b/include/dt-bindings/reset/amlogic,meson-s4-reset.h
new file mode 100644
index 000000000000..eab428eb8ad6
--- /dev/null
+++ b/include/dt-bindings/reset/amlogic,meson-s4-reset.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2021 Amlogic, Inc. All rights reserved.
+ * Author: Zelong Dong <zelong.dong@amlogic.com>
+ *
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_MESON_S4_RESET_H
+#define _DT_BINDINGS_AMLOGIC_MESON_S4_RESET_H
+
+/* RESET0 */
+#define RESET_USB_DDR0 0
+#define RESET_USB_DDR1 1
+#define RESET_USB_DDR2 2
+#define RESET_USB_DDR3 3
+#define RESET_USBCTRL 4
+/* 5-7 */
+#define RESET_USBPHY20 8
+#define RESET_USBPHY21 9
+/* 10-15 */
+#define RESET_HDMITX_APB 16
+#define RESET_BRG_VCBUS_DEC 17
+#define RESET_VCBUS 18
+#define RESET_VID_PLL_DIV 19
+#define RESET_VDI6 20
+#define RESET_GE2D 21
+#define RESET_HDMITXPHY 22
+#define RESET_VID_LOCK 23
+#define RESET_VENCL 24
+#define RESET_VDAC 25
+#define RESET_VENCP 26
+#define RESET_VENCI 27
+#define RESET_RDMA 28
+#define RESET_HDMI_TX 29
+#define RESET_VIU 30
+#define RESET_VENC 31
+
+/* RESET1 */
+#define RESET_AUDIO 32
+#define RESET_MALI_APB 33
+#define RESET_MALI 34
+#define RESET_DDR_APB 35
+#define RESET_DDR 36
+#define RESET_DOS_APB 37
+#define RESET_DOS 38
+/* 39-47 */
+#define RESET_ETH 48
+/* 49-51 */
+#define RESET_DEMOD 52
+/* 53-63 */
+
+/* RESET2 */
+#define RESET_ABUS_ARB 64
+#define RESET_IR_CTRL 65
+#define RESET_TEMPSENSOR_DDR 66
+#define RESET_TEMPSENSOR_PLL 67
+/* 68-71 */
+#define RESET_SMART_CARD 72
+#define RESET_SPICC0 73
+/* 74 */
+#define RESET_RSA 75
+/* 76-79 */
+#define RESET_MSR_CLK 80
+#define RESET_SPIFC 81
+#define RESET_SARADC 82
+/* 83-87 */
+#define RESET_ACODEC 88
+#define RESET_CEC 89
+#define RESET_AFIFO 90
+#define RESET_WATCHDOG 91
+/* 92-95 */
+
+/* RESET3 */
+/* 96-127 */
+
+/* RESET4 */
+/* 128-131 */
+#define RESET_PWM_AB 132
+#define RESET_PWM_CD 133
+#define RESET_PWM_EF 134
+#define RESET_PWM_GH 135
+#define RESET_PWM_IJ 136
+/* 137 */
+#define RESET_UART_A 138
+#define RESET_UART_B 139
+#define RESET_UART_C 140
+#define RESET_UART_D 141
+#define RESET_UART_E 142
+/* 143 */
+#define RESET_I2C_S_A 144
+#define RESET_I2C_M_A 145
+#define RESET_I2C_M_B 146
+#define RESET_I2C_M_C 147
+#define RESET_I2C_M_D 148
+#define RESET_I2C_M_E 149
+/* 150-151 */
+#define RESET_SD_EMMC_A 152
+#define RESET_SD_EMMC_B 153
+#define RESET_NAND_EMMC 154
+/* 155-159 */
+
+/* RESET5 */
+#define RESET_BRG_VDEC_PIPL0 160
+#define RESET_BRG_HEVCF_PIPL0 161
+/* 162 */
+#define RESET_BRG_HCODEC_PIPL0 163
+#define RESET_BRG_GE2D_PIPL0 164
+#define RESET_BRG_VPU_PIPL0 165
+#define RESET_BRG_CPU_PIPL0 166
+#define RESET_BRG_MALI_PIPL0 167
+/* 168 */
+#define RESET_BRG_MALI_PIPL1 169
+/* 170-171 */
+#define RESET_BRG_HEVCF_PIPL1 172
+#define RESET_BRG_HEVCB_PIPL1 173
+/* 174-183 */
+#define RESET_RAMA 184
+/* 185-186 */
+#define RESET_BRG_NIC_VAPB 187
+#define RESET_BRG_NIC_DSU 188
+#define RESET_BRG_NIC_SYSCLK 189
+#define RESET_BRG_NIC_MAIN 190
+#define RESET_BRG_NIC_ALL 191
+
+#endif
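The S4 indices are flat across the RESET0..RESET5 register banks (bank * 32 + bit), so a consumer needs only a single specifier cell. A hedged sketch (node labels assumed):

    #include <dt-bindings/reset/amlogic,meson-s4-reset.h>

    &uart_a {
            /* &reset: assumed label of the S4 reset-controller node */
            resets = <&reset RESET_UART_A>;
    };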
diff --git a/include/dt-bindings/reset/bcm6318-reset.h b/include/dt-bindings/reset/bcm6318-reset.h
new file mode 100644
index 000000000000..f882662505ea
--- /dev/null
+++ b/include/dt-bindings/reset/bcm6318-reset.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_RESET_BCM6318_H
+#define __DT_BINDINGS_RESET_BCM6318_H
+
+#define BCM6318_RST_SPI 0
+#define BCM6318_RST_EPHY 1
+#define BCM6318_RST_SAR 2
+#define BCM6318_RST_ENETSW 3
+#define BCM6318_RST_USBD 4
+#define BCM6318_RST_USBH 5
+#define BCM6318_RST_PCIE_CORE 6
+#define BCM6318_RST_PCIE 7
+#define BCM6318_RST_PCIE_EXT 8
+#define BCM6318_RST_PCIE_HARD 9
+#define BCM6318_RST_ADSL 10
+#define BCM6318_RST_PHYMIPS 11
+#define BCM6318_RST_HOSTMIPS 12
+
+#endif /* __DT_BINDINGS_RESET_BCM6318_H */
diff --git a/include/dt-bindings/reset/bcm63268-reset.h b/include/dt-bindings/reset/bcm63268-reset.h
new file mode 100644
index 000000000000..6a6403a4c2d5
--- /dev/null
+++ b/include/dt-bindings/reset/bcm63268-reset.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_RESET_BCM63268_H
+#define __DT_BINDINGS_RESET_BCM63268_H
+
+#define BCM63268_RST_SPI 0
+#define BCM63268_RST_IPSEC 1
+#define BCM63268_RST_EPHY 2
+#define BCM63268_RST_SAR 3
+#define BCM63268_RST_ENETSW 4
+#define BCM63268_RST_USBS 5
+#define BCM63268_RST_USBH 6
+#define BCM63268_RST_PCM 7
+#define BCM63268_RST_PCIE_CORE 8
+#define BCM63268_RST_PCIE 9
+#define BCM63268_RST_PCIE_EXT 10
+#define BCM63268_RST_WLAN_SHIM 11
+#define BCM63268_RST_DDR_PHY 12
+#define BCM63268_RST_FAP0 13
+#define BCM63268_RST_WLAN_UBUS 14
+#define BCM63268_RST_DECT 15
+#define BCM63268_RST_FAP1 16
+#define BCM63268_RST_PCIE_HARD 17
+#define BCM63268_RST_GPHY 18
+
+#endif /* __DT_BINDINGS_RESET_BCM63268_H */
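As in the other BCM63xx headers in this series, these values are bit indices into the SoC's software-reset register. An illustrative consumer (labels assumed):

    #include <dt-bindings/reset/bcm63268-reset.h>

    &ehci {
            /* &periph_rst: assumed label of the BCM63268 reset controller */
            resets = <&periph_rst BCM63268_RST_USBH>;
    };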
diff --git a/include/dt-bindings/reset/bcm6328-reset.h b/include/dt-bindings/reset/bcm6328-reset.h
new file mode 100644
index 000000000000..0f3df87d47af
--- /dev/null
+++ b/include/dt-bindings/reset/bcm6328-reset.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_RESET_BCM6328_H
+#define __DT_BINDINGS_RESET_BCM6328_H
+
+#define BCM6328_RST_SPI 0
+#define BCM6328_RST_EPHY 1
+#define BCM6328_RST_SAR 2
+#define BCM6328_RST_ENETSW 3
+#define BCM6328_RST_USBS 4
+#define BCM6328_RST_USBH 5
+#define BCM6328_RST_PCM 6
+#define BCM6328_RST_PCIE_CORE 7
+#define BCM6328_RST_PCIE 8
+#define BCM6328_RST_PCIE_EXT 9
+#define BCM6328_RST_PCIE_HARD 10
+
+#endif /* __DT_BINDINGS_RESET_BCM6328_H */
diff --git a/include/dt-bindings/reset/bcm6358-reset.h b/include/dt-bindings/reset/bcm6358-reset.h
new file mode 100644
index 000000000000..bda62ef84f5a
--- /dev/null
+++ b/include/dt-bindings/reset/bcm6358-reset.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_RESET_BCM6358_H
+#define __DT_BINDINGS_RESET_BCM6358_H
+
+#define BCM6358_RST_SPI 0
+#define BCM6358_RST_ENET 2
+#define BCM6358_RST_MPI 3
+#define BCM6358_RST_EPHY 6
+#define BCM6358_RST_SAR 7
+#define BCM6358_RST_USBH 12
+#define BCM6358_RST_PCM 13
+#define BCM6358_RST_ADSL 14
+
+#endif /* __DT_BINDINGS_RESET_BCM6358_H */
diff --git a/include/dt-bindings/reset/bcm6362-reset.h b/include/dt-bindings/reset/bcm6362-reset.h
new file mode 100644
index 000000000000..7ebb0546e0ab
--- /dev/null
+++ b/include/dt-bindings/reset/bcm6362-reset.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_RESET_BCM6362_H
+#define __DT_BINDINGS_RESET_BCM6362_H
+
+#define BCM6362_RST_SPI 0
+#define BCM6362_RST_IPSEC 1
+#define BCM6362_RST_EPHY 2
+#define BCM6362_RST_SAR 3
+#define BCM6362_RST_ENETSW 4
+#define BCM6362_RST_USBD 5
+#define BCM6362_RST_USBH 6
+#define BCM6362_RST_PCM 7
+#define BCM6362_RST_PCIE_CORE 8
+#define BCM6362_RST_PCIE 9
+#define BCM6362_RST_PCIE_EXT 10
+#define BCM6362_RST_WLAN_SHIM 11
+#define BCM6362_RST_DDR_PHY 12
+#define BCM6362_RST_FAP 13
+#define BCM6362_RST_WLAN_UBUS 14
+
+#endif /* __DT_BINDINGS_RESET_BCM6362_H */
diff --git a/include/dt-bindings/reset/bcm6368-reset.h b/include/dt-bindings/reset/bcm6368-reset.h
new file mode 100644
index 000000000000..c81d8eb6d173
--- /dev/null
+++ b/include/dt-bindings/reset/bcm6368-reset.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_RESET_BCM6368_H
+#define __DT_BINDINGS_RESET_BCM6368_H
+
+#define BCM6368_RST_SPI 0
+#define BCM6368_RST_MPI 3
+#define BCM6368_RST_IPSEC 4
+#define BCM6368_RST_EPHY 6
+#define BCM6368_RST_SAR 7
+#define BCM6368_RST_SWITCH 10
+#define BCM6368_RST_USBD 11
+#define BCM6368_RST_USBH 12
+#define BCM6368_RST_PCM 13
+
+#endif /* __DT_BINDINGS_RESET_BCM6368_H */
diff --git a/include/dt-bindings/reset/bt1-ccu.h b/include/dt-bindings/reset/bt1-ccu.h
new file mode 100644
index 000000000000..c691efaa678f
--- /dev/null
+++ b/include/dt-bindings/reset/bt1-ccu.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 CCU reset indices
+ */
+#ifndef __DT_BINDINGS_RESET_BT1_CCU_H
+#define __DT_BINDINGS_RESET_BT1_CCU_H
+
+#define CCU_AXI_MAIN_RST 0
+#define CCU_AXI_DDR_RST 1
+#define CCU_AXI_SATA_RST 2
+#define CCU_AXI_GMAC0_RST 3
+#define CCU_AXI_GMAC1_RST 4
+#define CCU_AXI_XGMAC_RST 5
+#define CCU_AXI_PCIE_M_RST 6
+#define CCU_AXI_PCIE_S_RST 7
+#define CCU_AXI_USB_RST 8
+#define CCU_AXI_HWA_RST 9
+#define CCU_AXI_SRAM_RST 10
+
+#define CCU_SYS_SATA_REF_RST 0
+#define CCU_SYS_APB_RST 1
+#define CCU_SYS_DDR_FULL_RST 2
+#define CCU_SYS_DDR_INIT_RST 3
+#define CCU_SYS_PCIE_PCS_PHY_RST 4
+#define CCU_SYS_PCIE_PIPE0_RST 5
+#define CCU_SYS_PCIE_CORE_RST 6
+#define CCU_SYS_PCIE_PWR_RST 7
+#define CCU_SYS_PCIE_STICKY_RST 8
+#define CCU_SYS_PCIE_NSTICKY_RST 9
+#define CCU_SYS_PCIE_HOT_RST 10
+
+#endif /* __DT_BINDINGS_RESET_BT1_CCU_H */
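The AXI and SYS identifiers deliberately overlap, so a consumer disambiguates them by phandle to the corresponding CCU block. Sketch (node labels assumed):

    #include <dt-bindings/reset/bt1-ccu.h>

    &sata {
            /* &ccu_axi and &ccu_sys: assumed labels for the two CCU blocks */
            resets = <&ccu_axi CCU_AXI_SATA_RST>,
                     <&ccu_sys CCU_SYS_SATA_REF_RST>;
            reset-names = "axi", "ref";
    };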
diff --git a/include/dt-bindings/reset/delta,tn48m-reset.h b/include/dt-bindings/reset/delta,tn48m-reset.h
new file mode 100644
index 000000000000..d4e9ed12de3e
--- /dev/null
+++ b/include/dt-bindings/reset/delta,tn48m-reset.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Delta TN48M CPLD GPIO driver
+ *
+ * Copyright (C) 2021 Sartura Ltd.
+ *
+ * Author: Robert Marko <robert.marko@sartura.hr>
+ */
+
+#ifndef _DT_BINDINGS_RESET_TN48M_H
+#define _DT_BINDINGS_RESET_TN48M_H
+
+#define CPU_88F7040_RESET 0
+#define CPU_88F6820_RESET 1
+#define MAC_98DX3265_RESET 2
+#define PHY_88E1680_RESET 3
+#define PHY_88E1512_RESET 4
+#define POE_RESET 5
+
+#endif /* _DT_BINDINGS_RESET_TN48M_H */
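Here the reset provider is a board-level CPLD rather than an SoC block, but the indices flow through the usual resets property. Sketch (labels assumed):

    #include <dt-bindings/reset/delta,tn48m-reset.h>

    &poe_controller {
            /* &cpld: assumed label of the TN48M CPLD reset provider */
            resets = <&cpld POE_RESET>;
    };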
diff --git a/include/dt-bindings/reset/imx8mp-reset.h b/include/dt-bindings/reset/imx8mp-reset.h
new file mode 100644
index 000000000000..2e8c9104b666
--- /dev/null
+++ b/include/dt-bindings/reset/imx8mp-reset.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 NXP
+ */
+
+#ifndef DT_BINDING_RESET_IMX8MP_H
+#define DT_BINDING_RESET_IMX8MP_H
+
+#define IMX8MP_RESET_A53_CORE_POR_RESET0 0
+#define IMX8MP_RESET_A53_CORE_POR_RESET1 1
+#define IMX8MP_RESET_A53_CORE_POR_RESET2 2
+#define IMX8MP_RESET_A53_CORE_POR_RESET3 3
+#define IMX8MP_RESET_A53_CORE_RESET0 4
+#define IMX8MP_RESET_A53_CORE_RESET1 5
+#define IMX8MP_RESET_A53_CORE_RESET2 6
+#define IMX8MP_RESET_A53_CORE_RESET3 7
+#define IMX8MP_RESET_A53_DBG_RESET0 8
+#define IMX8MP_RESET_A53_DBG_RESET1 9
+#define IMX8MP_RESET_A53_DBG_RESET2 10
+#define IMX8MP_RESET_A53_DBG_RESET3 11
+#define IMX8MP_RESET_A53_ETM_RESET0 12
+#define IMX8MP_RESET_A53_ETM_RESET1 13
+#define IMX8MP_RESET_A53_ETM_RESET2 14
+#define IMX8MP_RESET_A53_ETM_RESET3 15
+#define IMX8MP_RESET_A53_SOC_DBG_RESET 16
+#define IMX8MP_RESET_A53_L2RESET 17
+#define IMX8MP_RESET_SW_NON_SCLR_M7C_RST 18
+#define IMX8MP_RESET_OTG1_PHY_RESET 19
+#define IMX8MP_RESET_OTG2_PHY_RESET 20
+#define IMX8MP_RESET_SUPERMIX_RESET 21
+#define IMX8MP_RESET_AUDIOMIX_RESET 22
+#define IMX8MP_RESET_MLMIX_RESET 23
+#define IMX8MP_RESET_PCIEPHY 24
+#define IMX8MP_RESET_PCIEPHY_PERST 25
+#define IMX8MP_RESET_PCIE_CTRL_APPS_EN 26
+#define IMX8MP_RESET_PCIE_CTRL_APPS_TURNOFF 27
+#define IMX8MP_RESET_HDMI_PHY_APB_RESET 28
+#define IMX8MP_RESET_MEDIA_RESET 29
+#define IMX8MP_RESET_GPU2D_RESET 30
+#define IMX8MP_RESET_GPU3D_RESET 31
+#define IMX8MP_RESET_GPU_RESET 32
+#define IMX8MP_RESET_VPU_RESET 33
+#define IMX8MP_RESET_VPU_G1_RESET 34
+#define IMX8MP_RESET_VPU_G2_RESET 35
+#define IMX8MP_RESET_VPUVC8KE_RESET 36
+#define IMX8MP_RESET_NOC_RESET 37
+
+#define IMX8MP_RESET_NUM 38
+
+#endif
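IMX8MP_RESET_NUM bounds the line table a reset driver would register; consumers reference individual lines as below (the &src label follows i.MX convention but is an assumption here):

    #include <dt-bindings/reset/imx8mp-reset.h>

    &pcie_phy {
            /* &src: assumed label of the i.MX8MP system reset controller */
            resets = <&src IMX8MP_RESET_PCIEPHY>;
            reset-names = "pciephy";
    };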
diff --git a/include/dt-bindings/reset/imx8mq-reset.h b/include/dt-bindings/reset/imx8mq-reset.h
index 9a301082d361..705870693ec2 100644
--- a/include/dt-bindings/reset/imx8mq-reset.h
+++ b/include/dt-bindings/reset/imx8mq-reset.h
@@ -28,37 +28,40 @@
#define IMX8MQ_RESET_A53_L2RESET 17
#define IMX8MQ_RESET_SW_NON_SCLR_M4C_RST 18
#define IMX8MQ_RESET_OTG1_PHY_RESET 19
-#define IMX8MQ_RESET_OTG2_PHY_RESET 20
-#define IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N 21
-#define IMX8MQ_RESET_MIPI_DSI_RESET_N 22
-#define IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N 23
-#define IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N 24
-#define IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N 25
-#define IMX8MQ_RESET_PCIEPHY 26
-#define IMX8MQ_RESET_PCIEPHY_PERST 27
-#define IMX8MQ_RESET_PCIE_CTRL_APPS_EN 28
-#define IMX8MQ_RESET_PCIE_CTRL_APPS_TURNOFF 29
-#define IMX8MQ_RESET_HDMI_PHY_APB_RESET 30 /* i.MX8MM does NOT support */
+#define IMX8MQ_RESET_OTG2_PHY_RESET 20 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N 21 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_MIPI_DSI_RESET_N 22 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N 23 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N 24 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N 25 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_PCIEPHY 26 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_PCIEPHY_PERST 27 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_PCIE_CTRL_APPS_EN 28 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_PCIE_CTRL_APPS_TURNOFF 29 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_HDMI_PHY_APB_RESET 30 /* i.MX8MM/i.MX8MN does NOT support */
#define IMX8MQ_RESET_DISP_RESET 31
#define IMX8MQ_RESET_GPU_RESET 32
-#define IMX8MQ_RESET_VPU_RESET 33
-#define IMX8MQ_RESET_PCIEPHY2 34 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_PCIEPHY2_PERST 35 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_PCIE2_CTRL_APPS_EN 36 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF 37 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_MIPI_CSI1_CORE_RESET 38 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_MIPI_CSI1_PHY_REF_RESET 39 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_MIPI_CSI1_ESC_RESET 40 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_MIPI_CSI2_CORE_RESET 41 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_MIPI_CSI2_PHY_REF_RESET 42 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_MIPI_CSI2_ESC_RESET 43 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_DDRC1_PRST 44
-#define IMX8MQ_RESET_DDRC1_CORE_RESET 45
-#define IMX8MQ_RESET_DDRC1_PHY_RESET 46
-#define IMX8MQ_RESET_DDRC2_PRST 47 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_DDRC2_CORE_RESET 48 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_DDRC2_PHY_RESET 49 /* i.MX8MM does NOT support */
+#define IMX8MQ_RESET_VPU_RESET 33 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_PCIEPHY2 34 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_PCIEPHY2_PERST 35 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_PCIE2_CTRL_APPS_EN 36 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF 37 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_MIPI_CSI1_CORE_RESET 38 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_MIPI_CSI1_PHY_REF_RESET 39 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_MIPI_CSI1_ESC_RESET 40 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_MIPI_CSI2_CORE_RESET 41 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_MIPI_CSI2_PHY_REF_RESET 42 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_MIPI_CSI2_ESC_RESET 43 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_DDRC1_PRST 44 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_DDRC1_CORE_RESET 45 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_DDRC1_PHY_RESET 46 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_DDRC2_PRST 47 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_DDRC2_CORE_RESET 48 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_DDRC2_PHY_RESET 49 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_SW_M4C_RST 50
+#define IMX8MQ_RESET_SW_M4P_RST 51
+#define IMX8MQ_RESET_M4_ENABLE 52

-#define IMX8MQ_RESET_NUM 50
+#define IMX8MQ_RESET_NUM 53

#endif
diff --git a/include/dt-bindings/reset/imx8ulp-pcc-reset.h b/include/dt-bindings/reset/imx8ulp-pcc-reset.h
new file mode 100644
index 000000000000..e99a4735c3c4
--- /dev/null
+++ b/include/dt-bindings/reset/imx8ulp-pcc-reset.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2021 NXP
+ */
+
+#ifndef DT_BINDING_PCC_RESET_IMX8ULP_H
+#define DT_BINDING_PCC_RESET_IMX8ULP_H
+
+/* PCC3 */
+#define PCC3_WDOG3_SWRST 0
+#define PCC3_WDOG4_SWRST 1
+#define PCC3_LPIT1_SWRST 2
+#define PCC3_TPM4_SWRST 3
+#define PCC3_TPM5_SWRST 4
+#define PCC3_FLEXIO1_SWRST 5
+#define PCC3_I3C2_SWRST 6
+#define PCC3_LPI2C4_SWRST 7
+#define PCC3_LPI2C5_SWRST 8
+#define PCC3_LPUART4_SWRST 9
+#define PCC3_LPUART5_SWRST 10
+#define PCC3_LPSPI4_SWRST 11
+#define PCC3_LPSPI5_SWRST 12
+
+/* PCC4 */
+#define PCC4_FLEXSPI2_SWRST 0
+#define PCC4_TPM6_SWRST 1
+#define PCC4_TPM7_SWRST 2
+#define PCC4_LPI2C6_SWRST 3
+#define PCC4_LPI2C7_SWRST 4
+#define PCC4_LPUART6_SWRST 5
+#define PCC4_LPUART7_SWRST 6
+#define PCC4_SAI4_SWRST 7
+#define PCC4_SAI5_SWRST 8
+#define PCC4_USDHC0_SWRST 9
+#define PCC4_USDHC1_SWRST 10
+#define PCC4_USDHC2_SWRST 11
+#define PCC4_USB0_SWRST 12
+#define PCC4_USB0_PHY_SWRST 13
+#define PCC4_USB1_SWRST 14
+#define PCC4_USB1_PHY_SWRST 15
+#define PCC4_ENET_SWRST 16
+
+/* PCC5 */
+#define PCC5_TPM8_SWRST 0
+#define PCC5_SAI6_SWRST 1
+#define PCC5_SAI7_SWRST 2
+#define PCC5_SPDIF_SWRST 3
+#define PCC5_ISI_SWRST 4
+#define PCC5_CSI_REGS_SWRST 5
+#define PCC5_CSI_SWRST 6
+#define PCC5_DSI_SWRST 7
+#define PCC5_WDOG5_SWRST 8
+#define PCC5_EPDC_SWRST 9
+#define PCC5_PXP_SWRST 10
+#define PCC5_GPU2D_SWRST 11
+#define PCC5_GPU3D_SWRST 12
+#define PCC5_DC_NANO_SWRST 13
+
+#endif /* DT_BINDING_PCC_RESET_IMX8ULP_H */
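Indices restart at zero for each PCC instance, so the provider phandle selects the bank. Sketch (node labels assumed):

    #include <dt-bindings/reset/imx8ulp-pcc-reset.h>

    &usdhc0 {
            /* &pcc4: assumed label of the PCC4 clock/reset module */
            resets = <&pcc4 PCC4_USDHC0_SWRST>;
    };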
diff --git a/include/dt-bindings/reset/k210-rst.h b/include/dt-bindings/reset/k210-rst.h
new file mode 100644
index 000000000000..883c1aed50e8
--- /dev/null
+++ b/include/dt-bindings/reset/k210-rst.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2019 Sean Anderson <seanga2@gmail.com>
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ */
+#ifndef RESET_K210_SYSCTL_H
+#define RESET_K210_SYSCTL_H
+
+/*
+ * Kendryte K210 SoC system controller K210_SYSCTL_SOFT_RESET register bits.
+ * Taken from Kendryte SDK (kendryte-standalone-sdk).
+ */
+#define K210_RST_ROM 0
+#define K210_RST_DMA 1
+#define K210_RST_AI 2
+#define K210_RST_DVP 3
+#define K210_RST_FFT 4
+#define K210_RST_GPIO 5
+#define K210_RST_SPI0 6
+#define K210_RST_SPI1 7
+#define K210_RST_SPI2 8
+#define K210_RST_SPI3 9
+#define K210_RST_I2S0 10
+#define K210_RST_I2S1 11
+#define K210_RST_I2S2 12
+#define K210_RST_I2C0 13
+#define K210_RST_I2C1 14
+#define K210_RST_I2C2 15
+#define K210_RST_UART1 16
+#define K210_RST_UART2 17
+#define K210_RST_UART3 18
+#define K210_RST_AES 19
+#define K210_RST_FPIOA 20
+#define K210_RST_TIMER0 21
+#define K210_RST_TIMER1 22
+#define K210_RST_TIMER2 23
+#define K210_RST_WDT0 24
+#define K210_RST_WDT1 25
+#define K210_RST_SHA 26
+#define K210_RST_RTC 29
+
+#endif /* RESET_K210_SYSCTL_H */
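Because the values are bit numbers of K210_SYSCTL_SOFT_RESET, a consumer can pass them directly as the reset specifier. Sketch (labels assumed):

    #include <dt-bindings/reset/k210-rst.h>

    &spi0 {
            /* &sysrst: assumed label of the K210 sysctl reset node */
            resets = <&sysrst K210_RST_SPI0>;
    };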
diff --git a/include/dt-bindings/reset/mediatek,mt6795-resets.h b/include/dt-bindings/reset/mediatek,mt6795-resets.h
new file mode 100644
index 000000000000..5464a4a79a70
--- /dev/null
+++ b/include/dt-bindings/reset/mediatek,mt6795-resets.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT6795
+#define _DT_BINDINGS_RESET_CONTROLLER_MT6795
+
+/* INFRACFG resets */
+#define MT6795_INFRA_RST0_SCPSYS_RST 0
+#define MT6795_INFRA_RST0_PMIC_WRAP_RST 1
+#define MT6795_INFRA_RST1_MIPI_DSI_RST 2
+#define MT6795_INFRA_RST1_MIPI_CSI_RST 3
+#define MT6795_INFRA_RST1_MM_IOMMU_RST 4
+
+/* MMSYS resets */
+#define MT6795_MMSYS_SW0_RST_B_SMI_COMMON 0
+#define MT6795_MMSYS_SW0_RST_B_SMI_LARB 1
+#define MT6795_MMSYS_SW0_RST_B_CAM_MDP 2
+#define MT6795_MMSYS_SW0_RST_B_MDP_RDMA0 3
+#define MT6795_MMSYS_SW0_RST_B_MDP_RDMA1 4
+#define MT6795_MMSYS_SW0_RST_B_MDP_RSZ0 5
+#define MT6795_MMSYS_SW0_RST_B_MDP_RSZ1 6
+#define MT6795_MMSYS_SW0_RST_B_MDP_RSZ2 7
+#define MT6795_MMSYS_SW0_RST_B_MDP_TDSHP0 8
+#define MT6795_MMSYS_SW0_RST_B_MDP_TDSHP1 9
+#define MT6795_MMSYS_SW0_RST_B_MDP_WDMA 10
+#define MT6795_MMSYS_SW0_RST_B_MDP_WROT0 11
+#define MT6795_MMSYS_SW0_RST_B_MDP_WROT1 12
+#define MT6795_MMSYS_SW0_RST_B_MDP_CROP 13
+
+/* PERICFG resets */
+#define MT6795_PERI_NFI_SW_RST 0
+#define MT6795_PERI_THERM_SW_RST 1
+#define MT6795_PERI_MSDC1_SW_RST 2
+
+/* TOPRGU resets */
+#define MT6795_TOPRGU_INFRA_SW_RST 0
+#define MT6795_TOPRGU_MM_SW_RST 1
+#define MT6795_TOPRGU_MFG_SW_RST 2
+#define MT6795_TOPRGU_VENC_SW_RST 3
+#define MT6795_TOPRGU_VDEC_SW_RST 4
+#define MT6795_TOPRGU_IMG_SW_RST 5
+#define MT6795_TOPRGU_DDRPHY_SW_RST 6
+#define MT6795_TOPRGU_MD_SW_RST 7
+#define MT6795_TOPRGU_INFRA_AO_SW_RST 8
+#define MT6795_TOPRGU_MD_LITE_SW_RST 9
+#define MT6795_TOPRGU_APMIXED_SW_RST 10
+#define MT6795_TOPRGU_PWRAP_SPI_CTL_RST 11
+#define MT6795_TOPRGU_SW_RST_NUM 12
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT6795 */
diff --git a/include/dt-bindings/reset-controller/mt2712-resets.h b/include/dt-bindings/reset/mt2712-resets.h
index 9e7ee762f076..9e7ee762f076 100644
--- a/include/dt-bindings/reset-controller/mt2712-resets.h
+++ b/include/dt-bindings/reset/mt2712-resets.h
diff --git a/include/dt-bindings/reset/mt7621-reset.h b/include/dt-bindings/reset/mt7621-reset.h
new file mode 100644
index 000000000000..7572c6b41453
--- /dev/null
+++ b/include/dt-bindings/reset/mt7621-reset.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021 Sergio Paracuellos
+ * Author: Sergio Paracuellos <sergio.paracuellos@gmail.com>
+ */
+
+#ifndef DT_BINDING_MT7621_RESET_H
+#define DT_BINDING_MT7621_RESET_H
+
+#define MT7621_RST_SYS 0
+#define MT7621_RST_MCM 2
+#define MT7621_RST_HSDMA 5
+#define MT7621_RST_FE 6
+#define MT7621_RST_SPDIFTX 7
+#define MT7621_RST_TIMER 8
+#define MT7621_RST_INT 9
+#define MT7621_RST_MC 10
+#define MT7621_RST_PCM 11
+#define MT7621_RST_PIO 13
+#define MT7621_RST_GDMA 14
+#define MT7621_RST_NFI 15
+#define MT7621_RST_I2C 16
+#define MT7621_RST_I2S 17
+#define MT7621_RST_SPI 18
+#define MT7621_RST_UART1 19
+#define MT7621_RST_UART2 20
+#define MT7621_RST_UART3 21
+#define MT7621_RST_ETH 23
+#define MT7621_RST_PCIE0 24
+#define MT7621_RST_PCIE1 25
+#define MT7621_RST_PCIE2 26
+#define MT7621_RST_AUX_STCK 28
+#define MT7621_RST_CRYPTO 29
+#define MT7621_RST_SDXC 30
+#define MT7621_RST_PPE 31
+
+#endif /* DT_BINDING_MT7621_RESET_H */
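Note the gaps in the numbering (1, 3-4, 12, 22, 27), where the register has no documented reset line. An illustrative consumer (the &sysc label is an assumption):

    #include <dt-bindings/reset/mt7621-reset.h>

    &eth {
            resets = <&sysc MT7621_RST_FE>, <&sysc MT7621_RST_ETH>;
            reset-names = "fe", "eth";
    };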
diff --git a/include/dt-bindings/reset/mt7986-resets.h b/include/dt-bindings/reset/mt7986-resets.h
new file mode 100644
index 000000000000..af3d16c81192
--- /dev/null
+++ b/include/dt-bindings/reset/mt7986-resets.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT7986
+#define _DT_BINDINGS_RESET_CONTROLLER_MT7986
+
+/* INFRACFG resets */
+#define MT7986_INFRACFG_PEXTP_MAC_SW_RST 6
+#define MT7986_INFRACFG_SSUSB_SW_RST 7
+#define MT7986_INFRACFG_EIP97_SW_RST 8
+#define MT7986_INFRACFG_AUDIO_SW_RST 13
+#define MT7986_INFRACFG_CQ_DMA_SW_RST 14
+
+#define MT7986_INFRACFG_TRNG_SW_RST 17
+#define MT7986_INFRACFG_AP_DMA_SW_RST 32
+#define MT7986_INFRACFG_I2C_SW_RST 33
+#define MT7986_INFRACFG_NFI_SW_RST 34
+#define MT7986_INFRACFG_SPI0_SW_RST 35
+#define MT7986_INFRACFG_SPI1_SW_RST 36
+#define MT7986_INFRACFG_UART0_SW_RST 37
+#define MT7986_INFRACFG_UART1_SW_RST 38
+#define MT7986_INFRACFG_UART2_SW_RST 39
+#define MT7986_INFRACFG_AUXADC_SW_RST 43
+
+#define MT7986_INFRACFG_APXGPT_SW_RST 66
+#define MT7986_INFRACFG_PWM_SW_RST 68
+
+#define MT7986_INFRACFG_SW_RST_NUM 69
+
+/* TOPRGU resets */
+#define MT7986_TOPRGU_APMIXEDSYS_SW_RST 0
+#define MT7986_TOPRGU_SGMII0_SW_RST 1
+#define MT7986_TOPRGU_SGMII1_SW_RST 2
+#define MT7986_TOPRGU_INFRA_SW_RST 3
+#define MT7986_TOPRGU_U2PHY_SW_RST 5
+#define MT7986_TOPRGU_PCIE_SW_RST 6
+#define MT7986_TOPRGU_SSUSB_SW_RST 7
+#define MT7986_TOPRGU_ETHDMA_SW_RST 20
+#define MT7986_TOPRGU_CONSYS_SW_RST 23
+
+#define MT7986_TOPRGU_SW_RST_NUM 24
+
+/* ETHSYS Subsystem resets */
+#define MT7986_ETHSYS_FE_SW_RST 6
+#define MT7986_ETHSYS_PMTR_SW_RST 8
+#define MT7986_ETHSYS_GMAC_SW_RST 23
+#define MT7986_ETHSYS_PPE0_SW_RST 30
+#define MT7986_ETHSYS_PPE1_SW_RST 31
+
+#define MT7986_ETHSYS_SW_RST_NUM 32
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT7986 */
diff --git a/include/dt-bindings/reset/mt8173-resets.h b/include/dt-bindings/reset/mt8173-resets.h
index ba8636eda5ae..6a60c7cecc4c 100644
--- a/include/dt-bindings/reset/mt8173-resets.h
+++ b/include/dt-bindings/reset/mt8173-resets.h
@@ -27,6 +27,8 @@
#define MT8173_INFRA_GCE_FAXI_RST 40
#define MT8173_INFRA_MMIOMMURST 47

+/* MMSYS resets */
+#define MT8173_MMSYS_SW0_RST_B_DISP_DSI0 25
+
/* PERICFG resets */
#define MT8173_PERI_UART0_SW_RST 0
diff --git a/include/dt-bindings/reset-controller/mt8183-resets.h b/include/dt-bindings/reset/mt8183-resets.h
index a1bbd41e0d12..48c5d2de0a38 100644
--- a/include/dt-bindings/reset-controller/mt8183-resets.h
+++ b/include/dt-bindings/reset/mt8183-resets.h
@@ -80,6 +80,9 @@
#define MT8183_INFRACFG_SW_RST_NUM 128

+/* MMSYS resets */
+#define MT8183_MMSYS_SW0_RST_B_DISP_DSI0 25
+
#define MT8183_TOPRGU_MM_SW_RST 1
#define MT8183_TOPRGU_MFG_SW_RST 2
#define MT8183_TOPRGU_VENC_SW_RST 3
diff --git a/include/dt-bindings/reset/mt8186-resets.h b/include/dt-bindings/reset/mt8186-resets.h
new file mode 100644
index 000000000000..2e9029c22f38
--- /dev/null
+++ b/include/dt-bindings/reset/mt8186-resets.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Runyang Chen <runyang.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8186
+#define _DT_BINDINGS_RESET_CONTROLLER_MT8186
+
+/* TOPRGU resets */
+#define MT8186_TOPRGU_INFRA_SW_RST 0
+#define MT8186_TOPRGU_MM_SW_RST 1
+#define MT8186_TOPRGU_MFG_SW_RST 2
+#define MT8186_TOPRGU_VENC_SW_RST 3
+#define MT8186_TOPRGU_VDEC_SW_RST 4
+#define MT8186_TOPRGU_IMG_SW_RST 5
+#define MT8186_TOPRGU_DDR_SW_RST 6
+#define MT8186_TOPRGU_INFRA_AO_SW_RST 8
+#define MT8186_TOPRGU_CONNSYS_SW_RST 9
+#define MT8186_TOPRGU_APMIXED_SW_RST 10
+#define MT8186_TOPRGU_PWRAP_SW_RST 11
+#define MT8186_TOPRGU_CONN_MCU_SW_RST 12
+#define MT8186_TOPRGU_IPNNA_SW_RST 13
+#define MT8186_TOPRGU_WPE_SW_RST 14
+#define MT8186_TOPRGU_ADSP_SW_RST 15
+#define MT8186_TOPRGU_AUDIO_SW_RST 17
+#define MT8186_TOPRGU_CAM_MAIN_SW_RST 18
+#define MT8186_TOPRGU_CAM_RAWA_SW_RST 19
+#define MT8186_TOPRGU_CAM_RAWB_SW_RST 20
+#define MT8186_TOPRGU_IPE_SW_RST 21
+#define MT8186_TOPRGU_IMG2_SW_RST 22
+#define MT8186_TOPRGU_SW_RST_NUM 23
+
+/* MMSYS resets */
+#define MT8186_MMSYS_SW0_RST_B_DISP_DSI0 19
+
+/* INFRA resets */
+#define MT8186_INFRA_THERMAL_CTRL_RST 0
+#define MT8186_INFRA_PTP_CTRL_RST 1
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8186 */
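On MediaTek parts the TOPRGU lines are typically exposed through the watchdog node, while the INFRA pair would come from infracfg; a hedged sketch (labels assumed):

    #include <dt-bindings/reset/mt8186-resets.h>

    &adsp {
            /* &watchdog: assumed label of the MT8186 TOPRGU/watchdog node */
            resets = <&watchdog MT8186_TOPRGU_ADSP_SW_RST>;
    };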
diff --git a/include/dt-bindings/reset/mt8192-resets.h b/include/dt-bindings/reset/mt8192-resets.h
new file mode 100644
index 000000000000..12e2087c90a3
--- /dev/null
+++ b/include/dt-bindings/reset/mt8192-resets.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author: Yong Liang <yong.liang@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8192
+#define _DT_BINDINGS_RESET_CONTROLLER_MT8192
+
+/* TOPRGU resets */
+#define MT8192_TOPRGU_MM_SW_RST 1
+#define MT8192_TOPRGU_MFG_SW_RST 2
+#define MT8192_TOPRGU_VENC_SW_RST 3
+#define MT8192_TOPRGU_VDEC_SW_RST 4
+#define MT8192_TOPRGU_IMG_SW_RST 5
+#define MT8192_TOPRGU_MD_SW_RST 7
+#define MT8192_TOPRGU_CONN_SW_RST 9
+#define MT8192_TOPRGU_CONN_MCU_SW_RST 12
+#define MT8192_TOPRGU_IPU0_SW_RST 14
+#define MT8192_TOPRGU_IPU1_SW_RST 15
+#define MT8192_TOPRGU_AUDIO_SW_RST 17
+#define MT8192_TOPRGU_CAMSYS_SW_RST 18
+#define MT8192_TOPRGU_MJC_SW_RST 19
+#define MT8192_TOPRGU_C2K_S2_SW_RST 20
+#define MT8192_TOPRGU_C2K_SW_RST 21
+#define MT8192_TOPRGU_PERI_SW_RST 22
+#define MT8192_TOPRGU_PERI_AO_SW_RST 23
+
+#define MT8192_TOPRGU_SW_RST_NUM 23
+
+/* MMSYS resets */
+#define MT8192_MMSYS_SW0_RST_B_DISP_DSI0 15
+
+/* INFRA resets */
+#define MT8192_INFRA_RST0_THERM_CTRL_SWRST 0
+#define MT8192_INFRA_RST2_PEXTP_PHY_SWRST 1
+#define MT8192_INFRA_RST3_THERM_CTRL_PTP_SWRST 2
+#define MT8192_INFRA_RST4_PCIE_TOP_SWRST 3
+#define MT8192_INFRA_RST4_THERM_CTRL_MCU_SWRST 4
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8192 */
diff --git a/include/dt-bindings/reset/mt8195-resets.h b/include/dt-bindings/reset/mt8195-resets.h
new file mode 100644
index 000000000000..24ab3631dcea
--- /dev/null
+++ b/include/dt-bindings/reset/mt8195-resets.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Christine Zhu <christine.zhu@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8195
+#define _DT_BINDINGS_RESET_CONTROLLER_MT8195
+
+/* TOPRGU resets */
+#define MT8195_TOPRGU_CONN_MCU_SW_RST 0
+#define MT8195_TOPRGU_INFRA_GRST_SW_RST 1
+#define MT8195_TOPRGU_APU_SW_RST 2
+#define MT8195_TOPRGU_INFRA_AO_GRST_SW_RST 6
+#define MT8195_TOPRGU_MMSYS_SW_RST 7
+#define MT8195_TOPRGU_MFG_SW_RST 8
+#define MT8195_TOPRGU_VENC_SW_RST 9
+#define MT8195_TOPRGU_VDEC_SW_RST 10
+#define MT8195_TOPRGU_IMG_SW_RST 11
+#define MT8195_TOPRGU_APMIXEDSYS_SW_RST 13
+#define MT8195_TOPRGU_AUDIO_SW_RST 14
+#define MT8195_TOPRGU_CAMSYS_SW_RST 15
+#define MT8195_TOPRGU_EDPTX_SW_RST 16
+#define MT8195_TOPRGU_ADSPSYS_SW_RST 21
+#define MT8195_TOPRGU_DPTX_SW_RST 22
+#define MT8195_TOPRGU_SPMI_MST_SW_RST 23
+
+#define MT8195_TOPRGU_SW_RST_NUM 16
+
+/* INFRA resets */
+#define MT8195_INFRA_RST0_THERM_CTRL_SWRST 0
+#define MT8195_INFRA_RST3_THERM_CTRL_PTP_SWRST 1
+#define MT8195_INFRA_RST4_THERM_CTRL_MCU_SWRST 2
+#define MT8195_INFRA_RST2_PCIE_P0_SWRST 3
+#define MT8195_INFRA_RST2_PCIE_P1_SWRST 4
+#define MT8195_INFRA_RST2_USBSIF_P1_SWRST 5
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8195 */
diff --git a/include/dt-bindings/reset/qcom,gcc-ipq806x.h b/include/dt-bindings/reset/qcom,gcc-ipq806x.h
index 26b6f9200620..020c9cf18751 100644
--- a/include/dt-bindings/reset/qcom,gcc-ipq806x.h
+++ b/include/dt-bindings/reset/qcom,gcc-ipq806x.h
@@ -163,5 +163,10 @@
#define NSS_CAL_PRBS_RST_N_RESET 154
#define NSS_LCKDT_RST_N_RESET 155
#define NSS_SRDS_N_RESET 156
+#define CRYPTO_ENG1_RESET 157
+#define CRYPTO_ENG2_RESET 158
+#define CRYPTO_ENG3_RESET 159
+#define CRYPTO_ENG4_RESET 160
+#define CRYPTO_AHB_RESET 161

#endif
diff --git a/include/dt-bindings/reset/qcom,gcc-msm8939.h b/include/dt-bindings/reset/qcom,gcc-msm8939.h
new file mode 100644
index 000000000000..fa41ffeae7a2
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,gcc-msm8939.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_RESET_MSM_GCC_8939_H
+#define _DT_BINDINGS_RESET_MSM_GCC_8939_H
+
+#define GCC_BLSP1_BCR 0
+#define GCC_BLSP1_QUP1_BCR 1
+#define GCC_BLSP1_UART1_BCR 2
+#define GCC_BLSP1_QUP2_BCR 3
+#define GCC_BLSP1_UART2_BCR 4
+#define GCC_BLSP1_QUP3_BCR 5
+#define GCC_BLSP1_QUP4_BCR 6
+#define GCC_BLSP1_QUP5_BCR 7
+#define GCC_BLSP1_QUP6_BCR 8
+#define GCC_IMEM_BCR 9
+#define GCC_SMMU_BCR 10
+#define GCC_APSS_TCU_BCR 11
+#define GCC_SMMU_XPU_BCR 12
+#define GCC_PCNOC_TBU_BCR 13
+#define GCC_PRNG_BCR 14
+#define GCC_BOOT_ROM_BCR 15
+#define GCC_CRYPTO_BCR 16
+#define GCC_SEC_CTRL_BCR 17
+#define GCC_AUDIO_CORE_BCR 18
+#define GCC_ULT_AUDIO_BCR 19
+#define GCC_DEHR_BCR 20
+#define GCC_SYSTEM_NOC_BCR 21
+#define GCC_PCNOC_BCR 22
+#define GCC_TCSR_BCR 23
+#define GCC_QDSS_BCR 24
+#define GCC_DCD_BCR 25
+#define GCC_MSG_RAM_BCR 26
+#define GCC_MPM_BCR 27
+#define GCC_SPMI_BCR 28
+#define GCC_SPDM_BCR 29
+#define GCC_MM_SPDM_BCR 30
+#define GCC_BIMC_BCR 31
+#define GCC_RBCPR_BCR 32
+#define GCC_TLMM_BCR 33
+#define GCC_USB_HS_BCR 34
+#define GCC_USB2A_PHY_BCR 35
+#define GCC_SDCC1_BCR 36
+#define GCC_SDCC2_BCR 37
+#define GCC_PDM_BCR 38
+#define GCC_SNOC_BUS_TIMEOUT0_BCR 39
+#define GCC_PCNOC_BUS_TIMEOUT0_BCR 40
+#define GCC_PCNOC_BUS_TIMEOUT1_BCR 41
+#define GCC_PCNOC_BUS_TIMEOUT2_BCR 42
+#define GCC_PCNOC_BUS_TIMEOUT3_BCR 43
+#define GCC_PCNOC_BUS_TIMEOUT4_BCR 44
+#define GCC_PCNOC_BUS_TIMEOUT5_BCR 45
+#define GCC_PCNOC_BUS_TIMEOUT6_BCR 46
+#define GCC_PCNOC_BUS_TIMEOUT7_BCR 47
+#define GCC_PCNOC_BUS_TIMEOUT8_BCR 48
+#define GCC_PCNOC_BUS_TIMEOUT9_BCR 49
+#define GCC_MMSS_BCR 50
+#define GCC_VENUS0_BCR 51
+#define GCC_MDSS_BCR 52
+#define GCC_CAMSS_PHY0_BCR 53
+#define GCC_CAMSS_CSI0_BCR 54
+#define GCC_CAMSS_CSI0PHY_BCR 55
+#define GCC_CAMSS_CSI0RDI_BCR 56
+#define GCC_CAMSS_CSI0PIX_BCR 57
+#define GCC_CAMSS_PHY1_BCR 58
+#define GCC_CAMSS_CSI1_BCR 59
+#define GCC_CAMSS_CSI1PHY_BCR 60
+#define GCC_CAMSS_CSI1RDI_BCR 61
+#define GCC_CAMSS_CSI1PIX_BCR 62
+#define GCC_CAMSS_ISPIF_BCR 63
+#define GCC_CAMSS_CCI_BCR 64
+#define GCC_CAMSS_MCLK0_BCR 65
+#define GCC_CAMSS_MCLK1_BCR 66
+#define GCC_CAMSS_GP0_BCR 67
+#define GCC_CAMSS_GP1_BCR 68
+#define GCC_CAMSS_TOP_BCR 69
+#define GCC_CAMSS_MICRO_BCR 70
+#define GCC_CAMSS_JPEG_BCR 71
+#define GCC_CAMSS_VFE_BCR 72
+#define GCC_CAMSS_CSI_VFE0_BCR 73
+#define GCC_OXILI_BCR 74
+#define GCC_GMEM_BCR 75
+#define GCC_CAMSS_AHB_BCR 76
+#define GCC_MDP_TBU_BCR 77
+#define GCC_GFX_TBU_BCR 78
+#define GCC_GFX_TCU_BCR 79
+#define GCC_MSS_TBU_AXI_BCR 80
+#define GCC_MSS_TBU_GSS_AXI_BCR 81
+#define GCC_MSS_TBU_Q6_AXI_BCR 82
+#define GCC_GTCU_AHB_BCR 83
+#define GCC_SMMU_CFG_BCR 84
+#define GCC_VFE_TBU_BCR 85
+#define GCC_VENUS_TBU_BCR 86
+#define GCC_JPEG_TBU_BCR 87
+#define GCC_PRONTO_TBU_BCR 88
+#define GCC_SMMU_CATS_BCR 89
+#define GCC_BLSP1_UART3_BCR 90
+#define GCC_CAMSS_CSI2_BCR 91
+#define GCC_CAMSS_CSI2PHY_BCR 92
+#define GCC_CAMSS_CSI2RDI_BCR 93
+#define GCC_CAMSS_CSI2PIX_BCR 94
+#define GCC_USB_FS_BCR 95
+#define GCC_BLSP1_QUP4_SPI_APPS_CBCR 96
+#define GCC_CAMSS_MCLK2_BCR 97
+#define GCC_CPP_TBU_BCR 98
+#define GCC_MDP_RT_TBU_BCR 99
+
+#endif
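Each BCR (block control register) index is mapped by the GCC driver to a register offset, so consumers only name the phandle and index. Sketch (labels assumed):

    #include <dt-bindings/reset/qcom,gcc-msm8939.h>

    &sdhc_1 {
            /* &gcc: assumed label of the MSM8939 global clock controller */
            resets = <&gcc GCC_SDCC1_BCR>;
            reset-names = "core";
    };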
diff --git a/include/dt-bindings/reset/qcom,sdm845-pdc.h b/include/dt-bindings/reset/qcom,sdm845-pdc.h
index 53c37f9c319a..03a0c0eb8147 100644
--- a/include/dt-bindings/reset/qcom,sdm845-pdc.h
+++ b/include/dt-bindings/reset/qcom,sdm845-pdc.h
@@ -16,5 +16,7 @@
#define PDC_DISPLAY_SYNC_RESET 7
#define PDC_COMPUTE_SYNC_RESET 8
#define PDC_MODEM_SYNC_RESET 9
+#define PDC_WLAN_RF_SYNC_RESET 10
+#define PDC_WPSS_SYNC_RESET 11

#endif
diff --git a/include/dt-bindings/reset/raspberrypi,firmware-reset.h b/include/dt-bindings/reset/raspberrypi,firmware-reset.h
new file mode 100644
index 000000000000..1a4f4c792723
--- /dev/null
+++ b/include/dt-bindings/reset/raspberrypi,firmware-reset.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Nicolas Saenz Julienne
+ * Author: Nicolas Saenz Julienne <nsaenzjulienne@suse.com>
+ */
+
+#ifndef _DT_BINDINGS_RASPBERRYPI_FIRMWARE_RESET_H
+#define _DT_BINDINGS_RASPBERRYPI_FIRMWARE_RESET_H
+
+#define RASPBERRYPI_FIRMWARE_RESET_ID_USB 0
+#define RASPBERRYPI_FIRMWARE_RESET_NUM_IDS 1
+
+#endif
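A single ID plus a count keeps the contract with the VideoCore firmware explicit; the USB consumer would look like this (labels assumed):

    #include <dt-bindings/reset/raspberrypi,firmware-reset.h>

    &xhci {
            /* &reset: assumed label of the firmware reset node */
            resets = <&reset RASPBERRYPI_FIRMWARE_RESET_ID_USB>;
    };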
diff --git a/include/dt-bindings/reset/realtek,rtd1195.h b/include/dt-bindings/reset/realtek,rtd1195.h
new file mode 100644
index 000000000000..27902abf935b
--- /dev/null
+++ b/include/dt-bindings/reset/realtek,rtd1195.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) */
+/*
+ * Realtek RTD1195 reset controllers
+ *
+ * Copyright (c) 2017 Andreas Färber
+ */
+#ifndef DT_BINDINGS_RESET_RTD1195_H
+#define DT_BINDINGS_RESET_RTD1195_H
+
+/* soft reset 1 */
+#define RTD1195_RSTN_MISC 0
+#define RTD1195_RSTN_RNG 1
+#define RTD1195_RSTN_USB3_POW 2
+#define RTD1195_RSTN_GSPI 3
+#define RTD1195_RSTN_USB3_P0_MDIO 4
+#define RTD1195_RSTN_VE_H265 5
+#define RTD1195_RSTN_USB 6
+#define RTD1195_RSTN_USB_PHY0 8
+#define RTD1195_RSTN_USB_PHY1 9
+#define RTD1195_RSTN_HDMIRX 11
+#define RTD1195_RSTN_HDMI 12
+#define RTD1195_RSTN_ETN 14
+#define RTD1195_RSTN_AIO 15
+#define RTD1195_RSTN_GPU 16
+#define RTD1195_RSTN_VE_H264 17
+#define RTD1195_RSTN_VE_JPEG 18
+#define RTD1195_RSTN_TVE 19
+#define RTD1195_RSTN_VO 20
+#define RTD1195_RSTN_LVDS 21
+#define RTD1195_RSTN_SE 22
+#define RTD1195_RSTN_DCU 23
+#define RTD1195_RSTN_DC_PHY 24
+#define RTD1195_RSTN_CP 25
+#define RTD1195_RSTN_MD 26
+#define RTD1195_RSTN_TP 27
+#define RTD1195_RSTN_AE 28
+#define RTD1195_RSTN_NF 29
+#define RTD1195_RSTN_MIPI 30
+
+/* soft reset 2 */
+#define RTD1195_RSTN_ACPU 0
+#define RTD1195_RSTN_VCPU 1
+#define RTD1195_RSTN_PCR 9
+#define RTD1195_RSTN_CR 10
+#define RTD1195_RSTN_EMMC 11
+#define RTD1195_RSTN_SDIO 12
+#define RTD1195_RSTN_I2C_5 18
+#define RTD1195_RSTN_RTC 20
+#define RTD1195_RSTN_I2C_4 23
+#define RTD1195_RSTN_I2C_3 24
+#define RTD1195_RSTN_I2C_2 25
+#define RTD1195_RSTN_I2C_1 26
+#define RTD1195_RSTN_UR1 28
+
+/* soft reset 3 */
+#define RTD1195_RSTN_SB2 0
+
+/* iso soft reset */
+#define RTD1195_ISO_RSTN_VFD 0
+#define RTD1195_ISO_RSTN_IR 1
+#define RTD1195_ISO_RSTN_CEC0 2
+#define RTD1195_ISO_RSTN_CEC1 3
+#define RTD1195_ISO_RSTN_DP 4
+#define RTD1195_ISO_RSTN_CBUSTX 5
+#define RTD1195_ISO_RSTN_CBUSRX 6
+#define RTD1195_ISO_RSTN_EFUSE 7
+#define RTD1195_ISO_RSTN_UR0 8
+#define RTD1195_ISO_RSTN_GMAC 9
+#define RTD1195_ISO_RSTN_GPHY 10
+#define RTD1195_ISO_RSTN_I2C_0 11
+#define RTD1195_ISO_RSTN_I2C_6 12
+#define RTD1195_ISO_RSTN_CBUS 13
+
+#endif
diff --git a/include/dt-bindings/reset/realtek,rtd1295.h b/include/dt-bindings/reset/realtek,rtd1295.h
index 2c0cb6afe816..dd89e4c80264 100644
--- a/include/dt-bindings/reset/realtek,rtd1295.h
+++ b/include/dt-bindings/reset/realtek,rtd1295.h
@@ -75,6 +75,9 @@
#define RTD1295_RSTN_CBUS_TX 30
#define RTD1295_RSTN_SDS_PHY 31

+/* soft reset 3 */
+#define RTD1295_RSTN_SB2 0
+
/* soft reset 4 */
#define RTD1295_RSTN_DCPHY_CRT 0
#define RTD1295_RSTN_DCPHY_ALERT_RX 1
diff --git a/include/dt-bindings/reset/sama7g5-reset.h b/include/dt-bindings/reset/sama7g5-reset.h
new file mode 100644
index 000000000000..2116f41d04e0
--- /dev/null
+++ b/include/dt-bindings/reset/sama7g5-reset.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef __DT_BINDINGS_RESET_SAMA7G5_H
+#define __DT_BINDINGS_RESET_SAMA7G5_H
+
+#define SAMA7G5_RESET_USB_PHY1 4
+#define SAMA7G5_RESET_USB_PHY2 5
+#define SAMA7G5_RESET_USB_PHY3 6
+
+#endif /* __DT_BINDINGS_RESET_SAMA7G5_H */
diff --git a/include/dt-bindings/reset/starfive-jh7100.h b/include/dt-bindings/reset/starfive-jh7100.h
new file mode 100644
index 000000000000..540e19254f39
--- /dev/null
+++ b/include/dt-bindings/reset/starfive-jh7100.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2021 Ahmad Fatoum, Pengutronix
+ */
+
+#ifndef __DT_BINDINGS_RESET_STARFIVE_JH7100_H__
+#define __DT_BINDINGS_RESET_STARFIVE_JH7100_H__
+
+#define JH7100_RSTN_DOM3AHB_BUS 0
+#define JH7100_RSTN_DOM7AHB_BUS 1
+#define JH7100_RST_U74 2
+#define JH7100_RSTN_U74_AXI 3
+#define JH7100_RSTN_SGDMA2P_AHB 4
+#define JH7100_RSTN_SGDMA2P_AXI 5
+#define JH7100_RSTN_DMA2PNOC_AXI 6
+#define JH7100_RSTN_DLA_AXI 7
+#define JH7100_RSTN_DLANOC_AXI 8
+#define JH7100_RSTN_DLA_APB 9
+#define JH7100_RST_VP6_DRESET 10
+#define JH7100_RST_VP6_BRESET 11
+#define JH7100_RSTN_VP6_AXI 12
+#define JH7100_RSTN_VDECBRG_MAIN 13
+#define JH7100_RSTN_VDEC_AXI 14
+#define JH7100_RSTN_VDEC_BCLK 15
+#define JH7100_RSTN_VDEC_CCLK 16
+#define JH7100_RSTN_VDEC_APB 17
+#define JH7100_RSTN_JPEG_AXI 18
+#define JH7100_RSTN_JPEG_CCLK 19
+#define JH7100_RSTN_JPEG_APB 20
+#define JH7100_RSTN_JPCGC300_MAIN 21
+#define JH7100_RSTN_GC300_2X 22
+#define JH7100_RSTN_GC300_AXI 23
+#define JH7100_RSTN_GC300_AHB 24
+#define JH7100_RSTN_VENC_AXI 25
+#define JH7100_RSTN_VENCBRG_MAIN 26
+#define JH7100_RSTN_VENC_BCLK 27
+#define JH7100_RSTN_VENC_CCLK 28
+#define JH7100_RSTN_VENC_APB 29
+#define JH7100_RSTN_DDRPHY_APB 30
+#define JH7100_RSTN_NOC_ROB 31
+#define JH7100_RSTN_NOC_COG 32
+#define JH7100_RSTN_HIFI4_AXI 33
+#define JH7100_RSTN_HIFI4NOC_AXI 34
+#define JH7100_RST_HIFI4_DRESET 35
+#define JH7100_RST_HIFI4_BRESET 36
+#define JH7100_RSTN_USB_AXI 37
+#define JH7100_RSTN_USBNOC_AXI 38
+#define JH7100_RSTN_SGDMA1P_AXI 39
+#define JH7100_RSTN_DMA1P_AXI 40
+#define JH7100_RSTN_X2C_AXI 41
+#define JH7100_RSTN_NNE_AHB 42
+#define JH7100_RSTN_NNE_AXI 43
+#define JH7100_RSTN_NNENOC_AXI 44
+#define JH7100_RSTN_DLASLV_AXI 45
+#define JH7100_RSTN_DSPX2C_AXI 46
+#define JH7100_RSTN_VIN_SRC 47
+#define JH7100_RSTN_ISPSLV_AXI 48
+#define JH7100_RSTN_VIN_AXI 49
+#define JH7100_RSTN_VINNOC_AXI 50
+#define JH7100_RSTN_ISP0_AXI 51
+#define JH7100_RSTN_ISP0NOC_AXI 52
+#define JH7100_RSTN_ISP1_AXI 53
+#define JH7100_RSTN_ISP1NOC_AXI 54
+#define JH7100_RSTN_VOUT_SRC 55
+#define JH7100_RSTN_DISP_AXI 56
+#define JH7100_RSTN_DISPNOC_AXI 57
+#define JH7100_RSTN_SDIO0_AHB 58
+#define JH7100_RSTN_SDIO1_AHB 59
+#define JH7100_RSTN_GMAC_AHB 60
+#define JH7100_RSTN_SPI2AHB_AHB 61
+#define JH7100_RSTN_SPI2AHB_CORE 62
+#define JH7100_RSTN_EZMASTER_AHB 63
+#define JH7100_RST_E24 64
+#define JH7100_RSTN_QSPI_AHB 65
+#define JH7100_RSTN_QSPI_CORE 66
+#define JH7100_RSTN_QSPI_APB 67
+#define JH7100_RSTN_SEC_AHB 68
+#define JH7100_RSTN_AES 69
+#define JH7100_RSTN_PKA 70
+#define JH7100_RSTN_SHA 71
+#define JH7100_RSTN_TRNG_APB 72
+#define JH7100_RSTN_OTP_APB 73
+#define JH7100_RSTN_UART0_APB 74
+#define JH7100_RSTN_UART0_CORE 75
+#define JH7100_RSTN_UART1_APB 76
+#define JH7100_RSTN_UART1_CORE 77
+#define JH7100_RSTN_SPI0_APB 78
+#define JH7100_RSTN_SPI0_CORE 79
+#define JH7100_RSTN_SPI1_APB 80
+#define JH7100_RSTN_SPI1_CORE 81
+#define JH7100_RSTN_I2C0_APB 82
+#define JH7100_RSTN_I2C0_CORE 83
+#define JH7100_RSTN_I2C1_APB 84
+#define JH7100_RSTN_I2C1_CORE 85
+#define JH7100_RSTN_GPIO_APB 86
+#define JH7100_RSTN_UART2_APB 87
+#define JH7100_RSTN_UART2_CORE 88
+#define JH7100_RSTN_UART3_APB 89
+#define JH7100_RSTN_UART3_CORE 90
+#define JH7100_RSTN_SPI2_APB 91
+#define JH7100_RSTN_SPI2_CORE 92
+#define JH7100_RSTN_SPI3_APB 93
+#define JH7100_RSTN_SPI3_CORE 94
+#define JH7100_RSTN_I2C2_APB 95
+#define JH7100_RSTN_I2C2_CORE 96
+#define JH7100_RSTN_I2C3_APB 97
+#define JH7100_RSTN_I2C3_CORE 98
+#define JH7100_RSTN_WDTIMER_APB 99
+#define JH7100_RSTN_WDT 100
+#define JH7100_RSTN_TIMER0 101
+#define JH7100_RSTN_TIMER1 102
+#define JH7100_RSTN_TIMER2 103
+#define JH7100_RSTN_TIMER3 104
+#define JH7100_RSTN_TIMER4 105
+#define JH7100_RSTN_TIMER5 106
+#define JH7100_RSTN_TIMER6 107
+#define JH7100_RSTN_VP6INTC_APB 108
+#define JH7100_RSTN_PWM_APB 109
+#define JH7100_RSTN_MSI_APB 110
+#define JH7100_RSTN_TEMP_APB 111
+#define JH7100_RSTN_TEMP_SENSE 112
+#define JH7100_RSTN_SYSERR_APB 113
+
+#define JH7100_RSTN_END 114
+
+#endif /* __DT_BINDINGS_RESET_STARFIVE_JH7100_H__ */
diff --git a/include/dt-bindings/reset/stericsson,db8500-prcc-reset.h b/include/dt-bindings/reset/stericsson,db8500-prcc-reset.h
new file mode 100644
index 000000000000..ea906896c70f
--- /dev/null
+++ b/include/dt-bindings/reset/stericsson,db8500-prcc-reset.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_STE_PRCC_RESET
+#define _DT_BINDINGS_STE_PRCC_RESET
+
+#define DB8500_PRCC_1 1
+#define DB8500_PRCC_2 2
+#define DB8500_PRCC_3 3
+#define DB8500_PRCC_6 6
+
+/* Reset lines on PRCC 1 */
+#define DB8500_PRCC_1_RESET_UART0 0
+#define DB8500_PRCC_1_RESET_UART1 1
+#define DB8500_PRCC_1_RESET_I2C1 2
+#define DB8500_PRCC_1_RESET_MSP0 3
+#define DB8500_PRCC_1_RESET_MSP1 4
+#define DB8500_PRCC_1_RESET_SDI0 5
+#define DB8500_PRCC_1_RESET_I2C2 6
+#define DB8500_PRCC_1_RESET_SPI3 7
+#define DB8500_PRCC_1_RESET_SLIMBUS0 8
+#define DB8500_PRCC_1_RESET_I2C4 9
+#define DB8500_PRCC_1_RESET_MSP3 10
+#define DB8500_PRCC_1_RESET_PER_MSP3 11
+#define DB8500_PRCC_1_RESET_PER_MSP1 12
+#define DB8500_PRCC_1_RESET_PER_MSP0 13
+#define DB8500_PRCC_1_RESET_PER_SLIMBUS 14
+
+/* Reset lines on PRCC 2 */
+#define DB8500_PRCC_2_RESET_I2C3 0
+#define DB8500_PRCC_2_RESET_PWL 1
+#define DB8500_PRCC_2_RESET_SDI4 2
+#define DB8500_PRCC_2_RESET_MSP2 3
+#define DB8500_PRCC_2_RESET_SDI1 4
+#define DB8500_PRCC_2_RESET_SDI3 5
+#define DB8500_PRCC_2_RESET_HSIRX 6
+#define DB8500_PRCC_2_RESET_HSITX 7
+#define DB8500_PRCC_1_RESET_PER_MSP2 8
+
+/* Reset lines on PRCC 3 */
+#define DB8500_PRCC_3_RESET_SSP0 1
+#define DB8500_PRCC_3_RESET_SSP1 2
+#define DB8500_PRCC_3_RESET_I2C0 3
+#define DB8500_PRCC_3_RESET_SDI2 4
+#define DB8500_PRCC_3_RESET_SKE 5
+#define DB8500_PRCC_3_RESET_UART2 6
+#define DB8500_PRCC_3_RESET_SDI5 7
+
+/* Reset lines on PRCC 6 */
+#define DB8500_PRCC_3_RESET_RNG 0
+
+#endif
diff --git a/include/dt-bindings/reset/stm32mp1-resets.h b/include/dt-bindings/reset/stm32mp1-resets.h
index f0c3aaef67a0..4ffa7c3612e6 100644
--- a/include/dt-bindings/reset/stm32mp1-resets.h
+++ b/include/dt-bindings/reset/stm32mp1-resets.h
@@ -7,6 +7,7 @@
#ifndef _DT_BINDINGS_STM32MP1_RESET_H_
#define _DT_BINDINGS_STM32MP1_RESET_H_

+#define MCU_HOLD_BOOT_R 2144
#define LTDC_R 3072
#define DSI_R 3076
#define DDRPERFM_R 3080
@@ -105,4 +106,18 @@
#define GPIOJ_R 19785
#define GPIOK_R 19786

+/* SCMI reset domain identifiers */
+#define RST_SCMI_SPI6 0
+#define RST_SCMI_I2C4 1
+#define RST_SCMI_I2C6 2
+#define RST_SCMI_USART1 3
+#define RST_SCMI_STGEN 4
+#define RST_SCMI_GPIOZ 5
+#define RST_SCMI_CRYP1 6
+#define RST_SCMI_HASH1 7
+#define RST_SCMI_RNG1 8
+#define RST_SCMI_MDMA 9
+#define RST_SCMI_MCU 10
+#define RST_SCMI_MCU_HOLD_BOOT 11
+
#endif /* _DT_BINDINGS_STM32MP1_RESET_H_ */
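The SCMI identifiers live in a separate, zero-based space from the RCC values above because they address a different provider node. Sketch (node labels assumed):

    #include <dt-bindings/reset/stm32mp1-resets.h>

    &m4_rproc {
            /* &scmi_reset: assumed label of the SCMI reset-domain node */
            resets = <&scmi_reset RST_SCMI_MCU>,
                     <&scmi_reset RST_SCMI_MCU_HOLD_BOOT>;
            reset-names = "mcu_rst", "hold_boot";
    };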
diff --git a/include/dt-bindings/reset/stm32mp13-resets.h b/include/dt-bindings/reset/stm32mp13-resets.h
new file mode 100644
index 000000000000..934864e90da6
--- /dev/null
+++ b/include/dt-bindings/reset/stm32mp13-resets.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
+ */
+
+#ifndef _DT_BINDINGS_STM32MP13_RESET_H_
+#define _DT_BINDINGS_STM32MP13_RESET_H_
+
+#define TIM2_R 13568
+#define TIM3_R 13569
+#define TIM4_R 13570
+#define TIM5_R 13571
+#define TIM6_R 13572
+#define TIM7_R 13573
+#define LPTIM1_R 13577
+#define SPI2_R 13579
+#define SPI3_R 13580
+#define USART3_R 13583
+#define UART4_R 13584
+#define UART5_R 13585
+#define UART7_R 13586
+#define UART8_R 13587
+#define I2C1_R 13589
+#define I2C2_R 13590
+#define SPDIF_R 13594
+#define TIM1_R 13632
+#define TIM8_R 13633
+#define SPI1_R 13640
+#define USART6_R 13645
+#define SAI1_R 13648
+#define SAI2_R 13649
+#define DFSDM_R 13652
+#define FDCAN_R 13656
+#define LPTIM2_R 13696
+#define LPTIM3_R 13697
+#define LPTIM4_R 13698
+#define LPTIM5_R 13699
+#define SYSCFG_R 13707
+#define VREF_R 13709
+#define DTS_R 13712
+#define PMBCTRL_R 13713
+#define LTDC_R 13760
+#define DCMIPP_R 13761
+#define DDRPERFM_R 13768
+#define USBPHY_R 13776
+#define STGEN_R 13844
+#define USART1_R 13888
+#define USART2_R 13889
+#define SPI4_R 13890
+#define SPI5_R 13891
+#define I2C3_R 13892
+#define I2C4_R 13893
+#define I2C5_R 13894
+#define TIM12_R 13895
+#define TIM13_R 13896
+#define TIM14_R 13897
+#define TIM15_R 13898
+#define TIM16_R 13899
+#define TIM17_R 13900
+#define DMA1_R 13952
+#define DMA2_R 13953
+#define DMAMUX1_R 13954
+#define DMA3_R 13955
+#define DMAMUX2_R 13956
+#define ADC1_R 13957
+#define ADC2_R 13958
+#define USBO_R 13960
+#define GPIOA_R 14080
+#define GPIOB_R 14081
+#define GPIOC_R 14082
+#define GPIOD_R 14083
+#define GPIOE_R 14084
+#define GPIOF_R 14085
+#define GPIOG_R 14086
+#define GPIOH_R 14087
+#define GPIOI_R 14088
+#define TSC_R 14095
+#define PKA_R 14146
+#define SAES_R 14147
+#define CRYP1_R 14148
+#define HASH1_R 14149
+#define RNG1_R 14150
+#define AXIMC_R 14160
+#define MDMA_R 14208
+#define MCE_R 14209
+#define ETH1MAC_R 14218
+#define FMC_R 14220
+#define QSPI_R 14222
+#define SDMMC1_R 14224
+#define SDMMC2_R 14225
+#define CRC1_R 14228
+#define USBH_R 14232
+#define ETH2MAC_R 14238
+
+/* SCMI reset domain identifiers */
+#define RST_SCMI_LTDC 0
+#define RST_SCMI_MDMA 1
+
+#endif /* _DT_BINDINGS_STM32MP13_RESET_H_ */
diff --git a/include/dt-bindings/reset/sun20i-d1-ccu.h b/include/dt-bindings/reset/sun20i-d1-ccu.h
new file mode 100644
index 000000000000..de9ff5203239
--- /dev/null
+++ b/include/dt-bindings/reset/sun20i-d1-ccu.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2020 huangzhenwei@allwinnertech.com
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN20I_D1_CCU_H_
+#define _DT_BINDINGS_RST_SUN20I_D1_CCU_H_
+
+#define RST_MBUS 0
+#define RST_BUS_DE 1
+#define RST_BUS_DI 2
+#define RST_BUS_G2D 3
+#define RST_BUS_CE 4
+#define RST_BUS_VE 5
+#define RST_BUS_DMA 6
+#define RST_BUS_MSGBOX0 7
+#define RST_BUS_MSGBOX1 8
+#define RST_BUS_MSGBOX2 9
+#define RST_BUS_SPINLOCK 10
+#define RST_BUS_HSTIMER 11
+#define RST_BUS_DBG 12
+#define RST_BUS_PWM 13
+#define RST_BUS_DRAM 14
+#define RST_BUS_MMC0 15
+#define RST_BUS_MMC1 16
+#define RST_BUS_MMC2 17
+#define RST_BUS_UART0 18
+#define RST_BUS_UART1 19
+#define RST_BUS_UART2 20
+#define RST_BUS_UART3 21
+#define RST_BUS_UART4 22
+#define RST_BUS_UART5 23
+#define RST_BUS_I2C0 24
+#define RST_BUS_I2C1 25
+#define RST_BUS_I2C2 26
+#define RST_BUS_I2C3 27
+#define RST_BUS_SPI0 28
+#define RST_BUS_SPI1 29
+#define RST_BUS_EMAC 30
+#define RST_BUS_IR_TX 31
+#define RST_BUS_GPADC 32
+#define RST_BUS_THS 33
+#define RST_BUS_I2S0 34
+#define RST_BUS_I2S1 35
+#define RST_BUS_I2S2 36
+#define RST_BUS_SPDIF 37
+#define RST_BUS_DMIC 38
+#define RST_BUS_AUDIO 39
+#define RST_USB_PHY0 40
+#define RST_USB_PHY1 41
+#define RST_BUS_OHCI0 42
+#define RST_BUS_OHCI1 43
+#define RST_BUS_EHCI0 44
+#define RST_BUS_EHCI1 45
+#define RST_BUS_OTG 46
+#define RST_BUS_LRADC 47
+#define RST_BUS_DPSS_TOP 48
+#define RST_BUS_HDMI_SUB 49
+#define RST_BUS_HDMI_MAIN 50
+#define RST_BUS_MIPI_DSI 51
+#define RST_BUS_TCON_LCD0 52
+#define RST_BUS_TCON_TV 53
+#define RST_BUS_LVDS0 54
+#define RST_BUS_TVE 55
+#define RST_BUS_TVE_TOP 56
+#define RST_BUS_TVD 57
+#define RST_BUS_TVD_TOP 58
+#define RST_BUS_LEDC 59
+#define RST_BUS_CSI 60
+#define RST_BUS_TPADC 61
+#define RST_DSP 62
+#define RST_BUS_DSP_CFG 63
+#define RST_BUS_DSP_DBG 64
+#define RST_BUS_RISCV_CFG 65
+
+#endif /* _DT_BINDINGS_RST_SUN20I_D1_CCU_H_ */
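As in the other sunxi CCU headers, the constants form a sequential driver-local index space rather than raw register bits. An illustrative consumer (the &ccu label is an assumption):

    #include <dt-bindings/reset/sun20i-d1-ccu.h>

    &uart0 {
            resets = <&ccu RST_BUS_UART0>;
    };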
diff --git a/include/dt-bindings/reset/sun20i-d1-r-ccu.h b/include/dt-bindings/reset/sun20i-d1-r-ccu.h
new file mode 100644
index 000000000000..d93d6423d283
--- /dev/null
+++ b/include/dt-bindings/reset/sun20i-d1-r-ccu.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN20I_D1_R_CCU_H_
+#define _DT_BINDINGS_RST_SUN20I_D1_R_CCU_H_
+
+#define RST_BUS_R_TIMER 0
+#define RST_BUS_R_TWD 1
+#define RST_BUS_R_PPU 2
+#define RST_BUS_R_IR_RX 3
+#define RST_BUS_R_RTC 4
+#define RST_BUS_R_CPUCFG 5
+
+#endif /* _DT_BINDINGS_RST_SUN20I_D1_R_CCU_H_ */
diff --git a/include/dt-bindings/reset/sun50i-a100-ccu.h b/include/dt-bindings/reset/sun50i-a100-ccu.h
new file mode 100644
index 000000000000..55c0ada99885
--- /dev/null
+++ b/include/dt-bindings/reset/sun50i-a100-ccu.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_SUN50I_A100_H_
+#define _DT_BINDINGS_RESET_SUN50I_A100_H_
+
+#define RST_MBUS 0
+#define RST_BUS_DE 1
+#define RST_BUS_G2D 2
+#define RST_BUS_GPU 3
+#define RST_BUS_CE 4
+#define RST_BUS_VE 5
+#define RST_BUS_DMA 6
+#define RST_BUS_MSGBOX 7
+#define RST_BUS_SPINLOCK 8
+#define RST_BUS_HSTIMER 9
+#define RST_BUS_DBG 10
+#define RST_BUS_PSI 11
+#define RST_BUS_PWM 12
+#define RST_BUS_DRAM 13
+#define RST_BUS_NAND 14
+#define RST_BUS_MMC0 15
+#define RST_BUS_MMC1 16
+#define RST_BUS_MMC2 17
+#define RST_BUS_UART0 18
+#define RST_BUS_UART1 19
+#define RST_BUS_UART2 20
+#define RST_BUS_UART3 21
+#define RST_BUS_UART4 22
+#define RST_BUS_I2C0 23
+#define RST_BUS_I2C1 24
+#define RST_BUS_I2C2 25
+#define RST_BUS_I2C3 26
+#define RST_BUS_SPI0 27
+#define RST_BUS_SPI1 28
+#define RST_BUS_SPI2 29
+#define RST_BUS_EMAC 30
+#define RST_BUS_IR_RX 31
+#define RST_BUS_IR_TX 32
+#define RST_BUS_GPADC 33
+#define RST_BUS_THS 34
+#define RST_BUS_I2S0 35
+#define RST_BUS_I2S1 36
+#define RST_BUS_I2S2 37
+#define RST_BUS_I2S3 38
+#define RST_BUS_SPDIF 39
+#define RST_BUS_DMIC 40
+#define RST_BUS_AUDIO_CODEC 41
+#define RST_USB_PHY0 42
+#define RST_USB_PHY1 43
+#define RST_BUS_OHCI0 44
+#define RST_BUS_OHCI1 45
+#define RST_BUS_EHCI0 46
+#define RST_BUS_EHCI1 47
+#define RST_BUS_OTG 48
+#define RST_BUS_LRADC 49
+#define RST_BUS_DPSS_TOP0 50
+#define RST_BUS_DPSS_TOP1 51
+#define RST_BUS_MIPI_DSI 52
+#define RST_BUS_TCON_LCD 53
+#define RST_BUS_LVDS 54
+#define RST_BUS_LEDC 55
+#define RST_BUS_CSI 56
+#define RST_BUS_CSI_ISP 57
+
+#endif /* _DT_BINDINGS_RESET_SUN50I_A100_H_ */
diff --git a/include/dt-bindings/reset/sun50i-a100-r-ccu.h b/include/dt-bindings/reset/sun50i-a100-r-ccu.h
new file mode 100644
index 000000000000..737bf6f66626
--- /dev/null
+++ b/include/dt-bindings/reset/sun50i-a100-r-ccu.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN50I_A100_R_CCU_H_
+#define _DT_BINDINGS_RST_SUN50I_A100_R_CCU_H_
+
+#define RST_R_APB1_TIMER 0
+#define RST_R_APB1_BUS_PWM 1
+#define RST_R_APB1_PPU 2
+#define RST_R_APB2_UART 3
+#define RST_R_APB2_I2C0 4
+#define RST_R_APB2_I2C1 5
+#define RST_R_APB1_BUS_IR 6
+#define RST_R_AHB_BUS_RTC 7
+
+#endif /* _DT_BINDINGS_RST_SUN50I_A100_R_CCU_H_ */
diff --git a/include/dt-bindings/reset/sun50i-h6-r-ccu.h b/include/dt-bindings/reset/sun50i-h6-r-ccu.h
index 01c84dba49a4..7950e799c76d 100644
--- a/include/dt-bindings/reset/sun50i-h6-r-ccu.h
+++ b/include/dt-bindings/reset/sun50i-h6-r-ccu.h
@@ -13,5 +13,6 @@
#define RST_R_APB2_I2C 4
#define RST_R_APB1_IR 5
#define RST_R_APB1_W1 6
+#define RST_R_APB2_RSB 7

#endif /* _DT_BINDINGS_RST_SUN50I_H6_R_CCU_H_ */
diff --git a/include/dt-bindings/reset/sun50i-h616-ccu.h b/include/dt-bindings/reset/sun50i-h616-ccu.h
new file mode 100644
index 000000000000..cb6285a8d128
--- /dev/null
+++ b/include/dt-bindings/reset/sun50i-h616-ccu.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (C) 2020 Arm Ltd.
+ */
+
+#ifndef _DT_BINDINGS_RESET_SUN50I_H616_H_
+#define _DT_BINDINGS_RESET_SUN50I_H616_H_
+
+#define RST_MBUS 0
+#define RST_BUS_DE 1
+#define RST_BUS_DEINTERLACE 2
+#define RST_BUS_GPU 3
+#define RST_BUS_CE 4
+#define RST_BUS_VE 5
+#define RST_BUS_DMA 6
+#define RST_BUS_HSTIMER 7
+#define RST_BUS_DBG 8
+#define RST_BUS_PSI 9
+#define RST_BUS_PWM 10
+#define RST_BUS_IOMMU 11
+#define RST_BUS_DRAM 12
+#define RST_BUS_NAND 13
+#define RST_BUS_MMC0 14
+#define RST_BUS_MMC1 15
+#define RST_BUS_MMC2 16
+#define RST_BUS_UART0 17
+#define RST_BUS_UART1 18
+#define RST_BUS_UART2 19
+#define RST_BUS_UART3 20
+#define RST_BUS_UART4 21
+#define RST_BUS_UART5 22
+#define RST_BUS_I2C0 23
+#define RST_BUS_I2C1 24
+#define RST_BUS_I2C2 25
+#define RST_BUS_I2C3 26
+#define RST_BUS_I2C4 27
+#define RST_BUS_SPI0 28
+#define RST_BUS_SPI1 29
+#define RST_BUS_EMAC0 30
+#define RST_BUS_EMAC1 31
+#define RST_BUS_TS 32
+#define RST_BUS_THS 33
+#define RST_BUS_SPDIF 34
+#define RST_BUS_DMIC 35
+#define RST_BUS_AUDIO_CODEC 36
+#define RST_BUS_AUDIO_HUB 37
+#define RST_USB_PHY0 38
+#define RST_USB_PHY1 39
+#define RST_USB_PHY2 40
+#define RST_USB_PHY3 41
+#define RST_BUS_OHCI0 42
+#define RST_BUS_OHCI1 43
+#define RST_BUS_OHCI2 44
+#define RST_BUS_OHCI3 45
+#define RST_BUS_EHCI0 46
+#define RST_BUS_EHCI1 47
+#define RST_BUS_EHCI2 48
+#define RST_BUS_EHCI3 49
+#define RST_BUS_OTG 50
+#define RST_BUS_HDMI 51
+#define RST_BUS_HDMI_SUB 52
+#define RST_BUS_TCON_TOP 53
+#define RST_BUS_TCON_TV0 54
+#define RST_BUS_TCON_TV1 55
+#define RST_BUS_TVE_TOP 56
+#define RST_BUS_TVE0 57
+#define RST_BUS_HDCP 58
+#define RST_BUS_KEYADC 59
+
+#endif /* _DT_BINDINGS_RESET_SUN50I_H616_H_ */
diff --git a/include/dt-bindings/reset/sunplus,sp7021-reset.h b/include/dt-bindings/reset/sunplus,sp7021-reset.h
new file mode 100644
index 000000000000..ab486707387f
--- /dev/null
+++ b/include/dt-bindings/reset/sunplus,sp7021-reset.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+#ifndef _DT_BINDINGS_RST_SUNPLUS_SP7021_H
+#define _DT_BINDINGS_RST_SUNPLUS_SP7021_H
+
+#define RST_SYSTEM 0
+#define RST_RTC 1
+#define RST_IOCTL 2
+#define RST_IOP 3
+#define RST_OTPRX 4
+#define RST_NOC 5
+#define RST_BR 6
+#define RST_RBUS_L00 7
+#define RST_SPIFL 8
+#define RST_SDCTRL0 9
+#define RST_PERI0 10
+#define RST_A926 11
+#define RST_UMCTL2 12
+#define RST_PERI1 13
+#define RST_DDR_PHY0 14
+#define RST_ACHIP 15
+#define RST_STC0 16
+#define RST_STC_AV0 17
+#define RST_STC_AV1 18
+#define RST_STC_AV2 19
+#define RST_UA0 20
+#define RST_UA1 21
+#define RST_UA2 22
+#define RST_UA3 23
+#define RST_UA4 24
+#define RST_HWUA 25
+#define RST_DDC0 26
+#define RST_UADMA 27
+#define RST_CBDMA0 28
+#define RST_CBDMA1 29
+#define RST_SPI_COMBO_0 30
+#define RST_SPI_COMBO_1 31
+#define RST_SPI_COMBO_2 32
+#define RST_SPI_COMBO_3 33
+#define RST_AUD 34
+#define RST_USBC0 35
+#define RST_USBC1 36
+#define RST_UPHY0 37
+#define RST_UPHY1 38
+#define RST_I2CM0 39
+#define RST_I2CM1 40
+#define RST_I2CM2 41
+#define RST_I2CM3 42
+#define RST_PMC 43
+#define RST_CARD_CTL0 44
+#define RST_CARD_CTL1 45
+#define RST_CARD_CTL4 46
+#define RST_BCH 47
+#define RST_DDFCH 48
+#define RST_CSIIW0 49
+#define RST_CSIIW1 50
+#define RST_MIPICSI0 51
+#define RST_MIPICSI1 52
+#define RST_HDMI_TX 53
+#define RST_VPOST 54
+#define RST_TGEN 55
+#define RST_DMIX 56
+#define RST_TCON 57
+#define RST_INTERRUPT 58
+#define RST_RGST 59
+#define RST_GPIO 60
+#define RST_RBUS_TOP 61
+#define RST_MAILBOX 62
+#define RST_SPIND 63
+#define RST_I2C2CBUS 64
+#define RST_SEC 65
+#define RST_DVE 66
+#define RST_GPOST0 67
+#define RST_OSD0 68
+#define RST_DISP_PWM 69
+#define RST_UADBG 70
+#define RST_DUMMY_MASTER 71
+#define RST_FIO_CTL 72
+#define RST_FPGA 73
+#define RST_L2SW 74
+#define RST_ICM 75
+#define RST_AXI_GLOBAL 76
+
+#endif
diff --git a/include/dt-bindings/reset/tegra234-reset.h b/include/dt-bindings/reset/tegra234-reset.h
new file mode 100644
index 000000000000..d48d22b2bc7f
--- /dev/null
+++ b/include/dt-bindings/reset/tegra234-reset.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef DT_BINDINGS_RESET_TEGRA234_RESET_H
+#define DT_BINDINGS_RESET_TEGRA234_RESET_H
+
+/**
+ * @file
+ * @defgroup bpmp_reset_ids Reset IDs
+ * @brief Identifiers for resets controllable by firmware
+ * @{
+ */
+#define TEGRA234_RESET_PEX1_CORE_6 11U
+#define TEGRA234_RESET_PEX1_CORE_6_APB 12U
+#define TEGRA234_RESET_PEX1_COMMON_APB 13U
+#define TEGRA234_RESET_PEX2_CORE_7 14U
+#define TEGRA234_RESET_PEX2_CORE_7_APB 15U
+#define TEGRA234_RESET_GPCDMA 18U
+#define TEGRA234_RESET_HDA 20U
+#define TEGRA234_RESET_HDACODEC 21U
+#define TEGRA234_RESET_I2C1 24U
+#define TEGRA234_RESET_PEX2_CORE_8 25U
+#define TEGRA234_RESET_PEX2_CORE_8_APB 26U
+#define TEGRA234_RESET_PEX2_CORE_9 27U
+#define TEGRA234_RESET_PEX2_CORE_9_APB 28U
+#define TEGRA234_RESET_I2C2 29U
+#define TEGRA234_RESET_I2C3 30U
+#define TEGRA234_RESET_I2C4 31U
+#define TEGRA234_RESET_I2C6 32U
+#define TEGRA234_RESET_I2C7 33U
+#define TEGRA234_RESET_I2C8 34U
+#define TEGRA234_RESET_I2C9 35U
+#define TEGRA234_RESET_MGBE0_PCS 45U
+#define TEGRA234_RESET_MGBE0_MAC 46U
+#define TEGRA234_RESET_MGBE1_PCS 49U
+#define TEGRA234_RESET_MGBE1_MAC 50U
+#define TEGRA234_RESET_MGBE2_PCS 53U
+#define TEGRA234_RESET_MGBE2_MAC 54U
+#define TEGRA234_RESET_PEX2_CORE_10 56U
+#define TEGRA234_RESET_PEX2_CORE_10_APB 57U
+#define TEGRA234_RESET_PEX2_COMMON_APB 58U
+#define TEGRA234_RESET_PWM1 68U
+#define TEGRA234_RESET_PWM2 69U
+#define TEGRA234_RESET_PWM3 70U
+#define TEGRA234_RESET_PWM4 71U
+#define TEGRA234_RESET_PWM5 72U
+#define TEGRA234_RESET_PWM6 73U
+#define TEGRA234_RESET_PWM7 74U
+#define TEGRA234_RESET_PWM8 75U
+#define TEGRA234_RESET_QSPI0 76U
+#define TEGRA234_RESET_QSPI1 77U
+#define TEGRA234_RESET_SDMMC4 85U
+#define TEGRA234_RESET_MGBE3_PCS 87U
+#define TEGRA234_RESET_MGBE3_MAC 88U
+#define TEGRA234_RESET_UARTA 100U
+#define TEGRA234_RESET_VIC 113U
+#define TEGRA234_RESET_PEX0_CORE_0 116U
+#define TEGRA234_RESET_PEX0_CORE_1 117U
+#define TEGRA234_RESET_PEX0_CORE_2 118U
+#define TEGRA234_RESET_PEX0_CORE_3 119U
+#define TEGRA234_RESET_PEX0_CORE_4 120U
+#define TEGRA234_RESET_PEX0_CORE_0_APB 121U
+#define TEGRA234_RESET_PEX0_CORE_1_APB 122U
+#define TEGRA234_RESET_PEX0_CORE_2_APB 123U
+#define TEGRA234_RESET_PEX0_CORE_3_APB 124U
+#define TEGRA234_RESET_PEX0_CORE_4_APB 125U
+#define TEGRA234_RESET_PEX0_COMMON_APB 126U
+#define TEGRA234_RESET_PEX1_CORE_5 129U
+#define TEGRA234_RESET_PEX1_CORE_5_APB 130U
+
+/** @} */
+
+#endif
diff --git a/include/dt-bindings/reset/ti-syscon.h b/include/dt-bindings/reset/ti-syscon.h
index 6d696d2d1508..eacc0f18083e 100644
--- a/include/dt-bindings/reset/ti-syscon.h
+++ b/include/dt-bindings/reset/ti-syscon.h
@@ -2,7 +2,7 @@
/*
* TI Syscon Reset definitions
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __DT_BINDINGS_RESET_TI_SYSCON_H__
diff --git a/include/dt-bindings/reset/toshiba,tmpv770x.h b/include/dt-bindings/reset/toshiba,tmpv770x.h
new file mode 100644
index 000000000000..c1007acb1941
--- /dev/null
+++ b/include/dt-bindings/reset/toshiba,tmpv770x.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_RESET_TOSHIBA_TMPV770X_H_
+#define _DT_BINDINGS_RESET_TOSHIBA_TMPV770X_H_
+
+/* Reset */
+#define TMPV770X_RESET_PIETHER_2P5M 0
+#define TMPV770X_RESET_PIETHER_25M 1
+#define TMPV770X_RESET_PIETHER_50M 2
+#define TMPV770X_RESET_PIETHER_125M 3
+#define TMPV770X_RESET_HOX 4
+#define TMPV770X_RESET_PCIE_MSTR 5
+#define TMPV770X_RESET_PCIE_AUX 6
+#define TMPV770X_RESET_PIINTC 7
+#define TMPV770X_RESET_PIETHER_BUS 8
+#define TMPV770X_RESET_PISPI0 9
+#define TMPV770X_RESET_PISPI1 10
+#define TMPV770X_RESET_PISPI2 11
+#define TMPV770X_RESET_PISPI3 12
+#define TMPV770X_RESET_PISPI4 13
+#define TMPV770X_RESET_PISPI5 14
+#define TMPV770X_RESET_PISPI6 15
+#define TMPV770X_RESET_PIUART0 16
+#define TMPV770X_RESET_PIUART1 17
+#define TMPV770X_RESET_PIUART2 18
+#define TMPV770X_RESET_PIUART3 19
+#define TMPV770X_RESET_PII2C0 20
+#define TMPV770X_RESET_PII2C1 21
+#define TMPV770X_RESET_PII2C2 22
+#define TMPV770X_RESET_PII2C3 23
+#define TMPV770X_RESET_PII2C4 24
+#define TMPV770X_RESET_PII2C5 25
+#define TMPV770X_RESET_PII2C6 26
+#define TMPV770X_RESET_PII2C7 27
+#define TMPV770X_RESET_PII2C8 28
+#define TMPV770X_RESET_PIPCMIF 29
+#define TMPV770X_RESET_PICKMON 30
+#define TMPV770X_RESET_SBUSCLK 31
+#define TMPV770X_NR_RESET 32
+
+#endif /*_DT_BINDINGS_RESET_TOSHIBA_TMPV770X_H_ */
diff --git a/include/dt-bindings/reset/xlnx-versal-resets.h b/include/dt-bindings/reset/xlnx-versal-resets.h
new file mode 100644
index 000000000000..895424e9b0e5
--- /dev/null
+++ b/include/dt-bindings/reset/xlnx-versal-resets.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Xilinx, Inc.
+ */
+
+#ifndef _DT_BINDINGS_VERSAL_RESETS_H
+#define _DT_BINDINGS_VERSAL_RESETS_H
+
+#define VERSAL_RST_PMC_POR (0xc30c001U)
+#define VERSAL_RST_PMC (0xc410002U)
+#define VERSAL_RST_PS_POR (0xc30c003U)
+#define VERSAL_RST_PL_POR (0xc30c004U)
+#define VERSAL_RST_NOC_POR (0xc30c005U)
+#define VERSAL_RST_FPD_POR (0xc30c006U)
+#define VERSAL_RST_ACPU_0_POR (0xc30c007U)
+#define VERSAL_RST_ACPU_1_POR (0xc30c008U)
+#define VERSAL_RST_OCM2_POR (0xc30c009U)
+#define VERSAL_RST_PS_SRST (0xc41000aU)
+#define VERSAL_RST_PL_SRST (0xc41000bU)
+#define VERSAL_RST_NOC (0xc41000cU)
+#define VERSAL_RST_NPI (0xc41000dU)
+#define VERSAL_RST_SYS_RST_1 (0xc41000eU)
+#define VERSAL_RST_SYS_RST_2 (0xc41000fU)
+#define VERSAL_RST_SYS_RST_3 (0xc410010U)
+#define VERSAL_RST_FPD (0xc410011U)
+#define VERSAL_RST_PL0 (0xc410012U)
+#define VERSAL_RST_PL1 (0xc410013U)
+#define VERSAL_RST_PL2 (0xc410014U)
+#define VERSAL_RST_PL3 (0xc410015U)
+#define VERSAL_RST_APU (0xc410016U)
+#define VERSAL_RST_ACPU_0 (0xc410017U)
+#define VERSAL_RST_ACPU_1 (0xc410018U)
+#define VERSAL_RST_ACPU_L2 (0xc410019U)
+#define VERSAL_RST_ACPU_GIC (0xc41001aU)
+#define VERSAL_RST_RPU_ISLAND (0xc41001bU)
+#define VERSAL_RST_RPU_AMBA (0xc41001cU)
+#define VERSAL_RST_R5_0 (0xc41001dU)
+#define VERSAL_RST_R5_1 (0xc41001eU)
+#define VERSAL_RST_SYSMON_PMC_SEQ_RST (0xc41001fU)
+#define VERSAL_RST_SYSMON_PMC_CFG_RST (0xc410020U)
+#define VERSAL_RST_SYSMON_FPD_CFG_RST (0xc410021U)
+#define VERSAL_RST_SYSMON_FPD_SEQ_RST (0xc410022U)
+#define VERSAL_RST_SYSMON_LPD (0xc410023U)
+#define VERSAL_RST_PDMA_RST1 (0xc410024U)
+#define VERSAL_RST_PDMA_RST0 (0xc410025U)
+#define VERSAL_RST_ADMA (0xc410026U)
+#define VERSAL_RST_TIMESTAMP (0xc410027U)
+#define VERSAL_RST_OCM (0xc410028U)
+#define VERSAL_RST_OCM2_RST (0xc410029U)
+#define VERSAL_RST_IPI (0xc41002aU)
+#define VERSAL_RST_SBI (0xc41002bU)
+#define VERSAL_RST_LPD (0xc41002cU)
+#define VERSAL_RST_QSPI (0xc10402dU)
+#define VERSAL_RST_OSPI (0xc10402eU)
+#define VERSAL_RST_SDIO_0 (0xc10402fU)
+#define VERSAL_RST_SDIO_1 (0xc104030U)
+#define VERSAL_RST_I2C_PMC (0xc104031U)
+#define VERSAL_RST_GPIO_PMC (0xc104032U)
+#define VERSAL_RST_GEM_0 (0xc104033U)
+#define VERSAL_RST_GEM_1 (0xc104034U)
+#define VERSAL_RST_SPARE (0xc104035U)
+#define VERSAL_RST_USB_0 (0xc104036U)
+#define VERSAL_RST_UART_0 (0xc104037U)
+#define VERSAL_RST_UART_1 (0xc104038U)
+#define VERSAL_RST_SPI_0 (0xc104039U)
+#define VERSAL_RST_SPI_1 (0xc10403aU)
+#define VERSAL_RST_CAN_FD_0 (0xc10403bU)
+#define VERSAL_RST_CAN_FD_1 (0xc10403cU)
+#define VERSAL_RST_I2C_0 (0xc10403dU)
+#define VERSAL_RST_I2C_1 (0xc10403eU)
+#define VERSAL_RST_GPIO_LPD (0xc10403fU)
+#define VERSAL_RST_TTC_0 (0xc104040U)
+#define VERSAL_RST_TTC_1 (0xc104041U)
+#define VERSAL_RST_TTC_2 (0xc104042U)
+#define VERSAL_RST_TTC_3 (0xc104043U)
+#define VERSAL_RST_SWDT_FPD (0xc104044U)
+#define VERSAL_RST_SWDT_LPD (0xc104045U)
+#define VERSAL_RST_USB (0xc104046U)
+#define VERSAL_RST_DPC (0xc208047U)
+#define VERSAL_RST_PMCDBG (0xc208048U)
+#define VERSAL_RST_DBG_TRACE (0xc208049U)
+#define VERSAL_RST_DBG_FPD (0xc20804aU)
+#define VERSAL_RST_DBG_TSTMP (0xc20804bU)
+#define VERSAL_RST_RPU0_DBG (0xc20804cU)
+#define VERSAL_RST_RPU1_DBG (0xc20804dU)
+#define VERSAL_RST_HSDP (0xc20804eU)
+#define VERSAL_RST_DBG_LPD (0xc20804fU)
+#define VERSAL_RST_CPM_POR (0xc30c050U)
+#define VERSAL_RST_CPM (0xc410051U)
+#define VERSAL_RST_CPMDBG (0xc208052U)
+#define VERSAL_RST_PCIE_CFG (0xc410053U)
+#define VERSAL_RST_PCIE_CORE0 (0xc410054U)
+#define VERSAL_RST_PCIE_CORE1 (0xc410055U)
+#define VERSAL_RST_PCIE_DMA (0xc410056U)
+#define VERSAL_RST_CMN (0xc410057U)
+#define VERSAL_RST_L2_0 (0xc410058U)
+#define VERSAL_RST_L2_1 (0xc410059U)
+#define VERSAL_RST_ADDR_REMAP (0xc41005aU)
+#define VERSAL_RST_CPI0 (0xc41005bU)
+#define VERSAL_RST_CPI1 (0xc41005cU)
+#define VERSAL_RST_XRAM (0xc30c05dU)
+#define VERSAL_RST_AIE_ARRAY (0xc10405eU)
+#define VERSAL_RST_AIE_SHIM (0xc10405fU)
+
+#endif
diff --git a/include/dt-bindings/soc/bcm-pmb.h b/include/dt-bindings/soc/bcm-pmb.h
new file mode 100644
index 000000000000..385884468007
--- /dev/null
+++ b/include/dt-bindings/soc/bcm-pmb.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later OR MIT */
+
+#ifndef __DT_BINDINGS_SOC_BCM_PMB_H
+#define __DT_BINDINGS_SOC_BCM_PMB_H
+
+#define BCM_PMB_PCIE0 0x01
+#define BCM_PMB_PCIE1 0x02
+#define BCM_PMB_PCIE2 0x03
+#define BCM_PMB_HOST_USB 0x04
+#define BCM_PMB_SATA 0x05
+
+#endif
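These PMB indices are power-domain specifiers, so a consumer selects its power island through the generic power-domains binding. A hedged sketch, assuming the PMB node is labelled `&pmb` (the consumer compatible and address are placeholders):

    #include <dt-bindings/soc/bcm-pmb.h>

    usb: usb@8000 {
            compatible = "vendor,example-usb";   /* placeholder compatible */
            reg = <0x8000 0x1000>;
            power-domains = <&pmb BCM_PMB_HOST_USB>;
    };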
diff --git a/include/dt-bindings/soc/bcm6318-pm.h b/include/dt-bindings/soc/bcm6318-pm.h
new file mode 100644
index 000000000000..05931dce8333
--- /dev/null
+++ b/include/dt-bindings/soc/bcm6318-pm.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_BMIPS_BCM6318_PM_H
+#define __DT_BINDINGS_BMIPS_BCM6318_PM_H
+
+#define BCM6318_POWER_DOMAIN_PCIE 0
+#define BCM6318_POWER_DOMAIN_USB 1
+#define BCM6318_POWER_DOMAIN_EPHY0 2
+#define BCM6318_POWER_DOMAIN_EPHY1 3
+#define BCM6318_POWER_DOMAIN_EPHY2 4
+#define BCM6318_POWER_DOMAIN_EPHY3 5
+#define BCM6318_POWER_DOMAIN_LDO2P5 6
+#define BCM6318_POWER_DOMAIN_LDO2P9 7
+#define BCM6318_POWER_DOMAIN_SW1P0 8
+#define BCM6318_POWER_DOMAIN_PAD 9
+
+#endif /* __DT_BINDINGS_BMIPS_BCM6318_PM_H */
diff --git a/include/dt-bindings/soc/bcm63268-pm.h b/include/dt-bindings/soc/bcm63268-pm.h
new file mode 100644
index 000000000000..84ded53a732f
--- /dev/null
+++ b/include/dt-bindings/soc/bcm63268-pm.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_BMIPS_BCM63268_PM_H
+#define __DT_BINDINGS_BMIPS_BCM63268_PM_H
+
+#define BCM63268_POWER_DOMAIN_SAR 0
+#define BCM63268_POWER_DOMAIN_IPSEC 1
+#define BCM63268_POWER_DOMAIN_MIPS 2
+#define BCM63268_POWER_DOMAIN_DECT 3
+#define BCM63268_POWER_DOMAIN_USBH 4
+#define BCM63268_POWER_DOMAIN_USBD 5
+#define BCM63268_POWER_DOMAIN_ROBOSW 6
+#define BCM63268_POWER_DOMAIN_PCM 7
+#define BCM63268_POWER_DOMAIN_PERIPH 8
+#define BCM63268_POWER_DOMAIN_VDSL_PHY 9
+#define BCM63268_POWER_DOMAIN_VDSL_MIPS 10
+#define BCM63268_POWER_DOMAIN_FAP 11
+#define BCM63268_POWER_DOMAIN_PCIE 12
+#define BCM63268_POWER_DOMAIN_WLAN_PADS 13
+
+#endif /* __DT_BINDINGS_BMIPS_BCM63268_PM_H */
diff --git a/include/dt-bindings/soc/bcm6328-pm.h b/include/dt-bindings/soc/bcm6328-pm.h
new file mode 100644
index 000000000000..557e1a69b7f7
--- /dev/null
+++ b/include/dt-bindings/soc/bcm6328-pm.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_BMIPS_BCM6328_PM_H
+#define __DT_BINDINGS_BMIPS_BCM6328_PM_H
+
+#define BCM6328_POWER_DOMAIN_ADSL2_MIPS 0
+#define BCM6328_POWER_DOMAIN_ADSL2_PHY 1
+#define BCM6328_POWER_DOMAIN_ADSL2_AFE 2
+#define BCM6328_POWER_DOMAIN_SAR 3
+#define BCM6328_POWER_DOMAIN_PCM 4
+#define BCM6328_POWER_DOMAIN_USBD 5
+#define BCM6328_POWER_DOMAIN_USBH 6
+#define BCM6328_POWER_DOMAIN_PCIE 7
+#define BCM6328_POWER_DOMAIN_ROBOSW 8
+#define BCM6328_POWER_DOMAIN_EPHY 9
+
+#endif /* __DT_BINDINGS_BMIPS_BCM6328_PM_H */
diff --git a/include/dt-bindings/soc/bcm6362-pm.h b/include/dt-bindings/soc/bcm6362-pm.h
new file mode 100644
index 000000000000..d087ba63c7a1
--- /dev/null
+++ b/include/dt-bindings/soc/bcm6362-pm.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_BMIPS_BCM6362_PM_H
+#define __DT_BINDINGS_BMIPS_BCM6362_PM_H
+
+#define BCM6362_POWER_DOMAIN_SAR 0
+#define BCM6362_POWER_DOMAIN_IPSEC 1
+#define BCM6362_POWER_DOMAIN_MIPS 2
+#define BCM6362_POWER_DOMAIN_DECT 3
+#define BCM6362_POWER_DOMAIN_USBH 4
+#define BCM6362_POWER_DOMAIN_USBD 5
+#define BCM6362_POWER_DOMAIN_ROBOSW 6
+#define BCM6362_POWER_DOMAIN_PCM 7
+#define BCM6362_POWER_DOMAIN_PERIPH 8
+#define BCM6362_POWER_DOMAIN_ADSL_PHY 9
+#define BCM6362_POWER_DOMAIN_GMII_PADS 10
+#define BCM6362_POWER_DOMAIN_FAP 11
+#define BCM6362_POWER_DOMAIN_PCIE 12
+#define BCM6362_POWER_DOMAIN_WLAN_PADS 13
+
+#endif /* __DT_BINDINGS_BMIPS_BCM6362_PM_H */
diff --git a/include/dt-bindings/soc/qcom,gpr.h b/include/dt-bindings/soc/qcom,gpr.h
new file mode 100644
index 000000000000..3107da59319c
--- /dev/null
+++ b/include/dt-bindings/soc/qcom,gpr.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+
+#ifndef __DT_BINDINGS_QCOM_GPR_H
+#define __DT_BINDINGS_QCOM_GPR_H
+
+/* DOMAINS */
+
+#define GPR_DOMAIN_ID_MODEM 1
+#define GPR_DOMAIN_ID_ADSP 2
+#define GPR_DOMAIN_ID_APPS 3
+
+/* Static Services */
+
+#define GPR_APM_MODULE_IID 1
+#define GPR_PRM_MODULE_IID 2
+#define GPR_AMDB_MODULE_IID 3
+#define GPR_VCPM_MODULE_IID 4
+
+#endif /* __DT_BINDINGS_QCOM_GPR_H */
diff --git a/include/dt-bindings/soc/rockchip,vop2.h b/include/dt-bindings/soc/rockchip,vop2.h
new file mode 100644
index 000000000000..6e66a802b96a
--- /dev/null
+++ b/include/dt-bindings/soc/rockchip,vop2.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+
+#ifndef __DT_BINDINGS_ROCKCHIP_VOP2_H
+#define __DT_BINDINGS_ROCKCHIP_VOP2_H
+
+#define ROCKCHIP_VOP2_EP_RGB0 1
+#define ROCKCHIP_VOP2_EP_HDMI0 2
+#define ROCKCHIP_VOP2_EP_EDP0 3
+#define ROCKCHIP_VOP2_EP_MIPI0 4
+#define ROCKCHIP_VOP2_EP_LVDS0 5
+#define ROCKCHIP_VOP2_EP_MIPI1 6
+#define ROCKCHIP_VOP2_EP_LVDS1 7
+
+#endif /* __DT_BINDINGS_ROCKCHIP_VOP2_H */
diff --git a/include/dt-bindings/soc/samsung,boot-mode.h b/include/dt-bindings/soc/samsung,boot-mode.h
new file mode 100644
index 000000000000..47ef1cdd3916
--- /dev/null
+++ b/include/dt-bindings/soc/samsung,boot-mode.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Samsung Electronics Co., Ltd.
+ * Author: Chanho Park <chanho61.park@samsung.com>
+ *
+ * Device Tree bindings for Samsung Boot Mode.
+ */
+
+#ifndef __DT_BINDINGS_SAMSUNG_BOOT_MODE_H
+#define __DT_BINDINGS_SAMSUNG_BOOT_MODE_H
+
+/* Boot mode definitions for Exynos Auto v9 SoC */
+
+#define EXYNOSAUTOV9_BOOT_FASTBOOT 0xfa
+#define EXYNOSAUTOV9_BOOT_BOOTLOADER 0xfc
+#define EXYNOSAUTOV9_BOOT_RECOVERY 0xff
+
+#endif /* __DT_BINDINGS_SAMSUNG_BOOT_MODE_H */
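These magic values are written to a syscon register so the bootloader can pick the next boot target; with the generic syscon-reboot-mode binding they map onto mode-* properties. A sketch assuming a register offset of 0 (the real offset is SoC-specific):

    #include <dt-bindings/soc/samsung,boot-mode.h>

    reboot-mode {
            compatible = "syscon-reboot-mode";
            offset = <0x0>;   /* assumed offset */
            mode-fastboot = <EXYNOSAUTOV9_BOOT_FASTBOOT>;
            mode-bootloader = <EXYNOSAUTOV9_BOOT_BOOTLOADER>;
            mode-recovery = <EXYNOSAUTOV9_BOOT_RECOVERY>;
    };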
diff --git a/include/dt-bindings/soc/samsung,exynos-usi.h b/include/dt-bindings/soc/samsung,exynos-usi.h
new file mode 100644
index 000000000000..a01af169d249
--- /dev/null
+++ b/include/dt-bindings/soc/samsung,exynos-usi.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021 Linaro Ltd.
+ * Author: Sam Protsenko <semen.protsenko@linaro.org>
+ *
+ * Device Tree bindings for Samsung Exynos USI (Universal Serial Interface).
+ */
+
+#ifndef __DT_BINDINGS_SAMSUNG_EXYNOS_USI_H
+#define __DT_BINDINGS_SAMSUNG_EXYNOS_USI_H
+
+#define USI_V2_NONE 0
+#define USI_V2_UART 1
+#define USI_V2_SPI 2
+#define USI_V2_I2C 3
+
+#endif /* __DT_BINDINGS_SAMSUNG_EXYNOS_USI_H */
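The USI block muxes one set of pins between UART, SPI and I2C, and the value selects which protocol the block is configured for. A sketch assuming the Exynos850-style USI binding; the unit address is illustrative and the sysreg plumbing is omitted:

    #include <dt-bindings/soc/samsung,exynos-usi.h>

    usi_uart: usi@138200c0 {
            compatible = "samsung,exynos850-usi";
            samsung,mode = <USI_V2_UART>;
            /* samsung,sysreg, ranges and the child serial node omitted */
    };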
diff --git a/include/dt-bindings/soc/tegra-pmc.h b/include/dt-bindings/soc/tegra-pmc.h
new file mode 100644
index 000000000000..a99a457471ee
--- /dev/null
+++ b/include/dt-bindings/soc/tegra-pmc.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_SOC_TEGRA_PMC_H
+#define _DT_BINDINGS_SOC_TEGRA_PMC_H
+
+#define TEGRA_PMC_CLK_OUT_1 0
+#define TEGRA_PMC_CLK_OUT_2 1
+#define TEGRA_PMC_CLK_OUT_3 2
+#define TEGRA_PMC_CLK_BLINK 3
+
+#define TEGRA_PMC_CLK_MAX 4
+
+#endif /* _DT_BINDINGS_SOC_TEGRA_PMC_H */
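The PMC acts as a clock provider for its clk_out_1..3 and blink outputs, so these IDs are ordinary clock-specifier cells. A sketch with a hypothetical codec consumer (the `&tegra_pmc` label and consumer node are assumptions):

    #include <dt-bindings/soc/tegra-pmc.h>

    audio-codec@1a {
            /* consumer node is illustrative */
            clocks = <&tegra_pmc TEGRA_PMC_CLK_OUT_2>;
            clock-names = "mclk";
    };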
diff --git a/include/dt-bindings/soc/zte,pm_domains.h b/include/dt-bindings/soc/zte,pm_domains.h
deleted file mode 100644
index df044705a5ec..000000000000
--- a/include/dt-bindings/soc/zte,pm_domains.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 Linaro Ltd.
- *
- * Author: Baoyou Xie <baoyou.xie@linaro.org>
- */
-
-#ifndef _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H
-#define _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H
-
-#define DM_ZX296718_SAPPU 0
-#define DM_ZX296718_VDE 1 /* g1v6 */
-#define DM_ZX296718_VCE 2 /* h1v6 */
-#define DM_ZX296718_HDE 3 /* g2v2 */
-#define DM_ZX296718_VIU 4
-#define DM_ZX296718_USB20 5
-#define DM_ZX296718_USB21 6
-#define DM_ZX296718_USB30 7
-#define DM_ZX296718_HSIC 8
-#define DM_ZX296718_GMAC 9
-#define DM_ZX296718_TS 10
-#define DM_ZX296718_VOU 11
-
-#endif /* _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H */
diff --git a/include/dt-bindings/sound/adi,adau1977.h b/include/dt-bindings/sound/adi,adau1977.h
new file mode 100644
index 000000000000..8eebec6570f2
--- /dev/null
+++ b/include/dt-bindings/sound/adi,adau1977.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __DT_BINDINGS_ADI_ADAU1977_H__
+#define __DT_BINDINGS_ADI_ADAU1977_H__
+
+#define ADAU1977_MICBIAS_5V0 0x0
+#define ADAU1977_MICBIAS_5V5 0x1
+#define ADAU1977_MICBIAS_6V0 0x2
+#define ADAU1977_MICBIAS_6V5 0x3
+#define ADAU1977_MICBIAS_7V0 0x4
+#define ADAU1977_MICBIAS_7V5 0x5
+#define ADAU1977_MICBIAS_8V0 0x6
+#define ADAU1977_MICBIAS_8V5 0x7
+#define ADAU1977_MICBIAS_9V0 0x8
+
+#endif /* __DT_BINDINGS_ADI_ADAU1977_H__ */
diff --git a/include/dt-bindings/sound/apq8016-lpass.h b/include/dt-bindings/sound/apq8016-lpass.h
index 3c3e16c0aadb..dc605c4bc224 100644
--- a/include/dt-bindings/sound/apq8016-lpass.h
+++ b/include/dt-bindings/sound/apq8016-lpass.h
@@ -2,9 +2,8 @@
#ifndef __DT_APQ8016_LPASS_H
#define __DT_APQ8016_LPASS_H
-#define MI2S_PRIMARY 0
-#define MI2S_SECONDARY 1
-#define MI2S_TERTIARY 2
-#define MI2S_QUATERNARY 3
+#include <dt-bindings/sound/qcom,lpass.h>
+
+/* NOTE: Use qcom,lpass.h to define any AIF IDs for LPASS */
#endif /* __DT_APQ8016_LPASS_H */
diff --git a/include/dt-bindings/sound/cs35l45.h b/include/dt-bindings/sound/cs35l45.h
new file mode 100644
index 000000000000..076da4b2c28d
--- /dev/null
+++ b/include/dt-bindings/sound/cs35l45.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * cs35l45.h -- CS35L45 ALSA SoC audio driver DT bindings header
+ *
+ * Copyright 2022 Cirrus Logic, Inc.
+ */
+
+#ifndef DT_CS35L45_H
+#define DT_CS35L45_H
+
+/*
+ * cirrus,asp-sdout-hiz-ctrl
+ *
+ * TX_HIZ_UNUSED: TX pin high-impedance during unused slots.
+ * TX_HIZ_DISABLED: TX pin high-impedance when all channels disabled.
+ */
+#define CS35L45_ASP_TX_HIZ_UNUSED 0x1
+#define CS35L45_ASP_TX_HIZ_DISABLED 0x2
+
+#endif /* DT_CS35L45_H */
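Since the two values are distinct bits, a board can request both behaviours by OR-ing them into the property the comment names. A sketch; the I2C address and node name are placeholders:

    #include <dt-bindings/sound/cs35l45.h>

    cs35l45: speaker-amp@30 {
            compatible = "cirrus,cs35l45";
            reg = <0x30>;
            cirrus,asp-sdout-hiz-ctrl = <(CS35L45_ASP_TX_HIZ_UNUSED |
                                          CS35L45_ASP_TX_HIZ_DISABLED)>;
    };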
diff --git a/include/dt-bindings/sound/meson-aiu.h b/include/dt-bindings/sound/meson-aiu.h
new file mode 100644
index 000000000000..1051b8af298b
--- /dev/null
+++ b/include/dt-bindings/sound/meson-aiu.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_MESON_AIU_H
+#define __DT_MESON_AIU_H
+
+#define AIU_CPU 0
+#define AIU_HDMI 1
+#define AIU_ACODEC 2
+
+#define CPU_I2S_FIFO 0
+#define CPU_SPDIF_FIFO 1
+#define CPU_I2S_ENCODER 2
+#define CPU_SPDIF_ENCODER 3
+
+#define CTRL_I2S 0
+#define CTRL_PCM 1
+#define CTRL_OUT 2
+
+#endif /* __DT_MESON_AIU_H */
diff --git a/include/dt-bindings/sound/meson-g12a-toacodec.h b/include/dt-bindings/sound/meson-g12a-toacodec.h
new file mode 100644
index 000000000000..69d7a75592a2
--- /dev/null
+++ b/include/dt-bindings/sound/meson-g12a-toacodec.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_MESON_G12A_TOACODEC_H
+#define __DT_MESON_G12A_TOACODEC_H
+
+#define TOACODEC_IN_A 0
+#define TOACODEC_IN_B 1
+#define TOACODEC_IN_C 2
+#define TOACODEC_OUT 3
+
+#endif /* __DT_MESON_G12A_TOACODEC_H */
diff --git a/include/dt-bindings/sound/microchip,pdmc.h b/include/dt-bindings/sound/microchip,pdmc.h
new file mode 100644
index 000000000000..96cde94ce74f
--- /dev/null
+++ b/include/dt-bindings/sound/microchip,pdmc.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_MICROCHIP_PDMC_H__
+#define __DT_BINDINGS_MICROCHIP_PDMC_H__
+
+/* PDM microphone's pin placement */
+#define MCHP_PDMC_DS0 0
+#define MCHP_PDMC_DS1 1
+
+/* PDM microphone clock edge sampling */
+#define MCHP_PDMC_CLK_POSITIVE 0
+#define MCHP_PDMC_CLK_NEGATIVE 1
+
+#endif /* __DT_BINDINGS_MICROCHIP_PDMC_H__ */
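The two define groups pair up per microphone: each entry of the mic-position property takes a (data line, sampling edge) tuple. A sketch assuming the SAMA7G5 PDMC binding (unit address illustrative):

    #include <dt-bindings/sound/microchip,pdmc.h>

    pdmc0: sound@e1608000 {
            compatible = "microchip,sama7g5-pdmc";
            microchip,mic-pos = <MCHP_PDMC_DS0 MCHP_PDMC_CLK_POSITIVE>,
                                <MCHP_PDMC_DS1 MCHP_PDMC_CLK_NEGATIVE>;
    };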
diff --git a/include/dt-bindings/sound/qcom,lpass.h b/include/dt-bindings/sound/qcom,lpass.h
new file mode 100644
index 000000000000..a9404c3b8884
--- /dev/null
+++ b/include/dt-bindings/sound/qcom,lpass.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_QCOM_LPASS_H
+#define __DT_QCOM_LPASS_H
+
+#define MI2S_PRIMARY 0
+#define MI2S_SECONDARY 1
+#define MI2S_TERTIARY 2
+#define MI2S_QUATERNARY 3
+#define MI2S_QUINARY 4
+
+#define LPASS_DP_RX 5
+
+#define LPASS_CDC_DMA_RX0 6
+#define LPASS_CDC_DMA_RX1 7
+#define LPASS_CDC_DMA_RX2 8
+#define LPASS_CDC_DMA_RX3 9
+#define LPASS_CDC_DMA_RX4 10
+#define LPASS_CDC_DMA_RX5 11
+#define LPASS_CDC_DMA_RX6 12
+#define LPASS_CDC_DMA_RX7 13
+#define LPASS_CDC_DMA_RX8 14
+#define LPASS_CDC_DMA_RX9 15
+
+#define LPASS_CDC_DMA_TX0 16
+#define LPASS_CDC_DMA_TX1 17
+#define LPASS_CDC_DMA_TX2 18
+#define LPASS_CDC_DMA_TX3 19
+#define LPASS_CDC_DMA_TX4 20
+#define LPASS_CDC_DMA_TX5 21
+#define LPASS_CDC_DMA_TX6 22
+#define LPASS_CDC_DMA_TX7 23
+#define LPASS_CDC_DMA_TX8 24
+
+#define LPASS_CDC_DMA_VA_TX0 25
+#define LPASS_CDC_DMA_VA_TX1 26
+#define LPASS_CDC_DMA_VA_TX2 27
+#define LPASS_CDC_DMA_VA_TX3 28
+#define LPASS_CDC_DMA_VA_TX4 29
+#define LPASS_CDC_DMA_VA_TX5 30
+#define LPASS_CDC_DMA_VA_TX6 31
+#define LPASS_CDC_DMA_VA_TX7 32
+#define LPASS_CDC_DMA_VA_TX8 33
+
+#define LPASS_MCLK0 0
+
+#endif /* __DT_QCOM_LPASS_H */
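A machine driver selects an LPASS AIF by passing one of these IDs as the DAI cell after the LPASS phandle. A sketch; the `&lpass` label and link layout are illustrative:

    #include <dt-bindings/sound/qcom,lpass.h>

    sound {
            dai-link@0 {
                    cpu {
                            sound-dai = <&lpass MI2S_PRIMARY>;
                    };
            };
    };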
diff --git a/include/dt-bindings/sound/qcom,q6afe.h b/include/dt-bindings/sound/qcom,q6afe.h
index 1df06f8ad5c3..9d5d89cfabcf 100644
--- a/include/dt-bindings/sound/qcom,q6afe.h
+++ b/include/dt-bindings/sound/qcom,q6afe.h
@@ -2,111 +2,8 @@
#ifndef __DT_BINDINGS_Q6_AFE_H__
#define __DT_BINDINGS_Q6_AFE_H__
-/* Audio Front End (AFE) virtual ports IDs */
-#define HDMI_RX 1
-#define SLIMBUS_0_RX 2
-#define SLIMBUS_0_TX 3
-#define SLIMBUS_1_RX 4
-#define SLIMBUS_1_TX 5
-#define SLIMBUS_2_RX 6
-#define SLIMBUS_2_TX 7
-#define SLIMBUS_3_RX 8
-#define SLIMBUS_3_TX 9
-#define SLIMBUS_4_RX 10
-#define SLIMBUS_4_TX 11
-#define SLIMBUS_5_RX 12
-#define SLIMBUS_5_TX 13
-#define SLIMBUS_6_RX 14
-#define SLIMBUS_6_TX 15
-#define PRIMARY_MI2S_RX 16
-#define PRIMARY_MI2S_TX 17
-#define SECONDARY_MI2S_RX 18
-#define SECONDARY_MI2S_TX 19
-#define TERTIARY_MI2S_RX 20
-#define TERTIARY_MI2S_TX 21
-#define QUATERNARY_MI2S_RX 22
-#define QUATERNARY_MI2S_TX 23
-#define PRIMARY_TDM_RX_0 24
-#define PRIMARY_TDM_TX_0 25
-#define PRIMARY_TDM_RX_1 26
-#define PRIMARY_TDM_TX_1 27
-#define PRIMARY_TDM_RX_2 28
-#define PRIMARY_TDM_TX_2 29
-#define PRIMARY_TDM_RX_3 30
-#define PRIMARY_TDM_TX_3 31
-#define PRIMARY_TDM_RX_4 32
-#define PRIMARY_TDM_TX_4 33
-#define PRIMARY_TDM_RX_5 34
-#define PRIMARY_TDM_TX_5 35
-#define PRIMARY_TDM_RX_6 36
-#define PRIMARY_TDM_TX_6 37
-#define PRIMARY_TDM_RX_7 38
-#define PRIMARY_TDM_TX_7 39
-#define SECONDARY_TDM_RX_0 40
-#define SECONDARY_TDM_TX_0 41
-#define SECONDARY_TDM_RX_1 42
-#define SECONDARY_TDM_TX_1 43
-#define SECONDARY_TDM_RX_2 44
-#define SECONDARY_TDM_TX_2 45
-#define SECONDARY_TDM_RX_3 46
-#define SECONDARY_TDM_TX_3 47
-#define SECONDARY_TDM_RX_4 48
-#define SECONDARY_TDM_TX_4 49
-#define SECONDARY_TDM_RX_5 50
-#define SECONDARY_TDM_TX_5 51
-#define SECONDARY_TDM_RX_6 52
-#define SECONDARY_TDM_TX_6 53
-#define SECONDARY_TDM_RX_7 54
-#define SECONDARY_TDM_TX_7 55
-#define TERTIARY_TDM_RX_0 56
-#define TERTIARY_TDM_TX_0 57
-#define TERTIARY_TDM_RX_1 58
-#define TERTIARY_TDM_TX_1 59
-#define TERTIARY_TDM_RX_2 60
-#define TERTIARY_TDM_TX_2 61
-#define TERTIARY_TDM_RX_3 62
-#define TERTIARY_TDM_TX_3 63
-#define TERTIARY_TDM_RX_4 64
-#define TERTIARY_TDM_TX_4 65
-#define TERTIARY_TDM_RX_5 66
-#define TERTIARY_TDM_TX_5 67
-#define TERTIARY_TDM_RX_6 68
-#define TERTIARY_TDM_TX_6 69
-#define TERTIARY_TDM_RX_7 70
-#define TERTIARY_TDM_TX_7 71
-#define QUATERNARY_TDM_RX_0 72
-#define QUATERNARY_TDM_TX_0 73
-#define QUATERNARY_TDM_RX_1 74
-#define QUATERNARY_TDM_TX_1 75
-#define QUATERNARY_TDM_RX_2 76
-#define QUATERNARY_TDM_TX_2 77
-#define QUATERNARY_TDM_RX_3 78
-#define QUATERNARY_TDM_TX_3 79
-#define QUATERNARY_TDM_RX_4 80
-#define QUATERNARY_TDM_TX_4 81
-#define QUATERNARY_TDM_RX_5 82
-#define QUATERNARY_TDM_TX_5 83
-#define QUATERNARY_TDM_RX_6 84
-#define QUATERNARY_TDM_TX_6 85
-#define QUATERNARY_TDM_RX_7 86
-#define QUATERNARY_TDM_TX_7 87
-#define QUINARY_TDM_RX_0 88
-#define QUINARY_TDM_TX_0 89
-#define QUINARY_TDM_RX_1 90
-#define QUINARY_TDM_TX_1 91
-#define QUINARY_TDM_RX_2 92
-#define QUINARY_TDM_TX_2 93
-#define QUINARY_TDM_RX_3 94
-#define QUINARY_TDM_TX_3 95
-#define QUINARY_TDM_RX_4 96
-#define QUINARY_TDM_TX_4 97
-#define QUINARY_TDM_RX_5 98
-#define QUINARY_TDM_TX_5 99
-#define QUINARY_TDM_RX_6 100
-#define QUINARY_TDM_TX_6 101
-#define QUINARY_TDM_RX_7 102
-#define QUINARY_TDM_TX_7 103
-#define DISPLAY_PORT_RX 104
+/* This file exists for backward-compatibility reasons; please do not delete it! */
-#endif /* __DT_BINDINGS_Q6_AFE_H__ */
+#include <dt-bindings/sound/qcom,q6dsp-lpass-ports.h>
+#endif /* __DT_BINDINGS_Q6_AFE_H__ */
diff --git a/include/dt-bindings/sound/qcom,q6asm.h b/include/dt-bindings/sound/qcom,q6asm.h
index 1eb77d87c2e8..f59d74f14395 100644
--- a/include/dt-bindings/sound/qcom,q6asm.h
+++ b/include/dt-bindings/sound/qcom,q6asm.h
@@ -19,4 +19,8 @@
#define MSM_FRONTEND_DAI_MULTIMEDIA15 14
#define MSM_FRONTEND_DAI_MULTIMEDIA16 15
+#define Q6ASM_DAI_TX_RX 0
+#define Q6ASM_DAI_TX 1
+#define Q6ASM_DAI_RX 2
+
#endif /* __DT_BINDINGS_Q6_ASM_H__ */
diff --git a/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h b/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h
new file mode 100644
index 000000000000..9f7c5103bc82
--- /dev/null
+++ b/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_Q6_AUDIO_PORTS_H__
+#define __DT_BINDINGS_Q6_AUDIO_PORTS_H__
+
+/* LPASS audio virtual port IDs */
+#define HDMI_RX 1
+#define SLIMBUS_0_RX 2
+#define SLIMBUS_0_TX 3
+#define SLIMBUS_1_RX 4
+#define SLIMBUS_1_TX 5
+#define SLIMBUS_2_RX 6
+#define SLIMBUS_2_TX 7
+#define SLIMBUS_3_RX 8
+#define SLIMBUS_3_TX 9
+#define SLIMBUS_4_RX 10
+#define SLIMBUS_4_TX 11
+#define SLIMBUS_5_RX 12
+#define SLIMBUS_5_TX 13
+#define SLIMBUS_6_RX 14
+#define SLIMBUS_6_TX 15
+#define PRIMARY_MI2S_RX 16
+#define PRIMARY_MI2S_TX 17
+#define SECONDARY_MI2S_RX 18
+#define SECONDARY_MI2S_TX 19
+#define TERTIARY_MI2S_RX 20
+#define TERTIARY_MI2S_TX 21
+#define QUATERNARY_MI2S_RX 22
+#define QUATERNARY_MI2S_TX 23
+#define PRIMARY_TDM_RX_0 24
+#define PRIMARY_TDM_TX_0 25
+#define PRIMARY_TDM_RX_1 26
+#define PRIMARY_TDM_TX_1 27
+#define PRIMARY_TDM_RX_2 28
+#define PRIMARY_TDM_TX_2 29
+#define PRIMARY_TDM_RX_3 30
+#define PRIMARY_TDM_TX_3 31
+#define PRIMARY_TDM_RX_4 32
+#define PRIMARY_TDM_TX_4 33
+#define PRIMARY_TDM_RX_5 34
+#define PRIMARY_TDM_TX_5 35
+#define PRIMARY_TDM_RX_6 36
+#define PRIMARY_TDM_TX_6 37
+#define PRIMARY_TDM_RX_7 38
+#define PRIMARY_TDM_TX_7 39
+#define SECONDARY_TDM_RX_0 40
+#define SECONDARY_TDM_TX_0 41
+#define SECONDARY_TDM_RX_1 42
+#define SECONDARY_TDM_TX_1 43
+#define SECONDARY_TDM_RX_2 44
+#define SECONDARY_TDM_TX_2 45
+#define SECONDARY_TDM_RX_3 46
+#define SECONDARY_TDM_TX_3 47
+#define SECONDARY_TDM_RX_4 48
+#define SECONDARY_TDM_TX_4 49
+#define SECONDARY_TDM_RX_5 50
+#define SECONDARY_TDM_TX_5 51
+#define SECONDARY_TDM_RX_6 52
+#define SECONDARY_TDM_TX_6 53
+#define SECONDARY_TDM_RX_7 54
+#define SECONDARY_TDM_TX_7 55
+#define TERTIARY_TDM_RX_0 56
+#define TERTIARY_TDM_TX_0 57
+#define TERTIARY_TDM_RX_1 58
+#define TERTIARY_TDM_TX_1 59
+#define TERTIARY_TDM_RX_2 60
+#define TERTIARY_TDM_TX_2 61
+#define TERTIARY_TDM_RX_3 62
+#define TERTIARY_TDM_TX_3 63
+#define TERTIARY_TDM_RX_4 64
+#define TERTIARY_TDM_TX_4 65
+#define TERTIARY_TDM_RX_5 66
+#define TERTIARY_TDM_TX_5 67
+#define TERTIARY_TDM_RX_6 68
+#define TERTIARY_TDM_TX_6 69
+#define TERTIARY_TDM_RX_7 70
+#define TERTIARY_TDM_TX_7 71
+#define QUATERNARY_TDM_RX_0 72
+#define QUATERNARY_TDM_TX_0 73
+#define QUATERNARY_TDM_RX_1 74
+#define QUATERNARY_TDM_TX_1 75
+#define QUATERNARY_TDM_RX_2 76
+#define QUATERNARY_TDM_TX_2 77
+#define QUATERNARY_TDM_RX_3 78
+#define QUATERNARY_TDM_TX_3 79
+#define QUATERNARY_TDM_RX_4 80
+#define QUATERNARY_TDM_TX_4 81
+#define QUATERNARY_TDM_RX_5 82
+#define QUATERNARY_TDM_TX_5 83
+#define QUATERNARY_TDM_RX_6 84
+#define QUATERNARY_TDM_TX_6 85
+#define QUATERNARY_TDM_RX_7 86
+#define QUATERNARY_TDM_TX_7 87
+#define QUINARY_TDM_RX_0 88
+#define QUINARY_TDM_TX_0 89
+#define QUINARY_TDM_RX_1 90
+#define QUINARY_TDM_TX_1 91
+#define QUINARY_TDM_RX_2 92
+#define QUINARY_TDM_TX_2 93
+#define QUINARY_TDM_RX_3 94
+#define QUINARY_TDM_TX_3 95
+#define QUINARY_TDM_RX_4 96
+#define QUINARY_TDM_TX_4 97
+#define QUINARY_TDM_RX_5 98
+#define QUINARY_TDM_TX_5 99
+#define QUINARY_TDM_RX_6 100
+#define QUINARY_TDM_TX_6 101
+#define QUINARY_TDM_RX_7 102
+#define QUINARY_TDM_TX_7 103
+#define DISPLAY_PORT_RX 104
+#define WSA_CODEC_DMA_RX_0 105
+#define WSA_CODEC_DMA_TX_0 106
+#define WSA_CODEC_DMA_RX_1 107
+#define WSA_CODEC_DMA_TX_1 108
+#define WSA_CODEC_DMA_TX_2 109
+#define VA_CODEC_DMA_TX_0 110
+#define VA_CODEC_DMA_TX_1 111
+#define VA_CODEC_DMA_TX_2 112
+#define RX_CODEC_DMA_RX_0 113
+#define TX_CODEC_DMA_TX_0 114
+#define RX_CODEC_DMA_RX_1 115
+#define TX_CODEC_DMA_TX_1 116
+#define RX_CODEC_DMA_RX_2 117
+#define TX_CODEC_DMA_TX_2 118
+#define RX_CODEC_DMA_RX_3 119
+#define TX_CODEC_DMA_TX_3 120
+#define RX_CODEC_DMA_RX_4 121
+#define TX_CODEC_DMA_TX_4 122
+#define RX_CODEC_DMA_RX_5 123
+#define TX_CODEC_DMA_TX_5 124
+#define RX_CODEC_DMA_RX_6 125
+#define RX_CODEC_DMA_RX_7 126
+#define QUINARY_MI2S_RX 127
+#define QUINARY_MI2S_TX 128
+
+#define LPASS_CLK_ID_PRI_MI2S_IBIT 1
+#define LPASS_CLK_ID_PRI_MI2S_EBIT 2
+#define LPASS_CLK_ID_SEC_MI2S_IBIT 3
+#define LPASS_CLK_ID_SEC_MI2S_EBIT 4
+#define LPASS_CLK_ID_TER_MI2S_IBIT 5
+#define LPASS_CLK_ID_TER_MI2S_EBIT 6
+#define LPASS_CLK_ID_QUAD_MI2S_IBIT 7
+#define LPASS_CLK_ID_QUAD_MI2S_EBIT 8
+#define LPASS_CLK_ID_SPEAKER_I2S_IBIT 9
+#define LPASS_CLK_ID_SPEAKER_I2S_EBIT 10
+#define LPASS_CLK_ID_SPEAKER_I2S_OSR 11
+#define LPASS_CLK_ID_QUI_MI2S_IBIT 12
+#define LPASS_CLK_ID_QUI_MI2S_EBIT 13
+#define LPASS_CLK_ID_SEN_MI2S_IBIT 14
+#define LPASS_CLK_ID_SEN_MI2S_EBIT 15
+#define LPASS_CLK_ID_INT0_MI2S_IBIT 16
+#define LPASS_CLK_ID_INT1_MI2S_IBIT 17
+#define LPASS_CLK_ID_INT2_MI2S_IBIT 18
+#define LPASS_CLK_ID_INT3_MI2S_IBIT 19
+#define LPASS_CLK_ID_INT4_MI2S_IBIT 20
+#define LPASS_CLK_ID_INT5_MI2S_IBIT 21
+#define LPASS_CLK_ID_INT6_MI2S_IBIT 22
+#define LPASS_CLK_ID_QUI_MI2S_OSR 23
+#define LPASS_CLK_ID_PRI_PCM_IBIT 24
+#define LPASS_CLK_ID_PRI_PCM_EBIT 25
+#define LPASS_CLK_ID_SEC_PCM_IBIT 26
+#define LPASS_CLK_ID_SEC_PCM_EBIT 27
+#define LPASS_CLK_ID_TER_PCM_IBIT 28
+#define LPASS_CLK_ID_TER_PCM_EBIT 29
+#define LPASS_CLK_ID_QUAD_PCM_IBIT 30
+#define LPASS_CLK_ID_QUAD_PCM_EBIT 31
+#define LPASS_CLK_ID_QUIN_PCM_IBIT 32
+#define LPASS_CLK_ID_QUIN_PCM_EBIT 33
+#define LPASS_CLK_ID_QUI_PCM_OSR 34
+#define LPASS_CLK_ID_PRI_TDM_IBIT 35
+#define LPASS_CLK_ID_PRI_TDM_EBIT 36
+#define LPASS_CLK_ID_SEC_TDM_IBIT 37
+#define LPASS_CLK_ID_SEC_TDM_EBIT 38
+#define LPASS_CLK_ID_TER_TDM_IBIT 39
+#define LPASS_CLK_ID_TER_TDM_EBIT 40
+#define LPASS_CLK_ID_QUAD_TDM_IBIT 41
+#define LPASS_CLK_ID_QUAD_TDM_EBIT 42
+#define LPASS_CLK_ID_QUIN_TDM_IBIT 43
+#define LPASS_CLK_ID_QUIN_TDM_EBIT 44
+#define LPASS_CLK_ID_QUIN_TDM_OSR 45
+#define LPASS_CLK_ID_MCLK_1 46
+#define LPASS_CLK_ID_MCLK_2 47
+#define LPASS_CLK_ID_MCLK_3 48
+#define LPASS_CLK_ID_MCLK_4 49
+#define LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE 50
+#define LPASS_CLK_ID_INT_MCLK_0 51
+#define LPASS_CLK_ID_INT_MCLK_1 52
+#define LPASS_CLK_ID_MCLK_5 53
+#define LPASS_CLK_ID_WSA_CORE_MCLK 54
+#define LPASS_CLK_ID_WSA_CORE_NPL_MCLK 55
+#define LPASS_CLK_ID_VA_CORE_MCLK 56
+#define LPASS_CLK_ID_TX_CORE_MCLK 57
+#define LPASS_CLK_ID_TX_CORE_NPL_MCLK 58
+#define LPASS_CLK_ID_RX_CORE_MCLK 59
+#define LPASS_CLK_ID_RX_CORE_NPL_MCLK 60
+#define LPASS_CLK_ID_VA_CORE_2X_MCLK 61
+/* Clock ID for MCLK for WSA2 core */
+#define LPASS_CLK_ID_WSA2_CORE_MCLK 62
+/* Clock ID for NPL MCLK for WSA2 core */
+#define LPASS_CLK_ID_WSA2_CORE_2X_MCLK 63
+/* Clock ID for RX Core TX MCLK */
+#define LPASS_CLK_ID_RX_CORE_TX_MCLK 64
+/* Clock ID for RX CORE TX 2X MCLK */
+#define LPASS_CLK_ID_RX_CORE_TX_2X_MCLK 65
+/* Clock ID for WSA core TX MCLK */
+#define LPASS_CLK_ID_WSA_CORE_TX_MCLK 66
+/* Clock ID for WSA core TX 2X MCLK */
+#define LPASS_CLK_ID_WSA_CORE_TX_2X_MCLK 67
+/* Clock ID for WSA2 core TX MCLK */
+#define LPASS_CLK_ID_WSA2_CORE_TX_MCLK 68
+/* Clock ID for WSA2 core TX 2X MCLK */
+#define LPASS_CLK_ID_WSA2_CORE_TX_2X_MCLK 69
+/* Clock ID for RX CORE MCLK2 2X MCLK */
+#define LPASS_CLK_ID_RX_CORE_MCLK2_2X_MCLK 70
+
+#define LPASS_HW_AVTIMER_VOTE 101
+#define LPASS_HW_MACRO_VOTE 102
+#define LPASS_HW_DCODEC_VOTE 103
+
+#define Q6AFE_MAX_CLK_ID 104
+
+#define LPASS_CLK_ATTRIBUTE_INVALID 0x0
+#define LPASS_CLK_ATTRIBUTE_COUPLE_NO 0x1
+#define LPASS_CLK_ATTRIBUTE_COUPLE_DIVIDEND 0x2
+#define LPASS_CLK_ATTRIBUTE_COUPLE_DIVISOR 0x3
+
+#endif /* __DT_BINDINGS_Q6_AUDIO_PORTS_H__ */
diff --git a/include/dt-bindings/sound/qcom,wcd9335.h b/include/dt-bindings/sound/qcom,wcd9335.h
new file mode 100644
index 000000000000..f5e9f1db091e
--- /dev/null
+++ b/include/dt-bindings/sound/qcom,wcd9335.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef __DT_SOUND_QCOM_WCD9335_H
+#define __DT_SOUND_QCOM_WCD9335_H
+
+#define AIF1_PB 0
+#define AIF1_CAP 1
+#define AIF2_PB 2
+#define AIF2_CAP 3
+#define AIF3_PB 4
+#define AIF3_CAP 5
+#define AIF4_PB 6
+#define NUM_CODEC_DAIS 7
+
+#endif
diff --git a/include/dt-bindings/sound/rt5640.h b/include/dt-bindings/sound/rt5640.h
index 154c9b4414f2..655f6946388a 100644
--- a/include/dt-bindings/sound/rt5640.h
+++ b/include/dt-bindings/sound/rt5640.h
@@ -16,6 +16,7 @@
#define RT5640_JD_SRC_GPIO2 4
#define RT5640_JD_SRC_GPIO3 5
#define RT5640_JD_SRC_GPIO4 6
+#define RT5640_JD_SRC_HDA_HEADER 7
#define RT5640_OVCD_SF_0P5 0
#define RT5640_OVCD_SF_0P75 1
diff --git a/include/dt-bindings/sound/sc7180-lpass.h b/include/dt-bindings/sound/sc7180-lpass.h
new file mode 100644
index 000000000000..5c1ee8b36b19
--- /dev/null
+++ b/include/dt-bindings/sound/sc7180-lpass.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_SC7180_LPASS_H
+#define __DT_SC7180_LPASS_H
+
+#include <dt-bindings/sound/qcom,lpass.h>
+
+/* NOTE: Use qcom,lpass.h to define any AIF IDs for LPASS */
+
+#endif /* __DT_SC7180_LPASS_H */
diff --git a/include/dt-bindings/sound/tlv320adc3xxx.h b/include/dt-bindings/sound/tlv320adc3xxx.h
new file mode 100644
index 000000000000..ec988439da20
--- /dev/null
+++ b/include/dt-bindings/sound/tlv320adc3xxx.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Devicetree bindings definitions for tlv320adc3xxx driver.
+ *
+ * Copyright (C) 2021 Axis Communications AB
+ */
+#ifndef __DT_TLV320ADC3XXX_H
+#define __DT_TLV320ADC3XXX_H
+
+#define ADC3XXX_GPIO_DISABLED 0 /* I/O buffers powered down */
+#define ADC3XXX_GPIO_INPUT 1 /* Various non-GPIO inputs */
+#define ADC3XXX_GPIO_GPI 2 /* General purpose input */
+#define ADC3XXX_GPIO_GPO 3 /* General purpose output */
+#define ADC3XXX_GPIO_CLKOUT 4 /* Source set in reg. CLKOUT_MUX */
+#define ADC3XXX_GPIO_INT1 5 /* INT1 output */
+#define ADC3XXX_GPIO_INT2 6 /* INT2 output */
+/* value 7 is reserved */
+#define ADC3XXX_GPIO_SECONDARY_BCLK 8 /* Codec interface secondary BCLK */
+#define ADC3XXX_GPIO_SECONDARY_WCLK 9 /* Codec interface secondary WCLK */
+#define ADC3XXX_GPIO_ADC_MOD_CLK 10 /* Clock output for digital mics */
+/* values 11-15 reserved */
+
+#define ADC3XXX_MICBIAS_OFF 0 /* Micbias pin powered off */
+#define ADC3XXX_MICBIAS_2_0V 1 /* Micbias pin set to 2.0V */
+#define ADC3XXX_MICBIAS_2_5V 2 /* Micbias pin set to 2.5V */
+#define ADC3XXX_MICBIAS_AVDD 3 /* Use AVDD voltage for micbias pin */
+
+#endif /* __DT_TLV320ADC3XXX_H */
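The micbias levels plug into per-pin voltage properties of the codec node; the property names below follow the tlv320adc3xxx binding as I understand it and should be treated as assumptions:

    #include <dt-bindings/sound/tlv320adc3xxx.h>

    audio-codec@18 {
            compatible = "ti,tlv320adc3101";
            reg = <0x18>;
            ti,micbias1-vg = <ADC3XXX_MICBIAS_2_0V>;  /* property name assumed */
            ti,micbias2-vg = <ADC3XXX_MICBIAS_AVDD>;
    };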
diff --git a/include/dt-bindings/sound/tlv320aic31xx-micbias.h b/include/dt-bindings/sound/tlv320aic31xx-micbias.h
deleted file mode 100644
index c6895a18a455..000000000000
--- a/include/dt-bindings/sound/tlv320aic31xx-micbias.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DT_TLV320AIC31XX_MICBIAS_H
-#define __DT_TLV320AIC31XX_MICBIAS_H
-
-#define MICBIAS_2_0V 1
-#define MICBIAS_2_5V 2
-#define MICBIAS_AVDDV 3
-
-#endif /* __DT_TLV320AIC31XX_MICBIAS_H */
diff --git a/include/dt-bindings/sound/tlv320aic31xx.h b/include/dt-bindings/sound/tlv320aic31xx.h
new file mode 100644
index 000000000000..4a80238ab250
--- /dev/null
+++ b/include/dt-bindings/sound/tlv320aic31xx.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_TLV320AIC31XX_H
+#define __DT_TLV320AIC31XX_H
+
+#define MICBIAS_2_0V 1
+#define MICBIAS_2_5V 2
+#define MICBIAS_AVDDV 3
+
+#define PLL_CLKIN_MCLK 0x00
+#define PLL_CLKIN_BCLK 0x01
+#define PLL_CLKIN_GPIO1 0x02
+#define PLL_CLKIN_DIN 0x03
+
+#endif /* __DT_TLV320AIC31XX_H */
diff --git a/include/dt-bindings/usb/pd.h b/include/dt-bindings/usb/pd.h
index 985f2bbd4d24..e6526b138174 100644
--- a/include/dt-bindings/usb/pd.h
+++ b/include/dt-bindings/usb/pd.h
@@ -85,4 +85,384 @@
PDO_PPS_APDO_MIN_VOLT(min_mv) | PDO_PPS_APDO_MAX_VOLT(max_mv) | \
PDO_PPS_APDO_MAX_CURR(max_ma))
-#endif /* __DT_POWER_DELIVERY_H */
+/*
+ * Based on "Table 6-14 Fixed Supply PDO - Sink" of "USB Power Delivery Specification Revision 3.0,
+ * Version 1.2"
+ * Initial current capability of the new source when vSafe5V is applied.
+ */
+#define FRS_DEFAULT_POWER 1
+#define FRS_5V_1P5A 2
+#define FRS_5V_3A 3
+
+/*
+ * SVDM Identity Header
+ * --------------------
+ * <31> :: data capable as a USB host
+ * <30> :: data capable as a USB device
+ * <29:27> :: product type (UFP / Cable / VPD)
+ * <26> :: modal operation supported (1b == yes)
+ * <25:23> :: product type (DFP) (SVDM version 2.0+ only; set to zero in version 1.0)
+ * <22:21> :: connector type (SVDM version 2.0+ only; set to zero in version 1.0)
+ * <20:16> :: Reserved, Shall be set to zero
+ * <15:0> :: USB-IF assigned VID for this cable vendor
+ */
+
+/* PD Rev2.0 definition */
+#define IDH_PTYPE_UNDEF 0
+
+/* SOP Product Type (UFP) */
+#define IDH_PTYPE_NOT_UFP 0
+#define IDH_PTYPE_HUB 1
+#define IDH_PTYPE_PERIPH 2
+#define IDH_PTYPE_PSD 3
+#define IDH_PTYPE_AMA 5
+
+/* SOP' Product Type (Cable Plug / VPD) */
+#define IDH_PTYPE_NOT_CABLE 0
+#define IDH_PTYPE_PCABLE 3
+#define IDH_PTYPE_ACABLE 4
+#define IDH_PTYPE_VPD 6
+
+/* SOP Product Type (DFP) */
+#define IDH_PTYPE_NOT_DFP 0
+#define IDH_PTYPE_DFP_HUB 1
+#define IDH_PTYPE_DFP_HOST 2
+#define IDH_PTYPE_DFP_PB 3
+
+#define VDO_IDH(usbh, usbd, ufp_cable, is_modal, dfp, conn, vid) \
+ ((usbh) << 31 | (usbd) << 30 | ((ufp_cable) & 0x7) << 27 \
+ | (is_modal) << 26 | ((dfp) & 0x7) << 23 | ((conn) & 0x3) << 21 \
+ | ((vid) & 0xffff))
+
+/*
+ * Cert Stat VDO
+ * -------------
+ * <31:0> : USB-IF assigned XID for this cable
+ */
+#define VDO_CERT(xid) ((xid) & 0xffffffff)
+
+/*
+ * Product VDO
+ * -----------
+ * <31:16> : USB Product ID
+ * <15:0> : USB bcdDevice
+ */
+#define VDO_PRODUCT(pid, bcd) (((pid) & 0xffff) << 16 | ((bcd) & 0xffff))
+
+/*
+ * UFP VDO (PD Revision 3.0+ only)
+ * --------
+ * <31:29> :: UFP VDO version
+ * <28> :: Reserved
+ * <27:24> :: Device capability
+ * <23:22> :: Connector type (10b == receptacle, 11b == captive plug)
+ * <21:11> :: Reserved
+ * <10:8> :: Vconn power (AMA only)
+ * <7> :: Vconn required (AMA only, 0b == no, 1b == yes)
+ * <6> :: Vbus required (AMA only, 0b == yes, 1b == no)
+ * <5:3> :: Alternate modes
+ * <2:0> :: USB highest speed
+ */
+/* UFP VDO Version */
+#define UFP_VDO_VER1_2 2
+
+/* Device Capability */
+#define DEV_USB2_CAPABLE (1 << 0)
+#define DEV_USB2_BILLBOARD (1 << 1)
+#define DEV_USB3_CAPABLE (1 << 2)
+#define DEV_USB4_CAPABLE (1 << 3)
+
+/* Connector Type */
+#define UFP_RECEPTACLE 2
+#define UFP_CAPTIVE 3
+
+/* Vconn Power (AMA only, set to AMA_VCONN_NOT_REQ if Vconn is not required) */
+#define AMA_VCONN_PWR_1W 0
+#define AMA_VCONN_PWR_1W5 1
+#define AMA_VCONN_PWR_2W 2
+#define AMA_VCONN_PWR_3W 3
+#define AMA_VCONN_PWR_4W 4
+#define AMA_VCONN_PWR_5W 5
+#define AMA_VCONN_PWR_6W 6
+
+/* Vconn Required (AMA only) */
+#define AMA_VCONN_NOT_REQ 0
+#define AMA_VCONN_REQ 1
+
+/* Vbus Required (AMA only) */
+#define AMA_VBUS_REQ 0
+#define AMA_VBUS_NOT_REQ 1
+
+/* Alternate Modes */
+#define UFP_ALTMODE_NOT_SUPP 0
+#define UFP_ALTMODE_TBT3 (1 << 0)
+#define UFP_ALTMODE_RECFG (1 << 1)
+#define UFP_ALTMODE_NO_RECFG (1 << 2)
+
+/* USB Highest Speed */
+#define UFP_USB2_ONLY 0
+#define UFP_USB32_GEN1 1
+#define UFP_USB32_4_GEN2 2
+#define UFP_USB4_GEN3 3
+
+#define VDO_UFP(ver, cap, conn, vcpwr, vcr, vbr, alt, spd) \
+ (((ver) & 0x7) << 29 | ((cap) & 0xf) << 24 | ((conn) & 0x3) << 22 \
+ | ((vcpwr) & 0x7) << 8 | (vcr) << 7 | (vbr) << 6 | ((alt) & 0x7) << 3 \
+ | ((spd) & 0x7))
+
+/*
+ * DFP VDO (PD Revision 3.0+ only)
+ * --------
+ * <31:29> :: DFP VDO version
+ * <28:27> :: Reserved
+ * <26:24> :: Host capability
+ * <23:22> :: Connector type (10b == receptacle, 11b == captive plug)
+ * <21:5> :: Reserved
+ * <4:0> :: Port number
+ */
+#define DFP_VDO_VER1_1 1
+#define HOST_USB2_CAPABLE (1 << 0)
+#define HOST_USB3_CAPABLE (1 << 1)
+#define HOST_USB4_CAPABLE (1 << 2)
+#define DFP_RECEPTACLE 2
+#define DFP_CAPTIVE 3
+
+#define VDO_DFP(ver, cap, conn, pnum) \
+ (((ver) & 0x7) << 29 | ((cap) & 0x7) << 24 | ((conn) & 0x3) << 22 \
+ | ((pnum) & 0x1f))
+
+/*
+ * Cable VDO (for both Passive and Active Cable VDO in PD Rev2.0)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:20> :: Reserved, Shall be set to zero
+ * <19:18> :: Type-C to Type-A/B/C/Captive (00b == A, 01b == B, 10b == C, 11b == Captive)
+ * <17> :: Reserved, Shall be set to zero
+ * <16:13> :: cable latency (0001 == <10ns(~1m length))
+ * <12:11> :: cable termination type (11b == both ends active VCONN req)
+ * <10> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
+ * <9> :: SSTX2 Directionality support
+ * <8> :: SSRX1 Directionality support
+ * <7> :: SSRX2 Directionality support
+ * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A)
+ * <4> :: Vbus through cable (0b == no, 1b == yes)
+ * <3> :: SOP" controller present? (0b == no, 1b == yes)
+ * <2:0> :: USB SS Signaling support
+ *
+ * Passive Cable VDO (PD Rev3.0+)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:21> :: VDO version
+ * <20> :: Reserved, Shall be set to zero
+ * <19:18> :: Type-C to Type-C/Captive (10b == C, 11b == Captive)
+ * <17> :: Reserved, Shall be set to zero
+ * <16:13> :: cable latency (0001 == <10ns(~1m length))
+ * <12:11> :: cable termination type (10b == Vconn not req, 01b == Vconn req)
+ * <10:9> :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V)
+ * <8:7> :: Reserved, Shall be set to zero
+ * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A)
+ * <4:3> :: Reserved, Shall be set to zero
+ * <2:0> :: USB highest speed
+ *
+ * Active Cable VDO 1 (PD Rev3.0+)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:21> :: VDO version
+ * <20> :: Reserved, Shall be set to zero
+ * <19:18> :: Connector type (10b == C, 11b == Captive)
+ * <17> :: Reserved, Shall be set to zero
+ * <16:13> :: cable latency (0001 == <10ns(~1m length))
+ * <12:11> :: cable termination type (10b == one end active, 11b == both ends active VCONN req)
+ * <10:9> :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V)
+ * <8> :: SBU supported (0b == supported, 1b == not supported)
+ * <7> :: SBU type (0b == passive, 1b == active)
+ * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A)
+ * <4> :: Vbus through cable (0b == no, 1b == yes)
+ * <3> :: SOP" controller present? (0b == no, 1b == yes)
+ * <2:0> :: USB highest speed
+ */
+/* Cable VDO Version */
+#define CABLE_VDO_VER1_0 0
+#define CABLE_VDO_VER1_3 3
+
+/* Connector Type (_ATYPE and _BTYPE are for PD Rev2.0 only) */
+#define CABLE_ATYPE 0
+#define CABLE_BTYPE 1
+#define CABLE_CTYPE 2
+#define CABLE_CAPTIVE 3
+
+/* Cable Latency */
+#define CABLE_LATENCY_1M 1
+#define CABLE_LATENCY_2M 2
+#define CABLE_LATENCY_3M 3
+#define CABLE_LATENCY_4M 4
+#define CABLE_LATENCY_5M 5
+#define CABLE_LATENCY_6M 6
+#define CABLE_LATENCY_7M 7
+#define CABLE_LATENCY_7M_PLUS 8
+
+/* Cable Termination Type */
+#define PCABLE_VCONN_NOT_REQ 0
+#define PCABLE_VCONN_REQ 1
+#define ACABLE_ONE_END 2
+#define ACABLE_BOTH_END 3
+
+/* Maximum Vbus Voltage */
+#define CABLE_MAX_VBUS_20V 0
+#define CABLE_MAX_VBUS_30V 1
+#define CABLE_MAX_VBUS_40V 2
+#define CABLE_MAX_VBUS_50V 3
+
+/* Active Cable SBU Supported/Type */
+#define ACABLE_SBU_SUPP 0
+#define ACABLE_SBU_NOT_SUPP 1
+#define ACABLE_SBU_PASSIVE 0
+#define ACABLE_SBU_ACTIVE 1
+
+/* Vbus Current Handling Capability */
+#define CABLE_CURR_DEF 0
+#define CABLE_CURR_3A 1
+#define CABLE_CURR_5A 2
+
+/* USB SuperSpeed Signaling Support (PD Rev2.0) */
+#define CABLE_USBSS_U2_ONLY 0
+#define CABLE_USBSS_U31_GEN1 1
+#define CABLE_USBSS_U31_GEN2 2
+
+/* USB Highest Speed */
+#define CABLE_USB2_ONLY 0
+#define CABLE_USB32_GEN1 1
+#define CABLE_USB32_4_GEN2 2
+#define CABLE_USB4_GEN3 3
+
+#define VDO_CABLE(hw, fw, cbl, lat, term, tx1d, tx2d, rx1d, rx2d, cur, vps, sopp, usbss) \
+ (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18 \
+ | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 | (tx1d) << 10 \
+ | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 | ((cur) & 0x3) << 5 \
+ | (vps) << 4 | (sopp) << 3 | ((usbss) & 0x7))
+#define VDO_PCABLE(hw, fw, ver, conn, lat, term, vbm, cur, spd) \
+ (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \
+ | ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11 \
+ | ((vbm) & 0x3) << 9 | ((cur) & 0x3) << 5 | ((spd) & 0x7))
+#define VDO_ACABLE1(hw, fw, ver, conn, lat, term, vbm, sbu, sbut, cur, vbt, sopp, spd) \
+ (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \
+ | ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11 \
+ | ((vbm) & 0x3) << 9 | (sbu) << 8 | (sbut) << 7 | ((cur) & 0x3) << 5 \
+ | (vbt) << 4 | (sopp) << 3 | ((spd) & 0x7))
+
+/*
+ * Active Cable VDO 2
+ * ---------
+ * <31:24> :: Maximum operating temperature
+ * <23:16> :: Shutdown temperature
+ * <15> :: Reserved, Shall be set to zero
+ * <14:12> :: U3/CLd power
+ * <11> :: U3 to U0 transition mode (0b == direct, 1b == through U3S)
+ * <10> :: Physical connection (0b == copper, 1b == optical)
+ * <9> :: Active element (0b == redriver, 1b == retimer)
+ * <8> :: USB4 supported (0b == yes, 1b == no)
+ * <7:6> :: USB2 hub hops consumed
+ * <5> :: USB2 supported (0b == yes, 1b == no)
+ * <4> :: USB3.2 supported (0b == yes, 1b == no)
+ * <3> :: USB lanes supported (0b == one lane, 1b == two lanes)
+ * <2> :: Optically isolated active cable (0b == no, 1b == yes)
+ * <1> :: Reserved, Shall be set to zero
+ * <0> :: USB gen (0b == gen1, 1b == gen2+)
+ */
+/* U3/CLd Power */
+#define ACAB2_U3_CLD_10MW_PLUS 0
+#define ACAB2_U3_CLD_10MW 1
+#define ACAB2_U3_CLD_5MW 2
+#define ACAB2_U3_CLD_1MW 3
+#define ACAB2_U3_CLD_500UW 4
+#define ACAB2_U3_CLD_200UW 5
+#define ACAB2_U3_CLD_50UW 6
+
+/* Other Active Cable VDO 2 Fields */
+#define ACAB2_U3U0_DIRECT 0
+#define ACAB2_U3U0_U3S 1
+#define ACAB2_PHY_COPPER 0
+#define ACAB2_PHY_OPTICAL 1
+#define ACAB2_REDRIVER 0
+#define ACAB2_RETIMER 1
+#define ACAB2_USB4_SUPP 0
+#define ACAB2_USB4_NOT_SUPP 1
+#define ACAB2_USB2_SUPP 0
+#define ACAB2_USB2_NOT_SUPP 1
+#define ACAB2_USB32_SUPP 0
+#define ACAB2_USB32_NOT_SUPP 1
+#define ACAB2_LANES_ONE 0
+#define ACAB2_LANES_TWO 1
+#define ACAB2_OPT_ISO_NO 0
+#define ACAB2_OPT_ISO_YES 1
+#define ACAB2_GEN_1 0
+#define ACAB2_GEN_2_PLUS 1
+
+#define VDO_ACABLE2(mtemp, stemp, u3p, trans, phy, ele, u4, hops, u2, u32, lane, iso, gen) \
+ (((mtemp) & 0xff) << 24 | ((stemp) & 0xff) << 16 | ((u3p) & 0x7) << 12 \
+ | (trans) << 11 | (phy) << 10 | (ele) << 9 | (u4) << 8 \
+ | ((hops) & 0x3) << 6 | (u2) << 5 | (u32) << 4 | (lane) << 3 \
+ | (iso) << 2 | (gen))
+
+/*
+ * AMA VDO (PD Rev2.0)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:12> :: Reserved, Shall be set to zero
+ * <11> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
+ * <10> :: SSTX2 Directionality support
+ * <9> :: SSRX1 Directionality support
+ * <8> :: SSRX2 Directionality support
+ * <7:5> :: Vconn power
+ * <4> :: Vconn power required
+ * <3> :: Vbus power required
+ * <2:0> :: USB SS Signaling support
+ */
+#define VDO_AMA(hw, fw, tx1d, tx2d, rx1d, rx2d, vcpwr, vcr, vbr, usbss) \
+ (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 \
+ | (tx1d) << 11 | (tx2d) << 10 | (rx1d) << 9 | (rx2d) << 8 \
+ | ((vcpwr) & 0x7) << 5 | (vcr) << 4 | (vbr) << 3 \
+ | ((usbss) & 0x7))
+
+#define PD_VDO_AMA_VCONN_REQ(vdo) (((vdo) >> 4) & 1)
+#define PD_VDO_AMA_VBUS_REQ(vdo) (((vdo) >> 3) & 1)
+
+#define AMA_USBSS_U2_ONLY 0
+#define AMA_USBSS_U31_GEN1 1
+#define AMA_USBSS_U31_GEN2 2
+#define AMA_USBSS_BBONLY 3
+
+/*
+ * VPD VDO
+ * ---------
+ * <31:28> :: HW version
+ * <27:24> :: FW version
+ * <23:21> :: VDO version
+ * <20:17> :: Reserved, Shall be set to zero
+ * <16:15> :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V)
+ * <14> :: Charge through current support (0b == 3A, 1b == 5A)
+ * <13> :: Reserved, Shall be set to zero
+ * <12:7> :: Vbus impedance
+ * <6:1> :: Ground impedance
+ * <0> :: Charge through support (0b == no, 1b == yes)
+ */
+#define VPD_VDO_VER1_0 0
+#define VPD_MAX_VBUS_20V 0
+#define VPD_MAX_VBUS_30V 1
+#define VPD_MAX_VBUS_40V 2
+#define VPD_MAX_VBUS_50V 3
+#define VPDCT_CURR_3A 0
+#define VPDCT_CURR_5A 1
+#define VPDCT_NOT_SUPP 0
+#define VPDCT_SUPP 1
+
+#define VDO_VPD(hw, fw, ver, vbm, curr, vbi, gi, ct) \
+ (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \
+ | ((vbm) & 0x3) << 15 | (curr) << 14 | ((vbi) & 0x3f) << 7 \
+ | ((gi) & 0x3f) << 1 | (ct))
+
+#endif /* __DT_POWER_DELIVERY_H */
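Because each VDO_* constructor packs the bitfields documented above, a Type-C connector node can spell out its SVDM identity and FRS capability symbolically instead of hand-computing 32-bit words. A sketch; the VID, PID and bcdDevice values are placeholders:

    #include <dt-bindings/usb/pd.h>

    connector {
            compatible = "usb-c-connector";
            new-source-frs-typec-current = <FRS_5V_1P5A>;
            /* host+device capable peripheral; VID/PID are hypothetical */
            sink-vdos = <VDO_IDH(1, 1, IDH_PTYPE_PERIPH, 0,
                                 IDH_PTYPE_NOT_DFP, UFP_RECEPTACLE, 0xffff)
                         VDO_CERT(0x0)
                         VDO_PRODUCT(0x1234, 0x0100)>;
    };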
diff --git a/include/keys/asymmetric-parser.h b/include/keys/asymmetric-parser.h
index 8a21d6a613ab..c47dc5405f79 100644
--- a/include/keys/asymmetric-parser.h
+++ b/include/keys/asymmetric-parser.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Asymmetric public-key cryptography data parser
*
- * See Documentation/crypto/asymmetric-keys.txt
+ * See Documentation/crypto/asymmetric-keys.rst
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
index 21407815d9c3..d55171f640a0 100644
--- a/include/keys/asymmetric-subtype.h
+++ b/include/keys/asymmetric-subtype.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Asymmetric public-key cryptography key subtype
*
- * See Documentation/crypto/asymmetric-keys.txt
+ * See Documentation/crypto/asymmetric-keys.rst
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h
index 91cfd9bd9385..69a13e1e5b2e 100644
--- a/include/keys/asymmetric-type.h
+++ b/include/keys/asymmetric-type.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Asymmetric Public-key cryptography key type interface
*
- * See Documentation/crypto/asymmetric-keys.txt
+ * See Documentation/crypto/asymmetric-keys.rst
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -53,7 +53,7 @@ struct asymmetric_key_id {
};
struct asymmetric_key_ids {
- void *id[2];
+ void *id[3];
};
extern bool asymmetric_key_id_same(const struct asymmetric_key_id *kid1,
@@ -72,11 +72,21 @@ const struct asymmetric_key_ids *asymmetric_key_ids(const struct key *key)
return key->payload.data[asym_key_ids];
}
+static inline
+const struct public_key *asymmetric_key_public_key(const struct key *key)
+{
+ return key->payload.data[asym_crypto];
+}
+
extern struct key *find_asymmetric_key(struct key *keyring,
const struct asymmetric_key_id *id_0,
const struct asymmetric_key_id *id_1,
+ const struct asymmetric_key_id *id_2,
bool partial);
+int x509_load_certificate_list(const u8 cert_list[], const unsigned long list_size,
+ const struct key *keyring);
+
/*
* The payload is at the discretion of the subtype.
*/
diff --git a/include/keys/big_key-type.h b/include/keys/big_key-type.h
index f6a7ba4dccd4..988d90d77f53 100644
--- a/include/keys/big_key-type.h
+++ b/include/keys/big_key-type.h
@@ -17,6 +17,7 @@ extern void big_key_free_preparse(struct key_preparsed_payload *prep);
extern void big_key_revoke(struct key *key);
extern void big_key_destroy(struct key *key);
extern void big_key_describe(const struct key *big_key, struct seq_file *m);
-extern long big_key_read(const struct key *key, char __user *buffer, size_t buflen);
+extern long big_key_read(const struct key *key, char *buffer, size_t buflen);
+extern int big_key_update(struct key *key, struct key_preparsed_payload *prep);
#endif /* _KEYS_BIG_KEY_TYPE_H */
diff --git a/include/keys/encrypted-type.h b/include/keys/encrypted-type.h
index 9e9ccb20d586..abfcbe02001a 100644
--- a/include/keys/encrypted-type.h
+++ b/include/keys/encrypted-type.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2010 IBM Corporation
* Copyright (C) 2010 Politecnico di Torino, Italy
- * TORSEC group -- http://security.polito.it
+ * TORSEC group -- https://security.polito.it
*
* Authors:
* Mimi Zohar <zohar@us.ibm.com>
@@ -27,7 +27,7 @@ struct encrypted_key_payload {
unsigned short payload_datalen; /* payload data length */
unsigned short encrypted_key_format; /* encrypted key format */
u8 *decrypted_data; /* decrypted data */
- u8 payload_data[0]; /* payload data + datablob + hmac */
+ u8 payload_data[]; /* payload data + datablob + hmac */
};
extern struct key_type key_type_encrypted;
diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h
index a183278c3e9e..333c0f49a9cd 100644
--- a/include/keys/rxrpc-type.h
+++ b/include/keys/rxrpc-type.h
@@ -28,55 +28,7 @@ struct rxkad_key {
u8 primary_flag; /* T if key for primary cell for this user */
u16 ticket_len; /* length of ticket[] */
u8 session_key[8]; /* DES session key */
- u8 ticket[0]; /* the encrypted ticket */
-};
-
-/*
- * Kerberos 5 principal
- * name/name/name@realm
- */
-struct krb5_principal {
- u8 n_name_parts; /* N of parts of the name part of the principal */
- char **name_parts; /* parts of the name part of the principal */
- char *realm; /* parts of the realm part of the principal */
-};
-
-/*
- * Kerberos 5 tagged data
- */
-struct krb5_tagged_data {
- /* for tag value, see /usr/include/krb5/krb5.h
- * - KRB5_AUTHDATA_* for auth data
- * -
- */
- s32 tag;
- u32 data_len;
- u8 *data;
-};
-
-/*
- * RxRPC key for Kerberos V (type-5 security)
- */
-struct rxk5_key {
- u64 authtime; /* time at which auth token generated */
- u64 starttime; /* time at which auth token starts */
- u64 endtime; /* time at which auth token expired */
- u64 renew_till; /* time to which auth token can be renewed */
- s32 is_skey; /* T if ticket is encrypted in another ticket's
- * skey */
- s32 flags; /* mask of TKT_FLG_* bits (krb5/krb5.h) */
- struct krb5_principal client; /* client principal name */
- struct krb5_principal server; /* server principal name */
- u16 ticket_len; /* length of ticket */
- u16 ticket2_len; /* length of second ticket */
- u8 n_authdata; /* number of authorisation data elements */
- u8 n_addresses; /* number of addresses */
- struct krb5_tagged_data session; /* session data; tag is enctype */
- struct krb5_tagged_data *addresses; /* addresses */
- u8 *ticket; /* krb5 ticket */
- u8 *ticket2; /* second krb5 ticket, if related to ticket (via
- * DUPLICATE-SKEY or ENC-TKT-IN-SKEY) */
- struct krb5_tagged_data *authdata; /* authorisation data */
+ u8 ticket[]; /* the encrypted ticket */
};
/*
@@ -84,10 +36,10 @@ struct rxk5_key {
*/
struct rxrpc_key_token {
u16 security_index; /* RxRPC header security index */
+ bool no_leak_key; /* Don't copy the key to userspace */
struct rxrpc_key_token *next; /* the next token in the list */
union {
struct rxkad_key *kad;
- struct rxk5_key *k5;
};
};
@@ -100,7 +52,7 @@ struct rxrpc_key_data_v1 {
u32 expiry; /* time_t */
u32 kvno;
u8 session_key[8];
- u8 ticket[0];
+ u8 ticket[];
};
/*
@@ -116,12 +68,6 @@ struct rxrpc_key_data_v1 {
#define AFSTOKEN_RK_TIX_MAX 12000 /* max RxKAD ticket size */
#define AFSTOKEN_GK_KEY_MAX 64 /* max GSSAPI key size */
#define AFSTOKEN_GK_TOKEN_MAX 16384 /* max GSSAPI token size */
-#define AFSTOKEN_K5_COMPONENTS_MAX 16 /* max K5 components */
-#define AFSTOKEN_K5_NAME_MAX 128 /* max K5 name length */
-#define AFSTOKEN_K5_REALM_MAX 64 /* max K5 realm name length */
-#define AFSTOKEN_K5_TIX_MAX 16384 /* max K5 ticket size */
-#define AFSTOKEN_K5_ADDRESSES_MAX 16 /* max K5 addresses */
-#define AFSTOKEN_K5_AUTHDATA_MAX 16 /* max K5 pieces of auth data */
/*
* Truncate a time64_t to the range from 1970 to 2106 as in the network
diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
index fb8b07daa9d1..91e080efb918 100644
--- a/include/keys/system_keyring.h
+++ b/include/keys/system_keyring.h
@@ -10,15 +10,29 @@
#include <linux/key.h>
+enum blacklist_hash_type {
+ /* TBSCertificate hash */
+ BLACKLIST_HASH_X509_TBS = 1,
+ /* Raw data hash */
+ BLACKLIST_HASH_BINARY = 2,
+};
+
#ifdef CONFIG_SYSTEM_TRUSTED_KEYRING
extern int restrict_link_by_builtin_trusted(struct key *keyring,
const struct key_type *type,
const union key_payload *payload,
struct key *restriction_key);
+extern __init int load_module_cert(struct key *keyring);
#else
#define restrict_link_by_builtin_trusted restrict_link_reject
+
+static inline __init int load_module_cert(struct key *keyring)
+{
+ return 0;
+}
+
#endif
#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
@@ -31,14 +45,30 @@ extern int restrict_link_by_builtin_and_secondary_trusted(
#define restrict_link_by_builtin_and_secondary_trusted restrict_link_by_builtin_trusted
#endif
+#ifdef CONFIG_INTEGRITY_MACHINE_KEYRING
+extern int restrict_link_by_builtin_secondary_and_machine(
+ struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *restrict_key);
+extern void __init set_machine_trusted_keys(struct key *keyring);
+#else
+#define restrict_link_by_builtin_secondary_and_machine restrict_link_by_builtin_trusted
+static inline void __init set_machine_trusted_keys(struct key *keyring)
+{
+}
+#endif
+
+extern struct pkcs7_message *pkcs7;
#ifdef CONFIG_SYSTEM_BLACKLIST_KEYRING
-extern int mark_hash_blacklisted(const char *hash);
+extern int mark_hash_blacklisted(const u8 *hash, size_t hash_len,
+ enum blacklist_hash_type hash_type);
extern int is_hash_blacklisted(const u8 *hash, size_t hash_len,
- const char *type);
+ enum blacklist_hash_type hash_type);
extern int is_binary_blacklisted(const u8 *hash, size_t hash_len);
#else
static inline int is_hash_blacklisted(const u8 *hash, size_t hash_len,
- const char *type)
+ enum blacklist_hash_type hash_type)
{
return 0;
}
@@ -49,6 +79,20 @@ static inline int is_binary_blacklisted(const u8 *hash, size_t hash_len)
}
#endif
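/*
 * Usage sketch (editorial, not part of this diff): callers now pass a raw
 * hash plus an explicit enum blacklist_hash_type instead of a type string.
 * The wrapper below is hypothetical.
 */
static int blacklist_cert_tbs(const u8 *tbs_hash, size_t hash_len)
{
	return mark_hash_blacklisted(tbs_hash, hash_len,
				     BLACKLIST_HASH_X509_TBS);
}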
+#ifdef CONFIG_SYSTEM_REVOCATION_LIST
+extern int add_key_to_revocation_list(const char *data, size_t size);
+extern int is_key_on_revocation_list(struct pkcs7_message *pkcs7);
+#else
+static inline int add_key_to_revocation_list(const char *data, size_t size)
+{
+ return 0;
+}
+static inline int is_key_on_revocation_list(struct pkcs7_message *pkcs7)
+{
+ return -ENOKEY;
+}
+#endif
+
#ifdef CONFIG_IMA_BLACKLIST_KEYRING
extern struct key *ima_blacklist_keyring;
diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h
index a94c03a61d8f..4eb64548a74f 100644
--- a/include/keys/trusted-type.h
+++ b/include/keys/trusted-type.h
@@ -11,6 +11,12 @@
#include <linux/rcupdate.h>
#include <linux/tpm.h>
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) "trusted_key: " fmt
+
#define MIN_KEY_SIZE 32
#define MAX_KEY_SIZE 128
#define MAX_BLOB_SIZE 512
@@ -22,6 +28,7 @@ struct trusted_key_payload {
unsigned int key_len;
unsigned int blob_len;
unsigned char migratable;
+ unsigned char old_format;
unsigned char key[MAX_KEY_SIZE + 1];
unsigned char blob[MAX_BLOB_SIZE];
};
@@ -30,6 +37,7 @@ struct trusted_key_options {
uint16_t keytype;
uint32_t keyhandle;
unsigned char keyauth[TPM_DIGEST_SIZE];
+ uint32_t blobauth_len;
unsigned char blobauth[TPM_DIGEST_SIZE];
uint32_t pcrinfo_len;
unsigned char pcrinfo[MAX_PCRINFO_SIZE];
@@ -40,6 +48,53 @@ struct trusted_key_options {
uint32_t policyhandle;
};
+struct trusted_key_ops {
+ /*
+ * flag to indicate if trusted key implementation supports migration
+ * or not.
+ */
+ unsigned char migratable;
+
+ /* Initialize key interface. */
+ int (*init)(void);
+
+ /* Seal a key. */
+ int (*seal)(struct trusted_key_payload *p, char *datablob);
+
+ /* Unseal a key. */
+ int (*unseal)(struct trusted_key_payload *p, char *datablob);
+
+ /* Optional: Get a randomized key. */
+ int (*get_random)(unsigned char *key, size_t key_len);
+
+ /* Exit key interface. */
+ void (*exit)(void);
+};
+
+struct trusted_key_source {
+ char *name;
+ struct trusted_key_ops *ops;
+};
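/*
 * Backend sketch (editorial, not part of this diff): a trust source such as
 * the new TEE or CAAM backends supplies a struct trusted_key_ops. All
 * identifiers and function bodies below are hypothetical placeholders.
 */
static int example_init(void)
{
	return 0;
}

static int example_seal(struct trusted_key_payload *p, char *datablob)
{
	return -EOPNOTSUPP;	/* a real backend seals p->key into p->blob */
}

static int example_unseal(struct trusted_key_payload *p, char *datablob)
{
	return -EOPNOTSUPP;	/* a real backend recovers p->key from p->blob */
}

static struct trusted_key_ops example_ops = {
	.migratable = 0,	/* sealed blobs are bound to this device */
	.init = example_init,
	.seal = example_seal,
	.unseal = example_unseal,
	/* .get_random is optional (see the comment in trusted_key_ops) */
};

static struct trusted_key_source example_source = {
	.name = "example",
	.ops = &example_ops,
};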
+
extern struct key_type key_type_trusted;
+#define TRUSTED_DEBUG 0
+
+#if TRUSTED_DEBUG
+static inline void dump_payload(struct trusted_key_payload *p)
+{
+ pr_info("key_len %d\n", p->key_len);
+ print_hex_dump(KERN_INFO, "key ", DUMP_PREFIX_NONE,
+ 16, 1, p->key, p->key_len, 0);
+ pr_info("bloblen %d\n", p->blob_len);
+ print_hex_dump(KERN_INFO, "blob ", DUMP_PREFIX_NONE,
+ 16, 1, p->blob, p->blob_len, 0);
+ pr_info("migratable %d\n", p->migratable);
+}
+#else
+static inline void dump_payload(struct trusted_key_payload *p)
+{
+}
+#endif
+
#endif /* _KEYS_TRUSTED_TYPE_H */
diff --git a/include/keys/trusted_caam.h b/include/keys/trusted_caam.h
new file mode 100644
index 000000000000..73fe2f32f65e
--- /dev/null
+++ b/include/keys/trusted_caam.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
+ */
+
+#ifndef __CAAM_TRUSTED_KEY_H
+#define __CAAM_TRUSTED_KEY_H
+
+extern struct trusted_key_ops trusted_key_caam_ops;
+
+#endif
diff --git a/include/keys/trusted_tee.h b/include/keys/trusted_tee.h
new file mode 100644
index 000000000000..151be25a979e
--- /dev/null
+++ b/include/keys/trusted_tee.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019-2021 Linaro Ltd.
+ *
+ * Author:
+ * Sumit Garg <sumit.garg@linaro.org>
+ */
+
+#ifndef __TEE_TRUSTED_KEY_H
+#define __TEE_TRUSTED_KEY_H
+
+#include <keys/trusted-type.h>
+
+extern struct trusted_key_ops trusted_key_tee_ops;
+
+#endif
diff --git a/include/keys/trusted_tpm.h b/include/keys/trusted_tpm.h
index a56d8e1298f2..7769b726863a 100644
--- a/include/keys/trusted_tpm.h
+++ b/include/keys/trusted_tpm.h
@@ -16,6 +16,8 @@
#define LOAD32N(buffer, offset) (*(uint32_t *)&buffer[offset])
#define LOAD16(buffer, offset) (ntohs(*(uint16_t *)&buffer[offset]))
+extern struct trusted_key_ops trusted_key_tpm_ops;
+
struct osapsess {
uint32_t handle;
unsigned char secret[SHA1_DIGEST_SIZE];
@@ -52,30 +54,19 @@ int tpm2_unseal_trusted(struct tpm_chip *chip,
#if TPM_DEBUG
static inline void dump_options(struct trusted_key_options *o)
{
- pr_info("trusted_key: sealing key type %d\n", o->keytype);
- pr_info("trusted_key: sealing key handle %0X\n", o->keyhandle);
- pr_info("trusted_key: pcrlock %d\n", o->pcrlock);
- pr_info("trusted_key: pcrinfo %d\n", o->pcrinfo_len);
+ pr_info("sealing key type %d\n", o->keytype);
+ pr_info("sealing key handle %0X\n", o->keyhandle);
+ pr_info("pcrlock %d\n", o->pcrlock);
+ pr_info("pcrinfo %d\n", o->pcrinfo_len);
print_hex_dump(KERN_INFO, "pcrinfo ", DUMP_PREFIX_NONE,
16, 1, o->pcrinfo, o->pcrinfo_len, 0);
}
-static inline void dump_payload(struct trusted_key_payload *p)
-{
- pr_info("trusted_key: key_len %d\n", p->key_len);
- print_hex_dump(KERN_INFO, "key ", DUMP_PREFIX_NONE,
- 16, 1, p->key, p->key_len, 0);
- pr_info("trusted_key: bloblen %d\n", p->blob_len);
- print_hex_dump(KERN_INFO, "blob ", DUMP_PREFIX_NONE,
- 16, 1, p->blob, p->blob_len, 0);
- pr_info("trusted_key: migratable %d\n", p->migratable);
-}
-
static inline void dump_sess(struct osapsess *s)
{
print_hex_dump(KERN_INFO, "trusted-key: handle ", DUMP_PREFIX_NONE,
16, 1, &s->handle, 4, 0);
- pr_info("trusted-key: secret:\n");
+ pr_info("secret:\n");
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE,
16, 1, &s->secret, SHA1_DIGEST_SIZE, 0);
pr_info("trusted-key: enonce:\n");
@@ -87,7 +78,7 @@ static inline void dump_tpm_buf(unsigned char *buf)
{
int len;
- pr_info("\ntrusted-key: tpm buffer\n");
+ pr_info("\ntpm buffer\n");
len = LOAD32(buf, TPM_SIZE_OFFSET);
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, buf, len, 0);
}
@@ -96,10 +87,6 @@ static inline void dump_options(struct trusted_key_options *o)
{
}
-static inline void dump_payload(struct trusted_key_payload *p)
-{
-}
-
static inline void dump_sess(struct osapsess *s)
{
}
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
index d5e73266a81a..386c31432789 100644
--- a/include/keys/user-type.h
+++ b/include/keys/user-type.h
@@ -27,7 +27,7 @@
struct user_key_payload {
struct rcu_head rcu; /* RCU destructor */
unsigned short datalen; /* length of this data */
- char data[0] __aligned(__alignof__(u64)); /* actual data */
+ char data[] __aligned(__alignof__(u64)); /* actual data */
};
extern struct key_type key_type_user;
@@ -41,8 +41,7 @@ extern int user_update(struct key *key, struct key_preparsed_payload *prep);
extern void user_revoke(struct key *key);
extern void user_destroy(struct key *key);
extern void user_describe(const struct key *user, struct seq_file *m);
-extern long user_read(const struct key *key,
- char __user *buffer, size_t buflen);
+extern long user_read(const struct key *key, char *buffer, size_t buflen);
static inline const struct user_key_payload *user_key_payload_rcu(const struct key *key)
{
diff --git a/include/kunit/assert.h b/include/kunit/assert.h
index ad889b539ab3..ace3de8d1ee7 100644
--- a/include/kunit/assert.h
+++ b/include/kunit/assert.h
@@ -10,7 +10,7 @@
#define _KUNIT_ASSERT_H
#include <linux/err.h>
-#include <linux/kernel.h>
+#include <linux/printk.h>
struct kunit;
struct string_stream;
@@ -29,57 +29,32 @@ enum kunit_assert_type {
};
/**
- * struct kunit_assert - Data for printing a failed assertion or expectation.
- * @test: the test case this expectation/assertion is associated with.
- * @type: the type (either an expectation or an assertion) of this kunit_assert.
- * @line: the source code line number that the expectation/assertion is at.
- * @file: the file path of the source file that the expectation/assertion is in.
- * @message: an optional message to provide additional context.
- * @format: a function which formats the data in this kunit_assert to a string.
- *
- * Represents a failed expectation/assertion. Contains all the data necessary to
- * format a string to a user reporting the failure.
+ * struct kunit_loc - Identifies the source location of a line of code.
+ * @line: the line number in the file.
+ * @file: the file name.
*/
-struct kunit_assert {
- struct kunit *test;
- enum kunit_assert_type type;
+struct kunit_loc {
int line;
const char *file;
- struct va_format message;
- void (*format)(const struct kunit_assert *assert,
- struct string_stream *stream);
};
-/**
- * KUNIT_INIT_VA_FMT_NULL - Default initializer for struct va_format.
- *
- * Used inside a struct initialization block to initialize struct va_format to
- * default values where fmt and va are null.
- */
-#define KUNIT_INIT_VA_FMT_NULL { .fmt = NULL, .va = NULL }
+#define KUNIT_CURRENT_LOC { .file = __FILE__, .line = __LINE__ }
/**
- * KUNIT_INIT_ASSERT_STRUCT() - Initializer for a &struct kunit_assert.
- * @kunit: The test case that this expectation/assertion is associated with.
- * @assert_type: The type (assertion or expectation) of this kunit_assert.
- * @fmt: The formatting function which builds a string out of this kunit_assert.
+ * struct kunit_assert - Data for printing a failed assertion or expectation.
*
- * The base initializer for a &struct kunit_assert.
+ * Represents a failed expectation/assertion. Contains all the data necessary to
+ * format a string to a user reporting the failure.
*/
-#define KUNIT_INIT_ASSERT_STRUCT(kunit, assert_type, fmt) { \
- .test = kunit, \
- .type = assert_type, \
- .file = __FILE__, \
- .line = __LINE__, \
- .message = KUNIT_INIT_VA_FMT_NULL, \
- .format = fmt \
-}
+struct kunit_assert {};
-void kunit_base_assert_format(const struct kunit_assert *assert,
- struct string_stream *stream);
+typedef void (*assert_format_t)(const struct kunit_assert *assert,
+ const struct va_format *message,
+ struct string_stream *stream);
-void kunit_assert_print_msg(const struct kunit_assert *assert,
- struct string_stream *stream);
+void kunit_assert_prologue(const struct kunit_loc *loc,
+ enum kunit_assert_type type,
+ struct string_stream *stream);
/**
* struct kunit_fail_assert - Represents a plain fail expectation/assertion.
@@ -92,23 +67,10 @@ struct kunit_fail_assert {
};
void kunit_fail_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream);
/**
- * KUNIT_INIT_FAIL_ASSERT_STRUCT() - Initializer for &struct kunit_fail_assert.
- * @test: The test case that this expectation/assertion is associated with.
- * @type: The type (assertion or expectation) of this kunit_assert.
- *
- * Initializes a &struct kunit_fail_assert. Intended to be used in
- * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
- */
-#define KUNIT_INIT_FAIL_ASSERT_STRUCT(test, type) { \
- .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
- type, \
- kunit_fail_assert_format) \
-}
-
-/**
* struct kunit_unary_assert - Represents a KUNIT_{EXPECT|ASSERT}_{TRUE|FALSE}
* @assert: The parent of this type.
* @condition: A string representation of a conditional expression.
@@ -125,22 +87,18 @@ struct kunit_unary_assert {
};
void kunit_unary_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream);
/**
* KUNIT_INIT_UNARY_ASSERT_STRUCT() - Initializes &struct kunit_unary_assert.
- * @test: The test case that this expectation/assertion is associated with.
- * @type: The type (assertion or expectation) of this kunit_assert.
* @cond: A string representation of the expression asserted true or false.
* @expect_true: True if of type KUNIT_{EXPECT|ASSERT}_TRUE, false otherwise.
*
* Initializes a &struct kunit_unary_assert. Intended to be used in
* KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
*/
-#define KUNIT_INIT_UNARY_ASSERT_STRUCT(test, type, cond, expect_true) { \
- .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
- type, \
- kunit_unary_assert_format), \
+#define KUNIT_INIT_UNARY_ASSERT_STRUCT(cond, expect_true) { \
.condition = cond, \
.expected_true = expect_true \
}
@@ -162,35 +120,42 @@ struct kunit_ptr_not_err_assert {
};
void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream);
/**
* KUNIT_INIT_PTR_NOT_ERR_ASSERT_STRUCT() - Initializes a
* &struct kunit_ptr_not_err_assert.
- * @test: The test case that this expectation/assertion is associated with.
- * @type: The type (assertion or expectation) of this kunit_assert.
* @txt: A string representation of the expression passed to the expectation.
* @val: The actual evaluated pointer value of the expression.
*
* Initializes a &struct kunit_ptr_not_err_assert. Intended to be used in
* KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
*/
-#define KUNIT_INIT_PTR_NOT_ERR_STRUCT(test, type, txt, val) { \
- .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
- type, \
- kunit_ptr_not_err_assert_format), \
+#define KUNIT_INIT_PTR_NOT_ERR_STRUCT(txt, val) { \
.text = txt, \
.value = val \
}
/**
+ * struct kunit_binary_assert_text - holds strings for &struct
+ * kunit_binary_assert and friends, helping keep those structs smaller.
+ * @operation: A string representation of the comparison operator (e.g. "==").
+ * @left_text: A string representation of the left expression (e.g. "2+2").
+ * @right_text: A string representation of the right expression (e.g. "4").
+ */
+struct kunit_binary_assert_text {
+ const char *operation;
+ const char *left_text;
+ const char *right_text;
+};
+
+/**
* struct kunit_binary_assert - An expectation/assertion that compares two
* non-pointer values (for example, KUNIT_EXPECT_EQ(test, 1 + 1, 2)).
* @assert: The parent of this type.
- * @operation: A string representation of the comparison operator (e.g. "==").
- * @left_text: A string representation of the expression in the left slot.
+ * @text: Holds the textual representations of the operands and op (e.g. "==").
* @left_value: The actual evaluated value of the expression in the left slot.
- * @right_text: A string representation of the expression in the right slot.
* @right_value: The actual evaluated value of the expression in the right slot.
*
* Represents an expectation/assertion that compares two non-pointer values. For
@@ -199,44 +164,33 @@ void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
*/
struct kunit_binary_assert {
struct kunit_assert assert;
- const char *operation;
- const char *left_text;
+ const struct kunit_binary_assert_text *text;
long long left_value;
- const char *right_text;
long long right_value;
};
void kunit_binary_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream);
/**
- * KUNIT_INIT_BINARY_ASSERT_STRUCT() - Initializes a
- * &struct kunit_binary_assert.
- * @test: The test case that this expectation/assertion is associated with.
- * @type: The type (assertion or expectation) of this kunit_assert.
- * @op_str: A string representation of the comparison operator (e.g. "==").
- * @left_str: A string representation of the expression in the left slot.
+ * KUNIT_INIT_BINARY_ASSERT_STRUCT() - Initializes a binary assert like
+ * kunit_binary_assert, kunit_binary_ptr_assert, etc.
+ *
+ * @text_: Pointer to a kunit_binary_assert_text.
* @left_val: The actual evaluated value of the expression in the left slot.
- * @right_str: A string representation of the expression in the right slot.
* @right_val: The actual evaluated value of the expression in the right slot.
*
- * Initializes a &struct kunit_binary_assert. Intended to be used in
- * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
+ * Initializes a binary assert like kunit_binary_assert,
+ * kunit_binary_ptr_assert, etc. This relies on these structs having the same
+ * fields but with different types for left_val/right_val.
+ * This is ultimately used by binary assertion macros like KUNIT_EXPECT_EQ, etc.
*/
-#define KUNIT_INIT_BINARY_ASSERT_STRUCT(test, \
- type, \
- op_str, \
- left_str, \
+#define KUNIT_INIT_BINARY_ASSERT_STRUCT(text_, \
left_val, \
- right_str, \
right_val) { \
- .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
- type, \
- kunit_binary_assert_format), \
- .operation = op_str, \
- .left_text = left_str, \
+ .text = text_, \
.left_value = left_val, \
- .right_text = right_str, \
.right_value = right_val \
}
@@ -244,10 +198,8 @@ void kunit_binary_assert_format(const struct kunit_assert *assert,
* struct kunit_binary_ptr_assert - An expectation/assertion that compares two
* pointer values (for example, KUNIT_EXPECT_PTR_EQ(test, foo, bar)).
* @assert: The parent of this type.
- * @operation: A string representation of the comparison operator (e.g. "==").
- * @left_text: A string representation of the expression in the left slot.
+ * @text: Holds the textual representations of the operands and op (e.g. "==").
* @left_value: The actual evaluated value of the expression in the left slot.
- * @right_text: A string representation of the expression in the right slot.
* @right_value: The actual evaluated value of the expression in the right slot.
*
* Represents an expectation/assertion that compares two pointer values. For
@@ -256,55 +208,21 @@ void kunit_binary_assert_format(const struct kunit_assert *assert,
*/
struct kunit_binary_ptr_assert {
struct kunit_assert assert;
- const char *operation;
- const char *left_text;
+ const struct kunit_binary_assert_text *text;
const void *left_value;
- const char *right_text;
const void *right_value;
};
void kunit_binary_ptr_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream);
/**
- * KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT() - Initializes a
- * &struct kunit_binary_ptr_assert.
- * @test: The test case that this expectation/assertion is associated with.
- * @type: The type (assertion or expectation) of this kunit_assert.
- * @op_str: A string representation of the comparison operator (e.g. "==").
- * @left_str: A string representation of the expression in the left slot.
- * @left_val: The actual evaluated value of the expression in the left slot.
- * @right_str: A string representation of the expression in the right slot.
- * @right_val: The actual evaluated value of the expression in the right slot.
- *
- * Initializes a &struct kunit_binary_ptr_assert. Intended to be used in
- * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
- */
-#define KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT(test, \
- type, \
- op_str, \
- left_str, \
- left_val, \
- right_str, \
- right_val) { \
- .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
- type, \
- kunit_binary_ptr_assert_format), \
- .operation = op_str, \
- .left_text = left_str, \
- .left_value = left_val, \
- .right_text = right_str, \
- .right_value = right_val \
-}
-
-/**
* struct kunit_binary_str_assert - An expectation/assertion that compares two
* string values (for example, KUNIT_EXPECT_STREQ(test, foo, "bar")).
* @assert: The parent of this type.
- * @operation: A string representation of the comparison operator (e.g. "==").
- * @left_text: A string representation of the expression in the left slot.
+ * @text: Holds the textual representations of the operands and comparator.
* @left_value: The actual evaluated value of the expression in the left slot.
- * @right_text: A string representation of the expression in the right slot.
* @right_value: The actual evaluated value of the expression in the right slot.
*
* Represents an expectation/assertion that compares two string values. For
@@ -313,45 +231,13 @@ void kunit_binary_ptr_assert_format(const struct kunit_assert *assert,
*/
struct kunit_binary_str_assert {
struct kunit_assert assert;
- const char *operation;
- const char *left_text;
+ const struct kunit_binary_assert_text *text;
const char *left_value;
- const char *right_text;
const char *right_value;
};
void kunit_binary_str_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream);
-/**
- * KUNIT_INIT_BINARY_STR_ASSERT_STRUCT() - Initializes a
- * &struct kunit_binary_str_assert.
- * @test: The test case that this expectation/assertion is associated with.
- * @type: The type (assertion or expectation) of this kunit_assert.
- * @op_str: A string representation of the comparison operator (e.g. "==").
- * @left_str: A string representation of the expression in the left slot.
- * @left_val: The actual evaluated value of the expression in the left slot.
- * @right_str: A string representation of the expression in the right slot.
- * @right_val: The actual evaluated value of the expression in the right slot.
- *
- * Initializes a &struct kunit_binary_str_assert. Intended to be used in
- * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
- */
-#define KUNIT_INIT_BINARY_STR_ASSERT_STRUCT(test, \
- type, \
- op_str, \
- left_str, \
- left_val, \
- right_str, \
- right_val) { \
- .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
- type, \
- kunit_binary_str_assert_format), \
- .operation = op_str, \
- .left_text = left_str, \
- .left_value = left_val, \
- .right_text = right_str, \
- .right_value = right_val \
-}
-
#endif /* _KUNIT_ASSERT_H */
diff --git a/include/kunit/resource.h b/include/kunit/resource.h
new file mode 100644
index 000000000000..cf6fb8f2ac1b
--- /dev/null
+++ b/include/kunit/resource.h
@@ -0,0 +1,390 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit resource API for test managed resources (allocations, etc.).
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: Daniel Latypov <dlatypov@google.com>
+ */
+
+#ifndef _KUNIT_RESOURCE_H
+#define _KUNIT_RESOURCE_H
+
+#include <kunit/test.h>
+
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+struct kunit_resource;
+
+typedef int (*kunit_resource_init_t)(struct kunit_resource *, void *);
+typedef void (*kunit_resource_free_t)(struct kunit_resource *);
+
+/**
+ * struct kunit_resource - represents a *test managed resource*
+ * @data: for the user to store arbitrary data.
+ * @name: optional name
+ * @free: a user supplied function to free the resource.
+ *
+ * Represents a *test managed resource*, a resource which will automatically be
+ * cleaned up at the end of a test case. This cleanup is performed by the 'free'
+ * function. The struct kunit_resource itself is freed automatically with
+ * kfree() if it was allocated by KUnit (e.g., by kunit_alloc_resource()), but
+ * must be freed by the user otherwise.
+ *
+ * Resources are reference counted so if a resource is retrieved via
+ * kunit_alloc_and_get_resource() or kunit_find_resource(), we need
+ * to call kunit_put_resource() to reduce the resource reference count
+ * when finished with it. Note that kunit_alloc_resource() does not require a
+ * kunit_put_resource() because it does not retrieve the resource itself.
+ *
+ * Example:
+ *
+ * .. code-block:: c
+ *
+ * struct kunit_kmalloc_params {
+ * size_t size;
+ * gfp_t gfp;
+ * };
+ *
+ * static int kunit_kmalloc_init(struct kunit_resource *res, void *context)
+ * {
+ * struct kunit_kmalloc_params *params = context;
+ * res->data = kmalloc(params->size, params->gfp);
+ *
+ * if (!res->data)
+ * return -ENOMEM;
+ *
+ * return 0;
+ * }
+ *
+ * static void kunit_kmalloc_free(struct kunit_resource *res)
+ * {
+ * kfree(res->data);
+ * }
+ *
+ * void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp)
+ * {
+ * struct kunit_kmalloc_params params;
+ *
+ * params.size = size;
+ * params.gfp = gfp;
+ *
+ * return kunit_alloc_resource(test, kunit_kmalloc_init,
+ * kunit_kmalloc_free, &params);
+ * }
+ *
+ * Resources can also be named, with lookup and removal likewise done on a
+ * name basis: see kunit_add_named_resource(), kunit_find_named_resource()
+ * and kunit_destroy_named_resource(). Resource names must be
+ * unique within the test instance.
+ */
+struct kunit_resource {
+ void *data;
+ const char *name;
+ kunit_resource_free_t free;
+
+ /* private: internal use only. */
+ struct kref refcount;
+ struct list_head node;
+ bool should_kfree;
+};
+
+/**
+ * kunit_get_resource() - Hold a resource for use. Most users do not need
+ * to call this directly: resources retrieved via
+ * kunit_find_resource*() already take a reference.
+ * @res: resource
+ */
+static inline void kunit_get_resource(struct kunit_resource *res)
+{
+ kref_get(&res->refcount);
+}
+
+/*
+ * Called when refcount reaches zero via kunit_put_resource();
+ * should not be called directly.
+ */
+static inline void kunit_release_resource(struct kref *kref)
+{
+ struct kunit_resource *res = container_of(kref, struct kunit_resource,
+ refcount);
+
+ if (res->free)
+ res->free(res);
+
+ /* 'res' is still valid here: when should_kfree is set, res->free must
+ * not free 'res' itself, only res->data.
+ */
+ if (res->should_kfree)
+ kfree(res);
+}
+
+/**
+ * kunit_put_resource() - When caller is done with retrieved resource,
+ * kunit_put_resource() should be called to drop
+ * reference count. The resource list maintains
+ * a reference count on resources, so if no users
+ * are utilizing a resource and it is removed from
+ * the resource list, it will be freed via the
+ * associated free function (if any). Only
+ * needs to be used if we alloc_and_get() or
+ * find() resource.
+ * @res: resource
+ */
+static inline void kunit_put_resource(struct kunit_resource *res)
+{
+ kref_put(&res->refcount, kunit_release_resource);
+}
+
+/**
+ * __kunit_add_resource() - Internal helper to add a resource.
+ *
+ * res->should_kfree is not initialised.
+ * @test: The test context object.
+ * @init: a user-supplied function to initialize the result (if needed). If
+ * none is supplied, the resource data value is simply set to @data.
+ * If an init function is supplied, @data is passed to it instead.
+ * @free: a user-supplied function to free the resource (if needed).
+ * @res: The resource.
+ * @data: value to pass to init function or set in resource data field.
+ */
+int __kunit_add_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ struct kunit_resource *res,
+ void *data);
+
+/**
+ * kunit_add_resource() - Add a *test managed resource*.
+ * @test: The test context object.
+ * @init: a user-supplied function to initialize the result (if needed). If
+ * none is supplied, the resource data value is simply set to @data.
+ * If an init function is supplied, @data is passed to it instead.
+ * @free: a user-supplied function to free the resource (if needed).
+ * @res: The resource.
+ * @data: value to pass to init function or set in resource data field.
+ */
+static inline int kunit_add_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ struct kunit_resource *res,
+ void *data)
+{
+ res->should_kfree = false;
+ return __kunit_add_resource(test, init, free, res, data);
+}
+
+static inline struct kunit_resource *
+kunit_find_named_resource(struct kunit *test, const char *name);
+
+/**
+ * kunit_add_named_resource() - Add a named *test managed resource*.
+ * @test: The test context object.
+ * @init: a user-supplied function to initialize the resource data, if needed.
+ * @free: a user-supplied function to free the resource data, if needed.
+ * @res: The resource.
+ * @name: name to be set for resource.
+ * @data: value to pass to init function or set in resource data field.
+ */
+static inline int kunit_add_named_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ struct kunit_resource *res,
+ const char *name,
+ void *data)
+{
+ struct kunit_resource *existing;
+
+ if (!name)
+ return -EINVAL;
+
+ existing = kunit_find_named_resource(test, name);
+ if (existing) {
+ kunit_put_resource(existing);
+ return -EEXIST;
+ }
+
+ res->name = name;
+ res->should_kfree = false;
+
+ return __kunit_add_resource(test, init, free, res, data);
+}
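/*
 * Usage sketch (editorial, not part of this diff): registering a named,
 * caller-owned resource; names must be unique per test. Identifiers below
 * are hypothetical.
 */
static void example_register_cfg(struct kunit *test,
				 struct kunit_resource *res, void *cfg)
{
	/* Fails with -EEXIST if "cfg" was already added to this test. */
	KUNIT_ASSERT_EQ(test, 0,
			kunit_add_named_resource(test, NULL, NULL, res,
						 "cfg", cfg));
}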
+
+/**
+ * kunit_alloc_and_get_resource() - Allocates and returns a *test managed resource*.
+ * @test: The test context object.
+ * @init: a user supplied function to initialize the resource.
+ * @free: a user supplied function to free the resource (if needed).
+ * @internal_gfp: gfp to use for internal allocations, if unsure, use GFP_KERNEL
+ * @context: for the user to pass in arbitrary data to the init function.
+ *
+ * Allocates a *test managed resource*, a resource which will automatically be
+ * cleaned up at the end of a test case. See &struct kunit_resource for an
+ * example.
+ *
+ * This is effectively identical to kunit_alloc_resource(), but returns the
+ * struct kunit_resource pointer, not just the 'data' pointer. It therefore
+ * also increments the resource's refcount, so kunit_put_resource() should be
+ * called when you've finished with it.
+ *
+ * Note: KUnit needs to allocate memory for a kunit_resource object. You must
+ * specify an @internal_gfp that is compatible with the use context of your
+ * resource.
+ */
+static inline struct kunit_resource *
+kunit_alloc_and_get_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ gfp_t internal_gfp,
+ void *context)
+{
+ struct kunit_resource *res;
+ int ret;
+
+ res = kzalloc(sizeof(*res), internal_gfp);
+ if (!res)
+ return NULL;
+
+ res->should_kfree = true;
+
+ ret = __kunit_add_resource(test, init, free, res, context);
+ if (!ret) {
+ /*
+ * bump refcount for get; kunit_put_resource() should be called
+ * when done.
+ */
+ kunit_get_resource(res);
+ return res;
+ }
+ return NULL;
+}
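/*
 * Usage sketch (editorial, not part of this diff): the returned resource
 * carries an extra reference that the caller must drop. Identifiers below
 * are hypothetical.
 */
static void example_alloc_and_get(struct kunit *test,
				  kunit_resource_init_t init_fn,
				  kunit_resource_free_t free_fn, void *ctx)
{
	struct kunit_resource *res;

	res = kunit_alloc_and_get_resource(test, init_fn, free_fn,
					   GFP_KERNEL, ctx);
	if (!res)
		return;
	/* ... use res->data ... */
	kunit_put_resource(res);	/* drop the reference taken by "get" */
}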
+
+/**
+ * kunit_alloc_resource() - Allocates a *test managed resource*.
+ * @test: The test context object.
+ * @init: a user supplied function to initialize the resource.
+ * @free: a user supplied function to free the resource (if needed).
+ * @internal_gfp: gfp to use for internal allocations, if unsure, use GFP_KERNEL
+ * @context: for the user to pass in arbitrary data to the init function.
+ *
+ * Allocates a *test managed resource*, a resource which will automatically be
+ * cleaned up at the end of a test case. See &struct kunit_resource for an
+ * example.
+ *
+ * Note: KUnit needs to allocate memory for a kunit_resource object. You must
+ * specify an @internal_gfp that is compatible with the use context of your
+ * resource.
+ */
+static inline void *kunit_alloc_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ gfp_t internal_gfp,
+ void *context)
+{
+ struct kunit_resource *res;
+
+ res = kzalloc(sizeof(*res), internal_gfp);
+ if (!res)
+ return NULL;
+
+ res->should_kfree = true;
+ if (!__kunit_add_resource(test, init, free, res, context))
+ return res->data;
+
+ return NULL;
+}
+
+typedef bool (*kunit_resource_match_t)(struct kunit *test,
+ struct kunit_resource *res,
+ void *match_data);
+
+/**
+ * kunit_resource_name_match() - Match a resource with the same name.
+ * @test: Test case to which the resource belongs.
+ * @res: The resource.
+ * @match_name: The name to match against.
+ */
+static inline bool kunit_resource_name_match(struct kunit *test,
+ struct kunit_resource *res,
+ void *match_name)
+{
+ return res->name && strcmp(res->name, match_name) == 0;
+}
+
+/**
+ * kunit_find_resource() - Find a resource using match function/data.
+ * @test: Test case to which the resource belongs.
+ * @match: match function to be applied to resources/match data.
+ * @match_data: data to be used in matching.
+ */
+static inline struct kunit_resource *
+kunit_find_resource(struct kunit *test,
+ kunit_resource_match_t match,
+ void *match_data)
+{
+ struct kunit_resource *res, *found = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&test->lock, flags);
+
+ list_for_each_entry_reverse(res, &test->resources, node) {
+ if (match(test, res, (void *)match_data)) {
+ found = res;
+ kunit_get_resource(found);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&test->lock, flags);
+
+ return found;
+}
+
+/**
+ * kunit_find_named_resource() - Find a resource using match name.
+ * @test: Test case to which the resource belongs.
+ * @name: match name.
+ */
+static inline struct kunit_resource *
+kunit_find_named_resource(struct kunit *test,
+ const char *name)
+{
+ return kunit_find_resource(test, kunit_resource_name_match,
+ (void *)name);
+}
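/*
 * Usage sketch (editorial, not part of this diff): every successful find
 * takes a reference, so pair it with kunit_put_resource().
 */
static void *example_peek_named(struct kunit *test, const char *name)
{
	struct kunit_resource *res = kunit_find_named_resource(test, name);
	void *data;

	if (!res)
		return NULL;
	data = res->data;
	kunit_put_resource(res);	/* drop the lookup reference */
	return data;
}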
+
+/**
+ * kunit_destroy_resource() - Find a kunit_resource and destroy it.
+ * @test: Test case to which the resource belongs.
+ * @match: Match function. Returns whether a given resource matches @match_data.
+ * @match_data: Data passed into @match.
+ *
+ * RETURNS:
+ * 0 if kunit_resource is found and freed, -ENOENT if not found.
+ */
+int kunit_destroy_resource(struct kunit *test,
+ kunit_resource_match_t match,
+ void *match_data);
+
+static inline int kunit_destroy_named_resource(struct kunit *test,
+ const char *name)
+{
+ return kunit_destroy_resource(test, kunit_resource_name_match,
+ (void *)name);
+}
+
+/**
+ * kunit_remove_resource() - remove resource from resource list associated with
+ * test.
+ * @test: The test context object.
+ * @res: The resource to be removed.
+ *
+ * Note that the resource will not be immediately freed since it is likely
+ * the caller has a reference to it via alloc_and_get() or find();
+ * in this case a final call to kunit_put_resource() is required.
+ */
+void kunit_remove_resource(struct kunit *test, struct kunit_resource *res);
+
+#endif /* _KUNIT_RESOURCE_H */
diff --git a/include/kunit/test-bug.h b/include/kunit/test-bug.h
new file mode 100644
index 000000000000..5fc58081d511
--- /dev/null
+++ b/include/kunit/test-bug.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit API allowing dynamic analysis tools to interact with KUnit tests
+ *
+ * Copyright (C) 2020, Google LLC.
+ * Author: Uriel Guajardo <urielguajardo@google.com>
+ */
+
+#ifndef _KUNIT_TEST_BUG_H
+#define _KUNIT_TEST_BUG_H
+
+#define kunit_fail_current_test(fmt, ...) \
+ __kunit_fail_current_test(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
+
+#if IS_BUILTIN(CONFIG_KUNIT)
+
+extern __printf(3, 4) void __kunit_fail_current_test(const char *file, int line,
+ const char *fmt, ...);
+
+#else
+
+static inline __printf(3, 4) void __kunit_fail_current_test(const char *file, int line,
+ const char *fmt, ...)
+{
+}
+
+#endif
+
+#endif /* _KUNIT_TEST_BUG_H */
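/*
 * Usage sketch (editorial, not part of this diff): instrumentation such as
 * a hypothetical leak detector can fail the currently running KUnit test;
 * the call is harmless when no test is running.
 */
static void example_report_leak(const void *object)
{
	kunit_fail_current_test("object %px was leaked", object);
}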
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 2dfb550c6723..b1ab6b32216d 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -11,81 +11,59 @@
#include <kunit/assert.h>
#include <kunit/try-catch.h>
-#include <linux/kernel.h>
+
+#include <linux/compiler.h>
+#include <linux/container_of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kconfig.h>
+#include <linux/kref.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include <linux/types.h>
-struct kunit_resource;
+#include <asm/rwonce.h>
-typedef int (*kunit_resource_init_t)(struct kunit_resource *, void *);
-typedef void (*kunit_resource_free_t)(struct kunit_resource *);
+struct kunit;
-/**
- * struct kunit_resource - represents a *test managed resource*
- * @allocation: for the user to store arbitrary data.
- * @free: a user supplied function to free the resource. Populated by
- * kunit_alloc_resource().
- *
- * Represents a *test managed resource*, a resource which will automatically be
- * cleaned up at the end of a test case.
- *
- * Example:
- *
- * .. code-block:: c
- *
- * struct kunit_kmalloc_params {
- * size_t size;
- * gfp_t gfp;
- * };
- *
- * static int kunit_kmalloc_init(struct kunit_resource *res, void *context)
- * {
- * struct kunit_kmalloc_params *params = context;
- * res->allocation = kmalloc(params->size, params->gfp);
- *
- * if (!res->allocation)
- * return -ENOMEM;
- *
- * return 0;
- * }
- *
- * static void kunit_kmalloc_free(struct kunit_resource *res)
- * {
- * kfree(res->allocation);
- * }
- *
- * void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp)
- * {
- * struct kunit_kmalloc_params params;
- * struct kunit_resource *res;
- *
- * params.size = size;
- * params.gfp = gfp;
- *
- * res = kunit_alloc_resource(test, kunit_kmalloc_init,
- * kunit_kmalloc_free, &params);
- * if (res)
- * return res->allocation;
- *
- * return NULL;
- * }
+/* Size of log associated with test. */
+#define KUNIT_LOG_SIZE 512
+
+/* Maximum size of parameter description string. */
+#define KUNIT_PARAM_DESC_SIZE 128
+
+/* Maximum size of a status comment. */
+#define KUNIT_STATUS_COMMENT_SIZE 256
+
+/*
+ * TAP specifies subtest stream indentation of 4 spaces, 8 spaces for a
+ * sub-subtest. See the "Subtests" section in
+ * https://node-tap.org/tap-protocol/
*/
-struct kunit_resource {
- void *allocation;
- kunit_resource_free_t free;
+#define KUNIT_SUBTEST_INDENT " "
+#define KUNIT_SUBSUBTEST_INDENT " "
- /* private: internal use only. */
- struct list_head node;
+/**
+ * enum kunit_status - Type of result for a test or test suite
+ * @KUNIT_SUCCESS: Denotes the test suite has neither failed nor been skipped.
+ * @KUNIT_FAILURE: Denotes the test has failed.
+ * @KUNIT_SKIPPED: Denotes the test has been skipped.
+ */
+enum kunit_status {
+ KUNIT_SUCCESS,
+ KUNIT_FAILURE,
+ KUNIT_SKIPPED,
};
-struct kunit;
-
/**
* struct kunit_case - represents an individual test case.
*
* @run_case: the function representing the actual test case.
* @name: the name of the test case.
+ * @generate_params: the generator function for parameterized tests.
*
* A test case is a function with the signature,
* ``void (*)(struct kunit *)``
@@ -120,11 +98,25 @@ struct kunit;
struct kunit_case {
void (*run_case)(struct kunit *test);
const char *name;
+ const void* (*generate_params)(const void *prev, char *desc);
/* private: internal use only. */
- bool success;
+ enum kunit_status status;
+ char *log;
};
+static inline char *kunit_status_to_ok_not_ok(enum kunit_status status)
+{
+ switch (status) {
+ case KUNIT_SKIPPED:
+ case KUNIT_SUCCESS:
+ return "ok";
+ case KUNIT_FAILURE:
+ return "not ok";
+ }
+ return "invalid";
+}
+
/**
* KUNIT_CASE - A helper for creating a &struct kunit_case
*
@@ -137,9 +129,32 @@ struct kunit_case {
#define KUNIT_CASE(test_name) { .run_case = test_name, .name = #test_name }
/**
+ * KUNIT_CASE_PARAM - A helper for creation a parameterized &struct kunit_case
+ *
+ * @test_name: a reference to a test case function.
+ * @gen_params: a reference to a parameter generator function.
+ *
+ * The generator function::
+ *
+ * const void* gen_params(const void *prev, char *desc)
+ *
+ * is used to lazily generate a series of arbitrarily typed values that fit into
+ * a void*. The argument @prev is the previously returned value, which should be
+ * used to derive the next value; @prev is set to NULL on the initial generator
+ * call. When no more values are available, the generator must return NULL.
+ * Optionally write a string into @desc (size of KUNIT_PARAM_DESC_SIZE)
+ * describing the parameter.
+ */
+#define KUNIT_CASE_PARAM(test_name, gen_params) \
+ { .run_case = test_name, .name = #test_name, \
+ .generate_params = gen_params }
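/*
 * Generator sketch (editorial, not part of this diff): walks a hypothetical
 * static array, returning NULL once exhausted, per the contract above.
 */
static const int example_params[] = { 0, 1, 42 };

static const void *example_gen_params(const void *prev, char *desc)
{
	const int *p = prev ? (const int *)prev + 1 : example_params;

	if (p == example_params + ARRAY_SIZE(example_params))
		return NULL;
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "value=%d", *p);
	return p;
}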
+
+/**
* struct kunit_suite - describes a related collection of &struct kunit_case
*
* @name: the name of the test. Purely informational.
+ * @suite_init: called once per test suite before the test cases.
+ * @suite_exit: called once per test suite after all test cases.
* @init: called before every test case.
* @exit: called after every test case.
* @test_cases: a null terminated array of test cases.
@@ -154,9 +169,17 @@ struct kunit_case {
*/
struct kunit_suite {
const char name[256];
+ int (*suite_init)(struct kunit_suite *suite);
+ void (*suite_exit)(struct kunit_suite *suite);
int (*init)(struct kunit *test);
void (*exit)(struct kunit *test);
struct kunit_case *test_cases;
+
+ /* private: internal use only */
+ char status_comment[KUNIT_STATUS_COMMENT_SIZE];
+ struct dentry *debugfs;
+ char *log;
+ int suite_init_err;
};
/**
@@ -175,7 +198,12 @@ struct kunit {
/* private: internal use only. */
const char *name; /* Read only after initialization! */
+ char *log; /* Points at case log after initialization */
struct kunit_try_catch try_catch;
+ /* param_value is the current parameter value for a test case. */
+ const void *param_value;
+ /* param_index stores the index of the parameter in parameterized tests. */
+ int param_index;
/*
* success starts as true, and may only be set to false during a
* test case; thus, it is safe to update this across multiple
@@ -183,144 +211,117 @@ struct kunit {
* be read after the test case finishes once all threads associated
* with the test case have terminated.
*/
- bool success; /* Read only after test_case finishes! */
spinlock_t lock; /* Guards all mutable test state. */
+ enum kunit_status status; /* Read only after test_case finishes! */
/*
* Because resources is a list that may be updated multiple times (with
* new resources) from any thread associated with a test case, we must
* protect it with some type of lock.
*/
struct list_head resources; /* Protected by lock. */
+
+ char status_comment[KUNIT_STATUS_COMMENT_SIZE];
};
-void kunit_init_test(struct kunit *test, const char *name);
+static inline void kunit_set_failure(struct kunit *test)
+{
+ WRITE_ONCE(test->status, KUNIT_FAILURE);
+}
+
+bool kunit_enabled(void);
+
+void kunit_init_test(struct kunit *test, const char *name, char *log);
int kunit_run_tests(struct kunit_suite *suite);
+size_t kunit_suite_num_test_cases(struct kunit_suite *suite);
+
+unsigned int kunit_test_case_num(struct kunit_suite *suite,
+ struct kunit_case *test_case);
+
+int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites);
+
+void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites);
+
+#if IS_BUILTIN(CONFIG_KUNIT)
+int kunit_run_all_tests(void);
+#else
+static inline int kunit_run_all_tests(void)
+{
+ return 0;
+}
+#endif /* IS_BUILTIN(CONFIG_KUNIT) */
+
+#define __kunit_test_suites(unique_array, ...) \
+ static struct kunit_suite *unique_array[] \
+ __aligned(sizeof(struct kunit_suite *)) \
+ __used __section(".kunit_test_suites") = { __VA_ARGS__ }
+
/**
* kunit_test_suites() - used to register one or more &struct kunit_suite
* with KUnit.
*
- * @suites: a statically allocated list of &struct kunit_suite.
+ * @__suites: a statically allocated list of &struct kunit_suite.
*
- * Registers @suites with the test framework. See &struct kunit_suite for
- * more information.
+ * Registers @suites with the test framework.
+ * This is done by placing the array of struct kunit_suite * in the
+ * .kunit_test_suites ELF section.
+ *
+ * When builtin, KUnit tests are all run via the executor at boot, and when
+ * built as a module, they run on module load.
*
- * When builtin, KUnit tests are all run as late_initcalls; this means
- * that they cannot test anything where tests must run at a different init
- * phase. One significant restriction resulting from this is that KUnit
- * cannot reliably test anything that is initialize in the late_init phase;
- * another is that KUnit is useless to test things that need to be run in
- * an earlier init phase.
- *
- * An alternative is to build the tests as a module. Because modules
- * do not support multiple late_initcall()s, we need to initialize an
- * array of suites for a module.
- *
- * TODO(brendanhiggins@google.com): Don't run all KUnit tests as
- * late_initcalls. I have some future work planned to dispatch all KUnit
- * tests from the same place, and at the very least to do so after
- * everything else is definitely initialized.
*/
-#define kunit_test_suites(...) \
- static struct kunit_suite *suites[] = { __VA_ARGS__, NULL}; \
- static int kunit_test_suites_init(void) \
- { \
- unsigned int i; \
- for (i = 0; suites[i] != NULL; i++) \
- kunit_run_tests(suites[i]); \
- return 0; \
- } \
- late_initcall(kunit_test_suites_init); \
- static void __exit kunit_test_suites_exit(void) \
- { \
- return; \
- } \
- module_exit(kunit_test_suites_exit)
+#define kunit_test_suites(__suites...) \
+ __kunit_test_suites(__UNIQUE_ID(array), \
+ ##__suites)
#define kunit_test_suite(suite) kunit_test_suites(&suite)
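/*
 * Registration sketch (editorial, not part of this diff): suite, case and
 * test names are hypothetical.
 */
static void example_test(struct kunit *test)
{
	KUNIT_EXPECT_EQ(test, 1 + 1, 2);
}

static struct kunit_case example_cases[] = {
	KUNIT_CASE(example_test),
	{}	/* the array must be zero-terminated */
};

static struct kunit_suite example_suite = {
	.name = "example",
	.test_cases = example_cases,
};
kunit_test_suite(example_suite);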
-/*
- * Like kunit_alloc_resource() below, but returns the struct kunit_resource
- * object that contains the allocation. This is mostly for testing purposes.
- */
-struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test,
- kunit_resource_init_t init,
- kunit_resource_free_t free,
- gfp_t internal_gfp,
- void *context);
-
/**
- * kunit_alloc_resource() - Allocates a *test managed resource*.
- * @test: The test context object.
- * @init: a user supplied function to initialize the resource.
- * @free: a user supplied function to free the resource.
- * @internal_gfp: gfp to use for internal allocations, if unsure, use GFP_KERNEL
- * @context: for the user to pass in arbitrary data to the init function.
- *
- * Allocates a *test managed resource*, a resource which will automatically be
- * cleaned up at the end of a test case. See &struct kunit_resource for an
- * example.
- *
- * NOTE: KUnit needs to allocate memory for each kunit_resource object. You must
- * specify an @internal_gfp that is compatible with the use context of your
- * resource.
+ * kunit_test_init_section_suites() - used to register one or more &struct
+ * kunit_suite containing init functions or
+ * init data.
+ *
+ * @__suites: a statically allocated list of &struct kunit_suite.
+ *
+ * This functions identically to kunit_test_suites() except that it suppresses
+ * modpost warnings for referencing functions marked __init or data marked
+ * __initdata; this is OK because currently KUnit only runs tests during the
+ * init phase, whether at boot or on module load.
+ *
+ * NOTE TO KUNIT DEVS: If we ever allow KUnit tests to be run after boot, these
+ * tests must be excluded.
+ *
+ * The only thing this macro does differently from kunit_test_suites() is
+ * that it suffixes the array declaration it makes with _probe; modpost
+ * suppresses warnings about referencing init data for symbols named in
+ * this manner.
*/
-static inline void *kunit_alloc_resource(struct kunit *test,
- kunit_resource_init_t init,
- kunit_resource_free_t free,
- gfp_t internal_gfp,
- void *context)
-{
- struct kunit_resource *res;
+#define kunit_test_init_section_suites(__suites...) \
+ __kunit_test_suites(CONCATENATE(__UNIQUE_ID(array), _probe), \
+ ##__suites)
- res = kunit_alloc_and_get_resource(test, init, free, internal_gfp,
- context);
+#define kunit_test_init_section_suite(suite) \
+ kunit_test_init_section_suites(&suite)
- if (res)
- return res->allocation;
+#define kunit_suite_for_each_test_case(suite, test_case) \
+ for (test_case = suite->test_cases; test_case->run_case; test_case++)
- return NULL;
-}
-
-typedef bool (*kunit_resource_match_t)(struct kunit *test,
- const void *res,
- void *match_data);
+enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite);
/**
- * kunit_resource_instance_match() - Match a resource with the same instance.
- * @test: Test case to which the resource belongs.
- * @res: The data stored in kunit_resource->allocation.
- * @match_data: The resource pointer to match against.
+ * kunit_kmalloc_array() - Like kmalloc_array() except the allocation is *test managed*.
+ * @test: The test context object.
+ * @n: number of elements.
+ * @size: The size in bytes of the desired memory.
+ * @gfp: flags passed to underlying kmalloc().
*
- * An instance of kunit_resource_match_t that matches a resource whose
- * allocation matches @match_data.
- */
-static inline bool kunit_resource_instance_match(struct kunit *test,
- const void *res,
- void *match_data)
-{
- return res == match_data;
-}
-
-/**
- * kunit_resource_destroy() - Find a kunit_resource and destroy it.
- * @test: Test case to which the resource belongs.
- * @match: Match function. Returns whether a given resource matches @match_data.
- * @free: Must match free on the kunit_resource to free.
- * @match_data: Data passed into @match.
- *
- * Free the latest kunit_resource of @test for which @free matches the
- * kunit_resource_free_t associated with the resource and for which @match
- * returns true.
- *
- * RETURNS:
- * 0 if kunit_resource is found and freed, -ENOENT if not found.
+ * Just like `kmalloc_array(...)`, except the allocation is managed by the test case
+ * and is automatically cleaned up after the test case concludes. See &struct
+ * kunit_resource for more information.
*/
-int kunit_resource_destroy(struct kunit *test,
- kunit_resource_match_t match,
- kunit_resource_free_t free,
- void *match_data);
+void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp);
/**
* kunit_kmalloc() - Like kmalloc() except the allocation is *test managed*.
@@ -328,11 +329,12 @@ int kunit_resource_destroy(struct kunit *test,
* @size: The size in bytes of the desired memory.
* @gfp: flags passed to underlying kmalloc().
*
- * Just like `kmalloc(...)`, except the allocation is managed by the test case
- * and is automatically cleaned up after the test case concludes. See &struct
- * kunit_resource for more information.
+ * See kmalloc() and kunit_kmalloc_array() for more information.
*/
-void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp);
+static inline void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp)
+{
+ return kunit_kmalloc_array(test, 1, size, gfp);
+}
/**
* kunit_kfree() - Like kfree except for allocations managed by KUnit.
@@ -347,17 +349,81 @@ void kunit_kfree(struct kunit *test, const void *ptr);
* @size: The size in bytes of the desired memory.
* @gfp: flags passed to underlying kmalloc().
*
- * See kzalloc() and kunit_kmalloc() for more information.
+ * See kzalloc() and kunit_kmalloc_array() for more information.
*/
static inline void *kunit_kzalloc(struct kunit *test, size_t size, gfp_t gfp)
{
return kunit_kmalloc(test, size, gfp | __GFP_ZERO);
}
+/**
+ * kunit_kcalloc() - Just like kunit_kmalloc_array(), but zeroes the allocation.
+ * @test: The test context object.
+ * @n: number of elements.
+ * @size: The size in bytes of the desired memory.
+ * @gfp: flags passed to underlying kmalloc().
+ *
+ * See kcalloc() and kunit_kmalloc_array() for more information.
+ */
+static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp_t gfp)
+{
+ return kunit_kmalloc_array(test, n, size, gfp | __GFP_ZERO);
+}
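/*
 * Usage sketch (editorial, not part of this diff): test-managed allocations
 * are released automatically when the test case ends; no kfree() is needed.
 */
static void example_alloc_test(struct kunit *test)
{
	u32 *buf = kunit_kcalloc(test, 16, sizeof(*buf), GFP_KERNEL);

	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
	buf[0] = 0xa5a5a5a5;
}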
+
void kunit_cleanup(struct kunit *test);
-#define kunit_printk(lvl, test, fmt, ...) \
- printk(lvl "\t# %s: " fmt, (test)->name, ##__VA_ARGS__)
+void __printf(2, 3) kunit_log_append(char *log, const char *fmt, ...);
+
+/**
+ * kunit_mark_skipped() - Marks @test_or_suite as skipped
+ *
+ * @test_or_suite: The test context object.
+ * @fmt: A printk() style format string.
+ *
+ * Marks the test as skipped. @fmt is emitted as the test status
+ * comment, typically the reason the test was skipped.
+ *
+ * Test execution continues after kunit_mark_skipped() is called.
+ */
+#define kunit_mark_skipped(test_or_suite, fmt, ...) \
+ do { \
+ WRITE_ONCE((test_or_suite)->status, KUNIT_SKIPPED); \
+ scnprintf((test_or_suite)->status_comment, \
+ KUNIT_STATUS_COMMENT_SIZE, \
+ fmt, ##__VA_ARGS__); \
+ } while (0)
+
+/**
+ * kunit_skip() - Marks @test_or_suite as skipped
+ *
+ * @test_or_suite: The test context object.
+ * @fmt: A printk() style format string.
+ *
+ * Skips the test. @fmt is emitted as the test status
+ * comment, typically the reason the test was skipped.
+ *
+ * Test execution is halted after kunit_skip() is called.
+ */
+#define kunit_skip(test_or_suite, fmt, ...) \
+ do { \
+ kunit_mark_skipped((test_or_suite), fmt, ##__VA_ARGS__);\
+ kunit_try_catch_throw(&((test_or_suite)->try_catch)); \
+ } while (0)
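/*
 * Usage sketch (editorial, not part of this diff): skip a test whose
 * dependency is absent; kunit_skip() does not return.
 */
static void example_skip_test(struct kunit *test)
{
	if (!IS_ENABLED(CONFIG_OF))
		kunit_skip(test, "requires CONFIG_OF");
	/* not reached when skipped */
}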
+
+/*
+ * printk and log to per-test or per-suite log buffer. Logging only done
+ * if CONFIG_KUNIT_DEBUGFS is 'y'; if it is 'n', no log is allocated/used.
+ */
+#define kunit_log(lvl, test_or_suite, fmt, ...) \
+ do { \
+ printk(lvl fmt, ##__VA_ARGS__); \
+ kunit_log_append((test_or_suite)->log, fmt "\n", \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define kunit_printk(lvl, test, fmt, ...) \
+ kunit_log(lvl, test, KUNIT_SUBTEST_INDENT "# %s: " fmt, \
+ (test)->name, ##__VA_ARGS__)
/**
* kunit_info() - Prints an INFO level message associated with @test.
@@ -403,28 +469,34 @@ void kunit_cleanup(struct kunit *test);
*/
#define KUNIT_SUCCEED(test) do {} while (0)
-void kunit_do_assertion(struct kunit *test,
- struct kunit_assert *assert,
- bool pass,
- const char *fmt, ...);
-
-#define KUNIT_ASSERTION(test, pass, assert_class, INITIALIZER, fmt, ...) do { \
- struct assert_class __assertion = INITIALIZER; \
- kunit_do_assertion(test, \
- &__assertion.assert, \
- pass, \
- fmt, \
- ##__VA_ARGS__); \
+void kunit_do_failed_assertion(struct kunit *test,
+ const struct kunit_loc *loc,
+ enum kunit_assert_type type,
+ const struct kunit_assert *assert,
+ assert_format_t assert_format,
+ const char *fmt, ...);
+
+#define _KUNIT_FAILED(test, assert_type, assert_class, assert_format, INITIALIZER, fmt, ...) do { \
+ static const struct kunit_loc __loc = KUNIT_CURRENT_LOC; \
+ const struct assert_class __assertion = INITIALIZER; \
+ kunit_do_failed_assertion(test, \
+ &__loc, \
+ assert_type, \
+ &__assertion.assert, \
+ assert_format, \
+ fmt, \
+ ##__VA_ARGS__); \
} while (0)
#define KUNIT_FAIL_ASSERTION(test, assert_type, fmt, ...) \
- KUNIT_ASSERTION(test, \
- false, \
- kunit_fail_assert, \
- KUNIT_INIT_FAIL_ASSERT_STRUCT(test, assert_type), \
- fmt, \
- ##__VA_ARGS__)
+ _KUNIT_FAILED(test, \
+ assert_type, \
+ kunit_fail_assert, \
+ kunit_fail_assert_format, \
+ {}, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_FAIL() - Always causes a test to fail when evaluated.
@@ -449,15 +521,19 @@ void kunit_do_assertion(struct kunit *test,
expected_true, \
fmt, \
...) \
- KUNIT_ASSERTION(test, \
- !!(condition) == !!expected_true, \
- kunit_unary_assert, \
- KUNIT_INIT_UNARY_ASSERT_STRUCT(test, \
- assert_type, \
- #condition, \
- expected_true), \
- fmt, \
- ##__VA_ARGS__)
+do { \
+ if (likely(!!(condition) == !!expected_true)) \
+ break; \
+ \
+ _KUNIT_FAILED(test, \
+ assert_type, \
+ kunit_unary_assert, \
+ kunit_unary_assert_format, \
+ KUNIT_INIT_UNARY_ASSERT_STRUCT(#condition, \
+ expected_true), \
+ fmt, \
+ ##__VA_ARGS__); \
+} while (0)
#define KUNIT_TRUE_MSG_ASSERTION(test, assert_type, condition, fmt, ...) \
KUNIT_UNARY_ASSERTION(test, \
@@ -467,9 +543,6 @@ void kunit_do_assertion(struct kunit *test,
fmt, \
##__VA_ARGS__)
-#define KUNIT_TRUE_ASSERTION(test, assert_type, condition) \
- KUNIT_TRUE_MSG_ASSERTION(test, assert_type, condition, NULL)
-
#define KUNIT_FALSE_MSG_ASSERTION(test, assert_type, condition, fmt, ...) \
KUNIT_UNARY_ASSERTION(test, \
assert_type, \
@@ -478,9 +551,6 @@ void kunit_do_assertion(struct kunit *test,
fmt, \
##__VA_ARGS__)
-#define KUNIT_FALSE_ASSERTION(test, assert_type, condition) \
- KUNIT_FALSE_MSG_ASSERTION(test, assert_type, condition, NULL)
-
/*
* A factory macro for defining the assertions and expectations for the basic
* comparisons defined for the built in types.
@@ -497,7 +567,7 @@ void kunit_do_assertion(struct kunit *test,
*/
#define KUNIT_BASE_BINARY_ASSERTION(test, \
assert_class, \
- ASSERT_CLASS_INIT, \
+ format_func, \
assert_type, \
left, \
op, \
@@ -505,354 +575,58 @@ void kunit_do_assertion(struct kunit *test,
fmt, \
...) \
do { \
- typeof(left) __left = (left); \
- typeof(right) __right = (right); \
- ((void)__typecheck(__left, __right)); \
+ const typeof(left) __left = (left); \
+ const typeof(right) __right = (right); \
+ static const struct kunit_binary_assert_text __text = { \
+ .operation = #op, \
+ .left_text = #left, \
+ .right_text = #right, \
+ }; \
\
- KUNIT_ASSERTION(test, \
- __left op __right, \
- assert_class, \
- ASSERT_CLASS_INIT(test, \
- assert_type, \
- #op, \
- #left, \
- __left, \
- #right, \
- __right), \
- fmt, \
- ##__VA_ARGS__); \
+ if (likely(__left op __right)) \
+ break; \
+ \
+ _KUNIT_FAILED(test, \
+ assert_type, \
+ assert_class, \
+ format_func, \
+ KUNIT_INIT_BINARY_ASSERT_STRUCT(&__text, \
+ __left, \
+ __right), \
+ fmt, \
+ ##__VA_ARGS__); \
} while (0)
-#define KUNIT_BASE_EQ_MSG_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_BINARY_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, ==, right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BASE_NE_MSG_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_BINARY_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, !=, right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BASE_LT_MSG_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_BINARY_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, <, right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BASE_LE_MSG_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_BINARY_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, <=, right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BASE_GT_MSG_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, \
- right, \
- fmt, \
+#define KUNIT_BINARY_INT_ASSERTION(test, \
+ assert_type, \
+ left, \
+ op, \
+ right, \
+ fmt, \
...) \
KUNIT_BASE_BINARY_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
+ kunit_binary_assert, \
+ kunit_binary_assert_format, \
assert_type, \
- left, >, right, \
+ left, op, right, \
fmt, \
##__VA_ARGS__)
-#define KUNIT_BASE_GE_MSG_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, \
- right, \
- fmt, \
+#define KUNIT_BINARY_PTR_ASSERTION(test, \
+ assert_type, \
+ left, \
+ op, \
+ right, \
+ fmt, \
...) \
KUNIT_BASE_BINARY_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, >=, right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_EQ_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
- KUNIT_BASE_EQ_MSG_ASSERTION(test, \
- kunit_binary_assert, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_EQ_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_EQ_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_EQ_MSG_ASSERTION(test, \
- kunit_binary_ptr_assert, \
- KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_PTR_EQ_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_NE_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
- KUNIT_BASE_NE_MSG_ASSERTION(test, \
- kunit_binary_assert, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_NE_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_NE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_NE_MSG_ASSERTION(test, \
- kunit_binary_ptr_assert, \
- KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_PTR_NE_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_LT_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
- KUNIT_BASE_LT_MSG_ASSERTION(test, \
- kunit_binary_assert, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_LT_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_LT_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_PTR_LT_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_LT_MSG_ASSERTION(test, \
- kunit_binary_ptr_assert, \
- KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_PTR_LT_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_PTR_LT_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_LE_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
- KUNIT_BASE_LE_MSG_ASSERTION(test, \
- kunit_binary_assert, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_LE_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_LE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_PTR_LE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_LE_MSG_ASSERTION(test, \
kunit_binary_ptr_assert, \
- KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
+ kunit_binary_ptr_assert_format, \
assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_PTR_LE_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_PTR_LE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_GT_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
- KUNIT_BASE_GT_MSG_ASSERTION(test, \
- kunit_binary_assert, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_GT_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_GT_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_PTR_GT_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_GT_MSG_ASSERTION(test, \
- kunit_binary_ptr_assert, \
- KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_PTR_GT_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_PTR_GT_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_GE_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
- KUNIT_BASE_GE_MSG_ASSERTION(test, \
- kunit_binary_assert, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
+ left, op, right, \
fmt, \
##__VA_ARGS__)
-#define KUNIT_BINARY_GE_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_GE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_PTR_GE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_GE_MSG_ASSERTION(test, \
- kunit_binary_ptr_assert, \
- KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_PTR_GE_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_PTR_GE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
#define KUNIT_BINARY_STR_ASSERTION(test, \
assert_type, \
left, \
@@ -861,86 +635,49 @@ do { \
fmt, \
...) \
do { \
- typeof(left) __left = (left); \
- typeof(right) __right = (right); \
+ const char *__left = (left); \
+ const char *__right = (right); \
+ static const struct kunit_binary_assert_text __text = { \
+ .operation = #op, \
+ .left_text = #left, \
+ .right_text = #right, \
+ }; \
+ \
+ if (likely(strcmp(__left, __right) op 0)) \
+ break; \
\
- KUNIT_ASSERTION(test, \
- strcmp(__left, __right) op 0, \
- kunit_binary_str_assert, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT(test, \
- assert_type, \
- #op, \
- #left, \
- __left, \
- #right, \
- __right), \
- fmt, \
- ##__VA_ARGS__); \
+ \
+ _KUNIT_FAILED(test, \
+ assert_type, \
+ kunit_binary_str_assert, \
+ kunit_binary_str_assert_format, \
+ KUNIT_INIT_BINARY_ASSERT_STRUCT(&__text, \
+ __left, \
+ __right), \
+ fmt, \
+ ##__VA_ARGS__); \
} while (0)
-#define KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BINARY_STR_ASSERTION(test, \
- assert_type, \
- left, ==, right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_STR_EQ_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BINARY_STR_ASSERTION(test, \
- assert_type, \
- left, !=, right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_STR_NE_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
#define KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \
assert_type, \
ptr, \
fmt, \
...) \
do { \
- typeof(ptr) __ptr = (ptr); \
+ const typeof(ptr) __ptr = (ptr); \
+ \
+ if (!IS_ERR_OR_NULL(__ptr)) \
+ break; \
\
- KUNIT_ASSERTION(test, \
- !IS_ERR_OR_NULL(__ptr), \
- kunit_ptr_not_err_assert, \
- KUNIT_INIT_PTR_NOT_ERR_STRUCT(test, \
- assert_type, \
- #ptr, \
- __ptr), \
- fmt, \
- ##__VA_ARGS__); \
+ _KUNIT_FAILED(test, \
+ assert_type, \
+ kunit_ptr_not_err_assert, \
+ kunit_ptr_not_err_assert_format, \
+ KUNIT_INIT_PTR_NOT_ERR_STRUCT(#ptr, __ptr), \
+ fmt, \
+ ##__VA_ARGS__); \
} while (0)
-#define KUNIT_PTR_NOT_ERR_OR_NULL_ASSERTION(test, assert_type, ptr) \
- KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \
- assert_type, \
- ptr, \
- NULL)
-
/**
* KUNIT_EXPECT_TRUE() - Causes a test failure when the expression is not true.
* @test: The test context object.
@@ -953,7 +690,7 @@ do { \
* *expectation failure*.
*/
#define KUNIT_EXPECT_TRUE(test, condition) \
- KUNIT_TRUE_ASSERTION(test, KUNIT_EXPECTATION, condition)
+ KUNIT_EXPECT_TRUE_MSG(test, condition, NULL)
#define KUNIT_EXPECT_TRUE_MSG(test, condition, fmt, ...) \
KUNIT_TRUE_MSG_ASSERTION(test, \
@@ -972,7 +709,7 @@ do { \
* KUNIT_EXPECT_TRUE() for more information.
*/
#define KUNIT_EXPECT_FALSE(test, condition) \
- KUNIT_FALSE_ASSERTION(test, KUNIT_EXPECTATION, condition)
+ KUNIT_EXPECT_FALSE_MSG(test, condition, NULL)
#define KUNIT_EXPECT_FALSE_MSG(test, condition, fmt, ...) \
KUNIT_FALSE_MSG_ASSERTION(test, \
@@ -993,15 +730,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_EQ(test, left, right) \
- KUNIT_BINARY_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_EQ_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_EQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_EQ_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, ==, right, \
+ fmt, \
+ ##__VA_ARGS__)
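A minimal sketch of both forms (values chosen only for illustration):

    static void example_eq_test(struct kunit *test)
    {
            KUNIT_EXPECT_EQ(test, 2 + 2, 4);
            KUNIT_EXPECT_EQ_MSG(test, 2 + 2, 4, "arithmetic is broken");
    }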
/**
* KUNIT_EXPECT_PTR_EQ() - Expects that pointers @left and @right are equal.
@@ -1015,18 +751,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_PTR_EQ(test, left, right) \
- KUNIT_BINARY_PTR_EQ_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right)
+ KUNIT_EXPECT_PTR_EQ_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_PTR_EQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, ==, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_NE() - An expectation that @left and @right are not equal.
@@ -1040,15 +772,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_NE(test, left, right) \
- KUNIT_BINARY_NE_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_NE_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_NE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_NE_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, !=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_PTR_NE() - Expects that pointers @left and @right are not equal.
@@ -1062,18 +793,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_PTR_NE(test, left, right) \
- KUNIT_BINARY_PTR_NE_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right)
+ KUNIT_EXPECT_PTR_NE_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_PTR_NE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, !=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_LT() - An expectation that @left is less than @right.
@@ -1087,15 +814,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_LT(test, left, right) \
- KUNIT_BINARY_LT_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_LT_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_LT_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_LT_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, <, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_LE() - Expects that @left is less than or equal to @right.
@@ -1109,15 +835,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_LE(test, left, right) \
- KUNIT_BINARY_LE_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_LE_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_LE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_LE_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, <=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_GT() - An expectation that @left is greater than @right.
@@ -1131,15 +856,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_GT(test, left, right) \
- KUNIT_BINARY_GT_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_GT_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_GT_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_GT_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, >, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_GE() - Expects that @left is greater than or equal to @right.
@@ -1153,15 +877,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_GE(test, left, right) \
- KUNIT_BINARY_GE_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_GE_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_GE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_GE_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, >=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_STREQ() - Expects that strings @left and @right are equal.
@@ -1175,15 +898,14 @@ do { \
* for more information.
*/
#define KUNIT_EXPECT_STREQ(test, left, right) \
- KUNIT_BINARY_STR_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_STREQ_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_STREQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_STR_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, ==, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_STRNEQ() - Expects that strings @left and @right are not equal.
@@ -1197,15 +919,56 @@ do { \
* for more information.
*/
#define KUNIT_EXPECT_STRNEQ(test, left, right) \
- KUNIT_BINARY_STR_NE_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_STRNEQ_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_STRNEQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_STR_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, !=, right, \
+ fmt, \
+ ##__VA_ARGS__)
+
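A short sketch covering both string expectations (the strings are illustrative):

    static void example_str_test(struct kunit *test)
    {
            const char *greeting = "hello";

            KUNIT_EXPECT_STREQ(test, greeting, "hello");
            KUNIT_EXPECT_STRNEQ(test, greeting, "goodbye");
    }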
+/**
+ * KUNIT_EXPECT_NULL() - Expects that @ptr is null.
+ * @test: The test context object.
+ * @ptr: an arbitrary pointer.
+ *
+ * Sets an expectation that the value that @ptr evaluates to is null. This is
+ * semantically equivalent to KUNIT_EXPECT_PTR_EQ(@test, ptr, NULL).
+ * See KUNIT_EXPECT_TRUE() for more information.
+ */
+#define KUNIT_EXPECT_NULL(test, ptr) \
+ KUNIT_EXPECT_NULL_MSG(test, \
+ ptr, \
+ NULL)
+
+#define KUNIT_EXPECT_NULL_MSG(test, ptr, fmt, ...) \
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ ptr, ==, NULL, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_NOT_NULL() - Expects that @ptr is not null.
+ * @test: The test context object.
+ * @ptr: an arbitrary pointer.
+ *
+ * Sets an expectation that the value that @ptr evaluates to is not null. This
+ * is semantically equivalent to KUNIT_EXPECT_PTR_NE(@test, ptr, NULL).
+ * See KUNIT_EXPECT_TRUE() for more information.
+ */
+#define KUNIT_EXPECT_NOT_NULL(test, ptr) \
+ KUNIT_EXPECT_NOT_NULL_MSG(test, \
+ ptr, \
+ NULL)
+
+#define KUNIT_EXPECT_NOT_NULL_MSG(test, ptr, fmt, ...) \
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ ptr, !=, NULL, \
+ fmt, \
+ ##__VA_ARGS__)
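A hedged sketch of the NULL/not-NULL pair (the buffer is illustrative):

    static void example_null_test(struct kunit *test)
    {
            char *buf = kunit_kzalloc(test, 8, GFP_KERNEL);

            KUNIT_EXPECT_NOT_NULL(test, buf);
            KUNIT_EXPECT_NULL(test, strchr(buf, 'x')); /* zeroed, so empty */
    }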
/**
* KUNIT_EXPECT_NOT_ERR_OR_NULL() - Expects that @ptr is not null and not err.
@@ -1218,7 +981,7 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_NOT_ERR_OR_NULL(test, ptr) \
- KUNIT_PTR_NOT_ERR_OR_NULL_ASSERTION(test, KUNIT_EXPECTATION, ptr)
+ KUNIT_EXPECT_NOT_ERR_OR_NULL_MSG(test, ptr, NULL)
#define KUNIT_EXPECT_NOT_ERR_OR_NULL_MSG(test, ptr, fmt, ...) \
KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \
@@ -1242,7 +1005,7 @@ do { \
* this is otherwise known as an *assertion failure*.
*/
#define KUNIT_ASSERT_TRUE(test, condition) \
- KUNIT_TRUE_ASSERTION(test, KUNIT_ASSERTION, condition)
+ KUNIT_ASSERT_TRUE_MSG(test, condition, NULL)
#define KUNIT_ASSERT_TRUE_MSG(test, condition, fmt, ...) \
KUNIT_TRUE_MSG_ASSERTION(test, \
@@ -1261,7 +1024,7 @@ do { \
* (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_FALSE(test, condition) \
- KUNIT_FALSE_ASSERTION(test, KUNIT_ASSERTION, condition)
+ KUNIT_ASSERT_FALSE_MSG(test, condition, NULL)
#define KUNIT_ASSERT_FALSE_MSG(test, condition, fmt, ...) \
KUNIT_FALSE_MSG_ASSERTION(test, \
@@ -1281,15 +1044,14 @@ do { \
* failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_EQ(test, left, right) \
- KUNIT_BINARY_EQ_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_EQ_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_EQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_EQ_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, ==, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_PTR_EQ() - Asserts that pointers @left and @right are equal.
@@ -1302,15 +1064,14 @@ do { \
* failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_PTR_EQ(test, left, right) \
- KUNIT_BINARY_PTR_EQ_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_PTR_EQ_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_PTR_EQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, ==, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_NE() - An assertion that @left and @right are not equal.
@@ -1323,15 +1084,14 @@ do { \
* failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_NE(test, left, right) \
- KUNIT_BINARY_NE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_NE_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_NE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_NE_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, !=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_PTR_NE() - Asserts that pointers @left and @right are not equal.
@@ -1345,15 +1105,14 @@ do { \
* failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_PTR_NE(test, left, right) \
- KUNIT_BINARY_PTR_NE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_PTR_NE_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_PTR_NE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, !=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_LT() - An assertion that @left is less than @right.
* @test: The test context object.
@@ -1366,15 +1125,14 @@ do { \
* is not met.
*/
#define KUNIT_ASSERT_LT(test, left, right) \
- KUNIT_BINARY_LT_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_LT_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_LT_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_LT_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, <, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_LE() - An assertion that @left is less than or equal to @right.
* @test: The test context object.
@@ -1387,15 +1145,14 @@ do { \
* KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_LE(test, left, right) \
- KUNIT_BINARY_LE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_LE_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_LE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_LE_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, <=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_GT() - An assertion that @left is greater than @right.
@@ -1409,15 +1166,14 @@ do { \
* is not met.
*/
#define KUNIT_ASSERT_GT(test, left, right) \
- KUNIT_BINARY_GT_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_GT_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_GT_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_GT_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, >, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_GE() - Assertion that @left is greater than or equal to @right.
@@ -1431,15 +1187,14 @@ do { \
* is not met.
*/
#define KUNIT_ASSERT_GE(test, left, right) \
- KUNIT_BINARY_GE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_GE_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_GE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_GE_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, >=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_STREQ() - An assertion that strings @left and @right are equal.
@@ -1452,15 +1207,14 @@ do { \
* assertion failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_STREQ(test, left, right) \
- KUNIT_BINARY_STR_EQ_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_STREQ_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_STREQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_STR_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, ==, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_STRNEQ() - An assertion that strings @left and @right are not equal.
@@ -1474,15 +1228,56 @@ do { \
* for more information.
*/
#define KUNIT_ASSERT_STRNEQ(test, left, right) \
- KUNIT_BINARY_STR_NE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_STRNEQ_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_STRNEQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_STR_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, !=, right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_NULL() - Asserts that the pointer @ptr is null.
+ * @test: The test context object.
+ * @ptr: an arbitrary pointer.
+ *
+ * Sets an assertion that the value that @ptr evaluates to is null. This is
+ * the same as KUNIT_EXPECT_NULL(), except it causes an assertion
+ * failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
+ */
+#define KUNIT_ASSERT_NULL(test, ptr) \
+ KUNIT_ASSERT_NULL_MSG(test, \
+ ptr, \
+ NULL)
+
+#define KUNIT_ASSERT_NULL_MSG(test, ptr, fmt, ...) \
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ ptr, ==, NULL, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_NOT_NULL() - Asserts that the pointer @ptr is not null.
+ * @test: The test context object.
+ * @ptr: an arbitrary pointer.
+ *
+ * Sets an assertion that the value that @ptr evaluates to is not null. This
+ * is the same as KUNIT_EXPECT_NOT_NULL(), except it causes an assertion
+ * failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
+ */
+#define KUNIT_ASSERT_NOT_NULL(test, ptr) \
+ KUNIT_ASSERT_NOT_NULL_MSG(test, \
+ ptr, \
+ NULL)
+
+#define KUNIT_ASSERT_NOT_NULL_MSG(test, ptr, fmt, ...) \
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ ptr, !=, NULL, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_NOT_ERR_OR_NULL() - Assertion that @ptr is not null and not err.
@@ -1495,7 +1290,7 @@ do { \
* KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr) \
- KUNIT_PTR_NOT_ERR_OR_NULL_ASSERTION(test, KUNIT_ASSERTION, ptr)
+ KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, NULL)
#define KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, fmt, ...) \
KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \
@@ -1504,4 +1299,29 @@ do { \
fmt, \
##__VA_ARGS__)
+/**
+ * KUNIT_ARRAY_PARAM() - Define test parameter generator from an array.
+ * @name: prefix for the test parameter generator function.
+ * @array: array of test parameters.
+ * @get_desc: function to convert a param to a description; NULL to use the default
+ *
+ * Define the function @name_gen_params, which uses @array to generate parameters.
+ */
+#define KUNIT_ARRAY_PARAM(name, array, get_desc) \
+ static const void *name##_gen_params(const void *prev, char *desc) \
+ { \
+ typeof((array)[0]) *__next = prev ? ((typeof(__next)) prev) + 1 : (array); \
+ if (__next - (array) < ARRAY_SIZE((array))) { \
+ void (*__get_desc)(typeof(__next), char *) = get_desc; \
+ if (__get_desc) \
+ __get_desc(__next, desc); \
+ return __next; \
+ } \
+ return NULL; \
+ }
+
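A hedged sketch of a generator defined with this macro; the param struct and names are illustrative, and KUNIT_PARAM_DESC_SIZE is assumed to bound @desc as elsewhere in this header:

    struct example_param { int value; };

    static const struct example_param example_params[] = {
            { .value = 1 },
            { .value = 2 },
    };

    static void example_get_desc(const struct example_param *p, char *desc)
    {
            snprintf(desc, KUNIT_PARAM_DESC_SIZE, "value=%d", p->value);
    }

    KUNIT_ARRAY_PARAM(example, example_params, example_get_desc);
    /* Defines example_gen_params(), e.g. for KUNIT_CASE_PARAM(fn, example_gen_params). */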
+// TODO(dlatypov@google.com): consider eventually migrating users to explicitly
+// include resource.h themselves if they need it.
+#include <kunit/resource.h>
+
#endif /* _KUNIT_TEST_H */
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index d120e6c323e7..cd6d8f260eab 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -26,16 +26,9 @@ enum kvm_arch_timer_regs {
struct arch_timer_context {
struct kvm_vcpu *vcpu;
- /* Registers: control register, timer value */
- u32 cnt_ctl;
- u64 cnt_cval;
-
/* Timer IRQ */
struct kvm_irq_level irq;
- /* Virtual offset */
- u64 cntvoff;
-
/* Emulated Timer (may be unused) */
struct hrtimer hrtimer;
@@ -71,7 +64,7 @@ int kvm_timer_hyp_init(bool);
int kvm_timer_enable(struct kvm_vcpu *vcpu);
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
-void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
+void kvm_timer_sync_user(struct kvm_vcpu *vcpu);
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_timer_update_run(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
@@ -83,8 +76,6 @@ int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
-bool kvm_timer_is_pending(struct kvm_vcpu *vcpu);
-
u64 kvm_phys_timer_read(void);
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu);
@@ -109,4 +100,8 @@ void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
enum kvm_arch_timer_regs treg,
u64 val);
+/* Needed for tracing */
+u32 timer_get_ctl(struct arch_timer_context *ctxt);
+u64 timer_get_cval(struct arch_timer_context *ctxt);
+
#endif
diff --git a/include/kvm/arm_hypercalls.h b/include/kvm/arm_hypercalls.h
index 0e2509d27910..1188f116cf4e 100644
--- a/include/kvm/arm_hypercalls.h
+++ b/include/kvm/arm_hypercalls.h
@@ -40,4 +40,12 @@ static inline void smccc_set_retval(struct kvm_vcpu *vcpu,
vcpu_set_reg(vcpu, 3, a3);
}
+struct kvm_one_reg;
+
+void kvm_arm_init_hypercalls(struct kvm *kvm);
+int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+
#endif
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 6db030439e29..c0b868ce6a8f 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -13,27 +13,45 @@
#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)
#define ARMV8_PMU_MAX_COUNTER_PAIRS ((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)
-#ifdef CONFIG_KVM_ARM_PMU
+#ifdef CONFIG_HW_PERF_EVENTS
struct kvm_pmc {
u8 idx; /* index into the pmu->pmc array */
struct perf_event *perf_event;
};
+struct kvm_pmu_events {
+ u32 events_host;
+ u32 events_guest;
+};
+
struct kvm_pmu {
- int irq_num;
+ struct irq_work overflow_work;
+ struct kvm_pmu_events events;
struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
- bool ready;
+ int irq_num;
bool created;
bool irq_level;
};
-#define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready)
+struct arm_pmu_entry {
+ struct list_head entry;
+ struct arm_pmu *arm_pmu;
+};
+
+DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
+
+static __always_inline bool kvm_arm_support_pmu_v3(void)
+{
+ return static_branch_likely(&kvm_arm_pmu_available);
+}
+
#define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
+u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
@@ -47,7 +65,6 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
u64 select_idx);
-bool kvm_arm_support_pmu_v3(void);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
@@ -55,11 +72,34 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
+
+struct kvm_pmu_events *kvm_get_pmu_events(void);
+void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
+void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
+
+#define kvm_vcpu_has_pmu(vcpu) \
+ (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
+
+/*
+ * Updates the vcpu's view of the pmu events for this cpu.
+ * Must be called, with interrupts disabled, before every vcpu run to ensure
+ * that an interrupt cannot fire and update the structure.
+ */
+#define kvm_pmu_update_vcpu_events(vcpu) \
+ do { \
+ if (!has_vhe() && kvm_vcpu_has_pmu(vcpu)) \
+ vcpu->arch.pmu.events = *kvm_get_pmu_events(); \
+ } while (0)
+
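To make the locking constraint concrete, a simplified sketch of a call site (not the actual KVM run loop):

    static void example_vcpu_run_prologue(struct kvm_vcpu *vcpu)
    {
            local_irq_disable();
            /* Snapshot the host/guest event masks while interrupts are off. */
            kvm_pmu_update_vcpu_events(vcpu);
            /* ... the world switch would follow here ... */
            local_irq_enable();
    }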
#else
struct kvm_pmu {
};
-#define kvm_arm_pmu_v3_ready(v) (false)
+static inline bool kvm_arm_support_pmu_v3(void)
+{
+ return false;
+}
+
#define kvm_arm_pmu_irq_initialized(v) (false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
u64 select_idx)
@@ -88,7 +128,6 @@ static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
u64 data, u64 select_idx) {}
-static inline bool kvm_arm_support_pmu_v3(void) { return false; }
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
@@ -108,6 +147,16 @@ static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
return 0;
}
+static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
+{
+ return 0;
+}
+
+#define kvm_vcpu_has_pmu(vcpu) ({ false; })
+static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
+
#endif
#endif
diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
index 5b58bd2fe088..6e55b9283789 100644
--- a/include/kvm/arm_psci.h
+++ b/include/kvm/arm_psci.h
@@ -13,14 +13,11 @@
#define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1)
#define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2)
#define KVM_ARM_PSCI_1_0 PSCI_VERSION(1, 0)
+#define KVM_ARM_PSCI_1_1 PSCI_VERSION(1, 1)
-#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0
+#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_1
-/*
- * We need the KVM pointer independently from the vcpu as we can call
- * this from HYP, and need to apply kern_hyp_va on it...
- */
-static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
+static inline int kvm_psci_version(struct kvm_vcpu *vcpu)
{
/*
* Our PSCI implementation stays the same across versions from
@@ -42,11 +39,4 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
int kvm_psci_call(struct kvm_vcpu *vcpu);
-struct kvm_one_reg;
-
-int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu);
-int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
-int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-
#endif /* __KVM_ARM_PSCI_H__ */
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 9d53f545a3d5..4df9e73a8bb5 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -5,9 +5,11 @@
#ifndef __KVM_ARM_VGIC_H
#define __KVM_ARM_VGIC_H
-#include <linux/kernel.h>
+#include <linux/bits.h>
#include <linux/kvm.h>
#include <linux/irqreturn.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/static_key.h>
#include <linux/types.h>
@@ -70,6 +72,10 @@ struct vgic_global {
/* Hardware has GICv4? */
bool has_gicv4;
+ bool has_gicv4_1;
+
+ /* Pseudo GICv3 from outer space */
+ bool no_hw_deactivation;
/* GIC system register CPU interface */
struct static_key_false gicv3_cpuif;
@@ -88,6 +94,26 @@ enum vgic_irq_config {
VGIC_CONFIG_LEVEL
};
+/*
+ * Per-irq ops overriding some common behaviours.
+ *
+ * Always called in a non-preemptible section; the functions can use
+ * kvm_arm_get_running_vcpu() to get the vcpu pointer for private IRQs.
+ */
+struct irq_ops {
+ /* Per interrupt flags for special-cased interrupts */
+ unsigned long flags;
+
+#define VGIC_IRQ_SW_RESAMPLE BIT(0) /* Clear the active state for resampling */
+
+ /*
+ * Callback function pointer for in-kernel devices that can tell us the
+ * state of the input level of a mapped level-triggered IRQ faster than
+ * peeking into the physical GIC.
+ */
+ bool (*get_input_level)(int vintid);
+};
+
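As a hedged illustration of how an in-kernel device might fill this in (the device-level helper is hypothetical):

    static bool example_line_level(int vintid)
    {
            return example_device_line_is_asserted(vintid); /* assumed helper */
    }

    static struct irq_ops example_irq_ops = {
            .flags            = VGIC_IRQ_SW_RESAMPLE,
            .get_input_level  = example_line_level,
    };

    /* Later: kvm_vgic_map_phys_irq(vcpu, host_irq, vintid, &example_irq_ops); */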
struct vgic_irq {
raw_spinlock_t irq_lock; /* Protects the content of the struct */
struct list_head lpi_list; /* Used to link all LPIs together */
@@ -125,21 +151,17 @@ struct vgic_irq {
u8 group; /* 0 == group 0, 1 == group 1 */
enum vgic_irq_config config; /* Level or edge */
- /*
- * Callback function pointer to in-kernel devices that can tell us the
- * state of the input level of mapped level-triggered IRQ faster than
- * peaking into the physical GIC.
- *
- * Always called in non-preemptible section and the functions can use
- * kvm_arm_get_running_vcpu() to get the vcpu pointer for private
- * IRQs.
- */
- bool (*get_input_level)(int vintid);
+ struct irq_ops *ops;
void *owner; /* Opaque pointer to reserve an interrupt
for in-kernel devices. */
};
+static inline bool vgic_irq_needs_resampling(struct vgic_irq *irq)
+{
+ return irq->ops && (irq->ops->flags & VGIC_IRQ_SW_RESAMPLE);
+}
+
struct vgic_register_region;
struct vgic_its;
@@ -209,6 +231,9 @@ struct vgic_dist {
/* Implementation revision as reported in the GICD_IIDR */
u32 implementation_rev;
+#define KVM_VGIC_IMP_REV_2 2 /* GICv2 restorable groups */
+#define KVM_VGIC_IMP_REV_3 3 /* GICv3 GICR_CTLR.{IW,CES,RWP} */
+#define KVM_VGIC_IMP_REV_LATEST KVM_VGIC_IMP_REV_3
/* Userspace can write to GICv2 IGROUPR */
bool v2_groups_user_writable;
@@ -230,6 +255,9 @@ struct vgic_dist {
/* distributor enabled */
bool enabled;
+ /* Wants SGIs without active state */
+ bool nassgireq;
+
struct vgic_irq *spis;
struct vgic_io_device dist_iodev;
@@ -270,6 +298,8 @@ struct vgic_v2_cpu_if {
u32 vgic_vmcr;
u32 vgic_apr;
u32 vgic_lr[VGIC_V2_MAX_LRS];
+
+ unsigned int used_lrs;
};
struct vgic_v3_cpu_if {
@@ -287,6 +317,8 @@ struct vgic_v3_cpu_if {
* linking the Linux IRQ subsystem and the ITS together.
*/
struct its_vpe its_vpe;
+
+ unsigned int used_lrs;
};
struct vgic_cpu {
@@ -296,7 +328,6 @@ struct vgic_cpu {
struct vgic_v3_cpu_if vgic_v3;
};
- unsigned int used_lrs;
struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
raw_spinlock_t ap_list_lock; /* Protects the ap_list */
@@ -315,11 +346,13 @@ struct vgic_cpu {
*/
struct vgic_io_device rd_iodev;
struct vgic_redist_region *rdreg;
+ u32 rdreg_index;
+ atomic_t syncr_busy;
/* Contains the attributes and gpa of the LPI pending tables. */
u64 pendbaser;
-
- bool lpis_enabled;
+ /* GICR_CTLR.{ENABLE_LPIS,RWP} */
+ atomic_t ctlr;
/* Cache guest priority bits */
u32 num_pri_bits;
@@ -331,7 +364,7 @@ struct vgic_cpu {
extern struct static_key_false vgic_v2_cpuif_trap;
extern struct static_key_false vgic_v3_cpuif_trap;
-int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
+int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr);
void kvm_vgic_early_init(struct kvm *kvm);
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
int kvm_vgic_create(struct kvm *kvm, u32 type);
@@ -344,7 +377,7 @@ void kvm_vgic_init_cpu_hardware(void);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
bool level, void *owner);
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
- u32 vintid, bool (*get_input_level)(int vindid));
+ u32 vintid, struct irq_ops *ops);
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);
@@ -395,6 +428,7 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
struct kvm_kernel_irq_routing_entry *irq_entry);
int vgic_v4_load(struct kvm_vcpu *vcpu);
+void vgic_v4_commit(struct kvm_vcpu *vcpu);
int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db);
#endif /* __KVM_ARM_VGIC_H */
diff --git a/include/linux/a.out.h b/include/linux/a.out.h
deleted file mode 100644
index 600cf45645c6..000000000000
--- a/include/linux/a.out.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __A_OUT_GNU_H__
-#define __A_OUT_GNU_H__
-
-#include <uapi/linux/a.out.h>
-
-#ifndef __ASSEMBLY__
-#ifdef linux
-#include <asm/page.h>
-#if defined(__i386__) || defined(__mc68000__)
-#else
-#ifndef SEGMENT_SIZE
-#define SEGMENT_SIZE PAGE_SIZE
-#endif
-#endif
-#endif
-#endif /*__ASSEMBLY__ */
-#endif /* __A_OUT_GNU_H__ */
diff --git a/include/linux/acct.h b/include/linux/acct.h
index bc70e81895c0..2718c4854815 100644
--- a/include/linux/acct.h
+++ b/include/linux/acct.h
@@ -21,7 +21,6 @@
#ifdef CONFIG_BSD_PROCESS_ACCT
struct pid_namespace;
-extern int acct_parm[]; /* for sysctl */
extern void acct_collect(long exitcode, int group_dead);
extern void acct_process(void);
extern void acct_exit_ns(struct pid_namespace *);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 0f24d701fbdc..3015235d65e3 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -55,7 +55,7 @@ static inline struct fwnode_handle *acpi_alloc_fwnode_static(void)
if (!fwnode)
return NULL;
- fwnode->ops = &acpi_static_fwnode_ops;
+ fwnode_init(fwnode, &acpi_static_fwnode_ops);
return fwnode;
}
@@ -105,6 +105,7 @@ enum acpi_irq_model_id {
ACPI_IRQ_MODEL_IOSAPIC,
ACPI_IRQ_MODEL_PLATFORM,
ACPI_IRQ_MODEL_GIC,
+ ACPI_IRQ_MODEL_LPIC,
ACPI_IRQ_MODEL_COUNT
};
@@ -132,6 +133,8 @@ enum acpi_address_range_id {
union acpi_subtable_headers {
struct acpi_subtable_header common;
struct acpi_hmat_structure hmat;
+ struct acpi_prmt_module_header prmt;
+ struct acpi_cedt_header cedt;
};
typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table);
@@ -139,6 +142,9 @@ typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table);
typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header,
const unsigned long end);
+typedef int (*acpi_tbl_entry_handler_arg)(union acpi_subtable_headers *header,
+ void *arg, const unsigned long end);
+
/* Debugger support */
struct acpi_debugger_ops {
@@ -215,6 +221,8 @@ static inline int acpi_debugger_notify_command_complete(void)
struct acpi_subtable_proc {
int id;
acpi_tbl_entry_handler handler;
+ acpi_tbl_entry_handler_arg handler_arg;
+ void *arg;
int count;
};
@@ -222,29 +230,47 @@ void __iomem *__acpi_map_table(unsigned long phys, unsigned long size);
void __acpi_unmap_table(void __iomem *map, unsigned long size);
int early_acpi_boot_init(void);
int acpi_boot_init (void);
+void acpi_boot_table_prepare (void);
void acpi_boot_table_init (void);
int acpi_mps_check (void);
int acpi_numa_init (void);
+int acpi_locate_initial_tables (void);
+void acpi_reserve_initial_tables (void);
+void acpi_table_init_complete (void);
int acpi_table_init (void);
+
+#ifdef CONFIG_ACPI_TABLE_LIB
+#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI)
+#define __init_or_acpilib
+#define __initdata_or_acpilib
+#else
+#define EXPORT_SYMBOL_ACPI_LIB(x)
+#define __init_or_acpilib __init
+#define __initdata_or_acpilib __initdata
+#endif
+
int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
-int __init acpi_table_parse_entries(char *id, unsigned long table_size,
- int entry_id,
- acpi_tbl_entry_handler handler,
- unsigned int max_entries);
-int __init acpi_table_parse_entries_array(char *id, unsigned long table_size,
- struct acpi_subtable_proc *proc, int proc_num,
- unsigned int max_entries);
+int __init_or_acpilib acpi_table_parse_entries(char *id,
+ unsigned long table_size, int entry_id,
+ acpi_tbl_entry_handler handler, unsigned int max_entries);
+int __init_or_acpilib acpi_table_parse_entries_array(char *id,
+ unsigned long table_size, struct acpi_subtable_proc *proc,
+ int proc_num, unsigned int max_entries);
int acpi_table_parse_madt(enum acpi_madt_type id,
acpi_tbl_entry_handler handler,
unsigned int max_entries);
+int __init_or_acpilib
+acpi_table_parse_cedt(enum acpi_cedt_type id,
+ acpi_tbl_entry_handler_arg handler_arg, void *arg);
+
int acpi_parse_mcfg (struct acpi_table_header *header);
void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
/* the following numa functions are architecture-dependent */
void acpi_numa_slit_init (struct acpi_table_slit *slit);
-#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_LOONGARCH)
void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
#else
static inline void
@@ -253,6 +279,12 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) { }
void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
+#if defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH)
+void acpi_arch_dma_setup(struct device *dev);
+#else
+static inline void acpi_arch_dma_setup(struct device *dev) { }
+#endif
+
#ifdef CONFIG_ARM64
void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa);
#else
@@ -328,7 +360,8 @@ int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi);
void acpi_set_irq_model(enum acpi_irq_model_id model,
- struct fwnode_handle *fwnode);
+ struct fwnode_handle *(*)(u32));
+void acpi_set_gsi_to_irq_fallback(u32 (*)(u32));
struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags,
unsigned int size,
@@ -416,11 +449,31 @@ extern void acpi_osi_setup(char *str);
extern bool acpi_osi_is_win8(void);
#ifdef CONFIG_ACPI_NUMA
-int acpi_map_pxm_to_online_node(int pxm);
int acpi_map_pxm_to_node(int pxm);
int acpi_get_node(acpi_handle handle);
+
+/**
+ * pxm_to_online_node - Map proximity ID to online node
+ * @pxm: ACPI proximity ID
+ *
+ * This is similar to pxm_to_node(), but always returns an online
+ * node. When the mapped node from a given proximity ID is offline, it
+ * looks up the node distance table and returns the nearest online node.
+ *
+ * ACPI device drivers, which are called after the NUMA initialization has
+ * completed in the kernel, can call this interface to obtain their device
+ * NUMA topology from ACPI tables. Such drivers do not have to deal with
+ * offline nodes. A node may be offline when its SRAT memory entry does not
+ * exist, or when NUMA is disabled, e.g. via "numa=off" on x86.
+ */
+static inline int pxm_to_online_node(int pxm)
+{
+ int node = pxm_to_node(pxm);
+
+ return numa_map_to_online_node(node);
+}
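A minimal caller sketch, assuming a driver that wants node-local memory via kmalloc_node() from <linux/slab.h>:

    static void *example_alloc_near(int pxm, size_t size)
    {
            int node = pxm_to_online_node(pxm); /* never an offline node */

            return kmalloc_node(size, GFP_KERNEL, node);
    }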
#else
-static inline int acpi_map_pxm_to_online_node(int pxm)
+static inline int pxm_to_online_node(int pxm)
{
return 0;
}
@@ -445,7 +498,7 @@ bool acpi_dev_resource_address_space(struct acpi_resource *ares,
struct resource_win *win);
bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares,
struct resource_win *win);
-unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable);
+unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable, u8 wake_capable);
unsigned int acpi_dev_get_irq_type(int triggering, int polarity);
bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
struct resource *res);
@@ -456,6 +509,7 @@ int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
void *preproc_data);
int acpi_dev_get_dma_resources(struct acpi_device *adev,
struct list_head *list);
+int acpi_dev_get_memory_resources(struct acpi_device *adev, struct list_head *list);
int acpi_dev_filter_resource_type(struct acpi_resource *ares,
unsigned long types);
@@ -472,13 +526,10 @@ int acpi_check_resource_conflict(const struct resource *res);
int acpi_check_region(resource_size_t start, resource_size_t n,
const char *name);
-acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
- u32 level);
-
int acpi_resources_are_enforced(void);
#ifdef CONFIG_HIBERNATION
-void __init acpi_no_s4_hw_signature(void);
+extern int acpi_check_s4_hw_signature;
#endif
#ifdef CONFIG_PM_SLEEP
@@ -488,6 +539,11 @@ void __init acpi_nvs_nosave_s3(void);
void __init acpi_sleep_no_blacklist(void);
#endif /* CONFIG_PM_SLEEP */
+int acpi_register_wakeup_handler(
+ int wake_irq, bool (*wakeup)(void *context), void *context);
+void acpi_unregister_wakeup_handler(
+ bool (*wakeup)(void *context), void *context);
+
struct acpi_osc_context {
char *uuid_str; /* UUID string */
int rev;
@@ -497,10 +553,16 @@ struct acpi_osc_context {
acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
-/* Indexes into _OSC Capabilities Buffer (DWORDs 2 & 3 are device-specific) */
+/* Number of _OSC capability DWORDS depends on bridge type */
+#define OSC_PCI_CAPABILITY_DWORDS 3
+#define OSC_CXL_CAPABILITY_DWORDS 5
+
+/* Indexes into _OSC Capabilities Buffer (DWORDs 2 to 5 are device-specific) */
#define OSC_QUERY_DWORD 0 /* DWORD 1 */
#define OSC_SUPPORT_DWORD 1 /* DWORD 2 */
#define OSC_CONTROL_DWORD 2 /* DWORD 3 */
+#define OSC_EXT_SUPPORT_DWORD 3 /* DWORD 4 */
+#define OSC_EXT_CONTROL_DWORD 4 /* DWORD 5 */
/* _OSC Capabilities DWORD 1: Query/Control and Error Returns (generic) */
#define OSC_QUERY_ENABLE 0x00000001 /* input */
@@ -520,9 +582,24 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
#define OSC_SB_PCLPI_SUPPORT 0x00000080
#define OSC_SB_OSLPI_SUPPORT 0x00000100
#define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000
+#define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00002000
+#define OSC_SB_CPC_FLEXIBLE_ADR_SPACE 0x00004000
+#define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000
+#define OSC_SB_PRM_SUPPORT 0x00200000
extern bool osc_sb_apei_support_acked;
extern bool osc_pc_lpi_support_confirmed;
+extern bool osc_sb_native_usb4_support_confirmed;
+extern bool osc_sb_cppc2_support_acked;
+extern bool osc_cpc_flexible_adr_space_confirmed;
+
+/* USB4 Capabilities */
+#define OSC_USB_USB3_TUNNELING 0x00000001
+#define OSC_USB_DP_TUNNELING 0x00000002
+#define OSC_USB_PCIE_TUNNELING 0x00000004
+#define OSC_USB_XDOMAIN 0x00000008
+
+extern u32 osc_sb_native_usb4_control;
/* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */
#define OSC_PCI_EXT_CONFIG_SUPPORT 0x00000001
@@ -530,8 +607,8 @@ extern bool osc_pc_lpi_support_confirmed;
#define OSC_PCI_CLOCK_PM_SUPPORT 0x00000004
#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 0x00000008
#define OSC_PCI_MSI_SUPPORT 0x00000010
+#define OSC_PCI_EDR_SUPPORT 0x00000080
#define OSC_PCI_HPX_TYPE_3_SUPPORT 0x00000100
-#define OSC_PCI_SUPPORT_MASKS 0x0000011f
/* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */
#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001
@@ -540,7 +617,30 @@ extern bool osc_pc_lpi_support_confirmed;
#define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008
#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010
#define OSC_PCI_EXPRESS_LTR_CONTROL 0x00000020
-#define OSC_PCI_CONTROL_MASKS 0x0000003f
+#define OSC_PCI_EXPRESS_DPC_CONTROL 0x00000080
+
+/* CXL _OSC: Capabilities DWORD 4: Support Field */
+#define OSC_CXL_1_1_PORT_REG_ACCESS_SUPPORT 0x00000001
+#define OSC_CXL_2_0_PORT_DEV_REG_ACCESS_SUPPORT 0x00000002
+#define OSC_CXL_PROTOCOL_ERR_REPORTING_SUPPORT 0x00000004
+#define OSC_CXL_NATIVE_HP_SUPPORT 0x00000008
+
+/* CXL _OSC: Capabilities DWORD 5: Control Field */
+#define OSC_CXL_ERROR_REPORTING_CONTROL 0x00000001
+
+static inline u32 acpi_osc_ctx_get_pci_control(struct acpi_osc_context *context)
+{
+ u32 *ret = context->ret.pointer;
+
+ return ret[OSC_CONTROL_DWORD];
+}
+
+static inline u32 acpi_osc_ctx_get_cxl_control(struct acpi_osc_context *context)
+{
+ u32 *ret = context->ret.pointer;
+
+ return ret[OSC_EXT_CONTROL_DWORD];
+}
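A hedged sketch of a caller; it assumes @ctx was already populated (uuid_str, rev, cap) and that a successful acpi_run_osc() hands ownership of ctx->ret.pointer to the caller:

    static u32 example_granted_pci_control(acpi_handle handle,
                                           struct acpi_osc_context *ctx)
    {
            u32 ctrl = 0;

            if (ACPI_SUCCESS(acpi_run_osc(handle, ctx))) {
                    ctrl = acpi_osc_ctx_get_pci_control(ctx);
                    kfree(ctx->ret.pointer); /* caller-owned buffer */
            }
            return ctrl;
    }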
#define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002
#define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004
@@ -553,9 +653,6 @@ extern bool osc_pc_lpi_support_confirmed;
#define ACPI_GSB_ACCESS_ATTRIB_RAW_BYTES 0x0000000E
#define ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS 0x0000000F
-extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
- u32 *mask, u32 req);
-
/* Enable _OST when all relevant hotplug operations are enabled */
#if defined(CONFIG_ACPI_HOTPLUG_CPU) && \
defined(CONFIG_ACPI_HOTPLUG_MEMORY) && \
@@ -627,10 +724,9 @@ extern bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv);
int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
int acpi_device_modalias(struct device *, char *, int);
-void acpi_walk_dep_device_list(acpi_handle handle);
struct platform_device *acpi_create_platform_device(struct acpi_device *,
- struct property_entry *);
+ const struct property_entry *);
#define ACPI_PTR(_ptr) (_ptr)
static inline void acpi_device_set_enumerated(struct acpi_device *adev)
@@ -671,6 +767,9 @@ static inline u64 acpi_arch_get_root_pointer(void)
}
#endif
+int acpi_get_local_address(acpi_handle handle, u32 *addr);
+const char *acpi_get_subsystem_id(acpi_handle handle);
+
#else /* !CONFIG_ACPI */
#define acpi_disabled 1
@@ -681,6 +780,8 @@ static inline u64 acpi_arch_get_root_pointer(void)
#define ACPI_HANDLE_FWNODE(fwnode) (NULL)
#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0),
+#include <acpi/acpi_numa.h>
+
struct fwnode_handle;
static inline bool acpi_dev_found(const char *hid)
@@ -701,40 +802,50 @@ acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *u
return false;
}
+static inline int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer)
+{
+ return -ENODEV;
+}
+
static inline struct acpi_device *
acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
{
return NULL;
}
+static inline bool acpi_reduced_hardware(void)
+{
+ return false;
+}
+
static inline void acpi_dev_put(struct acpi_device *adev) {}
-static inline bool is_acpi_node(struct fwnode_handle *fwnode)
+static inline bool is_acpi_node(const struct fwnode_handle *fwnode)
{
return false;
}
-static inline bool is_acpi_device_node(struct fwnode_handle *fwnode)
+static inline bool is_acpi_device_node(const struct fwnode_handle *fwnode)
{
return false;
}
-static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode)
+static inline struct acpi_device *to_acpi_device_node(const struct fwnode_handle *fwnode)
{
return NULL;
}
-static inline bool is_acpi_data_node(struct fwnode_handle *fwnode)
+static inline bool is_acpi_data_node(const struct fwnode_handle *fwnode)
{
return false;
}
-static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwnode)
+static inline struct acpi_data_node *to_acpi_data_node(const struct fwnode_handle *fwnode)
{
return NULL;
}
-static inline bool acpi_data_node_match(struct fwnode_handle *fwnode,
+static inline bool acpi_data_node_match(const struct fwnode_handle *fwnode,
const char *name)
{
return false;
@@ -777,9 +888,12 @@ static inline int acpi_boot_init(void)
return 0;
}
+static inline void acpi_boot_table_prepare(void)
+{
+}
+
static inline void acpi_boot_table_init(void)
{
- return;
}
static inline int acpi_mps_check(void)
@@ -837,7 +951,7 @@ static inline bool acpi_driver_match_device(struct device *dev,
static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle,
const guid_t *guid,
- int rev, int func,
+ u64 rev, u64 func,
union acpi_object *argv4)
{
return NULL;
@@ -855,7 +969,14 @@ static inline int acpi_device_modalias(struct device *dev,
return -ENODEV;
}
-static inline bool acpi_dma_supported(struct acpi_device *adev)
+static inline struct platform_device *
+acpi_create_platform_device(struct acpi_device *adev,
+ const struct property_entry *properties)
+{
+ return NULL;
+}
+
+static inline bool acpi_dma_supported(const struct acpi_device *adev)
{
return false;
}
@@ -865,8 +986,7 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
return DEV_DMA_NOT_SUPPORTED;
}
-static inline int acpi_dma_get_range(struct device *dev, u64 *dma_addr,
- u64 *offset, u64 *size)
+static inline int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map)
{
return -ENODEV;
}
@@ -877,6 +997,13 @@ static inline int acpi_dma_configure(struct device *dev,
return 0;
}
+static inline int acpi_dma_configure_id(struct device *dev,
+ enum dev_dma_attr attr,
+ const u32 *input_id)
+{
+ return 0;
+}
+
#define ACPI_PTR(_ptr) (NULL)
static inline void acpi_device_set_enumerated(struct acpi_device *adev)
@@ -902,6 +1029,36 @@ static inline struct acpi_device *acpi_resource_consumer(struct resource *res)
return NULL;
}
+static inline int acpi_get_local_address(acpi_handle handle, u32 *addr)
+{
+ return -ENODEV;
+}
+
+static inline const char *acpi_get_subsystem_id(acpi_handle handle)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int acpi_register_wakeup_handler(int wake_irq,
+ bool (*wakeup)(void *context), void *context)
+{
+ return -ENXIO;
+}
+
+static inline void acpi_unregister_wakeup_handler(
+ bool (*wakeup)(void *context), void *context) { }
+
+struct acpi_osc_context;
+static inline u32 acpi_osc_ctx_get_pci_control(struct acpi_osc_context *context)
+{
+ return 0;
+}
+
+static inline u32 acpi_osc_ctx_get_cxl_control(struct acpi_osc_context *context)
+{
+ return 0;
+}
+
#endif /* !CONFIG_ACPI */
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
@@ -922,8 +1079,17 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
u32 val_a, u32 val_b);
-
#ifdef CONFIG_X86
+struct acpi_s2idle_dev_ops {
+ struct list_head list_node;
+ void (*prepare)(void);
+ void (*check)(void);
+ void (*restore)(void);
+};
+int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg);
+void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg);
+#endif /* CONFIG_X86 */
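/*
 * Editor's note: a hedged sketch of how a driver might hook the
 * suspend-to-idle flow with the new callbacks; all "example" names are
 * hypothetical.
 */
static void example_lps0_prepare(void)
{
	/* quiesce the device before the system enters s2idle */
}

static void example_lps0_restore(void)
{
	/* undo the prepare step on resume */
}

static struct acpi_s2idle_dev_ops example_lps0_ops = {
	.prepare = example_lps0_prepare,
	.restore = example_lps0_restore,
};

/* in probe(): ret = acpi_register_lps0_dev(&example_lps0_ops); */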
+#ifndef CONFIG_IA64
void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
#else
static inline void arch_reserve_mem_area(acpi_physical_address addr,
@@ -941,15 +1107,23 @@ int acpi_dev_resume(struct device *dev);
int acpi_subsys_runtime_suspend(struct device *dev);
int acpi_subsys_runtime_resume(struct device *dev);
int acpi_dev_pm_attach(struct device *dev, bool power_on);
+bool acpi_storage_d3(struct device *dev);
+bool acpi_dev_state_d0(struct device *dev);
#else
-static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; }
-static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
{
return 0;
}
+static inline bool acpi_storage_d3(struct device *dev)
+{
+ return false;
+}
+static inline bool acpi_dev_state_d0(struct device *dev)
+{
+ return true;
+}
#endif
#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
@@ -978,9 +1152,14 @@ static inline void acpi_ec_set_gpe_wake_mask(u8 action) {}
__printf(3, 4)
void acpi_handle_printk(const char *level, acpi_handle handle,
const char *fmt, ...);
+void acpi_evaluation_failure_warn(acpi_handle handle, const char *name,
+ acpi_status status);
#else /* !CONFIG_ACPI */
static inline __printf(3, 4) void
acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {}
+static inline void acpi_evaluation_failure_warn(acpi_handle handle,
+ const char *name,
+ acpi_status status) {}
#endif /* !CONFIG_ACPI */
#if defined(CONFIG_ACPI) && defined(CONFIG_DYNAMIC_DEBUG)
@@ -1030,19 +1209,45 @@ void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const c
#if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB)
bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
struct acpi_resource_gpio **agpio);
-int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index);
+bool acpi_gpio_get_io_resource(struct acpi_resource *ares,
+ struct acpi_resource_gpio **agpio);
+int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name, int index,
+ bool *wake_capable);
#else
static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
struct acpi_resource_gpio **agpio)
{
return false;
}
-static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
+static inline bool acpi_gpio_get_io_resource(struct acpi_resource *ares,
+ struct acpi_resource_gpio **agpio)
+{
+ return false;
+}
+static inline int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name,
+ int index, bool *wake_capable)
{
return -ENXIO;
}
#endif
+static inline int acpi_dev_gpio_irq_wake_get(struct acpi_device *adev, int index,
+ bool *wake_capable)
+{
+ return acpi_dev_gpio_irq_wake_get_by(adev, NULL, index, wake_capable);
+}
+
+static inline int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name,
+ int index)
+{
+ return acpi_dev_gpio_irq_wake_get_by(adev, name, index, NULL);
+}
+
+static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
+{
+ return acpi_dev_gpio_irq_wake_get_by(adev, NULL, index, NULL);
+}
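/*
 * Editor's note: a sketch of the intended call pattern for the wake-capable
 * variant; the helper name is hypothetical.
 */
static int example_request_wake_irq(struct acpi_device *adev, struct device *dev)
{
	bool wake_capable = false;
	int irq = acpi_dev_gpio_irq_wake_get(adev, 0, &wake_capable);

	if (irq < 0)
		return irq;
	if (wake_capable)
		device_init_wakeup(dev, true);
	return irq;
}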
+
/* Device properties */
#ifdef CONFIG_ACPI
@@ -1068,22 +1273,13 @@ static inline bool acpi_dev_has_props(const struct acpi_device *adev)
struct acpi_device_properties *
acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
- const union acpi_object *properties);
+ union acpi_object *properties);
int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname,
void **valptr);
-int acpi_dev_prop_read_single(struct acpi_device *adev,
- const char *propname, enum dev_prop_type proptype,
- void *val);
-int acpi_node_prop_read(const struct fwnode_handle *fwnode,
- const char *propname, enum dev_prop_type proptype,
- void *val, size_t nval);
-int acpi_dev_prop_read(const struct acpi_device *adev, const char *propname,
- enum dev_prop_type proptype, void *val, size_t nval);
struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
struct fwnode_handle *child);
-struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode);
struct acpi_probe_entry;
typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *,
@@ -1115,16 +1311,27 @@ struct acpi_probe_entry {
kernel_ulong_t driver_data;
};
-#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \
+#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, \
+ valid, data, fn) \
+ static const struct acpi_probe_entry __acpi_probe_##name \
+ __used __section("__" #table "_acpi_probe_table") = { \
+ .id = table_id, \
+ .type = subtable, \
+ .subtable_valid = valid, \
+ .probe_table = fn, \
+ .driver_data = data, \
+ }
+
+#define ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(table, name, table_id, \
+ subtable, valid, data, fn) \
static const struct acpi_probe_entry __acpi_probe_##name \
- __used __section(__##table##_acpi_probe_table) \
- = { \
+ __used __section("__" #table "_acpi_probe_table") = { \
.id = table_id, \
.type = subtable, \
.subtable_valid = valid, \
- .probe_table = (acpi_tbl_table_handler)fn, \
- .driver_data = data, \
- }
+ .probe_subtbl = fn, \
+ .driver_data = data, \
+ }
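/*
 * Editor's note: a sketch of declaring a whole-table probe entry, mirroring
 * how TIMER_ACPI_DECLARE() expands; the handler name is hypothetical.
 */
static int __init example_gtdt_probe(struct acpi_table_header *table)
{
	/* parse the table and register the driver */
	return 0;
}
ACPI_DECLARE_PROBE_ENTRY(timer, example_timer, ACPI_SIG_GTDT,
			 0, NULL, 0, example_gtdt_probe);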
#define ACPI_PROBE_TABLE(name) __##name##_acpi_probe_table
#define ACPI_PROBE_TABLE_END(name) __##name##_acpi_probe_table_end
@@ -1170,37 +1377,6 @@ static inline int acpi_node_prop_get(const struct fwnode_handle *fwnode,
return -ENXIO;
}
-static inline int acpi_dev_prop_get(const struct acpi_device *adev,
- const char *propname,
- void **valptr)
-{
- return -ENXIO;
-}
-
-static inline int acpi_dev_prop_read_single(const struct acpi_device *adev,
- const char *propname,
- enum dev_prop_type proptype,
- void *val)
-{
- return -ENXIO;
-}
-
-static inline int acpi_node_prop_read(const struct fwnode_handle *fwnode,
- const char *propname,
- enum dev_prop_type proptype,
- void *val, size_t nval)
-{
- return -ENXIO;
-}
-
-static inline int acpi_dev_prop_read(const struct acpi_device *adev,
- const char *propname,
- enum dev_prop_type proptype,
- void *val, size_t nval)
-{
- return -ENXIO;
-}
-
static inline struct fwnode_handle *
acpi_get_next_subnode(const struct fwnode_handle *fwnode,
struct fwnode_handle *child)
@@ -1209,12 +1385,6 @@ acpi_get_next_subnode(const struct fwnode_handle *fwnode,
}
static inline struct fwnode_handle *
-acpi_node_get_parent(const struct fwnode_handle *fwnode)
-{
- return NULL;
-}
-
-static inline struct fwnode_handle *
acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev)
{
@@ -1286,9 +1456,9 @@ static inline int lpit_read_residency_count_address(u64 *address)
#ifdef CONFIG_ACPI_PPTT
int acpi_pptt_cpu_is_thread(unsigned int cpu);
int find_acpi_cpu_topology(unsigned int cpu, int level);
+int find_acpi_cpu_topology_cluster(unsigned int cpu);
int find_acpi_cpu_topology_package(unsigned int cpu);
int find_acpi_cpu_topology_hetero_id(unsigned int cpu);
-int find_acpi_cpu_cache_topology(unsigned int cpu, int level);
#else
static inline int acpi_pptt_cpu_is_thread(unsigned int cpu)
{
@@ -1298,28 +1468,32 @@ static inline int find_acpi_cpu_topology(unsigned int cpu, int level)
{
return -EINVAL;
}
-static inline int find_acpi_cpu_topology_package(unsigned int cpu)
+static inline int find_acpi_cpu_topology_cluster(unsigned int cpu)
{
return -EINVAL;
}
-static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
+static inline int find_acpi_cpu_topology_package(unsigned int cpu)
{
return -EINVAL;
}
-static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
+static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
{
return -EINVAL;
}
#endif
+#ifdef CONFIG_ACPI_PCC
+void acpi_init_pcc(void);
+#else
+static inline void acpi_init_pcc(void) { }
+#endif
+
#ifdef CONFIG_ACPI
-extern int acpi_platform_notify(struct device *dev, enum kobject_action action);
+extern void acpi_device_notify(struct device *dev);
+extern void acpi_device_notify_remove(struct device *dev);
#else
-static inline int
-acpi_platform_notify(struct device *dev, enum kobject_action action)
-{
- return 0;
-}
+static inline void acpi_device_notify(struct device *dev) { }
+static inline void acpi_device_notify_remove(struct device *dev) { }
#endif
#endif /*_LINUX_ACPI_H*/
diff --git a/include/linux/acpi_agdi.h b/include/linux/acpi_agdi.h
new file mode 100644
index 000000000000..f477f0b452fa
--- /dev/null
+++ b/include/linux/acpi_agdi.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ACPI_AGDI_H__
+#define __ACPI_AGDI_H__
+
+#include <linux/acpi.h>
+
+#ifdef CONFIG_ACPI_AGDI
+void __init acpi_agdi_init(void);
+#else
+static inline void acpi_agdi_init(void) {}
+#endif
+#endif /* __ACPI_AGDI_H__ */
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 8e7e2ec37f1b..b43be0987b19 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -28,31 +28,43 @@ void iort_deregister_domain_token(int trans_id);
struct fwnode_handle *iort_find_domain_token(int trans_id);
#ifdef CONFIG_ACPI_IORT
void acpi_iort_init(void);
-u32 iort_msi_map_rid(struct device *dev, u32 req_id);
-struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id);
+u32 iort_msi_map_id(struct device *dev, u32 id);
+struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
+ enum irq_domain_bus_token bus_token);
void acpi_configure_pmsi_domain(struct device *dev);
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
+void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
+ struct list_head *head);
+void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
+ struct list_head *head);
/* IOMMU interface */
-void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size);
-const struct iommu_ops *iort_iommu_configure(struct device *dev);
-int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head);
+int iort_dma_get_ranges(struct device *dev, u64 *size);
+int iort_iommu_configure_id(struct device *dev, const u32 *id_in);
+void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head);
+phys_addr_t acpi_iort_dma_get_max_cpu_address(void);
#else
static inline void acpi_iort_init(void) { }
-static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id)
-{ return req_id; }
-static inline struct irq_domain *iort_get_device_domain(struct device *dev,
- u32 req_id)
+static inline u32 iort_msi_map_id(struct device *dev, u32 id)
+{ return id; }
+static inline struct irq_domain *iort_get_device_domain(
+ struct device *dev, u32 id, enum irq_domain_bus_token bus_token)
{ return NULL; }
static inline void acpi_configure_pmsi_domain(struct device *dev) { }
+static inline
+void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { }
+static inline
+void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { }
/* IOMMU interface */
-static inline void iort_dma_setup(struct device *dev, u64 *dma_addr,
- u64 *size) { }
-static inline const struct iommu_ops *iort_iommu_configure(
- struct device *dev)
-{ return NULL; }
+static inline int iort_dma_get_ranges(struct device *dev, u64 *size)
+{ return -ENODEV; }
+static inline int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
+{ return -ENODEV; }
static inline
-int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
-{ return 0; }
+void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
+{ }
+
+static inline phys_addr_t acpi_iort_dma_get_max_cpu_address(void)
+{ return PHYS_ADDR_MAX; }
#endif
#endif /* __ACPI_IORT_H__ */
diff --git a/include/linux/acpi_mdio.h b/include/linux/acpi_mdio.h
new file mode 100644
index 000000000000..0a24ab7cb66f
--- /dev/null
+++ b/include/linux/acpi_mdio.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ACPI helper for the MDIO (Ethernet PHY) API
+ */
+
+#ifndef __LINUX_ACPI_MDIO_H
+#define __LINUX_ACPI_MDIO_H
+
+#include <linux/phy.h>
+
+#if IS_ENABLED(CONFIG_ACPI_MDIO)
+int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode);
+#else /* CONFIG_ACPI_MDIO */
+static inline int
+acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
+{
+ /*
+ * Fall back to plain mdiobus_register() to register the bus.
+ * This way, we don't have to keep compat bits around in drivers.
+ */
+
+ return mdiobus_register(mdio);
+}
+#endif
+
+#endif /* __LINUX_ACPI_MDIO_H */
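/*
 * Editor's note: a sketch of the typical call site in an MDIO controller
 * driver; the probe function is hypothetical and error handling is trimmed.
 */
static int example_mdio_probe(struct platform_device *pdev)
{
	struct mii_bus *bus = devm_mdiobus_alloc(&pdev->dev);

	if (!bus)
		return -ENOMEM;
	/* fill in bus->name, bus->read, bus->write, ... */
	return acpi_mdiobus_register(bus, dev_fwnode(&pdev->dev));
}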
diff --git a/include/linux/acpi_viot.h b/include/linux/acpi_viot.h
new file mode 100644
index 000000000000..a5a122431563
--- /dev/null
+++ b/include/linux/acpi_viot.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ACPI_VIOT_H__
+#define __ACPI_VIOT_H__
+
+#include <linux/acpi.h>
+
+#ifdef CONFIG_ACPI_VIOT
+void __init acpi_viot_early_init(void);
+void __init acpi_viot_init(void);
+int viot_iommu_configure(struct device *dev);
+#else
+static inline void acpi_viot_early_init(void) {}
+static inline void acpi_viot_init(void) {}
+static inline int viot_iommu_configure(struct device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __ACPI_VIOT_H__ */
diff --git a/include/linux/adreno-smmu-priv.h b/include/linux/adreno-smmu-priv.h
new file mode 100644
index 000000000000..c637e0997f6d
--- /dev/null
+++ b/include/linux/adreno-smmu-priv.h
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Google, Inc
+ */
+
+#ifndef __ADRENO_SMMU_PRIV_H
+#define __ADRENO_SMMU_PRIV_H
+
+#include <linux/io-pgtable.h>
+
+/**
+ * struct adreno_smmu_fault_info - container for key fault information
+ *
+ * @far: The faulting IOVA from ARM_SMMU_CB_FAR
+ * @ttbr0: The current TTBR0 pagetable from ARM_SMMU_CB_TTBR0
+ * @contextidr: The value of ARM_SMMU_CB_CONTEXTIDR
+ * @fsr: The fault status from ARM_SMMU_CB_FSR
+ * @fsynr0: The value of FSYNR0 from ARM_SMMU_CB_FSYNR0
+ * @fsynr1: The value of FSYNR1 from ARM_SMMU_CB_FSYNR1
+ * @cbfrsynra: The value of CBFRSYNRA from ARM_SMMU_GR1_CBFRSYNRA(idx)
+ *
+ * This struct passes back key page fault information to the GPU driver
+ * through the get_fault_info function pointer.
+ * The GPU driver can use this information to print informative
+ * log messages and provide deeper GPU-specific insight into the fault.
+ */
+struct adreno_smmu_fault_info {
+ u64 far;
+ u64 ttbr0;
+ u32 contextidr;
+ u32 fsr;
+ u32 fsynr0;
+ u32 fsynr1;
+ u32 cbfrsynra;
+};
+
+/**
+ * struct adreno_smmu_priv - private interface between adreno-smmu and GPU
+ *
+ * @cookie: An opaque token provided by adreno-smmu and passed
+ * back into the callbacks
+ * @get_ttbr1_cfg: Get the TTBR1 config for the GPU's context bank
+ * @set_ttbr0_cfg: Set the TTBR0 config for the GPU's context bank. A
+ * NULL config disables TTBR0 translation, otherwise
+ * TTBR0 translation is enabled with the specified cfg
+ * @get_fault_info: Called by the GPU fault handler to get information about
+ * the fault
+ * @set_stall: Configure whether stall on fault (CFCFG) is enabled. Call
+ * before set_ttbr0_cfg(). If stalling on fault is enabled,
+ * the GPU driver must call resume_translation()
+ * @resume_translation: Resume translation after a fault
+ *
+ * The GPU driver (drm/msm) and adreno-smmu work together to control
+ * the GPU's SMMU instance. This is by necessity, as the GPU directly
+ * updates the SMMU for context switches, while we do not want to
+ * duplicate all of the initial setup logic from arm-smmu.
+ *
+ * This private interface is used for the two drivers to coordinate. The
+ * cookie and callback functions are populated when the GPU driver attaches
+ * its domain.
+ */
+struct adreno_smmu_priv {
+ const void *cookie;
+ const struct io_pgtable_cfg *(*get_ttbr1_cfg)(const void *cookie);
+ int (*set_ttbr0_cfg)(const void *cookie, const struct io_pgtable_cfg *cfg);
+ void (*get_fault_info)(const void *cookie, struct adreno_smmu_fault_info *info);
+ void (*set_stall)(const void *cookie, bool enabled);
+ void (*resume_translation)(const void *cookie, bool terminate);
+};
+
+#endif /* __ADRENO_SMMU_PRIV_H */
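/*
 * Editor's note: a sketch of the GPU side consuming the interface; the
 * function name is hypothetical and error handling is elided.
 */
static void example_gpu_fault_dump(struct adreno_smmu_priv *priv)
{
	struct adreno_smmu_fault_info info;

	priv->get_fault_info(priv->cookie, &info);
	pr_err("gpu fault: FAR=%llx FSR=%x TTBR0=%llx\n",
	       info.far, info.fsr, info.ttbr0);
}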
diff --git a/include/linux/aer.h b/include/linux/aer.h
index fa19e01f418a..97f64ba1b34a 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -44,8 +44,7 @@ struct aer_capability_regs {
/* PCIe port driver needs this function to enable AER */
int pci_enable_pcie_error_reporting(struct pci_dev *dev);
int pci_disable_pcie_error_reporting(struct pci_dev *dev);
-int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);
-int pci_cleanup_aer_error_status_regs(struct pci_dev *dev);
+int pci_aer_clear_nonfatal_status(struct pci_dev *dev);
void pci_save_aer_state(struct pci_dev *dev);
void pci_restore_aer_state(struct pci_dev *dev);
#else
@@ -57,11 +56,7 @@ static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
return -EINVAL;
}
-static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
-{
- return -EINVAL;
-}
-static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
+static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
{
return -EINVAL;
}
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index 49e5383d4222..17fa26215292 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -13,6 +13,7 @@
#include <linux/compiler.h>
+struct clk;
struct device;
struct ata_port_info;
struct ahci_host_priv;
@@ -21,8 +22,12 @@ struct scsi_host_template;
int ahci_platform_enable_phys(struct ahci_host_priv *hpriv);
void ahci_platform_disable_phys(struct ahci_host_priv *hpriv);
+struct clk *ahci_platform_find_clk(struct ahci_host_priv *hpriv,
+ const char *con_id);
int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
+int ahci_platform_deassert_rsts(struct ahci_host_priv *hpriv);
+int ahci_platform_assert_rsts(struct ahci_host_priv *hpriv);
int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv);
void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv);
int ahci_platform_enable_resources(struct ahci_host_priv *hpriv);
@@ -41,6 +46,7 @@ int ahci_platform_resume_host(struct device *dev);
int ahci_platform_suspend(struct device *dev);
int ahci_platform_resume(struct device *dev);
-#define AHCI_PLATFORM_GET_RESETS 0x01
+#define AHCI_PLATFORM_GET_RESETS BIT(0)
+#define AHCI_PLATFORM_RST_TRIGGER BIT(1)
#endif /* _AHCI_PLATFORM_H */
diff --git a/include/linux/aio.h b/include/linux/aio.h
index b83e68dd006f..86892a4fe7c8 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -20,8 +20,4 @@ static inline void kiocb_set_cancel_fn(struct kiocb *req,
kiocb_cancel_fn *cancel) { }
#endif /* CONFIG_AIO */
-/* for sysctl: */
-extern unsigned long aio_nr;
-extern unsigned long aio_max_nr;
-
#endif /* __LINUX__AIO_H */
diff --git a/include/linux/align.h b/include/linux/align.h
new file mode 100644
index 000000000000..2b4acec7b95a
--- /dev/null
+++ b/include/linux/align.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ALIGN_H
+#define _LINUX_ALIGN_H
+
+#include <linux/const.h>
+
+/* @a must be a power of 2 */
+#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
+#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
+#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
+#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
+#define PTR_ALIGN_DOWN(p, a) ((typeof(p))ALIGN_DOWN((unsigned long)(p), (a)))
+#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
+
+#endif /* _LINUX_ALIGN_H */
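/*
 * Editor's note: worked values for the helpers above (an illustrative
 * sketch; the results follow from the round-up/round-down mask arithmetic).
 */
static void example_align(void)
{
	unsigned long up   = ALIGN(1000, 256);		/* 1024 */
	unsigned long down = ALIGN_DOWN(1000, 256);	/* 768 */
	bool ok = IS_ALIGNED(4096, 256);		/* true */

	(void)up; (void)down; (void)ok;
}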
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 26f0ecf401ea..5001e14c5c06 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -65,19 +65,33 @@ struct amba_device {
struct device dev;
struct resource res;
struct clk *pclk;
+ struct device_dma_parameters dma_parms;
unsigned int periphid;
+ struct mutex periphid_lock;
unsigned int cid;
struct amba_cs_uci_id uci;
unsigned int irq[AMBA_NR_IRQS];
- char *driver_override;
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
};
struct amba_driver {
struct device_driver drv;
int (*probe)(struct amba_device *, const struct amba_id *);
- int (*remove)(struct amba_device *);
+ void (*remove)(struct amba_device *);
void (*shutdown)(struct amba_device *);
const struct amba_id *id_table;
+ /*
+ * Most device drivers need not care about this flag as long as all
+ * DMA is handled through the kernel DMA API. Some special drivers,
+ * for example VFIO drivers, know how to manage DMA themselves and
+ * set this flag so that the IOMMU layer will allow them to set up
+ * and manage their own I/O address space.
+ */
+ bool driver_managed_dma;
};
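/*
 * Editor's note: a minimal AMBA driver skeleton under the new prototype,
 * where remove() returns void; the periphid and names are hypothetical.
 */
static int example_amba_probe(struct amba_device *adev, const struct amba_id *id)
{
	return 0;
}

static void example_amba_remove(struct amba_device *adev)
{
}

static const struct amba_id example_amba_ids[] = {
	{ .id = 0x00041000, .mask = 0x000fffff },	/* hypothetical */
	{ 0, 0 },
};

static struct amba_driver example_amba_driver = {
	.drv		= { .name = "example-amba" },
	.probe		= example_amba_probe,
	.remove		= example_amba_remove,
	.id_table	= example_amba_ids,
};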
/*
@@ -89,14 +103,8 @@ enum amba_vendor {
AMBA_VENDOR_ST = 0x80,
AMBA_VENDOR_QCOM = 0x51,
AMBA_VENDOR_LSI = 0xb6,
- AMBA_VENDOR_LINUX = 0xfe, /* This value is not official */
};
-/* This is used to generate pseudo-ID for AMBA device */
-#define AMBA_LINUX_ID(conf, rev, part) \
- (((conf) & 0xff) << 24 | ((rev) & 0xf) << 20 | \
- AMBA_VENDOR_LINUX << 12 | ((part) & 0xfff))
-
extern struct bus_type amba_bustype;
#define to_amba_device(d) container_of(d, struct amba_device, dev)
@@ -104,55 +112,27 @@ extern struct bus_type amba_bustype;
#define amba_get_drvdata(d) dev_get_drvdata(&d->dev)
#define amba_set_drvdata(d,p) dev_set_drvdata(&d->dev, p)
+#ifdef CONFIG_ARM_AMBA
int amba_driver_register(struct amba_driver *);
void amba_driver_unregister(struct amba_driver *);
+#else
+static inline int amba_driver_register(struct amba_driver *drv)
+{
+ return -EINVAL;
+}
+static inline void amba_driver_unregister(struct amba_driver *drv)
+{
+}
+#endif
+
struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t);
void amba_device_put(struct amba_device *);
int amba_device_add(struct amba_device *, struct resource *);
int amba_device_register(struct amba_device *, struct resource *);
-struct amba_device *amba_apb_device_add(struct device *parent, const char *name,
- resource_size_t base, size_t size,
- int irq1, int irq2, void *pdata,
- unsigned int periphid);
-struct amba_device *amba_ahb_device_add(struct device *parent, const char *name,
- resource_size_t base, size_t size,
- int irq1, int irq2, void *pdata,
- unsigned int periphid);
-struct amba_device *
-amba_apb_device_add_res(struct device *parent, const char *name,
- resource_size_t base, size_t size, int irq1,
- int irq2, void *pdata, unsigned int periphid,
- struct resource *resbase);
-struct amba_device *
-amba_ahb_device_add_res(struct device *parent, const char *name,
- resource_size_t base, size_t size, int irq1,
- int irq2, void *pdata, unsigned int periphid,
- struct resource *resbase);
void amba_device_unregister(struct amba_device *);
-struct amba_device *amba_find_device(const char *, struct device *, unsigned int, unsigned int);
int amba_request_regions(struct amba_device *, const char *);
void amba_release_regions(struct amba_device *);
-static inline int amba_pclk_enable(struct amba_device *dev)
-{
- return clk_enable(dev->pclk);
-}
-
-static inline void amba_pclk_disable(struct amba_device *dev)
-{
- clk_disable(dev->pclk);
-}
-
-static inline int amba_pclk_prepare(struct amba_device *dev)
-{
- return clk_prepare(dev->pclk);
-}
-
-static inline void amba_pclk_unprepare(struct amba_device *dev)
-{
- clk_unprepare(dev->pclk);
-}
-
/* Some drivers don't use the struct amba_device */
#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff)
#define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f)
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
index c92ebc39fc1f..6f96dc2209c0 100644
--- a/include/linux/amba/mmci.h
+++ b/include/linux/amba/mmci.h
@@ -13,17 +13,11 @@
* @ocr_mask: available voltages on the 4 pins from the block, this
* is ignored if a regulator is used, see the MMC_VDD_* masks in
* mmc/host.h
- * @ios_handler: a callback function to act on specfic ios changes,
- * used for example to control a levelshifter
- * mask into a value to be binary (or set some other custom bits
- * in MMCIPWR) or:ed and written into the MMCIPWR register of the
- * block. May also control external power based on the power_mode.
* @status: if no GPIO line was given to the block in this function will
* be called to determine whether a card is present in the MMC slot or not
*/
struct mmci_platform_data {
unsigned int ocr_mask;
- int (*ios_handler)(struct device *, struct mmc_ios *);
unsigned int (*status)(struct device *);
};
diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h
index 131b27c97209..9bf58aac0df2 100644
--- a/include/linux/amba/pl022.h
+++ b/include/linux/amba/pl022.h
@@ -223,10 +223,6 @@ struct dma_chan;
/**
* struct pl022_ssp_master - device.platform_data for SPI controller devices.
* @bus_id: identifier for this bus
- * @num_chipselect: chipselects are used to distinguish individual
- * SPI slaves, and are numbered from zero to num_chipselects - 1.
- * each slave has a chipselect signal, but it's common that not
- * every chipselect is connected to a slave.
* @enable_dma: if true enables DMA driven transfers.
* @dma_rx_param: parameter to locate an RX DMA channel.
* @dma_tx_param: parameter to locate a TX DMA channel.
@@ -235,18 +231,15 @@ struct dma_chan;
* indicates no delay and the device will be suspended immediately.
* @rt: indicates the controller should run the message pump with realtime
* priority to minimise the transfer latency on the bus.
- * @chipselects: list of <num_chipselects> chip select gpios
*/
struct pl022_ssp_controller {
u16 bus_id;
- u8 num_chipselect;
u8 enable_dma:1;
bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
void *dma_rx_param;
void *dma_tx_param;
int autosuspend_delay;
bool rt;
- int *chipselects;
};
/**
@@ -265,8 +258,6 @@ struct pl022_ssp_controller {
* @duplex: Microwire interface: Full/Half duplex
* @clkdelay: on the PL023 variant, the delay in feeback clock cycles
* before sampling the incoming line
- * @cs_control: function pointer to board-specific function to
- * assert/deassert I/O port to control HW generation of devices chip-select.
*/
struct pl022_config_chip {
enum ssp_interface iface;
@@ -280,7 +271,6 @@ struct pl022_config_chip {
enum ssp_microwire_wait_state wait_state;
enum ssp_duplex duplex;
enum ssp_clkdelay clkdelay;
- void (*cs_control) (u32 control);
};
#endif /* _SSP_PL022_H */
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
index 21e950e4ab62..953e6f12fa1c 100644
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -10,6 +10,8 @@
#include <linux/types.h>
+struct amd_iommu;
+
/*
* This is mainly used to communicate information back-and-forth
* between SVM and IOMMU for setting up and tearing down posted
@@ -33,24 +35,6 @@ extern int amd_iommu_detect(void);
extern int amd_iommu_init_hardware(void);
/**
- * amd_iommu_enable_device_erratum() - Enable erratum workaround for device
- * in the IOMMUv2 driver
- * @pdev: The PCI device the workaround is necessary for
- * @erratum: The erratum workaround to enable
- *
- * The function needs to be called before amd_iommu_init_device().
- * Possible values for the erratum number are for now:
- * - AMD_PRI_DEV_ERRATUM_ENABLE_RESET - Reset PRI capability when PRI
- * is enabled
- * - AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE - Limit number of outstanding PRI
- * requests to one
- */
-#define AMD_PRI_DEV_ERRATUM_ENABLE_RESET 0
-#define AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE 1
-
-extern void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum);
-
-/**
* amd_iommu_init_device() - Init device for use with IOMMUv2 driver
* @pdev: The PCI device to initialize
* @pasids: Number of PASIDs to support for this device
@@ -76,7 +60,7 @@ extern void amd_iommu_free_device(struct pci_dev *pdev);
*
* The function returns 0 on success or a negative value on error.
*/
-extern int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
+extern int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
struct task_struct *task);
/**
@@ -88,7 +72,7 @@ extern int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
* When this function returns the device is no longer using the PASID
* and the PASID is no longer bound to its task.
*/
-extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid);
+extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid);
/**
* amd_iommu_set_invalid_ppr_cb() - Register a call-back for failed
@@ -114,7 +98,7 @@ extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid);
#define AMD_IOMMU_INV_PRI_RSP_FAIL 2
typedef int (*amd_iommu_invalid_ppr_cb)(struct pci_dev *pdev,
- int pasid,
+ u32 pasid,
unsigned long address,
u16);
@@ -166,7 +150,7 @@ extern int amd_iommu_device_info(struct pci_dev *pdev,
* @cb: The call-back function
*/
-typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, int pasid);
+typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, u32 pasid);
extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
amd_iommu_invalidate_ctx cb);
@@ -212,4 +196,18 @@ static inline int amd_iommu_deactivate_guest_mode(void *data)
}
#endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
+int amd_iommu_get_num_iommus(void);
+bool amd_iommu_pc_supported(void);
+u8 amd_iommu_pc_get_max_banks(unsigned int idx);
+u8 amd_iommu_pc_get_max_counters(unsigned int idx);
+int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn,
+ u64 *value);
+int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn,
+ u64 *value);
+struct amd_iommu *get_amd_iommu(unsigned int idx);
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+int amd_iommu_snp_enable(void);
+#endif
+
#endif /* _ASM_X86_AMD_IOMMU_H */
diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h
new file mode 100644
index 000000000000..1c4b8659f171
--- /dev/null
+++ b/include/linux/amd-pstate.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/include/linux/amd-pstate.h
+ *
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ *
+ * Author: Meng Li <li.meng@amd.com>
+ */
+
+#ifndef _LINUX_AMD_PSTATE_H
+#define _LINUX_AMD_PSTATE_H
+
+#include <linux/pm_qos.h>
+
+/*********************************************************************
+ * AMD P-state INTERFACE *
+ *********************************************************************/
+/**
+ * struct amd_aperf_mperf
+ * @aperf: actual performance frequency clock count
+ * @mperf: maximum performance frequency clock count
+ * @tsc: time stamp counter
+ */
+struct amd_aperf_mperf {
+ u64 aperf;
+ u64 mperf;
+ u64 tsc;
+};
+
+/**
+ * struct amd_cpudata - private CPU data for AMD P-State
+ * @cpu: CPU number
+ * @req: constraint request to apply
+ * @cppc_req_cached: cached performance request hints
+ * @highest_perf: the maximum performance an individual processor may reach,
+ * assuming ideal conditions
+ * @nominal_perf: the maximum sustained performance level of the processor,
+ * assuming ideal operating conditions
+ * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
+ * savings are achieved
+ * @lowest_perf: the absolute lowest performance level of the processor
+ * @max_freq: the frequency that mapped to highest_perf
+ * @min_freq: the frequency that mapped to lowest_perf
+ * @nominal_freq: the frequency that mapped to nominal_perf
+ * @lowest_nonlinear_freq: the frequency that mapped to lowest_nonlinear_perf
+ * @cur: Difference of Aperf/Mperf/tsc count between last and current sample
+ * @prev: Last Aperf/Mperf/tsc count value read from register
+ * @freq: current cpu frequency value
+ * @boost_supported: whether the processor or SBIOS supports boost mode
+ *
+ * The amd_cpudata is the key private data for each CPU thread in AMD P-State,
+ * and represents all the attributes and goals that AMD P-State requests at
+ * runtime.
+ */
+struct amd_cpudata {
+ int cpu;
+
+ struct freq_qos_request req[2];
+ u64 cppc_req_cached;
+
+ u32 highest_perf;
+ u32 nominal_perf;
+ u32 lowest_nonlinear_perf;
+ u32 lowest_perf;
+
+ u32 max_freq;
+ u32 min_freq;
+ u32 nominal_freq;
+ u32 lowest_nonlinear_freq;
+
+ struct amd_aperf_mperf cur;
+ struct amd_aperf_mperf prev;
+
+ u64 freq;
+ bool boost_supported;
+};
+
+#endif /* _LINUX_AMD_PSTATE_H */
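/*
 * Editor's note: a sketch of the conventional APERF/MPERF arithmetic the
 * cur/prev samples are meant for; this is illustrative, not code from the
 * patch, and it ignores overflow of the intermediate product.
 */
static u64 example_delivered_freq(struct amd_cpudata *cpudata)
{
	u64 d_aperf = cpudata->cur.aperf - cpudata->prev.aperf;
	u64 d_mperf = cpudata->cur.mperf - cpudata->prev.mperf;

	if (!d_mperf)
		return cpudata->freq;
	/* delivered frequency scales max_freq by the aperf/mperf ratio */
	return div64_u64((u64)cpudata->max_freq * d_aperf, d_mperf);
}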
diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h
index d0d7d96261ad..5deaddbd7927 100644
--- a/include/linux/anon_inodes.h
+++ b/include/linux/anon_inodes.h
@@ -10,12 +10,21 @@
#define _LINUX_ANON_INODES_H
struct file_operations;
+struct inode;
struct file *anon_inode_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags);
+struct file *anon_inode_getfile_secure(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags,
+ const struct inode *context_inode);
int anon_inode_getfd(const char *name, const struct file_operations *fops,
void *priv, int flags);
+int anon_inode_getfd_secure(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags,
+ const struct inode *context_inode);
#endif /* _LINUX_ANON_INODES_H */
diff --git a/include/linux/aperture.h b/include/linux/aperture.h
new file mode 100644
index 000000000000..442f15a57cad
--- /dev/null
+++ b/include/linux/aperture.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef _LINUX_APERTURE_H_
+#define _LINUX_APERTURE_H_
+
+#include <linux/types.h>
+
+struct pci_dev;
+struct platform_device;
+
+#if defined(CONFIG_APERTURE_HELPERS)
+int devm_aperture_acquire_for_platform_device(struct platform_device *pdev,
+ resource_size_t base,
+ resource_size_t size);
+
+int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size,
+ bool primary, const char *name);
+
+int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name);
+#else
+static inline int devm_aperture_acquire_for_platform_device(struct platform_device *pdev,
+ resource_size_t base,
+ resource_size_t size)
+{
+ return 0;
+}
+
+static inline int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size,
+ bool primary, const char *name)
+{
+ return 0;
+}
+
+static inline int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name)
+{
+ return 0;
+}
+#endif
+
+/**
+ * aperture_remove_all_conflicting_devices - remove all existing framebuffers
+ * @primary: also kick vga16fb if present; only relevant for VGA devices
+ * @name: a descriptive name of the requesting driver
+ *
+ * This function removes all graphics device drivers. Use this function on systems
+ * that can have their framebuffer located anywhere in memory.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise
+ */
+static inline int aperture_remove_all_conflicting_devices(bool primary, const char *name)
+{
+ return aperture_remove_conflicting_devices(0, (resource_size_t)-1, primary, name);
+}
+
+#endif
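/*
 * Editor's note: a sketch of the typical call at the top of a PCI graphics
 * driver's probe(), before the driver takes over the device; the driver
 * name is hypothetical.
 */
static int example_gpu_pci_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	int ret = aperture_remove_conflicting_pci_devices(pdev, "exampledrm");

	if (ret)
		return ret;
	/* continue with normal device setup */
	return 0;
}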
diff --git a/include/linux/apple-mailbox.h b/include/linux/apple-mailbox.h
new file mode 100644
index 000000000000..720fbb70294a
--- /dev/null
+++ b/include/linux/apple-mailbox.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple mailbox message format
+ *
+ * Copyright (C) 2021 The Asahi Linux Contributors
+ */
+
+#ifndef _LINUX_APPLE_MAILBOX_H_
+#define _LINUX_APPLE_MAILBOX_H_
+
+#include <linux/types.h>
+
+/* encodes a single 96-bit message sent over the single channel */
+struct apple_mbox_msg {
+ u64 msg0;
+ u32 msg1;
+};
+
+#endif
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 3015ecbb90b1..a07b510e7dc5 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -11,35 +11,66 @@
void topology_normalize_cpu_scale(void);
int topology_update_cpu_topology(void);
+#ifdef CONFIG_ACPI_CPPC_LIB
+void topology_init_cpu_capacity_cppc(void);
+#endif
+
struct device_node;
bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
DECLARE_PER_CPU(unsigned long, cpu_scale);
-struct sched_domain;
-static inline
-unsigned long topology_get_cpu_scale(int cpu)
+static inline unsigned long topology_get_cpu_scale(int cpu)
{
return per_cpu(cpu_scale, cpu);
}
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
-DECLARE_PER_CPU(unsigned long, freq_scale);
+DECLARE_PER_CPU(unsigned long, arch_freq_scale);
-static inline
-unsigned long topology_get_freq_scale(int cpu)
+static inline unsigned long topology_get_freq_scale(int cpu)
{
- return per_cpu(freq_scale, cpu);
+ return per_cpu(arch_freq_scale, cpu);
}
+void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
+ unsigned long max_freq);
+bool topology_scale_freq_invariant(void);
+
+enum scale_freq_source {
+ SCALE_FREQ_SOURCE_CPUFREQ = 0,
+ SCALE_FREQ_SOURCE_ARCH,
+ SCALE_FREQ_SOURCE_CPPC,
+};
+
+struct scale_freq_data {
+ enum scale_freq_source source;
+ void (*set_freq_scale)(void);
+};
+
+void topology_scale_freq_tick(void);
+void topology_set_scale_freq_source(struct scale_freq_data *data, const struct cpumask *cpus);
+void topology_clear_scale_freq_source(enum scale_freq_source source, const struct cpumask *cpus);
+
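/*
 * Editor's note: a sketch of registering a tick-driven frequency-scale
 * source; the callback body and names are hypothetical.
 */
static void example_set_freq_scale(void)
{
	/* compute and publish this CPU's frequency scale on each tick */
}

static struct scale_freq_data example_sfd = {
	.source		= SCALE_FREQ_SOURCE_ARCH,
	.set_freq_scale	= example_set_freq_scale,
};

/* topology_set_scale_freq_source(&example_sfd, cpu_possible_mask); */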
+DECLARE_PER_CPU(unsigned long, thermal_pressure);
+
+static inline unsigned long topology_get_thermal_pressure(int cpu)
+{
+ return per_cpu(thermal_pressure, cpu);
+}
+
+void topology_update_thermal_pressure(const struct cpumask *cpus,
+ unsigned long capped_freq);
+
struct cpu_topology {
int thread_id;
int core_id;
+ int cluster_id;
int package_id;
- int llc_id;
cpumask_t thread_sibling;
cpumask_t core_sibling;
+ cpumask_t cluster_sibling;
cpumask_t llc_sibling;
};
@@ -47,13 +78,16 @@ struct cpu_topology {
extern struct cpu_topology cpu_topology[NR_CPUS];
#define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id)
+#define topology_cluster_id(cpu) (cpu_topology[cpu].cluster_id)
#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
+#define topology_cluster_cpumask(cpu) (&cpu_topology[cpu].cluster_sibling)
#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling)
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
+const struct cpumask *cpu_clustergroup_mask(int cpu);
void update_siblings_masks(unsigned int cpu);
void remove_cpu_topology(unsigned int cpuid);
void reset_cpu_topology(void);
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 59494df0f55b..220c8c60e021 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -5,12 +5,15 @@
#ifndef __LINUX_ARM_SMCCC_H
#define __LINUX_ARM_SMCCC_H
+#include <linux/init.h>
#include <uapi/linux/const.h>
/*
* This file provides common defines for ARM SMC Calling Convention as
* specified in
- * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
+ * https://developer.arm.com/docs/den0028/latest
+ *
+ * This code is up-to-date with version DEN 0028 C
*/
#define ARM_SMCCC_STD_CALL _AC(0,U)
@@ -46,16 +49,23 @@
#define ARM_SMCCC_OWNER_OEM 3
#define ARM_SMCCC_OWNER_STANDARD 4
#define ARM_SMCCC_OWNER_STANDARD_HYP 5
+#define ARM_SMCCC_OWNER_VENDOR_HYP 6
#define ARM_SMCCC_OWNER_TRUSTED_APP 48
#define ARM_SMCCC_OWNER_TRUSTED_APP_END 49
#define ARM_SMCCC_OWNER_TRUSTED_OS 50
#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63
+#define ARM_SMCCC_FUNC_QUERY_CALL_UID 0xff01
+
#define ARM_SMCCC_QUIRK_NONE 0
#define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */
#define ARM_SMCCC_VERSION_1_0 0x10000
#define ARM_SMCCC_VERSION_1_1 0x10001
+#define ARM_SMCCC_VERSION_1_2 0x10002
+#define ARM_SMCCC_VERSION_1_3 0x10003
+
+#define ARM_SMCCC_1_3_SVE_HINT 0x10000
#define ARM_SMCCC_VERSION_FUNC_ID \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
@@ -67,6 +77,11 @@
ARM_SMCCC_SMC_32, \
0, 1)
+#define ARM_SMCCC_ARCH_SOC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 2)
+
#define ARM_SMCCC_ARCH_WORKAROUND_1 \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
ARM_SMCCC_SMC_32, \
@@ -77,6 +92,105 @@
ARM_SMCCC_SMC_32, \
0, 0x7fff)
+#define ARM_SMCCC_ARCH_WORKAROUND_3 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0x3fff)
+
+#define ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_FUNC_QUERY_CALL_UID)
+
+/* KVM UID value: 28b46fb6-2ec5-11e9-a9ca-4b564d003a74 */
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 0xb66fb428U
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 0xe911c52eU
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 0x564bcaa9U
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3 0x743a004dU
+
+/* KVM "vendor specific" services */
+#define ARM_SMCCC_KVM_FUNC_FEATURES 0
+#define ARM_SMCCC_KVM_FUNC_PTP 1
+#define ARM_SMCCC_KVM_FUNC_FEATURES_2 127
+#define ARM_SMCCC_KVM_NUM_FUNCS 128
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_FEATURES)
+
+#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1
+
+/*
+ * ptp_kvm is a feature used for time synchronization between a VM and
+ * its host. The ptp_kvm module in the guest kernel obtains the service
+ * from the host using this hypercall ID.
+ */
+#define ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_PTP)
+
+/* ptp_kvm counter type ID */
+#define KVM_PTP_VIRT_COUNTER 0
+#define KVM_PTP_PHYS_COUNTER 1
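/*
 * Editor's note: a sketch of the guest-side query as the ptp_kvm driver
 * performs it; the register meanings noted below follow the ptp_kvm
 * documentation and should be treated as an assumption here.
 */
static void example_ptp_query(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID,
			     KVM_PTP_VIRT_COUNTER, &res);
	/* res.a0/a1: host wall-clock time, res.a2/a3: counter cycles */
}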
+
+/* Paravirtualised time calls (defined by ARM DEN0057A) */
+#define ARM_SMCCC_HV_PV_TIME_FEATURES \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_STANDARD_HYP, \
+ 0x20)
+
+#define ARM_SMCCC_HV_PV_TIME_ST \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_STANDARD_HYP, \
+ 0x21)
+
+/* TRNG entropy source calls (defined by ARM DEN0098) */
+#define ARM_SMCCC_TRNG_VERSION \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x50)
+
+#define ARM_SMCCC_TRNG_FEATURES \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x51)
+
+#define ARM_SMCCC_TRNG_GET_UUID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x52)
+
+#define ARM_SMCCC_TRNG_RND32 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x53)
+
+#define ARM_SMCCC_TRNG_RND64 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x53)
+
+/*
+ * Return codes defined in ARM DEN 0070A
+ * ARM DEN 0070A is now merged/consolidated into ARM DEN 0028 C
+ */
+#define SMCCC_RET_SUCCESS 0
+#define SMCCC_RET_NOT_SUPPORTED -1
+#define SMCCC_RET_NOT_REQUIRED -2
+#define SMCCC_RET_INVALID_PARAMETER -3
+
#ifndef __ASSEMBLY__
#include <linux/linkage.h>
@@ -98,6 +212,21 @@ enum arm_smccc_conduit {
enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void);
/**
+ * arm_smccc_get_version()
+ *
+ * Returns the version to be used for SMCCCv1.1 or later.
+ *
+ * When SMCCCv1.1 or above is not present, returns SMCCCv1.0, but this
+ * does not imply the presence of firmware or a valid conduit. Callers
+ * handling SMCCCv1.0 must determine the conduit by other means.
+ */
+u32 arm_smccc_get_version(void);
+
+void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit);
+
+extern u64 smccc_has_sve_hint;
+
+/**
* struct arm_smccc_res - Result from SMC/HVC call
* @a0-a3 result values from registers 0 to 3
*/
@@ -108,6 +237,61 @@ struct arm_smccc_res {
unsigned long a3;
};
+#ifdef CONFIG_ARM64
+/**
+ * struct arm_smccc_1_2_regs - Arguments for or Results from SMC/HVC call
+ * @a0-a17 argument values from registers 0 to 17
+ */
+struct arm_smccc_1_2_regs {
+ unsigned long a0;
+ unsigned long a1;
+ unsigned long a2;
+ unsigned long a3;
+ unsigned long a4;
+ unsigned long a5;
+ unsigned long a6;
+ unsigned long a7;
+ unsigned long a8;
+ unsigned long a9;
+ unsigned long a10;
+ unsigned long a11;
+ unsigned long a12;
+ unsigned long a13;
+ unsigned long a14;
+ unsigned long a15;
+ unsigned long a16;
+ unsigned long a17;
+};
+
+/**
+ * arm_smccc_1_2_hvc() - make HVC calls
+ * @args: arguments passed via struct arm_smccc_1_2_regs
+ * @res: result values via struct arm_smccc_1_2_regs
+ *
+ * This function is used to make HVC calls following SMC Calling Convention
+ * v1.2 or above. The contents of the supplied arguments are copied from the
+ * structure to registers prior to the HVC instruction. The return values
+ * are updated with the content from registers on return from the HVC
+ * instruction.
+ */
+asmlinkage void arm_smccc_1_2_hvc(const struct arm_smccc_1_2_regs *args,
+ struct arm_smccc_1_2_regs *res);
+
+/**
+ * arm_smccc_1_2_smc() - make SMC calls
+ * @args: arguments passed via struct arm_smccc_1_2_regs
+ * @res: result values via struct arm_smccc_1_2_regs
+ *
+ * This function is used to make SMC calls following SMC Calling Convention
+ * v1.2 or above. The contents of the supplied arguments are copied from the
+ * structure to registers prior to the SMC instruction. The return values
+ * are updated with the content from registers on return from the SMC
+ * instruction.
+ */
+asmlinkage void arm_smccc_1_2_smc(const struct arm_smccc_1_2_regs *args,
+ struct arm_smccc_1_2_regs *res);
+#endif
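/*
 * Editor's note: a sketch of an SMCCC v1.2 call using the extended register
 * set; the function ID is reused from above purely for illustration.
 */
static void example_smccc_1_2(void)
{
	struct arm_smccc_1_2_regs args = {
		.a0 = ARM_SMCCC_VERSION_FUNC_ID,
	};
	struct arm_smccc_1_2_regs res;

	arm_smccc_1_2_smc(&args, &res);
	/* res.a0 holds the SMCCC return code */
}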
+
/**
* struct arm_smccc_quirk - Contains quirk information
* @id: quirk identification
@@ -122,6 +306,15 @@ struct arm_smccc_quirk {
};
/**
+ * __arm_smccc_sve_check() - Set the SVE hint bit when doing SMC calls
+ *
+ * Sets the SMCCC hint bit to indicate whether there is live state in the
+ * SVE registers. This modifies x0 in place and should never be called
+ * from C code.
+ */
+asmlinkage unsigned long __arm_smccc_sve_check(unsigned long x0);
+
+/**
* __arm_smccc_smc() - make SMC calls
* @a0-a7: arguments passed in registers 0 to 7
* @res: result values from registers 0 to 3
@@ -133,10 +326,20 @@ struct arm_smccc_quirk {
* from register 0 to 3 on return from the SMC instruction. An optional
* quirk structure provides vendor specific behavior.
*/
+#ifdef CONFIG_HAVE_ARM_SMCCC
asmlinkage void __arm_smccc_smc(unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3, unsigned long a4,
unsigned long a5, unsigned long a6, unsigned long a7,
struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
+#else
+static inline void __arm_smccc_smc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3, unsigned long a4,
+ unsigned long a5, unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res, struct arm_smccc_quirk *quirk)
+{
+ *res = (struct arm_smccc_res){};
+}
+#endif
/**
* __arm_smccc_hvc() - make HVC calls
@@ -178,94 +381,88 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
#endif
+/* nVHE hypervisor doesn't have a current thread so needs separate checks */
+#if defined(CONFIG_ARM64_SVE) && !defined(__KVM_NVHE_HYPERVISOR__)
+
+#define SMCCC_SVE_CHECK ALTERNATIVE("nop \n", "bl __arm_smccc_sve_check \n", \
+ ARM64_SVE)
+#define smccc_sve_clobbers "x16", "x30", "cc",
+
+#else
+
+#define SMCCC_SVE_CHECK
+#define smccc_sve_clobbers
+
+#endif
+
#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
#define __count_args(...) \
___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)
-#define __constraint_write_0 \
- "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3)
-#define __constraint_write_1 \
- "+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3)
-#define __constraint_write_2 \
- "+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3)
-#define __constraint_write_3 \
- "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3)
-#define __constraint_write_4 __constraint_write_3
-#define __constraint_write_5 __constraint_write_4
-#define __constraint_write_6 __constraint_write_5
-#define __constraint_write_7 __constraint_write_6
-
-#define __constraint_read_0
-#define __constraint_read_1
-#define __constraint_read_2
-#define __constraint_read_3
-#define __constraint_read_4 "r" (r4)
-#define __constraint_read_5 __constraint_read_4, "r" (r5)
-#define __constraint_read_6 __constraint_read_5, "r" (r6)
-#define __constraint_read_7 __constraint_read_6, "r" (r7)
+#define __constraint_read_0 "r" (arg0)
+#define __constraint_read_1 __constraint_read_0, "r" (arg1)
+#define __constraint_read_2 __constraint_read_1, "r" (arg2)
+#define __constraint_read_3 __constraint_read_2, "r" (arg3)
+#define __constraint_read_4 __constraint_read_3, "r" (arg4)
+#define __constraint_read_5 __constraint_read_4, "r" (arg5)
+#define __constraint_read_6 __constraint_read_5, "r" (arg6)
+#define __constraint_read_7 __constraint_read_6, "r" (arg7)
#define __declare_arg_0(a0, res) \
struct arm_smccc_res *___res = res; \
- register unsigned long r0 asm("r0") = (u32)a0; \
- register unsigned long r1 asm("r1"); \
- register unsigned long r2 asm("r2"); \
- register unsigned long r3 asm("r3")
+ register unsigned long arg0 asm("r0") = (u32)a0
#define __declare_arg_1(a0, a1, res) \
typeof(a1) __a1 = a1; \
struct arm_smccc_res *___res = res; \
- register unsigned long r0 asm("r0") = (u32)a0; \
- register unsigned long r1 asm("r1") = __a1; \
- register unsigned long r2 asm("r2"); \
- register unsigned long r3 asm("r3")
+ register unsigned long arg0 asm("r0") = (u32)a0; \
+ register typeof(a1) arg1 asm("r1") = __a1
#define __declare_arg_2(a0, a1, a2, res) \
typeof(a1) __a1 = a1; \
typeof(a2) __a2 = a2; \
struct arm_smccc_res *___res = res; \
- register unsigned long r0 asm("r0") = (u32)a0; \
- register unsigned long r1 asm("r1") = __a1; \
- register unsigned long r2 asm("r2") = __a2; \
- register unsigned long r3 asm("r3")
+ register unsigned long arg0 asm("r0") = (u32)a0; \
+ register typeof(a1) arg1 asm("r1") = __a1; \
+ register typeof(a2) arg2 asm("r2") = __a2
#define __declare_arg_3(a0, a1, a2, a3, res) \
typeof(a1) __a1 = a1; \
typeof(a2) __a2 = a2; \
typeof(a3) __a3 = a3; \
struct arm_smccc_res *___res = res; \
- register unsigned long r0 asm("r0") = (u32)a0; \
- register unsigned long r1 asm("r1") = __a1; \
- register unsigned long r2 asm("r2") = __a2; \
- register unsigned long r3 asm("r3") = __a3
+ register unsigned long arg0 asm("r0") = (u32)a0; \
+ register typeof(a1) arg1 asm("r1") = __a1; \
+ register typeof(a2) arg2 asm("r2") = __a2; \
+ register typeof(a3) arg3 asm("r3") = __a3
#define __declare_arg_4(a0, a1, a2, a3, a4, res) \
typeof(a4) __a4 = a4; \
__declare_arg_3(a0, a1, a2, a3, res); \
- register unsigned long r4 asm("r4") = __a4
+ register typeof(a4) arg4 asm("r4") = __a4
#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \
typeof(a5) __a5 = a5; \
__declare_arg_4(a0, a1, a2, a3, a4, res); \
- register unsigned long r5 asm("r5") = __a5
+ register typeof(a5) arg5 asm("r5") = __a5
#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \
typeof(a6) __a6 = a6; \
__declare_arg_5(a0, a1, a2, a3, a4, a5, res); \
- register unsigned long r6 asm("r6") = __a6
+ register typeof(a6) arg6 asm("r6") = __a6
#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \
typeof(a7) __a7 = a7; \
__declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \
- register unsigned long r7 asm("r7") = __a7
+ register typeof(a7) arg7 asm("r7") = __a7
#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__)
#define ___constraints(count) \
- : __constraint_write_ ## count \
: __constraint_read_ ## count \
- : "memory"
+ : smccc_sve_clobbers "memory"
#define __constraints(count) ___constraints(count)
/*
@@ -275,8 +472,14 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
*/
#define __arm_smccc_1_1(inst, ...) \
do { \
+ register unsigned long r0 asm("r0"); \
+ register unsigned long r1 asm("r1"); \
+ register unsigned long r2 asm("r2"); \
+ register unsigned long r3 asm("r3"); \
__declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
- asm volatile(inst "\n" \
+ asm volatile(SMCCC_SVE_CHECK \
+ inst "\n" : \
+ "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3) \
__constraints(__count_args(__VA_ARGS__))); \
if (___res) \
*___res = (typeof(*___res)){r0, r1, r2, r3}; \
@@ -314,11 +517,6 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
*/
#define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
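/*
 * Editor's note: a sketch of a typical v1.1 fast call through the variadic
 * wrappers; querying ARCH_FEATURES for a workaround is the canonical use.
 */
static void example_arch_features(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			  ARM_SMCCC_ARCH_WORKAROUND_3, &res);
	if ((int)res.a0 >= 0) {
		/* the workaround is implemented */
	}
}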
-/* Return codes defined in ARM DEN 0070A */
-#define SMCCC_RET_SUCCESS 0
-#define SMCCC_RET_NOT_SUPPORTED -1
-#define SMCCC_RET_NOT_REQUIRED -2
-
/*
* Like arm_smccc_1_1* but always returns SMCCC_RET_NOT_SUPPORTED.
* Used when the SMCCC conduit is not defined. The empty asm statement
@@ -327,7 +525,7 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
#define __fail_smccc_1_1(...) \
do { \
__declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
- asm ("" __constraints(__count_args(__VA_ARGS__))); \
+ asm ("" : __constraints(__count_args(__VA_ARGS__))); \
if (___res) \
___res->a0 = SMCCC_RET_NOT_SUPPORTED; \
} while (0)
@@ -364,18 +562,5 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
method; \
})
-/* Paravirtualised time calls (defined by ARM DEN0057A) */
-#define ARM_SMCCC_HV_PV_TIME_FEATURES \
- ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
- ARM_SMCCC_SMC_64, \
- ARM_SMCCC_OWNER_STANDARD_HYP, \
- 0x20)
-
-#define ARM_SMCCC_HV_PV_TIME_ST \
- ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
- ARM_SMCCC_SMC_64, \
- ARM_SMCCC_OWNER_STANDARD_HYP, \
- 0x21)
-
#endif /*__ASSEMBLY__*/
#endif /*__LINUX_ARM_SMCCC_H*/
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
new file mode 100644
index 000000000000..5f02d2e6b9d9
--- /dev/null
+++ b/include/linux/arm_ffa.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+#ifndef _LINUX_ARM_FFA_H
+#define _LINUX_ARM_FFA_H
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+
+/* FFA Bus/Device/Driver related */
+struct ffa_device {
+ int vm_id;
+ bool mode_32bit;
+ uuid_t uuid;
+ struct device dev;
+ const struct ffa_ops *ops;
+};
+
+#define to_ffa_dev(d) container_of(d, struct ffa_device, dev)
+
+struct ffa_device_id {
+ uuid_t uuid;
+};
+
+struct ffa_driver {
+ const char *name;
+ int (*probe)(struct ffa_device *sdev);
+ void (*remove)(struct ffa_device *sdev);
+ const struct ffa_device_id *id_table;
+
+ struct device_driver driver;
+};
+
+#define to_ffa_driver(d) container_of(d, struct ffa_driver, driver)
+
+static inline void ffa_dev_set_drvdata(struct ffa_device *fdev, void *data)
+{
+ dev_set_drvdata(&fdev->dev, data);
+}
+
+static inline void *ffa_dev_get_drvdata(struct ffa_device *fdev)
+{
+ return dev_get_drvdata(&fdev->dev);
+}
+
+#if IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT)
+struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
+ const struct ffa_ops *ops);
+void ffa_device_unregister(struct ffa_device *ffa_dev);
+int ffa_driver_register(struct ffa_driver *driver, struct module *owner,
+ const char *mod_name);
+void ffa_driver_unregister(struct ffa_driver *driver);
+bool ffa_device_is_valid(struct ffa_device *ffa_dev);
+
+#else
+static inline
+struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
+ const struct ffa_ops *ops)
+{
+ return NULL;
+}
+
+static inline void ffa_device_unregister(struct ffa_device *dev) {}
+
+static inline int
+ffa_driver_register(struct ffa_driver *driver, struct module *owner,
+ const char *mod_name)
+{
+ return -EINVAL;
+}
+
+static inline void ffa_driver_unregister(struct ffa_driver *driver) {}
+
+static inline
+bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; }
+
+#endif /* CONFIG_ARM_FFA_TRANSPORT */
+
+#define ffa_register(driver) \
+ ffa_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
+#define ffa_unregister(driver) \
+ ffa_driver_unregister(driver)
+
+/**
+ * module_ffa_driver() - Helper macro for registering an FFA driver
+ * @__ffa_driver: ffa_driver structure
+ *
+ * Helper macro for FFA drivers to set up proper module init / exit
+ * functions. Replaces module_init() and module_exit() and keeps people from
+ * printing pointless things to the kernel log when their driver is loaded.
+ */
+#define module_ffa_driver(__ffa_driver) \
+ module_driver(__ffa_driver, ffa_register, ffa_unregister)
+
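+/*
+ * Illustrative skeleton (hypothetical, not part of this header): a minimal
+ * FFA driver using the helpers above. The UUID value and the probe/remove
+ * bodies are placeholders.
+ *
+ *	static const struct ffa_device_id my_ffa_ids[] = {
+ *		{ UUID_INIT(0x12345678, 0x1234, 0x1234, 0x12, 0x34,
+ *			    0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc) },
+ *		{}
+ *	};
+ *
+ *	static int my_ffa_probe(struct ffa_device *ffa_dev)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static void my_ffa_remove(struct ffa_device *ffa_dev)
+ *	{
+ *	}
+ *
+ *	static struct ffa_driver my_ffa_driver = {
+ *		.name		= "my_ffa",
+ *		.probe		= my_ffa_probe,
+ *		.remove		= my_ffa_remove,
+ *		.id_table	= my_ffa_ids,
+ *	};
+ *	module_ffa_driver(my_ffa_driver);
+ */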
+/* FFA transport related */
+struct ffa_partition_info {
+ u16 id;
+ u16 exec_ctxt;
+/* partition supports receipt of direct requests */
+#define FFA_PARTITION_DIRECT_RECV BIT(0)
+/* partition can send direct requests. */
+#define FFA_PARTITION_DIRECT_SEND BIT(1)
+/* partition can send and receive indirect messages. */
+#define FFA_PARTITION_INDIRECT_MSG BIT(2)
+/* partition runs in the AArch64 execution state. */
+#define FFA_PARTITION_AARCH64_EXEC BIT(8)
+ u32 properties;
+ u32 uuid[4];
+};
+
+/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP} which pass data via registers */
+struct ffa_send_direct_data {
+ unsigned long data0; /* w3/x3 */
+ unsigned long data1; /* w4/x4 */
+ unsigned long data2; /* w5/x5 */
+ unsigned long data3; /* w6/x6 */
+ unsigned long data4; /* w7/x7 */
+};
+
+struct ffa_mem_region_addr_range {
+ /* The base IPA of the constituent memory region, aligned to 4 kiB */
+ u64 address;
+ /* The number of 4 kiB pages in the constituent memory region. */
+ u32 pg_cnt;
+ u32 reserved;
+};
+
+struct ffa_composite_mem_region {
+ /*
+ * The total number of 4 kiB pages included in this memory region. This
+ * must be equal to the sum of page counts specified in each
+ * `struct ffa_mem_region_addr_range`.
+ */
+ u32 total_pg_cnt;
+ /* The number of constituents included in this memory region range */
+ u32 addr_range_cnt;
+ u64 reserved;
+ /** An array of `addr_range_cnt` memory region constituents. */
+ struct ffa_mem_region_addr_range constituents[];
+};
+
+struct ffa_mem_region_attributes {
+ /* The ID of the VM to which the memory is being given or shared. */
+ u16 receiver;
+ /*
+ * The permissions with which the memory region should be mapped in the
+ * receiver's page table.
+ */
+#define FFA_MEM_EXEC BIT(3)
+#define FFA_MEM_NO_EXEC BIT(2)
+#define FFA_MEM_RW BIT(1)
+#define FFA_MEM_RO BIT(0)
+ u8 attrs;
+ /*
+ * Flags used during FFA_MEM_RETRIEVE_REQ and FFA_MEM_RETRIEVE_RESP
+ * for memory regions with multiple borrowers.
+ */
+#define FFA_MEM_RETRIEVE_SELF_BORROWER BIT(0)
+ u8 flag;
+ /*
+ * Offset in bytes from the start of the outer `struct ffa_mem_region` to
+ * a `struct ffa_composite_mem_region`.
+ */
+ u32 composite_off;
+ u64 reserved;
+};
+
+struct ffa_mem_region {
+ /* The ID of the VM/owner which originally sent the memory region */
+ u16 sender_id;
+#define FFA_MEM_NORMAL BIT(5)
+#define FFA_MEM_DEVICE BIT(4)
+
+#define FFA_MEM_WRITE_BACK (3 << 2)
+#define FFA_MEM_NON_CACHEABLE (1 << 2)
+
+#define FFA_DEV_nGnRnE (0 << 2)
+#define FFA_DEV_nGnRE (1 << 2)
+#define FFA_DEV_nGRE (2 << 2)
+#define FFA_DEV_GRE (3 << 2)
+
+#define FFA_MEM_NON_SHAREABLE (0)
+#define FFA_MEM_OUTER_SHAREABLE (2)
+#define FFA_MEM_INNER_SHAREABLE (3)
+ u8 attributes;
+ u8 reserved_0;
+/*
+ * Clear memory region contents after unmapping it from the sender and
+ * before mapping it for any receiver.
+ */
+#define FFA_MEM_CLEAR BIT(0)
+/*
+ * Whether the hypervisor may time slice the memory sharing or retrieval
+ * operation.
+ */
+#define FFA_TIME_SLICE_ENABLE BIT(1)
+
+#define FFA_MEM_RETRIEVE_TYPE_IN_RESP (0 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_SHARE (1 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_LEND (2 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_DONATE (3 << 3)
+
+#define FFA_MEM_RETRIEVE_ADDR_ALIGN_HINT BIT(9)
+#define FFA_MEM_RETRIEVE_ADDR_ALIGN(x) ((x) << 5)
+ /* Flags to control behaviour of the transaction. */
+ u32 flags;
+#define HANDLE_LOW_MASK GENMASK_ULL(31, 0)
+#define HANDLE_HIGH_MASK GENMASK_ULL(63, 32)
+#define HANDLE_LOW(x) ((u32)(FIELD_GET(HANDLE_LOW_MASK, (x))))
+#define HANDLE_HIGH(x) ((u32)(FIELD_GET(HANDLE_HIGH_MASK, (x))))
+
+#define PACK_HANDLE(l, h) \
+ (FIELD_PREP(HANDLE_LOW_MASK, (l)) | FIELD_PREP(HANDLE_HIGH_MASK, (h)))
+ /*
+ * A globally-unique ID assigned by the hypervisor for a region
+ * of memory being sent between VMs.
+ */
+ u64 handle;
+ /*
+ * An implementation defined value associated with the receiver and the
+ * memory region.
+ */
+ u64 tag;
+ u32 reserved_1;
+ /*
+ * The number of `ffa_mem_region_attributes` entries included in this
+ * transaction.
+ */
+ u32 ep_count;
+ /*
+ * An array of endpoint memory access descriptors.
+ * Each one specifies a memory region offset, an endpoint and the
+ * attributes with which this memory region should be mapped in that
+ * endpoint's page table.
+ */
+ struct ffa_mem_region_attributes ep_mem_access[];
+};
+
+#define COMPOSITE_OFFSET(x) \
+ (offsetof(struct ffa_mem_region, ep_mem_access[x]))
+#define CONSTITUENTS_OFFSET(x) \
+ (offsetof(struct ffa_composite_mem_region, constituents[x]))
+#define COMPOSITE_CONSTITUENTS_OFFSET(x, y) \
+ (COMPOSITE_OFFSET(x) + CONSTITUENTS_OFFSET(y))
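+/*
+ * Illustration (not part of this header): the 64-bit global handle arrives
+ * from the hypervisor as two 32-bit halves; PACK_HANDLE() reassembles it and
+ * HANDLE_LOW()/HANDLE_HIGH() split it back, e.g.:
+ *
+ *	u32 lo = 0xdeadbeef, hi = 0x00c0ffee;
+ *	u64 handle = PACK_HANDLE(lo, hi);
+ *
+ *	WARN_ON(HANDLE_LOW(handle) != lo || HANDLE_HIGH(handle) != hi);
+ */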
+
+struct ffa_mem_ops_args {
+ bool use_txbuf;
+ u32 nattrs;
+ u32 flags;
+ u64 tag;
+ u64 g_handle;
+ struct scatterlist *sg;
+ struct ffa_mem_region_attributes *attrs;
+};
+
+struct ffa_info_ops {
+ u32 (*api_version_get)(void);
+ int (*partition_info_get)(const char *uuid_str,
+ struct ffa_partition_info *buffer);
+};
+
+struct ffa_msg_ops {
+ void (*mode_32bit_set)(struct ffa_device *dev);
+ int (*sync_send_receive)(struct ffa_device *dev,
+ struct ffa_send_direct_data *data);
+};
+
+struct ffa_mem_ops {
+ int (*memory_reclaim)(u64 g_handle, u32 flags);
+ int (*memory_share)(struct ffa_mem_ops_args *args);
+ int (*memory_lend)(struct ffa_mem_ops_args *args);
+};
+
+struct ffa_ops {
+ const struct ffa_info_ops *info_ops;
+ const struct ffa_msg_ops *msg_ops;
+ const struct ffa_mem_ops *mem_ops;
+};
+
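+/*
+ * Usage sketch (hypothetical): send a direct request from a bound driver,
+ * assuming the target partition advertises FFA_PARTITION_DIRECT_RECV. The
+ * payload value is a placeholder.
+ *
+ *	struct ffa_send_direct_data data = { .data0 = 0x1 };
+ *	int ret;
+ *
+ *	ret = ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &data);
+ *	if (!ret)
+ *		dev_info(&ffa_dev->dev, "response: %lx\n", data.data0);
+ */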
+#endif /* _LINUX_ARM_FFA_H */
diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h
index 0a241c5c911d..14dc461b0e82 100644
--- a/include/linux/arm_sdei.h
+++ b/include/linux/arm_sdei.h
@@ -46,9 +46,11 @@ int sdei_unregister_ghes(struct ghes *ghes);
/* For use by arch code when CPU hotplug notifiers are not appropriate. */
int sdei_mask_local_cpu(void);
int sdei_unmask_local_cpu(void);
+void __init sdei_init(void);
#else
static inline int sdei_mask_local_cpu(void) { return 0; }
static inline int sdei_unmask_local_cpu(void) { return 0; }
+static inline void sdei_init(void) { }
#endif /* CONFIG_ARM_SDE_INTERFACE */
diff --git a/include/linux/armada-37xx-rwtm-mailbox.h b/include/linux/armada-37xx-rwtm-mailbox.h
index 57bb54f6767a..ef4bd705eb65 100644
--- a/include/linux/armada-37xx-rwtm-mailbox.h
+++ b/include/linux/armada-37xx-rwtm-mailbox.h
@@ -2,7 +2,7 @@
/*
* rWTM BIU Mailbox driver for Armada 37xx
*
- * Author: Marek Behun <marek.behun@nic.cz>
+ * Author: Marek Behún <kabel@kernel.org>
*/
#ifndef _LINUX_ARMADA_37XX_RWTM_MAILBOX_H_
diff --git a/include/linux/ascii85.h b/include/linux/ascii85.h
index 4cc40201273e..83ad775ad0aa 100644
--- a/include/linux/ascii85.h
+++ b/include/linux/ascii85.h
@@ -8,7 +8,8 @@
#ifndef _ASCII85_H_
#define _ASCII85_H_
-#include <linux/kernel.h>
+#include <linux/math.h>
+#include <linux/types.h>
#define ASCII85_BUFSZ 6
diff --git a/include/linux/asn1_encoder.h b/include/linux/asn1_encoder.h
new file mode 100644
index 000000000000..08cd0c2ad34f
--- /dev/null
+++ b/include/linux/asn1_encoder.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _LINUX_ASN1_ENCODER_H
+#define _LINUX_ASN1_ENCODER_H
+
+#include <linux/types.h>
+#include <linux/asn1.h>
+#include <linux/asn1_ber_bytecode.h>
+#include <linux/bug.h>
+
+#define asn1_oid_len(oid) (sizeof(oid)/sizeof(u32))
+unsigned char *
+asn1_encode_integer(unsigned char *data, const unsigned char *end_data,
+ s64 integer);
+unsigned char *
+asn1_encode_oid(unsigned char *data, const unsigned char *end_data,
+ u32 oid[], int oid_len);
+unsigned char *
+asn1_encode_tag(unsigned char *data, const unsigned char *end_data,
+ u32 tag, const unsigned char *string, int len);
+unsigned char *
+asn1_encode_octet_string(unsigned char *data,
+ const unsigned char *end_data,
+ const unsigned char *string, u32 len);
+unsigned char *
+asn1_encode_sequence(unsigned char *data, const unsigned char *end_data,
+ const unsigned char *seq, int len);
+unsigned char *
+asn1_encode_boolean(unsigned char *data, const unsigned char *end_data,
+ bool val);
+
+#endif
diff --git a/include/linux/async.h b/include/linux/async.h
index 0a17cd27f348..cce4ad31e8fc 100644
--- a/include/linux/async.h
+++ b/include/linux/async.h
@@ -112,7 +112,6 @@ async_schedule_dev_domain(async_func_t func, struct device *dev,
return async_schedule_node_domain(func, dev, dev_to_node(dev), domain);
}
-void async_unregister_domain(struct async_domain *domain);
extern void async_synchronize_full(void);
extern void async_synchronize_full_domain(struct async_domain *domain);
extern void async_synchronize_cookie(async_cookie_t cookie);
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 75e582b8d2d9..5cc73d7e5b52 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -36,7 +36,7 @@ struct dma_chan_ref {
/**
* async_tx_flags - modifiers for the async_* calls
* @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where the
- * the destination address is not a source. The asynchronous case handles this
+ * destination address is not a source. The asynchronous case handles this
* implicitly, the synchronous case needs to zero the destination block.
* @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address is
* also one of the source addresses. In the synchronous case the destination
@@ -163,11 +163,22 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
int src_cnt, size_t len, struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
+async_xor_offs(struct page *dest, unsigned int offset,
+ struct page **src_list, unsigned int *src_offset,
+ int src_cnt, size_t len, struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
int src_cnt, size_t len, enum sum_check_flags *result,
struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
+async_xor_val_offs(struct page *dest, unsigned int offset,
+ struct page **src_list, unsigned int *src_offset,
+ int src_cnt, size_t len, enum sum_check_flags *result,
+ struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
unsigned int src_offset, size_t len,
struct async_submit_ctl *submit);
@@ -175,21 +186,23 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
-async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt,
+async_gen_syndrome(struct page **blocks, unsigned int *offsets, int src_cnt,
size_t len, struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
-async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt,
+async_syndrome_val(struct page **blocks, unsigned int *offsets, int src_cnt,
size_t len, enum sum_check_flags *pqres, struct page *spare,
- struct async_submit_ctl *submit);
+ unsigned int s_off, struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb,
- struct page **ptrs, struct async_submit_ctl *submit);
+ struct page **ptrs, unsigned int *offs,
+ struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
async_raid6_datap_recov(int src_num, size_t bytes, int faila,
- struct page **ptrs, struct async_submit_ctl *submit);
+ struct page **ptrs, unsigned int *offs,
+ struct async_submit_ctl *submit);
void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
#endif /* _ASYNC_TX_H_ */
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 6e67aded28f8..e3050e153a71 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -13,7 +13,7 @@
#ifndef __LINUX_ATA_H__
#define __LINUX_ATA_H__
-#include <linux/kernel.h>
+#include <linux/bits.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>
@@ -324,6 +324,7 @@ enum {
ATA_LOG_NCQ_NON_DATA = 0x12,
ATA_LOG_NCQ_SEND_RECV = 0x13,
ATA_LOG_IDENTIFY_DEVICE = 0x30,
+ ATA_LOG_CONCURRENT_POSITIONING_RANGES = 0x47,
/* Identify device log pages: */
ATA_LOG_SECURITY = 0x06,
@@ -565,6 +566,18 @@ struct ata_bmdma_prd {
((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
((id)[ATA_ID_FEATURE_SUPP] & (1 << 2)))
+#define ata_id_has_devslp(id) \
+ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8)))
+#define ata_id_has_ncq_autosense(id) \
+ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7)))
+#define ata_id_has_dipm(id) \
+ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 3)))
#define ata_id_iordy_disable(id) ((id)[ATA_ID_CAPABILITY] & (1 << 10))
#define ata_id_has_iordy(id) ((id)[ATA_ID_CAPABILITY] & (1 << 11))
#define ata_id_u32(id,n) \
@@ -577,9 +590,6 @@ struct ata_bmdma_prd {
#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
-#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
-#define ata_id_has_ncq_autosense(id) \
- ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
static inline bool ata_id_has_hipm(const u16 *id)
{
@@ -591,17 +601,6 @@ static inline bool ata_id_has_hipm(const u16 *id)
return val & (1 << 9);
}
-static inline bool ata_id_has_dipm(const u16 *id)
-{
- u16 val = id[ATA_ID_FEATURE_SUPP];
-
- if (val == 0 || val == 0xffff)
- return false;
-
- return val & (1 << 3);
-}
-
-
static inline bool ata_id_has_fua(const u16 *id)
{
if ((id[ATA_ID_CFSSE] & 0xC000) != 0x4000)
@@ -770,16 +769,21 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
static inline bool ata_id_has_sense_reporting(const u16 *id)
{
- if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+ if (!(id[ATA_ID_CFS_ENABLE_2] & BIT(15)))
+ return false;
+ if ((id[ATA_ID_COMMAND_SET_3] & (BIT(15) | BIT(14))) != BIT(14))
return false;
- return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
+ return id[ATA_ID_COMMAND_SET_3] & BIT(6);
}
static inline bool ata_id_sense_reporting_enabled(const u16 *id)
{
- if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+ if (!ata_id_has_sense_reporting(id))
+ return false;
+ /* ata_id_has_sense_reporting() == true, word 86 must have bit 15 set */
+ if ((id[ATA_ID_COMMAND_SET_4] & (BIT(15) | BIT(14))) != BIT(14))
return false;
- return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
+ return id[ATA_ID_COMMAND_SET_4] & BIT(6);
}
/**
diff --git a/include/linux/atalk.h b/include/linux/atalk.h
index f6034ba774be..a55bfc6567d0 100644
--- a/include/linux/atalk.h
+++ b/include/linux/atalk.h
@@ -113,7 +113,7 @@ extern int aarp_proto_init(void);
/* Inter module exports */
/* Give a device find its atif control structure */
-#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK)
+#if IS_ENABLED(CONFIG_ATALK)
static inline struct atalk_iface *atalk_find_dev(struct net_device *dev)
{
return dev->atalk_ptr;
diff --git a/include/linux/atm_suni.h b/include/linux/atm_suni.h
deleted file mode 100644
index 84f3aab54468..000000000000
--- a/include/linux/atm_suni.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* atm_suni.h - Driver-specific declarations of the SUNI driver (for use by
- driver-specific utilities) */
-
-/* Written 1998,2000 by Werner Almesberger, EPFL ICA */
-
-
-#ifndef LINUX_ATM_SUNI_H
-#define LINUX_ATM_SUNI_H
-
-/* everything obsoleted */
-
-#endif
diff --git a/include/linux/atm_tcp.h b/include/linux/atm_tcp.h
index c8ecf6f68fb5..2558439d849b 100644
--- a/include/linux/atm_tcp.h
+++ b/include/linux/atm_tcp.h
@@ -9,6 +9,8 @@
#include <uapi/linux/atm_tcp.h>
+struct atm_vcc;
+struct module;
struct atm_tcp_ops {
int (*attach)(struct atm_vcc *vcc,int itf);
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 8124815eb121..9b02961d65ee 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -151,7 +151,7 @@ struct atm_dev {
const char *type; /* device type name */
int number; /* device index */
void *dev_data; /* per-device data */
- void *phy_data; /* private PHY date */
+ void *phy_data; /* private PHY data */
unsigned long flags; /* device flags (ATM_DF_*) */
struct list_head local; /* local ATM addresses */
struct list_head lecs; /* LECS ATM addresses learned via ILMI */
@@ -176,11 +176,6 @@ struct atm_dev {
#define ATM_OF_IMMED 1 /* Attempt immediate delivery */
#define ATM_OF_INRATE 2 /* Attempt in-rate delivery */
-
-/*
- * ioctl, getsockopt, and setsockopt are optional and can be set to NULL.
- */
-
struct atmdev_ops { /* only send is required */
void (*dev_close)(struct atm_dev *dev);
int (*open)(struct atm_vcc *vcc);
@@ -190,11 +185,8 @@ struct atmdev_ops { /* only send is required */
int (*compat_ioctl)(struct atm_dev *dev,unsigned int cmd,
void __user *arg);
#endif
- int (*getsockopt)(struct atm_vcc *vcc,int level,int optname,
- void __user *optval,int optlen);
- int (*setsockopt)(struct atm_vcc *vcc,int level,int optname,
- void __user *optval,unsigned int optlen);
int (*send)(struct atm_vcc *vcc,struct sk_buff *skb);
+ int (*send_bh)(struct atm_vcc *vcc, struct sk_buff *skb);
int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags);
void (*phy_put)(struct atm_dev *dev,unsigned char value,
unsigned long addr);
@@ -215,7 +207,7 @@ struct atm_skb_data {
struct atm_vcc *vcc; /* ATM VCC */
unsigned long atm_options; /* ATM layer options */
unsigned int acct_truesize; /* truesize accounted to vcc */
-};
+} __packed;
#define VCC_HTABLE_SIZE 32
diff --git a/include/linux/atmel-isc-media.h b/include/linux/atmel-isc-media.h
new file mode 100644
index 000000000000..79a320fb724e
--- /dev/null
+++ b/include/linux/atmel-isc-media.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Eugen Hristev <eugen.hristev@microchip.com>
+ */
+
+#ifndef __LINUX_ATMEL_ISC_MEDIA_H__
+#define __LINUX_ATMEL_ISC_MEDIA_H__
+
+/*
+ * There are 8 controls available:
+ * 4 gain controls, sliders, for each of the BAYER components: R, B, GR, GB.
+ * These gains are per-component multipliers in unsigned 0:4:9 fixed-point
+ * format, with a default value of 512 (a 1.0 multiplier).
+ * 4 offset controls, sliders, for each of the BAYER components: R, B, GR, GB.
+ * These offsets are added to/subtracted from each component, in signed 1:12:0
+ * format, with a default value of 0 (+/- 0).
+ *
+ * To expose this to userspace, 8 custom controls are added, in an auto cluster.
+ *
+ * To summarize the functionality:
+ * The auto cluster switch is the auto white balance control, and it works
+ * like this:
+ * AWB == 1: auto white balance is on, the do_white_balance button is inactive,
+ * and the gains/offsets are inactive but volatile and readable.
+ * Thus, the results of the white balance algorithm are available to userspace
+ * to read at any time.
+ * AWB == 0: auto white balance is off, the cluster is in manual mode, and the
+ * user can configure the gains/offsets directly.
+ * Moreover, if the do_white_balance button is pressed, the driver performs a
+ * one-time adjustment (preferably with a color checker card), and userspace
+ * can read the new values again.
+ *
+ * With this feature, userspace can save the coefficients and reinstall them,
+ * for example after a reboot or after reprobing the driver.
+ */
+
+enum atmel_isc_ctrl_id {
+ /* Red component gain control */
+ ISC_CID_R_GAIN = (V4L2_CID_USER_ATMEL_ISC_BASE + 0),
+ /* Blue component gain control */
+ ISC_CID_B_GAIN,
+ /* Green Red component gain control */
+ ISC_CID_GR_GAIN,
+ /* Green Blue gain control */
+ ISC_CID_GB_GAIN,
+ /* Red component offset control */
+ ISC_CID_R_OFFSET,
+ /* Blue component offset control */
+ ISC_CID_B_OFFSET,
+ /* Green Red component offset control */
+ ISC_CID_GR_OFFSET,
+ /* Green Blue component offset control */
+ ISC_CID_GB_OFFSET,
+};
+
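+/*
+ * Illustration (hypothetical helper, not part of this header): in unsigned
+ * 0:4:9 fixed point the low 9 bits are fractional, so 512 encodes a 1.0
+ * multiplier and 768 encodes 1.5. Converting a gain to millionths:
+ *
+ *	static inline u32 isc_gain_to_micro(u32 gain)
+ *	{
+ *		return ((u64)gain * 1000000) >> 9;
+ *	}
+ *
+ * e.g. isc_gain_to_micro(512) == 1000000, isc_gain_to_micro(768) == 1500000.
+ */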
+#endif
diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h
deleted file mode 100644
index a7d240e465c0..000000000000
--- a/include/linux/atomic-fallback.h
+++ /dev/null
@@ -1,2295 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by scripts/atomic/gen-atomic-fallback.sh
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-#ifndef _LINUX_ATOMIC_FALLBACK_H
-#define _LINUX_ATOMIC_FALLBACK_H
-
-#ifndef xchg_relaxed
-#define xchg_relaxed xchg
-#define xchg_acquire xchg
-#define xchg_release xchg
-#else /* xchg_relaxed */
-
-#ifndef xchg_acquire
-#define xchg_acquire(...) \
- __atomic_op_acquire(xchg, __VA_ARGS__)
-#endif
-
-#ifndef xchg_release
-#define xchg_release(...) \
- __atomic_op_release(xchg, __VA_ARGS__)
-#endif
-
-#ifndef xchg
-#define xchg(...) \
- __atomic_op_fence(xchg, __VA_ARGS__)
-#endif
-
-#endif /* xchg_relaxed */
-
-#ifndef cmpxchg_relaxed
-#define cmpxchg_relaxed cmpxchg
-#define cmpxchg_acquire cmpxchg
-#define cmpxchg_release cmpxchg
-#else /* cmpxchg_relaxed */
-
-#ifndef cmpxchg_acquire
-#define cmpxchg_acquire(...) \
- __atomic_op_acquire(cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg_release
-#define cmpxchg_release(...) \
- __atomic_op_release(cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg
-#define cmpxchg(...) \
- __atomic_op_fence(cmpxchg, __VA_ARGS__)
-#endif
-
-#endif /* cmpxchg_relaxed */
-
-#ifndef cmpxchg64_relaxed
-#define cmpxchg64_relaxed cmpxchg64
-#define cmpxchg64_acquire cmpxchg64
-#define cmpxchg64_release cmpxchg64
-#else /* cmpxchg64_relaxed */
-
-#ifndef cmpxchg64_acquire
-#define cmpxchg64_acquire(...) \
- __atomic_op_acquire(cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg64_release
-#define cmpxchg64_release(...) \
- __atomic_op_release(cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg64
-#define cmpxchg64(...) \
- __atomic_op_fence(cmpxchg64, __VA_ARGS__)
-#endif
-
-#endif /* cmpxchg64_relaxed */
-
-#ifndef atomic_read_acquire
-static inline int
-atomic_read_acquire(const atomic_t *v)
-{
- return smp_load_acquire(&(v)->counter);
-}
-#define atomic_read_acquire atomic_read_acquire
-#endif
-
-#ifndef atomic_set_release
-static inline void
-atomic_set_release(atomic_t *v, int i)
-{
- smp_store_release(&(v)->counter, i);
-}
-#define atomic_set_release atomic_set_release
-#endif
-
-#ifndef atomic_add_return_relaxed
-#define atomic_add_return_acquire atomic_add_return
-#define atomic_add_return_release atomic_add_return
-#define atomic_add_return_relaxed atomic_add_return
-#else /* atomic_add_return_relaxed */
-
-#ifndef atomic_add_return_acquire
-static inline int
-atomic_add_return_acquire(int i, atomic_t *v)
-{
- int ret = atomic_add_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_add_return_acquire atomic_add_return_acquire
-#endif
-
-#ifndef atomic_add_return_release
-static inline int
-atomic_add_return_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_add_return_relaxed(i, v);
-}
-#define atomic_add_return_release atomic_add_return_release
-#endif
-
-#ifndef atomic_add_return
-static inline int
-atomic_add_return(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_add_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_add_return atomic_add_return
-#endif
-
-#endif /* atomic_add_return_relaxed */
-
-#ifndef atomic_fetch_add_relaxed
-#define atomic_fetch_add_acquire atomic_fetch_add
-#define atomic_fetch_add_release atomic_fetch_add
-#define atomic_fetch_add_relaxed atomic_fetch_add
-#else /* atomic_fetch_add_relaxed */
-
-#ifndef atomic_fetch_add_acquire
-static inline int
-atomic_fetch_add_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_add_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_add_acquire atomic_fetch_add_acquire
-#endif
-
-#ifndef atomic_fetch_add_release
-static inline int
-atomic_fetch_add_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_add_relaxed(i, v);
-}
-#define atomic_fetch_add_release atomic_fetch_add_release
-#endif
-
-#ifndef atomic_fetch_add
-static inline int
-atomic_fetch_add(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_add_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_add atomic_fetch_add
-#endif
-
-#endif /* atomic_fetch_add_relaxed */
-
-#ifndef atomic_sub_return_relaxed
-#define atomic_sub_return_acquire atomic_sub_return
-#define atomic_sub_return_release atomic_sub_return
-#define atomic_sub_return_relaxed atomic_sub_return
-#else /* atomic_sub_return_relaxed */
-
-#ifndef atomic_sub_return_acquire
-static inline int
-atomic_sub_return_acquire(int i, atomic_t *v)
-{
- int ret = atomic_sub_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_sub_return_acquire atomic_sub_return_acquire
-#endif
-
-#ifndef atomic_sub_return_release
-static inline int
-atomic_sub_return_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_sub_return_relaxed(i, v);
-}
-#define atomic_sub_return_release atomic_sub_return_release
-#endif
-
-#ifndef atomic_sub_return
-static inline int
-atomic_sub_return(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_sub_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_sub_return atomic_sub_return
-#endif
-
-#endif /* atomic_sub_return_relaxed */
-
-#ifndef atomic_fetch_sub_relaxed
-#define atomic_fetch_sub_acquire atomic_fetch_sub
-#define atomic_fetch_sub_release atomic_fetch_sub
-#define atomic_fetch_sub_relaxed atomic_fetch_sub
-#else /* atomic_fetch_sub_relaxed */
-
-#ifndef atomic_fetch_sub_acquire
-static inline int
-atomic_fetch_sub_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_sub_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
-#endif
-
-#ifndef atomic_fetch_sub_release
-static inline int
-atomic_fetch_sub_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_sub_relaxed(i, v);
-}
-#define atomic_fetch_sub_release atomic_fetch_sub_release
-#endif
-
-#ifndef atomic_fetch_sub
-static inline int
-atomic_fetch_sub(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_sub_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_sub atomic_fetch_sub
-#endif
-
-#endif /* atomic_fetch_sub_relaxed */
-
-#ifndef atomic_inc
-static inline void
-atomic_inc(atomic_t *v)
-{
- atomic_add(1, v);
-}
-#define atomic_inc atomic_inc
-#endif
-
-#ifndef atomic_inc_return_relaxed
-#ifdef atomic_inc_return
-#define atomic_inc_return_acquire atomic_inc_return
-#define atomic_inc_return_release atomic_inc_return
-#define atomic_inc_return_relaxed atomic_inc_return
-#endif /* atomic_inc_return */
-
-#ifndef atomic_inc_return
-static inline int
-atomic_inc_return(atomic_t *v)
-{
- return atomic_add_return(1, v);
-}
-#define atomic_inc_return atomic_inc_return
-#endif
-
-#ifndef atomic_inc_return_acquire
-static inline int
-atomic_inc_return_acquire(atomic_t *v)
-{
- return atomic_add_return_acquire(1, v);
-}
-#define atomic_inc_return_acquire atomic_inc_return_acquire
-#endif
-
-#ifndef atomic_inc_return_release
-static inline int
-atomic_inc_return_release(atomic_t *v)
-{
- return atomic_add_return_release(1, v);
-}
-#define atomic_inc_return_release atomic_inc_return_release
-#endif
-
-#ifndef atomic_inc_return_relaxed
-static inline int
-atomic_inc_return_relaxed(atomic_t *v)
-{
- return atomic_add_return_relaxed(1, v);
-}
-#define atomic_inc_return_relaxed atomic_inc_return_relaxed
-#endif
-
-#else /* atomic_inc_return_relaxed */
-
-#ifndef atomic_inc_return_acquire
-static inline int
-atomic_inc_return_acquire(atomic_t *v)
-{
- int ret = atomic_inc_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_inc_return_acquire atomic_inc_return_acquire
-#endif
-
-#ifndef atomic_inc_return_release
-static inline int
-atomic_inc_return_release(atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_inc_return_relaxed(v);
-}
-#define atomic_inc_return_release atomic_inc_return_release
-#endif
-
-#ifndef atomic_inc_return
-static inline int
-atomic_inc_return(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_inc_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_inc_return atomic_inc_return
-#endif
-
-#endif /* atomic_inc_return_relaxed */
-
-#ifndef atomic_fetch_inc_relaxed
-#ifdef atomic_fetch_inc
-#define atomic_fetch_inc_acquire atomic_fetch_inc
-#define atomic_fetch_inc_release atomic_fetch_inc
-#define atomic_fetch_inc_relaxed atomic_fetch_inc
-#endif /* atomic_fetch_inc */
-
-#ifndef atomic_fetch_inc
-static inline int
-atomic_fetch_inc(atomic_t *v)
-{
- return atomic_fetch_add(1, v);
-}
-#define atomic_fetch_inc atomic_fetch_inc
-#endif
-
-#ifndef atomic_fetch_inc_acquire
-static inline int
-atomic_fetch_inc_acquire(atomic_t *v)
-{
- return atomic_fetch_add_acquire(1, v);
-}
-#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
-#endif
-
-#ifndef atomic_fetch_inc_release
-static inline int
-atomic_fetch_inc_release(atomic_t *v)
-{
- return atomic_fetch_add_release(1, v);
-}
-#define atomic_fetch_inc_release atomic_fetch_inc_release
-#endif
-
-#ifndef atomic_fetch_inc_relaxed
-static inline int
-atomic_fetch_inc_relaxed(atomic_t *v)
-{
- return atomic_fetch_add_relaxed(1, v);
-}
-#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
-#endif
-
-#else /* atomic_fetch_inc_relaxed */
-
-#ifndef atomic_fetch_inc_acquire
-static inline int
-atomic_fetch_inc_acquire(atomic_t *v)
-{
- int ret = atomic_fetch_inc_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
-#endif
-
-#ifndef atomic_fetch_inc_release
-static inline int
-atomic_fetch_inc_release(atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_inc_relaxed(v);
-}
-#define atomic_fetch_inc_release atomic_fetch_inc_release
-#endif
-
-#ifndef atomic_fetch_inc
-static inline int
-atomic_fetch_inc(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_inc_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_inc atomic_fetch_inc
-#endif
-
-#endif /* atomic_fetch_inc_relaxed */
-
-#ifndef atomic_dec
-static inline void
-atomic_dec(atomic_t *v)
-{
- atomic_sub(1, v);
-}
-#define atomic_dec atomic_dec
-#endif
-
-#ifndef atomic_dec_return_relaxed
-#ifdef atomic_dec_return
-#define atomic_dec_return_acquire atomic_dec_return
-#define atomic_dec_return_release atomic_dec_return
-#define atomic_dec_return_relaxed atomic_dec_return
-#endif /* atomic_dec_return */
-
-#ifndef atomic_dec_return
-static inline int
-atomic_dec_return(atomic_t *v)
-{
- return atomic_sub_return(1, v);
-}
-#define atomic_dec_return atomic_dec_return
-#endif
-
-#ifndef atomic_dec_return_acquire
-static inline int
-atomic_dec_return_acquire(atomic_t *v)
-{
- return atomic_sub_return_acquire(1, v);
-}
-#define atomic_dec_return_acquire atomic_dec_return_acquire
-#endif
-
-#ifndef atomic_dec_return_release
-static inline int
-atomic_dec_return_release(atomic_t *v)
-{
- return atomic_sub_return_release(1, v);
-}
-#define atomic_dec_return_release atomic_dec_return_release
-#endif
-
-#ifndef atomic_dec_return_relaxed
-static inline int
-atomic_dec_return_relaxed(atomic_t *v)
-{
- return atomic_sub_return_relaxed(1, v);
-}
-#define atomic_dec_return_relaxed atomic_dec_return_relaxed
-#endif
-
-#else /* atomic_dec_return_relaxed */
-
-#ifndef atomic_dec_return_acquire
-static inline int
-atomic_dec_return_acquire(atomic_t *v)
-{
- int ret = atomic_dec_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_dec_return_acquire atomic_dec_return_acquire
-#endif
-
-#ifndef atomic_dec_return_release
-static inline int
-atomic_dec_return_release(atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_dec_return_relaxed(v);
-}
-#define atomic_dec_return_release atomic_dec_return_release
-#endif
-
-#ifndef atomic_dec_return
-static inline int
-atomic_dec_return(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_dec_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_dec_return atomic_dec_return
-#endif
-
-#endif /* atomic_dec_return_relaxed */
-
-#ifndef atomic_fetch_dec_relaxed
-#ifdef atomic_fetch_dec
-#define atomic_fetch_dec_acquire atomic_fetch_dec
-#define atomic_fetch_dec_release atomic_fetch_dec
-#define atomic_fetch_dec_relaxed atomic_fetch_dec
-#endif /* atomic_fetch_dec */
-
-#ifndef atomic_fetch_dec
-static inline int
-atomic_fetch_dec(atomic_t *v)
-{
- return atomic_fetch_sub(1, v);
-}
-#define atomic_fetch_dec atomic_fetch_dec
-#endif
-
-#ifndef atomic_fetch_dec_acquire
-static inline int
-atomic_fetch_dec_acquire(atomic_t *v)
-{
- return atomic_fetch_sub_acquire(1, v);
-}
-#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
-#endif
-
-#ifndef atomic_fetch_dec_release
-static inline int
-atomic_fetch_dec_release(atomic_t *v)
-{
- return atomic_fetch_sub_release(1, v);
-}
-#define atomic_fetch_dec_release atomic_fetch_dec_release
-#endif
-
-#ifndef atomic_fetch_dec_relaxed
-static inline int
-atomic_fetch_dec_relaxed(atomic_t *v)
-{
- return atomic_fetch_sub_relaxed(1, v);
-}
-#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
-#endif
-
-#else /* atomic_fetch_dec_relaxed */
-
-#ifndef atomic_fetch_dec_acquire
-static inline int
-atomic_fetch_dec_acquire(atomic_t *v)
-{
- int ret = atomic_fetch_dec_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
-#endif
-
-#ifndef atomic_fetch_dec_release
-static inline int
-atomic_fetch_dec_release(atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_dec_relaxed(v);
-}
-#define atomic_fetch_dec_release atomic_fetch_dec_release
-#endif
-
-#ifndef atomic_fetch_dec
-static inline int
-atomic_fetch_dec(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_dec_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_dec atomic_fetch_dec
-#endif
-
-#endif /* atomic_fetch_dec_relaxed */
-
-#ifndef atomic_fetch_and_relaxed
-#define atomic_fetch_and_acquire atomic_fetch_and
-#define atomic_fetch_and_release atomic_fetch_and
-#define atomic_fetch_and_relaxed atomic_fetch_and
-#else /* atomic_fetch_and_relaxed */
-
-#ifndef atomic_fetch_and_acquire
-static inline int
-atomic_fetch_and_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_and_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_and_acquire atomic_fetch_and_acquire
-#endif
-
-#ifndef atomic_fetch_and_release
-static inline int
-atomic_fetch_and_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_and_relaxed(i, v);
-}
-#define atomic_fetch_and_release atomic_fetch_and_release
-#endif
-
-#ifndef atomic_fetch_and
-static inline int
-atomic_fetch_and(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_and_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_and atomic_fetch_and
-#endif
-
-#endif /* atomic_fetch_and_relaxed */
-
-#ifndef atomic_andnot
-static inline void
-atomic_andnot(int i, atomic_t *v)
-{
- atomic_and(~i, v);
-}
-#define atomic_andnot atomic_andnot
-#endif
-
-#ifndef atomic_fetch_andnot_relaxed
-#ifdef atomic_fetch_andnot
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot
-#define atomic_fetch_andnot_release atomic_fetch_andnot
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
-#endif /* atomic_fetch_andnot */
-
-#ifndef atomic_fetch_andnot
-static inline int
-atomic_fetch_andnot(int i, atomic_t *v)
-{
- return atomic_fetch_and(~i, v);
-}
-#define atomic_fetch_andnot atomic_fetch_andnot
-#endif
-
-#ifndef atomic_fetch_andnot_acquire
-static inline int
-atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
- return atomic_fetch_and_acquire(~i, v);
-}
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
-#endif
-
-#ifndef atomic_fetch_andnot_release
-static inline int
-atomic_fetch_andnot_release(int i, atomic_t *v)
-{
- return atomic_fetch_and_release(~i, v);
-}
-#define atomic_fetch_andnot_release atomic_fetch_andnot_release
-#endif
-
-#ifndef atomic_fetch_andnot_relaxed
-static inline int
-atomic_fetch_andnot_relaxed(int i, atomic_t *v)
-{
- return atomic_fetch_and_relaxed(~i, v);
-}
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#endif
-
-#else /* atomic_fetch_andnot_relaxed */
-
-#ifndef atomic_fetch_andnot_acquire
-static inline int
-atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_andnot_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
-#endif
-
-#ifndef atomic_fetch_andnot_release
-static inline int
-atomic_fetch_andnot_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_andnot_relaxed(i, v);
-}
-#define atomic_fetch_andnot_release atomic_fetch_andnot_release
-#endif
-
-#ifndef atomic_fetch_andnot
-static inline int
-atomic_fetch_andnot(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_andnot_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_andnot atomic_fetch_andnot
-#endif
-
-#endif /* atomic_fetch_andnot_relaxed */
-
-#ifndef atomic_fetch_or_relaxed
-#define atomic_fetch_or_acquire atomic_fetch_or
-#define atomic_fetch_or_release atomic_fetch_or
-#define atomic_fetch_or_relaxed atomic_fetch_or
-#else /* atomic_fetch_or_relaxed */
-
-#ifndef atomic_fetch_or_acquire
-static inline int
-atomic_fetch_or_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_or_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_or_acquire atomic_fetch_or_acquire
-#endif
-
-#ifndef atomic_fetch_or_release
-static inline int
-atomic_fetch_or_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_or_relaxed(i, v);
-}
-#define atomic_fetch_or_release atomic_fetch_or_release
-#endif
-
-#ifndef atomic_fetch_or
-static inline int
-atomic_fetch_or(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_or_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_or atomic_fetch_or
-#endif
-
-#endif /* atomic_fetch_or_relaxed */
-
-#ifndef atomic_fetch_xor_relaxed
-#define atomic_fetch_xor_acquire atomic_fetch_xor
-#define atomic_fetch_xor_release atomic_fetch_xor
-#define atomic_fetch_xor_relaxed atomic_fetch_xor
-#else /* atomic_fetch_xor_relaxed */
-
-#ifndef atomic_fetch_xor_acquire
-static inline int
-atomic_fetch_xor_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_xor_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
-#endif
-
-#ifndef atomic_fetch_xor_release
-static inline int
-atomic_fetch_xor_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_xor_relaxed(i, v);
-}
-#define atomic_fetch_xor_release atomic_fetch_xor_release
-#endif
-
-#ifndef atomic_fetch_xor
-static inline int
-atomic_fetch_xor(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_xor_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_xor atomic_fetch_xor
-#endif
-
-#endif /* atomic_fetch_xor_relaxed */
-
-#ifndef atomic_xchg_relaxed
-#define atomic_xchg_acquire atomic_xchg
-#define atomic_xchg_release atomic_xchg
-#define atomic_xchg_relaxed atomic_xchg
-#else /* atomic_xchg_relaxed */
-
-#ifndef atomic_xchg_acquire
-static inline int
-atomic_xchg_acquire(atomic_t *v, int i)
-{
- int ret = atomic_xchg_relaxed(v, i);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_xchg_acquire atomic_xchg_acquire
-#endif
-
-#ifndef atomic_xchg_release
-static inline int
-atomic_xchg_release(atomic_t *v, int i)
-{
- __atomic_release_fence();
- return atomic_xchg_relaxed(v, i);
-}
-#define atomic_xchg_release atomic_xchg_release
-#endif
-
-#ifndef atomic_xchg
-static inline int
-atomic_xchg(atomic_t *v, int i)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_xchg_relaxed(v, i);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_xchg atomic_xchg
-#endif
-
-#endif /* atomic_xchg_relaxed */
-
-#ifndef atomic_cmpxchg_relaxed
-#define atomic_cmpxchg_acquire atomic_cmpxchg
-#define atomic_cmpxchg_release atomic_cmpxchg
-#define atomic_cmpxchg_relaxed atomic_cmpxchg
-#else /* atomic_cmpxchg_relaxed */
-
-#ifndef atomic_cmpxchg_acquire
-static inline int
-atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
-{
- int ret = atomic_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
-#endif
-
-#ifndef atomic_cmpxchg_release
-static inline int
-atomic_cmpxchg_release(atomic_t *v, int old, int new)
-{
- __atomic_release_fence();
- return atomic_cmpxchg_relaxed(v, old, new);
-}
-#define atomic_cmpxchg_release atomic_cmpxchg_release
-#endif
-
-#ifndef atomic_cmpxchg
-static inline int
-atomic_cmpxchg(atomic_t *v, int old, int new)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_cmpxchg atomic_cmpxchg
-#endif
-
-#endif /* atomic_cmpxchg_relaxed */
-
-#ifndef atomic_try_cmpxchg_relaxed
-#ifdef atomic_try_cmpxchg
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg
-#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg
-#endif /* atomic_try_cmpxchg */
-
-#ifndef atomic_try_cmpxchg
-static inline bool
-atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = atomic_cmpxchg(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic_try_cmpxchg atomic_try_cmpxchg
-#endif
-
-#ifndef atomic_try_cmpxchg_acquire
-static inline bool
-atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = atomic_cmpxchg_acquire(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
-#endif
-
-#ifndef atomic_try_cmpxchg_release
-static inline bool
-atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = atomic_cmpxchg_release(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
-#endif
-
-#ifndef atomic_try_cmpxchg_relaxed
-static inline bool
-atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = atomic_cmpxchg_relaxed(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
-#endif
-
-#else /* atomic_try_cmpxchg_relaxed */
-
-#ifndef atomic_try_cmpxchg_acquire
-static inline bool
-atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
- bool ret = atomic_try_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
-#endif
-
-#ifndef atomic_try_cmpxchg_release
-static inline bool
-atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
- __atomic_release_fence();
- return atomic_try_cmpxchg_relaxed(v, old, new);
-}
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
-#endif
-
-#ifndef atomic_try_cmpxchg
-static inline bool
-atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
- bool ret;
- __atomic_pre_full_fence();
- ret = atomic_try_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_try_cmpxchg atomic_try_cmpxchg
-#endif
-
-#endif /* atomic_try_cmpxchg_relaxed */
-
-#ifndef atomic_sub_and_test
-/**
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static inline bool
-atomic_sub_and_test(int i, atomic_t *v)
-{
- return atomic_sub_return(i, v) == 0;
-}
-#define atomic_sub_and_test atomic_sub_and_test
-#endif
-
-#ifndef atomic_dec_and_test
-/**
- * atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static inline bool
-atomic_dec_and_test(atomic_t *v)
-{
- return atomic_dec_return(v) == 0;
-}
-#define atomic_dec_and_test atomic_dec_and_test
-#endif
-
-#ifndef atomic_inc_and_test
-/**
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static inline bool
-atomic_inc_and_test(atomic_t *v)
-{
- return atomic_inc_return(v) == 0;
-}
-#define atomic_inc_and_test atomic_inc_and_test
-#endif
-
-#ifndef atomic_add_negative
-/**
- * atomic_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static inline bool
-atomic_add_negative(int i, atomic_t *v)
-{
- return atomic_add_return(i, v) < 0;
-}
-#define atomic_add_negative atomic_add_negative
-#endif
-
-#ifndef atomic_fetch_add_unless
-/**
- * atomic_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns original value of @v
- */
-static inline int
-atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
- int c = atomic_read(v);
-
- do {
- if (unlikely(c == u))
- break;
- } while (!atomic_try_cmpxchg(v, &c, c + a));
-
- return c;
-}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
-#endif
-
-#ifndef atomic_add_unless
-/**
- * atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static inline bool
-atomic_add_unless(atomic_t *v, int a, int u)
-{
- return atomic_fetch_add_unless(v, a, u) != u;
-}
-#define atomic_add_unless atomic_add_unless
-#endif
-
-#ifndef atomic_inc_not_zero
-/**
- * atomic_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-static inline bool
-atomic_inc_not_zero(atomic_t *v)
-{
- return atomic_add_unless(v, 1, 0);
-}
-#define atomic_inc_not_zero atomic_inc_not_zero
-#endif
-
-#ifndef atomic_inc_unless_negative
-static inline bool
-atomic_inc_unless_negative(atomic_t *v)
-{
- int c = atomic_read(v);
-
- do {
- if (unlikely(c < 0))
- return false;
- } while (!atomic_try_cmpxchg(v, &c, c + 1));
-
- return true;
-}
-#define atomic_inc_unless_negative atomic_inc_unless_negative
-#endif
-
-#ifndef atomic_dec_unless_positive
-static inline bool
-atomic_dec_unless_positive(atomic_t *v)
-{
- int c = atomic_read(v);
-
- do {
- if (unlikely(c > 0))
- return false;
- } while (!atomic_try_cmpxchg(v, &c, c - 1));
-
- return true;
-}
-#define atomic_dec_unless_positive atomic_dec_unless_positive
-#endif
-
-#ifndef atomic_dec_if_positive
-static inline int
-atomic_dec_if_positive(atomic_t *v)
-{
- int dec, c = atomic_read(v);
-
- do {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- } while (!atomic_try_cmpxchg(v, &c, dec));
-
- return dec;
-}
-#define atomic_dec_if_positive atomic_dec_if_positive
-#endif
-
-#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
-#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#include <asm-generic/atomic64.h>
-#endif
-
-#ifndef atomic64_read_acquire
-static inline s64
-atomic64_read_acquire(const atomic64_t *v)
-{
- return smp_load_acquire(&(v)->counter);
-}
-#define atomic64_read_acquire atomic64_read_acquire
-#endif
-
-#ifndef atomic64_set_release
-static inline void
-atomic64_set_release(atomic64_t *v, s64 i)
-{
- smp_store_release(&(v)->counter, i);
-}
-#define atomic64_set_release atomic64_set_release
-#endif
-
-#ifndef atomic64_add_return_relaxed
-#define atomic64_add_return_acquire atomic64_add_return
-#define atomic64_add_return_release atomic64_add_return
-#define atomic64_add_return_relaxed atomic64_add_return
-#else /* atomic64_add_return_relaxed */
-
-#ifndef atomic64_add_return_acquire
-static inline s64
-atomic64_add_return_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_add_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_add_return_acquire atomic64_add_return_acquire
-#endif
-
-#ifndef atomic64_add_return_release
-static inline s64
-atomic64_add_return_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_add_return_relaxed(i, v);
-}
-#define atomic64_add_return_release atomic64_add_return_release
-#endif
-
-#ifndef atomic64_add_return
-static inline s64
-atomic64_add_return(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_add_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_add_return atomic64_add_return
-#endif
-
-#endif /* atomic64_add_return_relaxed */
-
-#ifndef atomic64_fetch_add_relaxed
-#define atomic64_fetch_add_acquire atomic64_fetch_add
-#define atomic64_fetch_add_release atomic64_fetch_add
-#define atomic64_fetch_add_relaxed atomic64_fetch_add
-#else /* atomic64_fetch_add_relaxed */
-
-#ifndef atomic64_fetch_add_acquire
-static inline s64
-atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_add_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
-#endif
-
-#ifndef atomic64_fetch_add_release
-static inline s64
-atomic64_fetch_add_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_add_relaxed(i, v);
-}
-#define atomic64_fetch_add_release atomic64_fetch_add_release
-#endif
-
-#ifndef atomic64_fetch_add
-static inline s64
-atomic64_fetch_add(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_add_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_add atomic64_fetch_add
-#endif
-
-#endif /* atomic64_fetch_add_relaxed */
-
-#ifndef atomic64_sub_return_relaxed
-#define atomic64_sub_return_acquire atomic64_sub_return
-#define atomic64_sub_return_release atomic64_sub_return
-#define atomic64_sub_return_relaxed atomic64_sub_return
-#else /* atomic64_sub_return_relaxed */
-
-#ifndef atomic64_sub_return_acquire
-static inline s64
-atomic64_sub_return_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_sub_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_sub_return_acquire atomic64_sub_return_acquire
-#endif
-
-#ifndef atomic64_sub_return_release
-static inline s64
-atomic64_sub_return_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_sub_return_relaxed(i, v);
-}
-#define atomic64_sub_return_release atomic64_sub_return_release
-#endif
-
-#ifndef atomic64_sub_return
-static inline s64
-atomic64_sub_return(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_sub_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_sub_return atomic64_sub_return
-#endif
-
-#endif /* atomic64_sub_return_relaxed */
-
-#ifndef atomic64_fetch_sub_relaxed
-#define atomic64_fetch_sub_acquire atomic64_fetch_sub
-#define atomic64_fetch_sub_release atomic64_fetch_sub
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub
-#else /* atomic64_fetch_sub_relaxed */
-
-#ifndef atomic64_fetch_sub_acquire
-static inline s64
-atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_sub_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
-#endif
-
-#ifndef atomic64_fetch_sub_release
-static inline s64
-atomic64_fetch_sub_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_sub_relaxed(i, v);
-}
-#define atomic64_fetch_sub_release atomic64_fetch_sub_release
-#endif
-
-#ifndef atomic64_fetch_sub
-static inline s64
-atomic64_fetch_sub(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_sub_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_sub atomic64_fetch_sub
-#endif
-
-#endif /* atomic64_fetch_sub_relaxed */
-
-#ifndef atomic64_inc
-static inline void
-atomic64_inc(atomic64_t *v)
-{
- atomic64_add(1, v);
-}
-#define atomic64_inc atomic64_inc
-#endif
-
-#ifndef atomic64_inc_return_relaxed
-#ifdef atomic64_inc_return
-#define atomic64_inc_return_acquire atomic64_inc_return
-#define atomic64_inc_return_release atomic64_inc_return
-#define atomic64_inc_return_relaxed atomic64_inc_return
-#endif /* atomic64_inc_return */
-
-#ifndef atomic64_inc_return
-static inline s64
-atomic64_inc_return(atomic64_t *v)
-{
- return atomic64_add_return(1, v);
-}
-#define atomic64_inc_return atomic64_inc_return
-#endif
-
-#ifndef atomic64_inc_return_acquire
-static inline s64
-atomic64_inc_return_acquire(atomic64_t *v)
-{
- return atomic64_add_return_acquire(1, v);
-}
-#define atomic64_inc_return_acquire atomic64_inc_return_acquire
-#endif
-
-#ifndef atomic64_inc_return_release
-static inline s64
-atomic64_inc_return_release(atomic64_t *v)
-{
- return atomic64_add_return_release(1, v);
-}
-#define atomic64_inc_return_release atomic64_inc_return_release
-#endif
-
-#ifndef atomic64_inc_return_relaxed
-static inline s64
-atomic64_inc_return_relaxed(atomic64_t *v)
-{
- return atomic64_add_return_relaxed(1, v);
-}
-#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-#endif
-
-#else /* atomic64_inc_return_relaxed */
-
-#ifndef atomic64_inc_return_acquire
-static inline s64
-atomic64_inc_return_acquire(atomic64_t *v)
-{
- s64 ret = atomic64_inc_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_inc_return_acquire atomic64_inc_return_acquire
-#endif
-
-#ifndef atomic64_inc_return_release
-static inline s64
-atomic64_inc_return_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_inc_return_relaxed(v);
-}
-#define atomic64_inc_return_release atomic64_inc_return_release
-#endif
-
-#ifndef atomic64_inc_return
-static inline s64
-atomic64_inc_return(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_inc_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_inc_return atomic64_inc_return
-#endif
-
-#endif /* atomic64_inc_return_relaxed */
-
-#ifndef atomic64_fetch_inc_relaxed
-#ifdef atomic64_fetch_inc
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc
-#define atomic64_fetch_inc_release atomic64_fetch_inc
-#define atomic64_fetch_inc_relaxed atomic64_fetch_inc
-#endif /* atomic64_fetch_inc */
-
-#ifndef atomic64_fetch_inc
-static inline s64
-atomic64_fetch_inc(atomic64_t *v)
-{
- return atomic64_fetch_add(1, v);
-}
-#define atomic64_fetch_inc atomic64_fetch_inc
-#endif
-
-#ifndef atomic64_fetch_inc_acquire
-static inline s64
-atomic64_fetch_inc_acquire(atomic64_t *v)
-{
- return atomic64_fetch_add_acquire(1, v);
-}
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
-#endif
-
-#ifndef atomic64_fetch_inc_release
-static inline s64
-atomic64_fetch_inc_release(atomic64_t *v)
-{
- return atomic64_fetch_add_release(1, v);
-}
-#define atomic64_fetch_inc_release atomic64_fetch_inc_release
-#endif
-
-#ifndef atomic64_fetch_inc_relaxed
-static inline s64
-atomic64_fetch_inc_relaxed(atomic64_t *v)
-{
- return atomic64_fetch_add_relaxed(1, v);
-}
-#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
-#endif
-
-#else /* atomic64_fetch_inc_relaxed */
-
-#ifndef atomic64_fetch_inc_acquire
-static inline s64
-atomic64_fetch_inc_acquire(atomic64_t *v)
-{
- s64 ret = atomic64_fetch_inc_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
-#endif
-
-#ifndef atomic64_fetch_inc_release
-static inline s64
-atomic64_fetch_inc_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_inc_relaxed(v);
-}
-#define atomic64_fetch_inc_release atomic64_fetch_inc_release
-#endif
-
-#ifndef atomic64_fetch_inc
-static inline s64
-atomic64_fetch_inc(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_inc_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_inc atomic64_fetch_inc
-#endif
-
-#endif /* atomic64_fetch_inc_relaxed */
-
-#ifndef atomic64_dec
-static inline void
-atomic64_dec(atomic64_t *v)
-{
- atomic64_sub(1, v);
-}
-#define atomic64_dec atomic64_dec
-#endif
-
-#ifndef atomic64_dec_return_relaxed
-#ifdef atomic64_dec_return
-#define atomic64_dec_return_acquire atomic64_dec_return
-#define atomic64_dec_return_release atomic64_dec_return
-#define atomic64_dec_return_relaxed atomic64_dec_return
-#endif /* atomic64_dec_return */
-
-#ifndef atomic64_dec_return
-static inline s64
-atomic64_dec_return(atomic64_t *v)
-{
- return atomic64_sub_return(1, v);
-}
-#define atomic64_dec_return atomic64_dec_return
-#endif
-
-#ifndef atomic64_dec_return_acquire
-static inline s64
-atomic64_dec_return_acquire(atomic64_t *v)
-{
- return atomic64_sub_return_acquire(1, v);
-}
-#define atomic64_dec_return_acquire atomic64_dec_return_acquire
-#endif
-
-#ifndef atomic64_dec_return_release
-static inline s64
-atomic64_dec_return_release(atomic64_t *v)
-{
- return atomic64_sub_return_release(1, v);
-}
-#define atomic64_dec_return_release atomic64_dec_return_release
-#endif
-
-#ifndef atomic64_dec_return_relaxed
-static inline s64
-atomic64_dec_return_relaxed(atomic64_t *v)
-{
- return atomic64_sub_return_relaxed(1, v);
-}
-#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
-#endif
-
-#else /* atomic64_dec_return_relaxed */
-
-#ifndef atomic64_dec_return_acquire
-static inline s64
-atomic64_dec_return_acquire(atomic64_t *v)
-{
- s64 ret = atomic64_dec_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_dec_return_acquire atomic64_dec_return_acquire
-#endif
-
-#ifndef atomic64_dec_return_release
-static inline s64
-atomic64_dec_return_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_dec_return_relaxed(v);
-}
-#define atomic64_dec_return_release atomic64_dec_return_release
-#endif
-
-#ifndef atomic64_dec_return
-static inline s64
-atomic64_dec_return(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_dec_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_dec_return atomic64_dec_return
-#endif
-
-#endif /* atomic64_dec_return_relaxed */
-
-#ifndef atomic64_fetch_dec_relaxed
-#ifdef atomic64_fetch_dec
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec
-#define atomic64_fetch_dec_release atomic64_fetch_dec
-#define atomic64_fetch_dec_relaxed atomic64_fetch_dec
-#endif /* atomic64_fetch_dec */
-
-#ifndef atomic64_fetch_dec
-static inline s64
-atomic64_fetch_dec(atomic64_t *v)
-{
- return atomic64_fetch_sub(1, v);
-}
-#define atomic64_fetch_dec atomic64_fetch_dec
-#endif
-
-#ifndef atomic64_fetch_dec_acquire
-static inline s64
-atomic64_fetch_dec_acquire(atomic64_t *v)
-{
- return atomic64_fetch_sub_acquire(1, v);
-}
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
-#endif
-
-#ifndef atomic64_fetch_dec_release
-static inline s64
-atomic64_fetch_dec_release(atomic64_t *v)
-{
- return atomic64_fetch_sub_release(1, v);
-}
-#define atomic64_fetch_dec_release atomic64_fetch_dec_release
-#endif
-
-#ifndef atomic64_fetch_dec_relaxed
-static inline s64
-atomic64_fetch_dec_relaxed(atomic64_t *v)
-{
- return atomic64_fetch_sub_relaxed(1, v);
-}
-#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
-#endif
-
-#else /* atomic64_fetch_dec_relaxed */
-
-#ifndef atomic64_fetch_dec_acquire
-static inline s64
-atomic64_fetch_dec_acquire(atomic64_t *v)
-{
- s64 ret = atomic64_fetch_dec_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
-#endif
-
-#ifndef atomic64_fetch_dec_release
-static inline s64
-atomic64_fetch_dec_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_dec_relaxed(v);
-}
-#define atomic64_fetch_dec_release atomic64_fetch_dec_release
-#endif
-
-#ifndef atomic64_fetch_dec
-static inline s64
-atomic64_fetch_dec(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_dec_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_dec atomic64_fetch_dec
-#endif
-
-#endif /* atomic64_fetch_dec_relaxed */
-
-#ifndef atomic64_fetch_and_relaxed
-#define atomic64_fetch_and_acquire atomic64_fetch_and
-#define atomic64_fetch_and_release atomic64_fetch_and
-#define atomic64_fetch_and_relaxed atomic64_fetch_and
-#else /* atomic64_fetch_and_relaxed */
-
-#ifndef atomic64_fetch_and_acquire
-static inline s64
-atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_and_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
-#endif
-
-#ifndef atomic64_fetch_and_release
-static inline s64
-atomic64_fetch_and_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_and_relaxed(i, v);
-}
-#define atomic64_fetch_and_release atomic64_fetch_and_release
-#endif
-
-#ifndef atomic64_fetch_and
-static inline s64
-atomic64_fetch_and(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_and_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_and atomic64_fetch_and
-#endif
-
-#endif /* atomic64_fetch_and_relaxed */
-
-#ifndef atomic64_andnot
-static inline void
-atomic64_andnot(s64 i, atomic64_t *v)
-{
- atomic64_and(~i, v);
-}
-#define atomic64_andnot atomic64_andnot
-#endif
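
atomic64_andnot(i, v) is defined above as atomic64_and(~i, v), i.e. an atomic clear of the bits set in @i. A small compilable C11 usage sketch (the flag names and clear_flags() are illustrative, not part of this patch):

    #include <stdatomic.h>

    #define FLAG_BUSY   (1L << 0)
    #define FLAG_DIRTY  (1L << 1)

    /* andnot(mask, flags): atomically clear the bits set in mask. */
    static void clear_flags(_Atomic long *flags, long mask)
    {
            atomic_fetch_and_explicit(flags, ~mask, memory_order_seq_cst);
    }

    /* e.g. clear_flags(&f, FLAG_BUSY | FLAG_DIRTY); */
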
-
-#ifndef atomic64_fetch_andnot_relaxed
-#ifdef atomic64_fetch_andnot
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
-#endif /* atomic64_fetch_andnot */
-
-#ifndef atomic64_fetch_andnot
-static inline s64
-atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
- return atomic64_fetch_and(~i, v);
-}
-#define atomic64_fetch_andnot atomic64_fetch_andnot
-#endif
-
-#ifndef atomic64_fetch_andnot_acquire
-static inline s64
-atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
- return atomic64_fetch_and_acquire(~i, v);
-}
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
-#endif
-
-#ifndef atomic64_fetch_andnot_release
-static inline s64
-atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
- return atomic64_fetch_and_release(~i, v);
-}
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
-#endif
-
-#ifndef atomic64_fetch_andnot_relaxed
-static inline s64
-atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
-{
- return atomic64_fetch_and_relaxed(~i, v);
-}
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#endif
-
-#else /* atomic64_fetch_andnot_relaxed */
-
-#ifndef atomic64_fetch_andnot_acquire
-static inline s64
-atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_andnot_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
-#endif
-
-#ifndef atomic64_fetch_andnot_release
-static inline s64
-atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_andnot_relaxed(i, v);
-}
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
-#endif
-
-#ifndef atomic64_fetch_andnot
-static inline s64
-atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_andnot_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_andnot atomic64_fetch_andnot
-#endif
-
-#endif /* atomic64_fetch_andnot_relaxed */
-
-#ifndef atomic64_fetch_or_relaxed
-#define atomic64_fetch_or_acquire atomic64_fetch_or
-#define atomic64_fetch_or_release atomic64_fetch_or
-#define atomic64_fetch_or_relaxed atomic64_fetch_or
-#else /* atomic64_fetch_or_relaxed */
-
-#ifndef atomic64_fetch_or_acquire
-static inline s64
-atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_or_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
-#endif
-
-#ifndef atomic64_fetch_or_release
-static inline s64
-atomic64_fetch_or_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_or_relaxed(i, v);
-}
-#define atomic64_fetch_or_release atomic64_fetch_or_release
-#endif
-
-#ifndef atomic64_fetch_or
-static inline s64
-atomic64_fetch_or(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_or_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_or atomic64_fetch_or
-#endif
-
-#endif /* atomic64_fetch_or_relaxed */
-
-#ifndef atomic64_fetch_xor_relaxed
-#define atomic64_fetch_xor_acquire atomic64_fetch_xor
-#define atomic64_fetch_xor_release atomic64_fetch_xor
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor
-#else /* atomic64_fetch_xor_relaxed */
-
-#ifndef atomic64_fetch_xor_acquire
-static inline s64
-atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_xor_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
-#endif
-
-#ifndef atomic64_fetch_xor_release
-static inline s64
-atomic64_fetch_xor_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_xor_relaxed(i, v);
-}
-#define atomic64_fetch_xor_release atomic64_fetch_xor_release
-#endif
-
-#ifndef atomic64_fetch_xor
-static inline s64
-atomic64_fetch_xor(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_xor_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_xor atomic64_fetch_xor
-#endif
-
-#endif /* atomic64_fetch_xor_relaxed */
-
-#ifndef atomic64_xchg_relaxed
-#define atomic64_xchg_acquire atomic64_xchg
-#define atomic64_xchg_release atomic64_xchg
-#define atomic64_xchg_relaxed atomic64_xchg
-#else /* atomic64_xchg_relaxed */
-
-#ifndef atomic64_xchg_acquire
-static inline s64
-atomic64_xchg_acquire(atomic64_t *v, s64 i)
-{
- s64 ret = atomic64_xchg_relaxed(v, i);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_xchg_acquire atomic64_xchg_acquire
-#endif
-
-#ifndef atomic64_xchg_release
-static inline s64
-atomic64_xchg_release(atomic64_t *v, s64 i)
-{
- __atomic_release_fence();
- return atomic64_xchg_relaxed(v, i);
-}
-#define atomic64_xchg_release atomic64_xchg_release
-#endif
-
-#ifndef atomic64_xchg
-static inline s64
-atomic64_xchg(atomic64_t *v, s64 i)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_xchg_relaxed(v, i);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_xchg atomic64_xchg
-#endif
-
-#endif /* atomic64_xchg_relaxed */
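
A common use of xchg() is to atomically take a whole word while leaving a sentinel behind, so the consumer and concurrent producers never observe the same state twice. A hedged C11 sketch (steal_pending() is an illustrative name, not kernel API):

    #include <stdatomic.h>

    /* Atomically take ownership of a pending-work bitmask, leaving
     * zero behind; each pending bit is consumed exactly once. */
    static long steal_pending(_Atomic long *pending)
    {
            return atomic_exchange_explicit(pending, 0,
                                            memory_order_acq_rel);
    }
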
-
-#ifndef atomic64_cmpxchg_relaxed
-#define atomic64_cmpxchg_acquire atomic64_cmpxchg
-#define atomic64_cmpxchg_release atomic64_cmpxchg
-#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
-#else /* atomic64_cmpxchg_relaxed */
-
-#ifndef atomic64_cmpxchg_acquire
-static inline s64
-atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
-{
- s64 ret = atomic64_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
-#endif
-
-#ifndef atomic64_cmpxchg_release
-static inline s64
-atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
-{
- __atomic_release_fence();
- return atomic64_cmpxchg_relaxed(v, old, new);
-}
-#define atomic64_cmpxchg_release atomic64_cmpxchg_release
-#endif
-
-#ifndef atomic64_cmpxchg
-static inline s64
-atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_cmpxchg atomic64_cmpxchg
-#endif
-
-#endif /* atomic64_cmpxchg_relaxed */
-
-#ifndef atomic64_try_cmpxchg_relaxed
-#ifdef atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg
-#endif /* atomic64_try_cmpxchg */
-
-#ifndef atomic64_try_cmpxchg
-static inline bool
-atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = atomic64_cmpxchg(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic64_try_cmpxchg atomic64_try_cmpxchg
-#endif
-
-#ifndef atomic64_try_cmpxchg_acquire
-static inline bool
-atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = atomic64_cmpxchg_acquire(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
-#endif
-
-#ifndef atomic64_try_cmpxchg_release
-static inline bool
-atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = atomic64_cmpxchg_release(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
-#endif
-
-#ifndef atomic64_try_cmpxchg_relaxed
-static inline bool
-atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = atomic64_cmpxchg_relaxed(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
-#endif
-
-#else /* atomic64_try_cmpxchg_relaxed */
-
-#ifndef atomic64_try_cmpxchg_acquire
-static inline bool
-atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
- bool ret = atomic64_try_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
-#endif
-
-#ifndef atomic64_try_cmpxchg_release
-static inline bool
-atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
- __atomic_release_fence();
- return atomic64_try_cmpxchg_relaxed(v, old, new);
-}
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
-#endif
-
-#ifndef atomic64_try_cmpxchg
-static inline bool
-atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
- bool ret;
- __atomic_pre_full_fence();
- ret = atomic64_try_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_try_cmpxchg atomic64_try_cmpxchg
-#endif
-
-#endif /* atomic64_try_cmpxchg_relaxed */
-
-#ifndef atomic64_sub_and_test
-/**
- * atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static inline bool
-atomic64_sub_and_test(s64 i, atomic64_t *v)
-{
- return atomic64_sub_return(i, v) == 0;
-}
-#define atomic64_sub_and_test atomic64_sub_and_test
-#endif
-
-#ifndef atomic64_dec_and_test
-/**
- * atomic64_dec_and_test - decrement and test
- * @v: pointer of type atomic64_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static inline bool
-atomic64_dec_and_test(atomic64_t *v)
-{
- return atomic64_dec_return(v) == 0;
-}
-#define atomic64_dec_and_test atomic64_dec_and_test
-#endif
-
-#ifndef atomic64_inc_and_test
-/**
- * atomic64_inc_and_test - increment and test
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static inline bool
-atomic64_inc_and_test(atomic64_t *v)
-{
- return atomic64_inc_return(v) == 0;
-}
-#define atomic64_inc_and_test atomic64_inc_and_test
-#endif
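
The *_and_test helpers above fold the "did we hit zero?" check into the atomic operation, which is exactly what reference counting needs. A minimal C11 sketch of the dec_and_test put-side idiom (struct obj and obj_put() are illustrative names, not part of this patch):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct obj {
            _Atomic long refs;
            /* ... payload ... */
    };

    /* The caller that drops the count to zero is the unique owner and
     * may free the object; the release/acquire pairing orders earlier
     * accesses to the object before the free. */
    static void obj_put(struct obj *o)
    {
            if (atomic_fetch_sub_explicit(&o->refs, 1,
                                          memory_order_release) == 1) {
                    atomic_thread_fence(memory_order_acquire);
                    free(o);
            }
    }
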
-
-#ifndef atomic64_add_negative
-/**
- * atomic64_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic64_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static inline bool
-atomic64_add_negative(s64 i, atomic64_t *v)
-{
- return atomic64_add_return(i, v) < 0;
-}
-#define atomic64_add_negative atomic64_add_negative
-#endif
-
-#ifndef atomic64_fetch_add_unless
-/**
- * atomic64_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns original value of @v
- */
-static inline s64
-atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- s64 c = atomic64_read(v);
-
- do {
- if (unlikely(c == u))
- break;
- } while (!atomic64_try_cmpxchg(v, &c, c + a));
-
- return c;
-}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
-#endif
-
-#ifndef atomic64_add_unless
-/**
- * atomic64_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static inline bool
-atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- return atomic64_fetch_add_unless(v, a, u) != u;
-}
-#define atomic64_add_unless atomic64_add_unless
-#endif
-
-#ifndef atomic64_inc_not_zero
-/**
- * atomic64_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-static inline bool
-atomic64_inc_not_zero(atomic64_t *v)
-{
- return atomic64_add_unless(v, 1, 0);
-}
-#define atomic64_inc_not_zero atomic64_inc_not_zero
-#endif
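
fetch_add_unless() above is the building block for conditional references: inc_not_zero() is just "add 1 unless the count is 0". A compilable C11 reconstruction of both (function names here are illustrative userspace stand-ins, not kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>

    static long fetch_add_unless(_Atomic long *v, long a, long u)
    {
            long c = atomic_load_explicit(v, memory_order_relaxed);

            do {
                    if (c == u)
                            break;  /* sentinel hit: do not write */
            } while (!atomic_compare_exchange_weak_explicit(v, &c, c + a,
                                            memory_order_seq_cst,
                                            memory_order_relaxed));

            return c;   /* value observed before any addition */
    }

    /* Take a reference only while the object is still live. */
    static bool inc_not_zero(_Atomic long *refs)
    {
            return fetch_add_unless(refs, 1, 0) != 0;
    }
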
-
-#ifndef atomic64_inc_unless_negative
-static inline bool
-atomic64_inc_unless_negative(atomic64_t *v)
-{
- s64 c = atomic64_read(v);
-
- do {
- if (unlikely(c < 0))
- return false;
- } while (!atomic64_try_cmpxchg(v, &c, c + 1));
-
- return true;
-}
-#define atomic64_inc_unless_negative atomic64_inc_unless_negative
-#endif
-
-#ifndef atomic64_dec_unless_positive
-static inline bool
-atomic64_dec_unless_positive(atomic64_t *v)
-{
- s64 c = atomic64_read(v);
-
- do {
- if (unlikely(c > 0))
- return false;
- } while (!atomic64_try_cmpxchg(v, &c, c - 1));
-
- return true;
-}
-#define atomic64_dec_unless_positive atomic64_dec_unless_positive
-#endif
-
-#ifndef atomic64_dec_if_positive
-static inline s64
-atomic64_dec_if_positive(atomic64_t *v)
-{
- s64 dec, c = atomic64_read(v);
-
- do {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- } while (!atomic64_try_cmpxchg(v, &c, dec));
-
- return dec;
-}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
-#endif
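
dec_if_positive() above shows the canonical try_cmpxchg loop: read once, compute the new value, retry only on contention, and bail out without writing when the update would be invalid. A self-contained C11 analogue (this dec_if_positive() is a userspace stand-in, not the kernel function; C11's weak compare-exchange updates the expected value on failure, just like try_cmpxchg):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static int64_t dec_if_positive(_Atomic int64_t *v)
    {
            int64_t dec, c = atomic_load_explicit(v, memory_order_relaxed);

            do {
                    dec = c - 1;
                    if (dec < 0)
                            break;  /* would go negative: leave *v untouched */
            } while (!atomic_compare_exchange_weak_explicit(v, &c, dec,
                                            memory_order_seq_cst,
                                            memory_order_relaxed));

            return dec; /* new value, or negative if nothing was written */
    }

    int main(void)
    {
            _Atomic int64_t sem = 1;

            printf("%lld\n", (long long)dec_if_positive(&sem));    /* 0  */
            printf("%lld\n", (long long)dec_if_positive(&sem));    /* -1 */
            return 0;
    }
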
-
-#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
-#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
-
-#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 25de4a2804d70f57e994fe3b419148658bb5378a
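
The cond_read macros removed just above wrap smp_cond_load_acquire()/smp_cond_load_relaxed(): spin until a condition on the freshly loaded counter holds, with the winning load providing (or not) acquire ordering. A minimal userspace sketch of the acquire flavour, substituting a plain C11 acquire load for smp_cond_load_acquire() (wait_until_zero() is an illustrative name):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Stand-in for atomic64_cond_read_acquire(v, VAL == 0): each load
     * carries acquire ordering, so the read that finally sees zero
     * orders all later accesses after the releasing store it observed. */
    static void wait_until_zero(_Atomic int64_t *v)
    {
            while (atomic_load_explicit(v, memory_order_acquire) != 0)
                    ;   /* a real caller would also yield or pause here */
    }
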
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 4c0d009a46f0..8dd57c3a99e9 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -25,6 +25,12 @@
* See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
*/
+#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
+
+#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
+
/*
* The idea here is to build acquire/release variants by adding explicit
* barriers on top of the relaxed variant. In the case where the relaxed
@@ -71,8 +77,8 @@
__ret; \
})
-#include <linux/atomic-fallback.h>
-
-#include <asm-generic/atomic-long.h>
+#include <linux/atomic/atomic-arch-fallback.h>
+#include <linux/atomic/atomic-long.h>
+#include <linux/atomic/atomic-instrumented.h>
#endif /* _LINUX_ATOMIC_H */
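
The comment in the hunk above describes the whole fallback scheme: acquire/release variants are layered on top of the relaxed primitive with explicit barriers. A compilable C11 sketch of that layering (the kernel versions use __atomic_acquire_fence()/__atomic_release_fence() rather than C11 fences, so this is an analogue, not the kernel implementation):

    #include <stdatomic.h>

    /* Acquire variant: relaxed primitive, then an acquire fence. */
    static int fetch_add_acquire(_Atomic int *v, int i)
    {
            int ret = atomic_fetch_add_explicit(v, i, memory_order_relaxed);

            atomic_thread_fence(memory_order_acquire);
            return ret;
    }

    /* Release variant: a release fence, then the relaxed primitive. */
    static int fetch_add_release(_Atomic int *v, int i)
    {
            atomic_thread_fence(memory_order_release);
            return atomic_fetch_add_explicit(v, i, memory_order_relaxed);
    }
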
diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
new file mode 100644
index 000000000000..77bc5522e61c
--- /dev/null
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -0,0 +1,2459 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-fallback.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_FALLBACK_H
+#define _LINUX_ATOMIC_FALLBACK_H
+
+#include <linux/compiler.h>
+
+#ifndef arch_xchg_relaxed
+#define arch_xchg_acquire arch_xchg
+#define arch_xchg_release arch_xchg
+#define arch_xchg_relaxed arch_xchg
+#else /* arch_xchg_relaxed */
+
+#ifndef arch_xchg_acquire
+#define arch_xchg_acquire(...) \
+ __atomic_op_acquire(arch_xchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_xchg_release
+#define arch_xchg_release(...) \
+ __atomic_op_release(arch_xchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_xchg
+#define arch_xchg(...) \
+ __atomic_op_fence(arch_xchg, __VA_ARGS__)
+#endif
+
+#endif /* arch_xchg_relaxed */
+
+#ifndef arch_cmpxchg_relaxed
+#define arch_cmpxchg_acquire arch_cmpxchg
+#define arch_cmpxchg_release arch_cmpxchg
+#define arch_cmpxchg_relaxed arch_cmpxchg
+#else /* arch_cmpxchg_relaxed */
+
+#ifndef arch_cmpxchg_acquire
+#define arch_cmpxchg_acquire(...) \
+ __atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg_release
+#define arch_cmpxchg_release(...) \
+ __atomic_op_release(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg
+#define arch_cmpxchg(...) \
+ __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#endif /* arch_cmpxchg_relaxed */
+
+#ifndef arch_cmpxchg64_relaxed
+#define arch_cmpxchg64_acquire arch_cmpxchg64
+#define arch_cmpxchg64_release arch_cmpxchg64
+#define arch_cmpxchg64_relaxed arch_cmpxchg64
+#else /* arch_cmpxchg64_relaxed */
+
+#ifndef arch_cmpxchg64_acquire
+#define arch_cmpxchg64_acquire(...) \
+ __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg64_release
+#define arch_cmpxchg64_release(...) \
+ __atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg64
+#define arch_cmpxchg64(...) \
+ __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
+#endif
+
+#endif /* arch_cmpxchg64_relaxed */
+
+#ifndef arch_try_cmpxchg_relaxed
+#ifdef arch_try_cmpxchg
+#define arch_try_cmpxchg_acquire arch_try_cmpxchg
+#define arch_try_cmpxchg_release arch_try_cmpxchg
+#define arch_try_cmpxchg_relaxed arch_try_cmpxchg
+#endif /* arch_try_cmpxchg */
+
+#ifndef arch_try_cmpxchg
+#define arch_try_cmpxchg(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = arch_cmpxchg((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg */
+
+#ifndef arch_try_cmpxchg_acquire
+#define arch_try_cmpxchg_acquire(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = arch_cmpxchg_acquire((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg_acquire */
+
+#ifndef arch_try_cmpxchg_release
+#define arch_try_cmpxchg_release(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = arch_cmpxchg_release((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg_release */
+
+#ifndef arch_try_cmpxchg_relaxed
+#define arch_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = arch_cmpxchg_relaxed((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg_relaxed */
+
+#else /* arch_try_cmpxchg_relaxed */
+
+#ifndef arch_try_cmpxchg_acquire
+#define arch_try_cmpxchg_acquire(...) \
+ __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_try_cmpxchg_release
+#define arch_try_cmpxchg_release(...) \
+ __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_try_cmpxchg
+#define arch_try_cmpxchg(...) \
+ __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
+#endif
+
+#endif /* arch_try_cmpxchg_relaxed */
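
The try_cmpxchg fallbacks above convert cmpxchg()'s "return the observed value" contract into a boolean plus an updated expected value, which is what makes the do/while CAS loops elsewhere in this file so compact. A hedged userspace reconstruction of that conversion in C11 (cmpxchg_int()/try_cmpxchg_int() are illustrative names, not kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Plain cmpxchg: returns the value actually observed in *ptr. */
    static int cmpxchg_int(_Atomic int *ptr, int old, int new)
    {
            atomic_compare_exchange_strong_explicit(ptr, &old, new,
                                                    memory_order_seq_cst,
                                                    memory_order_seq_cst);
            return old; /* on failure, C11 wrote the observed value here */
    }

    /* try_cmpxchg built on top of it, mirroring the fallback macro:
     * report success as bool, write the observed value back via oldp. */
    static bool try_cmpxchg_int(_Atomic int *ptr, int *oldp, int new)
    {
            int r = cmpxchg_int(ptr, *oldp, new);

            if (r != *oldp) {
                    *oldp = r;
                    return false;
            }
            return true;
    }
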
+
+#ifndef arch_try_cmpxchg64_relaxed
+#ifdef arch_try_cmpxchg64
+#define arch_try_cmpxchg64_acquire arch_try_cmpxchg64
+#define arch_try_cmpxchg64_release arch_try_cmpxchg64
+#define arch_try_cmpxchg64_relaxed arch_try_cmpxchg64
+#endif /* arch_try_cmpxchg64 */
+
+#ifndef arch_try_cmpxchg64
+#define arch_try_cmpxchg64(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = arch_cmpxchg64((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg64 */
+
+#ifndef arch_try_cmpxchg64_acquire
+#define arch_try_cmpxchg64_acquire(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = arch_cmpxchg64_acquire((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg64_acquire */
+
+#ifndef arch_try_cmpxchg64_release
+#define arch_try_cmpxchg64_release(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = arch_cmpxchg64_release((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg64_release */
+
+#ifndef arch_try_cmpxchg64_relaxed
+#define arch_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = arch_cmpxchg64_relaxed((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg64_relaxed */
+
+#else /* arch_try_cmpxchg64_relaxed */
+
+#ifndef arch_try_cmpxchg64_acquire
+#define arch_try_cmpxchg64_acquire(...) \
+ __atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef arch_try_cmpxchg64_release
+#define arch_try_cmpxchg64_release(...) \
+ __atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef arch_try_cmpxchg64
+#define arch_try_cmpxchg64(...) \
+ __atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__)
+#endif
+
+#endif /* arch_try_cmpxchg64_relaxed */
+
+#ifndef arch_atomic_read_acquire
+static __always_inline int
+arch_atomic_read_acquire(const atomic_t *v)
+{
+ int ret;
+
+ if (__native_word(atomic_t)) {
+ ret = smp_load_acquire(&(v)->counter);
+ } else {
+ ret = arch_atomic_read(v);
+ __atomic_acquire_fence();
+ }
+
+ return ret;
+}
+#define arch_atomic_read_acquire arch_atomic_read_acquire
+#endif
+
+#ifndef arch_atomic_set_release
+static __always_inline void
+arch_atomic_set_release(atomic_t *v, int i)
+{
+ if (__native_word(atomic_t)) {
+ smp_store_release(&(v)->counter, i);
+ } else {
+ __atomic_release_fence();
+ arch_atomic_set(v, i);
+ }
+}
+#define arch_atomic_set_release arch_atomic_set_release
+#endif
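
arch_atomic_read_acquire() and arch_atomic_set_release() above form the one-way-barrier pair used for publish/consume handoffs. A minimal C11 analogue of that pattern (producer()/consumer() and the ready flag are illustrative, assuming a single producer):

    #include <stdatomic.h>

    static int payload;
    static _Atomic int ready;

    /* Producer: publish the payload, then set the flag with RELEASE
     * ordering, mirroring arch_atomic_set_release(). */
    static void producer(void)
    {
            payload = 42;
            atomic_store_explicit(&ready, 1, memory_order_release);
    }

    /* Consumer: read the flag with ACQUIRE ordering, mirroring
     * arch_atomic_read_acquire(); a nonzero result guarantees the
     * payload store is visible. */
    static int consumer(void)
    {
            if (atomic_load_explicit(&ready, memory_order_acquire))
                    return payload;
            return -1;  /* not published yet */
    }
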
+
+#ifndef arch_atomic_add_return_relaxed
+#define arch_atomic_add_return_acquire arch_atomic_add_return
+#define arch_atomic_add_return_release arch_atomic_add_return
+#define arch_atomic_add_return_relaxed arch_atomic_add_return
+#else /* arch_atomic_add_return_relaxed */
+
+#ifndef arch_atomic_add_return_acquire
+static __always_inline int
+arch_atomic_add_return_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_add_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
+#endif
+
+#ifndef arch_atomic_add_return_release
+static __always_inline int
+arch_atomic_add_return_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_add_return_relaxed(i, v);
+}
+#define arch_atomic_add_return_release arch_atomic_add_return_release
+#endif
+
+#ifndef arch_atomic_add_return
+static __always_inline int
+arch_atomic_add_return(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_add_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_add_return arch_atomic_add_return
+#endif
+
+#endif /* arch_atomic_add_return_relaxed */
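
The fully ordered fallback above brackets the relaxed operation with __atomic_pre_full_fence()/__atomic_post_full_fence(). A conservative C11 sketch of the same construction, standing in seq_cst fences for the kernel's full barriers (an analogue, not the kernel definition):

    #include <stdatomic.h>

    /* Fully ordered add_return: fence, relaxed op, fence. */
    static int add_return_fenced(_Atomic int *v, int i)
    {
            int ret;

            atomic_thread_fence(memory_order_seq_cst);
            ret = atomic_fetch_add_explicit(v, i, memory_order_relaxed) + i;
            atomic_thread_fence(memory_order_seq_cst);
            return ret; /* add_return yields the new value, hence + i */
    }
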
+
+#ifndef arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
+#define arch_atomic_fetch_add_release arch_atomic_fetch_add
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add
+#else /* arch_atomic_fetch_add_relaxed */
+
+#ifndef arch_atomic_fetch_add_acquire
+static __always_inline int
+arch_atomic_fetch_add_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_add_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire
+#endif
+
+#ifndef arch_atomic_fetch_add_release
+static __always_inline int
+arch_atomic_fetch_add_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_add_relaxed(i, v);
+}
+#define arch_atomic_fetch_add_release arch_atomic_fetch_add_release
+#endif
+
+#ifndef arch_atomic_fetch_add
+static __always_inline int
+arch_atomic_fetch_add(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_add_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#endif
+
+#endif /* arch_atomic_fetch_add_relaxed */
+
+#ifndef arch_atomic_sub_return_relaxed
+#define arch_atomic_sub_return_acquire arch_atomic_sub_return
+#define arch_atomic_sub_return_release arch_atomic_sub_return
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return
+#else /* arch_atomic_sub_return_relaxed */
+
+#ifndef arch_atomic_sub_return_acquire
+static __always_inline int
+arch_atomic_sub_return_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_sub_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire
+#endif
+
+#ifndef arch_atomic_sub_return_release
+static __always_inline int
+arch_atomic_sub_return_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_sub_return_relaxed(i, v);
+}
+#define arch_atomic_sub_return_release arch_atomic_sub_return_release
+#endif
+
+#ifndef arch_atomic_sub_return
+static __always_inline int
+arch_atomic_sub_return(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_sub_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_sub_return arch_atomic_sub_return
+#endif
+
+#endif /* arch_atomic_sub_return_relaxed */
+
+#ifndef arch_atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub
+#else /* arch_atomic_fetch_sub_relaxed */
+
+#ifndef arch_atomic_fetch_sub_acquire
+static __always_inline int
+arch_atomic_fetch_sub_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_sub_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire
+#endif
+
+#ifndef arch_atomic_fetch_sub_release
+static __always_inline int
+arch_atomic_fetch_sub_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_sub_relaxed(i, v);
+}
+#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release
+#endif
+
+#ifndef arch_atomic_fetch_sub
+static __always_inline int
+arch_atomic_fetch_sub(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_sub_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#endif
+
+#endif /* arch_atomic_fetch_sub_relaxed */
+
+#ifndef arch_atomic_inc
+static __always_inline void
+arch_atomic_inc(atomic_t *v)
+{
+ arch_atomic_add(1, v);
+}
+#define arch_atomic_inc arch_atomic_inc
+#endif
+
+#ifndef arch_atomic_inc_return_relaxed
+#ifdef arch_atomic_inc_return
+#define arch_atomic_inc_return_acquire arch_atomic_inc_return
+#define arch_atomic_inc_return_release arch_atomic_inc_return
+#define arch_atomic_inc_return_relaxed arch_atomic_inc_return
+#endif /* arch_atomic_inc_return */
+
+#ifndef arch_atomic_inc_return
+static __always_inline int
+arch_atomic_inc_return(atomic_t *v)
+{
+ return arch_atomic_add_return(1, v);
+}
+#define arch_atomic_inc_return arch_atomic_inc_return
+#endif
+
+#ifndef arch_atomic_inc_return_acquire
+static __always_inline int
+arch_atomic_inc_return_acquire(atomic_t *v)
+{
+ return arch_atomic_add_return_acquire(1, v);
+}
+#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
+#endif
+
+#ifndef arch_atomic_inc_return_release
+static __always_inline int
+arch_atomic_inc_return_release(atomic_t *v)
+{
+ return arch_atomic_add_return_release(1, v);
+}
+#define arch_atomic_inc_return_release arch_atomic_inc_return_release
+#endif
+
+#ifndef arch_atomic_inc_return_relaxed
+static __always_inline int
+arch_atomic_inc_return_relaxed(atomic_t *v)
+{
+ return arch_atomic_add_return_relaxed(1, v);
+}
+#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
+#endif
+
+#else /* arch_atomic_inc_return_relaxed */
+
+#ifndef arch_atomic_inc_return_acquire
+static __always_inline int
+arch_atomic_inc_return_acquire(atomic_t *v)
+{
+ int ret = arch_atomic_inc_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
+#endif
+
+#ifndef arch_atomic_inc_return_release
+static __always_inline int
+arch_atomic_inc_return_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_inc_return_relaxed(v);
+}
+#define arch_atomic_inc_return_release arch_atomic_inc_return_release
+#endif
+
+#ifndef arch_atomic_inc_return
+static __always_inline int
+arch_atomic_inc_return(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_inc_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_inc_return arch_atomic_inc_return
+#endif
+
+#endif /* arch_atomic_inc_return_relaxed */
+
+#ifndef arch_atomic_fetch_inc_relaxed
+#ifdef arch_atomic_fetch_inc
+#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc
+#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc
+#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc
+#endif /* arch_atomic_fetch_inc */
+
+#ifndef arch_atomic_fetch_inc
+static __always_inline int
+arch_atomic_fetch_inc(atomic_t *v)
+{
+ return arch_atomic_fetch_add(1, v);
+}
+#define arch_atomic_fetch_inc arch_atomic_fetch_inc
+#endif
+
+#ifndef arch_atomic_fetch_inc_acquire
+static __always_inline int
+arch_atomic_fetch_inc_acquire(atomic_t *v)
+{
+ return arch_atomic_fetch_add_acquire(1, v);
+}
+#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic_fetch_inc_release
+static __always_inline int
+arch_atomic_fetch_inc_release(atomic_t *v)
+{
+ return arch_atomic_fetch_add_release(1, v);
+}
+#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
+#endif
+
+#ifndef arch_atomic_fetch_inc_relaxed
+static __always_inline int
+arch_atomic_fetch_inc_relaxed(atomic_t *v)
+{
+ return arch_atomic_fetch_add_relaxed(1, v);
+}
+#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc_relaxed
+#endif
+
+#else /* arch_atomic_fetch_inc_relaxed */
+
+#ifndef arch_atomic_fetch_inc_acquire
+static __always_inline int
+arch_atomic_fetch_inc_acquire(atomic_t *v)
+{
+ int ret = arch_atomic_fetch_inc_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic_fetch_inc_release
+static __always_inline int
+arch_atomic_fetch_inc_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_inc_relaxed(v);
+}
+#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
+#endif
+
+#ifndef arch_atomic_fetch_inc
+static __always_inline int
+arch_atomic_fetch_inc(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_inc_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_inc arch_atomic_fetch_inc
+#endif
+
+#endif /* arch_atomic_fetch_inc_relaxed */
+
+#ifndef arch_atomic_dec
+static __always_inline void
+arch_atomic_dec(atomic_t *v)
+{
+ arch_atomic_sub(1, v);
+}
+#define arch_atomic_dec arch_atomic_dec
+#endif
+
+#ifndef arch_atomic_dec_return_relaxed
+#ifdef arch_atomic_dec_return
+#define arch_atomic_dec_return_acquire arch_atomic_dec_return
+#define arch_atomic_dec_return_release arch_atomic_dec_return
+#define arch_atomic_dec_return_relaxed arch_atomic_dec_return
+#endif /* arch_atomic_dec_return */
+
+#ifndef arch_atomic_dec_return
+static __always_inline int
+arch_atomic_dec_return(atomic_t *v)
+{
+ return arch_atomic_sub_return(1, v);
+}
+#define arch_atomic_dec_return arch_atomic_dec_return
+#endif
+
+#ifndef arch_atomic_dec_return_acquire
+static __always_inline int
+arch_atomic_dec_return_acquire(atomic_t *v)
+{
+ return arch_atomic_sub_return_acquire(1, v);
+}
+#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
+#endif
+
+#ifndef arch_atomic_dec_return_release
+static __always_inline int
+arch_atomic_dec_return_release(atomic_t *v)
+{
+ return arch_atomic_sub_return_release(1, v);
+}
+#define arch_atomic_dec_return_release arch_atomic_dec_return_release
+#endif
+
+#ifndef arch_atomic_dec_return_relaxed
+static __always_inline int
+arch_atomic_dec_return_relaxed(atomic_t *v)
+{
+ return arch_atomic_sub_return_relaxed(1, v);
+}
+#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
+#endif
+
+#else /* arch_atomic_dec_return_relaxed */
+
+#ifndef arch_atomic_dec_return_acquire
+static __always_inline int
+arch_atomic_dec_return_acquire(atomic_t *v)
+{
+ int ret = arch_atomic_dec_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
+#endif
+
+#ifndef arch_atomic_dec_return_release
+static __always_inline int
+arch_atomic_dec_return_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_dec_return_relaxed(v);
+}
+#define arch_atomic_dec_return_release arch_atomic_dec_return_release
+#endif
+
+#ifndef arch_atomic_dec_return
+static __always_inline int
+arch_atomic_dec_return(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_dec_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_dec_return arch_atomic_dec_return
+#endif
+
+#endif /* arch_atomic_dec_return_relaxed */
+
+#ifndef arch_atomic_fetch_dec_relaxed
+#ifdef arch_atomic_fetch_dec
+#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec
+#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec
+#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec
+#endif /* arch_atomic_fetch_dec */
+
+#ifndef arch_atomic_fetch_dec
+static __always_inline int
+arch_atomic_fetch_dec(atomic_t *v)
+{
+ return arch_atomic_fetch_sub(1, v);
+}
+#define arch_atomic_fetch_dec arch_atomic_fetch_dec
+#endif
+
+#ifndef arch_atomic_fetch_dec_acquire
+static __always_inline int
+arch_atomic_fetch_dec_acquire(atomic_t *v)
+{
+ return arch_atomic_fetch_sub_acquire(1, v);
+}
+#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic_fetch_dec_release
+static __always_inline int
+arch_atomic_fetch_dec_release(atomic_t *v)
+{
+ return arch_atomic_fetch_sub_release(1, v);
+}
+#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
+#endif
+
+#ifndef arch_atomic_fetch_dec_relaxed
+static __always_inline int
+arch_atomic_fetch_dec_relaxed(atomic_t *v)
+{
+ return arch_atomic_fetch_sub_relaxed(1, v);
+}
+#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec_relaxed
+#endif
+
+#else /* arch_atomic_fetch_dec_relaxed */
+
+#ifndef arch_atomic_fetch_dec_acquire
+static __always_inline int
+arch_atomic_fetch_dec_acquire(atomic_t *v)
+{
+ int ret = arch_atomic_fetch_dec_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic_fetch_dec_release
+static __always_inline int
+arch_atomic_fetch_dec_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_dec_relaxed(v);
+}
+#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
+#endif
+
+#ifndef arch_atomic_fetch_dec
+static __always_inline int
+arch_atomic_fetch_dec(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_dec_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_dec arch_atomic_fetch_dec
+#endif
+
+#endif /* arch_atomic_fetch_dec_relaxed */
+
+#ifndef arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
+#define arch_atomic_fetch_and_release arch_atomic_fetch_and
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and
+#else /* arch_atomic_fetch_and_relaxed */
+
+#ifndef arch_atomic_fetch_and_acquire
+static __always_inline int
+arch_atomic_fetch_and_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_and_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire
+#endif
+
+#ifndef arch_atomic_fetch_and_release
+static __always_inline int
+arch_atomic_fetch_and_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_and_relaxed(i, v);
+}
+#define arch_atomic_fetch_and_release arch_atomic_fetch_and_release
+#endif
+
+#ifndef arch_atomic_fetch_and
+static __always_inline int
+arch_atomic_fetch_and(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_and_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#endif
+
+#endif /* arch_atomic_fetch_and_relaxed */
+
+#ifndef arch_atomic_andnot
+static __always_inline void
+arch_atomic_andnot(int i, atomic_t *v)
+{
+ arch_atomic_and(~i, v);
+}
+#define arch_atomic_andnot arch_atomic_andnot
+#endif
+
+#ifndef arch_atomic_fetch_andnot_relaxed
+#ifdef arch_atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot
+#endif /* arch_atomic_fetch_andnot */
+
+#ifndef arch_atomic_fetch_andnot
+static __always_inline int
+arch_atomic_fetch_andnot(int i, atomic_t *v)
+{
+ return arch_atomic_fetch_and(~i, v);
+}
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#endif
+
+#ifndef arch_atomic_fetch_andnot_acquire
+static __always_inline int
+arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+ return arch_atomic_fetch_and_acquire(~i, v);
+}
+#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic_fetch_andnot_release
+static __always_inline int
+arch_atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+ return arch_atomic_fetch_and_release(~i, v);
+}
+#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic_fetch_andnot_relaxed
+static __always_inline int
+arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+ return arch_atomic_fetch_and_relaxed(~i, v);
+}
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#endif
+
+#else /* arch_atomic_fetch_andnot_relaxed */
+
+#ifndef arch_atomic_fetch_andnot_acquire
+static __always_inline int
+arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_andnot_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic_fetch_andnot_release
+static __always_inline int
+arch_atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_andnot_relaxed(i, v);
+}
+#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic_fetch_andnot
+static __always_inline int
+arch_atomic_fetch_andnot(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_andnot_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#endif
+
+#endif /* arch_atomic_fetch_andnot_relaxed */
+
+#ifndef arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
+#define arch_atomic_fetch_or_release arch_atomic_fetch_or
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or
+#else /* arch_atomic_fetch_or_relaxed */
+
+#ifndef arch_atomic_fetch_or_acquire
+static __always_inline int
+arch_atomic_fetch_or_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_or_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire
+#endif
+
+#ifndef arch_atomic_fetch_or_release
+static __always_inline int
+arch_atomic_fetch_or_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_or_relaxed(i, v);
+}
+#define arch_atomic_fetch_or_release arch_atomic_fetch_or_release
+#endif
+
+#ifndef arch_atomic_fetch_or
+static __always_inline int
+arch_atomic_fetch_or(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_or_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#endif
+
+#endif /* arch_atomic_fetch_or_relaxed */
+
+#ifndef arch_atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor
+#else /* arch_atomic_fetch_xor_relaxed */
+
+#ifndef arch_atomic_fetch_xor_acquire
+static __always_inline int
+arch_atomic_fetch_xor_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_xor_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire
+#endif
+
+#ifndef arch_atomic_fetch_xor_release
+static __always_inline int
+arch_atomic_fetch_xor_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_xor_relaxed(i, v);
+}
+#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release
+#endif
+
+#ifndef arch_atomic_fetch_xor
+static __always_inline int
+arch_atomic_fetch_xor(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_xor_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+#endif
+
+#endif /* arch_atomic_fetch_xor_relaxed */
+
+#ifndef arch_atomic_xchg_relaxed
+#define arch_atomic_xchg_acquire arch_atomic_xchg
+#define arch_atomic_xchg_release arch_atomic_xchg
+#define arch_atomic_xchg_relaxed arch_atomic_xchg
+#else /* arch_atomic_xchg_relaxed */
+
+#ifndef arch_atomic_xchg_acquire
+static __always_inline int
+arch_atomic_xchg_acquire(atomic_t *v, int i)
+{
+ int ret = arch_atomic_xchg_relaxed(v, i);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
+#endif
+
+#ifndef arch_atomic_xchg_release
+static __always_inline int
+arch_atomic_xchg_release(atomic_t *v, int i)
+{
+ __atomic_release_fence();
+ return arch_atomic_xchg_relaxed(v, i);
+}
+#define arch_atomic_xchg_release arch_atomic_xchg_release
+#endif
+
+#ifndef arch_atomic_xchg
+static __always_inline int
+arch_atomic_xchg(atomic_t *v, int i)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_xchg_relaxed(v, i);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_xchg arch_atomic_xchg
+#endif
+
+#endif /* arch_atomic_xchg_relaxed */
+
+#ifndef arch_atomic_cmpxchg_relaxed
+#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg
+#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg
+#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg
+#else /* arch_atomic_cmpxchg_relaxed */
+
+#ifndef arch_atomic_cmpxchg_acquire
+static __always_inline int
+arch_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+{
+ int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic_cmpxchg_release
+static __always_inline int
+arch_atomic_cmpxchg_release(atomic_t *v, int old, int new)
+{
+ __atomic_release_fence();
+ return arch_atomic_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
+#endif
+
+#ifndef arch_atomic_cmpxchg
+static __always_inline int
+arch_atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
+#endif
+
+#endif /* arch_atomic_cmpxchg_relaxed */
+
+#ifndef arch_atomic_try_cmpxchg_relaxed
+#ifdef arch_atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg
+#endif /* arch_atomic_try_cmpxchg */
+
+#ifndef arch_atomic_try_cmpxchg
+static __always_inline bool
+arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = arch_atomic_cmpxchg(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = arch_atomic_cmpxchg_acquire(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_release
+static __always_inline bool
+arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = arch_atomic_cmpxchg_release(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_relaxed
+static __always_inline bool
+arch_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = arch_atomic_cmpxchg_relaxed(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg_relaxed
+#endif
+
+#else /* arch_atomic_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+ bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_release
+static __always_inline bool
+arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+ __atomic_release_fence();
+ return arch_atomic_try_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic_try_cmpxchg
+static __always_inline bool
+arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
+#endif
+
+#endif /* arch_atomic_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic_sub_and_test
+/**
+ * arch_atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic_sub_and_test(int i, atomic_t *v)
+{
+ return arch_atomic_sub_return(i, v) == 0;
+}
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
+#endif
+
+#ifndef arch_atomic_dec_and_test
+/**
+ * arch_atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static __always_inline bool
+arch_atomic_dec_and_test(atomic_t *v)
+{
+ return arch_atomic_dec_return(v) == 0;
+}
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
+#endif
+
+#ifndef arch_atomic_inc_and_test
+/**
+ * arch_atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic_inc_and_test(atomic_t *v)
+{
+ return arch_atomic_inc_return(v) == 0;
+}
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
+#endif
+
+#ifndef arch_atomic_add_negative
+/**
+ * arch_atomic_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static __always_inline bool
+arch_atomic_add_negative(int i, atomic_t *v)
+{
+ return arch_atomic_add_return(i, v) < 0;
+}
+#define arch_atomic_add_negative arch_atomic_add_negative
+#endif
+
+#ifndef arch_atomic_fetch_add_unless
+/**
+ * arch_atomic_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to @v...
+ * @u: ...unless @v is equal to @u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns the original value of @v.
+ */
+static __always_inline int
+arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ int c = arch_atomic_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!arch_atomic_try_cmpxchg(v, &c, c + a));
+
+ return c;
+}
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
+#endif
+
+#ifndef arch_atomic_add_unless
+/**
+ * arch_atomic_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to @v...
+ * @u: ...unless @v is equal to @u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static __always_inline bool
+arch_atomic_add_unless(atomic_t *v, int a, int u)
+{
+ return arch_atomic_fetch_add_unless(v, a, u) != u;
+}
+#define arch_atomic_add_unless arch_atomic_add_unless
+#endif
+
+#ifndef arch_atomic_inc_not_zero
+/**
+ * arch_atomic_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+static __always_inline bool
+arch_atomic_inc_not_zero(atomic_t *v)
+{
+ return arch_atomic_add_unless(v, 1, 0);
+}
+#define arch_atomic_inc_not_zero arch_atomic_inc_not_zero
+#endif
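+
+/*
+ * A typical use of inc_not_zero() is taking a reference on an object
+ * whose last reference may be dropped concurrently, e.g. during an
+ * RCU-protected lookup. A minimal sketch, assuming a hypothetical
+ * lookup() and an object embedding an atomic_t refcount:
+ *
+ *	rcu_read_lock();
+ *	obj = lookup(key);
+ *	if (obj && !atomic_inc_not_zero(&obj->refcnt))
+ *		obj = NULL;	// refcount already hit zero; do not revive
+ *	rcu_read_unlock();
+ */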
+
+#ifndef arch_atomic_inc_unless_negative
+static __always_inline bool
+arch_atomic_inc_unless_negative(atomic_t *v)
+{
+ int c = arch_atomic_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!arch_atomic_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+}
+#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
+#endif
+
+#ifndef arch_atomic_dec_unless_positive
+static __always_inline bool
+arch_atomic_dec_unless_positive(atomic_t *v)
+{
+ int c = arch_atomic_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!arch_atomic_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+}
+#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
+#endif
+
+#ifndef arch_atomic_dec_if_positive
+static __always_inline int
+arch_atomic_dec_if_positive(atomic_t *v)
+{
+ int dec, c = arch_atomic_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!arch_atomic_try_cmpxchg(v, &c, dec));
+
+ return dec;
+}
+#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
+#endif
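+
+/*
+ * dec_if_positive() stores the decrement only when the result stays
+ * non-negative, and returns the (possibly negative) would-be result
+ * either way. A minimal sketch of a counting-semaphore style use,
+ * assuming a caller-owned atomic_t 'available':
+ *
+ *	if (arch_atomic_dec_if_positive(&available) < 0)
+ *		return -EBUSY;	// nothing left; counter unchanged
+ */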
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
+#ifndef arch_atomic64_read_acquire
+static __always_inline s64
+arch_atomic64_read_acquire(const atomic64_t *v)
+{
+ s64 ret;
+
+ if (__native_word(atomic64_t)) {
+ ret = smp_load_acquire(&(v)->counter);
+ } else {
+ ret = arch_atomic64_read(v);
+ __atomic_acquire_fence();
+ }
+
+ return ret;
+}
+#define arch_atomic64_read_acquire arch_atomic64_read_acquire
+#endif
+
+#ifndef arch_atomic64_set_release
+static __always_inline void
+arch_atomic64_set_release(atomic64_t *v, s64 i)
+{
+ if (__native_word(atomic64_t)) {
+ smp_store_release(&(v)->counter, i);
+ } else {
+ __atomic_release_fence();
+ arch_atomic64_set(v, i);
+ }
+}
+#define arch_atomic64_set_release arch_atomic64_set_release
+#endif
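+
+/*
+ * read_acquire() and set_release() pair up to publish data without a
+ * full barrier: stores before the release cannot be reordered after it,
+ * and loads after the acquire cannot be reordered before it. A minimal
+ * sketch, assuming hypothetical shared variables 'data' and 'ready':
+ *
+ *	producer:
+ *		data = 42;
+ *		arch_atomic64_set_release(&ready, 1);
+ *
+ *	consumer:
+ *		if (arch_atomic64_read_acquire(&ready))
+ *			use(data);	// guaranteed to observe data == 42
+ */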
+
+#ifndef arch_atomic64_add_return_relaxed
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return
+#define arch_atomic64_add_return_release arch_atomic64_add_return
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return
+#else /* arch_atomic64_add_return_relaxed */
+
+#ifndef arch_atomic64_add_return_acquire
+static __always_inline s64
+arch_atomic64_add_return_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_add_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire
+#endif
+
+#ifndef arch_atomic64_add_return_release
+static __always_inline s64
+arch_atomic64_add_return_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_add_return_relaxed(i, v);
+}
+#define arch_atomic64_add_return_release arch_atomic64_add_return_release
+#endif
+
+#ifndef arch_atomic64_add_return
+static __always_inline s64
+arch_atomic64_add_return(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_add_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_add_return arch_atomic64_add_return
+#endif
+
+#endif /* arch_atomic64_add_return_relaxed */
+
+#ifndef arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add
+#else /* arch_atomic64_fetch_add_relaxed */
+
+#ifndef arch_atomic64_fetch_add_acquire
+static __always_inline s64
+arch_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_add_release
+static __always_inline s64
+arch_atomic64_fetch_add_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_add_relaxed(i, v);
+}
+#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release
+#endif
+
+#ifndef arch_atomic64_fetch_add
+static __always_inline s64
+arch_atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_add_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#endif
+
+#endif /* arch_atomic64_fetch_add_relaxed */
+
+#ifndef arch_atomic64_sub_return_relaxed
+#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
+#define arch_atomic64_sub_return_release arch_atomic64_sub_return
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return
+#else /* arch_atomic64_sub_return_relaxed */
+
+#ifndef arch_atomic64_sub_return_acquire
+static __always_inline s64
+arch_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_sub_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire
+#endif
+
+#ifndef arch_atomic64_sub_return_release
+static __always_inline s64
+arch_atomic64_sub_return_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_sub_return_relaxed(i, v);
+}
+#define arch_atomic64_sub_return_release arch_atomic64_sub_return_release
+#endif
+
+#ifndef arch_atomic64_sub_return
+static __always_inline s64
+arch_atomic64_sub_return(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_sub_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+#endif
+
+#endif /* arch_atomic64_sub_return_relaxed */
+
+#ifndef arch_atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub
+#else /* arch_atomic64_fetch_sub_relaxed */
+
+#ifndef arch_atomic64_fetch_sub_acquire
+static __always_inline s64
+arch_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_sub_release
+static __always_inline s64
+arch_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_sub_relaxed(i, v);
+}
+#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release
+#endif
+
+#ifndef arch_atomic64_fetch_sub
+static __always_inline s64
+arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_sub_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
+#endif
+
+#endif /* arch_atomic64_fetch_sub_relaxed */
+
+#ifndef arch_atomic64_inc
+static __always_inline void
+arch_atomic64_inc(atomic64_t *v)
+{
+ arch_atomic64_add(1, v);
+}
+#define arch_atomic64_inc arch_atomic64_inc
+#endif
+
+#ifndef arch_atomic64_inc_return_relaxed
+#ifdef arch_atomic64_inc_return
+#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return
+#define arch_atomic64_inc_return_release arch_atomic64_inc_return
+#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return
+#endif /* arch_atomic64_inc_return */
+
+#ifndef arch_atomic64_inc_return
+static __always_inline s64
+arch_atomic64_inc_return(atomic64_t *v)
+{
+ return arch_atomic64_add_return(1, v);
+}
+#define arch_atomic64_inc_return arch_atomic64_inc_return
+#endif
+
+#ifndef arch_atomic64_inc_return_acquire
+static __always_inline s64
+arch_atomic64_inc_return_acquire(atomic64_t *v)
+{
+ return arch_atomic64_add_return_acquire(1, v);
+}
+#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
+#endif
+
+#ifndef arch_atomic64_inc_return_release
+static __always_inline s64
+arch_atomic64_inc_return_release(atomic64_t *v)
+{
+ return arch_atomic64_add_return_release(1, v);
+}
+#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
+#endif
+
+#ifndef arch_atomic64_inc_return_relaxed
+static __always_inline s64
+arch_atomic64_inc_return_relaxed(atomic64_t *v)
+{
+ return arch_atomic64_add_return_relaxed(1, v);
+}
+#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
+#endif
+
+#else /* arch_atomic64_inc_return_relaxed */
+
+#ifndef arch_atomic64_inc_return_acquire
+static __always_inline s64
+arch_atomic64_inc_return_acquire(atomic64_t *v)
+{
+ s64 ret = arch_atomic64_inc_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
+#endif
+
+#ifndef arch_atomic64_inc_return_release
+static __always_inline s64
+arch_atomic64_inc_return_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_inc_return_relaxed(v);
+}
+#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
+#endif
+
+#ifndef arch_atomic64_inc_return
+static __always_inline s64
+arch_atomic64_inc_return(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_inc_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_inc_return arch_atomic64_inc_return
+#endif
+
+#endif /* arch_atomic64_inc_return_relaxed */
+
+#ifndef arch_atomic64_fetch_inc_relaxed
+#ifdef arch_atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc
+#endif /* arch_atomic64_fetch_inc */
+
+#ifndef arch_atomic64_fetch_inc
+static __always_inline s64
+arch_atomic64_fetch_inc(atomic64_t *v)
+{
+ return arch_atomic64_fetch_add(1, v);
+}
+#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
+#endif
+
+#ifndef arch_atomic64_fetch_inc_acquire
+static __always_inline s64
+arch_atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+ return arch_atomic64_fetch_add_acquire(1, v);
+}
+#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_inc_release
+static __always_inline s64
+arch_atomic64_fetch_inc_release(atomic64_t *v)
+{
+ return arch_atomic64_fetch_add_release(1, v);
+}
+#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
+#endif
+
+#ifndef arch_atomic64_fetch_inc_relaxed
+static __always_inline s64
+arch_atomic64_fetch_inc_relaxed(atomic64_t *v)
+{
+ return arch_atomic64_fetch_add_relaxed(1, v);
+}
+#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc_relaxed
+#endif
+
+#else /* arch_atomic64_fetch_inc_relaxed */
+
+#ifndef arch_atomic64_fetch_inc_acquire
+static __always_inline s64
+arch_atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_inc_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_inc_release
+static __always_inline s64
+arch_atomic64_fetch_inc_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_inc_relaxed(v);
+}
+#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
+#endif
+
+#ifndef arch_atomic64_fetch_inc
+static __always_inline s64
+arch_atomic64_fetch_inc(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_inc_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
+#endif
+
+#endif /* arch_atomic64_fetch_inc_relaxed */
+
+#ifndef arch_atomic64_dec
+static __always_inline void
+arch_atomic64_dec(atomic64_t *v)
+{
+ arch_atomic64_sub(1, v);
+}
+#define arch_atomic64_dec arch_atomic64_dec
+#endif
+
+#ifndef arch_atomic64_dec_return_relaxed
+#ifdef arch_atomic64_dec_return
+#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return
+#define arch_atomic64_dec_return_release arch_atomic64_dec_return
+#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return
+#endif /* arch_atomic64_dec_return */
+
+#ifndef arch_atomic64_dec_return
+static __always_inline s64
+arch_atomic64_dec_return(atomic64_t *v)
+{
+ return arch_atomic64_sub_return(1, v);
+}
+#define arch_atomic64_dec_return arch_atomic64_dec_return
+#endif
+
+#ifndef arch_atomic64_dec_return_acquire
+static __always_inline s64
+arch_atomic64_dec_return_acquire(atomic64_t *v)
+{
+ return arch_atomic64_sub_return_acquire(1, v);
+}
+#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
+#endif
+
+#ifndef arch_atomic64_dec_return_release
+static __always_inline s64
+arch_atomic64_dec_return_release(atomic64_t *v)
+{
+ return arch_atomic64_sub_return_release(1, v);
+}
+#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
+#endif
+
+#ifndef arch_atomic64_dec_return_relaxed
+static __always_inline s64
+arch_atomic64_dec_return_relaxed(atomic64_t *v)
+{
+ return arch_atomic64_sub_return_relaxed(1, v);
+}
+#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
+#endif
+
+#else /* arch_atomic64_dec_return_relaxed */
+
+#ifndef arch_atomic64_dec_return_acquire
+static __always_inline s64
+arch_atomic64_dec_return_acquire(atomic64_t *v)
+{
+ s64 ret = arch_atomic64_dec_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
+#endif
+
+#ifndef arch_atomic64_dec_return_release
+static __always_inline s64
+arch_atomic64_dec_return_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_dec_return_relaxed(v);
+}
+#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
+#endif
+
+#ifndef arch_atomic64_dec_return
+static __always_inline s64
+arch_atomic64_dec_return(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_dec_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_dec_return arch_atomic64_dec_return
+#endif
+
+#endif /* arch_atomic64_dec_return_relaxed */
+
+#ifndef arch_atomic64_fetch_dec_relaxed
+#ifdef arch_atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec
+#endif /* arch_atomic64_fetch_dec */
+
+#ifndef arch_atomic64_fetch_dec
+static __always_inline s64
+arch_atomic64_fetch_dec(atomic64_t *v)
+{
+ return arch_atomic64_fetch_sub(1, v);
+}
+#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
+#endif
+
+#ifndef arch_atomic64_fetch_dec_acquire
+static __always_inline s64
+arch_atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+ return arch_atomic64_fetch_sub_acquire(1, v);
+}
+#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_dec_release
+static __always_inline s64
+arch_atomic64_fetch_dec_release(atomic64_t *v)
+{
+ return arch_atomic64_fetch_sub_release(1, v);
+}
+#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
+#endif
+
+#ifndef arch_atomic64_fetch_dec_relaxed
+static __always_inline s64
+arch_atomic64_fetch_dec_relaxed(atomic64_t *v)
+{
+ return arch_atomic64_fetch_sub_relaxed(1, v);
+}
+#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec_relaxed
+#endif
+
+#else /* arch_atomic64_fetch_dec_relaxed */
+
+#ifndef arch_atomic64_fetch_dec_acquire
+static __always_inline s64
+arch_atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_dec_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_dec_release
+static __always_inline s64
+arch_atomic64_fetch_dec_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_dec_relaxed(v);
+}
+#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
+#endif
+
+#ifndef arch_atomic64_fetch_dec
+static __always_inline s64
+arch_atomic64_fetch_dec(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_dec_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
+#endif
+
+#endif /* arch_atomic64_fetch_dec_relaxed */
+
+#ifndef arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and
+#else /* arch_atomic64_fetch_and_relaxed */
+
+#ifndef arch_atomic64_fetch_and_acquire
+static __always_inline s64
+arch_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_and_release
+static __always_inline s64
+arch_atomic64_fetch_and_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_and_relaxed(i, v);
+}
+#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release
+#endif
+
+#ifndef arch_atomic64_fetch_and
+static __always_inline s64
+arch_atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_and_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#endif
+
+#endif /* arch_atomic64_fetch_and_relaxed */
+
+#ifndef arch_atomic64_andnot
+static __always_inline void
+arch_atomic64_andnot(s64 i, atomic64_t *v)
+{
+ arch_atomic64_and(~i, v);
+}
+#define arch_atomic64_andnot arch_atomic64_andnot
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_relaxed
+#ifdef arch_atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot
+#endif /* arch_atomic64_fetch_andnot */
+
+#ifndef arch_atomic64_fetch_andnot
+static __always_inline s64
+arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_fetch_and(~i, v);
+}
+#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_acquire
+static __always_inline s64
+arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_fetch_and_acquire(~i, v);
+}
+#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_release
+static __always_inline s64
+arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_fetch_and_release(~i, v);
+}
+#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_relaxed
+static __always_inline s64
+arch_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_fetch_and_relaxed(~i, v);
+}
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#endif
+
+#else /* arch_atomic64_fetch_andnot_relaxed */
+
+#ifndef arch_atomic64_fetch_andnot_acquire
+static __always_inline s64
+arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_release
+static __always_inline s64
+arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_andnot_relaxed(i, v);
+}
+#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic64_fetch_andnot
+static __always_inline s64
+arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_andnot_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
+#endif
+
+#endif /* arch_atomic64_fetch_andnot_relaxed */
+
+#ifndef arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or
+#else /* arch_atomic64_fetch_or_relaxed */
+
+#ifndef arch_atomic64_fetch_or_acquire
+static __always_inline s64
+arch_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_or_release
+static __always_inline s64
+arch_atomic64_fetch_or_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_or_relaxed(i, v);
+}
+#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release
+#endif
+
+#ifndef arch_atomic64_fetch_or
+static __always_inline s64
+arch_atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_or_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#endif
+
+#endif /* arch_atomic64_fetch_or_relaxed */
+
+#ifndef arch_atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor
+#else /* arch_atomic64_fetch_xor_relaxed */
+
+#ifndef arch_atomic64_fetch_xor_acquire
+static __always_inline s64
+arch_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_xor_release
+static __always_inline s64
+arch_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_xor_relaxed(i, v);
+}
+#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release
+#endif
+
+#ifndef arch_atomic64_fetch_xor
+static __always_inline s64
+arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_xor_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
+#endif
+
+#endif /* arch_atomic64_fetch_xor_relaxed */
+
+#ifndef arch_atomic64_xchg_relaxed
+#define arch_atomic64_xchg_acquire arch_atomic64_xchg
+#define arch_atomic64_xchg_release arch_atomic64_xchg
+#define arch_atomic64_xchg_relaxed arch_atomic64_xchg
+#else /* arch_atomic64_xchg_relaxed */
+
+#ifndef arch_atomic64_xchg_acquire
+static __always_inline s64
+arch_atomic64_xchg_acquire(atomic64_t *v, s64 i)
+{
+ s64 ret = arch_atomic64_xchg_relaxed(v, i);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_xchg_acquire arch_atomic64_xchg_acquire
+#endif
+
+#ifndef arch_atomic64_xchg_release
+static __always_inline s64
+arch_atomic64_xchg_release(atomic64_t *v, s64 i)
+{
+ __atomic_release_fence();
+ return arch_atomic64_xchg_relaxed(v, i);
+}
+#define arch_atomic64_xchg_release arch_atomic64_xchg_release
+#endif
+
+#ifndef arch_atomic64_xchg
+static __always_inline s64
+arch_atomic64_xchg(atomic64_t *v, s64 i)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_xchg_relaxed(v, i);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_xchg arch_atomic64_xchg
+#endif
+
+#endif /* arch_atomic64_xchg_relaxed */
+
+#ifndef arch_atomic64_cmpxchg_relaxed
+#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg
+#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg
+#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg
+#else /* arch_atomic64_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_cmpxchg_acquire
+static __always_inline s64
+arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+{
+ s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic64_cmpxchg_release
+static __always_inline s64
+arch_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
+{
+ __atomic_release_fence();
+ return arch_atomic64_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg_release
+#endif
+
+#ifndef arch_atomic64_cmpxchg
+static __always_inline s64
+arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
+#endif
+
+#endif /* arch_atomic64_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_try_cmpxchg_relaxed
+#ifdef arch_atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg
+#endif /* arch_atomic64_try_cmpxchg */
+
+#ifndef arch_atomic64_try_cmpxchg
+static __always_inline bool
+arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = arch_atomic64_cmpxchg(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = arch_atomic64_cmpxchg_acquire(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_release
+static __always_inline bool
+arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = arch_atomic64_cmpxchg_release(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_relaxed
+static __always_inline bool
+arch_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = arch_atomic64_cmpxchg_relaxed(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg_relaxed
+#endif
+
+#else /* arch_atomic64_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+ bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_release
+static __always_inline bool
+arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+ __atomic_release_fence();
+ return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg
+static __always_inline bool
+arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
+#endif
+
+#endif /* arch_atomic64_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_sub_and_test
+/**
+ * arch_atomic64_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns true if the result is
+ * zero, or false for all other cases.
+ */
+static __always_inline bool
+arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_sub_return(i, v) == 0;
+}
+#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
+#endif
+
+#ifndef arch_atomic64_dec_and_test
+/**
+ * arch_atomic64_dec_and_test - decrement and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically decrements @v by 1 and returns true if the result is 0,
+ * or false for all other cases.
+ */
+static __always_inline bool
+arch_atomic64_dec_and_test(atomic64_t *v)
+{
+ return arch_atomic64_dec_return(v) == 0;
+}
+#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
+#endif
+
+#ifndef arch_atomic64_inc_and_test
+/**
+ * arch_atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1 and returns true if the result is
+ * zero, or false for all other cases.
+ */
+static __always_inline bool
+arch_atomic64_inc_and_test(atomic64_t *v)
+{
+ return arch_atomic64_inc_return(v) == 0;
+}
+#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
+#endif
+
+#ifndef arch_atomic64_add_negative
+/**
+ * arch_atomic64_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true if the result is negative,
+ * or false when the result is greater than or equal to zero.
+ */
+static __always_inline bool
+arch_atomic64_add_negative(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_add_return(i, v) < 0;
+}
+#define arch_atomic64_add_negative arch_atomic64_add_negative
+#endif
+
+#ifndef arch_atomic64_fetch_add_unless
+/**
+ * arch_atomic64_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to @v...
+ * @u: ...unless @v is equal to @u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns the original value of @v.
+ */
+static __always_inline s64
+arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ s64 c = arch_atomic64_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!arch_atomic64_try_cmpxchg(v, &c, c + a));
+
+ return c;
+}
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
+#endif
+
+#ifndef arch_atomic64_add_unless
+/**
+ * arch_atomic64_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to @v...
+ * @u: ...unless @v is equal to @u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static __always_inline bool
+arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ return arch_atomic64_fetch_add_unless(v, a, u) != u;
+}
+#define arch_atomic64_add_unless arch_atomic64_add_unless
+#endif
+
+#ifndef arch_atomic64_inc_not_zero
+/**
+ * arch_atomic64_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+static __always_inline bool
+arch_atomic64_inc_not_zero(atomic64_t *v)
+{
+ return arch_atomic64_add_unless(v, 1, 0);
+}
+#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
+#endif
+
+#ifndef arch_atomic64_inc_unless_negative
+static __always_inline bool
+arch_atomic64_inc_unless_negative(atomic64_t *v)
+{
+ s64 c = arch_atomic64_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!arch_atomic64_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+}
+#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative
+#endif
+
+#ifndef arch_atomic64_dec_unless_positive
+static __always_inline bool
+arch_atomic64_dec_unless_positive(atomic64_t *v)
+{
+ s64 c = arch_atomic64_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!arch_atomic64_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+}
+#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive
+#endif
+
+#ifndef arch_atomic64_dec_if_positive
+static __always_inline s64
+arch_atomic64_dec_if_positive(atomic64_t *v)
+{
+ s64 dec, c = arch_atomic64_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!arch_atomic64_try_cmpxchg(v, &c, dec));
+
+ return dec;
+}
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
+#endif
+
+#endif /* _LINUX_ATOMIC_FALLBACK_H */
+// b5e87bdd5ede61470c29f7a7e4de781af3770f09
diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
new file mode 100644
index 000000000000..7a139ec030b0
--- /dev/null
+++ b/include/linux/atomic/atomic-instrumented.h
@@ -0,0 +1,2086 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-instrumented.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+/*
+ * This file provides wrappers with KASAN instrumentation for atomic operations.
+ * To use this functionality, an arch's atomic.h file needs to define all
+ * atomic operations with an arch_ prefix (e.g. arch_atomic_read()) and include
+ * this file at the end. This file then provides atomic_read(), which forwards
+ * to arch_atomic_read() for the actual atomic operation.
+ * Note: if an arch atomic operation is implemented by means of other atomic
+ * operations (e.g. an atomic_read()/atomic_cmpxchg() loop), then it needs to
+ * use the arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to
+ * avoid double instrumentation.
+ */
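+
+/*
+ * A minimal sketch of the arch side described above (names illustrative,
+ * not a complete arch header; a real arch must define the full set of
+ * operations):
+ *
+ *	// arch/<arch>/include/asm/atomic.h
+ *	static __always_inline int arch_atomic_read(const atomic_t *v)
+ *	{
+ *		return READ_ONCE(v->counter);
+ *	}
+ *	// ... all other arch_atomic*() operations ...
+ *	#include <linux/atomic/atomic-instrumented.h>
+ */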
+#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
+#define _LINUX_ATOMIC_INSTRUMENTED_H
+
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
+#include <linux/instrumented.h>
+
+static __always_inline int
+atomic_read(const atomic_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return arch_atomic_read(v);
+}
+
+static __always_inline int
+atomic_read_acquire(const atomic_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return arch_atomic_read_acquire(v);
+}
+
+static __always_inline void
+atomic_set(atomic_t *v, int i)
+{
+ instrument_atomic_write(v, sizeof(*v));
+ arch_atomic_set(v, i);
+}
+
+static __always_inline void
+atomic_set_release(atomic_t *v, int i)
+{
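+	/* Tell KCSAN the following store has release ordering. */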
+ kcsan_release();
+ instrument_atomic_write(v, sizeof(*v));
+ arch_atomic_set_release(v, i);
+}
+
+static __always_inline void
+atomic_add(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_add(i, v);
+}
+
+static __always_inline int
+atomic_add_return(int i, atomic_t *v)
+{
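+	/* Mark the full barrier implied by this fully-ordered RMW for KCSAN. */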
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_add_return(i, v);
+}
+
+static __always_inline int
+atomic_add_return_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_add_return_acquire(i, v);
+}
+
+static __always_inline int
+atomic_add_return_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_add_return_release(i, v);
+}
+
+static __always_inline int
+atomic_add_return_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_add_return_relaxed(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_add(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_add_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_add_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_add_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_sub(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_sub(i, v);
+}
+
+static __always_inline int
+atomic_sub_return(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_sub_return(i, v);
+}
+
+static __always_inline int
+atomic_sub_return_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_sub_return_acquire(i, v);
+}
+
+static __always_inline int
+atomic_sub_return_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_sub_return_release(i, v);
+}
+
+static __always_inline int
+atomic_sub_return_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_sub_return_relaxed(i, v);
+}
+
+static __always_inline int
+atomic_fetch_sub(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_sub(i, v);
+}
+
+static __always_inline int
+atomic_fetch_sub_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_sub_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_sub_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_sub_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_sub_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_sub_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_inc(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_inc(v);
+}
+
+static __always_inline int
+atomic_inc_return(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_inc_return(v);
+}
+
+static __always_inline int
+atomic_inc_return_acquire(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_inc_return_acquire(v);
+}
+
+static __always_inline int
+atomic_inc_return_release(atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_inc_return_release(v);
+}
+
+static __always_inline int
+atomic_inc_return_relaxed(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_inc_return_relaxed(v);
+}
+
+static __always_inline int
+atomic_fetch_inc(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_inc(v);
+}
+
+static __always_inline int
+atomic_fetch_inc_acquire(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_inc_acquire(v);
+}
+
+static __always_inline int
+atomic_fetch_inc_release(atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_inc_release(v);
+}
+
+static __always_inline int
+atomic_fetch_inc_relaxed(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_inc_relaxed(v);
+}
+
+static __always_inline void
+atomic_dec(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_dec(v);
+}
+
+static __always_inline int
+atomic_dec_return(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_dec_return(v);
+}
+
+static __always_inline int
+atomic_dec_return_acquire(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_dec_return_acquire(v);
+}
+
+static __always_inline int
+atomic_dec_return_release(atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_dec_return_release(v);
+}
+
+static __always_inline int
+atomic_dec_return_relaxed(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_dec_return_relaxed(v);
+}
+
+static __always_inline int
+atomic_fetch_dec(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_dec(v);
+}
+
+static __always_inline int
+atomic_fetch_dec_acquire(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_dec_acquire(v);
+}
+
+static __always_inline int
+atomic_fetch_dec_release(atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_dec_release(v);
+}
+
+static __always_inline int
+atomic_fetch_dec_relaxed(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_dec_relaxed(v);
+}
+
+static __always_inline void
+atomic_and(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_and(i, v);
+}
+
+static __always_inline int
+atomic_fetch_and(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_and(i, v);
+}
+
+static __always_inline int
+atomic_fetch_and_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_and_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_and_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_and_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_and_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_and_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_andnot(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_andnot(i, v);
+}
+
+static __always_inline int
+atomic_fetch_andnot(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_andnot(i, v);
+}
+
+static __always_inline int
+atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_andnot_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_andnot_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_andnot_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_or(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_or(i, v);
+}
+
+static __always_inline int
+atomic_fetch_or(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_or(i, v);
+}
+
+static __always_inline int
+atomic_fetch_or_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_or_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_or_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_or_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_or_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_or_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_xor(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_xor(i, v);
+}
+
+static __always_inline int
+atomic_fetch_xor(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_xor(i, v);
+}
+
+static __always_inline int
+atomic_fetch_xor_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_xor_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_xor_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_xor_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_xor_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_xor_relaxed(i, v);
+}
+
+static __always_inline int
+atomic_xchg(atomic_t *v, int i)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_xchg(v, i);
+}
+
+static __always_inline int
+atomic_xchg_acquire(atomic_t *v, int i)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_xchg_acquire(v, i);
+}
+
+static __always_inline int
+atomic_xchg_release(atomic_t *v, int i)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_xchg_release(v, i);
+}
+
+static __always_inline int
+atomic_xchg_relaxed(atomic_t *v, int i)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_xchg_relaxed(v, i);
+}
+
+static __always_inline int
+atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_cmpxchg(v, old, new);
+}
+
+static __always_inline int
+atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline int
+atomic_cmpxchg_release(atomic_t *v, int old, int new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_cmpxchg_release(v, old, new);
+}
+
+static __always_inline int
+atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic_try_cmpxchg(v, old, new);
+}
+
+static __always_inline bool
+atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic_try_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline bool
+atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic_try_cmpxchg_release(v, old, new);
+}
+
+static __always_inline bool
+atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic_try_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic_sub_and_test(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_sub_and_test(i, v);
+}
+
+static __always_inline bool
+atomic_dec_and_test(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_dec_and_test(v);
+}
+
+static __always_inline bool
+atomic_inc_and_test(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_inc_and_test(v);
+}
+
+static __always_inline bool
+atomic_add_negative(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_add_negative(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic_add_unless(atomic_t *v, int a, int u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic_inc_not_zero(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_inc_not_zero(v);
+}
+
+static __always_inline bool
+atomic_inc_unless_negative(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_inc_unless_negative(v);
+}
+
+static __always_inline bool
+atomic_dec_unless_positive(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_dec_unless_positive(v);
+}
+
+static __always_inline int
+atomic_dec_if_positive(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_dec_if_positive(v);
+}
+
+static __always_inline s64
+atomic64_read(const atomic64_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return arch_atomic64_read(v);
+}
+
+static __always_inline s64
+atomic64_read_acquire(const atomic64_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return arch_atomic64_read_acquire(v);
+}
+
+static __always_inline void
+atomic64_set(atomic64_t *v, s64 i)
+{
+ instrument_atomic_write(v, sizeof(*v));
+ arch_atomic64_set(v, i);
+}
+
+static __always_inline void
+atomic64_set_release(atomic64_t *v, s64 i)
+{
+ kcsan_release();
+ instrument_atomic_write(v, sizeof(*v));
+ arch_atomic64_set_release(v, i);
+}
+
+static __always_inline void
+atomic64_add(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic64_add(i, v);
+}
+
+static __always_inline s64
+atomic64_add_return(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_add_return(i, v);
+}
+
+static __always_inline s64
+atomic64_add_return_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_add_return_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_add_return_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_add_return_release(i, v);
+}
+
+static __always_inline s64
+atomic64_add_return_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_add_return_relaxed(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_add(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_add_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_add_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_add_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_add_relaxed(i, v);
+}
+
+static __always_inline void
+atomic64_sub(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic64_sub(i, v);
+}
+
+static __always_inline s64
+atomic64_sub_return(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_sub_return(i, v);
+}
+
+static __always_inline s64
+atomic64_sub_return_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_sub_return_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_sub_return_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_sub_return_release(i, v);
+}
+
+static __always_inline s64
+atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_sub_return_relaxed(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_sub(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_sub_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_sub_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_sub_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_sub_relaxed(i, v);
+}
+
+static __always_inline void
+atomic64_inc(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic64_inc(v);
+}
+
+static __always_inline s64
+atomic64_inc_return(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_inc_return(v);
+}
+
+static __always_inline s64
+atomic64_inc_return_acquire(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_inc_return_acquire(v);
+}
+
+static __always_inline s64
+atomic64_inc_return_release(atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_inc_return_release(v);
+}
+
+static __always_inline s64
+atomic64_inc_return_relaxed(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_inc_return_relaxed(v);
+}
+
+static __always_inline s64
+atomic64_fetch_inc(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_inc(v);
+}
+
+static __always_inline s64
+atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_inc_acquire(v);
+}
+
+static __always_inline s64
+atomic64_fetch_inc_release(atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_inc_release(v);
+}
+
+static __always_inline s64
+atomic64_fetch_inc_relaxed(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_inc_relaxed(v);
+}
+
+static __always_inline void
+atomic64_dec(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic64_dec(v);
+}
+
+static __always_inline s64
+atomic64_dec_return(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_dec_return(v);
+}
+
+static __always_inline s64
+atomic64_dec_return_acquire(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_dec_return_acquire(v);
+}
+
+static __always_inline s64
+atomic64_dec_return_release(atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_dec_return_release(v);
+}
+
+static __always_inline s64
+atomic64_dec_return_relaxed(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_dec_return_relaxed(v);
+}
+
+static __always_inline s64
+atomic64_fetch_dec(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_dec(v);
+}
+
+static __always_inline s64
+atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_dec_acquire(v);
+}
+
+static __always_inline s64
+atomic64_fetch_dec_release(atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_dec_release(v);
+}
+
+static __always_inline s64
+atomic64_fetch_dec_relaxed(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_dec_relaxed(v);
+}
+
+static __always_inline void
+atomic64_and(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic64_and(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_and(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_and_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_and_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_and_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_and_relaxed(i, v);
+}
+
+static __always_inline void
+atomic64_andnot(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic64_andnot(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_andnot(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_andnot_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_andnot_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_andnot_relaxed(i, v);
+}
+
+static __always_inline void
+atomic64_or(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic64_or(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_or(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_or_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_or_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_or_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_or_relaxed(i, v);
+}
+
+static __always_inline void
+atomic64_xor(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic64_xor(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_xor(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_xor_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_xor_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_xor_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_xor_relaxed(i, v);
+}
+
+static __always_inline s64
+atomic64_xchg(atomic64_t *v, s64 i)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_xchg(v, i);
+}
+
+static __always_inline s64
+atomic64_xchg_acquire(atomic64_t *v, s64 i)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_xchg_acquire(v, i);
+}
+
+static __always_inline s64
+atomic64_xchg_release(atomic64_t *v, s64 i)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_xchg_release(v, i);
+}
+
+static __always_inline s64
+atomic64_xchg_relaxed(atomic64_t *v, s64 i)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_xchg_relaxed(v, i);
+}
+
+static __always_inline s64
+atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_cmpxchg(v, old, new);
+}
+
+static __always_inline s64
+atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline s64
+atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_cmpxchg_release(v, old, new);
+}
+
+static __always_inline s64
+atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic64_try_cmpxchg(v, old, new);
+}
+
+static __always_inline bool
+atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic64_try_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline bool
+atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic64_try_cmpxchg_release(v, old, new);
+}
+
+static __always_inline bool
+atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_sub_and_test(i, v);
+}
+
+static __always_inline bool
+atomic64_dec_and_test(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_dec_and_test(v);
+}
+
+static __always_inline bool
+atomic64_inc_and_test(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_inc_and_test(v);
+}
+
+static __always_inline bool
+atomic64_add_negative(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_add_negative(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic64_inc_not_zero(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_inc_not_zero(v);
+}
+
+static __always_inline bool
+atomic64_inc_unless_negative(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_inc_unless_negative(v);
+}
+
+static __always_inline bool
+atomic64_dec_unless_positive(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_dec_unless_positive(v);
+}
+
+static __always_inline s64
+atomic64_dec_if_positive(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_dec_if_positive(v);
+}
+
+static __always_inline long
+atomic_long_read(const atomic_long_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return arch_atomic_long_read(v);
+}
+
+static __always_inline long
+atomic_long_read_acquire(const atomic_long_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return arch_atomic_long_read_acquire(v);
+}
+
+static __always_inline void
+atomic_long_set(atomic_long_t *v, long i)
+{
+ instrument_atomic_write(v, sizeof(*v));
+ arch_atomic_long_set(v, i);
+}
+
+static __always_inline void
+atomic_long_set_release(atomic_long_t *v, long i)
+{
+ kcsan_release();
+ instrument_atomic_write(v, sizeof(*v));
+ arch_atomic_long_set_release(v, i);
+}
+
+static __always_inline void
+atomic_long_add(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_long_add(i, v);
+}
+
+static __always_inline long
+atomic_long_add_return(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_add_return(i, v);
+}
+
+static __always_inline long
+atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_add_return_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_add_return_release(i, v);
+}
+
+static __always_inline long
+atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_add_return_relaxed(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_add(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_add_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_add_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_add_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_long_sub(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_long_sub(i, v);
+}
+
+static __always_inline long
+atomic_long_sub_return(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_sub_return(i, v);
+}
+
+static __always_inline long
+atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_sub_return_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_sub_return_release(i, v);
+}
+
+static __always_inline long
+atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_sub_return_relaxed(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_sub(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_sub_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_sub_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_sub_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_long_inc(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_long_inc(v);
+}
+
+static __always_inline long
+atomic_long_inc_return(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_inc_return(v);
+}
+
+static __always_inline long
+atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_inc_return_acquire(v);
+}
+
+static __always_inline long
+atomic_long_inc_return_release(atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_inc_return_release(v);
+}
+
+static __always_inline long
+atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_inc_return_relaxed(v);
+}
+
+static __always_inline long
+atomic_long_fetch_inc(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_inc(v);
+}
+
+static __always_inline long
+atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_inc_acquire(v);
+}
+
+static __always_inline long
+atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_inc_release(v);
+}
+
+static __always_inline long
+atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_inc_relaxed(v);
+}
+
+static __always_inline void
+atomic_long_dec(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_long_dec(v);
+}
+
+static __always_inline long
+atomic_long_dec_return(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_dec_return(v);
+}
+
+static __always_inline long
+atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_dec_return_acquire(v);
+}
+
+static __always_inline long
+atomic_long_dec_return_release(atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_dec_return_release(v);
+}
+
+static __always_inline long
+atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_dec_return_relaxed(v);
+}
+
+static __always_inline long
+atomic_long_fetch_dec(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_dec(v);
+}
+
+static __always_inline long
+atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_dec_acquire(v);
+}
+
+static __always_inline long
+atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_dec_release(v);
+}
+
+static __always_inline long
+atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_dec_relaxed(v);
+}
+
+static __always_inline void
+atomic_long_and(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_long_and(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_and(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_and_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_and_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_and_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_long_andnot(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_long_andnot(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_andnot(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_andnot_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_andnot_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_andnot_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_long_or(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_long_or(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_or(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_or_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_or_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_or_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_long_xor(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_long_xor(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_xor(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_xor_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_xor_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_xor_relaxed(i, v);
+}
+
+static __always_inline long
+atomic_long_xchg(atomic_long_t *v, long i)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_xchg(v, i);
+}
+
+static __always_inline long
+atomic_long_xchg_acquire(atomic_long_t *v, long i)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_xchg_acquire(v, i);
+}
+
+static __always_inline long
+atomic_long_xchg_release(atomic_long_t *v, long i)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_xchg_release(v, i);
+}
+
+static __always_inline long
+atomic_long_xchg_relaxed(atomic_long_t *v, long i)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_xchg_relaxed(v, i);
+}
+
+static __always_inline long
+atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_cmpxchg(v, old, new);
+}
+
+static __always_inline long
+atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline long
+atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_cmpxchg_release(v, old, new);
+}
+
+static __always_inline long
+atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic_long_try_cmpxchg(v, old, new);
+}
+
+static __always_inline bool
+atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic_long_try_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline bool
+atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic_long_try_cmpxchg_release(v, old, new);
+}
+
+static __always_inline bool
+atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic_long_try_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_sub_and_test(i, v);
+}
+
+static __always_inline bool
+atomic_long_dec_and_test(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_dec_and_test(v);
+}
+
+static __always_inline bool
+atomic_long_inc_and_test(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_inc_and_test(v);
+}
+
+static __always_inline bool
+atomic_long_add_negative(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_add_negative(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic_long_inc_not_zero(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_inc_not_zero(v);
+}
+
+static __always_inline bool
+atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_inc_unless_negative(v);
+}
+
+static __always_inline bool
+atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_dec_unless_positive(v);
+}
+
+static __always_inline long
+atomic_long_dec_if_positive(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_dec_if_positive(v);
+}
+
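[Editor's note: the try_cmpxchg*() wrappers above differ from cmpxchg*() in that they return a bool and, on failure, write the value they actually found back through *old, which is what makes the canonical retry loop cheap. A minimal, hypothetical caller sketch — counter_inc_saturate() is not part of this patch:

static inline void counter_inc_saturate(atomic_long_t *c)
{
	long old = atomic_long_read(c);

	do {
		if (old == LONG_MAX)
			return;	/* saturated: leave the counter as-is */
		/* on failure, try_cmpxchg refreshes 'old' for the next pass */
	} while (!atomic_long_try_cmpxchg(c, &old, old + 1));
}
]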
+#define xchg(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_xchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#define xchg_acquire(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define xchg_release(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_release(); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_xchg_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define xchg_relaxed(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
+})
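[Editor's note: unlike the typed atomic*_t wrappers above, the xchg()/cmpxchg() families stay macros so they remain generic over the pointee type; the typeof(ptr) assignment evaluates the pointer expression exactly once even though the body uses it twice. A hypothetical use on a plain int flag:

	int flag = 0;
	int old;

	old = xchg(&flag, 1);	/* atomically store 1, return the prior value */
]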
+
+#define cmpxchg(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_acquire(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_release(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_release(); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_relaxed(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_acquire(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_release(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_release(); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_relaxed(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+
+#define try_cmpxchg(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_mb(); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ arch_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_acquire(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ arch_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_release(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_release(); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ arch_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_relaxed(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ arch_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_mb(); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ arch_try_cmpxchg64(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64_acquire(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ arch_try_cmpxchg64_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64_release(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_release(); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ arch_try_cmpxchg64_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64_relaxed(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ arch_try_cmpxchg64_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define cmpxchg_local(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_local(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#define sync_cmpxchg(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_double(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+ arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_double_local(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+ arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
+// 764f741eb77a7ad565dc8d99ce2837d5542e8aee
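[Editor's note: every wrapper in the file above follows the same three-step shape: an explicit kcsan_mb()/kcsan_release() for the fully-ordered and release variants, a sanitizer annotation for the accessed location, then the call into the arch_ implementation. The annotation itself lives in <linux/instrumented.h>; at the time of this series it is roughly the following (simplified, reproduced for reference only, not part of the diff):

static __always_inline void
instrument_atomic_read_write(const volatile void *v, size_t size)
{
	kasan_check_write(v, size);		/* KASAN: location is written */
	kcsan_check_atomic_read_write(v, size);	/* KCSAN: atomic RMW access */
}
]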
diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h
new file mode 100644
index 000000000000..800b8c35992d
--- /dev/null
+++ b/include/linux/atomic/atomic-long.h
@@ -0,0 +1,1014 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-long.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_LONG_H
+#define _LINUX_ATOMIC_LONG_H
+
+#include <linux/compiler.h>
+#include <asm/types.h>
+
+#ifdef CONFIG_64BIT
+typedef atomic64_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
+#define atomic_long_cond_read_acquire atomic64_cond_read_acquire
+#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed
+#else
+typedef atomic_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
+#define atomic_long_cond_read_acquire atomic_cond_read_acquire
+#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed
+#endif
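[Editor's note: the typedef selection above is the whole trick — atomic_long_t always has the machine word size, so callers can count pointer-sized quantities without an #ifdef of their own. A hypothetical example:

static atomic_long_t nr_objects = ATOMIC_LONG_INIT(0);

static void obj_created(void)
{
	/* resolves to atomic64_inc() on 64-bit kernels, atomic_inc() on 32-bit */
	atomic_long_inc(&nr_objects);
}
]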
+
+#ifdef CONFIG_64BIT
+
+static __always_inline long
+arch_atomic_long_read(const atomic_long_t *v)
+{
+ return arch_atomic64_read(v);
+}
+
+static __always_inline long
+arch_atomic_long_read_acquire(const atomic_long_t *v)
+{
+ return arch_atomic64_read_acquire(v);
+}
+
+static __always_inline void
+arch_atomic_long_set(atomic_long_t *v, long i)
+{
+ arch_atomic64_set(v, i);
+}
+
+static __always_inline void
+arch_atomic_long_set_release(atomic_long_t *v, long i)
+{
+ arch_atomic64_set_release(v, i);
+}
+
+static __always_inline void
+arch_atomic_long_add(long i, atomic_long_t *v)
+{
+ arch_atomic64_add(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return(long i, atomic_long_t *v)
+{
+ return arch_atomic64_add_return(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic64_add_return_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+ return arch_atomic64_add_return_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic64_add_return_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_add(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_add_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_add_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_add_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_sub(long i, atomic_long_t *v)
+{
+ arch_atomic64_sub(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return(long i, atomic_long_t *v)
+{
+ return arch_atomic64_sub_return(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic64_sub_return_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+ return arch_atomic64_sub_return_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic64_sub_return_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_sub(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_sub_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_sub_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_sub_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_inc(atomic_long_t *v)
+{
+ arch_atomic64_inc(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return(atomic_long_t *v)
+{
+ return arch_atomic64_inc_return(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+ return arch_atomic64_inc_return_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_release(atomic_long_t *v)
+{
+ return arch_atomic64_inc_return_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+ return arch_atomic64_inc_return_relaxed(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc(atomic_long_t *v)
+{
+ return arch_atomic64_fetch_inc(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+ return arch_atomic64_fetch_inc_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+ return arch_atomic64_fetch_inc_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+ return arch_atomic64_fetch_inc_relaxed(v);
+}
+
+static __always_inline void
+arch_atomic_long_dec(atomic_long_t *v)
+{
+ arch_atomic64_dec(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return(atomic_long_t *v)
+{
+ return arch_atomic64_dec_return(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+ return arch_atomic64_dec_return_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_release(atomic_long_t *v)
+{
+ return arch_atomic64_dec_return_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+ return arch_atomic64_dec_return_relaxed(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec(atomic_long_t *v)
+{
+ return arch_atomic64_fetch_dec(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+ return arch_atomic64_fetch_dec_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+ return arch_atomic64_fetch_dec_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+ return arch_atomic64_fetch_dec_relaxed(v);
+}
+
+static __always_inline void
+arch_atomic_long_and(long i, atomic_long_t *v)
+{
+ arch_atomic64_and(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_and(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_and_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_and_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_and_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_andnot(long i, atomic_long_t *v)
+{
+ arch_atomic64_andnot(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_andnot(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_andnot_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_andnot_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_andnot_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_or(long i, atomic_long_t *v)
+{
+ arch_atomic64_or(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_or(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_or_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_or_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_or_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_xor(long i, atomic_long_t *v)
+{
+ arch_atomic64_xor(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_xor(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_xor_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_xor_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_xor_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_xchg(atomic_long_t *v, long i)
+{
+ return arch_atomic64_xchg(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_acquire(atomic_long_t *v, long i)
+{
+ return arch_atomic64_xchg_acquire(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_release(atomic_long_t *v, long i)
+{
+ return arch_atomic64_xchg_release(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_relaxed(atomic_long_t *v, long i)
+{
+ return arch_atomic64_xchg_relaxed(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+ return arch_atomic64_cmpxchg(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+{
+ return arch_atomic64_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+ return arch_atomic64_cmpxchg_release(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
+{
+ return arch_atomic64_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+ return arch_atomic64_try_cmpxchg(v, (s64 *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+ return arch_atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+ return arch_atomic64_try_cmpxchg_release(v, (s64 *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+ return arch_atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+ return arch_atomic64_sub_and_test(i, v);
+}
+
+static __always_inline bool
+arch_atomic_long_dec_and_test(atomic_long_t *v)
+{
+ return arch_atomic64_dec_and_test(v);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_and_test(atomic_long_t *v)
+{
+ return arch_atomic64_inc_and_test(v);
+}
+
+static __always_inline bool
+arch_atomic_long_add_negative(long i, atomic_long_t *v)
+{
+ return arch_atomic64_add_negative(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+ return arch_atomic64_fetch_add_unless(v, a, u);
+}
+
+static __always_inline bool
+arch_atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+ return arch_atomic64_add_unless(v, a, u);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_not_zero(atomic_long_t *v)
+{
+ return arch_atomic64_inc_not_zero(v);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+ return arch_atomic64_inc_unless_negative(v);
+}
+
+static __always_inline bool
+arch_atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+ return arch_atomic64_dec_unless_positive(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_if_positive(atomic_long_t *v)
+{
+ return arch_atomic64_dec_if_positive(v);
+}
+
+#else /* CONFIG_64BIT */
+
+static __always_inline long
+arch_atomic_long_read(const atomic_long_t *v)
+{
+ return arch_atomic_read(v);
+}
+
+static __always_inline long
+arch_atomic_long_read_acquire(const atomic_long_t *v)
+{
+ return arch_atomic_read_acquire(v);
+}
+
+static __always_inline void
+arch_atomic_long_set(atomic_long_t *v, long i)
+{
+ arch_atomic_set(v, i);
+}
+
+static __always_inline void
+arch_atomic_long_set_release(atomic_long_t *v, long i)
+{
+ arch_atomic_set_release(v, i);
+}
+
+static __always_inline void
+arch_atomic_long_add(long i, atomic_long_t *v)
+{
+ arch_atomic_add(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return(long i, atomic_long_t *v)
+{
+ return arch_atomic_add_return(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic_add_return_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+ return arch_atomic_add_return_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic_add_return_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_add(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_add_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_add_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_add_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_sub(long i, atomic_long_t *v)
+{
+ arch_atomic_sub(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return(long i, atomic_long_t *v)
+{
+ return arch_atomic_sub_return(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic_sub_return_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+ return arch_atomic_sub_return_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic_sub_return_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_sub(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_sub_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_sub_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_sub_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_inc(atomic_long_t *v)
+{
+ arch_atomic_inc(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return(atomic_long_t *v)
+{
+ return arch_atomic_inc_return(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+ return arch_atomic_inc_return_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_release(atomic_long_t *v)
+{
+ return arch_atomic_inc_return_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+ return arch_atomic_inc_return_relaxed(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc(atomic_long_t *v)
+{
+ return arch_atomic_fetch_inc(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+ return arch_atomic_fetch_inc_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+ return arch_atomic_fetch_inc_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+ return arch_atomic_fetch_inc_relaxed(v);
+}
+
+static __always_inline void
+arch_atomic_long_dec(atomic_long_t *v)
+{
+ arch_atomic_dec(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return(atomic_long_t *v)
+{
+ return arch_atomic_dec_return(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+ return arch_atomic_dec_return_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_release(atomic_long_t *v)
+{
+ return arch_atomic_dec_return_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+ return arch_atomic_dec_return_relaxed(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec(atomic_long_t *v)
+{
+ return arch_atomic_fetch_dec(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+ return arch_atomic_fetch_dec_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+ return arch_atomic_fetch_dec_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+ return arch_atomic_fetch_dec_relaxed(v);
+}
+
+static __always_inline void
+arch_atomic_long_and(long i, atomic_long_t *v)
+{
+ arch_atomic_and(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_and(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_and_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_and_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_and_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_andnot(long i, atomic_long_t *v)
+{
+ arch_atomic_andnot(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_andnot(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_andnot_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_andnot_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_andnot_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_or(long i, atomic_long_t *v)
+{
+ arch_atomic_or(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_or(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_or_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_or_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_or_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_xor(long i, atomic_long_t *v)
+{
+ arch_atomic_xor(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_xor(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_xor_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_xor_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_xor_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_xchg(atomic_long_t *v, long i)
+{
+ return arch_atomic_xchg(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_acquire(atomic_long_t *v, long i)
+{
+ return arch_atomic_xchg_acquire(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_release(atomic_long_t *v, long i)
+{
+ return arch_atomic_xchg_release(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_relaxed(atomic_long_t *v, long i)
+{
+ return arch_atomic_xchg_relaxed(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+ return arch_atomic_cmpxchg(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+{
+ return arch_atomic_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+ return arch_atomic_cmpxchg_release(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
+{
+ return arch_atomic_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+ return arch_atomic_try_cmpxchg(v, (int *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+ return arch_atomic_try_cmpxchg_acquire(v, (int *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+ return arch_atomic_try_cmpxchg_release(v, (int *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+ return arch_atomic_try_cmpxchg_relaxed(v, (int *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+ return arch_atomic_sub_and_test(i, v);
+}
+
+static __always_inline bool
+arch_atomic_long_dec_and_test(atomic_long_t *v)
+{
+ return arch_atomic_dec_and_test(v);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_and_test(atomic_long_t *v)
+{
+ return arch_atomic_inc_and_test(v);
+}
+
+static __always_inline bool
+arch_atomic_long_add_negative(long i, atomic_long_t *v)
+{
+ return arch_atomic_add_negative(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+ return arch_atomic_fetch_add_unless(v, a, u);
+}
+
+static __always_inline bool
+arch_atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+ return arch_atomic_add_unless(v, a, u);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_not_zero(atomic_long_t *v)
+{
+ return arch_atomic_inc_not_zero(v);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+ return arch_atomic_inc_unless_negative(v);
+}
+
+static __always_inline bool
+arch_atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+ return arch_atomic_dec_unless_positive(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_if_positive(atomic_long_t *v)
+{
+ return arch_atomic_dec_if_positive(v);
+}
+
+#endif /* CONFIG_64BIT */
+#endif /* _LINUX_ATOMIC_LONG_H */
+// e8f0e08ff072b74d180eabe2ad001282b38c2c88
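[Editor's note: only the try_cmpxchg*() forwarders in either branch need a cast, because they take a pointer to the old value; the cast is sound only because long shares a representation with s64 on 64-bit kernels and with int on 32-bit ones. A hypothetical compile-time check that states that layout assumption explicitly:

static inline void atomic_long_layout_check(void)
{
#ifdef CONFIG_64BIT
	BUILD_BUG_ON(sizeof(long) != sizeof(s64));	/* (s64 *) cast above */
#else
	BUILD_BUG_ON(sizeof(long) != sizeof(int));	/* (int *) cast above */
#endif
}
]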
diff --git a/include/linux/audit.h b/include/linux/audit.h
index f9ceae57ca8d..3608992848d3 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -11,7 +11,9 @@
#include <linux/sched.h>
#include <linux/ptrace.h>
+#include <linux/audit_arch.h>
#include <uapi/linux/audit.h>
+#include <uapi/linux/netfilter/nf_tables.h>
#define AUDIT_INO_UNSET ((unsigned long)-1)
#define AUDIT_DEV_UNSET ((dev_t)-1)
@@ -19,7 +21,7 @@
struct audit_sig_info {
uid_t uid;
pid_t pid;
- char ctx[0];
+ char ctx[];
};
struct audit_buffer;
@@ -94,7 +96,28 @@ struct audit_ntp_data {
struct audit_ntp_data {};
#endif
-extern int is_audit_feature_set(int which);
+enum audit_nfcfgop {
+ AUDIT_XT_OP_REGISTER,
+ AUDIT_XT_OP_REPLACE,
+ AUDIT_XT_OP_UNREGISTER,
+ AUDIT_NFT_OP_TABLE_REGISTER,
+ AUDIT_NFT_OP_TABLE_UNREGISTER,
+ AUDIT_NFT_OP_CHAIN_REGISTER,
+ AUDIT_NFT_OP_CHAIN_UNREGISTER,
+ AUDIT_NFT_OP_RULE_REGISTER,
+ AUDIT_NFT_OP_RULE_UNREGISTER,
+ AUDIT_NFT_OP_SET_REGISTER,
+ AUDIT_NFT_OP_SET_UNREGISTER,
+ AUDIT_NFT_OP_SETELEM_REGISTER,
+ AUDIT_NFT_OP_SETELEM_UNREGISTER,
+ AUDIT_NFT_OP_GEN_REGISTER,
+ AUDIT_NFT_OP_OBJ_REGISTER,
+ AUDIT_NFT_OP_OBJ_UNREGISTER,
+ AUDIT_NFT_OP_OBJ_RESET,
+ AUDIT_NFT_OP_FLOWTABLE_REGISTER,
+ AUDIT_NFT_OP_FLOWTABLE_UNREGISTER,
+ AUDIT_NFT_OP_INVALID,
+};
extern int __init audit_register_class(int class, unsigned *list);
extern int audit_classify_syscall(int abi, unsigned syscall);
@@ -263,12 +286,13 @@ static inline int audit_signal_info(int sig, struct task_struct *t)
/* Public API */
extern int audit_alloc(struct task_struct *task);
extern void __audit_free(struct task_struct *task);
+extern void __audit_uring_entry(u8 op);
+extern void __audit_uring_exit(int success, long code);
extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3);
extern void __audit_syscall_exit(int ret_success, long ret_value);
extern struct filename *__audit_reusename(const __user char *uptr);
extern void __audit_getname(struct filename *name);
-
extern void __audit_inode(struct filename *name, const struct dentry *dentry,
unsigned int flags);
extern void __audit_file(const struct file *);
@@ -300,6 +324,21 @@ static inline void audit_free(struct task_struct *task)
if (unlikely(task->audit_context))
__audit_free(task);
}
+static inline void audit_uring_entry(u8 op)
+{
+ /*
+ * We intentionally check audit_context() before audit_enabled as most
+ * Linux systems (as of ~2021) rely on systemd which forces audit to
+ * be enabled regardless of the user's audit configuration.
+ */
+ if (unlikely(audit_context() && audit_enabled))
+ __audit_uring_entry(op);
+}
+static inline void audit_uring_exit(int success, long code)
+{
+ if (unlikely(audit_context()))
+ __audit_uring_exit(success, code);
+}
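[Editor's note: as the comment above explains, audit_context() is tested before audit_enabled because systemd keeps auditing enabled on most systems, so the cheap per-task check short-circuits first. A hypothetical dispatcher sketch showing how the pair is meant to bracket an io_uring operation — execute_op() is a placeholder, not a real kernel function:

static long do_uring_op(u8 opcode)
{
	long ret;

	audit_uring_entry(opcode);	 /* note the opcode in the audit context */
	ret = execute_op(opcode);	 /* placeholder for the real work */
	audit_uring_exit(ret >= 0, ret); /* emit a record if the op is auditable */

	return ret;
}
]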
static inline void audit_syscall_entry(int major, unsigned long a0,
unsigned long a1, unsigned long a2,
unsigned long a3)
@@ -375,10 +414,13 @@ extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
const struct cred *old);
extern void __audit_log_capset(const struct cred *new, const struct cred *old);
extern void __audit_mmap_fd(int fd, int flags);
+extern void __audit_openat2_how(struct open_how *how);
extern void __audit_log_kern_module(char *name);
extern void __audit_fanotify(unsigned int response);
extern void __audit_tk_injoffset(struct timespec64 offset);
extern void __audit_ntp_log(const struct audit_ntp_data *ad);
+extern void __audit_log_nfcfg(const char *name, u8 af, unsigned int nentries,
+ enum audit_nfcfgop op, gfp_t gfp);
static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
{
@@ -469,6 +511,12 @@ static inline void audit_mmap_fd(int fd, int flags)
__audit_mmap_fd(fd, flags);
}
+static inline void audit_openat2_how(struct open_how *how)
+{
+ if (unlikely(!audit_dummy_context()))
+ __audit_openat2_how(how);
+}
+
static inline void audit_log_kern_module(char *name)
{
if (!audit_dummy_context())
@@ -514,6 +562,14 @@ static inline void audit_ntp_log(const struct audit_ntp_data *ad)
__audit_ntp_log(ad);
}
+static inline void audit_log_nfcfg(const char *name, u8 af,
+ unsigned int nentries,
+ enum audit_nfcfgop op, gfp_t gfp)
+{
+ if (audit_enabled)
+ __audit_log_nfcfg(name, af, nentries, op, gfp);
+}
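For illustration, a sketch of how a netfilter registration path might report a configuration change through this wrapper; the helper name and its parameters are assumptions, not a call site from the netfilter tree:

	/* Hedged sketch: logs one nftables table registration. */
	static void example_log_table_register(const char *table, u8 family)
	{
		audit_log_nfcfg(table, family, 0 /* nentries */,
				AUDIT_NFT_OP_TABLE_REGISTER, GFP_KERNEL);
	}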
+
extern int audit_n_rules;
extern int audit_signals;
#else /* CONFIG_AUDITSYSCALL */
@@ -523,6 +579,10 @@ static inline int audit_alloc(struct task_struct *task)
}
static inline void audit_free(struct task_struct *task)
{ }
+static inline void audit_uring_entry(u8 op)
+{ }
+static inline void audit_uring_exit(int success, long code)
+{ }
static inline void audit_syscall_entry(int major, unsigned long a0,
unsigned long a1, unsigned long a2,
unsigned long a3)
@@ -545,14 +605,6 @@ static inline struct filename *audit_reusename(const __user char *name)
}
static inline void audit_getname(struct filename *name)
{ }
-static inline void __audit_inode(struct filename *name,
- const struct dentry *dentry,
- unsigned int flags)
-{ }
-static inline void __audit_inode_child(struct inode *parent,
- const struct dentry *dentry,
- const unsigned char type)
-{ }
static inline void audit_inode(struct filename *name,
const struct dentry *dentry,
unsigned int aflags)
@@ -620,6 +672,9 @@ static inline void audit_log_capset(const struct cred *new,
static inline void audit_mmap_fd(int fd, int flags)
{ }
+static inline void audit_openat2_how(struct open_how *how)
+{ }
+
static inline void audit_log_kern_module(char *name)
{
}
@@ -646,6 +701,12 @@ static inline void audit_ntp_log(const struct audit_ntp_data *ad)
static inline void audit_ptrace(struct task_struct *t)
{ }
+
+static inline void audit_log_nfcfg(const char *name, u8 af,
+ unsigned int nentries,
+ enum audit_nfcfgop op, gfp_t gfp)
+{ }
+
#define audit_n_rules 0
#define audit_signals 0
#endif /* CONFIG_AUDITSYSCALL */
@@ -655,9 +716,4 @@ static inline bool audit_loginuid_set(struct task_struct *tsk)
return uid_valid(audit_get_loginuid(tsk));
}
-static inline void audit_log_string(struct audit_buffer *ab, const char *buf)
-{
- audit_log_n_string(ab, buf, strlen(buf));
-}
-
#endif
diff --git a/include/linux/audit_arch.h b/include/linux/audit_arch.h
new file mode 100644
index 000000000000..8fdb1afe251a
--- /dev/null
+++ b/include/linux/audit_arch.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* audit_arch.h -- Arch layer specific support for audit
+ *
+ * Copyright 2021 Red Hat Inc., Durham, North Carolina.
+ * All Rights Reserved.
+ *
+ * Author: Richard Guy Briggs <rgb@redhat.com>
+ */
+#ifndef _LINUX_AUDIT_ARCH_H_
+#define _LINUX_AUDIT_ARCH_H_
+
+enum auditsc_class_t {
+ AUDITSC_NATIVE = 0,
+ AUDITSC_COMPAT,
+ AUDITSC_OPEN,
+ AUDITSC_OPENAT,
+ AUDITSC_SOCKETCALL,
+ AUDITSC_EXECVE,
+ AUDITSC_OPENAT2,
+
+ AUDITSC_NVALS /* count */
+};
+
+#endif
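A minimal sketch of how an architecture might map syscalls onto these classes in its audit_classify_syscall() implementation (modeled loosely on the native 64-bit pattern; the exact mapping is per-arch and this is not any arch's actual table):

	int audit_classify_syscall(int abi, unsigned syscall)
	{
		switch (syscall) {
		case __NR_open:
			return AUDITSC_OPEN;
		case __NR_openat:
			return AUDITSC_OPENAT;
		case __NR_openat2:
			return AUDITSC_OPENAT2;
		case __NR_execve:
			return AUDITSC_EXECVE;
		default:
			return AUDITSC_NATIVE;	/* plain native syscall */
		}
	}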
diff --git a/include/linux/auxiliary_bus.h b/include/linux/auxiliary_bus.h
new file mode 100644
index 000000000000..de21d9d24a95
--- /dev/null
+++ b/include/linux/auxiliary_bus.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020 Intel Corporation
+ *
+ * Please see Documentation/driver-api/auxiliary_bus.rst for more information.
+ */
+
+#ifndef _AUXILIARY_BUS_H_
+#define _AUXILIARY_BUS_H_
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+/**
+ * DOC: DEVICE_LIFESPAN
+ *
+ * The registering driver is the entity that allocates memory for the
+ * auxiliary_device and registers it on the auxiliary bus. It is important to
+ * note that, as opposed to the platform bus, the registering driver is wholly
+ * responsible for the management of the memory used for the device object.
+ *
+ * To be clear, the memory for the auxiliary_device is freed in the release()
+ * callback defined by the registering driver. The registering driver should
+ * only call auxiliary_device_delete() and then auxiliary_device_uninit() when
+ * it is done with the device. The release() function is then automatically
+ * called if and when other code releases its reference to the device.
+ *
+ * A parent object, defined in the shared header file, contains the
+ * auxiliary_device. It also contains a pointer to the shared object(s), which
+ * are also defined in the shared header. Both the parent object and the shared
+ * object(s) are allocated by the registering driver. This layout allows the
+ * auxiliary_driver's registering module to perform a container_of() call to go
+ * from the pointer to the auxiliary_device, that is passed during the call to
+ * the auxiliary_driver's probe function, up to the parent object, and then
+ * have access to the shared object(s).
+ *
+ * The memory for the shared object(s) must have a lifespan equal to, or
+ * greater than, the lifespan of the memory for the auxiliary_device. The
+ * auxiliary_driver should only consider that the shared object is valid as
+ * long as the auxiliary_device is still registered on the auxiliary bus. It
+ * is up to the registering driver to manage (e.g. free or keep available) the
+ * memory for the shared object beyond the life of the auxiliary_device.
+ *
+ * The registering driver must unregister all auxiliary devices before its own
+ * driver.remove() is completed. An easy way to ensure this is to use the
+ * devm_add_action_or_reset() call to register a function against the parent
+ * device which unregisters the auxiliary device object(s).
+ *
+ * Finally, any operations which operate on the auxiliary devices must continue
+ * to function (if only to return an error) after the registering driver
+ * unregisters the auxiliary device.
+ */
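A sketch of the devm pattern suggested above, assuming a parent device pointer and an already-added auxiliary device (the names are illustrative):

	static void example_unregister_aux_dev(void *data)
	{
		struct auxiliary_device *auxdev = data;

		auxiliary_device_delete(auxdev);
		auxiliary_device_uninit(auxdev);
	}

	/* in the registering driver's probe, after auxiliary_device_add(): */
	ret = devm_add_action_or_reset(parent_dev, example_unregister_aux_dev,
				       auxdev);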
+
+/**
+ * struct auxiliary_device - auxiliary device object.
+ * @dev: Device.
+ *       The release and parent fields of the device structure must be filled
+ *       in.
+ * @name: Match name found by the auxiliary device driver.
+ * @id: Unique identifier if multiple devices of the same name are exported.
+ *
+ * An auxiliary_device represents a part of its parent device's functionality.
+ * It is given a name that, combined with the registering driver's
+ * KBUILD_MODNAME, creates a match_name that is used for driver binding, and an
+ * id that, combined with the match_name, provides a unique name to register with
+ * the bus subsystem. For example, a driver registering an auxiliary device is
+ * named 'foo_mod.ko' and the subdevice is named 'foo_dev'. The match name is
+ * therefore 'foo_mod.foo_dev'.
+ *
+ * Registering an auxiliary_device is a three-step process.
+ *
+ * First, a 'struct auxiliary_device' needs to be defined or allocated for each
+ * sub-device desired. The name, id, dev.release, and dev.parent fields of
+ * this structure must be filled in as follows.
+ *
+ * The 'name' field is to be given a name that is recognized by the auxiliary
+ * driver. If two auxiliary_devices with the same match_name, e.g.
+ * "foo_mod.foo_dev", are registered onto the bus, they must have unique id
+ * values (e.g. "x" and "y") so that the registered devices' names are
+ * "foo_mod.foo_dev.x" and "foo_mod.foo_dev.y". If match_name + id are not
+ * unique, then device_add() fails and generates an error message.
+ *
+ * The auxiliary_device.dev.type.release or auxiliary_device.dev.release must
+ * be populated with a non-NULL pointer to successfully register the
+ * auxiliary_device. This release call is where resources associated with the
+ * auxiliary device must be freed, because once the device is placed on the
+ * bus, the parent driver cannot tell what other code may still hold a
+ * reference to this data.
+ *
+ * The auxiliary_device.dev.parent should also be set, typically to the
+ * registering driver's device.
+ *
+ * Second, call auxiliary_device_init(), which checks several aspects of the
+ * auxiliary_device struct and performs a device_initialize(). After this step
+ * completes, any error state must have a call to auxiliary_device_uninit() in
+ * its resolution path.
+ *
+ * The third and final step in registering an auxiliary_device is to perform a
+ * call to auxiliary_device_add(), which sets the name of the device and adds
+ * the device to the bus.
+ *
+ * .. code-block:: c
+ *
+ * #define MY_DEVICE_NAME "foo_dev"
+ *
+ * ...
+ *
+ * struct auxiliary_device *my_aux_dev = my_aux_dev_alloc(xxx);
+ *
+ * // Step 1:
+ * my_aux_dev->name = MY_DEVICE_NAME;
+ * my_aux_dev->id = my_unique_id_alloc(xxx);
+ * my_aux_dev->dev.release = my_aux_dev_release;
+ * my_aux_dev->dev.parent = my_dev;
+ *
+ * // Step 2:
+ * if (auxiliary_device_init(my_aux_dev))
+ * goto fail;
+ *
+ * // Step 3:
+ * if (auxiliary_device_add(my_aux_dev)) {
+ * auxiliary_device_uninit(my_aux_dev);
+ * goto fail;
+ * }
+ *
+ * ...
+ *
+ *
+ * Unregistering an auxiliary_device is a two-step process to mirror the
+ * register process. First call auxiliary_device_delete(), then call
+ * auxiliary_device_uninit().
+ *
+ * .. code-block:: c
+ *
+ * auxiliary_device_delete(my_dev->my_aux_dev);
+ * auxiliary_device_uninit(my_dev->my_aux_dev);
+ */
+struct auxiliary_device {
+ struct device dev;
+ const char *name;
+ u32 id;
+};
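A sketch of the container_of() pattern described in the DOC section above; the parent and shared types are hypothetical stand-ins for whatever the shared header actually defines:

	struct example_shared;			/* defined in the shared header */

	struct example_parent {
		struct example_shared *shared;	/* shared object(s) */
		struct auxiliary_device auxdev;	/* embedded auxiliary device */
	};

	static int example_aux_probe(struct auxiliary_device *auxdev,
				     const struct auxiliary_device_id *id)
	{
		struct example_parent *parent =
			container_of(auxdev, struct example_parent, auxdev);

		/* parent->shared is usable while the auxdev stays registered */
		return 0;
	}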
+
+/**
+ * struct auxiliary_driver - Definition of an auxiliary bus driver
+ * @probe: Called when a matching device is added to the bus.
+ * @remove: Called when device is removed from the bus.
+ * @shutdown: Called at shut-down time to quiesce the device.
+ * @suspend: Called to put the device into a low-power state.
+ * @resume: Called to bring the device back from a low-power state.
+ * @name: Driver name.
+ * @driver: Core driver structure.
+ * @id_table: Table of devices this driver should match on the bus.
+ *
+ * Auxiliary drivers follow the standard driver model convention, where
+ * discovery/enumeration is handled by the core, and drivers provide probe()
+ * and remove() methods. They support power management and shutdown
+ * notifications using the standard conventions.
+ *
+ * Auxiliary drivers register themselves with the bus by calling
+ * auxiliary_driver_register(). The id_table contains the match_names of
+ * auxiliary devices that a driver can bind with.
+ *
+ * .. code-block:: c
+ *
+ * static const struct auxiliary_device_id my_auxiliary_id_table[] = {
+ * { .name = "foo_mod.foo_dev" },
+ * {},
+ * };
+ *
+ * MODULE_DEVICE_TABLE(auxiliary, my_auxiliary_id_table);
+ *
+ * struct auxiliary_driver my_drv = {
+ * .name = "myauxiliarydrv",
+ * .id_table = my_auxiliary_id_table,
+ * .probe = my_drv_probe,
+ * .remove = my_drv_remove
+ * };
+ */
+struct auxiliary_driver {
+ int (*probe)(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id);
+ void (*remove)(struct auxiliary_device *auxdev);
+ void (*shutdown)(struct auxiliary_device *auxdev);
+ int (*suspend)(struct auxiliary_device *auxdev, pm_message_t state);
+ int (*resume)(struct auxiliary_device *auxdev);
+ const char *name;
+ struct device_driver driver;
+ const struct auxiliary_device_id *id_table;
+};
+
+static inline void *auxiliary_get_drvdata(struct auxiliary_device *auxdev)
+{
+ return dev_get_drvdata(&auxdev->dev);
+}
+
+static inline void auxiliary_set_drvdata(struct auxiliary_device *auxdev, void *data)
+{
+ dev_set_drvdata(&auxdev->dev, data);
+}
+
+static inline struct auxiliary_device *to_auxiliary_dev(struct device *dev)
+{
+ return container_of(dev, struct auxiliary_device, dev);
+}
+
+static inline struct auxiliary_driver *to_auxiliary_drv(struct device_driver *drv)
+{
+ return container_of(drv, struct auxiliary_driver, driver);
+}
+
+int auxiliary_device_init(struct auxiliary_device *auxdev);
+int __auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname);
+#define auxiliary_device_add(auxdev) __auxiliary_device_add(auxdev, KBUILD_MODNAME)
+
+static inline void auxiliary_device_uninit(struct auxiliary_device *auxdev)
+{
+ put_device(&auxdev->dev);
+}
+
+static inline void auxiliary_device_delete(struct auxiliary_device *auxdev)
+{
+ device_del(&auxdev->dev);
+}
+
+int __auxiliary_driver_register(struct auxiliary_driver *auxdrv, struct module *owner,
+ const char *modname);
+#define auxiliary_driver_register(auxdrv) \
+ __auxiliary_driver_register(auxdrv, THIS_MODULE, KBUILD_MODNAME)
+
+void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv);
+
+/**
+ * module_auxiliary_driver() - Helper macro for registering an auxiliary driver
+ * @__auxiliary_driver: auxiliary driver struct
+ *
+ * Helper macro for auxiliary drivers which do not do anything special in
+ * module init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ *
+ * .. code-block:: c
+ *
+ * module_auxiliary_driver(my_drv);
+ */
+#define module_auxiliary_driver(__auxiliary_driver) \
+ module_driver(__auxiliary_driver, auxiliary_driver_register, auxiliary_driver_unregister)
+
+struct auxiliary_device *auxiliary_find_device(struct device *start,
+ const void *data,
+ int (*match)(struct device *dev, const void *data));
+
+#endif /* _AUXILIARY_BUS_H_ */
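For completeness, a sketch of a consumer using auxiliary_find_device(); the match callback follows the usual bus_find_device() convention, and the device name is illustrative. The lookup takes a device reference, so the caller must drop it:

	static int example_match_name(struct device *dev, const void *data)
	{
		return !strcmp(dev_name(dev), data);
	}

	struct auxiliary_device *adev =
		auxiliary_find_device(NULL, "foo_mod.foo_dev.0",
				      example_match_name);
	if (adev) {
		/* ... use adev ... */
		put_device(&adev->dev);	/* drop the lookup's reference */
	}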
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index ca956b672ac0..2ce27e8e4f19 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -136,6 +136,19 @@ enum virtchnl_ops {
VIRTCHNL_OP_DISABLE_CHANNELS = 31,
VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
+ /* opcode 34 - 44 are reserved */
+ VIRTCHNL_OP_ADD_RSS_CFG = 45,
+ VIRTCHNL_OP_DEL_RSS_CFG = 46,
+ VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
+ VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
+ VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51,
+ VIRTCHNL_OP_ADD_VLAN_V2 = 52,
+ VIRTCHNL_OP_DEL_VLAN_V2 = 53,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55,
+ VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56,
+ VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
+ VIRTCHNL_OP_MAX,
};
/* These macros are used to generate compilation errors if a structure/union
@@ -232,24 +245,27 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
* VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
* TX/RX Checksum offloading and TSO for non-tunnelled packets.
*/
-#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
-#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
-#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
-#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
-#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
-#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
-#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
-#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
-#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
-#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
-#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
-#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
-#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
-#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000
-
-/* Define below the capability flags that are not offloads */
-#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080
+#define VIRTCHNL_VF_OFFLOAD_L2 BIT(0)
+#define VIRTCHNL_VF_OFFLOAD_IWARP BIT(1)
+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ BIT(3)
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG BIT(4)
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR BIT(5)
+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES BIT(6)
+/* used to negotiate communicating link speeds in Mbps */
+#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7)
+#define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15)
+#define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16)
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17)
+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 BIT(18)
+#define VIRTCHNL_VF_OFFLOAD_RSS_PF BIT(19)
+#define VIRTCHNL_VF_OFFLOAD_ENCAP BIT(20)
+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM BIT(21)
+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM BIT(22)
+#define VIRTCHNL_VF_OFFLOAD_ADQ BIT(23)
+#define VIRTCHNL_VF_OFFLOAD_USO BIT(25)
+#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF BIT(27)
+#define VIRTCHNL_VF_OFFLOAD_FDIR_PF BIT(28)
+
#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
VIRTCHNL_VF_OFFLOAD_VLAN | \
VIRTCHNL_VF_OFFLOAD_RSS_PF)
@@ -403,9 +419,36 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
* PF removes the filters and returns status.
*/
+/* VIRTCHNL_ETHER_ADDR_LEGACY
+ * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad
+ * bytes. Moving forward, VF drivers should not set type to
+ * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy
+ * behavior. The control plane function (i.e. PF) can use a best effort method
+ * of tracking the primary/device unicast in this case, but there is no
+ * guarantee and functionality depends on the implementation of the PF.
+ */
+
+/* VIRTCHNL_ETHER_ADDR_PRIMARY
+ * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the
+ * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and
+ * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane
+ * function (i.e. PF) to accurately track and use this MAC address for
+ * displaying on the host and for VM/function reset.
+ */
+
+/* VIRTCHNL_ETHER_ADDR_EXTRA
+ * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra
+ * unicast and/or multicast filters that are being added/deleted via
+ * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively.
+ */
struct virtchnl_ether_addr {
u8 addr[ETH_ALEN];
- u8 pad[2];
+ u8 type;
+#define VIRTCHNL_ETHER_ADDR_LEGACY 0
+#define VIRTCHNL_ETHER_ADDR_PRIMARY 1
+#define VIRTCHNL_ETHER_ADDR_EXTRA 2
+#define VIRTCHNL_ETHER_ADDR_TYPE_MASK 3 /* first two bits of type are valid */
+ u8 pad;
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
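A sketch of a VF driver filling the new type field when adding its primary MAC; the address source is an assumption, and extra filters would use the EXTRA type instead:

	struct virtchnl_ether_addr primary = {};

	ether_addr_copy(primary.addr, netdev->dev_addr);	/* hypothetical source */
	primary.type = VIRTCHNL_ETHER_ADDR_PRIMARY;
	/* extra unicast/multicast filters: VIRTCHNL_ETHER_ADDR_EXTRA */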
@@ -440,6 +483,351 @@ struct virtchnl_vlan_filter_list {
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
+/* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related
+ * structures and opcodes.
+ *
+ * VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver
+ * populates it the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype.
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype.
+ * VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported
+ * by the PF concurrently. For example, if the PF can support
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters, it
+ * would OR the following bits:
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * The VF would interpret this as VLAN filtering can be supported on both 0x8100
+ * and 0x88A8 VLAN ethertypes.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_XOR - Used when only a single ethertype can be
+ * supported by the PF concurrently. For example, if the PF can support
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping
+ * offload it would OR the following bits:
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * The VF would interpret this as VLAN stripping can be supported on either
+ * 0x8100 or 0x88a8 VLAN ethertypes. So when requesting VLAN stripping via
+ * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will override
+ * the previously set value.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or
+ * strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware
+ * offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware
+ * offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.
+ *
+ * VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for
+ * VLAN filtering if the underlying PF supports it.
+ *
+ * VIRTCHNL_VLAN_TOGGLE - This field is used to say whether a
+ * certain VLAN capability can be toggled. For example, if the underlying PF/CP
+ * allows the VF to toggle VLAN filtering, stripping, and/or insertion it should
+ * set this bit along with the supported ethertypes.
+ */
+enum virtchnl_vlan_support {
+ VIRTCHNL_VLAN_UNSUPPORTED = 0,
+ VIRTCHNL_VLAN_ETHERTYPE_8100 = BIT(0),
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 = BIT(1),
+ VIRTCHNL_VLAN_ETHERTYPE_9100 = BIT(2),
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 = BIT(8),
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 = BIT(9),
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 = BIT(10),
+ VIRTCHNL_VLAN_PRIO = BIT(24),
+ VIRTCHNL_VLAN_FILTER_MASK = BIT(28),
+ VIRTCHNL_VLAN_ETHERTYPE_AND = BIT(29),
+ VIRTCHNL_VLAN_ETHERTYPE_XOR = BIT(30),
+ VIRTCHNL_VLAN_TOGGLE = BIT(31),
+};
+
+/* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
+ * for filtering, insertion, and stripping capabilities.
+ *
+ * If only outer capabilities are supported (for filtering, insertion, and/or
+ * stripping) then this refers to the outer most or single VLAN from the VF's
+ * perspective.
+ *
+ * If only inner capabilities are supported (for filtering, insertion, and/or
+ * stripping) then this refers to the outer most or single VLAN from the VF's
+ * perspective. Functionally this is the same as if only outer capabilities are
+ * supported. The VF driver is just forced to use the inner fields when
+ * adding/deleting filters and enabling/disabling offloads (if supported).
+ *
+ * If both outer and inner capabilities are supported (for filtering, insertion,
+ * and/or stripping) then outer refers to the outer most or single VLAN and
+ * inner refers to the second VLAN, if it exists, in the packet.
+ *
+ * There is no support for tunneled VLAN offloads, so outer or inner are never
+ * referring to a tunneled packet from the VF's perspective.
+ */
+struct virtchnl_vlan_supported_caps {
+ u32 outer;
+ u32 inner;
+};
+
+/* The PF populates these fields based on the supported VLAN filtering. If a
+ * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
+ * reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using
+ * the unsupported fields.
+ *
+ * Also, a VF is only allowed to toggle its VLAN filtering setting if the
+ * VIRTCHNL_VLAN_TOGGLE bit is set.
+ *
+ * The ethertype(s) specified in the ethertype_init field are the ethertypes
+ * enabled for VLAN filtering. VLAN filtering in this case refers to the outer
+ * most VLAN from the VF's perspective. If both inner and outer filtering are
+ * allowed then ethertype_init only refers to the outer most VLAN, as the only
+ * VLAN ethertype supported for inner VLAN filtering is
+ * VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled
+ * when both inner and outer filtering are allowed.
+ *
+ * The max_filters field tells the VF how many VLAN filters it's allowed to have
+ * at any one time. If the VF already has max_filters filters and tries to add
+ * another, the PF will reject the request. To prevent failures, the VF
+ * should keep track of how many VLAN filters it has added and not attempt to
+ * add more than max_filters.
+ */
+struct virtchnl_vlan_filtering_caps {
+ struct virtchnl_vlan_supported_caps filtering_support;
+ u32 ethertype_init;
+ u16 max_filters;
+ u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);
+
+/* This enum is used for the virtchnl_vlan_offload_caps structure to specify
+ * if the PF supports a different ethertype for stripping and insertion.
+ *
+ * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified
+ * for stripping affect the ethertype(s) specified for insertion and vice
+ * versa. If the VF tries to configure VLAN stripping via
+ * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then
+ * that will be the ethertype for both stripping and insertion.
+ *
+ * VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for
+ * stripping do not affect the ethertype(s) specified for insertion and vice
+ * versa.
+ */
+enum virtchnl_vlan_ethertype_match {
+ VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,
+ VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,
+};
+
+/* The PF populates these fields based on the supported VLAN offloads. If a
+ * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
+ * reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or
+ * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.
+ *
+ * Also, a VF is only allowed to toggle its VLAN offload setting if the
+ * VIRTCHNL_VLAN_TOGGLE bit is set.
+ *
+ * The VF driver needs to be aware of how the tags are stripped by hardware and
+ * inserted by the VF driver based on the level of offload support. The PF will
+ * populate these fields based on where the VLAN tags are expected to be
+ * offloaded via the VIRTCHNL_VLAN_TAG_LOCATION_* bits. The VF will need to
+ * interpret these fields. See the definition of the
+ * VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support
+ * enumeration.
+ */
+struct virtchnl_vlan_offload_caps {
+ struct virtchnl_vlan_supported_caps stripping_support;
+ struct virtchnl_vlan_supported_caps insertion_support;
+ u32 ethertype_init;
+ u8 ethertype_match;
+ u8 pad[3];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps);
+
+/* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
+ * VF sends this message to determine its VLAN capabilities.
+ *
+ * PF will mark which capabilities it supports based on hardware support and
+ * current configuration. For example, if a port VLAN is configured the PF will
+ * not allow outer VLAN filtering, stripping, or insertion to be configured so
+ * it will block these features from the VF.
+ *
+ * The VF will need to cross-reference its capabilities with the PF's
+ * capabilities in the response message from the PF to determine the VLAN
+ * support.
+ */
+struct virtchnl_vlan_caps {
+ struct virtchnl_vlan_filtering_caps filtering;
+ struct virtchnl_vlan_offload_caps offloads;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps);
+
+struct virtchnl_vlan {
+ u16 tci; /* tci[15:13] = PCP and tci[11:0] = VID */
+ u16 tci_mask; /* only valid if VIRTCHNL_VLAN_FILTER_MASK set in
+ * filtering caps
+ */
+ u16 tpid; /* 0x8100, 0x88a8, etc. and only type(s) set in
+ * filtering caps. Note that tpid here does not refer to
+ * VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the
+ * actual 2-byte VLAN TPID
+ */
+ u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan);
+
+struct virtchnl_vlan_filter {
+ struct virtchnl_vlan inner;
+ struct virtchnl_vlan outer;
+ u8 pad[16];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter);
+
+/* VIRTCHNL_OP_ADD_VLAN_V2
+ * VIRTCHNL_OP_DEL_VLAN_V2
+ *
+ * VF sends these messages to add/del one or more VLAN tag filters for Rx
+ * traffic.
+ *
+ * The PF attempts to add the filters and returns status.
+ *
+ * The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the
+ * supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS.
+ */
+struct virtchnl_vlan_filter_list_v2 {
+ u16 vport_id;
+ u16 num_elements;
+ u8 pad[4];
+ struct virtchnl_vlan_filter filters[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_filter_list_v2);
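Since one filter element is embedded in the struct, the message length for n elements is sizeof(list) plus (n - 1) extra filters; the validation case later in this patch applies the same formula. A sketch:

	static size_t example_vlan_v2_msglen(u16 num_elements)
	{
		/* num_elements must be >= 1; zero-element lists are rejected */
		return sizeof(struct virtchnl_vlan_filter_list_v2) +
		       (num_elements - 1) * sizeof(struct virtchnl_vlan_filter);
	}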
+
+/* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
+ * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
+ * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
+ * VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
+ *
+ * VF sends this message to enable or disable VLAN stripping or insertion. It
+ * also needs to specify an ethertype. The VF knows which VLAN ethertypes are
+ * allowed and whether or not it's allowed to enable/disable the specific
+ * offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
+ * parse the virtchnl_vlan_caps.offloads fields to determine which offload
+ * messages are allowed.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
+ * following manner the VF will be allowed to enable and/or disable 0x8100 inner
+ * VLAN insertion and/or stripping via the opcodes listed above. Inner in this
+ * case means the outer most or single VLAN from the VF's perspective. This is
+ * because no outer offloads are supported. See the comments above the
+ * virtchnl_vlan_supported_caps structure for more details.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.inner =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.inner =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * In order to enable inner (again note that in this case inner is the outer
+ * most or single VLAN from the VF's perspective) VLAN stripping for 0x8100
+ * VLANs, the VF would populate the virtchnl_vlan_setting structure in the
+ * following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
+ *
+ * virtchnl_vlan_setting.inner_ethertype_setting =
+ * VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * The reason that VLAN TPID(s) are not being used for the
+ * outer_ethertype_setting and inner_ethertype_setting fields is because it's
+ * possible a device could support VLAN insertion and/or stripping offload on
+ * multiple ethertypes concurrently, so this method allows a VF to request
+ * multiple ethertypes in one message using the virtchnl_vlan_support
+ * enumeration.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
+ * following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer
+ * VLAN insertion and stripping simultaneously. The
+ * virtchnl_vlan_caps.offloads.ethertype_match field will also have to be
+ * populated based on what the PF can support.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.outer =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.outer =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF
+ * would populate the virtchnl_vlan_setting structure in the following manner
+ * and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting =
+ *	VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *	VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * There is also the case where a PF and the underlying hardware can support
+ * VLAN offloads on multiple ethertypes, but not concurrently. For example, if
+ * the PF populates the virtchnl_vlan_caps.offloads in the following manner the
+ * VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN
+ * offloads. The ethertypes must match for stripping and insertion.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.outer =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.outer =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * virtchnl_vlan_caps.offloads.ethertype_match =
+ * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+ *
+ * In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would
+ * populate the virtchnl_vlan_setting structure in the following manner and send
+ * the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message. Also, this will change the
+ * ethertype for VLAN insertion if it's enabled. So, for completeness, a
+ * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same ethertype should be sent.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ */
+struct virtchnl_vlan_setting {
+ u32 outer_ethertype_setting;
+ u32 inner_ethertype_setting;
+ u16 vport_id;
+ u8 pad[6];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting);
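Tying the long example above together, a sketch of a VF enabling outer stripping for both supported ethertypes; the vsi id and the send helper are assumptions, not part of this interface:

	struct virtchnl_vlan_setting setting = {};

	setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_8100 |
					  VIRTCHNL_VLAN_ETHERTYPE_88A8;
	setting.vport_id = vf_vsi_id;		/* assigned at init (assumption) */

	/* example_send_msg() is a hypothetical mailbox wrapper */
	example_send_msg(VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
			 (u8 *)&setting, sizeof(setting));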
+
/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
* VF sends VSI id and flags.
* PF returns status code in retval.
@@ -557,6 +945,11 @@ enum virtchnl_action {
/* action types */
VIRTCHNL_ACTION_DROP = 0,
VIRTCHNL_ACTION_TC_REDIRECT,
+ VIRTCHNL_ACTION_PASSTHRU,
+ VIRTCHNL_ACTION_QUEUE,
+ VIRTCHNL_ACTION_Q_REGION,
+ VIRTCHNL_ACTION_MARK,
+ VIRTCHNL_ACTION_COUNT,
};
enum virtchnl_flow_type {
@@ -572,6 +965,7 @@ struct virtchnl_filter {
enum virtchnl_action action;
u32 action_meta;
u8 field_flags;
+ u8 pad[3];
};
VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
@@ -610,6 +1004,7 @@ struct virtchnl_pf_event {
/* link_speed provided in Mbps */
u32 link_speed;
u8 link_status;
+ u8 pad[3];
} link_event_adv;
} event_data;
@@ -635,6 +1030,7 @@ struct virtchnl_iwarp_qv_info {
u16 ceq_idx;
u16 aeq_idx;
u8 itr_idx;
+ u8 pad[3];
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
@@ -663,6 +1059,286 @@ enum virtchnl_vfr_states {
VIRTCHNL_VFR_VFACTIVE,
};
+/* Type of RSS algorithm */
+enum virtchnl_rss_algorithm {
+ VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
+ VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1,
+ VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
+ VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
+};
+
+#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
+#define PROTO_HDR_SHIFT 5
+#define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT)
+#define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
+
+/* VFs use these macros to configure each protocol header.
+ * They specify which protocol headers and protocol header fields to use,
+ * based on virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
+ * @param hdr: a struct virtchnl_proto_hdr
+ * @param hdr_type: ETH/IPV4/TCP, etc.
+ * @param field: SRC/DST/TEID/SPI, etc.
+ */
+#define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
+ ((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
+ ((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
+ ((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr) ((hdr)->field_selector)
+
+#define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
+ (VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
+ VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
+#define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
+ (VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
+ VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
+
+#define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
+ ((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
+#define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
+ (((hdr)->type) >> PROTO_HDR_SHIFT)
+#define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
+ ((hdr)->type == ((val) >> PROTO_HDR_SHIFT))
+#define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
+ (VIRTCHNL_TEST_PROTO_HDR_TYPE((hdr), (val)) && \
+ VIRTCHNL_TEST_PROTO_HDR_FIELD((hdr), (val)))
+
+/* Protocol header type within a packet segment. A segment consists of one or
+ * more protocol headers that make up a logical group of protocol headers. Each
+ * logical group of protocol headers encapsulates or is encapsulated using/by
+ * tunneling or encapsulation protocols for network virtualization.
+ */
+enum virtchnl_proto_hdr_type {
+ VIRTCHNL_PROTO_HDR_NONE,
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_S_VLAN,
+ VIRTCHNL_PROTO_HDR_C_VLAN,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_TCP,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_SCTP,
+ VIRTCHNL_PROTO_HDR_GTPU_IP,
+ VIRTCHNL_PROTO_HDR_GTPU_EH,
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
+ VIRTCHNL_PROTO_HDR_PPPOE,
+ VIRTCHNL_PROTO_HDR_L2TPV3,
+ VIRTCHNL_PROTO_HDR_ESP,
+ VIRTCHNL_PROTO_HDR_AH,
+ VIRTCHNL_PROTO_HDR_PFCP,
+};
+
+/* Protocol header field within a protocol header. */
+enum virtchnl_proto_hdr_field {
+ /* ETHER */
+ VIRTCHNL_PROTO_HDR_ETH_SRC =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH),
+ VIRTCHNL_PROTO_HDR_ETH_DST,
+ VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,
+ /* S-VLAN */
+ VIRTCHNL_PROTO_HDR_S_VLAN_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN),
+ /* C-VLAN */
+ VIRTCHNL_PROTO_HDR_C_VLAN_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN),
+ /* IPV4 */
+ VIRTCHNL_PROTO_HDR_IPV4_SRC =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4),
+ VIRTCHNL_PROTO_HDR_IPV4_DST,
+ VIRTCHNL_PROTO_HDR_IPV4_DSCP,
+ VIRTCHNL_PROTO_HDR_IPV4_TTL,
+ VIRTCHNL_PROTO_HDR_IPV4_PROT,
+ /* IPV6 */
+ VIRTCHNL_PROTO_HDR_IPV6_SRC =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
+ VIRTCHNL_PROTO_HDR_IPV6_DST,
+ VIRTCHNL_PROTO_HDR_IPV6_TC,
+ VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
+ VIRTCHNL_PROTO_HDR_IPV6_PROT,
+ /* TCP */
+ VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
+ VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
+ /* UDP */
+ VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
+ VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
+ /* SCTP */
+ VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
+ VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
+ /* GTPU_IP */
+ VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
+ /* GTPU_EH */
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH),
+ VIRTCHNL_PROTO_HDR_GTPU_EH_QFI,
+ /* PPPOE */
+ VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),
+ /* L2TPV3 */
+ VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),
+ /* ESP */
+ VIRTCHNL_PROTO_HDR_ESP_SPI =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),
+ /* AH */
+ VIRTCHNL_PROTO_HDR_AH_SPI =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),
+ /* PFCP */
+ VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
+ VIRTCHNL_PROTO_HDR_PFCP_SEID,
+};
+
+struct virtchnl_proto_hdr {
+ enum virtchnl_proto_hdr_type type;
+ u32 field_selector; /* a bit mask to select field for header type */
+ u8 buffer[64];
+ /**
+	 * binary buffer in network order for the specific header type.
+	 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
+	 * header is expected to be copied into the buffer.
+ */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
+
+struct virtchnl_proto_hdrs {
+ u8 tunnel_level;
+ u8 pad[3];
+ /**
+	 * specifies where the protocol headers start from.
+ * 0 - from the outer layer
+ * 1 - from the first inner layer
+ * 2 - from the second inner layer
+ * ....
+ **/
+	int count; /* the number of proto layers must be less than VIRTCHNL_MAX_NUM_PROTO_HDRS */
+ struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
+
+struct virtchnl_rss_cfg {
+ struct virtchnl_proto_hdrs proto_hdrs; /* protocol headers */
+ enum virtchnl_rss_algorithm rss_algorithm; /* RSS algorithm type */
+	u8 reserved[128];			   /* reserved for future use */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
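A sketch of a VF building an RSS configuration that hashes on IPv4 source and destination addresses using the helper macros defined above (allocation and error handling are illustrative):

	struct virtchnl_rss_cfg *cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	struct virtchnl_proto_hdr *hdr;

	if (!cfg)
		return -ENOMEM;

	cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
	cfg->proto_hdrs.tunnel_level = 0;	/* match from the outer layer */
	cfg->proto_hdrs.count = 1;

	hdr = &cfg->proto_hdrs.proto_hdr[0];
	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
	/* send with VIRTCHNL_OP_ADD_RSS_CFG, then kfree(cfg) */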
+
+/* action configuration for FDIR */
+struct virtchnl_filter_action {
+ enum virtchnl_action type;
+ union {
+ /* used for queue and qgroup action */
+ struct {
+ u16 index;
+ u8 region;
+ } queue;
+ /* used for count action */
+ struct {
+ /* share counter ID with other flow rules */
+ u8 shared;
+ u32 id; /* counter ID */
+ } count;
+ /* used for mark action */
+ u32 mark_id;
+ u8 reserve[32];
+ } act_conf;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
+
+#define VIRTCHNL_MAX_NUM_ACTIONS 8
+
+struct virtchnl_filter_action_set {
+	/* the number of actions must be less than VIRTCHNL_MAX_NUM_ACTIONS */
+ int count;
+ struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);
+
+/* pattern and action for FDIR rule */
+struct virtchnl_fdir_rule {
+ struct virtchnl_proto_hdrs proto_hdrs;
+ struct virtchnl_filter_action_set action_set;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
+
+/* Status returned to VF after VF requests FDIR commands.
+ * VIRTCHNL_FDIR_SUCCESS
+ * The VF FDIR-related request was successfully completed by the PF.
+ * The request can be OP_ADD/DEL.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
+ * The OP_ADD_FDIR_FILTER request failed because no hardware resource
+ * is available.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
+ * The OP_ADD_FDIR_FILTER request failed because the rule already exists.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
+ * The OP_ADD_FDIR_FILTER request failed because it conflicts with an
+ * existing rule.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
+ * The OP_DEL_FDIR_FILTER request failed because the rule doesn't exist.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
+ * The OP_ADD_FDIR_FILTER request failed because parameter validation
+ * failed or the HW doesn't support the rule.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
+ * The OP_ADD/DEL_FDIR_FILTER request failed because programming
+ * timed out.
+ */
+enum virtchnl_fdir_prgm_status {
+ VIRTCHNL_FDIR_SUCCESS = 0,
+ VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
+ VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
+ VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
+ VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
+ VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
+ VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
+};
+
+/* VIRTCHNL_OP_ADD_FDIR_FILTER
+ * VF sends this request to PF by filling out vsi_id,
+ * validate_only and rule_cfg. PF will return flow_id
+ * if the request succeeds, and will return the status to VF.
+ */
+struct virtchnl_fdir_add {
+ u16 vsi_id; /* INPUT */
+ /*
+	 * 1 for validating an FDIR rule, 0 for creating an FDIR rule.
+	 * Validation and creation share one opcode: VIRTCHNL_OP_ADD_FDIR_FILTER.
+ */
+ u16 validate_only; /* INPUT */
+ u32 flow_id; /* OUTPUT */
+ struct virtchnl_fdir_rule rule_cfg; /* INPUT */
+ enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
+
+/* VIRTCHNL_OP_DEL_FDIR_FILTER
+ * VF sends this request to PF by filling out vsi_id
+ * and flow_id. PF will return the deletion status to VF.
+ */
+struct virtchnl_fdir_del {
+ u16 vsi_id; /* INPUT */
+ u16 pad;
+ u32 flow_id; /* INPUT */
+ enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
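A sketch of the add/delete round trip; the rule filling and the mailbox send are hypothetical helpers, and stack allocation is only for brevity (real drivers allocate these messages):

	struct virtchnl_fdir_add add = {};
	struct virtchnl_fdir_del del = {};

	add.vsi_id = vf_vsi_id;			/* assigned at init (assumption) */
	add.validate_only = 0;			/* 1 = validate without programming */
	example_fill_rule(&add.rule_cfg);	/* hypothetical pattern/action fill */
	/* send VIRTCHNL_OP_ADD_FDIR_FILTER; on success the PF sets add.flow_id
	 * and add.status == VIRTCHNL_FDIR_SUCCESS */

	del.vsi_id = add.vsi_id;
	del.flow_id = add.flow_id;		/* as returned by the PF */
	/* send VIRTCHNL_OP_DEL_FDIR_FILTER and check del.status */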
+
/**
* virtchnl_vc_validate_vf_msg
* @ver: Virtchnl version info
@@ -823,6 +1499,40 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_DEL_CLOUD_FILTER:
valid_len = sizeof(struct virtchnl_filter);
break;
+ case VIRTCHNL_OP_ADD_RSS_CFG:
+ case VIRTCHNL_OP_DEL_RSS_CFG:
+ valid_len = sizeof(struct virtchnl_rss_cfg);
+ break;
+ case VIRTCHNL_OP_ADD_FDIR_FILTER:
+ valid_len = sizeof(struct virtchnl_fdir_add);
+ break;
+ case VIRTCHNL_OP_DEL_FDIR_FILTER:
+ valid_len = sizeof(struct virtchnl_fdir_del);
+ break;
+ case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
+ break;
+ case VIRTCHNL_OP_ADD_VLAN_V2:
+ case VIRTCHNL_OP_DEL_VLAN_V2:
+ valid_len = sizeof(struct virtchnl_vlan_filter_list_v2);
+ if (msglen >= valid_len) {
+ struct virtchnl_vlan_filter_list_v2 *vfl =
+ (struct virtchnl_vlan_filter_list_v2 *)msg;
+
+			if (vfl->num_elements == 0) {
+				err_msg_format = true;
+				break;
+			}
+
+			valid_len += (vfl->num_elements - 1) *
+				     sizeof(struct virtchnl_vlan_filter);
+		}
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
+ case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
+ case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
+ valid_len = sizeof(struct virtchnl_vlan_setting);
+ break;
/* These are always errors coming from the VF. */
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 4fc87dee005a..ae12696ec492 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -28,13 +28,6 @@ enum wb_state {
WB_start_all, /* nr_pages == 0 (all) work pending */
};
-enum wb_congested_state {
- WB_async_congested, /* The async (write) queue is getting full */
- WB_sync_congested, /* The sync queue is getting full */
-};
-
-typedef int (congested_fn)(void *, int);
-
enum wb_stat_item {
WB_RECLAIMABLE,
WB_WRITEBACK,
@@ -54,7 +47,6 @@ enum wb_reason {
WB_REASON_SYNC,
WB_REASON_PERIODIC,
WB_REASON_LAPTOP_TIMER,
- WB_REASON_FREE_MORE_MEM,
WB_REASON_FS_FREE_SPACE,
/*
* There is no bdi forker thread any more and works are done
@@ -89,26 +81,6 @@ struct wb_completion {
struct wb_completion cmpl = WB_COMPLETION_INIT(bdi)
/*
- * For cgroup writeback, multiple wb's may map to the same blkcg. Those
- * wb's can operate mostly independently but should share the congested
- * state. To facilitate such sharing, the congested state is tracked using
- * the following struct which is created on demand, indexed by blkcg ID on
- * its bdi, and refcounted.
- */
-struct bdi_writeback_congested {
- unsigned long state; /* WB_[a]sync_congested flags */
- refcount_t refcnt; /* nr of attached wb's and blkg */
-
-#ifdef CONFIG_CGROUP_WRITEBACK
- struct backing_dev_info *__bdi; /* the associated bdi, set to NULL
- * on bdi unregistration. For memcg-wb
- * internal use only! */
- int blkcg_id; /* ID of the associated blkcg */
- struct rb_node rb_node; /* on bdi->cgwb_congestion_tree */
-#endif
-};
-
-/*
* Each wb (bdi_writeback) can perform writeback operations, is measured
* and throttled, independently. Without cgroup writeback, each bdi
* (bdi_writeback) is served by its embedded bdi->wb.
@@ -126,6 +98,9 @@ struct bdi_writeback_congested {
* change as blkcg is disabled and enabled higher up in the hierarchy, a wb
* is tested for blkcg after lookup and removed from index on mismatch so
* that a new wb for the combination can be created.
+ *
+ * Each bdi_writeback that is not embedded into the backing_dev_info must hold
+ * a reference to the parent backing_dev_info. See cgwb_create() for details.
*/
struct bdi_writeback {
struct backing_dev_info *bdi; /* our parent bdi */
@@ -139,10 +114,9 @@ struct bdi_writeback {
struct list_head b_dirty_time; /* time stamps are dirty */
spinlock_t list_lock; /* protects the b_* lists */
+ atomic_t writeback_inodes; /* number of inodes under writeback */
struct percpu_counter stat[NR_WB_STAT_ITEMS];
- struct bdi_writeback_congested *congested;
-
unsigned long bw_time_stamp; /* last time write bw is updated */
unsigned long dirtied_stamp;
unsigned long written_stamp; /* pages written at bw_time_stamp */
@@ -165,6 +139,7 @@ struct bdi_writeback {
spinlock_t work_lock; /* protects work_list & dwork scheduling */
struct list_head work_list;
struct delayed_work dwork; /* work item used for writeback */
+ struct delayed_work bw_dwork; /* work item used for bandwidth estimate */
unsigned long dirty_sleep; /* last wait */
@@ -177,6 +152,8 @@ struct bdi_writeback {
struct cgroup_subsys_state *blkcg_css; /* and blkcg */
struct list_head memcg_node; /* anchored at memcg->cgwb_list */
struct list_head blkcg_node; /* anchored at blkcg->cgwb_list */
+ struct list_head b_attached; /* attached inodes, protected by list_lock */
+ struct list_head offline_node; /* anchored at offline_cgwbs */
union {
struct work_struct release_work;
@@ -191,10 +168,6 @@ struct backing_dev_info {
struct list_head bdi_list;
unsigned long ra_pages; /* max readahead in PAGE_SIZE units */
unsigned long io_pages; /* max allowed IO size */
- congested_fn *congested_fn; /* Function pointer if device is md/dm */
- void *congested_data; /* Pointer to aux data for congested func */
-
- const char *name;
struct kref refcnt; /* Reference counter for the structure */
unsigned int capabilities; /* Device capabilities */
@@ -211,15 +184,13 @@ struct backing_dev_info {
struct list_head wb_list; /* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
- struct rb_root cgwb_congested_tree; /* their congested states */
struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */
struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
-#else
- struct bdi_writeback_congested *wb_congested;
#endif
wait_queue_head_t wb_waitq;
struct device *dev;
+ char dev_name[64];
struct device *owner;
struct timer_list laptop_mode_wb_timer;
@@ -229,24 +200,6 @@ struct backing_dev_info {
#endif
};
-enum {
- BLK_RW_ASYNC = 0,
- BLK_RW_SYNC = 1,
-};
-
-void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
-void set_wb_congested(struct bdi_writeback_congested *congested, int sync);
-
-static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
-{
- clear_wb_congested(bdi->wb.congested, sync);
-}
-
-static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
-{
- set_wb_congested(bdi->wb.congested, sync);
-}
-
struct wb_lock_cookie {
bool locked;
unsigned long flags;
@@ -278,8 +231,9 @@ static inline void wb_get(struct bdi_writeback *wb)
/**
* wb_put - decrement a wb's refcount
* @wb: bdi_writeback to put
+ * @nr: number of references to put
*/
-static inline void wb_put(struct bdi_writeback *wb)
+static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr)
{
if (WARN_ON_ONCE(!wb->bdi)) {
/*
@@ -290,7 +244,16 @@ static inline void wb_put(struct bdi_writeback *wb)
}
if (wb != &wb->bdi->wb)
- percpu_ref_put(&wb->refcnt);
+ percpu_ref_put_many(&wb->refcnt, nr);
+}
+
+/**
+ * wb_put - decrement a wb's refcount
+ * @wb: bdi_writeback to put
+ */
+static inline void wb_put(struct bdi_writeback *wb)
+{
+ wb_put_many(wb, 1);
}
/**
@@ -319,6 +282,10 @@ static inline void wb_put(struct bdi_writeback *wb)
{
}
+static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr)
+{
+}
+
static inline bool wb_dying(struct bdi_writeback *wb)
{
return false;
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index f88197c1ffc2..439815cc1ab9 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -12,10 +12,8 @@
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
-#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/writeback.h>
-#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>
@@ -33,14 +31,10 @@ int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
__printf(2, 0)
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
va_list args);
-int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
+void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);
-struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
-static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask)
-{
- return bdi_alloc_node(gfp_mask, NUMA_NO_NODE);
-}
+struct backing_dev_info *bdi_alloc(int node_id);
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
@@ -68,7 +62,7 @@ static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
return atomic_long_read(&bdi->tot_write_bandwidth);
}
-static inline void __add_wb_stat(struct bdi_writeback *wb,
+static inline void wb_stat_mod(struct bdi_writeback *wb,
enum wb_stat_item item, s64 amount)
{
percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
@@ -76,12 +70,12 @@ static inline void __add_wb_stat(struct bdi_writeback *wb,
static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
- __add_wb_stat(wb, item, 1);
+ wb_stat_mod(wb, item, 1);
}
static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
- __add_wb_stat(wb, item, -1);
+ wb_stat_mod(wb, item, -1);
}
static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
@@ -114,36 +108,19 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
/*
* Flags in backing_dev_info::capability
*
- * The first three flags control whether dirty pages will contribute to the
- * VM's accounting and whether writepages() should be called for dirty pages
- * (something that would not, for example, be appropriate for ramfs)
- *
- * WARNING: these flags are closely related and should not normally be
- * used separately. The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
- * three flags into a single convenience macro.
- *
- * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
- * BDI_CAP_NO_WRITEBACK: Don't write pages back
- * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages
- * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
- *
- * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
- * BDI_CAP_SYNCHRONOUS_IO: Device is so fast that asynchronous IO would be
- * inefficient.
+ * BDI_CAP_WRITEBACK: Supports dirty page writeback, and dirty pages
+ * should contribute to accounting
+ * BDI_CAP_WRITEBACK_ACCT: Automatically account writeback pages
+ * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold
*/
-#define BDI_CAP_NO_ACCT_DIRTY 0x00000001
-#define BDI_CAP_NO_WRITEBACK 0x00000002
-#define BDI_CAP_NO_ACCT_WB 0x00000004
-#define BDI_CAP_STABLE_WRITES 0x00000008
-#define BDI_CAP_STRICTLIMIT 0x00000010
-#define BDI_CAP_CGROUP_WRITEBACK 0x00000020
-#define BDI_CAP_SYNCHRONOUS_IO 0x00000040
-
-#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
- (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
+#define BDI_CAP_WRITEBACK (1 << 0)
+#define BDI_CAP_WRITEBACK_ACCT (1 << 1)
+#define BDI_CAP_STRICTLIMIT (1 << 2)
extern struct backing_dev_info noop_backing_dev_info;
+int bdi_init(struct backing_dev_info *bdi);
+
/**
* writeback_in_progress - determine whether there is writeback in progress
* @wb: bdi_writeback of interest
@@ -156,97 +133,30 @@ static inline bool writeback_in_progress(struct bdi_writeback *wb)
return test_bit(WB_writeback_running, &wb->state);
}
-static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
-{
- struct super_block *sb;
-
- if (!inode)
- return &noop_backing_dev_info;
-
- sb = inode->i_sb;
-#ifdef CONFIG_BLOCK
- if (sb_is_blkdev_sb(sb))
- return I_BDEV(inode)->bd_bdi;
-#endif
- return sb->s_bdi;
-}
-
-static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
-{
- struct backing_dev_info *bdi = wb->bdi;
-
- if (bdi->congested_fn)
- return bdi->congested_fn(bdi->congested_data, cong_bits);
- return wb->congested->state & cong_bits;
-}
-
-long congestion_wait(int sync, long timeout);
-long wait_iff_congested(int sync, long timeout);
-
-static inline bool bdi_cap_synchronous_io(struct backing_dev_info *bdi)
-{
- return bdi->capabilities & BDI_CAP_SYNCHRONOUS_IO;
-}
-
-static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
-{
- return bdi->capabilities & BDI_CAP_STABLE_WRITES;
-}
+struct backing_dev_info *inode_to_bdi(struct inode *inode);
-static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
+static inline bool mapping_can_writeback(struct address_space *mapping)
{
- return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
-}
-
-static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
-{
- return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
-}
-
-static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
-{
- /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
- return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
- BDI_CAP_NO_WRITEBACK));
-}
-
-static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
-{
- return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
-}
-
-static inline bool mapping_cap_account_dirty(struct address_space *mapping)
-{
- return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
-}
-
-static inline int bdi_sched_wait(void *word)
-{
- schedule();
- return 0;
+ return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
}
#ifdef CONFIG_CGROUP_WRITEBACK
-struct bdi_writeback_congested *
-wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
-void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
struct cgroup_subsys_state *memcg_css);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
struct cgroup_subsys_state *memcg_css,
gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
-void wb_blkcg_offline(struct blkcg *blkcg);
-int inode_congested(struct inode *inode, int cong_bits);
+void wb_blkcg_offline(struct cgroup_subsys_state *css);
/**
* inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
* @inode: inode of interest
*
- * cgroup writeback requires support from both the bdi and filesystem.
- * Also, both memcg and iocg have to be on the default hierarchy. Test
- * whether all conditions are met.
+ * Cgroup writeback requires support from the filesystem. Also, both memcg and
+ * iocg have to be on the default hierarchy. Test whether all conditions are
+ * met.
*
* Note that the test result may change dynamically on the same inode
* depending on how memcg and iocg are configured.
@@ -257,8 +167,7 @@ static inline bool inode_cgwb_enabled(struct inode *inode)
return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
cgroup_subsys_on_dfl(io_cgrp_subsys) &&
- bdi_cap_account_dirty(bdi) &&
- (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
+ (bdi->capabilities & BDI_CAP_WRITEBACK) &&
(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}
@@ -321,18 +230,6 @@ wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
}
/**
- * inode_to_wb_is_valid - test whether an inode has a wb associated
- * @inode: inode of interest
- *
- * Returns %true if @inode has a wb associated. May be called without any
- * locking.
- */
-static inline bool inode_to_wb_is_valid(struct inode *inode)
-{
- return inode->i_wb;
-}
-
-/**
* inode_to_wb - determine the wb of an inode
* @inode: inode of interest
*
@@ -351,6 +248,17 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
return inode->i_wb;
}
+static inline struct bdi_writeback *inode_to_wb_wbc(
+ struct inode *inode,
+ struct writeback_control *wbc)
+{
+ /*
+ * If wbc does not have inode attached, it means cgroup writeback was
+ * disabled when wbc started. Just use the default wb in that case.
+ */
+ return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb;
+}
+
/**
* unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
* @inode: target inode
@@ -408,19 +316,6 @@ static inline bool inode_cgwb_enabled(struct inode *inode)
return false;
}
-static inline struct bdi_writeback_congested *
-wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
-{
- refcount_inc(&bdi->wb_congested->refcnt);
- return bdi->wb_congested;
-}
-
-static inline void wb_congested_put(struct bdi_writeback_congested *congested)
-{
- if (refcount_dec_and_test(&congested->refcnt))
- kfree(congested);
-}
-
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
return &bdi->wb;
@@ -432,16 +327,19 @@ wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
return &bdi->wb;
}
-static inline bool inode_to_wb_is_valid(struct inode *inode)
+static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
- return true;
+ return &inode_to_bdi(inode)->wb;
}
-static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
+static inline struct bdi_writeback *inode_to_wb_wbc(
+ struct inode *inode,
+ struct writeback_control *wbc)
{
- return &inode_to_bdi(inode)->wb;
+ return inode_to_wb(inode);
}
+
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
@@ -457,61 +355,12 @@ static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}
-static inline void wb_blkcg_offline(struct blkcg *blkcg)
+static inline void wb_blkcg_offline(struct cgroup_subsys_state *css)
{
}
-static inline int inode_congested(struct inode *inode, int cong_bits)
-{
- return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
-}
-
#endif /* CONFIG_CGROUP_WRITEBACK */
-static inline int inode_read_congested(struct inode *inode)
-{
- return inode_congested(inode, 1 << WB_sync_congested);
-}
-
-static inline int inode_write_congested(struct inode *inode)
-{
- return inode_congested(inode, 1 << WB_async_congested);
-}
-
-static inline int inode_rw_congested(struct inode *inode)
-{
- return inode_congested(inode, (1 << WB_sync_congested) |
- (1 << WB_async_congested));
-}
-
-static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
-{
- return wb_congested(&bdi->wb, cong_bits);
-}
-
-static inline int bdi_read_congested(struct backing_dev_info *bdi)
-{
- return bdi_congested(bdi, 1 << WB_sync_congested);
-}
-
-static inline int bdi_write_congested(struct backing_dev_info *bdi)
-{
- return bdi_congested(bdi, 1 << WB_async_congested);
-}
-
-static inline int bdi_rw_congested(struct backing_dev_info *bdi)
-{
- return bdi_congested(bdi, (1 << WB_sync_congested) |
- (1 << WB_async_congested));
-}
-
-extern const char *bdi_unknown_name;
-
-static inline const char *bdi_dev_name(struct backing_dev_info *bdi)
-{
- if (!bdi || !bdi->dev)
- return bdi_unknown_name;
- return dev_name(bdi->dev);
-}
+const char *bdi_dev_name(struct backing_dev_info *bdi);
#endif /* _LINUX_BACKING_DEV_H */
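
For illustration only (not part of the patch): with the congestion and
bdi_cap_* helpers removed, dirty-page accounting is gated on the single
BDI_CAP_WRITEBACK bit via mapping_can_writeback(). A minimal sketch of a
hypothetical caller, my_account_dirtied():

    #include <linux/backing-dev.h>

    /* Hypothetical caller: skip dirty accounting for backing devices
     * that never write back, i.e. those without BDI_CAP_WRITEBACK. */
    static void my_account_dirtied(struct address_space *mapping)
    {
            if (!mapping_can_writeback(mapping))
                    return;

            /* ... per-wb dirty statistics would be updated here ... */
    }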
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index c7d6b2e8c3b5..614653e07e3a 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -14,113 +14,336 @@
#include <linux/mutex.h>
#include <linux/notifier.h>
-/* Notes on locking:
- *
- * backlight_device->ops_lock is an internal backlight lock protecting the
- * ops pointer and no code outside the core should need to touch it.
- *
- * Access to update_status() is serialised by the update_lock mutex since
- * most drivers seem to need this and historically get it wrong.
- *
- * Most drivers don't need locking on their get_brightness() method.
- * If yours does, you need to implement it in the driver. You can use the
- * update_lock mutex if appropriate.
+/**
+ * enum backlight_update_reason - what method was used to update backlight
*
- * Any other use of the locks below is probably wrong.
+ * A driver indicates the method (reason) used for updating the backlight
+ * when calling backlight_force_update().
*/
-
enum backlight_update_reason {
+ /**
+ * @BACKLIGHT_UPDATE_HOTKEY: The backlight was updated using a hot-key.
+ */
BACKLIGHT_UPDATE_HOTKEY,
+
+ /**
+ * @BACKLIGHT_UPDATE_SYSFS: The backlight was updated using sysfs.
+ */
BACKLIGHT_UPDATE_SYSFS,
};
+/**
+ * enum backlight_type - the type of backlight control
+ *
+ * The type of interface used to control the backlight.
+ */
enum backlight_type {
+ /**
+ * @BACKLIGHT_RAW:
+ *
+ * The backlight is controlled using hardware registers.
+ */
BACKLIGHT_RAW = 1,
+
+ /**
+ * @BACKLIGHT_PLATFORM:
+ *
+ * The backlight is controlled using a platform-specific interface.
+ */
BACKLIGHT_PLATFORM,
+
+ /**
+ * @BACKLIGHT_FIRMWARE:
+ *
+ * The backlight is controlled using a standard firmware interface.
+ */
BACKLIGHT_FIRMWARE,
+
+ /**
+ * @BACKLIGHT_TYPE_MAX: Number of entries.
+ */
BACKLIGHT_TYPE_MAX,
};
+/**
+ * enum backlight_notification - the type of notification
+ *
+ * The type of notification sent to a receiver that registered for
+ * notifications using backlight_register_notifier().
+ */
enum backlight_notification {
+ /**
+ * @BACKLIGHT_REGISTERED: The backlight device is registered.
+ */
BACKLIGHT_REGISTERED,
+
+ /**
+	 * @BACKLIGHT_UNREGISTERED: The backlight device is unregistered.
+ */
BACKLIGHT_UNREGISTERED,
};
+/**
+ * enum backlight_scale - the type of scale used for brightness values
+ *
+ * The type of scale used for brightness values.
+ */
enum backlight_scale {
+ /**
+ * @BACKLIGHT_SCALE_UNKNOWN: The scale is unknown.
+ */
BACKLIGHT_SCALE_UNKNOWN = 0,
+
+ /**
+ * @BACKLIGHT_SCALE_LINEAR: The scale is linear.
+ *
+	 * The linear scale increases brightness by the same amount for each step.
+ */
BACKLIGHT_SCALE_LINEAR,
+
+ /**
+ * @BACKLIGHT_SCALE_NON_LINEAR: The scale is not linear.
+ *
+	 * This is often used when the brightness value is adjusted to match
+	 * the eye's relative perception of brightness, which demands a
+	 * non-linear scale.
+ */
BACKLIGHT_SCALE_NON_LINEAR,
};
struct backlight_device;
struct fb_info;
+/**
+ * struct backlight_ops - backlight operations
+ *
+ * The backlight operations are specified when the backlight device is registered.
+ */
struct backlight_ops {
+ /**
+ * @options: Configure how operations are called from the core.
+ *
+ * The options parameter is used to adjust the behaviour of the core.
+ * Set BL_CORE_SUSPENDRESUME to get the update_status() operation called
+ * upon suspend and resume.
+ */
unsigned int options;
#define BL_CORE_SUSPENDRESUME (1 << 0)
- /* Notify the backlight driver some property has changed */
+ /**
+ * @update_status: Operation called when properties have changed.
+ *
+	 * Notify the backlight driver that some property has changed.
+ * The update_status operation is protected by the update_lock.
+ *
+ * The backlight driver is expected to use backlight_is_blank()
+ * to check if the display is blanked and set brightness accordingly.
+ * update_status() is called when any of the properties has changed.
+ *
+ * RETURNS:
+ *
+ * 0 on success, negative error code if any failure occurred.
+ */
int (*update_status)(struct backlight_device *);
- /* Return the current backlight brightness (accounting for power,
- fb_blank etc.) */
+
+ /**
+ * @get_brightness: Return the current backlight brightness.
+ *
+ * The driver may implement this as a readback from the HW.
+ * This operation is optional and if not present then the current
+ * brightness property value is used.
+ *
+ * RETURNS:
+ *
+ * A brightness value which is 0 or a positive number.
+ * On failure a negative error code is returned.
+ */
int (*get_brightness)(struct backlight_device *);
- /* Check if given framebuffer device is the one bound to this backlight;
- return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
- int (*check_fb)(struct backlight_device *, struct fb_info *);
+
+ /**
+ * @check_fb: Check the framebuffer device.
+ *
+ * Check if given framebuffer device is the one bound to this backlight.
+ * This operation is optional and if not implemented it is assumed that the
+ * fbdev is always the one bound to the backlight.
+ *
+ * RETURNS:
+ *
+	 * If info is NULL or info matches the fbdev bound to the backlight,
+	 * return true. If info does not match the fbdev bound to the
+	 * backlight, return false.
+ */
+ int (*check_fb)(struct backlight_device *bd, struct fb_info *info);
};
-/* This structure defines all the properties of a backlight */
+/**
+ * struct backlight_properties - backlight properties
+ *
+ * This structure defines all the properties of a backlight.
+ */
struct backlight_properties {
- /* Current User requested brightness (0 - max_brightness) */
+ /**
+ * @brightness: The current brightness requested by the user.
+ *
+ * The backlight core makes sure the range is (0 to max_brightness)
+ * when the brightness is set via the sysfs attribute:
+ * /sys/class/backlight/<backlight>/brightness.
+ *
+ * This value can be set in the backlight_properties passed
+ * to devm_backlight_device_register() to set a default brightness
+ * value.
+ */
int brightness;
- /* Maximal value for brightness (read-only) */
+
+ /**
+ * @max_brightness: The maximum brightness value.
+ *
+ * This value must be set in the backlight_properties passed to
+ * devm_backlight_device_register() and shall not be modified by the
+ * driver after registration.
+ */
int max_brightness;
- /* Current FB Power mode (0: full on, 1..3: power saving
- modes; 4: full off), see FB_BLANK_XXX */
+
+ /**
+ * @power: The current power mode.
+ *
+ * User space can configure the power mode using the sysfs
+ * attribute: /sys/class/backlight/<backlight>/bl_power
+	 * When the power property is updated, update_status() is called.
+ *
+ * The possible values are: (0: full on, 1 to 3: power saving
+ * modes; 4: full off), see FB_BLANK_XXX.
+ *
+ * When the backlight device is enabled @power is set
+ * to FB_BLANK_UNBLANK. When the backlight device is disabled
+ * @power is set to FB_BLANK_POWERDOWN.
+ */
int power;
- /* FB Blanking active? (values as for power) */
- /* Due to be removed, please use (state & BL_CORE_FBBLANK) */
+
+ /**
+ * @fb_blank: The power state from the FBIOBLANK ioctl.
+ *
+ * When the FBIOBLANK ioctl is called @fb_blank is set to the
+ * blank parameter and the update_status() operation is called.
+ *
+ * When the backlight device is enabled @fb_blank is set
+ * to FB_BLANK_UNBLANK. When the backlight device is disabled
+ * @fb_blank is set to FB_BLANK_POWERDOWN.
+ *
+ * Backlight drivers should avoid using this property. It has been
+	 * replaced by state & BL_CORE_FBBLANK (although most drivers should
+ * use backlight_is_blank() as the preferred means to get the blank
+ * state).
+ *
+ * fb_blank is deprecated and will be removed.
+ */
int fb_blank;
- /* Backlight type */
+
+ /**
+ * @type: The type of backlight supported.
+ *
+ * The backlight type allows userspace to make appropriate
+ * policy decisions based on the backlight type.
+ *
+ * This value must be set in the backlight_properties
+ * passed to devm_backlight_device_register().
+ */
enum backlight_type type;
- /* Flags used to signal drivers of state changes */
+
+ /**
+ * @state: The state of the backlight core.
+ *
+ * The state is a bitmask. BL_CORE_FBBLANK is set when the display
+ * is expected to be blank. BL_CORE_SUSPENDED is set when the
+ * driver is suspended.
+ *
+	 * Backlight drivers are expected to use backlight_is_blank()
+ * in their update_status() operation rather than reading the
+ * state property.
+ *
+ * The state is maintained by the core and drivers may not modify it.
+ */
unsigned int state;
- /* Type of the brightness scale (linear, non-linear, ...) */
- enum backlight_scale scale;
#define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */
#define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */
+ /**
+ * @scale: The type of the brightness scale.
+ */
+ enum backlight_scale scale;
};
+/**
+ * struct backlight_device - backlight device data
+ *
+ * This structure holds all data required by a backlight device.
+ */
struct backlight_device {
- /* Backlight properties */
+ /**
+ * @props: Backlight properties
+ */
struct backlight_properties props;
- /* Serialise access to update_status method */
+ /**
+ * @update_lock: The lock used when calling the update_status() operation.
+ *
+	 * update_lock is an internal backlight lock that serialises access
+ * to the update_status() operation. The backlight core holds the update_lock
+ * when calling the update_status() operation. The update_lock shall not
+ * be used by backlight drivers.
+ */
struct mutex update_lock;
- /* This protects the 'ops' field. If 'ops' is NULL, the driver that
- registered this device has been unloaded, and if class_get_devdata()
- points to something in the body of that driver, it is also invalid. */
+ /**
+ * @ops_lock: The lock used around everything related to backlight_ops.
+ *
+ * ops_lock is an internal backlight lock that protects the ops pointer
+ * and is used around all accesses to ops and when the operations are
+ * invoked. The ops_lock shall not be used by backlight drivers.
+ */
struct mutex ops_lock;
+
+ /**
+ * @ops: Pointer to the backlight operations.
+ *
+ * If ops is NULL, the driver that registered this device has been unloaded,
+ * and if class_get_devdata() points to something in the body of that driver,
+ * it is also invalid.
+ */
const struct backlight_ops *ops;
- /* The framebuffer notifier block */
+ /**
+ * @fb_notif: The framebuffer notifier block
+ */
struct notifier_block fb_notif;
- /* list entry of all registered backlight devices */
+ /**
+ * @entry: List entry of all registered backlight devices
+ */
struct list_head entry;
+ /**
+ * @dev: Parent device.
+ */
struct device dev;
- /* Multiple framebuffers may share one backlight device */
+ /**
+	 * @fb_bl_on: The state of the individual fbdevs.
+ *
+	 * Multiple fbdevs may share one backlight device. The fb_bl_on
+	 * array records the state of each individual fbdev.
+ */
bool fb_bl_on[FB_MAX];
+ /**
+ * @use_count: The number of uses of fb_bl_on.
+ */
int use_count;
};
+/**
+ * backlight_update_status - force an update of the backlight device status
+ * @bd: the backlight device
+ */
static inline int backlight_update_status(struct backlight_device *bd)
{
int ret = -ENOENT;
@@ -166,48 +389,83 @@ static inline int backlight_disable(struct backlight_device *bd)
}
/**
- * backlight_put - Drop backlight reference
- * @bd: the backlight device to put
+ * backlight_is_blank - Return true if display is expected to be blank
+ * @bd: the backlight device
+ *
+ * Display is expected to be blank if any of these is true::
+ *
+ * 1) if power is not UNBLANK
+ * 2) if fb_blank is not UNBLANK
+ * 3) if state indicates BLANK or SUSPENDED
+ *
+ * Returns true if display is expected to be blank, false otherwise.
*/
-static inline void backlight_put(struct backlight_device *bd)
+static inline bool backlight_is_blank(const struct backlight_device *bd)
{
- if (bd)
- put_device(&bd->dev);
+ return bd->props.power != FB_BLANK_UNBLANK ||
+ bd->props.fb_blank != FB_BLANK_UNBLANK ||
+ bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK);
}
-extern struct backlight_device *backlight_device_register(const char *name,
- struct device *dev, void *devdata, const struct backlight_ops *ops,
- const struct backlight_properties *props);
-extern struct backlight_device *devm_backlight_device_register(
- struct device *dev, const char *name, struct device *parent,
- void *devdata, const struct backlight_ops *ops,
- const struct backlight_properties *props);
-extern void backlight_device_unregister(struct backlight_device *bd);
-extern void devm_backlight_device_unregister(struct device *dev,
- struct backlight_device *bd);
-extern void backlight_force_update(struct backlight_device *bd,
- enum backlight_update_reason reason);
-extern int backlight_register_notifier(struct notifier_block *nb);
-extern int backlight_unregister_notifier(struct notifier_block *nb);
-extern struct backlight_device *backlight_device_get_by_type(enum backlight_type type);
-extern int backlight_device_set_brightness(struct backlight_device *bd, unsigned long brightness);
+/**
+ * backlight_get_brightness - Returns the current brightness value
+ * @bd: the backlight device
+ *
+ * Returns the current brightness value, taking the current state into
+ * consideration. If backlight_is_blank() returns true then the brightness
+ * is 0, otherwise the current brightness property value is returned.
+ *
+ * Backlight drivers are expected to use this function in their update_status()
+ * operation to get the brightness value.
+ */
+static inline int backlight_get_brightness(const struct backlight_device *bd)
+{
+ if (backlight_is_blank(bd))
+ return 0;
+ else
+ return bd->props.brightness;
+}
+
+struct backlight_device *
+backlight_device_register(const char *name, struct device *dev, void *devdata,
+ const struct backlight_ops *ops,
+ const struct backlight_properties *props);
+struct backlight_device *
+devm_backlight_device_register(struct device *dev, const char *name,
+ struct device *parent, void *devdata,
+ const struct backlight_ops *ops,
+ const struct backlight_properties *props);
+void backlight_device_unregister(struct backlight_device *bd);
+void devm_backlight_device_unregister(struct device *dev,
+ struct backlight_device *bd);
+void backlight_force_update(struct backlight_device *bd,
+ enum backlight_update_reason reason);
+int backlight_register_notifier(struct notifier_block *nb);
+int backlight_unregister_notifier(struct notifier_block *nb);
+struct backlight_device *backlight_device_get_by_name(const char *name);
+struct backlight_device *backlight_device_get_by_type(enum backlight_type type);
+int backlight_device_set_brightness(struct backlight_device *bd,
+ unsigned long brightness);
#define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)
+/**
+ * bl_get_data - access devdata
+ * @bl_dev: pointer to backlight device
+ *
+ * When a backlight device is registered, the driver may supply a
+ * void * devdata. bl_get_data() returns a pointer to that devdata.
+ *
+ * RETURNS:
+ *
+ * pointer to devdata stored while registering the backlight device.
+ */
static inline void * bl_get_data(struct backlight_device *bl_dev)
{
return dev_get_drvdata(&bl_dev->dev);
}
-struct generic_bl_info {
- const char *name;
- int max_intensity;
- int default_intensity;
- int limit_mask;
- void (*set_bl_intensity)(int intensity);
- void (*kick_battery)(void);
-};
-
#ifdef CONFIG_OF
struct backlight_device *of_find_backlight_by_node(struct device_node *node);
#else
@@ -219,14 +477,8 @@ of_find_backlight_by_node(struct device_node *node)
#endif
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
-struct backlight_device *of_find_backlight(struct device *dev);
struct backlight_device *devm_of_find_backlight(struct device *dev);
#else
-static inline struct backlight_device *of_find_backlight(struct device *dev)
-{
- return NULL;
-}
-
static inline struct backlight_device *
devm_of_find_backlight(struct device *dev)
{
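
As a usage illustration (not from this patch): a minimal update_status()
implementation built on the new backlight_get_brightness() helper.
my_hw_set_level() is a hypothetical stand-in for real register access.

    #include <linux/backlight.h>

    /* Hypothetical hardware hook, stubbed out for the sketch. */
    static int my_hw_set_level(int level)
    {
            return 0;
    }

    static int my_bl_update_status(struct backlight_device *bd)
    {
            /* backlight_get_brightness() already returns 0 when the
             * display is expected to be blank, so no explicit
             * power/fb_blank checks are needed here. */
            return my_hw_set_level(backlight_get_brightness(bd));
    }

    static const struct backlight_ops my_bl_ops = {
            .update_status  = my_bl_update_status,
    };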
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index 338aa27e4773..5ca2d5699620 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -57,7 +57,6 @@ struct balloon_dev_info {
struct list_head pages; /* Pages enqueued & handled to Host */
int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
struct page *page, enum migrate_mode mode);
- struct inode *inode;
};
extern struct page *balloon_page_alloc(void);
@@ -75,17 +74,10 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
spin_lock_init(&balloon->pages_lock);
INIT_LIST_HEAD(&balloon->pages);
balloon->migratepage = NULL;
- balloon->inode = NULL;
}
#ifdef CONFIG_BALLOON_COMPACTION
-extern const struct address_space_operations balloon_aops;
-extern bool balloon_page_isolate(struct page *page,
- isolate_mode_t mode);
-extern void balloon_page_putback(struct page *page);
-extern int balloon_page_migrate(struct address_space *mapping,
- struct page *newpage,
- struct page *page, enum migrate_mode mode);
+extern const struct movable_operations balloon_mops;
/*
* balloon_page_insert - insert a page into the balloon's page list and make
@@ -100,7 +92,7 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon,
struct page *page)
{
__SetPageOffline(page);
- __SetPageMovable(page, balloon->inode->i_mapping);
+ __SetPageMovable(page, &balloon_mops);
set_page_private(page, (unsigned long)balloon);
list_add(&page->lru, &balloon->pages);
}
@@ -155,22 +147,6 @@ static inline void balloon_page_delete(struct page *page)
list_del(&page->lru);
}
-static inline bool balloon_page_isolate(struct page *page)
-{
- return false;
-}
-
-static inline void balloon_page_putback(struct page *page)
-{
- return;
-}
-
-static inline int balloon_page_migrate(struct page *newpage,
- struct page *page, enum migrate_mode mode)
-{
- return 0;
-}
-
static inline gfp_t balloon_mapping_gfp_mask(void)
{
return GFP_HIGHUSER;
diff --git a/include/linux/base64.h b/include/linux/base64.h
new file mode 100644
index 000000000000..660d4cb1ef31
--- /dev/null
+++ b/include/linux/base64.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * base64 encoding, lifted from fs/crypto/fname.c.
+ */
+
+#ifndef _LINUX_BASE64_H
+#define _LINUX_BASE64_H
+
+#include <linux/types.h>
+
+#define BASE64_CHARS(nbytes) DIV_ROUND_UP((nbytes) * 4, 3)
+
+int base64_encode(const u8 *src, int len, char *dst);
+int base64_decode(const char *src, int len, u8 *dst);
+
+#endif /* _LINUX_BASE64_H */
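
A hedged usage sketch for the new header, assuming (as in the fs/crypto
original this was lifted from) that both functions return the number of
characters/bytes produced and that decode rejects invalid input with a
negative value; example_roundtrip and its buffers are hypothetical:

    #include <linux/base64.h>

    /* Encode a blob and decode it back. The caller provides b64 with at
     * least BASE64_CHARS(len) bytes (4 output chars per 3 input bytes,
     * rounded up) and out with at least len bytes. */
    static int example_roundtrip(const u8 *blob, int len, char *b64, u8 *out)
    {
            int nchars = base64_encode(blob, len, b64);

            return base64_decode(b64, nchars, out);
    }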
diff --git a/include/linux/bch.h b/include/linux/bch.h
index aa765af85c38..85fdce83d4e2 100644
--- a/include/linux/bch.h
+++ b/include/linux/bch.h
@@ -33,6 +33,7 @@
* @cache: log-based polynomial representation buffer
* @elp: error locator polynomial
* @poly_2t: temporary polynomials of degree 2t
+ * @swap_bits: swap bits within data and syndrome bytes
*/
struct bch_control {
unsigned int m;
@@ -51,16 +52,18 @@ struct bch_control {
int *cache;
struct gf_poly *elp;
struct gf_poly *poly_2t[4];
+ bool swap_bits;
};
-struct bch_control *init_bch(int m, int t, unsigned int prim_poly);
+struct bch_control *bch_init(int m, int t, unsigned int prim_poly,
+ bool swap_bits);
-void free_bch(struct bch_control *bch);
+void bch_free(struct bch_control *bch);
-void encode_bch(struct bch_control *bch, const uint8_t *data,
+void bch_encode(struct bch_control *bch, const uint8_t *data,
unsigned int len, uint8_t *ecc);
-int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len,
+int bch_decode(struct bch_control *bch, const uint8_t *data, unsigned int len,
const uint8_t *recv_ecc, const uint8_t *calc_ecc,
const unsigned int *syn, unsigned int *errloc);
diff --git a/include/linux/bcm47xx_sprom.h b/include/linux/bcm47xx_sprom.h
index b0f4424f34fc..f8254fd53e15 100644
--- a/include/linux/bcm47xx_sprom.h
+++ b/include/linux/bcm47xx_sprom.h
@@ -9,9 +9,19 @@
#include <linux/kernel.h>
#include <linux/vmalloc.h>
+struct ssb_sprom;
+
#ifdef CONFIG_BCM47XX_SPROM
+void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix,
+ bool fallback);
int bcm47xx_sprom_register_fallbacks(void);
#else
+static inline void bcm47xx_fill_sprom(struct ssb_sprom *sprom,
+ const char *prefix,
+ bool fallback)
+{
+}
+
static inline int bcm47xx_sprom_register_fallbacks(void)
{
return -ENOTSUPP;
diff --git a/include/linux/bcm963xx_tag.h b/include/linux/bcm963xx_tag.h
index b87945cb6946..7edb809a2586 100644
--- a/include/linux/bcm963xx_tag.h
+++ b/include/linux/bcm963xx_tag.h
@@ -84,7 +84,7 @@ struct bcm_tag {
char flash_layout_ver[FLASHLAYOUTVER_LEN];
/* 196-199: kernel+rootfs CRC32 */
__u32 fskernel_crc;
- /* 200-215: Unused except on Alice Gate where is is information */
+ /* 200-215: Unused except on Alice Gate where it is information */
char information2[TAGINFO2_LEN];
/* 216-219: CRC32 of image less imagetag (kernel for Alice Gate) */
__u32 image_crc;
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index d35b9206096d..2d94c30ed439 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -3,6 +3,7 @@
#define LINUX_BCMA_DRIVER_CC_H_
#include <linux/platform_device.h>
+#include <linux/platform_data/brcmnand.h>
#include <linux/gpio.h>
/** ChipCommon core registers. **/
@@ -270,6 +271,7 @@
#define BCMA_CC_SROM_CONTROL_OP_WRDIS 0x40000000
#define BCMA_CC_SROM_CONTROL_OP_WREN 0x60000000
#define BCMA_CC_SROM_CONTROL_OTPSEL 0x00000010
+#define BCMA_CC_SROM_CONTROL_OTP_PRESENT 0x00000020
#define BCMA_CC_SROM_CONTROL_LOCK 0x00000008
#define BCMA_CC_SROM_CONTROL_SIZE_MASK 0x00000006
#define BCMA_CC_SROM_CONTROL_SIZE_1K 0x00000000
@@ -599,6 +601,10 @@ struct bcma_sflash {
#ifdef CONFIG_BCMA_NFLASH
struct bcma_nflash {
+	/* Must be the first member so that the brcmnand driver can
+	 * dereference this structure.
+ */
+ struct brcmnand_platform_data brcmnand_info;
bool present;
bool boot; /* This is the flash the SoC boots from */
};
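
A brief illustration (hypothetical, assumes CONFIG_BCMA_NFLASH) of why the
comment insists brcmnand_info stays the first member: at offset zero, a
pointer to the embedded platform data and a pointer to the containing
structure are interchangeable.

    #include <linux/bcma/bcma_driver_chipcommon.h>

    static struct brcmnand_platform_data *
    nflash_to_pdata(struct bcma_nflash *nflash)
    {
            /* Offset 0 by construction, so the brcmnand driver may treat
             * the pointer it receives as either structure. */
            return &nflash->brcmnand_info;
    }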
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index b40fc633f3be..8d51f69f9f5e 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -8,6 +8,7 @@
#include <uapi/linux/binfmts.h>
struct filename;
+struct coredump_params;
#define CORENAME_MAX_SIZE 128
@@ -26,40 +27,36 @@ struct linux_binprm {
unsigned long p; /* current top of mem */
unsigned long argmin; /* rlimit marker for copy_strings() */
unsigned int
+ /* Should an execfd be passed to userspace? */
+ have_execfd:1,
+
+ /* Use the creds of a script (see binfmt_misc) */
+ execfd_creds:1,
/*
- * True after the bprm_set_creds hook has been called once
- * (multiple calls can be made via prepare_binprm() for
- * binfmt_script/misc).
- */
- called_set_creds:1,
- /*
- * True if most recent call to the commoncaps bprm_set_creds
- * hook (due to multiple prepare_binprm() calls from the
- * binfmt_script/misc handlers) resulted in elevated
- * privileges.
+ * Set by bprm_creds_for_exec hook to indicate a
+ * privilege-gaining exec has happened. Used to set
+ * AT_SECURE auxv for glibc.
*/
- cap_elevated:1,
+ secureexec:1,
/*
- * Set by bprm_set_creds hook to indicate a privilege-gaining
- * exec has happened. Used to sanitize execution environment
- * and to set AT_SECURE auxv for glibc.
+ * Set when errors can no longer be returned to the
+ * original userspace.
*/
- secureexec:1;
-#ifdef __alpha__
- unsigned int taso:1;
-#endif
- unsigned int recursion_depth; /* only for search_binary_handler() */
- struct file * file;
+ point_of_no_return:1;
+ struct file *executable; /* Executable to pass to the interpreter */
+ struct file *interpreter;
+ struct file *file;
struct cred *cred; /* new credentials */
int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */
unsigned int per_clear; /* bits to clear in current->personality */
int argc, envc;
- const char * filename; /* Name of binary as seen by procps */
- const char * interp; /* Name of the binary really executed. Most
+ const char *filename; /* Name of binary as seen by procps */
+ const char *interp; /* Name of the binary really executed. Most
of the time same as filename, but could be
different for binfmt_{misc,script} */
+ const char *fdpath; /* generated filename for execveat */
unsigned interp_flags;
- unsigned interp_data;
+ int execfd; /* File descriptor of the executable */
unsigned long loader, exec;
struct rlimit rlim_stack; /* Saved RLIMIT_STACK used during exec. */
@@ -70,24 +67,13 @@ struct linux_binprm {
#define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
#define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
-/* fd of the binary should be passed to the interpreter */
-#define BINPRM_FLAGS_EXECFD_BIT 1
-#define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT)
-
/* filename of the binary will be inaccessible after exec */
#define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2
#define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT)
-/* Function parameter for binfmt->coredump */
-struct coredump_params {
- const kernel_siginfo_t *siginfo;
- struct pt_regs *regs;
- struct file *file;
- unsigned long limit;
- unsigned long mm_flags;
- loff_t written;
- loff_t pos;
-};
+/* preserve argv0 for the interpreter */
+#define BINPRM_FLAGS_PRESERVE_ARGV0_BIT 3
+#define BINPRM_FLAGS_PRESERVE_ARGV0 (1 << BINPRM_FLAGS_PRESERVE_ARGV0_BIT)
/*
* This structure defines the functions that are used to load the binary formats that
@@ -98,8 +84,10 @@ struct linux_binfmt {
struct module *module;
int (*load_binary)(struct linux_binprm *);
int (*load_shlib)(struct file *);
+#ifdef CONFIG_COREDUMP
int (*core_dump)(struct coredump_params *cprm);
unsigned long min_coredump; /* minimal dump size */
+#endif
} __randomize_layout;
extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
@@ -117,10 +105,8 @@ static inline void insert_binfmt(struct linux_binfmt *fmt)
extern void unregister_binfmt(struct linux_binfmt *);
-extern int prepare_binprm(struct linux_binprm *);
extern int __must_check remove_arg_zero(struct linux_binprm *);
-extern int search_binary_handler(struct linux_binprm *);
-extern int flush_old_exec(struct linux_binprm * bprm);
+extern int begin_new_exec(struct linux_binprm * bprm);
extern void setup_new_exec(struct linux_binprm * bprm);
extern void finalize_exec(struct linux_binprm *bprm);
extern void would_dump(struct linux_binprm *, struct file *);
@@ -138,19 +124,11 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
extern int transfer_args_to_stack(struct linux_binprm *bprm,
unsigned long *sp_location);
extern int bprm_change_interp(const char *interp, struct linux_binprm *bprm);
-extern int copy_strings_kernel(int argc, const char *const *argv,
- struct linux_binprm *bprm);
-extern void install_exec_creds(struct linux_binprm *bprm);
+int copy_string_kernel(const char *arg, struct linux_binprm *bprm);
extern void set_binfmt(struct linux_binfmt *new);
extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t);
-extern int do_execve(struct filename *,
- const char __user * const __user *,
- const char __user * const __user *);
-extern int do_execveat(int, struct filename *,
- const char __user * const __user *,
- const char __user * const __user *,
- int);
-int do_execve_file(struct file *file, void *__argv, void *__envp);
+int kernel_execve(const char *filename,
+ const char *const *argv, const char *const *envp);
#endif /* _LINUX_BINFMTS_H */
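
A sketch modelled on the kernel's own init spawning (names are hypothetical
and not part of the header): kernel_execve() takes kernel-space strings
directly, replacing the removed do_execve()/do_execveat()/do_execve_file()
entry points for in-kernel callers.

    #include <linux/binfmts.h>

    static int example_run_init(const char *init_filename)
    {
            const char *const argv[] = { init_filename, NULL };
            const char *const envp[] = { "HOME=/", "TERM=linux", NULL };

            /* Returns only on failure; on success the calling thread
             * starts executing the new program. */
            return kernel_execve(init_filename, argv, envp);
    }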
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 853d92ceee64..2c5806997bbf 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -5,23 +5,17 @@
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H
-#include <linux/highmem.h>
#include <linux/mempool.h>
-#include <linux/ioprio.h>
-
-#ifdef CONFIG_BLOCK
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
+#include <linux/uio.h>
-#define BIO_DEBUG
-
-#ifdef BIO_DEBUG
-#define BIO_BUG_ON BUG_ON
-#else
-#define BIO_BUG_ON
-#endif
+#define BIO_MAX_VECS 256U
-#define BIO_MAX_PAGES 256
+static inline unsigned int bio_max_segs(unsigned int nr_segs)
+{
+ return min(nr_segs, BIO_MAX_VECS);
+}
#define bio_prio(bio) (bio)->bi_ioprio
#define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio)
@@ -40,9 +34,6 @@
#define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter)
-#define bio_multiple_segments(bio) \
- ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
-
#define bvec_iter_sectors(iter) ((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))
@@ -70,30 +61,13 @@ static inline bool bio_has_data(struct bio *bio)
return false;
}
-static inline bool bio_no_advance_iter(struct bio *bio)
+static inline bool bio_no_advance_iter(const struct bio *bio)
{
return bio_op(bio) == REQ_OP_DISCARD ||
bio_op(bio) == REQ_OP_SECURE_ERASE ||
- bio_op(bio) == REQ_OP_WRITE_SAME ||
bio_op(bio) == REQ_OP_WRITE_ZEROES;
}
-static inline bool bio_mergeable(struct bio *bio)
-{
- if (bio->bi_opf & REQ_NOMERGE_FLAGS)
- return false;
-
- return true;
-}
-
-static inline unsigned int bio_cur_bytes(struct bio *bio)
-{
- if (bio_has_data(bio))
- return bio_iovec(bio).bv_len;
- else /* dataless requests such as discard */
- return bio->bi_iter.bi_size;
-}
-
static inline void *bio_data(struct bio *bio)
{
if (bio_has_data(bio))
@@ -102,25 +76,6 @@ static inline void *bio_data(struct bio *bio)
return NULL;
}
-/**
- * bio_full - check if the bio is full
- * @bio: bio to check
- * @len: length of one segment to be added
- *
- * Return true if @bio is full and one segment with @len bytes can't be
- * added to the bio, otherwise return false
- */
-static inline bool bio_full(struct bio *bio, unsigned len)
-{
- if (bio->bi_vcnt >= bio->bi_max_vecs)
- return true;
-
- if (bio->bi_iter.bi_size > UINT_MAX - len)
- return true;
-
- return false;
-}
-
static inline bool bio_next_segment(const struct bio *bio,
struct bvec_iter_all *iter)
{
@@ -138,8 +93,8 @@ static inline bool bio_next_segment(const struct bio *bio,
#define bio_for_each_segment_all(bvl, bio, iter) \
for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )
-static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
- unsigned bytes)
+static inline void bio_advance_iter(const struct bio *bio,
+ struct bvec_iter *iter, unsigned int bytes)
{
iter->bi_sector += bytes >> 9;
@@ -150,11 +105,46 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
/* TODO: It is reasonable to complete bio with error here. */
}
+/* @bytes should be less than or equal to bvec[i->bi_idx].bv_len */
+static inline void bio_advance_iter_single(const struct bio *bio,
+ struct bvec_iter *iter,
+ unsigned int bytes)
+{
+ iter->bi_sector += bytes >> 9;
+
+ if (bio_no_advance_iter(bio))
+ iter->bi_size -= bytes;
+ else
+ bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
+}
+
+void __bio_advance(struct bio *, unsigned bytes);
+
+/**
+ * bio_advance - increment/complete a bio by some number of bytes
+ * @bio: bio to advance
+ * @nbytes: number of bytes to complete
+ *
+ * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
+ * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
+ * be updated on the last bvec as well.
+ *
+ * @bio will then represent the remaining, uncompleted portion of the io.
+ */
+static inline void bio_advance(struct bio *bio, unsigned int nbytes)
+{
+ if (nbytes == bio->bi_iter.bi_size) {
+ bio->bi_iter.bi_size = 0;
+ return;
+ }
+ __bio_advance(bio, nbytes);
+}
+
#define __bio_for_each_segment(bvl, bio, iter, start) \
for (iter = (start); \
(iter).bi_size && \
((bvl = bio_iter_iovec((bio), (iter))), 1); \
- bio_advance_iter((bio), &(iter), (bvl).bv_len))
+ bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
#define bio_for_each_segment(bvl, bio, iter) \
__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
@@ -163,12 +153,20 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
for (iter = (start); \
(iter).bi_size && \
((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
- bio_advance_iter((bio), &(iter), (bvl).bv_len))
+ bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter) \
__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)
+/*
+ * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the
+ * same reasons as bio_for_each_segment_all().
+ */
+#define bio_for_each_bvec_all(bvl, bio, i) \
+ for (i = 0, bvl = bio_first_bvec_all(bio); \
+ i < (bio)->bi_vcnt; i++, bvl++)
+
#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
static inline unsigned bio_segments(struct bio *bio)
@@ -187,8 +185,6 @@ static inline unsigned bio_segments(struct bio *bio)
case REQ_OP_SECURE_ERASE:
case REQ_OP_WRITE_ZEROES:
return 0;
- case REQ_OP_WRITE_SAME:
- return 1;
default:
break;
}
@@ -244,38 +240,6 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
bio->bi_flags &= ~(1U << bit);
}
-static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
-{
- *bv = bio_iovec(bio);
-}
-
-static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
-{
- struct bvec_iter iter = bio->bi_iter;
- int idx;
-
- if (unlikely(!bio_multiple_segments(bio))) {
- *bv = bio_iovec(bio);
- return;
- }
-
- bio_advance_iter(bio, &iter, iter.bi_size);
-
- if (!iter.bi_bvec_done)
- idx = iter.bi_idx - 1;
- else /* in the middle of bvec */
- idx = iter.bi_idx;
-
- *bv = bio->bi_io_vec[idx];
-
- /*
- * iter.bi_bvec_done records actual length of the last bvec
- * if this bio ends in the middle of one io vector
- */
- if (iter.bi_bvec_done)
- bv->bv_len = iter.bi_bvec_done;
-}
-
static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
@@ -293,6 +257,60 @@ static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
return &bio->bi_io_vec[bio->bi_vcnt - 1];
}
+/**
+ * struct folio_iter - State for iterating all folios in a bio.
+ * @folio: The current folio we're iterating. NULL after the last folio.
+ * @offset: The byte offset within the current folio.
+ * @length: The number of bytes in this iteration (will not cross folio
+ * boundary).
+ */
+struct folio_iter {
+ struct folio *folio;
+ size_t offset;
+ size_t length;
+ /* private: for use by the iterator */
+ struct folio *_next;
+ size_t _seg_count;
+ int _i;
+};
+
+static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
+ int i)
+{
+ struct bio_vec *bvec = bio_first_bvec_all(bio) + i;
+
+ fi->folio = page_folio(bvec->bv_page);
+ fi->offset = bvec->bv_offset +
+ PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
+ fi->_seg_count = bvec->bv_len;
+ fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
+ fi->_next = folio_next(fi->folio);
+ fi->_i = i;
+}
+
+static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
+{
+ fi->_seg_count -= fi->length;
+ if (fi->_seg_count) {
+ fi->folio = fi->_next;
+ fi->offset = 0;
+ fi->length = min(folio_size(fi->folio), fi->_seg_count);
+ fi->_next = folio_next(fi->folio);
+ } else if (fi->_i + 1 < bio->bi_vcnt) {
+ bio_first_folio(fi, bio, fi->_i + 1);
+ } else {
+ fi->folio = NULL;
+ }
+}
+
+/**
+ * bio_for_each_folio_all - Iterate over each folio in a bio.
+ * @fi: struct folio_iter which is updated for each folio.
+ * @bio: struct bio to iterate over.
+ */
+#define bio_for_each_folio_all(fi, bio) \
+ for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))
+
enum bip_flags {
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
@@ -309,7 +327,6 @@ struct bio_integrity_payload {
struct bvec_iter bip_iter;
- unsigned short bip_slab; /* slab the bip came from */
unsigned short bip_vcnt; /* # of integrity bio_vecs */
unsigned short bip_max_vcnt; /* integrity bio_vec slots */
unsigned short bip_flags; /* control flags */
@@ -319,7 +336,7 @@ struct bio_integrity_payload {
struct work_struct bip_work; /* I/O completion */
struct bio_vec *bip_vec;
- struct bio_vec bip_inline_vecs[0];/* embedded bvec array */
+ struct bio_vec bip_inline_vecs[];/* embedded bvec array */
};
#if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -355,7 +372,7 @@ static inline void bip_set_seed(struct bio_integrity_payload *bip,
#endif /* CONFIG_BLK_DEV_INTEGRITY */
-extern void bio_trim(struct bio *bio, int offset, int size);
+void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
gfp_t gfp, struct bio_set *bs);
@@ -366,7 +383,7 @@ extern struct bio *bio_split(struct bio *bio, int sectors,
* @gfp: gfp mask
* @bs: bio set to allocate from
*
- * Returns a bio representing the next @sectors of @bio - if the bio is smaller
+ * Return: a bio representing the next @sectors of @bio - if the bio is smaller
* than @sectors, returns the original bio unchanged.
*/
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
@@ -381,31 +398,32 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors,
enum {
BIOSET_NEED_BVECS = BIT(0),
BIOSET_NEED_RESCUER = BIT(1),
+ BIOSET_PERCPU_CACHE = BIT(2),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
-extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
-extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
+struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
+ blk_opf_t opf, gfp_t gfp_mask,
+ struct bio_set *bs);
+struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
extern void bio_put(struct bio *);
-extern void __bio_clone_fast(struct bio *, struct bio *);
-extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
+struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
+ gfp_t gfp, struct bio_set *bs);
+int bio_init_clone(struct block_device *bdev, struct bio *bio,
+ struct bio *bio_src, gfp_t gfp);
extern struct bio_set fs_bio_set;
-static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
-{
- return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
-}
-
-static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
+static inline struct bio *bio_alloc(struct block_device *bdev,
+ unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask)
{
- return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
+ return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
}
-extern blk_qc_t submit_bio(struct bio *);
+void submit_bio(struct bio *bio);
extern void bio_endio(struct bio *);
@@ -417,105 +435,68 @@ static inline void bio_io_error(struct bio *bio)
static inline void bio_wouldblock_error(struct bio *bio)
{
+ bio_set_flag(bio, BIO_QUIET);
bio->bi_status = BLK_STS_AGAIN;
bio_endio(bio);
}
+/*
+ * Calculate the number of bvec segments that should be allocated to fit the
+ * data pointed to by @iter. If @iter is backed by a bvec it is reused
+ * instead of allocating a new one.
+ */
+static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
+{
+ if (iov_iter_is_bvec(iter))
+ return 0;
+ return iov_iter_npages(iter, max_segs);
+}
+
struct request_queue;
extern int submit_bio_wait(struct bio *bio);
-extern void bio_advance(struct bio *, unsigned);
-
-extern void bio_init(struct bio *bio, struct bio_vec *table,
- unsigned short max_vecs);
+void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
+ unsigned short max_vecs, blk_opf_t opf);
extern void bio_uninit(struct bio *);
-extern void bio_reset(struct bio *);
+void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
void bio_chain(struct bio *, struct bio *);
-extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
+int bio_add_page(struct bio *, struct page *, unsigned len, unsigned off);
+bool bio_add_folio(struct bio *, struct folio *, size_t len, size_t off);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
-bool __bio_try_merge_page(struct bio *bio, struct page *page,
- unsigned int len, unsigned int off, bool *same_page);
+int bio_add_zone_append_page(struct bio *bio, struct page *page,
+ unsigned int len, unsigned int offset);
void __bio_add_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
-void bio_release_pages(struct bio *bio, bool mark_dirty);
-struct rq_map_data;
-extern struct bio *bio_map_user_iov(struct request_queue *,
- struct iov_iter *, gfp_t);
-extern void bio_unmap_user(struct bio *);
-extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
- gfp_t);
-extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
- gfp_t, int);
+void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter);
+void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
-void generic_start_io_acct(struct request_queue *q, int op,
- unsigned long sectors, struct hd_struct *part);
-void generic_end_io_acct(struct request_queue *q, int op,
- struct hd_struct *part,
- unsigned long start_time);
-
extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
-extern void bio_list_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
+void guard_bio_eod(struct bio *bio);
+void zero_fill_bio(struct bio *bio);
-extern struct bio *bio_copy_user_iov(struct request_queue *,
- struct rq_map_data *,
- struct iov_iter *,
- gfp_t);
-extern int bio_uncopy_user(struct bio *);
-void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
-void bio_truncate(struct bio *bio, unsigned new_size);
-
-static inline void zero_fill_bio(struct bio *bio)
+static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
{
- zero_fill_bio_iter(bio, bio->bi_iter);
+ if (!bio_flagged(bio, BIO_NO_PAGE_REF))
+ __bio_release_pages(bio, mark_dirty);
}
-extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
-extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
-extern unsigned int bvec_nr_vecs(unsigned short idx);
-extern const char *bio_devname(struct bio *bio, char *buffer);
-
-#define bio_set_dev(bio, bdev) \
-do { \
- if ((bio)->bi_disk != (bdev)->bd_disk) \
- bio_clear_flag(bio, BIO_THROTTLED);\
- (bio)->bi_disk = (bdev)->bd_disk; \
- (bio)->bi_partno = (bdev)->bd_partno; \
- bio_associate_blkg(bio); \
-} while (0)
-
-#define bio_copy_dev(dst, src) \
-do { \
- (dst)->bi_disk = (src)->bi_disk; \
- (dst)->bi_partno = (src)->bi_partno; \
- bio_clone_blkg_association(dst, src); \
-} while (0)
-
#define bio_dev(bio) \
- disk_devt((bio)->bi_disk)
-
-#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
-void bio_associate_blkg_from_page(struct bio *bio, struct page *page);
-#else
-static inline void bio_associate_blkg_from_page(struct bio *bio,
- struct page *page) { }
-#endif
+ disk_devt((bio)->bi_bdev->bd_disk)
#ifdef CONFIG_BLK_CGROUP
-void bio_disassociate_blkg(struct bio *bio);
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else /* CONFIG_BLK_CGROUP */
-static inline void bio_disassociate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
struct cgroup_subsys_state *css)
@@ -524,47 +505,15 @@ static inline void bio_clone_blkg_association(struct bio *dst,
struct bio *src) { }
#endif /* CONFIG_BLK_CGROUP */
-#ifdef CONFIG_HIGHMEM
-/*
- * remember never ever reenable interrupts between a bvec_kmap_irq and
- * bvec_kunmap_irq!
- */
-static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
-{
- unsigned long addr;
-
- /*
- * might not be a highmem page, but the preempt/irq count
- * balancing is a lot nicer this way
- */
- local_irq_save(*flags);
- addr = (unsigned long) kmap_atomic(bvec->bv_page);
-
- BUG_ON(addr & ~PAGE_MASK);
-
- return (char *) addr + bvec->bv_offset;
-}
-
-static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
-{
- unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
-
- kunmap_atomic((void *) ptr);
- local_irq_restore(*flags);
-}
-
-#else
-static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
+static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
{
- return page_address(bvec->bv_page) + bvec->bv_offset;
+ bio_clear_flag(bio, BIO_REMAPPED);
+ if (bio->bi_bdev != bdev)
+ bio_clear_flag(bio, BIO_BPS_THROTTLED);
+ bio->bi_bdev = bdev;
+ bio_associate_blkg(bio);
}
-static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
-{
- *flags = 0;
-}
-#endif
-
/*
* BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
*
@@ -704,6 +653,11 @@ struct bio_set {
struct kmem_cache *bio_slab;
unsigned int front_pad;
+ /*
+ * per-cpu bio alloc cache
+ */
+ struct bio_alloc_cache __percpu *cache;
+
mempool_t bio_pool;
mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -711,6 +665,7 @@ struct bio_set {
mempool_t bvec_integrity_pool;
#endif
+ unsigned int back_pad;
/*
* Deadlock avoidance for stacking block drivers: see comments in
* bio_alloc_bioset() for details
@@ -719,12 +674,11 @@ struct bio_set {
struct bio_list rescue_list;
struct work_struct rescue_work;
struct workqueue_struct *rescue_workqueue;
-};
-struct biovec_slab {
- int nr_vecs;
- char *name;
- struct kmem_cache *slab;
+ /*
+ * Hot un-plug notifier for the per-cpu cache, if used
+ */
+ struct hlist_node cpuhp_dead;
};
static inline bool bioset_initialized(struct bio_set *bs)
@@ -732,12 +686,6 @@ static inline bool bioset_initialized(struct bio_set *bs)
return bs->bio_slab != NULL;
}
-/*
- * a small number of entries is fine, not going to be performance critical.
- * basically we just need to survive
- */
-#define BIO_SPLIT_ENTRIES 2
-
#if defined(CONFIG_BLK_DEV_INTEGRITY)
#define bip_for_each_vec(bvl, bip, iter) \
@@ -829,10 +777,18 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
*/
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
- bio->bi_opf |= REQ_HIPRI;
+ bio->bi_opf |= REQ_POLLED;
if (!is_sync_kiocb(kiocb))
bio->bi_opf |= REQ_NOWAIT;
}
-#endif /* CONFIG_BLOCK */
+static inline void bio_clear_polled(struct bio *bio)
+{
+ /* can't support alloc cache if we turn off polling */
+ bio->bi_opf &= ~(REQ_POLLED | REQ_ALLOC_CACHE);
+}
+
+struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
+ unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);
+
#endif /* __LINUX_BIO_H */
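
A sketch of the reworked allocation flow (example_end_io and
example_submit_read are hypothetical): bio_alloc() now takes the target
block device and the operation flags up front, so the removed
bio_set_dev()-after-alloc step and separate bi_opf assignment are no longer
needed. With a direct-reclaim gfp mask such as GFP_KERNEL the allocation is
assumed not to fail.

    #include <linux/bio.h>

    static void example_end_io(struct bio *bio)
    {
            bio_put(bio);
    }

    static void example_submit_read(struct block_device *bdev,
                                    struct page *page)
    {
            struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);

            bio->bi_iter.bi_sector = 0;
            __bio_add_page(bio, page, PAGE_SIZE, 0);
            bio->bi_end_io = example_end_io;
            submit_bio(bio);
    }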
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 4bbb5f1c8b5b..c9be1657f03d 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -19,6 +19,9 @@
*
* Example:
*
+ * #include <linux/bitfield.h>
+ * #include <linux/bits.h>
+ *
* #define REG_FIELD_A GENMASK(6, 0)
* #define REG_FIELD_B BIT(7)
* #define REG_FIELD_C GENMASK(15, 8)
@@ -41,6 +44,22 @@
#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+#define __scalar_type_to_unsigned_cases(type) \
+ unsigned type: (unsigned type)0, \
+ signed type: (unsigned type)0
+
+#define __unsigned_scalar_typeof(x) typeof( \
+ _Generic((x), \
+ char: (unsigned char)0, \
+ __scalar_type_to_unsigned_cases(char), \
+ __scalar_type_to_unsigned_cases(short), \
+ __scalar_type_to_unsigned_cases(int), \
+ __scalar_type_to_unsigned_cases(long), \
+ __scalar_type_to_unsigned_cases(long long), \
+ default: (x)))
+
+#define __bf_cast_unsigned(type, x) ((__unsigned_scalar_typeof(type))(x))
+
#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \
({ \
BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \
@@ -49,13 +68,27 @@
BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \
~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
_pfx "value too large for the field"); \
- BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull, \
+ BUILD_BUG_ON_MSG(__bf_cast_unsigned(_mask, _mask) > \
+ __bf_cast_unsigned(_reg, ~0ull), \
_pfx "type of reg too small for mask"); \
__BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \
(1ULL << __bf_shf(_mask))); \
})
/**
+ * FIELD_MAX() - produce the maximum value representable by a field
+ * @_mask: shifted mask defining the field's length and position
+ *
+ * FIELD_MAX() returns the maximum value that can be held in the field
+ * specified by @_mask.
+ */
+#define FIELD_MAX(_mask) \
+ ({ \
+ __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_MAX: "); \
+ (typeof(_mask))((_mask) >> __bf_shf(_mask)); \
+ })
+
+/**
* FIELD_FIT() - check if value fits in the field
* @_mask: shifted mask defining the field's length and position
* @_val: value to test against the field
@@ -64,7 +97,7 @@
*/
#define FIELD_FIT(_mask, _val) \
({ \
- __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \
+ __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: "); \
!((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
})
@@ -110,6 +143,7 @@ static __always_inline u64 field_mask(u64 field)
{
return field / field_multiplier(field);
}
+#define field_max(field) ((typeof(field))field_mask(field))
#define ____MAKE_OP(type,base,to,from) \
static __always_inline __##type type##_encode_bits(base v, base field) \
{ \
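
A sketch combining the new FIELD_MAX() with the relaxed FIELD_FIT() (which,
after this change, no longer trips the compile-time value check for
non-constant values); REG_FIELD_C mirrors the field from the header's own
example and example_pack is hypothetical.

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    #define REG_FIELD_C     GENMASK(15, 8)

    static u32 example_pack(u32 reg, u32 c)
    {
            if (!FIELD_FIT(REG_FIELD_C, c))     /* runtime value is fine now */
                    c = FIELD_MAX(REG_FIELD_C); /* clamp to 0xff */

            reg &= ~REG_FIELD_C;
            return reg | FIELD_PREP(REG_FIELD_C, c);
    }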
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index e52ceb1a73d3..7d6d73b78147 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -4,10 +4,14 @@
#ifndef __ASSEMBLY__
-#include <linux/types.h>
+#include <linux/align.h>
#include <linux/bitops.h>
+#include <linux/find.h>
+#include <linux/limits.h>
#include <linux/string.h>
-#include <linux/kernel.h>
+#include <linux/types.h>
+
+struct device;
/*
* bitmaps provide bit arrays that consume one or more unsigned
@@ -47,10 +51,11 @@
* bitmap_empty(src, nbits) Are all bits zero in *src?
* bitmap_full(src, nbits) Are all bits set in *src?
* bitmap_weight(src, nbits) Hamming Weight: number set bits
+ * bitmap_weight_and(src1, src2, nbits) Hamming Weight of and'ed bitmap
* bitmap_set(dst, pos, nbits) Set specified bit area
* bitmap_clear(dst, pos, nbits) Clear specified bit area
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
- * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above
+ * bitmap_find_next_zero_area_off(buf, len, pos, n, mask, mask_off) as above
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
* bitmap_cut(dst, src, first, n, nbits) Cut n bits from first, copy rest
@@ -67,7 +72,9 @@
* bitmap_release_region(bitmap, pos, order) Free specified bit region
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
* bitmap_from_arr32(dst, buf, nbits) Copy nbits from u32[] buf to dst
+ * bitmap_from_arr64(dst, buf, nbits) Copy nbits from u64[] buf to dst
* bitmap_to_arr32(buf, src, nbits) Copy nbits from buf to u32[] dst
+ * bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst
* bitmap_get_value8(map, start) Get 8bit value from map at start
* bitmap_set_value8(map, value, start) Set 8bit value to map at start
*
@@ -112,56 +119,63 @@
* Allocation and deallocation of bitmap.
* Provided in lib/bitmap.c to avoid circular dependency.
*/
-extern unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags);
-extern unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags);
-extern void bitmap_free(const unsigned long *bitmap);
+unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags);
+unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags);
+unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node);
+unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node);
+void bitmap_free(const unsigned long *bitmap);
+
+/* Managed variants of the above. */
+unsigned long *devm_bitmap_alloc(struct device *dev,
+ unsigned int nbits, gfp_t flags);
+unsigned long *devm_bitmap_zalloc(struct device *dev,
+ unsigned int nbits, gfp_t flags);
/*
* lib/bitmap.c provides these functions:
*/
-extern int __bitmap_empty(const unsigned long *bitmap, unsigned int nbits);
-extern int __bitmap_full(const unsigned long *bitmap, unsigned int nbits);
-extern int __bitmap_equal(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern bool __pure __bitmap_or_equal(const unsigned long *src1,
- const unsigned long *src2,
- const unsigned long *src3,
- unsigned int nbits);
-extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
- unsigned int nbits);
-extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
- unsigned int shift, unsigned int nbits);
-extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
- unsigned int shift, unsigned int nbits);
-extern void bitmap_cut(unsigned long *dst, const unsigned long *src,
- unsigned int first, unsigned int cut,
- unsigned int nbits);
-extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern void __bitmap_replace(unsigned long *dst,
- const unsigned long *old, const unsigned long *new,
- const unsigned long *mask, unsigned int nbits);
-extern int __bitmap_intersects(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern int __bitmap_subset(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
-extern void __bitmap_set(unsigned long *map, unsigned int start, int len);
-extern void __bitmap_clear(unsigned long *map, unsigned int start, int len);
-
-extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
- unsigned long size,
- unsigned long start,
- unsigned int nr,
- unsigned long align_mask,
- unsigned long align_offset);
+bool __bitmap_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+bool __pure __bitmap_or_equal(const unsigned long *src1,
+ const unsigned long *src2,
+ const unsigned long *src3,
+ unsigned int nbits);
+void __bitmap_complement(unsigned long *dst, const unsigned long *src,
+ unsigned int nbits);
+void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits);
+void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits);
+void bitmap_cut(unsigned long *dst, const unsigned long *src,
+ unsigned int first, unsigned int cut, unsigned int nbits);
+bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+void __bitmap_replace(unsigned long *dst,
+ const unsigned long *old, const unsigned long *new,
+ const unsigned long *mask, unsigned int nbits);
+bool __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+bool __bitmap_subset(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
+unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+void __bitmap_set(unsigned long *map, unsigned int start, int len);
+void __bitmap_clear(unsigned long *map, unsigned int start, int len);
+
+unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask,
+ unsigned long align_offset);
/**
* bitmap_find_next_zero_area - find a contiguous aligned zero area
@@ -186,63 +200,72 @@ bitmap_find_next_zero_area(unsigned long *map,
align_mask, 0);
}
-extern int bitmap_parse(const char *buf, unsigned int buflen,
+int bitmap_parse(const char *buf, unsigned int buflen,
unsigned long *dst, int nbits);
-extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
+int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
unsigned long *dst, int nbits);
-extern int bitmap_parselist(const char *buf, unsigned long *maskp,
+int bitmap_parselist(const char *buf, unsigned long *maskp,
int nmaskbits);
-extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
+int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
unsigned long *dst, int nbits);
-extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
+void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new, unsigned int nbits);
-extern int bitmap_bitremap(int oldbit,
+int bitmap_bitremap(int oldbit,
const unsigned long *old, const unsigned long *new, int bits);
-extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
+void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, unsigned int bits);
-extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
+void bitmap_fold(unsigned long *dst, const unsigned long *orig,
unsigned int sz, unsigned int nbits);
-extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
-extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
-extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
+int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
+void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
+int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
#ifdef __BIG_ENDIAN
-extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
+void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
#else
#define bitmap_copy_le bitmap_copy
#endif
-extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits);
-extern int bitmap_print_to_pagebuf(bool list, char *buf,
+int bitmap_print_to_pagebuf(bool list, char *buf,
const unsigned long *maskp, int nmaskbits);
+extern int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count);
+
+extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count);
+
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
-/*
- * The static inlines below do not handle constant nbits==0 correctly,
- * so make such users (should any ever turn up) call the out-of-line
- * versions.
- */
-#define small_const_nbits(nbits) \
- (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0)
-
static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memset(dst, 0, len);
+
+ if (small_const_nbits(nbits))
+ *dst = 0;
+ else
+ memset(dst, 0, len);
}
static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memset(dst, 0xff, len);
+
+ if (small_const_nbits(nbits))
+ *dst = ~0UL;
+ else
+ memset(dst, 0xff, len);
}
static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
unsigned int nbits)
{
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memcpy(dst, src, len);
+
+ if (small_const_nbits(nbits))
+ *dst = *src;
+ else
+ memcpy(dst, src, len);
}
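
With small_const_nbits() now applied in bitmap_zero()/bitmap_fill()/bitmap_copy(), a compile-time nbits of at most BITS_PER_LONG collapses to a single word access instead of a memset()/memcpy() call; a sketch:

	static void small_bitmap_example(void)
	{
		DECLARE_BITMAP(mask, 16);	/* one unsigned long on any architecture */

		bitmap_fill(mask, 16);		/* becomes: *mask = ~0UL */
		bitmap_zero(mask, 16);		/* becomes: *mask = 0 */
	}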
/*
@@ -257,13 +280,17 @@ static inline void bitmap_copy_clear_tail(unsigned long *dst,
}
/*
- * On 32-bit systems bitmaps are represented as u32 arrays internally, and
- * therefore conversion is not needed when copying data from/to arrays of u32.
+ * On 32-bit systems bitmaps are represented as u32 arrays internally. On LE64
+ * machines the order of hi and lo parts of numbers matches the bitmap layout,
+ * so in both cases no conversion is needed when copying data from/to arrays of
+ * u32. But in the LE64 case, the typecast in bitmap_copy_clear_tail() may lead
+ * to an out-of-bounds access. To avoid that, neither the LE nor the BE variant
+ * of 64-bit architectures uses bitmap_copy_clear_tail().
*/
#if BITS_PER_LONG == 64
-extern void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
+void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
unsigned int nbits);
-extern void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
+void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
unsigned int nbits);
#else
#define bitmap_from_arr32(bitmap, buf, nbits) \
@@ -274,7 +301,23 @@ extern void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
(const unsigned long *) (bitmap), (nbits))
#endif
-static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
+/*
+ * On 64-bit systems bitmaps are represented as u64 arrays internally. On LE32
+ * machines the order of hi and lo parts of numbers matches the bitmap structure.
+ * In both cases conversion is not needed when copying data from/to arrays of
+ * u64.
+ */
+#if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
+void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits);
+void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits);
+#else
+#define bitmap_from_arr64(bitmap, buf, nbits) \
+ bitmap_copy_clear_tail((unsigned long *)(bitmap), (const unsigned long *)(buf), (nbits))
+#define bitmap_to_arr64(buf, bitmap, nbits) \
+ bitmap_copy_clear_tail((unsigned long *)(buf), (const unsigned long *)(bitmap), (nbits))
+#endif
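
A sketch of the new 64-bit conversion helpers; reading a 128-bit mask out of two hypothetical u64 hardware registers works identically on 32- and 64-bit hosts:

	static void read_hw_mask(unsigned long *dst, const u64 regs[2])
	{
		bitmap_from_arr64(dst, regs, 128);
	}

	static void write_hw_mask(u64 regs[2], const unsigned long *src)
	{
		bitmap_to_arr64(regs, src, 128);
	}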
+
+static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
@@ -300,7 +343,7 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
__bitmap_xor(dst, src1, src2, nbits);
}
-static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
+static inline bool bitmap_andnot(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
@@ -324,8 +367,8 @@ static inline void bitmap_complement(unsigned long *dst, const unsigned long *sr
#endif
#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
-static inline int bitmap_equal(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static inline bool bitmap_equal(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
@@ -355,8 +398,9 @@ static inline bool bitmap_or_equal(const unsigned long *src1,
return !(((*src1 | *src2) ^ *src3) & BITMAP_LAST_WORD_MASK(nbits));
}
-static inline int bitmap_intersects(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static inline bool bitmap_intersects(const unsigned long *src1,
+ const unsigned long *src2,
+ unsigned int nbits)
{
if (small_const_nbits(nbits))
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
@@ -364,8 +408,8 @@ static inline int bitmap_intersects(const unsigned long *src1,
return __bitmap_intersects(src1, src2, nbits);
}
-static inline int bitmap_subset(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static inline bool bitmap_subset(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
@@ -373,7 +417,7 @@ static inline int bitmap_subset(const unsigned long *src1,
return __bitmap_subset(src1, src2, nbits);
}
-static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
+static inline bool bitmap_empty(const unsigned long *src, unsigned nbits)
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
@@ -381,7 +425,7 @@ static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
return find_first_bit(src, nbits) == nbits;
}
-static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
+static inline bool bitmap_full(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
@@ -389,18 +433,30 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
return find_first_zero_bit(src, nbits) == nbits;
}
-static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
+static __always_inline
+unsigned int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
return __bitmap_weight(src, nbits);
}
+static __always_inline
+unsigned long bitmap_weight_and(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ return hweight_long(*src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits));
+ return __bitmap_weight_and(src1, src2, nbits);
+}
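
bitmap_weight_and() counts the intersection without materializing it; previously a caller needed a scratch bitmap plus bitmap_and() and bitmap_weight(). A sketch (names illustrative):

	static unsigned long count_usable_cpus(const unsigned long *online,
					       const unsigned long *allowed,
					       unsigned int nbits)
	{
		/* No temporary bitmap required for the intersection count. */
		return bitmap_weight_and(online, allowed, nbits);
	}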
+
static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
unsigned int nbits)
{
if (__builtin_constant_p(nbits) && nbits == 1)
__set_bit(start, map);
+ else if (small_const_nbits(start + nbits))
+ *map |= GENMASK(start + nbits - 1, start);
else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
__builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
@@ -415,6 +471,8 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
{
if (__builtin_constant_p(nbits) && nbits == 1)
__clear_bit(start, map);
+ else if (small_const_nbits(start + nbits))
+ *map &= ~GENMASK(start + nbits - 1, start);
else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
__builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
@@ -454,14 +512,6 @@ static inline void bitmap_replace(unsigned long *dst,
__bitmap_replace(dst, old, new, mask, nbits);
}
-static inline void bitmap_next_clear_region(unsigned long *bitmap,
- unsigned int *rs, unsigned int *re,
- unsigned int end)
-{
- *rs = find_next_zero_bit(bitmap, end, *rs);
- *re = find_next_bit(bitmap, end, *rs + 1);
-}
-
static inline void bitmap_next_set_region(unsigned long *bitmap,
unsigned int *rs, unsigned int *re,
unsigned int end)
@@ -470,25 +520,6 @@ static inline void bitmap_next_set_region(unsigned long *bitmap,
*re = find_next_zero_bit(bitmap, end, *rs + 1);
}
-/*
- * Bitmap region iterators. Iterates over the bitmap between [@start, @end).
- * @rs and @re should be integer variables and will be set to start and end
- * index of the current clear or set region.
- */
-#define bitmap_for_each_clear_region(bitmap, rs, re, start, end) \
- for ((rs) = (start), \
- bitmap_next_clear_region((bitmap), &(rs), &(re), (end)); \
- (rs) < (re); \
- (rs) = (re) + 1, \
- bitmap_next_clear_region((bitmap), &(rs), &(re), (end)))
-
-#define bitmap_for_each_set_region(bitmap, rs, re, start, end) \
- for ((rs) = (start), \
- bitmap_next_set_region((bitmap), &(rs), &(re), (end)); \
- (rs) < (re); \
- (rs) = (re) + 1, \
- bitmap_next_set_region((bitmap), &(rs), &(re), (end)))
-
/**
* BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap.
* @n: u64 value
@@ -534,10 +565,7 @@ static inline void bitmap_next_set_region(unsigned long *bitmap,
*/
static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
{
- dst[0] = mask & ULONG_MAX;
-
- if (sizeof(mask) > sizeof(unsigned long))
- dst[1] = mask >> 32;
+ bitmap_from_arr64(dst, &mask, 64);
}
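
bitmap_from_u64() now reuses bitmap_from_arr64(), so the word-size and endianness handling lives in one place. Usage is unchanged; a sketch:

	static void seed_bitmap(void)
	{
		DECLARE_BITMAP(map, 64);

		bitmap_from_u64(map, GENMASK_ULL(39, 21));	/* bits 21..39 set */
	}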
/**
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 47f54b459c26..2ba557e067fe 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -1,8 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
+
#include <asm/types.h>
#include <linux/bits.h>
+#include <linux/typecheck.h>
+
+#include <uapi/linux/kernel.h>
/* Set bits in the first 'n' bytes when loaded from memory */
#ifdef __LITTLE_ENDIAN
@@ -12,10 +16,10 @@
#endif
#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
-#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
-#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
-#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
-#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
+#define BITS_TO_LONGS(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
+#define BITS_TO_U64(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
+#define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
+#define BITS_TO_BYTES(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
@@ -23,44 +27,61 @@ extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);
/*
+ * Defined here because those may be needed by architecture-specific static
+ * inlines.
+ */
+
+#include <asm-generic/bitops/generic-non-atomic.h>
+
+/*
+ * Many architecture-specific non-atomic bitops contain inline asm code and due
+ * to that the compiler can't optimize them to compile-time expressions or
+ * constants. By contrast, the generic_*() helpers are defined in pure C and
+ * compilers optimize them just fine.
+ * Therefore, to make `unsigned long foo = 0; __set_bit(BAR, &foo)` effectively
+ * equal to `unsigned long foo = BIT(BAR)`, pick the generic C alternative when
+ * the arguments can be resolved at compile time. That expression itself is a
+ * constant and brings no functional change in the remaining cases.
+ * The casts to `uintptr_t` are needed to mitigate `-Waddress` warnings when
+ * passing a bitmap from .bss or .data (-> `!!addr` is always true).
+ */
+#define bitop(op, nr, addr) \
+ ((__builtin_constant_p(nr) && \
+ __builtin_constant_p((uintptr_t)(addr) != (uintptr_t)NULL) && \
+ (uintptr_t)(addr) != (uintptr_t)NULL && \
+ __builtin_constant_p(*(const unsigned long *)(addr))) ? \
+ const##op(nr, addr) : op(nr, addr))
+
+#define __set_bit(nr, addr) bitop(___set_bit, nr, addr)
+#define __clear_bit(nr, addr) bitop(___clear_bit, nr, addr)
+#define __change_bit(nr, addr) bitop(___change_bit, nr, addr)
+#define __test_and_set_bit(nr, addr) bitop(___test_and_set_bit, nr, addr)
+#define __test_and_clear_bit(nr, addr) bitop(___test_and_clear_bit, nr, addr)
+#define __test_and_change_bit(nr, addr) bitop(___test_and_change_bit, nr, addr)
+#define test_bit(nr, addr) bitop(_test_bit, nr, addr)
+#define test_bit_acquire(nr, addr) bitop(_test_bit_acquire, nr, addr)
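
A sketch of the folding this enables: with a compile-time bit number and an initialized local word, the whole sequence reduces to a constant:

	static unsigned long make_flags(void)
	{
		unsigned long flags = 0;

		__set_bit(3, &flags);	/* picks const___set_bit(); folds to BIT(3) */
		return flags;		/* compiles to: return 8; */
	}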
+
+/*
* Include this here because some architectures need generic_ffs/fls in
* scope
*/
#include <asm/bitops.h>
-#define for_each_set_bit(bit, addr, size) \
- for ((bit) = find_first_bit((addr), (size)); \
- (bit) < (size); \
- (bit) = find_next_bit((addr), (size), (bit) + 1))
+/* Check that the bitops prototypes are sane */
+#define __check_bitop_pr(name) \
+ static_assert(__same_type(arch_##name, generic_##name) && \
+ __same_type(const_##name, generic_##name) && \
+ __same_type(_##name, generic_##name))
-/* same as for_each_set_bit() but use bit as value to start with */
-#define for_each_set_bit_from(bit, addr, size) \
- for ((bit) = find_next_bit((addr), (size), (bit)); \
- (bit) < (size); \
- (bit) = find_next_bit((addr), (size), (bit) + 1))
+__check_bitop_pr(__set_bit);
+__check_bitop_pr(__clear_bit);
+__check_bitop_pr(__change_bit);
+__check_bitop_pr(__test_and_set_bit);
+__check_bitop_pr(__test_and_clear_bit);
+__check_bitop_pr(__test_and_change_bit);
+__check_bitop_pr(test_bit);
-#define for_each_clear_bit(bit, addr, size) \
- for ((bit) = find_first_zero_bit((addr), (size)); \
- (bit) < (size); \
- (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
-
-/* same as for_each_clear_bit() but use bit as value to start with */
-#define for_each_clear_bit_from(bit, addr, size) \
- for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
- (bit) < (size); \
- (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
-
-/**
- * for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
- * @start: bit offset to start search and to store the current iteration offset
- * @clump: location to store copy of current 8-bit clump
- * @bits: bitmap address to base the search on
- * @size: bitmap size in number of bits
- */
-#define for_each_set_clump8(start, clump, bits, size) \
- for ((start) = find_first_clump8(&(clump), (bits), (size)); \
- (start) < (size); \
- (start) = find_next_clump8(&(clump), (bits), (size), (start) + 8))
+#undef __check_bitop_pr
static inline int get_bitmask_order(unsigned int count)
{
@@ -72,7 +93,7 @@ static inline int get_bitmask_order(unsigned int count)
static __always_inline unsigned long hweight_long(unsigned long w)
{
- return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
+ return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w);
}
/**
@@ -162,7 +183,7 @@ static inline __u8 ror8(__u8 word, unsigned int shift)
*
* This is safe to use for 16- and 8-bit types as well.
*/
-static inline __s32 sign_extend32(__u32 value, int index)
+static __always_inline __s32 sign_extend32(__u32 value, int index)
{
__u8 shift = 31 - index;
return (__s32)(value << shift) >> shift;
@@ -173,7 +194,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
* @value: value to sign extend
* @index: 0 based bit index (0<=index<64) to sign bit
*/
-static inline __s64 sign_extend64(__u64 value, int index)
+static __always_inline __s64 sign_extend64(__u64 value, int index)
{
__u8 shift = 63 - index;
return (__s64)(value << shift) >> shift;
@@ -188,12 +209,10 @@ static inline unsigned fls_long(unsigned long l)
static inline int get_count_order(unsigned int count)
{
- int order;
+ if (count == 0)
+ return -1;
- order = fls(count) - 1;
- if (count & (count - 1))
- order++;
- return order;
+ return fls(--count);
}
/**
@@ -206,17 +225,14 @@ static inline int get_count_order_long(unsigned long l)
{
if (l == 0UL)
return -1;
- else if (l & (l - 1UL))
- return (int)fls_long(l);
- else
- return (int)fls_long(l) - 1;
+ return (int)fls_long(--l);
}
/**
* __ffs64 - find first set bit in a 64 bit word
* @word: The 64 bit word
*
- * On 64 bit arches this is a synomyn for __ffs
+ * On 64 bit arches this is a synonym for __ffs
* The result is not defined if no bits are set, so check that @word
* is non-zero before calling this.
*/
@@ -232,6 +248,25 @@ static inline unsigned long __ffs64(u64 word)
}
/**
+ * fns - find N'th set bit in a word
+ * @word: The word to search
+ * @n: Zero-based index of the set bit to find
+ */
+static inline unsigned long fns(unsigned long word, unsigned int n)
+{
+ unsigned int bit;
+
+ while (word) {
+ bit = __ffs(word);
+ if (n-- == 0)
+ return bit;
+ __clear_bit(bit, &word);
+ }
+
+ return BITS_PER_LONG;
+}
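
The index @n is zero-based, and fns() returns BITS_PER_LONG when the word holds fewer than n + 1 set bits; for example:

	static void fns_example(void)
	{
		unsigned long w = 0x16;	/* bits 1, 2 and 4 set */
		unsigned long b;

		b = fns(w, 0);	/* b == 1: first set bit */
		b = fns(w, 1);	/* b == 2: second set bit */
		b = fns(w, 3);	/* b == BITS_PER_LONG: no fourth set bit */
	}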
+
+/**
* assign_bit - Assign value to a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
@@ -255,6 +290,55 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
__clear_bit(nr, addr);
}
+/**
+ * __ptr_set_bit - Set bit in a pointer's value
+ * @nr: the bit to set
+ * @addr: the address of the pointer variable
+ *
+ * Example:
+ * void *p = foo();
+ * __ptr_set_bit(bit, &p);
+ */
+#define __ptr_set_bit(nr, addr) \
+ ({ \
+ typecheck_pointer(*(addr)); \
+ __set_bit(nr, (unsigned long *)(addr)); \
+ })
+
+/**
+ * __ptr_clear_bit - Clear bit in a pointer's value
+ * @nr: the bit to clear
+ * @addr: the address of the pointer variable
+ *
+ * Example:
+ * void *p = foo();
+ * __ptr_clear_bit(bit, &p);
+ */
+#define __ptr_clear_bit(nr, addr) \
+ ({ \
+ typecheck_pointer(*(addr)); \
+ __clear_bit(nr, (unsigned long *)(addr)); \
+ })
+
+/**
+ * __ptr_test_bit - Test bit in a pointer's value
+ * @nr: the bit to test
+ * @addr: the address of the pointer variable
+ *
+ * Example:
+ * void *p = foo();
+ * if (__ptr_test_bit(bit, &p)) {
+ * ...
+ * } else {
+ * ...
+ * }
+ */
+#define __ptr_test_bit(nr, addr) \
+ ({ \
+ typecheck_pointer(*(addr)); \
+ test_bit(nr, (unsigned long *)(addr)); \
+ })
+
#ifdef __KERNEL__
#ifndef set_mask_bits
@@ -263,10 +347,10 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
const typeof(*(ptr)) mask__ = (mask), bits__ = (bits); \
typeof(*(ptr)) old__, new__; \
\
+ old__ = READ_ONCE(*(ptr)); \
do { \
- old__ = READ_ONCE(*(ptr)); \
new__ = (old__ & ~mask__) | bits__; \
- } while (cmpxchg(ptr, old__, new__) != old__); \
+ } while (!try_cmpxchg(ptr, &old__, new__)); \
\
old__; \
})
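
The try_cmpxchg() form also reloads *ptr into old__ on a failed compare for free, saving a READ_ONCE() per retry. Usage is unchanged; a sketch (the 4-bit state field is illustrative):

	static unsigned long set_state(unsigned long *flags, unsigned long state)
	{
		/* Atomically replace bits 3:0; returns the previous value. */
		return set_mask_bits(flags, GENMASK(3, 0), state);
	}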
@@ -278,27 +362,16 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
typeof(*(ptr)) old__, new__; \
\
+ old__ = READ_ONCE(*(ptr)); \
do { \
- old__ = READ_ONCE(*(ptr)); \
+ if (old__ & test__) \
+ break; \
new__ = old__ & ~clear__; \
- } while (!(old__ & test__) && \
- cmpxchg(ptr, old__, new__) != old__); \
+ } while (!try_cmpxchg(ptr, &old__, new__)); \
\
!(old__ & test__); \
})
#endif
-#ifndef find_last_bit
-/**
- * find_last_bit - find the last set bit in a memory region
- * @addr: The address to start the search at
- * @size: The number of bits to search
- *
- * Returns the bit number of the last set bit, or size.
- */
-extern unsigned long find_last_bit(const unsigned long *addr,
- unsigned long size);
-#endif
-
#endif /* __KERNEL__ */
#endif
diff --git a/include/linux/bits.h b/include/linux/bits.h
index 669d69441a62..87d112650dfb 100644
--- a/include/linux/bits.h
+++ b/include/linux/bits.h
@@ -3,9 +3,9 @@
#define __LINUX_BITS_H
#include <linux/const.h>
+#include <vdso/bits.h>
#include <asm/bitsperlong.h>
-#define BIT(nr) (UL(1) << (nr))
#define BIT_ULL(nr) (ULL(1) << (nr))
#define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
@@ -18,12 +18,29 @@
* position @h. For example
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
*/
-#define GENMASK(h, l) \
+#if !defined(__ASSEMBLY__)
+#include <linux/build_bug.h>
+#define GENMASK_INPUT_CHECK(h, l) \
+ (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
+ __is_constexpr((l) > (h)), (l) > (h), 0)))
+#else
+/*
+ * BUILD_BUG_ON_ZERO is not available in h files included from asm files,
+ * so disable the input check in that case.
+ */
+#define GENMASK_INPUT_CHECK(h, l) 0
+#endif
+
+#define __GENMASK(h, l) \
(((~UL(0)) - (UL(1) << (l)) + 1) & \
(~UL(0) >> (BITS_PER_LONG - 1 - (h))))
+#define GENMASK(h, l) \
+ (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))
-#define GENMASK_ULL(h, l) \
+#define __GENMASK_ULL(h, l) \
(((~ULL(0)) - (ULL(1) << (l)) + 1) & \
(~ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h))))
+#define GENMASK_ULL(h, l) \
+ (GENMASK_INPUT_CHECK(h, l) + __GENMASK_ULL(h, l))
#endif /* __LINUX_BITS_H */
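
With the input check in place, transposed arguments fail at build time rather than producing a silent bad mask; a sketch:

	static const unsigned long ok = GENMASK(7, 4);		/* 0xf0, compiles as before */
	/* static const unsigned long bad = GENMASK(4, 7); */	/* build error: l > h */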
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index e4a6949fd171..dd5841a42c33 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -14,737 +14,38 @@
* Nauman Rafique <nauman@google.com>
*/
-#include <linux/cgroup.h>
-#include <linux/percpu.h>
-#include <linux/percpu_counter.h>
-#include <linux/u64_stats_sync.h>
-#include <linux/seq_file.h>
-#include <linux/radix-tree.h>
-#include <linux/blkdev.h>
-#include <linux/atomic.h>
-#include <linux/kthread.h>
-#include <linux/fs.h>
+#include <linux/types.h>
-/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
-#define BLKG_STAT_CPU_BATCH (INT_MAX / 2)
+struct bio;
+struct cgroup_subsys_state;
+struct gendisk;
-/* Max limits for throttle policy */
-#define THROTL_IOPS_MAX UINT_MAX
+#define FC_APPID_LEN 129
#ifdef CONFIG_BLK_CGROUP
-
-enum blkg_iostat_type {
- BLKG_IOSTAT_READ,
- BLKG_IOSTAT_WRITE,
- BLKG_IOSTAT_DISCARD,
-
- BLKG_IOSTAT_NR,
-};
-
-struct blkcg_gq;
-
-struct blkcg {
- struct cgroup_subsys_state css;
- spinlock_t lock;
-
- struct radix_tree_root blkg_tree;
- struct blkcg_gq __rcu *blkg_hint;
- struct hlist_head blkg_list;
-
- struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];
-
- struct list_head all_blkcgs_node;
-#ifdef CONFIG_CGROUP_WRITEBACK
- struct list_head cgwb_list;
- refcount_t cgwb_refcnt;
-#endif
-};
-
-struct blkg_iostat {
- u64 bytes[BLKG_IOSTAT_NR];
- u64 ios[BLKG_IOSTAT_NR];
-};
-
-struct blkg_iostat_set {
- struct u64_stats_sync sync;
- struct blkg_iostat cur;
- struct blkg_iostat last;
-};
-
-/*
- * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a
- * request_queue (q). This is used by blkcg policies which need to track
- * information per blkcg - q pair.
- *
- * There can be multiple active blkcg policies and each blkg:policy pair is
- * represented by a blkg_policy_data which is allocated and freed by each
- * policy's pd_alloc/free_fn() methods. A policy can allocate private data
- * area by allocating larger data structure which embeds blkg_policy_data
- * at the beginning.
- */
-struct blkg_policy_data {
- /* the blkg and policy id this per-policy data belongs to */
- struct blkcg_gq *blkg;
- int plid;
-};
-
-/*
- * Policies that need to keep per-blkcg data which is independent from any
- * request_queue associated to it should implement cpd_alloc/free_fn()
- * methods. A policy can allocate private data area by allocating larger
- * data structure which embeds blkcg_policy_data at the beginning.
- * cpd_init() is invoked to let each policy handle per-blkcg data.
- */
-struct blkcg_policy_data {
- /* the blkcg and policy id this per-policy data belongs to */
- struct blkcg *blkcg;
- int plid;
-};
-
-/* association between a blk cgroup and a request queue */
-struct blkcg_gq {
- /* Pointer to the associated request_queue */
- struct request_queue *q;
- struct list_head q_node;
- struct hlist_node blkcg_node;
- struct blkcg *blkcg;
-
- /*
- * Each blkg gets congested separately and the congestion state is
- * propagated to the matching bdi_writeback_congested.
- */
- struct bdi_writeback_congested *wb_congested;
-
- /* all non-root blkcg_gq's are guaranteed to have access to parent */
- struct blkcg_gq *parent;
-
- /* reference count */
- struct percpu_ref refcnt;
-
- /* is this blkg online? protected by both blkcg and q locks */
- bool online;
-
- struct blkg_iostat_set __percpu *iostat_cpu;
- struct blkg_iostat_set iostat;
-
- struct blkg_policy_data *pd[BLKCG_MAX_POLS];
-
- spinlock_t async_bio_lock;
- struct bio_list async_bios;
- struct work_struct async_bio_work;
-
- atomic_t use_delay;
- atomic64_t delay_nsec;
- atomic64_t delay_start;
- u64 last_delay;
- int last_use;
-
- struct rcu_head rcu_head;
-};
-
-typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
-typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
-typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
-typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
-typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
- struct request_queue *q, struct blkcg *blkcg);
-typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
-typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
-typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
-typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
-typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
-typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
- size_t size);
-
-struct blkcg_policy {
- int plid;
- /* cgroup files for the policy */
- struct cftype *dfl_cftypes;
- struct cftype *legacy_cftypes;
-
- /* operations */
- blkcg_pol_alloc_cpd_fn *cpd_alloc_fn;
- blkcg_pol_init_cpd_fn *cpd_init_fn;
- blkcg_pol_free_cpd_fn *cpd_free_fn;
- blkcg_pol_bind_cpd_fn *cpd_bind_fn;
-
- blkcg_pol_alloc_pd_fn *pd_alloc_fn;
- blkcg_pol_init_pd_fn *pd_init_fn;
- blkcg_pol_online_pd_fn *pd_online_fn;
- blkcg_pol_offline_pd_fn *pd_offline_fn;
- blkcg_pol_free_pd_fn *pd_free_fn;
- blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
- blkcg_pol_stat_pd_fn *pd_stat_fn;
-};
-
-extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
-extern bool blkcg_debug_stats;
-
-struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
- struct request_queue *q, bool update_hint);
-struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
- struct request_queue *q);
-struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
- struct request_queue *q);
-int blkcg_init_queue(struct request_queue *q);
-void blkcg_exit_queue(struct request_queue *q);
-
-/* Blkio controller policy registration */
-int blkcg_policy_register(struct blkcg_policy *pol);
-void blkcg_policy_unregister(struct blkcg_policy *pol);
-int blkcg_activate_policy(struct request_queue *q,
- const struct blkcg_policy *pol);
-void blkcg_deactivate_policy(struct request_queue *q,
- const struct blkcg_policy *pol);
-
-const char *blkg_dev_name(struct blkcg_gq *blkg);
-void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
- u64 (*prfill)(struct seq_file *,
- struct blkg_policy_data *, int),
- const struct blkcg_policy *pol, int data,
- bool show_total);
-u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
-
-struct blkg_conf_ctx {
- struct gendisk *disk;
- struct blkcg_gq *blkg;
- char *body;
-};
-
-struct gendisk *blkcg_conf_get_disk(char **inputp);
-int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
- char *input, struct blkg_conf_ctx *ctx);
-void blkg_conf_finish(struct blkg_conf_ctx *ctx);
-
-/**
- * blkcg_css - find the current css
- *
- * Find the css associated with either the kthread or the current task.
- * This may return a dying css, so it is up to the caller to use tryget logic
- * to confirm it is alive and well.
- */
-static inline struct cgroup_subsys_state *blkcg_css(void)
-{
- struct cgroup_subsys_state *css;
-
- css = kthread_blkcg();
- if (css)
- return css;
- return task_css(current, io_cgrp_id);
-}
-
-static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
-{
- return css ? container_of(css, struct blkcg, css) : NULL;
-}
-
-/**
- * __bio_blkcg - internal, inconsistent version to get blkcg
- *
- * DO NOT USE.
- * This function is inconsistent and consequently is dangerous to use. The
- * first part of the function returns a blkcg where a reference is owned by the
- * bio. This means it does not need to be rcu protected as it cannot go away
- * with the bio owning a reference to it. However, the latter potentially gets
- * it from task_css(). This can race against task migration and the cgroup
- * dying. It is also semantically different as it must be called rcu protected
- * and is susceptible to failure when trying to get a reference to it.
- * Therefore, it is not ok to assume that *_get() will always succeed on the
- * blkcg returned here.
- */
-static inline struct blkcg *__bio_blkcg(struct bio *bio)
-{
- if (bio && bio->bi_blkg)
- return bio->bi_blkg->blkcg;
- return css_to_blkcg(blkcg_css());
-}
-
-/**
- * bio_blkcg - grab the blkcg associated with a bio
- * @bio: target bio
- *
- * This returns the blkcg associated with a bio, %NULL if not associated.
- * Callers are expected to either handle %NULL or know association has been
- * done prior to calling this.
- */
-static inline struct blkcg *bio_blkcg(struct bio *bio)
-{
- if (bio && bio->bi_blkg)
- return bio->bi_blkg->blkcg;
- return NULL;
-}
-
-static inline bool blk_cgroup_congested(void)
-{
- struct cgroup_subsys_state *css;
- bool ret = false;
-
- rcu_read_lock();
- css = kthread_blkcg();
- if (!css)
- css = task_css(current, io_cgrp_id);
- while (css) {
- if (atomic_read(&css->cgroup->congestion_count)) {
- ret = true;
- break;
- }
- css = css->parent;
- }
- rcu_read_unlock();
- return ret;
-}
-
-/**
- * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
- * @return: true if this bio needs to be submitted with the root blkg context.
- *
- * In order to avoid priority inversions we sometimes need to issue a bio as if
- * it were attached to the root blkg, and then backcharge to the actual owning
- * blkg. The idea is we do bio_blkcg() to look up the actual context for the
- * bio and attach the appropriate blkg to the bio. Then we call this helper and
- * if it is true run with the root blkg for that queue and then do any
- * backcharging to the originating cgroup once the io is complete.
- */
-static inline bool bio_issue_as_root_blkg(struct bio *bio)
-{
- return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
-}
-
-/**
- * blkcg_parent - get the parent of a blkcg
- * @blkcg: blkcg of interest
- *
- * Return the parent blkcg of @blkcg. Can be called anytime.
- */
-static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
-{
- return css_to_blkcg(blkcg->css.parent);
-}
-
-/**
- * __blkg_lookup - internal version of blkg_lookup()
- * @blkcg: blkcg of interest
- * @q: request_queue of interest
- * @update_hint: whether to update lookup hint with the result or not
- *
- * This is internal version and shouldn't be used by policy
- * implementations. Looks up blkgs for the @blkcg - @q pair regardless of
- * @q's bypass state. If @update_hint is %true, the caller should be
- * holding @q->queue_lock and lookup hint is updated on success.
- */
-static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
- struct request_queue *q,
- bool update_hint)
-{
- struct blkcg_gq *blkg;
-
- if (blkcg == &blkcg_root)
- return q->root_blkg;
-
- blkg = rcu_dereference(blkcg->blkg_hint);
- if (blkg && blkg->q == q)
- return blkg;
-
- return blkg_lookup_slowpath(blkcg, q, update_hint);
-}
-
-/**
- * blkg_lookup - lookup blkg for the specified blkcg - q pair
- * @blkcg: blkcg of interest
- * @q: request_queue of interest
- *
- * Lookup blkg for the @blkcg - @q pair. This function should be called
- * under RCU read lock.
- */
-static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
- struct request_queue *q)
-{
- WARN_ON_ONCE(!rcu_read_lock_held());
- return __blkg_lookup(blkcg, q, false);
-}
-
-/**
- * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
- * @q: request_queue of interest
- *
- * Lookup blkg for @q at the root level. See also blkg_lookup().
- */
-static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
-{
- return q->root_blkg;
-}
-
-/**
- * blkg_to_pdata - get policy private data
- * @blkg: blkg of interest
- * @pol: policy of interest
- *
- * Return pointer to private data associated with the @blkg-@pol pair.
- */
-static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
- struct blkcg_policy *pol)
-{
- return blkg ? blkg->pd[pol->plid] : NULL;
-}
-
-static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
- struct blkcg_policy *pol)
-{
- return blkcg ? blkcg->cpd[pol->plid] : NULL;
-}
-
-/**
- * pdata_to_blkg - get blkg associated with policy private data
- * @pd: policy private data of interest
- *
- * @pd is policy private data. Determine the blkg it's associated with.
- */
-static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
-{
- return pd ? pd->blkg : NULL;
-}
-
-static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
-{
- return cpd ? cpd->blkcg : NULL;
-}
-extern void blkcg_destroy_blkgs(struct blkcg *blkcg);
-
-#ifdef CONFIG_CGROUP_WRITEBACK
-
-/**
- * blkcg_cgwb_get - get a reference for blkcg->cgwb_list
- * @blkcg: blkcg of interest
- *
- * This is used to track the number of active wb's related to a blkcg.
- */
-static inline void blkcg_cgwb_get(struct blkcg *blkcg)
-{
- refcount_inc(&blkcg->cgwb_refcnt);
-}
-
-/**
- * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list
- * @blkcg: blkcg of interest
- *
- * This is used to track the number of active wb's related to a blkcg.
- * When this count goes to zero, all active wb has finished so the
- * blkcg can continue destruction by calling blkcg_destroy_blkgs().
- * This work may occur in cgwb_release_workfn() on the cgwb_release
- * workqueue.
- */
-static inline void blkcg_cgwb_put(struct blkcg *blkcg)
-{
- if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
- blkcg_destroy_blkgs(blkcg);
-}
-
-#else
-
-static inline void blkcg_cgwb_get(struct blkcg *blkcg) { }
-
-static inline void blkcg_cgwb_put(struct blkcg *blkcg)
-{
- /* wb isn't being accounted, so trigger destruction right away */
- blkcg_destroy_blkgs(blkcg);
-}
-
-#endif
-
-/**
- * blkg_path - format cgroup path of blkg
- * @blkg: blkg of interest
- * @buf: target buffer
- * @buflen: target buffer length
- *
- * Format the path of the cgroup of @blkg into @buf.
- */
-static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
-{
- return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
-}
-
-/**
- * blkg_get - get a blkg reference
- * @blkg: blkg to get
- *
- * The caller should be holding an existing reference.
- */
-static inline void blkg_get(struct blkcg_gq *blkg)
-{
- percpu_ref_get(&blkg->refcnt);
-}
-
-/**
- * blkg_tryget - try and get a blkg reference
- * @blkg: blkg to get
- *
- * This is for use when doing an RCU lookup of the blkg. We may be in the midst
- * of freeing this blkg, so we can only use it if the refcnt is not zero.
- */
-static inline bool blkg_tryget(struct blkcg_gq *blkg)
-{
- return blkg && percpu_ref_tryget(&blkg->refcnt);
-}
-
-/**
- * blkg_tryget_closest - try and get a blkg ref on the closet blkg
- * @blkg: blkg to get
- *
- * This needs to be called rcu protected. As the failure mode here is to walk
- * up the blkg tree, this ensure that the blkg->parent pointers are always
- * valid. This returns the blkg that it ended up taking a reference on or %NULL
- * if no reference was taken.
- */
-static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
-{
- struct blkcg_gq *ret_blkg = NULL;
-
- WARN_ON_ONCE(!rcu_read_lock_held());
-
- while (blkg) {
- if (blkg_tryget(blkg)) {
- ret_blkg = blkg;
- break;
- }
- blkg = blkg->parent;
- }
-
- return ret_blkg;
-}
-
-/**
- * blkg_put - put a blkg reference
- * @blkg: blkg to put
- */
-static inline void blkg_put(struct blkcg_gq *blkg)
-{
- percpu_ref_put(&blkg->refcnt);
-}
-
-/**
- * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
- * @d_blkg: loop cursor pointing to the current descendant
- * @pos_css: used for iteration
- * @p_blkg: target blkg to walk descendants of
- *
- * Walk @c_blkg through the descendants of @p_blkg. Must be used with RCU
- * read locked. If called under either blkcg or queue lock, the iteration
- * is guaranteed to include all and only online blkgs. The caller may
- * update @pos_css by calling css_rightmost_descendant() to skip subtree.
- * @p_blkg is included in the iteration and the first node to be visited.
- */
-#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \
- css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \
- if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
- (p_blkg)->q, false)))
-
-/**
- * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
- * @d_blkg: loop cursor pointing to the current descendant
- * @pos_css: used for iteration
- * @p_blkg: target blkg to walk descendants of
- *
- * Similar to blkg_for_each_descendant_pre() but performs post-order
- * traversal instead. Synchronization rules are the same. @p_blkg is
- * included in the iteration and the last node to be visited.
- */
-#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \
- css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
- if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
- (p_blkg)->q, false)))
-
-#ifdef CONFIG_BLK_DEV_THROTTLING
-extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
- struct bio *bio);
-#else
-static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
- struct bio *bio) { return false; }
-#endif
-
-bool __blkcg_punt_bio_submit(struct bio *bio);
-
-static inline bool blkcg_punt_bio_submit(struct bio *bio)
-{
- if (bio->bi_opf & REQ_CGROUP_PUNT)
- return __blkcg_punt_bio_submit(bio);
- else
- return false;
-}
-
-static inline void blkcg_bio_issue_init(struct bio *bio)
-{
- bio_issue_init(&bio->bi_issue, bio_sectors(bio));
-}
-
-static inline bool blkcg_bio_issue_check(struct request_queue *q,
- struct bio *bio)
-{
- struct blkcg_gq *blkg;
- bool throtl = false;
-
- rcu_read_lock();
-
- if (!bio->bi_blkg) {
- char b[BDEVNAME_SIZE];
-
- WARN_ONCE(1,
- "no blkg associated for bio on block-device: %s\n",
- bio_devname(bio, b));
- bio_associate_blkg(bio);
- }
-
- blkg = bio->bi_blkg;
-
- throtl = blk_throtl_bio(q, blkg, bio);
-
- if (!throtl) {
- struct blkg_iostat_set *bis;
- int rwd, cpu;
-
- if (op_is_discard(bio->bi_opf))
- rwd = BLKG_IOSTAT_DISCARD;
- else if (op_is_write(bio->bi_opf))
- rwd = BLKG_IOSTAT_WRITE;
- else
- rwd = BLKG_IOSTAT_READ;
-
- cpu = get_cpu();
- bis = per_cpu_ptr(blkg->iostat_cpu, cpu);
- u64_stats_update_begin(&bis->sync);
-
- /*
- * If the bio is flagged with BIO_QUEUE_ENTERED it means this
- * is a split bio and we would have already accounted for the
- * size of the bio.
- */
- if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
- bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
- bis->cur.ios[rwd]++;
-
- u64_stats_update_end(&bis->sync);
- if (cgroup_subsys_on_dfl(io_cgrp_subsys))
- cgroup_rstat_updated(blkg->blkcg->css.cgroup, cpu);
- put_cpu();
- }
-
- blkcg_bio_issue_init(bio);
-
- rcu_read_unlock();
- return !throtl;
-}
-
-static inline void blkcg_use_delay(struct blkcg_gq *blkg)
-{
- if (atomic_add_return(1, &blkg->use_delay) == 1)
- atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
-}
-
-static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
-{
- int old = atomic_read(&blkg->use_delay);
-
- if (old == 0)
- return 0;
-
- /*
- * We do this song and dance because we can race with somebody else
- * adding or removing delay. If we just did an atomic_dec we'd end up
- * negative and we'd already be in trouble. We need to subtract 1 and
- * then check to see if we were the last delay so we can drop the
- * congestion count on the cgroup.
- */
- while (old) {
- int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
- if (cur == old)
- break;
- old = cur;
- }
-
- if (old == 0)
- return 0;
- if (old == 1)
- atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
- return 1;
-}
-
-static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
-{
- int old = atomic_read(&blkg->use_delay);
- if (!old)
- return;
- /* We only want 1 person clearing the congestion count for this blkg. */
- while (old) {
- int cur = atomic_cmpxchg(&blkg->use_delay, old, 0);
- if (cur == old) {
- atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
- break;
- }
- old = cur;
- }
-}
-
-void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
-void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
+void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
-#else /* CONFIG_BLK_CGROUP */
-
-struct blkcg {
-};
-
-struct blkg_policy_data {
-};
-
-struct blkcg_policy_data {
-};
-
-struct blkcg_gq {
-};
+bool blk_cgroup_congested(void);
+void blkcg_pin_online(struct cgroup_subsys_state *blkcg_css);
+void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css);
+struct list_head *blkcg_get_cgwb_list(struct cgroup_subsys_state *css);
+struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio);
-struct blkcg_policy {
-};
+#else /* CONFIG_BLK_CGROUP */
#define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }
+static inline struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
+{
+ return NULL;
+}
+#endif /* CONFIG_BLK_CGROUP */
-#ifdef CONFIG_BLOCK
-
-static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
-
-static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
-static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
-{ return NULL; }
-static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
-static inline void blkcg_exit_queue(struct request_queue *q) { }
-static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
-static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
-static inline int blkcg_activate_policy(struct request_queue *q,
- const struct blkcg_policy *pol) { return 0; }
-static inline void blkcg_deactivate_policy(struct request_queue *q,
- const struct blkcg_policy *pol) { }
-
-static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
-static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
-
-static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
- struct blkcg_policy *pol) { return NULL; }
-static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
-static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
-static inline void blkg_get(struct blkcg_gq *blkg) { }
-static inline void blkg_put(struct blkcg_gq *blkg) { }
-
-static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
-static inline void blkcg_bio_issue_init(struct bio *bio) { }
-static inline bool blkcg_bio_issue_check(struct request_queue *q,
- struct bio *bio) { return true; }
-
-#define blk_queue_for_each_rl(rl, q) \
- for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
+int blkcg_set_fc_appid(char *app_id, u64 cgrp_id, size_t app_id_len);
+char *blkcg_get_fc_appid(struct bio *bio);
-#endif /* CONFIG_BLOCK */
-#endif /* CONFIG_BLK_CGROUP */
#endif /* _BLK_CGROUP_H */
diff --git a/include/linux/blk-crypto-profile.h b/include/linux/blk-crypto-profile.h
new file mode 100644
index 000000000000..bbab65bd5428
--- /dev/null
+++ b/include/linux/blk-crypto-profile.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC
+ */
+
+#ifndef __LINUX_BLK_CRYPTO_PROFILE_H
+#define __LINUX_BLK_CRYPTO_PROFILE_H
+
+#include <linux/bio.h>
+#include <linux/blk-crypto.h>
+
+struct blk_crypto_profile;
+
+/**
+ * struct blk_crypto_ll_ops - functions to control inline encryption hardware
+ *
+ * Low-level operations for controlling inline encryption hardware. This
+ * interface must be implemented by storage drivers that support inline
+ * encryption. All functions may sleep, are serialized by profile->lock, and
+ * are never called while profile->dev (if set) is runtime-suspended.
+ */
+struct blk_crypto_ll_ops {
+
+ /**
+ * @keyslot_program: Program a key into the inline encryption hardware.
+ *
+ * Program @key into the specified @slot in the inline encryption
+ * hardware, overwriting any key that the keyslot may already contain.
+	 * The keyslot is guaranteed not to be in use by any I/O.
+ *
+ * This is required if the device has keyslots. Otherwise (i.e. if the
+ * device is a layered device, or if the device is real hardware that
+ * simply doesn't have the concept of keyslots) it is never called.
+ *
+ * Must return 0 on success, or -errno on failure.
+ */
+ int (*keyslot_program)(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ unsigned int slot);
+
+ /**
+ * @keyslot_evict: Evict a key from the inline encryption hardware.
+ *
+ * If the device has keyslots, this function must evict the key from the
+	 * specified @slot. The slot will contain @key, but @slot alone should
+	 * be sufficient; the @key argument is provided for convenience only.
+	 * The keyslot is guaranteed not to be in use by any I/O.
+ *
+ * If the device doesn't have keyslots itself, this function must evict
+ * @key from any underlying devices. @slot won't be valid in this case.
+ *
+ * If there are no keyslots and no underlying devices, this function
+ * isn't required.
+ *
+ * Must return 0 on success, or -errno on failure.
+ */
+ int (*keyslot_evict)(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ unsigned int slot);
+};
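
A sketch of a driver filling in these hooks; my_hw_write_key() and my_hw_clear_key() are hypothetical stand-ins for real keyslot programming routines:

	static int my_keyslot_program(struct blk_crypto_profile *profile,
				      const struct blk_crypto_key *key,
				      unsigned int slot)
	{
		/* Hypothetical hardware helper; overwrites whatever the slot held. */
		return my_hw_write_key(profile->dev, slot, key->raw, key->size);
	}

	static int my_keyslot_evict(struct blk_crypto_profile *profile,
				    const struct blk_crypto_key *key,
				    unsigned int slot)
	{
		/* Hypothetical hardware helper; zeroizes the slot. */
		return my_hw_clear_key(profile->dev, slot);
	}

	static const struct blk_crypto_ll_ops my_crypto_ops = {
		.keyslot_program	= my_keyslot_program,
		.keyslot_evict		= my_keyslot_evict,
	};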
+
+/**
+ * struct blk_crypto_profile - inline encryption profile for a device
+ *
+ * This struct contains a storage device's inline encryption capabilities (e.g.
+ * the supported crypto algorithms), driver-provided functions to control the
+ * inline encryption hardware (e.g. programming and evicting keys), and optional
+ * device-independent keyslot management data.
+ */
+struct blk_crypto_profile {
+
+ /* public: Drivers must initialize the following fields. */
+
+ /**
+ * @ll_ops: Driver-provided functions to control the inline encryption
+ * hardware, e.g. program and evict keys.
+ */
+ struct blk_crypto_ll_ops ll_ops;
+
+ /**
+ * @max_dun_bytes_supported: The maximum number of bytes supported for
+ * specifying the data unit number (DUN). Specifically, the range of
+ * supported DUNs is 0 through (1 << (8 * max_dun_bytes_supported)) - 1.
+ */
+ unsigned int max_dun_bytes_supported;
+
+ /**
+ * @modes_supported: Array of bitmasks that specifies whether each
+ * combination of crypto mode and data unit size is supported.
+ * Specifically, the i'th bit of modes_supported[crypto_mode] is set if
+ * crypto_mode can be used with a data unit size of (1 << i). Note that
+ * only data unit sizes that are powers of 2 can be supported.
+ */
+ unsigned int modes_supported[BLK_ENCRYPTION_MODE_MAX];
+
+ /**
+ * @dev: An optional device for runtime power management. If the driver
+ * provides this device, it will be runtime-resumed before any function
+ * in @ll_ops is called and will remain resumed during the call.
+ */
+ struct device *dev;
+
+ /* private: The following fields shouldn't be accessed by drivers. */
+
+ /* Number of keyslots, or 0 if not applicable */
+ unsigned int num_slots;
+
+ /*
+ * Serializes all calls to functions in @ll_ops as well as all changes
+ * to @slot_hashtable. This can also be taken in read mode to look up
+ * keyslots while ensuring that they can't be changed concurrently.
+ */
+ struct rw_semaphore lock;
+
+ /* List of idle slots, with least recently used slot at front */
+ wait_queue_head_t idle_slots_wait_queue;
+ struct list_head idle_slots;
+ spinlock_t idle_slots_lock;
+
+ /*
+	 * Hash table which maps struct blk_crypto_key * to keyslots, so that we
+ * can find a key's keyslot in O(1) time rather than O(num_slots).
+ * Protected by 'lock'.
+ */
+ struct hlist_head *slot_hashtable;
+ unsigned int log_slot_ht_size;
+
+ /* Per-keyslot data */
+ struct blk_crypto_keyslot *slots;
+};
+
+int blk_crypto_profile_init(struct blk_crypto_profile *profile,
+ unsigned int num_slots);
+
+int devm_blk_crypto_profile_init(struct device *dev,
+ struct blk_crypto_profile *profile,
+ unsigned int num_slots);
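
A sketch of tying it together in a probe path; the slot count, DUN size, and supported data unit sizes are illustrative, and my_crypto_ops comes from the sketch above:

	static int my_init_crypto(struct device *dev,
				  struct blk_crypto_profile *profile)
	{
		int err;

		err = devm_blk_crypto_profile_init(dev, profile, 32);
		if (err)
			return err;

		profile->ll_ops = my_crypto_ops;
		profile->dev = dev;
		profile->max_dun_bytes_supported = 8;
		/* Bit i set <=> data unit size (1 << i) supported: 512 and 4096 here. */
		profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |= 512 | 4096;
		return 0;
	}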
+
+unsigned int blk_crypto_keyslot_index(struct blk_crypto_keyslot *slot);
+
+blk_status_t blk_crypto_get_keyslot(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ struct blk_crypto_keyslot **slot_ptr);
+
+void blk_crypto_put_keyslot(struct blk_crypto_keyslot *slot);
+
+bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
+ const struct blk_crypto_config *cfg);
+
+int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key);
+
+void blk_crypto_reprogram_all_keys(struct blk_crypto_profile *profile);
+
+void blk_crypto_profile_destroy(struct blk_crypto_profile *profile);
+
+void blk_crypto_intersect_capabilities(struct blk_crypto_profile *parent,
+ const struct blk_crypto_profile *child);
+
+bool blk_crypto_has_capabilities(const struct blk_crypto_profile *target,
+ const struct blk_crypto_profile *reference);
+
+void blk_crypto_update_capabilities(struct blk_crypto_profile *dst,
+ const struct blk_crypto_profile *src);
+
+#endif /* __LINUX_BLK_CRYPTO_PROFILE_H */
diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h
new file mode 100644
index 000000000000..69b24fe92cbf
--- /dev/null
+++ b/include/linux/blk-crypto.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC
+ */
+
+#ifndef __LINUX_BLK_CRYPTO_H
+#define __LINUX_BLK_CRYPTO_H
+
+#include <linux/types.h>
+
+enum blk_crypto_mode_num {
+ BLK_ENCRYPTION_MODE_INVALID,
+ BLK_ENCRYPTION_MODE_AES_256_XTS,
+ BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV,
+ BLK_ENCRYPTION_MODE_ADIANTUM,
+ BLK_ENCRYPTION_MODE_MAX,
+};
+
+#define BLK_CRYPTO_MAX_KEY_SIZE 64
+/**
+ * struct blk_crypto_config - an inline encryption key's crypto configuration
+ * @crypto_mode: encryption algorithm this key is for
+ * @data_unit_size: the data unit size for all encryption/decryptions with this
+ * key. This is the size in bytes of each individual plaintext and
+ * ciphertext. This is always a power of 2. It might be e.g. the
+ * filesystem block size or the disk sector size.
+ * @dun_bytes: the maximum number of bytes of DUN used when using this key
+ */
+struct blk_crypto_config {
+ enum blk_crypto_mode_num crypto_mode;
+ unsigned int data_unit_size;
+ unsigned int dun_bytes;
+};
+
+/**
+ * struct blk_crypto_key - an inline encryption key
+ * @crypto_cfg: the crypto configuration (like crypto_mode, key size) for this
+ * key
+ * @data_unit_size_bits: log2 of data_unit_size
+ * @size: size of this key in bytes (determined by @crypto_cfg.crypto_mode)
+ * @raw: the raw bytes of this key. Only the first @size bytes are used.
+ *
+ * A blk_crypto_key is immutable once created, and many bios can reference it at
+ * the same time. It must not be freed until all bios using it have completed
+ * and it has been evicted from all devices on which it may have been used.
+ */
+struct blk_crypto_key {
+ struct blk_crypto_config crypto_cfg;
+ unsigned int data_unit_size_bits;
+ unsigned int size;
+ u8 raw[BLK_CRYPTO_MAX_KEY_SIZE];
+};
+
+#define BLK_CRYPTO_MAX_IV_SIZE 32
+#define BLK_CRYPTO_DUN_ARRAY_SIZE (BLK_CRYPTO_MAX_IV_SIZE / sizeof(u64))
+
+/**
+ * struct bio_crypt_ctx - an inline encryption context
+ * @bc_key: the key, algorithm, and data unit size to use
+ * @bc_dun: the data unit number (starting IV) to use
+ *
+ * A bio_crypt_ctx specifies that the contents of the bio will be encrypted (for
+ * write requests) or decrypted (for read requests) inline by the storage device
+ * or controller, or by the crypto API fallback.
+ */
+struct bio_crypt_ctx {
+ const struct blk_crypto_key *bc_key;
+ u64 bc_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
+};
+
+#include <linux/blk_types.h>
+#include <linux/blkdev.h>
+
+struct request;
+struct request_queue;
+
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+
+static inline bool bio_has_crypt_ctx(struct bio *bio)
+{
+ return bio->bi_crypt_context;
+}
+
+void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
+ const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
+ gfp_t gfp_mask);
+
+bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
+ unsigned int bytes,
+ const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]);
+
+int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
+ enum blk_crypto_mode_num crypto_mode,
+ unsigned int dun_bytes,
+ unsigned int data_unit_size);
+
+int blk_crypto_start_using_key(const struct blk_crypto_key *key,
+ struct request_queue *q);
+
+int blk_crypto_evict_key(struct request_queue *q,
+ const struct blk_crypto_key *key);
+
+bool blk_crypto_config_supported(struct request_queue *q,
+ const struct blk_crypto_config *cfg);
+
+#else /* CONFIG_BLK_INLINE_ENCRYPTION */
+
+static inline bool bio_has_crypt_ctx(struct bio *bio)
+{
+ return false;
+}
+
+#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
+
+int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
+/**
+ * bio_crypt_clone - clone bio encryption context
+ * @dst: destination bio
+ * @src: source bio
+ * @gfp_mask: memory allocation flags
+ *
+ * If @src has an encryption context, clone it to @dst.
+ *
+ * Return: 0 on success, -ENOMEM if out of memory. -ENOMEM is only possible if
+ * @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
+ */
+static inline int bio_crypt_clone(struct bio *dst, struct bio *src,
+ gfp_t gfp_mask)
+{
+ if (bio_has_crypt_ctx(src))
+ return __bio_crypt_clone(dst, src, gfp_mask);
+ return 0;
+}
+
+#endif /* __LINUX_BLK_CRYPTO_H */
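Connecting the pieces above, a hedged usage sketch (not part of the patch): initialize a key, declare it in use on a queue, then attach an inline-encryption context to a bio before submission. The caller-provided my_key storage must outlive every bio that references it, per the lifetime rule documented on struct blk_crypto_key; my_submit_encrypted() and its parameters are hypothetical.

static int my_submit_encrypted(struct request_queue *q, struct bio *bio,
			       struct blk_crypto_key *my_key,
			       const u8 *raw_key, u64 first_dun)
{
	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { first_dun };
	int err;

	/* AES-256-XTS, 8 DUN bytes, 4096-byte data units. */
	err = blk_crypto_init_key(my_key, raw_key,
				  BLK_ENCRYPTION_MODE_AES_256_XTS, 8, 4096);
	if (err)
		return err;

	/* Fails if neither the hardware nor the crypto API fallback
	 * supports this crypto configuration. */
	err = blk_crypto_start_using_key(my_key, q);
	if (err)
		return err;

	bio_crypt_set_ctx(bio, my_key, dun, GFP_NOIO);
	submit_bio(bio);
	return 0;
}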
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
new file mode 100644
index 000000000000..378b2459efe2
--- /dev/null
+++ b/include/linux/blk-integrity.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BLK_INTEGRITY_H
+#define _LINUX_BLK_INTEGRITY_H
+
+#include <linux/blk-mq.h>
+
+struct request;
+
+enum blk_integrity_flags {
+ BLK_INTEGRITY_VERIFY = 1 << 0,
+ BLK_INTEGRITY_GENERATE = 1 << 1,
+ BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
+ BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
+};
+
+struct blk_integrity_iter {
+ void *prot_buf;
+ void *data_buf;
+ sector_t seed;
+ unsigned int data_size;
+ unsigned short interval;
+ unsigned char tuple_size;
+ const char *disk_name;
+};
+
+typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
+typedef void (integrity_prepare_fn) (struct request *);
+typedef void (integrity_complete_fn) (struct request *, unsigned int);
+
+struct blk_integrity_profile {
+ integrity_processing_fn *generate_fn;
+ integrity_processing_fn *verify_fn;
+ integrity_prepare_fn *prepare_fn;
+ integrity_complete_fn *complete_fn;
+ const char *name;
+};
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+void blk_integrity_register(struct gendisk *, struct blk_integrity *);
+void blk_integrity_unregister(struct gendisk *);
+int blk_integrity_compare(struct gendisk *, struct gendisk *);
+int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
+ struct scatterlist *);
+int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
+
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+{
+ struct blk_integrity *bi = &disk->queue->integrity;
+
+ if (!bi->profile)
+ return NULL;
+
+ return bi;
+}
+
+static inline struct blk_integrity *
+bdev_get_integrity(struct block_device *bdev)
+{
+ return blk_get_integrity(bdev->bd_disk);
+}
+
+static inline bool
+blk_integrity_queue_supports_integrity(struct request_queue *q)
+{
+ return q->integrity.profile;
+}
+
+static inline void blk_queue_max_integrity_segments(struct request_queue *q,
+ unsigned int segs)
+{
+ q->limits.max_integrity_segments = segs;
+}
+
+static inline unsigned short
+queue_max_integrity_segments(const struct request_queue *q)
+{
+ return q->limits.max_integrity_segments;
+}
+
+/**
+ * bio_integrity_intervals - Return number of integrity intervals for a bio
+ * @bi: blk_integrity profile for device
+ * @sectors: Size of the bio in 512-byte sectors
+ *
+ * Description: The block layer calculates everything in 512-byte
+ * sectors but integrity metadata is done in terms of the data integrity
+ * interval size of the storage device. Convert the block layer sectors
+ * to the appropriate number of integrity intervals.
+ */
+static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return sectors >> (bi->interval_exp - 9);
+}
+
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
+}
+
+static inline bool blk_integrity_rq(struct request *rq)
+{
+ return rq->cmd_flags & REQ_INTEGRITY;
+}
+
+/*
+ * Return the first bvec that contains integrity data. Only drivers that are
+ * limited to a single integrity segment should use this helper.
+ */
+static inline struct bio_vec *rq_integrity_vec(struct request *rq)
+{
+ if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
+ return NULL;
+ return rq->bio->bi_integrity->bip_vec;
+}
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+static inline int blk_rq_count_integrity_sg(struct request_queue *q,
+ struct bio *b)
+{
+ return 0;
+}
+static inline int blk_rq_map_integrity_sg(struct request_queue *q,
+ struct bio *b,
+ struct scatterlist *s)
+{
+ return 0;
+}
+static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
+{
+ return NULL;
+}
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+{
+ return NULL;
+}
+static inline bool
+blk_integrity_queue_supports_integrity(struct request_queue *q)
+{
+ return false;
+}
+static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
+{
+ return 0;
+}
+static inline void blk_integrity_register(struct gendisk *d,
+ struct blk_integrity *b)
+{
+}
+static inline void blk_integrity_unregister(struct gendisk *d)
+{
+}
+static inline void blk_queue_max_integrity_segments(struct request_queue *q,
+ unsigned int segs)
+{
+}
+static inline unsigned short
+queue_max_integrity_segments(const struct request_queue *q)
+{
+ return 0;
+}
+
+static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return 0;
+}
+
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return 0;
+}
+static inline int blk_integrity_rq(struct request *rq)
+{
+ return 0;
+}
+
+static inline struct bio_vec *rq_integrity_vec(struct request *rq)
+{
+ return NULL;
+}
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+#endif /* _LINUX_BLK_INTEGRITY_H */
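A quick worked example of the interval arithmetic above (illustration only, not from the patch): on a device with 4096-byte protection intervals (interval_exp = 12) and 8-byte tuples, a 64-sector (32 KiB) request spans 64 >> (12 - 9) = 8 intervals, hence 8 * 8 = 64 bytes of protection information. A hypothetical driver could size its protection buffer accordingly:

static unsigned int my_prot_buf_len(struct request *rq)
{
	struct blk_integrity *bi = blk_get_integrity(rq->q->disk);

	if (!bi || !blk_integrity_rq(rq))
		return 0;

	/* e.g. 64 sectors with interval_exp = 12, tuple_size = 8 -> 64 */
	return bio_integrity_bytes(bi, blk_rq_sectors(rq));
}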
diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h
index 0b1f45c62623..ca544e1d3508 100644
--- a/include/linux/blk-mq-pci.h
+++ b/include/linux/blk-mq-pci.h
@@ -5,7 +5,7 @@
struct blk_mq_queue_map;
struct pci_dev;
-int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
- int offset);
+void blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
+ int offset);
#endif /* _LINUX_BLK_MQ_PCI_H */
diff --git a/include/linux/blk-mq-rdma.h b/include/linux/blk-mq-rdma.h
index 5cc5f0f36218..53b58c610e76 100644
--- a/include/linux/blk-mq-rdma.h
+++ b/include/linux/blk-mq-rdma.h
@@ -5,7 +5,7 @@
struct blk_mq_tag_set;
struct ib_device;
-int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
+void blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
struct ib_device *dev, int first_vec);
#endif /* _LINUX_BLK_MQ_RDMA_H */
diff --git a/include/linux/blk-mq-virtio.h b/include/linux/blk-mq-virtio.h
index 687ae287e1dc..13226e9b22dd 100644
--- a/include/linux/blk-mq-virtio.h
+++ b/include/linux/blk-mq-virtio.h
@@ -5,7 +5,7 @@
struct blk_mq_queue_map;
struct virtio_device;
-int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
+void blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
struct virtio_device *vdev, int first_vec);
#endif /* _LINUX_BLK_MQ_VIRTIO_H */
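These bus helpers now return void because queue mapping can no longer fail from the caller's point of view: when the bus cannot supply affinity information, the helpers themselves fall back to the default blk_mq_map_queues() spread. A hedged sketch of a PCI driver's ->map_queues callback (struct my_dev and its pdev member are hypothetical):

struct my_dev {
	struct pci_dev *pdev;
};

static void my_pci_map_queues(struct blk_mq_tag_set *set)
{
	struct my_dev *mydev = set->driver_data;

	/* Spread hw queues across the device's MSI-X vectors, offset 0. */
	blk_mq_pci_map_queues(&set->map[HCTX_TYPE_DEFAULT], mydev->pdev, 0);
}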
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 11cfd6470b1a..d6119c5d1069 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -4,11 +4,290 @@
#include <linux/blkdev.h>
#include <linux/sbitmap.h>
-#include <linux/srcu.h>
+#include <linux/lockdep.h>
+#include <linux/scatterlist.h>
+#include <linux/prefetch.h>
struct blk_mq_tags;
struct blk_flush_queue;
+#define BLKDEV_MIN_RQ 4
+#define BLKDEV_DEFAULT_RQ 128
+
+enum rq_end_io_ret {
+ RQ_END_IO_NONE,
+ RQ_END_IO_FREE,
+};
+
+typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
+
+/* request flags */
+typedef __u32 __bitwise req_flags_t;
+
+/* drive may already have started this one */
+#define RQF_STARTED ((__force req_flags_t)(1 << 1))
+/* may not be passed by ioscheduler */
+#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3))
+/* request for flush sequence */
+#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4))
+/* merge of different types, fail separately */
+#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5))
+/* track inflight for MQ */
+#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6))
+/* don't call prep for this one */
+#define RQF_DONTPREP ((__force req_flags_t)(1 << 7))
+/* vaguely specified driver internal error. Ignored by the block layer */
+#define RQF_FAILED ((__force req_flags_t)(1 << 10))
+/* don't warn about errors */
+#define RQF_QUIET ((__force req_flags_t)(1 << 11))
+/* elevator private data attached */
+#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12))
+/* account into disk and partition IO statistics */
+#define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
+/* runtime pm request */
+#define RQF_PM ((__force req_flags_t)(1 << 15))
+/* on IO scheduler merge hash */
+#define RQF_HASHED ((__force req_flags_t)(1 << 16))
+/* track IO completion time */
+#define RQF_STATS ((__force req_flags_t)(1 << 17))
+/*
+ * Look at ->special_vec for the actual data payload instead of the
+ * bio chain.
+ */
+#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
+/* The per-zone write lock is held for this request */
+#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
+/* already slept for hybrid poll */
+#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20))
+/* ->timeout has been called, don't expire again */
+#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21))
+/* queue has elevator attached */
+#define RQF_ELV ((__force req_flags_t)(1 << 22))
+#define RQF_RESV ((__force req_flags_t)(1 << 23))
+
+/* flags that prevent us from merging requests: */
+#define RQF_NOMERGE_FLAGS \
+ (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
+
+enum mq_rq_state {
+ MQ_RQ_IDLE = 0,
+ MQ_RQ_IN_FLIGHT = 1,
+ MQ_RQ_COMPLETE = 2,
+};
+
+/*
+ * Try to put the fields that are referenced together in the same cacheline.
+ *
+ * If you modify this structure, make sure to update blk_rq_init() and
+ * especially blk_mq_rq_ctx_init() to take care of the added fields.
+ */
+struct request {
+ struct request_queue *q;
+ struct blk_mq_ctx *mq_ctx;
+ struct blk_mq_hw_ctx *mq_hctx;
+
+ blk_opf_t cmd_flags; /* op and common flags */
+ req_flags_t rq_flags;
+
+ int tag;
+ int internal_tag;
+
+ unsigned int timeout;
+
+ /* the following two fields are internal, NEVER access directly */
+ unsigned int __data_len; /* total data len */
+ sector_t __sector; /* sector cursor */
+
+ struct bio *bio;
+ struct bio *biotail;
+
+ union {
+ struct list_head queuelist;
+ struct request *rq_next;
+ };
+
+ struct block_device *part;
+#ifdef CONFIG_BLK_RQ_ALLOC_TIME
+ /* Time that the first bio started allocating this request. */
+ u64 alloc_time_ns;
+#endif
+ /* Time that this request was allocated for this IO. */
+ u64 start_time_ns;
+ /* Time that I/O was submitted to the device. */
+ u64 io_start_time_ns;
+
+#ifdef CONFIG_BLK_WBT
+ unsigned short wbt_flags;
+#endif
+ /*
+	 * rq sectors used for blk stats. It has the same value as
+	 * blk_rq_sectors(rq), except that it is never zeroed by
+	 * completion.
+ */
+ unsigned short stats_sectors;
+
+ /*
+ * Number of scatter-gather DMA addr+len pairs after
+ * physical address coalescing is performed.
+ */
+ unsigned short nr_phys_segments;
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ unsigned short nr_integrity_segments;
+#endif
+
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ struct bio_crypt_ctx *crypt_ctx;
+ struct blk_crypto_keyslot *crypt_keyslot;
+#endif
+
+ unsigned short write_hint;
+ unsigned short ioprio;
+
+ enum mq_rq_state state;
+ atomic_t ref;
+
+ unsigned long deadline;
+
+ /*
+ * The hash is used inside the scheduler, and killed once the
+ * request reaches the dispatch list. The ipi_list is only used
+ * to queue the request for softirq completion, which is long
+ * after the request has been unhashed (and even removed from
+ * the dispatch list).
+ */
+ union {
+ struct hlist_node hash; /* merge hash */
+ struct llist_node ipi_list;
+ };
+
+ /*
+ * The rb_node is only used inside the io scheduler, requests
+ * are pruned when moved to the dispatch queue. So let the
+ * completion_data share space with the rb_node.
+ */
+ union {
+ struct rb_node rb_node; /* sort/lookup */
+ struct bio_vec special_vec;
+ void *completion_data;
+ };
+
+
+ /*
+ * Three pointers are available for the IO schedulers, if they need
+ * more they have to dynamically allocate it. Flush requests are
+ * never put on the IO scheduler. So let the flush fields share
+ * space with the elevator data.
+ */
+ union {
+ struct {
+ struct io_cq *icq;
+ void *priv[2];
+ } elv;
+
+ struct {
+ unsigned int seq;
+ struct list_head list;
+ rq_end_io_fn *saved_end_io;
+ } flush;
+ };
+
+ union {
+ struct __call_single_data csd;
+ u64 fifo_time;
+ };
+
+ /*
+ * completion callback.
+ */
+ rq_end_io_fn *end_io;
+ void *end_io_data;
+};
+
+static inline enum req_op req_op(const struct request *req)
+{
+ return req->cmd_flags & REQ_OP_MASK;
+}
+
+static inline bool blk_rq_is_passthrough(struct request *rq)
+{
+ return blk_op_is_passthrough(req_op(rq));
+}
+
+static inline unsigned short req_get_ioprio(struct request *req)
+{
+ return req->ioprio;
+}
+
+#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
+
+#define rq_dma_dir(rq) \
+ (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
+#define rq_list_add(listptr, rq) do { \
+ (rq)->rq_next = *(listptr); \
+ *(listptr) = rq; \
+} while (0)
+
+#define rq_list_pop(listptr) \
+({ \
+ struct request *__req = NULL; \
+ if ((listptr) && *(listptr)) { \
+ __req = *(listptr); \
+ *(listptr) = __req->rq_next; \
+ } \
+ __req; \
+})
+
+#define rq_list_peek(listptr) \
+({ \
+ struct request *__req = NULL; \
+ if ((listptr) && *(listptr)) \
+ __req = *(listptr); \
+ __req; \
+})
+
+#define rq_list_for_each(listptr, pos) \
+ for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))
+
+#define rq_list_for_each_safe(listptr, pos, nxt) \
+ for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos); \
+ pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL)
+
+#define rq_list_next(rq) (rq)->rq_next
+#define rq_list_empty(list) ((list) == (struct request *) NULL)
+
+/**
+ * rq_list_move() - move a struct request from one list to another
+ * @src: The source list @rq is currently in
+ * @dst: The destination list that @rq will be appended to
+ * @rq: The request to move
+ * @prev: The request preceding @rq in @src (NULL if @rq is the head)
+ */
+static inline void rq_list_move(struct request **src, struct request **dst,
+ struct request *rq, struct request *prev)
+{
+ if (prev)
+ prev->rq_next = rq->rq_next;
+ else
+ *src = rq->rq_next;
+ rq_list_add(dst, rq);
+}
+
+/**
+ * enum blk_eh_timer_return - How the timeout handler should proceed
+ * @BLK_EH_DONE: The block driver completed the command or will complete it at
+ * a later time.
+ * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
+ * request to complete.
+ */
+enum blk_eh_timer_return {
+ BLK_EH_DONE,
+ BLK_EH_RESET_TIMER,
+};
+
+#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
+#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
+
/**
* struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
* block device
@@ -125,9 +404,6 @@ struct blk_mq_hw_ctx {
unsigned long queued;
/** @run: Number of dispatched requests. */
unsigned long run;
-#define BLK_MQ_MAX_DISPATCH_ORDER 7
- /** @dispatched: Number of dispatch requests by queue. */
- unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
/** @numa_node: NUMA node the storage adapter has been connected to. */
unsigned int numa_node;
@@ -140,18 +416,13 @@ struct blk_mq_hw_ctx {
*/
atomic_t nr_active;
+	/** @cpuhp_online: List to store requests if the CPU is going to die */
+ struct hlist_node cpuhp_online;
	/** @cpuhp_dead: List to store requests if some CPU dies. */
struct hlist_node cpuhp_dead;
/** @kobj: Kernel object for sysfs. */
struct kobject kobj;
- /** @poll_considered: Count times blk_poll() was called. */
- unsigned long poll_considered;
- /** @poll_invoked: Count how many requests blk_poll() polled. */
- unsigned long poll_invoked;
- /** @poll_success: Count how many polled requests were completed. */
- unsigned long poll_success;
-
#ifdef CONFIG_BLK_DEBUG_FS
/**
* @debugfs_dir: debugfs directory for this hardware queue. Named
@@ -162,15 +433,11 @@ struct blk_mq_hw_ctx {
struct dentry *sched_debugfs_dir;
#endif
- /** @hctx_list: List of all hardware queues. */
- struct list_head hctx_list;
-
/**
- * @srcu: Sleepable RCU. Use as lock when type of the hardware queue is
- * blocking (BLK_MQ_F_BLOCKING). Must be the last member - see also
- * blk_mq_hw_ctx_size().
+ * @hctx_list: if this hctx is not in use, this is an entry in
+ * q->unused_hctx_list.
*/
- struct srcu_struct srcu[0];
+ struct list_head hctx_list;
};
/**
@@ -228,6 +495,9 @@ enum hctx_type {
* tag set.
* @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues
* elements.
+ * @shared_tags:
+ * Shared set of tags. Has @nr_hw_queues elements. If set,
+ * shared by all @tags.
* @tag_list_lock: Serializes tag_list accesses.
* @tag_list: List of the request queues that use this tag set. See also
* request_queue.tag_set_list.
@@ -247,6 +517,8 @@ struct blk_mq_tag_set {
struct blk_mq_tags **tags;
+ struct blk_mq_tags *shared_tags;
+
struct mutex tag_list_lock;
struct list_head tag_list;
};
@@ -262,27 +534,7 @@ struct blk_mq_queue_data {
bool last;
};
-typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
- const struct blk_mq_queue_data *);
-typedef void (commit_rqs_fn)(struct blk_mq_hw_ctx *);
-typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *);
-typedef void (put_budget_fn)(struct blk_mq_hw_ctx *);
-typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
-typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
-typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
-typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
- unsigned int, unsigned int);
-typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
- unsigned int);
-
-typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
- bool);
-typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
-typedef int (poll_fn)(struct blk_mq_hw_ctx *);
-typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
-typedef bool (busy_fn)(struct request_queue *);
-typedef void (complete_fn)(struct request *);
-typedef void (cleanup_rq_fn)(struct request *);
+typedef bool (busy_tag_iter_fn)(struct request *, void *);
/**
 * struct blk_mq_ops - Callback functions that implement block driver
@@ -292,7 +544,8 @@ struct blk_mq_ops {
/**
* @queue_rq: Queue a new request from block IO.
*/
- queue_rq_fn *queue_rq;
+ blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
+ const struct blk_mq_queue_data *);
/**
* @commit_rqs: If a driver uses bd->last to judge when to submit
@@ -301,7 +554,15 @@ struct blk_mq_ops {
* purpose of kicking the hardware (which the last request otherwise
* would have done).
*/
- commit_rqs_fn *commit_rqs;
+ void (*commit_rqs)(struct blk_mq_hw_ctx *);
+
+ /**
+ * @queue_rqs: Queue a list of new requests. Driver is guaranteed
+ * that each request belongs to the same queue. If the driver doesn't
+ * empty the @rqlist completely, then the rest will be queued
+ * individually by the block layer upon return.
+ */
+ void (*queue_rqs)(struct request **rqlist);
/**
	 * @get_budget: Reserve budget before queueing a request; once .queue_rq is
@@ -309,37 +570,47 @@ struct blk_mq_ops {
	 * reserved budget. Also we have to handle the failure case
	 * of .get_budget to avoid I/O deadlock.
*/
- get_budget_fn *get_budget;
+ int (*get_budget)(struct request_queue *);
+
/**
* @put_budget: Release the reserved budget.
*/
- put_budget_fn *put_budget;
+ void (*put_budget)(struct request_queue *, int);
+
+ /**
+ * @set_rq_budget_token: store rq's budget token
+ */
+ void (*set_rq_budget_token)(struct request *, int);
+ /**
+ * @get_rq_budget_token: retrieve rq's budget token
+ */
+ int (*get_rq_budget_token)(struct request *);
/**
* @timeout: Called on request timeout.
*/
- timeout_fn *timeout;
+ enum blk_eh_timer_return (*timeout)(struct request *);
/**
* @poll: Called to poll for completion of a specific tag.
*/
- poll_fn *poll;
+ int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);
/**
* @complete: Mark the request as complete.
*/
- complete_fn *complete;
+ void (*complete)(struct request *);
/**
* @init_hctx: Called when the block layer side of a hardware queue has
* been set up, allowing the driver to allocate/init matching
* structures.
*/
- init_hctx_fn *init_hctx;
+ int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
/**
* @exit_hctx: Ditto for exit/teardown.
*/
- exit_hctx_fn *exit_hctx;
+ void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
/**
* @init_request: Called for every command allocated by the block layer
@@ -348,33 +619,30 @@ struct blk_mq_ops {
* Tag greater than or equal to queue_depth is for setting up
* flush request.
*/
- init_request_fn *init_request;
+ int (*init_request)(struct blk_mq_tag_set *set, struct request *,
+ unsigned int, unsigned int);
/**
* @exit_request: Ditto for exit/teardown.
*/
- exit_request_fn *exit_request;
-
- /**
- * @initialize_rq_fn: Called from inside blk_get_request().
- */
- void (*initialize_rq_fn)(struct request *rq);
+ void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
+ unsigned int);
/**
	 * @cleanup_rq: Called before freeing a request which isn't completed
	 * yet, usually to free the driver's private data.
*/
- cleanup_rq_fn *cleanup_rq;
+ void (*cleanup_rq)(struct request *);
/**
* @busy: If set, returns whether or not this queue currently is busy.
*/
- busy_fn *busy;
+ bool (*busy)(struct request_queue *);
/**
	 * @map_queues: This allows drivers to specify their own queue mapping
	 * by overriding the setup-time function that builds the mq_map.
*/
- map_queues_fn *map_queues;
+ void (*map_queues)(struct blk_mq_tag_set *set);
#ifdef CONFIG_BLK_DEBUG_FS
/**
@@ -387,9 +655,21 @@ struct blk_mq_ops {
enum {
BLK_MQ_F_SHOULD_MERGE = 1 << 0,
- BLK_MQ_F_TAG_SHARED = 1 << 1,
+ BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
+ /*
+ * Set when this device requires underlying blk-mq device for
+ * completing IO:
+ */
+ BLK_MQ_F_STACKING = 1 << 2,
+ BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
BLK_MQ_F_BLOCKING = 1 << 5,
+ /* Do not allow an I/O scheduler to be configured. */
BLK_MQ_F_NO_SCHED = 1 << 6,
+ /*
+ * Select 'none' during queue registration in case of a single hwq
+ * or shared hwqs instead of 'mq-deadline'.
+ */
+ BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 7,
BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
BLK_MQ_F_ALLOC_POLICY_BITS = 1,
@@ -397,6 +677,9 @@ enum {
BLK_MQ_S_TAG_ACTIVE = 1,
BLK_MQ_S_SCHED_RESTART = 2,
+ /* hw queue is inactive after all its CPUs become offline */
+ BLK_MQ_S_INACTIVE = 3,
+
BLK_MQ_MAX_DEPTH = 10240,
BLK_MQ_CPU_WORK_BATCH = 8,
@@ -408,21 +691,29 @@ enum {
((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
+#define BLK_MQ_NO_HCTX_IDX (-1U)
+
+struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
+ struct lock_class_key *lkclass);
+#define blk_mq_alloc_disk(set, queuedata) \
+({ \
+ static struct lock_class_key __key; \
+ \
+ __blk_mq_alloc_disk(set, queuedata, &__key); \
+})
+struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
+ struct lock_class_key *lkclass);
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
-struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
- struct request_queue *q,
- bool elevator_init);
-struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
- const struct blk_mq_ops *ops,
- unsigned int queue_depth,
- unsigned int set_flags);
-void blk_mq_unregister_dev(struct device *, struct request_queue *);
+int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+ struct request_queue *q);
+void blk_mq_destroy_queue(struct request_queue *);
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
+int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int queue_depth,
+ unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
-void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
-
void blk_mq_free_request(struct request *rq);
bool blk_mq_queue_inflight(struct request_queue *q);
@@ -432,18 +723,49 @@ enum {
BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0),
/* allocate from reserved pool */
BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1),
- /* allocate internal/sched tag */
- BLK_MQ_REQ_INTERNAL = (__force blk_mq_req_flags_t)(1 << 2),
- /* set RQF_PREEMPT */
- BLK_MQ_REQ_PREEMPT = (__force blk_mq_req_flags_t)(1 << 3),
+ /* set RQF_PM */
+ BLK_MQ_REQ_PM = (__force blk_mq_req_flags_t)(1 << 2),
};
-struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
+struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
- unsigned int op, blk_mq_req_flags_t flags,
+ blk_opf_t opf, blk_mq_req_flags_t flags,
unsigned int hctx_idx);
-struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
+
+/*
+ * Tag address space map.
+ */
+struct blk_mq_tags {
+ unsigned int nr_tags;
+ unsigned int nr_reserved_tags;
+
+ atomic_t active_queues;
+
+ struct sbitmap_queue bitmap_tags;
+ struct sbitmap_queue breserved_tags;
+
+ struct request **rqs;
+ struct request **static_rqs;
+ struct list_head page_list;
+
+ /*
+	 * used to clear request references in rqs[] before freeing a
+	 * request pool
+ */
+ spinlock_t lock;
+};
+
+static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
+ unsigned int tag)
+{
+ if (tag < tags->nr_tags) {
+ prefetch(tags->rqs[tag]);
+ return tags->rqs[tag];
+ }
+
+ return NULL;
+}
enum {
BLK_MQ_UNIQUE_TAG_BITS = 16,
@@ -481,17 +803,74 @@ static inline int blk_mq_request_completed(struct request *rq)
return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}
+/*
+ * Set the state to complete when completing a request from inside ->queue_rq.
+ * This is used by drivers that want to ensure special complete actions that
+ * need access to the request are called on failure, e.g. by nvme for
+ * multipathing.
+ */
+static inline void blk_mq_set_request_complete(struct request *rq)
+{
+ WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+}
+
+/*
+ * Complete the request directly instead of deferring it to softirq or to
+ * another CPU. Useful from preemptible context rather than interrupt context.
+ */
+static inline void blk_mq_complete_request_direct(struct request *rq,
+ void (*complete)(struct request *rq))
+{
+ WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+ complete(rq);
+}
+
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
+void blk_mq_end_request_batch(struct io_comp_batch *ib);
+
+/*
+ * Start/end time stamping is only needed if iostat or blk stats are
+ * enabled, or if an IO scheduler is in use.
+ */
+static inline bool blk_mq_need_time_stamp(struct request *rq)
+{
+ return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV));
+}
+
+static inline bool blk_mq_is_reserved_rq(struct request *rq)
+{
+ return rq->rq_flags & RQF_RESV;
+}
+
+/*
+ * Batched completions only work when there is no I/O error and no special
+ * ->end_io handler.
+ */
+static inline bool blk_mq_add_to_batch(struct request *req,
+ struct io_comp_batch *iob, int ioerror,
+ void (*complete)(struct io_comp_batch *))
+{
+ if (!iob || (req->rq_flags & RQF_ELV) || ioerror ||
+ (req->end_io && !blk_rq_is_passthrough(req)))
+ return false;
+
+ if (!iob->complete)
+ iob->complete = complete;
+ else if (iob->complete != complete)
+ return false;
+ iob->need_ts |= blk_mq_need_time_stamp(req);
+ rq_list_add(&iob->req_list, req);
+ return true;
+}
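For context, a hedged sketch (not from this patch) of how a driver's completion path would use this helper: try the batch first, fall back to an immediate completion, and let the registered callback drain the whole batch via blk_mq_end_request_batch().

static void my_complete_batch(struct io_comp_batch *iob)
{
	/* Per-request driver teardown (e.g. DMA unmap) would go here,
	 * then the whole list is ended in one pass. */
	blk_mq_end_request_batch(iob);
}

static void my_complete_rq(struct request *rq, struct io_comp_batch *iob,
			   int ioerror)
{
	if (!blk_mq_add_to_batch(rq, iob, ioerror, my_complete_batch))
		blk_mq_complete_request(rq);
}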
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
-bool blk_mq_complete_request(struct request *rq);
-bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
- struct bio *bio, unsigned int nr_segs);
-bool blk_mq_queue_stopped(struct request_queue *q);
+void blk_mq_complete_request(struct request *rq);
+bool blk_mq_complete_request_remote(struct request *rq);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
@@ -499,10 +878,12 @@ void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
+void blk_mq_wait_quiesce_done(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
+void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
@@ -513,13 +894,22 @@ void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);
-int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
+void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
void blk_mq_quiesce_queue_nowait(struct request_queue *q);
unsigned int blk_mq_rq_cpu(struct request *rq);
+bool __blk_should_fake_timeout(struct request_queue *q);
+static inline bool blk_should_fake_timeout(struct request_queue *q)
+{
+ if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
+ test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
+ return __blk_should_fake_timeout(q);
+ return false;
+}
+
/**
* blk_mq_rq_from_pdu - cast a PDU to a request
* @pdu: the PDU (Protocol Data Unit) to be casted
@@ -549,27 +939,273 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
}
#define queue_for_each_hw_ctx(q, hctx, i) \
- for ((i) = 0; (i) < (q)->nr_hw_queues && \
- ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
+ xa_for_each(&(q)->hctx_table, (i), (hctx))
#define hctx_for_each_ctx(hctx, ctx, i) \
for ((i) = 0; (i) < (hctx)->nr_ctx && \
({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
-static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
- struct request *rq)
+static inline void blk_mq_cleanup_rq(struct request *rq)
{
- if (rq->tag != -1)
- return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);
+ if (rq->q->mq_ops->cleanup_rq)
+ rq->q->mq_ops->cleanup_rq(rq);
+}
- return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
- BLK_QC_T_INTERNAL;
+static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
+ unsigned int nr_segs)
+{
+ rq->nr_phys_segments = nr_segs;
+ rq->__data_len = bio->bi_iter.bi_size;
+ rq->bio = rq->biotail = bio;
+ rq->ioprio = bio_prio(bio);
}
-static inline void blk_mq_cleanup_rq(struct request *rq)
+void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
+ struct lock_class_key *key);
+
+static inline bool rq_is_sync(struct request *rq)
{
- if (rq->q->mq_ops->cleanup_rq)
- rq->q->mq_ops->cleanup_rq(rq);
+ return op_is_sync(rq->cmd_flags);
}
-#endif
+void blk_rq_init(struct request_queue *q, struct request *rq);
+int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+ struct bio_set *bs, gfp_t gfp_mask,
+ int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
+void blk_rq_unprep_clone(struct request *rq);
+blk_status_t blk_insert_cloned_request(struct request *rq);
+
+struct rq_map_data {
+ struct page **pages;
+ unsigned long offset;
+ unsigned short page_order;
+ unsigned short nr_entries;
+ bool null_mapped;
+ bool from_user;
+};
+
+int blk_rq_map_user(struct request_queue *, struct request *,
+ struct rq_map_data *, void __user *, unsigned long, gfp_t);
+int blk_rq_map_user_io(struct request *, struct rq_map_data *,
+ void __user *, unsigned long, gfp_t, bool, int, bool, int);
+int blk_rq_map_user_iov(struct request_queue *, struct request *,
+ struct rq_map_data *, const struct iov_iter *, gfp_t);
+int blk_rq_unmap_user(struct bio *);
+int blk_rq_map_kern(struct request_queue *, struct request *, void *,
+ unsigned int, gfp_t);
+int blk_rq_append_bio(struct request *rq, struct bio *bio);
+void blk_execute_rq_nowait(struct request *rq, bool at_head);
+blk_status_t blk_execute_rq(struct request *rq, bool at_head);
+bool blk_rq_is_poll(struct request *rq);
+
+struct req_iterator {
+ struct bvec_iter iter;
+ struct bio *bio;
+};
+
+#define __rq_for_each_bio(_bio, rq) \
+ if ((rq->bio)) \
+ for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
+
+#define rq_for_each_segment(bvl, _rq, _iter) \
+ __rq_for_each_bio(_iter.bio, _rq) \
+ bio_for_each_segment(bvl, _iter.bio, _iter.iter)
+
+#define rq_for_each_bvec(bvl, _rq, _iter) \
+ __rq_for_each_bio(_iter.bio, _rq) \
+ bio_for_each_bvec(bvl, _iter.bio, _iter.iter)
+
+#define rq_iter_last(bvec, _iter) \
+ (_iter.bio->bi_next == NULL && \
+ bio_iter_last(bvec, _iter.iter))
+
+/*
+ * blk_rq_pos() : the current sector
+ * blk_rq_bytes() : bytes left in the entire request
+ * blk_rq_cur_bytes() : bytes left in the current segment
+ * blk_rq_sectors() : sectors left in the entire request
+ * blk_rq_cur_sectors() : sectors left in the current segment
+ * blk_rq_stats_sectors() : sectors of the entire request used for stats
+ */
+static inline sector_t blk_rq_pos(const struct request *rq)
+{
+ return rq->__sector;
+}
+
+static inline unsigned int blk_rq_bytes(const struct request *rq)
+{
+ return rq->__data_len;
+}
+
+static inline int blk_rq_cur_bytes(const struct request *rq)
+{
+ if (!rq->bio)
+ return 0;
+ if (!bio_has_data(rq->bio)) /* dataless requests such as discard */
+ return rq->bio->bi_iter.bi_size;
+ return bio_iovec(rq->bio).bv_len;
+}
+
+static inline unsigned int blk_rq_sectors(const struct request *rq)
+{
+ return blk_rq_bytes(rq) >> SECTOR_SHIFT;
+}
+
+static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+{
+ return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
+}
+
+static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
+{
+ return rq->stats_sectors;
+}
+
+/*
+ * Some commands like WRITE SAME have a payload or data transfer size which
+ * is different from the size of the request. Any driver that supports such
+ * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
+ * calculate the data transfer size.
+ */
+static inline unsigned int blk_rq_payload_bytes(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return rq->special_vec.bv_len;
+ return blk_rq_bytes(rq);
+}
+
+/*
+ * Return the first full biovec in the request. The caller needs to check that
+ * there are any bvecs before calling this helper.
+ */
+static inline struct bio_vec req_bvec(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return rq->special_vec;
+ return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
+}
+
+static inline unsigned int blk_rq_count_bios(struct request *rq)
+{
+ unsigned int nr_bios = 0;
+ struct bio *bio;
+
+ __rq_for_each_bio(bio, rq)
+ nr_bios++;
+
+ return nr_bios;
+}
+
+void blk_steal_bios(struct bio_list *list, struct request *rq);
+
+/*
+ * Request completion related functions.
+ *
+ * blk_update_request() completes given number of bytes and updates
+ * the request without completing it.
+ */
+bool blk_update_request(struct request *rq, blk_status_t error,
+ unsigned int nr_bytes);
+void blk_abort_request(struct request *);
+
+/*
+ * Number of physical segments as sent to the device.
+ *
+ * Normally this is the number of discontiguous data segments sent by the
+ * submitter.  But for data-less commands like discard we might have no
+ * actual data segments submitted, but the driver might have to add its
+ * own special payload. In that case we still return 1 here so that this
+ * special payload will be mapped.
+ */
+static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return 1;
+ return rq->nr_phys_segments;
+}
+
+/*
+ * Number of discard segments (or ranges) the driver needs to fill in.
+ * Each discard bio merged into a request is counted as one segment.
+ */
+static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
+{
+ return max_t(unsigned short, rq->nr_phys_segments, 1);
+}
+
+int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
+ struct scatterlist *sglist, struct scatterlist **last_sg);
+static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
+ struct scatterlist *sglist)
+{
+ struct scatterlist *last_sg = NULL;
+
+ return __blk_rq_map_sg(q, rq, sglist, &last_sg);
+}
+void blk_dump_rq_flags(struct request *, char *);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+static inline unsigned int blk_rq_zone_no(struct request *rq)
+{
+ return disk_zone_no(rq->q->disk, blk_rq_pos(rq));
+}
+
+static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
+{
+ return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq));
+}
+
+bool blk_req_needs_zone_write_lock(struct request *rq);
+bool blk_req_zone_write_trylock(struct request *rq);
+void __blk_req_zone_write_lock(struct request *rq);
+void __blk_req_zone_write_unlock(struct request *rq);
+
+static inline void blk_req_zone_write_lock(struct request *rq)
+{
+ if (blk_req_needs_zone_write_lock(rq))
+ __blk_req_zone_write_lock(rq);
+}
+
+static inline void blk_req_zone_write_unlock(struct request *rq)
+{
+ if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
+ __blk_req_zone_write_unlock(rq);
+}
+
+static inline bool blk_req_zone_is_write_locked(struct request *rq)
+{
+ return rq->q->disk->seq_zones_wlock &&
+ test_bit(blk_rq_zone_no(rq), rq->q->disk->seq_zones_wlock);
+}
+
+static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
+{
+ if (!blk_req_needs_zone_write_lock(rq))
+ return true;
+ return !blk_req_zone_is_write_locked(rq);
+}
+#else /* CONFIG_BLK_DEV_ZONED */
+static inline bool blk_req_needs_zone_write_lock(struct request *rq)
+{
+ return false;
+}
+
+static inline void blk_req_zone_write_lock(struct request *rq)
+{
+}
+
+static inline void blk_req_zone_write_unlock(struct request *rq)
+{
+}
+static inline bool blk_req_zone_is_write_locked(struct request *rq)
+{
+ return false;
+}
+
+static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
+{
+ return true;
+}
+#endif /* CONFIG_BLK_DEV_ZONED */
+
+#endif /* BLK_MQ_H */
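Tying the rq_list helpers and the new ->queue_rqs hook together, a hedged driver-side sketch (my_hw_queue_one() is hypothetical). Per the ->queue_rqs contract above, any requests left on the list when the callback returns are queued individually by the block layer:

static blk_status_t my_hw_queue_one(struct request *rq);	/* hypothetical */

static void my_queue_rqs(struct request **rqlist)
{
	struct request *rq;

	while ((rq = rq_list_pop(rqlist))) {
		blk_mq_start_request(rq);
		if (my_hw_queue_one(rq) != BLK_STS_OK) {
			/* Give it back; the block layer requeues the rest. */
			rq_list_add(rqlist, rq);
			return;
		}
	}
}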
diff --git a/include/linux/blk-pm.h b/include/linux/blk-pm.h
index b80c65aba249..2580e05a8ab6 100644
--- a/include/linux/blk-pm.h
+++ b/include/linux/blk-pm.h
@@ -14,7 +14,7 @@ extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
-extern void blk_post_runtime_resume(struct request_queue *q, int err);
+extern void blk_post_runtime_resume(struct request_queue *q);
extern void blk_set_runtime_active(struct request_queue *q);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 70254ae11769..e0b098089ef2 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -8,16 +8,76 @@
#include <linux/types.h>
#include <linux/bvec.h>
+#include <linux/device.h>
#include <linux/ktime.h>
struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
-struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
+struct bio_crypt_ctx;
+
+/*
+ * The basic unit of block I/O is a sector. It is used in a number of contexts
+ * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
+ * bytes. Variables of type sector_t represent an offset or size that is a
+ * multiple of 512 bytes. Hence these two constants.
+ */
+#ifndef SECTOR_SHIFT
+#define SECTOR_SHIFT 9
+#endif
+#ifndef SECTOR_SIZE
+#define SECTOR_SIZE (1 << SECTOR_SHIFT)
+#endif
+
+#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
+#define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT)
+#define SECTOR_MASK (PAGE_SECTORS - 1)
+
+struct block_device {
+ sector_t bd_start_sect;
+ sector_t bd_nr_sectors;
+ struct disk_stats __percpu *bd_stats;
+ unsigned long bd_stamp;
+ bool bd_read_only; /* read-only policy */
+ dev_t bd_dev;
+ atomic_t bd_openers;
+ struct inode * bd_inode; /* will die */
+ struct super_block * bd_super;
+ void * bd_claiming;
+ struct device bd_device;
+ void * bd_holder;
+ int bd_holders;
+ bool bd_write_holder;
+ struct kobject *bd_holder_dir;
+ u8 bd_partno;
+ spinlock_t bd_size_lock; /* for bd_inode->i_size updates */
+ struct gendisk * bd_disk;
+ struct request_queue * bd_queue;
+
+ /* The counter of freeze processes */
+ int bd_fsfreeze_count;
+ /* Mutex for freeze */
+ struct mutex bd_fsfreeze_mutex;
+ struct super_block *bd_fsfreeze_sb;
+
+ struct partition_meta_info *bd_meta_info;
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+ bool bd_make_it_fail;
+#endif
+} __randomize_layout;
+
+#define bdev_whole(_bdev) \
+ ((_bdev)->bd_disk->part0)
+
+#define dev_to_bdev(device) \
+ container_of((device), struct block_device, bd_device)
+
+#define bdev_kobj(_bdev) \
+ (&((_bdev)->bd_device.kobj))
/*
* Block error status values. See block/blk-core:blk_errors for the details.
@@ -25,8 +85,10 @@ typedef void (bio_end_io_t) (struct bio *);
*/
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
+typedef u32 blk_short_t;
#else
typedef u8 __bitwise blk_status_t;
+typedef u16 blk_short_t;
#endif
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP ((__force blk_status_t)1)
@@ -43,6 +105,10 @@ typedef u8 __bitwise blk_status_t;
/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE ((__force blk_status_t)11)
+/*
+ * BLK_STS_AGAIN should only be returned if REQ_NOWAIT is set
+ * and the bio would block (cf. bio_wouldblock_error())
+ */
#define BLK_STS_AGAIN ((__force blk_status_t)12)
/*
@@ -63,6 +129,43 @@ typedef u8 __bitwise blk_status_t;
*/
#define BLK_STS_DEV_RESOURCE ((__force blk_status_t)13)
+/*
+ * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
+ * related resources are unavailable, but the driver can guarantee the queue
+ * will be rerun in the future once the resources become available again.
+ *
+ * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
+ * a zone-specific resource, and IO to a different zone on the same device
+ * could still be served. An example is a write-locked zone: writes must wait,
+ * but a read from the same zone can still be served.
+ */
+#define BLK_STS_ZONE_RESOURCE ((__force blk_status_t)14)
+
+/*
+ * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
+ * path if the device returns a status indicating that too many zone resources
+ * are currently open. The same command should be successful if resubmitted
+ * after the number of open zones decreases below the device's limits, which is
+ * reported in the request_queue's max_open_zones.
+ */
+#define BLK_STS_ZONE_OPEN_RESOURCE ((__force blk_status_t)15)
+
+/*
+ * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
+ * path if the device returns a status indicating that too many zone resources
+ * are currently active. The same command should be successful if resubmitted
+ * after the number of active zones decreases below the device's limits, which
+ * is reported in the request_queue's max_active_zones.
+ */
+#define BLK_STS_ZONE_ACTIVE_RESOURCE ((__force blk_status_t)16)
+
+/*
+ * BLK_STS_OFFLINE is returned from the driver when the target device is offline
+ * or is being taken offline. This could help differentiate the case where a
+ * device is intentionally being shut down from a real I/O error.
+ */
+#define BLK_STS_OFFLINE ((__force blk_status_t)17)
+
/**
* blk_path_error - returns true if error may be path related
* @error: status the request was completed with
@@ -137,28 +240,30 @@ static inline void bio_issue_init(struct bio_issue *issue,
((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
+typedef __u32 __bitwise blk_opf_t;
+
+typedef unsigned int blk_qc_t;
+#define BLK_QC_T_NONE -1U
+
/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
* stacking drivers)
*/
struct bio {
struct bio *bi_next; /* request queue link */
- struct gendisk *bi_disk;
- unsigned int bi_opf; /* bottom bits req flags,
- * top bits REQ_OP. Use
- * accessors.
+ struct block_device *bi_bdev;
+ blk_opf_t bi_opf; /* bottom bits REQ_OP, top bits
+ * req_flags.
*/
- unsigned short bi_flags; /* status, etc and bvec pool number */
+ unsigned short bi_flags; /* BIO_* below */
unsigned short bi_ioprio;
- unsigned short bi_write_hint;
blk_status_t bi_status;
- u8 bi_partno;
atomic_t __bi_remaining;
struct bvec_iter bi_iter;
+ blk_qc_t bi_cookie;
bio_end_io_t *bi_end_io;
-
void *bi_private;
#ifdef CONFIG_BLK_CGROUP
/*
@@ -173,6 +278,11 @@ struct bio {
u64 bi_iocost_cost;
#endif
#endif
+
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ struct bio_crypt_ctx *bi_crypt_context;
+#endif
+
union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
struct bio_integrity_payload *bi_integrity; /* data integrity */
@@ -198,10 +308,11 @@ struct bio {
* double allocations for a small number of bio_vecs. This member
* MUST obviously be kept at the very end of the bio.
*/
- struct bio_vec bi_inline_vecs[0];
+ struct bio_vec bi_inline_vecs[];
};
#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
+#define BIO_MAX_SECTORS (UINT_MAX >> SECTOR_SHIFT)
/*
* bio flags
@@ -210,52 +321,29 @@ enum {
BIO_NO_PAGE_REF, /* don't put release vec pages */
BIO_CLONED, /* doesn't own data */
BIO_BOUNCED, /* bio is a bounce bio */
- BIO_USER_MAPPED, /* contains user pages */
- BIO_NULL_MAPPED, /* contains invalid user pages */
- BIO_WORKINGSET, /* contains userspace workingset pages */
BIO_QUIET, /* Make BIO Quiet */
BIO_CHAIN, /* chained bio, ->bi_remaining in effect */
BIO_REFFED, /* bio has elevated ->bi_cnt */
- BIO_THROTTLED, /* This bio has already been subjected to
+ BIO_BPS_THROTTLED, /* This bio has already been subjected to
* throttling rules. Don't do it again. */
BIO_TRACE_COMPLETION, /* bio_endio() should trace the final completion
* of this bio. */
- BIO_QUEUE_ENTERED, /* can use blk_queue_enter_live() */
- BIO_TRACKED, /* set if bio goes through the rq_qos path */
+ BIO_CGROUP_ACCT, /* has been accounted to a cgroup */
+ BIO_QOS_THROTTLED, /* bio went through rq_qos throttle path */
+ BIO_QOS_MERGED, /* but went through rq_qos merge path */
+ BIO_REMAPPED,
+ BIO_ZONE_WRITE_LOCKED, /* Owns a zoned device zone write lock */
BIO_FLAG_LAST
};
-/* See BVEC_POOL_OFFSET below before adding new flags */
-
-/*
- * We support 6 different bvec pools, the last one is magic in that it
- * is backed by a mempool.
- */
-#define BVEC_POOL_NR 6
-#define BVEC_POOL_MAX (BVEC_POOL_NR - 1)
-
-/*
- * Top 3 bits of bio flags indicate the pool the bvecs came from. We add
- * 1 to the actual index so that 0 indicates that there are no bvecs to be
- * freed.
- */
-#define BVEC_POOL_BITS (3)
-#define BVEC_POOL_OFFSET (16 - BVEC_POOL_BITS)
-#define BVEC_POOL_IDX(bio) ((bio)->bi_flags >> BVEC_POOL_OFFSET)
-#if (1<< BVEC_POOL_BITS) < (BVEC_POOL_NR+1)
-# error "BVEC_POOL_BITS is too small"
-#endif
-
-/*
- * Flags starting here get preserved by bio_reset() - this includes
- * only BVEC_POOL_IDX()
- */
-#define BIO_RESET_BITS BVEC_POOL_OFFSET
-
typedef __u32 __bitwise blk_mq_req_flags_t;
-/*
- * Operations and flags common to the bio and request structures.
+#define REQ_OP_BITS 8
+#define REQ_OP_MASK (__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
+#define REQ_FLAG_BITS 24
+
+/**
+ * enum req_op - Operations common to the bio and request structures.
* We use 8 bits for encoding the operation, and the remaining 24 for flags.
*
* The least significant bit of the operation number indicates the data
@@ -267,44 +355,37 @@ typedef __u32 __bitwise blk_mq_req_flags_t;
 * If an operation does not transfer data, the least significant bit has no
* meaning.
*/
-#define REQ_OP_BITS 8
-#define REQ_OP_MASK ((1 << REQ_OP_BITS) - 1)
-#define REQ_FLAG_BITS 24
-
-enum req_opf {
+enum req_op {
/* read sectors from the device */
- REQ_OP_READ = 0,
+ REQ_OP_READ = (__force blk_opf_t)0,
/* write sectors to the device */
- REQ_OP_WRITE = 1,
+ REQ_OP_WRITE = (__force blk_opf_t)1,
/* flush the volatile write cache */
- REQ_OP_FLUSH = 2,
+ REQ_OP_FLUSH = (__force blk_opf_t)2,
/* discard sectors */
- REQ_OP_DISCARD = 3,
+ REQ_OP_DISCARD = (__force blk_opf_t)3,
/* securely erase sectors */
- REQ_OP_SECURE_ERASE = 5,
- /* reset a zone write pointer */
- REQ_OP_ZONE_RESET = 6,
- /* write the same sector many times */
- REQ_OP_WRITE_SAME = 7,
- /* reset all the zone present on the device */
- REQ_OP_ZONE_RESET_ALL = 8,
+ REQ_OP_SECURE_ERASE = (__force blk_opf_t)5,
/* write the zero filled sector many times */
- REQ_OP_WRITE_ZEROES = 9,
+ REQ_OP_WRITE_ZEROES = (__force blk_opf_t)9,
/* Open a zone */
- REQ_OP_ZONE_OPEN = 10,
+ REQ_OP_ZONE_OPEN = (__force blk_opf_t)10,
/* Close a zone */
- REQ_OP_ZONE_CLOSE = 11,
+ REQ_OP_ZONE_CLOSE = (__force blk_opf_t)11,
/* Transition a zone to full */
- REQ_OP_ZONE_FINISH = 12,
+ REQ_OP_ZONE_FINISH = (__force blk_opf_t)12,
+ /* write data at the current zone write pointer */
+ REQ_OP_ZONE_APPEND = (__force blk_opf_t)13,
+ /* reset a zone write pointer */
+ REQ_OP_ZONE_RESET = (__force blk_opf_t)15,
+ /* reset all the zone present on the device */
+ REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)17,
- /* SCSI passthrough using struct scsi_request */
- REQ_OP_SCSI_IN = 32,
- REQ_OP_SCSI_OUT = 33,
/* Driver private requests */
- REQ_OP_DRV_IN = 34,
- REQ_OP_DRV_OUT = 35,
+ REQ_OP_DRV_IN = (__force blk_opf_t)34,
+ REQ_OP_DRV_OUT = (__force blk_opf_t)35,
- REQ_OP_LAST,
+ REQ_OP_LAST = (__force blk_opf_t)36,
};
enum req_flag_bits {
@@ -323,7 +404,6 @@ enum req_flag_bits {
__REQ_RAHEAD, /* read ahead, can fail anytime */
__REQ_BACKGROUND, /* background IO */
__REQ_NOWAIT, /* Don't wait if request will block */
- __REQ_NOWAIT_INLINE, /* Return would-block error inline */
/*
* When a shared kthread needs to issue a bio for a cgroup, doing
* so synchronously can lead to priority inversions as the kthread
@@ -332,40 +412,45 @@ enum req_flag_bits {
* work item to avoid such priority inversions.
*/
__REQ_CGROUP_PUNT,
+ __REQ_POLLED, /* caller polls for completion using bio_poll */
+ __REQ_ALLOC_CACHE, /* allocate IO from cache if available */
+ __REQ_SWAP, /* swap I/O */
+ __REQ_DRV, /* for driver use */
- /* command specific flags for REQ_OP_WRITE_ZEROES: */
+ /*
+ * Command specific flags, keep last:
+ */
+ /* for REQ_OP_WRITE_ZEROES: */
__REQ_NOUNMAP, /* do not free blocks when zeroing */
- __REQ_HIPRI,
-
- /* for driver use */
- __REQ_DRV,
- __REQ_SWAP, /* swapping request. */
__REQ_NR_BITS, /* stops here */
};
-#define REQ_FAILFAST_DEV (1ULL << __REQ_FAILFAST_DEV)
-#define REQ_FAILFAST_TRANSPORT (1ULL << __REQ_FAILFAST_TRANSPORT)
-#define REQ_FAILFAST_DRIVER (1ULL << __REQ_FAILFAST_DRIVER)
-#define REQ_SYNC (1ULL << __REQ_SYNC)
-#define REQ_META (1ULL << __REQ_META)
-#define REQ_PRIO (1ULL << __REQ_PRIO)
-#define REQ_NOMERGE (1ULL << __REQ_NOMERGE)
-#define REQ_IDLE (1ULL << __REQ_IDLE)
-#define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY)
-#define REQ_FUA (1ULL << __REQ_FUA)
-#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH)
-#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
-#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
-#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)
-#define REQ_NOWAIT_INLINE (1ULL << __REQ_NOWAIT_INLINE)
-#define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT)
-
-#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
-#define REQ_HIPRI (1ULL << __REQ_HIPRI)
-
-#define REQ_DRV (1ULL << __REQ_DRV)
-#define REQ_SWAP (1ULL << __REQ_SWAP)
+#define REQ_FAILFAST_DEV \
+ (__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
+#define REQ_FAILFAST_TRANSPORT \
+ (__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
+#define REQ_FAILFAST_DRIVER \
+ (__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
+#define REQ_SYNC (__force blk_opf_t)(1ULL << __REQ_SYNC)
+#define REQ_META (__force blk_opf_t)(1ULL << __REQ_META)
+#define REQ_PRIO (__force blk_opf_t)(1ULL << __REQ_PRIO)
+#define REQ_NOMERGE (__force blk_opf_t)(1ULL << __REQ_NOMERGE)
+#define REQ_IDLE (__force blk_opf_t)(1ULL << __REQ_IDLE)
+#define REQ_INTEGRITY (__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
+#define REQ_FUA (__force blk_opf_t)(1ULL << __REQ_FUA)
+#define REQ_PREFLUSH (__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
+#define REQ_RAHEAD (__force blk_opf_t)(1ULL << __REQ_RAHEAD)
+#define REQ_BACKGROUND (__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
+#define REQ_NOWAIT (__force blk_opf_t)(1ULL << __REQ_NOWAIT)
+#define REQ_CGROUP_PUNT (__force blk_opf_t)(1ULL << __REQ_CGROUP_PUNT)
+
+#define REQ_NOUNMAP (__force blk_opf_t)(1ULL << __REQ_NOUNMAP)
+#define REQ_POLLED (__force blk_opf_t)(1ULL << __REQ_POLLED)
+#define REQ_ALLOC_CACHE (__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)
+
+#define REQ_DRV (__force blk_opf_t)(1ULL << __REQ_DRV)
+#define REQ_SWAP (__force blk_opf_t)(1ULL << __REQ_SWAP)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
@@ -382,28 +467,28 @@ enum stat_group {
NR_STAT_GROUPS
};
-#define bio_op(bio) \
- ((bio)->bi_opf & REQ_OP_MASK)
-#define req_op(req) \
- ((req)->cmd_flags & REQ_OP_MASK)
+static inline enum req_op bio_op(const struct bio *bio)
+{
+ return bio->bi_opf & REQ_OP_MASK;
+}
/* obsolete, don't use in new code */
-static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
- unsigned op_flags)
+static inline void bio_set_op_attrs(struct bio *bio, enum req_op op,
+ blk_opf_t op_flags)
{
bio->bi_opf = op | op_flags;
}
-static inline bool op_is_write(unsigned int op)
+static inline bool op_is_write(blk_opf_t op)
{
- return (op & 1);
+ return !!(op & (__force blk_opf_t)1);
}
/*
* Check if the bio or request is one that needs special treatment in the
* flush state machine.
*/
-static inline bool op_is_flush(unsigned int op)
+static inline bool op_is_flush(blk_opf_t op)
{
return op & (REQ_FUA | REQ_PREFLUSH);
}
@@ -413,13 +498,13 @@ static inline bool op_is_flush(unsigned int op)
* PREFLUSH flag. Other operations may be marked as synchronous using the
* REQ_SYNC flag.
*/
-static inline bool op_is_sync(unsigned int op)
+static inline bool op_is_sync(blk_opf_t op)
{
return (op & REQ_OP_MASK) == REQ_OP_READ ||
(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
-static inline bool op_is_discard(unsigned int op)
+static inline bool op_is_discard(blk_opf_t op)
{
return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}
@@ -430,7 +515,7 @@ static inline bool op_is_discard(unsigned int op)
* due to its different handling in the block layer and device response in
* case of command failure.
*/
-static inline bool op_is_zone_mgmt(enum req_opf op)
+static inline bool op_is_zone_mgmt(enum req_op op)
{
switch (op & REQ_OP_MASK) {
case REQ_OP_ZONE_RESET:
@@ -443,39 +528,13 @@ static inline bool op_is_zone_mgmt(enum req_opf op)
}
}
-static inline int op_stat_group(unsigned int op)
+static inline int op_stat_group(enum req_op op)
{
if (op_is_discard(op))
return STAT_DISCARD;
return op_is_write(op);
}
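[To see the converted helpers together, a hedged sketch of a classification function; classify_bio is hypothetical and not from this patch:]

	static void classify_bio(struct bio *bio)
	{
		enum req_op op = bio_op(bio);	/* strips the flag bits */

		if (op_is_write(bio->bi_opf))
			pr_debug("%s: write-style op\n", blk_op_str(op));
		if (op_is_flush(bio->bi_opf))
			pr_debug("needs flush state machine handling\n");
	}

[Note how bio_op() narrows blk_opf_t to enum req_op, while the predicates keep taking the full flag word.]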
-typedef unsigned int blk_qc_t;
-#define BLK_QC_T_NONE -1U
-#define BLK_QC_T_EAGAIN -2U
-#define BLK_QC_T_SHIFT 16
-#define BLK_QC_T_INTERNAL (1U << 31)
-
-static inline bool blk_qc_t_valid(blk_qc_t cookie)
-{
- return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
-}
-
-static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
-{
- return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
-}
-
-static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
-{
- return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
-}
-
-static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
-{
- return (cookie & BLK_QC_T_INTERNAL) != 0;
-}
-
struct blk_rq_stat {
u64 mean;
u64 min;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f629d40c645c..50e358a19d98 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1,51 +1,49 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions Copyright (C) 1992 Drew Eckhardt
+ */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H
-#include <linux/sched.h>
-#include <linux/sched/clock.h>
-
-#ifdef CONFIG_BLOCK
-
-#include <linux/major.h>
-#include <linux/genhd.h>
+#include <linux/types.h>
+#include <linux/blk_types.h>
+#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
+#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
-#include <linux/pagemap.h>
-#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
-#include <linux/mempool.h>
-#include <linux/pfn.h>
#include <linux/bio.h>
-#include <linux/stringify.h>
#include <linux/gfp.h>
-#include <linux/bsg.h>
-#include <linux/smp.h>
+#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
-#include <linux/scatterlist.h>
#include <linux/blkzoned.h>
+#include <linux/sched.h>
+#include <linux/sbitmap.h>
+#include <linux/srcu.h>
+#include <linux/uuid.h>
+#include <linux/xarray.h>
struct module;
-struct scsi_ioctl_command;
-
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
-struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
+struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
+struct blk_crypto_profile;
-#define BLKDEV_MIN_RQ 4
-#define BLKDEV_MAX_RQ 128 /* Default maximum */
+extern const struct device_type disk_type;
+extern struct device_type part_type;
+extern struct class block_class;
/* Must be consistent with blk_mq_poll_stats_bkt() */
#define BLK_MQ_POLL_STATS_BKTS 16
@@ -57,268 +55,232 @@ struct blk_stat_callback;
* Maximum number of blkcg policies allowed to be registered concurrently.
* Defined here to simplify include dependency.
*/
-#define BLKCG_MAX_POLS 5
-
-typedef void (rq_end_io_fn)(struct request *, blk_status_t);
+#define BLKCG_MAX_POLS 6
-/*
- * request flags */
-typedef __u32 __bitwise req_flags_t;
-
-/* elevator knows about this request */
-#define RQF_SORTED ((__force req_flags_t)(1 << 0))
-/* drive already may have started this one */
-#define RQF_STARTED ((__force req_flags_t)(1 << 1))
-/* may not be passed by ioscheduler */
-#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3))
-/* request for flush sequence */
-#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4))
-/* merge of different types, fail separately */
-#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5))
-/* track inflight for MQ */
-#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6))
-/* don't call prep for this one */
-#define RQF_DONTPREP ((__force req_flags_t)(1 << 7))
-/* set for "ide_preempt" requests and also for requests for which the SCSI
- "quiesce" state must be ignored. */
-#define RQF_PREEMPT ((__force req_flags_t)(1 << 8))
-/* contains copies of user pages */
-#define RQF_COPY_USER ((__force req_flags_t)(1 << 9))
-/* vaguely specified driver internal error. Ignored by the block layer */
-#define RQF_FAILED ((__force req_flags_t)(1 << 10))
-/* don't warn about errors */
-#define RQF_QUIET ((__force req_flags_t)(1 << 11))
-/* elevator private data attached */
-#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12))
-/* account into disk and partition IO statistics */
-#define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
-/* request came from our alloc pool */
-#define RQF_ALLOCED ((__force req_flags_t)(1 << 14))
-/* runtime pm request */
-#define RQF_PM ((__force req_flags_t)(1 << 15))
-/* on IO scheduler merge hash */
-#define RQF_HASHED ((__force req_flags_t)(1 << 16))
-/* track IO completion time */
-#define RQF_STATS ((__force req_flags_t)(1 << 17))
-/* Look at ->special_vec for the actual data payload instead of the
- bio chain. */
-#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
-/* The per-zone write lock is held for this request */
-#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
-/* already slept for hybrid poll */
-#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20))
-/* ->timeout has been called, don't expire again */
-#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21))
-
-/* flags that prevent us from merging requests: */
-#define RQF_NOMERGE_FLAGS \
- (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
+#define DISK_MAX_PARTS 256
+#define DISK_NAME_LEN 32
+#define PARTITION_META_INFO_VOLNAMELTH 64
/*
- * Request state for blk-mq.
+ * Enough for the string representation of any kind of UUID plus NUL.
+ * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
*/
-enum mq_rq_state {
- MQ_RQ_IDLE = 0,
- MQ_RQ_IN_FLIGHT = 1,
- MQ_RQ_COMPLETE = 2,
+#define PARTITION_META_INFO_UUIDLTH (UUID_STRING_LEN + 1)
+
+struct partition_meta_info {
+ char uuid[PARTITION_META_INFO_UUIDLTH];
+ u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};
-/*
- * Try to put the fields that are referenced together in the same cacheline.
+/**
+ * DOC: genhd capability flags
+ *
+ * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
+ * removable media. When set, the device remains present even when media is not
+ * inserted. Shall not be set for devices which are removed entirely when the
+ * media is removed.
+ *
+ * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
+ * doesn't appear in sysfs, and can't be opened from userspace or using
+ * blkdev_get*. Used for the underlying components of multipath devices.
+ *
+ * ``GENHD_FL_NO_PART``: partition support is disabled. The kernel will not
+ * scan for partitions from add_disk, and users can't add partitions manually.
*
- * If you modify this structure, make sure to update blk_rq_init() and
- * especially blk_mq_rq_ctx_init() to take care of the added fields.
*/
-struct request {
- struct request_queue *q;
- struct blk_mq_ctx *mq_ctx;
- struct blk_mq_hw_ctx *mq_hctx;
-
- unsigned int cmd_flags; /* op and common flags */
- req_flags_t rq_flags;
+enum {
+ GENHD_FL_REMOVABLE = 1 << 0,
+ GENHD_FL_HIDDEN = 1 << 1,
+ GENHD_FL_NO_PART = 1 << 2,
+};
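[A hedged usage sketch, disk variable assumed from a hypothetical driver probe: a BIO-based device that never carries a partition table would set the flag before add_disk():]

	disk->flags = GENHD_FL_NO_PART;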
- int tag;
- int internal_tag;
+enum {
+ DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
+ DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
+};
- /* the following two fields are internal, NEVER access directly */
- unsigned int __data_len; /* total data len */
- sector_t __sector; /* sector cursor */
+enum {
+ /* Poll even if events_poll_msecs is unset */
+ DISK_EVENT_FLAG_POLL = 1 << 0,
+ /* Forward events to udev */
+ DISK_EVENT_FLAG_UEVENT = 1 << 1,
+ /* Block event polling when open for exclusive write */
+ DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE = 1 << 2,
+};
- struct bio *bio;
- struct bio *biotail;
+struct disk_events;
+struct badblocks;
- struct list_head queuelist;
+struct blk_integrity {
+ const struct blk_integrity_profile *profile;
+ unsigned char flags;
+ unsigned char tuple_size;
+ unsigned char interval_exp;
+ unsigned char tag_size;
+};
+struct gendisk {
/*
- * The hash is used inside the scheduler, and killed once the
- * request reaches the dispatch list. The ipi_list is only used
- * to queue the request for softirq completion, which is long
- * after the request has been unhashed (and even removed from
- * the dispatch list).
+ * major/first_minor/minors should not be set by any new driver, the
+ * block core will take care of allocating them automatically.
*/
- union {
- struct hlist_node hash; /* merge hash */
- struct list_head ipi_list;
- };
+ int major;
+ int first_minor;
+ int minors;
- /*
- * The rb_node is only used inside the io scheduler, requests
- * are pruned when moved to the dispatch queue. So let the
- * completion_data share space with the rb_node.
- */
- union {
- struct rb_node rb_node; /* sort/lookup */
- struct bio_vec special_vec;
- void *completion_data;
- int error_count; /* for legacy drivers, don't use */
- };
+ char disk_name[DISK_NAME_LEN]; /* name of major driver */
- /*
- * Three pointers are available for the IO schedulers, if they need
- * more they have to dynamically allocate it. Flush requests are
- * never put on the IO scheduler. So let the flush fields share
- * space with the elevator data.
- */
- union {
- struct {
- struct io_cq *icq;
- void *priv[2];
- } elv;
-
- struct {
- unsigned int seq;
- struct list_head list;
- rq_end_io_fn *saved_end_io;
- } flush;
- };
-
- struct gendisk *rq_disk;
- struct hd_struct *part;
-#ifdef CONFIG_BLK_RQ_ALLOC_TIME
- /* Time that the first bio started allocating this request. */
- u64 alloc_time_ns;
-#endif
- /* Time that this request was allocated for this IO. */
- u64 start_time_ns;
- /* Time that I/O was submitted to the device. */
- u64 io_start_time_ns;
+ unsigned short events; /* supported events */
+ unsigned short event_flags; /* flags related to event processing */
-#ifdef CONFIG_BLK_WBT
- unsigned short wbt_flags;
-#endif
- /*
- * rq sectors used for blk stats. It has the same value
- * with blk_rq_sectors(rq), except that it never be zeroed
- * by completion.
- */
- unsigned short stats_sectors;
+ struct xarray part_tbl;
+ struct block_device *part0;
- /*
- * Number of scatter-gather DMA addr+len pairs after
- * physical address coalescing is performed.
- */
- unsigned short nr_phys_segments;
+ const struct block_device_operations *fops;
+ struct request_queue *queue;
+ void *private_data;
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
- unsigned short nr_integrity_segments;
-#endif
+ struct bio_set bio_split;
- unsigned short write_hint;
- unsigned short ioprio;
+ int flags;
+ unsigned long state;
+#define GD_NEED_PART_SCAN 0
+#define GD_READ_ONLY 1
+#define GD_DEAD 2
+#define GD_NATIVE_CAPACITY 3
+#define GD_ADDED 4
+#define GD_SUPPRESS_PART_SCAN 5
+#define GD_OWNS_QUEUE 6
- unsigned int extra_len; /* length of alignment and padding */
+ struct mutex open_mutex; /* open/close mutex */
+ unsigned open_partitions; /* number of open partitions */
- enum mq_rq_state state;
- refcount_t ref;
+ struct backing_dev_info *bdi;
+ struct kobject *slave_dir;
+#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
+ struct list_head slave_bdevs;
+#endif
+ struct timer_rand_state *random;
+ atomic_t sync_io; /* RAID */
+ struct disk_events *ev;
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ struct kobject integrity_kobj;
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
- unsigned int timeout;
- unsigned long deadline;
+#ifdef CONFIG_BLK_DEV_ZONED
+ /*
+ * Zoned block device information for request dispatch control.
+ * nr_zones is the total number of zones of the device. This is always
+ * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
+ * bits which indicates if a zone is conventional (bit set) or
+ * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
+ * bits which indicates if a zone is write locked, that is, if a write
+ * request targeting the zone was dispatched.
+ *
+ * Reads of this information must be protected with blk_queue_enter() /
+ * blk_queue_exit(). Modifying this information is only allowed while
+ * no requests are being processed. See also blk_mq_freeze_queue() and
+ * blk_mq_unfreeze_queue().
+ */
+ unsigned int nr_zones;
+ unsigned int max_open_zones;
+ unsigned int max_active_zones;
+ unsigned long *conv_zones_bitmap;
+ unsigned long *seq_zones_wlock;
+#endif /* CONFIG_BLK_DEV_ZONED */
- union {
- struct __call_single_data csd;
- u64 fifo_time;
- };
+#if IS_ENABLED(CONFIG_CDROM)
+ struct cdrom_device_info *cdi;
+#endif
+ int node_id;
+ struct badblocks *bb;
+ struct lockdep_map lockdep_map;
+ u64 diskseq;
/*
- * completion callback.
+ * Independent sector access ranges. This is always NULL for
+ * devices that do not have multiple independent access ranges.
*/
- rq_end_io_fn *end_io;
- void *end_io_data;
+ struct blk_independent_access_ranges *ia_ranges;
};
-static inline bool blk_op_is_scsi(unsigned int op)
+static inline bool disk_live(struct gendisk *disk)
{
- return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
+ return !inode_unhashed(disk->part0->bd_inode);
}
-static inline bool blk_op_is_private(unsigned int op)
+/**
+ * disk_openers - return the number of openers for a disk
+ * @disk: disk to check
+ *
+ * This returns the number of openers for a disk. Note that this value is only
+ * stable if disk->open_mutex is held.
+ *
+ * Note: Due to a quirk in the block layer open code, each open partition is
+ * only counted once even if there are multiple openers.
+ */
+static inline unsigned int disk_openers(struct gendisk *disk)
{
- return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
+ return atomic_read(&disk->part0->bd_openers);
}
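[A minimal sketch of the locking rule stated in the kernel-doc above; diagnostic only, not from the patch:]

	mutex_lock(&disk->open_mutex);
	if (disk_openers(disk) > 0)
		pr_warn("%s: still open\n", disk->disk_name);
	mutex_unlock(&disk->open_mutex);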
-static inline bool blk_rq_is_scsi(struct request *rq)
-{
- return blk_op_is_scsi(req_op(rq));
-}
+/*
+ * The gendisk is refcounted by the part0 block_device, and the bd_device
+ * therein is also used for device model presentation in sysfs.
+ */
+#define dev_to_disk(device) \
+ (dev_to_bdev(device)->bd_disk)
+#define disk_to_dev(disk) \
+ (&((disk)->part0->bd_device))
-static inline bool blk_rq_is_private(struct request *rq)
-{
- return blk_op_is_private(req_op(rq));
-}
+#if IS_REACHABLE(CONFIG_CDROM)
+#define disk_to_cdi(disk) ((disk)->cdi)
+#else
+#define disk_to_cdi(disk) NULL
+#endif
-static inline bool blk_rq_is_passthrough(struct request *rq)
+static inline dev_t disk_devt(struct gendisk *disk)
{
- return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
+ return MKDEV(disk->major, disk->first_minor);
}
-static inline bool bio_is_passthrough(struct bio *bio)
+static inline int blk_validate_block_size(unsigned long bsize)
{
- unsigned op = bio_op(bio);
+ if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
+ return -EINVAL;
- return blk_op_is_scsi(op) || blk_op_is_private(op);
+ return 0;
}
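[Callers typically use this as an early guard; a sketch of a hypothetical set-blocksize path, with bsize assumed to come from userspace:]

	if (blk_validate_block_size(bsize))
		return -EINVAL;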
-static inline unsigned short req_get_ioprio(struct request *req)
+static inline bool blk_op_is_passthrough(blk_opf_t op)
{
- return req->ioprio;
+ op &= REQ_OP_MASK;
+ return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}
-#include <linux/elevator.h>
-
-struct blk_queue_ctx;
-
-typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
-
-struct bio_vec;
-typedef int (dma_drain_needed_fn)(struct request *);
-
-enum blk_eh_timer_return {
- BLK_EH_DONE, /* driver has completed the command */
- BLK_EH_RESET_TIMER, /* reset timer and try again */
-};
-
-enum blk_queue_state {
- Queue_down,
- Queue_up,
-};
-
-#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
-#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
-
-#define BLK_SCSI_MAX_CMDS (256)
-#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
-
/*
* Zoned block device models (zoned limit).
+ *
+ * Note: This needs to be ordered from the least to the most severe
+ * restrictions for the inheritance in blk_stack_limits() to work.
*/
enum blk_zoned_model {
- BLK_ZONED_NONE, /* Regular block device */
- BLK_ZONED_HA, /* Host-aware zoned block device */
- BLK_ZONED_HM, /* Host-managed zoned block device */
+ BLK_ZONED_NONE = 0, /* Regular block device */
+ BLK_ZONED_HA, /* Host-aware zoned block device */
+ BLK_ZONED_HM, /* Host-managed zoned block device */
+};
+
+/*
+ * BLK_BOUNCE_NONE: never bounce (default)
+ * BLK_BOUNCE_HIGH: bounce all highmem pages
+ */
+enum blk_bounce {
+ BLK_BOUNCE_NONE,
+ BLK_BOUNCE_HIGH,
};
struct queue_limits {
- unsigned long bounce_pfn;
+ enum blk_bounce bounce;
unsigned long seg_boundary_mask;
unsigned long virt_boundary_mask;
@@ -334,10 +296,12 @@ struct queue_limits {
unsigned int io_opt;
unsigned int max_discard_sectors;
unsigned int max_hw_discard_sectors;
- unsigned int max_write_same_sectors;
+ unsigned int max_secure_erase_sectors;
unsigned int max_write_zeroes_sectors;
+ unsigned int max_zone_append_sectors;
unsigned int discard_granularity;
unsigned int discard_alignment;
+ unsigned int zone_write_granularity;
unsigned short max_segments;
unsigned short max_integrity_segments;
@@ -352,16 +316,19 @@ struct queue_limits {
typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
void *data);
+void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model);
+
#ifdef CONFIG_BLK_DEV_ZONED
#define BLK_ALL_ZONES ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
-unsigned int blkdev_nr_zones(struct gendisk *disk);
-extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
+unsigned int bdev_nr_zones(struct block_device *bdev);
+extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
sector_t sectors, sector_t nr_sectors,
gfp_t gfp_mask);
-extern int blk_revalidate_disk_zones(struct gendisk *disk);
+int blk_revalidate_disk_zones(struct gendisk *disk,
+ void (*update_driver_data)(struct gendisk *disk));
extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
@@ -370,7 +337,7 @@ extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
#else /* CONFIG_BLK_DEV_ZONED */
-static inline unsigned int blkdev_nr_zones(struct gendisk *disk)
+static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
return 0;
}
@@ -391,16 +358,42 @@ static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
#endif /* CONFIG_BLK_DEV_ZONED */
+/*
+ * Independent access ranges: struct blk_independent_access_range describes
+ * a range of contiguous sectors that can be accessed using device command
+ * execution resources that are independent from the resources used for
+ * other access ranges. This is typically found with single-LUN multi-actuator
+ * HDDs where each access range is served by a different set of heads.
+ * The set of independent ranges supported by the device is defined using
+ * struct blk_independent_access_ranges. The independent ranges must not overlap
+ * and must include all sectors within the disk capacity (no sector holes
+ * allowed).
+ * For a device with multiple ranges, requests targeting sectors in different
+ * ranges can be executed in parallel. A request can straddle an access range
+ * boundary.
+ */
+struct blk_independent_access_range {
+ struct kobject kobj;
+ sector_t sector;
+ sector_t nr_sectors;
+};
+
+struct blk_independent_access_ranges {
+ struct kobject kobj;
+ bool sysfs_registered;
+ unsigned int nr_ia_ranges;
+ struct blk_independent_access_range ia_range[];
+};
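[Assuming a disk with ia_ranges already populated, a sketch (not from this patch) of walking the ranges:]

	struct blk_independent_access_ranges *iars = disk->ia_ranges;
	unsigned int i;

	for (i = 0; iars && i < iars->nr_ia_ranges; i++)
		pr_info("range %u: %llu + %llu sectors\n", i,
			(unsigned long long)iars->ia_range[i].sector,
			(unsigned long long)iars->ia_range[i].nr_sectors);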
+
struct request_queue {
struct request *last_merge;
struct elevator_queue *elevator;
+ struct percpu_ref q_usage_counter;
+
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
- make_request_fn *make_request_fn;
- dma_drain_needed_fn *dma_drain_needed;
-
const struct blk_mq_ops *mq_ops;
/* sw queues */
@@ -409,11 +402,9 @@ struct request_queue {
unsigned int queue_depth;
/* hw dispatch queues */
- struct blk_mq_hw_ctx **queue_hw_ctx;
+ struct xarray hctx_table;
unsigned int nr_hw_queues;
- struct backing_dev_info *backing_dev_info;
-
/*
* The queue owner gets to use this for whatever they like.
* ll_rw_blk doesn't touch it.
@@ -426,8 +417,7 @@ struct request_queue {
unsigned long queue_flags;
/*
* Number of contexts that have called blk_set_pm_only(). If this
- * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
- * processed.
+ * counter is above zero then only RQF_PM requests are processed.
*/
atomic_t pm_only;
@@ -437,13 +427,10 @@ struct request_queue {
*/
int id;
- /*
- * queue needs bounce pages for pages above this limit
- */
- gfp_t bounce_gfp;
-
spinlock_t queue_lock;
+ struct gendisk *disk;
+
/*
* queue kobject
*/
@@ -460,8 +447,7 @@ struct request_queue {
#ifdef CONFIG_PM
struct device *dev;
- int rpm_status;
- unsigned int nr_pending;
+ enum rpm_status rpm_status;
#endif
/*
@@ -469,20 +455,32 @@ struct request_queue {
*/
unsigned long nr_requests; /* Max # of requests */
- unsigned int dma_drain_size;
- void *dma_drain_buffer;
unsigned int dma_pad_mask;
+ /*
+ * Drivers that set dma_alignment to less than 511 must be prepared to
+ * handle individual bvec's that are not a multiple of a SECTOR_SIZE
+ * due to possible offsets.
+ */
unsigned int dma_alignment;
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ struct blk_crypto_profile *crypto_profile;
+ struct kobject *crypto_kobject;
+#endif
+
unsigned int rq_timeout;
int poll_nsec;
struct blk_stat_callback *poll_cb;
- struct blk_rq_stat poll_stat[BLK_MQ_POLL_STATS_BKTS];
+ struct blk_rq_stat *poll_stat;
struct timer_list timeout;
struct work_struct timeout_work;
+ atomic_t nr_active_requests_shared_tags;
+
+ struct blk_mq_tags *sched_shared_tags;
+
struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
@@ -494,38 +492,9 @@ struct request_queue {
unsigned int required_elevator_features;
-#ifdef CONFIG_BLK_DEV_ZONED
- /*
- * Zoned block device information for request dispatch control.
- * nr_zones is the total number of zones of the device. This is always
- * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
- * bits which indicates if a zone is conventional (bit set) or
- * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
- * bits which indicates if a zone is write locked, that is, if a write
- * request targeting the zone was dispatched. All three fields are
- * initialized by the low level device driver (e.g. scsi/sd.c).
- * Stacking drivers (device mappers) may or may not initialize
- * these fields.
- *
- * Reads of this information must be protected with blk_queue_enter() /
- * blk_queue_exit(). Modifying this information is only allowed while
- * no requests are being processed. See also blk_mq_freeze_queue() and
- * blk_mq_unfreeze_queue().
- */
- unsigned int nr_zones;
- unsigned long *conv_zones_bitmap;
- unsigned long *seq_zones_wlock;
-#endif /* CONFIG_BLK_DEV_ZONED */
-
- /*
- * sg stuff
- */
- unsigned int sg_timeout;
- unsigned int sg_reserved_size;
int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
struct blk_trace __rcu *blk_trace;
- struct mutex blk_trace_mutex;
#endif
/*
* for flush operations
@@ -548,10 +517,6 @@ struct request_queue {
int mq_freeze_depth;
-#if defined(CONFIG_BLK_DEV_BSG)
- struct bsg_class_device bsg_dev;
-#endif
-
#ifdef CONFIG_BLK_DEV_THROTTLING
/* Throttle data */
struct throtl_data *td;
@@ -563,58 +528,61 @@ struct request_queue {
* percpu_ref_kill() and percpu_ref_reinit().
*/
struct mutex mq_freeze_lock;
- struct percpu_ref q_usage_counter;
+
+ int quiesce_depth;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
- struct bio_set bio_split;
-#ifdef CONFIG_BLK_DEBUG_FS
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
-#endif
+ /*
+ * Serializes all debugfs metadata operations using the above dentries.
+ */
+ struct mutex debugfs_mutex;
bool mq_sysfs_init_done;
- size_t cmd_size;
-
- struct work_struct release_work;
-
-#define BLK_MAX_WRITE_HINTS 5
- u64 write_hints[BLK_MAX_WRITE_HINTS];
+ /**
+ * @srcu: Sleepable RCU. Use as lock when type of the request queue
+ * is blocking (BLK_MQ_F_BLOCKING). Must be the last member
+ */
+ struct srcu_struct srcu[];
};
+/* Keep blk_queue_flag_name[] in sync with the definitions below */
#define QUEUE_FLAG_STOPPED 0 /* queue is stopped */
#define QUEUE_FLAG_DYING 1 /* queue being torn down */
+#define QUEUE_FLAG_HAS_SRCU 2 /* SRCU is allocated */
#define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */
#define QUEUE_FLAG_NONROT 6 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT 7 /* do disk/partitions IO accounting */
-#define QUEUE_FLAG_DISCARD 8 /* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */
-#define QUEUE_FLAG_SECERASE 11 /* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
-#define QUEUE_FLAG_DEAD 13 /* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
+#define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */
#define QUEUE_FLAG_WC 17 /* Write back caching */
#define QUEUE_FLAG_FUA 18 /* device supports FUA writes */
#define QUEUE_FLAG_DAX 19 /* device supports DAX */
#define QUEUE_FLAG_STATS 20 /* track IO start and completion times */
-#define QUEUE_FLAG_POLL_STATS 21 /* collecting stats for hybrid polling */
#define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */
-#define QUEUE_FLAG_SCSI_PASSTHROUGH 23 /* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */
#define QUEUE_FLAG_ZONE_RESETALL 26 /* supports Zone Reset All */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */
+#define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */
+#define QUEUE_FLAG_NOWAIT 29 /* device supports NOWAIT */
+#define QUEUE_FLAG_SQ_SCHED 30 /* single queue style io dispatch */
-#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
- (1 << QUEUE_FLAG_SAME_COMP))
+#define QUEUE_FLAG_MQ_DEFAULT ((1UL << QUEUE_FLAG_IO_STAT) | \
+ (1UL << QUEUE_FLAG_SAME_COMP) | \
+ (1UL << QUEUE_FLAG_NOWAIT))
void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
@@ -622,22 +590,19 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
-#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
+#define blk_queue_has_srcu(q) test_bit(QUEUE_FLAG_HAS_SRCU, &(q)->queue_flags)
#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
+#define blk_queue_stable_writes(q) \
+ test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
-#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_zone_resetall(q) \
test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
-#define blk_queue_secure_erase(q) \
- (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
-#define blk_queue_scsi_passthrough(q) \
- test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q) \
test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
@@ -652,24 +617,14 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
-#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
+#define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);
-static inline bool blk_account_rq(struct request *rq)
-{
- return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
-}
-
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
-#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
-
-#define rq_dma_dir(rq) \
- (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
-
#define dma_map_bvec(dev, bv, dir, attrs) \
dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
(dir), (attrs))
@@ -679,10 +634,24 @@ static inline bool queue_is_mq(struct request_queue *q)
return q->mq_ops;
}
+#ifdef CONFIG_PM
+static inline enum rpm_status queue_rpm_status(struct request_queue *q)
+{
+ return q->rpm_status;
+}
+#else
+static inline enum rpm_status queue_rpm_status(struct request_queue *q)
+{
+ return RPM_ACTIVE;
+}
+#endif
+
static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
- return q->limits.zoned;
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
+ return q->limits.zoned;
+ return BLK_ZONED_NONE;
}
static inline bool blk_queue_is_zoned(struct request_queue *q)
@@ -696,73 +665,73 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
}
}
-static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
-{
- return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
-}
-
#ifdef CONFIG_BLK_DEV_ZONED
-static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
- return blk_queue_is_zoned(q) ? q->nr_zones : 0;
+ return blk_queue_is_zoned(disk->queue) ? disk->nr_zones : 0;
}
-static inline unsigned int blk_queue_zone_no(struct request_queue *q,
- sector_t sector)
+static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
- if (!blk_queue_is_zoned(q))
+ if (!blk_queue_is_zoned(disk->queue))
return 0;
- return sector >> ilog2(q->limits.chunk_sectors);
+ return sector >> ilog2(disk->queue->limits.chunk_sectors);
}
-static inline bool blk_queue_zone_is_seq(struct request_queue *q,
- sector_t sector)
+static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
{
- if (!blk_queue_is_zoned(q))
+ if (!blk_queue_is_zoned(disk->queue))
return false;
- if (!q->conv_zones_bitmap)
+ if (!disk->conv_zones_bitmap)
return true;
- return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap);
+ return !test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
}
-#else /* CONFIG_BLK_DEV_ZONED */
-static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
+
+static inline void disk_set_max_open_zones(struct gendisk *disk,
+ unsigned int max_open_zones)
{
- return 0;
+ disk->max_open_zones = max_open_zones;
}
-#endif /* CONFIG_BLK_DEV_ZONED */
-static inline bool rq_is_sync(struct request *rq)
+static inline void disk_set_max_active_zones(struct gendisk *disk,
+ unsigned int max_active_zones)
{
- return op_is_sync(rq->cmd_flags);
+ disk->max_active_zones = max_active_zones;
}
-static inline bool rq_mergeable(struct request *rq)
+static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
- if (blk_rq_is_passthrough(rq))
- return false;
-
- if (req_op(rq) == REQ_OP_FLUSH)
- return false;
-
- if (req_op(rq) == REQ_OP_WRITE_ZEROES)
- return false;
-
- if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
- return false;
- if (rq->rq_flags & RQF_NOMERGE_FLAGS)
- return false;
-
- return true;
+ return bdev->bd_disk->max_open_zones;
}
-static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
+static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
- if (bio_page(a) == bio_page(b) &&
- bio_offset(a) == bio_offset(b))
- return true;
+ return bdev->bd_disk->max_active_zones;
+}
+#else /* CONFIG_BLK_DEV_ZONED */
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
+{
+ return 0;
+}
+static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
+{
return false;
}
+static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
+{
+ return 0;
+}
+static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
+{
+ return 0;
+}
+
+static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
+{
+ return 0;
+}
+#endif /* CONFIG_BLK_DEV_ZONED */
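[A hedged sketch tying the new disk-based zone helpers together; the variables are assumed from a bio submission path:]

	struct gendisk *disk = bio->bi_bdev->bd_disk;
	sector_t sector = bio->bi_iter.bi_sector;

	if (disk_zone_is_seq(disk, sector))
		pr_debug("zone %u requires sequential writes\n",
			 disk_zone_no(disk, sector));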
static inline unsigned int blk_queue_depth(struct request_queue *q)
{
@@ -772,315 +741,204 @@ static inline unsigned int blk_queue_depth(struct request_queue *q)
return q->nr_requests;
}
-extern unsigned long blk_max_low_pfn, blk_max_pfn;
-
-/*
- * standard bounce addresses:
- *
- * BLK_BOUNCE_HIGH : bounce all highmem pages
- * BLK_BOUNCE_ANY : don't bounce anything
- * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary
- */
-
-#if BITS_PER_LONG == 32
-#define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT)
-#else
-#define BLK_BOUNCE_HIGH -1ULL
-#endif
-#define BLK_BOUNCE_ANY (-1ULL)
-#define BLK_BOUNCE_ISA (DMA_BIT_MASK(24))
-
/*
* default timeout for SG_IO if none specified
*/
#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
#define BLK_MIN_SG_TIMEOUT (7 * HZ)
-struct rq_map_data {
- struct page **pages;
- int page_order;
- int nr_entries;
- unsigned long offset;
- int null_mapped;
- int from_user;
-};
-
-struct req_iterator {
- struct bvec_iter iter;
- struct bio *bio;
-};
-
/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio) \
for (; _bio; _bio = _bio->bi_next)
-#define __rq_for_each_bio(_bio, rq) \
- if ((rq->bio)) \
- for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
-
-#define rq_for_each_segment(bvl, _rq, _iter) \
- __rq_for_each_bio(_iter.bio, _rq) \
- bio_for_each_segment(bvl, _iter.bio, _iter.iter)
-
-#define rq_for_each_bvec(bvl, _rq, _iter) \
- __rq_for_each_bio(_iter.bio, _rq) \
- bio_for_each_bvec(bvl, _iter.bio, _iter.iter)
-
-#define rq_iter_last(bvec, _iter) \
- (_iter.bio->bi_next == NULL && \
- bio_iter_last(bvec, _iter.iter))
-
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
-#endif
-#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-extern void rq_flush_dcache_pages(struct request *rq);
-#else
-static inline void rq_flush_dcache_pages(struct request *rq)
-{
-}
-#endif
-
-extern int blk_register_queue(struct gendisk *disk);
-extern void blk_unregister_queue(struct gendisk *disk);
-extern blk_qc_t generic_make_request(struct bio *bio);
-extern blk_qc_t direct_make_request(struct bio *bio);
-extern void blk_rq_init(struct request_queue *q, struct request *rq);
-extern void blk_put_request(struct request *);
-extern struct request *blk_get_request(struct request_queue *, unsigned int op,
- blk_mq_req_flags_t flags);
-extern int blk_lld_busy(struct request_queue *q);
-extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
- struct bio_set *bs, gfp_t gfp_mask,
- int (*bio_ctr)(struct bio *, struct bio *, void *),
- void *data);
-extern void blk_rq_unprep_clone(struct request *rq);
-extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
- struct request *rq);
-extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
-extern void blk_queue_split(struct request_queue *, struct bio **);
-extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
-extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
- unsigned int, void __user *);
-extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
- unsigned int, void __user *);
-extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
- struct scsi_ioctl_command __user *);
-extern int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
-extern int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);
-
-extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
-extern void blk_queue_exit(struct request_queue *q);
-extern void blk_sync_queue(struct request_queue *q);
-extern int blk_rq_map_user(struct request_queue *, struct request *,
- struct rq_map_data *, void __user *, unsigned long,
- gfp_t);
-extern int blk_rq_unmap_user(struct bio *);
-extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
-extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, const struct iov_iter *,
- gfp_t);
-extern void blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
-extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
- struct request *, int, rq_end_io_fn *);
-
-/* Helper to convert REQ_OP_XXX to its string format XXX */
-extern const char *blk_op_str(unsigned int op);
-int blk_status_to_errno(blk_status_t status);
-blk_status_t errno_to_blk_status(int errno);
-
-int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
-
-static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
+int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups);
+static inline int __must_check add_disk(struct gendisk *disk)
{
- return bdev->bd_disk->queue; /* this is never NULL */
+ return device_add_disk(NULL, disk, NULL);
}
+void del_gendisk(struct gendisk *gp);
+void invalidate_disk(struct gendisk *disk);
+void set_disk_ro(struct gendisk *disk, bool read_only);
+void disk_uevent(struct gendisk *disk, enum kobject_action action);
-/*
- * The basic unit of block I/O is a sector. It is used in a number of contexts
- * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
- * bytes. Variables of type sector_t represent an offset or size that is a
- * multiple of 512 bytes. Hence these two constants.
- */
-#ifndef SECTOR_SHIFT
-#define SECTOR_SHIFT 9
-#endif
-#ifndef SECTOR_SIZE
-#define SECTOR_SIZE (1 << SECTOR_SHIFT)
-#endif
-
-/*
- * blk_rq_pos() : the current sector
- * blk_rq_bytes() : bytes left in the entire request
- * blk_rq_cur_bytes() : bytes left in the current segment
- * blk_rq_err_bytes() : bytes left till the next error boundary
- * blk_rq_sectors() : sectors left in the entire request
- * blk_rq_cur_sectors() : sectors left in the current segment
- * blk_rq_stats_sectors() : sectors of the entire request used for stats
- */
-static inline sector_t blk_rq_pos(const struct request *rq)
+static inline int get_disk_ro(struct gendisk *disk)
{
- return rq->__sector;
+ return disk->part0->bd_read_only ||
+ test_bit(GD_READ_ONLY, &disk->state);
}
-static inline unsigned int blk_rq_bytes(const struct request *rq)
+static inline int bdev_read_only(struct block_device *bdev)
{
- return rq->__data_len;
+ return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
}
-static inline int blk_rq_cur_bytes(const struct request *rq)
-{
- return rq->bio ? bio_cur_bytes(rq->bio) : 0;
-}
+bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
+bool disk_force_media_change(struct gendisk *disk, unsigned int events);
-extern unsigned int blk_rq_err_bytes(const struct request *rq);
+void add_disk_randomness(struct gendisk *disk) __latent_entropy;
+void rand_initialize_disk(struct gendisk *disk);
-static inline unsigned int blk_rq_sectors(const struct request *rq)
+static inline sector_t get_start_sect(struct block_device *bdev)
{
- return blk_rq_bytes(rq) >> SECTOR_SHIFT;
+ return bdev->bd_start_sect;
}
-static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
- return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
+ return bdev->bd_nr_sectors;
}
-static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
+static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
- return rq->stats_sectors;
+ return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}
-#ifdef CONFIG_BLK_DEV_ZONED
-static inline unsigned int blk_rq_zone_no(struct request *rq)
+static inline sector_t get_capacity(struct gendisk *disk)
{
- return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
+ return bdev_nr_sectors(disk->part0);
}
-static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
+static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
- return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
+ return bdev_nr_sectors(sb->s_bdev) >>
+ (sb->s_blocksize_bits - SECTOR_SHIFT);
}
-#endif /* CONFIG_BLK_DEV_ZONED */
-/*
- * Some commands like WRITE SAME have a payload or data transfer size which
- * is different from the size of the request. Any driver that supports such
- * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
- * calculate the data transfer size.
+int bdev_disk_changed(struct gendisk *disk, bool invalidate);
+
+void put_disk(struct gendisk *disk);
+struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);
+
+/**
+ * blk_alloc_disk - allocate a gendisk structure
+ * @node_id: numa node to allocate on
+ *
+ * Allocate and pre-initialize a gendisk structure for use with BIO based
+ * drivers.
+ *
+ * Context: can sleep
*/
-static inline unsigned int blk_rq_payload_bytes(struct request *rq)
+#define blk_alloc_disk(node_id) \
+({ \
+ static struct lock_class_key __key; \
+ \
+ __blk_alloc_disk(node_id, &__key); \
+})
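[Putting the allocation helpers together, a hypothetical BIO-based driver's probe might look like this sketch; my_bio_ops and nr_sectors are assumptions, not from this header:]

	struct gendisk *disk;
	int err;

	disk = blk_alloc_disk(NUMA_NO_NODE);
	if (!disk)
		return -ENOMEM;
	disk->fops = &my_bio_ops;		/* hypothetical block_device_operations */
	strscpy(disk->disk_name, "myblk0", DISK_NAME_LEN);
	set_capacity(disk, nr_sectors);		/* nr_sectors: assumed */
	err = add_disk(disk);
	if (err)
		put_disk(disk);
	return err;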
+
+int __register_blkdev(unsigned int major, const char *name,
+ void (*probe)(dev_t devt));
+#define register_blkdev(major, name) \
+ __register_blkdev(major, name, NULL)
+void unregister_blkdev(unsigned int major, const char *name);
+
+bool bdev_check_media_change(struct block_device *bdev);
+int __invalidate_device(struct block_device *bdev, bool kill_dirty);
+void set_capacity(struct gendisk *disk, sector_t size);
+
+#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
+int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
+void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
+int bd_register_pending_holders(struct gendisk *disk);
+#else
+static inline int bd_link_disk_holder(struct block_device *bdev,
+ struct gendisk *disk)
{
- if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
- return rq->special_vec.bv_len;
- return blk_rq_bytes(rq);
+ return 0;
}
-
-/*
- * Return the first full biovec in the request. The caller needs to check that
- * there are any bvecs before calling this helper.
- */
-static inline struct bio_vec req_bvec(struct request *rq)
+static inline void bd_unlink_disk_holder(struct block_device *bdev,
+ struct gendisk *disk)
{
- if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
- return rq->special_vec;
- return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
}
-
-static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
- int op)
+static inline int bd_register_pending_holders(struct gendisk *disk)
{
- if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
- return min(q->limits.max_discard_sectors,
- UINT_MAX >> SECTOR_SHIFT);
-
- if (unlikely(op == REQ_OP_WRITE_SAME))
- return q->limits.max_write_same_sectors;
-
- if (unlikely(op == REQ_OP_WRITE_ZEROES))
- return q->limits.max_write_zeroes_sectors;
-
- return q->limits.max_sectors;
+ return 0;
}
+#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */
-/*
- * Return maximum size of a request at given offset. Only valid for
- * file system requests.
- */
-static inline unsigned int blk_max_size_offset(struct request_queue *q,
- sector_t offset)
-{
- if (!q->limits.chunk_sectors)
- return q->limits.max_sectors;
+dev_t part_devt(struct gendisk *disk, u8 partno);
+void inc_diskseq(struct gendisk *disk);
+dev_t blk_lookup_devt(const char *name, int partno);
+void blk_request_module(dev_t devt);
- return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
- (offset & (q->limits.chunk_sectors - 1))));
-}
+extern int blk_register_queue(struct gendisk *disk);
+extern void blk_unregister_queue(struct gendisk *disk);
+void submit_bio_noacct(struct bio *bio);
+struct bio *bio_split_to_limits(struct bio *bio);
-static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
- sector_t offset)
-{
- struct request_queue *q = rq->q;
+extern int blk_lld_busy(struct request_queue *q);
+extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
+extern void blk_queue_exit(struct request_queue *q);
+extern void blk_sync_queue(struct request_queue *q);
- if (blk_rq_is_passthrough(rq))
- return q->limits.max_hw_sectors;
+/* Helper to convert REQ_OP_XXX to its string format XXX */
+extern const char *blk_op_str(enum req_op op);
- if (!q->limits.chunk_sectors ||
- req_op(rq) == REQ_OP_DISCARD ||
- req_op(rq) == REQ_OP_SECURE_ERASE)
- return blk_queue_get_max_sectors(q, req_op(rq));
+int blk_status_to_errno(blk_status_t status);
+blk_status_t errno_to_blk_status(int errno);
- return min(blk_max_size_offset(q, offset),
- blk_queue_get_max_sectors(q, req_op(rq)));
-}
+/* only poll the hardware once, don't continue until a completion was found */
+#define BLK_POLL_ONESHOT (1 << 0)
+/* do not sleep to wait for the expected completion time */
+#define BLK_POLL_NOSLEEP (1 << 1)
+int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
+int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
+ unsigned int flags);
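[A hedged polling loop; done is assumed to be a flag set by the bio's end_io callback, and this is a sketch rather than the canonical pattern:]

	while (!READ_ONCE(done))
		bio_poll(bio, NULL, BLK_POLL_NOSLEEP);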
-static inline unsigned int blk_rq_count_bios(struct request *rq)
+static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
- unsigned int nr_bios = 0;
- struct bio *bio;
+ return bdev->bd_queue; /* this is never NULL */
+}
- __rq_for_each_bio(bio, rq)
- nr_bios++;
+/* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */
+const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);
- return nr_bios;
+static inline unsigned int bio_zone_no(struct bio *bio)
+{
+ return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}
-void blk_steal_bios(struct bio_list *list, struct request *rq);
+static inline unsigned int bio_zone_is_seq(struct bio *bio)
+{
+ return disk_zone_is_seq(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
+}
/*
- * Request completion related functions.
- *
- * blk_update_request() completes given number of bytes and updates
- * the request without completing it.
+ * Return how much of the chunk is left to be used for I/O at a given offset.
*/
-extern bool blk_update_request(struct request *rq, blk_status_t error,
- unsigned int nr_bytes);
-
-extern void __blk_complete_request(struct request *);
-extern void blk_abort_request(struct request *);
+static inline unsigned int blk_chunk_sectors_left(sector_t offset,
+ unsigned int chunk_sectors)
+{
+ if (unlikely(!is_power_of_2(chunk_sectors)))
+ return chunk_sectors - sector_div(offset, chunk_sectors);
+ return chunk_sectors - (offset & (chunk_sectors - 1));
+}
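[Worked example, editorial: with chunk_sectors = 256 and offset = 1000, the power-of-two branch yields 256 - (1000 & 255) = 256 - 232 = 24 sectors left before the chunk boundary.]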
/*
* Access functions for manipulating queue properties
*/
-extern void blk_cleanup_queue(struct request_queue *);
-extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
-extern void blk_queue_bounce_limit(struct request_queue *, u64);
+void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_discard_segments(struct request_queue *,
unsigned short);
+void blk_queue_max_secure_erase_sectors(struct request_queue *q,
+ unsigned int max_sectors);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
unsigned int max_discard_sectors);
-extern void blk_queue_max_write_same_sectors(struct request_queue *q,
- unsigned int max_write_same_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
+extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
+ unsigned int max_zone_append_sectors);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
+void blk_queue_zone_write_granularity(struct request_queue *q,
+ unsigned int size);
extern void blk_queue_alignment_offset(struct request_queue *q,
unsigned int alignment);
+void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
@@ -1090,61 +948,38 @@ extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t offset);
-extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
- sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
sector_t offset);
-extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
-extern int blk_queue_dma_drain(struct request_queue *q,
- dma_drain_needed_fn *dma_drain_needed,
- void *buf, unsigned int size);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
-extern void blk_queue_required_elevator_features(struct request_queue *q,
- unsigned int features);
-extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
- struct device *dev);
-/*
- * Number of physical segments as sent to the device.
- *
- * Normally this is the number of discontiguous data segments sent by the
- * submitter. But for a data-less command like discard we might have no
- * actual data segments submitted, but the driver might have to add its
- * own special payload. In that case we still return 1 here so that this
- * special payload will be mapped.
- */
-static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
-{
- if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
- return 1;
- return rq->nr_phys_segments;
-}
+struct blk_independent_access_ranges *
+disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
+void disk_set_independent_access_ranges(struct gendisk *disk,
+ struct blk_independent_access_ranges *iars);
/*
- * Number of discard segments (or ranges) the driver needs to fill in.
- * Each discard bio merged into a request is counted as one segment.
+ * Elevator features for blk_queue_required_elevator_features:
*/
-static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
-{
- return max_t(unsigned short, rq->nr_phys_segments, 1);
-}
+/* Supports zoned block devices sequential write constraint */
+#define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0)
-extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
-extern void blk_dump_rq_flags(struct request *, char *);
-extern long nr_blockdev_pages(void);
+extern void blk_queue_required_elevator_features(struct request_queue *q,
+ unsigned int features);
+extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
+ struct device *dev);
bool __must_check blk_get_queue(struct request_queue *);
-struct request_queue *blk_alloc_queue(gfp_t);
-struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id);
extern void blk_put_queue(struct request_queue *);
-extern void blk_set_queue_dying(struct request_queue *);
+void blk_mark_disk_dead(struct gendisk *disk);
+
+#ifdef CONFIG_BLOCK
/*
* blk_plug permits building a queue of related requests by holding the I/O
* fragments for a short period. This allows merging of sequential requests
@@ -1153,18 +988,24 @@ extern void blk_set_queue_dying(struct request_queue *);
* as the lock contention for request_queue lock is reduced.
*
* It is ok not to disable preemption when adding the request to the plug list
- * or when attempting a merge, because blk_schedule_flush_list() will only flush
- * the plug list when the task sleeps by itself. For details, please see
- * schedule() where blk_schedule_flush_plug() is called.
+ * or when attempting a merge. For details, please see schedule() where
+ * blk_flush_plug() is called.
*/
struct blk_plug {
- struct list_head mq_list; /* blk-mq requests */
- struct list_head cb_list; /* md requires an unplug callback */
+ struct request *mq_list; /* blk-mq requests */
+
+ /* if ios_left is > 1, we can batch tag/rq allocations */
+ struct request *cached_rq;
+ unsigned short nr_ios;
+
unsigned short rq_count;
+
bool multiple_queues;
+ bool has_elevator;
+ bool nowait;
+
+ struct list_head cb_list; /* md requires an unplug callback */
};
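[The canonical plugging pattern is unchanged by this reshuffle; a brief sketch, with bio assumed already built:]

	struct blk_plug plug;

	blk_start_plug(&plug);
	submit_bio(bio);	/* related submissions in here may be batched */
	blk_finish_plug(&plug);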
-#define BLK_MAX_REQUEST_COUNT 16
-#define BLK_PLUG_FLUSH_SIZE (128 * 1024)
struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
@@ -1176,45 +1017,58 @@ struct blk_plug_cb {
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
void *data, int size);
extern void blk_start_plug(struct blk_plug *);
+extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);
-extern void blk_flush_plug_list(struct blk_plug *, bool);
-static inline void blk_flush_plug(struct task_struct *tsk)
+void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
+static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
- struct blk_plug *plug = tsk->plug;
-
if (plug)
- blk_flush_plug_list(plug, false);
+ __blk_flush_plug(plug, async);
}
-static inline void blk_schedule_flush_plug(struct task_struct *tsk)
+int blkdev_issue_flush(struct block_device *bdev);
+long nr_blockdev_pages(void);
+#else /* CONFIG_BLOCK */
+struct blk_plug {
+};
+
+static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
+ unsigned short nr_ios)
{
- struct blk_plug *plug = tsk->plug;
+}
- if (plug)
- blk_flush_plug_list(plug, true);
+static inline void blk_start_plug(struct blk_plug *plug)
+{
}
-static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+static inline void blk_finish_plug(struct blk_plug *plug)
{
- struct blk_plug *plug = tsk->plug;
+}
- return plug &&
- (!list_empty(&plug->mq_list) ||
- !list_empty(&plug->cb_list));
+static inline void blk_flush_plug(struct blk_plug *plug, bool async)
+{
}
-extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
-extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, struct page *page);
+static inline int blkdev_issue_flush(struct block_device *bdev)
+{
+ return 0;
+}
-#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */
+static inline long nr_blockdev_pages(void)
+{
+ return 0;
+}
+#endif /* CONFIG_BLOCK */
-extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
-extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, int flags,
- struct bio **biop);
+extern void blk_io_schedule(void);
+
+int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask);
+int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
+int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp);
#define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */
@@ -1233,7 +1087,7 @@ static inline int sb_issue_discard(struct super_block *sb, sector_t block,
SECTOR_SHIFT),
nr_blocks << (sb->s_blocksize_bits -
SECTOR_SHIFT),
- gfp_mask, flags);
+ gfp_mask);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
sector_t nr_blocks, gfp_t gfp_mask)
@@ -1246,7 +1100,10 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
gfp_mask, 0);
}
-extern int blk_verify_command(unsigned char *cmd, fmode_t mode);
+static inline bool bdev_is_partition(struct block_device *bdev)
+{
+ return bdev->bd_partno;
+}
enum blk_default_limits {
BLK_MAX_SEGMENTS = 128,
@@ -1271,6 +1128,11 @@ static inline unsigned int queue_max_sectors(const struct request_queue *q)
return q->limits.max_sectors;
}
+static inline unsigned int queue_max_bytes(struct request_queue *q)
+{
+ return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
+}
+
static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
return q->limits.max_hw_sectors;
@@ -1291,6 +1153,25 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q)
return q->limits.max_segment_size;
}
+static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
+{
+ const struct queue_limits *l = &q->limits;
+
+ return min(l->max_zone_append_sectors, l->max_sectors);
+}
+
+static inline unsigned int
+bdev_max_zone_append_sectors(struct block_device *bdev)
+{
+ return queue_max_zone_append_sectors(bdev_get_queue(bdev));
+}
+
+static inline unsigned int bdev_max_segments(struct block_device *bdev)
+{
+ return queue_max_segments(bdev_get_queue(bdev));
+}
+
static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
int retval = 512;
@@ -1336,95 +1217,71 @@ static inline int bdev_io_opt(struct block_device *bdev)
return queue_io_opt(bdev_get_queue(bdev));
}
-static inline int queue_alignment_offset(const struct request_queue *q)
+static inline unsigned int
+queue_zone_write_granularity(const struct request_queue *q)
{
- if (q->limits.misaligned)
- return -1;
-
- return q->limits.alignment_offset;
+ return q->limits.zone_write_granularity;
}
-static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
+static inline unsigned int
+bdev_zone_write_granularity(struct block_device *bdev)
{
- unsigned int granularity = max(lim->physical_block_size, lim->io_min);
- unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
- << SECTOR_SHIFT;
-
- return (granularity + lim->alignment_offset - alignment) % granularity;
+ return queue_zone_write_granularity(bdev_get_queue(bdev));
}
-static inline int bdev_alignment_offset(struct block_device *bdev)
-{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q->limits.misaligned)
- return -1;
-
- if (bdev != bdev->bd_contains)
- return bdev->bd_part->alignment_offset;
+int bdev_alignment_offset(struct block_device *bdev);
+unsigned int bdev_discard_alignment(struct block_device *bdev);
- return q->limits.alignment_offset;
+static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
+{
+ return bdev_get_queue(bdev)->limits.max_discard_sectors;
}
-static inline int queue_discard_alignment(const struct request_queue *q)
+static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
- if (q->limits.discard_misaligned)
- return -1;
-
- return q->limits.discard_alignment;
+ return bdev_get_queue(bdev)->limits.discard_granularity;
}
-static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
+static inline unsigned int
+bdev_max_secure_erase_sectors(struct block_device *bdev)
{
- unsigned int alignment, granularity, offset;
-
- if (!lim->max_discard_sectors)
- return 0;
-
- /* Why are these in bytes, not sectors? */
- alignment = lim->discard_alignment >> SECTOR_SHIFT;
- granularity = lim->discard_granularity >> SECTOR_SHIFT;
- if (!granularity)
- return 0;
-
- /* Offset of the partition start in 'granularity' sectors */
- offset = sector_div(sector, granularity);
-
- /* And why do we do this modulus *again* in blkdev_issue_discard()? */
- offset = (granularity + alignment - offset) % granularity;
-
- /* Turn it back into bytes, gaah */
- return offset << SECTOR_SHIFT;
+ return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
}
-static inline int bdev_discard_alignment(struct block_device *bdev)
+static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
- if (bdev != bdev->bd_contains)
- return bdev->bd_part->discard_alignment;
+ if (q)
+ return q->limits.max_write_zeroes_sectors;
- return q->limits.discard_alignment;
+ return 0;
}
-static inline unsigned int bdev_write_same(struct block_device *bdev)
+static inline bool bdev_nonrot(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return q->limits.max_write_same_sectors;
+ return blk_queue_nonrot(bdev_get_queue(bdev));
+}
- return 0;
+static inline bool bdev_stable_writes(struct block_device *bdev)
+{
+ return test_bit(QUEUE_FLAG_STABLE_WRITES,
+ &bdev_get_queue(bdev)->queue_flags);
}
-static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
+static inline bool bdev_write_cache(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
+ return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
+}
- if (q)
- return q->limits.max_write_zeroes_sectors;
+static inline bool bdev_fua(struct block_device *bdev)
+{
+ return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
+}
- return 0;
+static inline bool bdev_nowait(struct block_device *bdev)
+{
+ return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
}
static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
@@ -1447,13 +1304,22 @@ static inline bool bdev_is_zoned(struct block_device *bdev)
return false;
}
+static inline bool bdev_op_is_zoned_write(struct block_device *bdev,
+ blk_opf_t op)
+{
+ if (!bdev_is_zoned(bdev))
+ return false;
+
+ return op == REQ_OP_WRITE || op == REQ_OP_WRITE_ZEROES;
+}
+
static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
- if (q)
- return blk_queue_zone_sectors(q);
- return 0;
+ if (!blk_queue_is_zoned(q))
+ return 0;
+ return q->limits.chunk_sectors;
}
static inline int queue_dma_alignment(const struct request_queue *q)
@@ -1461,6 +1327,18 @@ static inline int queue_dma_alignment(const struct request_queue *q)
return q ? q->dma_alignment : 511;
}
+static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
+{
+ return queue_dma_alignment(bdev_get_queue(bdev));
+}
+
+static inline bool bdev_iter_is_aligned(struct block_device *bdev,
+ struct iov_iter *iter)
+{
+ return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
+ bdev_logical_block_size(bdev) - 1);
+}
+
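
A sketch of the intended use in a direct-I/O path; my_dio_rw() is
hypothetical, and the helper above rejects user buffers that do not meet
the device's DMA-alignment and logical-block-size masks:

static ssize_t my_dio_rw(struct block_device *bdev, struct iov_iter *iter)
{
	if (!bdev_iter_is_aligned(bdev, iter))
		return -EINVAL;		/* caller falls back to buffered I/O */
	/* ... issue zero-copy direct I/O ... */
	return 0;
}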
static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
unsigned int len)
{
@@ -1481,16 +1359,7 @@ static inline unsigned int blksize_bits(unsigned int size)
static inline unsigned int block_size(struct block_device *bdev)
{
- return bdev->bd_block_size;
-}
-
-typedef struct {struct page *v;} Sector;
-
-unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
-
-static inline void put_dev_sector(Sector p)
-{
- put_page(p.v);
+ return 1 << bdev->bd_inode->i_blkbits;
}
int kblockd_schedule_work(struct work_struct *work);
@@ -1501,213 +1370,62 @@ int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned lo
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
MODULE_ALIAS("block-major-" __stringify(major) "-*")
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-
-enum blk_integrity_flags {
- BLK_INTEGRITY_VERIFY = 1 << 0,
- BLK_INTEGRITY_GENERATE = 1 << 1,
- BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
- BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
-};
-
-struct blk_integrity_iter {
- void *prot_buf;
- void *data_buf;
- sector_t seed;
- unsigned int data_size;
- unsigned short interval;
- const char *disk_name;
-};
-
-typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
-typedef void (integrity_prepare_fn) (struct request *);
-typedef void (integrity_complete_fn) (struct request *, unsigned int);
-
-struct blk_integrity_profile {
- integrity_processing_fn *generate_fn;
- integrity_processing_fn *verify_fn;
- integrity_prepare_fn *prepare_fn;
- integrity_complete_fn *complete_fn;
- const char *name;
-};
-
-extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
-extern void blk_integrity_unregister(struct gendisk *);
-extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
-extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
- struct scatterlist *);
-extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
-extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
- struct request *);
-extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
- struct bio *);
-
-static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
-{
- struct blk_integrity *bi = &disk->queue->integrity;
-
- if (!bi->profile)
- return NULL;
-
- return bi;
-}
-
-static inline
-struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
-{
- return blk_get_integrity(bdev->bd_disk);
-}
-
-static inline bool blk_integrity_rq(struct request *rq)
-{
- return rq->cmd_flags & REQ_INTEGRITY;
-}
-
-static inline void blk_queue_max_integrity_segments(struct request_queue *q,
- unsigned int segs)
-{
- q->limits.max_integrity_segments = segs;
-}
-
-static inline unsigned short
-queue_max_integrity_segments(const struct request_queue *q)
-{
- return q->limits.max_integrity_segments;
-}
-
-/**
- * bio_integrity_intervals - Return number of integrity intervals for a bio
- * @bi: blk_integrity profile for device
- * @sectors: Size of the bio in 512-byte sectors
- *
- * Description: The block layer calculates everything in 512 byte
- * sectors but integrity metadata is done in terms of the data integrity
- * interval size of the storage device. Convert the block layer sectors
- * to the appropriate number of integrity intervals.
- */
-static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
- unsigned int sectors)
-{
- return sectors >> (bi->interval_exp - 9);
-}
-
-static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
- unsigned int sectors)
-{
- return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
-}
-
-/*
- * Return the first bvec that contains integrity data. Only drivers that are
- * limited to a single integrity segment should use this helper.
- */
-static inline struct bio_vec *rq_integrity_vec(struct request *rq)
-{
- if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
- return NULL;
- return rq->bio->bi_integrity->bip_vec;
-}
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
-#else /* CONFIG_BLK_DEV_INTEGRITY */
+bool blk_crypto_register(struct blk_crypto_profile *profile,
+ struct request_queue *q);
-struct bio;
-struct block_device;
-struct gendisk;
-struct blk_integrity;
+#else /* CONFIG_BLK_INLINE_ENCRYPTION */
-static inline int blk_integrity_rq(struct request *rq)
-{
- return 0;
-}
-static inline int blk_rq_count_integrity_sg(struct request_queue *q,
- struct bio *b)
-{
- return 0;
-}
-static inline int blk_rq_map_integrity_sg(struct request_queue *q,
- struct bio *b,
- struct scatterlist *s)
-{
- return 0;
-}
-static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
-{
- return NULL;
-}
-static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
-{
- return NULL;
-}
-static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
-{
- return 0;
-}
-static inline void blk_integrity_register(struct gendisk *d,
- struct blk_integrity *b)
-{
-}
-static inline void blk_integrity_unregister(struct gendisk *d)
-{
-}
-static inline void blk_queue_max_integrity_segments(struct request_queue *q,
- unsigned int segs)
-{
-}
-static inline unsigned short queue_max_integrity_segments(const struct request_queue *q)
-{
- return 0;
-}
-static inline bool blk_integrity_merge_rq(struct request_queue *rq,
- struct request *r1,
- struct request *r2)
+static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
+ struct request_queue *q)
{
return true;
}
-static inline bool blk_integrity_merge_bio(struct request_queue *rq,
- struct request *r,
- struct bio *b)
-{
- return true;
-}
-
-static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
- unsigned int sectors)
-{
- return 0;
-}
-static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
- unsigned int sectors)
-{
- return 0;
-}
+#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
-static inline struct bio_vec *rq_integrity_vec(struct request *rq)
-{
- return NULL;
-}
+enum blk_unique_id {
+ /* these match the Designator Types specified in SPC */
+ BLK_UID_T10 = 1,
+ BLK_UID_EUI64 = 2,
+ BLK_UID_NAA = 3,
+};
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
+#define NFL4_UFLG_MASK 0x0000003F
struct block_device_operations {
+ void (*submit_bio)(struct bio *bio);
+ int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
+ unsigned int flags);
int (*open) (struct block_device *, fmode_t);
void (*release) (struct gendisk *, fmode_t);
- int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
+ int (*rw_page)(struct block_device *, sector_t, struct page *, enum req_op);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
unsigned int (*check_events) (struct gendisk *disk,
unsigned int clearing);
- /* ->media_changed() is DEPRECATED, use ->check_events() instead */
- int (*media_changed) (struct gendisk *);
void (*unlock_native_capacity) (struct gendisk *);
- int (*revalidate_disk) (struct gendisk *);
int (*getgeo)(struct block_device *, struct hd_geometry *);
+ int (*set_read_only)(struct block_device *bdev, bool ro);
+ void (*free_disk)(struct gendisk *disk);
/* this callback is with swap_lock and sometimes page table lock held */
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
int (*report_zones)(struct gendisk *, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
+ char *(*devnode)(struct gendisk *disk, umode_t *mode);
+ /* returns the length of the identifier or a negative errno: */
+ int (*get_unique_id)(struct gendisk *disk, u8 id[16],
+ enum blk_unique_id id_type);
struct module *owner;
const struct pr_ops *pr_ops;
+
+ /*
+ * Special callback for probing GPT entry at a given sector.
+ * Needed by Android devices, used by GPT scanner and MMC blk
+ * driver.
+ */
+ int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};
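
For reference, a minimal bio-based driver needs only a couple of these
hooks; the names below (my_submit_bio, my_bdev_ops) are hypothetical, but
the field names match the structure above:

static void my_submit_bio(struct bio *bio)
{
	/* a null device: complete every bio immediately */
	bio_endio(bio);
}

static const struct block_device_operations my_bdev_ops = {
	.owner		= THIS_MODULE,
	.submit_bio	= my_submit_bio,
};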
#ifdef CONFIG_COMPAT
@@ -1717,123 +1435,118 @@ extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
#define blkdev_compat_ptr_ioctl NULL
#endif
-extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
- unsigned long);
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
struct writeback_control *);
-#ifdef CONFIG_BLK_DEV_ZONED
-bool blk_req_needs_zone_write_lock(struct request *rq);
-void __blk_req_zone_write_lock(struct request *rq);
-void __blk_req_zone_write_unlock(struct request *rq);
-
-static inline void blk_req_zone_write_lock(struct request *rq)
-{
- if (blk_req_needs_zone_write_lock(rq))
- __blk_req_zone_write_lock(rq);
-}
-
-static inline void blk_req_zone_write_unlock(struct request *rq)
+static inline void blk_wake_io_task(struct task_struct *waiter)
{
- if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
- __blk_req_zone_write_unlock(rq);
+ /*
+ * If we're polling, the task itself is doing the completions. For
+ * that case, we don't need to signal a wakeup, it's enough to just
+ * mark us as RUNNING.
+ */
+ if (waiter == current)
+ __set_current_state(TASK_RUNNING);
+ else
+ wake_up_process(waiter);
}
-static inline bool blk_req_zone_is_write_locked(struct request *rq)
-{
- return rq->q->seq_zones_wlock &&
- test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
-}
+unsigned long bdev_start_io_acct(struct block_device *bdev,
+ unsigned int sectors, enum req_op op,
+ unsigned long start_time);
+void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
+ unsigned long start_time);
-static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
-{
- if (!blk_req_needs_zone_write_lock(rq))
- return true;
- return !blk_req_zone_is_write_locked(rq);
-}
-#else
-static inline bool blk_req_needs_zone_write_lock(struct request *rq)
-{
- return false;
-}
+void bio_start_io_acct_time(struct bio *bio, unsigned long start_time);
+unsigned long bio_start_io_acct(struct bio *bio);
+void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
+ struct block_device *orig_bdev);
-static inline void blk_req_zone_write_lock(struct request *rq)
+/**
+ * bio_end_io_acct - end I/O accounting for bio based drivers
+ * @bio: bio to end account for
+ * @start_time: start time returned by bio_start_io_acct()
+ */
+static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
+ return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}
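
A sketch of the accounting bracket in a bio-based driver; my_submit() is
hypothetical, only the acct helpers above are kernel API:

static void my_submit(struct bio *bio)
{
	unsigned long start = bio_start_io_acct(bio);

	/* ... service the bio; then, on completion: */
	bio_end_io_acct(bio, start);
	bio_endio(bio);
}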
-static inline void blk_req_zone_write_unlock(struct request *rq)
-{
-}
-static inline bool blk_req_zone_is_write_locked(struct request *rq)
-{
- return false;
-}
+int bdev_read_only(struct block_device *bdev);
+int set_blocksize(struct block_device *bdev, int size);
-static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
-{
- return true;
-}
-#endif /* CONFIG_BLK_DEV_ZONED */
+int lookup_bdev(const char *pathname, dev_t *dev);
-#else /* CONFIG_BLOCK */
+void blkdev_show(struct seq_file *seqf, off_t offset);
-struct block_device;
+#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
+#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
+#ifdef CONFIG_BLOCK
+#define BLKDEV_MAJOR_MAX 512
+#else
+#define BLKDEV_MAJOR_MAX 0
+#endif
-/*
- * stubs for when the block layer is configured out
- */
-#define buffer_heads_over_limit 0
+struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
+ void *holder);
+struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder);
+int bd_prepare_to_claim(struct block_device *bdev, void *holder);
+void bd_abort_claiming(struct block_device *bdev, void *holder);
+void blkdev_put(struct block_device *bdev, fmode_t mode);
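
A sketch of the usual open/claim/release pattern (my_attach() is
hypothetical); the mode passed to blkdev_put() must match the one used on
open:

static int my_attach(const char *path, void *holder)
{
	const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
	struct block_device *bdev;

	bdev = blkdev_get_by_path(path, mode, holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	/* ... use the device ... */
	blkdev_put(bdev, mode);
	return 0;
}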
-static inline long nr_blockdev_pages(void)
-{
- return 0;
-}
+/* just for blk-cgroup, don't use elsewhere */
+struct block_device *blkdev_get_no_open(dev_t dev);
+void blkdev_put_no_open(struct block_device *bdev);
-struct blk_plug {
-};
+struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
+void bdev_add(struct block_device *bdev, dev_t dev);
+struct block_device *I_BDEV(struct inode *inode);
+int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
+ loff_t lend);
-static inline void blk_start_plug(struct blk_plug *plug)
+#ifdef CONFIG_BLOCK
+void invalidate_bdev(struct block_device *bdev);
+int sync_blockdev(struct block_device *bdev);
+int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
+int sync_blockdev_nowait(struct block_device *bdev);
+void sync_bdevs(bool wait);
+void bdev_statx_dioalign(struct inode *inode, struct kstat *stat);
+void printk_all_partitions(void);
+#else
+static inline void invalidate_bdev(struct block_device *bdev)
{
}
-
-static inline void blk_finish_plug(struct blk_plug *plug)
+static inline int sync_blockdev(struct block_device *bdev)
{
+ return 0;
}
-
-static inline void blk_flush_plug(struct task_struct *task)
+static inline int sync_blockdev_nowait(struct block_device *bdev)
{
+ return 0;
}
-
-static inline void blk_schedule_flush_plug(struct task_struct *task)
+static inline void sync_bdevs(bool wait)
{
}
-
-
-static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+static inline void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
{
- return false;
}
-
-static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
- sector_t *error_sector)
+static inline void printk_all_partitions(void)
{
- return 0;
}
-
#endif /* CONFIG_BLOCK */
-static inline void blk_wake_io_task(struct task_struct *waiter)
-{
- /*
- * If we're polling, the task itself is doing the completions. For
- * that case, we don't need to signal a wakeup, it's enough to just
- * mark us as RUNNING.
- */
- if (waiter == current)
- __set_current_state(TASK_RUNNING);
- else
- wake_up_process(waiter);
-}
+int fsync_bdev(struct block_device *bdev);
-#endif
+int freeze_bdev(struct block_device *bdev);
+int thaw_bdev(struct block_device *bdev);
+
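freeze_bdev() and thaw_bdev() bracket a quiesced window, e.g. while taking
a snapshot; a sketch, where my_snapshot() and take_image() are
hypothetical:

static int my_snapshot(struct block_device *bdev)
{
	int err = freeze_bdev(bdev);	/* sync and freeze the filesystem */

	if (err)
		return err;
	take_image(bdev);		/* capture a consistent image */
	return thaw_bdev(bdev);
}
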
+struct io_comp_batch {
+ struct request *req_list;
+ bool need_ts;
+ void (*complete)(struct io_comp_batch *);
+};
+
+#define DEFINE_IO_COMP_BATCH(name) struct io_comp_batch name = { }
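
A sketch of how a polling caller might use the batch, assuming bio_poll()
as the polled-I/O entry point in this tree (the loop and my_done flag are
hypothetical): completions accumulate on req_list and are finished in one
go:

DEFINE_IO_COMP_BATCH(iob);

while (!my_done)
	bio_poll(bio, &iob, 0);		/* completions queue on iob.req_list */
if (iob.req_list && iob.complete)
	iob.complete(&iob);		/* run the deferred completions */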
+
+#endif /* _LINUX_BLKDEV_H */
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 3b6ff5902edc..cfbda114348c 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -2,11 +2,12 @@
#ifndef BLKTRACE_H
#define BLKTRACE_H
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/relay.h>
#include <linux/compat.h>
#include <uapi/linux/blktrace_api.h>
#include <linux/list.h>
+#include <linux/blk_types.h>
#if defined(CONFIG_BLK_DEV_IO_TRACE)
@@ -23,18 +24,14 @@ struct blk_trace {
u32 pid;
u32 dev;
struct dentry *dir;
- struct dentry *dropped_file;
- struct dentry *msg_file;
struct list_head running_list;
atomic_t dropped;
};
-struct blkcg;
-
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(struct request_queue *);
-extern __printf(3, 4)
-void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *fmt, ...);
+__printf(3, 4) void __blk_trace_note_message(struct blk_trace *bt,
+ struct cgroup_subsys_state *css, const char *fmt, ...);
/**
* blk_add_trace_msg - Add a (simple) message to the blktrace stream
@@ -49,14 +46,14 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f
 * NOTE: Cannot use 'static inline' due to the presence of varargs...
*
**/
-#define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \
+#define blk_add_cgroup_trace_msg(q, css, fmt, ...) \
do { \
struct blk_trace *bt; \
\
rcu_read_lock(); \
bt = rcu_dereference((q)->blk_trace); \
if (unlikely(bt)) \
- __trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\
+ __blk_trace_note_message(bt, css, fmt, ##__VA_ARGS__);\
rcu_read_unlock(); \
} while (0)
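
For example, a driver can drop a free-form marker into the trace stream
via the wrapper defined below (the format string is hypothetical):

	blk_add_trace_msg(q, "my_drv: requeue sector %llu",
			  (unsigned long long)blk_rq_pos(rq));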
#define blk_add_trace_msg(q, fmt, ...) \
@@ -75,34 +72,23 @@ static inline bool blk_trace_note_message_enabled(struct request_queue *q)
return ret;
}
-extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
- void *data, size_t len);
+extern void blk_add_driver_data(struct request *rq, void *data, size_t len);
extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
struct block_device *bdev,
char __user *arg);
extern int blk_trace_startstop(struct request_queue *q, int start);
extern int blk_trace_remove(struct request_queue *q);
-extern void blk_trace_remove_sysfs(struct device *dev);
-extern int blk_trace_init_sysfs(struct device *dev);
-
-extern struct attribute_group blk_trace_attr_group;
#else /* !CONFIG_BLK_DEV_IO_TRACE */
# define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
# define blk_trace_shutdown(q) do { } while (0)
-# define blk_add_driver_data(q, rq, data, len) do {} while (0)
+# define blk_add_driver_data(rq, data, len) do {} while (0)
# define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY)
# define blk_trace_startstop(q, start) (-ENOTTY)
# define blk_trace_remove(q) (-ENOTTY)
# define blk_add_trace_msg(q, fmt, ...) do { } while (0)
# define blk_add_cgroup_trace_msg(q, cg, fmt, ...) do { } while (0)
-# define blk_trace_remove_sysfs(dev) do { } while (0)
# define blk_trace_note_message_enabled(q) (false)
-static inline int blk_trace_init_sysfs(struct device *dev)
-{
- return 0;
-}
-
#endif /* CONFIG_BLK_DEV_IO_TRACE */
#ifdef CONFIG_COMPAT
@@ -120,7 +106,7 @@ struct compat_blk_user_trace_setup {
#endif
-extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);
+void blk_fill_rwbs(char *rwbs, blk_opf_t opf);
static inline sector_t blk_rq_trace_sector(struct request *rq)
{
diff --git a/include/linux/bma150.h b/include/linux/bma150.h
index 31c9e323a391..4d4a62d49341 100644
--- a/include/linux/bma150.h
+++ b/include/linux/bma150.h
@@ -33,8 +33,8 @@ struct bma150_cfg {
 unsigned char lg_hyst; /* Low-G hysteresis */
unsigned char lg_dur; /* Low-G duration */
unsigned char lg_thres; /* Low-G threshold */
- unsigned char range; /* one of BMA0150_RANGE_xxx */
- unsigned char bandwidth; /* one of BMA0150_BW_xxx */
+ unsigned char range; /* one of BMA150_RANGE_xxx */
+ unsigned char bandwidth; /* one of BMA150_BW_xxx */
};
struct bma150_platform_data {
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
index d11e183fcb54..1611f9db878e 100644
--- a/include/linux/bootconfig.h
+++ b/include/linux/bootconfig.h
@@ -7,18 +7,51 @@
* Author: Masami Hiramatsu <mhiramat@kernel.org>
*/
+#ifdef __KERNEL__
#include <linux/kernel.h>
#include <linux/types.h>
+#else /* !__KERNEL__ */
+/*
+ * NOTE: This is only for tools/bootconfig, which uses this header to
+ * run the parser sanity test.
+ * This does NOT mean linux/bootconfig.h is available in user space.
+ * However, if you change this file, please make sure tools/bootconfig
+ * still builds and runs without issue.
+ */
+#endif
#define BOOTCONFIG_MAGIC "#BOOTCONFIG\n"
#define BOOTCONFIG_MAGIC_LEN 12
+#define BOOTCONFIG_ALIGN_SHIFT 2
+#define BOOTCONFIG_ALIGN (1 << BOOTCONFIG_ALIGN_SHIFT)
+#define BOOTCONFIG_ALIGN_MASK (BOOTCONFIG_ALIGN - 1)
+
+/**
+ * xbc_calc_checksum() - Calculate checksum of bootconfig
+ * @data: Bootconfig data.
+ * @size: The size of the bootconfig data.
+ *
+ * Calculate the checksum value of the bootconfig data.
+ * The checksum will be used with the BOOTCONFIG_MAGIC and the size for
+ * embedding the bootconfig in the initrd image.
+ */
+static inline __init uint32_t xbc_calc_checksum(void *data, uint32_t size)
+{
+ unsigned char *p = data;
+ uint32_t ret = 0;
+
+ while (size--)
+ ret += *p++;
+
+ return ret;
+}
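
The checksum feeds the footer that tools append when attaching a
bootconfig to an initrd. A sketch of the layout and a hypothetical writer
(endianness conversion and padding to BOOTCONFIG_ALIGN are omitted):

/* [initrd][bootconfig data][padding][size (le32)][csum (le32)][#BOOTCONFIG\n] */
static int write_xbc_footer(int fd, void *data, uint32_t size)
{
	uint32_t csum = xbc_calc_checksum(data, size);

	write(fd, &size, sizeof(size));
	write(fd, &csum, sizeof(csum));
	write(fd, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN);
	return 0;
}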
/* XBC tree node */
struct xbc_node {
- u16 next;
- u16 child;
- u16 parent;
- u16 data;
+ uint16_t next;
+ uint16_t child;
+ uint16_t parent;
+ uint16_t data;
} __attribute__ ((__packed__));
#define XBC_KEY 0
@@ -68,7 +101,7 @@ static inline __init bool xbc_node_is_key(struct xbc_node *node)
*/
static inline __init bool xbc_node_is_array(struct xbc_node *node)
{
- return xbc_node_is_value(node) && node->next != 0;
+ return xbc_node_is_value(node) && node->child != 0;
}
/**
@@ -77,6 +110,8 @@ static inline __init bool xbc_node_is_array(struct xbc_node *node)
*
 * Test whether @node is a leaf key node, i.e. a key node that has a value
 * node or no child. Returns true if it is a leaf node, or false if not.
+ * Note that the leaf node can have subkey nodes in addition to the
+ * value node.
*/
static inline __init bool xbc_node_is_leaf(struct xbc_node *node)
{
@@ -85,7 +120,7 @@ static inline __init bool xbc_node_is_leaf(struct xbc_node *node)
}
/* Tree-based key-value access APIs */
-struct xbc_node * __init xbc_node_find_child(struct xbc_node *parent,
+struct xbc_node * __init xbc_node_find_subkey(struct xbc_node *parent,
const char *key);
const char * __init xbc_node_find_value(struct xbc_node *parent,
@@ -123,7 +158,24 @@ xbc_find_value(const char *key, struct xbc_node **vnode)
*/
static inline struct xbc_node * __init xbc_find_node(const char *key)
{
- return xbc_node_find_child(NULL, key);
+ return xbc_node_find_subkey(NULL, key);
+}
+
+/**
+ * xbc_node_get_subkey() - Return the first subkey node if it exists
+ * @node: Parent node
+ *
+ * Return the first subkey node of the @node. If the @node has no child
+ * or only a value node, this will return NULL.
+ */
+static inline struct xbc_node * __init xbc_node_get_subkey(struct xbc_node *node)
+{
+ struct xbc_node *child = xbc_node_get_child(node);
+
+ if (child && xbc_node_is_value(child))
+ return xbc_node_get_next(child);
+ else
+ return child;
}
/**
@@ -137,7 +189,7 @@ static inline struct xbc_node * __init xbc_find_node(const char *key)
*/
#define xbc_array_for_each_value(anode, value) \
for (value = xbc_node_get_data(anode); anode != NULL ; \
- anode = xbc_node_get_next(anode), \
+ anode = xbc_node_get_child(anode), \
value = anode ? xbc_node_get_data(anode) : NULL)
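
Typical use pairs the iterator with a value lookup; a sketch, where the
key string is hypothetical:

	struct xbc_node *anode;
	const char *val;

	val = xbc_find_value("kernel.console", &anode);
	if (val)
		xbc_array_for_each_value(anode, val)
			pr_info("console: %s\n", val);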
/**
@@ -146,12 +198,25 @@ static inline struct xbc_node * __init xbc_find_node(const char *key)
* @child: Iterated XBC node.
*
 * Iterate child nodes of @parent. Each child node is stored to @child.
+ * The @child can be a mixture of a value node and subkey nodes.
*/
#define xbc_node_for_each_child(parent, child) \
for (child = xbc_node_get_child(parent); child != NULL ; \
child = xbc_node_get_next(child))
/**
+ * xbc_node_for_each_subkey() - Iterate child subkey nodes
+ * @parent: An XBC node.
+ * @child: Iterated XBC node.
+ *
+ * Iterate subkey nodes of @parent. Each subkey node is stored to @child.
+ * Unlike xbc_node_for_each_child(), value nodes are skipped.
+ */
+#define xbc_node_for_each_subkey(parent, child) \
+ for (child = xbc_node_get_subkey(parent); child != NULL ; \
+ child = xbc_node_get_next(child))
+
+/**
 * xbc_node_for_each_array_value() - Iterate array entries of given key
* @node: An XBC node.
* @key: A key string searched under @node
@@ -159,16 +224,16 @@ static inline struct xbc_node * __init xbc_find_node(const char *key)
* @value: Iterated value of array entry.
*
* Iterate array entries of given @key under @node. Each array entry node
- * is stroed to @anode and @value. If the @node doesn't have @key node,
+ * is stored to @anode and @value. If the @node doesn't have @key node,
* it does nothing.
* Note that even if the found key node has only one value (not array)
- * this executes block once. Hoever, if the found key node has no value
+ * this executes block once. However, if the found key node has no value
* (key-only node), this does nothing. So don't use this for testing the
* key-value pair existence.
*/
#define xbc_node_for_each_array_value(node, key, anode, value) \
for (value = xbc_node_find_value(node, key, &anode); value != NULL; \
- anode = xbc_node_get_next(anode), \
+ anode = xbc_node_get_child(anode), \
value = anode ? xbc_node_get_data(anode) : NULL)
/**
@@ -216,12 +281,22 @@ static inline int __init xbc_node_compose_key(struct xbc_node *node,
}
/* XBC node initializer */
-int __init xbc_init(char *buf);
+int __init xbc_init(const char *buf, size_t size, const char **emsg, int *epos);
+
+/* XBC node and size information */
+int __init xbc_get_info(int *node_size, size_t *data_size);
/* XBC cleanup data structures */
-void __init xbc_destroy_all(void);
+void __init xbc_exit(void);
-/* Debug dump functions */
-void __init xbc_debug_dump(void);
+/* XBC embedded bootconfig data in kernel */
+#ifdef CONFIG_BOOT_CONFIG_EMBED
+const char * __init xbc_get_embedded_bootconfig(size_t *size);
+#else
+static inline const char *xbc_get_embedded_bootconfig(size_t *size)
+{
+ return NULL;
+}
+#endif
#endif
diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h
new file mode 100644
index 000000000000..cc35d010fa94
--- /dev/null
+++ b/include/linux/bootmem_info.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BOOTMEM_INFO_H
+#define __LINUX_BOOTMEM_INFO_H
+
+#include <linux/mm.h>
+
+/*
+ * Types for free bootmem stored in page->lru.next. These have to be in
+ * some random range in unsigned long space for debugging purposes.
+ */
+enum {
+ MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
+ SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
+ MIX_SECTION_INFO,
+ NODE_INFO,
+ MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
+};
+
+#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
+void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
+
+void get_page_bootmem(unsigned long info, struct page *page,
+ unsigned long type);
+void put_page_bootmem(struct page *page);
+
+/*
+ * Any memory allocated via the memblock allocator and not via the
+ * buddy will be marked reserved already in the memmap. For those
+ * pages, we can call this function to free it to buddy allocator.
+ */
+static inline void free_bootmem_page(struct page *page)
+{
+ unsigned long magic = page->index;
+
+ /*
+ * The reserve_bootmem_region sets the reserved flag on bootmem
+ * pages.
+ */
+ VM_BUG_ON_PAGE(page_ref_count(page) != 2, page);
+
+ if (magic == SECTION_INFO || magic == MIX_SECTION_INFO)
+ put_page_bootmem(page);
+ else
+ VM_BUG_ON_PAGE(1, page);
+}
+#else
+static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
+{
+}
+
+static inline void put_page_bootmem(struct page *page)
+{
+}
+
+static inline void get_page_bootmem(unsigned long info, struct page *page,
+ unsigned long type)
+{
+}
+
+static inline void free_bootmem_page(struct page *page)
+{
+ free_reserved_page(page);
+}
+#endif
+
+#endif /* __LINUX_BOOTMEM_INFO_H */
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index a19519f4241d..fc53e0ad56d9 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -2,9 +2,10 @@
#ifndef _LINUX_BH_H
#define _LINUX_BH_H
+#include <linux/instruction_pointer.h>
#include <linux/preempt.h>
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_TRACE_IRQFLAGS)
extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
#else
static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
@@ -32,4 +33,10 @@ static inline void local_bh_enable(void)
__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
}
+#ifdef CONFIG_PREEMPT_RT
+extern bool local_bh_blocked(void);
+#else
+static inline bool local_bh_blocked(void) { return false; }
+#endif
+
#endif /* _LINUX_BH_H */
diff --git a/include/linux/bpf-cgroup-defs.h b/include/linux/bpf-cgroup-defs.h
new file mode 100644
index 000000000000..7b121bd780eb
--- /dev/null
+++ b/include/linux/bpf-cgroup-defs.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BPF_CGROUP_DEFS_H
+#define _BPF_CGROUP_DEFS_H
+
+#ifdef CONFIG_CGROUP_BPF
+
+#include <linux/list.h>
+#include <linux/percpu-refcount.h>
+#include <linux/workqueue.h>
+
+struct bpf_prog_array;
+
+#ifdef CONFIG_BPF_LSM
+/* Maximum number of concurrently attachable per-cgroup LSM hooks. */
+#define CGROUP_LSM_NUM 10
+#else
+#define CGROUP_LSM_NUM 0
+#endif
+
+enum cgroup_bpf_attach_type {
+ CGROUP_BPF_ATTACH_TYPE_INVALID = -1,
+ CGROUP_INET_INGRESS = 0,
+ CGROUP_INET_EGRESS,
+ CGROUP_INET_SOCK_CREATE,
+ CGROUP_SOCK_OPS,
+ CGROUP_DEVICE,
+ CGROUP_INET4_BIND,
+ CGROUP_INET6_BIND,
+ CGROUP_INET4_CONNECT,
+ CGROUP_INET6_CONNECT,
+ CGROUP_INET4_POST_BIND,
+ CGROUP_INET6_POST_BIND,
+ CGROUP_UDP4_SENDMSG,
+ CGROUP_UDP6_SENDMSG,
+ CGROUP_SYSCTL,
+ CGROUP_UDP4_RECVMSG,
+ CGROUP_UDP6_RECVMSG,
+ CGROUP_GETSOCKOPT,
+ CGROUP_SETSOCKOPT,
+ CGROUP_INET4_GETPEERNAME,
+ CGROUP_INET6_GETPEERNAME,
+ CGROUP_INET4_GETSOCKNAME,
+ CGROUP_INET6_GETSOCKNAME,
+ CGROUP_INET_SOCK_RELEASE,
+ CGROUP_LSM_START,
+ CGROUP_LSM_END = CGROUP_LSM_START + CGROUP_LSM_NUM - 1,
+ MAX_CGROUP_BPF_ATTACH_TYPE
+};
+
+struct cgroup_bpf {
+ /* array of effective progs in this cgroup */
+ struct bpf_prog_array __rcu *effective[MAX_CGROUP_BPF_ATTACH_TYPE];
+
+ /* attached progs to this cgroup and attach flags
+ * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
+ * have either zero or one element
+ * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
+ */
+ struct hlist_head progs[MAX_CGROUP_BPF_ATTACH_TYPE];
+ u8 flags[MAX_CGROUP_BPF_ATTACH_TYPE];
+
+ /* list of cgroup shared storages */
+ struct list_head storages;
+
+ /* temp storage for effective prog array used by prog_attach/detach */
+ struct bpf_prog_array *inactive;
+
+ /* reference counter used to detach bpf programs after cgroup removal */
+ struct percpu_ref refcnt;
+
+ /* cgroup_bpf is released using a work queue */
+ struct work_struct release_work;
+};
+
+#else /* CONFIG_CGROUP_BPF */
+struct cgroup_bpf {};
+#endif /* CONFIG_CGROUP_BPF */
+
+#endif
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index a11d5b7dbbf3..57e9e109257e 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -3,11 +3,12 @@
#define _BPF_CGROUP_H
#include <linux/bpf.h>
+#include <linux/bpf-cgroup-defs.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
-#include <linux/percpu-refcount.h>
#include <linux/rbtree.h>
+#include <net/sock.h>
#include <uapi/linux/bpf.h>
struct sock;
@@ -20,14 +21,56 @@ struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
+struct task_struct;
+
+unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
+ const struct bpf_insn *insn);
+unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
+ const struct bpf_insn *insn);
+unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
+ const struct bpf_insn *insn);
#ifdef CONFIG_CGROUP_BPF
-extern struct static_key_false cgroup_bpf_enabled_key;
-#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
+#define CGROUP_ATYPE(type) \
+ case BPF_##type: return type
+
+static inline enum cgroup_bpf_attach_type
+to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
+{
+ switch (attach_type) {
+ CGROUP_ATYPE(CGROUP_INET_INGRESS);
+ CGROUP_ATYPE(CGROUP_INET_EGRESS);
+ CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
+ CGROUP_ATYPE(CGROUP_SOCK_OPS);
+ CGROUP_ATYPE(CGROUP_DEVICE);
+ CGROUP_ATYPE(CGROUP_INET4_BIND);
+ CGROUP_ATYPE(CGROUP_INET6_BIND);
+ CGROUP_ATYPE(CGROUP_INET4_CONNECT);
+ CGROUP_ATYPE(CGROUP_INET6_CONNECT);
+ CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
+ CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
+ CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
+ CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
+ CGROUP_ATYPE(CGROUP_SYSCTL);
+ CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
+ CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
+ CGROUP_ATYPE(CGROUP_GETSOCKOPT);
+ CGROUP_ATYPE(CGROUP_SETSOCKOPT);
+ CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
+ CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
+ CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
+ CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
+ CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
+ default:
+ return CGROUP_BPF_ATTACH_TYPE_INVALID;
+ }
+}
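
Attach paths translate the UAPI value first and reject types that have no
cgroup slot; a sketch, with attr standing in for the usual union bpf_attr
argument:

	enum cgroup_bpf_attach_type atype;

	atype = to_cgroup_bpf_attach_type(attr->attach_type);
	if (atype == CGROUP_BPF_ATTACH_TYPE_INVALID)
		return -EINVAL;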
-DECLARE_PER_CPU(struct bpf_cgroup_storage*,
- bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
+#undef CGROUP_ATYPE
+
+extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
+#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])
#define for_each_cgroup_storage_type(stype) \
for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
@@ -36,7 +79,7 @@ struct bpf_cgroup_storage_map;
struct bpf_storage_buffer {
struct rcu_head rcu;
- char data[0];
+ char data[];
};
struct bpf_cgroup_storage {
@@ -46,85 +89,52 @@ struct bpf_cgroup_storage {
};
struct bpf_cgroup_storage_map *map;
struct bpf_cgroup_storage_key key;
- struct list_head list;
+ struct list_head list_map;
+ struct list_head list_cg;
struct rb_node node;
struct rcu_head rcu;
};
+struct bpf_cgroup_link {
+ struct bpf_link link;
+ struct cgroup *cgroup;
+ enum bpf_attach_type type;
+};
+
struct bpf_prog_list {
- struct list_head node;
+ struct hlist_node node;
struct bpf_prog *prog;
+ struct bpf_cgroup_link *link;
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};
-struct bpf_prog_array;
-
-struct cgroup_bpf {
- /* array of effective progs in this cgroup */
- struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];
-
- /* attached progs to this cgroup and attach flags
- * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
- * have either zero or one element
- * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
- */
- struct list_head progs[MAX_BPF_ATTACH_TYPE];
- u32 flags[MAX_BPF_ATTACH_TYPE];
-
- /* temp storage for effective prog array used by prog_attach/detach */
- struct bpf_prog_array *inactive;
-
- /* reference counter used to detach bpf programs after cgroup removal */
- struct percpu_ref refcnt;
-
- /* cgroup_bpf is released using a work queue */
- struct work_struct release_work;
-};
-
int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);
-int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
- struct bpf_prog *replace_prog,
- enum bpf_attach_type type, u32 flags);
-int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
- enum bpf_attach_type type);
-int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
- union bpf_attr __user *uattr);
-
-/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
-int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
- struct bpf_prog *replace_prog, enum bpf_attach_type type,
- u32 flags);
-int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
- enum bpf_attach_type type, u32 flags);
-int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
- union bpf_attr __user *uattr);
-
int __cgroup_bpf_run_filter_skb(struct sock *sk,
struct sk_buff *skb,
- enum bpf_attach_type type);
+ enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_sk(struct sock *sk,
- enum bpf_attach_type type);
+ enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
struct sockaddr *uaddr,
- enum bpf_attach_type type,
- void *t_ctx);
+ enum cgroup_bpf_attach_type atype,
+ void *t_ctx,
+ u32 *flags);
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
struct bpf_sock_ops_kern *sock_ops,
- enum bpf_attach_type type);
+ enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
- short access, enum bpf_attach_type type);
+ short access, enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
struct ctl_table *table, int write,
- void __user *buf, size_t *pcount,
- loff_t *ppos, void **new_buf,
- enum bpf_attach_type type);
+ char **buf, size_t *pcount, loff_t *ppos,
+ enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
int *optname, char __user *optval,
@@ -134,6 +144,10 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
int __user *optlen, int max_optlen,
int retval);
+int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
+ int optname, void *optval,
+ int *optlen, int retval);
+
static inline enum bpf_cgroup_storage_type cgroup_storage_type(
struct bpf_map *map)
{
@@ -143,15 +157,9 @@ static inline enum bpf_cgroup_storage_type cgroup_storage_type(
return BPF_CGROUP_STORAGE_SHARED;
}
-static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
- *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
-{
- enum bpf_cgroup_storage_type stype;
-
- for_each_cgroup_storage_type(stype)
- this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
-}
-
+struct bpf_cgroup_storage *
+cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
+ void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
@@ -160,19 +168,30 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
-void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *map);
int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
void *value, u64 flags);
+/* Opportunistic check to see whether we have any BPF program attached */
+static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
+ enum cgroup_bpf_attach_type type)
+{
+ struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+ struct bpf_prog_array *array;
+
+ array = rcu_access_pointer(cgrp->bpf.effective[type]);
+ return array != &bpf_empty_prog_array.hdr;
+}
+
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) && \
+ cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS)) \
__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
- BPF_CGROUP_INET_INGRESS); \
+ CGROUP_INET_INGRESS); \
\
__ret; \
})
@@ -180,119 +199,164 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
+ if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
typeof(sk) __sk = sk_to_full_sk(sk); \
- if (sk_fullsock(__sk)) \
+ if (sk_fullsock(__sk) && \
+ cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS)) \
__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
- BPF_CGROUP_INET_EGRESS); \
+ CGROUP_INET_EGRESS); \
} \
__ret; \
})
-#define BPF_CGROUP_RUN_SK_PROG(sk, type) \
+#define BPF_CGROUP_RUN_SK_PROG(sk, atype) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) { \
- __ret = __cgroup_bpf_run_filter_sk(sk, type); \
+ if (cgroup_bpf_enabled(atype)) { \
+ __ret = __cgroup_bpf_run_filter_sk(sk, atype); \
} \
__ret; \
})
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
- BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)
+
+#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
- BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
- BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)
-#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \
+#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
- NULL); \
+ if (cgroup_bpf_enabled(atype)) \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
+ NULL, NULL); \
__ret; \
})
-#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) { \
+ if (cgroup_bpf_enabled(atype)) { \
lock_sock(sk); \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
- t_ctx); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
+ t_ctx, NULL); \
release_sock(sk); \
} \
__ret; \
})
-#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)
-
-#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)
+/* BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND can return extra flags
+ * via upper bits of return code. The only flag that is supported
+ * (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check
+ * should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
+ */
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags) \
+({ \
+ u32 __flags = 0; \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(atype)) { \
+ lock_sock(sk); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
+ NULL, &__flags); \
+ release_sock(sk); \
+ if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \
+ *bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE; \
+ } \
+ __ret; \
+})
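
A sketch of the caller side in the bind() path: the macro fills the
flags out-parameter, and the caller then relaxes the
CAP_NET_BIND_SERVICE check (the locals here are hypothetical):

	u32 flags = 0;
	int err;

	err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr,
						 CGROUP_INET4_BIND, &flags);
	if (err)
		return err;
	if (flags & BIND_NO_CAP_NET_BIND_SERVICE)
		allow_privileged_port = true;	/* skip the capability check */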
-#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
- sk->sk_prot->pre_connect)
+#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) \
+ ((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) || \
+ cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) && \
+ (sk)->sk_prot->pre_connect)
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT)
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT)
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL)
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL)
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx)
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx)
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL)
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_RECVMSG, NULL)
+
+/* The BPF_CGROUP_RUN_PROG_SOCK_OPS_SK() macro should be used when
+ * sock_ops->sk is not a fullsock and its parent fullsock cannot be
+ * traced by sk_to_full_sk().
+ *
+ * e.g. sock_ops->sk is a request_sock and it is under syncookie mode.
+ * Its listener-sk is not attached to the rsk_listener.
+ * In this case, the caller holds the listener-sk (unlocked),
+ * set its sock_ops->sk to req_sk, and call this SOCK_OPS"_SK" with
+ * the listener-sk such that the cgroup-bpf-progs of the
+ * listener-sk will be run.
+ *
+ * Whether in syncookie mode or not,
+ * calling bpf_setsockopt on the listener-sk makes no sense anyway,
+ * so passing 'sock_ops->sk == req_sk' to the bpf prog is appropriate here.
+ */
+#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(CGROUP_SOCK_OPS)) \
+ __ret = __cgroup_bpf_run_filter_sock_ops(sk, \
+ sock_ops, \
+ CGROUP_SOCK_OPS); \
+ __ret; \
+})
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled && (sock_ops)->sk) { \
+ if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) { \
typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
if (__sk && sk_fullsock(__sk)) \
__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
sock_ops, \
- BPF_CGROUP_SOCK_OPS); \
+ CGROUP_SOCK_OPS); \
} \
__ret; \
})
-#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \
+#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
- __ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
+ if (cgroup_bpf_enabled(CGROUP_DEVICE)) \
+ __ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
access, \
- BPF_CGROUP_DEVICE); \
+ CGROUP_DEVICE); \
\
__ret; \
})
-#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) \
+#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(CGROUP_SYSCTL)) \
__ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
- buf, count, pos, nbuf, \
- BPF_CGROUP_SYSCTL); \
+ buf, count, pos, \
+ CGROUP_SYSCTL); \
__ret; \
})
@@ -300,7 +364,8 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
kernel_optval) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT) && \
+ cgroup_bpf_sock_enabled(sock, CGROUP_SETSOCKOPT)) \
__ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
optname, optval, \
optlen, \
@@ -311,7 +376,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
get_user(__ret, optlen); \
__ret; \
})
@@ -320,11 +385,25 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
max_optlen, retval) \
({ \
int __ret = retval; \
- if (cgroup_bpf_enabled) \
- __ret = __cgroup_bpf_run_filter_getsockopt(sock, level, \
- optname, optval, \
- optlen, max_optlen, \
- retval); \
+ if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT) && \
+ cgroup_bpf_sock_enabled(sock, CGROUP_GETSOCKOPT)) \
+ if (!(sock)->sk_prot->bpf_bypass_getsockopt || \
+ !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
+ tcp_bpf_bypass_getsockopt, \
+ level, optname)) \
+ __ret = __cgroup_bpf_run_filter_getsockopt( \
+ sock, level, optname, optval, optlen, \
+ max_optlen, retval); \
+ __ret; \
+})
+
+#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
+ optlen, retval) \
+({ \
+ int __ret = retval; \
+ if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
+ __ret = __cgroup_bpf_run_filter_getsockopt_kern( \
+ sock, level, optname, optval, optlen, retval); \
__ret; \
})
@@ -332,12 +411,16 @@ int cgroup_bpf_prog_attach(const union bpf_attr *attr,
enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
enum bpf_prog_type ptype);
+int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr);
+
+const struct bpf_func_proto *
+cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
+const struct bpf_func_proto *
+cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
#else
-struct bpf_prog;
-struct cgroup_bpf {};
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}
@@ -354,18 +437,32 @@ static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
return -EINVAL;
}
+static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
return -EINVAL;
}
-static inline void bpf_cgroup_storage_set(
- struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
+static inline const struct bpf_func_proto *
+cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ return NULL;
+}
+
+static inline const struct bpf_func_proto *
+cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ return NULL;
+}
+
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
struct bpf_map *map) { return 0; }
-static inline void bpf_cgroup_storage_release(struct bpf_prog_aux *aux,
- struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
@@ -379,13 +476,15 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
return 0;
}
-#define cgroup_bpf_enabled (0)
+#define cgroup_bpf_enabled(atype) (0)
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
@@ -397,11 +496,13 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos,nbuf) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
optlen, max_optlen, retval) ({ retval; })
+#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
+ optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
kernel_optval) ({ 0; })
diff --git a/include/linux/bpf-netns.h b/include/linux/bpf-netns.h
new file mode 100644
index 000000000000..413cfa5e4b07
--- /dev/null
+++ b/include/linux/bpf-netns.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BPF_NETNS_H
+#define _BPF_NETNS_H
+
+#include <linux/mutex.h>
+#include <net/netns/bpf.h>
+#include <uapi/linux/bpf.h>
+
+static inline enum netns_bpf_attach_type
+to_netns_bpf_attach_type(enum bpf_attach_type attach_type)
+{
+ switch (attach_type) {
+ case BPF_FLOW_DISSECTOR:
+ return NETNS_BPF_FLOW_DISSECTOR;
+ case BPF_SK_LOOKUP:
+ return NETNS_BPF_SK_LOOKUP;
+ default:
+ return NETNS_BPF_INVALID;
+ }
+}
+
+/* Protects updates to netns_bpf */
+extern struct mutex netns_bpf_mutex;
+
+union bpf_attr;
+struct bpf_prog;
+
+#ifdef CONFIG_NET
+int netns_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+int netns_bpf_prog_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog);
+int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
+int netns_bpf_link_create(const union bpf_attr *attr,
+ struct bpf_prog *prog);
+#else
+static inline int netns_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int netns_bpf_prog_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int netns_bpf_prog_detach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int netns_bpf_link_create(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif /* _BPF_NETNS_H */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 49b1a70e12c8..0566705c1d4e 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -5,6 +5,7 @@
#define _LINUX_BPF_H 1
#include <uapi/linux/bpf.h>
+#include <uapi/linux/filter.h>
#include <linux/workqueue.h>
#include <linux/file.h>
@@ -14,10 +15,19 @@
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
-#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/capability.h>
+#include <linux/sched/mm.h>
+#include <linux/slab.h>
+#include <linux/percpu-refcount.h>
+#include <linux/stddef.h>
+#include <linux/bpfptr.h>
+#include <linux/btf.h>
+#include <linux/rcupdate_trace.h>
+#include <linux/init.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -30,11 +40,35 @@ struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;
+struct seq_operations;
+struct bpf_iter_aux_info;
+struct bpf_local_storage;
+struct bpf_local_storage_map;
+struct kobject;
+struct mem_cgroup;
+struct module;
+struct bpf_func_state;
+struct ftrace_ops;
+struct cgroup;
extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
+extern struct kobject *btf_kobj;
+
+typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
+typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
+ struct bpf_iter_aux_info *aux);
+typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
+typedef unsigned int (*bpf_func_t)(const void *,
+ const struct bpf_insn *);
+struct bpf_iter_seq_info {
+ const struct seq_operations *seq_ops;
+ bpf_iter_init_seq_priv_t init_seq_private;
+ bpf_iter_fini_seq_priv_t fini_seq_private;
+ u32 seq_priv_size;
+};
-/* map is generic key/value storage optionally accesible by eBPF programs */
+/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
/* funcs callable from userspace (via syscall) */
int (*map_alloc_check)(union bpf_attr *attr);
@@ -46,6 +80,8 @@ struct bpf_map_ops {
void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
union bpf_attr __user *uattr);
+ int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
+ void *value, u64 flags);
int (*map_lookup_and_delete_batch)(struct bpf_map *map,
const union bpf_attr *attr,
union bpf_attr __user *uattr);
@@ -61,12 +97,13 @@ struct bpf_map_ops {
int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
int (*map_pop_elem)(struct bpf_map *map, void *value);
int (*map_peek_elem)(struct bpf_map *map, void *value);
+ void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);
/* funcs called by prog_array and perf_event_array map */
void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
int fd);
void (*map_fd_put_ptr)(void *ptr);
- u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
+ int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
u32 (*map_fd_sys_lookup_elem)(void *ptr);
void (*map_seq_show_elem)(struct bpf_map *map, void *key,
struct seq_file *m);
@@ -87,11 +124,79 @@ struct bpf_map_ops {
int (*map_direct_value_meta)(const struct bpf_map *map,
u64 imm, u32 *off);
int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
+ __poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
+ struct poll_table_struct *pts);
+
+ /* Functions called by bpf_local_storage maps */
+ int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
+ void *owner, u32 size);
+ void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
+ void *owner, u32 size);
+ struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);
+
+ /* Misc helpers. */
+ int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);
+
+ /* map_meta_equal must be implemented for maps that can be
+ * used as an inner map. It is a runtime check to ensure
+ * an inner map can be inserted to an outer map.
+ *
+ * Some properties of the inner map are used during verification.
+ * When inserting an inner map at runtime, map_meta_equal has to
+ * ensure the map being inserted has the same properties that the
+ * verifier relied on earlier.
+ */
+ bool (*map_meta_equal)(const struct bpf_map *meta0,
+ const struct bpf_map *meta1);
+
+ int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
+ struct bpf_func_state *caller,
+ struct bpf_func_state *callee);
+ int (*map_for_each_callback)(struct bpf_map *map,
+ bpf_callback_t callback_fn,
+ void *callback_ctx, u64 flags);
+
+ /* BTF id of struct allocated by map_alloc */
+ int *map_btf_id;
+
+ /* bpf_iter info used to open a seq_file */
+ const struct bpf_iter_seq_info *iter_seq_info;
};
-struct bpf_map_memory {
- u32 pages;
- struct user_struct *user;
+enum {
+ /* Support at most 8 pointers in a BPF map value */
+ BPF_MAP_VALUE_OFF_MAX = 8,
+ BPF_MAP_OFF_ARR_MAX = BPF_MAP_VALUE_OFF_MAX +
+ 1 + /* for bpf_spin_lock */
+ 1, /* for bpf_timer */
+};
+
+enum bpf_kptr_type {
+ BPF_KPTR_UNREF,
+ BPF_KPTR_REF,
+};
+
+struct bpf_map_value_off_desc {
+ u32 offset;
+ enum bpf_kptr_type type;
+ struct {
+ struct btf *btf;
+ struct module *module;
+ btf_dtor_kfunc_t dtor;
+ u32 btf_id;
+ } kptr;
+};
+
+struct bpf_map_value_off {
+ u32 nr_off;
+ struct bpf_map_value_off_desc off[];
+};
+
+struct bpf_map_off_arr {
+ u32 cnt;
+ u32 field_off[BPF_MAP_OFF_ARR_MAX];
+ u8 field_sz[BPF_MAP_OFF_ARR_MAX];
};
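
For orientation, the off_arr/kptr bookkeeping above corresponds to map value layouts like the following BPF-program-side declaration; this is a minimal sketch with hypothetical field names, showing why BPF_MAP_OFF_ARR_MAX is BPF_MAP_VALUE_OFF_MAX plus one slot each for bpf_spin_lock and bpf_timer:

/* Sketch (hypothetical names): a map value as declared in a BPF program.
 * Each special field below becomes one entry that the copy/zero helpers
 * further down must skip over.
 */
struct example_map_value {
	struct bpf_spin_lock lock;	/* at most one -> spin_lock_off */
	struct bpf_timer timer;		/* at most one -> timer_off */
	struct task_struct __kptr *t;	/* up to BPF_MAP_VALUE_OFF_MAX kptrs */
	long payload;			/* ordinary data, copied verbatim */
};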
struct bpf_map {
@@ -107,20 +212,22 @@ struct bpf_map {
u32 key_size;
u32 value_size;
u32 max_entries;
+ u64 map_extra; /* any per-map-type extra fields */
u32 map_flags;
int spin_lock_off; /* >=0 valid offset, <0 error */
+ struct bpf_map_value_off *kptr_off_tab;
+ int timer_off; /* >=0 valid offset, <0 error */
u32 id;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
+ u32 btf_vmlinux_value_type_id;
struct btf *btf;
- struct bpf_map_memory memory;
+#ifdef CONFIG_MEMCG_KMEM
+ struct obj_cgroup *objcg;
+#endif
char name[BPF_OBJ_NAME_LEN];
- u32 btf_vmlinux_value_type_id;
- bool unpriv_array;
- bool frozen; /* write-once; write-protected by freeze_mutex */
- /* 22 bytes hole */
-
+ struct bpf_map_off_arr *off_arr;
/* The 3rd and 4th cacheline with misc members to avoid false sharing
* particularly with refcounting.
*/
@@ -128,7 +235,20 @@ struct bpf_map {
atomic64_t usercnt;
struct work_struct work;
struct mutex freeze_mutex;
- u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
+ atomic64_t writecnt;
+ /* 'Ownership' of a program-containing map is claimed by the first
+ * program that is going to use this map, or by the first program whose
+ * FD is stored in the map, to make sure that all callers and callees
+ * have the same prog type, JITed flag and xdp_has_frags flag.
+ */
+ struct {
+ spinlock_t lock;
+ enum bpf_prog_type type;
+ bool jited;
+ bool xdp_has_frags;
+ } owner;
+ bool bypass_spec_v1;
+ bool frozen; /* write-once; write-protected by freeze_mutex */
};
static inline bool map_value_has_spin_lock(const struct bpf_map *map)
@@ -136,30 +256,103 @@ static inline bool map_value_has_spin_lock(const struct bpf_map *map)
return map->spin_lock_off >= 0;
}
-static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
+static inline bool map_value_has_timer(const struct bpf_map *map)
+{
+ return map->timer_off >= 0;
+}
+
+static inline bool map_value_has_kptrs(const struct bpf_map *map)
{
- if (likely(!map_value_has_spin_lock(map)))
+ return !IS_ERR_OR_NULL(map->kptr_off_tab);
+}
+
+static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
+{
+ if (unlikely(map_value_has_spin_lock(map)))
+ memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock));
+ if (unlikely(map_value_has_timer(map)))
+ memset(dst + map->timer_off, 0, sizeof(struct bpf_timer));
+ if (unlikely(map_value_has_kptrs(map))) {
+ struct bpf_map_value_off *tab = map->kptr_off_tab;
+ int i;
+
+ for (i = 0; i < tab->nr_off; i++)
+ *(u64 *)(dst + tab->off[i].offset) = 0;
+ }
+}
+
+/* memcpy that is used with 8-byte aligned pointers and a size that is a
+ * multiple of 8, forced to use 'long' read/writes to try to atomically
+ * copy long counters.
+ * Best-effort only. No barriers here, since it _will_ race with concurrent
+ * updates from BPF programs. Called from bpf syscall and mostly used with
+ * size 8 or 16 bytes, so ask compiler to inline it.
+ */
+static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
+{
+ const long *lsrc = src;
+ long *ldst = dst;
+
+ size /= sizeof(long);
+ while (size--)
+ *ldst++ = *lsrc++;
+}
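
As a usage sketch (names illustrative), snapshotting a 16-byte statistics pair from the syscall side would look like:

/* Sketch: long-granular snapshot of a 16-byte map value. Best effort
 * only, per the comment above; concurrent BPF-side updates may race.
 */
struct counters {
	long pkts;
	long bytes;
};	/* 16 bytes, 8-byte aligned */

static void snapshot_counters(struct counters *dst, const struct counters *src)
{
	bpf_long_memcpy(dst, src, sizeof(*src));	/* copies two longs */
}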
+
+/* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There can be one
+ * lock, one timer, and up to BPF_MAP_VALUE_OFF_MAX kptrs. */
+static inline void __copy_map_value(struct bpf_map *map, void *dst, void *src, bool long_memcpy)
+{
+ u32 curr_off = 0;
+ int i;
+
+ if (likely(!map->off_arr)) {
+ if (long_memcpy)
+ bpf_long_memcpy(dst, src, round_up(map->value_size, 8));
+ else
+ memcpy(dst, src, map->value_size);
return;
- *(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
- (struct bpf_spin_lock){};
+ }
+
+ for (i = 0; i < map->off_arr->cnt; i++) {
+ u32 next_off = map->off_arr->field_off[i];
+
+ memcpy(dst + curr_off, src + curr_off, next_off - curr_off);
+ curr_off += map->off_arr->field_sz[i];
+ }
+ memcpy(dst + curr_off, src + curr_off, map->value_size - curr_off);
}
-/* copy everything but bpf_spin_lock */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
- if (unlikely(map_value_has_spin_lock(map))) {
- u32 off = map->spin_lock_off;
+ __copy_map_value(map, dst, src, false);
+}
+
+static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src)
+{
+ __copy_map_value(map, dst, src, true);
+}
+
+static inline void zero_map_value(struct bpf_map *map, void *dst)
+{
+ u32 curr_off = 0;
+ int i;
- memcpy(dst, src, off);
- memcpy(dst + off + sizeof(struct bpf_spin_lock),
- src + off + sizeof(struct bpf_spin_lock),
- map->value_size - off - sizeof(struct bpf_spin_lock));
- } else {
- memcpy(dst, src, map->value_size);
+ if (likely(!map->off_arr)) {
+ memset(dst, 0, map->value_size);
+ return;
}
+
+ for (i = 0; i < map->off_arr->cnt; i++) {
+ u32 next_off = map->off_arr->field_off[i];
+
+ memset(dst + curr_off, 0, next_off - curr_off);
+ curr_off += map->off_arr->field_sz[i];
+ }
+ memset(dst + curr_off, 0, map->value_size - curr_off);
}
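
A worked pass through the loop above, assuming a 24-byte value whose only special field is a 4-byte bpf_spin_lock at offset 8:

/* off_arr->cnt = 1, field_off[0] = 8, field_sz[0] = 4, value_size = 24
 *
 * iteration 0: copy/zero bytes [0, 8)   (next_off - curr_off = 8)
 *              curr_off = 8 + 4 = 12    (skip over the bpf_spin_lock)
 * tail:        copy/zero bytes [12, 24) (value_size - curr_off = 12)
 */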
+
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
bool lock_src);
+void bpf_timer_cancel_and_free(void *timer);
+int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
struct bpf_offload_dev;
struct bpf_offloaded_map;
@@ -203,8 +396,80 @@ int map_check_no_btf(const struct bpf_map *map,
const struct btf_type *key_type,
const struct btf_type *value_type);
+bool bpf_map_meta_equal(const struct bpf_map *meta0,
+ const struct bpf_map *meta1);
+
extern const struct bpf_map_ops bpf_map_offload_ops;
+/* bpf_type_flag contains a set of flags that are applicable to the values of
+ * arg_type, ret_type and reg_type. For example, a pointer value may be null,
+ * or a memory region may be read-only. We classify types into two categories:
+ * base types and extended types. Extended types are base types combined with
+ * a type flag.
+ *
+ * Currently there are no more than 32 base types in arg_type, ret_type and
+ * reg_types.
+ */
+#define BPF_BASE_TYPE_BITS 8
+
+enum bpf_type_flag {
+ /* PTR may be NULL. */
+ PTR_MAYBE_NULL = BIT(0 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is read-only. When applied to a bpf_arg, it indicates the arg is
+ * compatible with both mutable and immutable memory.
+ */
+ MEM_RDONLY = BIT(1 + BPF_BASE_TYPE_BITS),
+
+ /* MEM was "allocated" from a different helper, and cannot be mixed
+ * with regular non-MEM_ALLOC'ed MEM types.
+ */
+ MEM_ALLOC = BIT(2 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is in user address space. */
+ MEM_USER = BIT(3 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged
+ * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In
+ * order to drop this tag, it must be passed into bpf_per_cpu_ptr()
+ * or bpf_this_cpu_ptr(), which will return the pointer corresponding
+ * to the specified cpu.
+ */
+ MEM_PERCPU = BIT(4 + BPF_BASE_TYPE_BITS),
+
+ /* Indicates that the argument will be released. */
+ OBJ_RELEASE = BIT(5 + BPF_BASE_TYPE_BITS),
+
+ /* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark
+ * unreferenced and referenced kptrs loaded from a map value using a load
+ * instruction, so that they can only be dereferenced but cannot escape the
+ * BPF program into the kernel (i.e. cannot be passed as arguments to
+ * kfuncs or bpf helpers).
+ */
+ PTR_UNTRUSTED = BIT(6 + BPF_BASE_TYPE_BITS),
+
+ MEM_UNINIT = BIT(7 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to memory local to the bpf program. */
+ DYNPTR_TYPE_LOCAL = BIT(8 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to a kernel-produced ringbuf record. */
+ DYNPTR_TYPE_RINGBUF = BIT(9 + BPF_BASE_TYPE_BITS),
+
+ /* Size is known at compile time. */
+ MEM_FIXED_SIZE = BIT(10 + BPF_BASE_TYPE_BITS),
+
+ __BPF_TYPE_FLAG_MAX,
+ __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1,
+};
+
+#define DYNPTR_TYPE_FLAG_MASK (DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF)
+
+/* Max number of base types. */
+#define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS)
+
+/* Max number of all types. */
+#define BPF_TYPE_LIMIT (__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))
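
Consumers decompose an extended type by masking off the low BPF_BASE_TYPE_BITS; a minimal sketch mirroring the base_type()/type_flag() helpers kept in the verifier header (exact names and location are an assumption here):

#include <linux/bits.h>

/* Sketch: split an extended type into its base type and its flags. */
#define BPF_BASE_TYPE_MASK	GENMASK(BPF_BASE_TYPE_BITS - 1, 0)

static inline u32 base_type(u32 type)
{
	return type & BPF_BASE_TYPE_MASK;	/* e.g. ARG_PTR_TO_MAP_VALUE */
}

static inline u32 type_flag(u32 type)
{
	return type & ~BPF_BASE_TYPE_MASK;	/* e.g. PTR_MAYBE_NULL */
}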
+
/* function argument constraints */
enum bpf_arg_type {
ARG_DONTCARE = 0, /* unused argument in helper function */
@@ -215,18 +480,11 @@ enum bpf_arg_type {
ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */
ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
- ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */
- ARG_PTR_TO_MAP_VALUE_OR_NULL, /* pointer to stack used as map value or NULL */
- /* the following constraints used to prototype bpf_memcmp() and other
- * functions that access data on eBPF program stack
+ /* Used to prototype bpf_memcmp() and other functions that access data
+ * on eBPF program stack
*/
ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */
- ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */
- ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized,
- * helper function must fill all bytes or clear
- * them in error case.
- */
ARG_CONST_SIZE, /* number of bytes accessed from memory */
ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */
@@ -239,18 +497,68 @@ enum bpf_arg_type {
ARG_PTR_TO_LONG, /* pointer to long */
ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */
ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */
+ ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */
+ ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */
+ ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
+ ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */
+ ARG_PTR_TO_FUNC, /* pointer to a bpf program function */
+ ARG_PTR_TO_STACK, /* pointer to stack */
+ ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */
+ ARG_PTR_TO_TIMER, /* pointer to bpf_timer */
+ ARG_PTR_TO_KPTR, /* pointer to referenced kptr */
+ ARG_PTR_TO_DYNPTR, /* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */
+ __BPF_ARG_TYPE_MAX,
+
+ /* Extended arg_types. */
+ ARG_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
+ ARG_PTR_TO_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
+ ARG_PTR_TO_CTX_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
+ ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
+ ARG_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_ALLOC_MEM,
+ ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
+ ARG_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
+ /* pointer to memory does not need to be initialized, helper function must fill
+ * all bytes or clear them in error case.
+ */
+ ARG_PTR_TO_UNINIT_MEM = MEM_UNINIT | ARG_PTR_TO_MEM,
+ /* Pointer to valid memory of size known at compile time. */
+ ARG_PTR_TO_FIXED_SIZE_MEM = MEM_FIXED_SIZE | ARG_PTR_TO_MEM,
+
+ /* This must be the last entry. Its purpose is to ensure the enum is
+ * wide enough to hold the higher bits reserved for bpf_type_flag.
+ */
+ __BPF_ARG_TYPE_LIMIT = BPF_TYPE_LIMIT,
};
+static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* type of values returned from helper functions */
enum bpf_return_type {
RET_INTEGER, /* function returns integer */
RET_VOID, /* function doesn't return anything */
RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */
- RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
- RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */
- RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */
- RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */
+ RET_PTR_TO_SOCKET, /* returns a pointer to a socket */
+ RET_PTR_TO_TCP_SOCK, /* returns a pointer to a tcp_sock */
+ RET_PTR_TO_SOCK_COMMON, /* returns a pointer to a sock_common */
+ RET_PTR_TO_ALLOC_MEM, /* returns a pointer to dynamically allocated memory */
+ RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */
+ RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */
+ __BPF_RET_TYPE_MAX,
+
+ /* Extended ret_types. */
+ RET_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
+ RET_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
+ RET_PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
+ RET_PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
+ RET_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | MEM_ALLOC | RET_PTR_TO_ALLOC_MEM,
+ RET_PTR_TO_DYNPTR_MEM_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_ALLOC_MEM,
+ RET_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,
+
+ /* This must be the last entry. Its purpose is to ensure the enum is
+ * wide enough to hold the higher bits reserved for bpf_type_flag.
+ */
+ __BPF_RET_TYPE_LIMIT = BPF_TYPE_LIMIT,
};
+static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
* to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
@@ -271,7 +579,26 @@ struct bpf_func_proto {
};
enum bpf_arg_type arg_type[5];
};
- int *btf_id; /* BTF ids of arguments */
+ union {
+ struct {
+ u32 *arg1_btf_id;
+ u32 *arg2_btf_id;
+ u32 *arg3_btf_id;
+ u32 *arg4_btf_id;
+ u32 *arg5_btf_id;
+ };
+ u32 *arg_btf_id[5];
+ struct {
+ size_t arg1_size;
+ size_t arg2_size;
+ size_t arg3_size;
+ size_t arg4_size;
+ size_t arg5_size;
+ };
+ size_t arg_size[5];
+ };
+ int *ret_btf_id; /* return value btf_id */
+ bool (*allowed)(const struct bpf_prog *prog);
};
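
A proto definition then reads like the following, modeled on the in-tree bpf_map_lookup_elem proto (fields such as .func and .gpl_only sit outside this hunk):

/* Modeled on kernel/bpf/helpers.c; shown here only to illustrate how the
 * arg_type/ret_type enums above are consumed.
 */
const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};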
/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
@@ -301,22 +628,51 @@ enum bpf_reg_type {
PTR_TO_CTX, /* reg points to bpf_context */
CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
PTR_TO_MAP_VALUE, /* reg points to map element value */
- PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
+ PTR_TO_MAP_KEY, /* reg points to a map element key */
PTR_TO_STACK, /* reg == frame_pointer + offset */
PTR_TO_PACKET_META, /* skb->data - meta_len */
PTR_TO_PACKET, /* reg points to skb->data */
PTR_TO_PACKET_END, /* skb->data + headlen */
PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */
PTR_TO_SOCKET, /* reg points to struct bpf_sock */
- PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */
PTR_TO_SOCK_COMMON, /* reg points to sock_common */
- PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */
- PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */
PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */
- PTR_TO_BTF_ID, /* reg points to kernel struct */
+ /* PTR_TO_BTF_ID points to a kernel struct that does not need
+ * to be null checked by the BPF program. This does not imply the
+ * pointer is _not_ null and in practice this can easily be a null
+ * pointer when reading pointer chains. The assumption is that the
+ * program context will handle a null pointer dereference, typically via
+ * fault handling. The verifier must keep this in mind and can make no
+ * assumptions about null or non-null when doing branch analysis.
+ * Further, when passed into helpers, the helpers cannot, without
+ * additional context, assume the value is non-null.
+ */
+ PTR_TO_BTF_ID,
+ /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
+ * been checked for null. Used primarily to inform the verifier
+ * an explicit null check is required for this struct.
+ */
+ PTR_TO_MEM, /* reg points to valid memory region */
+ PTR_TO_BUF, /* reg points to a read/write buffer */
+ PTR_TO_FUNC, /* reg points to a bpf program function */
+ PTR_TO_DYNPTR, /* reg points to a dynptr */
+ __BPF_REG_TYPE_MAX,
+
+ /* Extended reg_types. */
+ PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
+ PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET,
+ PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
+ PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
+ PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID,
+
+ /* This must be the last entry. Its purpose is to ensure the enum is
+ * wide enough to hold the higher bits reserved for bpf_type_flag.
+ */
+ __BPF_REG_TYPE_LIMIT = BPF_TYPE_LIMIT,
};
+static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* The information passed from prog-specific *_is_valid_access
* back to the verifier.
@@ -325,7 +681,10 @@ struct bpf_insn_access_aux {
enum bpf_reg_type reg_type;
union {
int ctx_field_size;
- u32 btf_id;
+ struct {
+ struct btf *btf;
+ u32 btf_id;
+ };
};
struct bpf_verifier_log *log; /* for verbose logs */
};
@@ -336,6 +695,12 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
aux->ctx_field_size = size;
}
+static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
+ insn->src_reg == BPF_PSEUDO_FUNC;
+}
+
struct bpf_prog_ops {
int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
@@ -362,9 +727,10 @@ struct bpf_verifier_ops {
struct bpf_insn *dst,
struct bpf_prog *prog, u32 *target_size);
int (*btf_struct_access)(struct bpf_verifier_log *log,
+ const struct btf *btf,
const struct btf_type *t, int off, int size,
enum bpf_access_type atype,
- u32 *next_btf_id);
+ u32 *next_btf_id, enum bpf_type_flag *flag);
};
struct bpf_prog_offload_ops {
@@ -407,16 +773,19 @@ enum bpf_cgroup_storage_type {
*/
#define MAX_BPF_FUNC_ARGS 12
-struct bpf_prog_stats {
- u64 cnt;
- u64 nsecs;
- struct u64_stats_sync syncp;
-} __aligned(2 * sizeof(u64));
+/* The maximum number of arguments passed through registers
+ * a single function may have.
+ */
+#define MAX_BPF_FUNC_REG_ARGS 5
+
+/* The argument is a structure. */
+#define BTF_FMODEL_STRUCT_ARG BIT(0)
struct btf_func_model {
u8 ret_size;
u8 nr_args;
u8 arg_size[MAX_BPF_FUNC_ARGS];
+ u8 arg_flags[MAX_BPF_FUNC_ARGS];
};
/* Restore arguments before returning from trampoline to let original function
@@ -432,6 +801,34 @@ struct btf_func_model {
* programs only. Should not be used with normal calls and indirect calls.
*/
#define BPF_TRAMP_F_SKIP_FRAME BIT(2)
+/* Store IP address of the caller on the trampoline stack,
+ * so it's available for trampoline's programs.
+ */
+#define BPF_TRAMP_F_IP_ARG BIT(3)
+/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
+#define BPF_TRAMP_F_RET_FENTRY_RET BIT(4)
+
+/* Get original function from stack instead of from provided direct address.
+ * Makes sense for trampolines with fexit or fmod_ret programs.
+ */
+#define BPF_TRAMP_F_ORIG_STACK BIT(5)
+
+/* This trampoline is on a function with another ftrace_ops with IPMODIFY,
+ * e.g., a live patch. This flag is set and cleared by ftrace callbacks.
+ */
+#define BPF_TRAMP_F_SHARE_IPMODIFY BIT(6)
+
+/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
+ * bytes on x86.
+ */
+#define BPF_MAX_TRAMP_LINKS 38
+
+struct bpf_tramp_links {
+ struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
+ int nr_links;
+};
+
+struct bpf_tramp_run_ctx;
/* Different use cases for BPF trampoline:
* 1. replace nop at the function entry (kprobe equivalent)
@@ -453,28 +850,65 @@ struct btf_func_model {
* fentry = a set of program to run before calling original function
* fexit = a set of program to run after original function
*/
-int arch_prepare_bpf_trampoline(void *image, void *image_end,
+struct bpf_tramp_image;
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
const struct btf_func_model *m, u32 flags,
- struct bpf_prog **fentry_progs, int fentry_cnt,
- struct bpf_prog **fexit_progs, int fexit_cnt,
+ struct bpf_tramp_links *tlinks,
void *orig_call);
/* these two functions are called from generated trampoline */
-u64 notrace __bpf_prog_enter(void);
-void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
+u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
+void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx);
+u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
+void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
+ struct bpf_tramp_run_ctx *run_ctx);
+u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
+ struct bpf_tramp_run_ctx *run_ctx);
+void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
+ struct bpf_tramp_run_ctx *run_ctx);
+u64 notrace __bpf_prog_enter_struct_ops(struct bpf_prog *prog,
+ struct bpf_tramp_run_ctx *run_ctx);
+void notrace __bpf_prog_exit_struct_ops(struct bpf_prog *prog, u64 start,
+ struct bpf_tramp_run_ctx *run_ctx);
+void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
+void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
+
+struct bpf_ksym {
+ unsigned long start;
+ unsigned long end;
+ char name[KSYM_NAME_LEN];
+ struct list_head lnode;
+ struct latch_tree_node tnode;
+ bool prog;
+};
enum bpf_tramp_prog_type {
BPF_TRAMP_FENTRY,
BPF_TRAMP_FEXIT,
+ BPF_TRAMP_MODIFY_RETURN,
BPF_TRAMP_MAX,
BPF_TRAMP_REPLACE, /* more than MAX */
};
+struct bpf_tramp_image {
+ void *image;
+ struct bpf_ksym ksym;
+ struct percpu_ref pcref;
+ void *ip_after_call;
+ void *ip_epilogue;
+ union {
+ struct rcu_head rcu;
+ struct work_struct work;
+ };
+};
+
struct bpf_trampoline {
/* hlist for trampoline_table */
struct hlist_node hlist;
+ struct ftrace_ops *fops;
/* serializes access to fields of this trampoline */
struct mutex mutex;
refcount_t refcnt;
+ u32 flags;
u64 key;
struct {
struct btf_func_model model;
@@ -491,8 +925,16 @@ struct bpf_trampoline {
/* Number of attached programs. A counter per kind. */
int progs_cnt[BPF_TRAMP_MAX];
/* Executable image of trampoline */
- void *image;
+ struct bpf_tramp_image *cur_image;
u64 selector;
+ struct module *mod;
+};
+
+struct bpf_attach_target_info {
+ struct btf_func_model fmodel;
+ long tgt_addr;
+ const char *tgt_name;
+ const struct btf_type *tgt_type;
};
#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */
@@ -509,77 +951,106 @@ struct bpf_dispatcher {
struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
int num_progs;
void *image;
+ void *rw_image;
u32 image_off;
+ struct bpf_ksym ksym;
};
-static __always_inline unsigned int bpf_dispatcher_nopfunc(
+static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
const void *ctx,
const struct bpf_insn *insnsi,
- unsigned int (*bpf_func)(const void *,
- const struct bpf_insn *))
+ bpf_func_t bpf_func)
{
return bpf_func(ctx, insnsi);
}
+
#ifdef CONFIG_BPF_JIT
-struct bpf_trampoline *bpf_trampoline_lookup(u64 key);
-int bpf_trampoline_link_prog(struct bpf_prog *prog);
-int bpf_trampoline_unlink_prog(struct bpf_prog *prog);
+int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
+int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
+struct bpf_trampoline *bpf_trampoline_get(u64 key,
+ struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
-#define BPF_DISPATCHER_INIT(name) { \
- .mutex = __MUTEX_INITIALIZER(name.mutex), \
- .func = &name##func, \
- .progs = {}, \
- .num_progs = 0, \
- .image = NULL, \
- .image_off = 0 \
-}
+int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
+int __init bpf_arch_init_dispatcher_early(void *ip);
+
+#define BPF_DISPATCHER_INIT(_name) { \
+ .mutex = __MUTEX_INITIALIZER(_name.mutex), \
+ .func = &_name##_func, \
+ .progs = {}, \
+ .num_progs = 0, \
+ .image = NULL, \
+ .image_off = 0, \
+ .ksym = { \
+ .name = #_name, \
+ .lnode = LIST_HEAD_INIT(_name.ksym.lnode), \
+ }, \
+}
+
+#define BPF_DISPATCHER_INIT_CALL(_name) \
+ static int __init _name##_init(void) \
+ { \
+ return bpf_arch_init_dispatcher_early(_name##_func); \
+ } \
+ early_initcall(_name##_init)
+
+#ifdef CONFIG_X86_64
+#define BPF_DISPATCHER_ATTRIBUTES __attribute__((patchable_function_entry(5)))
+#else
+#define BPF_DISPATCHER_ATTRIBUTES
+#endif
#define DEFINE_BPF_DISPATCHER(name) \
- noinline unsigned int name##func( \
+ notrace BPF_DISPATCHER_ATTRIBUTES \
+ noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \
const void *ctx, \
const struct bpf_insn *insnsi, \
- unsigned int (*bpf_func)(const void *, \
- const struct bpf_insn *)) \
+ bpf_func_t bpf_func) \
{ \
return bpf_func(ctx, insnsi); \
} \
- EXPORT_SYMBOL(name##func); \
- struct bpf_dispatcher name = BPF_DISPATCHER_INIT(name);
+ EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \
+ struct bpf_dispatcher bpf_dispatcher_##name = \
+ BPF_DISPATCHER_INIT(bpf_dispatcher_##name); \
+ BPF_DISPATCHER_INIT_CALL(bpf_dispatcher_##name);
+
#define DECLARE_BPF_DISPATCHER(name) \
- unsigned int name##func( \
+ unsigned int bpf_dispatcher_##name##_func( \
const void *ctx, \
const struct bpf_insn *insnsi, \
- unsigned int (*bpf_func)(const void *, \
- const struct bpf_insn *)); \
- extern struct bpf_dispatcher name;
-#define BPF_DISPATCHER_FUNC(name) name##func
-#define BPF_DISPATCHER_PTR(name) (&name)
+ bpf_func_t bpf_func); \
+ extern struct bpf_dispatcher bpf_dispatcher_##name;
+#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
+#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
struct bpf_prog *to);
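
With the renaming above, a dispatcher user follows the in-tree XDP pattern (sketch; the real instance lives in net/core/filter.c):

DEFINE_BPF_DISPATCHER(xdp)	/* emits bpf_dispatcher_xdp and
				 * bpf_dispatcher_xdp_func */

/* Run a prog through the dispatcher so the JIT can patch in a direct call. */
static __always_inline u32 run_xdp_prog(const struct bpf_prog *prog, void *ctx)
{
	return BPF_DISPATCHER_FUNC(xdp)(ctx, prog->insnsi, prog->bpf_func);
}

/* On attach/detach:
 *	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), old_prog, new_prog);
 */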
-struct bpf_image {
- struct latch_tree_node tnode;
- unsigned char data[];
-};
-#define BPF_IMAGE_SIZE (PAGE_SIZE - sizeof(struct bpf_image))
-bool is_bpf_image_address(unsigned long address);
-void *bpf_image_alloc(void);
+/* Called only from JIT-enabled code, so there's no need for stubs. */
+void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
+void bpf_image_ksym_del(struct bpf_ksym *ksym);
+void bpf_ksym_add(struct bpf_ksym *ksym);
+void bpf_ksym_del(struct bpf_ksym *ksym);
+int bpf_jit_charge_modmem(u32 size);
+void bpf_jit_uncharge_modmem(u32 size);
+bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
#else
-static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
+static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
+ struct bpf_trampoline *tr)
{
- return NULL;
+ return -ENOTSUPP;
}
-static inline int bpf_trampoline_link_prog(struct bpf_prog *prog)
+static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
+ struct bpf_trampoline *tr)
{
return -ENOTSUPP;
}
-static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
+static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
+ struct bpf_attach_target_info *tgt_info)
{
- return -ENOTSUPP;
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
-#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nopfunc
+#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
struct bpf_prog *from,
@@ -588,6 +1059,10 @@ static inline bool is_bpf_image_address(unsigned long address)
{
return false;
}
+static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
+{
+ return false;
+}
#endif
struct bpf_func_info_aux {
@@ -601,21 +1076,40 @@ enum bpf_jit_poke_reason {
/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
- void *ip;
+ void *tailcall_target;
+ void *tailcall_bypass;
+ void *bypass_addr;
+ void *aux;
union {
struct {
struct bpf_map *map;
u32 key;
} tail_call;
};
- bool ip_stable;
+ bool tailcall_target_stable;
u8 adj_off;
u16 reason;
+ u32 insn_idx;
+};
+
+/* reg_type info for ctx arguments */
+struct bpf_ctx_arg_aux {
+ u32 offset;
+ enum bpf_reg_type reg_type;
+ u32 btf_id;
};
+struct btf_mod_pair {
+ struct btf *btf;
+ struct module *module;
+};
+
+struct bpf_kfunc_desc_tab;
+
struct bpf_prog_aux {
atomic64_t refcnt;
u32 used_map_cnt;
+ u32 used_btf_cnt;
u32 max_ctx_offset;
u32 max_pkt_offset;
u32 max_tp_access;
@@ -624,14 +1118,23 @@ struct bpf_prog_aux {
u32 func_cnt; /* used by non-func prog as the number of func progs */
u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
u32 attach_btf_id; /* in-kernel BTF type id to attach to */
- struct bpf_prog *linked_prog;
+ u32 ctx_arg_info_size;
+ u32 max_rdonly_access;
+ u32 max_rdwr_access;
+ struct btf *attach_btf;
+ const struct bpf_ctx_arg_aux *ctx_arg_info;
+ struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
+ struct bpf_prog *dst_prog;
+ struct bpf_trampoline *dst_trampoline;
+ enum bpf_prog_type saved_dst_prog_type;
+ enum bpf_attach_type saved_dst_attach_type;
bool verifier_zext; /* Zero extensions has been inserted by verifier. */
bool offload_requested;
bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
bool func_proto_unreliable;
- enum bpf_tramp_prog_type trampoline_prog_type;
- struct bpf_trampoline *trampoline;
- struct hlist_node tramp_hlist;
+ bool sleepable;
+ bool tail_call_reachable;
+ bool xdp_has_frags;
/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
const struct btf_type *attach_func_proto;
/* function name for valid attach_btf_id */
@@ -639,14 +1142,19 @@ struct bpf_prog_aux {
struct bpf_prog **func;
void *jit_data; /* JIT specific data. arch dependent */
struct bpf_jit_poke_descriptor *poke_tab;
+ struct bpf_kfunc_desc_tab *kfunc_tab;
+ struct bpf_kfunc_btf_tab *kfunc_btf_tab;
u32 size_poke_tab;
- struct latch_tree_node ksym_tnode;
- struct list_head ksym_lnode;
+ struct bpf_ksym ksym;
const struct bpf_prog_ops *ops;
struct bpf_map **used_maps;
+ struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
+ struct btf_mod_pair *used_btfs;
struct bpf_prog *prog;
struct user_struct *user;
u64 load_time; /* ns since boottime */
+ u32 verified_insns;
+ int cgroup_atype; /* enum cgroup_bpf_attach_type */
struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
@@ -680,21 +1188,47 @@ struct bpf_prog_aux {
u32 linfo_idx;
u32 num_exentries;
struct exception_table_entry *extable;
- struct bpf_prog_stats __percpu *stats;
union {
struct work_struct work;
struct rcu_head rcu;
};
};
+struct bpf_prog {
+ u16 pages; /* Number of allocated pages */
+ u16 jited:1, /* Is our filter JIT'ed? */
+ jit_requested:1,/* archs need to JIT the prog */
+ gpl_compatible:1, /* Is filter GPL compatible? */
+ cb_access:1, /* Is control block accessed? */
+ dst_needed:1, /* Do we need dst entry? */
+ blinding_requested:1, /* needs constant blinding */
+ blinded:1, /* Was blinded */
+ is_func:1, /* program is a bpf function */
+ kprobe_override:1, /* Do we override a kprobe? */
+ has_callchain_buf:1, /* callchain buffer allocated? */
+ enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
+ call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
+ call_get_func_ip:1, /* Do we call get_func_ip() */
+ tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */
+ enum bpf_prog_type type; /* Type of BPF program */
+ enum bpf_attach_type expected_attach_type; /* For some prog types */
+ u32 len; /* Number of filter blocks */
+ u32 jited_len; /* Size of jited insns in bytes */
+ u8 tag[BPF_TAG_SIZE];
+ struct bpf_prog_stats __percpu *stats;
+ int __percpu *active;
+ unsigned int (*bpf_func)(const void *ctx,
+ const struct bpf_insn *insn);
+ struct bpf_prog_aux *aux; /* Auxiliary fields */
+ struct sock_fprog_kern *orig_prog; /* Original BPF program */
+ /* Instructions for interpreter */
+ union {
+ DECLARE_FLEX_ARRAY(struct sock_filter, insns);
+ DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi);
+ };
+};
+
struct bpf_array_aux {
- /* 'Ownership' of prog array is claimed by the first program that
- * is going to use this map or by the first program which FD is
- * stored in the map to make sure that all callers and callees have
- * the same prog type and JITed flag.
- */
- enum bpf_prog_type type;
- bool jited;
/* Programs with direct jumps into programs part of this array. */
struct list_head poke_progs;
struct bpf_map *map;
@@ -702,8 +1236,52 @@ struct bpf_array_aux {
struct work_struct work;
};
+struct bpf_link {
+ atomic64_t refcnt;
+ u32 id;
+ enum bpf_link_type type;
+ const struct bpf_link_ops *ops;
+ struct bpf_prog *prog;
+ struct work_struct work;
+};
+
+struct bpf_link_ops {
+ void (*release)(struct bpf_link *link);
+ void (*dealloc)(struct bpf_link *link);
+ int (*detach)(struct bpf_link *link);
+ int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
+ struct bpf_prog *old_prog);
+ void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
+ int (*fill_link_info)(const struct bpf_link *link,
+ struct bpf_link_info *info);
+};
+
+struct bpf_tramp_link {
+ struct bpf_link link;
+ struct hlist_node tramp_hlist;
+ u64 cookie;
+};
+
+struct bpf_shim_tramp_link {
+ struct bpf_tramp_link link;
+ struct bpf_trampoline *trampoline;
+};
+
+struct bpf_tracing_link {
+ struct bpf_tramp_link link;
+ enum bpf_attach_type attach_type;
+ struct bpf_trampoline *trampoline;
+ struct bpf_prog *tgt_prog;
+};
+
+struct bpf_link_primer {
+ struct bpf_link *link;
+ struct file *file;
+ int fd;
+ u32 id;
+};
+
struct bpf_struct_ops_value;
-struct btf_type;
struct btf_member;
#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
@@ -733,6 +1311,10 @@ bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
void *value);
+int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
+ struct bpf_tramp_link *link,
+ const struct btf_func_model *model,
+ void *image, void *image_end);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
if (owner == BPF_MODULE_OWNER)
@@ -747,6 +1329,22 @@ static inline void bpf_module_put(const void *data, struct module *owner)
else
module_put(owner);
}
+
+#ifdef CONFIG_NET
+/* Defined here to avoid the use of a forward declaration */
+struct bpf_dummy_ops_state {
+ int val;
+};
+
+struct bpf_dummy_ops {
+ int (*test_1)(struct bpf_dummy_ops_state *cb);
+ int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
+ char a3, unsigned long a4);
+};
+
+int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+#endif
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
@@ -772,6 +1370,21 @@ static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
}
#endif
+#if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
+int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
+ int cgroup_atype);
+void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog);
+#else
+static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
+ int cgroup_atype)
+{
+ return -EOPNOTSUPP;
+}
+static inline void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog)
+{
+}
+#endif
+
struct bpf_array {
struct bpf_map map;
u32 elem_size;
@@ -785,7 +1398,10 @@ struct bpf_array {
};
#define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */
-#define MAX_TAIL_CALL_CNT 32
+#define MAX_TAIL_CALL_CNT 33
+
+/* Maximum number of loops for bpf_loop */
+#define BPF_MAX_LOOPS BIT(23)
#define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \
BPF_F_RDONLY_PROG | \
@@ -795,6 +1411,11 @@ struct bpf_array {
#define BPF_MAP_CAN_READ BIT(0)
#define BPF_MAP_CAN_WRITE BIT(1)
+/* Maximum number of user-producer ring buffer samples that can be drained in
+ * a call to bpf_user_ringbuf_drain().
+ */
+#define BPF_MAX_USER_RINGBUF_SAMPLES (128 * 1024)
+
static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
@@ -823,11 +1444,18 @@ struct bpf_event_entry {
struct rcu_head rcu;
};
-bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
+static inline bool map_type_contains_progs(struct bpf_map *map)
+{
+ return map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
+ map->map_type == BPF_MAP_TYPE_DEVMAP ||
+ map->map_type == BPF_MAP_TYPE_CPUMAP;
+}
+
+bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);
-const char *kernel_type_name(u32 btf_type_id);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
+const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);
typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
unsigned long off, unsigned long len);
@@ -843,7 +1471,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
/* an array of programs to be executed under rcu_read_lock().
*
* Typical usage:
- * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
+ * ret = bpf_prog_run_array(rcu_dereference(&bpf_prog_array), ctx, bpf_prog_run);
*
* the structure returned by bpf_prog_array_alloc() should be populated
* with program pointers and the last pointer must be NULL.
@@ -854,16 +1482,34 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
*/
struct bpf_prog_array_item {
struct bpf_prog *prog;
- struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
+ union {
+ struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
+ u64 bpf_cookie;
+ };
};
struct bpf_prog_array {
struct rcu_head rcu;
- struct bpf_prog_array_item items[0];
+ struct bpf_prog_array_item items[];
+};
+
+struct bpf_empty_prog_array {
+ struct bpf_prog_array hdr;
+ struct bpf_prog *null_prog;
};
+/* To avoid allocating an empty bpf_prog_array for cgroups that don't have
+ * a bpf program attached, use the single global 'bpf_empty_prog_array'.
+ * It will not be modified by the caller of bpf_prog_array_alloc() (since
+ * the caller requested prog_cnt == 0); that pointer should still be
+ * 'freed' via bpf_prog_array_free().
+ */
+extern struct bpf_empty_prog_array bpf_empty_prog_array;
+
struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
+/* Use when traversal over the bpf_prog_array uses tasks_trace rcu */
+void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
@@ -871,107 +1517,175 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
struct bpf_prog *old_prog);
+int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
+int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
+ struct bpf_prog *prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
u32 *prog_ids, u32 request_cnt,
u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
struct bpf_prog *exclude_prog,
struct bpf_prog *include_prog,
+ u64 bpf_cookie,
struct bpf_prog_array **new_array);
-#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \
- ({ \
- struct bpf_prog_array_item *_item; \
- struct bpf_prog *_prog; \
- struct bpf_prog_array *_array; \
- u32 _ret = 1; \
- preempt_disable(); \
- rcu_read_lock(); \
- _array = rcu_dereference(array); \
- if (unlikely(check_non_null && !_array))\
- goto _out; \
- _item = &_array->items[0]; \
- while ((_prog = READ_ONCE(_item->prog))) { \
- bpf_cgroup_storage_set(_item->cgroup_storage); \
- _ret &= func(_prog, ctx); \
- _item++; \
- } \
-_out: \
- rcu_read_unlock(); \
- preempt_enable(); \
- _ret; \
- })
-
-/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
- * so BPF programs can request cwr for TCP packets.
- *
- * Current cgroup skb programs can only return 0 or 1 (0 to drop the
- * packet. This macro changes the behavior so the low order bit
- * indicates whether the packet should be dropped (0) or not (1)
- * and the next bit is a congestion notification bit. This could be
- * used by TCP to call tcp_enter_cwr()
+struct bpf_run_ctx {};
+
+struct bpf_cg_run_ctx {
+ struct bpf_run_ctx run_ctx;
+ const struct bpf_prog_array_item *prog_item;
+ int retval;
+};
+
+struct bpf_trace_run_ctx {
+ struct bpf_run_ctx run_ctx;
+ u64 bpf_cookie;
+};
+
+struct bpf_tramp_run_ctx {
+ struct bpf_run_ctx run_ctx;
+ u64 bpf_cookie;
+ struct bpf_run_ctx *saved_run_ctx;
+};
+
+static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
+{
+ struct bpf_run_ctx *old_ctx = NULL;
+
+#ifdef CONFIG_BPF_SYSCALL
+ old_ctx = current->bpf_ctx;
+ current->bpf_ctx = new_ctx;
+#endif
+ return old_ctx;
+}
+
+static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
+{
+#ifdef CONFIG_BPF_SYSCALL
+ current->bpf_ctx = old_ctx;
+#endif
+}
+
+/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
+#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0)
+/* BPF program asks to set CN on the packet. */
+#define BPF_RET_SET_CN (1 << 0)
+
+typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
+
+static __always_inline u32
+bpf_prog_run_array(const struct bpf_prog_array *array,
+ const void *ctx, bpf_prog_run_fn run_prog)
+{
+ const struct bpf_prog_array_item *item;
+ const struct bpf_prog *prog;
+ struct bpf_run_ctx *old_run_ctx;
+ struct bpf_trace_run_ctx run_ctx;
+ u32 ret = 1;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");
+
+ if (unlikely(!array))
+ return ret;
+
+ migrate_disable();
+ old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+ item = &array->items[0];
+ while ((prog = READ_ONCE(item->prog))) {
+ run_ctx.bpf_cookie = item->bpf_cookie;
+ ret &= run_prog(prog, ctx);
+ item++;
+ }
+ bpf_reset_run_ctx(old_run_ctx);
+ migrate_enable();
+ return ret;
+}
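
A caller sketch, per the RCU_LOCKDEP_WARN() above (the cgroup_bpf field access and attach-type index are illustrative):

static u32 run_effective_progs(struct cgroup_bpf *cgrp_bpf, void *ctx)
{
	u32 ret;

	rcu_read_lock();	/* required: array is dereferenced under RCU */
	ret = bpf_prog_run_array(rcu_dereference(cgrp_bpf->effective[0]),
				 ctx, bpf_prog_run);
	rcu_read_unlock();
	return ret;
}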
+
+/* Notes on RCU design for bpf_prog_arrays containing sleepable programs:
*
- * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
- * 0: drop packet
- * 1: keep packet
- * 2: drop packet and cn
- * 3: keep packet and cn
+ * We use the tasks_trace rcu flavor read section to protect the bpf_prog_array
+ * overall. As a result, the array must be freed with
+ * bpf_prog_array_free_sleepable() so that a tasks_trace rcu grace period
+ * elapses before the memory is reclaimed.
*
- * This macro then converts it to one of the NET_XMIT or an error
- * code that is then interpreted as drop packet (and no cn):
- * 0: NET_XMIT_SUCCESS skb should be transmitted
- * 1: NET_XMIT_DROP skb should be dropped and cn
- * 2: NET_XMIT_CN skb should be transmitted and cn
- * 3: -EPERM skb should be dropped
+ * When a non-sleepable program is inside the array, we take the rcu read
+ * section and disable preemption for that program alone, so it can access
+ * rcu-protected dynamically sized maps.
*/
-#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \
- ({ \
- struct bpf_prog_array_item *_item; \
- struct bpf_prog *_prog; \
- struct bpf_prog_array *_array; \
- u32 ret; \
- u32 _ret = 1; \
- u32 _cn = 0; \
- preempt_disable(); \
- rcu_read_lock(); \
- _array = rcu_dereference(array); \
- _item = &_array->items[0]; \
- while ((_prog = READ_ONCE(_item->prog))) { \
- bpf_cgroup_storage_set(_item->cgroup_storage); \
- ret = func(_prog, ctx); \
- _ret &= (ret & 1); \
- _cn |= (ret & 2); \
- _item++; \
- } \
- rcu_read_unlock(); \
- preempt_enable(); \
- if (_ret) \
- _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \
- else \
- _ret = (_cn ? NET_XMIT_DROP : -EPERM); \
- _ret; \
- })
-
-#define BPF_PROG_RUN_ARRAY(array, ctx, func) \
- __BPF_PROG_RUN_ARRAY(array, ctx, func, false)
-
-#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) \
- __BPF_PROG_RUN_ARRAY(array, ctx, func, true)
+static __always_inline u32
+bpf_prog_run_array_sleepable(const struct bpf_prog_array __rcu *array_rcu,
+ const void *ctx, bpf_prog_run_fn run_prog)
+{
+ const struct bpf_prog_array_item *item;
+ const struct bpf_prog *prog;
+ const struct bpf_prog_array *array;
+ struct bpf_run_ctx *old_run_ctx;
+ struct bpf_trace_run_ctx run_ctx;
+ u32 ret = 1;
+
+ might_fault();
+
+ rcu_read_lock_trace();
+ migrate_disable();
+
+ array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held());
+ if (unlikely(!array))
+ goto out;
+ old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+ item = &array->items[0];
+ while ((prog = READ_ONCE(item->prog))) {
+ if (!prog->aux->sleepable)
+ rcu_read_lock();
+
+ run_ctx.bpf_cookie = item->bpf_cookie;
+ ret &= run_prog(prog, ctx);
+ item++;
+
+ if (!prog->aux->sleepable)
+ rcu_read_unlock();
+ }
+ bpf_reset_run_ctx(old_run_ctx);
+out:
+ migrate_enable();
+ rcu_read_unlock_trace();
+ return ret;
+}
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
+extern struct mutex bpf_stats_enabled_mutex;
+
+/*
+ * Block execution of BPF programs attached to instrumentation (perf,
+ * kprobes, tracepoints) to prevent deadlocks on map operations as any of
+ * these events can fire inside a region that already holds a map bucket
+ * lock and would deadlock on it.
+ */
+static inline void bpf_disable_instrumentation(void)
+{
+ migrate_disable();
+ this_cpu_inc(bpf_prog_active);
+}
+
+static inline void bpf_enable_instrumentation(void)
+{
+ this_cpu_dec(bpf_prog_active);
+ migrate_enable();
+}
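
The intended pairing around a map-bucket critical section looks like this sketch (the bucket type and function names are illustrative):

struct example_bucket {
	raw_spinlock_t lock;
};

static void example_bucket_update(struct example_bucket *b)
{
	bpf_disable_instrumentation();	/* no instrumentation progs run here */
	raw_spin_lock(&b->lock);
	/* ... update the element under the bucket lock ... */
	raw_spin_unlock(&b->lock);
	bpf_enable_instrumentation();
}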
extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
+extern const struct file_operations bpf_iter_fops;
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
extern const struct bpf_prog_ops _name ## _prog_ops; \
extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
extern const struct bpf_map_ops _ops;
+#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
+#undef BPF_LINK_TYPE
extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
@@ -985,14 +1699,17 @@ void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
-int __bpf_prog_charge(struct user_struct *user, u32 pages);
-void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
-void __bpf_free_used_maps(struct bpf_prog_aux *aux,
- struct bpf_map **used_maps, u32 len);
void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
+struct bpf_map_value_off_desc *bpf_map_kptr_off_contains(struct bpf_map *map, u32 offset);
+void bpf_map_free_kptr_off_tab(struct bpf_map *map);
+struct bpf_map_value_off *bpf_map_copy_kptr_off_tab(const struct bpf_map *map);
+bool bpf_map_equal_kptr_off_tab(const struct bpf_map *map_a, const struct bpf_map *map_b);
+void bpf_map_free_kptrs(struct bpf_map *map, void *map_value);
+
+struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
@@ -1000,15 +1717,10 @@ void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
-int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
-void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
-int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
-void bpf_map_charge_finish(struct bpf_map_memory *mem);
-void bpf_map_charge_move(struct bpf_map_memory *dst,
- struct bpf_map_memory *src);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
+bool bpf_map_write_active(const struct bpf_map *map);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int generic_map_lookup_batch(struct bpf_map *map,
const union bpf_attr *attr,
@@ -1019,15 +1731,185 @@ int generic_map_update_batch(struct bpf_map *map,
int generic_map_delete_batch(struct bpf_map *map,
const union bpf_attr *attr,
union bpf_attr __user *uattr);
+struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
+struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
+
+#ifdef CONFIG_MEMCG_KMEM
+void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
+ int node);
+void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
+void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
+ size_t align, gfp_t flags);
+#else
+static inline void *
+bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
+ int node)
+{
+ return kmalloc_node(size, flags, node);
+}
+
+static inline void *
+bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
+{
+ return kzalloc(size, flags);
+}
+
+static inline void __percpu *
+bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
+ gfp_t flags)
+{
+ return __alloc_percpu_gfp(size, align, flags);
+}
+#endif
extern int sysctl_unprivileged_bpf_disabled;
+static inline bool bpf_allow_ptr_leaks(void)
+{
+ return perfmon_capable();
+}
+
+static inline bool bpf_allow_uninit_stack(void)
+{
+ return perfmon_capable();
+}
+
+static inline bool bpf_allow_ptr_to_map_access(void)
+{
+ return perfmon_capable();
+}
+
+static inline bool bpf_bypass_spec_v1(void)
+{
+ return perfmon_capable();
+}
+
+static inline bool bpf_bypass_spec_v4(void)
+{
+ return perfmon_capable();
+}
+
int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);
+void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
+ const struct bpf_link_ops *ops, struct bpf_prog *prog);
+int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
+int bpf_link_settle(struct bpf_link_primer *primer);
+void bpf_link_cleanup(struct bpf_link_primer *primer);
+void bpf_link_inc(struct bpf_link *link);
+void bpf_link_put(struct bpf_link *link);
+int bpf_link_new_fd(struct bpf_link *link);
+struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
+struct bpf_link *bpf_link_get_from_fd(u32 ufd);
+struct bpf_link *bpf_link_get_curr_or_next(u32 *id);
+
int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);
+#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
+#define DEFINE_BPF_ITER_FUNC(target, args...) \
+ extern int bpf_iter_ ## target(args); \
+ int __init bpf_iter_ ## target(args) { return 0; }
+
+/*
+ * The type of a BPF task iterator.
+ *
+ * BPF task iterators can be parameterized to visit only a subset of
+ * tasks.
+ *
+ * BPF_TASK_ITER_ALL (default)
+ * Iterate over resources of every task.
+ *
+ * BPF_TASK_ITER_TID
+ * Iterate over resources of a task/tid.
+ *
+ * BPF_TASK_ITER_TGID
+ * Iterate over resources of every task of a process / task group.
+ */
+enum bpf_iter_task_type {
+ BPF_TASK_ITER_ALL = 0,
+ BPF_TASK_ITER_TID,
+ BPF_TASK_ITER_TGID,
+};
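From userspace these iterator types are selected through union
bpf_iter_link_info at attach time. A hedged libbpf sketch (attach_tid_iter is
a made-up helper; a kernel that understands BPF_TASK_ITER_TID is assumed):

        #include <bpf/libbpf.h>

        static struct bpf_link *attach_tid_iter(struct bpf_program *prog,
                                                __u32 tid)
        {
                union bpf_iter_link_info linfo = { .task.tid = tid };
                LIBBPF_OPTS(bpf_iter_attach_opts, opts,
                            .link_info = &linfo,
                            .link_info_len = sizeof(linfo));

                return bpf_program__attach_iter(prog, &opts);
        }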
+
+struct bpf_iter_aux_info {
+ /* for map_elem iter */
+ struct bpf_map *map;
+
+ /* for cgroup iter */
+ struct {
+ struct cgroup *start; /* starting cgroup */
+ enum bpf_cgroup_iter_order order;
+ } cgroup;
+ struct {
+ enum bpf_iter_task_type type;
+ u32 pid;
+ } task;
+};
+
+typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
+ union bpf_iter_link_info *linfo,
+ struct bpf_iter_aux_info *aux);
+typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
+typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
+ struct seq_file *seq);
+typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
+ struct bpf_link_info *info);
+typedef const struct bpf_func_proto *
+(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
+ const struct bpf_prog *prog);
+
+enum bpf_iter_feature {
+ BPF_ITER_RESCHED = BIT(0),
+};
+
+#define BPF_ITER_CTX_ARG_MAX 2
+struct bpf_iter_reg {
+ const char *target;
+ bpf_iter_attach_target_t attach_target;
+ bpf_iter_detach_target_t detach_target;
+ bpf_iter_show_fdinfo_t show_fdinfo;
+ bpf_iter_fill_link_info_t fill_link_info;
+ bpf_iter_get_func_proto_t get_func_proto;
+ u32 ctx_arg_info_size;
+ u32 feature;
+ struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
+ const struct bpf_iter_seq_info *seq_info;
+};
+
+struct bpf_iter_meta {
+ __bpf_md_ptr(struct seq_file *, seq);
+ u64 session_id;
+ u64 seq_num;
+};
+
+struct bpf_iter__bpf_map_elem {
+ __bpf_md_ptr(struct bpf_iter_meta *, meta);
+ __bpf_md_ptr(struct bpf_map *, map);
+ __bpf_md_ptr(void *, key);
+ __bpf_md_ptr(void *, value);
+};
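A BPF-side consumer of this context can be sketched as below (assumes a map
with u32 keys and values, libbpf's helper headers, and kernel type
definitions from vmlinux.h; dump_elem is a made-up name):

        #include "vmlinux.h"
        #include <bpf/bpf_helpers.h>
        #include <bpf/bpf_tracing.h>    /* BPF_SEQ_PRINTF */

        char LICENSE[] SEC("license") = "GPL";

        SEC("iter/bpf_map_elem")
        int dump_elem(struct bpf_iter__bpf_map_elem *ctx)
        {
                struct seq_file *seq = ctx->meta->seq;
                __u32 *key = ctx->key, *val = ctx->value;

                if (!key || !val)       /* NULL on the final (stop) call */
                        return 0;
                BPF_SEQ_PRINTF(seq, "%u: %u\n", *key, *val);
                return 0;
        }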
+
+int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
+void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
+bool bpf_iter_prog_supported(struct bpf_prog *prog);
+const struct bpf_func_proto *
+bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
+int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
+int bpf_iter_new_fd(struct bpf_link *link);
+bool bpf_link_is_iter(struct bpf_link *link);
+struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
+int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
+void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
+ struct seq_file *seq);
+int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
+ struct bpf_link_info *info);
+
+int map_set_for_each_callback_args(struct bpf_verifier_env *env,
+ struct bpf_func_state *caller,
+ struct bpf_func_state *callee);
+
int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
@@ -1045,48 +1927,42 @@ int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_get_file_flag(int flags);
-int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size,
+int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
size_t actual_size);
-/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
- * forced to use 'long' read/writes to try to atomically copy long counters.
- * Best-effort only. No barriers here, since it _will_ race with concurrent
- * updates from BPF programs. Called from bpf syscall and mostly used with
- * size 8 or 16 bytes, so ask compiler to inline it.
- */
-static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
-{
- const long *lsrc = src;
- long *ldst = dst;
-
- size /= sizeof(long);
- while (size--)
- *ldst++ = *lsrc++;
-}
-
/* verify correctness of eBPF program */
-int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
- union bpf_attr __user *uattr);
+int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr);
+
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
+#endif
+
+struct btf *bpf_get_btf_vmlinux(void);
/* Map specifics */
-struct xdp_buff;
+struct xdp_frame;
struct sk_buff;
+struct bpf_dtab_netdev;
+struct bpf_cpu_map_entry;
-struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
-struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
void __dev_flush(void);
-int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
+int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
struct net_device *dev_rx);
-int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
struct net_device *dev_rx);
+int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
+ struct bpf_map *map, bool exclude_ingress);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
struct bpf_prog *xdp_prog);
+int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
+ struct bpf_prog *xdp_prog, struct bpf_map *map,
+ bool exclude_ingress);
-struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_flush(void);
-int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
+int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
struct net_device *dev_rx);
+int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
+ struct sk_buff *skb);
/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
@@ -1102,18 +1978,52 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
+int bpf_prog_test_run_tracing(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr);
+int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info);
-int btf_struct_access(struct bpf_verifier_log *log,
+
+static inline bool bpf_tracing_ctx_access(int off, int size,
+ enum bpf_access_type type)
+{
+ if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
+ return false;
+ if (type != BPF_READ)
+ return false;
+ if (off % size != 0)
+ return false;
+ return true;
+}
+
+static inline bool bpf_tracing_btf_ctx_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ if (!bpf_tracing_ctx_access(off, size, type))
+ return false;
+ return btf_ctx_access(off, size, type, prog, info);
+}
+
+int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
const struct btf_type *t, int off, int size,
enum bpf_access_type atype,
- u32 *next_btf_id);
-int btf_resolve_helper_id(struct bpf_verifier_log *log,
- const struct bpf_func_proto *fn, int);
+ u32 *next_btf_id, enum bpf_type_flag *flag);
+bool btf_struct_ids_match(struct bpf_verifier_log *log,
+ const struct btf *btf, u32 id, int off,
+ const struct btf *need_btf, u32 need_type_id,
+ bool strict);
int btf_distill_func_proto(struct bpf_verifier_log *log,
struct btf *btf,
@@ -1121,16 +2031,60 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
const char *func_name,
struct btf_func_model *m);
+struct bpf_kfunc_arg_meta {
+ u64 r0_size;
+ bool r0_rdonly;
+ int ref_obj_id;
+ u32 flags;
+};
+
struct bpf_reg_state;
-int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
- struct bpf_reg_state *regs);
+int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
+ struct bpf_reg_state *regs);
+int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
+ struct bpf_reg_state *regs);
+int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
+ const struct btf *btf, u32 func_id,
+ struct bpf_reg_state *regs,
+ struct bpf_kfunc_arg_meta *meta);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
struct bpf_reg_state *reg);
-int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog,
+int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
struct btf *btf, const struct btf_type *t);
struct bpf_prog *bpf_prog_by_id(u32 id);
+struct bpf_link *bpf_link_by_id(u32 id);
+
+const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
+void bpf_task_storage_free(struct task_struct *task);
+bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
+const struct btf_func_model *
+bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
+ const struct bpf_insn *insn);
+struct bpf_core_ctx {
+ struct bpf_verifier_log *log;
+ const struct btf *btf;
+};
+
+int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
+ int relo_idx, void *insn);
+
+static inline bool unprivileged_ebpf_enabled(void)
+{
+ return !sysctl_unprivileged_bpf_disabled;
+}
+
+/* Not all bpf prog types have a bpf_ctx.
+ * For the prog types that do initialize bpf_ctx, this function can be
+ * used to decide whether a kernel function is being called by a bpf
+ * program.
+ */
+static inline bool has_current_bpf_ctx(void)
+{
+ return !!current->bpf_ctx;
+}
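One plausible use, sketched rather than copied from the tree: a kernel path
reachable both from a plain syscall and from a BPF helper can branch on it to
avoid re-taking a lock the BPF caller already holds (maybe_lock_sock is
hypothetical):

        static void maybe_lock_sock(struct sock *sk)
        {
                /* BPF callers are required to hold the lock already */
                if (!has_current_bpf_ctx())
                        lock_sock(sk);
        }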
+
+void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
@@ -1166,53 +2120,69 @@ bpf_prog_inc_not_zero(struct bpf_prog *prog)
return ERR_PTR(-EOPNOTSUPP);
}
-static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
+static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
+ const struct bpf_link_ops *ops,
+ struct bpf_prog *prog)
{
- return 0;
}
-static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
+static inline int bpf_link_prime(struct bpf_link *link,
+ struct bpf_link_primer *primer)
{
+ return -EOPNOTSUPP;
}
-static inline int bpf_obj_get_user(const char __user *pathname, int flags)
+static inline int bpf_link_settle(struct bpf_link_primer *primer)
{
return -EOPNOTSUPP;
}
-static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
- u32 key)
+static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
{
- return NULL;
}
-static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map,
- u32 key)
+static inline void bpf_link_inc(struct bpf_link *link)
{
- return NULL;
+}
+
+static inline void bpf_link_put(struct bpf_link *link)
+{
+}
+
+static inline int bpf_obj_get_user(const char __user *pathname, int flags)
+{
+ return -EOPNOTSUPP;
}
static inline void __dev_flush(void)
{
}
-struct xdp_buff;
+struct xdp_frame;
struct bpf_dtab_netdev;
+struct bpf_cpu_map_entry;
static inline
-int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
+int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
struct net_device *dev_rx)
{
return 0;
}
static inline
-int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
struct net_device *dev_rx)
{
return 0;
}
+static inline
+int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
+ struct bpf_map *map, bool exclude_ingress)
+{
+ return 0;
+}
+
struct sk_buff;
static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
@@ -1223,9 +2193,11 @@ static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
}
static inline
-struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
+int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
+ struct bpf_prog *xdp_prog, struct bpf_map *map,
+ bool exclude_ingress)
{
- return NULL;
+ return 0;
}
static inline void __cpu_map_flush(void)
@@ -1233,12 +2205,18 @@ static inline void __cpu_map_flush(void)
}
static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
- struct xdp_buff *xdp,
+ struct xdp_frame *xdpf,
struct net_device *dev_rx)
{
return 0;
}
+static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
+ struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
enum bpf_prog_type type)
{
@@ -1259,6 +2237,13 @@ static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
return -ENOTSUPP;
}
+static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ return -ENOTSUPP;
+}
+
static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr)
@@ -1266,6 +2251,13 @@ static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
return -ENOTSUPP;
}
+static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ return -ENOTSUPP;
+}
+
static inline void bpf_map_put(struct bpf_map *map)
{
}
@@ -1274,14 +2266,65 @@ static inline struct bpf_prog *bpf_prog_by_id(u32 id)
{
return ERR_PTR(-ENOTSUPP);
}
+
+static inline int btf_struct_access(struct bpf_verifier_log *log,
+ const struct btf *btf,
+ const struct btf_type *t, int off, int size,
+ enum bpf_access_type atype,
+ u32 *next_btf_id, enum bpf_type_flag *flag)
+{
+ return -EACCES;
+}
+
+static inline const struct bpf_func_proto *
+bpf_base_func_proto(enum bpf_func_id func_id)
+{
+ return NULL;
+}
+
+static inline void bpf_task_storage_free(struct task_struct *task)
+{
+}
+
+static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
+{
+ return false;
+}
+
+static inline const struct btf_func_model *
+bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
+ const struct bpf_insn *insn)
+{
+ return NULL;
+}
+
+static inline bool unprivileged_ebpf_enabled(void)
+{
+ return false;
+}
+
+static inline bool has_current_bpf_ctx(void)
+{
+ return false;
+}
+
+static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog)
+{
+}
#endif /* CONFIG_BPF_SYSCALL */
+void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
+ struct btf_mod_pair *used_btfs, u32 len);
+
static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
enum bpf_prog_type type)
{
return bpf_prog_get_type_dev(ufd, type, false);
}
+void __bpf_free_used_maps(struct bpf_prog_aux *aux,
+ struct bpf_map **used_maps, u32 len);
+
bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
int bpf_prog_offload_compile(struct bpf_prog *prog);
@@ -1310,6 +2353,8 @@ void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
+void unpriv_ebpf_notify(int new_state);
+
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
@@ -1325,6 +2370,19 @@ static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
+int bpf_prog_test_run_syscall(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+
+int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
+int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
+int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
+int sock_map_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+
+void sock_map_unhash(struct sock *sk);
+void sock_map_destroy(struct sock *sk);
+void sock_map_close(struct sock *sk, long timeout);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
union bpf_attr *attr)
@@ -1350,24 +2408,40 @@ static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}
-#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
-#if defined(CONFIG_BPF_STREAM_PARSER)
-int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
-int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
-#else
-static inline int sock_map_prog_update(struct bpf_map *map,
- struct bpf_prog *prog, u32 which)
+static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
{
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
}
+#ifdef CONFIG_BPF_SYSCALL
static inline int sock_map_get_from_fd(const union bpf_attr *attr,
struct bpf_prog *prog)
{
return -EINVAL;
}
-#endif
+
+static inline int sock_map_prog_detach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
+ u64 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_BPF_SYSCALL */
+#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
@@ -1403,20 +2477,28 @@ extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;
+extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto;
extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
+extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
+extern const struct bpf_func_proto bpf_ktime_get_tai_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
+extern const struct bpf_func_proto bpf_get_task_stack_proto;
+extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
+extern const struct bpf_func_proto bpf_get_stack_proto_pe;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
+extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
+extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
@@ -1428,10 +2510,53 @@ extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;
+extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
+extern const struct bpf_func_proto bpf_event_output_data_proto;
+extern const struct bpf_func_proto bpf_ringbuf_output_proto;
+extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
+extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
+extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
+extern const struct bpf_func_proto bpf_ringbuf_query_proto;
+extern const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto;
+extern const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto;
+extern const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto;
+extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto;
+extern const struct bpf_func_proto bpf_copy_from_user_proto;
+extern const struct bpf_func_proto bpf_snprintf_btf_proto;
+extern const struct bpf_func_proto bpf_snprintf_proto;
+extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
+extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
+extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
+extern const struct bpf_func_proto bpf_sock_from_file_proto;
+extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
+extern const struct bpf_func_proto bpf_task_storage_get_proto;
+extern const struct bpf_func_proto bpf_task_storage_delete_proto;
+extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
+extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
+extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
+extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
+extern const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto;
+extern const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto;
+extern const struct bpf_func_proto bpf_find_vma_proto;
+extern const struct bpf_func_proto bpf_loop_proto;
+extern const struct bpf_func_proto bpf_copy_from_user_task_proto;
+extern const struct bpf_func_proto bpf_set_retval_proto;
+extern const struct bpf_func_proto bpf_get_retval_proto;
+extern const struct bpf_func_proto bpf_user_ringbuf_drain_proto;
+
+const struct bpf_func_proto *tracing_prog_func_proto(
+ enum bpf_func_id func_id, const struct bpf_prog *prog);
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
@@ -1472,6 +2597,7 @@ struct sk_reuseport_kern {
struct sk_buff *skb;
struct sock *sk;
struct sock *selected_sk;
+ struct sock *migrating_sk;
void *data_end;
u32 hash;
u32 reuseport_id;
@@ -1535,4 +2661,62 @@ enum bpf_text_poke_type {
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *addr1, void *addr2);
+void *bpf_arch_text_copy(void *dst, void *src, size_t len);
+int bpf_arch_text_invalidate(void *dst, size_t len);
+
+struct btf_id_set;
+bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
+
+#define MAX_BPRINTF_VARARGS 12
+
+int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
+ u32 **bin_buf, u32 num_args);
+void bpf_bprintf_cleanup(void);
+
+/* the implementation of the opaque uapi struct bpf_dynptr */
+struct bpf_dynptr_kern {
+ void *data;
+ /* Size represents the number of usable bytes of dynptr data.
+ * For example, if the offset is 4 into a local dynptr whose data is
+ * a u64, the number of usable bytes is 4.
+ *
+ * The upper 8 bits are reserved. The layout is:
+ * Bits 0 - 23 = size
+ * Bits 24 - 30 = dynptr type
+ * Bit 31 = whether dynptr is read-only
+ */
+ u32 size;
+ u32 offset;
+} __aligned(8);
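The packing can be demonstrated standalone; the masks below are derived from
the comment above, not from the kernel's private constants:

        #include <stdint.h>
        #include <stdio.h>

        #define DYNPTR_SIZE_MASK   0x00ffffffu            /* bits 0-23  */
        #define DYNPTR_TYPE_SHIFT  24                     /* bits 24-30 */
        #define DYNPTR_TYPE_MASK   (0x7fu << DYNPTR_TYPE_SHIFT)
        #define DYNPTR_RDONLY_BIT  (1u << 31)             /* bit 31     */

        int main(void)
        {
                uint32_t word = 0;

                word |= 4096 & DYNPTR_SIZE_MASK;          /* usable bytes */
                word |= (2u << DYNPTR_TYPE_SHIFT) & DYNPTR_TYPE_MASK;
                word |= DYNPTR_RDONLY_BIT;                /* read-only */

                printf("size=%u type=%u rdonly=%u\n",
                       word & DYNPTR_SIZE_MASK,
                       (word & DYNPTR_TYPE_MASK) >> DYNPTR_TYPE_SHIFT,
                       !!(word & DYNPTR_RDONLY_BIT));
                return 0;
        }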
+
+enum bpf_dynptr_type {
+ BPF_DYNPTR_TYPE_INVALID,
+ /* Points to memory that is local to the bpf program */
+ BPF_DYNPTR_TYPE_LOCAL,
+ /* Underlying data is a kernel-produced ringbuf record */
+ BPF_DYNPTR_TYPE_RINGBUF,
+};
+
+void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
+ enum bpf_dynptr_type type, u32 offset, u32 size);
+void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
+int bpf_dynptr_check_size(u32 size);
+u32 bpf_dynptr_get_size(struct bpf_dynptr_kern *ptr);
+
+#ifdef CONFIG_BPF_LSM
+void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype);
+void bpf_cgroup_atype_put(int cgroup_atype);
+#else
+static inline void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) {}
+static inline void bpf_cgroup_atype_put(int cgroup_atype) {}
+#endif /* CONFIG_BPF_LSM */
+
+struct key;
+
+#ifdef CONFIG_KEYS
+struct bpf_key {
+ struct key *key;
+ bool has_ref;
+};
+#endif /* CONFIG_KEYS */
#endif /* _LINUX_BPF_H */
diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
new file mode 100644
index 000000000000..7ea18d4da84b
--- /dev/null
+++ b/include/linux/bpf_local_storage.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 Facebook
+ * Copyright 2020 Google LLC.
+ */
+
+#ifndef _BPF_LOCAL_STORAGE_H
+#define _BPF_LOCAL_STORAGE_H
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/rculist.h>
+#include <linux/list.h>
+#include <linux/hash.h>
+#include <linux/types.h>
+#include <uapi/linux/btf.h>
+
+#define BPF_LOCAL_STORAGE_CACHE_SIZE 16
+
+#define bpf_rcu_lock_held() \
+ (rcu_read_lock_held() || rcu_read_lock_trace_held() || \
+ rcu_read_lock_bh_held())
+struct bpf_local_storage_map_bucket {
+ struct hlist_head list;
+ raw_spinlock_t lock;
+};
+
+/* The map is not the primary owner of a bpf_local_storage_elem.
+ * Instead, the container object (eg. sk->sk_bpf_storage) is.
+ *
+ * The map (bpf_local_storage_map) serves two purposes:
+ * 1. Define the size of the "local storage". It is
+ * the map's value_size.
+ *
+ * 2. Maintain a list to keep track of all elems such
+ * that they can be cleaned up during the map destruction.
+ *
+ * When a bpf local storage is being looked up for a
+ * particular object, the "bpf_map" pointer is actually used
+ * as the "key" to search the list of elems in
+ * the respective bpf_local_storage owned by the object.
+ *
+ * e.g. sk->sk_bpf_storage is the mini-map with the "bpf_map" pointer
+ * as the searching key.
+ */
+struct bpf_local_storage_map {
+ struct bpf_map map;
+ /* Lookup elem does not require accessing the map.
+ *
+ * Updating/Deleting requires a bucket lock to
+ * link/unlink the elem from the map. Multiple buckets
+ * are used to reduce lock contention.
+ */
+ struct bpf_local_storage_map_bucket *buckets;
+ u32 bucket_log;
+ u16 elem_size;
+ u16 cache_idx;
+};
+
+struct bpf_local_storage_data {
+ /* smap is used as the searching key when looking up
+ * from the object's bpf_local_storage.
+ *
+ * Put it in the same cacheline as the data to minimize
+ * the number of cachelines accessed during the cache hit case.
+ */
+ struct bpf_local_storage_map __rcu *smap;
+ u8 data[] __aligned(8);
+};
+
+/* Linked to bpf_local_storage and bpf_local_storage_map */
+struct bpf_local_storage_elem {
+ struct hlist_node map_node; /* Linked to bpf_local_storage_map */
+ struct hlist_node snode; /* Linked to bpf_local_storage */
+ struct bpf_local_storage __rcu *local_storage;
+ struct rcu_head rcu;
+ /* 8 bytes hole */
+ /* The data is stored in another cacheline to minimize
+ * the number of cacheline accesses during a cache hit.
+ */
+ struct bpf_local_storage_data sdata ____cacheline_aligned;
+};
+
+struct bpf_local_storage {
+ struct bpf_local_storage_data __rcu *cache[BPF_LOCAL_STORAGE_CACHE_SIZE];
+ struct hlist_head list; /* List of bpf_local_storage_elem */
+ void *owner; /* The object that owns the above "list" of
+ * bpf_local_storage_elem.
+ */
+ struct rcu_head rcu;
+ raw_spinlock_t lock; /* Protect adding/removing from the "list" */
+};
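The cache-hit fast path implied by the layout above can be sketched like this
(simplified: the real bpf_local_storage_lookup() also walks ->list under RCU
and can repopulate the cache when asked):

        static struct bpf_local_storage_data *
        lookup_fast(struct bpf_local_storage *local_storage,
                    struct bpf_local_storage_map *smap)
        {
                struct bpf_local_storage_data *sdata;

                sdata = rcu_dereference(local_storage->cache[smap->cache_idx]);
                if (sdata && rcu_access_pointer(sdata->smap) == smap)
                        return sdata;   /* the map pointer is the key */
                return NULL;            /* slow path: walk ->list */
        }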
+
+/* U16_MAX is much more than enough for sk local storage
+ * considering a tcp_sock is ~2k.
+ */
+#define BPF_LOCAL_STORAGE_MAX_VALUE_SIZE \
+ min_t(u32, \
+ (KMALLOC_MAX_SIZE - MAX_BPF_STACK - \
+ sizeof(struct bpf_local_storage_elem)), \
+ (U16_MAX - sizeof(struct bpf_local_storage_elem)))
+
+#define SELEM(_SDATA) \
+ container_of((_SDATA), struct bpf_local_storage_elem, sdata)
+#define SDATA(_SELEM) (&(_SELEM)->sdata)
+
+struct bpf_local_storage_cache {
+ spinlock_t idx_lock;
+ u64 idx_usage_counts[BPF_LOCAL_STORAGE_CACHE_SIZE];
+};
+
+#define DEFINE_BPF_STORAGE_CACHE(name) \
+static struct bpf_local_storage_cache name = { \
+ .idx_lock = __SPIN_LOCK_UNLOCKED(name.idx_lock), \
+}
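Each storage flavour instantiates one of these caches; for example the
sk-local storage implementation declares:

        DEFINE_BPF_STORAGE_CACHE(sk_cache);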
+
+u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache);
+void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
+ u16 idx);
+
+/* Helper functions for bpf_local_storage */
+int bpf_local_storage_map_alloc_check(union bpf_attr *attr);
+
+struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr);
+
+struct bpf_local_storage_data *
+bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_map *smap,
+ bool cacheit_lockit);
+
+void bpf_local_storage_map_free(struct bpf_local_storage_map *smap,
+ int __percpu *busy_counter);
+
+int bpf_local_storage_map_check_btf(const struct bpf_map *map,
+ const struct btf *btf,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type);
+
+void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_elem *selem);
+
+bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_elem *selem,
+ bool uncharge_omem, bool use_trace_rcu);
+
+void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu);
+
+void bpf_selem_link_map(struct bpf_local_storage_map *smap,
+ struct bpf_local_storage_elem *selem);
+
+void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem);
+
+struct bpf_local_storage_elem *
+bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value,
+ bool charge_mem, gfp_t gfp_flags);
+
+int
+bpf_local_storage_alloc(void *owner,
+ struct bpf_local_storage_map *smap,
+ struct bpf_local_storage_elem *first_selem,
+ gfp_t gfp_flags);
+
+struct bpf_local_storage_data *
+bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
+ void *value, u64 map_flags, gfp_t gfp_flags);
+
+void bpf_local_storage_free_rcu(struct rcu_head *rcu);
+
+#endif /* _BPF_LOCAL_STORAGE_H */
diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h
new file mode 100644
index 000000000000..4bcf76a9bb06
--- /dev/null
+++ b/include/linux/bpf_lsm.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2020 Google LLC.
+ */
+
+#ifndef _LINUX_BPF_LSM_H
+#define _LINUX_BPF_LSM_H
+
+#include <linux/sched.h>
+#include <linux/bpf.h>
+#include <linux/lsm_hooks.h>
+
+#ifdef CONFIG_BPF_LSM
+
+#define LSM_HOOK(RET, DEFAULT, NAME, ...) \
+ RET bpf_lsm_##NAME(__VA_ARGS__);
+#include <linux/lsm_hook_defs.h>
+#undef LSM_HOOK
+
+struct bpf_storage_blob {
+ struct bpf_local_storage __rcu *storage;
+};
+
+extern struct lsm_blob_sizes bpf_lsm_blob_sizes;
+
+int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
+ const struct bpf_prog *prog);
+
+bool bpf_lsm_is_sleepable_hook(u32 btf_id);
+
+static inline struct bpf_storage_blob *bpf_inode(
+ const struct inode *inode)
+{
+ if (unlikely(!inode->i_security))
+ return NULL;
+
+ return inode->i_security + bpf_lsm_blob_sizes.lbs_inode;
+}
+
+extern const struct bpf_func_proto bpf_inode_storage_get_proto;
+extern const struct bpf_func_proto bpf_inode_storage_delete_proto;
+void bpf_inode_storage_free(struct inode *inode);
+
+void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func);
+
+#else /* !CONFIG_BPF_LSM */
+
+static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id)
+{
+ return false;
+}
+
+static inline int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
+ const struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline struct bpf_storage_blob *bpf_inode(
+ const struct inode *inode)
+{
+ return NULL;
+}
+
+static inline void bpf_inode_storage_free(struct inode *inode)
+{
+}
+
+static inline void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
+ bpf_func_t *bpf_func)
+{
+}
+
+#endif /* CONFIG_BPF_LSM */
+
+#endif /* _LINUX_BPF_LSM_H */
diff --git a/include/linux/bpf_mem_alloc.h b/include/linux/bpf_mem_alloc.h
new file mode 100644
index 000000000000..3e164b8efaa9
--- /dev/null
+++ b/include/linux/bpf_mem_alloc.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#ifndef _BPF_MEM_ALLOC_H
+#define _BPF_MEM_ALLOC_H
+#include <linux/compiler_types.h>
+#include <linux/workqueue.h>
+
+struct bpf_mem_cache;
+struct bpf_mem_caches;
+
+struct bpf_mem_alloc {
+ struct bpf_mem_caches __percpu *caches;
+ struct bpf_mem_cache __percpu *cache;
+ struct work_struct work;
+};
+
+int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu);
+void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);
+
+/* kmalloc/kfree equivalent: */
+void *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size);
+void bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr);
+
+/* kmem_cache_alloc/free equivalent: */
+void *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma);
+void bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr);
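A hedged usage sketch for the fixed-size flavour (struct my_elem and both
call sites are hypothetical):

        struct my_elem { int payload; };

        static struct bpf_mem_alloc my_ma;

        static int my_init(void)
        {
                /* one cache sized for my_elem, non-percpu */
                return bpf_mem_alloc_init(&my_ma, sizeof(struct my_elem), false);
        }

        static void my_use(void)
        {
                struct my_elem *e = bpf_mem_cache_alloc(&my_ma);

                if (!e)
                        return;
                e->payload = 1;
                bpf_mem_cache_free(&my_ma, e);
        }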
+
+#endif /* _BPF_MEM_ALLOC_H */
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index c81d4ece79a4..2c6a4f2562a7 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -64,13 +64,21 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2,
#ifdef CONFIG_INET
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport,
struct sk_reuseport_md, struct sk_reuseport_kern)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_LOOKUP, sk_lookup,
+ struct bpf_sk_lookup, struct bpf_sk_lookup_kern)
#endif
#if defined(CONFIG_BPF_JIT)
BPF_PROG_TYPE(BPF_PROG_TYPE_STRUCT_OPS, bpf_struct_ops,
void *, void *)
BPF_PROG_TYPE(BPF_PROG_TYPE_EXT, bpf_extension,
void *, void *)
+#ifdef CONFIG_BPF_LSM
+BPF_PROG_TYPE(BPF_PROG_TYPE_LSM, lsm,
+ void *, void *)
+#endif /* CONFIG_BPF_LSM */
#endif
+BPF_PROG_TYPE(BPF_PROG_TYPE_SYSCALL, bpf_syscall,
+ void *, void *)
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops)
@@ -93,19 +101,21 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_trace_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
+#ifdef CONFIG_BPF_LSM
+BPF_MAP_TYPE(BPF_MAP_TYPE_INODE_STORAGE, inode_storage_map_ops)
+#endif
+BPF_MAP_TYPE(BPF_MAP_TYPE_TASK_STORAGE, task_storage_map_ops)
#ifdef CONFIG_NET
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops)
-#if defined(CONFIG_BPF_STREAM_PARSER)
-BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
-BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops)
-#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
#if defined(CONFIG_XDP_SOCKETS)
BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
#endif
#ifdef CONFIG_INET
+BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops)
#endif
#endif
@@ -114,3 +124,22 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops)
#if defined(CONFIG_BPF_JIT)
BPF_MAP_TYPE(BPF_MAP_TYPE_STRUCT_OPS, bpf_struct_ops_map_ops)
#endif
+BPF_MAP_TYPE(BPF_MAP_TYPE_RINGBUF, ringbuf_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_BLOOM_FILTER, bloom_filter_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_USER_RINGBUF, user_ringbuf_map_ops)
+
+BPF_LINK_TYPE(BPF_LINK_TYPE_RAW_TRACEPOINT, raw_tracepoint)
+BPF_LINK_TYPE(BPF_LINK_TYPE_TRACING, tracing)
+#ifdef CONFIG_CGROUP_BPF
+BPF_LINK_TYPE(BPF_LINK_TYPE_CGROUP, cgroup)
+#endif
+BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter)
+#ifdef CONFIG_NET
+BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns)
+BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
+#endif
+#ifdef CONFIG_PERF_EVENTS
+BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf)
+#endif
+BPF_LINK_TYPE(BPF_LINK_TYPE_KPROBE_MULTI, kprobe_multi)
+BPF_LINK_TYPE(BPF_LINK_TYPE_STRUCT_OPS, struct_ops)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 5406e6e96585..9e1e6965f407 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -5,6 +5,7 @@
#define _LINUX_BPF_VERIFIER_H 1
#include <linux/bpf.h> /* for enum bpf_reg_type */
+#include <linux/btf.h> /* for struct btf and btf_id() */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>
@@ -17,6 +18,8 @@
* that converting umax_value to int cannot overflow.
*/
#define BPF_MAX_VAR_SIZ (1 << 29)
+/* size of type_str_buf in bpf_verifier. */
+#define TYPE_STR_BUF_LEN 64
/* Liveness marks, used for registers and spilled-regs (in stack slots).
* Read marks propagate upwards until they find a write mark; they record that
@@ -43,28 +46,62 @@ enum bpf_reg_liveness {
struct bpf_reg_state {
/* Ordering of fields matters. See states_equal() */
enum bpf_reg_type type;
+ /* Fixed part of pointer offset, pointer types only */
+ s32 off;
union {
/* valid when type == PTR_TO_PACKET */
- u16 range;
+ int range;
/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
* PTR_TO_MAP_VALUE_OR_NULL
*/
- struct bpf_map *map_ptr;
+ struct {
+ struct bpf_map *map_ptr;
+ /* To distinguish map lookups from outer map
+ * the map_uid is non-zero for registers
+ * pointing to inner maps.
+ */
+ u32 map_uid;
+ };
+
+ /* for PTR_TO_BTF_ID */
+ struct {
+ struct btf *btf;
+ u32 btf_id;
+ };
+
+ u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
- u32 btf_id; /* for PTR_TO_BTF_ID */
+ /* For dynptr stack slots */
+ struct {
+ enum bpf_dynptr_type type;
+ /* A dynptr is 16 bytes so it takes up 2 stack slots.
+ * We need to track which slot is the first slot
+ * to protect against cases where the user may try to
+ * pass in an address starting at the second slot of the
+ * dynptr.
+ */
+ bool first_slot;
+ } dynptr;
/* Max size from any of the above. */
- unsigned long raw;
+ struct {
+ unsigned long raw1;
+ unsigned long raw2;
+ } raw;
+
+ u32 subprogno; /* for PTR_TO_FUNC */
};
- /* Fixed part of pointer offset, pointer types only */
- s32 off;
/* For PTR_TO_PACKET, used to find other pointers with the same variable
* offset, so they can share range knowledge.
* For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
* came from, when one is tested for != NULL.
+ * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation
+ * for the purpose of tracking that it's freed.
* For PTR_TO_SOCKET this is used to share which pointers retain the
* same reference to the socket, to determine proper reference freeing.
+ * For stack slots that are dynptrs, this is used to track references to
+ * the dynptr to determine proper reference freeing.
*/
u32 id;
/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
@@ -123,6 +160,10 @@ struct bpf_reg_state {
s64 smax_value; /* maximum possible (s64)value */
u64 umin_value; /* minimum possible (u64)value */
u64 umax_value; /* maximum possible (u64)value */
+ s32 s32_min_value; /* minimum possible (s32)value */
+ s32 s32_max_value; /* maximum possible (s32)value */
+ u32 u32_min_value; /* minimum possible (u32)value */
+ u32 u32_max_value; /* maximum possible (u32)value */
/* parentage chain for liveness checking */
struct bpf_reg_state *parent;
/* Inside the callee two registers can be both PTR_TO_STACK like
@@ -147,9 +188,15 @@ enum bpf_stack_slot_type {
STACK_SPILL, /* register spilled into stack */
STACK_MISC, /* BPF program wrote some data into this slot */
STACK_ZERO, /* BPF program wrote constant zero */
+ /* A dynptr is stored in this stack slot. The type of dynptr
+ * is stored in bpf_stack_state->spilled_ptr.dynptr.type
+ */
+ STACK_DYNPTR,
};
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
+#define BPF_DYNPTR_SIZE sizeof(struct bpf_dynptr_kern)
+#define BPF_DYNPTR_NR_SLOTS (BPF_DYNPTR_SIZE / BPF_REG_SIZE)
struct bpf_stack_state {
struct bpf_reg_state spilled_ptr;
@@ -165,6 +212,17 @@ struct bpf_reference_state {
* is used purely to inform the user of a reference leak.
*/
int insn_idx;
+ /* There can be a case like:
+ * main (frame 0)
+ * cb (frame 1)
+ * func (frame 2)
+ * cb (frame 3)
+ * Hence for frame 3, if callback_ref just stored a boolean, it would be
+ * impossible to distinguish nested callback refs. Hence store the
+ * frameno and compare that to callback_ref in check_reference_leak when
+ * exiting a callback function.
+ */
+ int callback_ref;
};
/* state of the program:
@@ -179,10 +237,19 @@ struct bpf_func_state {
* 0 = main function, 1 = first callee.
*/
u32 frameno;
- /* subprog number == index within subprog_stack_depth
+ /* subprog number == index within subprog_info
* zero == main subprog
*/
u32 subprogno;
+ /* Every bpf_timer_start will increment async_entry_cnt.
+ * It's used to distinguish:
+ * void foo(void) { for(;;); }
+ * void foo(void) { bpf_timer_set_callback(,foo); }
+ */
+ u32 async_entry_cnt;
+ bool in_callback_fn;
+ struct tnum callback_ret_range;
+ bool in_async_callback_fn;
/* The following fields should be last. See copy_func_state() */
int acquired_refs;
@@ -196,6 +263,13 @@ struct bpf_idx_pair {
u32 idx;
};
+struct bpf_id_pair {
+ u32 old;
+ u32 cur;
+};
+
+/* Maximum number of register states that can exist at once */
+#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
#define MAX_CALL_FRAMES 8
struct bpf_verifier_state {
/* call stack tracking */
@@ -237,7 +311,7 @@ struct bpf_verifier_state {
* If is_state_visited() sees a state with branches > 0 it means
* there is a loop. If such state is exactly equal to the current state
* it's an infinite loop. Note states_equal() checks for states
- * equvalency, so two states being 'states_equal' does not mean
+ * equivalency, so two states being 'states_equal' does not mean
* infinite loop. The exact comparison is provided by
* states_maybe_looping() function. It's a stronger pre-check and
* much faster than states_equal().
@@ -275,6 +349,27 @@ struct bpf_verifier_state {
iter < frame->allocated_stack / BPF_REG_SIZE; \
iter++, reg = bpf_get_spilled_reg(iter, frame))
+/* Invoke __expr over registers in __vst, setting __state and __reg */
+#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \
+ ({ \
+ struct bpf_verifier_state *___vstate = __vst; \
+ int ___i, ___j; \
+ for (___i = 0; ___i <= ___vstate->curframe; ___i++) { \
+ struct bpf_reg_state *___regs; \
+ __state = ___vstate->frame[___i]; \
+ ___regs = __state->regs; \
+ for (___j = 0; ___j < MAX_BPF_REG; ___j++) { \
+ __reg = &___regs[___j]; \
+ (void)(__expr); \
+ } \
+ bpf_for_each_spilled_reg(___j, __state, __reg) { \
+ if (!__reg) \
+ continue; \
+ (void)(__expr); \
+ } \
+ } \
+ })
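Typical verifier usage runs one statement over every live register; for
illustration only (env, ref_obj_id and the marking helper are assumed to be
in scope inside kernel/bpf/verifier.c):

        bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
                if (reg->ref_obj_id == ref_obj_id)
                        __mark_reg_unknown(env, reg);
        }));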
+
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
struct bpf_verifier_state state;
@@ -282,11 +377,20 @@ struct bpf_verifier_state_list {
int miss_cnt, hit_cnt;
};
+struct bpf_loop_inline_state {
+ unsigned int initialized:1; /* set to true upon first entry */
+ unsigned int fit_for_inline:1; /* true if callback function is the same
+ * at each call and flags are always zero
+ */
+ u32 callback_subprogno; /* valid when fit_for_inline is true */
+};
+
/* Possible states for alu_state member. */
-#define BPF_ALU_SANITIZE_SRC 1U
-#define BPF_ALU_SANITIZE_DST 2U
+#define BPF_ALU_SANITIZE_SRC (1U << 0)
+#define BPF_ALU_SANITIZE_DST (1U << 1)
#define BPF_ALU_NEG_VALUE (1U << 2)
#define BPF_ALU_NON_POINTER (1U << 3)
+#define BPF_ALU_IMMEDIATE (1U << 4)
#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
BPF_ALU_SANITIZE_DST)
@@ -300,11 +404,25 @@ struct bpf_insn_aux_data {
u32 map_index; /* index into used_maps[] */
u32 map_off; /* offset from value base address */
};
+ struct {
+ enum bpf_reg_type reg_type; /* type of pseudo_btf_id */
+ union {
+ struct {
+ struct btf *btf;
+ u32 btf_id; /* btf_id for struct typed var */
+ };
+ u32 mem_size; /* mem_size for non-struct typed var */
+ };
+ } btf_var;
+ /* if instruction is a call to bpf_loop this field tracks
+ * the state of the relevant registers to make decision about inlining
+ */
+ struct bpf_loop_inline_state loop_inline_state;
};
u64 map_key_state; /* constant (32 bit) key tracking for maps */
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
- int sanitize_stack_off; /* stack slot to be cleared */
u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
+ bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
bool zext_dst; /* this insn zero extends dst reg */
u8 alu_state; /* used in combination with alu_limit */
@@ -314,6 +432,7 @@ struct bpf_insn_aux_data {
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
+#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */
#define BPF_VERIFIER_TMP_LOG_SIZE 1024
@@ -336,11 +455,21 @@ static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
#define BPF_LOG_LEVEL (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS)
#define BPF_LOG_KERNEL (BPF_LOG_MASK + 1) /* kernel internal flag */
+#define BPF_LOG_MIN_ALIGNMENT 8U
+#define BPF_LOG_ALIGNMENT 40U
static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
- return (log->level && log->ubuf && !bpf_verifier_log_full(log)) ||
- log->level == BPF_LOG_KERNEL;
+ return log &&
+ ((log->level && log->ubuf && !bpf_verifier_log_full(log)) ||
+ log->level == BPF_LOG_KERNEL);
+}
+
+static inline bool
+bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
+{
+ return log->len_total >= 128 && log->len_total <= UINT_MAX >> 2 &&
+ log->level && log->ubuf && !(log->level & ~BPF_LOG_MASK);
}
#define BPF_MAX_SUBPROGS 256
@@ -350,6 +479,10 @@ struct bpf_subprog_info {
u32 start; /* insn idx of function entry point */
u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
u16 stack_depth; /* max. stack depth used by this function */
+ bool has_tail_call;
+ bool tail_call_reachable;
+ bool has_ld_abs;
+ bool is_async_cb;
};
/* single container for all structs
@@ -368,14 +501,23 @@ struct bpf_verifier_env {
struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
struct bpf_verifier_state_list *free_list;
struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
+ struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTF's used by BPF program */
u32 used_map_cnt; /* number of used maps */
+ u32 used_btf_cnt; /* number of used BTF objects */
u32 id_gen; /* used to generate unique reg IDs */
+ bool explore_alu_limits;
bool allow_ptr_leaks;
+ bool allow_uninit_stack;
+ bool allow_ptr_to_map_access;
+ bool bpf_capable;
+ bool bypass_spec_v1;
+ bool bypass_spec_v4;
bool seen_direct_write;
struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
const struct bpf_line_info *prev_linfo;
struct bpf_verifier_log log;
struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
+ struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE];
struct {
int *insn_state;
int *insn_stack;
@@ -400,6 +542,17 @@ struct bpf_verifier_env {
u32 peak_states;
/* longest register parentage chain walked for liveness marking */
u32 longest_mark_read_walk;
+ bpfptr_t fd_array;
+
+ /* bit mask to keep track of whether a register has been accessed
+ * since the last time the function state was printed
+ */
+ u32 scratched_regs;
+ /* Same as scratched_regs but for stack slots */
+ u64 scratched_stack_slots;
+ u32 prev_log_len, prev_insn_print_len;
+ /* buffer used in reg_type_str() to generate reg_type string */
+ char type_str_buf[TYPE_STR_BUF_LEN];
};
__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
@@ -431,7 +584,68 @@ bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
-int check_ctx_reg(struct bpf_verifier_env *env,
- const struct bpf_reg_state *reg, int regno);
+int check_ptr_off_reg(struct bpf_verifier_env *env,
+ const struct bpf_reg_state *reg, int regno);
+int check_func_arg_reg_off(struct bpf_verifier_env *env,
+ const struct bpf_reg_state *reg, int regno,
+ enum bpf_arg_type arg_type);
+int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+ u32 regno);
+int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+ u32 regno, u32 mem_size);
+bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg);
+bool is_dynptr_type_expected(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg,
+ enum bpf_arg_type arg_type);
+
+/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
+static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
+ struct btf *btf, u32 btf_id)
+{
+ if (tgt_prog)
+ return ((u64)tgt_prog->aux->id << 32) | btf_id;
+ else
+ return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
+}
+
+/* unpack the IDs from the key as constructed above */
+static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id)
+{
+ if (obj_id)
+ *obj_id = key >> 32;
+ if (btf_id)
+ *btf_id = key & 0x7FFFFFFF;
+}
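A standalone round-trip check of the key layout (user-space demo, stdint
types standing in for u32/u64; note the btf_id must fit in 31 bits):

        #include <assert.h>
        #include <stdint.h>

        static uint64_t compute_key(uint32_t obj_id, uint32_t btf_id)
        {
                return ((uint64_t)obj_id << 32) | 0x80000000 | btf_id;
        }

        int main(void)
        {
                uint64_t key = compute_key(42, 7);

                assert((uint32_t)(key >> 32) == 42);
                assert((uint32_t)(key & 0x7FFFFFFF) == 7);
                return 0;
        }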
+
+int bpf_check_attach_target(struct bpf_verifier_log *log,
+ const struct bpf_prog *prog,
+ const struct bpf_prog *tgt_prog,
+ u32 btf_id,
+ struct bpf_attach_target_info *tgt_info);
+void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);
+
+int mark_chain_precision(struct bpf_verifier_env *env, int regno);
+
+#define BPF_BASE_TYPE_MASK GENMASK(BPF_BASE_TYPE_BITS - 1, 0)
+
+/* extract base type from bpf_{arg, return, reg}_type. */
+static inline u32 base_type(u32 type)
+{
+ return type & BPF_BASE_TYPE_MASK;
+}
+
+/* extract flags from an extended type. See bpf_type_flag in bpf.h. */
+static inline u32 type_flag(u32 type)
+{
+ return type & ~BPF_BASE_TYPE_MASK;
+}
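For example, a register typed PTR_TO_MEM | PTR_MAYBE_NULL splits into
base_type() == PTR_TO_MEM and type_flag() == PTR_MAYBE_NULL, so a null-check
helper reduces to (sketch, assuming the PTR_MAYBE_NULL flag declared in
bpf.h):

        static inline bool type_may_be_null(u32 type)
        {
                return type_flag(type) & PTR_MAYBE_NULL;
        }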
+
+/* only use after check_attach_btf_id() */
+static inline enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog)
+{
+ return prog->type == BPF_PROG_TYPE_EXT ?
+ prog->aux->dst_prog->type : prog->type;
+}
#endif /* _LINUX_BPF_VERIFIER_H */
diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h
index d815622cd31e..2ae3c8e1d83c 100644
--- a/include/linux/bpfilter.h
+++ b/include/linux/bpfilter.h
@@ -3,22 +3,23 @@
#define _LINUX_BPFILTER_H
#include <uapi/linux/bpfilter.h>
-#include <linux/umh.h>
+#include <linux/usermode_driver.h>
+#include <linux/sockptr.h>
struct sock;
-int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
+int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval,
unsigned int optlen);
int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
int __user *optlen);
+void bpfilter_umh_cleanup(struct umd_info *info);
+
struct bpfilter_umh_ops {
- struct umh_info info;
+ struct umd_info info;
/* since ip_getsockopt() can run in parallel, serialize access to umh */
struct mutex lock;
- int (*sockopt)(struct sock *sk, int optname,
- char __user *optval,
+ int (*sockopt)(struct sock *sk, int optname, sockptr_t optval,
unsigned int optlen, bool is_set);
int (*start)(void);
- bool stop;
};
extern struct bpfilter_umh_ops bpfilter_ops;
#endif
diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h
new file mode 100644
index 000000000000..79b2f78eec1a
--- /dev/null
+++ b/include/linux/bpfptr.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* A pointer that can point to either kernel or userspace memory. */
+#ifndef _LINUX_BPFPTR_H
+#define _LINUX_BPFPTR_H
+
+#include <linux/mm.h>
+#include <linux/sockptr.h>
+
+typedef sockptr_t bpfptr_t;
+
+static inline bool bpfptr_is_kernel(bpfptr_t bpfptr)
+{
+ return bpfptr.is_kernel;
+}
+
+static inline bpfptr_t KERNEL_BPFPTR(void *p)
+{
+ return (bpfptr_t) { .kernel = p, .is_kernel = true };
+}
+
+static inline bpfptr_t USER_BPFPTR(void __user *p)
+{
+ return (bpfptr_t) { .user = p };
+}
+
+static inline bpfptr_t make_bpfptr(u64 addr, bool is_kernel)
+{
+ if (is_kernel)
+ return KERNEL_BPFPTR((void*) (uintptr_t) addr);
+ else
+ return USER_BPFPTR(u64_to_user_ptr(addr));
+}
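The payoff shows up in the program-load path: instructions can now come from
user memory (plain syscall) or kernel memory (a BPF_PROG_TYPE_SYSCALL
program) behind one copy routine. Sketch only (copy_insns is a made-up
wrapper; the real code is in kernel/bpf/syscall.c):

        static int copy_insns(struct bpf_insn *dst, const union bpf_attr *attr,
                              bool uattr_is_kernel, u32 cnt)
        {
                bpfptr_t insns = make_bpfptr(attr->insns, uattr_is_kernel);

                return copy_from_bpfptr(dst, insns, cnt * sizeof(*dst));
        }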
+
+static inline bool bpfptr_is_null(bpfptr_t bpfptr)
+{
+ if (bpfptr_is_kernel(bpfptr))
+ return !bpfptr.kernel;
+ return !bpfptr.user;
+}
+
+static inline void bpfptr_add(bpfptr_t *bpfptr, size_t val)
+{
+ if (bpfptr_is_kernel(*bpfptr))
+ bpfptr->kernel += val;
+ else
+ bpfptr->user += val;
+}
+
+static inline int copy_from_bpfptr_offset(void *dst, bpfptr_t src,
+ size_t offset, size_t size)
+{
+ if (!bpfptr_is_kernel(src))
+ return copy_from_user(dst, src.user + offset, size);
+ return copy_from_kernel_nofault(dst, src.kernel + offset, size);
+}
+
+static inline int copy_from_bpfptr(void *dst, bpfptr_t src, size_t size)
+{
+ return copy_from_bpfptr_offset(dst, src, 0, size);
+}
+
+static inline int copy_to_bpfptr_offset(bpfptr_t dst, size_t offset,
+ const void *src, size_t size)
+{
+ return copy_to_sockptr_offset((sockptr_t) dst, offset, src, size);
+}
+
+static inline void *kvmemdup_bpfptr(bpfptr_t src, size_t len)
+{
+ void *p = kvmalloc(len, GFP_USER | __GFP_NOWARN);
+
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+ if (copy_from_bpfptr(p, src, len)) {
+ kvfree(p);
+ return ERR_PTR(-EFAULT);
+ }
+ return p;
+}
+
+static inline long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count)
+{
+ if (bpfptr_is_kernel(src))
+ return strncpy_from_kernel_nofault(dst, src.kernel, count);
+ return strncpy_from_user(dst, src.user, count);
+}
+
+#endif /* _LINUX_BPFPTR_H */
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index b475e7f20d28..9e77165f3ef6 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -15,7 +15,10 @@
#define PHY_ID_BCMAC131 0x0143bc70
#define PHY_ID_BCM5481 0x0143bca0
#define PHY_ID_BCM5395 0x0143bcf0
+#define PHY_ID_BCM53125 0x03625f20
+#define PHY_ID_BCM53128 0x03625e10
#define PHY_ID_BCM54810 0x03625d00
+#define PHY_ID_BCM54811 0x03625cc0
#define PHY_ID_BCM5482 0x0143bcb0
#define PHY_ID_BCM5411 0x00206070
#define PHY_ID_BCM5421 0x002060e0
@@ -24,9 +27,13 @@
#define PHY_ID_BCM5461 0x002060c0
#define PHY_ID_BCM54612E 0x03625e60
#define PHY_ID_BCM54616S 0x03625d10
+#define PHY_ID_BCM54140 0xae025009
#define PHY_ID_BCM57780 0x03625d90
#define PHY_ID_BCM89610 0x03625cd0
+#define PHY_ID_BCM72113 0x35905310
+#define PHY_ID_BCM72116 0x35905350
+#define PHY_ID_BCM72165 0x35905340
#define PHY_ID_BCM7250 0xae025280
#define PHY_ID_BCM7255 0xae025120
#define PHY_ID_BCM7260 0xae025190
@@ -44,6 +51,7 @@
#define PHY_ID_BCM7439 0x600d8480
#define PHY_ID_BCM7439_2 0xae025080
#define PHY_ID_BCM7445 0x600d8510
+#define PHY_ID_BCM7712 0x35905330
#define PHY_ID_BCM_CYGNUS 0xae025200
#define PHY_ID_BCM_OMEGA 0xae025100
@@ -56,19 +64,12 @@
#define PHY_BCM_OUI_5 0x03625e00
#define PHY_BCM_OUI_6 0xae025000
-#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001
-#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002
-#define PHY_BCM_FLAGS_INTF_SGMII 0x00000010
-#define PHY_BCM_FLAGS_INTF_XAUI 0x00000020
-#define PHY_BRCM_WIRESPEED_ENABLE 0x00000100
-#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000200
-#define PHY_BRCM_RX_REFCLK_UNUSED 0x00000400
-#define PHY_BRCM_STD_IBND_DISABLE 0x00000800
-#define PHY_BRCM_EXT_IBND_RX_ENABLE 0x00001000
-#define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000
-#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000
-#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000
-#define PHY_BRCM_EN_MASTER_MODE 0x00010000
+#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000001
+#define PHY_BRCM_RX_REFCLK_UNUSED 0x00000002
+#define PHY_BRCM_CLEAR_RGMII_MODE 0x00000004
+#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00000008
+#define PHY_BRCM_EN_MASTER_MODE 0x00000010
+#define PHY_BRCM_IDDQ_SUSPEND 0x00000020
/* Broadcom BCM7xxx specific workarounds */
#define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff)
@@ -79,12 +80,14 @@
#define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */
#define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */
#define MII_BCM54XX_ECR_IF 0x0800 /* Interrupt force */
+#define MII_BCM54XX_ECR_FIFOE 0x0001 /* FIFO elasticity */
#define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */
#define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt status */
#define MII_BCM54XX_EXP_DATA 0x15 /* Expansion register data */
#define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */
+#define MII_BCM54XX_EXP_SEL_TOP 0x0d00 /* TOP_MISC expansion register select */
#define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */
#define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */
#define MII_BCM54XX_EXP_SEL_ETC 0x0d00 /* Expansion register spare + 2k mem */
@@ -113,15 +116,25 @@
#define MII_BCM54XX_SHD_VAL(x) ((x & 0x1f) << 10)
#define MII_BCM54XX_SHD_DATA(x) ((x & 0x3ff) << 0)
+#define MII_BCM54XX_RDB_ADDR 0x1e
+#define MII_BCM54XX_RDB_DATA 0x1f
+
+/* legacy access control via rdb/expansion register */
+#define BCM54XX_RDB_REG0087 0x0087
+#define BCM54XX_EXP_REG7E (MII_BCM54XX_EXP_SEL_ER + 0x7E)
+#define BCM54XX_ACCESS_MODE_LEGACY_EN BIT(15)
+
/*
* AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18)
*/
#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x00
#define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400
#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800
+#define MII_BCM54XX_AUXCTL_ACTL_EXT_PKT_LEN 0x4000
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x07
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN 0x0010
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_EN 0x0080
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN 0x0100
#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
@@ -183,6 +196,7 @@
#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001
#define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002
#define BCM54XX_SHD_SCR3_TRDDAPD 0x0004
+#define BCM54XX_SHD_SCR3_RXCTXC_DIS 0x0100
/* 01010: Auto Power-Down */
#define BCM54XX_SHD_APD 0x0a
@@ -208,6 +222,9 @@
/* 11111: Mode Control Register */
#define BCM54XX_SHD_MODE 0x1f
#define BCM54XX_SHD_INTF_SEL_MASK GENMASK(2, 1) /* INTERF_SEL[1:0] */
+#define BCM54XX_SHD_INTF_SEL_RGMII 0x02
+#define BCM54XX_SHD_INTF_SEL_SGMII 0x04
+#define BCM54XX_SHD_INTF_SEL_GBIC 0x06
#define BCM54XX_SHD_MODE_1000BX BIT(0) /* Enable 1000-X registers */
/*
@@ -221,6 +238,7 @@
#define MII_BCM54XX_EXP_EXP08 0x0F08
#define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001
#define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200
+#define MII_BCM54XX_EXP_EXP08_FORCE_DAC_WAKE 0x0100
#define MII_BCM54XX_EXP_EXP75 0x0f75
#define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c
#define MII_BCM54XX_EXP_EXP75_CM_OSC 0x0001
@@ -229,6 +247,12 @@
#define MII_BCM54XX_EXP_EXP97 0x0f97
#define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c
+/* Top-MISC expansion registers */
+#define BCM54XX_TOP_MISC_IDDQ_CTRL (MII_BCM54XX_EXP_SEL_TOP + 0x06)
+#define BCM54XX_TOP_MISC_IDDQ_LP (1 << 0)
+#define BCM54XX_TOP_MISC_IDDQ_SD (1 << 2)
+#define BCM54XX_TOP_MISC_IDDQ_SR (1 << 3)
+
/*
* BCM5482: Secondary SerDes registers
*/
@@ -269,6 +293,7 @@
#define MII_BRCM_FET_SHDW_MC_FAME 0x4000 /* Force Auto MDIX enable */
#define MII_BRCM_FET_SHDW_AUXMODE4 0x1a /* Auxiliary mode 4 */
+#define MII_BRCM_FET_SHDW_AM4_STANDBY 0x0008 /* Standby enable */
#define MII_BRCM_FET_SHDW_AM4_LED_MASK 0x0003
#define MII_BRCM_FET_SHDW_AM4_LED_MODE1 0x0001
@@ -287,4 +312,51 @@
#define MII_BRCM_CORE_EXPB0 0xB0
#define MII_BRCM_CORE_EXPB1 0xB1
+/* Enhanced Cable Diagnostics */
+#define BCM54XX_RDB_ECD_CTRL 0x2a0
+#define BCM54XX_EXP_ECD_CTRL (MII_BCM54XX_EXP_SEL_ER + 0xc0)
+
+#define BCM54XX_ECD_CTRL_CABLE_TYPE_CAT3 1 /* CAT3 or worse */
+#define BCM54XX_ECD_CTRL_CABLE_TYPE_CAT5 0 /* CAT5 or better */
+#define BCM54XX_ECD_CTRL_CABLE_TYPE_MASK BIT(0) /* cable type */
+#define BCM54XX_ECD_CTRL_INVALID BIT(3) /* invalid result */
+#define BCM54XX_ECD_CTRL_UNIT_CM 0 /* centimeters */
+#define BCM54XX_ECD_CTRL_UNIT_M 1 /* meters */
+#define BCM54XX_ECD_CTRL_UNIT_MASK BIT(10) /* cable length unit */
+#define BCM54XX_ECD_CTRL_IN_PROGRESS BIT(11) /* test in progress */
+#define BCM54XX_ECD_CTRL_BREAK_LINK BIT(12) /* disconnect link
+ * during test
+ */
+#define BCM54XX_ECD_CTRL_CROSS_SHORT_DIS BIT(13) /* disable inter-pair
+ * short check
+ */
+#define BCM54XX_ECD_CTRL_RUN BIT(15) /* run immediate */
+
+#define BCM54XX_RDB_ECD_FAULT_TYPE 0x2a1
+#define BCM54XX_EXP_ECD_FAULT_TYPE (MII_BCM54XX_EXP_SEL_ER + 0xc1)
+#define BCM54XX_ECD_FAULT_TYPE_INVALID 0x0
+#define BCM54XX_ECD_FAULT_TYPE_OK 0x1
+#define BCM54XX_ECD_FAULT_TYPE_OPEN 0x2
+#define BCM54XX_ECD_FAULT_TYPE_SAME_SHORT 0x3 /* short same pair */
+#define BCM54XX_ECD_FAULT_TYPE_CROSS_SHORT 0x4 /* short different pairs */
+#define BCM54XX_ECD_FAULT_TYPE_BUSY 0x9
+#define BCM54XX_ECD_FAULT_TYPE_PAIR_D_MASK GENMASK(3, 0)
+#define BCM54XX_ECD_FAULT_TYPE_PAIR_C_MASK GENMASK(7, 4)
+#define BCM54XX_ECD_FAULT_TYPE_PAIR_B_MASK GENMASK(11, 8)
+#define BCM54XX_ECD_FAULT_TYPE_PAIR_A_MASK GENMASK(15, 12)
+
+#define BCM54XX_RDB_ECD_PAIR_A_LENGTH_RESULTS 0x2a2
+#define BCM54XX_EXP_ECD_PAIR_A_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc2)
+#define BCM54XX_RDB_ECD_PAIR_B_LENGTH_RESULTS 0x2a3
+#define BCM54XX_EXP_ECD_PAIR_B_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc3)
+#define BCM54XX_RDB_ECD_PAIR_C_LENGTH_RESULTS 0x2a4
+#define BCM54XX_EXP_ECD_PAIR_C_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc4)
+#define BCM54XX_RDB_ECD_PAIR_D_LENGTH_RESULTS 0x2a5
+#define BCM54XX_EXP_ECD_PAIR_D_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc5)
+#define BCM54XX_ECD_LENGTH_RESULTS_INVALID 0xffff
+
#endif /* _LINUX_BRCMPHY_H */
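
Hedged example of how the ECD fault-type masks above compose (the register
read itself, via RDB or expansion space, is assumed to have happened in the
PHY driver): extract one pair's nibble with FIELD_GET() from
<linux/bitfield.h> and compare it against the fault codes.

	static bool demo_pair_a_ok(u16 fault_reg)
	{
		u16 fault = FIELD_GET(BCM54XX_ECD_FAULT_TYPE_PAIR_A_MASK,
				      fault_reg);

		return fault == BCM54XX_ECD_FAULT_TYPE_OK;
	}
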
diff --git a/include/linux/bsearch.h b/include/linux/bsearch.h
index 8ed53d7524ea..e66b711d091e 100644
--- a/include/linux/bsearch.h
+++ b/include/linux/bsearch.h
@@ -4,7 +4,29 @@
#include <linux/types.h>
-void *bsearch(const void *key, const void *base, size_t num, size_t size,
- cmp_func_t cmp);
+static __always_inline
+void *__inline_bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp)
+{
+ const char *pivot;
+ int result;
+
+ while (num > 0) {
+ pivot = base + (num >> 1) * size;
+ result = cmp(key, pivot);
+
+ if (result == 0)
+ return (void *)pivot;
+
+ if (result > 0) {
+ base = pivot + size;
+ num--;
+ }
+ num >>= 1;
+ }
+
+ return NULL;
+}
+
+extern void *bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp);
#endif /* _LINUX_BSEARCH_H */
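
Sketch of the calling convention (data is illustrative): the comparator has
the cmp_func_t shape, and __inline_bsearch() lets the compiler inline it,
which is the point of moving the body into the header.

	static int demo_cmp_u32(const void *key, const void *elt)
	{
		u32 k = *(const u32 *)key, e = *(const u32 *)elt;

		return k < e ? -1 : (k > e ? 1 : 0);
	}

	static bool demo_contains(const u32 *sorted, size_t n, u32 needle)
	{
		return __inline_bsearch(&needle, sorted, n, sizeof(*sorted),
					demo_cmp_u32) != NULL;
	}
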
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index 960988d42f77..9e97ced2896d 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -10,8 +10,8 @@
#define _BLK_BSG_
#include <linux/blkdev.h>
-#include <scsi/scsi_request.h>
+struct bsg_job;
struct request;
struct device;
struct scatterlist;
diff --git a/include/linux/bsg.h b/include/linux/bsg.h
index dac37b6e00ec..1ac81c809da9 100644
--- a/include/linux/bsg.h
+++ b/include/linux/bsg.h
@@ -4,36 +4,16 @@
#include <uapi/linux/bsg.h>
-struct request;
+struct bsg_device;
+struct device;
+struct request_queue;
-#ifdef CONFIG_BLK_DEV_BSG
-struct bsg_ops {
- int (*check_proto)(struct sg_io_v4 *hdr);
- int (*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr,
- fmode_t mode);
- int (*complete_rq)(struct request *rq, struct sg_io_v4 *hdr);
- void (*free_rq)(struct request *rq);
-};
+typedef int (bsg_sg_io_fn)(struct request_queue *, struct sg_io_v4 *hdr,
+ fmode_t mode, unsigned int timeout);
-struct bsg_class_device {
- struct device *class_dev;
- int minor;
- struct request_queue *queue;
- const struct bsg_ops *ops;
-};
+struct bsg_device *bsg_register_queue(struct request_queue *q,
+ struct device *parent, const char *name,
+ bsg_sg_io_fn *sg_io_fn);
+void bsg_unregister_queue(struct bsg_device *bcd);
-int bsg_register_queue(struct request_queue *q, struct device *parent,
- const char *name, const struct bsg_ops *ops);
-int bsg_scsi_register_queue(struct request_queue *q, struct device *parent);
-void bsg_unregister_queue(struct request_queue *q);
-#else
-static inline int bsg_scsi_register_queue(struct request_queue *q,
- struct device *parent)
-{
- return 0;
-}
-static inline void bsg_unregister_queue(struct request_queue *q)
-{
-}
-#endif /* CONFIG_BLK_DEV_BSG */
#endif /* _LINUX_BSG_H */
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 5c1ea99b480f..f9aababc5d78 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -5,19 +5,86 @@
#define _LINUX_BTF_H 1
#include <linux/types.h>
+#include <linux/bpfptr.h>
#include <uapi/linux/btf.h>
+#include <uapi/linux/bpf.h>
#define BTF_TYPE_EMIT(type) ((void)(type *)0)
+#define BTF_TYPE_EMIT_ENUM(enum_val) ((void)enum_val)
+
+/* These need to be macros, as the expressions are used in assembler input */
+#define KF_ACQUIRE (1 << 0) /* kfunc is an acquire function */
+#define KF_RELEASE (1 << 1) /* kfunc is a release function */
+#define KF_RET_NULL (1 << 2) /* kfunc returns a pointer that may be NULL */
+#define KF_KPTR_GET (1 << 3) /* kfunc returns reference to a kptr */
+/* Trusted arguments are those which are meant to be referenced arguments with
+ * unchanged offset. It is used to enforce that pointers obtained from acquire
+ * kfuncs remain unmodified when being passed to helpers taking trusted args.
+ *
+ * Consider
+ * struct foo {
+ * int data;
+ * struct foo *next;
+ * };
+ *
+ * struct bar {
+ * int data;
+ * struct foo f;
+ * };
+ *
+ * struct foo *f = alloc_foo(); // Acquire kfunc
+ * struct bar *b = alloc_bar(); // Acquire kfunc
+ *
+ * If a kfunc set_foo_data() wants to operate only on the allocated object, it
+ * will set the KF_TRUSTED_ARGS flag, which will prevent unsafe usage like:
+ *
+ * set_foo_data(f, 42); // Allowed
+ * set_foo_data(f->next, 42); // Rejected, non-referenced pointer
+ * set_foo_data(&f->next, 42); // Rejected, referenced, but wrong type
+ * set_foo_data(&b->f, 42); // Rejected, referenced, but bad offset
+ *
+ * In the final case, the type would usually be deduced, for type-matching
+ * purposes, by looking at the type of the member at the given offset. But
+ * because a trusted argument is required, that deduction is strict and is
+ * not performed here.
+ */
+#define KF_TRUSTED_ARGS (1 << 4) /* kfunc only takes trusted pointer arguments */
+#define KF_SLEEPABLE (1 << 5) /* kfunc may sleep */
+#define KF_DESTRUCTIVE (1 << 6) /* kfunc performs destructive actions */
+
+/*
+ * Return the name of the passed struct, if it exists, or halt the build if,
+ * for example, the structure gets renamed. In this way, developers have to revisit
+ * the code using that structure name, and update it accordingly.
+ */
+#define stringify_struct(x) \
+ ({ BUILD_BUG_ON(sizeof(struct x) < 0); \
+ __stringify(x); })
struct btf;
struct btf_member;
struct btf_type;
union bpf_attr;
+struct btf_show;
+struct btf_id_set;
+
+struct btf_kfunc_id_set {
+ struct module *owner;
+ struct btf_id_set8 *set;
+};
+
+struct btf_id_dtor_kfunc {
+ u32 btf_id;
+ u32 kfunc_btf_id;
+};
+
+typedef void (*btf_dtor_kfunc_t)(void *);
extern const struct file_operations btf_fops;
+void btf_get(struct btf *btf);
void btf_put(struct btf *btf);
-int btf_new_fd(const union bpf_attr *attr);
+int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr);
struct btf *btf_get_by_fd(int fd);
int btf_get_info_by_fd(const struct btf *btf,
const union bpf_attr *attr,
@@ -46,14 +113,58 @@ int btf_get_info_by_fd(const struct btf *btf,
const struct btf_type *btf_type_id_size(const struct btf *btf,
u32 *type_id,
u32 *ret_size);
+
+/*
+ * Options to control show behaviour.
+ * - BTF_SHOW_COMPACT: no formatting around type information
+ * - BTF_SHOW_NONAME: no struct/union member names/types
+ * - BTF_SHOW_PTR_RAW: show raw (unobfuscated) pointer values;
+ * equivalent to %px.
+ * - BTF_SHOW_ZERO: show zero-valued struct/union members; they
+ * are not displayed by default
+ * - BTF_SHOW_UNSAFE: skip use of bpf_probe_read() to safely read
+ * data before displaying it.
+ */
+#define BTF_SHOW_COMPACT BTF_F_COMPACT
+#define BTF_SHOW_NONAME BTF_F_NONAME
+#define BTF_SHOW_PTR_RAW BTF_F_PTR_RAW
+#define BTF_SHOW_ZERO BTF_F_ZERO
+#define BTF_SHOW_UNSAFE (1ULL << 4)
+
void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
struct seq_file *m);
+int btf_type_seq_show_flags(const struct btf *btf, u32 type_id, void *obj,
+ struct seq_file *m, u64 flags);
+
+/*
+ * Copy len bytes of string representation of obj of BTF type_id into buf.
+ *
+ * @btf: struct btf object
+ * @type_id: type id of type obj points to
+ * @obj: pointer to typed data
+ * @buf: buffer to write to
+ * @len: maximum length to write to buf
+ * @flags: show options (see above)
+ *
+ * Return: length that would have been/was copied as per snprintf, or
+ * negative error.
+ */
+int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
+ char *buf, int len, u64 flags);
+
int btf_get_fd_by_id(u32 id);
-u32 btf_id(const struct btf *btf);
+u32 btf_obj_id(const struct btf *btf);
+bool btf_is_kernel(const struct btf *btf);
+bool btf_is_module(const struct btf *btf);
+struct module *btf_try_get_module(const struct btf *btf);
+u32 btf_nr_types(const struct btf *btf);
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
const struct btf_member *m,
u32 expected_offset, u32 expected_size);
int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t);
+int btf_find_timer(const struct btf *btf, const struct btf_type *t);
+struct bpf_map_value_off *btf_parse_kptrs(const struct btf *btf,
+ const struct btf_type *t);
bool btf_type_is_void(const struct btf_type *t);
s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind);
const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
@@ -64,14 +175,19 @@ const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
u32 id, u32 *res_id);
const struct btf_type *
btf_resolve_size(const struct btf *btf, const struct btf_type *type,
- u32 *type_size, const struct btf_type **elem_type,
- u32 *total_nelems);
+ u32 *type_size);
+const char *btf_type_str(const struct btf_type *t);
#define for_each_member(i, struct_type, member) \
for (i = 0, member = btf_type_member(struct_type); \
i < btf_type_vlen(struct_type); \
i++, member++)
+#define for_each_vsi(i, datasec_type, member) \
+ for (i = 0, member = btf_type_var_secinfo(datasec_type); \
+ i < btf_type_vlen(datasec_type); \
+ i++, member++)
+
static inline bool btf_type_is_ptr(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_PTR;
@@ -82,11 +198,91 @@ static inline bool btf_type_is_int(const struct btf_type *t)
return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
}
+static inline bool btf_type_is_small_int(const struct btf_type *t)
+{
+ return btf_type_is_int(t) && t->size <= sizeof(u64);
+}
+
static inline bool btf_type_is_enum(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM;
}
+static inline bool btf_is_any_enum(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM ||
+ BTF_INFO_KIND(t->info) == BTF_KIND_ENUM64;
+}
+
+static inline bool btf_kind_core_compat(const struct btf_type *t1,
+ const struct btf_type *t2)
+{
+ return BTF_INFO_KIND(t1->info) == BTF_INFO_KIND(t2->info) ||
+ (btf_is_any_enum(t1) && btf_is_any_enum(t2));
+}
+
+static inline bool str_is_empty(const char *s)
+{
+ return !s || !s[0];
+}
+
+static inline u16 btf_kind(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info);
+}
+
+static inline bool btf_is_enum(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_ENUM;
+}
+
+static inline bool btf_is_enum64(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_ENUM64;
+}
+
+static inline u64 btf_enum64_value(const struct btf_enum64 *e)
+{
+ return ((u64)e->val_hi32 << 32) | e->val_lo32;
+}
+
+static inline bool btf_is_composite(const struct btf_type *t)
+{
+ u16 kind = btf_kind(t);
+
+ return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
+}
+
+static inline bool btf_is_array(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_ARRAY;
+}
+
+static inline bool btf_is_int(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_INT;
+}
+
+static inline bool btf_is_ptr(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_PTR;
+}
+
+static inline u8 btf_int_offset(const struct btf_type *t)
+{
+ return BTF_INT_OFFSET(*(u32 *)(t + 1));
+}
+
+static inline u8 btf_int_encoding(const struct btf_type *t)
+{
+ return BTF_INT_ENCODING(*(u32 *)(t + 1));
+}
+
+static inline bool btf_type_is_scalar(const struct btf_type *t)
+{
+ return btf_type_is_int(t) || btf_type_is_enum(t);
+}
+
static inline bool btf_type_is_typedef(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF;
@@ -102,11 +298,36 @@ static inline bool btf_type_is_func_proto(const struct btf_type *t)
return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO;
}
+static inline bool btf_type_is_var(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_VAR;
+}
+
+static inline bool btf_type_is_type_tag(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG;
+}
+
+/* union is only a special case of struct:
+ * all its offsetof(member) == 0
+ */
+static inline bool btf_type_is_struct(const struct btf_type *t)
+{
+ u8 kind = BTF_INFO_KIND(t->info);
+
+ return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
+}
+
static inline u16 btf_type_vlen(const struct btf_type *t)
{
return BTF_INFO_VLEN(t->info);
}
+static inline u16 btf_vlen(const struct btf_type *t)
+{
+ return btf_type_vlen(t);
+}
+
static inline u16 btf_func_linkage(const struct btf_type *t)
{
return BTF_INFO_VLEN(t->info);
@@ -117,30 +338,85 @@ static inline bool btf_type_kflag(const struct btf_type *t)
return BTF_INFO_KFLAG(t->info);
}
-static inline u32 btf_member_bit_offset(const struct btf_type *struct_type,
- const struct btf_member *member)
+static inline u32 __btf_member_bit_offset(const struct btf_type *struct_type,
+ const struct btf_member *member)
{
return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset)
: member->offset;
}
-static inline u32 btf_member_bitfield_size(const struct btf_type *struct_type,
- const struct btf_member *member)
+static inline u32 __btf_member_bitfield_size(const struct btf_type *struct_type,
+ const struct btf_member *member)
{
return btf_type_kflag(struct_type) ? BTF_MEMBER_BITFIELD_SIZE(member->offset)
: 0;
}
+static inline struct btf_member *btf_members(const struct btf_type *t)
+{
+ return (struct btf_member *)(t + 1);
+}
+
+static inline u32 btf_member_bit_offset(const struct btf_type *t, u32 member_idx)
+{
+ const struct btf_member *m = btf_members(t) + member_idx;
+
+ return __btf_member_bit_offset(t, m);
+}
+
+static inline u32 btf_member_bitfield_size(const struct btf_type *t, u32 member_idx)
+{
+ const struct btf_member *m = btf_members(t) + member_idx;
+
+ return __btf_member_bitfield_size(t, m);
+}
+
static inline const struct btf_member *btf_type_member(const struct btf_type *t)
{
return (const struct btf_member *)(t + 1);
}
+static inline struct btf_array *btf_array(const struct btf_type *t)
+{
+ return (struct btf_array *)(t + 1);
+}
+
+static inline struct btf_enum *btf_enum(const struct btf_type *t)
+{
+ return (struct btf_enum *)(t + 1);
+}
+
+static inline struct btf_enum64 *btf_enum64(const struct btf_type *t)
+{
+ return (struct btf_enum64 *)(t + 1);
+}
+
+static inline const struct btf_var_secinfo *btf_type_var_secinfo(
+ const struct btf_type *t)
+{
+ return (const struct btf_var_secinfo *)(t + 1);
+}
+
+static inline struct btf_param *btf_params(const struct btf_type *t)
+{
+ return (struct btf_param *)(t + 1);
+}
+
#ifdef CONFIG_BPF_SYSCALL
+struct bpf_prog;
+
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
const char *btf_name_by_offset(const struct btf *btf, u32 offset);
struct btf *btf_parse_vmlinux(void);
struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog);
+u32 *btf_kfunc_id_set_contains(const struct btf *btf,
+ enum bpf_prog_type prog_type,
+ u32 kfunc_btf_id);
+int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
+ const struct btf_kfunc_id_set *s);
+s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id);
+int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
+ struct module *owner);
#else
static inline const struct btf_type *btf_type_by_id(const struct btf *btf,
u32 type_id)
@@ -152,6 +428,36 @@ static inline const char *btf_name_by_offset(const struct btf *btf,
{
return NULL;
}
+static inline u32 *btf_kfunc_id_set_contains(const struct btf *btf,
+ enum bpf_prog_type prog_type,
+ u32 kfunc_btf_id)
+{
+ return NULL;
+}
+static inline int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
+ const struct btf_kfunc_id_set *s)
+{
+ return 0;
+}
+static inline s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id)
+{
+ return -ENOENT;
+}
+static inline int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors,
+ u32 add_cnt, struct module *owner)
+{
+ return 0;
+}
#endif
+static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t)
+{
+ if (!btf_type_is_ptr(t))
+ return false;
+
+ t = btf_type_skip_modifiers(btf, t->type, NULL);
+
+ return btf_type_is_struct(t);
+}
+
#endif
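
A hedged sketch of how the member helpers compose (assumes a valid
btf/btf_type pair obtained elsewhere, e.g. via btf_type_by_id(); the
function is a made-up example, not kernel API): walk a struct and print
each member's bit offset.

	static void demo_dump_struct(const struct btf *btf,
				     const struct btf_type *t)
	{
		const struct btf_member *member;
		int i;

		if (!btf_type_is_struct(t))
			return;

		for_each_member(i, t, member) {
			const char *name = btf_name_by_offset(btf,
							      member->name_off);

			pr_info("member %s at bit offset %u\n", name,
				__btf_member_bit_offset(t, member));
		}
	}
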
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
new file mode 100644
index 000000000000..2aea877d644f
--- /dev/null
+++ b/include/linux/btf_ids.h
@@ -0,0 +1,269 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_BTF_IDS_H
+#define _LINUX_BTF_IDS_H
+
+struct btf_id_set {
+ u32 cnt;
+ u32 ids[];
+};
+
+struct btf_id_set8 {
+ u32 cnt;
+ u32 flags;
+ struct {
+ u32 id;
+ u32 flags;
+ } pairs[];
+};
+
+#ifdef CONFIG_DEBUG_INFO_BTF
+
+#include <linux/compiler.h> /* for __PASTE */
+#include <linux/compiler_attributes.h> /* for __maybe_unused */
+
+/*
+ * The following macros help define lists of BTF IDs placed
+ * in the .BTF_ids section. They are initially filled with zeros
+ * (during compilation) and resolved later during the
+ * linking phase by the resolve_btfids tool.
+ *
+ * Any change in list layout must be reflected in resolve_btfids
+ * tool logic.
+ */
+
+#define BTF_IDS_SECTION ".BTF_ids"
+
+#define ____BTF_ID(symbol, word) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".local " #symbol " ; \n" \
+".type " #symbol ", STT_OBJECT; \n" \
+".size " #symbol ", 4; \n" \
+#symbol ": \n" \
+".zero 4 \n" \
+word \
+".popsection; \n");
+
+#define __BTF_ID(symbol, word) \
+ ____BTF_ID(symbol, word)
+
+#define __ID(prefix) \
+ __PASTE(prefix, __COUNTER__)
+
+/*
+ * The BTF_ID macro defines a unique symbol for each ID, pointing
+ * to 4 zero bytes.
+ */
+#define BTF_ID(prefix, name) \
+ __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__), "")
+
+#define ____BTF_ID_FLAGS(prefix, name, flags) \
+ __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__), ".long " #flags "\n")
+#define __BTF_ID_FLAGS(prefix, name, flags, ...) \
+ ____BTF_ID_FLAGS(prefix, name, flags)
+#define BTF_ID_FLAGS(prefix, name, ...) \
+ __BTF_ID_FLAGS(prefix, name, ##__VA_ARGS__, 0)
+
+/*
+ * The BTF_ID_LIST macro defines a pure (unsorted) list
+ * of BTF IDs, with the following layout:
+ *
+ * BTF_ID_LIST(list1)
+ * BTF_ID(type1, name1)
+ * BTF_ID(type2, name2)
+ *
+ * list1:
+ * __BTF_ID__type1__name1__1:
+ * .zero 4
+ * __BTF_ID__type2__name2__2:
+ * .zero 4
+ *
+ */
+#define __BTF_ID_LIST(name, scope) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+"." #scope " " #name "; \n" \
+#name ":; \n" \
+".popsection; \n");
+
+#define BTF_ID_LIST(name) \
+__BTF_ID_LIST(name, local) \
+extern u32 name[];
+
+#define BTF_ID_LIST_GLOBAL(name, n) \
+__BTF_ID_LIST(name, globl)
+
+/* The BTF_ID_LIST_SINGLE macro defines a BTF_ID_LIST with
+ * a single entry.
+ */
+#define BTF_ID_LIST_SINGLE(name, prefix, typename) \
+ BTF_ID_LIST(name) \
+ BTF_ID(prefix, typename)
+#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) \
+ BTF_ID_LIST_GLOBAL(name, 1) \
+ BTF_ID(prefix, typename)
+
+/*
+ * The BTF_ID_UNUSED macro defines 4 zero bytes.
+ * It's used when we want to define an 'unused' entry
+ * in BTF_ID_LIST, like:
+ *
+ * BTF_ID_LIST(bpf_skb_output_btf_ids)
+ * BTF_ID(struct, sk_buff)
+ * BTF_ID_UNUSED
+ * BTF_ID(struct, task_struct)
+ */
+
+#define BTF_ID_UNUSED \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".zero 4 \n" \
+".popsection; \n");
+
+/*
+ * The BTF_SET_START/END macro pair defines a sorted list of
+ * BTF IDs plus its member count, with the following layout:
+ *
+ * BTF_SET_START(list)
+ * BTF_ID(type1, name1)
+ * BTF_ID(type2, name2)
+ * BTF_SET_END(list)
+ *
+ * __BTF_ID__set__list:
+ * .zero 4
+ * list:
+ * __BTF_ID__type1__name1__3:
+ * .zero 4
+ * __BTF_ID__type2__name2__4:
+ * .zero 4
+ *
+ */
+#define __BTF_SET_START(name, scope) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+"." #scope " __BTF_ID__set__" #name "; \n" \
+"__BTF_ID__set__" #name ":; \n" \
+".zero 4 \n" \
+".popsection; \n");
+
+#define BTF_SET_START(name) \
+__BTF_ID_LIST(name, local) \
+__BTF_SET_START(name, local)
+
+#define BTF_SET_START_GLOBAL(name) \
+__BTF_ID_LIST(name, globl) \
+__BTF_SET_START(name, globl)
+
+#define BTF_SET_END(name) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".size __BTF_ID__set__" #name ", .-" #name " \n" \
+".popsection; \n"); \
+extern struct btf_id_set name;
+
+/*
+ * The BTF_SET8_START/END macro pair defines a sorted list of
+ * BTF IDs and their flags plus its member count, with the
+ * following layout:
+ *
+ * BTF_SET8_START(list)
+ * BTF_ID_FLAGS(type1, name1, flags)
+ * BTF_ID_FLAGS(type2, name2, flags)
+ * BTF_SET8_END(list)
+ *
+ * __BTF_ID__set8__list:
+ * .zero 8
+ * list:
+ * __BTF_ID__type1__name1__3:
+ * .zero 4
+ * .word (1 << 0) | (1 << 2)
+ * __BTF_ID__type2__name2__5:
+ * .zero 4
+ * .word (1 << 3) | (1 << 1) | (1 << 2)
+ *
+ */
+#define __BTF_SET8_START(name, scope) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+"." #scope " __BTF_ID__set8__" #name "; \n" \
+"__BTF_ID__set8__" #name ":; \n" \
+".zero 8 \n" \
+".popsection; \n");
+
+#define BTF_SET8_START(name) \
+__BTF_ID_LIST(name, local) \
+__BTF_SET8_START(name, local)
+
+#define BTF_SET8_END(name) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".size __BTF_ID__set8__" #name ", .-" #name " \n" \
+".popsection; \n"); \
+extern struct btf_id_set8 name;
+
+#else
+
+#define BTF_ID_LIST(name) static u32 __maybe_unused name[5];
+#define BTF_ID(prefix, name)
+#define BTF_ID_FLAGS(prefix, name, ...)
+#define BTF_ID_UNUSED
+#define BTF_ID_LIST_GLOBAL(name, n) u32 __maybe_unused name[n];
+#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 __maybe_unused name[1];
+#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 __maybe_unused name[1];
+#define BTF_SET_START(name) static struct btf_id_set __maybe_unused name = { 0 };
+#define BTF_SET_START_GLOBAL(name) static struct btf_id_set __maybe_unused name = { 0 };
+#define BTF_SET_END(name)
+#define BTF_SET8_START(name) static struct btf_id_set8 __maybe_unused name = { 0 };
+#define BTF_SET8_END(name)
+
+#endif /* CONFIG_DEBUG_INFO_BTF */
+
+#ifdef CONFIG_NET
+/* Define a list of socket types which can be the argument for
+ * skc_to_*_sock() helpers. All these sockets should have
+ * sock_common as the first member in their memory layout.
+ */
+#define BTF_SOCK_TYPE_xxx \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET, inet_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_CONN, inet_connection_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_REQ, inet_request_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_TW, inet_timewait_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_REQ, request_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK, sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK_COMMON, sock_common) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP, tcp_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_REQ, tcp_request_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UNIX, unix_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_MPTCP, mptcp_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCKET, socket)
+
+enum {
+#define BTF_SOCK_TYPE(name, str) name,
+BTF_SOCK_TYPE_xxx
+#undef BTF_SOCK_TYPE
+MAX_BTF_SOCK_TYPE,
+};
+
+extern u32 btf_sock_ids[];
+#endif
+
+#define BTF_TRACING_TYPE_xxx \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_TASK, task_struct) \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_FILE, file) \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_VMA, vm_area_struct)
+
+enum {
+#define BTF_TRACING_TYPE(name, type) name,
+BTF_TRACING_TYPE_xxx
+#undef BTF_TRACING_TYPE
+MAX_BTF_TRACING_TYPE,
+};
+
+extern u32 btf_tracing_ids[];
+
+#endif
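
Illustrative usage of the macros above (all names are examples, and the
KF_* flags come from <linux/btf.h>; the kfuncs would have to exist for
resolve_btfids to patch the IDs in): a single-entry list for a struct, and
a flags set registered the way a module would.

	BTF_ID_LIST_SINGLE(demo_task_btf_ids, struct, task_struct)

	BTF_SET8_START(demo_kfunc_ids)
	BTF_ID_FLAGS(func, bpf_demo_acquire, KF_ACQUIRE | KF_RET_NULL)
	BTF_ID_FLAGS(func, bpf_demo_release, KF_RELEASE)
	BTF_SET8_END(demo_kfunc_ids)

	static const struct btf_kfunc_id_set demo_kfunc_set = {
		.owner = THIS_MODULE,
		.set   = &demo_kfunc_ids,
	};

	/* at module init:
	 * register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &demo_kfunc_set);
	 */
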
diff --git a/include/linux/btree.h b/include/linux/btree.h
index 68f858c831b1..243ee544397a 100644
--- a/include/linux/btree.h
+++ b/include/linux/btree.h
@@ -10,7 +10,7 @@
*
* A B+Tree is a data structure for looking up arbitrary (currently allowing
* unsigned long, u32, u64 and 2 * u64) keys into pointers. The data structure
- * is described at http://en.wikipedia.org/wiki/B-tree, we currently do not
+ * is described at https://en.wikipedia.org/wiki/B-tree, we currently do not
* use binary search to find the key on lookups.
*
* Each B+Tree consists of a head, that contains bookkeeping information and
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 7b73ef7f902d..33fa5e94aa80 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -9,6 +9,7 @@
#define _LINUX_BUFFER_HEAD_H
#include <linux/types.h>
+#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
@@ -22,9 +23,6 @@ enum bh_state_bits {
BH_Dirty, /* Is dirty */
BH_Lock, /* Is locked */
BH_Req, /* Has been submitted for I/O */
- BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
- * IO completion of other buffers in the page
- */
BH_Mapped, /* Has a disk mapping */
BH_New, /* Disk mapping was newly created by get_block */
@@ -76,6 +74,9 @@ struct buffer_head {
struct address_space *b_assoc_map; /* mapping this buffer is
associated with */
atomic_t b_count; /* users using this buffer_head */
+ spinlock_t b_uptodate_lock; /* Used by the first bh in a page, to
+ * serialise IO completion of other
+ * buffers in the page */
};
/*
@@ -117,7 +118,6 @@ static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
* of the form "mark_buffer_foo()". These are higher-level functions which
* do something in addition to setting a b_state bit.
*/
-BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
@@ -135,6 +135,41 @@ BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)
+static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
+{
+ /*
+ * If somebody else already set this uptodate, they will
+ * have done the memory barrier, and a reader will thus
+ * see *some* valid buffer state.
+ *
+ * Any other serialization (with IO errors or whatever that
+ * might clear the bit) has to come from other state (eg BH_Lock).
+ */
+ if (test_bit(BH_Uptodate, &bh->b_state))
+ return;
+
+ /*
+ * make it consistent with folio_mark_uptodate
+ * pairs with smp_load_acquire in buffer_uptodate
+ */
+ smp_mb__before_atomic();
+ set_bit(BH_Uptodate, &bh->b_state);
+}
+
+static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
+{
+ clear_bit(BH_Uptodate, &bh->b_state);
+}
+
+static __always_inline int buffer_uptodate(const struct buffer_head *bh)
+{
+ /*
+ * make it consistent with folio_test_uptodate
+ * pairs with smp_mb__before_atomic in set_buffer_uptodate
+ */
+ return test_bit_acquire(BH_Uptodate, &bh->b_state);
+}
+
#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
/* If we *know* page->private refers to buffer_heads */
@@ -144,8 +179,9 @@ BUFFER_FNS(Defer_Completion, defer_completion)
((struct buffer_head *)page_private(page)); \
})
#define page_has_buffers(page) PagePrivate(page)
+#define folio_buffers(folio) folio_get_private(folio)
-void buffer_check_dirty_writeback(struct page *page,
+void buffer_check_dirty_writeback(struct folio *folio,
bool *dirty, bool *writeback);
/*
@@ -157,7 +193,7 @@ void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
struct page *page, unsigned long offset);
-int try_to_free_buffers(struct page *);
+bool try_to_free_buffers(struct folio *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
bool retry);
void create_empty_buffers(struct page *, unsigned long,
@@ -192,19 +228,22 @@ void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
+void invalidate_bh_lrus_cpu(void);
+bool has_bh_in_lru(int cpu, void *dummy);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
-void ll_rw_block(int, int, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
-int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
-void write_dirty_buffer(struct buffer_head *bh, int op_flags);
-int submit_bh(int, int, struct buffer_head *);
+int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
+void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
+void submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
-int bh_submit_read(struct buffer_head *bh);
+int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
+void __bh_read_batch(int nr, struct buffer_head *bhs[],
+ blk_opf_t op_flags, bool force_lock);
extern int buffer_heads_over_limit;
@@ -212,18 +251,16 @@ extern int buffer_heads_over_limit;
* Generic address_space_operations implementations for buffer_head-backed
* address_spaces.
*/
-void block_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length);
+void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
int block_write_full_page(struct page *page, get_block_t *get_block,
struct writeback_control *wbc);
int __block_write_full_page(struct inode *inode, struct page *page,
get_block_t *get_block, struct writeback_control *wbc,
bh_end_io_t *handler);
-int block_read_full_page(struct page*, get_block_t*);
-int block_is_partially_uptodate(struct page *page, unsigned long from,
- unsigned long count);
+int block_read_full_folio(struct folio *, get_block_t *);
+bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
- unsigned flags, struct page **pagep, get_block_t *get_block);
+ struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
@@ -235,7 +272,7 @@ int generic_write_end(struct file *, struct address_space *,
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
- unsigned, unsigned, struct page **, void **,
+ unsigned, struct page **, void **,
get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
@@ -255,14 +292,16 @@ static inline vm_fault_t block_page_mkwrite_return(int err)
}
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
-int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
- struct page **, void **, get_block_t*);
-int nobh_write_end(struct file *, struct address_space *,
- loff_t, unsigned, unsigned,
- struct page *, void *);
-int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
-int nobh_writepage(struct page *page, get_block_t *get_block,
- struct writeback_control *wbc);
+
+#ifdef CONFIG_MIGRATION
+extern int buffer_migrate_folio(struct address_space *,
+ struct folio *dst, struct folio *src, enum migrate_mode);
+extern int buffer_migrate_folio_norefs(struct address_space *,
+ struct folio *dst, struct folio *src, enum migrate_mode);
+#else
+#define buffer_migrate_folio NULL
+#define buffer_migrate_folio_norefs NULL
+#endif
void buffer_init(void);
@@ -270,14 +309,6 @@ void buffer_init(void);
* inline definitions
*/
-static inline void attach_page_buffers(struct page *page,
- struct buffer_head *head)
-{
- get_page(page);
- SetPagePrivate(page);
- set_page_private(page, (unsigned long)head);
-}
-
static inline void get_bh(struct buffer_head *bh)
{
atomic_inc(&bh->b_count);
@@ -380,6 +411,41 @@ static inline struct buffer_head *__getblk(struct block_device *bdev,
return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}
+static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)
+{
+ if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
+ if (!buffer_uptodate(bh))
+ __bh_read(bh, op_flags, false);
+ else
+ unlock_buffer(bh);
+ }
+}
+
+static inline void bh_read_nowait(struct buffer_head *bh, blk_opf_t op_flags)
+{
+ if (!bh_uptodate_or_lock(bh))
+ __bh_read(bh, op_flags, false);
+}
+
+/* Returns 1 if the buffer was already uptodate, 0 on successful read, and -EIO on error. */
+static inline int bh_read(struct buffer_head *bh, blk_opf_t op_flags)
+{
+ if (bh_uptodate_or_lock(bh))
+ return 1;
+ return __bh_read(bh, op_flags, true);
+}
+
+static inline void bh_read_batch(int nr, struct buffer_head *bhs[])
+{
+ __bh_read_batch(nr, bhs, 0, true);
+}
+
+static inline void bh_readahead_batch(int nr, struct buffer_head *bhs[],
+ blk_opf_t op_flags)
+{
+ __bh_read_batch(nr, bhs, op_flags, false);
+}
+
/**
* __bread() - reads a specified block and returns the bh
* @bdev: the block_device to read from
@@ -396,16 +462,19 @@ __bread(struct block_device *bdev, sector_t block, unsigned size)
return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}
-extern int __set_page_dirty_buffers(struct page *page);
+bool block_dirty_folio(struct address_space *mapping, struct folio *folio);
#else /* CONFIG_BLOCK */
static inline void buffer_init(void) {}
-static inline int try_to_free_buffers(struct page *page) { return 1; }
+static inline bool try_to_free_buffers(struct folio *folio) { return true; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
+static inline void invalidate_bh_lrus_cpu(void) {}
+static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
+#define buffer_heads_over_limit 0
#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */
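
Sketch of the read-path idiom the new helpers encode, replacing the removed
ll_rw_block()/bh_submit_read() pattern (the function is illustrative, not
kernel API):

	static int demo_read_block(struct block_device *bdev, sector_t block,
				   unsigned int size, struct buffer_head **out)
	{
		struct buffer_head *bh = __getblk(bdev, block, size);
		int ret;

		if (!bh)
			return -ENOMEM;

		/* 1: was already uptodate, 0: read completed, <0: -EIO */
		ret = bh_read(bh, 0);
		if (ret < 0) {
			brelse(bh);
			return ret;
		}
		*out = bh;
		return 0;
	}
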
diff --git a/include/linux/bug.h b/include/linux/bug.h
index f639bd0122f3..348acf2558f3 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -36,6 +36,9 @@ static inline int is_warning_bug(const struct bug_entry *bug)
return bug->flags & BUGFLAG_WARNING;
}
+void bug_get_file_line(struct bug_entry *bug, const char **file,
+ unsigned int *line);
+
struct bug_entry *find_bug(unsigned long bugaddr);
enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);
@@ -58,6 +61,13 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr,
return BUG_TRAP_TYPE_BUG;
}
+struct bug_entry;
+static inline void bug_get_file_line(struct bug_entry *bug, const char **file,
+ unsigned int *line)
+{
+ *file = NULL;
+ *line = 0;
+}
static inline void generic_bug_clear_once(void) {}
diff --git a/include/linux/buildid.h b/include/linux/buildid.h
new file mode 100644
index 000000000000..3b7a0ff4642f
--- /dev/null
+++ b/include/linux/buildid.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BUILDID_H
+#define _LINUX_BUILDID_H
+
+#include <linux/mm_types.h>
+
+#define BUILD_ID_SIZE_MAX 20
+
+int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
+ __u32 *size);
+int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size);
+
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_CRASH_CORE)
+extern unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX];
+void init_vmlinux_build_id(void);
+#else
+static inline void init_vmlinux_build_id(void) { }
+#endif
+
+#endif
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index a81c13ac1972..35c25dff651a 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -4,16 +4,30 @@
*
* Copyright (C) 2001 Ming Lei <ming.lei@canonical.com>
*/
-#ifndef __LINUX_BVEC_ITER_H
-#define __LINUX_BVEC_ITER_H
+#ifndef __LINUX_BVEC_H
+#define __LINUX_BVEC_H
-#include <linux/kernel.h>
+#include <linux/highmem.h>
#include <linux/bug.h>
#include <linux/errno.h>
+#include <linux/limits.h>
+#include <linux/minmax.h>
#include <linux/mm.h>
+#include <linux/types.h>
-/*
- * was unsigned short, but we might as well be ready for > 64kB I/O pages
+struct page;
+
+/**
+ * struct bio_vec - a contiguous range of physical memory addresses
+ * @bv_page: First page associated with the address range.
+ * @bv_len: Number of bytes in the address range.
+ * @bv_offset: Start of the address range relative to the start of @bv_page.
+ *
+ * The following holds for a bvec if n * PAGE_SIZE < bv_offset + bv_len:
+ *
+ * nth_page(@bv_page, n) == @bv_page + n
+ *
+ * This holds because page_is_mergeable() checks the above property.
*/
struct bio_vec {
struct page *bv_page;
@@ -30,7 +44,7 @@ struct bvec_iter {
unsigned int bi_bvec_done; /* number of bytes completed in
current bvec */
-};
+} __packed;
struct bvec_iter_all {
struct bio_vec bv;
@@ -108,11 +122,28 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv,
return true;
}
+/*
+ * A simpler version of bvec_iter_advance(): @bytes must not span
+ * multiple bvec entries, i.e. bytes <= bv[i->bi_idx].bv_len
+ */
+static inline void bvec_iter_advance_single(const struct bio_vec *bv,
+ struct bvec_iter *iter, unsigned int bytes)
+{
+ unsigned int done = iter->bi_bvec_done + bytes;
+
+ if (done == bv[iter->bi_idx].bv_len) {
+ done = 0;
+ iter->bi_idx++;
+ }
+ iter->bi_bvec_done = done;
+ iter->bi_size -= bytes;
+}
+
#define for_each_bvec(bvl, bio_vec, iter, start) \
for (iter = (start); \
(iter).bi_size && \
((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
- bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
+ bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len))
/* for iterating one bio from start to end */
#define BVEC_ITER_ALL_INIT (struct bvec_iter) \
@@ -153,4 +184,61 @@ static inline void bvec_advance(const struct bio_vec *bvec,
}
}
-#endif /* __LINUX_BVEC_ITER_H */
+/**
+ * bvec_kmap_local - map a bvec into the kernel virtual address space
+ * @bvec: bvec to map
+ *
+ * Must be called on single-page bvecs only. Call kunmap_local on the returned
+ * address to unmap.
+ */
+static inline void *bvec_kmap_local(struct bio_vec *bvec)
+{
+ return kmap_local_page(bvec->bv_page) + bvec->bv_offset;
+}
+
+/**
+ * memcpy_from_bvec - copy data from a bvec
+ * @to: destination buffer to copy into
+ * @bvec: bvec to copy from
+ *
+ * Must be called on single-page bvecs only.
+ */
+static inline void memcpy_from_bvec(char *to, struct bio_vec *bvec)
+{
+ memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, bvec->bv_len);
+}
+
+/**
+ * memcpy_to_bvec - copy data to a bvec
+ * @bvec: bvec to copy to
+ * @from: source buffer to copy from
+ *
+ * Must be called on single-page bvecs only.
+ */
+static inline void memcpy_to_bvec(struct bio_vec *bvec, const char *from)
+{
+ memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, bvec->bv_len);
+}
+
+/**
+ * memzero_bvec - zero all data in a bvec
+ * @bvec: bvec to zero
+ *
+ * Must be called on single-page bvecs only.
+ */
+static inline void memzero_bvec(struct bio_vec *bvec)
+{
+ memzero_page(bvec->bv_page, bvec->bv_offset, bvec->bv_len);
+}
+
+/**
+ * bvec_virt - return the virtual address for a bvec
+ * @bvec: bvec to return the virtual address for
+ *
+ * Note: the caller must ensure that @bvec->bv_page is not a highmem page.
+ */
+static inline void *bvec_virt(struct bio_vec *bvec)
+{
+ WARN_ON_ONCE(PageHighMem(bvec->bv_page));
+ return page_address(bvec->bv_page) + bvec->bv_offset;
+}
+
+#endif /* __LINUX_BVEC_H */
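
Minimal sketch of iterating with the machinery above (illustrative
function): for_each_bvec() yields single-page slices, so bvec_kmap_local()
is legal on each one.

	static size_t demo_count_bytes(const struct bio_vec *bvecs,
				       struct bvec_iter start)
	{
		struct bvec_iter iter;
		struct bio_vec bv;
		size_t total = 0;

		for_each_bvec(bv, bvecs, iter, start) {
			void *p = bvec_kmap_local(&bv);

			/* a real user would read or write *p here */
			total += bv.bv_len;
			kunmap_local(p);
		}
		return total;
	}
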
diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h
index 4b13e0a3e15b..c9a4c96c9943 100644
--- a/include/linux/byteorder/generic.h
+++ b/include/linux/byteorder/generic.h
@@ -190,7 +190,7 @@ static inline void be64_add_cpu(__be64 *var, u64 val)
static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
{
- int i;
+ size_t i;
for (i = 0; i < len; i++)
dst[i] = cpu_to_be32(src[i]);
@@ -198,7 +198,7 @@ static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
static inline void be32_to_cpu_array(u32 *dst, const __be32 *src, size_t len)
{
- int i;
+ size_t i;
for (i = 0; i < len; i++)
dst[i] = be32_to_cpu(src[i]);
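
The index-type change matters for correctness on 64-bit: a size_t len above
INT_MAX would overflow an int counter. Usage is unchanged (sketch, buffer
names illustrative):

	static void demo_to_wire(__be32 *wire, const u32 *host, size_t words)
	{
		cpu_to_be32_array(wire, host, words);	/* host -> big endian */
	}
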
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 750621e41d1c..5da1bbd96154 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -15,8 +15,14 @@
/*
* __read_mostly is used to keep rarely changing variables out of frequently
- * updated cachelines. If an architecture doesn't support it, ignore the
- * hint.
+ * updated cachelines. Its use should be reserved for data that is used
+ * frequently in hot paths. Performance traces can help decide when to use
+ * this. You want __read_mostly data to be tightly packed, so that in the
+ * best case multiple frequently read variables for a hot path will be next
+ * to each other in order to reduce the number of cachelines needed to
+ * execute a critical path. We should be mindful and selective of its use,
+ * i.e. if you're going to use it, please supply a *good* justification in
+ * your commit log
*/
#ifndef __read_mostly
#define __read_mostly
@@ -28,7 +34,7 @@
* but may get written to during init, so can't live in .rodata (via "const").
*/
#ifndef __ro_after_init
-#define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
+#define __ro_after_init __section(".data..ro_after_init")
#endif
#ifndef ____cacheline_aligned
@@ -79,4 +85,17 @@
#define cache_line_size() L1_CACHE_BYTES
#endif
+/*
+ * Helper to add padding within a struct to ensure data fall into separate
+ * cachelines.
+ */
+#if defined(CONFIG_SMP)
+struct cacheline_padding {
+ char x[0];
+} ____cacheline_internodealigned_in_smp;
+#define CACHELINE_PADDING(name) struct cacheline_padding name
+#else
+#define CACHELINE_PADDING(name)
+#endif
+
#endif /* __LINUX_CACHE_H */
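
Sketch of the intended use of CACHELINE_PADDING() (field names are
illustrative): separate a read-mostly group from a frequently written field
so they never share a cacheline on SMP; on !SMP the macro compiles away.

	struct demo_counter_set {
		/* read-mostly configuration */
		unsigned long mask;
		unsigned long limit;

		CACHELINE_PADDING(_pad1_);

		/* written on every event */
		unsigned long hits;
	};
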
diff --git a/include/linux/cacheflush.h b/include/linux/cacheflush.h
new file mode 100644
index 000000000000..a6189d21f2ba
--- /dev/null
+++ b/include/linux/cacheflush.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CACHEFLUSH_H
+#define _LINUX_CACHEFLUSH_H
+
+#include <asm/cacheflush.h>
+
+struct folio;
+
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
+void flush_dcache_folio(struct folio *folio);
+#endif
+#else
+static inline void flush_dcache_folio(struct folio *folio)
+{
+}
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO 0
+#endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */
+
+#endif /* _LINUX_CACHEFLUSH_H */
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 46b92cd61d0c..00b7a6ae8617 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -78,28 +78,13 @@ struct cpu_cacheinfo {
bool cpu_map_populated;
};
-/*
- * Helpers to make sure "func" is executed on the cpu whose cache
- * attributes are being detected
- */
-#define DEFINE_SMP_CALL_CACHE_FUNCTION(func) \
-static inline void _##func(void *ret) \
-{ \
- int cpu = smp_processor_id(); \
- *(int *)ret = __##func(cpu); \
-} \
- \
-int func(unsigned int cpu) \
-{ \
- int ret; \
- smp_call_function_single(cpu, _##func, &ret, true); \
- return ret; \
-}
-
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
int init_cache_level(unsigned int cpu);
int populate_cache_leaves(unsigned int cpu);
int cache_setup_acpi(unsigned int cpu);
+bool last_level_cache_is_valid(unsigned int cpu);
+bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y);
+int detect_cache_attributes(unsigned int cpu);
#ifndef CONFIG_ACPI_PPTT
/*
* acpi_find_last_cache_level is only called on ACPI enabled
@@ -119,4 +104,24 @@ int acpi_find_last_cache_level(unsigned int cpu);
const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf);
+/*
+ * Get the id of the cache associated with @cpu at level @level.
+ * cpuhp lock must be held.
+ */
+static inline int get_cpu_cacheinfo_id(int cpu, int level)
+{
+ struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
+ int i;
+
+ for (i = 0; i < ci->num_leaves; i++) {
+ if (ci->info_list[i].level == level) {
+ if (ci->info_list[i].attributes & CACHE_ID)
+ return ci->info_list[i].id;
+ return -1;
+ }
+ }
+
+ return -1;
+}
+
#endif /* _LINUX_CACHEINFO_H */
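
Usage sketch for the new helper (illustrative; the caller must hold the
cpuhp lock as the comment above requires): group CPUs by shared L3 by
comparing cache ids.

	static bool demo_share_l3(int cpu_a, int cpu_b)
	{
		int a = get_cpu_cacheinfo_id(cpu_a, 3);

		return a != -1 && a == get_cpu_cacheinfo_id(cpu_b, 3);
	}
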
diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h
new file mode 100644
index 000000000000..ef0a77173e3c
--- /dev/null
+++ b/include/linux/can/bittiming.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2020 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (c) 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ */
+
+#ifndef _CAN_BITTIMING_H
+#define _CAN_BITTIMING_H
+
+#include <linux/netdevice.h>
+#include <linux/can/netlink.h>
+
+#define CAN_SYNC_SEG 1
+
+#define CAN_BITRATE_UNSET 0
+#define CAN_BITRATE_UNKNOWN (-1U)
+
+#define CAN_CTRLMODE_TDC_MASK \
+ (CAN_CTRLMODE_TDC_AUTO | CAN_CTRLMODE_TDC_MANUAL)
+
+/*
+ * struct can_tdc - CAN FD Transmission Delay Compensation parameters
+ *
+ * At high bit rates, the propagation delay from the TX pin to the RX
+ * pin of the transceiver causes measurement errors: the sample point
+ * on the RX pin might occur on the previous bit.
+ *
+ * To solve this issue, ISO 11898-1 introduces in section 11.3.3
+ * "Transmitter delay compensation" an SSP (Secondary Sample Point)
+ * equal to the distance from the start of the bit time on the TX pin
+ * to the actual measurement on the RX pin.
+ *
+ * This structure contains the parameters to calculate that SSP.
+ *
+ * --+----------- one bit ----------+-- TX pin
+ *   |<--- Sample Point --->|
+ *
+ * --+----------- one bit ----------+-- RX pin
+ *   |<-------- TDCV -------->|
+ *                            |<------- TDCO ------->|
+ *   |<----------- Secondary Sample Point ---------->|
+ *
+ * To increase precision, contrary to the other bittiming parameters
+ * which are measured in time quanta, the TDC parameters are measured
+ * in clock periods (also referred as "minimum time quantum" in ISO
+ * 11898-1).
+ *
+ * @tdcv: Transmitter Delay Compensation Value. The time needed for
+ * the signal to propagate, i.e. the distance, in clock periods,
+ * from the start of the bit on the TX pin to when it is received
+ * on the RX pin. @tdcv depends on the controller modes:
+ *
+ * CAN_CTRLMODE_TDC_AUTO is set: The transceiver dynamically
+ * measures @tdcv for each transmitted CAN FD frame and the
+ * value provided here should be ignored.
+ *
+ * CAN_CTRLMODE_TDC_MANUAL is set: use the fixed provided @tdcv
+ * value.
+ *
+ * N.B. CAN_CTRLMODE_TDC_AUTO and CAN_CTRLMODE_TDC_MANUAL are
+ * mutually exclusive. Only one can be set at a time. If both
+ * CAN_TDC_CTRLMODE_AUTO and CAN_TDC_CTRLMODE_MANUAL are unset,
+ * TDC is disabled and all the values of this structure should be
+ * ignored.
+ *
+ * @tdco: Transmitter Delay Compensation Offset. Offset value, in
+ * clock periods, defining the distance between the start of the
+ * bit reception on the RX pin of the transceiver and the SSP
+ * position such that SSP = @tdcv + @tdco.
+ *
+ * @tdcf: Transmitter Delay Compensation Filter window. Defines the
+ * minimum value for the SSP position in clock periods. If the
+ * SSP position is less than @tdcf, then no delay compensations
+ * occur and the normal sampling point is used instead. The
+ * feature is enabled if and only if @tdcv is set to zero
+ * (automatic mode) and @tdcf is configured to a value greater
+ * than @tdco.
+ */
+struct can_tdc {
+ u32 tdcv;
+ u32 tdco;
+ u32 tdcf;
+};
+
+/*
+ * struct can_tdc_const - CAN hardware-dependent constant for
+ * Transmission Delay Compensation
+ *
+ * @tdcv_min: Transmitter Delay Compensation Value minimum value. If
+ * the controller does not support manual mode for tdcv
+ * (cf. flag CAN_CTRLMODE_TDC_MANUAL) then this value is
+ * ignored.
+ * @tdcv_max: Transmitter Delay Compensation Value maximum value. If
+ * the controller does not support manual mode for tdcv
+ * (cf. flag CAN_CTRLMODE_TDC_MANUAL) then this value is
+ * ignored.
+ *
+ * @tdco_min: Transmitter Delay Compensation Offset minimum value.
+ * @tdco_max: Transmitter Delay Compensation Offset maximum value.
+ * Should not be zero. If the controller does not support TDC,
+ * then the pointer to this structure should be NULL.
+ *
+ * @tdcf_min: Transmitter Delay Compensation Filter window minimum
+ * value. If @tdcf_max is zero, this value is ignored.
+ * @tdcf_max: Transmitter Delay Compensation Filter window maximum
+ * value. Should be set to zero if the controller does not
+ * support this feature.
+ */
+struct can_tdc_const {
+ u32 tdcv_min;
+ u32 tdcv_max;
+ u32 tdco_min;
+ u32 tdco_max;
+ u32 tdcf_min;
+ u32 tdcf_max;
+};
+
+#ifdef CONFIG_CAN_CALC_BITTIMING
+int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc);
+
+void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
+ const struct can_bittiming *dbt,
+ u32 *ctrlmode, u32 ctrlmode_supported);
+#else /* !CONFIG_CAN_CALC_BITTIMING */
+static inline int
+can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc)
+{
+ netdev_err(dev, "bit-timing calculation not available\n");
+ return -EINVAL;
+}
+
+static inline void
+can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
+ const struct can_bittiming *dbt,
+ u32 *ctrlmode, u32 ctrlmode_supported)
+{
+}
+#endif /* CONFIG_CAN_CALC_BITTIMING */
+
+int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc,
+ const u32 *bitrate_const,
+ const unsigned int bitrate_const_cnt);
+
+/*
+ * can_bit_time() - Duration of one bit
+ *
+ * Please refer to ISO 11898-1:2015, section 11.3.1.1 "Bit time" for
+ * additional information.
+ *
+ * Return: the number of time quanta in one bit.
+ */
+static inline unsigned int can_bit_time(const struct can_bittiming *bt)
+{
+ return CAN_SYNC_SEG + bt->prop_seg + bt->phase_seg1 + bt->phase_seg2;
+}
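
As a usage note, can_bit_time() composes with the tq member of struct
can_bittiming (the time quantum in nanoseconds) to recover the nominal
bit rate. The helper below is a hypothetical sketch, not an API of this
header:

#include <linux/math64.h>

/* sketch: nominal bit rate in bit/s, assuming bt->tq holds the time
 * quantum in nanoseconds */
static u32 example_bitrate(const struct can_bittiming *bt)
{
	u64 bit_time_ns = (u64)can_bit_time(bt) * bt->tq;

	return (u32)div64_u64(NSEC_PER_SEC, bit_time_ns);
}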
+
+#endif /* !_CAN_BITTIMING_H */
diff --git a/include/linux/can/can-ml.h b/include/linux/can/can-ml.h
index 2f5d731ae251..8afa92d15a66 100644
--- a/include/linux/can/can-ml.h
+++ b/include/linux/can/can-ml.h
@@ -44,6 +44,7 @@
#include <linux/can.h>
#include <linux/list.h>
+#include <linux/netdevice.h>
#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
#define CAN_EFF_RCV_HASH_BITS 10
@@ -65,4 +66,15 @@ struct can_ml_priv {
#endif
};
+static inline struct can_ml_priv *can_get_ml_priv(struct net_device *dev)
+{
+ return netdev_get_ml_priv(dev, ML_PRIV_CAN);
+}
+
+static inline void can_set_ml_priv(struct net_device *dev,
+ struct can_ml_priv *ml_priv)
+{
+ netdev_set_ml_priv(dev, ml_priv, ML_PRIV_CAN);
+}
+
#endif /* CAN_ML_H */
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index e20a0cd09ba5..5fb8d0e3f9c1 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -2,7 +2,7 @@
/*
* linux/can/core.h
*
- * Protoypes and definitions for CAN protocol modules using the PF_CAN core
+ * Prototypes and definitions for CAN protocol modules using the PF_CAN core
*
* Authors: Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
* Urs Thuermann <urs.thuermann@volkswagen.de>
@@ -18,13 +18,6 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#define CAN_VERSION "20170425"
-
-/* increment this number each time you change some user-space interface */
-#define CAN_ABI_VERSION "9"
-
-#define CAN_VERSION_STRING "rev " CAN_VERSION " abi " CAN_ABI_VERSION
-
#define DNAME(dev) ((dev) ? (dev)->name : "any")
/**
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 5e3d45525bd3..982ba245eb41 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -15,10 +15,12 @@
#define _CAN_DEV_H
#include <linux/can.h>
+#include <linux/can/bittiming.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
+#include <linux/can/length.h>
#include <linux/can/netlink.h>
#include <linux/can/skb.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
/*
@@ -30,6 +32,12 @@ enum can_mode {
CAN_MODE_SLEEP
};
+enum can_termination_gpio {
+ CAN_TERMINATION_GPIO_DISABLED = 0,
+ CAN_TERMINATION_GPIO_ENABLED,
+ CAN_TERMINATION_GPIO_MAX,
+};
+
/*
* CAN common private data
*/
@@ -37,25 +45,33 @@ struct can_priv {
struct net_device *dev;
struct can_device_stats can_stats;
- struct can_bittiming bittiming, data_bittiming;
const struct can_bittiming_const *bittiming_const,
*data_bittiming_const;
- const u16 *termination_const;
- unsigned int termination_const_cnt;
- u16 termination;
- const u32 *bitrate_const;
+ struct can_bittiming bittiming, data_bittiming;
+ const struct can_tdc_const *tdc_const;
+ struct can_tdc tdc;
+
unsigned int bitrate_const_cnt;
+ const u32 *bitrate_const;
const u32 *data_bitrate_const;
unsigned int data_bitrate_const_cnt;
u32 bitrate_max;
struct can_clock clock;
+ unsigned int termination_const_cnt;
+ const u16 *termination_const;
+ u16 termination;
+ struct gpio_desc *termination_gpio;
+ u16 termination_gpio_ohms[CAN_TERMINATION_GPIO_MAX];
+
+ unsigned int echo_skb_max;
+ struct sk_buff **echo_skb;
+
enum can_state state;
/* CAN controller features - see include/uapi/linux/can/netlink.h */
u32 ctrlmode; /* current options setting */
u32 ctrlmode_supported; /* options that can be modified by netlink */
- u32 ctrlmode_static; /* static enabled options for driver/hardware */
int restart_ms;
struct delayed_work restart_work;
@@ -68,114 +84,91 @@ struct can_priv {
enum can_state *state);
int (*do_get_berr_counter)(const struct net_device *dev,
struct can_berr_counter *bec);
-
- unsigned int echo_skb_max;
- struct sk_buff **echo_skb;
-
-#ifdef CONFIG_CAN_LEDS
- struct led_trigger *tx_led_trig;
- char tx_led_trig_name[CAN_LED_NAME_SZ];
- struct led_trigger *rx_led_trig;
- char rx_led_trig_name[CAN_LED_NAME_SZ];
- struct led_trigger *rxtx_led_trig;
- char rxtx_led_trig_name[CAN_LED_NAME_SZ];
-#endif
+ int (*do_get_auto_tdcv)(const struct net_device *dev, u32 *tdcv);
};
+static inline bool can_tdc_is_enabled(const struct can_priv *priv)
+{
+ return !!(priv->ctrlmode & CAN_CTRLMODE_TDC_MASK);
+}
+
/*
- * get_can_dlc(value) - helper macro to cast a given data length code (dlc)
- * to __u8 and ensure the dlc value to be max. 8 bytes.
+ * can_get_relative_tdco() - TDCO relative to the sample point
*
- * To be used in the CAN netdriver receive path to ensure conformance with
- * ISO 11898-1 Chapter 8.4.2.3 (DLC field)
+ * struct can_tdc::tdco represents the absolute offset from TDCV. Some
+ * controllers use instead an offset relative to the Sample Point (SP)
+ * such that:
+ *
+ * SSP = TDCV + absolute TDCO
+ *     = TDCV + SP + relative TDCO
+ *
+ * -+----------- one bit ----------+-- TX pin
+ *  |<--- Sample Point --->|
+ *
+ * --+----------- one bit ----------+-- RX pin
+ *  |<-------- TDCV -------->|
+ *                           |<------------------------>| absolute TDCO
+ *  |<--- Sample Point --->|
+ *  |                      |<->| relative TDCO
+ *  |<------------- Secondary Sample Point ------------>|
*/
-#define get_can_dlc(i) (min_t(__u8, (i), CAN_MAX_DLC))
-#define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC))
+static inline s32 can_get_relative_tdco(const struct can_priv *priv)
+{
+ const struct can_bittiming *dbt = &priv->data_bittiming;
+ s32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
+ dbt->phase_seg1) * dbt->brp;
+
+ return (s32)priv->tdc.tdco - sample_point_in_tc;
+}
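
A quick worked example with hypothetical numbers: for a data bittiming
of brp = 1, prop_seg = 2 and phase_seg1 = 5, the sample point sits at
(CAN_SYNC_SEG + 2 + 5) * 1 = 8 clock periods into the bit. An absolute
tdco of 10 then gives can_get_relative_tdco() = 10 - 8 = 2, while a
tdco of 6 gives -2, which is why the return type is a signed s32.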
-/* Check for outgoing skbs that have not been created by the CAN subsystem */
-static inline bool can_skb_headroom_valid(struct net_device *dev,
- struct sk_buff *skb)
+/* helper to define static CAN controller features at device creation time */
+static inline int __must_check can_set_static_ctrlmode(struct net_device *dev,
+ u32 static_mode)
{
- /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */
- if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv)))
- return false;
-
- /* af_packet does not apply CAN skb specific settings */
- if (skb->ip_summed == CHECKSUM_NONE) {
- /* init headroom */
- can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- /* preform proper loopback on capable devices */
- if (dev->flags & IFF_ECHO)
- skb->pkt_type = PACKET_LOOPBACK;
- else
- skb->pkt_type = PACKET_HOST;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* alloc_candev() succeeded => netdev_priv() is valid at this point */
+ if (priv->ctrlmode_supported & static_mode) {
+ netdev_warn(dev,
+ "Controller features can not be supported and static at the same time\n");
+ return -EINVAL;
}
+ priv->ctrlmode = static_mode;
+
+ /* override MTU which was set by default in can_setup()? */
+ if (static_mode & CAN_CTRLMODE_FD)
+ dev->mtu = CANFD_MTU;
- return true;
+ return 0;
}
-/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
-static inline bool can_dropped_invalid_skb(struct net_device *dev,
- struct sk_buff *skb)
+static inline u32 can_get_static_ctrlmode(struct can_priv *priv)
{
- const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
-
- if (skb->protocol == htons(ETH_P_CAN)) {
- if (unlikely(skb->len != CAN_MTU ||
- cfd->len > CAN_MAX_DLEN))
- goto inval_skb;
- } else if (skb->protocol == htons(ETH_P_CANFD)) {
- if (unlikely(skb->len != CANFD_MTU ||
- cfd->len > CANFD_MAX_DLEN))
- goto inval_skb;
- } else
- goto inval_skb;
-
- if (!can_skb_headroom_valid(dev, skb))
- goto inval_skb;
-
- return false;
-
-inval_skb:
- kfree_skb(skb);
- dev->stats.tx_dropped++;
- return true;
+ return priv->ctrlmode & ~priv->ctrlmode_supported;
}
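
The pairing of these two helpers can be illustrated with a sketch of a
probe path for a hypothetical FD-only controller: the static bit is set
once and, because it is never advertised in ctrlmode_supported, it can
later be recovered by masking.

/* sketch; error label and surrounding driver code are hypothetical */
err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD);
if (err)
	goto out_free_candev;

/* elsewhere: can_get_static_ctrlmode(netdev_priv(dev)) now reports
 * CAN_CTRLMODE_FD as a static, always-on feature */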
-static inline bool can_is_canfd_skb(const struct sk_buff *skb)
+static inline bool can_is_canxl_dev_mtu(unsigned int mtu)
{
- /* the CAN specific type of skb is identified by its data length */
- return skb->len == CANFD_MTU;
+ return (mtu >= CANXL_MIN_MTU && mtu <= CANXL_MAX_MTU);
}
-/* helper to define static CAN controller features at device creation time */
-static inline void can_set_static_ctrlmode(struct net_device *dev,
- u32 static_mode)
+/* drop skb if it does not contain a valid CAN frame for sending */
+static inline bool can_dev_dropped_skb(struct net_device *dev, struct sk_buff *skb)
{
struct can_priv *priv = netdev_priv(dev);
- /* alloc_candev() succeeded => netdev_priv() is valid at this point */
- priv->ctrlmode = static_mode;
- priv->ctrlmode_static = static_mode;
+ if (priv->ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+ netdev_info_once(dev,
+ "interface in listen only mode, dropping skb\n");
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return true;
+ }
- /* override MTU which was set by default in can_setup()? */
- if (static_mode & CAN_CTRLMODE_FD)
- dev->mtu = CANFD_MTU;
+ return can_dropped_invalid_skb(dev, skb);
}
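
A typical call site for can_dev_dropped_skb() is the very first check
in a driver's ndo_start_xmit(); the driver name below is hypothetical:

static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	if (can_dev_dropped_skb(dev, skb))
		return NETDEV_TX_OK;	/* skb already freed and counted */

	/* ... hand the frame to the hardware ... */
	return NETDEV_TX_OK;
}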
-/* get data length from can_dlc with sanitized can_dlc */
-u8 can_dlc2len(u8 can_dlc);
-
-/* map the sanitized data length to an appropriate data length code */
-u8 can_len2dlc(u8 len);
+void can_setup(struct net_device *dev);
struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
unsigned int txqs, unsigned int rxqs);
@@ -191,6 +184,9 @@ struct can_priv *safe_candev_priv(struct net_device *dev);
int open_candev(struct net_device *dev);
void close_candev(struct net_device *dev);
int can_change_mtu(struct net_device *dev, int new_mtu);
+int can_eth_ioctl_hwts(struct net_device *netdev, struct ifreq *ifr, int cmd);
+int can_ethtool_op_get_ts_info_hwts(struct net_device *dev,
+ struct ethtool_ts_info *info);
int register_candev(struct net_device *dev);
void unregister_candev(struct net_device *dev);
@@ -198,26 +194,18 @@ void unregister_candev(struct net_device *dev);
int can_restart_now(struct net_device *dev);
void can_bus_off(struct net_device *dev);
+const char *can_get_state_str(const enum can_state state);
void can_change_state(struct net_device *dev, struct can_frame *cf,
enum can_state tx_state, enum can_state rx_state);
-void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
- unsigned int idx);
-struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx,
- u8 *len_ptr);
-unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
-void can_free_echo_skb(struct net_device *dev, unsigned int idx);
-
#ifdef CONFIG_OF
void of_can_transceiver(struct net_device *dev);
#else
static inline void of_can_transceiver(struct net_device *dev) { }
#endif
-struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
-struct sk_buff *alloc_canfd_skb(struct net_device *dev,
- struct canfd_frame **cfd);
-struct sk_buff *alloc_can_err_skb(struct net_device *dev,
- struct can_frame **cf);
+extern struct rtnl_link_ops can_link_ops;
+int can_netlink_register(void);
+void can_netlink_unregister(void);
#endif /* !_CAN_DEV_H */
diff --git a/include/linux/can/dev/peak_canfd.h b/include/linux/can/dev/peak_canfd.h
index 511a37302fea..f38772fd0c07 100644
--- a/include/linux/can/dev/peak_canfd.h
+++ b/include/linux/can/dev/peak_canfd.h
@@ -189,7 +189,7 @@ struct __packed pucan_rx_msg {
u8 client;
__le16 flags;
__le32 can_id;
- u8 d[0];
+ u8 d[];
};
/* uCAN error types */
@@ -266,7 +266,7 @@ struct __packed pucan_tx_msg {
u8 client;
__le16 flags;
__le32 can_id;
- u8 d[0];
+ u8 d[];
};
/* build the cmd opcode_channel field with respect to the correct endianness */
@@ -282,7 +282,7 @@ static inline int pucan_msg_get_channel(const struct pucan_rx_msg *msg)
}
/* return the dlc value from any received message channel_dlc field */
-static inline int pucan_msg_get_dlc(const struct pucan_rx_msg *msg)
+static inline u8 pucan_msg_get_dlc(const struct pucan_rx_msg *msg)
{
return msg->channel_dlc >> 4;
}
diff --git a/include/linux/can/led.h b/include/linux/can/led.h
deleted file mode 100644
index 7c3cfd798c56..000000000000
--- a/include/linux/can/led.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2012, Fabio Baltieri <fabio.baltieri@gmail.com>
- */
-
-#ifndef _CAN_LED_H
-#define _CAN_LED_H
-
-#include <linux/if.h>
-#include <linux/leds.h>
-#include <linux/netdevice.h>
-
-enum can_led_event {
- CAN_LED_EVENT_OPEN,
- CAN_LED_EVENT_STOP,
- CAN_LED_EVENT_TX,
- CAN_LED_EVENT_RX,
-};
-
-#ifdef CONFIG_CAN_LEDS
-
-/* keep space for interface name + "-tx"/"-rx"/"-rxtx"
- * suffix and null terminator
- */
-#define CAN_LED_NAME_SZ (IFNAMSIZ + 6)
-
-void can_led_event(struct net_device *netdev, enum can_led_event event);
-void devm_can_led_init(struct net_device *netdev);
-int __init can_led_notifier_init(void);
-void __exit can_led_notifier_exit(void);
-
-#else
-
-static inline void can_led_event(struct net_device *netdev,
- enum can_led_event event)
-{
-}
-static inline void devm_can_led_init(struct net_device *netdev)
-{
-}
-static inline int can_led_notifier_init(void)
-{
- return 0;
-}
-static inline void can_led_notifier_exit(void)
-{
-}
-
-#endif
-
-#endif /* !_CAN_LED_H */
diff --git a/include/linux/can/length.h b/include/linux/can/length.h
new file mode 100644
index 000000000000..6995092b774e
--- /dev/null
+++ b/include/linux/can/length.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Oliver Hartkopp <socketcan@hartkopp.net>
+ * Copyright (C) 2020 Marc Kleine-Budde <kernel@pengutronix.de>
+ */
+
+#ifndef _CAN_LENGTH_H
+#define _CAN_LENGTH_H
+
+/*
+ * Size of a Classical CAN Standard Frame
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start-of-frame 1
+ * Identifier 11
+ * Remote transmission request (RTR) 1
+ * Identifier extension bit (IDE) 1
+ * Reserved bit (r0) 1
+ * Data length code (DLC) 4
+ * Data field 0...64
+ * CRC 15
+ * CRC delimiter 1
+ * ACK slot 1
+ * ACK delimiter 1
+ * End-of-frame (EOF) 7
+ * Inter frame spacing 3
+ *
+ * rounded up and ignoring bitstuffing
+ */
+#define CAN_FRAME_OVERHEAD_SFF DIV_ROUND_UP(47, 8)
+
+/*
+ * Size of a Classical CAN Extended Frame
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start-of-frame 1
+ * Identifier A 11
+ * Substitute remote request (SRR) 1
+ * Identifier extension bit (IDE) 1
+ * Identifier B 18
+ * Remote transmission request (RTR) 1
+ * Reserved bits (r1, r0) 2
+ * Data length code (DLC) 4
+ * Data field 0...64
+ * CRC 15
+ * CRC delimiter 1
+ * ACK slot 1
+ * ACK delimiter 1
+ * End-of-frame (EOF) 7
+ * Inter frame spacing 3
+ *
+ * rounded up and ignoring bitstuffing
+ */
+#define CAN_FRAME_OVERHEAD_EFF DIV_ROUND_UP(67, 8)
+
+/*
+ * Size of a CAN-FD Standard Frame
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start-of-frame 1
+ * Identifier 11
+ * Reserved bit (r1) 1
+ * Identifier extension bit (IDE) 1
+ * Flexible data rate format (FDF) 1
+ * Reserved bit (r0) 1
+ * Bit Rate Switch (BRS) 1
+ * Error Status Indicator (ESI) 1
+ * Data length code (DLC) 4
+ * Data field 0...512
+ * Stuff Bit Count (SBC) 0...16: 4, 20...64: 5
+ * CRC 0...16: 17, 20...64: 21
+ * CRC delimiter (CD) 1
+ * ACK slot (AS) 1
+ * ACK delimiter (AD) 1
+ * End-of-frame (EOF) 7
+ * Inter frame spacing 3
+ *
+ * assuming CRC21, rounded up and ignoring bitstuffing
+ */
+#define CANFD_FRAME_OVERHEAD_SFF DIV_ROUND_UP(61, 8)
+
+/*
+ * Size of a CAN-FD Extended Frame
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start-of-frame 1
+ * Identifier A 11
+ * Substitute remote request (SRR) 1
+ * Identifier extension bit (IDE) 1
+ * Identifier B 18
+ * Reserved bit (r1) 1
+ * Flexible data rate format (FDF) 1
+ * Reserved bit (r0) 1
+ * Bit Rate Switch (BRS) 1
+ * Error Status Indicator (ESI) 1
+ * Data length code (DLC) 4
+ * Data field 0...512
+ * Stuff Bit Count (SBC) 0...16: 4, 20...64: 5
+ * CRC 0...16: 17, 20...64: 21
+ * CRC delimiter (CD) 1
+ * ACK slot (AS) 1
+ * ACK delimiter (AD) 1
+ * End-of-frame (EOF) 7
+ * Inter frame spacing 3
+ *
+ * assuming CRC21, rounded up and ignoring bitstuffing
+ */
+#define CANFD_FRAME_OVERHEAD_EFF DIV_ROUND_UP(80, 8)
+
+/*
+ * Maximum size of a Classical CAN frame
+ * (rounded up and ignoring bitstuffing)
+ */
+#define CAN_FRAME_LEN_MAX (CAN_FRAME_OVERHEAD_EFF + CAN_MAX_DLEN)
+
+/*
+ * Maximum size of a CAN-FD frame
+ * (rounded up and ignoring bitstuffing)
+ */
+#define CANFD_FRAME_LEN_MAX (CANFD_FRAME_OVERHEAD_EFF + CANFD_MAX_DLEN)
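
Evaluating the macros makes the two maxima easy to check by hand (all
figures in bytes, rounded up and ignoring bitstuffing):

CAN_FRAME_OVERHEAD_SFF   = DIV_ROUND_UP(47, 8) =  6
CAN_FRAME_OVERHEAD_EFF   = DIV_ROUND_UP(67, 8) =  9
CANFD_FRAME_OVERHEAD_SFF = DIV_ROUND_UP(61, 8) =  8
CANFD_FRAME_OVERHEAD_EFF = DIV_ROUND_UP(80, 8) = 10

CAN_FRAME_LEN_MAX   = CAN_FRAME_OVERHEAD_EFF   + CAN_MAX_DLEN   =  9 +  8 = 17
CANFD_FRAME_LEN_MAX = CANFD_FRAME_OVERHEAD_EFF + CANFD_MAX_DLEN = 10 + 64 = 74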
+
+/*
+ * can_cc_dlc2len(value) - convert a given data length code (dlc) of a
+ * Classical CAN frame into a valid data length of max. 8 bytes.
+ *
+ * To be used in the CAN netdriver receive path to ensure conformance with
+ * ISO 11898-1 Chapter 8.4.2.3 (DLC field)
+ */
+#define can_cc_dlc2len(dlc) (min_t(u8, (dlc), CAN_MAX_DLEN))
+
+/* helper to get the data length code (DLC) for Classical CAN raw DLC access */
+static inline u8 can_get_cc_dlc(const struct can_frame *cf, const u32 ctrlmode)
+{
+ /* return len8_dlc as dlc value only if all conditions apply */
+ if ((ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC) &&
+ (cf->len == CAN_MAX_DLEN) &&
+ (cf->len8_dlc > CAN_MAX_DLEN && cf->len8_dlc <= CAN_MAX_RAW_DLC))
+ return cf->len8_dlc;
+
+ /* return the payload length as dlc value */
+ return cf->len;
+}
+
+/* helper to set len and len8_dlc value for Classical CAN raw DLC access */
+static inline void can_frame_set_cc_len(struct can_frame *cf, const u8 dlc,
+ const u32 ctrlmode)
+{
+ /* the caller already ensured that dlc is a value from 0 .. 15 */
+ if (ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC && dlc > CAN_MAX_DLEN)
+ cf->len8_dlc = dlc;
+
+ /* limit the payload length 'len' to CAN_MAX_DLEN */
+ cf->len = can_cc_dlc2len(dlc);
+}
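
With CAN_CTRLMODE_CC_LEN8_DLC enabled, the two helpers round-trip a raw
DLC greater than 8. A short hypothetical sequence:

/* receive path: raw DLC 12 arrives in a Classical CAN frame */
can_frame_set_cc_len(cf, 12, priv->ctrlmode);
/* now cf->len == 8 (payload clamped) and cf->len8_dlc == 12 */

/* transmit path: recover the raw DLC for the controller */
u8 dlc = can_get_cc_dlc(cf, priv->ctrlmode);	/* dlc == 12 */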
+
+/* get data length from raw data length code (DLC) */
+u8 can_fd_dlc2len(u8 dlc);
+
+/* map the sanitized data length to an appropriate data length code */
+u8 can_fd_len2dlc(u8 len);
+
+/* calculate the CAN Frame length in bytes of a given skb */
+unsigned int can_skb_get_frame_len(const struct sk_buff *skb);
+
+/* map the data length to an appropriate data link layer length */
+static inline u8 canfd_sanitize_len(u8 len)
+{
+ return can_fd_dlc2len(can_fd_len2dlc(len));
+}
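
Because CAN FD only encodes the discrete payload lengths 0..8, 12, 16,
20, 24, 32, 48 and 64, canfd_sanitize_len() rounds a requested length
up to the next valid step. For example (hypothetical call sites):

canfd_sanitize_len(8);	/* ->  8, already a valid step */
canfd_sanitize_len(13);	/* -> 16: len2dlc yields DLC 10, dlc2len 16 */
canfd_sanitize_len(64);	/* -> 64, the maximum CAN FD payload */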
+
+#endif /* !_CAN_LENGTH_H */
diff --git a/include/linux/can/platform/flexcan.h b/include/linux/can/platform/flexcan.h
new file mode 100644
index 000000000000..1b536fb999de
--- /dev/null
+++ b/include/linux/can/platform/flexcan.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Angelo Dureghello <angelo@kernel-space.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAN_PLATFORM_FLEXCAN_H
+#define _CAN_PLATFORM_FLEXCAN_H
+
+struct flexcan_platform_data {
+ u32 clock_frequency;
+ u8 clk_src;
+};
+
+#endif /* _CAN_PLATFORM_FLEXCAN_H */
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index 1b78a0cfb615..c205c51d79c9 100644
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -20,6 +20,7 @@ struct can_rx_offload {
bool drop);
struct sk_buff_head skb_queue;
+ struct sk_buff_head skb_irq_queue;
u32 skb_queue_len_max;
unsigned int mb_first;
@@ -35,23 +36,24 @@ int can_rx_offload_add_timestamp(struct net_device *dev,
int can_rx_offload_add_fifo(struct net_device *dev,
struct can_rx_offload *offload,
unsigned int weight);
+int can_rx_offload_add_manual(struct net_device *dev,
+ struct can_rx_offload *offload,
+ unsigned int weight);
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
u64 reg);
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
-int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
- struct sk_buff *skb, u32 timestamp);
+int can_rx_offload_queue_timestamp(struct can_rx_offload *offload,
+ struct sk_buff *skb, u32 timestamp);
unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
- unsigned int idx, u32 timestamp);
+ unsigned int idx, u32 timestamp,
+ unsigned int *frame_len_ptr);
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
struct sk_buff *skb);
+void can_rx_offload_irq_finish(struct can_rx_offload *offload);
+void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload);
void can_rx_offload_del(struct can_rx_offload *offload);
void can_rx_offload_enable(struct can_rx_offload *offload);
-static inline void can_rx_offload_schedule(struct can_rx_offload *offload)
-{
- napi_schedule(&offload->napi);
-}
-
static inline void can_rx_offload_disable(struct can_rx_offload *offload)
{
napi_disable(&offload->napi);
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index a954def26c0d..1abc25a8d144 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -16,6 +16,27 @@
#include <linux/can.h>
#include <net/sock.h>
+void can_flush_echo_skb(struct net_device *dev);
+int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ unsigned int idx, unsigned int frame_len);
+struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *len_ptr,
+ unsigned int *frame_len_ptr);
+unsigned int __must_check can_get_echo_skb(struct net_device *dev,
+ unsigned int idx,
+ unsigned int *frame_len_ptr);
+void can_free_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *frame_len_ptr);
+struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
+struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+ struct canfd_frame **cfd);
+struct sk_buff *alloc_canxl_skb(struct net_device *dev,
+ struct canxl_frame **cxl,
+ unsigned int data_len);
+struct sk_buff *alloc_can_err_skb(struct net_device *dev,
+ struct can_frame **cf);
+bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb);
+
/*
* The struct can_skb_priv is used to transport additional information along
* with the stored struct can(fd)_frame that can not be contained in existing
@@ -29,12 +50,14 @@
* struct can_skb_priv - private additional data inside CAN sk_buffs
* @ifindex: ifindex of the first interface the CAN frame appeared on
* @skbcnt: atomic counter to have a unique id together with skb pointer
+ * @frame_len: length of CAN frame in data link layer
* @cf: align to the following CAN frame at skb->data
*/
struct can_skb_priv {
int ifindex;
int skbcnt;
- struct can_frame cf[0];
+ unsigned int frame_len;
+ struct can_frame cf[];
};
static inline struct can_skb_priv *can_skb_prv(struct sk_buff *skb)
@@ -49,8 +72,12 @@ static inline void can_skb_reserve(struct sk_buff *skb)
static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
{
- if (sk) {
- sock_hold(sk);
+ /* If the socket has already been closed by user space, the
+ * refcount may already be 0 (and the socket will be freed
+ * after the last TX skb has been freed). So only increase
+ * socket refcount if the refcount is > 0.
+ */
+ if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
skb->destructor = sock_efree;
skb->sk = sk;
}
@@ -61,21 +88,72 @@ static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
*/
static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
{
- if (skb_shared(skb)) {
- struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
-
- if (likely(nskb)) {
- can_skb_set_owner(nskb, skb->sk);
- consume_skb(skb);
- return nskb;
- } else {
- kfree_skb(skb);
- return NULL;
- }
+ struct sk_buff *nskb;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!nskb)) {
+ kfree_skb(skb);
+ return NULL;
}
- /* we can assume to have an unshared skb with proper owner */
- return skb;
+ can_skb_set_owner(nskb, skb->sk);
+ consume_skb(skb);
+ return nskb;
+}
+
+static inline bool can_is_can_skb(const struct sk_buff *skb)
+{
+ struct can_frame *cf = (struct can_frame *)skb->data;
+
+ /* the CAN specific type of skb is identified by its data length */
+ return (skb->len == CAN_MTU && cf->len <= CAN_MAX_DLEN);
+}
+
+static inline bool can_is_canfd_skb(const struct sk_buff *skb)
+{
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ /* the CAN specific type of skb is identified by its data length */
+ return (skb->len == CANFD_MTU && cfd->len <= CANFD_MAX_DLEN);
+}
+
+static inline bool can_is_canxl_skb(const struct sk_buff *skb)
+{
+ const struct canxl_frame *cxl = (struct canxl_frame *)skb->data;
+
+ if (skb->len < CANXL_HDR_SIZE + CANXL_MIN_DLEN || skb->len > CANXL_MTU)
+ return false;
+
+ /* this also checks valid CAN XL data length boundaries */
+ if (skb->len != CANXL_HDR_SIZE + cxl->len)
+ return false;
+
+ return cxl->flags & CANXL_XLF;
+}
+
+/* get length element value from can[|fd|xl]_frame structure */
+static inline unsigned int can_skb_get_len_val(struct sk_buff *skb)
+{
+ const struct canxl_frame *cxl = (struct canxl_frame *)skb->data;
+ const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ if (can_is_canxl_skb(skb))
+ return cxl->len;
+
+ return cfd->len;
+}
+
+/* get needed data length inside CAN frame for all frame types (RTR aware) */
+static inline unsigned int can_skb_get_data_len(struct sk_buff *skb)
+{
+ unsigned int len = can_skb_get_len_val(skb);
+ const struct can_frame *cf = (struct can_frame *)skb->data;
+
+ /* RTR frames have an actual length of zero */
+ if (can_is_can_skb(skb) && cf->can_id & CAN_RTR_FLAG)
+ return 0;
+
+ return len;
}
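
The RTR special case matters because a remote frame advertises a data
length without carrying any payload. A hypothetical illustration:

cf->can_id = 0x123 | CAN_RTR_FLAG;
cf->len = 4;	/* requested length, but no data bytes on the wire */

/* can_skb_get_len_val(skb)  == 4 (raw length element)
 * can_skb_get_data_len(skb) == 0 (RTR: nothing to copy) */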
#endif /* !_CAN_SKB_H */
diff --git a/include/linux/capability.h b/include/linux/capability.h
index ecce0f43c73a..65efb74c3585 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -247,14 +247,35 @@ static inline bool ns_capable_setid(struct user_namespace *ns, int cap)
return true;
}
#endif /* CONFIG_MULTIUSER */
-extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode);
-extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
+bool privileged_wrt_inode_uidgid(struct user_namespace *ns,
+ struct user_namespace *mnt_userns,
+ const struct inode *inode);
+bool capable_wrt_inode_uidgid(struct user_namespace *mnt_userns,
+ const struct inode *inode, int cap);
extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns);
+static inline bool perfmon_capable(void)
+{
+ return capable(CAP_PERFMON) || capable(CAP_SYS_ADMIN);
+}
+
+static inline bool bpf_capable(void)
+{
+ return capable(CAP_BPF) || capable(CAP_SYS_ADMIN);
+}
+
+static inline bool checkpoint_restore_ns_capable(struct user_namespace *ns)
+{
+ return ns_capable(ns, CAP_CHECKPOINT_RESTORE) ||
+ ns_capable(ns, CAP_SYS_ADMIN);
+}
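
A usage sketch for the new wrappers: a subsystem gates a privileged
operation on the dedicated capability, and CAP_SYS_ADMIN keeps working
because the helpers fold it in. The handler fragment is hypothetical:

if (!perfmon_capable())
	return -EACCES;	/* needs CAP_PERFMON or CAP_SYS_ADMIN */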
/* audit system wants to get cap info from files as well */
-extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
+int get_vfs_caps_from_disk(struct user_namespace *mnt_userns,
+ const struct dentry *dentry,
+ struct cpu_vfs_cap_data *cpu_caps);
-extern int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size);
+int cap_convert_nscap(struct user_namespace *mnt_userns, struct dentry *dentry,
+ const void **ivalue, size_t size);
#endif /* !_LINUX_CAPABILITY_H */
diff --git a/include/linux/cb710.h b/include/linux/cb710.h
index 60de3fedd3a7..405657a9a0d5 100644
--- a/include/linux/cb710.h
+++ b/include/linux/cb710.h
@@ -36,7 +36,7 @@ struct cb710_chip {
unsigned slot_mask;
unsigned slots;
spinlock_t irq_lock;
- struct cb710_slot slot[0];
+ struct cb710_slot slot[];
};
/* NOTE: cb710_chip.slots is modified only during device init/exit and
diff --git a/include/linux/cc_platform.h b/include/linux/cc_platform.h
new file mode 100644
index 000000000000..cb0d6cd1c12f
--- /dev/null
+++ b/include/linux/cc_platform.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Confidential Computing Platform Capability checks
+ *
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ */
+
+#ifndef _LINUX_CC_PLATFORM_H
+#define _LINUX_CC_PLATFORM_H
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+
+/**
+ * enum cc_attr - Confidential computing attributes
+ *
+ * These attributes represent confidential computing features that are
+ * currently active.
+ */
+enum cc_attr {
+ /**
+ * @CC_ATTR_MEM_ENCRYPT: Memory encryption is active
+ *
+ * The platform/OS is running with active memory encryption. This
+ * includes running either as a bare-metal system or a hypervisor
+ * and actively using memory encryption or as a guest/virtual machine
+ * and actively using memory encryption.
+ *
+ * Examples include SME, SEV and SEV-ES.
+ */
+ CC_ATTR_MEM_ENCRYPT,
+
+ /**
+ * @CC_ATTR_HOST_MEM_ENCRYPT: Host memory encryption is active
+ *
+ * The platform/OS is running as a bare-metal system or a hypervisor
+ * and actively using memory encryption.
+ *
+ * Examples include SME.
+ */
+ CC_ATTR_HOST_MEM_ENCRYPT,
+
+ /**
+ * @CC_ATTR_GUEST_MEM_ENCRYPT: Guest memory encryption is active
+ *
+ * The platform/OS is running as a guest/virtual machine and actively
+ * using memory encryption.
+ *
+ * Examples include SEV and SEV-ES.
+ */
+ CC_ATTR_GUEST_MEM_ENCRYPT,
+
+ /**
+ * @CC_ATTR_GUEST_STATE_ENCRYPT: Guest state encryption is active
+ *
+ * The platform/OS is running as a guest/virtual machine and actively
+ * using memory encryption and register state encryption.
+ *
+ * Examples include SEV-ES.
+ */
+ CC_ATTR_GUEST_STATE_ENCRYPT,
+
+ /**
+ * @CC_ATTR_GUEST_UNROLL_STRING_IO: String I/O is implemented with
+ * IN/OUT instructions
+ *
+ * The platform/OS is running as a guest/virtual machine and uses
+ * IN/OUT instructions in place of string I/O.
+ *
+ * Examples include TDX guest & SEV.
+ */
+ CC_ATTR_GUEST_UNROLL_STRING_IO,
+
+ /**
+ * @CC_ATTR_GUEST_SEV_SNP: Guest SNP is active.
+ *
+ * The platform/OS is running as a guest/virtual machine and actively
+ * using AMD SEV-SNP features.
+ */
+ CC_ATTR_GUEST_SEV_SNP,
+
+ /**
+ * @CC_ATTR_HOTPLUG_DISABLED: Hotplug is not supported or disabled.
+ *
+ * The platform/OS is running as a guest/virtual machine and does
+ * not support the CPU hotplug feature.
+ *
+ * Examples include TDX Guest.
+ */
+ CC_ATTR_HOTPLUG_DISABLED,
+};
+
+#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
+
+/**
+ * cc_platform_has() - Checks if the specified cc_attr attribute is active
+ * @attr: Confidential computing attribute to check
+ *
+ * The cc_platform_has() function will return an indicator as to whether the
+ * specified Confidential Computing attribute is currently active.
+ *
+ * Context: Any context
+ * Return:
+ * * TRUE - Specified Confidential Computing attribute is active
+ * * FALSE - Specified Confidential Computing attribute is not active
+ */
+bool cc_platform_has(enum cc_attr attr);
+
+#else /* !CONFIG_ARCH_HAS_CC_PLATFORM */
+
+static inline bool cc_platform_has(enum cc_attr attr) { return false; }
+
+#endif /* CONFIG_ARCH_HAS_CC_PLATFORM */
+
+#endif /* _LINUX_CC_PLATFORM_H */
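
A call-site sketch (function name hypothetical): callers simply query
the attribute, and when CONFIG_ARCH_HAS_CC_PLATFORM is off the stub
returns false, so no further #ifdef is needed:

static bool example_force_dma_bounce(void)
{
	/* true for SEV/TDX-style encrypted guests, false otherwise */
	return cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);
}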
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index a5dfbaf2470d..868924dec5a1 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -15,7 +15,8 @@
#include <linux/workqueue.h>
#include <linux/list.h>
#include <crypto/aes.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
struct ccp_device;
struct ccp_cmd;
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index 528271c60018..67caa909e3e6 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -64,6 +64,7 @@ struct cdrom_device_info {
int for_data;
int (*exit)(struct cdrom_device_info *);
int mrw_mode_page;
+ __s64 last_media_change_ms;
};
struct cdrom_device_ops {
@@ -73,11 +74,9 @@ struct cdrom_device_ops {
int (*drive_status) (struct cdrom_device_info *, int);
unsigned int (*check_events) (struct cdrom_device_info *cdi,
unsigned int clearing, int slot);
- int (*media_changed) (struct cdrom_device_info *, int);
int (*tray_move) (struct cdrom_device_info *, int);
int (*lock_door) (struct cdrom_device_info *, int);
int (*select_speed) (struct cdrom_device_info *, int);
- int (*select_disc) (struct cdrom_device_info *, int);
int (*get_last_session) (struct cdrom_device_info *,
struct cdrom_multisession *);
int (*get_mcn) (struct cdrom_device_info *,
@@ -87,13 +86,20 @@ struct cdrom_device_ops {
/* play stuff */
int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *);
-/* driver specifications */
- const int capability; /* capability flags */
/* handle uniform packets for scsi type devices (scsi,atapi) */
int (*generic_packet) (struct cdrom_device_info *,
struct packet_command *);
+ int (*read_cdda_bpc)(struct cdrom_device_info *cdi, void __user *ubuf,
+ u32 lba, u32 nframes, u8 *last_sense);
+/* driver specifications */
+ const int capability; /* capability flags */
};
+int cdrom_multisession(struct cdrom_device_info *cdi,
+ struct cdrom_multisession *info);
+int cdrom_read_tocentry(struct cdrom_device_info *cdi,
+ struct cdrom_tocentry *entry);
+
/* the general block_device operations structure: */
extern int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
fmode_t mode);
@@ -102,9 +108,8 @@ extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
fmode_t mode, unsigned int cmd, unsigned long arg);
extern unsigned int cdrom_check_events(struct cdrom_device_info *cdi,
unsigned int clearing);
-extern int cdrom_media_changed(struct cdrom_device_info *);
-extern int register_cdrom(struct cdrom_device_info *cdi);
+extern int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi);
extern void unregister_cdrom(struct cdrom_device_info *cdi);
typedef struct {
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index 6728c2ee0205..6b138fa97db8 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -32,8 +32,6 @@ struct ceph_auth_handshake {
};
struct ceph_auth_client_ops {
- const char *name;
-
/*
* true if we are authenticated and can connect to
* services.
@@ -52,8 +50,10 @@ struct ceph_auth_client_ops {
* another request.
*/
int (*build_request)(struct ceph_auth_client *ac, void *buf, void *end);
- int (*handle_reply)(struct ceph_auth_client *ac, int result,
- void *buf, void *end);
+ int (*handle_reply)(struct ceph_auth_client *ac, u64 global_id,
+ void *buf, void *end, u8 *session_key,
+ int *session_key_len, u8 *con_secret,
+ int *con_secret_len);
/*
* Create authorizer for connecting to a service, and verify
@@ -69,7 +69,10 @@ struct ceph_auth_client_ops {
void *challenge_buf,
int challenge_buf_len);
int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
- struct ceph_authorizer *a);
+ struct ceph_authorizer *a,
+ void *reply, int reply_len,
+ u8 *session_key, int *session_key_len,
+ u8 *con_secret, int *con_secret_len);
void (*invalidate_authorizer)(struct ceph_auth_client *ac,
int peer_type);
@@ -95,11 +98,17 @@ struct ceph_auth_client {
const struct ceph_crypto_key *key; /* our secret key */
unsigned want_keys; /* which services we want */
+ int preferred_mode; /* CEPH_CON_MODE_* */
+ int fallback_mode; /* ditto */
+
struct mutex mutex;
};
-extern struct ceph_auth_client *ceph_auth_init(const char *name,
- const struct ceph_crypto_key *key);
+void ceph_auth_set_global_id(struct ceph_auth_client *ac, u64 global_id);
+
+struct ceph_auth_client *ceph_auth_init(const char *name,
+ const struct ceph_crypto_key *key,
+ const int *con_modes);
extern void ceph_auth_destroy(struct ceph_auth_client *ac);
extern void ceph_auth_reset(struct ceph_auth_client *ac);
@@ -113,21 +122,22 @@ int ceph_auth_entity_name_encode(const char *name, void **p, void *end);
extern int ceph_build_auth(struct ceph_auth_client *ac,
void *msg_buf, size_t msg_len);
-
extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac);
-extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
- int peer_type,
- struct ceph_auth_handshake *auth);
+
+int __ceph_auth_get_authorizer(struct ceph_auth_client *ac,
+ struct ceph_auth_handshake *auth,
+ int peer_type, bool force_new,
+ int *proto, int *pref_mode, int *fallb_mode);
void ceph_auth_destroy_authorizer(struct ceph_authorizer *a);
-extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
- int peer_type,
- struct ceph_auth_handshake *a);
int ceph_auth_add_authorizer_challenge(struct ceph_auth_client *ac,
struct ceph_authorizer *a,
void *challenge_buf,
int challenge_buf_len);
-extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
- struct ceph_authorizer *a);
+int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
+ struct ceph_authorizer *a,
+ void *reply, int reply_len,
+ u8 *session_key, int *session_key_len,
+ u8 *con_secret, int *con_secret_len);
extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac,
int peer_type);
@@ -147,4 +157,34 @@ int ceph_auth_check_message_signature(struct ceph_auth_handshake *auth,
return auth->check_message_signature(auth, msg);
return 0;
}
+
+int ceph_auth_get_request(struct ceph_auth_client *ac, void *buf, int buf_len);
+int ceph_auth_handle_reply_more(struct ceph_auth_client *ac, void *reply,
+ int reply_len, void *buf, int buf_len);
+int ceph_auth_handle_reply_done(struct ceph_auth_client *ac,
+ u64 global_id, void *reply, int reply_len,
+ u8 *session_key, int *session_key_len,
+ u8 *con_secret, int *con_secret_len);
+bool ceph_auth_handle_bad_method(struct ceph_auth_client *ac,
+ int used_proto, int result,
+ const int *allowed_protos, int proto_cnt,
+ const int *allowed_modes, int mode_cnt);
+
+int ceph_auth_get_authorizer(struct ceph_auth_client *ac,
+ struct ceph_auth_handshake *auth,
+ int peer_type, void *buf, int *buf_len);
+int ceph_auth_handle_svc_reply_more(struct ceph_auth_client *ac,
+ struct ceph_auth_handshake *auth,
+ void *reply, int reply_len,
+ void *buf, int *buf_len);
+int ceph_auth_handle_svc_reply_done(struct ceph_auth_client *ac,
+ struct ceph_auth_handshake *auth,
+ void *reply, int reply_len,
+ u8 *session_key, int *session_key_len,
+ u8 *con_secret, int *con_secret_len);
+bool ceph_auth_handle_bad_authorizer(struct ceph_auth_client *ac,
+ int peer_type, int used_proto, int result,
+ const int *allowed_protos, int proto_cnt,
+ const int *allowed_modes, int mode_cnt);
+
#endif
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index 39e6f4c57580..3a47acd9cc14 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -8,17 +8,18 @@
* feature. Base case is 1 (first use).
*/
#define CEPH_FEATURE_INCARNATION_1 (0ull)
-#define CEPH_FEATURE_INCARNATION_2 (1ull<<57) // CEPH_FEATURE_SERVER_JEWEL
+#define CEPH_FEATURE_INCARNATION_2 (1ull<<57) // SERVER_JEWEL
+#define CEPH_FEATURE_INCARNATION_3 ((1ull<<57)|(1ull<<28)) // SERVER_MIMIC
#define DEFINE_CEPH_FEATURE(bit, incarnation, name) \
- static const uint64_t CEPH_FEATURE_##name = (1ULL<<bit); \
- static const uint64_t CEPH_FEATUREMASK_##name = \
+ static const uint64_t __maybe_unused CEPH_FEATURE_##name = (1ULL<<bit); \
+ static const uint64_t __maybe_unused CEPH_FEATUREMASK_##name = \
(1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation);
/* this bit is ignored but still advertised by release *when* */
#define DEFINE_CEPH_FEATURE_DEPRECATED(bit, incarnation, name, when) \
- static const uint64_t DEPRECATED_CEPH_FEATURE_##name = (1ULL<<bit); \
- static const uint64_t DEPRECATED_CEPH_FEATUREMASK_##name = \
+ static const uint64_t __maybe_unused DEPRECATED_CEPH_FEATURE_##name = (1ULL<<bit); \
+ static const uint64_t __maybe_unused DEPRECATED_CEPH_FEATUREMASK_##name = \
(1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation);
/*
@@ -58,7 +59,7 @@
* because 10.2.z (jewel) did not care if its peers advertised this
* feature bit.
*
- * - In the second phase we stop advertising the the bit and call it
+ * - In the second phase we stop advertising the bit and call it
* RETIRED. This can normally be done in the *next* major release
* following the one in which we marked the feature DEPRECATED. In
* the above example, for 12.0.z (luminous) we can say:
@@ -75,7 +76,7 @@
DEFINE_CEPH_FEATURE( 0, 1, UID)
DEFINE_CEPH_FEATURE( 1, 1, NOSRCADDR)
DEFINE_CEPH_FEATURE_RETIRED( 2, 1, MONCLOCKCHECK, JEWEL, LUMINOUS)
-
+DEFINE_CEPH_FEATURE( 2, 3, SERVER_NAUTILUS)
DEFINE_CEPH_FEATURE( 3, 1, FLOCK)
DEFINE_CEPH_FEATURE( 4, 1, SUBSCRIBE2)
DEFINE_CEPH_FEATURE( 5, 1, MONNAMES)
@@ -114,7 +115,7 @@ DEFINE_CEPH_FEATURE(25, 1, CRUSH_TUNABLES2)
DEFINE_CEPH_FEATURE(26, 1, CREATEPOOLID)
DEFINE_CEPH_FEATURE(27, 1, REPLY_CREATE_INODE)
DEFINE_CEPH_FEATURE_RETIRED(28, 1, OSD_HBMSGS, HAMMER, JEWEL)
-DEFINE_CEPH_FEATURE(28, 2, SERVER_M)
+DEFINE_CEPH_FEATURE(28, 2, SERVER_MIMIC)
DEFINE_CEPH_FEATURE(29, 1, MDSENC)
DEFINE_CEPH_FEATURE(30, 1, OSDHASHPSPOOL)
DEFINE_CEPH_FEATURE(31, 1, MON_SINGLE_PAXOS) // deprecate me
@@ -177,13 +178,16 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
*/
#define CEPH_FEATURES_SUPPORTED_DEFAULT \
(CEPH_FEATURE_NOSRCADDR | \
+ CEPH_FEATURE_SERVER_NAUTILUS | \
CEPH_FEATURE_FLOCK | \
CEPH_FEATURE_SUBSCRIBE2 | \
+ CEPH_FEATURE_MONNAMES | \
CEPH_FEATURE_RECONNECT_SEQ | \
CEPH_FEATURE_DIRLAYOUTHASH | \
CEPH_FEATURE_PGID64 | \
CEPH_FEATURE_PGPOOL3 | \
CEPH_FEATURE_OSDENC | \
+ CEPH_FEATURE_MONENC | \
CEPH_FEATURE_CRUSH_TUNABLES | \
CEPH_FEATURE_SERVER_LUMINOUS | \
CEPH_FEATURE_RESEND_ON_SPLIT | \
@@ -193,6 +197,7 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
CEPH_FEATURE_MSG_AUTH | \
CEPH_FEATURE_CRUSH_TUNABLES2 | \
CEPH_FEATURE_REPLY_CREATE_INODE | \
+ CEPH_FEATURE_SERVER_MIMIC | \
CEPH_FEATURE_MDSENC | \
CEPH_FEATURE_OSDHASHPSPOOL | \
CEPH_FEATURE_OSD_CACHEPOOL | \
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index cb21c5cf12c3..49586ff26152 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -28,8 +28,8 @@
#define CEPH_INO_ROOT 1
-#define CEPH_INO_CEPH 2 /* hidden .ceph dir */
-#define CEPH_INO_DOTDOT 3 /* used by ceph fuse for parent (..) */
+#define CEPH_INO_CEPH 2 /* hidden .ceph dir */
+#define CEPH_INO_GLOBAL_SNAPREALM 3 /* global dummy snaprealm */
/* arbitrary limit on max # of monitors (cluster of 3 is typical) */
#define CEPH_MAX_MON 31
@@ -93,8 +93,19 @@ struct ceph_dir_layout {
#define CEPH_AUTH_NONE 0x1
#define CEPH_AUTH_CEPHX 0x2
+#define CEPH_AUTH_MODE_NONE 0
+#define CEPH_AUTH_MODE_AUTHORIZER 1
+#define CEPH_AUTH_MODE_MON 10
+
+/* msgr2 protocol modes */
+#define CEPH_CON_MODE_UNKNOWN 0x0
+#define CEPH_CON_MODE_CRC 0x1
+#define CEPH_CON_MODE_SECURE 0x2
+
#define CEPH_AUTH_UID_DEFAULT ((__u64) -1)
+const char *ceph_auth_proto_name(int proto);
+const char *ceph_con_mode_name(int mode);
/*********************************************
* message layer
@@ -130,6 +141,7 @@ struct ceph_dir_layout {
#define CEPH_MSG_CLIENT_REQUEST 24
#define CEPH_MSG_CLIENT_REQUEST_FORWARD 25
#define CEPH_MSG_CLIENT_REPLY 26
+#define CEPH_MSG_CLIENT_METRICS 29
#define CEPH_MSG_CLIENT_CAPS 0x310
#define CEPH_MSG_CLIENT_LEASE 0x311
#define CEPH_MSG_CLIENT_SNAP 0x312
@@ -287,8 +299,11 @@ enum {
CEPH_SESSION_FLUSHMSG_ACK,
CEPH_SESSION_FORCE_RO,
CEPH_SESSION_REJECT,
+ CEPH_SESSION_REQUEST_FLUSH_MDLOG,
};
+#define CEPH_SESSION_BLOCKLISTED (1 << 0) /* session blocklisted */
+
extern const char *ceph_session_op_name(int op);
struct ceph_mds_session_head {
@@ -313,6 +328,7 @@ enum {
CEPH_MDS_OP_LOOKUPPARENT = 0x00103,
CEPH_MDS_OP_LOOKUPINO = 0x00104,
CEPH_MDS_OP_LOOKUPNAME = 0x00105,
+ CEPH_MDS_OP_GETVXATTR = 0x00106,
CEPH_MDS_OP_SETXATTR = 0x01105,
CEPH_MDS_OP_RMXATTR = 0x01106,
@@ -417,12 +433,13 @@ union ceph_mds_request_args {
__le32 stripe_unit; /* layout for newly created file */
__le32 stripe_count; /* ... */
__le32 object_size;
- __le32 file_replication;
- __le32 mask; /* CEPH_CAP_* */
- __le32 old_size;
+ __le32 pool;
+ __le32 mask; /* CEPH_CAP_* */
+ __le64 old_size;
} __attribute__ ((packed)) open;
struct {
__le32 flags;
+ __le32 osdmap_epoch; /* used for setting file/dir layouts */
} __attribute__ ((packed)) setxattr;
struct {
struct ceph_file_layout_legacy layout;
@@ -444,10 +461,25 @@ union ceph_mds_request_args {
} __attribute__ ((packed)) lookupino;
} __attribute__ ((packed));
-#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */
-#define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */
+union ceph_mds_request_args_ext {
+ union ceph_mds_request_args old;
+ struct {
+ __le32 mode;
+ __le32 uid;
+ __le32 gid;
+ struct ceph_timespec mtime;
+ struct ceph_timespec atime;
+ __le64 size, old_size; /* old_size needed by truncate */
+ __le32 mask; /* CEPH_SETATTR_* */
+ struct ceph_timespec btime;
+ } __attribute__ ((packed)) setattr_ext;
+};
-struct ceph_mds_request_head {
+#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */
+#define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */
+#define CEPH_MDS_FLAG_ASYNC 4 /* request is asynchronous */
+
+struct ceph_mds_request_head_old {
__le64 oldest_client_tid;
__le32 mdsmap_epoch; /* on client */
__le32 flags; /* CEPH_MDS_FLAG_* */
@@ -460,6 +492,22 @@ struct ceph_mds_request_head {
union ceph_mds_request_args args;
} __attribute__ ((packed));
+#define CEPH_MDS_REQUEST_HEAD_VERSION 1
+
+struct ceph_mds_request_head {
+ __le16 version; /* struct version */
+ __le64 oldest_client_tid;
+ __le32 mdsmap_epoch; /* on client */
+ __le32 flags; /* CEPH_MDS_FLAG_* */
+ __u8 num_retry, num_fwd; /* count retry, fwd attempts */
+ __le16 num_releases; /* # include cap/lease release records */
+ __le32 op; /* mds op code */
+ __le32 caller_uid, caller_gid;
+ __le64 ino; /* use this ino for openc, mkdir, mknod,
+ etc. (if replaying) */
+ union ceph_mds_request_args_ext args;
+} __attribute__ ((packed));
+
/* cap/lease release record */
struct ceph_mds_request_release {
__le64 ino, cap_id; /* ino and unique cap id */
@@ -530,6 +578,9 @@ struct ceph_mds_reply_lease {
__le32 seq;
} __attribute__ ((packed));
+#define CEPH_LEASE_VALID (1 | 2) /* old and new bit values */
+#define CEPH_LEASE_PRIMARY_LINK 4 /* primary linkage */
+
struct ceph_mds_reply_dirfrag {
__le32 frag; /* fragment */
__le32 auth; /* auth mds, if this is a delegation point */
@@ -564,6 +615,7 @@ struct ceph_filelock {
#define CEPH_FILE_MODE_RDWR 3 /* RD | WR */
#define CEPH_FILE_MODE_LAZY 4 /* lazy io */
#define CEPH_FILE_MODE_BITS 4
+#define CEPH_FILE_MODE_MASK ((1 << CEPH_FILE_MODE_BITS) - 1)
int ceph_flags_to_mode(int flags);
@@ -655,10 +707,19 @@ int ceph_flags_to_mode(int flags);
#define CEPH_CAP_ANY (CEPH_CAP_ANY_RD | CEPH_CAP_ANY_EXCL | \
CEPH_CAP_ANY_FILE_WR | CEPH_CAP_FILE_LAZYIO | \
CEPH_CAP_PIN)
+#define CEPH_CAP_ALL_FILE (CEPH_CAP_PIN | CEPH_CAP_ANY_SHARED | \
+ CEPH_CAP_AUTH_EXCL | CEPH_CAP_XATTR_EXCL | \
+ CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR)
#define CEPH_CAP_LOCKS (CEPH_LOCK_IFILE | CEPH_LOCK_IAUTH | CEPH_LOCK_ILINK | \
CEPH_LOCK_IXATTR)
+/* cap masks async dir operations */
+#define CEPH_CAP_DIR_CREATE CEPH_CAP_FILE_CACHE
+#define CEPH_CAP_DIR_UNLINK CEPH_CAP_FILE_RD
+#define CEPH_CAP_ANY_DIR_OPS (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD | \
+ CEPH_CAP_FILE_WREXTEND | CEPH_CAP_FILE_LAZYIO)
+
int ceph_caps_for_mode(int mode);
enum {
@@ -707,7 +768,7 @@ struct ceph_mds_caps {
__le32 xattr_len;
__le64 xattr_version;
- /* filelock */
+ /* a union of non-export and export bodies. */
__le64 size, max_size, truncate_size;
__le32 truncate_seq;
struct ceph_timespec mtime, atime, ctime;
diff --git a/include/linux/ceph/debugfs.h b/include/linux/ceph/debugfs.h
index cf5e840eec71..8b3a1a7a953a 100644
--- a/include/linux/ceph/debugfs.h
+++ b/include/linux/ceph/debugfs.h
@@ -2,22 +2,8 @@
#ifndef _FS_CEPH_DEBUGFS_H
#define _FS_CEPH_DEBUGFS_H
-#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/types.h>
-#define CEPH_DEFINE_SHOW_FUNC(name) \
-static int name##_open(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, name, inode->i_private); \
-} \
- \
-static const struct file_operations name##_fops = { \
- .open = name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
-};
-
/* debugfs.c */
extern void ceph_debugfs_init(void);
extern void ceph_debugfs_cleanup(void);
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
index 450384fe487c..04f3ace5787b 100644
--- a/include/linux/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -220,6 +220,8 @@ static inline void ceph_encode_timespec64(struct ceph_timespec *tv,
*/
#define CEPH_ENTITY_ADDR_TYPE_NONE 0
#define CEPH_ENTITY_ADDR_TYPE_LEGACY __cpu_to_le32(1)
+#define CEPH_ENTITY_ADDR_TYPE_MSGR2 __cpu_to_le32(2)
+#define CEPH_ENTITY_ADDR_TYPE_ANY __cpu_to_le32(3)
static inline void ceph_encode_banner_addr(struct ceph_entity_addr *a)
{
@@ -239,6 +241,12 @@ static inline void ceph_decode_banner_addr(struct ceph_entity_addr *a)
extern int ceph_decode_entity_addr(void **p, void *end,
struct ceph_entity_addr *addr);
+int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
+ struct ceph_entity_addr *addr);
+
+int ceph_entity_addr_encoding_len(const struct ceph_entity_addr *addr);
+void ceph_encode_entity_addr(void **p, const struct ceph_entity_addr *addr);
+
/*
* encoders
*/
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index ec73ebc4827d..00af2c98da75 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -31,11 +31,11 @@
#define CEPH_OPT_FSID (1<<0)
#define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */
#define CEPH_OPT_MYIP (1<<2) /* specified my ip */
-#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */
-#define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */
-#define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */
-#define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs */
-#define CEPH_OPT_ABORT_ON_FULL (1<<7) /* abort w/ ENOSPC when full */
+#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes (msgr1) */
+#define CEPH_OPT_TCP_NODELAY (1<<4) /* TCP_NODELAY on TCP sockets */
+#define CEPH_OPT_NOMSGSIGN (1<<5) /* don't sign msgs (msgr1) */
+#define CEPH_OPT_ABORT_ON_FULL (1<<6) /* abort w/ ENOSPC when full */
+#define CEPH_OPT_RXBOUNCE (1<<7) /* double-buffer read data */
#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY)
@@ -52,6 +52,8 @@ struct ceph_options {
unsigned long osd_idle_ttl; /* jiffies */
unsigned long osd_keepalive_timeout; /* jiffies */
unsigned long osd_request_timeout; /* jiffies */
+ u32 read_from_replica; /* CEPH_OSD_FLAG_BALANCE/LOCALIZE_READS */
+ int con_modes[2]; /* CEPH_CON_MODE_* */
/*
* any type that can't be simply compared or doesn't need
@@ -64,6 +66,7 @@ struct ceph_options {
int num_mon;
char *name;
struct ceph_crypto_key *key;
+ struct rb_root crush_locs;
};
/*
@@ -73,6 +76,7 @@ struct ceph_options {
#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000)
#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000)
#define CEPH_OSD_REQUEST_TIMEOUT_DEFAULT 0 /* no timeout */
+#define CEPH_READ_FROM_REPLICA_DEFAULT 0 /* read from primary */
#define CEPH_MONC_HUNT_INTERVAL msecs_to_jiffies(3 * 1000)
#define CEPH_MONC_PING_INTERVAL msecs_to_jiffies(10 * 1000)
@@ -80,6 +84,7 @@ struct ceph_options {
#define CEPH_MONC_HUNT_BACKOFF 2
#define CEPH_MONC_HUNT_MAX_MULT 10
+#define CEPH_MSG_MAX_CONTROL_LEN (16*1024*1024)
#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
@@ -101,6 +106,7 @@ enum {
CEPH_MOUNT_UNMOUNTING,
CEPH_MOUNT_UNMOUNTED,
CEPH_MOUNT_SHUTDOWN,
+ CEPH_MOUNT_RECOVER,
};
static inline unsigned long ceph_timeout_jiffies(unsigned long timeout)
@@ -147,6 +153,10 @@ struct ceph_client {
#define from_msgr(ms) container_of(ms, struct ceph_client, msgr)
+static inline bool ceph_msgr2(struct ceph_client *client)
+{
+ return client->options->con_modes[0] != CEPH_CON_MODE_UNKNOWN;
+}
/*
* snapshots
@@ -188,7 +198,7 @@ static inline int calc_pages_for(u64 off, u64 len)
#define RB_CMP3WAY(a, b) ((a) < (b) ? -1 : (a) > (b))
#define DEFINE_RB_INSDEL_FUNCS2(name, type, keyfld, cmpexp, keyexp, nodefld) \
-static void insert_##name(struct rb_root *root, type *t) \
+static bool __insert_##name(struct rb_root *root, type *t) \
{ \
struct rb_node **n = &root->rb_node; \
struct rb_node *parent = NULL; \
@@ -206,11 +216,17 @@ static void insert_##name(struct rb_root *root, type *t) \
else if (cmp > 0) \
n = &(*n)->rb_right; \
else \
- BUG(); \
+ return false; \
} \
\
rb_link_node(&t->nodefld, parent, n); \
rb_insert_color(&t->nodefld, root); \
+ return true; \
+} \
+static void __maybe_unused insert_##name(struct rb_root *root, type *t) \
+{ \
+ if (!__insert_##name(root, t)) \
+ BUG(); \
} \
static void erase_##name(struct rb_root *root, type *t) \
{ \
@@ -268,23 +284,26 @@ DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld)
extern struct kmem_cache *ceph_inode_cachep;
extern struct kmem_cache *ceph_cap_cachep;
+extern struct kmem_cache *ceph_cap_snap_cachep;
extern struct kmem_cache *ceph_cap_flush_cachep;
extern struct kmem_cache *ceph_dentry_cachep;
extern struct kmem_cache *ceph_file_cachep;
extern struct kmem_cache *ceph_dir_file_cachep;
+extern struct kmem_cache *ceph_mds_request_cachep;
+extern mempool_t *ceph_wb_pagevec_pool;
/* ceph_common.c */
extern bool libceph_compatible(void *data);
extern const char *ceph_msg_type_name(int type);
extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
-extern void *ceph_kvmalloc(size_t size, gfp_t flags);
+extern int ceph_parse_fsid(const char *str, struct ceph_fsid *fsid);
struct fs_parameter;
struct fc_log;
struct ceph_options *ceph_alloc_options(void);
int ceph_parse_mon_ips(const char *buf, size_t len, struct ceph_options *opt,
- struct fc_log *l);
+ struct fc_log *l, char delim);
int ceph_parse_param(struct fs_parameter *param, struct ceph_options *opt,
struct fc_log *l);
int ceph_print_client_options(struct seq_file *m, struct ceph_client *client,
diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h
index 35d385296fbb..4c3e0648dc27 100644
--- a/include/linux/ceph/mdsmap.h
+++ b/include/linux/ceph/mdsmap.h
@@ -25,6 +25,7 @@ struct ceph_mdsmap {
u32 m_session_timeout; /* seconds */
u32 m_session_autoclose; /* seconds */
u64 m_max_file_size;
+ u64 m_max_xattr_size; /* maximum size for xattrs blob */
u32 m_max_mds; /* expected up:active mds number */
u32 m_num_active_mds; /* actual up:active mds number */
u32 possible_max_rank; /* possible max rank index */
@@ -64,7 +65,7 @@ static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
}
extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);
-extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end);
+struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2);
extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m);
extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m);
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index c4458dc6a757..99c1726be6ee 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -3,6 +3,7 @@
#define __FS_CEPH_MESSENGER_H
#include <linux/bvec.h>
+#include <linux/crypto.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/net.h>
@@ -52,9 +53,26 @@ struct ceph_connection_operations {
int (*sign_message) (struct ceph_msg *msg);
int (*check_message_signature) (struct ceph_msg *msg);
+
+ /* msgr2 authentication exchange */
+ int (*get_auth_request)(struct ceph_connection *con,
+ void *buf, int *buf_len,
+ void **authorizer, int *authorizer_len);
+ int (*handle_auth_reply_more)(struct ceph_connection *con,
+ void *reply, int reply_len,
+ void *buf, int *buf_len,
+ void **authorizer, int *authorizer_len);
+ int (*handle_auth_done)(struct ceph_connection *con,
+ u64 global_id, void *reply, int reply_len,
+ u8 *session_key, int *session_key_len,
+ u8 *con_secret, int *con_secret_len);
+ int (*handle_auth_bad_method)(struct ceph_connection *con,
+ int used_proto, int result,
+ const int *allowed_protos, int proto_cnt,
+ const int *allowed_modes, int mode_cnt);
};
-/* use format string %s%d */
+/* use format string %s%lld */
#define ENTITY_NAME(n) ceph_entity_type_name((n).type), le64_to_cpu((n).num)
struct ceph_messenger {
@@ -175,9 +193,10 @@ struct ceph_msg_data {
#endif /* CONFIG_BLOCK */
struct ceph_bvec_iter bvec_pos;
struct {
- struct page **pages; /* NOT OWNER. */
+ struct page **pages;
size_t length; /* total # bytes */
unsigned int alignment; /* first page */
+ bool own_pages;
};
struct ceph_pagelist *pagelist;
};
@@ -188,7 +207,6 @@ struct ceph_msg_data_cursor {
struct ceph_msg_data *data; /* current data item */
size_t resid; /* bytes not yet consumed */
- bool last_piece; /* current is last piece */
bool need_crc; /* crc update needed */
union {
#ifdef CONFIG_BLOCK
@@ -234,14 +252,175 @@ struct ceph_msg {
bool more_to_follow;
bool needs_out_seq;
int front_alloc_len;
- unsigned long ack_stamp; /* tx: when we were acked */
struct ceph_msgpool *pool;
};
+/*
+ * connection states
+ */
+#define CEPH_CON_S_CLOSED 1
+#define CEPH_CON_S_PREOPEN 2
+#define CEPH_CON_S_V1_BANNER 3
+#define CEPH_CON_S_V1_CONNECT_MSG 4
+#define CEPH_CON_S_V2_BANNER_PREFIX 5
+#define CEPH_CON_S_V2_BANNER_PAYLOAD 6
+#define CEPH_CON_S_V2_HELLO 7
+#define CEPH_CON_S_V2_AUTH 8
+#define CEPH_CON_S_V2_AUTH_SIGNATURE 9
+#define CEPH_CON_S_V2_SESSION_CONNECT 10
+#define CEPH_CON_S_V2_SESSION_RECONNECT 11
+#define CEPH_CON_S_OPEN 12
+#define CEPH_CON_S_STANDBY 13
+
+/*
+ * ceph_connection flag bits
+ */
+#define CEPH_CON_F_LOSSYTX 0 /* we can close channel or drop
+ messages on errors */
+#define CEPH_CON_F_KEEPALIVE_PENDING 1 /* we need to send a keepalive */
+#define CEPH_CON_F_WRITE_PENDING 2 /* we have data ready to send */
+#define CEPH_CON_F_SOCK_CLOSED 3 /* socket state changed to closed */
+#define CEPH_CON_F_BACKOFF 4 /* need to retry queuing delayed
+ work */
+
/* ceph connection fault delay defaults, for exponential backoff */
-#define BASE_DELAY_INTERVAL (HZ/2)
-#define MAX_DELAY_INTERVAL (5 * 60 * HZ)
+#define BASE_DELAY_INTERVAL (HZ / 4)
+#define MAX_DELAY_INTERVAL (15 * HZ)
+
+struct ceph_connection_v1_info {
+ struct kvec out_kvec[8], /* sending header/footer data */
+ *out_kvec_cur;
+ int out_kvec_left; /* kvec's left in out_kvec */
+ int out_skip; /* skip this many bytes */
+ int out_kvec_bytes; /* total bytes left */
+ bool out_more; /* there is more data after the kvecs */
+ bool out_msg_done;
+
+ struct ceph_auth_handshake *auth;
+ int auth_retry; /* true if we need a newer authorizer */
+
+ /* connection negotiation temps */
+ u8 in_banner[CEPH_BANNER_MAX_LEN];
+ struct ceph_entity_addr actual_peer_addr;
+ struct ceph_entity_addr peer_addr_for_me;
+ struct ceph_msg_connect out_connect;
+ struct ceph_msg_connect_reply in_reply;
+
+ int in_base_pos; /* bytes read */
+
+ /* message in temps */
+ u8 in_tag; /* protocol control byte */
+ struct ceph_msg_header in_hdr;
+ __le64 in_temp_ack; /* for reading an ack */
+
+ /* message out temps */
+ struct ceph_msg_header out_hdr;
+ __le64 out_temp_ack; /* for writing an ack */
+ struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
+ stamp */
+
+ u32 connect_seq; /* identify the most recent connection
+ attempt for this session */
+ u32 peer_global_seq; /* peer's global seq for this connection */
+};
+
+#define CEPH_CRC_LEN 4
+#define CEPH_GCM_KEY_LEN 16
+#define CEPH_GCM_IV_LEN sizeof(struct ceph_gcm_nonce)
+#define CEPH_GCM_BLOCK_LEN 16
+#define CEPH_GCM_TAG_LEN 16
+
+#define CEPH_PREAMBLE_LEN 32
+#define CEPH_PREAMBLE_INLINE_LEN 48
+#define CEPH_PREAMBLE_PLAIN_LEN CEPH_PREAMBLE_LEN
+#define CEPH_PREAMBLE_SECURE_LEN (CEPH_PREAMBLE_LEN + \
+ CEPH_PREAMBLE_INLINE_LEN + \
+ CEPH_GCM_TAG_LEN)
+#define CEPH_EPILOGUE_PLAIN_LEN (1 + 3 * CEPH_CRC_LEN)
+#define CEPH_EPILOGUE_SECURE_LEN (CEPH_GCM_BLOCK_LEN + CEPH_GCM_TAG_LEN)
+
+#define CEPH_FRAME_MAX_SEGMENT_COUNT 4
+
+struct ceph_frame_desc {
+ int fd_tag; /* FRAME_TAG_* */
+ int fd_seg_cnt;
+ int fd_lens[CEPH_FRAME_MAX_SEGMENT_COUNT]; /* logical */
+ int fd_aligns[CEPH_FRAME_MAX_SEGMENT_COUNT];
+};
+
+struct ceph_gcm_nonce {
+ __le32 fixed;
+ __le64 counter __packed;
+};
+
+struct ceph_connection_v2_info {
+ struct iov_iter in_iter;
+ struct kvec in_kvecs[5]; /* recvmsg */
+ struct bio_vec in_bvec; /* recvmsg (in_cursor) */
+ int in_kvec_cnt;
+ int in_state; /* IN_S_* */
+
+ struct iov_iter out_iter;
+ struct kvec out_kvecs[8]; /* sendmsg */
+ struct bio_vec out_bvec; /* sendpage (out_cursor, out_zero),
+ sendmsg (out_enc_pages) */
+ int out_kvec_cnt;
+ int out_state; /* OUT_S_* */
+
+ int out_zero; /* # of zero bytes to send */
+ bool out_iter_sendpage; /* use sendpage if possible */
+
+ struct ceph_frame_desc in_desc;
+ struct ceph_msg_data_cursor in_cursor;
+ struct ceph_msg_data_cursor out_cursor;
+
+ struct crypto_shash *hmac_tfm; /* post-auth signature */
+ struct crypto_aead *gcm_tfm; /* on-wire encryption */
+ struct aead_request *gcm_req;
+ struct crypto_wait gcm_wait;
+ struct ceph_gcm_nonce in_gcm_nonce;
+ struct ceph_gcm_nonce out_gcm_nonce;
+
+ struct page **in_enc_pages;
+ int in_enc_page_cnt;
+ int in_enc_resid;
+ int in_enc_i;
+ struct page **out_enc_pages;
+ int out_enc_page_cnt;
+ int out_enc_resid;
+ int out_enc_i;
+
+ int con_mode; /* CEPH_CON_MODE_* */
+
+ void *conn_bufs[16];
+ int conn_buf_cnt;
+
+ struct kvec in_sign_kvecs[8];
+ struct kvec out_sign_kvecs[8];
+ int in_sign_kvec_cnt;
+ int out_sign_kvec_cnt;
+
+ u64 client_cookie;
+ u64 server_cookie;
+ u64 global_seq;
+ u64 connect_seq;
+ u64 peer_global_seq;
+
+ u8 in_buf[CEPH_PREAMBLE_SECURE_LEN];
+ u8 out_buf[CEPH_PREAMBLE_SECURE_LEN];
+ struct {
+ u8 late_status; /* FRAME_LATE_STATUS_* */
+ union {
+ struct {
+ u32 front_crc;
+ u32 middle_crc;
+ u32 data_crc;
+ } __packed;
+ u8 pad[CEPH_GCM_BLOCK_LEN - 1];
+ };
+ } out_epil;
+};
/*
* A single connection with another host.
@@ -257,24 +436,16 @@ struct ceph_connection {
struct ceph_messenger *msgr;
+ int state; /* CEPH_CON_S_* */
atomic_t sock_state;
struct socket *sock;
- struct ceph_entity_addr peer_addr; /* peer address */
- struct ceph_entity_addr peer_addr_for_me;
- unsigned long flags;
- unsigned long state;
+ unsigned long flags; /* CEPH_CON_F_* */
const char *error_msg; /* error message, if any */
struct ceph_entity_name peer_name; /* peer name */
-
+ struct ceph_entity_addr peer_addr; /* peer address */
u64 peer_features;
- u32 connect_seq; /* identify the most recent connection
- attempt for this connection, client */
- u32 peer_global_seq; /* peer's global seq for this connection */
-
- struct ceph_auth_handshake *auth;
- int auth_retry; /* true if we need a newer authorizer */
struct mutex mutex;
@@ -285,50 +456,86 @@ struct ceph_connection {
u64 in_seq, in_seq_acked; /* last message received, acked */
- /* connection negotiation temps */
- char in_banner[CEPH_BANNER_MAX_LEN];
- struct ceph_msg_connect out_connect;
- struct ceph_msg_connect_reply in_reply;
- struct ceph_entity_addr actual_peer_addr;
-
- /* message out temps */
- struct ceph_msg_header out_hdr;
+ struct ceph_msg *in_msg;
struct ceph_msg *out_msg; /* sending message (== tail of
out_sent) */
- bool out_msg_done;
-
- struct kvec out_kvec[8], /* sending header/footer data */
- *out_kvec_cur;
- int out_kvec_left; /* kvec's left in out_kvec */
- int out_skip; /* skip this many bytes */
- int out_kvec_bytes; /* total bytes left */
- int out_more; /* there is more data after the kvecs */
- __le64 out_temp_ack; /* for writing an ack */
- struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
- stamp */
- /* message in temps */
- struct ceph_msg_header in_hdr;
- struct ceph_msg *in_msg;
+ struct page *bounce_page;
u32 in_front_crc, in_middle_crc, in_data_crc; /* calculated crc */
- char in_tag; /* protocol control byte */
- int in_base_pos; /* bytes read */
- __le64 in_temp_ack; /* for reading an ack */
-
struct timespec64 last_keepalive_ack; /* keepalive2 ack stamp */
struct delayed_work work; /* send|recv work */
unsigned long delay; /* current delay interval */
+
+ union {
+ struct ceph_connection_v1_info v1;
+ struct ceph_connection_v2_info v2;
+ };
};
+extern struct page *ceph_zero_page;
+
+void ceph_con_flag_clear(struct ceph_connection *con, unsigned long con_flag);
+void ceph_con_flag_set(struct ceph_connection *con, unsigned long con_flag);
+bool ceph_con_flag_test(struct ceph_connection *con, unsigned long con_flag);
+bool ceph_con_flag_test_and_clear(struct ceph_connection *con,
+ unsigned long con_flag);
+bool ceph_con_flag_test_and_set(struct ceph_connection *con,
+ unsigned long con_flag);
+
+void ceph_encode_my_addr(struct ceph_messenger *msgr);
+
+int ceph_tcp_connect(struct ceph_connection *con);
+int ceph_con_close_socket(struct ceph_connection *con);
+void ceph_con_reset_session(struct ceph_connection *con);
+
+u32 ceph_get_global_seq(struct ceph_messenger *msgr, u32 gt);
+void ceph_con_discard_sent(struct ceph_connection *con, u64 ack_seq);
+void ceph_con_discard_requeued(struct ceph_connection *con, u64 reconnect_seq);
+
+void ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor,
+ struct ceph_msg *msg, size_t length);
+struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
+ size_t *page_offset, size_t *length);
+void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, size_t bytes);
+
+u32 ceph_crc32c_page(u32 crc, struct page *page, unsigned int page_offset,
+ unsigned int length);
+
+bool ceph_addr_is_blank(const struct ceph_entity_addr *addr);
+int ceph_addr_port(const struct ceph_entity_addr *addr);
+void ceph_addr_set_port(struct ceph_entity_addr *addr, int p);
+
+void ceph_con_process_message(struct ceph_connection *con);
+int ceph_con_in_msg_alloc(struct ceph_connection *con,
+ struct ceph_msg_header *hdr, int *skip);
+void ceph_con_get_out_msg(struct ceph_connection *con);
+
+/* messenger_v1.c */
+int ceph_con_v1_try_read(struct ceph_connection *con);
+int ceph_con_v1_try_write(struct ceph_connection *con);
+void ceph_con_v1_revoke(struct ceph_connection *con);
+void ceph_con_v1_revoke_incoming(struct ceph_connection *con);
+bool ceph_con_v1_opened(struct ceph_connection *con);
+void ceph_con_v1_reset_session(struct ceph_connection *con);
+void ceph_con_v1_reset_protocol(struct ceph_connection *con);
+
+/* messenger_v2.c */
+int ceph_con_v2_try_read(struct ceph_connection *con);
+int ceph_con_v2_try_write(struct ceph_connection *con);
+void ceph_con_v2_revoke(struct ceph_connection *con);
+void ceph_con_v2_revoke_incoming(struct ceph_connection *con);
+bool ceph_con_v2_opened(struct ceph_connection *con);
+void ceph_con_v2_reset_session(struct ceph_connection *con);
+void ceph_con_v2_reset_protocol(struct ceph_connection *con);
+
extern const char *ceph_pr_addr(const struct ceph_entity_addr *addr);
extern int ceph_parse_ips(const char *c, const char *end,
struct ceph_entity_addr *addr,
- int max_count, int *count);
-
+ int max_count, int *count, char delim);
extern int ceph_msgr_init(void);
extern void ceph_msgr_exit(void);
@@ -356,8 +563,8 @@ extern void ceph_con_keepalive(struct ceph_connection *con);
extern bool ceph_con_keepalive_expired(struct ceph_connection *con,
unsigned long interval);
-extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
- size_t length, size_t alignment);
+void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
+ size_t length, size_t alignment, bool own_pages);
extern void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
struct ceph_pagelist *pagelist);
#ifdef CONFIG_BLOCK
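
The CEPH_CON_F_* constants are bit numbers rather than masks, so the ceph_con_flag_*() helpers declared above presumably reduce to the standard atomic bitops on con->flags. A plausible sketch of two of them; the real bodies live in net/ceph, and the example_ prefix marks these as illustrative:

	#include <linux/bitops.h>

	static inline void example_con_flag_set(struct ceph_connection *con,
						unsigned long con_flag)
	{
		set_bit(con_flag, &con->flags);
	}

	static inline bool example_con_flag_test_and_clear(struct ceph_connection *con,
							   unsigned long con_flag)
	{
		return test_and_clear_bit(con_flag, &con->flags);
	}
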
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index dbb8a6959a73..b658961156a0 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -19,7 +19,7 @@ struct ceph_monmap {
struct ceph_fsid fsid;
u32 epoch;
u32 num_mon;
- struct ceph_entity_inst mon_inst[0];
+ struct ceph_entity_inst mon_inst[];
};
struct ceph_mon_client;
@@ -142,7 +142,7 @@ int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what,
ceph_monc_callback_t cb, u64 private_data);
-int ceph_monc_blacklist_add(struct ceph_mon_client *monc,
+int ceph_monc_blocklist_add(struct ceph_mon_client *monc,
struct ceph_entity_addr *client_addr);
extern int ceph_monc_open_session(struct ceph_mon_client *monc);
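
Converting mon_inst[0] to a C99 flexible array member also lets allocation sites use struct_size(), which guards the count-times-element-size arithmetic against overflow. A minimal allocation sketch, assuming num_mon comes from the decoded monmap header:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct ceph_monmap *monmap;

	/* one allocation covers the header plus num_mon trailing entries */
	monmap = kzalloc(struct_size(monmap, mon_inst, num_mon), GFP_KERNEL);
	if (!monmap)
		return -ENOMEM;
	monmap->num_mon = num_mon;
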
diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h
index 9e50aede46c8..3989dcb94d3d 100644
--- a/include/linux/ceph/msgr.h
+++ b/include/linux/ceph/msgr.h
@@ -9,24 +9,45 @@
#define CEPH_MON_PORT 6789 /* default monitor port */
/*
- * client-side processes will try to bind to ports in this
- * range, simply for the benefit of tools like nmap or wireshark
- * that would like to identify the protocol.
- */
-#define CEPH_PORT_FIRST 6789
-#define CEPH_PORT_START 6800 /* non-monitors start here */
-#define CEPH_PORT_LAST 6900
-
-/*
* tcp connection banner. include a protocol version. and adjust
* whenever the wire protocol changes. try to keep this string length
* constant.
*/
#define CEPH_BANNER "ceph v027"
+#define CEPH_BANNER_LEN 9
#define CEPH_BANNER_MAX_LEN 30
/*
+ * messenger V2 connection banner prefix.
+ * The full banner string should have the form: "ceph v2\n<le16>"
+ * the 2 bytes are the length of the remaining banner.
+ */
+#define CEPH_BANNER_V2 "ceph v2\n"
+#define CEPH_BANNER_V2_LEN 8
+#define CEPH_BANNER_V2_PREFIX_LEN (CEPH_BANNER_V2_LEN + sizeof(__le16))
+
+/*
+ * messenger V2 features
+ */
+#define CEPH_MSGR2_INCARNATION_1 (0ull)
+
+#define DEFINE_MSGR2_FEATURE(bit, incarnation, name) \
+ static const uint64_t __maybe_unused CEPH_MSGR2_FEATURE_##name = (1ULL << bit); \
+ static const uint64_t __maybe_unused CEPH_MSGR2_FEATUREMASK_##name = \
+ (1ULL << bit | CEPH_MSGR2_INCARNATION_##incarnation);
+
+#define HAVE_MSGR2_FEATURE(x, name) \
+ (((x) & (CEPH_MSGR2_FEATUREMASK_##name)) == (CEPH_MSGR2_FEATUREMASK_##name))
+
+DEFINE_MSGR2_FEATURE( 0, 1, REVISION_1) // msgr2.1
+
+#define CEPH_MSGR2_SUPPORTED_FEATURES (CEPH_MSGR2_FEATURE_REVISION_1)
+
+#define CEPH_MSGR2_REQUIRED_FEATURES (CEPH_MSGR2_FEATURE_REVISION_1)
+
+
+/*
* Rollover-safe type and comparator for 32-bit sequence numbers.
* Comparator returns -1, 0, or 1.
*/
@@ -61,11 +82,18 @@ extern const char *ceph_entity_type_name(int type);
* entity_addr -- network address
*/
struct ceph_entity_addr {
- __le32 type;
+ __le32 type; /* CEPH_ENTITY_ADDR_TYPE_* */
__le32 nonce; /* unique id for process (e.g. pid) */
struct sockaddr_storage in_addr;
} __attribute__ ((packed));
+static inline bool ceph_addr_equal_no_type(const struct ceph_entity_addr *lhs,
+ const struct ceph_entity_addr *rhs)
+{
+ return !memcmp(&lhs->in_addr, &rhs->in_addr, sizeof(lhs->in_addr)) &&
+ lhs->nonce == rhs->nonce;
+}
+
struct ceph_entity_inst {
struct ceph_entity_name name;
struct ceph_entity_addr addr;
@@ -160,6 +188,24 @@ struct ceph_msg_header {
__le32 crc; /* header crc32c */
} __attribute__ ((packed));
+struct ceph_msg_header2 {
+ __le64 seq; /* message seq# for this session */
+ __le64 tid; /* transaction id */
+ __le16 type; /* message type */
+ __le16 priority; /* priority. higher value == higher priority */
+ __le16 version; /* version of message encoding */
+
+ __le32 data_pre_padding_len;
+ __le16 data_off; /* sender: include full offset;
+ receiver: mask against ~PAGE_MASK */
+
+ __le64 ack_seq;
+ __u8 flags;
+ /* oldest code we think can decode this. unknown if zero. */
+ __le16 compat_version;
+ __le16 reserved;
+} __attribute__ ((packed));
+
#define CEPH_MSG_PRIO_LOW 64
#define CEPH_MSG_PRIO_DEFAULT 127
#define CEPH_MSG_PRIO_HIGH 196
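
DEFINE_MSGR2_FEATURE() emits both the raw feature bit and a mask that folds in the incarnation, and HAVE_MSGR2_FEATURE() compares against the full mask so an unsupported incarnation never matches. A short sketch against a hypothetical peer feature word:

	u64 peer_feats = CEPH_MSGR2_SUPPORTED_FEATURES;	/* e.g. taken from the peer's hello frame */

	if (HAVE_MSGR2_FEATURE(peer_feats, REVISION_1)) {
		/* peer speaks msgr2.1 framing */
	}
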
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 5a62dbd3f4c2..fb6be72104df 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -8,6 +8,7 @@
#include <linux/mempool.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
+#include <linux/ktime.h>
#include <linux/ceph/types.h>
#include <linux/ceph/osdmap.h>
@@ -135,6 +136,7 @@ struct ceph_osd_req_op {
struct {
u64 expected_object_size;
u64 expected_write_size;
+ u32 flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
} alloc_hint;
struct {
u64 snapid;
@@ -164,6 +166,7 @@ struct ceph_osd_request_target {
bool recovery_deletes;
unsigned int flags; /* CEPH_OSD_FLAG_* */
+ bool used_replica;
bool paused;
u32 epoch;
@@ -213,6 +216,8 @@ struct ceph_osd_request {
/* internal */
unsigned long r_stamp; /* jiffies, send or check time */
unsigned long r_start_stamp; /* jiffies */
+ ktime_t r_start_latency; /* ktime_t */
+ ktime_t r_end_latency; /* ktime_t */
int r_attempts;
u32 r_map_dne_bound;
@@ -282,6 +287,9 @@ struct ceph_osd_linger_request {
rados_watcherrcb_t errcb;
void *data;
+ struct ceph_pagelist *request_pl;
+ struct page **notify_id_pages;
+
struct page ***preply_pages;
size_t *preply_len;
};
@@ -399,7 +407,7 @@ void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc);
&__oreq->r_ops[__whch].typ.fld; \
})
-extern void osd_req_op_init(struct ceph_osd_request *osd_req,
+struct ceph_osd_req_op *osd_req_op_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode, u32 flags);
extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *,
@@ -468,7 +476,16 @@ extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int
extern void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
unsigned int which,
u64 expected_object_size,
- u64 expected_write_size);
+ u64 expected_write_size,
+ u32 flags);
+extern int osd_req_op_copy_from_init(struct ceph_osd_request *req,
+ u64 src_snapid, u64 src_version,
+ struct ceph_object_id *src_oid,
+ struct ceph_object_locator *src_oloc,
+ u32 src_fadvise_flags,
+ u32 dst_fadvise_flags,
+ u32 truncate_seq, u64 truncate_size,
+ u8 copy_from_flags);
extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
struct ceph_snap_context *snapc,
@@ -490,9 +507,8 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
extern void ceph_osdc_get_request(struct ceph_osd_request *req);
extern void ceph_osdc_put_request(struct ceph_osd_request *req);
-extern int ceph_osdc_start_request(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req,
- bool nofail);
+void ceph_osdc_start_request(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req);
extern void ceph_osdc_cancel_request(struct ceph_osd_request *req);
extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req);
@@ -509,34 +525,6 @@ int ceph_osdc_call(struct ceph_osd_client *osdc,
struct page *req_page, size_t req_len,
struct page **resp_pages, size_t *resp_len);
-extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
- struct ceph_vino vino,
- struct ceph_file_layout *layout,
- u64 off, u64 *plen,
- u32 truncate_seq, u64 truncate_size,
- struct page **pages, int nr_pages,
- int page_align);
-
-extern int ceph_osdc_writepages(struct ceph_osd_client *osdc,
- struct ceph_vino vino,
- struct ceph_file_layout *layout,
- struct ceph_snap_context *sc,
- u64 off, u64 len,
- u32 truncate_seq, u64 truncate_size,
- struct timespec64 *mtime,
- struct page **pages, int nr_pages);
-
-int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
- u64 src_snapid, u64 src_version,
- struct ceph_object_id *src_oid,
- struct ceph_object_locator *src_oloc,
- u32 src_fadvise_flags,
- struct ceph_object_id *dst_oid,
- struct ceph_object_locator *dst_oloc,
- u32 dst_fadvise_flags,
- u32 truncate_seq, u64 truncate_size,
- u8 copy_from_flags);
-
/* watch/notify */
struct ceph_osd_linger_request *
ceph_osdc_watch(struct ceph_osd_client *osdc,
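
Since ceph_osdc_start_request() can no longer fail (the old nofail path is gone), error handling collapses onto the wait; the alloc-hint op also gains the CEPH_OSD_ALLOC_HINT_FLAG_* bits defined later in rados.h. A hedged submission sketch, assuming req/osdc are set up elsewhere and that op 0 is the hint op:

	int ret;

	osd_req_op_alloc_hint_init(req, 0, object_size, write_size,
				   CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_WRITE |
				   CEPH_OSD_ALLOC_HINT_FLAG_APPEND_ONLY);
	ceph_osdc_start_request(osdc, req);
	ret = ceph_osdc_wait_request(osdc, req);
	ceph_osdc_put_request(req);
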
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index e081b56f1c1d..5553019c3f07 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -37,6 +37,9 @@ int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs);
#define CEPH_POOL_FLAG_HASHPSPOOL (1ULL << 0) /* hash pg seed and pool id
together */
#define CEPH_POOL_FLAG_FULL (1ULL << 1) /* pool is full */
+#define CEPH_POOL_FLAG_FULL_QUOTA (1ULL << 10) /* pool ran out of quota,
+ will set FULL too */
+#define CEPH_POOL_FLAG_NEARFULL (1ULL << 11) /* pool is nearfull */
struct ceph_pg_pool_info {
struct rb_node node;
@@ -134,6 +137,17 @@ int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
const char *fmt, ...);
void ceph_oid_destroy(struct ceph_object_id *oid);
+struct workspace_manager {
+ struct list_head idle_ws;
+ spinlock_t ws_lock;
+ /* Number of free workspaces */
+ int free_ws;
+ /* Total number of allocated workspaces */
+ atomic_t total_ws;
+ /* Waiters for a free workspace */
+ wait_queue_head_t ws_wait;
+};
+
struct ceph_pg_mapping {
struct rb_node node;
struct ceph_pg pgid;
@@ -181,8 +195,7 @@ struct ceph_osdmap {
* the list of osds that store+replicate them. */
struct crush_map *crush;
- struct mutex crush_workspace_mutex;
- void *crush_workspace;
+ struct workspace_manager crush_wsm;
};
static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd)
@@ -238,8 +251,8 @@ static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid)
}
struct ceph_osdmap *ceph_osdmap_alloc(void);
-extern struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end);
-struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
+struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end, bool msgr2);
+struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, bool msgr2,
struct ceph_osdmap *map);
extern void ceph_osdmap_destroy(struct ceph_osdmap *map);
@@ -299,10 +312,28 @@ bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap,
int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
const struct ceph_pg *raw_pgid);
+struct crush_loc {
+ char *cl_type_name;
+ char *cl_name;
+};
+
+struct crush_loc_node {
+ struct rb_node cl_node;
+ struct crush_loc cl_loc; /* pointers into cl_data */
+ char cl_data[];
+};
+
+int ceph_parse_crush_location(char *crush_location, struct rb_root *locs);
+int ceph_compare_crush_locs(struct rb_root *locs1, struct rb_root *locs2);
+void ceph_clear_crush_locs(struct rb_root *locs);
+
+int ceph_get_crush_locality(struct ceph_osdmap *osdmap, int id,
+ struct rb_root *locs);
+
extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map,
u64 id);
-
extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id);
extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);
+u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id);
#endif
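
The new ceph_pg_pool_flags() accessor pairs with the CEPH_POOL_FLAG_* additions above, letting callers treat per-pool fullness like the map-wide flags. A hedged check, with osdc and pool_id assumed from context:

	u64 pool_flags = ceph_pg_pool_flags(osdc->osdmap, pool_id);

	if (pool_flags & (CEPH_POOL_FLAG_FULL | CEPH_POOL_FLAG_FULL_QUOTA)) {
		/* pause or fail writes, mirroring the CEPH_OSDMAP_FULL handling */
	}
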
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index 59bdfd470100..43a7a1573b51 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -143,8 +143,10 @@ extern const char *ceph_osd_state_name(int s);
/*
* osd map flag bits
*/
-#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC) */
-#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC) */
+#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC),
+ not set since ~luminous */
+#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC),
+ not set since ~luminous */
#define CEPH_OSDMAP_PAUSERD (1<<2) /* pause all reads */
#define CEPH_OSDMAP_PAUSEWR (1<<3) /* pause all writes */
#define CEPH_OSDMAP_PAUSEREC (1<<4) /* pause recovery */
@@ -422,7 +424,7 @@ enum {
};
#define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/
-#define EBLACKLISTED ESHUTDOWN /* blacklisted */
+#define EBLOCKLISTED ESHUTDOWN /* blocklisted */
/* xattr comparison */
enum {
@@ -463,6 +465,19 @@ enum {
const char *ceph_osd_watch_op_name(int o);
enum {
+ CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_WRITE = 1,
+ CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_WRITE = 2,
+ CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_READ = 4,
+ CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_READ = 8,
+ CEPH_OSD_ALLOC_HINT_FLAG_APPEND_ONLY = 16,
+ CEPH_OSD_ALLOC_HINT_FLAG_IMMUTABLE = 32,
+ CEPH_OSD_ALLOC_HINT_FLAG_SHORTLIVED = 64,
+ CEPH_OSD_ALLOC_HINT_FLAG_LONGLIVED = 128,
+ CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE = 256,
+ CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE = 512,
+};
+
+enum {
CEPH_OSD_BACKOFF_OP_BLOCK = 1,
CEPH_OSD_BACKOFF_OP_ACK_BLOCK = 2,
CEPH_OSD_BACKOFF_OP_UNBLOCK = 3,
@@ -515,6 +530,7 @@ struct ceph_osd_op {
struct {
__le64 expected_object_size;
__le64 expected_write_size;
+ __le32 flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
} __attribute__ ((packed)) alloc_hint;
struct {
__le64 snapid;
diff --git a/include/linux/cfag12864b.h b/include/linux/cfag12864b.h
index 4060004968c8..6617d9c68d86 100644
--- a/include/linux/cfag12864b.h
+++ b/include/linux/cfag12864b.h
@@ -4,7 +4,7 @@
* Version: 0.1.0
* Description: cfag12864b LCD driver header
*
- * Author: Copyright (C) Miguel Ojeda Sandonis
+ * Author: Copyright (C) Miguel Ojeda <ojeda@kernel.org>
* Date: 2006-10-12
*/
diff --git a/include/linux/cfi.h b/include/linux/cfi.h
new file mode 100644
index 000000000000..5e134f4ce8b7
--- /dev/null
+++ b/include/linux/cfi.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Clang Control Flow Integrity (CFI) support.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+#ifndef _LINUX_CFI_H
+#define _LINUX_CFI_H
+
+#include <linux/bug.h>
+#include <linux/module.h>
+
+#ifdef CONFIG_CFI_CLANG
+enum bug_trap_type report_cfi_failure(struct pt_regs *regs, unsigned long addr,
+ unsigned long *target, u32 type);
+
+static inline enum bug_trap_type report_cfi_failure_noaddr(struct pt_regs *regs,
+ unsigned long addr)
+{
+ return report_cfi_failure(regs, addr, NULL, 0);
+}
+
+#ifdef CONFIG_ARCH_USES_CFI_TRAPS
+bool is_cfi_trap(unsigned long addr);
+#endif
+#endif /* CONFIG_CFI_CLANG */
+
+#ifdef CONFIG_MODULES
+#ifdef CONFIG_ARCH_USES_CFI_TRAPS
+void module_cfi_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *mod);
+#else
+static inline void module_cfi_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *mod) {}
+#endif /* CONFIG_ARCH_USES_CFI_TRAPS */
+#endif /* CONFIG_MODULES */
+
+#endif /* _LINUX_CFI_H */
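
report_cfi_failure() returns an enum bug_trap_type, so an architecture's trap handler can distinguish a permissive-mode warning from a fatal violation. A hedged sketch of that dispatch; regs/addr come from the arch fault path and the function name is hypothetical:

	static int example_handle_cfi_trap(struct pt_regs *regs, unsigned long addr)
	{
		switch (report_cfi_failure_noaddr(regs, addr)) {
		case BUG_TRAP_TYPE_WARN:
			return 0;	/* warned; resume execution */
		default:
			return -EFAULT;	/* unrecognized or fatal */
		}
	}
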
diff --git a/include/linux/cfi_types.h b/include/linux/cfi_types.h
new file mode 100644
index 000000000000..6b8713675765
--- /dev/null
+++ b/include/linux/cfi_types.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Clang Control Flow Integrity (CFI) type definitions.
+ */
+#ifndef _LINUX_CFI_TYPES_H
+#define _LINUX_CFI_TYPES_H
+
+#ifdef __ASSEMBLY__
+#include <linux/linkage.h>
+
+#ifdef CONFIG_CFI_CLANG
+/*
+ * Use the __kcfi_typeid_<function> type identifier symbol to
+ * annotate indirectly called assembly functions. The compiler emits
+ * these symbols for all address-taken function declarations in C
+ * code.
+ */
+#ifndef __CFI_TYPE
+#define __CFI_TYPE(name) \
+ .4byte __kcfi_typeid_##name
+#endif
+
+#define SYM_TYPED_ENTRY(name, linkage, align...) \
+ linkage(name) ASM_NL \
+ align ASM_NL \
+ __CFI_TYPE(name) ASM_NL \
+ name:
+
+#define SYM_TYPED_START(name, linkage, align...) \
+ SYM_TYPED_ENTRY(name, linkage, align)
+
+#else /* CONFIG_CFI_CLANG */
+
+#define SYM_TYPED_START(name, linkage, align...) \
+ SYM_START(name, linkage, align)
+
+#endif /* CONFIG_CFI_CLANG */
+
+#ifndef SYM_TYPED_FUNC_START
+#define SYM_TYPED_FUNC_START(name) \
+ SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* _LINUX_CFI_TYPES_H */
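
Nothing special is needed on the C side: declaring the assembly function and taking its address is what makes the compiler emit the __kcfi_typeid_<name> symbol that __CFI_TYPE() references. A sketch with a hypothetical do_thing():

	/* in C: */
	void do_thing(int arg);				/* body lives in a .S file */
	static void (*thing_fn)(int) = do_thing;	/* address taken => typeid emitted */

	/* in the .S file:
	 *	SYM_TYPED_FUNC_START(do_thing)
	 *		...
	 *	SYM_FUNC_END(do_thing)
	 */
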
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 63097cb243cb..6e01f10f0d88 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -19,7 +19,7 @@
#include <linux/percpu-rwsem.h>
#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>
-#include <linux/bpf-cgroup.h>
+#include <linux/bpf-cgroup-defs.h>
#include <linux/psi_types.h>
#ifdef CONFIG_CGROUPS
@@ -71,6 +71,9 @@ enum {
/* Cgroup is frozen. */
CGRP_FROZEN,
+
+ /* Control group has to be killed. */
+ CGRP_KILL,
};
/* cgroup_root->flags */
@@ -86,14 +89,32 @@ enum {
CGRP_ROOT_NS_DELEGATE = (1 << 3),
/*
+ * Reduce latencies on dynamic cgroup modifications such as task
+ * migrations and controller on/offs by disabling percpu operation on
+ * cgroup_threadgroup_rwsem. This makes hot path operations such as
+ * forks and exits into the slow path and more expensive.
+ *
+ * The static usage pattern of creating a cgroup, enabling controllers,
+ * and then seeding it with CLONE_INTO_CGROUP doesn't require write
+ * locking cgroup_threadgroup_rwsem and thus doesn't benefit from
+ * favordynmod.
+ */
+ CGRP_ROOT_FAVOR_DYNMODS = (1 << 4),
+
+ /*
* Enable cpuset controller in v1 cgroup to use v2 behavior.
*/
- CGRP_ROOT_CPUSET_V2_MODE = (1 << 4),
+ CGRP_ROOT_CPUSET_V2_MODE = (1 << 16),
/*
* Enable legacy local memory.events.
*/
- CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 5),
+ CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 17),
+
+ /*
+ * Enable recursive subtree protection
+ */
+ CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 18),
};
/* cftype->flags */
@@ -109,6 +130,7 @@ enum {
/* internal flags, do not use outside cgroup core proper */
__CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
__CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */
+ __CFTYPE_ADDED = (1 << 18),
};
/*
@@ -227,7 +249,7 @@ struct css_set {
struct list_head task_iters;
/*
- * On the default hierarhcy, ->subsys[ssid] may point to a css
+ * On the default hierarchy, ->subsys[ssid] may point to a css
* attached to an ancestor instead of the cgroup this css_set is
* associated with. The following node is anchored at
* ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
@@ -255,7 +277,8 @@ struct css_set {
* List of csets participating in the on-going migration either as
* source or destination. Protected by cgroup_mutex.
*/
- struct list_head mg_preload_node;
+ struct list_head mg_src_preload_node;
+ struct list_head mg_dst_preload_node;
struct list_head mg_node;
/*
@@ -278,6 +301,10 @@ struct css_set {
struct cgroup_base_stat {
struct task_cputime cputime;
+
+#ifdef CONFIG_SCHED_CORE
+ u64 forceidle_sum;
+#endif
};
/*
@@ -357,7 +384,7 @@ struct cgroup {
/*
* The depth this cgroup is at. The root is at depth zero and each
* step down the hierarchy increments the level. This along with
- * ancestor_ids[] can determine whether a given cgroup is a
+ * ancestors[] can determine whether a given cgroup is a
* descendant of another without traversing the hierarchy.
*/
int level;
@@ -401,10 +428,13 @@ struct cgroup {
struct cgroup_file procs_file; /* handle for "cgroup.procs" */
struct cgroup_file events_file; /* handle for "cgroup.events" */
+ /* handles for "{cpu,memory,io,irq}.pressure" */
+ struct cgroup_file psi_files[NR_PSI_RESOURCES];
+
/*
* The bitmask of subsystems enabled on the child cgroups.
* ->subtree_control is the one configured through
- * "cgroup.subtree_control" while ->child_ss_mask is the effective
+ * "cgroup.subtree_control" while ->subtree_ss_mask is the effective
* one which may have more subsystems enabled. Controller knobs
* are made available iff it's enabled in ->subtree_control.
*/
@@ -466,7 +496,7 @@ struct cgroup {
struct work_struct release_agent_work;
/* used to track pressure stalls */
- struct psi_group psi;
+ struct psi_group *psi;
/* used to store eBPF programs */
struct cgroup_bpf bpf;
@@ -477,8 +507,8 @@ struct cgroup {
/* Used to store internal freezer state */
struct cgroup_freezer_state freezer;
- /* ids of the ancestors at each level including self */
- u64 ancestor_ids[];
+ /* All ancestors including self */
+ struct cgroup *ancestors[];
};
/*
@@ -495,11 +525,15 @@ struct cgroup_root {
/* Unique id for this hierarchy. */
int hierarchy_id;
- /* The root cgroup. Root is destroyed on its release. */
+ /*
+ * The root cgroup. The containing cgroup_root will be destroyed on its
+	 * release.  cgrp->ancestors[0] overflows into the
+ * following field. cgrp_ancestor_storage must immediately follow.
+ */
struct cgroup cgrp;
- /* for cgrp->ancestor_ids[0] */
- u64 cgrp_ancestor_id_storage;
+ /* must follow cgrp for cgrp->ancestors[0], see above */
+ struct cgroup *cgrp_ancestor_storage;
/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
atomic_t nr_cgrps;
@@ -628,8 +662,9 @@ struct cgroup_subsys {
void (*cancel_attach)(struct cgroup_taskset *tset);
void (*attach)(struct cgroup_taskset *tset);
void (*post_attach)(void);
- int (*can_fork)(struct task_struct *task);
- void (*cancel_fork)(struct task_struct *task);
+ int (*can_fork)(struct task_struct *task,
+ struct css_set *cset);
+ void (*cancel_fork)(struct task_struct *task, struct css_set *cset);
void (*fork)(struct task_struct *task);
void (*exit)(struct task_struct *task);
void (*release)(struct task_struct *task);
@@ -662,22 +697,7 @@ struct cgroup_subsys {
*/
bool threaded:1;
- /*
- * If %false, this subsystem is properly hierarchical -
- * configuration, resource accounting and restriction on a parent
- * cgroup cover those of its children. If %true, hierarchy support
- * is broken in some ways - some subsystems ignore hierarchy
- * completely while others are only implemented half-way.
- *
- * It's now disallowed to create nested cgroups if the subsystem is
- * broken and cgroup core will emit a warning message on such
- * cases. Eventually, all subsystems will be made properly
- * hierarchical and this will go away.
- */
- bool broken_hierarchy:1;
- bool warned_broken_hierarchy:1;
-
- /* the following two fields are initialized automtically during boot */
+ /* the following two fields are initialized automatically during boot */
int id;
const char *name;
@@ -757,103 +777,54 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
* sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
* per-socket cgroup information except for memcg association.
*
- * On legacy hierarchies, net_prio and net_cls controllers directly set
- * attributes on each sock which can then be tested by the network layer.
- * On the default hierarchy, each sock is associated with the cgroup it was
- * created in and the networking layer can match the cgroup directly.
- *
- * To avoid carrying all three cgroup related fields separately in sock,
- * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
- * On boot, sock_cgroup_data records the cgroup that the sock was created
- * in so that cgroup2 matches can be made; however, once either net_prio or
- * net_cls starts being used, the area is overriden to carry prioidx and/or
- * classid. The two modes are distinguished by whether the lowest bit is
- * set. Clear bit indicates cgroup pointer while set bit prioidx and
- * classid.
- *
- * While userland may start using net_prio or net_cls at any time, once
- * either is used, cgroup2 matching no longer works. There is no reason to
- * mix the two and this is in line with how legacy and v2 compatibility is
- * handled. On mode switch, cgroup references which are already being
- * pointed to by socks may be leaked. While this can be remedied by adding
- * synchronization around sock_cgroup_data, given that the number of leaked
- * cgroups is bound and highly unlikely to be high, this seems to be the
- * better trade-off.
+ * On legacy hierarchies, net_prio and net_cls controllers directly
+ * set attributes on each sock which can then be tested by the network
+ * layer. On the default hierarchy, each sock is associated with the
+ * cgroup it was created in and the networking layer can match the
+ * cgroup directly.
*/
struct sock_cgroup_data {
- union {
-#ifdef __LITTLE_ENDIAN
- struct {
- u8 is_data;
- u8 padding;
- u16 prioidx;
- u32 classid;
- } __packed;
-#else
- struct {
- u32 classid;
- u16 prioidx;
- u8 padding;
- u8 is_data;
- } __packed;
+ struct cgroup *cgroup; /* v2 */
+#ifdef CONFIG_CGROUP_NET_CLASSID
+ u32 classid; /* v1 */
+#endif
+#ifdef CONFIG_CGROUP_NET_PRIO
+ u16 prioidx; /* v1 */
#endif
- u64 val;
- };
};
-/*
- * There's a theoretical window where the following accessors race with
- * updaters and return part of the previous pointer as the prioidx or
- * classid. Such races are short-lived and the result isn't critical.
- */
static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
- /* fallback to 1 which is always the ID of the root cgroup */
- return (skcd->is_data & 1) ? skcd->prioidx : 1;
+#ifdef CONFIG_CGROUP_NET_PRIO
+ return READ_ONCE(skcd->prioidx);
+#else
+ return 1;
+#endif
}
static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
- /* fallback to 0 which is the unconfigured default classid */
- return (skcd->is_data & 1) ? skcd->classid : 0;
+#ifdef CONFIG_CGROUP_NET_CLASSID
+ return READ_ONCE(skcd->classid);
+#else
+ return 0;
+#endif
}
-/*
- * If invoked concurrently, the updaters may clobber each other. The
- * caller is responsible for synchronization.
- */
static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
u16 prioidx)
{
- struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
-
- if (sock_cgroup_prioidx(&skcd_buf) == prioidx)
- return;
-
- if (!(skcd_buf.is_data & 1)) {
- skcd_buf.val = 0;
- skcd_buf.is_data = 1;
- }
-
- skcd_buf.prioidx = prioidx;
- WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
+#ifdef CONFIG_CGROUP_NET_PRIO
+ WRITE_ONCE(skcd->prioidx, prioidx);
+#endif
}
static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
u32 classid)
{
- struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
-
- if (sock_cgroup_classid(&skcd_buf) == classid)
- return;
-
- if (!(skcd_buf.is_data & 1)) {
- skcd_buf.val = 0;
- skcd_buf.is_data = 1;
- }
-
- skcd_buf.classid = classid;
- WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
+#ifdef CONFIG_CGROUP_NET_CLASSID
+ WRITE_ONCE(skcd->classid, classid);
+#endif
}
#else /* CONFIG_SOCK_CGROUP_DATA */
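
With the union gone, the accessors become plain READ_ONCE()/WRITE_ONCE() on independent fields, so setting a v1 prioidx or classid no longer clobbers the v2 cgroup pointer. A small caller sketch, with sk assumed from context:

	struct sock_cgroup_data *skcd = &sk->sk_cgrp_data;

	sock_cgroup_set_prioidx(skcd, 5);	/* compiles to nothing without CONFIG_CGROUP_NET_PRIO */
	if (sock_cgroup_prioidx(skcd) == 1) {
		/* unset, or net_prio disabled: 1 is the root cgroup's ID */
	}
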
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index e75d2191226b..528bd44b59e2 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -27,10 +27,12 @@
#include <linux/cgroup-defs.h>
+struct kernel_clone_args;
+
#ifdef CONFIG_CGROUPS
/*
- * All weight knobs on the default hierarhcy should use the following min,
+ * All weight knobs on the default hierarchy should use the following min,
* default and max values. The default value is the logarithmic center of
* MIN and MAX and allows 100x to be expressed in both directions.
*/
@@ -58,9 +60,6 @@ struct css_task_iter {
struct list_head *tcset_head;
struct list_head *task_pos;
- struct list_head *tasks_head;
- struct list_head *mg_tasks_head;
- struct list_head *dying_tasks_head;
struct list_head *cur_tasks_head;
struct css_set *cur_cset;
@@ -107,6 +106,7 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);
+struct cgroup *cgroup_v1v2_get_from_fd(int fd);
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
@@ -115,6 +115,7 @@ int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);
+void cgroup_file_show(struct cgroup_file *cfile, bool show);
int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
@@ -122,9 +123,12 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk);
void cgroup_fork(struct task_struct *p);
-extern int cgroup_can_fork(struct task_struct *p);
-extern void cgroup_cancel_fork(struct task_struct *p);
-extern void cgroup_post_fork(struct task_struct *p);
+extern int cgroup_can_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs);
+extern void cgroup_cancel_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs);
+extern void cgroup_post_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs);
void cgroup_exit(struct task_struct *p);
void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);
@@ -305,7 +309,7 @@ void css_task_iter_end(struct css_task_iter *it);
* Inline functions.
*/
-static inline u64 cgroup_id(struct cgroup *cgrp)
+static inline u64 cgroup_id(const struct cgroup *cgrp)
{
return cgrp->kn->id;
}
@@ -430,6 +434,18 @@ static inline void cgroup_put(struct cgroup *cgrp)
css_put(&cgrp->self);
}
+extern struct mutex cgroup_mutex;
+
+static inline void cgroup_lock(void)
+{
+ mutex_lock(&cgroup_mutex);
+}
+
+static inline void cgroup_unlock(void)
+{
+ mutex_unlock(&cgroup_mutex);
+}
+
/**
* task_css_set_check - obtain a task's css_set with extra access conditions
* @task: the task to obtain css_set for
@@ -444,10 +460,10 @@ static inline void cgroup_put(struct cgroup *cgrp)
* as locks used during the cgroup_subsys::attach() methods.
*/
#ifdef CONFIG_PROVE_RCU
-extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c) \
rcu_dereference_check((task)->cgroups, \
+ rcu_read_lock_sched_held() || \
lockdep_is_held(&cgroup_mutex) || \
lockdep_is_held(&css_set_lock) || \
((task)->flags & PF_EXITING) || (__c))
@@ -571,7 +587,7 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
{
if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
return false;
- return cgrp->ancestor_ids[ancestor->level] == cgroup_id(ancestor);
+ return cgrp->ancestors[ancestor->level] == ancestor;
}
/**
@@ -588,11 +604,9 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
int ancestor_level)
{
- if (cgrp->level < ancestor_level)
+ if (ancestor_level < 0 || ancestor_level > cgrp->level)
return NULL;
- while (cgrp && cgrp->level > ancestor_level)
- cgrp = cgroup_parent(cgrp);
- return cgrp;
+ return cgrp->ancestors[ancestor_level];
}
/**
@@ -669,10 +683,7 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
pr_cont_kernfs_path(cgrp->kn);
}
-static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
-{
- return &cgrp->psi;
-}
+bool cgroup_psi_enabled(void);
static inline void cgroup_init_kthreadd(void)
{
@@ -694,23 +705,29 @@ static inline void cgroup_kthread_ready(void)
}
void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
+struct cgroup *cgroup_get_from_id(u64 id);
#else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
struct cgroup;
-static inline u64 cgroup_id(struct cgroup *cgrp) { return 1; }
+static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
+static inline void cgroup_lock(void) {}
+static inline void cgroup_unlock(void) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
struct dentry *dentry) { return -EINVAL; }
static inline void cgroup_fork(struct task_struct *p) {}
-static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
-static inline void cgroup_cancel_fork(struct task_struct *p) {}
-static inline void cgroup_post_fork(struct task_struct *p) {}
+static inline int cgroup_can_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs) { return 0; }
+static inline void cgroup_cancel_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs) {}
+static inline void cgroup_post_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}
@@ -725,9 +742,9 @@ static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
return NULL;
}
-static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
+static inline bool cgroup_psi_enabled(void)
{
- return NULL;
+ return false;
}
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
@@ -773,11 +790,9 @@ static inline void cgroup_account_cputime(struct task_struct *task,
cpuacct_charge(task, delta_exec);
- rcu_read_lock();
cgrp = task_dfl_cgroup(task);
if (cgroup_parent(cgrp))
__cgroup_account_cputime(cgrp, delta_exec);
- rcu_read_unlock();
}
static inline void cgroup_account_cputime_field(struct task_struct *task,
@@ -788,11 +803,9 @@ static inline void cgroup_account_cputime_field(struct task_struct *task,
cpuacct_account_field(task, index, delta_exec);
- rcu_read_lock();
cgrp = task_dfl_cgroup(task);
if (cgroup_parent(cgrp))
__cgroup_account_cputime_field(cgrp, index, delta_exec);
- rcu_read_unlock();
}
#else /* CONFIG_CGROUPS */
@@ -811,43 +824,24 @@ static inline void cgroup_account_cputime_field(struct task_struct *task,
*/
#ifdef CONFIG_SOCK_CGROUP_DATA
-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
-extern spinlock_t cgroup_sk_update_lock;
-#endif
-
-void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
+void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);
static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
- unsigned long v;
-
- /*
- * @skcd->val is 64bit but the following is safe on 32bit too as we
- * just need the lower ulong to be written and read atomically.
- */
- v = READ_ONCE(skcd->val);
-
- if (v & 1)
- return &cgrp_dfl_root.cgrp;
-
- return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
-#else
- return (struct cgroup *)(unsigned long)skcd->val;
-#endif
+ return skcd->cgroup;
}
#else /* CONFIG_SOCK_CGROUP_DATA */
static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
+static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
#endif /* CONFIG_SOCK_CGROUP_DATA */
struct cgroup_namespace {
- refcount_t count;
struct ns_common ns;
struct user_namespace *user_ns;
struct ucounts *ucounts;
@@ -882,12 +876,12 @@ copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
if (ns)
- refcount_inc(&ns->count);
+ refcount_inc(&ns->ns.count);
}
static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
- if (ns && refcount_dec_and_test(&ns->count))
+ if (ns && refcount_dec_and_test(&ns->ns.count))
free_cgroup_ns(ns);
}
@@ -900,20 +894,6 @@ void cgroup_freeze(struct cgroup *cgrp, bool freeze);
void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
struct cgroup *dst);
-static inline bool cgroup_task_freeze(struct task_struct *task)
-{
- bool ret;
-
- if (task->flags & PF_KTHREAD)
- return false;
-
- rcu_read_lock();
- ret = test_bit(CGRP_FREEZE, &task_dfl_cgroup(task)->flags);
- rcu_read_unlock();
-
- return ret;
-}
-
static inline bool cgroup_task_frozen(struct task_struct *task)
{
return task->frozen;
@@ -923,10 +903,6 @@ static inline bool cgroup_task_frozen(struct task_struct *task)
static inline void cgroup_enter_frozen(void) { }
static inline void cgroup_leave_frozen(bool always_leave) { }
-static inline bool cgroup_task_freeze(struct task_struct *task)
-{
- return false;
-}
static inline bool cgroup_task_frozen(struct task_struct *task)
{
return false;
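
cgroup_is_descendant() and cgroup_ancestor() are both O(1) now, indexing ancestors[] instead of walking parent pointers. A short usage sketch, with cgrp assumed from context:

	struct cgroup *anc = cgroup_ancestor(cgrp, 1);	/* first level below the root */

	if (anc && cgroup_is_descendant(cgrp, anc)) {
		/* true by construction: anc lies on cgrp's ancestors[] path */
	}
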
diff --git a/include/linux/cgroup_api.h b/include/linux/cgroup_api.h
new file mode 100644
index 000000000000..d0cfe8025111
--- /dev/null
+++ b/include/linux/cgroup_api.h
@@ -0,0 +1 @@
+#include <linux/cgroup.h>
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index acb77dcff3b4..445235487230 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -61,6 +61,10 @@ SUBSYS(pids)
SUBSYS(rdma)
#endif
+#if IS_ENABLED(CONFIG_CGROUP_MISC)
+SUBSYS(misc)
+#endif
+
/*
* The following subsystems are not supported on the default hierarchy.
*/
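
This header is an x-macro list: each consumer defines SUBSYS() before including it, so the SUBSYS(misc) addition above materializes the new controller in every generated table. This mirrors how cgroup-defs.h stamps out the controller-ID enum:

	#define SUBSYS(_x) _x ## _cgrp_id,
	enum cgroup_subsys_id {
	#include <linux/cgroup_subsys.h>
		CGROUP_SUBSYS_COUNT,	/* misc_cgrp_id appears here when enabled */
	};
	#undef SUBSYS
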
diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
deleted file mode 100644
index 5f5730c1d324..000000000000
--- a/include/linux/cleancache.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_CLEANCACHE_H
-#define _LINUX_CLEANCACHE_H
-
-#include <linux/fs.h>
-#include <linux/exportfs.h>
-#include <linux/mm.h>
-
-#define CLEANCACHE_NO_POOL -1
-#define CLEANCACHE_NO_BACKEND -2
-#define CLEANCACHE_NO_BACKEND_SHARED -3
-
-#define CLEANCACHE_KEY_MAX 6
-
-/*
- * cleancache requires every file with a page in cleancache to have a
- * unique key unless/until the file is removed/truncated. For some
- * filesystems, the inode number is unique, but for "modern" filesystems
- * an exportable filehandle is required (see exportfs.h)
- */
-struct cleancache_filekey {
- union {
- ino_t ino;
- __u32 fh[CLEANCACHE_KEY_MAX];
- u32 key[CLEANCACHE_KEY_MAX];
- } u;
-};
-
-struct cleancache_ops {
- int (*init_fs)(size_t);
- int (*init_shared_fs)(uuid_t *uuid, size_t);
- int (*get_page)(int, struct cleancache_filekey,
- pgoff_t, struct page *);
- void (*put_page)(int, struct cleancache_filekey,
- pgoff_t, struct page *);
- void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
- void (*invalidate_inode)(int, struct cleancache_filekey);
- void (*invalidate_fs)(int);
-};
-
-extern int cleancache_register_ops(const struct cleancache_ops *ops);
-extern void __cleancache_init_fs(struct super_block *);
-extern void __cleancache_init_shared_fs(struct super_block *);
-extern int __cleancache_get_page(struct page *);
-extern void __cleancache_put_page(struct page *);
-extern void __cleancache_invalidate_page(struct address_space *, struct page *);
-extern void __cleancache_invalidate_inode(struct address_space *);
-extern void __cleancache_invalidate_fs(struct super_block *);
-
-#ifdef CONFIG_CLEANCACHE
-#define cleancache_enabled (1)
-static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping)
-{
- return mapping->host->i_sb->cleancache_poolid >= 0;
-}
-static inline bool cleancache_fs_enabled(struct page *page)
-{
- return cleancache_fs_enabled_mapping(page->mapping);
-}
-#else
-#define cleancache_enabled (0)
-#define cleancache_fs_enabled(_page) (0)
-#define cleancache_fs_enabled_mapping(_page) (0)
-#endif
-
-/*
- * The shim layer provided by these inline functions allows the compiler
- * to reduce all cleancache hooks to nothingness if CONFIG_CLEANCACHE
- * is disabled, to a single global variable check if CONFIG_CLEANCACHE
- * is enabled but no cleancache "backend" has dynamically enabled it,
- * and, for the most frequent cleancache ops, to a single global variable
- * check plus a superblock element comparison if CONFIG_CLEANCACHE is enabled
- * and a cleancache backend has dynamically enabled cleancache, but the
- * filesystem referenced by that cleancache op has not enabled cleancache.
- * As a result, CONFIG_CLEANCACHE can be enabled by default with essentially
- * no measurable performance impact.
- */
-
-static inline void cleancache_init_fs(struct super_block *sb)
-{
- if (cleancache_enabled)
- __cleancache_init_fs(sb);
-}
-
-static inline void cleancache_init_shared_fs(struct super_block *sb)
-{
- if (cleancache_enabled)
- __cleancache_init_shared_fs(sb);
-}
-
-static inline int cleancache_get_page(struct page *page)
-{
- if (cleancache_enabled && cleancache_fs_enabled(page))
- return __cleancache_get_page(page);
- return -1;
-}
-
-static inline void cleancache_put_page(struct page *page)
-{
- if (cleancache_enabled && cleancache_fs_enabled(page))
- __cleancache_put_page(page);
-}
-
-static inline void cleancache_invalidate_page(struct address_space *mapping,
- struct page *page)
-{
- /* careful... page->mapping is NULL sometimes when this is called */
- if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
- __cleancache_invalidate_page(mapping, page);
-}
-
-static inline void cleancache_invalidate_inode(struct address_space *mapping)
-{
- if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
- __cleancache_invalidate_inode(mapping);
-}
-
-static inline void cleancache_invalidate_fs(struct super_block *sb)
-{
- if (cleancache_enabled)
- __cleancache_invalidate_fs(sb);
-}
-
-#endif /* _LINUX_CLEANCACHE_H */
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 952ac035bab9..267cd06b54a0 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -42,6 +42,8 @@ struct dentry;
* struct clk_rate_request - Structure encoding the clk constraints that
* a clock user might require.
*
+ * Should be initialized by calling clk_hw_init_rate_request().
+ *
* @rate: Requested clock rate. This field will be adjusted by
* clock drivers according to hardware capabilities.
* @min_rate: Minimum rate imposed by clk users.
@@ -60,6 +62,15 @@ struct clk_rate_request {
struct clk_hw *best_parent_hw;
};
+void clk_hw_init_rate_request(const struct clk_hw *hw,
+ struct clk_rate_request *req,
+ unsigned long rate);
+void clk_hw_forward_rate_request(const struct clk_hw *core,
+ const struct clk_rate_request *old_req,
+ const struct clk_hw *parent,
+ struct clk_rate_request *req,
+ unsigned long parent_rate);
+
/**
 * struct clk_duty - Structure encoding the duty cycle ratio of a clock
*
@@ -118,8 +129,9 @@ struct clk_duty {
*
 * @recalc_rate: Recalculate the rate of this clock, by querying hardware. The
* parent rate is an input parameter. It is up to the caller to
- * ensure that the prepare_mutex is held across this call.
- * Returns the calculated rate. Optional, but recommended - if
+ * ensure that the prepare_mutex is held across this call. If the
+ * driver cannot figure out a rate for this clock, it must return
+ * 0. Returns the calculated rate. Optional, but recommended - if
* this op is not set then clock rate will be initialized to 0.
*
* @round_rate: Given a target rate as input, returns the closest rate actually
@@ -189,7 +201,7 @@ struct clk_duty {
* and >= numerator) Return 0 on success, otherwise -EERROR.
*
* @init: Perform platform-specific initialization magic.
- * This is not not used by any of the basic clock types.
+ * This is not used by any of the basic clock types.
 *		This callback exists for HW which needs to perform some
 *		initialisation magic for CCF to get an accurate view of the
 *		clock. It may also be used when dynamic resource allocation is
@@ -342,7 +354,7 @@ struct clk_fixed_rate {
unsigned long flags;
};
-#define CLK_FIXED_RATE_PARENT_ACCURACY BIT(0)
+#define CLK_FIXED_RATE_PARENT_ACCURACY BIT(0)
extern const struct clk_ops clk_fixed_rate_ops;
struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
@@ -350,7 +362,7 @@ struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
const char *parent_name, const struct clk_hw *parent_hw,
const struct clk_parent_data *parent_data, unsigned long flags,
unsigned long fixed_rate, unsigned long fixed_accuracy,
- unsigned long clk_fixed_flags);
+ unsigned long clk_fixed_flags, bool devm);
struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned long fixed_rate);
@@ -365,7 +377,20 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
*/
#define clk_hw_register_fixed_rate(dev, name, parent_name, flags, fixed_rate) \
__clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), NULL, \
- NULL, (flags), (fixed_rate), 0, 0)
+ NULL, (flags), (fixed_rate), 0, 0, false)
+
+/**
+ * devm_clk_hw_register_fixed_rate - register fixed-rate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define devm_clk_hw_register_fixed_rate(dev, name, parent_name, flags, fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (fixed_rate), 0, 0, true)
/**
* clk_hw_register_fixed_rate_parent_hw - register fixed-rate clock with
* the clock framework
@@ -378,7 +403,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
#define clk_hw_register_fixed_rate_parent_hw(dev, name, parent_hw, flags, \
fixed_rate) \
__clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw), \
- NULL, (flags), (fixed_rate), 0, 0)
+ NULL, (flags), (fixed_rate), 0, 0, false)
/**
* clk_hw_register_fixed_rate_parent_data - register fixed-rate clock with
* the clock framework
@@ -392,7 +417,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
fixed_rate) \
__clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
(parent_data), (flags), (fixed_rate), 0, \
- 0)
+ 0, false)
/**
* clk_hw_register_fixed_rate_with_accuracy - register fixed-rate clock with
* the clock framework
@@ -408,7 +433,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
fixed_accuracy) \
__clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), \
NULL, NULL, (flags), (fixed_rate), \
- (fixed_accuracy), 0)
+ (fixed_accuracy), 0, false)
/**
* clk_hw_register_fixed_rate_with_accuracy_parent_hw - register fixed-rate
* clock with the clock framework
@@ -423,7 +448,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
parent_hw, flags, fixed_rate, fixed_accuracy) \
	__clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw), \
				     NULL, (flags), (fixed_rate), \
- (fixed_accuracy), 0)
+ (fixed_accuracy), 0, false)
/**
* clk_hw_register_fixed_rate_with_accuracy_parent_data - register fixed-rate
* clock with the clock framework
@@ -438,7 +463,21 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
parent_data, flags, fixed_rate, fixed_accuracy) \
__clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
(parent_data), NULL, (flags), \
- (fixed_rate), (fixed_accuracy), 0)
+ (fixed_rate), (fixed_accuracy), 0, false)
+/**
+ * clk_hw_register_fixed_rate_parent_accuracy - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define clk_hw_register_fixed_rate_parent_accuracy(dev, name, parent_data, \
+ flags, fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (fixed_rate), 0, \
+ CLK_FIXED_RATE_PARENT_ACCURACY, false)
void clk_unregister_fixed_rate(struct clk *clk);
void clk_hw_unregister_fixed_rate(struct clk_hw *hw);
@@ -490,6 +529,13 @@ struct clk_hw *__clk_hw_register_gate(struct device *dev,
unsigned long flags,
void __iomem *reg, u8 bit_idx,
u8 clk_gate_flags, spinlock_t *lock);
+struct clk_hw *__devm_clk_hw_register_gate(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data,
+ unsigned long flags,
+ void __iomem *reg, u8 bit_idx,
+ u8 clk_gate_flags, spinlock_t *lock);
struct clk *clk_register_gate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 bit_idx,
@@ -522,9 +568,9 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
* @clk_gate_flags: gate-specific flags for this clock
* @lock: shared register lock for this clock
*/
-#define clk_hw_register_gate_parent_hw(dev, name, parent_name, flags, reg, \
+#define clk_hw_register_gate_parent_hw(dev, name, parent_hw, flags, reg, \
bit_idx, clk_gate_flags, lock) \
- __clk_hw_register_gate((dev), NULL, (name), (parent_name), NULL, \
+ __clk_hw_register_gate((dev), NULL, (name), NULL, (parent_hw), \
NULL, (flags), (reg), (bit_idx), \
(clk_gate_flags), (lock))
/**
@@ -539,9 +585,25 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
* @clk_gate_flags: gate-specific flags for this clock
* @lock: shared register lock for this clock
*/
-#define clk_hw_register_gate_parent_data(dev, name, parent_name, flags, reg, \
+#define clk_hw_register_gate_parent_data(dev, name, parent_data, flags, reg, \
bit_idx, clk_gate_flags, lock) \
- __clk_hw_register_gate((dev), NULL, (name), (parent_name), NULL, \
+ __clk_hw_register_gate((dev), NULL, (name), NULL, NULL, (parent_data), \
+ (flags), (reg), (bit_idx), \
+ (clk_gate_flags), (lock))
+/**
+ * devm_clk_hw_register_gate - register a gate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of this clock's parent
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_gate(dev, name, parent_name, flags, reg, bit_idx,\
+ clk_gate_flags, lock) \
+ __devm_clk_hw_register_gate((dev), NULL, (name), (parent_name), NULL, \
NULL, (flags), (reg), (bit_idx), \
(clk_gate_flags), (lock))
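
A hedged sketch of the managed gate in use; base, FOO_GATE_CTRL and foo_lock are driver-side assumptions:

/* gate controlled by bit 3 of an assumed control register */
hw = devm_clk_hw_register_gate(dev, "foo-gate", "foo-osc", 0,
			       base + FOO_GATE_CTRL, 3, 0, &foo_lock);
if (IS_ERR(hw))
	return PTR_ERR(hw);
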
void clk_unregister_gate(struct clk *clk);
@@ -629,6 +691,12 @@ long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
unsigned long rate, unsigned long *prate,
const struct clk_div_table *table, u8 width,
unsigned long flags, unsigned int val);
+int divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags);
+int divider_ro_determine_rate(struct clk_hw *hw, struct clk_rate_request *req,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags, unsigned int val);
int divider_get_val(unsigned long rate, unsigned long parent_rate,
const struct clk_div_table *table, u8 width,
unsigned long flags);
@@ -639,6 +707,12 @@ struct clk_hw *__clk_hw_register_divider(struct device *dev,
const struct clk_parent_data *parent_data, unsigned long flags,
void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
const struct clk_div_table *table, spinlock_t *lock);
+struct clk_hw *__devm_clk_hw_register_divider(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
+ const struct clk_div_table *table, spinlock_t *lock);
struct clk *clk_register_divider_table(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
@@ -779,6 +853,63 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
(parent_data), (flags), (reg), (shift), \
(width), (clk_divider_flags), (table), \
(lock))
+/**
+ * devm_clk_hw_register_divider - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_divider(dev, name, parent_name, flags, reg, shift, \
+ width, clk_divider_flags, lock) \
+ __devm_clk_hw_register_divider((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (reg), (shift), (width), \
+ (clk_divider_flags), NULL, (lock))
+/**
+ * devm_clk_hw_register_divider_parent_hw - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_divider_parent_hw(dev, name, parent_hw, flags, \
+ reg, shift, width, \
+ clk_divider_flags, lock) \
+ __devm_clk_hw_register_divider((dev), NULL, (name), NULL, \
+ (parent_hw), NULL, (flags), (reg), \
+ (shift), (width), (clk_divider_flags), \
+ NULL, (lock))
+/**
+ * devm_clk_hw_register_divider_table - register a table based divider clock
+ * with the clock framework (devres variant)
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @table: array of divider/value pairs ending with a div set to 0
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_divider_table(dev, name, parent_name, flags, \
+ reg, shift, width, \
+ clk_divider_flags, table, lock) \
+ __devm_clk_hw_register_divider((dev), NULL, (name), (parent_name), \
+ NULL, NULL, (flags), (reg), (shift), \
+ (width), (clk_divider_flags), (table), \
+ (lock))
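
A sketch of the table-based devres variant under the same assumptions; note the sentinel entry with .div = 0 that terminates the table:

static const struct clk_div_table foo_div_table[] = {
	{ .val = 0, .div = 1 },
	{ .val = 1, .div = 2 },
	{ .val = 2, .div = 4 },
	{ /* sentinel: .div = 0 */ }
};

hw = devm_clk_hw_register_divider_table(dev, "foo-div", "foo-gate", 0,
					base + FOO_DIV_REG, 0, 2, 0,
					foo_div_table, &foo_lock);
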
void clk_unregister_divider(struct clk *clk);
void clk_hw_unregister_divider(struct clk_hw *hw);
@@ -815,7 +946,7 @@ void clk_hw_unregister_divider(struct clk_hw *hw);
struct clk_mux {
struct clk_hw hw;
void __iomem *reg;
- u32 *table;
+ const u32 *table;
u32 mask;
u8 shift;
u8 flags;
@@ -840,11 +971,18 @@ struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
const struct clk_hw **parent_hws,
const struct clk_parent_data *parent_data,
unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
- u8 clk_mux_flags, u32 *table, spinlock_t *lock);
+ u8 clk_mux_flags, const u32 *table, spinlock_t *lock);
+struct clk_hw *__devm_clk_hw_register_mux(struct device *dev, struct device_node *np,
+ const char *name, u8 num_parents,
+ const char * const *parent_names,
+ const struct clk_hw **parent_hws,
+ const struct clk_parent_data *parent_data,
+ unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
+ u8 clk_mux_flags, const u32 *table, spinlock_t *lock);
struct clk *clk_register_mux_table(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents,
unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
- u8 clk_mux_flags, u32 *table, spinlock_t *lock);
+ u8 clk_mux_flags, const u32 *table, spinlock_t *lock);
#define clk_register_mux(dev, name, parent_names, num_parents, flags, reg, \
shift, width, clk_mux_flags, lock) \
@@ -858,6 +996,13 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
(parent_names), NULL, NULL, (flags), (reg), \
(shift), (mask), (clk_mux_flags), (table), \
(lock))
+#define clk_hw_register_mux_table_parent_data(dev, name, parent_data, \
+ num_parents, flags, reg, shift, mask, \
+ clk_mux_flags, table, lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), \
+ NULL, NULL, (parent_data), (flags), (reg), \
+ (shift), (mask), (clk_mux_flags), (table), \
+ (lock))
#define clk_hw_register_mux(dev, name, parent_names, num_parents, flags, reg, \
shift, width, clk_mux_flags, lock) \
__clk_hw_register_mux((dev), NULL, (name), (num_parents), \
@@ -875,10 +1020,37 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
__clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, NULL, \
(parent_data), (flags), (reg), (shift), \
BIT((width)) - 1, (clk_mux_flags), NULL, (lock))
-
-int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
+#define clk_hw_register_mux_parent_data_table(dev, name, parent_data, \
+ num_parents, flags, reg, shift, \
+ width, clk_mux_flags, table, \
+ lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, NULL, \
+ (parent_data), (flags), (reg), (shift), \
+ BIT((width)) - 1, (clk_mux_flags), table, (lock))
+#define devm_clk_hw_register_mux(dev, name, parent_names, num_parents, flags, reg, \
+ shift, width, clk_mux_flags, lock) \
+ __devm_clk_hw_register_mux((dev), NULL, (name), (num_parents), \
+ (parent_names), NULL, NULL, (flags), (reg), \
+ (shift), BIT((width)) - 1, (clk_mux_flags), \
+ NULL, (lock))
+#define devm_clk_hw_register_mux_parent_hws(dev, name, parent_hws, \
+ num_parents, flags, reg, shift, \
+ width, clk_mux_flags, lock) \
+ __devm_clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, \
+ (parent_hws), NULL, (flags), (reg), \
+ (shift), BIT((width)) - 1, \
+ (clk_mux_flags), NULL, (lock))
+#define devm_clk_hw_register_mux_parent_data_table(dev, name, parent_data, \
+ num_parents, flags, reg, shift, \
+ width, clk_mux_flags, table, \
+ lock) \
+ __devm_clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, \
+ NULL, (parent_data), (flags), (reg), (shift), \
+ BIT((width)) - 1, (clk_mux_flags), table, (lock))
+
+int clk_mux_val_to_index(struct clk_hw *hw, const u32 *table, unsigned int flags,
unsigned int val);
-unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index);
+unsigned int clk_mux_index_to_val(const u32 *table, unsigned int flags, u8 index);
void clk_unregister_mux(struct clk *clk);
void clk_hw_unregister_mux(struct clk_hw *hw);
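
Likewise for the managed mux, with illustrative names; a 1-bit field at bit 4 selects between two parents:

static const char * const foo_mux_parents[] = { "foo-osc", "foo-pll" };

hw = devm_clk_hw_register_mux(dev, "foo-mux", foo_mux_parents,
			      ARRAY_SIZE(foo_mux_parents), 0,
			      base + FOO_MUX_REG, 4, 1, 0, &foo_lock);
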
@@ -914,7 +1086,20 @@ struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div);
void clk_hw_unregister_fixed_factor(struct clk_hw *hw);
+struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
+ unsigned int mult, unsigned int div);
+struct clk_hw *devm_clk_hw_register_fixed_factor_index(struct device *dev,
+ const char *name, unsigned int index, unsigned long flags,
+ unsigned int mult, unsigned int div);
+struct clk_hw *devm_clk_hw_register_fixed_factor_parent_hw(struct device *dev,
+ const char *name, const struct clk_hw *parent_hw,
+ unsigned long flags, unsigned int mult, unsigned int div);
+
+struct clk_hw *clk_hw_register_fixed_factor_parent_hw(struct device *dev,
+ const char *name, const struct clk_hw *parent_hw,
+ unsigned long flags, unsigned int mult, unsigned int div);
/**
* struct clk_fractional_divider - adjustable fractional divider clock
*
@@ -936,6 +1121,12 @@ void clk_hw_unregister_fixed_factor(struct clk_hw *hw);
* CLK_FRAC_DIVIDER_BIG_ENDIAN - By default little endian register accesses are
* used for the divider register. Setting this flag makes the register
* accesses big endian.
+ * CLK_FRAC_DIVIDER_POWER_OF_TWO_PS - By default the resulting fraction might
+ * be saturated, leaving the caller quite far from a good enough
+ * approximation. Setting this flag instead allows the requested rate to be
+ * shifted left by a few bits when it is too small to satisfy the desired
+ * range of the denominator. It assumes that a power-of-two capable
+ * prescaler exists on the caller's side.
*/
struct clk_fractional_divider {
struct clk_hw hw;
@@ -957,8 +1148,8 @@ struct clk_fractional_divider {
#define CLK_FRAC_DIVIDER_ZERO_BASED BIT(0)
#define CLK_FRAC_DIVIDER_BIG_ENDIAN BIT(1)
+#define CLK_FRAC_DIVIDER_POWER_OF_TWO_PS BIT(2)
-extern const struct clk_ops clk_fractional_divider_ops;
struct clk *clk_register_fractional_divider(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,
@@ -1004,9 +1195,9 @@ struct clk_multiplier {
#define to_clk_multiplier(_hw) container_of(_hw, struct clk_multiplier, hw)
-#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0)
+#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0)
#define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1)
-#define CLK_MULTIPLIER_BIG_ENDIAN BIT(2)
+#define CLK_MULTIPLIER_BIG_ENDIAN BIT(2)
extern const struct clk_ops clk_multiplier_ops;
@@ -1062,6 +1253,13 @@ struct clk_hw *clk_hw_register_composite_pdata(struct device *dev,
struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags);
+struct clk_hw *devm_clk_hw_register_composite_pdata(struct device *dev,
+ const char *name, const struct clk_parent_data *parent_data,
+ int num_parents,
+ struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+ struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+ struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+ unsigned long flags);
void clk_hw_unregister_composite(struct clk_hw *hw);
struct clk *clk_register(struct device *dev, struct clk_hw *hw);
@@ -1072,10 +1270,8 @@ int __must_check devm_clk_hw_register(struct device *dev, struct clk_hw *hw);
int __must_check of_clk_hw_register(struct device_node *node, struct clk_hw *hw);
void clk_unregister(struct clk *clk);
-void devm_clk_unregister(struct device *dev, struct clk *clk);
void clk_hw_unregister(struct clk_hw *hw);
-void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw);
/* helper functions */
const char *__clk_get_name(const struct clk *clk);
@@ -1088,6 +1284,11 @@ static inline struct clk_hw *__clk_get_hw(struct clk *clk)
return (struct clk_hw *)clk;
}
#endif
+
+struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id);
+struct clk *devm_clk_hw_get_clk(struct device *dev, struct clk_hw *hw,
+ const char *con_id);
+
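A sketch of the new accessors: a provider hands out a consumer handle to one of its own clk_hws, and the devm variant releases the struct clk together with @dev:

struct clk *clk = devm_clk_hw_get_clk(dev, hw, NULL);

if (IS_ERR(clk))
	return PTR_ERR(clk);
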
unsigned int clk_hw_get_num_parents(const struct clk_hw *hw);
struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw);
struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw,
@@ -1096,7 +1297,6 @@ int clk_hw_get_parent_index(struct clk_hw *hw);
int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *new_parent);
unsigned int __clk_get_enable_count(struct clk *clk);
unsigned long clk_hw_get_rate(const struct clk_hw *hw);
-unsigned long __clk_get_flags(struct clk *clk);
unsigned long clk_hw_get_flags(const struct clk_hw *hw);
#define clk_hw_can_set_rate_parent(hw) \
(clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT)
@@ -1115,6 +1315,8 @@ int clk_mux_determine_rate_flags(struct clk_hw *hw,
struct clk_rate_request *req,
unsigned long flags);
void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
+void clk_hw_get_rate_range(struct clk_hw *hw, unsigned long *min_rate,
+ unsigned long *max_rate);
void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
unsigned long max_rate);
@@ -1314,7 +1516,7 @@ int devm_of_clk_add_hw_provider(struct device *dev,
void *data),
void *data);
void of_clk_del_provider(struct device_node *np);
-void devm_of_clk_del_provider(struct device *dev);
+
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
void *data);
struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec,
@@ -1351,7 +1553,7 @@ static inline int devm_of_clk_add_hw_provider(struct device *dev,
return 0;
}
static inline void of_clk_del_provider(struct device_node *np) {}
-static inline void devm_of_clk_del_provider(struct device *dev) {}
+
static inline struct clk *of_clk_src_simple_get(
struct of_phandle_args *clkspec, void *data)
{
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 7fd6a1febcf4..1ef013324237 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -92,7 +92,7 @@ struct clk_bulk_data {
#ifdef CONFIG_COMMON_CLK
/**
- * clk_notifier_register: register a clock rate-change notifier callback
+ * clk_notifier_register - register a clock rate-change notifier callback
* @clk: clock whose rate we are interested in
* @nb: notifier block with callback function pointer
*
@@ -103,13 +103,24 @@ struct clk_bulk_data {
int clk_notifier_register(struct clk *clk, struct notifier_block *nb);
/**
- * clk_notifier_unregister: unregister a clock rate-change notifier callback
+ * clk_notifier_unregister - unregister a clock rate-change notifier callback
* @clk: clock whose rate we are no longer interested in
* @nb: notifier block which will be unregistered
*/
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
/**
+ * devm_clk_notifier_register - register a managed rate-change notifier callback
+ * @dev: device for clock "consumer"
+ * @clk: clock whose rate we are interested in
+ * @nb: notifier block with callback function pointer
+ *
+ * Returns 0 on success, -EERROR otherwise
+ */
+int devm_clk_notifier_register(struct device *dev, struct clk *clk,
+ struct notifier_block *nb);
+
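A hedged sketch of a managed notifier; foo_rate_notify and foo_nb are hypothetical driver-side names:

static int foo_rate_notify(struct notifier_block *nb,
			   unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;

	if (event == POST_RATE_CHANGE)
		pr_debug("clk rate: %lu -> %lu\n",
			 ndata->old_rate, ndata->new_rate);

	return NOTIFY_OK;
}

static struct notifier_block foo_nb = {
	.notifier_call = foo_rate_notify,
};

/* in probe: unregistered automatically when dev is unbound */
ret = devm_clk_notifier_register(dev, clk, &foo_nb);
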
+/**
* clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion)
* for a clock source.
* @clk: clock source
@@ -150,7 +161,7 @@ int clk_get_phase(struct clk *clk);
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);
/**
- * clk_get_duty_cycle - return the duty cycle ratio of a clock signal
+ * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
* @clk: clock signal source
* @scale: scaling factor to be applied to represent the ratio as an integer
*
@@ -186,6 +197,13 @@ static inline int clk_notifier_unregister(struct clk *clk,
return -ENOTSUPP;
}
+static inline int devm_clk_notifier_register(struct device *dev,
+ struct clk *clk,
+ struct notifier_block *nb)
+{
+ return -ENOTSUPP;
+}
+
static inline long clk_get_accuracy(struct clk *clk)
{
return -ENOTSUPP;
@@ -220,6 +238,7 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q)
#endif
+#ifdef CONFIG_HAVE_CLK_PREPARE
/**
* clk_prepare - prepare a clock source
* @clk: clock source
@@ -228,10 +247,26 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q)
*
* Must not be called from within atomic context.
*/
-#ifdef CONFIG_HAVE_CLK_PREPARE
int clk_prepare(struct clk *clk);
int __must_check clk_bulk_prepare(int num_clks,
const struct clk_bulk_data *clks);
+
+/**
+ * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it.
+ * @clk: clock source
+ *
+ * Returns true if clk_prepare() implicitly enables the clock, effectively
+ * making clk_enable()/clk_disable() no-ops, false otherwise.
+ *
+ * This is of interest mainly to the power management code where actually
+ * disabling the clock also requires unpreparing it to have any material
+ * effect.
+ *
+ * Regardless of the value returned here, the caller must always invoke
+ * clk_enable() or clk_prepare_enable() and counterparts for usage counts
+ * to be right.
+ */
+bool clk_is_enabled_when_prepared(struct clk *clk);
#else
static inline int clk_prepare(struct clk *clk)
{
@@ -245,6 +280,11 @@ clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks)
might_sleep();
return 0;
}
+
+static inline bool clk_is_enabled_when_prepared(struct clk *clk)
+{
+ return false;
+}
#endif
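
A hedged sketch of the intended use from power-management code, assuming a driver-private clock that was prepared at probe time:

/* disabling alone is a no-op for such clocks; unprepare to really gate */
clk_disable(priv->clk);
if (clk_is_enabled_when_prepared(priv->clk))
	clk_unprepare(priv->clk);
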
/**
@@ -403,15 +443,16 @@ int __must_check devm_clk_bulk_get_all(struct device *dev,
* @dev: device for clock "consumer"
* @id: clock consumer ID
*
- * Returns a struct clk corresponding to the clock producer, or
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
* valid IS_ERR() condition containing errno. The implementation
* uses @dev and @id to determine the clock consumer, and thereby
* the clock producer. (IOW, @id may be identical strings, but
* clk_get may return different clock producers depending on @dev.)
*
- * Drivers must assume that the clock source is not enabled.
- *
- * devm_clk_get should not be called from within interrupt context.
+ * Drivers must assume that the clock source is neither prepared nor
+ * enabled.
*
* The clock will automatically be freed when the device is unbound
* from the bus.
@@ -419,17 +460,114 @@ int __must_check devm_clk_bulk_get_all(struct device *dev,
struct clk *devm_clk_get(struct device *dev, const char *id);
/**
+ * devm_clk_get_prepared - devm_clk_get() + clk_prepare()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. (IOW, @id may be identical strings, but
+ * clk_get may return different clock producers depending on @dev.)
+ *
+ * The returned clk (if valid) is prepared. Drivers must however assume
+ * that the clock is not enabled.
+ *
+ * The clock will automatically be unprepared and freed when the device
+ * is unbound from the bus.
+ */
+struct clk *devm_clk_get_prepared(struct device *dev, const char *id);
+
+/**
+ * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. (IOW, @id may be identical strings, but
+ * clk_get may return different clock producers depending on @dev.)
+ *
+ * The returned clk (if valid) is prepared and enabled.
+ *
+ * The clock will automatically be disabled, unprepared and freed
+ * when the device is unbound from the bus.
+ */
+struct clk *devm_clk_get_enabled(struct device *dev, const char *id);
+
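A sketch of the combined helper in a probe path; the consumer ID is illustrative:

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* acquired, prepared and enabled in one step; all undone on unbind */
	clk = devm_clk_get_enabled(&pdev->dev, "bus");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return 0;
}
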
+/**
* devm_clk_get_optional - lookup and obtain a managed reference to an optional
* clock producer.
* @dev: device for clock "consumer"
* @id: clock consumer ID
*
- * Behaves the same as devm_clk_get() except where there is no clock producer.
- * In this case, instead of returning -ENOENT, the function returns NULL.
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get().
+ *
+ * Drivers must assume that the clock source is neither prepared nor
+ * enabled.
+ *
+ * The clock will automatically be freed when the device is unbound
+ * from the bus.
*/
struct clk *devm_clk_get_optional(struct device *dev, const char *id);
/**
+ * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get_prepared().
+ *
+ * The returned clk (if valid) is prepared. Drivers must however
+ * assume that the clock is not enabled.
+ *
+ * The clock will automatically be unprepared and freed when the
+ * device is unbound from the bus.
+ */
+struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id);
+
+/**
+ * devm_clk_get_optional_enabled - devm_clk_get_optional() +
+ * clk_prepare_enable()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get_enabled().
+ *
+ * The returned clk (if valid) is prepared and enabled.
+ *
+ * The clock will automatically be disabled, unprepared and freed
+ * when the device is unbound from the bus.
+ */
+struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);
+
+/**
* devm_get_clk_from_child - lookup and obtain a managed reference to a
* clock producer from child node.
* @dev: device for clock "consumer"
@@ -661,7 +799,7 @@ int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);
*
* Returns true if @parent is a possible parent for @clk, false otherwise.
*/
-bool clk_has_parent(struct clk *clk, struct clk *parent);
+bool clk_has_parent(const struct clk *clk, const struct clk *parent);
/**
* clk_set_rate_range - set a rate range for a clock source
@@ -773,12 +911,36 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id)
return NULL;
}
+static inline struct clk *devm_clk_get_prepared(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
+static inline struct clk *devm_clk_get_enabled(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
static inline struct clk *devm_clk_get_optional(struct device *dev,
const char *id)
{
return NULL;
}
+static inline struct clk *devm_clk_get_optional_prepared(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
+static inline struct clk *devm_clk_get_optional_enabled(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks)
{
@@ -947,6 +1109,17 @@ static inline void clk_bulk_disable_unprepare(int num_clks,
}
/**
+ * clk_drop_range - Reset any range set on that clock
+ * @clk: clock source
+ *
+ * Returns success (0) or negative errno.
+ */
+static inline int clk_drop_range(struct clk *clk)
+{
+ return clk_set_rate_range(clk, 0, ULONG_MAX);
+}
+
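A sketch of the intended pairing with clk_set_rate_range(); the rates are illustrative:

/* constrain the clock while streaming, then lift the constraint */
ret = clk_set_rate_range(clk, 100000000, 200000000);
if (ret)
	return ret;
/* ... streaming ... */
ret = clk_drop_range(clk);
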
+/**
* clk_get_optional - lookup and obtain a reference to an optional clock
* producer.
* @dev: device for clock "consumer"
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index 390437887b46..7af499bdbecb 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -12,6 +12,11 @@
#ifndef AT91_PMC_H
#define AT91_PMC_H
+#include <linux/bits.h>
+
+#define AT91_PMC_V1 (1) /* PMC version 1 */
+#define AT91_PMC_V2 (2) /* PMC version 2 [SAM9X60] */
+
#define AT91_PMC_SCER 0x00 /* System Clock Enable Register */
#define AT91_PMC_SCDR 0x04 /* System Clock Disable Register */
@@ -30,16 +35,35 @@
#define AT91_PMC_HCK0 (1 << 16) /* AHB Clock (USB host) [AT91SAM9261 only] */
#define AT91_PMC_HCK1 (1 << 17) /* AHB Clock (LCD) [AT91SAM9261 only] */
+#define AT91_PMC_PLL_CTRL0 0x0C /* PLL Control Register 0 [for SAM9X60] */
+#define AT91_PMC_PLL_CTRL0_ENPLL (1 << 28) /* Enable PLL */
+#define AT91_PMC_PLL_CTRL0_ENPLLCK (1 << 29) /* Enable PLL clock for PMC */
+#define AT91_PMC_PLL_CTRL0_ENLOCK (1 << 31) /* Enable PLL lock */
+
+#define AT91_PMC_PLL_CTRL1 0x10 /* PLL Control Register 1 [for SAM9X60] */
+
#define AT91_PMC_PCER 0x10 /* Peripheral Clock Enable Register */
#define AT91_PMC_PCDR 0x14 /* Peripheral Clock Disable Register */
#define AT91_PMC_PCSR 0x18 /* Peripheral Clock Status Register */
+#define AT91_PMC_PLL_ACR 0x18 /* PLL Analog Control Register [for SAM9X60] */
+#define AT91_PMC_PLL_ACR_DEFAULT_UPLL UL(0x12020010) /* Default PLL ACR value for UPLL */
+#define AT91_PMC_PLL_ACR_DEFAULT_PLLA UL(0x00020010) /* Default PLL ACR value for PLLA */
+#define AT91_PMC_PLL_ACR_UTMIVR (1 << 12) /* UPLL Voltage regulator Control */
+#define AT91_PMC_PLL_ACR_UTMIBG (1 << 13) /* UPLL Bandgap Control */
+
#define AT91_CKGR_UCKR 0x1C /* UTMI Clock Register [some SAM9] */
#define AT91_PMC_UPLLEN (1 << 16) /* UTMI PLL Enable */
#define AT91_PMC_UPLLCOUNT (0xf << 20) /* UTMI PLL Start-up Time */
#define AT91_PMC_BIASEN (1 << 24) /* UTMI BIAS Enable */
#define AT91_PMC_BIASCOUNT (0xf << 28) /* UTMI BIAS Start-up Time */
+#define AT91_PMC_PLL_UPDT 0x1C /* PMC PLL update register [for SAM9X60] */
+#define AT91_PMC_PLL_UPDT_UPDATE (1 << 8) /* Update PLL settings */
+#define AT91_PMC_PLL_UPDT_ID (1 << 0) /* PLL ID */
+#define AT91_PMC_PLL_UPDT_ID_MSK (0xf) /* PLL ID mask */
+#define AT91_PMC_PLL_UPDT_STUPTIM (0xff << 16) /* Startup time */
+
#define AT91_CKGR_MOR 0x20 /* Main Oscillator Register [not on SAM9RL] */
#define AT91_PMC_MOSCEN (1 << 0) /* Main Oscillator Enable */
#define AT91_PMC_OSCBYPASS (1 << 1) /* Oscillator Bypass */
@@ -56,6 +80,10 @@
#define AT91_PMC_MAINRDY (1 << 16) /* Main Clock Ready */
#define AT91_CKGR_PLLAR 0x28 /* PLL A Register */
+
+#define AT91_PMC_RATIO 0x2c /* Processor clock ratio register [SAMA7G5 only] */
+#define AT91_PMC_RATIO_RATIO (0xf) /* CPU clock ratio. */
+
#define AT91_CKGR_PLLBR 0x2c /* PLL B Register */
#define AT91_PMC_DIV (0xff << 0) /* Divider */
#define AT91_PMC_PLLCOUNT (0x3f << 8) /* PLL Counter */
@@ -115,6 +143,34 @@
#define AT91_PMC_PLLADIV2_ON (1 << 12)
#define AT91_PMC_H32MXDIV BIT(24)
+#define AT91_PMC_MCR_V2 0x30 /* Master Clock Register [SAMA7G5 only] */
+#define AT91_PMC_MCR_V2_ID_MSK (0xF)
+#define AT91_PMC_MCR_V2_ID(_id) ((_id) & AT91_PMC_MCR_V2_ID_MSK)
+#define AT91_PMC_MCR_V2_CMD (1 << 7)
+#define AT91_PMC_MCR_V2_DIV (7 << 8)
+#define AT91_PMC_MCR_V2_DIV1 (0 << 8)
+#define AT91_PMC_MCR_V2_DIV2 (1 << 8)
+#define AT91_PMC_MCR_V2_DIV4 (2 << 8)
+#define AT91_PMC_MCR_V2_DIV8 (3 << 8)
+#define AT91_PMC_MCR_V2_DIV16 (4 << 8)
+#define AT91_PMC_MCR_V2_DIV32 (5 << 8)
+#define AT91_PMC_MCR_V2_DIV64 (6 << 8)
+#define AT91_PMC_MCR_V2_DIV3 (7 << 8)
+#define AT91_PMC_MCR_V2_CSS (0x1F << 16)
+#define AT91_PMC_MCR_V2_CSS_MD_SLCK (0 << 16)
+#define AT91_PMC_MCR_V2_CSS_TD_SLCK (1 << 16)
+#define AT91_PMC_MCR_V2_CSS_MAINCK (2 << 16)
+#define AT91_PMC_MCR_V2_CSS_MCK0 (3 << 16)
+#define AT91_PMC_MCR_V2_CSS_SYSPLL (5 << 16)
+#define AT91_PMC_MCR_V2_CSS_DDRPLL (6 << 16)
+#define AT91_PMC_MCR_V2_CSS_IMGPLL (7 << 16)
+#define AT91_PMC_MCR_V2_CSS_BAUDPLL (8 << 16)
+#define AT91_PMC_MCR_V2_CSS_AUDIOPLL (9 << 16)
+#define AT91_PMC_MCR_V2_CSS_ETHPLL (10 << 16)
+#define AT91_PMC_MCR_V2_EN (1 << 28)
+
+#define AT91_PMC_XTALF 0x34 /* Main XTAL Frequency Register [SAMA7G5 only] */
+
#define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */
#define AT91_PMC_USBS (0x1 << 0) /* USB OHCI Input clock selection */
#define AT91_PMC_USBS_PLLA (0 << 0)
@@ -153,6 +209,7 @@
#define AT91_PMC_MOSCRCS (1 << 17) /* Main On-Chip RC [some SAM9] */
#define AT91_PMC_CFDEV (1 << 18) /* Clock Failure Detector Event [some SAM9] */
#define AT91_PMC_GCKRDY (1 << 24) /* Generated Clocks */
+#define AT91_PMC_MCKXRDY (1 << 26) /* Master Clock x [x=1..4] Ready Status */
#define AT91_PMC_IMR 0x6c /* Interrupt Mask Register */
#define AT91_PMC_FSMR 0x70 /* Fast Startup Mode Register */
@@ -180,6 +237,8 @@
#define AT91_PMC_WPVS (0x1 << 0) /* Write Protect Violation Status */
#define AT91_PMC_WPVSRC (0xffff << 8) /* Write Protect Violation Source */
+#define AT91_PMC_PLL_ISR0 0xEC /* PLL Interrupt Status Register 0 [SAM9X60 only] */
+
#define AT91_PMC_PCER1 0x100 /* Peripheral Clock Enable Register 1 [SAMA5 only]*/
#define AT91_PMC_PCDR1 0x104 /* Peripheral Clock Enable Register 1 */
#define AT91_PMC_PCSR1 0x108 /* Peripheral Clock Enable Register 1 */
diff --git a/include/linux/clk/davinci.h b/include/linux/clk/davinci.h
index 8a7b5cd7eac0..f6ebab6228c2 100644
--- a/include/linux/clk/davinci.h
+++ b/include/linux/clk/davinci.h
@@ -28,13 +28,5 @@ int dm365_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgch
int dm365_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
int dm365_psc_init(struct device *dev, void __iomem *base);
#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM644x
-int dm644x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-int dm644x_psc_init(struct device *dev, void __iomem *base);
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM646x
-int dm646x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-int dm646x_psc_init(struct device *dev, void __iomem *base);
-#endif
#endif /* __LINUX_CLK_DAVINCI_PLL_H___ */
diff --git a/include/linux/clk/imx.h b/include/linux/clk/imx.h
new file mode 100644
index 000000000000..75a0d9696552
--- /dev/null
+++ b/include/linux/clk/imx.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Freescale Semiconductor, Inc.
+ *
+ * Author: Lee Jones <lee.jones@linaro.org>
+ */
+
+#ifndef __LINUX_CLK_IMX_H
+#define __LINUX_CLK_IMX_H
+
+#include <linux/types.h>
+
+void imx6sl_set_wait_clk(bool enter);
+
+#endif
diff --git a/include/linux/clk/pxa.h b/include/linux/clk/pxa.h
new file mode 100644
index 000000000000..736b8bb91bd7
--- /dev/null
+++ b/include/linux/clk/pxa.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+extern int pxa25x_clocks_init(void __iomem *regs);
+extern int pxa27x_clocks_init(void __iomem *regs);
+extern int pxa3xx_clocks_init(void __iomem *regs, void __iomem *oscc_reg);
+
+#ifdef CONFIG_PXA3xx
+extern unsigned pxa3xx_get_clk_frequency_khz(int);
+extern void pxa3xx_clk_update_accr(u32 disable, u32 enable, u32 xclkcfg, u32 mask);
+#else
+#define pxa3xx_get_clk_frequency_khz(x) (0)
+#define pxa3xx_clk_update_accr(disable, enable, xclkcfg, mask) do { } while (0)
+#endif
diff --git a/include/linux/clk/samsung.h b/include/linux/clk/samsung.h
new file mode 100644
index 000000000000..38b774001712
--- /dev/null
+++ b/include/linux/clk/samsung.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Krzysztof Kozlowski <krzk@kernel.org>
+ */
+
+#ifndef __LINUX_CLK_SAMSUNG_H_
+#define __LINUX_CLK_SAMSUNG_H_
+
+#include <linux/compiler_types.h>
+
+struct device_node;
+
+#ifdef CONFIG_S3C64XX_COMMON_CLK
+void s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
+ unsigned long xusbxti_f, bool s3c6400,
+ void __iomem *base);
+#else
+static inline void s3c64xx_clk_init(struct device_node *np,
+ unsigned long xtal_f,
+ unsigned long xusbxti_f,
+ bool s3c6400, void __iomem *base) { }
+#endif /* CONFIG_S3C64XX_COMMON_CLK */
+
+#ifdef CONFIG_S3C2410_COMMON_CLK
+void s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
+ int current_soc,
+ void __iomem *reg_base);
+#else
+static inline void s3c2410_common_clk_init(struct device_node *np,
+ unsigned long xti_f,
+ int current_soc,
+ void __iomem *reg_base) { }
+#endif /* CONFIG_S3C2410_COMMON_CLK */
+
+#ifdef CONFIG_S3C2412_COMMON_CLK
+void s3c2412_common_clk_init(struct device_node *np, unsigned long xti_f,
+ unsigned long ext_f, void __iomem *reg_base);
+#else
+static inline void s3c2412_common_clk_init(struct device_node *np,
+ unsigned long xti_f,
+ unsigned long ext_f,
+ void __iomem *reg_base) { }
+#endif /* CONFIG_S3C2412_COMMON_CLK */
+
+#ifdef CONFIG_S3C2443_COMMON_CLK
+void s3c2443_common_clk_init(struct device_node *np, unsigned long xti_f,
+ int current_soc,
+ void __iomem *reg_base);
+#else
+static inline void s3c2443_common_clk_init(struct device_node *np,
+ unsigned long xti_f,
+ int current_soc,
+ void __iomem *reg_base) { }
+#endif /* CONFIG_S3C2443_COMMON_CLK */
+
+#endif /* __LINUX_CLK_SAMSUNG_H_ */
diff --git a/include/linux/clk/spear.h b/include/linux/clk/spear.h
new file mode 100644
index 000000000000..eaf95ca656f8
--- /dev/null
+++ b/include/linux/clk/spear.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020 STMicroelectronics - All Rights Reserved
+ *
+ * Author: Lee Jones <lee.jones@linaro.org>
+ */
+
+#ifndef __LINUX_CLK_SPEAR_H
+#define __LINUX_CLK_SPEAR_H
+
+#ifdef CONFIG_ARCH_SPEAR3XX
+void __init spear3xx_clk_init(void __iomem *misc_base,
+ void __iomem *soc_config_base);
+#else
+static inline void __init spear3xx_clk_init(void __iomem *misc_base,
+ void __iomem *soc_config_base) {}
+#endif
+
+#ifdef CONFIG_ARCH_SPEAR6XX
+void __init spear6xx_clk_init(void __iomem *misc_base);
+#else
+static inline void __init spear6xx_clk_init(void __iomem *misc_base) {}
+#endif
+
+#ifdef CONFIG_MACH_SPEAR1310
+void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base);
+#else
+static inline void spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base) {}
+#endif
+
+#ifdef CONFIG_MACH_SPEAR1340
+void __init spear1340_clk_init(void __iomem *misc_base);
+#else
+static inline void spear1340_clk_init(void __iomem *misc_base) {}
+#endif
+
+#endif
diff --git a/include/linux/clk/sunxi-ng.h b/include/linux/clk/sunxi-ng.h
index 3cd14acde0a1..57c8ec44ab4e 100644
--- a/include/linux/clk/sunxi-ng.h
+++ b/include/linux/clk/sunxi-ng.h
@@ -6,22 +6,9 @@
#ifndef _LINUX_CLK_SUNXI_NG_H_
#define _LINUX_CLK_SUNXI_NG_H_
-#include <linux/errno.h>
-
-#ifdef CONFIG_SUNXI_CCU
int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode);
int sunxi_ccu_get_mmc_timing_mode(struct clk *clk);
-#else
-static inline int sunxi_ccu_set_mmc_timing_mode(struct clk *clk,
- bool new_mode)
-{
- return -ENOTSUPP;
-}
-static inline int sunxi_ccu_get_mmc_timing_mode(struct clk *clk)
-{
- return -ENOTSUPP;
-}
-#endif
+int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg);
#endif
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index 2b1b35240074..3650e926e93f 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2012-2020, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef __LINUX_CLK_TEGRA_H_
@@ -42,6 +42,7 @@ struct tegra_cpu_car_ops {
#endif
};
+#ifdef CONFIG_ARCH_TEGRA
extern struct tegra_cpu_car_ops *tegra_cpu_car_ops;
static inline void tegra_wait_cpu_in_reset(u32 cpu)
@@ -83,8 +84,29 @@ static inline void tegra_disable_cpu_clock(u32 cpu)
tegra_cpu_car_ops->disable_clock(cpu);
}
+#else
+static inline void tegra_wait_cpu_in_reset(u32 cpu)
+{
+}
-#ifdef CONFIG_PM_SLEEP
+static inline void tegra_put_cpu_in_reset(u32 cpu)
+{
+}
+
+static inline void tegra_cpu_out_of_reset(u32 cpu)
+{
+}
+
+static inline void tegra_enable_cpu_clock(u32 cpu)
+{
+}
+
+static inline void tegra_disable_cpu_clock(u32 cpu)
+{
+}
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA) && defined(CONFIG_PM_SLEEP)
static inline bool tegra_cpu_rail_off_ready(void)
{
if (WARN_ON(!tegra_cpu_car_ops->rail_off_ready))
@@ -123,24 +145,119 @@ static inline void tegra_cpu_clock_resume(void)
}
#endif
-extern void tegra210_xusb_pll_hw_control_enable(void);
-extern void tegra210_xusb_pll_hw_sequence_start(void);
-extern void tegra210_sata_pll_hw_control_enable(void);
-extern void tegra210_sata_pll_hw_sequence_start(void);
-extern void tegra210_set_sata_pll_seq_sw(bool state);
-extern void tegra210_put_utmipll_in_iddq(void);
-extern void tegra210_put_utmipll_out_iddq(void);
-extern int tegra210_clk_handle_mbist_war(unsigned int id);
-
struct clk;
+struct tegra_emc;
typedef long (tegra20_clk_emc_round_cb)(unsigned long rate,
unsigned long min_rate,
unsigned long max_rate,
void *arg);
+typedef int (tegra124_emc_prepare_timing_change_cb)(struct tegra_emc *emc,
+ unsigned long rate);
+typedef void (tegra124_emc_complete_timing_change_cb)(struct tegra_emc *emc,
+ unsigned long rate);
+
+struct tegra210_clk_emc_config {
+ unsigned long rate;
+ bool same_freq;
+ u32 value;
+
+ unsigned long parent_rate;
+ u8 parent;
+};
+
+struct tegra210_clk_emc_provider {
+ struct module *owner;
+ struct device *dev;
+
+ struct tegra210_clk_emc_config *configs;
+ unsigned int num_configs;
+
+ int (*set_rate)(struct device *dev,
+ const struct tegra210_clk_emc_config *config);
+};
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb,
void *cb_arg);
int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same);
+#else
+static inline void
+tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb,
+ void *cb_arg)
+{
+}
+
+static inline int
+tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_TEGRA124_CLK_EMC
+void tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb,
+ tegra124_emc_complete_timing_change_cb *complete_cb);
+#else
+static inline void
+tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb,
+ tegra124_emc_complete_timing_change_cb *complete_cb)
+{
+}
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+int tegra210_plle_hw_sequence_start(void);
+bool tegra210_plle_hw_sequence_is_enabled(void);
+void tegra210_xusb_pll_hw_control_enable(void);
+void tegra210_xusb_pll_hw_sequence_start(void);
+void tegra210_sata_pll_hw_control_enable(void);
+void tegra210_sata_pll_hw_sequence_start(void);
+void tegra210_set_sata_pll_seq_sw(bool state);
+void tegra210_put_utmipll_in_iddq(void);
+void tegra210_put_utmipll_out_iddq(void);
+int tegra210_clk_handle_mbist_war(unsigned int id);
+void tegra210_clk_emc_dll_enable(bool flag);
+void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value);
+void tegra210_clk_emc_update_setting(u32 emc_src_value);
+
+int tegra210_clk_emc_attach(struct clk *clk,
+ struct tegra210_clk_emc_provider *provider);
+void tegra210_clk_emc_detach(struct clk *clk);
+#else
+static inline int tegra210_plle_hw_sequence_start(void)
+{
+ return 0;
+}
+
+static inline bool tegra210_plle_hw_sequence_is_enabled(void)
+{
+ return false;
+}
+
+static inline int tegra210_clk_handle_mbist_war(unsigned int id)
+{
+ return 0;
+}
+
+static inline int
+tegra210_clk_emc_attach(struct clk *clk,
+ struct tegra210_clk_emc_provider *provider)
+{
+ return 0;
+}
+
+static inline void tegra210_xusb_pll_hw_control_enable(void) {}
+static inline void tegra210_xusb_pll_hw_sequence_start(void) {}
+static inline void tegra210_sata_pll_hw_control_enable(void) {}
+static inline void tegra210_sata_pll_hw_sequence_start(void) {}
+static inline void tegra210_set_sata_pll_seq_sw(bool state) {}
+static inline void tegra210_put_utmipll_in_iddq(void) {}
+static inline void tegra210_put_utmipll_out_iddq(void) {}
+static inline void tegra210_clk_emc_dll_enable(bool flag) {}
+static inline void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value) {}
+static inline void tegra210_clk_emc_update_setting(u32 emc_src_value) {}
+static inline void tegra210_clk_emc_detach(struct clk *clk) {}
+#endif
#endif /* __LINUX_CLK_TEGRA_H_ */
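
A hedged sketch of how an EMC driver might attach itself as the provider; the config table and set_rate callback are assumptions:

static struct tegra210_clk_emc_provider foo_emc_provider = {
	.owner		= THIS_MODULE,
	.configs	= foo_emc_configs,
	.num_configs	= ARRAY_SIZE(foo_emc_configs),
	.set_rate	= foo_emc_set_rate,
};

err = tegra210_clk_emc_attach(emc_clk, &foo_emc_provider);
if (err)
	return err;
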
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index c62f6fa6763d..cbfcbf186ce3 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI clock drivers support
*
* Copyright (C) 2013 Texas Instruments, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_CLK_TI_H__
#define __LINUX_CLK_TI_H__
@@ -63,6 +55,17 @@ struct clk_omap_reg {
* @auto_recal_bit: bitshift of the driftguard enable bit in @control_reg
* @recal_en_bit: bitshift of the PRM_IRQENABLE_* bit for recalibration IRQs
* @recal_st_bit: bitshift of the PRM_IRQSTATUS_* bit for recalibration IRQs
+ * @ssc_deltam_reg: register containing the DPLL SSC frequency spreading
+ * @ssc_modfreq_reg: register containing the DPLL SSC modulation frequency
+ * @ssc_modfreq_mant_mask: mask of the mantissa component in @ssc_modfreq_reg
+ * @ssc_modfreq_exp_mask: mask of the exponent component in @ssc_modfreq_reg
+ * @ssc_enable_mask: mask of the DPLL SSC enable bit in @control_reg
+ * @ssc_downspread_mask: mask of the DPLL SSC low frequency only bit in
+ * @control_reg
+ * @ssc_modfreq: the DPLL SSC frequency modulation in kHz
+ * @ssc_deltam: the DPLL SSC frequency spreading in permille (tenths of a
+ * percent)
+ * @ssc_downspread: true to spread the DPLL frequency only downward from the
+ * nominal rate in SSC mode
* @flags: DPLL type/features (see below)
*
* Possible values for @flags:
@@ -110,6 +113,17 @@ struct dpll_data {
u8 auto_recal_bit;
u8 recal_en_bit;
u8 recal_st_bit;
+ struct clk_omap_reg ssc_deltam_reg;
+ struct clk_omap_reg ssc_modfreq_reg;
+ u32 ssc_deltam_int_mask;
+ u32 ssc_deltam_frac_mask;
+ u32 ssc_modfreq_mant_mask;
+ u32 ssc_modfreq_exp_mask;
+ u32 ssc_enable_mask;
+ u32 ssc_downspread_mask;
+ u32 ssc_modfreq;
+ u32 ssc_deltam;
+ bool ssc_downspread;
u8 flags;
};
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index fd06b2780a22..45570bc21a43 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -30,11 +30,6 @@ struct clk_lookup {
.clk = c, \
}
-struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
- const char *dev_fmt, ...) __printf(3, 4);
-struct clk_lookup *clkdev_hw_alloc(struct clk_hw *hw, const char *con_id,
- const char *dev_fmt, ...) __printf(3, 4);
-
void clkdev_add(struct clk_lookup *cl);
void clkdev_drop(struct clk_lookup *cl);
@@ -51,6 +46,4 @@ int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *);
int devm_clk_hw_register_clkdev(struct device *dev, struct clk_hw *hw,
const char *con_id, const char *dev_id);
-void devm_clk_release_clkdev(struct device *dev, const char *con_id,
- const char *dev_id);
#endif
diff --git a/include/linux/clock_cooling.h b/include/linux/clock_cooling.h
deleted file mode 100644
index 4b0a69863656..000000000000
--- a/include/linux/clock_cooling.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/include/linux/clock_cooling.h
- *
- * Copyright (C) 2014 Eduardo Valentin <edubezval@gmail.com>
- *
- * Copyright (C) 2013 Texas Instruments Inc.
- * Contact: Eduardo Valentin <eduardo.valentin@ti.com>
- *
- * Highly based on cpufreq_cooling.c.
- * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
- * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
- */
-
-#ifndef __CPU_COOLING_H__
-#define __CPU_COOLING_H__
-
-#include <linux/of.h>
-#include <linux/thermal.h>
-#include <linux/cpumask.h>
-
-#ifdef CONFIG_CLOCK_THERMAL
-/**
- * clock_cooling_register - function to create clock cooling device.
- * @dev: struct device pointer to the device used as clock cooling device.
- * @clock_name: string containing the clock used as cooling mechanism.
- */
-struct thermal_cooling_device *
-clock_cooling_register(struct device *dev, const char *clock_name);
-
-/**
- * clock_cooling_unregister - function to remove clock cooling device.
- * @cdev: thermal cooling device pointer.
- */
-void clock_cooling_unregister(struct thermal_cooling_device *cdev);
-
-unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev,
- unsigned long freq);
-#else /* !CONFIG_CLOCK_THERMAL */
-static inline struct thermal_cooling_device *
-clock_cooling_register(struct device *dev, const char *clock_name)
-{
- return NULL;
-}
-static inline
-void clock_cooling_unregister(struct thermal_cooling_device *cdev)
-{
-}
-static inline
-unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev,
- unsigned long freq)
-{
- return THERMAL_CSTATE_INVALID;
-}
-#endif /* CONFIG_CLOCK_THERMAL */
-
-#endif /* __CPU_COOLING_H__ */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index b21db536fd52..1d42d4b17327 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -17,24 +17,40 @@
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/clocksource_ids.h>
#include <asm/div64.h>
#include <asm/io.h>
struct clocksource;
struct module;
-#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
+#if defined(CONFIG_ARCH_CLOCKSOURCE_DATA) || \
+ defined(CONFIG_GENERIC_GETTIMEOFDAY)
#include <asm/clocksource.h>
#endif
+#include <vdso/clocksource.h>
+
/**
* struct clocksource - hardware abstraction for a free running counter
* Provides mostly state-free accessors to the underlying hardware.
* This is the structure used for system time.
*
- * @name: ptr to clocksource name
- * @list: list head for registration
- * @rating: rating value for selection (higher is better)
+ * @read: Returns a cycle value, passes clocksource as argument
+ * @mask: Bitmask for two's complement
+ * subtraction of non 64 bit counters
+ * @mult: Cycle to nanosecond multiplier
+ * @shift: Cycle to nanosecond divisor (power of two)
+ * @max_idle_ns: Maximum idle time permitted by the clocksource (nsecs)
+ * @maxadj: Maximum adjustment value to mult (~11%)
+ * @uncertainty_margin: Maximum uncertainty in nanoseconds per half second.
+ * Zero says to use default WATCHDOG_THRESHOLD.
+ * @archdata: Optional arch-specific data
+ * @max_cycles: Maximum safe cycle value which won't overflow on
+ * multiplication
+ * @name: Pointer to clocksource name
+ * @list: List head for registration (internal)
+ * @rating: Rating value for selection (higher is better)
* To avoid rating inflation the following
* list should give you a guide as to how
* to assign your clocksource a rating
@@ -49,27 +65,27 @@ struct module;
* 400-499: Perfect
* The ideal clocksource. A must-use where
* available.
- * @read: returns a cycle value, passes clocksource as argument
- * @enable: optional function to enable the clocksource
- * @disable: optional function to disable the clocksource
- * @mask: bitmask for two's complement
- * subtraction of non 64 bit counters
- * @mult: cycle to nanosecond multiplier
- * @shift: cycle to nanosecond divisor (power of two)
- * @max_idle_ns: max idle time permitted by the clocksource (nsecs)
- * @maxadj: maximum adjustment value to mult (~11%)
- * @max_cycles: maximum safe cycle value which won't overflow on multiplication
- * @flags: flags describing special properties
- * @archdata: arch-specific data
- * @suspend: suspend function for the clocksource, if necessary
- * @resume: resume function for the clocksource, if necessary
+ * @id: Defaults to CSID_GENERIC. The id value is captured
+ * in certain snapshot functions to allow callers to
+ * validate the clocksource from which the snapshot was
+ * taken.
+ * @flags: Flags describing special properties
+ * @enable: Optional function to enable the clocksource
+ * @disable: Optional function to disable the clocksource
+ * @suspend: Optional suspend function for the clocksource
+ * @resume: Optional resume function for the clocksource
* @mark_unstable: Optional function to inform the clocksource driver that
* the watchdog marked the clocksource unstable
- * @owner: module reference, must be set by clocksource in modules
+ * @tick_stable: Optional function called periodically from the watchdog
+ * code to provide stable synchronization points
+ * @wd_list: List head to enqueue into the watchdog list (internal)
+ * @cs_last: Last clocksource value for clocksource watchdog
+ * @wd_last: Last watchdog value corresponding to @cs_last
+ * @owner: Module reference, must be set by clocksource in modules
*
* Note: This struct is not used in hotpathes of the timekeeping code
* because the timekeeper caches the hot path fields in its own data
- * structure, so no line cache alignment is required,
+ * structure, so no cache line alignment is required.
*
* The pointer to the clocksource itself is handed to the read
* callback. If you need extra information there you can wrap struct
@@ -78,35 +94,39 @@ struct module;
* structure.
*/
struct clocksource {
- u64 (*read)(struct clocksource *cs);
- u64 mask;
- u32 mult;
- u32 shift;
- u64 max_idle_ns;
- u32 maxadj;
+ u64 (*read)(struct clocksource *cs);
+ u64 mask;
+ u32 mult;
+ u32 shift;
+ u64 max_idle_ns;
+ u32 maxadj;
+ u32 uncertainty_margin;
#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
struct arch_clocksource_data archdata;
#endif
- u64 max_cycles;
- const char *name;
- struct list_head list;
- int rating;
- int (*enable)(struct clocksource *cs);
- void (*disable)(struct clocksource *cs);
- unsigned long flags;
- void (*suspend)(struct clocksource *cs);
- void (*resume)(struct clocksource *cs);
- void (*mark_unstable)(struct clocksource *cs);
- void (*tick_stable)(struct clocksource *cs);
+ u64 max_cycles;
+ const char *name;
+ struct list_head list;
+ int rating;
+ enum clocksource_ids id;
+ enum vdso_clock_mode vdso_clock_mode;
+ unsigned long flags;
+
+ int (*enable)(struct clocksource *cs);
+ void (*disable)(struct clocksource *cs);
+ void (*suspend)(struct clocksource *cs);
+ void (*resume)(struct clocksource *cs);
+ void (*mark_unstable)(struct clocksource *cs);
+ void (*tick_stable)(struct clocksource *cs);
/* private: */
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
/* Watchdog related data, used by the framework */
- struct list_head wd_list;
- u64 cs_last;
- u64 wd_last;
+ struct list_head wd_list;
+ u64 cs_last;
+ u64 wd_last;
#endif
- struct module *owner;
+ struct module *owner;
};
/*
@@ -120,7 +140,7 @@ struct clocksource {
#define CLOCK_SOURCE_UNSTABLE 0x40
#define CLOCK_SOURCE_SUSPEND_NONSTOP 0x80
#define CLOCK_SOURCE_RESELECT 0x100
-
+#define CLOCK_SOURCE_VERIFY_PERCPU 0x200
/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0)
@@ -271,4 +291,7 @@ static inline void timer_probe(void) {}
#define TIMER_ACPI_DECLARE(name, table_id, fn) \
ACPI_DECLARE_PROBE_ENTRY(timer, name, table_id, 0, NULL, 0, fn)
+extern ulong max_cswd_read_retries;
+void clocksource_verify_percpu(struct clocksource *cs);
+
#endif /* _LINUX_CLOCKSOURCE_H */
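
For orientation, a sketch of a driver-side clocksource using the reworked layout; the timer, its base mapping and the register offset are hypothetical:

static void __iomem *foo_timer_base;	/* assumed mapped elsewhere */

static u64 foo_cs_read(struct clocksource *cs)
{
	return readl_relaxed(foo_timer_base + FOO_COUNTER);
}

static struct clocksource foo_cs = {
	.name	= "foo-timer",
	.rating	= 300,
	.id	= CSID_GENERIC,
	.read	= foo_cs_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};
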
diff --git a/include/linux/clocksource_ids.h b/include/linux/clocksource_ids.h
new file mode 100644
index 000000000000..16775d7d8f8d
--- /dev/null
+++ b/include/linux/clocksource_ids.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CLOCKSOURCE_IDS_H
+#define _LINUX_CLOCKSOURCE_IDS_H
+
+/* Enum to give clocksources a unique identifier */
+enum clocksource_ids {
+ CSID_GENERIC = 0,
+ CSID_ARM_ARCH_COUNTER,
+ CSID_MAX,
+};
+
+#endif
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 190184b5ff32..63873b93deaa 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -4,6 +4,7 @@
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/numa.h>
/*
* There is always at least global CMA area and a few optional
@@ -11,11 +12,17 @@
*/
#ifdef CONFIG_CMA_AREAS
#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
+#endif
-#else
-#define MAX_CMA_AREAS (0)
+#define CMA_MAX_NAME 64
-#endif
+/*
+ * Since the buddy -- especially pageblock merging and alloc_contig_range()
+ * -- can deal with only some pageblocks of a higher-order page being
+ * MIGRATE_CMA, we can use pageblock_nr_pages.
+ */
+#define CMA_MIN_ALIGNMENT_PAGES pageblock_nr_pages
+#define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES)
struct cma;
@@ -24,17 +31,29 @@ extern phys_addr_t cma_get_base(const struct cma *cma);
extern unsigned long cma_get_size(const struct cma *cma);
extern const char *cma_get_name(const struct cma *cma);
-extern int __init cma_declare_contiguous(phys_addr_t base,
+extern int __init cma_declare_contiguous_nid(phys_addr_t base,
phys_addr_t size, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
- bool fixed, const char *name, struct cma **res_cma);
+ bool fixed, const char *name, struct cma **res_cma,
+ int nid);
+static inline int __init cma_declare_contiguous(phys_addr_t base,
+ phys_addr_t size, phys_addr_t limit,
+ phys_addr_t alignment, unsigned int order_per_bit,
+ bool fixed, const char *name, struct cma **res_cma)
+{
+ return cma_declare_contiguous_nid(base, size, limit, alignment,
+ order_per_bit, fixed, name, res_cma, NUMA_NO_NODE);
+}
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
unsigned int order_per_bit,
const char *name,
struct cma **res_cma);
-extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
+extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
bool no_warn);
-extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
+extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
+extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
+
+extern void cma_reserve_pages_on_error(struct cma *cma);
#endif
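
A rough usage sketch of the NUMA-aware declaration and the widened unsigned long page counts; "foo_cma" and the size/node choices are hypothetical:

	#include <linux/cma.h>
	#include <linux/sizes.h>

	static struct cma *foo_cma;

	static int __init foo_cma_reserve(void)
	{
		/* 16 MiB anywhere on node 1, at the minimum CMA alignment */
		return cma_declare_contiguous_nid(0, SZ_16M, 0,
						  CMA_MIN_ALIGNMENT_BYTES, 0,
						  false, "foo_cma", &foo_cma, 1);
	}

	static struct page *foo_grab_buffer(void)
	{
		/* 256 pages, no extra alignment, warn on failure */
		return cma_alloc(foo_cma, 256, 0, false);
	}

	static void foo_drop_buffer(struct page *pages)
	{
		if (!cma_release(foo_cma, pages, 256))
			pr_warn("foo_cma: pages were not from this area\n");
	}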
diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h
deleted file mode 100644
index 68a541807bdf..000000000000
--- a/include/linux/cmdline-parser.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Parsing command line, get the partitions information.
- *
- * Written by Cai Zhiyong <caizhiyong@huawei.com>
- *
- */
-#ifndef CMDLINEPARSEH
-#define CMDLINEPARSEH
-
-#include <linux/blkdev.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-
-/* partition flags */
-#define PF_RDONLY 0x01 /* Device is read only */
-#define PF_POWERUP_LOCK 0x02 /* Always locked after reset */
-
-struct cmdline_subpart {
- char name[BDEVNAME_SIZE]; /* partition name, such as 'rootfs' */
- sector_t from;
- sector_t size;
- int flags;
- struct cmdline_subpart *next_subpart;
-};
-
-struct cmdline_parts {
- char name[BDEVNAME_SIZE]; /* block device, such as 'mmcblk0' */
- unsigned int nr_subparts;
- struct cmdline_subpart *subpart;
- struct cmdline_parts *next_parts;
-};
-
-void cmdline_parts_free(struct cmdline_parts **parts);
-
-int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline);
-
-struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
- const char *bdev);
-
-int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
- int slot,
- int (*add_part)(int, struct cmdline_subpart *, void *),
- void *param);
-
-#endif /* CMDLINEPARSEH */
diff --git a/drivers/staging/comedi/drivers/comedi_8254.h b/include/linux/comedi/comedi_8254.h
index d8264417e53c..d8264417e53c 100644
--- a/drivers/staging/comedi/drivers/comedi_8254.h
+++ b/include/linux/comedi/comedi_8254.h
diff --git a/include/linux/comedi/comedi_8255.h b/include/linux/comedi/comedi_8255.h
new file mode 100644
index 000000000000..b2a5bc6b3a49
--- /dev/null
+++ b/include/linux/comedi/comedi_8255.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedi_8255.h
+ * Generic 8255 digital I/O subdevice support
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDI_8255_H
+#define _COMEDI_8255_H
+
+#define I8255_SIZE 0x04
+
+#define I8255_DATA_A_REG 0x00
+#define I8255_DATA_B_REG 0x01
+#define I8255_DATA_C_REG 0x02
+#define I8255_CTRL_REG 0x03
+#define I8255_CTRL_C_LO_IO BIT(0)
+#define I8255_CTRL_B_IO BIT(1)
+#define I8255_CTRL_B_MODE BIT(2)
+#define I8255_CTRL_C_HI_IO BIT(3)
+#define I8255_CTRL_A_IO BIT(4)
+#define I8255_CTRL_A_MODE(x) ((x) << 5)
+#define I8255_CTRL_CW BIT(7)
+
+struct comedi_device;
+struct comedi_subdevice;
+
+int subdev_8255_init(struct comedi_device *dev, struct comedi_subdevice *s,
+ int (*io)(struct comedi_device *dev, int dir, int port,
+ int data, unsigned long regbase),
+ unsigned long regbase);
+
+int subdev_8255_mm_init(struct comedi_device *dev, struct comedi_subdevice *s,
+ int (*io)(struct comedi_device *dev, int dir, int port,
+ int data, unsigned long regbase),
+ unsigned long regbase);
+
+unsigned long subdev_8255_regbase(struct comedi_subdevice *s);
+
+#endif
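
A sketch of the port-I/O flavour of this helper as a driver might wire it up; foo_8255_io() is hypothetical and assumes the chip sits at dev->iobase + regbase, with port 0..3 mapping onto the I8255_*_REG offsets above:

	static int foo_8255_io(struct comedi_device *dev, int dir, int port,
			       int data, unsigned long regbase)
	{
		if (dir) {			/* write to the 8255 */
			outb(data, dev->iobase + regbase + port);
			return 0;
		}
		return inb(dev->iobase + regbase + port);
	}

	static int foo_attach_dio(struct comedi_device *dev,
				  struct comedi_subdevice *s)
	{
		/* one DIO subdevice behind the 8255 at I/O offset 0 */
		return subdev_8255_init(dev, s, foo_8255_io, 0x00);
	}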
diff --git a/drivers/staging/comedi/drivers/comedi_isadma.h b/include/linux/comedi/comedi_isadma.h
index 9d2b12db7e6e..9d2b12db7e6e 100644
--- a/drivers/staging/comedi/drivers/comedi_isadma.h
+++ b/include/linux/comedi/comedi_isadma.h
diff --git a/drivers/staging/comedi/comedi_pci.h b/include/linux/comedi/comedi_pci.h
index 4e069440cbdc..2fb50663e3ed 100644
--- a/drivers/staging/comedi/comedi_pci.h
+++ b/include/linux/comedi/comedi_pci.h
@@ -11,8 +11,7 @@
#define _COMEDI_PCI_H
#include <linux/pci.h>
-
-#include "comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* PCI Vendor IDs not in <linux/pci_ids.h>
diff --git a/drivers/staging/comedi/comedi_pcmcia.h b/include/linux/comedi/comedi_pcmcia.h
index f2f6e779645b..a33dfb65b869 100644
--- a/drivers/staging/comedi/comedi_pcmcia.h
+++ b/include/linux/comedi/comedi_pcmcia.h
@@ -12,8 +12,7 @@
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
-
-#include "comedidev.h"
+#include <linux/comedi/comedidev.h>
struct pcmcia_device *comedi_to_pcmcia_dev(struct comedi_device *dev);
diff --git a/drivers/staging/comedi/comedi_usb.h b/include/linux/comedi/comedi_usb.h
index 601e29d3891c..5d17dd425bd2 100644
--- a/drivers/staging/comedi/comedi_usb.h
+++ b/include/linux/comedi/comedi_usb.h
@@ -10,8 +10,7 @@
#define _COMEDI_USB_H
#include <linux/usb.h>
-
-#include "comedidev.h"
+#include <linux/comedi/comedidev.h>
struct usb_interface *comedi_to_usb_interface(struct comedi_device *dev);
struct usb_device *comedi_to_usb_dev(struct comedi_device *dev);
diff --git a/drivers/staging/comedi/comedidev.h b/include/linux/comedi/comedidev.h
index 0dff1ac057cd..0a1150900ef3 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/include/linux/comedi/comedidev.h
@@ -15,8 +15,7 @@
#include <linux/spinlock_types.h>
#include <linux/rwsem.h>
#include <linux/kref.h>
-
-#include "comedi.h"
+#include <linux/comedi.h>
#define COMEDI_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
#define COMEDI_VERSION_CODE COMEDI_VERSION(COMEDI_MAJORVERSION, \
@@ -627,7 +626,7 @@ extern const struct comedi_lrange range_unknown;
* @range: Array of &struct comedi_krange, one for each range.
*
* Each element of @range[] describes the minimum and maximum physical range
- * range and the type of units. Typically, the type of unit is %UNIT_volt
+ * and the type of units. Typically, the type of unit is %UNIT_volt
* (i.e. volts) and the minimum and maximum are in millionths of a volt.
* There may also be a flag that indicates the minimum and maximum are merely
* scale factors for an unknown, external reference.
diff --git a/drivers/staging/comedi/comedilib.h b/include/linux/comedi/comedilib.h
index 0223c9cd9215..0223c9cd9215 100644
--- a/drivers/staging/comedi/comedilib.h
+++ b/include/linux/comedi/comedilib.h
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 4b898cdbdf05..52a9ff65faee 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -29,21 +29,18 @@ enum compact_result {
/* compaction didn't start as it was deferred due to past failures */
COMPACT_DEFERRED,
- /* compaction not active last round */
- COMPACT_INACTIVE = COMPACT_DEFERRED,
-
/* For more detailed tracepoint output - internal to compaction */
COMPACT_NO_SUITABLE_PAGE,
/* compaction should continue to another pageblock */
COMPACT_CONTINUE,
/*
- * The full zone was compacted scanned but wasn't successfull to compact
+ * The full zone was compacted scanned but wasn't successful to compact
* suitable pages.
*/
COMPACT_COMPLETE,
/*
- * direct compaction has scanned part of the zone but wasn't successfull
+ * direct compaction has scanned part of the zone but wasn't successful
* to compact suitable pages.
*/
COMPACT_PARTIAL_SKIPPED,
@@ -84,12 +81,15 @@ static inline unsigned long compact_gap(unsigned int order)
}
#ifdef CONFIG_COMPACTION
-extern int sysctl_compact_memory;
+extern unsigned int sysctl_compaction_proactiveness;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *length, loff_t *ppos);
+ void *buffer, size_t *length, loff_t *ppos);
+extern int compaction_proactiveness_sysctl_handler(struct ctl_table *table,
+ int write, void *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_compact_unevictable_allowed;
+extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
unsigned int order, unsigned int alloc_flags,
@@ -97,13 +97,10 @@ extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
struct page **page);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern enum compact_result compaction_suitable(struct zone *zone, int order,
- unsigned int alloc_flags, int classzone_idx);
+ unsigned int alloc_flags, int highest_zoneidx);
-extern void defer_compaction(struct zone *zone, int order);
-extern bool compaction_deferred(struct zone *zone, int order);
extern void compaction_defer_reset(struct zone *zone, int order,
bool alloc_success);
-extern bool compaction_restarting(struct zone *zone, int order);
/* Compaction has made some progress and retrying makes sense */
static inline bool compaction_made_progress(enum compact_result result)
@@ -180,9 +177,9 @@ static inline bool compaction_withdrawn(enum compact_result result)
bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
int alloc_flags);
-extern int kcompactd_run(int nid);
+extern void kcompactd_run(int nid);
extern void kcompactd_stop(int nid);
-extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
+extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx);
#else
static inline void reset_isolation_suitable(pg_data_t *pgdat)
@@ -190,20 +187,11 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat)
}
static inline enum compact_result compaction_suitable(struct zone *zone, int order,
- int alloc_flags, int classzone_idx)
+ int alloc_flags, int highest_zoneidx)
{
return COMPACT_SKIPPED;
}
-static inline void defer_compaction(struct zone *zone, int order)
-{
-}
-
-static inline bool compaction_deferred(struct zone *zone, int order)
-{
- return true;
-}
-
static inline bool compaction_made_progress(enum compact_result result)
{
return false;
@@ -224,15 +212,15 @@ static inline bool compaction_withdrawn(enum compact_result result)
return true;
}
-static inline int kcompactd_run(int nid)
+static inline void kcompactd_run(int nid)
{
- return 0;
}
static inline void kcompactd_stop(int nid)
{
}
-static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
+static inline void wakeup_kcompactd(pg_data_t *pgdat,
+ int order, int highest_zoneidx)
{
}
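
A simplified, hypothetical caller illustrating the classzone_idx -> highest_zoneidx rename; foo_kick_compaction() is illustrative only and assumes <linux/compaction.h> and <linux/mmzone.h>:

	static void foo_kick_compaction(pg_data_t *pgdat, unsigned int order,
					int highest_zoneidx)
	{
		struct zone *zone = &pgdat->node_zones[ZONE_NORMAL];

		if (compaction_suitable(zone, order, 0, highest_zoneidx) ==
		    COMPACT_CONTINUE)
			wakeup_kcompactd(pgdat, order, highest_zoneidx);
	}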
diff --git a/include/linux/compat.h b/include/linux/compat.h
index df2475be134a..594357881b0b 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -20,11 +20,8 @@
#include <linux/unistd.h>
#include <asm/compat.h>
-
-#ifdef CONFIG_COMPAT
#include <asm/siginfo.h>
#include <asm/signal.h>
-#endif
#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
/*
@@ -75,7 +72,6 @@
__diag_push(); \
__diag_ignore(GCC, 8, "-Wattribute-alias", \
"Type aliasing is used to sanitize syscall arguments");\
- asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
__attribute__((alias(__stringify(__se_compat_sys##name)))); \
ALLOW_ERROR_INJECTION(compat_sys##name, ERRNO); \
@@ -91,7 +87,10 @@
static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
#endif /* COMPAT_SYSCALL_DEFINEx */
-#ifdef CONFIG_COMPAT
+struct compat_iovec {
+ compat_uptr_t iov_base;
+ compat_size_t iov_len;
+};
#ifndef compat_user_stack_pointer
#define compat_user_stack_pointer() current_user_stack_pointer()
@@ -127,9 +126,11 @@ struct compat_tms {
#define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW)
+#ifndef compat_sigset_t
typedef struct {
compat_sigset_word sig[_COMPAT_NSIG_WORDS];
} compat_sigset_t;
+#endif
int set_compat_user_sigmask(const compat_sigset_t __user *umask,
size_t sigsetsize);
@@ -209,12 +210,11 @@ typedef struct compat_siginfo {
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
struct {
compat_uptr_t _addr; /* faulting insn/memory ref. */
-#ifdef __ARCH_SI_TRAPNO
- int _trapno; /* TRAP # which caused the signal */
-#endif
#define __COMPAT_ADDR_BND_PKEY_PAD (__alignof__(compat_uptr_t) < sizeof(short) ? \
sizeof(short) : __alignof__(compat_uptr_t))
union {
+ /* used on alpha and sparc */
+ int _trapno; /* TRAP # which caused the signal */
/*
* used when si_code=BUS_MCEERR_AR or
* used when si_code=BUS_MCEERR_AO
@@ -231,6 +231,12 @@ typedef struct compat_siginfo {
char _dummy_pkey[__COMPAT_ADDR_BND_PKEY_PAD];
u32 _pkey;
} _addr_pkey;
+ /* used when si_code=TRAP_PERF */
+ struct {
+ compat_ulong_t _data;
+ u32 _type;
+ u32 _flags;
+ } _perf;
};
} _sigfault;
@@ -248,16 +254,42 @@ typedef struct compat_siginfo {
} _sifields;
} compat_siginfo_t;
-struct compat_iovec {
- compat_uptr_t iov_base;
- compat_size_t iov_len;
-};
-
struct compat_rlimit {
compat_ulong_t rlim_cur;
compat_ulong_t rlim_max;
};
+#ifdef __ARCH_NEED_COMPAT_FLOCK64_PACKED
+#define __ARCH_COMPAT_FLOCK64_PACK __attribute__((packed))
+#else
+#define __ARCH_COMPAT_FLOCK64_PACK
+#endif
+
+struct compat_flock {
+ short l_type;
+ short l_whence;
+ compat_off_t l_start;
+ compat_off_t l_len;
+#ifdef __ARCH_COMPAT_FLOCK_EXTRA_SYSID
+ __ARCH_COMPAT_FLOCK_EXTRA_SYSID
+#endif
+ compat_pid_t l_pid;
+#ifdef __ARCH_COMPAT_FLOCK_PAD
+ __ARCH_COMPAT_FLOCK_PAD
+#endif
+};
+
+struct compat_flock64 {
+ short l_type;
+ short l_whence;
+ compat_loff_t l_start;
+ compat_loff_t l_len;
+ compat_pid_t l_pid;
+#ifdef __ARCH_COMPAT_FLOCK64_PAD
+ __ARCH_COMPAT_FLOCK64_PAD
+#endif
+} __ARCH_COMPAT_FLOCK64_PACK;
+
struct compat_rusage {
struct old_timeval32 ru_utime;
struct old_timeval32 ru_stime;
@@ -381,6 +413,7 @@ struct compat_keyctl_kdf_params {
__u32 __spare[8];
};
+struct compat_stat;
struct compat_statfs;
struct compat_statfs64;
struct compat_old_linux_dirent;
@@ -394,16 +427,15 @@ struct compat_kexec_segment;
struct compat_mq_attr;
struct compat_msgbuf;
-#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))
-
-#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)
-
-long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
- unsigned long bitmap_size);
-long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
- unsigned long bitmap_size);
-int copy_siginfo_from_user32(kernel_siginfo_t *to, const struct compat_siginfo __user *from);
-int copy_siginfo_to_user32(struct compat_siginfo __user *to, const kernel_siginfo_t *from);
+void copy_siginfo_to_external32(struct compat_siginfo *to,
+ const struct kernel_siginfo *from);
+int copy_siginfo_from_user32(kernel_siginfo_t *to,
+ const struct compat_siginfo __user *from);
+int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
+ const kernel_siginfo_t *from);
+#ifndef copy_siginfo_to_user32
+#define copy_siginfo_to_user32 __copy_siginfo_to_user32
+#endif
int get_compat_sigevent(struct sigevent *event,
const struct compat_sigevent __user *u_event);
@@ -418,15 +450,15 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
unsigned int size)
{
/* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */
-#ifdef __BIG_ENDIAN
+#if defined(__BIG_ENDIAN) && defined(CONFIG_64BIT)
compat_sigset_t v;
switch (_NSIG_WORDS) {
case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
- /* fall through */
+ fallthrough;
case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
- /* fall through */
+ fallthrough;
case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
- /* fall through */
+ fallthrough;
case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
}
return copy_to_user(compat, &v, size) ? -EFAULT : 0;
@@ -435,6 +467,73 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
#endif
}
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define unsafe_put_compat_sigset(compat, set, label) do { \
+ compat_sigset_t __user *__c = compat; \
+ const sigset_t *__s = set; \
+ \
+ switch (_NSIG_WORDS) { \
+ case 4: \
+ unsafe_put_user(__s->sig[3] >> 32, &__c->sig[7], label); \
+ unsafe_put_user(__s->sig[3], &__c->sig[6], label); \
+ fallthrough; \
+ case 3: \
+ unsafe_put_user(__s->sig[2] >> 32, &__c->sig[5], label); \
+ unsafe_put_user(__s->sig[2], &__c->sig[4], label); \
+ fallthrough; \
+ case 2: \
+ unsafe_put_user(__s->sig[1] >> 32, &__c->sig[3], label); \
+ unsafe_put_user(__s->sig[1], &__c->sig[2], label); \
+ fallthrough; \
+ case 1: \
+ unsafe_put_user(__s->sig[0] >> 32, &__c->sig[1], label); \
+ unsafe_put_user(__s->sig[0], &__c->sig[0], label); \
+ } \
+} while (0)
+
+#define unsafe_get_compat_sigset(set, compat, label) do { \
+ const compat_sigset_t __user *__c = compat; \
+ compat_sigset_word hi, lo; \
+ sigset_t *__s = set; \
+ \
+ switch (_NSIG_WORDS) { \
+ case 4: \
+ unsafe_get_user(lo, &__c->sig[7], label); \
+ unsafe_get_user(hi, &__c->sig[6], label); \
+ __s->sig[3] = hi | (((long)lo) << 32); \
+ fallthrough; \
+ case 3: \
+ unsafe_get_user(lo, &__c->sig[5], label); \
+ unsafe_get_user(hi, &__c->sig[4], label); \
+ __s->sig[2] = hi | (((long)lo) << 32); \
+ fallthrough; \
+ case 2: \
+ unsafe_get_user(lo, &__c->sig[3], label); \
+ unsafe_get_user(hi, &__c->sig[2], label); \
+ __s->sig[1] = hi | (((long)lo) << 32); \
+ fallthrough; \
+ case 1: \
+ unsafe_get_user(lo, &__c->sig[1], label); \
+ unsafe_get_user(hi, &__c->sig[0], label); \
+ __s->sig[0] = hi | (((long)lo) << 32); \
+ } \
+} while (0)
+#else
+#define unsafe_put_compat_sigset(compat, set, label) do { \
+ compat_sigset_t __user *__c = compat; \
+ const sigset_t *__s = set; \
+ \
+ unsafe_copy_to_user(__c, __s, sizeof(*__c), label); \
+} while (0)
+
+#define unsafe_get_compat_sigset(set, compat, label) do { \
+ const compat_sigset_t __user *__c = compat; \
+ sigset_t *__s = set; \
+ \
+ unsafe_copy_from_user(__s, __c, sizeof(*__c), label); \
+} while (0)
+#endif
+
extern int compat_ptrace_request(struct task_struct *child,
compat_long_t request,
compat_ulong_t addr, compat_ulong_t data);
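
A sketch of how signal code consumes the new unsafe_get_compat_sigset() inside a user_access_begin() section; foo_fetch_sigset() is a hypothetical wrapper:

	static int foo_fetch_sigset(sigset_t *set,
				    const compat_sigset_t __user *uset)
	{
		if (!user_access_begin(uset, sizeof(*uset)))
			return -EFAULT;
		unsafe_get_compat_sigset(set, uset, Efault);
		user_access_end();
		return 0;

	Efault:
		user_access_end();
		return -EFAULT;
	}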
@@ -444,24 +543,15 @@ extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
struct epoll_event; /* fortunately, this one is fixed-layout */
-extern ssize_t compat_rw_copy_check_uvector(int type,
- const struct compat_iovec __user *uvector,
- unsigned long nr_segs,
- unsigned long fast_segs, struct iovec *fast_pointer,
- struct iovec **ret_pointer);
-
-extern void __user *compat_alloc_user_space(unsigned long len);
-
int compat_restore_altstack(const compat_stack_t __user *uss);
int __compat_save_altstack(compat_stack_t __user *, unsigned long);
-#define compat_save_altstack_ex(uss, sp) do { \
+#define unsafe_compat_save_altstack(uss, sp, label) do { \
compat_stack_t __user *__uss = uss; \
struct task_struct *t = current; \
- put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \
- put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \
- put_user_ex(t->sas_ss_size, &__uss->ss_size); \
- if (t->sas_ss_flags & SS_AUTODISARM) \
- sas_ss_reset(t); \
+ unsafe_put_user(ptr_to_compat((void __user *)t->sas_ss_sp), \
+ &__uss->ss_sp, label); \
+ unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \
+ unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \
} while (0);
/*
@@ -503,6 +593,12 @@ asmlinkage long compat_sys_epoll_pwait(int epfd,
int maxevents, int timeout,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
+asmlinkage long compat_sys_epoll_pwait2(int epfd,
+ struct epoll_event __user *events,
+ int maxevents,
+ const struct __kernel_timespec __user *timeout,
+ const compat_sigset_t __user *sigmask,
+ compat_size_t sigsetsize);
/* fs/fcntl.c */
asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd,
@@ -514,12 +610,6 @@ asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
compat_ulong_t arg);
-/* fs/namespace.c */
-asmlinkage long compat_sys_mount(const char __user *dev_name,
- const char __user *dir_name,
- const char __user *type, compat_ulong_t flags,
- const void __user *data);
-
/* fs/open.c */
asmlinkage long compat_sys_statfs(const char __user *pathname,
struct compat_statfs __user *buf);
@@ -543,26 +633,22 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
/* fs/read_write.c */
asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
-asmlinkage ssize_t compat_sys_readv(compat_ulong_t fd,
- const struct compat_iovec __user *vec, compat_ulong_t vlen);
-asmlinkage ssize_t compat_sys_writev(compat_ulong_t fd,
- const struct compat_iovec __user *vec, compat_ulong_t vlen);
/* No generic prototype for pread64 and pwrite64 */
asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd,
- const struct compat_iovec __user *vec,
+ const struct iovec __user *vec,
compat_ulong_t vlen, u32 pos_low, u32 pos_high);
asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
- const struct compat_iovec __user *vec,
+ const struct iovec __user *vec,
compat_ulong_t vlen, u32 pos_low, u32 pos_high);
#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
asmlinkage long compat_sys_preadv64(unsigned long fd,
- const struct compat_iovec __user *vec,
+ const struct iovec __user *vec,
unsigned long vlen, loff_t pos);
#endif
#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64
asmlinkage long compat_sys_pwritev64(unsigned long fd,
- const struct compat_iovec __user *vec,
+ const struct iovec __user *vec,
unsigned long vlen, loff_t pos);
#endif
@@ -599,10 +685,6 @@ asmlinkage long compat_sys_signalfd4(int ufd,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize, int flags);
-/* fs/splice.c */
-asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *,
- unsigned int nr_segs, unsigned int flags);
-
/* fs/stat.c */
asmlinkage long compat_sys_newfstatat(unsigned int dfd,
const char __user *filename,
@@ -729,10 +811,6 @@ asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len,
unsigned flags, struct sockaddr __user *addr,
int __user *addrlen);
-asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
- char __user *optval, unsigned int optlen);
-asmlinkage long compat_sys_getsockopt(int fd, int level, int optname,
- char __user *optval, int __user *optlen);
asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg,
unsigned flags);
asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg,
@@ -751,26 +829,6 @@ asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr
/* mm/fadvise.c: No generic prototype for fadvise64_64 */
/* mm/, CONFIG_MMU only */
-asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
- compat_ulong_t mode,
- compat_ulong_t __user *nmask,
- compat_ulong_t maxnode, compat_ulong_t flags);
-asmlinkage long compat_sys_get_mempolicy(int __user *policy,
- compat_ulong_t __user *nmask,
- compat_ulong_t maxnode,
- compat_ulong_t addr,
- compat_ulong_t flags);
-asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
- compat_ulong_t maxnode);
-asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
- compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes,
- const compat_ulong_t __user *new_nodes);
-asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages,
- __u32 __user *pages,
- const int __user *nodes,
- int __user *status,
- int flags);
-
asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid,
compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
@@ -790,32 +848,24 @@ asmlinkage long compat_sys_open_by_handle_at(int mountdirfd,
int flags);
asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
unsigned vlen, unsigned int flags);
-asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid,
- const struct compat_iovec __user *lvec,
- compat_ulong_t liovcnt, const struct compat_iovec __user *rvec,
- compat_ulong_t riovcnt, compat_ulong_t flags);
-asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
- const struct compat_iovec __user *lvec,
- compat_ulong_t liovcnt, const struct compat_iovec __user *rvec,
- compat_ulong_t riovcnt, compat_ulong_t flags);
asmlinkage long compat_sys_execveat(int dfd, const char __user *filename,
const compat_uptr_t __user *argv,
const compat_uptr_t __user *envp, int flags);
asmlinkage ssize_t compat_sys_preadv2(compat_ulong_t fd,
- const struct compat_iovec __user *vec,
+ const struct iovec __user *vec,
compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
asmlinkage ssize_t compat_sys_pwritev2(compat_ulong_t fd,
- const struct compat_iovec __user *vec,
+ const struct iovec __user *vec,
compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2
-asmlinkage long compat_sys_readv64v2(unsigned long fd,
- const struct compat_iovec __user *vec,
+asmlinkage long compat_sys_preadv64v2(unsigned long fd,
+ const struct iovec __user *vec,
unsigned long vlen, loff_t pos, rwf_t flags);
#endif
#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
asmlinkage long compat_sys_pwritev64v2(unsigned long fd,
- const struct compat_iovec __user *vec,
+ const struct iovec __user *vec,
unsigned long vlen, loff_t pos, rwf_t flags);
#endif
@@ -847,7 +897,6 @@ asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
unsigned flags);
-asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args);
/* obsolete: fs/readdir.c */
asmlinkage long compat_sys_old_readdir(unsigned int fd,
@@ -878,19 +927,45 @@ asmlinkage long compat_sys_sigaction(int sig,
/* obsolete: net/socket.c */
asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
-#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
+#ifdef __ARCH_WANT_COMPAT_TRUNCATE64
+asmlinkage long compat_sys_truncate64(const char __user *pathname, compat_arg_u64(len));
+#endif
+#ifdef __ARCH_WANT_COMPAT_FTRUNCATE64
+asmlinkage long compat_sys_ftruncate64(unsigned int fd, compat_arg_u64(len));
+#endif
-/*
- * For most but not all architectures, "am I in a compat syscall?" and
- * "am I a compat task?" are the same question. For architectures on which
- * they aren't the same question, arch code can override in_compat_syscall.
- */
+#ifdef __ARCH_WANT_COMPAT_FALLOCATE
+asmlinkage long compat_sys_fallocate(int fd, int mode, compat_arg_u64(offset),
+ compat_arg_u64(len));
+#endif
-#ifndef in_compat_syscall
-static inline bool in_compat_syscall(void) { return is_compat_task(); }
+#ifdef __ARCH_WANT_COMPAT_PREAD64
+asmlinkage long compat_sys_pread64(unsigned int fd, char __user *buf, size_t count,
+ compat_arg_u64(pos));
#endif
+#ifdef __ARCH_WANT_COMPAT_PWRITE64
+asmlinkage long compat_sys_pwrite64(unsigned int fd, const char __user *buf, size_t count,
+ compat_arg_u64(pos));
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_SYNC_FILE_RANGE
+asmlinkage long compat_sys_sync_file_range(int fd, compat_arg_u64(pos),
+ compat_arg_u64(nbytes), unsigned int flags);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_FADVISE64_64
+asmlinkage long compat_sys_fadvise64_64(int fd, compat_arg_u64(pos),
+ compat_arg_u64(len), int advice);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_READAHEAD
+asmlinkage long compat_sys_readahead(int fd, compat_arg_u64(offset), size_t count);
+#endif
+
+#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
+
/**
* ns_to_old_timeval32 - Compat version of ns_to_timeval
* @nsec: the nanoseconds value to be converted
@@ -920,6 +995,17 @@ int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz,
int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
struct compat_statfs64 __user * buf);
+#ifdef CONFIG_COMPAT
+
+/*
+ * For most but not all architectures, "am I in a compat syscall?" and
+ * "am I a compat task?" are the same question. For architectures on which
+ * they aren't the same question, arch code can override in_compat_syscall.
+ */
+#ifndef in_compat_syscall
+static inline bool in_compat_syscall(void) { return is_compat_task(); }
+#endif
+
#else /* !CONFIG_COMPAT */
#define is_compat_task() (0)
@@ -929,6 +1015,24 @@ static inline bool in_compat_syscall(void) { return false; }
#endif /* CONFIG_COMPAT */
+#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))
+
+#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)
+
+long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
+ unsigned long bitmap_size);
+long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
+ unsigned long bitmap_size);
+
+/*
+ * Some legacy ABIs like the i386 one use less than natural alignment for 64-bit
+ * types, and will need special compat treatment for that. Most architectures
+ * don't need that special handling even for compat syscalls.
+ */
+#ifndef compat_need_64bit_alignment_fixup
+#define compat_need_64bit_alignment_fixup() false
+#endif
+
/*
* A pointer passed in from user mode. This should not
* be used for syscall parameters, just declare them
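
A sketch of the now always-visible struct compat_iovec paired with in_compat_syscall(), e.g. in a hypothetical ioctl argument-fetch path:

	static int foo_get_user_iovec(struct iovec *kiov, unsigned long arg)
	{
		if (in_compat_syscall()) {
			struct compat_iovec civ;

			if (copy_from_user(&civ, compat_ptr(arg), sizeof(civ)))
				return -EFAULT;
			kiov->iov_base = compat_ptr(civ.iov_base);
			kiov->iov_len = civ.iov_len;
			return 0;
		}

		return copy_from_user(kiov, (void __user *)arg, sizeof(*kiov)) ?
		       -EFAULT : 0;
	}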
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 333a6695a918..6cfd6902bd5b 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -5,8 +5,6 @@
/* Compiler specific definitions for Clang compiler */
-#define uninitialized_var(x) x = *(&(x))
-
/* same as gcc, this was present in clang-2.6 so we can assume it works
* with any version that can compile the kernel
*/
@@ -15,8 +13,14 @@
/* all clang versions usable with the kernel support KASAN ABI version 5 */
#define KASAN_ABI_VERSION 5
+/*
+ * Note: Checking __has_feature(*_sanitizer) is only true if the feature is
+ * enabled. Therefore it is not required to additionally check defined(CONFIG_*)
+ * to avoid adding redundant attributes in other configurations.
+ */
+
#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
-/* emulate gcc's __SANITIZE_ADDRESS__ flag */
+/* Emulate GCC's __SANITIZE_ADDRESS__ flag */
#define __SANITIZE_ADDRESS__
#define __no_sanitize_address \
__attribute__((no_sanitize("address", "hwaddress")))
@@ -24,21 +28,93 @@
#define __no_sanitize_address
#endif
+#if __has_feature(thread_sanitizer)
+/* emulate gcc's __SANITIZE_THREAD__ flag */
+#define __SANITIZE_THREAD__
+#define __no_sanitize_thread \
+ __attribute__((no_sanitize("thread")))
+#else
+#define __no_sanitize_thread
+#endif
+
+#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP)
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#define __HAVE_BUILTIN_BSWAP16__
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#if __has_feature(undefined_behavior_sanitizer)
+/* GCC does not have __SANITIZE_UNDEFINED__ */
+#define __no_sanitize_undefined \
+ __attribute__((no_sanitize("undefined")))
+#else
+#define __no_sanitize_undefined
+#endif
+
+#if __has_feature(memory_sanitizer)
+#define __SANITIZE_MEMORY__
+/*
+ * Unlike other sanitizers, KMSAN still inserts code into functions marked with
+ * no_sanitize("kernel-memory"). Using disable_sanitizer_instrumentation
+ * provides the behavior consistent with other __no_sanitize_ attributes,
+ * guaranteeing that __no_sanitize_memory functions remain uninstrumented.
+ */
+#define __no_sanitize_memory __disable_sanitizer_instrumentation
+
/*
- * Not all versions of clang implement the the type-generic versions
- * of the builtin overflow checkers. Fortunately, clang implements
- * __has_builtin allowing us to avoid awkward version
- * checks. Unfortunately, we don't know which version of gcc clang
- * pretends to be, so the macro may or may not be defined.
+ * The __no_kmsan_checks attribute ensures that a function does not produce
+ * false positive reports by:
+ * - initializing all local variables and memory stores in this function;
+ * - skipping all shadow checks;
+ * - passing initialized arguments to this function's callees.
*/
-#if __has_builtin(__builtin_mul_overflow) && \
- __has_builtin(__builtin_add_overflow) && \
- __has_builtin(__builtin_sub_overflow)
-#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
+#define __no_kmsan_checks __attribute__((no_sanitize("kernel-memory")))
+#else
+#define __no_sanitize_memory
+#define __no_kmsan_checks
#endif
-/* The following are for compatibility with GCC, from compiler-gcc.h,
- * and may be redefined here because they should not be shared with other
- * compilers, like ICC.
+/*
+ * Support for __has_feature(coverage_sanitizer) was added in Clang 13 together
+ * with no_sanitize("coverage"). Prior versions of Clang support coverage
+ * instrumentation, but cannot be queried for support by the preprocessor.
*/
-#define barrier() __asm__ __volatile__("" : : : "memory")
+#if __has_feature(coverage_sanitizer)
+#define __no_sanitize_coverage __attribute__((no_sanitize("coverage")))
+#else
+#define __no_sanitize_coverage
+#endif
+
+#if __has_feature(shadow_call_stack)
+# define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
+#endif
+
+#if __has_feature(kcfi)
+/* Disable CFI checking inside a function. */
+#define __nocfi __attribute__((__no_sanitize__("kcfi")))
+#endif
+
+/*
+ * Turn individual warnings and errors on and off locally, depending
+ * on version.
+ */
+#define __diag_clang(version, severity, s) \
+ __diag_clang_ ## version(__diag_clang_ ## severity s)
+
+/* Severity used in pragma directives */
+#define __diag_clang_ignore ignored
+#define __diag_clang_warn warning
+#define __diag_clang_error error
+
+#define __diag_str1(s) #s
+#define __diag_str(s) __diag_str1(s)
+#define __diag(s) _Pragma(__diag_str(clang diagnostic s))
+
+#if CONFIG_CLANG_VERSION >= 110000
+#define __diag_clang_11(s) __diag(s)
+#else
+#define __diag_clang_11(s)
+#endif
+
+#define __diag_ignore_all(option, comment) \
+ __diag_clang(11, ignore, option)
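
For illustration, a hedged sketch of the __diag machinery these definitions plug into (__diag_push()/__diag_pop() come from compiler_types.h; the warning choice and foo_demo() are hypothetical):

	__diag_push();
	__diag_ignore_all("-Wunused-but-set-variable",
			  "variable is set for documentation purposes only");
	static void foo_demo(void)
	{
		int trace_cookie;

		trace_cookie = 42;	/* intentionally never read */
	}
	__diag_pop();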
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index d7ee4c6bad48..f55a37efdb97 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -10,29 +10,6 @@
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
-#if GCC_VERSION < 40600
-# error Sorry, your compiler is too old - please upgrade it.
-#endif
-
-/* Optimization barrier */
-
-/* The "volatile" is due to gcc bugs */
-#define barrier() __asm__ __volatile__("": : :"memory")
-/*
- * This version is i.e. to prevent dead stores elimination on @ptr
- * where gcc and llvm may behave differently when otherwise using
- * normal barrier(): while gcc behavior gets along with a normal
- * barrier(), llvm needs an explicit input variable to be assumed
- * clobbered. The issue is as follows: while the inline asm might
- * access any memory it wants, the compiler could have fit all of
- * @ptr into memory registers instead, and since @ptr never escaped
- * from that, it proved that the inline asm wasn't touching any of
- * it. This version works well with both compilers, i.e. we're telling
- * the compiler that the inline asm absolutely may see the contents
- * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
- */
-#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
-
/*
* This macro obfuscates arithmetic on a variable address so that gcc
* shouldn't recognize the original var, and make assumptions about it.
@@ -58,23 +35,12 @@
(typeof(ptr)) (__ptr + (off)); \
})
-/*
- * A trick to suppress uninitialized variable warning without generating any
- * code
- */
-#define uninitialized_var(x) x = x
-
#ifdef CONFIG_RETPOLINE
#define __noretpoline __attribute__((__indirect_branch__("keep")))
#endif
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
-
-#define __compiletime_warning(message) __attribute__((__warning__(message)))
-#define __compiletime_error(message) __attribute__((__error__(message)))
-
#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
#define __latent_entropy __attribute__((latent_entropy))
#endif
@@ -100,43 +66,20 @@
__builtin_unreachable(); \
} while (0)
-#if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__)
-#define __randomize_layout __attribute__((randomize_layout))
-#define __no_randomize_layout __attribute__((no_randomize_layout))
-/* This anon struct can add padding, so only enable it under randstruct. */
-#define randomized_struct_fields_start struct {
-#define randomized_struct_fields_end } __randomize_layout;
-#endif
-
-/*
- * GCC 'asm goto' miscompiles certain code sequences:
- *
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
- *
- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- *
- * (asm goto is automatically volatile - the naming reflects this.)
- */
-#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
-
-/*
- * sparse (__CHECKER__) pretends to be gcc, but can't do constant
- * folding in __builtin_bswap*() (yet), so don't set these for it.
- */
-#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__)
+#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP)
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
-#if GCC_VERSION >= 40800
#define __HAVE_BUILTIN_BSWAP16__
-#endif
-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
#if GCC_VERSION >= 70000
#define KASAN_ABI_VERSION 5
-#elif GCC_VERSION >= 50000
+#else
#define KASAN_ABI_VERSION 4
-#elif GCC_VERSION >= 40902
-#define KASAN_ABI_VERSION 3
+#endif
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+#define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
#endif
#if __has_attribute(__no_sanitize_address__)
@@ -145,11 +88,39 @@
#define __no_sanitize_address
#endif
-#if GCC_VERSION >= 50100
-#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
+#if defined(__SANITIZE_THREAD__) && __has_attribute(__no_sanitize_thread__)
+#define __no_sanitize_thread __attribute__((no_sanitize_thread))
+#else
+#define __no_sanitize_thread
+#endif
+
+#if __has_attribute(__no_sanitize_undefined__)
+#define __no_sanitize_undefined __attribute__((no_sanitize_undefined))
+#else
+#define __no_sanitize_undefined
+#endif
+
+#if defined(CONFIG_KCOV) && __has_attribute(__no_sanitize_coverage__)
+#define __no_sanitize_coverage __attribute__((no_sanitize_coverage))
+#else
+#define __no_sanitize_coverage
#endif
/*
+ * Treat __SANITIZE_HWADDRESS__ the same as __SANITIZE_ADDRESS__ in the kernel,
+ * matching the defines used by Clang.
+ */
+#ifdef __SANITIZE_HWADDRESS__
+#define __SANITIZE_ADDRESS__
+#endif
+
+/*
+ * GCC does not support KMSAN.
+ */
+#define __no_sanitize_memory
+#define __no_kmsan_checks
+
+/*
* Turn individual warnings and errors on and off locally, depending
* on version.
*/
@@ -171,4 +142,13 @@
#define __diag_GCC_8(s)
#endif
-#define __no_fgcse __attribute__((optimize("-fno-gcse")))
+#define __diag_ignore_all(option, comment) \
+ __diag_GCC(8, ignore, option)
+
+/*
+ * Prior to 9.1, -Wno-alloc-size-larger-than (and therefore the "alloc_size"
+ * attribute) do not work, and must be disabled.
+ */
+#if GCC_VERSION < 90100
+#undef __alloc_size__
+#endif
diff --git a/include/linux/compiler-version.h b/include/linux/compiler-version.h
new file mode 100644
index 000000000000..573fa85b6c0c
--- /dev/null
+++ b/include/linux/compiler-version.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifdef __LINUX_COMPILER_VERSION_H
+#error "Please do not include <linux/compiler-version.h>. This is done by the build system."
+#endif
+#define __LINUX_COMPILER_VERSION_H
+
+/*
+ * This header exists to force full rebuild when the compiler is upgraded.
+ *
+ * When fixdep scans this, it will find this string "CONFIG_CC_VERSION_TEXT"
+ * and add dependency on include/config/CC_VERSION_TEXT, which is touched
+ * by Kconfig when the version string from the compiler changes.
+ */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 5e88e7e33abe..973a1bfd7ef5 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -24,7 +24,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
long ______r; \
static struct ftrace_likely_data \
__aligned(4) \
- __section(_ftrace_annotated_branch) \
+ __section("_ftrace_annotated_branch") \
______f = { \
.data.func = __func__, \
.data.file = __FILE__, \
@@ -60,7 +60,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#define __trace_if_value(cond) ({ \
static struct ftrace_branch_data \
__aligned(4) \
- __section(_ftrace_branch) \
+ __section("_ftrace_branch") \
__if_trace = { \
.func = __func__, \
.file = __FILE__, \
@@ -76,15 +76,31 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#else
# define likely(x) __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
+# define likely_notrace(x) likely(x)
+# define unlikely_notrace(x) unlikely(x)
#endif
/* Optimization barrier */
#ifndef barrier
-# define barrier() __memory_barrier()
+/* The "volatile" is due to gcc bugs */
+# define barrier() __asm__ __volatile__("": : :"memory")
#endif
#ifndef barrier_data
-# define barrier_data(ptr) barrier()
+/*
+ * This version is used, e.g., to prevent dead store elimination on @ptr
+ * where gcc and llvm may behave differently when otherwise using
+ * normal barrier(): while gcc behavior gets along with a normal
+ * barrier(), llvm needs an explicit input variable to be assumed
+ * clobbered. The issue is as follows: while the inline asm might
+ * access any memory it wants, the compiler could have fit all of
+ * @ptr into memory registers instead, and since @ptr never escaped
+ * from that, it proved that the inline asm wasn't touching any of
+ * it. This version works well with both compilers, i.e. we're telling
+ * the compiler that the inline asm absolutely may see the contents
+ * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
+ */
+# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
#endif
/* workaround for GCC PR82365 if needed */
@@ -93,42 +109,30 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#endif
/* Unreachable code */
-#ifdef CONFIG_STACK_VALIDATION
+#ifdef CONFIG_OBJTOOL
/*
* These macros help objtool understand GCC code flow for unreachable code.
* The __COUNTER__ based labels are a hack to make each instance of the macros
* unique, to convince GCC not to merge duplicate inline asm statements.
*/
-#define annotate_reachable() ({ \
- asm volatile("%c0:\n\t" \
- ".pushsection .discard.reachable\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
-})
-#define annotate_unreachable() ({ \
- asm volatile("%c0:\n\t" \
+#define __stringify_label(n) #n
+
+#define __annotate_unreachable(c) ({ \
+ asm volatile(__stringify_label(c) ":\n\t" \
".pushsection .discard.unreachable\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
+ ".long " __stringify_label(c) "b - .\n\t" \
+ ".popsection\n\t" : : "i" (c)); \
})
-#define ASM_UNREACHABLE \
- "999:\n\t" \
- ".pushsection .discard.unreachable\n\t" \
- ".long 999b - .\n\t" \
- ".popsection\n\t"
+#define annotate_unreachable() __annotate_unreachable(__COUNTER__)
/* Annotate a C jump table to allow objtool to follow the code flow */
-#define __annotate_jump_table __section(.rodata..c_jump_table)
+#define __annotate_jump_table __section(".rodata..c_jump_table")
-#else
-#define annotate_reachable()
+#else /* !CONFIG_OBJTOOL */
#define annotate_unreachable()
#define __annotate_jump_table
-#endif
+#endif /* CONFIG_OBJTOOL */
-#ifndef ASM_UNREACHABLE
-# define ASM_UNREACHABLE
-#endif
#ifndef unreachable
# define unreachable() do { \
annotate_unreachable(); \
@@ -155,7 +159,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
extern typeof(sym) sym; \
static const unsigned long __kentry_##sym \
__used \
- __section("___kentry" "+" #sym ) \
+ __attribute__((__section__("___kentry+" #sym))) \
= (unsigned long)&sym;
#endif
@@ -166,6 +170,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
(typeof(ptr)) (__ptr + (off)); })
#endif
+#define absolute_pointer(val) RELOC_HIDE((void *)(val), 0)
+
#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var) \
@@ -177,116 +183,24 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
-#include <uapi/linux/types.h>
-
-#define __READ_ONCE_SIZE \
-({ \
- switch (size) { \
- case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
- case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
- case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
- case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
- default: \
- barrier(); \
- __builtin_memcpy((void *)res, (const void *)p, size); \
- barrier(); \
- } \
-})
-
-static __always_inline
-void __read_once_size(const volatile void *p, void *res, int size)
-{
- __READ_ONCE_SIZE;
-}
-
-#ifdef CONFIG_KASAN
-/*
- * We can't declare function 'inline' because __no_sanitize_address confilcts
- * with inlining. Attempt to inline it may cause a build failure.
- * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
- * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
- */
-# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
-#else
-# define __no_kasan_or_inline __always_inline
-#endif
-
-static __no_kasan_or_inline
-void __read_once_size_nocheck(const volatile void *p, void *res, int size)
-{
- __READ_ONCE_SIZE;
-}
-
-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
-{
- switch (size) {
- case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
- case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
- case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
- case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
- default:
- barrier();
- __builtin_memcpy((void *)p, (const void *)res, size);
- barrier();
- }
-}
-
-/*
- * Prevent the compiler from merging or refetching reads or writes. The
- * compiler is also forbidden from reordering successive instances of
- * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
- * particular ordering. One way to make the compiler aware of ordering is to
- * put the two invocations of READ_ONCE or WRITE_ONCE in different C
- * statements.
+/**
+ * data_race - mark an expression as containing intentional data races
*
- * These two macros will also work on aggregate data types like structs or
- * unions. If the size of the accessed data type exceeds the word size of
- * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
- * fall back to memcpy(). There's at least two memcpy()s: one for the
- * __builtin_memcpy() and then one for the macro doing the copy of variable
- * - '__u' allocated on the stack.
+ * This data_race() macro is useful for situations in which data races
+ * should be forgiven. One example is diagnostic code that accesses
+ * shared variables but is not a part of the core synchronization design.
*
- * Their two major use cases are: (1) Mediating communication between
- * process-level code and irq/NMI handlers, all running on the same CPU,
- * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
- * mutilate accesses that either do not require ordering or that interact
- * with an explicit memory barrier or atomic instruction that provides the
- * required ordering.
+ * This macro *does not* affect normal code generation, but is a hint
+ * to tooling that data races here are to be ignored.
*/
-#include <asm/barrier.h>
-#include <linux/kasan-checks.h>
-
-#define __READ_ONCE(x, check) \
+#define data_race(expr) \
({ \
- union { typeof(x) __val; char __c[1]; } __u; \
- if (check) \
- __read_once_size(&(x), __u.__c, sizeof(x)); \
- else \
- __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
- smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
- __u.__val; \
-})
-#define READ_ONCE(x) __READ_ONCE(x, 1)
-
-/*
- * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
- * to hide memory access from KASAN.
- */
-#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
-
-static __no_kasan_or_inline
-unsigned long read_word_at_a_time(const void *addr)
-{
- kasan_check_read(addr, 1);
- return *(unsigned long *)addr;
-}
-
-#define WRITE_ONCE(x, val) \
-({ \
- union { typeof(x) __val; char __c[1]; } __u = \
- { .__val = (__force typeof(x)) (val) }; \
- __write_once_size(&(x), __u.__c, sizeof(x)); \
- __u.__val; \
+ __unqual_scalar_typeof(({ expr; })) __v = ({ \
+ __kcsan_disable_current(); \
+ expr; \
+ }); \
+ __kcsan_enable_current(); \
+ __v; \
})
#endif /* __KERNEL__ */
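
A small sketch of data_race() in the diagnostic role described above; struct foo_stats and the lockless read are hypothetical:

	struct foo_stats {
		u64 events;	/* updated under a lock elsewhere */
	};

	static void foo_report(struct foo_stats *st)
	{
		/* intentionally lockless: a stale value is acceptable here */
		u64 snapshot = data_race(st->events);

		pr_info("foo: %llu events so far\n",
			(unsigned long long)snapshot);
	}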
@@ -297,9 +211,11 @@ unsigned long read_word_at_a_time(const void *addr)
* otherwise, or eliminated entirely due to lack of references that are
* visible to the compiler.
*/
+#define ___ADDRESSABLE(sym, __attrs) \
+ static void * __used __attrs \
+ __UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym;
#define __ADDRESSABLE(sym) \
- static void * __section(.discard.addressable) __used \
- __PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
+ ___ADDRESSABLE(sym, __section(".discard.addressable"))
/**
* offset_to_ptr - convert a relative memory offset to an absolute pointer
@@ -312,48 +228,21 @@ static inline void *offset_to_ptr(const int *off)
#endif /* __ASSEMBLY__ */
-/* Compile time object size, -1 for unknown */
-#ifndef __compiletime_object_size
-# define __compiletime_object_size(obj) -1
-#endif
-#ifndef __compiletime_warning
-# define __compiletime_warning(message)
-#endif
-#ifndef __compiletime_error
-# define __compiletime_error(message)
-#endif
-
-#ifdef __OPTIMIZE__
-# define __compiletime_assert(condition, msg, prefix, suffix) \
- do { \
- extern void prefix ## suffix(void) __compiletime_error(msg); \
- if (!(condition)) \
- prefix ## suffix(); \
- } while (0)
-#else
-# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
-#endif
-
-#define _compiletime_assert(condition, msg, prefix, suffix) \
- __compiletime_assert(condition, msg, prefix, suffix)
+/* &a[0] degrades to a pointer: a different type from an array */
+#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
-/**
- * compiletime_assert - break build and emit msg if condition is false
- * @condition: a compile-time constant condition to check
- * @msg: a message to emit if condition is false
- *
- * In tradition of POSIX assert, this macro will break the build if the
- * supplied condition is *false*, emitting the supplied error message if the
- * compiler has support to do so.
+/*
+ * Whether 'type' is a signed type or an unsigned type. Supports scalar types,
+ * bool and also pointer types.
*/
-#define compiletime_assert(condition, msg) \
- _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
+#define is_signed_type(type) (((type)(-1)) < (__force type)1)
-#define compiletime_assert_atomic_type(t) \
- compiletime_assert(__native_word(t), \
- "Need native word sized stores/loads for atomicity.")
+/*
+ * This is needed in functions which generate the stack canary, see
+ * arch/x86/kernel/smpboot.c::start_secondary() for an example.
+ */
+#define prevent_tail_call_optimization() mb()
-/* &a[0] degrades to a pointer: a different type from an array */
-#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+#include <asm/rwonce.h>
#endif /* __LINUX_COMPILER_H */
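
The ONCE accessors keep working unchanged for users of this header, now supplied via <asm/rwonce.h>; a minimal producer/consumer sketch with a hypothetical 'ready' flag:

	static int ready;

	static void foo_publish(void)
	{
		WRITE_ONCE(ready, 1);	/* store cannot be torn or elided */
	}

	static bool foo_poll(void)
	{
		return READ_ONCE(ready);	/* forces a fresh load each call */
	}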
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index cdf016596659..898b3458b24a 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -21,29 +21,6 @@
*/
/*
- * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17.
- * In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute
- * by hand.
- *
- * sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__
- * depending on the compiler used to build it; however, these attributes have
- * no semantic effects for sparse, so it does not matter. Also note that,
- * in order to avoid sparse's warnings, even the unsupported ones must be
- * defined to 0.
- */
-#ifndef __has_attribute
-# define __has_attribute(x) __GCC4_has_attribute_##x
-# define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9)
-# define __GCC4_has_attribute___copy__ 0
-# define __GCC4_has_attribute___designated_init__ 0
-# define __GCC4_has_attribute___externally_visible__ 1
-# define __GCC4_has_attribute___noclone__ 1
-# define __GCC4_has_attribute___nonstring__ 0
-# define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8)
-# define __GCC4_has_attribute___fallthrough__ 0
-#endif
-
-/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alias-function-attribute
*/
#define __alias(symbol) __attribute__((__alias__(#symbol)))
@@ -57,6 +34,16 @@
#define __aligned_largest __attribute__((__aligned__))
/*
+ * Note: do not use this directly. Instead, use __alloc_size() since it is conditionally
+ * available and includes other attributes. For GCC < 9.1, __alloc_size__ gets undefined
+ * in compiler-gcc.h, due to misbehaviors.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alloc_005fsize-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#alloc-size
+ */
+#define __alloc_size__(x, ...) __attribute__((__alloc_size__(x, ## __VA_ARGS__)))
+
+/*
* Note: users of __always_inline currently do not write "inline" themselves,
* which seems to be required by gcc to apply the attribute according
* to its docs (and also "warning: always_inline function might not be
@@ -77,7 +64,6 @@
* compiler should see some alignment anyway, when the return value is
* massaged by 'flags = ptr & 3; ptr &= ~3;').
*
- * Optional: only supported since gcc >= 4.9
* Optional: not supported by icc
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-assume_005faligned-function-attribute
@@ -116,6 +102,19 @@
#endif
/*
+ * Optional: not supported by gcc
+ * Optional: only supported since clang >= 14.0
+ * Optional: not supported by icc
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#diagnose_as_builtin
+ */
+#if __has_attribute(__diagnose_as_builtin__)
+# define __diagnose_as(builtin...) __attribute__((__diagnose_as_builtin__(builtin)))
+#else
+# define __diagnose_as(builtin...)
+#endif
+
+/*
* Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated'
* attribute warnings entirely and for good") for more information.
*
@@ -128,7 +127,6 @@
#define __deprecated
/*
- * Optional: only supported since gcc >= 5.1
* Optional: not supported by clang
* Optional: not supported by icc
*
@@ -141,6 +139,17 @@
#endif
/*
+ * Optional: only supported since clang >= 14.0
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-error-function-attribute
+ */
+#if __has_attribute(__error__)
+# define __compiletime_error(msg) __attribute__((__error__(msg)))
+#else
+# define __compiletime_error(msg)
+#endif
+
+/*
* Optional: not supported by clang
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-externally_005fvisible-function-attribute
@@ -166,6 +175,7 @@
/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-malloc-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#malloc
*/
#define __malloc __attribute__((__malloc__))
@@ -176,6 +186,18 @@
#define __mode(x) __attribute__((__mode__(x)))
/*
+ * Optional: only supported since gcc >= 7
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/x86-Function-Attributes.html#index-no_005fcaller_005fsaved_005fregisters-function-attribute_002c-x86
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#no-caller-saved-registers
+ */
+#if __has_attribute(__no_caller_saved_registers__)
+# define __no_caller_saved_registers __attribute__((__no_caller_saved_registers__))
+#else
+# define __no_caller_saved_registers
+#endif
+
+/*
* Optional: not supported by clang
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noclone-function-attribute
@@ -191,6 +213,7 @@
* must end with any of these keywords:
* break;
* fallthrough;
+ * continue;
* goto <label>;
* return [expression];
*
@@ -203,6 +226,12 @@
#endif
/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-flatten-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#flatten
+ */
+#define __flatten __attribute__((flatten))
+
+/*
* Note the missing underscores.
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noinline-function-attribute
@@ -224,6 +253,18 @@
#endif
/*
+ * Optional: only supported since GCC >= 7.1, clang >= 13.0.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-no_005fprofile_005finstrument_005ffunction-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#no-profile-instrument-function
+ */
+#if __has_attribute(__no_profile_instrument_function__)
+# define __no_profile __attribute__((__no_profile_instrument_function__))
+#else
+# define __no_profile
+#endif
+
+/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noreturn-function-attribute
* clang: https://clang.llvm.org/docs/AttributeReference.html#noreturn
* clang: https://clang.llvm.org/docs/AttributeReference.html#id1
@@ -231,12 +272,38 @@
#define __noreturn __attribute__((__noreturn__))
/*
+ * Optional: not supported by gcc.
+ * Optional: not supported by icc.
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#overloadable
+ */
+#if __has_attribute(__overloadable__)
+# define __overloadable __attribute__((__overloadable__))
+#else
+# define __overloadable
+#endif
+
+/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-packed-type-attribute
* clang: https://clang.llvm.org/docs/AttributeReference.html#packed
*/
#define __packed __attribute__((__packed__))
/*
+ * Note: the "type" argument should match any __builtin_object_size(p, type) usage.
+ *
+ * Optional: not supported by gcc.
+ * Optional: not supported by icc.
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#pass-object-size-pass-dynamic-object-size
+ */
+#if __has_attribute(__pass_object_size__)
+# define __pass_object_size(type) __attribute__((__pass_object_size__(type)))
+#else
+# define __pass_object_size(type)
+#endif
+
+/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-pure-function-attribute
*/
#define __pure __attribute__((__pure__))
@@ -246,7 +313,7 @@
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-section-variable-attribute
* clang: https://clang.llvm.org/docs/AttributeReference.html#section-declspec-allocate
*/
-#define __section(S) __attribute__((__section__(#S)))
+#define __section(section) __attribute__((__section__(section)))
/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-unused-function-attribute
@@ -265,9 +332,51 @@
#define __used __attribute__((__used__))
/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warn_005funused_005fresult-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#nodiscard-warn-unused-result
+ */
+#define __must_check __attribute__((__warn_unused_result__))
+
+/*
+ * Optional: only supported since clang >= 14.0
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warning-function-attribute
+ */
+#if __has_attribute(__warning__)
+# define __compiletime_warning(msg) __attribute__((__warning__(msg)))
+#else
+# define __compiletime_warning(msg)
+#endif
+
+/*
+ * Optional: only supported since clang >= 14.0
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#disable-sanitizer-instrumentation
+ *
+ * disable_sanitizer_instrumentation is not equivalent to
+ * no_sanitize((<sanitizer-name>)): the latter may still let specific sanitizers
+ * insert code into functions to prevent false positives. Unlike that,
+ * disable_sanitizer_instrumentation prevents all kinds of instrumentation in
+ * functions carrying the attribute.
+ */
+#if __has_attribute(disable_sanitizer_instrumentation)
+# define __disable_sanitizer_instrumentation \
+ __attribute__((disable_sanitizer_instrumentation))
+#else
+# define __disable_sanitizer_instrumentation
+#endif
+
+/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-weak-function-attribute
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-weak-variable-attribute
*/
#define __weak __attribute__((__weak__))
+/*
+ * Used by functions that use '__builtin_return_address'. These functions
+ * must not be split or inlined, since either can cause
+ * '__builtin_return_address' to return an unexpected address.
+ */
+#define __fix_address noinline __noclone
+
#endif /* __LINUX_COMPILER_ATTRIBUTES_H */
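A minimal sketch of how the attribute wrappers above are meant to be consumed
by allocator declarations, via the __alloc_size()/__realloc_size() helpers
added to compiler_types.h further down; the function names here are
hypothetical, not part of this patch:

    /*
     * __alloc_size(1) tells the compiler the returned pointer addresses
     * 'len' bytes, so __builtin_dynamic_object_size() and the fortified
     * string helpers can flag out-of-bounds accesses at the call site.
     */
    void *my_bufalloc(size_t len) __alloc_size(1);

    /*
     * A reallocator uses __realloc_size(), which omits __malloc because
     * the returned pointer may alias the one passed in.
     */
    void *my_bufrealloc(void *p, size_t len) __realloc_size(2);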
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 72393a8c1a6c..eb0466236661 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -4,49 +4,69 @@
#ifndef __ASSEMBLY__
+/*
+ * Skipped when running bindgen due to a libclang issue;
+ * see https://github.com/rust-lang/rust-bindgen/issues/2244.
+ */
+#if defined(CONFIG_DEBUG_INFO_BTF) && defined(CONFIG_PAHOLE_HAS_BTF_TAG) && \
+ __has_attribute(btf_type_tag) && !defined(__BINDGEN__)
+# define BTF_TYPE_TAG(value) __attribute__((btf_type_tag(#value)))
+#else
+# define BTF_TYPE_TAG(value) /* nothing */
+#endif
+
+/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */
#ifdef __CHECKER__
-# define __user __attribute__((noderef, address_space(1)))
+/* address spaces */
# define __kernel __attribute__((address_space(0)))
-# define __safe __attribute__((safe))
-# define __force __attribute__((force))
-# define __nocast __attribute__((nocast))
-# define __iomem __attribute__((noderef, address_space(2)))
+# define __user __attribute__((noderef, address_space(__user)))
+# define __iomem __attribute__((noderef, address_space(__iomem)))
+# define __percpu __attribute__((noderef, address_space(__percpu)))
+# define __rcu __attribute__((noderef, address_space(__rcu)))
+static inline void __chk_user_ptr(const volatile void __user *ptr) { }
+static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
+/* context/locking */
# define __must_hold(x) __attribute__((context(x,1,1)))
# define __acquires(x) __attribute__((context(x,0,1)))
+# define __cond_acquires(x) __attribute__((context(x,0,-1)))
# define __releases(x) __attribute__((context(x,1,0)))
# define __acquire(x) __context__(x,1)
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
-# define __percpu __attribute__((noderef, address_space(3)))
-# define __rcu __attribute__((noderef, address_space(4)))
+/* other */
+# define __force __attribute__((force))
+# define __nocast __attribute__((nocast))
+# define __safe __attribute__((safe))
# define __private __attribute__((noderef))
-extern void __chk_user_ptr(const volatile void __user *);
-extern void __chk_io_ptr(const volatile void __iomem *);
# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
#else /* __CHECKER__ */
+/* address spaces */
+# define __kernel
# ifdef STRUCTLEAK_PLUGIN
-# define __user __attribute__((user))
+# define __user __attribute__((user))
# else
-# define __user
+# define __user BTF_TYPE_TAG(user)
# endif
-# define __kernel
-# define __safe
-# define __force
-# define __nocast
# define __iomem
-# define __chk_user_ptr(x) (void)0
-# define __chk_io_ptr(x) (void)0
-# define __builtin_warning(x, y...) (1)
+# define __percpu BTF_TYPE_TAG(percpu)
+# define __rcu
+# define __chk_user_ptr(x) (void)0
+# define __chk_io_ptr(x) (void)0
+/* context/locking */
# define __must_hold(x)
# define __acquires(x)
+# define __cond_acquires(x)
# define __releases(x)
-# define __acquire(x) (void)0
-# define __release(x) (void)0
+# define __acquire(x) (void)0
+# define __release(x) (void)0
# define __cond_lock(x,c) (c)
-# define __percpu
-# define __rcu
+/* other */
+# define __force
+# define __nocast
+# define __safe
# define __private
# define ACCESS_PRIVATE(p, member) ((p)->member)
+# define __builtin_warning(x, y...) (1)
#endif /* __CHECKER__ */
/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
@@ -58,6 +78,17 @@ extern void __chk_io_ptr(const volatile void __iomem *);
/* Attributes */
#include <linux/compiler_attributes.h>
+/* Builtins */
+
+/*
+ * __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21.
+ * In the meantime, to support gcc < 10, we implement __has_builtin
+ * by hand.
+ */
+#ifndef __has_builtin
+#define __has_builtin(x) (0)
+#endif
+
/* Compiler specific macros. */
#ifdef __clang__
#include <linux/compiler-clang.h>
@@ -104,12 +135,6 @@ struct ftrace_likely_data {
unsigned long constant;
};
-#ifdef CONFIG_ENABLE_MUST_CHECK
-#define __must_check __attribute__((__warn_unused_result__))
-#else
-#define __must_check
-#endif
-
#if defined(CC_USING_HOTPATCH)
#define notrace __attribute__((hotpatch(0, 0)))
#elif defined(CC_USING_PATCHABLE_FUNCTION_ENTRY)
@@ -126,25 +151,14 @@ struct ftrace_likely_data {
*/
#define __naked __attribute__((__naked__)) notrace
-#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
-
/*
- * Force always-inline if the user requests it so via the .config.
* Prefer gnu_inline, so that extern inline functions do not emit an
* externally visible function. This makes extern inline behave as per gnu89
* semantics rather than c99. This prevents multiple symbol definition errors
* of extern inline functions at link time.
* A lot of inline functions can cause havoc with function tracing.
- * Do not use __always_inline here, since currently it expands to inline again
- * (which would break users of __always_inline).
*/
-#if !defined(CONFIG_OPTIMIZE_INLINING)
-#define inline inline __attribute__((__always_inline__)) __gnu_inline \
- __inline_maybe_unused notrace
-#else
-#define inline inline __gnu_inline \
- __inline_maybe_unused notrace
-#endif
+#define inline inline __gnu_inline __inline_maybe_unused notrace
/*
* gcc provides both __inline__ and __inline as alternate spellings of
@@ -176,6 +190,52 @@ struct ftrace_likely_data {
*/
#define noinline_for_stack noinline
+/*
+ * Sanitizer helper attributes: Because __always_inline and
+ * __no_sanitize_* conflict, provide helper attributes that will either expand
+ * to __no_sanitize_* in compilation units where instrumentation is enabled
+ * (__SANITIZE_*__), or __always_inline in compilation units without
+ * instrumentation (__SANITIZE_*__ undefined).
+ */
+#ifdef __SANITIZE_ADDRESS__
+/*
+ * We can't declare a function 'inline' because __no_sanitize_address conflicts
+ * with inlining. Attempting to inline it may cause a build failure.
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
+ */
+# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
+# define __no_sanitize_or_inline __no_kasan_or_inline
+#else
+# define __no_kasan_or_inline __always_inline
+#endif
+
+#ifdef __SANITIZE_THREAD__
+/*
+ * Clang still emits instrumentation for __tsan_func_{entry,exit}() and builtin
+ * atomics even with __no_sanitize_thread (to avoid false positives in userspace
+ * ThreadSanitizer). The kernel's requirements are stricter and we really do not
+ * want any instrumentation with __no_kcsan.
+ *
+ * Therefore we add __disable_sanitizer_instrumentation where available to
+ * disable all instrumentation. See Kconfig.kcsan where this is mandatory.
+ */
+# define __no_kcsan __no_sanitize_thread __disable_sanitizer_instrumentation
+# define __no_sanitize_or_inline __no_kcsan notrace __maybe_unused
+#else
+# define __no_kcsan
+#endif
+
+#ifndef __no_sanitize_or_inline
+#define __no_sanitize_or_inline __always_inline
+#endif
+
+/* Section for code which can't be instrumented at all */
+#define noinstr \
+ noinline notrace __attribute((__section__(".noinstr.text"))) \
+ __no_kcsan __no_sanitize_address __no_profile __no_sanitize_coverage \
+ __no_sanitize_memory
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
@@ -189,17 +249,39 @@ struct ftrace_likely_data {
# define __latent_entropy
#endif
-#ifndef __randomize_layout
+#if defined(RANDSTRUCT) && !defined(__CHECKER__)
+# define __randomize_layout __designated_init __attribute__((randomize_layout))
+# define __no_randomize_layout __attribute__((no_randomize_layout))
+/* This anon struct can add padding, so only enable it under randstruct. */
+# define randomized_struct_fields_start struct {
+# define randomized_struct_fields_end } __randomize_layout;
+#else
# define __randomize_layout __designated_init
+# define __no_randomize_layout
+# define randomized_struct_fields_start
+# define randomized_struct_fields_end
#endif
-#ifndef __no_randomize_layout
-# define __no_randomize_layout
+#ifndef __noscs
+# define __noscs
#endif
-#ifndef randomized_struct_fields_start
-# define randomized_struct_fields_start
-# define randomized_struct_fields_end
+#ifndef __nocfi
+# define __nocfi
+#endif
+
+/*
+ * Any place that could be marked with the "alloc_size" attribute is also
+ * a place to be marked with the "malloc" attribute, except those that may
+ * be performing a _reallocation_, as that may alias the existing pointer.
+ * For these, use __realloc_size().
+ */
+#ifdef __alloc_size__
+# define __alloc_size(x, ...) __alloc_size__(x, ## __VA_ARGS__) __malloc
+# define __realloc_size(x, ...) __alloc_size__(x, ## __VA_ARGS__)
+#else
+# define __alloc_size(x, ...) __malloc
+# define __realloc_size(x, ...)
#endif
#ifndef asm_volatile_goto
@@ -212,18 +294,72 @@ struct ftrace_likely_data {
#define asm_inline asm
#endif
-#ifndef __no_fgcse
-# define __no_fgcse
-#endif
-
/* Are two types/vars the same type (ignoring qualifiers)? */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+/*
+ * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
+ * non-scalar types unchanged.
+ */
+/*
+ * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
+ * is not type-compatible with 'signed char', and we define a separate case.
+ */
+#define __scalar_type_to_expr_cases(type) \
+ unsigned type: (unsigned type)0, \
+ signed type: (signed type)0
+
+#define __unqual_scalar_typeof(x) typeof( \
+ _Generic((x), \
+ char: (char)0, \
+ __scalar_type_to_expr_cases(char), \
+ __scalar_type_to_expr_cases(short), \
+ __scalar_type_to_expr_cases(int), \
+ __scalar_type_to_expr_cases(long), \
+ __scalar_type_to_expr_cases(long long), \
+ default: (x)))
+
/* Is this type a native word size -- useful for atomic operations */
#define __native_word(t) \
(sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+#ifdef __OPTIMIZE__
+# define __compiletime_assert(condition, msg, prefix, suffix) \
+ do { \
+ /* \
+ * __noreturn is needed to give the compiler enough \
+ * information to avoid certain possibly-uninitialized \
+ * warnings (regardless of the build failing). \
+ */ \
+ __noreturn extern void prefix ## suffix(void) \
+ __compiletime_error(msg); \
+ if (!(condition)) \
+ prefix ## suffix(); \
+ } while (0)
+#else
+# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
+#endif
+
+#define _compiletime_assert(condition, msg, prefix, suffix) \
+ __compiletime_assert(condition, msg, prefix, suffix)
+
+/**
+ * compiletime_assert - break build and emit msg if condition is false
+ * @condition: a compile-time constant condition to check
+ * @msg: a message to emit if condition is false
+ *
+ * In the tradition of POSIX assert, this macro will break the build if the
+ * supplied condition is *false*, emitting the supplied error message if the
+ * compiler has support to do so.
+ */
+#define compiletime_assert(condition, msg) \
+ _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
+
+#define compiletime_assert_atomic_type(t) \
+ compiletime_assert(__native_word(t), \
+ "Need native word sized stores/loads for atomicity.")
+
/* Helpers for emitting diagnostics in pragmas. */
#ifndef __diag
#define __diag(string)
@@ -243,4 +379,8 @@ struct ftrace_likely_data {
#define __diag_error(compiler, version, option, comment) \
__diag_ ## compiler(version, error, option)
+#ifndef __diag_ignore_all
+#define __diag_ignore_all(option, comment)
+#endif
+
#endif /* __LINUX_COMPILER_TYPES_H */
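The compiletime_assert() machinery above pairs with __compiletime_error()
from compiler_attributes.h: under __OPTIMIZE__ it declares an extern function
carrying the message and calls it only on a false condition, so a failed
assertion surfaces as that attribute's diagnostic (or, if the attribute is
unavailable, as a link error against the never-defined symbol). A short
usage sketch, not taken from this patch:

    static inline void check_layout_sketch(void)
    {
            /* Compile-time true: the dead call is optimized away. */
            compiletime_assert(sizeof(int) >= 4, "int smaller than 32 bits");

            /*
             * A false condition would instead break the build with the
             * supplied message, e.g.:
             * compiletime_assert(sizeof(int) == 8, "int is not 64-bit");
             */
    }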
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 519e94915d18..62b32b19e0a8 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -9,7 +9,7 @@
* See kernel/sched/completion.c for details.
*/
-#include <linux/wait.h>
+#include <linux/swait.h>
/*
* struct completion - structure used to maintain state for a "completion"
@@ -25,16 +25,15 @@
*/
struct completion {
unsigned int done;
- wait_queue_head_t wait;
+ struct swait_queue_head wait;
};
-#define init_completion_map(x, m) __init_completion(x)
-#define init_completion(x) __init_completion(x)
+#define init_completion_map(x, m) init_completion(x)
static inline void complete_acquire(struct completion *x) {}
static inline void complete_release(struct completion *x) {}
#define COMPLETION_INITIALIZER(work) \
- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+ { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
(*({ init_completion_map(&(work), &(map)); &(work); }))
@@ -82,10 +81,10 @@ static inline void complete_release(struct completion *x) {}
* This inline function will initialize a dynamically created completion
* structure.
*/
-static inline void __init_completion(struct completion *x)
+static inline void init_completion(struct completion *x)
{
x->done = 0;
- init_waitqueue_head(&x->wait);
+ init_swait_queue_head(&x->wait);
}
/**
@@ -104,6 +103,7 @@ extern void wait_for_completion(struct completion *);
extern void wait_for_completion_io(struct completion *);
extern int wait_for_completion_interruptible(struct completion *x);
extern int wait_for_completion_killable(struct completion *x);
+extern int wait_for_completion_state(struct completion *x, unsigned int state);
extern unsigned long wait_for_completion_timeout(struct completion *x,
unsigned long timeout);
extern unsigned long wait_for_completion_io_timeout(struct completion *x,
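The swait conversion above leaves the completion API itself unchanged; a
sketch of typical producer/consumer usage (all names illustrative):

    static struct completion setup_done;

    static int consumer_sketch(void)
    {
            init_completion(&setup_done);     /* now a real inline, not a macro */
            /* ... kick off the producer ... */
            wait_for_completion(&setup_done); /* sleeps on the swait queue */
            return 0;
    }

    static void producer_sketch(void)
    {
            complete(&setup_done);            /* wakes exactly one waiter */
    }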
diff --git a/include/linux/component.h b/include/linux/component.h
index 16de18f473d7..df4aa75c9e7c 100644
--- a/include/linux/component.h
+++ b/include/linux/component.h
@@ -38,10 +38,10 @@ int component_add_typed(struct device *dev, const struct component_ops *ops,
int subcomponent);
void component_del(struct device *, const struct component_ops *);
-int component_bind_all(struct device *master, void *master_data);
-void component_unbind_all(struct device *master, void *master_data);
+int component_bind_all(struct device *parent, void *data);
+void component_unbind_all(struct device *parent, void *data);
-struct master;
+struct aggregate_device;
/**
* struct component_master_ops - callback for the aggregate driver
@@ -82,6 +82,12 @@ struct component_master_ops {
void (*unbind)(struct device *master);
};
+/* A set helper functions for component compare/release */
+int component_compare_of(struct device *dev, void *data);
+void component_release_of(struct device *dev, void *data);
+int component_compare_dev(struct device *dev, void *data);
+int component_compare_dev_name(struct device *dev, void *data);
+
void component_master_del(struct device *,
const struct component_master_ops *);
@@ -89,22 +95,22 @@ struct component_match;
int component_master_add_with_match(struct device *,
const struct component_master_ops *, struct component_match *);
-void component_match_add_release(struct device *master,
+void component_match_add_release(struct device *parent,
struct component_match **matchptr,
void (*release)(struct device *, void *),
int (*compare)(struct device *, void *), void *compare_data);
-void component_match_add_typed(struct device *master,
+void component_match_add_typed(struct device *parent,
struct component_match **matchptr,
int (*compare_typed)(struct device *, int, void *), void *compare_data);
/**
* component_match_add - add a component match entry
- * @master: device with the aggregate driver
+ * @parent: device with the aggregate driver
* @matchptr: pointer to the list of component matches
* @compare: compare function to match against all components
* @compare_data: opaque pointer passed to the @compare function
*
- * Adds a new component match to the list stored in @matchptr, which the @master
+ * Adds a new component match to the list stored in @matchptr, which the @parent
* aggregate driver needs to function. The list of component matches pointed to
* by @matchptr must be initialized to NULL before adding the first match. This
* only matches against components added with component_add().
@@ -114,11 +120,11 @@ void component_match_add_typed(struct device *master,
*
* See also component_match_add_release() and component_match_add_typed().
*/
-static inline void component_match_add(struct device *master,
+static inline void component_match_add(struct device *parent,
struct component_match **matchptr,
int (*compare)(struct device *, void *), void *compare_data)
{
- component_match_add_release(master, matchptr, NULL, compare,
+ component_match_add_release(parent, matchptr, NULL, compare,
compare_data);
}
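A sketch of how an aggregate driver combines the renamed 'parent' parameter
with the new compare/release helpers; 'np' stands for the DT node of one
component and 'my_master_ops' is a hypothetical ops table:

    struct component_match *match = NULL;
    int ret;

    /* component_release_of() puts the of_node reference taken here. */
    component_match_add_release(parent, &match,
                                component_release_of,
                                component_compare_of,
                                of_node_get(np));

    ret = component_master_add_with_match(parent, &my_master_ops, match);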
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index fa9490a8874c..97cfd13bae51 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
+/*
* configfs.h - definitions for the device driver filesystem
*
* Based on sysfs:
@@ -13,7 +11,7 @@
*
* configfs Copyright (C) 2005 Oracle. All rights reserved.
*
- * Please read Documentation/filesystems/configfs/configfs.txt before using
+ * Please read Documentation/filesystems/configfs.rst before using
* the configfs interface, ESPECIALLY the parts about reference counts and
* item destructors.
*/
diff --git a/include/linux/connector.h b/include/linux/connector.h
index cb732643471b..487350bb19c3 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -64,14 +64,14 @@ struct cn_dev {
* @callback: connector's callback.
* parameters are %cn_msg and the sender's credentials
*/
-int cn_add_callback(struct cb_id *id, const char *name,
+int cn_add_callback(const struct cb_id *id, const char *name,
void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
/**
* cn_del_callback() - Unregisters new callback with connector core.
*
* @id: unique connector's user identifier.
*/
-void cn_del_callback(struct cb_id *id);
+void cn_del_callback(const struct cb_id *id);
/**
@@ -99,7 +99,7 @@ void cn_del_callback(struct cb_id *id);
int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask);
/**
- * cn_netlink_send_mult - Sends message to the specified groups.
+ * cn_netlink_send - Sends message to the specified groups.
*
* @msg: message header(with attached data).
* @portid: destination port.
@@ -122,14 +122,14 @@ int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp
int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask);
int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
- struct cb_id *id,
+ const struct cb_id *id,
void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
-void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
+void cn_queue_del_callback(struct cn_queue_dev *dev, const struct cb_id *id);
void cn_queue_release_callback(struct cn_callback_entry *);
struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *);
void cn_queue_free_dev(struct cn_queue_dev *dev);
-int cn_cb_equal(struct cb_id *, struct cb_id *);
+int cn_cb_equal(const struct cb_id *, const struct cb_id *);
#endif /* __CONNECTOR_H */
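With the constified IDs, a registration now looks like this sketch (the proc
connector's idx/val are reused purely for illustration):

    static const struct cb_id my_id = {
            .idx = CN_IDX_PROC,
            .val = CN_VAL_PROC,
    };

    static void my_cb(struct cn_msg *msg, struct netlink_skb_parms *nsp)
    {
            /* msg->data holds msg->len bytes of payload */
    }

    /* err = cn_add_callback(&my_id, "my-listener", my_cb); */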
diff --git a/include/linux/console.h b/include/linux/console.h
index f33016b3a401..8c1686e2c233 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -24,17 +24,13 @@ struct module;
struct tty_struct;
struct notifier_block;
-/*
- * this is what the terminal answers to a ESC-Z or csi0c query.
- */
-#define VT100ID "\033[?1;2c"
-#define VT102ID "\033[?6c"
-
enum con_scroll {
SM_UP,
SM_DOWN,
};
+enum vc_intensity;
+
/**
* struct consw - callbacks for consoles
*
@@ -66,7 +62,6 @@ struct consw {
int (*con_font_get)(struct vc_data *vc, struct console_font *font);
int (*con_font_default)(struct vc_data *vc,
struct console_font *font, char *name);
- int (*con_font_copy)(struct vc_data *vc, int con);
int (*con_resize)(struct vc_data *vc, unsigned int width,
unsigned int height, unsigned int user);
void (*con_set_palette)(struct vc_data *vc,
@@ -74,10 +69,11 @@ struct consw {
void (*con_scrolldelta)(struct vc_data *vc, int lines);
int (*con_set_origin)(struct vc_data *vc);
void (*con_save_screen)(struct vc_data *vc);
- u8 (*con_build_attr)(struct vc_data *vc, u8 color, u8 intensity,
- u8 blink, u8 underline, u8 reverse, u8 italic);
+ u8 (*con_build_attr)(struct vc_data *vc, u8 color,
+ enum vc_intensity intensity,
+ bool blink, bool underline, bool reverse, bool italic);
void (*con_invert_region)(struct vc_data *vc, u16 *p, int count);
- u16 *(*con_screen_pos)(struct vc_data *vc, int offset);
+ u16 *(*con_screen_pos)(const struct vc_data *vc, int offset);
unsigned long (*con_getxy)(struct vc_data *vc, unsigned long position,
int *px, int *py);
/*
@@ -134,7 +130,7 @@ static inline int con_debug_leave(void)
*/
#define CON_PRINTBUFFER (1)
-#define CON_CONSDEV (2) /* Last on the command line */
+#define CON_CONSDEV (2) /* Preferred console, /dev/console */
#define CON_ENABLED (4)
#define CON_BOOT (8)
#define CON_ANYTIME (16) /* Safe to call when cpu is offline */
@@ -148,10 +144,15 @@ struct console {
struct tty_driver *(*device)(struct console *, int *);
void (*unblank)(void);
int (*setup)(struct console *, char *);
+ int (*exit)(struct console *);
int (*match)(struct console *, char *name, int idx, char *options);
short flags;
short index;
int cflag;
+ uint ispeed;
+ uint ospeed;
+ u64 seq;
+ unsigned long dropped;
void *data;
struct console *next;
};
@@ -220,12 +221,6 @@ extern atomic_t ignore_console_lock_warning;
#define VESA_HSYNC_SUSPEND 2
#define VESA_POWERDOWN 3
-#ifdef CONFIG_VGA_CONSOLE
-extern bool vgacon_text_force(void);
-#else
-static inline bool vgacon_text_force(void) { return false; }
-#endif
-
extern void console_init(void);
/* For deferred console takeover */
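A minimal sketch of a console that makes use of the new ->exit() hook; every
name except the struct members is hypothetical:

    static void my_write(struct console *con, const char *s, unsigned int n)
    {
            /* push n bytes to the hardware */
    }

    static int my_exit(struct console *con)
    {
            /* undo what ->setup() did; called when the console is removed */
            return 0;
    }

    static struct console my_console = {
            .name  = "mycon",
            .write = my_write,
            .exit  = my_exit,
            .flags = CON_PRINTBUFFER,
            .index = -1,
    };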
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index 24d4c16e3ae0..1518568aaf0f 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -17,10 +17,47 @@
#include <linux/vt.h>
#include <linux/workqueue.h>
-struct uni_pagedir;
+struct uni_pagedict;
struct uni_screen;
#define NPAR 16
+#define VC_TABSTOPS_COUNT 256U
+
+enum vc_intensity {
+ VCI_HALF_BRIGHT,
+ VCI_NORMAL,
+ VCI_BOLD,
+ VCI_MASK = 0x3,
+};
+
+/**
+ * struct vc_state -- state of a VC
+ * @x: cursor's x-position
+ * @y: cursor's y-position
+ * @color: foreground & background colors
+ * @Gx_charset: which translation map each G0/G1 slot selects (like GRAF_MAP, LAT1_MAP)
+ * @charset: what character set to use (0=G0 or 1=G1)
+ * @intensity: see enum vc_intensity for values
+ * @reverse: reversed foreground/background colors
+ *
+ * These members are defined separately from struct vc_data as we save &
+ * restore them at times.
+ */
+struct vc_state {
+ unsigned int x, y;
+
+ unsigned char color;
+
+ unsigned char Gx_charset[2];
+ unsigned int charset : 1;
+
+ /* attribute flags */
+ enum vc_intensity intensity;
+ bool italic;
+ bool underline;
+ bool blink;
+ bool reverse;
+};
/*
* Example: vc_data of a console that was scrolled 3 lines down.
@@ -57,11 +94,14 @@ struct uni_screen;
struct vc_data {
struct tty_port port; /* Upper level data */
+ struct vc_state state, saved_state;
+
unsigned short vc_num; /* Console number */
unsigned int vc_cols; /* [#] Console size */
unsigned int vc_rows;
unsigned int vc_size_row; /* Bytes per row */
unsigned int vc_scan_lines; /* # of scan lines */
+ unsigned int vc_cell_height; /* CRTC character cell height */
unsigned long vc_origin; /* [!] Start of real screen */
unsigned long vc_scr_end; /* [!] End of real screen */
unsigned long vc_visible_origin; /* [!] Top of visible window */
@@ -73,8 +113,6 @@ struct vc_data {
/* attributes for all characters on screen */
unsigned char vc_attr; /* Current attributes */
unsigned char vc_def_color; /* Default colors */
- unsigned char vc_color; /* Foreground & background */
- unsigned char vc_s_color; /* Saved foreground & background */
unsigned char vc_ulcolor; /* Color for underline mode */
unsigned char vc_itcolor;
unsigned char vc_halfcolor; /* Color for half intensity mode */
@@ -82,8 +120,6 @@ struct vc_data {
unsigned int vc_cursor_type;
unsigned short vc_complement_mask; /* [#] Xor mask for mouse pointer */
unsigned short vc_s_complement_mask; /* Saved mouse pointer mask */
- unsigned int vc_x, vc_y; /* Cursor position */
- unsigned int vc_saved_x, vc_saved_y;
unsigned long vc_pos; /* Cursor address */
/* fonts */
unsigned short vc_hi_font_mask; /* [#] Attribute set for upper 256 chars of font or 0 if not supported */
@@ -98,8 +134,6 @@ struct vc_data {
int vt_newvt;
wait_queue_head_t paste_wait;
/* mode flags */
- unsigned int vc_charset : 1; /* Character set G0 / G1 */
- unsigned int vc_s_charset : 1; /* Saved character set */
unsigned int vc_disp_ctrl : 1; /* Display chars < 32? */
unsigned int vc_toggle_meta : 1; /* Toggle high bit? */
unsigned int vc_decscnm : 1; /* Screen Mode */
@@ -107,17 +141,6 @@ struct vc_data {
unsigned int vc_decawm : 1; /* Autowrap Mode */
unsigned int vc_deccm : 1; /* Cursor Visible */
unsigned int vc_decim : 1; /* Insert Mode */
- /* attribute flags */
- unsigned int vc_intensity : 2; /* 0=half-bright, 1=normal, 2=bold */
- unsigned int vc_italic:1;
- unsigned int vc_underline : 1;
- unsigned int vc_blink : 1;
- unsigned int vc_reverse : 1;
- unsigned int vc_s_intensity : 2; /* saved rendition */
- unsigned int vc_s_italic:1;
- unsigned int vc_s_underline : 1;
- unsigned int vc_s_blink : 1;
- unsigned int vc_s_reverse : 1;
/* misc */
unsigned int vc_priv : 3;
unsigned int vc_need_wrap : 1;
@@ -126,20 +149,16 @@ struct vc_data {
unsigned char vc_utf : 1; /* Unicode UTF-8 encoding */
unsigned char vc_utf_count;
int vc_utf_char;
- unsigned int vc_tab_stop[8]; /* Tab stops. 256 columns. */
+ DECLARE_BITMAP(vc_tab_stop, VC_TABSTOPS_COUNT); /* Tab stops. 256 columns. */
unsigned char vc_palette[16*3]; /* Colour palette for VGA+ */
unsigned short * vc_translate;
- unsigned char vc_G0_charset;
- unsigned char vc_G1_charset;
- unsigned char vc_saved_G0;
- unsigned char vc_saved_G1;
unsigned int vc_resize_user; /* resize request from user */
unsigned int vc_bell_pitch; /* Console bell pitch */
unsigned int vc_bell_duration; /* Console bell duration */
unsigned short vc_cur_blink_ms; /* Cursor blink duration */
struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */
- struct uni_pagedir *vc_uni_pagedir;
- struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
+ struct uni_pagedict *uni_pagedict;
+ struct uni_pagedict **uni_pagedict_loc; /* [!] Location of uni_pagedict variable for this console */
struct uni_screen *vc_uni_screen; /* unicode screen content */
/* additional information is in vt_kern.h */
};
@@ -149,24 +168,29 @@ struct vc {
struct work_struct SAK_work;
/* might add scrmem, kbd at some time,
- to have everything in one place - the disadvantage
- would be that vc_cons etc can no longer be static */
+ to have everything in one place */
};
extern struct vc vc_cons [MAX_NR_CONSOLES];
extern void vc_SAK(struct work_struct *work);
-#define CUR_DEF 0
-#define CUR_NONE 1
-#define CUR_UNDERLINE 2
-#define CUR_LOWER_THIRD 3
-#define CUR_LOWER_HALF 4
-#define CUR_TWO_THIRDS 5
-#define CUR_BLOCK 6
-#define CUR_HWMASK 0x0f
-#define CUR_SWMASK 0xfff0
-
-#define CUR_DEFAULT CUR_UNDERLINE
+#define CUR_MAKE(size, change, set) ((size) | ((change) << 8) | \
+ ((set) << 16))
+#define CUR_SIZE(c) ((c) & 0x00000f)
+# define CUR_DEF 0
+# define CUR_NONE 1
+# define CUR_UNDERLINE 2
+# define CUR_LOWER_THIRD 3
+# define CUR_LOWER_HALF 4
+# define CUR_TWO_THIRDS 5
+# define CUR_BLOCK 6
+#define CUR_SW 0x000010
+#define CUR_ALWAYS_BG 0x000020
+#define CUR_INVERT_FG_BG 0x000040
+#define CUR_FG 0x000700
+#define CUR_BG 0x007000
+#define CUR_CHANGE(c) ((c) & 0x00ff00)
+#define CUR_SET(c) (((c) & 0xff0000) >> 8)
bool con_is_visible(const struct vc_data *vc);
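A worked decode of the new cursor-type encoding, with illustrative values:
for cur = CUR_MAKE(CUR_BLOCK, 0, 7), i.e. cur == 0x070006,

    CUR_SIZE(cur)   == CUR_BLOCK  /* bits 0-3: cursor shape             */
    CUR_CHANGE(cur) == 0x000000   /* bits 8-15: nothing to change here  */
    CUR_SET(cur)    == 0x000700   /* bits 16-23 shifted down by 8, so   */
                                  /* the result aligns with CUR_FG      */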
diff --git a/include/linux/consolemap.h b/include/linux/consolemap.h
index 254246673390..c35db4896c37 100644
--- a/include/linux/consolemap.h
+++ b/include/linux/consolemap.h
@@ -7,29 +7,56 @@
#ifndef __LINUX_CONSOLEMAP_H__
#define __LINUX_CONSOLEMAP_H__
-#define LAT1_MAP 0
-#define GRAF_MAP 1
-#define IBMPC_MAP 2
-#define USER_MAP 3
+enum translation_map {
+ LAT1_MAP,
+ GRAF_MAP,
+ IBMPC_MAP,
+ USER_MAP,
+
+ FIRST_MAP = LAT1_MAP,
+ LAST_MAP = USER_MAP,
+};
#include <linux/types.h>
-#ifdef CONFIG_CONSOLE_TRANSLATIONS
struct vc_data;
-extern u16 inverse_translate(struct vc_data *conp, int glyph, int use_unicode);
-extern unsigned short *set_translate(int m, struct vc_data *vc);
-extern int conv_uni_to_pc(struct vc_data *conp, long ucs);
-extern u32 conv_8bit_to_uni(unsigned char c);
-extern int conv_uni_to_8bit(u32 uni);
+#ifdef CONFIG_CONSOLE_TRANSLATIONS
+u16 inverse_translate(const struct vc_data *conp, u16 glyph, bool use_unicode);
+unsigned short *set_translate(enum translation_map m, struct vc_data *vc);
+int conv_uni_to_pc(struct vc_data *conp, long ucs);
+u32 conv_8bit_to_uni(unsigned char c);
+int conv_uni_to_8bit(u32 uni);
void console_map_init(void);
#else
-#define inverse_translate(conp, glyph, uni) ((uint16_t)glyph)
-#define set_translate(m, vc) ((unsigned short *)NULL)
-#define conv_uni_to_pc(conp, ucs) ((int) (ucs > 0xff ? -1: ucs))
-#define conv_8bit_to_uni(c) ((uint32_t)(c))
-#define conv_uni_to_8bit(c) ((int) ((c) & 0xff))
-#define console_map_init(c) do { ; } while (0)
+static inline u16 inverse_translate(const struct vc_data *conp, u16 glyph,
+ bool use_unicode)
+{
+ return glyph;
+}
+
+static inline unsigned short *set_translate(enum translation_map m,
+ struct vc_data *vc)
+{
+ return NULL;
+}
+
+static inline int conv_uni_to_pc(struct vc_data *conp, long ucs)
+{
+ return ucs > 0xff ? -1 : ucs;
+}
+
+static inline u32 conv_8bit_to_uni(unsigned char c)
+{
+ return c;
+}
+
+static inline int conv_uni_to_8bit(u32 uni)
+{
+ return uni & 0xff;
+}
+
+static inline void console_map_init(void) { }
#endif /* CONFIG_CONSOLE_TRANSLATIONS */
#endif /* __LINUX_CONSOLEMAP_H__ */
diff --git a/include/linux/const.h b/include/linux/const.h
index 7b55a55f5911..435ddd72d2c4 100644
--- a/include/linux/const.h
+++ b/include/linux/const.h
@@ -1,9 +1,14 @@
#ifndef _LINUX_CONST_H
#define _LINUX_CONST_H
-#include <uapi/linux/const.h>
+#include <vdso/const.h>
-#define UL(x) (_UL(x))
-#define ULL(x) (_ULL(x))
+/*
+ * This returns a constant expression while determining if an argument is
+ * a constant expression, most importantly without evaluating the argument.
+ * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
+ */
+#define __is_constexpr(x) \
+ (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
#endif /* _LINUX_CONST_H */
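How the __is_constexpr() trick works (nothing evaluates the argument, since
everything sits under sizeof()):

    /*
     * If x is an integer constant expression, (long)(x) * 0l is a null
     * pointer constant, so '8 ? (void *)0 : (int *)8' has type int *
     * and the sizeof comparison is sizeof(int) == sizeof(int) -> 1.
     *
     * Otherwise the first operand is a plain void *, the conditional
     * has type void *, and (with GCC's sizeof(void) == 1) the
     * comparison yields 0.
     */
    int a = __is_constexpr(4);      /* a == 1 */
    /* int n = 4; __is_constexpr(n) would yield 0 */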
diff --git a/include/linux/container_of.h b/include/linux/container_of.h
new file mode 100644
index 000000000000..2f4944b791b8
--- /dev/null
+++ b/include/linux/container_of.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CONTAINER_OF_H
+#define _LINUX_CONTAINER_OF_H
+
+#include <linux/build_bug.h>
+#include <linux/err.h>
+
+#define typeof_member(T, m) typeof(((T*)0)->m)
+
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ */
+ void *__mptr = (void *)(ptr); \
+ static_assert(__same_type(*(ptr), ((type *)0)->member) || \
+ __same_type(*(ptr), void), \
+ "pointer type mismatch in container_of()"); \
+ ((type *)(__mptr - offsetof(type, member))); })
+
+/**
+ * container_of_safe - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ * If IS_ERR_OR_NULL(ptr), ptr is returned unchanged.
+ */
+#define container_of_safe(ptr, type, member) ({ \
+ void *__mptr = (void *)(ptr); \
+ static_assert(__same_type(*(ptr), ((type *)0)->member) || \
+ __same_type(*(ptr), void), \
+ "pointer type mismatch in container_of_safe()"); \
+ IS_ERR_OR_NULL(__mptr) ? ERR_CAST(__mptr) : \
+ ((type *)(__mptr - offsetof(type, member))); })
+
+#endif /* _LINUX_CONTAINER_OF_H */
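Usage sketch for the relocated container_of(); 'struct my_dev' and
from_kobj() are illustrative:

    struct my_dev {
            int id;
            struct kobject kobj;
    };

    static struct my_dev *from_kobj(struct kobject *kobj)
    {
            /*
             * The static_assert() rejects, at compile time, a 'kobj'
             * whose type does not match the named member.
             */
            return container_of(kobj, struct my_dev, kobj);
    }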
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 8150f5ac176c..dcef4a9e4d63 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -5,162 +5,134 @@
#include <linux/sched.h>
#include <linux/vtime.h>
#include <linux/context_tracking_state.h>
+#include <linux/instrumentation.h>
+
#include <asm/ptrace.h>
-#ifdef CONFIG_CONTEXT_TRACKING
-extern void context_tracking_cpu_set(int cpu);
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+extern void ct_cpu_track_user(int cpu);
/* Called with interrupts disabled. */
-extern void __context_tracking_enter(enum ctx_state state);
-extern void __context_tracking_exit(enum ctx_state state);
+extern void __ct_user_enter(enum ctx_state state);
+extern void __ct_user_exit(enum ctx_state state);
+
+extern void ct_user_enter(enum ctx_state state);
+extern void ct_user_exit(enum ctx_state state);
-extern void context_tracking_enter(enum ctx_state state);
-extern void context_tracking_exit(enum ctx_state state);
-extern void context_tracking_user_enter(void);
-extern void context_tracking_user_exit(void);
+extern void user_enter_callable(void);
+extern void user_exit_callable(void);
static inline void user_enter(void)
{
if (context_tracking_enabled())
- context_tracking_enter(CONTEXT_USER);
+ ct_user_enter(CONTEXT_USER);
}
static inline void user_exit(void)
{
if (context_tracking_enabled())
- context_tracking_exit(CONTEXT_USER);
+ ct_user_exit(CONTEXT_USER);
}
/* Called with interrupts disabled. */
-static inline void user_enter_irqoff(void)
+static __always_inline void user_enter_irqoff(void)
{
if (context_tracking_enabled())
- __context_tracking_enter(CONTEXT_USER);
+ __ct_user_enter(CONTEXT_USER);
}
-static inline void user_exit_irqoff(void)
+static __always_inline void user_exit_irqoff(void)
{
if (context_tracking_enabled())
- __context_tracking_exit(CONTEXT_USER);
+ __ct_user_exit(CONTEXT_USER);
}
static inline enum ctx_state exception_enter(void)
{
enum ctx_state prev_ctx;
- if (!context_tracking_enabled())
+ if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) ||
+ !context_tracking_enabled())
return 0;
- prev_ctx = this_cpu_read(context_tracking.state);
+ prev_ctx = __ct_state();
if (prev_ctx != CONTEXT_KERNEL)
- context_tracking_exit(prev_ctx);
+ ct_user_exit(prev_ctx);
return prev_ctx;
}
static inline void exception_exit(enum ctx_state prev_ctx)
{
- if (context_tracking_enabled()) {
+ if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) &&
+ context_tracking_enabled()) {
if (prev_ctx != CONTEXT_KERNEL)
- context_tracking_enter(prev_ctx);
+ ct_user_enter(prev_ctx);
}
}
+static __always_inline bool context_tracking_guest_enter(void)
+{
+ if (context_tracking_enabled())
+ __ct_user_enter(CONTEXT_GUEST);
+
+ return context_tracking_enabled_this_cpu();
+}
-/**
- * ct_state() - return the current context tracking state if known
- *
- * Returns the current cpu's context tracking state if context tracking
- * is enabled. If context tracking is disabled, returns
- * CONTEXT_DISABLED. This should be used primarily for debugging.
- */
-static inline enum ctx_state ct_state(void)
+static __always_inline void context_tracking_guest_exit(void)
{
- return context_tracking_enabled() ?
- this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
+ if (context_tracking_enabled())
+ __ct_user_exit(CONTEXT_GUEST);
}
+
+#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
+
#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void user_enter_irqoff(void) { }
static inline void user_exit_irqoff(void) { }
-static inline enum ctx_state exception_enter(void) { return 0; }
+static inline int exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
-static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
-#endif /* !CONFIG_CONTEXT_TRACKING */
+static inline int ct_state(void) { return -1; }
+static __always_inline bool context_tracking_guest_enter(void) { return false; }
+static inline void context_tracking_guest_exit(void) { }
+#define CT_WARN_ON(cond) do { } while (0)
+#endif /* !CONFIG_CONTEXT_TRACKING_USER */
-#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
-
-#ifdef CONFIG_CONTEXT_TRACKING_FORCE
+#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
-#endif /* CONFIG_CONTEXT_TRACKING_FORCE */
+#endif /* CONFIG_CONTEXT_TRACKING_USER_FORCE */
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+extern void ct_idle_enter(void);
+extern void ct_idle_exit(void);
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-/* must be called with irqs disabled */
-static inline void guest_enter_irqoff(void)
+/*
+ * Is the current CPU in an extended quiescent state?
+ *
+ * No ordering, as we are sampling CPU-local information.
+ */
+static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
{
- if (vtime_accounting_enabled_this_cpu())
- vtime_guest_enter(current);
- else
- current->flags |= PF_VCPU;
-
- if (context_tracking_enabled())
- __context_tracking_enter(CONTEXT_GUEST);
-
- /* KVM does not hold any references to rcu protected data when it
- * switches CPU into a guest mode. In fact switching to a guest mode
- * is very similar to exiting to userspace from rcu point of view. In
- * addition CPU may stay in a guest mode for quite a long time (up to
- * one time slice). Lets treat guest mode as quiescent state, just like
- * we do with user-mode execution.
- */
- if (!context_tracking_enabled_this_cpu())
- rcu_virt_note_context_switch(smp_processor_id());
+ return !(arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
}
-static inline void guest_exit_irqoff(void)
+/*
+ * Increment the current CPU's context_tracking structure's ->state field
+ * with ordering. Return the new value.
+ */
+static __always_inline unsigned long ct_state_inc(int incby)
{
- if (context_tracking_enabled())
- __context_tracking_exit(CONTEXT_GUEST);
-
- if (vtime_accounting_enabled_this_cpu())
- vtime_guest_exit(current);
- else
- current->flags &= ~PF_VCPU;
+ return arch_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
}
#else
-static inline void guest_enter_irqoff(void)
-{
- /*
- * This is running in ioctl context so its safe
- * to assume that it's the stime pending cputime
- * to flush.
- */
- vtime_account_kernel(current);
- current->flags |= PF_VCPU;
- rcu_virt_note_context_switch(smp_processor_id());
-}
-
-static inline void guest_exit_irqoff(void)
-{
- /* Flush the guest cputime we spent on the guest */
- vtime_account_kernel(current);
- current->flags &= ~PF_VCPU;
-}
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
-
-static inline void guest_exit(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- guest_exit_irqoff();
- local_irq_restore(flags);
-}
+static inline void ct_idle_enter(void) { }
+static inline void ct_idle_exit(void) { }
+#endif /* !CONFIG_CONTEXT_TRACKING_IDLE */
#endif
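A simplified sketch of the sequence a hypervisor run loop is expected to
follow with the renamed guest helpers (the real KVM path has more steps):

    static void run_vcpu_sketch(void)
    {
            local_irq_disable();
            /*
             * Enter CONTEXT_GUEST; RCU then treats guest execution as an
             * extended quiescent state, just like userspace.
             */
            context_tracking_guest_enter();

            /* ... world switch into the guest ... */

            context_tracking_guest_exit();
            local_irq_enable();
    }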
diff --git a/include/linux/context_tracking_irq.h b/include/linux/context_tracking_irq.h
new file mode 100644
index 000000000000..c50b5670c4a5
--- /dev/null
+++ b/include/linux/context_tracking_irq.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CONTEXT_TRACKING_IRQ_H
+#define _LINUX_CONTEXT_TRACKING_IRQ_H
+
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+void ct_irq_enter(void);
+void ct_irq_exit(void);
+void ct_irq_enter_irqson(void);
+void ct_irq_exit_irqson(void);
+void ct_nmi_enter(void);
+void ct_nmi_exit(void);
+#else
+static inline void ct_irq_enter(void) { }
+static inline void ct_irq_exit(void) { }
+static inline void ct_irq_enter_irqson(void) { }
+static inline void ct_irq_exit_irqson(void) { }
+static inline void ct_nmi_enter(void) { }
+static inline void ct_nmi_exit(void) { }
+#endif
+
+#endif
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index e7fe6678b7ad..4a4d56f77180 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -4,8 +4,28 @@
#include <linux/percpu.h>
#include <linux/static_key.h>
+#include <linux/context_tracking_irq.h>
+
+/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
+#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
+
+enum ctx_state {
+ CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */
+ CONTEXT_KERNEL = 0,
+ CONTEXT_IDLE = 1,
+ CONTEXT_USER = 2,
+ CONTEXT_GUEST = 3,
+ CONTEXT_MAX = 4,
+};
+
+/* Even value for idle, else odd. */
+#define RCU_DYNTICKS_IDX CONTEXT_MAX
+
+#define CT_STATE_MASK (CONTEXT_MAX - 1)
+#define CT_DYNTICKS_MASK (~CT_STATE_MASK)
struct context_tracking {
+#ifdef CONFIG_CONTEXT_TRACKING_USER
/*
* When active is false, probes are unset in order
* to minimize overhead: TIF flags are cleared
@@ -14,24 +34,79 @@ struct context_tracking {
*/
bool active;
int recursion;
- enum ctx_state {
- CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */
- CONTEXT_KERNEL = 0,
- CONTEXT_USER,
- CONTEXT_GUEST,
- } state;
+#endif
+#ifdef CONFIG_CONTEXT_TRACKING
+ atomic_t state;
+#endif
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+ long dynticks_nesting; /* Track process nesting level. */
+ long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */
+#endif
};
#ifdef CONFIG_CONTEXT_TRACKING
-extern struct static_key_false context_tracking_key;
DECLARE_PER_CPU(struct context_tracking, context_tracking);
-static inline bool context_tracking_enabled(void)
+static __always_inline int __ct_state(void)
+{
+ return arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK;
+}
+#endif
+
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+static __always_inline int ct_dynticks(void)
+{
+ return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_DYNTICKS_MASK;
+}
+
+static __always_inline int ct_dynticks_cpu(int cpu)
+{
+ struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+ return atomic_read(&ct->state) & CT_DYNTICKS_MASK;
+}
+
+static __always_inline int ct_dynticks_cpu_acquire(int cpu)
+{
+ struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+ return atomic_read_acquire(&ct->state) & CT_DYNTICKS_MASK;
+}
+
+static __always_inline long ct_dynticks_nesting(void)
+{
+ return __this_cpu_read(context_tracking.dynticks_nesting);
+}
+
+static __always_inline long ct_dynticks_nesting_cpu(int cpu)
+{
+ struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+ return ct->dynticks_nesting;
+}
+
+static __always_inline long ct_dynticks_nmi_nesting(void)
+{
+ return __this_cpu_read(context_tracking.dynticks_nmi_nesting);
+}
+
+static __always_inline long ct_dynticks_nmi_nesting_cpu(int cpu)
+{
+ struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+ return ct->dynticks_nmi_nesting;
+}
+#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */
+
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+extern struct static_key_false context_tracking_key;
+
+static __always_inline bool context_tracking_enabled(void)
{
return static_branch_unlikely(&context_tracking_key);
}
-static inline bool context_tracking_enabled_cpu(int cpu)
+static __always_inline bool context_tracking_enabled_cpu(int cpu)
{
return context_tracking_enabled() && per_cpu(context_tracking.active, cpu);
}
@@ -41,15 +116,31 @@ static inline bool context_tracking_enabled_this_cpu(void)
return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
}
-static inline bool context_tracking_in_user(void)
+/**
+ * ct_state() - return the current context tracking state if known
+ *
+ * Returns the current cpu's context tracking state if context tracking
+ * is enabled. If context tracking is disabled, returns
+ * CONTEXT_DISABLED. This should be used primarily for debugging.
+ */
+static __always_inline int ct_state(void)
{
- return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
+ int ret;
+
+ if (!context_tracking_enabled())
+ return CONTEXT_DISABLED;
+
+ preempt_disable();
+ ret = __ct_state();
+ preempt_enable();
+
+ return ret;
}
+
#else
-static inline bool context_tracking_in_user(void) { return false; }
-static inline bool context_tracking_enabled(void) { return false; }
-static inline bool context_tracking_enabled_cpu(int cpu) { return false; }
-static inline bool context_tracking_enabled_this_cpu(void) { return false; }
-#endif /* CONFIG_CONTEXT_TRACKING */
+static __always_inline bool context_tracking_enabled(void) { return false; }
+static __always_inline bool context_tracking_enabled_cpu(int cpu) { return false; }
+static __always_inline bool context_tracking_enabled_this_cpu(void) { return false; }
+#endif /* CONFIG_CONTEXT_TRACKING_USER */
#endif
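An illustrative decode of the packed ->state word (with CONTEXT_MAX == 4):

    /*
     * low 2 bits (CT_STATE_MASK):      CONTEXT_KERNEL/IDLE/USER/GUEST
     * bit 2 upward (CT_DYNTICKS_MASK): RCU dynticks counter, stepped in
     *                                  units of RCU_DYNTICKS_IDX (== 4)
     *
     * e.g. state == 0x9: low bits 0x1 -> CONTEXT_IDLE; dynticks part 0x8
     * with bit 2 clear -> the CPU is in an extended quiescent state,
     * which is exactly what rcu_dynticks_curr_cpu_in_eqs() tests.
     */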
diff --git a/include/linux/cookie.h b/include/linux/cookie.h
new file mode 100644
index 000000000000..0c159f585109
--- /dev/null
+++ b/include/linux/cookie.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_COOKIE_H
+#define __LINUX_COOKIE_H
+
+#include <linux/atomic.h>
+#include <linux/percpu.h>
+#include <asm/local.h>
+
+struct pcpu_gen_cookie {
+ local_t nesting;
+ u64 last;
+} __aligned(16);
+
+struct gen_cookie {
+ struct pcpu_gen_cookie __percpu *local;
+ atomic64_t forward_last ____cacheline_aligned_in_smp;
+ atomic64_t reverse_last;
+};
+
+#define COOKIE_LOCAL_BATCH 4096
+
+#define DEFINE_COOKIE(name) \
+ static DEFINE_PER_CPU(struct pcpu_gen_cookie, __##name); \
+ static struct gen_cookie name = { \
+ .local = &__##name, \
+ .forward_last = ATOMIC64_INIT(0), \
+ .reverse_last = ATOMIC64_INIT(0), \
+ }
+
+static __always_inline u64 gen_cookie_next(struct gen_cookie *gc)
+{
+ struct pcpu_gen_cookie *local = this_cpu_ptr(gc->local);
+ u64 val;
+
+ if (likely(local_inc_return(&local->nesting) == 1)) {
+ val = local->last;
+ if (__is_defined(CONFIG_SMP) &&
+ unlikely((val & (COOKIE_LOCAL_BATCH - 1)) == 0)) {
+ s64 next = atomic64_add_return(COOKIE_LOCAL_BATCH,
+ &gc->forward_last);
+ val = next - COOKIE_LOCAL_BATCH;
+ }
+ local->last = ++val;
+ } else {
+ val = atomic64_dec_return(&gc->reverse_last);
+ }
+ local_dec(&local->nesting);
+ return val;
+}
+
+#endif /* __LINUX_COOKIE_H */
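Usage sketch for the cookie generator; 'sock_cookie' is an illustrative
name:

    DEFINE_COOKIE(sock_cookie);

    static u64 get_cookie_sketch(void)
    {
            /*
             * Fast path: hands out values from a per-CPU batch that is
             * refilled from forward_last every COOKIE_LOCAL_BATCH values.
             * If the call nests (e.g. an interrupt lands inside the fast
             * path), the shared reverse_last counter is used instead.
             */
            return gen_cookie_next(&sock_cookie);
    }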
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index abf4b4e65dbb..08a1d3e7e46d 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -7,19 +7,48 @@
#include <linux/fs.h>
#include <asm/siginfo.h>
+#ifdef CONFIG_COREDUMP
+struct core_vma_metadata {
+ unsigned long start, end;
+ unsigned long flags;
+ unsigned long dump_size;
+ unsigned long pgoff;
+ struct file *file;
+};
+
+struct coredump_params {
+ const kernel_siginfo_t *siginfo;
+ struct pt_regs *regs;
+ struct file *file;
+ unsigned long limit;
+ unsigned long mm_flags;
+ loff_t written;
+ loff_t pos;
+ loff_t to_skip;
+ int vma_count;
+ size_t vma_data_size;
+ struct core_vma_metadata *vma_meta;
+};
+
/*
* These are the only things you should do on a core-file: use only these
* functions to write out all the necessary info.
*/
-struct coredump_params;
-extern int dump_skip(struct coredump_params *cprm, size_t nr);
+extern void dump_skip_to(struct coredump_params *cprm, unsigned long to);
+extern void dump_skip(struct coredump_params *cprm, size_t nr);
extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
extern int dump_align(struct coredump_params *cprm, int align);
-extern void dump_truncate(struct coredump_params *cprm);
-#ifdef CONFIG_COREDUMP
+int dump_user_range(struct coredump_params *cprm, unsigned long start,
+ unsigned long len);
extern void do_coredump(const kernel_siginfo_t *siginfo);
#else
static inline void do_coredump(const kernel_siginfo_t *siginfo) {}
#endif
+#if defined(CONFIG_COREDUMP) && defined(CONFIG_SYSCTL)
+extern void validate_coredump_safety(void);
+#else
+static inline void validate_coredump_safety(void) {}
+#endif
+
#endif /* _LINUX_COREDUMP_H */
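A sketch of how a binfmt writer consumes the now-public coredump_params;
write_note_sketch() is illustrative:

    static int write_note_sketch(struct coredump_params *cprm,
                                 const void *note, size_t len)
    {
            /*
             * dump_emit() advances cprm->pos; dump_align() pads out to
             * the requested alignment via the new to_skip bookkeeping.
             */
            return dump_emit(cprm, note, len) && dump_align(cprm, 4);
    }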
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
index b0e35eec6499..6c2fd6cc5a98 100644
--- a/include/linux/coresight-pmu.h
+++ b/include/linux/coresight-pmu.h
@@ -10,17 +10,29 @@
#define CORESIGHT_ETM_PMU_NAME "cs_etm"
#define CORESIGHT_ETM_PMU_SEED 0x10
-/* ETMv3.5/PTM's ETMCR config bit */
-#define ETM_OPT_CYCACC 12
-#define ETM_OPT_CTXTID 14
-#define ETM_OPT_TS 28
-#define ETM_OPT_RETSTK 29
+/*
+ * Below are the definitions of the bit offsets for the perf options; they
+ * work as arbitrary values across all ETM versions.
+ *
+ * Most of them originally come from ETMv3.5/PTM's ETMCR config. For that
+ * reason, ETMv3.5/PTM does not define separate ETMCR config bits with an
+ * "ETM3_" prefix and instead uses the macros below directly as config bits.
+ */
+#define ETM_OPT_BRANCH_BROADCAST 8
+#define ETM_OPT_CYCACC 12
+#define ETM_OPT_CTXTID 14
+#define ETM_OPT_CTXTID2 15
+#define ETM_OPT_TS 28
+#define ETM_OPT_RETSTK 29
/* ETMv4 CONFIGR programming bits for the ETM OPTs */
+#define ETM4_CFG_BIT_BB 3
#define ETM4_CFG_BIT_CYCACC 4
#define ETM4_CFG_BIT_CTXTID 6
+#define ETM4_CFG_BIT_VMID 7
#define ETM4_CFG_BIT_TS 11
#define ETM4_CFG_BIT_RETSTK 12
+#define ETM4_CFG_BIT_VMID_OPT 15
static inline int coresight_get_trace_id(int cpu)
{
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 44e552de419c..1554021231f9 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -7,6 +7,7 @@
#define _LINUX_CORESIGHT_H
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/perf_event.h>
#include <linux/sched.h>
@@ -35,39 +36,43 @@
extern struct bus_type coresight_bustype;
enum coresight_dev_type {
- CORESIGHT_DEV_TYPE_NONE,
CORESIGHT_DEV_TYPE_SINK,
CORESIGHT_DEV_TYPE_LINK,
CORESIGHT_DEV_TYPE_LINKSINK,
CORESIGHT_DEV_TYPE_SOURCE,
CORESIGHT_DEV_TYPE_HELPER,
+ CORESIGHT_DEV_TYPE_ECT,
};
enum coresight_dev_subtype_sink {
- CORESIGHT_DEV_SUBTYPE_SINK_NONE,
CORESIGHT_DEV_SUBTYPE_SINK_PORT,
CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
+ CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM,
+ CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM,
};
enum coresight_dev_subtype_link {
- CORESIGHT_DEV_SUBTYPE_LINK_NONE,
CORESIGHT_DEV_SUBTYPE_LINK_MERG,
CORESIGHT_DEV_SUBTYPE_LINK_SPLIT,
CORESIGHT_DEV_SUBTYPE_LINK_FIFO,
};
enum coresight_dev_subtype_source {
- CORESIGHT_DEV_SUBTYPE_SOURCE_NONE,
CORESIGHT_DEV_SUBTYPE_SOURCE_PROC,
CORESIGHT_DEV_SUBTYPE_SOURCE_BUS,
CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE,
};
enum coresight_dev_subtype_helper {
- CORESIGHT_DEV_SUBTYPE_HELPER_NONE,
CORESIGHT_DEV_SUBTYPE_HELPER_CATU,
};
+/* Embedded Cross Trigger (ECT) sub-types */
+enum coresight_dev_subtype_ect {
+ CORESIGHT_DEV_SUBTYPE_ECT_NONE,
+ CORESIGHT_DEV_SUBTYPE_ECT_CTI,
+};
+
/**
* union coresight_dev_subtype - further characterisation of a type
* @sink_subtype: type of sink this component is, as defined
@@ -78,6 +83,8 @@ enum coresight_dev_subtype_helper {
* by @coresight_dev_subtype_source.
* @helper_subtype: type of helper this component is, as defined
* by @coresight_dev_subtype_helper.
+ * @ect_subtype: type of cross trigger this component is, as
+ * defined by @coresight_dev_subtype_ect
*/
union coresight_dev_subtype {
/* We have some devices which acts as LINK and SINK */
@@ -87,13 +94,16 @@ union coresight_dev_subtype {
};
enum coresight_dev_subtype_source source_subtype;
enum coresight_dev_subtype_helper helper_subtype;
+ enum coresight_dev_subtype_ect ect_subtype;
};
/**
- * struct coresight_platform_data - data harvested from the DT specification
- * @nr_inport: number of input ports for this component.
- * @nr_outport: number of output ports for this component.
- * @conns: Array of nr_outport connections from this component
+ * struct coresight_platform_data - data harvested from the firmware
+ * specification.
+ *
+ * @nr_inport: Number of elements for the input connections.
+ * @nr_outport: Number of elements for the output connections.
+ * @conns: Sparse array of nr_outport connections from this component.
*/
struct coresight_platform_data {
int nr_inport;
@@ -102,6 +112,32 @@ struct coresight_platform_data {
};
/**
+ * struct csdev_access - Abstraction of a CoreSight device access.
+ *
+ * @io_mem : True if the device has memory mapped I/O
+ * @base : When io_mem == true, base address of the component
+ * @read : Read from the given "offset" of the given instance.
+ * @write : Write "val" to the given "offset".
+ */
+struct csdev_access {
+ bool io_mem;
+ union {
+ void __iomem *base;
+ struct {
+ u64 (*read)(u32 offset, bool relaxed, bool _64bit);
+ void (*write)(u64 val, u32 offset, bool relaxed,
+ bool _64bit);
+ };
+ };
+};
+
+#define CSDEV_ACCESS_IOMEM(_addr) \
+ ((struct csdev_access) { \
+ .io_mem = true, \
+ .base = (_addr), \
+ })
+
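A hedged sketch of filling in the two access flavours: only CSDEV_ACCESS_IOMEM() and the callback signatures come from this header, while the sysreg helpers and the init function are hypothetical.

/* Hypothetical system-register helpers standing in for real accessors. */
static u64 my_sysreg_read(u32 offset, bool relaxed, bool _64bit)
{
        return my_read_sysreg(offset);          /* hypothetical */
}

static void my_sysreg_write(u64 val, u32 offset, bool relaxed, bool _64bit)
{
        my_write_sysreg(val, offset);           /* hypothetical */
}

static void my_init_access(struct csdev_access *csa, void __iomem *base)
{
        if (base)
                *csa = CSDEV_ACCESS_IOMEM(base);        /* MMIO flavour */
        else
                *csa = (struct csdev_access) {          /* callback flavour */
                        .io_mem = false,
                        .read = my_sysreg_read,
                        .write = my_sysreg_write,
                };
}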
+/**
* struct coresight_desc - description of a component required from drivers
* @type: as defined by @coresight_dev_type.
* @subtype: as defined by @coresight_dev_subtype.
@@ -112,6 +148,7 @@ struct coresight_platform_data {
* @groups: operations specific to this component. These will end up
* in the component's sysfs sub-directory.
* @name: name for the coresight device, also shown under sysfs.
+ * @access: Describes access to the device.
*/
struct coresight_desc {
enum coresight_dev_type type;
@@ -121,6 +158,7 @@ struct coresight_desc {
struct device *dev;
const struct attribute_group **groups;
const char *name;
+ struct csdev_access access;
};
/**
@@ -130,12 +168,28 @@ struct coresight_desc {
* @child_fwnode: remote component's fwnode handle.
* @child_dev: a @coresight_device representation of the component
* connected to @outport.
+ * @link: Representation of the connection as a sysfs link.
*/
struct coresight_connection {
int outport;
int child_port;
struct fwnode_handle *child_fwnode;
struct coresight_device *child_dev;
+ struct coresight_sysfs_link *link;
+};
+
+/**
+ * struct coresight_sysfs_link - representation of a connection in sysfs.
+ * @orig: Originating (master) coresight device for the link.
+ * @orig_name: Name to use for the link orig->target.
+ * @target: Target (slave) coresight device for the link.
+ * @target_name: Name to use for the link target->orig.
+ */
+struct coresight_sysfs_link {
+ struct coresight_device *orig;
+ const char *orig_name;
+ struct coresight_device *target;
+ const char *target_name;
};
/**
@@ -144,21 +198,34 @@ struct coresight_connection {
* @type: as defined by @coresight_dev_type.
* @subtype: as defined by @coresight_dev_subtype.
* @ops: generic operations for this component, as defined
- by @coresight_ops.
+ * by @coresight_ops.
+ * @access: Device I/O access abstraction for this device.
* @dev: The device entity associated to this component.
* @refcnt: keep track of what is in use.
* @orphan: true if the component has connections that haven't been linked.
* @enable: 'true' if component is currently part of an active path.
* @activated: 'true' only if a _sink_ has been activated. A sink can be
* activated but not yet enabled. Enabling for a _sink_
- * appens when a source has been selected for that it.
+ * happens when a source has been selected and a path is enabled
+ * from source to that sink.
* @ea: Device attribute for sink representation under PMU directory.
+ * @def_sink: cached reference to default sink found for this device.
+ * @ect_dev: Associated cross trigger device. Not part of the trace data
+ * path or connections.
+ * @nr_links: number of sysfs links created to other components from this
+ * device. These will appear in the "connections" group.
+ * @has_conns_grp: Have added a "connections" group for sysfs links.
+ * @feature_csdev_list: List of complex feature programming added to the device.
+ * @config_csdev_list: List of system configurations added to the device.
+ * @cscfg_csdev_lock: Protect the lists of configurations and features.
+ * @active_cscfg_ctxt: Context information for current active system configuration.
*/
struct coresight_device {
struct coresight_platform_data *pdata;
enum coresight_dev_type type;
union coresight_dev_subtype subtype;
const struct coresight_ops *ops;
+ struct csdev_access access;
struct device dev;
atomic_t *refcnt;
bool orphan;
@@ -166,6 +233,18 @@ struct coresight_device {
/* sink specific fields */
bool activated; /* true only if a sink is part of a path */
struct dev_ext_attribute *ea;
+ struct coresight_device *def_sink;
+ /* cross trigger handling */
+ struct coresight_device *ect_dev;
+ /* sysfs links between components */
+ int nr_links;
+ bool has_conns_grp;
+ bool ect_enabled; /* true only if associated ect device is enabled */
+ /* system configuration and feature lists */
+ struct list_head feature_csdev_list;
+ struct list_head config_csdev_list;
+ spinlock_t cscfg_csdev_lock;
+ void *active_cscfg_ctxt;
};
/*
@@ -196,6 +275,7 @@ static struct coresight_dev_list (var) = { \
#define sink_ops(csdev) csdev->ops->sink_ops
#define link_ops(csdev) csdev->ops->link_ops
#define helper_ops(csdev) csdev->ops->helper_ops
+#define ect_ops(csdev) csdev->ops->ect_ops
/**
* struct coresight_ops_sink - basic operations for a sink
@@ -262,31 +342,188 @@ struct coresight_ops_helper {
int (*disable)(struct coresight_device *csdev, void *data);
};
+/**
+ * struct coresight_ops_ect - Ops for an embedded cross trigger device
+ *
+ * @enable : Enable the device
+ * @disable : Disable the device
+ */
+struct coresight_ops_ect {
+ int (*enable)(struct coresight_device *csdev);
+ int (*disable)(struct coresight_device *csdev);
+};
+
struct coresight_ops {
const struct coresight_ops_sink *sink_ops;
const struct coresight_ops_link *link_ops;
const struct coresight_ops_source *source_ops;
const struct coresight_ops_helper *helper_ops;
+ const struct coresight_ops_ect *ect_ops;
};
-#ifdef CONFIG_CORESIGHT
+#if IS_ENABLED(CONFIG_CORESIGHT)
+
+static inline u32 csdev_access_relaxed_read32(struct csdev_access *csa,
+ u32 offset)
+{
+ if (likely(csa->io_mem))
+ return readl_relaxed(csa->base + offset);
+
+ return csa->read(offset, true, false);
+}
+
+static inline u64 csdev_access_relaxed_read_pair(struct csdev_access *csa,
+ u32 lo_offset, u32 hi_offset)
+{
+ if (likely(csa->io_mem)) {
+ return readl_relaxed(csa->base + lo_offset) |
+ ((u64)readl_relaxed(csa->base + hi_offset) << 32);
+ }
+
+ return csa->read(lo_offset, true, false) | (csa->read(hi_offset, true, false) << 32);
+}
+
+static inline void csdev_access_relaxed_write_pair(struct csdev_access *csa, u64 val,
+ u32 lo_offset, u32 hi_offset)
+{
+ if (likely(csa->io_mem)) {
+ writel_relaxed((u32)val, csa->base + lo_offset);
+ writel_relaxed((u32)(val >> 32), csa->base + hi_offset);
+ } else {
+ csa->write((u32)val, lo_offset, true, false);
+ csa->write((u32)(val >> 32), hi_offset, true, false);
+ }
+}
+
+static inline u32 csdev_access_read32(struct csdev_access *csa, u32 offset)
+{
+ if (likely(csa->io_mem))
+ return readl(csa->base + offset);
+
+ return csa->read(offset, false, false);
+}
+
+static inline void csdev_access_relaxed_write32(struct csdev_access *csa,
+ u32 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writel_relaxed(val, csa->base + offset);
+ else
+ csa->write(val, offset, true, false);
+}
+
+static inline void csdev_access_write32(struct csdev_access *csa, u32 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writel(val, csa->base + offset);
+ else
+ csa->write(val, offset, false, false);
+}
+
+#ifdef CONFIG_64BIT
+
+static inline u64 csdev_access_relaxed_read64(struct csdev_access *csa,
+ u32 offset)
+{
+ if (likely(csa->io_mem))
+ return readq_relaxed(csa->base + offset);
+
+ return csa->read(offset, true, true);
+}
+
+static inline u64 csdev_access_read64(struct csdev_access *csa, u32 offset)
+{
+ if (likely(csa->io_mem))
+ return readq(csa->base + offset);
+
+ return csa->read(offset, false, true);
+}
+
+static inline void csdev_access_relaxed_write64(struct csdev_access *csa,
+ u64 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writeq_relaxed(val, csa->base + offset);
+ else
+ csa->write(val, offset, true, true);
+}
+
+static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writeq(val, csa->base + offset);
+ else
+ csa->write(val, offset, false, true);
+}
+
+#else /* !CONFIG_64BIT */
+
+static inline u64 csdev_access_relaxed_read64(struct csdev_access *csa,
+ u32 offset)
+{
+ WARN_ON(1);
+ return 0;
+}
+
+static inline u64 csdev_access_read64(struct csdev_access *csa, u32 offset)
+{
+ WARN_ON(1);
+ return 0;
+}
+
+static inline void csdev_access_relaxed_write64(struct csdev_access *csa,
+ u64 val, u32 offset)
+{
+ WARN_ON(1);
+}
+
+static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 offset)
+{
+ WARN_ON(1);
+}
+#endif /* CONFIG_64BIT */
+
+static inline bool coresight_is_percpu_source(struct coresight_device *csdev)
+{
+ return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SOURCE) &&
+ (csdev->subtype.source_subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_PROC);
+}
+
+static inline bool coresight_is_percpu_sink(struct coresight_device *csdev)
+{
+ return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SINK) &&
+ (csdev->subtype.sink_subtype == CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM);
+}
+
extern struct coresight_device *
coresight_register(struct coresight_desc *desc);
extern void coresight_unregister(struct coresight_device *csdev);
extern int coresight_enable(struct coresight_device *csdev);
extern void coresight_disable(struct coresight_device *csdev);
-extern int coresight_timeout(void __iomem *addr, u32 offset,
+extern int coresight_timeout(struct csdev_access *csa, u32 offset,
int position, int value);
-extern int coresight_claim_device(void __iomem *base);
-extern int coresight_claim_device_unlocked(void __iomem *base);
+extern int coresight_claim_device(struct coresight_device *csdev);
+extern int coresight_claim_device_unlocked(struct coresight_device *csdev);
-extern void coresight_disclaim_device(void __iomem *base);
-extern void coresight_disclaim_device_unlocked(void __iomem *base);
+extern void coresight_disclaim_device(struct coresight_device *csdev);
+extern void coresight_disclaim_device_unlocked(struct coresight_device *csdev);
extern char *coresight_alloc_device_name(struct coresight_dev_list *devs,
struct device *dev);
extern bool coresight_loses_context_with_cpu(struct device *dev);
+
+u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset);
+u32 coresight_read32(struct coresight_device *csdev, u32 offset);
+void coresight_write32(struct coresight_device *csdev, u32 val, u32 offset);
+void coresight_relaxed_write32(struct coresight_device *csdev,
+ u32 val, u32 offset);
+u64 coresight_relaxed_read64(struct coresight_device *csdev, u32 offset);
+u64 coresight_read64(struct coresight_device *csdev, u32 offset);
+void coresight_relaxed_write64(struct coresight_device *csdev,
+ u64 val, u32 offset);
+void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset);
+
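Callers migrate accordingly: the claim/disclaim helpers and the register accessors now take the coresight_device rather than a raw __iomem base. A sketch of a converted enable path (the control offset and function name are illustrative):

static int my_sink_enable(struct coresight_device *csdev)
{
        int ret;

        ret = coresight_claim_device(csdev);    /* was: (drvdata->base) */
        if (ret)
                return ret;

        /* Register access goes through the device's csdev_access. */
        coresight_relaxed_write32(csdev, 0x1, 0x004 /* hypothetical CTL */);
        return 0;
}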
#else
static inline struct coresight_device *
coresight_register(struct coresight_desc *desc) { return NULL; }
@@ -294,29 +531,78 @@ static inline void coresight_unregister(struct coresight_device *csdev) {}
static inline int
coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
static inline void coresight_disable(struct coresight_device *csdev) {}
-static inline int coresight_timeout(void __iomem *addr, u32 offset,
- int position, int value) { return 1; }
-static inline int coresight_claim_device_unlocked(void __iomem *base)
+
+static inline int coresight_timeout(struct csdev_access *csa, u32 offset,
+ int position, int value)
+{
+ return 1;
+}
+
+static inline int coresight_claim_device_unlocked(struct coresight_device *csdev)
{
return -EINVAL;
}
-static inline int coresight_claim_device(void __iomem *base)
+static inline int coresight_claim_device(struct coresight_device *csdev)
{
return -EINVAL;
}
-static inline void coresight_disclaim_device(void __iomem *base) {}
-static inline void coresight_disclaim_device_unlocked(void __iomem *base) {}
+static inline void coresight_disclaim_device(struct coresight_device *csdev) {}
+static inline void coresight_disclaim_device_unlocked(struct coresight_device *csdev) {}
static inline bool coresight_loses_context_with_cpu(struct device *dev)
{
return false;
}
-#endif
+
+static inline u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static inline u32 coresight_read32(struct coresight_device *csdev, u32 offset)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static inline void coresight_write32(struct coresight_device *csdev, u32 val, u32 offset)
+{
+}
+
+static inline void coresight_relaxed_write32(struct coresight_device *csdev,
+ u32 val, u32 offset)
+{
+}
+
+static inline u64 coresight_relaxed_read64(struct coresight_device *csdev,
+ u32 offset)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static inline u64 coresight_read64(struct coresight_device *csdev, u32 offset)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static inline void coresight_relaxed_write64(struct coresight_device *csdev,
+ u64 val, u32 offset)
+{
+}
+
+static inline void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_CORESIGHT) */
extern int coresight_get_cpu(struct device *dev);
struct coresight_platform_data *coresight_get_platform_data(struct device *dev);
-#endif
+#endif /* _LINUX_CORESIGHT_H */
diff --git a/include/linux/counter.h b/include/linux/counter.h
index 9dbd5df4cd34..b63746637de2 100644
--- a/include/linux/counter.h
+++ b/include/linux/counter.h
@@ -6,417 +6,352 @@
#ifndef _COUNTER_H_
#define _COUNTER_H_
-#include <linux/counter_enum.h>
+#include <linux/cdev.h>
#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/mutex.h>
+#include <linux/spinlock_types.h>
#include <linux/types.h>
-
-enum counter_count_direction {
- COUNTER_COUNT_DIRECTION_FORWARD = 0,
- COUNTER_COUNT_DIRECTION_BACKWARD
-};
-extern const char *const counter_count_direction_str[2];
-
-enum counter_count_mode {
- COUNTER_COUNT_MODE_NORMAL = 0,
- COUNTER_COUNT_MODE_RANGE_LIMIT,
- COUNTER_COUNT_MODE_NON_RECYCLE,
- COUNTER_COUNT_MODE_MODULO_N
-};
-extern const char *const counter_count_mode_str[4];
+#include <linux/wait.h>
+#include <uapi/linux/counter.h>
struct counter_device;
+struct counter_count;
+struct counter_synapse;
struct counter_signal;
+enum counter_comp_type {
+ COUNTER_COMP_U8,
+ COUNTER_COMP_U64,
+ COUNTER_COMP_BOOL,
+ COUNTER_COMP_SIGNAL_LEVEL,
+ COUNTER_COMP_FUNCTION,
+ COUNTER_COMP_SYNAPSE_ACTION,
+ COUNTER_COMP_ENUM,
+ COUNTER_COMP_COUNT_DIRECTION,
+ COUNTER_COMP_COUNT_MODE,
+ COUNTER_COMP_SIGNAL_POLARITY,
+ COUNTER_COMP_ARRAY,
+};
+
/**
- * struct counter_signal_ext - Counter Signal extensions
- * @name: attribute name
- * @read: read callback for this attribute; may be NULL
- * @write: write callback for this attribute; may be NULL
- * @priv: data private to the driver
+ * struct counter_comp - Counter component node
+ * @type: Counter component data type
+ * @name: device-specific component name
+ * @priv: component-relevant data
+ * @action_read: Synapse action mode read callback. The read value of the
+ * respective Synapse action mode should be passed back via
+ * the action parameter.
+ * @device_u8_read: Device u8 component read callback. The read value of the
+ * respective Device u8 component should be passed back via
+ * the val parameter.
+ * @count_u8_read: Count u8 component read callback. The read value of the
+ * respective Count u8 component should be passed back via
+ * the val parameter.
+ * @signal_u8_read: Signal u8 component read callback. The read value of the
+ * respective Signal u8 component should be passed back via
+ * the val parameter.
+ * @device_u32_read: Device u32 component read callback. The read value of
+ * the respective Device u32 component should be passed
+ * back via the val parameter.
+ * @count_u32_read: Count u32 component read callback. The read value of the
+ * respective Count u32 component should be passed back via
+ * the val parameter.
+ * @signal_u32_read: Signal u32 component read callback. The read value of
+ * the respective Signal u32 component should be passed
+ * back via the val parameter.
+ * @device_u64_read: Device u64 component read callback. The read value of
+ * the respective Device u64 component should be passed
+ * back via the val parameter.
+ * @count_u64_read: Count u64 component read callback. The read value of the
+ * respective Count u64 component should be passed back via
+ * the val parameter.
+ * @signal_u64_read: Signal u64 component read callback. The read value of
+ * the respective Signal u64 component should be passed
+ * back via the val parameter.
+ * @signal_array_u32_read: Signal u32 array component read callback. The
+ * index of the respective Signal u32 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Signal u32 array component element should be
+ * passed back via the val parameter.
+ * @device_array_u64_read: Device u64 array component read callback. The
+ * index of the respective Device u64 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Device u64 array component element should be
+ * passed back via the val parameter.
+ * @count_array_u64_read: Count u64 array component read callback. The
+ * index of the respective Count u64 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Count u64 array component element should be
+ * passed back via the val parameter.
+ * @signal_array_u64_read: Signal u64 array component read callback. The
+ * index of the respective Signal u64 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Signal u64 array component element should be
+ * passed back via the val parameter.
+ * @action_write: Synapse action mode write callback. The write value of
+ * the respective Synapse action mode is passed via the
+ * action parameter.
+ * @device_u8_write: Device u8 component write callback. The write value of
+ * the respective Device u8 component is passed via the val
+ * parameter.
+ * @count_u8_write: Count u8 component write callback. The write value of
+ * the respective Count u8 component is passed via the val
+ * parameter.
+ * @signal_u8_write: Signal u8 component write callback. The write value of
+ * the respective Signal u8 component is passed via the val
+ * parameter.
+ * @device_u32_write: Device u32 component write callback. The write value of
+ * the respective Device u32 component is passed via the
+ * val parameter.
+ * @count_u32_write: Count u32 component write callback. The write value of
+ * the respective Count u32 component is passed via the val
+ * parameter.
+ * @signal_u32_write: Signal u32 component write callback. The write value of
+ * the respective Signal u32 component is passed via the
+ * val parameter.
+ * @device_u64_write: Device u64 component write callback. The write value of
+ * the respective Device u64 component is passed via the
+ * val parameter.
+ * @count_u64_write: Count u64 component write callback. The write value of
+ * the respective Count u64 component is passed via the val
+ * parameter.
+ * @signal_u64_write: Signal u64 component write callback. The write value of
+ * the respective Signal u64 component is passed via the
+ * val parameter.
+ * @signal_array_u32_write: Signal u32 array component write callback. The
+ * index of the respective Signal u32 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Signal u32 array component element is passed via
+ * the val parameter.
+ * @device_array_u64_write: Device u64 array component write callback. The
+ * index of the respective Device u64 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Device u64 array component element is passed via
+ * the val parameter.
+ * @count_array_u64_write: Count u64 array component write callback. The
+ * index of the respective Count u64 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Count u64 array component element is passed via
+ * the val parameter.
+ * @signal_array_u64_write: Signal u64 array component write callback. The
+ * index of the respective Signal u64 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Signal u64 array component element is passed via
+ * the val parameter.
*/
-struct counter_signal_ext {
+struct counter_comp {
+ enum counter_comp_type type;
const char *name;
- ssize_t (*read)(struct counter_device *counter,
- struct counter_signal *signal, void *priv, char *buf);
- ssize_t (*write)(struct counter_device *counter,
- struct counter_signal *signal, void *priv,
- const char *buf, size_t len);
void *priv;
+ union {
+ int (*action_read)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action);
+ int (*device_u8_read)(struct counter_device *counter, u8 *val);
+ int (*count_u8_read)(struct counter_device *counter,
+ struct counter_count *count, u8 *val);
+ int (*signal_u8_read)(struct counter_device *counter,
+ struct counter_signal *signal, u8 *val);
+ int (*device_u32_read)(struct counter_device *counter,
+ u32 *val);
+ int (*count_u32_read)(struct counter_device *counter,
+ struct counter_count *count, u32 *val);
+ int (*signal_u32_read)(struct counter_device *counter,
+ struct counter_signal *signal, u32 *val);
+ int (*device_u64_read)(struct counter_device *counter,
+ u64 *val);
+ int (*count_u64_read)(struct counter_device *counter,
+ struct counter_count *count, u64 *val);
+ int (*signal_u64_read)(struct counter_device *counter,
+ struct counter_signal *signal, u64 *val);
+ int (*signal_array_u32_read)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u32 *val);
+ int (*device_array_u64_read)(struct counter_device *counter,
+ size_t idx, u64 *val);
+ int (*count_array_u64_read)(struct counter_device *counter,
+ struct counter_count *count,
+ size_t idx, u64 *val);
+ int (*signal_array_u64_read)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u64 *val);
+ };
+ union {
+ int (*action_write)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action action);
+ int (*device_u8_write)(struct counter_device *counter, u8 val);
+ int (*count_u8_write)(struct counter_device *counter,
+ struct counter_count *count, u8 val);
+ int (*signal_u8_write)(struct counter_device *counter,
+ struct counter_signal *signal, u8 val);
+ int (*device_u32_write)(struct counter_device *counter,
+ u32 val);
+ int (*count_u32_write)(struct counter_device *counter,
+ struct counter_count *count, u32 val);
+ int (*signal_u32_write)(struct counter_device *counter,
+ struct counter_signal *signal, u32 val);
+ int (*device_u64_write)(struct counter_device *counter,
+ u64 val);
+ int (*count_u64_write)(struct counter_device *counter,
+ struct counter_count *count, u64 val);
+ int (*signal_u64_write)(struct counter_device *counter,
+ struct counter_signal *signal, u64 val);
+ int (*signal_array_u32_write)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u32 val);
+ int (*device_array_u64_write)(struct counter_device *counter,
+ size_t idx, u64 val);
+ int (*count_array_u64_write)(struct counter_device *counter,
+ struct counter_count *count,
+ size_t idx, u64 val);
+ int (*signal_array_u64_write)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u64 val);
+ };
};
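In practice a driver picks exactly one read and one write member from the unions, matched to the component's scope and data type. A hedged sketch of a Count u64 extension (the driver state, register, and names are hypothetical):

static int my_compare_read(struct counter_device *counter,
                           struct counter_count *count, u64 *val)
{
        struct my_priv *priv = counter_priv(counter);   /* hypothetical */

        *val = readl(priv->base + MY_COMPARE_REG);      /* hypothetical */
        return 0;
}

static struct counter_comp my_count_ext[] = {
        COUNTER_COMP_COUNT_U64("compare", my_compare_read, NULL),
};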
/**
* struct counter_signal - Counter Signal node
- * @id: unique ID used to identify signal
- * @name: device-specific Signal name; ideally, this should match the name
- * as it appears in the datasheet documentation
- * @ext: optional array of Counter Signal extensions
- * @num_ext: number of Counter Signal extensions specified in @ext
- * @priv: optional private data supplied by driver
+ * @id: unique ID used to identify the Signal
+ * @name: device-specific Signal name
+ * @ext: optional array of Signal extensions
+ * @num_ext: number of Signal extensions specified in @ext
*/
struct counter_signal {
int id;
const char *name;
- const struct counter_signal_ext *ext;
+ struct counter_comp *ext;
size_t num_ext;
-
- void *priv;
-};
-
-/**
- * struct counter_signal_enum_ext - Signal enum extension attribute
- * @items: Array of strings
- * @num_items: Number of items specified in @items
- * @set: Set callback function; may be NULL
- * @get: Get callback function; may be NULL
- *
- * The counter_signal_enum_ext structure can be used to implement enum style
- * Signal extension attributes. Enum style attributes are those which have a set
- * of strings that map to unsigned integer values. The Generic Counter Signal
- * enum extension helper code takes care of mapping between value and string, as
- * well as generating a "_available" file which contains a list of all available
- * items. The get callback is used to query the currently active item; the index
- * of the item within the respective items array is returned via the 'item'
- * parameter. The set callback is called when the attribute is updated; the
- * 'item' parameter contains the index of the newly activated item within the
- * respective items array.
- */
-struct counter_signal_enum_ext {
- const char * const *items;
- size_t num_items;
- int (*get)(struct counter_device *counter,
- struct counter_signal *signal, size_t *item);
- int (*set)(struct counter_device *counter,
- struct counter_signal *signal, size_t item);
-};
-
-/**
- * COUNTER_SIGNAL_ENUM() - Initialize Signal enum extension
- * @_name: Attribute name
- * @_e: Pointer to a counter_signal_enum_ext structure
- *
- * This should usually be used together with COUNTER_SIGNAL_ENUM_AVAILABLE()
- */
-#define COUNTER_SIGNAL_ENUM(_name, _e) \
-{ \
- .name = (_name), \
- .read = counter_signal_enum_read, \
- .write = counter_signal_enum_write, \
- .priv = (_e) \
-}
-
-/**
- * COUNTER_SIGNAL_ENUM_AVAILABLE() - Initialize Signal enum available extension
- * @_name: Attribute name ("_available" will be appended to the name)
- * @_e: Pointer to a counter_signal_enum_ext structure
- *
- * Creates a read only attribute that lists all the available enum items in a
- * newline separated list. This should usually be used together with
- * COUNTER_SIGNAL_ENUM()
- */
-#define COUNTER_SIGNAL_ENUM_AVAILABLE(_name, _e) \
-{ \
- .name = (_name "_available"), \
- .read = counter_signal_enum_available_read, \
- .priv = (_e) \
-}
-
-enum counter_synapse_action {
- COUNTER_SYNAPSE_ACTION_NONE = 0,
- COUNTER_SYNAPSE_ACTION_RISING_EDGE,
- COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
- COUNTER_SYNAPSE_ACTION_BOTH_EDGES
};
/**
* struct counter_synapse - Counter Synapse node
- * @action: index of current action mode
* @actions_list: array of available action modes
* @num_actions: number of action modes specified in @actions_list
- * @signal: pointer to associated signal
+ * @signal: pointer to the associated Signal
*/
struct counter_synapse {
- size_t action;
const enum counter_synapse_action *actions_list;
size_t num_actions;
struct counter_signal *signal;
};
-struct counter_count;
-
-/**
- * struct counter_count_ext - Counter Count extension
- * @name: attribute name
- * @read: read callback for this attribute; may be NULL
- * @write: write callback for this attribute; may be NULL
- * @priv: data private to the driver
- */
-struct counter_count_ext {
- const char *name;
- ssize_t (*read)(struct counter_device *counter,
- struct counter_count *count, void *priv, char *buf);
- ssize_t (*write)(struct counter_device *counter,
- struct counter_count *count, void *priv,
- const char *buf, size_t len);
- void *priv;
-};
-
-enum counter_count_function {
- COUNTER_COUNT_FUNCTION_INCREASE = 0,
- COUNTER_COUNT_FUNCTION_DECREASE,
- COUNTER_COUNT_FUNCTION_PULSE_DIRECTION,
- COUNTER_COUNT_FUNCTION_QUADRATURE_X1_A,
- COUNTER_COUNT_FUNCTION_QUADRATURE_X1_B,
- COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A,
- COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B,
- COUNTER_COUNT_FUNCTION_QUADRATURE_X4
-};
-
/**
* struct counter_count - Counter Count node
- * @id: unique ID used to identify Count
- * @name: device-specific Count name; ideally, this should match
- * the name as it appears in the datasheet documentation
- * @function: index of current function mode
- * @functions_list: array available function modes
+ * @id: unique ID used to identify the Count
+ * @name: device-specific Count name
+ * @functions_list: array of available function modes
* @num_functions: number of function modes specified in @functions_list
- * @synapses: array of synapses for initialization
- * @num_synapses: number of synapses specified in @synapses
- * @ext: optional array of Counter Count extensions
- * @num_ext: number of Counter Count extensions specified in @ext
- * @priv: optional private data supplied by driver
+ * @synapses: array of Synapses for initialization
+ * @num_synapses: number of Synapses specified in @synapses
+ * @ext: optional array of Count extensions
+ * @num_ext: number of Count extensions specified in @ext
*/
struct counter_count {
int id;
const char *name;
- size_t function;
- const enum counter_count_function *functions_list;
+ const enum counter_function *functions_list;
size_t num_functions;
struct counter_synapse *synapses;
size_t num_synapses;
- const struct counter_count_ext *ext;
+ struct counter_comp *ext;
size_t num_ext;
-
- void *priv;
-};
-
-/**
- * struct counter_count_enum_ext - Count enum extension attribute
- * @items: Array of strings
- * @num_items: Number of items specified in @items
- * @set: Set callback function; may be NULL
- * @get: Get callback function; may be NULL
- *
- * The counter_count_enum_ext structure can be used to implement enum style
- * Count extension attributes. Enum style attributes are those which have a set
- * of strings that map to unsigned integer values. The Generic Counter Count
- * enum extension helper code takes care of mapping between value and string, as
- * well as generating a "_available" file which contains a list of all available
- * items. The get callback is used to query the currently active item; the index
- * of the item within the respective items array is returned via the 'item'
- * parameter. The set callback is called when the attribute is updated; the
- * 'item' parameter contains the index of the newly activated item within the
- * respective items array.
- */
-struct counter_count_enum_ext {
- const char * const *items;
- size_t num_items;
- int (*get)(struct counter_device *counter, struct counter_count *count,
- size_t *item);
- int (*set)(struct counter_device *counter, struct counter_count *count,
- size_t item);
-};
-
-/**
- * COUNTER_COUNT_ENUM() - Initialize Count enum extension
- * @_name: Attribute name
- * @_e: Pointer to a counter_count_enum_ext structure
- *
- * This should usually be used together with COUNTER_COUNT_ENUM_AVAILABLE()
- */
-#define COUNTER_COUNT_ENUM(_name, _e) \
-{ \
- .name = (_name), \
- .read = counter_count_enum_read, \
- .write = counter_count_enum_write, \
- .priv = (_e) \
-}
-
-/**
- * COUNTER_COUNT_ENUM_AVAILABLE() - Initialize Count enum available extension
- * @_name: Attribute name ("_available" will be appended to the name)
- * @_e: Pointer to a counter_count_enum_ext structure
- *
- * Creates a read only attribute that lists all the available enum items in a
- * newline separated list. This should usually be used together with
- * COUNTER_COUNT_ENUM()
- */
-#define COUNTER_COUNT_ENUM_AVAILABLE(_name, _e) \
-{ \
- .name = (_name "_available"), \
- .read = counter_count_enum_available_read, \
- .priv = (_e) \
-}
-
-/**
- * struct counter_device_attr_group - internal container for attribute group
- * @attr_group: Counter sysfs attributes group
- * @attr_list: list to keep track of created Counter sysfs attributes
- * @num_attr: number of Counter sysfs attributes
- */
-struct counter_device_attr_group {
- struct attribute_group attr_group;
- struct list_head attr_list;
- size_t num_attr;
};
/**
- * struct counter_device_state - internal state container for a Counter device
- * @id: unique ID used to identify the Counter
- * @dev: internal device structure
- * @groups_list: attribute groups list (for Signals, Counts, and ext)
- * @num_groups: number of attribute groups containers
- * @groups: Counter sysfs attribute groups (to populate @dev.groups)
+ * struct counter_event_node - Counter Event node
+ * @l: node in the list of currently watched Counter events
+ * @event: event that triggers
+ * @channel: event channel
+ * @comp_list: list of components to watch when event triggers
*/
-struct counter_device_state {
- int id;
- struct device dev;
- struct counter_device_attr_group *groups_list;
- size_t num_groups;
- const struct attribute_group **groups;
-};
-
-enum counter_signal_value {
- COUNTER_SIGNAL_LOW = 0,
- COUNTER_SIGNAL_HIGH
+struct counter_event_node {
+ struct list_head l;
+ u8 event;
+ u8 channel;
+ struct list_head comp_list;
};
/**
* struct counter_ops - Callbacks from driver
- * @signal_read: optional read callback for Signal attribute. The read
- * value of the respective Signal should be passed back via
- * the val parameter.
- * @count_read: optional read callback for Count attribute. The read
- * value of the respective Count should be passed back via
- * the val parameter.
- * @count_write: optional write callback for Count attribute. The write
- * value for the respective Count is passed in via the val
+ * @signal_read: optional read callback for Signals. The read level of
+ * the respective Signal should be passed back via the
+ * level parameter.
+ * @count_read: read callback for Counts. The read value of the
+ * respective Count should be passed back via the value
+ * parameter.
+ * @count_write: optional write callback for Counts. The write value for
+ * the respective Count is passed in via the value
* parameter.
- * @function_get: function to get the current count function mode. Returns
- * 0 on success and negative error code on error. The index
- * of the respective Count's returned function mode should
- * be passed back via the function parameter.
- * @function_set: function to set the count function mode. function is the
- * index of the requested function mode from the respective
- * Count's functions_list array.
- * @action_get: function to get the current action mode. Returns 0 on
- * success and negative error code on error. The index of
- * the respective Synapse's returned action mode should be
+ * @function_read: read callback for the Count function modes. The read
+ * function mode of the respective Count should be passed
+ * back via the function parameter.
+ * @function_write: optional write callback for Count function modes. The
+ * function mode to write for the respective Count is
+ * passed in via the function parameter.
+ * @action_read: optional read callback for the Synapse action modes. The
+ * read action mode of the respective Synapse should be
* passed back via the action parameter.
- * @action_set: function to set the action mode. action is the index of
- * the requested action mode from the respective Synapse's
- * actions_list array.
+ * @action_write: optional write callback for Synapse action modes. The
+ * action mode to write for the respective Synapse is
+ * passed in via the action parameter.
+ * @events_configure: optional write callback to configure events. The list of
+ * struct counter_event_node may be accessed via the
+ * events_list member of the counter parameter.
+ * @watch_validate: optional callback to validate a watch. The Counter
+ * component watch configuration is passed in via the watch
+ * parameter. A return value of 0 indicates a valid Counter
+ * component watch configuration.
*/
struct counter_ops {
int (*signal_read)(struct counter_device *counter,
struct counter_signal *signal,
- enum counter_signal_value *val);
+ enum counter_signal_level *level);
int (*count_read)(struct counter_device *counter,
- struct counter_count *count, unsigned long *val);
+ struct counter_count *count, u64 *value);
int (*count_write)(struct counter_device *counter,
- struct counter_count *count, unsigned long val);
- int (*function_get)(struct counter_device *counter,
- struct counter_count *count, size_t *function);
- int (*function_set)(struct counter_device *counter,
- struct counter_count *count, size_t function);
- int (*action_get)(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse, size_t *action);
- int (*action_set)(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse, size_t action);
+ struct counter_count *count, u64 value);
+ int (*function_read)(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function);
+ int (*function_write)(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function function);
+ int (*action_read)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action);
+ int (*action_write)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action action);
+ int (*events_configure)(struct counter_device *counter);
+ int (*watch_validate)(struct counter_device *counter,
+ const struct counter_watch *watch);
};
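A hedged sketch of the reworked ops: count_read now yields a u64 value directly, and function_read returns a counter_function enum instead of an index into functions_list (driver names and register access are hypothetical):

static int my_count_read(struct counter_device *counter,
                         struct counter_count *count, u64 *value)
{
        struct my_priv *priv = counter_priv(counter);   /* hypothetical */

        *value = readl(priv->base + MY_COUNT_REG);      /* hypothetical */
        return 0;
}

static int my_function_read(struct counter_device *counter,
                            struct counter_count *count,
                            enum counter_function *function)
{
        *function = COUNTER_FUNCTION_QUADRATURE_X4;     /* fixed mode */
        return 0;
}

static const struct counter_ops my_counter_ops = {
        .count_read = my_count_read,
        .function_read = my_function_read,
};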
/**
- * struct counter_device_ext - Counter device extension
- * @name: attribute name
- * @read: read callback for this attribute; may be NULL
- * @write: write callback for this attribute; may be NULL
- * @priv: data private to the driver
- */
-struct counter_device_ext {
- const char *name;
- ssize_t (*read)(struct counter_device *counter, void *priv, char *buf);
- ssize_t (*write)(struct counter_device *counter, void *priv,
- const char *buf, size_t len);
- void *priv;
-};
-
-/**
- * struct counter_device_enum_ext - Counter enum extension attribute
- * @items: Array of strings
- * @num_items: Number of items specified in @items
- * @set: Set callback function; may be NULL
- * @get: Get callback function; may be NULL
- *
- * The counter_device_enum_ext structure can be used to implement enum style
- * Counter extension attributes. Enum style attributes are those which have a
- * set of strings that map to unsigned integer values. The Generic Counter enum
- * extension helper code takes care of mapping between value and string, as well
- * as generating a "_available" file which contains a list of all available
- * items. The get callback is used to query the currently active item; the index
- * of the item within the respective items array is returned via the 'item'
- * parameter. The set callback is called when the attribute is updated; the
- * 'item' parameter contains the index of the newly activated item within the
- * respective items array.
- */
-struct counter_device_enum_ext {
- const char * const *items;
- size_t num_items;
- int (*get)(struct counter_device *counter, size_t *item);
- int (*set)(struct counter_device *counter, size_t item);
-};
-
-/**
- * COUNTER_DEVICE_ENUM() - Initialize Counter enum extension
- * @_name: Attribute name
- * @_e: Pointer to a counter_device_enum_ext structure
- *
- * This should usually be used together with COUNTER_DEVICE_ENUM_AVAILABLE()
- */
-#define COUNTER_DEVICE_ENUM(_name, _e) \
-{ \
- .name = (_name), \
- .read = counter_device_enum_read, \
- .write = counter_device_enum_write, \
- .priv = (_e) \
-}
-
-/**
- * COUNTER_DEVICE_ENUM_AVAILABLE() - Initialize Counter enum available extension
- * @_name: Attribute name ("_available" will be appended to the name)
- * @_e: Pointer to a counter_device_enum_ext structure
- *
- * Creates a read only attribute that lists all the available enum items in a
- * newline separated list. This should usually be used together with
- * COUNTER_DEVICE_ENUM()
- */
-#define COUNTER_DEVICE_ENUM_AVAILABLE(_name, _e) \
-{ \
- .name = (_name "_available"), \
- .read = counter_device_enum_available_read, \
- .priv = (_e) \
-}
-
-/**
* struct counter_device - Counter data structure
- * @name: name of the device as it appears in the datasheet
+ * @name: name of the device
* @parent: optional parent device providing the counters
- * @device_state: internal device state container
* @ops: callbacks from driver
* @signals: array of Signals
* @num_signals: number of Signals specified in @signals
@@ -425,11 +360,21 @@ struct counter_device_enum_ext {
* @ext: optional array of Counter device extensions
* @num_ext: number of Counter device extensions specified in @ext
* @priv: optional private data supplied by driver
+ * @dev: internal device structure
+ * @chrdev: internal character device structure
+ * @events_list: list of currently watched Counter events
+ * @events_list_lock: lock to protect Counter events list operations
+ * @next_events_list: list of Counter events to watch next
+ * @n_events_list_lock: lock to protect Counter next events list operations
+ * @events: queue of detected Counter events
+ * @events_wait: wait queue to allow blocking reads of Counter events
+ * @events_in_lock: lock to protect Counter events queue in operations
+ * @events_out_lock: lock to protect Counter events queue out operations
+ * @ops_exist_lock: lock to prevent use during removal
*/
struct counter_device {
const char *name;
struct device *parent;
- struct counter_device_state *device_state;
const struct counter_ops *ops;
@@ -438,17 +383,250 @@ struct counter_device {
struct counter_count *counts;
size_t num_counts;
- const struct counter_device_ext *ext;
+ struct counter_comp *ext;
size_t num_ext;
- void *priv;
+ struct device dev;
+ struct cdev chrdev;
+ struct list_head events_list;
+ spinlock_t events_list_lock;
+ struct list_head next_events_list;
+ struct mutex n_events_list_lock;
+ DECLARE_KFIFO_PTR(events, struct counter_event);
+ wait_queue_head_t events_wait;
+ spinlock_t events_in_lock;
+ struct mutex events_out_lock;
+ struct mutex ops_exist_lock;
};
-int counter_register(struct counter_device *const counter);
+void *counter_priv(const struct counter_device *const counter);
+
+struct counter_device *counter_alloc(size_t sizeof_priv);
+void counter_put(struct counter_device *const counter);
+int counter_add(struct counter_device *const counter);
+
void counter_unregister(struct counter_device *const counter);
-int devm_counter_register(struct device *dev,
- struct counter_device *const counter);
-void devm_counter_unregister(struct device *dev,
- struct counter_device *const counter);
+struct counter_device *devm_counter_alloc(struct device *dev,
+ size_t sizeof_priv);
+int devm_counter_add(struct device *dev,
+ struct counter_device *const counter);
+void counter_push_event(struct counter_device *const counter, const u8 event,
+ const u8 channel);
+
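Registration follows an allocate-then-add sequence: counter_alloc() reserves sizeof_priv bytes of driver state, retrievable via counter_priv(). A hedged sketch of a probe path (the priv type, ops, and arrays are hypothetical):

static int my_probe(struct platform_device *pdev)
{
        struct counter_device *counter;
        struct my_priv *priv;                   /* hypothetical */

        counter = devm_counter_alloc(&pdev->dev, sizeof(*priv));
        if (!counter)
                return -ENOMEM;
        priv = counter_priv(counter);

        counter->name = dev_name(&pdev->dev);
        counter->parent = &pdev->dev;
        counter->ops = &my_counter_ops;         /* hypothetical ops */
        counter->counts = my_counts;            /* hypothetical arrays */
        counter->num_counts = ARRAY_SIZE(my_counts);

        return devm_counter_add(&pdev->dev, counter);
}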
+#define COUNTER_COMP_DEVICE_U8(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U8, \
+ .name = (_name), \
+ .device_u8_read = (_read), \
+ .device_u8_write = (_write), \
+}
+#define COUNTER_COMP_COUNT_U8(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U8, \
+ .name = (_name), \
+ .count_u8_read = (_read), \
+ .count_u8_write = (_write), \
+}
+#define COUNTER_COMP_SIGNAL_U8(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U8, \
+ .name = (_name), \
+ .signal_u8_read = (_read), \
+ .signal_u8_write = (_write), \
+}
+
+#define COUNTER_COMP_DEVICE_U64(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U64, \
+ .name = (_name), \
+ .device_u64_read = (_read), \
+ .device_u64_write = (_write), \
+}
+#define COUNTER_COMP_COUNT_U64(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U64, \
+ .name = (_name), \
+ .count_u64_read = (_read), \
+ .count_u64_write = (_write), \
+}
+#define COUNTER_COMP_SIGNAL_U64(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U64, \
+ .name = (_name), \
+ .signal_u64_read = (_read), \
+ .signal_u64_write = (_write), \
+}
+
+#define COUNTER_COMP_DEVICE_BOOL(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_BOOL, \
+ .name = (_name), \
+ .device_u8_read = (_read), \
+ .device_u8_write = (_write), \
+}
+#define COUNTER_COMP_COUNT_BOOL(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_BOOL, \
+ .name = (_name), \
+ .count_u8_read = (_read), \
+ .count_u8_write = (_write), \
+}
+#define COUNTER_COMP_SIGNAL_BOOL(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_BOOL, \
+ .name = (_name), \
+ .signal_u8_read = (_read), \
+ .signal_u8_write = (_write), \
+}
+
+struct counter_available {
+ union {
+ const u32 *enums;
+ const char *const *strs;
+ };
+ size_t num_items;
+};
+
+#define DEFINE_COUNTER_AVAILABLE(_name, _enums) \
+ struct counter_available _name = { \
+ .enums = (_enums), \
+ .num_items = ARRAY_SIZE(_enums), \
+ }
+
+#define DEFINE_COUNTER_ENUM(_name, _strs) \
+ struct counter_available _name = { \
+ .strs = (_strs), \
+ .num_items = ARRAY_SIZE(_strs), \
+ }
+
+#define COUNTER_COMP_DEVICE_ENUM(_name, _get, _set, _available) \
+{ \
+ .type = COUNTER_COMP_ENUM, \
+ .name = (_name), \
+ .device_u32_read = (_get), \
+ .device_u32_write = (_set), \
+ .priv = &(_available), \
+}
+#define COUNTER_COMP_COUNT_ENUM(_name, _get, _set, _available) \
+{ \
+ .type = COUNTER_COMP_ENUM, \
+ .name = (_name), \
+ .count_u32_read = (_get), \
+ .count_u32_write = (_set), \
+ .priv = &(_available), \
+}
+#define COUNTER_COMP_SIGNAL_ENUM(_name, _get, _set, _available) \
+{ \
+ .type = COUNTER_COMP_ENUM, \
+ .name = (_name), \
+ .signal_u32_read = (_get), \
+ .signal_u32_write = (_set), \
+ .priv = &(_available), \
+}
+
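Enum-style extensions now pair a counter_available table with a COUNTER_COMP_*_ENUM() initializer; the core maps strings to indices and emits the "_available" attribute. A hedged sketch (mode strings and callbacks are illustrative):

static const char *const my_filter_modes[] = { "off", "low", "high" };
static DEFINE_COUNTER_ENUM(my_filter_available, my_filter_modes);

static int my_filter_read(struct counter_device *counter,
                          struct counter_signal *signal, u32 *mode)
{
        *mode = 0;      /* index into my_filter_modes: "off" */
        return 0;
}

static int my_filter_write(struct counter_device *counter,
                           struct counter_signal *signal, u32 mode)
{
        /* mode arrives as an index the core resolved from the string */
        return 0;
}

static struct counter_comp my_signal_ext[] = {
        COUNTER_COMP_SIGNAL_ENUM("filter_mode", my_filter_read,
                                 my_filter_write, my_filter_available),
};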
+struct counter_array {
+ enum counter_comp_type type;
+ const struct counter_available *avail;
+ union {
+ size_t length;
+ size_t idx;
+ };
+};
+
+#define DEFINE_COUNTER_ARRAY_U64(_name, _length) \
+ struct counter_array _name = { \
+ .type = COUNTER_COMP_U64, \
+ .length = (_length), \
+ }
+
+#define DEFINE_COUNTER_ARRAY_CAPTURE(_name, _length) \
+ DEFINE_COUNTER_ARRAY_U64(_name, _length)
+
+#define DEFINE_COUNTER_ARRAY_POLARITY(_name, _available, _length) \
+ struct counter_array _name = { \
+ .type = COUNTER_COMP_SIGNAL_POLARITY, \
+ .avail = &(_available), \
+ .length = (_length), \
+ }
+
+#define COUNTER_COMP_DEVICE_ARRAY_U64(_name, _read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = (_name), \
+ .device_array_u64_read = (_read), \
+ .device_array_u64_write = (_write), \
+ .priv = &(_array), \
+}
+#define COUNTER_COMP_COUNT_ARRAY_U64(_name, _read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = (_name), \
+ .count_array_u64_read = (_read), \
+ .count_array_u64_write = (_write), \
+ .priv = &(_array), \
+}
+#define COUNTER_COMP_SIGNAL_ARRAY_U64(_name, _read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = (_name), \
+ .signal_array_u64_read = (_read), \
+ .signal_array_u64_write = (_write), \
+ .priv = &(_array), \
+}
+
+#define COUNTER_COMP_CAPTURE(_read, _write) \
+ COUNTER_COMP_COUNT_U64("capture", _read, _write)
+
+#define COUNTER_COMP_CEILING(_read, _write) \
+ COUNTER_COMP_COUNT_U64("ceiling", _read, _write)
+
+#define COUNTER_COMP_COUNT_MODE(_read, _write, _available) \
+{ \
+ .type = COUNTER_COMP_COUNT_MODE, \
+ .name = "count_mode", \
+ .count_u32_read = (_read), \
+ .count_u32_write = (_write), \
+ .priv = &(_available), \
+}
+
+#define COUNTER_COMP_DIRECTION(_read) \
+{ \
+ .type = COUNTER_COMP_COUNT_DIRECTION, \
+ .name = "direction", \
+ .count_u32_read = (_read), \
+}
+
+#define COUNTER_COMP_ENABLE(_read, _write) \
+ COUNTER_COMP_COUNT_BOOL("enable", _read, _write)
+
+#define COUNTER_COMP_FLOOR(_read, _write) \
+ COUNTER_COMP_COUNT_U64("floor", _read, _write)
+
+#define COUNTER_COMP_POLARITY(_read, _write, _available) \
+{ \
+ .type = COUNTER_COMP_SIGNAL_POLARITY, \
+ .name = "polarity", \
+ .signal_u32_read = (_read), \
+ .signal_u32_write = (_write), \
+ .priv = &(_available), \
+}
+
+#define COUNTER_COMP_PRESET(_read, _write) \
+ COUNTER_COMP_COUNT_U64("preset", _read, _write)
+
+#define COUNTER_COMP_PRESET_ENABLE(_read, _write) \
+ COUNTER_COMP_COUNT_BOOL("preset_enable", _read, _write)
+
+#define COUNTER_COMP_ARRAY_CAPTURE(_read, _write, _array) \
+ COUNTER_COMP_COUNT_ARRAY_U64("capture", _read, _write, _array)
+
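Array extensions bind a counter_array descriptor to the array-flavoured callbacks; for instance, a four-element capture buffer (hedged sketch, names hypothetical):

static DEFINE_COUNTER_ARRAY_CAPTURE(my_capture_array, 4);

static int my_capture_read(struct counter_device *counter,
                           struct counter_count *count, size_t idx, u64 *val)
{
        struct my_priv *priv = counter_priv(counter);   /* hypothetical */

        *val = priv->capture[idx];                      /* hypothetical */
        return 0;
}

static struct counter_comp my_capture_ext[] = {
        COUNTER_COMP_ARRAY_CAPTURE(my_capture_read, NULL, my_capture_array),
};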
+#define COUNTER_COMP_ARRAY_POLARITY(_read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = "polarity", \
+ .signal_array_u32_read = (_read), \
+ .signal_array_u32_write = (_write), \
+ .priv = &(_array), \
+}
#endif /* _COUNTER_H_ */
diff --git a/include/linux/counter_enum.h b/include/linux/counter_enum.h
deleted file mode 100644
index 9f917298a88f..000000000000
--- a/include/linux/counter_enum.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Counter interface enum functions
- * Copyright (C) 2018 William Breathitt Gray
- */
-#ifndef _COUNTER_ENUM_H_
-#define _COUNTER_ENUM_H_
-
-#include <linux/types.h>
-
-struct counter_device;
-struct counter_signal;
-struct counter_count;
-
-ssize_t counter_signal_enum_read(struct counter_device *counter,
- struct counter_signal *signal, void *priv,
- char *buf);
-ssize_t counter_signal_enum_write(struct counter_device *counter,
- struct counter_signal *signal, void *priv,
- const char *buf, size_t len);
-
-ssize_t counter_signal_enum_available_read(struct counter_device *counter,
- struct counter_signal *signal,
- void *priv, char *buf);
-
-ssize_t counter_count_enum_read(struct counter_device *counter,
- struct counter_count *count, void *priv,
- char *buf);
-ssize_t counter_count_enum_write(struct counter_device *counter,
- struct counter_count *count, void *priv,
- const char *buf, size_t len);
-
-ssize_t counter_count_enum_available_read(struct counter_device *counter,
- struct counter_count *count,
- void *priv, char *buf);
-
-ssize_t counter_device_enum_read(struct counter_device *counter, void *priv,
- char *buf);
-ssize_t counter_device_enum_write(struct counter_device *counter, void *priv,
- const char *buf, size_t len);
-
-ssize_t counter_device_enum_available_read(struct counter_device *counter,
- void *priv, char *buf);
-
-#endif /* _COUNTER_ENUM_H_ */
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 4f005d95ce88..eacb7dd7b3af 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -230,6 +230,18 @@ enum {
#define CPER_MEM_VALID_RANK_NUMBER 0x8000
#define CPER_MEM_VALID_CARD_HANDLE 0x10000
#define CPER_MEM_VALID_MODULE_HANDLE 0x20000
+#define CPER_MEM_VALID_ROW_EXT 0x40000
+#define CPER_MEM_VALID_BANK_GROUP 0x80000
+#define CPER_MEM_VALID_BANK_ADDRESS 0x100000
+#define CPER_MEM_VALID_CHIP_ID 0x200000
+
+#define CPER_MEM_EXT_ROW_MASK 0x3
+#define CPER_MEM_EXT_ROW_SHIFT 16
+
+#define CPER_MEM_BANK_ADDRESS_MASK 0xff
+#define CPER_MEM_BANK_GROUP_SHIFT 8
+
+#define CPER_MEM_CHIP_ID_SHIFT 5
#define CPER_PCIE_VALID_PORT_TYPE 0x0001
#define CPER_PCIE_VALID_VERSION 0x0002
@@ -443,7 +455,7 @@ struct cper_sec_mem_err_old {
u8 error_type;
};
-/* Memory Error Section (UEFI >= v2.3), UEFI v2.7 sec N.2.5 */
+/* Memory Error Section (UEFI >= v2.3), UEFI v2.8 sec N.2.5 */
struct cper_sec_mem_err {
u64 validation_bits;
u64 error_status;
@@ -461,7 +473,7 @@ struct cper_sec_mem_err {
u64 responder_id;
u64 target_id;
u8 error_type;
- u8 reserved;
+ u8 extended;
u16 rank;
u16 mem_array_handle; /* "card handle" in UEFI 2.4 */
u16 mem_dev_handle; /* "module handle" in UEFI 2.4 */
@@ -483,8 +495,16 @@ struct cper_mem_err_compact {
u16 rank;
u16 mem_array_handle;
u16 mem_dev_handle;
+ u8 extended;
};
+static inline u32 cper_get_mem_extension(u64 mem_valid, u8 mem_extended)
+{
+ if (!(mem_valid & CPER_MEM_VALID_ROW_EXT))
+ return 0;
+ return (mem_extended & CPER_MEM_EXT_ROW_MASK) << CPER_MEM_EXT_ROW_SHIFT;
+}
+
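These masks split existing record fields once the new validation bits are set: the 16-bit bank field carries a bank group and a bank address, and the extended byte contributes two more row bits plus a chip ID. A hedged decode sketch in the style of the cper.c printers (the function itself is illustrative):

static void my_decode_mem_err(const struct cper_sec_mem_err *mem)
{
        u64 v = mem->validation_bits;
        u32 row = mem->row | cper_get_mem_extension(v, mem->extended);

        if (v & CPER_MEM_VALID_BANK_GROUP)
                pr_info("bank_group: %u\n",
                        mem->bank >> CPER_MEM_BANK_GROUP_SHIFT);
        if (v & CPER_MEM_VALID_BANK_ADDRESS)
                pr_info("bank_address: %u\n",
                        mem->bank & CPER_MEM_BANK_ADDRESS_MASK);
        if (v & CPER_MEM_VALID_CHIP_ID)
                pr_info("chip_id: %u\n",
                        mem->extended >> CPER_MEM_CHIP_ID_SHIFT);
        if (v & CPER_MEM_VALID_ROW)
                pr_info("row: %u\n", row);
}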
/* PCI Express Error Section, UEFI v2.7 sec N.2.7 */
struct cper_sec_pcie {
u64 validation_bits;
@@ -521,6 +541,15 @@ struct cper_sec_pcie {
u8 aer_info[96];
};
+/* Firmware Error Record Reference, UEFI v2.7 sec N.2.10 */
+struct cper_sec_fw_err_rec_ref {
+ u8 record_type;
+ u8 revision;
+ u8 reserved[6];
+ u64 record_identifier;
+ guid_t record_identifier_guid;
+};
+
/* Reset to default packing */
#pragma pack()
@@ -529,6 +558,7 @@ extern const char *const cper_proc_error_type_strs[4];
u64 cper_next_record_id(void);
const char *cper_severity_str(unsigned int);
const char *cper_mem_err_type_str(unsigned int);
+const char *cper_mem_err_status_str(u64 status);
void cper_print_bits(const char *prefix, unsigned int bits,
const char * const strs[], unsigned int strs_size);
void cper_mem_err_pack(const struct cper_sec_mem_err *,
@@ -539,5 +569,7 @@ void cper_print_proc_arm(const char *pfx,
const struct cper_sec_proc_arm *proc);
void cper_print_proc_ia(const char *pfx,
const struct cper_sec_proc_ia *proc);
+int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg);
+int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg);
#endif
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 1ca2baf817ed..314802f98b9d 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -64,6 +64,12 @@ extern ssize_t cpu_show_tsx_async_abort(struct device *dev,
char *buf);
extern ssize_t cpu_show_itlb_multihit(struct device *dev,
struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_mmio_stale_data(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+extern ssize_t cpu_show_retbleed(struct device *dev,
+ struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
@@ -88,10 +94,13 @@ extern ssize_t arch_cpu_release(const char *, size_t);
#ifdef CONFIG_SMP
extern bool cpuhp_tasks_frozen;
-int cpu_up(unsigned int cpu);
+int add_cpu(unsigned int cpu);
+int cpu_device_up(struct device *dev);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
+int bringup_hibernate_cpu(unsigned int sleep_cpu);
+void bringup_nonboot_cpus(unsigned int setup_max_cpus);
#else /* CONFIG_SMP */
#define cpuhp_tasks_frozen 0
@@ -104,9 +113,13 @@ static inline void cpu_maps_update_done(void)
{
}
+static inline int add_cpu(unsigned int cpu) { return 0; }
+
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;
+extern int lockdep_is_cpus_held(void);
+
#ifdef CONFIG_HOTPLUG_CPU
extern void cpus_write_lock(void);
extern void cpus_write_unlock(void);
@@ -117,7 +130,9 @@ extern void lockdep_assert_cpus_held(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
void clear_tasks_mm_cpumask(int cpu);
-int cpu_down(unsigned int cpu);
+int remove_cpu(unsigned int cpu);
+int cpu_device_down(struct device *dev);
+extern void smp_shutdown_nonboot_cpus(unsigned int primary_cpu);
#else /* CONFIG_HOTPLUG_CPU */
@@ -129,21 +144,13 @@ static inline int cpus_read_trylock(void) { return true; }
static inline void lockdep_assert_cpus_held(void) { }
static inline void cpu_hotplug_disable(void) { }
static inline void cpu_hotplug_enable(void) { }
+static inline int remove_cpu(unsigned int cpu) { return -EPERM; }
+static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { }
#endif /* !CONFIG_HOTPLUG_CPU */
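For callers the rename is mechanical: cpu_up()/cpu_down() become add_cpu()/remove_cpu(), with cpu_device_up()/cpu_device_down() serving the sysfs online/offline paths. A purely illustrative sketch of a converted caller:

static int my_cycle_cpu(unsigned int cpu)
{
        int ret;

        ret = remove_cpu(cpu);          /* was: cpu_down(cpu) */
        if (ret)
                return ret;

        return add_cpu(cpu);            /* was: cpu_up(cpu) */
}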
-/* Wrappers which go away once all code is converted */
-static inline void cpu_hotplug_begin(void) { cpus_write_lock(); }
-static inline void cpu_hotplug_done(void) { cpus_write_unlock(); }
-static inline void get_online_cpus(void) { cpus_read_lock(); }
-static inline void put_online_cpus(void) { cpus_read_unlock(); }
-
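With the compatibility wrappers gone, remaining callers take the hotplug lock directly, e.g. (illustrative sketch):

static void my_report_online(void)
{
        unsigned int cpu;

        cpus_read_lock();               /* was: get_online_cpus() */
        for_each_online_cpu(cpu)
                pr_info("cpu%u is online\n", cpu);
        cpus_read_unlock();             /* was: put_online_cpus() */
}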
#ifdef CONFIG_PM_SLEEP_SMP
extern int freeze_secondary_cpus(int primary);
-static inline int disable_nonboot_cpus(void)
-{
- return freeze_secondary_cpus(0);
-}
-extern void enable_nonboot_cpus(void);
+extern void thaw_secondary_cpus(void);
static inline int suspend_disable_secondary_cpus(void)
{
@@ -156,22 +163,21 @@ static inline int suspend_disable_secondary_cpus(void)
}
static inline void suspend_enable_secondary_cpus(void)
{
- return enable_nonboot_cpus();
+ return thaw_secondary_cpus();
}
#else /* !CONFIG_PM_SLEEP_SMP */
-static inline int disable_nonboot_cpus(void) { return 0; }
-static inline void enable_nonboot_cpus(void) {}
+static inline void thaw_secondary_cpus(void) {}
static inline int suspend_disable_secondary_cpus(void) { return 0; }
static inline void suspend_enable_secondary_cpus(void) { }
#endif /* !CONFIG_PM_SLEEP_SMP */
-void cpu_startup_entry(enum cpuhp_state state);
+void __noreturn cpu_startup_entry(enum cpuhp_state state);
void cpu_idle_poll_ctrl(bool enable);
/* Attach to any functions which should be considered cpuidle. */
-#define __cpuidle __attribute__((__section__(".cpuidle.text")))
+#define __cpuidle __section(".cpuidle.text")
bool cpu_in_idle(unsigned long pc);
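
The cpu.h hunks above rename the hotplug entry points (cpu_up()/cpu_down() become add_cpu()/remove_cpu()) and retire the get_online_cpus()/put_online_cpus() wrappers in favor of the underlying cpus_read_lock() primitives. A minimal sketch of an updated caller; the helper name and flow are hypothetical:

#include <linux/cpu.h>

/* Offline a CPU and bring it back with the renamed entry points.
 * Both take the required hotplug locks internally. */
static int example_cycle_cpu(unsigned int cpu)
{
	int ret;

	ret = remove_cpu(cpu);	/* was: cpu_down(cpu) */
	if (ret)
		return ret;

	return add_cpu(cpu);	/* was: cpu_up(cpu) */
}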
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index 65501d8f9778..a3bdc8a98f2c 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -63,18 +63,10 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy)
struct cpuidle_driver;
#ifdef CONFIG_CPU_IDLE_THERMAL
-int cpuidle_cooling_register(struct cpuidle_driver *drv);
-int cpuidle_of_cooling_register(struct device_node *np,
- struct cpuidle_driver *drv);
+void cpuidle_cooling_register(struct cpuidle_driver *drv);
#else /* CONFIG_CPU_IDLE_THERMAL */
-static inline int cpuidle_cooling_register(struct cpuidle_driver *drv)
+static inline void cpuidle_cooling_register(struct cpuidle_driver *drv)
{
- return 0;
-}
-static inline int cpuidle_of_cooling_register(struct device_node *np,
- struct cpuidle_driver *drv)
-{
- return 0;
}
#endif /* CONFIG_CPU_IDLE_THERMAL */
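
With cpuidle_cooling_register() now returning void and the separate OF variant folded in, callers have no error to propagate. A hypothetical call site after this change:

#include <linux/cpu_cooling.h>

/* Registration is now fire-and-forget: failure to create the cooling
 * device no longer fails cpuidle driver setup. */
static void example_setup_idle_cooling(struct cpuidle_driver *drv)
{
	cpuidle_cooling_register(drv);
}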
diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h
index 02edeafcb2bf..be8aea04d023 100644
--- a/include/linux/cpu_rmap.h
+++ b/include/linux/cpu_rmap.h
@@ -28,7 +28,7 @@ struct cpu_rmap {
struct {
u16 index;
u16 dist;
- } near[0];
+ } near[];
};
#define CPU_RMAP_DIST_INF 0xffff
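
near[0] becomes a C99 flexible array member, the form the kernel has standardized on. A sketch of the allocation pattern that goes with it, assuming the struct_size() helper from linux/overflow.h:

#include <linux/cpu_rmap.h>
#include <linux/overflow.h>
#include <linux/slab.h>

/* struct_size() computes sizeof(*rmap) plus 'size' trailing near[]
 * entries with overflow checking, replacing open-coded arithmetic. */
static struct cpu_rmap *example_alloc_rmap(unsigned int size)
{
	struct cpu_rmap *rmap;

	rmap = kzalloc(struct_size(rmap, near, size), GFP_KERNEL);
	return rmap;	/* NULL on allocation failure */
}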
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 0fb561d1b524..d5595d57f4e5 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -9,10 +9,14 @@
#define _LINUX_CPUFREQ_H
#include <linux/clk.h>
+#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
@@ -65,7 +69,6 @@ struct cpufreq_policy {
unsigned int max; /* in kHz */
unsigned int cur; /* in kHz, only needed if cpufreq
* governors are used */
- unsigned int restore_freq; /* = policy->cur before transition */
unsigned int suspend_freq; /* freq to set during suspend */
unsigned int policy; /* see above */
@@ -110,6 +113,19 @@ struct cpufreq_policy {
bool fast_switch_enabled;
/*
+ * Set if the CPUFREQ_GOV_STRICT_TARGET flag is set for the current
+ * governor.
+ */
+ bool strict_target;
+
+ /*
+ * Set if inefficient frequencies were found in the frequency table.
+ * This indicates if the relation flag CPUFREQ_RELATION_E can be
+ * honored.
+ */
+ bool efficiencies_available;
+
+ /*
* Preferred average time interval between consecutive invocations of
* the driver to set the frequency for this policy. To be set by the
* scaling driver (0, which is the default, means no preference).
@@ -127,7 +143,7 @@ struct cpufreq_policy {
/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
unsigned int cached_target_freq;
- int cached_resolved_idx;
+ unsigned int cached_resolved_idx;
/* Synchronization for frequency transitions */
bool transition_ongoing; /* Tracks transition status */
@@ -205,6 +221,7 @@ static inline bool policy_is_shared(struct cpufreq_policy *policy)
unsigned int cpufreq_get(unsigned int cpu);
unsigned int cpufreq_quick_get(unsigned int cpu);
unsigned int cpufreq_quick_get_max(unsigned int cpu);
+unsigned int cpufreq_get_hw_max_freq(unsigned int cpu);
void disable_cpufreq(void);
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
@@ -216,6 +233,7 @@ void refresh_frequency_limits(struct cpufreq_policy *policy);
void cpufreq_update_policy(unsigned int cpu);
void cpufreq_update_limits(unsigned int cpu);
bool have_governor_per_policy(void);
+bool cpufreq_supports_freq_invariance(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
@@ -232,6 +250,14 @@ static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
return 0;
}
+static inline unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
+{
+ return 0;
+}
+static inline bool cpufreq_supports_freq_invariance(void)
+{
+ return false;
+}
static inline void disable_cpufreq(void) { }
#endif
@@ -254,6 +280,12 @@ static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy
#define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */
#define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */
#define CPUFREQ_RELATION_C 2 /* closest frequency to target */
+/* relation flags */
+#define CPUFREQ_RELATION_E BIT(2) /* Get if possible an efficient frequency */
+
+#define CPUFREQ_RELATION_LE (CPUFREQ_RELATION_L | CPUFREQ_RELATION_E)
+#define CPUFREQ_RELATION_HE (CPUFREQ_RELATION_H | CPUFREQ_RELATION_E)
+#define CPUFREQ_RELATION_CE (CPUFREQ_RELATION_C | CPUFREQ_RELATION_E)
struct freq_attr {
struct attribute attr;
@@ -288,7 +320,7 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
struct cpufreq_driver {
char name[CPUFREQ_NAME_LEN];
- u8 flags;
+ u16 flags;
void *driver_data;
/* needed by all drivers */
@@ -298,10 +330,6 @@ struct cpufreq_driver {
/* define one out of two */
int (*setpolicy)(struct cpufreq_policy *policy);
- /*
- * On failure, should always restore frequency to policy->restore_freq
- * (i.e. old freq).
- */
int (*target)(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation); /* Deprecated */
@@ -309,15 +337,15 @@ struct cpufreq_driver {
unsigned int index);
unsigned int (*fast_switch)(struct cpufreq_policy *policy,
unsigned int target_freq);
-
/*
- * Caches and returns the lowest driver-supported frequency greater than
- * or equal to the target frequency, subject to any driver limitations.
- * Does not set the frequency. Only to be implemented for drivers with
- * target().
+ * ->fast_switch() replacement for drivers that use an internal
+ * representation of performance levels and can pass hints other than
+ * the target performance level to the hardware.
*/
- unsigned int (*resolve_freq)(struct cpufreq_policy *policy,
- unsigned int target_freq);
+ void (*adjust_perf)(unsigned int cpu,
+ unsigned long min_perf,
+ unsigned long target_perf,
+ unsigned long capacity);
/*
* Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
@@ -325,7 +353,7 @@ struct cpufreq_driver {
*
* get_intermediate should return a stable intermediate frequency
* platform wants to switch to and target_intermediate() should set CPU
- * to to that frequency, before jumping to the frequency corresponding
+ * to that frequency, before jumping to the frequency corresponding
* to 'index'. Core will take care of sending notifications and driver
* doesn't have to handle them in target_intermediate() or
* target_index().
@@ -351,7 +379,6 @@ struct cpufreq_driver {
int (*online)(struct cpufreq_policy *policy);
int (*offline)(struct cpufreq_policy *policy);
int (*exit)(struct cpufreq_policy *policy);
- void (*stop_cpu)(struct cpufreq_policy *policy);
int (*suspend)(struct cpufreq_policy *policy);
int (*resume)(struct cpufreq_policy *policy);
@@ -362,19 +389,33 @@ struct cpufreq_driver {
/* platform specific boost support code */
bool boost_enabled;
- int (*set_boost)(int state);
+ int (*set_boost)(struct cpufreq_policy *policy, int state);
+
+ /*
+ * Set by drivers that want to register with the energy model after the
+ * policy is properly initialized, but before the governor is started.
+ */
+ void (*register_em)(struct cpufreq_policy *policy);
};
/* flags */
-/* driver isn't removed even if all ->init() calls failed */
-#define CPUFREQ_STICKY BIT(0)
+/*
+ * Set by drivers that need to update internal upper and lower boundaries along
+ * with the target frequency and so the core and governors should also invoke
+ * the driver if the target frequency does not change, but the policy min or max
+ * may have changed.
+ */
+#define CPUFREQ_NEED_UPDATE_LIMITS BIT(0)
/* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */
#define CPUFREQ_CONST_LOOPS BIT(1)
-/* don't warn on suspend/resume speed mismatches */
-#define CPUFREQ_PM_NO_WARN BIT(2)
+/*
+ * Set by drivers that want the core to automatically register the cpufreq
+ * driver as a thermal cooling device.
+ */
+#define CPUFREQ_IS_COOLING_DEV BIT(2)
/*
* This should be set by platforms having multiple clock-domains, i.e.
@@ -406,15 +447,10 @@ struct cpufreq_driver {
*/
#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING BIT(6)
-/*
- * Set by drivers that want the core to automatically register the cpufreq
- * driver as a thermal cooling device.
- */
-#define CPUFREQ_IS_COOLING_DEV BIT(7)
-
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
+bool cpufreq_driver_test_flags(u16 flags);
const char *cpufreq_get_current_driver(void);
void *cpufreq_get_driver_data(void);
@@ -551,15 +587,28 @@ struct cpufreq_governor {
char *buf);
int (*store_setspeed) (struct cpufreq_policy *policy,
unsigned int freq);
- /* For governors which change frequency dynamically by themselves */
- bool dynamic_switching;
struct list_head governor_list;
struct module *owner;
+ u8 flags;
};
+/* Governor flags */
+
+/* For governors which change frequency dynamically by themselves */
+#define CPUFREQ_GOV_DYNAMIC_SWITCHING BIT(0)
+
+/* For governors wanting the target frequency to be set exactly */
+#define CPUFREQ_GOV_STRICT_TARGET BIT(1)
+
/* Pass a target to the cpufreq driver */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq);
+void cpufreq_driver_adjust_perf(unsigned int cpu,
+ unsigned long min_perf,
+ unsigned long target_perf,
+ unsigned long capacity);
+bool cpufreq_driver_has_adjust_perf(void);
int cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
@@ -571,6 +620,22 @@ unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
+int cpufreq_start_governor(struct cpufreq_policy *policy);
+void cpufreq_stop_governor(struct cpufreq_policy *policy);
+
+#define cpufreq_governor_init(__governor) \
+static int __init __governor##_init(void) \
+{ \
+ return cpufreq_register_governor(&__governor); \
+} \
+core_initcall(__governor##_init)
+
+#define cpufreq_governor_exit(__governor) \
+static void __exit __governor##_exit(void) \
+{ \
+ return cpufreq_unregister_governor(&__governor); \
+} \
+module_exit(__governor##_exit)
struct cpufreq_governor *cpufreq_default_governor(void);
struct cpufreq_governor *cpufreq_fallback_governor(void);
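
The cpufreq_governor_init()/cpufreq_governor_exit() macros above absorb the module boilerplate every governor used to open-code. A hypothetical governor registers itself like this (its operational callbacks are assumed to be defined elsewhere):

static struct cpufreq_governor example_governor = {
	.name	= "example",
	.owner	= THIS_MODULE,
	.flags	= CPUFREQ_GOV_DYNAMIC_SWITCHING,
};

cpufreq_governor_init(example_governor);
cpufreq_governor_exit(example_governor);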
@@ -578,9 +643,11 @@ struct cpufreq_governor *cpufreq_fallback_governor(void);
static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
{
if (policy->max < policy->cur)
- __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
+ __cpufreq_driver_target(policy, policy->max,
+ CPUFREQ_RELATION_HE);
else if (policy->min > policy->cur)
- __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+ __cpufreq_driver_target(policy, policy->min,
+ CPUFREQ_RELATION_LE);
}
/* Governor attribute set */
@@ -594,6 +661,11 @@ struct gov_attr_set {
/* sysfs ops for cpufreq governors */
extern const struct sysfs_ops governor_sysfs_ops;
+static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj)
+{
+ return container_of(kobj, struct gov_attr_set, kobj);
+}
+
void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);
@@ -611,10 +683,11 @@ struct governor_attr {
*********************************************************************/
/* Special Values of .frequency field */
-#define CPUFREQ_ENTRY_INVALID ~0u
-#define CPUFREQ_TABLE_END ~1u
+#define CPUFREQ_ENTRY_INVALID ~0u
+#define CPUFREQ_TABLE_END ~1u
/* Special Values of .flags field */
-#define CPUFREQ_BOOST_FREQ (1 << 0)
+#define CPUFREQ_BOOST_FREQ (1 << 0)
+#define CPUFREQ_INEFFICIENT_FREQ (1 << 1)
struct cpufreq_frequency_table {
unsigned int flags;
@@ -691,6 +764,22 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
continue; \
else
+/**
+ * cpufreq_for_each_efficient_entry_idx - iterate with index over a cpufreq
+ * frequency_table excluding CPUFREQ_ENTRY_INVALID and
+ * CPUFREQ_INEFFICIENT_FREQ frequencies.
+ * @pos: the &struct cpufreq_frequency_table to use as a loop cursor.
+ * @table: the &struct cpufreq_frequency_table to iterate over.
+ * @idx: the table entry currently being processed.
+ * @efficiencies: set to true to only iterate over efficient frequencies.
+ */
+
+#define cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) \
+ cpufreq_for_each_valid_entry_idx(pos, table, idx) \
+ if (efficiencies && (pos->flags & CPUFREQ_INEFFICIENT_FREQ)) \
+ continue; \
+ else
+
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table);
@@ -715,14 +804,15 @@ bool policy_has_boost_freq(struct cpufreq_policy *policy);
/* Find lowest freq at or above target in a table in ascending order */
static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq >= target_freq)
@@ -736,14 +826,15 @@ static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
/* Find lowest freq at or above target in a table in descending order */
static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
@@ -766,26 +857,30 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
target_freq = clamp_val(target_freq, policy->min, policy->max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
- return cpufreq_table_find_index_al(policy, target_freq);
+ return cpufreq_table_find_index_al(policy, target_freq,
+ efficiencies);
else
- return cpufreq_table_find_index_dl(policy, target_freq);
+ return cpufreq_table_find_index_dl(policy, target_freq,
+ efficiencies);
}
/* Find highest freq at or below target in a table in ascending order */
static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
@@ -808,14 +903,15 @@ static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
/* Find highest freq at or below target in a table in descending order */
static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq <= target_freq)
@@ -829,26 +925,30 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
target_freq = clamp_val(target_freq, policy->min, policy->max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
- return cpufreq_table_find_index_ah(policy, target_freq);
+ return cpufreq_table_find_index_ah(policy, target_freq,
+ efficiencies);
else
- return cpufreq_table_find_index_dh(policy, target_freq);
+ return cpufreq_table_find_index_dh(policy, target_freq,
+ efficiencies);
}
/* Find closest freq to target in a table in ascending order */
static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
@@ -875,14 +975,15 @@ static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
/* Find closest freq to target in a table in descending order */
static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
@@ -909,35 +1010,58 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
target_freq = clamp_val(target_freq, policy->min, policy->max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
- return cpufreq_table_find_index_ac(policy, target_freq);
+ return cpufreq_table_find_index_ac(policy, target_freq,
+ efficiencies);
else
- return cpufreq_table_find_index_dc(policy, target_freq);
+ return cpufreq_table_find_index_dc(policy, target_freq,
+ efficiencies);
}
static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
+ bool efficiencies = policy->efficiencies_available &&
+ (relation & CPUFREQ_RELATION_E);
+ int idx;
+
+ /* cpufreq_table_index_unsorted() has no use for this flag anyway */
+ relation &= ~CPUFREQ_RELATION_E;
+
if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
return cpufreq_table_index_unsorted(policy, target_freq,
relation);
-
+retry:
switch (relation) {
case CPUFREQ_RELATION_L:
- return cpufreq_table_find_index_l(policy, target_freq);
+ idx = cpufreq_table_find_index_l(policy, target_freq,
+ efficiencies);
+ break;
case CPUFREQ_RELATION_H:
- return cpufreq_table_find_index_h(policy, target_freq);
+ idx = cpufreq_table_find_index_h(policy, target_freq,
+ efficiencies);
+ break;
case CPUFREQ_RELATION_C:
- return cpufreq_table_find_index_c(policy, target_freq);
+ idx = cpufreq_table_find_index_c(policy, target_freq,
+ efficiencies);
+ break;
default:
- pr_err("%s: Invalid relation: %d\n", __func__, relation);
- return -EINVAL;
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+
+ if (idx < 0 && efficiencies) {
+ efficiencies = false;
+ goto retry;
}
+
+ return idx;
}
static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy)
@@ -953,6 +1077,86 @@ static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy
return count;
}
+
+/**
+ * cpufreq_table_set_inefficient() - Mark a frequency as inefficient
+ * @policy: the &struct cpufreq_policy containing the inefficient frequency
+ * @frequency: the inefficient frequency
+ *
+ * The &struct cpufreq_policy must use a sorted frequency table
+ *
+ * Return: %0 on success or a negative errno code
+ */
+
+static inline int
+cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
+ unsigned int frequency)
+{
+ struct cpufreq_frequency_table *pos;
+
+ /* Not supported */
+ if (policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED)
+ return -EINVAL;
+
+ cpufreq_for_each_valid_entry(pos, policy->freq_table) {
+ if (pos->frequency == frequency) {
+ pos->flags |= CPUFREQ_INEFFICIENT_FREQ;
+ policy->efficiencies_available = true;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static inline int parse_perf_domain(int cpu, const char *list_name,
+ const char *cell_name)
+{
+ struct device_node *cpu_np;
+ struct of_phandle_args args;
+ int ret;
+
+ cpu_np = of_cpu_device_node_get(cpu);
+ if (!cpu_np)
+ return -ENODEV;
+
+ ret = of_parse_phandle_with_args(cpu_np, list_name, cell_name, 0,
+ &args);
+ /* drop the node reference on the error path too, or it leaks */
+ of_node_put(cpu_np);
+ if (ret < 0)
+ return ret;
+
+ return args.args[0];
+}
+
+static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
+ const char *cell_name, struct cpumask *cpumask)
+{
+ int target_idx;
+ int cpu, ret;
+
+ ret = parse_perf_domain(pcpu, list_name, cell_name);
+ if (ret < 0)
+ return ret;
+
+ target_idx = ret;
+ cpumask_set_cpu(pcpu, cpumask);
+
+ for_each_possible_cpu(cpu) {
+ if (cpu == pcpu)
+ continue;
+
+ ret = parse_perf_domain(cpu, list_name, cell_name);
+ if (ret < 0)
+ continue;
+
+ if (target_idx == ret)
+ cpumask_set_cpu(cpu, cpumask);
+ }
+
+ return target_idx;
+}
#else
static inline int cpufreq_boost_trigger_state(int state)
{
@@ -972,6 +1176,19 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
return false;
}
+
+static inline int
+cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
+ unsigned int frequency)
+{
+ return -EINVAL;
+}
+
+static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
+ const char *cell_name, struct cpumask *cpumask)
+{
+ return -EOPNOTSUPP;
+}
#endif
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
@@ -982,12 +1199,16 @@ static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
struct cpufreq_governor *old_gov) { }
#endif
-extern void arch_freq_prepare_all(void);
extern unsigned int arch_freq_get_on_cpu(int cpu);
-extern void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
- unsigned long max_freq);
-
+#ifndef arch_set_freq_scale
+static __always_inline
+void arch_set_freq_scale(const struct cpumask *cpus,
+ unsigned long cur_freq,
+ unsigned long max_freq)
+{
+}
+#endif
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
@@ -998,4 +1219,10 @@ unsigned int cpufreq_generic_get(unsigned int cpu);
void cpufreq_generic_init(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
unsigned int transition_latency);
+
+static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy)
+{
+ dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
+ policy->related_cpus);
+}
#endif /* _LINUX_CPUFREQ_H */
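
Putting the cpufreq.h changes together: a driver marks frequency-table entries inefficient, and governors opt in per request via the CPUFREQ_RELATION_*E relations; cpufreq_frequency_table_target() then retries without the restriction when no efficient frequency satisfies the request. A hedged sketch, with 1200000 kHz standing in for a real table entry:

#include <linux/cpufreq.h>

/* Hypothetical driver ->init(): mark a table entry measured to cost
 * more energy per unit of work than a faster one. */
static int example_mark_inefficient(struct cpufreq_policy *policy)
{
	return cpufreq_table_set_inefficient(policy, 1200000);
}

/* A governor asking for "lowest frequency at or above target,
 * efficient if possible" passes the combined relation. */
static int example_pick_index(struct cpufreq_policy *policy,
			      unsigned int target_khz)
{
	return cpufreq_frequency_table_target(policy, target_khz,
					      CPUFREQ_RELATION_LE);
}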
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index d37c17e68268..f61447913db9 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -22,8 +22,42 @@
* AP_ACTIVE AP_ACTIVE
*/
+/*
+ * CPU hotplug states. The state machine invokes the installed state
+ * startup callbacks sequentially from CPUHP_OFFLINE + 1 to CPUHP_ONLINE
+ * during a CPU online operation. During a CPU offline operation the
+ * installed teardown callbacks are invoked in the reverse order from
+ * CPUHP_ONLINE - 1 down to CPUHP_OFFLINE.
+ *
+ * The state space has three sections: PREPARE, STARTING and ONLINE.
+ *
+ * PREPARE: The callbacks are invoked on a control CPU before the
+ * hotplugged CPU is started up or after the hotplugged CPU has died.
+ *
+ * STARTING: The callbacks are invoked on the hotplugged CPU from the low level
+ * hotplug startup/teardown code with interrupts disabled.
+ *
+ * ONLINE: The callbacks are invoked on the hotplugged CPU from the per CPU
+ * hotplug thread with interrupts and preemption enabled.
+ *
+ * Adding explicit states to this enum is only necessary when:
+ *
+ * 1) The state is within the STARTING section
+ *
+ * 2) The state has ordering constraints vs. other states in the
+ * same section.
+ *
+ * If neither #1 nor #2 apply, please use the dynamic state space when
+ * setting up a state by using CPUHP_BP_PREPARE_DYN or CPUHP_AP_ONLINE_DYN
+ * for the @state argument of the setup function.
+ *
+ * See Documentation/core-api/cpu_hotplug.rst for further information and
+ * examples.
+ */
enum cpuhp_state {
CPUHP_INVALID = -1,
+
+ /* PREPARE section invoked on a control CPU */
CPUHP_OFFLINE = 0,
CPUHP_CREATE_THREADS,
CPUHP_PERF_PREPARE,
@@ -36,7 +70,10 @@ enum cpuhp_state {
CPUHP_X86_MCE_DEAD,
CPUHP_VIRT_NET_DEAD,
CPUHP_SLUB_DEAD,
+ CPUHP_DEBUG_OBJ_DEAD,
CPUHP_MM_WRITEBACK_DEAD,
+ /* Must be after CPUHP_MM_VMSTAT_DEAD */
+ CPUHP_MM_DEMOTION_DEAD,
CPUHP_MM_VMSTAT_DEAD,
CPUHP_SOFTIRQ_DEAD,
CPUHP_NET_MVNETA_DEAD,
@@ -45,21 +82,25 @@ enum cpuhp_state {
CPUHP_ARM_OMAP_WAKE_DEAD,
CPUHP_IRQ_POLL_DEAD,
CPUHP_BLOCK_SOFTIRQ_DEAD,
+ CPUHP_BIO_DEAD,
CPUHP_ACPI_CPUDRV_DEAD,
CPUHP_S390_PFAULT_DEAD,
CPUHP_BLK_MQ_DEAD,
CPUHP_FS_BUFF_DEAD,
CPUHP_PRINTK_DEAD,
CPUHP_MM_MEMCQ_DEAD,
+ CPUHP_XFS_DEAD,
CPUHP_PERCPU_CNT_DEAD,
CPUHP_RADIX_DEAD,
- CPUHP_PAGE_ALLOC_DEAD,
+ CPUHP_PAGE_ALLOC,
CPUHP_NET_DEV_DEAD,
CPUHP_PCI_XGENE_DEAD,
- CPUHP_IOMMU_INTEL_DEAD,
+ CPUHP_IOMMU_IOVA_DEAD,
CPUHP_LUSTRE_CFS_DEAD,
CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
CPUHP_PADATA_DEAD,
+ CPUHP_AP_DTPM_CPU_DEAD,
+ CPUHP_RANDOM_PREPARE,
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
@@ -92,6 +133,11 @@ enum cpuhp_state {
CPUHP_BP_PREPARE_DYN,
CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
CPUHP_BRINGUP_CPU,
+
+ /*
+ * STARTING section invoked on the hotplugged CPU in low level
+ * bringup and teardown code.
+ */
CPUHP_AP_IDLE_DEAD,
CPUHP_AP_OFFLINE,
CPUHP_AP_SCHED_STARTING,
@@ -99,9 +145,13 @@ enum cpuhp_state {
CPUHP_AP_CPU_PM_STARTING,
CPUHP_AP_IRQ_GIC_STARTING,
CPUHP_AP_IRQ_HIP04_STARTING,
+ CPUHP_AP_IRQ_APPLE_AIC_STARTING,
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
+ CPUHP_AP_IRQ_RISCV_STARTING,
+ CPUHP_AP_IRQ_LOONGARCH_STARTING,
+ CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_MICROCODE_LOADER,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
@@ -117,6 +167,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
CPUHP_AP_PERF_ARM_ACPI_STARTING,
CPUHP_AP_PERF_ARM_STARTING,
+ CPUHP_AP_PERF_RISCV_STARTING,
CPUHP_AP_ARM_L2X0_STARTING,
CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
@@ -130,7 +181,9 @@ enum cpuhp_state {
CPUHP_AP_MIPS_GIC_TIMER_STARTING,
CPUHP_AP_ARC_TIMER_STARTING,
CPUHP_AP_RISCV_TIMER_STARTING,
+ CPUHP_AP_CLINT_TIMER_STARTING,
CPUHP_AP_CSKY_TIMER_STARTING,
+ CPUHP_AP_TI_GP_TIMER_STARTING,
CPUHP_AP_HYPERV_TIMER_STARTING,
CPUHP_AP_KVM_STARTING,
CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
@@ -139,18 +192,22 @@ enum cpuhp_state {
/* Must be the last timer callback */
CPUHP_AP_DUMMY_TIMER_STARTING,
CPUHP_AP_ARM_XEN_STARTING,
- CPUHP_AP_ARM_KVMPV_STARTING,
CPUHP_AP_ARM_CORESIGHT_STARTING,
+ CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
CPUHP_AP_ARM64_ISNDEP_STARTING,
CPUHP_AP_SMPCFD_DYING,
CPUHP_AP_X86_TBOOT_DYING,
CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
CPUHP_AP_ONLINE,
CPUHP_TEARDOWN_CPU,
+
+ /* Online section invoked on the hotplugged CPU from the hotplug thread */
CPUHP_AP_ONLINE_IDLE,
+ CPUHP_AP_SCHED_WAIT_EMPTY,
CPUHP_AP_SMPBOOT_THREADS,
CPUHP_AP_X86_VDSO_VMA_ONLINE,
CPUHP_AP_IRQ_AFFINITY_ONLINE,
+ CPUHP_AP_BLK_MQ_ONLINE,
CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS,
CPUHP_AP_X86_INTEL_EPB_ONLINE,
CPUHP_AP_PERF_ONLINE,
@@ -161,28 +218,41 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_RAPL_ONLINE,
CPUHP_AP_PERF_X86_CQM_ONLINE,
CPUHP_AP_PERF_X86_CSTATE_ONLINE,
+ CPUHP_AP_PERF_X86_IDXD_ONLINE,
CPUHP_AP_PERF_S390_CF_ONLINE,
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
CPUHP_AP_PERF_ARM_CCN_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE,
+ CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
CPUHP_AP_PERF_ARM_L2X0_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
+ CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE,
+ CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE,
+ CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE,
+ CPUHP_AP_PERF_CSKY_ONLINE,
CPUHP_AP_WATCHDOG_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
+ CPUHP_AP_RANDOM_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
CPUHP_AP_BASE_CACHEINFO_ONLINE,
CPUHP_AP_ONLINE_DYN,
CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
+ /* Must be after CPUHP_AP_ONLINE_DYN for node_states[N_CPU] update */
+ CPUHP_AP_MM_DEMOTION_ONLINE,
CPUHP_AP_X86_HPET_ONLINE,
CPUHP_AP_X86_KVM_CLK_ONLINE,
CPUHP_AP_ACTIVE,
@@ -199,14 +269,15 @@ int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, const char *name,
int (*teardown)(unsigned int cpu),
bool multi_instance);
/**
- * cpuhp_setup_state - Setup hotplug state callbacks with calling the callbacks
+ * cpuhp_setup_state - Setup hotplug state callbacks with calling the @startup
+ * callback
* @state: The state for which the calls are installed
* @name: Name of the callback (will be used in debug output)
- * @startup: startup callback function
- * @teardown: teardown callback function
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
*
- * Installs the callback functions and invokes the startup callback on
- * the present cpus which have already reached the @state.
+ * Installs the callback functions and invokes the @startup callback on
+ * the online cpus which have already reached the @state.
*/
static inline int cpuhp_setup_state(enum cpuhp_state state,
const char *name,
@@ -216,6 +287,18 @@ static inline int cpuhp_setup_state(enum cpuhp_state state,
return __cpuhp_setup_state(state, name, true, startup, teardown, false);
}
+/**
+ * cpuhp_setup_state_cpuslocked - Setup hotplug state callbacks with calling
+ * @startup callback from a cpus_read_lock()
+ * held region
+ * @state: The state for which the calls are installed
+ * @name: Name of the callback (will be used in debug output)
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
+ *
+ * Same as cpuhp_setup_state() except that it must be invoked from within a
+ * cpus_read_lock() held region.
+ */
static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
const char *name,
int (*startup)(unsigned int cpu),
@@ -227,14 +310,14 @@ static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
/**
* cpuhp_setup_state_nocalls - Setup hotplug state callbacks without calling the
- * callbacks
+ * @startup callback
* @state: The state for which the calls are installed
* @name: Name of the callback.
- * @startup: startup callback function
- * @teardown: teardown callback function
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
*
- * Same as @cpuhp_setup_state except that no calls are executed are invoked
- * during installation of this callback. NOP if SMP=n or HOTPLUG_CPU=n.
+ * Same as cpuhp_setup_state() except that the @startup callback is not
+ * invoked during installation. NOP if SMP=n or HOTPLUG_CPU=n.
*/
static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
const char *name,
@@ -245,6 +328,19 @@ static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
false);
}
+/**
+ * cpuhp_setup_state_nocalls_cpuslocked - Setup hotplug state callbacks without
+ * invoking the @startup callback from
+ * a cpus_read_lock() held region
+ * @state: The state for which the calls are installed
+ * @name: Name of the callback.
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
+ *
+ * Same as cpuhp_setup_state_nocalls() except that it must be invoked from
+ * within a cpus_read_lock() held region.
+ */
static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state,
const char *name,
int (*startup)(unsigned int cpu),
@@ -258,13 +354,13 @@ static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state,
* cpuhp_setup_state_multi - Add callbacks for multi state
* @state: The state for which the calls are installed
* @name: Name of the callback.
- * @startup: startup callback function
- * @teardown: teardown callback function
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
*
* Sets the internal multi_instance flag and prepares a state to work as a multi
* instance callback. No callbacks are invoked at this point. The callbacks are
* invoked once an instance for this state is registered via
- * @cpuhp_state_add_instance or @cpuhp_state_add_instance_nocalls.
+ * cpuhp_state_add_instance() or cpuhp_state_add_instance_nocalls()
*/
static inline int cpuhp_setup_state_multi(enum cpuhp_state state,
const char *name,
@@ -289,9 +385,10 @@ int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
* @state: The state for which the instance is installed
* @node: The node for this individual state.
*
- * Installs the instance for the @state and invokes the startup callback on
- * the present cpus which have already reached the @state. The @state must have
- * been earlier marked as multi-instance by @cpuhp_setup_state_multi.
+ * Installs the instance for the @state and invokes the registered startup
+ * callback on the online cpus which have already reached the @state. The
+ * @state must have been earlier marked as multi-instance by
+ * cpuhp_setup_state_multi().
*/
static inline int cpuhp_state_add_instance(enum cpuhp_state state,
struct hlist_node *node)
@@ -305,8 +402,9 @@ static inline int cpuhp_state_add_instance(enum cpuhp_state state,
* @state: The state for which the instance is installed
* @node: The node for this individual state.
*
- * Installs the instance for the @state The @state must have been earlier
- * marked as multi-instance by @cpuhp_setup_state_multi.
+ * Installs the instance for the @state. The @state must have been earlier
+ * marked as multi-instance by cpuhp_setup_state_multi(). NOP if SMP=n or
+ * HOTPLUG_CPU=n.
*/
static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
struct hlist_node *node)
@@ -314,6 +412,17 @@ static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
return __cpuhp_state_add_instance(state, node, false);
}
+/**
+ * cpuhp_state_add_instance_nocalls_cpuslocked - Add an instance for a state
+ * without invoking the startup
+ * callback from a cpus_read_lock()
+ * held region.
+ * @state: The state for which the instance is installed
+ * @node: The node for this individual state.
+ *
+ * Same as cpuhp_state_add_instance_nocalls() except that it must be
+ * invoked from within a cpus_read_lock() held region.
+ */
static inline int
cpuhp_state_add_instance_nocalls_cpuslocked(enum cpuhp_state state,
struct hlist_node *node)
@@ -329,7 +438,7 @@ void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke);
* @state: The state for which the calls are removed
*
* Removes the callback functions and invokes the teardown callback on
- * the present cpus which have already reached the @state.
+ * the online cpus which have already reached the @state.
*/
static inline void cpuhp_remove_state(enum cpuhp_state state)
{
@@ -338,7 +447,7 @@ static inline void cpuhp_remove_state(enum cpuhp_state state)
/**
* cpuhp_remove_state_nocalls - Remove hotplug state callbacks without invoking
- * teardown
+ * the teardown callback
* @state: The state for which the calls are removed
*/
static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
@@ -346,6 +455,14 @@ static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
__cpuhp_remove_state(state, false);
}
+/**
+ * cpuhp_remove_state_nocalls_cpuslocked - Remove hotplug state callbacks without invoking
+ * teardown from a cpus_read_lock() held region.
+ * @state: The state for which the calls are removed
+ *
+ * Same as cpuhp_remove_state_nocalls() except that it must be invoked
+ * from within a cpus_read_lock() held region.
+ */
static inline void cpuhp_remove_state_nocalls_cpuslocked(enum cpuhp_state state)
{
__cpuhp_remove_state_cpuslocked(state, false);
@@ -373,8 +490,8 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
* @state: The state from which the instance is removed
* @node: The node for this individual state.
*
- * Removes the instance and invokes the teardown callback on the present cpus
- * which have already reached the @state.
+ * Removes the instance and invokes the teardown callback on the online cpus
+ * which have already reached @state.
*/
static inline int cpuhp_state_remove_instance(enum cpuhp_state state,
struct hlist_node *node)
@@ -384,7 +501,7 @@ static inline int cpuhp_state_remove_instance(enum cpuhp_state state,
/**
* cpuhp_state_remove_instance_nocalls - Remove hotplug instance from state
- * without invoking the reatdown callback
+ * without invoking the teardown callback
* @state: The state from which the instance is removed
* @node: The node for this individual state.
*
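
Per the new state-space comment, callbacks without ordering constraints belong in the dynamic ranges rather than in new explicit enum entries. A minimal, hypothetical registration in the dynamic ONLINE section:

#include <linux/cpuhotplug.h>

static int example_online(unsigned int cpu)
{
	/* runs on @cpu from the hotplug thread, preemption enabled */
	return 0;
}

static int example_offline(unsigned int cpu)
{
	return 0;
}

static int __init example_register(void)
{
	int state;

	/* CPUHP_AP_ONLINE_DYN allocates a slot in the dynamic range;
	 * the non-negative return value identifies it for removal. */
	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				  example_online, example_offline);
	return state < 0 ? state : 0;
}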
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index ec2ef63771f0..fce476275e16 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -38,6 +38,7 @@ struct cpuidle_state_usage {
u64 time_ns;
unsigned long long above; /* Number of times it's been too deep */
unsigned long long below; /* Number of times it's been too shallow */
+ unsigned long long rejected; /* Number of times idle entry was rejected */
#ifdef CONFIG_SUSPEND
unsigned long long s2idle_usage;
unsigned long long s2idle_time; /* in US */
@@ -48,8 +49,8 @@ struct cpuidle_state {
char name[CPUIDLE_NAME_LEN];
char desc[CPUIDLE_DESC_LEN];
- u64 exit_latency_ns;
- u64 target_residency_ns;
+ s64 exit_latency_ns;
+ s64 target_residency_ns;
unsigned int flags;
unsigned int exit_latency; /* in US */
int power_usage; /* in mW */
@@ -65,19 +66,24 @@ struct cpuidle_state {
* CPUs execute ->enter_s2idle with the local tick or entire timekeeping
* suspended, so it must not re-enable interrupts at any point (even
* temporarily) or attempt to change states of clock event devices.
+ *
+ * This callback may point to the same function as ->enter if all of
+ * the above requirements are met by it.
*/
- void (*enter_s2idle) (struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index);
+ int (*enter_s2idle)(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index);
};
/* Idle State Flags */
-#define CPUIDLE_FLAG_NONE (0x00)
-#define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */
-#define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */
-#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */
-#define CPUIDLE_FLAG_UNUSABLE BIT(3) /* avoid using this state */
-#define CPUIDLE_FLAG_OFF BIT(4) /* disable this state by default */
+#define CPUIDLE_FLAG_NONE (0x00)
+#define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */
+#define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */
+#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */
+#define CPUIDLE_FLAG_UNUSABLE BIT(3) /* avoid using this state */
+#define CPUIDLE_FLAG_OFF BIT(4) /* disable this state by default */
+#define CPUIDLE_FLAG_TLB_FLUSHED BIT(5) /* idle-state flushes TLBs */
+#define CPUIDLE_FLAG_RCU_IDLE BIT(6) /* idle-state takes care of RCU */
struct cpuidle_device_kobj;
struct cpuidle_state_kobj;
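
The hunk above lets ->enter_s2idle share an implementation with ->enter when the s2idle constraints are met, and gives it an int return. A sketch of a state-table entry built on that rule; the enter routine and all numbers are placeholders:

#include <linux/cpuidle.h>

/* If ->enter never re-enables interrupts and leaves clock event
 * devices alone, the same function can serve as ->enter_s2idle. */
static int example_enter(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv, int index)
{
	/* architecture-specific low-power entry would go here */
	return index;
}

static struct cpuidle_state example_state = {
	.name			= "EX1",
	.desc			= "hypothetical clock-gated state",
	.flags			= CPUIDLE_FLAG_TIMER_STOP,
	.exit_latency		= 2,	/* us */
	.target_residency	= 10,	/* us */
	.enter			= example_enter,
	.enter_s2idle		= example_enter,
};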
@@ -265,13 +271,8 @@ struct cpuidle_governor {
void (*reflect) (struct cpuidle_device *dev, int index);
};
-#ifdef CONFIG_CPU_IDLE
extern int cpuidle_register_governor(struct cpuidle_governor *gov);
extern s64 cpuidle_governor_latency_req(unsigned int cpu);
-#else
-static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
-{return 0;}
-#endif
#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, \
idx, \
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index d5cc88514aee..c2aa0aa26b45 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -12,6 +12,8 @@
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/bug.h>
+#include <linux/gfp_types.h>
+#include <linux/numa.h>
/* Don't assign or return these: may not be this big! */
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
@@ -33,19 +35,23 @@ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
*/
#define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
-#if NR_CPUS == 1
-#define nr_cpu_ids 1U
+#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
+#define nr_cpu_ids ((unsigned int)NR_CPUS)
#else
extern unsigned int nr_cpu_ids;
#endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
-/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
- * not all bits may be allocated. */
-#define nr_cpumask_bits nr_cpu_ids
+static inline void set_nr_cpu_ids(unsigned int nr)
+{
+#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
+ WARN_ON(nr != nr_cpu_ids);
#else
-#define nr_cpumask_bits ((unsigned int)NR_CPUS)
+ nr_cpu_ids = nr;
#endif
+}
+
+/* Deprecated. Always use nr_cpu_ids. */
+#define nr_cpumask_bits nr_cpu_ids
/*
* The following particular system cpumasks and operations manage
@@ -65,10 +71,6 @@ extern unsigned int nr_cpu_ids;
* cpu_online_mask is the dynamic subset of cpu_present_mask,
* indicating those CPUs available for scheduling.
*
- * If HOTPLUG is enabled, then cpu_possible_mask is forced to have
- * all NR_CPUS bits set, otherwise it is just the set of CPUs that
- * ACPI reports present at boot.
- *
* If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
* depending on what ACPI reports as currently plugged in, otherwise
* cpu_present_mask is just a copy of cpu_possible_mask.
@@ -91,47 +93,18 @@ extern struct cpumask __cpu_possible_mask;
extern struct cpumask __cpu_online_mask;
extern struct cpumask __cpu_present_mask;
extern struct cpumask __cpu_active_mask;
+extern struct cpumask __cpu_dying_mask;
#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
#define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask)
#define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask)
#define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask)
+#define cpu_dying_mask ((const struct cpumask *)&__cpu_dying_mask)
extern atomic_t __num_online_cpus;
-#if NR_CPUS > 1
-/**
- * num_online_cpus() - Read the number of online CPUs
- *
- * Despite the fact that __num_online_cpus is of type atomic_t, this
- * interface gives only a momentary snapshot and is not protected against
- * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held
- * region.
- */
-static inline unsigned int num_online_cpus(void)
-{
- return atomic_read(&__num_online_cpus);
-}
-#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
-#define num_present_cpus() cpumask_weight(cpu_present_mask)
-#define num_active_cpus() cpumask_weight(cpu_active_mask)
-#define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask)
-#define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask)
-#define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask)
-#define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask)
-#else
-#define num_online_cpus() 1U
-#define num_possible_cpus() 1U
-#define num_present_cpus() 1U
-#define num_active_cpus() 1U
-#define cpu_online(cpu) ((cpu) == 0)
-#define cpu_possible(cpu) ((cpu) == 0)
-#define cpu_present(cpu) ((cpu) == 0)
-#define cpu_active(cpu) ((cpu) == 0)
-#endif
-
extern cpumask_t cpus_booted_once_mask;
-static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
+static __always_inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
WARN_ON_ONCE(cpu >= bits);
@@ -139,79 +112,45 @@ static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
}
/* verify cpu argument to cpumask_* operators */
-static inline unsigned int cpumask_check(unsigned int cpu)
+static __always_inline unsigned int cpumask_check(unsigned int cpu)
{
cpu_max_bits_warn(cpu, nr_cpumask_bits);
return cpu;
}
-#if NR_CPUS == 1
-/* Uniprocessor. Assume all masks are "1". */
+/**
+ * cpumask_first - get the first cpu in a cpumask
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
static inline unsigned int cpumask_first(const struct cpumask *srcp)
{
- return 0;
-}
-
-static inline unsigned int cpumask_last(const struct cpumask *srcp)
-{
- return 0;
-}
-
-/* Valid inputs for n are -1 and 0. */
-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
-{
- return n+1;
-}
-
-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
-{
- return n+1;
-}
-
-static inline unsigned int cpumask_next_and(int n,
- const struct cpumask *srcp,
- const struct cpumask *andp)
-{
- return n+1;
-}
-
-static inline unsigned int cpumask_next_wrap(int n, const struct cpumask *mask,
- int start, bool wrap)
-{
- /* cpu0 unless stop condition, wrap and at cpu0, then nr_cpumask_bits */
- return (wrap && n == 0);
-}
-
-/* cpu must be a valid cpu, ie 0, so there's no other choice. */
-static inline unsigned int cpumask_any_but(const struct cpumask *mask,
- unsigned int cpu)
-{
- return 1;
+ return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
}
-static inline unsigned int cpumask_local_spread(unsigned int i, int node)
+/**
+ * cpumask_first_zero - get the first unset cpu in a cpumask
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if all cpus are set.
+ */
+static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
{
- return 0;
+ return find_first_zero_bit(cpumask_bits(srcp), nr_cpumask_bits);
}
-#define for_each_cpu(cpu, mask) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#define for_each_cpu_not(cpu, mask) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#define for_each_cpu_wrap(cpu, mask, start) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
-#define for_each_cpu_and(cpu, mask1, mask2) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
-#else
/**
- * cpumask_first - get the first cpu in a cpumask
- * @srcp: the cpumask pointer
+ * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
+ * @src1p: the first input
+ * @src2p: the second input
*
- * Returns >= nr_cpu_ids if no cpus set.
+ * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
*/
-static inline unsigned int cpumask_first(const struct cpumask *srcp)
+static inline
+unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
- return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
+ return find_first_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), nr_cpumask_bits);
}
/**
@@ -225,7 +164,21 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp)
return find_last_bit(cpumask_bits(srcp), nr_cpumask_bits);
}
-unsigned int cpumask_next(int n, const struct cpumask *srcp);
+/**
+ * cpumask_next - get the next cpu in a cpumask
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no further cpus set.
+ */
+static inline
+unsigned int cpumask_next(int n, const struct cpumask *srcp)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
+}
/**
* cpumask_next_zero - get the next unset cpu in a cpumask
@@ -242,9 +195,48 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
}
-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
-int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
+#if NR_CPUS == 1
+/* Uniprocessor: there is only one valid CPU */
+static inline unsigned int cpumask_local_spread(unsigned int i, int node)
+{
+ return 0;
+}
+
+static inline unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ return cpumask_first_and(src1p, src2p);
+}
+
+static inline unsigned int cpumask_any_distribute(const struct cpumask *srcp)
+{
+ return cpumask_first(srcp);
+}
+#else
unsigned int cpumask_local_spread(unsigned int i, int node);
+unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p);
+unsigned int cpumask_any_distribute(const struct cpumask *srcp);
+#endif /* NR_CPUS */
+
+/**
+ * cpumask_next_and - get the next cpu in *src1p & *src2p
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @src1p: the first cpumask pointer
+ * @src2p: the second cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no further cpus set in both.
+ */
+static inline
+unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
+ nr_cpumask_bits, n + 1);
+}
/**
* for_each_cpu - iterate over every cpu in a mask
@@ -254,9 +246,7 @@ unsigned int cpumask_local_spread(unsigned int i, int node);
* After the loop, cpu is >= nr_cpu_ids.
*/
#define for_each_cpu(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = cpumask_next((cpu), (mask)), \
- (cpu) < nr_cpu_ids;)
+ for_each_set_bit(cpu, cpumask_bits(mask), nr_cpumask_bits)
/**
* for_each_cpu_not - iterate over every cpu in a complemented mask
@@ -266,26 +256,41 @@ unsigned int cpumask_local_spread(unsigned int i, int node);
* After the loop, cpu is >= nr_cpu_ids.
*/
#define for_each_cpu_not(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = cpumask_next_zero((cpu), (mask)), \
- (cpu) < nr_cpu_ids;)
+ for_each_clear_bit(cpu, cpumask_bits(mask), nr_cpumask_bits)
-extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+#if NR_CPUS == 1
+static inline
+unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+{
+ cpumask_check(start);
+ if (n != -1)
+ cpumask_check(n);
+
+ /*
+ * Return the first available CPU when wrapping, or when starting before cpu0,
+ * since there is only one valid option.
+ */
+ if (wrap && n >= 0)
+ return nr_cpumask_bits;
+
+ return cpumask_first(mask);
+}
+#else
+unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+#endif
/**
* for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
* @cpu: the (optionally unsigned) integer iterator
- * @mask: the cpumask poiter
+ * @mask: the cpumask pointer
* @start: the start location
*
* The implementation does not assume any bit in @mask is set (including @start).
*
* After the loop, cpu is >= nr_cpu_ids.
*/
-#define for_each_cpu_wrap(cpu, mask, start) \
- for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false); \
- (cpu) < nr_cpumask_bits; \
- (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
+#define for_each_cpu_wrap(cpu, mask, start) \
+ for_each_set_bit_wrap(cpu, cpumask_bits(mask), nr_cpumask_bits, start)
/**
* for_each_cpu_and - iterate over every cpu in both masks
@@ -302,10 +307,89 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
* After the loop, cpu is >= nr_cpu_ids.
*/
#define for_each_cpu_and(cpu, mask1, mask2) \
- for ((cpu) = -1; \
- (cpu) = cpumask_next_and((cpu), (mask1), (mask2)), \
- (cpu) < nr_cpu_ids;)
-#endif /* SMP */
+ for_each_and_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), nr_cpumask_bits)
+
+/**
+ * for_each_cpu_andnot - iterate over every cpu present in one mask, excluding
+ * those present in another.
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
+ *
+ * This saves a temporary CPU mask in many places. It is equivalent to:
+ * struct cpumask tmp;
+ * cpumask_andnot(&tmp, &mask1, &mask2);
+ * for_each_cpu(cpu, &tmp)
+ * ...
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_andnot(cpu, mask1, mask2) \
+ for_each_andnot_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), nr_cpumask_bits)
+
+/**
+ * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
+ * @mask: the cpumask to search
+ * @cpu: the cpu to ignore.
+ *
+ * Often used to find any cpu but smp_processor_id() in a mask.
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+static inline
+unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
+{
+ unsigned int i;
+
+ cpumask_check(cpu);
+ for_each_cpu(i, mask)
+ if (i != cpu)
+ break;
+ return i;
+}
+
+/**
+ * cpumask_nth - get the Nth cpu in a cpumask
+ * @srcp: the cpumask pointer
+ * @cpu: the N'th cpu to find, starting from 0
+ *
+ * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ */
+static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
+{
+ return find_nth_bit(cpumask_bits(srcp), nr_cpumask_bits, cpumask_check(cpu));
+}
+
+/**
+ * cpumask_nth_and - get the Nth cpu in 2 cpumasks
+ * @srcp1: the cpumask pointer
+ * @srcp2: the cpumask pointer
+ * @cpu: the N'th cpu to find, starting from 0
+ *
+ * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ */
+static inline
+unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
+{
+ return find_nth_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
+ nr_cpumask_bits, cpumask_check(cpu));
+}
+
+/**
+ * cpumask_nth_andnot - get the Nth cpu set in 1st cpumask, and clear in 2nd.
+ * @srcp1: the cpumask pointer
+ * @srcp2: the cpumask pointer
+ * @cpu: the N'th cpu to find, starting from 0
+ *
+ * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ */
+static inline
+unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
+{
+ return find_nth_andnot_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
+ nr_cpumask_bits, cpumask_check(cpu));
+}
#define CPU_BITS_NONE \
{ \
@@ -322,12 +406,12 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
* @cpu: cpu number (< nr_cpu_ids)
* @dstp: the cpumask pointer
*/
-static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+static __always_inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
-static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+static __always_inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
@@ -338,12 +422,12 @@ static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
* @cpu: cpu number (< nr_cpu_ids)
* @dstp: the cpumask pointer
*/
-static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+static __always_inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
-static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
__clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
@@ -353,9 +437,9 @@ static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns 1 if @cpu is set in @cpumask, else returns 0
+ * Returns true if @cpu is set in @cpumask, else returns false
*/
-static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
+static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}
@@ -365,11 +449,11 @@ static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
+ * Returns true if @cpu is set in old bitmap of @cpumask, else returns false
*
* test_and_set_bit wrapper for cpumasks.
*/
-static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
+static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
@@ -379,11 +463,11 @@ static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
+ * Returns true if @cpu is set in old bitmap of @cpumask, else returns false
*
* test_and_clear_bit wrapper for cpumasks.
*/
-static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
+static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
@@ -412,9 +496,9 @@ static inline void cpumask_clear(struct cpumask *dstp)
* @src1p: the first input
* @src2p: the second input
*
- * If *@dstp is empty, returns 0, else returns 1
+ * If *@dstp is empty, returns false, else returns true
*/
-static inline int cpumask_and(struct cpumask *dstp,
+static inline bool cpumask_and(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
@@ -455,9 +539,9 @@ static inline void cpumask_xor(struct cpumask *dstp,
* @src1p: the first input
* @src2p: the second input
*
- * If *@dstp is empty, returns 0, else returns 1
+ * If *@dstp is empty, returns false, else returns true
*/
-static inline int cpumask_andnot(struct cpumask *dstp,
+static inline bool cpumask_andnot(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
@@ -520,9 +604,9 @@ static inline bool cpumask_intersects(const struct cpumask *src1p,
* @src1p: the first input
* @src2p: the second input
*
- * Returns 1 if *@src1p is a subset of *@src2p, else returns 0
+ * Returns true if *@src1p is a subset of *@src2p, else returns false
*/
-static inline int cpumask_subset(const struct cpumask *src1p,
+static inline bool cpumask_subset(const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
@@ -557,6 +641,17 @@ static inline unsigned int cpumask_weight(const struct cpumask *srcp)
}
/**
+ * cpumask_weight_and - Count of bits in (*srcp1 & *srcp2)
+ * @srcp1: the first cpumask to count bits (< nr_cpu_ids) in.
+ * @srcp2: the second cpumask to count bits (< nr_cpu_ids) in.
+ */
+static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
+{
+ return bitmap_weight_and(cpumask_bits(srcp1), cpumask_bits(srcp2), nr_cpumask_bits);
+}
+
+/**
* cpumask_shift_right - *dstp = *srcp >> n
* @dstp: the cpumask result
* @srcp: the input to shift
@@ -602,15 +697,6 @@ static inline void cpumask_copy(struct cpumask *dstp,
#define cpumask_any(srcp) cpumask_first(srcp)
/**
- * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
- * @src1p: the first input
- * @src2p: the second input
- *
- * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
- */
-#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p))
-
-/**
* cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
* @mask1: the first input cpumask
* @mask2: the second input cpumask
@@ -733,9 +819,35 @@ typedef struct cpumask *cpumask_var_t;
#define __cpumask_var_read_mostly __read_mostly
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
-bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
-bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
-bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
+
+static inline
+bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
+{
+ return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
+}
+
+/**
+ * alloc_cpumask_var - allocate a struct cpumask
+ * @mask: pointer to cpumask_var_t where the cpumask is returned
+ * @flags: GFP_ flags
+ *
+ * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
+ * a nop returning a constant 1 (in <linux/cpumask.h>).
+ *
+ * See alloc_cpumask_var_node.
+ */
+static inline
+bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+ return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
+}
+
+static inline
+bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+ return alloc_cpumask_var(mask, flags | __GFP_ZERO);
+}
+
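A sketch of the usual allocate/use/free pattern; when CONFIG_CPUMASK_OFFSTACK is not set, the corresponding inlines are no-ops on a stack mask, so callers look the same either way:

	static int count_even_online_cpus(void)
	{
		cpumask_var_t mask;
		unsigned int cpu, weight;

		if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
			return -ENOMEM;

		for_each_online_cpu(cpu)
			if (!(cpu & 1))
				cpumask_set_cpu(cpu, mask);

		weight = cpumask_weight(mask);
		free_cpumask_var(mask);
		return weight;
	}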
void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);
@@ -801,9 +913,16 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
/* First bits of cpu_bit_bitmap are in fact unset. */
#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
+#if NR_CPUS == 1
+/* Uniprocessor: the possible/online/present masks are always "1" */
+#define for_each_possible_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#define for_each_online_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#define for_each_present_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#else
#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
#define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask)
#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask)
+#endif
/* Wrappers for arch boot code to manipulate normally-constant masks */
void init_cpu_present(const struct cpumask *src);
@@ -844,6 +963,14 @@ set_cpu_active(unsigned int cpu, bool active)
cpumask_clear_cpu(cpu, &__cpu_active_mask);
}
+static inline void
+set_cpu_dying(unsigned int cpu, bool dying)
+{
+ if (dying)
+ cpumask_set_cpu(cpu, &__cpu_dying_mask);
+ else
+ cpumask_clear_cpu(cpu, &__cpu_dying_mask);
+}
/**
* to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
@@ -881,6 +1008,82 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
return to_cpumask(p);
}
+#if NR_CPUS > 1
+/**
+ * num_online_cpus() - Read the number of online CPUs
+ *
+ * Despite the fact that __num_online_cpus is of type atomic_t, this
+ * interface gives only a momentary snapshot and is not protected against
+ * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held
+ * region.
+ */
+static inline unsigned int num_online_cpus(void)
+{
+ return atomic_read(&__num_online_cpus);
+}
+#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
+#define num_present_cpus() cpumask_weight(cpu_present_mask)
+#define num_active_cpus() cpumask_weight(cpu_active_mask)
+
+static inline bool cpu_online(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_online_mask);
+}
+
+static inline bool cpu_possible(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_possible_mask);
+}
+
+static inline bool cpu_present(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_present_mask);
+}
+
+static inline bool cpu_active(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_active_mask);
+}
+
+static inline bool cpu_dying(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_dying_mask);
+}
+
+#else
+
+#define num_online_cpus() 1U
+#define num_possible_cpus() 1U
+#define num_present_cpus() 1U
+#define num_active_cpus() 1U
+
+static inline bool cpu_online(unsigned int cpu)
+{
+ return cpu == 0;
+}
+
+static inline bool cpu_possible(unsigned int cpu)
+{
+ return cpu == 0;
+}
+
+static inline bool cpu_present(unsigned int cpu)
+{
+ return cpu == 0;
+}
+
+static inline bool cpu_active(unsigned int cpu)
+{
+ return cpu == 0;
+}
+
+static inline bool cpu_dying(unsigned int cpu)
+{
+ return false;
+}
+
+#endif /* NR_CPUS > 1 */
+
#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
#if NR_CPUS <= BITS_PER_LONG
@@ -915,6 +1118,45 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
nr_cpu_ids);
}
+/**
+ * cpumap_print_bitmask_to_buf - copies the cpumask into the buffer as
+ * hex values of cpumask
+ *
+ * @buf: the buffer to copy into
+ * @mask: the cpumask to copy
+ * @off: offset into the string from which we are copying to @buf
+ * @count: the maximum number of bytes to print
+ *
+ * The function prints the cpumask into the buffer as hex values of the
+ * cpumask; it is typically used by a bin_attribute to export the cpumask
+ * bitmask ABI.
+ *
+ * Returns the number of bytes copied, excluding the terminating '\0'.
+ */
+static inline ssize_t
+cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
+ loff_t off, size_t count)
+{
+ return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),
+ nr_cpu_ids, off, count) - 1;
+}
+
+/**
+ * cpumap_print_list_to_buf - copies the cpumask into the buffer as
+ * a comma-separated list of cpus
+ *
+ * Everything is the same as the above cpumap_print_bitmask_to_buf()
+ * except the print format.
+ */
+static inline ssize_t
+cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
+ loff_t off, size_t count)
+{
+ return bitmap_print_list_to_buf(buf, cpumask_bits(mask),
+ nr_cpu_ids, off, count) - 1;
+}
+
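A sketch of the intended bin_attribute usage (the attribute wiring is hypothetical; BIN_ATTR_RO() pairs the attribute with the cpus_read() callback by name):

	static ssize_t cpus_read(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
	{
		/* @off/@count let userspace read large masks in chunks. */
		return cpumap_print_bitmask_to_buf(buf, cpu_online_mask, off, count);
	}

	static BIN_ATTR_RO(cpus, 0);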
#if NR_CPUS <= BITS_PER_LONG
#define CPU_MASK_ALL \
(cpumask_t) { { \
@@ -938,4 +1180,23 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
[0] = 1UL \
} }
+/*
+ * Provide a valid theoretical max size for cpumap and cpulist sysfs files
+ * to avoid breaking userspace which may allocate a buffer based on the size
+ * reported by e.g. fstat.
+ *
+ * For cpumap, NR_CPUS * 9/32 - 1 should be an exact length.
+ *
+ * For cpulist, 7 is (ceil(log10(NR_CPUS)) + 1), allowing for NR_CPUS to be up
+ * to 2 orders of magnitude larger than 8192. We then divide by 2 to
+ * cover the worst case of every other cpu being on one of two nodes for a
+ * very large NR_CPUS.
+ *
+ * Use PAGE_SIZE as a minimum for smaller configurations while avoiding
+ * an unsigned comparison to -1.
+ */
+#define CPUMAP_FILE_MAX_BYTES (((NR_CPUS * 9)/32 > PAGE_SIZE) \
+ ? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
+#define CPULIST_FILE_MAX_BYTES (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)
+
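As an illustrative check of the cpumap formula (the numbers are ours, not from the source): each 32-bit word prints as 8 hex digits plus a one-byte separator, i.e. 9 bytes per 32 CPUs, so NR_CPUS = 8192 gives 8192 * 9 / 32 - 1 = 2303 bytes once the trailing separator is dropped.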
#endif /* __LINUX_CPUMASK_H */
diff --git a/include/linux/cpumask_api.h b/include/linux/cpumask_api.h
new file mode 100644
index 000000000000..83bd3ebe82b0
--- /dev/null
+++ b/include/linux/cpumask_api.h
@@ -0,0 +1 @@
+#include <linux/cpumask.h>
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 04c20de66afc..d58e0476ee8e 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -15,6 +15,7 @@
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
+#include <linux/mmu_context.h>
#include <linux/jump_label.h>
#ifdef CONFIG_CPUSETS
@@ -33,6 +34,8 @@
*/
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
+extern struct static_key_false cpusets_insane_config_key;
+
static inline bool cpusets_enabled(void)
{
return static_branch_unlikely(&cpusets_enabled_key);
@@ -50,6 +53,19 @@ static inline void cpuset_dec(void)
static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}
+/*
+ * This will get enabled whenever a cpuset configuration is considered
+ * unsupportable in general, e.g. a movable-only node which cannot satisfy
+ * any non-movable allocations (see update_nodemask). The page allocator
+ * needs to make additional checks for those configurations, and this
+ * check is meant to guard those checks without any overhead for sane
+ * configurations.
+ */
+static inline bool cpusets_insane_config(void)
+{
+ return static_branch_unlikely(&cpusets_insane_config_key);
+}
+
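A sketch of the guarding pattern described above; the movability test is a hypothetical placeholder for the page allocator's real checks:

	static bool alloc_needs_insane_config_check(gfp_t gfp_mask)
	{
		/* Static-branch fast path: effectively free for sane configs. */
		if (!cpusets_insane_config())
			return false;

		/* Only pay for the expensive validation here. */
		return !(gfp_mask & __GFP_MOVABLE);
	}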
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
@@ -58,7 +74,7 @@ extern void cpuset_wait_for_hotplug(void);
extern void cpuset_read_lock(void);
extern void cpuset_read_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
+extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
@@ -166,6 +182,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
static inline bool cpusets_enabled(void) { return false; }
+static inline bool cpusets_insane_config(void) { return false; }
+
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
@@ -184,11 +202,12 @@ static inline void cpuset_read_unlock(void) { }
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
{
- cpumask_copy(mask, cpu_possible_mask);
+ cpumask_copy(mask, task_cpu_possible_mask(p));
}
-static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
+static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
{
+ return false;
}
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index 525510a9f965..de62a722431e 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -38,6 +38,12 @@ phys_addr_t paddr_vmcoreinfo_note(void);
#define VMCOREINFO_OSRELEASE(value) \
vmcoreinfo_append_str("OSRELEASE=%s\n", value)
+#define VMCOREINFO_BUILD_ID() \
+ ({ \
+ static_assert(sizeof(vmlinux_build_id) == 20); \
+ vmcoreinfo_append_str("BUILD-ID=%20phN\n", vmlinux_build_id); \
+ })
+
#define VMCOREINFO_PAGESIZE(value) \
vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
#define VMCOREINFO_SYMBOL(name) \
@@ -53,6 +59,9 @@ phys_addr_t paddr_vmcoreinfo_note(void);
#define VMCOREINFO_OFFSET(name, field) \
vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
(unsigned long)offsetof(struct name, field))
+#define VMCOREINFO_TYPE_OFFSET(name, field) \
+ vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
+ (unsigned long)offsetof(name, field))
#define VMCOREINFO_LENGTH(name, value) \
vmcoreinfo_append_str("LENGTH(%s)=%lu\n", #name, (unsigned long)value)
#define VMCOREINFO_NUMBER(name) \
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 4664fc1871de..0f3a656293b0 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -5,17 +5,17 @@
#include <linux/kexec.h>
#include <linux/proc_fs.h>
#include <linux/elf.h>
+#include <linux/pgtable.h>
#include <uapi/linux/vmcore.h>
-#include <asm/pgtable.h> /* for pgprot_t */
-
-#ifdef CONFIG_CRASH_DUMP
+/* For IS_ENABLED(CONFIG_CRASH_DUMP) */
#define ELFCORE_ADDR_MAX (-1ULL)
#define ELFCORE_ADDR_ERR (-2ULL)
extern unsigned long long elfcorehdr_addr;
extern unsigned long long elfcorehdr_size;
+#ifdef CONFIG_CRASH_DUMP
extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size);
extern void elfcorehdr_free(unsigned long long addr);
extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos);
@@ -24,11 +24,10 @@ extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
unsigned long from, unsigned long pfn,
unsigned long size, pgprot_t prot);
-extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
- unsigned long, int);
-extern ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf,
- size_t csize, unsigned long offset,
- int userbuf);
+ssize_t copy_oldmem_page(struct iov_iter *i, unsigned long pfn, size_t csize,
+ unsigned long offset);
+ssize_t copy_oldmem_page_encrypted(struct iov_iter *iter, unsigned long pfn,
+ size_t csize, unsigned long offset);
void vmcore_cleanup(void);
@@ -89,16 +88,34 @@ static inline void vmcore_unusable(void)
elfcorehdr_addr = ELFCORE_ADDR_ERR;
}
-#define HAVE_OLDMEM_PFN_IS_RAM 1
-extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn));
-extern void unregister_oldmem_pfn_is_ram(void);
+/**
+ * struct vmcore_cb - driver callbacks for /proc/vmcore handling
+ * @pfn_is_ram: check whether a PFN really is RAM and should be accessed when
+ * reading the vmcore. Will return "true" if it is RAM or if the
+ * callback cannot tell. If any callback returns "false", it's not
+ * RAM and the page must not be accessed; zeroes should be
+ * indicated in the vmcore instead. For example, a ballooned page
+ * contains no data and reading from such a page will cause high
+ * load in the hypervisor.
+ * @next: List head to manage registered callbacks internally; initialized by
+ * register_vmcore_cb().
+ *
+ * vmcore callbacks allow drivers managing physical memory ranges to
+ * coordinate with vmcore handling code, for example, to prevent accessing
+ * physical memory ranges that should not be accessed when reading the vmcore,
+ * even though they are included in the vmcore header as memory ranges to dump.
+ */
+struct vmcore_cb {
+ bool (*pfn_is_ram)(struct vmcore_cb *cb, unsigned long pfn);
+ struct list_head next;
+};
+extern void register_vmcore_cb(struct vmcore_cb *cb);
+extern void unregister_vmcore_cb(struct vmcore_cb *cb);
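A registration sketch for a hypothetical balloon driver; balloon_pfn_is_inflated() is an assumed driver-internal helper:

	static bool balloon_vmcore_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
	{
		/* Inflated pages hold no data; report them as not-RAM. */
		return !balloon_pfn_is_inflated(pfn);
	}

	static struct vmcore_cb balloon_vmcore_cb = {
		.pfn_is_ram = balloon_vmcore_pfn_is_ram,
	};

	/* probe: */  register_vmcore_cb(&balloon_vmcore_cb);
	/* remove: */ unregister_vmcore_cb(&balloon_vmcore_cb);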
#else /* !CONFIG_CRASH_DUMP */
-static inline bool is_kdump_kernel(void) { return 0; }
+static inline bool is_kdump_kernel(void) { return false; }
#endif /* CONFIG_CRASH_DUMP */
-extern unsigned long saved_max_pfn;
-
/* Device Dump information to be filled by drivers */
struct vmcoredd_data {
char dump_name[VMCOREDD_MAX_NAME_BYTES]; /* Unique name of the dump */
@@ -117,13 +134,11 @@ static inline int vmcore_add_device_dump(struct vmcoredd_data *data)
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
#ifdef CONFIG_PROC_VMCORE
-ssize_t read_from_oldmem(char *buf, size_t count,
- u64 *ppos, int userbuf,
- bool encrypted);
+ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
+ u64 *ppos, bool encrypted);
#else
-static inline ssize_t read_from_oldmem(char *buf, size_t count,
- u64 *ppos, int userbuf,
- bool encrypted)
+static inline ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
+ u64 *ppos, bool encrypted)
{
return -EOPNOTSUPP;
}
diff --git a/include/linux/crc-itu-t.h b/include/linux/crc-itu-t.h
index a4367051e192..2f991a427ade 100644
--- a/include/linux/crc-itu-t.h
+++ b/include/linux/crc-itu-t.h
@@ -4,7 +4,7 @@
*
* Implements the standard CRC ITU-T V.41:
* Width 16
- * Poly 0x1021 (x^16 + x^12 + x^15 + 1)
+ * Poly 0x1021 (x^16 + x^12 + x^5 + 1)
* Init 0
*/
diff --git a/include/linux/crc64.h b/include/linux/crc64.h
index c756e65a1b58..e044c60d1e61 100644
--- a/include/linux/crc64.h
+++ b/include/linux/crc64.h
@@ -7,5 +7,12 @@
#include <linux/types.h>
+#define CRC64_ROCKSOFT_STRING "crc64-rocksoft"
+
u64 __pure crc64_be(u64 crc, const void *p, size_t len);
+u64 __pure crc64_rocksoft_generic(u64 crc, const void *p, size_t len);
+
+u64 crc64_rocksoft(const unsigned char *buffer, size_t len);
+u64 crc64_rocksoft_update(u64 crc, const unsigned char *buffer, size_t len);
+
#endif /* _LINUX_CRC64_H */
diff --git a/include/linux/crc8.h b/include/linux/crc8.h
index 13c8dabb0441..674045c59a04 100644
--- a/include/linux/crc8.h
+++ b/include/linux/crc8.h
@@ -96,6 +96,6 @@ void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial);
* Williams, Ross N., ross<at>ross.net
* (see URL http://www.ross.net/crc/download/crc_v3.txt).
*/
-u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc);
+u8 crc8(const u8 table[CRC8_TABLE_SIZE], const u8 *pdata, size_t nbytes, u8 crc);
#endif /* __CRC8_H_ */
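With @pdata now const-qualified, read-only buffers can be checksummed directly; a sketch (the polynomial is chosen for illustration only):

	static u8 frame_crc(const u8 *frame, size_t len)
	{
		static u8 table[CRC8_TABLE_SIZE];
		static bool table_ready;

		if (!table_ready) {
			crc8_populate_msb(table, 0x07);	/* example polynomial */
			table_ready = true;
		}
		return crc8(table, frame, len, CRC8_INIT_VALUE);
	}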
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 18639c069263..9ed9232af934 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -25,7 +25,7 @@ struct inode;
struct group_info {
atomic_t usage;
int ngroups;
- kgid_t gid[0];
+ kgid_t gid[];
} __randomize_layout;
/**
@@ -53,7 +53,6 @@ do { \
groups_free(group_info); \
} while (0)
-extern struct group_info init_groups;
#ifdef CONFIG_MULTIUSER
extern struct group_info *groups_alloc(int);
extern void groups_free(struct group_info *);
@@ -140,10 +139,11 @@ struct cred {
struct key *request_key_auth; /* assumed request_key authority */
#endif
#ifdef CONFIG_SECURITY
- void *security; /* subjective LSM security */
+ void *security; /* LSM security */
#endif
struct user_struct *user; /* real user ID subscription */
struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
+ struct ucounts *ucounts;
struct group_info *group_info; /* supplementary groups for euid/fsgid */
/* RCU deletion */
union {
@@ -170,12 +170,13 @@ extern int set_security_override_from_ctx(struct cred *, const char *);
extern int set_create_files_as(struct cred *, struct inode *);
extern int cred_fscmp(const struct cred *, const struct cred *);
extern void __init cred_init(void);
+extern int set_cred_ucounts(struct cred *);
/*
* check for validity of credentials
*/
#ifdef CONFIG_DEBUG_CREDENTIALS
-extern void __invalid_creds(const struct cred *, const char *, unsigned);
+extern void __noreturn __invalid_creds(const struct cred *, const char *, unsigned);
extern void __validate_process_creds(struct task_struct *,
const char *, unsigned);
@@ -370,6 +371,7 @@ static inline void put_cred(const struct cred *_cred)
#define task_uid(task) (task_cred_xxx((task), uid))
#define task_euid(task) (task_cred_xxx((task), euid))
+#define task_ucounts(task) (task_cred_xxx((task), ucounts))
#define current_cred_xxx(xxx) \
({ \
@@ -386,6 +388,7 @@ static inline void put_cred(const struct cred *_cred)
#define current_fsgid() (current_cred_xxx(fsgid))
#define current_cap() (current_cred_xxx(cap_effective))
#define current_user() (current_cred_xxx(user))
+#define current_ucounts() (current_cred_xxx(ucounts))
extern struct user_namespace init_user_ns;
#ifdef CONFIG_USER_NS
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h
index 54741295c70b..30dba392b730 100644
--- a/include/linux/crush/crush.h
+++ b/include/linux/crush/crush.h
@@ -17,7 +17,7 @@
* The algorithm was originally described in detail in this paper
* (although the algorithm has evolved somewhat since then):
*
- * http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf
+ * https://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf
*
* LGPL2
*/
@@ -87,7 +87,7 @@ struct crush_rule_mask {
struct crush_rule {
__u32 len;
struct crush_rule_mask mask;
- struct crush_rule_step steps[0];
+ struct crush_rule_step steps[];
};
#define crush_rule_size(len) (sizeof(struct crush_rule) + \
@@ -301,6 +301,12 @@ struct crush_map {
__u32 *choose_tries;
#else
+ /* device/bucket type id -> type name (CrushWrapper::type_map) */
+ struct rb_root type_names;
+
+ /* device/bucket id -> name (CrushWrapper::name_map) */
+ struct rb_root names;
+
/* CrushWrapper::choose_args */
struct rb_root choose_args;
#endif
@@ -340,6 +346,15 @@ struct crush_work_bucket {
struct crush_work {
struct crush_work_bucket **work; /* Per-bucket working store */
+#ifdef __KERNEL__
+ struct list_head item;
+#endif
};
+#ifdef __KERNEL__
+/* osdmap.c */
+void clear_crush_names(struct rb_root *root);
+void clear_choose_args(struct crush_map *c);
+#endif
+
#endif
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 763863dbc079..2324ab6f1846 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -16,9 +16,8 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/bug.h>
+#include <linux/refcount.h>
#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/uaccess.h>
#include <linux/completion.h>
/*
@@ -61,8 +60,8 @@
#define CRYPTO_ALG_ASYNC 0x00000080
/*
- * Set this bit if and only if the algorithm requires another algorithm of
- * the same type to handle corner cases.
+ * Set if the algorithm (or an algorithm which it uses) requires another
+ * algorithm of the same type to handle corner cases.
*/
#define CRYPTO_ALG_NEED_FALLBACK 0x00000100
@@ -102,6 +101,47 @@
#define CRYPTO_NOLOAD 0x00008000
/*
+ * The algorithm may allocate memory during request processing, i.e. during
+ * encryption, decryption, or hashing. Users can request an algorithm with this
+ * flag unset if they can't handle memory allocation failures.
+ *
+ * This flag is currently only implemented for algorithms of type "skcipher",
+ * "aead", "ahash", "shash", and "cipher". Algorithms of other types might not
+ * have this flag set even if they allocate memory.
+ *
+ * In some edge cases, algorithms can allocate memory regardless of this flag.
+ * To avoid these cases, users must obey the following usage constraints:
+ * skcipher:
+ * - The IV buffer and all scatterlist elements must be aligned to the
+ * algorithm's alignmask.
+ * - If the data were to be divided into chunks of size
+ * crypto_skcipher_walksize() (with any remainder going at the end), no
+ * chunk can cross a page boundary or a scatterlist element boundary.
+ * aead:
+ * - The IV buffer and all scatterlist elements must be aligned to the
+ * algorithm's alignmask.
+ * - The first scatterlist element must contain all the associated data,
+ * and its pages must be !PageHighMem.
+ * - If the plaintext/ciphertext were to be divided into chunks of size
+ * crypto_aead_walksize() (with the remainder going at the end), no chunk
+ * can cross a page boundary or a scatterlist element boundary.
+ * ahash:
+ * - The result buffer must be aligned to the algorithm's alignmask.
+ * - crypto_ahash_finup() must not be used unless the algorithm implements
+ * ->finup() natively.
+ */
+#define CRYPTO_ALG_ALLOCATES_MEMORY 0x00010000
+
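A sketch of how a user asks for an allocation-free implementation: the flag goes into the allocation mask with the corresponding type bit clear, so only algorithms without CRYPTO_ALG_ALLOCATES_MEMORY match:

	struct crypto_skcipher *tfm;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);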
+/*
+ * Mark an algorithm as a service implementation only usable by a
+ * template and never by a normal user of the kernel crypto API.
+ * This is intended to be used by algorithms that are themselves
+ * not FIPS-approved but may instead be used to implement parts of
+ * a FIPS-approved algorithm (e.g., dh vs. ffdhe2048(dh)).
+ */
+#define CRYPTO_ALG_FIPS_INTERNAL 0x00020000
+
+/*
* Transform masks and values (for crt_flags).
*/
#define CRYPTO_TFM_NEED_KEY 0x00000001
@@ -120,9 +160,12 @@
* The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
* declaration) is used to ensure that the crypto_tfm context structure is
* aligned correctly for the given architecture so that there are no alignment
- * faults for C data types. In particular, this is required on platforms such
- * as arm where pointers are 32-bit aligned but there are data types such as
- * u64 which require 64-bit alignment.
+ * faults for C data types. On architectures that support non-cache coherent
+ * DMA, such as ARM or arm64, it also takes into account the minimal alignment
+ * that is required to ensure that the context struct member does not share any
+ * cachelines with the rest of the struct. This is needed to ensure that cache
+ * maintenance for non-coherent DMA (cache invalidation in particular) does not
+ * affect data that may be accessed by the CPU concurrently.
*/
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
@@ -595,6 +638,8 @@ int crypto_has_alg(const char *name, u32 type, u32 mask);
struct crypto_tfm {
u32 crt_flags;
+
+ int node;
void (*exit)(struct crypto_tfm *tfm);
@@ -603,40 +648,10 @@ struct crypto_tfm {
void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};
-struct crypto_cipher {
- struct crypto_tfm base;
-};
-
struct crypto_comp {
struct crypto_tfm base;
};
-enum {
- CRYPTOA_UNSPEC,
- CRYPTOA_ALG,
- CRYPTOA_TYPE,
- CRYPTOA_U32,
- __CRYPTOA_MAX,
-};
-
-#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)
-
-/* Maximum number of (rtattr) parameters for each template. */
-#define CRYPTO_MAX_ATTRS 32
-
-struct crypto_attr_alg {
- char name[CRYPTO_MAX_ALG_NAME];
-};
-
-struct crypto_attr_type {
- u32 type;
- u32 mask;
-};
-
-struct crypto_attr_u32 {
- u32 num;
-};
-
/*
* Transform user interface.
*/
@@ -710,165 +725,6 @@ static inline unsigned int crypto_tfm_ctx_alignment(void)
return __alignof__(tfm->__crt_ctx);
}
-/**
- * DOC: Single Block Cipher API
- *
- * The single block cipher API is used with the ciphers of type
- * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
- *
- * Using the single block cipher API calls, operations with the basic cipher
- * primitive can be implemented. These cipher primitives exclude any block
- * chaining operations including IV handling.
- *
- * The purpose of this single block cipher API is to support the implementation
- * of templates or other concepts that only need to perform the cipher operation
- * on one block at a time. Templates invoke the underlying cipher primitive
- * block-wise and process either the input or the output data of these cipher
- * operations.
- */
-
-static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
-{
- return (struct crypto_cipher *)tfm;
-}
-
-/**
- * crypto_alloc_cipher() - allocate single block cipher handle
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * single block cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Allocate a cipher handle for a single block cipher. The returned struct
- * crypto_cipher is the cipher handle that is required for any subsequent API
- * invocation for that single block cipher.
- *
- * Return: allocated cipher handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
- */
-static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
- u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_CIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
-}
-
-static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
-{
- return &tfm->base;
-}
-
-/**
- * crypto_free_cipher() - zeroize and free the single block cipher handle
- * @tfm: cipher handle to be freed
- */
-static inline void crypto_free_cipher(struct crypto_cipher *tfm)
-{
- crypto_free_tfm(crypto_cipher_tfm(tfm));
-}
-
-/**
- * crypto_has_cipher() - Search for the availability of a single block cipher
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * single block cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Return: true when the single block cipher is known to the kernel crypto API;
- * false otherwise
- */
-static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_CIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return crypto_has_alg(alg_name, type, mask);
-}
-
-/**
- * crypto_cipher_blocksize() - obtain block size for cipher
- * @tfm: cipher handle
- *
- * The block size for the single block cipher referenced with the cipher handle
- * tfm is returned. The caller may use that information to allocate appropriate
- * memory for the data returned by the encryption or decryption operation
- *
- * Return: block size of cipher
- */
-static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
-{
- return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
-}
-
-static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
-}
-
-static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
-{
- return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
-}
-
-static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
- u32 flags)
-{
- crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
-}
-
-static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
- u32 flags)
-{
- crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
-}
-
-/**
- * crypto_cipher_setkey() - set key for cipher
- * @tfm: cipher handle
- * @key: buffer holding the key
- * @keylen: length of the key in bytes
- *
- * The caller provided key is set for the single block cipher referenced by the
- * cipher handle.
- *
- * Note, the key length determines the cipher type. Many block ciphers implement
- * different cipher modes depending on the key size, such as AES-128 vs AES-192
- * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
- * is performed.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-int crypto_cipher_setkey(struct crypto_cipher *tfm,
- const u8 *key, unsigned int keylen);
-
-/**
- * crypto_cipher_encrypt_one() - encrypt one block of plaintext
- * @tfm: cipher handle
- * @dst: points to the buffer that will be filled with the ciphertext
- * @src: buffer holding the plaintext to be encrypted
- *
- * Invoke the encryption operation of one block. The caller must ensure that
- * the plaintext and ciphertext buffers are at least one block in size.
- */
-void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
- u8 *dst, const u8 *src);
-
-/**
- * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
- * @tfm: cipher handle
- * @dst: points to the buffer that will be filled with the plaintext
- * @src: buffer holding the ciphertext to be decrypted
- *
- * Invoke the decryption operation of one block. The caller must ensure that
- * the plaintext and ciphertext buffers are at least one block in size.
- */
-void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
- u8 *dst, const u8 *src);
-
static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
return (struct crypto_comp *)tfm;
diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h
deleted file mode 100644
index f6ba4c3e60d7..000000000000
--- a/include/linux/cryptohash.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __CRYPTOHASH_H
-#define __CRYPTOHASH_H
-
-#include <uapi/linux/types.h>
-
-#define SHA_DIGEST_WORDS 5
-#define SHA_MESSAGE_BYTES (512 /*bits*/ / 8)
-#define SHA_WORKSPACE_WORDS 16
-
-void sha_init(__u32 *buf);
-void sha_transform(__u32 *digest, const char *data, __u32 *W);
-
-#endif
diff --git a/include/linux/ctype.h b/include/linux/ctype.h
index 363b004426db..bc95aef2219c 100644
--- a/include/linux/ctype.h
+++ b/include/linux/ctype.h
@@ -2,6 +2,8 @@
#ifndef _LINUX_CTYPE_H
#define _LINUX_CTYPE_H
+#include <linux/compiler.h>
+
/*
* NOTE! This ctype does not handle EOF like the standard C
* library is required to.
@@ -23,10 +25,6 @@ extern const unsigned char _ctype[];
#define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0)
#define isalpha(c) ((__ismask(c)&(_U|_L)) != 0)
#define iscntrl(c) ((__ismask(c)&(_C)) != 0)
-static inline int isdigit(int c)
-{
- return '0' <= c && c <= '9';
-}
#define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0)
#define islower(c) ((__ismask(c)&(_L)) != 0)
#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
@@ -39,6 +37,15 @@ static inline int isdigit(int c)
#define isascii(c) (((unsigned char)(c))<=0x7f)
#define toascii(c) (((unsigned char)(c))&0x7f)
+#if __has_builtin(__builtin_isdigit)
+#define isdigit(c) __builtin_isdigit(c)
+#else
+static inline int isdigit(int c)
+{
+ return '0' <= c && c <= '9';
+}
+#endif
+
static inline unsigned char __tolower(unsigned char c)
{
if (isupper(c))
diff --git a/include/linux/cuda.h b/include/linux/cuda.h
index 45bfe9d61271..daf3e6f98444 100644
--- a/include/linux/cuda.h
+++ b/include/linux/cuda.h
@@ -12,7 +12,7 @@
#include <uapi/linux/cuda.h>
-extern int find_via_cuda(void);
+extern int __init find_via_cuda(void);
extern int cuda_request(struct adb_request *req,
void (*done)(struct adb_request *), int nbytes, ...);
extern void cuda_poll(void);
diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h
deleted file mode 100644
index 05ee0f19448a..000000000000
--- a/include/linux/cyclades.h
+++ /dev/null
@@ -1,364 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Revision: 3.0 $$Date: 1998/11/02 14:20:59 $
- * linux/include/linux/cyclades.h
- *
- * This file was initially written by
- * Randolph Bentson <bentson@grieg.seaslug.org> and is maintained by
- * Ivan Passos <ivan@cyclades.com>.
- *
- * This file contains the general definitions for the cyclades.c driver
- *$Log: cyclades.h,v $
- *Revision 3.1 2002/01/29 11:36:16 henrique
- *added throttle field on struct cyclades_port to indicate whether the
- *port is throttled or not
- *
- *Revision 3.1 2000/04/19 18:52:52 ivan
- *converted address fields to unsigned long and added fields for physical
- *addresses on cyclades_card structure;
- *
- *Revision 3.0 1998/11/02 14:20:59 ivan
- *added nports field on cyclades_card structure;
- *
- *Revision 2.5 1998/08/03 16:57:01 ivan
- *added cyclades_idle_stats structure;
- *
- *Revision 2.4 1998/06/01 12:09:53 ivan
- *removed closing_wait2 from cyclades_port structure;
- *
- *Revision 2.3 1998/03/16 18:01:12 ivan
- *changes in the cyclades_port structure to get it closer to the
- *standard serial port structure;
- *added constants for new ioctls;
- *
- *Revision 2.2 1998/02/17 16:50:00 ivan
- *changes in the cyclades_port structure (addition of shutdown_wait and
- *chip_rev variables);
- *added constants for new ioctls and for CD1400 rev. numbers.
- *
- *Revision 2.1 1997/10/24 16:03:00 ivan
- *added rflow (which allows enabling the CD1400 special flow control
- *feature) and rtsdtr_inv (which allows DTR/RTS pin inversion) to
- *cyclades_port structure;
- *added Alpha support
- *
- *Revision 2.0 1997/06/30 10:30:00 ivan
- *added some new doorbell command constants related to IOCTLW and
- *UART error signaling
- *
- *Revision 1.8 1997/06/03 15:30:00 ivan
- *added constant ZFIRM_HLT
- *added constant CyPCI_Ze_win ( = 2 * Cy_PCI_Zwin)
- *
- *Revision 1.7 1997/03/26 10:30:00 daniel
- *new entries at the end of cyclades_port struct to reallocate
- *variables illegally allocated within card memory.
- *
- *Revision 1.6 1996/09/09 18:35:30 bentson
- *fold in changes for Cyclom-Z -- including structures for
- *communicating with board as well modest changes to original
- *structures to support new features.
- *
- *Revision 1.5 1995/11/13 21:13:31 bentson
- *changes suggested by Michael Chastain <mec@duracef.shout.net>
- *to support use of this file in non-kernel applications
- *
- *
- */
-#ifndef _LINUX_CYCLADES_H
-#define _LINUX_CYCLADES_H
-
-#include <uapi/linux/cyclades.h>
-
-
-/* Per card data structure */
-struct cyclades_card {
- void __iomem *base_addr;
- union {
- void __iomem *p9050;
- struct RUNTIME_9060 __iomem *p9060;
- } ctl_addr;
- struct BOARD_CTRL __iomem *board_ctrl; /* cyz specific */
- int irq;
- unsigned int num_chips; /* 0 if card absent, -1 if Z/PCI, else Y */
- unsigned int first_line; /* minor number of first channel on card */
- unsigned int nports; /* Number of ports in the card */
- int bus_index; /* address shift - 0 for ISA, 1 for PCI */
- int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */
- u32 hw_ver;
- spinlock_t card_lock;
- struct cyclades_port *ports;
-};
-
-/***************************************
- * Memory access functions/macros *
- * (required to support Alpha systems) *
- ***************************************/
-
-#define cy_writeb(port,val) do { writeb((val), (port)); mb(); } while (0)
-#define cy_writew(port,val) do { writew((val), (port)); mb(); } while (0)
-#define cy_writel(port,val) do { writel((val), (port)); mb(); } while (0)
-
-/*
- * Statistics counters
- */
-struct cyclades_icount {
- __u32 cts, dsr, rng, dcd, tx, rx;
- __u32 frame, parity, overrun, brk;
- __u32 buf_overrun;
-};
-
-/*
- * This is our internal structure for each serial port's state.
- *
- * Many fields are paralleled by the structure used by the serial_struct
- * structure.
- *
- * For definitions of the flags field, see tty.h
- */
-
-struct cyclades_port {
- int magic;
- struct tty_port port;
- struct cyclades_card *card;
- union {
- struct {
- void __iomem *base_addr;
- } cyy;
- struct {
- struct CH_CTRL __iomem *ch_ctrl;
- struct BUF_CTRL __iomem *buf_ctrl;
- } cyz;
- } u;
- int line;
- int flags; /* defined in tty.h */
- int type; /* UART type */
- int read_status_mask;
- int ignore_status_mask;
- int timeout;
- int xmit_fifo_size;
- int cor1,cor2,cor3,cor4,cor5;
- int tbpr,tco,rbpr,rco;
- int baud;
- int rflow;
- int rtsdtr_inv;
- int chip_rev;
- int custom_divisor;
- u8 x_char; /* to be pushed out ASAP */
- int breakon;
- int breakoff;
- int xmit_head;
- int xmit_tail;
- int xmit_cnt;
- int default_threshold;
- int default_timeout;
- unsigned long rflush_count;
- struct cyclades_monitor mon;
- struct cyclades_idle_stats idle_stats;
- struct cyclades_icount icount;
- struct completion shutdown_wait;
- int throttle;
-#ifdef CONFIG_CYZ_INTR
- struct timer_list rx_full_timer;
-#endif
-};
-
-#define CLOSING_WAIT_DELAY 30*HZ
-#define CY_CLOSING_WAIT_NONE ASYNC_CLOSING_WAIT_NONE
-#define CY_CLOSING_WAIT_INF ASYNC_CLOSING_WAIT_INF
-
-
-#define CyMAX_CHIPS_PER_CARD 8
-#define CyMAX_CHAR_FIFO 12
-#define CyPORTS_PER_CHIP 4
-#define CD1400_MAX_SPEED 115200
-
-#define CyISA_Ywin 0x2000
-
-#define CyPCI_Ywin 0x4000
-#define CyPCI_Yctl 0x80
-#define CyPCI_Zctl CTRL_WINDOW_SIZE
-#define CyPCI_Zwin 0x80000
-#define CyPCI_Ze_win (2 * CyPCI_Zwin)
-
-#define PCI_DEVICE_ID_MASK 0x06
-
-/**** CD1400 registers ****/
-
-#define CD1400_REV_G 0x46
-#define CD1400_REV_J 0x48
-
-#define CyRegSize 0x0400
-#define Cy_HwReset 0x1400
-#define Cy_ClrIntr 0x1800
-#define Cy_EpldRev 0x1e00
-
-/* Global Registers */
-
-#define CyGFRCR (0x40*2)
-#define CyRevE (44)
-#define CyCAR (0x68*2)
-#define CyCHAN_0 (0x00)
-#define CyCHAN_1 (0x01)
-#define CyCHAN_2 (0x02)
-#define CyCHAN_3 (0x03)
-#define CyGCR (0x4B*2)
-#define CyCH0_SERIAL (0x00)
-#define CyCH0_PARALLEL (0x80)
-#define CySVRR (0x67*2)
-#define CySRModem (0x04)
-#define CySRTransmit (0x02)
-#define CySRReceive (0x01)
-#define CyRICR (0x44*2)
-#define CyTICR (0x45*2)
-#define CyMICR (0x46*2)
-#define CyICR0 (0x00)
-#define CyICR1 (0x01)
-#define CyICR2 (0x02)
-#define CyICR3 (0x03)
-#define CyRIR (0x6B*2)
-#define CyTIR (0x6A*2)
-#define CyMIR (0x69*2)
-#define CyIRDirEq (0x80)
-#define CyIRBusy (0x40)
-#define CyIRUnfair (0x20)
-#define CyIRContext (0x1C)
-#define CyIRChannel (0x03)
-#define CyPPR (0x7E*2)
-#define CyCLOCK_20_1MS (0x27)
-#define CyCLOCK_25_1MS (0x31)
-#define CyCLOCK_25_5MS (0xf4)
-#define CyCLOCK_60_1MS (0x75)
-#define CyCLOCK_60_2MS (0xea)
-
-/* Virtual Registers */
-
-#define CyRIVR (0x43*2)
-#define CyTIVR (0x42*2)
-#define CyMIVR (0x41*2)
-#define CyIVRMask (0x07)
-#define CyIVRRxEx (0x07)
-#define CyIVRRxOK (0x03)
-#define CyIVRTxOK (0x02)
-#define CyIVRMdmOK (0x01)
-#define CyTDR (0x63*2)
-#define CyRDSR (0x62*2)
-#define CyTIMEOUT (0x80)
-#define CySPECHAR (0x70)
-#define CyBREAK (0x08)
-#define CyPARITY (0x04)
-#define CyFRAME (0x02)
-#define CyOVERRUN (0x01)
-#define CyMISR (0x4C*2)
-/* see CyMCOR_ and CyMSVR_ for bits*/
-#define CyEOSRR (0x60*2)
-
-/* Channel Registers */
-
-#define CyLIVR (0x18*2)
-#define CyMscsr (0x01)
-#define CyTdsr (0x02)
-#define CyRgdsr (0x03)
-#define CyRedsr (0x07)
-#define CyCCR (0x05*2)
-/* Format 1 */
-#define CyCHAN_RESET (0x80)
-#define CyCHIP_RESET (0x81)
-#define CyFlushTransFIFO (0x82)
-/* Format 2 */
-#define CyCOR_CHANGE (0x40)
-#define CyCOR1ch (0x02)
-#define CyCOR2ch (0x04)
-#define CyCOR3ch (0x08)
-/* Format 3 */
-#define CySEND_SPEC_1 (0x21)
-#define CySEND_SPEC_2 (0x22)
-#define CySEND_SPEC_3 (0x23)
-#define CySEND_SPEC_4 (0x24)
-/* Format 4 */
-#define CyCHAN_CTL (0x10)
-#define CyDIS_RCVR (0x01)
-#define CyENB_RCVR (0x02)
-#define CyDIS_XMTR (0x04)
-#define CyENB_XMTR (0x08)
-#define CySRER (0x06*2)
-#define CyMdmCh (0x80)
-#define CyRxData (0x10)
-#define CyTxRdy (0x04)
-#define CyTxMpty (0x02)
-#define CyNNDT (0x01)
-#define CyCOR1 (0x08*2)
-#define CyPARITY_NONE (0x00)
-#define CyPARITY_0 (0x20)
-#define CyPARITY_1 (0xA0)
-#define CyPARITY_E (0x40)
-#define CyPARITY_O (0xC0)
-#define Cy_1_STOP (0x00)
-#define Cy_1_5_STOP (0x04)
-#define Cy_2_STOP (0x08)
-#define Cy_5_BITS (0x00)
-#define Cy_6_BITS (0x01)
-#define Cy_7_BITS (0x02)
-#define Cy_8_BITS (0x03)
-#define CyCOR2 (0x09*2)
-#define CyIXM (0x80)
-#define CyTxIBE (0x40)
-#define CyETC (0x20)
-#define CyAUTO_TXFL (0x60)
-#define CyLLM (0x10)
-#define CyRLM (0x08)
-#define CyRtsAO (0x04)
-#define CyCtsAE (0x02)
-#define CyDsrAE (0x01)
-#define CyCOR3 (0x0A*2)
-#define CySPL_CH_DRANGE (0x80) /* special character detect range */
-#define CySPL_CH_DET1 (0x40) /* enable special character detection
- on SCHR4-SCHR3 */
-#define CyFL_CTRL_TRNSP (0x20) /* Flow Control Transparency */
-#define CySPL_CH_DET2 (0x10) /* Enable special character detection
- on SCHR2-SCHR1 */
-#define CyREC_FIFO (0x0F) /* Receive FIFO threshold */
-#define CyCOR4 (0x1E*2)
-#define CyCOR5 (0x1F*2)
-#define CyCCSR (0x0B*2)
-#define CyRxEN (0x80)
-#define CyRxFloff (0x40)
-#define CyRxFlon (0x20)
-#define CyTxEN (0x08)
-#define CyTxFloff (0x04)
-#define CyTxFlon (0x02)
-#define CyRDCR (0x0E*2)
-#define CySCHR1 (0x1A*2)
-#define CySCHR2 (0x1B*2)
-#define CySCHR3 (0x1C*2)
-#define CySCHR4 (0x1D*2)
-#define CySCRL (0x22*2)
-#define CySCRH (0x23*2)
-#define CyLNC (0x24*2)
-#define CyMCOR1 (0x15*2)
-#define CyMCOR2 (0x16*2)
-#define CyRTPR (0x21*2)
-#define CyMSVR1 (0x6C*2)
-#define CyMSVR2 (0x6D*2)
-#define CyANY_DELTA (0xF0)
-#define CyDSR (0x80)
-#define CyCTS (0x40)
-#define CyRI (0x20)
-#define CyDCD (0x10)
-#define CyDTR (0x02)
-#define CyRTS (0x01)
-#define CyPVSR (0x6F*2)
-#define CyRBPR (0x78*2)
-#define CyRCOR (0x7C*2)
-#define CyTBPR (0x72*2)
-#define CyTCOR (0x76*2)
-
-/* Custom Registers */
-
-#define CyPLX_VER (0x3400)
-#define PLX_9050 0x0b
-#define PLX_9060 0x0c
-#define PLX_9080 0x0d
-
-/***************************************************************************/
-
-#endif /* _LINUX_CYCLADES_H */
diff --git a/include/linux/damon.h b/include/linux/damon.h
new file mode 100644
index 000000000000..620ada094c3b
--- /dev/null
+++ b/include/linux/damon.h
@@ -0,0 +1,571 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DAMON API
+ *
+ * Author: SeongJae Park <sjpark@amazon.de>
+ */
+
+#ifndef _DAMON_H_
+#define _DAMON_H_
+
+#include <linux/mutex.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+#include <linux/random.h>
+
+/* Minimal region size. Every damon_region is aligned by this. */
+#define DAMON_MIN_REGION PAGE_SIZE
+/* Max priority score for DAMON-based operation schemes */
+#define DAMOS_MAX_SCORE (99)
+
+/* Get a random number in [l, r) */
+static inline unsigned long damon_rand(unsigned long l, unsigned long r)
+{
+ return l + prandom_u32_max(r - l);
+}
+
+/**
+ * struct damon_addr_range - Represents an address region of [@start, @end).
+ * @start: Start address of the region (inclusive).
+ * @end: End address of the region (exclusive).
+ */
+struct damon_addr_range {
+ unsigned long start;
+ unsigned long end;
+};
+
+/**
+ * struct damon_region - Represents a monitoring target region.
+ * @ar: The address range of the region.
+ * @sampling_addr: Address of the sample for the next access check.
+ * @nr_accesses: Access frequency of this region.
+ * @list: List head for siblings.
+ * @age: Age of this region.
+ *
+ * @age is initially zero, increased for each aggregation interval, and reset
+ * to zero again if the access frequency is significantly changed. If two
+ * regions are merged into a new region, both @nr_accesses and @age of the new
+ * region are set as region size-weighted average of those of the two regions.
+ */
+struct damon_region {
+ struct damon_addr_range ar;
+ unsigned long sampling_addr;
+ unsigned int nr_accesses;
+ struct list_head list;
+
+ unsigned int age;
+/* private: Internal value for age calculation. */
+ unsigned int last_nr_accesses;
+};
+
+/**
+ * struct damon_target - Represents a monitoring target.
+ * @pid: The PID of the virtual address space to monitor.
+ * @nr_regions: Number of monitoring target regions of this target.
+ * @regions_list: Head of the monitoring target regions of this target.
+ * @list: List head for siblings.
+ *
+ * Each monitoring context could have multiple targets. For example, a context
+ * for virtual memory address spaces could have multiple target processes. The
+ * @pid should be set for appropriate &struct damon_operations including the
+ * virtual address spaces monitoring operations.
+ */
+struct damon_target {
+ struct pid *pid;
+ unsigned int nr_regions;
+ struct list_head regions_list;
+ struct list_head list;
+};
+
+/**
+ * enum damos_action - Represents an action of a Data Access Monitoring-based
+ * Operation Scheme.
+ *
+ * @DAMOS_WILLNEED: Call ``madvise()`` for the region with MADV_WILLNEED.
+ * @DAMOS_COLD: Call ``madvise()`` for the region with MADV_COLD.
+ * @DAMOS_PAGEOUT: Call ``madvise()`` for the region with MADV_PAGEOUT.
+ * @DAMOS_HUGEPAGE: Call ``madvise()`` for the region with MADV_HUGEPAGE.
+ * @DAMOS_NOHUGEPAGE: Call ``madvise()`` for the region with MADV_NOHUGEPAGE.
+ * @DAMOS_LRU_PRIO: Prioritize the region on its LRU lists.
+ * @DAMOS_LRU_DEPRIO: Deprioritize the region on its LRU lists.
+ * @DAMOS_STAT: Do nothing but count the stat.
+ * @NR_DAMOS_ACTIONS: Total number of DAMOS actions
+ */
+enum damos_action {
+ DAMOS_WILLNEED,
+ DAMOS_COLD,
+ DAMOS_PAGEOUT,
+ DAMOS_HUGEPAGE,
+ DAMOS_NOHUGEPAGE,
+ DAMOS_LRU_PRIO,
+ DAMOS_LRU_DEPRIO,
+ DAMOS_STAT, /* Do nothing but only record the stat */
+ NR_DAMOS_ACTIONS,
+};
+
+/**
+ * struct damos_quota - Controls the aggressiveness of the given scheme.
+ * @ms: Maximum milliseconds that the scheme can use.
+ * @sz: Maximum bytes of memory that the action can be applied to.
+ * @reset_interval: Charge reset interval in milliseconds.
+ *
+ * @weight_sz: Weight of the region's size for prioritization.
+ * @weight_nr_accesses: Weight of the region's nr_accesses for prioritization.
+ * @weight_age: Weight of the region's age for prioritization.
+ *
+ * To avoid consuming too much CPU time or IO resources for applying the
+ * &struct damos->action to large memory, DAMON allows users to set time and/or
+ * size quotas. The quotas can be set by writing non-zero values to &ms and
+ * &sz, respectively. If the time quota is set, DAMON tries to use only up to
+ * &ms milliseconds within &reset_interval for applying the action. If the
+ * size quota is set, DAMON tries to apply the action only up to &sz bytes
+ * within &reset_interval.
+ *
+ * Internally, the time quota is transformed to a size quota using estimated
+ * throughput of the scheme's action. DAMON then compares it against &sz and
+ * uses the smaller one as the effective quota.
+ *
+ * For selecting regions within the quota, DAMON prioritizes current scheme's
+ * target memory regions using the &struct damon_operations->get_scheme_score.
+ * You could customize the prioritization logic by setting &weight_sz,
+ * &weight_nr_accesses, and &weight_age, because monitoring operations are
+ * encouraged to respect those.
+ */
+struct damos_quota {
+ unsigned long ms;
+ unsigned long sz;
+ unsigned long reset_interval;
+
+ unsigned int weight_sz;
+ unsigned int weight_nr_accesses;
+ unsigned int weight_age;
+
+/* private: */
+ /* For throughput estimation */
+ unsigned long total_charged_sz;
+ unsigned long total_charged_ns;
+
+ unsigned long esz; /* Effective size quota in bytes */
+
+ /* For charging the quota */
+ unsigned long charged_sz;
+ unsigned long charged_from;
+ struct damon_target *charge_target_from;
+ unsigned long charge_addr_from;
+
+ /* For prioritization */
+ unsigned long histogram[DAMOS_MAX_SCORE + 1];
+ unsigned int min_score;
+};
+
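An illustrative quota (all numbers hypothetical): cap the scheme at 10 ms of work or 8 MiB of memory per second, prioritizing regions by access count and age:

	struct damos_quota quota = {
		.ms = 10,			/* at most 10 ms of work ... */
		.sz = 8 * 1024 * 1024,		/* ... or 8 MiB of memory ... */
		.reset_interval = 1000,		/* ... per 1000 ms window */
		.weight_sz = 0,
		.weight_nr_accesses = 1,
		.weight_age = 1,
	};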
+/**
+ * enum damos_wmark_metric - Represents the watermark metric.
+ *
+ * @DAMOS_WMARK_NONE: Ignore the watermarks of the given scheme.
+ * @DAMOS_WMARK_FREE_MEM_RATE: Free memory rate of the system in [0,1000].
+ * @NR_DAMOS_WMARK_METRICS: Total number of DAMOS watermark metrics
+ */
+enum damos_wmark_metric {
+ DAMOS_WMARK_NONE,
+ DAMOS_WMARK_FREE_MEM_RATE,
+ NR_DAMOS_WMARK_METRICS,
+};
+
+/**
+ * struct damos_watermarks - Controls when a given scheme should be activated.
+ * @metric: Metric for the watermarks.
+ * @interval: Watermarks check time interval in microseconds.
+ * @high: High watermark.
+ * @mid: Middle watermark.
+ * @low: Low watermark.
+ *
+ * If &metric is &DAMOS_WMARK_NONE, the scheme is always active. Being active
+ * means DAMON does monitoring and applying the action of the scheme to
+ * appropriate memory regions. Else, DAMON checks &metric of the system for at
+ * least every &interval microseconds and works as below.
+ *
+ * If &metric is higher than &high, the scheme is inactivated. If &metric is
+ * between &mid and &low, the scheme is activated. If &metric is lower than
+ * &low, the scheme is inactivated.
+ */
+struct damos_watermarks {
+ enum damos_wmark_metric metric;
+ unsigned long interval;
+ unsigned long high;
+ unsigned long mid;
+ unsigned long low;
+
+/* private: */
+ bool activated;
+};
+
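An illustrative configuration (thresholds hypothetical): check every 5 seconds and keep the scheme active only while the free memory rate sits between the low and mid marks:

	struct damos_watermarks wmarks = {
		.metric = DAMOS_WMARK_FREE_MEM_RATE,	/* in [0, 1000] */
		.interval = 5 * 1000 * 1000,		/* 5 s, in microseconds */
		.high = 500,	/* inactive above 50% free memory */
		.mid = 400,	/* active once free memory drops below 40% */
		.low = 50,	/* inactive again below 5% free memory */
	};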
+/**
+ * struct damos_stat - Statistics on a given scheme.
+ * @nr_tried: Total number of regions that the scheme was tried to be applied to.
+ * @sz_tried: Total size of regions that the scheme was tried to be applied to.
+ * @nr_applied: Total number of regions that the scheme was applied to.
+ * @sz_applied: Total size of regions that the scheme was applied to.
+ * @qt_exceeds: Total number of times the quota of the scheme has been exceeded.
+ */
+struct damos_stat {
+ unsigned long nr_tried;
+ unsigned long sz_tried;
+ unsigned long nr_applied;
+ unsigned long sz_applied;
+ unsigned long qt_exceeds;
+};
+
+/**
+ * struct damos_access_pattern - Target access pattern of the given scheme.
+ * @min_sz_region: Minimum size of target regions.
+ * @max_sz_region: Maximum size of target regions.
+ * @min_nr_accesses: Minimum ``->nr_accesses`` of target regions.
+ * @max_nr_accesses: Maximum ``->nr_accesses`` of target regions.
+ * @min_age_region: Minimum age of target regions.
+ * @max_age_region: Maximum age of target regions.
+ */
+struct damos_access_pattern {
+ unsigned long min_sz_region;
+ unsigned long max_sz_region;
+ unsigned int min_nr_accesses;
+ unsigned int max_nr_accesses;
+ unsigned int min_age_region;
+ unsigned int max_age_region;
+};
+
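An illustrative pattern (thresholds hypothetical) matching regions of any size that have seen no accesses for at least 50 aggregation intervals:

	struct damos_access_pattern cold = {
		.min_sz_region = DAMON_MIN_REGION,
		.max_sz_region = ULONG_MAX,
		.min_nr_accesses = 0,
		.max_nr_accesses = 0,		/* no observed accesses ... */
		.min_age_region = 50,		/* ... for >= 50 intervals */
		.max_age_region = UINT_MAX,
	};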
+/**
+ * struct damos - Represents a Data Access Monitoring-based Operation Scheme.
+ * @pattern: Access pattern of target regions.
+ * @action: &damos_action to be applied to the target regions.
+ * @quota: Control the aggressiveness of this scheme.
+ * @wmarks: Watermarks for automated (in)activation of this scheme.
+ * @stat: Statistics of this scheme.
+ * @list: List head for siblings.
+ *
+ * For each aggregation interval, DAMON finds regions which fit in the
+ * &pattern and applies &action to those. To avoid consuming too much
+ * CPU time or IO resources for the &action, &quota is used.
+ *
+ * To do the work only when needed, schemes can be activated for specific
+ * system situations using &wmarks. If all schemes that are registered to
+ * the monitoring context (&struct damon_ctx) are inactive, DAMON stops
+ * monitoring as well, and just repeatedly checks the watermarks.
+ *
+ * After applying the &action to each region, &stat is updated to reflect the
+ * number of regions and the total size of regions that the &action was
+ * applied to.
+ */
+struct damos {
+ struct damos_access_pattern pattern;
+ enum damos_action action;
+ struct damos_quota quota;
+ struct damos_watermarks wmarks;
+ struct damos_stat stat;
+ struct list_head list;
+};
+
+/**
+ * enum damon_ops_id - Identifier for each monitoring operations implementation
+ *
+ * @DAMON_OPS_VADDR: Monitoring operations for virtual address spaces
+ * @DAMON_OPS_FVADDR: Monitoring operations for only fixed ranges of virtual
+ * address spaces
+ * @DAMON_OPS_PADDR: Monitoring operations for the physical address space
+ * @NR_DAMON_OPS: Number of monitoring operations implementations
+ */
+enum damon_ops_id {
+ DAMON_OPS_VADDR,
+ DAMON_OPS_FVADDR,
+ DAMON_OPS_PADDR,
+ NR_DAMON_OPS,
+};
+
+struct damon_ctx;
+
+/**
+ * struct damon_operations - Monitoring operations for given use cases.
+ *
+ * @id: Identifier of this operations set.
+ * @init: Initialize operations-related data structures.
+ * @update: Update operations-related data structures.
+ * @prepare_access_checks: Prepare next access check of target regions.
+ * @check_accesses: Check the accesses to target regions.
+ * @reset_aggregated: Reset aggregated accesses monitoring results.
+ * @get_scheme_score: Get the score of a region for a scheme.
+ * @apply_scheme: Apply a DAMON-based operation scheme.
+ * @target_valid: Determine if the target is valid.
+ * @cleanup: Clean up the context.
+ *
+ * DAMON can be extended for various address spaces and usages. For this,
+ * users should register the low-level operations for their target address
+ * space and use case via &damon_ctx.ops.  Then, the monitoring thread
+ * (&damon_ctx.kdamond) calls @init and @prepare_access_checks before starting
+ * the monitoring, @update after each &damon_ctx.ops_update_interval, and
+ * @check_accesses, @target_valid and @prepare_access_checks after each
+ * &damon_ctx.sample_interval. Finally, @reset_aggregated is called after each
+ * &damon_ctx.aggr_interval.
+ *
+ * Each &struct damon_operations instance having valid @id can be registered
+ * via damon_register_ops() and selected by damon_select_ops() later.
+ * @init should initialize operations-related data structures. For example,
+ * this could be used to construct proper monitoring target regions and link
+ * those to @damon_ctx.adaptive_targets.
+ * @update should update the operations-related data structures. For example,
+ * this could be used to update monitoring target regions for current status.
+ * @prepare_access_checks should manipulate the monitoring regions to be
+ * prepared for the next access check.
+ * @check_accesses should check the accesses to each region that were made
+ * since the last preparation and update the number of observed accesses of
+ * each region.  It should also return the maximum number of accesses observed
+ * as a result of its update.  The value will be used as a threshold for the
+ * regions adjustment.
+ * @reset_aggregated should reset the access monitoring results that were
+ * aggregated by @check_accesses.
+ * @get_scheme_score should return the priority score of a region for a scheme
+ * as an integer in [0, &DAMOS_MAX_SCORE].
+ * @apply_scheme is called from @kdamond when a region matching a user-provided
+ * DAMON-based operation scheme is found.  It should apply the scheme's action
+ * to the region and return the number of bytes of the region to which the
+ * action was successfully applied.
+ * @target_valid should check whether the target is still valid for the
+ * monitoring.
+ * @cleanup is called from @kdamond just before its termination.
+ */
+struct damon_operations {
+ enum damon_ops_id id;
+ void (*init)(struct damon_ctx *context);
+ void (*update)(struct damon_ctx *context);
+ void (*prepare_access_checks)(struct damon_ctx *context);
+ unsigned int (*check_accesses)(struct damon_ctx *context);
+ void (*reset_aggregated)(struct damon_ctx *context);
+ int (*get_scheme_score)(struct damon_ctx *context,
+ struct damon_target *t, struct damon_region *r,
+ struct damos *scheme);
+ unsigned long (*apply_scheme)(struct damon_ctx *context,
+ struct damon_target *t, struct damon_region *r,
+ struct damos *scheme);
+ bool (*target_valid)(struct damon_target *t);
+ void (*cleanup)(struct damon_ctx *context);
+};
+
+/**
+ * struct damon_callback - Monitoring events notification callbacks.
+ *
+ * @before_start: Called before starting the monitoring.
+ * @after_wmarks_check: Called after each schemes' watermarks check.
+ * @after_sampling: Called after each sampling.
+ * @after_aggregation: Called after each aggregation.
+ * @before_terminate: Called before terminating the monitoring.
+ * @private: User private data.
+ *
+ * The monitoring thread (&damon_ctx.kdamond) calls @before_start and
+ * @before_terminate just before starting and finishing the monitoring,
+ * respectively. Therefore, those are good places for installing and cleaning
+ * @private.
+ *
+ * The monitoring thread calls @after_wmarks_check after each DAMON-based
+ * operation schemes' watermarks check. If users need to make changes to the
+ * attributes of the monitoring context while it is deactivated due to the
+ * watermarks, this is a good place to do so.
+ *
+ * The monitoring thread calls @after_sampling and @after_aggregation for each
+ * of the sampling intervals and aggregation intervals, respectively.
+ * Therefore, users can safely access the monitoring results without additional
+ * protection.  For this reason, users are recommended to use these callbacks
+ * for accessing the results.
+ *
+ * If any callback returns non-zero, monitoring stops.
+ */
+struct damon_callback {
+ void *private;
+
+ int (*before_start)(struct damon_ctx *context);
+ int (*after_wmarks_check)(struct damon_ctx *context);
+ int (*after_sampling)(struct damon_ctx *context);
+ int (*after_aggregation)(struct damon_ctx *context);
+ void (*before_terminate)(struct damon_ctx *context);
+};
+
+/**
+ * struct damon_attrs - Monitoring attributes for accuracy/overhead control.
+ *
+ * @sample_interval: The time between access samplings.
+ * @aggr_interval: The time between monitor results aggregations.
+ * @ops_update_interval: The time between monitoring operations updates.
+ * @min_nr_regions: The minimum number of adaptive monitoring
+ * regions.
+ * @max_nr_regions: The maximum number of adaptive monitoring
+ * regions.
+ *
+ * For each @sample_interval, DAMON checks whether each region is accessed or
+ * not. It aggregates and keeps the access information (number of accesses to
+ * each region) for @aggr_interval time. DAMON also checks whether the target
+ * memory regions need update (e.g., by ``mmap()`` calls from the application,
+ * in case of virtual memory monitoring) and applies the changes for each
+ * @ops_update_interval.  All time intervals are in microseconds.
+ * Please refer to &struct damon_operations and &struct damon_callback for more
+ * detail.
+ */
+struct damon_attrs {
+ unsigned long sample_interval;
+ unsigned long aggr_interval;
+ unsigned long ops_update_interval;
+ unsigned long min_nr_regions;
+ unsigned long max_nr_regions;
+};
+
+/**
+ * struct damon_ctx - Represents a context for each monitoring. This is the
+ * main interface that allows users to set the attributes and get the results
+ * of the monitoring.
+ *
+ * @attrs: Monitoring attributes for accuracy/overhead control.
+ * @kdamond: Kernel thread who does the monitoring.
+ * @kdamond_lock: Mutex for the synchronizations with @kdamond.
+ *
+ * For each monitoring context, one kernel thread for the monitoring is
+ * created. The pointer to the thread is stored in @kdamond.
+ *
+ * Once started, the monitoring thread runs until explicitly required to be
+ * terminated or every monitoring target is invalid. The validity of the
+ * targets is checked via the &damon_operations.target_valid of @ops. The
+ * termination can also be explicitly requested by calling damon_stop().
+ * The thread sets @kdamond to NULL when it terminates. Therefore, users can
+ * know whether the monitoring is ongoing or terminated by reading @kdamond.
+ * Reads and writes to @kdamond from outside of the monitoring thread must
+ * be protected by @kdamond_lock.
+ *
+ * Note that the monitoring thread protects only @kdamond via @kdamond_lock.
+ * Accesses to other fields must be synchronized by the users themselves.
+ *
+ * @ops: Set of monitoring operations for given use cases.
+ * @callback: Set of callbacks for monitoring events notifications.
+ *
+ * @adaptive_targets: Head of monitoring targets (&damon_target) list.
+ * @schemes: Head of schemes (&damos) list.
+ */
+struct damon_ctx {
+ struct damon_attrs attrs;
+
+/* private: internal use only */
+ struct timespec64 last_aggregation;
+ struct timespec64 last_ops_update;
+
+/* public: */
+ struct task_struct *kdamond;
+ struct mutex kdamond_lock;
+
+ struct damon_operations ops;
+ struct damon_callback callback;
+
+ struct list_head adaptive_targets;
+ struct list_head schemes;
+};
+
+static inline struct damon_region *damon_next_region(struct damon_region *r)
+{
+ return container_of(r->list.next, struct damon_region, list);
+}
+
+static inline struct damon_region *damon_prev_region(struct damon_region *r)
+{
+ return container_of(r->list.prev, struct damon_region, list);
+}
+
+static inline struct damon_region *damon_last_region(struct damon_target *t)
+{
+ return list_last_entry(&t->regions_list, struct damon_region, list);
+}
+
+static inline struct damon_region *damon_first_region(struct damon_target *t)
+{
+ return list_first_entry(&t->regions_list, struct damon_region, list);
+}
+
+static inline unsigned long damon_sz_region(struct damon_region *r)
+{
+ return r->ar.end - r->ar.start;
+}
+
+#define damon_for_each_region(r, t) \
+ list_for_each_entry(r, &t->regions_list, list)
+
+#define damon_for_each_region_from(r, t) \
+ list_for_each_entry_from(r, &t->regions_list, list)
+
+#define damon_for_each_region_safe(r, next, t) \
+ list_for_each_entry_safe(r, next, &t->regions_list, list)
+
+#define damon_for_each_target(t, ctx) \
+ list_for_each_entry(t, &(ctx)->adaptive_targets, list)
+
+#define damon_for_each_target_safe(t, next, ctx) \
+ list_for_each_entry_safe(t, next, &(ctx)->adaptive_targets, list)
+
+#define damon_for_each_scheme(s, ctx) \
+ list_for_each_entry(s, &(ctx)->schemes, list)
+
+#define damon_for_each_scheme_safe(s, next, ctx) \
+ list_for_each_entry_safe(s, next, &(ctx)->schemes, list)
+
+#ifdef CONFIG_DAMON
+
+struct damon_region *damon_new_region(unsigned long start, unsigned long end);
+
+/*
+ * Add a region between two other regions
+ */
+static inline void damon_insert_region(struct damon_region *r,
+ struct damon_region *prev, struct damon_region *next,
+ struct damon_target *t)
+{
+ __list_add(&r->list, &prev->list, &next->list);
+ t->nr_regions++;
+}
+
+void damon_add_region(struct damon_region *r, struct damon_target *t);
+void damon_destroy_region(struct damon_region *r, struct damon_target *t);
+int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
+ unsigned int nr_ranges);
+
+struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
+ enum damos_action action, struct damos_quota *quota,
+ struct damos_watermarks *wmarks);
+void damon_add_scheme(struct damon_ctx *ctx, struct damos *s);
+void damon_destroy_scheme(struct damos *s);
+
+struct damon_target *damon_new_target(void);
+void damon_add_target(struct damon_ctx *ctx, struct damon_target *t);
+bool damon_targets_empty(struct damon_ctx *ctx);
+void damon_free_target(struct damon_target *t);
+void damon_destroy_target(struct damon_target *t);
+unsigned int damon_nr_regions(struct damon_target *t);
+
+struct damon_ctx *damon_new_ctx(void);
+void damon_destroy_ctx(struct damon_ctx *ctx);
+int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs);
+void damon_set_schemes(struct damon_ctx *ctx,
+ struct damos **schemes, ssize_t nr_schemes);
+int damon_nr_running_ctxs(void);
+bool damon_is_registered_ops(enum damon_ops_id id);
+int damon_register_ops(struct damon_operations *ops);
+int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id);
+
+static inline bool damon_target_has_pid(const struct damon_ctx *ctx)
+{
+ return ctx->ops.id == DAMON_OPS_VADDR || ctx->ops.id == DAMON_OPS_FVADDR;
+}
+
+int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
+int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
+
+int damon_set_region_biggest_system_ram_default(struct damon_target *t,
+ unsigned long *start, unsigned long *end);
+
+#endif /* CONFIG_DAMON */
+
+#endif /* _DAMON_H */
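
Taken together, the declarations above form DAMON's in-kernel programmatic interface. What follows is a minimal, non-authoritative sketch of how a module might wire them up for physical address space monitoring, assuming CONFIG_DAMON=y; the interval values are illustrative, and the ->nr_accesses field of the region (defined earlier in this header) is assumed from the ``->nr_accesses`` references in the kernel-doc above.

/*
 * Sketch: create a context, monitor the biggest system RAM region via the
 * PADDR operations set, and report results from the aggregation callback.
 */
#include <linux/damon.h>

static struct damon_ctx *my_ctx;

static int my_after_aggregation(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			pr_info("region [%lx, %lx) nr_accesses: %u\n",
				r->ar.start, r->ar.end, r->nr_accesses);
	}
	return 0;	/* a non-zero return would stop the monitoring */
}

static int __init my_damon_init(void)
{
	struct damon_attrs attrs = {
		.sample_interval = 5000,	/* 5 ms, illustrative */
		.aggr_interval = 100000,	/* 100 ms */
		.ops_update_interval = 1000000,	/* 1 s */
		.min_nr_regions = 10,
		.max_nr_regions = 1000,
	};
	struct damon_target *t;
	unsigned long start = 0, end = 0;
	int err = -ENOMEM;

	my_ctx = damon_new_ctx();
	if (!my_ctx)
		return err;
	t = damon_new_target();
	if (!t)
		goto out;
	damon_add_target(my_ctx, t);
	err = damon_set_region_biggest_system_ram_default(t, &start, &end);
	if (err)
		goto out;
	err = damon_select_ops(my_ctx, DAMON_OPS_PADDR);
	if (err)
		goto out;
	err = damon_set_attrs(my_ctx, &attrs);
	if (err)
		goto out;
	my_ctx->callback.after_aggregation = my_after_aggregation;
	err = damon_start(&my_ctx, 1, true);
out:
	if (err)
		damon_destroy_ctx(my_ctx);	/* tear down on failure */
	return err;
}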
diff --git a/include/linux/dasd_mod.h b/include/linux/dasd_mod.h
new file mode 100644
index 000000000000..14e6cf8c6267
--- /dev/null
+++ b/include/linux/dasd_mod.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef DASD_MOD_H
+#define DASD_MOD_H
+
+#include <asm/dasd.h>
+
+struct gendisk;
+
+extern int dasd_biodasdinfo(struct gendisk *disk, dasd_information2_t *info);
+
+#endif
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 328c2dbb4409..ba985333e26b 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -5,15 +5,20 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
-#include <asm/pgtable.h>
-
-/* Flag for synchronous flush */
-#define DAXDEV_F_SYNC (1UL << 0)
typedef unsigned long dax_entry_t;
-struct iomap_ops;
struct dax_device;
+struct gendisk;
+struct iomap_ops;
+struct iomap_iter;
+struct iomap;
+
+enum dax_access_mode {
+ DAX_ACCESS,
+ DAX_RECOVERY_WRITE,
+};
+
struct dax_operations {
/*
* direct_access: translate a device-relative
@@ -21,41 +26,46 @@ struct dax_operations {
* number of pages available for DAX at that pfn.
*/
long (*direct_access)(struct dax_device *, pgoff_t, long,
- void **, pfn_t *);
+ enum dax_access_mode, void **, pfn_t *);
/*
* Validate whether this device is usable as an fsdax backing
* device.
*/
bool (*dax_supported)(struct dax_device *, struct block_device *, int,
sector_t, sector_t);
- /* copy_from_iter: required operation for fs-dax direct-i/o */
- size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
- struct iov_iter *);
- /* copy_to_iter: required operation for fs-dax direct-i/o */
- size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
- struct iov_iter *);
+ /* zero_page_range: required operation. Zero page range */
+ int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
+ /*
+ * recovery_write: recover a poisoned range by DAX device driver
+ * capable of clearing poison.
+ */
+ size_t (*recovery_write)(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *iter);
};
-extern struct attribute_group dax_attribute_group;
+struct dax_holder_operations {
+ /*
+	 * notify_failure - notify a memory failure event to the inner holder device
+ * @dax_dev: the dax device which contains the holder
+ * @offset: offset on this dax device where memory failure occurs
+ * @len: length of this memory failure event
+ * @flags: action flags for memory failure handler
+ */
+ int (*notify_failure)(struct dax_device *dax_dev, u64 offset,
+ u64 len, int mf_flags);
+};
#if IS_ENABLED(CONFIG_DAX)
-struct dax_device *dax_get_by_host(const char *host);
-struct dax_device *alloc_dax(void *private, const char *host,
- const struct dax_operations *ops, unsigned long flags);
+struct dax_device *alloc_dax(void *private, const struct dax_operations *ops);
+void *dax_holder(struct dax_device *dax_dev);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
-bool __dax_synchronous(struct dax_device *dax_dev);
-static inline bool dax_synchronous(struct dax_device *dax_dev)
-{
- return __dax_synchronous(dax_dev);
-}
-void __set_dax_synchronous(struct dax_device *dax_dev);
-static inline void set_dax_synchronous(struct dax_device *dax_dev)
-{
- __set_dax_synchronous(dax_dev);
-}
+bool dax_synchronous(struct dax_device *dax_dev);
+void set_dax_synchronous(struct dax_device *dax_dev);
+size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i);
/*
* Check if given mapping is supported by the file / underlying device.
*/
@@ -69,12 +79,12 @@ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
return dax_synchronous(dax_dev);
}
#else
-static inline struct dax_device *dax_get_by_host(const char *host)
+static inline void *dax_holder(struct dax_device *dax_dev)
{
return NULL;
}
-static inline struct dax_device *alloc_dax(void *private, const char *host,
- const struct dax_operations *ops, unsigned long flags)
+static inline struct dax_device *alloc_dax(void *private,
+ const struct dax_operations *ops)
{
/*
* Callers should check IS_ENABLED(CONFIG_DAX) to know if this
@@ -107,64 +117,61 @@ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
{
return !(vma->vm_flags & VM_SYNC);
}
+static inline size_t dax_recovery_write(struct dax_device *dax_dev,
+ pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
+{
+ return 0;
+}
#endif
+void set_dax_nocache(struct dax_device *dax_dev);
+void set_dax_nomc(struct dax_device *dax_dev);
+
struct writeback_control;
-int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
-#if IS_ENABLED(CONFIG_FS_DAX)
-bool __bdev_dax_supported(struct block_device *bdev, int blocksize);
-static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
+#if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX)
+int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk);
+void dax_remove_host(struct gendisk *disk);
+struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off,
+ void *holder, const struct dax_holder_operations *ops);
+void fs_put_dax(struct dax_device *dax_dev, void *holder);
+#else
+static inline int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk)
{
- return __bdev_dax_supported(bdev, blocksize);
+ return 0;
}
-
-bool __generic_fsdax_supported(struct dax_device *dax_dev,
- struct block_device *bdev, int blocksize, sector_t start,
- sector_t sectors);
-static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
- struct block_device *bdev, int blocksize, sector_t start,
- sector_t sectors)
+static inline void dax_remove_host(struct gendisk *disk)
{
- return __generic_fsdax_supported(dax_dev, bdev, blocksize, start,
- sectors);
}
-
-static inline void fs_put_dax(struct dax_device *dax_dev)
+static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev,
+ u64 *start_off, void *holder,
+ const struct dax_holder_operations *ops)
{
- put_dax(dax_dev);
+ return NULL;
}
+static inline void fs_put_dax(struct dax_device *dax_dev, void *holder)
+{
+}
+#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */
-struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
+#if IS_ENABLED(CONFIG_FS_DAX)
int dax_writeback_mapping_range(struct address_space *mapping,
struct dax_device *dax_dev, struct writeback_control *wbc);
struct page *dax_layout_busy_page(struct address_space *mapping);
+struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end);
dax_entry_t dax_lock_page(struct page *page);
void dax_unlock_page(struct page *page, dax_entry_t cookie);
+dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
+ unsigned long index, struct page **page);
+void dax_unlock_mapping_entry(struct address_space *mapping,
+ unsigned long index, dax_entry_t cookie);
#else
-static inline bool bdev_dax_supported(struct block_device *bdev,
- int blocksize)
-{
- return false;
-}
-
-static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
- struct block_device *bdev, int blocksize, sector_t start,
- sector_t sectors)
-{
- return false;
-}
-
-static inline void fs_put_dax(struct dax_device *dax_dev)
-{
-}
-
-static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
+static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
return NULL;
}
-static inline struct page *dax_layout_busy_page(struct address_space *mapping)
+static inline struct page *dax_layout_busy_page_range(struct address_space *mapping, pgoff_t start, pgoff_t nr_pages)
{
return NULL;
}
@@ -185,20 +192,49 @@ static inline dax_entry_t dax_lock_page(struct page *page)
static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
}
+
+static inline dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
+ unsigned long index, struct page **page)
+{
+ return 0;
+}
+
+static inline void dax_unlock_mapping_entry(struct address_space *mapping,
+ unsigned long index, dax_entry_t cookie)
+{
+}
#endif
+int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
+ const struct iomap_ops *ops);
+int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
+ const struct iomap_ops *ops);
+
+#if IS_ENABLED(CONFIG_DAX)
int dax_read_lock(void);
void dax_read_unlock(int id);
+#else
+static inline int dax_read_lock(void)
+{
+ return 0;
+}
+
+static inline void dax_read_unlock(int id)
+{
+}
+#endif /* CONFIG_DAX */
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
- void **kaddr, pfn_t *pfn);
-bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
- int blocksize, sector_t start, sector_t len);
+ enum dax_access_mode mode, void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i);
+int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
+ size_t nr_pages);
+int dax_holder_notify_failure(struct dax_device *dax_dev, u64 off, u64 len,
+ int mf_flags);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
@@ -210,23 +246,25 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index);
-
-#ifdef CONFIG_FS_DAX
-int __dax_zero_page_range(struct block_device *bdev,
- struct dax_device *dax_dev, sector_t sector,
- unsigned int offset, unsigned int length);
-#else
-static inline int __dax_zero_page_range(struct block_device *bdev,
- struct dax_device *dax_dev, sector_t sector,
- unsigned int offset, unsigned int length)
+int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
+ struct inode *dest, loff_t destoff,
+ loff_t len, bool *is_same,
+ const struct iomap_ops *ops);
+int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *len, unsigned int remap_flags,
+ const struct iomap_ops *ops);
+static inline bool dax_mapping(struct address_space *mapping)
{
- return -ENXIO;
+ return mapping->host && IS_DAX(mapping->host);
}
-#endif
-static inline bool dax_mapping(struct address_space *mapping)
+#ifdef CONFIG_DEV_DAX_HMEM_DEVICES
+void hmem_register_device(int target_nid, struct resource *r);
+#else
+static inline void hmem_register_device(int target_nid, struct resource *r)
{
- return mapping->host && IS_DAX(mapping->host);
}
+#endif
#endif
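
The reworked dax_direct_access() above now takes an explicit access mode. Below is a minimal sketch of the common calling pattern, under the assumption that callers still bracket the call with dax_read_lock()/dax_read_unlock() as before; my_dax_map() is a hypothetical helper.

/*
 * Translate nr_pages at pgoff into a kernel address, holding the dax read
 * lock across the call.  DAX_RECOVERY_WRITE would be passed instead of
 * DAX_ACCESS when recovering from poison via dax_recovery_write().
 */
static long my_dax_map(struct dax_device *dax_dev, pgoff_t pgoff,
		       long nr_pages, void **kaddr)
{
	pfn_t pfn;
	long avail;
	int id;

	id = dax_read_lock();
	avail = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS,
				  kaddr, &pfn);
	dax_read_unlock(id);
	return avail;	/* pages available at *kaddr, or negative errno */
}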
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index c1488cc84fd9..6b351e009f59 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -4,6 +4,7 @@
#include <linux/atomic.h>
#include <linux/list.h>
+#include <linux/math.h>
#include <linux/rculist.h>
#include <linux/rculist_bl.h>
#include <linux/spinlock.h>
@@ -15,6 +16,7 @@
#include <linux/wait.h>
struct path;
+struct file;
struct vfsmount;
/*
@@ -58,16 +60,7 @@ struct qstr {
extern const struct qstr empty_name;
extern const struct qstr slash_name;
-
-struct dentry_stat_t {
- long nr_dentry;
- long nr_unused;
- long age_limit; /* age in seconds */
- long want_pages; /* pages requested by system */
- long nr_negative; /* # of unused negative dentries */
- long dummy; /* Reserved for future use */
-};
-extern struct dentry_stat_t dentry_stat;
+extern const struct qstr dotdot_name;
/*
* Try to keep struct dentry aligned on 64 byte cachelines (this will
@@ -89,7 +82,7 @@ extern struct dentry_stat_t dentry_stat;
struct dentry {
/* RCU lookup touched fields */
unsigned int d_flags; /* protected by d_lock */
- seqcount_t d_seq; /* per dentry seqlock */
+ seqcount_spinlock_t d_seq; /* per dentry seqlock */
struct hlist_bl_node d_hash; /* lookup hash list */
struct dentry *d_parent; /* parent directory */
struct qstr d_name;
@@ -177,6 +170,8 @@ struct dentry_operations {
#define DCACHE_REFERENCED 0x00000040 /* Recently used, don't discard. */
+#define DCACHE_DONTCACHE 0x00000080 /* Purge from memory on final dput() */
+
#define DCACHE_CANT_MOUNT 0x00000100
#define DCACHE_GENOCIDE 0x00000200
#define DCACHE_SHRINK_LIST 0x00000400
@@ -211,7 +206,7 @@ struct dentry_operations {
#define DCACHE_MAY_FREE 0x00800000
#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
-#define DCACHE_ENCRYPTED_NAME 0x02000000 /* Encrypted name (dir key was unavailable) */
+#define DCACHE_NOKEY_NAME 0x02000000 /* Encrypted name encoded without key */
#define DCACHE_OP_REAL 0x04000000
#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */
@@ -239,6 +234,8 @@ extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
wait_queue_head_t *);
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
+extern bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
+ const struct qstr *name);
extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
extern struct dentry *d_find_any_alias(struct inode *inode);
extern struct dentry * d_obtain_alias(struct inode *);
@@ -254,11 +251,13 @@ extern struct dentry * d_make_root(struct inode *);
/* <clickety>-<click> the ramfs-type tree */
extern void d_genocide(struct dentry *);
-extern void d_tmpfile(struct dentry *, struct inode *);
+extern void d_tmpfile(struct file *, struct inode *);
extern struct dentry *d_find_alias(struct inode *);
extern void d_prune_aliases(struct inode *);
+extern struct dentry *d_find_alias_rcu(struct inode *);
+
/* test whether we have any submounts in a subdir tree */
extern int path_has_submounts(const struct path *);
@@ -289,14 +288,14 @@ static inline unsigned d_count(const struct dentry *dentry)
/*
* helper function for dentry_operations.d_dname() members
*/
-extern __printf(4, 5)
-char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+extern __printf(3, 4)
+char *dynamic_dname(char *, int, const char *, ...);
extern char *__d_path(const struct path *, const struct path *, char *, int);
extern char *d_absolute_path(const struct path *, char *, int);
extern char *d_path(const struct path *, char *, int);
-extern char *dentry_path_raw(struct dentry *, char *, int);
-extern char *dentry_path(struct dentry *, char *, int);
+extern char *dentry_path_raw(const struct dentry *, char *, int);
+extern char *dentry_path(const struct dentry *, char *, int);
/* Allocation counts.. */
@@ -353,7 +352,7 @@ static inline void dont_mount(struct dentry *dentry)
spin_unlock(&dentry->d_lock);
}
-extern void __d_lookup_done(struct dentry *);
+extern void __d_lookup_unhash_wake(struct dentry *dentry);
static inline int d_in_lookup(const struct dentry *dentry)
{
@@ -362,11 +361,8 @@ static inline int d_in_lookup(const struct dentry *dentry)
static inline void d_lookup_done(struct dentry *dentry)
{
- if (unlikely(d_in_lookup(dentry))) {
- spin_lock(&dentry->d_lock);
- __d_lookup_done(dentry);
- spin_unlock(&dentry->d_lock);
- }
+ if (unlikely(d_in_lookup(dentry)))
+ __d_lookup_unhash_wake(dentry);
}
extern void dput(struct dentry *);
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 6b64b6cc2175..07e547c02fd8 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -198,7 +198,7 @@ enum dccp_role {
struct dccp_service_list {
__u32 dccpsl_nr;
- __be32 dccpsl_list[0];
+ __be32 dccpsl_list[];
};
#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1)
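
The dccpsl_list[0] to dccpsl_list[] conversion is the standard flexible-array-member cleanup. Allocation sites typically size such structs with struct_size(); a hedged sketch follows (my_alloc_service_list is hypothetical, not from this commit).

#include <linux/overflow.h>
#include <linux/slab.h>

/* Allocate a service list with n entries, using struct_size() to compute
 * header + flexible array without overflow. */
static struct dccp_service_list *my_alloc_service_list(u32 n)
{
	struct dccp_service_list *sl;

	sl = kmalloc(struct_size(sl, dccpsl_list, n), GFP_KERNEL);
	if (sl)
		sl->dccpsl_nr = n;
	return sl;
}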
diff --git a/include/linux/dcookies.h b/include/linux/dcookies.h
deleted file mode 100644
index ddfdac20cad0..000000000000
--- a/include/linux/dcookies.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * dcookies.h
- *
- * Persistent cookie-path mappings
- *
- * Copyright 2002 John Levon <levon@movementarian.org>
- */
-
-#ifndef DCOOKIES_H
-#define DCOOKIES_H
-
-
-#ifdef CONFIG_PROFILING
-
-#include <linux/dcache.h>
-#include <linux/types.h>
-
-struct dcookie_user;
-struct path;
-
-/**
- * dcookie_register - register a user of dcookies
- *
- * Register as a dcookie user. Returns %NULL on failure.
- */
-struct dcookie_user * dcookie_register(void);
-
-/**
- * dcookie_unregister - unregister a user of dcookies
- *
- * Unregister as a dcookie user. This may invalidate
- * any dcookie values returned from get_dcookie().
- */
-void dcookie_unregister(struct dcookie_user * user);
-
-/**
- * get_dcookie - acquire a dcookie
- *
- * Convert the given dentry/vfsmount pair into
- * a cookie value.
- *
- * Returns -EINVAL if no living task has registered as a
- * dcookie user.
- *
- * Returns 0 on success, with *cookie filled in
- */
-int get_dcookie(const struct path *path, unsigned long *cookie);
-
-#else
-
-static inline struct dcookie_user * dcookie_register(void)
-{
- return NULL;
-}
-
-static inline void dcookie_unregister(struct dcookie_user * user)
-{
- return;
-}
-
-static inline int get_dcookie(const struct path *path, unsigned long *cookie)
-{
- return -ENOSYS;
-}
-
-#endif /* CONFIG_PROFILING */
-
-#endif /* DCOOKIES_H */
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 257ab3c92cb8..dbb409d77d4f 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -2,9 +2,8 @@
#ifndef __LINUX_DEBUG_LOCKING_H
#define __LINUX_DEBUG_LOCKING_H
-#include <linux/kernel.h>
#include <linux/atomic.h>
-#include <linux/bug.h>
+#include <linux/cache.h>
struct task_struct;
@@ -12,7 +11,7 @@ extern int debug_locks __read_mostly;
extern int debug_locks_silent __read_mostly;
-static inline int __debug_locks_off(void)
+static __always_inline int __debug_locks_off(void)
{
return xchg(&debug_locks, 0);
}
@@ -27,8 +26,10 @@ extern int debug_locks_off(void);
int __ret = 0; \
\
if (!oops_in_progress && unlikely(c)) { \
+ instrumentation_begin(); \
if (debug_locks_off() && !debug_locks_silent) \
WARN(1, "DEBUG_LOCKS_WARN_ON(%s)", #c); \
+ instrumentation_end(); \
__ret = 1; \
} \
__ret; \
@@ -46,8 +47,6 @@ extern int debug_locks_off(void);
# define locking_selftest() do { } while (0)
#endif
-struct task_struct;
-
#ifdef CONFIG_LOCKDEP
extern void debug_show_all_locks(void);
extern void debug_show_held_locks(struct task_struct *task);
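
DEBUG_LOCKS_WARN_ON() is typically used by locking primitives to warn once and turn further lock debugging off; a sketch of the usual pattern follows (the owner check is illustrative, not from this commit).

/* Warn (and disable further lock debugging) if an invariant is violated,
 * then bail out of the debug path. */
static void my_lock_release_check(struct task_struct *owner)
{
	if (DEBUG_LOCKS_WARN_ON(owner != current))
		return;
	/* ... normal debug bookkeeping ... */
}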
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 43efcc49f061..f60674692d36 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -35,6 +35,12 @@ struct debugfs_regset32 {
const struct debugfs_reg32 *regs;
int nregs;
void __iomem *base;
+ struct device *dev; /* Optional device for Runtime PM */
+};
+
+struct debugfs_u32_array {
+ u32 *array;
+ u32 n_elements;
};
extern struct dentry *arch_debugfs_dir;
@@ -67,10 +73,10 @@ struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops);
-struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
- struct dentry *parent, void *data,
- const struct file_operations *fops,
- loff_t file_size);
+void debugfs_create_file_size(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops,
+ loff_t file_size);
struct dentry *debugfs_create_dir(const char *name, struct dentry *parent);
@@ -85,6 +91,8 @@ struct dentry *debugfs_create_automount(const char *name,
void debugfs_remove(struct dentry *dentry);
#define debugfs_remove_recursive debugfs_remove
+void debugfs_lookup_and_remove(const char *name, struct dentry *parent);
+
const struct file_operations *debugfs_real_fops(const struct file *filp);
int debugfs_file_get(struct dentry *dentry);
@@ -102,12 +110,12 @@ void debugfs_create_u8(const char *name, umode_t mode, struct dentry *parent,
u8 *value);
void debugfs_create_u16(const char *name, umode_t mode, struct dentry *parent,
u16 *value);
-struct dentry *debugfs_create_u32(const char *name, umode_t mode,
- struct dentry *parent, u32 *value);
+void debugfs_create_u32(const char *name, umode_t mode, struct dentry *parent,
+ u32 *value);
void debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent,
u64 *value);
-struct dentry *debugfs_create_ulong(const char *name, umode_t mode,
- struct dentry *parent, unsigned long *value);
+void debugfs_create_ulong(const char *name, umode_t mode, struct dentry *parent,
+ unsigned long *value);
void debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent,
u8 *value);
void debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent,
@@ -120,8 +128,10 @@ void debugfs_create_size_t(const char *name, umode_t mode,
struct dentry *parent, size_t *value);
void debugfs_create_atomic_t(const char *name, umode_t mode,
struct dentry *parent, atomic_t *value);
-struct dentry *debugfs_create_bool(const char *name, umode_t mode,
- struct dentry *parent, bool *value);
+void debugfs_create_bool(const char *name, umode_t mode, struct dentry *parent,
+ bool *value);
+void debugfs_create_str(const char *name, umode_t mode,
+ struct dentry *parent, char **value);
struct dentry *debugfs_create_blob(const char *name, umode_t mode,
struct dentry *parent,
@@ -135,12 +145,12 @@ void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
int nregs, void __iomem *base, char *prefix);
void debugfs_create_u32_array(const char *name, umode_t mode,
- struct dentry *parent, u32 *array, u32 elements);
+ struct dentry *parent,
+ struct debugfs_u32_array *array);
-struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name,
- struct dentry *parent,
- int (*read_fn)(struct seq_file *s,
- void *data));
+void debugfs_create_devm_seqfile(struct device *dev, const char *name,
+ struct dentry *parent,
+ int (*read_fn)(struct seq_file *s, void *data));
bool debugfs_initialized(void);
@@ -150,6 +160,9 @@ ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf,
ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos);
+ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+
#else
#include <linux/err.h>
@@ -181,13 +194,11 @@ static inline struct dentry *debugfs_create_file_unsafe(const char *name,
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
- struct dentry *parent, void *data,
- const struct file_operations *fops,
- loff_t file_size)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_file_size(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops,
+ loff_t file_size)
+{ }
static inline struct dentry *debugfs_create_dir(const char *name,
struct dentry *parent)
@@ -216,6 +227,10 @@ static inline void debugfs_remove(struct dentry *dentry)
static inline void debugfs_remove_recursive(struct dentry *dentry)
{ }
+static inline void debugfs_lookup_and_remove(const char *name,
+ struct dentry *parent)
+{ }
+
const struct file_operations *debugfs_real_fops(const struct file *filp);
static inline int debugfs_file_get(struct dentry *dentry)
@@ -251,23 +266,15 @@ static inline void debugfs_create_u8(const char *name, umode_t mode,
static inline void debugfs_create_u16(const char *name, umode_t mode,
struct dentry *parent, u16 *value) { }
-static inline struct dentry *debugfs_create_u32(const char *name, umode_t mode,
- struct dentry *parent,
- u32 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_u32(const char *name, umode_t mode,
+ struct dentry *parent, u32 *value) { }
static inline void debugfs_create_u64(const char *name, umode_t mode,
struct dentry *parent, u64 *value) { }
-static inline struct dentry *debugfs_create_ulong(const char *name,
- umode_t mode,
- struct dentry *parent,
- unsigned long *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_ulong(const char *name, umode_t mode,
+ struct dentry *parent,
+ unsigned long *value) { }
static inline void debugfs_create_x8(const char *name, umode_t mode,
struct dentry *parent, u8 *value) { }
@@ -290,12 +297,13 @@ static inline void debugfs_create_atomic_t(const char *name, umode_t mode,
atomic_t *value)
{ }
-static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode,
- struct dentry *parent,
- bool *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_bool(const char *name, umode_t mode,
+ struct dentry *parent, bool *value) { }
+
+static inline void debugfs_create_str(const char *name, umode_t mode,
+ struct dentry *parent,
+ char **value)
+{ }
static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode,
struct dentry *parent,
@@ -321,18 +329,17 @@ static inline bool debugfs_initialized(void)
}
static inline void debugfs_create_u32_array(const char *name, umode_t mode,
- struct dentry *parent, u32 *array,
- u32 elements)
+ struct dentry *parent,
+ struct debugfs_u32_array *array)
{
}
-static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev,
- const char *name,
- struct dentry *parent,
- int (*read_fn)(struct seq_file *s,
- void *data))
+static inline void debugfs_create_devm_seqfile(struct device *dev,
+ const char *name,
+ struct dentry *parent,
+ int (*read_fn)(struct seq_file *s,
+ void *data))
{
- return ERR_PTR(-ENODEV);
}
static inline ssize_t debugfs_read_file_bool(struct file *file,
@@ -349,6 +356,13 @@ static inline ssize_t debugfs_write_file_bool(struct file *file,
return -ENODEV;
}
+static inline ssize_t debugfs_read_file_str(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return -ENODEV;
+}
+
#endif
/**
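
With the void conversions above, callers no longer check the result of the simple value helpers; a sketch of the updated usage (all names illustrative):

/* Only the directory creation result is kept, for a later
 * debugfs_remove(); the value helpers return nothing now. */
static struct dentry *my_dir;
static u32 my_counter;
static bool my_enabled;

static void my_debugfs_init(void)
{
	my_dir = debugfs_create_dir("my_driver", NULL);
	debugfs_create_u32("counter", 0444, my_dir, &my_counter);
	debugfs_create_bool("enabled", 0644, my_dir, &my_enabled);
}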
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index afc416e5dcab..32444686b6ff 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -18,7 +18,7 @@ enum debug_obj_state {
struct debug_obj_descr;
/**
- * struct debug_obj - representaion of an tracked object
+ * struct debug_obj - representation of a tracked object
* @node: hlist node to link the object into the tracker list
* @state: tracked object state
* @astate: current active state
@@ -30,7 +30,7 @@ struct debug_obj {
enum debug_obj_state state;
unsigned int astate;
void *object;
- struct debug_obj_descr *descr;
+ const struct debug_obj_descr *descr;
};
/**
@@ -64,14 +64,14 @@ struct debug_obj_descr {
};
#ifdef CONFIG_DEBUG_OBJECTS
-extern void debug_object_init (void *addr, struct debug_obj_descr *descr);
+extern void debug_object_init (void *addr, const struct debug_obj_descr *descr);
extern void
-debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr);
-extern int debug_object_activate (void *addr, struct debug_obj_descr *descr);
-extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
-extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr);
-extern void debug_object_free (void *addr, struct debug_obj_descr *descr);
-extern void debug_object_assert_init(void *addr, struct debug_obj_descr *descr);
+debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr);
+extern int debug_object_activate (void *addr, const struct debug_obj_descr *descr);
+extern void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr);
+extern void debug_object_destroy (void *addr, const struct debug_obj_descr *descr);
+extern void debug_object_free (void *addr, const struct debug_obj_descr *descr);
+extern void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr);
/*
* Active state:
@@ -79,26 +79,26 @@ extern void debug_object_assert_init(void *addr, struct debug_obj_descr *descr);
* - Must return to 0 before deactivation.
*/
extern void
-debug_object_active_state(void *addr, struct debug_obj_descr *descr,
+debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
unsigned int expect, unsigned int next);
extern void debug_objects_early_init(void);
extern void debug_objects_mem_init(void);
#else
static inline void
-debug_object_init (void *addr, struct debug_obj_descr *descr) { }
+debug_object_init (void *addr, const struct debug_obj_descr *descr) { }
static inline void
-debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) { }
+debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr) { }
static inline int
-debug_object_activate (void *addr, struct debug_obj_descr *descr) { return 0; }
+debug_object_activate (void *addr, const struct debug_obj_descr *descr) { return 0; }
static inline void
-debug_object_deactivate(void *addr, struct debug_obj_descr *descr) { }
+debug_object_deactivate(void *addr, const struct debug_obj_descr *descr) { }
static inline void
-debug_object_destroy (void *addr, struct debug_obj_descr *descr) { }
+debug_object_destroy (void *addr, const struct debug_obj_descr *descr) { }
static inline void
-debug_object_free (void *addr, struct debug_obj_descr *descr) { }
+debug_object_free (void *addr, const struct debug_obj_descr *descr) { }
static inline void
-debug_object_assert_init(void *addr, struct debug_obj_descr *descr) { }
+debug_object_assert_init(void *addr, const struct debug_obj_descr *descr) { }
static inline void debug_objects_early_init(void) { }
static inline void debug_objects_mem_init(void) { }
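
The constification above means descriptors can now live in rodata; a minimal sketch, assuming only the name member of &struct debug_obj_descr is populated (real descriptors usually also supply fixup callbacks not shown here):

/* A descriptor in rodata, as the const-qualified API above now permits. */
static const struct debug_obj_descr my_obj_descr = {
	.name = "my_object",
};

static void my_object_setup(void *obj)
{
	debug_object_init(obj, &my_obj_descr);
	debug_object_activate(obj, &my_obj_descr);
}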
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
index 868e9eacd69e..9192986b1a73 100644
--- a/include/linux/decompress/mm.h
+++ b/include/linux/decompress/mm.h
@@ -25,13 +25,21 @@
#define STATIC_RW_DATA static
#endif
+/*
+ * When an architecture needs to share the malloc()/free() implementation
+ * between compilation units, it needs to have non-local visibility.
+ */
+#ifndef MALLOC_VISIBLE
+#define MALLOC_VISIBLE static
+#endif
+
/* A trivial malloc implementation, adapted from
* malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
*/
STATIC_RW_DATA unsigned long malloc_ptr;
STATIC_RW_DATA int malloc_count;
-static void *malloc(int size)
+MALLOC_VISIBLE void *malloc(int size)
{
void *p;
@@ -52,7 +60,7 @@ static void *malloc(int size)
return p;
}
-static void free(void *where)
+MALLOC_VISIBLE void free(void *where)
{
malloc_count--;
if (!malloc_count)
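
The MALLOC_VISIBLE override exists so that boot/decompressor code sharing these helpers across objects can drop the static qualifier; a hedged illustration of the intended pattern (the include path is an assumption, following the usual arch decompressor convention):

/* In a decompressor unit that needs malloc()/free() visible to other
 * compilation units, override the default 'static' with nothing before
 * pulling in the decompressor source: */
#define MALLOC_VISIBLE	/* empty: give malloc()/free() external linkage */
#include "../../../../lib/decompress_unzstd.c"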
diff --git a/include/linux/decompress/unzstd.h b/include/linux/decompress/unzstd.h
new file mode 100644
index 000000000000..56d539ae880f
--- /dev/null
+++ b/include/linux/decompress/unzstd.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_DECOMPRESS_UNZSTD_H
+#define LINUX_DECOMPRESS_UNZSTD_H
+
+int unzstd(unsigned char *inbuf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *output,
+ long *pos,
+ void (*error_fn)(char *x));
+#endif
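
unzstd() follows the common decompress_fn convention: a fill()/flush() pair for streaming, or in/out buffers for one-shot use. A hedged one-shot sketch (buffer names and the zero-return-on-success convention are assumptions based on the other kernel decompressors):

/* Whole input in `in`, output written to `out`.  fill/flush are NULL for
 * buffer-to-buffer mode; `pos` receives how much input was consumed. */
static void my_unzstd_error(char *msg)
{
	pr_err("unzstd: %s\n", msg);
}

static int my_decompress(unsigned char *in, long in_len, unsigned char *out)
{
	long pos = 0;

	return unzstd(in, in_len, NULL, NULL, out, &pos, my_unzstd_error);
}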
diff --git a/include/linux/delay.h b/include/linux/delay.h
index 8e6828094c1e..039e7e0c7378 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -16,10 +16,11 @@
* 3. CPU clock rate changes.
*
* Please see this thread:
- * http://lists.openwall.net/linux-kernel/2011/01/09/56
+ * https://lists.openwall.net/linux-kernel/2011/01/09/56
*/
-#include <linux/kernel.h>
+#include <linux/math.h>
+#include <linux/sched.h>
extern unsigned long loops_per_jiffy;
@@ -58,11 +59,33 @@ void calibrate_delay(void);
void __attribute__((weak)) calibration_delay_done(void);
void msleep(unsigned int msecs);
unsigned long msleep_interruptible(unsigned int msecs);
-void usleep_range(unsigned long min, unsigned long max);
+void usleep_range_state(unsigned long min, unsigned long max,
+ unsigned int state);
+
+static inline void usleep_range(unsigned long min, unsigned long max)
+{
+ usleep_range_state(min, max, TASK_UNINTERRUPTIBLE);
+}
+
+static inline void usleep_idle_range(unsigned long min, unsigned long max)
+{
+ usleep_range_state(min, max, TASK_IDLE);
+}
static inline void ssleep(unsigned int seconds)
{
msleep(seconds * 1000);
}
+/* see Documentation/timers/timers-howto.rst for the thresholds */
+static inline void fsleep(unsigned long usecs)
+{
+ if (usecs <= 10)
+ udelay(usecs);
+ else if (usecs <= 20000)
+ usleep_range(usecs, 2 * usecs);
+ else
+ msleep(DIV_ROUND_UP(usecs, 1000));
+}
+
#endif /* defined(_LINUX_DELAY_H) */
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 2d3bdcccf5eb..0da97dba9ef8 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -9,18 +9,9 @@
#include <uapi/linux/taskstats.h>
-/*
- * Per-task flags relevant to delay accounting
- * maintained privately to avoid exhausting similar flags in sched.h:PF_*
- * Used to set current->delays->flags
- */
-#define DELAYACCT_PF_SWAPIN 0x00000001 /* I am doing a swapin */
-#define DELAYACCT_PF_BLKIO 0x00000002 /* I am waiting on IO */
-
#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
raw_spinlock_t lock;
- unsigned int flags; /* Private per-task flags */
/* For each stat XXX, add following, aligned appropriately
*
@@ -37,13 +28,13 @@ struct task_delay_info {
* associated with the operation is added to XXX_delay.
* XXX_delay contains the accumulated delay time in nanoseconds.
*/
- u64 blkio_start; /* Shared by blkio, swapin */
+ u64 blkio_start;
u64 blkio_delay; /* wait for sync block io completion */
- u64 swapin_delay; /* wait for swapin block io completion */
+ u64 swapin_start;
+ u64 swapin_delay; /* wait for swapin */
u32 blkio_count; /* total count of the number of sync block */
/* io operations performed */
- u32 swapin_count; /* total count of the number of swapin block */
- /* io operations performed */
+ u32 swapin_count; /* total count of swapin */
u64 freepages_start;
u64 freepages_delay; /* wait for memory reclaim */
@@ -51,48 +42,45 @@ struct task_delay_info {
u64 thrashing_start;
u64 thrashing_delay; /* wait for thrashing page */
+ u64 compact_start;
+ u64 compact_delay; /* wait for memory compact */
+
+ u64 wpcopy_start;
+ u64 wpcopy_delay; /* wait for write-protect copy */
+
u32 freepages_count; /* total count of memory reclaim */
u32 thrashing_count; /* total count of thrash waits */
+ u32 compact_count; /* total count of memory compact */
+ u32 wpcopy_count; /* total count of write-protect copy */
};
#endif
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/jump_label.h>
#ifdef CONFIG_TASK_DELAY_ACCT
+DECLARE_STATIC_KEY_FALSE(delayacct_key);
extern int delayacct_on; /* Delay accounting turned on/off */
extern struct kmem_cache *delayacct_cache;
extern void delayacct_init(void);
+
extern void __delayacct_tsk_init(struct task_struct *);
extern void __delayacct_tsk_exit(struct task_struct *);
extern void __delayacct_blkio_start(void);
extern void __delayacct_blkio_end(struct task_struct *);
-extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *);
+extern int delayacct_add_tsk(struct taskstats *, struct task_struct *);
extern __u64 __delayacct_blkio_ticks(struct task_struct *);
extern void __delayacct_freepages_start(void);
extern void __delayacct_freepages_end(void);
-extern void __delayacct_thrashing_start(void);
-extern void __delayacct_thrashing_end(void);
-
-static inline int delayacct_is_task_waiting_on_io(struct task_struct *p)
-{
- if (p->delays)
- return (p->delays->flags & DELAYACCT_PF_BLKIO);
- else
- return 0;
-}
-
-static inline void delayacct_set_flag(int flag)
-{
- if (current->delays)
- current->delays->flags |= flag;
-}
-
-static inline void delayacct_clear_flag(int flag)
-{
- if (current->delays)
- current->delays->flags &= ~flag;
-}
+extern void __delayacct_thrashing_start(bool *in_thrashing);
+extern void __delayacct_thrashing_end(bool *in_thrashing);
+extern void __delayacct_swapin_start(void);
+extern void __delayacct_swapin_end(void);
+extern void __delayacct_compact_start(void);
+extern void __delayacct_compact_end(void);
+extern void __delayacct_wpcopy_start(void);
+extern void __delayacct_wpcopy_end(void);
static inline void delayacct_tsk_init(struct task_struct *tsk)
{
@@ -114,24 +102,20 @@ static inline void delayacct_tsk_free(struct task_struct *tsk)
static inline void delayacct_blkio_start(void)
{
- delayacct_set_flag(DELAYACCT_PF_BLKIO);
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
__delayacct_blkio_start();
}
static inline void delayacct_blkio_end(struct task_struct *p)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (p->delays)
__delayacct_blkio_end(p);
- delayacct_clear_flag(DELAYACCT_PF_BLKIO);
-}
-
-static inline int delayacct_add_tsk(struct taskstats *d,
- struct task_struct *tsk)
-{
- if (!delayacct_on || !tsk->delays)
- return 0;
- return __delayacct_add_tsk(d, tsk);
}
static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
@@ -143,33 +127,95 @@ static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
static inline void delayacct_freepages_start(void)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
__delayacct_freepages_start();
}
static inline void delayacct_freepages_end(void)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
__delayacct_freepages_end();
}
-static inline void delayacct_thrashing_start(void)
+static inline void delayacct_thrashing_start(bool *in_thrashing)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_thrashing_start(in_thrashing);
+}
+
+static inline void delayacct_thrashing_end(bool *in_thrashing)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_thrashing_end(in_thrashing);
+}
+
+static inline void delayacct_swapin_start(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_swapin_start();
+}
+
+static inline void delayacct_swapin_end(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_swapin_end();
+}
+
+static inline void delayacct_compact_start(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_compact_start();
+}
+
+static inline void delayacct_compact_end(void)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
- __delayacct_thrashing_start();
+ __delayacct_compact_end();
}
-static inline void delayacct_thrashing_end(void)
+static inline void delayacct_wpcopy_start(void)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_wpcopy_start();
+}
+
+static inline void delayacct_wpcopy_end(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
- __delayacct_thrashing_end();
+ __delayacct_wpcopy_end();
}
#else
-static inline void delayacct_set_flag(int flag)
-{}
-static inline void delayacct_clear_flag(int flag)
-{}
static inline void delayacct_init(void)
{}
static inline void delayacct_tsk_init(struct task_struct *tsk)
@@ -191,9 +237,21 @@ static inline void delayacct_freepages_start(void)
{}
static inline void delayacct_freepages_end(void)
{}
-static inline void delayacct_thrashing_start(void)
+static inline void delayacct_thrashing_start(bool *in_thrashing)
+{}
+static inline void delayacct_thrashing_end(bool *in_thrashing)
+{}
+static inline void delayacct_swapin_start(void)
+{}
+static inline void delayacct_swapin_end(void)
+{}
+static inline void delayacct_compact_start(void)
+{}
+static inline void delayacct_compact_end(void)
+{}
+static inline void delayacct_wpcopy_start(void)
{}
-static inline void delayacct_thrashing_end(void)
+static inline void delayacct_wpcopy_end(void)
{}
#endif /* CONFIG_TASK_DELAY_ACCT */
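
The thrashing hooks now carry per-call state in a caller-provided bool instead of a private task flag; a sketch of the pairing (the waiting code in between is illustrative):

/* The start hook stores per-call state into the caller's bool, which
 * must be passed back unchanged to the matching end hook. */
static void my_wait_on_thrashing_page(void)
{
	bool in_thrashing;

	delayacct_thrashing_start(&in_thrashing);
	/* ... block until the page is usable ... */
	delayacct_thrashing_end(&in_thrashing);
}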
diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h
index 5aad06b4ca7b..8904063d4c9f 100644
--- a/include/linux/dev_printk.h
+++ b/include/linux/dev_printk.h
@@ -21,6 +21,14 @@
struct device;
+#define PRINTK_INFO_SUBSYSTEM_LEN 16
+#define PRINTK_INFO_DEVICE_LEN 48
+
+struct dev_printk_info {
+ char subsystem[PRINTK_INFO_SUBSYSTEM_LEN];
+ char device[PRINTK_INFO_DEVICE_LEN];
+};
+
#ifdef CONFIG_PRINTK
__printf(3, 0) __cold
@@ -30,8 +38,8 @@ __printf(3, 4) __cold
int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
__printf(3, 4) __cold
-void dev_printk(const char *level, const struct device *dev,
- const char *fmt, ...);
+void _dev_printk(const char *level, const struct device *dev,
+ const char *fmt, ...);
__printf(2, 3) __cold
void _dev_emerg(const struct device *dev, const char *fmt, ...);
__printf(2, 3) __cold
@@ -61,7 +69,7 @@ static inline void __dev_printk(const char *level, const struct device *dev,
struct va_format *vaf)
{}
static inline __printf(3, 4)
-void dev_printk(const char *level, const struct device *dev,
+void _dev_printk(const char *level, const struct device *dev,
const char *fmt, ...)
{}
@@ -90,26 +98,59 @@ void _dev_info(const struct device *dev, const char *fmt, ...)
#endif
/*
+ * Need to take variadic arguments even though we don't use them, as dev_fmt()
+ * may only just have been expanded and may result in multiple arguments.
+ */
+#define dev_printk_index_emit(level, fmt, ...) \
+ printk_index_subsys_emit("%s %s: ", level, fmt)
+
+#define dev_printk_index_wrap(_p_func, level, dev, fmt, ...) \
+ ({ \
+ dev_printk_index_emit(level, fmt); \
+ _p_func(dev, fmt, ##__VA_ARGS__); \
+ })
+
+/*
+ * Some callsites directly call dev_printk rather than going through the
+ * dev_<level> infrastructure, so we need to emit here as well as inside those
+ * level-specific macros. Only one index entry will be produced, either way,
+ * since dev_printk's `fmt` isn't known at compile time if going through the
+ * dev_<level> macros.
+ *
+ * dev_fmt() isn't called for dev_printk when used directly, as it's used by
+ * the dev_<level> macros internally which already have dev_fmt() processed.
+ *
+ * We also can't use dev_printk_index_wrap directly, because we have a separate
+ * level to process.
+ */
+#define dev_printk(level, dev, fmt, ...) \
+ ({ \
+ dev_printk_index_emit(level, fmt); \
+ _dev_printk(level, dev, fmt, ##__VA_ARGS__); \
+ })
+
+/*
* #defines for all the dev_<level> macros to prefix with whatever
* possible use of #define dev_fmt(fmt) ...
*/
-#define dev_emerg(dev, fmt, ...) \
- _dev_emerg(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_crit(dev, fmt, ...) \
- _dev_crit(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_alert(dev, fmt, ...) \
- _dev_alert(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_err(dev, fmt, ...) \
- _dev_err(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_warn(dev, fmt, ...) \
- _dev_warn(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_notice(dev, fmt, ...) \
- _dev_notice(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_info(dev, fmt, ...) \
- _dev_info(dev, dev_fmt(fmt), ##__VA_ARGS__)
-
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#define dev_emerg(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_emerg, KERN_EMERG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_crit(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_crit, KERN_CRIT, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_alert(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_alert, KERN_ALERT, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_err(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_err, KERN_ERR, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_warn(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_warn, KERN_WARNING, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_notice(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_notice, KERN_NOTICE, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_info(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_info, KERN_INFO, dev, dev_fmt(fmt), ##__VA_ARGS__)
+
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define dev_dbg(dev, fmt, ...) \
dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__)
#elif defined(DEBUG)
@@ -181,7 +222,8 @@ do { \
dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
#define dev_info_ratelimited(dev, fmt, ...) \
dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define dev_dbg_ratelimited(dev, fmt, ...) \
do { \
@@ -226,7 +268,7 @@ do { \
* using WARN/WARN_ONCE to include file/line information and a backtrace.
*/
#define dev_WARN(dev, format, arg...) \
- WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg);
+ WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg)
#define dev_WARN_ONCE(dev, condition, format, arg...) \
WARN_ONCE(condition, "%s %s: " format, \
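
The dev_<level> macros above are built around an optional per-file dev_fmt() prefix; a sketch of the typical driver-side pattern (the prefix string and function are illustrative):

/* Define dev_fmt() before any includes so every dev_<level> message in
 * this file gets the prefix, then log through the indexed wrappers. */
#define dev_fmt(fmt) "myblock: " fmt

#include <linux/device.h>

static void my_probe_report(struct device *dev, int err)
{
	if (err)
		dev_err(dev, "probe failed: %d\n", err);
	else
		dev_info(dev, "probe ok\n");
}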
diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h
index f14f17f8cb7f..4a50a5c71a5f 100644
--- a/include/linux/devfreq-event.h
+++ b/include/linux/devfreq-event.h
@@ -106,8 +106,11 @@ extern int devfreq_event_get_event(struct devfreq_event_dev *edev,
struct devfreq_event_data *edata);
extern int devfreq_event_reset_event(struct devfreq_event_dev *edev);
extern struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
- struct device *dev, int index);
-extern int devfreq_event_get_edev_count(struct device *dev);
+ struct device *dev,
+ const char *phandle_name,
+ int index);
+extern int devfreq_event_get_edev_count(struct device *dev,
+ const char *phandle_name);
extern struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
struct devfreq_event_desc *desc);
extern int devfreq_event_remove_edev(struct devfreq_event_dev *edev);
@@ -152,12 +155,15 @@ static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev)
}
static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
- struct device *dev, int index)
+ struct device *dev,
+ const char *phandle_name,
+ int index)
{
return ERR_PTR(-EINVAL);
}
-static inline int devfreq_event_get_edev_count(struct device *dev)
+static inline int devfreq_event_get_edev_count(struct device *dev,
+ const char *phandle_name)
{
return -EINVAL;
}
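The extra phandle_name argument lets callers name the DT property that holds the event-device phandle instead of relying on a hard-coded one. A sketch assuming a "devfreq-events" property; the property name and helper are illustrative:

#include <linux/devfreq-event.h>
#include <linux/err.h>

static int mydrv_get_event(struct device *dev,
			   struct devfreq_event_dev **edev)
{
	int count = devfreq_event_get_edev_count(dev, "devfreq-events");

	if (count <= 0)
		return -ENODEV;

	*edev = devfreq_event_get_edev_by_phandle(dev, "devfreq-events", 0);
	return PTR_ERR_OR_ZERO(*edev);
}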
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index c6f82d4bec9f..34aab4dd336c 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -15,8 +15,6 @@
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
-#define DEVFREQ_NAME_LEN 16
-
/* DEVFREQ governor name */
#define DEVFREQ_GOV_SIMPLE_ONDEMAND "simple_ondemand"
#define DEVFREQ_GOV_PERFORMANCE "performance"
@@ -31,8 +29,17 @@
#define DEVFREQ_PRECHANGE (0)
#define DEVFREQ_POSTCHANGE (1)
+/* DEVFREQ work timers */
+enum devfreq_timer {
+ DEVFREQ_TIMER_DEFERRABLE = 0,
+ DEVFREQ_TIMER_DELAYED,
+ DEVFREQ_TIMER_NUM,
+};
+
struct devfreq;
struct devfreq_governor;
+struct devfreq_cpu_data;
+struct thermal_cooling_device;
/**
* struct devfreq_dev_status - Data given from devfreq user device to
@@ -70,6 +77,7 @@ struct devfreq_dev_status {
* @initial_freq: The operating frequency when devfreq_add_device() is
* called.
* @polling_ms: The polling interval in ms. 0 disables polling.
+ * @timer:	Timer type; either a deferrable or a delayed timer.
* @target: The device should set its operating frequency at
* freq or lowest-upper-than-freq value. If freq is
* higher than any operable frequency, set maximum.
@@ -92,10 +100,15 @@ struct devfreq_dev_status {
* @freq_table: Optional list of frequencies to support statistics
* and freq_table must be generated in ascending order.
* @max_state: The size of freq_table.
+ *
+ * @is_cooling_device: A flag indicating that the device can be used as a
+ * thermal cooling device.
*/
struct devfreq_dev_profile {
unsigned long initial_freq;
unsigned int polling_ms;
+ enum devfreq_timer timer;
+ bool is_cooling_device;
int (*target)(struct device *dev, unsigned long *freq, u32 flags);
int (*get_dev_status)(struct device *dev,
@@ -130,11 +143,13 @@ struct devfreq_stats {
* using devfreq.
* @profile: device-specific devfreq profile
* @governor: method how to choose frequency based on the usage.
- * @governor_name: devfreq governor name for use with this devfreq
+ * @opp_table: Reference to OPP table of dev.parent, if one exists.
* @nb: notifier block used to notify devfreq object that it should
* reevaluate operable frequencies. Devfreq users may use
* devfreq.nb to the corresponding register notifier call chain.
* @work: delayed work for load monitoring.
+ * @freq_table: current frequency table used by the devfreq driver.
+ * @max_state:	number of entries in the frequency table.
* @previous_freq: previously configured frequency value.
* @last_status: devfreq user device info, performance statistics
* @data: Private data of the governor. The devfreq framework does not
@@ -149,6 +164,7 @@ struct devfreq_stats {
* @suspend_count: suspend requests counter for a device.
* @stats: Statistics of devfreq device behavior
* @transition_notifier_list: list head of DEVFREQ_TRANSITION_NOTIFIER notifier
+ * @cdev: Cooling device pointer if the devfreq has cooling property
* @nb_min: Notifier block for DEV_PM_QOS_MIN_FREQUENCY
* @nb_max: Notifier block for DEV_PM_QOS_MAX_FREQUENCY
*
@@ -158,7 +174,7 @@ struct devfreq_stats {
* functions except for the context of callbacks defined in struct
* devfreq_governor, the governor should protect its access with the
* struct mutex lock in struct devfreq. A governor may use this mutex
- * to protect its own private data in void *data as well.
+ * to protect its own private data in ``void *data`` as well.
*/
struct devfreq {
struct list_head node;
@@ -167,10 +183,13 @@ struct devfreq {
struct device dev;
struct devfreq_dev_profile *profile;
const struct devfreq_governor *governor;
- char governor_name[DEVFREQ_NAME_LEN];
+ struct opp_table *opp_table;
struct notifier_block nb;
struct delayed_work work;
+ unsigned long *freq_table;
+ unsigned int max_state;
+
unsigned long previous_freq;
struct devfreq_dev_status last_status;
@@ -191,6 +210,9 @@ struct devfreq {
struct srcu_notifier_head transition_notifier_list;
+ /* Pointer to the cooling device if used for thermal mitigation */
+ struct thermal_cooling_device *cdev;
+
struct notifier_block nb_min;
struct notifier_block nb_max;
};
@@ -201,64 +223,59 @@ struct devfreq_freqs {
};
#if defined(CONFIG_PM_DEVFREQ)
-extern struct devfreq *devfreq_add_device(struct device *dev,
- struct devfreq_dev_profile *profile,
- const char *governor_name,
- void *data);
-extern int devfreq_remove_device(struct devfreq *devfreq);
-extern struct devfreq *devm_devfreq_add_device(struct device *dev,
- struct devfreq_dev_profile *profile,
- const char *governor_name,
- void *data);
-extern void devm_devfreq_remove_device(struct device *dev,
- struct devfreq *devfreq);
+struct devfreq *devfreq_add_device(struct device *dev,
+ struct devfreq_dev_profile *profile,
+ const char *governor_name,
+ void *data);
+int devfreq_remove_device(struct devfreq *devfreq);
+struct devfreq *devm_devfreq_add_device(struct device *dev,
+ struct devfreq_dev_profile *profile,
+ const char *governor_name,
+ void *data);
+void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq);
/* Supposed to be called by PM callbacks */
-extern int devfreq_suspend_device(struct devfreq *devfreq);
-extern int devfreq_resume_device(struct devfreq *devfreq);
+int devfreq_suspend_device(struct devfreq *devfreq);
+int devfreq_resume_device(struct devfreq *devfreq);
-extern void devfreq_suspend(void);
-extern void devfreq_resume(void);
+void devfreq_suspend(void);
+void devfreq_resume(void);
-/**
- * update_devfreq() - Reevaluate the device and configure frequency
- * @devfreq: the devfreq device
- *
- * Note: devfreq->lock must be held
- */
-extern int update_devfreq(struct devfreq *devfreq);
+/* update_devfreq() - Reevaluate the device and configure frequency */
+int update_devfreq(struct devfreq *devfreq);
/* Helper functions for devfreq user device driver with OPP. */
-extern struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
- unsigned long *freq, u32 flags);
-extern int devfreq_register_opp_notifier(struct device *dev,
- struct devfreq *devfreq);
-extern int devfreq_unregister_opp_notifier(struct device *dev,
- struct devfreq *devfreq);
-extern int devm_devfreq_register_opp_notifier(struct device *dev,
- struct devfreq *devfreq);
-extern void devm_devfreq_unregister_opp_notifier(struct device *dev,
- struct devfreq *devfreq);
-extern int devfreq_register_notifier(struct devfreq *devfreq,
- struct notifier_block *nb,
- unsigned int list);
-extern int devfreq_unregister_notifier(struct devfreq *devfreq,
- struct notifier_block *nb,
- unsigned int list);
-extern int devm_devfreq_register_notifier(struct device *dev,
+struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
+ unsigned long *freq, u32 flags);
+int devfreq_register_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+int devfreq_unregister_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+int devm_devfreq_register_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+void devm_devfreq_unregister_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+int devfreq_register_notifier(struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list);
+int devfreq_unregister_notifier(struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list);
+int devm_devfreq_register_notifier(struct device *dev,
struct devfreq *devfreq,
struct notifier_block *nb,
unsigned int list);
-extern void devm_devfreq_unregister_notifier(struct device *dev,
+void devm_devfreq_unregister_notifier(struct device *dev,
struct devfreq *devfreq,
struct notifier_block *nb,
unsigned int list);
-extern struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
- int index);
+struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node);
+struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
+ const char *phandle_name, int index);
#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
/**
- * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq
+ * struct devfreq_simple_ondemand_data - ``void *data`` fed to struct devfreq
* and devfreq_add_device
* @upthreshold: If the load is over this value, the frequency jumps.
* Specify 0 to use the default. Valid value = 0 to 100.
@@ -277,8 +294,13 @@ struct devfreq_simple_ondemand_data {
#endif
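To tie the pieces together, here is a minimal sketch of a profile using the new timer and is_cooling_device fields alongside simple_ondemand tuning; every mydrv_* name and number is illustrative, and the status counters would come from real hardware:

#include <linux/devfreq.h>
#include <linux/err.h>

static int mydrv_target(struct device *dev, unsigned long *freq, u32 flags)
{
	/* program the clock to *freq (or the closest supported rate) here */
	return 0;
}

static int mydrv_get_dev_status(struct device *dev,
				struct devfreq_dev_status *stat)
{
	stat->busy_time = 30;		/* illustrative counters */
	stat->total_time = 100;
	stat->current_frequency = 400000000;
	return 0;
}

static struct devfreq_simple_ondemand_data mydrv_ondemand = {
	.upthreshold = 85,	/* jump frequency above 85% load; 0 = default */
};

static struct devfreq_dev_profile mydrv_profile = {
	.initial_freq		= 400000000,
	.polling_ms		= 50,
	.timer			= DEVFREQ_TIMER_DELAYED,
	.is_cooling_device	= true,	/* also usable for thermal mitigation */
	.target			= mydrv_target,
	.get_dev_status		= mydrv_get_dev_status,
};

static int mydrv_setup(struct device *dev)
{
	struct devfreq *df = devm_devfreq_add_device(dev, &mydrv_profile,
						     DEVFREQ_GOV_SIMPLE_ONDEMAND,
						     &mydrv_ondemand);
	return PTR_ERR_OR_ZERO(df);
}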
#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
+enum devfreq_parent_dev_type {
+ DEVFREQ_PARENT_DEV,
+ CPUFREQ_PARENT_DEV,
+};
+
/**
- * struct devfreq_passive_data - void *data fed to struct devfreq
+ * struct devfreq_passive_data - ``void *data`` fed to struct devfreq
* and devfreq_add_device
* @parent: the devfreq instance of parent device.
* @get_target_freq: Optional callback, Returns desired operating frequency
@@ -288,8 +310,11 @@ struct devfreq_simple_ondemand_data {
* using governors except for passive governor.
* If the devfreq device has the specific method to decide
* the next frequency, should use this callback.
- * @this: the devfreq instance of own device.
- * @nb: the notifier block for DEVFREQ_TRANSITION_NOTIFIER list
+ * @parent_type: the parent type of the device.
+ * @this: the devfreq instance of own device.
+ * @nb: the notifier block for DEVFREQ_TRANSITION_NOTIFIER or
+ * CPUFREQ_TRANSITION_NOTIFIER list.
+ * @cpu_data_list: the list of cpu frequency data for all cpufreq_policy.
*
* The devfreq_passive_data has to set the devfreq instance of parent
* device with governors except for the passive governor. But, don't need to
@@ -303,17 +328,21 @@ struct devfreq_passive_data {
/* Optional callback to decide the next frequency of passive device */
int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
+ /* Should set the type of parent device */
+ enum devfreq_parent_dev_type parent_type;
+
/* For passive governor's internal use. Don't need to set them */
struct devfreq *this;
struct notifier_block nb;
+ struct list_head cpu_data_list;
};
#endif
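A sketch of the passive-governor data with the new parent_type field (names illustrative; .this, .nb and .cpu_data_list are filled in by the governor itself):

#include <linux/devfreq.h>

static struct devfreq_passive_data mydrv_passive = {
	.parent	     = NULL,			/* set to the parent's struct devfreq */
	.parent_type = DEVFREQ_PARENT_DEV,	/* or CPUFREQ_PARENT_DEV to follow cpufreq */
};

/* passed as the data argument together with DEVFREQ_GOV_PASSIVE */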
#else /* !CONFIG_PM_DEVFREQ */
static inline struct devfreq *devfreq_add_device(struct device *dev,
- struct devfreq_dev_profile *profile,
- const char *governor_name,
- void *data)
+ struct devfreq_dev_profile *profile,
+ const char *governor_name,
+ void *data)
{
return ERR_PTR(-ENOSYS);
}
@@ -350,31 +379,31 @@ static inline void devfreq_suspend(void) {}
static inline void devfreq_resume(void) {}
static inline struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
- unsigned long *freq, u32 flags)
+ unsigned long *freq, u32 flags)
{
return ERR_PTR(-EINVAL);
}
static inline int devfreq_register_opp_notifier(struct device *dev,
- struct devfreq *devfreq)
+ struct devfreq *devfreq)
{
return -EINVAL;
}
static inline int devfreq_unregister_opp_notifier(struct device *dev,
- struct devfreq *devfreq)
+ struct devfreq *devfreq)
{
return -EINVAL;
}
static inline int devm_devfreq_register_opp_notifier(struct device *dev,
- struct devfreq *devfreq)
+ struct devfreq *devfreq)
{
return -EINVAL;
}
static inline void devm_devfreq_unregister_opp_notifier(struct device *dev,
- struct devfreq *devfreq)
+ struct devfreq *devfreq)
{
}
@@ -393,22 +422,27 @@ static inline int devfreq_unregister_notifier(struct devfreq *devfreq,
}
static inline int devm_devfreq_register_notifier(struct device *dev,
- struct devfreq *devfreq,
- struct notifier_block *nb,
- unsigned int list)
+ struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
{
return 0;
}
static inline void devm_devfreq_unregister_notifier(struct device *dev,
- struct devfreq *devfreq,
- struct notifier_block *nb,
- unsigned int list)
+ struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
{
}
+static inline struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
- int index)
+ const char *phandle_name, int index)
{
return ERR_PTR(-ENODEV);
}
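Callers of the phandle lookup now pass the DT property name as well, and devfreq_get_devfreq_by_node() covers the case where the device_node is already known. A sketch; the "devfreq" property name is an assumption, since bindings differ:

#include <linux/devfreq.h>

static struct devfreq *mydrv_find_parent(struct device *dev)
{
	return devfreq_get_devfreq_by_phandle(dev, "devfreq", 0);
}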
diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h
index 4635f95000a4..14baa73fc2de 100644
--- a/include/linux/devfreq_cooling.h
+++ b/include/linux/devfreq_cooling.h
@@ -1,17 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* devfreq_cooling: Thermal cooling device implementation for devices using
* devfreq
*
* Copyright (C) 2014-2015 ARM Limited
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DEVFREQ_COOLING_H__
@@ -23,17 +16,6 @@
/**
* struct devfreq_cooling_power - Devfreq cooling power ops
- * @get_static_power: Take voltage, in mV, and return the static power
- * in mW. If NULL, the static power is assumed
- * to be 0.
- * @get_dynamic_power: Take voltage, in mV, and frequency, in HZ, and
- * return the dynamic power draw in mW. If NULL,
- * a simple power model is used.
- * @dyn_power_coeff: Coefficient for the simple dynamic power model in
- * mW/(MHz mV mV).
- * If get_dynamic_power() is NULL, then the
- * dynamic power is calculated as
- * @dyn_power_coeff * frequency * voltage^2
* @get_real_power: When this is set, the framework uses it to ask the
* device driver for the actual power.
* Some devices have more sophisticated methods
@@ -53,14 +35,8 @@
* max total (static + dynamic) power value for each OPP.
*/
struct devfreq_cooling_power {
- unsigned long (*get_static_power)(struct devfreq *devfreq,
- unsigned long voltage);
- unsigned long (*get_dynamic_power)(struct devfreq *devfreq,
- unsigned long freq,
- unsigned long voltage);
int (*get_real_power)(struct devfreq *df, u32 *power,
unsigned long freq, unsigned long voltage);
- unsigned long dyn_power_coeff;
};
#ifdef CONFIG_DEVFREQ_THERMAL
@@ -72,10 +48,13 @@ struct thermal_cooling_device *
of_devfreq_cooling_register(struct device_node *np, struct devfreq *df);
struct thermal_cooling_device *devfreq_cooling_register(struct devfreq *df);
void devfreq_cooling_unregister(struct thermal_cooling_device *dfc);
+struct thermal_cooling_device *
+devfreq_cooling_em_register(struct devfreq *df,
+ struct devfreq_cooling_power *dfc_power);
#else /* !CONFIG_DEVFREQ_THERMAL */
-struct thermal_cooling_device *
+static inline struct thermal_cooling_device *
of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
struct devfreq_cooling_power *dfc_power)
{
@@ -94,6 +73,13 @@ devfreq_cooling_register(struct devfreq *df)
return ERR_PTR(-EINVAL);
}
+static inline struct thermal_cooling_device *
+devfreq_cooling_em_register(struct devfreq *df,
+ struct devfreq_cooling_power *dfc_power)
+{
+ return ERR_PTR(-EINVAL);
+}
+
static inline void
devfreq_cooling_unregister(struct thermal_cooling_device *dfc)
{
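With the static-power hooks gone, a driver that can measure real consumption supplies only get_real_power and can register through the new energy-model helper. A sketch with illustrative names and a made-up constant:

#include <linux/devfreq_cooling.h>

static int mydrv_get_real_power(struct devfreq *df, u32 *power,
				unsigned long freq, unsigned long voltage)
{
	*power = 100;	/* mW; a real driver would read sensors or firmware */
	return 0;
}

static struct devfreq_cooling_power mydrv_cooling_power = {
	.get_real_power = mydrv_get_real_power,
};

static struct thermal_cooling_device *mydrv_register_cooling(struct devfreq *df)
{
	return devfreq_cooling_em_register(df, &mydrv_cooling_power);
}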
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 475668c69dbc..04c6acf7faaa 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -20,6 +20,7 @@ struct dm_table;
struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;
+enum dax_access_mode;
/*
* Type of table, mapped_device's mempool and request_queue
@@ -29,10 +30,9 @@ enum dm_queue_mode {
DM_TYPE_BIO_BASED = 1,
DM_TYPE_REQUEST_BASED = 2,
DM_TYPE_DAX_BIO_BASED = 3,
- DM_TYPE_NVME_BIO_BASED = 4,
};
-typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
+typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t;
union map_info {
void *ptr;
@@ -94,9 +94,18 @@ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
+#ifdef CONFIG_BLK_DEV_ZONED
typedef int (*dm_report_zones_fn) (struct dm_target *ti,
struct dm_report_zones_args *args,
unsigned int nr_zones);
+#else
+/*
+ * Define dm_report_zones_fn so that targets can assign to NULL if
+ * CONFIG_BLK_DEV_ZONED disabled. Otherwise each target needs to do
+ * awkward #ifdefs in their target_type, etc.
+ */
+typedef int (*dm_report_zones_fn) (struct dm_target *dummy);
+#endif
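Because the typedef now exists in both configurations, a target can initialize the field without guarding it. A sketch (a real target would also set ctr/dtr/map, omitted here; the target name is illustrative):

#include <linux/device-mapper.h>
#include <linux/module.h>

static struct target_type mydm_target = {
	.name		= "mydm",
	.version	= {1, 0, 0},
	.module		= THIS_MODULE,
	.report_zones	= NULL,		/* no #ifdef CONFIG_BLK_DEV_ZONED needed */
};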
/*
* These iteration functions are typically used to check (and combine)
@@ -138,10 +147,18 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
* >= 0 : the number of bytes accessible at the address
*/
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn);
-typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
+ long nr_pages, enum dax_access_mode node, void **kaddr,
+ pfn_t *pfn);
+typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
+ size_t nr_pages);
+
+/*
+ * Returns:
+ * != 0 : number of bytes transferred
+ * 0 : recovery write failed
+ */
+typedef size_t (*dm_dax_recovery_write_fn)(struct dm_target *ti, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i);
-#define PAGE_SECTORS (PAGE_SIZE / 512)
void dm_error(const char *message);
@@ -186,15 +203,13 @@ struct target_type {
dm_status_fn status;
dm_message_fn message;
dm_prepare_ioctl_fn prepare_ioctl;
-#ifdef CONFIG_BLK_DEV_ZONED
dm_report_zones_fn report_zones;
-#endif
dm_busy_fn busy;
dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints;
dm_dax_direct_access_fn direct_access;
- dm_dax_copy_iter_fn dax_copy_from_iter;
- dm_dax_copy_iter_fn dax_copy_to_iter;
+ dm_dax_zero_page_range_fn dax_zero_page_range;
+ dm_dax_recovery_write_fn dax_recovery_write;
/* For internal device-mapper use. */
struct list_head list;
@@ -244,10 +259,40 @@ struct target_type {
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)
/*
- * Indicates that a target supports host-managed zoned block devices.
+ * Indicates support for zoned block devices:
+ * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
+ * block devices but does not support combining different zoned models.
+ * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
+ * devices with different zoned models.
*/
+#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_ZONED_HM 0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
+#else
+#define DM_TARGET_ZONED_HM 0x00000000
+#define dm_target_supports_zoned_hm(type) (false)
+#endif
+
+/*
+ * A target handles REQ_NOWAIT
+ */
+#define DM_TARGET_NOWAIT 0x00000080
+#define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT)
+
+/*
+ * A target supports passing through inline crypto support.
+ */
+#define DM_TARGET_PASSES_CRYPTO 0x00000100
+#define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)
+
+#ifdef CONFIG_BLK_DEV_ZONED
+#define DM_TARGET_MIXED_ZONED_MODEL 0x00000200
+#define dm_target_supports_mixed_zoned_model(type) \
+ ((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
+#else
+#define DM_TARGET_MIXED_ZONED_MODEL 0x00000000
+#define dm_target_supports_mixed_zoned_model(type) (false)
+#endif
struct dm_target {
struct dm_table *table;
@@ -283,12 +328,6 @@ struct dm_target {
unsigned num_secure_erase_bios;
/*
- * The number of WRITE SAME bios that will be submitted to the target.
- * The bio number can be accessed with dm_bio_get_target_bio_nr.
- */
- unsigned num_write_same_bios;
-
- /*
* The number of WRITE ZEROES bios that will be submitted to the target.
* The bio number can be accessed with dm_bio_get_target_bio_nr.
*/
@@ -317,18 +356,37 @@ struct dm_target {
* whether or not its underlying devices have support.
*/
bool discards_supported:1;
-};
-/* Each target can link one of these into the table */
-struct dm_target_callbacks {
- struct list_head list;
- int (*congested_fn) (struct dm_target_callbacks *, int);
+ /*
+ * Set if we need to limit the number of in-flight bios when swapping.
+ */
+ bool limit_swap_bios:1;
+
+ /*
+ * Set if this target implements a zoned device and needs emulation of
+ * zone append operations using regular writes.
+ */
+ bool emulate_zone_append:1;
+
+ /*
+ * Set if the target will submit IO using dm_submit_bio_remap()
+ * after returning DM_MAPIO_SUBMITTED from its map function.
+ */
+ bool accounts_remapped_io:1;
+
+ /*
+ * Set if the target will submit the DM bio without first calling
+ * bio_set_dev(). NOTE: ideally a target should _not_ need this.
+ */
+ bool needs_bio_set_dev:1;
};
void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
+u64 dm_start_time_ns_from_clone(struct bio *bio);
+
int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
@@ -421,8 +479,10 @@ const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
+int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
+void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
union map_info *dm_get_rq_mapinfo(struct request *rq);
#ifdef CONFIG_BLK_DEV_ZONED
@@ -437,7 +497,8 @@ struct dm_report_zones_args {
/* must be filled by ->report_zones before calling dm_report_zones_cb */
sector_t start;
};
-int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data);
+int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
+ struct dm_report_zones_args *args, unsigned int nr_zones);
#endif /* CONFIG_BLK_DEV_ZONED */
/*
@@ -473,11 +534,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
sector_t start, sector_t len, char *params);
/*
- * Target_ctr should call this if it needs to add any callbacks.
- */
-void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);
-
-/*
* Target can use this to set the table's type.
* Can only ever be called from a target's ctr.
* Useful for "hybrid" target (supports both bio-based
@@ -511,7 +567,6 @@ void dm_sync_table(struct mapped_device *md);
* Queries
*/
sector_t dm_table_get_size(struct dm_table *t);
-unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);
@@ -534,9 +589,9 @@ struct dm_table *dm_swap_table(struct mapped_device *md,
struct dm_table *t);
/*
- * A wrapper around vmalloc.
+ * Table blk_crypto_profile functions
*/
-void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
+void dm_destroy_crypto_profile(struct blk_crypto_profile *profile);
/*-----------------------------------------------------------------
* Macros.
@@ -554,17 +609,16 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
-#ifdef CONFIG_DM_DEBUG
-#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__)
+#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
-#else
-#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
0 : scnprintf(result + sz, maxlen - sz, x))
+#define DMEMIT_TARGET_NAME_VERSION(y) \
+ DMEMIT("target_name=%s,target_version=%u.%u.%u", \
+ (y)->name, (y)->version[0], (y)->version[1], (y)->version[2])
+
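DMEMIT() assumes locals named sz, result and maxlen in the enclosing status callback, and DMEMIT_TARGET_NAME_VERSION() emits the identification string used by the new STATUSTYPE_IMA case. A sketch of a status function wired that way (target name and strings illustrative):

#include <linux/device-mapper.h>

static void mydm_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("ok");
		break;
	case STATUSTYPE_TABLE:
		DMEMIT("mydm");
		break;
	case STATUSTYPE_IMA:
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		break;
	}
}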
/*
* Definitions of return values from target end_io function.
*/
diff --git a/include/linux/device.h b/include/linux/device.h
index fa04dfd22bbc..424b55df0272 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -13,6 +13,7 @@
#define _DEVICE_H_
#include <linux/dev_printk.h>
+#include <linux/energy_model.h>
#include <linux/ioport.h>
#include <linux/kobject.h>
#include <linux/klist.h>
@@ -42,14 +43,14 @@ struct device_node;
struct fwnode_handle;
struct iommu_ops;
struct iommu_group;
-struct iommu_fwspec;
struct dev_pin_info;
-struct iommu_param;
+struct dev_iommu;
+struct msi_device_data;
/**
* struct subsys_interface - interfaces to device functions
* @name: name of the device function
- * @subsys: subsytem of the devices to attach to
+ * @subsys: subsystem of the devices to attach to
* @node: the list of functions registered at the subsystem
* @add_dev: device hookup to device function handler
* @remove_dev: device hookup to device function handler
@@ -129,8 +130,12 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
__ATTR_PREALLOC(_name, _mode, _show, _store)
#define DEVICE_ATTR_RW(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
+#define DEVICE_ATTR_ADMIN_RW(_name) \
+ struct device_attribute dev_attr_##_name = __ATTR_RW_MODE(_name, 0600)
#define DEVICE_ATTR_RO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
+#define DEVICE_ATTR_ADMIN_RO(_name) \
+ struct device_attribute dev_attr_##_name = __ATTR_RO_MODE(_name, 0400)
#define DEVICE_ATTR_WO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
#define DEVICE_ULONG_ATTR(_name, _mode, _var) \
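The new ADMIN variants differ from DEVICE_ATTR_RW/RO only in hardcoding root-only modes (0600/0400). A sketch, assuming sysfs_emit() is available; the attribute name and value are illustrative:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t secret_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);	/* illustrative value */
}

static ssize_t secret_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	return count;	/* parse and apply the value in a real driver */
}

/* 0600: readable and writable by root only */
static DEVICE_ATTR_ADMIN_RW(secret);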
@@ -146,68 +151,59 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
struct device_attribute dev_attr_##_name = \
__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
-extern int device_create_file(struct device *device,
- const struct device_attribute *entry);
-extern void device_remove_file(struct device *dev,
- const struct device_attribute *attr);
-extern bool device_remove_file_self(struct device *dev,
- const struct device_attribute *attr);
-extern int __must_check device_create_bin_file(struct device *dev,
+int device_create_file(struct device *device,
+ const struct device_attribute *entry);
+void device_remove_file(struct device *dev,
+ const struct device_attribute *attr);
+bool device_remove_file_self(struct device *dev,
+ const struct device_attribute *attr);
+int __must_check device_create_bin_file(struct device *dev,
const struct bin_attribute *attr);
-extern void device_remove_bin_file(struct device *dev,
- const struct bin_attribute *attr);
+void device_remove_bin_file(struct device *dev,
+ const struct bin_attribute *attr);
/* device resource management */
typedef void (*dr_release_t)(struct device *dev, void *res);
typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
-#ifdef CONFIG_DEBUG_DEVRES
-extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
- int nid, const char *name) __malloc;
+void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
+ int nid, const char *name) __malloc;
#define devres_alloc(release, size, gfp) \
__devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
#define devres_alloc_node(release, size, gfp, nid) \
__devres_alloc_node(release, size, gfp, nid, #release)
-#else
-extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
- int nid) __malloc;
-static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
-{
- return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
-}
-#endif
-extern void devres_for_each_res(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data,
- void (*fn)(struct device *, void *, void *),
- void *data);
-extern void devres_free(void *res);
-extern void devres_add(struct device *dev, void *res);
-extern void *devres_find(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
-extern void *devres_get(struct device *dev, void *new_res,
- dr_match_t match, void *match_data);
-extern void *devres_remove(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
-extern int devres_destroy(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
-extern int devres_release(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
+void devres_for_each_res(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data,
+ void (*fn)(struct device *, void *, void *),
+ void *data);
+void devres_free(void *res);
+void devres_add(struct device *dev, void *res);
+void *devres_find(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data);
+void *devres_get(struct device *dev, void *new_res,
+ dr_match_t match, void *match_data);
+void *devres_remove(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data);
+int devres_destroy(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data);
+int devres_release(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data);
/* devres group */
-extern void * __must_check devres_open_group(struct device *dev, void *id,
- gfp_t gfp);
-extern void devres_close_group(struct device *dev, void *id);
-extern void devres_remove_group(struct device *dev, void *id);
-extern int devres_release_group(struct device *dev, void *id);
+void * __must_check devres_open_group(struct device *dev, void *id, gfp_t gfp);
+void devres_close_group(struct device *dev, void *id);
+void devres_remove_group(struct device *dev, void *id);
+int devres_release_group(struct device *dev, void *id);
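The devres core pairs an allocation with a release callback that runs when the device goes away. A minimal sketch of the devres_alloc()/devres_add() pattern (all mydrv_* names illustrative):

#include <linux/device.h>
#include <linux/slab.h>

struct mydrv_state {
	int id;			/* illustrative payload */
};

static void mydrv_state_release(struct device *dev, void *res)
{
	/* runs automatically when the device is unbound or released */
}

static int mydrv_attach_state(struct device *dev)
{
	struct mydrv_state *dr;

	dr = devres_alloc(mydrv_state_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;
	dr->id = 1;
	devres_add(dev, dr);	/* lifetime now tied to the device */
	return 0;
}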
/* managed devm_k.alloc/kfree for device drivers */
-extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc;
-extern __printf(3, 0)
-char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
- va_list ap) __malloc;
-extern __printf(3, 4)
-char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) __malloc;
+void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc;
+void *devm_krealloc(struct device *dev, void *ptr, size_t size,
+ gfp_t gfp) __must_check;
+__printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp,
+ const char *fmt, va_list ap) __malloc;
+__printf(3, 4) char *devm_kasprintf(struct device *dev, gfp_t gfp,
+ const char *fmt, ...) __malloc;
static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
{
return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
@@ -227,16 +223,14 @@ static inline void *devm_kcalloc(struct device *dev,
{
return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
}
-extern void devm_kfree(struct device *dev, const void *p);
-extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
-extern const char *devm_kstrdup_const(struct device *dev,
- const char *s, gfp_t gfp);
-extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
- gfp_t gfp);
+void devm_kfree(struct device *dev, const void *p);
+char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
+const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp);
+void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp);
-extern unsigned long devm_get_free_pages(struct device *dev,
- gfp_t gfp_mask, unsigned int order);
-extern void devm_free_pages(struct device *dev, unsigned long addr);
+unsigned long devm_get_free_pages(struct device *dev,
+ gfp_t gfp_mask, unsigned int order);
+void devm_free_pages(struct device *dev, unsigned long addr);
void __iomem *devm_ioremap_resource(struct device *dev,
const struct resource *res);
@@ -289,66 +283,11 @@ struct device_dma_parameters {
* sg limitations.
*/
unsigned int max_segment_size;
+ unsigned int min_align_mask;
unsigned long segment_boundary_mask;
};
/**
- * struct device_connection - Device Connection Descriptor
- * @fwnode: The device node of the connected device
- * @endpoint: The names of the two devices connected together
- * @id: Unique identifier for the connection
- * @list: List head, private, for internal use only
- *
- * NOTE: @fwnode is not used together with @endpoint. @fwnode is used when
- * platform firmware defines the connection. When the connection is registered
- * with device_connection_add() @endpoint is used instead.
- */
-struct device_connection {
- struct fwnode_handle *fwnode;
- const char *endpoint[2];
- const char *id;
- struct list_head list;
-};
-
-typedef void *(*devcon_match_fn_t)(struct device_connection *con, int ep,
- void *data);
-
-void *fwnode_connection_find_match(struct fwnode_handle *fwnode,
- const char *con_id, void *data,
- devcon_match_fn_t match);
-void *device_connection_find_match(struct device *dev, const char *con_id,
- void *data, devcon_match_fn_t match);
-
-struct device *device_connection_find(struct device *dev, const char *con_id);
-
-void device_connection_add(struct device_connection *con);
-void device_connection_remove(struct device_connection *con);
-
-/**
- * device_connections_add - Add multiple device connections at once
- * @cons: Zero terminated array of device connection descriptors
- */
-static inline void device_connections_add(struct device_connection *cons)
-{
- struct device_connection *c;
-
- for (c = cons; c->endpoint[0]; c++)
- device_connection_add(c);
-}
-
-/**
- * device_connections_remove - Remove multiple device connections at once
- * @cons: Zero terminated array of device connection descriptors
- */
-static inline void device_connections_remove(struct device_connection *cons)
-{
- struct device_connection *c;
-
- for (c = cons; c->endpoint[0]; c++)
- device_connection_remove(c);
-}
-
-/**
* enum device_link_state - Device link states.
* @DL_STATE_NONE: The presence of the drivers is not being tracked.
* @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.
@@ -377,6 +316,7 @@ enum device_link_state {
* AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds.
* MANAGED: The core tracks presence of supplier/consumer drivers (internal).
* SYNC_STATE_ONLY: Link only affects sync_state() behavior.
+ * INFERRED: Inferred from data (e.g. firmware) and not from driver actions.
*/
#define DL_FLAG_STATELESS BIT(0)
#define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1)
@@ -386,34 +326,7 @@ enum device_link_state {
#define DL_FLAG_AUTOPROBE_CONSUMER BIT(5)
#define DL_FLAG_MANAGED BIT(6)
#define DL_FLAG_SYNC_STATE_ONLY BIT(7)
-
-/**
- * struct device_link - Device link representation.
- * @supplier: The device on the supplier end of the link.
- * @s_node: Hook to the supplier device's list of links to consumers.
- * @consumer: The device on the consumer end of the link.
- * @c_node: Hook to the consumer device's list of links to suppliers.
- * @status: The state of the link (with respect to the presence of drivers).
- * @flags: Link flags.
- * @rpm_active: Whether or not the consumer device is runtime-PM-active.
- * @kref: Count repeated addition of the same link.
- * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
- * @supplier_preactivated: Supplier has been made active before consumer probe.
- */
-struct device_link {
- struct device *supplier;
- struct list_head s_node;
- struct device *consumer;
- struct list_head c_node;
- enum device_link_state status;
- u32 flags;
- refcount_t rpm_active;
- struct kref kref;
-#ifdef CONFIG_SRCU
- struct rcu_head rcu_head;
-#endif
- bool supplier_preactivated; /* Owned by consumer probe. */
-};
+#define DL_FLAG_INFERRED BIT(8)
/**
* enum dl_dev_state - Device driver presence tracking information.
@@ -430,25 +343,119 @@ enum dl_dev_state {
};
/**
+ * enum device_removable - Whether the device is removable. The criteria for a
+ * device to be classified as removable are determined by its subsystem or bus.
+ * @DEVICE_REMOVABLE_NOT_SUPPORTED: This attribute is not supported for this
+ * device (default).
+ * @DEVICE_REMOVABLE_UNKNOWN: Device location is unknown.
+ * @DEVICE_FIXED: Device is not removable by the user.
+ * @DEVICE_REMOVABLE: Device is removable by the user.
+ */
+enum device_removable {
+ DEVICE_REMOVABLE_NOT_SUPPORTED = 0, /* must be 0 */
+ DEVICE_REMOVABLE_UNKNOWN,
+ DEVICE_FIXED,
+ DEVICE_REMOVABLE,
+};
+
+/**
* struct dev_links_info - Device data related to device links.
* @suppliers: List of links to supplier devices.
* @consumers: List of links to consumer devices.
- * @needs_suppliers: Hook to global list of devices waiting for suppliers.
* @defer_sync: Hook to global list of devices that have deferred sync_state.
- * @need_for_probe: If needs_suppliers is on a list, this indicates if the
- * suppliers are needed for probe or not.
* @status: Driver status information.
*/
struct dev_links_info {
struct list_head suppliers;
struct list_head consumers;
- struct list_head needs_suppliers;
struct list_head defer_sync;
- bool need_for_probe;
enum dl_dev_state status;
};
/**
+ * struct dev_msi_info - Device data related to MSI
+ * @domain: The MSI interrupt domain associated to the device
+ * @data: Pointer to MSI device data
+ */
+struct dev_msi_info {
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ struct irq_domain *domain;
+#endif
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ struct msi_device_data *data;
+#endif
+};
+
+/**
+ * enum device_physical_location_panel - Describes which panel surface of the
+ * system's housing the device connection point resides on.
+ * @DEVICE_PANEL_TOP: Device connection point is on the top panel.
+ * @DEVICE_PANEL_BOTTOM: Device connection point is on the bottom panel.
+ * @DEVICE_PANEL_LEFT: Device connection point is on the left panel.
+ * @DEVICE_PANEL_RIGHT: Device connection point is on the right panel.
+ * @DEVICE_PANEL_FRONT: Device connection point is on the front panel.
+ * @DEVICE_PANEL_BACK: Device connection point is on the back panel.
+ * @DEVICE_PANEL_UNKNOWN: The panel with device connection point is unknown.
+ */
+enum device_physical_location_panel {
+ DEVICE_PANEL_TOP,
+ DEVICE_PANEL_BOTTOM,
+ DEVICE_PANEL_LEFT,
+ DEVICE_PANEL_RIGHT,
+ DEVICE_PANEL_FRONT,
+ DEVICE_PANEL_BACK,
+ DEVICE_PANEL_UNKNOWN,
+};
+
+/**
+ * enum device_physical_location_vertical_position - Describes vertical
+ * position of the device connection point on the panel surface.
+ * @DEVICE_VERT_POS_UPPER: Device connection point is at upper part of panel.
+ * @DEVICE_VERT_POS_CENTER: Device connection point is at center part of panel.
+ * @DEVICE_VERT_POS_LOWER: Device connection point is at lower part of panel.
+ */
+enum device_physical_location_vertical_position {
+ DEVICE_VERT_POS_UPPER,
+ DEVICE_VERT_POS_CENTER,
+ DEVICE_VERT_POS_LOWER,
+};
+
+/**
+ * enum device_physical_location_horizontal_position - Describes horizontal
+ * position of the device connection point on the panel surface.
+ * @DEVICE_HORI_POS_LEFT: Device connection point is at left part of panel.
+ * @DEVICE_HORI_POS_CENTER: Device connection point is at center part of panel.
+ * @DEVICE_HORI_POS_RIGHT: Device connection point is at right part of panel.
+ */
+enum device_physical_location_horizontal_position {
+ DEVICE_HORI_POS_LEFT,
+ DEVICE_HORI_POS_CENTER,
+ DEVICE_HORI_POS_RIGHT,
+};
+
+/**
+ * struct device_physical_location - Device data related to physical location
+ * of the device connection point.
+ * @panel: Panel surface of the system's housing that the device connection
+ * point resides on.
+ * @vertical_position: Vertical position of the device connection point within
+ * the panel.
+ * @horizontal_position: Horizontal position of the device connection point
+ * within the panel.
+ * @dock: Set if the device connection point resides in a docking station or
+ * port replicator.
+ * @lid: Set if this device connection point resides on the lid of a laptop
+ * system.
+ */
+struct device_physical_location {
+ enum device_physical_location_panel panel;
+ enum device_physical_location_vertical_position vertical_position;
+ enum device_physical_location_horizontal_position horizontal_position;
+ bool dock;
+ bool lid;
+};
+
+/**
* struct device - The basic device structure
* @parent: The device's "parent" device, the device to which it is attached.
* In most cases, a parent device is some sort of bus or host
@@ -462,8 +469,6 @@ struct dev_links_info {
* This identifies the device type and carries type-specific
* information.
* @mutex: Mutex to synchronize calls to its driver.
- * @lockdep_mutex: An optional debug lock that a subsystem can use as a
- * peer lock to gain localized lockdep coverage of the device_lock.
* @bus: Type of bus device is on.
* @driver: Which driver has allocated this
* @platform_data: Platform data specific to the device.
@@ -481,10 +486,10 @@ struct dev_links_info {
* @pm_domain: Provide callbacks that are executed during system suspend,
* hibernation, system resume and during runtime PM transitions
* along with subsystem-level and driver-level callbacks.
+ * @em_pd: device's energy model performance domain
* @pins: For device pin management.
- * See Documentation/driver-api/pinctl.rst for details.
- * @msi_list: Hosts MSI descriptors
- * @msi_domain: The generic MSI domain this device is using.
+ * See Documentation/driver-api/pin-control.rst for details.
+ * @msi: MSI related data
* @numa_node: NUMA node this device is close to.
* @dma_ops: DMA mapping operations for this device.
* @dma_mask: Dma mask (if dma'ble device).
@@ -493,12 +498,13 @@ struct dev_links_info {
* such descriptors.
* @bus_dma_limit: Limit of an upstream bridge or bus which imposes a smaller
* DMA limit than the device itself supports.
- * @dma_pfn_offset: offset of DMA memory range relatively of RAM
+ * @dma_range_map: map for DMA memory ranges relative to that of RAM
* @dma_parms: A low level driver may set these to teach IOMMU code about
* segment limitations.
* @dma_pools: Dma pools (if dma'ble device).
* @dma_mem: Internal for coherent mem override.
* @cma_area: Contiguous memory area for dma allocations
+ * @dma_io_tlb_mem: Pointer to the swiotlb pool used. Not for driver use.
* @archdata: For arch-specific additions.
* @of_node: Associated device tree node.
* @fwnode: Associated device node supplied by platform firmware.
@@ -513,8 +519,12 @@ struct dev_links_info {
* gone away. This should be set by the allocator of the
* device (i.e. the bus driver that discovered the device).
* @iommu_group: IOMMU group the device belongs to.
- * @iommu_fwspec: IOMMU-specific properties supplied by firmware.
- * @iommu_param: Per device generic IOMMU runtime data
+ * @iommu: Per device generic IOMMU runtime data
+ * @physical_location: Describes physical location of the device connection
+ * point in the system housing.
+ * @removable: Whether the device can be removed from the system. This
+ * should be set by the subsystem / bus driver that discovered
+ * the device.
*
* @offline_disabled: If set, the device is permanently online.
* @offline: Set after successful invocation of bus type's .offline().
@@ -523,8 +533,16 @@ struct dev_links_info {
* @state_synced: The hardware state of this device has been synced to match
* the software state of this device by calling the driver/bus
* sync_state() callback.
+ * @can_match: The device has matched with a driver at least once or it is in
+ * a bus (like AMBA) which can't check for matching drivers until
+ * other devices probe successfully.
* @dma_coherent: this particular device is dma coherent, even if the
* architecture supports non-coherent devices.
+ * @dma_ops_bypass: If set to %true then the dma_ops are bypassed for the
+ * streaming DMA operations (->map_* / ->unmap_* / ->sync_*),
+ * and optionally (if the coherent mask is large enough) also
+ * for dma allocations. This flag is managed by the dma ops
+ * instance from ->dma_supported.
*
* At the lowest level, every device in a Linux system is represented by an
* instance of struct device. The device structure contains the information
@@ -550,9 +568,6 @@ struct device {
core doesn't touch it */
void *driver_data; /* Driver data, set and get with
dev_set_drvdata/dev_get_drvdata */
-#ifdef CONFIG_PROVE_LOCKING
- struct mutex lockdep_mutex;
-#endif
struct mutex mutex; /* mutex to synchronize calls to
* its driver.
*/
@@ -561,17 +576,17 @@ struct device {
struct dev_pm_info power;
struct dev_pm_domain *pm_domain;
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- struct irq_domain *msi_domain;
+#ifdef CONFIG_ENERGY_MODEL
+ struct em_perf_domain *em_pd;
#endif
+
#ifdef CONFIG_PINCTRL
struct dev_pin_info *pins;
#endif
-#ifdef CONFIG_GENERIC_MSI_IRQ
- struct list_head msi_list;
-#endif
-
+ struct dev_msi_info msi;
+#ifdef CONFIG_DMA_OPS
const struct dma_map_ops *dma_ops;
+#endif
u64 *dma_mask; /* dma mask (if dma'able device) */
u64 coherent_dma_mask;/* Like dma_mask, but for
alloc_coherent mappings as
@@ -579,7 +594,7 @@ struct device {
64 bit addresses for consistent
allocations such descriptors. */
u64 bus_dma_limit; /* upstream dma constraint */
- unsigned long dma_pfn_offset;
+ const struct bus_dma_region *dma_range_map;
struct device_dma_parameters *dma_parms;
@@ -593,6 +608,9 @@ struct device {
struct cma *cma_area; /* contiguous memory area for dma
allocations */
#endif
+#ifdef CONFIG_SWIOTLB
+ struct io_tlb_mem *dma_io_tlb_mem;
+#endif
/* arch specific additions */
struct dev_archdata archdata;
@@ -613,18 +631,53 @@ struct device {
void (*release)(struct device *dev);
struct iommu_group *iommu_group;
- struct iommu_fwspec *iommu_fwspec;
- struct iommu_param *iommu_param;
+ struct dev_iommu *iommu;
+
+ struct device_physical_location *physical_location;
+
+ enum device_removable removable;
bool offline_disabled:1;
bool offline:1;
bool of_node_reused:1;
bool state_synced:1;
+ bool can_match:1;
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
bool dma_coherent:1;
#endif
+#ifdef CONFIG_DMA_OPS_BYPASS
+ bool dma_ops_bypass : 1;
+#endif
+};
+
+/**
+ * struct device_link - Device link representation.
+ * @supplier: The device on the supplier end of the link.
+ * @s_node: Hook to the supplier device's list of links to consumers.
+ * @consumer: The device on the consumer end of the link.
+ * @c_node: Hook to the consumer device's list of links to suppliers.
+ * @link_dev: device used to expose link details in sysfs
+ * @status: The state of the link (with respect to the presence of drivers).
+ * @flags: Link flags.
+ * @rpm_active: Whether or not the consumer device is runtime-PM-active.
+ * @kref: Count repeated addition of the same link.
+ * @rm_work: Work structure used for removing the link.
+ * @supplier_preactivated: Supplier has been made active before consumer probe.
+ */
+struct device_link {
+ struct device *supplier;
+ struct list_head s_node;
+ struct device *consumer;
+ struct list_head c_node;
+ struct device link_dev;
+ enum device_link_state status;
+ u32 flags;
+ refcount_t rpm_active;
+ struct kref kref;
+ struct work_struct rm_work;
+ bool supplier_preactivated; /* Owned by consumer probe. */
};
static inline struct device *kobj_to_dev(struct kobject *kobj)
@@ -654,8 +707,19 @@ static inline const char *dev_name(const struct device *dev)
return kobject_name(&dev->kobj);
}
-extern __printf(2, 3)
-int dev_set_name(struct device *dev, const char *name, ...);
+/**
+ * dev_bus_name - Return a device's bus/class name, if at all possible
+ * @dev: struct device to get the bus/class name of
+ *
+ * Will return the name of the bus/class the device is attached to. If it is
+ * not attached to a bus/class, an empty string will be returned.
+ */
+static inline const char *dev_bus_name(const struct device *dev)
+{
+ return dev->bus ? dev->bus->name : (dev->class ? dev->class->name : "");
+}
+
+__printf(2, 3) int dev_set_name(struct device *dev, const char *name, ...);
#ifdef CONFIG_NUMA
static inline int dev_to_node(struct device *dev)
@@ -679,7 +743,7 @@ static inline void set_dev_node(struct device *dev, int node)
static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- return dev->msi_domain;
+ return dev->msi.domain;
#else
return NULL;
#endif
@@ -688,7 +752,7 @@ static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- dev->msi_domain = d;
+ dev->msi.domain = d;
#endif
}
@@ -809,42 +873,105 @@ static inline bool dev_has_sync_state(struct device *dev)
return false;
}
+static inline void dev_set_removable(struct device *dev,
+ enum device_removable removable)
+{
+ dev->removable = removable;
+}
+
+static inline bool dev_is_removable(struct device *dev)
+{
+ return dev->removable == DEVICE_REMOVABLE;
+}
+
+static inline bool dev_removable_is_valid(struct device *dev)
+{
+ return dev->removable != DEVICE_REMOVABLE_NOT_SUPPORTED;
+}
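Removability is meant to be set once by the discovering bus or subsystem and then queried by consumers. A sketch of the setter side (hypothetical bus helper):

static void mybus_mark_removable(struct device *dev, bool user_removable)
{
	dev_set_removable(dev, user_removable ? DEVICE_REMOVABLE
					      : DEVICE_FIXED);
}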
+
/*
* High level routines for use by the bus drivers
*/
-extern int __must_check device_register(struct device *dev);
-extern void device_unregister(struct device *dev);
-extern void device_initialize(struct device *dev);
-extern int __must_check device_add(struct device *dev);
-extern void device_del(struct device *dev);
-extern int device_for_each_child(struct device *dev, void *data,
- int (*fn)(struct device *dev, void *data));
-extern int device_for_each_child_reverse(struct device *dev, void *data,
- int (*fn)(struct device *dev, void *data));
-extern struct device *device_find_child(struct device *dev, void *data,
- int (*match)(struct device *dev, void *data));
-extern struct device *device_find_child_by_name(struct device *parent,
- const char *name);
-extern int device_rename(struct device *dev, const char *new_name);
-extern int device_move(struct device *dev, struct device *new_parent,
- enum dpm_order dpm_order);
-extern const char *device_get_devnode(struct device *dev,
- umode_t *mode, kuid_t *uid, kgid_t *gid,
- const char **tmp);
+int __must_check device_register(struct device *dev);
+void device_unregister(struct device *dev);
+void device_initialize(struct device *dev);
+int __must_check device_add(struct device *dev);
+void device_del(struct device *dev);
+int device_for_each_child(struct device *dev, void *data,
+ int (*fn)(struct device *dev, void *data));
+int device_for_each_child_reverse(struct device *dev, void *data,
+ int (*fn)(struct device *dev, void *data));
+struct device *device_find_child(struct device *dev, void *data,
+ int (*match)(struct device *dev, void *data));
+struct device *device_find_child_by_name(struct device *parent,
+ const char *name);
+struct device *device_find_any_child(struct device *parent);
+
+int device_rename(struct device *dev, const char *new_name);
+int device_move(struct device *dev, struct device *new_parent,
+ enum dpm_order dpm_order);
+int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
+const char *device_get_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
+ kgid_t *gid, const char **tmp);
+int device_is_dependent(struct device *dev, void *target);
static inline bool device_supports_offline(struct device *dev)
{
return dev->bus && dev->bus->offline && dev->bus->online;
}
-extern void lock_device_hotplug(void);
-extern void unlock_device_hotplug(void);
-extern int lock_device_hotplug_sysfs(void);
-extern int device_offline(struct device *dev);
-extern int device_online(struct device *dev);
-extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
-extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+#define __device_lock_set_class(dev, name, key) \
+do { \
+ struct device *__d2 __maybe_unused = dev; \
+ lock_set_class(&__d2->mutex.dep_map, name, key, 0, _THIS_IP_); \
+} while (0)
+
+/**
+ * device_lock_set_class - Specify a temporary lock class while a device
+ * is attached to a driver
+ * @dev: device to modify
+ * @key: lock class key data
+ *
+ * This must be called with the device_lock() already held, for example
+ * from driver ->probe(). Take care to only override the default
+ * lockdep_no_validate class.
+ */
+#ifdef CONFIG_LOCKDEP
+#define device_lock_set_class(dev, key) \
+do { \
+ struct device *__d = dev; \
+ dev_WARN_ONCE(__d, !lockdep_match_class(&__d->mutex, \
+ &__lockdep_no_validate__), \
+ "overriding existing custom lock class\n"); \
+ __device_lock_set_class(__d, #key, key); \
+} while (0)
+#else
+#define device_lock_set_class(dev, key) __device_lock_set_class(dev, #key, key)
+#endif
+
+/**
+ * device_lock_reset_class - Return a device to the default lockdep novalidate state
+ * @dev: device to modify
+ *
+ * This must be called with the device_lock() already held, for example
+ * from driver ->remove().
+ */
+#define device_lock_reset_class(dev) \
+do { \
+ struct device *__d __maybe_unused = dev; \
+ lock_set_novalidate_class(&__d->mutex.dep_map, "&dev->mutex", \
+ _THIS_IP_); \
+} while (0)
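Taken together, a driver would override the class in ->probe() and restore it in ->remove(), both with device_lock() already held. A sketch with an illustrative key:

static struct lock_class_key mydrv_mutex_key;

static int mydrv_probe(struct device *dev)
{
	device_lock_set_class(dev, &mydrv_mutex_key);
	return 0;
}

static void mydrv_remove(struct device *dev)
{
	device_lock_reset_class(dev);	/* back to the novalidate class */
}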
+
+void lock_device_hotplug(void);
+void unlock_device_hotplug(void);
+int lock_device_hotplug_sysfs(void);
+int device_offline(struct device *dev);
+int device_online(struct device *dev);
+void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2);
+void device_set_node(struct device *dev, struct fwnode_handle *fwnode);
static inline int dev_num_vf(struct device *dev)
{
@@ -856,14 +983,13 @@ static inline int dev_num_vf(struct device *dev)
/*
* Root device objects for grouping under /sys/devices
*/
-extern struct device *__root_device_register(const char *name,
- struct module *owner);
+struct device *__root_device_register(const char *name, struct module *owner);
/* This is a macro to avoid include problems with THIS_MODULE */
#define root_device_register(name) \
__root_device_register(name, THIS_MODULE)
-extern void root_device_unregister(struct device *root);
+void root_device_unregister(struct device *root);
static inline void *dev_get_platdata(const struct device *dev)
{
@@ -874,37 +1000,33 @@ static inline void *dev_get_platdata(const struct device *dev)
* Manual binding of a device to driver. See drivers/base/bus.c
* for information on use.
*/
-extern int __must_check device_bind_driver(struct device *dev);
-extern void device_release_driver(struct device *dev);
-extern int __must_check device_attach(struct device *dev);
-extern int __must_check driver_attach(struct device_driver *drv);
-extern void device_initial_probe(struct device *dev);
-extern int __must_check device_reprobe(struct device *dev);
+int __must_check device_driver_attach(struct device_driver *drv,
+ struct device *dev);
+int __must_check device_bind_driver(struct device *dev);
+void device_release_driver(struct device *dev);
+int __must_check device_attach(struct device *dev);
+int __must_check driver_attach(struct device_driver *drv);
+void device_initial_probe(struct device *dev);
+int __must_check device_reprobe(struct device *dev);
-extern bool device_is_bound(struct device *dev);
+bool device_is_bound(struct device *dev);
/*
* Easy functions for dynamically creating devices on the fly
*/
-extern __printf(5, 0)
-struct device *device_create_vargs(struct class *cls, struct device *parent,
- dev_t devt, void *drvdata,
- const char *fmt, va_list vargs);
-extern __printf(5, 6)
-struct device *device_create(struct class *cls, struct device *parent,
- dev_t devt, void *drvdata,
- const char *fmt, ...);
-extern __printf(6, 7)
-struct device *device_create_with_groups(struct class *cls,
- struct device *parent, dev_t devt, void *drvdata,
- const struct attribute_group **groups,
- const char *fmt, ...);
-extern void device_destroy(struct class *cls, dev_t devt);
-
-extern int __must_check device_add_groups(struct device *dev,
- const struct attribute_group **groups);
-extern void device_remove_groups(struct device *dev,
- const struct attribute_group **groups);
+__printf(5, 6) struct device *
+device_create(struct class *cls, struct device *parent, dev_t devt,
+ void *drvdata, const char *fmt, ...);
+__printf(6, 7) struct device *
+device_create_with_groups(struct class *cls, struct device *parent, dev_t devt,
+ void *drvdata, const struct attribute_group **groups,
+ const char *fmt, ...);
+void device_destroy(struct class *cls, dev_t devt);
+
+int __must_check device_add_groups(struct device *dev,
+ const struct attribute_group **groups);
+void device_remove_groups(struct device *dev,
+ const struct attribute_group **groups);
static inline int __must_check device_add_group(struct device *dev,
const struct attribute_group *grp)
@@ -922,14 +1044,14 @@ static inline void device_remove_group(struct device *dev,
return device_remove_groups(dev, groups);
}
-extern int __must_check devm_device_add_groups(struct device *dev,
+int __must_check devm_device_add_groups(struct device *dev,
const struct attribute_group **groups);
-extern void devm_device_remove_groups(struct device *dev,
- const struct attribute_group **groups);
-extern int __must_check devm_device_add_group(struct device *dev,
- const struct attribute_group *grp);
-extern void devm_device_remove_group(struct device *dev,
- const struct attribute_group *grp);
+void devm_device_remove_groups(struct device *dev,
+ const struct attribute_group **groups);
+int __must_check devm_device_add_group(struct device *dev,
+ const struct attribute_group *grp);
+void devm_device_remove_group(struct device *dev,
+ const struct attribute_group *grp);
/*
* Platform "fixup" functions - allow the platform to have their say
@@ -946,21 +1068,21 @@ extern int (*platform_notify_remove)(struct device *dev);
* get_device - atomically increment the reference count for the device.
*
*/
-extern struct device *get_device(struct device *dev);
-extern void put_device(struct device *dev);
-extern bool kill_device(struct device *dev);
+struct device *get_device(struct device *dev);
+void put_device(struct device *dev);
+bool kill_device(struct device *dev);
#ifdef CONFIG_DEVTMPFS
-extern int devtmpfs_mount(void);
+int devtmpfs_mount(void);
#else
static inline int devtmpfs_mount(void) { return 0; }
#endif
/* drivers/base/power/shutdown.c */
-extern void device_shutdown(void);
+void device_shutdown(void);
/* debugging and troubleshooting/diagnostic helpers. */
-extern const char *dev_driver_string(const struct device *dev);
+const char *dev_driver_string(const struct device *dev);
/* Device links interface. */
struct device_link *device_link_add(struct device *consumer,
@@ -970,6 +1092,9 @@ void device_link_remove(void *consumer, struct device *supplier);
void device_links_supplier_sync_state_pause(void);
void device_links_supplier_sync_state_resume(void);
+extern __printf(3, 4)
+int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
+
/* Create alias, so I can be autoloaded. */
#define MODULE_ALIAS_CHARDEV(major,minor) \
MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h
index 1ea5e1d1545b..d8b29ccd07e5 100644
--- a/include/linux/device/bus.h
+++ b/include/linux/device/bus.h
@@ -59,6 +59,8 @@ struct fwnode_handle;
* bus supports.
* @dma_configure: Called to setup DMA configuration on a device on
* this bus.
+ * @dma_cleanup:	Called to clean up DMA configuration on a device on
+ * this bus.
* @pm: Power management operations of this bus, callback the specific
* device driver's pm-ops.
* @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
@@ -91,7 +93,7 @@ struct bus_type {
int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
int (*probe)(struct device *dev);
void (*sync_state)(struct device *dev);
- int (*remove)(struct device *dev);
+ void (*remove)(struct device *dev);
void (*shutdown)(struct device *dev);
int (*online)(struct device *dev);
@@ -103,6 +105,7 @@ struct bus_type {
int (*num_vf)(struct device *dev);
int (*dma_configure)(struct device *dev);
+ void (*dma_cleanup)(struct device *dev);
const struct dev_pm_ops *pm;
@@ -143,6 +146,7 @@ int device_match_of_node(struct device *dev, const void *np);
int device_match_fwnode(struct device *dev, const void *fwnode);
int device_match_devt(struct device *dev, const void *pdevt);
int device_match_acpi_dev(struct device *dev, const void *adev);
+int device_match_acpi_handle(struct device *dev, const void *handle);
int device_match_any(struct device *dev, const void *unused);
/* iterator helpers for buses */
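
The bus_type::remove conversion above drops a return value the driver core
never acted on. A hedged before/after sketch for a hypothetical bus, where
to_foo_driver() and to_foo_device() are assumed container_of() wrappers:

    /* Before: the returned error was silently ignored by the core. */
    static int foo_bus_remove(struct device *dev)
    {
            struct foo_driver *fdrv = to_foo_driver(dev->driver);

            if (fdrv->remove)
                    fdrv->remove(to_foo_device(dev));
            return 0;
    }

    /* After: the void signature makes the "cannot fail" contract explicit. */
    static void foo_bus_remove(struct device *dev)
    {
            struct foo_driver *fdrv = to_foo_driver(dev->driver);

            if (fdrv->remove)
                    fdrv->remove(to_foo_device(dev));
    }
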
diff --git a/include/linux/device/class.h b/include/linux/device/class.h
index e8d470c457d1..e61ec5502019 100644
--- a/include/linux/device/class.h
+++ b/include/linux/device/class.h
@@ -256,6 +256,20 @@ extern void class_destroy(struct class *cls);
/* This is a #define to keep the compiler from merging different
* instances of the __key variable */
+
+/**
+ * class_create - create a struct class structure
+ * @owner: pointer to the module that is to "own" this struct class
+ * @name: pointer to a string for the name of this class.
+ *
+ * This is used to create a struct class pointer that can then be used
+ * in calls to device_create().
+ *
+ * Returns &struct class pointer on success, or ERR_PTR() on error.
+ *
+ * Note, the pointer created here is to be destroyed when finished by
+ * making a call to class_destroy().
+ */
#define class_create(owner, name) \
({ \
static struct lock_class_key __key; \
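
The kernel-doc added above describes the usual lifecycle. A hedged sketch of
the class_create()/device_create()/class_destroy() pairing; the "foo" class
and the MKDEV(0, 0) devt are placeholders, and device_create() error handling
is elided for brevity:

    static struct class *foo_class;

    static int __init foo_init(void)
    {
            foo_class = class_create(THIS_MODULE, "foo");
            if (IS_ERR(foo_class))
                    return PTR_ERR(foo_class);

            device_create(foo_class, NULL, MKDEV(0, 0), NULL, "foo0");
            return 0;
    }

    static void __exit foo_exit(void)
    {
            device_destroy(foo_class, MKDEV(0, 0));
            class_destroy(foo_class);
    }
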
diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h
index 1188260f9a02..2114d65b862f 100644
--- a/include/linux/device/driver.h
+++ b/include/linux/device/driver.h
@@ -18,6 +18,7 @@
#include <linux/klist.h>
#include <linux/pm.h>
#include <linux/device/bus.h>
+#include <linux/module.h>
/**
* enum probe_type - device driver probe type to try
@@ -75,7 +76,7 @@ enum probe_type {
* @resume: Called to bring a device from sleep mode.
* @groups: Default attributes that get created by the driver core
* automatically.
- * @dev_groups: Additional attributes attached to device instance once the
+ * @dev_groups: Additional attributes attached to device instance once
* it is bound to the driver.
* @pm: Power management operations of the device which matched
* this driver.
@@ -128,6 +129,7 @@ extern struct device_driver *driver_find(const char *name,
struct bus_type *bus);
extern int driver_probe_done(void);
extern void wait_for_device_probe(void);
+void __init wait_for_init_devices_probe(void);
/* sysfs interface for exporting driver attributes */
@@ -150,6 +152,8 @@ extern int __must_check driver_create_file(struct device_driver *driver,
extern void driver_remove_file(struct device_driver *driver,
const struct driver_attribute *attr);
+int driver_set_override(struct device *dev, const char **override,
+ const char *s, size_t len);
extern int __must_check driver_for_each_device(struct device_driver *drv,
struct device *start,
void *data,
@@ -236,9 +240,9 @@ driver_find_device_by_acpi_dev(struct device_driver *drv, const void *adev)
}
#endif
+extern int driver_deferred_probe_timeout;
void driver_deferred_probe_add(struct device *dev);
int driver_deferred_probe_check_state(struct device *dev);
-int driver_deferred_probe_check_state_continue(struct device *dev);
void driver_init(void);
/**
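
driver_set_override() declared above centralizes the sysfs driver_override
bookkeeping that buses used to open-code. A hedged sketch of a store method
built on it; struct foo_device and to_foo_device() are assumptions, and
fdev->driver_override is assumed to be a const char * field:

    static ssize_t driver_override_store(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf, size_t count)
    {
            struct foo_device *fdev = to_foo_device(dev);
            int ret;

            /* Duplicates @buf and frees any previously set override. */
            ret = driver_set_override(dev, &fdev->driver_override, buf, count);
            if (ret)
                    return ret;
            return count;
    }
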
diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h
index fa35b52e0002..d02f32b7514e 100644
--- a/include/linux/device_cgroup.h
+++ b/include/linux/device_cgroup.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/fs.h>
-#include <linux/bpf-cgroup.h>
#define DEVCG_ACC_MKNOD 1
#define DEVCG_ACC_READ 2
@@ -11,16 +10,10 @@
#define DEVCG_DEV_CHAR 2
#define DEVCG_DEV_ALL 4 /* this represents all devices */
-#ifdef CONFIG_CGROUP_DEVICE
-int devcgroup_check_permission(short type, u32 major, u32 minor,
- short access);
-#else
-static inline int devcgroup_check_permission(short type, u32 major, u32 minor,
- short access)
-{ return 0; }
-#endif
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
+int devcgroup_check_permission(short type, u32 major, u32 minor,
+ short access);
static inline int devcgroup_inode_permission(struct inode *inode, int mask)
{
short type, access = 0;
@@ -51,6 +44,9 @@ static inline int devcgroup_inode_mknod(int mode, dev_t dev)
if (!S_ISBLK(mode) && !S_ISCHR(mode))
return 0;
+ if (S_ISCHR(mode) && dev == WHITEOUT_DEV)
+ return 0;
+
if (S_ISBLK(mode))
type = DEVCG_DEV_BLOCK;
else
@@ -61,6 +57,9 @@ static inline int devcgroup_inode_mknod(int mode, dev_t dev)
}
#else
+static inline int devcgroup_check_permission(short type, u32 major, u32 minor,
+ short access)
+{ return 0; }
static inline int devcgroup_inode_permission(struct inode *inode, int mask)
{ return 0; }
static inline int devcgroup_inode_mknod(int mode, dev_t dev)
diff --git a/include/linux/devm-helpers.h b/include/linux/devm-helpers.h
new file mode 100644
index 000000000000..74891802200d
--- /dev/null
+++ b/include/linux/devm-helpers.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __LINUX_DEVM_HELPERS_H
+#define __LINUX_DEVM_HELPERS_H
+
+/*
+ * Functions which automatically cancel operations or release resources upon
+ * driver detach.
+ *
+ * These should be helpful to avoid mixing the manual and devm-based resource
+ * management, which can be a source of annoying, rarely occurring,
+ * hard-to-reproduce bugs.
+ *
+ * Please take into account that devm-based cancellation may be performed some
+ * time after remove() has run.
+ *
+ * Thus mixing devm and manual resource management can easily cause problems
+ * when unwinding operations with dependencies. An IRQ scheduling a work item
+ * is a typical example where IRQs are often devm-managed and workqueues are
+ * manually cleaned at remove(). If IRQs are not manually freed at remove()
+ * (and this is often the case when we use devm for IRQs) we have a period of
+ * time after remove() - and before devm-managed IRQs are freed - where a new
+ * IRQ may fire and schedule a work item which won't be cancelled because
+ * remove() has already run.
+ */
+
+#include <linux/device.h>
+#include <linux/workqueue.h>
+
+static inline void devm_delayed_work_drop(void *res)
+{
+ cancel_delayed_work_sync(res);
+}
+
+/**
+ * devm_delayed_work_autocancel - Resource-managed delayed work allocation
+ * @dev: Device which lifetime work is bound to
+ * @w: Work item to be queued
+ * @worker: Worker function
+ *
+ * Initialize delayed work which is automatically cancelled when driver is
+ * detached. A few drivers need delayed work which must be cancelled before
+ * driver is detached to avoid accessing removed resources.
+ * devm_delayed_work_autocancel() can be used to omit the explicit
+ * cancellation when the driver is detached.
+ */
+static inline int devm_delayed_work_autocancel(struct device *dev,
+ struct delayed_work *w,
+ work_func_t worker)
+{
+ INIT_DELAYED_WORK(w, worker);
+ return devm_add_action(dev, devm_delayed_work_drop, w);
+}
+
+static inline void devm_work_drop(void *res)
+{
+ cancel_work_sync(res);
+}
+
+/**
+ * devm_work_autocancel - Resource-managed work allocation
+ * @dev: Device which lifetime work is bound to
+ * @w: Work to be added (and automatically cancelled)
+ * @worker: Worker function
+ *
+ * Initialize work which is automatically cancelled when driver is detached.
+ * A few drivers need to queue work which must be cancelled before driver
+ * is detached to avoid accessing removed resources.
+ * devm_work_autocancel() can be used to omit the explicit
+ * cancellation when the driver is detached.
+ */
+static inline int devm_work_autocancel(struct device *dev,
+ struct work_struct *w,
+ work_func_t worker)
+{
+ INIT_WORK(w, worker);
+ return devm_add_action(dev, devm_work_drop, w);
+}
+
+#endif
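
A hedged usage sketch for the helpers above: a polling driver whose delayed
work re-arms itself and must not outlive the driver. All names are
hypothetical:

    struct foo_data {
            struct delayed_work poll_work;
    };

    static void foo_poll(struct work_struct *work)
    {
            struct foo_data *d = container_of(to_delayed_work(work),
                                              struct foo_data, poll_work);

            /* ... read the hardware ..., then re-arm the poll. */
            schedule_delayed_work(&d->poll_work, msecs_to_jiffies(500));
    }

    static int foo_probe(struct platform_device *pdev)
    {
            struct foo_data *d;
            int ret;

            d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
            if (!d)
                    return -ENOMEM;

            /* The work is cancelled automatically on driver detach. */
            ret = devm_delayed_work_autocancel(&pdev->dev, &d->poll_work,
                                               foo_poll);
            if (ret)
                    return ret;

            schedule_delayed_work(&d->poll_work, msecs_to_jiffies(500));
            return 0;
    }
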
diff --git a/include/linux/dfl.h b/include/linux/dfl.h
new file mode 100644
index 000000000000..431636a0dc78
--- /dev/null
+++ b/include/linux/dfl.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for DFL driver and device API
+ *
+ * Copyright (C) 2020 Intel Corporation, Inc.
+ */
+
+#ifndef __LINUX_DFL_H
+#define __LINUX_DFL_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+/**
+ * enum dfl_id_type - define the DFL FIU types
+ */
+enum dfl_id_type {
+ FME_ID = 0,
+ PORT_ID = 1,
+ DFL_ID_MAX,
+};
+
+/**
+ * struct dfl_device - represent a dfl device on the dfl bus
+ *
+ * @dev: generic device interface.
+ * @id: id of the dfl device.
+ * @type: type of DFL FIU of the device. See enum dfl_id_type.
+ * @feature_id: feature identifier local to its DFL FIU type.
+ * @revision: revision of this dfl device's feature.
+ * @mmio_res: mmio resource of this dfl device.
+ * @irqs: list of Linux IRQ numbers of this dfl device.
+ * @num_irqs: number of IRQs supported by this dfl device.
+ * @cdev: pointer to DFL FPGA container device this dfl device belongs to.
+ * @id_entry: matched id entry in dfl driver's id table.
+ */
+struct dfl_device {
+ struct device dev;
+ int id;
+ u16 type;
+ u16 feature_id;
+ u8 revision;
+ struct resource mmio_res;
+ int *irqs;
+ unsigned int num_irqs;
+ struct dfl_fpga_cdev *cdev;
+ const struct dfl_device_id *id_entry;
+};
+
+/**
+ * struct dfl_driver - represent a dfl device driver
+ *
+ * @drv: driver model structure.
+ * @id_table: pointer to table of device IDs the driver is interested in.
+ * { } member terminated.
+ * @probe: mandatory callback for device binding.
+ * @remove: callback for device unbinding.
+ */
+struct dfl_driver {
+ struct device_driver drv;
+ const struct dfl_device_id *id_table;
+
+ int (*probe)(struct dfl_device *dfl_dev);
+ void (*remove)(struct dfl_device *dfl_dev);
+};
+
+#define to_dfl_dev(d) container_of(d, struct dfl_device, dev)
+#define to_dfl_drv(d) container_of(d, struct dfl_driver, drv)
+
+/*
+ * use a macro to avoid include chaining to get THIS_MODULE.
+ */
+#define dfl_driver_register(drv) \
+ __dfl_driver_register(drv, THIS_MODULE)
+int __dfl_driver_register(struct dfl_driver *dfl_drv, struct module *owner);
+void dfl_driver_unregister(struct dfl_driver *dfl_drv);
+
+/*
+ * module_dfl_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit().
+ */
+#define module_dfl_driver(__dfl_driver) \
+ module_driver(__dfl_driver, dfl_driver_register, \
+ dfl_driver_unregister)
+
+#endif /* __LINUX_DFL_H */
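
A hedged skeleton of a driver for this bus; the feature_id value and the
"foo-dfl" name are invented for illustration:

    static const struct dfl_device_id foo_dfl_ids[] = {
            { FME_ID, 0x10 },       /* hypothetical feature_id */
            { }
    };
    MODULE_DEVICE_TABLE(dfl, foo_dfl_ids);

    static int foo_dfl_probe(struct dfl_device *dfl_dev)
    {
            /* dfl_dev->mmio_res and dfl_dev->irqs describe the feature. */
            return 0;
    }

    static struct dfl_driver foo_dfl_driver = {
            .drv      = { .name = "foo-dfl" },
            .id_table = foo_dfl_ids,
            .probe    = foo_dfl_probe,
    };
    module_dfl_driver(foo_dfl_driver);
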
diff --git a/include/linux/digsig.h b/include/linux/digsig.h
index 594fc66a395a..2ace69e41088 100644
--- a/include/linux/digsig.h
+++ b/include/linux/digsig.h
@@ -29,7 +29,7 @@ struct pubkey_hdr {
uint32_t timestamp; /* key made, always 0 for now */
uint8_t algo;
uint8_t nmpi;
- char mpi[0];
+ char mpi[];
} __packed;
struct signature_hdr {
@@ -39,7 +39,7 @@ struct signature_hdr {
uint8_t hash;
uint8_t keyid[8];
uint8_t nmpi;
- char mpi[0];
+ char mpi[];
} __packed;
#if defined(CONFIG_SIGNATURE) || defined(CONFIG_SIGNATURE_MODULE)
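
The [0] to [] changes above convert zero-length arrays to C99 flexible array
members. Allocation sizing for such structs typically goes through
struct_size() from <linux/overflow.h>; a hedged, purely illustrative helper
(digsig itself parses these headers rather than allocating them):

    static struct pubkey_hdr *alloc_pubkey(unsigned int mpi_bytes)
    {
            struct pubkey_hdr *hdr;

            /* struct_size() does the header-plus-trailing-array math safely. */
            hdr = kzalloc(struct_size(hdr, mpi, mpi_bytes), GFP_KERNEL);
            return hdr;
    }
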
diff --git a/include/linux/dim.h b/include/linux/dim.h
index b698266d0035..6c5733981563 100644
--- a/include/linux/dim.h
+++ b/include/linux/dim.h
@@ -21,7 +21,7 @@
* We consider 10% difference as significant.
*/
#define IS_SIGNIFICANT_DIFF(val, ref) \
- (((100UL * abs((val) - (ref))) / (ref)) > 10)
+ ((ref) && (((100UL * abs((val) - (ref))) / (ref)) > 10))
/*
* Calculate the gap between two values.
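
The added (ref) && guard above keeps the comparison from dividing by zero when
the reference sample is 0. Worked values, as a comment-style sketch:

    /*
     * IS_SIGNIFICANT_DIFF(5, 0)     -> false (previously a divide-by-zero)
     * IS_SIGNIFICANT_DIFF(115, 100) -> true  (15% > 10%)
     * IS_SIGNIFICANT_DIFF(105, 100) -> false (5% <= 10%)
     */
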
diff --git a/include/linux/dio.h b/include/linux/dio.h
index 1470d1d943b4..5abd07361eb5 100644
--- a/include/linux/dio.h
+++ b/include/linux/dio.h
@@ -247,11 +247,6 @@ extern int dio_create_sysfs_dev_files(struct dio_dev *);
/* New-style probing */
extern int dio_register_driver(struct dio_driver *);
extern void dio_unregister_driver(struct dio_driver *);
-extern const struct dio_device_id *dio_match_device(const struct dio_device_id *ids, const struct dio_dev *z);
-static inline struct dio_driver *dio_dev_driver(const struct dio_dev *d)
-{
- return d->driver;
-}
#define dio_resource_start(d) ((d)->resource.start)
#define dio_resource_end(d) ((d)->resource.end)
diff --git a/include/linux/dirent.h b/include/linux/dirent.h
index fc61f3cff72f..99002220cd45 100644
--- a/include/linux/dirent.h
+++ b/include/linux/dirent.h
@@ -7,7 +7,7 @@ struct linux_dirent64 {
s64 d_off;
unsigned short d_reclen;
unsigned char d_type;
- char d_name[0];
+ char d_name[];
};
#endif
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
index ff951e9f6f20..c6bc2b5ee7e6 100644
--- a/include/linux/dlm.h
+++ b/include/linux/dlm.h
@@ -56,9 +56,6 @@ struct dlm_lockspace_ops {
* DLM_LSFL_TIMEWARN
* The dlm should emit netlink messages if locks have been waiting
* for a configurable amount of time. (Unused.)
- * DLM_LSFL_FS
- * The lockspace user is in the kernel (i.e. filesystem). Enables
- * direct bast/cast callbacks.
* DLM_LSFL_NEWEXCL
* dlm_new_lockspace() should return -EEXIST if the lockspace exists.
*
@@ -134,7 +131,7 @@ int dlm_lock(dlm_lockspace_t *lockspace,
int mode,
struct dlm_lksb *lksb,
uint32_t flags,
- void *name,
+ const void *name,
unsigned int namelen,
uint32_t parent_lkid,
void (*lockast) (void *astarg),
diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h
index 3c8b7d274bd9..15d9e15ca830 100644
--- a/include/linux/dm-bufio.h
+++ b/include/linux/dm-bufio.h
@@ -18,13 +18,19 @@ struct dm_bufio_client;
struct dm_buffer;
/*
+ * Flags for dm_bufio_client_create
+ */
+#define DM_BUFIO_CLIENT_NO_SLEEP 0x1
+
+/*
* Create a buffered IO cache on a given device
*/
struct dm_bufio_client *
dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
unsigned reserved_buffers, unsigned aux_size,
void (*alloc_callback)(struct dm_buffer *),
- void (*write_callback)(struct dm_buffer *));
+ void (*write_callback)(struct dm_buffer *),
+ unsigned int flags);
/*
* Release a buffered IO cache.
@@ -119,6 +125,11 @@ int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c);
int dm_bufio_issue_flush(struct dm_bufio_client *c);
/*
+ * Send a discard request to the underlying device.
+ */
+int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count);
+
+/*
* Like dm_bufio_release but also move the buffer to the new
* block. dm_bufio_write_dirty_buffers is needed to commit the new block.
*/
@@ -132,12 +143,20 @@ void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block);
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block);
/*
+ * Free the given range of buffers.
+ * This is just a hint; if a buffer is in use or dirty, this function
+ * does nothing.
+ */
+void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks);
+
+/*
* Set the minimum number of buffers before cleanup happens.
*/
void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n);
unsigned dm_bufio_get_block_size(struct dm_bufio_client *c);
sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
+struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c);
sector_t dm_bufio_get_block_number(struct dm_buffer *b);
void *dm_bufio_get_block_data(struct dm_buffer *b);
void *dm_bufio_get_aux_data(struct dm_buffer *b);
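
Existing callers of dm_bufio_client_create() gain a trailing flags argument.
A hedged sketch of a caller; the geometry values are placeholders:

    static struct dm_bufio_client *foo_open_cache(struct block_device *bdev)
    {
            /*
             * 512-byte blocks, one reserved buffer, no aux data, no
             * callbacks. Pass 0 for flags, or DM_BUFIO_CLIENT_NO_SLEEP for
             * clients that cannot sleep under the bufio locks.
             */
            return dm_bufio_client_create(bdev, 512, 1, 0, NULL, NULL, 0);
    }
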
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
index a52c6580cc9a..8e1c4ab5df04 100644
--- a/include/linux/dm-io.h
+++ b/include/linux/dm-io.h
@@ -13,6 +13,7 @@
#ifdef __KERNEL__
#include <linux/types.h>
+#include <linux/blk_types.h>
struct dm_io_region {
struct block_device *bdev;
@@ -57,8 +58,7 @@ struct dm_io_notify {
*/
struct dm_io_client;
struct dm_io_request {
- int bi_op; /* REQ_OP */
- int bi_op_flags; /* req_flag_bits */
+ blk_opf_t bi_opf; /* Request type and flags */
struct dm_io_memory mem; /* Memory to use for io */
struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */
struct dm_io_client *client; /* Client memory handler */
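
With bi_op and bi_op_flags merged into bi_opf, callers now build the request
with a single blk_opf_t value. A hedged synchronous-read sketch;
foo_read_block() is invented, while dm_io(), DM_IO_KMEM and the struct
layout are the existing dm-io API:

    static int foo_read_block(struct dm_io_client *client,
                              struct dm_io_region *where, void *data)
    {
            struct dm_io_request req = {
                    .bi_opf       = REQ_OP_READ,  /* was .bi_op + .bi_op_flags */
                    .mem.type     = DM_IO_KMEM,
                    .mem.ptr.addr = data,
                    .notify.fn    = NULL,         /* NULL fn means synchronous */
                    .client       = client,
            };

            return dm_io(&req, 1, where, NULL);
    }
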
diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h
index e42de7750c88..c1707ee5b540 100644
--- a/include/linux/dm-kcopyd.h
+++ b/include/linux/dm-kcopyd.h
@@ -51,6 +51,7 @@ MODULE_PARM_DESC(name, description)
struct dm_kcopyd_client;
struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle);
void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc);
+void dm_kcopyd_client_flush(struct dm_kcopyd_client *kc);
/*
* Submit a copy job to kcopyd. This is built on top of the
diff --git a/include/linux/dm-verity-loadpin.h b/include/linux/dm-verity-loadpin.h
new file mode 100644
index 000000000000..552b817ab102
--- /dev/null
+++ b/include/linux/dm-verity-loadpin.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_DM_VERITY_LOADPIN_H
+#define __LINUX_DM_VERITY_LOADPIN_H
+
+#include <linux/list.h>
+
+struct block_device;
+
+extern struct list_head dm_verity_loadpin_trusted_root_digests;
+
+struct dm_verity_loadpin_trusted_root_digest {
+ struct list_head node;
+ unsigned int len;
+ u8 data[];
+};
+
+#if IS_ENABLED(CONFIG_SECURITY_LOADPIN_VERITY)
+bool dm_verity_loadpin_is_bdev_trusted(struct block_device *bdev);
+#else
+static inline bool dm_verity_loadpin_is_bdev_trusted(struct block_device *bdev)
+{
+ return false;
+}
+#endif
+
+#endif /* __LINUX_DM_VERITY_LOADPIN_H */
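
A hedged sketch of how a LoadPin-style security hook might consult the new
helper; foo_file_is_trusted() is hypothetical:

    static bool foo_file_is_trusted(struct file *file)
    {
            struct super_block *sb = file_inode(file)->i_sb;

            /* Trusted when backed by a dm-verity device with a pinned digest. */
            return sb->s_bdev && dm_verity_loadpin_is_bdev_trusted(sb->s_bdev);
    }
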
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index abf5459a5b9d..71731796c8c3 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -13,6 +13,7 @@
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__
+#include <linux/iosys-map.h>
#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
@@ -43,18 +44,6 @@ struct dma_buf_ops {
bool cache_sgt_mapping;
/**
- * @dynamic_mapping:
- *
- * If true the framework makes sure that the map/unmap_dma_buf
- * callbacks are always called with the dma_resv object locked.
- *
- * If false the framework makes sure that the map/unmap_dma_buf
- * callbacks are always called without the dma_resv object locked.
- * Mutual exclusive with @cache_sgt_mapping.
- */
- bool dynamic_mapping;
-
- /**
* @attach:
*
* This is called from dma_buf_attach() to make sure that a given
@@ -65,7 +54,7 @@ struct dma_buf_ops {
* device), and otherwise need to fail the attach operation.
*
* The exporter should also in general check whether the current
- * allocation fullfills the DMA constraints of the new device. If this
+ * allocation fulfills the DMA constraints of the new device. If this
* is not the case, and the allocation cannot be moved, it should also
* fail the attach operation.
*
@@ -94,13 +83,50 @@ struct dma_buf_ops {
void (*detach)(struct dma_buf *, struct dma_buf_attachment *);
/**
+ * @pin:
+ *
+ * This is called by dma_buf_pin() and lets the exporter know that the
+ * DMA-buf can't be moved any more. Ideally, the exporter should
+ * pin the buffer so that it is generally accessible by all
+ * devices.
+ *
+	 * This is called with the &dmabuf.resv object locked and is mutually
+ * exclusive with @cache_sgt_mapping.
+ *
+ * This is called automatically for non-dynamic importers from
+ * dma_buf_attach().
+ *
+	 * Note that, similar to non-dynamic exporters in their @map_dma_buf
+	 * callback, the driver must guarantee that the memory is available for
+ * use and cleared of any old data by the time this function returns.
+ * Drivers which pipeline their buffer moves internally must wait for
+ * all moves and clears to complete.
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
+ int (*pin)(struct dma_buf_attachment *attach);
+
+ /**
+ * @unpin:
+ *
+ * This is called by dma_buf_unpin() and lets the exporter know that the
+ * DMA-buf can be moved again.
+ *
+	 * This is called with the dmabuf->resv object locked and is mutually
+ * exclusive with @cache_sgt_mapping.
+ *
+ * This callback is optional.
+ */
+ void (*unpin)(struct dma_buf_attachment *attach);
+
+ /**
* @map_dma_buf:
*
* This is called by dma_buf_map_attachment() and is used to map a
* shared &dma_buf into device address space, and it is mandatory. It
- * can only be called if @attach has been called successfully. This
- * essentially pins the DMA buffer into place, and it cannot be moved
- * any more
+ * can only be called if @attach has been called successfully.
*
* This call may sleep, e.g. when the backing storage first needs to be
* allocated, or moved to a location suitable for all currently attached
@@ -124,15 +150,31 @@ struct dma_buf_ops {
* This is always called with the dmabuf->resv object locked when
* the dynamic_mapping flag is true.
*
+	 * Note that for non-dynamic exporters the driver must guarantee that
+	 * the memory is available for use and cleared of any old data by
+ * the time this function returns. Drivers which pipeline their buffer
+ * moves internally must wait for all moves and clears to complete.
+ * Dynamic exporters do not need to follow this rule: For non-dynamic
+ * importers the buffer is already pinned through @pin, which has the
+	 * same requirements. Dynamic importers, on the other hand, must obey the
+ * dma_resv fences.
+ *
* Returns:
*
- * A &sg_table scatter list of or the backing storage of the DMA buffer,
+ * A &sg_table scatter list of the backing storage of the DMA buffer,
* already mapped into the device address space of the &device attached
- * with the provided &dma_buf_attachment.
+ * with the provided &dma_buf_attachment. The addresses and lengths in
+ * the scatter list are PAGE_SIZE aligned.
*
* On failure, returns a negative error value wrapped into a pointer.
* May also return -EINTR when a signal was received while being
* blocked.
+ *
+ * Note that exporters should not try to cache the scatter list, or
+ * return the same one for multiple calls. Caching is done either by the
+ * DMA-BUF code (for non-dynamic importers) or the importer. Ownership
+ * of the scatter list is transferred to the caller, and returned by
+ * @unmap_dma_buf.
*/
struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
enum dma_data_direction);
@@ -141,9 +183,8 @@ struct dma_buf_ops {
*
* This is called by dma_buf_unmap_attachment() and should unmap and
* release the &sg_table allocated in @map_dma_buf, and it is mandatory.
- * It should also unpin the backing storage if this is the last mapping
- * of the DMA buffer, it the exporter supports backing storage
- * migration.
+ * For static dma_buf handling this might also unpin the backing
+ * storage if this is the last mapping of the DMA buffer.
*/
void (*unmap_dma_buf)(struct dma_buf_attachment *,
struct sg_table *,
@@ -165,24 +206,19 @@ struct dma_buf_ops {
* @begin_cpu_access:
*
* This is called from dma_buf_begin_cpu_access() and allows the
- * exporter to ensure that the memory is actually available for cpu
- * access - the exporter might need to allocate or swap-in and pin the
- * backing storage. The exporter also needs to ensure that cpu access is
- * coherent for the access direction. The direction can be used by the
- * exporter to optimize the cache flushing, i.e. access with a different
+ * exporter to ensure that the memory is actually coherent for cpu
+ * access. The exporter also needs to ensure that cpu access is coherent
+ * for the access direction. The direction can be used by the exporter
+ * to optimize the cache flushing, i.e. access with a different
* direction (read instead of write) might return stale or even bogus
* data (e.g. when the exporter needs to copy the data to temporary
* storage).
*
- * This callback is optional.
+ * Note that this is both called through the DMA_BUF_IOCTL_SYNC IOCTL
+ * command for userspace mappings established through @mmap, and also
+ * for kernel mappings established with @vmap.
*
- * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
- * from userspace (where storage shouldn't be pinned to avoid handing
- * de-factor mlock rights to userspace) and for the kernel-internal
- * users of the various kmap interfaces, where the backing storage must
- * be pinned to guarantee that the atomic kmap calls can succeed. Since
- * there's no in-kernel users of the kmap interfaces yet this isn't a
- * real problem.
+ * This callback is optional.
*
* Returns:
*
@@ -198,9 +234,7 @@ struct dma_buf_ops {
*
* This is called from dma_buf_end_cpu_access() when the importer is
* done accessing the CPU. The exporter can use this to flush caches and
- * unpin any resources pinned in @begin_cpu_access.
- * The result of any dma_buf kmap calls after end_cpu_access is
- * undefined.
+ * undo anything else done in @begin_cpu_access.
*
* This callback is optional.
*
@@ -218,7 +252,7 @@ struct dma_buf_ops {
* This callback is used by the dma_buf_mmap() function
*
* Note that the mapping needs to be incoherent, userspace is expected
- * to braket CPU access using the DMA_BUF_IOCTL_SYNC interface.
+ * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
*
* Because dma-buf buffers have invariant size over their lifetime, the
* dma-buf core checks whether a vma is too large and rejects such
@@ -249,32 +283,12 @@ struct dma_buf_ops {
*/
int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
- void *(*vmap)(struct dma_buf *);
- void (*vunmap)(struct dma_buf *, void *vaddr);
+ int (*vmap)(struct dma_buf *dmabuf, struct iosys_map *map);
+ void (*vunmap)(struct dma_buf *dmabuf, struct iosys_map *map);
};
/**
* struct dma_buf - shared buffer object
- * @size: size of the buffer
- * @file: file pointer used for sharing buffers across, and for refcounting.
- * @attachments: list of dma_buf_attachment that denotes all devices attached,
- * protected by dma_resv lock.
- * @ops: dma_buf_ops associated with this buffer object.
- * @lock: used internally to serialize list manipulation, attach/detach and
- * vmap/unmap
- * @vmapping_counter: used internally to refcnt the vmaps
- * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
- * @exp_name: name of the exporter; useful for debugging.
- * @name: userspace-provided name; useful for accounting and debugging,
- * protected by @resv.
- * @owner: pointer to exporter module; used for refcounting when exporter is a
- * kernel module.
- * @list_node: node for dma_buf accounting and debugging.
- * @priv: exporter specific private data for this buffer object.
- * @resv: reservation object linked to this dma-buf
- * @poll: for userspace poll support
- * @cb_excl: for userspace poll support
- * @cb_shared: for userspace poll support
*
* This represents a shared buffer, created by calling dma_buf_export(). The
* userspace representation is a normal file descriptor, which can be created by
@@ -286,29 +300,196 @@ struct dma_buf_ops {
* Device DMA access is handled by the separate &struct dma_buf_attachment.
*/
struct dma_buf {
+ /**
+ * @size:
+ *
+ * Size of the buffer; invariant over the lifetime of the buffer.
+ */
size_t size;
+
+ /**
+ * @file:
+ *
+ * File pointer used for sharing buffers across, and for refcounting.
+ * See dma_buf_get() and dma_buf_put().
+ */
struct file *file;
+
+ /**
+ * @attachments:
+ *
+ * List of dma_buf_attachment that denotes all devices attached,
+ * protected by &dma_resv lock @resv.
+ */
struct list_head attachments;
+
+ /** @ops: dma_buf_ops associated with this buffer object. */
const struct dma_buf_ops *ops;
+
+ /**
+ * @lock:
+ *
+ * Used internally to serialize list manipulation, attach/detach and
+	 * vmap/unmap. Note that in many cases this is superseded by
+ * dma_resv_lock() on @resv.
+ */
struct mutex lock;
+
+ /**
+ * @vmapping_counter:
+ *
+ * Used internally to refcnt the vmaps returned by dma_buf_vmap().
+ * Protected by @lock.
+ */
unsigned vmapping_counter;
- void *vmap_ptr;
+
+ /**
+ * @vmap_ptr:
+ * The current vmap ptr if @vmapping_counter > 0. Protected by @lock.
+ */
+ struct iosys_map vmap_ptr;
+
+ /**
+ * @exp_name:
+ *
+ * Name of the exporter; useful for debugging. See the
+ * DMA_BUF_SET_NAME IOCTL.
+ */
const char *exp_name;
+
+ /**
+ * @name:
+ *
+ * Userspace-provided name; useful for accounting and debugging,
+ * protected by dma_resv_lock() on @resv and @name_lock for read access.
+ */
const char *name;
+
+	/** @name_lock: Spinlock to protect @name for read access. */
+ spinlock_t name_lock;
+
+ /**
+ * @owner:
+ *
+ * Pointer to exporter module; used for refcounting when exporter is a
+ * kernel module.
+ */
struct module *owner;
+
+ /** @list_node: node for dma_buf accounting and debugging. */
struct list_head list_node;
+
+ /** @priv: exporter specific private data for this buffer object. */
void *priv;
+
+ /**
+ * @resv:
+ *
+ * Reservation object linked to this dma-buf.
+ *
+ * IMPLICIT SYNCHRONIZATION RULES:
+ *
+ * Drivers which support implicit synchronization of buffer access as
+ * e.g. exposed in `Implicit Fence Poll Support`_ must follow the
+ * below rules.
+ *
+ * - Drivers must add a read fence through dma_resv_add_fence() with the
+ * DMA_RESV_USAGE_READ flag for anything the userspace API considers a
+ * read access. This highly depends upon the API and window system.
+ *
+ * - Similarly drivers must add a write fence through
+ * dma_resv_add_fence() with the DMA_RESV_USAGE_WRITE flag for
+ * anything the userspace API considers write access.
+ *
+ * - Drivers may just always add a write fence, since that only
+	 *   causes unnecessary synchronization, but no correctness issues.
+ *
+ * - Some drivers only expose a synchronous userspace API with no
+ * pipelining across drivers. These do not set any fences for their
+ * access. An example here is v4l.
+ *
+ * - Driver should use dma_resv_usage_rw() when retrieving fences as
+ * dependency for implicit synchronization.
+ *
+ * DYNAMIC IMPORTER RULES:
+ *
+ * Dynamic importers, see dma_buf_attachment_is_dynamic(), have
+ * additional constraints on how they set up fences:
+ *
+ * - Dynamic importers must obey the write fences and wait for them to
+ * signal before allowing access to the buffer's underlying storage
+ * through the device.
+ *
+ * - Dynamic importers should set fences for any access that they can't
+ * disable immediately from their &dma_buf_attach_ops.move_notify
+ * callback.
+ *
+ * IMPORTANT:
+ *
+ * All drivers and memory management related functions must obey the
+ * struct dma_resv rules, specifically the rules for updating and
+ * obeying fences. See enum dma_resv_usage for further descriptions.
+ */
struct dma_resv *resv;
- /* poll support */
+ /** @poll: for userspace poll support */
wait_queue_head_t poll;
+ /** @cb_in: for userspace poll support */
+ /** @cb_out: for userspace poll support */
struct dma_buf_poll_cb_t {
struct dma_fence_cb cb;
wait_queue_head_t *poll;
__poll_t active;
- } cb_excl, cb_shared;
+ } cb_in, cb_out;
+#ifdef CONFIG_DMABUF_SYSFS_STATS
+ /**
+ * @sysfs_entry:
+ *
+ * For exposing information about this buffer in sysfs. See also
+ * `DMA-BUF statistics`_ for the uapi this enables.
+ */
+ struct dma_buf_sysfs_entry {
+ struct kobject kobj;
+ struct dma_buf *dmabuf;
+ } *sysfs_entry;
+#endif
+};
+
+/**
+ * struct dma_buf_attach_ops - importer operations for an attachment
+ *
+ * Attachment operations implemented by the importer.
+ */
+struct dma_buf_attach_ops {
+ /**
+ * @allow_peer2peer:
+ *
+ * If this is set to true the importer must be able to handle peer
+ * resources without struct pages.
+ */
+ bool allow_peer2peer;
+
+ /**
+ * @move_notify: [optional] notification that the DMA-buf is moving
+ *
+ * If this callback is provided the framework can avoid pinning the
+	 * backing store while mappings exist.
+ *
+ * This callback is called with the lock of the reservation object
+ * associated with the dma_buf held and the mapping function must be
+ * called with this lock held as well. This makes sure that no mapping
+ * is created concurrently with an ongoing move operation.
+ *
+ * Mappings stay valid and are not directly affected by this callback.
+ * But the DMA-buf can now be in a different physical location, so all
+ * mappings should be destroyed and re-created as soon as possible.
+ *
+ * New mappings can be created after this callback returns, and will
+ * point to the new location of the DMA-buf.
+ */
+ void (*move_notify)(struct dma_buf_attachment *attach);
};
/**
@@ -318,9 +499,11 @@ struct dma_buf {
* @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
* @sgt: cached mapping.
* @dir: direction of cached mapping.
+ * @peer2peer: true if the importer can handle peer resources without pages.
* @priv: exporter specific attachment data.
- * @dynamic_mapping: true if dma_buf_map/unmap_attachment() is called with the
- * dma_resv lock held.
+ * @importer_ops: importer operations for this attachment, if provided
+ * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
+ * @importer_priv: importer specific attachment data.
*
* This structure holds the attachment information between the dma_buf buffer
* and its user device(s). The list contains one attachment struct per device
@@ -337,7 +520,9 @@ struct dma_buf_attachment {
struct list_head node;
struct sg_table *sgt;
enum dma_data_direction dir;
- bool dynamic_mapping;
+ bool peer2peer;
+ const struct dma_buf_attach_ops *importer_ops;
+ void *importer_priv;
void *priv;
};
@@ -346,7 +531,7 @@ struct dma_buf_attachment {
* @exp_name: name of the exporter - useful for debugging.
* @owner: pointer to exporter module - used for refcounting kernel module
* @ops: Attach allocator-defined dma buf ops to the new buffer
- * @size: Size of the buffer
+ * @size: Size of the buffer - invariant over the lifetime of the buffer
* @flags: mode flags for the file
* @resv: reservation-object, NULL to allocate default one
* @priv: Attach private data of allocator to this buffer
@@ -399,12 +584,12 @@ static inline void get_dma_buf(struct dma_buf *dmabuf)
*/
static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
{
- return dmabuf->ops->dynamic_mapping;
+ return !!dmabuf->ops->pin;
}
/**
* dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
- * mappinsg
+ * mappings
* @attach: the DMA-buf attachment to check
*
* Returns true if a DMA-buf importer wants to call the map/unmap functions with
@@ -413,16 +598,19 @@ static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
static inline bool
dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
{
- return attach->dynamic_mapping;
+ return !!attach->importer_ops;
}
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev);
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
- bool dynamic_mapping);
+ const struct dma_buf_attach_ops *importer_ops,
+ void *importer_priv);
void dma_buf_detach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attach);
+int dma_buf_pin(struct dma_buf_attachment *attach);
+void dma_buf_unpin(struct dma_buf_attachment *attach);
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
@@ -442,6 +630,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
unsigned long);
-void *dma_buf_vmap(struct dma_buf *);
-void dma_buf_vunmap(struct dma_buf *, void *vaddr);
+int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map);
+void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map);
#endif /* __DMA_BUF_H__ */
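
The vmap interface now passes a struct iosys_map instead of a raw pointer, so
callers work for both system-memory and I/O-memory backings. A hedged sketch;
foo_cpu_fill() is invented, while iosys_map_memset() comes from
<linux/iosys-map.h>:

    static int foo_cpu_fill(struct dma_buf *dmabuf, u8 byte)
    {
            struct iosys_map map;
            int ret;

            ret = dma_buf_vmap(dmabuf, &map);
            if (ret)
                    return ret;

            /* Handles both map.vaddr and map.vaddr_iomem transparently. */
            iosys_map_memset(&map, 0, byte, dmabuf->size);

            dma_buf_vunmap(dmabuf, &map);
            return 0;
    }
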
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
deleted file mode 100644
index 03f8e98e3bcc..000000000000
--- a/include/linux/dma-contiguous.h
+++ /dev/null
@@ -1,176 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef __LINUX_CMA_H
-#define __LINUX_CMA_H
-
-/*
- * Contiguous Memory Allocator for DMA mapping framework
- * Copyright (c) 2010-2011 by Samsung Electronics.
- * Written by:
- * Marek Szyprowski <m.szyprowski@samsung.com>
- * Michal Nazarewicz <mina86@mina86.com>
- */
-
-/*
- * Contiguous Memory Allocator
- *
- * The Contiguous Memory Allocator (CMA) makes it possible to
- * allocate big contiguous chunks of memory after the system has
- * booted.
- *
- * Why is it needed?
- *
- * Various devices on embedded systems have no scatter-getter and/or
- * IO map support and require contiguous blocks of memory to
- * operate. They include devices such as cameras, hardware video
- * coders, etc.
- *
- * Such devices often require big memory buffers (a full HD frame
- * is, for instance, more then 2 mega pixels large, i.e. more than 6
- * MB of memory), which makes mechanisms such as kmalloc() or
- * alloc_page() ineffective.
- *
- * At the same time, a solution where a big memory region is
- * reserved for a device is suboptimal since often more memory is
- * reserved then strictly required and, moreover, the memory is
- * inaccessible to page system even if device drivers don't use it.
- *
- * CMA tries to solve this issue by operating on memory regions
- * where only movable pages can be allocated from. This way, kernel
- * can use the memory for pagecache and when device driver requests
- * it, allocated pages can be migrated.
- *
- * Driver usage
- *
- * CMA should not be used by the device drivers directly. It is
- * only a helper framework for dma-mapping subsystem.
- *
- * For more information, see kernel-docs in kernel/dma/contiguous.c
- */
-
-#ifdef __KERNEL__
-
-#include <linux/device.h>
-#include <linux/mm.h>
-
-struct cma;
-struct page;
-
-#ifdef CONFIG_DMA_CMA
-
-extern struct cma *dma_contiguous_default_area;
-
-static inline struct cma *dev_get_cma_area(struct device *dev)
-{
- if (dev && dev->cma_area)
- return dev->cma_area;
- return dma_contiguous_default_area;
-}
-
-static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
-{
- if (dev)
- dev->cma_area = cma;
-}
-
-static inline void dma_contiguous_set_default(struct cma *cma)
-{
- dma_contiguous_default_area = cma;
-}
-
-void dma_contiguous_reserve(phys_addr_t addr_limit);
-
-int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
- phys_addr_t limit, struct cma **res_cma,
- bool fixed);
-
-/**
- * dma_declare_contiguous() - reserve area for contiguous memory handling
- * for particular device
- * @dev: Pointer to device structure.
- * @size: Size of the reserved memory.
- * @base: Start address of the reserved memory (optional, 0 for any).
- * @limit: End address of the reserved memory (optional, 0 for any).
- *
- * This function reserves memory for specified device. It should be
- * called by board specific code when early allocator (memblock or bootmem)
- * is still activate.
- */
-
-static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
- phys_addr_t base, phys_addr_t limit)
-{
- struct cma *cma;
- int ret;
- ret = dma_contiguous_reserve_area(size, base, limit, &cma, true);
- if (ret == 0)
- dev_set_cma_area(dev, cma);
-
- return ret;
-}
-
-struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
- unsigned int order, bool no_warn);
-bool dma_release_from_contiguous(struct device *dev, struct page *pages,
- int count);
-struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
-void dma_free_contiguous(struct device *dev, struct page *page, size_t size);
-
-#else
-
-static inline struct cma *dev_get_cma_area(struct device *dev)
-{
- return NULL;
-}
-
-static inline void dev_set_cma_area(struct device *dev, struct cma *cma) { }
-
-static inline void dma_contiguous_set_default(struct cma *cma) { }
-
-static inline void dma_contiguous_reserve(phys_addr_t limit) { }
-
-static inline int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
- phys_addr_t limit, struct cma **res_cma,
- bool fixed)
-{
- return -ENOSYS;
-}
-
-static inline
-int dma_declare_contiguous(struct device *dev, phys_addr_t size,
- phys_addr_t base, phys_addr_t limit)
-{
- return -ENOSYS;
-}
-
-static inline
-struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
- unsigned int order, bool no_warn)
-{
- return NULL;
-}
-
-static inline
-bool dma_release_from_contiguous(struct device *dev, struct page *pages,
- int count)
-{
- return false;
-}
-
-/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
-static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
- gfp_t gfp)
-{
- return NULL;
-}
-
-static inline void dma_free_contiguous(struct device *dev, struct page *page,
- size_t size)
-{
- __free_pages(page, get_order(size));
-}
-
-#endif
-
-#endif
-
-#endif
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
deleted file mode 100644
index 4208f94d93f7..000000000000
--- a/include/linux/dma-debug.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2008 Advanced Micro Devices, Inc.
- *
- * Author: Joerg Roedel <joerg.roedel@amd.com>
- */
-
-#ifndef __DMA_DEBUG_H
-#define __DMA_DEBUG_H
-
-#include <linux/types.h>
-
-struct device;
-struct scatterlist;
-struct bus_type;
-
-#ifdef CONFIG_DMA_API_DEBUG
-
-extern void dma_debug_add_bus(struct bus_type *bus);
-
-extern void debug_dma_map_single(struct device *dev, const void *addr,
- unsigned long len);
-
-extern void debug_dma_map_page(struct device *dev, struct page *page,
- size_t offset, size_t size,
- int direction, dma_addr_t dma_addr);
-
-extern void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
-
-extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
- size_t size, int direction);
-
-extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, int mapped_ents, int direction);
-
-extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, int dir);
-
-extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t dma_addr, void *virt);
-
-extern void debug_dma_free_coherent(struct device *dev, size_t size,
- void *virt, dma_addr_t addr);
-
-extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
- size_t size, int direction,
- dma_addr_t dma_addr);
-
-extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
- size_t size, int direction);
-
-extern void debug_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- int direction);
-
-extern void debug_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle,
- size_t size, int direction);
-
-extern void debug_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg,
- int nelems, int direction);
-
-extern void debug_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg,
- int nelems, int direction);
-
-extern void debug_dma_dump_mappings(struct device *dev);
-
-extern void debug_dma_assert_idle(struct page *page);
-
-#else /* CONFIG_DMA_API_DEBUG */
-
-static inline void dma_debug_add_bus(struct bus_type *bus)
-{
-}
-
-static inline void debug_dma_map_single(struct device *dev, const void *addr,
- unsigned long len)
-{
-}
-
-static inline void debug_dma_map_page(struct device *dev, struct page *page,
- size_t offset, size_t size,
- int direction, dma_addr_t dma_addr)
-{
-}
-
-static inline void debug_dma_mapping_error(struct device *dev,
- dma_addr_t dma_addr)
-{
-}
-
-static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
- size_t size, int direction)
-{
-}
-
-static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, int mapped_ents, int direction)
-{
-}
-
-static inline void debug_dma_unmap_sg(struct device *dev,
- struct scatterlist *sglist,
- int nelems, int dir)
-{
-}
-
-static inline void debug_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t dma_addr, void *virt)
-{
-}
-
-static inline void debug_dma_free_coherent(struct device *dev, size_t size,
- void *virt, dma_addr_t addr)
-{
-}
-
-static inline void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
- size_t size, int direction,
- dma_addr_t dma_addr)
-{
-}
-
-static inline void debug_dma_unmap_resource(struct device *dev,
- dma_addr_t dma_addr, size_t size,
- int direction)
-{
-}
-
-static inline void debug_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- size_t size, int direction)
-{
-}
-
-static inline void debug_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle,
- size_t size, int direction)
-{
-}
-
-static inline void debug_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg,
- int nelems, int direction)
-{
-}
-
-static inline void debug_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg,
- int nelems, int direction)
-{
-}
-
-static inline void debug_dma_dump_mappings(struct device *dev)
-{
-}
-
-static inline void debug_dma_assert_idle(struct page *page)
-{
-}
-
-#endif /* CONFIG_DMA_API_DEBUG */
-
-#endif /* __DMA_DEBUG_H */
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 24b8684aa21d..18aade195884 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -1,64 +1,108 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Internals of the DMA direct mapping implementation. Only for use by the
+ * DMA mapping code and IOMMU drivers.
+ */
#ifndef _LINUX_DMA_DIRECT_H
#define _LINUX_DMA_DIRECT_H 1
#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
#include <linux/memblock.h> /* for min_low_pfn */
#include <linux/mem_encrypt.h>
+#include <linux/swiotlb.h>
extern unsigned int zone_dma_bits;
-#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
-#include <asm/dma-direct.h>
-#else
-static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+/*
+ * Record the mapping of CPU physical to DMA addresses for a given region.
+ */
+struct bus_dma_region {
+ phys_addr_t cpu_start;
+ dma_addr_t dma_start;
+ u64 size;
+ u64 offset;
+};
+
+static inline dma_addr_t translate_phys_to_dma(struct device *dev,
+ phys_addr_t paddr)
{
- dma_addr_t dev_addr = (dma_addr_t)paddr;
+ const struct bus_dma_region *m;
+
+ for (m = dev->dma_range_map; m->size; m++)
+ if (paddr >= m->cpu_start && paddr - m->cpu_start < m->size)
+ return (dma_addr_t)paddr - m->offset;
- return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
+ /* make sure dma_capable fails when no translation is available */
+ return DMA_MAPPING_ERROR;
}
-static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+static inline phys_addr_t translate_dma_to_phys(struct device *dev,
+ dma_addr_t dma_addr)
{
- phys_addr_t paddr = (phys_addr_t)dev_addr;
+ const struct bus_dma_region *m;
- return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
+ for (m = dev->dma_range_map; m->size; m++)
+ if (dma_addr >= m->dma_start && dma_addr - m->dma_start < m->size)
+ return (phys_addr_t)dma_addr + m->offset;
+
+ return (phys_addr_t)-1;
}
-#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
-#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
-bool force_dma_unencrypted(struct device *dev);
+#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
+#include <asm/dma-direct.h>
+#ifndef phys_to_dma_unencrypted
+#define phys_to_dma_unencrypted phys_to_dma
+#endif
#else
-static inline bool force_dma_unencrypted(struct device *dev)
+static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
+ phys_addr_t paddr)
{
- return false;
+ if (dev->dma_range_map)
+ return translate_phys_to_dma(dev, paddr);
+ return paddr;
}
-#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */
/*
* If memory encryption is supported, phys_to_dma will set the memory encryption
- * bit in the DMA address, and dma_to_phys will clear it. The raw __phys_to_dma
- * and __dma_to_phys versions should only be used on non-encrypted memory for
- * special occasions like DMA coherent buffers.
+ * bit in the DMA address, and dma_to_phys will clear it.
+ * phys_to_dma_unencrypted is for use on special unencrypted memory like swiotlb
+ * buffers.
*/
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
- return __sme_set(__phys_to_dma(dev, paddr));
+ return __sme_set(phys_to_dma_unencrypted(dev, paddr));
}
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
- return __sme_clr(__dma_to_phys(dev, daddr));
+ phys_addr_t paddr;
+
+ if (dev->dma_range_map)
+ paddr = translate_dma_to_phys(dev, dma_addr);
+ else
+ paddr = dma_addr;
+
+ return __sme_clr(paddr);
}
+#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
+
+#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
+bool force_dma_unencrypted(struct device *dev);
+#else
+static inline bool force_dma_unencrypted(struct device *dev)
+{
+ return false;
+}
+#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
bool is_ram)
{
dma_addr_t end = addr + size - 1;
- if (!dev->dma_mask)
+ if (addr == DMA_MAPPING_ERROR)
return false;
-
if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
return false;
@@ -71,18 +115,13 @@ void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
-void *dma_direct_alloc_pages(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
-void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_addr, unsigned long attrs);
-struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
- gfp_t gfp, unsigned long attrs);
-int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs);
-bool dma_direct_can_mmap(struct device *dev);
-int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs);
+struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
+void dma_direct_free_pages(struct device *dev, size_t size,
+ struct page *page, dma_addr_t dma_addr,
+ enum dma_data_direction dir);
int dma_direct_supported(struct device *dev, u64 mask);
+dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+
#endif /* _LINUX_DMA_DIRECT_H */
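
The dma_range_map walk above replaces the single dma_pfn_offset with a
zero-terminated table. A hedged sketch of a one-entry map and the translations
it produces; addresses are invented and SZ_1G comes from <linux/sizes.h>:

    static const struct bus_dma_region foo_map[] = {
            {
                    .cpu_start = 0x80000000,
                    .dma_start = 0x00000000,
                    .size      = SZ_1G,
                    .offset    = 0x80000000,    /* cpu_start - dma_start */
            },
            { }     /* size == 0 terminates the walk */
    };

    /*
     * With dev->dma_range_map = foo_map:
     *   translate_phys_to_dma(dev, 0x80001000) == 0x00001000
     *   translate_dma_to_phys(dev, 0x00001000) == 0x80001000
     * Misses return DMA_MAPPING_ERROR / (phys_addr_t)-1 respectively.
     */
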
diff --git a/include/linux/dma-direction.h b/include/linux/dma-direction.h
index 9c96e30e6a0b..a2fe4571bc92 100644
--- a/include/linux/dma-direction.h
+++ b/include/linux/dma-direction.h
@@ -9,4 +9,10 @@ enum dma_data_direction {
DMA_NONE = 3,
};
-#endif
+static inline int valid_dma_direction(enum dma_data_direction dir)
+{
+ return dir == DMA_BIDIRECTIONAL || dir == DMA_TO_DEVICE ||
+ dir == DMA_FROM_DEVICE;
+}
+
+#endif /* _LINUX_DMA_DIRECTION_H */
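
valid_dma_direction() moves here so any mapping layer can sanity-check the
direction before use. A hedged sketch of a defensive wrapper; foo_map_single()
is invented:

    static dma_addr_t foo_map_single(struct device *dev, void *ptr,
                                     size_t size, enum dma_data_direction dir)
    {
            if (WARN_ON_ONCE(!valid_dma_direction(dir)))
                    return DMA_MAPPING_ERROR;

            return dma_map_single(dev, ptr, size, dir);
    }
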
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
index 303dd712220f..ec7f25def392 100644
--- a/include/linux/dma-fence-array.h
+++ b/include/linux/dma-fence-array.h
@@ -45,19 +45,6 @@ struct dma_fence_array {
struct irq_work work;
};
-extern const struct dma_fence_ops dma_fence_array_ops;
-
-/**
- * dma_fence_is_array - check if a fence is from the array subsclass
- * @fence: fence to test
- *
- * Return true if it is a dma_fence_array and false otherwise.
- */
-static inline bool dma_fence_is_array(struct dma_fence *fence)
-{
- return fence->ops == &dma_fence_array_ops;
-}
-
/**
* to_dma_fence_array - cast a fence to a dma_fence_array
* @fence: fence to cast to a dma_fence_array
@@ -68,12 +55,27 @@ static inline bool dma_fence_is_array(struct dma_fence *fence)
static inline struct dma_fence_array *
to_dma_fence_array(struct dma_fence *fence)
{
- if (fence->ops != &dma_fence_array_ops)
+ if (!fence || !dma_fence_is_array(fence))
return NULL;
return container_of(fence, struct dma_fence_array, base);
}
+/**
+ * dma_fence_array_for_each - iterate over all fences in array
+ * @fence: current fence
+ * @index: index into the array
+ * @head: potential dma_fence_array object
+ *
+ * Test if @head is a dma_fence_array object and if yes iterate over all fences
+ * in the array. If not, just iterate over the single fence in @head itself.
+ *
+ * For a deep dive iterator see dma_fence_unwrap_for_each().
+ */
+#define dma_fence_array_for_each(fence, index, head) \
+ for (index = 0, fence = dma_fence_array_first(head); fence; \
+ ++(index), fence = dma_fence_array_next(head, index))
+
struct dma_fence_array *dma_fence_array_create(int num_fences,
struct dma_fence **fences,
u64 context, unsigned seqno,
@@ -81,4 +83,8 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
bool dma_fence_match_context(struct dma_fence *fence, u64 context);
+struct dma_fence *dma_fence_array_first(struct dma_fence *head);
+struct dma_fence *dma_fence_array_next(struct dma_fence *head,
+ unsigned int index);
+
#endif /* __LINUX_DMA_FENCE_ARRAY_H */
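
A hedged sketch of the new iterator: it visits each array member, or just
@head itself when the fence is not an array. foo_dump_fences() is invented:

    static void foo_dump_fences(struct dma_fence *head)
    {
            struct dma_fence *fence;
            unsigned int index;

            dma_fence_array_for_each(fence, index, head)
                    pr_info("fence %llu#%llu\n", fence->context, fence->seqno);
    }
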
diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h
index 10462a029da2..4bdf0b96da28 100644
--- a/include/linux/dma-fence-chain.h
+++ b/include/linux/dma-fence-chain.h
@@ -12,28 +12,43 @@
#include <linux/dma-fence.h>
#include <linux/irq_work.h>
+#include <linux/slab.h>
/**
 * struct dma_fence_chain - fence to represent a node of a fence chain
* @base: fence base class
- * @lock: spinlock for fence handling
* @prev: previous fence of the chain
* @prev_seqno: original previous seqno before garbage collection
* @fence: encapsulated fence
- * @cb: callback structure for signaling
- * @work: irq work item for signaling
+ * @lock: spinlock for fence handling
*/
struct dma_fence_chain {
struct dma_fence base;
- spinlock_t lock;
struct dma_fence __rcu *prev;
u64 prev_seqno;
struct dma_fence *fence;
- struct dma_fence_cb cb;
- struct irq_work work;
+ union {
+ /**
+ * @cb: callback for signaling
+ *
+ * This is used to add the callback for signaling the
+ * completion of the fence chain. Never used at the same time
+ * as the irq work.
+ */
+ struct dma_fence_cb cb;
+
+ /**
+ * @work: irq work item for signaling
+ *
+ * Irq work structure to allow us to add the callback without
+ * running into lock inversion. Never used at the same time as
+ * the callback.
+ */
+ struct irq_work work;
+ };
+ spinlock_t lock;
};
-extern const struct dma_fence_ops dma_fence_chain_ops;
/**
* to_dma_fence_chain - cast a fence to a dma_fence_chain
@@ -45,19 +60,60 @@ extern const struct dma_fence_ops dma_fence_chain_ops;
static inline struct dma_fence_chain *
to_dma_fence_chain(struct dma_fence *fence)
{
- if (!fence || fence->ops != &dma_fence_chain_ops)
+ if (!fence || !dma_fence_is_chain(fence))
return NULL;
return container_of(fence, struct dma_fence_chain, base);
}
/**
+ * dma_fence_chain_contained - return the contained fence
+ * @fence: the fence to test
+ *
+ * If the fence is a dma_fence_chain the function returns the fence contained
+ * inside the chain object, otherwise it returns the fence itself.
+ */
+static inline struct dma_fence *
+dma_fence_chain_contained(struct dma_fence *fence)
+{
+ struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+
+ return chain ? chain->fence : fence;
+}
+
+/**
+ * dma_fence_chain_alloc - allocate a new fence chain node
+ *
+ * Returns a new struct dma_fence_chain object or NULL on failure.
+ */
+static inline struct dma_fence_chain *dma_fence_chain_alloc(void)
+{
+ return kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
+}
+
+/**
+ * dma_fence_chain_free - free an unused fence chain node
+ * @chain: chain node to free
+ *
+ * Frees up an allocated but not used struct dma_fence_chain object. This
+ * doesn't need an RCU grace period since the fence was never initialized nor
+ * published. After dma_fence_chain_init() has been called the fence must be
+ * released by calling dma_fence_put(), and not through this function.
+ */
+static inline void dma_fence_chain_free(struct dma_fence_chain *chain)
+{
+ kfree(chain);
+}
+
+/**
* dma_fence_chain_for_each - iterate over all fences in chain
* @iter: current fence
* @head: starting point
*
* Iterate over all fences in the chain. We keep a reference to the current
* fence while inside the loop which must be dropped when breaking out.
+ *
+ * For a deep dive iterator see dma_fence_unwrap_for_each().
*/
#define dma_fence_chain_for_each(iter, head) \
for (iter = dma_fence_get(head); iter; \
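
Putting the new helpers together, as a sketch (dma_fence_chain_init() is the existing initializer declared elsewhere in this header; error handling is abbreviated):

    struct dma_fence_chain *node;
    struct dma_fence *iter;

    node = dma_fence_chain_alloc();
    if (!node)
            return -ENOMEM;
    /* If the node turns out to be unneeded before init, just free it: */
    dma_fence_chain_free(node);

    /* Walk a chain; the iterator holds a reference that must be dropped
     * when breaking out early. */
    dma_fence_chain_for_each(iter, head) {
            if (!dma_fence_is_signaled(dma_fence_chain_contained(iter))) {
                    dma_fence_put(iter);
                    break;
            }
    }
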
diff --git a/include/linux/dma-fence-unwrap.h b/include/linux/dma-fence-unwrap.h
new file mode 100644
index 000000000000..66b1e56fbb81
--- /dev/null
+++ b/include/linux/dma-fence-unwrap.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ * Authors:
+ * Christian König <christian.koenig@amd.com>
+ */
+
+#ifndef __LINUX_DMA_FENCE_UNWRAP_H
+#define __LINUX_DMA_FENCE_UNWRAP_H
+
+struct dma_fence;
+
+/**
+ * struct dma_fence_unwrap - cursor into the container structure
+ *
+ * Should be used with dma_fence_unwrap_for_each() iterator macro.
+ */
+struct dma_fence_unwrap {
+ /**
+	 * @chain: potential dma_fence_chain, but can be another fence as well
+ */
+ struct dma_fence *chain;
+ /**
+	 * @array: potential dma_fence_array, but can be another fence as well
+ */
+ struct dma_fence *array;
+ /**
+ * @index: last returned index if @array is really a dma_fence_array
+ */
+ unsigned int index;
+};
+
+struct dma_fence *dma_fence_unwrap_first(struct dma_fence *head,
+ struct dma_fence_unwrap *cursor);
+struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor);
+
+/**
+ * dma_fence_unwrap_for_each - iterate over all fences in containers
+ * @fence: current fence
+ * @cursor: current position inside the containers
+ * @head: starting point for the iterator
+ *
+ * Unwrap dma_fence_chain and dma_fence_array containers and deep dive into all
+ * potential fences in them. If @head is just a normal fence, only that one is
+ * returned.
+ */
+#define dma_fence_unwrap_for_each(fence, cursor, head) \
+ for (fence = dma_fence_unwrap_first(head, cursor); fence; \
+ fence = dma_fence_unwrap_next(cursor))
+
+struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
+ struct dma_fence **fences,
+ struct dma_fence_unwrap *cursors);
+
+/**
+ * dma_fence_unwrap_merge - unwrap and merge fences
+ *
+ * All fences given as parameters are unwrapped and merged back together as a
+ * flat dma_fence_array. Useful if multiple containers need to be merged
+ * together.
+ *
+ * Implemented as a macro to allocate the necessary arrays on the stack and
+ * account the stack frame size to the caller.
+ *
+ * Returns NULL on memory allocation failure, otherwise a dma_fence object
+ * representing all the given fences.
+ */
+#define dma_fence_unwrap_merge(...) \
+ ({ \
+ struct dma_fence *__f[] = { __VA_ARGS__ }; \
+ struct dma_fence_unwrap __c[ARRAY_SIZE(__f)]; \
+ \
+ __dma_fence_unwrap_merge(ARRAY_SIZE(__f), __f, __c); \
+ })
+
+#endif
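
Both entry points in one sketch, assuming fence_a and fence_b are arbitrary fences (plain, chain, or array); the merge macro builds its cursor array on the caller's stack as noted above:

    struct dma_fence_unwrap cursor;
    struct dma_fence *f, *merged;

    /* Deep dive: visit every leaf fence behind fence_a. */
    dma_fence_unwrap_for_each(f, &cursor, fence_a)
            pr_info("leaf %llu#%llu\n", f->context, f->seqno);

    /* Flatten both containers into a single fence. */
    merged = dma_fence_unwrap_merge(fence_a, fence_b);
    if (!merged)
            return -ENOMEM;
    dma_fence_put(merged);      /* caller owns the returned reference */
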
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 3347c54f3a87..775cdc0b4f24 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -214,19 +214,15 @@ struct dma_fence_ops {
* Custom wait implementation, defaults to dma_fence_default_wait() if
* not set.
*
- * The dma_fence_default_wait implementation should work for any fence, as long
- * as @enable_signaling works correctly. This hook allows drivers to
- * have an optimized version for the case where a process context is
- * already available, e.g. if @enable_signaling for the general case
- * needs to set up a worker thread.
+ * Deprecated and should not be used by new implementations. Only used
+ * by existing implementations which need special handling for their
+ * hardware reset procedure.
*
* Must return -ERESTARTSYS if the wait is intr = true and the wait was
* interrupted, and remaining jiffies if fence has signaled, or 0 if wait
* timed out. Can also return other error values on custom implementations,
* which should be treated as if the fence is signaled. For example a hardware
* lockup could be reported like that.
- *
- * This callback is optional.
*/
signed long (*wait)(struct dma_fence *fence,
bool intr, signed long timeout);
@@ -268,6 +264,7 @@ void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
void dma_fence_release(struct kref *kref);
void dma_fence_free(struct dma_fence *fence);
+void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq);
/**
* dma_fence_put - decreases refcount of the fence
@@ -357,8 +354,24 @@ dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep)
} while (1);
}
+#ifdef CONFIG_LOCKDEP
+bool dma_fence_begin_signalling(void);
+void dma_fence_end_signalling(bool cookie);
+void __dma_fence_might_wait(void);
+#else
+static inline bool dma_fence_begin_signalling(void)
+{
+ return true;
+}
+static inline void dma_fence_end_signalling(bool cookie) {}
+static inline void __dma_fence_might_wait(void) {}
+#endif
+
int dma_fence_signal(struct dma_fence *fence);
int dma_fence_signal_locked(struct dma_fence *fence);
+int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp);
+int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
+ ktime_t timestamp);
signed long dma_fence_default_wait(struct dma_fence *fence,
bool intr, signed long timeout);
int dma_fence_add_callback(struct dma_fence *fence,
@@ -571,28 +584,45 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
}
struct dma_fence *dma_fence_get_stub(void);
+struct dma_fence *dma_fence_allocate_private_stub(void);
u64 dma_fence_context_alloc(unsigned num);
-#define DMA_FENCE_TRACE(f, fmt, args...) \
- do { \
- struct dma_fence *__ff = (f); \
- if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \
- pr_info("f %llu#%llu: " fmt, \
- __ff->context, __ff->seqno, ##args); \
- } while (0)
-
-#define DMA_FENCE_WARN(f, fmt, args...) \
- do { \
- struct dma_fence *__ff = (f); \
- pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\
- ##args); \
- } while (0)
-
-#define DMA_FENCE_ERR(f, fmt, args...) \
- do { \
- struct dma_fence *__ff = (f); \
- pr_err("f %llu#%llu: " fmt, __ff->context, __ff->seqno, \
- ##args); \
- } while (0)
+extern const struct dma_fence_ops dma_fence_array_ops;
+extern const struct dma_fence_ops dma_fence_chain_ops;
+
+/**
+ * dma_fence_is_array - check if a fence is from the array subclass
+ * @fence: the fence to test
+ *
+ * Return true if it is a dma_fence_array and false otherwise.
+ */
+static inline bool dma_fence_is_array(struct dma_fence *fence)
+{
+ return fence->ops == &dma_fence_array_ops;
+}
+
+/**
+ * dma_fence_is_chain - check if a fence is from the chain subclass
+ * @fence: the fence to test
+ *
+ * Return true if it is a dma_fence_chain and false otherwise.
+ */
+static inline bool dma_fence_is_chain(struct dma_fence *fence)
+{
+ return fence->ops == &dma_fence_chain_ops;
+}
+
+/**
+ * dma_fence_is_container - check if a fence is a container for other fences
+ * @fence: the fence to test
+ *
+ * Return true if this fence is a container for other fences, false otherwise.
+ * This is important since we can't build up large fence structures, as
+ * otherwise we would run into recursion when operating on those fences.
+ */
+static inline bool dma_fence_is_container(struct dma_fence *fence)
+{
+ return dma_fence_is_array(fence) || dma_fence_is_chain(fence);
+}
#endif /* __LINUX_DMA_FENCE_H */
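
The lockdep annotations added above are meant to bracket fence-signalling critical sections; a minimal sketch of the pairing, with the work between begin and end reduced to the signal itself:

    bool cookie;

    cookie = dma_fence_begin_signalling();
    /* Nothing in here may allocate memory or wait on fences. */
    dma_fence_signal(fence);
    dma_fence_end_signalling(cookie);

    /* The container checks help keep fence graphs flat: */
    if (dma_fence_is_container(fence))
            pr_debug("array or chain, do not wrap it again\n");
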
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
index 454e354d1ffb..0c05561cad6e 100644
--- a/include/linux/dma-heap.h
+++ b/include/linux/dma-heap.h
@@ -16,15 +16,15 @@ struct dma_heap;
/**
* struct dma_heap_ops - ops to operate on a given heap
- * @allocate: allocate dmabuf and return fd
+ * @allocate: allocate dmabuf and return struct dma_buf ptr
*
- * allocate returns dmabuf fd on success, -errno on error.
+ * allocate returns dmabuf on success, ERR_PTR(-errno) on error.
*/
struct dma_heap_ops {
- int (*allocate)(struct dma_heap *heap,
- unsigned long len,
- unsigned long fd_flags,
- unsigned long heap_flags);
+ struct dma_buf *(*allocate)(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags);
};
/**
@@ -51,6 +51,15 @@ struct dma_heap_export_info {
void *dma_heap_get_drvdata(struct dma_heap *heap);
/**
+ * dma_heap_get_name() - get heap name
+ * @heap: DMA-Heap to retrieve the name for
+ *
+ * Returns:
+ * The char* for the heap name.
+ */
+const char *dma_heap_get_name(struct dma_heap *heap);
+
+/**
* dma_heap_add - adds a heap to dmabuf heaps
* @exp_info: information needed to register this heap
*/
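
Under the changed contract a heap's allocate op now hands back the dma_buf itself and the core turns it into an fd. A hypothetical heap illustrating this, with the actual allocation elided:

    static struct dma_buf *my_heap_allocate(struct dma_heap *heap,
                                            unsigned long len,
                                            unsigned long fd_flags,
                                            unsigned long heap_flags)
    {
            /* ... allocate backing storage and export it as a dma_buf ... */
            return ERR_PTR(-ENOMEM);    /* placeholder failure path */
    }

    static const struct dma_heap_ops my_heap_ops = {
            .allocate = my_heap_allocate,
    };
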
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
deleted file mode 100644
index 2112f21f73d8..000000000000
--- a/include/linux/dma-iommu.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2014-2015 ARM Ltd.
- */
-#ifndef __DMA_IOMMU_H
-#define __DMA_IOMMU_H
-
-#include <linux/errno.h>
-#include <linux/types.h>
-
-#ifdef CONFIG_IOMMU_DMA
-#include <linux/dma-mapping.h>
-#include <linux/iommu.h>
-#include <linux/msi.h>
-
-/* Domain management interface for IOMMU drivers */
-int iommu_get_dma_cookie(struct iommu_domain *domain);
-int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
-void iommu_put_dma_cookie(struct iommu_domain *domain);
-
-/* Setup call for arch DMA mapping code */
-void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size);
-
-/* The DMA API isn't _quite_ the whole story, though... */
-/*
- * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU device
- *
- * The MSI page will be stored in @desc.
- *
- * Return: 0 on success otherwise an error describing the failure.
- */
-int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
-
-/* Update the MSI message if required. */
-void iommu_dma_compose_msi_msg(struct msi_desc *desc,
- struct msi_msg *msg);
-
-void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
-
-#else /* CONFIG_IOMMU_DMA */
-
-struct iommu_domain;
-struct msi_desc;
-struct msi_msg;
-struct device;
-
-static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base,
- u64 size)
-{
-}
-
-static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
-{
- return -ENODEV;
-}
-
-static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
-{
- return -ENODEV;
-}
-
-static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
-{
-}
-
-static inline int iommu_dma_prepare_msi(struct msi_desc *desc,
- phys_addr_t msi_addr)
-{
- return 0;
-}
-
-static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc,
- struct msi_msg *msg)
-{
-}
-
-static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
-{
-}
-
-#endif /* CONFIG_IOMMU_DMA */
-#endif /* __DMA_IOMMU_H */
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
new file mode 100644
index 000000000000..d678afeb8a13
--- /dev/null
+++ b/include/linux/dma-map-ops.h
@@ -0,0 +1,449 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header is for implementations of dma_map_ops and related code.
+ * It should not be included in drivers just using the DMA API.
+ */
+#ifndef _LINUX_DMA_MAP_OPS_H
+#define _LINUX_DMA_MAP_OPS_H
+
+#include <linux/dma-mapping.h>
+#include <linux/pgtable.h>
+
+struct cma;
+
+/*
+ * Values for struct dma_map_ops.flags:
+ *
+ * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can
+ * handle PCI P2PDMA pages in the map_sg/unmap_sg operation.
+ */
+#define DMA_F_PCI_P2PDMA_SUPPORTED (1 << 0)
+
+struct dma_map_ops {
+ unsigned int flags;
+
+ void *(*alloc)(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ unsigned long attrs);
+ void (*free)(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs);
+ struct page *(*alloc_pages)(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, enum dma_data_direction dir,
+ gfp_t gfp);
+ void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
+ dma_addr_t dma_handle, enum dma_data_direction dir);
+ struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size,
+ enum dma_data_direction dir, gfp_t gfp,
+ unsigned long attrs);
+ void (*free_noncontiguous)(struct device *dev, size_t size,
+ struct sg_table *sgt, enum dma_data_direction dir);
+ int (*mmap)(struct device *, struct vm_area_struct *,
+ void *, dma_addr_t, size_t, unsigned long attrs);
+
+ int (*get_sgtable)(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+
+ dma_addr_t (*map_page)(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+ void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+ /*
+ * map_sg should return a negative error code on error. See
+ * dma_map_sgtable() for a list of appropriate error codes
+ * and their meanings.
+ */
+ int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+ void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+ dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+ void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+ void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir);
+ void (*sync_single_for_device)(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir);
+ void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir);
+ void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir);
+ void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction);
+ int (*dma_supported)(struct device *dev, u64 mask);
+ u64 (*get_required_mask)(struct device *dev);
+ size_t (*max_mapping_size)(struct device *dev);
+ size_t (*opt_mapping_size)(void);
+ unsigned long (*get_merge_boundary)(struct device *dev);
+};
+
+#ifdef CONFIG_DMA_OPS
+#include <asm/dma-mapping.h>
+
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ if (dev->dma_ops)
+ return dev->dma_ops;
+ return get_arch_dma_ops(dev->bus);
+}
+
+static inline void set_dma_ops(struct device *dev,
+ const struct dma_map_ops *dma_ops)
+{
+ dev->dma_ops = dma_ops;
+}
+#else /* CONFIG_DMA_OPS */
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ return NULL;
+}
+static inline void set_dma_ops(struct device *dev,
+ const struct dma_map_ops *dma_ops)
+{
+}
+#endif /* CONFIG_DMA_OPS */
+
+#ifdef CONFIG_DMA_CMA
+extern struct cma *dma_contiguous_default_area;
+
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+ if (dev && dev->cma_area)
+ return dev->cma_area;
+ return dma_contiguous_default_area;
+}
+
+void dma_contiguous_reserve(phys_addr_t addr_limit);
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+ phys_addr_t limit, struct cma **res_cma, bool fixed);
+
+struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
+ unsigned int order, bool no_warn);
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+ int count);
+struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
+void dma_free_contiguous(struct device *dev, struct page *page, size_t size);
+
+void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
+#else /* CONFIG_DMA_CMA */
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+ return NULL;
+}
+static inline void dma_contiguous_reserve(phys_addr_t limit)
+{
+}
+static inline int dma_contiguous_reserve_area(phys_addr_t size,
+ phys_addr_t base, phys_addr_t limit, struct cma **res_cma,
+ bool fixed)
+{
+ return -ENOSYS;
+}
+static inline struct page *dma_alloc_from_contiguous(struct device *dev,
+ size_t count, unsigned int order, bool no_warn)
+{
+ return NULL;
+}
+static inline bool dma_release_from_contiguous(struct device *dev,
+ struct page *pages, int count)
+{
+ return false;
+}
+/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
+static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
+ gfp_t gfp)
+{
+ return NULL;
+}
+static inline void dma_free_contiguous(struct device *dev, struct page *page,
+ size_t size)
+{
+ __free_pages(page, get_order(size));
+}
+#endif /* CONFIG_DMA_CMA */
+
+#ifdef CONFIG_DMA_PERNUMA_CMA
+void dma_pernuma_cma_reserve(void);
+#else
+static inline void dma_pernuma_cma_reserve(void) { }
+#endif /* CONFIG_DMA_PERNUMA_CMA */
+
+#ifdef CONFIG_DMA_DECLARE_COHERENT
+int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+ dma_addr_t device_addr, size_t size);
+void dma_release_coherent_memory(struct device *dev);
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
+ dma_addr_t *dma_handle, void **ret);
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, size_t size, int *ret);
+#else
+static inline int dma_declare_coherent_memory(struct device *dev,
+ phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
+{
+ return -ENOSYS;
+}
+
+#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
+#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
+#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
+static inline void dma_release_coherent_memory(struct device *dev) { }
+#endif /* CONFIG_DMA_DECLARE_COHERENT */
+
+#ifdef CONFIG_DMA_GLOBAL_POOL
+void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
+ dma_addr_t *dma_handle);
+int dma_release_from_global_coherent(int order, void *vaddr);
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
+ size_t size, int *ret);
+int dma_init_global_coherent(phys_addr_t phys_addr, size_t size);
+#else
+static inline void *dma_alloc_from_global_coherent(struct device *dev,
+ ssize_t size, dma_addr_t *dma_handle)
+{
+ return NULL;
+}
+static inline int dma_release_from_global_coherent(int order, void *vaddr)
+{
+ return 0;
+}
+static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
+ void *cpu_addr, size_t size, int *ret)
+{
+ return 0;
+}
+#endif /* CONFIG_DMA_GLOBAL_POOL */
+
+/*
+ * This is the actual return value from the ->alloc_noncontiguous method.
+ * The users of the DMA API should only care about the sg_table, but to make
+ * the DMA-API internal vmapping and freeing easier we stash away the page
+ * array as well (except for the fallback case). This can go away any time,
+ * e.g. when a vmap-variant that takes a scatterlist comes along.
+ */
+struct dma_sgt_handle {
+ struct sg_table sgt;
+ struct page **pages;
+};
+#define sgt_handle(sgt) \
+ container_of((sgt), struct dma_sgt_handle, sgt)
+
+int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+struct page *dma_common_alloc_pages(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
+void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
+ dma_addr_t dma_handle, enum dma_data_direction dir);
+
+struct page **dma_common_find_pages(void *cpu_addr);
+void *dma_common_contiguous_remap(struct page *page, size_t size, pgprot_t prot,
+ const void *caller);
+void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot,
+ const void *caller);
+void dma_common_free_remap(void *cpu_addr, size_t size);
+
+struct page *dma_alloc_from_pool(struct device *dev, size_t size,
+ void **cpu_addr, gfp_t flags,
+ bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
+bool dma_free_from_pool(struct device *dev, void *start, size_t size);
+
+int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
+ dma_addr_t dma_start, u64 size);
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+extern bool dma_default_coherent;
+static inline bool dev_is_dma_coherent(struct device *dev)
+{
+ return dev->dma_coherent;
+}
+#else
+static inline bool dev_is_dma_coherent(struct device *dev)
+{
+ return true;
+}
+#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_{DEVICE,CPU,CPU_ALL} */
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs);
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs);
+
+#ifdef CONFIG_MMU
+/*
+ * Page protection so that devices that can't snoop CPU caches can use the
+ * memory coherently. We default to pgprot_noncached which is usually used
+ * for ioremap as a safe bet, but architectures can override this with less
+ * strict semantics if possible.
+ */
+#ifndef pgprot_dmacoherent
+#define pgprot_dmacoherent(prot) pgprot_noncached(prot)
+#endif
+
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
+#else
+static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
+ unsigned long attrs)
+{
+ return prot; /* no protection bits supported without page tables */
+}
+#endif /* CONFIG_MMU */
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir);
+#else
+static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+}
+#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir);
+#else
+static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+}
+#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
+void arch_sync_dma_for_cpu_all(void);
+#else
+static inline void arch_sync_dma_for_cpu_all(void)
+{
+}
+#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
+
+#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
+void arch_dma_prep_coherent(struct page *page, size_t size);
+#else
+static inline void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+}
+#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */
+
+#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
+void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
+#else
+static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
+{
+}
+#endif /* ARCH_HAS_DMA_MARK_CLEAN */
+
+void *arch_dma_set_uncached(void *addr, size_t size);
+void arch_dma_clear_uncached(void *addr, size_t size);
+
+#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
+bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr);
+bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle);
+bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
+ int nents);
+bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
+ int nents);
+#else
+#define arch_dma_map_page_direct(d, a) (false)
+#define arch_dma_unmap_page_direct(d, a) (false)
+#define arch_dma_map_sg_direct(d, s, n) (false)
+#define arch_dma_unmap_sg_direct(d, s, n) (false)
+#endif
+
+#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ const struct iommu_ops *iommu, bool coherent);
+#else
+static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
+ u64 size, const struct iommu_ops *iommu, bool coherent)
+{
+}
+#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */
+
+#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
+void arch_teardown_dma_ops(struct device *dev);
+#else
+static inline void arch_teardown_dma_ops(struct device *dev)
+{
+}
+#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */
+
+#ifdef CONFIG_DMA_API_DEBUG
+void dma_debug_add_bus(struct bus_type *bus);
+void debug_dma_dump_mappings(struct device *dev);
+#else
+static inline void dma_debug_add_bus(struct bus_type *bus)
+{
+}
+static inline void debug_dma_dump_mappings(struct device *dev)
+{
+}
+#endif /* CONFIG_DMA_API_DEBUG */
+
+extern const struct dma_map_ops dma_dummy_ops;
+
+enum pci_p2pdma_map_type {
+ /*
+ * PCI_P2PDMA_MAP_UNKNOWN: Used internally for indicating the mapping
+ * type hasn't been calculated yet. Functions that return this enum
+ * never return this value.
+ */
+ PCI_P2PDMA_MAP_UNKNOWN = 0,
+
+ /*
+ * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will
+ * traverse the host bridge and the host bridge is not in the
+ * allowlist. DMA Mapping routines should return an error when
+ * this is returned.
+ */
+ PCI_P2PDMA_MAP_NOT_SUPPORTED,
+
+ /*
+	 * PCI_P2PDMA_MAP_BUS_ADDR: Indicates that two devices can talk to
+ * each other directly through a PCI switch and the transaction will
+ * not traverse the host bridge. Such a mapping should program
+ * the DMA engine with PCI bus addresses.
+ */
+ PCI_P2PDMA_MAP_BUS_ADDR,
+
+ /*
+ * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk
+ * to each other, but the transaction traverses a host bridge on the
+ * allowlist. In this case, a normal mapping either with CPU physical
+ * addresses (in the case of dma-direct) or IOVA addresses (in the
+ * case of IOMMUs) should be used to program the DMA engine.
+ */
+ PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
+};
+
+struct pci_p2pdma_map_state {
+ struct dev_pagemap *pgmap;
+ int map;
+ u64 bus_off;
+};
+
+#ifdef CONFIG_PCI_P2PDMA
+enum pci_p2pdma_map_type
+pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
+ struct scatterlist *sg);
+#else /* CONFIG_PCI_P2PDMA */
+static inline enum pci_p2pdma_map_type
+pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
+ struct scatterlist *sg)
+{
+ return PCI_P2PDMA_MAP_NOT_SUPPORTED;
+}
+#endif /* CONFIG_PCI_P2PDMA */
+
+#endif /* _LINUX_DMA_MAP_OPS_H */
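
How an implementation plugs in, as a sketch: fill in only the ops that are supported and attach them per device with set_dma_ops(); every my_iommu_* callback here is hypothetical:

    static const struct dma_map_ops my_iommu_dma_ops = {
            .alloc      = my_iommu_alloc,
            .free       = my_iommu_free,
            .map_page   = my_iommu_map_page,
            .unmap_page = my_iommu_unmap_page,
            .map_sg     = my_iommu_map_sg,      /* negative errno on failure */
            .unmap_sg   = my_iommu_unmap_sg,
    };

    static void my_bus_setup_device(struct device *dev)
    {
            set_dma_ops(dev, &my_iommu_dma_ops);
    }
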
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 330ad58fbf4d..0ee20b764000 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -6,7 +6,6 @@
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
-#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
@@ -14,7 +13,7 @@
/**
* List of possible attributes associated with a DMA mapping. The semantics
- * of each attribute should be defined in Documentation/DMA-attributes.txt.
+ * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
*/
/*
@@ -28,11 +27,6 @@
*/
#define DMA_ATTR_WRITE_COMBINE (1UL << 2)
/*
- * DMA_ATTR_NON_CONSISTENT: Lets the platform to choose to return either
- * consistent or non-consistent memory as it sees fit.
- */
-#define DMA_ATTR_NON_CONSISTENT (1UL << 3)
-/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
* virtual mapping for the allocated buffer.
*/
@@ -68,377 +62,68 @@
#define DMA_ATTR_PRIVILEGED (1UL << 9)
/*
- * A dma_addr_t can hold any valid DMA or bus address for the platform.
- * It can be given to a device to use as a DMA source or target. A CPU cannot
- * reference a dma_addr_t directly because there may be translation between
- * its physical address space and the bus address space.
+ * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
+ * be given to a device to use as a DMA source or target. It is specific to a
+ * given device and there may be a translation between the CPU physical address
+ * space and the bus address space.
+ *
+ * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
+ * be used directly in drivers, but checked for using dma_mapping_error()
+ * instead.
*/
-struct dma_map_ops {
- void* (*alloc)(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs);
- void (*free)(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs);
- int (*mmap)(struct device *, struct vm_area_struct *,
- void *, dma_addr_t, size_t,
- unsigned long attrs);
-
- int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
- dma_addr_t, size_t, unsigned long attrs);
-
- dma_addr_t (*map_page)(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs);
- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
- /*
- * map_sg returns 0 on error and a value > 0 on success.
- * It should never return a value < 0.
- */
- int (*map_sg)(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- unsigned long attrs);
- void (*unmap_sg)(struct device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction dir,
- unsigned long attrs);
- dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
- void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
- void (*sync_single_for_cpu)(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction dir);
- void (*sync_single_for_device)(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction dir);
- void (*sync_sg_for_cpu)(struct device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction dir);
- void (*sync_sg_for_device)(struct device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction dir);
- void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction);
- int (*dma_supported)(struct device *dev, u64 mask);
- u64 (*get_required_mask)(struct device *dev);
- size_t (*max_mapping_size)(struct device *dev);
- unsigned long (*get_merge_boundary)(struct device *dev);
-};
-
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)
-extern const struct dma_map_ops dma_virt_ops;
-extern const struct dma_map_ops dma_dummy_ops;
-
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
-#define DMA_MASK_NONE 0x0ULL
-
-static inline int valid_dma_direction(int dma_direction)
-{
- return ((dma_direction == DMA_BIDIRECTIONAL) ||
- (dma_direction == DMA_TO_DEVICE) ||
- (dma_direction == DMA_FROM_DEVICE));
-}
-
-#ifdef CONFIG_DMA_DECLARE_COHERENT
-/*
- * These three functions are only for dma allocator.
- * Don't use them in device drivers.
- */
-int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
- dma_addr_t *dma_handle, void **ret);
-int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
-
-int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, size_t size, int *ret);
-
-void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle);
-int dma_release_from_global_coherent(int order, void *vaddr);
-int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
- size_t size, int *ret);
-
-#else
-#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
-#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
-#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
-
-static inline void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
- dma_addr_t *dma_handle)
-{
- return NULL;
-}
-
-static inline int dma_release_from_global_coherent(int order, void *vaddr)
-{
- return 0;
-}
-
-static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
- void *cpu_addr, size_t size,
- int *ret)
-{
- return 0;
-}
-#endif /* CONFIG_DMA_DECLARE_COHERENT */
-
-static inline bool dma_is_direct(const struct dma_map_ops *ops)
-{
- return likely(!ops);
-}
-
-/*
- * All the dma_direct_* declarations are here just for the indirect call bypass,
- * and must not be used directly drivers!
- */
-dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs);
-int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
- enum dma_data_direction dir, unsigned long attrs);
-dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir, unsigned long attrs);
-
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
- defined(CONFIG_SWIOTLB)
-void dma_direct_sync_single_for_device(struct device *dev,
- dma_addr_t addr, size_t size, enum dma_data_direction dir);
-void dma_direct_sync_sg_for_device(struct device *dev,
- struct scatterlist *sgl, int nents, enum dma_data_direction dir);
-#else
-static inline void dma_direct_sync_single_for_device(struct device *dev,
- dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-}
-static inline void dma_direct_sync_sg_for_device(struct device *dev,
- struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-}
-#endif
-
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
- defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
- defined(CONFIG_SWIOTLB)
-void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir, unsigned long attrs);
-void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir, unsigned long attrs);
-void dma_direct_sync_single_for_cpu(struct device *dev,
- dma_addr_t addr, size_t size, enum dma_data_direction dir);
-void dma_direct_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sgl, int nents, enum dma_data_direction dir);
+#ifdef CONFIG_DMA_API_DEBUG
+void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
+void debug_dma_map_single(struct device *dev, const void *addr,
+ unsigned long len);
#else
-static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-}
-static inline void dma_direct_unmap_sg(struct device *dev,
- struct scatterlist *sgl, int nents, enum dma_data_direction dir,
- unsigned long attrs)
+static inline void debug_dma_mapping_error(struct device *dev,
+ dma_addr_t dma_addr)
{
}
-static inline void dma_direct_sync_single_for_cpu(struct device *dev,
- dma_addr_t addr, size_t size, enum dma_data_direction dir)
+static inline void debug_dma_map_single(struct device *dev, const void *addr,
+ unsigned long len)
{
}
-static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-}
-#endif
-
-size_t dma_direct_max_mapping_size(struct device *dev);
+#endif /* CONFIG_DMA_API_DEBUG */
#ifdef CONFIG_HAS_DMA
-#include <asm/dma-mapping.h>
-
-static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
-{
- if (dev->dma_ops)
- return dev->dma_ops;
- return get_arch_dma_ops(dev->bus);
-}
-
-static inline void set_dma_ops(struct device *dev,
- const struct dma_map_ops *dma_ops)
-{
- dev->dma_ops = dma_ops;
-}
-
-static inline dma_addr_t dma_map_page_attrs(struct device *dev,
- struct page *page, size_t offset, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- dma_addr_t addr;
-
- BUG_ON(!valid_dma_direction(dir));
- if (dma_is_direct(ops))
- addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
- else
- addr = ops->map_page(dev, page, offset, size, dir, attrs);
- debug_dma_map_page(dev, page, offset, size, dir, addr);
-
- return addr;
-}
-
-static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (dma_is_direct(ops))
- dma_direct_unmap_page(dev, addr, size, dir, attrs);
- else if (ops->unmap_page)
- ops->unmap_page(dev, addr, size, dir, attrs);
- debug_dma_unmap_page(dev, addr, size, dir);
-}
-
-/*
- * dma_maps_sg_attrs returns 0 on error and > 0 on success.
- * It should never return a value < 0.
- */
-static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- int ents;
-
- BUG_ON(!valid_dma_direction(dir));
- if (dma_is_direct(ops))
- ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
- else
- ents = ops->map_sg(dev, sg, nents, dir, attrs);
- BUG_ON(ents < 0);
- debug_dma_map_sg(dev, sg, nents, ents, dir);
-
- return ents;
-}
-
-static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- debug_dma_unmap_sg(dev, sg, nents, dir);
- if (dma_is_direct(ops))
- dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
- else if (ops->unmap_sg)
- ops->unmap_sg(dev, sg, nents, dir, attrs);
-}
-
-static inline dma_addr_t dma_map_resource(struct device *dev,
- phys_addr_t phys_addr,
- size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- dma_addr_t addr = DMA_MAPPING_ERROR;
-
- BUG_ON(!valid_dma_direction(dir));
-
- /* Don't allow RAM to be mapped */
- if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
- return DMA_MAPPING_ERROR;
-
- if (dma_is_direct(ops))
- addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
- else if (ops->map_resource)
- addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
-
- debug_dma_map_resource(dev, phys_addr, size, dir, addr);
- return addr;
-}
-
-static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (!dma_is_direct(ops) && ops->unmap_resource)
- ops->unmap_resource(dev, addr, size, dir, attrs);
- debug_dma_unmap_resource(dev, addr, size, dir);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
- size_t size,
- enum dma_data_direction dir)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (dma_is_direct(ops))
- dma_direct_sync_single_for_cpu(dev, addr, size, dir);
- else if (ops->sync_single_for_cpu)
- ops->sync_single_for_cpu(dev, addr, size, dir);
- debug_dma_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
- dma_addr_t addr, size_t size,
- enum dma_data_direction dir)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (dma_is_direct(ops))
- dma_direct_sync_single_for_device(dev, addr, size, dir);
- else if (ops->sync_single_for_device)
- ops->sync_single_for_device(dev, addr, size, dir);
- debug_dma_sync_single_for_device(dev, addr, size, dir);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (dma_is_direct(ops))
- dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
- else if (ops->sync_sg_for_cpu)
- ops->sync_sg_for_cpu(dev, sg, nelems, dir);
- debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (dma_is_direct(ops))
- dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
- else if (ops->sync_sg_for_device)
- ops->sync_sg_for_device(dev, sg, nelems, dir);
- debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
-
-}
-
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
debug_dma_mapping_error(dev, dma_addr);
- if (dma_addr == DMA_MAPPING_ERROR)
+ if (unlikely(dma_addr == DMA_MAPPING_ERROR))
return -ENOMEM;
return 0;
}
+dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
+ size_t offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, unsigned long attrs);
+void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs);
+int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs);
+dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir);
+void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir);
+void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir);
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
@@ -447,8 +132,6 @@ void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle);
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
@@ -456,12 +139,23 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
bool dma_can_mmap(struct device *dev);
-int dma_supported(struct device *dev, u64 mask);
+bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
+size_t dma_opt_mapping_size(struct device *dev);
+bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
unsigned long dma_get_merge_boundary(struct device *dev);
+struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
+ enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
+void dma_free_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt, enum dma_data_direction dir);
+void *dma_vmap_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt);
+void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
+int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
+ size_t size, struct sg_table *sgt);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
struct page *page, size_t offset, size_t size,
@@ -473,8 +167,9 @@ static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
-static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs)
+static inline unsigned int dma_map_sg_attrs(struct device *dev,
+ struct scatterlist *sg, int nents, enum dma_data_direction dir,
+ unsigned long attrs)
{
return 0;
}
@@ -483,6 +178,11 @@ static inline void dma_unmap_sg_attrs(struct device *dev,
unsigned long attrs)
{
}
+static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ return -EOPNOTSUPP;
+}
static inline dma_addr_t dma_map_resource(struct device *dev,
phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
unsigned long attrs)
@@ -531,10 +231,6 @@ static inline void dmam_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
}
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir)
-{
-}
static inline int dma_get_sgtable_attrs(struct device *dev,
struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
size_t size, unsigned long attrs)
@@ -551,9 +247,9 @@ static inline bool dma_can_mmap(struct device *dev)
{
return false;
}
-static inline int dma_supported(struct device *dev, u64 mask)
+static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
- return 0;
+ return false;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
@@ -571,12 +267,63 @@ static inline size_t dma_max_mapping_size(struct device *dev)
{
return 0;
}
+static inline size_t dma_opt_mapping_size(struct device *dev)
+{
+ return 0;
+}
+static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+ return false;
+}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
return 0;
}
+static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
+ size_t size, enum dma_data_direction dir, gfp_t gfp,
+ unsigned long attrs)
+{
+ return NULL;
+}
+static inline void dma_free_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+}
+static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt)
+{
+ return NULL;
+}
+static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
+{
+}
+static inline int dma_mmap_noncontiguous(struct device *dev,
+ struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_HAS_DMA */
+struct page *dma_alloc_pages(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
+void dma_free_pages(struct device *dev, size_t size, struct page *page,
+ dma_addr_t dma_handle, enum dma_data_direction dir);
+int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
+ size_t size, struct page *page);
+
+static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
+{
+ struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
+ return page ? page_address(page) : NULL;
+}
+
+static inline void dma_free_noncoherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
+{
+ dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
+}
+
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
@@ -609,6 +356,58 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
+/**
+ * dma_unmap_sgtable - Unmap the given buffer for DMA
+ * @dev: The device for which to perform the DMA operation
+ * @sgt: The sg_table object describing the buffer
+ * @dir: DMA direction
+ * @attrs: Optional DMA attributes for the unmap operation
+ *
+ * Unmaps a buffer described by a scatterlist stored in the given sg_table
+ * object for the @dir DMA operation by the @dev device. After this function
+ * the ownership of the buffer is transferred back to the CPU domain.
+ */
+static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
+}
+
+/**
+ * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
+ * @dev: The device for which to perform the DMA operation
+ * @sgt: The sg_table object describing the buffer
+ * @dir: DMA direction
+ *
+ * Performs the needed cache synchronization and moves the ownership of the
+ * buffer back to the CPU domain, so it is safe to perform any access to it
+ * by the CPU. Before doing any further DMA operations, one has to transfer
+ * the ownership of the buffer back to the DMA domain by calling
+ * dma_sync_sgtable_for_device().
+ */
+static inline void dma_sync_sgtable_for_cpu(struct device *dev,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
+}
+
+/**
+ * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
+ * @dev: The device for which to perform the DMA operation
+ * @sgt: The sg_table object describing the buffer
+ * @dir: DMA direction
+ *
+ * Performs the needed cache synchronization and moves the ownership of the
+ * buffer back to the DMA domain, so it is safe to perform the DMA operation.
+ * Once finished, one has to call dma_sync_sgtable_for_cpu() or
+ * dma_unmap_sgtable().
+ */
+static inline void dma_sync_sgtable_for_device(struct device *dev,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
+}
+
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
@@ -618,30 +417,9 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs);
-
-struct page **dma_common_find_pages(void *cpu_addr);
-void *dma_common_contiguous_remap(struct page *page, size_t size,
- pgprot_t prot, const void *caller);
-
-void *dma_common_pages_remap(struct page **pages, size_t size,
- pgprot_t prot, const void *caller);
-void dma_common_free_remap(void *cpu_addr, size_t size);
-
-bool dma_in_atomic_pool(void *start, size_t size);
-void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
-bool dma_free_from_pool(void *start, size_t size);
-
-int
-dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
- dma_addr_t dma_addr, size_t size, unsigned long attrs);
-
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
-
return dma_alloc_attrs(dev, size, dma_handle, gfp,
(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}
@@ -698,24 +476,6 @@ static inline bool dma_addressing_limited(struct device *dev)
dma_get_required_mask(dev);
}
-#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent);
-#else
-static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
- u64 size, const struct iommu_ops *iommu, bool coherent)
-{
-}
-#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */
-
-#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
-void arch_teardown_dma_ops(struct device *dev);
-#else
-static inline void arch_teardown_dma_ops(struct device *dev)
-{
-}
-#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */
-
static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
if (dev->dma_parms && dev->dma_parms->max_segment_size)
@@ -736,7 +496,26 @@ static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
return dev->dma_parms->segment_boundary_mask;
- return DMA_BIT_MASK(32);
+ return ULONG_MAX;
+}
+
+/**
+ * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
+ * @dev: device to query the boundary for
+ * @page_shift: ilog2() of the IOMMU page size
+ *
+ * Return the segment boundary in IOMMU page units (which may be different from
+ * the CPU page size) for the passed in device.
+ *
+ * If @dev is NULL, a boundary of U32_MAX is assumed; this case exists only
+ * for non-DMA API callers.
+ */
+static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
+ unsigned int page_shift)
+{
+ if (!dev)
+ return (U32_MAX >> page_shift) + 1;
+ return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}
static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
@@ -748,6 +527,22 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
return -EIO;
}
+static inline unsigned int dma_get_min_align_mask(struct device *dev)
+{
+ if (dev->dma_parms)
+ return dev->dma_parms->min_align_mask;
+ return 0;
+}
+
+static inline int dma_set_min_align_mask(struct device *dev,
+ unsigned int min_align_mask)
+{
+ if (WARN_ON_ONCE(!dev->dma_parms))
+ return -EIO;
+ dev->dma_parms->min_align_mask = min_align_mask;
+ return 0;
+}
+
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
@@ -756,18 +551,6 @@ static inline int dma_get_cache_alignment(void)
return 1;
}
-#ifdef CONFIG_DMA_DECLARE_COHERENT
-int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
- dma_addr_t device_addr, size_t size);
-#else
-static inline int
-dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
- dma_addr_t device_addr, size_t size)
-{
- return -ENOSYS;
-}
-#endif /* CONFIG_DMA_DECLARE_COHERENT */
-
static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
@@ -818,4 +601,4 @@ static inline int dma_mmap_wc(struct device *dev,
#define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
#endif
-#endif
+#endif /* _LINUX_DMA_MAPPING_H */
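
The sgtable wrappers give the usual streaming-DMA lifecycle; a sketch for a device-bound transfer, assuming dev and sgt are already set up by the caller:

    int ret;

    ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
    if (ret)
            return ret;     /* negative errno, unlike the old 0-means-error map_sg */

    dma_sync_sgtable_for_cpu(dev, sgt, DMA_TO_DEVICE);
    /* ... CPU writes the payload ... */
    dma_sync_sgtable_for_device(dev, sgt, DMA_TO_DEVICE);
    /* ... kick off and wait for the device DMA ... */

    dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
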
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
deleted file mode 100644
index ca9b5770caee..000000000000
--- a/include/linux/dma-noncoherent.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_DMA_NONCOHERENT_H
-#define _LINUX_DMA_NONCOHERENT_H 1
-
-#include <linux/dma-mapping.h>
-#include <asm/pgtable.h>
-
-#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
-#include <asm/dma-coherence.h>
-#elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
- defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
- defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
-static inline bool dev_is_dma_coherent(struct device *dev)
-{
- return dev->dma_coherent;
-}
-#else
-static inline bool dev_is_dma_coherent(struct device *dev)
-{
- return true;
-}
-#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */
-
-/*
- * Check if an allocation needs to be marked uncached to be coherent.
- */
-static __always_inline bool dma_alloc_need_uncached(struct device *dev,
- unsigned long attrs)
-{
- if (dev_is_dma_coherent(dev))
- return false;
- if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
- return false;
- if (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
- (attrs & DMA_ATTR_NON_CONSISTENT))
- return false;
- return true;
-}
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp, unsigned long attrs);
-void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_addr, unsigned long attrs);
-
-#ifdef CONFIG_MMU
-/*
- * Page protection so that devices that can't snoop CPU caches can use the
- * memory coherently. We default to pgprot_noncached which is usually used
- * for ioremap as a safe bet, but architectures can override this with less
- * strict semantics if possible.
- */
-#ifndef pgprot_dmacoherent
-#define pgprot_dmacoherent(prot) pgprot_noncached(prot)
-#endif
-
-pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
-#else
-static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
- unsigned long attrs)
-{
- return prot; /* no protection bits supported without page tables */
-}
-#endif /* CONFIG_MMU */
-
-#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
-void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction);
-#else
-static inline void arch_dma_cache_sync(struct device *dev, void *vaddr,
- size_t size, enum dma_data_direction direction)
-{
-}
-#endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */
-
-#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
-void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir);
-#else
-static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir)
-{
-}
-#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */
-
-#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir);
-#else
-static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir)
-{
-}
-#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
-
-#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
-void arch_sync_dma_for_cpu_all(void);
-#else
-static inline void arch_sync_dma_for_cpu_all(void)
-{
-}
-#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
-
-#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
-void arch_dma_prep_coherent(struct page *page, size_t size);
-#else
-static inline void arch_dma_prep_coherent(struct page *page, size_t size)
-{
-}
-#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */
-
-void *uncached_kernel_address(void *addr);
-void *cached_kernel_address(void *addr);
-
-#endif /* _LINUX_DMA_NONCOHERENT_H */
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index ee50d10f052b..0637659a702c 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -46,55 +46,278 @@
#include <linux/rcupdate.h>
extern struct ww_class reservation_ww_class;
-extern struct lock_class_key reservation_seqcount_class;
-extern const char reservation_seqcount_string[];
+
+struct dma_resv_list;
/**
- * struct dma_resv_list - a list of shared fences
- * @rcu: for internal use
- * @shared_count: table of shared fences
- * @shared_max: for growing shared fence table
- * @shared: shared fence table
+ * enum dma_resv_usage - how the fences from a dma_resv obj are used
+ *
+ * This enum describes the different use cases for a dma_resv object and
+ * controls which fences are returned when queried.
+ *
+ * An important fact is that the usages are ordered
+ * KERNEL < WRITE < READ < BOOKKEEP, and when the dma_resv object is asked for
+ * fences of one use case, the fences for the lower use cases are returned as
+ * well.
+ *
+ * For example, when asking for WRITE fences the KERNEL fences are returned as
+ * well. Similarly, when asked for READ fences, both WRITE and KERNEL fences
+ * are returned.
+ *
+ * Already used fences can be promoted in the sense that a fence with
+ * DMA_RESV_USAGE_BOOKKEEP could become DMA_RESV_USAGE_READ by adding it again
+ * with this usage. But fences can never be degraded in the sense that a fence
+ * with DMA_RESV_USAGE_WRITE could become DMA_RESV_USAGE_READ.
*/
-struct dma_resv_list {
- struct rcu_head rcu;
- u32 shared_count, shared_max;
- struct dma_fence __rcu *shared[];
+enum dma_resv_usage {
+ /**
+ * @DMA_RESV_USAGE_KERNEL: For in kernel memory management only.
+ *
+ * This should only be used for things like copying or clearing memory
+ * with a DMA hardware engine for the purpose of kernel memory
+ * management.
+ *
+	 * Drivers must *always* wait for those fences before accessing the
+	 * resource protected by the dma_resv object. The only exception is
+	 * when the resource is known to be locked down in place by pinning it
+	 * previously.
+ */
+ DMA_RESV_USAGE_KERNEL,
+
+ /**
+ * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
+ *
+ * This should only be used for userspace command submissions which add
+ * an implicit write dependency.
+ */
+ DMA_RESV_USAGE_WRITE,
+
+ /**
+ * @DMA_RESV_USAGE_READ: Implicit read synchronization.
+ *
+ * This should only be used for userspace command submissions which add
+ * an implicit read dependency.
+ */
+ DMA_RESV_USAGE_READ,
+
+ /**
+ * @DMA_RESV_USAGE_BOOKKEEP: No implicit sync.
+ *
+ * This should be used by submissions which don't want to participate in
+ * any implicit synchronization.
+ *
+	 * The most common cases are preemption fences, page table updates, TLB
+	 * flushes as well as explicitly synced user submissions.
+	 *
+	 * Explicitly synced user submissions can be promoted to
+ * DMA_RESV_USAGE_READ or DMA_RESV_USAGE_WRITE as needed using
+ * dma_buf_import_sync_file() when implicit synchronization should
+ * become necessary after initial adding of the fence.
+ */
+ DMA_RESV_USAGE_BOOKKEEP
};
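A minimal sketch of what this ordering means in practice, using dma_resv_wait_timeout() declared later in this header (@obj is assumed to be a valid reservation object):

	/* Waits for READ, WRITE and KERNEL fences, but not for
	 * BOOKKEEP-only fences such as preemption fences. */
	long ret = dma_resv_wait_timeout(obj, DMA_RESV_USAGE_READ, true,
					 msecs_to_jiffies(100));
	if (ret == 0)
		/* timed out */;
	else if (ret < 0)
		/* interrupted, e.g. -ERESTARTSYS */;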
/**
+ * dma_resv_usage_rw - helper for implicit sync
+ * @write: true if we create a new implicit sync write
+ *
+ * This returns the implicit synchronization usage for write or read accesses,
+ * see enum dma_resv_usage and &dma_buf.resv.
+ */
+static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
+{
+ /* This looks confusing at first sight, but is indeed correct.
+ *
+	 * The rationale is that new write operations need to wait for the
+	 * existing read and write operations to finish.
+ * But a new read operation only needs to wait for the existing write
+ * operations to finish.
+ */
+ return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
+}
+
+/**
* struct dma_resv - a reservation object manages fences for a buffer
- * @lock: update side lock
- * @seq: sequence count for managing RCU read-side synchronization
- * @fence_excl: the exclusive fence, if there is one currently
- * @fence: list of current shared fences
+ *
+ * This is a container for dma_fence objects which needs to handle multiple use
+ * cases.
+ *
+ * One use is to synchronize cross-driver access to a struct dma_buf, either for
+ * dynamic buffer management or just to handle implicit synchronization between
+ * different users of the buffer in userspace. See &dma_buf.resv for a more
+ * in-depth discussion.
+ *
+ * The other major use is to manage access and locking within a driver in a
+ * buffer based memory manager. struct ttm_buffer_object is the canonical
+ * example here, since this is where reservation objects originated from. But
+ * use in drivers is spreading and some drivers also manage struct
+ * drm_gem_object with the same scheme.
*/
struct dma_resv {
+ /**
+ * @lock:
+ *
+ * Update side lock. Don't use directly, instead use the wrapper
+ * functions like dma_resv_lock() and dma_resv_unlock().
+ *
+ * Drivers which use the reservation object to manage memory dynamically
+ * also use this lock to protect buffer object state like placement,
+ * allocation policies or throughout command submission.
+ */
struct ww_mutex lock;
- seqcount_t seq;
- struct dma_fence __rcu *fence_excl;
- struct dma_resv_list __rcu *fence;
+ /**
+ * @fences:
+ *
+	 * Array of fences which were added to the dma_resv object
+ *
+ * A new fence is added by calling dma_resv_add_fence(). Since this
+ * often needs to be done past the point of no return in command
+ * submission it cannot fail, and therefore sufficient slots need to be
+ * reserved by calling dma_resv_reserve_fences().
+ */
+ struct dma_resv_list __rcu *fences;
};
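The @fences kerneldoc above implies a two-step contract; a minimal sketch, assuming @obj and @fence are valid:

	dma_resv_lock(obj, NULL);
	/* reserve a slot while failure can still be handled ... */
	ret = dma_resv_reserve_fences(obj, 1);
	if (!ret)
		/* ... add past the point of no return, which cannot fail */
		dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_WRITE);
	dma_resv_unlock(obj);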
-#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
-#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
+/**
+ * struct dma_resv_iter - current position into the dma_resv fences
+ *
+ * Don't touch this directly in the driver, use the accessor function instead.
+ *
+ * IMPORTANT
+ *
+ * When using the lockless iterators like dma_resv_iter_next_unlocked() or
+ * dma_resv_for_each_fence_unlocked() beware that the iterator can be restarted.
+ * Code which accumulates statistics or similar needs to check for this with
+ * dma_resv_iter_is_restarted().
+ */
+struct dma_resv_iter {
+ /** @obj: The dma_resv object we iterate over */
+ struct dma_resv *obj;
+
+ /** @usage: Return fences with this usage or lower. */
+ enum dma_resv_usage usage;
+
+ /** @fence: the currently handled fence */
+ struct dma_fence *fence;
+
+ /** @fence_usage: the usage of the current fence */
+ enum dma_resv_usage fence_usage;
+
+ /** @index: index into the shared fences */
+ unsigned int index;
+
+ /** @fences: the shared fences; private, *MUST* not dereference */
+ struct dma_resv_list *fences;
+
+ /** @num_fences: number of fences */
+ unsigned int num_fences;
+
+ /** @is_restarted: true if this is the first returned fence */
+ bool is_restarted;
+};
+
+struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);
/**
- * dma_resv_get_list - get the reservation object's
- * shared fence list, with update-side lock held
- * @obj: the reservation object
+ * dma_resv_iter_begin - initialize a dma_resv_iter object
+ * @cursor: The dma_resv_iter object to initialize
+ * @obj: The dma_resv object which we want to iterate over
+ * @usage: controls which fences to include, see enum dma_resv_usage.
+ */
+static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
+ struct dma_resv *obj,
+ enum dma_resv_usage usage)
+{
+ cursor->obj = obj;
+ cursor->usage = usage;
+ cursor->fence = NULL;
+}
+
+/**
+ * dma_resv_iter_end - cleanup a dma_resv_iter object
+ * @cursor: the dma_resv_iter object which should be cleaned up
+ *
+ * Make sure that the reference to the fence in the cursor is properly
+ * dropped.
+ */
+static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
+{
+ dma_fence_put(cursor->fence);
+}
+
+/**
+ * dma_resv_iter_usage - Return the usage of the current fence
+ * @cursor: the cursor of the current position
+ *
+ * Returns the usage of the currently processed fence.
+ */
+static inline enum dma_resv_usage
+dma_resv_iter_usage(struct dma_resv_iter *cursor)
+{
+ return cursor->fence_usage;
+}
+
+/**
+ * dma_resv_iter_is_restarted - test if this is the first fence after a restart
+ * @cursor: the cursor with the current position
*
- * Returns the shared fence list. Does NOT take references to
- * the fence. The obj->lock must be held.
+ * Return true if this is the first fence in an iteration after a restart.
*/
-static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
+static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
{
- return rcu_dereference_protected(obj->fence,
- dma_resv_held(obj));
+ return cursor->is_restarted;
}
/**
+ * dma_resv_for_each_fence_unlocked - unlocked fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object without holding the
+ * &dma_resv.lock and using RCU instead. The cursor needs to be initialized
+ * with dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside
+ * the iterator a reference to the dma_fence is held and the RCU lock dropped.
+ *
+ * Beware that the iterator can be restarted when the struct dma_resv for
+ * @cursor is modified. Code which accumulates statistics or similar needs to
+ * check for this with dma_resv_iter_is_restarted(). For this reason prefer the
+ * locked iterator dma_resv_for_each_fence() whenever possible.
+ */
+#define dma_resv_for_each_fence_unlocked(cursor, fence) \
+ for (fence = dma_resv_iter_first_unlocked(cursor); \
+ fence; fence = dma_resv_iter_next_unlocked(cursor))
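A sketch of the restart-aware pattern the comment above asks for:

	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	dma_resv_iter_begin(&cursor, obj, DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (dma_resv_iter_is_restarted(&cursor))
			count = 0;	/* the fence set changed, start over */
		count++;
	}
	dma_resv_iter_end(&cursor);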
+
+/**
+ * dma_resv_for_each_fence - fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @obj: a dma_resv object pointer
+ * @usage: controls which fences to return
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object while holding the
+ * &dma_resv.lock. @usage controls which fences are returned. The cursor
+ * initialisation is part of the iterator and the fence stays
+ * valid as long as the lock is held and so no extra reference to the fence is
+ * taken.
+ */
+#define dma_resv_for_each_fence(cursor, obj, usage, fence) \
+ for (dma_resv_iter_begin(cursor, obj, usage), \
+ fence = dma_resv_iter_first(cursor); fence; \
+ fence = dma_resv_iter_next(cursor))
+
+#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
+#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
+
+#ifdef CONFIG_DEBUG_MUTEXES
+void dma_resv_reset_max_fences(struct dma_resv *obj);
+#else
+static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {}
+#endif
+
+/**
* dma_resv_lock - lock the reservation object
* @obj: the reservation object
* @ctx: the locking context
@@ -108,6 +331,13 @@ static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
* undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
* is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
* object may be locked by itself by passing NULL as @ctx.
+ *
+ * When a die situation is indicated by returning -EDEADLK all locks held by
+ * @ctx must be unlocked and then dma_resv_lock_slow() called on @obj.
+ *
+ * Unlocked by calling dma_resv_unlock().
+ *
+ * See also dma_resv_lock_interruptible() for the interruptible variant.
*/
static inline int dma_resv_lock(struct dma_resv *obj,
struct ww_acquire_ctx *ctx)
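A minimal sketch of that dance for two objects locked in undefined order; real code loops until no -EDEADLK is returned and handles errors:

	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(a, &ctx);
	if (!ret)
		ret = dma_resv_lock(b, &ctx);
	if (ret == -EDEADLK) {
		dma_resv_unlock(a);		/* drop everything held in @ctx */
		dma_resv_lock_slow(b, &ctx);	/* sleep on the contended lock */
		ret = dma_resv_lock(a, &ctx);	/* may deadlock again: retry */
	}
	ww_acquire_done(&ctx);
	/* ... use the buffers ... */
	dma_resv_unlock(a);
	dma_resv_unlock(b);
	ww_acquire_fini(&ctx);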
@@ -129,6 +359,12 @@ static inline int dma_resv_lock(struct dma_resv *obj,
* undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
* is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
* object may be locked by itself by passing NULL as @ctx.
+ *
+ * When a die situation is indicated by returning -EDEADLK all locks held by
+ * @ctx must be unlocked and then dma_resv_lock_slow_interruptible() called on
+ * @obj.
+ *
+ * Unlocked by calling dma_resv_unlock().
*/
static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
struct ww_acquire_ctx *ctx)
@@ -144,6 +380,8 @@ static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
* Acquires the reservation object after a die case. This function
* will sleep until the lock becomes available. See dma_resv_lock() as
* well.
+ *
+ * See also dma_resv_lock_slow_interruptible() for the interruptible variant.
*/
static inline void dma_resv_lock_slow(struct dma_resv *obj,
struct ww_acquire_ctx *ctx)
@@ -177,13 +415,13 @@ static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
* if they overlap with a writer.
*
* Also note that since no context is provided, no deadlock protection is
- * possible.
+ * possible, which is also not needed for a trylock.
*
* Returns true if the lock was acquired, false otherwise.
*/
static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
{
- return ww_mutex_trylock(&obj->lock);
+ return ww_mutex_trylock(&obj->lock, NULL);
}
/**
@@ -203,6 +441,11 @@ static inline bool dma_resv_is_locked(struct dma_resv *obj)
*
* Returns the context used to lock a reservation object or NULL if no context
* was used or the object is not locked at all.
+ *
+ * WARNING: This interface is pretty horrible, but TTM needs it because it
+ * doesn't pass the struct ww_acquire_ctx around in some very long callchains.
+ * Everyone else just uses it to check whether they're holding a reservation or
+ * not.
*/
static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
{
@@ -217,79 +460,26 @@ static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
*/
static inline void dma_resv_unlock(struct dma_resv *obj)
{
-#ifdef CONFIG_DEBUG_MUTEXES
- /* Test shared fence slot reservation */
- if (rcu_access_pointer(obj->fence)) {
- struct dma_resv_list *fence = dma_resv_get_list(obj);
-
- fence->shared_max = fence->shared_count;
- }
-#endif
+ dma_resv_reset_max_fences(obj);
ww_mutex_unlock(&obj->lock);
}
-/**
- * dma_resv_get_excl - get the reservation object's
- * exclusive fence, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the exclusive fence (if any). Does NOT take a
- * reference. Writers must hold obj->lock, readers may only
- * hold a RCU read side lock.
- *
- * RETURNS
- * The exclusive fence or NULL
- */
-static inline struct dma_fence *
-dma_resv_get_excl(struct dma_resv *obj)
-{
- return rcu_dereference_protected(obj->fence_excl,
- dma_resv_held(obj));
-}
-
-/**
- * dma_resv_get_excl_rcu - get the reservation object's
- * exclusive fence, without lock held.
- * @obj: the reservation object
- *
- * If there is an exclusive fence, this atomically increments it's
- * reference count and returns it.
- *
- * RETURNS
- * The exclusive fence or NULL if none
- */
-static inline struct dma_fence *
-dma_resv_get_excl_rcu(struct dma_resv *obj)
-{
- struct dma_fence *fence;
-
- if (!rcu_access_pointer(obj->fence_excl))
- return NULL;
-
- rcu_read_lock();
- fence = dma_fence_get_rcu_safe(&obj->fence_excl);
- rcu_read_unlock();
-
- return fence;
-}
-
void dma_resv_init(struct dma_resv *obj);
void dma_resv_fini(struct dma_resv *obj);
-int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
-void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
-
-void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-
-int dma_resv_get_fences_rcu(struct dma_resv *obj,
- struct dma_fence **pfence_excl,
- unsigned *pshared_count,
- struct dma_fence ***pshared);
-
+int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences);
+void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+ enum dma_resv_usage usage);
+void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
+ struct dma_fence *fence,
+ enum dma_resv_usage usage);
+int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
+ unsigned int *num_fences, struct dma_fence ***fences);
+int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
+ struct dma_fence **fence);
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
-
-long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
- unsigned long timeout);
-
-bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
+long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
+ bool intr, unsigned long timeout);
+bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage);
+void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);
#endif /* _LINUX_RESERVATION_H */
diff --git a/include/linux/dma/edma.h b/include/linux/dma/edma.h
index cab6e18773da..7d8062e9c544 100644
--- a/include/linux/dma/edma.h
+++ b/include/linux/dma/edma.h
@@ -12,19 +12,74 @@
#include <linux/device.h>
#include <linux/dmaengine.h>
+#define EDMA_MAX_WR_CH 8
+#define EDMA_MAX_RD_CH 8
+
struct dw_edma;
+struct dw_edma_region {
+ phys_addr_t paddr;
+ void __iomem *vaddr;
+ size_t sz;
+};
+
+struct dw_edma_core_ops {
+ int (*irq_vector)(struct device *dev, unsigned int nr);
+};
+
+enum dw_edma_map_format {
+ EDMA_MF_EDMA_LEGACY = 0x0,
+ EDMA_MF_EDMA_UNROLL = 0x1,
+ EDMA_MF_HDMA_COMPAT = 0x5
+};
+
+/**
+ * enum dw_edma_chip_flags - Flags specific to an eDMA chip
+ * @DW_EDMA_CHIP_LOCAL: eDMA is used locally by an endpoint
+ */
+enum dw_edma_chip_flags {
+ DW_EDMA_CHIP_LOCAL = BIT(0),
+};
+
/**
* struct dw_edma_chip - representation of DesignWare eDMA controller hardware
* @dev: struct device of the eDMA controller
* @id: instance ID
- * @irq: irq line
- * @dw: struct dw_edma that is filed by dw_edma_probe()
+ * @nr_irqs: total number of DMA IRQs
+ * @ops: DMA channel to IRQ number mapping
+ * @flags: dw_edma_chip_flags
+ * @reg_base: DMA register base address
+ * @ll_wr_cnt: DMA write link list count
+ * @ll_rd_cnt: DMA read link list count
+ * @ll_region_wr: DMA descriptor link list memory for write channel
+ * @ll_region_rd: DMA descriptor link list memory for read channel
+ * @dt_region_wr: DMA data memory for write channel
+ * @dt_region_rd: DMA data memory for read channel
+ * @mf: DMA register map format
+ * @dw: struct dw_edma that is filled by dw_edma_probe()
*/
struct dw_edma_chip {
struct device *dev;
int id;
- int irq;
+ int nr_irqs;
+ const struct dw_edma_core_ops *ops;
+ u32 flags;
+
+ void __iomem *reg_base;
+
+ u16 ll_wr_cnt;
+ u16 ll_rd_cnt;
+ /* link list address */
+ struct dw_edma_region ll_region_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_region ll_region_rd[EDMA_MAX_RD_CH];
+
+ /* data region */
+ struct dw_edma_region dt_region_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_region dt_region_rd[EDMA_MAX_RD_CH];
+
+ enum dw_edma_map_format mf;
+
struct dw_edma *dw;
};
diff --git a/include/linux/dma/hsu.h b/include/linux/dma/hsu.h
index a6b7bc707356..77ea602c287c 100644
--- a/include/linux/dma/hsu.h
+++ b/include/linux/dma/hsu.h
@@ -8,11 +8,13 @@
#ifndef _DMA_HSU_H
#define _DMA_HSU_H
-#include <linux/device.h>
-#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/kconfig.h>
+#include <linux/types.h>
#include <linux/platform_data/dma-hsu.h>
+struct device;
struct hsu_dma;
/**
diff --git a/include/linux/dma/imx-dma.h b/include/linux/dma/imx-dma.h
new file mode 100644
index 000000000000..f487a4fa103a
--- /dev/null
+++ b/include/linux/dma/imx-dma.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+#ifndef __LINUX_DMA_IMX_H
+#define __LINUX_DMA_IMX_H
+
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+
+/*
+ * This enumerates peripheral types. Used for SDMA.
+ */
+enum sdma_peripheral_type {
+ IMX_DMATYPE_SSI, /* MCU domain SSI */
+ IMX_DMATYPE_SSI_SP, /* Shared SSI */
+ IMX_DMATYPE_MMC, /* MMC */
+ IMX_DMATYPE_SDHC, /* SDHC */
+ IMX_DMATYPE_UART, /* MCU domain UART */
+ IMX_DMATYPE_UART_SP, /* Shared UART */
+ IMX_DMATYPE_FIRI, /* FIRI */
+ IMX_DMATYPE_CSPI, /* MCU domain CSPI */
+ IMX_DMATYPE_CSPI_SP, /* Shared CSPI */
+ IMX_DMATYPE_SIM, /* SIM */
+ IMX_DMATYPE_ATA, /* ATA */
+ IMX_DMATYPE_CCM, /* CCM */
+ IMX_DMATYPE_EXT, /* External peripheral */
+ IMX_DMATYPE_MSHC, /* Memory Stick Host Controller */
+ IMX_DMATYPE_MSHC_SP, /* Shared Memory Stick Host Controller */
+ IMX_DMATYPE_DSP, /* DSP */
+ IMX_DMATYPE_MEMORY, /* Memory */
+ IMX_DMATYPE_FIFO_MEMORY,/* FIFO type Memory */
+ IMX_DMATYPE_SPDIF, /* SPDIF */
+ IMX_DMATYPE_IPU_MEMORY, /* IPU Memory */
+ IMX_DMATYPE_ASRC, /* ASRC */
+ IMX_DMATYPE_ESAI, /* ESAI */
+ IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */
+ IMX_DMATYPE_ASRC_SP, /* Shared ASRC */
+ IMX_DMATYPE_SAI, /* SAI */
+ IMX_DMATYPE_MULTI_SAI, /* MULTI FIFOs For Audio */
+};
+
+enum imx_dma_prio {
+ DMA_PRIO_HIGH = 0,
+ DMA_PRIO_MEDIUM = 1,
+ DMA_PRIO_LOW = 2
+};
+
+struct imx_dma_data {
+ int dma_request; /* DMA request line */
+ int dma_request2; /* secondary DMA request line */
+ enum sdma_peripheral_type peripheral_type;
+ int priority;
+};
+
+static inline int imx_dma_is_ipu(struct dma_chan *chan)
+{
+ return !strcmp(dev_name(chan->device->dev), "ipu-core");
+}
+
+static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
+{
+ return !strcmp(chan->device->dev->driver->name, "imx-sdma") ||
+ !strcmp(chan->device->dev->driver->name, "imx-dma");
+}
+
+/**
+ * struct sdma_peripheral_config - SDMA config for audio
+ * @n_fifos_src: Number of FIFOs for recording
+ * @n_fifos_dst: Number of FIFOs for playback
+ * @stride_fifos_src: FIFO address stride for recording; 0 means the FIFOs are
+ *                    contiguous, 1 means one word of stride between FIFOs. The
+ *                    stride must be the same between all FIFOs.
+ * @stride_fifos_dst: FIFO address stride for playback
+ * @words_per_fifo: number of words per FIFO fetch/fill; 1 means
+ *                  one channel per FIFO, 2 means two channels per FIFO.
+ *                  If 'n_fifos_src = 4' and 'words_per_fifo = 2', the
+ *                  first two words (channels) are fetched from FIFO0,
+ *                  the next two from FIFO1, and so on; after the last
+ *                  FIFO3 is fetched, wrap back to FIFO0.
+ * @sw_done: Use software done. Needed for PDM (micfil)
+ *
+ * Some i.MX Audio devices (SAI, micfil) have multiple successive FIFO
+ * registers. For multichannel recording/playback the SAI/micfil have
+ * one FIFO register per channel and the SDMA engine has to read/write
+ * the next channel from/to the next register and wrap around to the
+ * first register when all channels are handled. The number of active
+ * channels must be communicated to the SDMA engine using this struct.
+ */
+struct sdma_peripheral_config {
+ int n_fifos_src;
+ int n_fifos_dst;
+ int stride_fifos_src;
+ int stride_fifos_dst;
+ int words_per_fifo;
+ bool sw_done;
+};
+
+#endif /* __LINUX_DMA_IMX_H */
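A hedged sketch of how an audio driver would communicate this FIFO layout through dma_slave_config (values and fifo_dma_addr are illustrative, not taken from a real SAI instance):

	struct sdma_peripheral_config sdmacfg = {
		.n_fifos_dst = 4,	/* four playback FIFO registers */
		.stride_fifos_dst = 0,	/* FIFOs are contiguous */
		.words_per_fifo = 2,	/* two channels per FIFO */
	};
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = fifo_dma_addr,	/* assumed */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.peripheral_config = &sdmacfg,
		.peripheral_size = sizeof(sdmacfg),
	};

	ret = dmaengine_slave_config(chan, &cfg);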
diff --git a/include/linux/dma/k3-event-router.h b/include/linux/dma/k3-event-router.h
new file mode 100644
index 000000000000..e3f88b2f87be
--- /dev/null
+++ b/include/linux/dma/k3-event-router.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#ifndef K3_EVENT_ROUTER_
+#define K3_EVENT_ROUTER_
+
+#include <linux/types.h>
+
+struct k3_event_route_data {
+ void *priv;
+ int (*set_event)(void *priv, u32 event);
+};
+
+#endif /* K3_EVENT_ROUTER_ */
diff --git a/include/linux/dma/k3-psil.h b/include/linux/dma/k3-psil.h
index 61d5cc0ad601..5f106d852f1c 100644
--- a/include/linux/dma/k3-psil.h
+++ b/include/linux/dma/k3-psil.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
*/
#ifndef K3_PSIL_H_
@@ -42,27 +42,42 @@ enum psil_endpoint_type {
/**
* struct psil_endpoint_config - PSI-L Endpoint configuration
* @ep_type: PSI-L endpoint type
+ * @channel_tpl: Desired throughput level for the channel
* @pkt_mode: If set, the channel must be in Packet mode, otherwise in
* TR mode
* @notdpkt: TDCM must be suppressed on the TX channel
* @needs_epib: Endpoint needs EPIB
- * @psd_size: If set, PSdata is used by the endpoint
- * @channel_tpl: Desired throughput level for the channel
* @pdma_acc32: ACC32 must be enabled on the PDMA side
* @pdma_burst: BURST must be enabled on the PDMA side
+ * @psd_size: If set, PSdata is used by the endpoint
+ * @mapped_channel_id: PKTDMA thread to channel mapping for mapped channels.
+ * The thread must be serviced by the specified channel if
+ * mapped_channel_id is >= 0 in case of PKTDMA
+ * @flow_start: PKTDMA flow range start of mapped channel. Unmapped
+ * channels use flow_id == chan_id
+ * @flow_num: PKTDMA flow count of mapped channel. Unmapped channels
+ * use flow_id == chan_id
+ * @default_flow_id: PKTDMA default (r)flow index of mapped channel.
+ * Must be within the flow range of the mapped channel.
*/
struct psil_endpoint_config {
enum psil_endpoint_type ep_type;
+ enum udma_tp_level channel_tpl;
unsigned pkt_mode:1;
unsigned notdpkt:1;
unsigned needs_epib:1;
- u32 psd_size;
- enum udma_tp_level channel_tpl;
-
/* PDMA properties, valid for PSIL_EP_PDMA_* */
unsigned pdma_acc32:1;
unsigned pdma_burst:1;
+
+ u32 psd_size;
+ /* PKDMA mapped channel */
+ s16 mapped_channel_id;
+ /* PKTDMA tflow and rflow ranges for mapped channel */
+ u16 flow_start;
+ u16 flow_num;
+ s16 default_flow_id;
};
int psil_set_new_ep_config(struct device *dev, const char *name,
diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h
index caadbab1632a..e443be4d3b4b 100644
--- a/include/linux/dma/k3-udma-glue.h
+++ b/include/linux/dma/k3-udma-glue.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
*/
#ifndef K3_UDMA_GLUE_H_
@@ -41,6 +41,12 @@ void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn);
u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn);
int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn);
+struct device *
+ k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn);
+void k3_udma_glue_tx_dma_to_cppi5_addr(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *addr);
+void k3_udma_glue_tx_cppi5_to_dma_addr(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *addr);
enum {
K3_UDMA_GLUE_SRC_TAG_LO_KEEP = 0,
@@ -130,5 +136,11 @@ int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_idx);
int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_idx);
+struct device *
+ k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn);
+void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn,
+ dma_addr_t *addr);
+void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn,
+ dma_addr_t *addr);
#endif /* K3_UDMA_GLUE_H_ */
diff --git a/include/linux/dma/mmp-pdma.h b/include/linux/dma/mmp-pdma.h
deleted file mode 100644
index 25cab62a28c4..000000000000
--- a/include/linux/dma/mmp-pdma.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _MMP_PDMA_H_
-#define _MMP_PDMA_H_
-
-struct dma_chan;
-
-#ifdef CONFIG_MMP_PDMA
-bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param);
-#else
-static inline bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
-{
- return false;
-}
-#endif
-
-#endif /* _MMP_PDMA_H_ */
diff --git a/include/linux/dma/qcom-gpi-dma.h b/include/linux/dma/qcom-gpi-dma.h
new file mode 100644
index 000000000000..6680dd1a43c6
--- /dev/null
+++ b/include/linux/dma/qcom-gpi-dma.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020, Linaro Limited
+ */
+
+#ifndef QCOM_GPI_DMA_H
+#define QCOM_GPI_DMA_H
+
+/**
+ * enum spi_transfer_cmd - spi transfer commands
+ */
+enum spi_transfer_cmd {
+ SPI_TX = 1,
+ SPI_RX,
+ SPI_DUPLEX,
+};
+
+/**
+ * struct gpi_spi_config - spi config for peripheral
+ *
+ * @loopback_en: spi loopback enable when set
+ * @clock_pol_high: clock polarity
+ * @data_pol_high: data polarity
+ * @pack_en: process tx/rx buffers as packed
+ * @word_len: spi word length
+ * @clk_div: source clock divider
+ * @clk_src: serial clock
+ * @cmd: spi cmd
+ * @fragmentation: keep CS asserted at end of sequence
+ * @cs: chip select toggle
+ * @set_config: set peripheral config
+ * @rx_len: receive length for buffer
+ */
+struct gpi_spi_config {
+ u8 set_config;
+ u8 loopback_en;
+ u8 clock_pol_high;
+ u8 data_pol_high;
+ u8 pack_en;
+ u8 word_len;
+ u8 fragmentation;
+ u8 cs;
+ u32 clk_div;
+ u32 clk_src;
+ enum spi_transfer_cmd cmd;
+ u32 rx_len;
+};
+
+enum i2c_op {
+ I2C_WRITE = 1,
+ I2C_READ,
+};
+
+/**
+ * struct gpi_i2c_config - i2c config for peripheral
+ *
+ * @pack_enable: process tx/rx buffers as packed
+ * @cycle_count: clock cycles to be sent
+ * @high_count: high period of clock
+ * @low_count: low period of clock
+ * @clk_div: source clock divider
+ * @addr: i2c bus address
+ * @stretch: stretch the clock at eot
+ * @set_config: set peripheral config
+ * @rx_len: receive length for buffer
+ * @op: i2c cmd
+ * @multi_msg: is part of multi i2c r-w msgs
+ */
+struct gpi_i2c_config {
+ u8 set_config;
+ u8 pack_enable;
+ u8 cycle_count;
+ u8 high_count;
+ u8 low_count;
+ u8 addr;
+ u8 stretch;
+ u16 clk_div;
+ u32 rx_len;
+ enum i2c_op op;
+ bool multi_msg;
+};
+
+#endif /* QCOM_GPI_DMA_H */
diff --git a/include/linux/dma/qcom_adm.h b/include/linux/dma/qcom_adm.h
new file mode 100644
index 000000000000..af20df674f0c
--- /dev/null
+++ b/include/linux/dma/qcom_adm.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_DMA_QCOM_ADM_H
+#define __LINUX_DMA_QCOM_ADM_H
+
+#include <linux/types.h>
+
+struct qcom_adm_peripheral_config {
+ u32 crci;
+ u32 mux;
+};
+
+#endif /* __LINUX_DMA_QCOM_ADM_H */
diff --git a/include/linux/dma/ti-cppi5.h b/include/linux/dma/ti-cppi5.h
index 579356ae447e..efa2f0309f00 100644
--- a/include/linux/dma/ti-cppi5.h
+++ b/include/linux/dma/ti-cppi5.h
@@ -2,7 +2,7 @@
/*
* CPPI5 descriptors interface
*
- * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
*/
#ifndef __TI_CPPI5_H__
@@ -47,7 +47,7 @@ struct cppi5_host_desc_t {
u32 buf_info1;
u32 org_buf_len;
u64 org_buf_ptr;
- u32 epib[0];
+ u32 epib[];
} __packed;
#define CPPI5_DESC_MIN_ALIGN (16U)
@@ -139,7 +139,7 @@ struct cppi5_desc_epib_t {
*/
struct cppi5_monolithic_desc_t {
struct cppi5_desc_hdr_t hdr;
- u32 epib[0];
+ u32 epib[];
};
#define CPPI5_INFO2_MDESC_DATA_OFFSET_SHIFT (18U)
diff --git a/include/linux/dma/xilinx_dpdma.h b/include/linux/dma/xilinx_dpdma.h
new file mode 100644
index 000000000000..02a4adf8921b
--- /dev/null
+++ b/include/linux/dma/xilinx_dpdma.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_DMA_XILINX_DPDMA_H
+#define __LINUX_DMA_XILINX_DPDMA_H
+
+#include <linux/types.h>
+
+struct xilinx_dpdma_peripheral_config {
+ bool video_group;
+};
+
+#endif /* __LINUX_DMA_XILINX_DPDMA_H */
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 64461fc64e1b..c923f4e60f24 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -39,6 +39,7 @@ enum dma_status {
DMA_IN_PROGRESS,
DMA_PAUSED,
DMA_ERROR,
+ DMA_OUT_OF_ORDER,
};
/**
@@ -61,6 +62,9 @@ enum dma_transaction_type {
DMA_SLAVE,
DMA_CYCLIC,
DMA_INTERLEAVE,
+ DMA_COMPLETION_NO_ORDER,
+ DMA_REPEAT,
+ DMA_LOAD_EOT,
/* last transaction type for creation of the capabilities mask */
DMA_TX_TYPE_END,
};
@@ -83,9 +87,9 @@ enum dma_transfer_direction {
/**
* Interleaved Transfer Request
* ----------------------------
- * A chunk is collection of contiguous bytes to be transfered.
+ * A chunk is a collection of contiguous bytes to be transferred.
* The gap(in bytes) between two chunks is called inter-chunk-gap(ICG).
- * ICGs may or maynot change between chunks.
+ * ICGs may or may not change between chunks.
* A FRAME is the smallest series of contiguous {chunk,icg} pairs,
* that when repeated an integral number of times, specifies the transfer.
* A transfer template is specification of a Frame, the number of times
@@ -153,7 +157,7 @@ struct dma_interleaved_template {
bool dst_sgl;
size_t numf;
size_t frame_size;
- struct data_chunk sgl[0];
+ struct data_chunk sgl[];
};
/**
@@ -162,7 +166,7 @@ struct dma_interleaved_template {
* @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
* this transaction
* @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
- * acknowledges receipt, i.e. has has a chance to establish any dependency
+ * acknowledges receipt, i.e. has a chance to establish any dependency
* chains
* @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
* @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
@@ -176,6 +180,16 @@ struct dma_interleaved_template {
* @DMA_PREP_CMD: tell the driver that the data passed to DMA API is command
* data and the descriptor should be in different format from normal
* data descriptors.
+ * @DMA_PREP_REPEAT: tell the driver that the transaction shall be automatically
+ * repeated when it ends until a transaction is issued on the same channel
+ * with the DMA_PREP_LOAD_EOT flag set. This flag is only applicable to
+ * interleaved transactions and is ignored for all other transaction types.
+ * @DMA_PREP_LOAD_EOT: tell the driver that the transaction shall replace any
+ * active repeated (as indicated by DMA_PREP_REPEAT) transaction when the
+ * repeated transaction ends. Not setting this flag when the previously queued
+ * transaction is marked with DMA_PREP_REPEAT will cause the new transaction
+ * to never be processed and stay in the issued queue forever. The flag is
+ * ignored if the previous transaction is not a repeated transaction.
*/
enum dma_ctrl_flags {
DMA_PREP_INTERRUPT = (1 << 0),
@@ -186,6 +200,8 @@ enum dma_ctrl_flags {
DMA_PREP_FENCE = (1 << 5),
DMA_CTRL_REUSE = (1 << 6),
DMA_PREP_CMD = (1 << 7),
+ DMA_PREP_REPEAT = (1 << 8),
+ DMA_PREP_LOAD_EOT = (1 << 9),
};
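A minimal sketch of the intended use of the two new flags, e.g. display scan-out: every frame is submitted with both flags set, so each new frame atomically replaces the previous repeating one at a frame boundary (@xt is an interleaved template assumed to be prepared by the caller):

	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_interleaved_dma(chan, xt,
					      DMA_PREP_REPEAT |
					      DMA_PREP_LOAD_EOT);
	if (desc) {
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
	}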
/**
@@ -214,12 +230,6 @@ enum sum_check_flags {
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
/**
- * struct dma_chan_percpu - the per-CPU part of struct dma_chan
- * @memcpy_count: transaction counter
- * @bytes_transferred: byte counter
- */
-
-/**
* enum dma_desc_metadata_mode - per descriptor metadata mode types supported
* @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the
* client driver and it is attached (via the dmaengine_desc_attach_metadata()
@@ -275,6 +285,11 @@ enum dma_desc_metadata_mode {
DESC_METADATA_ENGINE = BIT(1),
};
+/**
+ * struct dma_chan_percpu - the per-CPU part of struct dma_chan
+ * @memcpy_count: transaction counter
+ * @bytes_transferred: byte counter
+ */
struct dma_chan_percpu {
/* stats */
unsigned long memcpy_count;
@@ -300,6 +315,8 @@ struct dma_router {
* @chan_id: channel ID for sysfs
* @dev: class device for sysfs
* @name: backlink name for sysfs
+ * @dbg_client_name: slave name for debugfs in format:
+ * dev_name(requester's dev):channel name, for example: "2b00000.mcasp:tx"
* @device_node: used to add this to the device chan list
* @local: per-cpu pointer to a struct dma_chan_percpu
* @client_count: how many clients are using this channel
@@ -318,6 +335,9 @@ struct dma_chan {
int chan_id;
struct dma_chan_dev *dev;
const char *name;
+#ifdef CONFIG_DEBUG_FS
+ char *dbg_client_name;
+#endif
struct list_head device_node;
struct dma_chan_percpu __percpu *local;
@@ -336,13 +356,14 @@ struct dma_chan {
* @chan: driver channel device
* @device: sysfs device
* @dev_id: parent dma_device dev_id
- * @idr_ref: reference count to gate release of dma_device dev_id
+ * @chan_dma_dev: The channel is using custom/different dma-mapping
+ * compared to the parent dma_device
*/
struct dma_chan_dev {
struct dma_chan *chan;
struct device device;
int dev_id;
- atomic_t *idr_ref;
+ bool chan_dma_dev;
};
/**
@@ -359,6 +380,7 @@ enum dma_slave_buswidth {
DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
+ DMA_SLAVE_BUSWIDTH_128_BYTES = 128,
};
/**
@@ -377,7 +399,7 @@ enum dma_slave_buswidth {
* @src_addr_width: this is the width in bytes of the source (RX)
* register where DMA data shall be read. If the source
* is memory this may be ignored depending on architecture.
- * Legal values: 1, 2, 3, 4, 8, 16, 32, 64.
+ * Legal values: 1, 2, 3, 4, 8, 16, 32, 64, 128.
* @dst_addr_width: same as src_addr_width but for destination
* target (TX) mutatis mutandis.
* @src_maxburst: the maximum number of words (note: words, as in
@@ -396,9 +418,9 @@ enum dma_slave_buswidth {
* @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
* with 'true' if peripheral should be flow controller. Direction will be
* selected at Runtime.
- * @slave_id: Slave requester id. Only valid for slave channels. The dma
- * slave peripheral will have unique id as dma requester which need to be
- * pass as slave config.
+ * @peripheral_config: peripheral configuration for programming peripheral
+ * for dmaengine transfer
+ * @peripheral_size: peripheral configuration buffer size
*
* This struct is passed in as configuration data to a DMA engine
* in order to set up a certain channel for DMA transport at runtime.
@@ -423,7 +445,8 @@ struct dma_slave_config {
u32 src_port_window_size;
u32 dst_port_window_size;
bool device_fc;
- unsigned int slave_id;
+ void *peripheral_config;
+ size_t peripheral_size;
};
/**
@@ -462,7 +485,11 @@ enum dma_residue_granularity {
* Since the enum dma_transfer_direction is not defined as bit flag for
* each type, the dma controller should set BIT(<TYPE>) and same
* should be checked by controller as well
+ * @min_burst: min burst capability per-transfer
* @max_burst: max burst capability per-transfer
+ * @max_sg_burst: max number of SG list entries executed in a single burst
+ * DMA transaction with no software intervention for reinitialization.
+ * Zero value means unlimited number of entries.
* @cmd_pause: true, if pause is supported (i.e. for reading residue or
* for resume later)
* @cmd_resume: true, if resume is supported
@@ -475,7 +502,9 @@ struct dma_slave_caps {
u32 src_addr_widths;
u32 dst_addr_widths;
u32 directions;
+ u32 min_burst;
u32 max_burst;
+ u32 max_sg_burst;
bool cmd_pause;
bool cmd_resume;
bool cmd_terminate;
@@ -532,7 +561,7 @@ struct dmaengine_unmap_data {
struct device *dev;
struct kref kref;
size_t len;
- dma_addr_t addr[0];
+ dma_addr_t addr[];
};
struct dma_async_tx_descriptor;
@@ -618,10 +647,11 @@ static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
- if (tx->unmap) {
- dmaengine_unmap_put(tx->unmap);
- tx->unmap = NULL;
- }
+ if (!tx->unmap)
+ return;
+
+ dmaengine_unmap_put(tx->unmap);
+ tx->unmap = NULL;
}
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
@@ -711,6 +741,8 @@ enum dmaengine_alignment {
DMAENGINE_ALIGN_16_BYTES = 4,
DMAENGINE_ALIGN_32_BYTES = 5,
DMAENGINE_ALIGN_64_BYTES = 6,
+ DMAENGINE_ALIGN_128_BYTES = 7,
+ DMAENGINE_ALIGN_256_BYTES = 8,
};
/**
@@ -765,11 +797,16 @@ struct dma_filter {
* Since the enum dma_transfer_direction is not defined as bit flag for
* each type, the dma controller should set BIT(<TYPE>) and same
* should be checked by controller as well
+ * @min_burst: min burst capability per-transfer
* @max_burst: max burst capability per-transfer
+ * @max_sg_burst: max number of SG list entries executed in a single burst
+ * DMA transaction with no software intervention for reinitialization.
+ * Zero value means unlimited number of entries.
* @residue_granularity: granularity of the transfer residue reported
* by tx_status
* @device_alloc_chan_resources: allocate resources and return the
* number of allocated descriptors
+ * @device_router_config: optional callback for DMA router configuration
* @device_free_chan_resources: release DMA channel's resources
* @device_prep_dma_memcpy: prepares a memcpy operation
* @device_prep_dma_xor: prepares a xor operation
@@ -785,6 +822,8 @@ struct dma_filter {
* be called after period_len bytes have been transferred.
* @device_prep_interleaved_dma: Transfer expression in a generic way.
* @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address
+ * @device_caps: May be used to override the generic DMA slave capabilities
+ * with per-channel specific ones
* @device_config: Pushes a new configuration to a channel, return 0 or an error
* code
* @device_pause: Pauses any transfer happening on a channel. Returns
@@ -805,7 +844,9 @@ struct dma_filter {
* called and there are no further references to this structure. This
* must be implemented to free resources however many existing drivers
* do not and are therefore not safe to unbind while in use.
- *
+ * @dbg_summary_show: optional routine to show contents in debugfs; default code
+ * will be used when this is omitted, but custom code can show extra,
+ * controller specific information.
*/
struct dma_device {
struct kref ref;
@@ -827,15 +868,19 @@ struct dma_device {
int dev_id;
struct device *dev;
struct module *owner;
+ struct ida chan_ida;
u32 src_addr_widths;
u32 dst_addr_widths;
u32 directions;
+ u32 min_burst;
u32 max_burst;
+ u32 max_sg_burst;
bool descriptor_reuse;
enum dma_residue_granularity residue_granularity;
int (*device_alloc_chan_resources)(struct dma_chan *chan);
+ int (*device_router_config)(struct dma_chan *chan);
void (*device_free_chan_resources)(struct dma_chan *chan);
struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
@@ -879,6 +924,8 @@ struct dma_device {
struct dma_chan *chan, dma_addr_t dst, u64 data,
unsigned long flags);
+ void (*device_caps)(struct dma_chan *chan,
+ struct dma_slave_caps *caps);
int (*device_config)(struct dma_chan *chan,
struct dma_slave_config *config);
int (*device_pause)(struct dma_chan *chan);
@@ -891,6 +938,9 @@ struct dma_device {
struct dma_tx_state *txstate);
void (*device_issue_pending)(struct dma_chan *chan);
void (*device_release)(struct dma_device *dev);
+ /* debugfs support */
+ void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev);
+ struct dentry *dbg_dev_root;
};
static inline int dmaengine_slave_config(struct dma_chan *chan,
@@ -967,10 +1017,21 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
{
if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
return NULL;
+ if (flags & DMA_PREP_REPEAT &&
+ !test_bit(DMA_REPEAT, chan->device->cap_mask.bits))
+ return NULL;
return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}
+/**
+ * dmaengine_prep_dma_memset() - Prepare a DMA memset descriptor.
+ * @chan: The channel to be used for this descriptor
+ * @dest: Address of buffer to be set
+ * @value: Treated as a single byte value that fills the destination buffer
+ * @len: The total size of dest
+ * @flags: DMA engine flags
+ */
static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
unsigned long flags)
@@ -1056,7 +1117,7 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan)
* dmaengine_synchronize() needs to be called before it is safe to free
* any memory that is accessed by previously submitted descriptors or before
* freeing any resources accessed from within the completion callback of any
- * perviously submitted descriptors.
+ * previously submitted descriptors.
*
* This function can be called from atomic context as well as from within a
* complete callback of a descriptor submitted on the same channel.
@@ -1078,7 +1139,7 @@ static inline int dmaengine_terminate_async(struct dma_chan *chan)
*
* Synchronizes to the DMA channel termination to the current context. When this
* function returns it is guaranteed that all transfers for previously issued
- * descriptors have stopped and and it is safe to free the memory assoicated
+ * descriptors have stopped and it is safe to free the memory associated
* with them. Furthermore it is guaranteed that all complete callback functions
* for a previously submitted descriptor have finished running and it is safe to
* free resources accessed from within the complete callbacks.
@@ -1155,14 +1216,7 @@ static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc
static inline bool dmaengine_check_align(enum dmaengine_alignment align,
size_t off1, size_t off2, size_t len)
{
- size_t mask;
-
- if (!align)
- return true;
- mask = (1 << align) - 1;
- if (mask & (off1 | off2 | len))
- return false;
- return true;
+ return !(((1 << align) - 1) & (off1 | off2 | len));
}
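For example, with DMAENGINE_ALIGN_4_BYTES (align == 2) the mask is 0x3, so the call succeeds only when both offsets and the length are multiples of 4; with align == 0 the mask is 0 and every transfer passes, preserving the old early-return behaviour.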
static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
@@ -1236,9 +1290,9 @@ static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
return dma_dev_to_maxpq(dma);
- else if (dmaf_p_disabled_continue(flags))
+ if (dmaf_p_disabled_continue(flags))
return dma_dev_to_maxpq(dma) - 1;
- else if (dmaf_continue(flags))
+ if (dmaf_continue(flags))
return dma_dev_to_maxpq(dma) - 3;
BUG();
}
@@ -1249,7 +1303,7 @@ static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
if (inc) {
if (dir_icg)
return dir_icg;
- else if (sgl)
+ if (sgl)
return icg;
}
@@ -1415,11 +1469,12 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
- if (st) {
- st->last = last;
- st->used = used;
- st->residue = residue;
- }
+ if (!st)
+ return;
+
+ st->last = last;
+ st->used = used;
+ st->residue = residue;
}
#ifdef CONFIG_DMA_ENGINE
@@ -1430,7 +1485,6 @@ void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
dma_filter_fn fn, void *fn_param,
struct device_node *np);
-struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
struct dma_chan *dma_request_chan(struct device *dev, const char *name);
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
@@ -1460,11 +1514,6 @@ static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
{
return NULL;
}
-static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
- const char *name)
-{
- return NULL;
-}
static inline struct dma_chan *dma_request_chan(struct device *dev,
const char *name)
{
@@ -1485,8 +1534,6 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
}
#endif
-#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)
-
static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
{
struct dma_slave_caps caps;
@@ -1496,12 +1543,11 @@ static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
if (ret)
return ret;
- if (caps.descriptor_reuse) {
- tx->flags |= DMA_CTRL_REUSE;
- return 0;
- } else {
+ if (!caps.descriptor_reuse)
return -EPERM;
- }
+
+ tx->flags |= DMA_CTRL_REUSE;
+ return 0;
}
static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
@@ -1517,10 +1563,10 @@ static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
{
/* this is supported for reusable desc, so check that */
- if (dmaengine_desc_test_reuse(desc))
- return desc->desc_free(desc);
- else
+ if (!dmaengine_desc_test_reuse(desc))
return -EPERM;
+
+ return desc->desc_free(desc);
}
/* --- DMA device --- */
@@ -1536,6 +1582,15 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
#define dma_request_channel(mask, x, y) \
__dma_request_channel(&(mask), x, y, NULL)
+/* Deprecated, please use dma_request_chan() directly */
+static inline struct dma_chan * __deprecated
+dma_request_slave_channel(struct device *dev, const char *name)
+{
+ struct dma_chan *ch = dma_request_chan(dev, name);
+
+ return IS_ERR(ch) ? NULL : ch;
+}
+
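The preferred pattern with dma_request_chan() keeps the error code, which in particular lets probe deferral propagate; a sketch:

	struct dma_chan *chan = dma_request_chan(dev, "rx");

	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER */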
static inline struct dma_chan
*dma_request_slave_channel_compat(const dma_cap_mask_t mask,
dma_filter_fn fn, void *fn_param,
@@ -1566,9 +1621,16 @@ dmaengine_get_direction_text(enum dma_transfer_direction dir)
case DMA_DEV_TO_DEV:
return "DEV_TO_DEV";
default:
- break;
+ return "invalid";
}
+}

-	return "invalid";
+static inline struct device *dmaengine_get_dma_device(struct dma_chan *chan)
+{
+	if (chan->dev->chan_dma_dev)
+		return &chan->dev->device;
+
+	return chan->device->dev;
}
+
#endif /* DMAENGINE_H */
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index d7bf029df737..d81a51978d01 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -18,11 +18,7 @@
struct acpi_dmar_header;
-#ifdef CONFIG_X86
-# define DMAR_UNITS_SUPPORTED MAX_IO_APICS
-#else
-# define DMAR_UNITS_SUPPORTED 64
-#endif
+#define DMAR_UNITS_SUPPORTED 1024
/* DMAR Flags */
#define DMAR_INTR_REMAP 0x1
@@ -48,6 +44,7 @@ struct dmar_drhd_unit {
u16 segment; /* PCI domain */
u8 ignored:1; /* ignore drhd */
u8 include_all:1;
+ u8 gfx_dedicated:1; /* graphic dedicated */
struct intel_iommu *iommu;
};
@@ -120,7 +117,7 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
u16 segment, struct dmar_dev_scope *devices,
int count);
/* Intel IOMMU detection */
-extern int detect_intel_iommu(void);
+void detect_intel_iommu(void);
extern int enable_drhd_fault_handling(void);
extern int dmar_device_add(acpi_handle handle);
extern int dmar_device_remove(acpi_handle handle);
@@ -130,6 +127,14 @@ static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
return 0;
}
+#ifdef CONFIG_DMAR_DEBUG
+void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
+ unsigned long long addr, u32 pasid);
+#else
+static inline void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
+ unsigned long long addr, u32 pasid) {}
+#endif
+
#ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu;
extern int intel_iommu_init(void);
@@ -137,6 +142,7 @@ extern void intel_iommu_shutdown(void);
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
+extern int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
@@ -148,6 +154,7 @@ static inline void intel_iommu_shutdown(void) { }
#define dmar_parse_one_atsr dmar_res_noop
#define dmar_check_one_atsr dmar_res_noop
#define dmar_release_one_atsr dmar_res_noop
+#define dmar_parse_one_satc dmar_res_noop
static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
@@ -186,6 +193,10 @@ static inline bool dmar_platform_optin(void)
return false;
}
+static inline void detect_intel_iommu(void)
+{
+}
+
#endif /* CONFIG_DMAR_TABLE */
struct irte {
diff --git a/include/linux/dnotify.h b/include/linux/dnotify.h
index 0aad774beaec..b1d26f9f1c9f 100644
--- a/include/linux/dnotify.h
+++ b/include/linux/dnotify.h
@@ -26,10 +26,9 @@ struct dnotify_struct {
FS_MODIFY | FS_MODIFY_CHILD |\
FS_ACCESS | FS_ACCESS_CHILD |\
FS_ATTRIB | FS_ATTRIB_CHILD |\
- FS_CREATE | FS_DN_RENAME |\
+ FS_CREATE | FS_RENAME |\
FS_MOVED_FROM | FS_MOVED_TO)
-extern int dir_notify_enable;
extern void dnotify_flush(struct file *, fl_owner_t);
extern int fcntl_dirnotify(int, struct file *, unsigned long);
diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h
index 0aa803c451a3..3ed117e299ec 100644
--- a/include/linux/dsa/8021q.h
+++ b/include/linux/dsa/8021q.h
@@ -5,70 +5,56 @@
#ifndef _NET_DSA_8021Q_H
#define _NET_DSA_8021Q_H
+#include <linux/refcount.h>
#include <linux/types.h>
+#include <net/dsa.h>
struct dsa_switch;
+struct dsa_port;
struct sk_buff;
struct net_device;
-struct packet_type;
-#if IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q)
+struct dsa_tag_8021q_vlan {
+ struct list_head list;
+ int port;
+ u16 vid;
+ refcount_t refcount;
+};
-int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index,
- bool enabled);
+struct dsa_8021q_context {
+ struct dsa_switch *ds;
+ struct list_head vlans;
+ /* EtherType of RX VID, used for filtering on master interface */
+ __be16 proto;
+};
+
+int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto);
+
+void dsa_tag_8021q_unregister(struct dsa_switch *ds);
+
+int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge);
+
+void dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge);
struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
u16 tpid, u16 tci);
-u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port);
-
-u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port);
+void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
+ int *vbid);
-int dsa_8021q_rx_switch_id(u16 vid);
+struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *master,
+ int vbid);
-int dsa_8021q_rx_source_port(u16 vid);
+u16 dsa_tag_8021q_bridge_vid(unsigned int bridge_num);
-struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb);
+u16 dsa_tag_8021q_standalone_vid(const struct dsa_port *dp);
-#else
+int dsa_8021q_rx_switch_id(u16 vid);
-int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index,
- bool enabled)
-{
- return 0;
-}
+int dsa_8021q_rx_source_port(u16 vid);
-struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
- u16 tpid, u16 tci)
-{
- return NULL;
-}
-
-u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port)
-{
- return 0;
-}
-
-u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port)
-{
- return 0;
-}
-
-int dsa_8021q_rx_switch_id(u16 vid)
-{
- return 0;
-}
-
-int dsa_8021q_rx_source_port(u16 vid)
-{
- return 0;
-}
-
-struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb)
-{
- return NULL;
-}
-
-#endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */
+bool vid_is_dsa_8021q(u16 vid);
#endif /* _NET_DSA_8021Q_H */
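
A hedged sketch of how a tagger's receive path might consume this API
(example_rcv() is a hypothetical name; only dsa_8021q_rcv() and the
VID helpers come from this header):

static void example_rcv(struct sk_buff *skb)
{
	int source_port, switch_id, vbid;

	/* Decode the tag_8021q VID fields carried by the frame */
	dsa_8021q_rcv(skb, &source_port, &switch_id, &vbid);

	/* vid_is_dsa_8021q() can gate this path for foreign VLANs */
}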
diff --git a/include/linux/dsa/brcm.h b/include/linux/dsa/brcm.h
new file mode 100644
index 000000000000..47545a948784
--- /dev/null
+++ b/include/linux/dsa/brcm.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2014 Broadcom Corporation
+ */
+
+/* Included by drivers/net/ethernet/broadcom/bcmsysport.c and
+ * net/dsa/tag_brcm.c
+ */
+#ifndef _NET_DSA_BRCM_H
+#define _NET_DSA_BRCM_H
+
+/* Broadcom tag specific helpers to insert and extract queue/port number */
+#define BRCM_TAG_SET_PORT_QUEUE(p, q) ((p) << 8 | q)
+#define BRCM_TAG_GET_PORT(v) ((v) >> 8)
+#define BRCM_TAG_GET_QUEUE(v) ((v) & 0xff)
+
+#endif
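
The three helpers above are self-contained enough to exercise in a
standalone round-trip check (a sketch, not driver code):

#include <assert.h>
#include <stdint.h>

#define BRCM_TAG_SET_PORT_QUEUE(p, q)	((p) << 8 | q)
#define BRCM_TAG_GET_PORT(v)		((v) >> 8)
#define BRCM_TAG_GET_QUEUE(v)		((v) & 0xff)

int main(void)
{
	uint16_t v = BRCM_TAG_SET_PORT_QUEUE(5, 3);	/* 0x0503 */

	/* Port rides in the high byte, queue in the low byte */
	assert(BRCM_TAG_GET_PORT(v) == 5);
	assert(BRCM_TAG_GET_QUEUE(v) == 3);
	return 0;
}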
diff --git a/include/linux/dsa/loop.h b/include/linux/dsa/loop.h
new file mode 100644
index 000000000000..b8fef35591aa
--- /dev/null
+++ b/include/linux/dsa/loop.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef DSA_LOOP_H
+#define DSA_LOOP_H
+
+#include <linux/if_vlan.h>
+#include <linux/types.h>
+#include <linux/ethtool.h>
+#include <net/dsa.h>
+
+struct dsa_loop_vlan {
+ u16 members;
+ u16 untagged;
+};
+
+struct dsa_loop_mib_entry {
+ char name[ETH_GSTRING_LEN];
+ unsigned long val;
+};
+
+enum dsa_loop_mib_counters {
+ DSA_LOOP_PHY_READ_OK,
+ DSA_LOOP_PHY_READ_ERR,
+ DSA_LOOP_PHY_WRITE_OK,
+ DSA_LOOP_PHY_WRITE_ERR,
+ __DSA_LOOP_CNT_MAX,
+};
+
+struct dsa_loop_port {
+ struct dsa_loop_mib_entry mib[__DSA_LOOP_CNT_MAX];
+ u16 pvid;
+ int mtu;
+};
+
+struct dsa_loop_priv {
+ struct mii_bus *bus;
+ unsigned int port_base;
+ struct dsa_loop_vlan vlans[VLAN_N_VID];
+ struct net_device *netdev;
+ struct dsa_loop_port ports[DSA_MAX_PORTS];
+};
+
+#endif /* DSA_LOOP_H */
diff --git a/include/linux/dsa/mv88e6xxx.h b/include/linux/dsa/mv88e6xxx.h
new file mode 100644
index 000000000000..8c3d45eca46b
--- /dev/null
+++ b/include/linux/dsa/mv88e6xxx.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright 2021 NXP
+ */
+
+#ifndef _NET_DSA_TAG_MV88E6XXX_H
+#define _NET_DSA_TAG_MV88E6XXX_H
+
+#include <linux/if_vlan.h>
+
+#define MV88E6XXX_VID_STANDALONE 0
+#define MV88E6XXX_VID_BRIDGED (VLAN_N_VID - 1)
+
+#endif
diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h
new file mode 100644
index 000000000000..dca2969015d8
--- /dev/null
+++ b/include/linux/dsa/ocelot.h
@@ -0,0 +1,276 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright 2019-2021 NXP
+ */
+
+#ifndef _NET_DSA_TAG_OCELOT_H
+#define _NET_DSA_TAG_OCELOT_H
+
+#include <linux/kthread.h>
+#include <linux/packing.h>
+#include <linux/skbuff.h>
+#include <net/dsa.h>
+
+struct ocelot_skb_cb {
+ struct sk_buff *clone;
+ unsigned int ptp_class; /* valid only for clones */
+ u32 tstamp_lo;
+ u8 ptp_cmd;
+ u8 ts_id;
+};
+
+#define OCELOT_SKB_CB(skb) \
+ ((struct ocelot_skb_cb *)((skb)->cb))
+
+#define IFH_TAG_TYPE_C 0
+#define IFH_TAG_TYPE_S 1
+
+#define IFH_REW_OP_NOOP 0x0
+#define IFH_REW_OP_DSCP 0x1
+#define IFH_REW_OP_ONE_STEP_PTP 0x2
+#define IFH_REW_OP_TWO_STEP_PTP 0x3
+#define IFH_REW_OP_ORIGIN_PTP 0x5
+
+#define OCELOT_TAG_LEN 16
+#define OCELOT_SHORT_PREFIX_LEN 4
+#define OCELOT_LONG_PREFIX_LEN 16
+#define OCELOT_TOTAL_TAG_LEN (OCELOT_SHORT_PREFIX_LEN + OCELOT_TAG_LEN)
+
+/* The CPU injection header and the CPU extraction header can have 3 types of
+ * prefixes: long, short and no prefix. The format of the header itself is the
+ * same in all 3 cases.
+ *
+ * Extraction with long prefix:
+ *
+ * +-------------------+-------------------+------+------+------------+-------+
+ * | ff:ff:ff:ff:ff:ff | fe:ff:ff:ff:ff:ff | 8880 | 000a | extraction | frame |
+ * | | | | | header | |
+ * +-------------------+-------------------+------+------+------------+-------+
+ * 48 bits 48 bits 16 bits 16 bits 128 bits
+ *
+ * Extraction with short prefix:
+ *
+ * +------+------+------------+-------+
+ * | 8880 | 000a | extraction | frame |
+ * | | | header | |
+ * +------+------+------------+-------+
+ * 16 bits 16 bits 128 bits
+ *
+ * Extraction with no prefix:
+ *
+ * +------------+-------+
+ * | extraction | frame |
+ * | header | |
+ * +------------+-------+
+ * 128 bits
+ *
+ *
+ * Injection with long prefix:
+ *
+ * +-------------------+-------------------+------+------+------------+-------+
+ * | any dmac | any smac | 8880 | 000a | injection | frame |
+ * | | | | | header | |
+ * +-------------------+-------------------+------+------+------------+-------+
+ * 48 bits 48 bits 16 bits 16 bits 128 bits
+ *
+ * Injection with short prefix:
+ *
+ * +------+------+------------+-------+
+ * | 8880 | 000a | injection | frame |
+ * | | | header | |
+ * +------+------+------------+-------+
+ * 16 bits 16 bits 128 bits
+ *
+ * Injection with no prefix:
+ *
+ * +------------+-------+
+ * | injection | frame |
+ * | header | |
+ * +------------+-------+
+ * 128 bits
+ *
+ * The injection header looks like this (network byte order, bit 127
+ * is part of lowest address byte in memory, bit 0 is part of highest
+ * address byte):
+ *
+ * +------+------+------+------+------+------+------+------+
+ * 127:120 |BYPASS| MASQ | MASQ_PORT |REW_OP|REW_OP|
+ * +------+------+------+------+------+------+------+------+
+ * 119:112 | REW_OP |
+ * +------+------+------+------+------+------+------+------+
+ * 111:104 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 103: 96 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 95: 88 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 87: 80 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 79: 72 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 71: 64 | RSV | DEST |
+ * +------+------+------+------+------+------+------+------+
+ * 63: 56 | DEST |
+ * +------+------+------+------+------+------+------+------+
+ * 55: 48 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 47: 40 | RSV | SRC_PORT | RSV |TFRM_TIMER|
+ * +------+------+------+------+------+------+------+------+
+ * 39: 32 | TFRM_TIMER | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 31: 24 | RSV | DP | POP_CNT | CPUQ |
+ * +------+------+------+------+------+------+------+------+
+ * 23: 16 | CPUQ | QOS_CLASS |TAG_TYPE|
+ * +------+------+------+------+------+------+------+------+
+ * 15: 8 | PCP | DEI | VID |
+ * +------+------+------+------+------+------+------+------+
+ * 7: 0 | VID |
+ * +------+------+------+------+------+------+------+------+
+ *
+ * And the extraction header looks like this:
+ *
+ * +------+------+------+------+------+------+------+------+
+ * 127:120 | RSV | REW_OP |
+ * +------+------+------+------+------+------+------+------+
+ * 119:112 | REW_OP | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 111:104 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 103: 96 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 95: 88 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 87: 80 | REW_VAL | LLEN |
+ * +------+------+------+------+------+------+------+------+
+ * 79: 72 | LLEN | WLEN |
+ * +------+------+------+------+------+------+------+------+
+ * 71: 64 | WLEN | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 63: 56 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 55: 48 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 47: 40 | RSV | SRC_PORT | ACL_ID |
+ * +------+------+------+------+------+------+------+------+
+ * 39: 32 | ACL_ID | RSV | SFLOW_ID |
+ * +------+------+------+------+------+------+------+------+
+ * 31: 24 |ACL_HIT| DP | LRN_FLAGS | CPUQ |
+ * +------+------+------+------+------+------+------+------+
+ * 23: 16 | CPUQ | QOS_CLASS |TAG_TYPE|
+ * +------+------+------+------+------+------+------+------+
+ * 15: 8 | PCP | DEI | VID |
+ * +------+------+------+------+------+------+------+------+
+ * 7: 0 | VID |
+ * +------+------+------+------+------+------+------+------+
+ */
+
+struct felix_deferred_xmit_work {
+ struct dsa_port *dp;
+ struct sk_buff *skb;
+ struct kthread_work work;
+};
+
+struct ocelot_8021q_tagger_data {
+ void (*xmit_work_fn)(struct kthread_work *work);
+};
+
+static inline struct ocelot_8021q_tagger_data *
+ocelot_8021q_tagger_data(struct dsa_switch *ds)
+{
+ BUG_ON(ds->dst->tag_ops->proto != DSA_TAG_PROTO_OCELOT_8021Q);
+
+ return ds->tagger_data;
+}
+
+static inline void ocelot_xfh_get_rew_val(void *extraction, u64 *rew_val)
+{
+ packing(extraction, rew_val, 116, 85, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_len(void *extraction, u64 *len)
+{
+ u64 llen, wlen;
+
+ packing(extraction, &llen, 84, 79, OCELOT_TAG_LEN, UNPACK, 0);
+ packing(extraction, &wlen, 78, 71, OCELOT_TAG_LEN, UNPACK, 0);
+
+ *len = 60 * wlen + llen - 80;
+}
+
+static inline void ocelot_xfh_get_src_port(void *extraction, u64 *src_port)
+{
+ packing(extraction, src_port, 46, 43, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_qos_class(void *extraction, u64 *qos_class)
+{
+ packing(extraction, qos_class, 19, 17, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_tag_type(void *extraction, u64 *tag_type)
+{
+ packing(extraction, tag_type, 16, 16, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_vlan_tci(void *extraction, u64 *vlan_tci)
+{
+ packing(extraction, vlan_tci, 15, 0, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_ifh_set_bypass(void *injection, u64 bypass)
+{
+ packing(injection, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_rew_op(void *injection, u64 rew_op)
+{
+ packing(injection, &rew_op, 125, 117, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_dest(void *injection, u64 dest)
+{
+ packing(injection, &dest, 67, 56, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_qos_class(void *injection, u64 qos_class)
+{
+ packing(injection, &qos_class, 19, 17, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void seville_ifh_set_dest(void *injection, u64 dest)
+{
+ packing(injection, &dest, 67, 57, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_src(void *injection, u64 src)
+{
+ packing(injection, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_tag_type(void *injection, u64 tag_type)
+{
+ packing(injection, &tag_type, 16, 16, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_vlan_tci(void *injection, u64 vlan_tci)
+{
+ packing(injection, &vlan_tci, 15, 0, OCELOT_TAG_LEN, PACK, 0);
+}
+
+/* Determine the PTP REW_OP to use for injecting the given skb */
+static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb)
+{
+ struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
+ u8 ptp_cmd = OCELOT_SKB_CB(skb)->ptp_cmd;
+ u32 rew_op = 0;
+
+ if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && clone) {
+ rew_op = ptp_cmd;
+ rew_op |= OCELOT_SKB_CB(clone)->ts_id << 3;
+ } else if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
+ rew_op = ptp_cmd;
+ }
+
+ return rew_op;
+}
+
+#endif
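
A hedged sketch of how a tagger's xmit path might fill the 16-byte
injection header with the setters above (the function name and field
values are illustrative, not taken from any particular driver):

static void example_fill_ifh(void *injection, int dest_port)
{
	/* The header is prepended to the frame, zeroed first */
	memset(injection, 0, OCELOT_TAG_LEN);

	/* Bypass the analyzer and steer to an explicit port mask */
	ocelot_ifh_set_bypass(injection, 1);
	ocelot_ifh_set_dest(injection, BIT(dest_port));
	ocelot_ifh_set_tag_type(injection, IFH_TAG_TYPE_C);
	ocelot_ifh_set_vlan_tci(injection, 0);
}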
diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h
index fa5735c353cd..159e43171ccc 100644
--- a/include/linux/dsa/sja1105.h
+++ b/include/linux/dsa/sja1105.h
@@ -9,10 +9,14 @@
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
+#include <linux/dsa/8021q.h>
#include <net/dsa.h>
#define ETH_P_SJA1105 ETH_P_DSA_8021Q
#define ETH_P_SJA1105_META 0x0008
+#define ETH_P_SJA1110 0xdadc
+
+#define SJA1105_DEFAULT_VLAN (VLAN_N_VID - 1)
/* IEEE 802.3 Annex 57A: Slow Protocols PDUs (01:80:C2:xx:xx:xx) */
#define SJA1105_LINKLOCAL_FILTER_A 0x0180C2000000ull
@@ -31,34 +35,45 @@
#define SJA1105_META_SMAC 0x222222222222ull
#define SJA1105_META_DMAC 0x0180C200000Eull
-#define SJA1105_HWTS_RX_EN 0
+enum sja1110_meta_tstamp {
+ SJA1110_META_TSTAMP_TX = 0,
+ SJA1110_META_TSTAMP_RX = 1,
+};
-/* Global tagger data: each struct sja1105_port has a reference to
- * the structure defined in struct sja1105_private.
- */
+struct sja1105_deferred_xmit_work {
+ struct dsa_port *dp;
+ struct sk_buff *skb;
+ struct kthread_work work;
+};
+
+/* Global tagger data */
struct sja1105_tagger_data {
- struct sk_buff *stampable_skb;
- /* Protects concurrent access to the meta state machine
- * from taggers running on multiple ports on SMP systems
- */
- spinlock_t meta_lock;
- unsigned long state;
+ /* Tagger to switch */
+ void (*xmit_work_fn)(struct kthread_work *work);
+ void (*meta_tstamp_handler)(struct dsa_switch *ds, int port, u8 ts_id,
+ enum sja1110_meta_tstamp dir, u64 tstamp);
+ /* Switch to tagger */
+ bool (*rxtstamp_get_state)(struct dsa_switch *ds);
+ void (*rxtstamp_set_state)(struct dsa_switch *ds, bool on);
};
struct sja1105_skb_cb {
- u32 meta_tstamp;
+ struct sk_buff *clone;
+ u64 tstamp;
+ /* Only valid for packets cloned for 2-step TX timestamping */
+ u8 ts_id;
};
#define SJA1105_SKB_CB(skb) \
- ((struct sja1105_skb_cb *)DSA_SKB_CB_PRIV(skb))
+ ((struct sja1105_skb_cb *)((skb)->cb))
-struct sja1105_port {
- struct kthread_worker *xmit_worker;
- struct kthread_work xmit_work;
- struct sk_buff_head xmit_queue;
- struct sja1105_tagger_data *data;
- struct dsa_port *dp;
- bool hwts_tx_en;
-};
+static inline struct sja1105_tagger_data *
+sja1105_tagger_data(struct dsa_switch *ds)
+{
+ BUG_ON(ds->dst->tag_ops->proto != DSA_TAG_PROTO_SJA1105 &&
+ ds->dst->tag_ops->proto != DSA_TAG_PROTO_SJA1110);
+
+ return ds->tagger_data;
+}
#endif /* _NET_DSA_SJA1105_H */
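
A minimal sketch of the handshake this enables, assuming a hypothetical
switch-driver callback foo_meta_tstamp_handler() (only
sja1105_tagger_data() and the ops fields come from this header):

static void example_connect_tagger(struct dsa_switch *ds)
{
	struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds);

	/* "Tagger to switch": the driver supplies the handler that
	 * the tagger invokes on meta-frame timestamps.
	 */
	tagger_data->meta_tstamp_handler = foo_meta_tstamp_handler;
}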
diff --git a/include/linux/dsa/tag_qca.h b/include/linux/dsa/tag_qca.h
new file mode 100644
index 000000000000..b1b5720d89a5
--- /dev/null
+++ b/include/linux/dsa/tag_qca.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __TAG_QCA_H
+#define __TAG_QCA_H
+
+#include <linux/types.h>
+
+struct dsa_switch;
+struct sk_buff;
+
+#define QCA_HDR_LEN 2
+#define QCA_HDR_VERSION 0x2
+
+#define QCA_HDR_RECV_VERSION GENMASK(15, 14)
+#define QCA_HDR_RECV_PRIORITY GENMASK(13, 11)
+#define QCA_HDR_RECV_TYPE GENMASK(10, 6)
+#define QCA_HDR_RECV_FRAME_IS_TAGGED BIT(3)
+#define QCA_HDR_RECV_SOURCE_PORT GENMASK(2, 0)
+
+/* Packet type for recv */
+#define QCA_HDR_RECV_TYPE_NORMAL 0x0
+#define QCA_HDR_RECV_TYPE_MIB 0x1
+#define QCA_HDR_RECV_TYPE_RW_REG_ACK 0x2
+
+#define QCA_HDR_XMIT_VERSION GENMASK(15, 14)
+#define QCA_HDR_XMIT_PRIORITY GENMASK(13, 11)
+#define QCA_HDR_XMIT_CONTROL GENMASK(10, 8)
+#define QCA_HDR_XMIT_FROM_CPU BIT(7)
+#define QCA_HDR_XMIT_DP_BIT GENMASK(6, 0)
+
+/* Packet type for xmit */
+#define QCA_HDR_XMIT_TYPE_NORMAL 0x0
+#define QCA_HDR_XMIT_TYPE_RW_REG 0x1
+
+/* Check code for a valid mgmt packet. The switch will ignore any
+ * packet in which this is wrong.
+ */
+#define QCA_HDR_MGMT_CHECK_CODE_VAL 0x5
+
+/* Specific define for in-band MDIO read/write with Ethernet packet */
+#define QCA_HDR_MGMT_SEQ_LEN 4 /* 4 bytes for the seq */
+#define QCA_HDR_MGMT_COMMAND_LEN 4 /* 4 bytes for the command */
+#define QCA_HDR_MGMT_DATA1_LEN 4 /* First 4 bytes of the mdio data */
+#define QCA_HDR_MGMT_HEADER_LEN (QCA_HDR_MGMT_SEQ_LEN + \
+ QCA_HDR_MGMT_COMMAND_LEN + \
+ QCA_HDR_MGMT_DATA1_LEN)
+
+#define QCA_HDR_MGMT_DATA2_LEN 12 /* Remaining 12 bytes of the mdio data */
+#define QCA_HDR_MGMT_PADDING_LEN 34 /* Padding to reach the minimum Ethernet frame size */
+
+#define QCA_HDR_MGMT_PKT_LEN (QCA_HDR_MGMT_HEADER_LEN + \
+ QCA_HDR_LEN + \
+ QCA_HDR_MGMT_DATA2_LEN + \
+ QCA_HDR_MGMT_PADDING_LEN)
+
+#define QCA_HDR_MGMT_SEQ_NUM GENMASK(31, 0) /* 63, 32 */
+#define QCA_HDR_MGMT_CHECK_CODE GENMASK(31, 29) /* 31, 29 */
+#define QCA_HDR_MGMT_CMD BIT(28) /* 28 */
+#define QCA_HDR_MGMT_LENGTH GENMASK(23, 20) /* 23, 20 */
+#define QCA_HDR_MGMT_ADDR GENMASK(18, 0) /* 18, 0 */
+
+/* Special struct emulating an Ethernet header */
+struct qca_mgmt_ethhdr {
+ __le32 command; /* command bit 31:0 */
+ __le32 seq; /* seq 63:32 */
+ __le32 mdio_data; /* first 4 bytes of mdio data */
+ __be16 hdr; /* qca hdr */
+} __packed;
+
+enum mdio_cmd {
+ MDIO_WRITE = 0x0,
+ MDIO_READ
+};
+
+struct mib_ethhdr {
+ __le32 data[3]; /* first 3 MIB counters */
+ __be16 hdr; /* qca hdr */
+} __packed;
+
+struct qca_tagger_data {
+ void (*rw_reg_ack_handler)(struct dsa_switch *ds,
+ struct sk_buff *skb);
+ void (*mib_autocast_handler)(struct dsa_switch *ds,
+ struct sk_buff *skb);
+};
+
+#endif /* __TAG_QCA_H */
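
Since the RECV fields are declared as GENMASK()s, a tagger can extract
them with FIELD_GET() from <linux/bitfield.h>; a hedged sketch:

#include <linux/bitfield.h>

static bool example_hdr_is_reg_ack(u16 hdr)
{
	/* The header is only meaningful when the version matches */
	if (FIELD_GET(QCA_HDR_RECV_VERSION, hdr) != QCA_HDR_VERSION)
		return false;

	return FIELD_GET(QCA_HDR_RECV_TYPE, hdr) ==
	       QCA_HDR_RECV_TYPE_RW_REG_ACK;
}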
diff --git a/include/linux/dtpm.h b/include/linux/dtpm.h
new file mode 100644
index 000000000000..a4a13514b730
--- /dev/null
+++ b/include/linux/dtpm.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Linaro Ltd
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ */
+#ifndef ___DTPM_H__
+#define ___DTPM_H__
+
+#include <linux/powercap.h>
+
+#define MAX_DTPM_DESCR 8
+#define MAX_DTPM_CONSTRAINTS 1
+
+struct dtpm {
+ struct powercap_zone zone;
+ struct dtpm *parent;
+ struct list_head sibling;
+ struct list_head children;
+ struct dtpm_ops *ops;
+ unsigned long flags;
+ u64 power_limit;
+ u64 power_max;
+ u64 power_min;
+ int weight;
+};
+
+struct dtpm_ops {
+ u64 (*set_power_uw)(struct dtpm *, u64);
+ u64 (*get_power_uw)(struct dtpm *);
+ int (*update_power_uw)(struct dtpm *);
+ void (*release)(struct dtpm *);
+};
+
+struct device_node;
+
+struct dtpm_subsys_ops {
+ const char *name;
+ int (*init)(void);
+ void (*exit)(void);
+ int (*setup)(struct dtpm *, struct device_node *);
+};
+
+enum DTPM_NODE_TYPE {
+ DTPM_NODE_VIRTUAL = 0,
+ DTPM_NODE_DT,
+};
+
+struct dtpm_node {
+ enum DTPM_NODE_TYPE type;
+ const char *name;
+ struct dtpm_node *parent;
+};
+
+static inline struct dtpm *to_dtpm(struct powercap_zone *zone)
+{
+ return container_of(zone, struct dtpm, zone);
+}
+
+int dtpm_update_power(struct dtpm *dtpm);
+
+int dtpm_release_zone(struct powercap_zone *pcz);
+
+void dtpm_init(struct dtpm *dtpm, struct dtpm_ops *ops);
+
+void dtpm_unregister(struct dtpm *dtpm);
+
+int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent);
+
+int dtpm_create_hierarchy(struct of_device_id *dtpm_match_table);
+
+void dtpm_destroy_hierarchy(void);
+#endif
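
A hedged sketch of registering a node through this API (the power
callback is a dummy; a real one would query hardware; <linux/slab.h>
is needed for the allocator):

static u64 example_get_power_uw(struct dtpm *dtpm)
{
	return 1000000;	/* pretend a constant 1 W draw */
}

static void example_release(struct dtpm *dtpm)
{
	kfree(dtpm);
}

static struct dtpm_ops example_dtpm_ops = {
	.get_power_uw = example_get_power_uw,
	.release = example_release,
};

static int example_register(void)
{
	struct dtpm *dtpm = kzalloc(sizeof(*dtpm), GFP_KERNEL);

	if (!dtpm)
		return -ENOMEM;

	dtpm_init(dtpm, &example_dtpm_ops);
	/* A NULL parent makes this a root of the powercap tree */
	return dtpm_register("example", dtpm, NULL);
}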
diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h
index 14f072edbca5..82ebf9223948 100644
--- a/include/linux/dw_apb_timer.h
+++ b/include/linux/dw_apb_timer.h
@@ -25,7 +25,6 @@ struct dw_apb_timer {
struct dw_apb_clock_event_device {
struct clock_event_device ced;
struct dw_apb_timer timer;
- struct irqaction irqaction;
void (*eoi)(struct dw_apb_timer *);
};
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 4cf02ecd67de..41682278d2e8 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -6,6 +6,8 @@
#include <linux/jump_label.h>
#endif
+#include <linux/build_bug.h>
+
/*
* An instance of this structure is created in a special
* ELF section at every dynamic debug callsite. At runtime,
@@ -21,6 +23,9 @@ struct _ddebug {
const char *filename;
const char *format;
unsigned int lineno:18;
+#define CLS_BITS 6
+ unsigned int class_id:CLS_BITS;
+#define _DPRINTK_CLASS_DFLT ((1 << CLS_BITS) - 1)
/*
* The flags field controls the behaviour at the callsite.
* The bits here are changed dynamically when the user
@@ -32,6 +37,11 @@ struct _ddebug {
#define _DPRINTK_FLAGS_INCL_FUNCNAME (1<<2)
#define _DPRINTK_FLAGS_INCL_LINENO (1<<3)
#define _DPRINTK_FLAGS_INCL_TID (1<<4)
+
+#define _DPRINTK_FLAGS_INCL_ANY \
+ (_DPRINTK_FLAGS_INCL_MODNAME | _DPRINTK_FLAGS_INCL_FUNCNAME |\
+ _DPRINTK_FLAGS_INCL_LINENO | _DPRINTK_FLAGS_INCL_TID)
+
#if defined DEBUG
#define _DPRINTK_FLAGS_DEFAULT _DPRINTK_FLAGS_PRINT
#else
@@ -46,11 +56,82 @@ struct _ddebug {
#endif
} __attribute__((aligned(8)));
+enum class_map_type {
+ DD_CLASS_TYPE_DISJOINT_BITS,
+ /**
+ * DD_CLASS_TYPE_DISJOINT_BITS: classes are independent, one per bit,
+ * expecting hex input. Built for drm.debug; the basis for the other types.
+ */
+ DD_CLASS_TYPE_LEVEL_NUM,
+ /**
+ * DD_CLASS_TYPE_LEVEL_NUM: input is numeric level, 0-N.
+ * N turns on just bits N-1 .. 0, so N=0 turns all bits off.
+ */
+ DD_CLASS_TYPE_DISJOINT_NAMES,
+ /**
+ * DD_CLASS_TYPE_DISJOINT_NAMES: input is a CSV of [+-]CLASS_NAMES,
+ * classes are independent, like _DISJOINT_BITS.
+ */
+ DD_CLASS_TYPE_LEVEL_NAMES,
+ /**
+ * DD_CLASS_TYPE_LEVEL_NAMES: input is a CSV of [+-]CLASS_NAMES,
+ * intended for names like INFO, DEBUG, TRACE, with a module prefix;
+ * avoid EMERG, ALERT, CRIT, ERR, WARNING: they're not debug levels.
+ */
+};
+
+struct ddebug_class_map {
+ struct list_head link;
+ struct module *mod;
+ const char *mod_name; /* needed for builtins */
+ const char **class_names;
+ const int length;
+ const int base; /* index of 1st .class_id, allows split/shared space */
+ enum class_map_type map_type;
+};
+
+/**
+ * DECLARE_DYNDBG_CLASSMAP - declare classnames known by a module
+ * @_var: a struct ddebug_class_map, passed to module_param_cb
+ * @_maptype: enum class_map_type, chooses bits/verbose, numeric/symbolic
+ * @_base: offset of the 1st class-name; splits the .class_id space
+ * @...: class-names used to control class'd prdbgs
+ */
+#define DECLARE_DYNDBG_CLASSMAP(_var, _maptype, _base, ...) \
+ static const char *_var##_classnames[] = { __VA_ARGS__ }; \
+ static struct ddebug_class_map __aligned(8) __used \
+ __section("__dyndbg_classes") _var = { \
+ .mod = THIS_MODULE, \
+ .mod_name = KBUILD_MODNAME, \
+ .base = _base, \
+ .map_type = _maptype, \
+ .length = NUM_TYPE_ARGS(char*, __VA_ARGS__), \
+ .class_names = _var##_classnames, \
+ }
+#define NUM_TYPE_ARGS(eltype, ...) \
+ (sizeof((eltype[]){__VA_ARGS__}) / sizeof(eltype))
+
+/* encapsulate linker provided built-in (or module) dyndbg data */
+struct _ddebug_info {
+ struct _ddebug *descs;
+ struct ddebug_class_map *classes;
+ unsigned int num_descs;
+ unsigned int num_classes;
+};
+
+struct ddebug_class_param {
+ union {
+ unsigned long *bits;
+ unsigned int *lvl;
+ };
+ char flags[8];
+ const struct ddebug_class_map *map;
+};
+
+#if defined(CONFIG_DYNAMIC_DEBUG_CORE)
+int ddebug_add_module(struct _ddebug_info *dyndbg, const char *modname);
-#if defined(CONFIG_DYNAMIC_DEBUG)
-int ddebug_add_module(struct _ddebug *tab, unsigned int n,
- const char *modname);
extern int ddebug_remove_module(const char *mod_name);
extern __printf(2, 3)
void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...);
@@ -78,17 +159,23 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
const struct ib_device *ibdev,
const char *fmt, ...);
-#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+#define DEFINE_DYNAMIC_DEBUG_METADATA_CLS(name, cls, fmt) \
static struct _ddebug __aligned(8) \
- __attribute__((section("__verbose"))) name = { \
+ __section("__dyndbg") name = { \
.modname = KBUILD_MODNAME, \
.function = __func__, \
.filename = __FILE__, \
.format = (fmt), \
.lineno = __LINE__, \
.flags = _DPRINTK_FLAGS_DEFAULT, \
+ .class_id = cls, \
_DPRINTK_KEY_INIT \
- }
+ }; \
+ BUILD_BUG_ON_MSG(cls > _DPRINTK_CLASS_DFLT, \
+ "classid value overflow")
+
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+ DEFINE_DYNAMIC_DEBUG_METADATA_CLS(name, _DPRINTK_CLASS_DFLT, fmt)
#ifdef CONFIG_JUMP_LABEL
@@ -105,7 +192,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
static_branch_unlikely(&descriptor.key.dd_key_false)
#endif
-#else /* !HAVE_JUMP_LABEL */
+#else /* !CONFIG_JUMP_LABEL */
#define _DPRINTK_KEY_INIT
@@ -117,44 +204,72 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)
#endif
-#endif
+#endif /* CONFIG_JUMP_LABEL */
-#define __dynamic_func_call(id, fmt, func, ...) do { \
- DEFINE_DYNAMIC_DEBUG_METADATA(id, fmt); \
- if (DYNAMIC_DEBUG_BRANCH(id)) \
- func(&id, ##__VA_ARGS__); \
-} while (0)
-
-#define __dynamic_func_call_no_desc(id, fmt, func, ...) do { \
- DEFINE_DYNAMIC_DEBUG_METADATA(id, fmt); \
+/*
+ * Factory macros: ($prefix)dynamic_func_call($suffix)
+ *
+ * Lower layer (with __ prefix) gets the callsite metadata, and wraps
+ * the func inside a debug-branch/static-key construct. Upper layer
+ * (with _ prefix) does the UNIQUE_ID once, so that lower can ref the
+ * name/label multiple times, and tie the elements together.
+ * Multiple flavors:
+ * (|_cls): the bare form adds in _DPRINTK_CLASS_DFLT as needed
+ * (|_no_desc): the bare form gets the callsite descriptor as its
+ * 1st arg (for prdbgs); the _no_desc form does not
+ */
+#define __dynamic_func_call_cls(id, cls, fmt, func, ...) do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA_CLS(id, cls, fmt); \
if (DYNAMIC_DEBUG_BRANCH(id)) \
- func(__VA_ARGS__); \
+ func(&id, ##__VA_ARGS__); \
} while (0)
+#define __dynamic_func_call(id, fmt, func, ...) \
+ __dynamic_func_call_cls(id, _DPRINTK_CLASS_DFLT, fmt, \
+ func, ##__VA_ARGS__)
+
+#define __dynamic_func_call_cls_no_desc(id, cls, fmt, func, ...) do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA_CLS(id, cls, fmt); \
+ if (DYNAMIC_DEBUG_BRANCH(id)) \
+ func(__VA_ARGS__); \
+} while (0)
+#define __dynamic_func_call_no_desc(id, fmt, func, ...) \
+ __dynamic_func_call_cls_no_desc(id, _DPRINTK_CLASS_DFLT, \
+ fmt, func, ##__VA_ARGS__)
/*
* "Factory macro" for generating a call to func, guarded by a
- * DYNAMIC_DEBUG_BRANCH. The dynamic debug decriptor will be
+ * DYNAMIC_DEBUG_BRANCH. The dynamic debug descriptor will be
* initialized using the fmt argument. The function will be called with
* the address of the descriptor as first argument, followed by all
* the varargs. Note that fmt is repeated in invocations of this
* macro.
*/
+#define _dynamic_func_call_cls(cls, fmt, func, ...) \
+ __dynamic_func_call_cls(__UNIQUE_ID(ddebug), cls, fmt, func, ##__VA_ARGS__)
#define _dynamic_func_call(fmt, func, ...) \
- __dynamic_func_call(__UNIQUE_ID(ddebug), fmt, func, ##__VA_ARGS__)
+ _dynamic_func_call_cls(_DPRINTK_CLASS_DFLT, fmt, func, ##__VA_ARGS__)
+
/*
* A variant that does the same, except that the descriptor is not
* passed as the first argument to the function; it is only called
* with precisely the macro's varargs.
*/
-#define _dynamic_func_call_no_desc(fmt, func, ...) \
- __dynamic_func_call_no_desc(__UNIQUE_ID(ddebug), fmt, func, ##__VA_ARGS__)
+#define _dynamic_func_call_cls_no_desc(cls, fmt, func, ...) \
+ __dynamic_func_call_cls_no_desc(__UNIQUE_ID(ddebug), cls, fmt, \
+ func, ##__VA_ARGS__)
+#define _dynamic_func_call_no_desc(fmt, func, ...) \
+ _dynamic_func_call_cls_no_desc(_DPRINTK_CLASS_DFLT, fmt, \
+ func, ##__VA_ARGS__)
+
+#define dynamic_pr_debug_cls(cls, fmt, ...) \
+ _dynamic_func_call_cls(cls, fmt, __dynamic_pr_debug, \
+ pr_fmt(fmt), ##__VA_ARGS__)
#define dynamic_pr_debug(fmt, ...) \
- _dynamic_func_call(fmt, __dynamic_pr_debug, \
+ _dynamic_func_call(fmt, __dynamic_pr_debug, \
pr_fmt(fmt), ##__VA_ARGS__)
#define dynamic_dev_dbg(dev, fmt, ...) \
- _dynamic_func_call(fmt,__dynamic_dev_dbg, \
+ _dynamic_func_call(fmt, __dynamic_dev_dbg, \
dev, fmt, ##__VA_ARGS__)
#define dynamic_netdev_dbg(dev, fmt, ...) \
@@ -172,13 +287,24 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
KERN_DEBUG, prefix_str, prefix_type, \
rowsize, groupsize, buf, len, ascii)
-#else
+struct kernel_param;
+int param_set_dyndbg_classes(const char *instr, const struct kernel_param *kp);
+int param_get_dyndbg_classes(char *buffer, const struct kernel_param *kp);
+
+/* for test only, generally expect drm.debug style macro wrappers */
+#define __pr_debug_cls(cls, fmt, ...) do { \
+ BUILD_BUG_ON_MSG(!__builtin_constant_p(cls), \
+ "expecting constant class int/enum"); \
+ dynamic_pr_debug_cls(cls, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#else /* !CONFIG_DYNAMIC_DEBUG_CORE */
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/printk.h>
-static inline int ddebug_add_module(struct _ddebug *tab, unsigned int n,
- const char *modname)
+static inline int ddebug_add_module(struct _ddebug_info *dinfo, const char *modname)
{
return 0;
}
@@ -191,7 +317,7 @@ static inline int ddebug_remove_module(const char *mod)
static inline int ddebug_dyndbg_module_param_cb(char *param, char *val,
const char *modname)
{
- if (strstr(param, "dyndbg")) {
+ if (!strcmp(param, "dyndbg")) {
/* avoid pr_warn(), which wants pr_fmt() fully defined */
printk(KERN_WARNING "dyndbg param is supported only in "
"CONFIG_DYNAMIC_DEBUG builds\n");
@@ -210,6 +336,15 @@ static inline int ddebug_dyndbg_module_param_cb(char *param, char *val,
print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, \
rowsize, groupsize, buf, len, ascii); \
} while (0)
-#endif
+
+struct kernel_param;
+static inline int param_set_dyndbg_classes(const char *instr, const struct kernel_param *kp)
+{ return 0; }
+static inline int param_get_dyndbg_classes(char *buffer, const struct kernel_param *kp)
+{ return 0; }
+
+#endif /* !CONFIG_DYNAMIC_DEBUG_CORE */
+
+extern const struct kernel_param_ops param_ops_dyndbg_classes;
#endif
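
A sketch of the intended wiring, loosely modeled on drm.debug (all
names below are illustrative):

/* One class name per bit, set via a hex-valued module param */
DECLARE_DYNDBG_CLASSMAP(example_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
			"EXAMPLE_CORE", "EXAMPLE_IO");

static unsigned long example_debug;
static struct ddebug_class_param example_bitmap = {
	.bits = &example_debug,
	.flags = "p",
	.map = &example_classes,
};
module_param_cb(debug, &param_ops_dyndbg_classes, &example_bitmap, 0600);

Callsites would then tag themselves with __pr_debug_cls(0, ...) or a
per-class wrapper macro.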
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
index 99fc06f0afc1..407c2f281b64 100644
--- a/include/linux/dynamic_queue_limits.h
+++ b/include/linux/dynamic_queue_limits.h
@@ -38,6 +38,8 @@
#ifdef __KERNEL__
+#include <asm/bug.h>
+
struct dql {
/* Fields accessed in enqueue path (dql_queued) */
unsigned int num_queued; /* Total ever queued */
diff --git a/include/linux/edac.h b/include/linux/edac.h
index cc31b9742684..fa4bda2a70f6 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -31,14 +31,6 @@ struct device;
extern int edac_op_state;
struct bus_type *edac_get_sysfs_subsys(void);
-int edac_get_report_status(void);
-void edac_set_report_status(int new);
-
-enum {
- EDAC_REPORTING_ENABLED,
- EDAC_REPORTING_DISABLED,
- EDAC_REPORTING_FORCE
-};
static inline void opstate_init(void)
{
@@ -183,11 +175,18 @@ static inline char *mc_event_error_type(const unsigned int err_type)
* @MEM_RDDR3: Registered DDR3 RAM
* This is a variant of the DDR3 memories.
* @MEM_LRDDR3: Load-Reduced DDR3 memory.
+ * @MEM_LPDDR3: Low-Power DDR3 memory.
* @MEM_DDR4: Unbuffered DDR4 RAM
* @MEM_RDDR4: Registered DDR4 RAM
* This is a variant of the DDR4 memories.
* @MEM_LRDDR4: Load-Reduced DDR4 memory.
+ * @MEM_LPDDR4: Low-Power DDR4 memory.
+ * @MEM_DDR5: Unbuffered DDR5 RAM
+ * @MEM_RDDR5: Registered DDR5 RAM
+ * @MEM_LRDDR5: Load-Reduced DDR5 memory.
* @MEM_NVDIMM: Non-volatile RAM
+ * @MEM_WIO2: Wide I/O 2.
+ * @MEM_HBM2: High Bandwidth Memory Gen 2.
*/
enum mem_type {
MEM_EMPTY = 0,
@@ -208,10 +207,17 @@ enum mem_type {
MEM_DDR3,
MEM_RDDR3,
MEM_LRDDR3,
+ MEM_LPDDR3,
MEM_DDR4,
MEM_RDDR4,
MEM_LRDDR4,
+ MEM_LPDDR4,
+ MEM_DDR5,
+ MEM_RDDR5,
+ MEM_LRDDR5,
MEM_NVDIMM,
+ MEM_WIO2,
+ MEM_HBM2,
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -225,19 +231,26 @@ enum mem_type {
#define MEM_FLAG_DDR BIT(MEM_DDR)
#define MEM_FLAG_RDDR BIT(MEM_RDDR)
#define MEM_FLAG_RMBS BIT(MEM_RMBS)
-#define MEM_FLAG_DDR2 BIT(MEM_DDR2)
-#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
-#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
-#define MEM_FLAG_XDR BIT(MEM_XDR)
-#define MEM_FLAG_DDR3 BIT(MEM_DDR3)
-#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
-#define MEM_FLAG_DDR4 BIT(MEM_DDR4)
-#define MEM_FLAG_RDDR4 BIT(MEM_RDDR4)
-#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4)
-#define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM)
+#define MEM_FLAG_DDR2 BIT(MEM_DDR2)
+#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
+#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
+#define MEM_FLAG_XDR BIT(MEM_XDR)
+#define MEM_FLAG_DDR3 BIT(MEM_DDR3)
+#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
+#define MEM_FLAG_LPDDR3 BIT(MEM_LPDDR3)
+#define MEM_FLAG_DDR4 BIT(MEM_DDR4)
+#define MEM_FLAG_RDDR4 BIT(MEM_RDDR4)
+#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4)
+#define MEM_FLAG_LPDDR4 BIT(MEM_LPDDR4)
+#define MEM_FLAG_DDR5 BIT(MEM_DDR5)
+#define MEM_FLAG_RDDR5 BIT(MEM_RDDR5)
+#define MEM_FLAG_LRDDR5 BIT(MEM_LRDDR5)
+#define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM)
+#define MEM_FLAG_WIO2 BIT(MEM_WIO2)
+#define MEM_FLAG_HBM2 BIT(MEM_HBM2)
/**
- * enum edac-type - Error Detection and Correction capabilities and mode
+ * enum edac_type - Error Detection and Correction capabilities and mode
* @EDAC_UNKNOWN: Unknown if ECC is available
* @EDAC_NONE: Doesn't support ECC
* @EDAC_RESERVED: Reserved ECC type
@@ -317,7 +330,7 @@ enum scrub_type {
#define OP_OFFLINE 0x300
/**
- * enum edac_mc_layer - memory controller hierarchy layer
+ * enum edac_mc_layer_type - memory controller hierarchy layer
*
* @EDAC_MC_LAYER_BRANCH: memory layer is named "branch"
* @EDAC_MC_LAYER_CHANNEL: memory layer is named "channel"
@@ -383,6 +396,9 @@ struct dimm_info {
unsigned int csrow, cschannel; /* Points to the old API data */
u16 smbios_handle; /* Handle for SMBIOS type 17 */
+
+ u32 ce_count;
+ u32 ue_count;
};
/**
@@ -442,6 +458,7 @@ struct errcount_attribute_data {
* struct edac_raw_error_desc - Raw error report structure
* @grain: minimum granularity for an error report, in bytes
* @error_count: number of errors of the same type
+ * @type: severity of the error (CE/UE/Fatal)
* @top_layer: top layer of the error (layer[0])
* @mid_layer: middle layer of the error (layer[1])
* @low_layer: low layer of the error (layer[2])
@@ -453,8 +470,6 @@ struct errcount_attribute_data {
* @location: location of the error
* @label: label of the affected DIMM(s)
* @other_detail: other driver-specific detail about the error
- * @enable_per_layer_report: if false, the error affects all layers
- * (typically, a memory controller error)
*/
struct edac_raw_error_desc {
char location[LOCATION_SIZE];
@@ -462,6 +477,7 @@ struct edac_raw_error_desc {
long grain;
u16 error_count;
+ enum hw_event_mc_err_type type;
int top_layer;
int mid_layer;
int low_layer;
@@ -470,7 +486,6 @@ struct edac_raw_error_desc {
unsigned long syndrome;
const char *msg;
const char *other_detail;
- bool enable_per_layer_report;
};
/* MEMORY controller information structure
@@ -560,7 +575,6 @@ struct mem_ctl_info {
*/
u32 ce_noinfo_count, ue_noinfo_count;
u32 ue_mc, ce_mc;
- u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
struct completion complete;
@@ -602,27 +616,6 @@ struct mem_ctl_info {
: NULL)
/**
- * edac_get_dimm_by_index - Get DIMM info at @index from a memory
- * controller
- *
- * @mci: MC descriptor struct mem_ctl_info
- * @index: index in the memory controller's DIMM array
- *
- * Returns a struct dimm_info * or NULL on failure.
- */
-static inline struct dimm_info *
-edac_get_dimm_by_index(struct mem_ctl_info *mci, int index)
-{
- if (index < 0 || index >= mci->tot_dimms)
- return NULL;
-
- if (WARN_ON_ONCE(mci->dimms[index]->idx != index))
- return NULL;
-
- return mci->dimms[index];
-}
-
-/**
* edac_get_dimm - Get DIMM info from a memory controller given by
* [layer0,layer1,layer2] position
*
@@ -657,6 +650,12 @@ static inline struct dimm_info *edac_get_dimm(struct mem_ctl_info *mci,
if (mci->n_layers > 2)
index = index * mci->layers[2].size + layer2;
- return edac_get_dimm_by_index(mci, index);
+ if (index < 0 || index >= mci->tot_dimms)
+ return NULL;
+
+ if (WARN_ON_ONCE(mci->dimms[index]->idx != index))
+ return NULL;
+
+ return mci->dimms[index];
}
#endif /* _LINUX_EDAC_H_ */
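
A short sketch of a lookup through the accessor above (layer indices
are illustrative; ce_count is the per-DIMM counter added earlier in
this header):

static void example_report(struct mem_ctl_info *mci)
{
	/* DIMM at layer position [0, 1, 2] of a 3-layer controller */
	struct dimm_info *dimm = edac_get_dimm(mci, 0, 1, 2);

	if (dimm)
		pr_info("corrected errors: %u\n", dimm->ce_count);
}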
diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h
index eec7928ff8fe..34c2175e6a1e 100644
--- a/include/linux/eeprom_93xx46.h
+++ b/include/linux/eeprom_93xx46.h
@@ -10,12 +10,17 @@ struct eeprom_93xx46_platform_data {
#define EE_ADDR8 0x01 /* 8 bit addr. cfg */
#define EE_ADDR16 0x02 /* 16 bit addr. cfg */
#define EE_READONLY 0x08 /* forbid writing */
+#define EE_SIZE1K 0x10 /* 1 Kbit of data, i.e. a 93xx46 */
+#define EE_SIZE2K 0x20 /* 2 Kbit of data, i.e. a 93xx56 */
+#define EE_SIZE4K 0x40 /* 4 Kbit of data, i.e. a 93xx66 */
unsigned int quirks;
/* Single word read transfers only; no sequential read. */
#define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0)
/* Instructions such as EWEN are (addrlen + 2) in length. */
#define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1)
+/* Add extra cycle after address during a read */
+#define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE BIT(2)
/*
* optional hooks to control additional logic
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 7efd7072cca5..7603fc58c47c 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -29,16 +29,17 @@
#include <asm/page.h>
#define EFI_SUCCESS 0
-#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1)))
#define EFI_INVALID_PARAMETER ( 2 | (1UL << (BITS_PER_LONG-1)))
#define EFI_UNSUPPORTED ( 3 | (1UL << (BITS_PER_LONG-1)))
-#define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1)))
#define EFI_BUFFER_TOO_SMALL ( 5 | (1UL << (BITS_PER_LONG-1)))
#define EFI_NOT_READY ( 6 | (1UL << (BITS_PER_LONG-1)))
#define EFI_DEVICE_ERROR ( 7 | (1UL << (BITS_PER_LONG-1)))
#define EFI_WRITE_PROTECTED ( 8 | (1UL << (BITS_PER_LONG-1)))
#define EFI_OUT_OF_RESOURCES ( 9 | (1UL << (BITS_PER_LONG-1)))
#define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_TIMEOUT (18 | (1UL << (BITS_PER_LONG-1)))
#define EFI_ABORTED (21 | (1UL << (BITS_PER_LONG-1)))
#define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1)))
@@ -56,19 +57,6 @@ typedef void *efi_handle_t;
#define __efiapi
#endif
-#define efi_get_handle_at(array, idx) \
- (efi_is_native() ? (array)[idx] \
- : (efi_handle_t)(unsigned long)((u32 *)(array))[idx])
-
-#define efi_get_handle_num(size) \
- ((size) / (efi_is_native() ? sizeof(efi_handle_t) : sizeof(u32)))
-
-#define for_each_efi_handle(handle, array, size, i) \
- for (i = 0; \
- i < efi_get_handle_num(size) && \
- ((handle = efi_get_handle_at((array), i)) || true); \
- i++)
-
/*
* The UEFI spec and EDK2 reference implementation both define EFI_GUID as
* struct { u32 a; u16; b; u16 c; u8 d[8]; }; and so the implied alignment
@@ -84,8 +72,10 @@ typedef void *efi_handle_t;
*/
typedef guid_t efi_guid_t __aligned(__alignof__(u32));
-#define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \
- GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)
+#define EFI_GUID(a, b, c, d...) (efi_guid_t){ { \
+ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
+ (b) & 0xff, ((b) >> 8) & 0xff, \
+ (c) & 0xff, ((c) >> 8) & 0xff, d } }
/*
* Generic EFI table header
@@ -134,6 +124,7 @@ typedef struct {
((u64)0x0000000000010000ULL) /* higher reliability */
#define EFI_MEMORY_RO ((u64)0x0000000000020000ULL) /* read-only */
#define EFI_MEMORY_SP ((u64)0x0000000000040000ULL) /* soft reserved */
+#define EFI_MEMORY_CPU_CRYPTO ((u64)0x0000000000080000ULL) /* supports encryption */
#define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */
#define EFI_MEMORY_DESCRIPTOR_VERSION 1
@@ -157,13 +148,50 @@ typedef struct {
u32 imagesize;
} efi_capsule_header_t;
-struct efi_boot_memmap {
- efi_memory_desc_t **map;
- unsigned long *map_size;
- unsigned long *desc_size;
- u32 *desc_ver;
- unsigned long *key_ptr;
- unsigned long *buff_size;
+/* EFI_FIRMWARE_MANAGEMENT_CAPSULE_HEADER */
+struct efi_manage_capsule_header {
+ u32 ver;
+ u16 emb_drv_cnt;
+ u16 payload_cnt;
+ /*
+ * Variable-size array of the size given by the sum of
+ * emb_drv_cnt and payload_cnt.
+ */
+ u64 offset_list[];
+} __packed;
+
+/* EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER */
+struct efi_manage_capsule_image_header {
+ u32 ver;
+ efi_guid_t image_type_id;
+ u8 image_index;
+ u8 reserved_bytes[3];
+ u32 image_size;
+ u32 vendor_code_size;
+ /* hw_ins was introduced in version 2 */
+ u64 hw_ins;
+ /* capsule_support was introduced in version 3 */
+ u64 capsule_support;
+} __packed;
+
+/* WIN_CERTIFICATE */
+struct win_cert {
+ u32 len;
+ u16 rev;
+ u16 cert_type;
+};
+
+/* WIN_CERTIFICATE_UEFI_GUID */
+struct win_cert_uefi_guid {
+ struct win_cert hdr;
+ efi_guid_t cert_type;
+ u8 cert_data[];
+};
+
+/* EFI_FIRMWARE_IMAGE_AUTHENTICATION */
+struct efi_image_auth {
+ u64 mon_count;
+ struct win_cert_uefi_guid auth_info;
};
/*
@@ -185,19 +213,11 @@ struct capsule_info {
size_t page_bytes_remain;
};
+int efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
+ size_t hdr_bytes);
int __efi_capsule_setup_info(struct capsule_info *cap_info);
/*
- * Allocation types for calls to boottime->allocate_pages.
- */
-#define EFI_ALLOCATE_ANY_PAGES 0
-#define EFI_ALLOCATE_MAX_ADDRESS 1
-#define EFI_ALLOCATE_ADDRESS 2
-#define EFI_MAX_ALLOCATE_TYPE 3
-
-typedef int (*efi_freemem_callback_t) (u64 start, u64 end, void *arg);
-
-/*
* Types and defines for Time Services
*/
#define EFI_TIME_ADJUST_DAYLIGHT 0x1
@@ -224,291 +244,7 @@ typedef struct {
u8 sets_to_zero;
} efi_time_cap_t;
-typedef struct {
- efi_table_hdr_t hdr;
- u32 raise_tpl;
- u32 restore_tpl;
- u32 allocate_pages;
- u32 free_pages;
- u32 get_memory_map;
- u32 allocate_pool;
- u32 free_pool;
- u32 create_event;
- u32 set_timer;
- u32 wait_for_event;
- u32 signal_event;
- u32 close_event;
- u32 check_event;
- u32 install_protocol_interface;
- u32 reinstall_protocol_interface;
- u32 uninstall_protocol_interface;
- u32 handle_protocol;
- u32 __reserved;
- u32 register_protocol_notify;
- u32 locate_handle;
- u32 locate_device_path;
- u32 install_configuration_table;
- u32 load_image;
- u32 start_image;
- u32 exit;
- u32 unload_image;
- u32 exit_boot_services;
- u32 get_next_monotonic_count;
- u32 stall;
- u32 set_watchdog_timer;
- u32 connect_controller;
- u32 disconnect_controller;
- u32 open_protocol;
- u32 close_protocol;
- u32 open_protocol_information;
- u32 protocols_per_handle;
- u32 locate_handle_buffer;
- u32 locate_protocol;
- u32 install_multiple_protocol_interfaces;
- u32 uninstall_multiple_protocol_interfaces;
- u32 calculate_crc32;
- u32 copy_mem;
- u32 set_mem;
- u32 create_event_ex;
-} __packed efi_boot_services_32_t;
-
-/*
- * EFI Boot Services table
- */
-typedef union {
- struct {
- efi_table_hdr_t hdr;
- void *raise_tpl;
- void *restore_tpl;
- efi_status_t (__efiapi *allocate_pages)(int, int, unsigned long,
- efi_physical_addr_t *);
- efi_status_t (__efiapi *free_pages)(efi_physical_addr_t,
- unsigned long);
- efi_status_t (__efiapi *get_memory_map)(unsigned long *, void *,
- unsigned long *,
- unsigned long *, u32 *);
- efi_status_t (__efiapi *allocate_pool)(int, unsigned long,
- void **);
- efi_status_t (__efiapi *free_pool)(void *);
- void *create_event;
- void *set_timer;
- void *wait_for_event;
- void *signal_event;
- void *close_event;
- void *check_event;
- void *install_protocol_interface;
- void *reinstall_protocol_interface;
- void *uninstall_protocol_interface;
- efi_status_t (__efiapi *handle_protocol)(efi_handle_t,
- efi_guid_t *, void **);
- void *__reserved;
- void *register_protocol_notify;
- efi_status_t (__efiapi *locate_handle)(int, efi_guid_t *,
- void *, unsigned long *,
- efi_handle_t *);
- void *locate_device_path;
- efi_status_t (__efiapi *install_configuration_table)(efi_guid_t *,
- void *);
- void *load_image;
- void *start_image;
- void *exit;
- void *unload_image;
- efi_status_t (__efiapi *exit_boot_services)(efi_handle_t,
- unsigned long);
- void *get_next_monotonic_count;
- void *stall;
- void *set_watchdog_timer;
- void *connect_controller;
- efi_status_t (__efiapi *disconnect_controller)(efi_handle_t,
- efi_handle_t,
- efi_handle_t);
- void *open_protocol;
- void *close_protocol;
- void *open_protocol_information;
- void *protocols_per_handle;
- void *locate_handle_buffer;
- efi_status_t (__efiapi *locate_protocol)(efi_guid_t *, void *,
- void **);
- void *install_multiple_protocol_interfaces;
- void *uninstall_multiple_protocol_interfaces;
- void *calculate_crc32;
- void *copy_mem;
- void *set_mem;
- void *create_event_ex;
- };
- efi_boot_services_32_t mixed_mode;
-} efi_boot_services_t;
-
-typedef enum {
- EfiPciIoWidthUint8,
- EfiPciIoWidthUint16,
- EfiPciIoWidthUint32,
- EfiPciIoWidthUint64,
- EfiPciIoWidthFifoUint8,
- EfiPciIoWidthFifoUint16,
- EfiPciIoWidthFifoUint32,
- EfiPciIoWidthFifoUint64,
- EfiPciIoWidthFillUint8,
- EfiPciIoWidthFillUint16,
- EfiPciIoWidthFillUint32,
- EfiPciIoWidthFillUint64,
- EfiPciIoWidthMaximum
-} EFI_PCI_IO_PROTOCOL_WIDTH;
-
-typedef enum {
- EfiPciIoAttributeOperationGet,
- EfiPciIoAttributeOperationSet,
- EfiPciIoAttributeOperationEnable,
- EfiPciIoAttributeOperationDisable,
- EfiPciIoAttributeOperationSupported,
- EfiPciIoAttributeOperationMaximum
-} EFI_PCI_IO_PROTOCOL_ATTRIBUTE_OPERATION;
-
-typedef struct {
- u32 read;
- u32 write;
-} efi_pci_io_protocol_access_32_t;
-
-typedef union efi_pci_io_protocol efi_pci_io_protocol_t;
-
-typedef
-efi_status_t (__efiapi *efi_pci_io_protocol_cfg_t)(efi_pci_io_protocol_t *,
- EFI_PCI_IO_PROTOCOL_WIDTH,
- u32 offset,
- unsigned long count,
- void *buffer);
-
-typedef struct {
- void *read;
- void *write;
-} efi_pci_io_protocol_access_t;
-
-typedef struct {
- efi_pci_io_protocol_cfg_t read;
- efi_pci_io_protocol_cfg_t write;
-} efi_pci_io_protocol_config_access_t;
-
-union efi_pci_io_protocol {
- struct {
- void *poll_mem;
- void *poll_io;
- efi_pci_io_protocol_access_t mem;
- efi_pci_io_protocol_access_t io;
- efi_pci_io_protocol_config_access_t pci;
- void *copy_mem;
- void *map;
- void *unmap;
- void *allocate_buffer;
- void *free_buffer;
- void *flush;
- efi_status_t (__efiapi *get_location)(efi_pci_io_protocol_t *,
- unsigned long *segment_nr,
- unsigned long *bus_nr,
- unsigned long *device_nr,
- unsigned long *func_nr);
- void *attributes;
- void *get_bar_attributes;
- void *set_bar_attributes;
- uint64_t romsize;
- void *romimage;
- };
- struct {
- u32 poll_mem;
- u32 poll_io;
- efi_pci_io_protocol_access_32_t mem;
- efi_pci_io_protocol_access_32_t io;
- efi_pci_io_protocol_access_32_t pci;
- u32 copy_mem;
- u32 map;
- u32 unmap;
- u32 allocate_buffer;
- u32 free_buffer;
- u32 flush;
- u32 get_location;
- u32 attributes;
- u32 get_bar_attributes;
- u32 set_bar_attributes;
- u64 romsize;
- u32 romimage;
- } mixed_mode;
-};
-
-#define EFI_PCI_IO_ATTRIBUTE_ISA_MOTHERBOARD_IO 0x0001
-#define EFI_PCI_IO_ATTRIBUTE_ISA_IO 0x0002
-#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO 0x0004
-#define EFI_PCI_IO_ATTRIBUTE_VGA_MEMORY 0x0008
-#define EFI_PCI_IO_ATTRIBUTE_VGA_IO 0x0010
-#define EFI_PCI_IO_ATTRIBUTE_IDE_PRIMARY_IO 0x0020
-#define EFI_PCI_IO_ATTRIBUTE_IDE_SECONDARY_IO 0x0040
-#define EFI_PCI_IO_ATTRIBUTE_MEMORY_WRITE_COMBINE 0x0080
-#define EFI_PCI_IO_ATTRIBUTE_IO 0x0100
-#define EFI_PCI_IO_ATTRIBUTE_MEMORY 0x0200
-#define EFI_PCI_IO_ATTRIBUTE_BUS_MASTER 0x0400
-#define EFI_PCI_IO_ATTRIBUTE_MEMORY_CACHED 0x0800
-#define EFI_PCI_IO_ATTRIBUTE_MEMORY_DISABLE 0x1000
-#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_DEVICE 0x2000
-#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM 0x4000
-#define EFI_PCI_IO_ATTRIBUTE_DUAL_ADDRESS_CYCLE 0x8000
-#define EFI_PCI_IO_ATTRIBUTE_ISA_IO_16 0x10000
-#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000
-#define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000
-
-struct efi_dev_path;
-
-typedef union apple_properties_protocol apple_properties_protocol_t;
-
-union apple_properties_protocol {
- struct {
- unsigned long version;
- efi_status_t (__efiapi *get)(apple_properties_protocol_t *,
- struct efi_dev_path *,
- efi_char16_t *, void *, u32 *);
- efi_status_t (__efiapi *set)(apple_properties_protocol_t *,
- struct efi_dev_path *,
- efi_char16_t *, void *, u32);
- efi_status_t (__efiapi *del)(apple_properties_protocol_t *,
- struct efi_dev_path *,
- efi_char16_t *);
- efi_status_t (__efiapi *get_all)(apple_properties_protocol_t *,
- void *buffer, u32 *);
- };
- struct {
- u32 version;
- u32 get;
- u32 set;
- u32 del;
- u32 get_all;
- } mixed_mode;
-};
-
-typedef u32 efi_tcg2_event_log_format;
-
-typedef union efi_tcg2_protocol efi_tcg2_protocol_t;
-
-union efi_tcg2_protocol {
- struct {
- void *get_capability;
- efi_status_t (__efiapi *get_event_log)(efi_handle_t,
- efi_tcg2_event_log_format,
- efi_physical_addr_t *,
- efi_physical_addr_t *,
- efi_bool_t *);
- void *hash_log_extend_event;
- void *submit_command;
- void *get_active_pcr_banks;
- void *set_active_pcr_banks;
- void *get_result_of_set_active_pcr_banks;
- };
- struct {
- u32 get_capability;
- u32 get_event_log;
- u32 hash_log_extend_event;
- u32 submit_command;
- u32 get_active_pcr_banks;
- u32 set_active_pcr_banks;
- u32 get_result_of_set_active_pcr_banks;
- } mixed_mode;
-};
+typedef union efi_boot_services efi_boot_services_t;
/*
* Types and defines for EFI ResetSystem
@@ -632,6 +368,9 @@ void efi_native_runtime_setup(void);
#define UV_SYSTEM_TABLE_GUID EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93)
#define LINUX_EFI_CRASH_GUID EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0)
#define LOADED_IMAGE_PROTOCOL_GUID EFI_GUID(0x5b1b31a1, 0x9562, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
+#define LOADED_IMAGE_DEVICE_PATH_PROTOCOL_GUID EFI_GUID(0xbc62157e, 0x3e33, 0x4fec, 0x99, 0x20, 0x2d, 0x3b, 0x36, 0xd7, 0x50, 0xdf)
+#define EFI_DEVICE_PATH_PROTOCOL_GUID EFI_GUID(0x09576e91, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
+#define EFI_DEVICE_PATH_TO_TEXT_PROTOCOL_GUID EFI_GUID(0x8b843e20, 0x8132, 0x4852, 0x90, 0xcc, 0x55, 0x1a, 0x4e, 0x4a, 0x7f, 0x1c)
#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a)
#define EFI_UGA_PROTOCOL_GUID EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39)
#define EFI_PCI_IO_PROTOCOL_GUID EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a)
@@ -646,6 +385,11 @@ void efi_native_runtime_setup(void);
#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0)
#define EFI_TCG2_PROTOCOL_GUID EFI_GUID(0x607f766c, 0x7455, 0x42be, 0x93, 0x0b, 0xe4, 0xd7, 0x6d, 0xb2, 0x72, 0x0f)
+#define EFI_LOAD_FILE_PROTOCOL_GUID EFI_GUID(0x56ec3091, 0x954c, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
+#define EFI_LOAD_FILE2_PROTOCOL_GUID EFI_GUID(0x4006c0c1, 0xfcb3, 0x403e, 0x99, 0x6d, 0x4a, 0x6c, 0x87, 0x24, 0xe0, 0x6d)
+#define EFI_RT_PROPERTIES_TABLE_GUID EFI_GUID(0xeb66918a, 0x7eef, 0x402a, 0x84, 0x2e, 0x93, 0x1d, 0x21, 0xc3, 0x8a, 0xe9)
+#define EFI_DXE_SERVICES_TABLE_GUID EFI_GUID(0x05ad34ba, 0x6f02, 0x4214, 0x95, 0x2e, 0x4d, 0xa0, 0x39, 0x8e, 0x2b, 0xb9)
+#define EFI_SMBIOS_PROTOCOL_GUID EFI_GUID(0x03583ff6, 0xcb36, 0x4940, 0x94, 0x7e, 0xb9, 0xb3, 0x9f, 0x4a, 0xfa, 0xf7)
#define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f)
#define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23)
@@ -653,6 +397,7 @@ void efi_native_runtime_setup(void);
#define EFI_CERT_SHA256_GUID EFI_GUID(0xc1c41626, 0x504c, 0x4092, 0xac, 0xa9, 0x41, 0xf9, 0x36, 0x93, 0x43, 0x28)
#define EFI_CERT_X509_GUID EFI_GUID(0xa5c059a1, 0x94e4, 0x4aa7, 0x87, 0xb5, 0xab, 0x15, 0x5c, 0x2b, 0xf0, 0x72)
#define EFI_CERT_X509_SHA256_GUID EFI_GUID(0x3bd2a492, 0x96c0, 0x4079, 0xb4, 0x20, 0xfc, 0xf9, 0x8e, 0xf1, 0x03, 0xed)
+#define EFI_CC_BLOB_GUID EFI_GUID(0x067b1f5f, 0xcf26, 0x44c5, 0x85, 0x54, 0x93, 0xd7, 0x77, 0x91, 0x2d, 0x42)
/*
* This GUID is used to pass to the kernel proper the struct screen_info
@@ -660,14 +405,34 @@ void efi_native_runtime_setup(void);
* associated with ConOut
*/
#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
+#define LINUX_EFI_ARM_CPU_STATE_TABLE_GUID EFI_GUID(0xef79e4aa, 0x3c3d, 0x4989, 0xb9, 0x02, 0x07, 0xa9, 0x43, 0xe5, 0x50, 0xd2)
#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
#define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa)
#define LINUX_EFI_TPM_FINAL_LOG_GUID EFI_GUID(0x1e2ed096, 0x30e2, 0x4254, 0xbd, 0x89, 0x86, 0x3b, 0xbe, 0xf8, 0x23, 0x25)
#define LINUX_EFI_MEMRESERVE_TABLE_GUID EFI_GUID(0x888eb0c6, 0x8ede, 0x4ff5, 0xa8, 0xf0, 0x9a, 0xee, 0x5c, 0xb9, 0x77, 0xc2)
+#define LINUX_EFI_INITRD_MEDIA_GUID EFI_GUID(0x5568e427, 0x68fc, 0x4f3d, 0xac, 0x74, 0xca, 0x55, 0x52, 0x31, 0xcc, 0x68)
+#define LINUX_EFI_ZBOOT_MEDIA_GUID EFI_GUID(0xe565a30d, 0x47da, 0x4dbd, 0xb3, 0x54, 0x9b, 0xb5, 0xc8, 0x4f, 0x8b, 0xe2)
+#define LINUX_EFI_MOK_VARIABLE_TABLE_GUID EFI_GUID(0xc451ed2b, 0x9694, 0x45d3, 0xba, 0xba, 0xed, 0x9f, 0x89, 0x88, 0xa3, 0x89)
+#define LINUX_EFI_COCO_SECRET_AREA_GUID EFI_GUID(0xadf956ad, 0xe98c, 0x484c, 0xae, 0x11, 0xb5, 0x1c, 0x7d, 0x33, 0x64, 0x47)
+#define LINUX_EFI_BOOT_MEMMAP_GUID EFI_GUID(0x800f683f, 0xd08b, 0x423a, 0xa2, 0x93, 0x96, 0x5c, 0x3c, 0x6f, 0xe2, 0xb4)
+
+#define RISCV_EFI_BOOT_PROTOCOL_GUID EFI_GUID(0xccd15fec, 0x6f73, 0x4eec, 0x83, 0x95, 0x3e, 0x69, 0xe4, 0xb9, 0x40, 0xbf)
+
+/*
+ * This GUID may be installed onto the kernel image's handle as a NULL protocol
+ * to signal to the stub that the placement of the image should be respected,
+ * and moving the image in physical memory is undesirable. To ensure
+ * compatibility with 64k pages kernels with virtually mapped stacks, and to
+ * avoid defeating physical randomization, this protocol should only be
+ * installed if the image was placed at a randomized 128k aligned address in
+ * memory.
+ */
+#define LINUX_EFI_LOADED_IMAGE_FIXED_GUID EFI_GUID(0xf5a37b6d, 0x3344, 0x42a5, 0xb6, 0xbb, 0x97, 0x86, 0x48, 0xc1, 0x89, 0x0a)
/* OEM GUIDs */
#define DELLEMC_EFI_RCI2_TABLE_GUID EFI_GUID(0x2d9f28a2, 0xa886, 0x456a, 0x97, 0xa8, 0xf1, 0x1e, 0xf2, 0x4f, 0xf4, 0x55)
+#define AMD_SEV_MEM_ENCRYPT_GUID EFI_GUID(0x0cf29b71, 0x9e51, 0x433a, 0xa3, 0xb7, 0x81, 0xf3, 0xab, 0x16, 0xb8, 0x75)
typedef struct {
efi_guid_t guid;
@@ -689,11 +454,12 @@ typedef union {
typedef struct {
efi_guid_t guid;
- const char *name;
unsigned long *ptr;
+ const char name[16];
} efi_config_table_type_t;
#define EFI_SYSTEM_TABLE_SIGNATURE ((u64)0x5453595320494249ULL)
+#define EFI_DXE_SERVICES_TABLE_SIGNATURE ((u64)0x565245535f455844ULL)
#define EFI_2_30_SYSTEM_TABLE_REVISION ((2 << 16) | (30))
#define EFI_2_20_SYSTEM_TABLE_REVISION ((2 << 16) | (20))
@@ -736,6 +502,7 @@ typedef struct {
u32 tables;
} efi_system_table_32_t;
+typedef union efi_simple_text_input_protocol efi_simple_text_input_protocol_t;
typedef union efi_simple_text_output_protocol efi_simple_text_output_protocol_t;
typedef union {
@@ -744,7 +511,7 @@ typedef union {
unsigned long fw_vendor; /* physical addr of CHAR16 vendor string */
u32 fw_revision;
unsigned long con_in_handle;
- unsigned long con_in;
+ efi_simple_text_input_protocol_t *con_in;
unsigned long con_out_handle;
efi_simple_text_output_protocol_t *con_out;
unsigned long stderr_handle;
@@ -757,6 +524,15 @@ typedef union {
efi_system_table_32_t mixed_mode;
} efi_system_table_t;
+struct efi_boot_memmap {
+ unsigned long map_size;
+ unsigned long desc_size;
+ u32 desc_ver;
+ unsigned long map_key;
+ unsigned long buff_size;
+ efi_memory_desc_t map[];
+};
+
/*
* Architecture independent structure for describing a memory map for the
* benefit of efi_memmap_init_early(), and for passing context between
@@ -788,74 +564,6 @@ struct efi_mem_range {
u64 attribute;
};
-struct efi_fdt_params {
- u64 system_table;
- u64 mmap;
- u32 mmap_size;
- u32 desc_size;
- u32 desc_ver;
-};
-
-typedef struct {
- u32 revision;
- efi_handle_t parent_handle;
- efi_system_table_t *system_table;
- efi_handle_t device_handle;
- void *file_path;
- void *reserved;
- u32 load_options_size;
- void *load_options;
- void *image_base;
- __aligned_u64 image_size;
- unsigned int image_code_type;
- unsigned int image_data_type;
- efi_status_t ( __efiapi *unload)(efi_handle_t image_handle);
-} efi_loaded_image_t;
-
-typedef struct {
- u64 size;
- u64 file_size;
- u64 phys_size;
- efi_time_t create_time;
- efi_time_t last_access_time;
- efi_time_t modification_time;
- __aligned_u64 attribute;
- efi_char16_t filename[1];
-} efi_file_info_t;
-
-typedef struct efi_file_handle efi_file_handle_t;
-
-struct efi_file_handle {
- u64 revision;
- efi_status_t (__efiapi *open)(efi_file_handle_t *,
- efi_file_handle_t **,
- efi_char16_t *, u64, u64);
- efi_status_t (__efiapi *close)(efi_file_handle_t *);
- void *delete;
- efi_status_t (__efiapi *read)(efi_file_handle_t *,
- unsigned long *, void *);
- void *write;
- void *get_position;
- void *set_position;
- efi_status_t (__efiapi *get_info)(efi_file_handle_t *,
- efi_guid_t *, unsigned long *,
- void *);
- void *set_info;
- void *flush;
-};
-
-typedef struct efi_file_io_interface efi_file_io_interface_t;
-
-struct efi_file_io_interface {
- u64 revision;
- int (__efiapi *open_volume)(efi_file_io_interface_t *,
- efi_file_handle_t **);
-};
-
-#define EFI_FILE_MODE_READ 0x0000000000000001
-#define EFI_FILE_MODE_WRITE 0x0000000000000002
-#define EFI_FILE_MODE_CREATE 0x8000000000000000
-
typedef struct {
u32 version;
u32 length;
@@ -865,6 +573,14 @@ typedef struct {
#define EFI_PROPERTIES_TABLE_VERSION 0x00010000
#define EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA 0x1
+typedef struct {
+ u16 version;
+ u16 length;
+ u32 runtime_services_supported;
+} efi_rt_properties_table_t;
+
+#define EFI_RT_PROPERTIES_TABLE_VERSION 0x1
+
#define EFI_INVALID_TABLE_ADDR (~0UL)
typedef struct {
@@ -896,48 +612,65 @@ typedef struct {
efi_time_t time_of_revocation;
} efi_cert_x509_sha256_t;
+extern unsigned long __ro_after_init efi_rng_seed; /* RNG Seed table */
+
/*
* All runtime access to EFI goes through this structure:
*/
extern struct efi {
- efi_system_table_t *systab; /* EFI system table */
- unsigned int runtime_version; /* Runtime services version */
- unsigned long mps; /* MPS table */
- unsigned long acpi; /* ACPI table (IA64 ext 0.71) */
- unsigned long acpi20; /* ACPI table (ACPI 2.0) */
- unsigned long smbios; /* SMBIOS table (32 bit entry point) */
- unsigned long smbios3; /* SMBIOS table (64 bit entry point) */
- unsigned long boot_info; /* boot info table */
- unsigned long hcdp; /* HCDP table */
- unsigned long uga; /* UGA table */
- unsigned long fw_vendor; /* fw_vendor */
- unsigned long runtime; /* runtime table */
- unsigned long config_table; /* config tables */
- unsigned long esrt; /* ESRT table */
- unsigned long properties_table; /* properties table */
- unsigned long mem_attr_table; /* memory attributes table */
- unsigned long rng_seed; /* UEFI firmware random seed */
- unsigned long tpm_log; /* TPM2 Event Log table */
- unsigned long tpm_final_log; /* TPM2 Final Events Log table */
- unsigned long mem_reserve; /* Linux EFI memreserve table */
- efi_get_time_t *get_time;
- efi_set_time_t *set_time;
- efi_get_wakeup_time_t *get_wakeup_time;
- efi_set_wakeup_time_t *set_wakeup_time;
- efi_get_variable_t *get_variable;
- efi_get_next_variable_t *get_next_variable;
- efi_set_variable_t *set_variable;
- efi_set_variable_t *set_variable_nonblocking;
- efi_query_variable_info_t *query_variable_info;
- efi_query_variable_info_t *query_variable_info_nonblocking;
- efi_update_capsule_t *update_capsule;
- efi_query_capsule_caps_t *query_capsule_caps;
- efi_get_next_high_mono_count_t *get_next_high_mono_count;
- efi_reset_system_t *reset_system;
- struct efi_memory_map memmap;
- unsigned long flags;
+ const efi_runtime_services_t *runtime; /* EFI runtime services table */
+ unsigned int runtime_version; /* Runtime services version */
+ unsigned int runtime_supported_mask;
+
+ unsigned long acpi; /* ACPI table (IA64 ext 0.71) */
+ unsigned long acpi20; /* ACPI table (ACPI 2.0) */
+ unsigned long smbios; /* SMBIOS table (32 bit entry point) */
+ unsigned long smbios3; /* SMBIOS table (64 bit entry point) */
+ unsigned long esrt; /* ESRT table */
+ unsigned long tpm_log; /* TPM2 Event Log table */
+ unsigned long tpm_final_log; /* TPM2 Final Events Log table */
+ unsigned long mokvar_table; /* MOK variable config table */
+ unsigned long coco_secret; /* Confidential computing secret table */
+
+ efi_get_time_t *get_time;
+ efi_set_time_t *set_time;
+ efi_get_wakeup_time_t *get_wakeup_time;
+ efi_set_wakeup_time_t *set_wakeup_time;
+ efi_get_variable_t *get_variable;
+ efi_get_next_variable_t *get_next_variable;
+ efi_set_variable_t *set_variable;
+ efi_set_variable_t *set_variable_nonblocking;
+ efi_query_variable_info_t *query_variable_info;
+ efi_query_variable_info_t *query_variable_info_nonblocking;
+ efi_update_capsule_t *update_capsule;
+ efi_query_capsule_caps_t *query_capsule_caps;
+ efi_get_next_high_mono_count_t *get_next_high_mono_count;
+ efi_reset_system_t *reset_system;
+
+ struct efi_memory_map memmap;
+ unsigned long flags;
} efi;
+#define EFI_RT_SUPPORTED_GET_TIME 0x0001
+#define EFI_RT_SUPPORTED_SET_TIME 0x0002
+#define EFI_RT_SUPPORTED_GET_WAKEUP_TIME 0x0004
+#define EFI_RT_SUPPORTED_SET_WAKEUP_TIME 0x0008
+#define EFI_RT_SUPPORTED_GET_VARIABLE 0x0010
+#define EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME 0x0020
+#define EFI_RT_SUPPORTED_SET_VARIABLE 0x0040
+#define EFI_RT_SUPPORTED_SET_VIRTUAL_ADDRESS_MAP 0x0080
+#define EFI_RT_SUPPORTED_CONVERT_POINTER 0x0100
+#define EFI_RT_SUPPORTED_GET_NEXT_HIGH_MONOTONIC_COUNT 0x0200
+#define EFI_RT_SUPPORTED_RESET_SYSTEM 0x0400
+#define EFI_RT_SUPPORTED_UPDATE_CAPSULE 0x0800
+#define EFI_RT_SUPPORTED_QUERY_CAPSULE_CAPABILITIES 0x1000
+#define EFI_RT_SUPPORTED_QUERY_VARIABLE_INFO 0x2000
+
+#define EFI_RT_SUPPORTED_ALL 0x3fff
+
+#define EFI_RT_SUPPORTED_TIME_SERVICES 0x000f
+#define EFI_RT_SUPPORTED_VARIABLE_SERVICES 0x0070
+
extern struct mm_struct efi_mm;
static inline int
@@ -954,11 +687,11 @@ efi_guid_to_str(efi_guid_t *guid, char *out)
}
extern void efi_init (void);
-extern void *efi_get_pal_addr (void);
-extern void efi_map_pal_code (void);
-extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
-extern void efi_gettimeofday (struct timespec64 *ts);
+#ifdef CONFIG_EFI
extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
+#else
+static inline void efi_enter_virtual_mode (void) {}
+#endif
#ifdef CONFIG_X86
extern efi_status_t efi_query_variable_store(u32 attributes,
unsigned long size,
@@ -987,14 +720,18 @@ extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap,
void *buf, struct efi_mem_range *mem);
-extern int efi_config_init(efi_config_table_type_t *arch_tables);
#ifdef CONFIG_EFI_ESRT
extern void __init efi_esrt_init(void);
#else
static inline void efi_esrt_init(void) { }
#endif
-extern int efi_config_parse_tables(void *config_tables, int count, int sz,
- efi_config_table_type_t *arch_tables);
+extern int efi_config_parse_tables(const efi_config_table_t *config_tables,
+ int count,
+ const efi_config_table_type_t *arch_tables);
+extern int efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
+ int min_major_version);
+extern void efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
+ unsigned long fw_vendor);
extern u64 efi_get_iobase (void);
extern int efi_mem_type(unsigned long phys_addr);
extern u64 efi_mem_attributes (unsigned long phys_addr);
@@ -1006,7 +743,7 @@ extern void efi_mem_reserve(phys_addr_t addr, u64 size);
extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource, struct resource *bss_resource);
-extern int efi_get_fdt_params(struct efi_fdt_params *params);
+extern u64 efi_get_fdt_params(struct efi_memory_map_data *data);
extern struct kobject *efi_kobj;
extern int efi_reboot_quirk_mode;
@@ -1018,6 +755,8 @@ extern void __init efi_fake_memmap(void);
static inline void efi_fake_memmap(void) { }
#endif
+extern unsigned long efi_mem_attr_table;
+
/*
* efi_memattr_perm_setter - arch specific callback function passed into
* efi_memattr_apply_permissions() that updates the
@@ -1124,6 +863,7 @@ extern int __init efi_setup_pcdp_console(char *);
#define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */
#define EFI_MEM_ATTR 10 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */
#define EFI_MEM_NO_SOFT_RESERVE 11 /* Is the kernel configured to ignore soft reservations? */
+#define EFI_PRESERVE_BS_REGIONS 12 /* Are EFI boot-services memory segments available? */
#ifdef CONFIG_EFI
/*
@@ -1142,6 +882,12 @@ static inline bool __pure efi_soft_reserve_enabled(void)
return IS_ENABLED(CONFIG_EFI_SOFT_RESERVE)
&& __efi_soft_reserve_enabled();
}
+
+static inline bool efi_rt_services_supported(unsigned int mask)
+{
+ return (efi.runtime_supported_mask & mask) == mask;
+}
+extern void efi_find_mirror(void);
#else
static inline bool efi_enabled(int feature)
{
@@ -1150,16 +896,17 @@ static inline bool efi_enabled(int feature)
static inline void
efi_reboot(enum reboot_mode reboot_mode, const char *__unused) {}
-static inline bool
-efi_capsule_pending(int *reset_type)
+static inline bool efi_soft_reserve_enabled(void)
{
return false;
}
-static inline bool efi_soft_reserve_enabled(void)
+static inline bool efi_rt_services_supported(unsigned int mask)
{
return false;
}
+
+static inline void efi_find_mirror(void) {}
#endif
extern int efi_status_to_err(efi_status_t status);
@@ -1175,7 +922,7 @@ extern int efi_status_to_err(efi_status_t status);
#define EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS 0x0000000000000020
#define EFI_VARIABLE_APPEND_WRITE 0x0000000000000040
-#define EFI_VARIABLE_MASK (EFI_VARIABLE_NON_VOLATILE | \
+#define EFI_VARIABLE_MASK (EFI_VARIABLE_NON_VOLATILE | \
EFI_VARIABLE_BOOTSERVICE_ACCESS | \
EFI_VARIABLE_RUNTIME_ACCESS | \
EFI_VARIABLE_HARDWARE_ERROR_RECORD | \
@@ -1189,13 +936,6 @@ extern int efi_status_to_err(efi_status_t status);
#define EFI_VARIABLE_GUID_LEN UUID_STRING_LEN
/*
- * The type of search to perform when calling boottime->locate_handle
- */
-#define EFI_LOCATE_ALL_HANDLES 0
-#define EFI_LOCATE_BY_REGISTER_NOTIFY 1
-#define EFI_LOCATE_BY_PROTOCOL 2
-
-/*
* EFI Device Path information
*/
#define EFI_DEV_HW 0x01
@@ -1227,6 +967,7 @@ extern int efi_status_to_err(efi_status_t status);
#define EFI_DEV_MEDIA_VENDOR 3
#define EFI_DEV_MEDIA_FILE 4
#define EFI_DEV_MEDIA_PROTOCOL 5
+#define EFI_DEV_MEDIA_REL_OFFSET 8
#define EFI_DEV_BIOS_BOOT 0x05
#define EFI_DEV_END_PATH 0x7F
#define EFI_DEV_END_PATH2 0xFF
@@ -1234,30 +975,55 @@ extern int efi_status_to_err(efi_status_t status);
#define EFI_DEV_END_ENTIRE 0xFF
struct efi_generic_dev_path {
- u8 type;
- u8 sub_type;
- u16 length;
-} __attribute ((packed));
+ u8 type;
+ u8 sub_type;
+ u16 length;
+} __packed;
+
+struct efi_acpi_dev_path {
+ struct efi_generic_dev_path header;
+ u32 hid;
+ u32 uid;
+} __packed;
+
+struct efi_pci_dev_path {
+ struct efi_generic_dev_path header;
+ u8 fn;
+ u8 dev;
+} __packed;
+
+struct efi_vendor_dev_path {
+ struct efi_generic_dev_path header;
+ efi_guid_t vendorguid;
+ u8 vendordata[];
+} __packed;
+
+struct efi_rel_offset_dev_path {
+ struct efi_generic_dev_path header;
+ u32 reserved;
+ u64 starting_offset;
+ u64 ending_offset;
+} __packed;
+
+struct efi_mem_mapped_dev_path {
+ struct efi_generic_dev_path header;
+ u32 memory_type;
+ u64 starting_addr;
+ u64 ending_addr;
+} __packed;
struct efi_dev_path {
- u8 type; /* can be replaced with unnamed */
- u8 sub_type; /* struct efi_generic_dev_path; */
- u16 length; /* once we've moved to -std=c11 */
union {
- struct {
- u32 hid;
- u32 uid;
- } acpi;
- struct {
- u8 fn;
- u8 dev;
- } pci;
+ struct efi_generic_dev_path header;
+ struct efi_acpi_dev_path acpi;
+ struct efi_pci_dev_path pci;
+ struct efi_vendor_dev_path vendor;
+ struct efi_rel_offset_dev_path rel_offset;
};
-} __attribute ((packed));
+} __packed;
-#if IS_ENABLED(CONFIG_EFI_DEV_PATH_PARSER)
-struct device *efi_get_device_by_path(struct efi_dev_path **node, size_t *len);
-#endif
+struct device *efi_get_device_by_path(const struct efi_dev_path **node,
+ size_t *len);
static inline void memrange_efi_to_native(u64 *addr, u64 *npages)
{
@@ -1295,158 +1061,32 @@ struct efivars {
#define EFI_VAR_NAME_LEN 1024
-struct efi_variable {
- efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)];
- efi_guid_t VendorGuid;
- unsigned long DataSize;
- __u8 Data[1024];
- efi_status_t Status;
- __u32 Attributes;
-} __attribute__((packed));
-
-struct efivar_entry {
- struct efi_variable var;
- struct list_head list;
- struct kobject kobj;
- bool scanning;
- bool deleting;
-};
-
-union efi_simple_text_output_protocol {
- struct {
- void *reset;
- efi_status_t (__efiapi *output_string)(efi_simple_text_output_protocol_t *,
- efi_char16_t *);
- void *test_string;
- };
- struct {
- u32 reset;
- u32 output_string;
- u32 test_string;
- } mixed_mode;
-};
-
-#define PIXEL_RGB_RESERVED_8BIT_PER_COLOR 0
-#define PIXEL_BGR_RESERVED_8BIT_PER_COLOR 1
-#define PIXEL_BIT_MASK 2
-#define PIXEL_BLT_ONLY 3
-#define PIXEL_FORMAT_MAX 4
-
-typedef struct {
- u32 red_mask;
- u32 green_mask;
- u32 blue_mask;
- u32 reserved_mask;
-} efi_pixel_bitmask_t;
-
-typedef struct {
- u32 version;
- u32 horizontal_resolution;
- u32 vertical_resolution;
- int pixel_format;
- efi_pixel_bitmask_t pixel_information;
- u32 pixels_per_scan_line;
-} efi_graphics_output_mode_info_t;
-
-typedef union efi_graphics_output_protocol_mode efi_graphics_output_protocol_mode_t;
-
-union efi_graphics_output_protocol_mode {
- struct {
- u32 max_mode;
- u32 mode;
- efi_graphics_output_mode_info_t *info;
- unsigned long size_of_info;
- efi_physical_addr_t frame_buffer_base;
- unsigned long frame_buffer_size;
- };
- struct {
- u32 max_mode;
- u32 mode;
- u32 info;
- u32 size_of_info;
- u64 frame_buffer_base;
- u32 frame_buffer_size;
- } mixed_mode;
-};
-
-typedef union efi_graphics_output_protocol efi_graphics_output_protocol_t;
-
-union efi_graphics_output_protocol {
- struct {
- void *query_mode;
- void *set_mode;
- void *blt;
- efi_graphics_output_protocol_mode_t *mode;
- };
- struct {
- u32 query_mode;
- u32 set_mode;
- u32 blt;
- u32 mode;
- } mixed_mode;
-};
-
-extern struct list_head efivar_sysfs_list;
-
-static inline void
-efivar_unregister(struct efivar_entry *var)
-{
- kobject_put(&var->kobj);
-}
-
int efivars_register(struct efivars *efivars,
const struct efivar_operations *ops,
struct kobject *kobject);
int efivars_unregister(struct efivars *efivars);
struct kobject *efivars_kobject(void);
-int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
- void *data, bool duplicates, struct list_head *head);
-
-int efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
-int efivar_entry_remove(struct efivar_entry *entry);
-
-int __efivar_entry_delete(struct efivar_entry *entry);
-int efivar_entry_delete(struct efivar_entry *entry);
-
-int efivar_entry_size(struct efivar_entry *entry, unsigned long *size);
-int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
- unsigned long *size, void *data);
-int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
- unsigned long *size, void *data);
-int efivar_entry_set(struct efivar_entry *entry, u32 attributes,
- unsigned long size, void *data, struct list_head *head);
-int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
- unsigned long *size, void *data, bool *set);
-int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
- bool block, unsigned long size, void *data);
+int efivar_supports_writes(void);
-int efivar_entry_iter_begin(void);
-void efivar_entry_iter_end(void);
+int efivar_lock(void);
+int efivar_trylock(void);
+void efivar_unlock(void);
-int __efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
- struct list_head *head, void *data,
- struct efivar_entry **prev);
-int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
- struct list_head *head, void *data);
+efi_status_t efivar_get_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 *attr, unsigned long *size, void *data);
-struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
- struct list_head *head, bool remove);
+efi_status_t efivar_get_next_variable(unsigned long *name_size,
+ efi_char16_t *name, efi_guid_t *vendor);
-bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
- unsigned long data_size);
-bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
- size_t len);
+efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size,
+ void *data, bool nonblocking);
-extern struct work_struct efivar_work;
-void efivar_run_worker(void);
+efi_status_t efivar_set_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size, void *data);
-#if defined(CONFIG_EFI_VARS) || defined(CONFIG_EFI_VARS_MODULE)
-int efivars_sysfs_init(void);
-
-#define EFIVARS_DATA_SIZE_MAX 1024
-
-#endif /* CONFIG_EFI_VARS */
+#if IS_ENABLED(CONFIG_EFI_CAPSULE_LOADER)
extern bool efi_capsule_pending(int *reset_type);
extern int efi_capsule_supported(efi_guid_t guid, u32 flags,
@@ -1454,6 +1094,9 @@ extern int efi_capsule_supported(efi_guid_t guid, u32 flags,
extern int efi_capsule_update(efi_capsule_header_t *capsule,
phys_addr_t *pages);
+#else
+static inline bool efi_capsule_pending(int *reset_type) { return false; }
+#endif
#ifdef CONFIG_EFI_RUNTIME_MAP
int efi_runtime_map_init(struct kobject *);
@@ -1483,52 +1126,6 @@ static inline int efi_runtime_map_copy(void *buf, size_t bufsz)
#endif
-/* prototypes shared between arch specific and generic stub code */
-
-void efi_printk(char *str);
-
-void efi_free(unsigned long size, unsigned long addr);
-
-char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len);
-
-efi_status_t efi_get_memory_map(struct efi_boot_memmap *map);
-
-efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
- unsigned long *addr, unsigned long min);
-
-static inline
-efi_status_t efi_low_alloc(unsigned long size, unsigned long align,
- unsigned long *addr)
-{
- /*
- * Don't allocate at 0x0. It will confuse code that
- * checks pointers against NULL. Skip the first 8
- * bytes so we start at a nice even number.
- */
- return efi_low_alloc_above(size, align, addr, 0x8);
-}
-
-efi_status_t efi_high_alloc(unsigned long size, unsigned long align,
- unsigned long *addr, unsigned long max);
-
-efi_status_t efi_relocate_kernel(unsigned long *image_addr,
- unsigned long image_size,
- unsigned long alloc_size,
- unsigned long preferred_addr,
- unsigned long alignment,
- unsigned long min_addr);
-
-efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
- char *cmd_line, char *option_string,
- unsigned long max_addr,
- unsigned long *load_addr,
- unsigned long *load_size);
-
-efi_status_t efi_parse_options(char const *cmdline);
-
-efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
- unsigned long size);
-
#ifdef CONFIG_EFI
extern bool efi_runtime_disabled(void);
#else
@@ -1544,18 +1141,38 @@ enum efi_secureboot_mode {
efi_secureboot_mode_disabled,
efi_secureboot_mode_enabled,
};
-enum efi_secureboot_mode efi_get_secureboot(void);
-#ifdef CONFIG_RESET_ATTACK_MITIGATION
-void efi_enable_reset_attack_mitigation(void);
+static inline
+enum efi_secureboot_mode efi_get_secureboot_mode(efi_get_variable_t *get_var)
+{
+ u8 secboot, setupmode = 0;
+ efi_status_t status;
+ unsigned long size;
+
+ size = sizeof(secboot);
+ status = get_var(L"SecureBoot", &EFI_GLOBAL_VARIABLE_GUID, NULL, &size,
+ &secboot);
+ if (status == EFI_NOT_FOUND)
+ return efi_secureboot_mode_disabled;
+ if (status != EFI_SUCCESS)
+ return efi_secureboot_mode_unknown;
+
+ size = sizeof(setupmode);
+ get_var(L"SetupMode", &EFI_GLOBAL_VARIABLE_GUID, NULL, &size, &setupmode);
+ if (secboot == 0 || setupmode == 1)
+ return efi_secureboot_mode_disabled;
+ return efi_secureboot_mode_enabled;
+}
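For illustration only (not part of the patch): the helper above is parameterized on the GetVariable implementation, so it can be used both from the stub and at runtime. A hypothetical runtime caller, assuming CONFIG_EFI and working variable services:

#include <linux/efi.h>

static void report_secureboot(void)
{
	switch (efi_get_secureboot_mode(efi.get_variable)) {
	case efi_secureboot_mode_enabled:
		pr_info("secure boot enabled\n");
		break;
	case efi_secureboot_mode_disabled:
		pr_info("secure boot disabled\n");
		break;
	default:	/* unknown/unset */
		pr_warn("secure boot state could not be determined\n");
	}
}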
+
+#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
+void efi_check_for_embedded_firmwares(void);
#else
-static inline void
-efi_enable_reset_attack_mitigation(void) { }
+static inline void efi_check_for_embedded_firmwares(void) { }
#endif
efi_status_t efi_random_get_seed(void);
-void efi_retrieve_tpm2_eventlog(void);
+#define arch_efi_call_virt(p, f, args...) ((p)->f(args))
/*
* Arch code can implement the following three template macros, avoiding
@@ -1606,16 +1223,7 @@ void efi_retrieve_tpm2_eventlog(void);
arch_efi_call_virt_teardown(); \
})
-typedef efi_status_t (*efi_exit_boot_map_processing)(
- struct efi_boot_memmap *map,
- void *priv);
-
-efi_status_t efi_exit_boot_services(void *handle,
- struct efi_boot_memmap *map,
- void *priv,
- efi_exit_boot_map_processing priv_func);
-
-#define EFI_RANDOM_SEED_SIZE 64U
+#define EFI_RANDOM_SEED_SIZE 32U // BLAKE2S_HASH_SIZE
struct linux_efi_random_seed {
u32 size;
@@ -1692,15 +1300,61 @@ struct linux_efi_memreserve {
struct {
phys_addr_t base;
phys_addr_t size;
- } entry[0];
+ } entry[];
};
-#define EFI_MEMRESERVE_SIZE(count) (sizeof(struct linux_efi_memreserve) + \
- (count) * sizeof(((struct linux_efi_memreserve *)0)->entry[0]))
-
#define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \
- / sizeof(((struct linux_efi_memreserve *)0)->entry[0]))
+ / sizeof_field(struct linux_efi_memreserve, entry[0]))
+
+void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size);
+
+char *efi_systab_show_arch(char *str);
+
+/*
+ * The LINUX_EFI_MOK_VARIABLE_TABLE_GUID config table can be provided
+ * to the kernel by an EFI boot loader. The table contains a packed
+ * sequence of these entries, one for each named MOK variable.
+ * The sequence is terminated by an entry with a completely NULL
+ * name and 0 data size.
+ */
+struct efi_mokvar_table_entry {
+ char name[256];
+ u64 data_size;
+ u8 data[];
+} __packed;
+
+#ifdef CONFIG_LOAD_UEFI_KEYS
+extern void __init efi_mokvar_table_init(void);
+extern struct efi_mokvar_table_entry *efi_mokvar_entry_next(
+ struct efi_mokvar_table_entry **mokvar_entry);
+extern struct efi_mokvar_table_entry *efi_mokvar_entry_find(const char *name);
+#else
+static inline void efi_mokvar_table_init(void) { }
+static inline struct efi_mokvar_table_entry *efi_mokvar_entry_next(
+ struct efi_mokvar_table_entry **mokvar_entry)
+{
+ return NULL;
+}
+static inline struct efi_mokvar_table_entry *efi_mokvar_entry_find(
+ const char *name)
+{
+ return NULL;
+}
+#endif
+
+extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
+
+struct linux_efi_coco_secret_area {
+ u64 base_pa;
+ u64 size;
+};
+
+struct linux_efi_initrd {
+ unsigned long base;
+ unsigned long size;
+};
-void efi_pci_disable_bridge_busmaster(void);
+/* Header of a populated EFI secret area */
+#define EFI_SECRET_TABLE_HEADER_GUID EFI_GUID(0x1e74f542, 0x71dd, 0x4d66, 0x96, 0x3e, 0xef, 0x42, 0x87, 0xff, 0x17, 0x3b)
#endif /* _LINUX_EFI_H */
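A sketch (not in the patch) of how the new efivar_lock()/efivar_get_variable() pairing is meant to replace the removed efivar_entry machinery; the wrapper below and its EFI_NOT_READY error mapping are hypothetical:

#include <linux/efi.h>

static efi_status_t read_efi_var(efi_char16_t *name, efi_guid_t *vendor,
				 unsigned long *size, void *buf)
{
	efi_status_t status;
	u32 attr;

	if (efivar_lock())
		return EFI_NOT_READY;	/* hypothetical error mapping */
	status = efivar_get_variable(name, vendor, &attr, size, buf);
	efivar_unlock();
	return status;
}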
diff --git a/include/linux/efi_embedded_fw.h b/include/linux/efi_embedded_fw.h
new file mode 100644
index 000000000000..a97a12bb2c9e
--- /dev/null
+++ b/include/linux/efi_embedded_fw.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_EFI_EMBEDDED_FW_H
+#define _LINUX_EFI_EMBEDDED_FW_H
+
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+
+#define EFI_EMBEDDED_FW_PREFIX_LEN 8
+
+/*
+ * This struct is private to the efi-embedded fw implementation.
+ * It is in this header only for use by lib/test_firmware.c!
+ */
+struct efi_embedded_fw {
+ struct list_head list;
+ const char *name;
+ const u8 *data;
+ size_t length;
+};
+
+/**
+ * struct efi_embedded_fw_desc - This struct is used by the EFI embedded-fw
+ * code to search for embedded firmware images.
+ *
+ * @name: Name to register the firmware with if found
+ * @prefix: First 8 bytes of the firmware
+ * @length: Length of the firmware in bytes including prefix
+ * @sha256: SHA256 of the firmware
+ */
+struct efi_embedded_fw_desc {
+ const char *name;
+ u8 prefix[EFI_EMBEDDED_FW_PREFIX_LEN];
+ u32 length;
+ u8 sha256[32];
+};
+
+extern const struct dmi_system_id touchscreen_dmi_table[];
+
+int efi_get_embedded_fw(const char *name, const u8 **dat, size_t *sz);
+
+#endif
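To make the descriptor layout concrete, a hypothetical table entry might look as follows; the name, prefix bytes, length and hash below are all placeholders:

#include <linux/efi_embedded_fw.h>

static const struct efi_embedded_fw_desc example_fw_desc = {
	.name	= "example/ts-fw.bin",			/* placeholder */
	.prefix	= { 0x55, 0xaa, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 },
	.length	= 16384,				/* placeholder */
	.sha256	= { 0 },	/* 32-byte digest of the blob goes here */
};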
diff --git a/include/linux/elf.h b/include/linux/elf.h
index e3649b3e970e..c9a46c4e183b 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_ELF_H
#define _LINUX_ELF_H
+#include <linux/types.h>
#include <asm/elf.h>
#include <uapi/linux/elf.h>
@@ -21,6 +22,19 @@
SET_PERSONALITY(ex)
#endif
+#ifndef START_THREAD
+#define START_THREAD(elf_ex, regs, elf_entry, start_stack) \
+ start_thread(regs, elf_entry, start_stack)
+#endif
+
+#if defined(ARCH_HAS_SETUP_ADDITIONAL_PAGES) && !defined(ARCH_SETUP_ADDITIONAL_PAGES)
+#define ARCH_SETUP_ADDITIONAL_PAGES(bprm, ex, interpreter) \
+ arch_setup_additional_pages(bprm, interpreter)
+#endif
+
+#define ELF32_GNU_PROPERTY_ALIGN 4
+#define ELF64_GNU_PROPERTY_ALIGN 8
+
#if ELF_CLASS == ELFCLASS32
extern Elf32_Dyn _DYNAMIC [];
@@ -31,6 +45,7 @@ extern Elf32_Dyn _DYNAMIC [];
#define elf_addr_t Elf32_Off
#define Elf_Half Elf32_Half
#define Elf_Word Elf32_Word
+#define ELF_GNU_PROPERTY_ALIGN ELF32_GNU_PROPERTY_ALIGN
#else
@@ -42,6 +57,7 @@ extern Elf64_Dyn _DYNAMIC [];
#define elf_addr_t Elf64_Off
#define Elf_Half Elf64_Half
#define Elf_Word Elf64_Word
+#define ELF_GNU_PROPERTY_ALIGN ELF64_GNU_PROPERTY_ALIGN
#endif
@@ -56,4 +72,41 @@ static inline int elf_coredump_extra_notes_write(struct coredump_params *cprm) {
extern int elf_coredump_extra_notes_size(void);
extern int elf_coredump_extra_notes_write(struct coredump_params *cprm);
#endif
+
+/*
+ * NT_GNU_PROPERTY_TYPE_0 header:
+ * Keep this internal until/unless there is an agreed UAPI definition.
+ * pr_type values (GNU_PROPERTY_*) are public and defined in the UAPI header.
+ */
+struct gnu_property {
+ u32 pr_type;
+ u32 pr_datasz;
+};
+
+struct arch_elf_state;
+
+#ifndef CONFIG_ARCH_USE_GNU_PROPERTY
+static inline int arch_parse_elf_property(u32 type, const void *data,
+ size_t datasz, bool compat,
+ struct arch_elf_state *arch)
+{
+ return 0;
+}
+#else
+extern int arch_parse_elf_property(u32 type, const void *data, size_t datasz,
+ bool compat, struct arch_elf_state *arch);
+#endif
+
+#ifdef CONFIG_ARCH_HAVE_ELF_PROT
+int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
+ bool has_interp, bool is_interp);
+#else
+static inline int arch_elf_adjust_prot(int prot,
+ const struct arch_elf_state *state,
+ bool has_interp, bool is_interp)
+{
+ return prot;
+}
+#endif
+
#endif /* _LINUX_ELF_H */
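As an illustrative sketch (not from the patch), a parser of NT_GNU_PROPERTY_TYPE_0 data would advance by the gnu_property header plus the payload rounded up to ELF_GNU_PROPERTY_ALIGN; the helper below is hypothetical and simplified:

#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/math.h>

static int walk_gnu_properties(const char *data, size_t datasz)
{
	size_t off = 0;

	while (datasz - off >= sizeof(struct gnu_property)) {
		const struct gnu_property *pr =
			(const struct gnu_property *)(data + off);
		size_t step = sizeof(*pr) +
			      round_up(pr->pr_datasz, ELF_GNU_PROPERTY_ALIGN);

		if (step > datasz - off)
			return -ENOEXEC;
		/* dispatch on pr->pr_type (GNU_PROPERTY_*) here */
		off += step;
	}
	return 0;
}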
diff --git a/include/linux/elfcore-compat.h b/include/linux/elfcore-compat.h
index 7a37f4ce9fd2..54feb64e9b5d 100644
--- a/include/linux/elfcore-compat.h
+++ b/include/linux/elfcore-compat.h
@@ -17,7 +17,7 @@ struct compat_elf_siginfo
compat_int_t si_errno;
};
-struct compat_elf_prstatus
+struct compat_elf_prstatus_common
{
struct compat_elf_siginfo pr_info;
short pr_cursig;
@@ -31,12 +31,6 @@ struct compat_elf_prstatus
struct old_timeval32 pr_stime;
struct old_timeval32 pr_cutime;
struct old_timeval32 pr_cstime;
- compat_elf_gregset_t pr_reg;
-#ifdef CONFIG_BINFMT_ELF_FDPIC
- compat_ulong_t pr_exec_fdpic_loadmap;
- compat_ulong_t pr_interp_fdpic_loadmap;
-#endif
- compat_int_t pr_fpvalid;
};
struct compat_elf_prpsinfo
@@ -49,8 +43,24 @@ struct compat_elf_prpsinfo
__compat_uid_t pr_uid;
__compat_gid_t pr_gid;
compat_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
+ /*
+ * The hard-coded 16 is derived from TASK_COMM_LEN, but it can't be
+	 * changed as it is exposed to userspace, so keep it hard-coded here.
+ */
char pr_fname[16];
char pr_psargs[ELF_PRARGSZ];
};
+#ifdef CONFIG_ARCH_HAS_ELFCORE_COMPAT
+#include <asm/elfcore-compat.h>
+#endif
+
+struct compat_elf_prstatus
+{
+ struct compat_elf_prstatus_common common;
+ compat_elf_gregset_t pr_reg;
+ compat_int_t pr_fpvalid;
+};
+
#endif /* _LINUX_ELFCORE_COMPAT_H */
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index 4cad0e784b28..346a8b56cdc8 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -5,12 +5,75 @@
#include <linux/user.h>
#include <linux/bug.h>
#include <linux/sched/task_stack.h>
-
-#include <asm/elf.h>
-#include <uapi/linux/elfcore.h>
+#include <linux/types.h>
+#include <linux/signal.h>
+#include <linux/time.h>
+#include <linux/ptrace.h>
+#include <linux/fs.h>
+#include <linux/elf.h>
struct coredump_params;
+struct elf_siginfo
+{
+ int si_signo; /* signal number */
+ int si_code; /* extra code */
+ int si_errno; /* errno */
+};
+
+/*
+ * Definitions to generate Intel SVR4-like core files.
+ * These mostly have the same names as the SVR4 types with "elf_"
+ * tacked on the front to prevent clashes with Linux definitions,
+ * and the typedef forms have been avoided. This is mostly like
+ * the SVR4 structure, but adapted to Linux, omitting things that
+ * Linux does not support and that gdb doesn't really use.
+ */
+struct elf_prstatus_common
+{
+ struct elf_siginfo pr_info; /* Info associated with signal */
+ short pr_cursig; /* Current signal */
+ unsigned long pr_sigpend; /* Set of pending signals */
+ unsigned long pr_sighold; /* Set of held signals */
+ pid_t pr_pid;
+ pid_t pr_ppid;
+ pid_t pr_pgrp;
+ pid_t pr_sid;
+ struct __kernel_old_timeval pr_utime; /* User time */
+ struct __kernel_old_timeval pr_stime; /* System time */
+ struct __kernel_old_timeval pr_cutime; /* Cumulative user time */
+ struct __kernel_old_timeval pr_cstime; /* Cumulative system time */
+};
+
+struct elf_prstatus
+{
+ struct elf_prstatus_common common;
+ elf_gregset_t pr_reg; /* GP registers */
+ int pr_fpvalid; /* True if math co-processor being used. */
+};
+
+#define ELF_PRARGSZ (80) /* Number of chars for args */
+
+struct elf_prpsinfo
+{
+ char pr_state; /* numeric process state */
+ char pr_sname; /* char for pr_state */
+ char pr_zomb; /* zombie */
+ char pr_nice; /* nice val */
+ unsigned long pr_flag; /* flags */
+ __kernel_uid_t pr_uid;
+ __kernel_gid_t pr_gid;
+ pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
+ /* Lots missing */
+ /*
+ * The hard-coded 16 is derived from TASK_COMM_LEN, but it can't be
+	 * changed as it is exposed to userspace, so keep it hard-coded here.
+ */
+ char pr_fname[16]; /* filename of executable */
+ char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
+};
+
static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
{
#ifdef ELF_CORE_COPY_REGS
@@ -21,15 +84,6 @@ static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *re
#endif
}
-static inline void elf_core_copy_kernel_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
-{
-#ifdef ELF_CORE_COPY_KERNEL_REGS
- ELF_CORE_COPY_KERNEL_REGS((*elfregs), regs);
-#else
- elf_core_copy_regs(elfregs, regs);
-#endif
-}
-
static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
{
#if defined (ELF_CORE_COPY_TASK_REGS)
@@ -51,13 +105,7 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg
#endif
}
-#ifdef ELF_CORE_COPY_XFPREGS
-static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
-{
- return ELF_CORE_COPY_XFPREGS(t, xfpu);
-}
-#endif
-
+#ifdef CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS
/*
* These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
* extra segments containing the gate DSO contents. Dumping its
@@ -72,5 +120,26 @@ elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset);
extern int
elf_core_write_extra_data(struct coredump_params *cprm);
extern size_t elf_core_extra_data_size(void);
+#else
+static inline Elf_Half elf_core_extra_phdrs(void)
+{
+ return 0;
+}
+
+static inline int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
+{
+ return 1;
+}
+
+static inline int elf_core_write_extra_data(struct coredump_params *cprm)
+{
+ return 1;
+}
+
+static inline size_t elf_core_extra_data_size(void)
+{
+ return 0;
+}
+#endif /* CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS */
#endif /* _LINUX_ELFCORE_H */
diff --git a/include/linux/elfnote-lto.h b/include/linux/elfnote-lto.h
new file mode 100644
index 000000000000..d4635a3ecc4f
--- /dev/null
+++ b/include/linux/elfnote-lto.h
@@ -0,0 +1,14 @@
+#ifndef __ELFNOTE_LTO_H
+#define __ELFNOTE_LTO_H
+
+#include <linux/elfnote.h>
+
+#define LINUX_ELFNOTE_LTO_INFO 0x101
+
+#ifdef CONFIG_LTO
+#define BUILD_LTO_INFO ELFNOTE32("Linux", LINUX_ELFNOTE_LTO_INFO, 1)
+#else
+#define BUILD_LTO_INFO ELFNOTE32("Linux", LINUX_ELFNOTE_LTO_INFO, 0)
+#endif
+
+#endif /* __ELFNOTE_LTO_H */
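For context (not part of the patch): BUILD_LTO_INFO is meant to be instantiated once at file scope in a C translation unit, where the ELFNOTE32() it expands to emits a "Linux" ELF note recording whether the build used LTO; a minimal sketch:

#include <linux/elfnote-lto.h>

/* Emits a .note entry: name "Linux", type 0x101, descriptor 1 or 0. */
BUILD_LTO_INFO;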
diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h
index f236f5b931b2..69b136e4dd2b 100644
--- a/include/linux/elfnote.h
+++ b/include/linux/elfnote.h
@@ -54,12 +54,12 @@
.popsection ;
#define ELFNOTE(name, type, desc) \
- ELFNOTE_START(name, type, "") \
+ ELFNOTE_START(name, type, "a") \
desc ; \
ELFNOTE_END
#else /* !__ASSEMBLER__ */
-#include <linux/elf.h>
+#include <uapi/linux/elf.h>
/*
* Use an anonymous structure which matches the shape of
* Elf{32,64}_Nhdr, but includes the name and desc data. The size and
diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
index 564e96f625ff..1c630e2c2756 100644
--- a/include/linux/enclosure.h
+++ b/include/linux/enclosure.h
@@ -101,7 +101,7 @@ struct enclosure_device {
struct device edev;
struct enclosure_component_callbacks *cb;
int components;
- struct enclosure_component component[0];
+ struct enclosure_component component[];
};
static inline struct enclosure_device *
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index d249b88a4d5a..b9caa01dfac4 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_ENERGY_MODEL_H
#define _LINUX_ENERGY_MODEL_H
#include <linux/cpumask.h>
+#include <linux/device.h>
#include <linux/jump_label.h>
#include <linux/kobject.h>
#include <linux/rcupdate.h>
@@ -10,116 +11,265 @@
#include <linux/types.h>
/**
- * em_cap_state - Capacity state of a performance domain
- * @frequency: The CPU frequency in KHz, for consistency with CPUFreq
- * @power: The power consumed by 1 CPU at this level, in milli-watts
+ * struct em_perf_state - Performance state of a performance domain
+ * @frequency: The frequency in KHz, for consistency with CPUFreq
+ * @power: The power consumed at this level (by 1 CPU or by a registered
+ * device). It can be a total power, i.e. static plus dynamic.
* @cost: The cost coefficient associated with this level, used during
* energy calculation. Equal to: power * max_frequency / frequency
+ * @flags: see "em_perf_state flags" description below.
*/
-struct em_cap_state {
+struct em_perf_state {
unsigned long frequency;
unsigned long power;
unsigned long cost;
+ unsigned long flags;
};
+/*
+ * em_perf_state flags:
+ *
+ * EM_PERF_STATE_INEFFICIENT: The performance state is inefficient: another
+ * performance state in this em_perf_domain has a higher frequency but a
+ * lower or equal power cost. Such inefficient states are ignored when
+ * using em_pd_get_efficient_*() functions.
+ */
+#define EM_PERF_STATE_INEFFICIENT BIT(0)
+
/**
- * em_perf_domain - Performance domain
- * @table: List of capacity states, in ascending order
- * @nr_cap_states: Number of capacity states
- * @cpus: Cpumask covering the CPUs of the domain
+ * struct em_perf_domain - Performance domain
+ * @table: List of performance states, in ascending order
+ * @nr_perf_states: Number of performance states
+ * @flags: See "em_perf_domain flags"
+ * @cpus: Cpumask covering the CPUs of the domain. It's here
+ * for performance reasons to avoid potential cache
+ * misses during energy calculations in the scheduler,
+ * and it simplifies allocating/freeing that memory region.
*
- * A "performance domain" represents a group of CPUs whose performance is
- * scaled together. All CPUs of a performance domain must have the same
- * micro-architecture. Performance domains often have a 1-to-1 mapping with
- * CPUFreq policies.
+ * In the case of a CPU device, a "performance domain" represents a group of
+ * CPUs whose performance is scaled together. All CPUs of a performance domain
+ * must have the same micro-architecture. Performance domains often have
+ * a 1-to-1 mapping with CPUFreq policies. For other devices, the @cpus
+ * field is unused.
*/
struct em_perf_domain {
- struct em_cap_state *table;
- int nr_cap_states;
- unsigned long cpus[0];
+ struct em_perf_state *table;
+ int nr_perf_states;
+ unsigned long flags;
+ unsigned long cpus[];
};
+/*
+ * em_perf_domain flags:
+ *
+ * EM_PERF_DOMAIN_MICROWATTS: When set, the power values are in micro-Watts;
+ * otherwise they use some other, abstract scale.
+ *
+ * EM_PERF_DOMAIN_SKIP_INEFFICIENCIES: Skip inefficient states when estimating
+ * energy consumption.
+ *
+ * EM_PERF_DOMAIN_ARTIFICIAL: The power values are artificial and might have
+ * been created by a platform lacking real power information.
+ */
+#define EM_PERF_DOMAIN_MICROWATTS BIT(0)
+#define EM_PERF_DOMAIN_SKIP_INEFFICIENCIES BIT(1)
+#define EM_PERF_DOMAIN_ARTIFICIAL BIT(2)
+
+#define em_span_cpus(em) (to_cpumask((em)->cpus))
+#define em_is_artificial(em) ((em)->flags & EM_PERF_DOMAIN_ARTIFICIAL)
+
#ifdef CONFIG_ENERGY_MODEL
-#define EM_CPU_MAX_POWER 0xFFFF
+/*
+ * The max power value in micro-Watts. The limit of 64 Watts is set as
+ * a safety net to not overflow multiplications on 32bit platforms. The
+ * 32bit value limit for total Perf Domain power implies a limit of
+ * maximum CPUs in such domain to 64.
+ */
+#define EM_MAX_POWER (64000000) /* 64 Watts */
+
+/*
+ * To avoid possible energy estimation overflow on 32bit machines, limit
+ * the number of CPUs in a performance domain.
+ * 64bit machines are safe, hence the large value.
+ */
+#ifdef CONFIG_64BIT
+#define EM_MAX_NUM_CPUS 4096
+#else
+#define EM_MAX_NUM_CPUS 16
+#endif
+
+/*
+ * To avoid an overflow on 32bit machines while calculating the energy,
+ * use a different order of operations: first divide by 'scale_cpu', which
+ * shrinks the big value stored in the 'cost' field, then multiply by
+ * 'sum_util'. This handles existing platforms that have e.g. ~1.3 Watts of
+ * power at max frequency, i.e. a 'cost' value above 1 million micro-Watts:
+ * with 4 CPUs in the performance domain, 'sum_util' could be 4096, and the
+ * multiplication 'cost' * 'sum_util' would overflow. The reordering costs
+ * a little precision in the estimate compared to a 64bit platform without
+ * reordering.
+ *
+ * 64bit machines are safe.
+ */
+#ifdef CONFIG_64BIT
+#define em_estimate_energy(cost, sum_util, scale_cpu) \
+ (((cost) * (sum_util)) / (scale_cpu))
+#else
+#define em_estimate_energy(cost, sum_util, scale_cpu) \
+ (((cost) / (scale_cpu)) * (sum_util))
+#endif
struct em_data_callback {
/**
- * active_power() - Provide power at the next capacity state of a CPU
- * @power : Active power at the capacity state in mW (modified)
- * @freq : Frequency at the capacity state in kHz (modified)
- * @cpu : CPU for which we do this operation
+ * active_power() - Provide power at the next performance state of
+ * a device
+ * @dev : Device for which we do this operation (can be a CPU)
+ * @power : Active power at the performance state
+ * (modified)
+ * @freq : Frequency at the performance state in kHz
+ * (modified)
*
- * active_power() must find the lowest capacity state of 'cpu' above
+ * active_power() must find the lowest performance state of 'dev' above
* 'freq' and update 'power' and 'freq' to the matching active power
* and frequency.
*
- * The power is the one of a single CPU in the domain, expressed in
- * milli-watts. It is expected to fit in the [0, EM_CPU_MAX_POWER]
- * range.
+ * In case of CPUs, the power is the one of a single CPU in the domain,
+ * expressed in micro-Watts or an abstract scale. It is expected to
+ * fit in the [0, EM_MAX_POWER] range.
*
* Return 0 on success.
*/
- int (*active_power)(unsigned long *power, unsigned long *freq, int cpu);
+ int (*active_power)(struct device *dev, unsigned long *power,
+ unsigned long *freq);
+
+ /**
+ * get_cost() - Provide the cost at the given performance state of
+ * a device
+ * @dev : Device for which we do this operation (can be a CPU)
+ * @freq : Frequency at the performance state in kHz
+ * @cost : The cost value for the performance state
+ * (modified)
+ *
+ * In case of CPUs, the cost is the one of a single CPU in the domain.
+ * It is expected to fit in the [0, EM_MAX_POWER] range due to internal
+ * usage in EAS calculation.
+ *
+ * Return 0 on success, or appropriate error value in case of failure.
+ */
+ int (*get_cost)(struct device *dev, unsigned long freq,
+ unsigned long *cost);
};
-#define EM_DATA_CB(_active_power_cb) { .active_power = &_active_power_cb }
+#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) ((em_cb).active_power = cb)
+#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb) \
+ { .active_power = _active_power_cb, \
+ .get_cost = _cost_cb }
+#define EM_DATA_CB(_active_power_cb) \
+ EM_ADV_DATA_CB(_active_power_cb, NULL)
struct em_perf_domain *em_cpu_get(int cpu);
-int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
- struct em_data_callback *cb);
+struct em_perf_domain *em_pd_get(struct device *dev);
+int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+ struct em_data_callback *cb, cpumask_t *span,
+ bool microwatts);
+void em_dev_unregister_perf_domain(struct device *dev);
+
+/**
+ * em_pd_get_efficient_state() - Get an efficient performance state from the EM
+ * @pd : Performance domain for which we want an efficient frequency
+ * @freq : Frequency to map with the EM
+ *
+ * It is called from the scheduler code quite frequently and as a consequence
+ * doesn't implement any checks.
+ *
+ * Return: An efficient performance state, high enough to meet @freq
+ * requirement.
+ */
+static inline
+struct em_perf_state *em_pd_get_efficient_state(struct em_perf_domain *pd,
+ unsigned long freq)
+{
+ struct em_perf_state *ps;
+ int i;
+
+ for (i = 0; i < pd->nr_perf_states; i++) {
+ ps = &pd->table[i];
+ if (ps->frequency >= freq) {
+ if (pd->flags & EM_PERF_DOMAIN_SKIP_INEFFICIENCIES &&
+ ps->flags & EM_PERF_STATE_INEFFICIENT)
+ continue;
+ break;
+ }
+ }
+
+ return ps;
+}
/**
- * em_pd_energy() - Estimates the energy consumed by the CPUs of a perf. domain
+ * em_cpu_energy() - Estimates the energy consumed by the CPUs of a
+ * performance domain
* @pd : performance domain for which energy has to be estimated
* @max_util : highest utilization among CPUs of the domain
* @sum_util : sum of the utilization of all CPUs in the domain
+ * @allowed_cpu_cap : maximum allowed CPU capacity for the @pd, which
+ * might reflect reduced frequency (due to thermal)
+ *
+ * This function must be used only for CPU devices. There is no validation
+ * that the EM is of CPU type or that the cpumask is allocated. It is called
+ * from the scheduler code quite frequently, which is why it performs no
+ * checks.
*
* Return: the sum of the energy consumed by the CPUs of the domain assuming
* a capacity state satisfying the max utilization of the domain.
*/
-static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
- unsigned long max_util, unsigned long sum_util)
+static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
+ unsigned long max_util, unsigned long sum_util,
+ unsigned long allowed_cpu_cap)
{
unsigned long freq, scale_cpu;
- struct em_cap_state *cs;
- int i, cpu;
+ struct em_perf_state *ps;
+ int cpu;
+
+ if (!sum_util)
+ return 0;
/*
- * In order to predict the capacity state, map the utilization of the
- * most utilized CPU of the performance domain to a requested frequency,
- * like schedutil.
+ * In order to predict the performance state, map the utilization of
+ * the most utilized CPU of the performance domain to a requested
+ * frequency, like schedutil. Also take into account that the real
+ * frequency might be set lower (due to thermal capping), so clamp
+ * the max utilization to the allowed CPU capacity before calculating
+ * the effective frequency.
*/
cpu = cpumask_first(to_cpumask(pd->cpus));
scale_cpu = arch_scale_cpu_capacity(cpu);
- cs = &pd->table[pd->nr_cap_states - 1];
- freq = map_util_freq(max_util, cs->frequency, scale_cpu);
+ ps = &pd->table[pd->nr_perf_states - 1];
+
+ max_util = map_util_perf(max_util);
+ max_util = min(max_util, allowed_cpu_cap);
+ freq = map_util_freq(max_util, ps->frequency, scale_cpu);
/*
- * Find the lowest capacity state of the Energy Model above the
+ * Find the lowest performance state of the Energy Model above the
* requested frequency.
*/
- for (i = 0; i < pd->nr_cap_states; i++) {
- cs = &pd->table[i];
- if (cs->frequency >= freq)
- break;
- }
+ ps = em_pd_get_efficient_state(pd, freq);
/*
- * The capacity of a CPU in the domain at that capacity state (cs)
+ * The capacity of a CPU in the domain at the performance state (ps)
* can be computed as:
*
- * cs->freq * scale_cpu
- * cs->cap = -------------------- (1)
+ * ps->freq * scale_cpu
+ * ps->cap = -------------------- (1)
* cpu_max_freq
*
* So, ignoring the costs of idle states (which are not available in
- * the EM), the energy consumed by this CPU at that capacity state is
- * estimated as:
+ * the EM), the energy consumed by this CPU at that performance state
+ * is estimated as:
*
- * cs->power * cpu_util
+ * ps->power * cpu_util
* cpu_nrg = -------------------- (2)
- * cs->cap
+ * ps->cap
*
- * since 'cpu_util / cs->cap' represents its percentage of busy time.
+ * since 'cpu_util / ps->cap' represents its percentage of busy time.
*
* NOTE: Although the result of this computation actually is in
* units of power, it can be manipulated as an energy value
@@ -129,55 +279,68 @@ static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
* By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product
* of two terms:
*
- * cs->power * cpu_max_freq cpu_util
+ * ps->power * cpu_max_freq cpu_util
* cpu_nrg = ------------------------ * --------- (3)
- * cs->freq scale_cpu
+ * ps->freq scale_cpu
*
- * The first term is static, and is stored in the em_cap_state struct
- * as 'cs->cost'.
+ * The first term is static, and is stored in the em_perf_state struct
+ * as 'ps->cost'.
*
* Since all CPUs of the domain have the same micro-architecture, they
- * share the same 'cs->cost', and the same CPU capacity. Hence, the
+ * share the same 'ps->cost', and the same CPU capacity. Hence, the
* total energy of the domain (which is the simple sum of the energy of
* all of its CPUs) can be factorized as:
*
- * cs->cost * \Sum cpu_util
+ * ps->cost * \Sum cpu_util
* pd_nrg = ------------------------ (4)
* scale_cpu
*/
- return cs->cost * sum_util / scale_cpu;
+ return em_estimate_energy(ps->cost, sum_util, scale_cpu);
}
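A worked illustration with invented numbers: for ps->cost = 2048000, sum_util = 512 and scale_cpu = 1024, formula (4) yields 2048000 * 512 / 1024 = 1024000 abstract energy units. The 32bit ordering computes (2048000 / 1024) * 512 = 1024000 as well, and only loses precision when 'cost' is not an exact multiple of 'scale_cpu'.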
/**
- * em_pd_nr_cap_states() - Get the number of capacity states of a perf. domain
+ * em_pd_nr_perf_states() - Get the number of performance states of a perf.
+ * domain
* @pd : performance domain for which this must be done
*
- * Return: the number of capacity states in the performance domain table
+ * Return: the number of performance states in the performance domain table
*/
-static inline int em_pd_nr_cap_states(struct em_perf_domain *pd)
+static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
{
- return pd->nr_cap_states;
+ return pd->nr_perf_states;
}
#else
struct em_data_callback {};
+#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb) { }
#define EM_DATA_CB(_active_power_cb) { }
+#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) do { } while (0)
-static inline int em_register_perf_domain(cpumask_t *span,
- unsigned int nr_states, struct em_data_callback *cb)
+static inline
+int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+ struct em_data_callback *cb, cpumask_t *span,
+ bool microwatts)
{
return -EINVAL;
}
+static inline void em_dev_unregister_perf_domain(struct device *dev)
+{
+}
static inline struct em_perf_domain *em_cpu_get(int cpu)
{
return NULL;
}
-static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
- unsigned long max_util, unsigned long sum_util)
+static inline struct em_perf_domain *em_pd_get(struct device *dev)
+{
+ return NULL;
+}
+static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
+ unsigned long max_util, unsigned long sum_util,
+ unsigned long allowed_cpu_cap)
{
return 0;
}
-static inline int em_pd_nr_cap_states(struct em_perf_domain *pd)
+static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
{
return 0;
}
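A registration sketch (not part of the patch); the device, the state count and the power/frequency values are hypothetical placeholders:

#include <linux/energy_model.h>

static int example_active_power(struct device *dev, unsigned long *power,
				unsigned long *freq)
{
	/* Hypothetical: snap *freq up to the next OPP, report its power. */
	*freq = 1000000;	/* kHz, placeholder */
	*power = 500000;	/* micro-Watts, placeholder */
	return 0;
}

static int example_register_em(struct device *dev, cpumask_t *cpus)
{
	struct em_data_callback em_cb = EM_DATA_CB(example_active_power);

	/* Four perf states, power expressed in micro-Watts. */
	return em_dev_register_perf_domain(dev, 4, &em_cb, cpus, true);
}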
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
new file mode 100644
index 000000000000..d95ab85f96ba
--- /dev/null
+++ b/include/linux/entry-common.h
@@ -0,0 +1,468 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_ENTRYCOMMON_H
+#define __LINUX_ENTRYCOMMON_H
+
+#include <linux/static_call_types.h>
+#include <linux/ptrace.h>
+#include <linux/syscalls.h>
+#include <linux/seccomp.h>
+#include <linux/sched.h>
+
+#include <asm/entry-common.h>
+
+/*
+ * Define dummy _TIF work flags if not defined by the architecture or for
+ * disabled functionality.
+ */
+#ifndef _TIF_PATCH_PENDING
+# define _TIF_PATCH_PENDING (0)
+#endif
+
+#ifndef _TIF_UPROBE
+# define _TIF_UPROBE (0)
+#endif
+
+/*
+ * SYSCALL_WORK flags handled in syscall_enter_from_user_mode()
+ */
+#ifndef ARCH_SYSCALL_WORK_ENTER
+# define ARCH_SYSCALL_WORK_ENTER (0)
+#endif
+
+/*
+ * SYSCALL_WORK flags handled in syscall_exit_to_user_mode()
+ */
+#ifndef ARCH_SYSCALL_WORK_EXIT
+# define ARCH_SYSCALL_WORK_EXIT (0)
+#endif
+
+#define SYSCALL_WORK_ENTER (SYSCALL_WORK_SECCOMP | \
+ SYSCALL_WORK_SYSCALL_TRACEPOINT | \
+ SYSCALL_WORK_SYSCALL_TRACE | \
+ SYSCALL_WORK_SYSCALL_EMU | \
+ SYSCALL_WORK_SYSCALL_AUDIT | \
+ SYSCALL_WORK_SYSCALL_USER_DISPATCH | \
+ ARCH_SYSCALL_WORK_ENTER)
+#define SYSCALL_WORK_EXIT (SYSCALL_WORK_SYSCALL_TRACEPOINT | \
+ SYSCALL_WORK_SYSCALL_TRACE | \
+ SYSCALL_WORK_SYSCALL_AUDIT | \
+ SYSCALL_WORK_SYSCALL_USER_DISPATCH | \
+ SYSCALL_WORK_SYSCALL_EXIT_TRAP | \
+ ARCH_SYSCALL_WORK_EXIT)
+
+/*
+ * TIF flags handled in exit_to_user_mode_loop()
+ */
+#ifndef ARCH_EXIT_TO_USER_MODE_WORK
+# define ARCH_EXIT_TO_USER_MODE_WORK (0)
+#endif
+
+#define EXIT_TO_USER_MODE_WORK \
+ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \
+ ARCH_EXIT_TO_USER_MODE_WORK)
+
+/**
+ * arch_enter_from_user_mode - Architecture specific sanity check for user mode regs
+ * @regs: Pointer to current's pt_regs
+ *
+ * Defaults to an empty implementation. Can be replaced by architecture
+ * specific code.
+ *
+ * Invoked from syscall_enter_from_user_mode() in the non-instrumentable
+ * section. Use __always_inline so the compiler cannot push it out of line
+ * and make it instrumentable.
+ */
+static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs);
+
+#ifndef arch_enter_from_user_mode
+static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {}
+#endif
+
+/**
+ * enter_from_user_mode - Establish state when coming from user mode
+ *
+ * Syscall/interrupt entry disables interrupts, but user mode is traced as
+ * interrupts enabled. Also, with NO_HZ_FULL, RCU might be idle.
+ *
+ * 1) Tell lockdep that interrupts are disabled
+ * 2) Invoke context tracking if enabled to reactivate RCU
+ * 3) Trace interrupts off state
+ *
+ * Invoked from architecture specific syscall entry code with interrupts
+ * disabled. The calling code has to be non-instrumentable. When the
+ * function returns all state is correct and interrupts are still
+ * disabled. The subsequent functions can be instrumented.
+ *
+ * This is invoked when there is architecture specific functionality to be
+ * done between establishing state and enabling interrupts. The caller must
+ * enable interrupts before invoking syscall_enter_from_user_mode_work().
+ */
+void enter_from_user_mode(struct pt_regs *regs);
+
+/**
+ * syscall_enter_from_user_mode_prepare - Establish state and enable interrupts
+ * @regs: Pointer to current's pt_regs
+ *
+ * Invoked from architecture specific syscall entry code with interrupts
+ * disabled. The calling code has to be non-instrumentable. When the
+ * function returns all state is correct, interrupts are enabled and the
+ * subsequent functions can be instrumented.
+ *
+ * This handles lockdep, RCU (context tracking) and tracing state, i.e.
+ * the functionality provided by enter_from_user_mode().
+ *
+ * This is invoked when there is extra architecture specific functionality
+ * to be done between establishing state and handling user mode entry work.
+ */
+void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
+
+/**
+ * syscall_enter_from_user_mode_work - Check and handle work before invoking
+ * a syscall
+ * @regs: Pointer to current's pt_regs
+ * @syscall: The syscall number
+ *
+ * Invoked from architecture specific syscall entry code with interrupts
+ * enabled after invoking syscall_enter_from_user_mode_prepare() and extra
+ * architecture specific work.
+ *
+ * Returns: The original or a modified syscall number
+ *
+ * If the returned syscall number is -1 then the syscall should be
+ * skipped. In this case the caller may invoke syscall_set_error() or
+ * syscall_set_return_value() first. If neither of those are called and -1
+ * is returned, then the syscall will fail with ENOSYS.
+ *
+ * It handles the following work items:
+ *
+ * 1) syscall_work flag dependent invocations of
+ * ptrace_report_syscall_entry(), __secure_computing(), trace_sys_enter()
+ * 2) Invocation of audit_syscall_entry()
+ */
+long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);
+
+/**
+ * syscall_enter_from_user_mode - Establish state and check and handle work
+ * before invoking a syscall
+ * @regs: Pointer to current's pt_regs
+ * @syscall: The syscall number
+ *
+ * Invoked from architecture specific syscall entry code with interrupts
+ * disabled. The calling code has to be non-instrumentable. When the
+ * function returns all state is correct, interrupts are enabled and the
+ * subsequent functions can be instrumented.
+ *
+ * This is combination of syscall_enter_from_user_mode_prepare() and
+ * syscall_enter_from_user_mode_work().
+ *
+ * Returns: The original or a modified syscall number. See
+ * syscall_enter_from_user_mode_work() for further explanation.
+ */
+long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall);
+
+/**
+ * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
+ * @ti_work: Cached TIF flags gathered with interrupts disabled
+ *
+ * Defaults to local_irq_enable(). Can be supplied by architecture specific
+ * code.
+ */
+static inline void local_irq_enable_exit_to_user(unsigned long ti_work);
+
+#ifndef local_irq_enable_exit_to_user
+static inline void local_irq_enable_exit_to_user(unsigned long ti_work)
+{
+ local_irq_enable();
+}
+#endif
+
+/**
+ * local_irq_disable_exit_to_user - Exit to user variant of local_irq_disable()
+ *
+ * Defaults to local_irq_disable(). Can be supplied by architecture specific
+ * code.
+ */
+static inline void local_irq_disable_exit_to_user(void);
+
+#ifndef local_irq_disable_exit_to_user
+static inline void local_irq_disable_exit_to_user(void)
+{
+ local_irq_disable();
+}
+#endif
+
+/**
+ * arch_exit_to_user_mode_work - Architecture specific TIF work for exit
+ * to user mode.
+ * @regs: Pointer to current's pt_regs
+ * @ti_work: Cached TIF flags gathered with interrupts disabled
+ *
+ * Invoked from exit_to_user_mode_loop() with interrupts enabled.
+ *
+ * Defaults to NOOP. Can be supplied by architecture specific code.
+ */
+static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
+ unsigned long ti_work);
+
+#ifndef arch_exit_to_user_mode_work
+static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
+ unsigned long ti_work)
+{
+}
+#endif
+
+/**
+ * arch_exit_to_user_mode_prepare - Architecture specific preparation for
+ * exit to user mode.
+ * @regs: Pointer to current's pt_regs
+ * @ti_work: Cached TIF flags gathered with interrupts disabled
+ *
+ * Invoked from exit_to_user_mode_prepare() with interrupts disabled as the last
+ * function before return. Defaults to NOOP.
+ */
+static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ unsigned long ti_work);
+
+#ifndef arch_exit_to_user_mode_prepare
+static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ unsigned long ti_work)
+{
+}
+#endif
+
+/**
+ * arch_exit_to_user_mode - Architecture specific final work before
+ * exit to user mode.
+ *
+ * Invoked from exit_to_user_mode() with interrupts disabled as the last
+ * function before return. Defaults to NOOP.
+ *
+ * This needs to be __always_inline because it is non-instrumentable code
+ * invoked after context tracking switched to user mode.
+ *
+ * An architecture implementation must not do anything complex, no locking
+ * etc. The main purpose is for speculation mitigations.
+ */
+static __always_inline void arch_exit_to_user_mode(void);
+
+#ifndef arch_exit_to_user_mode
+static __always_inline void arch_exit_to_user_mode(void) { }
+#endif
+
+/**
+ * arch_do_signal_or_restart - Architecture specific signal delivery function
+ * @regs: Pointer to current's pt_regs
+ *
+ * Invoked from exit_to_user_mode_loop().
+ */
+void arch_do_signal_or_restart(struct pt_regs *regs);
+
+/**
+ * exit_to_user_mode - Fixup state when exiting to user mode
+ *
+ * Syscall/interrupt exit enables interrupts, but the kernel is still in
+ * an interrupts-disabled state when this is invoked. Also tells RCU about
+ * the transition.
+ *
+ * 1) Trace the interrupts-on state
+ * 2) Invoke context tracking if enabled to adjust RCU state
+ * 3) Invoke architecture specific last minute exit code, e.g. speculation
+ * mitigations, etc.: arch_exit_to_user_mode()
+ * 4) Tell lockdep that interrupts are enabled
+ *
+ * Invoked from architecture specific code when syscall_exit_to_user_mode()
+ * is not suitable as the last step before returning to userspace. Must be
+ * invoked with interrupts disabled and the caller must be
+ * non-instrumentable.
+ * The caller has to invoke syscall_exit_to_user_mode_work() before this.
+ */
+void exit_to_user_mode(void);
+
+/**
+ * syscall_exit_to_user_mode_work - Handle work before returning to user mode
+ * @regs: Pointer to current's pt_regs
+ *
+ * Same as step 1 and 2 of syscall_exit_to_user_mode() but without calling
+ * exit_to_user_mode() to perform the final transition to user mode.
+ *
+ * Calling convention is the same as for syscall_exit_to_user_mode() and it
+ * returns with all work handled and interrupts disabled. The caller must
+ * invoke exit_to_user_mode() before actually switching to user mode to
+ * make the final state transitions. Interrupts must stay disabled between
+ * return from this function and the invocation of exit_to_user_mode().
+ */
+void syscall_exit_to_user_mode_work(struct pt_regs *regs);
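A sketch of the split calling convention described above; the wrapper name and the arch-specific fixup are hypothetical:

    __visible noinstr void arch_syscall_exit(struct pt_regs *regs)
    {
            syscall_exit_to_user_mode_work(regs);   /* returns irqs disabled */
            /* arch-specific, non-instrumentable last-minute work here */
            exit_to_user_mode();                    /* final transition */
    }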
+
+/**
+ * syscall_exit_to_user_mode - Handle work before returning to user mode
+ * @regs: Pointer to current's pt_regs
+ *
+ * Invoked with interrupts enabled and fully valid regs. Returns with all
+ * work handled, interrupts disabled such that the caller can immediately
+ * switch to user mode. Called from architecture specific syscall and ret
+ * from fork code.
+ *
+ * The call order is:
+ * 1) One-time syscall exit work:
+ * - rseq syscall exit
+ * - audit
+ * - syscall tracing
+ * - ptrace (single stepping)
+ *
+ * 2) Preparatory work
+ * - Exit to user mode loop (common TIF handling). Invokes
+ * arch_exit_to_user_mode_work() for architecture specific TIF work
+ * - Architecture specific one time work arch_exit_to_user_mode_prepare()
+ * - Address limit and lockdep checks
+ *
+ * 3) Final transition (lockdep, tracing, context tracking, RCU), i.e. the
+ * functionality in exit_to_user_mode().
+ *
+ * This is a combination of syscall_exit_to_user_mode_work() (1,2) and
+ * exit_to_user_mode(). This function is preferred unless there is a
+ * compelling architectural reason to use the separate functions.
+ */
+void syscall_exit_to_user_mode(struct pt_regs *regs);
+
+/**
+ * irqentry_enter_from_user_mode - Establish state before invoking the irq handler
+ * @regs: Pointer to current's pt_regs
+ *
+ * Invoked from architecture specific entry code with interrupts disabled.
+ * Can only be called when the interrupt entry came from user mode. The
+ * calling code must be non-instrumentable. When the function returns all
+ * state is correct and the subsequent functions can be instrumented.
+ *
+ * The function establishes state (lockdep, RCU (context tracking), tracing).
+ */
+void irqentry_enter_from_user_mode(struct pt_regs *regs);
+
+/**
+ * irqentry_exit_to_user_mode - Interrupt exit work
+ * @regs: Pointer to current's pt_regs
+ *
+ * Invoked with interrupts disabled and fully valid regs. Returns with all
+ * work handled, interrupts disabled such that the caller can immediately
+ * switch to user mode. Called from architecture specific interrupt
+ * handling code.
+ *
+ * The call order is #2 and #3 as described in syscall_exit_to_user_mode().
+ * Interrupt exit does not invoke #1, which is the syscall specific one
+ * time work.
+ */
+void irqentry_exit_to_user_mode(struct pt_regs *regs);
+
+#ifndef irqentry_state
+/**
+ * struct irqentry_state - Opaque object for exception state storage
+ * @exit_rcu: Used exclusively in the irqentry_*() calls; signals whether the
+ * exit path has to invoke ct_irq_exit().
+ * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that
+ * lockdep state is restored correctly on exit from nmi.
+ *
+ * This opaque object is filled in by the irqentry_*_enter() functions and
+ * must be passed back into the corresponding irqentry_*_exit() functions
+ * when the exception is complete.
+ *
+ * Callers of irqentry_*_[enter|exit]() must consider this structure opaque
+ * and all members private. Descriptions of the members are provided to aid in
+ * the maintenance of the irqentry_*() functions.
+ */
+typedef struct irqentry_state {
+ union {
+ bool exit_rcu;
+ bool lockdep;
+ };
+} irqentry_state_t;
+#endif
+
+/**
+ * irqentry_enter - Handle state tracking on ordinary interrupt entries
+ * @regs: Pointer to pt_regs of interrupted context
+ *
+ * Invokes:
+ * - lockdep irqflag state tracking, as the low level ASM entry code
+ * disabled interrupts.
+ *
+ * - Context tracking if the exception hit user mode.
+ *
+ * - The hardirq tracer to keep the state consistent, as the low level
+ * ASM entry code disabled interrupts.
+ *
+ * As a precondition, this requires that the entry came from user mode,
+ * idle, or a kernel context in which RCU is watching.
+ *
+ * For kernel mode entries RCU handling is done conditionally. If RCU is
+ * watching then the only RCU requirement is to check whether the tick has
+ * to be restarted. If RCU is not watching then ct_irq_enter() has to be
+ * invoked on entry and ct_irq_exit() on exit.
+ *
+ * Avoiding the ct_irq_enter/exit() calls is an optimization but also
+ * solves the problem of kernel mode pagefaults which can schedule, which
+ * is not possible after invoking ct_irq_enter() without undoing it.
+ *
+ * For user mode entries irqentry_enter_from_user_mode() is invoked to
+ * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
+ * would not be possible.
+ *
+ * Returns: An opaque object that must be passed to irqentry_exit()
+ */
+irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
+
+/**
+ * raw_irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt
+ *
+ * Conditional reschedule with additional sanity checks.
+ */
+void raw_irqentry_exit_cond_resched(void);
+#ifdef CONFIG_PREEMPT_DYNAMIC
+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+#define irqentry_exit_cond_resched_dynamic_enabled raw_irqentry_exit_cond_resched
+#define irqentry_exit_cond_resched_dynamic_disabled NULL
+DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
+#define irqentry_exit_cond_resched() static_call(irqentry_exit_cond_resched)()
+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+void dynamic_irqentry_exit_cond_resched(void);
+#define irqentry_exit_cond_resched() dynamic_irqentry_exit_cond_resched()
+#endif
+#else /* CONFIG_PREEMPT_DYNAMIC */
+#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched()
+#endif /* CONFIG_PREEMPT_DYNAMIC */
+
+/**
+ * irqentry_exit - Handle return from exception that used irqentry_enter()
+ * @regs: Pointer to pt_regs (exception entry regs)
+ * @state: Return value from matching call to irqentry_enter()
+ *
+ * Depending on the return target (kernel/user) this runs the necessary
+ * preemption and work checks if possible and required and returns to
+ * the caller with interrupts disabled and no further work pending.
+ *
+ * This is the last action before returning to the low level ASM code which
+ * just needs to return to the appropriate context.
+ *
+ * Counterpart to irqentry_enter().
+ */
+void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state);
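Taken together, a typical interrupt entry wrapper pairs the two like this; the wrapper name and the dispatch call are stand-ins, not from this patch:

    __visible noinstr void arch_handle_irq(struct pt_regs *regs, int vector)
    {
            irqentry_state_t state = irqentry_enter(regs);

            instrumentation_begin();
            generic_handle_irq(vector);     /* stand-in for the real dispatch */
            instrumentation_end();

            irqentry_exit(regs, state);
    }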
+
+/**
+ * irqentry_nmi_enter - Handle NMI entry
+ * @regs: Pointer to current's pt_regs
+ *
+ * Similar to irqentry_enter() but taking care of the NMI constraints.
+ */
+irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs);
+
+/**
+ * irqentry_nmi_exit - Handle return from NMI handling
+ * @regs: Pointer to pt_regs (NMI entry regs)
+ * @irq_state: Return value from matching call to irqentry_nmi_enter()
+ *
+ * Last action before returning to the low level assembly code.
+ *
+ * Counterpart to irqentry_nmi_enter().
+ */
+void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state);
+
+#endif
diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h
new file mode 100644
index 000000000000..6813171afccb
--- /dev/null
+++ b/include/linux/entry-kvm.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_ENTRYKVM_H
+#define __LINUX_ENTRYKVM_H
+
+#include <linux/static_call_types.h>
+#include <linux/resume_user_mode.h>
+#include <linux/syscalls.h>
+#include <linux/seccomp.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+
+/* Transfer to guest mode work */
+#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
+
+#ifndef ARCH_XFER_TO_GUEST_MODE_WORK
+# define ARCH_XFER_TO_GUEST_MODE_WORK (0)
+#endif
+
+#define XFER_TO_GUEST_MODE_WORK \
+ (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL | \
+ _TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK)
+
+struct kvm_vcpu;
+
+/**
+ * arch_xfer_to_guest_mode_handle_work - Architecture specific xfer to guest
+ * mode work handling function.
+ * @vcpu: Pointer to current's VCPU data
+ * @ti_work: Cached TIF flags gathered in xfer_to_guest_mode_handle_work()
+ *
+ * Invoked from xfer_to_guest_mode_handle_work(). Defaults to NOOP. Can be
+ * replaced by architecture specific code.
+ */
+static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
+ unsigned long ti_work);
+
+#ifndef arch_xfer_to_guest_mode_handle_work
+static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
+ unsigned long ti_work)
+{
+ return 0;
+}
+#endif
+
+/**
+ * xfer_to_guest_mode_handle_work - Check and handle pending work which needs
+ * to be handled before going to guest mode
+ * @vcpu: Pointer to current's VCPU data
+ *
+ * Returns: 0 or an error code
+ */
+int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
+
+/**
+ * xfer_to_guest_mode_prepare - Perform last minute preparation work that
+ * needs to be handled while IRQs are disabled
+ * upon entering guest mode.
+ *
+ * Has to be invoked with interrupts disabled before the last call
+ * to xfer_to_guest_mode_work_pending().
+ */
+static inline void xfer_to_guest_mode_prepare(void)
+{
+ lockdep_assert_irqs_disabled();
+ tick_nohz_user_enter_prepare();
+}
+
+/**
+ * __xfer_to_guest_mode_work_pending - Check if work is pending
+ *
+ * Returns: True if work pending, False otherwise.
+ *
+ * Bare variant of xfer_to_guest_mode_work_pending(). Can be called, with
+ * care, from interrupt-enabled code for racy quick checks.
+ */
+static inline bool __xfer_to_guest_mode_work_pending(void)
+{
+ unsigned long ti_work = read_thread_flags();
+
+ return !!(ti_work & XFER_TO_GUEST_MODE_WORK);
+}
+
+/**
+ * xfer_to_guest_mode_work_pending - Check if work is pending which needs to be
+ * handled before returning to guest mode
+ *
+ * Returns: True if work pending, False otherwise.
+ *
+ * Has to be invoked with interrupts disabled before the transition to
+ * guest mode.
+ */
+static inline bool xfer_to_guest_mode_work_pending(void)
+{
+ lockdep_assert_irqs_disabled();
+ return __xfer_to_guest_mode_work_pending();
+}
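A minimal sketch of the vcpu run loop these helpers are designed for; arch_enter_guest() stands in for the real world switch and is hypothetical:

    static int vcpu_run_loop(struct kvm_vcpu *vcpu)
    {
            for (;;) {
                    int ret = xfer_to_guest_mode_handle_work(vcpu);

                    if (ret)
                            return ret;

                    local_irq_disable();
                    xfer_to_guest_mode_prepare();
                    if (xfer_to_guest_mode_work_pending()) {
                            local_irq_enable();
                            continue;       /* redo the work with irqs on */
                    }
                    arch_enter_guest(vcpu); /* hypothetical guest switch */
                    local_irq_enable();
            }
    }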
+#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
+
+#endif
diff --git a/include/linux/err.h b/include/linux/err.h
index 87be24350e91..a139c64aef2a 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -62,9 +62,6 @@ static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
return 0;
}
-/* Deprecated */
-#define PTR_RET(p) PTR_ERR_OR_ZERO(p)
-
#endif
#endif /* _LINUX_ERR_H */
diff --git a/include/linux/errno.h b/include/linux/errno.h
index d73f597a2484..8b0c754bab02 100644
--- a/include/linux/errno.h
+++ b/include/linux/errno.h
@@ -31,5 +31,6 @@
#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */
#define EIOCBQUEUED 529 /* iocb queued, will get completion event */
#define ERECALLCONFLICT 530 /* conflict with recalled state */
+#define ENOGRACE 531 /* NFS file lock reclaim refused */
#endif
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 8801f1f986e5..a541f0c4f146 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -11,7 +11,7 @@
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
*
- * Relocated to include/linux where it belongs by Alan Cox
+ * Relocated to include/linux where it belongs by Alan Cox
* <gw4pts@gw4pts.ampr.org>
*/
#ifndef _LINUX_ETHERDEVICE_H
@@ -20,15 +20,23 @@
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/random.h>
+#include <linux/crc32.h>
#include <asm/unaligned.h>
#include <asm/bitsperlong.h>
#ifdef __KERNEL__
struct device;
+struct fwnode_handle;
+
int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
+int platform_get_ethdev_address(struct device *dev, struct net_device *netdev);
unsigned char *arch_get_platform_mac_address(void);
int nvmem_get_mac_address(struct device *dev, void *addrbuf);
-u32 eth_get_headlen(const struct net_device *dev, void *data, unsigned int len);
+int device_get_mac_address(struct device *dev, char *addr);
+int device_get_ethdev_address(struct device *dev, struct net_device *netdev);
+int fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr);
+
+u32 eth_get_headlen(const struct net_device *dev, const void *data, u32 len);
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;
@@ -126,7 +134,7 @@ static inline bool is_multicast_ether_addr(const u8 *addr)
#endif
}
-static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
+static inline bool is_multicast_ether_addr_64bits(const u8 *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
#ifdef __BIG_ENDIAN
@@ -226,8 +234,6 @@ static inline void eth_random_addr(u8 *addr)
addr[0] |= 0x02; /* set local assignment bit (IEEE802) */
}
-#define random_ether_addr(addr) eth_random_addr(addr)
-
/**
* eth_broadcast_addr - Assign broadcast address
* @addr: Pointer to a six-byte array containing the Ethernet address
@@ -261,8 +267,22 @@ static inline void eth_zero_addr(u8 *addr)
*/
static inline void eth_hw_addr_random(struct net_device *dev)
{
+ u8 addr[ETH_ALEN];
+
+ eth_random_addr(addr);
+ __dev_addr_set(dev, addr, ETH_ALEN);
dev->addr_assign_type = NET_ADDR_RANDOM;
- eth_random_addr(dev->dev_addr);
+}
+
+/**
+ * eth_hw_addr_crc - Calculate CRC from netdev_hw_addr
+ * @ha: pointer to hardware address
+ *
+ * Calculate CRC from a hardware address as basis for filter hashes.
+ */
+static inline u32 eth_hw_addr_crc(struct netdev_hw_addr *ha)
+{
+ return ether_crc(ETH_ALEN, ha->addr);
}
/**
@@ -288,6 +308,18 @@ static inline void ether_addr_copy(u8 *dst, const u8 *src)
}
/**
+ * eth_hw_addr_set - Assign Ethernet address to a net_device
+ * @dev: pointer to net_device structure
+ * @addr: address to assign
+ *
+ * Assign given address to the net_device, addr_assign_type is not changed.
+ */
+static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
+{
+ __dev_addr_set(dev, addr, ETH_ALEN);
+}
+
+/**
* eth_hw_addr_inherit - Copy dev_addr from another net_device
* @dst: pointer to net_device to copy dev_addr to
* @src: pointer to net_device to copy dev_addr from
@@ -299,7 +331,7 @@ static inline void eth_hw_addr_inherit(struct net_device *dst,
struct net_device *src)
{
dst->addr_assign_type = src->addr_assign_type;
- ether_addr_copy(dst->dev_addr, src->dev_addr);
+ eth_hw_addr_set(dst, src->dev_addr);
}
/**
@@ -340,8 +372,7 @@ static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
* Please note that alignment of addr1 & addr2 are only guaranteed to be 16 bits.
*/
-static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
- const u8 addr2[6+2])
+static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);
@@ -397,6 +428,28 @@ static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2,
return true;
}
+static inline bool ether_addr_is_ipv4_mcast(const u8 *addr)
+{
+ u8 base[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
+ u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
+
+ return ether_addr_equal_masked(addr, base, mask);
+}
+
+static inline bool ether_addr_is_ipv6_mcast(const u8 *addr)
+{
+ u8 base[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
+ u8 mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
+
+ return ether_addr_equal_masked(addr, base, mask);
+}
+
+static inline bool ether_addr_is_ip_mcast(const u8 *addr)
+{
+ return ether_addr_is_ipv4_mcast(addr) ||
+ ether_addr_is_ipv6_mcast(addr);
+}
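The masks encode the standard mapping of IP multicast groups onto MAC space (01:00:5e plus the low 23 group bits for IPv4, the 33:33 prefix for IPv6); for example:

    /* 224.1.2.3 maps onto the multicast MAC 01:00:5e:01:02:03. */
    static const u8 grp_mac[ETH_ALEN] = {
            0x01, 0x00, 0x5e, 0x01, 0x02, 0x03
    };

    /* ether_addr_is_ipv4_mcast(grp_mac) -> true
     * ether_addr_is_ipv6_mcast(grp_mac) -> false */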
+
/**
* ether_addr_to_u64 - Convert an Ethernet address into a u64 value.
* @addr: Pointer to a six-byte array containing the Ethernet address
@@ -520,6 +573,27 @@ static inline unsigned long compare_ether_header(const void *a, const void *b)
}
/**
+ * eth_hw_addr_gen - Generate and assign Ethernet address to a port
+ * @dev: pointer to port's net_device structure
+ * @base_addr: base Ethernet address
+ * @id: offset to add to the base address
+ *
+ * Generate a MAC address using a base address and an offset and assign it
+ * to a net_device. Commonly used by switch drivers which need to compute
+ * addresses for all their ports. addr_assign_type is not changed.
+ */
+static inline void eth_hw_addr_gen(struct net_device *dev, const u8 *base_addr,
+ unsigned int id)
+{
+ u64 u = ether_addr_to_u64(base_addr);
+ u8 addr[ETH_ALEN];
+
+ u += id;
+ u64_to_ether_addr(u, addr);
+ eth_hw_addr_set(dev, addr);
+}
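Usage sketch for a switch driver (all foo_* names hypothetical): each port gets the base address plus its port index.

    static void foo_assign_port_addrs(struct foo_switch *sw)
    {
            static const u8 base[ETH_ALEN] = { 0x02, 0x00, 0x00,
                                               0x00, 0x00, 0x10 };
            int i;

            for (i = 0; i < sw->num_ports; i++)
                    eth_hw_addr_gen(sw->ports[i].ndev, base, i);
    }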
+
+/**
+ * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
* @skb: Buffer to pad
*
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 95991e4300bf..99dc7bfbcd3c 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -15,10 +15,9 @@
#include <linux/bitmap.h>
#include <linux/compat.h>
+#include <linux/netlink.h>
#include <uapi/linux/ethtool.h>
-#ifdef CONFIG_COMPAT
-
struct compat_ethtool_rx_flow_spec {
u32 flow_type;
union ethtool_flow_union h_u;
@@ -35,11 +34,9 @@ struct compat_ethtool_rxnfc {
compat_u64 data;
struct compat_ethtool_rx_flow_spec fs;
u32 rule_cnt;
- u32 rule_locs[0];
+ u32 rule_locs[];
};
-#endif /* CONFIG_COMPAT */
-
#include <linux/rculist.h>
/**
@@ -70,6 +67,32 @@ enum {
ETH_RSS_HASH_FUNCS_COUNT
};
+/**
+ * struct kernel_ethtool_ringparam - RX/TX ring configuration
+ * @rx_buf_len: Current length of buffers on the rx ring.
+ * @tcp_data_split: Scatter packet headers and data to separate buffers
+ * @tx_push: Flag indicating whether tx push mode is enabled
+ * @cqe_size: Size of TX/RX completion queue event
+ */
+struct kernel_ethtool_ringparam {
+ u32 rx_buf_len;
+ u8 tcp_data_split;
+ u8 tx_push;
+ u32 cqe_size;
+};
+
+/**
+ * enum ethtool_supported_ring_param - indicator caps for setting ring params
+ * @ETHTOOL_RING_USE_RX_BUF_LEN: capability bit for setting rx_buf_len
+ * @ETHTOOL_RING_USE_CQE_SIZE: capability bit for setting cqe_size
+ * @ETHTOOL_RING_USE_TX_PUSH: capability bit for setting tx_push
+ */
+enum ethtool_supported_ring_param {
+ ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0),
+ ETHTOOL_RING_USE_CQE_SIZE = BIT(1),
+ ETHTOOL_RING_USE_TX_PUSH = BIT(2),
+};
+
#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit))
#define __ETH_RSS_HASH(name) __ETH_RSS_HASH_BIT(ETH_RSS_HASH_##name##_BIT)
@@ -81,11 +104,27 @@ enum {
#define ETH_RSS_HASH_NO_CHANGE 0
struct net_device;
+struct netlink_ext_ack;
/* Some generic methods drivers may use in their ethtool_ops */
u32 ethtool_op_get_link(struct net_device *dev);
int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
+
+/* Link extended state and substate. */
+struct ethtool_link_ext_state_info {
+ enum ethtool_link_ext_state link_ext_state;
+ union {
+ enum ethtool_link_ext_substate_autoneg autoneg;
+ enum ethtool_link_ext_substate_link_training link_training;
+ enum ethtool_link_ext_substate_link_logical_mismatch link_logical_mismatch;
+ enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity;
+ enum ethtool_link_ext_substate_cable_issue cable_issue;
+ enum ethtool_link_ext_substate_module module;
+ u32 __link_ext_substate;
+ };
+};
+
/**
* ethtool_rxfh_indir_default - get default value for RX flow hash indirection
* @index: Index in RX flow hash indirection table
@@ -112,6 +151,7 @@ struct ethtool_link_ksettings {
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
} link_modes;
+ u32 lanes;
};
/**
@@ -160,6 +200,11 @@ extern int
__ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *link_ksettings);
+struct kernel_ethtool_coalesce {
+ u8 use_cqe_mode_tx;
+ u8 use_cqe_mode_rx;
+};
+
/**
* ethtool_intersect_link_masks - Given two link masks, AND them together
* @dst: first mask and where result is stored
@@ -177,8 +222,243 @@ void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst,
bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
const unsigned long *src);
+#define ETHTOOL_COALESCE_RX_USECS BIT(0)
+#define ETHTOOL_COALESCE_RX_MAX_FRAMES BIT(1)
+#define ETHTOOL_COALESCE_RX_USECS_IRQ BIT(2)
+#define ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ BIT(3)
+#define ETHTOOL_COALESCE_TX_USECS BIT(4)
+#define ETHTOOL_COALESCE_TX_MAX_FRAMES BIT(5)
+#define ETHTOOL_COALESCE_TX_USECS_IRQ BIT(6)
+#define ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ BIT(7)
+#define ETHTOOL_COALESCE_STATS_BLOCK_USECS BIT(8)
+#define ETHTOOL_COALESCE_USE_ADAPTIVE_RX BIT(9)
+#define ETHTOOL_COALESCE_USE_ADAPTIVE_TX BIT(10)
+#define ETHTOOL_COALESCE_PKT_RATE_LOW BIT(11)
+#define ETHTOOL_COALESCE_RX_USECS_LOW BIT(12)
+#define ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW BIT(13)
+#define ETHTOOL_COALESCE_TX_USECS_LOW BIT(14)
+#define ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW BIT(15)
+#define ETHTOOL_COALESCE_PKT_RATE_HIGH BIT(16)
+#define ETHTOOL_COALESCE_RX_USECS_HIGH BIT(17)
+#define ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH BIT(18)
+#define ETHTOOL_COALESCE_TX_USECS_HIGH BIT(19)
+#define ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH BIT(20)
+#define ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL BIT(21)
+#define ETHTOOL_COALESCE_USE_CQE_RX BIT(22)
+#define ETHTOOL_COALESCE_USE_CQE_TX BIT(23)
+#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(23, 0)
+
+#define ETHTOOL_COALESCE_USECS \
+ (ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS)
+#define ETHTOOL_COALESCE_MAX_FRAMES \
+ (ETHTOOL_COALESCE_RX_MAX_FRAMES | ETHTOOL_COALESCE_TX_MAX_FRAMES)
+#define ETHTOOL_COALESCE_USECS_IRQ \
+ (ETHTOOL_COALESCE_RX_USECS_IRQ | ETHTOOL_COALESCE_TX_USECS_IRQ)
+#define ETHTOOL_COALESCE_MAX_FRAMES_IRQ \
+ (ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ | \
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ)
+#define ETHTOOL_COALESCE_USE_ADAPTIVE \
+ (ETHTOOL_COALESCE_USE_ADAPTIVE_RX | ETHTOOL_COALESCE_USE_ADAPTIVE_TX)
+#define ETHTOOL_COALESCE_USECS_LOW_HIGH \
+ (ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_TX_USECS_LOW | \
+ ETHTOOL_COALESCE_RX_USECS_HIGH | ETHTOOL_COALESCE_TX_USECS_HIGH)
+#define ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH \
+ (ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW | \
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW | \
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH | \
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH)
+#define ETHTOOL_COALESCE_PKT_RATE_RX_USECS \
+ (ETHTOOL_COALESCE_USE_ADAPTIVE_RX | \
+ ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_RX_USECS_HIGH | \
+ ETHTOOL_COALESCE_PKT_RATE_LOW | ETHTOOL_COALESCE_PKT_RATE_HIGH | \
+ ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL)
+#define ETHTOOL_COALESCE_USE_CQE \
+ (ETHTOOL_COALESCE_USE_CQE_RX | ETHTOOL_COALESCE_USE_CQE_TX)
+
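Drivers advertise the subset of these bits their coalescing handlers honor via ethtool_ops::supported_coalesce_params (added further below); the core rejects requests touching anything else. A sketch with hypothetical foo_* handlers:

    static const struct ethtool_ops foo_ethtool_ops = {
            .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
                                         ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
            .get_coalesce = foo_get_coalesce,
            .set_coalesce = foo_set_coalesce,
    };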
+#define ETHTOOL_STAT_NOT_SET (~0ULL)
+
+static inline void ethtool_stats_init(u64 *stats, unsigned int n)
+{
+ while (n--)
+ stats[n] = ETHTOOL_STAT_NOT_SET;
+}
+
+/* Basic IEEE 802.3 MAC statistics (30.3.1.1.*), not otherwise exposed
+ * via a more targeted API.
+ */
+struct ethtool_eth_mac_stats {
+ u64 FramesTransmittedOK;
+ u64 SingleCollisionFrames;
+ u64 MultipleCollisionFrames;
+ u64 FramesReceivedOK;
+ u64 FrameCheckSequenceErrors;
+ u64 AlignmentErrors;
+ u64 OctetsTransmittedOK;
+ u64 FramesWithDeferredXmissions;
+ u64 LateCollisions;
+ u64 FramesAbortedDueToXSColls;
+ u64 FramesLostDueToIntMACXmitError;
+ u64 CarrierSenseErrors;
+ u64 OctetsReceivedOK;
+ u64 FramesLostDueToIntMACRcvError;
+ u64 MulticastFramesXmittedOK;
+ u64 BroadcastFramesXmittedOK;
+ u64 FramesWithExcessiveDeferral;
+ u64 MulticastFramesReceivedOK;
+ u64 BroadcastFramesReceivedOK;
+ u64 InRangeLengthErrors;
+ u64 OutOfRangeLengthField;
+ u64 FrameTooLongErrors;
+};
+
+/* Basic IEEE 802.3 PHY statistics (30.3.2.1.*), not otherwise exposed
+ * via a more targeted API.
+ */
+struct ethtool_eth_phy_stats {
+ u64 SymbolErrorDuringCarrier;
+};
+
+/* Basic IEEE 802.3 MAC Ctrl statistics (30.3.3.*), not otherwise exposed
+ * via a more targeted API.
+ */
+struct ethtool_eth_ctrl_stats {
+ u64 MACControlFramesTransmitted;
+ u64 MACControlFramesReceived;
+ u64 UnsupportedOpcodesReceived;
+};
+
+/**
+ * struct ethtool_pause_stats - statistics for IEEE 802.3x pause frames
+ * @tx_pause_frames: transmitted pause frame count. Reported to user space
+ * as %ETHTOOL_A_PAUSE_STAT_TX_FRAMES.
+ *
+ * Equivalent to `30.3.4.2 aPAUSEMACCtrlFramesTransmitted`
+ * from the standard.
+ *
+ * @rx_pause_frames: received pause frame count. Reported to user space
+ * as %ETHTOOL_A_PAUSE_STAT_RX_FRAMES.
+ *
+ * Equivalent to `30.3.4.3 aPAUSEMACCtrlFramesReceived`
+ * from the standard.
+ */
+struct ethtool_pause_stats {
+ u64 tx_pause_frames;
+ u64 rx_pause_frames;
+};
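A driver fills in only the counters its hardware has; untouched fields keep the ETHTOOL_STAT_NOT_SET sentinel the core pre-loads. Sketch (foo_* names hypothetical):

    static void foo_get_pause_stats(struct net_device *dev,
                                    struct ethtool_pause_stats *stats)
    {
            struct foo_priv *priv = netdev_priv(dev);

            stats->tx_pause_frames = foo_read_stat(priv, FOO_TX_PAUSE);
            /* rx counter not available on this hardware: left untouched */
    }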
+
+#define ETHTOOL_MAX_LANES 8
+
+/**
+ * struct ethtool_fec_stats - statistics for IEEE 802.3 FEC
+ * @corrected_blocks: number of received blocks corrected by FEC
+ * Reported to user space as %ETHTOOL_A_FEC_STAT_CORRECTED.
+ *
+ * Equivalent to `30.5.1.1.17 aFECCorrectedBlocks` from the standard.
+ *
+ * @uncorrectable_blocks: number of received blocks FEC was not able to correct
+ * Reported to user space as %ETHTOOL_A_FEC_STAT_UNCORR.
+ *
+ * Equivalent to `30.5.1.1.18 aFECUncorrectableBlocks` from the standard.
+ *
+ * @corrected_bits: number of bits corrected by FEC
+ * Similar to @corrected_blocks but counts individual bit changes,
+ * not entire FEC data blocks. This is a non-standard statistic.
+ * Reported to user space as %ETHTOOL_A_FEC_STAT_CORR_BITS.
+ *
+ * @lane: per-lane/PCS-instance counts as defined by the standard
+ * @total: error counts for the entire port, for drivers incapable of reporting
+ * per-lane stats
+ *
+ * Drivers should fill in either only total or per-lane statistics; the
+ * core will take care of adding up lane values to produce the total.
+ */
+struct ethtool_fec_stats {
+ struct ethtool_fec_stat {
+ u64 total;
+ u64 lanes[ETHTOOL_MAX_LANES];
+ } corrected_blocks, uncorrectable_blocks, corrected_bits;
+};
+
+/**
+ * struct ethtool_rmon_hist_range - byte range for histogram statistics
+ * @low: low bound of the bucket (inclusive)
+ * @high: high bound of the bucket (inclusive)
+ */
+struct ethtool_rmon_hist_range {
+ u16 low;
+ u16 high;
+};
+
+#define ETHTOOL_RMON_HIST_MAX 10
+
+/**
+ * struct ethtool_rmon_stats - selected RMON (RFC 2819) statistics
+ * @undersize_pkts: Equivalent to `etherStatsUndersizePkts` from the RFC.
+ * @oversize_pkts: Equivalent to `etherStatsOversizePkts` from the RFC.
+ * @fragments: Equivalent to `etherStatsFragments` from the RFC.
+ * @jabbers: Equivalent to `etherStatsJabbers` from the RFC.
+ * @hist: Packet counter for packet length buckets (e.g.
+ * `etherStatsPkts128to255Octets` from the RFC).
+ * @hist_tx: Tx counters in similar form to @hist, not defined in the RFC.
+ *
+ * Selection of RMON (RFC 2819) statistics which are not exposed via different
+ * APIs, primarily the packet-length-based counters.
+ * Unfortunately different designs choose different buckets beyond
+ * the 1024B mark (jumbo frame territory), so the definition of the bucket
+ * ranges is left to the driver.
+ */
+struct ethtool_rmon_stats {
+ u64 undersize_pkts;
+ u64 oversize_pkts;
+ u64 fragments;
+ u64 jabbers;
+
+ u64 hist[ETHTOOL_RMON_HIST_MAX];
+ u64 hist_tx[ETHTOOL_RMON_HIST_MAX];
+};
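Since the bucket layout is device-defined, a driver returns its own zero-terminated range table alongside the counters; sketch (foo_* names hypothetical):

    static const struct ethtool_rmon_hist_range foo_rmon_ranges[] = {
            {    0,   64 },
            {   65,  127 },
            {  128,  255 },
            {  256, 1023 },
            { 1024, 1518 },
            {} /* terminator */
    };

    static void foo_get_rmon_stats(struct net_device *dev,
                                   struct ethtool_rmon_stats *stats,
                                   const struct ethtool_rmon_hist_range **ranges)
    {
            *ranges = foo_rmon_ranges;
            stats->undersize_pkts = foo_read_stat(dev, FOO_RMON_UNDERSZ);
            /* fill hist[]/hist_tx[] to match the ranges above */
    }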
+
+#define ETH_MODULE_EEPROM_PAGE_LEN 128
+#define ETH_MODULE_MAX_I2C_ADDRESS 0x7f
+
+/**
+ * struct ethtool_module_eeprom - EEPROM dump from specified page
+ * @offset: Offset within the specified EEPROM page to begin read, in bytes.
+ * @length: Number of bytes to read.
+ * @page: Page number to read from.
+ * @bank: Page bank number to read from, if applicable by EEPROM spec.
+ * @i2c_address: I2C address of a page. Value less than 0x7f expected. Most
+ * EEPROMs use 0x50 or 0x51.
+ * @data: Pointer to buffer with EEPROM data of @length size.
+ *
+ * This can be used to manage pages during EEPROM dump in ethtool and pass
+ * required information to the driver.
+ */
+struct ethtool_module_eeprom {
+ u32 offset;
+ u32 length;
+ u8 page;
+ u8 bank;
+ u8 i2c_address;
+ u8 *data;
+};
+
+/**
+ * struct ethtool_module_power_mode_params - module power mode parameters
+ * @policy: The power mode policy enforced by the host for the plug-in module.
+ * @mode: The operational power mode of the plug-in module. Should be filled by
+ * device drivers on get operations.
+ */
+struct ethtool_module_power_mode_params {
+ enum ethtool_module_power_mode_policy policy;
+ enum ethtool_module_power_mode mode;
+};
+
/**
* struct ethtool_ops - optional netdev operations
+ * @cap_link_lanes_supported: indicates if the driver supports lanes
+ * parameter.
+ * @supported_coalesce_params: supported types of interrupt coalescing.
+ * @supported_ring_params: supported ring params.
* @get_drvinfo: Report driver/device information. Should only set the
* @driver, @version, @fw_version and @bus_info fields. If not
* implemented, the @driver and @bus_info fields will be filled in
@@ -196,6 +476,14 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* @get_link: Report whether physical link is up. Will only be called if
* the netdev is up. Should usually be set to ethtool_op_get_link(),
* which uses netif_carrier_ok().
+ * @get_link_ext_state: Report link extended state. Should set link_ext_state and
+ * link_ext_substate (link_ext_substate of 0 means link_ext_substate is unknown,
+ * do not attach ext_substate attribute to netlink message). If link_ext_state
+ * and link_ext_substate are unknown, return -ENODATA. If not implemented,
+ * link_ext_state and link_ext_substate will not be sent to userspace.
+ * @get_eeprom_len: Read range of EEPROM addresses for validation of
+ * @get_eeprom and @set_eeprom requests.
+ * Returns 0 if device does not support EEPROM access.
* @get_eeprom: Read data from the device EEPROM.
* Should fill in the magic field. Don't need to check len for zero
* or wraparound. Fill in the data argument with the eeprom values
@@ -207,10 +495,14 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* or zero.
* @get_coalesce: Get interrupt coalescing parameters. Returns a negative
* error code or zero.
- * @set_coalesce: Set interrupt coalescing parameters. Returns a negative
- * error code or zero.
+ * @set_coalesce: Set interrupt coalescing parameters. Supported coalescing
+ * types should be set in @supported_coalesce_params.
+ * Returns a negative error code or zero.
* @get_ringparam: Report ring sizes
* @set_ringparam: Set ring sizes. Returns a negative error code or zero.
+ * @get_pause_stats: Report pause frame statistics. Drivers must not zero
+ * statistics which they don't report. The stats structure is initialized
+ * to ETHTOOL_STAT_NOT_SET indicating driver does not report statistics.
* @get_pauseparam: Report pause parameters
* @set_pauseparam: Set pause parameters. Returns a negative error code
* or zero.
@@ -284,6 +576,8 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* @get_module_eeprom: Get the eeprom information from the plug-in module
* @get_eee: Get Energy-Efficient (EEE) supported and status.
* @set_eee: Set EEE status (enable/disable) as well as LPI timers.
+ * @get_tunable: Read the value of a driver / device tunable.
+ * @set_tunable: Set the value of a driver / device tunable.
* @get_per_queue_coalesce: Get interrupt coalescing parameters per queue.
* It must check that the given queue number is valid. If neither a RX nor
* a TX queue has this number, return -EINVAL. If only a RX queue or a TX
@@ -292,7 +586,8 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* @set_per_queue_coalesce: Set interrupt coalescing parameters per queue.
* It must check that the given queue number is valid. If neither a RX nor
* a TX queue has this number, return -EINVAL. If only a RX queue or a TX
- * queue has this number, ignore the inapplicable fields.
+ * queue has this number, ignore the inapplicable fields. Supported
+ * coalescing types should be set in @supported_coalesce_params.
* Returns a negative error code or zero.
* @get_link_ksettings: Get various device settings including Ethernet link
* settings. The %cmd and %link_mode_masks_nwords fields should be
@@ -304,11 +599,31 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter),
* any change to them will be overwritten by kernel. Returns a negative
* error code or zero.
+ * @get_fec_stats: Report FEC statistics.
+ * Core will sum up per-lane stats to get the total.
+ * Drivers must not zero statistics which they don't report. The stats
+ * structure is initialized to ETHTOOL_STAT_NOT_SET indicating driver does
+ * not report statistics.
* @get_fecparam: Get the network device Forward Error Correction parameters.
* @set_fecparam: Set the network device Forward Error Correction parameters.
* @get_ethtool_phy_stats: Return extended statistics about the PHY device.
* This is only useful if the device maintains PHY statistics and
* cannot use the standard PHY library helpers.
+ * @get_phy_tunable: Read the value of a PHY tunable.
+ * @set_phy_tunable: Set the value of a PHY tunable.
+ * @get_module_eeprom_by_page: Get a region of plug-in module EEPROM data from
+ * specified page. Returns a negative error code or the number of bytes
+ * read.
+ * @get_eth_phy_stats: Query some of the IEEE 802.3 PHY statistics.
+ * @get_eth_mac_stats: Query some of the IEEE 802.3 MAC statistics.
+ * @get_eth_ctrl_stats: Query some of the IEEE 802.3 MAC Ctrl statistics.
+ * @get_rmon_stats: Query some of the RMON (RFC 2819) statistics.
+ * Set %ranges to a pointer to a zero-terminated array of byte ranges.
+ * @get_module_power_mode: Get the power mode policy for the plug-in module
+ * used by the network device and its operational power mode, if
+ * plugged-in.
+ * @set_module_power_mode: Set the power mode policy for the plug-in module
+ * used by the network device.
*
* All operations are optional (i.e. the function pointer may be set
* to %NULL) and callers must take this into account. Callers must
@@ -323,6 +638,9 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* of the generic netdev features interface.
*/
struct ethtool_ops {
+ u32 cap_link_lanes_supported:1;
+ u32 supported_coalesce_params;
+ u32 supported_ring_params;
void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
int (*get_regs_len)(struct net_device *);
void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
@@ -332,17 +650,31 @@ struct ethtool_ops {
void (*set_msglevel)(struct net_device *, u32);
int (*nway_reset)(struct net_device *);
u32 (*get_link)(struct net_device *);
+ int (*get_link_ext_state)(struct net_device *,
+ struct ethtool_link_ext_state_info *);
int (*get_eeprom_len)(struct net_device *);
int (*get_eeprom)(struct net_device *,
struct ethtool_eeprom *, u8 *);
int (*set_eeprom)(struct net_device *,
struct ethtool_eeprom *, u8 *);
- int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
- int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
+ int (*get_coalesce)(struct net_device *,
+ struct ethtool_coalesce *,
+ struct kernel_ethtool_coalesce *,
+ struct netlink_ext_ack *);
+ int (*set_coalesce)(struct net_device *,
+ struct ethtool_coalesce *,
+ struct kernel_ethtool_coalesce *,
+ struct netlink_ext_ack *);
void (*get_ringparam)(struct net_device *,
- struct ethtool_ringparam *);
+ struct ethtool_ringparam *,
+ struct kernel_ethtool_ringparam *,
+ struct netlink_ext_ack *);
int (*set_ringparam)(struct net_device *,
- struct ethtool_ringparam *);
+ struct ethtool_ringparam *,
+ struct kernel_ethtool_ringparam *,
+ struct netlink_ext_ack *);
+ void (*get_pause_stats)(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats);
void (*get_pauseparam)(struct net_device *,
struct ethtool_pauseparam*);
int (*set_pauseparam)(struct net_device *,
@@ -398,17 +730,43 @@ struct ethtool_ops {
struct ethtool_link_ksettings *);
int (*set_link_ksettings)(struct net_device *,
const struct ethtool_link_ksettings *);
+ void (*get_fec_stats)(struct net_device *dev,
+ struct ethtool_fec_stats *fec_stats);
int (*get_fecparam)(struct net_device *,
struct ethtool_fecparam *);
int (*set_fecparam)(struct net_device *,
struct ethtool_fecparam *);
void (*get_ethtool_phy_stats)(struct net_device *,
struct ethtool_stats *, u64 *);
+ int (*get_phy_tunable)(struct net_device *,
+ const struct ethtool_tunable *, void *);
+ int (*set_phy_tunable)(struct net_device *,
+ const struct ethtool_tunable *, const void *);
+ int (*get_module_eeprom_by_page)(struct net_device *dev,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack);
+ void (*get_eth_phy_stats)(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats);
+ void (*get_eth_mac_stats)(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats);
+ void (*get_eth_ctrl_stats)(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats);
+ void (*get_rmon_stats)(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges);
+ int (*get_module_power_mode)(struct net_device *dev,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack);
+ int (*set_module_power_mode)(struct net_device *dev,
+ const struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack);
};
+int ethtool_check_ops(const struct ethtool_ops *ops);
+
struct ethtool_rx_flow_rule {
struct flow_rule *rule;
- unsigned long priv[0];
+ unsigned long priv[];
};
struct ethtool_rx_flow_spec_input {
@@ -420,4 +778,69 @@ struct ethtool_rx_flow_rule *
ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input);
void ethtool_rx_flow_rule_destroy(struct ethtool_rx_flow_rule *rule);
+bool ethtool_virtdev_validate_cmd(const struct ethtool_link_ksettings *cmd);
+int ethtool_virtdev_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd,
+ u32 *dev_speed, u8 *dev_duplex);
+
+struct phy_device;
+struct phy_tdr_config;
+
+/**
+ * struct ethtool_phy_ops - Optional PHY device options
+ * @get_sset_count: Get number of strings that @get_strings will write.
+ * @get_strings: Return a set of strings that describe the requested objects
+ * @get_stats: Return extended statistics about the PHY device.
+ * @start_cable_test: Start a cable test
+ * @start_cable_test_tdr: Start a Time Domain Reflectometry cable test
+ *
+ * All operations are optional (i.e. the function pointer may be set to %NULL)
+ * and callers must take this into account. Callers must hold the RTNL lock.
+ */
+struct ethtool_phy_ops {
+ int (*get_sset_count)(struct phy_device *dev);
+ int (*get_strings)(struct phy_device *dev, u8 *data);
+ int (*get_stats)(struct phy_device *dev,
+ struct ethtool_stats *stats, u64 *data);
+ int (*start_cable_test)(struct phy_device *phydev,
+ struct netlink_ext_ack *extack);
+ int (*start_cable_test_tdr)(struct phy_device *phydev,
+ struct netlink_ext_ack *extack,
+ const struct phy_tdr_config *config);
+};
+
+/**
+ * ethtool_set_ethtool_phy_ops - Set the ethtool_phy_ops singleton
+ * @ops: Ethtool PHY operations to set
+ */
+void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops);
+
+/**
+ * ethtool_params_from_link_mode - Derive link parameters from a given link mode
+ * @link_ksettings: Link parameters to be derived from the link mode
+ * @link_mode: Link mode
+ */
+void
+ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings,
+ enum ethtool_link_mode_bit_indices link_mode);
+
+/**
+ * ethtool_get_phc_vclocks - Derive phc vclocks information; the caller
+ * is responsible for freeing the memory of @vclock_index
+ * @dev: pointer to net_device structure
+ * @vclock_index: pointer to a pointer that receives the vclock index array
+ *
+ * Return number of phc vclocks
+ */
+int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index);
+
+/**
+ * ethtool_sprintf - Write formatted string to ethtool string data
+ * @data: Pointer to start of string to update
+ * @fmt: Format of string to write
+ *
+ * Write formatted string to data. Update data to point at start of
+ * next string.
+ */
+extern __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...);
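Typical use in a get_strings handler, where the helper advances @data past each emitted name; sketch (foo_* names hypothetical):

    static void foo_get_strings(struct net_device *dev, u32 sset, u8 *data)
    {
            int i;

            if (sset != ETH_SS_STATS)
                    return;

            for (i = 0; i < FOO_NUM_QUEUES; i++)
                    ethtool_sprintf(&data, "rxq%d_packets", i);
    }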
#endif /* _LINUX_ETHTOOL_H */
diff --git a/include/linux/ethtool_netlink.h b/include/linux/ethtool_netlink.h
index d01b77887f82..aba348d58ff6 100644
--- a/include/linux/ethtool_netlink.h
+++ b/include/linux/ethtool_netlink.h
@@ -10,8 +10,65 @@
#define __ETHTOOL_LINK_MODE_MASK_NWORDS \
DIV_ROUND_UP(__ETHTOOL_LINK_MODE_MASK_NBITS, 32)
+#define ETHTOOL_PAUSE_STAT_CNT (__ETHTOOL_A_PAUSE_STAT_CNT - \
+ ETHTOOL_A_PAUSE_STAT_TX_FRAMES)
+
enum ethtool_multicast_groups {
ETHNL_MCGRP_MONITOR,
};
+struct phy_device;
+
+#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
+int ethnl_cable_test_alloc(struct phy_device *phydev, u8 cmd);
+void ethnl_cable_test_free(struct phy_device *phydev);
+void ethnl_cable_test_finished(struct phy_device *phydev);
+int ethnl_cable_test_result(struct phy_device *phydev, u8 pair, u8 result);
+int ethnl_cable_test_fault_length(struct phy_device *phydev, u8 pair, u32 cm);
+int ethnl_cable_test_amplitude(struct phy_device *phydev, u8 pair, s16 mV);
+int ethnl_cable_test_pulse(struct phy_device *phydev, u16 mV);
+int ethnl_cable_test_step(struct phy_device *phydev, u32 first, u32 last,
+ u32 step);
+#else
+static inline int ethnl_cable_test_alloc(struct phy_device *phydev, u8 cmd)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void ethnl_cable_test_free(struct phy_device *phydev)
+{
+}
+
+static inline void ethnl_cable_test_finished(struct phy_device *phydev)
+{
+}
+static inline int ethnl_cable_test_result(struct phy_device *phydev, u8 pair,
+ u8 result)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ethnl_cable_test_fault_length(struct phy_device *phydev,
+ u8 pair, u32 cm)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ethnl_cable_test_amplitude(struct phy_device *phydev,
+ u8 pair, s16 mV)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ethnl_cable_test_pulse(struct phy_device *phydev, u16 mV)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ethnl_cable_test_step(struct phy_device *phydev, u32 first,
+ u32 last, u32 step)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* IS_ENABLED(CONFIG_ETHTOOL_NETLINK) */
#endif /* _LINUX_ETHTOOL_NETLINK_H_ */
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index dc4fd8a6644d..30eb30d6909b 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
+#include <linux/sched.h>
/*
* CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
@@ -41,12 +42,11 @@ struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
__u64 *cnt);
+void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
-DECLARE_PER_CPU(int, eventfd_wake_count);
-
-static inline bool eventfd_signal_count(void)
+static inline bool eventfd_signal_allowed(void)
{
- return this_cpu_read(eventfd_wake_count);
+ return !current->in_eventfd;
}
#else /* CONFIG_EVENTFD */
@@ -77,9 +77,14 @@ static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
return -ENOSYS;
}
-static inline bool eventfd_signal_count(void)
+static inline bool eventfd_signal_allowed(void)
{
- return false;
+ return true;
+}
+
+static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
+{
+}
#endif
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index 8f000fada5a4..3337745d81bd 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -18,18 +18,10 @@ struct file;
#ifdef CONFIG_EPOLL
-#ifdef CONFIG_CHECKPOINT_RESTORE
+#ifdef CONFIG_KCMP
struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, unsigned long toff);
#endif
-/* Used to initialize the epoll bits inside the "struct file" */
-static inline void eventpoll_init_file(struct file *file)
-{
- INIT_LIST_HEAD(&file->f_ep_links);
- INIT_LIST_HEAD(&file->f_tfile_llink);
-}
-
-
/* Used to release the epoll bits inside the "struct file" */
void eventpoll_release_file(struct file *file);
@@ -50,7 +42,7 @@ static inline void eventpoll_release(struct file *file)
 * because the file is on the way to be removed and nobody ( but
 * eventpoll ) still has a reference to this file.
*/
- if (likely(list_empty(&file->f_ep_links)))
+ if (likely(!file->f_ep))
return;
/*
@@ -72,9 +64,26 @@ static inline int ep_op_has_event(int op)
#else
-static inline void eventpoll_init_file(struct file *file) {}
static inline void eventpoll_release(struct file *file) {}
#endif
+#if defined(CONFIG_ARM) && defined(CONFIG_OABI_COMPAT)
+/* ARM OABI has an incompatible struct layout and needs a special handler */
+extern struct epoll_event __user *
+epoll_put_uevent(__poll_t revents, __u64 data,
+ struct epoll_event __user *uevent);
+#else
+static inline struct epoll_event __user *
+epoll_put_uevent(__poll_t revents, __u64 data,
+ struct epoll_event __user *uevent)
+{
+ if (__put_user(revents, &uevent->events) ||
+ __put_user(data, &uevent->data))
+ return NULL;
+
+ return uevent+1;
+}
+#endif
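The helper returns the advanced user pointer on success and NULL on a fault, which keeps the copy-out loop compact; a sketch, with hypothetical ready-list accessors:

    /* Sketch: copying ready events out to user space. */
    static int copy_out_events(struct epoll_event __user *events, int maxevents)
    {
            struct epoll_event __user *uevent = events;
            int copied = 0;

            while (copied < maxevents && have_ready_event()) {
                    struct ready_ev ev = pop_ready_event();  /* hypothetical */

                    uevent = epoll_put_uevent(ev.revents, ev.data, uevent);
                    if (!uevent)
                            return copied ? copied : -EFAULT;
                    copied++;
            }
            return copied;
    }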
+
#endif /* #ifndef _LINUX_EVENTPOLL_H */
diff --git a/include/linux/evm.h b/include/linux/evm.h
index 8302bc29bb35..aa63e0b3c0a2 100644
--- a/include/linux/evm.h
+++ b/include/linux/evm.h
@@ -21,20 +21,28 @@ extern enum integrity_status evm_verifyxattr(struct dentry *dentry,
void *xattr_value,
size_t xattr_value_len,
struct integrity_iint_cache *iint);
-extern int evm_inode_setattr(struct dentry *dentry, struct iattr *attr);
+extern int evm_inode_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *attr);
extern void evm_inode_post_setattr(struct dentry *dentry, int ia_valid);
-extern int evm_inode_setxattr(struct dentry *dentry, const char *name,
+extern int evm_inode_setxattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name,
const void *value, size_t size);
extern void evm_inode_post_setxattr(struct dentry *dentry,
const char *xattr_name,
const void *xattr_value,
size_t xattr_value_len);
-extern int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name);
+extern int evm_inode_removexattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *xattr_name);
extern void evm_inode_post_removexattr(struct dentry *dentry,
const char *xattr_name);
extern int evm_inode_init_security(struct inode *inode,
const struct xattr *xattr_array,
struct xattr *evm);
+extern bool evm_revalidate_status(const char *xattr_name);
+extern int evm_protected_xattr_if_enabled(const char *req_xattr_name);
+extern int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer,
+ int buffer_size, char type,
+ bool canonical_fmt);
#ifdef CONFIG_FS_POSIX_ACL
extern int posix_xattr_acl(const char *xattrname);
#else
@@ -61,7 +69,8 @@ static inline enum integrity_status evm_verifyxattr(struct dentry *dentry,
}
#endif
-static inline int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
+static inline int evm_inode_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *attr)
{
return 0;
}
@@ -71,7 +80,8 @@ static inline void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
return;
}
-static inline int evm_inode_setxattr(struct dentry *dentry, const char *name,
+static inline int evm_inode_setxattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name,
const void *value, size_t size)
{
return 0;
@@ -85,7 +95,8 @@ static inline void evm_inode_post_setxattr(struct dentry *dentry,
return;
}
-static inline int evm_inode_removexattr(struct dentry *dentry,
+static inline int evm_inode_removexattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry,
const char *xattr_name)
{
return 0;
@@ -104,5 +115,22 @@ static inline int evm_inode_init_security(struct inode *inode,
return 0;
}
+static inline bool evm_revalidate_status(const char *xattr_name)
+{
+ return false;
+}
+
+static inline int evm_protected_xattr_if_enabled(const char *req_xattr_name)
+{
+ return false;
+}
+
+static inline int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer,
+ int buffer_size, char type,
+ bool canonical_fmt)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_EVM */
#endif /* LINUX_EVM_H */
diff --git a/include/linux/export-internal.h b/include/linux/export-internal.h
new file mode 100644
index 000000000000..fe7e6ba918f1
--- /dev/null
+++ b/include/linux/export-internal.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Please do not include this explicitly.
+ * This is used by C files generated by modpost.
+ */
+
+#ifndef __LINUX_EXPORT_INTERNAL_H__
+#define __LINUX_EXPORT_INTERNAL_H__
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#define SYMBOL_CRC(sym, crc, sec) \
+ asm(".section \"___kcrctab" sec "+" #sym "\",\"a\"" "\n" \
+ "__crc_" #sym ":" "\n" \
+ ".long " #crc "\n" \
+ ".previous" "\n")
+
+#endif /* __LINUX_EXPORT_INTERNAL_H__ */
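A file generated by modpost would then contain one such line per versioned export, for example (symbol names and CRC values illustrative only):

    #include <linux/export-internal.h>

    SYMBOL_CRC(foo_init, 0x12345678, "");           /* EXPORT_SYMBOL */
    SYMBOL_CRC(foo_reset, 0x9abcdef0, "_gpl");      /* EXPORT_SYMBOL_GPL */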
diff --git a/include/linux/export.h b/include/linux/export.h
index fceb5e855717..3f31ced0d977 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -2,6 +2,8 @@
#ifndef _LINUX_EXPORT_H
#define _LINUX_EXPORT_H
+#include <linux/stringify.h>
+
/*
* Export symbols from the kernel to modules. Forked from module.h
* to reduce the amount of pointless cruft we feed to gcc when only
@@ -11,6 +13,14 @@
* hackers place grumpy comments in header files.
*/
+/*
+ * This comment block is used by fixdep. Please do not remove.
+ *
+ * When CONFIG_MODVERSIONS is changed from n to y, all source files having
+ * EXPORT_SYMBOL variants must be re-compiled because genksyms is run as a
+ * side effect of the *.o build rule.
+ */
+
#ifndef __ASSEMBLY__
#ifdef MODULE
extern struct module __this_module;
@@ -19,26 +29,6 @@ extern struct module __this_module;
#define THIS_MODULE ((struct module *)0)
#endif
-#ifdef CONFIG_MODVERSIONS
-/* Mark the CRC weak since genksyms apparently decides not to
- * generate a checksums for some symbols */
-#if defined(CONFIG_MODULE_REL_CRCS)
-#define __CRC_SYMBOL(sym, sec) \
- asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
- " .weak __crc_" #sym " \n" \
- " .long __crc_" #sym " - . \n" \
- " .previous \n")
-#else
-#define __CRC_SYMBOL(sym, sec) \
- asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
- " .weak __crc_" #sym " \n" \
- " .long __crc_" #sym " \n" \
- " .previous \n")
-#endif
-#else
-#define __CRC_SYMBOL(sym, sec)
-#endif
-
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
#include <linux/compiler.h>
/*
@@ -85,7 +75,6 @@ struct kernel_symbol {
/*
* For every exported symbol, do the following:
*
- * - If applicable, place a CRC entry in the __kcrctab section.
* - Put the name of the symbol and namespace (empty string "" for none) in
* __ksymtab_strings.
* - Place a struct kernel_symbol entry in the __ksymtab section.
@@ -98,7 +87,6 @@ struct kernel_symbol {
extern typeof(sym) sym; \
extern const char __kstrtab_##sym[]; \
extern const char __kstrtabns_##sym[]; \
- __CRC_SYMBOL(sym, sec); \
asm(" .section \"__ksymtab_strings\",\"aMS\",%progbits,1 \n" \
"__kstrtab_" #sym ": \n" \
" .asciz \"" #sym "\" \n" \
@@ -140,7 +128,12 @@ struct kernel_symbol {
#define ___cond_export_sym(sym, sec, ns, enabled) \
__cond_export_sym_##enabled(sym, sec, ns)
#define __cond_export_sym_1(sym, sec, ns) ___EXPORT_SYMBOL(sym, sec, ns)
+
+#ifdef __GENKSYMS__
+#define __cond_export_sym_0(sym, sec, ns) __GENKSYMS_EXPORT_SYMBOL(sym)
+#else
#define __cond_export_sym_0(sym, sec, ns) /* nothing */
+#endif
#else
@@ -149,7 +142,6 @@ struct kernel_symbol {
#endif /* CONFIG_MODULES */
#ifdef DEFAULT_SYMBOL_NAMESPACE
-#include <linux/stringify.h>
#define _EXPORT_SYMBOL(sym, sec) __EXPORT_SYMBOL(sym, sec, __stringify(DEFAULT_SYMBOL_NAMESPACE))
#else
#define _EXPORT_SYMBOL(sym, sec) __EXPORT_SYMBOL(sym, sec, "")
@@ -157,17 +149,8 @@ struct kernel_symbol {
#define EXPORT_SYMBOL(sym) _EXPORT_SYMBOL(sym, "")
#define EXPORT_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "_gpl")
-#define EXPORT_SYMBOL_GPL_FUTURE(sym) _EXPORT_SYMBOL(sym, "_gpl_future")
-#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", #ns)
-#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "_gpl", #ns)
-
-#ifdef CONFIG_UNUSED_SYMBOLS
-#define EXPORT_UNUSED_SYMBOL(sym) _EXPORT_SYMBOL(sym, "_unused")
-#define EXPORT_UNUSED_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "_unused_gpl")
-#else
-#define EXPORT_UNUSED_SYMBOL(sym)
-#define EXPORT_UNUSED_SYMBOL_GPL(sym)
-#endif
+#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", __stringify(ns))
+#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "_gpl", __stringify(ns))
#endif /* !__ASSEMBLY__ */
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index d896b8657085..fe848901fcc3 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -178,7 +178,7 @@ struct fid {
* get_name:
* @get_name should find a name for the given @child in the given @parent
* directory. The name should be stored in the @name (with the
- * understanding that it is already pointing to a a %NAME_MAX+1 sized
+ * understanding that it is already pointing to a %NAME_MAX+1 sized
* buffer. get_name() should return %0 on success, a negative error code
* or error. @get_name will be called without @parent->i_mutex held.
*
@@ -213,12 +213,26 @@ struct export_operations {
bool write, u32 *device_generation);
int (*commit_blocks)(struct inode *inode, struct iomap *iomaps,
int nr_iomaps, struct iattr *iattr);
+ u64 (*fetch_iversion)(struct inode *);
+#define EXPORT_OP_NOWCC (0x1) /* don't collect v3 wcc data */
+#define EXPORT_OP_NOSUBTREECHK (0x2) /* no subtree checking */
+#define EXPORT_OP_CLOSE_BEFORE_UNLINK (0x4) /* close files before unlink */
+#define EXPORT_OP_REMOTE_FS (0x8) /* Filesystem is remote */
+#define EXPORT_OP_NOATOMIC_ATTR (0x10) /* Filesystem cannot supply
+ atomic attribute updates
+ */
+ unsigned long flags;
};
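A filesystem opts into these behaviors through its export_operations; sketch (foofs_* names hypothetical):

    static const struct export_operations foofs_export_ops = {
            .fh_to_dentry   = foofs_fh_to_dentry,
            .fetch_iversion = foofs_fetch_iversion,
            .flags          = EXPORT_OP_NOWCC | EXPORT_OP_REMOTE_FS,
    };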
extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid,
int *max_len, struct inode *parent);
extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid,
int *max_len, int connectable);
+extern struct dentry *exportfs_decode_fh_raw(struct vfsmount *mnt,
+ struct fid *fid, int fh_len,
+ int fileid_type,
+ int (*acceptable)(void *, struct dentry *),
+ void *context);
extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *),
void *context);
diff --git a/include/linux/extcon-provider.h b/include/linux/extcon-provider.h
index 1c143d200caa..fa70945f4e6b 100644
--- a/include/linux/extcon-provider.h
+++ b/include/linux/extcon-provider.h
@@ -17,30 +17,30 @@ struct extcon_dev;
#if IS_ENABLED(CONFIG_EXTCON)
/* Following APIs register/unregister the extcon device. */
-extern int extcon_dev_register(struct extcon_dev *edev);
-extern void extcon_dev_unregister(struct extcon_dev *edev);
-extern int devm_extcon_dev_register(struct device *dev,
+int extcon_dev_register(struct extcon_dev *edev);
+void extcon_dev_unregister(struct extcon_dev *edev);
+int devm_extcon_dev_register(struct device *dev,
struct extcon_dev *edev);
-extern void devm_extcon_dev_unregister(struct device *dev,
+void devm_extcon_dev_unregister(struct device *dev,
struct extcon_dev *edev);
/* Following APIs allocate/free the memory of the extcon device. */
-extern struct extcon_dev *extcon_dev_allocate(const unsigned int *cable);
-extern void extcon_dev_free(struct extcon_dev *edev);
-extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
+struct extcon_dev *extcon_dev_allocate(const unsigned int *cable);
+void extcon_dev_free(struct extcon_dev *edev);
+struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
const unsigned int *cable);
-extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);
+void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);
/* Synchronize the state and property value for each external connector. */
-extern int extcon_sync(struct extcon_dev *edev, unsigned int id);
+int extcon_sync(struct extcon_dev *edev, unsigned int id);
/*
* Following APIs set the connected state of each external connector.
* The 'id' argument indicates the defined external connector.
*/
-extern int extcon_set_state(struct extcon_dev *edev, unsigned int id,
+int extcon_set_state(struct extcon_dev *edev, unsigned int id,
bool state);
-extern int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
+int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
bool state);
/*
@@ -52,13 +52,13 @@ extern int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
* for each external connector. They are used to set the capability of the
* property of each external connector based on the id and property.
*/
-extern int extcon_set_property(struct extcon_dev *edev, unsigned int id,
+int extcon_set_property(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
union extcon_property_value prop_val);
-extern int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
+int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
union extcon_property_value prop_val);
-extern int extcon_set_property_capability(struct extcon_dev *edev,
+int extcon_set_property_capability(struct extcon_dev *edev,
unsigned int id, unsigned int prop);
#else /* CONFIG_EXTCON */
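Dropping the redundant extern keywords changes no semantics; the provider flow is still allocate, register, then publish state. A minimal probe sketch under assumed names (foo_probe, foo_cables):

#include <linux/extcon-provider.h>
#include <linux/platform_device.h>

static const unsigned int foo_cables[] = {
	EXTCON_USB, EXTCON_USB_HOST, EXTCON_NONE,	/* list must end with EXTCON_NONE */
};

static int foo_probe(struct platform_device *pdev)
{
	struct extcon_dev *edev;
	int ret;

	edev = devm_extcon_dev_allocate(&pdev->dev, foo_cables);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	ret = devm_extcon_dev_register(&pdev->dev, edev);
	if (ret)
		return ret;

	return extcon_set_state_sync(edev, EXTCON_USB, true);
}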
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 1b1d77ec2114..3c45c3846fe9 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -76,6 +76,8 @@
#define EXTCON_DISP_VGA 43 /* Video Graphics Array */
#define EXTCON_DISP_DP 44 /* Display Port */
#define EXTCON_DISP_HMD 45 /* Head-Mounted Display */
+#define EXTCON_DISP_CVBS 46 /* Composite Video Broadcast Signal */
+#define EXTCON_DISP_EDP 47 /* Embedded Display Port */
/* Miscellaneous external connector */
#define EXTCON_DOCK 60
@@ -271,9 +273,32 @@ static inline void devm_extcon_unregister_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb) { }
+static inline int extcon_register_notifier_all(struct extcon_dev *edev,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int extcon_unregister_notifier_all(struct extcon_dev *edev,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int devm_extcon_register_notifier_all(struct device *dev,
+ struct extcon_dev *edev,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline void devm_extcon_unregister_notifier_all(struct device *dev,
+ struct extcon_dev *edev,
+ struct notifier_block *nb) { }
+
static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
{
- return ERR_PTR(-ENODEV);
+ return NULL;
}
static inline struct extcon_dev *extcon_find_edev_by_node(struct device_node *node)
@@ -286,6 +311,11 @@ static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
{
return ERR_PTR(-ENODEV);
}
+
+static inline const char *extcon_get_edev_name(struct extcon_dev *edev)
+{
+ return NULL;
+}
#endif /* CONFIG_EXTCON */
/*
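Note the stub's return convention: NULL now means "no such device (yet)" rather than ERR_PTR(-ENODEV), which callers commonly turn into probe deferral. A sketch with a hypothetical device name:

struct extcon_dev *edev;

edev = extcon_get_extcon_dev("usb-conn");
if (!edev)
	return -EPROBE_DEFER;	/* provider not registered yet */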
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index ac3f4888b3df..ee0d75d9a302 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -34,13 +34,11 @@
#define F2FS_ROOT_INO(sbi) ((sbi)->root_ino_num)
#define F2FS_NODE_INO(sbi) ((sbi)->node_ino_num)
#define F2FS_META_INO(sbi) ((sbi)->meta_ino_num)
+#define F2FS_COMPRESS_INO(sbi) (NM_I(sbi)->max_nid)
#define F2FS_MAX_QUOTAS 3
#define F2FS_ENC_UTF8_12_1 1
-#define F2FS_ENC_STRICT_MODE_FL (1 << 0)
-#define f2fs_has_strict_mode(sbi) \
- (sbi->s_encoding_flags & F2FS_ENC_STRICT_MODE_FL)
#define F2FS_IO_SIZE(sbi) (1 << F2FS_OPTION(sbi).write_io_size_bits) /* Blocks */
#define F2FS_IO_SIZE_KB(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 2)) /* KB */
@@ -75,6 +73,42 @@ struct f2fs_device {
__le32 total_segments;
} __packed;
+/* reasons for stop_checkpoint */
+enum stop_cp_reason {
+ STOP_CP_REASON_SHUTDOWN,
+ STOP_CP_REASON_FAULT_INJECT,
+ STOP_CP_REASON_META_PAGE,
+ STOP_CP_REASON_WRITE_FAIL,
+ STOP_CP_REASON_CORRUPTED_SUMMARY,
+ STOP_CP_REASON_UPDATE_INODE,
+ STOP_CP_REASON_FLUSH_FAIL,
+ STOP_CP_REASON_MAX,
+};
+
+#define MAX_STOP_REASON 32
+
+/* detailed reasons for EFSCORRUPTED */
+enum f2fs_error {
+ ERROR_CORRUPTED_CLUSTER,
+ ERROR_FAIL_DECOMPRESSION,
+ ERROR_INVALID_BLKADDR,
+ ERROR_CORRUPTED_DIRENT,
+ ERROR_CORRUPTED_INODE,
+ ERROR_INCONSISTENT_SUMMARY,
+ ERROR_INCONSISTENT_FOOTER,
+ ERROR_INCONSISTENT_SUM_TYPE,
+ ERROR_CORRUPTED_JOURNAL,
+ ERROR_INCONSISTENT_NODE_COUNT,
+ ERROR_INCONSISTENT_BLOCK_COUNT,
+ ERROR_INVALID_CURSEG,
+ ERROR_INCONSISTENT_SIT,
+ ERROR_CORRUPTED_VERITY_XATTR,
+ ERROR_CORRUPTED_XATTR,
+ ERROR_MAX,
+};
+
+#define MAX_F2FS_ERRORS 16
+
struct f2fs_super_block {
__le32 magic; /* Magic Number */
__le16 major_ver; /* Major Version */
@@ -118,13 +152,16 @@ struct f2fs_super_block {
__u8 hot_ext_count; /* # of hot file extension */
__le16 s_encoding; /* Filename charset encoding */
__le16 s_encoding_flags; /* Filename charset encoding flags */
- __u8 reserved[306]; /* valid reserved region */
+ __u8 s_stop_reason[MAX_STOP_REASON]; /* stop checkpoint reason */
+	__u8 s_errors[MAX_F2FS_ERRORS];		/* reasons for image corruption */
+ __u8 reserved[258]; /* valid reserved region */
__le32 crc; /* checksum of superblock */
} __packed;
/*
* For checkpoint
*/
+#define CP_RESIZEFS_FLAG 0x00004000
#define CP_DISABLED_QUICK_FLAG 0x00002000
#define CP_DISABLED_FLAG 0x00001000
#define CP_QUOTA_NEED_FSCK_FLAG 0x00000800
@@ -170,7 +207,7 @@ struct f2fs_checkpoint {
unsigned char alloc_type[MAX_ACTIVE_LOGS];
/* SIT and NAT version bitmap */
- unsigned char sit_nat_version_bitmap[1];
+ unsigned char sit_nat_version_bitmap[];
} __packed;
#define CP_CHKSUM_OFFSET 4092 /* default chksum offset in checkpoint */
@@ -231,6 +268,7 @@ struct f2fs_extent {
#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
#define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */
#define F2FS_PIN_FILE 0x40 /* file should not be gced */
+#define F2FS_COMPRESS_RELEASED 0x80 /* file released compressed blocks */
struct f2fs_inode {
__le16 i_mode; /* file mode */
@@ -275,7 +313,10 @@ struct f2fs_inode {
__le64 i_compr_blocks; /* # of compressed blocks */
__u8 i_compress_algorithm; /* compress algorithm */
__u8 i_log_cluster_size; /* log of cluster size */
- __le16 i_padding; /* padding */
+ __le16 i_compress_flag; /* compress flag */
+					/* bit 0: chksum flag
+					 * bits [10,15]: compress level
+ */
__le32 i_extra_end[0]; /* for attribute size calculation */
} __packed;
__le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
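The repurposed padding word packs two fields, as the comment says. Accessors matching that stated layout might look like this; the names and masks are illustrative, not f2fs's own:

#define FOO_COMPRESS_CHKSUM		BIT(0)	/* bit 0: chksum flag */
#define FOO_COMPRESS_LEVEL_SHIFT	10	/* bits [10,15]: compress level */
#define FOO_COMPRESS_LEVEL_MASK		0x3f

static inline bool foo_compress_chksum(const struct f2fs_inode *ri)
{
	return le16_to_cpu(ri->i_compress_flag) & FOO_COMPRESS_CHKSUM;
}

static inline unsigned int foo_compress_level(const struct f2fs_inode *ri)
{
	return (le16_to_cpu(ri->i_compress_flag) >> FOO_COMPRESS_LEVEL_SHIFT) &
		FOO_COMPRESS_LEVEL_MASK;
}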
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index b79fa9bb7359..8ad743def6f3 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_FANOTIFY_H
#define _LINUX_FANOTIFY_H
+#include <linux/sysctl.h>
#include <uapi/linux/fanotify.h>
#define FAN_GROUP_FLAG(group, flag) \
@@ -15,25 +16,62 @@
 * these constants, the programs may break if re-compiled with new uapi headers
* and then run on an old kernel.
*/
-#define FANOTIFY_CLASS_BITS (FAN_CLASS_NOTIF | FAN_CLASS_CONTENT | \
+
+/* Group classes where permission events are allowed */
+#define FANOTIFY_PERM_CLASSES (FAN_CLASS_CONTENT | \
FAN_CLASS_PRE_CONTENT)
-#define FANOTIFY_INIT_FLAGS (FANOTIFY_CLASS_BITS | \
- FAN_REPORT_TID | FAN_REPORT_FID | \
- FAN_CLOEXEC | FAN_NONBLOCK | \
- FAN_UNLIMITED_QUEUE | FAN_UNLIMITED_MARKS)
+#define FANOTIFY_CLASS_BITS (FAN_CLASS_NOTIF | FANOTIFY_PERM_CLASSES)
+
+#define FANOTIFY_FID_BITS (FAN_REPORT_DFID_NAME_TARGET)
+
+#define FANOTIFY_INFO_MODES (FANOTIFY_FID_BITS | FAN_REPORT_PIDFD)
+
+/*
+ * fanotify_init() flags that require CAP_SYS_ADMIN.
+ * We do not allow unprivileged groups to request permission events.
+ * We do not allow unprivileged groups to get another process's pid in events.
+ * We do not allow unprivileged groups to use unlimited resources.
+ */
+#define FANOTIFY_ADMIN_INIT_FLAGS (FANOTIFY_PERM_CLASSES | \
+ FAN_REPORT_TID | \
+ FAN_REPORT_PIDFD | \
+ FAN_UNLIMITED_QUEUE | \
+ FAN_UNLIMITED_MARKS)
+
+/*
+ * fanotify_init() flags that are allowed for user without CAP_SYS_ADMIN.
+ * FAN_CLASS_NOTIF is the only class we allow for unprivileged groups.
+ * We do not allow unprivileged groups to get file descriptors in events,
+ * so one of the flags for reporting file handles is required.
+ */
+#define FANOTIFY_USER_INIT_FLAGS (FAN_CLASS_NOTIF | \
+ FANOTIFY_FID_BITS | \
+ FAN_CLOEXEC | FAN_NONBLOCK)
+
+#define FANOTIFY_INIT_FLAGS (FANOTIFY_ADMIN_INIT_FLAGS | \
+ FANOTIFY_USER_INIT_FLAGS)
+
+/* Internal group flags */
+#define FANOTIFY_UNPRIV 0x80000000
+#define FANOTIFY_INTERNAL_GROUP_FLAGS (FANOTIFY_UNPRIV)
#define FANOTIFY_MARK_TYPE_BITS (FAN_MARK_INODE | FAN_MARK_MOUNT | \
FAN_MARK_FILESYSTEM)
+#define FANOTIFY_MARK_CMD_BITS (FAN_MARK_ADD | FAN_MARK_REMOVE | \
+ FAN_MARK_FLUSH)
+
+#define FANOTIFY_MARK_IGNORE_BITS (FAN_MARK_IGNORED_MASK | \
+ FAN_MARK_IGNORE)
+
#define FANOTIFY_MARK_FLAGS (FANOTIFY_MARK_TYPE_BITS | \
- FAN_MARK_ADD | \
- FAN_MARK_REMOVE | \
+ FANOTIFY_MARK_CMD_BITS | \
+ FANOTIFY_MARK_IGNORE_BITS | \
FAN_MARK_DONT_FOLLOW | \
FAN_MARK_ONLYDIR | \
- FAN_MARK_IGNORED_MASK | \
FAN_MARK_IGNORED_SURV_MODIFY | \
- FAN_MARK_FLUSH)
+ FAN_MARK_EVICTABLE)
/*
* Events that can be reported with data type FSNOTIFY_EVENT_PATH.
@@ -47,15 +85,23 @@
* Directory entry modification events - reported only to directory
* where entry is modified and not to a watching parent.
*/
-#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE)
+#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE | \
+ FAN_RENAME)
+
+/* Events that can be reported with event->fd */
+#define FANOTIFY_FD_EVENTS (FANOTIFY_PATH_EVENTS | FANOTIFY_PERM_EVENTS)
/* Events that can only be reported with data type FSNOTIFY_EVENT_INODE */
#define FANOTIFY_INODE_EVENTS (FANOTIFY_DIRENT_EVENTS | \
FAN_ATTRIB | FAN_MOVE_SELF | FAN_DELETE_SELF)
+/* Events that can only be reported with data type FSNOTIFY_EVENT_ERROR */
+#define FANOTIFY_ERROR_EVENTS (FAN_FS_ERROR)
+
/* Events that user can request to be notified on */
#define FANOTIFY_EVENTS (FANOTIFY_PATH_EVENTS | \
- FANOTIFY_INODE_EVENTS)
+ FANOTIFY_INODE_EVENTS | \
+ FANOTIFY_ERROR_EVENTS)
/* Events that require a permission response from user */
#define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM | \
@@ -69,6 +115,10 @@
FANOTIFY_PERM_EVENTS | \
FAN_Q_OVERFLOW | FAN_ONDIR)
+/* Events and flags relevant only for directories */
+#define FANOTIFY_DIRONLY_EVENT_BITS (FANOTIFY_DIRENT_EVENTS | \
+ FAN_EVENT_ON_CHILD | FAN_ONDIR)
+
#define ALL_FANOTIFY_EVENT_BITS (FANOTIFY_OUTGOING_EVENTS | \
FANOTIFY_EVENT_FLAGS)
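The admin/user split lets fanotify_init() apply a single capability check rather than testing flags one by one. A sketch of the policy these masks encode — this is the shape of the check, not the kernel's literal code:

static int foo_check_init_flags(unsigned int flags)
{
	if (capable(CAP_SYS_ADMIN))
		return 0;
	if (flags & FANOTIFY_ADMIN_INIT_FLAGS)
		return -EPERM;		/* perm events, TID/pidfd, unlimited queue/marks */
	if (!(flags & FANOTIFY_FID_BITS))
		return -EINVAL;		/* unprivileged groups must report fids, not fds */
	return 0;
}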
diff --git a/include/linux/fault-inject-usercopy.h b/include/linux/fault-inject-usercopy.h
new file mode 100644
index 000000000000..56c3a693fdd9
--- /dev/null
+++ b/include/linux/fault-inject-usercopy.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_FAULT_INJECT_USERCOPY_H__
+#define __LINUX_FAULT_INJECT_USERCOPY_H__
+
+/*
+ * This header provides a wrapper for injecting failures to user space memory
+ * access functions.
+ */
+
+#include <linux/types.h>
+
+#ifdef CONFIG_FAULT_INJECTION_USERCOPY
+
+bool should_fail_usercopy(void);
+
+#else
+
+static inline bool should_fail_usercopy(void) { return false; }
+
+#endif /* CONFIG_FAULT_INJECTION_USERCOPY */
+
+#endif /* __LINUX_FAULT_INJECT_USERCOPY_H__ */
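should_fail_usercopy() is meant to sit at the top of a usercopy primitive; on an injected fault the convention is to behave as if zero bytes were copied. A hedged sketch (foo_copy_to_user is illustrative):

#include <linux/fault-inject-usercopy.h>
#include <linux/uaccess.h>

static unsigned long foo_copy_to_user(void __user *to, const void *from,
				      unsigned long n)
{
	if (should_fail_usercopy())
		return n;	/* injected fault: report all bytes uncopied */
	return copy_to_user(to, from, n);
}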
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index e525f6957c49..9f6e25467844 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -20,6 +20,7 @@ struct fault_attr {
atomic_t space;
unsigned long verbose;
bool task_filter;
+ bool no_warn;
unsigned long stacktrace_depth;
unsigned long require_start;
unsigned long require_end;
@@ -39,6 +40,7 @@ struct fault_attr {
.ratelimit_state = RATELIMIT_STATE_INIT_DISABLED, \
.verbose = 2, \
.dname = NULL, \
+ .no_warn = false, \
}
#define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER
@@ -64,6 +66,8 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name,
struct kmem_cache;
+bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
+
int should_failslab(struct kmem_cache *s, gfp_t gfpflags);
#ifdef CONFIG_FAILSLAB
extern bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags);
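The new no_warn field lets callers that inject faults on purpose suppress the usual warning splat. Sketch under assumed names:

static DECLARE_FAULT_ATTR(foo_fail);

static bool foo_should_fail(size_t size)
{
	foo_fail.no_warn = true;	/* failures are expected: keep the log quiet */
	return should_fail(&foo_fail, size);
}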
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 3b4b2f0c6994..bcb8658f5b64 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_FB_H
#define _LINUX_FB_H
+#include <linux/refcount.h>
#include <linux/kgdb.h>
#include <uapi/linux/fb.h>
@@ -124,7 +125,7 @@ struct fb_cursor_user {
* Register/unregister for framebuffer events
*/
-/* The resolution of the passed in fb_info about to change */
+/* The resolution of the passed in fb_info about to change */
#define FB_EVENT_MODE_CHANGE 0x01
#ifdef CONFIG_GUMSTIX_AM200EPD
@@ -200,11 +201,19 @@ struct fb_pixmap {
};
#ifdef CONFIG_FB_DEFERRED_IO
+struct fb_deferred_io_pageref {
+ struct page *page;
+ unsigned long offset;
+ /* private */
+ struct list_head list;
+};
+
struct fb_deferred_io {
/* delay between mkwrite and deferred handler */
unsigned long delay;
- struct mutex lock; /* mutex that protects the page list */
- struct list_head pagelist; /* list of touched pages */
+ bool sort_pagereflist; /* sort pagelist by offset */
+ struct mutex lock; /* mutex that protects the pageref list */
+ struct list_head pagereflist; /* list of pagerefs for touched pages */
/* callback */
void (*first_io)(struct fb_info *info);
void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
@@ -400,8 +409,6 @@ struct fb_tile_ops {
#define FBINFO_HWACCEL_YPAN 0x2000 /* optional */
#define FBINFO_HWACCEL_YWRAP 0x4000 /* optional */
-#define FBINFO_MISC_USEREVENT 0x10000 /* event request
- from userspace */
#define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */
/* A driver may set this flag to indicate that it does want a set_par to be
@@ -437,7 +444,7 @@ struct fb_tile_ops {
struct fb_info {
- atomic_t count;
+ refcount_t count;
int node;
int flags;
/*
@@ -450,7 +457,6 @@ struct fb_info {
struct fb_var_screeninfo var; /* Current var */
struct fb_fix_screeninfo fix; /* Current fix */
struct fb_monspecs monspecs; /* Current Monitor specs */
- struct work_struct queue; /* Framebuffer event queue */
struct fb_pixmap pixmap; /* Image hardware mapper */
struct fb_pixmap sprite; /* Cursor hardware mapper */
struct fb_cmap cmap; /* Current cmap */
@@ -459,16 +465,18 @@ struct fb_info {
#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
/* assigned backlight device */
- /* set before framebuffer registration,
+ /* set before framebuffer registration,
remove after unregister */
struct backlight_device *bl_dev;
/* Backlight level curve */
- struct mutex bl_curve_mutex;
+ struct mutex bl_curve_mutex;
u8 bl_curve[FB_BACKLIGHT_LEVELS];
#endif
#ifdef CONFIG_FB_DEFERRED_IO
struct delayed_work deferred_work;
+ unsigned long npagerefs;
+ struct fb_deferred_io_pageref *pagerefs;
struct fb_deferred_io *fbdefio;
#endif
@@ -483,8 +491,8 @@ struct fb_info {
char __iomem *screen_base; /* Virtual address */
char *screen_buffer;
};
- unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */
- void *pseudo_palette; /* Fake palette of 16 colors */
+ unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */
+ void *pseudo_palette; /* Fake palette of 16 colors */
#define FBINFO_STATE_RUNNING 0
#define FBINFO_STATE_SUSPENDED 1
u32 state; /* Hardware state i.e suspend */
@@ -506,8 +514,9 @@ struct fb_info {
};
static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
- struct apertures_struct *a = kzalloc(sizeof(struct apertures_struct)
- + max_num * sizeof(struct aperture), GFP_KERNEL);
+ struct apertures_struct *a;
+
+ a = kzalloc(struct_size(a, ranges, max_num), GFP_KERNEL);
if (!a)
return NULL;
a->count = max_num;
@@ -546,7 +555,7 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || \
defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || \
- defined(__arm__) || defined(__aarch64__)
+ defined(__arm__) || defined(__aarch64__) || defined(__mips__)
#define fb_readb __raw_readb
#define fb_readw __raw_readw
@@ -586,11 +595,11 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
* `Generic' versions of the frame buffer device operations
*/
-extern int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var);
-extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var);
+extern int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var);
+extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var);
extern int fb_blank(struct fb_info *info, int blank);
-extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
-extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
+extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
extern void cfb_imageblit(struct fb_info *info, const struct fb_image *image);
/*
* Drawing operations where framebuffer is in system RAM
@@ -606,10 +615,6 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
/* drivers/video/fbmem.c */
extern int register_framebuffer(struct fb_info *fb_info);
extern void unregister_framebuffer(struct fb_info *fb_info);
-extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
- const char *name);
-extern int remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary);
extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
extern int fb_show_logo(struct fb_info *fb_info, int rotate);
extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
@@ -622,16 +627,10 @@ extern int fb_get_color_depth(struct fb_var_screeninfo *var,
extern int fb_get_options(const char *name, char **option);
extern int fb_new_modelist(struct fb_info *info);
-extern struct fb_info *registered_fb[FB_MAX];
-extern int num_registered_fb;
extern bool fb_center_logo;
extern int fb_logo_count;
extern struct class *fb_class;
-#define for_each_registered_fb(i) \
- for (i = 0; i < FB_MAX; i++) \
- if (!registered_fb[i]) {} else
-
static inline void lock_fb_info(struct fb_info *info)
{
mutex_lock(&info->lock);
@@ -659,7 +658,7 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
/* drivers/video/fb_defio.c */
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
-extern void fb_deferred_io_init(struct fb_info *info);
+extern int fb_deferred_io_init(struct fb_info *info);
extern void fb_deferred_io_open(struct fb_info *info,
struct inode *inode,
struct file *file);
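With the pageref bookkeeping moved into the core (npagerefs/pagerefs), fb_deferred_io_init() can fail and now returns int, so drivers must check it. A sketch with hypothetical foofb_* names:

static struct fb_deferred_io foofb_defio = {
	.delay		  = HZ / 20,
	.sort_pagereflist = true,	/* deferred_io sees pagerefs sorted by offset */
	.deferred_io	  = foofb_deferred_io,
};

static int foofb_setup_defio(struct fb_info *info)
{
	info->fbdefio = &foofb_defio;
	return fb_deferred_io_init(info);	/* may now fail allocating pagerefs */
}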
diff --git a/include/linux/fbcon.h b/include/linux/fbcon.h
index ff5596dd30f8..2382dec6d6ab 100644
--- a/include/linux/fbcon.h
+++ b/include/linux/fbcon.h
@@ -15,6 +15,8 @@ void fbcon_new_modelist(struct fb_info *info);
void fbcon_get_requirement(struct fb_info *info,
struct fb_blit_caps *caps);
void fbcon_fb_blanked(struct fb_info *info, int blank);
+int fbcon_modechange_possible(struct fb_info *info,
+ struct fb_var_screeninfo *var);
void fbcon_update_vcs(struct fb_info *info, bool all);
void fbcon_remap_all(struct fb_info *info);
int fbcon_set_con2fb_map_ioctl(void __user *argp);
@@ -33,6 +35,8 @@ static inline void fbcon_new_modelist(struct fb_info *info) {}
static inline void fbcon_get_requirement(struct fb_info *info,
struct fb_blit_caps *caps) {}
static inline void fbcon_fb_blanked(struct fb_info *info, int blank) {}
+static inline int fbcon_modechange_possible(struct fb_info *info,
+ struct fb_var_screeninfo *var) { return 0; }
static inline void fbcon_update_vcs(struct fb_info *info, bool all) {}
static inline void fbcon_remap_all(struct fb_info *info) {}
static inline int fbcon_set_con2fb_map_ioctl(void __user *argp) { return 0; }
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index 7bcdcf4f6ab2..a332e79b3207 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -8,18 +8,14 @@
/* List of all valid flags for the open/openat flags argument: */
#define VALID_OPEN_FLAGS \
(O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | \
- O_APPEND | O_NDELAY | O_NONBLOCK | O_NDELAY | __O_SYNC | O_DSYNC | \
+ O_APPEND | O_NDELAY | O_NONBLOCK | __O_SYNC | O_DSYNC | \
FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \
O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE)
-/* List of all valid flags for the how->upgrade_mask argument: */
-#define VALID_UPGRADE_FLAGS \
- (UPGRADE_NOWRITE | UPGRADE_NOREAD)
-
/* List of all valid flags for the how->resolve argument: */
#define VALID_RESOLVE_FLAGS \
(RESOLVE_NO_XDEV | RESOLVE_NO_MAGICLINKS | RESOLVE_NO_SYMLINKS | \
- RESOLVE_BENEATH | RESOLVE_IN_ROOT)
+ RESOLVE_BENEATH | RESOLVE_IN_ROOT | RESOLVE_CACHED)
/* List of all open_how "versions". */
#define OPEN_HOW_SIZE_VER0 24 /* sizeof first published struct */
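RESOLVE_CACHED, newly whitelisted here, asks openat2() to resolve the path from the dcache only and fail with -EAGAIN rather than block (io_uring uses it to decide when to punt to a worker). A userspace sketch, assuming a libc that exposes the syscall number:

#include <fcntl.h>
#include <linux/openat2.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_cached(const char *path)
{
	struct open_how how = {
		.flags	 = O_RDONLY,
		.resolve = RESOLVE_CACHED,	/* -EAGAIN instead of blocking */
	};

	return syscall(SYS_openat2, AT_FDCWD, path, &how, sizeof(how));
}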
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index f07c55ea0c22..e066816f3519 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -22,6 +22,7 @@
* as this is the granularity returned by copy_fdset().
*/
#define NR_OPEN_DEFAULT BITS_PER_LONG
+#define NR_OPEN_MAX ~0U
struct fdtable {
unsigned int max_fds;
@@ -79,7 +80,7 @@ struct dentry;
/*
* The caller must ensure that fd table isn't shared or hold rcu or file lock
*/
-static inline struct file *__fcheck_files(struct files_struct *files, unsigned int fd)
+static inline struct file *files_lookup_fd_raw(struct files_struct *files, unsigned int fd)
{
struct fdtable *fdt = rcu_dereference_raw(files->fdt);
@@ -90,38 +91,43 @@ static inline struct file *__fcheck_files(struct files_struct *files, unsigned i
return NULL;
}
-static inline struct file *fcheck_files(struct files_struct *files, unsigned int fd)
+static inline struct file *files_lookup_fd_locked(struct files_struct *files, unsigned int fd)
{
- RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
- !lockdep_is_held(&files->file_lock),
+ RCU_LOCKDEP_WARN(!lockdep_is_held(&files->file_lock),
"suspicious rcu_dereference_check() usage");
- return __fcheck_files(files, fd);
+ return files_lookup_fd_raw(files, fd);
}
-/*
- * Check whether the specified fd has an open file.
- */
-#define fcheck(fd) fcheck_files(current->files, fd)
+static inline struct file *files_lookup_fd_rcu(struct files_struct *files, unsigned int fd)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "suspicious rcu_dereference_check() usage");
+ return files_lookup_fd_raw(files, fd);
+}
+
+static inline struct file *lookup_fd_rcu(unsigned int fd)
+{
+ return files_lookup_fd_rcu(current->files, fd);
+}
+
+struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd);
+struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *fd);
struct task_struct;
-struct files_struct *get_files_struct(struct task_struct *);
void put_files_struct(struct files_struct *fs);
-void reset_files_struct(struct files_struct *);
-int unshare_files(struct files_struct **);
-struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
+int unshare_files(void);
+struct files_struct *dup_fd(struct files_struct *, unsigned, int *) __latent_entropy;
void do_close_on_exec(struct files_struct *);
int iterate_fd(struct files_struct *, unsigned,
int (*)(const void *, struct file *, unsigned),
const void *);
-extern int __alloc_fd(struct files_struct *files,
- unsigned start, unsigned end, unsigned flags);
-extern void __fd_install(struct files_struct *files,
- unsigned int fd, struct file *file);
-extern int __close_fd(struct files_struct *files,
- unsigned int fd);
-extern int __close_fd_get_file(unsigned int fd, struct file **res);
+extern int close_fd(unsigned int fd);
+extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags);
+extern struct file *close_fd_get_file(unsigned int fd);
+extern int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
+ struct files_struct **new_fdp);
extern struct kmem_cache *files_cachep;
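The renames put the locking rule in the name: files_lookup_fd_locked() asserts file_lock, files_lookup_fd_rcu() asserts an RCU read section, and the _raw variant asserts nothing. Sketch of the RCU flavour; note that no reference is taken:

struct file *f;

rcu_read_lock();
f = lookup_fd_rcu(fd);		/* current->files; no reference taken */
if (f) {
	/* use f only inside this RCU section, or take a reference first */
}
rcu_read_unlock();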
diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h
new file mode 100644
index 000000000000..c50882f19235
--- /dev/null
+++ b/include/linux/fiemap.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FIEMAP_H
+#define _LINUX_FIEMAP_H 1
+
+#include <uapi/linux/fiemap.h>
+#include <linux/fs.h>
+
+struct fiemap_extent_info {
+ unsigned int fi_flags; /* Flags as passed from user */
+ unsigned int fi_extents_mapped; /* Number of mapped extents */
+ unsigned int fi_extents_max; /* Size of fiemap_extent array */
+ struct fiemap_extent __user *fi_extents_start; /* Start of
+ fiemap_extent array */
+};
+
+int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 *len, u32 supported_flags);
+int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
+ u64 phys, u64 len, u32 flags);
+
+#endif /* _LINUX_FIEMAP_H 1 */
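A filesystem's ->fiemap() is expected to validate the request with fiemap_prep() and then emit extents through fiemap_fill_next_extent(). A minimal hedged sketch reporting a single identity-mapped extent (foofs_fiemap is illustrative):

static int foofs_fiemap(struct inode *inode,
			struct fiemap_extent_info *fieinfo,
			u64 start, u64 len)
{
	int ret;

	ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	/* pretend [start, start + len) maps 1:1 to the same physical range */
	ret = fiemap_fill_next_extent(fieinfo, start, start, len,
				      FIEMAP_EXTENT_LAST);
	return ret < 0 ? ret : 0;
}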
diff --git a/include/linux/file.h b/include/linux/file.h
index 142d102f285e..39704eae83e2 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -9,11 +9,11 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/posix_types.h>
+#include <linux/errno.h>
struct file;
extern void fput(struct file *);
-extern void fput_many(struct file *, unsigned int);
struct file_operations;
struct task_struct;
@@ -46,7 +46,6 @@ static inline void fdput(struct fd fd)
}
extern struct file *fget(unsigned int fd);
-extern struct file *fget_many(unsigned int fd, unsigned int refs);
extern struct file *fget_raw(unsigned int fd);
extern struct file *fget_task(struct task_struct *task, unsigned int fd);
extern unsigned long __fdget(unsigned int fd);
@@ -91,7 +90,23 @@ extern void put_unused_fd(unsigned int fd);
extern void fd_install(unsigned int fd, struct file *file);
+extern int __receive_fd(struct file *file, int __user *ufd,
+ unsigned int o_flags);
+
+extern int receive_fd(struct file *file, unsigned int o_flags);
+
+static inline int receive_fd_user(struct file *file, int __user *ufd,
+ unsigned int o_flags)
+{
+ if (ufd == NULL)
+ return -EFAULT;
+ return __receive_fd(file, ufd, o_flags);
+}
+int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags);
+
extern void flush_delayed_fput(void);
extern void __fput_sync(struct file *);
+extern unsigned int sysctl_nr_open_min, sysctl_nr_open_max;
+
#endif /* __LINUX_FILE_H */
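The receive_fd helpers bundle the allocate-fd, install-file, report-to-userspace pattern (seccomp's ADDFD is the canonical caller). Sketch under an assumed name:

static int foo_send_fd(struct file *file, int __user *ufd)
{
	/* reserves an fd, installs @file, and copies the fd number to @ufd */
	return receive_fd_user(file, ufd, O_CLOEXEC);
}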
diff --git a/include/linux/fileattr.h b/include/linux/fileattr.h
new file mode 100644
index 000000000000..9e37e063ac69
--- /dev/null
+++ b/include/linux/fileattr.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_FILEATTR_H
+#define _LINUX_FILEATTR_H
+
+/* Flags shared between flags/xflags */
+#define FS_COMMON_FL \
+ (FS_SYNC_FL | FS_IMMUTABLE_FL | FS_APPEND_FL | \
+ FS_NODUMP_FL | FS_NOATIME_FL | FS_DAX_FL | \
+ FS_PROJINHERIT_FL)
+
+#define FS_XFLAG_COMMON \
+ (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | FS_XFLAG_APPEND | \
+ FS_XFLAG_NODUMP | FS_XFLAG_NOATIME | FS_XFLAG_DAX | \
+ FS_XFLAG_PROJINHERIT)
+
+/*
+ * Merged interface for miscellaneous file attributes. 'flags' originates from
+ * ext* and 'fsx_flags' from xfs. There's some overlap between the two, which
+ * is handled by the VFS helpers, so filesystems are free to implement just one
+ * or both of these sub-interfaces.
+ */
+struct fileattr {
+ u32 flags; /* flags (FS_IOC_GETFLAGS/FS_IOC_SETFLAGS) */
+ /* struct fsxattr: */
+ u32 fsx_xflags; /* xflags field value (get/set) */
+ u32 fsx_extsize; /* extsize field value (get/set)*/
+ u32 fsx_nextents; /* nextents field value (get) */
+ u32 fsx_projid; /* project identifier (get/set) */
+ u32 fsx_cowextsize; /* CoW extsize field value (get/set)*/
+ /* selectors: */
+ bool flags_valid:1;
+ bool fsx_valid:1;
+};
+
+int copy_fsxattr_to_user(const struct fileattr *fa, struct fsxattr __user *ufa);
+
+void fileattr_fill_xflags(struct fileattr *fa, u32 xflags);
+void fileattr_fill_flags(struct fileattr *fa, u32 flags);
+
+/**
+ * fileattr_has_fsx - check for extended flags/attributes
+ * @fa: fileattr pointer
+ *
+ * Return: true if any attributes are present that are not represented in
+ * ->flags.
+ */
+static inline bool fileattr_has_fsx(const struct fileattr *fa)
+{
+ return fa->fsx_valid &&
+ ((fa->fsx_xflags & ~FS_XFLAG_COMMON) || fa->fsx_extsize != 0 ||
+ fa->fsx_projid != 0 || fa->fsx_cowextsize != 0);
+}
+
+int vfs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
+int vfs_fileattr_set(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct fileattr *fa);
+
+#endif /* _LINUX_FILEATTR_H */
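A filesystem that only supports the ext*-style flags word can implement the pair roughly like this; foofs_inode_flags() and foofs_apply_flags() are placeholders:

static int foofs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	fileattr_fill_flags(fa, foofs_inode_flags(d_inode(dentry)));
	return 0;
}

static int foofs_fileattr_set(struct user_namespace *mnt_userns,
			      struct dentry *dentry, struct fileattr *fa)
{
	if (fileattr_has_fsx(fa))
		return -EOPNOTSUPP;	/* no fsxattr-only attributes here */
	return foofs_apply_flags(d_inode(dentry), fa->flags);
}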
diff --git a/include/linux/filter.h b/include/linux/filter.h
index f349e2c0884c..efc42a6e3aed 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -5,9 +5,8 @@
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__
-#include <stdarg.h>
-
#include <linux/atomic.h>
+#include <linux/bpf.h>
#include <linux/refcount.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
@@ -16,17 +15,18 @@
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/capability.h>
-#include <linux/cryptohash.h>
#include <linux/set_memory.h>
#include <linux/kallsyms.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
+#include <linux/sockptr.h>
+#include <crypto/sha1.h>
+#include <linux/u64_stats_sync.h>
#include <net/sch_generic.h>
#include <asm/byteorder.h>
#include <uapi/linux/filter.h>
-#include <uapi/linux/bpf.h>
struct sk_buff;
struct sock;
@@ -71,6 +71,11 @@ struct ctl_table_header;
/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS 0xe0
+/* unused opcode to mark speculation barrier for mitigating
+ * Speculative Store Bypass
+ */
+#define BPF_NOSPEC 0xc0
+
/* As per nm, we expose JITed images as text (code) section for
* kallsyms. That way, tools like perf can find it to match
* addresses.
@@ -258,15 +263,32 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.off = OFF, \
.imm = 0 })
-/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
-#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
+/*
+ * Atomic operations:
+ *
+ * BPF_ADD *(uint *) (dst_reg + off16) += src_reg
+ * BPF_AND *(uint *) (dst_reg + off16) &= src_reg
+ * BPF_OR *(uint *) (dst_reg + off16) |= src_reg
+ * BPF_XOR *(uint *) (dst_reg + off16) ^= src_reg
+ * BPF_ADD | BPF_FETCH src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
+ * BPF_AND | BPF_FETCH src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
+ * BPF_OR | BPF_FETCH src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
+ * BPF_XOR | BPF_FETCH src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
+ * BPF_XCHG src_reg = atomic_xchg(dst_reg + off16, src_reg)
+ * BPF_CMPXCHG r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
+ */
+
+#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF) \
((struct bpf_insn) { \
- .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
+ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = OFF, \
- .imm = 0 })
+ .imm = OP })
+
+/* Legacy alias */
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)
/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
@@ -338,10 +360,9 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.off = 0, \
.imm = TGT })
-/* Function call */
+/* Convert function address to BPF immediate */
-#define BPF_CAST_CALL(x) \
- ((u64 (*)(u64, u64, u64, u64, u64))(x))
+#define BPF_CALL_IMM(x) ((void *)(x) - (void *)__bpf_call_base)
#define BPF_EMIT_CALL(FUNC) \
((struct bpf_insn) { \
@@ -349,7 +370,7 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.dst_reg = 0, \
.src_reg = 0, \
.off = 0, \
- .imm = ((FUNC) - __bpf_call_base) })
+ .imm = BPF_CALL_IMM(FUNC) })
/* Raw code statement block */
@@ -371,6 +392,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.off = 0, \
.imm = 0 })
+/* Speculation barrier */
+
+#define BPF_ST_NOSPEC() \
+ ((struct bpf_insn) { \
+ .code = BPF_ST | BPF_NOSPEC, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = 0 })
+
/* Internal classic blocks for direct assignment */
#define __BPF_STMT(CODE, K) \
@@ -502,13 +533,11 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
offsetof(TYPE, MEMBER); \
})
-#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
u16 len;
compat_uptr_t filter; /* struct sock_filter * */
};
-#endif
struct sock_fprog_kern {
u16 len;
@@ -519,37 +548,16 @@ struct sock_fprog_kern {
#define BPF_IMAGE_ALIGNMENT 8
struct bpf_binary_header {
- u32 pages;
+ u32 size;
u8 image[] __aligned(BPF_IMAGE_ALIGNMENT);
};
-struct bpf_prog {
- u16 pages; /* Number of allocated pages */
- u16 jited:1, /* Is our filter JIT'ed? */
- jit_requested:1,/* archs need to JIT the prog */
- gpl_compatible:1, /* Is filter GPL compatible? */
- cb_access:1, /* Is control block accessed? */
- dst_needed:1, /* Do we need dst entry? */
- blinded:1, /* Was blinded */
- is_func:1, /* program is a bpf function */
- kprobe_override:1, /* Do we override a kprobe? */
- has_callchain_buf:1, /* callchain buffer allocated? */
- enforce_expected_attach_type:1; /* Enforce expected_attach_type checking at attach time */
- enum bpf_prog_type type; /* Type of BPF program */
- enum bpf_attach_type expected_attach_type; /* For some prog types */
- u32 len; /* Number of filter blocks */
- u32 jited_len; /* Size of jited insns in bytes */
- u8 tag[BPF_TAG_SIZE];
- struct bpf_prog_aux *aux; /* Auxiliary fields */
- struct sock_fprog_kern *orig_prog; /* Original BPF program */
- unsigned int (*bpf_func)(const void *ctx,
- const struct bpf_insn *insn);
- /* Instructions for interpreter */
- union {
- struct sock_filter insns[0];
- struct bpf_insn insnsi[0];
- };
-};
+struct bpf_prog_stats {
+ u64_stats_t cnt;
+ u64_stats_t nsecs;
+ u64_stats_t misses;
+ struct u64_stats_sync syncp;
+} __aligned(2 * sizeof(u64));
struct sk_filter {
refcount_t refcnt;
@@ -559,25 +567,64 @@ struct sk_filter {
DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
-#define __BPF_PROG_RUN(prog, ctx, dfunc) ({ \
- u32 ret; \
- cant_sleep(); \
- if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
- struct bpf_prog_stats *stats; \
- u64 start = sched_clock(); \
- ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
- stats = this_cpu_ptr(prog->aux->stats); \
- u64_stats_update_begin(&stats->syncp); \
- stats->cnt++; \
- stats->nsecs += sched_clock() - start; \
- u64_stats_update_end(&stats->syncp); \
- } else { \
- ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
- } \
- ret; })
-
-#define BPF_PROG_RUN(prog, ctx) __BPF_PROG_RUN(prog, ctx, \
- bpf_dispatcher_nopfunc)
+extern struct mutex nf_conn_btf_access_lock;
+extern int (*nfct_btf_struct_access)(struct bpf_verifier_log *log, const struct btf *btf,
+ const struct btf_type *t, int off, int size,
+ enum bpf_access_type atype, u32 *next_btf_id,
+ enum bpf_type_flag *flag);
+
+typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
+ const struct bpf_insn *insnsi,
+ unsigned int (*bpf_func)(const void *,
+ const struct bpf_insn *));
+
+static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
+ const void *ctx,
+ bpf_dispatcher_fn dfunc)
+{
+ u32 ret;
+
+ cant_migrate();
+ if (static_branch_unlikely(&bpf_stats_enabled_key)) {
+ struct bpf_prog_stats *stats;
+ u64 start = sched_clock();
+ unsigned long flags;
+
+ ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
+ stats = this_cpu_ptr(prog->stats);
+ flags = u64_stats_update_begin_irqsave(&stats->syncp);
+ u64_stats_inc(&stats->cnt);
+ u64_stats_add(&stats->nsecs, sched_clock() - start);
+ u64_stats_update_end_irqrestore(&stats->syncp, flags);
+ } else {
+ ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
+ }
+ return ret;
+}
+
+static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx)
+{
+ return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func);
+}
+
+/*
+ * Use in preemptible and therefore migratable context to make sure that
+ * the execution of the BPF program runs on one CPU.
+ *
+ * This uses migrate_disable/enable() explicitly to document that the
+ * invocation of a BPF program does not require reentrancy protection
+ * against a BPF program which is invoked from a preempting task.
+ */
+static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
+ const void *ctx)
+{
+ u32 ret;
+
+ migrate_disable();
+ ret = bpf_prog_run(prog, ctx);
+ migrate_enable();
+ return ret;
+}
#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
@@ -587,12 +634,23 @@ struct bpf_skb_data_end {
void *data_end;
};
+struct bpf_nh_params {
+ u32 nh_family;
+ union {
+ u32 ipv4_nh;
+ struct in6_addr ipv6_nh;
+ };
+};
+
struct bpf_redirect_info {
u32 flags;
u32 tgt_index;
void *tgt_value;
struct bpf_map *map;
+ u32 map_id;
+ enum bpf_map_type map_type;
u32 kern_flags;
+ struct bpf_nh_params nh;
};
DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
@@ -636,7 +694,7 @@ static inline void bpf_restore_data_end(
cb->data_end = saved_data_end;
}
-static inline u8 *bpf_skb_cb(struct sk_buff *skb)
+static inline u8 *bpf_skb_cb(const struct sk_buff *skb)
{
/* eBPF programs may read/write skb->cb[] area to transfer meta
* data between tail calls. Since this also needs to work with
@@ -655,9 +713,11 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
return qdisc_skb_cb(skb)->data;
}
+/* Must be invoked with migration disabled */
static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
- struct sk_buff *skb)
+ const void *ctx)
{
+ const struct sk_buff *skb = ctx;
u8 *cb_data = bpf_skb_cb(skb);
u8 cb_saved[BPF_SKB_CB_LEN];
u32 res;
@@ -667,7 +727,7 @@ static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
memset(cb_data, 0, sizeof(cb_saved));
}
- res = BPF_PROG_RUN(prog, skb);
+ res = bpf_prog_run(prog, skb);
if (unlikely(prog->cb_access))
memcpy(cb_data, cb_saved, sizeof(cb_saved));
@@ -680,9 +740,9 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
{
u32 res;
- preempt_disable();
+ migrate_disable();
res = __bpf_prog_run_save_cb(prog, skb);
- preempt_enable();
+ migrate_enable();
return res;
}
@@ -695,25 +755,31 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
if (unlikely(prog->cb_access))
memset(cb_data, 0, BPF_SKB_CB_LEN);
- preempt_disable();
- res = BPF_PROG_RUN(prog, skb);
- preempt_enable();
+ res = bpf_prog_run_pin_on_cpu(prog, skb);
return res;
}
-DECLARE_BPF_DISPATCHER(bpf_dispatcher_xdp)
+DECLARE_BPF_DISPATCHER(xdp)
+
+DECLARE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);
+
+u32 xdp_master_redirect(struct xdp_buff *xdp);
static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
struct xdp_buff *xdp)
{
- /* Caller needs to hold rcu_read_lock() (!), otherwise program
- * can be released while still running, or map elements could be
- * freed early while still having concurrent users. XDP fastpath
- * already takes rcu_read_lock() when fetching the program, so
- * it's not necessary here anymore.
+ /* Driver XDP hooks are invoked within a single NAPI poll cycle and thus
+ * under local_bh_disable(), which provides the needed RCU protection
+ * for accessing map entries.
*/
- return __BPF_PROG_RUN(prog, xdp,
- BPF_DISPATCHER_FUNC(bpf_dispatcher_xdp));
+ u32 act = __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
+
+ if (static_branch_unlikely(&bpf_master_redirect_enabled_key)) {
+ if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev))
+ act = xdp_master_redirect(xdp);
+ }
+
+ return act;
}
void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
@@ -726,7 +792,7 @@ static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
{
return round_up(bpf_prog_insn_size(prog) +
- sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
+ sizeof(__be64) + 1, SHA1_BLOCK_SIZE);
}
static inline unsigned int bpf_prog_size(unsigned int proglen)
@@ -794,17 +860,8 @@ static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
set_vm_flush_reset_perms(hdr);
- set_memory_ro((unsigned long)hdr, hdr->pages);
- set_memory_x((unsigned long)hdr, hdr->pages);
-}
-
-static inline struct bpf_binary_header *
-bpf_jit_binary_hdr(const struct bpf_prog *fp)
-{
- unsigned long real_start = (unsigned long)fp->bpf_func;
- unsigned long addr = real_start & PAGE_MASK;
-
- return (void *)addr;
+ set_memory_ro((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
+ set_memory_x((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
}
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
@@ -822,8 +879,7 @@ void bpf_prog_free_linfo(struct bpf_prog *prog);
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
const u32 *insn_to_jit_off);
int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog);
-void bpf_prog_free_jited_linfo(struct bpf_prog *prog);
-void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog);
+void bpf_prog_jit_attempt_done(struct bpf_prog *prog);
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags);
@@ -843,8 +899,6 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
bpf_aux_classic_check_t trans, bool save_orig);
void bpf_prog_destroy(struct bpf_prog *fp);
-const struct bpf_func_proto *
-bpf_base_func_proto(enum bpf_func_id func_id);
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
@@ -852,8 +906,7 @@ int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
void sk_reuseport_prog_free(struct bpf_prog *prog);
int sk_detach_filter(struct sock *sk);
-int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
- unsigned int len);
+int sk_get_filter(struct sock *sk, sockptr_t optval, unsigned int len);
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
@@ -861,19 +914,21 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
#define __bpf_call_base_args \
((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
- __bpf_call_base)
+ (void *)__bpf_call_base)
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
bool bpf_jit_needs_zext(void);
+bool bpf_jit_supports_subprog_tailcalls(void);
+bool bpf_jit_supports_kfunc_call(void);
bool bpf_helper_changes_pkt_data(void *func);
-static inline bool bpf_dump_raw_ok(void)
+static inline bool bpf_dump_raw_ok(const struct cred *cred)
{
/* Reconstruction of call-sites is dependent on kallsyms,
* thus make dump the same restriction.
*/
- return kallsyms_show_value() == 1;
+ return kallsyms_show_value(cred);
}
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
@@ -929,6 +984,10 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
int xdp_do_redirect(struct net_device *dev,
struct xdp_buff *xdp,
struct bpf_prog *prog);
+int xdp_do_redirect_frame(struct net_device *dev,
+ struct xdp_buff *xdp,
+ struct xdp_frame *xdpf,
+ struct bpf_prog *prog);
void xdp_do_flush(void);
/* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as
@@ -937,16 +996,18 @@ void xdp_do_flush(void);
*/
#define xdp_do_flush_map xdp_do_flush
-void bpf_warn_invalid_xdp_action(u32 act);
+void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act);
#ifdef CONFIG_INET
struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
struct bpf_prog *prog, struct sk_buff *skb,
+ struct sock *migrating_sk,
u32 hash);
#else
static inline struct sock *
bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
struct bpf_prog *prog, struct sk_buff *skb,
+ struct sock *migrating_sk,
u32 hash)
{
return NULL;
@@ -958,9 +1019,12 @@ extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;
extern long bpf_jit_limit;
+extern long bpf_jit_limit_max;
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
+void bpf_jit_fill_hole_with_zero(void *area, unsigned int size);
+
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
unsigned int alignment,
@@ -970,6 +1034,29 @@ u64 bpf_jit_alloc_exec_limit(void);
void *bpf_jit_alloc_exec(unsigned long size);
void bpf_jit_free_exec(void *addr);
void bpf_jit_free(struct bpf_prog *fp);
+struct bpf_binary_header *
+bpf_jit_binary_pack_hdr(const struct bpf_prog *fp);
+
+void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns);
+void bpf_prog_pack_free(struct bpf_binary_header *hdr);
+
+static inline bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
+{
+ return list_empty(&fp->aux->ksym.lnode) ||
+ fp->aux->ksym.lnode.prev == LIST_POISON2;
+}
+
+struct bpf_binary_header *
+bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **ro_image,
+ unsigned int alignment,
+ struct bpf_binary_header **rw_hdr,
+ u8 **rw_image,
+ bpf_jit_fill_hole_t bpf_fill_ill_insns);
+int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
+ struct bpf_binary_header *ro_header,
+ struct bpf_binary_header *rw_header);
+void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
+ struct bpf_binary_header *rw_header);
int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
struct bpf_jit_poke_descriptor *poke);
@@ -1023,7 +1110,7 @@ static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
return false;
if (!bpf_jit_harden)
return false;
- if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
+ if (bpf_jit_harden == 1 && bpf_capable())
return false;
return true;
@@ -1063,7 +1150,6 @@ bpf_address_lookup(unsigned long addr, unsigned long *size,
void bpf_prog_kallsyms_add(struct bpf_prog *fp);
void bpf_prog_kallsyms_del(struct bpf_prog *fp);
-void bpf_get_prog_name(const struct bpf_prog *prog, char *sym);
#else /* CONFIG_BPF_JIT */
@@ -1132,11 +1218,6 @@ static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
}
-static inline void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
-{
- sym[0] = '\0';
-}
-
#endif /* CONFIG_BPF_JIT */
void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
@@ -1190,7 +1271,7 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
BPF_ANCILLARY(RANDOM);
BPF_ANCILLARY(VLAN_TPID);
}
- /* Fallthrough. */
+ fallthrough;
default:
return ftest->code;
}
@@ -1199,15 +1280,6 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
int k, unsigned int size);
-static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
- unsigned int size, void *buffer)
-{
- if (k >= 0)
- return skb_header_pointer(skb, k, size, buffer);
-
- return bpf_internal_load_pointer_neg_helper(skb, k, size);
-}
-
static inline int bpf_tell_extensions(void)
{
return SKF_AD_MAX;
@@ -1226,13 +1298,17 @@ struct bpf_sock_addr_kern {
struct bpf_sock_ops_kern {
struct sock *sk;
- u32 op;
union {
u32 args[4];
u32 reply;
u32 replylong[4];
};
- u32 is_fullsock;
+ struct sk_buff *syn_skb;
+ struct sk_buff *skb;
+ void *skb_data_end;
+ u8 op;
+ u8 is_fullsock;
+ u8 remaining_opt_len;
u64 temp; /* temp and everything after is not
* initialized to 0 before calling
* the BPF program. New fields that
@@ -1258,6 +1334,11 @@ struct bpf_sysctl_kern {
u64 tmp_reg;
};
+#define BPF_SOCKOPT_KERN_BUF_SIZE 32
+struct bpf_sockopt_buf {
+ u8 data[BPF_SOCKOPT_KERN_BUF_SIZE];
+};
+
struct bpf_sockopt_kern {
struct sock *sk;
u8 *optval;
@@ -1265,7 +1346,200 @@ struct bpf_sockopt_kern {
s32 level;
s32 optname;
s32 optlen;
- s32 retval;
+ /* for retval in struct bpf_cg_run_ctx */
+ struct task_struct *current_task;
+ /* Temporary "register" for indirect stores to ppos. */
+ u64 tmp_reg;
};
+int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len);
+
+struct bpf_sk_lookup_kern {
+ u16 family;
+ u16 protocol;
+ __be16 sport;
+ u16 dport;
+ struct {
+ __be32 saddr;
+ __be32 daddr;
+ } v4;
+ struct {
+ const struct in6_addr *saddr;
+ const struct in6_addr *daddr;
+ } v6;
+ struct sock *selected_sk;
+ u32 ingress_ifindex;
+ bool no_reuseport;
+};
+
+extern struct static_key_false bpf_sk_lookup_enabled;
+
+/* Runners for BPF_SK_LOOKUP programs to invoke on socket lookup.
+ *
+ * Allowed return values for a BPF SK_LOOKUP program are SK_PASS and
+ * SK_DROP. Their meaning is as follows:
+ *
+ * SK_PASS && ctx.selected_sk != NULL: use selected_sk as lookup result
+ * SK_PASS && ctx.selected_sk == NULL: continue to htable-based socket lookup
+ * SK_DROP : terminate lookup with -ECONNREFUSED
+ *
+ * This macro aggregates return values and selected sockets from
+ * multiple BPF programs, according to the following rules, in order:
+ *
+ * 1. If any program returned SK_PASS and a non-NULL ctx.selected_sk,
+ * macro result is SK_PASS and last ctx.selected_sk is used.
+ * 2. If any program returned SK_DROP return value,
+ * macro result is SK_DROP.
+ * 3. Otherwise result is SK_PASS and ctx.selected_sk is NULL.
+ *
+ * Caller must ensure that the prog array is non-NULL, and that the
+ * array as well as the programs it contains remain valid.
+ */
+#define BPF_PROG_SK_LOOKUP_RUN_ARRAY(array, ctx, func) \
+ ({ \
+ struct bpf_sk_lookup_kern *_ctx = &(ctx); \
+ struct bpf_prog_array_item *_item; \
+ struct sock *_selected_sk = NULL; \
+ bool _no_reuseport = false; \
+ struct bpf_prog *_prog; \
+ bool _all_pass = true; \
+ u32 _ret; \
+ \
+ migrate_disable(); \
+ _item = &(array)->items[0]; \
+ while ((_prog = READ_ONCE(_item->prog))) { \
+ /* restore most recent selection */ \
+ _ctx->selected_sk = _selected_sk; \
+ _ctx->no_reuseport = _no_reuseport; \
+ \
+ _ret = func(_prog, _ctx); \
+ if (_ret == SK_PASS && _ctx->selected_sk) { \
+ /* remember last non-NULL socket */ \
+ _selected_sk = _ctx->selected_sk; \
+ _no_reuseport = _ctx->no_reuseport; \
+ } else if (_ret == SK_DROP && _all_pass) { \
+ _all_pass = false; \
+ } \
+ _item++; \
+ } \
+ _ctx->selected_sk = _selected_sk; \
+ _ctx->no_reuseport = _no_reuseport; \
+ migrate_enable(); \
+ _all_pass || _selected_sk ? SK_PASS : SK_DROP; \
+ })
+
+static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr, const u16 dport,
+ const int ifindex, struct sock **psk)
+{
+ struct bpf_prog_array *run_array;
+ struct sock *selected_sk = NULL;
+ bool no_reuseport = false;
+
+ rcu_read_lock();
+ run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]);
+ if (run_array) {
+ struct bpf_sk_lookup_kern ctx = {
+ .family = AF_INET,
+ .protocol = protocol,
+ .v4.saddr = saddr,
+ .v4.daddr = daddr,
+ .sport = sport,
+ .dport = dport,
+ .ingress_ifindex = ifindex,
+ };
+ u32 act;
+
+ act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
+ if (act == SK_PASS) {
+ selected_sk = ctx.selected_sk;
+ no_reuseport = ctx.no_reuseport;
+ } else {
+ selected_sk = ERR_PTR(-ECONNREFUSED);
+ }
+ }
+ rcu_read_unlock();
+ *psk = selected_sk;
+ return no_reuseport;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
+ const struct in6_addr *saddr,
+ const __be16 sport,
+ const struct in6_addr *daddr,
+ const u16 dport,
+ const int ifindex, struct sock **psk)
+{
+ struct bpf_prog_array *run_array;
+ struct sock *selected_sk = NULL;
+ bool no_reuseport = false;
+
+ rcu_read_lock();
+ run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]);
+ if (run_array) {
+ struct bpf_sk_lookup_kern ctx = {
+ .family = AF_INET6,
+ .protocol = protocol,
+ .v6.saddr = saddr,
+ .v6.daddr = daddr,
+ .sport = sport,
+ .dport = dport,
+ .ingress_ifindex = ifindex,
+ };
+ u32 act;
+
+ act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
+ if (act == SK_PASS) {
+ selected_sk = ctx.selected_sk;
+ no_reuseport = ctx.no_reuseport;
+ } else {
+ selected_sk = ERR_PTR(-ECONNREFUSED);
+ }
+ }
+ rcu_read_unlock();
+ *psk = selected_sk;
+ return no_reuseport;
+}
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+
+static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifindex,
+ u64 flags, const u64 flag_mask,
+ void *lookup_elem(struct bpf_map *map, u32 key))
+{
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX;
+
+ /* Lower bits of the flags are used as return code on lookup failure */
+ if (unlikely(flags & ~(action_mask | flag_mask)))
+ return XDP_ABORTED;
+
+ ri->tgt_value = lookup_elem(map, ifindex);
+ if (unlikely(!ri->tgt_value) && !(flags & BPF_F_BROADCAST)) {
+ /* If the lookup fails we want to clear out the state in the
+ * redirect_info struct completely, so that if an eBPF program
+ * performs multiple lookups, the last one always takes
+ * precedence.
+ */
+		ri->map_id = INT_MAX; /* Valid map id idr range: [1, INT_MAX) */
+ ri->map_type = BPF_MAP_TYPE_UNSPEC;
+ return flags & action_mask;
+ }
+
+ ri->tgt_index = ifindex;
+ ri->map_id = map->id;
+ ri->map_type = map->map_type;
+
+ if (flags & BPF_F_BROADCAST) {
+ WRITE_ONCE(ri->map, map);
+ ri->flags = flags;
+ } else {
+ WRITE_ONCE(ri->map, NULL);
+ ri->flags = 0;
+ }
+
+ return XDP_REDIRECT;
+}
+
#endif /* __LINUX_FILTER_H__ */
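Under the new encoding the atomic operation lives in insn->imm, with BPF_ATOMIC replacing the single-purpose BPF_XADD class. Emitting the instructions from the comment table above might look like:

/* *(u64 *)(R1 + 0) += R2 */
struct bpf_insn add  = BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_2, 0);

/* R2 = atomic_fetch_add((u64 *)(R1 + 0), R2) */
struct bpf_insn fadd = BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH,
				     BPF_REG_1, BPF_REG_2, 0);

/* legacy spelling still builds via the alias */
struct bpf_insn xadd = BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2, 0);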
diff --git a/include/linux/find.h b/include/linux/find.h
new file mode 100644
index 000000000000..ccaf61a0f5fd
--- /dev/null
+++ b/include/linux/find.h
@@ -0,0 +1,600 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_FIND_H_
+#define __LINUX_FIND_H_
+
+#ifndef __LINUX_BITMAP_H
+#error only <linux/bitmap.h> can be included directly
+#endif
+
+#include <linux/bitops.h>
+
+unsigned long _find_next_bit(const unsigned long *addr1, unsigned long nbits,
+ unsigned long start);
+unsigned long _find_next_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start);
+unsigned long _find_next_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start);
+unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
+ unsigned long start);
+extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
+unsigned long __find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n);
+unsigned long __find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n);
+unsigned long __find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n);
+extern unsigned long _find_first_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size);
+extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
+extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size);
+
+#ifdef __BIG_ENDIAN
+unsigned long _find_first_zero_bit_le(const unsigned long *addr, unsigned long size);
+unsigned long _find_next_zero_bit_le(const unsigned long *addr,
+		unsigned long size, unsigned long offset);
+unsigned long _find_next_bit_le(const unsigned long *addr,
+		unsigned long size, unsigned long offset);
+#endif
+
+#ifndef find_next_bit
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_bit(addr, size, offset);
+}
+#endif
+
+#ifndef find_next_and_bit
+/**
+ * find_next_and_bit - find the next set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr1 & *addr2 & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_and_bit(addr1, addr2, size, offset);
+}
+#endif
+
+#ifndef find_next_andnot_bit
+/**
+ * find_next_andnot_bit - find the next set bit in *addr1 excluding all the bits
+ * in *addr2
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_andnot_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr1 & ~*addr2 & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_andnot_bit(addr1, addr2, size, offset);
+}
+#endif
+
+#ifndef find_next_zero_bit
+/**
+ * find_next_zero_bit - find the next cleared bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number of the next zero bit
+ * If no bits are zero, returns @size.
+ */
+static inline
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr | ~GENMASK(size - 1, offset);
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_next_zero_bit(addr, size, offset);
+}
+#endif
+
+#ifndef find_first_bit
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ *
+ * Returns the bit number of the first set bit.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr & GENMASK(size - 1, 0);
+
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_first_bit(addr, size);
+}
+#endif
+
+/**
+ * find_nth_bit - find N'th set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ * @n: The index of the set bit whose position is needed, counting from 0
+ *
+ * The following is semantically equivalent:
+ * idx = find_nth_bit(addr, size, 0);
+ * idx = find_first_bit(addr, size);
+ *
+ * Returns the bit number of the N'th set bit.
+ * If no such bit exists, returns @size.
+ */
+static inline
+unsigned long find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n)
+{
+ if (n >= size)
+ return size;
+
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr & GENMASK(size - 1, 0);
+
+ return val ? fns(val, n) : size;
+ }
+
+ return __find_nth_bit(addr, size, n);
+}
+
+/**
+ * find_nth_and_bit - find N'th set bit in 2 memory regions
+ * @addr1: The 1st address to start the search at
+ * @addr2: The 2nd address to start the search at
+ * @size: The maximum number of bits to search
+ * @n: The index of the set bit whose position is needed, counting from 0
+ *
+ * Returns the bit number of the N'th set bit.
+ * If no such bit exists, returns @size.
+ */
+static inline
+unsigned long find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n)
+{
+ if (n >= size)
+ return size;
+
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & *addr2 & GENMASK(size - 1, 0);
+
+ return val ? fns(val, n) : size;
+ }
+
+ return __find_nth_and_bit(addr1, addr2, size, n);
+}
+
+/**
+ * find_nth_andnot_bit - find N'th set bit in 2 memory regions,
+ * flipping bits in 2nd region
+ * @addr1: The 1st address to start the search at
+ * @addr2: The 2nd address to start the search at
+ * @size: The maximum number of bits to search
+ * @n: The index of the set bit whose position is needed, counting from 0
+ *
+ * Returns the bit number of the N'th set bit.
+ * If no such bit exists, returns @size.
+ */
+static inline
+unsigned long find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n)
+{
+ if (n >= size)
+ return size;
+
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & (~*addr2) & GENMASK(size - 1, 0);
+
+ return val ? fns(val, n) : size;
+ }
+
+ return __find_nth_andnot_bit(addr1, addr2, size, n);
+}
+
+#ifndef find_first_and_bit
+/**
+ * find_first_and_bit - find the first set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number for the next set bit
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_first_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & *addr2 & GENMASK(size - 1, 0);
+
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_first_and_bit(addr1, addr2, size);
+}
+#endif
+
+#ifndef find_first_zero_bit
+/**
+ * find_first_zero_bit - find the first cleared bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ *
+ * Returns the bit number of the first cleared bit.
+ * If no bits are zero, returns @size.
+ */
+static inline
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr | ~GENMASK(size - 1, 0);
+
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_first_zero_bit(addr, size);
+}
+#endif
+
+#ifndef find_last_bit
+/**
+ * find_last_bit - find the last set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The number of bits to search
+ *
+ * Returns the bit number of the last set bit, or size.
+ */
+static inline
+unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr & GENMASK(size - 1, 0);
+
+ return val ? __fls(val) : size;
+ }
+
+ return _find_last_bit(addr, size);
+}
+#endif
+
+/**
+ * find_next_and_bit_wrap - find the next set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit, or first set bit up to @offset
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_and_bit_wrap(const unsigned long *addr1,
+ const unsigned long *addr2,
+ unsigned long size, unsigned long offset)
+{
+ unsigned long bit = find_next_and_bit(addr1, addr2, size, offset);
+
+ if (bit < size)
+ return bit;
+
+ bit = find_first_and_bit(addr1, addr2, offset);
+ return bit < offset ? bit : size;
+}
+
+/**
+ * find_next_bit_wrap - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit, or first set bit up to @offset
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_bit_wrap(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
+{
+ unsigned long bit = find_next_bit(addr, size, offset);
+
+ if (bit < size)
+ return bit;
+
+ bit = find_first_bit(addr, offset);
+ return bit < offset ? bit : size;
+}
+
+/*
+ * Helper for for_each_set_bit_wrap(). Make sure you're doing the right
+ * thing before using it on its own.
+ */
+static inline
+unsigned long __for_each_wrap(const unsigned long *bitmap, unsigned long size,
+ unsigned long start, unsigned long n)
+{
+ unsigned long bit;
+
+ /* If not wrapped around */
+ if (n > start) {
+ /* and have a bit, just return it. */
+ bit = find_next_bit(bitmap, size, n);
+ if (bit < size)
+ return bit;
+
+ /* Otherwise, wrap around and ... */
+ n = 0;
+ }
+
+ /* Search the other part. */
+ bit = find_next_bit(bitmap, start, n);
+ return bit < start ? bit : size;
+}
+
+/**
+ * find_next_clump8 - find next 8-bit clump with set bits in a memory region
+ * @clump: location to store copy of found clump
+ * @addr: address to base the search on
+ * @size: bitmap size in number of bits
+ * @offset: bit offset at which to start searching
+ *
+ * Returns the bit offset for the next set clump; the found clump value is
+ * copied to the location pointed to by @clump. If no bits are set, returns @size.
+ */
+extern unsigned long find_next_clump8(unsigned long *clump,
+ const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+
+#define find_first_clump8(clump, bits, size) \
+ find_next_clump8((clump), (bits), (size), 0)
+
+#if defined(__LITTLE_ENDIAN)
+
+static inline unsigned long find_next_zero_bit_le(const void *addr,
+ unsigned long size, unsigned long offset)
+{
+ return find_next_zero_bit(addr, size, offset);
+}
+
+static inline unsigned long find_next_bit_le(const void *addr,
+ unsigned long size, unsigned long offset)
+{
+ return find_next_bit(addr, size, offset);
+}
+
+static inline unsigned long find_first_zero_bit_le(const void *addr,
+ unsigned long size)
+{
+ return find_first_zero_bit(addr, size);
+}
+
+#elif defined(__BIG_ENDIAN)
+
+#ifndef find_next_zero_bit_le
+static inline
+unsigned long find_next_zero_bit_le(const void *addr,
+		unsigned long size, unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *(const unsigned long *)addr;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = swab(val) | ~GENMASK(size - 1, offset);
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_next_zero_bit_le(addr, size, offset);
+}
+#endif
+
+#ifndef find_first_zero_bit_le
+static inline
+unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = swab(*(const unsigned long *)addr) | ~GENMASK(size - 1, 0);
+
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_first_zero_bit_le(addr, size);
+}
+#endif
+
+#ifndef find_next_bit_le
+static inline
+unsigned long find_next_bit_le(const void *addr,
+		unsigned long size, unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *(const unsigned long *)addr;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = swab(val) & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_bit_le(addr, size, offset);
+}
+#endif
+
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+
+#define for_each_set_bit(bit, addr, size) \
+ for ((bit) = 0; (bit) = find_next_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
+
+#define for_each_and_bit(bit, addr1, addr2, size) \
+ for ((bit) = 0; \
+ (bit) = find_next_and_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
+ (bit)++)
+
+#define for_each_andnot_bit(bit, addr1, addr2, size) \
+ for ((bit) = 0; \
+ (bit) = find_next_andnot_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
+ (bit)++)
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_from(bit, addr, size) \
+ for (; (bit) = find_next_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
+
+#define for_each_clear_bit(bit, addr, size) \
+ for ((bit) = 0; \
+ (bit) = find_next_zero_bit((addr), (size), (bit)), (bit) < (size); \
+ (bit)++)
+
+/* same as for_each_clear_bit() but use bit as value to start with */
+#define for_each_clear_bit_from(bit, addr, size) \
+ for (; (bit) = find_next_zero_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
+
+/**
+ * for_each_set_bitrange - iterate over all set bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first set bit)
+ * @e: bit offset of end of current bitrange (first unset bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_set_bitrange(b, e, addr, size) \
+ for ((b) = 0; \
+ (b) = find_next_bit((addr), (size), b), \
+ (e) = find_next_zero_bit((addr), (size), (b) + 1), \
+ (b) < (size); \
+ (b) = (e) + 1)
+
+/**
+ * for_each_set_bitrange_from - iterate over all set bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first set bit); must be initialized
+ * @e: bit offset of end of current bitrange (first unset bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_set_bitrange_from(b, e, addr, size) \
+ for (; \
+ (b) = find_next_bit((addr), (size), (b)), \
+ (e) = find_next_zero_bit((addr), (size), (b) + 1), \
+ (b) < (size); \
+ (b) = (e) + 1)
+
+/**
+ * for_each_clear_bitrange - iterate over all unset bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first unset bit)
+ * @e: bit offset of end of current bitrange (first set bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_clear_bitrange(b, e, addr, size) \
+ for ((b) = 0; \
+ (b) = find_next_zero_bit((addr), (size), (b)), \
+ (e) = find_next_bit((addr), (size), (b) + 1), \
+ (b) < (size); \
+ (b) = (e) + 1)
+
+/**
+ * for_each_clear_bitrange_from - iterate over all unset bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first unset bit); must be initialized
+ * @e: bit offset of end of current bitrange (first set bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_clear_bitrange_from(b, e, addr, size) \
+ for (; \
+ (b) = find_next_zero_bit((addr), (size), (b)), \
+ (e) = find_next_bit((addr), (size), (b) + 1), \
+ (b) < (size); \
+ (b) = (e) + 1)
+
+/**
+ * for_each_set_bit_wrap - iterate over all set bits starting from @start, and
+ * wrapping around the end of the bitmap.
+ * @bit: offset for current iteration
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ * @start: Starting bit for bitmap traversing, wrapping around the bitmap end
+ */
+#define for_each_set_bit_wrap(bit, addr, size, start) \
+ for ((bit) = find_next_bit_wrap((addr), (size), (start)); \
+ (bit) < (size); \
+ (bit) = __for_each_wrap((addr), (size), (start), (bit) + 1))
+
+/**
+ * for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
+ * @start: bit offset to start search and to store the current iteration offset
+ * @clump: location to store copy of current 8-bit clump
+ * @bits: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_set_clump8(start, clump, bits, size) \
+ for ((start) = find_first_clump8(&(clump), (bits), (size)); \
+ (start) < (size); \
+ (start) = find_next_clump8(&(clump), (bits), (size), (start) + 8))
+
+#endif /*__LINUX_FIND_H_ */
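A short usage sketch for the new header; as the guard above enforces, it is reached through <linux/bitmap.h>. All calls below are standard bitmap API; the wrapper function is illustrative.

#include <linux/bitmap.h>
#include <linux/printk.h>

static void find_example(void)
{
	DECLARE_BITMAP(mask, 64);
	unsigned long bit;

	bitmap_zero(mask, 64);
	__set_bit(3, mask);
	__set_bit(42, mask);

	bit = find_first_bit(mask, 64);		/* returns 3 */
	bit = find_next_bit(mask, 64, bit + 1);	/* returns 42 */

	for_each_set_bit(bit, mask, 64)
		pr_info("bit %lu is set\n", bit);
}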
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index aec8f30ab200..980019053e54 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -150,6 +150,8 @@ static inline void fw_card_put(struct fw_card *card)
kref_put(&card->kref, fw_card_release);
}
+int fw_card_read_cycle_time(struct fw_card *card, u32 *cycle_time);
+
struct fw_attribute_group {
struct attribute_group *groups[2];
struct attribute_group group;
@@ -352,6 +354,7 @@ void fw_core_remove_address_handler(struct fw_address_handler *handler);
void fw_send_response(struct fw_card *card,
struct fw_request *request, int rcode);
int fw_get_request_speed(struct fw_request *request);
+u32 fw_request_get_timestamp(const struct fw_request *request);
void fw_send_request(struct fw_card *card, struct fw_transaction *t,
int tcode, int destination_id, int generation, int speed,
unsigned long long offset, void *payload, size_t length,
@@ -436,6 +439,12 @@ typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
void *header, void *data);
typedef void (*fw_iso_mc_callback_t)(struct fw_iso_context *context,
dma_addr_t completed, void *data);
+
+union fw_iso_callback {
+ fw_iso_callback_t sc;
+ fw_iso_mc_callback_t mc;
+};
+
struct fw_iso_context {
struct fw_card *card;
int type;
@@ -443,10 +452,7 @@ struct fw_iso_context {
int speed;
bool drop_overflow_headers;
size_t header_size;
- union {
- fw_iso_callback_t sc;
- fw_iso_mc_callback_t mc;
- } callback;
+ union fw_iso_callback callback;
void *callback_data;
};
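One practical effect of naming the union, shown by a hypothetical helper that is not part of the patch: the single-channel and multichannel callbacks can now be passed around as a single value.

/* Hypothetical helper: both callback flavours travel as one argument. */
static void example_set_callback(struct fw_iso_context *ctx,
				 union fw_iso_callback cb)
{
	ctx->callback = cb;
}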
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index 2dd566c91d44..de7fea3bca51 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -6,44 +6,99 @@
#include <linux/compiler.h>
#include <linux/gfp.h>
-#define FW_ACTION_NOHOTPLUG 0
-#define FW_ACTION_HOTPLUG 1
+#define FW_ACTION_NOUEVENT 0
+#define FW_ACTION_UEVENT 1
struct firmware {
size_t size;
const u8 *data;
- struct page **pages;
/* firmware loader private fields */
void *priv;
};
-struct module;
-struct device;
+/**
+ * enum fw_upload_err - firmware upload error codes
+ * @FW_UPLOAD_ERR_NONE: returned to indicate success
+ * @FW_UPLOAD_ERR_HW_ERROR: error signalled by hardware, see kernel log
+ * @FW_UPLOAD_ERR_TIMEOUT: SW timed out on handshake with HW/firmware
+ * @FW_UPLOAD_ERR_CANCELED: upload was cancelled by the user
+ * @FW_UPLOAD_ERR_BUSY: there is an upload operation already in progress
+ * @FW_UPLOAD_ERR_INVALID_SIZE: invalid firmware image size
+ * @FW_UPLOAD_ERR_RW_ERROR: read or write to HW failed, see kernel log
+ * @FW_UPLOAD_ERR_WEAROUT: FLASH device is approaching wear-out, wait & retry
+ * @FW_UPLOAD_ERR_MAX: Maximum error code marker
+ */
+enum fw_upload_err {
+ FW_UPLOAD_ERR_NONE,
+ FW_UPLOAD_ERR_HW_ERROR,
+ FW_UPLOAD_ERR_TIMEOUT,
+ FW_UPLOAD_ERR_CANCELED,
+ FW_UPLOAD_ERR_BUSY,
+ FW_UPLOAD_ERR_INVALID_SIZE,
+ FW_UPLOAD_ERR_RW_ERROR,
+ FW_UPLOAD_ERR_WEAROUT,
+ FW_UPLOAD_ERR_MAX
+};
-struct builtin_fw {
- char *name;
- void *data;
- unsigned long size;
+struct fw_upload {
+ void *dd_handle; /* reference to parent driver */
+ void *priv; /* firmware loader private fields */
};
-/* We have to play tricks here much like stringify() to get the
- __COUNTER__ macro to be expanded as we want it */
-#define __fw_concat1(x, y) x##y
-#define __fw_concat(x, y) __fw_concat1(x, y)
+/**
+ * struct fw_upload_ops - device specific operations to support firmware upload
+ * @prepare: Required: Prepare secure update
+ * @write: Required: The write() op receives the remaining
+ * size to be written and must return the actual
+ * size written or a negative error code. The write()
+ * op will be called repeatedly until all data is
+ * written.
+ * @poll_complete: Required: Check for the completion of the
+ * HW authentication/programming process.
+ * @cancel: Required: Request cancellation of update. This op
+ * is called from the context of a different kernel
+ * thread, so race conditions need to be considered.
+ * @cleanup: Optional: Complements the prepare()
+ * function and is called at the completion
+ * of the update, on success or failure, if the
+ * prepare function succeeded.
+ */
+struct fw_upload_ops {
+ enum fw_upload_err (*prepare)(struct fw_upload *fw_upload,
+ const u8 *data, u32 size);
+ enum fw_upload_err (*write)(struct fw_upload *fw_upload,
+ const u8 *data, u32 offset,
+ u32 size, u32 *written);
+ enum fw_upload_err (*poll_complete)(struct fw_upload *fw_upload);
+ void (*cancel)(struct fw_upload *fw_upload);
+ void (*cleanup)(struct fw_upload *fw_upload);
+};
-#define DECLARE_BUILTIN_FIRMWARE(name, blob) \
- DECLARE_BUILTIN_FIRMWARE_SIZE(name, &(blob), sizeof(blob))
+struct module;
+struct device;
-#define DECLARE_BUILTIN_FIRMWARE_SIZE(name, blob, size) \
- static const struct builtin_fw __fw_concat(__builtin_fw,__COUNTER__) \
- __used __section(.builtin_fw) = { name, blob, size }
+/*
+ * Built-in firmware functionality is only available if FW_LOADER=y, not
+ * if FW_LOADER=m.
+ */
+#ifdef CONFIG_FW_LOADER
+bool firmware_request_builtin(struct firmware *fw, const char *name);
+#else
+static inline bool firmware_request_builtin(struct firmware *fw,
+ const char *name)
+{
+ return false;
+}
+#endif
-#if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE))
+#if IS_REACHABLE(CONFIG_FW_LOADER)
int request_firmware(const struct firmware **fw, const char *name,
struct device *device);
int firmware_request_nowarn(const struct firmware **fw, const char *name,
struct device *device);
+int firmware_request_platform(const struct firmware **fw, const char *name,
+ struct device *device);
int request_firmware_nowait(
struct module *module, bool uevent,
const char *name, struct device *device, gfp_t gfp, void *context,
@@ -52,6 +107,9 @@ int request_firmware_direct(const struct firmware **fw, const char *name,
struct device *device);
int request_firmware_into_buf(const struct firmware **firmware_p,
const char *name, struct device *device, void *buf, size_t size);
+int request_partial_firmware_into_buf(const struct firmware **firmware_p,
+ const char *name, struct device *device,
+ void *buf, size_t size, size_t offset);
void release_firmware(const struct firmware *fw);
#else
@@ -69,6 +127,13 @@ static inline int firmware_request_nowarn(const struct firmware **fw,
return -EINVAL;
}
+static inline int firmware_request_platform(const struct firmware **fw,
+ const char *name,
+ struct device *device)
+{
+ return -EINVAL;
+}
+
static inline int request_firmware_nowait(
struct module *module, bool uevent,
const char *name, struct device *device, gfp_t gfp, void *context,
@@ -94,6 +159,39 @@ static inline int request_firmware_into_buf(const struct firmware **firmware_p,
return -EINVAL;
}
+static inline int
+request_partial_firmware_into_buf(const struct firmware **firmware_p,
+				  const char *name, struct device *device,
+				  void *buf, size_t size, size_t offset)
+{
+ return -EINVAL;
+}
+
+#endif
+
+#ifdef CONFIG_FW_UPLOAD
+
+struct fw_upload *
+firmware_upload_register(struct module *module, struct device *parent,
+ const char *name, const struct fw_upload_ops *ops,
+ void *dd_handle);
+void firmware_upload_unregister(struct fw_upload *fw_upload);
+
+#else
+
+static inline struct fw_upload *
+firmware_upload_register(struct module *module, struct device *parent,
+ const char *name, const struct fw_upload_ops *ops,
+ void *dd_handle)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void firmware_upload_unregister(struct fw_upload *fw_upload)
+{
+}
+
#endif
int firmware_request_cache(struct device *device, const char *name);
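For reference, the canonical request/release pattern for the loader API above; the firmware path is a placeholder and error handling is minimal.

static int example_load_firmware(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "vendor/example.bin", dev);
	if (ret)
		return ret;

	/* ... push fw->data / fw->size to the hardware ... */

	release_firmware(fw);
	return 0;
}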
diff --git a/include/linux/firmware/cirrus/cs_dsp.h b/include/linux/firmware/cirrus/cs_dsp.h
new file mode 100644
index 000000000000..cad828e21c72
--- /dev/null
+++ b/include/linux/firmware/cirrus/cs_dsp.h
@@ -0,0 +1,328 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * cs_dsp.h -- Cirrus Logic DSP firmware support
+ *
+ * Based on sound/soc/codecs/wm_adsp.h
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ * Copyright (C) 2015-2021 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+#ifndef __CS_DSP_H
+#define __CS_DSP_H
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/list.h>
+#include <linux/regmap.h>
+
+#define CS_ADSP2_REGION_0 BIT(0)
+#define CS_ADSP2_REGION_1 BIT(1)
+#define CS_ADSP2_REGION_2 BIT(2)
+#define CS_ADSP2_REGION_3 BIT(3)
+#define CS_ADSP2_REGION_4 BIT(4)
+#define CS_ADSP2_REGION_5 BIT(5)
+#define CS_ADSP2_REGION_6 BIT(6)
+#define CS_ADSP2_REGION_7 BIT(7)
+#define CS_ADSP2_REGION_8 BIT(8)
+#define CS_ADSP2_REGION_9 BIT(9)
+#define CS_ADSP2_REGION_1_9 (CS_ADSP2_REGION_1 | \
+ CS_ADSP2_REGION_2 | CS_ADSP2_REGION_3 | \
+ CS_ADSP2_REGION_4 | CS_ADSP2_REGION_5 | \
+ CS_ADSP2_REGION_6 | CS_ADSP2_REGION_7 | \
+ CS_ADSP2_REGION_8 | CS_ADSP2_REGION_9)
+#define CS_ADSP2_REGION_ALL (CS_ADSP2_REGION_0 | CS_ADSP2_REGION_1_9)
+
+#define CS_DSP_DATA_WORD_SIZE 3
+#define CS_DSP_DATA_WORD_BITS (3 * BITS_PER_BYTE)
+
+#define CS_DSP_ACKED_CTL_TIMEOUT_MS 100
+#define CS_DSP_ACKED_CTL_N_QUICKPOLLS 10
+#define CS_DSP_ACKED_CTL_MIN_VALUE 0
+#define CS_DSP_ACKED_CTL_MAX_VALUE 0xFFFFFF
+
+/**
+ * struct cs_dsp_region - Describes a logical memory region in DSP address space
+ * @type: Memory region type
+ * @base: Address of region
+ */
+struct cs_dsp_region {
+ int type;
+ unsigned int base;
+};
+
+/**
+ * struct cs_dsp_alg_region - Describes a logical algorithm region in DSP address space
+ * @list: List node for internal use
+ * @alg: Algorithm id
+ * @ver: Expected algorithm version
+ * @type: Memory region type
+ * @base: Address of region
+ */
+struct cs_dsp_alg_region {
+ struct list_head list;
+ unsigned int alg;
+ unsigned int ver;
+ int type;
+ unsigned int base;
+};
+
+/**
+ * struct cs_dsp_coeff_ctl - Describes a coefficient control
+ * @list: List node for internal use
+ * @dsp: DSP instance associated with this control
+ * @cache: Cached value of the control
+ * @fw_name: Name of the firmware
+ * @subname: Name of the control parsed from the WMFW
+ * @subname_len: Length of subname
+ * @offset: Offset of control within alg_region in words
+ * @len: Length of the cached value in bytes
+ * @type: One of the WMFW_CTL_TYPE_ control types defined in wmfw.h
+ * @flags: Bitfield of WMFW_CTL_FLAG_ control flags defined in wmfw.h
+ * @set: Flag indicating the value has been written by the user
+ * @enabled: Flag indicating whether control is enabled
+ * @alg_region: Logical region associated with this control
+ * @priv: For use by the client
+ */
+struct cs_dsp_coeff_ctl {
+ struct list_head list;
+ struct cs_dsp *dsp;
+ void *cache;
+ const char *fw_name;
+ /* Subname is needed to match with firmware */
+ const char *subname;
+ unsigned int subname_len;
+ unsigned int offset;
+ size_t len;
+ unsigned int type;
+ unsigned int flags;
+ unsigned int set:1;
+ unsigned int enabled:1;
+ struct cs_dsp_alg_region alg_region;
+
+ void *priv;
+};
+
+struct cs_dsp_ops;
+struct cs_dsp_client_ops;
+
+/**
+ * struct cs_dsp - Configuration and state of a Cirrus Logic DSP
+ * @name: The name of the DSP instance
+ * @rev: Revision of the DSP
+ * @num: DSP instance number
+ * @type: Type of DSP
+ * @dev: Driver model representation of the device
+ * @regmap: Register map of the device
+ * @ops: Function pointers for internal callbacks
+ * @client_ops: Function pointers for client callbacks
+ * @base: Address of the DSP registers
+ * @base_sysinfo: Address of the sysinfo register (Halo only)
+ * @sysclk_reg: Address of the sysclk register (ADSP1 only)
+ * @sysclk_mask: Mask of frequency bits within sysclk register (ADSP1 only)
+ * @sysclk_shift: Shift of frequency bits within sysclk register (ADSP1 only)
+ * @alg_regions: List of currently loaded algorithm regions
+ * @fw_name: Name of the current firmware
+ * @fw_id: ID of the current firmware, obtained from the wmfw
+ * @fw_id_version: Version of the firmware, obtained from the wmfw
+ * @fw_vendor_id: Vendor of the firmware, obtained from the wmfw
+ * @mem: DSP memory region descriptions
+ * @num_mems: Number of memory regions in this DSP
+ * @fw_ver: Version of the wmfw file format
+ * @booted: Flag indicating DSP has been configured
+ * @running: Flag indicating DSP is executing firmware
+ * @ctl_list: Controls defined within the loaded DSP firmware
+ * @lock_regions: Enable MPU traps on specified memory regions
+ * @pwr_lock: Lock used to serialize accesses
+ * @debugfs_root: Debugfs directory for this DSP instance
+ * @wmfw_file_name: Filename of the currently loaded firmware
+ * @bin_file_name: Filename of the currently loaded coefficients
+ */
+struct cs_dsp {
+ const char *name;
+ int rev;
+ int num;
+ int type;
+ struct device *dev;
+ struct regmap *regmap;
+
+ const struct cs_dsp_ops *ops;
+ const struct cs_dsp_client_ops *client_ops;
+
+ unsigned int base;
+ unsigned int base_sysinfo;
+ unsigned int sysclk_reg;
+ unsigned int sysclk_mask;
+ unsigned int sysclk_shift;
+
+ struct list_head alg_regions;
+
+ const char *fw_name;
+ unsigned int fw_id;
+ unsigned int fw_id_version;
+ unsigned int fw_vendor_id;
+
+ const struct cs_dsp_region *mem;
+ int num_mems;
+
+ int fw_ver;
+
+ bool booted;
+ bool running;
+
+ struct list_head ctl_list;
+
+ struct mutex pwr_lock;
+
+ unsigned int lock_regions;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root;
+ char *wmfw_file_name;
+ char *bin_file_name;
+#endif
+};
+
+/**
+ * struct cs_dsp_client_ops - client callbacks
+ * @control_add: Called under the pwr_lock when a control is created
+ * @control_remove: Called under the pwr_lock when a control is destroyed
+ * @pre_run: Called under the pwr_lock by cs_dsp_run() before the core is started
+ * @post_run: Called under the pwr_lock by cs_dsp_run() after the core is started
+ * @pre_stop: Called under the pwr_lock by cs_dsp_stop() before the core is stopped
+ * @post_stop: Called under the pwr_lock by cs_dsp_stop() after the core is stopped
+ * @watchdog_expired: Called when a watchdog expiry is detected
+ *
+ * These callbacks give the cs_dsp client an opportunity to respond to events
+ * or to perform actions atomically.
+ */
+struct cs_dsp_client_ops {
+ int (*control_add)(struct cs_dsp_coeff_ctl *ctl);
+ void (*control_remove)(struct cs_dsp_coeff_ctl *ctl);
+ int (*pre_run)(struct cs_dsp *dsp);
+ int (*post_run)(struct cs_dsp *dsp);
+ void (*pre_stop)(struct cs_dsp *dsp);
+ void (*post_stop)(struct cs_dsp *dsp);
+ void (*watchdog_expired)(struct cs_dsp *dsp);
+};
+
+int cs_dsp_adsp1_init(struct cs_dsp *dsp);
+int cs_dsp_adsp2_init(struct cs_dsp *dsp);
+int cs_dsp_halo_init(struct cs_dsp *dsp);
+
+int cs_dsp_adsp1_power_up(struct cs_dsp *dsp,
+ const struct firmware *wmfw_firmware, char *wmfw_filename,
+ const struct firmware *coeff_firmware, char *coeff_filename,
+ const char *fw_name);
+void cs_dsp_adsp1_power_down(struct cs_dsp *dsp);
+int cs_dsp_power_up(struct cs_dsp *dsp,
+ const struct firmware *wmfw_firmware, char *wmfw_filename,
+ const struct firmware *coeff_firmware, char *coeff_filename,
+ const char *fw_name);
+void cs_dsp_power_down(struct cs_dsp *dsp);
+int cs_dsp_run(struct cs_dsp *dsp);
+void cs_dsp_stop(struct cs_dsp *dsp);
+
+void cs_dsp_remove(struct cs_dsp *dsp);
+
+int cs_dsp_set_dspclk(struct cs_dsp *dsp, unsigned int freq);
+void cs_dsp_adsp2_bus_error(struct cs_dsp *dsp);
+void cs_dsp_halo_bus_error(struct cs_dsp *dsp);
+void cs_dsp_halo_wdt_expire(struct cs_dsp *dsp);
+
+void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root);
+void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp);
+
+int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int event_id);
+int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off,
+ const void *buf, size_t len);
+int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off,
+ void *buf, size_t len);
+struct cs_dsp_coeff_ctl *cs_dsp_get_ctl(struct cs_dsp *dsp, const char *name, int type,
+ unsigned int alg);
+
+int cs_dsp_read_raw_data_block(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr,
+ unsigned int num_words, __be32 *data);
+int cs_dsp_read_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr, u32 *data);
+int cs_dsp_write_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr, u32 data);
+void cs_dsp_remove_padding(u32 *buf, int nwords);
+
+struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp,
+ int type, unsigned int id);
+
+const char *cs_dsp_mem_region_name(unsigned int type);
+
+/**
+ * struct cs_dsp_chunk - Describes a buffer holding data formatted for the DSP
+ * @data: Pointer to underlying buffer memory
+ * @max: Pointer to end of the buffer memory
+ * @bytes: Number of bytes read/written into the memory chunk
+ * @cache: Temporary holding area for data as it is formatted
+ * @cachebits: Number of bits of data currently in cache
+ */
+struct cs_dsp_chunk {
+ u8 *data;
+ u8 *max;
+ int bytes;
+
+ u32 cache;
+ int cachebits;
+};
+
+/**
+ * cs_dsp_chunk() - Create a DSP memory chunk
+ * @data: Pointer to the buffer that will be used to store data
+ * @size: Size of the buffer in bytes
+ *
+ * Return: A cs_dsp_chunk structure
+ */
+static inline struct cs_dsp_chunk cs_dsp_chunk(void *data, int size)
+{
+ struct cs_dsp_chunk ch = {
+ .data = data,
+ .max = data + size,
+ };
+
+ return ch;
+}
+
+/**
+ * cs_dsp_chunk_end() - Check if a DSP memory chunk is full
+ * @ch: Pointer to the chunk structure
+ *
+ * Return: True if the whole buffer has been read/written
+ */
+static inline bool cs_dsp_chunk_end(struct cs_dsp_chunk *ch)
+{
+ return ch->data == ch->max;
+}
+
+/**
+ * cs_dsp_chunk_bytes() - Number of bytes written/read from a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ *
+ * Return: Number of bytes read/written to the buffer
+ */
+static inline int cs_dsp_chunk_bytes(struct cs_dsp_chunk *ch)
+{
+ return ch->bytes;
+}
+
+/**
+ * cs_dsp_chunk_valid_addr() - Check if an address is in a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ * @addr: Address to check
+ *
+ * Return: True if the given address is within the buffer
+ */
+static inline bool cs_dsp_chunk_valid_addr(struct cs_dsp_chunk *ch, void *addr)
+{
+ return (u8 *)addr >= ch->data && (u8 *)addr < ch->max;
+}
+
+int cs_dsp_chunk_write(struct cs_dsp_chunk *ch, int nbits, u32 val);
+int cs_dsp_chunk_flush(struct cs_dsp_chunk *ch);
+int cs_dsp_chunk_read(struct cs_dsp_chunk *ch, int nbits);
+
+#endif
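A minimal sketch of the chunk helpers declared above, packing one 24-bit DSP word into a byte buffer; the buffer size and value are arbitrary.

static int chunk_example(void)
{
	u8 buf[8];
	struct cs_dsp_chunk ch = cs_dsp_chunk(buf, sizeof(buf));
	int ret;

	/* Pack one 24-bit word into the buffer. */
	ret = cs_dsp_chunk_write(&ch, CS_DSP_DATA_WORD_BITS, 0x123456);
	if (ret)
		return ret;

	/* Pad out any partially-filled word. */
	ret = cs_dsp_chunk_flush(&ch);
	if (ret)
		return ret;

	return cs_dsp_chunk_bytes(&ch);	/* bytes now held in buf */
}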
diff --git a/sound/soc/codecs/wmfw.h b/include/linux/firmware/cirrus/wmfw.h
index 4278aa6aeb01..74e5a4f6c13a 100644
--- a/sound/soc/codecs/wmfw.h
+++ b/include/linux/firmware/cirrus/wmfw.h
@@ -23,10 +23,13 @@
#define WMFW_CTL_FLAG_WRITEABLE 0x0002
#define WMFW_CTL_FLAG_READABLE 0x0001
+#define WMFW_CTL_TYPE_BYTES 0x0004 /* byte control */
+
/* Non-ALSA coefficient types start at 0x1000 */
#define WMFW_CTL_TYPE_ACKED 0x1000 /* acked control */
#define WMFW_CTL_TYPE_HOSTEVENT 0x1001 /* event control */
#define WMFW_CTL_TYPE_HOST_BUFFER 0x1002 /* host buffer pointer */
+#define WMFW_CTL_TYPE_FWEVENT 0x1004 /* firmware event control */
struct wmfw_header {
char magic[4];
@@ -180,6 +183,7 @@ struct wmfw_coeff_item {
#define WMFW_ABSOLUTE 0xf0
#define WMFW_ALGORITHM_DATA 0xf2
+#define WMFW_METADATA 0xfc
#define WMFW_NAME_TEXT 0xfe
#define WMFW_INFO_TEXT 0xff
diff --git a/include/linux/firmware/imx/dsp.h b/include/linux/firmware/imx/dsp.h
index 7562099c9e46..4f7895a3b73c 100644
--- a/include/linux/firmware/imx/dsp.h
+++ b/include/linux/firmware/imx/dsp.h
@@ -55,6 +55,9 @@ static inline void *imx_dsp_get_data(struct imx_dsp_ipc *ipc)
int imx_dsp_ring_doorbell(struct imx_dsp_ipc *dsp, unsigned int chan_idx);
+struct mbox_chan *imx_dsp_request_channel(struct imx_dsp_ipc *ipc, int idx);
+void imx_dsp_free_channel(struct imx_dsp_ipc *ipc, int idx);
+
#else
static inline int imx_dsp_ring_doorbell(struct imx_dsp_ipc *ipc,
@@ -63,5 +66,12 @@ static inline int imx_dsp_ring_doorbell(struct imx_dsp_ipc *ipc,
return -ENOTSUPP;
}
+static inline struct mbox_chan *imx_dsp_request_channel(struct imx_dsp_ipc *ipc, int idx)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void imx_dsp_free_channel(struct imx_dsp_ipc *ipc, int idx) { }
+
#endif
#endif /* _IMX_DSP_IPC_H */
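Because the channel pointer is ERR_PTR-encoded, a caller written as below (a sketch with hypothetical names) works against both the real implementation and the stub.

static int example_open_channel(struct imx_dsp_ipc *ipc)
{
	struct mbox_chan *chan = imx_dsp_request_channel(ipc, 0);

	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... exchange messages over the channel ... */

	imx_dsp_free_channel(ipc, 0);
	return 0;
}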
diff --git a/include/linux/firmware/imx/ipc.h b/include/linux/firmware/imx/ipc.h
index 6312c8cb084a..0b4643571625 100644
--- a/include/linux/firmware/imx/ipc.h
+++ b/include/linux/firmware/imx/ipc.h
@@ -25,7 +25,6 @@ enum imx_sc_rpc_svc {
IMX_SC_RPC_SVC_PAD = 6,
IMX_SC_RPC_SVC_MISC = 7,
IMX_SC_RPC_SVC_IRQ = 8,
- IMX_SC_RPC_SVC_ABORT = 9
};
struct imx_sc_rpc_msg {
@@ -35,6 +34,7 @@ struct imx_sc_rpc_msg {
uint8_t func;
};
+#ifdef CONFIG_IMX_SCU
/*
 * This is a function to send an RPC message over an IPC channel.
* It is called by client-side SCFW API function shims.
@@ -56,4 +56,16 @@ int imx_scu_call_rpc(struct imx_sc_ipc *ipc, void *msg, bool have_resp);
* @return Returns an error code (0 = success, failed if < 0)
*/
int imx_scu_get_handle(struct imx_sc_ipc **ipc);
+#else
+static inline int imx_scu_call_rpc(struct imx_sc_ipc *ipc, void *msg,
+ bool have_resp)
+{
+ return -ENOTSUPP;
+}
+
+static inline int imx_scu_get_handle(struct imx_sc_ipc **ipc)
+{
+ return -ENOTSUPP;
+}
+#endif
#endif /* _SC_IPC_H */
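A hedged sketch of the shim pattern described above: the caller lays an RPC header over its message and hands the whole thing to imx_scu_call_rpc(). The message layout and function id are illustrative; IMX_SC_RPC_VERSION is assumed to be defined earlier in this header.

struct imx_sc_msg_example {
	struct imx_sc_rpc_msg hdr;
	u32 param;
} __packed;

static int example_rpc(struct imx_sc_ipc *ipc, u32 param)
{
	struct imx_sc_msg_example msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_MISC;
	hdr->func = 1;		/* hypothetical function id */
	hdr->size = 2;		/* message size in 32-bit words */
	msg.param = param;

	return imx_scu_call_rpc(ipc, &msg, true);
}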
diff --git a/include/linux/firmware/imx/s4.h b/include/linux/firmware/imx/s4.h
new file mode 100644
index 000000000000..9e34923ae1d6
--- /dev/null
+++ b/include/linux/firmware/imx/s4.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2021 NXP
+ *
+ * Header file for the IPC implementation.
+ */
+
+#ifndef _S4_IPC_H
+#define _S4_IPC_H
+
+struct imx_s4_ipc;
+
+struct imx_s4_rpc_msg {
+ uint8_t ver;
+ uint8_t size;
+ uint8_t cmd;
+ uint8_t tag;
+} __packed;
+
+#endif /* _S4_IPC_H */
diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h
index 17ba4e405129..5cc63fe7e84d 100644
--- a/include/linux/firmware/imx/sci.h
+++ b/include/linux/firmware/imx/sci.h
@@ -11,13 +11,41 @@
#define _SC_SCI_H
#include <linux/firmware/imx/ipc.h>
-#include <linux/firmware/imx/types.h>
#include <linux/firmware/imx/svc/misc.h>
#include <linux/firmware/imx/svc/pm.h>
+#include <linux/firmware/imx/svc/rm.h>
+#if IS_ENABLED(CONFIG_IMX_SCU)
int imx_scu_enable_general_irq_channel(struct device *dev);
int imx_scu_irq_register_notifier(struct notifier_block *nb);
int imx_scu_irq_unregister_notifier(struct notifier_block *nb);
int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable);
+int imx_scu_soc_init(struct device *dev);
+#else
+static inline int imx_scu_soc_init(struct device *dev)
+{
+ return -ENOTSUPP;
+}
+
+static inline int imx_scu_enable_general_irq_channel(struct device *dev)
+{
+ return -ENOTSUPP;
+}
+
+static inline int imx_scu_irq_register_notifier(struct notifier_block *nb)
+{
+ return -ENOTSUPP;
+}
+
+static inline int imx_scu_irq_unregister_notifier(struct notifier_block *nb)
+{
+ return -ENOTSUPP;
+}
+
+static inline int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable)
+{
+ return -ENOTSUPP;
+}
+#endif
#endif /* _SC_SCI_H */
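A sketch of how a driver might hook the SCU general IRQ notifier chain declared above; the callback body and names are illustrative.

static int example_scu_irq_cb(struct notifier_block *nb,
			      unsigned long event, void *group)
{
	/* React to the interrupt group event here. */
	return NOTIFY_OK;
}

static struct notifier_block example_scu_nb = {
	.notifier_call = example_scu_irq_cb,
};

/* In probe: imx_scu_irq_register_notifier(&example_scu_nb); */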
diff --git a/include/linux/firmware/imx/svc/misc.h b/include/linux/firmware/imx/svc/misc.h
index 031dd4d3c766..760db08a67fc 100644
--- a/include/linux/firmware/imx/svc/misc.h
+++ b/include/linux/firmware/imx/svc/misc.h
@@ -46,6 +46,7 @@ enum imx_misc_func {
* Control Functions
*/
+#ifdef CONFIG_IMX_SCU
int imx_sc_misc_set_control(struct imx_sc_ipc *ipc, u32 resource,
u8 ctrl, u32 val);
@@ -54,5 +55,23 @@ int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource,
int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource,
bool enable, u64 phys_addr);
+#else
+static inline int imx_sc_misc_set_control(struct imx_sc_ipc *ipc,
+ u32 resource, u8 ctrl, u32 val)
+{
+ return -ENOTSUPP;
+}
+static inline int imx_sc_misc_get_control(struct imx_sc_ipc *ipc,
+ u32 resource, u8 ctrl, u32 *val)
+{
+ return -ENOTSUPP;
+}
+
+static inline int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource,
+ bool enable, u64 phys_addr)
+{
+ return -ENOTSUPP;
+}
+#endif
#endif /* _SC_MISC_API_H */
diff --git a/include/linux/firmware/imx/svc/rm.h b/include/linux/firmware/imx/svc/rm.h
new file mode 100644
index 000000000000..31456f897aa9
--- /dev/null
+++ b/include/linux/firmware/imx/svc/rm.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2020 NXP
+ *
+ * Header file containing the public API for the System Controller (SC)
+ * Resource Management (RM) function. This includes functions for
+ * partitioning resources, pads, and memory regions.
+ *
+ * RM_SVC (SVC) Resource Management Service
+ *
+ * Module for the Resource Management (RM) service.
+ */
+
+#ifndef _SC_RM_API_H
+#define _SC_RM_API_H
+
+#include <linux/firmware/imx/sci.h>
+
+/*
+ * This type is used to indicate RPC RM function calls.
+ */
+enum imx_sc_rm_func {
+ IMX_SC_RM_FUNC_UNKNOWN = 0,
+ IMX_SC_RM_FUNC_PARTITION_ALLOC = 1,
+ IMX_SC_RM_FUNC_SET_CONFIDENTIAL = 31,
+ IMX_SC_RM_FUNC_PARTITION_FREE = 2,
+ IMX_SC_RM_FUNC_GET_DID = 26,
+ IMX_SC_RM_FUNC_PARTITION_STATIC = 3,
+ IMX_SC_RM_FUNC_PARTITION_LOCK = 4,
+ IMX_SC_RM_FUNC_GET_PARTITION = 5,
+ IMX_SC_RM_FUNC_SET_PARENT = 6,
+ IMX_SC_RM_FUNC_MOVE_ALL = 7,
+ IMX_SC_RM_FUNC_ASSIGN_RESOURCE = 8,
+ IMX_SC_RM_FUNC_SET_RESOURCE_MOVABLE = 9,
+ IMX_SC_RM_FUNC_SET_SUBSYS_RSRC_MOVABLE = 28,
+ IMX_SC_RM_FUNC_SET_MASTER_ATTRIBUTES = 10,
+ IMX_SC_RM_FUNC_SET_MASTER_SID = 11,
+ IMX_SC_RM_FUNC_SET_PERIPHERAL_PERMISSIONS = 12,
+ IMX_SC_RM_FUNC_IS_RESOURCE_OWNED = 13,
+ IMX_SC_RM_FUNC_GET_RESOURCE_OWNER = 33,
+ IMX_SC_RM_FUNC_IS_RESOURCE_MASTER = 14,
+ IMX_SC_RM_FUNC_IS_RESOURCE_PERIPHERAL = 15,
+ IMX_SC_RM_FUNC_GET_RESOURCE_INFO = 16,
+ IMX_SC_RM_FUNC_MEMREG_ALLOC = 17,
+ IMX_SC_RM_FUNC_MEMREG_SPLIT = 29,
+ IMX_SC_RM_FUNC_MEMREG_FRAG = 32,
+ IMX_SC_RM_FUNC_MEMREG_FREE = 18,
+ IMX_SC_RM_FUNC_FIND_MEMREG = 30,
+ IMX_SC_RM_FUNC_ASSIGN_MEMREG = 19,
+ IMX_SC_RM_FUNC_SET_MEMREG_PERMISSIONS = 20,
+ IMX_SC_RM_FUNC_IS_MEMREG_OWNED = 21,
+ IMX_SC_RM_FUNC_GET_MEMREG_INFO = 22,
+ IMX_SC_RM_FUNC_ASSIGN_PAD = 23,
+ IMX_SC_RM_FUNC_SET_PAD_MOVABLE = 24,
+ IMX_SC_RM_FUNC_IS_PAD_OWNED = 25,
+ IMX_SC_RM_FUNC_DUMP = 27,
+};
+
+#if IS_ENABLED(CONFIG_IMX_SCU)
+bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource);
+int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt);
+#else
+static inline bool
+imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource)
+{
+ return true;
+}
+static inline int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+#endif
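A small usage sketch for the two accessors above; the names and error policy are illustrative.

static int example_check_resource(struct imx_sc_ipc *ipc, u16 resource)
{
	u8 pt;
	int ret;

	if (!imx_sc_rm_is_resource_owned(ipc, resource))
		return -EPERM;	/* owned by another partition */

	ret = imx_sc_rm_get_resource_owner(ipc, resource, &pt);
	if (ret)
		return ret;

	return pt;		/* owning partition number */
}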
diff --git a/include/linux/firmware/imx/types.h b/include/linux/firmware/imx/types.h
deleted file mode 100644
index 80821100e85f..000000000000
--- a/include/linux/firmware/imx/types.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright (C) 2016 Freescale Semiconductor, Inc.
- * Copyright 2017~2018 NXP
- *
- * Header file containing types used across multiple service APIs.
- */
-
-#ifndef _SC_TYPES_H
-#define _SC_TYPES_H
-
-/*
- * This type is used to indicate a control.
- */
-enum imx_sc_ctrl {
- IMX_SC_C_TEMP = 0,
- IMX_SC_C_TEMP_HI = 1,
- IMX_SC_C_TEMP_LOW = 2,
- IMX_SC_C_PXL_LINK_MST1_ADDR = 3,
- IMX_SC_C_PXL_LINK_MST2_ADDR = 4,
- IMX_SC_C_PXL_LINK_MST_ENB = 5,
- IMX_SC_C_PXL_LINK_MST1_ENB = 6,
- IMX_SC_C_PXL_LINK_MST2_ENB = 7,
- IMX_SC_C_PXL_LINK_SLV1_ADDR = 8,
- IMX_SC_C_PXL_LINK_SLV2_ADDR = 9,
- IMX_SC_C_PXL_LINK_MST_VLD = 10,
- IMX_SC_C_PXL_LINK_MST1_VLD = 11,
- IMX_SC_C_PXL_LINK_MST2_VLD = 12,
- IMX_SC_C_SINGLE_MODE = 13,
- IMX_SC_C_ID = 14,
- IMX_SC_C_PXL_CLK_POLARITY = 15,
- IMX_SC_C_LINESTATE = 16,
- IMX_SC_C_PCIE_G_RST = 17,
- IMX_SC_C_PCIE_BUTTON_RST = 18,
- IMX_SC_C_PCIE_PERST = 19,
- IMX_SC_C_PHY_RESET = 20,
- IMX_SC_C_PXL_LINK_RATE_CORRECTION = 21,
- IMX_SC_C_PANIC = 22,
- IMX_SC_C_PRIORITY_GROUP = 23,
- IMX_SC_C_TXCLK = 24,
- IMX_SC_C_CLKDIV = 25,
- IMX_SC_C_DISABLE_50 = 26,
- IMX_SC_C_DISABLE_125 = 27,
- IMX_SC_C_SEL_125 = 28,
- IMX_SC_C_MODE = 29,
- IMX_SC_C_SYNC_CTRL0 = 30,
- IMX_SC_C_KACHUNK_CNT = 31,
- IMX_SC_C_KACHUNK_SEL = 32,
- IMX_SC_C_SYNC_CTRL1 = 33,
- IMX_SC_C_DPI_RESET = 34,
- IMX_SC_C_MIPI_RESET = 35,
- IMX_SC_C_DUAL_MODE = 36,
- IMX_SC_C_VOLTAGE = 37,
- IMX_SC_C_PXL_LINK_SEL = 38,
- IMX_SC_C_OFS_SEL = 39,
- IMX_SC_C_OFS_AUDIO = 40,
- IMX_SC_C_OFS_PERIPH = 41,
- IMX_SC_C_OFS_IRQ = 42,
- IMX_SC_C_RST0 = 43,
- IMX_SC_C_RST1 = 44,
- IMX_SC_C_SEL0 = 45,
- IMX_SC_C_LAST
-};
-
-#endif /* _SC_TYPES_H */
diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h
index 013ae4819deb..a718f853d457 100644
--- a/include/linux/firmware/intel/stratix10-smc.h
+++ b/include/linux/firmware/intel/stratix10-smc.h
@@ -54,32 +54,25 @@
* Secure monitor software doesn't recognize the request.
*
* INTEL_SIP_SMC_STATUS_OK:
- * FPGA configuration completed successfully,
- * In case of FPGA configuration write operation, it means secure monitor
- * software can accept the next chunk of FPGA configuration data.
+ * Secure monitor software accepts the service client's request.
*
- * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY:
- * In case of FPGA configuration write operation, it means secure monitor
- * software is still processing previous data & can't accept the next chunk
- * of data. Service driver needs to issue
- * INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE call to query the
- * completed block(s).
+ * INTEL_SIP_SMC_STATUS_BUSY:
+ * Secure monitor software is still processing the service client's request.
*
- * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR:
- * There is error during the FPGA configuration process.
+ * INTEL_SIP_SMC_STATUS_REJECTED:
+ * Secure monitor software rejects the service client's request.
*
- * INTEL_SIP_SMC_REG_ERROR:
- * There is error during a read or write operation of the protected registers.
+ * INTEL_SIP_SMC_STATUS_ERROR:
+ * There is an error during the processing of the service request.
*
* INTEL_SIP_SMC_RSU_ERROR:
- * There is error during a remote status update.
+ * There is an error during the processing of the remote status update request.
*/
#define INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF
#define INTEL_SIP_SMC_STATUS_OK 0x0
-#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY 0x1
-#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_REJECTED 0x2
-#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR 0x4
-#define INTEL_SIP_SMC_REG_ERROR 0x5
+#define INTEL_SIP_SMC_STATUS_BUSY 0x1
+#define INTEL_SIP_SMC_STATUS_REJECTED 0x2
+#define INTEL_SIP_SMC_STATUS_ERROR 0x4
#define INTEL_SIP_SMC_RSU_ERROR 0x7
/**
@@ -95,7 +88,7 @@
* a2-7: not used.
*
* Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK, or INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a0: INTEL_SIP_SMC_STATUS_OK, or INTEL_SIP_SMC_STATUS_ERROR.
* a1-3: not used.
*/
#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_START 1
@@ -115,8 +108,8 @@
* a3-7: not used.
*
* Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or
- * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_BUSY or
+ * INTEL_SIP_SMC_STATUS_ERROR.
* a1: 64bit physical address of 1st completed memory block if any completed
* block, otherwise zero value.
* a2: 64bit physical address of 2nd completed memory block if any completed
@@ -133,15 +126,15 @@
*
* Sync call used by service driver at EL1 to track the completed write
* transactions. This request is called after INTEL_SIP_SMC_FPGA_CONFIG_WRITE
- * call returns INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY.
+ * call returns INTEL_SIP_SMC_STATUS_BUSY.
*
* Call register usage:
* a0: INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE.
* a1-7: not used.
*
* Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or
- * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_BUSY or
+ * INTEL_SIP_SMC_STATUS_ERROR.
* a1: 64bit physical address of 1st completed memory block.
* a2: 64bit physical address of 2nd completed memory block if
* any completed block, otherwise zero value.
@@ -164,8 +157,8 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
* a1-7: not used.
*
* Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or
- * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_BUSY or
+ * INTEL_SIP_SMC_STATUS_ERROR.
* a1-3: not used.
*/
#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_ISDONE 4
@@ -183,7 +176,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
* a1-7: not used.
*
* Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_STATUS_ERROR.
* a1: start of physical address of reserved memory block.
* a2: size of reserved memory block.
* a3: not used.
@@ -203,7 +196,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
* a1-7: not used.
*
* Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_STATUS_ERROR.
* a1-3: not used.
*/
#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_LOOPBACK 6
@@ -328,8 +321,6 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
#define INTEL_SIP_SMC_ECC_DBE \
INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_ECC_DBE)
-#endif
-
/**
* Request INTEL_SIP_SMC_RSU_NOTIFY
*
@@ -368,3 +359,240 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
#define INTEL_SIP_SMC_FUNCID_RSU_RETRY_COUNTER 15
#define INTEL_SIP_SMC_RSU_RETRY_COUNTER \
INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_RETRY_COUNTER)
+
+/**
+ * Request INTEL_SIP_SMC_RSU_DCMF_VERSION
+ *
+ * Sync call used by service driver at EL1 to query DCMF (Decision
+ * Configuration Management Firmware) version from FW
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_RSU_DCMF_VERSION
+ * a1-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 dcmf1 | dcmf0
+ * a2 dcmf3 | dcmf2
+ *
+ * Or
+ *
+ * a0 INTEL_SIP_SMC_RSU_ERROR
+ */
+#define INTEL_SIP_SMC_FUNCID_RSU_DCMF_VERSION 16
+#define INTEL_SIP_SMC_RSU_DCMF_VERSION \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_DCMF_VERSION)
+
+/**
+ * Request INTEL_SIP_SMC_RSU_MAX_RETRY
+ *
+ * Sync call used by service driver at EL1 to query max retry value from FW
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_RSU_MAX_RETRY
+ * a1-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 max retry value
+ *
+ * Or
+ * a0 INTEL_SIP_SMC_RSU_ERROR
+ */
+#define INTEL_SIP_SMC_FUNCID_RSU_MAX_RETRY 18
+#define INTEL_SIP_SMC_RSU_MAX_RETRY \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_MAX_RETRY)
+
+/**
+ * Request INTEL_SIP_SMC_RSU_DCMF_STATUS
+ *
+ * Sync call used by service driver at EL1 to query DCMF status from FW
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_RSU_DCMF_STATUS
+ * a1-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 dcmf3 | dcmf2 | dcmf1 | dcmf0
+ *
+ * Or
+ *
+ * a0 INTEL_SIP_SMC_RSU_ERROR
+ */
+#define INTEL_SIP_SMC_FUNCID_RSU_DCMF_STATUS 20
+#define INTEL_SIP_SMC_RSU_DCMF_STATUS \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_DCMF_STATUS)
+
+/**
+ * Request INTEL_SIP_SMC_SERVICE_COMPLETED
+ * Sync call to check whether the secure world has completed the service
+ * request.
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_SERVICE_COMPLETED
+ * a1: this register is optional. If used, it is the physical address for
+ * secure firmware to put output data
+ * a2: this register is optional. If used, it is the size of output data
+ * a3-a7: not used
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_ERROR,
+ * INTEL_SIP_SMC_STATUS_REJECTED or INTEL_SIP_SMC_STATUS_BUSY
+ * a1: mailbox error if a0 is INTEL_SIP_SMC_STATUS_ERROR
+ * a2: physical address containing the process info
+ * for FCS certificate -- the data contains the certificate status
+ * for FCS cryption -- the data contains the actual size of the data processed by FW
+ * a3: output data size
+ */
+#define INTEL_SIP_SMC_FUNCID_SERVICE_COMPLETED 30
+#define INTEL_SIP_SMC_SERVICE_COMPLETED \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_SERVICE_COMPLETED)
+
+/**
+ * Request INTEL_SIP_SMC_FIRMWARE_VERSION
+ *
+ * Sync call used to query the version of running firmware
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FIRMWARE_VERSION
+ * a1-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_STATUS_ERROR
+ * a1 running firmware version
+ */
+#define INTEL_SIP_SMC_FUNCID_FIRMWARE_VERSION 31
+#define INTEL_SIP_SMC_FIRMWARE_VERSION \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FIRMWARE_VERSION)
+
+/**
+ * Request INTEL_SIP_SMC_SVC_VERSION
+ *
+ * Sync call used to query the SIP SMC API Version
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_SVC_VERSION
+ * a1-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 Major
+ * a2 Minor
+ */
+#define INTEL_SIP_SMC_SVC_FUNCID_VERSION 512
+#define INTEL_SIP_SMC_SVC_VERSION \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_SVC_FUNCID_VERSION)
+
+/**
+ * SMC call protocol for FPGA Crypto Service (FCS)
+ * FUNCID starts from 90
+ */
+
+/**
+ * Request INTEL_SIP_SMC_FCS_RANDOM_NUMBER
+ *
+ * Sync call used to query the random number generated by the firmware
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_RANDOM_NUMBER
+ * a1 the physical address for firmware to write generated random data
+ * a2-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FCS_ERROR or
+ * INTEL_SIP_SMC_FCS_REJECTED
+ * a1 mailbox error
+ * a2 the physical address of generated random number
+ * a3 size
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_RANDOM_NUMBER 90
+#define INTEL_SIP_SMC_FCS_RANDOM_NUMBER \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_RANDOM_NUMBER)
+
+/**
+ * Request INTEL_SIP_SMC_FCS_CRYPTION
+ * Async call for data encryption and HMAC signature generation, or for
+ * data decryption and HMAC verification.
+ *
+ * Call INTEL_SIP_SMC_SERVICE_COMPLETED to get the output encrypted or
+ * decrypted data
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_CRYPTION
+ * a1 cryption mode (1 for encryption and 0 for decryption)
+ * a2 physical address which stores to be encrypted or decrypted data
+ * a3 input data size
+ * a4 physical address which will hold the encrypted or decrypted output data
+ * a5 output data size
+ * a6-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_ERROR or
+ * INTEL_SIP_SMC_STATUS_REJECTED
+ * a1-3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_CRYPTION 91
+#define INTEL_SIP_SMC_FCS_CRYPTION \
+ INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_CRYPTION)
+
+/**
+ * Request INTEL_SIP_SMC_FCS_SERVICE_REQUEST
+ * Async call for authentication service of HPS software
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_SERVICE_REQUEST
+ * a1 the physical address of data block
+ * a2 size of data block
+ * a3-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_ERROR or
+ * INTEL_SIP_SMC_STATUS_REJECTED
+ * a1-a3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_SERVICE_REQUEST 92
+#define INTEL_SIP_SMC_FCS_SERVICE_REQUEST \
+ INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_SERVICE_REQUEST)
+
+/**
+ * Request INTEL_SIP_SMC_FUNCID_FCS_SEND_CERTIFICATE
+ * Sync call to send a signed certificate
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_SEND_CERTIFICATE
+ * a1 the physical address of CERTIFICATE block
+ * a2 size of data block
+ * a3-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_FCS_REJECTED
+ * a1-a3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_SEND_CERTIFICATE 93
+#define INTEL_SIP_SMC_FCS_SEND_CERTIFICATE \
+ INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_SEND_CERTIFICATE)
+
+/**
+ * Request INTEL_SIP_SMC_FCS_GET_PROVISION_DATA
+ * Sync call to dump all the fuses and key hashes
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_GET_PROVISION_DATA
+ * a1 the physical address for firmware to write structure of fuse and
+ * key hashes
+ * a2-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FCS_ERROR or
+ * INTEL_SIP_SMC_FCS_REJECTED
+ * a1 mailbox error
+ * a2 physical address for the structure of fuse and key hashes
+ * a3 the size of structure
+ *
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_GET_PROVISION_DATA 94
+#define INTEL_SIP_SMC_FCS_GET_PROVISION_DATA \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_GET_PROVISION_DATA)
+
+#endif
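A hedged sketch of how a service driver at EL1 could issue one of the fast calls above through the generic ARM SMCCC helper; the error mapping is illustrative.

#include <linux/arm-smccc.h>

static int example_firmware_version(u32 *version)
{
	struct arm_smccc_res res;

	arm_smccc_smc(INTEL_SIP_SMC_FIRMWARE_VERSION,
		      0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != INTEL_SIP_SMC_STATUS_OK)
		return -EIO;

	*version = res.a1;
	return 0;
}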
diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
index 59bc6e2af693..0c16037fd08d 100644
--- a/include/linux/firmware/intel/stratix10-svc-client.h
+++ b/include/linux/firmware/intel/stratix10-svc-client.h
@@ -6,7 +6,7 @@
#ifndef __STRATIX10_SVC_CLIENT_H
#define __STRATIX10_SVC_CLIENT_H
-/**
+/*
 * Client names supported by the service layer driver
*
* fpga: for FPGA configuration
@@ -14,67 +14,62 @@
*/
#define SVC_CLIENT_FPGA "fpga"
#define SVC_CLIENT_RSU "rsu"
+#define SVC_CLIENT_FCS "fcs"
-/**
+/*
* Status of the sent command, in bit number
*
- * SVC_COMMAND_STATUS_RECONFIG_REQUEST_OK:
- * Secure firmware accepts the request of FPGA reconfiguration.
+ * SVC_STATUS_OK:
+ * Secure firmware accepts the request issued by one of the service clients.
*
- * SVC_STATUS_RECONFIG_BUFFER_SUBMITTED:
- * Service client successfully submits FPGA configuration
- * data buffer to secure firmware.
+ * SVC_STATUS_BUFFER_SUBMITTED:
+ * Service client successfully submits data buffer to secure firmware.
*
- * SVC_COMMAND_STATUS_RECONFIG_BUFFER_DONE:
+ * SVC_STATUS_BUFFER_DONE:
* Secure firmware completes data process, ready to accept the
* next WRITE transaction.
*
- * SVC_COMMAND_STATUS_RECONFIG_COMPLETED:
- * Secure firmware completes FPGA configuration successfully, FPGA should
- * be in user mode.
- *
- * SVC_COMMAND_STATUS_RECONFIG_BUSY:
- * FPGA configuration is still in process.
- *
- * SVC_COMMAND_STATUS_RECONFIG_ERROR:
- * Error encountered during FPGA configuration.
+ * SVC_STATUS_COMPLETED:
+ * Secure firmware completes service request successfully. In case of
+ * FPGA configuration, FPGA should be in user mode.
*
- * SVC_STATUS_RSU_OK:
- * Secure firmware accepts the request of remote status update (RSU).
+ * SVC_STATUS_BUSY:
+ * Service request is still in process.
*
- * SVC_STATUS_RSU_ERROR:
- * Error encountered during remote system update.
+ * SVC_STATUS_ERROR:
+ * Error encountered while processing the service request.
*
- * SVC_STATUS_RSU_NO_SUPPORT:
- * Secure firmware doesn't support RSU retry or notify feature.
+ * SVC_STATUS_NO_SUPPORT:
+ * Secure firmware doesn't support requested features such as RSU retry
+ * or RSU notify.
*/
-#define SVC_STATUS_RECONFIG_REQUEST_OK 0
-#define SVC_STATUS_RECONFIG_BUFFER_SUBMITTED 1
-#define SVC_STATUS_RECONFIG_BUFFER_DONE 2
-#define SVC_STATUS_RECONFIG_COMPLETED 3
-#define SVC_STATUS_RECONFIG_BUSY 4
-#define SVC_STATUS_RECONFIG_ERROR 5
-#define SVC_STATUS_RSU_OK 6
-#define SVC_STATUS_RSU_ERROR 7
-#define SVC_STATUS_RSU_NO_SUPPORT 8
+#define SVC_STATUS_OK 0
+#define SVC_STATUS_BUFFER_SUBMITTED 1
+#define SVC_STATUS_BUFFER_DONE 2
+#define SVC_STATUS_COMPLETED 3
+#define SVC_STATUS_BUSY 4
+#define SVC_STATUS_ERROR 5
+#define SVC_STATUS_NO_SUPPORT 6
+#define SVC_STATUS_INVALID_PARAM 7
-/**
+/*
* Flag bit for COMMAND_RECONFIG
*
* COMMAND_RECONFIG_FLAG_PARTIAL:
- * Set to FPGA configuration type (full or partial), the default
- * is full reconfig.
+ * Set to select the FPGA configuration type (full or partial).
*/
#define COMMAND_RECONFIG_FLAG_PARTIAL 0
-/**
+/*
* Timeout settings for service clients:
* timeout value used in Stratix10 FPGA manager driver.
* timeout value used in RSU driver
*/
-#define SVC_RECONFIG_REQUEST_TIMEOUT_MS 100
-#define SVC_RECONFIG_BUFFER_TIMEOUT_MS 240
+#define SVC_RECONFIG_REQUEST_TIMEOUT_MS 300
+#define SVC_RECONFIG_BUFFER_TIMEOUT_MS 720
#define SVC_RSU_REQUEST_TIMEOUT_MS 300
+#define SVC_FCS_REQUEST_TIMEOUT_MS 2000
+#define SVC_COMPLETED_TIMEOUT_MS 30000
struct stratix10_svc_chan;
@@ -84,55 +79,109 @@ struct stratix10_svc_chan;
* @COMMAND_NOOP: do 'dummy' request for integration/debug/trouble-shooting
*
* @COMMAND_RECONFIG: ask for FPGA configuration preparation, return status
- * is SVC_STATUS_RECONFIG_REQUEST_OK
+ * is SVC_STATUS_OK
*
* @COMMAND_RECONFIG_DATA_SUBMIT: submit buffer(s) of bit-stream data for the
- * FPGA configuration, return status is SVC_STATUS_RECONFIG_BUFFER_SUBMITTED,
- * or SVC_STATUS_RECONFIG_ERROR
+ * FPGA configuration, return status is SVC_STATUS_BUFFER_SUBMITTED or SVC_STATUS_ERROR
*
* @COMMAND_RECONFIG_DATA_CLAIM: check the status of the configuration, return
- * status is SVC_STATUS_RECONFIG_COMPLETED, or SVC_STATUS_RECONFIG_BUSY, or
- * SVC_STATUS_RECONFIG_ERROR
+ * status is SVC_STATUS_COMPLETED, or SVC_STATUS_BUSY, or SVC_STATUS_ERROR
*
* @COMMAND_RECONFIG_STATUS: check the status of the configuration, return
- * status is SVC_STATUS_RECONFIG_COMPLETED, or SVC_STATUS_RECONFIG_BUSY, or
- * SVC_STATUS_RECONFIG_ERROR
+ * status is SVC_STATUS_COMPLETED, or SVC_STATUS_BUSY, or SVC_STATUS_ERROR
*
* @COMMAND_RSU_STATUS: request remote system update boot log, return status
 * is log data or SVC_STATUS_ERROR
*
* @COMMAND_RSU_UPDATE: set the offset of the bitstream to boot after reboot,
- * return status is SVC_STATUS_RSU_OK or SVC_STATUS_RSU_ERROR
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
*
* @COMMAND_RSU_NOTIFY: report the status of hard processor system
- * software to firmware, return status is SVC_STATUS_RSU_OK or
- * SVC_STATUS_RSU_ERROR
+ * software to firmware, return status is SVC_STATUS_OK or
+ * SVC_STATUS_ERROR
*
* @COMMAND_RSU_RETRY: query firmware for the current image's retry counter,
- * return status is SVC_STATUS_RSU_OK or SVC_STATUS_RSU_ERROR
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_RSU_MAX_RETRY: query firmware for the max retry value,
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_RSU_DCMF_VERSION: query firmware for the DCMF version, return status
+ * is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_POLL_SERVICE_STATUS: poll if the service request is complete,
+ * return status is SVC_STATUS_OK, SVC_STATUS_ERROR or SVC_STATUS_BUSY
+ *
+ * @COMMAND_FIRMWARE_VERSION: query running firmware version, return status
+ * is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_SMC_SVC_VERSION: Non-mailbox SMC SVC API Version,
+ * return status is SVC_STATUS_OK
+ *
+ * @COMMAND_RSU_DCMF_STATUS: query firmware for the DCMF status
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_REQUEST_SERVICE: request validation of image from firmware,
+ * return status is SVC_STATUS_OK or SVC_STATUS_INVALID_PARAM
+ *
+ * @COMMAND_FCS_SEND_CERTIFICATE: send a certificate, return status is
+ * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_GET_PROVISION_DATA: read the provisioning data, return status is
+ * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_DATA_ENCRYPTION: encrypt the data, return status is
+ * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_DATA_DECRYPTION: decrypt the data, return status is
+ * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_RANDOM_NUMBER_GEN: generate a random number, return status
+ * is SVC_STATUS_OK or SVC_STATUS_ERROR
*/
enum stratix10_svc_command_code {
+ /* for FPGA */
COMMAND_NOOP = 0,
COMMAND_RECONFIG,
COMMAND_RECONFIG_DATA_SUBMIT,
COMMAND_RECONFIG_DATA_CLAIM,
COMMAND_RECONFIG_STATUS,
- COMMAND_RSU_STATUS,
+ /* for RSU */
+ COMMAND_RSU_STATUS = 10,
COMMAND_RSU_UPDATE,
COMMAND_RSU_NOTIFY,
COMMAND_RSU_RETRY,
+ COMMAND_RSU_MAX_RETRY,
+ COMMAND_RSU_DCMF_VERSION,
+ COMMAND_RSU_DCMF_STATUS,
+ COMMAND_FIRMWARE_VERSION,
+ /* for FCS */
+ COMMAND_FCS_REQUEST_SERVICE = 20,
+ COMMAND_FCS_SEND_CERTIFICATE,
+ COMMAND_FCS_GET_PROVISION_DATA,
+ COMMAND_FCS_DATA_ENCRYPTION,
+ COMMAND_FCS_DATA_DECRYPTION,
+ COMMAND_FCS_RANDOM_NUMBER_GEN,
+ /* for general status poll */
+ COMMAND_POLL_SERVICE_STATUS = 40,
+ /* Non-mailbox SMC Call */
+ COMMAND_SMC_SVC_VERSION = 200,
};
/**
* struct stratix10_svc_client_msg - message sent by client to service
 * @payload: starting address of the data to be processed
- * @payload_length: data size in bytes
+ * @payload_length: size in bytes of the data to be processed
+ * @payload_output: starting address of the processed data
+ * @payload_length_output: size in bytes of the processed data
* @command: service command
* @arg: args to be passed via registers and not physically mapped buffers
*/
struct stratix10_svc_client_msg {
void *payload;
size_t payload_length;
+ void *payload_output;
+ size_t payload_length_output;
enum stratix10_svc_command_code command;
u64 arg[3];
};
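A hedged sketch of how a client might use the new output fields for an FCS encryption request; it assumes a channel already obtained via stratix10_svc_request_channel_byname(), and error handling is simplified:

static int example_fcs_encrypt(struct stratix10_svc_chan *chan,
			       const void *src, size_t in_len, size_t out_len)
{
	struct stratix10_svc_client_msg msg = {
		.command = COMMAND_FCS_DATA_ENCRYPTION,
	};
	void *in_buf, *out_buf;

	/* buffers must come from the service layer's memory pool */
	in_buf = stratix10_svc_allocate_memory(chan, in_len);
	out_buf = stratix10_svc_allocate_memory(chan, out_len);
	if (IS_ERR(in_buf) || IS_ERR(out_buf))
		return -ENOMEM;
	memcpy(in_buf, src, in_len);

	msg.payload = in_buf;
	msg.payload_length = in_len;
	msg.payload_output = out_buf;
	msg.payload_length_output = out_len;

	/* completion is reported through the client callback; a busy
	 * firmware can be re-queried with COMMAND_POLL_SERVICE_STATUS */
	return stratix10_svc_send(chan, &msg);
}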
@@ -222,7 +271,7 @@ void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr);
int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg);
/**
- * intel_svc_done() - complete service request
+ * stratix10_svc_done() - complete service request
* @chan: service channel assigned to the client
*
* This function is used by service client to inform service layer that
diff --git a/include/linux/firmware/mediatek/mtk-adsp-ipc.h b/include/linux/firmware/mediatek/mtk-adsp-ipc.h
new file mode 100644
index 000000000000..28fd313340b8
--- /dev/null
+++ b/include/linux/firmware/mediatek/mtk-adsp-ipc.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef MTK_ADSP_IPC_H
+#define MTK_ADSP_IPC_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox_client.h>
+
+#define MTK_ADSP_IPC_REQ 0
+#define MTK_ADSP_IPC_RSP 1
+#define MTK_ADSP_IPC_OP_REQ 0x1
+#define MTK_ADSP_IPC_OP_RSP 0x2
+
+enum {
+ MTK_ADSP_MBOX_REPLY,
+ MTK_ADSP_MBOX_REQUEST,
+ MTK_ADSP_MBOX_NUM,
+};
+
+struct mtk_adsp_ipc;
+
+struct mtk_adsp_ipc_ops {
+ void (*handle_reply)(struct mtk_adsp_ipc *ipc);
+ void (*handle_request)(struct mtk_adsp_ipc *ipc);
+};
+
+struct mtk_adsp_chan {
+ struct mtk_adsp_ipc *ipc;
+ struct mbox_client cl;
+ struct mbox_chan *ch;
+ char *name;
+ int idx;
+};
+
+struct mtk_adsp_ipc {
+ struct mtk_adsp_chan chans[MTK_ADSP_MBOX_NUM];
+ struct device *dev;
+ struct mtk_adsp_ipc_ops *ops;
+ void *private_data;
+};
+
+static inline void mtk_adsp_ipc_set_data(struct mtk_adsp_ipc *ipc, void *data)
+{
+ if (!ipc)
+ return;
+
+ ipc->private_data = data;
+}
+
+static inline void *mtk_adsp_ipc_get_data(struct mtk_adsp_ipc *ipc)
+{
+ if (!ipc)
+ return NULL;
+
+ return ipc->private_data;
+}
+
+int mtk_adsp_ipc_send(struct mtk_adsp_ipc *ipc, unsigned int idx, uint32_t op);
+
+#endif /* MTK_ADSP_IPC_H */
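A sketch of expected usage, assuming the mtk_adsp_ipc instance and its mailbox channels are set up by the platform driver; the callback bodies and example_* names are illustrative:

static void example_handle_reply(struct mtk_adsp_ipc *ipc)
{
	/* DSP acknowledged our request; wake whoever is waiting */
}

static void example_handle_request(struct mtk_adsp_ipc *ipc)
{
	/* DSP initiated a request; service it, then answer */
}

static struct mtk_adsp_ipc_ops example_ops = {
	.handle_reply	= example_handle_reply,
	.handle_request	= example_handle_request,
};

static int example_kick_dsp(struct mtk_adsp_ipc *ipc)
{
	ipc->ops = &example_ops;
	/* ring the request channel with the request opcode */
	return mtk_adsp_ipc_send(ipc, MTK_ADSP_IPC_REQ, MTK_ADSP_IPC_OP_REQ);
}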
diff --git a/include/linux/firmware/meson/meson_sm.h b/include/linux/firmware/meson/meson_sm.h
index 6669e2a1d5fd..95b0da2326a9 100644
--- a/include/linux/firmware/meson/meson_sm.h
+++ b/include/linux/firmware/meson/meson_sm.h
@@ -12,6 +12,8 @@ enum {
SM_EFUSE_WRITE,
SM_EFUSE_USER_MAX,
SM_GET_CHIP_ID,
+ SM_A1_PWRC_SET,
+ SM_A1_PWRC_GET,
};
struct meson_sm_firmware;
diff --git a/include/linux/firmware/trusted_foundations.h b/include/linux/firmware/trusted_foundations.h
index 2549a2db56aa..931b6c5c72df 100644
--- a/include/linux/firmware/trusted_foundations.h
+++ b/include/linux/firmware/trusted_foundations.h
@@ -32,6 +32,7 @@
#define TF_PM_MODE_LP1_NO_MC_CLK 2
#define TF_PM_MODE_LP2 3
#define TF_PM_MODE_LP2_NOFLUSH_L2 4
+#define TF_PM_MODE_NONE 5
struct trusted_foundations_platform_data {
unsigned int version_major;
@@ -70,12 +71,16 @@ static inline void register_trusted_foundations(
static inline void of_register_trusted_foundations(void)
{
+ struct device_node *np = of_find_compatible_node(NULL, NULL, "tlm,trusted-foundations");
+
+ if (!np)
+ return;
+ of_node_put(np);
/*
* If we find the target should enable TF but does not support it,
* fail as the system won't be able to do much anyway
*/
- if (of_find_compatible_node(NULL, NULL, "tlm,trusted-foundations"))
- register_trusted_foundations(NULL);
+ register_trusted_foundations(NULL);
}
static inline bool trusted_foundations_registered(void)
diff --git a/include/linux/firmware/xlnx-event-manager.h b/include/linux/firmware/xlnx-event-manager.h
new file mode 100644
index 000000000000..82e8254b0f80
--- /dev/null
+++ b/include/linux/firmware/xlnx-event-manager.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _FIRMWARE_XLNX_EVENT_MANAGER_H_
+#define _FIRMWARE_XLNX_EVENT_MANAGER_H_
+
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define CB_MAX_PAYLOAD_SIZE (4U) /* payload is at most four 32-bit words */
+
+/************************** Exported Functions ****************************/
+
+typedef void (*event_cb_func_t)(const u32 *payload, void *data);
+
+#if IS_REACHABLE(CONFIG_XLNX_EVENT_MANAGER)
+int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, const bool wake,
+ event_cb_func_t cb_fun, void *data);
+
+int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, event_cb_func_t cb_fun, void *data);
+#else
+static inline int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, const bool wake,
+ event_cb_func_t cb_fun, void *data)
+{
+ return -ENODEV;
+}
+
+static inline int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, event_cb_func_t cb_fun, void *data)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* _FIRMWARE_XLNX_EVENT_MANAGER_H_ */
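A hedged registration sketch for one of the error events defined in xlnx-zynqmp.h; the event mask and callback are illustrative:

static void example_error_cb(const u32 *payload, void *data)
{
	/* payload carries up to CB_MAX_PAYLOAD_SIZE 32-bit words */
	pr_err("PMC error event, payload[0] = 0x%x\n", payload[0]);
}

static int example_register(void)
{
	/* subscribe to bit 0 of the PMC ERR1 node, no wake-up */
	return xlnx_register_event(PM_NOTIFY_CB, EVENT_ERROR_PMC_ERR1,
				   BIT(0), false, example_error_cb, NULL);
}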
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 2cd12ebd6826..76d2b3ebad84 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -2,7 +2,7 @@
/*
* Xilinx Zynq MPSoC Firmware layer
*
- * Copyright (C) 2014-2019 Xilinx
+ * Copyright (C) 2014-2021 Xilinx
*
* Michal Simek <michal.simek@xilinx.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -13,6 +13,8 @@
#ifndef __FIRMWARE_ZYNQMP_H__
#define __FIRMWARE_ZYNQMP_H__
+#include <linux/err.h>
+
#define ZYNQMP_PM_VERSION_MAJOR 1
#define ZYNQMP_PM_VERSION_MINOR 0
@@ -27,6 +29,12 @@
/* SMC SIP service Call Function Identifier Prefix */
#define PM_SIP_SVC 0xC2000000
+
+/* PM API versions */
+#define PM_API_VERSION_2 2
+
+/* ATF only commands */
+#define TF_A_PM_REGISTER_SGI 0xa04
#define PM_GET_TRUSTZONE_VERSION 0xa03
#define PM_SET_SUSPEND_MODE 0xa02
#define GET_CALLBACK_DATA 0xa01
@@ -42,15 +50,17 @@
#define ZYNQMP_PM_MAX_QOS 100U
+#define GSS_NUM_REGS (4)
+
/* Node capabilities */
#define ZYNQMP_PM_CAPABILITY_ACCESS 0x1U
#define ZYNQMP_PM_CAPABILITY_CONTEXT 0x2U
#define ZYNQMP_PM_CAPABILITY_WAKEUP 0x4U
#define ZYNQMP_PM_CAPABILITY_UNUSABLE 0x8U
-/* Feature check status */
-#define PM_FEATURE_INVALID -1
-#define PM_FEATURE_UNCHECKED 0
+/* Loader commands */
+#define PM_LOAD_PDI 0x701
+#define PDI_SRC_DDR 0xF
/*
* Firmware FPGA Manager flags
@@ -60,30 +70,55 @@
#define XILINX_ZYNQMP_PM_FPGA_FULL 0x0U
#define XILINX_ZYNQMP_PM_FPGA_PARTIAL BIT(0)
+/*
+ * Node IDs for the Error Events.
+ */
+#define EVENT_ERROR_PMC_ERR1 (0x28100000U)
+#define EVENT_ERROR_PMC_ERR2 (0x28104000U)
+#define EVENT_ERROR_PSM_ERR1 (0x28108000U)
+#define EVENT_ERROR_PSM_ERR2 (0x2810C000U)
+
+enum pm_api_cb_id {
+ PM_INIT_SUSPEND_CB = 30,
+ PM_ACKNOWLEDGE_CB = 31,
+ PM_NOTIFY_CB = 32,
+};
+
enum pm_api_id {
PM_GET_API_VERSION = 1,
+ PM_REGISTER_NOTIFIER = 5,
+ PM_SYSTEM_SHUTDOWN = 12,
PM_REQUEST_NODE = 13,
- PM_RELEASE_NODE,
- PM_SET_REQUIREMENT,
+ PM_RELEASE_NODE = 14,
+ PM_SET_REQUIREMENT = 15,
PM_RESET_ASSERT = 17,
- PM_RESET_GET_STATUS,
+ PM_RESET_GET_STATUS = 18,
+ PM_MMIO_WRITE = 19,
+ PM_MMIO_READ = 20,
PM_PM_INIT_FINALIZE = 21,
- PM_FPGA_LOAD,
- PM_FPGA_GET_STATUS,
+ PM_FPGA_LOAD = 22,
+ PM_FPGA_GET_STATUS = 23,
PM_GET_CHIPID = 24,
+ PM_SECURE_SHA = 26,
+ PM_PINCTRL_REQUEST = 28,
+ PM_PINCTRL_RELEASE = 29,
+ PM_PINCTRL_GET_FUNCTION = 30,
+ PM_PINCTRL_SET_FUNCTION = 31,
+ PM_PINCTRL_CONFIG_PARAM_GET = 32,
+ PM_PINCTRL_CONFIG_PARAM_SET = 33,
PM_IOCTL = 34,
- PM_QUERY_DATA,
- PM_CLOCK_ENABLE,
- PM_CLOCK_DISABLE,
- PM_CLOCK_GETSTATE,
- PM_CLOCK_SETDIVIDER,
- PM_CLOCK_GETDIVIDER,
- PM_CLOCK_SETRATE,
- PM_CLOCK_GETRATE,
- PM_CLOCK_SETPARENT,
- PM_CLOCK_GETPARENT,
+ PM_QUERY_DATA = 35,
+ PM_CLOCK_ENABLE = 36,
+ PM_CLOCK_DISABLE = 37,
+ PM_CLOCK_GETSTATE = 38,
+ PM_CLOCK_SETDIVIDER = 39,
+ PM_CLOCK_GETDIVIDER = 40,
+ PM_CLOCK_SETRATE = 41,
+ PM_CLOCK_GETRATE = 42,
+ PM_CLOCK_SETPARENT = 43,
+ PM_CLOCK_GETPARENT = 44,
+ PM_SECURE_AES = 47,
PM_FEATURE_CHECK = 63,
- PM_API_MAX,
};
/* PMU-FW return status codes */
@@ -91,184 +126,304 @@ enum pm_ret_status {
XST_PM_SUCCESS = 0,
XST_PM_NO_FEATURE = 19,
XST_PM_INTERNAL = 2000,
- XST_PM_CONFLICT,
- XST_PM_NO_ACCESS,
- XST_PM_INVALID_NODE,
- XST_PM_DOUBLE_REQ,
- XST_PM_ABORT_SUSPEND,
+ XST_PM_CONFLICT = 2001,
+ XST_PM_NO_ACCESS = 2002,
+ XST_PM_INVALID_NODE = 2003,
+ XST_PM_DOUBLE_REQ = 2004,
+ XST_PM_ABORT_SUSPEND = 2005,
XST_PM_MULT_USER = 2008,
};
enum pm_ioctl_id {
+ IOCTL_SD_DLL_RESET = 6,
IOCTL_SET_SD_TAPDELAY = 7,
- IOCTL_SET_PLL_FRAC_MODE,
- IOCTL_GET_PLL_FRAC_MODE,
- IOCTL_SET_PLL_FRAC_DATA,
- IOCTL_GET_PLL_FRAC_DATA,
+ IOCTL_SET_PLL_FRAC_MODE = 8,
+ IOCTL_GET_PLL_FRAC_MODE = 9,
+ IOCTL_SET_PLL_FRAC_DATA = 10,
+ IOCTL_GET_PLL_FRAC_DATA = 11,
+ IOCTL_WRITE_GGS = 12,
+ IOCTL_READ_GGS = 13,
+ IOCTL_WRITE_PGGS = 14,
+ IOCTL_READ_PGGS = 15,
+ /* Set healthy bit value */
+ IOCTL_SET_BOOT_HEALTH_STATUS = 17,
+ IOCTL_OSPI_MUX_SELECT = 21,
+ /* Register SGI to ATF */
+ IOCTL_REGISTER_SGI = 25,
+ /* Runtime feature configuration */
+ IOCTL_SET_FEATURE_CONFIG = 26,
+ IOCTL_GET_FEATURE_CONFIG = 27,
+ /* Dynamic SD/GEM configuration */
+ IOCTL_SET_SD_CONFIG = 30,
+ IOCTL_SET_GEM_CONFIG = 31,
};
enum pm_query_id {
- PM_QID_INVALID,
- PM_QID_CLOCK_GET_NAME,
- PM_QID_CLOCK_GET_TOPOLOGY,
- PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS,
- PM_QID_CLOCK_GET_PARENTS,
- PM_QID_CLOCK_GET_ATTRIBUTES,
+ PM_QID_INVALID = 0,
+ PM_QID_CLOCK_GET_NAME = 1,
+ PM_QID_CLOCK_GET_TOPOLOGY = 2,
+ PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS = 3,
+ PM_QID_CLOCK_GET_PARENTS = 4,
+ PM_QID_CLOCK_GET_ATTRIBUTES = 5,
+ PM_QID_PINCTRL_GET_NUM_PINS = 6,
+ PM_QID_PINCTRL_GET_NUM_FUNCTIONS = 7,
+ PM_QID_PINCTRL_GET_NUM_FUNCTION_GROUPS = 8,
+ PM_QID_PINCTRL_GET_FUNCTION_NAME = 9,
+ PM_QID_PINCTRL_GET_FUNCTION_GROUPS = 10,
+ PM_QID_PINCTRL_GET_PIN_GROUPS = 11,
PM_QID_CLOCK_GET_NUM_CLOCKS = 12,
- PM_QID_CLOCK_GET_MAX_DIVISOR,
+ PM_QID_CLOCK_GET_MAX_DIVISOR = 13,
};
enum zynqmp_pm_reset_action {
- PM_RESET_ACTION_RELEASE,
- PM_RESET_ACTION_ASSERT,
- PM_RESET_ACTION_PULSE,
+ PM_RESET_ACTION_RELEASE = 0,
+ PM_RESET_ACTION_ASSERT = 1,
+ PM_RESET_ACTION_PULSE = 2,
};
enum zynqmp_pm_reset {
ZYNQMP_PM_RESET_START = 1000,
ZYNQMP_PM_RESET_PCIE_CFG = ZYNQMP_PM_RESET_START,
- ZYNQMP_PM_RESET_PCIE_BRIDGE,
- ZYNQMP_PM_RESET_PCIE_CTRL,
- ZYNQMP_PM_RESET_DP,
- ZYNQMP_PM_RESET_SWDT_CRF,
- ZYNQMP_PM_RESET_AFI_FM5,
- ZYNQMP_PM_RESET_AFI_FM4,
- ZYNQMP_PM_RESET_AFI_FM3,
- ZYNQMP_PM_RESET_AFI_FM2,
- ZYNQMP_PM_RESET_AFI_FM1,
- ZYNQMP_PM_RESET_AFI_FM0,
- ZYNQMP_PM_RESET_GDMA,
- ZYNQMP_PM_RESET_GPU_PP1,
- ZYNQMP_PM_RESET_GPU_PP0,
- ZYNQMP_PM_RESET_GPU,
- ZYNQMP_PM_RESET_GT,
- ZYNQMP_PM_RESET_SATA,
- ZYNQMP_PM_RESET_ACPU3_PWRON,
- ZYNQMP_PM_RESET_ACPU2_PWRON,
- ZYNQMP_PM_RESET_ACPU1_PWRON,
- ZYNQMP_PM_RESET_ACPU0_PWRON,
- ZYNQMP_PM_RESET_APU_L2,
- ZYNQMP_PM_RESET_ACPU3,
- ZYNQMP_PM_RESET_ACPU2,
- ZYNQMP_PM_RESET_ACPU1,
- ZYNQMP_PM_RESET_ACPU0,
- ZYNQMP_PM_RESET_DDR,
- ZYNQMP_PM_RESET_APM_FPD,
- ZYNQMP_PM_RESET_SOFT,
- ZYNQMP_PM_RESET_GEM0,
- ZYNQMP_PM_RESET_GEM1,
- ZYNQMP_PM_RESET_GEM2,
- ZYNQMP_PM_RESET_GEM3,
- ZYNQMP_PM_RESET_QSPI,
- ZYNQMP_PM_RESET_UART0,
- ZYNQMP_PM_RESET_UART1,
- ZYNQMP_PM_RESET_SPI0,
- ZYNQMP_PM_RESET_SPI1,
- ZYNQMP_PM_RESET_SDIO0,
- ZYNQMP_PM_RESET_SDIO1,
- ZYNQMP_PM_RESET_CAN0,
- ZYNQMP_PM_RESET_CAN1,
- ZYNQMP_PM_RESET_I2C0,
- ZYNQMP_PM_RESET_I2C1,
- ZYNQMP_PM_RESET_TTC0,
- ZYNQMP_PM_RESET_TTC1,
- ZYNQMP_PM_RESET_TTC2,
- ZYNQMP_PM_RESET_TTC3,
- ZYNQMP_PM_RESET_SWDT_CRL,
- ZYNQMP_PM_RESET_NAND,
- ZYNQMP_PM_RESET_ADMA,
- ZYNQMP_PM_RESET_GPIO,
- ZYNQMP_PM_RESET_IOU_CC,
- ZYNQMP_PM_RESET_TIMESTAMP,
- ZYNQMP_PM_RESET_RPU_R50,
- ZYNQMP_PM_RESET_RPU_R51,
- ZYNQMP_PM_RESET_RPU_AMBA,
- ZYNQMP_PM_RESET_OCM,
- ZYNQMP_PM_RESET_RPU_PGE,
- ZYNQMP_PM_RESET_USB0_CORERESET,
- ZYNQMP_PM_RESET_USB1_CORERESET,
- ZYNQMP_PM_RESET_USB0_HIBERRESET,
- ZYNQMP_PM_RESET_USB1_HIBERRESET,
- ZYNQMP_PM_RESET_USB0_APB,
- ZYNQMP_PM_RESET_USB1_APB,
- ZYNQMP_PM_RESET_IPI,
- ZYNQMP_PM_RESET_APM_LPD,
- ZYNQMP_PM_RESET_RTC,
- ZYNQMP_PM_RESET_SYSMON,
- ZYNQMP_PM_RESET_AFI_FM6,
- ZYNQMP_PM_RESET_LPD_SWDT,
- ZYNQMP_PM_RESET_FPD,
- ZYNQMP_PM_RESET_RPU_DBG1,
- ZYNQMP_PM_RESET_RPU_DBG0,
- ZYNQMP_PM_RESET_DBG_LPD,
- ZYNQMP_PM_RESET_DBG_FPD,
- ZYNQMP_PM_RESET_APLL,
- ZYNQMP_PM_RESET_DPLL,
- ZYNQMP_PM_RESET_VPLL,
- ZYNQMP_PM_RESET_IOPLL,
- ZYNQMP_PM_RESET_RPLL,
- ZYNQMP_PM_RESET_GPO3_PL_0,
- ZYNQMP_PM_RESET_GPO3_PL_1,
- ZYNQMP_PM_RESET_GPO3_PL_2,
- ZYNQMP_PM_RESET_GPO3_PL_3,
- ZYNQMP_PM_RESET_GPO3_PL_4,
- ZYNQMP_PM_RESET_GPO3_PL_5,
- ZYNQMP_PM_RESET_GPO3_PL_6,
- ZYNQMP_PM_RESET_GPO3_PL_7,
- ZYNQMP_PM_RESET_GPO3_PL_8,
- ZYNQMP_PM_RESET_GPO3_PL_9,
- ZYNQMP_PM_RESET_GPO3_PL_10,
- ZYNQMP_PM_RESET_GPO3_PL_11,
- ZYNQMP_PM_RESET_GPO3_PL_12,
- ZYNQMP_PM_RESET_GPO3_PL_13,
- ZYNQMP_PM_RESET_GPO3_PL_14,
- ZYNQMP_PM_RESET_GPO3_PL_15,
- ZYNQMP_PM_RESET_GPO3_PL_16,
- ZYNQMP_PM_RESET_GPO3_PL_17,
- ZYNQMP_PM_RESET_GPO3_PL_18,
- ZYNQMP_PM_RESET_GPO3_PL_19,
- ZYNQMP_PM_RESET_GPO3_PL_20,
- ZYNQMP_PM_RESET_GPO3_PL_21,
- ZYNQMP_PM_RESET_GPO3_PL_22,
- ZYNQMP_PM_RESET_GPO3_PL_23,
- ZYNQMP_PM_RESET_GPO3_PL_24,
- ZYNQMP_PM_RESET_GPO3_PL_25,
- ZYNQMP_PM_RESET_GPO3_PL_26,
- ZYNQMP_PM_RESET_GPO3_PL_27,
- ZYNQMP_PM_RESET_GPO3_PL_28,
- ZYNQMP_PM_RESET_GPO3_PL_29,
- ZYNQMP_PM_RESET_GPO3_PL_30,
- ZYNQMP_PM_RESET_GPO3_PL_31,
- ZYNQMP_PM_RESET_RPU_LS,
- ZYNQMP_PM_RESET_PS_ONLY,
- ZYNQMP_PM_RESET_PL,
- ZYNQMP_PM_RESET_PS_PL0,
- ZYNQMP_PM_RESET_PS_PL1,
- ZYNQMP_PM_RESET_PS_PL2,
- ZYNQMP_PM_RESET_PS_PL3,
+ ZYNQMP_PM_RESET_PCIE_BRIDGE = 1001,
+ ZYNQMP_PM_RESET_PCIE_CTRL = 1002,
+ ZYNQMP_PM_RESET_DP = 1003,
+ ZYNQMP_PM_RESET_SWDT_CRF = 1004,
+ ZYNQMP_PM_RESET_AFI_FM5 = 1005,
+ ZYNQMP_PM_RESET_AFI_FM4 = 1006,
+ ZYNQMP_PM_RESET_AFI_FM3 = 1007,
+ ZYNQMP_PM_RESET_AFI_FM2 = 1008,
+ ZYNQMP_PM_RESET_AFI_FM1 = 1009,
+ ZYNQMP_PM_RESET_AFI_FM0 = 1010,
+ ZYNQMP_PM_RESET_GDMA = 1011,
+ ZYNQMP_PM_RESET_GPU_PP1 = 1012,
+ ZYNQMP_PM_RESET_GPU_PP0 = 1013,
+ ZYNQMP_PM_RESET_GPU = 1014,
+ ZYNQMP_PM_RESET_GT = 1015,
+ ZYNQMP_PM_RESET_SATA = 1016,
+ ZYNQMP_PM_RESET_ACPU3_PWRON = 1017,
+ ZYNQMP_PM_RESET_ACPU2_PWRON = 1018,
+ ZYNQMP_PM_RESET_ACPU1_PWRON = 1019,
+ ZYNQMP_PM_RESET_ACPU0_PWRON = 1020,
+ ZYNQMP_PM_RESET_APU_L2 = 1021,
+ ZYNQMP_PM_RESET_ACPU3 = 1022,
+ ZYNQMP_PM_RESET_ACPU2 = 1023,
+ ZYNQMP_PM_RESET_ACPU1 = 1024,
+ ZYNQMP_PM_RESET_ACPU0 = 1025,
+ ZYNQMP_PM_RESET_DDR = 1026,
+ ZYNQMP_PM_RESET_APM_FPD = 1027,
+ ZYNQMP_PM_RESET_SOFT = 1028,
+ ZYNQMP_PM_RESET_GEM0 = 1029,
+ ZYNQMP_PM_RESET_GEM1 = 1030,
+ ZYNQMP_PM_RESET_GEM2 = 1031,
+ ZYNQMP_PM_RESET_GEM3 = 1032,
+ ZYNQMP_PM_RESET_QSPI = 1033,
+ ZYNQMP_PM_RESET_UART0 = 1034,
+ ZYNQMP_PM_RESET_UART1 = 1035,
+ ZYNQMP_PM_RESET_SPI0 = 1036,
+ ZYNQMP_PM_RESET_SPI1 = 1037,
+ ZYNQMP_PM_RESET_SDIO0 = 1038,
+ ZYNQMP_PM_RESET_SDIO1 = 1039,
+ ZYNQMP_PM_RESET_CAN0 = 1040,
+ ZYNQMP_PM_RESET_CAN1 = 1041,
+ ZYNQMP_PM_RESET_I2C0 = 1042,
+ ZYNQMP_PM_RESET_I2C1 = 1043,
+ ZYNQMP_PM_RESET_TTC0 = 1044,
+ ZYNQMP_PM_RESET_TTC1 = 1045,
+ ZYNQMP_PM_RESET_TTC2 = 1046,
+ ZYNQMP_PM_RESET_TTC3 = 1047,
+ ZYNQMP_PM_RESET_SWDT_CRL = 1048,
+ ZYNQMP_PM_RESET_NAND = 1049,
+ ZYNQMP_PM_RESET_ADMA = 1050,
+ ZYNQMP_PM_RESET_GPIO = 1051,
+ ZYNQMP_PM_RESET_IOU_CC = 1052,
+ ZYNQMP_PM_RESET_TIMESTAMP = 1053,
+ ZYNQMP_PM_RESET_RPU_R50 = 1054,
+ ZYNQMP_PM_RESET_RPU_R51 = 1055,
+ ZYNQMP_PM_RESET_RPU_AMBA = 1056,
+ ZYNQMP_PM_RESET_OCM = 1057,
+ ZYNQMP_PM_RESET_RPU_PGE = 1058,
+ ZYNQMP_PM_RESET_USB0_CORERESET = 1059,
+ ZYNQMP_PM_RESET_USB1_CORERESET = 1060,
+ ZYNQMP_PM_RESET_USB0_HIBERRESET = 1061,
+ ZYNQMP_PM_RESET_USB1_HIBERRESET = 1062,
+ ZYNQMP_PM_RESET_USB0_APB = 1063,
+ ZYNQMP_PM_RESET_USB1_APB = 1064,
+ ZYNQMP_PM_RESET_IPI = 1065,
+ ZYNQMP_PM_RESET_APM_LPD = 1066,
+ ZYNQMP_PM_RESET_RTC = 1067,
+ ZYNQMP_PM_RESET_SYSMON = 1068,
+ ZYNQMP_PM_RESET_AFI_FM6 = 1069,
+ ZYNQMP_PM_RESET_LPD_SWDT = 1070,
+ ZYNQMP_PM_RESET_FPD = 1071,
+ ZYNQMP_PM_RESET_RPU_DBG1 = 1072,
+ ZYNQMP_PM_RESET_RPU_DBG0 = 1073,
+ ZYNQMP_PM_RESET_DBG_LPD = 1074,
+ ZYNQMP_PM_RESET_DBG_FPD = 1075,
+ ZYNQMP_PM_RESET_APLL = 1076,
+ ZYNQMP_PM_RESET_DPLL = 1077,
+ ZYNQMP_PM_RESET_VPLL = 1078,
+ ZYNQMP_PM_RESET_IOPLL = 1079,
+ ZYNQMP_PM_RESET_RPLL = 1080,
+ ZYNQMP_PM_RESET_GPO3_PL_0 = 1081,
+ ZYNQMP_PM_RESET_GPO3_PL_1 = 1082,
+ ZYNQMP_PM_RESET_GPO3_PL_2 = 1083,
+ ZYNQMP_PM_RESET_GPO3_PL_3 = 1084,
+ ZYNQMP_PM_RESET_GPO3_PL_4 = 1085,
+ ZYNQMP_PM_RESET_GPO3_PL_5 = 1086,
+ ZYNQMP_PM_RESET_GPO3_PL_6 = 1087,
+ ZYNQMP_PM_RESET_GPO3_PL_7 = 1088,
+ ZYNQMP_PM_RESET_GPO3_PL_8 = 1089,
+ ZYNQMP_PM_RESET_GPO3_PL_9 = 1090,
+ ZYNQMP_PM_RESET_GPO3_PL_10 = 1091,
+ ZYNQMP_PM_RESET_GPO3_PL_11 = 1092,
+ ZYNQMP_PM_RESET_GPO3_PL_12 = 1093,
+ ZYNQMP_PM_RESET_GPO3_PL_13 = 1094,
+ ZYNQMP_PM_RESET_GPO3_PL_14 = 1095,
+ ZYNQMP_PM_RESET_GPO3_PL_15 = 1096,
+ ZYNQMP_PM_RESET_GPO3_PL_16 = 1097,
+ ZYNQMP_PM_RESET_GPO3_PL_17 = 1098,
+ ZYNQMP_PM_RESET_GPO3_PL_18 = 1099,
+ ZYNQMP_PM_RESET_GPO3_PL_19 = 1100,
+ ZYNQMP_PM_RESET_GPO3_PL_20 = 1101,
+ ZYNQMP_PM_RESET_GPO3_PL_21 = 1102,
+ ZYNQMP_PM_RESET_GPO3_PL_22 = 1103,
+ ZYNQMP_PM_RESET_GPO3_PL_23 = 1104,
+ ZYNQMP_PM_RESET_GPO3_PL_24 = 1105,
+ ZYNQMP_PM_RESET_GPO3_PL_25 = 1106,
+ ZYNQMP_PM_RESET_GPO3_PL_26 = 1107,
+ ZYNQMP_PM_RESET_GPO3_PL_27 = 1108,
+ ZYNQMP_PM_RESET_GPO3_PL_28 = 1109,
+ ZYNQMP_PM_RESET_GPO3_PL_29 = 1110,
+ ZYNQMP_PM_RESET_GPO3_PL_30 = 1111,
+ ZYNQMP_PM_RESET_GPO3_PL_31 = 1112,
+ ZYNQMP_PM_RESET_RPU_LS = 1113,
+ ZYNQMP_PM_RESET_PS_ONLY = 1114,
+ ZYNQMP_PM_RESET_PL = 1115,
+ ZYNQMP_PM_RESET_PS_PL0 = 1116,
+ ZYNQMP_PM_RESET_PS_PL1 = 1117,
+ ZYNQMP_PM_RESET_PS_PL2 = 1118,
+ ZYNQMP_PM_RESET_PS_PL3 = 1119,
ZYNQMP_PM_RESET_END = ZYNQMP_PM_RESET_PS_PL3
};
enum zynqmp_pm_suspend_reason {
SUSPEND_POWER_REQUEST = 201,
- SUSPEND_ALERT,
- SUSPEND_SYSTEM_SHUTDOWN,
+ SUSPEND_ALERT = 202,
+ SUSPEND_SYSTEM_SHUTDOWN = 203,
};
enum zynqmp_pm_request_ack {
ZYNQMP_PM_REQUEST_ACK_NO = 1,
- ZYNQMP_PM_REQUEST_ACK_BLOCKING,
- ZYNQMP_PM_REQUEST_ACK_NON_BLOCKING,
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING = 2,
+ ZYNQMP_PM_REQUEST_ACK_NON_BLOCKING = 3,
};
enum pm_node_id {
NODE_SD_0 = 39,
- NODE_SD_1,
+ NODE_SD_1 = 40,
};
enum tap_delay_type {
PM_TAPDELAY_INPUT = 0,
- PM_TAPDELAY_OUTPUT,
+ PM_TAPDELAY_OUTPUT = 1,
+};
+
+enum dll_reset_type {
+ PM_DLL_RESET_ASSERT = 0,
+ PM_DLL_RESET_RELEASE = 1,
+ PM_DLL_RESET_PULSE = 2,
+};
+
+enum pm_pinctrl_config_param {
+ PM_PINCTRL_CONFIG_SLEW_RATE = 0,
+ PM_PINCTRL_CONFIG_BIAS_STATUS = 1,
+ PM_PINCTRL_CONFIG_PULL_CTRL = 2,
+ PM_PINCTRL_CONFIG_SCHMITT_CMOS = 3,
+ PM_PINCTRL_CONFIG_DRIVE_STRENGTH = 4,
+ PM_PINCTRL_CONFIG_VOLTAGE_STATUS = 5,
+ PM_PINCTRL_CONFIG_TRI_STATE = 6,
+ PM_PINCTRL_CONFIG_MAX = 7,
+};
+
+enum pm_pinctrl_slew_rate {
+ PM_PINCTRL_SLEW_RATE_FAST = 0,
+ PM_PINCTRL_SLEW_RATE_SLOW = 1,
+};
+
+enum pm_pinctrl_bias_status {
+ PM_PINCTRL_BIAS_DISABLE = 0,
+ PM_PINCTRL_BIAS_ENABLE = 1,
+};
+
+enum pm_pinctrl_pull_ctrl {
+ PM_PINCTRL_BIAS_PULL_DOWN = 0,
+ PM_PINCTRL_BIAS_PULL_UP = 1,
+};
+
+enum pm_pinctrl_schmitt_cmos {
+ PM_PINCTRL_INPUT_TYPE_CMOS = 0,
+ PM_PINCTRL_INPUT_TYPE_SCHMITT = 1,
+};
+
+enum pm_pinctrl_drive_strength {
+ PM_PINCTRL_DRIVE_STRENGTH_2MA = 0,
+ PM_PINCTRL_DRIVE_STRENGTH_4MA = 1,
+ PM_PINCTRL_DRIVE_STRENGTH_8MA = 2,
+ PM_PINCTRL_DRIVE_STRENGTH_12MA = 3,
+};
+
+enum pm_pinctrl_tri_state {
+ PM_PINCTRL_TRI_STATE_DISABLE = 0,
+ PM_PINCTRL_TRI_STATE_ENABLE = 1,
+};
+
+enum zynqmp_pm_shutdown_type {
+ ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN = 0,
+ ZYNQMP_PM_SHUTDOWN_TYPE_RESET = 1,
+ ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY = 2,
+};
+
+enum zynqmp_pm_shutdown_subtype {
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM = 0,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY = 1,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM = 2,
+};
+
+enum ospi_mux_select_type {
+ PM_OSPI_MUX_SEL_DMA = 0,
+ PM_OSPI_MUX_SEL_LINEAR = 1,
+};
+
+enum pm_feature_config_id {
+ PM_FEATURE_INVALID = 0,
+ PM_FEATURE_OVERTEMP_STATUS = 1,
+ PM_FEATURE_OVERTEMP_VALUE = 2,
+ PM_FEATURE_EXTWDT_STATUS = 3,
+ PM_FEATURE_EXTWDT_VALUE = 4,
+};
+
+/**
+ * enum pm_sd_config_type - PM SD configuration.
+ * @SD_CONFIG_EMMC_SEL: To set SD_EMMC_SEL in CTRL_REG_SD and SD_SLOTTYPE
+ * @SD_CONFIG_BASECLK: To set SD_BASECLK in SD_CONFIG_REG1
+ * @SD_CONFIG_8BIT: To set SD_8BIT in SD_CONFIG_REG2
+ * @SD_CONFIG_FIXED: To set fixed config registers
+ */
+enum pm_sd_config_type {
+ SD_CONFIG_EMMC_SEL = 1,
+ SD_CONFIG_BASECLK = 2,
+ SD_CONFIG_8BIT = 3,
+ SD_CONFIG_FIXED = 4,
+};
+
+/**
+ * enum pm_gem_config_type - PM GEM configuration.
+ * @GEM_CONFIG_SGMII_MODE: To set GEM_SGMII_MODE in GEM_CLK_CTRL register
+ * @GEM_CONFIG_FIXED: To set fixed config registers
+ */
+enum pm_gem_config_type {
+ GEM_CONFIG_SGMII_MODE = 1,
+ GEM_CONFIG_FIXED = 2,
};
/**
@@ -285,48 +440,356 @@ struct zynqmp_pm_query_data {
u32 arg3;
};
-struct zynqmp_eemi_ops {
- int (*get_api_version)(u32 *version);
- int (*get_chipid)(u32 *idcode, u32 *version);
- int (*fpga_load)(const u64 address, const u32 size, const u32 flags);
- int (*fpga_get_status)(u32 *value);
- int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out);
- int (*clock_enable)(u32 clock_id);
- int (*clock_disable)(u32 clock_id);
- int (*clock_getstate)(u32 clock_id, u32 *state);
- int (*clock_setdivider)(u32 clock_id, u32 divider);
- int (*clock_getdivider)(u32 clock_id, u32 *divider);
- int (*clock_setrate)(u32 clock_id, u64 rate);
- int (*clock_getrate)(u32 clock_id, u64 *rate);
- int (*clock_setparent)(u32 clock_id, u32 parent_id);
- int (*clock_getparent)(u32 clock_id, u32 *parent_id);
- int (*ioctl)(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2, u32 *out);
- int (*reset_assert)(const enum zynqmp_pm_reset reset,
- const enum zynqmp_pm_reset_action assert_flag);
- int (*reset_get_status)(const enum zynqmp_pm_reset reset, u32 *status);
- int (*init_finalize)(void);
- int (*set_suspend_mode)(u32 mode);
- int (*request_node)(const u32 node,
- const u32 capabilities,
- const u32 qos,
- const enum zynqmp_pm_request_ack ack);
- int (*release_node)(const u32 node);
- int (*set_requirement)(const u32 node,
- const u32 capabilities,
- const u32 qos,
- const enum zynqmp_pm_request_ack ack);
-};
-
int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
u32 arg2, u32 arg3, u32 *ret_payload);
-#if IS_REACHABLE(CONFIG_ARCH_ZYNQMP)
-const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void);
+#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
+int zynqmp_pm_get_api_version(u32 *version);
+int zynqmp_pm_get_chipid(u32 *idcode, u32 *version);
+int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out);
+int zynqmp_pm_clock_enable(u32 clock_id);
+int zynqmp_pm_clock_disable(u32 clock_id);
+int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state);
+int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider);
+int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider);
+int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate);
+int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate);
+int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id);
+int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id);
+int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode);
+int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode);
+int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data);
+int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data);
+int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value);
+int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type);
+int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select);
+int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+ const enum zynqmp_pm_reset_action assert_flag);
+int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status);
+unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode);
+int zynqmp_pm_bootmode_write(u32 ps_mode);
+int zynqmp_pm_init_finalize(void);
+int zynqmp_pm_set_suspend_mode(u32 mode);
+int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
+ const u32 qos, const enum zynqmp_pm_request_ack ack);
+int zynqmp_pm_release_node(const u32 node);
+int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack);
+int zynqmp_pm_aes_engine(const u64 address, u32 *out);
+int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags);
+int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags);
+int zynqmp_pm_fpga_get_status(u32 *value);
+int zynqmp_pm_write_ggs(u32 index, u32 value);
+int zynqmp_pm_read_ggs(u32 index, u32 *value);
+int zynqmp_pm_write_pggs(u32 index, u32 value);
+int zynqmp_pm_read_pggs(u32 index, u32 *value);
+int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype);
+int zynqmp_pm_set_boot_health_status(u32 value);
+int zynqmp_pm_pinctrl_request(const u32 pin);
+int zynqmp_pm_pinctrl_release(const u32 pin);
+int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id);
+int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id);
+int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
+ u32 *value);
+int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
+ u32 value);
+int zynqmp_pm_load_pdi(const u32 src, const u64 address);
+int zynqmp_pm_register_notifier(const u32 node, const u32 event,
+ const u32 wake, const u32 enable);
+int zynqmp_pm_feature(const u32 api_id);
+int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id);
+int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value);
+int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload);
+int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset);
+int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value);
+int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config,
+ u32 value);
#else
-static inline struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
+static inline int zynqmp_pm_get_api_version(u32 *version)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata,
+ u32 *out)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_enable(u32 clock_id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_disable(u32 clock_id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+ const enum zynqmp_pm_reset_action assert_flag)
{
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
+
+static inline int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
+ u32 *status)
+{
+ return -ENODEV;
+}
+
+static inline unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_bootmode_write(u32 ps_mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_init_finalize(void)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_suspend_mode(u32 mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_release_node(const u32 node)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_requirement(const u32 node,
+ const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_sha_hash(const u64 address, const u32 size,
+ const u32 flags)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_fpga_load(const u64 address, const u32 size,
+ const u32 flags)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_fpga_get_status(u32 *value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_write_ggs(u32 index, u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_read_ggs(u32 index, u32 *value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_write_pggs(u32 index, u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_read_pggs(u32 index, u32 *value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_boot_health_status(u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_pinctrl_request(const u32 pin)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_pinctrl_release(const u32 pin)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
+ u32 *value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
+ u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_load_pdi(const u32 src, const u64 address)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_register_notifier(const u32 node, const u32 event,
+ const u32 wake, const u32 enable)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_feature(const u32 api_id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_feature_config(enum pm_feature_config_id id,
+ u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
+ u32 *payload)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_sd_config(u32 node,
+ enum pm_sd_config_type config,
+ u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_gem_config(u32 node,
+ enum pm_gem_config_type config,
+ u32 value)
+{
+ return -ENODEV;
+}
+
#endif
#endif /* __FIRMWARE_ZYNQMP_H__ */
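The net effect of this hunk is an API migration: the fetched zynqmp_eemi_ops table is gone, replaced by direct per-function exports that fall back to -ENODEV stubs when CONFIG_ZYNQMP_FIRMWARE is not reachable. A caller-side before/after sketch (illustrative):

	/* before: fetch the ops table, then call through it */
	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();

	if (IS_ERR(eemi_ops))
		return PTR_ERR(eemi_ops);
	ret = eemi_ops->clock_enable(clk_id);

	/* after: call directly; the inline stub returns -ENODEV when
	 * the firmware driver is unavailable */
	ret = zynqmp_pm_clock_enable(clk_id);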
diff --git a/include/linux/fixp-arith.h b/include/linux/fixp-arith.h
index 8396013785ef..281cb4f83dbe 100644
--- a/include/linux/fixp-arith.h
+++ b/include/linux/fixp-arith.h
@@ -141,4 +141,23 @@ static inline s32 fixp_sin32_rad(u32 radians, u32 twopi)
#define fixp_cos32_rad(rad, twopi) \
fixp_sin32_rad(rad + twopi / 4, twopi)
+/**
+ * fixp_linear_interpolate() - interpolates a value from two known points
+ *
+ * @x0: x value of point 0
+ * @y0: y value of point 0
+ * @x1: x value of point 1
+ * @y1: y value of point 1
+ * @x: the linear interpolant
+ */
+static inline int fixp_linear_interpolate(int x0, int y0, int x1, int y1, int x)
+{
+ if (y0 == y1 || x == x0)
+ return y0;
+ if (x1 == x0 || x == x1)
+ return y1;
+
+ return y0 + ((y1 - y0) * (x - x0) / (x1 - x0));
+}
+
#endif
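A worked example of the new helper (calibration values illustrative): two known points map raw ADC counts to millidegrees, and a reading between them is interpolated linearly.

	/* raw 100 -> 20000 m°C, raw 200 -> 45000 m°C; query raw 150 */
	int temp = fixp_linear_interpolate(100, 20000, 200, 45000, 150);

	/* temp == 20000 + (45000 - 20000) * (150 - 100) / (200 - 100)
	 *      == 32500 */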
diff --git a/include/linux/flex_proportions.h b/include/linux/flex_proportions.h
index c12df59d3f5f..3e378b1fb0bc 100644
--- a/include/linux/flex_proportions.h
+++ b/include/linux/flex_proportions.h
@@ -83,9 +83,10 @@ struct fprop_local_percpu {
int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
-void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
-void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
- int max_frac);
+void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
+ long nr);
+void __fprop_add_percpu_max(struct fprop_global *p,
+ struct fprop_local_percpu *pl, int max_frac, long nr);
void fprop_fraction_percpu(struct fprop_global *p,
struct fprop_local_percpu *pl, unsigned long *numerator,
unsigned long *denominator);
@@ -96,7 +97,7 @@ void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
unsigned long flags;
local_irq_save(flags);
- __fprop_inc_percpu(p, pl);
+ __fprop_add_percpu(p, pl, 1);
local_irq_restore(flags);
}
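The rename generalizes the single-event increment to an arbitrary count. A batched irq-safe wrapper, were one wanted, would mirror fprop_inc_percpu() exactly (hypothetical sketch):

static inline void fprop_add_percpu(struct fprop_global *p,
				    struct fprop_local_percpu *pl, long nr)
{
	unsigned long flags;

	local_irq_save(flags);
	__fprop_add_percpu(p, pl, nr);
	local_irq_restore(flags);
}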
diff --git a/include/linux/font.h b/include/linux/font.h
index 51b91c8b69d5..abf1442ce719 100644
--- a/include/linux/font.h
+++ b/include/linux/font.h
@@ -16,7 +16,8 @@
struct font_desc {
int idx;
const char *name;
- int width, height;
+ unsigned int width, height;
+ unsigned int charcount;
const void *data;
int pref;
};
@@ -33,6 +34,7 @@ struct font_desc {
#define MINI4x6_IDX 9
#define FONT6x10_IDX 10
#define TER16x32_IDX 11
+#define FONT6x8_IDX 12
extern const struct font_desc font_vga_8x8,
font_vga_8x16,
@@ -45,7 +47,8 @@ extern const struct font_desc font_vga_8x8,
font_acorn_8x8,
font_mini_4x6,
font_6x10,
- font_ter_16x32;
+ font_ter_16x32,
+ font_6x8;
/* Find a font with a specific name */
@@ -59,4 +62,17 @@ extern const struct font_desc *get_default_font(int xres, int yres,
/* Max. length for the name of a predefined font */
#define MAX_FONT_NAME 32
+/* Extra word getters */
+#define REFCOUNT(fd) (((int *)(fd))[-1])
+#define FNTSIZE(fd) (((int *)(fd))[-2])
+#define FNTCHARCNT(fd) (((int *)(fd))[-3])
+#define FNTSUM(fd) (((int *)(fd))[-4])
+
+#define FONT_EXTRA_WORDS 4
+
+struct font_data {
+ unsigned int extra[FONT_EXTRA_WORDS];
+ const unsigned char data[];
+} __packed;
+
#endif /* _VIDEO_FONT_H */
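The negative-index getters above only work if the four extra words sit immediately before the glyph data, which is exactly the layout struct font_data pins down. A hedged provider sketch (sizes illustrative, for a 256-glyph 8x8 font):

	/* extra[] holds { FNTSUM, FNTCHARCNT, FNTSIZE, REFCOUNT } so the
	 * negative indexes from ->data land on the right words */
	static const struct font_data example_font = {
		{ 0, 256, 256 * 8, 0 },
		{ /* 2048 bytes of glyph bitmaps */ }
	};

	/* consumers hand out example_font.data, so e.g.
	 * FNTSIZE(example_font.data) == 2048 */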
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
new file mode 100644
index 000000000000..1067a8450826
--- /dev/null
+++ b/include/linux/fortify-string.h
@@ -0,0 +1,624 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FORTIFY_STRING_H_
+#define _LINUX_FORTIFY_STRING_H_
+
+#include <linux/bug.h>
+#include <linux/const.h>
+#include <linux/limits.h>
+
+#define __FORTIFY_INLINE extern __always_inline __gnu_inline __overloadable
+#define __RENAME(x) __asm__(#x)
+
+void fortify_panic(const char *name) __noreturn __cold;
+void __read_overflow(void) __compiletime_error("detected read beyond size of object (1st parameter)");
+void __read_overflow2(void) __compiletime_error("detected read beyond size of object (2nd parameter)");
+void __read_overflow2_field(size_t avail, size_t wanted) __compiletime_warning("detected read beyond size of field (2nd parameter); maybe use struct_group()?");
+void __write_overflow(void) __compiletime_error("detected write beyond size of object (1st parameter)");
+void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning("detected write beyond size of field (1st parameter); maybe use struct_group()?");
+
+#define __compiletime_strlen(p) \
+({ \
+ unsigned char *__p = (unsigned char *)(p); \
+ size_t __ret = SIZE_MAX; \
+ size_t __p_size = __member_size(p); \
+ if (__p_size != SIZE_MAX && \
+ __builtin_constant_p(*__p)) { \
+ size_t __p_len = __p_size - 1; \
+ if (__builtin_constant_p(__p[__p_len]) && \
+ __p[__p_len] == '\0') \
+ __ret = __builtin_strlen(__p); \
+ } \
+ __ret; \
+})
+
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
+extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
+extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
+extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
+extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
+extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat);
+extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy);
+extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen);
+extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
+extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
+#else
+
+#if defined(__SANITIZE_MEMORY__)
+/*
+ * For KMSAN builds all memcpy/memset/memmove calls should be replaced by the
+ * corresponding __msan_XXX functions.
+ */
+#include <linux/kmsan_string.h>
+#define __underlying_memcpy __msan_memcpy
+#define __underlying_memmove __msan_memmove
+#define __underlying_memset __msan_memset
+#else
+#define __underlying_memcpy __builtin_memcpy
+#define __underlying_memmove __builtin_memmove
+#define __underlying_memset __builtin_memset
+#endif
+
+#define __underlying_memchr __builtin_memchr
+#define __underlying_memcmp __builtin_memcmp
+#define __underlying_strcat __builtin_strcat
+#define __underlying_strcpy __builtin_strcpy
+#define __underlying_strlen __builtin_strlen
+#define __underlying_strncat __builtin_strncat
+#define __underlying_strncpy __builtin_strncpy
+#endif
+
+/**
+ * unsafe_memcpy - memcpy implementation with no FORTIFY bounds checking
+ *
+ * @dst: Destination memory address to write to
+ * @src: Source memory address to read from
+ * @bytes: How many bytes to write to @dst from @src
+ * @justification: Free-form text or comment describing why the use is needed
+ *
+ * This should be used for corner cases where the compiler cannot do the
+ * right thing, or during transitions between APIs, etc. It should be used
+ * very rarely, and includes a place for justification detailing where bounds
+ * checking has happened, and why existing solutions cannot be employed.
+ */
+#define unsafe_memcpy(dst, src, bytes, justification) \
+ __underlying_memcpy(dst, src, bytes)
+
+/*
+ * Clang's use of __builtin_*object_size() within inlines needs hinting via
+ * __pass_*object_size(). The preference is to only ever use type 1 (member
+ * size, rather than struct size), but there remain some stragglers using
+ * type 0 that will be converted in the future.
+ */
+#define POS __pass_object_size(1)
+#define POS0 __pass_object_size(0)
+#define __struct_size(p) __builtin_object_size(p, 0)
+#define __member_size(p) __builtin_object_size(p, 1)
+
+#define __compiletime_lessthan(bounds, length) ( \
+ __builtin_constant_p((bounds) < (length)) && \
+ (bounds) < (length) \
+)
+
+/**
+ * strncpy - Copy a string to memory with non-guaranteed NUL padding
+ *
+ * @p: pointer to destination of copy
+ * @q: pointer to NUL-terminated source string to copy
+ * @size: bytes to write at @p
+ *
+ * If strlen(@q) >= @size, the copy of @q will stop after @size bytes,
+ * and @p will NOT be NUL-terminated.
+ *
+ * If strlen(@q) < @size, following the copy of @q, trailing NUL bytes
+ * will be written to @p until @size total bytes have been written.
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * over-reads of @q, it cannot defend against writing unterminated
+ * results to @p. Using strncpy() remains ambiguous and fragile.
+ * Instead, please choose an alternative, so that the expectation
+ * of @p's contents is unambiguous:
+ *
+ * +--------------------+-----------------+------------+
+ * | @p needs to be: | padded to @size | not padded |
+ * +====================+=================+============+
+ * | NUL-terminated | strscpy_pad() | strscpy() |
+ * +--------------------+-----------------+------------+
+ * | not NUL-terminated | strtomem_pad() | strtomem() |
+ * +--------------------+-----------------+------------+
+ *
+ * Note strscpy*()'s differing return values for detecting truncation,
+ * and strtomem*()'s expectation that the destination is marked with
+ * __nonstring when it is a character array.
+ *
+ */
+__FORTIFY_INLINE __diagnose_as(__builtin_strncpy, 1, 2, 3)
+char *strncpy(char * const POS p, const char *q, __kernel_size_t size)
+{
+ size_t p_size = __member_size(p);
+
+ if (__compiletime_lessthan(p_size, size))
+ __write_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __underlying_strncpy(p, q, size);
+}
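Applying the table above: a typical migration when the destination must be NUL-terminated and padding is not needed (names illustrative):

	char name[16];

	/* before: may silently leave 'name' unterminated */
	strncpy(name, src, sizeof(name));

	/* after: always terminates; a negative return flags truncation */
	if (strscpy(name, src, sizeof(name)) < 0)
		pr_warn("name truncated\n");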
+
+__FORTIFY_INLINE __diagnose_as(__builtin_strcat, 1, 2)
+char *strcat(char * const POS p, const char *q)
+{
+ size_t p_size = __member_size(p);
+
+ if (p_size == SIZE_MAX)
+ return __underlying_strcat(p, q);
+ if (strlcat(p, q, p_size) >= p_size)
+ fortify_panic(__func__);
+ return p;
+}
+
+extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
+__FORTIFY_INLINE __kernel_size_t strnlen(const char * const POS p, __kernel_size_t maxlen)
+{
+ size_t p_size = __member_size(p);
+ size_t p_len = __compiletime_strlen(p);
+ size_t ret;
+
+ /* We can take compile-time actions when maxlen is const. */
+ if (__builtin_constant_p(maxlen) && p_len != SIZE_MAX) {
+ /* If p is const, we can use its compile-time-known len. */
+ if (maxlen >= p_size)
+ return p_len;
+ }
+
+ /* Do not check characters beyond the end of p. */
+ ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
+ if (p_size <= ret && maxlen != ret)
+ fortify_panic(__func__);
+ return ret;
+}
+
+/*
+ * Defined after fortified strnlen to reuse it. However, it must still be
+ * possible for strlen() to be used on compile-time strings for use in
+ * static initializers (i.e. as a constant expression).
+ */
+#define strlen(p) \
+ __builtin_choose_expr(__is_constexpr(__builtin_strlen(p)), \
+ __builtin_strlen(p), __fortify_strlen(p))
+__FORTIFY_INLINE __diagnose_as(__builtin_strlen, 1)
+__kernel_size_t __fortify_strlen(const char * const POS p)
+{
+ __kernel_size_t ret;
+ size_t p_size = __member_size(p);
+
+ /* Give up if we don't know how large p is. */
+ if (p_size == SIZE_MAX)
+ return __underlying_strlen(p);
+ ret = strnlen(p, p_size);
+ if (p_size <= ret)
+ fortify_panic(__func__);
+ return ret;
+}
+
+/* defined after fortified strlen to reuse it */
+extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
+__FORTIFY_INLINE size_t strlcpy(char * const POS p, const char * const POS q, size_t size)
+{
+ size_t p_size = __member_size(p);
+ size_t q_size = __member_size(q);
+ size_t q_len; /* Full count of source string length. */
+ size_t len; /* Count of characters going into destination. */
+
+ if (p_size == SIZE_MAX && q_size == SIZE_MAX)
+ return __real_strlcpy(p, q, size);
+ q_len = strlen(q);
+ len = (q_len >= size) ? size - 1 : q_len;
+ if (__builtin_constant_p(size) && __builtin_constant_p(q_len) && size) {
+ /* Write size is always larger than destination. */
+ if (len >= p_size)
+ __write_overflow();
+ }
+ if (size) {
+ if (len >= p_size)
+ fortify_panic(__func__);
+ __underlying_memcpy(p, q, len);
+ p[len] = '\0';
+ }
+ return q_len;
+}
+
+/* defined after fortified strnlen to reuse it */
+extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy);
+__FORTIFY_INLINE ssize_t strscpy(char * const POS p, const char * const POS q, size_t size)
+{
+ size_t len;
+ /* Use string size rather than possible enclosing struct size. */
+ size_t p_size = __member_size(p);
+ size_t q_size = __member_size(q);
+
+	/* If neither p's nor q's size is known, fall back to the real strscpy(). */
+ if (p_size == SIZE_MAX && q_size == SIZE_MAX)
+ return __real_strscpy(p, q, size);
+
+ /*
+ * If size can be known at compile time and is greater than
+ * p_size, generate a compile time write overflow error.
+ */
+ if (__compiletime_lessthan(p_size, size))
+ __write_overflow();
+
+ /*
+	 * This call protects against read overflow, because len will default
+	 * to q's length if that is smaller than size.
+ */
+ len = strnlen(q, size);
+ /*
+	 * If len equals size, we will copy only size bytes, which leads to
+	 * -E2BIG being returned.
+	 * Otherwise we will copy len + 1 to account for the final '\0'.
+ */
+ len = len == size ? size : len + 1;
+
+ /*
+ * Generate a runtime write overflow error if len is greater than
+ * p_size.
+ */
+ if (len > p_size)
+ fortify_panic(__func__);
+
+ /*
+ * We can now safely call vanilla strscpy because we are protected from:
+ * 1. Read overflow thanks to call to strnlen().
+	 * 2. Write overflow thanks to the checks above.
+ */
+ return __real_strscpy(p, q, len);
+}
+
+/* defined after fortified strlen and strnlen to reuse them */
+__FORTIFY_INLINE __diagnose_as(__builtin_strncat, 1, 2, 3)
+char *strncat(char * const POS p, const char * const POS q, __kernel_size_t count)
+{
+ size_t p_len, copy_len;
+ size_t p_size = __member_size(p);
+ size_t q_size = __member_size(q);
+
+ if (p_size == SIZE_MAX && q_size == SIZE_MAX)
+ return __underlying_strncat(p, q, count);
+ p_len = strlen(p);
+ copy_len = strnlen(q, count);
+ if (p_size < p_len + copy_len + 1)
+ fortify_panic(__func__);
+ __underlying_memcpy(p + p_len, q, copy_len);
+ p[p_len + copy_len] = '\0';
+ return p;
+}
+
+__FORTIFY_INLINE void fortify_memset_chk(__kernel_size_t size,
+ const size_t p_size,
+ const size_t p_size_field)
+{
+ if (__builtin_constant_p(size)) {
+ /*
+ * Length argument is a constant expression, so we
+ * can perform compile-time bounds checking where
+ * buffer sizes are also known at compile time.
+ */
+
+ /* Error when size is larger than enclosing struct. */
+ if (__compiletime_lessthan(p_size_field, p_size) &&
+ __compiletime_lessthan(p_size, size))
+ __write_overflow();
+
+ /* Warn when write size is larger than dest field. */
+ if (__compiletime_lessthan(p_size_field, size))
+ __write_overflow_field(p_size_field, size);
+ }
+ /*
+ * At this point, length argument may not be a constant expression,
+ * so run-time bounds checking can be done where buffer sizes are
+ * known. (This is not an "else" because the above checks may only
+ * be compile-time warnings, and we want to still warn for run-time
+ * overflows.)
+ */
+
+ /*
+ * Always stop accesses beyond the struct that contains the
+ * field, when the buffer's remaining size is known.
+ * (The SIZE_MAX test is to optimize away checks where the buffer
+ * lengths are unknown.)
+ */
+ if (p_size != SIZE_MAX && p_size < size)
+ fortify_panic("memset");
+}
+
+#define __fortify_memset_chk(p, c, size, p_size, p_size_field) ({ \
+ size_t __fortify_size = (size_t)(size); \
+ fortify_memset_chk(__fortify_size, p_size, p_size_field), \
+ __underlying_memset(p, c, __fortify_size); \
+})
+
+/*
+ * __struct_size() vs __member_size() must be captured here to avoid
+ * evaluating argument side-effects further into the macro layers.
+ */
+#ifndef CONFIG_KMSAN
+#define memset(p, c, s) __fortify_memset_chk(p, c, s, \
+ __struct_size(p), __member_size(p))
+#endif
+
+/*
+ * To make sure the compiler can enforce protection against buffer overflows,
+ * memcpy(), memmove(), and memset() must not be used beyond individual
+ * struct members. If you need to copy across multiple members, please use
+ * struct_group() to create a named mirror of an anonymous struct union.
+ * (e.g. see struct sk_buff.) Read overflow checking is currently only
+ * done when a write overflow is also present, or when building with W=1.
+ *
+ * Mitigation coverage matrix
+ * Bounds checking at:
+ * +-------+-------+-------+-------+
+ * | Compile time | Run time |
+ * memcpy() argument sizes: | write | read | write | read |
+ * dest source length +-------+-------+-------+-------+
+ * memcpy(known, known, constant) | y | y | n/a | n/a |
+ * memcpy(known, unknown, constant) | y | n | n/a | V |
+ * memcpy(known, known, dynamic) | n | n | B | B |
+ * memcpy(known, unknown, dynamic) | n | n | B | V |
+ * memcpy(unknown, known, constant) | n | y | V | n/a |
+ * memcpy(unknown, unknown, constant) | n | n | V | V |
+ * memcpy(unknown, known, dynamic) | n | n | V | B |
+ * memcpy(unknown, unknown, dynamic) | n | n | V | V |
+ * +-------+-------+-------+-------+
+ *
+ * y = perform deterministic compile-time bounds checking
+ * n = cannot perform deterministic compile-time bounds checking
+ * n/a = no run-time bounds checking needed since compile-time deterministic
+ * B = can perform run-time bounds checking (currently unimplemented)
+ * V = vulnerable to run-time overflow (will need refactoring to solve)
+ *
+ */
+__FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
+ const size_t p_size,
+ const size_t q_size,
+ const size_t p_size_field,
+ const size_t q_size_field,
+ const char *func)
+{
+ if (__builtin_constant_p(size)) {
+ /*
+ * Length argument is a constant expression, so we
+ * can perform compile-time bounds checking where
+ * buffer sizes are also known at compile time.
+ */
+
+ /* Error when size is larger than enclosing struct. */
+ if (__compiletime_lessthan(p_size_field, p_size) &&
+ __compiletime_lessthan(p_size, size))
+ __write_overflow();
+ if (__compiletime_lessthan(q_size_field, q_size) &&
+ __compiletime_lessthan(q_size, size))
+ __read_overflow2();
+
+ /* Warn when write size argument larger than dest field. */
+ if (__compiletime_lessthan(p_size_field, size))
+ __write_overflow_field(p_size_field, size);
+ /*
+ * Warn for source field over-read when building with W=1
+ * or when an over-write happened, so both can be fixed at
+ * the same time.
+ */
+ if ((IS_ENABLED(KBUILD_EXTRA_WARN1) ||
+ __compiletime_lessthan(p_size_field, size)) &&
+ __compiletime_lessthan(q_size_field, size))
+ __read_overflow2_field(q_size_field, size);
+ }
+ /*
+ * At this point, length argument may not be a constant expression,
+ * so run-time bounds checking can be done where buffer sizes are
+ * known. (This is not an "else" because the above checks may only
+ * be compile-time warnings, and we want to still warn for run-time
+ * overflows.)
+ */
+
+ /*
+ * Always stop accesses beyond the struct that contains the
+ * field, when the buffer's remaining size is known.
+ * (The SIZE_MAX test is to optimize away checks where the buffer
+ * lengths are unknown.)
+ */
+ if ((p_size != SIZE_MAX && p_size < size) ||
+ (q_size != SIZE_MAX && q_size < size))
+ fortify_panic(func);
+
+ /*
+ * Warn when writing beyond destination field size.
+ *
+ * We must ignore p_size_field == 0 for existing 0-element
+ * fake flexible arrays, until they are all converted to
+ * proper flexible arrays.
+ *
+ * The implementation of __builtin_*object_size() behaves
+ * like sizeof() when not directly referencing a flexible
+ * array member, which means there will be many bounds checks
+ * that will appear at run-time, without a way for them to be
+ * detected at compile-time (as can be done when the destination
+ * is specifically the flexible array member).
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101832
+ */
+ if (p_size_field != 0 && p_size_field != SIZE_MAX &&
+ p_size != p_size_field && p_size_field < size)
+ return true;
+
+ return false;
+}
+
+#define __fortify_memcpy_chk(p, q, size, p_size, q_size, \
+ p_size_field, q_size_field, op) ({ \
+ const size_t __fortify_size = (size_t)(size); \
+ const size_t __p_size = (p_size); \
+ const size_t __q_size = (q_size); \
+ const size_t __p_size_field = (p_size_field); \
+ const size_t __q_size_field = (q_size_field); \
+ WARN_ONCE(fortify_memcpy_chk(__fortify_size, __p_size, \
+ __q_size, __p_size_field, \
+ __q_size_field, #op), \
+ #op ": detected field-spanning write (size %zu) of single %s (size %zu)\n", \
+ __fortify_size, \
+ "field \"" #p "\" at " __FILE__ ":" __stringify(__LINE__), \
+ __p_size_field); \
+ __underlying_##op(p, q, __fortify_size); \
+})
+
+/*
+ * Notes about compile-time buffer size detection:
+ *
+ * With these types...
+ *
+ * struct middle {
+ * u16 a;
+ * u8 middle_buf[16];
+ * int b;
+ * };
+ * struct end {
+ * u16 a;
+ * u8 end_buf[16];
+ * };
+ * struct flex {
+ * int a;
+ * u8 flex_buf[];
+ * };
+ *
+ * void func(TYPE *ptr) { ... }
+ *
+ * Cases where destination size cannot be currently detected:
+ * - the size of ptr's object (seemingly by design, gcc & clang fail):
+ * __builtin_object_size(ptr, 1) == SIZE_MAX
+ * - the size of flexible arrays in ptr's obj (by design, dynamic size):
+ * __builtin_object_size(ptr->flex_buf, 1) == SIZE_MAX
+ * - the size of ANY array at the end of ptr's obj (gcc and clang bug):
+ * __builtin_object_size(ptr->end_buf, 1) == SIZE_MAX
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101836
+ *
+ * Cases where destination size is currently detected:
+ * - the size of non-array members within ptr's object:
+ * __builtin_object_size(ptr->a, 1) == 2
+ * - the size of non-flexible-array in the middle of ptr's obj:
+ * __builtin_object_size(ptr->middle_buf, 1) == 16
+ *
+ */
+
+/*
+ * __struct_size() vs __member_size() must be captured here to avoid
+ * evaluating argument side-effects further into the macro layers.
+ */
+#define memcpy(p, q, s) __fortify_memcpy_chk(p, q, s, \
+ __struct_size(p), __struct_size(q), \
+ __member_size(p), __member_size(q), \
+ memcpy)
+#define memmove(p, q, s) __fortify_memcpy_chk(p, q, s, \
+ __struct_size(p), __struct_size(q), \
+ __member_size(p), __member_size(q), \
+ memmove)
+
+extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
+__FORTIFY_INLINE void *memscan(void * const POS0 p, int c, __kernel_size_t size)
+{
+ size_t p_size = __struct_size(p);
+
+ if (__compiletime_lessthan(p_size, size))
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __real_memscan(p, c, size);
+}
+
+__FORTIFY_INLINE __diagnose_as(__builtin_memcmp, 1, 2, 3)
+int memcmp(const void * const POS0 p, const void * const POS0 q, __kernel_size_t size)
+{
+ size_t p_size = __struct_size(p);
+ size_t q_size = __struct_size(q);
+
+ if (__builtin_constant_p(size)) {
+ if (__compiletime_lessthan(p_size, size))
+ __read_overflow();
+ if (__compiletime_lessthan(q_size, size))
+ __read_overflow2();
+ }
+ if (p_size < size || q_size < size)
+ fortify_panic(__func__);
+ return __underlying_memcmp(p, q, size);
+}
+
+__FORTIFY_INLINE __diagnose_as(__builtin_memchr, 1, 2, 3)
+void *memchr(const void * const POS0 p, int c, __kernel_size_t size)
+{
+ size_t p_size = __struct_size(p);
+
+ if (__compiletime_lessthan(p_size, size))
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __underlying_memchr(p, c, size);
+}
+
+void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
+__FORTIFY_INLINE void *memchr_inv(const void * const POS0 p, int c, size_t size)
+{
+ size_t p_size = __struct_size(p);
+
+ if (__compiletime_lessthan(p_size, size))
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __real_memchr_inv(p, c, size);
+}
+
+extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup);
+__FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp)
+{
+ size_t p_size = __struct_size(p);
+
+ if (__compiletime_lessthan(p_size, size))
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __real_kmemdup(p, size, gfp);
+}
+
+/* Defined after fortified strlen to reuse it. */
+__FORTIFY_INLINE __diagnose_as(__builtin_strcpy, 1, 2)
+char *strcpy(char * const POS p, const char * const POS q)
+{
+ size_t p_size = __member_size(p);
+ size_t q_size = __member_size(q);
+ size_t size;
+
+ /* If neither buffer size is known, immediately give up. */
+ if (__builtin_constant_p(p_size) &&
+ __builtin_constant_p(q_size) &&
+ p_size == SIZE_MAX && q_size == SIZE_MAX)
+ return __underlying_strcpy(p, q);
+ size = strlen(q) + 1;
+ /* Compile-time check for const size overflow. */
+ if (__compiletime_lessthan(p_size, size))
+ __write_overflow();
+ /* Run-time check for dynamic size overflow. */
+ if (p_size < size)
+ fortify_panic(__func__);
+ __underlying_memcpy(p, q, size);
+ return p;
+}
+
+/* Don't use these outside the FORTIFY_SOURCE implementation */
+#undef __underlying_memchr
+#undef __underlying_memcmp
+#undef __underlying_strcat
+#undef __underlying_strcpy
+#undef __underlying_strlen
+#undef __underlying_strncat
+#undef __underlying_strncpy
+
+#undef POS
+#undef POS0
+
+#endif /* _LINUX_FORTIFY_STRING_H_ */
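
The struct_group() guidance above is easiest to see on a concrete type. Below is a minimal sketch (not part of the patch; struct pkt_hdr and copy_addrs() are hypothetical) of how a field-spanning memcpy() is rewritten so FORTIFY_SOURCE can bound the write:

#include <linux/stddef.h>	/* struct_group() */
#include <linux/string.h>
#include <linux/types.h>

struct pkt_hdr {			/* hypothetical on-wire header */
	u8 type;
	struct_group(addrs,		/* named mirror of the two members */
		u8 src[6];
		u8 dst[6];
	);
	__be32 crc;
};

static void copy_addrs(struct pkt_hdr *hdr, const u8 *buf)
{
	/*
	 * memcpy(hdr->src, buf, 12) would write past "src" into "dst" and
	 * trip the field-spanning write warning; the named group gives the
	 * compiler a single 12-byte destination it can bounds-check.
	 */
	memcpy(&hdr->addrs, buf, sizeof(hdr->addrs));
}
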
diff --git a/include/linux/fpga/adi-axi-common.h b/include/linux/fpga/adi-axi-common.h
index 7fc95d5c95bb..141ac3f251e6 100644
--- a/include/linux/fpga/adi-axi-common.h
+++ b/include/linux/fpga/adi-axi-common.h
@@ -11,9 +11,13 @@
#ifndef ADI_AXI_COMMON_H_
#define ADI_AXI_COMMON_H_
-#define ADI_AXI_REG_VERSION 0x0000
+#define ADI_AXI_REG_VERSION 0x0000
#define ADI_AXI_PCORE_VER(major, minor, patch) \
(((major) << 16) | ((minor) << 8) | (patch))
+#define ADI_AXI_PCORE_VER_MAJOR(version) (((version) >> 16) & 0xff)
+#define ADI_AXI_PCORE_VER_MINOR(version) (((version) >> 8) & 0xff)
+#define ADI_AXI_PCORE_VER_PATCH(version) ((version) & 0xff)
+
#endif /* ADI_AXI_COMMON_H_ */
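
For illustration (not from the patch), the new accessors are the exact inverse of ADI_AXI_PCORE_VER():

	u32 version = ADI_AXI_PCORE_VER(10, 1, 2);	/* 0x000a0102 */

	pr_info("core v%u.%u.%u\n",
		ADI_AXI_PCORE_VER_MAJOR(version),	/* 10 */
		ADI_AXI_PCORE_VER_MINOR(version),	/* 1 */
		ADI_AXI_PCORE_VER_PATCH(version));	/* 2 */
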
diff --git a/include/linux/fpga/altera-pr-ip-core.h b/include/linux/fpga/altera-pr-ip-core.h
index 0b08ac20ab16..a6b4c07858cc 100644
--- a/include/linux/fpga/altera-pr-ip-core.h
+++ b/include/linux/fpga/altera-pr-ip-core.h
@@ -13,6 +13,5 @@
#include <linux/io.h>
int alt_pr_register(struct device *dev, void __iomem *reg_base);
-void alt_pr_unregister(struct device *dev);
#endif /* _ALT_PR_IP_CORE_H */
diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h
index 817600a32c93..223da48a6d18 100644
--- a/include/linux/fpga/fpga-bridge.h
+++ b/include/linux/fpga/fpga-bridge.h
@@ -11,7 +11,7 @@ struct fpga_bridge;
/**
* struct fpga_bridge_ops - ops for low level FPGA bridge drivers
* @enable_show: returns the FPGA bridge's status
- * @enable_set: set a FPGA bridge as enabled or disabled
+ * @enable_set: set an FPGA bridge as enabled or disabled
* @fpga_bridge_remove: set FPGA into a specific state during driver remove
* @groups: optional attribute groups.
*/
@@ -23,6 +23,23 @@ struct fpga_bridge_ops {
};
/**
+ * struct fpga_bridge_info - collection of parameters for an FPGA Bridge
+ * @name: fpga bridge name
+ * @br_ops: pointer to structure of fpga bridge ops
+ * @priv: fpga bridge private data
+ *
+ * fpga_bridge_info contains parameters for the register function. These
+ * are separated into an info structure because some are optional and
+ * others could be added in the future. The info structure facilitates
+ * maintaining a stable API.
+ */
+struct fpga_bridge_info {
+ const char *name;
+ const struct fpga_bridge_ops *br_ops;
+ void *priv;
+};
+
+/**
* struct fpga_bridge - FPGA bridge structure
* @name: name of low level FPGA bridge
* @dev: FPGA bridge device
@@ -62,15 +79,10 @@ int of_fpga_bridge_get_to_list(struct device_node *np,
struct fpga_image_info *info,
struct list_head *bridge_list);
-struct fpga_bridge *fpga_bridge_create(struct device *dev, const char *name,
- const struct fpga_bridge_ops *br_ops,
- void *priv);
-void fpga_bridge_free(struct fpga_bridge *br);
-int fpga_bridge_register(struct fpga_bridge *br);
+struct fpga_bridge *
+fpga_bridge_register(struct device *parent, const char *name,
+ const struct fpga_bridge_ops *br_ops,
+ void *priv);
void fpga_bridge_unregister(struct fpga_bridge *br);
-struct fpga_bridge
-*devm_fpga_bridge_create(struct device *dev, const char *name,
- const struct fpga_bridge_ops *br_ops, void *priv);
-
#endif /* _LINUX_FPGA_BRIDGE_H */
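
With the create/free pair gone, a low-level driver registers in one step. A hedged sketch of a probe function under the new API (the driver name and my_enable_set() are hypothetical):

#include <linux/fpga/fpga-bridge.h>
#include <linux/platform_device.h>

static int my_enable_set(struct fpga_bridge *bridge, bool enable)
{
	return 0;	/* hypothetical: gate traffic through the bridge */
}

static const struct fpga_bridge_ops my_br_ops = {
	.enable_set = my_enable_set,
};

static int my_bridge_probe(struct platform_device *pdev)
{
	struct fpga_bridge *br;

	/* One call now allocates and registers; no create/free pair. */
	br = fpga_bridge_register(&pdev->dev, "my-bridge", &my_br_ops, NULL);
	if (IS_ERR(br))
		return PTR_ERR(br);

	platform_set_drvdata(pdev, br);
	return 0;
}
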
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index e8ca62b2cb5b..54f63459efd6 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -22,6 +22,8 @@ struct sg_table;
* @FPGA_MGR_STATE_RESET: FPGA in reset state
* @FPGA_MGR_STATE_FIRMWARE_REQ: firmware request in progress
* @FPGA_MGR_STATE_FIRMWARE_REQ_ERR: firmware request failed
+ * @FPGA_MGR_STATE_PARSE_HEADER: parse FPGA image header
+ * @FPGA_MGR_STATE_PARSE_HEADER_ERR: Error during PARSE_HEADER stage
* @FPGA_MGR_STATE_WRITE_INIT: preparing FPGA for programming
* @FPGA_MGR_STATE_WRITE_INIT_ERR: Error during WRITE_INIT stage
* @FPGA_MGR_STATE_WRITE: writing image to FPGA
@@ -41,7 +43,9 @@ enum fpga_mgr_states {
FPGA_MGR_STATE_FIRMWARE_REQ,
FPGA_MGR_STATE_FIRMWARE_REQ_ERR,
- /* write sequence: init, write, complete */
+ /* write sequence: parse header, init, write, complete */
+ FPGA_MGR_STATE_PARSE_HEADER,
+ FPGA_MGR_STATE_PARSE_HEADER_ERR,
FPGA_MGR_STATE_WRITE_INIT,
FPGA_MGR_STATE_WRITE_INIT_ERR,
FPGA_MGR_STATE_WRITE,
@@ -75,7 +79,7 @@ enum fpga_mgr_states {
#define FPGA_MGR_COMPRESSED_BITSTREAM BIT(4)
/**
- * struct fpga_image_info - information specific to a FPGA image
+ * struct fpga_image_info - information specific to an FPGA image
* @flags: boolean flags as defined above
* @enable_timeout_us: maximum time to enable traffic through bridge (uSec)
* @disable_timeout_us: maximum time to disable traffic through bridge (uSec)
@@ -85,6 +89,9 @@ enum fpga_mgr_states {
* @sgt: scatter/gather table containing FPGA image
* @buf: contiguous buffer containing FPGA image
* @count: size of buf
+ * @header_size: size of image header.
+ * @data_size: size of image data to be sent to the device. If not specified,
+ * the whole image will be used. The header may be skipped in either case.
* @region_id: id of target region
* @dev: device that owns this
* @overlay: Device Tree overlay
@@ -98,6 +105,8 @@ struct fpga_image_info {
struct sg_table *sgt;
const char *buf;
size_t count;
+ size_t header_size;
+ size_t data_size;
int region_id;
struct device *dev;
#ifdef CONFIG_OF
@@ -106,11 +115,48 @@ struct fpga_image_info {
};
/**
+ * struct fpga_compat_id - id for compatibility check
+ *
+ * @id_h: high 64bit of the compat_id
+ * @id_l: low 64bit of the compat_id
+ */
+struct fpga_compat_id {
+ u64 id_h;
+ u64 id_l;
+};
+
+/**
+ * struct fpga_manager_info - collection of parameters for an FPGA Manager
+ * @name: fpga manager name
+ * @compat_id: FPGA manager id for compatibility check.
+ * @mops: pointer to structure of fpga manager ops
+ * @priv: fpga manager private data
+ *
+ * fpga_manager_info contains parameters for the register_full function.
+ * These are separated into an info structure because some are optional and
+ * others could be added in the future. The info structure facilitates
+ * maintaining a stable API.
+ */
+struct fpga_manager_info {
+ const char *name;
+ struct fpga_compat_id *compat_id;
+ const struct fpga_manager_ops *mops;
+ void *priv;
+};
+
+/**
* struct fpga_manager_ops - ops for low level fpga manager drivers
- * @initial_header_size: Maximum number of bytes that should be passed into write_init
+ * @initial_header_size: minimum number of bytes that should be passed into
+ * parse_header and write_init.
+ * @skip_header: bool flag to tell fpga-mgr core whether it should skip
+ * the first info->header_size bytes of the image when invoking the
+ * write callback.
* @state: returns an enum value of the FPGA's state
* @status: returns status of the FPGA, including reconfiguration error code
- * @write_init: prepare the FPGA to receive confuration data
+ * @parse_header: parse FPGA image header to set info->header_size and
+ * info->data_size. If the input buffer is not large enough, set the
+ * required size in info->header_size and return -EAGAIN.
+ * @write_init: prepare the FPGA to receive configuration data
* @write: write count bytes of configuration data to the FPGA
* @write_sg: write the scatter list of configuration data to the FPGA
* @write_complete: set FPGA to operating state after writing is done
@@ -123,8 +169,12 @@ struct fpga_image_info {
*/
struct fpga_manager_ops {
size_t initial_header_size;
+ bool skip_header;
enum fpga_mgr_states (*state)(struct fpga_manager *mgr);
u64 (*status)(struct fpga_manager *mgr);
+ int (*parse_header)(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count);
int (*write_init)(struct fpga_manager *mgr,
struct fpga_image_info *info,
const char *buf, size_t count);
@@ -144,17 +194,6 @@ struct fpga_manager_ops {
#define FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR BIT(4)
/**
- * struct fpga_compat_id - id for compatibility check
- *
- * @id_h: high 64bit of the compat_id
- * @id_l: low 64bit of the compat_id
- */
-struct fpga_compat_id {
- u64 id_h;
- u64 id_l;
-};
-
-/**
* struct fpga_manager - fpga manager structure
* @name: name of low level fpga manager
* @dev: fpga manager device
@@ -191,15 +230,18 @@ struct fpga_manager *fpga_mgr_get(struct device *dev);
void fpga_mgr_put(struct fpga_manager *mgr);
-struct fpga_manager *fpga_mgr_create(struct device *dev, const char *name,
- const struct fpga_manager_ops *mops,
- void *priv);
-void fpga_mgr_free(struct fpga_manager *mgr);
-int fpga_mgr_register(struct fpga_manager *mgr);
+struct fpga_manager *
+fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info);
+
+struct fpga_manager *
+fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv);
void fpga_mgr_unregister(struct fpga_manager *mgr);
-struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name,
- const struct fpga_manager_ops *mops,
- void *priv);
+struct fpga_manager *
+devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info);
+struct fpga_manager *
+devm_fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv);
#endif /*_LINUX_FPGA_MGR_H */
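
A sketch of how the parse_header flow fits together, assuming a hypothetical device with a fixed-size image header (MY_HDR_SIZE and all my_* callbacks are illustrative stubs, not from the patch):

#include <linux/fpga/fpga-mgr.h>
#include <linux/platform_device.h>

#define MY_HDR_SIZE	16	/* hypothetical fixed header length */

static enum fpga_mgr_states my_state(struct fpga_manager *mgr)
{
	return FPGA_MGR_STATE_UNKNOWN;		/* hypothetical */
}

static int my_parse_header(struct fpga_manager *mgr,
			   struct fpga_image_info *info,
			   const char *buf, size_t count)
{
	info->header_size = MY_HDR_SIZE;
	if (count < MY_HDR_SIZE)
		return -EAGAIN;		/* ask the core for more bytes */

	info->data_size = 0;		/* 0: send the whole remaining image */
	return 0;
}

static int my_write_init(struct fpga_manager *mgr,
			 struct fpga_image_info *info,
			 const char *buf, size_t count)
{
	return 0;	/* hypothetical: enter programming mode */
}

static int my_write(struct fpga_manager *mgr, const char *buf, size_t count)
{
	return 0;	/* hypothetical: stream configuration data */
}

static int my_write_complete(struct fpga_manager *mgr,
			     struct fpga_image_info *info)
{
	return 0;	/* hypothetical: wait for the device to go live */
}

static const struct fpga_manager_ops my_mops = {
	.initial_header_size	= MY_HDR_SIZE,
	.skip_header		= true,	/* core strips the header before ->write() */
	.state			= my_state,
	.parse_header		= my_parse_header,
	.write_init		= my_write_init,
	.write			= my_write,
	.write_complete		= my_write_complete,
};

static int my_mgr_probe(struct platform_device *pdev)
{
	struct fpga_manager_info info = {
		.name = "my-fpga-manager",
		.mops = &my_mops,
	};

	return PTR_ERR_OR_ZERO(devm_fpga_mgr_register_full(&pdev->dev, &info));
}
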
diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h
index 27cb706275db..9d4d32909340 100644
--- a/include/linux/fpga/fpga-region.h
+++ b/include/linux/fpga/fpga-region.h
@@ -7,6 +7,27 @@
#include <linux/fpga/fpga-mgr.h>
#include <linux/fpga/fpga-bridge.h>
+struct fpga_region;
+
+/**
+ * struct fpga_region_info - collection of parameters for an FPGA Region
+ * @mgr: fpga region manager
+ * @compat_id: FPGA region id for compatibility check.
+ * @priv: fpga region private data
+ * @get_bridges: optional function to get bridges to a list
+ *
+ * fpga_region_info contains parameters for the register_full function.
+ * These are separated into an info structure because some are optional and
+ * others could be added in the future. The info structure facilitates
+ * maintaining a stable API.
+ */
+struct fpga_region_info {
+ struct fpga_manager *mgr;
+ struct fpga_compat_id *compat_id;
+ void *priv;
+ int (*get_bridges)(struct fpga_region *region);
+};
+
/**
* struct fpga_region - FPGA Region structure
* @dev: FPGA Region device
@@ -31,21 +52,18 @@ struct fpga_region {
#define to_fpga_region(d) container_of(d, struct fpga_region, dev)
-struct fpga_region *fpga_region_class_find(
- struct device *start, const void *data,
- int (*match)(struct device *, const void *));
+struct fpga_region *
+fpga_region_class_find(struct device *start, const void *data,
+ int (*match)(struct device *, const void *));
int fpga_region_program_fpga(struct fpga_region *region);
-struct fpga_region
-*fpga_region_create(struct device *dev, struct fpga_manager *mgr,
- int (*get_bridges)(struct fpga_region *));
-void fpga_region_free(struct fpga_region *region);
-int fpga_region_register(struct fpga_region *region);
-void fpga_region_unregister(struct fpga_region *region);
+struct fpga_region *
+fpga_region_register_full(struct device *parent, const struct fpga_region_info *info);
-struct fpga_region
-*devm_fpga_region_create(struct device *dev, struct fpga_manager *mgr,
- int (*get_bridges)(struct fpga_region *));
+struct fpga_region *
+fpga_region_register(struct device *parent, struct fpga_manager *mgr,
+ int (*get_bridges)(struct fpga_region *));
+void fpga_region_unregister(struct fpga_region *region);
#endif /* _FPGA_REGION_H */
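
Mirroring the manager and bridge changes, region registration now takes an info structure. A minimal hedged sketch (my_get_bridges() is a hypothetical stub; the manager is assumed to have been acquired earlier):

#include <linux/fpga/fpga-region.h>
#include <linux/platform_device.h>

static int my_get_bridges(struct fpga_region *region)
{
	return 0;	/* hypothetical: collect bridges into region->bridge_list */
}

static int my_region_setup(struct platform_device *pdev,
			   struct fpga_manager *mgr)	/* acquired earlier */
{
	struct fpga_region_info info = {
		.mgr = mgr,
		.get_bridges = my_get_bridges,
	};
	struct fpga_region *region;

	region = fpga_region_register_full(&pdev->dev, &info);
	if (IS_ERR(region))
		return PTR_ERR(region);

	platform_set_drvdata(pdev, region);
	return 0;
}
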
diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h
new file mode 100644
index 000000000000..1c2bde0ead73
--- /dev/null
+++ b/include/linux/fprobe.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Simple ftrace probe wrapper */
+#ifndef _LINUX_FPROBE_H
+#define _LINUX_FPROBE_H
+
+#include <linux/compiler.h>
+#include <linux/ftrace.h>
+#include <linux/rethook.h>
+
+/**
+ * struct fprobe - ftrace based probe.
+ * @ops: The ftrace_ops.
+ * @nmissed: The counter for missing events.
+ * @flags: The status flag.
+ * @rethook: The rethook data structure. (internal data)
+ * @entry_handler: The callback function for function entry.
+ * @exit_handler: The callback function for function exit.
+ */
+struct fprobe {
+#ifdef CONFIG_FUNCTION_TRACER
+ /*
+ * If CONFIG_FUNCTION_TRACER is not set, CONFIG_FPROBE is disabled too.
+ * But users of fprobe may keep embedding struct fprobe in their own
+ * code. To avoid build errors, keep the fprobe data structure defined
+ * here, but omit the ftrace_ops member.
+ */
+ struct ftrace_ops ops;
+#endif
+ unsigned long nmissed;
+ unsigned int flags;
+ struct rethook *rethook;
+
+ void (*entry_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs);
+ void (*exit_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs);
+};
+
+/* This fprobe is soft-disabled. */
+#define FPROBE_FL_DISABLED 1
+
+/*
+ * This fprobe handler will be shared with kprobes.
+ * This flag must be set before registering.
+ */
+#define FPROBE_FL_KPROBE_SHARED 2
+
+static inline bool fprobe_disabled(struct fprobe *fp)
+{
+ return (fp) ? fp->flags & FPROBE_FL_DISABLED : false;
+}
+
+static inline bool fprobe_shared_with_kprobes(struct fprobe *fp)
+{
+ return (fp) ? fp->flags & FPROBE_FL_KPROBE_SHARED : false;
+}
+
+#ifdef CONFIG_FPROBE
+int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter);
+int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num);
+int register_fprobe_syms(struct fprobe *fp, const char **syms, int num);
+int unregister_fprobe(struct fprobe *fp);
+#else
+static inline int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter)
+{
+ return -EOPNOTSUPP;
+}
+static inline int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num)
+{
+ return -EOPNOTSUPP;
+}
+static inline int register_fprobe_syms(struct fprobe *fp, const char **syms, int num)
+{
+ return -EOPNOTSUPP;
+}
+static inline int unregister_fprobe(struct fprobe *fp)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+/**
+ * disable_fprobe() - Disable fprobe
+ * @fp: The fprobe to be disabled.
+ *
+ * This will soft-disable @fp. Note that this doesn't remove the ftrace
+ * hooks from the function entry.
+ */
+static inline void disable_fprobe(struct fprobe *fp)
+{
+ if (fp)
+ fp->flags |= FPROBE_FL_DISABLED;
+}
+
+/**
+ * enable_fprobe() - Enable fprobe
+ * @fp: The fprobe to be enabled.
+ *
+ * This will soft-enable @fp.
+ */
+static inline void enable_fprobe(struct fprobe *fp)
+{
+ if (fp)
+ fp->flags &= ~FPROBE_FL_DISABLED;
+}
+
+#endif
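
A hedged sketch of a minimal user: one wildcard filter, entry-only probing (the handler name and filter strings are illustrative, not from the patch):

#include <linux/fprobe.h>
#include <linux/module.h>

static void my_entry_handler(struct fprobe *fp, unsigned long entry_ip,
			     struct pt_regs *regs)
{
	pr_info("hit %pS (%lu missed so far)\n",
		(void *)entry_ip, fp->nmissed);
}

static struct fprobe my_fprobe = {
	.entry_handler = my_entry_handler,	/* no exit_handler: no rethook needed */
};

static int __init my_fprobe_init(void)
{
	/* Wildcard filter: everything matching "vfs_*" except "vfs_caches_*". */
	return register_fprobe(&my_fprobe, "vfs_*", "vfs_caches_*");
}
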
diff --git a/include/linux/frame.h b/include/linux/frame.h
deleted file mode 100644
index 02d3ca2d9598..000000000000
--- a/include/linux/frame.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_FRAME_H
-#define _LINUX_FRAME_H
-
-#ifdef CONFIG_STACK_VALIDATION
-/*
- * This macro marks the given function's stack frame as "non-standard", which
- * tells objtool to ignore the function when doing stack metadata validation.
- * It should only be used in special cases where you're 100% sure it won't
- * affect the reliability of frame pointers and kernel stack traces.
- *
- * For more information, see tools/objtool/Documentation/stack-validation.txt.
- */
-#define STACK_FRAME_NON_STANDARD(func) \
- static void __used __section(.discard.func_stack_frame_non_standard) \
- *__func_stack_frame_non_standard_##func = func
-
-#else /* !CONFIG_STACK_VALIDATION */
-
-#define STACK_FRAME_NON_STANDARD(func)
-
-#endif /* CONFIG_STACK_VALIDATION */
-
-#endif /* _LINUX_FRAME_H */
diff --git a/include/linux/freelist.h b/include/linux/freelist.h
new file mode 100644
index 000000000000..fc1842b96469
--- /dev/null
+++ b/include/linux/freelist.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+#ifndef FREELIST_H
+#define FREELIST_H
+
+#include <linux/atomic.h>
+
+/*
+ * Copyright: cameron@moodycamel.com
+ *
+ * A simple CAS-based lock-free free list. Not the fastest thing in the world
+ * under heavy contention, but simple and correct (assuming nodes are never
+ * freed until after the free list is destroyed), and fairly speedy under low
+ * contention.
+ *
+ * Adapted from: https://moodycamel.com/blog/2014/solving-the-aba-problem-for-lock-free-free-lists
+ */
+
+struct freelist_node {
+ atomic_t refs;
+ struct freelist_node *next;
+};
+
+struct freelist_head {
+ struct freelist_node *head;
+};
+
+#define REFS_ON_FREELIST 0x80000000
+#define REFS_MASK 0x7FFFFFFF
+
+static inline void __freelist_add(struct freelist_node *node, struct freelist_head *list)
+{
+ /*
+ * Since the refcount is zero, and nobody can increase it once it's
+ * zero (except us, and we run only one copy of this method per node at
+ * a time, i.e. the single thread case), then we know we can safely
+ * change the next pointer of the node; however, once the refcount is
+ * back above zero, then other threads could increase it (happens under
+ * heavy contention, when the refcount goes to zero in between a load
+ * and a refcount increment of a node in try_get, then back up to
+ * something non-zero, then the refcount increment is done by the other
+ * thread) -- so if the CAS to add the node to the actual list fails,
+ * decrease the refcount and leave the add operation to the next thread
+ * that puts the refcount back to zero (which could be us, hence the
+ * loop).
+ */
+ struct freelist_node *head = READ_ONCE(list->head);
+
+ for (;;) {
+ WRITE_ONCE(node->next, head);
+ atomic_set_release(&node->refs, 1);
+
+ if (!try_cmpxchg_release(&list->head, &head, node)) {
+ /*
+ * Hmm, the add failed, but we can only try again when
+ * the refcount goes back to zero.
+ */
+ if (atomic_fetch_add_release(REFS_ON_FREELIST - 1, &node->refs) == 1)
+ continue;
+ }
+ return;
+ }
+}
+
+static inline void freelist_add(struct freelist_node *node, struct freelist_head *list)
+{
+ /*
+ * We know that the should-be-on-freelist bit is 0 at this point, so
+ * it's safe to set it using a fetch_add.
+ */
+ if (!atomic_fetch_add_release(REFS_ON_FREELIST, &node->refs)) {
+ /*
+ * Oh look! We were the last ones referencing this node, and we
+ * know we want to add it to the free list, so let's do it!
+ */
+ __freelist_add(node, list);
+ }
+}
+
+static inline struct freelist_node *freelist_try_get(struct freelist_head *list)
+{
+ struct freelist_node *prev, *next, *head = smp_load_acquire(&list->head);
+ unsigned int refs;
+
+ while (head) {
+ prev = head;
+ refs = atomic_read(&head->refs);
+ if ((refs & REFS_MASK) == 0 ||
+ !atomic_try_cmpxchg_acquire(&head->refs, &refs, refs+1)) {
+ head = smp_load_acquire(&list->head);
+ continue;
+ }
+
+ /*
+ * Good, reference count has been incremented (it wasn't at
+ * zero), which means we can read the next and not worry about
+ * it changing between now and the time we do the CAS.
+ */
+ next = READ_ONCE(head->next);
+ if (try_cmpxchg_acquire(&list->head, &head, next)) {
+ /*
+ * Yay, got the node. This means it was on the list,
+ * which means should-be-on-freelist must be false no
+ * matter the refcount (because nobody else knows it's
+ * been taken off yet, it can't have been put back on).
+ */
+ WARN_ON_ONCE(atomic_read(&head->refs) & REFS_ON_FREELIST);
+
+ /*
+ * Decrease refcount twice, once for our ref, and once
+ * for the list's ref.
+ */
+ atomic_fetch_add(-2, &head->refs);
+
+ return head;
+ }
+
+ /*
+ * OK, the head must have changed on us, but we still need to decrement
+ * the refcount we increased.
+ */
+ refs = atomic_fetch_add(-1, &prev->refs);
+ if (refs == REFS_ON_FREELIST + 1)
+ __freelist_add(prev, list);
+ }
+
+ return NULL;
+}
+
+#endif /* FREELIST_H */
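
Usage sketch under the stated lifetime rule (nodes are never freed while the list is live); the wrapper type and function are hypothetical:

#include <linux/container_of.h>
#include <linux/freelist.h>
#include <linux/printk.h>

struct my_item {
	struct freelist_node node;	/* must outlive the list, per above */
	int payload;
};

static struct freelist_head pool;	/* zero-initialized: empty list */

static void pool_example(void)
{
	static struct my_item item = { .payload = 42 };
	struct freelist_node *n;

	freelist_add(&item.node, &pool);	/* publish the node */

	n = freelist_try_get(&pool);		/* NULL if (now) empty */
	if (n)
		pr_info("payload %d\n",
			container_of(n, struct my_item, node)->payload);
}
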
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 21f5aa0b217f..b303472255be 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -8,9 +8,11 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>
+#include <linux/jump_label.h>
#ifdef CONFIG_FREEZER
-extern atomic_t system_freezing_cnt; /* nr of freezing conds in effect */
+DECLARE_STATIC_KEY_FALSE(freezer_active);
+
extern bool pm_freezing; /* PM freezing in effect */
extern bool pm_nosig_freezing; /* PM nosig freezing in effect */
@@ -22,10 +24,7 @@ extern unsigned int freeze_timeout_msecs;
/*
* Check if a process has been frozen
*/
-static inline bool frozen(struct task_struct *p)
-{
- return p->flags & PF_FROZEN;
-}
+extern bool frozen(struct task_struct *p);
extern bool freezing_slow_path(struct task_struct *p);
@@ -34,9 +33,10 @@ extern bool freezing_slow_path(struct task_struct *p);
*/
static inline bool freezing(struct task_struct *p)
{
- if (likely(!atomic_read(&system_freezing_cnt)))
- return false;
- return freezing_slow_path(p);
+ if (static_branch_unlikely(&freezer_active))
+ return freezing_slow_path(p);
+
+ return false;
}
/* Takes and releases task alloc lock using task_lock() */
@@ -48,23 +48,14 @@ extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
extern void thaw_kernel_threads(void);
-/*
- * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
- * If try_to_freeze causes a lockdep warning it means the caller may deadlock
- */
-static inline bool try_to_freeze_unsafe(void)
+static inline bool try_to_freeze(void)
{
might_sleep();
if (likely(!freezing(current)))
return false;
- return __refrigerator(false);
-}
-
-static inline bool try_to_freeze(void)
-{
if (!(current->flags & PF_NOFREEZE))
debug_check_no_locks_held();
- return try_to_freeze_unsafe();
+ return __refrigerator(false);
}
extern bool freeze_task(struct task_struct *p);
@@ -79,184 +70,6 @@ static inline bool cgroup_freezing(struct task_struct *task)
}
#endif /* !CONFIG_CGROUP_FREEZER */
-/*
- * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
- * calls wait_for_completion(&vfork) and reset right after it returns from this
- * function. Next, the parent should call try_to_freeze() to freeze itself
- * appropriately in case the child has exited before the freezing of tasks is
- * complete. However, we don't want kernel threads to be frozen in unexpected
- * places, so we allow them to block freeze_processes() instead or to set
- * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
- * parent won't really block freeze_processes(), since ____call_usermodehelper()
- * (the child) does a little before exec/exit and it can't be frozen before
- * waking up the parent.
- */
-
-
-/**
- * freezer_do_not_count - tell freezer to ignore %current
- *
- * Tell freezers to ignore the current task when determining whether the
- * target frozen state is reached. IOW, the current task will be
- * considered frozen enough by freezers.
- *
- * The caller shouldn't do anything which isn't allowed for a frozen task
- * until freezer_cont() is called. Usually, freezer[_do_not]_count() pair
- * wrap a scheduling operation and nothing much else.
- */
-static inline void freezer_do_not_count(void)
-{
- current->flags |= PF_FREEZER_SKIP;
-}
-
-/**
- * freezer_count - tell freezer to stop ignoring %current
- *
- * Undo freezer_do_not_count(). It tells freezers that %current should be
- * considered again and tries to freeze if freezing condition is already in
- * effect.
- */
-static inline void freezer_count(void)
-{
- current->flags &= ~PF_FREEZER_SKIP;
- /*
- * If freezing is in progress, the following paired with smp_mb()
- * in freezer_should_skip() ensures that either we see %true
- * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
- */
- smp_mb();
- try_to_freeze();
-}
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-static inline void freezer_count_unsafe(void)
-{
- current->flags &= ~PF_FREEZER_SKIP;
- smp_mb();
- try_to_freeze_unsafe();
-}
-
-/**
- * freezer_should_skip - whether to skip a task when determining frozen
- * state is reached
- * @p: task in quesion
- *
- * This function is used by freezers after establishing %true freezing() to
- * test whether a task should be skipped when determining the target frozen
- * state is reached. IOW, if this function returns %true, @p is considered
- * frozen enough.
- */
-static inline bool freezer_should_skip(struct task_struct *p)
-{
- /*
- * The following smp_mb() paired with the one in freezer_count()
- * ensures that either freezer_count() sees %true freezing() or we
- * see cleared %PF_FREEZER_SKIP and return %false. This makes it
- * impossible for a task to slip frozen state testing after
- * clearing %PF_FREEZER_SKIP.
- */
- smp_mb();
- return p->flags & PF_FREEZER_SKIP;
-}
-
-/*
- * These functions are intended to be used whenever you want allow a sleeping
- * task to be frozen. Note that neither return any clear indication of
- * whether a freeze event happened while in this function.
- */
-
-/* Like schedule(), but should not block the freezer. */
-static inline void freezable_schedule(void)
-{
- freezer_do_not_count();
- schedule();
- freezer_count();
-}
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-static inline void freezable_schedule_unsafe(void)
-{
- freezer_do_not_count();
- schedule();
- freezer_count_unsafe();
-}
-
-/*
- * Like schedule_timeout(), but should not block the freezer. Do not
- * call this with locks held.
- */
-static inline long freezable_schedule_timeout(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout(timeout);
- freezer_count();
- return __retval;
-}
-
-/*
- * Like schedule_timeout_interruptible(), but should not block the freezer. Do not
- * call this with locks held.
- */
-static inline long freezable_schedule_timeout_interruptible(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout_interruptible(timeout);
- freezer_count();
- return __retval;
-}
-
-/* Like schedule_timeout_killable(), but should not block the freezer. */
-static inline long freezable_schedule_timeout_killable(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout_killable(timeout);
- freezer_count();
- return __retval;
-}
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout_killable(timeout);
- freezer_count_unsafe();
- return __retval;
-}
-
-/*
- * Like schedule_hrtimeout_range(), but should not block the freezer. Do not
- * call this with locks held.
- */
-static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
- u64 delta, const enum hrtimer_mode mode)
-{
- int __retval;
- freezer_do_not_count();
- __retval = schedule_hrtimeout_range(expires, delta, mode);
- freezer_count();
- return __retval;
-}
-
-/*
- * Freezer-friendly wrappers around wait_event_interruptible(),
- * wait_event_killable() and wait_event_interruptible_timeout(), originally
- * defined in <linux/wait.h>
- */
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-#define wait_event_freezekillable_unsafe(wq, condition) \
-({ \
- int __retval; \
- freezer_do_not_count(); \
- __retval = wait_event_killable(wq, (condition)); \
- freezer_count_unsafe(); \
- __retval; \
-})
-
#else /* !CONFIG_FREEZER */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }
@@ -268,35 +81,10 @@ static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
static inline void thaw_kernel_threads(void) {}
-static inline bool try_to_freeze_nowarn(void) { return false; }
static inline bool try_to_freeze(void) { return false; }
-static inline void freezer_do_not_count(void) {}
-static inline void freezer_count(void) {}
-static inline int freezer_should_skip(struct task_struct *p) { return 0; }
static inline void set_freezable(void) {}
-#define freezable_schedule() schedule()
-
-#define freezable_schedule_unsafe() schedule()
-
-#define freezable_schedule_timeout(timeout) schedule_timeout(timeout)
-
-#define freezable_schedule_timeout_interruptible(timeout) \
- schedule_timeout_interruptible(timeout)
-
-#define freezable_schedule_timeout_killable(timeout) \
- schedule_timeout_killable(timeout)
-
-#define freezable_schedule_timeout_killable_unsafe(timeout) \
- schedule_timeout_killable(timeout)
-
-#define freezable_schedule_hrtimeout_range(expires, delta, mode) \
- schedule_hrtimeout_range(expires, delta, mode)
-
-#define wait_event_freezekillable_unsafe(wq, condition) \
- wait_event_killable(wq, condition)
-
#endif /* !CONFIG_FREEZER */
#endif /* FREEZER_H_INCLUDED */
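
With the freezable_schedule*() family removed, a freezable kernel thread falls back to the plain pattern below (a hedged sketch, not from the patch):

#include <linux/freezer.h>
#include <linux/kthread.h>

static int my_kthread(void *data)
{
	set_freezable();	/* opt this kthread in to the freezer */

	while (!kthread_should_stop()) {
		/*
		 * freezing(current) sits behind a static branch now, so
		 * this check is nearly free while nothing is freezing.
		 */
		try_to_freeze();

		/* ... do one unit of work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
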
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
index 6d775984905b..a631bac12220 100644
--- a/include/linux/frontswap.h
+++ b/include/linux/frontswap.h
@@ -7,31 +7,17 @@
#include <linux/bitops.h>
#include <linux/jump_label.h>
-/*
- * Return code to denote that requested number of
- * frontswap pages are unused(moved to page cache).
- * Used in in shmem_unuse and try_to_unuse.
- */
-#define FRONTSWAP_PAGES_UNUSED 2
-
struct frontswap_ops {
void (*init)(unsigned); /* this swap type was just swapon'ed */
int (*store)(unsigned, pgoff_t, struct page *); /* store a page */
int (*load)(unsigned, pgoff_t, struct page *); /* load a page */
void (*invalidate_page)(unsigned, pgoff_t); /* page no longer needed */
void (*invalidate_area)(unsigned); /* swap type just swapoff'ed */
- struct frontswap_ops *next; /* private pointer to next ops */
};
-extern void frontswap_register_ops(struct frontswap_ops *ops);
-extern void frontswap_shrink(unsigned long);
-extern unsigned long frontswap_curr_pages(void);
-extern void frontswap_writethrough(bool);
-#define FRONTSWAP_HAS_EXCLUSIVE_GETS
-extern void frontswap_tmem_exclusive_gets(bool);
+int frontswap_register_ops(const struct frontswap_ops *ops);
-extern bool __frontswap_test(struct swap_info_struct *, pgoff_t);
-extern void __frontswap_init(unsigned type, unsigned long *map);
+extern void frontswap_init(unsigned type, unsigned long *map);
extern int __frontswap_store(struct page *page);
extern int __frontswap_load(struct page *page);
extern void __frontswap_invalidate_page(unsigned, pgoff_t);
@@ -45,11 +31,6 @@ static inline bool frontswap_enabled(void)
return static_branch_unlikely(&frontswap_enabled_key);
}
-static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
-{
- return __frontswap_test(sis, offset);
-}
-
static inline void frontswap_map_set(struct swap_info_struct *p,
unsigned long *map)
{
@@ -68,11 +49,6 @@ static inline bool frontswap_enabled(void)
return false;
}
-static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
-{
- return false;
-}
-
static inline void frontswap_map_set(struct swap_info_struct *p,
unsigned long *map)
{
@@ -112,11 +88,4 @@ static inline void frontswap_invalidate_area(unsigned type)
__frontswap_invalidate_area(type);
}
-static inline void frontswap_init(unsigned type, unsigned long *map)
-{
-#ifdef CONFIG_FRONTSWAP
- __frontswap_init(type, map);
-#endif
-}
-
#endif /* _LINUX_FRONTSWAP_H */
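
Since the ops are now const and single-backend (->next is gone), registration reduces to the sketch below; all my_* callbacks are hypothetical stubs and the return value reports whether the backend was accepted:

#include <linux/frontswap.h>

static void my_init(unsigned type) { }				/* hypothetical */
static int my_store(unsigned type, pgoff_t off, struct page *p) { return -1; }
static int my_load(unsigned type, pgoff_t off, struct page *p) { return -1; }
static void my_invalidate_page(unsigned type, pgoff_t off) { }
static void my_invalidate_area(unsigned type) { }

static const struct frontswap_ops my_fs_ops = {
	.init		 = my_init,
	.store		 = my_store,
	.load		 = my_load,
	.invalidate_page = my_invalidate_page,
	.invalidate_area = my_invalidate_area,
};

static int __init my_backend_init(void)
{
	/* Only one backend can register now that ->next is gone. */
	return frontswap_register_ops(&my_fs_ops);
}
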
diff --git a/include/linux/fs.h b/include/linux/fs.h
index abedbffe2c9e..e654435f1651 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -24,7 +24,6 @@
#include <linux/capability.h>
#include <linux/semaphore.h>
#include <linux/fcntl.h>
-#include <linux/fiemap.h>
#include <linux/rculist_bl.h>
#include <linux/atomic.h>
#include <linux/shrinker.h>
@@ -40,6 +39,10 @@
#include <linux/fs_types.h>
#include <linux/build_bug.h>
#include <linux/stddef.h>
+#include <linux/mount.h>
+#include <linux/cred.h>
+#include <linux/mnt_idmapping.h>
+#include <linux/slab.h>
#include <asm/byteorder.h>
#include <uapi/linux/fs.h>
@@ -47,7 +50,9 @@
struct backing_dev_info;
struct bdi_writeback;
struct bio;
+struct io_comp_batch;
struct export_operations;
+struct fiemap_extent_info;
struct hd_geometry;
struct iovec;
struct kiocb;
@@ -68,21 +73,16 @@ struct fsverity_info;
struct fsverity_operations;
struct fs_context;
struct fs_parameter_spec;
+struct fileattr;
+struct iomap_ops;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
extern void __init files_init(void);
extern void __init files_maxfiles_init(void);
-extern struct files_stat_struct files_stat;
extern unsigned long get_max_files(void);
extern unsigned int sysctl_nr_open;
-extern struct inodes_stat_t inodes_stat;
-extern int leases_enable, lease_break_time;
-extern int sysctl_protected_symlinks;
-extern int sysctl_protected_hardlinks;
-extern int sysctl_protected_fifos;
-extern int sysctl_protected_regular;
typedef __kernel_rwf_t rwf_t;
@@ -142,7 +142,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* Expect random access pattern */
#define FMODE_RANDOM ((__force fmode_t)0x1000)
-/* File is huge (eg. /dev/kmem): treat loff_t as unsigned */
+/* File is huge (eg. /dev/mem): treat loff_t as unsigned */
#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000)
/* File is opened with O_PATH; almost nothing can be done with it */
@@ -163,6 +163,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* File is stream-like */
#define FMODE_STREAM ((__force fmode_t)0x200000)
+/* File supports DIRECT IO */
+#define FMODE_CAN_ODIRECT ((__force fmode_t)0x400000)
+
/* File was opened by fanotify and shouldn't generate fanotify events */
#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
@@ -175,13 +178,11 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* File does not contribute to nr_files count */
#define FMODE_NOACCOUNT ((__force fmode_t)0x20000000)
-/*
- * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
- * that indicates that they should check the contents of the iovec are
- * valid, but not check the memory that the iovec elements
- * points too.
- */
-#define CHECK_IOVEC_ONLY -1
+/* File supports async buffered reads */
+#define FMODE_BUF_RASYNC ((__force fmode_t)0x40000000)
+
+/* File supports async nowait buffered writes */
+#define FMODE_BUF_WASYNC ((__force fmode_t)0x80000000)
/*
* Attribute flags. These should be or-ed together to figure out what
@@ -224,8 +225,26 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
struct iattr {
unsigned int ia_valid;
umode_t ia_mode;
- kuid_t ia_uid;
- kgid_t ia_gid;
+ /*
+ * The two anonymous unions wrap structures with the same member.
+ *
+ * Filesystems raising FS_ALLOW_IDMAP need to use ia_vfs{g,u}id which
+ * are a dedicated type requiring the filesystem to use the dedicated
+ * helpers. Other filesystems can continue to use ia_{g,u}id until they
+ * have been ported.
+ *
+ * They always contain the same value. In other words, FS_ALLOW_IDMAP
+ * filesystems pass down the same value on idmapped mounts as they
+ * would on regular mounts.
+ */
+ union {
+ kuid_t ia_uid;
+ vfsuid_t ia_vfsuid;
+ };
+ union {
+ kgid_t ia_gid;
+ vfsgid_t ia_vfsgid;
+ };
loff_t ia_size;
struct timespec64 ia_atime;
struct timespec64 ia_mtime;
@@ -268,7 +287,7 @@ struct iattr {
* trying again. The aop will be taking reasonable
* precautions not to livelock. If the caller held a page
* reference, it should drop it before retrying. Returned
- * by readpage().
+ * by read_folio().
*
* address_space_operation functions return these large constants to indicate
* special semantics to the caller. These are much larger than the bytes in a
@@ -281,17 +300,13 @@ enum positive_aop_returns {
AOP_TRUNCATED_PAGE = 0x80001,
};
-#define AOP_FLAG_CONT_EXPAND 0x0001 /* called from cont_expand */
-#define AOP_FLAG_NOFS 0x0002 /* used by filesystem to direct
- * helper code (eg buffer layer)
- * to clear GFP_FS from alloc */
-
/*
* oh the beauties of C type declarations.
*/
struct page;
struct address_space;
struct writeback_control;
+struct readahead_control;
/*
* Write life time hint values.
@@ -306,30 +321,31 @@ enum rw_hint {
WRITE_LIFE_EXTREME = RWH_WRITE_LIFE_EXTREME,
};
-#define IOCB_EVENTFD (1 << 0)
-#define IOCB_APPEND (1 << 1)
-#define IOCB_DIRECT (1 << 2)
-#define IOCB_HIPRI (1 << 3)
-#define IOCB_DSYNC (1 << 4)
-#define IOCB_SYNC (1 << 5)
-#define IOCB_WRITE (1 << 6)
-#define IOCB_NOWAIT (1 << 7)
+/* Match RWF_* bits to IOCB bits */
+#define IOCB_HIPRI (__force int) RWF_HIPRI
+#define IOCB_DSYNC (__force int) RWF_DSYNC
+#define IOCB_SYNC (__force int) RWF_SYNC
+#define IOCB_NOWAIT (__force int) RWF_NOWAIT
+#define IOCB_APPEND (__force int) RWF_APPEND
+
+/* non-RWF related bits - start at 16 */
+#define IOCB_EVENTFD (1 << 16)
+#define IOCB_DIRECT (1 << 17)
+#define IOCB_WRITE (1 << 18)
+/* iocb->ki_waitq is valid */
+#define IOCB_WAITQ (1 << 19)
+#define IOCB_NOIO (1 << 20)
+/* can use bio alloc cache */
+#define IOCB_ALLOC_CACHE (1 << 21)
struct kiocb {
struct file *ki_filp;
-
- /* The 'ki_filp' pointer is shared in a union for aio */
- randomized_struct_fields_start
-
loff_t ki_pos;
- void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
+ void (*ki_complete)(struct kiocb *iocb, long ret);
void *private;
int ki_flags;
- u16 ki_hint;
u16 ki_ioprio; /* See linux/ioprio.h */
- unsigned int ki_cookie; /* for ->iopoll */
-
- randomized_struct_fields_end
+ struct wait_page_queue *ki_waitq; /* for async buffered IO */
};
static inline bool is_sync_kiocb(struct kiocb *kiocb)
@@ -337,47 +353,20 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
return kiocb->ki_complete == NULL;
}
-/*
- * "descriptor" for what we're up to with a read.
- * This allows us to use the same read code yet
- * have multiple different users of the data that
- * we read from a file.
- *
- * The simplest case just copies the data to user
- * mode.
- */
-typedef struct {
- size_t written;
- size_t count;
- union {
- char __user *buf;
- void *data;
- } arg;
- int error;
-} read_descriptor_t;
-
-typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
- unsigned long, unsigned long);
-
struct address_space_operations {
int (*writepage)(struct page *page, struct writeback_control *wbc);
- int (*readpage)(struct file *, struct page *);
+ int (*read_folio)(struct file *, struct folio *);
/* Write back some dirty pages from this mapping. */
int (*writepages)(struct address_space *, struct writeback_control *);
- /* Set a page dirty. Return true if this dirtied it */
- int (*set_page_dirty)(struct page *page);
+ /* Mark a folio dirty. Return true if this dirtied it */
+ bool (*dirty_folio)(struct address_space *, struct folio *);
- /*
- * Reads in the requested pages. Unlike ->readpage(), this is
- * PURELY used for read-ahead!.
- */
- int (*readpages)(struct file *filp, struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages);
+ void (*readahead)(struct readahead_control *);
int (*write_begin)(struct file *, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata);
int (*write_end)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
@@ -385,55 +374,45 @@ struct address_space_operations {
/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
sector_t (*bmap)(struct address_space *, sector_t);
- void (*invalidatepage) (struct page *, unsigned int, unsigned int);
- int (*releasepage) (struct page *, gfp_t);
- void (*freepage)(struct page *);
+ void (*invalidate_folio) (struct folio *, size_t offset, size_t len);
+ bool (*release_folio)(struct folio *, gfp_t);
+ void (*free_folio)(struct folio *folio);
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
/*
- * migrate the contents of a page to the specified target. If
+ * migrate the contents of a folio to the specified target. If
* migrate_mode is MIGRATE_ASYNC, it must not block.
*/
- int (*migratepage) (struct address_space *,
- struct page *, struct page *, enum migrate_mode);
- bool (*isolate_page)(struct page *, isolate_mode_t);
- void (*putback_page)(struct page *);
- int (*launder_page) (struct page *);
- int (*is_partially_uptodate) (struct page *, unsigned long,
- unsigned long);
- void (*is_dirty_writeback) (struct page *, bool *, bool *);
+ int (*migrate_folio)(struct address_space *, struct folio *dst,
+ struct folio *src, enum migrate_mode);
+ int (*launder_folio)(struct folio *);
+ bool (*is_partially_uptodate) (struct folio *, size_t from,
+ size_t count);
+ void (*is_dirty_writeback) (struct folio *, bool *dirty, bool *wb);
int (*error_remove_page)(struct address_space *, struct page *);
/* swapfile support */
int (*swap_activate)(struct swap_info_struct *sis, struct file *file,
sector_t *span);
void (*swap_deactivate)(struct file *file);
+ int (*swap_rw)(struct kiocb *iocb, struct iov_iter *iter);
};
extern const struct address_space_operations empty_aops;
-/*
- * pagecache_write_begin/pagecache_write_end must be used by general code
- * to write into the pagecache.
- */
-int pagecache_write_begin(struct file *, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata);
-
-int pagecache_write_end(struct file *, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata);
-
/**
* struct address_space - Contents of a cacheable, mappable object.
* @host: Owner, either the inode or the block_device.
* @i_pages: Cached pages.
+ * @invalidate_lock: Guards coherency between page cache contents and
+ * file offset->disk block mappings in the filesystem during invalidates.
+ * It is also used to block modification of page cache contents through
+ * memory mappings.
* @gfp_mask: Memory allocation flags to use for allocating pages.
* @i_mmap_writable: Number of VM_SHARED mappings.
* @nr_thps: Number of THPs in the pagecache (non-shmem only).
* @i_mmap: Tree of private and shared mappings.
* @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable.
* @nrpages: Number of page entries, protected by the i_pages lock.
- * @nrexceptional: Shadow or DAX entries, protected by the i_pages lock.
* @writeback_index: Writeback starts here.
* @a_ops: Methods.
* @flags: Error bits and flags (AS_*).
@@ -445,6 +424,7 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
struct address_space {
struct inode *host;
struct xarray i_pages;
+ struct rw_semaphore invalidate_lock;
gfp_t gfp_mask;
atomic_t i_mmap_writable;
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
@@ -454,7 +434,6 @@ struct address_space {
struct rb_root_cached i_mmap;
struct rw_semaphore i_mmap_rwsem;
unsigned long nrpages;
- unsigned long nrexceptional;
pgoff_t writeback_index;
const struct address_space_operations *a_ops;
unsigned long flags;
@@ -468,45 +447,6 @@ struct address_space {
* must be enforced here for CRIS, to let the least significant bit
* of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
*/
-struct request_queue;
-
-struct block_device {
- dev_t bd_dev; /* not a kdev_t - it's a search key */
- int bd_openers;
- struct inode * bd_inode; /* will die */
- struct super_block * bd_super;
- struct mutex bd_mutex; /* open/close mutex */
- void * bd_claiming;
- void * bd_holder;
- int bd_holders;
- bool bd_write_holder;
-#ifdef CONFIG_SYSFS
- struct list_head bd_holder_disks;
-#endif
- struct block_device * bd_contains;
- unsigned bd_block_size;
- u8 bd_partno;
- struct hd_struct * bd_part;
- /* number of times partitions within this device have been opened. */
- unsigned bd_part_count;
- int bd_invalidated;
- struct gendisk * bd_disk;
- struct request_queue * bd_queue;
- struct backing_dev_info *bd_bdi;
- struct list_head bd_list;
- /*
- * Private data. You must have bd_claim'ed the block_device
- * to use this. NOTE: bd_claim allows an owner to claim
- * the same device multiple times, the owner must take special
- * care to not mess up bd_private for that case.
- */
- unsigned long bd_private;
-
- /* The counter of freeze processes */
- int bd_fsfreeze_count;
- /* Mutex for freeze */
- struct mutex bd_fsfreeze_mutex;
-} __randomize_layout;
/* XArray tags, for tagging dirty and writeback pages in the pagecache. */
#define PAGECACHE_TAG_DIRTY XA_MARK_0
@@ -526,11 +466,21 @@ static inline void i_mmap_lock_write(struct address_space *mapping)
down_write(&mapping->i_mmap_rwsem);
}
+static inline int i_mmap_trylock_write(struct address_space *mapping)
+{
+ return down_write_trylock(&mapping->i_mmap_rwsem);
+}
+
static inline void i_mmap_unlock_write(struct address_space *mapping)
{
up_write(&mapping->i_mmap_rwsem);
}
+static inline int i_mmap_trylock_read(struct address_space *mapping)
+{
+ return down_read_trylock(&mapping->i_mmap_rwsem);
+}
+
static inline void i_mmap_lock_read(struct address_space *mapping)
{
down_read(&mapping->i_mmap_rwsem);
@@ -541,6 +491,16 @@ static inline void i_mmap_unlock_read(struct address_space *mapping)
up_read(&mapping->i_mmap_rwsem);
}
+static inline void i_mmap_assert_locked(struct address_space *mapping)
+{
+ lockdep_assert_held(&mapping->i_mmap_rwsem);
+}
+
+static inline void i_mmap_assert_write_locked(struct address_space *mapping)
+{
+ lockdep_assert_held_write(&mapping->i_mmap_rwsem);
+}
+
/*
* Might pages of this file be mapped into userspace?
*/
@@ -551,7 +511,7 @@ static inline int mapping_mapped(struct address_space *mapping)
/*
* Might pages of this file have been modified in userspace?
- * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff
+ * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap
* marks vma as VM_SHARED if it is shared, and the file was opened for
* writing i.e. vma may be mprotected writable even if now readonly.
*
@@ -598,6 +558,11 @@ static inline void mapping_allow_writable(struct address_space *mapping)
struct posix_acl;
#define ACL_NOT_CACHED ((void *)(-1))
+/*
+ * ACL_DONT_CACHE is for stacked filesystems that rely on the underlying fs to
+ * cache the ACL. This also means that ->get_acl() can be called in RCU mode
+ * with the LOOKUP_RCU flag.
+ */
#define ACL_DONT_CACHE ((void *)(-3))
static inline struct posix_acl *
@@ -714,7 +679,6 @@ struct inode {
struct list_head i_devices;
union {
struct pipe_inode_info *i_pipe;
- struct block_device *i_bdev;
struct cdev *i_cdev;
char *i_link;
unsigned i_dir_seq;
@@ -832,9 +796,42 @@ static inline void inode_lock_shared_nested(struct inode *inode, unsigned subcla
down_read_nested(&inode->i_rwsem, subclass);
}
+static inline void filemap_invalidate_lock(struct address_space *mapping)
+{
+ down_write(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_unlock(struct address_space *mapping)
+{
+ up_write(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_lock_shared(struct address_space *mapping)
+{
+ down_read(&mapping->invalidate_lock);
+}
+
+static inline int filemap_invalidate_trylock_shared(
+ struct address_space *mapping)
+{
+ return down_read_trylock(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_unlock_shared(
+ struct address_space *mapping)
+{
+ up_read(&mapping->invalidate_lock);
+}
+
void lock_two_nondirectories(struct inode *, struct inode*);
void unlock_two_nondirectories(struct inode *, struct inode*);
+void filemap_invalidate_lock_two(struct address_space *mapping1,
+ struct address_space *mapping2);
+void filemap_invalidate_unlock_two(struct address_space *mapping1,
+ struct address_space *mapping2);
+
+
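
A sketch of the pattern these helpers enable (hypothetical filesystem code; start and end are assumed byte offsets): hold invalidate_lock exclusively while removing pages and changing the offset->block mapping, so that racing readers and page faults serialize against it through the shared lock.

	filemap_invalidate_lock(inode->i_mapping);
	truncate_inode_pages_range(inode->i_mapping, start, end);
	/* ... punch the hole in the offset->disk block mapping ... */
	filemap_invalidate_unlock(inode->i_mapping);
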
/*
* NOTE: in a 32bit arch with a preemptable kernel and
* an UP compile the i_size_read/write must be atomic
@@ -900,8 +897,6 @@ static inline unsigned imajor(const struct inode *inode)
return MAJOR(inode->i_rdev);
}
-extern struct block_device *I_BDEV(struct inode *inode);
-
struct fown_struct {
rwlock_t lock; /* protects pid, uid, euid fields */
struct pid *pid; /* pid or -pgrp where SIGIO should be sent */
@@ -910,18 +905,27 @@ struct fown_struct {
int signum; /* posix.1b rt signal to be delivered on IO */
};
-/*
- * Track a single file's readahead state
+/**
+ * struct file_ra_state - Track a file's readahead state.
+ * @start: Where the most recent readahead started.
+ * @size: Number of pages read in the most recent readahead.
+ * @async_size: Number of pages that were/are not needed immediately
+ * and so were/are genuinely "ahead". Start next readahead when
+ * the first of these pages is accessed.
+ * @ra_pages: Maximum size of a readahead request, copied from the bdi.
+ * @mmap_miss: How many mmap accesses missed in the page cache.
+ * @prev_pos: The last byte in the most recent read request.
+ *
+ * When this structure is passed to ->readahead(), the "most recent"
+ * readahead means the current readahead.
*/
struct file_ra_state {
- pgoff_t start; /* where readahead started */
- unsigned int size; /* # of readahead pages */
- unsigned int async_size; /* do asynchronous readahead when
- there are only # of pages ahead */
-
- unsigned int ra_pages; /* Maximum readahead window */
- unsigned int mmap_miss; /* Cache miss stat for mmap accesses */
- loff_t prev_pos; /* Cache last read() position */
+ pgoff_t start;
+ unsigned int size;
+ unsigned int async_size;
+ unsigned int ra_pages;
+ unsigned int mmap_miss;
+ loff_t prev_pos;
};
/*
@@ -935,19 +939,19 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
struct file {
union {
- struct llist_node fu_llist;
- struct rcu_head fu_rcuhead;
- } f_u;
+ struct llist_node f_llist;
+ struct rcu_head f_rcuhead;
+ unsigned int f_iocb_flags;
+ };
struct path f_path;
struct inode *f_inode; /* cached value */
const struct file_operations *f_op;
/*
- * Protects f_ep_links, f_flags.
+ * Protects f_ep, f_flags.
* Must not be taken from IRQ context.
*/
spinlock_t f_lock;
- enum rw_hint f_write_hint;
atomic_long_t f_count;
unsigned int f_flags;
fmode_t f_mode;
@@ -966,11 +970,11 @@ struct file {
#ifdef CONFIG_EPOLL
/* Used by fs/eventpoll.c to link all the hooks to this file */
- struct list_head f_ep_links;
- struct list_head f_tfile_llink;
+ struct hlist_head *f_ep;
#endif /* #ifdef CONFIG_EPOLL */
struct address_space *f_mapping;
errseq_t f_wb_err;
+ errseq_t f_sb_err; /* for syncfs */
} __randomize_layout
__attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
@@ -978,7 +982,7 @@ struct file_handle {
__u32 handle_bytes;
int handle_type;
/* file identifier */
- unsigned char f_handle[0];
+ unsigned char f_handle[];
};
static inline struct file *get_file(struct file *f)
@@ -986,9 +990,7 @@ static inline struct file *get_file(struct file *f)
atomic_long_inc(&f->f_count);
return f;
}
-#define get_file_rcu_many(x, cnt) \
- atomic_long_add_unless(&(x)->f_count, (cnt), 0)
-#define get_file_rcu(x) get_file_rcu_many((x), 1)
+#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count)
#define file_count(x) atomic_long_read(&(x)->f_count)
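A hedged sketch of the calling convention: get_file_rcu() is only meaningful under rcu_read_lock() and fails once f_count has already dropped to zero; demo_get_file_rcu() is hypothetical.

static struct file *demo_get_file_rcu(struct file *file)
{
	struct file *ret = NULL;

	rcu_read_lock();
	if (file && get_file_rcu(file))
		ret = file;	/* caller must fput() this later */
	rcu_read_unlock();
	return ret;
}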
#define MAX_NON_LFS ((1UL<<31) - 1)
@@ -1013,6 +1015,7 @@ static inline struct file *get_file(struct file *f)
#define FL_UNLOCK_PENDING 512 /* Lease is being broken */
#define FL_OFDLCK 1024 /* lock is "owned" by struct file */
#define FL_LAYOUT 2048 /* outstanding pNFS layout */
+#define FL_RECLAIM	4096	/* reclaiming locks after a server reboot */
#define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE)
@@ -1033,6 +1036,7 @@ struct file_lock_operations {
};
struct lock_manager_operations {
+ void *lm_mod_owner;
fl_owner_t (*lm_get_owner)(fl_owner_t);
void (*lm_put_owner)(fl_owner_t);
void (*lm_notify)(struct file_lock *); /* unblock callback */
@@ -1040,6 +1044,9 @@ struct lock_manager_operations {
bool (*lm_break)(struct file_lock *);
int (*lm_change)(struct file_lock *, int, struct list_head *);
void (*lm_setup)(struct file_lock *, void **);
+ bool (*lm_breaker_owns_lease)(struct file_lock *);
+ bool (*lm_lock_expirable)(struct file_lock *cfl);
+ void (*lm_expire_lock)(void);
};
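A sketch of how a lease/lock server might wire up the new expiry hooks; the demo_* callbacks are hypothetical and implement only the semantics the field names suggest.

static bool demo_lock_expirable(struct file_lock *cfl)
{
	return false;	/* hypothetical: no lock ever expires */
}

static void demo_expire_lock(void)
{
	/* hypothetical: reap expired client state here */
}

static const struct lock_manager_operations demo_lm_ops = {
	.lm_mod_owner		= THIS_MODULE,
	.lm_lock_expirable	= demo_lock_expirable,
	.lm_expire_lock		= demo_expire_lock,
};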
struct lock_manager {
@@ -1177,6 +1184,8 @@ extern void lease_unregister_notifier(struct notifier_block *);
struct files_struct;
extern void show_fd_locks(struct seq_file *f,
struct file *filp, struct files_struct *files);
+extern bool locks_owner_has_blockers(struct file_lock_context *flctx,
+ fl_owner_t owner);
#else /* !CONFIG_FILE_LOCKING */
static inline int fcntl_getlk(struct file *file, unsigned int cmd,
struct flock __user *user)
@@ -1192,13 +1201,13 @@ static inline int fcntl_setlk(unsigned int fd, struct file *file,
#if BITS_PER_LONG == 32
static inline int fcntl_getlk64(struct file *file, unsigned int cmd,
- struct flock64 __user *user)
+ struct flock64 *user)
{
return -EINVAL;
}
static inline int fcntl_setlk64(unsigned int fd, struct file *file,
- unsigned int cmd, struct flock64 __user *user)
+ unsigned int cmd, struct flock64 *user)
{
return -EACCES;
}
@@ -1312,6 +1321,11 @@ static inline int lease_modify(struct file_lock *fl, int arg,
struct files_struct;
static inline void show_fd_locks(struct seq_file *f,
struct file *filp, struct files_struct *files) {}
+static inline bool locks_owner_has_blockers(struct file_lock_context *flctx,
+ fl_owner_t owner)
+{
+ return false;
+}
#endif /* !CONFIG_FILE_LOCKING */
static inline struct inode *file_inode(const struct file *f)
@@ -1371,6 +1385,7 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_NODIRATIME 2048 /* Do not update directory access times */
#define SB_SILENT 32768
#define SB_POSIXACL (1<<16) /* VFS does not apply the umask */
+#define SB_INLINECRYPT (1<<17) /* Use blk-crypto for encrypted files */
#define SB_KERNMOUNT (1<<22) /* this is a kern_mount call */
#define SB_I_VERSION (1<<23) /* Update inode I_version field */
#define SB_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
@@ -1383,6 +1398,12 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_ACTIVE (1<<30)
#define SB_NOUSER (1<<31)
+/* These flags relate to encoding and casefolding */
+#define SB_ENC_STRICT_MODE_FL (1 << 0)
+
+#define sb_has_strict_encoding(sb) \
+ (sb->s_encoding_flags & SB_ENC_STRICT_MODE_FL)
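A one-line usage sketch with a hypothetical helper: only strict-mode superblocks reject names that fail to decode.

static bool demo_name_acceptable(struct super_block *sb, bool decodable)
{
	return decodable || !sb_has_strict_encoding(sb);
}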
+
/*
* Umount options
*/
@@ -1397,13 +1418,18 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
#define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */
-#define SB_I_MULTIROOT 0x00000008 /* Multiple roots to the dentry tree */
+#define SB_I_STABLE_WRITES 0x00000008 /* don't modify blks until WB is done */
/* sb->s_iflags to limit user namespace mounts */
#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020
#define SB_I_UNTRUSTED_MOUNTER 0x00000040
+#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
+#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */
+#define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */
+#define SB_I_RETIRED 0x00000800 /* superblock shouldn't be reused */
+
/* Possible states of 'frozen' field */
enum {
SB_UNFROZEN = 0, /* FS is unfrozen */
@@ -1418,7 +1444,7 @@ enum {
struct sb_writers {
int frozen; /* Is sb frozen? */
- wait_queue_head_t wait_unfrozen; /* for get_super_thawed() */
+ wait_queue_head_t wait_unfrozen; /* wait for thaw */
struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
};
@@ -1446,11 +1472,15 @@ struct super_block {
const struct xattr_handler **s_xattr;
#ifdef CONFIG_FS_ENCRYPTION
const struct fscrypt_operations *s_cop;
- struct key *s_master_keys; /* master crypto keys in use */
+ struct fscrypt_keyring *s_master_keys; /* master crypto keys in use */
#endif
#ifdef CONFIG_FS_VERITY
const struct fsverity_operations *s_vop;
#endif
+#if IS_ENABLED(CONFIG_UNICODE)
+ struct unicode_map *s_encoding;
+ __u16 s_encoding_flags;
+#endif
struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev;
@@ -1499,22 +1529,23 @@ struct super_block {
const struct dentry_operations *s_d_op; /* default d_op for dentries */
- /*
- * Saved pool identifier for cleancache (-1 means none)
- */
- int cleancache_poolid;
-
struct shrinker s_shrink; /* per-sb shrinker handle */
/* Number of inodes with nlink == 0 but still referenced */
atomic_long_t s_remove_count;
- /* Pending fsnotify inode refs */
- atomic_long_t s_fsnotify_inode_refs;
+ /*
+	 * Number of inode/mount/sb objects that are being watched; note that
+	 * inode objects are currently double-accounted.
+ */
+ atomic_long_t s_fsnotify_connectors;
/* Being remounted read-only */
int s_readonly_remount;
+ /* per-sb errseq_t for reporting writeback errors via syncfs */
+ errseq_t s_wb_err;
+
/* AIO completions deferred from interrupt context */
struct workqueue_struct *s_dio_done_wq;
struct hlist_head s_pins;
@@ -1551,6 +1582,11 @@ struct super_block {
struct list_head s_inodes_wb; /* writeback inodes */
} __randomize_layout;
+static inline struct user_namespace *i_user_ns(const struct inode *inode)
+{
+ return inode->i_sb->s_user_ns;
+}
+
/* Helper functions so that in most cases filesystems will
* not need to deal directly with kuid_t and kgid_t and can
* instead deal with the raw numeric values that are stored
@@ -1558,22 +1594,216 @@ struct super_block {
*/
static inline uid_t i_uid_read(const struct inode *inode)
{
- return from_kuid(inode->i_sb->s_user_ns, inode->i_uid);
+ return from_kuid(i_user_ns(inode), inode->i_uid);
}
static inline gid_t i_gid_read(const struct inode *inode)
{
- return from_kgid(inode->i_sb->s_user_ns, inode->i_gid);
+ return from_kgid(i_user_ns(inode), inode->i_gid);
}
static inline void i_uid_write(struct inode *inode, uid_t uid)
{
- inode->i_uid = make_kuid(inode->i_sb->s_user_ns, uid);
+ inode->i_uid = make_kuid(i_user_ns(inode), uid);
}
static inline void i_gid_write(struct inode *inode, gid_t gid)
{
- inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid);
+ inode->i_gid = make_kgid(i_user_ns(inode), gid);
+}
+
+/**
+ * i_uid_into_mnt - map an inode's i_uid down into a mnt_userns
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @inode: inode to map
+ *
+ * Note, this will eventually be removed completely in favor of the type-safe
+ * i_uid_into_vfsuid().
+ *
+ * Return: the inode's i_uid mapped down according to @mnt_userns.
+ * If the inode's i_uid has no mapping, INVALID_UID is returned.
+ */
+static inline kuid_t i_uid_into_mnt(struct user_namespace *mnt_userns,
+ const struct inode *inode)
+{
+ return AS_KUIDT(make_vfsuid(mnt_userns, i_user_ns(inode), inode->i_uid));
+}
+
+/**
+ * i_uid_into_vfsuid - map an inode's i_uid down into a mnt_userns
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @inode: inode to map
+ *
+ * Return: the inode's i_uid mapped down according to @mnt_userns.
+ * If the inode's i_uid has no mapping, INVALID_VFSUID is returned.
+ */
+static inline vfsuid_t i_uid_into_vfsuid(struct user_namespace *mnt_userns,
+ const struct inode *inode)
+{
+ return make_vfsuid(mnt_userns, i_user_ns(inode), inode->i_uid);
+}
+
+/**
+ * i_uid_needs_update - check whether inode's i_uid needs to be updated
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
+ *
+ * Check whether @inode's i_uid field needs to be updated, taking idmapped
+ * mounts into account if the filesystem supports it.
+ *
+ * Return: true if @inode's i_uid field needs to be updated, false if not.
+ */
+static inline bool i_uid_needs_update(struct user_namespace *mnt_userns,
+ const struct iattr *attr,
+ const struct inode *inode)
+{
+ return ((attr->ia_valid & ATTR_UID) &&
+ !vfsuid_eq(attr->ia_vfsuid,
+ i_uid_into_vfsuid(mnt_userns, inode)));
+}
+
+/**
+ * i_uid_update - update @inode's i_uid field
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
+ *
+ * Safely update @inode's i_uid field translating the vfsuid of any idmapped
+ * mount into the filesystem kuid.
+ */
+static inline void i_uid_update(struct user_namespace *mnt_userns,
+ const struct iattr *attr,
+ struct inode *inode)
+{
+ if (attr->ia_valid & ATTR_UID)
+ inode->i_uid = from_vfsuid(mnt_userns, i_user_ns(inode),
+ attr->ia_vfsuid);
+}
+
+/**
+ * i_gid_into_mnt - map an inode's i_gid down into a mnt_userns
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @inode: inode to map
+ *
+ * Note, this will eventually be removed completely in favor of the type-safe
+ * i_gid_into_vfsgid().
+ *
+ * Return: the inode's i_gid mapped down according to @mnt_userns.
+ * If the inode's i_gid has no mapping, INVALID_GID is returned.
+ */
+static inline kgid_t i_gid_into_mnt(struct user_namespace *mnt_userns,
+ const struct inode *inode)
+{
+ return AS_KGIDT(make_vfsgid(mnt_userns, i_user_ns(inode), inode->i_gid));
+}
+
+/**
+ * i_gid_into_vfsgid - map an inode's i_gid down into a mnt_userns
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @inode: inode to map
+ *
+ * Return: the inode's i_gid mapped down according to @mnt_userns.
+ * If the inode's i_gid has no mapping, INVALID_VFSGID is returned.
+ */
+static inline vfsgid_t i_gid_into_vfsgid(struct user_namespace *mnt_userns,
+ const struct inode *inode)
+{
+ return make_vfsgid(mnt_userns, i_user_ns(inode), inode->i_gid);
+}
+
+/**
+ * i_gid_needs_update - check whether inode's i_gid needs to be updated
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
+ *
+ * Check whether @inode's i_gid field needs to be updated, taking idmapped
+ * mounts into account if the filesystem supports it.
+ *
+ * Return: true if @inode's i_gid field needs to be updated, false if not.
+ */
+static inline bool i_gid_needs_update(struct user_namespace *mnt_userns,
+ const struct iattr *attr,
+ const struct inode *inode)
+{
+ return ((attr->ia_valid & ATTR_GID) &&
+ !vfsgid_eq(attr->ia_vfsgid,
+ i_gid_into_vfsgid(mnt_userns, inode)));
+}
+
+/**
+ * i_gid_update - update @inode's i_gid field
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
+ *
+ * Safely update @inode's i_gid field translating the vfsgid of any idmapped
+ * mount into the filesystem kgid.
+ */
+static inline void i_gid_update(struct user_namespace *mnt_userns,
+ const struct iattr *attr,
+ struct inode *inode)
+{
+ if (attr->ia_valid & ATTR_GID)
+ inode->i_gid = from_vfsgid(mnt_userns, i_user_ns(inode),
+ attr->ia_vfsgid);
+}
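A hedged sketch of a ->setattr() fragment using the helpers above; each call is a no-op unless the matching ATTR_UID/ATTR_GID bit is set in attr->ia_valid.

static void demo_setattr_copy_ids(struct user_namespace *mnt_userns,
				  struct inode *inode,
				  const struct iattr *attr)
{
	i_uid_update(mnt_userns, attr, inode);	/* no-op without ATTR_UID */
	i_gid_update(mnt_userns, attr, inode);	/* no-op without ATTR_GID */
}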
+
+/**
+ * inode_fsuid_set - initialize inode's i_uid field with caller's fsuid
+ * @inode: inode to initialize
+ * @mnt_userns: user namespace of the mount the inode was found from
+ *
+ * Initialize the i_uid field of @inode. If the inode was found/created via
+ * an idmapped mount, map the caller's fsuid according to @mnt_userns.
+ */
+static inline void inode_fsuid_set(struct inode *inode,
+ struct user_namespace *mnt_userns)
+{
+ inode->i_uid = mapped_fsuid(mnt_userns, i_user_ns(inode));
+}
+
+/**
+ * inode_fsgid_set - initialize inode's i_gid field with caller's fsgid
+ * @inode: inode to initialize
+ * @mnt_userns: user namespace of the mount the inode was found from
+ *
+ * Initialize the i_gid field of @inode. If the inode was found/created via
+ * an idmapped mount, map the caller's fsgid according to @mnt_userns.
+ */
+static inline void inode_fsgid_set(struct inode *inode,
+ struct user_namespace *mnt_userns)
+{
+ inode->i_gid = mapped_fsgid(mnt_userns, i_user_ns(inode));
+}
+
+/**
+ * fsuidgid_has_mapping() - check whether caller's fsuid/fsgid is mapped
+ * @sb: the superblock we want a mapping in
+ * @mnt_userns: user namespace of the relevant mount
+ *
+ * Check whether the caller's fsuid and fsgid have a valid mapping in the
+ * s_user_ns of the superblock @sb. If the caller is on an idmapped mount,
+ * map the caller's fsuid and fsgid according to @mnt_userns first.
+ *
+ * Return: true if the fsuid and fsgid are mapped, false if not.
+ */
+static inline bool fsuidgid_has_mapping(struct super_block *sb,
+ struct user_namespace *mnt_userns)
+{
+ struct user_namespace *fs_userns = sb->s_user_ns;
+ kuid_t kuid;
+ kgid_t kgid;
+
+ kuid = mapped_fsuid(mnt_userns, fs_userns);
+ if (!uid_valid(kuid))
+ return false;
+ kgid = mapped_fsgid(mnt_userns, fs_userns);
+ if (!gid_valid(kgid))
+ return false;
+ return kuid_has_mapping(fs_userns, kuid) &&
+ kgid_has_mapping(fs_userns, kgid);
}
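A sketch of the expected call-site shape, along the lines of a create-permission check; demo_may_create() is hypothetical.

static int demo_may_create(struct user_namespace *mnt_userns,
			   struct inode *dir)
{
	if (!fsuidgid_has_mapping(dir->i_sb, mnt_userns))
		return -EOVERFLOW;
	return 0;
}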
extern struct timespec64 current_time(struct inode *inode);
@@ -1582,14 +1812,35 @@ extern struct timespec64 current_time(struct inode *inode);
* Snapshotting support.
*/
-void __sb_end_write(struct super_block *sb, int level);
-int __sb_start_write(struct super_block *sb, int level, bool wait);
+/*
+ * These are internal functions; please use sb_start_{write,pagefault,intwrite}
+ * instead.
+ */
+static inline void __sb_end_write(struct super_block *sb, int level)
+{
+	percpu_up_read(sb->s_writers.rw_sem + level - 1);
+}
+
+static inline void __sb_start_write(struct super_block *sb, int level)
+{
+ percpu_down_read(sb->s_writers.rw_sem + level - 1);
+}
+
+static inline bool __sb_start_write_trylock(struct super_block *sb, int level)
+{
+ return percpu_down_read_trylock(sb->s_writers.rw_sem + level - 1);
+}
#define __sb_writers_acquired(sb, lev) \
percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
#define __sb_writers_release(sb, lev) \
percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
+static inline bool sb_write_started(const struct super_block *sb)
+{
+ return lockdep_is_held_type(sb->s_writers.rw_sem + SB_FREEZE_WRITE - 1, 1);
+}
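A minimal pairing sketch, hypothetical: sb_start_write() blocks while the superblock is frozen, and sb_write_started() gives a lockdep-backed assertion inside the bracket.

static void demo_modify_fs(struct super_block *sb)
{
	sb_start_write(sb);			/* waits while sb is frozen */
	WARN_ON_ONCE(!sb_write_started(sb));	/* meaningful with lockdep */
	/* ... dirty data or metadata here ... */
	sb_end_write(sb);
}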
+
/**
* sb_end_write - drop write access to a superblock
* @sb: the super we wrote to
@@ -1647,12 +1898,12 @@ static inline void sb_end_intwrite(struct super_block *sb)
*/
static inline void sb_start_write(struct super_block *sb)
{
- __sb_start_write(sb, SB_FREEZE_WRITE, true);
+ __sb_start_write(sb, SB_FREEZE_WRITE);
}
-static inline int sb_start_write_trylock(struct super_block *sb)
+static inline bool sb_start_write_trylock(struct super_block *sb)
{
- return __sb_start_write(sb, SB_FREEZE_WRITE, false);
+ return __sb_start_write_trylock(sb, SB_FREEZE_WRITE);
}
/**
@@ -1668,18 +1919,18 @@ static inline int sb_start_write_trylock(struct super_block *sb)
*
* Since page fault freeze protection behaves as a lock, users have to preserve
* ordering of freeze protection and other filesystem locks. It is advised to
- * put sb_start_pagefault() close to mmap_sem in lock ordering. Page fault
+ * put sb_start_pagefault() close to mmap_lock in lock ordering. Page fault
* handling code implies lock dependency:
*
- * mmap_sem
+ * mmap_lock
* -> sb_start_pagefault
*/
static inline void sb_start_pagefault(struct super_block *sb)
{
- __sb_start_write(sb, SB_FREEZE_PAGEFAULT, true);
+ __sb_start_write(sb, SB_FREEZE_PAGEFAULT);
}
-/*
+/**
* sb_start_intwrite - get write access to a superblock for internal fs purposes
* @sb: the super we write to
*
@@ -1694,37 +1945,77 @@ static inline void sb_start_pagefault(struct super_block *sb)
*/
static inline void sb_start_intwrite(struct super_block *sb)
{
- __sb_start_write(sb, SB_FREEZE_FS, true);
+ __sb_start_write(sb, SB_FREEZE_FS);
}
-static inline int sb_start_intwrite_trylock(struct super_block *sb)
+static inline bool sb_start_intwrite_trylock(struct super_block *sb)
{
- return __sb_start_write(sb, SB_FREEZE_FS, false);
+ return __sb_start_write_trylock(sb, SB_FREEZE_FS);
}
-
-extern bool inode_owner_or_capable(const struct inode *inode);
+bool inode_owner_or_capable(struct user_namespace *mnt_userns,
+ const struct inode *inode);
/*
* VFS helper functions..
*/
-extern int vfs_create(struct inode *, struct dentry *, umode_t, bool);
-extern int vfs_mkdir(struct inode *, struct dentry *, umode_t);
-extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
-extern int vfs_symlink(struct inode *, struct dentry *, const char *);
-extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct inode **);
-extern int vfs_rmdir(struct inode *, struct dentry *);
-extern int vfs_unlink(struct inode *, struct dentry *, struct inode **);
-extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int);
-extern int vfs_whiteout(struct inode *, struct dentry *);
+int vfs_create(struct user_namespace *, struct inode *,
+ struct dentry *, umode_t, bool);
+int vfs_mkdir(struct user_namespace *, struct inode *,
+ struct dentry *, umode_t);
+int vfs_mknod(struct user_namespace *, struct inode *, struct dentry *,
+ umode_t, dev_t);
+int vfs_symlink(struct user_namespace *, struct inode *,
+ struct dentry *, const char *);
+int vfs_link(struct dentry *, struct user_namespace *, struct inode *,
+ struct dentry *, struct inode **);
+int vfs_rmdir(struct user_namespace *, struct inode *, struct dentry *);
+int vfs_unlink(struct user_namespace *, struct inode *, struct dentry *,
+ struct inode **);
+
+/**
+ * struct renamedata - contains all information required for renaming
+ * @old_mnt_userns: old user namespace of the mount the inode was found from
+ * @old_dir: parent of source
+ * @old_dentry: source
+ * @new_mnt_userns: new user namespace of the mount the inode was found from
+ * @new_dir: parent of destination
+ * @new_dentry: destination
+ * @delegated_inode: returns an inode needing a delegation break
+ * @flags: rename flags
+ */
+struct renamedata {
+ struct user_namespace *old_mnt_userns;
+ struct inode *old_dir;
+ struct dentry *old_dentry;
+ struct user_namespace *new_mnt_userns;
+ struct inode *new_dir;
+ struct dentry *new_dentry;
+ struct inode **delegated_inode;
+ unsigned int flags;
+} __randomize_layout;
+
+int vfs_rename(struct renamedata *);
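A hedged sketch of the new calling convention: callers pack everything into struct renamedata; taking the usual rename locks remains their responsibility. demo_rename() is hypothetical.

static int demo_rename(struct user_namespace *mnt_userns,
		       struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct renamedata rd = {
		.old_mnt_userns	= mnt_userns,
		.old_dir	= old_dir,
		.old_dentry	= old_dentry,
		.new_mnt_userns	= mnt_userns,
		.new_dir	= new_dir,
		.new_dentry	= new_dentry,
	};

	return vfs_rename(&rd);
}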
+
+static inline int vfs_whiteout(struct user_namespace *mnt_userns,
+ struct inode *dir, struct dentry *dentry)
+{
+ return vfs_mknod(mnt_userns, dir, dentry, S_IFCHR | WHITEOUT_MODE,
+ WHITEOUT_DEV);
+}
-extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode,
- int open_flag);
+struct file *vfs_tmpfile_open(struct user_namespace *mnt_userns,
+ const struct path *parentpath,
+ umode_t mode, int open_flag, const struct cred *cred);
int vfs_mkobj(struct dentry *, umode_t,
int (*f)(struct dentry *, umode_t, void *),
void *);
+int vfs_fchown(struct file *file, uid_t user, gid_t group);
+int vfs_fchmod(struct file *file, umode_t mode);
+int vfs_utimes(const struct path *path, struct timespec64 *times);
+
extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_COMPAT
@@ -1737,31 +2028,21 @@ extern long compat_ptr_ioctl(struct file *file, unsigned int cmd,
/*
* VFS file helper functions.
*/
-extern void inode_init_owner(struct inode *inode, const struct inode *dir,
- umode_t mode);
+void inode_init_owner(struct user_namespace *mnt_userns, struct inode *inode,
+ const struct inode *dir, umode_t mode);
extern bool may_open_dev(const struct path *path);
-/*
- * VFS FS_IOC_FIEMAP helper definitions.
- */
-struct fiemap_extent_info {
- unsigned int fi_flags; /* Flags as passed from user */
- unsigned int fi_extents_mapped; /* Number of mapped extents */
- unsigned int fi_extents_max; /* Size of fiemap_extent array */
- struct fiemap_extent __user *fi_extents_start; /* Start of
- fiemap_extent array */
-};
-int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
- u64 phys, u64 len, u32 flags);
-int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);
+umode_t mode_strip_sgid(struct user_namespace *mnt_userns,
+ const struct inode *dir, umode_t mode);
/*
* This is the "filldir" function type, used by readdir() to let
* the kernel specify what kind of dirent layout it wants to have.
* This allows the kernel to read directories into kernel space or
* to have different dirent layouts depending on the binary type.
+ * Return 'true' to keep going and 'false' if there are no more entries.
*/
struct dir_context;
-typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
+typedef bool (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
unsigned);
struct dir_context {
@@ -1769,14 +2050,6 @@ struct dir_context {
loff_t pos;
};
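A sketch of a callback under the new contract, where returning false stops iteration; demo_filldir() is hypothetical.

static bool demo_filldir(struct dir_context *ctx, const char *name,
			 int namlen, loff_t offset, u64 ino,
			 unsigned int d_type)
{
	/* consume a single entry, e.g. copy @name out, then stop */
	return false;
}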
-struct block_device_operations;
-
-/* These macros are for out of kernel modules to test that
- * the kernel supports the unlocked_ioctl and compat_ioctl
- * fields in struct file_operations. */
-#define HAVE_COMPAT_IOCTL 1
-#define HAVE_UNLOCKED_IOCTL 1
-
/*
* These flags let !MMU mmap() govern direct device mapping vs immediate
* copying more easily for MAP_PRIVATE, especially for ROM filesystems.
@@ -1817,6 +2090,7 @@ struct block_device_operations;
#define REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN)
struct iov_iter;
+struct io_uring_cmd;
struct file_operations {
struct module *owner;
@@ -1825,7 +2099,8 @@ struct file_operations {
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
- int (*iopoll)(struct kiocb *kiocb, bool spin);
+ int (*iopoll)(struct kiocb *kiocb, struct io_comp_batch *,
+ unsigned int flags);
int (*iterate) (struct file *, struct dir_context *);
int (*iterate_shared) (struct file *, struct dir_context *);
__poll_t (*poll) (struct file *, struct poll_table_struct *);
@@ -1858,27 +2133,36 @@ struct file_operations {
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags);
int (*fadvise)(struct file *, loff_t, loff_t, int);
+ int (*uring_cmd)(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
+ int (*uring_cmd_iopoll)(struct io_uring_cmd *, struct io_comp_batch *,
+ unsigned int poll_flags);
} __randomize_layout;
struct inode_operations {
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *);
- int (*permission) (struct inode *, int);
- struct posix_acl * (*get_acl)(struct inode *, int);
+ int (*permission) (struct user_namespace *, struct inode *, int);
+ struct posix_acl * (*get_acl)(struct inode *, int, bool);
int (*readlink) (struct dentry *, char __user *,int);
- int (*create) (struct inode *,struct dentry *, umode_t, bool);
+ int (*create) (struct user_namespace *, struct inode *,struct dentry *,
+ umode_t, bool);
int (*link) (struct dentry *,struct inode *,struct dentry *);
int (*unlink) (struct inode *,struct dentry *);
- int (*symlink) (struct inode *,struct dentry *,const char *);
- int (*mkdir) (struct inode *,struct dentry *,umode_t);
+ int (*symlink) (struct user_namespace *, struct inode *,struct dentry *,
+ const char *);
+ int (*mkdir) (struct user_namespace *, struct inode *,struct dentry *,
+ umode_t);
int (*rmdir) (struct inode *,struct dentry *);
- int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
- int (*rename) (struct inode *, struct dentry *,
+ int (*mknod) (struct user_namespace *, struct inode *,struct dentry *,
+ umode_t,dev_t);
+ int (*rename) (struct user_namespace *, struct inode *, struct dentry *,
struct inode *, struct dentry *, unsigned int);
- int (*setattr) (struct dentry *, struct iattr *);
- int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
+ int (*setattr) (struct user_namespace *, struct dentry *,
+ struct iattr *);
+ int (*getattr) (struct user_namespace *, const struct path *,
+ struct kstat *, u32, unsigned int);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
@@ -1886,8 +2170,13 @@ struct inode_operations {
int (*atomic_open)(struct inode *, struct dentry *,
struct file *, unsigned open_flag,
umode_t create_mode);
- int (*tmpfile) (struct inode *, struct dentry *, umode_t);
- int (*set_acl)(struct inode *, struct posix_acl *, int);
+ int (*tmpfile) (struct user_namespace *, struct inode *,
+ struct file *, umode_t);
+ int (*set_acl)(struct user_namespace *, struct inode *,
+ struct posix_acl *, int);
+ int (*fileattr_set)(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct fileattr *fa);
+ int (*fileattr_get)(struct dentry *dentry, struct fileattr *fa);
} ____cacheline_aligned;
static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio,
@@ -1907,25 +2196,20 @@ static inline int call_mmap(struct file *file, struct vm_area_struct *vma)
return file->f_op->mmap(file, vma);
}
-ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
- unsigned long nr_segs, unsigned long fast_segs,
- struct iovec *fast_pointer,
- struct iovec **ret_pointer);
-
-extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
-extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
- unsigned long, loff_t *, rwf_t);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
loff_t, size_t, unsigned int);
extern ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
size_t len, unsigned int flags);
-extern int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- loff_t *count,
- unsigned int remap_flags);
+int __generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *len, unsigned int remap_flags,
+ const struct iomap_ops *dax_read_ops);
+int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *count, unsigned int remap_flags);
extern loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags);
@@ -1967,7 +2251,6 @@ struct super_operations {
ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
struct dquot **(*get_dquots)(struct inode *);
#endif
- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
long (*nr_cached_objects)(struct super_block *,
struct shrink_control *);
long (*free_cached_objects)(struct super_block *,
@@ -1977,27 +2260,28 @@ struct super_operations {
/*
* Inode flags - they have no relation to superblock flags now
*/
-#define S_SYNC 1 /* Writes are synced at once */
-#define S_NOATIME 2 /* Do not update access times */
-#define S_APPEND 4 /* Append-only file */
-#define S_IMMUTABLE 8 /* Immutable file */
-#define S_DEAD 16 /* removed, but still open directory */
-#define S_NOQUOTA 32 /* Inode is not counted to quota */
-#define S_DIRSYNC 64 /* Directory modifications are synchronous */
-#define S_NOCMTIME 128 /* Do not update file c/mtime */
-#define S_SWAPFILE 256 /* Do not truncate: swapon got its bmaps */
-#define S_PRIVATE 512 /* Inode is fs-internal */
-#define S_IMA 1024 /* Inode has an associated IMA struct */
-#define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */
-#define S_NOSEC 4096 /* no suid or xattr security attributes */
+#define S_SYNC (1 << 0) /* Writes are synced at once */
+#define S_NOATIME (1 << 1) /* Do not update access times */
+#define S_APPEND (1 << 2) /* Append-only file */
+#define S_IMMUTABLE (1 << 3) /* Immutable file */
+#define S_DEAD (1 << 4) /* removed, but still open directory */
+#define S_NOQUOTA (1 << 5) /* Inode is not counted to quota */
+#define S_DIRSYNC (1 << 6) /* Directory modifications are synchronous */
+#define S_NOCMTIME (1 << 7) /* Do not update file c/mtime */
+#define S_SWAPFILE (1 << 8) /* Do not truncate: swapon got its bmaps */
+#define S_PRIVATE (1 << 9) /* Inode is fs-internal */
+#define S_IMA (1 << 10) /* Inode has an associated IMA struct */
+#define S_AUTOMOUNT (1 << 11) /* Automount/referral quasi-directory */
+#define S_NOSEC (1 << 12) /* no suid or xattr security attributes */
#ifdef CONFIG_FS_DAX
-#define S_DAX 8192 /* Direct Access, avoiding the page cache */
+#define S_DAX (1 << 13) /* Direct Access, avoiding the page cache */
#else
-#define S_DAX 0 /* Make all the DAX code disappear */
+#define S_DAX 0 /* Make all the DAX code disappear */
#endif
-#define S_ENCRYPTED 16384 /* Encrypted file (using fs/crypto/) */
-#define S_CASEFOLD 32768 /* Casefolded file */
-#define S_VERITY 65536 /* Verity file (using fs/verity/) */
+#define S_ENCRYPTED (1 << 14) /* Encrypted file (using fs/crypto/) */
+#define S_CASEFOLD (1 << 15) /* Casefolded file */
+#define S_VERITY (1 << 16) /* Verity file (using fs/verity/) */
+#define S_KERNEL_FILE (1 << 17) /* File is in use by the kernel (eg. fs/cachefiles) */
/*
* Note that nosuid etc flags are inode-specific: setting some file-system
@@ -2044,36 +2328,18 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags
#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
(inode)->i_rdev == WHITEOUT_DEV)
-static inline bool HAS_UNMAPPED_ID(struct inode *inode)
-{
- return !uid_valid(inode->i_uid) || !gid_valid(inode->i_gid);
-}
-
-static inline enum rw_hint file_write_hint(struct file *file)
+static inline bool HAS_UNMAPPED_ID(struct user_namespace *mnt_userns,
+ struct inode *inode)
{
- if (file->f_write_hint != WRITE_LIFE_NOT_SET)
- return file->f_write_hint;
-
- return file_inode(file)->i_write_hint;
-}
-
-static inline int iocb_flags(struct file *file);
-
-static inline u16 ki_hint_validate(enum rw_hint hint)
-{
- typeof(((struct kiocb *)0)->ki_hint) max_hint = -1;
-
- if (hint <= max_hint)
- return hint;
- return 0;
+ return !vfsuid_valid(i_uid_into_vfsuid(mnt_userns, inode)) ||
+ !vfsgid_valid(i_gid_into_vfsgid(mnt_userns, inode));
}
static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
*kiocb = (struct kiocb) {
.ki_filp = filp,
- .ki_flags = iocb_flags(filp),
- .ki_hint = ki_hint_validate(file_write_hint(filp)),
+ .ki_flags = filp->f_iocb_flags,
.ki_ioprio = get_current_ioprio(),
};
}
@@ -2084,7 +2350,6 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
*kiocb = (struct kiocb) {
.ki_filp = filp,
.ki_flags = kiocb_src->ki_flags,
- .ki_hint = kiocb_src->ki_hint,
.ki_ioprio = kiocb_src->ki_ioprio,
.ki_pos = kiocb_src->ki_pos,
};
@@ -2093,8 +2358,8 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
/*
* Inode state bits. Protected by inode->i_lock
*
- * Three bits determine the dirty state of the inode, I_DIRTY_SYNC,
- * I_DIRTY_DATASYNC and I_DIRTY_PAGES.
+ * Four bits determine the dirty state of the inode: I_DIRTY_SYNC,
+ * I_DIRTY_DATASYNC, I_DIRTY_PAGES, and I_DIRTY_TIME.
*
* Four bits define the lifetime of an inode. Initially, inodes are I_NEW,
* until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at
@@ -2103,12 +2368,21 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
* Two bits are used for locking and completion notification, I_NEW and I_SYNC.
*
* I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on
- * fdatasync(). i_atime is the usual cause.
- * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of
+ * fdatasync() (unless I_DIRTY_DATASYNC is also set).
+ * Timestamp updates are the usual cause.
+ * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of
* these changes separately from I_DIRTY_SYNC so that we
* don't have to write inode on fdatasync() when only
- * mtime has changed in it.
+ * e.g. the timestamps have changed.
* I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean.
+ * I_DIRTY_TIME The inode itself has dirty timestamps, and the
+ * lazytime mount option is enabled. We keep track of this
+ * separately from I_DIRTY_SYNC in order to implement
+ * lazytime. This gets cleared if I_DIRTY_INODE
+ * (I_DIRTY_SYNC and/or I_DIRTY_DATASYNC) gets set. But
+ * I_DIRTY_TIME can still be set if I_DIRTY_SYNC is already
+ * in place because writeback might already be in progress
+ *			and we don't want to lose the time update.
* I_NEW Serves as both a mutex and completion notification.
* New inodes set I_NEW. If two processes both create
* the same inode, one of them will release its inode and
@@ -2151,6 +2425,14 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
*
* I_CREATING New object's inode in the middle of setting up.
*
+ * I_DONTCACHE Evict inode as soon as it is not used anymore.
+ *
+ * I_SYNC_QUEUED Inode is queued in b_io or b_more_io writeback lists.
+ * Used to detect that mark_inode_dirty() should not move
+ * inode between dirty lists.
+ *
+ * I_PINNING_FSCACHE_WB Inode is pinning an fscache object for writeback.
+ *
* Q: What is the difference between I_WILL_FREE and I_FREEING?
*/
#define I_DIRTY_SYNC (1 << 0)
@@ -2168,11 +2450,12 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
#define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP)
#define I_LINKABLE (1 << 10)
#define I_DIRTY_TIME (1 << 11)
-#define __I_DIRTY_TIME_EXPIRED 12
-#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED)
#define I_WB_SWITCH (1 << 13)
#define I_OVL_INUSE (1 << 14)
#define I_CREATING (1 << 15)
+#define I_DONTCACHE (1 << 16)
+#define I_SYNC_QUEUED (1 << 17)
+#define I_PINNING_FSCACHE_WB (1 << 18)
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
@@ -2189,6 +2472,21 @@ static inline void mark_inode_dirty_sync(struct inode *inode)
__mark_inode_dirty(inode, I_DIRTY_SYNC);
}
+/*
+ * Returns true if the given inode itself only has dirty timestamps (its pages
+ * may still be dirty) and isn't currently being allocated or freed.
+ * Filesystems should call this if, when writing an inode with lazytime
+ * enabled, they want to opportunistically write the timestamps of other inodes
+ * located very nearby on-disk, e.g. in the same inode block. This returns true
+ * if the given inode is in need of such an opportunistic update. Requires
+ * i_lock, or at least later re-checking under i_lock.
+ */
+static inline bool inode_is_dirtytime_only(struct inode *inode)
+{
+ return (inode->i_state & (I_DIRTY_TIME | I_NEW |
+ I_FREEING | I_WILL_FREE)) == I_DIRTY_TIME;
+}
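A minimal sketch of the locking requirement stated above; the helper name is hypothetical.

static bool demo_wants_timestamp_piggyback(struct inode *inode)
{
	bool ret;

	spin_lock(&inode->i_lock);
	ret = inode_is_dirtytime_only(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}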
+
extern void inc_nlink(struct inode *inode);
extern void drop_nlink(struct inode *inode);
extern void clear_nlink(struct inode *inode);
@@ -2215,6 +2513,8 @@ enum file_time_flags {
extern bool atime_needs_update(const struct path *, struct inode *);
extern void touch_atime(const struct path *);
+int inode_update_time(struct inode *inode, struct timespec64 *time, int flags);
+
static inline void file_accessed(struct file *file)
{
if (!(file->f_flags & O_NOATIME))
@@ -2222,8 +2522,8 @@ static inline void file_accessed(struct file *file)
}
extern int file_modified(struct file *file);
+int kiocb_modified(struct kiocb *iocb);
-int sync_inode(struct inode *inode, struct writeback_control *wbc);
int sync_inode_metadata(struct inode *inode, int wait);
struct file_system_type {
@@ -2234,6 +2534,7 @@ struct file_system_type {
#define FS_HAS_SUBTYPE 4
#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */
+#define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
int (*init_fs_context)(struct fs_context *);
const struct fs_parameter_spec *parameters;
@@ -2251,23 +2552,15 @@ struct file_system_type {
struct lock_class_key i_lock_key;
struct lock_class_key i_mutex_key;
+ struct lock_class_key invalidate_lock_key;
struct lock_class_key i_mutex_dir_key;
};
#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)
-#ifdef CONFIG_BLOCK
extern struct dentry *mount_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
int (*fill_super)(struct super_block *, void *, int));
-#else
-static inline struct dentry *mount_bdev(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data,
- int (*fill_super)(struct super_block *, void *, int))
-{
- return ERR_PTR(-ENODEV);
-}
-#endif
extern struct dentry *mount_single(struct file_system_type *fs_type,
int flags, void *data,
int (*fill_super)(struct super_block *, void *, int));
@@ -2275,15 +2568,9 @@ extern struct dentry *mount_nodev(struct file_system_type *fs_type,
int flags, void *data,
int (*fill_super)(struct super_block *, void *, int));
extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path);
+void retire_super(struct super_block *sb);
void generic_shutdown_super(struct super_block *sb);
-#ifdef CONFIG_BLOCK
void kill_block_super(struct super_block *sb);
-#else
-static inline void kill_block_super(struct super_block *sb)
-{
- BUG();
-}
-#endif
void kill_anon_super(struct super_block *sb);
void kill_litter_super(struct super_block *sb);
void deactivate_super(struct super_block *sb);
@@ -2319,22 +2606,11 @@ struct super_block *sget(struct file_system_type *type,
extern int register_filesystem(struct file_system_type *);
extern int unregister_filesystem(struct file_system_type *);
-extern struct vfsmount *kern_mount(struct file_system_type *);
-extern void kern_unmount(struct vfsmount *mnt);
-extern int may_umount_tree(struct vfsmount *);
-extern int may_umount(struct vfsmount *);
-extern long do_mount(const char *, const char __user *,
- const char *, unsigned long, void *);
-extern struct vfsmount *collect_mounts(const struct path *);
-extern void drop_collected_mounts(struct vfsmount *);
-extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
- struct vfsmount *);
extern int vfs_statfs(const struct path *, struct kstatfs *);
extern int user_statfs(const char __user *, struct kstatfs *);
extern int fd_statfs(int, struct kstatfs *);
extern int freeze_super(struct super_block *super);
extern int thaw_super(struct super_block *super);
-extern bool our_mnt(struct vfsmount *mnt);
extern __printf(2, 3)
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...);
extern int super_setup_bdi(struct super_block *sb);
@@ -2350,90 +2626,6 @@ extern struct kobject *fs_kobj;
#define MAX_RW_COUNT (INT_MAX & PAGE_MASK)
-#ifdef CONFIG_MANDATORY_FILE_LOCKING
-extern int locks_mandatory_locked(struct file *);
-extern int locks_mandatory_area(struct inode *, struct file *, loff_t, loff_t, unsigned char);
-
-/*
- * Candidates for mandatory locking have the setgid bit set
- * but no group execute bit - an otherwise meaningless combination.
- */
-
-static inline int __mandatory_lock(struct inode *ino)
-{
- return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID;
-}
-
-/*
- * ... and these candidates should be on SB_MANDLOCK mounted fs,
- * otherwise these will be advisory locks
- */
-
-static inline int mandatory_lock(struct inode *ino)
-{
- return IS_MANDLOCK(ino) && __mandatory_lock(ino);
-}
-
-static inline int locks_verify_locked(struct file *file)
-{
- if (mandatory_lock(locks_inode(file)))
- return locks_mandatory_locked(file);
- return 0;
-}
-
-static inline int locks_verify_truncate(struct inode *inode,
- struct file *f,
- loff_t size)
-{
- if (!inode->i_flctx || !mandatory_lock(inode))
- return 0;
-
- if (size < inode->i_size) {
- return locks_mandatory_area(inode, f, size, inode->i_size - 1,
- F_WRLCK);
- } else {
- return locks_mandatory_area(inode, f, inode->i_size, size - 1,
- F_WRLCK);
- }
-}
-
-#else /* !CONFIG_MANDATORY_FILE_LOCKING */
-
-static inline int locks_mandatory_locked(struct file *file)
-{
- return 0;
-}
-
-static inline int locks_mandatory_area(struct inode *inode, struct file *filp,
- loff_t start, loff_t end, unsigned char type)
-{
- return 0;
-}
-
-static inline int __mandatory_lock(struct inode *inode)
-{
- return 0;
-}
-
-static inline int mandatory_lock(struct inode *inode)
-{
- return 0;
-}
-
-static inline int locks_verify_locked(struct file *file)
-{
- return 0;
-}
-
-static inline int locks_verify_truncate(struct inode *inode, struct file *filp,
- size_t size)
-{
- return 0;
-}
-
-#endif /* CONFIG_MANDATORY_FILE_LOCKING */
-
-
#ifdef CONFIG_FILE_LOCKING
static inline int break_lease(struct inode *inode, unsigned int mode)
{
@@ -2535,18 +2727,45 @@ struct filename {
};
static_assert(offsetof(struct filename, iname) % sizeof(long) == 0);
+static inline struct user_namespace *file_mnt_user_ns(struct file *file)
+{
+ return mnt_user_ns(file->f_path.mnt);
+}
+
+/**
+ * is_idmapped_mnt - check whether a mount is idmapped
+ * @mnt: the mount to check
+ *
+ * If @mnt has an idmapping attached that differs from the
+ * filesystem's idmapping then @mnt is idmapped.
+ *
+ * Return: true if the mount is idmapped, false if not.
+ */
+static inline bool is_idmapped_mnt(const struct vfsmount *mnt)
+{
+ return mnt_user_ns(mnt) != mnt->mnt_sb->s_user_ns;
+}
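A one-line usage sketch with a hypothetical helper, e.g. to enter idmapping-aware code only when needed:

static bool demo_needs_idmap_handling(const struct file *file)
{
	return is_idmapped_mnt(file->f_path.mnt);
}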
+
extern long vfs_truncate(const struct path *, loff_t);
-extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
- struct file *filp);
+int do_truncate(struct user_namespace *, struct dentry *, loff_t start,
+ unsigned int time_attrs, struct file *filp);
extern int vfs_fallocate(struct file *file, int mode, loff_t offset,
loff_t len);
extern long do_sys_open(int dfd, const char __user *filename, int flags,
umode_t mode);
extern struct file *file_open_name(struct filename *, int, umode_t);
extern struct file *filp_open(const char *, int, umode_t);
-extern struct file *file_open_root(struct dentry *, struct vfsmount *,
+extern struct file *file_open_root(const struct path *,
const char *, int, umode_t);
+static inline struct file *file_open_root_mnt(struct vfsmount *mnt,
+ const char *name, int flags, umode_t mode)
+{
+ return file_open_root(&(struct path){.mnt = mnt, .dentry = mnt->mnt_root},
+ name, flags, mode);
+}
extern struct file * dentry_open(const struct path *, int, const struct cred *);
+extern struct file *dentry_create(const struct path *path, int flags,
+ umode_t mode, const struct cred *cred);
extern struct file * open_with_fake_path(const struct path *, int,
struct inode*, const struct cred *);
static inline struct file *file_clone_open(struct file *file)
@@ -2556,6 +2775,7 @@ static inline struct file *file_clone_open(struct file *file)
extern int filp_close(struct file *, fl_owner_t id);
extern struct filename *getname_flags(const char __user *, int, int *);
+extern struct filename *getname_uflags(const char __user *, int);
extern struct filename *getname(const char __user *);
extern struct filename *getname_kernel(const char *);
extern void putname(struct filename *name);
@@ -2564,6 +2784,15 @@ extern int finish_open(struct file *file, struct dentry *dentry,
int (*open)(struct inode *, struct file *));
extern int finish_no_open(struct file *file, struct dentry *dentry);
+/* Helper for the simple case when original dentry is used */
+static inline int finish_open_simple(struct file *file, int error)
+{
+ if (error)
+ return error;
+
+ return finish_open(file, file->f_path.dentry, NULL);
+}
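A hedged sketch of the helper inside a ->tmpfile() implementation, matching the new signature earlier in this patch; demo_tmpfile() is hypothetical.

static int demo_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
			struct file *file, umode_t mode)
{
	int error = 0;

	/* ... allocate an inode and attach it to file->f_path.dentry ... */
	return finish_open_simple(file, error);
}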
+
/* fs/dcache.c */
extern void __init vfs_caches_init_early(void);
extern void __init vfs_caches_init(void);
@@ -2573,97 +2802,16 @@ extern struct kmem_cache *names_cachep;
#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL)
#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
-#ifdef CONFIG_BLOCK
-extern int register_blkdev(unsigned int, const char *);
-extern void unregister_blkdev(unsigned int, const char *);
-extern void bdev_unhash_inode(dev_t dev);
-extern struct block_device *bdget(dev_t);
-extern struct block_device *bdgrab(struct block_device *bdev);
-extern void bd_set_size(struct block_device *, loff_t size);
-extern void bd_forget(struct inode *inode);
-extern void bdput(struct block_device *);
-extern void invalidate_bdev(struct block_device *);
-extern void iterate_bdevs(void (*)(struct block_device *, void *), void *);
-extern int sync_blockdev(struct block_device *bdev);
-extern void kill_bdev(struct block_device *);
-extern struct super_block *freeze_bdev(struct block_device *);
-extern void emergency_thaw_all(void);
-extern void emergency_thaw_bdev(struct super_block *sb);
-extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
-extern int fsync_bdev(struct block_device *);
-
extern struct super_block *blockdev_superblock;
-
static inline bool sb_is_blkdev_sb(struct super_block *sb)
{
- return sb == blockdev_superblock;
-}
-#else
-static inline void bd_forget(struct inode *inode) {}
-static inline int sync_blockdev(struct block_device *bdev) { return 0; }
-static inline void kill_bdev(struct block_device *bdev) {}
-static inline void invalidate_bdev(struct block_device *bdev) {}
-
-static inline struct super_block *freeze_bdev(struct block_device *sb)
-{
- return NULL;
-}
-
-static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
-{
- return 0;
-}
-
-static inline int emergency_thaw_bdev(struct super_block *sb)
-{
- return 0;
+ return IS_ENABLED(CONFIG_BLOCK) && sb == blockdev_superblock;
}
-static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg)
-{
-}
-
-static inline bool sb_is_blkdev_sb(struct super_block *sb)
-{
- return false;
-}
-#endif
+void emergency_thaw_all(void);
extern int sync_filesystem(struct super_block *);
extern const struct file_operations def_blk_fops;
extern const struct file_operations def_chr_fops;
-#ifdef CONFIG_BLOCK
-extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
-extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
-extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
-extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder);
-extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
- void *holder);
-extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
- void *holder);
-extern struct block_device *bd_start_claiming(struct block_device *bdev,
- void *holder);
-extern void bd_finish_claiming(struct block_device *bdev,
- struct block_device *whole, void *holder);
-extern void bd_abort_claiming(struct block_device *bdev,
- struct block_device *whole, void *holder);
-extern void blkdev_put(struct block_device *bdev, fmode_t mode);
-
-#ifdef CONFIG_SYSFS
-extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
-extern void bd_unlink_disk_holder(struct block_device *bdev,
- struct gendisk *disk);
-#else
-static inline int bd_link_disk_holder(struct block_device *bdev,
- struct gendisk *disk)
-{
- return 0;
-}
-static inline void bd_unlink_disk_holder(struct block_device *bdev,
- struct gendisk *disk)
-{
-}
-#endif
-#endif
/* fs/char_dev.c */
#define CHRDEV_MAJOR_MAX 512
@@ -2694,75 +2842,12 @@ static inline void unregister_chrdev(unsigned int major, const char *name)
__unregister_chrdev(major, 0, 256, name);
}
-/* fs/block_dev.c */
-#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
-#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
-
-#ifdef CONFIG_BLOCK
-#define BLKDEV_MAJOR_MAX 512
-extern const char *__bdevname(dev_t, char *buffer);
-extern const char *bdevname(struct block_device *bdev, char *buffer);
-extern struct block_device *lookup_bdev(const char *);
-extern void blkdev_show(struct seq_file *,off_t);
-
-#else
-#define BLKDEV_MAJOR_MAX 0
-#endif
-
extern void init_special_inode(struct inode *, umode_t, dev_t);
/* Invalid inode operations -- fs/bad_inode.c */
extern void make_bad_inode(struct inode *);
extern bool is_bad_inode(struct inode *);
-#ifdef CONFIG_BLOCK
-extern int revalidate_disk(struct gendisk *);
-extern int check_disk_change(struct block_device *);
-extern int __invalidate_device(struct block_device *, bool);
-extern int invalidate_partition(struct gendisk *, int);
-#endif
-unsigned long invalidate_mapping_pages(struct address_space *mapping,
- pgoff_t start, pgoff_t end);
-
-static inline void invalidate_remote_inode(struct inode *inode)
-{
- if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- S_ISLNK(inode->i_mode))
- invalidate_mapping_pages(inode->i_mapping, 0, -1);
-}
-extern int invalidate_inode_pages2(struct address_space *mapping);
-extern int invalidate_inode_pages2_range(struct address_space *mapping,
- pgoff_t start, pgoff_t end);
-extern int write_inode_now(struct inode *, int);
-extern int filemap_fdatawrite(struct address_space *);
-extern int filemap_flush(struct address_space *);
-extern int filemap_fdatawait_keep_errors(struct address_space *mapping);
-extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
- loff_t lend);
-extern int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
- loff_t start_byte, loff_t end_byte);
-
-static inline int filemap_fdatawait(struct address_space *mapping)
-{
- return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
-}
-
-extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
- loff_t lend);
-extern int filemap_write_and_wait_range(struct address_space *mapping,
- loff_t lstart, loff_t lend);
-extern int __filemap_fdatawrite_range(struct address_space *mapping,
- loff_t start, loff_t end, int sync_mode);
-extern int filemap_fdatawrite_range(struct address_space *mapping,
- loff_t start, loff_t end);
-extern int filemap_check_errors(struct address_space *mapping);
-extern void __filemap_set_wb_err(struct address_space *mapping, int err);
-
-static inline int filemap_write_and_wait(struct address_space *mapping)
-{
- return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
-}
-
extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
loff_t lend);
extern int __must_check file_check_and_advance_wb_err(struct file *file);
@@ -2774,82 +2859,6 @@ static inline int file_write_and_wait(struct file *file)
return file_write_and_wait_range(file, 0, LLONG_MAX);
}
-/**
- * filemap_set_wb_err - set a writeback error on an address_space
- * @mapping: mapping in which to set writeback error
- * @err: error to be set in mapping
- *
- * When writeback fails in some way, we must record that error so that
- * userspace can be informed when fsync and the like are called. We endeavor
- * to report errors on any file that was open at the time of the error. Some
- * internal callers also need to know when writeback errors have occurred.
- *
- * When a writeback error occurs, most filesystems will want to call
- * filemap_set_wb_err to record the error in the mapping so that it will be
- * automatically reported whenever fsync is called on the file.
- */
-static inline void filemap_set_wb_err(struct address_space *mapping, int err)
-{
- /* Fastpath for common case of no error */
- if (unlikely(err))
- __filemap_set_wb_err(mapping, err);
-}
-
-/**
- * filemap_check_wb_error - has an error occurred since the mark was sampled?
- * @mapping: mapping to check for writeback errors
- * @since: previously-sampled errseq_t
- *
- * Grab the errseq_t value from the mapping, and see if it has changed "since"
- * the given value was sampled.
- *
- * If it has then report the latest error set, otherwise return 0.
- */
-static inline int filemap_check_wb_err(struct address_space *mapping,
- errseq_t since)
-{
- return errseq_check(&mapping->wb_err, since);
-}
-
-/**
- * filemap_sample_wb_err - sample the current errseq_t to test for later errors
- * @mapping: mapping to be sampled
- *
- * Writeback errors are always reported relative to a particular sample point
- * in the past. This function provides those sample points.
- */
-static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
-{
- return errseq_sample(&mapping->wb_err);
-}
-
-static inline int filemap_nr_thps(struct address_space *mapping)
-{
-#ifdef CONFIG_READ_ONLY_THP_FOR_FS
- return atomic_read(&mapping->nr_thps);
-#else
- return 0;
-#endif
-}
-
-static inline void filemap_nr_thps_inc(struct address_space *mapping)
-{
-#ifdef CONFIG_READ_ONLY_THP_FOR_FS
- atomic_inc(&mapping->nr_thps);
-#else
- WARN_ON_ONCE(1);
-#endif
-}
-
-static inline void filemap_nr_thps_dec(struct address_space *mapping)
-{
-#ifdef CONFIG_READ_ONLY_THP_FOR_FS
- atomic_dec(&mapping->nr_thps);
-#else
- WARN_ON_ONCE(1);
-#endif
-}
-
extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
int datasync);
extern int vfs_fsync(struct file *file, int datasync);
@@ -2857,6 +2866,12 @@ extern int vfs_fsync(struct file *file, int datasync);
extern int sync_file_range(struct file *file, loff_t offset, loff_t nbytes,
unsigned int flags);
+static inline bool iocb_is_dsync(const struct kiocb *iocb)
+{
+ return (iocb->ki_flags & IOCB_DSYNC) ||
+ IS_SYNC(iocb->ki_filp->f_mapping->host);
+}
+
/*
* Sync the bytes written if this was a synchronous write. Expect ki_pos
* to already be updated for the write, and will return either the amount
@@ -2864,7 +2879,7 @@ extern int sync_file_range(struct file *file, loff_t offset, loff_t nbytes,
*/
static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
{
- if (iocb->ki_flags & IOCB_DSYNC) {
+ if (iocb_is_dsync(iocb)) {
int ret = vfs_fsync_range(iocb->ki_filp,
iocb->ki_pos - count, iocb->ki_pos - 1,
(iocb->ki_flags & IOCB_SYNC) ? 0 : 1);
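A hedged sketch of the usual call site: a ->write_iter() tail hands the byte count to generic_write_sync(), which only syncs when iocb_is_dsync() says so; demo_write_iter_tail() is hypothetical.

static ssize_t demo_write_iter_tail(struct kiocb *iocb, ssize_t written)
{
	if (written <= 0)
		return written;
	/* syncs [ki_pos - written, ki_pos) only for (d)sync writes */
	return generic_write_sync(iocb, written);
}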
@@ -2887,28 +2902,45 @@ static inline int bmap(struct inode *inode, sector_t *block)
}
#endif
-extern int notify_change(struct dentry *, struct iattr *, struct inode **);
-extern int inode_permission(struct inode *, int);
-extern int generic_permission(struct inode *, int);
-extern int __check_sticky(struct inode *dir, struct inode *inode);
+int notify_change(struct user_namespace *, struct dentry *,
+ struct iattr *, struct inode **);
+int inode_permission(struct user_namespace *, struct inode *, int);
+int generic_permission(struct user_namespace *, struct inode *, int);
+static inline int file_permission(struct file *file, int mask)
+{
+ return inode_permission(file_mnt_user_ns(file),
+ file_inode(file), mask);
+}
+static inline int path_permission(const struct path *path, int mask)
+{
+ return inode_permission(mnt_user_ns(path->mnt),
+ d_inode(path->dentry), mask);
+}
+int __check_sticky(struct user_namespace *mnt_userns, struct inode *dir,
+ struct inode *inode);
static inline bool execute_ok(struct inode *inode)
{
return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode);
}
+static inline bool inode_wrong_type(const struct inode *inode, umode_t mode)
+{
+ return (inode->i_mode ^ mode) & S_IFMT;
+}
+
static inline void file_start_write(struct file *file)
{
if (!S_ISREG(file_inode(file)->i_mode))
return;
- __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true);
+ sb_start_write(file_inode(file)->i_sb);
}
static inline bool file_start_write_trylock(struct file *file)
{
if (!S_ISREG(file_inode(file)->i_mode))
return true;
- return __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, false);
+ return sb_start_write_trylock(file_inode(file)->i_sb);
}
static inline void file_end_write(struct file *file)
@@ -2919,15 +2951,20 @@ static inline void file_end_write(struct file *file)
}
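A pairing sketch roughly mirroring kernel_write(): freeze protection brackets the actual write, and file_start_write() itself skips non-regular files; demo_kernel_write() is hypothetical.

static ssize_t demo_kernel_write(struct file *file, const void *buf,
				 size_t count, loff_t *pos)
{
	ssize_t ret;

	file_start_write(file);
	ret = __kernel_write(file, buf, count, pos);
	file_end_write(file);
	return ret;
}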
/*
+ * This is used for regular files where some users -- especially the
+ * currently executed binary in a process, previously handled via
+ * VM_DENYWRITE -- cannot handle concurrent write (and maybe mmap
+ * read-write shared) accesses.
+ *
* get_write_access() gets write permission for a file.
* put_write_access() releases this write permission.
- * This is used for regular files.
- * We cannot support write (and maybe mmap read-write shared) accesses and
- * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode
- * can have the following values:
- * 0: no writers, no VM_DENYWRITE mappings
- * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist
- * > 0: (i_writecount) users are writing to the file.
+ * deny_write_access() denies write access to a file.
+ * allow_write_access() re-enables write access to a file.
+ *
+ * The i_writecount field of an inode can have the following values:
+ * 0: no write access, no denied write access
+ * < 0: (-i_writecount) users that denied write access to the file.
+ * > 0: (i_writecount) users that have write access to the file.
*
* Normally we operate on that counter with atomic_{inc,dec} and it's safe
* except for the cases where we don't hold i_writecount yet. Then we need to
@@ -2979,43 +3016,8 @@ static inline void i_readcount_inc(struct inode *inode)
#endif
extern int do_pipe_flags(int *, int);
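A usage sketch of the deny/allow pair documented in the i_writecount comment above (error paths trimmed; this mirrors how exec protects a running binary):

	int err = deny_write_access(file);	/* -ETXTBSY if writers exist */
	if (err)
		return err;
	/* ... read the file; nobody can open it for write meanwhile ... */
	allow_write_access(file);		/* drop the denial again */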
-#define __kernel_read_file_id(id) \
- id(UNKNOWN, unknown) \
- id(FIRMWARE, firmware) \
- id(FIRMWARE_PREALLOC_BUFFER, firmware) \
- id(MODULE, kernel-module) \
- id(KEXEC_IMAGE, kexec-image) \
- id(KEXEC_INITRAMFS, kexec-initramfs) \
- id(POLICY, security-policy) \
- id(X509_CERTIFICATE, x509-certificate) \
- id(MAX_ID, )
-
-#define __fid_enumify(ENUM, dummy) READING_ ## ENUM,
-#define __fid_stringify(dummy, str) #str,
-
-enum kernel_read_file_id {
- __kernel_read_file_id(__fid_enumify)
-};
-
-static const char * const kernel_read_file_str[] = {
- __kernel_read_file_id(__fid_stringify)
-};
-
-static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id)
-{
- if ((unsigned)id >= READING_MAX_ID)
- return kernel_read_file_str[READING_UNKNOWN];
-
- return kernel_read_file_str[id];
-}
-
-extern int kernel_read_file(struct file *, void **, loff_t *, loff_t,
- enum kernel_read_file_id);
-extern int kernel_read_file_from_path(const char *, void **, loff_t *, loff_t,
- enum kernel_read_file_id);
-extern int kernel_read_file_from_fd(int, void **, loff_t *, loff_t,
- enum kernel_read_file_id);
extern ssize_t kernel_read(struct file *, void *, size_t, loff_t *);
+ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos);
extern ssize_t kernel_write(struct file *, const void *, size_t, loff_t *);
extern ssize_t __kernel_write(struct file *, const void *, size_t, loff_t *);
extern struct file * open_exec(const char *);
@@ -3044,6 +3046,7 @@ static inline int generic_drop_inode(struct inode *inode)
{
return !inode->i_nlink || inode_unhashed(inode);
}
+extern void d_mark_dontcache(struct inode *inode);
extern struct inode *ilookup5_nowait(struct super_block *sb,
unsigned long hashval, int (*test)(struct inode *, void *),
@@ -3063,6 +3066,9 @@ extern struct inode *find_inode_nowait(struct super_block *,
int (*match)(struct inode *,
unsigned long, void *),
void *data);
+extern struct inode *find_inode_rcu(struct super_block *, unsigned long,
+ int (*)(struct inode *, void *), void *);
+extern struct inode *find_inode_by_ino_rcu(struct super_block *, unsigned long);
extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *);
extern int insert_inode_locked(struct inode *);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -3074,6 +3080,22 @@ extern void unlock_new_inode(struct inode *);
extern void discard_new_inode(struct inode *);
extern unsigned int get_next_ino(void);
extern void evict_inodes(struct super_block *sb);
+void dump_mapping(const struct address_space *);
+
+/*
+ * Userspace may rely on the inode number being non-zero. For example, glibc
+ * simply ignores files with zero i_ino in unlink() and other places.
+ *
+ * As an additional complication, if userspace was compiled with
+ * _FILE_OFFSET_BITS=32 on a 64-bit kernel we'll only end up reading out the
+ * lower 32 bits, so we need to check that those aren't zero explicitly. With
+ * _FILE_OFFSET_BITS=64, this may cause some harmless false-negatives, but
+ * better safe than sorry.
+ */
+static inline bool is_zero_ino(ino_t ino)
+{
+ return (u32)ino == 0;
+}
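A sketch of an allocator honouring this predicate (a simple counter scheme, assumed purely for illustration):

	static ino_t my_next_ino(atomic64_t *ctr)
	{
		ino_t ino;

		do {
			ino = (ino_t)atomic64_inc_return(ctr);
		} while (is_zero_ino(ino));	/* skip inos with a zero low word */
		return ino;
	}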
extern void __iget(struct inode * inode);
extern void iget_failed(struct inode *);
@@ -3085,6 +3107,16 @@ extern void free_inode_nonrcu(struct inode *inode);
extern int should_remove_suid(struct dentry *);
extern int file_remove_privs(struct file *);
+/*
+ * This must be used for allocating filesystem-specific inodes to set
+ * up the inode reclaim context correctly.
+ */
+static inline void *
+alloc_inode_sb(struct super_block *sb, struct kmem_cache *cache, gfp_t gfp)
+{
+ return kmem_cache_alloc_lru(cache, &sb->s_inode_lru, gfp);
+}
+
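A call-site sketch (myfs names hypothetical): a filesystem's ->alloc_inode goes through the helper so the inode is charged to the superblock's LRU from the start:

	static struct inode *myfs_alloc_inode(struct super_block *sb)
	{
		struct myfs_inode *mi;

		mi = alloc_inode_sb(sb, myfs_inode_cachep, GFP_KERNEL);
		return mi ? &mi->vfs_inode : NULL;
	}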
extern void __insert_inode_hash(struct inode *, unsigned long hashval);
static inline void insert_inode_hash(struct inode *inode)
{
@@ -3099,29 +3131,25 @@ static inline void remove_inode_hash(struct inode *inode)
}
extern void inode_sb_list_add(struct inode *inode);
+extern void inode_add_lru(struct inode *inode);
-#ifdef CONFIG_BLOCK
-extern int bdev_read_only(struct block_device *);
-#endif
-extern int set_blocksize(struct block_device *, int);
extern int sb_set_blocksize(struct super_block *, int);
extern int sb_min_blocksize(struct super_block *, int);
extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
-extern int generic_remap_checks(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- loff_t *count, unsigned int remap_flags);
+int generic_write_checks_count(struct kiocb *iocb, loff_t *count);
+extern int generic_write_check_limits(struct file *file, loff_t pos,
+ loff_t *count);
extern int generic_file_rw_checks(struct file *file_in, struct file *file_out);
-extern int generic_copy_file_checks(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- size_t *count, unsigned int flags);
+ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *to,
+ ssize_t already_read);
extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *);
-extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
+ssize_t generic_perform_write(struct kiocb *, struct iov_iter *);
ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
rwf_t flags);
@@ -3132,13 +3160,6 @@ ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
struct iov_iter *iter);
-/* fs/block_dev.c */
-extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
-extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from);
-extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
- int datasync);
-extern void block_sync_page(struct page *page);
-
/* fs/splice.c */
extern ssize_t generic_file_splice_read(struct file *, loff_t *,
struct pipe_inode_info *, size_t, unsigned int);
@@ -3153,7 +3174,7 @@ extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
extern loff_t noop_llseek(struct file *file, loff_t offset, int whence);
-extern loff_t no_llseek(struct file *file, loff_t offset, int whence);
+#define no_llseek NULL
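Existing initializers keep compiling unchanged (sketch):

	static const struct file_operations my_fops = {
		.owner	= THIS_MODULE,
		.llseek	= no_llseek,	/* now literally NULL: seeks fail (-ESPIPE) */
	};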
extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize);
extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence);
extern loff_t generic_file_llseek_size(struct file *file, loff_t offset,
@@ -3162,6 +3183,7 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
int whence, loff_t size);
extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t);
extern loff_t no_seek_end_llseek(struct file *, loff_t, int);
+int rw_verify_area(int, struct file *, const loff_t *, size_t);
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
extern int stream_open(struct inode * inode, struct file * filp);
@@ -3178,8 +3200,6 @@ enum {
DIO_SKIP_HOLES = 0x02,
};
-void dio_end_io(struct bio *bio);
-
ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter,
get_block_t get_block,
@@ -3198,7 +3218,7 @@ static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
void inode_dio_wait(struct inode *inode);
-/*
+/**
* inode_dio_begin - signal the start of a direct I/O request
* @inode: inode the direct I/O happens on
*
@@ -3210,7 +3230,7 @@ static inline void inode_dio_begin(struct inode *inode)
atomic_inc(&inode->i_dio_count);
}
-/*
+/**
* inode_dio_end - signal the end of a direct I/O request
* @inode: inode the direct I/O happens on
*
@@ -3240,12 +3260,11 @@ extern int page_readlink(struct dentry *, char __user *, int);
extern const char *page_get_link(struct dentry *, struct inode *,
struct delayed_call *);
extern void page_put_link(void *);
-extern int __page_symlink(struct inode *inode, const char *symname, int len,
- int nofs);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern void kfree_link(void *);
-extern void generic_fillattr(struct inode *, struct kstat *);
+void generic_fillattr(struct user_namespace *, struct inode *, struct kstat *);
+void generic_fill_statx_attr(struct inode *inode, struct kstat *stat);
extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
void __inode_add_bytes(struct inode *inode, loff_t bytes);
@@ -3264,48 +3283,26 @@ extern const struct inode_operations simple_symlink_inode_operations;
extern int iterate_dir(struct file *, struct dir_context *);
-extern int vfs_statx(int, const char __user *, int, struct kstat *, u32);
-extern int vfs_statx_fd(unsigned int, struct kstat *, u32, unsigned int);
+int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat,
+ int flags);
+int vfs_fstat(int fd, struct kstat *stat);
static inline int vfs_stat(const char __user *filename, struct kstat *stat)
{
- return vfs_statx(AT_FDCWD, filename, AT_NO_AUTOMOUNT,
- stat, STATX_BASIC_STATS);
+ return vfs_fstatat(AT_FDCWD, filename, stat, 0);
}
static inline int vfs_lstat(const char __user *name, struct kstat *stat)
{
- return vfs_statx(AT_FDCWD, name, AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT,
- stat, STATX_BASIC_STATS);
-}
-static inline int vfs_fstatat(int dfd, const char __user *filename,
- struct kstat *stat, int flags)
-{
- return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT,
- stat, STATX_BASIC_STATS);
-}
-static inline int vfs_fstat(int fd, struct kstat *stat)
-{
- return vfs_statx_fd(fd, stat, STATX_BASIC_STATS, 0);
+ return vfs_fstatat(AT_FDCWD, name, stat, AT_SYMLINK_NOFOLLOW);
}
-
extern const char *vfs_get_link(struct dentry *, struct delayed_call *);
extern int vfs_readlink(struct dentry *, char __user *, int);
-extern int __generic_block_fiemap(struct inode *inode,
- struct fiemap_extent_info *fieinfo,
- loff_t start, loff_t len,
- get_block_t *get_block);
-extern int generic_block_fiemap(struct inode *inode,
- struct fiemap_extent_info *fieinfo, u64 start,
- u64 len, get_block_t *get_block);
-
extern struct file_system_type *get_filesystem(struct file_system_type *fs);
extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
extern struct super_block *get_super(struct block_device *);
-extern struct super_block *get_super_thawed(struct block_device *);
-extern struct super_block *get_super_exclusive_thawed(struct block_device *bdev);
extern struct super_block *get_active_super(struct block_device *bdev);
extern void drop_super(struct super_block *sb);
extern void drop_super_exclusive(struct super_block *sb);
@@ -3317,30 +3314,29 @@ extern int dcache_dir_open(struct inode *, struct file *);
extern int dcache_dir_close(struct inode *, struct file *);
extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
extern int dcache_readdir(struct file *, struct dir_context *);
-extern int simple_setattr(struct dentry *, struct iattr *);
-extern int simple_getattr(const struct path *, struct kstat *, u32, unsigned int);
+extern int simple_setattr(struct user_namespace *, struct dentry *,
+ struct iattr *);
+extern int simple_getattr(struct user_namespace *, const struct path *,
+ struct kstat *, u32, unsigned int);
extern int simple_statfs(struct dentry *, struct kstatfs *);
extern int simple_open(struct inode *inode, struct file *file);
extern int simple_link(struct dentry *, struct inode *, struct dentry *);
extern int simple_unlink(struct inode *, struct dentry *);
extern int simple_rmdir(struct inode *, struct dentry *);
-extern int simple_rename(struct inode *, struct dentry *,
- struct inode *, struct dentry *, unsigned int);
+extern int simple_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
+extern int simple_rename(struct user_namespace *, struct inode *,
+ struct dentry *, struct inode *, struct dentry *,
+ unsigned int);
extern void simple_recursive_removal(struct dentry *,
void (*callback)(struct dentry *));
extern int noop_fsync(struct file *, loff_t, loff_t, int);
-extern int noop_set_page_dirty(struct page *page);
-extern void noop_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length);
extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
extern int simple_empty(struct dentry *);
-extern int simple_readpage(struct file *file, struct page *page);
extern int simple_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata);
-extern int simple_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata);
+extern const struct address_space_operations ram_aops;
extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
extern int simple_nosetlease(struct file *, long, struct file_lock **, void **);
@@ -3369,30 +3365,18 @@ extern int generic_file_fsync(struct file *, loff_t, loff_t, int);
extern int generic_check_addressable(unsigned, u64);
-#ifdef CONFIG_MIGRATION
-extern int buffer_migrate_page(struct address_space *,
- struct page *, struct page *,
- enum migrate_mode);
-extern int buffer_migrate_page_norefs(struct address_space *,
- struct page *, struct page *,
- enum migrate_mode);
-#else
-#define buffer_migrate_page NULL
-#define buffer_migrate_page_norefs NULL
-#endif
+extern void generic_set_encrypted_ci_d_ops(struct dentry *dentry);
-extern int setattr_prepare(struct dentry *, struct iattr *);
+int may_setattr(struct user_namespace *mnt_userns, struct inode *inode,
+ unsigned int ia_valid);
+int setattr_prepare(struct user_namespace *, struct dentry *, struct iattr *);
extern int inode_newsize_ok(const struct inode *, loff_t offset);
-extern void setattr_copy(struct inode *inode, const struct iattr *attr);
+void setattr_copy(struct user_namespace *, struct inode *inode,
+ const struct iattr *attr);
extern int file_update_time(struct file *file);
-static inline bool io_is_direct(struct file *filp)
-{
- return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host);
-}
-
-static inline bool vma_is_dax(struct vm_area_struct *vma)
+static inline bool vma_is_dax(const struct vm_area_struct *vma)
{
return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
}
@@ -3401,7 +3385,7 @@ static inline bool vma_is_fsdax(struct vm_area_struct *vma)
{
struct inode *inode;
- if (!vma->vm_file)
+ if (!IS_ENABLED(CONFIG_FS_DAX) || !vma->vm_file)
return false;
if (!vma_is_dax(vma))
return false;
@@ -3416,9 +3400,9 @@ static inline int iocb_flags(struct file *file)
int res = 0;
if (file->f_flags & O_APPEND)
res |= IOCB_APPEND;
- if (io_is_direct(file))
+ if (file->f_flags & O_DIRECT)
res |= IOCB_DIRECT;
- if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
+ if (file->f_flags & O_DSYNC)
res |= IOCB_DSYNC;
if (file->f_flags & __O_SYNC)
res |= IOCB_SYNC;
@@ -3427,22 +3411,26 @@ static inline int iocb_flags(struct file *file)
static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
{
+ int kiocb_flags = 0;
+
+ /* make sure there's no overlap between RWF and private IOCB flags */
+ BUILD_BUG_ON((__force int) RWF_SUPPORTED & IOCB_EVENTFD);
+
+ if (!flags)
+ return 0;
if (unlikely(flags & ~RWF_SUPPORTED))
return -EOPNOTSUPP;
if (flags & RWF_NOWAIT) {
if (!(ki->ki_filp->f_mode & FMODE_NOWAIT))
return -EOPNOTSUPP;
- ki->ki_flags |= IOCB_NOWAIT;
+ kiocb_flags |= IOCB_NOIO;
}
- if (flags & RWF_HIPRI)
- ki->ki_flags |= IOCB_HIPRI;
- if (flags & RWF_DSYNC)
- ki->ki_flags |= IOCB_DSYNC;
+ kiocb_flags |= (__force int) (flags & RWF_SUPPORTED);
if (flags & RWF_SYNC)
- ki->ki_flags |= (IOCB_DSYNC | IOCB_SYNC);
- if (flags & RWF_APPEND)
- ki->ki_flags |= IOCB_APPEND;
+ kiocb_flags |= IOCB_DSYNC;
+
+ ki->ki_flags |= kiocb_flags;
return 0;
}
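An in-kernel caller sketch, with rwf holding the flags word a user passed to preadv2()/pwritev2():

	struct kiocb kiocb;
	int ret;

	init_sync_kiocb(&kiocb, file);
	ret = kiocb_set_rw_flags(&kiocb, rwf);	/* -EOPNOTSUPP on unknown bits */
	if (ret)
		return ret;

The bulk OR works because each RWF_* bit is defined to match its IOCB_* counterpart; only RWF_SYNC and RWF_NOWAIT need the extra handling above.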
@@ -3468,7 +3456,7 @@ static inline ino_t parent_ino(struct dentry *dentry)
*/
struct simple_transaction_argresp {
ssize_t size;
- char data[0];
+ char data[];
};
#define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp))
@@ -3528,13 +3516,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
struct ctl_table;
-int proc_nr_files(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
-int proc_nr_dentry(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
-int proc_nr_inodes(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
-int __init get_filesystem_list(char *buf);
+int __init list_bdev_fs_names(char *buf, size_t size);
#define __FMODE_EXEC ((__force int) FMODE_EXEC)
#define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY)
@@ -3548,12 +3530,13 @@ static inline bool is_sxid(umode_t mode)
return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
}
-static inline int check_sticky(struct inode *dir, struct inode *inode)
+static inline int check_sticky(struct user_namespace *mnt_userns,
+ struct inode *dir, struct inode *inode)
{
if (!(dir->i_mode & S_ISVTX))
return 0;
- return __check_sticky(dir, inode);
+ return __check_sticky(mnt_userns, dir, inode);
}
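A caller sketch from a typical unlink/rename path, with victim the dentry about to go away:

	if (check_sticky(mnt_userns, dir, d_inode(victim)))
		return -EPERM;	/* sticky dir: only the owner or root may delete */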
static inline void inode_has_no_xattr(struct inode *inode)
@@ -3571,17 +3554,17 @@ static inline bool dir_emit(struct dir_context *ctx,
const char *name, int namelen,
u64 ino, unsigned type)
{
- return ctx->actor(ctx, name, namelen, ctx->pos, ino, type) == 0;
+ return ctx->actor(ctx, name, namelen, ctx->pos, ino, type);
}
static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx)
{
return ctx->actor(ctx, ".", 1, ctx->pos,
- file->f_path.dentry->d_inode->i_ino, DT_DIR) == 0;
+ file->f_path.dentry->d_inode->i_ino, DT_DIR);
}
static inline bool dir_emit_dotdot(struct file *file, struct dir_context *ctx)
{
return ctx->actor(ctx, "..", 2, ctx->pos,
- parent_ino(file->f_path.dentry), DT_DIR) == 0;
+ parent_ino(file->f_path.dentry), DT_DIR);
}
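With bool-returning emitters, a ->iterate_shared body stops as soon as the destination buffer fills (sketch; myfs names hypothetical):

	static int myfs_readdir(struct file *file, struct dir_context *ctx)
	{
		if (!dir_emit_dots(file, ctx))
			return 0;	/* buffer full; resume at ctx->pos next call */
		/* then dir_emit() each remaining entry, advancing ctx->pos */
		return 0;
	}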
static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx)
{
@@ -3620,36 +3603,4 @@ extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
extern int generic_fadvise(struct file *file, loff_t offset, loff_t len,
int advice);
-#if defined(CONFIG_IO_URING)
-extern struct sock *io_uring_get_socket(struct file *file);
-#else
-static inline struct sock *io_uring_get_socket(struct file *file)
-{
- return NULL;
-}
-#endif
-
-int vfs_ioc_setflags_prepare(struct inode *inode, unsigned int oldflags,
- unsigned int flags);
-
-int vfs_ioc_fssetxattr_check(struct inode *inode, const struct fsxattr *old_fa,
- struct fsxattr *fa);
-
-static inline void simple_fill_fsxattr(struct fsxattr *fa, __u32 xflags)
-{
- memset(fa, 0, sizeof(*fa));
- fa->fsx_xflags = xflags;
-}
-
-/*
- * Flush file data before changing attributes. Caller must hold any locks
- * required to prevent further writes to this file until we're done setting
- * flags.
- */
-static inline int inode_drain_writes(struct inode *inode)
-{
- inode_dio_wait(inode);
- return filemap_write_and_wait(inode->i_mapping);
-}
-
#endif /* _LINUX_FS_H */
diff --git a/include/linux/fs_api.h b/include/linux/fs_api.h
new file mode 100644
index 000000000000..83be38d6d413
--- /dev/null
+++ b/include/linux/fs_api.h
@@ -0,0 +1 @@
+#include <linux/fs.h>
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index e6c3e4c61dad..13fa6f3df8e4 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -85,7 +85,7 @@ struct p_log {
* Superblock creation fills in ->root whereas reconfiguration begins with this
* already set.
*
- * See Documentation/filesystems/mount_api.txt
+ * See Documentation/filesystems/mount_api.rst
*/
struct fs_context {
const struct fs_context_operations *ops;
@@ -109,6 +109,7 @@ struct fs_context {
enum fs_context_phase phase:8; /* The phase the context is in */
bool need_free:1; /* Need to call ops->free() */
bool global:1; /* Goes into &init_user_ns */
+ bool oldapi:1; /* Coming from mount(2) */
};
struct fs_context_operations {
@@ -138,6 +139,11 @@ extern int vfs_parse_fs_string(struct fs_context *fc, const char *key,
extern int generic_parse_monolithic(struct fs_context *fc, void *data);
extern int vfs_get_tree(struct fs_context *fc);
extern void put_fs_context(struct fs_context *fc);
+extern int vfs_parse_fs_param_source(struct fs_context *fc,
+ struct fs_parameter *param);
+extern void fc_drop_locked(struct fs_context *fc);
+int reconfigure_single(struct super_block *s,
+ int flags, void *data);
/*
* sget() wrappers to be called from the ->get_tree() op.
diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h
index 2eab6d5f6736..f103c91139d4 100644
--- a/include/linux/fs_parser.h
+++ b/include/linux/fs_parser.h
@@ -42,7 +42,7 @@ struct fs_parameter_spec {
u8 opt; /* Option number (returned by fs_parse()) */
unsigned short flags;
#define fs_param_neg_with_no 0x0002 /* "noxxx" is negative param */
-#define fs_param_neg_with_empty 0x0004 /* "xxx=" is negative param */
+#define fs_param_can_be_empty 0x0004 /* "xxx=" is allowed */
#define fs_param_deprecated 0x0008 /* The param is deprecated */
const void *data;
};
@@ -120,7 +120,7 @@ static inline bool fs_validate_description(const char *name,
#define fsparam_u32oct(NAME, OPT) \
__fsparam(fs_param_is_u32, NAME, OPT, 0, (void *)8)
#define fsparam_u32hex(NAME, OPT) \
- __fsparam(fs_param_is_u32_hex, NAME, OPT, 0, (void *16))
+ __fsparam(fs_param_is_u32_hex, NAME, OPT, 0, (void *)16)
#define fsparam_s32(NAME, OPT) __fsparam(fs_param_is_s32, NAME, OPT, 0, NULL)
#define fsparam_u64(NAME, OPT) __fsparam(fs_param_is_u64, NAME, OPT, 0, NULL)
#define fsparam_enum(NAME, OPT, array) __fsparam(fs_param_is_enum, NAME, OPT, 0, array)
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index cf1015abfbf2..783b48dedb72 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -9,7 +9,7 @@
struct fs_struct {
int users;
spinlock_t lock;
- seqcount_t seq;
+ seqcount_spinlock_t seq;
int umask;
int in_exec;
struct path root, pwd;
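The read side is unchanged by the typed seqcount (sketch); what changes is that lockdep can now verify that writers hold fs->lock:

	unsigned seq;

	do {
		seq = read_seqcount_begin(&fs->seq);
		/* sample fs->root / fs->pwd here */
	} while (read_seqcount_retry(&fs->seq, seq));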
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index d5ba431b5d63..a174cedf4d90 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -1,12 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* General filesystem caching backing cache interface
*
- * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* NOTE!!! See:
*
- * Documentation/filesystems/caching/backend-api.txt
+ * Documentation/filesystems/caching/backend-api.rst
*
* for a description of the cache backend interface declared here.
*/
@@ -15,208 +15,34 @@
#define _LINUX_FSCACHE_CACHE_H
#include <linux/fscache.h>
-#include <linux/sched.h>
-#include <linux/workqueue.h>
-#define NR_MAXCACHES BITS_PER_LONG
-
-struct fscache_cache;
-struct fscache_cache_ops;
-struct fscache_object;
-struct fscache_operation;
-
-enum fscache_obj_ref_trace {
- fscache_obj_get_add_to_deps,
- fscache_obj_get_queue,
- fscache_obj_put_alloc_fail,
- fscache_obj_put_attach_fail,
- fscache_obj_put_drop_obj,
- fscache_obj_put_enq_dep,
- fscache_obj_put_queue,
- fscache_obj_put_work,
- fscache_obj_ref__nr_traces
+enum fscache_cache_trace;
+enum fscache_cookie_trace;
+enum fscache_access_trace;
+
+enum fscache_cache_state {
+ FSCACHE_CACHE_IS_NOT_PRESENT, /* No cache is present for this name */
+ FSCACHE_CACHE_IS_PREPARING, /* A cache is preparing to come live */
+ FSCACHE_CACHE_IS_ACTIVE, /* Attached cache is active and can be used */
+ FSCACHE_CACHE_GOT_IOERROR, /* Attached cache stopped on I/O error */
+ FSCACHE_CACHE_IS_WITHDRAWN, /* Attached cache is being withdrawn */
+#define NR__FSCACHE_CACHE_STATE (FSCACHE_CACHE_IS_WITHDRAWN + 1)
};
/*
- * cache tag definition
- */
-struct fscache_cache_tag {
- struct list_head link;
- struct fscache_cache *cache; /* cache referred to by this tag */
- unsigned long flags;
-#define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */
- atomic_t usage;
- char name[0]; /* tag name */
-};
-
-/*
- * cache definition
+ * Cache cookie.
*/
struct fscache_cache {
const struct fscache_cache_ops *ops;
- struct fscache_cache_tag *tag; /* tag representing this cache */
- struct kobject *kobj; /* system representation of this cache */
- struct list_head link; /* link in list of caches */
- size_t max_index_size; /* maximum size of index data */
- char identifier[36]; /* cache label */
-
- /* node management */
- struct work_struct op_gc; /* operation garbage collector */
- struct list_head object_list; /* list of data/index objects */
- struct list_head op_gc_list; /* list of ops to be deleted */
- spinlock_t object_list_lock;
- spinlock_t op_gc_list_lock;
+ struct list_head cache_link; /* Link in cache list */
+ void *cache_priv; /* Private cache data (or NULL) */
+ refcount_t ref;
+ atomic_t n_volumes; /* Number of active volumes */
+ atomic_t n_accesses; /* Number of in-progress accesses on the cache */
atomic_t object_count; /* no. of live objects in this cache */
- struct fscache_object *fsdef; /* object for the fsdef index */
- unsigned long flags;
-#define FSCACHE_IOERROR 0 /* cache stopped on I/O error */
-#define FSCACHE_CACHE_WITHDRAWN 1 /* cache has been withdrawn */
-};
-
-extern wait_queue_head_t fscache_cache_cleared_wq;
-
-/*
- * operation to be applied to a cache object
- * - retrieval initiation operations are done in the context of the process
- * that issued them, and not in an async thread pool
- */
-typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
-typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
-typedef void (*fscache_operation_cancel_t)(struct fscache_operation *op);
-
-enum fscache_operation_state {
- FSCACHE_OP_ST_BLANK, /* Op is not yet submitted */
- FSCACHE_OP_ST_INITIALISED, /* Op is initialised */
- FSCACHE_OP_ST_PENDING, /* Op is blocked from running */
- FSCACHE_OP_ST_IN_PROGRESS, /* Op is in progress */
- FSCACHE_OP_ST_COMPLETE, /* Op is complete */
- FSCACHE_OP_ST_CANCELLED, /* Op has been cancelled */
- FSCACHE_OP_ST_DEAD /* Op is now dead */
-};
-
-struct fscache_operation {
- struct work_struct work; /* record for async ops */
- struct list_head pend_link; /* link in object->pending_ops */
- struct fscache_object *object; /* object to be operated upon */
-
- unsigned long flags;
-#define FSCACHE_OP_TYPE 0x000f /* operation type */
-#define FSCACHE_OP_ASYNC 0x0001 /* - async op, processor may sleep for disk */
-#define FSCACHE_OP_MYTHREAD 0x0002 /* - processing is done be issuing thread, not pool */
-#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */
-#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */
-#define FSCACHE_OP_DEC_READ_CNT 6 /* decrement object->n_reads on destruction */
-#define FSCACHE_OP_UNUSE_COOKIE 7 /* call fscache_unuse_cookie() on completion */
-#define FSCACHE_OP_KEEP_FLAGS 0x00f0 /* flags to keep when repurposing an op */
-
- enum fscache_operation_state state;
- atomic_t usage;
- unsigned debug_id; /* debugging ID */
-
- /* operation processor callback
- * - can be NULL if FSCACHE_OP_WAITING is going to be used to perform
- * the op in a non-pool thread */
- fscache_operation_processor_t processor;
-
- /* Operation cancellation cleanup (optional) */
- fscache_operation_cancel_t cancel;
-
- /* operation releaser */
- fscache_operation_release_t release;
-};
-
-extern atomic_t fscache_op_debug_id;
-extern void fscache_op_work_func(struct work_struct *work);
-
-extern void fscache_enqueue_operation(struct fscache_operation *);
-extern void fscache_op_complete(struct fscache_operation *, bool);
-extern void fscache_put_operation(struct fscache_operation *);
-extern void fscache_operation_init(struct fscache_cookie *,
- struct fscache_operation *,
- fscache_operation_processor_t,
- fscache_operation_cancel_t,
- fscache_operation_release_t);
-
-/*
- * data read operation
- */
-struct fscache_retrieval {
- struct fscache_operation op;
- struct fscache_cookie *cookie; /* The netfs cookie */
- struct address_space *mapping; /* netfs pages */
- fscache_rw_complete_t end_io_func; /* function to call on I/O completion */
- void *context; /* netfs read context (pinned) */
- struct list_head to_do; /* list of things to be done by the backend */
- unsigned long start_time; /* time at which retrieval started */
- atomic_t n_pages; /* number of pages to be retrieved */
-};
-
-typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op,
- struct page *page,
- gfp_t gfp);
-
-typedef int (*fscache_pages_retrieval_func_t)(struct fscache_retrieval *op,
- struct list_head *pages,
- unsigned *nr_pages,
- gfp_t gfp);
-
-/**
- * fscache_get_retrieval - Get an extra reference on a retrieval operation
- * @op: The retrieval operation to get a reference on
- *
- * Get an extra reference on a retrieval operation.
- */
-static inline
-struct fscache_retrieval *fscache_get_retrieval(struct fscache_retrieval *op)
-{
- atomic_inc(&op->op.usage);
- return op;
-}
-
-/**
- * fscache_enqueue_retrieval - Enqueue a retrieval operation for processing
- * @op: The retrieval operation affected
- *
- * Enqueue a retrieval operation for processing by the FS-Cache thread pool.
- */
-static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
-{
- fscache_enqueue_operation(&op->op);
-}
-
-/**
- * fscache_retrieval_complete - Record (partial) completion of a retrieval
- * @op: The retrieval operation affected
- * @n_pages: The number of pages to account for
- */
-static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
- int n_pages)
-{
- if (atomic_sub_return_relaxed(n_pages, &op->n_pages) <= 0)
- fscache_op_complete(&op->op, false);
-}
-
-/**
- * fscache_put_retrieval - Drop a reference to a retrieval operation
- * @op: The retrieval operation affected
- *
- * Drop a reference to a retrieval operation.
- */
-static inline void fscache_put_retrieval(struct fscache_retrieval *op)
-{
- fscache_put_operation(&op->op);
-}
-
-/*
- * cached page storage work item
- * - used to do three things:
- * - batch writes to the cache
- * - do cache writes asynchronously
- * - defer writes until cache object lookup completion
- */
-struct fscache_storage {
- struct fscache_operation op;
- pgoff_t store_limit; /* don't write more than this */
+ unsigned int debug_id;
+ enum fscache_cache_state state;
+ char *name;
};
/*
@@ -226,340 +52,154 @@ struct fscache_cache_ops {
/* name of cache provider */
const char *name;
- /* allocate an object record for a cookie */
- struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
- struct fscache_cookie *cookie);
-
- /* look up the object for a cookie
- * - return -ETIMEDOUT to be requeued
- */
- int (*lookup_object)(struct fscache_object *object);
-
- /* finished looking up */
- void (*lookup_complete)(struct fscache_object *object);
-
- /* increment the usage count on this object (may fail if unmounting) */
- struct fscache_object *(*grab_object)(struct fscache_object *object,
- enum fscache_obj_ref_trace why);
+ /* Acquire a volume */
+ void (*acquire_volume)(struct fscache_volume *volume);
- /* pin an object in the cache */
- int (*pin_object)(struct fscache_object *object);
+ /* Free the cache's data attached to a volume */
+ void (*free_volume)(struct fscache_volume *volume);
- /* unpin an object in the cache */
- void (*unpin_object)(struct fscache_object *object);
+ /* Look up a cookie in the cache */
+ bool (*lookup_cookie)(struct fscache_cookie *cookie);
- /* check the consistency between the backing cache and the FS-Cache
- * cookie */
- int (*check_consistency)(struct fscache_operation *op);
+ /* Withdraw an object without any cookie access counts held */
+ void (*withdraw_cookie)(struct fscache_cookie *cookie);
- /* store the updated auxiliary data on an object */
- void (*update_object)(struct fscache_object *object);
+ /* Change the size of a data object */
+ void (*resize_cookie)(struct netfs_cache_resources *cres,
+ loff_t new_size);
/* Invalidate an object */
- void (*invalidate_object)(struct fscache_operation *op);
-
- /* discard the resources pinned by an object and effect retirement if
- * necessary */
- void (*drop_object)(struct fscache_object *object);
-
- /* dispose of a reference to an object */
- void (*put_object)(struct fscache_object *object,
- enum fscache_obj_ref_trace why);
-
- /* sync a cache */
- void (*sync_cache)(struct fscache_cache *cache);
-
- /* notification that the attributes of a non-index object (such as
- * i_size) have changed */
- int (*attr_changed)(struct fscache_object *object);
+ bool (*invalidate_cookie)(struct fscache_cookie *cookie);
- /* reserve space for an object's data and associated metadata */
- int (*reserve_space)(struct fscache_object *object, loff_t i_size);
+ /* Begin an operation for the netfs lib */
+ bool (*begin_operation)(struct netfs_cache_resources *cres,
+ enum fscache_want_state want_state);
- /* request a backing block for a page be read or allocated in the
- * cache */
- fscache_page_retrieval_func_t read_or_alloc_page;
-
- /* request backing blocks for a list of pages be read or allocated in
- * the cache */
- fscache_pages_retrieval_func_t read_or_alloc_pages;
-
- /* request a backing block for a page be allocated in the cache so that
- * it can be written directly */
- fscache_page_retrieval_func_t allocate_page;
-
- /* request backing blocks for pages be allocated in the cache so that
- * they can be written directly */
- fscache_pages_retrieval_func_t allocate_pages;
-
- /* write a page to its backing block in the cache */
- int (*write_page)(struct fscache_storage *op, struct page *page);
-
- /* detach backing block from a page (optional)
- * - must release the cookie lock before returning
- * - may sleep
- */
- void (*uncache_page)(struct fscache_object *object,
- struct page *page);
-
- /* dissociate a cache from all the pages it was backing */
- void (*dissociate_pages)(struct fscache_cache *cache);
+ /* Prepare to write to a live cache object */
+ void (*prepare_to_write)(struct fscache_cookie *cookie);
};
-extern struct fscache_cookie fscache_fsdef_index;
+extern struct workqueue_struct *fscache_wq;
+extern wait_queue_head_t fscache_clearance_waiters;
/*
- * Event list for fscache_object::{event_mask,events}
- */
-enum {
- FSCACHE_OBJECT_EV_NEW_CHILD, /* T if object has a new child */
- FSCACHE_OBJECT_EV_PARENT_READY, /* T if object's parent is ready */
- FSCACHE_OBJECT_EV_UPDATE, /* T if object should be updated */
- FSCACHE_OBJECT_EV_INVALIDATE, /* T if cache requested object invalidation */
- FSCACHE_OBJECT_EV_CLEARED, /* T if accessors all gone */
- FSCACHE_OBJECT_EV_ERROR, /* T if fatal error occurred during processing */
- FSCACHE_OBJECT_EV_KILL, /* T if netfs relinquished or cache withdrew object */
- NR_FSCACHE_OBJECT_EVENTS
-};
-
-#define FSCACHE_OBJECT_EVENTS_MASK ((1UL << NR_FSCACHE_OBJECT_EVENTS) - 1)
-
-/*
- * States for object state machine.
- */
-struct fscache_transition {
- unsigned long events;
- const struct fscache_state *transit_to;
-};
-
-struct fscache_state {
- char name[24];
- char short_name[8];
- const struct fscache_state *(*work)(struct fscache_object *object,
- int event);
- const struct fscache_transition transitions[];
-};
-
-/*
- * on-disk cache file or index handle
+ * out-of-line cache backend functions
*/
-struct fscache_object {
- const struct fscache_state *state; /* Object state machine state */
- const struct fscache_transition *oob_table; /* OOB state transition table */
- int debug_id; /* debugging ID */
- int n_children; /* number of child objects */
- int n_ops; /* number of extant ops on object */
- int n_obj_ops; /* number of object ops outstanding on object */
- int n_in_progress; /* number of ops in progress */
- int n_exclusive; /* number of exclusive ops queued or in progress */
- atomic_t n_reads; /* number of read ops in progress */
- spinlock_t lock; /* state and operations lock */
-
- unsigned long lookup_jif; /* time at which lookup started */
- unsigned long oob_event_mask; /* OOB events this object is interested in */
- unsigned long event_mask; /* events this object is interested in */
- unsigned long events; /* events to be processed by this object
- * (order is important - using fls) */
-
- unsigned long flags;
-#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */
-#define FSCACHE_OBJECT_PENDING_WRITE 1 /* T if object has pending write */
-#define FSCACHE_OBJECT_WAITING 2 /* T if object is waiting on its parent */
-#define FSCACHE_OBJECT_IS_LIVE 3 /* T if object is not withdrawn or relinquished */
-#define FSCACHE_OBJECT_IS_LOOKED_UP 4 /* T if object has been looked up */
-#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */
-#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */
-#define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */
-#define FSCACHE_OBJECT_RUN_AFTER_DEAD 8 /* T if object has been dispatched after death */
-
- struct list_head cache_link; /* link in cache->object_list */
- struct hlist_node cookie_link; /* link in cookie->backing_objects */
- struct fscache_cache *cache; /* cache that supplied this object */
- struct fscache_cookie *cookie; /* netfs's file/index object */
- struct fscache_object *parent; /* parent object */
- struct work_struct work; /* attention scheduling record */
- struct list_head dependents; /* FIFO of dependent objects */
- struct list_head dep_link; /* link in parent's dependents list */
- struct list_head pending_ops; /* unstarted operations on this object */
-#ifdef CONFIG_FSCACHE_OBJECT_LIST
- struct rb_node objlist_link; /* link in global object list */
-#endif
- pgoff_t store_limit; /* current storage limit */
- loff_t store_limit_l; /* current storage limit */
-};
-
-extern void fscache_object_init(struct fscache_object *, struct fscache_cookie *,
- struct fscache_cache *);
-extern void fscache_object_destroy(struct fscache_object *);
-
-extern void fscache_object_lookup_negative(struct fscache_object *object);
-extern void fscache_obtained_object(struct fscache_object *object);
-
-static inline bool fscache_object_is_live(struct fscache_object *object)
-{
- return test_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
-}
-
-static inline bool fscache_object_is_dying(struct fscache_object *object)
-{
- return !fscache_object_is_live(object);
-}
-
-static inline bool fscache_object_is_available(struct fscache_object *object)
-{
- return test_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
-}
+extern struct rw_semaphore fscache_addremove_sem;
+extern struct fscache_cache *fscache_acquire_cache(const char *name);
+extern void fscache_relinquish_cache(struct fscache_cache *cache);
+extern int fscache_add_cache(struct fscache_cache *cache,
+ const struct fscache_cache_ops *ops,
+ void *cache_priv);
+extern void fscache_withdraw_cache(struct fscache_cache *cache);
+extern void fscache_withdraw_volume(struct fscache_volume *volume);
+extern void fscache_withdraw_cookie(struct fscache_cookie *cookie);
-static inline bool fscache_cache_is_broken(struct fscache_object *object)
-{
- return test_bit(FSCACHE_IOERROR, &object->cache->flags);
-}
+extern void fscache_io_error(struct fscache_cache *cache);
-static inline bool fscache_object_is_active(struct fscache_object *object)
-{
- return fscache_object_is_available(object) &&
- fscache_object_is_live(object) &&
- !fscache_cache_is_broken(object);
-}
+extern void fscache_end_volume_access(struct fscache_volume *volume,
+ struct fscache_cookie *cookie,
+ enum fscache_access_trace why);
+
+extern struct fscache_cookie *fscache_get_cookie(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where);
+extern void fscache_put_cookie(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where);
+extern void fscache_end_cookie_access(struct fscache_cookie *cookie,
+ enum fscache_access_trace why);
+extern void fscache_cookie_lookup_negative(struct fscache_cookie *cookie);
+extern void fscache_resume_after_invalidation(struct fscache_cookie *cookie);
+extern void fscache_caching_failed(struct fscache_cookie *cookie);
+extern bool fscache_wait_for_operation(struct netfs_cache_resources *cred,
+ enum fscache_want_state state);
/**
- * fscache_object_destroyed - Note destruction of an object in a cache
- * @cache: The cache from which the object came
+ * fscache_cookie_state - Read the state of a cookie
+ * @cookie: The cookie to query
*
- * Note the destruction and deallocation of an object record in a cache.
+ * Get the state of a cookie, imposing an ordering between the cookie contents
+ * and the state value. Paired with fscache_set_cookie_state().
*/
-static inline void fscache_object_destroyed(struct fscache_cache *cache)
+static inline
+enum fscache_cookie_state fscache_cookie_state(struct fscache_cookie *cookie)
{
- if (atomic_dec_and_test(&cache->object_count))
- wake_up_all(&fscache_cache_cleared_wq);
+ return smp_load_acquire(&cookie->state);
}
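The writer side pairs with this acquire; a sketch of what fscache_set_cookie_state() amounts to:

	/* publish the cookie contents before the new state becomes visible */
	smp_store_release(&cookie->state, FSCACHE_COOKIE_STATE_ACTIVE);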
/**
- * fscache_object_lookup_error - Note an object encountered an error
- * @object: The object on which the error was encountered
+ * fscache_get_key - Get a pointer to the cookie key
+ * @cookie: The cookie to query
*
- * Note that an object encountered a fatal error (usually an I/O error) and
- * that it should be withdrawn as soon as possible.
+ * Return a pointer to where a cookie's key is stored.
*/
-static inline void fscache_object_lookup_error(struct fscache_object *object)
+static inline void *fscache_get_key(struct fscache_cookie *cookie)
{
- set_bit(FSCACHE_OBJECT_EV_ERROR, &object->events);
+ if (cookie->key_len <= sizeof(cookie->inline_key))
+ return cookie->inline_key;
+ else
+ return cookie->key;
}
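A backend-side sketch, with entry a hypothetical on-disk index record:

	bool same = cookie->key_len == entry->key_len &&
		    memcmp(fscache_get_key(cookie), entry->key,
			   cookie->key_len) == 0;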
-/**
- * fscache_set_store_limit - Set the maximum size to be stored in an object
- * @object: The object to set the maximum on
- * @i_size: The limit to set in bytes
- *
- * Set the maximum size an object is permitted to reach, implying the highest
- * byte that may be written. Intended to be called by the attr_changed() op.
- *
- * See Documentation/filesystems/caching/backend-api.txt for a complete
- * description.
- */
-static inline
-void fscache_set_store_limit(struct fscache_object *object, loff_t i_size)
+static inline struct fscache_cookie *fscache_cres_cookie(struct netfs_cache_resources *cres)
{
- object->store_limit_l = i_size;
- object->store_limit = i_size >> PAGE_SHIFT;
- if (i_size & ~PAGE_MASK)
- object->store_limit++;
+ return cres->cache_priv;
}
/**
- * fscache_end_io - End a retrieval operation on a page
- * @op: The FS-Cache operation covering the retrieval
- * @page: The page that was to be fetched
- * @error: The error code (0 if successful)
+ * fscache_count_object - Tell fscache that an object has been added
+ * @cache: The cache to account to
*
- * Note the end of an operation to retrieve a page, as covered by a particular
- * operation record.
+ * Tell fscache that an object has been added to the cache. This prevents the
+ * cache from tearing down the cache structure until the object is uncounted.
*/
-static inline void fscache_end_io(struct fscache_retrieval *op,
- struct page *page, int error)
+static inline void fscache_count_object(struct fscache_cache *cache)
{
- op->end_io_func(page, op->context, error);
-}
-
-static inline void __fscache_use_cookie(struct fscache_cookie *cookie)
-{
- atomic_inc(&cookie->n_active);
+ atomic_inc(&cache->object_count);
}
/**
- * fscache_use_cookie - Request usage of cookie attached to an object
- * @object: Object description
- *
- * Request usage of the cookie attached to an object. NULL is returned if the
- * relinquishment had reduced the cookie usage count to 0.
+ * fscache_uncount_object - Tell fscache that an object has been removed
+ * @cache: The cache to account to
+ *
+ * Tell fscache that an object has been removed from the cache and will no
+ * longer be accessed. After this point, the cache cookie may be destroyed.
*/
-static inline bool fscache_use_cookie(struct fscache_object *object)
-{
- struct fscache_cookie *cookie = object->cookie;
- return atomic_inc_not_zero(&cookie->n_active) != 0;
-}
-
-static inline bool __fscache_unuse_cookie(struct fscache_cookie *cookie)
+static inline void fscache_uncount_object(struct fscache_cache *cache)
{
- return atomic_dec_and_test(&cookie->n_active);
-}
-
-static inline void __fscache_wake_unused_cookie(struct fscache_cookie *cookie)
-{
- wake_up_var(&cookie->n_active);
+ if (atomic_dec_and_test(&cache->object_count))
+ wake_up_all(&fscache_clearance_waiters);
}
/**
- * fscache_unuse_cookie - Cease usage of cookie attached to an object
- * @object: Object description
- *
- * Cease usage of the cookie attached to an object. When the users count
- * reaches zero then the cookie relinquishment will be permitted to proceed.
- */
-static inline void fscache_unuse_cookie(struct fscache_object *object)
-{
- struct fscache_cookie *cookie = object->cookie;
- if (__fscache_unuse_cookie(cookie))
- __fscache_wake_unused_cookie(cookie);
-}
-
-/*
- * out-of-line cache backend functions
- */
-extern __printf(3, 4)
-void fscache_init_cache(struct fscache_cache *cache,
- const struct fscache_cache_ops *ops,
- const char *idfmt, ...);
-
-extern int fscache_add_cache(struct fscache_cache *cache,
- struct fscache_object *fsdef,
- const char *tagname);
-extern void fscache_withdraw_cache(struct fscache_cache *cache);
-
-extern void fscache_io_error(struct fscache_cache *cache);
-
-extern void fscache_mark_page_cached(struct fscache_retrieval *op,
- struct page *page);
-
-extern void fscache_mark_pages_cached(struct fscache_retrieval *op,
- struct pagevec *pagevec);
-
-extern bool fscache_object_sleep_till_congested(signed long *timeoutp);
-
-extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
- const void *data,
- uint16_t datalen,
- loff_t object_size);
-
-extern void fscache_object_retrying_stale(struct fscache_object *object);
-
-enum fscache_why_object_killed {
- FSCACHE_OBJECT_IS_STALE,
- FSCACHE_OBJECT_NO_SPACE,
- FSCACHE_OBJECT_WAS_RETIRED,
- FSCACHE_OBJECT_WAS_CULLED,
-};
-extern void fscache_object_mark_killed(struct fscache_object *object,
- enum fscache_why_object_killed why);
+ * fscache_wait_for_objects - Wait for all objects to be withdrawn
+ * @cache: The cache to query
+ *
+ * Wait for all extant objects in a cache to finish being withdrawn
+ * and go away.
+ */
+static inline void fscache_wait_for_objects(struct fscache_cache *cache)
+{
+ wait_event(fscache_clearance_waiters,
+ atomic_read(&cache->object_count) == 0);
+}
+
+#ifdef CONFIG_FSCACHE_STATS
+extern atomic_t fscache_n_read;
+extern atomic_t fscache_n_write;
+extern atomic_t fscache_n_no_write_space;
+extern atomic_t fscache_n_no_create_space;
+extern atomic_t fscache_n_culled;
+#define fscache_count_read() atomic_inc(&fscache_n_read)
+#define fscache_count_write() atomic_inc(&fscache_n_write)
+#define fscache_count_no_write_space() atomic_inc(&fscache_n_no_write_space)
+#define fscache_count_no_create_space() atomic_inc(&fscache_n_no_create_space)
+#define fscache_count_culled() atomic_inc(&fscache_n_culled)
+#else
+#define fscache_count_read() do {} while(0)
+#define fscache_count_write() do {} while(0)
+#define fscache_count_no_write_space() do {} while(0)
+#define fscache_count_no_create_space() do {} while(0)
+#define fscache_count_culled() do {} while(0)
+#endif
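A backend bumps these at its I/O submission points, e.g. (sketch):

	fscache_count_read();	/* one cache read; compiles away without CONFIG_FSCACHE_STATS */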
#endif /* _LINUX_FSCACHE_CACHE_H */
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index ad044c0cb1f3..36e5dd84cf59 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -1,12 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* General filesystem caching interface
*
- * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* NOTE!!! See:
*
- * Documentation/filesystems/caching/netfs-api.txt
+ * Documentation/filesystems/caching/netfs-api.rst
*
* for a description of the network filesystem interface declared here.
*/
@@ -15,152 +15,128 @@
#define _LINUX_FSCACHE_H
#include <linux/fs.h>
-#include <linux/list.h>
-#include <linux/pagemap.h>
-#include <linux/pagevec.h>
-#include <linux/list_bl.h>
+#include <linux/netfs.h>
+#include <linux/writeback.h>
#if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE)
+#define __fscache_available (1)
#define fscache_available() (1)
+#define fscache_volume_valid(volume) (volume)
#define fscache_cookie_valid(cookie) (cookie)
+#define fscache_resources_valid(cres) ((cres)->cache_priv)
+#define fscache_cookie_enabled(cookie) (cookie && !test_bit(FSCACHE_COOKIE_DISABLED, &cookie->flags))
#else
+#define __fscache_available (0)
#define fscache_available() (0)
+#define fscache_volume_valid(volume) (0)
#define fscache_cookie_valid(cookie) (0)
+#define fscache_resources_valid(cres) (false)
+#define fscache_cookie_enabled(cookie) (0)
#endif
-
-/*
- * overload PG_private_2 to give us PG_fscache - this is used to indicate that
- * a page is currently backed by a local disk cache
- */
-#define PageFsCache(page) PagePrivate2((page))
-#define SetPageFsCache(page) SetPagePrivate2((page))
-#define ClearPageFsCache(page) ClearPagePrivate2((page))
-#define TestSetPageFsCache(page) TestSetPagePrivate2((page))
-#define TestClearPageFsCache(page) TestClearPagePrivate2((page))
-
-/* pattern used to fill dead space in an index entry */
-#define FSCACHE_INDEX_DEADFILL_PATTERN 0x79
-
-struct pagevec;
-struct fscache_cache_tag;
struct fscache_cookie;
-struct fscache_netfs;
-typedef void (*fscache_rw_complete_t)(struct page *page,
- void *context,
- int error);
+#define FSCACHE_ADV_SINGLE_CHUNK 0x01 /* The object is a single chunk of data */
+#define FSCACHE_ADV_WRITE_CACHE 0x00 /* Do cache if written to locally */
+#define FSCACHE_ADV_WRITE_NOCACHE 0x02 /* Don't cache if written to locally */
+#define FSCACHE_ADV_WANT_CACHE_SIZE 0x04 /* Retrieve cache size at runtime */
+
+#define FSCACHE_INVAL_DIO_WRITE 0x01 /* Invalidate due to DIO write */
-/* result of index entry consultation */
-enum fscache_checkaux {
- FSCACHE_CHECKAUX_OKAY, /* entry okay as is */
- FSCACHE_CHECKAUX_NEEDS_UPDATE, /* entry requires update */
- FSCACHE_CHECKAUX_OBSOLETE, /* entry requires deletion */
+enum fscache_want_state {
+ FSCACHE_WANT_PARAMS,
+ FSCACHE_WANT_WRITE,
+ FSCACHE_WANT_READ,
};
/*
- * fscache cookie definition
- */
-struct fscache_cookie_def {
- /* name of cookie type */
- char name[16];
-
- /* cookie type */
- uint8_t type;
-#define FSCACHE_COOKIE_TYPE_INDEX 0
-#define FSCACHE_COOKIE_TYPE_DATAFILE 1
-
- /* select the cache into which to insert an entry in this index
- * - optional
- * - should return a cache identifier or NULL to cause the cache to be
- * inherited from the parent if possible or the first cache picked
- * for a non-index file if not
- */
- struct fscache_cache_tag *(*select_cache)(
- const void *parent_netfs_data,
- const void *cookie_netfs_data);
-
- /* consult the netfs about the state of an object
- * - this function can be absent if the index carries no state data
- * - the netfs data from the cookie being used as the target is
- * presented, as is the auxiliary data and the object size
- */
- enum fscache_checkaux (*check_aux)(void *cookie_netfs_data,
- const void *data,
- uint16_t datalen,
- loff_t object_size);
-
- /* get an extra reference on a read context
- * - this function can be absent if the completion function doesn't
- * require a context
- */
- void (*get_context)(void *cookie_netfs_data, void *context);
-
- /* release an extra reference on a read context
- * - this function can be absent if the completion function doesn't
- * require a context
- */
- void (*put_context)(void *cookie_netfs_data, void *context);
-
- /* indicate page that now have cache metadata retained
- * - this function should mark the specified page as now being cached
- * - the page will have been marked with PG_fscache before this is
- * called, so this is optional
- */
- void (*mark_page_cached)(void *cookie_netfs_data,
- struct address_space *mapping,
- struct page *page);
-};
+ * Data object state.
+ */
+enum fscache_cookie_state {
+ FSCACHE_COOKIE_STATE_QUIESCENT, /* The cookie is uncached */
+ FSCACHE_COOKIE_STATE_LOOKING_UP, /* The cache object is being looked up */
+ FSCACHE_COOKIE_STATE_CREATING, /* The cache object is being created */
+ FSCACHE_COOKIE_STATE_ACTIVE, /* The cache is active, readable and writable */
+ FSCACHE_COOKIE_STATE_INVALIDATING, /* The cache is being invalidated */
+ FSCACHE_COOKIE_STATE_FAILED, /* The cache failed, withdraw to clear */
+ FSCACHE_COOKIE_STATE_LRU_DISCARDING, /* The cookie is being discarded by the LRU */
+ FSCACHE_COOKIE_STATE_WITHDRAWING, /* The cookie is being withdrawn */
+ FSCACHE_COOKIE_STATE_RELINQUISHING, /* The cookie is being relinquished */
+ FSCACHE_COOKIE_STATE_DROPPED, /* The cookie has been dropped */
+#define FSCACHE_COOKIE_STATE__NR (FSCACHE_COOKIE_STATE_DROPPED + 1)
+} __attribute__((mode(byte)));
/*
- * fscache cached network filesystem type
- * - name, version and ops must be filled in before registration
- * - all other fields will be set during registration
- */
-struct fscache_netfs {
- uint32_t version; /* indexing version */
- const char *name; /* filesystem name */
- struct fscache_cookie *primary_index;
+ * Volume representation cookie.
+ */
+struct fscache_volume {
+ refcount_t ref;
+ atomic_t n_cookies; /* Number of data cookies in volume */
+ atomic_t n_accesses; /* Number of cache accesses in progress */
+ unsigned int debug_id;
+ unsigned int key_hash; /* Hash of key string */
+ char *key; /* Volume ID, eg. "afs@example.com@1234" */
+ struct list_head proc_link; /* Link in /proc/fs/fscache/volumes */
+ struct hlist_bl_node hash_link; /* Link in hash table */
+ struct work_struct work;
+ struct fscache_cache *cache; /* The cache in which this resides */
+ void *cache_priv; /* Cache private data */
+ spinlock_t lock;
+ unsigned long flags;
+#define FSCACHE_VOLUME_RELINQUISHED 0 /* Volume is being cleaned up */
+#define FSCACHE_VOLUME_INVALIDATE 1 /* Volume was invalidated */
+#define FSCACHE_VOLUME_COLLIDED_WITH 2 /* Volume was collided with */
+#define FSCACHE_VOLUME_ACQUIRE_PENDING 3 /* Volume is waiting to complete acquisition */
+#define FSCACHE_VOLUME_CREATING 4 /* Volume is being created on disk */
+ u8 coherency_len; /* Length of the coherency data */
+ u8 coherency[]; /* Coherency data */
};
/*
- * data file or index object cookie
+ * Data file representation cookie.
* - a file will only appear in one cache
* - a request to cache a file may or may not be honoured, subject to
* constraints such as disk space
* - indices are created on disk just-in-time
*/
struct fscache_cookie {
- atomic_t usage; /* number of users of this cookie */
- atomic_t n_children; /* number of children of this cookie */
- atomic_t n_active; /* number of active users of netfs ptrs */
+ refcount_t ref;
+ atomic_t n_active; /* number of active users of cookie */
+ atomic_t n_accesses; /* Number of cache accesses in progress */
+ unsigned int debug_id;
+ unsigned int inval_counter; /* Number of invalidations made */
spinlock_t lock;
- spinlock_t stores_lock; /* lock on page store tree */
- struct hlist_head backing_objects; /* object(s) backing this file/index */
- const struct fscache_cookie_def *def; /* definition */
- struct fscache_cookie *parent; /* parent of this entry */
+ struct fscache_volume *volume; /* Parent volume of this file. */
+ void *cache_priv; /* Cache-side representation */
struct hlist_bl_node hash_link; /* Link in hash table */
- void *netfs_data; /* back pointer to netfs */
- struct radix_tree_root stores; /* pages to be stored on this cookie */
-#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
-#define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */
-
+ struct list_head proc_link; /* Link in proc list */
+ struct list_head commit_link; /* Link in commit queue */
+ struct work_struct work; /* Commit/relinq/withdraw work */
+ loff_t object_size; /* Size of the netfs object */
+ unsigned long unused_at; /* Time at which unused (jiffies) */
unsigned long flags;
-#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
-#define FSCACHE_COOKIE_NO_DATA_YET 1 /* T if new object with no cached data yet */
-#define FSCACHE_COOKIE_UNAVAILABLE 2 /* T if cookie is unavailable (error, etc) */
-#define FSCACHE_COOKIE_INVALIDATING 3 /* T if cookie is being invalidated */
-#define FSCACHE_COOKIE_RELINQUISHED 4 /* T if cookie has been relinquished */
-#define FSCACHE_COOKIE_ENABLED 5 /* T if cookie is enabled */
-#define FSCACHE_COOKIE_ENABLEMENT_LOCK 6 /* T if cookie is being en/disabled */
-#define FSCACHE_COOKIE_AUX_UPDATED 8 /* T if the auxiliary data was updated */
-#define FSCACHE_COOKIE_ACQUIRED 9 /* T if cookie is in use */
-#define FSCACHE_COOKIE_RELINQUISHING 10 /* T if cookie is being relinquished */
-
- u8 type; /* Type of object */
+#define FSCACHE_COOKIE_RELINQUISHED 0 /* T if cookie has been relinquished */
+#define FSCACHE_COOKIE_RETIRED 1 /* T if this cookie has retired on relinq */
+#define FSCACHE_COOKIE_IS_CACHING 2 /* T if this cookie is cached */
+#define FSCACHE_COOKIE_NO_DATA_TO_READ 3 /* T if this cookie has nothing to read */
+#define FSCACHE_COOKIE_NEEDS_UPDATE 4 /* T if attrs have been updated */
+#define FSCACHE_COOKIE_HAS_BEEN_CACHED 5 /* T if cookie needs withdraw-on-relinq */
+#define FSCACHE_COOKIE_DISABLED 6 /* T if cookie has been disabled */
+#define FSCACHE_COOKIE_LOCAL_WRITE 7 /* T if cookie has been modified locally */
+#define FSCACHE_COOKIE_NO_ACCESS_WAKE 8 /* T if no wake when n_accesses goes 0 */
+#define FSCACHE_COOKIE_DO_RELINQUISH 9 /* T if this cookie needs relinquishment */
+#define FSCACHE_COOKIE_DO_WITHDRAW 10 /* T if this cookie needs withdrawing */
+#define FSCACHE_COOKIE_DO_LRU_DISCARD 11 /* T if this cookie needs LRU discard */
+#define FSCACHE_COOKIE_DO_PREP_TO_WRITE 12 /* T if cookie needs write preparation */
+#define FSCACHE_COOKIE_HAVE_DATA 13 /* T if this cookie has data stored */
+#define FSCACHE_COOKIE_IS_HASHED 14 /* T if this cookie is hashed */
+#define FSCACHE_COOKIE_DO_INVALIDATE 15 /* T if cookie needs invalidation */
+
+ enum fscache_cookie_state state;
+ u8 advice; /* FSCACHE_ADV_* */
u8 key_len; /* Length of index key */
u8 aux_len; /* Length of auxiliary data */
- u32 key_hash; /* Hash of parent, type, key, len */
+ u32 key_hash; /* Hash of volume, key, len */
union {
void *key; /* Index key */
u8 inline_key[16]; /* - If the key is short enough */
@@ -171,11 +147,6 @@ struct fscache_cookie {
};
};
-static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie)
-{
- return test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
-}
-
/*
* slow-path functions for when there is actually caching available, and the
* netfs does actually have a valid token
@@ -183,661 +154,542 @@ static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie)
* - these are undefined symbols when FS-Cache is not configured and the
* optimiser takes care of not using them
*/
-extern int __fscache_register_netfs(struct fscache_netfs *);
-extern void __fscache_unregister_netfs(struct fscache_netfs *);
-extern struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *);
-extern void __fscache_release_cache_tag(struct fscache_cache_tag *);
+extern struct fscache_volume *__fscache_acquire_volume(const char *, const char *,
+ const void *, size_t);
+extern void __fscache_relinquish_volume(struct fscache_volume *, const void *, bool);
extern struct fscache_cookie *__fscache_acquire_cookie(
- struct fscache_cookie *,
- const struct fscache_cookie_def *,
+ struct fscache_volume *,
+ u8,
const void *, size_t,
const void *, size_t,
- void *, loff_t, bool);
-extern void __fscache_relinquish_cookie(struct fscache_cookie *, const void *, bool);
-extern int __fscache_check_consistency(struct fscache_cookie *, const void *);
-extern void __fscache_update_cookie(struct fscache_cookie *, const void *);
-extern int __fscache_attr_changed(struct fscache_cookie *);
-extern void __fscache_invalidate(struct fscache_cookie *);
-extern void __fscache_wait_on_invalidate(struct fscache_cookie *);
-extern int __fscache_read_or_alloc_page(struct fscache_cookie *,
- struct page *,
- fscache_rw_complete_t,
- void *,
- gfp_t);
-extern int __fscache_read_or_alloc_pages(struct fscache_cookie *,
- struct address_space *,
- struct list_head *,
- unsigned *,
- fscache_rw_complete_t,
- void *,
- gfp_t);
-extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t);
-extern int __fscache_write_page(struct fscache_cookie *, struct page *, loff_t, gfp_t);
-extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
-extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
-extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
-extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *,
- gfp_t);
-extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *,
- struct inode *);
-extern void __fscache_readpages_cancel(struct fscache_cookie *cookie,
- struct list_head *pages);
-extern void __fscache_disable_cookie(struct fscache_cookie *, const void *, bool);
-extern void __fscache_enable_cookie(struct fscache_cookie *, const void *, loff_t,
- bool (*)(void *), void *);
+ loff_t);
+extern void __fscache_use_cookie(struct fscache_cookie *, bool);
+extern void __fscache_unuse_cookie(struct fscache_cookie *, const void *, const loff_t *);
+extern void __fscache_relinquish_cookie(struct fscache_cookie *, bool);
+extern void __fscache_resize_cookie(struct fscache_cookie *, loff_t);
+extern void __fscache_invalidate(struct fscache_cookie *, const void *, loff_t, unsigned int);
+extern int __fscache_begin_read_operation(struct netfs_cache_resources *, struct fscache_cookie *);
+extern int __fscache_begin_write_operation(struct netfs_cache_resources *, struct fscache_cookie *);
+
+extern void __fscache_write_to_cache(struct fscache_cookie *, struct address_space *,
+ loff_t, size_t, loff_t, netfs_io_terminated_t, void *,
+ bool);
+extern void __fscache_clear_page_bits(struct address_space *, loff_t, size_t);
/**
- * fscache_register_netfs - Register a filesystem as desiring caching services
- * @netfs: The description of the filesystem
+ * fscache_acquire_volume - Register a volume as desiring caching services
+ * @volume_key: An identification string for the volume
+ * @cache_name: The name of the cache to use (or NULL for the default)
+ * @coherency_data: Piece of arbitrary coherency data to check (or NULL)
+ * @coherency_len: The size of the coherency data
*
- * Register a filesystem as desiring caching services if they're available.
+ * Register a volume as desiring caching services if they're available. The
+ * caller must provide an identifier for the volume and may also indicate which
+ * cache it should be in. If a preexisting volume entry is found in the cache,
+ * the coherency data must match, otherwise the entry will be invalidated.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Returns a cookie pointer on success, -ENOMEM if out of memory or -EBUSY if a
+ * cache volume of that name is already acquired. Note that "NULL" is a valid
+ * cookie pointer and can be returned if caching is refused.
*/
static inline
-int fscache_register_netfs(struct fscache_netfs *netfs)
+struct fscache_volume *fscache_acquire_volume(const char *volume_key,
+ const char *cache_name,
+ const void *coherency_data,
+ size_t coherency_len)
{
- if (fscache_available())
- return __fscache_register_netfs(netfs);
- else
- return 0;
+ if (!fscache_available())
+ return NULL;
+ return __fscache_acquire_volume(volume_key, cache_name,
+ coherency_data, coherency_len);
}
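/*
 * Example usage (sketch; struct myfs_sb_info and the myfs_* names are
 * hypothetical): a netfs would typically acquire one volume cookie per
 * superblock at mount time. The coherency blob here is just the volume
 * creation time.
 */
static int myfs_cache_mount(struct myfs_sb_info *sbi, time64_t create_time)
{
	struct fscache_volume *vc;

	vc = fscache_acquire_volume("myfs,example.com,vol0",	/* volume key */
				    NULL,			/* default cache */
				    &create_time, sizeof(create_time));
	if (IS_ERR(vc))
		return PTR_ERR(vc);	/* e.g. -EBUSY on a key collision */
	sbi->volume = vc;		/* may be NULL if caching is refused */
	return 0;
}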
/**
- * fscache_unregister_netfs - Indicate that a filesystem no longer desires
- * caching services
- * @netfs: The description of the filesystem
- *
- * Indicate that a filesystem no longer desires caching services for the
- * moment.
+ * fscache_relinquish_volume - Cease caching a volume
+ * @volume: The volume cookie
+ * @coherency_data: Piece of arbitrary coherency data to set (or NULL)
+ * @invalidate: True if the volume should be invalidated
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Indicate that a filesystem no longer desires caching services for a volume.
+ * The caller must have relinquished all file cookies prior to calling this.
+ * The stored coherency data is updated.
*/
static inline
-void fscache_unregister_netfs(struct fscache_netfs *netfs)
+void fscache_relinquish_volume(struct fscache_volume *volume,
+ const void *coherency_data,
+ bool invalidate)
{
- if (fscache_available())
- __fscache_unregister_netfs(netfs);
+ if (fscache_volume_valid(volume))
+ __fscache_relinquish_volume(volume, coherency_data, invalidate);
}
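/*
 * Example usage (sketch; myfs_* names hypothetical): the unmount-time
 * counterpart of the example above, writing back fresh coherency data.
 */
static void myfs_cache_unmount(struct myfs_sb_info *sbi, time64_t create_time)
{
	fscache_relinquish_volume(sbi->volume, &create_time, false);
	sbi->volume = NULL;
}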
/**
- * fscache_lookup_cache_tag - Look up a cache tag
- * @name: The name of the tag to search for
+ * fscache_acquire_cookie - Acquire a cookie to represent a cache object
+ * @volume: The volume in which to locate/create this cookie
+ * @advice: Advice flags (FSCACHE_COOKIE_ADV_*)
+ * @index_key: The index key for this cookie
+ * @index_key_len: Size of the index key
+ * @aux_data: The auxiliary data for the cookie (may be NULL)
+ * @aux_data_len: Size of the auxiliary data buffer
+ * @object_size: The initial size of object
*
- * Acquire a specific cache referral tag that can be used to select a specific
- * cache in which to cache an index.
+ * Acquire a cookie to represent a data file within the given cache volume.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-struct fscache_cache_tag *fscache_lookup_cache_tag(const char *name)
+struct fscache_cookie *fscache_acquire_cookie(struct fscache_volume *volume,
+ u8 advice,
+ const void *index_key,
+ size_t index_key_len,
+ const void *aux_data,
+ size_t aux_data_len,
+ loff_t object_size)
{
- if (fscache_available())
- return __fscache_lookup_cache_tag(name);
- else
+ if (!fscache_volume_valid(volume))
return NULL;
+ return __fscache_acquire_cookie(volume, advice,
+ index_key, index_key_len,
+ aux_data, aux_data_len,
+ object_size);
}
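/*
 * Example usage (sketch; the myfs_* types and fields are hypothetical):
 * acquire a data-file cookie within the volume, keyed by the server-side
 * file ID, with {mtime, size} packed as the auxiliary coherency data.
 */
static void myfs_init_cookie(struct myfs_inode *mi, struct fscache_volume *vc)
{
	struct {
		__le64 mtime;
		__le64 size;
	} aux = {
		.mtime = cpu_to_le64(mi->server_mtime),
		.size  = cpu_to_le64(mi->server_size),
	};

	mi->cookie = fscache_acquire_cookie(vc, 0 /* advice */,
					    &mi->fid, sizeof(mi->fid),
					    &aux, sizeof(aux),
					    mi->server_size);
}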
/**
- * fscache_release_cache_tag - Release a cache tag
- * @tag: The tag to release
- *
- * Release a reference to a cache referral tag previously looked up.
+ * fscache_use_cookie - Request usage of cookie attached to an object
+ * @cookie: The cookie representing the cache object
+ * @will_modify: If cache is expected to be modified locally
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Request usage of the cookie attached to an object. The caller should tell
+ * the cache if the object's contents are about to be modified locally and then
+ * the cache can apply the policy that has been set to handle this case.
*/
-static inline
-void fscache_release_cache_tag(struct fscache_cache_tag *tag)
+static inline void fscache_use_cookie(struct fscache_cookie *cookie,
+ bool will_modify)
{
- if (fscache_available())
- __fscache_release_cache_tag(tag);
+ if (fscache_cookie_valid(cookie))
+ __fscache_use_cookie(cookie, will_modify);
}
/**
- * fscache_acquire_cookie - Acquire a cookie to represent a cache object
- * @parent: The cookie that's to be the parent of this one
- * @def: A description of the cache object, including callback operations
- * @index_key: The index key for this cookie
- * @index_key_len: Size of the index key
- * @aux_data: The auxiliary data for the cookie (may be NULL)
- * @aux_data_len: Size of the auxiliary data buffer
- * @netfs_data: An arbitrary piece of data to be kept in the cookie to
- * represent the cache object to the netfs
- * @object_size: The initial size of object
- * @enable: Whether or not to enable a data cookie immediately
- *
- * This function is used to inform FS-Cache about part of an index hierarchy
- * that can be used to locate files. This is done by requesting a cookie for
- * each index in the path to the file.
+ * fscache_unuse_cookie - Cease usage of cookie attached to an object
+ * @cookie: The cookie representing the cache object
+ * @aux_data: Updated auxiliary data (or NULL)
+ * @object_size: Revised size of the object (or NULL)
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Cease usage of the cookie attached to an object. When the usage count
+ * reaches zero, relinquishment of the cookie will be permitted to proceed.
*/
-static inline
-struct fscache_cookie *fscache_acquire_cookie(
- struct fscache_cookie *parent,
- const struct fscache_cookie_def *def,
- const void *index_key,
- size_t index_key_len,
- const void *aux_data,
- size_t aux_data_len,
- void *netfs_data,
- loff_t object_size,
- bool enable)
+static inline void fscache_unuse_cookie(struct fscache_cookie *cookie,
+ const void *aux_data,
+ const loff_t *object_size)
{
- if (fscache_cookie_valid(parent) && fscache_cookie_enabled(parent))
- return __fscache_acquire_cookie(parent, def,
- index_key, index_key_len,
- aux_data, aux_data_len,
- netfs_data, object_size, enable);
- else
- return NULL;
+ if (fscache_cookie_valid(cookie))
+ __fscache_unuse_cookie(cookie, aux_data, object_size);
}
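/*
 * Example usage (sketch; myfs_i_cookie() is a hypothetical helper returning
 * the cookie stashed in the inode): the use/unuse pair brackets the period
 * during which the netfs may do I/O on the cookie, typically from file open
 * to final close. Passing the size at unuse time lets the cache trim or
 * commit the object appropriately.
 */
static int myfs_open(struct inode *inode, struct file *file)
{
	fscache_use_cookie(myfs_i_cookie(inode), file->f_mode & FMODE_WRITE);
	return 0;
}

static int myfs_release(struct inode *inode, struct file *file)
{
	loff_t i_size = i_size_read(inode);

	fscache_unuse_cookie(myfs_i_cookie(inode), NULL, &i_size);
	return 0;
}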
/**
* fscache_relinquish_cookie - Return the cookie to the cache, maybe discarding
* it
* @cookie: The cookie being returned
- * @aux_data: The updated auxiliary data for the cookie (may be NULL)
* @retire: True if the cache object the cookie represents is to be discarded
*
* This function returns a cookie to the cache, forcibly discarding the
- * associated cache object if retire is set to true. The opportunity is
- * provided to update the auxiliary data in the cache before the object is
- * disconnected.
+ * associated cache object if retire is set to true.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-void fscache_relinquish_cookie(struct fscache_cookie *cookie,
- const void *aux_data,
- bool retire)
+void fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
{
if (fscache_cookie_valid(cookie))
- __fscache_relinquish_cookie(cookie, aux_data, retire);
+ __fscache_relinquish_cookie(cookie, retire);
}
-/**
- * fscache_check_consistency - Request validation of a cache's auxiliary data
- * @cookie: The cookie representing the cache object
- * @aux_data: The updated auxiliary data for the cookie (may be NULL)
- *
- * Request an consistency check from fscache, which passes the request to the
- * backing cache. The auxiliary data on the cookie will be updated first if
- * @aux_data is set.
- *
- * Returns 0 if consistent and -ESTALE if inconsistent. May also
- * return -ENOMEM and -ERESTARTSYS.
+/*
+ * Find the auxiliary data on a cookie.
*/
-static inline
-int fscache_check_consistency(struct fscache_cookie *cookie,
- const void *aux_data)
+static inline void *fscache_get_aux(struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_check_consistency(cookie, aux_data);
+ if (cookie->aux_len <= sizeof(cookie->inline_aux))
+ return cookie->inline_aux;
else
- return 0;
+ return cookie->aux;
}
-/**
- * fscache_update_cookie - Request that a cache object be updated
- * @cookie: The cookie representing the cache object
- * @aux_data: The updated auxiliary data for the cookie (may be NULL)
- *
- * Request an update of the index data for the cache object associated with the
- * cookie. The auxiliary data on the cookie will be updated first if @aux_data
- * is set.
- *
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+/*
+ * Update the auxiliary data on a cookie.
*/
static inline
-void fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data)
+void fscache_update_aux(struct fscache_cookie *cookie,
+ const void *aux_data, const loff_t *object_size)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- __fscache_update_cookie(cookie, aux_data);
+ void *p = fscache_get_aux(cookie);
+
+ if (aux_data && p)
+ memcpy(p, aux_data, cookie->aux_len);
+ if (object_size)
+ cookie->object_size = *object_size;
}
-/**
- * fscache_pin_cookie - Pin a data-storage cache object in its cache
- * @cookie: The cookie representing the cache object
- *
- * Permit data-storage cache objects to be pinned in the cache.
- *
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
- */
+#ifdef CONFIG_FSCACHE_STATS
+extern atomic_t fscache_n_updates;
+#endif
+
static inline
-int fscache_pin_cookie(struct fscache_cookie *cookie)
+void __fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data,
+ const loff_t *object_size)
{
- return -ENOBUFS;
+#ifdef CONFIG_FSCACHE_STATS
+ atomic_inc(&fscache_n_updates);
+#endif
+ fscache_update_aux(cookie, aux_data, object_size);
+ smp_wmb();
+ set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags);
}
/**
- * fscache_pin_cookie - Unpin a data-storage cache object in its cache
+ * fscache_update_cookie - Request that a cache object be updated
* @cookie: The cookie representing the cache object
+ * @aux_data: The updated auxiliary data for the cookie (may be NULL)
+ * @object_size: The current size of the object (may be NULL)
*
- * Permit data-storage cache objects to be unpinned from the cache.
+ * Request an update of the index data for the cache object associated with the
+ * cookie. The auxiliary data on the cookie will be updated first if @aux_data
+ * is set and the object size will be updated and the object possibly trimmed
+ * if @object_size is set.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-void fscache_unpin_cookie(struct fscache_cookie *cookie)
+void fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data,
+ const loff_t *object_size)
{
+ if (fscache_cookie_enabled(cookie))
+ __fscache_update_cookie(cookie, aux_data, object_size);
}
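/*
 * Example usage (sketch; myfs_i_cookie() hypothetical): refresh the
 * coherency data when the server reports a change. The aux layout must
 * match what was passed to fscache_acquire_cookie().
 */
static void myfs_note_remote_change(struct inode *inode, u64 new_mtime)
{
	__le64 aux = cpu_to_le64(new_mtime);
	loff_t size = i_size_read(inode);

	fscache_update_cookie(myfs_i_cookie(inode), &aux, &size);
}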
/**
- * fscache_attr_changed - Notify cache that an object's attributes changed
+ * fscache_resize_cookie - Request that a cache object be resized
* @cookie: The cookie representing the cache object
+ * @new_size: The new size of the object
*
- * Send a notification to the cache indicating that an object's attributes have
- * changed. This includes the data size. These attributes will be obtained
- * through the get_attr() cookie definition op.
+ * Request that the size of an object be changed.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-int fscache_attr_changed(struct fscache_cookie *cookie)
+void fscache_resize_cookie(struct fscache_cookie *cookie, loff_t new_size)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_attr_changed(cookie);
- else
- return -ENOBUFS;
+ if (fscache_cookie_enabled(cookie))
+ __fscache_resize_cookie(cookie, new_size);
}
/**
* fscache_invalidate - Notify cache that an object needs invalidation
* @cookie: The cookie representing the cache object
+ * @aux_data: The updated auxiliary data for the cookie (may be NULL)
+ * @size: The revised size of the object.
+ * @flags: Invalidation flags (FSCACHE_INVAL_*)
*
 * Notify the cache that an object needs to be invalidated and that it
- * should abort any retrievals or stores it is doing on the cache. The object
- * is then marked non-caching until such time as the invalidation is complete.
+ * should abort any retrievals or stores it is doing on the cache. This
+ * increments inval_counter on the cookie which can be used by the caller to
+ * reconsider I/O requests as they complete.
*
- * This can be called with spinlocks held.
+ * If @flags has FSCACHE_INVAL_DIO_WRITE set, this indicates that this is due
+ * to a direct I/O write and will cause caching to be disabled on this cookie
+ * until it is completely unused.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-void fscache_invalidate(struct fscache_cookie *cookie)
+void fscache_invalidate(struct fscache_cookie *cookie,
+ const void *aux_data, loff_t size, unsigned int flags)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- __fscache_invalidate(cookie);
+ if (fscache_cookie_enabled(cookie))
+ __fscache_invalidate(cookie, aux_data, size, flags);
}
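/*
 * Example usage (sketch; myfs_i_cookie() hypothetical): drop the cached
 * data when a direct I/O write makes it unusable, per the note above.
 */
static void myfs_begin_dio_write(struct inode *inode, loff_t new_size)
{
	fscache_invalidate(myfs_i_cookie(inode), NULL, new_size,
			   FSCACHE_INVAL_DIO_WRITE);
}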
/**
- * fscache_wait_on_invalidate - Wait for invalidation to complete
- * @cookie: The cookie representing the cache object
+ * fscache_operation_valid - Return true if operations resources are usable
+ * @cres: The resources to check.
*
- * Wait for the invalidation of an object to complete.
- *
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Returns a pointer to the operations table if usable or NULL if not.
*/
static inline
-void fscache_wait_on_invalidate(struct fscache_cookie *cookie)
+const struct netfs_cache_ops *fscache_operation_valid(const struct netfs_cache_resources *cres)
{
- if (fscache_cookie_valid(cookie))
- __fscache_wait_on_invalidate(cookie);
+ return fscache_resources_valid(cres) ? cres->ops : NULL;
}
/**
- * fscache_reserve_space - Reserve data space for a cached object
+ * fscache_begin_read_operation - Begin a read operation for the netfs lib
+ * @cres: The cache resources for the read being performed
* @cookie: The cookie representing the cache object
- * @i_size: The amount of space to be reserved
*
- * Reserve an amount of space in the cache for the cache object attached to a
- * cookie so that a write to that object within the space can always be
- * honoured.
+ * Begin a read operation on behalf of the netfs helper library. @cres
+ * indicates the cache resources to which the operation state should be
+ * attached; @cookie indicates the cache object that will be accessed.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * This is intended to be called from the ->begin_cache_operation() netfs lib
+ * operation as implemented by the network filesystem.
+ *
+ * @cres->inval_counter is set from @cookie->inval_counter for comparison at
+ * the end of the operation. This allows invalidation during the operation to
+ * be detected by the caller.
+ *
+ * Returns:
+ * * 0 - Success
+ * * -ENOBUFS - No caching available
+ * * Other error code from the cache, such as -ENOMEM.
*/
static inline
-int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size)
+int fscache_begin_read_operation(struct netfs_cache_resources *cres,
+ struct fscache_cookie *cookie)
{
+ if (fscache_cookie_enabled(cookie))
+ return __fscache_begin_read_operation(cres, cookie);
return -ENOBUFS;
}
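/*
 * Example usage (sketch): hooking the cache into the netfs helper library.
 * This assumes the netfs lib of this era, where the network filesystem's
 * ->begin_cache_operation() hook is handed a struct netfs_io_request
 * carrying the cache resources; myfs_i_cookie() is a hypothetical helper.
 */
static int myfs_begin_cache_operation(struct netfs_io_request *rreq)
{
	return fscache_begin_read_operation(&rreq->cache_resources,
					    myfs_i_cookie(rreq->inode));
}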
/**
- * fscache_read_or_alloc_page - Read a page from the cache or allocate a block
- * in which to store it
- * @cookie: The cookie representing the cache object
- * @page: The netfs page to fill if possible
- * @end_io_func: The callback to invoke when and if the page is filled
- * @context: An arbitrary piece of data to pass on to end_io_func()
- * @gfp: The conditions under which memory allocation should be made
- *
- * Read a page from the cache, or if that's not possible make a potential
- * one-block reservation in the cache into which the page may be stored once
- * fetched from the server.
- *
- * If the page is not backed by the cache object, or if it there's some reason
- * it can't be, -ENOBUFS will be returned and nothing more will be done for
- * that page.
- *
- * Else, if that page is backed by the cache, a read will be initiated directly
- * to the netfs's page and 0 will be returned by this function. The
- * end_io_func() callback will be invoked when the operation terminates on a
- * completion or failure. Note that the callback may be invoked before the
- * return.
+ * fscache_end_operation - End the read operation for the netfs lib
+ * @cres: The cache resources for the read operation
*
- * Else, if the page is unbacked, -ENODATA is returned and a block may have
- * been allocated in the cache.
- *
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Clean up the resources at the end of the read request.
*/
-static inline
-int fscache_read_or_alloc_page(struct fscache_cookie *cookie,
- struct page *page,
- fscache_rw_complete_t end_io_func,
- void *context,
- gfp_t gfp)
+static inline void fscache_end_operation(struct netfs_cache_resources *cres)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_read_or_alloc_page(cookie, page, end_io_func,
- context, gfp);
- else
- return -ENOBUFS;
-}
+ const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
-/**
- * fscache_read_or_alloc_pages - Read pages from the cache and/or allocate
- * blocks in which to store them
- * @cookie: The cookie representing the cache object
- * @mapping: The netfs inode mapping to which the pages will be attached
- * @pages: A list of potential netfs pages to be filled
- * @nr_pages: Number of pages to be read and/or allocated
- * @end_io_func: The callback to invoke when and if each page is filled
- * @context: An arbitrary piece of data to pass on to end_io_func()
- * @gfp: The conditions under which memory allocation should be made
- *
- * Read a set of pages from the cache, or if that's not possible, attempt to
- * make a potential one-block reservation for each page in the cache into which
- * that page may be stored once fetched from the server.
- *
- * If some pages are not backed by the cache object, or if it there's some
- * reason they can't be, -ENOBUFS will be returned and nothing more will be
- * done for that pages.
- *
- * Else, if some of the pages are backed by the cache, a read will be initiated
- * directly to the netfs's page and 0 will be returned by this function. The
- * end_io_func() callback will be invoked when the operation terminates on a
- * completion or failure. Note that the callback may be invoked before the
- * return.
- *
- * Else, if a page is unbacked, -ENODATA is returned and a block may have
- * been allocated in the cache.
- *
- * Because the function may want to return all of -ENOBUFS, -ENODATA and 0 in
- * regard to different pages, the return values are prioritised in that order.
- * Any pages submitted for reading are removed from the pages list.
- *
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
- */
-static inline
-int fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages,
- fscache_rw_complete_t end_io_func,
- void *context,
- gfp_t gfp)
-{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_read_or_alloc_pages(cookie, mapping, pages,
- nr_pages, end_io_func,
- context, gfp);
- else
- return -ENOBUFS;
+ if (ops)
+ ops->end_operation(cres);
}
/**
- * fscache_alloc_page - Allocate a block in which to store a page
- * @cookie: The cookie representing the cache object
- * @page: The netfs page to allocate a page for
- * @gfp: The conditions under which memory allocation should be made
+ * fscache_read - Start a read from the cache.
+ * @cres: The cache resources to use
+ * @start_pos: The beginning file offset in the cache file
+ * @iter: The buffer to fill - and also the length
+ * @read_hole: How to handle a hole in the data.
+ * @term_func: The function to call upon completion
+ * @term_func_priv: The private data for @term_func
*
- * Request Allocation a block in the cache in which to store a netfs page
- * without retrieving any contents from the cache.
+ * Start a read from the cache. @cres indicates the cache object to read from
+ * and must be obtained by a call to fscache_begin_read_operation() beforehand.
*
- * If the page is not backed by a file then -ENOBUFS will be returned and
- * nothing more will be done, and no reservation will be made.
+ * The data is read into the iterator, @iter, and that also indicates the size
+ * of the operation. @start_pos is the start position in the file, though if
+ * @read_hole is set appropriately, the cache can use SEEK_DATA to find the
+ * next piece of data, writing zeros for the hole into the iterator.
*
- * Else, a block will be allocated if one wasn't already, and 0 will be
- * returned
+ * Upon termination of the operation, @term_func will be called and supplied
+ * with @term_func_priv plus the amount of data read, if successful, or the
+ * error code otherwise.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
- */
-static inline
-int fscache_alloc_page(struct fscache_cookie *cookie,
- struct page *page,
- gfp_t gfp)
-{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_alloc_page(cookie, page, gfp);
- else
- return -ENOBUFS;
-}
-
-/**
- * fscache_readpages_cancel - Cancel read/alloc on pages
- * @cookie: The cookie representing the inode's cache object.
- * @pages: The netfs pages that we canceled write on in readpages()
+ * @read_hole indicates how a partially populated region in the cache should be
+ * handled. It can be one of a number of settings:
+ *
+ * NETFS_READ_HOLE_IGNORE - Just try to read (may return a short read).
*
- * Uncache/unreserve the pages reserved earlier in readpages() via
- * fscache_readpages_or_alloc() and similar. In most successful caches in
- * readpages() this doesn't do anything. In cases when the underlying netfs's
- * readahead failed we need to clean up the pagelist (unmark and uncache).
+ * NETFS_READ_HOLE_CLEAR - Seek for data, clearing the part of the buffer
+ * skipped over, then do as for IGNORE.
*
- * This function may sleep as it may have to clean up disk state.
+ * NETFS_READ_HOLE_FAIL - Give ENODATA if we encounter a hole.
*/
static inline
-void fscache_readpages_cancel(struct fscache_cookie *cookie,
- struct list_head *pages)
+int fscache_read(struct netfs_cache_resources *cres,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ enum netfs_read_from_hole read_hole,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv)
{
- if (fscache_cookie_valid(cookie))
- __fscache_readpages_cancel(cookie, pages);
+ const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
+
+ return ops->read(cres, start_pos, iter, read_hole,
+ term_func, term_func_priv);
}
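/*
 * Example usage (sketch; myfs_* names hypothetical): fire a cache read into
 * a prepared iterator. Completion is signalled through the termination
 * function, which receives the byte count or an error code; whether it runs
 * synchronously is up to the cache backend.
 */
static void myfs_cache_read_done(void *priv, ssize_t transferred_or_error,
				 bool was_async)
{
	if (transferred_or_error < 0)
		pr_debug("cache read failed: %zd\n", transferred_or_error);
}

static int myfs_cache_read(struct netfs_cache_resources *cres,
			   loff_t pos, struct iov_iter *iter)
{
	return fscache_read(cres, pos, iter, NETFS_READ_HOLE_IGNORE,
			    myfs_cache_read_done, NULL);
}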
/**
- * fscache_write_page - Request storage of a page in the cache
+ * fscache_begin_write_operation - Begin a write operation for the netfs lib
+ * @cres: The cache resources for the write being performed
* @cookie: The cookie representing the cache object
- * @page: The netfs page to store
- * @object_size: Updated size of object
- * @gfp: The conditions under which memory allocation should be made
*
- * Request the contents of the netfs page be written into the cache. This
- * request may be ignored if no cache block is currently allocated, in which
- * case it will return -ENOBUFS.
+ * Begin a write operation on behalf of the netfs helper library. @cres
+ * indicates the cache resources to which the operation state should be
+ * attached; @cookie indicates the cache object that will be accessed.
*
- * If a cache block was already allocated, a write will be initiated and 0 will
- * be returned. The PG_fscache_write page bit is set immediately and will then
- * be cleared at the completion of the write to indicate the success or failure
- * of the operation. Note that the completion may happen before the return.
+ * @cres->inval_counter is set from @cookie->inval_counter for comparison at
+ * the end of the operation. This allows invalidation during the operation to
+ * be detected by the caller.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Returns:
+ * * 0 - Success
+ * * -ENOBUFS - No caching available
+ * * Other error code from the cache, such as -ENOMEM.
*/
static inline
-int fscache_write_page(struct fscache_cookie *cookie,
- struct page *page,
- loff_t object_size,
- gfp_t gfp)
+int fscache_begin_write_operation(struct netfs_cache_resources *cres,
+ struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_write_page(cookie, page, object_size, gfp);
- else
- return -ENOBUFS;
+ if (fscache_cookie_enabled(cookie))
+ return __fscache_begin_write_operation(cres, cookie);
+ return -ENOBUFS;
}
/**
- * fscache_uncache_page - Indicate that caching is no longer required on a page
- * @cookie: The cookie representing the cache object
- * @page: The netfs page that was being cached.
+ * fscache_write - Start a write to the cache.
+ * @cres: The cache resources to use
+ * @start_pos: The beginning file offset in the cache file
+ * @iter: The data to write - and also the length
+ * @term_func: The function to call upon completion
+ * @term_func_priv: The private data for @term_func
*
- * Tell the cache that we no longer want a page to be cached and that it should
- * remove any knowledge of the netfs page it may have.
+ * Start a write to the cache. @cres indicates the cache object to write to and
+ * must be obtained by a call to fscache_begin_write_operation() beforehand.
*
- * Note that this cannot cancel any outstanding I/O operations between this
- * page and the cache.
+ * The data to be written is obtained from the iterator, @iter, and that also
+ * indicates the size of the operation. @start_pos is the start position in
+ * the file.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Upon termination of the operation, @term_func will be called and supplied
+ * with @term_func_priv plus the amount of data written, if successful, or the
+ * error code otherwise.
*/
static inline
-void fscache_uncache_page(struct fscache_cookie *cookie,
- struct page *page)
+int fscache_write(struct netfs_cache_resources *cres,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv)
{
- if (fscache_cookie_valid(cookie))
- __fscache_uncache_page(cookie, page);
+ const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
+
+ return ops->write(cres, start_pos, iter, term_func, term_func_priv);
}
/**
- * fscache_check_page_write - Ask if a page is being writing to the cache
- * @cookie: The cookie representing the cache object
- * @page: The netfs page that is being cached.
- *
- * Ask the cache if a page is being written to the cache.
- *
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
- */
-static inline
-bool fscache_check_page_write(struct fscache_cookie *cookie,
- struct page *page)
+ * fscache_clear_page_bits - Clear the PG_fscache bits from a set of pages
+ * @mapping: The netfs inode to use as the source
+ * @start: The start position in @mapping
+ * @len: The amount of data to unlock
+ * @caching: If PG_fscache has been set
+ *
+ * Clear the PG_fscache flag from a sequence of pages and wake up anyone who's
+ * waiting.
+ */
+static inline void fscache_clear_page_bits(struct address_space *mapping,
+ loff_t start, size_t len,
+ bool caching)
{
- if (fscache_cookie_valid(cookie))
- return __fscache_check_page_write(cookie, page);
- return false;
+ if (caching)
+ __fscache_clear_page_bits(mapping, start, len);
}
/**
- * fscache_wait_on_page_write - Wait for a page to complete writing to the cache
+ * fscache_write_to_cache - Save a write to the cache and clear PG_fscache
* @cookie: The cookie representing the cache object
- * @page: The netfs page that is being cached.
- *
- * Ask the cache to wake us up when a page is no longer being written to the
- * cache.
- *
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
- */
-static inline
-void fscache_wait_on_page_write(struct fscache_cookie *cookie,
- struct page *page)
+ * @mapping: The netfs inode to use as the source
+ * @start: The start position in @mapping
+ * @len: The amount of data to write back
+ * @i_size: The new size of the inode
+ * @term_func: The function to call upon completion
+ * @term_func_priv: The private data for @term_func
+ * @caching: If PG_fscache has been set
+ *
+ * Helper function for a netfs to write dirty data from an inode into the cache
+ * object that's backing it.
+ *
+ * @start and @len describe the range of the data. This does not need to be
+ * page-aligned, but to satisfy DIO requirements, the cache may expand it up to
+ * the page boundaries on either end. All the pages covering the range must be
+ * marked with PG_fscache.
+ *
+ * If given, @term_func will be called upon completion and supplied with
+ * @term_func_priv. Note that the PG_fscache flag will have been cleared by
+ * this point, so the netfs must retain its own pin on the mapping.
+ */
+static inline void fscache_write_to_cache(struct fscache_cookie *cookie,
+ struct address_space *mapping,
+ loff_t start, size_t len, loff_t i_size,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv,
+ bool caching)
{
- if (fscache_cookie_valid(cookie))
- __fscache_wait_on_page_write(cookie, page);
-}
+ if (caching)
+ __fscache_write_to_cache(cookie, mapping, start, len, i_size,
+ term_func, term_func_priv, caching);
+ else if (term_func)
+ term_func(term_func_priv, -ENOBUFS, false);
-/**
- * fscache_maybe_release_page - Consider releasing a page, cancelling a store
- * @cookie: The cookie representing the cache object
- * @page: The netfs page that is being cached.
- * @gfp: The gfp flags passed to releasepage()
- *
- * Consider releasing a page for the vmscan algorithm, on behalf of the netfs's
- * releasepage() call. A storage request on the page may cancelled if it is
- * not currently being processed.
- *
- * The function returns true if the page no longer has a storage request on it,
- * and false if a storage request is left in place. If true is returned, the
- * page will have been passed to fscache_uncache_page(). If false is returned
- * the page cannot be freed yet.
- */
-static inline
-bool fscache_maybe_release_page(struct fscache_cookie *cookie,
- struct page *page,
- gfp_t gfp)
-{
- if (fscache_cookie_valid(cookie) && PageFsCache(page))
- return __fscache_maybe_release_page(cookie, page, gfp);
- return true;
}
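/*
 * Example usage (sketch; myfs_* names hypothetical): the tail of a netfs
 * writeback path, copying freshly written data into the cache. @caching
 * would have been determined when PG_fscache was set on the pages; if
 * false, the termination function is simply called with -ENOBUFS as
 * described above.
 */
static void myfs_write_done(void *priv, ssize_t transferred_or_error,
			    bool was_async)
{
	/* PG_fscache has already been cleared on the pages by this point */
}

static void myfs_write_back_to_cache(struct inode *inode, loff_t start,
				     size_t len, bool caching)
{
	fscache_write_to_cache(myfs_i_cookie(inode), inode->i_mapping,
			       start, len, i_size_read(inode),
			       myfs_write_done, NULL, caching);
}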
+#if __fscache_available
+bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio,
+ struct fscache_cookie *cookie);
+#else
+#define fscache_dirty_folio(MAPPING, FOLIO, COOKIE) \
+ filemap_dirty_folio(MAPPING, FOLIO)
+#endif
+
/**
- * fscache_uncache_all_inode_pages - Uncache all an inode's pages
- * @cookie: The cookie representing the inode's cache object.
- * @inode: The inode to uncache pages from.
- *
- * Uncache all the pages in an inode that are marked PG_fscache, assuming them
- * to be associated with the given cookie.
+ * fscache_unpin_writeback - Unpin writeback resources
+ * @wbc: The writeback control
+ * @cookie: The cookie referring to the cache object
*
- * This function may sleep. It will wait for pages that are being written out
- * and will wait whilst the PG_fscache mark is removed by the cache.
+ * Unpin the writeback resources pinned by fscache_dirty_folio(). This is
+ * intended to be called by the netfs's ->write_inode() method.
*/
-static inline
-void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
- struct inode *inode)
+static inline void fscache_unpin_writeback(struct writeback_control *wbc,
+ struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie))
- __fscache_uncache_all_inode_pages(cookie, inode);
+ if (wbc->unpinned_fscache_wb)
+ fscache_unuse_cookie(cookie, NULL, NULL);
}
/**
- * fscache_disable_cookie - Disable a cookie
- * @cookie: The cookie representing the cache object
- * @aux_data: The updated auxiliary data for the cookie (may be NULL)
- * @invalidate: Invalidate the backing object
- *
- * Disable a cookie from accepting further alloc, read, write, invalidate,
- * update or acquire operations. Outstanding operations can still be waited
- * upon and pages can still be uncached and the cookie relinquished.
- *
- * This will not return until all outstanding operations have completed.
- *
- * If @invalidate is set, then the backing object will be invalidated and
- * detached, otherwise it will just be detached.
+ * fscache_clear_inode_writeback - Clear writeback resources pinned by an inode
+ * @cookie: The cookie referring to the cache object
+ * @inode: The inode to clean up
+ * @aux: Auxiliary data to apply to the inode
*
- * If @aux_data is set, then auxiliary data will be updated from that.
+ * Clear any writeback resources held by an inode when the inode is evicted.
+ * This must be called before clear_inode() is called.
*/
-static inline
-void fscache_disable_cookie(struct fscache_cookie *cookie,
- const void *aux_data,
- bool invalidate)
+static inline void fscache_clear_inode_writeback(struct fscache_cookie *cookie,
+ struct inode *inode,
+ const void *aux)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- __fscache_disable_cookie(cookie, aux_data, invalidate);
+ if (inode->i_state & I_PINNING_FSCACHE_WB) {
+ loff_t i_size = i_size_read(inode);
+ fscache_unuse_cookie(cookie, aux, &i_size);
+ }
}
/**
- * fscache_enable_cookie - Reenable a cookie
- * @cookie: The cookie representing the cache object
- * @aux_data: The updated auxiliary data for the cookie (may be NULL)
- * @object_size: Current size of object
- * @can_enable: A function to permit enablement once lock is held
- * @data: Data for can_enable()
+ * fscache_note_page_release - Note that a netfs page got released
+ * @cookie: The cookie corresponding to the file
*
- * Reenable a previously disabled cookie, allowing it to accept further alloc,
- * read, write, invalidate, update or acquire operations. An attempt will be
- * made to immediately reattach the cookie to a backing object. If @aux_data
- * is set, the auxiliary data attached to the cookie will be updated.
- *
- * The can_enable() function is called (if not NULL) once the enablement lock
- * is held to rule on whether enablement is still permitted to go ahead.
+ * Note that a page that has been copied to the cache has been released. This
+ * means that future reads will need to look in the cache to see if it's there.
*/
static inline
-void fscache_enable_cookie(struct fscache_cookie *cookie,
- const void *aux_data,
- loff_t object_size,
- bool (*can_enable)(void *data),
- void *data)
+void fscache_note_page_release(struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie) && !fscache_cookie_enabled(cookie))
- __fscache_enable_cookie(cookie, aux_data, object_size,
- can_enable, data);
+ /* If we've written data to the cache (HAVE_DATA) and there wasn't any
+ * data in the cache when we started (NO_DATA_TO_READ), it may no
+ * longer be true that we can skip reading from the cache - so clear
+ * the flag that causes reads to be skipped.
+ */
+ if (cookie &&
+ test_bit(FSCACHE_COOKIE_HAVE_DATA, &cookie->flags) &&
+ test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags))
+ clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
}
#endif /* _LINUX_FSCACHE_H */
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 556f4adf5dc5..4f5f8a651213 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -18,9 +18,22 @@
#include <linux/slab.h>
#include <uapi/linux/fscrypt.h>
-#define FS_CRYPTO_BLOCK_SIZE 16
+/*
+ * The lengths of all file contents blocks must be divisible by this value.
+ * This is needed to ensure that all contents encryption modes will work, as
+ * some of the supported modes don't support arbitrarily byte-aligned messages.
+ *
+ * Since the needed alignment is 16 bytes, most filesystems will meet this
+ * requirement naturally, as typical block sizes are powers of 2. However, if a
+ * filesystem can generate arbitrarily byte-aligned block lengths (e.g., via
+ * compression), then it will need to pad to this alignment before encryption.
+ */
+#define FSCRYPT_CONTENTS_ALIGNMENT 16
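/*
 * Example (sketch; myfs_* hypothetical): a filesystem that produces
 * variable-length blocks, e.g. via compression, would pad each block
 * length up to this alignment before encrypting it:
 */
static inline unsigned int myfs_padded_block_len(unsigned int compressed_len)
{
	return round_up(compressed_len, FSCRYPT_CONTENTS_ALIGNMENT);
}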
+union fscrypt_policy;
struct fscrypt_info;
+struct fs_parameter;
+struct seq_file;
struct fscrypt_str {
unsigned char *name;
@@ -33,7 +46,7 @@ struct fscrypt_name {
u32 hash;
u32 minor_hash;
struct fscrypt_str crypto_buf;
- bool is_ciphertext_name;
+ bool is_nokey_name;
};
#define FSTR_INIT(n, l) { .name = n, .len = l }
@@ -45,36 +58,141 @@ struct fscrypt_name {
#define FSCRYPT_SET_CONTEXT_MAX_SIZE 40
#ifdef CONFIG_FS_ENCRYPTION
+
/*
- * fscrypt superblock flags
+ * If set, the fscrypt bounce page pool won't be allocated (unless another
+ * filesystem needs it). Set this if the filesystem always uses its own bounce
+ * pages for writes and therefore won't need the fscrypt bounce page pool.
*/
#define FS_CFLG_OWN_PAGES (1U << 1)
-/*
- * crypto operations for filesystems
- */
+/* Crypto operations for filesystems */
struct fscrypt_operations {
+
+ /* Set of optional flags; see above for allowed flags */
unsigned int flags;
+
+ /*
+ * If set, this is a filesystem-specific key description prefix that
+ * will be accepted for "logon" keys for v1 fscrypt policies, in
+ * addition to the generic prefix "fscrypt:". This functionality is
+ * deprecated, so new filesystems shouldn't set this field.
+ */
const char *key_prefix;
- int (*get_context)(struct inode *, void *, size_t);
- int (*set_context)(struct inode *, const void *, size_t, void *);
- bool (*dummy_context)(struct inode *);
- bool (*empty_dir)(struct inode *);
- unsigned int max_namelen;
+
+ /*
+ * Get the fscrypt context of the given inode.
+ *
+ * @inode: the inode whose context to get
+ * @ctx: the buffer into which to get the context
+ * @len: length of the @ctx buffer in bytes
+ *
+ * Return: On success, returns the length of the context in bytes; this
+ * may be less than @len. On failure, returns -ENODATA if the
+ * inode doesn't have a context, -ERANGE if the context is
+ * longer than @len, or another -errno code.
+ */
+ int (*get_context)(struct inode *inode, void *ctx, size_t len);
+
+ /*
+ * Set an fscrypt context on the given inode.
+ *
+ * @inode: the inode whose context to set. The inode won't already have
+ * an fscrypt context.
+ * @ctx: the context to set
+ * @len: length of @ctx in bytes (at most FSCRYPT_SET_CONTEXT_MAX_SIZE)
+ * @fs_data: If called from fscrypt_set_context(), this will be the
+ * value the filesystem passed to fscrypt_set_context().
+ * Otherwise (i.e. when called from
+ * FS_IOC_SET_ENCRYPTION_POLICY) this will be NULL.
+ *
+ * i_rwsem will be held for write.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+ int (*set_context)(struct inode *inode, const void *ctx, size_t len,
+ void *fs_data);
+
+ /*
+ * Get the dummy fscrypt policy in use on the filesystem (if any).
+ *
+ * Filesystems only need to implement this function if they support the
+ * test_dummy_encryption mount option.
+ *
+ * Return: A pointer to the dummy fscrypt policy, if the filesystem is
+ * mounted with test_dummy_encryption; otherwise NULL.
+ */
+ const union fscrypt_policy *(*get_dummy_policy)(struct super_block *sb);
+
+ /*
+ * Check whether a directory is empty. i_rwsem will be held for write.
+ */
+ bool (*empty_dir)(struct inode *inode);
+
+ /*
+ * Check whether the filesystem's inode numbers and UUID are stable,
+ * meaning that they will never be changed even by offline operations
+ * such as filesystem shrinking and therefore can be used in the
+ * encryption without the possibility of files becoming unreadable.
+ *
+ * Filesystems only need to implement this function if they want to
+ * support the FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags. These
+ * flags are designed to work around the limitations of UFS and eMMC
+ * inline crypto hardware, and they shouldn't be used in scenarios where
+ * such hardware isn't being used.
+ *
+ * Leaving this NULL is equivalent to always returning false.
+ */
bool (*has_stable_inodes)(struct super_block *sb);
+
+ /*
+ * Get the number of bits that the filesystem uses to represent inode
+ * numbers and file logical block numbers.
+ *
+ * By default, both of these are assumed to be 64-bit. This function
+ * can be implemented to declare that either or both of these numbers are
+ * shorter, which may allow the use of the
+ * FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags and/or the use of
+ * inline crypto hardware whose maximum DUN length is less than 64 bits
+ * (e.g., eMMC v5.2 spec compliant hardware). This function only needs
+ * to be implemented if support for one of these features is needed.
+ */
void (*get_ino_and_lblk_bits)(struct super_block *sb,
int *ino_bits_ret, int *lblk_bits_ret);
+
+ /*
+ * Return an array of pointers to the block devices to which the
+ * filesystem may write encrypted file contents, NULL if the filesystem
+ * only has a single such block device, or an ERR_PTR() on error.
+ *
+ * On successful non-NULL return, *num_devs is set to the number of
+ * devices in the returned array. The caller must free the returned
+ * array using kfree().
+ *
+ * If the filesystem can use multiple block devices (other than block
+ * devices that aren't used for encrypted file contents, such as
+ * external journal devices), and wants to support inline encryption,
+ * then it must implement this function. Otherwise it's not needed.
+ */
+ struct block_device **(*get_devices)(struct super_block *sb,
+ unsigned int *num_devs);
};
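/*
 * Example (sketch; the myfs_* functions are hypothetical and would wrap the
 * filesystem's on-disk storage of the context blob, e.g. an xattr): a
 * minimal operations table supplying the basic context hooks, with the
 * optional ones left unset as documented above.
 */
static int myfs_get_fscrypt_context(struct inode *inode, void *ctx, size_t len);
static int myfs_set_fscrypt_context(struct inode *inode, const void *ctx,
				    size_t len, void *fs_data);
static bool myfs_empty_dir(struct inode *inode);

static const struct fscrypt_operations myfs_cryptops = {
	.get_context	= myfs_get_fscrypt_context,
	.set_context	= myfs_set_fscrypt_context,
	.empty_dir	= myfs_empty_dir,
};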
-static inline bool fscrypt_has_encryption_key(const struct inode *inode)
+static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode)
{
- /* pairs with cmpxchg_release() in fscrypt_get_encryption_info() */
- return READ_ONCE(inode->i_crypt_info) != NULL;
+ /*
+ * Pairs with the cmpxchg_release() in fscrypt_setup_encryption_info().
+ * I.e., another task may publish ->i_crypt_info concurrently, executing
+ * a RELEASE barrier. We need to use smp_load_acquire() here to safely
+ * ACQUIRE the memory the other task published.
+ */
+ return smp_load_acquire(&inode->i_crypt_info);
}
/**
* fscrypt_needs_contents_encryption() - check whether an inode needs
* contents encryption
+ * @inode: the inode to check
*
* Return: %true iff the inode is an encrypted regular file and the kernel was
* built with fscrypt support.
@@ -87,41 +205,63 @@ static inline bool fscrypt_needs_contents_encryption(const struct inode *inode)
return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}
-static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
+/*
+ * When d_splice_alias() moves a directory's no-key alias to its plaintext alias
+ * as a result of the encryption key being added, DCACHE_NOKEY_NAME must be
+ * cleared. Note that we don't have to support arbitrary moves of this flag
+ * because fscrypt doesn't allow no-key names to be the source or target of a
+ * rename().
+ */
+static inline void fscrypt_handle_d_move(struct dentry *dentry)
{
- return inode->i_sb->s_cop->dummy_context &&
- inode->i_sb->s_cop->dummy_context(inode);
+ dentry->d_flags &= ~DCACHE_NOKEY_NAME;
}
-/*
- * When d_splice_alias() moves a directory's encrypted alias to its decrypted
- * alias as a result of the encryption key being added, DCACHE_ENCRYPTED_NAME
- * must be cleared. Note that we don't have to support arbitrary moves of this
- * flag because fscrypt doesn't allow encrypted aliases to be the source or
- * target of a rename().
+/**
+ * fscrypt_is_nokey_name() - test whether a dentry is a no-key name
+ * @dentry: the dentry to check
+ *
+ * This returns true if the dentry is a no-key dentry. A no-key dentry is a
+ * dentry that was created in an encrypted directory that hasn't had its
+ * encryption key added yet. Such dentries may be either positive or negative.
+ *
+ * When a filesystem is asked to create a new filename in an encrypted directory
+ * and the new filename's dentry is a no-key dentry, it must fail the operation
+ * with ENOKEY. This includes ->create(), ->mkdir(), ->mknod(), ->symlink(),
+ * ->rename(), and ->link(). (However, ->rename() and ->link() are already
+ * handled by fscrypt_prepare_rename() and fscrypt_prepare_link().)
+ *
+ * This is necessary because creating a filename requires the directory's
+ * encryption key, but just checking for the key on the directory inode during
+ * the final filesystem operation doesn't guarantee that the key was available
+ * during the preceding dentry lookup. The key must already have been
+ * available during that lookup in order to check whether the filename
+ * already exists in the directory and to keep the new file's dentry from
+ * being invalidated for incorrectly carrying the no-key flag.
+ *
+ * Return: %true if the dentry is a no-key name
*/
-static inline void fscrypt_handle_d_move(struct dentry *dentry)
+static inline bool fscrypt_is_nokey_name(const struct dentry *dentry)
{
- dentry->d_flags &= ~DCACHE_ENCRYPTED_NAME;
+ return dentry->d_flags & DCACHE_NOKEY_NAME;
}
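/*
 * Example usage (sketch; myfs_* hypothetical): the ENOKEY check described
 * above, as it would appear at the top of a filesystem's ->create(),
 * ->mkdir(), ->mknod() or ->symlink() implementation.
 */
static int myfs_may_create(struct dentry *dentry)
{
	if (fscrypt_is_nokey_name(dentry))
		return -ENOKEY;
	return 0;
}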
/* crypto.c */
-extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
-
-extern struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
- unsigned int len,
- unsigned int offs,
- gfp_t gfp_flags);
-extern int fscrypt_encrypt_block_inplace(const struct inode *inode,
- struct page *page, unsigned int len,
- unsigned int offs, u64 lblk_num,
- gfp_t gfp_flags);
-
-extern int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
- unsigned int offs);
-extern int fscrypt_decrypt_block_inplace(const struct inode *inode,
- struct page *page, unsigned int len,
- unsigned int offs, u64 lblk_num);
+void fscrypt_enqueue_decrypt_work(struct work_struct *);
+
+struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
+ unsigned int len,
+ unsigned int offs,
+ gfp_t gfp_flags);
+int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
+ unsigned int len, unsigned int offs,
+ u64 lblk_num, gfp_t gfp_flags);
+
+int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
+ unsigned int offs);
+int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
+ unsigned int len, unsigned int offs,
+ u64 lblk_num);
static inline bool fscrypt_is_bounce_page(struct page *page)
{
@@ -133,77 +273,107 @@ static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
return (struct page *)page_private(bounce_page);
}
-extern void fscrypt_free_bounce_page(struct page *bounce_page);
+void fscrypt_free_bounce_page(struct page *bounce_page);
/* policy.c */
-extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
-extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
-extern int fscrypt_ioctl_get_policy_ex(struct file *, void __user *);
-extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
-extern int fscrypt_inherit_context(struct inode *, struct inode *,
- void *, bool);
+int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg);
+int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg);
+int fscrypt_ioctl_get_policy_ex(struct file *filp, void __user *arg);
+int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg);
+int fscrypt_has_permitted_context(struct inode *parent, struct inode *child);
+int fscrypt_context_for_new_inode(void *ctx, struct inode *inode);
+int fscrypt_set_context(struct inode *inode, void *fs_data);
+
+struct fscrypt_dummy_policy {
+ const union fscrypt_policy *policy;
+};
+
+int fscrypt_parse_test_dummy_encryption(const struct fs_parameter *param,
+ struct fscrypt_dummy_policy *dummy_policy);
+bool fscrypt_dummy_policies_equal(const struct fscrypt_dummy_policy *p1,
+ const struct fscrypt_dummy_policy *p2);
+void fscrypt_show_test_dummy_encryption(struct seq_file *seq, char sep,
+ struct super_block *sb);
+static inline bool
+fscrypt_is_dummy_policy_set(const struct fscrypt_dummy_policy *dummy_policy)
+{
+ return dummy_policy->policy != NULL;
+}
+static inline void
+fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy)
+{
+ kfree(dummy_policy->policy);
+ dummy_policy->policy = NULL;
+}
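
As a usage sketch, a filesystem's fs_context parameter parser might wire
these helpers up as follows; struct myfs_fs_context and its dummy_policy
member are hypothetical, and the sketch is illustrative rather than the
canonical API usage:

static int myfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct myfs_fs_context *ctx = fc->fs_private;	/* hypothetical */

	if (!strcmp(param->key, "test_dummy_encryption"))
		return fscrypt_parse_test_dummy_encryption(param,
							   &ctx->dummy_policy);
	return -ENOPARAM;
}

On teardown the policy would be released with fscrypt_free_dummy_policy(),
and fscrypt_show_test_dummy_encryption() prints it for ->show_options().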
+
/* keyring.c */
-extern void fscrypt_sb_free(struct super_block *sb);
-extern int fscrypt_ioctl_add_key(struct file *filp, void __user *arg);
-extern int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg);
-extern int fscrypt_ioctl_remove_key_all_users(struct file *filp,
- void __user *arg);
-extern int fscrypt_ioctl_get_key_status(struct file *filp, void __user *arg);
+void fscrypt_destroy_keyring(struct super_block *sb);
+int fscrypt_ioctl_add_key(struct file *filp, void __user *arg);
+int fscrypt_add_test_dummy_key(struct super_block *sb,
+ const struct fscrypt_dummy_policy *dummy_policy);
+int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg);
+int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *arg);
+int fscrypt_ioctl_get_key_status(struct file *filp, void __user *arg);
/* keysetup.c */
-extern int fscrypt_get_encryption_info(struct inode *);
-extern void fscrypt_put_encryption_info(struct inode *);
-extern void fscrypt_free_inode(struct inode *);
-extern int fscrypt_drop_inode(struct inode *inode);
+int fscrypt_prepare_new_inode(struct inode *dir, struct inode *inode,
+ bool *encrypt_ret);
+void fscrypt_put_encryption_info(struct inode *inode);
+void fscrypt_free_inode(struct inode *inode);
+int fscrypt_drop_inode(struct inode *inode);
/* fname.c */
-extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
- int lookup, struct fscrypt_name *);
+int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname,
+ u8 *out, unsigned int olen);
+bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len,
+ u32 max_len, u32 *encrypted_len_ret);
+int fscrypt_setup_filename(struct inode *inode, const struct qstr *iname,
+ int lookup, struct fscrypt_name *fname);
static inline void fscrypt_free_filename(struct fscrypt_name *fname)
{
kfree(fname->crypto_buf.name);
}
-extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
- struct fscrypt_str *);
-extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
-extern int fscrypt_fname_disk_to_usr(const struct inode *inode,
- u32 hash, u32 minor_hash,
- const struct fscrypt_str *iname,
- struct fscrypt_str *oname);
-extern bool fscrypt_match_name(const struct fscrypt_name *fname,
- const u8 *de_name, u32 de_name_len);
-extern u64 fscrypt_fname_siphash(const struct inode *dir,
- const struct qstr *name);
+int fscrypt_fname_alloc_buffer(u32 max_encrypted_len,
+ struct fscrypt_str *crypto_str);
+void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str);
+int fscrypt_fname_disk_to_usr(const struct inode *inode,
+ u32 hash, u32 minor_hash,
+ const struct fscrypt_str *iname,
+ struct fscrypt_str *oname);
+bool fscrypt_match_name(const struct fscrypt_name *fname,
+ const u8 *de_name, u32 de_name_len);
+u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name);
+int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags);
/* bio.c */
-extern void fscrypt_decrypt_bio(struct bio *);
-extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
- unsigned int);
+bool fscrypt_decrypt_bio(struct bio *bio);
+int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
+ sector_t pblk, unsigned int len);
/* hooks.c */
-extern int fscrypt_file_open(struct inode *inode, struct file *filp);
-extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir,
- struct dentry *dentry);
-extern int __fscrypt_prepare_rename(struct inode *old_dir,
- struct dentry *old_dentry,
- struct inode *new_dir,
- struct dentry *new_dentry,
- unsigned int flags);
-extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry,
- struct fscrypt_name *fname);
-extern int fscrypt_prepare_setflags(struct inode *inode,
- unsigned int oldflags, unsigned int flags);
-extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len,
- unsigned int max_len,
- struct fscrypt_str *disk_link);
-extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
- unsigned int len,
- struct fscrypt_str *disk_link);
-extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
- unsigned int max_size,
- struct delayed_call *done);
+int fscrypt_file_open(struct inode *inode, struct file *filp);
+int __fscrypt_prepare_link(struct inode *inode, struct inode *dir,
+ struct dentry *dentry);
+int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags);
+int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry,
+ struct fscrypt_name *fname);
+int __fscrypt_prepare_readdir(struct inode *dir);
+int __fscrypt_prepare_setattr(struct dentry *dentry, struct iattr *attr);
+int fscrypt_prepare_setflags(struct inode *inode,
+ unsigned int oldflags, unsigned int flags);
+int fscrypt_prepare_symlink(struct inode *dir, const char *target,
+ unsigned int len, unsigned int max_len,
+ struct fscrypt_str *disk_link);
+int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
+ unsigned int len, struct fscrypt_str *disk_link);
+const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
+ unsigned int max_size,
+ struct delayed_call *done);
+int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat);
static inline void fscrypt_set_ops(struct super_block *sb,
const struct fscrypt_operations *s_cop)
{
@@ -211,9 +381,9 @@ static inline void fscrypt_set_ops(struct super_block *sb,
}
#else /* !CONFIG_FS_ENCRYPTION */
-static inline bool fscrypt_has_encryption_key(const struct inode *inode)
+static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode)
{
- return false;
+ return NULL;
}
static inline bool fscrypt_needs_contents_encryption(const struct inode *inode)
@@ -221,13 +391,13 @@ static inline bool fscrypt_needs_contents_encryption(const struct inode *inode)
return false;
}
-static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
+static inline void fscrypt_handle_d_move(struct dentry *dentry)
{
- return false;
}
-static inline void fscrypt_handle_d_move(struct dentry *dentry)
+static inline bool fscrypt_is_nokey_name(const struct dentry *dentry)
{
+ return false;
}
/* crypto.c */
@@ -300,21 +470,58 @@ static inline int fscrypt_ioctl_get_policy_ex(struct file *filp,
return -EOPNOTSUPP;
}
+static inline int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int fscrypt_has_permitted_context(struct inode *parent,
struct inode *child)
{
return 0;
}
-static inline int fscrypt_inherit_context(struct inode *parent,
- struct inode *child,
- void *fs_data, bool preload)
+static inline int fscrypt_set_context(struct inode *inode, void *fs_data)
{
return -EOPNOTSUPP;
}
+struct fscrypt_dummy_policy {
+};
+
+static inline int
+fscrypt_parse_test_dummy_encryption(const struct fs_parameter *param,
+ struct fscrypt_dummy_policy *dummy_policy)
+{
+ return -EINVAL;
+}
+
+static inline bool
+fscrypt_dummy_policies_equal(const struct fscrypt_dummy_policy *p1,
+ const struct fscrypt_dummy_policy *p2)
+{
+ return true;
+}
+
+static inline void fscrypt_show_test_dummy_encryption(struct seq_file *seq,
+ char sep,
+ struct super_block *sb)
+{
+}
+
+static inline bool
+fscrypt_is_dummy_policy_set(const struct fscrypt_dummy_policy *dummy_policy)
+{
+ return false;
+}
+
+static inline void
+fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy)
+{
+}
+
/* keyring.c */
-static inline void fscrypt_sb_free(struct super_block *sb)
+static inline void fscrypt_destroy_keyring(struct super_block *sb)
{
}
@@ -323,6 +530,13 @@ static inline int fscrypt_ioctl_add_key(struct file *filp, void __user *arg)
return -EOPNOTSUPP;
}
+static inline int
+fscrypt_add_test_dummy_key(struct super_block *sb,
+ const struct fscrypt_dummy_policy *dummy_policy)
+{
+ return 0;
+}
+
static inline int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg)
{
return -EOPNOTSUPP;
@@ -341,9 +555,14 @@ static inline int fscrypt_ioctl_get_key_status(struct file *filp,
}
/* keysetup.c */
-static inline int fscrypt_get_encryption_info(struct inode *inode)
+
+static inline int fscrypt_prepare_new_inode(struct inode *dir,
+ struct inode *inode,
+ bool *encrypt_ret)
{
- return -EOPNOTSUPP;
+ if (IS_ENCRYPTED(dir))
+ return -EOPNOTSUPP;
+ return 0;
}
static inline void fscrypt_put_encryption_info(struct inode *inode)
@@ -380,8 +599,7 @@ static inline void fscrypt_free_filename(struct fscrypt_name *fname)
return;
}
-static inline int fscrypt_fname_alloc_buffer(const struct inode *inode,
- u32 max_encrypted_len,
+static inline int fscrypt_fname_alloc_buffer(u32 max_encrypted_len,
struct fscrypt_str *crypto_str)
{
return -EOPNOTSUPP;
@@ -416,9 +634,16 @@ static inline u64 fscrypt_fname_siphash(const struct inode *dir,
return 0;
}
+static inline int fscrypt_d_revalidate(struct dentry *dentry,
+ unsigned int flags)
+{
+ return 1;
+}
+
/* bio.c */
-static inline void fscrypt_decrypt_bio(struct bio *bio)
+static inline bool fscrypt_decrypt_bio(struct bio *bio)
{
+ return true;
}
static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
@@ -458,6 +683,17 @@ static inline int __fscrypt_prepare_lookup(struct inode *dir,
return -EOPNOTSUPP;
}
+static inline int __fscrypt_prepare_readdir(struct inode *dir)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_prepare_setattr(struct dentry *dentry,
+ struct iattr *attr)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int fscrypt_prepare_setflags(struct inode *inode,
unsigned int oldflags,
unsigned int flags)
@@ -465,15 +701,21 @@ static inline int fscrypt_prepare_setflags(struct inode *inode,
return 0;
}
-static inline int __fscrypt_prepare_symlink(struct inode *dir,
- unsigned int len,
- unsigned int max_len,
- struct fscrypt_str *disk_link)
+static inline int fscrypt_prepare_symlink(struct inode *dir,
+ const char *target,
+ unsigned int len,
+ unsigned int max_len,
+ struct fscrypt_str *disk_link)
{
- return -EOPNOTSUPP;
+ if (IS_ENCRYPTED(dir))
+ return -EOPNOTSUPP;
+ disk_link->name = (unsigned char *)target;
+ disk_link->len = len + 1;
+ if (disk_link->len > max_len)
+ return -ENAMETOOLONG;
+ return 0;
}
-
static inline int __fscrypt_encrypt_symlink(struct inode *inode,
const char *target,
unsigned int len,
@@ -490,6 +732,12 @@ static inline const char *fscrypt_get_symlink(struct inode *inode,
return ERR_PTR(-EOPNOTSUPP);
}
+static inline int fscrypt_symlink_getattr(const struct path *path,
+ struct kstat *stat)
+{
+ return -EOPNOTSUPP;
+}
+
static inline void fscrypt_set_ops(struct super_block *sb,
const struct fscrypt_operations *s_cop)
{
@@ -497,42 +745,124 @@ static inline void fscrypt_set_ops(struct super_block *sb,
#endif /* !CONFIG_FS_ENCRYPTION */
+/* inline_crypt.c */
+#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
+
+bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode);
+
+void fscrypt_set_bio_crypt_ctx(struct bio *bio,
+ const struct inode *inode, u64 first_lblk,
+ gfp_t gfp_mask);
+
+void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
+ const struct buffer_head *first_bh,
+ gfp_t gfp_mask);
+
+bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
+ u64 next_lblk);
+
+bool fscrypt_mergeable_bio_bh(struct bio *bio,
+ const struct buffer_head *next_bh);
+
+bool fscrypt_dio_supported(struct inode *inode);
+
+u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks);
+
+#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
+
+static inline bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
+{
+ return false;
+}
+
+static inline void fscrypt_set_bio_crypt_ctx(struct bio *bio,
+ const struct inode *inode,
+ u64 first_lblk, gfp_t gfp_mask) { }
+
+static inline void fscrypt_set_bio_crypt_ctx_bh(
+ struct bio *bio,
+ const struct buffer_head *first_bh,
+ gfp_t gfp_mask) { }
+
+static inline bool fscrypt_mergeable_bio(struct bio *bio,
+ const struct inode *inode,
+ u64 next_lblk)
+{
+ return true;
+}
+
+static inline bool fscrypt_mergeable_bio_bh(struct bio *bio,
+ const struct buffer_head *next_bh)
+{
+ return true;
+}
+
+static inline bool fscrypt_dio_supported(struct inode *inode)
+{
+ return !fscrypt_needs_contents_encryption(inode);
+}
+
+static inline u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk,
+ u64 nr_blocks)
+{
+ return nr_blocks;
+}
+#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
+
/**
- * fscrypt_require_key - require an inode's encryption key
- * @inode: the inode we need the key for
+ * fscrypt_inode_uses_inline_crypto() - test whether an inode uses inline
+ * encryption
+ * @inode: an inode. If encrypted, its key must be set up.
*
- * If the inode is encrypted, set up its encryption key if not already done.
- * Then require that the key be present and return -ENOKEY otherwise.
- *
- * No locks are needed, and the key will live as long as the struct inode --- so
- * it won't go away from under you.
+ * Return: true if the inode requires file contents encryption and if the
+ * encryption should be done in the block layer via blk-crypto rather
+ * than in the filesystem layer.
+ */
+static inline bool fscrypt_inode_uses_inline_crypto(const struct inode *inode)
+{
+ return fscrypt_needs_contents_encryption(inode) &&
+ __fscrypt_inode_uses_inline_crypto(inode);
+}
+
+/**
+ * fscrypt_inode_uses_fs_layer_crypto() - test whether an inode uses fs-layer
+ * encryption
+ * @inode: an inode. If encrypted, its key must be set up.
*
- * Return: 0 on success, -ENOKEY if the key is missing, or another -errno code
- * if a problem occurred while setting up the encryption key.
+ * Return: true if the inode requires file contents encryption and if the
+ * encryption should be done in the filesystem layer rather than in the
+ * block layer via blk-crypto.
*/
-static inline int fscrypt_require_key(struct inode *inode)
+static inline bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode)
{
- if (IS_ENCRYPTED(inode)) {
- int err = fscrypt_get_encryption_info(inode);
+ return fscrypt_needs_contents_encryption(inode) &&
+ !__fscrypt_inode_uses_inline_crypto(inode);
+}
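
To illustrate the split, here is a hedged sketch of a write path choosing
between the two modes; everything except the fscrypt helpers is hypothetical:

static int myfs_encrypt_and_submit(struct inode *inode, struct page *page,
				   struct bio *bio, u64 lblk)
{
	struct page *bounce_page;

	if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
		/* fs-layer crypto: encrypt into a bounce page in software */
		bounce_page = fscrypt_encrypt_pagecache_blocks(page, PAGE_SIZE,
							       0, GFP_NOFS);
		if (IS_ERR(bounce_page))
			return PTR_ERR(bounce_page);
		/* ... add bounce_page to the bio and submit ... */
	} else if (fscrypt_inode_uses_inline_crypto(inode)) {
		/* inline crypto: tag the bio; blk-crypto encrypts in flight */
		fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
		/* ... add the original page to the bio and submit ... */
	}
	return 0;
}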
- if (err)
- return err;
- if (!fscrypt_has_encryption_key(inode))
- return -ENOKEY;
- }
- return 0;
+/**
+ * fscrypt_has_encryption_key() - check whether an inode has had its key set up
+ * @inode: the inode to check
+ *
+ * Return: %true if the inode has had its encryption key set up, else %false.
+ *
+ * Usually this should be preceded by fscrypt_get_encryption_info() to try to
+ * set up the key first.
+ */
+static inline bool fscrypt_has_encryption_key(const struct inode *inode)
+{
+ return fscrypt_get_info(inode) != NULL;
}
/**
- * fscrypt_prepare_link - prepare to link an inode into a possibly-encrypted directory
+ * fscrypt_prepare_link() - prepare to link an inode into a possibly-encrypted
+ * directory
* @old_dentry: an existing dentry for the inode being linked
* @dir: the target directory
* @dentry: negative dentry for the target filename
*
* A new link can only be added to an encrypted directory if the directory's
* encryption key is available --- since otherwise we'd have no way to encrypt
- * the filename. Therefore, we first set up the directory's encryption key (if
- * not already done) and return an error if it's unavailable.
+ * the filename.
*
* We also verify that the link will not violate the constraint that all files
* in an encrypted directory tree use the same encryption policy.
@@ -551,7 +881,8 @@ static inline int fscrypt_prepare_link(struct dentry *old_dentry,
}
/**
- * fscrypt_prepare_rename - prepare for a rename between possibly-encrypted directories
+ * fscrypt_prepare_rename() - prepare for a rename between possibly-encrypted
+ * directories
* @old_dir: source directory
* @old_dentry: dentry for source file
* @new_dir: target directory
@@ -584,23 +915,27 @@ static inline int fscrypt_prepare_rename(struct inode *old_dir,
}
/**
- * fscrypt_prepare_lookup - prepare to lookup a name in a possibly-encrypted directory
+ * fscrypt_prepare_lookup() - prepare to lookup a name in a possibly-encrypted
+ * directory
* @dir: directory being searched
* @dentry: filename being looked up
* @fname: (output) the name to use to search the on-disk directory
*
* Prepare for ->lookup() in a directory which may be encrypted by determining
- * the name that will actually be used to search the directory on-disk. Lookups
- * can be done with or without the directory's encryption key; without the key,
- * filenames are presented in encrypted form. Therefore, we'll try to set up
- * the directory's encryption key, but even without it the lookup can continue.
+ * the name that will actually be used to search the directory on-disk. If the
+ * directory's encryption policy is supported by this kernel and its encryption
+ * key is available, then the lookup is assumed to be by plaintext name;
+ * otherwise, it is assumed to be by no-key name.
*
- * This also installs a custom ->d_revalidate() method which will invalidate the
- * dentry if it was created without the key and the key is later added.
+ * This will set DCACHE_NOKEY_NAME on the dentry if the lookup is by no-key
+ * name. In this case the filesystem must assign the dentry a dentry_operations
+ * which contains fscrypt_d_revalidate (or contains a d_revalidate method that
+ * calls fscrypt_d_revalidate), so that the dentry will be invalidated if the
+ * directory's encryption key is later added.
*
- * Return: 0 on success; -ENOENT if key is unavailable but the filename isn't a
- * correctly formed encoded ciphertext name, so a negative dentry should be
- * created; or another -errno code.
+ * Return: 0 on success; -ENOENT if the directory's key is unavailable but the
+ * filename isn't a valid no-key name, so a negative dentry should be created;
+ * or another -errno code.
*/
static inline int fscrypt_prepare_lookup(struct inode *dir,
struct dentry *dentry,
@@ -617,7 +952,28 @@ static inline int fscrypt_prepare_lookup(struct inode *dir,
}
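
A minimal sketch of the dentry_operations wiring that the comment above
requires; the filesystem would install these via sb->s_d_op or d_set_d_op():

static const struct dentry_operations myfs_dentry_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};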
/**
- * fscrypt_prepare_setattr - prepare to change a possibly-encrypted inode's attributes
+ * fscrypt_prepare_readdir() - prepare to read a possibly-encrypted directory
+ * @dir: the directory inode
+ *
+ * If the directory is encrypted and it doesn't already have its encryption key
+ * set up, try to set it up so that the filenames will be listed in plaintext
+ * form rather than in no-key form.
+ *
+ * Return: 0 on success; -errno on error. Note that the encryption key being
+ * unavailable is not considered an error. It is also not an error if
+ * the encryption policy is unsupported by this kernel; that is treated
+ * like the key being unavailable, so that files can still be deleted.
+ */
+static inline int fscrypt_prepare_readdir(struct inode *dir)
+{
+ if (IS_ENCRYPTED(dir))
+ return __fscrypt_prepare_readdir(dir);
+ return 0;
+}
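
As a usage sketch, a filesystem's ->iterate_shared() would typically start
like this (myfs_emit_entries is hypothetical):

static int myfs_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *dir = file_inode(file);
	int err;

	err = fscrypt_prepare_readdir(dir);
	if (err)
		return err;

	/* Without the key, entries are emitted as no-key names. */
	return myfs_emit_entries(file, ctx);
}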
+
+/**
+ * fscrypt_prepare_setattr() - prepare to change a possibly-encrypted inode's
+ * attributes
* @dentry: dentry through which the inode is being changed
* @attr: attributes to change
*
@@ -636,52 +992,13 @@ static inline int fscrypt_prepare_lookup(struct inode *dir,
static inline int fscrypt_prepare_setattr(struct dentry *dentry,
struct iattr *attr)
{
- if (attr->ia_valid & ATTR_SIZE)
- return fscrypt_require_key(d_inode(dentry));
- return 0;
-}
-
-/**
- * fscrypt_prepare_symlink - prepare to create a possibly-encrypted symlink
- * @dir: directory in which the symlink is being created
- * @target: plaintext symlink target
- * @len: length of @target excluding null terminator
- * @max_len: space the filesystem has available to store the symlink target
- * @disk_link: (out) the on-disk symlink target being prepared
- *
- * This function computes the size the symlink target will require on-disk,
- * stores it in @disk_link->len, and validates it against @max_len. An
- * encrypted symlink may be longer than the original.
- *
- * Additionally, @disk_link->name is set to @target if the symlink will be
- * unencrypted, but left NULL if the symlink will be encrypted. For encrypted
- * symlinks, the filesystem must call fscrypt_encrypt_symlink() to create the
- * on-disk target later. (The reason for the two-step process is that some
- * filesystems need to know the size of the symlink target before creating the
- * inode, e.g. to determine whether it will be a "fast" or "slow" symlink.)
- *
- * Return: 0 on success, -ENAMETOOLONG if the symlink target is too long,
- * -ENOKEY if the encryption key is missing, or another -errno code if a problem
- * occurred while setting up the encryption key.
- */
-static inline int fscrypt_prepare_symlink(struct inode *dir,
- const char *target,
- unsigned int len,
- unsigned int max_len,
- struct fscrypt_str *disk_link)
-{
- if (IS_ENCRYPTED(dir) || fscrypt_dummy_context_enabled(dir))
- return __fscrypt_prepare_symlink(dir, len, max_len, disk_link);
-
- disk_link->name = (unsigned char *)target;
- disk_link->len = len + 1;
- if (disk_link->len > max_len)
- return -ENAMETOOLONG;
+ if (IS_ENCRYPTED(d_inode(dentry)))
+ return __fscrypt_prepare_setattr(dentry, attr);
return 0;
}
/**
- * fscrypt_encrypt_symlink - encrypt the symlink target if needed
+ * fscrypt_encrypt_symlink() - encrypt the symlink target if needed
* @inode: symlink inode
* @target: plaintext symlink target
* @len: length of @target excluding null terminator
diff --git a/include/linux/fsi-occ.h b/include/linux/fsi-occ.h
index d4cdc2aa6e33..7ee3dbd7f4b3 100644
--- a/include/linux/fsi-occ.h
+++ b/include/linux/fsi-occ.h
@@ -19,6 +19,8 @@ struct device;
#define OCC_RESP_CRIT_OCB 0xE3
#define OCC_RESP_CRIT_HW 0xE4
+#define OCC_MAX_RESP_WORDS 2048
+
int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
void *response, size_t *resp_len);
diff --git a/include/linux/fsl/bestcomm/bestcomm.h b/include/linux/fsl/bestcomm/bestcomm.h
index a0e2e6b19b57..154e541ce57e 100644
--- a/include/linux/fsl/bestcomm/bestcomm.h
+++ b/include/linux/fsl/bestcomm/bestcomm.h
@@ -27,7 +27,7 @@
*/
struct bcom_bd {
u32 status;
- u32 data[0]; /* variable payload size */
+ u32 data[]; /* variable payload size */
};
/* ======================================================================== */
diff --git a/include/linux/fsl/enetc_mdio.h b/include/linux/fsl/enetc_mdio.h
index 4875dd38af7e..2d9203314865 100644
--- a/include/linux/fsl/enetc_mdio.h
+++ b/include/linux/fsl/enetc_mdio.h
@@ -15,6 +15,7 @@
#define ENETC_PCS_IF_MODE_SGMII_EN BIT(0)
#define ENETC_PCS_IF_MODE_USE_SGMII_AN BIT(1)
#define ENETC_PCS_IF_MODE_SGMII_SPEED(x) (((x) << 2) & GENMASK(3, 2))
+#define ENETC_PCS_IF_MODE_DUPLEX_HALF BIT(3)
/* Not a mistake, the SerDes PLL needs to be set at 3.125 GHz by Reset
* Configuration Word (RCW, outside Linux control) for 2.5G SGMII mode. The PCS
diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h
index 0ac27b233f12..fdb55ca47a4f 100644
--- a/include/linux/fsl/guts.h
+++ b/include/linux/fsl/guts.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-/**
+/*
 * Freescale 85xx and 86xx Global Utilities register set
*
* Authors: Jeff Brown
@@ -14,7 +14,7 @@
#include <linux/types.h>
#include <linux/io.h>
-/**
+/*
* Global Utility Registers.
*
* Not all registers defined in this structure are available on all chips, so
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index 54d9436600c7..a86115bc799c 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -3,6 +3,7 @@
* Freescale Management Complex (MC) bus public interface
*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2019-2020 NXP
* Author: German Rivera <German.Rivera@freescale.com>
*
*/
@@ -12,6 +13,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
+#include <uapi/linux/fsl_mc.h>
#define FSL_MC_VENDOR_FREESCALE 0x1957
@@ -30,6 +32,13 @@ struct fsl_mc_io;
* @shutdown: Function called at shutdown time to quiesce the device
* @suspend: Function called when a device is stopped
* @resume: Function called when a device is resumed
+ * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
+ * For most device drivers there is no need to care about this flag,
+ * as long as all DMA is handled through the kernel DMA API.
+ * Some special drivers, for example VFIO drivers, know how to manage
+ * the DMA themselves and set this flag so that the IOMMU layer will
+ * allow them to set up and manage their own I/O address space.
*
* Generic DPAA device driver object for device drivers that are registered
* with a DPRC bus. This structure is to be embedded in each device-specific
@@ -43,6 +52,7 @@ struct fsl_mc_driver {
void (*shutdown)(struct fsl_mc_device *dev);
int (*suspend)(struct fsl_mc_device *dev, pm_message_t state);
int (*resume)(struct fsl_mc_device *dev);
+ bool driver_managed_dma;
};
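
A hedged sketch of a minimal driver using this structure; the .driver,
.probe and .remove members are assumed from the elided part of the
structure, and all names are illustrative:

static int my_probe(struct fsl_mc_device *mc_dev)
{
	return 0;
}

static int my_remove(struct fsl_mc_device *mc_dev)
{
	return 0;
}

static struct fsl_mc_driver my_driver = {
	.driver = {
		.name = "my-fsl-mc-driver",
	},
	.probe = my_probe,
	.remove = my_remove,
	/* This driver manages its own I/O address space (VFIO-style). */
	.driver_managed_dma = true,
};

Registration would go through __fsl_mc_driver_register(&my_driver,
THIS_MODULE) and fsl_mc_driver_unregister(&my_driver), both declared
further down in this header.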
#define to_fsl_mc_driver(_drv) \
@@ -89,13 +99,13 @@ struct fsl_mc_resource {
/**
* struct fsl_mc_device_irq - MC object device message-based interrupt
- * @msi_desc: pointer to MSI descriptor allocated by fsl_mc_msi_alloc_descs()
+ * @virq: Linux virtual interrupt number
* @mc_dev: MC object device that owns this interrupt
* @dev_irq_index: device-relative IRQ index
* @resource: MC generic resource associated with the interrupt
*/
struct fsl_mc_device_irq {
- struct msi_desc *msi_desc;
+ unsigned int virq;
struct fsl_mc_device *mc_dev;
u8 dev_irq_index;
struct fsl_mc_resource resource;
@@ -148,6 +158,13 @@ struct fsl_mc_obj_desc {
*/
#define FSL_MC_IS_DPRC 0x0001
+/* Region flags */
+/* Indicates that region can be mapped as cacheable */
+#define FSL_MC_REGION_CACHEABLE 0x00000001
+
+/* Indicates that region can be mapped as shareable */
+#define FSL_MC_REGION_SHAREABLE 0x00000002
+
/**
* struct fsl_mc_device - MC object device object
* @dev: Linux driver model device object
@@ -161,6 +178,9 @@ struct fsl_mc_obj_desc {
* @regions: pointer to array of MMIO region entries
* @irqs: pointer to array of pointers to interrupts allocated to this device
* @resource: generic resource associated with this MC object device, if any.
+ * @driver_override: driver name to force a match; do not set directly,
+ * because core frees it; use driver_set_override() to
+ * set or clear it.
*
 * Generic device object for MC object devices that are "attached" to an
 * MC bus.
@@ -186,7 +206,7 @@ struct fsl_mc_device {
struct device dev;
u64 dma_mask;
u16 flags;
- u16 icid;
+ u32 icid;
u16 mc_handle;
struct fsl_mc_io *mc_io;
struct fsl_mc_obj_desc obj_desc;
@@ -194,13 +214,12 @@ struct fsl_mc_device {
struct fsl_mc_device_irq **irqs;
struct fsl_mc_resource *resource;
struct device_link *consumer_link;
+ const char *driver_override;
};
#define to_fsl_mc_device(_dev) \
container_of(_dev, struct fsl_mc_device, dev)
-#define MC_CMD_NUM_OF_PARAMS 7
-
struct mc_cmd_header {
u8 src_id;
u8 flags_hw;
@@ -210,11 +229,6 @@ struct mc_cmd_header {
__le16 cmd_id;
};
-struct fsl_mc_command {
- __le64 header;
- __le64 params[MC_CMD_NUM_OF_PARAMS];
-};
-
enum mc_cmd_status {
MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */
@@ -339,7 +353,7 @@ struct fsl_mc_io {
* This field is only meaningful if the
* FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is set
*/
- spinlock_t spinlock; /* serializes mc_send_command() */
+ raw_spinlock_t spinlock; /* serializes mc_send_command() */
};
};
@@ -381,6 +395,22 @@ int __must_check __fsl_mc_driver_register(struct fsl_mc_driver *fsl_mc_driver,
void fsl_mc_driver_unregister(struct fsl_mc_driver *driver);
+/**
+ * struct fsl_mc_version
+ * @major: Major version number: incremented on API compatibility changes
+ * @minor: Minor version number: incremented on API additions (that are
+ * backward compatible); reset when major version is incremented
+ * @revision: Internal revision number: incremented on implementation changes
+ * and/or bug fixes that have no impact on API
+ */
+struct fsl_mc_version {
+ u32 major;
+ u32 minor;
+ u32 revision;
+};
+
+struct fsl_mc_version *fsl_mc_get_version(void);
+
int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
u16 mc_io_flags,
struct fsl_mc_io **new_mc_io);
@@ -403,7 +433,8 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
-struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev);
+struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
+ u16 if_id);
extern struct bus_type fsl_mc_bus_type;
@@ -417,6 +448,11 @@ extern struct device_type fsl_mc_bus_dpmcp_type;
extern struct device_type fsl_mc_bus_dpmac_type;
extern struct device_type fsl_mc_bus_dprtc_type;
extern struct device_type fsl_mc_bus_dpseci_type;
+extern struct device_type fsl_mc_bus_dpdmux_type;
+extern struct device_type fsl_mc_bus_dpdcei_type;
+extern struct device_type fsl_mc_bus_dpaiop_type;
+extern struct device_type fsl_mc_bus_dpci_type;
+extern struct device_type fsl_mc_bus_dpdmai_type;
static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev)
{
@@ -438,6 +474,11 @@ static inline bool is_fsl_mc_bus_dpsw(const struct fsl_mc_device *mc_dev)
return mc_dev->dev.type == &fsl_mc_bus_dpsw_type;
}
+static inline bool is_fsl_mc_bus_dpdmux(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpdmux_type;
+}
+
static inline bool is_fsl_mc_bus_dpbp(const struct fsl_mc_device *mc_dev)
{
return mc_dev->dev.type == &fsl_mc_bus_dpbp_type;
@@ -468,6 +509,55 @@ static inline bool is_fsl_mc_bus_dpseci(const struct fsl_mc_device *mc_dev)
return mc_dev->dev.type == &fsl_mc_bus_dpseci_type;
}
+static inline bool is_fsl_mc_bus_dpdcei(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpdcei_type;
+}
+
+static inline bool is_fsl_mc_bus_dpaiop(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpaiop_type;
+}
+
+static inline bool is_fsl_mc_bus_dpci(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpci_type;
+}
+
+static inline bool is_fsl_mc_bus_dpdmai(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpdmai_type;
+}
+
+#define DPRC_RESET_OPTION_NON_RECURSIVE 0x00000001
+int dprc_reset_container(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int child_container_id,
+ u32 options);
+
+int dprc_scan_container(struct fsl_mc_device *mc_bus_dev,
+ bool alloc_interrupts);
+
+void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_obj_desc *obj_desc_array,
+ int num_child_objects_in_mc);
+
+int dprc_cleanup(struct fsl_mc_device *mc_dev);
+
+int dprc_setup(struct fsl_mc_device *mc_dev);
+
+/**
+ * Maximum number of total IRQs that can be pre-allocated for an MC bus'
+ * IRQ pool
+ */
+#define FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS 256
+
+int fsl_mc_populate_irq_pool(struct fsl_mc_device *mc_bus_dev,
+ unsigned int irq_count);
+
+void fsl_mc_cleanup_irq_pool(struct fsl_mc_device *mc_bus_dev);
+
/*
* Data Path Buffer Pool (DPBP) API
* Contains initialization APIs and runtime control APIs for DPBP
@@ -540,6 +630,20 @@ int dpcon_reset(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
+int fsl_mc_obj_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int obj_id,
+ char *obj_type,
+ u16 *token);
+
+int fsl_mc_obj_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int fsl_mc_obj_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
/**
* struct dpcon_attr - Structure representing DPCON attributes
* @id: DPCON object ID
diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h
index b0b743563f43..01acebe37fab 100644
--- a/include/linux/fsl/ptp_qoriq.h
+++ b/include/linux/fsl/ptp_qoriq.h
@@ -135,7 +135,8 @@ struct ptp_qoriq_registers {
#define DEFAULT_CKSEL 1
#define DEFAULT_TMR_PRSC 2
#define DEFAULT_FIPER1_PERIOD 1000000000
-#define DEFAULT_FIPER2_PERIOD 100000
+#define DEFAULT_FIPER2_PERIOD 1000000000
+#define DEFAULT_FIPER3_PERIOD 1000000000
struct ptp_qoriq {
void __iomem *base;
@@ -147,16 +148,16 @@ struct ptp_qoriq {
struct dentry *debugfs_root;
struct device *dev;
bool extts_fifo_support;
+ bool fiper3_support;
int irq;
int phc_index;
- u64 alarm_interval; /* for periodic alarm */
- u64 alarm_value;
u32 tclk_period; /* nanoseconds */
u32 tmr_prsc;
u32 tmr_add;
u32 cksel;
u32 tmr_fiper1;
u32 tmr_fiper2;
+ u32 tmr_fiper3;
u32 (*read)(unsigned __iomem *addr);
void (*write)(unsigned __iomem *addr, u32 val);
};
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index a2d5d175d3c1..bb8467cd11ae 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -18,59 +18,101 @@
#include <linux/bug.h>
/*
- * Notify this @dir inode about a change in the directory entry @dentry.
+ * Notify this @dir inode about a change in a child directory entry.
+ * The directory entry may have turned positive or negative or its inode may
+ * have changed (i.e. renamed over).
*
* Unlike fsnotify_parent(), the event will be reported regardless of the
- * FS_EVENT_ON_CHILD mask on the parent inode.
+ * FS_EVENT_ON_CHILD mask on the parent inode and will not be reported if only
+ * the child is interested and not the parent.
*/
-static inline int fsnotify_dirent(struct inode *dir, struct dentry *dentry,
- __u32 mask)
+static inline int fsnotify_name(__u32 mask, const void *data, int data_type,
+ struct inode *dir, const struct qstr *name,
+ u32 cookie)
{
- return fsnotify(dir, mask, d_inode(dentry), FSNOTIFY_EVENT_INODE,
- &dentry->d_name, 0);
+ if (atomic_long_read(&dir->i_sb->s_fsnotify_connectors) == 0)
+ return 0;
+
+ return fsnotify(mask, data, data_type, dir, name, NULL, cookie);
+}
+
+static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry,
+ __u32 mask)
+{
+ fsnotify_name(mask, dentry, FSNOTIFY_EVENT_DENTRY, dir, &dentry->d_name, 0);
+}
+
+static inline void fsnotify_inode(struct inode *inode, __u32 mask)
+{
+ if (atomic_long_read(&inode->i_sb->s_fsnotify_connectors) == 0)
+ return;
+
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+ fsnotify(mask, inode, FSNOTIFY_EVENT_INODE, NULL, NULL, inode, 0);
}
/* Notify this dentry's parent about a child's events. */
-static inline int fsnotify_parent(const struct path *path,
- struct dentry *dentry, __u32 mask)
+static inline int fsnotify_parent(struct dentry *dentry, __u32 mask,
+ const void *data, int data_type)
{
- if (!dentry)
- dentry = path->dentry;
+ struct inode *inode = d_inode(dentry);
- return __fsnotify_parent(path, dentry, mask);
+ if (atomic_long_read(&inode->i_sb->s_fsnotify_connectors) == 0)
+ return 0;
+
+ if (S_ISDIR(inode->i_mode)) {
+ mask |= FS_ISDIR;
+
+ /* sb/mount marks are not interested in name of directory */
+ if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED))
+ goto notify_child;
+ }
+
+ /* disconnected dentry cannot notify parent */
+ if (IS_ROOT(dentry))
+ goto notify_child;
+
+ return __fsnotify_parent(dentry, mask, data, data_type);
+
+notify_child:
+ return fsnotify(mask, data, data_type, NULL, NULL, inode, 0);
}
/*
- * Simple wrapper to consolidate calls fsnotify_parent()/fsnotify() when
- * an event is on a path.
+ * Simple wrappers to consolidate calls to fsnotify_parent() when an event
+ * is on a file/dentry.
*/
-static inline int fsnotify_path(struct inode *inode, const struct path *path,
- __u32 mask)
+static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask)
+{
+ fsnotify_parent(dentry, mask, dentry, FSNOTIFY_EVENT_DENTRY);
+}
+
+static inline int fsnotify_file(struct file *file, __u32 mask)
{
- int ret = fsnotify_parent(path, NULL, mask);
+ const struct path *path = &file->f_path;
+
+ if (file->f_mode & FMODE_NONOTIFY)
+ return 0;
- if (ret)
- return ret;
- return fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+ return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);
}
/* Simple call site for access decisions */
static inline int fsnotify_perm(struct file *file, int mask)
{
int ret;
- const struct path *path = &file->f_path;
- struct inode *inode = file_inode(file);
__u32 fsnotify_mask = 0;
- if (file->f_mode & FMODE_NONOTIFY)
- return 0;
if (!(mask & (MAY_READ | MAY_OPEN)))
return 0;
+
if (mask & MAY_OPEN) {
fsnotify_mask = FS_OPEN_PERM;
if (file->f_flags & __FMODE_EXEC) {
- ret = fsnotify_path(inode, path, FS_OPEN_EXEC_PERM);
+ ret = fsnotify_file(file, FS_OPEN_EXEC_PERM);
if (ret)
return ret;
@@ -79,10 +121,7 @@ static inline int fsnotify_perm(struct file *file, int mask)
fsnotify_mask = FS_ACCESS_PERM;
}
- if (S_ISDIR(inode->i_mode))
- fsnotify_mask |= FS_ISDIR;
-
- return fsnotify_path(inode, path, fsnotify_mask);
+ return fsnotify_file(file, fsnotify_mask);
}
/*
@@ -90,12 +129,7 @@ static inline int fsnotify_perm(struct file *file, int mask)
*/
static inline void fsnotify_link_count(struct inode *inode)
{
- __u32 mask = FS_ATTRIB;
-
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
- fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ fsnotify_inode(inode, FS_ATTRIB);
}
/*
@@ -110,28 +144,27 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
u32 fs_cookie = fsnotify_get_cookie();
__u32 old_dir_mask = FS_MOVED_FROM;
__u32 new_dir_mask = FS_MOVED_TO;
- __u32 mask = FS_MOVE_SELF;
+ __u32 rename_mask = FS_RENAME;
const struct qstr *new_name = &moved->d_name;
- if (old_dir == new_dir)
- old_dir_mask |= FS_DN_RENAME;
-
if (isdir) {
old_dir_mask |= FS_ISDIR;
new_dir_mask |= FS_ISDIR;
- mask |= FS_ISDIR;
+ rename_mask |= FS_ISDIR;
}
- fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name,
- fs_cookie);
- fsnotify(new_dir, new_dir_mask, source, FSNOTIFY_EVENT_INODE, new_name,
- fs_cookie);
+ /* Event with information about both old and new parent+name */
+ fsnotify_name(rename_mask, moved, FSNOTIFY_EVENT_DENTRY,
+ old_dir, old_name, 0);
+
+ fsnotify_name(old_dir_mask, source, FSNOTIFY_EVENT_INODE,
+ old_dir, old_name, fs_cookie);
+ fsnotify_name(new_dir_mask, source, FSNOTIFY_EVENT_INODE,
+ new_dir, new_name, fs_cookie);
if (target)
fsnotify_link_count(target);
-
- if (source)
- fsnotify(source, mask, source, FSNOTIFY_EVENT_INODE, NULL, 0);
+ fsnotify_inode(source, FS_MOVE_SELF);
audit_inode_child(new_dir, moved, AUDIT_TYPE_CHILD_CREATE);
}
@@ -156,36 +189,76 @@ static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt)
*/
static inline void fsnotify_inoderemove(struct inode *inode)
{
- __u32 mask = FS_DELETE_SELF;
-
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
- fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ fsnotify_inode(inode, FS_DELETE_SELF);
__fsnotify_inode_delete(inode);
}
/*
* fsnotify_create - 'name' was linked in
+ *
+ * Caller must make sure that dentry->d_name is stable.
+ * Note: some filesystems (e.g. kernfs) leave @dentry negative and instantiate
+ * ->d_inode later
*/
-static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
+static inline void fsnotify_create(struct inode *dir, struct dentry *dentry)
{
- audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE);
+ audit_inode_child(dir, dentry, AUDIT_TYPE_CHILD_CREATE);
- fsnotify_dirent(inode, dentry, FS_CREATE);
+ fsnotify_dirent(dir, dentry, FS_CREATE);
}
/*
* fsnotify_link - new hardlink in 'inode' directory
+ *
+ * Caller must make sure that new_dentry->d_name is stable.
 * Note: We also have to pass the linked inode pointer, as some filesystems
 * leave new_dentry->d_inode NULL and instantiate the inode pointer later
*/
-static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry)
+static inline void fsnotify_link(struct inode *dir, struct inode *inode,
+ struct dentry *new_dentry)
{
fsnotify_link_count(inode);
audit_inode_child(dir, new_dentry, AUDIT_TYPE_CHILD_CREATE);
- fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, &new_dentry->d_name, 0);
+ fsnotify_name(FS_CREATE, inode, FSNOTIFY_EVENT_INODE,
+ dir, &new_dentry->d_name, 0);
+}
+
+/*
+ * fsnotify_delete - @dentry was unlinked and unhashed
+ *
+ * Caller must make sure that dentry->d_name is stable.
+ *
+ * Note: unlike fsnotify_unlink(), we have to pass also the unlinked inode
+ * as this may be called after d_delete() and old_dentry may be negative.
+ */
+static inline void fsnotify_delete(struct inode *dir, struct inode *inode,
+ struct dentry *dentry)
+{
+ __u32 mask = FS_DELETE;
+
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+ fsnotify_name(mask, inode, FSNOTIFY_EVENT_INODE, dir, &dentry->d_name,
+ 0);
+}
+
+/**
+ * d_delete_notify - delete a dentry and call fsnotify_delete()
+ * @dir: The parent directory of @dentry
+ * @dentry: The dentry to delete
+ *
+ * This helper is used to guarantee that the unlinked inode cannot be found
+ * by a lookup of this name after the fsnotify_delete() event has been
+ * delivered.
+ */
+static inline void d_delete_notify(struct inode *dir, struct dentry *dentry)
+{
+ struct inode *inode = d_inode(dentry);
+
+ ihold(inode);
+ d_delete(dentry);
+ fsnotify_delete(dir, inode, dentry);
+ iput(inode);
}
/*
@@ -195,20 +268,24 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct
*/
static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry)
{
- /* Expected to be called before d_delete() */
- WARN_ON_ONCE(d_is_negative(dentry));
+ if (WARN_ON_ONCE(d_is_negative(dentry)))
+ return;
- fsnotify_dirent(dir, dentry, FS_DELETE);
+ fsnotify_delete(dir, d_inode(dentry), dentry);
}
/*
* fsnotify_mkdir - directory 'name' was created
+ *
+ * Caller must make sure that dentry->d_name is stable.
+ * Note: some filesystems (e.g. kernfs) leave @dentry negative and instantiate
+ * ->d_inode later
*/
-static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
+static inline void fsnotify_mkdir(struct inode *dir, struct dentry *dentry)
{
- audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE);
+ audit_inode_child(dir, dentry, AUDIT_TYPE_CHILD_CREATE);
- fsnotify_dirent(inode, dentry, FS_CREATE | FS_ISDIR);
+ fsnotify_dirent(dir, dentry, FS_CREATE | FS_ISDIR);
}
/*
@@ -218,10 +295,10 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
*/
static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry)
{
- /* Expected to be called before d_delete() */
- WARN_ON_ONCE(d_is_negative(dentry));
+ if (WARN_ON_ONCE(d_is_negative(dentry)))
+ return;
- fsnotify_dirent(dir, dentry, FS_DELETE | FS_ISDIR);
+ fsnotify_delete(dir, d_inode(dentry), dentry);
}
/*
@@ -229,15 +306,7 @@ static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry)
*/
static inline void fsnotify_access(struct file *file)
{
- const struct path *path = &file->f_path;
- struct inode *inode = file_inode(file);
- __u32 mask = FS_ACCESS;
-
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
- if (!(file->f_mode & FMODE_NONOTIFY))
- fsnotify_path(inode, path, mask);
+ fsnotify_file(file, FS_ACCESS);
}
/*
@@ -245,15 +314,7 @@ static inline void fsnotify_access(struct file *file)
*/
static inline void fsnotify_modify(struct file *file)
{
- const struct path *path = &file->f_path;
- struct inode *inode = file_inode(file);
- __u32 mask = FS_MODIFY;
-
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
- if (!(file->f_mode & FMODE_NONOTIFY))
- fsnotify_path(inode, path, mask);
+ fsnotify_file(file, FS_MODIFY);
}
/*
@@ -261,16 +322,12 @@ static inline void fsnotify_modify(struct file *file)
*/
static inline void fsnotify_open(struct file *file)
{
- const struct path *path = &file->f_path;
- struct inode *inode = file_inode(file);
__u32 mask = FS_OPEN;
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
if (file->f_flags & __FMODE_EXEC)
mask |= FS_OPEN_EXEC;
- fsnotify_path(inode, path, mask);
+ fsnotify_file(file, mask);
}
/*
@@ -278,16 +335,10 @@ static inline void fsnotify_open(struct file *file)
*/
static inline void fsnotify_close(struct file *file)
{
- const struct path *path = &file->f_path;
- struct inode *inode = file_inode(file);
- fmode_t mode = file->f_mode;
- __u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;
-
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
+ __u32 mask = (file->f_mode & FMODE_WRITE) ? FS_CLOSE_WRITE :
+ FS_CLOSE_NOWRITE;
- if (!(file->f_mode & FMODE_NONOTIFY))
- fsnotify_path(inode, path, mask);
+ fsnotify_file(file, mask);
}
/*
@@ -295,14 +346,7 @@ static inline void fsnotify_close(struct file *file)
*/
static inline void fsnotify_xattr(struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
- __u32 mask = FS_ATTRIB;
-
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
- fsnotify_parent(NULL, dentry, mask);
- fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ fsnotify_dentry(dentry, FS_ATTRIB);
}
/*
@@ -311,7 +355,6 @@ static inline void fsnotify_xattr(struct dentry *dentry)
*/
static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
{
- struct inode *inode = dentry->d_inode;
__u32 mask = 0;
if (ia_valid & ATTR_UID)
@@ -332,13 +375,21 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
if (ia_valid & ATTR_MODE)
mask |= FS_ATTRIB;
- if (mask) {
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
+ if (mask)
+ fsnotify_dentry(dentry, mask);
+}
- fsnotify_parent(NULL, dentry, mask);
- fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
- }
+static inline int fsnotify_sb_error(struct super_block *sb, struct inode *inode,
+ int error)
+{
+ struct fs_error_report report = {
+ .error = error,
+ .inode = inode,
+ .sb = sb,
+ };
+
+ return fsnotify(FS_ERROR, &report, FSNOTIFY_EVENT_ERROR,
+ NULL, NULL, NULL, 0);
}
#endif /* _LINUX_FS_NOTIFY_H */
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 1915bdba2fad..d7d96c806bff 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -19,6 +19,8 @@
#include <linux/atomic.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
+#include <linux/mempool.h>
+#include <linux/sched/mm.h>
/*
 * IN_* from inotify.h lines up EXACTLY with FS_*, this is so we can easily
@@ -42,22 +44,28 @@
#define FS_UNMOUNT 0x00002000 /* inode on umount fs */
#define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
+#define FS_ERROR 0x00008000 /* Filesystem Error (fanotify) */
+
+/*
+ * FS_IN_IGNORED overloads FS_ERROR. It is only used internally by inotify
+ * which does not support FS_ERROR.
+ */
#define FS_IN_IGNORED 0x00008000 /* last inotify event here */
#define FS_OPEN_PERM 0x00010000 /* open event in a permission hook */
#define FS_ACCESS_PERM 0x00020000 /* access event in a permissions hook */
#define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */
-#define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */
-#define FS_ISDIR 0x40000000 /* event occurred against dir */
-#define FS_IN_ONESHOT 0x80000000 /* only send event once */
+/*
+ * Set on inode mark that cares about things that happen to its children.
+ * Always set for dnotify and inotify.
+ * Set on inode/sb/mount marks that care about parent/name info.
+ */
+#define FS_EVENT_ON_CHILD 0x08000000
-#define FS_DN_RENAME 0x10000000 /* file renamed */
+#define FS_RENAME 0x10000000 /* File was renamed */
#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */
-
-/* This inode cares about things that happen to its children. Always set for
- * dnotify and inotify. */
-#define FS_EVENT_ON_CHILD 0x08000000
+#define FS_ISDIR 0x40000000 /* event occurred against dir */
#define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO)
@@ -67,29 +75,37 @@
* The watching parent may get an FS_ATTRIB|FS_EVENT_ON_CHILD event
* when a directory entry inside a child subdir changes.
*/
-#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE)
+#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE | FS_RENAME)
#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \
FS_OPEN_EXEC_PERM)
/*
- * This is a list of all events that may get sent to a parent based on fs event
- * happening to inodes inside that directory.
+ * This is a list of all events that may get sent to a parent that is watching
+ * with flag FS_EVENT_ON_CHILD based on fs event on a child of that directory.
*/
#define FS_EVENTS_POSS_ON_CHILD (ALL_FSNOTIFY_PERM_EVENTS | \
FS_ACCESS | FS_MODIFY | FS_ATTRIB | \
FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | \
FS_OPEN | FS_OPEN_EXEC)
+/*
+ * This is a list of all events that may get sent with the parent inode as the
+ * @to_tell argument of fsnotify().
+ * It may include events that can be sent to an inode/sb/mount mark, but cannot
+ * be sent to a parent watching children.
+ */
+#define FS_EVENTS_POSS_TO_PARENT (FS_EVENTS_POSS_ON_CHILD)
+
/* Events that can be reported to backends */
#define ALL_FSNOTIFY_EVENTS (ALL_FSNOTIFY_DIRENT_EVENTS | \
FS_EVENTS_POSS_ON_CHILD | \
- FS_DELETE_SELF | FS_MOVE_SELF | FS_DN_RENAME | \
- FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED)
+ FS_DELETE_SELF | FS_MOVE_SELF | \
+ FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \
+ FS_ERROR)
/* Extra flags that may be reported with event or control handling of events */
-#define ALL_FSNOTIFY_FLAGS (FS_EXCL_UNLINK | FS_ISDIR | FS_IN_ONESHOT | \
- FS_DN_MULTISHOT | FS_EVENT_ON_CHILD)
+#define ALL_FSNOTIFY_FLAGS (FS_ISDIR | FS_EVENT_ON_CHILD | FS_DN_MULTISHOT)
#define ALL_FSNOTIFY_BITS (ALL_FSNOTIFY_EVENTS | ALL_FSNOTIFY_FLAGS)
@@ -107,21 +123,46 @@ struct mem_cgroup;
* these operations for each relevant group.
*
* handle_event - main call for a group to handle an fs event
+ * @group: group to notify
+ * @mask: event type and flags
+ * @data: object that event happened on
+ * @data_type: type of object for fanotify_data_XXX() accessors
+ * @dir: optional directory associated with event -
+ * if @file_name is not NULL, this is the directory that
+ * @file_name is relative to
+ * @file_name: optional file name associated with event
+ * @cookie: inotify rename cookie
+ * @iter_info: array of marks from this group that are interested in the event
+ *
+ * handle_inode_event - simple variant of handle_event() for groups that only
+ * have inode marks and don't have ignore mask
+ * @mark: mark to notify
+ * @mask: event type and flags
+ * @inode: inode that event happened on
+ * @dir: optional directory associated with event -
+ * if @file_name is not NULL, this is the directory that
+ * @file_name is relative to.
+ * Either @inode or @dir must be non-NULL.
+ * @file_name: optional file name associated with event
+ * @cookie: inotify rename cookie
+ *
* free_group_priv - called when a group refcnt hits 0 to clean up the private union
* freeing_mark - called when a mark is being destroyed for some reason. The group
- * MUST be holding a reference on each mark and that reference must be
- * dropped in this function. inotify uses this function to send
- * userspace messages that marks have been removed.
+ * MUST be holding a reference on each mark and that reference must be
+ * dropped in this function. inotify uses this function to send
+ * userspace messages that marks have been removed.
*/
struct fsnotify_ops {
- int (*handle_event)(struct fsnotify_group *group,
- struct inode *inode,
- u32 mask, const void *data, int data_type,
+ int (*handle_event)(struct fsnotify_group *group, u32 mask,
+ const void *data, int data_type, struct inode *dir,
const struct qstr *file_name, u32 cookie,
struct fsnotify_iter_info *iter_info);
+ int (*handle_inode_event)(struct fsnotify_mark *mark, u32 mask,
+ struct inode *inode, struct inode *dir,
+ const struct qstr *file_name, u32 cookie);
void (*free_group_priv)(struct fsnotify_group *group);
void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
- void (*free_event)(struct fsnotify_event *event);
+ void (*free_event)(struct fsnotify_group *group, struct fsnotify_event *event);
/* called on final put+free to free memory */
void (*free_mark)(struct fsnotify_mark *mark);
};
@@ -133,8 +174,6 @@ struct fsnotify_ops {
*/
struct fsnotify_event {
struct list_head list;
- /* inode may ONLY be dereferenced during handle_event(). */
- struct inode *inode; /* either the inode the event happened to or its parent */
};
/*
@@ -172,11 +211,14 @@ struct fsnotify_group {
unsigned int priority;
bool shutdown; /* group is being shut down, don't queue more events */
+#define FSNOTIFY_GROUP_USER 0x01 /* user allocated group */
+#define FSNOTIFY_GROUP_DUPS 0x02 /* allow multiple marks per object */
+#define FSNOTIFY_GROUP_NOFS 0x04 /* group lock is not direct reclaim safe */
+ int flags;
+ unsigned int owner_flags; /* stored flags of mark_mutex owner */
+
/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
struct mutex mark_mutex; /* protect marks_list */
- atomic_t num_marks; /* 1 for each mark and 1 for not being
- * past the point of no return when freeing
- * a group */
atomic_t user_waits; /* Number of tasks waiting for user
* response */
struct list_head marks_list; /* all inode marks for this group */
@@ -201,24 +243,148 @@ struct fsnotify_group {
#endif
#ifdef CONFIG_FANOTIFY
struct fanotify_group_private_data {
+ /* Hash table of events for merge */
+ struct hlist_head *merge_hash;
/* allows a group to block waiting for a userspace response */
struct list_head access_list;
wait_queue_head_t access_waitq;
int flags; /* flags from fanotify_init() */
int f_flags; /* event_f_flags from fanotify_init() */
- unsigned int max_marks;
- struct user_struct *user;
+ struct ucounts *ucounts;
+ mempool_t error_events_pool;
} fanotify_data;
#endif /* CONFIG_FANOTIFY */
};
};
-/* when calling fsnotify tell it if the data is a path or inode */
-#define FSNOTIFY_EVENT_NONE 0
-#define FSNOTIFY_EVENT_PATH 1
-#define FSNOTIFY_EVENT_INODE 2
+/*
+ * These helpers are used to prevent deadlock when reclaiming inodes with
+ * evictable marks of the same group that is allocating a new mark.
+ */
+static inline void fsnotify_group_lock(struct fsnotify_group *group)
+{
+ mutex_lock(&group->mark_mutex);
+ if (group->flags & FSNOTIFY_GROUP_NOFS)
+ group->owner_flags = memalloc_nofs_save();
+}
+
+static inline void fsnotify_group_unlock(struct fsnotify_group *group)
+{
+ if (group->flags & FSNOTIFY_GROUP_NOFS)
+ memalloc_nofs_restore(group->owner_flags);
+ mutex_unlock(&group->mark_mutex);
+}
+static inline void fsnotify_group_assert_locked(struct fsnotify_group *group)
+{
+ WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex));
+ if (group->flags & FSNOTIFY_GROUP_NOFS)
+ WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS));
+}
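
Usage sketch (my_attach_mark is hypothetical): any code path that takes the
mark mutex goes through these wrappers so that FSNOTIFY_GROUP_NOFS groups
enter NOFS allocation scope automatically:

	int err;

	fsnotify_group_lock(group);
	err = my_attach_mark(group, inode);	/* allocations here are NOFS-safe */
	fsnotify_group_unlock(group);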
+
+/* When calling fsnotify tell it if the data is a path or inode */
+enum fsnotify_data_type {
+ FSNOTIFY_EVENT_NONE,
+ FSNOTIFY_EVENT_PATH,
+ FSNOTIFY_EVENT_INODE,
+ FSNOTIFY_EVENT_DENTRY,
+ FSNOTIFY_EVENT_ERROR,
+};
+
+struct fs_error_report {
+ int error;
+ struct inode *inode;
+ struct super_block *sb;
+};
+
+static inline struct inode *fsnotify_data_inode(const void *data, int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_INODE:
+ return (struct inode *)data;
+ case FSNOTIFY_EVENT_DENTRY:
+ return d_inode(data);
+ case FSNOTIFY_EVENT_PATH:
+ return d_inode(((const struct path *)data)->dentry);
+ case FSNOTIFY_EVENT_ERROR:
+ return ((struct fs_error_report *)data)->inode;
+ default:
+ return NULL;
+ }
+}
+
+static inline struct dentry *fsnotify_data_dentry(const void *data, int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_DENTRY:
+ /* Non const is needed for dget() */
+ return (struct dentry *)data;
+ case FSNOTIFY_EVENT_PATH:
+ return ((const struct path *)data)->dentry;
+ default:
+ return NULL;
+ }
+}
+
+static inline const struct path *fsnotify_data_path(const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_PATH:
+ return data;
+ default:
+ return NULL;
+ }
+}
+
+static inline struct super_block *fsnotify_data_sb(const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_INODE:
+ return ((struct inode *)data)->i_sb;
+ case FSNOTIFY_EVENT_DENTRY:
+ return ((struct dentry *)data)->d_sb;
+ case FSNOTIFY_EVENT_PATH:
+ return ((const struct path *)data)->dentry->d_sb;
+ case FSNOTIFY_EVENT_ERROR:
+ return ((struct fs_error_report *) data)->sb;
+ default:
+ return NULL;
+ }
+}
+
+static inline struct fs_error_report *fsnotify_data_error_report(
+ const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_ERROR:
+ return (struct fs_error_report *) data;
+ default:
+ return NULL;
+ }
+}
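
A sketch of a backend's handle_event() pulling the inode out of the opaque
data pointer with the accessors above; everything except the fsnotify types
and helpers is hypothetical:

static int my_handle_event(struct fsnotify_group *group, u32 mask,
			   const void *data, int data_type, struct inode *dir,
			   const struct qstr *file_name, u32 cookie,
			   struct fsnotify_iter_info *iter_info)
{
	struct inode *inode = fsnotify_data_inode(data, data_type);

	if (!inode)		/* e.g. an FS_ERROR event with no inode */
		return 0;

	/* ... merge, queue or process the event ... */
	return 0;
}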
+
+/*
+ * Index to merged marks iterator array that correlates to a type of watch.
+ * The type of watched object can be deduced from the iterator type, but not
+ * the other way around, because an event can match different watched objects
+ * of the same object type.
+ * For example, both parent and child are watching an object of type inode.
+ */
+enum fsnotify_iter_type {
+ FSNOTIFY_ITER_TYPE_INODE,
+ FSNOTIFY_ITER_TYPE_VFSMOUNT,
+ FSNOTIFY_ITER_TYPE_SB,
+ FSNOTIFY_ITER_TYPE_PARENT,
+ FSNOTIFY_ITER_TYPE_INODE2,
+ FSNOTIFY_ITER_TYPE_COUNT
+};
+
+/* The type of object that a mark is attached to */
enum fsnotify_obj_type {
+ FSNOTIFY_OBJ_TYPE_ANY = -1,
FSNOTIFY_OBJ_TYPE_INODE,
FSNOTIFY_OBJ_TYPE_VFSMOUNT,
FSNOTIFY_OBJ_TYPE_SB,
@@ -226,56 +392,69 @@ enum fsnotify_obj_type {
FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT
};
-#define FSNOTIFY_OBJ_TYPE_INODE_FL (1U << FSNOTIFY_OBJ_TYPE_INODE)
-#define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT)
-#define FSNOTIFY_OBJ_TYPE_SB_FL (1U << FSNOTIFY_OBJ_TYPE_SB)
-#define FSNOTIFY_OBJ_ALL_TYPES_MASK ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1)
-
-static inline bool fsnotify_valid_obj_type(unsigned int type)
+static inline bool fsnotify_valid_obj_type(unsigned int obj_type)
{
- return (type < FSNOTIFY_OBJ_TYPE_COUNT);
+ return (obj_type < FSNOTIFY_OBJ_TYPE_COUNT);
}
struct fsnotify_iter_info {
- struct fsnotify_mark *marks[FSNOTIFY_OBJ_TYPE_COUNT];
+ struct fsnotify_mark *marks[FSNOTIFY_ITER_TYPE_COUNT];
+ struct fsnotify_group *current_group;
unsigned int report_mask;
int srcu_idx;
};
static inline bool fsnotify_iter_should_report_type(
- struct fsnotify_iter_info *iter_info, int type)
+ struct fsnotify_iter_info *iter_info, int iter_type)
{
- return (iter_info->report_mask & (1U << type));
+ return (iter_info->report_mask & (1U << iter_type));
}
static inline void fsnotify_iter_set_report_type(
- struct fsnotify_iter_info *iter_info, int type)
+ struct fsnotify_iter_info *iter_info, int iter_type)
+{
+ iter_info->report_mask |= (1U << iter_type);
+}
+
+static inline struct fsnotify_mark *fsnotify_iter_mark(
+ struct fsnotify_iter_info *iter_info, int iter_type)
{
- iter_info->report_mask |= (1U << type);
+ if (fsnotify_iter_should_report_type(iter_info, iter_type))
+ return iter_info->marks[iter_type];
+ return NULL;
}
-static inline void fsnotify_iter_set_report_type_mark(
- struct fsnotify_iter_info *iter_info, int type,
- struct fsnotify_mark *mark)
+static inline int fsnotify_iter_step(struct fsnotify_iter_info *iter, int type,
+ struct fsnotify_mark **markp)
{
- iter_info->marks[type] = mark;
- iter_info->report_mask |= (1U << type);
+ while (type < FSNOTIFY_ITER_TYPE_COUNT) {
+ *markp = fsnotify_iter_mark(iter, type);
+ if (*markp)
+ break;
+ type++;
+ }
+ return type;
}
#define FSNOTIFY_ITER_FUNCS(name, NAME) \
static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \
struct fsnotify_iter_info *iter_info) \
{ \
- return (iter_info->report_mask & FSNOTIFY_OBJ_TYPE_##NAME##_FL) ? \
- iter_info->marks[FSNOTIFY_OBJ_TYPE_##NAME] : NULL; \
+ return fsnotify_iter_mark(iter_info, FSNOTIFY_ITER_TYPE_##NAME); \
}
FSNOTIFY_ITER_FUNCS(inode, INODE)
+FSNOTIFY_ITER_FUNCS(parent, PARENT)
FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT)
FSNOTIFY_ITER_FUNCS(sb, SB)
-#define fsnotify_foreach_obj_type(type) \
- for (type = 0; type < FSNOTIFY_OBJ_TYPE_COUNT; type++)
+#define fsnotify_foreach_iter_type(type) \
+ for (type = 0; type < FSNOTIFY_ITER_TYPE_COUNT; type++)
+#define fsnotify_foreach_iter_mark_type(iter, mark, type) \
+ for (type = 0; \
+ type = fsnotify_iter_step(iter, type, &mark), \
+ type < FSNOTIFY_ITER_TYPE_COUNT; \
+ type++)
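A sketch of the traversal idiom the two macros combine into (editor's illustration): fsnotify_iter_step() skips iterator slots whose report bit is clear, so the loop body only ever sees a non-NULL mark:

static __u32 example_collect_masks(struct fsnotify_iter_info *iter_info)
{
	struct fsnotify_mark *mark;
	__u32 marks_mask = 0;
	int type;

	fsnotify_foreach_iter_mark_type(iter_info, mark, type)
		marks_mask |= mark->mask;	/* mark is never NULL here */

	return marks_mask;
}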
/*
* fsnotify_connp_t is what we embed in objects which connector can be attached
@@ -294,6 +473,7 @@ struct fsnotify_mark_connector {
spinlock_t lock;
unsigned short type; /* Type of object [lock] */
#define FSNOTIFY_CONN_FLAG_HAS_FSID 0x01
+#define FSNOTIFY_CONN_FLAG_HAS_IREF 0x02
unsigned short flags; /* flags [lock] */
__kernel_fsid_t fsid; /* fsid of filesystem containing object */
union {
@@ -338,11 +518,18 @@ struct fsnotify_mark {
struct hlist_node obj_list;
/* Head of list of marks for an object [mark ref] */
struct fsnotify_mark_connector *connector;
- /* Events types to ignore [mark->lock, group->mark_mutex] */
- __u32 ignored_mask;
-#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x01
-#define FSNOTIFY_MARK_FLAG_ALIVE 0x02
-#define FSNOTIFY_MARK_FLAG_ATTACHED 0x04
+ /* Events types and flags to ignore [mark->lock, group->mark_mutex] */
+ __u32 ignore_mask;
+ /* General fsnotify mark flags */
+#define FSNOTIFY_MARK_FLAG_ALIVE 0x0001
+#define FSNOTIFY_MARK_FLAG_ATTACHED 0x0002
+ /* inotify mark flags */
+#define FSNOTIFY_MARK_FLAG_EXCL_UNLINK 0x0010
+#define FSNOTIFY_MARK_FLAG_IN_ONESHOT 0x0020
+ /* fanotify mark flags */
+#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x0100
+#define FSNOTIFY_MARK_FLAG_NO_IREF 0x0200
+#define FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS 0x0400
unsigned int flags; /* flags [mark->lock] */
};
@@ -351,14 +538,29 @@ struct fsnotify_mark {
/* called from the vfs helpers */
/* main fsnotify call to send events */
-extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
- const struct qstr *name, u32 cookie);
-extern int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask);
+extern int fsnotify(__u32 mask, const void *data, int data_type,
+ struct inode *dir, const struct qstr *name,
+ struct inode *inode, u32 cookie);
+extern int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data,
+ int data_type);
extern void __fsnotify_inode_delete(struct inode *inode);
extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
extern void fsnotify_sb_delete(struct super_block *sb);
extern u32 fsnotify_get_cookie(void);
+static inline __u32 fsnotify_parent_needed_mask(__u32 mask)
+{
+ /* FS_EVENT_ON_CHILD is set on marks that want parent/name info */
+ if (!(mask & FS_EVENT_ON_CHILD))
+ return 0;
+	/*
+	 * This object might be watched by a mark that cares about parent/name
+	 * info; does it care about the specific set of events that can be
+	 * reported with parent/name info?
+	 */
+ return mask & FS_EVENTS_POSS_TO_PARENT;
+}
+
static inline int fsnotify_inode_watches_children(struct inode *inode)
{
/* FS_EVENT_ON_CHILD is set if the inode may care */
@@ -393,7 +595,9 @@ static inline void fsnotify_update_flags(struct dentry *dentry)
/* called from fsnotify listeners, such as fanotify or dnotify */
/* create a new group */
-extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops);
+extern struct fsnotify_group *fsnotify_alloc_group(
+ const struct fsnotify_ops *ops,
+ int flags);
/* get reference to a group */
extern void fsnotify_get_group(struct fsnotify_group *group);
/* drop reference on a group from fsnotify_alloc_group */
@@ -408,17 +612,39 @@ extern int fsnotify_fasync(int fd, struct file *file, int on);
extern void fsnotify_destroy_event(struct fsnotify_group *group,
struct fsnotify_event *event);
/* attach the event to the group notification queue */
-extern int fsnotify_add_event(struct fsnotify_group *group,
- struct fsnotify_event *event,
- int (*merge)(struct list_head *,
- struct fsnotify_event *));
+extern int fsnotify_insert_event(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ int (*merge)(struct fsnotify_group *,
+ struct fsnotify_event *),
+ void (*insert)(struct fsnotify_group *,
+ struct fsnotify_event *));
+
+static inline int fsnotify_add_event(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ int (*merge)(struct fsnotify_group *,
+ struct fsnotify_event *))
+{
+ return fsnotify_insert_event(group, event, merge, NULL);
+}
+
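A sketch of the new merge-callback contract (hypothetical backend that never coalesces). The callback now receives the group rather than a bare list head, which is what allows fanotify to consult its per-group merge_hash:

/* Return 1 if @event was merged into an already-queued event, else 0. */
static int example_merge(struct fsnotify_group *group,
			 struct fsnotify_event *event)
{
	return 0;	/* never coalesce in this sketch */
}

static int example_queue(struct fsnotify_group *group,
			 struct fsnotify_event *event)
{
	/* Queueing without an insert hook reduces to fsnotify_add_event() */
	return fsnotify_add_event(group, event, example_merge);
}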
/* Queue overflow event to a notification group */
static inline void fsnotify_queue_overflow(struct fsnotify_group *group)
{
fsnotify_add_event(group, group->overflow_event, NULL);
}
-/* true if the group notification queue is empty */
+static inline bool fsnotify_is_overflow_event(u32 mask)
+{
+ return mask & FS_Q_OVERFLOW;
+}
+
+static inline bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
+{
+ assert_spin_locked(&group->notification_lock);
+
+ return list_empty(&group->notification_list);
+}
+
-extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
/* return, but do not dequeue the first event on the notification queue */
extern struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group);
@@ -430,6 +656,101 @@ extern void fsnotify_remove_queued_event(struct fsnotify_group *group,
/* functions used to manipulate the marks attached to inodes */
+/*
+ * Canonical "ignore mask" including event flags.
+ *
+ * Note the subtle semantic difference from the legacy ->ignored_mask.
+ * ->ignored_mask traditionally only meant which events should be ignored,
+ * while ->ignore_mask also includes flags regarding the type of objects on
+ * which events should be ignored.
+ */
+static inline __u32 fsnotify_ignore_mask(struct fsnotify_mark *mark)
+{
+ __u32 ignore_mask = mark->ignore_mask;
+
+ /* The event flags in ignore mask take effect */
+ if (mark->flags & FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS)
+ return ignore_mask;
+
+ /*
+ * Legacy behavior:
+ * - Always ignore events on dir
+ * - Ignore events on child if parent is watching children
+ */
+ ignore_mask |= FS_ISDIR;
+ ignore_mask &= ~FS_EVENT_ON_CHILD;
+ ignore_mask |= mark->mask & FS_EVENT_ON_CHILD;
+
+ return ignore_mask;
+}
+
+/* Legacy ignored_mask - only event types to ignore */
+static inline __u32 fsnotify_ignored_events(struct fsnotify_mark *mark)
+{
+ return mark->ignore_mask & ALL_FSNOTIFY_EVENTS;
+}
+
+/*
+ * Check if mask (or ignore mask) should be applied, depending on whether the
+ * victim is a directory and whether the event is reported to a watching
+ * parent.
+ */
+static inline bool fsnotify_mask_applicable(__u32 mask, bool is_dir,
+ int iter_type)
+{
+ /* Should mask be applied to a directory? */
+ if (is_dir && !(mask & FS_ISDIR))
+ return false;
+
+ /* Should mask be applied to a child? */
+ if (iter_type == FSNOTIFY_ITER_TYPE_PARENT &&
+ !(mask & FS_EVENT_ON_CHILD))
+ return false;
+
+ return true;
+}
+
+/*
+ * Effective ignore mask, taking into account whether the event victim is a
+ * directory and whether it is reported to a watching parent.
+ */
+static inline __u32 fsnotify_effective_ignore_mask(struct fsnotify_mark *mark,
+ bool is_dir, int iter_type)
+{
+ __u32 ignore_mask = fsnotify_ignored_events(mark);
+
+ if (!ignore_mask)
+ return 0;
+
+ /* For non-dir and non-child, no need to consult the event flags */
+ if (!is_dir && iter_type != FSNOTIFY_ITER_TYPE_PARENT)
+ return ignore_mask;
+
+ ignore_mask = fsnotify_ignore_mask(mark);
+ if (!fsnotify_mask_applicable(ignore_mask, is_dir, iter_type))
+ return 0;
+
+ return ignore_mask & ALL_FSNOTIFY_EVENTS;
+}
+
+/* Get mask for calculating object interest taking ignore mask into account */
+static inline __u32 fsnotify_calc_mask(struct fsnotify_mark *mark)
+{
+ __u32 mask = mark->mask;
+
+ if (!fsnotify_ignored_events(mark))
+ return mask;
+
+ /* Interest in FS_MODIFY may be needed for clearing ignore mask */
+ if (!(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
+ mask |= FS_MODIFY;
+
+ /*
+ * If mark is interested in ignoring events on children, the object must
+ * show interest in those events for fsnotify_parent() to notice it.
+ */
+ return mask | mark->ignore_mask;
+}
+
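How this feeds object interest, sketched after the shape of fsnotify_recalc_mask() in fs/notify/mark.c: because fsnotify_calc_mask() lets an ignore-only mark still contribute interest, the object is not filtered out before the ignore logic can run:

static __u32 example_object_interest(struct fsnotify_mark_connector *conn)
{
	struct fsnotify_mark *mark;
	__u32 new_mask = 0;

	hlist_for_each_entry(mark, &conn->list, obj_list)
		if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)
			new_mask |= fsnotify_calc_mask(mark);

	return new_mask;
}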
/* Get mask of events for a list of marks */
extern __u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn);
/* Calculate mask of events for a list of marks */
@@ -444,27 +765,27 @@ extern int fsnotify_get_conn_fsid(const struct fsnotify_mark_connector *conn,
__kernel_fsid_t *fsid);
/* attach the mark to the object */
extern int fsnotify_add_mark(struct fsnotify_mark *mark,
- fsnotify_connp_t *connp, unsigned int type,
- int allow_dups, __kernel_fsid_t *fsid);
+ fsnotify_connp_t *connp, unsigned int obj_type,
+ int add_flags, __kernel_fsid_t *fsid);
extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
fsnotify_connp_t *connp,
- unsigned int type, int allow_dups,
+ unsigned int obj_type, int add_flags,
__kernel_fsid_t *fsid);
/* attach the mark to the inode */
static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
struct inode *inode,
- int allow_dups)
+ int add_flags)
{
return fsnotify_add_mark(mark, &inode->i_fsnotify_marks,
- FSNOTIFY_OBJ_TYPE_INODE, allow_dups, NULL);
+ FSNOTIFY_OBJ_TYPE_INODE, add_flags, NULL);
}
static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark,
struct inode *inode,
- int allow_dups)
+ int add_flags)
{
return fsnotify_add_mark_locked(mark, &inode->i_fsnotify_marks,
- FSNOTIFY_OBJ_TYPE_INODE, allow_dups,
+ FSNOTIFY_OBJ_TYPE_INODE, add_flags,
NULL);
}
@@ -477,44 +798,45 @@ extern void fsnotify_detach_mark(struct fsnotify_mark *mark);
extern void fsnotify_free_mark(struct fsnotify_mark *mark);
/* Wait until all marks queued for destruction are destroyed */
extern void fsnotify_wait_marks_destroyed(void);
-/* run all the marks in a group, and clear all of the marks attached to given object type */
-extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned int type);
+/* Clear all of the marks of a group attached to a given object type */
+extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group,
+ unsigned int obj_type);
/* run all the marks in a group, and clear all of the vfsmount marks */
static inline void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL);
+ fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT);
}
/* run all the marks in a group, and clear all of the inode marks */
static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE_FL);
+ fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE);
}
/* run all the marks in a group, and clear all of the sn marks */
static inline void fsnotify_clear_sb_marks_by_group(struct fsnotify_group *group)
{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_SB_FL);
+ fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_SB);
}
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info);
-static inline void fsnotify_init_event(struct fsnotify_event *event,
- struct inode *inode)
+static inline void fsnotify_init_event(struct fsnotify_event *event)
{
INIT_LIST_HEAD(&event->list);
- event->inode = inode;
}
#else
-static inline int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
- const struct qstr *name, u32 cookie)
+static inline int fsnotify(__u32 mask, const void *data, int data_type,
+ struct inode *dir, const struct qstr *name,
+ struct inode *inode, u32 cookie)
{
return 0;
}
-static inline int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask)
+static inline int __fsnotify_parent(struct dentry *dentry, __u32 mask,
+ const void *data, int data_type)
{
return 0;
}
diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h
index ecc604e61d61..40f14e5fed9d 100644
--- a/include/linux/fsverity.h
+++ b/include/linux/fsverity.h
@@ -12,8 +12,19 @@
#define _LINUX_FSVERITY_H
#include <linux/fs.h>
+#include <crypto/hash_info.h>
+#include <crypto/sha2.h>
#include <uapi/linux/fsverity.h>
+/*
+ * Largest digest size among all hash algorithms supported by fs-verity.
+ * Currently assumed to be <= size of fsverity_descriptor::root_hash.
+ */
+#define FS_VERITY_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
+
+/* Arbitrary limit to bound the kmalloc() size. Can be changed. */
+#define FS_VERITY_MAX_DESCRIPTOR_SIZE 16384
+
/* Verity operations for filesystems */
struct fsverity_operations {
@@ -115,29 +126,41 @@ struct fsverity_operations {
static inline struct fsverity_info *fsverity_get_info(const struct inode *inode)
{
- /* pairs with the cmpxchg() in fsverity_set_info() */
- return READ_ONCE(inode->i_verity_info);
+ /*
+ * Pairs with the cmpxchg_release() in fsverity_set_info().
+ * I.e., another task may publish ->i_verity_info concurrently,
+ * executing a RELEASE barrier. We need to use smp_load_acquire() here
+ * to safely ACQUIRE the memory the other task published.
+ */
+ return smp_load_acquire(&inode->i_verity_info);
}
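For context, a sketch of the publish side this acquire pairs with, assuming the shape of fsverity_set_info() in fs/verity/open.c; the release store orders the initialization of *vi before the pointer becomes visible:

static int example_set_info(struct inode *inode, struct fsverity_info *vi)
{
	/* Writes that initialized *vi cannot pass this release store. */
	if (cmpxchg_release(&inode->i_verity_info, NULL, vi) != NULL)
		return -EEXIST;	/* lost the race; caller frees vi */
	return 0;
}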
/* enable.c */
-extern int fsverity_ioctl_enable(struct file *filp, const void __user *arg);
+int fsverity_ioctl_enable(struct file *filp, const void __user *arg);
/* measure.c */
-extern int fsverity_ioctl_measure(struct file *filp, void __user *arg);
+int fsverity_ioctl_measure(struct file *filp, void __user *arg);
+int fsverity_get_digest(struct inode *inode,
+ u8 digest[FS_VERITY_MAX_DIGEST_SIZE],
+ enum hash_algo *alg);
/* open.c */
-extern int fsverity_file_open(struct inode *inode, struct file *filp);
-extern int fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr);
-extern void fsverity_cleanup_inode(struct inode *inode);
+int fsverity_file_open(struct inode *inode, struct file *filp);
+int fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr);
+void fsverity_cleanup_inode(struct inode *inode);
+
+/* read_metadata.c */
+
+int fsverity_ioctl_read_metadata(struct file *filp, const void __user *uarg);
/* verify.c */
-extern bool fsverity_verify_page(struct page *page);
-extern void fsverity_verify_bio(struct bio *bio);
-extern void fsverity_enqueue_verify_work(struct work_struct *work);
+bool fsverity_verify_page(struct page *page);
+void fsverity_verify_bio(struct bio *bio);
+void fsverity_enqueue_verify_work(struct work_struct *work);
#else /* !CONFIG_FS_VERITY */
@@ -161,6 +184,13 @@ static inline int fsverity_ioctl_measure(struct file *filp, void __user *arg)
return -EOPNOTSUPP;
}
+static inline int fsverity_get_digest(struct inode *inode,
+ u8 digest[FS_VERITY_MAX_DIGEST_SIZE],
+ enum hash_algo *alg)
+{
+ return -EOPNOTSUPP;
+}
+
/* open.c */
static inline int fsverity_file_open(struct inode *inode, struct file *filp)
@@ -178,6 +208,14 @@ static inline void fsverity_cleanup_inode(struct inode *inode)
{
}
+/* read_metadata.c */
+
+static inline int fsverity_ioctl_read_metadata(struct file *filp,
+ const void __user *uarg)
+{
+ return -EOPNOTSUPP;
+}
+
/* verify.c */
static inline bool fsverity_verify_page(struct page *page)
@@ -200,13 +238,16 @@ static inline void fsverity_enqueue_verify_work(struct work_struct *work)
/**
* fsverity_active() - do reads from the inode need to go through fs-verity?
+ * @inode: inode to check
*
* This checks whether ->i_verity_info has been set.
*
- * Filesystems call this from ->readpages() to check whether the pages need to
+ * Filesystems call this from ->readahead() to check whether the pages need to
* be verified or not. Don't use IS_VERITY() for this purpose; it's subject to
* a race condition where the file is being read concurrently with
* FS_IOC_ENABLE_VERITY completing. (S_VERITY is set before ->i_verity_info.)
+ *
+ * Return: true if reads need to go through fs-verity, otherwise false
*/
static inline bool fsverity_active(const struct inode *inode)
{
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index db95244a62d4..62557d4bffc2 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -7,7 +7,9 @@
#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H
+#include <linux/trace_recursion.h>
#include <linux/trace_clock.h>
+#include <linux/jump_label.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
@@ -29,16 +31,32 @@
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif
+#ifdef CONFIG_TRACING
+extern void ftrace_boot_snapshot(void);
+#else
+static inline void ftrace_boot_snapshot(void) { }
+#endif
+
+#ifdef CONFIG_FUNCTION_TRACER
+struct ftrace_ops;
+struct ftrace_regs;
/*
* If the arch's mcount caller does not support all of ftrace's
* features, then it must call an indirect function that
- * does. Or at least does enough to prevent any unwelcomed side effects.
+ * does. Or at least does enough to prevent any unwelcome side effects.
+ *
+ * Also define the function prototype that these architectures use
+ * to call the ftrace_ops_list_func().
*/
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
+void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
#else
# define FTRACE_FORCE_LIST_FUNC 0
+void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
#endif
+#endif /* CONFIG_FUNCTION_TRACER */
/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
@@ -58,9 +76,6 @@ struct ftrace_direct_func;
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym);
-int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
- char *type, char *name,
- char *module_name, int *exported);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
@@ -68,6 +83,13 @@ ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
{
return NULL;
}
+#endif
+
+#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
+int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
+ char *type, char *name,
+ char *module_name, int *exported);
+#else
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
char *type, char *name,
char *module_name, int *exported)
@@ -76,26 +98,43 @@ static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *val
}
#endif
-
#ifdef CONFIG_FUNCTION_TRACER
extern int ftrace_enabled;
-extern int
-ftrace_enable_sysctl(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
-struct ftrace_ops;
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
+
+struct ftrace_regs {
+ struct pt_regs regs;
+};
+#define arch_ftrace_get_regs(fregs) (&(fregs)->regs)
+
+/*
+ * ftrace_instruction_pointer_set() is to be defined by the architecture
+ * to allow setting the instruction pointer from the ftrace_regs when
+ * HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and the architecture supports
+ * live kernel patching.
+ */
+#define ftrace_instruction_pointer_set(fregs, ip) do { } while (0)
+#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
+
+static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
+{
+ if (!fregs)
+ return NULL;
+
+ return arch_ftrace_get_regs(fregs);
+}
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *regs);
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
/*
* FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
* set in the flags member.
- * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
+ * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION, STUB and
* IPMODIFY are a kind of attribute flags which can be set only before
* registering the ftrace_ops, and can not be modified while registered.
* Changing those attribute flags after registering ftrace_ops will
@@ -118,10 +157,10 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* passing regs to the handler.
* Note, if this flag is set, the SAVE_REGS flag will automatically
* get set upon registering the ftrace_ops, if the arch supports it.
- * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
- * that the call back has its own recursion protection. If it does
- * not set this, then the ftrace infrastructure will add recursion
- * protection for the caller.
+ * RECURSION - The ftrace_ops can set this to tell the ftrace infrastructure
+ *            that the callback needs recursion protection. If it does
+ * not set this, then the ftrace infrastructure will assume
+ * that the callback can handle recursion on its own.
* STUB - The ftrace_ops is just a place holder.
* INITIALIZED - The ftrace_ops has already been initialized (first use time
* register_ftrace_function() is called, it will initialized the ops)
@@ -153,7 +192,7 @@ enum {
FTRACE_OPS_FL_DYNAMIC = BIT(1),
FTRACE_OPS_FL_SAVE_REGS = BIT(2),
FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = BIT(3),
- FTRACE_OPS_FL_RECURSION_SAFE = BIT(4),
+ FTRACE_OPS_FL_RECURSION = BIT(4),
FTRACE_OPS_FL_STUB = BIT(5),
FTRACE_OPS_FL_INITIALIZED = BIT(6),
FTRACE_OPS_FL_DELETED = BIT(7),
@@ -169,6 +208,43 @@ enum {
FTRACE_OPS_FL_DIRECT = BIT(17),
};
+/*
+ * FTRACE_OPS_CMD_* commands allow the ftrace core logic to request changes
+ * to an ftrace_ops. Note, the requests may fail.
+ *
+ * ENABLE_SHARE_IPMODIFY_SELF - enable a DIRECT ops to work on the same
+ *                              function as an ops with IPMODIFY. Called
+ *                              when the DIRECT ops is being registered.
+ *                              This is called with both direct_mutex and
+ *                              ftrace_lock held.
+ *
+ * ENABLE_SHARE_IPMODIFY_PEER - enable a DIRECT ops to work on the same
+ *                              function as an ops with IPMODIFY. Called
+ *                              when the other ops (the one with IPMODIFY)
+ *                              is being registered.
+ *                              This is called with direct_mutex held.
+ *
+ * DISABLE_SHARE_IPMODIFY_PEER - disable a DIRECT ops from working on the
+ *                               same function as an ops with IPMODIFY.
+ *                               Called when the other ops (the one with
+ *                               IPMODIFY) is being unregistered.
+ *                               This is called with direct_mutex held.
+ */
+enum ftrace_ops_cmd {
+ FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF,
+ FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER,
+ FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER,
+};
+
+/*
+ * For most ftrace_ops_cmd values,
+ * Returns:
+ *        0 - Success.
+ *        Negative on failure. The exact value is dependent on the
+ *        callback.
+ */
+typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, enum ftrace_ops_cmd cmd);
+
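A hedged sketch of an ops_func implementation for a DIRECT ops provider; whether sharing with an IPMODIFY user is acceptable is entirely the callback's policy:

static int example_ops_func(struct ftrace_ops *op, enum ftrace_ops_cmd cmd)
{
	switch (cmd) {
	case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF:
	case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER:
		return 0;	/* hypothetical: always allow sharing */
	case FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER:
		return 0;	/* nothing to tear down in this sketch */
	}
	return -EINVAL;
}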
#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
@@ -180,7 +256,10 @@ struct ftrace_ops_hash {
void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
-static inline void ftrace_free_init_mem(void) { }
+static inline void ftrace_free_init_mem(void)
+{
+ ftrace_boot_snapshot();
+}
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif
@@ -207,9 +286,34 @@ struct ftrace_ops {
struct ftrace_ops_hash old_hash;
unsigned long trampoline;
unsigned long trampoline_size;
+ struct list_head list;
+ ftrace_ops_func_t ops_func;
#endif
};
+extern struct ftrace_ops __rcu *ftrace_ops_list;
+extern struct ftrace_ops ftrace_list_end;
+
+/*
+ * Traverse the ftrace_ops_list, invoking all entries. The reason that we
+ * can use rcu_dereference_raw_check() is that elements removed from this list
+ * are simply leaked, so there is no need to interact with a grace-period
+ * mechanism. The rcu_dereference_raw_check() calls are needed to handle
+ * concurrent insertions into the ftrace_ops_list.
+ *
+ * Silly Alpha and silly pointer-speculation compiler optimizations!
+ */
+#define do_for_each_ftrace_op(op, list) \
+ op = rcu_dereference_raw_check(list); \
+ do
+
+/*
+ * Optimized for just a single item in the list (as that is the normal case).
+ */
+#define while_for_each_ftrace_op(op) \
+ while (likely(op = rcu_dereference_raw_check((op)->next)) && \
+ unlikely((op) != &ftrace_list_end))
+
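The canonical traversal these macros expect, sketched after their use in kernel/trace/ftrace.c. The list is terminated by ftrace_list_end, whose func is ftrace_stub, so even an otherwise empty list is safe to walk:

static void example_walk_ops(unsigned long ip, unsigned long parent_ip,
			     struct ftrace_regs *fregs)
{
	struct ftrace_ops *op;

	/* Caller must be in an RCU-safe context (e.g. preemption off). */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		op->func(ip, parent_ip, op, fregs);
	} while_for_each_ftrace_op(op);
}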
/*
* Type of the current tracing.
*/
@@ -232,8 +336,10 @@ int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
extern void ftrace_stub(unsigned long a0, unsigned long a1,
- struct ftrace_ops *op, struct pt_regs *regs);
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
+
+int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs);
#else /* !CONFIG_FUNCTION_TRACER */
/*
* (un)register_ftrace_function must be a macro since the ops parameter
@@ -244,6 +350,10 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1,
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
+static inline int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_FUNCTION_TRACER */
struct ftrace_func_entry {
@@ -265,7 +375,13 @@ int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
unsigned long old_addr,
unsigned long new_addr);
unsigned long ftrace_find_rec_direct(unsigned long ip);
+int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr);
+int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr);
+int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr);
+int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsigned long addr);
+
#else
+struct ftrace_ops;
# define ftrace_direct_func_count 0
static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
{
@@ -295,6 +411,22 @@ static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
return 0;
}
+static inline int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
+{
+ return -ENODEV;
+}
+static inline int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
+{
+ return -ENODEV;
+}
+static inline int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
+{
+ return -ENODEV;
+}
+static inline int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsigned long addr)
+{
+ return -ENODEV;
+}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
@@ -319,9 +451,8 @@ static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
extern int stack_tracer_enabled;
-int stack_trace_sysctl(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
+int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos);
/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);
@@ -339,7 +470,7 @@ DECLARE_PER_CPU(int, disable_stack_tracer);
*/
static inline void stack_tracer_disable(void)
{
- /* Preemption or interupts must be disabled */
+ /* Preemption or interrupts must be disabled */
if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
this_cpu_inc(disable_stack_tracer);
@@ -364,8 +495,8 @@ static inline void stack_tracer_enable(void) { }
#ifdef CONFIG_DYNAMIC_FTRACE
-int ftrace_arch_code_modify_prepare(void);
-int ftrace_arch_code_modify_post_process(void);
+void ftrace_arch_code_modify_prepare(void);
+void ftrace_arch_code_modify_post_process(void);
enum ftrace_bug_type {
FTRACE_BUG_UNKNOWN,
@@ -407,7 +538,7 @@ bool is_ftrace_trampoline(unsigned long addr);
* DIRECT - there is a direct function to call
*
* When a new ftrace_ops is registered and wants a function to save
- * pt_regs, the rec->flag REGS is set. When the function has been
+ * pt_regs, the rec->flags REGS is set. When the function has been
* set up to save regs, the REG_EN flag is set. Once a function
* starts saving regs it will do so until all ftrace_ops are removed
* from tracing that function.
@@ -425,12 +556,9 @@ enum {
};
#define FTRACE_REF_MAX_SHIFT 23
-#define FTRACE_FL_BITS 9
-#define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1)
-#define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
-#define ftrace_rec_count(rec) ((rec)->flags & ~FTRACE_FL_MASK)
+#define ftrace_rec_count(rec) ((rec)->flags & FTRACE_REF_MAX)
struct dyn_ftrace {
unsigned long ip; /* address of mcount call-site */
@@ -438,9 +566,10 @@ struct dyn_ftrace {
struct dyn_arch_ftrace arch;
};
-int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
int remove, int reset);
+int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
+ unsigned int cnt, int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
@@ -597,6 +726,22 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
extern int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr);
+/**
+ * ftrace_need_init_nop - return whether nop call sites should be initialized
+ *
+ * Normally the compiler's -mnop-mcount generates suitable nops, so we don't
+ * need to call ftrace_init_nop() if the code is built with that flag.
+ * Architectures where this is not always the case may define their own
+ * condition.
+ *
+ * Return must be:
+ * 0 if ftrace_init_nop() should be called
+ * Nonzero if ftrace_init_nop() should not be called
+ */
+
+#ifndef ftrace_need_init_nop
+#define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT))
+#endif
/**
* ftrace_init_nop - initialize a nop call site
@@ -693,7 +838,6 @@ extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
-static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
@@ -716,6 +860,7 @@ static inline unsigned long ftrace_location(unsigned long ip)
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
+#define ftrace_set_filter_ips(ops, ips, cnt, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
@@ -734,6 +879,15 @@ static inline bool is_ftrace_trampoline(unsigned long addr)
}
#endif /* CONFIG_DYNAMIC_FTRACE */
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifndef ftrace_graph_func
+#define ftrace_graph_func ftrace_stub
+#define FTRACE_OPS_GRAPH_STUB FTRACE_OPS_FL_STUB
+#else
+#define FTRACE_OPS_GRAPH_STUB 0
+#endif
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);
@@ -841,11 +995,11 @@ struct ftrace_graph_ent {
*/
struct ftrace_graph_ret {
unsigned long func; /* Current function */
+ int depth;
/* Number of functions that overran the depth limit for current task */
- unsigned long overrun;
+ unsigned int overrun;
unsigned long long calltime;
unsigned long long rettime;
- int depth;
} __packed;
/* Type of the callback handlers for tracing function graph*/
@@ -911,7 +1065,20 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
extern int register_ftrace_graph(struct fgraph_ops *ops);
extern void unregister_ftrace_graph(struct fgraph_ops *ops);
-extern bool ftrace_graph_is_dead(void);
+/**
+ * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
+ *
+ * ftrace_graph_stop() is called when a severe error is detected in
+ * the function graph tracing. This function is called by the critical
+ * paths of function graph to keep those paths from doing any more harm.
+ */
+DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph);
+
+static inline bool ftrace_graph_is_dead(void)
+{
+ return static_branch_unlikely(&kill_ftrace_graph);
+}
+
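A sketch of the intended fast-path use: with the static key, the healthy case costs a patched no-op rather than a load and compare:

static int example_graph_entry(struct ftrace_graph_ent *trace)
{
	if (ftrace_graph_is_dead())
		return 0;	/* refuse to trace after a fatal error */
	/* ... record the function entry ... */
	return 1;
}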
extern void ftrace_graph_stop(void);
/* The current handlers in use */
@@ -955,47 +1122,6 @@ static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_TRACING
-
-/* flags for current->trace */
-enum {
- TSK_TRACE_FL_TRACE_BIT = 0,
- TSK_TRACE_FL_GRAPH_BIT = 1,
-};
-enum {
- TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT,
- TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT,
-};
-
-static inline void set_tsk_trace_trace(struct task_struct *tsk)
-{
- set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
-}
-
-static inline void clear_tsk_trace_trace(struct task_struct *tsk)
-{
- clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
-}
-
-static inline int test_tsk_trace_trace(struct task_struct *tsk)
-{
- return tsk->trace & TSK_TRACE_FL_TRACE;
-}
-
-static inline void set_tsk_trace_graph(struct task_struct *tsk)
-{
- set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
-}
-
-static inline void clear_tsk_trace_graph(struct task_struct *tsk)
-{
- clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
-}
-
-static inline int test_tsk_trace_graph(struct task_struct *tsk)
-{
- return tsk->trace & TSK_TRACE_FL_GRAPH;
-}
-
enum ftrace_dump_mode;
extern enum ftrace_dump_mode ftrace_dump_on_oops;
@@ -1005,8 +1131,7 @@ extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;
int tracepoint_printk_sysctl(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
+ void *buffer, size_t *lenp, loff_t *ppos);
#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
index ccda97dc7f8b..f6faa31289ba 100644
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -2,36 +2,38 @@
#ifndef _LINUX_FTRACE_IRQ_H
#define _LINUX_FTRACE_IRQ_H
-
-#ifdef CONFIG_FTRACE_NMI_ENTER
-extern void arch_ftrace_nmi_enter(void);
-extern void arch_ftrace_nmi_exit(void);
-#else
-static inline void arch_ftrace_nmi_enter(void) { }
-static inline void arch_ftrace_nmi_exit(void) { }
-#endif
-
#ifdef CONFIG_HWLAT_TRACER
extern bool trace_hwlat_callback_enabled;
extern void trace_hwlat_callback(bool enter);
#endif
+#ifdef CONFIG_OSNOISE_TRACER
+extern bool trace_osnoise_callback_enabled;
+extern void trace_osnoise_callback(bool enter);
+#endif
+
static inline void ftrace_nmi_enter(void)
{
#ifdef CONFIG_HWLAT_TRACER
if (trace_hwlat_callback_enabled)
trace_hwlat_callback(true);
#endif
- arch_ftrace_nmi_enter();
+#ifdef CONFIG_OSNOISE_TRACER
+ if (trace_osnoise_callback_enabled)
+ trace_osnoise_callback(true);
+#endif
}
static inline void ftrace_nmi_exit(void)
{
- arch_ftrace_nmi_exit();
#ifdef CONFIG_HWLAT_TRACER
if (trace_hwlat_callback_enabled)
trace_hwlat_callback(false);
#endif
+#ifdef CONFIG_OSNOISE_TRACER
+ if (trace_osnoise_callback_enabled)
+ trace_osnoise_callback(false);
+#endif
}
#endif /* _LINUX_FTRACE_IRQ_H */
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 8feeb94b8acc..89b9bdfca925 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -10,14 +10,47 @@
#define _LINUX_FWNODE_H_
#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/bits.h>
+#include <linux/err.h>
struct fwnode_operations;
struct device;
+/*
+ * fwnode link flags
+ *
+ * LINKS_ADDED: The fwnode has already been parsed to add fwnode links.
+ * NOT_DEVICE: The fwnode will never be populated as a struct device.
+ * INITIALIZED: The hardware corresponding to fwnode has been initialized.
+ * NEEDS_CHILD_BOUND_ON_ADD: For this fwnode/device to probe successfully, its
+ * driver needs its child devices to be bound with
+ * their respective drivers as soon as they are
+ * added.
+ * BEST_EFFORT: The fwnode/device needs to probe early and might be missing some
+ * suppliers. Only enforce ordering with suppliers that have
+ * drivers.
+ */
+#define FWNODE_FLAG_LINKS_ADDED BIT(0)
+#define FWNODE_FLAG_NOT_DEVICE BIT(1)
+#define FWNODE_FLAG_INITIALIZED BIT(2)
+#define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD BIT(3)
+#define FWNODE_FLAG_BEST_EFFORT BIT(4)
+
struct fwnode_handle {
struct fwnode_handle *secondary;
const struct fwnode_operations *ops;
struct device *dev;
+ struct list_head suppliers;
+ struct list_head consumers;
+ u8 flags;
+};
+
+struct fwnode_link {
+ struct fwnode_handle *supplier;
+ struct list_head s_hook;
+ struct fwnode_handle *consumer;
+ struct list_head c_hook;
};
/**
@@ -32,6 +65,13 @@ struct fwnode_endpoint {
const struct fwnode_handle *local_fwnode;
};
+/*
+ * ports and endpoints defined as software_nodes should all follow a common
+ * naming scheme; use these macros to ensure commonality.
+ */
+#define SWNODE_GRAPH_PORT_NAME_FMT "port@%u"
+#define SWNODE_GRAPH_ENDPOINT_NAME_FMT "endpoint@%u"
+
#define NR_FWNODE_REFERENCE_ARGS 8
/**
@@ -68,44 +108,8 @@ struct fwnode_reference_args {
* endpoint node.
* @graph_get_port_parent: Return the parent node of a port node.
* @graph_parse_endpoint: Parse endpoint for port and endpoint id.
- * @add_links: Called after the device corresponding to the fwnode is added
- * using device_add(). The function is expected to create device
- * links to all the suppliers of the device that are available at
- * the time this function is called. The function must NOT stop
- * at the first failed device link if other unlinked supplier
- * devices are present in the system. This is necessary for the
- * driver/bus sync_state() callbacks to work correctly.
- *
- * For example, say Device-C depends on suppliers Device-S1 and
- * Device-S2 and the dependency is listed in that order in the
- * firmware. Say, S1 gets populated from the firmware after
- * late_initcall_sync(). Say S2 is populated and probed way
- * before that in device_initcall(). When C is populated, if this
- * add_links() function doesn't continue past a "failed linking to
- * S1" and continue linking C to S2, then S2 will get a
- * sync_state() callback before C is probed. This is because from
- * the perspective of S2, C was never a consumer when its
- * sync_state() evaluation is done. To avoid this, the add_links()
- * function has to go through all available suppliers of the
- * device (that corresponds to this fwnode) and link to them
- * before returning.
- *
- * If some suppliers are not yet available (indicated by an error
- * return value), this function will be called again when other
- * devices are added to allow creating device links to any newly
- * available suppliers.
- *
- * Return 0 if device links have been successfully created to all
- * the known suppliers of this device or if the supplier
- * information is not known.
- *
- * Return -ENODEV if the suppliers needed for probing this device
- * have not been registered yet (because device links can only be
- * created to devices registered with the driver core).
- *
- * Return -EAGAIN if some of the suppliers of this device have not
- * been registered yet, but none of those suppliers are necessary
- * for probing the device.
+ * @add_links: Create fwnode links to all the suppliers of the fwnode. Return
+ * zero on success, a negative error code otherwise.
*/
struct fwnode_operations {
struct fwnode_handle *(*get)(struct fwnode_handle *fwnode);
@@ -113,6 +117,9 @@ struct fwnode_operations {
bool (*device_is_available)(const struct fwnode_handle *fwnode);
const void *(*device_get_match_data)(const struct fwnode_handle *fwnode,
const struct device *dev);
+ bool (*device_dma_supported)(const struct fwnode_handle *fwnode);
+ enum dev_dma_attr
+ (*device_get_dma_attr)(const struct fwnode_handle *fwnode);
bool (*property_present)(const struct fwnode_handle *fwnode,
const char *propname);
int (*property_read_int_array)(const struct fwnode_handle *fwnode,
@@ -145,16 +152,17 @@ struct fwnode_operations {
(*graph_get_port_parent)(struct fwnode_handle *fwnode);
int (*graph_parse_endpoint)(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint);
- int (*add_links)(const struct fwnode_handle *fwnode,
- struct device *dev);
+ void __iomem *(*iomap)(struct fwnode_handle *fwnode, int index);
+ int (*irq_get)(const struct fwnode_handle *fwnode, unsigned int index);
+ int (*add_links)(struct fwnode_handle *fwnode);
};
-#define fwnode_has_op(fwnode, op) \
- ((fwnode) && (fwnode)->ops && (fwnode)->ops->op)
+#define fwnode_has_op(fwnode, op) \
+ (!IS_ERR_OR_NULL(fwnode) && (fwnode)->ops && (fwnode)->ops->op)
+
#define fwnode_call_int_op(fwnode, op, ...) \
- (fwnode ? (fwnode_has_op(fwnode, op) ? \
- (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : -ENXIO) : \
- -EINVAL)
+ (fwnode_has_op(fwnode, op) ? \
+ (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : (IS_ERR_OR_NULL(fwnode) ? -EINVAL : -ENXIO))
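The resulting error semantics for callers, sketched with the irq_get op from the operations table above (the pr_debug strings are illustrative):

static int example_get_irq(const struct fwnode_handle *fwnode,
			   unsigned int index)
{
	int ret = fwnode_call_int_op(fwnode, irq_get, index);

	if (ret == -EINVAL)		/* NULL or error-valued fwnode */
		pr_debug("no usable fwnode supplied\n");
	else if (ret == -ENXIO)		/* fwnode exists, op does not */
		pr_debug("fwnode has no irq_get op\n");
	return ret;
}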
#define fwnode_call_bool_op(fwnode, op, ...) \
(fwnode_has_op(fwnode, op) ? \
@@ -170,4 +178,30 @@ struct fwnode_operations {
} while (false)
#define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev)
+static inline void fwnode_init(struct fwnode_handle *fwnode,
+ const struct fwnode_operations *ops)
+{
+ fwnode->ops = ops;
+ INIT_LIST_HEAD(&fwnode->consumers);
+ INIT_LIST_HEAD(&fwnode->suppliers);
+}
+
+static inline void fwnode_dev_initialized(struct fwnode_handle *fwnode,
+ bool initialized)
+{
+ if (IS_ERR_OR_NULL(fwnode))
+ return;
+
+ if (initialized)
+ fwnode->flags |= FWNODE_FLAG_INITIALIZED;
+ else
+ fwnode->flags &= ~FWNODE_FLAG_INITIALIZED;
+}
+
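A provider-side sketch (example_* names hypothetical): fwnode_init() must run before the node is exposed so the new suppliers/consumers list heads are valid, and flagging the hardware initialized lets fw_devlink stop blocking consumers on it:

struct example_node {
	struct fwnode_handle fwnode;
	/* provider-private state ... */
};

static void example_node_setup(struct example_node *n,
			       const struct fwnode_operations *ops)
{
	fwnode_init(&n->fwnode, ops);
	fwnode_dev_initialized(&n->fwnode, true);
}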
+extern u32 fw_devlink_get_flags(void);
+extern bool fw_devlink_is_strict(void);
+int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup);
+void fwnode_links_purge(struct fwnode_handle *fwnode);
+void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode);
+
#endif
diff --git a/include/linux/fwnode_mdio.h b/include/linux/fwnode_mdio.h
new file mode 100644
index 000000000000..faf603c48c86
--- /dev/null
+++ b/include/linux/fwnode_mdio.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * FWNODE helper for the MDIO (Ethernet PHY) API
+ */
+
+#ifndef __LINUX_FWNODE_MDIO_H
+#define __LINUX_FWNODE_MDIO_H
+
+#include <linux/phy.h>
+
+#if IS_ENABLED(CONFIG_FWNODE_MDIO)
+int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
+ struct phy_device *phy,
+ struct fwnode_handle *child, u32 addr);
+
+int fwnode_mdiobus_register_phy(struct mii_bus *bus,
+ struct fwnode_handle *child, u32 addr);
+
+#else /* CONFIG_FWNODE_MDIO */
+static inline int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
+						     struct phy_device *phy,
+						     struct fwnode_handle *child,
+						     u32 addr)
+{
+ return -EINVAL;
+}
+
+static inline int fwnode_mdiobus_register_phy(struct mii_bus *bus,
+ struct fwnode_handle *child,
+ u32 addr)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* __LINUX_FWNODE_MDIO_H */
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index 69081d899492..8c2f00018e89 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -110,7 +110,7 @@ static inline void gameport_free_port(struct gameport *gameport)
static inline void gameport_set_name(struct gameport *gameport, const char *name)
{
- strlcpy(gameport->name, name, sizeof(gameport->name));
+ strscpy(gameport->name, name, sizeof(gameport->name));
}
/*
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 5b14a0f38124..0bd581003cd5 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -76,7 +76,7 @@ struct gen_pool_chunk {
void *owner; /* private data to retrieve at alloc time */
unsigned long start_addr; /* start address of memory chunk */
unsigned long end_addr; /* end address of memory chunk (inclusive) */
- unsigned long bits[0]; /* bitmap for allocating memory chunk */
+ unsigned long bits[]; /* bitmap for allocating memory chunk */
};
/*
diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
index 02393c0c98f9..107613f7d792 100644
--- a/include/linux/generic-radix-tree.h
+++ b/include/linux/generic-radix-tree.h
@@ -38,13 +38,14 @@
#include <asm/page.h>
#include <linux/bug.h>
-#include <linux/kernel.h>
#include <linux/log2.h>
+#include <linux/math.h>
+#include <linux/types.h>
struct genradix_root;
struct __genradix {
- struct genradix_root __rcu *root;
+ struct genradix_root *root;
};
/*
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index bc738504ab4a..c285968e437a 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -8,34 +8,11 @@
/* All generic netlink requests are serialized by a global lock. */
extern void genl_lock(void);
extern void genl_unlock(void);
-#ifdef CONFIG_LOCKDEP
-extern bool lockdep_genl_is_held(void);
-#endif
/* for synchronisation between af_netlink and genetlink */
extern atomic_t genl_sk_destructing_cnt;
extern wait_queue_head_t genl_sk_destructing_waitq;
-/**
- * rcu_dereference_genl - rcu_dereference with debug checking
- * @p: The pointer to read, prior to dereferencing
- *
- * Do an rcu_dereference(p), but check caller either holds rcu_read_lock()
- * or genl mutex. Note : Please prefer genl_dereference() or rcu_dereference()
- */
-#define rcu_dereference_genl(p) \
- rcu_dereference_check(p, lockdep_genl_is_held())
-
-/**
- * genl_dereference - fetch RCU pointer when updates are prevented by genl mutex
- * @p: The pointer to read, prior to dereferencing
- *
- * Return the value of the specified RCU-protected pointer, but omit
- * the READ_ONCE(), because caller holds genl mutex.
- */
-#define genl_dereference(p) \
- rcu_dereference_protected(p, lockdep_genl_is_held())
-
#define MODULE_ALIAS_GENL_FAMILY(family)\
MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family)
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
deleted file mode 100644
index 07dc91835b98..000000000000
--- a/include/linux/genhd.h
+++ /dev/null
@@ -1,786 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_GENHD_H
-#define _LINUX_GENHD_H
-
-/*
- * genhd.h Copyright (C) 1992 Drew Eckhardt
- * Generic hard disk header file by
- * Drew Eckhardt
- *
- * <drew@colorado.edu>
- */
-
-#include <linux/types.h>
-#include <linux/kdev_t.h>
-#include <linux/rcupdate.h>
-#include <linux/slab.h>
-#include <linux/percpu-refcount.h>
-#include <linux/uuid.h>
-#include <linux/blk_types.h>
-#include <asm/local.h>
-
-#ifdef CONFIG_BLOCK
-
-#define dev_to_disk(device) container_of((device), struct gendisk, part0.__dev)
-#define dev_to_part(device) container_of((device), struct hd_struct, __dev)
-#define disk_to_dev(disk) (&(disk)->part0.__dev)
-#define part_to_dev(part) (&((part)->__dev))
-
-extern struct device_type part_type;
-extern struct kobject *block_depr;
-extern struct class block_class;
-
-enum {
-/* These three have identical behaviour; use the second one if DOS FDISK gets
- confused about extended/logical partitions starting past cylinder 1023. */
- DOS_EXTENDED_PARTITION = 5,
- LINUX_EXTENDED_PARTITION = 0x85,
- WIN98_EXTENDED_PARTITION = 0x0f,
-
- SUN_WHOLE_DISK = DOS_EXTENDED_PARTITION,
-
- LINUX_SWAP_PARTITION = 0x82,
- LINUX_DATA_PARTITION = 0x83,
- LINUX_LVM_PARTITION = 0x8e,
- LINUX_RAID_PARTITION = 0xfd, /* autodetect RAID partition */
-
- SOLARIS_X86_PARTITION = LINUX_SWAP_PARTITION,
- NEW_SOLARIS_X86_PARTITION = 0xbf,
-
- DM6_AUX1PARTITION = 0x51, /* no DDO: use xlated geom */
- DM6_AUX3PARTITION = 0x53, /* no DDO: use xlated geom */
- DM6_PARTITION = 0x54, /* has DDO: use xlated geom & offset */
- EZD_PARTITION = 0x55, /* EZ-DRIVE */
-
- FREEBSD_PARTITION = 0xa5, /* FreeBSD Partition ID */
- OPENBSD_PARTITION = 0xa6, /* OpenBSD Partition ID */
- NETBSD_PARTITION = 0xa9, /* NetBSD Partition ID */
- BSDI_PARTITION = 0xb7, /* BSDI Partition ID */
- MINIX_PARTITION = 0x81, /* Minix Partition ID */
- UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */
-};
-
-#define DISK_MAX_PARTS 256
-#define DISK_NAME_LEN 32
-
-#include <linux/major.h>
-#include <linux/device.h>
-#include <linux/smp.h>
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/workqueue.h>
-
-struct partition {
- unsigned char boot_ind; /* 0x80 - active */
- unsigned char head; /* starting head */
- unsigned char sector; /* starting sector */
- unsigned char cyl; /* starting cylinder */
- unsigned char sys_ind; /* What partition type */
- unsigned char end_head; /* end head */
- unsigned char end_sector; /* end sector */
- unsigned char end_cyl; /* end cylinder */
- __le32 start_sect; /* starting sector counting from 0 */
- __le32 nr_sects; /* nr of sectors in partition */
-} __attribute__((packed));
-
-struct disk_stats {
- u64 nsecs[NR_STAT_GROUPS];
- unsigned long sectors[NR_STAT_GROUPS];
- unsigned long ios[NR_STAT_GROUPS];
- unsigned long merges[NR_STAT_GROUPS];
- unsigned long io_ticks;
- unsigned long time_in_queue;
- local_t in_flight[2];
-};
-
-#define PARTITION_META_INFO_VOLNAMELTH 64
-/*
- * Enough for the string representation of any kind of UUID plus NULL.
- * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
- */
-#define PARTITION_META_INFO_UUIDLTH (UUID_STRING_LEN + 1)
-
-struct partition_meta_info {
- char uuid[PARTITION_META_INFO_UUIDLTH];
- u8 volname[PARTITION_META_INFO_VOLNAMELTH];
-};
-
-struct hd_struct {
- sector_t start_sect;
- /*
- * nr_sects is protected by sequence counter. One might extend a
- * partition while IO is happening to it and update of nr_sects
- * can be non-atomic on 32bit machines with 64bit sector_t.
- */
- sector_t nr_sects;
- seqcount_t nr_sects_seq;
- sector_t alignment_offset;
- unsigned int discard_alignment;
- struct device __dev;
- struct kobject *holder_dir;
- int policy, partno;
- struct partition_meta_info *info;
-#ifdef CONFIG_FAIL_MAKE_REQUEST
- int make_it_fail;
-#endif
- unsigned long stamp;
-#ifdef CONFIG_SMP
- struct disk_stats __percpu *dkstats;
-#else
- struct disk_stats dkstats;
-#endif
- struct percpu_ref ref;
- struct rcu_work rcu_work;
-};
-
-#define GENHD_FL_REMOVABLE 1
-/* 2 is unused */
-#define GENHD_FL_MEDIA_CHANGE_NOTIFY 4
-#define GENHD_FL_CD 8
-#define GENHD_FL_UP 16
-#define GENHD_FL_SUPPRESS_PARTITION_INFO 32
-#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */
-#define GENHD_FL_NATIVE_CAPACITY 128
-#define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE 256
-#define GENHD_FL_NO_PART_SCAN 512
-#define GENHD_FL_HIDDEN 1024
-
-enum {
- DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
- DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
-};
-
-enum {
- /* Poll even if events_poll_msecs is unset */
- DISK_EVENT_FLAG_POLL = 1 << 0,
- /* Forward events to udev */
- DISK_EVENT_FLAG_UEVENT = 1 << 1,
-};
-
-struct disk_part_tbl {
- struct rcu_head rcu_head;
- int len;
- struct hd_struct __rcu *last_lookup;
- struct hd_struct __rcu *part[];
-};
-
-struct disk_events;
-struct badblocks;
-
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-
-struct blk_integrity {
- const struct blk_integrity_profile *profile;
- unsigned char flags;
- unsigned char tuple_size;
- unsigned char interval_exp;
- unsigned char tag_size;
-};
-
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
-
-struct gendisk {
- /* major, first_minor and minors are input parameters only,
- * don't use directly. Use disk_devt() and disk_max_parts().
- */
- int major; /* major number of driver */
- int first_minor;
- int minors; /* maximum number of minors, =1 for
- * disks that can't be partitioned. */
-
- char disk_name[DISK_NAME_LEN]; /* name of major driver */
- char *(*devnode)(struct gendisk *gd, umode_t *mode);
-
- unsigned short events; /* supported events */
- unsigned short event_flags; /* flags related to event processing */
-
- /* Array of pointers to partitions indexed by partno.
- * Protected with matching bdev lock but stat and other
- * non-critical accesses use RCU. Always access through
- * helpers.
- */
- struct disk_part_tbl __rcu *part_tbl;
- struct hd_struct part0;
-
- const struct block_device_operations *fops;
- struct request_queue *queue;
- void *private_data;
-
- int flags;
- struct rw_semaphore lookup_sem;
- struct kobject *slave_dir;
-
- struct timer_rand_state *random;
- atomic_t sync_io; /* RAID */
- struct disk_events *ev;
-#ifdef CONFIG_BLK_DEV_INTEGRITY
- struct kobject integrity_kobj;
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
- int node_id;
- struct badblocks *bb;
- struct lockdep_map lockdep_map;
-};
-
-static inline struct gendisk *part_to_disk(struct hd_struct *part)
-{
- if (likely(part)) {
- if (part->partno)
- return dev_to_disk(part_to_dev(part)->parent);
- else
- return dev_to_disk(part_to_dev(part));
- }
- return NULL;
-}
-
-static inline int disk_max_parts(struct gendisk *disk)
-{
- if (disk->flags & GENHD_FL_EXT_DEVT)
- return DISK_MAX_PARTS;
- return disk->minors;
-}
-
-static inline bool disk_part_scan_enabled(struct gendisk *disk)
-{
- return disk_max_parts(disk) > 1 &&
- !(disk->flags & GENHD_FL_NO_PART_SCAN);
-}
-
-static inline dev_t disk_devt(struct gendisk *disk)
-{
- return MKDEV(disk->major, disk->first_minor);
-}
-
-static inline dev_t part_devt(struct hd_struct *part)
-{
- return part_to_dev(part)->devt;
-}
-
-extern struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);
-extern struct hd_struct *disk_get_part(struct gendisk *disk, int partno);
-
-static inline void disk_put_part(struct hd_struct *part)
-{
- if (likely(part))
- put_device(part_to_dev(part));
-}
-
-/*
- * Smarter partition iterator without context limits.
- */
-#define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */
-#define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */
-#define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */
-#define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */
-
-struct disk_part_iter {
- struct gendisk *disk;
- struct hd_struct *part;
- int idx;
- unsigned int flags;
-};
-
-extern void disk_part_iter_init(struct disk_part_iter *piter,
- struct gendisk *disk, unsigned int flags);
-extern struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter);
-extern void disk_part_iter_exit(struct disk_part_iter *piter);
-
-extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk,
- sector_t sector);
-bool disk_has_partitions(struct gendisk *disk);
-
-/*
- * Macros to operate on percpu disk statistics:
- *
- * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters
- * and should be called between disk_stat_lock() and
- * disk_stat_unlock().
- *
- * part_stat_read() can be called at any time.
- *
- * part_stat_{add|set_all}() and {init|free}_part_stats are for
- * internal use only.
- */
-#ifdef CONFIG_SMP
-#define part_stat_lock() ({ rcu_read_lock(); get_cpu(); })
-#define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0)
-
-#define part_stat_get_cpu(part, field, cpu) \
- (per_cpu_ptr((part)->dkstats, (cpu))->field)
-
-#define part_stat_get(part, field) \
- part_stat_get_cpu(part, field, smp_processor_id())
-
-#define part_stat_read(part, field) \
-({ \
- typeof((part)->dkstats->field) res = 0; \
- unsigned int _cpu; \
- for_each_possible_cpu(_cpu) \
- res += per_cpu_ptr((part)->dkstats, _cpu)->field; \
- res; \
-})
-
-static inline void part_stat_set_all(struct hd_struct *part, int value)
-{
- int i;
-
- for_each_possible_cpu(i)
- memset(per_cpu_ptr(part->dkstats, i), value,
- sizeof(struct disk_stats));
-}
-
-static inline int init_part_stats(struct hd_struct *part)
-{
- part->dkstats = alloc_percpu(struct disk_stats);
- if (!part->dkstats)
- return 0;
- return 1;
-}
-
-static inline void free_part_stats(struct hd_struct *part)
-{
- free_percpu(part->dkstats);
-}
-
-#else /* !CONFIG_SMP */
-#define part_stat_lock() ({ rcu_read_lock(); 0; })
-#define part_stat_unlock() rcu_read_unlock()
-
-#define part_stat_get(part, field) ((part)->dkstats.field)
-#define part_stat_get_cpu(part, field, cpu) part_stat_get(part, field)
-#define part_stat_read(part, field) part_stat_get(part, field)
-
-static inline void part_stat_set_all(struct hd_struct *part, int value)
-{
- memset(&part->dkstats, value, sizeof(struct disk_stats));
-}
-
-static inline int init_part_stats(struct hd_struct *part)
-{
- return 1;
-}
-
-static inline void free_part_stats(struct hd_struct *part)
-{
-}
-
-#endif /* CONFIG_SMP */
-
-#define part_stat_read_msecs(part, which) \
- div_u64(part_stat_read(part, nsecs[which]), NSEC_PER_MSEC)
-
-#define part_stat_read_accum(part, field) \
- (part_stat_read(part, field[STAT_READ]) + \
- part_stat_read(part, field[STAT_WRITE]) + \
- part_stat_read(part, field[STAT_DISCARD]))
-
-#define __part_stat_add(part, field, addnd) \
- (part_stat_get(part, field) += (addnd))
-
-#define part_stat_add(part, field, addnd) do { \
- __part_stat_add((part), field, addnd); \
- if ((part)->partno) \
- __part_stat_add(&part_to_disk((part))->part0, \
- field, addnd); \
-} while (0)
-
-#define part_stat_dec(gendiskp, field) \
- part_stat_add(gendiskp, field, -1)
-#define part_stat_inc(gendiskp, field) \
- part_stat_add(gendiskp, field, 1)
-#define part_stat_sub(gendiskp, field, subnd) \
- part_stat_add(gendiskp, field, -subnd)
-
-#define part_stat_local_dec(gendiskp, field) \
- local_dec(&(part_stat_get(gendiskp, field)))
-#define part_stat_local_inc(gendiskp, field) \
- local_inc(&(part_stat_get(gendiskp, field)))
-#define part_stat_local_read(gendiskp, field) \
- local_read(&(part_stat_get(gendiskp, field)))
-#define part_stat_local_read_cpu(gendiskp, field, cpu) \
- local_read(&(part_stat_get_cpu(gendiskp, field, cpu)))
-
-unsigned int part_in_flight(struct request_queue *q, struct hd_struct *part);
-void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
- unsigned int inflight[2]);
-void part_dec_in_flight(struct request_queue *q, struct hd_struct *part,
- int rw);
-void part_inc_in_flight(struct request_queue *q, struct hd_struct *part,
- int rw);
-
-static inline struct partition_meta_info *alloc_part_info(struct gendisk *disk)
-{
- if (disk)
- return kzalloc_node(sizeof(struct partition_meta_info),
- GFP_KERNEL, disk->node_id);
- return kzalloc(sizeof(struct partition_meta_info), GFP_KERNEL);
-}
-
-static inline void free_part_info(struct hd_struct *part)
-{
- kfree(part->info);
-}
-
-void update_io_ticks(struct hd_struct *part, unsigned long now);
-
-/* block/genhd.c */
-extern void device_add_disk(struct device *parent, struct gendisk *disk,
- const struct attribute_group **groups);
-static inline void add_disk(struct gendisk *disk)
-{
- device_add_disk(NULL, disk, NULL);
-}
-extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk);
-static inline void add_disk_no_queue_reg(struct gendisk *disk)
-{
- device_add_disk_no_queue_reg(NULL, disk);
-}
-
-extern void del_gendisk(struct gendisk *gp);
-extern struct gendisk *get_gendisk(dev_t dev, int *partno);
-extern struct block_device *bdget_disk(struct gendisk *disk, int partno);
-
-extern void set_device_ro(struct block_device *bdev, int flag);
-extern void set_disk_ro(struct gendisk *disk, int flag);
-
-static inline int get_disk_ro(struct gendisk *disk)
-{
- return disk->part0.policy;
-}
-
-extern void disk_block_events(struct gendisk *disk);
-extern void disk_unblock_events(struct gendisk *disk);
-extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
-extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
-
-/* drivers/char/random.c */
-extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
-extern void rand_initialize_disk(struct gendisk *disk);
-
-static inline sector_t get_start_sect(struct block_device *bdev)
-{
- return bdev->bd_part->start_sect;
-}
-static inline sector_t get_capacity(struct gendisk *disk)
-{
- return disk->part0.nr_sects;
-}
-static inline void set_capacity(struct gendisk *disk, sector_t size)
-{
- disk->part0.nr_sects = size;
-}
-
-#ifdef CONFIG_SOLARIS_X86_PARTITION
-
-#define SOLARIS_X86_NUMSLICE 16
-#define SOLARIS_X86_VTOC_SANE (0x600DDEEEUL)
-
-struct solaris_x86_slice {
- __le16 s_tag; /* ID tag of partition */
- __le16 s_flag; /* permission flags */
- __le32 s_start; /* start sector no of partition */
- __le32 s_size; /* # of blocks in partition */
-};
-
-struct solaris_x86_vtoc {
- unsigned int v_bootinfo[3]; /* info needed by mboot (unsupported) */
- __le32 v_sanity; /* to verify vtoc sanity */
- __le32 v_version; /* layout version */
- char v_volume[8]; /* volume name */
- __le16 v_sectorsz; /* sector size in bytes */
- __le16 v_nparts; /* number of partitions */
- unsigned int v_reserved[10]; /* free space */
- struct solaris_x86_slice
- v_slice[SOLARIS_X86_NUMSLICE]; /* slice headers */
- unsigned int timestamp[SOLARIS_X86_NUMSLICE]; /* timestamp (unsupported) */
- char v_asciilabel[128]; /* for compatibility */
-};
-
-#endif /* CONFIG_SOLARIS_X86_PARTITION */
-
-#ifdef CONFIG_BSD_DISKLABEL
-/*
- * BSD disklabel support by Yossi Gottlieb <yogo@math.tau.ac.il>
- * updated by Marc Espie <Marc.Espie@openbsd.org>
- */
-
-/* check against BSD src/sys/sys/disklabel.h for consistency */
-
-#define BSD_DISKMAGIC (0x82564557UL) /* The disk magic number */
-#define BSD_MAXPARTITIONS 16
-#define OPENBSD_MAXPARTITIONS 16
-#define BSD_FS_UNUSED 0 /* disklabel unused partition entry ID */
-struct bsd_disklabel {
- __le32 d_magic; /* the magic number */
- __s16 d_type; /* drive type */
- __s16 d_subtype; /* controller/d_type specific */
- char d_typename[16]; /* type name, e.g. "eagle" */
- char d_packname[16]; /* pack identifier */
- __u32 d_secsize; /* # of bytes per sector */
- __u32 d_nsectors; /* # of data sectors per track */
- __u32 d_ntracks; /* # of tracks per cylinder */
- __u32 d_ncylinders; /* # of data cylinders per unit */
- __u32 d_secpercyl; /* # of data sectors per cylinder */
- __u32 d_secperunit; /* # of data sectors per unit */
- __u16 d_sparespertrack; /* # of spare sectors per track */
- __u16 d_sparespercyl; /* # of spare sectors per cylinder */
- __u32 d_acylinders; /* # of alt. cylinders per unit */
- __u16 d_rpm; /* rotational speed */
- __u16 d_interleave; /* hardware sector interleave */
- __u16 d_trackskew; /* sector 0 skew, per track */
- __u16 d_cylskew; /* sector 0 skew, per cylinder */
- __u32 d_headswitch; /* head switch time, usec */
- __u32 d_trkseek; /* track-to-track seek, usec */
- __u32 d_flags; /* generic flags */
-#define NDDATA 5
- __u32 d_drivedata[NDDATA]; /* drive-type specific information */
-#define NSPARE 5
- __u32 d_spare[NSPARE]; /* reserved for future use */
- __le32 d_magic2; /* the magic number (again) */
- __le16 d_checksum; /* xor of data incl. partitions */
-
- /* filesystem and partition information: */
- __le16 d_npartitions; /* number of partitions in following */
- __le32 d_bbsize; /* size of boot area at sn0, bytes */
- __le32 d_sbsize; /* max size of fs superblock, bytes */
- struct bsd_partition { /* the partition table */
- __le32 p_size; /* number of sectors in partition */
- __le32 p_offset; /* starting sector */
- __le32 p_fsize; /* filesystem basic fragment size */
- __u8 p_fstype; /* filesystem type, see below */
- __u8 p_frag; /* filesystem fragments per block */
- __le16 p_cpg; /* filesystem cylinders per group */
- } d_partitions[BSD_MAXPARTITIONS]; /* actually may be more */
-};
-
-#endif /* CONFIG_BSD_DISKLABEL */
-
-#ifdef CONFIG_UNIXWARE_DISKLABEL
-/*
- * Unixware slices support by Andrzej Krzysztofowicz <ankry@mif.pg.gda.pl>
- * and Krzysztof G. Baranowski <kgb@knm.org.pl>
- */
-
-#define UNIXWARE_DISKMAGIC (0xCA5E600DUL) /* The disk magic number */
-#define UNIXWARE_DISKMAGIC2 (0x600DDEEEUL) /* The slice table magic nr */
-#define UNIXWARE_NUMSLICE 16
-#define UNIXWARE_FS_UNUSED 0 /* Unused slice entry ID */
-
-struct unixware_slice {
- __le16 s_label; /* label */
- __le16 s_flags; /* permission flags */
- __le32 start_sect; /* starting sector */
- __le32 nr_sects; /* number of sectors in slice */
-};
-
-struct unixware_disklabel {
- __le32 d_type; /* drive type */
- __le32 d_magic; /* the magic number */
- __le32 d_version; /* version number */
- char d_serial[12]; /* serial number of the device */
- __le32 d_ncylinders; /* # of data cylinders per device */
- __le32 d_ntracks; /* # of tracks per cylinder */
- __le32 d_nsectors; /* # of data sectors per track */
- __le32 d_secsize; /* # of bytes per sector */
- __le32 d_part_start; /* # of first sector of this partition */
- __le32 d_unknown1[12]; /* ? */
- __le32 d_alt_tbl; /* byte offset of alternate table */
- __le32 d_alt_len; /* byte length of alternate table */
- __le32 d_phys_cyl; /* # of physical cylinders per device */
- __le32 d_phys_trk; /* # of physical tracks per cylinder */
- __le32 d_phys_sec; /* # of physical sectors per track */
- __le32 d_phys_bytes; /* # of physical bytes per sector */
- __le32 d_unknown2; /* ? */
- __le32 d_unknown3; /* ? */
- __le32 d_pad[8]; /* pad */
-
- struct unixware_vtoc {
- __le32 v_magic; /* the magic number */
- __le32 v_version; /* version number */
- char v_name[8]; /* volume name */
- __le16 v_nslices; /* # of slices */
- __le16 v_unknown1; /* ? */
- __le32 v_reserved[10]; /* reserved */
- struct unixware_slice
- v_slice[UNIXWARE_NUMSLICE]; /* slice headers */
- } vtoc;
-
-}; /* 408 */
-
-#endif /* CONFIG_UNIXWARE_DISKLABEL */
-
-#ifdef CONFIG_MINIX_SUBPARTITION
-# define MINIX_NR_SUBPARTITIONS 4
-#endif /* CONFIG_MINIX_SUBPARTITION */
-
-#define ADDPART_FLAG_NONE 0
-#define ADDPART_FLAG_RAID 1
-#define ADDPART_FLAG_WHOLEDISK 2
-
-extern int blk_alloc_devt(struct hd_struct *part, dev_t *devt);
-extern void blk_free_devt(dev_t devt);
-extern void blk_invalidate_devt(dev_t devt);
-extern dev_t blk_lookup_devt(const char *name, int partno);
-extern char *disk_name (struct gendisk *hd, int partno, char *buf);
-
-int bdev_disk_changed(struct block_device *bdev, bool invalidate);
-int blk_add_partitions(struct gendisk *disk, struct block_device *bdev);
-int blk_drop_partitions(struct gendisk *disk, struct block_device *bdev);
-extern int disk_expand_part_tbl(struct gendisk *disk, int target);
-extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
- int partno, sector_t start,
- sector_t len, int flags,
- struct partition_meta_info
- *info);
-extern void __delete_partition(struct percpu_ref *);
-extern void delete_partition(struct gendisk *, int);
-extern void printk_all_partitions(void);
-
-extern struct gendisk *__alloc_disk_node(int minors, int node_id);
-extern struct kobject *get_disk_and_module(struct gendisk *disk);
-extern void put_disk(struct gendisk *disk);
-extern void put_disk_and_module(struct gendisk *disk);
-extern void blk_register_region(dev_t devt, unsigned long range,
- struct module *module,
- struct kobject *(*probe)(dev_t, int *, void *),
- int (*lock)(dev_t, void *),
- void *data);
-extern void blk_unregister_region(dev_t devt, unsigned long range);
-
-extern ssize_t part_size_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-extern ssize_t part_stat_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-extern ssize_t part_inflight_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-#ifdef CONFIG_FAIL_MAKE_REQUEST
-extern ssize_t part_fail_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-extern ssize_t part_fail_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count);
-#endif /* CONFIG_FAIL_MAKE_REQUEST */
-
-#define alloc_disk_node(minors, node_id) \
-({ \
- static struct lock_class_key __key; \
- const char *__name; \
- struct gendisk *__disk; \
- \
- __name = "(gendisk_completion)"#minors"("#node_id")"; \
- \
- __disk = __alloc_disk_node(minors, node_id); \
- \
- if (__disk) \
- lockdep_init_map(&__disk->lockdep_map, __name, &__key, 0); \
- \
- __disk; \
-})
-
-#define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE)
-
-static inline int hd_ref_init(struct hd_struct *part)
-{
- if (percpu_ref_init(&part->ref, __delete_partition, 0,
- GFP_KERNEL))
- return -ENOMEM;
- return 0;
-}
-
-static inline void hd_struct_get(struct hd_struct *part)
-{
- percpu_ref_get(&part->ref);
-}
-
-static inline int hd_struct_try_get(struct hd_struct *part)
-{
- return percpu_ref_tryget_live(&part->ref);
-}
-
-static inline void hd_struct_put(struct hd_struct *part)
-{
- percpu_ref_put(&part->ref);
-}
-
-static inline void hd_struct_kill(struct hd_struct *part)
-{
- percpu_ref_kill(&part->ref);
-}
-
-static inline void hd_free_part(struct hd_struct *part)
-{
- free_part_stats(part);
- free_part_info(part);
- percpu_ref_exit(&part->ref);
-}
-
-/*
- * Any access to part->nr_sects which is not protected by the partition's
- * bd_mutex or the gendisk's bdev bd_mutex should be done using this
- * accessor function.
- *
- * Code written along the lines of i_size_read() and i_size_write().
- * The CONFIG_PREEMPTION case optimizes for UP kernels with preemption
- * enabled.
- */
-static inline sector_t part_nr_sects_read(struct hd_struct *part)
-{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
- sector_t nr_sects;
- unsigned seq;
- do {
- seq = read_seqcount_begin(&part->nr_sects_seq);
- nr_sects = part->nr_sects;
- } while (read_seqcount_retry(&part->nr_sects_seq, seq));
- return nr_sects;
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
- sector_t nr_sects;
-
- preempt_disable();
- nr_sects = part->nr_sects;
- preempt_enable();
- return nr_sects;
-#else
- return part->nr_sects;
-#endif
-}
-
-/*
- * Should be called with the partition's mutex (typically bd_mutex) held
- * to provide mutual exclusion among writers; otherwise the seqcount might
- * be left in a wrong state, leaving readers spinning indefinitely.
- */
-static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
-{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
- write_seqcount_begin(&part->nr_sects_seq);
- part->nr_sects = size;
- write_seqcount_end(&part->nr_sects_seq);
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
- preempt_disable();
- part->nr_sects = size;
- preempt_enable();
-#else
- part->nr_sects = size;
-#endif
-}
-
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-extern void blk_integrity_add(struct gendisk *);
-extern void blk_integrity_del(struct gendisk *);
-#else /* CONFIG_BLK_DEV_INTEGRITY */
-static inline void blk_integrity_add(struct gendisk *disk) { }
-static inline void blk_integrity_del(struct gendisk *disk) { }
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
-
-#else /* CONFIG_BLOCK */
-
-static inline void printk_all_partitions(void) { }
-
-static inline dev_t blk_lookup_devt(const char *name, int partno)
-{
- dev_t devt = MKDEV(0, 0);
- return devt;
-}
-#endif /* CONFIG_BLOCK */
-
-#endif /* _LINUX_GENHD_H */
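
The partition iterator and per-cpu statistics helpers removed above were typically used together; a minimal sketch of the retired interface (the function below and its names are illustrative, not part of this patch):

	static void dump_parts(struct gendisk *disk)
	{
		struct disk_part_iter piter;
		struct hd_struct *part;

		/* DISK_PITER_INCL_EMPTY visits zero-sized partitions too. */
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
		while ((part = disk_part_iter_next(&piter)))
			pr_info("part %d: %llu sectors, %lu reads\n",
				part->partno,
				(unsigned long long)part_nr_sects_read(part),
				part_stat_read(part, ios[STAT_READ]));
		disk_part_iter_exit(&piter);
	}

Each disk_part_iter_next() call holds a reference on the returned hd_struct; the next iteration (or disk_part_iter_exit()) drops it.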
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
index 6cb82301d8e9..4a4b387181ad 100644
--- a/include/linux/genl_magic_func.h
+++ b/include/linux/genl_magic_func.h
@@ -294,6 +294,7 @@ static struct genl_family ZZZ_genl_family __ro_after_init = {
.ops = ZZZ_genl_ops,
.n_ops = ARRAY_SIZE(ZZZ_genl_ops),
.mcgrps = ZZZ_genl_mcgrps,
+ .resv_start_op = 42, /* drbd is currently the only user */
.n_mcgrps = ARRAY_SIZE(ZZZ_genl_mcgrps),
.module = THIS_MODULE,
};
@@ -404,4 +405,3 @@ s_fields \
/* }}}1 */
#endif /* GENL_MAGIC_FUNC_H */
-/* vim: set foldmethod=marker foldlevel=1 nofoldenable : */
diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h
index eeae59d3ceb7..f81d48987528 100644
--- a/include/linux/genl_magic_struct.h
+++ b/include/linux/genl_magic_struct.h
@@ -89,7 +89,7 @@ static inline int nla_put_u64_0pad(struct sk_buff *skb, int attrtype, u64 value)
nla_get_u64, nla_put_u64_0pad, false)
#define __str_field(attr_nr, attr_flag, name, maxlen) \
__array(attr_nr, attr_flag, name, NLA_NUL_STRING, char, maxlen, \
- nla_strlcpy, nla_put, false)
+ nla_strscpy, nla_put, false)
#define __bin_field(attr_nr, attr_flag, name, maxlen) \
__array(attr_nr, attr_flag, name, NLA_BINARY, char, maxlen, \
nla_memcpy, nla_put, false)
@@ -283,4 +283,3 @@ enum { \
/* }}}1 */
#endif /* GENL_MAGIC_STRUCT_H */
-/* vim: set foldmethod=marker nofoldenable : */
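
The nla_strlcpy() to nla_strscpy() switch above changes the truncation contract: the new helper still NUL-terminates the destination, but reports truncation as -E2BIG instead of returning the untruncated source length. A hedged sketch (the attribute table and index are hypothetical):

	char name[16];
	ssize_t n;

	/* n is the copied length on success, -E2BIG on truncation. */
	n = nla_strscpy(name, attrs[MY_ATTR_NAME], sizeof(name));
	if (n == -E2BIG)
		pr_warn("name attribute truncated\n");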
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index e5b817cb86e7..ef4aea3b356e 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -2,320 +2,31 @@
#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H
-#include <linux/mmdebug.h>
+#include <linux/gfp_types.h>
+
#include <linux/mmzone.h>
-#include <linux/stddef.h>
-#include <linux/linkage.h>
#include <linux/topology.h>
struct vm_area_struct;
-/*
- * In case of changes, please don't forget to update
- * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
- */
-
-/* Plain integer GFP bitmasks. Do not use this directly. */
-#define ___GFP_DMA 0x01u
-#define ___GFP_HIGHMEM 0x02u
-#define ___GFP_DMA32 0x04u
-#define ___GFP_MOVABLE 0x08u
-#define ___GFP_RECLAIMABLE 0x10u
-#define ___GFP_HIGH 0x20u
-#define ___GFP_IO 0x40u
-#define ___GFP_FS 0x80u
-#define ___GFP_ZERO 0x100u
-#define ___GFP_ATOMIC 0x200u
-#define ___GFP_DIRECT_RECLAIM 0x400u
-#define ___GFP_KSWAPD_RECLAIM 0x800u
-#define ___GFP_WRITE 0x1000u
-#define ___GFP_NOWARN 0x2000u
-#define ___GFP_RETRY_MAYFAIL 0x4000u
-#define ___GFP_NOFAIL 0x8000u
-#define ___GFP_NORETRY 0x10000u
-#define ___GFP_MEMALLOC 0x20000u
-#define ___GFP_COMP 0x40000u
-#define ___GFP_NOMEMALLOC 0x80000u
-#define ___GFP_HARDWALL 0x100000u
-#define ___GFP_THISNODE 0x200000u
-#define ___GFP_ACCOUNT 0x400000u
-#ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP 0x800000u
-#else
-#define ___GFP_NOLOCKDEP 0
-#endif
-/* If the above are modified, __GFP_BITS_SHIFT may need updating */
-
-/*
- * Physical address zone modifiers (see linux/mmzone.h - low four bits)
- *
- * Do not put any conditional on these. If necessary modify the definitions
- * without the underscores and use them consistently. The definitions here may
- * be used in bit comparisons.
- */
-#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
-#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
-#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
-#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
-#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
-
-/**
- * DOC: Page mobility and placement hints
- *
- * Page mobility and placement hints
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * These flags provide hints about how mobile the page is. Pages with similar
- * mobility are placed within the same pageblocks to minimise problems due
- * to external fragmentation.
- *
- * %__GFP_MOVABLE (also a zone modifier) indicates that the page can be
- * moved by page migration during memory compaction or can be reclaimed.
- *
- * %__GFP_RECLAIMABLE is used for slab allocations that specify
- * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
- *
- * %__GFP_WRITE indicates the caller intends to dirty the page. Where possible,
- * these pages will be spread between local zones to avoid all the dirty
- * pages being in one zone (fair zone allocation policy).
- *
- * %__GFP_HARDWALL enforces the cpuset memory allocation policy.
- *
- * %__GFP_THISNODE forces the allocation to be satisfied from the requested
- * node with no fallbacks or placement policy enforcements.
- *
- * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
- */
-#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
-#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
-#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
-#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
-#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
-
-/**
- * DOC: Watermark modifiers
- *
- * Watermark modifiers -- controls access to emergency reserves
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * %__GFP_HIGH indicates that the caller is high-priority and that granting
- * the request is necessary before the system can make forward progress.
- * For example, creating an IO context to clean pages.
- *
- * %__GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
- * high priority. Users are typically interrupt handlers. This may be
- * used in conjunction with %__GFP_HIGH
- *
- * %__GFP_MEMALLOC allows access to all memory. This should only be used when
- * the caller guarantees the allocation will allow more memory to be freed
- * very shortly e.g. process exiting or swapping. Users either should
- * be the MM or co-ordinating closely with the VM (e.g. swap over NFS).
- *
- * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
- * This takes precedence over the %__GFP_MEMALLOC flag if both are set.
- */
-#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
-#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
-#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
-#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
-
-/**
- * DOC: Reclaim modifiers
- *
- * Reclaim modifiers
- * ~~~~~~~~~~~~~~~~~
- *
- * %__GFP_IO can start physical IO.
- *
- * %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the
- * allocator recursing into the filesystem which might already be holding
- * locks.
- *
- * %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
- * This flag can be cleared to avoid unnecessary delays when a fallback
- * option is available.
- *
- * %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
- * the low watermark is reached and have it reclaim pages until the high
- * watermark is reached. A caller may wish to clear this flag when fallback
- * options are available and the reclaim is likely to disrupt the system. The
- * canonical example is THP allocation where a fallback is cheap but
- * reclaim/compaction may cause indirect stalls.
- *
- * %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
- *
- * The default allocator behavior depends on the request size. We have a concept
- * of so called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
- * !costly allocations are too essential to fail so they are implicitly
- * non-failing by default (with some exceptions like OOM victims might fail so
- * the caller still has to check for failures) while costly requests try to be
- * not disruptive and back off even without invoking the OOM killer.
- * The following three modifiers might be used to override some of these
- * implicit rules
- *
- * %__GFP_NORETRY: The VM implementation will try only very lightweight
- * memory direct reclaim to get some memory under memory pressure (thus
- * it can sleep). It will avoid disruptive actions like OOM killer. The
- * caller must handle the failure which is quite likely to happen under
- * heavy memory pressure. The flag is suitable when failure can easily be
- * handled at small cost, such as reduced throughput
- *
- * %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
- * procedures that have previously failed if there is some indication
- * that progress has been made elsewhere. It can wait for other
- * tasks to attempt high level approaches to freeing memory such as
- * compaction (which removes fragmentation) and page-out.
- * There is still a definite limit to the number of retries, but it is
- * a larger limit than with %__GFP_NORETRY.
- * Allocations with this flag may fail, but only when there is
- * genuinely little unused memory. While these allocations do not
- * directly trigger the OOM killer, their failure indicates that
- * the system is likely to need to use the OOM killer soon. The
- * caller must handle failure, but can reasonably do so by failing
- * a higher-level request, or completing it only in a much less
- * efficient manner.
- * If the allocation does fail, and the caller is in a position to
- * free some non-essential memory, doing so could benefit the system
- * as a whole.
- *
- * %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
- * cannot handle allocation failures. The allocation could block
- * indefinitely but will never return with failure. Testing for
- * failure is pointless.
- * New users should be evaluated carefully (and the flag should be
- * used only when there is no reasonable failure policy) but it is
- * definitely preferable to use the flag rather than opencode endless
- * loop around allocator.
- * Using this flag for costly allocations is _highly_ discouraged.
- */
-#define __GFP_IO ((__force gfp_t)___GFP_IO)
-#define __GFP_FS ((__force gfp_t)___GFP_FS)
-#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
-#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
-#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
-#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL)
-#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
-#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
-
-/**
- * DOC: Action modifiers
- *
- * Action modifiers
- * ~~~~~~~~~~~~~~~~
- *
- * %__GFP_NOWARN suppresses allocation failure reports.
- *
- * %__GFP_COMP causes the allocation to be returned as a compound page.
- *
- * %__GFP_ZERO returns a zeroed page on success.
- */
-#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
-#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
-#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
-
-/* Disable lockdep for GFP context tracking */
-#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
-
-/* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP))
-#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
-
-/**
- * DOC: Useful GFP flag combinations
- *
- * Useful GFP flag combinations
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * Useful GFP flag combinations that are commonly used. It is recommended
- * that subsystems start with one of these combinations and then set/clear
- * %__GFP_FOO flags as necessary.
- *
- * %GFP_ATOMIC users cannot sleep and need the allocation to succeed. A lower
- * watermark is applied to allow access to "atomic reserves".
- *
- * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires
- * %ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
- *
- * %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
- * accounted to kmemcg.
- *
- * %GFP_NOWAIT is for kernel allocations that should not stall for direct
- * reclaim, start physical IO or use any filesystem callback.
- *
- * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
- * that do not require the starting of any physical IO.
- * Please try to avoid using this flag directly and instead use
- * memalloc_noio_{save,restore} to mark the whole scope which cannot
- * perform any IO with a short explanation why. All allocation requests
- * will inherit GFP_NOIO implicitly.
- *
- * %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
- * Please try to avoid using this flag directly and instead use
- * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
- * recurse into the FS layer with a short explanation why. All allocation
- * requests will inherit GFP_NOFS implicitly.
- *
- * %GFP_USER is for userspace allocations that also need to be directly
- * accessible by the kernel or hardware. It is typically used by hardware
- * for buffers that are mapped to userspace (e.g. graphics) that hardware
- * still must DMA to. cpuset limits are enforced for these allocations.
- *
- * %GFP_DMA exists for historical reasons and should be avoided where possible.
- * The flag indicates that the caller requires that the lowest zone be
- * used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
- * it would require careful auditing as some users really require it and
- * others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the
- * lowest zone as a type of emergency reserve.
- *
- * %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit
- * address.
- *
- * %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
- * do not need to be directly accessible by the kernel but that cannot
- * move once in use. An example may be a hardware allocation that maps
- * data directly into userspace but has no addressing limitations.
- *
- * %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
- * need direct access to but can use kmap() when access is required. They
- * are expected to be movable via page reclaim or page migration. Typically,
- * pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE.
- *
- * %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They
- * are compound allocations that will generally fail quickly if memory is not
- * available and will not wake kswapd/kcompactd on failure. The _LIGHT
- * version does not attempt reclaim/compaction at all and is by default used
- * in page fault path, while the non-light is used by khugepaged.
- */
-#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
-#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
-#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
-#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
-#define GFP_NOIO (__GFP_RECLAIM)
-#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
-#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
-#define GFP_DMA __GFP_DMA
-#define GFP_DMA32 __GFP_DMA32
-#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
-#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
-#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
- __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
-#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
-
/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3
-static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
+static inline int gfp_migratetype(const gfp_t gfp_flags)
{
VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
+ BUILD_BUG_ON((___GFP_RECLAIMABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_RECLAIMABLE);
+ BUILD_BUG_ON(((___GFP_MOVABLE | ___GFP_RECLAIMABLE) >>
+ GFP_MOVABLE_SHIFT) != MIGRATE_HIGHATOMIC);
if (unlikely(page_group_by_mobility_disabled))
return MIGRATE_UNMOVABLE;
/* Group based on mobility */
- return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+ return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
@@ -325,29 +36,6 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}
-/**
- * gfpflags_normal_context - is gfp_flags a normal sleepable context?
- * @gfp_flags: gfp_flags to test
- *
- * Test whether @gfp_flags indicates that the allocation is from the
- * %current context and allowed to sleep.
- *
- * An allocation being allowed to block doesn't mean it owns the %current
- * context. When direct reclaim path tries to allocate memory, the
- * allocation context is nested inside whatever %current was doing at the
- * time of the original allocation. The nested allocation may be allowed
- * to block but modifying anything %current owns can corrupt the outer
- * context's expectations.
- *
- * %true result from this function indicates that the allocation context
- * can sleep and use anything that's associated with %current.
- */
-static inline bool gfpflags_normal_context(const gfp_t gfp_flags)
-{
- return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) ==
- __GFP_DIRECT_RECLAIM;
-}
-
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
@@ -467,12 +155,12 @@ static inline int gfp_zonelist(gfp_t flags)
/*
* We get the zone list from the current node and the gfp_mask.
- * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
+ * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
* There are two zonelists per node, one for all zones with memory and
* one containing just zones from the node the zonelist belongs to.
*
- * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
- * optimized to &contig_page_data at compile-time.
+ * For the case of non-NUMA systems the NODE_DATA() gets optimized to
+ * &contig_page_data at compile-time.
*/
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
@@ -486,14 +174,40 @@ static inline void arch_free_page(struct page *page, int order) { }
static inline void arch_alloc_page(struct page *page, int order) { }
#endif
-struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
- nodemask_t *nodemask);
+struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
+ nodemask_t *nodemask);
+struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
+ nodemask_t *nodemask);
-static inline struct page *
-__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
+unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
+ nodemask_t *nodemask, int nr_pages,
+ struct list_head *page_list,
+ struct page **page_array);
+
+unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
+ unsigned long nr_pages,
+ struct page **page_array);
+
+/* Bulk allocate order-0 pages */
+static inline unsigned long
+alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list)
+{
+ return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, list, NULL);
+}
+
+static inline unsigned long
+alloc_pages_bulk_array(gfp_t gfp, unsigned long nr_pages, struct page **page_array)
{
- return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
+ return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, NULL, page_array);
+}
+
+static inline unsigned long
+alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct page **page_array)
+{
+ if (nid == NUMA_NO_NODE)
+ nid = numa_mem_id();
+
+ return __alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array);
}
/*
@@ -506,7 +220,16 @@ __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));
- return __alloc_pages(gfp_mask, order, nid);
+ return __alloc_pages(gfp_mask, order, nid, NULL);
+}
+
+static inline
+struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order, int nid)
+{
+ VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
+ VM_WARN_ON((gfp & __GFP_THISNODE) && !node_online(nid));
+
+ return __folio_alloc(gfp, order, nid, NULL);
}
/*
@@ -524,38 +247,37 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
}
#ifdef CONFIG_NUMA
-extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
-
-static inline struct page *
-alloc_pages(gfp_t gfp_mask, unsigned int order)
+struct page *alloc_pages(gfp_t gfp, unsigned int order);
+struct folio *folio_alloc(gfp_t gfp, unsigned order);
+struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
+ unsigned long addr, bool hugepage);
+#else
+static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
{
- return alloc_pages_current(gfp_mask, order);
+ return alloc_pages_node(numa_node_id(), gfp_mask, order);
}
-extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
- struct vm_area_struct *vma, unsigned long addr,
- int node, bool hugepage);
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
- alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
-#else
-#define alloc_pages(gfp_mask, order) \
- alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
- alloc_pages(gfp_mask, order)
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
- alloc_pages(gfp_mask, order)
+static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
+{
+ return __folio_alloc_node(gfp, order, numa_node_id());
+}
+#define vma_alloc_folio(gfp, order, vma, addr, hugepage) \
+ folio_alloc(gfp, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
-#define alloc_page_vma(gfp_mask, vma, addr) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
-#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
+static inline struct page *alloc_page_vma(gfp_t gfp,
+ struct vm_area_struct *vma, unsigned long addr)
+{
+ struct folio *folio = vma_alloc_folio(gfp, 0, vma, addr, false);
+
+ return &folio->page;
+}
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
-void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
+void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1);
void free_pages_exact(void *virt, size_t size);
-void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
+__meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
#define __get_free_page(gfp_mask) \
__get_free_pages((gfp_mask), 0)
@@ -565,13 +287,19 @@ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
-extern void free_unref_page(struct page *page);
-extern void free_unref_page_list(struct list_head *list);
struct page_frag_cache;
extern void __page_frag_cache_drain(struct page *page, unsigned int count);
-extern void *page_frag_alloc(struct page_frag_cache *nc,
- unsigned int fragsz, gfp_t gfp_mask);
+extern void *page_frag_alloc_align(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask,
+ unsigned int align_mask);
+
+static inline void *page_frag_alloc(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask)
+{
+ return page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
+}
+
extern void page_frag_free(void *addr);
#define __free_page(page) __free_pages((page), 0)
@@ -599,6 +327,8 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);
extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);
+extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
+
#ifdef CONFIG_PM_SLEEP
extern bool pm_suspended_storage(void);
#else
@@ -615,7 +345,7 @@ extern int alloc_contig_range(unsigned long start, unsigned long end,
extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
int nid, nodemask_t *nodemask);
#endif
-void free_contig_range(unsigned long pfn, unsigned int nr_pages);
+void free_contig_range(unsigned long pfn, unsigned long nr_pages);
#ifdef CONFIG_CMA
/* CMA stuff */
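
The bulk allocator wired up above lets callers grab many order-0 pages in one pass instead of looping over alloc_page(). A sketch under those semantics (the surrounding caller is hypothetical):

	struct page *pages[16] = { NULL };	/* only NULL slots are filled */
	unsigned long filled;

	/* Returns the number of populated slots; it may be fewer than
	 * requested, and already-populated entries are skipped. */
	filled = alloc_pages_bulk_array(GFP_KERNEL, ARRAY_SIZE(pages), pages);
	if (filled < ARRAY_SIZE(pages))
		pr_debug("got %lu of %zu pages\n", filled, ARRAY_SIZE(pages));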
diff --git a/include/linux/gfp_api.h b/include/linux/gfp_api.h
new file mode 100644
index 000000000000..5a05a2764a86
--- /dev/null
+++ b/include/linux/gfp_api.h
@@ -0,0 +1 @@
+#include <linux/gfp.h>
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
new file mode 100644
index 000000000000..d88c46ca82e1
--- /dev/null
+++ b/include/linux/gfp_types.h
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GFP_TYPES_H
+#define __LINUX_GFP_TYPES_H
+
+/* The typedef is in types.h but we want the documentation here */
+#if 0
+/**
+ * typedef gfp_t - Memory allocation flags.
+ *
+ * GFP flags are commonly used throughout Linux to indicate how memory
+ * should be allocated. The GFP acronym stands for get_free_pages(),
+ * the underlying memory allocation function. Not every GFP flag is
+ * supported by every function which may allocate memory. Most users
+ * will want to use a plain ``GFP_KERNEL``.
+ */
+typedef unsigned int __bitwise gfp_t;
+#endif
+
+/*
+ * In case of changes, please don't forget to update
+ * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
+ */
+
+/* Plain integer GFP bitmasks. Do not use this directly. */
+#define ___GFP_DMA 0x01u
+#define ___GFP_HIGHMEM 0x02u
+#define ___GFP_DMA32 0x04u
+#define ___GFP_MOVABLE 0x08u
+#define ___GFP_RECLAIMABLE 0x10u
+#define ___GFP_HIGH 0x20u
+#define ___GFP_IO 0x40u
+#define ___GFP_FS 0x80u
+#define ___GFP_ZERO 0x100u
+#define ___GFP_ATOMIC 0x200u
+#define ___GFP_DIRECT_RECLAIM 0x400u
+#define ___GFP_KSWAPD_RECLAIM 0x800u
+#define ___GFP_WRITE 0x1000u
+#define ___GFP_NOWARN 0x2000u
+#define ___GFP_RETRY_MAYFAIL 0x4000u
+#define ___GFP_NOFAIL 0x8000u
+#define ___GFP_NORETRY 0x10000u
+#define ___GFP_MEMALLOC 0x20000u
+#define ___GFP_COMP 0x40000u
+#define ___GFP_NOMEMALLOC 0x80000u
+#define ___GFP_HARDWALL 0x100000u
+#define ___GFP_THISNODE 0x200000u
+#define ___GFP_ACCOUNT 0x400000u
+#define ___GFP_ZEROTAGS 0x800000u
+#ifdef CONFIG_KASAN_HW_TAGS
+#define ___GFP_SKIP_ZERO 0x1000000u
+#define ___GFP_SKIP_KASAN_UNPOISON 0x2000000u
+#define ___GFP_SKIP_KASAN_POISON 0x4000000u
+#else
+#define ___GFP_SKIP_ZERO 0
+#define ___GFP_SKIP_KASAN_UNPOISON 0
+#define ___GFP_SKIP_KASAN_POISON 0
+#endif
+#ifdef CONFIG_LOCKDEP
+#define ___GFP_NOLOCKDEP 0x8000000u
+#else
+#define ___GFP_NOLOCKDEP 0
+#endif
+/* If the above are modified, __GFP_BITS_SHIFT may need updating */
+
+/*
+ * Physical address zone modifiers (see linux/mmzone.h - low four bits)
+ *
+ * Do not put any conditional on these. If necessary modify the definitions
+ * without the underscores and use them consistently. The definitions here may
+ * be used in bit comparisons.
+ */
+#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
+#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
+#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
+#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
+#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
+
+/**
+ * DOC: Page mobility and placement hints
+ *
+ * Page mobility and placement hints
+ * ---------------------------------
+ *
+ * These flags provide hints about how mobile the page is. Pages with similar
+ * mobility are placed within the same pageblocks to minimise problems due
+ * to external fragmentation.
+ *
+ * %__GFP_MOVABLE (also a zone modifier) indicates that the page can be
+ * moved by page migration during memory compaction or can be reclaimed.
+ *
+ * %__GFP_RECLAIMABLE is used for slab allocations that specify
+ * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
+ *
+ * %__GFP_WRITE indicates the caller intends to dirty the page. Where possible,
+ * these pages will be spread between local zones to avoid all the dirty
+ * pages being in one zone (fair zone allocation policy).
+ *
+ * %__GFP_HARDWALL enforces the cpuset memory allocation policy.
+ *
+ * %__GFP_THISNODE forces the allocation to be satisfied from the requested
+ * node with no fallbacks or placement policy enforcements.
+ *
+ * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
+ */
+#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
+#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
+#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
+#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
+#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
+
+/**
+ * DOC: Watermark modifiers
+ *
+ * Watermark modifiers -- controls access to emergency reserves
+ * ------------------------------------------------------------
+ *
+ * %__GFP_HIGH indicates that the caller is high-priority and that granting
+ * the request is necessary before the system can make forward progress.
+ * For example, creating an IO context to clean pages.
+ *
+ * %__GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
+ * high priority. Users are typically interrupt handlers. This may be
+ * used in conjunction with %__GFP_HIGH
+ *
+ * %__GFP_MEMALLOC allows access to all memory. This should only be used when
+ * the caller guarantees the allocation will allow more memory to be freed
+ * very shortly, e.g. a process exiting or swapping. Users should either
+ * be the MM itself or coordinate closely with the VM (e.g. swap over NFS).
+ * Users of this flag have to be extremely careful to not deplete the reserve
+ * completely and implement a throttling mechanism which controls the
+ * consumption of the reserve based on the amount of freed memory.
+ * Usage of a pre-allocated pool (e.g. mempool) should be always considered
+ * before using this flag.
+ *
+ * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
+ * This takes precedence over the %__GFP_MEMALLOC flag if both are set.
+ */
+#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
+#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
+#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
+#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
+
+/**
+ * DOC: Reclaim modifiers
+ *
+ * Reclaim modifiers
+ * -----------------
+ * Please note that all the following flags are only applicable to sleepable
+ * allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them).
+ *
+ * %__GFP_IO can start physical IO.
+ *
+ * %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the
+ * allocator recursing into the filesystem which might already be holding
+ * locks.
+ *
+ * %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
+ * This flag can be cleared to avoid unnecessary delays when a fallback
+ * option is available.
+ *
+ * %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
+ * the low watermark is reached and have it reclaim pages until the high
+ * watermark is reached. A caller may wish to clear this flag when fallback
+ * options are available and the reclaim is likely to disrupt the system. The
+ * canonical example is THP allocation where a fallback is cheap but
+ * reclaim/compaction may cause indirect stalls.
+ *
+ * %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
+ *
+ * The default allocator behavior depends on the request size. We have a concept
+ * of so-called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
+ * !costly allocations are too essential to fail, so they are implicitly
+ * non-failing by default (with some exceptions: OOM victims, for example,
+ * might fail, so the caller still has to check for failures), while costly
+ * requests try not to be disruptive and back off even without invoking the
+ * OOM killer.
+ * The following three modifiers might be used to override some of these
+ * implicit rules:
+ *
+ * %__GFP_NORETRY: The VM implementation will try only very lightweight
+ * memory direct reclaim to get some memory under memory pressure (thus
+ * it can sleep). It will avoid disruptive actions like OOM killer. The
+ * caller must handle the failure which is quite likely to happen under
+ * heavy memory pressure. The flag is suitable when failure can easily be
+ * handled at small cost, such as reduced throughput.
+ *
+ * %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
+ * procedures that have previously failed if there is some indication
+ * that progress has been made elsewhere. It can wait for other
+ * tasks to attempt high level approaches to freeing memory such as
+ * compaction (which removes fragmentation) and page-out.
+ * There is still a definite limit to the number of retries, but it is
+ * a larger limit than with %__GFP_NORETRY.
+ * Allocations with this flag may fail, but only when there is
+ * genuinely little unused memory. While these allocations do not
+ * directly trigger the OOM killer, their failure indicates that
+ * the system is likely to need to use the OOM killer soon. The
+ * caller must handle failure, but can reasonably do so by failing
+ * a higher-level request, or completing it only in a much less
+ * efficient manner.
+ * If the allocation does fail, and the caller is in a position to
+ * free some non-essential memory, doing so could benefit the system
+ * as a whole.
+ *
+ * %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
+ * cannot handle allocation failures. The allocation could block
+ * indefinitely but will never return with failure. Testing for
+ * failure is pointless.
+ * New users should be evaluated carefully (and the flag should be
+ * used only when there is no reasonable failure policy) but it is
+ * definitely preferable to use the flag rather than opencode endless
+ * loop around allocator.
+ * Using this flag for costly allocations is _highly_ discouraged.
+ */
+#define __GFP_IO ((__force gfp_t)___GFP_IO)
+#define __GFP_FS ((__force gfp_t)___GFP_FS)
+#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
+#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
+#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
+#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL)
+#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
+#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
+
+/**
+ * DOC: Action modifiers
+ *
+ * Action modifiers
+ * ----------------
+ *
+ * %__GFP_NOWARN suppresses allocation failure reports.
+ *
+ * %__GFP_COMP causes the allocation to be returned as a compound page.
+ *
+ * %__GFP_ZERO returns a zeroed page on success.
+ *
+ * %__GFP_ZEROTAGS zeroes memory tags at allocation time if the memory itself
+ * is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that
+ * __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting
+ * memory tags at the same time as zeroing memory has minimal additional
+ * performance impact.
+ *
+ * %__GFP_SKIP_KASAN_UNPOISON makes KASAN skip unpoisoning on page allocation.
+ * Only effective in HW_TAGS mode.
+ *
+ * %__GFP_SKIP_KASAN_POISON makes KASAN skip poisoning on page deallocation.
+ * Typically, used for userspace pages. Only effective in HW_TAGS mode.
+ */
+#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
+#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
+#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
+#define __GFP_ZEROTAGS ((__force gfp_t)___GFP_ZEROTAGS)
+#define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO)
+#define __GFP_SKIP_KASAN_UNPOISON ((__force gfp_t)___GFP_SKIP_KASAN_UNPOISON)
+#define __GFP_SKIP_KASAN_POISON ((__force gfp_t)___GFP_SKIP_KASAN_POISON)
+
+/* Disable lockdep for GFP context tracking */
+#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
+
+/* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
+
+/**
+ * DOC: Useful GFP flag combinations
+ *
+ * Useful GFP flag combinations
+ * ----------------------------
+ *
+ * Useful GFP flag combinations that are commonly used. It is recommended
+ * that subsystems start with one of these combinations and then set/clear
+ * %__GFP_FOO flags as necessary.
+ *
+ * %GFP_ATOMIC users cannot sleep and need the allocation to succeed. A lower
+ * watermark is applied to allow access to "atomic reserves".
+ * The current implementation doesn't support NMI and a few other strict
+ * non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT.
+ *
+ * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires
+ * %ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
+ *
+ * %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
+ * accounted to kmemcg.
+ *
+ * %GFP_NOWAIT is for kernel allocations that should not stall for direct
+ * reclaim, start physical IO or use any filesystem callback.
+ *
+ * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
+ * that do not require the starting of any physical IO.
+ * Please try to avoid using this flag directly and instead use
+ * memalloc_noio_{save,restore} to mark the whole scope which cannot
+ * perform any IO with a short explanation why. All allocation requests
+ * will inherit GFP_NOIO implicitly.
+ *
+ * %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
+ * Please try to avoid using this flag directly and instead use
+ * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
+ * recurse into the FS layer with a short explanation why. All allocation
+ * requests will inherit GFP_NOFS implicitly.
+ *
+ * %GFP_USER is for userspace allocations that also need to be directly
+ * accessible by the kernel or hardware. It is typically used by hardware
+ * for buffers that are mapped to userspace (e.g. graphics) that hardware
+ * still must DMA to. cpuset limits are enforced for these allocations.
+ *
+ * %GFP_DMA exists for historical reasons and should be avoided where possible.
+ * The flag indicates that the caller requires that the lowest zone be
+ * used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
+ * it would require careful auditing as some users really require it and
+ * others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the
+ * lowest zone as a type of emergency reserve.
+ *
+ * %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit
+ * address. Note that kmalloc(..., GFP_DMA32) does not return DMA32 memory
+ * because the DMA32 kmalloc cache array is not implemented.
+ * (Reason: there is no such user in the kernel.)
+ *
+ * %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
+ * do not need to be directly accessible by the kernel but that cannot
+ * move once in use. An example may be a hardware allocation that maps
+ * data directly into userspace but has no addressing limitations.
+ *
+ * %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
+ * need direct access to but can use kmap() when access is required. They
+ * are expected to be movable via page reclaim or page migration. Typically,
+ * pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE.
+ *
+ * %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They
+ * are compound allocations that will generally fail quickly if memory is not
+ * available and will not wake kswapd/kcompactd on failure. The _LIGHT
+ * version does not attempt reclaim/compaction at all and is by default used
+ * in page fault path, while the non-light is used by khugepaged.
+ */
+#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
+#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
+#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
+#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
+#define GFP_NOIO (__GFP_RECLAIM)
+#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
+#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
+#define GFP_DMA __GFP_DMA
+#define GFP_DMA32 __GFP_DMA32
+#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
+#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE | \
+ __GFP_SKIP_KASAN_POISON | __GFP_SKIP_KASAN_UNPOISON)
+#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
+ __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
+#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
+
+#endif /* __LINUX_GFP_TYPES_H */
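The comments above recommend scope markers over passing GFP_NOIO/GFP_NOFS at
each call site. A minimal sketch of that pattern, assuming only a hypothetical
helper name; every allocation inside the marked scope inherits the constraint:

#include <linux/sched/mm.h>
#include <linux/slab.h>

/* Hypothetical helper: allocate scratch space on a path that must not
 * start physical I/O. The whole scope is marked instead of sprinkling
 * GFP_NOIO, so the GFP_KERNEL allocation below implicitly behaves as
 * GFP_NOIO. */
static void *alloc_in_noio_scope(size_t len)
{
        unsigned int noio_flags;
        void *buf;

        noio_flags = memalloc_noio_save();      /* no I/O from here on */
        buf = kmalloc(len, GFP_KERNEL);
        memalloc_noio_restore(noio_flags);

        return buf;
}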
diff --git a/include/linux/goldfish.h b/include/linux/goldfish.h
index 265a099cd3b8..bcc17f95b906 100644
--- a/include/linux/goldfish.h
+++ b/include/linux/goldfish.h
@@ -8,14 +8,21 @@
/* Helpers for Goldfish virtual platform */
+#ifndef gf_ioread32
+#define gf_ioread32 ioread32
+#endif
+#ifndef gf_iowrite32
+#define gf_iowrite32 iowrite32
+#endif
+
static inline void gf_write_ptr(const void *ptr, void __iomem *portl,
void __iomem *porth)
{
const unsigned long addr = (unsigned long)ptr;
- writel(lower_32_bits(addr), portl);
+ gf_iowrite32(lower_32_bits(addr), portl);
#ifdef CONFIG_64BIT
- writel(upper_32_bits(addr), porth);
+ gf_iowrite32(upper_32_bits(addr), porth);
#endif
}
@@ -23,9 +30,9 @@ static inline void gf_write_dma_addr(const dma_addr_t addr,
void __iomem *portl,
void __iomem *porth)
{
- writel(lower_32_bits(addr), portl);
+ gf_iowrite32(lower_32_bits(addr), portl);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- writel(upper_32_bits(addr), porth);
+ gf_iowrite32(upper_32_bits(addr), porth);
#endif
}
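The new #ifndef defaults above exist so that an architecture can substitute
its own accessors before this header is parsed. A sketch of such an override,
assuming a hypothetical big-endian platform header:

/* Hypothetical platform header, included before <linux/goldfish.h>. */
#include <linux/io.h>

#define gf_ioread32     ioread32be      /* registers are big-endian */
#define gf_iowrite32    iowrite32be

#include <linux/goldfish.h>
/* gf_write_ptr()/gf_write_dma_addr() now emit big-endian accesses. */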
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 2157717c2136..a370387fa406 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -95,18 +95,15 @@ struct device;
int devm_gpio_request(struct device *dev, unsigned gpio, const char *label);
int devm_gpio_request_one(struct device *dev, unsigned gpio,
unsigned long flags, const char *label);
-void devm_gpio_free(struct device *dev, unsigned int gpio);
#else /* ! CONFIG_GPIOLIB */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bug.h>
-#include <linux/pinctrl/pinctrl.h>
struct device;
struct gpio_chip;
-struct pinctrl_dev;
static inline bool gpio_is_valid(int number)
{
@@ -242,11 +239,6 @@ static inline int devm_gpio_request_one(struct device *dev, unsigned gpio,
return -EINVAL;
}
-static inline void devm_gpio_free(struct device *dev, unsigned int gpio)
-{
- WARN_ON(1);
-}
-
#endif /* ! CONFIG_GPIOLIB */
#endif /* __LINUX_GPIO_H */
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index bf2d017dd7b7..36460ced060b 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -2,32 +2,22 @@
#ifndef __LINUX_GPIO_CONSUMER_H
#define __LINUX_GPIO_CONSUMER_H
+#include <linux/bits.h>
#include <linux/bug.h>
+#include <linux/compiler_types.h>
#include <linux/err.h>
-#include <linux/kernel.h>
struct device;
-
-/**
- * Opaque descriptor for a GPIO. These are obtained using gpiod_get() and are
- * preferable to the old integer-based handles.
- *
- * Contrary to integers, a pointer to a gpio_desc is guaranteed to be valid
- * until the GPIO is released.
- */
struct gpio_desc;
-
-/**
- * Opaque descriptor for a structure of GPIO array attributes. This structure
- * is attached to struct gpiod_descs obtained from gpiod_get_array() and can be
- * passed back to get/set array functions in order to activate fast processing
- * path if applicable.
- */
struct gpio_array;
/**
- * Struct containing an array of descriptors that can be obtained using
- * gpiod_get_array().
+ * struct gpio_descs - Struct containing an array of descriptors that can be
+ * obtained using gpiod_get_array()
+ *
+ * @info: Pointer to the opaque gpio_array structure
+ * @ndescs: Number of held descriptors
+ * @desc: Array of pointers to GPIO descriptors
*/
struct gpio_descs {
struct gpio_array *info;
@@ -42,8 +32,16 @@ struct gpio_descs {
#define GPIOD_FLAGS_BIT_NONEXCLUSIVE BIT(4)
/**
- * Optional flags that can be passed to one of gpiod_* to configure direction
- * and output value. These values cannot be OR'd.
+ * enum gpiod_flags - Optional flags that can be passed to one of gpiod_* to
+ * configure direction and output value. These values
+ * cannot be OR'd.
+ *
+ * @GPIOD_ASIS: Don't change anything
+ * @GPIOD_IN: Set lines to input mode
+ * @GPIOD_OUT_LOW: Set lines to output and drive them low
+ * @GPIOD_OUT_HIGH: Set lines to output and drive them high
+ * @GPIOD_OUT_LOW_OPEN_DRAIN: Set lines to open-drain output and drive them low
+ * @GPIOD_OUT_HIGH_OPEN_DRAIN: Set lines to open-drain output and drive them high
*/
enum gpiod_flags {
GPIOD_ASIS = 0,
@@ -111,6 +109,8 @@ int gpiod_get_direction(struct gpio_desc *desc);
int gpiod_direction_input(struct gpio_desc *desc);
int gpiod_direction_output(struct gpio_desc *desc, int value);
int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
+int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags);
+int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags);
/* Value get/set from non-sleeping context */
int gpiod_get_value(const struct gpio_desc *desc);
@@ -156,7 +156,8 @@ int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_array *array_info,
unsigned long *value_bitmap);
-int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
+int gpiod_set_config(struct gpio_desc *desc, unsigned long config);
+int gpiod_set_debounce(struct gpio_desc *desc, unsigned int debounce);
int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
void gpiod_toggle_active_low(struct gpio_desc *desc);
@@ -173,10 +174,6 @@ int desc_to_gpio(const struct gpio_desc *desc);
/* Child properties interface */
struct fwnode_handle;
-struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label);
struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode,
const char *con_id, int index,
enum gpiod_flags flags,
@@ -189,6 +186,8 @@ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
#else /* CONFIG_GPIOLIB */
+#include <linux/kernel.h>
+
static inline int gpiod_count(struct device *dev, const char *con_id)
{
return 0;
@@ -349,8 +348,18 @@ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
WARN_ON(desc);
return -ENOSYS;
}
-
-
+static inline int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc,
+ unsigned long flags)
+{
+ WARN_ON(desc);
+ return -ENOSYS;
+}
+static inline int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc,
+ unsigned long flags)
+{
+ WARN_ON(desc);
+ return -ENOSYS;
+}
static inline int gpiod_get_value(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
@@ -470,7 +479,14 @@ static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
return 0;
}
-static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
+static inline int gpiod_set_config(struct gpio_desc *desc, unsigned long config)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(desc);
+ return -ENOSYS;
+}
+
+static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned int debounce)
{
/* GPIO can never have been requested */
WARN_ON(desc);
@@ -534,15 +550,6 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
struct fwnode_handle;
static inline
-struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label)
-{
- return ERR_PTR(-ENOSYS);
-}
-
-static inline
struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode,
const char *con_id, int index,
enum gpiod_flags flags,
@@ -598,7 +605,7 @@ struct gpio_desc *devm_fwnode_get_gpiod_from_child(struct device *dev,
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_OF_GPIO)
struct device_node;
-struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
+struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node,
const char *propname, int index,
enum gpiod_flags dflags,
const char *label);
@@ -608,7 +615,7 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
struct device_node;
static inline
-struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
+struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node,
const char *propname, int index,
enum gpiod_flags dflags,
const char *label)
@@ -622,7 +629,7 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
struct device_node;
struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
- struct device_node *node,
+ const struct device_node *node,
const char *propname, int index,
enum gpiod_flags dflags,
const char *label);
@@ -633,7 +640,7 @@ struct device_node;
static inline
struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
- struct device_node *node,
+ const struct device_node *node,
const char *propname, int index,
enum gpiod_flags dflags,
const char *label)
@@ -663,25 +670,26 @@ struct acpi_gpio_mapping {
* get GpioIo type explicitly, this quirk may be used.
*/
#define ACPI_GPIO_QUIRK_ONLY_GPIOIO BIT(1)
+/* Use given pin as an absolute GPIO number in the system */
+#define ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER BIT(2)
unsigned int quirks;
};
-#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_ACPI)
-
struct acpi_device;
+#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_ACPI)
+
int acpi_dev_add_driver_gpios(struct acpi_device *adev,
const struct acpi_gpio_mapping *gpios);
void acpi_dev_remove_driver_gpios(struct acpi_device *adev);
int devm_acpi_dev_add_driver_gpios(struct device *dev,
const struct acpi_gpio_mapping *gpios);
-void devm_acpi_dev_remove_driver_gpios(struct device *dev);
-#else /* CONFIG_GPIOLIB && CONFIG_ACPI */
+struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin, char *label);
-struct acpi_device;
+#else /* CONFIG_GPIOLIB && CONFIG_ACPI */
static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
const struct acpi_gpio_mapping *gpios)
@@ -695,7 +703,12 @@ static inline int devm_acpi_dev_add_driver_gpios(struct device *dev,
{
return -ENXIO;
}
-static inline void devm_acpi_dev_remove_driver_gpios(struct device *dev) {}
+
+static inline struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin,
+ char *label)
+{
+ return ERR_PTR(-ENOSYS);
+}
#endif /* CONFIG_GPIOLIB && CONFIG_ACPI */
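To ground the descriptor-based consumer API touched above, a minimal usage
sketch; the "reset" con_id, the pulse width and the surrounding callbacks are
assumptions, not part of this patch:

#include <linux/delay.h>
#include <linux/gpio/consumer.h>

static int example_hw_reset(struct device *dev)
{
        struct gpio_desc *reset;

        /* Request as output, initially inactive. */
        reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
        if (IS_ERR(reset))
                return PTR_ERR(reset);

        gpiod_set_value(reset, 1);      /* assert reset */
        udelay(10);                     /* assumed pulse width */
        gpiod_set_value(reset, 0);      /* release */

        return 0;
}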
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 6ef05bccc0a6..6aeea1071b1b 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -3,13 +3,16 @@
#define __LINUX_GPIO_DRIVER_H
#include <linux/device.h>
-#include <linux/types.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/lockdep.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/property.h>
+#include <linux/types.h>
+
+#include <asm/msi.h>
struct gpio_desc;
struct of_phandle_args;
@@ -22,6 +25,13 @@ enum gpio_lookup_flags;
struct gpio_chip;
+union gpio_irq_fwspec {
+ struct irq_fwspec fwspec;
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ msi_alloc_info_t msiinfo;
+#endif
+};
+
#define GPIO_LINE_DIRECTION_IN 1
#define GPIO_LINE_DIRECTION_OUT 0
@@ -87,7 +97,7 @@ struct gpio_irq_chip {
* @need_valid_mask to make these GPIO lines unavailable for
* translation.
*/
- int (*child_to_parent_hwirq)(struct gpio_chip *chip,
+ int (*child_to_parent_hwirq)(struct gpio_chip *gc,
unsigned int child_hwirq,
unsigned int child_type,
unsigned int *parent_hwirq,
@@ -102,9 +112,10 @@ struct gpio_irq_chip {
* variant named &gpiochip_populate_parent_fwspec_fourcell is also
* available.
*/
- void *(*populate_parent_alloc_arg)(struct gpio_chip *chip,
- unsigned int parent_hwirq,
- unsigned int parent_type);
+ int (*populate_parent_alloc_arg)(struct gpio_chip *gc,
+ union gpio_irq_fwspec *fwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type);
/**
* @child_offset_to_irq:
@@ -114,7 +125,7 @@ struct gpio_irq_chip {
* callback. If this is not specified, then a default callback will be
* provided that returns the line offset.
*/
- unsigned int (*child_offset_to_irq)(struct gpio_chip *chip,
+ unsigned int (*child_offset_to_irq)(struct gpio_chip *gc,
unsigned int pin);
/**
@@ -166,13 +177,26 @@ struct gpio_irq_chip {
*/
irq_flow_handler_t parent_handler;
- /**
- * @parent_handler_data:
- *
- * Data associated, and passed to, the handler for the parent
- * interrupt.
- */
- void *parent_handler_data;
+ union {
+ /**
+ * @parent_handler_data:
+ *
+ * If @per_parent_data is false, @parent_handler_data is a
+ * single pointer used as the data associated with every
+ * parent interrupt.
+ */
+ void *parent_handler_data;
+
+ /**
+ * @parent_handler_data_array:
+ *
+ * If @per_parent_data is true, @parent_handler_data_array is
+ * an array of @num_parents pointers, and is used to associate
+ * different data for each parent. This cannot be NULL if
+ * @per_parent_data is true.
+ */
+ void **parent_handler_data_array;
+ };
/**
* @num_parents:
@@ -204,12 +228,29 @@ struct gpio_irq_chip {
bool threaded;
/**
+ * @per_parent_data:
+ *
+ * True if parent_handler_data_array describes a @num_parents
+ * sized array to be used as parent data.
+ */
+ bool per_parent_data;
+
+ /**
+ * @initialized:
+ *
+ * Flag to track the initialization of the GPIO chip irq members.
+ * It ensures the irq members are not used before they are
+ * initialized.
+ */
+ bool initialized;
+
+ /**
* @init_hw: optional routine to initialize hardware before
* an IRQ chip will be added. This is quite useful when
* a particular driver wants to clear IRQ related registers
* in order to avoid undesired events.
*/
- int (*init_hw)(struct gpio_chip *chip);
+ int (*init_hw)(struct gpio_chip *gc);
/**
* @init_valid_mask: optional routine to initialize @valid_mask, to be
@@ -220,14 +261,14 @@ struct gpio_irq_chip {
* then directly set some bits to "0" if they cannot be used for
* interrupts.
*/
- void (*init_valid_mask)(struct gpio_chip *chip,
+ void (*init_valid_mask)(struct gpio_chip *gc,
unsigned long *valid_mask,
unsigned int ngpios);
/**
* @valid_mask:
*
- * If not %NULL holds bitmask of GPIOs which are valid to be included
+ * If not %NULL, holds bitmask of GPIOs which are valid to be included
* in IRQ domain of the chip.
*/
unsigned long *valid_mask;
@@ -253,6 +294,19 @@ struct gpio_irq_chip {
* Store old irq_chip irq_disable callback
*/
void (*irq_disable)(struct irq_data *data);
+ /**
+ * @irq_unmask:
+ *
+ * Store old irq_chip irq_unmask callback
+ */
+ void (*irq_unmask)(struct irq_data *data);
+
+ /**
+ * @irq_mask:
+ *
+ * Store old irq_chip irq_mask callback
+ */
+ void (*irq_mask)(struct irq_data *data);
};
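A sketch of the new union and @per_parent_data flag in use when filling in
struct gpio_irq_chip; the bank objects and the parent handler are
hypothetical driver state:

/* Excerpt from a hypothetical probe; bank0/bank1 and
 * example_parent_handler are assumed driver objects. */
static void *example_bank_data[2] = { &bank0, &bank1 };

static void example_setup_irq(struct gpio_chip *gc)
{
        struct gpio_irq_chip *girq = &gc->irq;

        girq->parent_handler = example_parent_handler;
        girq->num_parents = ARRAY_SIZE(example_bank_data);
        girq->per_parent_data = true;   /* selects the array union member */
        girq->parent_handler_data_array = example_bank_data;
}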
/**
@@ -261,15 +315,16 @@ struct gpio_irq_chip {
* number or the name of the SoC IP-block implementing it.
* @gpiodev: the internal state holder, opaque struct
* @parent: optional parent device providing the GPIOs
+ * @fwnode: optional fwnode providing this controller's properties
* @owner: helps prevent removal of modules exporting active GPIOs
* @request: optional hook for chip-specific activation, such as
* enabling module power and clock; may sleep
* @free: optional hook for chip-specific deactivation, such as
* disabling module power and clock; may sleep
* @get_direction: returns direction for signal "offset", 0=out, 1=in,
- * (same as GPIOF_DIR_XXX), or negative error.
- * It is recommended to always implement this function, even on
- * input-only or output-only gpio chips.
+ * (same as GPIO_LINE_DIRECTION_OUT / GPIO_LINE_DIRECTION_IN),
+ * or negative error. It is recommended to always implement this
+ * function, even on input-only or output-only gpio chips.
* @direction_input: configures signal "offset" as input, or returns error
* This can be omitted on input-only or output-only gpio chips.
* @direction_output: configures signal "offset" as output, or returns error
@@ -291,6 +346,10 @@ struct gpio_irq_chip {
* @add_pin_ranges: optional routine to initialize pin ranges, to be used when
* requires special mapping of the pins that provides GPIO functionality.
* It is called after adding GPIO chip and before adding IRQ chip.
+ * @en_hw_timestamp: optional routine to enable hardware timestamping, if
+ * the GPIO chip supports it.
+ * @dis_hw_timestamp: optional routine to disable hardware timestamping, if
+ * the GPIO chip supports it.
* @base: identifies the first GPIO number handled by this chip;
* or, if negative during registration, requests dynamic ID allocation.
* DEPRECATION: providing anything non-negative and nailing the base
@@ -299,6 +358,9 @@ struct gpio_irq_chip {
* get rid of the static GPIO number space in the long run.
* @ngpio: the number of GPIOs handled by this controller; the last GPIO
* handled is (base + ngpio - 1).
+ * @offset: when multiple gpio chips belong to the same device, this
+ * can be used as an offset within the device so that friendly names
+ * can be properly assigned.
* @names: if set, must be an array of strings to use as alternative
* names for the GPIOs in this chip. Any entry in the array
* may be NULL if there is no alias for the GPIO, however the
@@ -333,7 +395,7 @@ struct gpio_irq_chip {
* output.
*
* A gpio_chip can help platforms abstract various sources of GPIOs so
- * they can all be accessed through a common programing interface.
+ * they can all be accessed through a common programming interface.
* Example sources would be SOC controllers, FPGAs, multifunction
* chips, dedicated GPIO expanders, and so on.
*
@@ -346,45 +408,53 @@ struct gpio_chip {
const char *label;
struct gpio_device *gpiodev;
struct device *parent;
+ struct fwnode_handle *fwnode;
struct module *owner;
- int (*request)(struct gpio_chip *chip,
- unsigned offset);
- void (*free)(struct gpio_chip *chip,
- unsigned offset);
- int (*get_direction)(struct gpio_chip *chip,
- unsigned offset);
- int (*direction_input)(struct gpio_chip *chip,
- unsigned offset);
- int (*direction_output)(struct gpio_chip *chip,
- unsigned offset, int value);
- int (*get)(struct gpio_chip *chip,
- unsigned offset);
- int (*get_multiple)(struct gpio_chip *chip,
+ int (*request)(struct gpio_chip *gc,
+ unsigned int offset);
+ void (*free)(struct gpio_chip *gc,
+ unsigned int offset);
+ int (*get_direction)(struct gpio_chip *gc,
+ unsigned int offset);
+ int (*direction_input)(struct gpio_chip *gc,
+ unsigned int offset);
+ int (*direction_output)(struct gpio_chip *gc,
+ unsigned int offset, int value);
+ int (*get)(struct gpio_chip *gc,
+ unsigned int offset);
+ int (*get_multiple)(struct gpio_chip *gc,
unsigned long *mask,
unsigned long *bits);
- void (*set)(struct gpio_chip *chip,
- unsigned offset, int value);
- void (*set_multiple)(struct gpio_chip *chip,
+ void (*set)(struct gpio_chip *gc,
+ unsigned int offset, int value);
+ void (*set_multiple)(struct gpio_chip *gc,
unsigned long *mask,
unsigned long *bits);
- int (*set_config)(struct gpio_chip *chip,
- unsigned offset,
+ int (*set_config)(struct gpio_chip *gc,
+ unsigned int offset,
unsigned long config);
- int (*to_irq)(struct gpio_chip *chip,
- unsigned offset);
+ int (*to_irq)(struct gpio_chip *gc,
+ unsigned int offset);
void (*dbg_show)(struct seq_file *s,
- struct gpio_chip *chip);
+ struct gpio_chip *gc);
- int (*init_valid_mask)(struct gpio_chip *chip,
+ int (*init_valid_mask)(struct gpio_chip *gc,
unsigned long *valid_mask,
unsigned int ngpios);
- int (*add_pin_ranges)(struct gpio_chip *chip);
+ int (*add_pin_ranges)(struct gpio_chip *gc);
+ int (*en_hw_timestamp)(struct gpio_chip *gc,
+ u32 offset,
+ unsigned long flags);
+ int (*dis_hw_timestamp)(struct gpio_chip *gc,
+ u32 offset,
+ unsigned long flags);
int base;
u16 ngpio;
+ u16 offset;
const char *const *names;
bool can_sleep;
@@ -399,7 +469,7 @@ struct gpio_chip {
void __iomem *reg_dir_in;
bool bgpio_dir_unreadable;
int bgpio_bits;
- spinlock_t bgpio_lock;
+ raw_spinlock_t bgpio_lock;
unsigned long bgpio_data;
unsigned long bgpio_dir;
#endif /* CONFIG_GPIO_GENERIC */
@@ -422,15 +492,15 @@ struct gpio_chip {
/**
* @valid_mask:
*
- * If not %NULL holds bitmask of GPIOs which are valid to be used
+ * If not %NULL, holds bitmask of GPIOs which are valid to be used
* from the chip.
*/
unsigned long *valid_mask;
#if defined(CONFIG_OF_GPIO)
/*
- * If CONFIG_OF is enabled, then all GPIO controllers described in the
- * device tree automatically may have an OF translation
+ * If CONFIG_OF_GPIO is enabled, then all GPIO controllers described in
+ * the device tree automatically may have an OF translation
*/
/**
@@ -455,78 +525,131 @@ struct gpio_chip {
*/
int (*of_xlate)(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec, u32 *flags);
+
+ /**
+ * @of_gpio_ranges_fallback:
+ *
+ * Optional hook for the case that no gpio-ranges property is defined
+ * within the device tree node "np" (usually a DT that predates the
+ * introduction of gpio-ranges), in which case this callback provides
+ * the necessary backward compatibility for the pin ranges.
+ */
+ int (*of_gpio_ranges_fallback)(struct gpio_chip *gc,
+ struct device_node *np);
+
#endif /* CONFIG_OF_GPIO */
};
-extern const char *gpiochip_is_requested(struct gpio_chip *chip,
- unsigned offset);
+extern const char *gpiochip_is_requested(struct gpio_chip *gc,
+ unsigned int offset);
+
+/**
+ * for_each_requested_gpio_in_range - iterates over requested GPIOs in a given range
+ * @chip: the chip to query
+ * @i: loop variable
+ * @base: first GPIO in the range
+ * @size: number of GPIOs to check starting from @base
+ * @label: label of current GPIO
+ */
+#define for_each_requested_gpio_in_range(chip, i, base, size, label) \
+ for (i = 0; i < size; i++) \
+ if ((label = gpiochip_is_requested(chip, base + i)) == NULL) {} else
+
+/* Iterates over all requested GPIOs of the given @chip */
+#define for_each_requested_gpio(chip, i, label) \
+ for_each_requested_gpio_in_range(chip, i, 0, chip->ngpio, label)
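A small usage sketch of the iterator just defined; the dbg_show-style
function is hypothetical:

#include <linux/seq_file.h>

static void example_dbg_show(struct seq_file *s, struct gpio_chip *gc)
{
        unsigned int i;
        const char *label;

        /* Visits only currently requested offsets; @label is set to the
         * consumer label on each iteration. */
        for_each_requested_gpio(gc, i, label)
                seq_printf(s, "gpio-%u (%s)\n", gc->base + i, label);
}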
/* add/remove chips */
-extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
+extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
struct lock_class_key *lock_key,
struct lock_class_key *request_key);
/**
* gpiochip_add_data() - register a gpio_chip
- * @chip: the chip to register, with chip->base initialized
+ * @gc: the chip to register, with gc->base initialized
* @data: driver-private data associated with this chip
*
* Context: potentially before irqs will work
*
* When gpiochip_add_data() is called very early during boot, so that GPIOs
- * can be freely used, the chip->parent device must be registered before
+ * can be freely used, the gc->parent device must be registered before
* the gpio framework's arch_initcall(). Otherwise sysfs initialization
* for GPIOs will fail rudely.
*
* gpiochip_add_data() must only be called after gpiolib initialization,
- * ie after core_initcall().
+ * i.e. after core_initcall().
*
- * If chip->base is negative, this requests dynamic assignment of
+ * If gc->base is negative, this requests dynamic assignment of
* a range of valid GPIOs.
*
* Returns:
* A negative errno if the chip can't be registered, such as because the
- * chip->base is invalid or already associated with a different chip.
+ * gc->base is invalid or already associated with a different chip.
* Otherwise it returns zero as a success code.
*/
#ifdef CONFIG_LOCKDEP
-#define gpiochip_add_data(chip, data) ({ \
+#define gpiochip_add_data(gc, data) ({ \
static struct lock_class_key lock_key; \
static struct lock_class_key request_key; \
- gpiochip_add_data_with_key(chip, data, &lock_key, \
+ gpiochip_add_data_with_key(gc, data, &lock_key, \
+ &request_key); \
+ })
+#define devm_gpiochip_add_data(dev, gc, data) ({ \
+ static struct lock_class_key lock_key; \
+ static struct lock_class_key request_key; \
+ devm_gpiochip_add_data_with_key(dev, gc, data, &lock_key, \
&request_key); \
})
#else
-#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL, NULL)
+#define gpiochip_add_data(gc, data) gpiochip_add_data_with_key(gc, data, NULL, NULL)
+#define devm_gpiochip_add_data(dev, gc, data) \
+ devm_gpiochip_add_data_with_key(dev, gc, data, NULL, NULL)
#endif /* CONFIG_LOCKDEP */
-static inline int gpiochip_add(struct gpio_chip *chip)
+static inline int gpiochip_add(struct gpio_chip *gc)
{
- return gpiochip_add_data(chip, NULL);
+ return gpiochip_add_data(gc, NULL);
}
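A minimal registration sketch using the devm_ variant added above; the
private structure and callbacks are assumptions:

static int example_probe(struct platform_device *pdev)
{
        struct example_priv *priv;      /* hypothetical driver state */
        struct gpio_chip *gc;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        gc = &priv->gc;
        gc->label = dev_name(&pdev->dev);
        gc->parent = &pdev->dev;
        gc->base = -1;                  /* always ask for dynamic IDs */
        gc->ngpio = 8;                  /* assumed line count */
        gc->get = example_get;          /* hypothetical callbacks */
        gc->set = example_set;

        /* Chip is removed automatically on driver detach. */
        return devm_gpiochip_add_data(&pdev->dev, gc, priv);
}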
-extern void gpiochip_remove(struct gpio_chip *chip);
-extern int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip,
- void *data);
+extern void gpiochip_remove(struct gpio_chip *gc);
+extern int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc, void *data,
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key);
extern struct gpio_chip *gpiochip_find(void *data,
- int (*match)(struct gpio_chip *chip, void *data));
+ int (*match)(struct gpio_chip *gc, void *data));
+
+bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset);
+int gpiochip_reqres_irq(struct gpio_chip *gc, unsigned int offset);
+void gpiochip_relres_irq(struct gpio_chip *gc, unsigned int offset);
+void gpiochip_disable_irq(struct gpio_chip *gc, unsigned int offset);
+void gpiochip_enable_irq(struct gpio_chip *gc, unsigned int offset);
+
+/* irq_data versions of the above */
+int gpiochip_irq_reqres(struct irq_data *data);
+void gpiochip_irq_relres(struct irq_data *data);
-bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset);
-int gpiochip_reqres_irq(struct gpio_chip *chip, unsigned int offset);
-void gpiochip_relres_irq(struct gpio_chip *chip, unsigned int offset);
-void gpiochip_disable_irq(struct gpio_chip *chip, unsigned int offset);
-void gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset);
+/* Paste this in your irq_chip structure */
+#define GPIOCHIP_IRQ_RESOURCE_HELPERS \
+ .irq_request_resources = gpiochip_irq_reqres, \
+ .irq_release_resources = gpiochip_irq_relres
+
+static inline void gpio_irq_chip_set_chip(struct gpio_irq_chip *girq,
+ const struct irq_chip *chip)
+{
+ /* Yes, dropping const is ugly, but it isn't like we have a choice */
+ girq->chip = (struct irq_chip *)chip;
+}
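A sketch combining the two helpers above: a const irq_chip carrying the
resource callbacks via the macro, then attached without dropping the const
qualifier at the call site. The mask/unmask bodies are assumed, and
IRQCHIP_IMMUTABLE is what such chips are expected to set:

static const struct irq_chip example_irq_chip = {
        .name           = "example",
        .irq_mask       = example_irq_mask,     /* hypothetical */
        .irq_unmask     = example_irq_unmask,   /* hypothetical */
        .flags          = IRQCHIP_IMMUTABLE,
        GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

/* in probe: */
gpio_irq_chip_set_chip(&gc->irq, &example_irq_chip);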
/* Line status inquiry for drivers */
-bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset);
-bool gpiochip_line_is_open_source(struct gpio_chip *chip, unsigned int offset);
+bool gpiochip_line_is_open_drain(struct gpio_chip *gc, unsigned int offset);
+bool gpiochip_line_is_open_source(struct gpio_chip *gc, unsigned int offset);
/* Sleep persistence inquiry for drivers */
-bool gpiochip_line_is_persistent(struct gpio_chip *chip, unsigned int offset);
-bool gpiochip_line_is_valid(const struct gpio_chip *chip, unsigned int offset);
+bool gpiochip_line_is_persistent(struct gpio_chip *gc, unsigned int offset);
+bool gpiochip_line_is_valid(const struct gpio_chip *gc, unsigned int offset);
/* get driver data */
-void *gpiochip_get_data(struct gpio_chip *chip);
+void *gpiochip_get_data(struct gpio_chip *gc);
struct bgpio_pdata {
const char *label;
@@ -536,28 +659,14 @@ struct bgpio_pdata {
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
-void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
+int gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc,
+ union gpio_irq_fwspec *gfwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type);
+int gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc,
+ union gpio_irq_fwspec *gfwspec,
unsigned int parent_hwirq,
unsigned int parent_type);
-void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
- unsigned int parent_hwirq,
- unsigned int parent_type);
-
-#else
-
-static inline void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
- unsigned int parent_hwirq,
- unsigned int parent_type)
-{
- return NULL;
-}
-
-static inline void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
- unsigned int parent_hwirq,
- unsigned int parent_type)
-{
- return NULL;
-}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
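Under the int-returning signatures above, a hierarchical driver can forward
directly to the generic two-cell helper; the fixed hwirq offset below is an
assumption for illustration:

static int example_child_to_parent_hwirq(struct gpio_chip *gc,
                                         unsigned int child_hwirq,
                                         unsigned int child_type,
                                         unsigned int *parent_hwirq,
                                         unsigned int *parent_type)
{
        *parent_hwirq = child_hwirq + 32;       /* assumed mapping */
        *parent_type = child_type;
        return 0;
}

/* in probe: */
gc->irq.child_to_parent_hwirq = example_child_to_parent_hwirq;
gc->irq.populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_twocell;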
@@ -572,6 +681,7 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
#define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3)
#define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */
#define BGPIOF_NO_OUTPUT BIT(5) /* only input */
+#define BGPIOF_NO_SET_ON_INPUT BIT(6)
int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq);
@@ -582,83 +692,24 @@ int gpiochip_irq_domain_activate(struct irq_domain *domain,
void gpiochip_irq_domain_deactivate(struct irq_domain *domain,
struct irq_data *data);
-void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int parent_irq);
-
-int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type,
- bool threaded,
- struct lock_class_key *lock_key,
- struct lock_class_key *request_key);
-
-bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
+bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc,
unsigned int offset);
-#ifdef CONFIG_LOCKDEP
-
-/*
- * Lockdep requires that each irqchip instance be created with a
- * unique key so as to avoid unnecessary warnings. This upfront
- * boilerplate static inlines provides such a key for each
- * unique instance.
- */
-static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type)
-{
- static struct lock_class_key lock_key;
- static struct lock_class_key request_key;
-
- return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
- handler, type, false,
- &lock_key, &request_key);
-}
-
-static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type)
-{
-
- static struct lock_class_key lock_key;
- static struct lock_class_key request_key;
-
- return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
- handler, type, true,
- &lock_key, &request_key);
-}
-#else /* ! CONFIG_LOCKDEP */
-static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type)
-{
- return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
- handler, type, false, NULL, NULL);
-}
-
-static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type)
+#ifdef CONFIG_GPIOLIB_IRQCHIP
+int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
+ struct irq_domain *domain);
+#else
+static inline int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
+ struct irq_domain *domain)
{
- return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
- handler, type, true, NULL, NULL);
+ WARN_ON(1);
+ return -EINVAL;
}
-#endif /* CONFIG_LOCKDEP */
+#endif
-int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset);
-void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset);
-int gpiochip_generic_config(struct gpio_chip *chip, unsigned offset,
+int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset);
+void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset);
+int gpiochip_generic_config(struct gpio_chip *gc, unsigned int offset,
unsigned long config);
/**
@@ -675,25 +726,25 @@ struct gpio_pin_range {
#ifdef CONFIG_PINCTRL
-int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
unsigned int gpio_offset, unsigned int pin_offset,
unsigned int npins);
-int gpiochip_add_pingroup_range(struct gpio_chip *chip,
+int gpiochip_add_pingroup_range(struct gpio_chip *gc,
struct pinctrl_dev *pctldev,
unsigned int gpio_offset, const char *pin_group);
-void gpiochip_remove_pin_ranges(struct gpio_chip *chip);
+void gpiochip_remove_pin_ranges(struct gpio_chip *gc);
#else /* ! CONFIG_PINCTRL */
static inline int
-gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
unsigned int gpio_offset, unsigned int pin_offset,
unsigned int npins)
{
return 0;
}
static inline int
-gpiochip_add_pingroup_range(struct gpio_chip *chip,
+gpiochip_add_pingroup_range(struct gpio_chip *gc,
struct pinctrl_dev *pctldev,
unsigned int gpio_offset, const char *pin_group)
{
@@ -701,27 +752,24 @@ gpiochip_add_pingroup_range(struct gpio_chip *chip,
}
static inline void
-gpiochip_remove_pin_ranges(struct gpio_chip *chip)
+gpiochip_remove_pin_ranges(struct gpio_chip *gc)
{
}
#endif /* CONFIG_PINCTRL */
-struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip,
+struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
unsigned int hwnum,
const char *label,
enum gpio_lookup_flags lflags,
enum gpiod_flags dflags);
void gpiochip_free_own_desc(struct gpio_desc *desc);
-void devprop_gpiochip_set_names(struct gpio_chip *chip,
- const struct fwnode_handle *fwnode);
-
#ifdef CONFIG_GPIOLIB
/* lock/unlock as IRQ */
-int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset);
-void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset);
+int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset);
+void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset);
struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc);
@@ -735,18 +783,43 @@ static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
return ERR_PTR(-ENODEV);
}
-static inline int gpiochip_lock_as_irq(struct gpio_chip *chip,
+static inline int gpiochip_lock_as_irq(struct gpio_chip *gc,
unsigned int offset)
{
WARN_ON(1);
return -EINVAL;
}
-static inline void gpiochip_unlock_as_irq(struct gpio_chip *chip,
+static inline void gpiochip_unlock_as_irq(struct gpio_chip *gc,
unsigned int offset)
{
WARN_ON(1);
}
#endif /* CONFIG_GPIOLIB */
+#define for_each_gpiochip_node(dev, child) \
+ device_for_each_child_node(dev, child) \
+ if (!fwnode_property_present(child, "gpio-controller")) {} else
+
+static inline unsigned int gpiochip_node_count(struct device *dev)
+{
+ struct fwnode_handle *child;
+ unsigned int count = 0;
+
+ for_each_gpiochip_node(dev, child)
+ count++;
+
+ return count;
+}
+
+static inline struct fwnode_handle *gpiochip_node_get_first(struct device *dev)
+{
+ struct fwnode_handle *fwnode;
+
+ for_each_gpiochip_node(dev, fwnode)
+ return fwnode;
+
+ return NULL;
+}
+
#endif /* __LINUX_GPIO_DRIVER_H */
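The fwnode helpers added above can drive a probe loop over GPIO banks
modelled as child nodes; the per-bank registration call is hypothetical.
Note the reference drop on early exit, since device_for_each_child_node()
hands out a counted reference:

static int example_probe_banks(struct device *dev)
{
        struct fwnode_handle *child;
        int ret;

        if (!gpiochip_node_count(dev))
                return -ENODEV;

        for_each_gpiochip_node(dev, child) {
                ret = example_register_bank(dev, child); /* assumed */
                if (ret) {
                        fwnode_handle_put(child);
                        return ret;
                }
        }

        return 0;
}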
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index 1ebe5be05d5f..0b619eb7ae83 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -14,14 +14,18 @@ enum gpio_lookup_flags {
GPIO_TRANSITORY = (1 << 3),
GPIO_PULL_UP = (1 << 4),
GPIO_PULL_DOWN = (1 << 5),
+ GPIO_PULL_DISABLE = (1 << 6),
GPIO_LOOKUP_FLAGS_DEFAULT = GPIO_ACTIVE_HIGH | GPIO_PERSISTENT,
};
/**
* struct gpiod_lookup - lookup table
- * @chip_label: name of the chip the GPIO belongs to
- * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO
+ * @key: either the name of the chip the GPIO belongs to, or the GPIO line name
+ * Note that GPIO line names are not guaranteed to be globally unique,
+ * so this will use the first match found!
+ * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO, or
+ * U16_MAX to indicate that @key is a GPIO line name
* @con_id: name of the GPIO from the device's point of view
* @idx: index of the GPIO in case several GPIOs share the same name
* @flags: bitmask of gpio_lookup_flags GPIO_* values
@@ -30,7 +34,7 @@ enum gpio_lookup_flags {
* functions using platform data.
*/
struct gpiod_lookup {
- const char *chip_label;
+ const char *key;
u16 chip_hwnum;
const char *con_id;
unsigned int idx;
@@ -61,19 +65,31 @@ struct gpiod_hog {
};
/*
+ * Helper for lookup tables with a single lookup for a device.
+ */
+#define GPIO_LOOKUP_SINGLE(_name, _dev_id, _key, _chip_hwnum, _con_id, _flags) \
+static struct gpiod_lookup_table _name = { \
+ .dev_id = _dev_id, \
+ .table = { \
+ GPIO_LOOKUP(_key, _chip_hwnum, _con_id, _flags), \
+ {}, \
+ }, \
+}
+
+/*
* Simple definition of a single GPIO under a con_id
*/
-#define GPIO_LOOKUP(_chip_label, _chip_hwnum, _con_id, _flags) \
- GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, 0, _flags)
+#define GPIO_LOOKUP(_key, _chip_hwnum, _con_id, _flags) \
+ GPIO_LOOKUP_IDX(_key, _chip_hwnum, _con_id, 0, _flags)
/*
* Use this macro if you need to have several GPIOs under the same con_id.
* Each GPIO needs to use a different index and can be accessed using
* gpiod_get_index()
*/
-#define GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, _idx, _flags) \
-{ \
- .chip_label = _chip_label, \
+#define GPIO_LOOKUP_IDX(_key, _chip_hwnum, _con_id, _idx, _flags) \
+(struct gpiod_lookup) { \
+ .key = _key, \
.chip_hwnum = _chip_hwnum, \
.con_id = _con_id, \
.idx = _idx, \
@@ -84,7 +100,7 @@ struct gpiod_hog {
* Simple definition of a single GPIO hog in an array.
*/
#define GPIO_HOG(_chip_label, _chip_hwnum, _line_name, _lflags, _dflags) \
-{ \
+(struct gpiod_hog) { \
.chip_label = _chip_label, \
.chip_hwnum = _chip_hwnum, \
.line_name = _line_name, \
@@ -97,6 +113,7 @@ void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n);
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
void gpiod_add_hogs(struct gpiod_hog *hogs);
+void gpiod_remove_hogs(struct gpiod_hog *hogs);
#else /* ! CONFIG_GPIOLIB */
static inline
void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {}
@@ -105,6 +122,7 @@ void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n) {}
static inline
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {}
static inline void gpiod_add_hogs(struct gpiod_hog *hogs) {}
+static inline void gpiod_remove_hogs(struct gpiod_hog *hogs) {}
#endif /* CONFIG_GPIOLIB */
#endif /* __LINUX_GPIO_MACHINE_H */
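A board-file sketch of the renamed @key lookups, built with the new
single-entry helper; the chip label, line number and dev_id are assumptions:

#include <linux/gpio/machine.h>

/* Route line 4 of the chip labelled "gpio-bank0" to the "reset" con_id
 * of device "example.0", active low. */
GPIO_LOOKUP_SINGLE(example_gpios, "example.0",
                   "gpio-bank0", 4, "reset", GPIO_ACTIVE_LOW);

static int __init example_board_init(void)
{
        gpiod_add_lookup_table(&example_gpios);
        return 0;
}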
diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h
new file mode 100644
index 000000000000..a9f7b7faf57b
--- /dev/null
+++ b/include/linux/gpio/regmap.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _LINUX_GPIO_REGMAP_H
+#define _LINUX_GPIO_REGMAP_H
+
+struct device;
+struct fwnode_handle;
+struct gpio_regmap;
+struct irq_domain;
+struct regmap;
+
+#define GPIO_REGMAP_ADDR_ZERO ((unsigned int)(-1))
+#define GPIO_REGMAP_ADDR(addr) ((addr) ? : GPIO_REGMAP_ADDR_ZERO)
+
+/**
+ * struct gpio_regmap_config - Description of a generic regmap gpio_chip.
+ * @parent: The parent device
+ * @regmap: The regmap used to access the registers
+ * @fwnode: (Optional) The firmware node.
+ * If not given, the fwnode of the parent is used.
+ * @label: (Optional) Descriptive name for GPIO controller.
+ * If not given, the name of the device is used.
+ * @ngpio: Number of GPIOs
+ * @names: (Optional) Array of names for gpios
+ * @reg_dat_base: (Optional) (in) register base address
+ * @reg_set_base: (Optional) set register base address
+ * @reg_clr_base: (Optional) clear register base address
+ * @reg_dir_in_base: (Optional) in setting register base address
+ * @reg_dir_out_base: (Optional) out setting register base address
+ * @reg_stride: (Optional) May be set if the registers (of the
+ * same type, dat, set, etc) are not consecutive.
+ * @ngpio_per_reg: Number of GPIOs per register
+ * @irq_domain: (Optional) IRQ domain if the controller is
+ * interrupt-capable
+ * @reg_mask_xlate: (Optional) Translates base address and GPIO
+ * offset to a register/bitmask pair. If not
+ * given the default gpio_regmap_simple_xlate()
+ * is used.
+ * @drvdata: (Optional) Pointer to driver specific data which is
+ * not used by gpio-regmap but is provided "as is" to the
+ * driver callback(s).
+ *
+ * The ->reg_mask_xlate translates a given base address and GPIO offset to
+ * register and mask pair. The base address is one of the given register
+ * base addresses in this structure.
+ *
+ * Although all register base addresses are marked as optional, there are
+ * several rules:
+ * 1. if you only have @reg_dat_base set, then it is input-only
+ * 2. if you only have @reg_set_base set, then it is output-only
+ * 3. if you have either @reg_dir_in_base or @reg_dir_out_base set, then
+ * you have to set both @reg_dat_base and @reg_set_base
+ * 4. if you have @reg_set_base set, you may also set @reg_clr_base to have
+ * two different registers for setting and clearing the output. This is
+ * also valid for the output-only case.
+ * 5. @reg_dir_in_base and @reg_dir_out_base are exclusive; is there really
+ * hardware which has redundant registers?
+ *
+ * Note: All base addresses may have the special value %GPIO_REGMAP_ADDR_ZERO
+ * which forces the address to the value 0.
+ */
+struct gpio_regmap_config {
+ struct device *parent;
+ struct regmap *regmap;
+ struct fwnode_handle *fwnode;
+
+ const char *label;
+ int ngpio;
+ const char *const *names;
+
+ unsigned int reg_dat_base;
+ unsigned int reg_set_base;
+ unsigned int reg_clr_base;
+ unsigned int reg_dir_in_base;
+ unsigned int reg_dir_out_base;
+ int reg_stride;
+ int ngpio_per_reg;
+ struct irq_domain *irq_domain;
+
+ int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base,
+ unsigned int offset, unsigned int *reg,
+ unsigned int *mask);
+
+ void *drvdata;
+};
+
+struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config);
+void gpio_regmap_unregister(struct gpio_regmap *gpio);
+struct gpio_regmap *devm_gpio_regmap_register(struct device *dev,
+ const struct gpio_regmap_config *config);
+void *gpio_regmap_get_drvdata(struct gpio_regmap *gpio);
+
+#endif /* _LINUX_GPIO_REGMAP_H */
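A hedged registration sketch following the rules spelled out in the config
comment (data plus set registers, plus an output-direction register); the
register offsets, line count and regmap are assumptions:

#include <linux/err.h>
#include <linux/gpio/regmap.h>

static int example_gpio_regmap_probe(struct device *dev, struct regmap *map)
{
        struct gpio_regmap_config cfg = { 0 };
        struct gpio_regmap *gr;

        cfg.parent = dev;
        cfg.regmap = map;
        cfg.ngpio = 16;                            /* assumed */
        cfg.reg_dat_base = GPIO_REGMAP_ADDR(0x00); /* 0 forced via ADDR_ZERO */
        cfg.reg_set_base = GPIO_REGMAP_ADDR(0x04);
        cfg.reg_dir_out_base = GPIO_REGMAP_ADDR(0x08);
        cfg.ngpio_per_reg = 16;

        gr = devm_gpio_regmap_register(dev, &cfg);
        return PTR_ERR_OR_ZERO(gr);
}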
diff --git a/include/linux/greybus/greybus_manifest.h b/include/linux/greybus/greybus_manifest.h
index 6e62fe478712..bef9eb2093e9 100644
--- a/include/linux/greybus/greybus_manifest.h
+++ b/include/linux/greybus/greybus_manifest.h
@@ -100,7 +100,7 @@ enum {
struct greybus_descriptor_string {
__u8 length;
__u8 id;
- __u8 string[0];
+ __u8 string[];
} __packed;
/*
@@ -175,7 +175,7 @@ struct greybus_manifest_header {
struct greybus_manifest {
struct greybus_manifest_header header;
- struct greybus_descriptor descriptors[0];
+ struct greybus_descriptor descriptors[];
} __packed;
#endif /* __GREYBUS_MANIFEST_H */
diff --git a/include/linux/greybus/greybus_protocols.h b/include/linux/greybus/greybus_protocols.h
index dfbc6c39a74b..aeb8f9243545 100644
--- a/include/linux/greybus/greybus_protocols.h
+++ b/include/linux/greybus/greybus_protocols.h
@@ -345,7 +345,7 @@ struct gb_cap_get_ims_certificate_request {
struct gb_cap_get_ims_certificate_response {
__u8 result_code;
- __u8 certificate[0];
+ __u8 certificate[];
} __packed;
/* CAP authenticate request/response */
@@ -358,7 +358,7 @@ struct gb_cap_authenticate_request {
struct gb_cap_authenticate_response {
__u8 result_code;
__u8 response[64];
- __u8 signature[0];
+ __u8 signature[];
} __packed;
@@ -642,7 +642,7 @@ struct gb_hid_get_report_request {
struct gb_hid_set_report_request {
__u8 report_type;
__u8 report_id;
- __u8 report[0];
+ __u8 report[];
} __packed;
/* HID input report request, via interrupt pipe */
@@ -680,7 +680,7 @@ struct gb_i2c_transfer_op {
struct gb_i2c_transfer_request {
__le16 op_count;
- struct gb_i2c_transfer_op ops[0]; /* op_count of these */
+ struct gb_i2c_transfer_op ops[]; /* op_count of these */
} __packed;
struct gb_i2c_transfer_response {
__u8 data[0]; /* inbound data */
@@ -908,7 +908,7 @@ struct gb_spi_transfer_request {
__u8 chip_select; /* of the spi device */
__u8 mode; /* of the spi device */
__le16 count;
- struct gb_spi_transfer transfers[0]; /* count of these */
+ struct gb_spi_transfer transfers[]; /* count of these */
} __packed;
struct gb_spi_transfer_response {
@@ -1188,7 +1188,7 @@ struct gb_svc_pwrmon_rail_count_get_response {
struct gb_svc_pwrmon_rail_names_get_response {
__u8 status;
- __u8 name[0][GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];
+ __u8 name[][GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];
} __packed;
#define GB_SVC_PWRMON_TYPE_CURR 0x01
@@ -1281,7 +1281,7 @@ struct gb_svc_intf_oops_request {
struct gb_raw_send_request {
__le32 len;
- __u8 data[0];
+ __u8 data[];
} __packed;
@@ -1300,7 +1300,7 @@ struct gb_raw_send_request {
/* Represents data from AP -> Module */
struct gb_uart_send_data_request {
__le16 size;
- __u8 data[0];
+ __u8 data[];
} __packed;
/* recv-data-request flags */
@@ -1313,7 +1313,7 @@ struct gb_uart_send_data_request {
struct gb_uart_recv_data_request {
__le16 size;
__u8 flags;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct gb_uart_receive_credits_request {
@@ -1382,14 +1382,14 @@ struct gb_loopback_transfer_request {
__le32 len;
__le32 reserved0;
__le32 reserved1;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct gb_loopback_transfer_response {
__le32 len;
__le32 reserved0;
__le32 reserved1;
- __u8 data[0];
+ __u8 data[];
} __packed;
/* SDIO */
@@ -1530,13 +1530,13 @@ struct gb_sdio_transfer_request {
__le16 data_blocks;
__le16 data_blksz;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct gb_sdio_transfer_response {
__le16 data_blocks;
__le16 data_blksz;
- __u8 data[0];
+ __u8 data[];
} __packed;
/* event request: generated by module and is defined as unidirectional */
@@ -1572,7 +1572,7 @@ struct gb_camera_configure_streams_request {
__u8 flags;
#define GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY 0x01
__le16 padding;
- struct gb_camera_stream_config_request config[0];
+ struct gb_camera_stream_config_request config[];
} __packed;
/* Greybus Camera Configure Streams response payload */
@@ -1593,7 +1593,7 @@ struct gb_camera_configure_streams_response {
__u8 flags;
__u8 padding[2];
__le32 data_rate;
- struct gb_camera_stream_config_response config[0];
+ struct gb_camera_stream_config_response config[];
};
/* Greybus Camera Capture request payload - response has no payload */
@@ -1602,7 +1602,7 @@ struct gb_camera_capture_request {
__u8 streams;
__u8 padding;
__le16 num_frames;
- __u8 settings[0];
+ __u8 settings[];
} __packed;
/* Greybus Camera Flush response payload - request has no payload */
@@ -1616,7 +1616,7 @@ struct gb_camera_metadata_request {
__le16 frame_number;
__u8 stream;
__u8 padding;
- __u8 metadata[0];
+ __u8 metadata[];
} __packed;
/* Lights */
@@ -1993,7 +1993,7 @@ struct gb_audio_integer64 {
struct gb_audio_enumerated {
__le32 items;
__le16 names_length;
- __u8 names[0];
+ __u8 names[];
} __packed;
struct gb_audio_ctl_elem_info { /* See snd_ctl_elem_info in Linux source */
@@ -2033,7 +2033,7 @@ struct gb_audio_widget {
__u8 type; /* GB_AUDIO_WIDGET_TYPE_* */
__u8 state; /* GB_AUDIO_WIDGET_STATE_* */
__u8 ncontrols;
- struct gb_audio_control ctl[0]; /* 'ncontrols' entries */
+ struct gb_audio_control ctl[]; /* 'ncontrols' entries */
} __packed;
struct gb_audio_route {
@@ -2059,7 +2059,7 @@ struct gb_audio_topology {
* struct gb_audio_widget widgets[num_widgets];
* struct gb_audio_route routes[num_routes];
*/
- __u8 data[0];
+ __u8 data[];
} __packed;
struct gb_audio_get_topology_size_response {
@@ -2157,7 +2157,7 @@ struct gb_audio_streaming_event_request {
struct gb_audio_send_data_request {
__le64 timestamp;
- __u8 data[0];
+ __u8 data[];
} __packed;
@@ -2171,7 +2171,7 @@ struct gb_audio_send_data_request {
struct gb_log_send_log_request {
__le16 len;
- __u8 msg[0];
+ __u8 msg[];
} __packed;
#endif /* __GREYBUS_PROTOCOLS_H */
diff --git a/include/linux/greybus/hd.h b/include/linux/greybus/hd.h
index d3faf0c1a569..718e2857054e 100644
--- a/include/linux/greybus/hd.h
+++ b/include/linux/greybus/hd.h
@@ -58,7 +58,7 @@ struct gb_host_device {
struct gb_svc *svc;
/* Private data for the host driver */
- unsigned long hd_priv[0] __aligned(sizeof(s64));
+ unsigned long hd_priv[] __aligned(sizeof(s64));
};
#define to_gb_host_device(d) container_of(d, struct gb_host_device, dev)
diff --git a/include/linux/greybus/module.h b/include/linux/greybus/module.h
index 47b839af145d..3efe2133acfd 100644
--- a/include/linux/greybus/module.h
+++ b/include/linux/greybus/module.h
@@ -23,7 +23,7 @@ struct gb_module {
bool disconnected;
- struct gb_interface *interfaces[0];
+ struct gb_interface *interfaces[];
};
#define to_gb_module(d) container_of(d, struct gb_module, dev)
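The [0] to [] conversions in this series move these structs to C99 flexible
array members, so allocation sites can size them with struct_size() and get
overflow checking for free. A sketch against struct gb_module, with
num_interfaces assumed in scope:

#include <linux/overflow.h>
#include <linux/slab.h>

struct gb_module *module;

/* One block holding the struct plus num_interfaces trailing pointers;
 * struct_size() saturates instead of overflowing the multiplication. */
module = kzalloc(struct_size(module, interfaces, num_interfaces),
                 GFP_KERNEL);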
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index da0af631ded5..d57cab4d4c06 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -2,31 +2,29 @@
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H
+#include <linux/context_tracking_state.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
+#include <linux/sched.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>
-
extern void synchronize_irq(unsigned int irq);
extern bool synchronize_hardirq(unsigned int irq);
-#if defined(CONFIG_TINY_RCU)
-
-static inline void rcu_nmi_enter(void)
-{
-}
+#ifdef CONFIG_NO_HZ_FULL
+void __rcu_irq_enter_check_tick(void);
+#else
+static inline void __rcu_irq_enter_check_tick(void) { }
+#endif
-static inline void rcu_nmi_exit(void)
+static __always_inline void rcu_irq_enter_check_tick(void)
{
+ if (context_tracking_enabled())
+ __rcu_irq_enter_check_tick();
}
-#else
-extern void rcu_nmi_enter(void);
-extern void rcu_nmi_exit(void);
-#endif
-
/*
* It is safe to do non-atomic ops on ->hardirq_context,
* because NMI handlers may not preempt and the ops are
@@ -35,58 +33,111 @@ extern void rcu_nmi_exit(void);
*/
#define __irq_enter() \
do { \
- account_irq_enter_time(current); \
preempt_count_add(HARDIRQ_OFFSET); \
- trace_hardirq_enter(); \
+ lockdep_hardirq_enter(); \
+ account_hardirq_enter(current); \
+ } while (0)
+
+/*
+ * Like __irq_enter() without time accounting for fast
+ * interrupts, e.g. reschedule IPI where time accounting
+ * is more expensive than the actual interrupt.
+ */
+#define __irq_enter_raw() \
+ do { \
+ preempt_count_add(HARDIRQ_OFFSET); \
+ lockdep_hardirq_enter(); \
} while (0)
/*
* Enter irq context (on NO_HZ, update jiffies):
*/
-extern void irq_enter(void);
+void irq_enter(void);
+/*
+ * Like irq_enter(), but RCU is already watching.
+ */
+void irq_enter_rcu(void);
/*
* Exit irq context without processing softirqs:
*/
#define __irq_exit() \
do { \
- trace_hardirq_exit(); \
- account_irq_exit_time(current); \
+ account_hardirq_exit(current); \
+ lockdep_hardirq_exit(); \
+ preempt_count_sub(HARDIRQ_OFFSET); \
+ } while (0)
+
+/*
+ * Like __irq_exit() without time accounting
+ */
+#define __irq_exit_raw() \
+ do { \
+ lockdep_hardirq_exit(); \
preempt_count_sub(HARDIRQ_OFFSET); \
} while (0)
/*
* Exit irq context and process softirqs if needed:
*/
-extern void irq_exit(void);
+void irq_exit(void);
+
+/*
+ * Like irq_exit(), but return with RCU watching.
+ */
+void irq_exit_rcu(void);
#ifndef arch_nmi_enter
#define arch_nmi_enter() do { } while (0)
#define arch_nmi_exit() do { } while (0)
#endif
-#define nmi_enter() \
+/*
+ * NMI vs Tracing
+ * --------------
+ *
+ * We must not land in a tracer until (or after) we've changed preempt_count
+ * such that in_nmi() becomes true. To that effect all NMI C entry points must
+ * be marked 'notrace' and call nmi_enter() as soon as possible.
+ */
+
+/*
+ * nmi_enter() can nest up to 15 times; see NMI_BITS.
+ */
+#define __nmi_enter() \
do { \
- arch_nmi_enter(); \
- printk_nmi_enter(); \
lockdep_off(); \
+ arch_nmi_enter(); \
+ BUG_ON(in_nmi() == NMI_MASK); \
+ __preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
+ } while (0)
+
+#define nmi_enter() \
+ do { \
+ __nmi_enter(); \
+ lockdep_hardirq_enter(); \
+ ct_nmi_enter(); \
+ instrumentation_begin(); \
ftrace_nmi_enter(); \
- BUG_ON(in_nmi()); \
- preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
- rcu_nmi_enter(); \
- trace_hardirq_enter(); \
+ instrumentation_end(); \
} while (0)
-#define nmi_exit() \
+#define __nmi_exit() \
do { \
- trace_hardirq_exit(); \
- rcu_nmi_exit(); \
BUG_ON(!in_nmi()); \
- preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
- ftrace_nmi_exit(); \
- lockdep_on(); \
- printk_nmi_exit(); \
+ __preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
arch_nmi_exit(); \
+ lockdep_on(); \
+ } while (0)
+
+#define nmi_exit() \
+ do { \
+ instrumentation_begin(); \
+ ftrace_nmi_exit(); \
+ instrumentation_end(); \
+ ct_nmi_exit(); \
+ lockdep_hardirq_exit(); \
+ __nmi_exit(); \
} while (0)
#endif /* LINUX_HARDIRQ_H */
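Per the "NMI vs Tracing" comment above, arch entry code brackets its C
handler with these macros and stays notrace until in_nmi() is true. A
schematic sketch, with the handler body hypothetical:

#include <linux/hardirq.h>

notrace static void example_nmi_handler(struct pt_regs *regs)
{
        nmi_enter();            /* in_nmi() becomes true here */
        do_nmi_work(regs);      /* assumed; instrumentation is safe now */
        nmi_exit();
}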
diff --git a/include/linux/hash.h b/include/linux/hash.h
index ad6fa21d977b..38edaa08f862 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -62,10 +62,7 @@ static inline u32 __hash_32_generic(u32 val)
return val * GOLDEN_RATIO_32;
}
-#ifndef HAVE_ARCH_HASH_32
-#define hash_32 hash_32_generic
-#endif
-static inline u32 hash_32_generic(u32 val, unsigned int bits)
+static inline u32 hash_32(u32 val, unsigned int bits)
{
/* High bits are more random, so use them. */
return __hash_32(val) >> (32 - bits);
diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h
index 417d2c4bc60d..f6c666730b8c 100644
--- a/include/linux/hashtable.h
+++ b/include/linux/hashtable.h
@@ -145,7 +145,7 @@ static inline void hash_del_rcu(struct hlist_node *node)
* hash entry
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
- * @tmp: a &struct used for temporary storage
+ * @tmp: a &struct hlist_node used for temporary storage
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
@@ -173,9 +173,9 @@ static inline void hash_del_rcu(struct hlist_node *node)
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
-#define hash_for_each_possible_rcu(name, obj, member, key) \
+#define hash_for_each_possible_rcu(name, obj, member, key, cond...) \
hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\
- member)
+ member, ## cond)
/**
* hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing
@@ -197,7 +197,7 @@ static inline void hash_del_rcu(struct hlist_node *node)
* same bucket safe against removals
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
- * @tmp: a &struct used for temporary storage
+ * @tmp: a &struct hlist_node used for temporary storage
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
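A small lookup sketch exercising the optional cond argument added above,
which is forwarded to hlist_for_each_entry_rcu(); the object type is
hypothetical:

#include <linux/hashtable.h>
#include <linux/rcupdate.h>

struct example_obj {
        u32 key;
        struct hlist_node node;
};

static DEFINE_HASHTABLE(example_table, 6);      /* 64 buckets */

static struct example_obj *example_lookup(u32 key)
{
        struct example_obj *obj;

        /* The trailing condition feeds the RCU list-walk lockdep check. */
        hash_for_each_possible_rcu(example_table, obj, node, key,
                                   rcu_read_lock_held())
                if (obj->key == key)
                        return obj;

        return NULL;
}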
diff --git a/include/linux/hashtable_api.h b/include/linux/hashtable_api.h
new file mode 100644
index 000000000000..c268ac2c5c0e
--- /dev/null
+++ b/include/linux/hashtable_api.h
@@ -0,0 +1 @@
+#include <linux/hashtable.h>
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index cacc4dd27794..630a388035f1 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -22,7 +22,7 @@ struct hdlc_proto {
void (*start)(struct net_device *dev); /* if open & DCD */
void (*stop)(struct net_device *dev); /* if open & !DCD */
void (*detach)(struct net_device *dev);
- int (*ioctl)(struct net_device *dev, struct ifreq *ifr);
+ int (*ioctl)(struct net_device *dev, struct if_settings *ifs);
__be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev);
int (*netif_rx)(struct sk_buff *skb);
netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev);
@@ -54,7 +54,7 @@ typedef struct hdlc_device {
/* Exported from hdlc module */
/* Called by hardware driver when a user requests HDLC service */
-int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+int hdlc_ioctl(struct net_device *dev, struct if_settings *ifs);
/* Must be used by hardware driver on module startup/exit */
#define register_hdlc_device(dev) register_netdev(dev)
diff --git a/include/linux/hdlcdrv.h b/include/linux/hdlcdrv.h
index d4d633a49d36..5d70c3f98f5b 100644
--- a/include/linux/hdlcdrv.h
+++ b/include/linux/hdlcdrv.h
@@ -79,7 +79,7 @@ struct hdlcdrv_ops {
*/
int (*open)(struct net_device *);
int (*close)(struct net_device *);
- int (*ioctl)(struct net_device *, struct ifreq *,
+ int (*ioctl)(struct net_device *, void __user *,
struct hdlcdrv_ioctl *, int);
};
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 9918a6c910c5..2f4dcc8d060e 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -57,6 +57,7 @@ enum hdmi_infoframe_type {
#define HDMI_SPD_INFOFRAME_SIZE 25
#define HDMI_AUDIO_INFOFRAME_SIZE 10
#define HDMI_DRM_INFOFRAME_SIZE 26
+#define HDMI_VENDOR_INFOFRAME_SIZE 4
#define HDMI_INFOFRAME_SIZE(type) \
(HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE)
@@ -155,7 +156,7 @@ enum hdmi_content_type {
};
enum hdmi_metadata_type {
- HDMI_STATIC_METADATA_TYPE1 = 1,
+ HDMI_STATIC_METADATA_TYPE1 = 0,
};
enum hdmi_eotf {
@@ -207,7 +208,7 @@ struct hdmi_drm_infoframe {
u16 max_fall;
};
-int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
+void hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
size_t size);
ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame,
@@ -219,6 +220,8 @@ ssize_t hdmi_drm_infoframe_pack(struct hdmi_drm_infoframe *frame, void *buffer,
ssize_t hdmi_drm_infoframe_pack_only(const struct hdmi_drm_infoframe *frame,
void *buffer, size_t size);
int hdmi_drm_infoframe_check(struct hdmi_drm_infoframe *frame);
+int hdmi_drm_infoframe_unpack_only(struct hdmi_drm_infoframe *frame,
+ const void *buffer, size_t size);
enum hdmi_spd_sdi {
HDMI_SPD_SDI_UNKNOWN,
@@ -333,7 +336,12 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
void *buffer, size_t size);
ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame,
void *buffer, size_t size);
-int hdmi_audio_infoframe_check(struct hdmi_audio_infoframe *frame);
+int hdmi_audio_infoframe_check(const struct hdmi_audio_infoframe *frame);
+
+struct dp_sdp;
+ssize_t
+hdmi_audio_infoframe_pack_for_dp(const struct hdmi_audio_infoframe *frame,
+ struct dp_sdp *sdp, u8 dp_version);
enum hdmi_3d_structure {
HDMI_3D_STRUCTURE_INVALID = -1,
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 46bcef380446..c27329e2a5ad 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -150,7 +150,7 @@ int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
* @info: return information about attribute after parsing report
*
* Parses report and returns the attribute information such as report id,
-* field index, units and exponet etc.
+* field index, units and exponent etc.
*/
int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
u8 type,
@@ -167,7 +167,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
* @is_signed: If true then fields < 32 bits will be sign-extended
*
* Issues a synchronous or asynchronous read request for an input attribute.
-* Returns data upto 32 bits.
+* Return: data up to 32 bits.
*/
enum sensor_hub_read_flags {
@@ -205,8 +205,9 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
* @buffer: buffer to copy output
*
* Used to get a field in feature report. For example this can get polling
-* interval, sensitivity, activate/deactivate state. On success it returns
-* number of bytes copied to buffer. On failure, it returns value < 0.
+* interval, sensitivity, activate/deactivate state.
+* Return: On success, it returns the number of bytes copied to buffer.
+* On failure, it returns a negative value.
*/
int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
u32 field_index, int buffer_size, void *buffer);
@@ -230,6 +231,7 @@ struct hid_sensor_common {
struct hid_sensor_hub_attribute_info report_state;
struct hid_sensor_hub_attribute_info power_state;
struct hid_sensor_hub_attribute_info sensitivity;
+ struct hid_sensor_hub_attribute_info sensitivity_rel;
struct hid_sensor_hub_attribute_info report_latency;
struct work_struct work;
};
@@ -247,11 +249,17 @@ static inline int hid_sensor_convert_exponent(int unit_expo)
int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
u32 usage_id,
- struct hid_sensor_common *st);
+ struct hid_sensor_common *st,
+ const u32 *sensitivity_addresses,
+ u32 sensitivity_addresses_len);
int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st,
int val1, int val2);
+int hid_sensor_write_raw_hyst_rel_value(struct hid_sensor_common *st, int val1,
+ int val2);
int hid_sensor_read_raw_hyst_value(struct hid_sensor_common *st,
int *val1, int *val2);
+int hid_sensor_read_raw_hyst_rel_value(struct hid_sensor_common *st,
+ int *val1, int *val2);
int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
int val1, int val2);
int hid_sensor_read_samp_freq_value(struct hid_sensor_common *st,
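hid_sensor_parse_common_attributes() now also takes the usage IDs whose change-sensitivity fields the driver cares about. A hedged sketch, assuming an ambient-light driver whose common state lives in als->common (the als names, hsdev and usage_id are hypothetical context):

	#include <linux/hid-sensor-hub.h>
	#include <linux/hid-sensor-ids.h>

	static const u32 als_sensitivity_addrs[] = {
		HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
		HID_USAGE_SENSOR_LIGHT_ILLUM,
	};

	/* in the probe path: */
	ret = hid_sensor_parse_common_attributes(hsdev, usage_id,
						 &als->common,
						 als_sensitivity_addrs,
						 ARRAY_SIZE(als_sensitivity_addrs));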
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index d82a97e311d3..ac631159403a 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -128,6 +128,10 @@
#define HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND 0x15
/* Common selectors */
+#define HID_USAGE_SENSOR_PROP_DESC 0x200300
+#define HID_USAGE_SENSOR_PROP_FRIENDLY_NAME 0x200301
+#define HID_USAGE_SENSOR_PROP_SERIAL_NUM 0x200307
+#define HID_USAGE_SENSOR_PROP_MANUFACTURER 0x200305
#define HID_USAGE_SENSOR_PROP_REPORT_INTERVAL 0x20030E
#define HID_USAGE_SENSOR_PROP_SENSITIVITY_ABS 0x20030F
#define HID_USAGE_SENSOR_PROP_SENSITIVITY_RANGE_PCT 0x200310
@@ -145,6 +149,7 @@
/* Per data field properties */
#define HID_USAGE_SENSOR_DATA_MOD_NONE 0x00
#define HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS 0x1000
+#define HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_REL_PCT 0xE000
/* Power state enumerations */
#define HID_USAGE_SENSOR_PROP_POWER_STATE_UNDEFINED_ENUM 0x200850
@@ -158,4 +163,14 @@
#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM 0x200840
#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM 0x200841
+/* Custom Sensor (2000e1) */
+#define HID_USAGE_SENSOR_HINGE 0x20020B
+#define HID_USAGE_SENSOR_DATA_FIELD_LOCATION 0x200400
+#define HID_USAGE_SENSOR_DATA_FIELE_TIME_SINCE_SYS_BOOT 0x20052B
+#define HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_USAGE 0x200541
+#define HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE_BASE 0x200543
+/* Custom Sensor data 28 >= x >= 0 */
+#define HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(x) \
+ (HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE_BASE + (x))
+
#endif
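The custom-value helper is a plain offset from the base usage, e.g.:

	/* field 5 of a custom sensor report: 0x200543 + 5 == 0x200548 */
	u32 usage = HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(5);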
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 875f71132b14..8677ae38599e 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -102,6 +102,7 @@ struct hid_item {
#define HID_COLLECTION_PHYSICAL 0
#define HID_COLLECTION_APPLICATION 1
#define HID_COLLECTION_LOGICAL 2
+#define HID_COLLECTION_NAMED_ARRAY 4
/*
* HID report descriptor global item tags
@@ -153,6 +154,7 @@ struct hid_item {
#define HID_UP_CONSUMER 0x000c0000
#define HID_UP_DIGITIZER 0x000d0000
#define HID_UP_PID 0x000f0000
+#define HID_UP_BATTERY 0x00850000
#define HID_UP_HPVENDOR 0xff7f0000
#define HID_UP_HPVENDOR2 0xff010000
#define HID_UP_MSVENDOR 0xff000000
@@ -163,6 +165,7 @@ struct hid_item {
#define HID_UP_LNVENDOR 0xffa00000
#define HID_UP_SENSOR 0x00200000
#define HID_UP_ASUSVENDOR 0xff310000
+#define HID_UP_GOOGLEVENDOR 0xffd10000
#define HID_USAGE 0x0000ffff
@@ -238,6 +241,7 @@ struct hid_item {
#define HID_DG_TOUCH 0x000d0033
#define HID_DG_UNTOUCH 0x000d0034
#define HID_DG_TAP 0x000d0035
+#define HID_DG_TRANSDUCER_INDEX 0x000d0038
#define HID_DG_TABLETFUNCTIONKEY 0x000d0039
#define HID_DG_PROGRAMCHANGEKEY 0x000d003a
#define HID_DG_BATTERYSTRENGTH 0x000d003b
@@ -250,6 +254,15 @@ struct hid_item {
#define HID_DG_BARRELSWITCH 0x000d0044
#define HID_DG_ERASER 0x000d0045
#define HID_DG_TABLETPICK 0x000d0046
+#define HID_DG_PEN_COLOR 0x000d005c
+#define HID_DG_PEN_LINE_WIDTH 0x000d005e
+#define HID_DG_PEN_LINE_STYLE 0x000d0070
+#define HID_DG_PEN_LINE_STYLE_INK 0x000d0072
+#define HID_DG_PEN_LINE_STYLE_PENCIL 0x000d0073
+#define HID_DG_PEN_LINE_STYLE_HIGHLIGHTER 0x000d0074
+#define HID_DG_PEN_LINE_STYLE_CHISEL_MARKER 0x000d0075
+#define HID_DG_PEN_LINE_STYLE_BRUSH 0x000d0076
+#define HID_DG_PEN_LINE_STYLE_NO_PREFERENCE 0x000d0077
#define HID_CP_CONSUMERCONTROL 0x000c0001
#define HID_CP_NUMERICKEYPAD 0x000c0002
@@ -261,6 +274,8 @@ struct hid_item {
#define HID_CP_SELECTION 0x000c0080
#define HID_CP_MEDIASELECTION 0x000c0087
#define HID_CP_SELECTDISC 0x000c00ba
+#define HID_CP_VOLUMEUP 0x000c00e9
+#define HID_CP_VOLUMEDOWN 0x000c00ea
#define HID_CP_PLAYBACKSPEED 0x000c00f1
#define HID_CP_PROXIMITY 0x000c0109
#define HID_CP_SPEAKERSYSTEM 0x000c0160
@@ -296,16 +311,9 @@ struct hid_item {
#define HID_DG_TOOLSERIALNUMBER 0x000d005b
#define HID_DG_LATENCYMODE 0x000d0060
-#define HID_VD_ASUS_CUSTOM_MEDIA_KEYS 0xff310076
-/*
- * HID report types --- Ouch! HID spec says 1 2 3!
- */
+#define HID_BAT_ABSOLUTESTATEOFCHARGE 0x00850065
-#define HID_INPUT_REPORT 0
-#define HID_OUTPUT_REPORT 1
-#define HID_FEATURE_REPORT 2
-
-#define HID_REPORT_TYPES 3
+#define HID_VD_ASUS_CUSTOM_MEDIA_KEYS 0xff310076
/*
* HID connect requests
@@ -325,12 +333,12 @@ struct hid_item {
* HID device quirks.
*/
-/*
+/*
* Increase this if you need to configure more HID quirks at module load time
*/
#define MAX_USBHID_BOOT_QUIRKS 4
-#define HID_QUIRK_INVERT BIT(0)
+/* BIT(0) reserved for backward compatibility, was HID_QUIRK_INVERT */
#define HID_QUIRK_NOTOUCH BIT(1)
#define HID_QUIRK_IGNORE BIT(2)
#define HID_QUIRK_NOGET BIT(3)
@@ -342,6 +350,8 @@ struct hid_item {
/* BIT(9) reserved for backward compatibility, was NO_INIT_INPUT_REPORTS */
#define HID_QUIRK_ALWAYS_POLL BIT(10)
#define HID_QUIRK_INPUT_PER_APP BIT(11)
+#define HID_QUIRK_X_INVERT BIT(12)
+#define HID_QUIRK_Y_INVERT BIT(13)
#define HID_QUIRK_SKIP_OUTPUT_REPORTS BIT(16)
#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID BIT(17)
#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP BIT(18)
@@ -371,6 +381,7 @@ struct hid_item {
#define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102
#define HID_GROUP_STEAM 0x0103
#define HID_GROUP_LOGITECH_27MHZ_DEVICE 0x0104
+#define HID_GROUP_VIVALDI 0x0105
/*
* HID protocol status
@@ -456,31 +467,50 @@ struct hid_field {
unsigned report_count; /* number of this field in the report */
unsigned report_type; /* (input,output,feature) */
__s32 *value; /* last known value(s) */
+ __s32 *new_value; /* newly read value(s) */
+ __s32 *usages_priorities; /* priority of each usage when reading the report
+ * bits 8-16 are reserved for hid-input usage
+ */
__s32 logical_minimum;
__s32 logical_maximum;
__s32 physical_minimum;
__s32 physical_maximum;
__s32 unit_exponent;
unsigned unit;
+ bool ignored; /* this field is ignored in this event */
struct hid_report *report; /* associated report */
unsigned index; /* index into report->field[] */
/* hidinput data */
struct hid_input *hidinput; /* associated input structure */
__u16 dpad; /* dpad input code */
+ unsigned int slot_idx; /* slot index in a report */
};
#define HID_MAX_FIELDS 256
+struct hid_field_entry {
+ struct list_head list;
+ struct hid_field *field;
+ unsigned int index;
+ __s32 priority;
+};
+
struct hid_report {
struct list_head list;
struct list_head hidinput_list;
+ struct list_head field_entry_list; /* ordered list of input fields */
unsigned int id; /* id of this report */
- unsigned int type; /* report type */
+ enum hid_report_type type; /* report type */
unsigned int application; /* application usage for this report */
struct hid_field *field[HID_MAX_FIELDS]; /* fields of the report */
+ struct hid_field_entry *field_entries; /* allocated memory of input field_entry */
unsigned maxfield; /* maximum valid field index */
unsigned size; /* size of the report (bits) */
struct hid_device *device; /* associated device */
+
+ /* tool related state */
+ bool tool_active; /* whether the current tool is active */
+ unsigned int tool; /* BTN_TOOL_* */
};
#define HID_MAX_IDS 256
@@ -492,7 +522,7 @@ struct hid_report_enum {
};
#define HID_MIN_BUFFER_SIZE 64 /* make sure there is at least a packet size of space */
-#define HID_MAX_BUFFER_SIZE 8192 /* 8kb */
+#define HID_MAX_BUFFER_SIZE 16384 /* 16kb */
#define HID_CONTROL_FIFO_SIZE 256 /* to init devices with >100 reports */
#define HID_OUTPUT_FIFO_SIZE 64
@@ -583,6 +613,7 @@ struct hid_device { /* device report descriptor */
__s32 battery_report_id;
enum hid_battery_status battery_status;
bool battery_avoid_query;
+ ktime_t battery_ratelimit_time;
#endif
unsigned long status; /* see STAT flags above */
@@ -618,6 +649,8 @@ struct hid_device { /* device report descriptor */
struct list_head debug_list;
spinlock_t debug_list_lock;
wait_queue_head_t debug_wait;
+
+ unsigned int id; /* system unique id */
};
#define to_hid_device(pdev) \
@@ -779,7 +812,7 @@ struct hid_driver {
container_of(pdrv, struct hid_driver, driver)
/**
- * hid_ll_driver - low level driver callbacks
+ * struct hid_ll_driver - low level driver callbacks
* @start: called on probe to start the device
* @stop: called on remove
* @open: called by input layer on open
@@ -792,6 +825,7 @@ struct hid_driver {
* @raw_request: send raw report request to device (e.g. feature report)
* @output_report: send output report to device
* @idle: send idle request to device
+ * @may_wakeup: return whether the device may act as a wakeup source during system-suspend
*/
struct hid_ll_driver {
int (*start)(struct hid_device *hdev);
@@ -816,6 +850,7 @@ struct hid_ll_driver {
int (*output_report) (struct hid_device *hdev, __u8 *buf, size_t len);
int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype);
+ bool (*may_wakeup)(struct hid_device *hdev);
};
extern struct hid_ll_driver i2c_hid_ll_driver;
@@ -829,6 +864,11 @@ static inline bool hid_is_using_ll_driver(struct hid_device *hdev,
return hdev->ll_driver == driver;
}
+static inline bool hid_is_usb(struct hid_device *hdev)
+{
+ return hid_is_using_ll_driver(hdev, &usb_hid_driver);
+}
+
#define PM_HINT_FULLON 1<<5
#define PM_HINT_NORMAL 1<<1
@@ -877,21 +917,21 @@ extern int hidinput_connect(struct hid_device *hid, unsigned int force);
extern void hidinput_disconnect(struct hid_device *);
int hid_set_field(struct hid_field *, unsigned, __s32);
-int hid_input_report(struct hid_device *, int type, u8 *, u32, int);
-int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field);
+int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
+ int interrupt);
struct hid_field *hidinput_get_led_field(struct hid_device *hid);
unsigned int hidinput_count_leds(struct hid_device *hid);
__s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code);
void hid_output_report(struct hid_report *report, __u8 *data);
-int __hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype);
+int __hid_request(struct hid_device *hid, struct hid_report *rep, enum hid_class_request reqtype);
u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags);
struct hid_device *hid_allocate_device(void);
struct hid_report *hid_register_report(struct hid_device *device,
- unsigned int type, unsigned int id,
+ enum hid_report_type type, unsigned int id,
unsigned int application);
int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
struct hid_report *hid_validate_values(struct hid_device *hid,
- unsigned int type, unsigned int id,
+ enum hid_report_type type, unsigned int id,
unsigned int field_index,
unsigned int report_counts);
@@ -912,10 +952,20 @@ s32 hid_snto32(__u32 value, unsigned n);
__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
unsigned offset, unsigned n);
+#ifdef CONFIG_PM
+int hid_driver_suspend(struct hid_device *hdev, pm_message_t state);
+int hid_driver_reset_resume(struct hid_device *hdev);
+int hid_driver_resume(struct hid_device *hdev);
+#else
+static inline int hid_driver_suspend(struct hid_device *hdev, pm_message_t state) { return 0; }
+static inline int hid_driver_reset_resume(struct hid_device *hdev) { return 0; }
+static inline int hid_driver_resume(struct hid_device *hdev) { return 0; }
+#endif
+
/**
* hid_device_io_start - enable HID input during probe, remove
*
- * @hid - the device
+ * @hid: the device
*
* This should only be called during probe or remove and only be
* called by the thread calling probe or remove. It will allow
@@ -933,7 +983,7 @@ static inline void hid_device_io_start(struct hid_device *hid) {
/**
* hid_device_io_stop - disable HID input during probe, remove
*
- * @hid - the device
+ * @hid: the device
*
* Should only be called after hid_device_io_start. It will prevent
* incoming packets from going to the driver for the duration of
@@ -959,39 +1009,65 @@ static inline void hid_device_io_stop(struct hid_device *hid) {
* @max: maximal valid usage->code to consider later (out parameter)
* @type: input event type (EV_KEY, EV_REL, ...)
* @c: code which corresponds to this usage and type
+ *
+ * The value pointed to by @bit will be set to NULL if either @type is
+ * an unhandled event type, or if @c is out of range for @type. This
+ * can be used as an error condition.
*/
static inline void hid_map_usage(struct hid_input *hidinput,
struct hid_usage *usage, unsigned long **bit, int *max,
- __u8 type, __u16 c)
+ __u8 type, unsigned int c)
{
struct input_dev *input = hidinput->input;
-
- usage->type = type;
- usage->code = c;
+ unsigned long *bmap = NULL;
+ unsigned int limit = 0;
switch (type) {
case EV_ABS:
- *bit = input->absbit;
- *max = ABS_MAX;
+ bmap = input->absbit;
+ limit = ABS_MAX;
break;
case EV_REL:
- *bit = input->relbit;
- *max = REL_MAX;
+ bmap = input->relbit;
+ limit = REL_MAX;
break;
case EV_KEY:
- *bit = input->keybit;
- *max = KEY_MAX;
+ bmap = input->keybit;
+ limit = KEY_MAX;
break;
case EV_LED:
- *bit = input->ledbit;
- *max = LED_MAX;
+ bmap = input->ledbit;
+ limit = LED_MAX;
break;
+ case EV_MSC:
+ bmap = input->mscbit;
+ limit = MSC_MAX;
+ break;
+ }
+
+ if (unlikely(c > limit || !bmap)) {
+ pr_warn_ratelimited("%s: Invalid code %d type %d\n",
+ input->name, c, type);
+ *bit = NULL;
+ return;
}
+
+ usage->type = type;
+ usage->code = c;
+ *max = limit;
+ *bit = bmap;
}
/**
* hid_map_usage_clear - map usage input bits and clear the input bit
*
+ * @hidinput: hidinput which we are interested in
+ * @usage: usage to fill in
+ * @bit: pointer to input->{}bit (out parameter)
+ * @max: maximal valid usage->code to consider later (out parameter)
+ * @type: input event type (EV_KEY, EV_REL, ...)
+ * @c: code which corresponds to this usage and type
+ *
* The same as hid_map_usage, except the @c bit is also cleared in supported
* bits (@bit).
*/
@@ -1000,7 +1076,8 @@ static inline void hid_map_usage_clear(struct hid_input *hidinput,
__u8 type, __u16 c)
{
hid_map_usage(hidinput, usage, bit, max, type, c);
- clear_bit(c, *bit);
+ if (*bit)
+ clear_bit(usage->code, *bit);
}
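Since hid_map_usage() now sets *bit to NULL when the code is out of range for the event type, a driver's input_mapping callback can treat that as the failure condition. A minimal sketch (mydrv_input_mapping is hypothetical; the return values follow the usual hid-input conventions):

	static int mydrv_input_mapping(struct hid_device *hdev,
				       struct hid_input *hi,
				       struct hid_field *field,
				       struct hid_usage *usage,
				       unsigned long **bit, int *max)
	{
		if (usage->hid == HID_CP_VOLUMEUP) {
			hid_map_usage_clear(hi, usage, bit, max,
					    EV_KEY, KEY_VOLUMEUP);
			if (!*bit)
				return -1;	/* mapping failed, ignore usage */
			return 1;		/* mapped */
		}
		return 0;			/* fall back to hid-input */
	}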
/**
@@ -1022,6 +1099,13 @@ int __must_check hid_hw_start(struct hid_device *hdev,
void hid_hw_stop(struct hid_device *hdev);
int __must_check hid_hw_open(struct hid_device *hdev);
void hid_hw_close(struct hid_device *hdev);
+void hid_hw_request(struct hid_device *hdev,
+ struct hid_report *report, enum hid_class_request reqtype);
+int hid_hw_raw_request(struct hid_device *hdev,
+ unsigned char reportnum, __u8 *buf,
+ size_t len, enum hid_report_type rtype,
+ enum hid_class_request reqtype);
+int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len);
/**
* hid_hw_power - requests underlying HW to go into given power mode
@@ -1040,82 +1124,36 @@ static inline int hid_hw_power(struct hid_device *hdev, int level)
/**
- * hid_hw_request - send report request to device
+ * hid_hw_idle - send idle request to device
*
* @hdev: hid device
- * @report: report to send
+ * @report: report to control
+ * @idle: idle state
* @reqtype: hid request type
*/
-static inline void hid_hw_request(struct hid_device *hdev,
- struct hid_report *report, int reqtype)
+static inline int hid_hw_idle(struct hid_device *hdev, int report, int idle,
+ enum hid_class_request reqtype)
{
- if (hdev->ll_driver->request)
- return hdev->ll_driver->request(hdev, report, reqtype);
+ if (hdev->ll_driver->idle)
+ return hdev->ll_driver->idle(hdev, report, idle, reqtype);
- __hid_request(hdev, report, reqtype);
+ return 0;
}
/**
- * hid_hw_raw_request - send report request to device
+ * hid_hw_may_wakeup - return whether the hid device may act as a wakeup source during system-suspend
*
* @hdev: hid device
- * @reportnum: report ID
- * @buf: in/out data to transfer
- * @len: length of buf
- * @rtype: HID report type
- * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
- *
- * @return: count of data transfered, negative if error
- *
- * Same behavior as hid_hw_request, but with raw buffers instead.
*/
-static inline int hid_hw_raw_request(struct hid_device *hdev,
- unsigned char reportnum, __u8 *buf,
- size_t len, unsigned char rtype, int reqtype)
+static inline bool hid_hw_may_wakeup(struct hid_device *hdev)
{
- if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
- return -EINVAL;
-
- return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
- rtype, reqtype);
-}
+ if (hdev->ll_driver->may_wakeup)
+ return hdev->ll_driver->may_wakeup(hdev);
-/**
- * hid_hw_output_report - send output report to device
- *
- * @hdev: hid device
- * @buf: raw data to transfer
- * @len: length of buf
- *
- * @return: count of data transfered, negative if error
- */
-static inline int hid_hw_output_report(struct hid_device *hdev, __u8 *buf,
- size_t len)
-{
- if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
- return -EINVAL;
+ if (hdev->dev.parent)
+ return device_may_wakeup(hdev->dev.parent);
- if (hdev->ll_driver->output_report)
- return hdev->ll_driver->output_report(hdev, buf, len);
-
- return -ENOSYS;
-}
-
-/**
- * hid_hw_idle - send idle request to device
- *
- * @hdev: hid device
- * @report: report to control
- * @idle: idle state
- * @reqtype: hid request type
- */
-static inline int hid_hw_idle(struct hid_device *hdev, int report, int idle,
- int reqtype)
-{
- if (hdev->ll_driver->idle)
- return hdev->ll_driver->idle(hdev, report, idle, reqtype);
-
- return 0;
+ return false;
}
/**
@@ -1136,12 +1174,11 @@ static inline void hid_hw_wait(struct hid_device *hdev)
*/
static inline u32 hid_report_len(struct hid_report *report)
{
- /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
- return ((report->size - 1) >> 3) + 1 + (report->id > 0);
+ return DIV_ROUND_UP(report->size, 8) + (report->id > 0);
}
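Worked example: a report carrying 12 bits of payload and a non-zero report ID occupies DIV_ROUND_UP(12, 8) + 1 = 2 + 1 = 3 bytes, the same result the old open-coded ((size - 1) >> 3) + 1 + 1 expression produced; the rewrite only makes the intent explicit.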
-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
- int interrupt);
+int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
+ int interrupt);
/* HID quirks API */
unsigned long hid_lookup_quirk(const struct hid_device *hdev);
diff --git a/include/linux/hidden.h b/include/linux/hidden.h
new file mode 100644
index 000000000000..49a17b6b5962
--- /dev/null
+++ b/include/linux/hidden.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * When building position independent code with GCC using the -fPIC option,
+ * (or even the -fPIE one on older versions), it will assume that we are
+ * building a dynamic object (either a shared library or an executable) that
+ * may have symbol references that can only be resolved at load time. For a
+ * variety of reasons (ELF symbol preemption, the CoW footprint of the section
+ * that is modified by the loader), this results in all references to symbols
+ * with external linkage to go via entries in the Global Offset Table (GOT),
+ * which carries absolute addresses which need to be fixed up when the
+ * executable image is loaded at an offset which is different from its link
+ * time offset.
+ *
+ * Fortunately, there is a way to inform the compiler that such symbol
+ * references will be satisfied at link time rather than at load time, by
+ * giving them 'hidden' visibility.
+ */
+
+#pragma GCC visibility push(hidden)
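A hedged sketch of the intended use: include the header at the top of a translation unit that is built with -fPIC but never actually loaded as a dynamic object (EFI stub, decompressor and the like), and every subsequent declaration gets hidden visibility (the symbol names below are hypothetical):

	#include <linux/hidden.h>	/* all declarations below: hidden visibility */

	extern int early_state;		/* referenced PC-relative, no GOT entry */

	int read_early_state(void)
	{
		return early_state;
	}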
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
new file mode 100644
index 000000000000..034b1106d022
--- /dev/null
+++ b/include/linux/highmem-internal.h
@@ -0,0 +1,287 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_HIGHMEM_INTERNAL_H
+#define _LINUX_HIGHMEM_INTERNAL_H
+
+/*
+ * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
+ */
+#ifdef CONFIG_KMAP_LOCAL
+void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
+void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
+void kunmap_local_indexed(const void *vaddr);
+void kmap_local_fork(struct task_struct *tsk);
+void __kmap_local_sched_out(void);
+void __kmap_local_sched_in(void);
+static inline void kmap_assert_nomap(void)
+{
+ DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
+}
+#else
+static inline void kmap_local_fork(struct task_struct *tsk) { }
+static inline void kmap_assert_nomap(void) { }
+#endif
+
+#ifdef CONFIG_HIGHMEM
+#include <asm/highmem.h>
+
+#ifndef ARCH_HAS_KMAP_FLUSH_TLB
+static inline void kmap_flush_tlb(unsigned long addr) { }
+#endif
+
+#ifndef kmap_prot
+#define kmap_prot PAGE_KERNEL
+#endif
+
+void *kmap_high(struct page *page);
+void kunmap_high(struct page *page);
+void __kmap_flush_unused(void);
+struct page *__kmap_to_page(void *addr);
+
+static inline void *kmap(struct page *page)
+{
+ void *addr;
+
+ might_sleep();
+ if (!PageHighMem(page))
+ addr = page_address(page);
+ else
+ addr = kmap_high(page);
+ kmap_flush_tlb((unsigned long)addr);
+ return addr;
+}
+
+static inline void kunmap(struct page *page)
+{
+ might_sleep();
+ if (!PageHighMem(page))
+ return;
+ kunmap_high(page);
+}
+
+static inline struct page *kmap_to_page(void *addr)
+{
+ return __kmap_to_page(addr);
+}
+
+static inline void kmap_flush_unused(void)
+{
+ __kmap_flush_unused();
+}
+
+static inline void *kmap_local_page(struct page *page)
+{
+ return __kmap_local_page_prot(page, kmap_prot);
+}
+
+static inline void *kmap_local_folio(struct folio *folio, size_t offset)
+{
+ struct page *page = folio_page(folio, offset / PAGE_SIZE);
+ return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+ return __kmap_local_page_prot(page, prot);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+ return __kmap_local_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_local(const void *vaddr)
+{
+ kunmap_local_indexed(vaddr);
+}
+
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_disable();
+ else
+ preempt_disable();
+
+ pagefault_disable();
+ return __kmap_local_page_prot(page, prot);
+}
+
+static inline void *kmap_atomic(struct page *page)
+{
+ return kmap_atomic_prot(page, kmap_prot);
+}
+
+static inline void *kmap_atomic_pfn(unsigned long pfn)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_disable();
+ else
+ preempt_disable();
+
+ pagefault_disable();
+ return __kmap_local_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_atomic(const void *addr)
+{
+ kunmap_local_indexed(addr);
+ pagefault_enable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_enable();
+ else
+ preempt_enable();
+}
+
+unsigned int __nr_free_highpages(void);
+extern atomic_long_t _totalhigh_pages;
+
+static inline unsigned int nr_free_highpages(void)
+{
+ return __nr_free_highpages();
+}
+
+static inline unsigned long totalhigh_pages(void)
+{
+ return (unsigned long)atomic_long_read(&_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_add(long count)
+{
+ atomic_long_add(count, &_totalhigh_pages);
+}
+
+static inline bool is_kmap_addr(const void *x)
+{
+ unsigned long addr = (unsigned long)x;
+ return addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP);
+}
+#else /* CONFIG_HIGHMEM */
+
+static inline struct page *kmap_to_page(void *addr)
+{
+ return virt_to_page(addr);
+}
+
+static inline void *kmap(struct page *page)
+{
+ might_sleep();
+ return page_address(page);
+}
+
+static inline void kunmap_high(struct page *page) { }
+static inline void kmap_flush_unused(void) { }
+
+static inline void kunmap(struct page *page)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(page_address(page));
+#endif
+}
+
+static inline void *kmap_local_page(struct page *page)
+{
+ return page_address(page);
+}
+
+static inline void *kmap_local_folio(struct folio *folio, size_t offset)
+{
+ return page_address(&folio->page) + offset;
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+ return kmap_local_page(page);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+ return kmap_local_page(pfn_to_page(pfn));
+}
+
+static inline void __kunmap_local(const void *addr)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(addr);
+#endif
+}
+
+static inline void *kmap_atomic(struct page *page)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_disable();
+ else
+ preempt_disable();
+ pagefault_disable();
+ return page_address(page);
+}
+
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+ return kmap_atomic(page);
+}
+
+static inline void *kmap_atomic_pfn(unsigned long pfn)
+{
+ return kmap_atomic(pfn_to_page(pfn));
+}
+
+static inline void __kunmap_atomic(const void *addr)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(addr);
+#endif
+ pagefault_enable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_enable();
+ else
+ preempt_enable();
+}
+
+static inline unsigned int nr_free_highpages(void) { return 0; }
+static inline unsigned long totalhigh_pages(void) { return 0UL; }
+
+static inline bool is_kmap_addr(const void *x)
+{
+ return false;
+}
+
+#endif /* CONFIG_HIGHMEM */
+
+/**
+ * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic() - deprecated!
+ * @__addr: Virtual address to be unmapped
+ *
+ * Unmaps an address previously mapped by kmap_atomic() and re-enables
+ * pagefaults. Depending on the PREEMPT_RT configuration, it also re-enables
+ * migration and preemption. Users should not count on these side effects.
+ *
+ * Mappings should be unmapped in the reverse order that they were mapped.
+ * See kmap_local_page() for details on nesting.
+ *
+ * @__addr can be any address within the mapped page, so there is no need
+ * to subtract any offset that has been added. In contrast to kunmap(),
+ * this function takes the address returned from kmap_atomic(), not the
+ * page passed to it. The compiler will warn you if you pass the page.
+ */
+#define kunmap_atomic(__addr) \
+do { \
+ BUILD_BUG_ON(__same_type((__addr), struct page *)); \
+ __kunmap_atomic(__addr); \
+} while (0)
+
+/**
+ * kunmap_local - Unmap a page mapped via kmap_local_page().
+ * @__addr: An address within the page mapped
+ *
+ * @__addr can be any address within the mapped page. Commonly it is the
+ * address returned from kmap_local_page(), but it can also include offsets.
+ *
+ * Unmapping should be done in the reverse order of the mapping. See
+ * kmap_local_page() for details.
+ */
+#define kunmap_local(__addr) \
+do { \
+ BUILD_BUG_ON(__same_type((__addr), struct page *)); \
+ __kunmap_local(__addr); \
+} while (0)
+
+#endif
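A minimal sketch of the stack discipline kmap_local_page()/kunmap_local() impose, copying between two possibly-highmem pages (copy_between() is hypothetical; the kernel's own copy_highpage() does the same job):

	#include <linux/highmem.h>
	#include <linux/string.h>

	static void copy_between(struct page *dst, struct page *src)
	{
		void *d = kmap_local_page(dst);
		void *s = kmap_local_page(src);

		memcpy(d, s, PAGE_SIZE);

		/* unmap strictly in reverse map order */
		kunmap_local(s);
		kunmap_local(d);
	}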
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index ea5cdbd8c2c3..e9912da5441b 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -5,186 +5,229 @@
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
+#include <linux/cacheflush.h>
+#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
-#include <asm/cacheflush.h>
+#include "highmem-internal.h"
-#ifndef ARCH_HAS_FLUSH_ANON_PAGE
-static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
-{
-}
-#endif
-
-#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-}
-static inline void flush_kernel_vmap_range(void *vaddr, int size)
-{
-}
-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
-{
-}
-#endif
-
-#include <asm/kmap_types.h>
-
-#ifdef CONFIG_HIGHMEM
-#include <asm/highmem.h>
-
-/* declarations for linux/mm/highmem.c */
-unsigned int nr_free_highpages(void);
-extern atomic_long_t _totalhigh_pages;
-static inline unsigned long totalhigh_pages(void)
-{
- return (unsigned long)atomic_long_read(&_totalhigh_pages);
-}
-
-static inline void totalhigh_pages_inc(void)
-{
- atomic_long_inc(&_totalhigh_pages);
-}
-
-static inline void totalhigh_pages_dec(void)
-{
- atomic_long_dec(&_totalhigh_pages);
-}
-
-static inline void totalhigh_pages_add(long count)
-{
- atomic_long_add(count, &_totalhigh_pages);
-}
-
-static inline void totalhigh_pages_set(long val)
-{
- atomic_long_set(&_totalhigh_pages, val);
-}
-
-void kmap_flush_unused(void);
+/**
+ * kmap - Map a page for long term usage
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Can only be invoked from preemptible task context because on 32bit
+ * systems with CONFIG_HIGHMEM enabled this function might sleep.
+ *
+ * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
+ * this returns the virtual address of the direct kernel mapping.
+ *
+ * The returned virtual address is globally visible and valid up to the
+ * point where it is unmapped via kunmap(). The pointer can be handed to
+ * other contexts.
+ *
+ * For highmem pages on 32bit systems this can be slow as the mapping space
+ * is limited and protected by a global lock. In case that there is no
+ * mapping slot available the function blocks until a slot is released via
+ * kunmap().
+ */
+static inline void *kmap(struct page *page);
-struct page *kmap_to_page(void *addr);
+/**
+ * kunmap - Unmap the virtual address mapped by kmap()
+ * @page: Pointer to the page which was mapped by kmap()
+ *
+ * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
+ * pages in the low memory area.
+ */
+static inline void kunmap(struct page *page);
-#else /* CONFIG_HIGHMEM */
+/**
+ * kmap_to_page - Get the page for a kmap'ed address
+ * @addr: The address to look up
+ *
+ * Returns: The page which is mapped to @addr.
+ */
+static inline struct page *kmap_to_page(void *addr);
-static inline unsigned int nr_free_highpages(void) { return 0; }
+/**
+ * kmap_flush_unused - Flush all unused kmap mappings in order to
+ * remove stray mappings
+ */
+static inline void kmap_flush_unused(void);
-static inline struct page *kmap_to_page(void *addr)
-{
- return virt_to_page(addr);
-}
+/**
+ * kmap_local_page - Map a page for temporary usage
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Can be invoked from any context, including interrupts.
+ *
+ * Requires careful handling when nesting multiple mappings because the map
+ * management is stack based. The unmap has to be in the reverse order of
+ * the map operation:
+ *
+ * addr1 = kmap_local_page(page1);
+ * addr2 = kmap_local_page(page2);
+ * ...
+ * kunmap_local(addr2);
+ * kunmap_local(addr1);
+ *
+ * Unmapping addr1 before addr2 is invalid and causes malfunction.
+ *
+ * Contrary to kmap() mappings the mapping is only valid in the context of
+ * the caller and cannot be handed to other contexts.
+ *
+ * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
+ * virtual address of the direct mapping. Only real highmem pages are
+ * temporarily mapped.
+ *
+ * While it is significantly faster than kmap() for the highmem case it
+ * comes with restrictions about the pointer validity.
+ *
+ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
+ * disabling migration in order to keep the virtual address stable across
+ * preemption. No caller of kmap_local_page() can rely on this side effect.
+ */
+static inline void *kmap_local_page(struct page *page);
-static inline unsigned long totalhigh_pages(void) { return 0UL; }
+/**
+ * kmap_local_folio - Map a page in this folio for temporary usage
+ * @folio: The folio containing the page.
+ * @offset: The byte offset within the folio which identifies the page.
+ *
+ * Requires careful handling when nesting multiple mappings because the map
+ * management is stack based. The unmap has to be in the reverse order of
+ * the map operation::
+ *
+ * addr1 = kmap_local_folio(folio1, offset1);
+ * addr2 = kmap_local_folio(folio2, offset2);
+ * ...
+ * kunmap_local(addr2);
+ * kunmap_local(addr1);
+ *
+ * Unmapping addr1 before addr2 is invalid and causes malfunction.
+ *
+ * Contrary to kmap() mappings the mapping is only valid in the context of
+ * the caller and cannot be handed to other contexts.
+ *
+ * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
+ * virtual address of the direct mapping. Only real highmem pages are
+ * temporarily mapped.
+ *
+ * While it is significantly faster than kmap() for the highmem case it
+ * comes with restrictions about the pointer validity. Only use when really
+ * necessary.
+ *
+ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
+ * disabling migration in order to keep the virtual address stable across
+ * preemption. No caller of kmap_local_folio() can rely on this side effect.
+ *
+ * Context: Can be invoked from any context.
+ * Return: The virtual address of @offset.
+ */
+static inline void *kmap_local_folio(struct folio *folio, size_t offset);
-#ifndef ARCH_HAS_KMAP
-static inline void *kmap(struct page *page)
-{
- might_sleep();
- return page_address(page);
-}
+/**
+ * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * In fact a wrapper around kmap_local_page() which also disables pagefaults
+ * and, depending on PREEMPT_RT configuration, also CPU migration and
+ * preemption. Therefore users should not count on the latter two side effects.
+ *
+ * Mappings should always be released by kunmap_atomic().
+ *
+ * Do not use in new code. Use kmap_local_page() instead.
+ *
+ * It is used in atomic context when code wants to access the contents of a
+ * page that might be allocated from high memory (see __GFP_HIGHMEM), for
+ * example a page in the pagecache. The API has two functions, and they
+ * can be used in a manner similar to the following::
+ *
+ * // Find the page of interest.
+ * struct page *page = find_get_page(mapping, offset);
+ *
+ * // Gain access to the contents of that page.
+ * void *vaddr = kmap_atomic(page);
+ *
+ * // Do something to the contents of that page.
+ * memset(vaddr, 0, PAGE_SIZE);
+ *
+ * // Unmap that page.
+ * kunmap_atomic(vaddr);
+ *
+ * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
+ * call, not the argument.
+ *
+ * If you need to map two pages because you want to copy from one page to
+ * another you need to keep the kmap_atomic calls strictly nested, like:
+ *
+ * vaddr1 = kmap_atomic(page1);
+ * vaddr2 = kmap_atomic(page2);
+ *
+ * memcpy(vaddr1, vaddr2, PAGE_SIZE);
+ *
+ * kunmap_atomic(vaddr2);
+ * kunmap_atomic(vaddr1);
+ */
+static inline void *kmap_atomic(struct page *page);
-static inline void kunmap(struct page *page)
-{
-}
+/* Highmem related interfaces for management code */
+static inline unsigned int nr_free_highpages(void);
+static inline unsigned long totalhigh_pages(void);
-static inline void *kmap_atomic(struct page *page)
-{
- preempt_disable();
- pagefault_disable();
- return page_address(page);
-}
-#define kmap_atomic_prot(page, prot) kmap_atomic(page)
-
-static inline void __kunmap_atomic(void *addr)
+#ifndef ARCH_HAS_FLUSH_ANON_PAGE
+static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
- pagefault_enable();
- preempt_enable();
}
-
-#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
-
-#define kmap_flush_unused() do {} while(0)
-#endif
-
-#endif /* CONFIG_HIGHMEM */
-
-#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
-
-DECLARE_PER_CPU(int, __kmap_atomic_idx);
-
-static inline int kmap_atomic_idx_push(void)
-{
- int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
-
-#ifdef CONFIG_DEBUG_HIGHMEM
- WARN_ON_ONCE(in_irq() && !irqs_disabled());
- BUG_ON(idx >= KM_TYPE_NR);
#endif
- return idx;
-}
-static inline int kmap_atomic_idx(void)
+#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
+static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
- return __this_cpu_read(__kmap_atomic_idx) - 1;
}
-
-static inline void kmap_atomic_idx_pop(void)
+static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
-#ifdef CONFIG_DEBUG_HIGHMEM
- int idx = __this_cpu_dec_return(__kmap_atomic_idx);
-
- BUG_ON(idx < 0);
-#else
- __this_cpu_dec(__kmap_atomic_idx);
-#endif
}
-
#endif
-/*
- * Prevent people trying to call kunmap_atomic() as if it were kunmap()
- * kunmap_atomic() should get the return value of kmap_atomic, not the page.
- */
-#define kunmap_atomic(addr) \
-do { \
- BUILD_BUG_ON(__same_type((addr), struct page *)); \
- __kunmap_atomic(addr); \
-} while (0)
-
-
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
- void *addr = kmap_atomic(page);
+ void *addr = kmap_local_page(page);
clear_user_page(addr, vaddr, page);
- kunmap_atomic(addr);
+ kunmap_local(addr);
}
#endif
-#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
/**
- * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
- * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE
+ * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
* @vma: The VMA the page is to be allocated for
* @vaddr: The virtual address the page will be inserted into
*
- * This function will allocate a page for a VMA but the caller is expected
- * to specify via movableflags whether the page will be movable in the
- * future or not
+ * Returns: The allocated and zeroed HIGHMEM page
+ *
+ * This function will allocate a page for a VMA that the caller knows will
+ * be able to migrate in the future using move_pages() or be reclaimed
*
* An architecture may override this function by defining
- * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
+ * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own
* implementation.
*/
static inline struct page *
-__alloc_zeroed_user_highpage(gfp_t movableflags,
- struct vm_area_struct *vma,
- unsigned long vaddr)
+alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+ unsigned long vaddr)
{
- struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
- vma, vaddr);
+ struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
if (page)
clear_user_highpage(page, vaddr);
@@ -193,35 +236,47 @@ __alloc_zeroed_user_highpage(gfp_t movableflags,
}
#endif
-/**
- * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
- * @vma: The VMA the page is to be allocated for
- * @vaddr: The virtual address the page will be inserted into
- *
- * This function will allocate a page for a VMA that the caller knows will
- * be able to migrate in the future using move_pages() or reclaimed
- */
-static inline struct page *
-alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
- unsigned long vaddr)
+static inline void clear_highpage(struct page *page)
{
- return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
+ void *kaddr = kmap_local_page(page);
+ clear_page(kaddr);
+ kunmap_local(kaddr);
}
-static inline void clear_highpage(struct page *page)
+static inline void clear_highpage_kasan_tagged(struct page *page)
{
- void *kaddr = kmap_atomic(page);
- clear_page(kaddr);
- kunmap_atomic(kaddr);
+ u8 tag;
+
+ tag = page_kasan_tag(page);
+ page_kasan_tag_reset(page);
+ clear_highpage(page);
+ page_kasan_tag_set(page, tag);
}
+#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
+
+static inline void tag_clear_highpage(struct page *page)
+{
+}
+
+#endif
+
+/*
+ * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
+ * If we pass in a head page, we can zero up to the size of the compound page.
+ */
+#ifdef CONFIG_HIGHMEM
+void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
+ unsigned start2, unsigned end2);
+#else
static inline void zero_user_segments(struct page *page,
- unsigned start1, unsigned end1,
- unsigned start2, unsigned end2)
+ unsigned start1, unsigned end1,
+ unsigned start2, unsigned end2)
{
- void *kaddr = kmap_atomic(page);
+ void *kaddr = kmap_local_page(page);
+ unsigned int i;
- BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);
+ BUG_ON(end1 > page_size(page) || end2 > page_size(page));
if (end1 > start1)
memset(kaddr + start1, 0, end1 - start1);
@@ -229,9 +284,11 @@ static inline void zero_user_segments(struct page *page,
if (end2 > start2)
memset(kaddr + start2, 0, end2 - start2);
- kunmap_atomic(kaddr);
- flush_dcache_page(page);
+ kunmap_local(kaddr);
+ for (i = 0; i < compound_nr(page); i++)
+ flush_dcache_page(page + i);
}
+#endif
static inline void zero_user_segment(struct page *page,
unsigned start, unsigned end)
@@ -252,11 +309,12 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
{
char *vfrom, *vto;
- vfrom = kmap_atomic(from);
- vto = kmap_atomic(to);
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
copy_user_page(vto, vfrom, vaddr, to);
- kunmap_atomic(vto);
- kunmap_atomic(vfrom);
+ kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
}
#endif
@@ -267,13 +325,106 @@ static inline void copy_highpage(struct page *to, struct page *from)
{
char *vfrom, *vto;
- vfrom = kmap_atomic(from);
- vto = kmap_atomic(to);
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
copy_page(vto, vfrom);
- kunmap_atomic(vto);
- kunmap_atomic(vfrom);
+ kmsan_copy_page_meta(to, from);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
}
#endif
+static inline void memcpy_page(struct page *dst_page, size_t dst_off,
+ struct page *src_page, size_t src_off,
+ size_t len)
+{
+ char *dst = kmap_local_page(dst_page);
+ char *src = kmap_local_page(src_page);
+
+ VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
+ memcpy(dst + dst_off, src + src_off, len);
+ kunmap_local(src);
+ kunmap_local(dst);
+}
+
+static inline void memset_page(struct page *page, size_t offset, int val,
+ size_t len)
+{
+ char *addr = kmap_local_page(page);
+
+ VM_BUG_ON(offset + len > PAGE_SIZE);
+ memset(addr + offset, val, len);
+ kunmap_local(addr);
+}
+
+static inline void memcpy_from_page(char *to, struct page *page,
+ size_t offset, size_t len)
+{
+ char *from = kmap_local_page(page);
+
+ VM_BUG_ON(offset + len > PAGE_SIZE);
+ memcpy(to, from + offset, len);
+ kunmap_local(from);
+}
+
+static inline void memcpy_to_page(struct page *page, size_t offset,
+ const char *from, size_t len)
+{
+ char *to = kmap_local_page(page);
+
+ VM_BUG_ON(offset + len > PAGE_SIZE);
+ memcpy(to + offset, from, len);
+ flush_dcache_page(page);
+ kunmap_local(to);
+}
+
+static inline void memzero_page(struct page *page, size_t offset, size_t len)
+{
+ char *addr = kmap_local_page(page);
+
+ VM_BUG_ON(offset + len > PAGE_SIZE);
+ memset(addr + offset, 0, len);
+ flush_dcache_page(page);
+ kunmap_local(addr);
+}
+
+/**
+ * folio_zero_segments() - Zero two byte ranges in a folio.
+ * @folio: The folio to write to.
+ * @start1: The first byte to zero.
+ * @xend1: One more than the last byte in the first range.
+ * @start2: The first byte to zero in the second range.
+ * @xend2: One more than the last byte in the second range.
+ */
+static inline void folio_zero_segments(struct folio *folio,
+ size_t start1, size_t xend1, size_t start2, size_t xend2)
+{
+ zero_user_segments(&folio->page, start1, xend1, start2, xend2);
+}
+
+/**
+ * folio_zero_segment() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @xend: One more than the last byte to zero.
+ */
+static inline void folio_zero_segment(struct folio *folio,
+ size_t start, size_t xend)
+{
+ zero_user_segments(&folio->page, start, xend, 0, 0);
+}
+
+/**
+ * folio_zero_range() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @length: The number of bytes to zero.
+ */
+static inline void folio_zero_range(struct folio *folio,
+ size_t start, size_t length)
+{
+ zero_user_segments(&folio->page, start, start + length, 0, 0);
+}
+
#endif /* _LINUX_HIGHMEM_H */
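A short sketch combining the new helpers above: copy a small header out of a pagecache page with memcpy_from_page(), then zero the part of the folio past EOF with folio_zero_range() (read_header() and its arguments are hypothetical):

	#include <linux/highmem.h>

	static void read_header(struct page *page, struct folio *folio,
				size_t eof_off, char *hdr, size_t hdr_len)
	{
		memcpy_from_page(hdr, page, 0, hdr_len);
		folio_zero_range(folio, eof_off,
				 folio_size(folio) - eof_off);
	}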
diff --git a/include/linux/hil_mlc.h b/include/linux/hil_mlc.h
index 774f7d3b8f6a..369221fd5518 100644
--- a/include/linux/hil_mlc.h
+++ b/include/linux/hil_mlc.h
@@ -103,7 +103,7 @@ struct hilse_node {
/* Methods for back-end drivers, e.g. hp_sdc_mlc */
typedef int (hil_mlc_cts) (hil_mlc *mlc);
-typedef void (hil_mlc_out) (hil_mlc *mlc);
+typedef int (hil_mlc_out) (hil_mlc *mlc);
typedef int (hil_mlc_in) (hil_mlc *mlc, suseconds_t timeout);
struct hil_mlc_devinfo {
diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h
index 9dc01f7ab5b4..07414c241e65 100644
--- a/include/linux/hippidevice.h
+++ b/include/linux/hippidevice.h
@@ -23,6 +23,10 @@
#ifdef __KERNEL__
+struct neigh_parms;
+struct net_device;
+struct sk_buff;
+
struct hippi_cb {
__u32 ifield;
};
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
new file mode 100644
index 000000000000..e230c7c46110
--- /dev/null
+++ b/include/linux/hisi_acc_qm.h
@@ -0,0 +1,520 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 HiSilicon Limited. */
+#ifndef HISI_ACC_QM_H
+#define HISI_ACC_QM_H
+
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#define QM_QNUM_V1 4096
+#define QM_QNUM_V2 1024
+#define QM_MAX_VFS_NUM_V2 63
+
+/* qm user domain */
+#define QM_ARUSER_M_CFG_1 0x100088
+#define AXUSER_SNOOP_ENABLE BIT(30)
+#define AXUSER_CMD_TYPE GENMASK(14, 12)
+#define AXUSER_CMD_SMMU_NORMAL 1
+#define AXUSER_NS BIT(6)
+#define AXUSER_NO BIT(5)
+#define AXUSER_FP BIT(4)
+#define AXUSER_SSV BIT(0)
+#define AXUSER_BASE (AXUSER_SNOOP_ENABLE | \
+ FIELD_PREP(AXUSER_CMD_TYPE, \
+ AXUSER_CMD_SMMU_NORMAL) | \
+ AXUSER_NS | AXUSER_NO | AXUSER_FP)
+#define QM_ARUSER_M_CFG_ENABLE 0x100090
+#define ARUSER_M_CFG_ENABLE 0xfffffffe
+#define QM_AWUSER_M_CFG_1 0x100098
+#define QM_AWUSER_M_CFG_ENABLE 0x1000a0
+#define AWUSER_M_CFG_ENABLE 0xfffffffe
+#define QM_WUSER_M_CFG_ENABLE 0x1000a8
+#define WUSER_M_CFG_ENABLE 0xffffffff
+
+/* mailbox */
+#define QM_MB_CMD_SQC 0x0
+#define QM_MB_CMD_CQC 0x1
+#define QM_MB_CMD_EQC 0x2
+#define QM_MB_CMD_AEQC 0x3
+#define QM_MB_CMD_SQC_BT 0x4
+#define QM_MB_CMD_CQC_BT 0x5
+#define QM_MB_CMD_SQC_VFT_V2 0x6
+#define QM_MB_CMD_STOP_QP 0x8
+#define QM_MB_CMD_SRC 0xc
+#define QM_MB_CMD_DST 0xd
+
+#define QM_MB_CMD_SEND_BASE 0x300
+#define QM_MB_EVENT_SHIFT 8
+#define QM_MB_BUSY_SHIFT 13
+#define QM_MB_OP_SHIFT 14
+#define QM_MB_CMD_DATA_ADDR_L 0x304
+#define QM_MB_CMD_DATA_ADDR_H 0x308
+#define QM_MB_MAX_WAIT_CNT 6000
+
+/* doorbell */
+#define QM_DOORBELL_CMD_SQ 0
+#define QM_DOORBELL_CMD_CQ 1
+#define QM_DOORBELL_CMD_EQ 2
+#define QM_DOORBELL_CMD_AEQ 3
+
+#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000
+#define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000
+#define QM_QP_MAX_NUM_SHIFT 11
+#define QM_DB_CMD_SHIFT_V2 12
+#define QM_DB_RAND_SHIFT_V2 16
+#define QM_DB_INDEX_SHIFT_V2 32
+#define QM_DB_PRIORITY_SHIFT_V2 48
+#define QM_VF_STATE 0x60
+
+/* qm cache */
+#define QM_CACHE_CTL 0x100050
+#define SQC_CACHE_ENABLE BIT(0)
+#define CQC_CACHE_ENABLE BIT(1)
+#define SQC_CACHE_WB_ENABLE BIT(4)
+#define SQC_CACHE_WB_THRD GENMASK(10, 5)
+#define CQC_CACHE_WB_ENABLE BIT(11)
+#define CQC_CACHE_WB_THRD GENMASK(17, 12)
+#define QM_AXI_M_CFG 0x1000ac
+#define AXI_M_CFG 0xffff
+#define QM_AXI_M_CFG_ENABLE 0x1000b0
+#define AM_CFG_SINGLE_PORT_MAX_TRANS 0x300014
+#define AXI_M_CFG_ENABLE 0xffffffff
+#define QM_PEH_AXUSER_CFG 0x1000cc
+#define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0
+#define PEH_AXUSER_CFG 0x401001
+#define PEH_AXUSER_CFG_ENABLE 0xffffffff
+
+#define QM_MIN_QNUM 2
+#define HISI_ACC_SGL_SGE_NR_MAX 255
+#define QM_SHAPER_CFG 0x100164
+#define QM_SHAPER_ENABLE BIT(30)
+#define QM_SHAPER_TYPE1_OFFSET 10
+
+/* page number for queue file region */
+#define QM_DOORBELL_PAGE_NR 1
+
+/* uacce mode of the driver */
+#define UACCE_MODE_NOUACCE 0 /* don't use uacce */
+#define UACCE_MODE_SVA 1 /* use uacce sva mode */
+#define UACCE_MODE_DESC "0(default) means only register to crypto, 1 means both register to crypto and uacce"
+
+enum qm_stop_reason {
+ QM_NORMAL,
+ QM_SOFT_RESET,
+ QM_FLR,
+};
+
+enum qm_state {
+ QM_INIT = 0,
+ QM_START,
+ QM_CLOSE,
+ QM_STOP,
+};
+
+enum qp_state {
+ QP_INIT = 1,
+ QP_START,
+ QP_STOP,
+ QP_CLOSE,
+};
+
+enum qm_hw_ver {
+ QM_HW_UNKNOWN = -1,
+ QM_HW_V1 = 0x20,
+ QM_HW_V2 = 0x21,
+ QM_HW_V3 = 0x30,
+};
+
+enum qm_fun_type {
+ QM_HW_PF,
+ QM_HW_VF,
+};
+
+enum qm_debug_file {
+ CURRENT_QM,
+ CURRENT_Q,
+ CLEAR_ENABLE,
+ DEBUG_FILE_NUM,
+};
+
+enum qm_vf_state {
+ QM_READY = 0,
+ QM_NOT_READY,
+};
+
+enum qm_cap_bits {
+ QM_SUPPORT_DB_ISOLATION = 0x0,
+ QM_SUPPORT_FUNC_QOS,
+ QM_SUPPORT_STOP_QP,
+ QM_SUPPORT_MB_COMMAND,
+ QM_SUPPORT_SVA_PREFETCH,
+ QM_SUPPORT_RPM,
+};
+
+struct dfx_diff_registers {
+ u32 *regs;
+ u32 reg_offset;
+ u32 reg_len;
+};
+
+struct qm_dfx {
+ atomic64_t err_irq_cnt;
+ atomic64_t aeq_irq_cnt;
+ atomic64_t abnormal_irq_cnt;
+ atomic64_t create_qp_err_cnt;
+ atomic64_t mb_err_cnt;
+};
+
+struct debugfs_file {
+ enum qm_debug_file index;
+ struct mutex lock;
+ struct qm_debug *debug;
+};
+
+struct qm_debug {
+ u32 curr_qm_qp_num;
+ u32 sqe_mask_offset;
+ u32 sqe_mask_len;
+ struct qm_dfx dfx;
+ struct dentry *debug_root;
+ struct dentry *qm_d;
+ struct debugfs_file files[DEBUG_FILE_NUM];
+ unsigned int *qm_last_words;
+	/* ACC engines recording last regs */
+ unsigned int *last_words;
+ struct dfx_diff_registers *qm_diff_regs;
+ struct dfx_diff_registers *acc_diff_regs;
+};
+
+struct qm_shaper_factor {
+ u32 func_qos;
+ u64 cir_b;
+ u64 cir_u;
+ u64 cir_s;
+ u64 cbs_s;
+};
+
+struct qm_dma {
+ void *va;
+ dma_addr_t dma;
+ size_t size;
+};
+
+struct hisi_qm_status {
+ u32 eq_head;
+ bool eqc_phase;
+ u32 aeq_head;
+ bool aeqc_phase;
+ atomic_t flags;
+ int stop_reason;
+};
+
+struct hisi_qm;
+
+struct hisi_qm_err_info {
+ char *acpi_rst;
+ u32 msi_wr_port;
+ u32 ecc_2bits_mask;
+ u32 qm_shutdown_mask;
+ u32 dev_shutdown_mask;
+ u32 qm_reset_mask;
+ u32 dev_reset_mask;
+ u32 ce;
+ u32 nfe;
+ u32 fe;
+};
+
+struct hisi_qm_err_status {
+ u32 is_qm_ecc_mbit;
+ u32 is_dev_ecc_mbit;
+};
+
+struct hisi_qm_err_ini {
+ int (*hw_init)(struct hisi_qm *qm);
+ void (*hw_err_enable)(struct hisi_qm *qm);
+ void (*hw_err_disable)(struct hisi_qm *qm);
+ u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
+ void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
+ void (*open_axi_master_ooo)(struct hisi_qm *qm);
+ void (*close_axi_master_ooo)(struct hisi_qm *qm);
+ void (*open_sva_prefetch)(struct hisi_qm *qm);
+ void (*close_sva_prefetch)(struct hisi_qm *qm);
+ void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
+ void (*show_last_dfx_regs)(struct hisi_qm *qm);
+ void (*err_info_init)(struct hisi_qm *qm);
+};
+
+struct hisi_qm_cap_info {
+ u32 type;
+ /* Register offset */
+ u32 offset;
+ /* Bit offset in register */
+ u32 shift;
+ u32 mask;
+ u32 v1_val;
+ u32 v2_val;
+ u32 v3_val;
+};
+
+struct hisi_qm_list {
+ struct mutex lock;
+ struct list_head list;
+ int (*register_to_crypto)(struct hisi_qm *qm);
+ void (*unregister_from_crypto)(struct hisi_qm *qm);
+};
+
+struct hisi_qm_poll_data {
+ struct hisi_qm *qm;
+ struct work_struct work;
+ u16 *qp_finish_id;
+};
+
+struct hisi_qm {
+ enum qm_hw_ver ver;
+ enum qm_fun_type fun_type;
+ const char *dev_name;
+ struct pci_dev *pdev;
+ void __iomem *io_base;
+ void __iomem *db_io_base;
+
+	/* Capability version, 0: not supported */
+ u32 cap_ver;
+ u32 sqe_size;
+ u32 qp_base;
+ u32 qp_num;
+ u32 qp_in_used;
+ u32 ctrl_qp_num;
+ u32 max_qp_num;
+ u32 vfs_num;
+ u32 db_interval;
+ u16 eq_depth;
+ u16 aeq_depth;
+ struct list_head list;
+ struct hisi_qm_list *qm_list;
+
+ struct qm_dma qdma;
+ struct qm_sqc *sqc;
+ struct qm_cqc *cqc;
+ struct qm_eqe *eqe;
+ struct qm_aeqe *aeqe;
+ dma_addr_t sqc_dma;
+ dma_addr_t cqc_dma;
+ dma_addr_t eqe_dma;
+ dma_addr_t aeqe_dma;
+
+ struct hisi_qm_status status;
+ const struct hisi_qm_err_ini *err_ini;
+ struct hisi_qm_err_info err_info;
+ struct hisi_qm_err_status err_status;
+ unsigned long misc_ctl; /* driver removing and reset sched */
+ /* Device capability bit */
+ unsigned long caps;
+
+ struct rw_semaphore qps_lock;
+ struct idr qp_idr;
+ struct hisi_qp *qp_array;
+ struct hisi_qm_poll_data *poll_data;
+
+ struct mutex mailbox_lock;
+
+ const struct hisi_qm_hw_ops *ops;
+
+ struct qm_debug debug;
+
+ u32 error_mask;
+
+ struct workqueue_struct *wq;
+ struct work_struct rst_work;
+ struct work_struct cmd_process;
+
+ const char *algs;
+ bool use_sva;
+ bool is_frozen;
+
+ resource_size_t phys_base;
+ resource_size_t db_phys_base;
+ struct uacce_device *uacce;
+ int mode;
+ struct qm_shaper_factor *factor;
+ u32 mb_qos;
+ u32 type_rate;
+};
+
+struct hisi_qp_status {
+ atomic_t used;
+ u16 sq_tail;
+ u16 cq_head;
+ bool cqc_phase;
+ atomic_t flags;
+};
+
+struct hisi_qp_ops {
+ int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
+};
+
+struct hisi_qp {
+ u32 qp_id;
+ u16 sq_depth;
+ u16 cq_depth;
+ u8 alg_type;
+ u8 req_type;
+
+ struct qm_dma qdma;
+ void *sqe;
+ struct qm_cqe *cqe;
+ dma_addr_t sqe_dma;
+ dma_addr_t cqe_dma;
+
+ struct hisi_qp_status qp_status;
+ struct hisi_qp_ops *hw_ops;
+ void *qp_ctx;
+ void (*req_cb)(struct hisi_qp *qp, void *data);
+ void (*event_cb)(struct hisi_qp *qp);
+
+ struct hisi_qm *qm;
+ bool is_resetting;
+ bool is_in_kernel;
+ u16 pasid;
+ struct uacce_queue *uacce_q;
+};
+
+static inline int q_num_set(const char *val, const struct kernel_param *kp,
+ unsigned int device)
+{
+ struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
+ device, NULL);
+ u32 n, q_num;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ if (!pdev) {
+ q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
+ pr_info("No device found currently, suppose queue number is %u\n",
+ q_num);
+ } else {
+ if (pdev->revision == QM_HW_V1)
+ q_num = QM_QNUM_V1;
+ else
+ q_num = QM_QNUM_V2;
+ }
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret || n < QM_MIN_QNUM || n > q_num)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
+{
+ u32 n;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret < 0)
+ return ret;
+
+ if (n > QM_MAX_VFS_NUM_V2)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static inline int mode_set(const char *val, const struct kernel_param *kp)
+{
+ u32 n;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret != 0 || (n != UACCE_MODE_SVA &&
+ n != UACCE_MODE_NOUACCE))
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static inline int uacce_mode_set(const char *val, const struct kernel_param *kp)
+{
+ return mode_set(val, kp);
+}
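+
+/*
+ * Illustrative only (a sketch, not part of the original header): a driver
+ * would typically wire q_num_set() into a module parameter through
+ * module_param_cb(). The device ID and default value below are placeholders.
+ *
+ *	static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+ *	{
+ *		return q_num_set(val, kp, 0xa250);
+ *	}
+ *
+ *	static const struct kernel_param_ops pf_q_num_ops = {
+ *		.set = pf_q_num_set,
+ *		.get = param_get_int,
+ *	};
+ *
+ *	static u32 pf_q_num = 64;
+ *	module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
+ */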
+
+static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
+{
+ INIT_LIST_HEAD(&qm_list->list);
+ mutex_init(&qm_list->lock);
+}
+
+int hisi_qm_init(struct hisi_qm *qm);
+void hisi_qm_uninit(struct hisi_qm *qm);
+int hisi_qm_start(struct hisi_qm *qm);
+int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
+int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
+int hisi_qm_stop_qp(struct hisi_qp *qp);
+int hisi_qp_send(struct hisi_qp *qp, const void *msg);
+void hisi_qm_debug_init(struct hisi_qm *qm);
+void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
+int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
+int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen);
+int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);
+void hisi_qm_dev_err_init(struct hisi_qm *qm);
+void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
+int hisi_qm_diff_regs_init(struct hisi_qm *qm,
+ struct dfx_diff_registers *dregs, int reg_len);
+void hisi_qm_diff_regs_uninit(struct hisi_qm *qm, int reg_len);
+void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
+ struct dfx_diff_registers *dregs, int regs_len);
+
+pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
+void hisi_qm_reset_prepare(struct pci_dev *pdev);
+void hisi_qm_reset_done(struct pci_dev *pdev);
+
+int hisi_qm_wait_mb_ready(struct hisi_qm *qm);
+int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
+ bool op);
+
+struct hisi_acc_sgl_pool;
+struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
+ struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool,
+ u32 index, dma_addr_t *hw_sgl_dma);
+void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
+ struct hisi_acc_hw_sgl *hw_sgl);
+struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
+ u32 count, u32 sge_nr);
+void hisi_acc_free_sgl_pool(struct device *dev,
+ struct hisi_acc_sgl_pool *pool);
+int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
+ u8 alg_type, int node, struct hisi_qp **qps);
+void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
+void hisi_qm_dev_shutdown(struct pci_dev *pdev);
+void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
+int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
+void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
+int hisi_qm_resume(struct device *dev);
+int hisi_qm_suspend(struct device *dev);
+void hisi_qm_pm_uninit(struct hisi_qm *qm);
+void hisi_qm_pm_init(struct hisi_qm *qm);
+int hisi_qm_get_dfx_access(struct hisi_qm *qm);
+void hisi_qm_put_dfx_access(struct hisi_qm *qm);
+void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
+u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
+ const struct hisi_qm_cap_info *info_table,
+ u32 index, bool is_read);
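+
+/*
+ * Illustrative only: hisi_qm_get_hw_info() looks up one entry of a
+ * capability table and returns the value for the current hardware version,
+ * either from the per-version defaults or from the device register the
+ * entry describes. The table and index names below are hypothetical.
+ *
+ *	u32 depth = hisi_qm_get_hw_info(qm, qm_cap_table, QM_EQ_DEPTH_IDX, true);
+ */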
+
+/* Used by VFIO ACC live migration driver */
+struct pci_driver *hisi_sec_get_pf_driver(void);
+struct pci_driver *hisi_hpre_get_pf_driver(void);
+struct pci_driver *hisi_zip_get_pf_driver(void);
+#endif
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index ddf9f7144c43..126a36571667 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -3,245 +3,112 @@
* Copyright 2013 Red Hat Inc.
*
* Authors: Jérôme Glisse <jglisse@redhat.com>
- */
-/*
- * Heterogeneous Memory Management (HMM)
- *
- * See Documentation/vm/hmm.rst for reasons and overview of what HMM is and it
- * is for. Here we focus on the HMM API description, with some explanation of
- * the underlying implementation.
- *
- * Short description: HMM provides a set of helpers to share a virtual address
- * space between CPU and a device, so that the device can access any valid
- * address of the process (while still obeying memory protection). HMM also
- * provides helpers to migrate process memory to device memory, and back. Each
- * set of functionality (address space mirroring, and migration to and from
- * device memory) can be used independently of the other.
- *
- *
- * HMM address space mirroring API:
- *
- * Use HMM address space mirroring if you want to mirror a range of the CPU
- * page tables of a process into a device page table. Here, "mirror" means "keep
- * synchronized". Prerequisites: the device must provide the ability to write-
- * protect its page tables (at PAGE_SIZE granularity), and must be able to
- * recover from the resulting potential page faults.
*
- * HMM guarantees that at any point in time, a given virtual address points to
- * either the same memory in both CPU and device page tables (that is: CPU and
- * device page tables each point to the same pages), or that one page table (CPU
- * or device) points to no entry, while the other still points to the old page
- * for the address. The latter case happens when the CPU page table update
- * happens first, and then the update is mirrored over to the device page table.
- * This does not cause any issue, because the CPU page table cannot start
- * pointing to a new page until the device page table is invalidated.
- *
- * HMM uses mmu_notifiers to monitor the CPU page tables, and forwards any
- * updates to each device driver that has registered a mirror. It also provides
- * some API calls to help with taking a snapshot of the CPU page table, and to
- * synchronize with any updates that might happen concurrently.
- *
- *
- * HMM migration to and from device memory:
- *
- * HMM provides a set of helpers to hotplug device memory as ZONE_DEVICE, with
- * a new MEMORY_DEVICE_PRIVATE type. This provides a struct page for each page
- * of the device memory, and allows the device driver to manage its memory
- * using those struct pages. Having struct pages for device memory makes
- * migration easier. Because that memory is not addressable by the CPU it must
- * never be pinned to the device; in other words, any CPU page fault can always
- * cause the device memory to be migrated (copied/moved) back to regular memory.
- *
- * A new migrate helper (migrate_vma()) has been added (see mm/migrate.c) that
- * allows use of a device DMA engine to perform the copy operation between
- * regular system memory and device memory.
+ * See Documentation/mm/hmm.rst for reasons and overview of what HMM is.
*/
#ifndef LINUX_HMM_H
#define LINUX_HMM_H
-#include <linux/kconfig.h>
-#include <asm/pgtable.h>
+#include <linux/mm.h>
-#include <linux/device.h>
-#include <linux/migrate.h>
-#include <linux/memremap.h>
-#include <linux/completion.h>
-#include <linux/mmu_notifier.h>
+struct mmu_interval_notifier;
/*
- * hmm_pfn_flag_e - HMM flag enums
- *
- * Flags:
- * HMM_PFN_VALID: pfn is valid. It has, at least, read permission.
- * HMM_PFN_WRITE: CPU page table has write permission set
- * HMM_PFN_DEVICE_PRIVATE: private device memory (ZONE_DEVICE)
- *
- * The driver provides a flags array for mapping page protections to device
- * PTE bits. If the driver valid bit for an entry is bit 3,
- * i.e., (entry & (1 << 3)), then the driver must provide
- * an array in hmm_range.flags with hmm_range.flags[HMM_PFN_VALID] == 1 << 3.
- * Same logic apply to all flags. This is the same idea as vm_page_prot in vma
- * except that this is per device driver rather than per architecture.
+ * On output:
+ * 0 - The page is faultable and a future call with
+ * HMM_PFN_REQ_FAULT could succeed.
+ * HMM_PFN_VALID - the pfn field points to a valid PFN. This PFN is at
+ * least readable. If dev_private_owner is !NULL then this could
+ * point at a DEVICE_PRIVATE page.
+ * HMM_PFN_WRITE - if the page memory can be written to (requires HMM_PFN_VALID)
+ * HMM_PFN_ERROR - accessing the pfn is impossible and the device should
+ * fail, e.g. poisoned memory, special pages, no vma, etc.
+ *
+ * On input:
+ * 0 - Return the current state of the page, do not fault it.
+ * HMM_PFN_REQ_FAULT - The output must have HMM_PFN_VALID or hmm_range_fault()
+ * will fail
+ * HMM_PFN_REQ_WRITE - The output must have HMM_PFN_WRITE or hmm_range_fault()
+ * will fail. Must be combined with HMM_PFN_REQ_FAULT.
*/
-enum hmm_pfn_flag_e {
- HMM_PFN_VALID = 0,
- HMM_PFN_WRITE,
- HMM_PFN_DEVICE_PRIVATE,
- HMM_PFN_FLAG_MAX
+enum hmm_pfn_flags {
+ /* Output fields and flags */
+ HMM_PFN_VALID = 1UL << (BITS_PER_LONG - 1),
+ HMM_PFN_WRITE = 1UL << (BITS_PER_LONG - 2),
+ HMM_PFN_ERROR = 1UL << (BITS_PER_LONG - 3),
+ HMM_PFN_ORDER_SHIFT = (BITS_PER_LONG - 8),
+
+ /* Input flags */
+ HMM_PFN_REQ_FAULT = HMM_PFN_VALID,
+ HMM_PFN_REQ_WRITE = HMM_PFN_WRITE,
+
+ HMM_PFN_FLAGS = 0xFFUL << HMM_PFN_ORDER_SHIFT,
};
/*
- * hmm_pfn_value_e - HMM pfn special value
- *
- * Flags:
- * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory
- * HMM_PFN_NONE: corresponding CPU page table entry is pte_none()
- * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the
- * result of vmf_insert_pfn() or vm_insert_page(). Therefore, it should not
- * be mirrored by a device, because the entry will never have HMM_PFN_VALID
- * set and the pfn value is undefined.
- *
- * Driver provides values for none entry, error entry, and special entry.
- * Driver can alias (i.e., use same value) error and special, but
- * it should not alias none with error or special.
+ * hmm_pfn_to_page() - return struct page pointed to by a device entry
*
- * HMM pfn value returned by hmm_vma_get_pfns() or hmm_vma_fault() will be:
- * hmm_range.values[HMM_PFN_ERROR] if CPU page table entry is poisonous,
- * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry,
- * hmm_range.values[HMM_PFN_SPECIAL] if CPU page table entry is a special one
+ * This must be called under the caller's user_lock after a successful
+ * mmu_interval_read_begin(). The caller must have tested for HMM_PFN_VALID
+ * already.
*/
-enum hmm_pfn_value_e {
- HMM_PFN_ERROR,
- HMM_PFN_NONE,
- HMM_PFN_SPECIAL,
- HMM_PFN_VALUE_MAX
-};
+static inline struct page *hmm_pfn_to_page(unsigned long hmm_pfn)
+{
+ return pfn_to_page(hmm_pfn & ~HMM_PFN_FLAGS);
+}
+
+/*
+ * hmm_pfn_to_map_order() - return the CPU mapping size order
+ *
+ * This is optionally useful to optimize processing of the pfn result
+ * array. It indicates that the page starts at the order aligned VA and is
+ * 1<<order bytes long. Every pfn within an high order page will have the
+ * same pfn flags, both access protections and the map_order. The caller must
+ * be careful with edge cases as the start and end VA of the given page may
+ * extend past the range used with hmm_range_fault().
+ *
+ * This must be called under the caller's user_lock after a successful
+ * mmu_interval_read_begin(). The caller must have tested for HMM_PFN_VALID
+ * already.
+ */
+static inline unsigned int hmm_pfn_to_map_order(unsigned long hmm_pfn)
+{
+ return (hmm_pfn >> HMM_PFN_ORDER_SHIFT) & 0x1F;
+}
/*
* struct hmm_range - track invalidation lock on virtual address range
*
* @notifier: a mmu_interval_notifier that includes the start/end
* @notifier_seq: result of mmu_interval_read_begin()
- * @hmm: the core HMM structure this range is active against
- * @vma: the vm area struct for the range
- * @list: all range lock are on a list
* @start: range virtual start address (inclusive)
* @end: range virtual end address (exclusive)
- * @pfns: array of pfns (big enough for the range)
- * @flags: pfn flags to match device driver page table
- * @values: pfn value for some special case (none, special, error, ...)
+ * @hmm_pfns: array of pfns (big enough for the range)
* @default_flags: default flags for the range (write, read, ... see hmm doc)
* @pfn_flags_mask: allows to mask pfn flags so that only default_flags matter
- * @pfn_shifts: pfn shift value (should be <= PAGE_SHIFT)
- * @valid: pfns array did not change since it has been fill by an HMM function
+ * @dev_private_owner: owner of device private pages
*/
struct hmm_range {
struct mmu_interval_notifier *notifier;
unsigned long notifier_seq;
unsigned long start;
unsigned long end;
- uint64_t *pfns;
- const uint64_t *flags;
- const uint64_t *values;
- uint64_t default_flags;
- uint64_t pfn_flags_mask;
- uint8_t pfn_shift;
+ unsigned long *hmm_pfns;
+ unsigned long default_flags;
+ unsigned long pfn_flags_mask;
+ void *dev_private_owner;
};
/*
- * hmm_device_entry_to_page() - return struct page pointed to by a device entry
- * @range: range use to decode device entry value
- * @entry: device entry value to get corresponding struct page from
- * Return: struct page pointer if entry is a valid, NULL otherwise
- *
- * If the device entry is valid (ie valid flag set) then return the struct page
- * matching the entry value. Otherwise return NULL.
+ * Please see Documentation/mm/hmm.rst for how to use the range API.
*/
-static inline struct page *hmm_device_entry_to_page(const struct hmm_range *range,
- uint64_t entry)
-{
- if (entry == range->values[HMM_PFN_NONE])
- return NULL;
- if (entry == range->values[HMM_PFN_ERROR])
- return NULL;
- if (entry == range->values[HMM_PFN_SPECIAL])
- return NULL;
- if (!(entry & range->flags[HMM_PFN_VALID]))
- return NULL;
- return pfn_to_page(entry >> range->pfn_shift);
-}
-
-/*
- * hmm_device_entry_to_pfn() - return pfn value store in a device entry
- * @range: range use to decode device entry value
- * @entry: device entry to extract pfn from
- * Return: pfn value if device entry is valid, -1UL otherwise
- */
-static inline unsigned long
-hmm_device_entry_to_pfn(const struct hmm_range *range, uint64_t pfn)
-{
- if (pfn == range->values[HMM_PFN_NONE])
- return -1UL;
- if (pfn == range->values[HMM_PFN_ERROR])
- return -1UL;
- if (pfn == range->values[HMM_PFN_SPECIAL])
- return -1UL;
- if (!(pfn & range->flags[HMM_PFN_VALID]))
- return -1UL;
- return (pfn >> range->pfn_shift);
-}
-
-/*
- * hmm_device_entry_from_page() - create a valid device entry for a page
- * @range: range use to encode HMM pfn value
- * @page: page for which to create the device entry
- * Return: valid device entry for the page
- */
-static inline uint64_t hmm_device_entry_from_page(const struct hmm_range *range,
- struct page *page)
-{
- return (page_to_pfn(page) << range->pfn_shift) |
- range->flags[HMM_PFN_VALID];
-}
-
-/*
- * hmm_device_entry_from_pfn() - create a valid device entry value from pfn
- * @range: range use to encode HMM pfn value
- * @pfn: pfn value for which to create the device entry
- * Return: valid device entry for the pfn
- */
-static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
- unsigned long pfn)
-{
- return (pfn << range->pfn_shift) |
- range->flags[HMM_PFN_VALID];
-}
-
-/*
- * Retry fault if non-blocking, drop mmap_sem and return -EAGAIN in that case.
- */
-#define HMM_FAULT_ALLOW_RETRY (1 << 0)
-
-/* Don't fault in missing PTEs, just snapshot the current state. */
-#define HMM_FAULT_SNAPSHOT (1 << 1)
-
-#ifdef CONFIG_HMM_MIRROR
-/*
- * Please see Documentation/vm/hmm.rst for how to use the range API.
- */
-long hmm_range_fault(struct hmm_range *range, unsigned int flags);
-#else
-static inline long hmm_range_fault(struct hmm_range *range, unsigned int flags)
-{
- return -EOPNOTSUPP;
-}
-#endif
+int hmm_range_fault(struct hmm_range *range);
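+
+/*
+ * A minimal sketch of the retry loop from Documentation/mm/hmm.rst
+ * (illustration only; 'notifier', 'mm' and 'addr' are assumed to exist):
+ *
+ *	unsigned long pfns[1];
+ *	struct hmm_range range = {
+ *		.notifier = &notifier,
+ *		.start = addr,
+ *		.end = addr + PAGE_SIZE,
+ *		.hmm_pfns = pfns,
+ *		.default_flags = HMM_PFN_REQ_FAULT,
+ *	};
+ *	int ret;
+ *
+ * again:
+ *	range.notifier_seq = mmu_interval_read_begin(&notifier);
+ *	mmap_read_lock(mm);
+ *	ret = hmm_range_fault(&range);
+ *	mmap_read_unlock(mm);
+ *	if (ret == -EBUSY)
+ *		goto again;
+ *	if (ret)
+ *		return ret;
+ *
+ * Then, under the driver's page table lock, check
+ * mmu_interval_read_retry(&notifier, range.notifier_seq) and restart if it
+ * returns true; otherwise the pfns array is stable and can be consumed,
+ * e.g. via hmm_pfn_to_page(pfns[0]).
+ */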
/*
* HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
*
* When waiting for mmu notifiers we need some kind of time out otherwise we
- * could potentialy wait for ever, 1000ms ie 1s sounds like a long time to
+ * could potentially wait forever; 1000ms, i.e. 1s, sounds like a long time to
* wait already.
*/
#define HMM_RANGE_DEFAULT_TIMEOUT 1000
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 62d216ff1097..cb2100d9b0ff 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -7,6 +7,8 @@
#define __LINUX_HOST1X_H
#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
enum host1x_class {
@@ -15,21 +17,57 @@ enum host1x_class {
HOST1X_CLASS_GR2D_SB = 0x52,
HOST1X_CLASS_VIC = 0x5D,
HOST1X_CLASS_GR3D = 0x60,
+ HOST1X_CLASS_NVDEC = 0xF0,
+ HOST1X_CLASS_NVDEC1 = 0xF5,
};
+struct host1x;
struct host1x_client;
struct iommu_group;
+u64 host1x_get_dma_mask(struct host1x *host1x);
+
+/**
+ * struct host1x_bo_cache - host1x buffer object cache
+ * @mappings: list of mappings
+ * @lock: synchronizes accesses to the list of mappings
+ *
+ * Note that entries are not periodically evicted from this cache and instead need to be
+ * explicitly released. This is used primarily for DRM/KMS where the cache's reference is
+ * released when the last reference to a buffer object represented by a mapping in this
+ * cache is dropped.
+ */
+struct host1x_bo_cache {
+ struct list_head mappings;
+ struct mutex lock;
+};
+
+static inline void host1x_bo_cache_init(struct host1x_bo_cache *cache)
+{
+ INIT_LIST_HEAD(&cache->mappings);
+ mutex_init(&cache->lock);
+}
+
+static inline void host1x_bo_cache_destroy(struct host1x_bo_cache *cache)
+{
+ /* XXX warn if not empty? */
+ mutex_destroy(&cache->lock);
+}
+
/**
* struct host1x_client_ops - host1x client operations
+ * @early_init: host1x client early initialization code
* @init: host1x client initialization code
* @exit: host1x client tear down code
+ * @late_exit: host1x client late tear down code
* @suspend: host1x client suspend code
* @resume: host1x client resume code
*/
struct host1x_client_ops {
+ int (*early_init)(struct host1x_client *client);
int (*init)(struct host1x_client *client);
int (*exit)(struct host1x_client *client);
+ int (*late_exit)(struct host1x_client *client);
int (*suspend)(struct host1x_client *client);
int (*resume)(struct host1x_client *client);
};
@@ -45,6 +83,10 @@ struct host1x_client_ops {
* @channel: host1x channel associated with this client
* @syncpts: array of syncpoints requested for this client
* @num_syncpts: number of syncpoints requested for this client
+ * @parent: pointer to parent structure
+ * @usecount: reference count for this structure
+ * @lock: mutex for mutually exclusive concurrency
+ * @cache: host1x buffer object cache
*/
struct host1x_client {
struct list_head list;
@@ -63,6 +105,8 @@ struct host1x_client {
struct host1x_client *parent;
unsigned int usecount;
struct mutex lock;
+
+ struct host1x_bo_cache cache;
};
/*
@@ -72,23 +116,48 @@ struct host1x_client {
struct host1x_bo;
struct sg_table;
+struct host1x_bo_mapping {
+ struct kref ref;
+ struct dma_buf_attachment *attach;
+ enum dma_data_direction direction;
+ struct list_head list;
+ struct host1x_bo *bo;
+ struct sg_table *sgt;
+ unsigned int chunks;
+ struct device *dev;
+ dma_addr_t phys;
+ size_t size;
+
+ struct host1x_bo_cache *cache;
+ struct list_head entry;
+};
+
+static inline struct host1x_bo_mapping *to_host1x_bo_mapping(struct kref *ref)
+{
+ return container_of(ref, struct host1x_bo_mapping, ref);
+}
+
struct host1x_bo_ops {
struct host1x_bo *(*get)(struct host1x_bo *bo);
void (*put)(struct host1x_bo *bo);
- struct sg_table *(*pin)(struct device *dev, struct host1x_bo *bo,
- dma_addr_t *phys);
- void (*unpin)(struct device *dev, struct sg_table *sgt);
+ struct host1x_bo_mapping *(*pin)(struct device *dev, struct host1x_bo *bo,
+ enum dma_data_direction dir);
+ void (*unpin)(struct host1x_bo_mapping *map);
void *(*mmap)(struct host1x_bo *bo);
void (*munmap)(struct host1x_bo *bo, void *addr);
};
struct host1x_bo {
const struct host1x_bo_ops *ops;
+ struct list_head mappings;
+ spinlock_t lock;
};
static inline void host1x_bo_init(struct host1x_bo *bo,
const struct host1x_bo_ops *ops)
{
+ INIT_LIST_HEAD(&bo->mappings);
+ spin_lock_init(&bo->lock);
bo->ops = ops;
}
@@ -102,18 +171,10 @@ static inline void host1x_bo_put(struct host1x_bo *bo)
bo->ops->put(bo);
}
-static inline struct sg_table *host1x_bo_pin(struct device *dev,
- struct host1x_bo *bo,
- dma_addr_t *phys)
-{
- return bo->ops->pin(dev, bo, phys);
-}
-
-static inline void host1x_bo_unpin(struct device *dev, struct host1x_bo *bo,
- struct sg_table *sgt)
-{
- bo->ops->unpin(dev, sgt);
-}
+struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
+ enum dma_data_direction dir,
+ struct host1x_bo_cache *cache);
+void host1x_bo_unpin(struct host1x_bo_mapping *map);
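+
+/*
+ * Illustrative only (names assumed): map a buffer object for DMA through a
+ * client's cache and release the mapping when done.
+ *
+ *	struct host1x_bo_mapping *map;
+ *
+ *	map = host1x_bo_pin(client->dev, bo, DMA_TO_DEVICE, &client->cache);
+ *	if (IS_ERR(map))
+ *		return PTR_ERR(map);
+ *	... program map->phys into the engine ...
+ *	host1x_bo_unpin(map);
+ */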
static inline void *host1x_bo_mmap(struct host1x_bo *bo)
{
@@ -136,7 +197,9 @@ struct host1x_syncpt_base;
struct host1x_syncpt;
struct host1x;
-struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
+struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host, u32 id);
+struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host, u32 id);
+struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp);
u32 host1x_syncpt_id(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
@@ -147,11 +210,19 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
u32 *value);
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
unsigned long flags);
-void host1x_syncpt_free(struct host1x_syncpt *sp);
+void host1x_syncpt_put(struct host1x_syncpt *sp);
+struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
+ unsigned long flags,
+ const char *name);
struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
+void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
+ u32 syncpt_id);
+
+struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold);
+
/*
* host1x channel
*/
@@ -161,6 +232,7 @@ struct host1x_job;
struct host1x_channel *host1x_channel_request(struct host1x_client *client);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
+void host1x_channel_stop(struct host1x_channel *channel);
void host1x_channel_put(struct host1x_channel *channel);
int host1x_job_submit(struct host1x_job *job);
@@ -198,8 +270,8 @@ struct host1x_job {
struct host1x_client *client;
/* Gathers and their memory */
- struct host1x_job_gather *gathers;
- unsigned int num_gathers;
+ struct host1x_job_cmd *cmds;
+ unsigned int num_cmds;
/* Array of handles to be pinned & unpinned */
struct host1x_reloc *relocs;
@@ -212,13 +284,19 @@ struct host1x_job {
dma_addr_t *reloc_addr_phys;
/* Sync point id, number of increments and end related to the submit */
- u32 syncpt_id;
+ struct host1x_syncpt *syncpt;
u32 syncpt_incrs;
u32 syncpt_end;
+ /* Completion waiter ref */
+ void *waiter;
+
/* Maximum time to wait for this job */
unsigned int timeout;
+ /* Job has timed out and should be released */
+ bool cancelled;
+
/* Index and number of slots used in the push buffer */
unsigned int first_get;
unsigned int num_slots;
@@ -239,12 +317,33 @@ struct host1x_job {
/* Add a channel wait for previous ops to complete */
bool serialize;
+
+ /* Fast-forward syncpoint increments on job timeout */
+ bool syncpt_recovery;
+
+ /* Callback called when job is freed */
+ void (*release)(struct host1x_job *job);
+ void *user_data;
+
+	/* Whether the host1x-side firewall should be run for this job or not */
+ bool enable_firewall;
+
+ /* Options for configuring engine data stream ID */
+ /* Context device to use for job */
+ struct host1x_memory_context *memory_context;
+ /* Stream ID to use if context isolation is disabled (!memory_context) */
+ u32 engine_fallback_streamid;
+ /* Engine offset to program stream ID to */
+ u32 engine_streamid_offset;
};
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
- u32 num_cmdbufs, u32 num_relocs);
+ u32 num_cmdbufs, u32 num_relocs,
+ bool skip_firewall);
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
unsigned int words, unsigned int offset);
+void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
+ bool relative, u32 next_class);
struct host1x_job *host1x_job_get(struct host1x_job *job);
void host1x_job_put(struct host1x_job *job);
int host1x_job_pin(struct host1x_job *job, struct device *dev);
@@ -314,7 +413,32 @@ static inline struct host1x_device *to_host1x_device(struct device *dev)
int host1x_device_init(struct host1x_device *device);
int host1x_device_exit(struct host1x_device *device);
-int host1x_client_register(struct host1x_client *client);
+void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
+void host1x_client_exit(struct host1x_client *client);
+
+#define host1x_client_init(client) \
+ ({ \
+ static struct lock_class_key __key; \
+ __host1x_client_init(client, &__key); \
+ })
+
+int __host1x_client_register(struct host1x_client *client);
+
+/*
+ * Note that this wrapper calls __host1x_client_init() for compatibility
+ * with existing callers. Callers that want to separately initialize and
+ * register a host1x client must first initialize using either of the
+ * __host1x_client_init() or host1x_client_init() functions and then use
+ * the low-level __host1x_client_register() function to avoid the client
+ * getting reinitialized.
+ */
+#define host1x_client_register(client) \
+ ({ \
+ static struct lock_class_key __key; \
+ __host1x_client_init(client, &__key); \
+ __host1x_client_register(client); \
+ })
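+
+/*
+ * Illustrative only: a driver that needs the two steps separately would do
+ * something like the following instead of calling host1x_client_register():
+ *
+ *	host1x_client_init(client);
+ *	... fill in client->ops, client->dev, etc. ...
+ *	err = __host1x_client_register(client);
+ */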
+
int host1x_client_unregister(struct host1x_client *client);
int host1x_client_suspend(struct host1x_client *client);
@@ -322,10 +446,46 @@ int host1x_client_resume(struct host1x_client *client);
struct tegra_mipi_device;
-struct tegra_mipi_device *tegra_mipi_request(struct device *device);
+struct tegra_mipi_device *tegra_mipi_request(struct device *device,
+ struct device_node *np);
void tegra_mipi_free(struct tegra_mipi_device *device);
int tegra_mipi_enable(struct tegra_mipi_device *device);
int tegra_mipi_disable(struct tegra_mipi_device *device);
-int tegra_mipi_calibrate(struct tegra_mipi_device *device);
+int tegra_mipi_start_calibration(struct tegra_mipi_device *device);
+int tegra_mipi_finish_calibration(struct tegra_mipi_device *device);
+
+/* host1x memory contexts */
+
+struct host1x_memory_context {
+ struct host1x *host;
+
+ refcount_t ref;
+ struct pid *owner;
+
+ struct device dev;
+ u64 dma_mask;
+ u32 stream_id;
+};
+
+#ifdef CONFIG_IOMMU_API
+struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
+ struct pid *pid);
+void host1x_memory_context_get(struct host1x_memory_context *cd);
+void host1x_memory_context_put(struct host1x_memory_context *cd);
+#else
+static inline struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
+ struct pid *pid)
+{
+ return NULL;
+}
+
+static inline void host1x_memory_context_get(struct host1x_memory_context *cd)
+{
+}
+
+static inline void host1x_memory_context_put(struct host1x_memory_context *cd)
+{
+}
+#endif
#endif
diff --git a/include/linux/host1x_context_bus.h b/include/linux/host1x_context_bus.h
new file mode 100644
index 000000000000..72462737a6db
--- /dev/null
+++ b/include/linux/host1x_context_bus.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
+ */
+
+#ifndef __LINUX_HOST1X_CONTEXT_BUS_H
+#define __LINUX_HOST1X_CONTEXT_BUS_H
+
+#include <linux/device.h>
+
+#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
+extern struct bus_type host1x_context_device_bus_type;
+#endif
+
+#endif
diff --git a/include/linux/hp_sdc.h b/include/linux/hp_sdc.h
index 6f1dee7e67e0..9be8704e2d38 100644
--- a/include/linux/hp_sdc.h
+++ b/include/linux/hp_sdc.h
@@ -180,7 +180,7 @@ switch (val) { \
#define HP_SDC_CMD_SET_IM 0x40 /* 010xxxxx == set irq mask */
-/* The documents provided do not explicitly state that all registers betweem
+/* The documents provided do not explicitly state that all registers between
* 0x01 and 0x1f inclusive can be read by sending their register index as a
* command, but this is implied and appears to be the case.
*/
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 15c8ac313678..0ee140176f10 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/percpu.h>
+#include <linux/seqlock.h>
#include <linux/timer.h>
#include <linux/timerqueue.h>
@@ -159,7 +160,7 @@ struct hrtimer_clock_base {
struct hrtimer_cpu_base *cpu_base;
unsigned int index;
clockid_t clockid;
- seqcount_t seq;
+ seqcount_raw_spinlock_t seq;
struct hrtimer *running;
struct timerqueue_head active;
ktime_t (*get_time)(void);
@@ -317,16 +318,12 @@ struct clock_event_device;
extern void hrtimer_interrupt(struct clock_event_device *dev);
-extern void clock_was_set_delayed(void);
-
extern unsigned int hrtimer_resolution;
#else
#define hrtimer_resolution (unsigned int)LOW_RES_NSEC
-static inline void clock_was_set_delayed(void) { }
-
#endif
static inline ktime_t
@@ -350,13 +347,13 @@ hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
timer->base->get_time());
}
-extern void clock_was_set(void);
#ifdef CONFIG_TIMERFD
extern void timerfd_clock_was_set(void);
+extern void timerfd_resume(void);
#else
static inline void timerfd_clock_was_set(void) { }
+static inline void timerfd_resume(void) { }
#endif
-extern void hrtimers_resume(void);
DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
@@ -446,6 +443,10 @@ static inline void hrtimer_restart(struct hrtimer *timer)
/* Query timers: */
extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
+/**
+ * hrtimer_get_remaining - get remaining time for the timer
+ * @timer: the timer to read
+ */
static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
return __hrtimer_get_remaining(timer, false);
@@ -457,7 +458,7 @@ extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);
extern bool hrtimer_active(const struct hrtimer *timer);
/**
- * hrtimer_is_queued = check, whether the timer is on one of the queues
+ * hrtimer_is_queued - check, whether the timer is on one of the queues
* @timer: Timer to check
*
* Returns: True if the timer is queued, false otherwise
diff --git a/include/linux/hrtimer_api.h b/include/linux/hrtimer_api.h
new file mode 100644
index 000000000000..8d9700894468
--- /dev/null
+++ b/include/linux/hrtimer_api.h
@@ -0,0 +1 @@
+#include <linux/hrtimer.h>
diff --git a/include/linux/htcpld.h b/include/linux/htcpld.h
index 842fce69ac06..5f8ac9b1d724 100644
--- a/include/linux/htcpld.h
+++ b/include/linux/htcpld.h
@@ -13,8 +13,6 @@ struct htcpld_chip_platform_data {
};
struct htcpld_core_platform_data {
- unsigned int int_reset_gpio_hi;
- unsigned int int_reset_gpio_lo;
unsigned int i2c_adapter_id;
struct htcpld_chip_platform_data *chip;
diff --git a/include/linux/hte.h b/include/linux/hte.h
new file mode 100644
index 000000000000..8289055061ab
--- /dev/null
+++ b/include/linux/hte.h
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_HTE_H
+#define __LINUX_HTE_H
+
+#include <linux/errno.h>
+
+struct hte_chip;
+struct hte_device;
+struct of_phandle_args;
+
+/**
+ * enum hte_edge - HTE line edge flags.
+ *
+ * @HTE_EDGE_NO_SETUP: No edge setup. In this case the consumer will set up
+ * edges, for example during the request irq call.
+ * @HTE_RISING_EDGE_TS: Rising edge.
+ * @HTE_FALLING_EDGE_TS: Falling edge.
+ *
+ */
+enum hte_edge {
+ HTE_EDGE_NO_SETUP = 1U << 0,
+ HTE_RISING_EDGE_TS = 1U << 1,
+ HTE_FALLING_EDGE_TS = 1U << 2,
+};
+
+/**
+ * enum hte_return - HTE subsystem return values used during callback.
+ *
+ * @HTE_CB_HANDLED: The consumer handled the data.
+ * @HTE_RUN_SECOND_CB: The consumer needs further processing; in that case
+ * the HTE subsystem calls the secondary callback provided by the consumer,
+ * where it is allowed to sleep.
+ */
+enum hte_return {
+ HTE_CB_HANDLED,
+ HTE_RUN_SECOND_CB,
+};
+
+/**
+ * struct hte_ts_data - HTE timestamp data.
+ *
+ * @tsc: Timestamp value.
+ * @seq: Sequence counter of the timestamps.
+ * @raw_level: Level of the line at the timestamp if provider supports it,
+ * -1 otherwise.
+ */
+struct hte_ts_data {
+ u64 tsc;
+ u64 seq;
+ int raw_level;
+};
+
+/**
+ * struct hte_clk_info - Clock source info that HTE provider uses to timestamp.
+ *
+ * @hz: Supported clock rate in Hz, for example a 1 kHz clock = 1000.
+ * @type: Supported clock type.
+ */
+struct hte_clk_info {
+ u64 hz;
+ clockid_t type;
+};
+
+/**
+ * typedef hte_ts_cb_t - HTE timestamp data processing primary callback.
+ *
+ * The callback is used to push timestamp data to the client and it is
+ * not allowed to sleep.
+ *
+ * @ts: HW timestamp data.
+ * @data: Client supplied data.
+ */
+typedef enum hte_return (*hte_ts_cb_t)(struct hte_ts_data *ts, void *data);
+
+/**
+ * typedef hte_ts_sec_cb_t - HTE timestamp data processing secondary callback.
+ *
+ * This is used when the client needs further processing where it is
+ * allowed to sleep.
+ *
+ * @data: Client supplied data.
+ *
+ */
+typedef enum hte_return (*hte_ts_sec_cb_t)(void *data);
+
+/**
+ * struct hte_line_attr - Line attributes.
+ *
+ * @line_id: The logical ID understood by the consumers and providers.
+ * @line_data: Line data related to line_id.
+ * @edge_flags: Edge setup flags.
+ * @name: Descriptive name of the entity that is being monitored for the
+ * hardware timestamping. If NULL, the HTE core will construct the name.
+ *
+ */
+struct hte_line_attr {
+ u32 line_id;
+ void *line_data;
+ unsigned long edge_flags;
+ const char *name;
+};
+
+/**
+ * struct hte_ts_desc - HTE timestamp descriptor.
+ *
+ * This structure is a communication token between consumers and the
+ * subsystem, and between the subsystem and providers.
+ *
+ * @attr: The line attributes.
+ * @hte_data: Subsystem's private data, set by HTE subsystem.
+ */
+struct hte_ts_desc {
+ struct hte_line_attr attr;
+ void *hte_data;
+};
+
+/**
+ * struct hte_ops - HTE operations set by providers.
+ *
+ * @request: Hook for requesting an HTE timestamp. Returns 0 on success,
+ * non-zero on failure.
+ * @release: Hook for releasing an HTE timestamp. Returns 0 on success,
+ * non-zero on failure.
+ * @enable: Hook to enable the specified timestamp. Returns 0 on success,
+ * non-zero on failure.
+ * @disable: Hook to disable the specified timestamp. Returns 0 on success,
+ * non-zero on failure.
+ * @get_clk_src_info: Hook to get the clock information the provider uses
+ * to timestamp. Returns 0 on success and a negative error code on failure.
+ * On success the HTE subsystem fills in the provided struct hte_clk_info.
+ *
+ * The xlated_id parameter is used to communicate between the HTE subsystem
+ * and the providers, and is translated by the provider.
+ */
+struct hte_ops {
+ int (*request)(struct hte_chip *chip, struct hte_ts_desc *desc,
+ u32 xlated_id);
+ int (*release)(struct hte_chip *chip, struct hte_ts_desc *desc,
+ u32 xlated_id);
+ int (*enable)(struct hte_chip *chip, u32 xlated_id);
+ int (*disable)(struct hte_chip *chip, u32 xlated_id);
+ int (*get_clk_src_info)(struct hte_chip *chip,
+ struct hte_clk_info *ci);
+};
+
+/**
+ * struct hte_chip - Abstract HTE chip.
+ *
+ * @name: functional name of the HTE IP block.
+ * @dev: device providing the HTE.
+ * @ops: callbacks for this HTE.
+ * @nlines: number of lines/signals supported by this chip.
+ * @xlate_of: Callback which translates consumer-supplied logical IDs to
+ * physical IDs; returns 0 on success and a negative error code on failure.
+ * On success it stores the physical ID (between 0 and @nlines) in the
+ * xlated_id parameter.
+ * @xlate_plat: Same as above but for the consumers with no DT node.
+ * @match_from_linedata: Match HTE device using the line_data.
+ * @of_hte_n_cells: Number of cells used to form the HTE specifier.
+ * @gdev: HTE subsystem abstract device, internal to the HTE subsystem.
+ * @data: chip specific private data.
+ */
+struct hte_chip {
+ const char *name;
+ struct device *dev;
+ const struct hte_ops *ops;
+ u32 nlines;
+ int (*xlate_of)(struct hte_chip *gc,
+ const struct of_phandle_args *args,
+ struct hte_ts_desc *desc, u32 *xlated_id);
+ int (*xlate_plat)(struct hte_chip *gc, struct hte_ts_desc *desc,
+ u32 *xlated_id);
+ bool (*match_from_linedata)(const struct hte_chip *chip,
+ const struct hte_ts_desc *hdesc);
+ u8 of_hte_n_cells;
+
+ struct hte_device *gdev;
+ void *data;
+};
+
+#if IS_ENABLED(CONFIG_HTE)
+/* HTE APIs for the providers */
+int devm_hte_register_chip(struct hte_chip *chip);
+int hte_push_ts_ns(const struct hte_chip *chip, u32 xlated_id,
+ struct hte_ts_data *data);
+
+/* HTE APIs for the consumers */
+int hte_init_line_attr(struct hte_ts_desc *desc, u32 line_id,
+ unsigned long edge_flags, const char *name,
+ void *data);
+int hte_ts_get(struct device *dev, struct hte_ts_desc *desc, int index);
+int hte_ts_put(struct hte_ts_desc *desc);
+int hte_request_ts_ns(struct hte_ts_desc *desc, hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb, void *data);
+int devm_hte_request_ts_ns(struct device *dev, struct hte_ts_desc *desc,
+ hte_ts_cb_t cb, hte_ts_sec_cb_t tcb, void *data);
+int of_hte_req_count(struct device *dev);
+int hte_enable_ts(struct hte_ts_desc *desc);
+int hte_disable_ts(struct hte_ts_desc *desc);
+int hte_get_clk_src_info(const struct hte_ts_desc *desc,
+ struct hte_clk_info *ci);
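+
+/*
+ * Illustrative consumer flow (a sketch under assumed names, not a
+ * definitive sequence): obtain line 0 from the device tree, then request
+ * hardware timestamps with a primary callback.
+ *
+ *	static enum hte_return my_cb(struct hte_ts_data *ts, void *data)
+ *	{
+ *		... consume ts->tsc and ts->seq without sleeping ...
+ *		return HTE_CB_HANDLED;
+ *	}
+ *
+ *	struct hte_ts_desc desc;
+ *	int ret;
+ *
+ *	ret = hte_ts_get(dev, &desc, 0);
+ *	if (!ret)
+ *		ret = devm_hte_request_ts_ns(dev, &desc, my_cb, NULL, NULL);
+ *
+ * Delivery can later be paused and resumed with hte_disable_ts() and
+ * hte_enable_ts().
+ */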
+
+#else /* !CONFIG_HTE */
+static inline int devm_hte_register_chip(struct hte_chip *chip)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_push_ts_ns(const struct hte_chip *chip,
+ u32 xlated_id,
+ const struct hte_ts_data *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_init_line_attr(struct hte_ts_desc *desc, u32 line_id,
+ unsigned long edge_flags,
+ const char *name, void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_ts_get(struct device *dev, struct hte_ts_desc *desc,
+ int index)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_ts_put(struct hte_ts_desc *desc)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_request_ts_ns(struct hte_ts_desc *desc, hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb, void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int devm_hte_request_ts_ns(struct device *dev,
+ struct hte_ts_desc *desc,
+ hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb,
+ void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int of_hte_req_count(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_enable_ts(struct hte_ts_desc *desc)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_disable_ts(struct hte_ts_desc *desc)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_get_clk_src_info(const struct hte_ts_desc *desc,
+ struct hte_clk_info *ci)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* !CONFIG_HTE */
+
+#endif
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 5aca3d1bdb32..a1341fdcf666 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -7,49 +7,79 @@
#include <linux/fs.h> /* only for vma_is_dax() */
-extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
-extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
- pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
- struct vm_area_struct *vma);
-extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
-extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
- pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
- struct vm_area_struct *vma);
+vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
+int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
+ struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
+void huge_pmd_set_accessed(struct vm_fault *vmf);
+int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
+ struct vm_area_struct *vma);
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
+void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif
-extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
-extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
- unsigned long addr,
- pmd_t *pmd,
- unsigned int flags);
-extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
- struct vm_area_struct *vma,
- pmd_t *pmd, unsigned long addr, unsigned long next);
-extern int zap_huge_pmd(struct mmu_gather *tlb,
- struct vm_area_struct *vma,
- pmd_t *pmd, unsigned long addr);
-extern int zap_huge_pud(struct mmu_gather *tlb,
- struct vm_area_struct *vma,
- pud_t *pud, unsigned long addr);
-extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, unsigned long end,
- unsigned char *vec);
-extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
- unsigned long new_addr, unsigned long old_end,
- pmd_t *old_pmd, pmd_t *new_pmd);
-extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, pgprot_t newprot,
- int prot_numa);
-vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
-vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
+vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
+struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmd,
+ unsigned int flags);
+bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pmd_t *pmd, unsigned long addr, unsigned long next);
+int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
+ unsigned long addr);
+int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
+ unsigned long addr);
+bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
+ unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
+int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pmd_t *pmd, unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags);
+vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
+ pgprot_t pgprot, bool write);
+
+/**
+ * vmf_insert_pfn_pmd - insert a pmd size pfn
+ * @vmf: Structure describing the fault
+ * @pfn: pfn to insert
+ * @pgprot: page protection to use
+ * @write: whether it's a write fault
+ *
+ * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
+ *
+ * Return: vm_fault_t value.
+ */
+static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
+ bool write)
+{
+ return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
+}
+vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
+ pgprot_t pgprot, bool write);
+
+/**
+ * vmf_insert_pfn_pud - insert a pud size pfn
+ * @vmf: Structure describing the fault
+ * @pfn: pfn to insert
+ * @pgprot: page protection to use
+ * @write: whether it's a write fault
+ *
+ * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
+ *
+ * Return: vm_fault_t value.
+ */
+static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
+ bool write)
+{
+ return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
+}
+
enum transparent_hugepage_flag {
+ TRANSPARENT_HUGEPAGE_NEVER_DAX,
TRANSPARENT_HUGEPAGE_FLAG,
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
@@ -58,21 +88,18 @@ enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
-#ifdef CONFIG_DEBUG_VM
- TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
-#endif
};
struct kobject;
struct kobj_attribute;
-extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count,
- enum transparent_hugepage_flag flag);
-extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf,
- enum transparent_hugepage_flag flag);
+ssize_t single_hugepage_flag_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count,
+ enum transparent_hugepage_flag flag);
+ssize_t single_hugepage_flag_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf,
+ enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
@@ -87,82 +114,74 @@ extern struct kobj_attribute shmem_enabled_attr;
#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
-extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
-
extern unsigned long transparent_hugepage_flags;
+#define hugepage_flags_enabled() \
+ (transparent_hugepage_flags & \
+ ((1<<TRANSPARENT_HUGEPAGE_FLAG) | \
+ (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
+#define hugepage_flags_always() \
+ (transparent_hugepage_flags & \
+ (1<<TRANSPARENT_HUGEPAGE_FLAG))
+
/*
- * to be used on vmas which are known to support THP.
- * Use transparent_hugepage_enabled otherwise
+ * Perform the following checks:
+ * - For file vma, check if the linear page offset of vma is
+ * HPAGE_PMD_NR aligned within the file. The hugepage is
+ * guaranteed to be hugepage-aligned within the file, but we must
+ * check that the PMD-aligned addresses in the VMA map to
+ * PMD-aligned offsets within the file, else the hugepage will
+ * not be PMD-mappable.
+ * - For all vmas, check if the haddr is in an aligned HPAGE_PMD_SIZE
+ * area.
*/
-static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
-{
- if (vma->vm_flags & VM_NOHUGEPAGE)
- return false;
-
- if (is_vma_temporary_stack(vma))
- return false;
-
- if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
- return false;
-
- if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
- return true;
- /*
- * For dax vmas, try to always use hugepage mappings. If the kernel does
- * not support hugepages, fsdax mappings will fallback to PAGE_SIZE
- * mappings, and device-dax namespaces, that try to guarantee a given
- * mapping size, will fail to enable
- */
- if (vma_is_dax(vma))
- return true;
-
- if (transparent_hugepage_flags &
- (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
- return !!(vma->vm_flags & VM_HUGEPAGE);
-
- return false;
-}
-
-bool transparent_hugepage_enabled(struct vm_area_struct *vma);
-
-#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)
-
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
- unsigned long haddr)
+ unsigned long addr)
{
+ unsigned long haddr;
+
/* Don't have to check pgoff for anonymous vma */
if (!vma_is_anonymous(vma)) {
- if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
- (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
+ if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
+ HPAGE_PMD_NR))
return false;
}
+ haddr = addr & HPAGE_PMD_MASK;
+
if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
return false;
return true;
}
+static inline bool file_thp_enabled(struct vm_area_struct *vma)
+{
+ struct inode *inode;
+
+ if (!vma->vm_file)
+ return false;
+
+ inode = vma->vm_file->f_inode;
+
+ return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
+ (vma->vm_flags & VM_EXEC) &&
+ !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
+}
+
+bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
+ bool smaps, bool in_pf, bool enforce_sysfs);
+
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
-#ifdef CONFIG_DEBUG_VM
-#define transparent_hugepage_debug_cow() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
-#else /* CONFIG_DEBUG_VM */
-#define transparent_hugepage_debug_cow() 0
-#endif /* CONFIG_DEBUG_VM */
-extern unsigned long thp_get_unmapped_area(struct file *filp,
- unsigned long addr, unsigned long len, unsigned long pgoff,
- unsigned long flags);
+unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags);
-extern void prep_transhuge_page(struct page *page);
-extern void free_transhuge_page(struct page *page);
-bool is_transparent_hugepage(struct page *page);
+void prep_transhuge_page(struct page *page);
+void free_transhuge_page(struct page *page);
-bool can_split_huge_page(struct page *page, int *pextra_pins);
+bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
@@ -171,7 +190,7 @@ static inline int split_huge_page(struct page *page)
void deferred_split_huge_page(struct page *page);
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address, bool freeze, struct page *page);
+ unsigned long address, bool freeze, struct folio *folio);
#define split_huge_pmd(__vma, __pmd, __address) \
do { \
@@ -184,7 +203,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
- bool freeze, struct page *page);
+ bool freeze, struct folio *folio);
void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
unsigned long address);
@@ -197,23 +216,22 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
__split_huge_pud(__vma, __pud, __address); \
} while (0)
-extern int hugepage_madvise(struct vm_area_struct *vma,
- unsigned long *vm_flags, int advice);
-extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end,
- long adjust_next);
-extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
- struct vm_area_struct *vma);
-extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
- struct vm_area_struct *vma);
+int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
+ int advice);
+int madvise_collapse(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
+ unsigned long start, unsigned long end);
+void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, long adjust_next);
+spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
+spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);
static inline int is_swap_pmd(pmd_t pmd)
{
return !pmd_none(pmd) && !pmd_present(pmd);
}
-/* mmap_sem must be held on entry */
+/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
{
@@ -230,11 +248,14 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
else
return NULL;
}
-static inline int hpage_nr_pages(struct page *page)
+
+/**
+ * folio_test_pmd_mappable - Can we map this folio with a PMD?
+ * @folio: The folio to test
+ */
+static inline bool folio_test_pmd_mappable(struct folio *folio)
{
- if (unlikely(PageTransHuge(page)))
- return HPAGE_PMD_NR;
- return 1;
+ return folio_order(folio) >= HPAGE_PMD_ORDER;
}
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
@@ -242,9 +263,10 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
pud_t *pud, int flags, struct dev_pagemap **pgmap);
-extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
+vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);
extern struct page *huge_zero_page;
+extern unsigned long huge_zero_pfn;
static inline bool is_huge_zero_page(struct page *page)
{
@@ -253,7 +275,7 @@ static inline bool is_huge_zero_page(struct page *page)
static inline bool is_huge_zero_pmd(pmd_t pmd)
{
- return is_huge_zero_page(pmd_page(pmd));
+ return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}
static inline bool is_huge_zero_pud(pud_t pud)
@@ -274,8 +296,8 @@ static inline bool thp_migration_supported(void)
static inline struct list_head *page_deferred_list(struct page *page)
{
/*
- * Global or memcg deferred list in the second tail pages is
- * occupied by compound_head.
+ * See organization of tail pages of compound page in
+ * "struct page" definition.
*/
return &page[2].deferred_list;
}
@@ -289,39 +311,33 @@ static inline struct list_head *page_deferred_list(struct page *page)
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
-#define hpage_nr_pages(x) 1
-
-static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
+static inline bool folio_test_pmd_mappable(struct folio *folio)
{
return false;
}
-static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
+static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
+ unsigned long addr)
{
return false;
}
-static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
- unsigned long haddr)
+static inline bool hugepage_vma_check(struct vm_area_struct *vma,
+ unsigned long vm_flags, bool smaps,
+ bool in_pf, bool enforce_sysfs)
{
return false;
}
static inline void prep_transhuge_page(struct page *page) {}
-static inline bool is_transparent_hugepage(struct page *page)
-{
- return false;
-}
-
#define transparent_hugepage_flags 0UL
#define thp_get_unmapped_area NULL
static inline bool
-can_split_huge_page(struct page *page, int *pextra_pins)
+can_split_folio(struct folio *folio, int *pextra_pins)
{
- BUILD_BUG();
return false;
}
static inline int
@@ -338,9 +354,9 @@ static inline void deferred_split_huge_page(struct page *page) {}
do { } while (0)
static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address, bool freeze, struct page *page) {}
+ unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
- unsigned long address, bool freeze, struct page *page) {}
+ unsigned long address, bool freeze, struct folio *folio) {}
#define split_huge_pud(__vma, __pmd, __address) \
do { } while (0)
@@ -348,9 +364,16 @@ static inline void split_huge_pmd_address(struct vm_area_struct *vma,
static inline int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice)
{
- BUG();
- return 0;
+ return -EINVAL;
+}
+
+static inline int madvise_collapse(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
+ unsigned long start, unsigned long end)
+{
+ return -EINVAL;
}
+
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
@@ -372,8 +395,7 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
return NULL;
}
-static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
- pmd_t orig_pmd)
+static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
return 0;
}
@@ -383,6 +405,11 @@ static inline bool is_huge_zero_page(struct page *page)
return false;
}
+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+ return false;
+}
+
static inline bool is_huge_zero_pud(pud_t pud)
{
return false;
@@ -411,4 +438,27 @@ static inline bool thp_migration_supported(void)
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline int split_folio_to_list(struct folio *folio,
+ struct list_head *list)
+{
+ return split_huge_page_to_list(&folio->page, list);
+}
+
+static inline int split_folio(struct folio *folio)
+{
+ return split_folio_to_list(folio, NULL);
+}
+
+/*
+ * Architectures that select ARCH_WANTS_THP_SWAP but cannot support THP_SWP
+ * due to implementation limitations, such as arm64 MTE, can override this
+ * to false.
+ */
+#ifndef arch_thp_swp_supported
+static inline bool arch_thp_swp_supported(void)
+{
+ return true;
+}
+#endif
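
An architecture with such a limitation supplies its own definition ahead of this fallback; a sketch of what the arm64 MTE override looks like (illustrative, not the verbatim arm64 header):

	/* arch sketch: THP swap is unusable while MTE tags are in use */
	#define arch_thp_swp_supported arch_thp_swp_supported
	static inline bool arch_thp_swp_supported(void)
	{
		return !system_supports_mte();
	}
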
+
#endif /* _LINUX_HUGE_MM_H */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 1e897e4168ac..8b4f93e84868 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -9,13 +9,16 @@
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
+#include <linux/gfp.h>
+#include <linux/userfaultfd_k.h>
struct ctl_table;
struct user_struct;
struct mmu_gather;
+struct node;
-#ifndef is_hugepd
+#ifndef CONFIG_ARCH_HAS_HUGEPD
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
@@ -27,16 +30,36 @@ typedef struct { unsigned long pd; } hugepd_t;
#include <linux/shm.h>
#include <asm/tlbflush.h>
+/*
+ * A HugeTLB page needs to store more metadata than the head struct page
+ * can hold, so some of it is stashed in the private fields of the tail
+ * struct pages. To avoid conflicts as more tail pages are pressed into
+ * service, the discrete tail-page indexes are gathered here.
+ */
+enum {
+ SUBPAGE_INDEX_SUBPOOL = 1, /* reuse page->private */
+#ifdef CONFIG_CGROUP_HUGETLB
+ SUBPAGE_INDEX_CGROUP, /* reuse page->private */
+ SUBPAGE_INDEX_CGROUP_RSVD, /* reuse page->private */
+ __MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
+#endif
+#ifdef CONFIG_MEMORY_FAILURE
+ SUBPAGE_INDEX_HWPOISON,
+#endif
+ __NR_USED_SUBPAGE,
+};
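
Each index above is an offset from the head page to the tail struct page whose private field stores the datum. A minimal sketch of the access pattern, assuming 'head' is the head page of a hugetlb compound page (this mirrors the hugetlb_page_subpool() accessor reworked later in this diff):

	struct hugepage_subpool *spool =
		(void *)page_private(head + SUBPAGE_INDEX_SUBPOOL);
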
+
struct hugepage_subpool {
spinlock_t lock;
long count;
long max_hpages; /* Maximum huge pages or -1 if no maximum. */
long used_hpages; /* Used count against maximum, includes */
- /* both alloced and reserved pages. */
+ /* both allocated and reserved pages. */
struct hstate *hstate;
long min_hpages; /* Minimum huge pages or -1 if no minimum. */
long rsv_hpages; /* Pages reserved against global pool to */
- /* sasitfy minimum size. */
+ /* satisfy minimum size. */
};
struct resv_map {
@@ -46,7 +69,58 @@ struct resv_map {
long adds_in_progress;
struct list_head region_cache;
long region_cache_count;
+#ifdef CONFIG_CGROUP_HUGETLB
+ /*
+ * On private mappings, the counter to uncharge reservations is stored
+ * here. If these fields are 0, then either the mapping is shared, or
+ * cgroup accounting is disabled for this resv_map.
+ */
+ struct page_counter *reservation_counter;
+ unsigned long pages_per_hpage;
+ struct cgroup_subsys_state *css;
+#endif
+};
+
+/*
+ * Region tracking -- allows tracking of reservations and instantiated pages
+ * across the pages in a mapping.
+ *
+ * The region data structures are embedded into a resv_map and protected
+ * by a resv_map's lock. The set of regions within the resv_map represent
+ * reservations for huge pages, or huge pages that have already been
+ * instantiated within the map. The from and to elements are huge page
+ * indices into the associated mapping. from indicates the starting index
+ * of the region. to represents the first index past the end of the region.
+ *
+ * For example, a file region structure with from == 0 and to == 4 represents
+ * four huge pages in a mapping. It is important to note that the to element
+ * represents the first element past the end of the region. This is used in
+ * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
+ *
+ * Interval notation of the form [from, to) will be used to indicate that
+ * the endpoint from is inclusive and to is exclusive.
+ */
+struct file_region {
+ struct list_head link;
+ long from;
+ long to;
+#ifdef CONFIG_CGROUP_HUGETLB
+ /*
+ * On shared mappings, each reserved region appears as a struct
+ * file_region in resv_map. These fields hold the info needed to
+ * uncharge each reservation.
+ */
+ struct page_counter *reservation_counter;
+ struct cgroup_subsys_state *css;
+#endif
+};
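
With the [from, to) convention described above, region sizes fall out of plain subtraction; a one-line sketch for a file_region 'rg':

	long npages = rg->to - rg->from;	/* e.g. [0, 4) -> 4 huge pages */
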
+
+struct hugetlb_vma_lock {
+ struct kref refs;
+ struct rw_semaphore rw_sema;
+ struct vm_area_struct *vma;
};
+
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
@@ -59,47 +133,56 @@ struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
-void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
-int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
-int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
-int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
-
-#ifdef CONFIG_NUMA
-int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-#endif
-
-int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
+void hugetlb_dup_vma_private(struct vm_area_struct *vma);
+void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
+int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
+int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
+ loff_t *);
+int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
+ loff_t *);
+int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
+ loff_t *);
+
+int move_hugetlb_page_tables(struct vm_area_struct *vma,
+ struct vm_area_struct *new_vma,
+ unsigned long old_addr, unsigned long new_addr,
+ unsigned long len);
+int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
+ struct vm_area_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
struct page **, struct vm_area_struct **,
unsigned long *, unsigned long *, long, unsigned int,
int *);
void unmap_hugepage_range(struct vm_area_struct *,
- unsigned long, unsigned long, struct page *);
+ unsigned long, unsigned long, struct page *,
+ zap_flags_t);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
- struct page *ref_page);
-void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct page *ref_page);
+ struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
-int hugetlb_report_node_meminfo(int, char *);
-void hugetlb_show_meminfo(void);
+int hugetlb_report_node_meminfo(char *buf, int len, int nid);
+void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags);
+#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
unsigned long src_addr,
- struct page **pagep);
-int hugetlb_reserve_pages(struct inode *inode, long from, long to,
+ enum mcopy_atomic_mode mode,
+ struct page **pagep,
+ bool wp_copy);
+#endif /* CONFIG_USERFAULTFD */
+bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
struct vm_area_struct *vma,
vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
long freed);
-bool isolate_huge_page(struct page *page, struct list_head *list);
+int isolate_hugetlb(struct page *page, struct list_head *list);
+int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
+int get_huge_page_for_hwpoison(unsigned long pfn, int flags);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
@@ -107,18 +190,23 @@ void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
-pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
+pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, pud_t *pud);
+
+struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;
/* arch callbacks */
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz);
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
+unsigned long hugetlb_mask_last_page(struct hstate *h);
+int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
@@ -126,23 +214,37 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
struct page *follow_huge_pd(struct vm_area_struct *vma,
unsigned long address, hugepd_t hpd,
int flags, int pdshift);
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int flags);
+struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
+ int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
pgd_t *pgd, int flags);
+void hugetlb_vma_lock_read(struct vm_area_struct *vma);
+void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
+void hugetlb_vma_lock_write(struct vm_area_struct *vma);
+void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
+int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
+void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
+void hugetlb_vma_lock_release(struct kref *kref);
+
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
- unsigned long address, unsigned long end, pgprot_t newprot);
+ unsigned long address, unsigned long end, pgprot_t newprot,
+ unsigned long cp_flags);
bool is_hugetlb_entry_migration(pte_t pte);
+void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
#else /* !CONFIG_HUGETLB_PAGE */
-static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
+static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
+{
+}
+
+static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}
@@ -151,8 +253,15 @@ static inline unsigned long hugetlb_total_pages(void)
return 0;
}
-static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
- pte_t *ptep)
+static inline struct address_space *hugetlb_page_mapping_lock_write(
+ struct page *hpage)
+{
+ return NULL;
+}
+
+static inline int huge_pmd_unshare(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
{
return 0;
}
@@ -180,7 +289,19 @@ static inline struct page *follow_huge_addr(struct mm_struct *mm,
}
static inline int copy_hugetlb_page_range(struct mm_struct *dst,
- struct mm_struct *src, struct vm_area_struct *vma)
+ struct mm_struct *src,
+ struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma)
+{
+ BUG();
+ return 0;
+}
+
+static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
+ struct vm_area_struct *new_vma,
+ unsigned long old_addr,
+ unsigned long new_addr,
+ unsigned long len)
{
BUG();
return 0;
@@ -190,12 +311,12 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
-static inline int hugetlb_report_node_meminfo(int nid, char *buf)
+static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
return 0;
}
-static inline void hugetlb_show_meminfo(void)
+static inline void hugetlb_show_meminfo_node(int nid)
{
}
@@ -206,8 +327,8 @@ static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
return NULL;
}
-static inline struct page *follow_huge_pmd(struct mm_struct *mm,
- unsigned long address, pmd_t *pmd, int flags)
+static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma,
+ unsigned long address, int flags)
{
return NULL;
}
@@ -230,6 +351,31 @@ static inline int prepare_hugepage_range(struct file *file,
return -EINVAL;
}
+static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
+{
+}
+
+static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
+{
+}
+
+static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
+{
+}
+
+static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
+{
+}
+
+static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
+{
+ return 1;
+}
+
+static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
+{
+}
+
static inline int pmd_huge(pmd_t pmd)
{
return 0;
@@ -253,16 +399,20 @@ static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
BUG();
}
+#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
pte_t *dst_pte,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
unsigned long src_addr,
- struct page **pagep)
+ enum mcopy_atomic_mode mode,
+ struct page **pagep,
+ bool wp_copy)
{
BUG();
return 0;
}
+#endif /* CONFIG_USERFAULTFD */
static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
unsigned long sz)
@@ -270,9 +420,19 @@ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
return NULL;
}
-static inline bool isolate_huge_page(struct page *page, struct list_head *list)
+static inline int isolate_hugetlb(struct page *page, struct list_head *list)
{
- return false;
+ return -EBUSY;
+}
+
+static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
+{
+ return 0;
+}
+
+static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
+{
+ return 0;
}
static inline void putback_active_hugepage(struct page *page)
@@ -286,21 +446,16 @@ static inline void move_hugetlb_state(struct page *oldpage,
static inline unsigned long hugetlb_change_protection(
struct vm_area_struct *vma, unsigned long address,
- unsigned long end, pgprot_t newprot)
+ unsigned long end, pgprot_t newprot,
+ unsigned long cp_flags)
{
return 0;
}
static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start,
- unsigned long end, struct page *ref_page)
-{
- BUG();
-}
-
-static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
- struct vm_area_struct *vma, unsigned long start,
- unsigned long end, struct page *ref_page)
+ unsigned long end, struct page *ref_page,
+ zap_flags_t zap_flags)
{
BUG();
}
@@ -313,6 +468,8 @@ static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
return 0;
}
+static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
+
#endif /* !CONFIG_HUGETLB_PAGE */
/*
* hugepages at page global directory. If arch support
@@ -379,8 +536,7 @@ static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
- struct user_struct **user, int creat_flags,
- int page_size_log);
+ int creat_flags, int page_size_log);
static inline bool is_file_hugepages(struct file *file)
{
@@ -390,18 +546,24 @@ static inline bool is_file_hugepages(struct file *file)
return is_file_shm_hugepages(file);
}
-
+static inline struct hstate *hstate_inode(struct inode *i)
+{
+ return HUGETLBFS_SB(i->i_sb)->hstate;
+}
#else /* !CONFIG_HUGETLBFS */
#define is_file_hugepages(file) false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
- struct user_struct **user, int creat_flags,
- int page_size_log)
+ int creat_flags, int page_size_log)
{
return ERR_PTR(-ENOSYS);
}
+static inline struct hstate *hstate_inode(struct inode *i)
+{
+ return NULL;
+}
#endif /* !CONFIG_HUGETLBFS */
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
@@ -410,14 +572,106 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
+unsigned long
+generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+
+/*
+ * Hugetlb page specific state flags. These flags are located in page.private
+ * of the hugetlb head page. Functions created via the macros below should be
+ * used to manipulate these flags.
+ *
+ * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
+ * allocation time. Cleared when page is fully instantiated. Free
+ * routine checks flag to restore a reservation on error paths.
+ * Synchronization: Examined or modified by code that knows it has
+ *	the only reference to the page, i.e. after allocation but before
+ *	use or when the page is being freed.
+ * HPG_migratable - Set after a newly allocated page is added to the page
+ * cache and/or page tables. Indicates the page is a candidate for
+ * migration.
+ * Synchronization: Initially set after new page allocation with no
+ * locking. When examined and modified during migration processing
+ * (isolate, migrate, putback) the hugetlb_lock is held.
+ * HPG_temporary - Set on a page that is temporarily allocated from the buddy
+ * allocator. Typically used for migration target pages when no pages
+ * are available in the pool. The hugetlb free page path will
+ * immediately free pages with this flag set to the buddy allocator.
+ * Synchronization: Can be set after huge page allocation from buddy when
+ *	code knows it has the only reference. All other examinations and
+ * modifications require hugetlb_lock.
+ * HPG_freed - Set when page is on the free lists.
+ * Synchronization: hugetlb_lock held for examination and modification.
+ * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
+ * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
+ *	that is not tracked by the raw_hwp_page list.
+ */
+enum hugetlb_page_flags {
+ HPG_restore_reserve = 0,
+ HPG_migratable,
+ HPG_temporary,
+ HPG_freed,
+ HPG_vmemmap_optimized,
+ HPG_raw_hwp_unreliable,
+ __NR_HPAGEFLAGS,
+};
+
+/*
+ * Macros to create test, set and clear function definitions for
+ * hugetlb specific page flags.
+ */
+#ifdef CONFIG_HUGETLB_PAGE
+#define TESTHPAGEFLAG(uname, flname) \
+static inline int HPage##uname(struct page *page) \
+ { return test_bit(HPG_##flname, &(page->private)); }
+
+#define SETHPAGEFLAG(uname, flname) \
+static inline void SetHPage##uname(struct page *page) \
+ { set_bit(HPG_##flname, &(page->private)); }
+
+#define CLEARHPAGEFLAG(uname, flname) \
+static inline void ClearHPage##uname(struct page *page) \
+ { clear_bit(HPG_##flname, &(page->private)); }
+#else
+#define TESTHPAGEFLAG(uname, flname) \
+static inline int HPage##uname(struct page *page) \
+ { return 0; }
+
+#define SETHPAGEFLAG(uname, flname) \
+static inline void SetHPage##uname(struct page *page) \
+ { }
+
+#define CLEARHPAGEFLAG(uname, flname) \
+static inline void ClearHPage##uname(struct page *page) \
+ { }
+#endif
+
+#define HPAGEFLAG(uname, flname) \
+ TESTHPAGEFLAG(uname, flname) \
+ SETHPAGEFLAG(uname, flname) \
+ CLEARHPAGEFLAG(uname, flname) \
+
+/*
+ * Create functions associated with hugetlb page flags
+ */
+HPAGEFLAG(RestoreReserve, restore_reserve)
+HPAGEFLAG(Migratable, migratable)
+HPAGEFLAG(Temporary, temporary)
+HPAGEFLAG(Freed, freed)
+HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
+HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
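
Each HPAGEFLAG() line above stamps out a test/set/clear triplet; HPAGEFLAG(Migratable, migratable), for example, expands to roughly:

	static inline int HPageMigratable(struct page *page)
		{ return test_bit(HPG_migratable, &(page->private)); }
	static inline void SetHPageMigratable(struct page *page)
		{ set_bit(HPG_migratable, &(page->private)); }
	static inline void ClearHPageMigratable(struct page *page)
		{ clear_bit(HPG_migratable, &(page->private)); }
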
+
#ifdef CONFIG_HUGETLB_PAGE
#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
+ struct mutex resize_lock;
int next_nid_to_alloc;
int next_nid_to_free;
unsigned int order;
+ unsigned int demote_order;
unsigned long mask;
unsigned long max_huge_pages;
unsigned long nr_huge_pages;
@@ -427,13 +681,14 @@ struct hstate {
unsigned long nr_overcommit_huge_pages;
struct list_head hugepage_activelist;
struct list_head hugepage_freelists[MAX_NUMNODES];
+ unsigned int max_huge_pages_node[MAX_NUMNODES];
unsigned int nr_huge_pages_node[MAX_NUMNODES];
unsigned int free_huge_pages_node[MAX_NUMNODES];
unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
/* cgroup control files */
- struct cftype cgroup_files_dfl[5];
- struct cftype cgroup_files_legacy[5];
+ struct cftype cgroup_files_dfl[8];
+ struct cftype cgroup_files_legacy[10];
#endif
char name[HSTATE_NAME_LEN];
};
@@ -443,24 +698,25 @@ struct huge_bootmem_page {
struct hstate *hstate;
};
+int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve);
-struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
- nodemask_t *nmask);
+ nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
unsigned long address);
-struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
- int nid, nodemask_t *nmask);
-int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
+int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
pgoff_t idx);
+void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
+ unsigned long address, struct page *page);
/* arch callback */
-int __init __alloc_bootmem_huge_page(struct hstate *h);
-int __init alloc_bootmem_huge_page(struct hstate *h);
+int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
+int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
+bool __init hugetlb_node_alloc_supported(void);
-void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
+bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);
#ifndef HUGE_MAX_HSTATE
@@ -472,9 +728,18 @@ extern unsigned int default_hstate_idx;
#define default_hstate (hstates[default_hstate_idx])
-static inline struct hstate *hstate_inode(struct inode *i)
+/*
+ * hugetlb page subpool pointer located in hpage[1].private
+ */
+static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
- return HUGETLBFS_SB(i->i_sb)->hstate;
+ return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
+}
+
+static inline void hugetlb_set_page_subpool(struct page *hpage,
+ struct hugepage_subpool *subpool)
+{
+ set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool);
}
static inline struct hstate *hstate_file(struct file *f)
@@ -495,7 +760,7 @@ static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
return hstate_file(vma->vm_file);
}
-static inline unsigned long huge_page_size(struct hstate *h)
+static inline unsigned long huge_page_size(const struct hstate *h)
{
return (unsigned long)PAGE_SIZE << h->order;
}
@@ -524,7 +789,7 @@ static inline bool hstate_is_gigantic(struct hstate *h)
return huge_page_order(h) >= MAX_ORDER;
}
-static inline unsigned int pages_per_huge_page(struct hstate *h)
+static inline unsigned int pages_per_huge_page(const struct hstate *h)
{
return 1 << h->order;
}
@@ -536,11 +801,25 @@ static inline unsigned int blocks_per_huge_page(struct hstate *h)
#include <asm/hugetlb.h>
+#ifndef is_hugepage_only_range
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr, unsigned long len)
+{
+ return 0;
+}
+#define is_hugepage_only_range is_hugepage_only_range
+#endif
+
+#ifndef arch_clear_hugepage_flags
+static inline void arch_clear_hugepage_flags(struct page *page) { }
+#define arch_clear_hugepage_flags arch_clear_hugepage_flags
+#endif
+
#ifndef arch_make_huge_pte
-static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable)
+static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
+ vm_flags_t flags)
{
- return entry;
+ return pte_mkhuge(entry);
}
#endif
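
The new signature trades the VMA/page pair for a page-size shift plus the VMA flags; a hedged sketch of a typical call site, using the huge_page_shift()/hstate_vma() helpers from this header:

	entry = arch_make_huge_pte(entry, huge_page_shift(hstate_vma(vma)),
				   vma->vm_flags);
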
@@ -560,21 +839,18 @@ static inline int hstate_index(struct hstate *h)
return h - hstates;
}
-pgoff_t __basepage_index(struct page *page);
-
-/* Return page->index in PAGE_SIZE units */
-static inline pgoff_t basepage_index(struct page *page)
-{
- if (!PageCompound(page))
- return page->index;
-
- return __basepage_index(page);
-}
-
extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
unsigned long end_pfn);
+#ifdef CONFIG_MEMORY_FAILURE
+extern void hugetlb_clear_page_hwpoison(struct page *hpage);
+#else
+static inline void hugetlb_clear_page_hwpoison(struct page *hpage)
+{
+}
+#endif
+
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
@@ -604,7 +880,7 @@ static inline bool hugepage_migration_supported(struct hstate *h)
* It determines whether or not a huge page should be placed on
* movable zone or not. Movability of any huge page should be
* required only if huge page size is supported for migration.
- * There wont be any reason for the huge page to be movable if
+ * There won't be any reason for the huge page to be movable if
* it is not migratable to start with. Also the size of the huge
* page should be large enough to be placed under a movable zone
* and still feasible enough to be migratable. Just the presence
@@ -624,6 +900,27 @@ static inline bool hugepage_movable_supported(struct hstate *h)
return true;
}
+/* Movability of hugepages depends on migration support. */
+static inline gfp_t htlb_alloc_mask(struct hstate *h)
+{
+ if (hugepage_movable_supported(h))
+ return GFP_HIGHUSER_MOVABLE;
+ else
+ return GFP_HIGHUSER;
+}
+
+static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
+{
+ gfp_t modified_mask = htlb_alloc_mask(h);
+
+ /* Some callers might want to enforce node */
+ modified_mask |= (gfp_mask & __GFP_THISNODE);
+
+ modified_mask |= (gfp_mask & __GFP_NOWARN);
+
+ return modified_mask;
+}
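
A short worked example of the mask plumbing, assuming a migration-capable hstate and a caller that insists on a specific node:

	gfp_t gfp = htlb_modify_alloc_mask(h, __GFP_THISNODE);
	/* -> GFP_HIGHUSER_MOVABLE | __GFP_THISNODE; only __GFP_THISNODE
	 * and __GFP_NOWARN survive from the caller's mask */
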
+
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
struct mm_struct *mm, pte_t *pte)
{
@@ -644,6 +941,11 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
+static inline void hugetlb_count_init(struct mm_struct *mm)
+{
+ atomic_long_set(&mm->hugetlb_usage, 0);
+}
+
static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
atomic_long_add(l, &mm->hugetlb_usage);
@@ -654,14 +956,6 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
atomic_long_sub(l, &mm->hugetlb_usage);
}
-#ifndef set_huge_swap_pte_at
-static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte, unsigned long sz)
-{
- set_huge_pte_at(mm, addr, ptep, pte);
-}
-#endif
-
#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
@@ -681,23 +975,35 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
}
#endif
+#ifdef CONFIG_NUMA
+void hugetlb_register_node(struct node *node);
+void hugetlb_unregister_node(struct node *node);
+#endif
+
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
-static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
- unsigned long addr,
- int avoid_reserve)
+static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
return NULL;
}
-static inline struct page *alloc_huge_page_node(struct hstate *h, int nid)
+static inline int isolate_or_dissolve_huge_page(struct page *page,
+ struct list_head *list)
+{
+ return -ENOMEM;
+}
+
+static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
+ unsigned long addr,
+ int avoid_reserve)
{
return NULL;
}
static inline struct page *
-alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask)
+alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask)
{
return NULL;
}
@@ -729,12 +1035,12 @@ static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
return NULL;
}
-static inline struct hstate *hstate_inode(struct inode *i)
+static inline struct hstate *page_hstate(struct page *page)
{
return NULL;
}
-static inline struct hstate *page_hstate(struct page *page)
+static inline struct hstate *size_to_hstate(unsigned long size)
{
return NULL;
}
@@ -789,11 +1095,6 @@ static inline int hstate_index(struct hstate *h)
return 0;
}
-static inline pgoff_t basepage_index(struct page *page)
-{
- return page->index;
-}
-
static inline int dissolve_free_huge_page(struct page *page)
{
return 0;
@@ -815,12 +1116,26 @@ static inline bool hugepage_movable_supported(struct hstate *h)
return false;
}
+static inline gfp_t htlb_alloc_mask(struct hstate *h)
+{
+ return 0;
+}
+
+static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
+{
+ return 0;
+}
+
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
struct mm_struct *mm, pte_t *pte)
{
return &mm->page_table_lock;
}
+static inline void hugetlb_count_init(struct mm_struct *mm)
+{
+}
+
static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}
@@ -829,8 +1144,22 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}
-static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte, unsigned long sz)
+static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ return *ptep;
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+}
+
+static inline void hugetlb_register_node(struct node *node)
+{
+}
+
+static inline void hugetlb_unregister_node(struct node *node)
{
}
#endif /* CONFIG_HUGETLB_PAGE */
@@ -845,4 +1174,22 @@ static inline spinlock_t *huge_pte_lock(struct hstate *h,
return ptl;
}
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
+extern void __init hugetlb_cma_reserve(int order);
+#else
+static inline __init void hugetlb_cma_reserve(int order)
+{
+}
+#endif
+
+bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
+
+#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+/*
+ * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
+ * implement this.
+ */
+#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#endif
+
#endif /* _LINUX_HUGETLB_H */
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 063962f6dfc6..630cd255d0cf 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -18,32 +18,103 @@
#include <linux/mmdebug.h>
struct hugetlb_cgroup;
+struct resv_map;
+struct file_region;
+
+#ifdef CONFIG_CGROUP_HUGETLB
/*
* Minimum page order trackable by hugetlb cgroup.
- * At least 3 pages are necessary for all the tracking information.
+ * At least 4 pages are necessary for all the tracking information.
+ * The second tail page (hpage[SUBPAGE_INDEX_CGROUP]) holds the fault
+ * usage cgroup. The third tail page (hpage[SUBPAGE_INDEX_CGROUP_RSVD])
+ * holds the reservation usage cgroup.
*/
-#define HUGETLB_CGROUP_MIN_ORDER 2
+#define HUGETLB_CGROUP_MIN_ORDER order_base_2(__MAX_CGROUP_SUBPAGE_INDEX + 1)
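
Checking the arithmetic against the subpage-index enum added to hugetlb.h earlier in this diff:

	/* With CONFIG_CGROUP_HUGETLB: __MAX_CGROUP_SUBPAGE_INDEX == 3
	 * (SUBPAGE_INDEX_CGROUP_RSVD), so HUGETLB_CGROUP_MIN_ORDER ==
	 * order_base_2(3 + 1) == 2, i.e. at least 2^2 == 4 base pages,
	 * matching the comment above. */
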
-#ifdef CONFIG_CGROUP_HUGETLB
+enum hugetlb_memory_event {
+ HUGETLB_MAX,
+ HUGETLB_NR_MEMORY_EVENTS,
+};
-static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+struct hugetlb_cgroup_per_node {
+ /* hugetlb usage in pages over all hstates. */
+ unsigned long usage[HUGE_MAX_HSTATE];
+};
+
+struct hugetlb_cgroup {
+ struct cgroup_subsys_state css;
+
+ /*
+ * the counter to account for hugepages from hugetlb.
+ */
+ struct page_counter hugepage[HUGE_MAX_HSTATE];
+
+ /*
+ * the counter to account for hugepage reservations from hugetlb.
+ */
+ struct page_counter rsvd_hugepage[HUGE_MAX_HSTATE];
+
+ atomic_long_t events[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
+ atomic_long_t events_local[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
+
+ /* Handle for "hugetlb.events" */
+ struct cgroup_file events_file[HUGE_MAX_HSTATE];
+
+ /* Handle for "hugetlb.events.local" */
+ struct cgroup_file events_local_file[HUGE_MAX_HSTATE];
+
+ struct hugetlb_cgroup_per_node *nodeinfo[];
+};
+
+static inline struct hugetlb_cgroup *
+__hugetlb_cgroup_from_page(struct page *page, bool rsvd)
{
VM_BUG_ON_PAGE(!PageHuge(page), page);
if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
return NULL;
- return (struct hugetlb_cgroup *)page[2].private;
+ if (rsvd)
+ return (void *)page_private(page + SUBPAGE_INDEX_CGROUP_RSVD);
+ else
+ return (void *)page_private(page + SUBPAGE_INDEX_CGROUP);
+}
+
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+{
+ return __hugetlb_cgroup_from_page(page, false);
}
-static inline
-int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
+static inline struct hugetlb_cgroup *
+hugetlb_cgroup_from_page_rsvd(struct page *page)
+{
+ return __hugetlb_cgroup_from_page(page, true);
+}
+
+static inline void __set_hugetlb_cgroup(struct page *page,
+ struct hugetlb_cgroup *h_cg, bool rsvd)
{
VM_BUG_ON_PAGE(!PageHuge(page), page);
if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
- return -1;
- page[2].private = (unsigned long)h_cg;
- return 0;
+ return;
+ if (rsvd)
+ set_page_private(page + SUBPAGE_INDEX_CGROUP_RSVD,
+ (unsigned long)h_cg);
+ else
+ set_page_private(page + SUBPAGE_INDEX_CGROUP,
+ (unsigned long)h_cg);
+}
+
+static inline void set_hugetlb_cgroup(struct page *page,
+ struct hugetlb_cgroup *h_cg)
+{
+ __set_hugetlb_cgroup(page, h_cg, false);
+}
+
+static inline void set_hugetlb_cgroup_rsvd(struct page *page,
+ struct hugetlb_cgroup *h_cg)
+{
+ __set_hugetlb_cgroup(page, h_cg, true);
}
static inline bool hugetlb_cgroup_disabled(void)
@@ -51,29 +122,90 @@ static inline bool hugetlb_cgroup_disabled(void)
return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
}
+static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
+{
+ css_put(&h_cg->css);
+}
+
+static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
+ struct resv_map *resv_map)
+{
+ if (resv_map->css)
+ css_get(resv_map->css);
+}
+
+static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
+ struct resv_map *resv_map)
+{
+ if (resv_map->css)
+ css_put(resv_map->css);
+}
+
extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr);
+extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr);
extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
struct page *page);
+extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct page *page);
extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
struct page *page);
+extern void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
+ struct page *page);
+
extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg);
+extern void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg);
+extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
+ unsigned long start,
+ unsigned long end);
+
+extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
+ struct file_region *rg,
+ unsigned long nr_pages,
+ bool region_del);
+
extern void hugetlb_cgroup_file_init(void) __init;
extern void hugetlb_cgroup_migrate(struct page *oldhpage,
struct page *newhpage);
#else
+static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
+ struct file_region *rg,
+ unsigned long nr_pages,
+ bool region_del)
+{
+}
+
static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
{
return NULL;
}
-static inline
-int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
+static inline struct hugetlb_cgroup *
+hugetlb_cgroup_from_page_resv(struct page *page)
+{
+ return NULL;
+}
+
+static inline struct hugetlb_cgroup *
+hugetlb_cgroup_from_page_rsvd(struct page *page)
+{
+ return NULL;
+}
+
+static inline void set_hugetlb_cgroup(struct page *page,
+ struct hugetlb_cgroup *h_cg)
+{
+}
+
+static inline void set_hugetlb_cgroup_rsvd(struct page *page,
+ struct hugetlb_cgroup *h_cg)
{
- return 0;
}
static inline bool hugetlb_cgroup_disabled(void)
@@ -81,28 +213,71 @@ static inline bool hugetlb_cgroup_disabled(void)
return true;
}
-static inline int
-hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
- struct hugetlb_cgroup **ptr)
+static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
+{
+}
+
+static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
+ struct resv_map *resv_map)
+{
+}
+
+static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
+ struct resv_map *resv_map)
+{
+}
+
+static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr)
{
return 0;
}
-static inline void
-hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
- struct hugetlb_cgroup *h_cg,
- struct page *page)
+static inline int hugetlb_cgroup_charge_cgroup_rsvd(int idx,
+ unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr)
+{
+ return 0;
+}
+
+static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct page *page)
{
}
static inline void
-hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page)
+hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct page *page)
+{
+}
+
+static inline void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
+ struct page *page)
+{
+}
+
+static inline void hugetlb_cgroup_uncharge_page_rsvd(int idx,
+ unsigned long nr_pages,
+ struct page *page)
+{
+}
+static inline void hugetlb_cgroup_uncharge_cgroup(int idx,
+ unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg)
{
}
static inline void
-hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
- struct hugetlb_cgroup *h_cg)
+hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg)
+{
+}
+
+static inline void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
+ unsigned long start,
+ unsigned long end)
{
}
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index 6058c3844a76..f319bd26b030 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -72,14 +72,17 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
void *context);
extern int register_perf_hw_breakpoint(struct perf_event *bp);
-extern int __register_perf_hw_breakpoint(struct perf_event *bp);
extern void unregister_hw_breakpoint(struct perf_event *bp);
extern void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events);
+extern bool hw_breakpoint_is_used(void);
extern int dbg_reserve_bp_slot(struct perf_event *bp);
extern int dbg_release_bp_slot(struct perf_event *bp);
extern int reserve_bp_slot(struct perf_event *bp);
extern void release_bp_slot(struct perf_event *bp);
+int arch_reserve_bp_slot(struct perf_event *bp);
+void arch_release_bp_slot(struct perf_event *bp);
+void arch_unregister_hw_breakpoint(struct perf_event *bp);
extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
@@ -115,11 +118,11 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
void *context) { return NULL; }
static inline int
register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
-static inline int
-__register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
static inline void unregister_hw_breakpoint(struct perf_event *bp) { }
static inline void
unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events) { }
+static inline bool hw_breakpoint_is_used(void) { return false; }
+
static inline int
reserve_bp_slot(struct perf_event *bp) {return -ENOSYS; }
static inline void release_bp_slot(struct perf_event *bp) { }
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index 8e6dd908da21..77c2885c4c13 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -50,6 +50,7 @@ struct hwrng {
struct list_head list;
struct kref ref;
struct completion cleanup_done;
+ struct completion dying;
};
struct device;
@@ -60,7 +61,7 @@ extern int devm_hwrng_register(struct device *dev, struct hwrng *rng);
/** Unregister a Hardware Random Number Generator driver. */
extern void hwrng_unregister(struct hwrng *rng);
extern void devm_hwrng_unregister(struct device *dev, struct hwrng *rng);
-/** Feed random bits into the pool. */
-extern void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy);
+
+extern long hwrng_msleep(struct hwrng *rng, unsigned int msecs);
#endif /* LINUX_HWRANDOM_H_ */
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 5e609f25878c..14325f93c6b2 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -85,6 +85,8 @@ enum hwmon_temp_attributes {
hwmon_temp_lowest,
hwmon_temp_highest,
hwmon_temp_reset_history,
+ hwmon_temp_rated_min,
+ hwmon_temp_rated_max,
};
#define HWMON_T_ENABLE BIT(hwmon_temp_enable)
@@ -112,6 +114,8 @@ enum hwmon_temp_attributes {
#define HWMON_T_LOWEST BIT(hwmon_temp_lowest)
#define HWMON_T_HIGHEST BIT(hwmon_temp_highest)
#define HWMON_T_RESET_HISTORY BIT(hwmon_temp_reset_history)
+#define HWMON_T_RATED_MIN BIT(hwmon_temp_rated_min)
+#define HWMON_T_RATED_MAX BIT(hwmon_temp_rated_max)
enum hwmon_in_attributes {
hwmon_in_enable,
@@ -130,6 +134,8 @@ enum hwmon_in_attributes {
hwmon_in_max_alarm,
hwmon_in_lcrit_alarm,
hwmon_in_crit_alarm,
+ hwmon_in_rated_min,
+ hwmon_in_rated_max,
};
#define HWMON_I_ENABLE BIT(hwmon_in_enable)
@@ -148,6 +154,8 @@ enum hwmon_in_attributes {
#define HWMON_I_MAX_ALARM BIT(hwmon_in_max_alarm)
#define HWMON_I_LCRIT_ALARM BIT(hwmon_in_lcrit_alarm)
#define HWMON_I_CRIT_ALARM BIT(hwmon_in_crit_alarm)
+#define HWMON_I_RATED_MIN BIT(hwmon_in_rated_min)
+#define HWMON_I_RATED_MAX BIT(hwmon_in_rated_max)
enum hwmon_curr_attributes {
hwmon_curr_enable,
@@ -166,6 +174,8 @@ enum hwmon_curr_attributes {
hwmon_curr_max_alarm,
hwmon_curr_lcrit_alarm,
hwmon_curr_crit_alarm,
+ hwmon_curr_rated_min,
+ hwmon_curr_rated_max,
};
#define HWMON_C_ENABLE BIT(hwmon_curr_enable)
@@ -184,6 +194,8 @@ enum hwmon_curr_attributes {
#define HWMON_C_MAX_ALARM BIT(hwmon_curr_max_alarm)
#define HWMON_C_LCRIT_ALARM BIT(hwmon_curr_lcrit_alarm)
#define HWMON_C_CRIT_ALARM BIT(hwmon_curr_crit_alarm)
+#define HWMON_C_RATED_MIN BIT(hwmon_curr_rated_min)
+#define HWMON_C_RATED_MAX BIT(hwmon_curr_rated_max)
enum hwmon_power_attributes {
hwmon_power_enable,
@@ -215,6 +227,8 @@ enum hwmon_power_attributes {
hwmon_power_max_alarm,
hwmon_power_lcrit_alarm,
hwmon_power_crit_alarm,
+ hwmon_power_rated_min,
+ hwmon_power_rated_max,
};
#define HWMON_P_ENABLE BIT(hwmon_power_enable)
@@ -246,6 +260,8 @@ enum hwmon_power_attributes {
#define HWMON_P_MAX_ALARM BIT(hwmon_power_max_alarm)
#define HWMON_P_LCRIT_ALARM BIT(hwmon_power_lcrit_alarm)
#define HWMON_P_CRIT_ALARM BIT(hwmon_power_crit_alarm)
+#define HWMON_P_RATED_MIN BIT(hwmon_power_rated_min)
+#define HWMON_P_RATED_MAX BIT(hwmon_power_rated_max)
enum hwmon_energy_attributes {
hwmon_energy_enable,
@@ -267,6 +283,8 @@ enum hwmon_humidity_attributes {
hwmon_humidity_max_hyst,
hwmon_humidity_alarm,
hwmon_humidity_fault,
+ hwmon_humidity_rated_min,
+ hwmon_humidity_rated_max,
};
#define HWMON_H_ENABLE BIT(hwmon_humidity_enable)
@@ -278,6 +296,8 @@ enum hwmon_humidity_attributes {
#define HWMON_H_MAX_HYST BIT(hwmon_humidity_max_hyst)
#define HWMON_H_ALARM BIT(hwmon_humidity_alarm)
#define HWMON_H_FAULT BIT(hwmon_humidity_fault)
+#define HWMON_H_RATED_MIN BIT(hwmon_humidity_rated_min)
+#define HWMON_H_RATED_MAX BIT(hwmon_humidity_rated_max)
enum hwmon_fan_attributes {
hwmon_fan_enable,
@@ -312,12 +332,14 @@ enum hwmon_pwm_attributes {
hwmon_pwm_enable,
hwmon_pwm_mode,
hwmon_pwm_freq,
+ hwmon_pwm_auto_channels_temp,
};
#define HWMON_PWM_INPUT BIT(hwmon_pwm_input)
#define HWMON_PWM_ENABLE BIT(hwmon_pwm_enable)
#define HWMON_PWM_MODE BIT(hwmon_pwm_mode)
#define HWMON_PWM_FREQ BIT(hwmon_pwm_freq)
+#define HWMON_PWM_AUTO_CHANNELS_TEMP BIT(hwmon_pwm_auto_channels_temp)
enum hwmon_intrusion_attributes {
hwmon_intrusion_alarm,
@@ -383,7 +405,7 @@ struct hwmon_ops {
};
/**
- * Channel information
+ * struct hwmon_channel_info - Channel information
* @type: Channel type.
* @config: Pointer to NULL-terminated list of channel parameters.
* Use for per-channel attributes.
@@ -402,7 +424,7 @@ struct hwmon_channel_info {
})
/**
- * Chip configuration
+ * struct hwmon_chip_info - Chip configuration
* @ops: Pointer to hwmon operations.
* @info: Null-terminated list of channel information.
*/
@@ -428,6 +450,9 @@ hwmon_device_register_with_info(struct device *dev,
const struct hwmon_chip_info *info,
const struct attribute_group **extra_groups);
struct device *
+hwmon_device_register_for_thermal(struct device *dev, const char *name,
+ void *drvdata);
+struct device *
devm_hwmon_device_register_with_info(struct device *dev,
const char *name, void *drvdata,
const struct hwmon_chip_info *info,
@@ -436,6 +461,12 @@ devm_hwmon_device_register_with_info(struct device *dev,
void hwmon_device_unregister(struct device *dev);
void devm_hwmon_device_unregister(struct device *dev);
+int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel);
+
+char *hwmon_sanitize_name(const char *name);
+char *devm_hwmon_sanitize_name(struct device *dev, const char *name);
+
/**
* hwmon_is_bad_char - Is the char invalid in a hwmon name
* @ch: the char to be considered
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 692c89ccf5df..3b42264333ef 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -14,6 +14,7 @@
#include <uapi/linux/hyperv.h>
+#include <linux/mm.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
@@ -23,12 +24,55 @@
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/reciprocal_div.h>
+#include <asm/hyperv-tlfs.h>
#define MAX_PAGE_BUFFER_COUNT 32
#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */
#pragma pack(push, 1)
+/*
+ * Types for GPADL; decides how the GPADL header is created.
+ *
+ * It doesn't make much difference between BUFFER and RING if PAGE_SIZE is the
+ * same as HV_HYP_PAGE_SIZE.
+ *
+ * If PAGE_SIZE is bigger than HV_HYP_PAGE_SIZE, the ring buffer headers
+ * are still PAGE_SIZE in size, but only the first HV_HYP_PAGE of each is
+ * put into the gpadl. The HV_HYP_PAGE count and the index of each
+ * HV_HYP_PAGE therefore differ between the GPADL types; for example,
+ * if PAGE_SIZE is 64K:
+ *
+ * BUFFER:
+ *
+ * gva: |-- 64k --|-- 64k --| ... |
+ * gpa: | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k |
+ * index: 0 1 2 15 16 17 18 .. 31 32 ...
+ * | | ... | | | ... | ...
+ * v V V V V V
+ * gpadl: | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k | ... |
+ * index: 0 1 2 ... 15 16 17 18 .. 31 32 ...
+ *
+ * RING:
+ *
+ * | header | data | header | data |
+ * gva: |-- 64k --|-- 64k --| ... |-- 64k --|-- 64k --| ... |
+ * gpa: | 4k | .. | 4k | 4k | ... | 4k | ... | 4k | .. | 4k | .. | ... |
+ * index: 0 1 16 17 18 31 ... n n+1 n+16 ... 2n
+ * | / / / | / /
+ * | / / / | / /
+ * | / / ... / ... | / ... /
+ * | / / / | / /
+ * | / / / | / /
+ * V V V V V V v
+ * gpadl: | 4k | 4k | ... | ... | 4k | 4k | ... |
+ * index: 0 1 2 ... 16 ... n-15 n-14 n-13 ... 2n-30
+ */
+enum hv_gpadl_type {
+ HV_GPADL_BUFFER,
+ HV_GPADL_RING
+};
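
A worked example of the index math the diagrams encode, under the same 64K assumption:

	/* 64K / 4K == 16 HV_HYP_PAGEs per guest page.
	 * HV_GPADL_BUFFER: all 16 pieces of every page enter the gpadl.
	 * HV_GPADL_RING: each 64K header contributes only its first 4K
	 * piece, so every header shrinks the gpadl by 15 indexes -- hence
	 * the last RING index of 2n-30 with two headers present. */
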
+
/* Single-page buffer */
struct hv_page_buffer {
u32 len;
@@ -111,15 +155,19 @@ struct hv_ring_buffer {
} feature_bits;
/* Pad it to PAGE_SIZE so that data starts on page boundary */
- u8 reserved2[4028];
+ u8 reserved2[PAGE_SIZE - 68];
/*
* Ring data starts here + RingDataStartOffset
* !!! DO NOT place any fields below this !!!
*/
- u8 buffer[0];
+ u8 buffer[];
} __packed;
+/* Calculate the proper size of a ringbuffer, it must be page-aligned */
+#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
+ (payload_sz))
+
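
A quick usage sketch: the macro rounds the page-sized header plus the payload up to a page boundary, so with 4K pages a 16K payload yields a 20K ring:

	/* sizeof(struct hv_ring_buffer) is padded to PAGE_SIZE */
	u32 ring_size = VMBUS_RING_SIZE(16 * 1024);	/* 20K on 4K pages */
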
struct hv_ring_buffer_info {
struct hv_ring_buffer *ring_buffer;
u32 ring_size; /* Include the shared header */
@@ -133,6 +181,10 @@ struct hv_ring_buffer_info {
* being freed while the ring buffer is being accessed.
*/
struct mutex ring_buffer_mutex;
+
+ /* Buffer that holds a copy of an incoming host packet */
+ void *pkt_buffer;
+ u32 pkt_buffer_size;
};
@@ -178,14 +230,19 @@ static inline u32 hv_get_avail_to_write_percent(
* two 16 bit quantities: major_number. minor_number.
*
* 0 . 13 (Windows Server 2008)
- * 1 . 1 (Windows 7)
- * 2 . 4 (Windows 8)
- * 3 . 0 (Windows 8 R2)
+ * 1 . 1 (Windows 7, WS2008 R2)
+ * 2 . 4 (Windows 8, WS2012)
+ * 3 . 0 (Windows 8.1, WS2012 R2)
* 4 . 0 (Windows 10)
* 4 . 1 (Windows 10 RS3)
* 5 . 0 (Newer Windows 10)
* 5 . 1 (Windows 10 RS4)
* 5 . 2 (Windows Server 2019, RS5)
+ * 5 . 3 (Windows Server 2022)
+ *
+ * The WS2008 and WIN7 versions are listed here for
+ * completeness but are no longer supported in the
+ * Linux kernel.
*/
#define VERSION_WS2008 ((0 << 16) | (13))
@@ -197,6 +254,7 @@ static inline u32 hv_get_avail_to_write_percent(
#define VERSION_WIN10_V5 ((5 << 16) | (0))
#define VERSION_WIN10_V5_1 ((5 << 16) | (1))
#define VERSION_WIN10_V5_2 ((5 << 16) | (2))
+#define VERSION_WIN10_V5_3 ((5 << 16) | (3))
/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
@@ -236,7 +294,7 @@ struct vmbus_channel_offer {
/*
* Pipes:
- * The following sructure is an integrated pipe protocol, which
+ * The following structure is an integrated pipe protocol, which
* is implemented on top of standard user-defined data. Pipe
* clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
* use.
@@ -313,7 +371,7 @@ struct vmadd_remove_transfer_page_set {
struct gpa_range {
u32 byte_count;
u32 byte_offset;
- u64 pfn_array[0];
+ u64 pfn_array[];
};
/*
@@ -425,8 +483,9 @@ enum vmbus_channel_message_type {
CHANNELMSG_19 = 19,
CHANNELMSG_20 = 20,
CHANNELMSG_TL_CONNECT_REQUEST = 21,
- CHANNELMSG_22 = 22,
+ CHANNELMSG_MODIFYCHANNEL = 22,
CHANNELMSG_TL_CONNECT_RESULT = 23,
+ CHANNELMSG_MODIFYCHANNEL_RESPONSE = 24,
CHANNELMSG_COUNT
};
@@ -483,12 +542,6 @@ struct vmbus_channel_rescind_offer {
u32 child_relid;
} __packed;
-static inline u32
-hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
-{
- return rbi->ring_buffer->pending_send_sz;
-}
-
/*
* Request Offer -- no parameters, SynIC message contains the partition ID
* Set Snoop -- no parameters, SynIC message contains the partition ID
@@ -540,6 +593,13 @@ struct vmbus_channel_open_result {
u32 status;
} __packed;
+/* Modify Channel Result parameters */
+struct vmbus_channel_modifychannel_response {
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+ u32 status;
+} __packed;
+
/* Close channel parameters; */
struct vmbus_channel_close_channel {
struct vmbus_channel_message_header header;
@@ -563,7 +623,7 @@ struct vmbus_channel_gpadl_header {
u32 gpadl;
u16 range_buflen;
u16 rangecount;
- struct gpa_range range[0];
+ struct gpa_range range[];
} __packed;
/* This is the followup packet that contains more PFNs. */
@@ -571,7 +631,7 @@ struct vmbus_channel_gpadl_body {
struct vmbus_channel_message_header header;
u32 msgnumber;
u32 gpadl;
- u64 pfn[0];
+ u64 pfn[];
} __packed;
struct vmbus_channel_gpadl_created {
@@ -620,6 +680,13 @@ struct vmbus_channel_tl_connect_request {
guid_t host_service_id;
} __packed;
+/* Modify Channel parameters, cf. vmbus_send_modifychannel() */
+struct vmbus_channel_modifychannel {
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+ u32 target_vp;
+} __packed;
+
struct vmbus_channel_version_response {
struct vmbus_channel_message_header header;
u8 version_supported;
@@ -665,6 +732,7 @@ struct vmbus_channel_msginfo {
struct vmbus_channel_gpadl_torndown gpadl_torndown;
struct vmbus_channel_gpadl_created gpadl_created;
struct vmbus_channel_version_response version_response;
+ struct vmbus_channel_modifychannel_response modify_response;
} response;
u32 msgsize;
@@ -672,7 +740,7 @@ struct vmbus_channel_msginfo {
* The channel message that goes out on the "wire".
* It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
*/
- unsigned char msg[0];
+ unsigned char msg[];
};
struct vmbus_close_msg {
@@ -689,11 +757,6 @@ union hv_connection_id {
} u;
};
-enum hv_numa_policy {
- HV_BALANCED = 0,
- HV_LOCALIZED,
-};
-
enum vmbus_device_type {
HV_IDE = 0,
HV_SCSI,
@@ -714,10 +777,41 @@ enum vmbus_device_type {
HV_UNKNOWN,
};
+/*
+ * Provides request ids for VMBus. Encapsulates guest memory
+ * addresses and stores the next available slot in req_arr
+ * to generate new ids in constant time.
+ */
+struct vmbus_requestor {
+ u64 *req_arr;
+ unsigned long *req_bitmap; /* is a given slot available? */
+ u32 size;
+ u64 next_request_id;
+ spinlock_t req_lock; /* provides atomicity */
+};
+
+#define VMBUS_NO_RQSTOR U64_MAX
+#define VMBUS_RQST_ERROR (U64_MAX - 1)
+#define VMBUS_RQST_ADDR_ANY U64_MAX
+/* NetVSC-specific */
+#define VMBUS_RQST_ID_NO_RESPONSE (U64_MAX - 2)
+/* StorVSC-specific */
+#define VMBUS_RQST_INIT (U64_MAX - 2)
+#define VMBUS_RQST_RESET (U64_MAX - 3)
+
struct vmbus_device {
u16 dev_type;
guid_t guid;
bool perf_device;
+ bool allowed_in_isolated;
+};
+
+#define VMBUS_DEFAULT_MAX_PKT_SIZE 4096
+
+struct vmbus_gpadl {
+ u32 gpadl_handle;
+ u32 size;
+ void *buffer;
};
struct vmbus_channel {
@@ -736,9 +830,10 @@ struct vmbus_channel {
u8 monitor_bit;
bool rescind; /* got rescind msg */
+ bool rescind_ref; /* got rescind msg, got channel reference */
struct completion rescind_event;
- u32 ringbuffer_gpadlhandle;
+ struct vmbus_gpadl ringbuffer_gpadlhandle;
/* Allocated memory for ring buffer */
struct page *ringbuffer_page;
@@ -771,6 +866,15 @@ struct vmbus_channel {
void (*onchannel_callback)(void *context);
void *channel_callback_context;
+ void (*change_target_cpu_callback)(struct vmbus_channel *channel,
+ u32 old, u32 new);
+
+ /*
+ * Synchronize channel scheduling and channel removal; see the inline
+ * comments in vmbus_chan_sched() and vmbus_reset_channel_cb().
+ */
+ spinlock_t sched_lock;
+
/*
* A channel can be marked for one of three modes of reading:
 * BATCHED - callback called from tasklet and should read
@@ -792,30 +896,24 @@ struct vmbus_channel {
u64 sig_event;
/*
- * Starting with win8, this field will be used to specify
- * the target virtual processor on which to deliver the interrupt for
- * the host to guest communication.
- * Prior to win8, incoming channel interrupts would only
- * be delivered on cpu 0. Setting this value to 0 would
- * preserve the earlier behavior.
+ * Starting with win8, this field will be used to specify the
+ * target CPU on which to deliver the interrupt for the host
+ * to guest communication.
+ *
+ * Prior to win8, incoming channel interrupts would only be
+ * delivered on CPU 0. Setting this value to 0 would preserve
+ * the earlier behavior.
*/
- u32 target_vp;
- /* The corresponding CPUID in the guest */
u32 target_cpu;
/*
- * State to manage the CPU affiliation of channels.
- */
- struct cpumask alloced_cpus_in_node;
- int numa_node;
- /*
* Support for sub-channels. For high performance devices,
* it will be useful to have multiple sub-channels to support
* a scalable communication infrastructure with the host.
- * The support for sub-channels is implemented as an extention
+ * The support for sub-channels is implemented as an extension
* to the current infrastructure.
* The initial offer is considered the primary channel and this
* offer message will indicate if the host supports sub-channels.
- * The guest is free to ask for sub-channels to be offerred and can
+ * The guest is free to ask for sub-channels to be offered and can
* open these sub-channels as a normal "primary" channel. However,
* all sub-channels will have the same type and instance guids as the
* primary channel. Requests sent on a given channel will result in a
@@ -836,12 +934,6 @@ struct vmbus_channel {
void (*chn_rescind_callback)(struct vmbus_channel *channel);
/*
- * The spinlock to protect the structure. It is being used to protect
- * test-and-set access to various attributes of the structure as well
- * as all sc_list operations.
- */
- spinlock_t lock;
- /*
* All Sub-channels of a primary channel are linked here.
*/
struct list_head sc_list;
@@ -854,11 +946,6 @@ struct vmbus_channel {
* Support per-channel state for use by vmbus drivers.
*/
void *per_channel_state;
- /*
- * To support per-cpu lookup mapping of relid to channel,
- * link up channels based on their CPU affinity.
- */
- struct list_head percpu_list;
/*
* Defer freeing channel until after all cpu's have
@@ -890,26 +977,21 @@ struct vmbus_channel {
* Clearly, these optimizations improve throughput at the expense of
* latency. Furthermore, since the channel is shared for both
* control and data messages, control messages currently suffer
- * unnecessary latency adversley impacting performance and boot
+ * unnecessary latency adversely impacting performance and boot
* time. To fix this issue, permit tagging the channel as being
* in "low latency" mode. In this mode, we will bypass the monitor
* mechanism.
*/
bool low_latency;
+ bool probe_done;
+
/*
- * NUMA distribution policy:
- * We support two policies:
- * 1) Balanced: Here all performance critical channels are
- * distributed evenly amongst all the NUMA nodes.
- * This policy will be the default policy.
- * 2) Localized: All channels of a given instance of a
- * performance critical service will be assigned CPUs
- * within a selected NUMA node.
+ * Cache the device ID here for easy access; this is useful, in
+ * particular, in situations where the channel's device_obj has
+ * not been allocated/initialized yet.
*/
- enum hv_numa_policy affinity_policy;
-
- bool probe_done;
+ u16 device_id;
/*
* We must offload the handling of the primary/sub channels
@@ -951,23 +1033,54 @@ struct vmbus_channel {
u32 fuzz_testing_interrupt_delay;
u32 fuzz_testing_message_delay;
+ /* callback to generate a request ID from a request address */
+ u64 (*next_request_id_callback)(struct vmbus_channel *channel, u64 rqst_addr);
+ /* callback to retrieve a request address from a request ID */
+ u64 (*request_addr_callback)(struct vmbus_channel *channel, u64 rqst_id);
+
+ /* request/transaction ids for VMBus */
+ struct vmbus_requestor requestor;
+ u32 rqstor_size;
+
+ /* The max size of a packet on this channel */
+ u32 max_pkt_size;
};
-static inline bool is_hvsock_channel(const struct vmbus_channel *c)
+#define lock_requestor(channel, flags) \
+do { \
+ struct vmbus_requestor *rqstor = &(channel)->requestor; \
+ \
+ spin_lock_irqsave(&rqstor->req_lock, flags); \
+} while (0)
+
+static __always_inline void unlock_requestor(struct vmbus_channel *channel,
+ unsigned long flags)
+{
+ struct vmbus_requestor *rqstor = &channel->requestor;
+
+ spin_unlock_irqrestore(&rqstor->req_lock, flags);
+}
+
+u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr);
+u64 __vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
+ u64 rqst_addr);
+u64 vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
+ u64 rqst_addr);
+u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id);
+
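The two callbacks above let a driver plug in the stock requestor helpers declared here: vmbus_next_request_id() hands out a compact transaction ID for a driver request address at send time, and vmbus_request_addr() maps the host's completion ID back to that address. (lock_requestor() is a macro rather than an inline because spin_lock_irqsave() must store into the caller's flags lvalue.) A minimal sketch of the usual wiring, assuming a hypothetical driver whose request block is struct my_request; only the vmbus_* names and channel fields come from this header:

struct my_request {
	struct completion done;
};

/* Hypothetical open path: wire up the stock requestor before vmbus_open(). */
static void my_setup_channel(struct vmbus_channel *chan)
{
	chan->rqstor_size = 64;	/* outstanding-request slots (illustrative) */
	chan->next_request_id_callback = vmbus_next_request_id;
	chan->request_addr_callback = vmbus_request_addr;
}

/* Hypothetical completion path: trans_id arrives in the host's packet. */
static void my_on_completion(struct vmbus_channel *chan, u64 trans_id)
{
	u64 addr = vmbus_request_addr(chan, trans_id);

	/*
	 * VMBUS_RQST_ERROR sits with the sentinels above, just outside
	 * this hunk's context; treat it as a failed lookup.
	 */
	if (addr == VMBUS_RQST_ERROR || addr == VMBUS_RQST_ADDR_ANY)
		return;

	complete(&((struct my_request *)(unsigned long)addr)->done);
}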
+static inline bool is_hvsock_offer(const struct vmbus_channel_offer_channel *o)
{
- return !!(c->offermsg.offer.chn_flags &
- VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
+ return !!(o->offer.chn_flags & VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
}
-static inline bool is_sub_channel(const struct vmbus_channel *c)
+static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
- return c->offermsg.offer.sub_channel_index != 0;
+ return is_hvsock_offer(&c->offermsg);
}
-static inline void set_channel_affinity_state(struct vmbus_channel *c,
- enum hv_numa_policy policy)
+static inline bool is_sub_channel(const struct vmbus_channel *c)
{
- c->affinity_policy = policy;
+ return c->offermsg.offer.sub_channel_index != 0;
}
static inline void set_channel_read_mode(struct vmbus_channel *c,
@@ -1007,17 +1120,7 @@ static inline void set_channel_pending_send_size(struct vmbus_channel *c,
c->outbound.ring_buffer->pending_send_sz = size;
}
-static inline void set_low_latency_mode(struct vmbus_channel *c)
-{
- c->low_latency = true;
-}
-
-static inline void clear_low_latency_mode(struct vmbus_channel *c)
-{
- c->low_latency = false;
-}
-
-void vmbus_onmessage(void *context);
+void vmbus_onmessage(struct vmbus_channel_message_header *hdr);
int vmbus_request_offers(void);
@@ -1031,19 +1134,6 @@ void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
void (*chn_rescind_cb)(struct vmbus_channel *));
-/*
- * Check if sub-channels have already been offerred. This API will be useful
- * when the driver is unloaded after establishing sub-channels. In this case,
- * when the driver is re-loaded, the driver would have to check if the
- * subchannels have already been established before attempting to request
- * the creation of sub-channels.
- * This function returns TRUE to indicate that subchannels have already been
- * created.
- * This function should be invoked after setting the callback function for
- * sub-channel creation.
- */
-bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
-
/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
u16 type;
@@ -1099,6 +1189,13 @@ extern int vmbus_open(struct vmbus_channel *channel,
extern void vmbus_close(struct vmbus_channel *channel);
+extern int vmbus_sendpacket_getid(struct vmbus_channel *channel,
+ void *buffer,
+ u32 bufferLen,
+ u64 requestid,
+ u64 *trans_id,
+ enum vmbus_packet_type type,
+ u32 flags);
extern int vmbus_sendpacket(struct vmbus_channel *channel,
void *buffer,
u32 bufferLen,
@@ -1123,10 +1220,10 @@ extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
void *kbuffer,
u32 size,
- u32 *gpadl_handle);
+ struct vmbus_gpadl *gpadl);
extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
- u32 gpadl_handle);
+ struct vmbus_gpadl *gpadl);
void vmbus_reset_channel_cb(struct vmbus_channel *channel);
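With the handle now carried in struct vmbus_gpadl (defined earlier in this patch), establish and teardown pair up on the same object. A minimal sketch, assuming a driver-owned buffer; all names other than the vmbus_* calls are hypothetical:

/* Map a guest buffer into a host-visible GPADL, and tear it down later. */
static int my_map_buffer(struct vmbus_channel *chan, void *buf, u32 size,
			 struct vmbus_gpadl *gpadl)
{
	int ret = vmbus_establish_gpadl(chan, buf, size, gpadl);

	if (ret)
		return ret;

	/* gpadl->gpadl_handle can now be passed to the host. */
	return 0;
}

static void my_unmap_buffer(struct vmbus_channel *chan,
			    struct vmbus_gpadl *gpadl)
{
	vmbus_teardown_gpadl(chan, gpadl);
}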
@@ -1195,10 +1292,16 @@ struct hv_device {
u16 device_id;
struct device device;
- char *driver_override; /* Driver name to force a match */
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
struct vmbus_channel *channel;
struct kset *channels_kset;
+ struct device_dma_parameters dma_parms;
+ u64 dma_mask;
/* place holder to keep track of the dir for hv device in debugfs */
struct dentry *debug_dir;
@@ -1387,12 +1490,14 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size);
0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
/*
- * Linux doesn't support the 3 devices: the first two are for
- * Automatic Virtual Machine Activation, and the third is for
- * Remote Desktop Virtualization.
+ * Linux doesn't support these 4 devices: the first two are for
+ * Automatic Virtual Machine Activation, the third is for
+ * Remote Desktop Virtualization, and the fourth is Initial
+ * Machine Configuration (IMC) used only by Windows guests.
* {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
* {3375baf4-9e15-4b30-b765-67acb10d607b}
* {276aacf4-ac15-426c-98dd-7521ad3f01fe}
+ * {c376c1c3-d276-48d2-90a9-c04748072c60}
*/
#define HV_AVMA1_GUID \
@@ -1407,6 +1512,10 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size);
.guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
+#define HV_IMC_GUID \
+ .guid = GUID_INIT(0xc376c1c3, 0xd276, 0x48d2, 0x90, 0xa9, \
+ 0xc0, 0x47, 0x48, 0x07, 0x2c, 0x60)
+
/*
* Common header for Hyper-V ICs
*/
@@ -1417,6 +1526,7 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size);
#define ICMSGTYPE_SHUTDOWN 3
#define ICMSGTYPE_TIMESYNC 4
#define ICMSGTYPE_VSS 5
+#define ICMSGTYPE_FCOPY 7
#define ICMSGHDRFLAG_TRANSACTION 1
#define ICMSGHDRFLAG_REQUEST 2
@@ -1460,11 +1570,17 @@ struct icmsg_hdr {
u8 reserved[2];
} __packed;
+#define IC_VERSION_NEGOTIATION_MAX_VER_COUNT 100
+#define ICMSG_HDR (sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr))
+#define ICMSG_NEGOTIATE_PKT_SIZE(icframe_vercnt, icmsg_vercnt) \
+ (ICMSG_HDR + sizeof(struct icmsg_negotiate) + \
+ (((icframe_vercnt) + (icmsg_vercnt)) * sizeof(struct ic_version)))
+
struct icmsg_negotiate {
u16 icframe_vercnt;
u16 icmsg_vercnt;
u32 reserved;
- struct ic_version icversion_data[1]; /* any size array */
+ struct ic_version icversion_data[]; /* any size array */
} __packed;
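A quick size check of the ICMSG_NEGOTIATE_PKT_SIZE() macro above against the now-flexible icversion_data[] array; the version counts are illustrative:

/*
 * A negotiate packet advertising 2 framework versions and 3 message
 * versions occupies
 *
 *   ICMSG_HDR + sizeof(struct icmsg_negotiate) + (2 + 3) * sizeof(struct ic_version)
 *
 * bytes, i.e. exactly ICMSG_NEGOTIATE_PKT_SIZE(2, 3).
 */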
struct shutdown_msg_data {
@@ -1514,8 +1630,13 @@ struct hyperv_service_callback {
void (*callback)(void *context);
};
+struct hv_dma_range {
+ dma_addr_t dma;
+ u32 mapping_size;
+};
+
#define MAX_SRV_VER 0x7ffffff
-extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
+extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, u32 buflen,
const int *fw_version, int fw_vercnt,
const int *srv_version, int srv_vercnt,
int *nego_fw_version, int *nego_srv_version);
@@ -1531,6 +1652,7 @@ extern __u32 vmbus_proto_version;
int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
const guid_t *shv_host_servie_id);
+int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp);
void vmbus_set_event(struct vmbus_channel *channel);
/* Get the start of the ring buffer. */
@@ -1586,6 +1708,11 @@ static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
return (desc->len8 << 3) - (desc->offset8 << 3);
}
+/* Get packet length associated with descriptor */
+static inline u32 hv_pkt_len(const struct vmpacket_descriptor *desc)
+{
+ return desc->len8 << 3;
+}
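Both helpers decode the descriptor's 8-byte-granule length fields; a worked example with made-up field values:

/*
 * Illustrative only: a descriptor with len8 = 6 and offset8 = 2
 * describes a 48-byte packet whose payload starts 16 bytes in:
 *
 *   hv_pkt_len(desc)     == 6 << 3              == 48
 *   hv_pkt_datalen(desc) == (6 << 3) - (2 << 3) == 32
 */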
struct vmpacket_descriptor *
hv_pkt_iter_first(struct vmbus_channel *channel);
@@ -1596,10 +1723,6 @@ __hv_pkt_iter_next(struct vmbus_channel *channel,
void hv_pkt_iter_close(struct vmbus_channel *channel);
-/*
- * Get next packet descriptor from iterator
- * If at end of list, return NULL and update host.
- */
static inline struct vmpacket_descriptor *
hv_pkt_iter_next(struct vmbus_channel *channel,
const struct vmpacket_descriptor *pkt)
@@ -1646,4 +1769,23 @@ struct hyperv_pci_block_ops {
extern struct hyperv_pci_block_ops hvpci_block_ops;
+static inline unsigned long virt_to_hvpfn(void *addr)
+{
+ phys_addr_t paddr;
+
+ if (is_vmalloc_addr(addr))
+ paddr = page_to_phys(vmalloc_to_page(addr)) +
+ offset_in_page(addr);
+ else
+ paddr = __pa(addr);
+
+ return paddr >> HV_HYP_PAGE_SHIFT;
+}
+
+#define NR_HV_HYP_PAGES_IN_PAGE (PAGE_SIZE / HV_HYP_PAGE_SIZE)
+#define offset_in_hvpage(ptr) ((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
+#define HVPFN_UP(x) (((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
+#define HVPFN_DOWN(x) ((x) >> HV_HYP_PAGE_SHIFT)
+#define page_to_hvpfn(page) (page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
+
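The hvpfn helpers split buffers on the Hyper-V page size, which can be smaller than the guest's PAGE_SIZE. A minimal sketch of the common pattern for counting the Hyper-V pages backing a buffer; the function name is hypothetical:

/* Number of HV_HYP_PAGE_SIZE pages spanned by len bytes at buf. */
static inline unsigned long my_hvpfn_count(void *buf, u32 len)
{
	return HVPFN_UP(offset_in_hvpage(buf) + len);
}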
#endif /* _HYPERV_H */
diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h
index fc08b433c856..9efbc54e35e5 100644
--- a/include/linux/hypervisor.h
+++ b/include/linux/hypervisor.h
@@ -32,4 +32,12 @@ static inline bool jailhouse_paravirt(void)
#endif /* !CONFIG_X86 */
+static inline bool hypervisor_isolated_pci_functions(void)
+{
+ if (IS_ENABLED(CONFIG_S390))
+ return true;
+
+ return jailhouse_paravirt();
+}
+
#endif /* __LINUX_HYPEVISOR_H */
diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h
index d03071732db4..7c522fdd9ea7 100644
--- a/include/linux/i2c-algo-pca.h
+++ b/include/linux/i2c-algo-pca.h
@@ -53,6 +53,20 @@
#define I2C_PCA_CON_SI 0x08 /* Serial Interrupt */
#define I2C_PCA_CON_CR 0x07 /* Clock Rate (MASK) */
+/**
+ * struct pca_i2c_bus_settings - The configured PCA i2c bus settings
+ * @mode: Configured i2c bus mode
+ * @tlow: Configured SCL LOW period
+ * @thi: Configured SCL HIGH period
+ * @clock_freq: The configured clock frequency
+ */
+struct pca_i2c_bus_settings {
+ int mode;
+ int tlow;
+ int thi;
+ int clock_freq;
+};
+
struct i2c_algo_pca_data {
void *data; /* private low level data */
void (*write_byte) (void *data, int reg, int val);
@@ -64,6 +78,7 @@ struct i2c_algo_pca_data {
* For PCA9665, use the frequency you want here. */
unsigned int i2c_clock;
unsigned int chip;
+ struct pca_i2c_bus_settings bus_settings;
};
int i2c_pca_add_bus(struct i2c_adapter *);
diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h
index c5a977320f82..98ef73b7c8fd 100644
--- a/include/linux/i2c-mux.h
+++ b/include/linux/i2c-mux.h
@@ -29,7 +29,7 @@ struct i2c_mux_core {
int num_adapters;
int max_adapters;
- struct i2c_adapter *adapter[0];
+ struct i2c_adapter *adapter[];
};
struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h
index 585ad6fc3847..ced1c6ead52a 100644
--- a/include/linux/i2c-smbus.h
+++ b/include/linux/i2c-smbus.h
@@ -2,7 +2,7 @@
/*
* i2c-smbus.h - SMBus extensions to the I2C protocol
*
- * Copyright (C) 2010 Jean Delvare <jdelvare@suse.de>
+ * Copyright (C) 2010-2019 Jean Delvare <jdelvare@suse.de>
*/
#ifndef _LINUX_I2C_SMBUS_H
@@ -15,33 +15,38 @@
/**
* i2c_smbus_alert_setup - platform data for the smbus_alert i2c client
- * @alert_edge_triggered: whether the alert interrupt is edge (1) or level (0)
- * triggered
* @irq: IRQ number, if the smbus_alert driver should take care of interrupt
* handling
*
* If irq is not specified, the smbus_alert driver doesn't take care of
* interrupt handling. In that case it is up to the I2C bus driver to either
* handle the interrupts or to poll for alerts.
- *
- * If irq is specified then it it crucial that alert_edge_triggered is
- * properly set.
*/
struct i2c_smbus_alert_setup {
int irq;
};
-struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
- struct i2c_smbus_alert_setup *setup);
+struct i2c_client *i2c_new_smbus_alert_device(struct i2c_adapter *adapter,
+ struct i2c_smbus_alert_setup *setup);
int i2c_handle_smbus_alert(struct i2c_client *ara);
-#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_OF)
-int of_i2c_setup_smbus_alert(struct i2c_adapter *adap);
+#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_I2C_SLAVE)
+struct i2c_client *i2c_new_slave_host_notify_device(struct i2c_adapter *adapter);
+void i2c_free_slave_host_notify_device(struct i2c_client *client);
#else
-static inline int of_i2c_setup_smbus_alert(struct i2c_adapter *adap)
+static inline struct i2c_client *i2c_new_slave_host_notify_device(struct i2c_adapter *adapter)
+{
+ return ERR_PTR(-ENOSYS);
+}
+static inline void i2c_free_slave_host_notify_device(struct i2c_client *client)
{
- return 0;
}
#endif
+#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_DMI)
+void i2c_register_spd(struct i2c_adapter *adap);
+#else
+static inline void i2c_register_spd(struct i2c_adapter *adap) { }
+#endif
+
#endif /* _LINUX_I2C_SMBUS_H */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index f834687989f7..f7c49bbdb8a1 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -2,7 +2,7 @@
/*
* i2c.h - definitions for the Linux i2c bus interface
* Copyright (C) 1995-2000 Simon G. Vogl
- * Copyright (C) 2013-2019 Wolfram Sang <wsa@the-dreams.de>
+ * Copyright (C) 2013-2019 Wolfram Sang <wsa@kernel.org>
*
* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
* Frodo Looijaard <frodol@dds.nl>
@@ -11,10 +11,12 @@
#define _LINUX_I2C_H
#include <linux/acpi.h> /* for acpi_handle */
+#include <linux/bits.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h> /* for struct device */
#include <linux/sched.h> /* for completion */
#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
#include <linux/rtmutex.h>
#include <linux/irqdomain.h> /* for Host Notify IRQ */
#include <linux/of.h> /* for struct device_node */
@@ -39,16 +41,27 @@ enum i2c_slave_event;
typedef int (*i2c_slave_cb_t)(struct i2c_client *client,
enum i2c_slave_event event, u8 *val);
+/* I2C Frequency Modes */
+#define I2C_MAX_STANDARD_MODE_FREQ 100000
+#define I2C_MAX_FAST_MODE_FREQ 400000
+#define I2C_MAX_FAST_MODE_PLUS_FREQ 1000000
+#define I2C_MAX_TURBO_MODE_FREQ 1400000
+#define I2C_MAX_HIGH_SPEED_MODE_FREQ 3400000
+#define I2C_MAX_ULTRA_FAST_MODE_FREQ 5000000
+
struct module;
struct property_entry;
#if IS_ENABLED(CONFIG_I2C)
+/* Return the Frequency mode string based on the bus frequency */
+const char *i2c_freq_mode_string(u32 bus_freq_hz);
+
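The frequency-mode constants above define the buckets this helper reports. A hedged usage sketch, assuming a driver that has already filled a struct i2c_timings (for instance via i2c_parse_fw_timings()); the function name is hypothetical:

/* Log the configured bus speed and its I2C mode class. */
static void my_report_speed(struct i2c_adapter *adap,
			    const struct i2c_timings *t)
{
	dev_info(&adap->dev, "bus frequency %u Hz (%s)\n",
		 t->bus_freq_hz, i2c_freq_mode_string(t->bus_freq_hz));
}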
/*
* The master routines are the ones normally used to transmit data to devices
* on a bus (or read from them). Apart from two basic transfer functions to
* transmit one message at a time, a more complex version can be used to
* transmit an arbitrary number of messages without interruption.
- * @count must be be less than 64k since msg.len is u16.
+ * @count must be less than 64k since msg.len is u16.
*/
int i2c_transfer_buffer_flags(const struct i2c_client *client,
char *buf, int count, u16 flags);
@@ -136,6 +149,7 @@ s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
/* Now follow the 'nice' access routines. These also document the calling
conventions of i2c_smbus_xfer. */
+u8 i2c_smbus_pec(u8 crc, u8 *p, size_t count);
s32 i2c_smbus_read_byte(const struct i2c_client *client);
s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value);
s32 i2c_smbus_read_byte_data(const struct i2c_client *client, u8 command);
@@ -210,6 +224,15 @@ enum i2c_alert_protocol {
};
/**
+ * enum i2c_driver_flags - Flags for an I2C device driver
+ *
+ * @I2C_DRV_ACPI_WAIVE_D0_PROBE: Don't put the device in D0 state for probe
+ */
+enum i2c_driver_flags {
+ I2C_DRV_ACPI_WAIVE_D0_PROBE = BIT(0),
+};
+
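A driver opts in by setting this bit in its struct i2c_driver flags field; a minimal sketch with hypothetical driver names, using the probe_new() variant mentioned in the kernel-doc below:

static int my_probe(struct i2c_client *client)
{
	/* The device may still be in a low-power ACPI state here. */
	return 0;
}

static struct i2c_driver my_sensor_driver = {
	.driver = {
		.name = "my-sensor",
	},
	.probe_new = my_probe,
	.flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,
};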
+/**
* struct i2c_driver - represent an I2C device driver
* @class: What kind of i2c device we instantiate (for detect)
* @probe: Callback for device binding - soon to be deprecated
@@ -223,7 +246,7 @@ enum i2c_alert_protocol {
* @detect: Callback for device detection
* @address_list: The I2C addresses to probe (for detect)
* @clients: List of detected clients we created (for i2c-core use only)
- * @disable_i2c_core_irq_mapping: Tell the i2c-core to not do irq-mapping
+ * @flags: A bitmask of flags defined in &enum i2c_driver_flags
*
* The driver.owner field should be set to the module owner of this driver.
* The driver.name field should be set to the name of this driver.
@@ -250,7 +273,7 @@ struct i2c_driver {
/* Standard driver model interfaces */
int (*probe)(struct i2c_client *client, const struct i2c_device_id *id);
- int (*remove)(struct i2c_client *client);
+ void (*remove)(struct i2c_client *client);
/* New driver model interface to aid the seamless removal of the
* current probe()'s, more commonly unused than used second parameter.
@@ -283,7 +306,7 @@ struct i2c_driver {
const unsigned short *address_list;
struct list_head clients;
- bool disable_i2c_core_irq_mapping;
+ u32 flags;
};
#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
@@ -301,6 +324,8 @@ struct i2c_driver {
* userspace_devices list
* @slave_cb: Callback when I2C slave mode of an adapter is used. The adapter
* calls it to pass on slave events to the slave driver.
+ * @devres_group_id: id of the devres group that will be created for resources
+ * acquired when probing this device.
*
* An i2c_client identifies a single device (i.e. chip) connected to an
* i2c bus. The behaviour exposed to Linux is defined by the driver
@@ -329,28 +354,28 @@ struct i2c_client {
#if IS_ENABLED(CONFIG_I2C_SLAVE)
i2c_slave_cb_t slave_cb; /* callback for slave mode */
#endif
+ void *devres_group_id; /* ID of probe devres group */
};
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
-struct i2c_client *i2c_verify_client(struct device *dev);
struct i2c_adapter *i2c_verify_adapter(struct device *dev);
const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
const struct i2c_client *client);
static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
{
- struct device * const dev = container_of(kobj, struct device, kobj);
+ struct device * const dev = kobj_to_dev(kobj);
return to_i2c_client(dev);
}
-static inline void *i2c_get_clientdata(const struct i2c_client *dev)
+static inline void *i2c_get_clientdata(const struct i2c_client *client)
{
- return dev_get_drvdata(&dev->dev);
+ return dev_get_drvdata(&client->dev);
}
-static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
+static inline void i2c_set_clientdata(struct i2c_client *client, void *data)
{
- dev_set_drvdata(&dev->dev, data);
+ dev_set_drvdata(&client->dev, data);
}
/* I2C slave support */
@@ -367,12 +392,8 @@ enum i2c_slave_event {
int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb);
int i2c_slave_unregister(struct i2c_client *client);
bool i2c_detect_slave_mode(struct device *dev);
-
-static inline int i2c_slave_event(struct i2c_client *client,
- enum i2c_slave_event event, u8 *val)
-{
- return client->slave_cb(client, event, val);
-}
+int i2c_slave_event(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val);
#else
static inline bool i2c_detect_slave_mode(struct device *dev) { return false; }
#endif
@@ -386,7 +407,7 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; }
* @platform_data: stored in i2c_client.dev.platform_data
* @of_node: pointer to OpenFirmware device node
* @fwnode: device node supplied by the platform firmware
- * @properties: additional device properties for the device
+ * @swnode: software node for the device
* @resources: resources associated with the device
* @num_resources: number of resources in the @resources array
* @irq: stored in i2c_client.irq
@@ -400,7 +421,7 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; }
* that are present. This information is used to grow the driver model tree.
* For mainboards this is done statically using i2c_register_board_info();
* bus numbers identify adapters that aren't yet available. For add-on boards,
- * i2c_new_device() does this dynamically with the adapter already known.
+ * i2c_new_client_device() does this dynamically with the adapter already known.
*/
struct i2c_board_info {
char type[I2C_NAME_SIZE];
@@ -410,7 +431,7 @@ struct i2c_board_info {
void *platform_data;
struct device_node *of_node;
struct fwnode_handle *fwnode;
- const struct property_entry *properties;
+ const struct software_node *swnode;
const struct resource *resources;
unsigned int num_resources;
int irq;
@@ -431,14 +452,12 @@ struct i2c_board_info {
#if IS_ENABLED(CONFIG_I2C)
-/* Add-on boards should register/unregister their devices; e.g. a board
+/*
+ * Add-on boards should register/unregister their devices; e.g. a board
* with integrated I2C, a config eeprom, sensors, and a codec that's
* used in conjunction with the primary hardware.
*/
struct i2c_client *
-i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
-
-struct i2c_client *
i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
/* If you don't know the exact address of an I2C device, use this variant
@@ -453,12 +472,6 @@ i2c_new_scanned_device(struct i2c_adapter *adap,
unsigned short const *addr_list,
int (*probe)(struct i2c_adapter *adap, unsigned short addr));
-struct i2c_client *
-i2c_new_probed_device(struct i2c_adapter *adap,
- struct i2c_board_info *info,
- unsigned short const *addr_list,
- int (*probe)(struct i2c_adapter *adap, unsigned short addr));
-
/* Common custom probe functions */
int i2c_probe_func_quick_read(struct i2c_adapter *adap, unsigned short addr);
@@ -474,6 +487,13 @@ i2c_new_ancillary_device(struct i2c_client *client,
u16 default_addr);
void i2c_unregister_device(struct i2c_client *client);
+
+struct i2c_client *i2c_verify_client(struct device *dev);
+#else
+static inline struct i2c_client *i2c_verify_client(struct device *dev)
+{
+ return NULL;
+}
#endif /* I2C */
/* Mainboard arch_initcall() code should register all its I2C devices.
@@ -506,7 +526,7 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
* @smbus_xfer_atomic: same as @smbus_xfer. Yet, only using atomic context
* so e.g. PMICs can be accessed very late before shutdown. Optional.
* @functionality: Return the flags that this algorithm/adapter pair supports
- * from the I2C_FUNC_* flags.
+ * from the ``I2C_FUNC_*`` flags.
* @reg_slave: Register given client to I2C slave mode of this adapter
* @unreg_slave: Unregister given client from I2C slave mode of this adapter
*
@@ -515,9 +535,10 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
* be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584
* to name two of the most common.
*
- * The return codes from the @master_xfer{_atomic} fields should indicate the
+ * The return codes from the ``master_xfer{_atomic}`` fields should indicate the
* type of error code that occurred during the transfer, as documented in the
- * Kernel Documentation file Documentation/i2c/fault-codes.rst.
+ * Kernel Documentation file Documentation/i2c/fault-codes.rst. Otherwise, the
+ * number of messages executed should be returned.
*/
struct i2c_algorithm {
/*
@@ -609,6 +630,14 @@ struct i2c_timings {
* may configure padmux here for SDA/SCL line or something else they want.
* @scl_gpiod: gpiod of the SCL line. Only required for GPIO recovery.
* @sda_gpiod: gpiod of the SDA line. Only required for GPIO recovery.
+ * @pinctrl: pinctrl used by GPIO recovery to change the state of the I2C pins.
+ * Optional.
+ * @pins_default: default pinctrl state of SCL/SDA lines, when they are assigned
+ * to the I2C bus. Optional. Populated internally for GPIO recovery, if
+ * state with the name PINCTRL_STATE_DEFAULT is found and pinctrl is valid.
+ * @pins_gpio: recovery pinctrl state of SCL/SDA lines, when they are used as
+ * GPIOs. Optional. Populated internally for GPIO recovery, if this state
+ * is called "gpio" or "recovery" and pinctrl is valid.
*/
struct i2c_bus_recovery_info {
int (*recover_bus)(struct i2c_adapter *adap);
@@ -625,6 +654,9 @@ struct i2c_bus_recovery_info {
/* gpio recovery */
struct gpio_desc *scl_gpiod;
struct gpio_desc *sda_gpiod;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_gpio;
};
int i2c_recover_bus(struct i2c_adapter *adap);
@@ -679,6 +711,8 @@ struct i2c_adapter_quirks {
#define I2C_AQ_NO_ZERO_LEN_READ BIT(5)
#define I2C_AQ_NO_ZERO_LEN_WRITE BIT(6)
#define I2C_AQ_NO_ZERO_LEN (I2C_AQ_NO_ZERO_LEN_READ | I2C_AQ_NO_ZERO_LEN_WRITE)
+/* adapter cannot do repeated START */
+#define I2C_AQ_NO_REP_START BIT(7)
/*
* i2c_adapter is the structure used to identify a physical i2c bus along
@@ -713,6 +747,7 @@ struct i2c_adapter {
const struct i2c_adapter_quirks *quirks;
struct irq_domain *host_notify_domain;
+ struct regulator *bus_regulator;
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
@@ -836,6 +871,7 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap)
*/
#if IS_ENABLED(CONFIG_I2C)
int i2c_add_adapter(struct i2c_adapter *adap);
+int devm_i2c_add_adapter(struct device *dev, struct i2c_adapter *adapter);
void i2c_del_adapter(struct i2c_adapter *adap);
int i2c_add_numbered_adapter(struct i2c_adapter *adap);
@@ -984,29 +1020,48 @@ struct acpi_resource_i2c_serialbus;
#if IS_ENABLED(CONFIG_ACPI)
bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
struct acpi_resource_i2c_serialbus **i2c);
+int i2c_acpi_client_count(struct acpi_device *adev);
u32 i2c_acpi_find_bus_speed(struct device *dev);
-struct i2c_client *i2c_acpi_new_device(struct device *dev, int index,
- struct i2c_board_info *info);
+struct i2c_client *i2c_acpi_new_device_by_fwnode(struct fwnode_handle *fwnode,
+ int index,
+ struct i2c_board_info *info);
struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle);
+bool i2c_acpi_waive_d0_probe(struct device *dev);
#else
static inline bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
struct acpi_resource_i2c_serialbus **i2c)
{
return false;
}
+static inline int i2c_acpi_client_count(struct acpi_device *adev)
+{
+ return 0;
+}
static inline u32 i2c_acpi_find_bus_speed(struct device *dev)
{
return 0;
}
-static inline struct i2c_client *i2c_acpi_new_device(struct device *dev,
- int index, struct i2c_board_info *info)
+static inline struct i2c_client *i2c_acpi_new_device_by_fwnode(
+ struct fwnode_handle *fwnode, int index,
+ struct i2c_board_info *info)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static inline struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
{
return NULL;
}
+static inline bool i2c_acpi_waive_d0_probe(struct device *dev)
+{
+ return false;
+}
#endif /* CONFIG_ACPI */
+static inline struct i2c_client *i2c_acpi_new_device(struct device *dev,
+ int index,
+ struct i2c_board_info *info)
+{
+ return i2c_acpi_new_device_by_fwnode(dev_fwnode(dev), index, info);
+}
+
#endif /* _LINUX_I2C_H */
diff --git a/include/linux/i3c/ccc.h b/include/linux/i3c/ccc.h
index 73b0982cc519..ad59a4ae60d1 100644
--- a/include/linux/i3c/ccc.h
+++ b/include/linux/i3c/ccc.h
@@ -132,7 +132,7 @@ struct i3c_ccc_dev_desc {
struct i3c_ccc_defslvs {
u8 count;
struct i3c_ccc_dev_desc master;
- struct i3c_ccc_dev_desc slaves[0];
+ struct i3c_ccc_dev_desc slaves[];
} __packed;
/**
@@ -240,7 +240,7 @@ struct i3c_ccc_bridged_slave_desc {
*/
struct i3c_ccc_setbrgtgt {
u8 count;
- struct i3c_ccc_bridged_slave_desc bslaves[0];
+ struct i3c_ccc_bridged_slave_desc bslaves[];
} __packed;
/**
@@ -318,7 +318,7 @@ enum i3c_ccc_setxtime_subcmd {
*/
struct i3c_ccc_setxtime {
u8 subcmd;
- u8 data[0];
+ u8 data[];
} __packed;
#define I3C_CCC_GETXTIME_SYNC_MODE BIT(0)
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
index de102e4418ab..8242e13e7b0b 100644
--- a/include/linux/i3c/device.h
+++ b/include/linux/i3c/device.h
@@ -176,7 +176,7 @@ struct i3c_device;
struct i3c_driver {
struct device_driver driver;
int (*probe)(struct i3c_device *dev);
- int (*remove)(struct i3c_device *dev);
+ void (*remove)(struct i3c_device *dev);
const struct i3c_device_id *id_table;
};
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
index 9cb39d901cd5..604a126b78c8 100644
--- a/include/linux/i3c/master.h
+++ b/include/linux/i3c/master.h
@@ -85,7 +85,6 @@ struct i2c_dev_boardinfo {
*/
struct i2c_dev_desc {
struct i3c_i2c_dev_desc common;
- const struct i2c_dev_boardinfo *boardinfo;
struct i2c_client *dev;
u16 addr;
u8 lvr;
diff --git a/include/linux/icmp.h b/include/linux/icmp.h
index 81ca84ce3119..0af4d210ee31 100644
--- a/include/linux/icmp.h
+++ b/include/linux/icmp.h
@@ -15,6 +15,7 @@
#include <linux/skbuff.h>
#include <uapi/linux/icmp.h>
+#include <uapi/linux/errqueue.h>
static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb)
{
@@ -35,4 +36,8 @@ static inline bool icmp_is_err(int type)
return false;
}
+void ip_icmp_error_rfc4884(const struct sk_buff *skb,
+ struct sock_ee_data_rfc4884 *out,
+ int thlen, int off);
+
#endif /* _LINUX_ICMP_H */
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index 33d379602314..db0f4fcfdaf4 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -3,6 +3,7 @@
#define _LINUX_ICMPV6_H
#include <linux/skbuff.h>
+#include <linux/ipv6.h>
#include <uapi/linux/icmpv6.h>
static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
@@ -13,19 +14,52 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
#include <linux/netdevice.h>
#if IS_ENABLED(CONFIG_IPV6)
-extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info);
typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info,
- const struct in6_addr *force_saddr);
+ const struct in6_addr *force_saddr,
+ const struct inet6_skb_parm *parm);
+void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ const struct in6_addr *force_saddr,
+ const struct inet6_skb_parm *parm);
+#if IS_BUILTIN(CONFIG_IPV6)
+static inline void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ const struct inet6_skb_parm *parm)
+{
+ icmp6_send(skb, type, code, info, NULL, parm);
+}
+static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn)
+{
+ BUILD_BUG_ON(fn != icmp6_send);
+ return 0;
+}
+static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn)
+{
+ BUILD_BUG_ON(fn != icmp6_send);
+ return 0;
+}
+#else
+extern void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ const struct inet6_skb_parm *parm);
extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn);
extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn);
+#endif
+
+static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
+{
+ __icmpv6_send(skb, type, code, info, IP6CB(skb));
+}
+
int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
unsigned int data_len);
#if IS_ENABLED(CONFIG_NF_NAT)
void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info);
#else
-#define icmpv6_ndo_send icmpv6_send
+static inline void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
+{
+ struct inet6_skb_parm parm = { 0 };
+ __icmpv6_send(skb_in, type, code, info, &parm);
+}
#endif
#else
@@ -45,8 +79,9 @@ extern int icmpv6_init(void);
extern int icmpv6_err_convert(u8 type, u8 code,
int *err);
extern void icmpv6_cleanup(void);
-extern void icmpv6_param_prob(struct sk_buff *skb,
- u8 code, int pos);
+extern void icmpv6_param_prob_reason(struct sk_buff *skb,
+ u8 code, int pos,
+ enum skb_drop_reason reason);
struct flowi6;
struct in6_addr;
@@ -57,6 +92,12 @@ extern void icmpv6_flow_init(struct sock *sk,
const struct in6_addr *daddr,
int oif);
+static inline void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
+{
+ icmpv6_param_prob_reason(skb, code, pos,
+ SKB_DROP_REASON_NOT_SPECIFIED);
+}
+
static inline bool icmpv6_is_err(int type)
{
switch (type) {
diff --git a/include/linux/ide.h b/include/linux/ide.h
deleted file mode 100644
index a254841bd315..000000000000
--- a/include/linux/ide.h
+++ /dev/null
@@ -1,1628 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _IDE_H
-#define _IDE_H
-/*
- * linux/include/linux/ide.h
- *
- * Copyright (C) 1994-2002 Linus Torvalds & authors
- */
-
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/ata.h>
-#include <linux/blk-mq.h>
-#include <linux/proc_fs.h>
-#include <linux/interrupt.h>
-#include <linux/bitops.h>
-#include <linux/bio.h>
-#include <linux/pci.h>
-#include <linux/completion.h>
-#include <linux/pm.h>
-#include <linux/mutex.h>
-/* for request_sense */
-#include <linux/cdrom.h>
-#include <scsi/scsi_cmnd.h>
-#include <asm/byteorder.h>
-#include <asm/io.h>
-
-/*
- * Probably not wise to fiddle with these
- */
-#define SUPPORT_VLB_SYNC 1
-#define IDE_DEFAULT_MAX_FAILURES 1
-#define ERROR_MAX 8 /* Max read/write errors per sector */
-#define ERROR_RESET 3 /* Reset controller every 4th retry */
-#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */
-
-struct device;
-
-/* values for ide_request.type */
-enum ata_priv_type {
- ATA_PRIV_MISC,
- ATA_PRIV_TASKFILE,
- ATA_PRIV_PC,
- ATA_PRIV_SENSE, /* sense request */
- ATA_PRIV_PM_SUSPEND, /* suspend request */
- ATA_PRIV_PM_RESUME, /* resume request */
-};
-
-struct ide_request {
- struct scsi_request sreq;
- u8 sense[SCSI_SENSE_BUFFERSIZE];
- u8 type;
- void *special;
-};
-
-static inline struct ide_request *ide_req(struct request *rq)
-{
- return blk_mq_rq_to_pdu(rq);
-}
-
-static inline bool ata_misc_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_MISC;
-}
-
-static inline bool ata_taskfile_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_TASKFILE;
-}
-
-static inline bool ata_pc_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_PC;
-}
-
-static inline bool ata_sense_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_SENSE;
-}
-
-static inline bool ata_pm_request(struct request *rq)
-{
- return blk_rq_is_private(rq) &&
- (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND ||
- ide_req(rq)->type == ATA_PRIV_PM_RESUME);
-}
-
-/* Error codes returned in result to the higher part of the driver. */
-enum {
- IDE_DRV_ERROR_GENERAL = 101,
- IDE_DRV_ERROR_FILEMARK = 102,
- IDE_DRV_ERROR_EOD = 103,
-};
-
-/*
- * Definitions for accessing IDE controller registers
- */
-#define IDE_NR_PORTS (10)
-
-struct ide_io_ports {
- unsigned long data_addr;
-
- union {
- unsigned long error_addr; /* read: error */
- unsigned long feature_addr; /* write: feature */
- };
-
- unsigned long nsect_addr;
- unsigned long lbal_addr;
- unsigned long lbam_addr;
- unsigned long lbah_addr;
-
- unsigned long device_addr;
-
- union {
- unsigned long status_addr; /*  read: status  */
- unsigned long command_addr; /* write: command */
- };
-
- unsigned long ctl_addr;
-
- unsigned long irq_addr;
-};
-
-#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good))
-
-#define BAD_R_STAT (ATA_BUSY | ATA_ERR)
-#define BAD_W_STAT (BAD_R_STAT | ATA_DF)
-#define BAD_STAT (BAD_R_STAT | ATA_DRQ)
-#define DRIVE_READY (ATA_DRDY | ATA_DSC)
-
-#define BAD_CRC (ATA_ABORTED | ATA_ICRC)
-
-#define SATA_NR_PORTS (3) /* 16 possible ?? */
-
-#define SATA_STATUS_OFFSET (0)
-#define SATA_ERROR_OFFSET (1)
-#define SATA_CONTROL_OFFSET (2)
-
-/*
- * Our Physical Region Descriptor (PRD) table should be large enough
- * to handle the biggest I/O request we are likely to see. Since requests
- * can have no more than 256 sectors, and since the typical blocksize is
- * two or more sectors, we could get by with a limit of 128 entries here for
- * the usual worst case. Most requests seem to include some contiguous blocks,
- * further reducing the number of table entries required.
- *
- * The driver reverts to PIO mode for individual requests that exceed
- * this limit (possible with 512 byte blocksizes, eg. MSDOS f/s), so handling
- * 100% of all crazy scenarios here is not necessary.
- *
- * As it turns out though, we must allocate a full 4KB page for this,
- * so the two PRD tables (ide0 & ide1) will each get half of that,
- * allowing each to have about 256 entries (8 bytes each) from this.
- */
-#define PRD_BYTES 8
-#define PRD_ENTRIES 256
-
-/*
- * Some more useful definitions
- */
-#define PARTN_BITS 6 /* number of minor dev bits for partitions */
-#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */
-
-/*
- * Timeouts for various operations:
- */
-enum {
- /* spec allows up to 20ms, but CF cards and SSD drives need more */
- WAIT_DRQ = 1 * HZ, /* 1s */
- /* some laptops are very slow */
- WAIT_READY = 5 * HZ, /* 5s */
- /* should be less than 3ms (?), if all ATAPI CD is closed at boot */
- WAIT_PIDENTIFY = 10 * HZ, /* 10s */
- /* worst case when spinning up */
- WAIT_WORSTCASE = 30 * HZ, /* 30s */
- /* maximum wait for an IRQ to happen */
- WAIT_CMD = 10 * HZ, /* 10s */
- /* Some drives require a longer IRQ timeout. */
- WAIT_FLOPPY_CMD = 50 * HZ, /* 50s */
- /*
- * Some drives (for example, Seagate STT3401A Travan) require a very
- * long timeout, because they don't return an interrupt or clear their
- * BSY bit until after the command completes (even retension commands).
- */
- WAIT_TAPE_CMD = 900 * HZ, /* 900s */
- /* minimum sleep time */
- WAIT_MIN_SLEEP = HZ / 50, /* 20ms */
-};
-
-/*
- * Op codes for special requests to be handled by ide_special_rq().
- * Values should be in the range of 0x20 to 0x3f.
- */
-#define REQ_DRIVE_RESET 0x20
-#define REQ_DEVSET_EXEC 0x21
-#define REQ_PARK_HEADS 0x22
-#define REQ_UNPARK_HEADS 0x23
-
-/*
- * hwif_chipset_t is used to keep track of the specific hardware
- * chipset used by each IDE interface, if known.
- */
-enum { ide_unknown, ide_generic, ide_pci,
- ide_cmd640, ide_dtc2278, ide_ali14xx,
- ide_qd65xx, ide_umc8672, ide_ht6560b,
- ide_4drives, ide_pmac, ide_acorn,
- ide_au1xxx, ide_palm3710
-};
-
-typedef u8 hwif_chipset_t;
-
-/*
- * Structure to hold all information about the location of this port
- */
-struct ide_hw {
- union {
- struct ide_io_ports io_ports;
- unsigned long io_ports_array[IDE_NR_PORTS];
- };
-
- int irq; /* our irq number */
- struct device *dev, *parent;
- unsigned long config;
-};
-
-static inline void ide_std_init_ports(struct ide_hw *hw,
- unsigned long io_addr,
- unsigned long ctl_addr)
-{
- unsigned int i;
-
- for (i = 0; i <= 7; i++)
- hw->io_ports_array[i] = io_addr++;
-
- hw->io_ports.ctl_addr = ctl_addr;
-}
-
-#define MAX_HWIFS 10
-
-/*
- * Now for the data we need to maintain per-drive: ide_drive_t
- */
-
-#define ide_scsi 0x21
-#define ide_disk 0x20
-#define ide_optical 0x7
-#define ide_cdrom 0x5
-#define ide_tape 0x1
-#define ide_floppy 0x0
-
-/*
- * Special Driver Flags
- */
-enum {
- IDE_SFLAG_SET_GEOMETRY = BIT(0),
- IDE_SFLAG_RECALIBRATE = BIT(1),
- IDE_SFLAG_SET_MULTMODE = BIT(2),
-};
-
-/*
- * Status returned from various ide_ functions
- */
-typedef enum {
- ide_stopped, /* no drive operation was started */
- ide_started, /* a drive operation was started, handler was set */
-} ide_startstop_t;
-
-enum {
- IDE_VALID_ERROR = BIT(1),
- IDE_VALID_FEATURE = IDE_VALID_ERROR,
- IDE_VALID_NSECT = BIT(2),
- IDE_VALID_LBAL = BIT(3),
- IDE_VALID_LBAM = BIT(4),
- IDE_VALID_LBAH = BIT(5),
- IDE_VALID_DEVICE = BIT(6),
- IDE_VALID_LBA = IDE_VALID_LBAL |
- IDE_VALID_LBAM |
- IDE_VALID_LBAH,
- IDE_VALID_OUT_TF = IDE_VALID_FEATURE |
- IDE_VALID_NSECT |
- IDE_VALID_LBA,
- IDE_VALID_IN_TF = IDE_VALID_NSECT |
- IDE_VALID_LBA,
- IDE_VALID_OUT_HOB = IDE_VALID_OUT_TF,
- IDE_VALID_IN_HOB = IDE_VALID_ERROR |
- IDE_VALID_NSECT |
- IDE_VALID_LBA,
-};
-
-enum {
- IDE_TFLAG_LBA48 = BIT(0),
- IDE_TFLAG_WRITE = BIT(1),
- IDE_TFLAG_CUSTOM_HANDLER = BIT(2),
- IDE_TFLAG_DMA_PIO_FALLBACK = BIT(3),
- /* force 16-bit I/O operations */
- IDE_TFLAG_IO_16BIT = BIT(4),
- /* struct ide_cmd was allocated using kmalloc() */
- IDE_TFLAG_DYN = BIT(5),
- IDE_TFLAG_FS = BIT(6),
- IDE_TFLAG_MULTI_PIO = BIT(7),
- IDE_TFLAG_SET_XFER = BIT(8),
-};
-
-enum {
- IDE_FTFLAG_FLAGGED = BIT(0),
- IDE_FTFLAG_SET_IN_FLAGS = BIT(1),
- IDE_FTFLAG_OUT_DATA = BIT(2),
- IDE_FTFLAG_IN_DATA = BIT(3),
-};
-
-struct ide_taskfile {
- u8 data; /* 0: data byte (for TASKFILE ioctl) */
- union { /* 1: */
- u8 error; /* read: error */
- u8 feature; /* write: feature */
- };
- u8 nsect; /* 2: number of sectors */
- u8 lbal; /* 3: LBA low */
- u8 lbam; /* 4: LBA mid */
- u8 lbah; /* 5: LBA high */
- u8 device; /* 6: device select */
- union { /* 7: */
- u8 status; /* read: status */
- u8 command; /* write: command */
- };
-};
-
-struct ide_cmd {
- struct ide_taskfile tf;
- struct ide_taskfile hob;
- struct {
- struct {
- u8 tf;
- u8 hob;
- } out, in;
- } valid;
-
- u16 tf_flags;
- u8 ftf_flags; /* for TASKFILE ioctl */
- int protocol;
-
- int sg_nents; /* number of sg entries */
- int orig_sg_nents;
- int sg_dma_direction; /* DMA transfer direction */
-
- unsigned int nbytes;
- unsigned int nleft;
- unsigned int last_xfer_len;
-
- struct scatterlist *cursg;
- unsigned int cursg_ofs;
-
- struct request *rq; /* copy of request */
-};
-
-/* ATAPI packet command flags */
-enum {
- /* set when an error is considered normal - no retry (ide-tape) */
- PC_FLAG_ABORT = BIT(0),
- PC_FLAG_SUPPRESS_ERROR = BIT(1),
- PC_FLAG_WAIT_FOR_DSC = BIT(2),
- PC_FLAG_DMA_OK = BIT(3),
- PC_FLAG_DMA_IN_PROGRESS = BIT(4),
- PC_FLAG_DMA_ERROR = BIT(5),
- PC_FLAG_WRITING = BIT(6),
-};
-
-#define ATAPI_WAIT_PC (60 * HZ)
-
-struct ide_atapi_pc {
- /* actual packet bytes */
- u8 c[12];
- /* incremented on each retry */
- int retries;
- int error;
-
- /* bytes to transfer */
- int req_xfer;
-
- /* the corresponding request */
- struct request *rq;
-
- unsigned long flags;
-
- /*
- * those are more or less driver-specific and some of them are subject
- * to change/removal later.
- */
- unsigned long timeout;
-};
-
-struct ide_devset;
-struct ide_driver;
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
-struct ide_acpi_drive_link;
-struct ide_acpi_hwif_link;
-#endif
-
-struct ide_drive_s;
-
-struct ide_disk_ops {
- int (*check)(struct ide_drive_s *, const char *);
- int (*get_capacity)(struct ide_drive_s *);
- void (*unlock_native_capacity)(struct ide_drive_s *);
- void (*setup)(struct ide_drive_s *);
- void (*flush)(struct ide_drive_s *);
- int (*init_media)(struct ide_drive_s *, struct gendisk *);
- int (*set_doorlock)(struct ide_drive_s *, struct gendisk *,
- int);
- ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *,
- sector_t);
- int (*ioctl)(struct ide_drive_s *, struct block_device *,
- fmode_t, unsigned int, unsigned long);
- int (*compat_ioctl)(struct ide_drive_s *, struct block_device *,
- fmode_t, unsigned int, unsigned long);
-};
-
-/* ATAPI device flags */
-enum {
- IDE_AFLAG_DRQ_INTERRUPT = BIT(0),
-
- /* ide-cd */
- /* Drive cannot eject the disc. */
- IDE_AFLAG_NO_EJECT = BIT(1),
- /* Drive is a pre ATAPI 1.2 drive. */
- IDE_AFLAG_PRE_ATAPI12 = BIT(2),
- /* TOC addresses are in BCD. */
- IDE_AFLAG_TOCADDR_AS_BCD = BIT(3),
- /* TOC track numbers are in BCD. */
- IDE_AFLAG_TOCTRACKS_AS_BCD = BIT(4),
- /* Saved TOC information is current. */
- IDE_AFLAG_TOC_VALID = BIT(6),
- /* We think that the drive door is locked. */
- IDE_AFLAG_DOOR_LOCKED = BIT(7),
- /* SET_CD_SPEED command is unsupported. */
- IDE_AFLAG_NO_SPEED_SELECT = BIT(8),
- IDE_AFLAG_VERTOS_300_SSD = BIT(9),
- IDE_AFLAG_VERTOS_600_ESD = BIT(10),
- IDE_AFLAG_SANYO_3CD = BIT(11),
- IDE_AFLAG_FULL_CAPS_PAGE = BIT(12),
- IDE_AFLAG_PLAY_AUDIO_OK = BIT(13),
- IDE_AFLAG_LE_SPEED_FIELDS = BIT(14),
-
- /* ide-floppy */
- /* Avoid commands not supported in Clik drive */
- IDE_AFLAG_CLIK_DRIVE = BIT(15),
- /* Requires BH algorithm for packets */
- IDE_AFLAG_ZIP_DRIVE = BIT(16),
- /* Supports format progress report */
- IDE_AFLAG_SRFP = BIT(17),
-
- /* ide-tape */
- IDE_AFLAG_IGNORE_DSC = BIT(18),
- /* 0 When the tape position is unknown */
- IDE_AFLAG_ADDRESS_VALID = BIT(19),
- /* Device already opened */
- IDE_AFLAG_BUSY = BIT(20),
- /* Attempt to auto-detect the current user block size */
- IDE_AFLAG_DETECT_BS = BIT(21),
- /* Currently on a filemark */
- IDE_AFLAG_FILEMARK = BIT(22),
- /* 0 = no tape is loaded, so we don't rewind after ejecting */
- IDE_AFLAG_MEDIUM_PRESENT = BIT(23),
-
- IDE_AFLAG_NO_AUTOCLOSE = BIT(24),
-};
-
-/* device flags */
-enum {
- /* restore settings after device reset */
- IDE_DFLAG_KEEP_SETTINGS = BIT(0),
- /* device is using DMA for read/write */
- IDE_DFLAG_USING_DMA = BIT(1),
- /* okay to unmask other IRQs */
- IDE_DFLAG_UNMASK = BIT(2),
- /* don't attempt flushes */
- IDE_DFLAG_NOFLUSH = BIT(3),
- /* DSC overlap */
- IDE_DFLAG_DSC_OVERLAP = BIT(4),
- /* give potential excess bandwidth */
- IDE_DFLAG_NICE1 = BIT(5),
- /* device is physically present */
- IDE_DFLAG_PRESENT = BIT(6),
- /* disable Host Protected Area */
- IDE_DFLAG_NOHPA = BIT(7),
- /* id read from device (synthetic if not set) */
- IDE_DFLAG_ID_READ = BIT(8),
- IDE_DFLAG_NOPROBE = BIT(9),
- /* need to do check_media_change() */
- IDE_DFLAG_REMOVABLE = BIT(10),
- /* needed for removable devices */
- IDE_DFLAG_ATTACH = BIT(11),
- IDE_DFLAG_FORCED_GEOM = BIT(12),
- /* disallow setting unmask bit */
- IDE_DFLAG_NO_UNMASK = BIT(13),
- /* disallow enabling 32-bit I/O */
- IDE_DFLAG_NO_IO_32BIT = BIT(14),
- /* for removable only: door lock/unlock works */
- IDE_DFLAG_DOORLOCKING = BIT(15),
- /* disallow DMA */
- IDE_DFLAG_NODMA = BIT(16),
- /* powermanagement told us not to do anything, so sleep nicely */
- IDE_DFLAG_BLOCKED = BIT(17),
- /* sleeping & sleep field valid */
- IDE_DFLAG_SLEEPING = BIT(18),
- IDE_DFLAG_POST_RESET = BIT(19),
- IDE_DFLAG_UDMA33_WARNED = BIT(20),
- IDE_DFLAG_LBA48 = BIT(21),
- /* status of write cache */
- IDE_DFLAG_WCACHE = BIT(22),
- /* used for ignoring ATA_DF */
- IDE_DFLAG_NOWERR = BIT(23),
- /* retrying in PIO */
- IDE_DFLAG_DMA_PIO_RETRY = BIT(24),
- IDE_DFLAG_LBA = BIT(25),
- /* don't unload heads */
- IDE_DFLAG_NO_UNLOAD = BIT(26),
- /* heads unloaded, please don't reset port */
- IDE_DFLAG_PARKED = BIT(27),
- IDE_DFLAG_MEDIA_CHANGED = BIT(28),
- /* write protect */
- IDE_DFLAG_WP = BIT(29),
- IDE_DFLAG_FORMAT_IN_PROGRESS = BIT(30),
- IDE_DFLAG_NIEN_QUIRK = BIT(31),
-};
-
-struct ide_drive_s {
- char name[4]; /* drive name, such as "hda" */
- char driver_req[10]; /* requests specific driver */
-
- struct request_queue *queue; /* request queue */
-
- bool (*prep_rq)(struct ide_drive_s *, struct request *);
-
- struct blk_mq_tag_set tag_set;
-
- struct request *rq; /* current request */
- void *driver_data; /* extra driver data */
- u16 *id; /* identification info */
-#ifdef CONFIG_IDE_PROC_FS
- struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
- const struct ide_proc_devset *settings; /* /proc/ide/ drive settings */
-#endif
- struct hwif_s *hwif; /* actually (ide_hwif_t *) */
-
- const struct ide_disk_ops *disk_ops;
-
- unsigned long dev_flags;
-
- unsigned long sleep; /* sleep until this time */
- unsigned long timeout; /* max time to wait for irq */
-
- u8 special_flags; /* special action flags */
-
- u8 select; /* basic drive/head select reg value */
- u8 retry_pio; /* retrying dma capable host in pio */
- u8 waiting_for_dma; /* dma currently in progress */
- u8 dma; /* atapi dma flag */
-
- u8 init_speed; /* transfer rate set at boot */
- u8 current_speed; /* current transfer rate set */
- u8 desired_speed; /* desired transfer rate set */
- u8 pio_mode; /* for ->set_pio_mode _only_ */
- u8 dma_mode; /* for ->set_dma_mode _only_ */
- u8 dn; /* now wide spread use */
- u8 acoustic; /* acoustic management */
- u8 media; /* disk, cdrom, tape, floppy, ... */
- u8 ready_stat; /* min status value for drive ready */
- u8 mult_count; /* current multiple sector setting */
- u8 mult_req; /* requested multiple sector setting */
- u8 io_32bit; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
- u8 bad_wstat; /* used for ignoring ATA_DF */
- u8 head; /* "real" number of heads */
- u8 sect; /* "real" sectors per track */
- u8 bios_head; /* BIOS/fdisk/LILO number of heads */
- u8 bios_sect; /* BIOS/fdisk/LILO sectors per track */
-
- /* delay this long before sending packet command */
- u8 pc_delay;
-
- unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */
- unsigned int cyl; /* "real" number of cyls */
- void *drive_data; /* used by set_pio_mode/dev_select() */
- unsigned int failures; /* current failure count */
- unsigned int max_failures; /* maximum allowed failure count */
- u64 probed_capacity;/* initial/native media capacity */
- u64 capacity64; /* total number of sectors */
-
- int lun; /* logical unit */
- int crc_count; /* crc counter to reduce drive speed */
-
- unsigned long debug_mask; /* debugging levels switch */
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
- struct ide_acpi_drive_link *acpidata;
-#endif
- struct list_head list;
- struct device gendev;
- struct completion gendev_rel_comp; /* to deal with device release() */
-
- /* current packet command */
- struct ide_atapi_pc *pc;
-
- /* last failed packet command */
- struct ide_atapi_pc *failed_pc;
-
- /* callback for packet commands */
- int (*pc_callback)(struct ide_drive_s *, int);
-
- ide_startstop_t (*irq_handler)(struct ide_drive_s *);
-
- unsigned long atapi_flags;
-
- struct ide_atapi_pc request_sense_pc;
-
- /* current sense rq and buffer */
- bool sense_rq_armed;
- bool sense_rq_active;
- struct request *sense_rq;
- struct request_sense sense_data;
-
- /* async sense insertion */
- struct work_struct rq_work;
- struct list_head rq_list;
-};
-
-typedef struct ide_drive_s ide_drive_t;
-
-#define to_ide_device(dev) container_of(dev, ide_drive_t, gendev)
-
-#define to_ide_drv(obj, cont_type) \
- container_of(obj, struct cont_type, dev)
-
-#define ide_drv_g(disk, cont_type) \
- container_of((disk)->private_data, struct cont_type, driver)
-
-struct ide_port_info;
-
-struct ide_tp_ops {
- void (*exec_command)(struct hwif_s *, u8);
- u8 (*read_status)(struct hwif_s *);
- u8 (*read_altstatus)(struct hwif_s *);
- void (*write_devctl)(struct hwif_s *, u8);
-
- void (*dev_select)(ide_drive_t *);
- void (*tf_load)(ide_drive_t *, struct ide_taskfile *, u8);
- void (*tf_read)(ide_drive_t *, struct ide_taskfile *, u8);
-
- void (*input_data)(ide_drive_t *, struct ide_cmd *,
- void *, unsigned int);
- void (*output_data)(ide_drive_t *, struct ide_cmd *,
- void *, unsigned int);
-};
-
-extern const struct ide_tp_ops default_tp_ops;
-
-/**
- * struct ide_port_ops - IDE port operations
- *
- * @init_dev: host specific initialization of a device
- * @set_pio_mode: routine to program host for PIO mode
- * @set_dma_mode: routine to program host for DMA mode
- * @reset_poll: chipset polling based on hba specifics
- * @pre_reset: chipset specific changes to default for device-hba resets
- * @resetproc: routine to reset controller after a disk reset
- * @maskproc: special host masking for drive selection
- * @quirkproc: check host's drive quirk list
- * @clear_irq: clear IRQ
- *
- * @mdma_filter: filter MDMA modes
- * @udma_filter: filter UDMA modes
- *
- * @cable_detect: detect cable type
- */
-struct ide_port_ops {
- void (*init_dev)(ide_drive_t *);
- void (*set_pio_mode)(struct hwif_s *, ide_drive_t *);
- void (*set_dma_mode)(struct hwif_s *, ide_drive_t *);
- blk_status_t (*reset_poll)(ide_drive_t *);
- void (*pre_reset)(ide_drive_t *);
- void (*resetproc)(ide_drive_t *);
- void (*maskproc)(ide_drive_t *, int);
- void (*quirkproc)(ide_drive_t *);
- void (*clear_irq)(ide_drive_t *);
- int (*test_irq)(struct hwif_s *);
-
- u8 (*mdma_filter)(ide_drive_t *);
- u8 (*udma_filter)(ide_drive_t *);
-
- u8 (*cable_detect)(struct hwif_s *);
-};
-
-struct ide_dma_ops {
- void (*dma_host_set)(struct ide_drive_s *, int);
- int (*dma_setup)(struct ide_drive_s *, struct ide_cmd *);
- void (*dma_start)(struct ide_drive_s *);
- int (*dma_end)(struct ide_drive_s *);
- int (*dma_test_irq)(struct ide_drive_s *);
- void (*dma_lost_irq)(struct ide_drive_s *);
- /* below ones are optional */
- int (*dma_check)(struct ide_drive_s *, struct ide_cmd *);
- int (*dma_timer_expiry)(struct ide_drive_s *);
- void (*dma_clear)(struct ide_drive_s *);
- /*
- * The following method is optional and only required to be
- * implemented for the SFF-8038i compatible controllers.
- */
- u8 (*dma_sff_read_status)(struct hwif_s *);
-};
-
-enum {
- IDE_PFLAG_PROBING = BIT(0),
-};
-
-struct ide_host;
-
-typedef struct hwif_s {
- struct hwif_s *mate; /* other hwif from same PCI chip */
- struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
-
- struct ide_host *host;
-
- char name[6]; /* name of interface, eg. "ide0" */
-
- struct ide_io_ports io_ports;
-
- unsigned long sata_scr[SATA_NR_PORTS];
-
- ide_drive_t *devices[MAX_DRIVES + 1];
-
- unsigned long port_flags;
-
- u8 major; /* our major number */
- u8 index; /* 0 for ide0; 1 for ide1; ... */
- u8 channel; /* for dual-port chips: 0=primary, 1=secondary */
-
- u32 host_flags;
-
- u8 pio_mask;
-
- u8 ultra_mask;
- u8 mwdma_mask;
- u8 swdma_mask;
-
- u8 cbl; /* cable type */
-
- hwif_chipset_t chipset; /* sub-module for tuning.. */
-
- struct device *dev;
-
- void (*rw_disk)(ide_drive_t *, struct request *);
-
- const struct ide_tp_ops *tp_ops;
- const struct ide_port_ops *port_ops;
- const struct ide_dma_ops *dma_ops;
-
- /* dma physical region descriptor table (cpu view) */
- unsigned int *dmatable_cpu;
- /* dma physical region descriptor table (dma view) */
- dma_addr_t dmatable_dma;
-
- /* maximum number of PRD table entries */
- int prd_max_nents;
- /* PRD entry size in bytes */
- int prd_ent_size;
-
- /* Scatter-gather list used to build the above */
- struct scatterlist *sg_table;
- int sg_max_nents; /* Maximum number of entries in it */
-
- struct ide_cmd cmd; /* current command */
-
- int rqsize; /* max sectors per request */
- int irq; /* our irq number */
-
- unsigned long dma_base; /* base addr for dma ports */
-
- unsigned long config_data; /* for use by chipset-specific code */
- unsigned long select_data; /* for use by chipset-specific code */
-
- unsigned long extra_base; /* extra addr for dma ports */
- unsigned extra_ports; /* number of extra dma ports */
-
- unsigned present : 1; /* this interface exists */
- unsigned busy : 1; /* serializes devices on a port */
-
- struct device gendev;
- struct device *portdev;
-
- struct completion gendev_rel_comp; /* To deal with device release() */
-
- void *hwif_data; /* extra hwif data */
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
- struct ide_acpi_hwif_link *acpidata;
-#endif
-
- /* IRQ handler, if active */
- ide_startstop_t (*handler)(ide_drive_t *);
-
- /* BOOL: polling active & poll_timeout field valid */
- unsigned int polling : 1;
-
- /* current drive */
- ide_drive_t *cur_dev;
-
- /* current request */
- struct request *rq;
-
- /* failsafe timer */
- struct timer_list timer;
- /* timeout value during long polls */
- unsigned long poll_timeout;
- /* queried upon timeouts */
- int (*expiry)(ide_drive_t *);
-
- int req_gen;
- int req_gen_timer;
-
- spinlock_t lock;
-} ____cacheline_internodealigned_in_smp ide_hwif_t;
-
-#define MAX_HOST_PORTS 4
-
-struct ide_host {
- ide_hwif_t *ports[MAX_HOST_PORTS + 1];
- unsigned int n_ports;
- struct device *dev[2];
-
- int (*init_chipset)(struct pci_dev *);
-
- void (*get_lock)(irq_handler_t, void *);
- void (*release_lock)(void);
-
- irq_handler_t irq_handler;
-
- unsigned long host_flags;
-
- int irq_flags;
-
- void *host_priv;
- ide_hwif_t *cur_port; /* for hosts requiring serialization */
-
- /* used for hosts requiring serialization */
- volatile unsigned long host_busy;
-};
-
-#define IDE_HOST_BUSY 0
-
-/*
- * internal ide interrupt handler type
- */
-typedef ide_startstop_t (ide_handler_t)(ide_drive_t *);
-typedef int (ide_expiry_t)(ide_drive_t *);
-
-/* used by ide-cd, ide-floppy, etc. */
-typedef void (xfer_func_t)(ide_drive_t *, struct ide_cmd *, void *, unsigned);
-
-extern struct mutex ide_setting_mtx;
-
-/*
- * configurable drive settings
- */
-
-#define DS_SYNC BIT(0)
-
-struct ide_devset {
- int (*get)(ide_drive_t *);
- int (*set)(ide_drive_t *, int);
- unsigned int flags;
-};
-
-#define __DEVSET(_flags, _get, _set) { \
- .flags = _flags, \
- .get = _get, \
- .set = _set, \
-}
-
-#define ide_devset_get(name, field) \
-static int get_##name(ide_drive_t *drive) \
-{ \
- return drive->field; \
-}
-
-#define ide_devset_set(name, field) \
-static int set_##name(ide_drive_t *drive, int arg) \
-{ \
- drive->field = arg; \
- return 0; \
-}
-
-#define ide_devset_get_flag(name, flag) \
-static int get_##name(ide_drive_t *drive) \
-{ \
- return !!(drive->dev_flags & flag); \
-}
-
-#define ide_devset_set_flag(name, flag) \
-static int set_##name(ide_drive_t *drive, int arg) \
-{ \
- if (arg) \
- drive->dev_flags |= flag; \
- else \
- drive->dev_flags &= ~flag; \
- return 0; \
-}
-
-#define __IDE_DEVSET(_name, _flags, _get, _set) \
-const struct ide_devset ide_devset_##_name = \
- __DEVSET(_flags, _get, _set)
-
-#define IDE_DEVSET(_name, _flags, _get, _set) \
-static __IDE_DEVSET(_name, _flags, _get, _set)
-
-#define ide_devset_rw(_name, _func) \
-IDE_DEVSET(_name, 0, get_##_func, set_##_func)
-
-#define ide_devset_w(_name, _func) \
-IDE_DEVSET(_name, 0, NULL, set_##_func)
-
-#define ide_ext_devset_rw(_name, _func) \
-__IDE_DEVSET(_name, 0, get_##_func, set_##_func)
-
-#define ide_ext_devset_rw_sync(_name, _func) \
-__IDE_DEVSET(_name, DS_SYNC, get_##_func, set_##_func)
-
-#define ide_decl_devset(_name) \
-extern const struct ide_devset ide_devset_##_name
-
-ide_decl_devset(io_32bit);
-ide_decl_devset(keepsettings);
-ide_decl_devset(pio_mode);
-ide_decl_devset(unmaskirq);
-ide_decl_devset(using_dma);
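
The ide_devset_* macros above stamp out a trivial get/set accessor pair plus
a const descriptor tying them together. As a rough sketch of the expansion
(assuming an io_32bit field on ide_drive_t, which the declarations above
suggest),

    ide_devset_get(io_32bit, io_32bit);
    ide_devset_set(io_32bit, io_32bit);
    IDE_DEVSET(io_32bit, DS_SYNC, get_io_32bit, set_io_32bit);

would produce roughly:

    static int get_io_32bit(ide_drive_t *drive)
    {
            return drive->io_32bit;
    }

    static int set_io_32bit(ide_drive_t *drive, int arg)
    {
            drive->io_32bit = arg;
            return 0;
    }

    static const struct ide_devset ide_devset_io_32bit = {
            .flags = DS_SYNC,
            .get = get_io_32bit,
            .set = set_io_32bit,
    };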
-
-#ifdef CONFIG_IDE_PROC_FS
-/*
- * /proc/ide interface
- */
-
-#define ide_devset_rw_field(_name, _field) \
-ide_devset_get(_name, _field); \
-ide_devset_set(_name, _field); \
-IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
-
-#define ide_devset_ro_field(_name, _field) \
-ide_devset_get(_name, _field); \
-IDE_DEVSET(_name, 0, get_##_name, NULL)
-
-#define ide_devset_rw_flag(_name, _field) \
-ide_devset_get_flag(_name, _field); \
-ide_devset_set_flag(_name, _field); \
-IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
-
-struct ide_proc_devset {
- const char *name;
- const struct ide_devset *setting;
- int min, max;
- int (*mulf)(ide_drive_t *);
- int (*divf)(ide_drive_t *);
-};
-
-#define __IDE_PROC_DEVSET(_name, _min, _max, _mulf, _divf) { \
- .name = __stringify(_name), \
- .setting = &ide_devset_##_name, \
- .min = _min, \
- .max = _max, \
- .mulf = _mulf, \
- .divf = _divf, \
-}
-
-#define IDE_PROC_DEVSET(_name, _min, _max) \
-__IDE_PROC_DEVSET(_name, _min, _max, NULL, NULL)
-
-typedef struct {
- const char *name;
- umode_t mode;
- int (*show)(struct seq_file *, void *);
-} ide_proc_entry_t;
-
-void proc_ide_create(void);
-void proc_ide_destroy(void);
-void ide_proc_register_port(ide_hwif_t *);
-void ide_proc_port_register_devices(ide_hwif_t *);
-void ide_proc_unregister_device(ide_drive_t *);
-void ide_proc_unregister_port(ide_hwif_t *);
-void ide_proc_register_driver(ide_drive_t *, struct ide_driver *);
-void ide_proc_unregister_driver(ide_drive_t *, struct ide_driver *);
-
-int ide_capacity_proc_show(struct seq_file *m, void *v);
-int ide_geometry_proc_show(struct seq_file *m, void *v);
-#else
-static inline void proc_ide_create(void) { ; }
-static inline void proc_ide_destroy(void) { ; }
-static inline void ide_proc_register_port(ide_hwif_t *hwif) { ; }
-static inline void ide_proc_port_register_devices(ide_hwif_t *hwif) { ; }
-static inline void ide_proc_unregister_device(ide_drive_t *drive) { ; }
-static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; }
-static inline void ide_proc_register_driver(ide_drive_t *drive,
- struct ide_driver *driver) { ; }
-static inline void ide_proc_unregister_driver(ide_drive_t *drive,
- struct ide_driver *driver) { ; }
-#endif
-
-enum {
- /* enter/exit functions */
- IDE_DBG_FUNC = BIT(0),
- /* sense key/asc handling */
- IDE_DBG_SENSE = BIT(1),
- /* packet commands handling */
- IDE_DBG_PC = BIT(2),
- /* request handling */
- IDE_DBG_RQ = BIT(3),
- /* driver probing/setup */
- IDE_DBG_PROBE = BIT(4),
-};
-
-/* DRV_NAME has to be defined in the driver before using the macro below */
-#define __ide_debug_log(lvl, fmt, args...) \
-{ \
- if (unlikely(drive->debug_mask & lvl)) \
- printk(KERN_INFO DRV_NAME ": %s: " fmt "\n", \
- __func__, ## args); \
-}
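
A hedged usage sketch for the macro above: a driver defines DRV_NAME before
including this header's helpers and wraps __ide_debug_log(); the macro also
expects a variable named "drive" in scope, since it reads drive->debug_mask.
The DRV_NAME value and the cmd/sector locals below are illustrative only.

    #define DRV_NAME "ide-foo"

    #define ide_debug_log(lvl, fmt, args...) \
            __ide_debug_log(lvl, fmt, ## args)

    /* ... inside a function with "ide_drive_t *drive" in scope: */
    ide_debug_log(IDE_DBG_RQ, "cmd 0x%x, sector %llu",
                  cmd, (unsigned long long)sector);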
-
-/*
- * Power Management state machine (rq->pm->pm_step).
- *
- * For each step, the core calls ide_start_power_step() first.
- * This can return:
- * - ide_stopped :	In this case, the core calls us back again unless
- *			the step has been set to IDE_PM_COMPLETED.
- * - ide_started : In this case, the channel is left busy until an
- * async event (interrupt) occurs.
- * Typically, ide_start_power_step() will issue a taskfile request with
- * do_rw_taskfile().
- *
- * Upon reception of the interrupt, the core will call ide_complete_power_step()
- * with the error code, if any. This routine should update the step value
- * and return. It should not start a new request. The core will call
- * ide_start_power_step() for the new step value, unless the step has been
- * set to IDE_PM_COMPLETED.
- */
-enum {
- IDE_PM_START_SUSPEND,
- IDE_PM_FLUSH_CACHE = IDE_PM_START_SUSPEND,
- IDE_PM_STANDBY,
-
- IDE_PM_START_RESUME,
- IDE_PM_RESTORE_PIO = IDE_PM_START_RESUME,
- IDE_PM_IDLE,
- IDE_PM_RESTORE_DMA,
-
- IDE_PM_COMPLETED,
-};
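
Read together with the comment above, the core's dispatch can be pictured
roughly as follows. This is a sketch of the documented contract, not the
actual implementation; the pm_step field lives in struct ide_pm_state
defined further down, and pm_walk_sketch is a hypothetical name.

    static void pm_walk_sketch(ide_drive_t *drive, struct request *rq,
                               struct ide_pm_state *pm)
    {
            while (pm->pm_step != IDE_PM_COMPLETED) {
                    ide_startstop_t ret = ide_start_power_step(drive, rq);

                    if (ret == ide_started)
                            return; /* IRQ path calls ide_complete_power_step() */

                    /* ide_stopped: advance to the next step synchronously */
                    ide_complete_power_step(drive, rq);
            }
            ide_complete_pm_rq(drive, rq);
    }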
-
-int generic_ide_suspend(struct device *, pm_message_t);
-int generic_ide_resume(struct device *);
-
-void ide_complete_power_step(ide_drive_t *, struct request *);
-ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *);
-void ide_complete_pm_rq(ide_drive_t *, struct request *);
-void ide_check_pm_state(ide_drive_t *, struct request *);
-
-/*
- * Subdrivers support.
- *
- * The gendriver.owner field should be set to the module owner of this driver.
- * The gendriver.name field should be set to the name of this driver
- */
-struct ide_driver {
- const char *version;
- ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t);
- struct device_driver gen_driver;
- int (*probe)(ide_drive_t *);
- void (*remove)(ide_drive_t *);
- void (*resume)(ide_drive_t *);
- void (*shutdown)(ide_drive_t *);
-#ifdef CONFIG_IDE_PROC_FS
- ide_proc_entry_t * (*proc_entries)(ide_drive_t *);
- const struct ide_proc_devset * (*proc_devsets)(ide_drive_t *);
-#endif
-};
-
-#define to_ide_driver(drv) container_of(drv, struct ide_driver, gen_driver)
-
-int ide_device_get(ide_drive_t *);
-void ide_device_put(ide_drive_t *);
-
-struct ide_ioctl_devset {
- unsigned int get_ioctl;
- unsigned int set_ioctl;
- const struct ide_devset *setting;
-};
-
-int ide_setting_ioctl(ide_drive_t *, struct block_device *, unsigned int,
- unsigned long, const struct ide_ioctl_devset *);
-
-int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned long);
-
-extern int ide_vlb_clk;
-extern int ide_pci_clk;
-
-int ide_end_rq(ide_drive_t *, struct request *, blk_status_t, unsigned int);
-void ide_kill_rq(ide_drive_t *, struct request *);
-void ide_insert_request_head(ide_drive_t *, struct request *);
-
-void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
-void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
-
-void ide_execute_command(ide_drive_t *, struct ide_cmd *, ide_handler_t *,
- unsigned int);
-
-void ide_pad_transfer(ide_drive_t *, int, int);
-
-ide_startstop_t ide_error(ide_drive_t *, const char *, u8);
-
-void ide_fix_driveid(u16 *);
-
-extern void ide_fixstring(u8 *, const int, const int);
-
-int ide_busy_sleep(ide_drive_t *, unsigned long, int);
-
-int __ide_wait_stat(ide_drive_t *, u8, u8, unsigned long, u8 *);
-int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);
-
-ide_startstop_t ide_do_park_unpark(ide_drive_t *, struct request *);
-ide_startstop_t ide_do_devset(ide_drive_t *, struct request *);
-
-extern ide_startstop_t ide_do_reset (ide_drive_t *);
-
-extern int ide_devset_execute(ide_drive_t *drive,
- const struct ide_devset *setting, int arg);
-
-void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8);
-int ide_complete_rq(ide_drive_t *, blk_status_t, unsigned int);
-
-void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd);
-void ide_tf_dump(const char *, struct ide_cmd *);
-
-void ide_exec_command(ide_hwif_t *, u8);
-u8 ide_read_status(ide_hwif_t *);
-u8 ide_read_altstatus(ide_hwif_t *);
-void ide_write_devctl(ide_hwif_t *, u8);
-
-void ide_dev_select(ide_drive_t *);
-void ide_tf_load(ide_drive_t *, struct ide_taskfile *, u8);
-void ide_tf_read(ide_drive_t *, struct ide_taskfile *, u8);
-
-void ide_input_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
-void ide_output_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
-
-void SELECT_MASK(ide_drive_t *, int);
-
-u8 ide_read_error(ide_drive_t *);
-void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *);
-
-int ide_check_ireason(ide_drive_t *, struct request *, int, int, int);
-
-int ide_check_atapi_device(ide_drive_t *, const char *);
-
-void ide_init_pc(struct ide_atapi_pc *);
-
-/* Disk head parking */
-extern wait_queue_head_t ide_park_wq;
-ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
- char *buf);
-ssize_t ide_park_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len);
-
-/*
- * Special requests for ide-tape block device strategy routine.
- *
- * In order to service a character device command, we add special requests to
- * the tail of our block device request queue and wait for their completion.
- */
-enum {
- REQ_IDETAPE_PC1 = BIT(0), /* packet command (first stage) */
- REQ_IDETAPE_PC2 = BIT(1), /* packet command (second stage) */
- REQ_IDETAPE_READ = BIT(2),
- REQ_IDETAPE_WRITE = BIT(3),
-};
-
-int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *,
- void *, unsigned int);
-
-int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *);
-int ide_do_start_stop(ide_drive_t *, struct gendisk *, int);
-int ide_set_media_lock(ide_drive_t *, struct gendisk *, int);
-void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *);
-void ide_retry_pc(ide_drive_t *drive);
-
-void ide_prep_sense(ide_drive_t *drive, struct request *rq);
-int ide_queue_sense_rq(ide_drive_t *drive, void *special);
-
-int ide_cd_expiry(ide_drive_t *);
-
-int ide_cd_get_xferlen(struct request *);
-
-ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_cmd *);
-
-ide_startstop_t do_rw_taskfile(ide_drive_t *, struct ide_cmd *);
-
-void ide_pio_bytes(ide_drive_t *, struct ide_cmd *, unsigned int, unsigned int);
-
-void ide_finish_cmd(ide_drive_t *, struct ide_cmd *, u8);
-
-int ide_raw_taskfile(ide_drive_t *, struct ide_cmd *, u8 *, u16);
-int ide_no_data_taskfile(ide_drive_t *, struct ide_cmd *);
-
-int ide_taskfile_ioctl(ide_drive_t *, unsigned long);
-
-int ide_dev_read_id(ide_drive_t *, u8, u16 *, int);
-
-extern int ide_driveid_update(ide_drive_t *);
-extern int ide_config_drive_speed(ide_drive_t *, u8);
-extern u8 eighty_ninty_three (ide_drive_t *);
-extern int taskfile_lib_get_identify(ide_drive_t *drive, u8 *);
-
-extern int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout);
-
-extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
-
-extern void ide_timer_expiry(struct timer_list *t);
-extern irqreturn_t ide_intr(int irq, void *dev_id);
-extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
-extern blk_status_t ide_issue_rq(ide_drive_t *, struct request *, bool);
-extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
-
-void ide_init_disk(struct gendisk *, ide_drive_t *);
-
-#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
-extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name);
-#define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME)
-#else
-#define ide_pci_register_driver(d) pci_register_driver(d)
-#endif
-
-static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev)
-{
- if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 5) != 5)
- return 1;
- return 0;
-}
-
-void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *,
- struct ide_hw *, struct ide_hw **);
-void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
-
-#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
-int ide_pci_set_master(struct pci_dev *, const char *);
-unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *);
-int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *);
-int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
-#else
-static inline int ide_hwif_setup_dma(ide_hwif_t *hwif,
- const struct ide_port_info *d)
-{
- return -EINVAL;
-}
-#endif
-
-struct ide_pci_enablebit {
- u8 reg; /* byte pci reg holding the enable-bit */
- u8 mask; /* mask to isolate the enable-bit */
- u8 val; /* value of masked reg when "enabled" */
-};
-
-enum {
- /* Uses ISA control ports not PCI ones. */
- IDE_HFLAG_ISA_PORTS = BIT(0),
- /* single port device */
- IDE_HFLAG_SINGLE = BIT(1),
- /* don't use legacy PIO blacklist */
- IDE_HFLAG_PIO_NO_BLACKLIST = BIT(2),
- /* set for the second port of QD65xx */
- IDE_HFLAG_QD_2ND_PORT = BIT(3),
- /* use PIO8/9 for prefetch off/on */
- IDE_HFLAG_ABUSE_PREFETCH = BIT(4),
- /* use PIO6/7 for fast-devsel off/on */
- IDE_HFLAG_ABUSE_FAST_DEVSEL = BIT(5),
- /* use 100-102 and 200-202 PIO values to set DMA modes */
- IDE_HFLAG_ABUSE_DMA_MODES = BIT(6),
- /*
- * keep DMA setting when programming PIO mode, may be used only
- * for hosts which have separate PIO and DMA timings (ie. PMAC)
- */
- IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = BIT(7),
- /* program host for the transfer mode after programming device */
- IDE_HFLAG_POST_SET_MODE = BIT(8),
- /* don't program host/device for the transfer mode ("smart" hosts) */
- IDE_HFLAG_NO_SET_MODE = BIT(9),
- /* trust BIOS for programming chipset/device for DMA */
- IDE_HFLAG_TRUST_BIOS_FOR_DMA = BIT(10),
- /* host is CS5510/CS5520 */
- IDE_HFLAG_CS5520 = BIT(11),
- /* ATAPI DMA is unsupported */
- IDE_HFLAG_NO_ATAPI_DMA = BIT(12),
- /* set if host is a "non-bootable" controller */
- IDE_HFLAG_NON_BOOTABLE = BIT(13),
- /* host doesn't support DMA */
- IDE_HFLAG_NO_DMA = BIT(14),
- /* check if host is PCI IDE device before allowing DMA */
- IDE_HFLAG_NO_AUTODMA = BIT(15),
- /* host uses MMIO */
- IDE_HFLAG_MMIO = BIT(16),
- /* no LBA48 */
- IDE_HFLAG_NO_LBA48 = BIT(17),
- /* no LBA48 DMA */
- IDE_HFLAG_NO_LBA48_DMA = BIT(18),
- /* data FIFO is cleared by an error */
- IDE_HFLAG_ERROR_STOPS_FIFO = BIT(19),
- /* serialize ports */
- IDE_HFLAG_SERIALIZE = BIT(20),
- /* host is DTC2278 */
- IDE_HFLAG_DTC2278 = BIT(21),
- /* 4 devices on a single set of I/O ports */
- IDE_HFLAG_4DRIVES = BIT(22),
- /* host is TRM290 */
- IDE_HFLAG_TRM290 = BIT(23),
- /* use 32-bit I/O ops */
- IDE_HFLAG_IO_32BIT = BIT(24),
- /* unmask IRQs */
- IDE_HFLAG_UNMASK_IRQS = BIT(25),
- IDE_HFLAG_BROKEN_ALTSTATUS = BIT(26),
- /* serialize ports if DMA is possible (for sl82c105) */
- IDE_HFLAG_SERIALIZE_DMA = BIT(27),
- /* force host out of "simplex" mode */
- IDE_HFLAG_CLEAR_SIMPLEX = BIT(28),
- /* DSC overlap is unsupported */
- IDE_HFLAG_NO_DSC = BIT(29),
- /* never use 32-bit I/O ops */
- IDE_HFLAG_NO_IO_32BIT = BIT(30),
- /* never unmask IRQs */
- IDE_HFLAG_NO_UNMASK_IRQS = BIT(31),
-};
-
-#ifdef CONFIG_BLK_DEV_OFFBOARD
-# define IDE_HFLAG_OFF_BOARD 0
-#else
-# define IDE_HFLAG_OFF_BOARD IDE_HFLAG_NON_BOOTABLE
-#endif
-
-struct ide_port_info {
- char *name;
-
- int (*init_chipset)(struct pci_dev *);
-
- void (*get_lock)(irq_handler_t, void *);
- void (*release_lock)(void);
-
- void (*init_iops)(ide_hwif_t *);
- void (*init_hwif)(ide_hwif_t *);
- int (*init_dma)(ide_hwif_t *,
- const struct ide_port_info *);
-
- const struct ide_tp_ops *tp_ops;
- const struct ide_port_ops *port_ops;
- const struct ide_dma_ops *dma_ops;
-
- struct ide_pci_enablebit enablebits[2];
-
- hwif_chipset_t chipset;
-
- u16 max_sectors; /* if < than the default one */
-
- u32 host_flags;
-
- int irq_flags;
-
- u8 pio_mask;
- u8 swdma_mask;
- u8 mwdma_mask;
- u8 udma_mask;
-};
-
-/*
- * State information carried for REQ_TYPE_ATA_PM_SUSPEND and REQ_TYPE_ATA_PM_RESUME
- * requests.
- */
-struct ide_pm_state {
- /* PM state machine step value, currently driver specific */
- int pm_step;
- /* requested PM state value (S1, S2, S3, S4, ...) */
- u32 pm_state;
- void* data; /* for driver use */
-};
-
-
-int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *);
-int ide_pci_init_two(struct pci_dev *, struct pci_dev *,
- const struct ide_port_info *, void *);
-void ide_pci_remove(struct pci_dev *);
-
-#ifdef CONFIG_PM
-int ide_pci_suspend(struct pci_dev *, pm_message_t);
-int ide_pci_resume(struct pci_dev *);
-#else
-#define ide_pci_suspend NULL
-#define ide_pci_resume NULL
-#endif
-
-void ide_map_sg(ide_drive_t *, struct ide_cmd *);
-void ide_init_sg_cmd(struct ide_cmd *, unsigned int);
-
-#define BAD_DMA_DRIVE 0
-#define GOOD_DMA_DRIVE 1
-
-struct drive_list_entry {
- const char *id_model;
- const char *id_firmware;
-};
-
-int ide_in_drive_list(u16 *, const struct drive_list_entry *);
-
-#ifdef CONFIG_BLK_DEV_IDEDMA
-int ide_dma_good_drive(ide_drive_t *);
-int __ide_dma_bad_drive(ide_drive_t *);
-
-u8 ide_find_dma_mode(ide_drive_t *, u8);
-
-static inline u8 ide_max_dma_mode(ide_drive_t *drive)
-{
- return ide_find_dma_mode(drive, XFER_UDMA_6);
-}
-
-void ide_dma_off_quietly(ide_drive_t *);
-void ide_dma_off(ide_drive_t *);
-void ide_dma_on(ide_drive_t *);
-int ide_set_dma(ide_drive_t *);
-void ide_check_dma_crc(ide_drive_t *);
-ide_startstop_t ide_dma_intr(ide_drive_t *);
-
-int ide_allocate_dma_engine(ide_hwif_t *);
-void ide_release_dma_engine(ide_hwif_t *);
-
-int ide_dma_prepare(ide_drive_t *, struct ide_cmd *);
-void ide_dma_unmap_sg(ide_drive_t *, struct ide_cmd *);
-
-#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
-int config_drive_for_dma(ide_drive_t *);
-int ide_build_dmatable(ide_drive_t *, struct ide_cmd *);
-void ide_dma_host_set(ide_drive_t *, int);
-int ide_dma_setup(ide_drive_t *, struct ide_cmd *);
-extern void ide_dma_start(ide_drive_t *);
-int ide_dma_end(ide_drive_t *);
-int ide_dma_test_irq(ide_drive_t *);
-int ide_dma_sff_timer_expiry(ide_drive_t *);
-u8 ide_dma_sff_read_status(ide_hwif_t *);
-extern const struct ide_dma_ops sff_dma_ops;
-#else
-static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
-#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
-
-void ide_dma_lost_irq(ide_drive_t *);
-ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int);
-
-#else
-static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; }
-static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; }
-static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; }
-static inline void ide_dma_off(ide_drive_t *drive) { ; }
-static inline void ide_dma_on(ide_drive_t *drive) { ; }
-static inline void ide_dma_verbose(ide_drive_t *drive) { ; }
-static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
-static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
-static inline ide_startstop_t ide_dma_intr(ide_drive_t *drive) { return ide_stopped; }
-static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; }
-static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
-static inline int ide_dma_prepare(ide_drive_t *drive,
- struct ide_cmd *cmd) { return 1; }
-static inline void ide_dma_unmap_sg(ide_drive_t *drive,
- struct ide_cmd *cmd) { ; }
-#endif /* CONFIG_BLK_DEV_IDEDMA */
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
-int ide_acpi_init(void);
-bool ide_port_acpi(ide_hwif_t *hwif);
-extern int ide_acpi_exec_tfs(ide_drive_t *drive);
-extern void ide_acpi_get_timing(ide_hwif_t *hwif);
-extern void ide_acpi_push_timing(ide_hwif_t *hwif);
-void ide_acpi_init_port(ide_hwif_t *);
-void ide_acpi_port_init_devices(ide_hwif_t *);
-extern void ide_acpi_set_state(ide_hwif_t *hwif, int on);
-#else
-static inline int ide_acpi_init(void) { return 0; }
-static inline bool ide_port_acpi(ide_hwif_t *hwif) { return 0; }
-static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; }
-static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_init_port(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_port_init_devices(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
-#endif
-
-void ide_register_region(struct gendisk *);
-void ide_unregister_region(struct gendisk *);
-
-void ide_check_nien_quirk_list(ide_drive_t *);
-void ide_undecoded_slave(ide_drive_t *);
-
-void ide_port_apply_params(ide_hwif_t *);
-int ide_sysfs_register_port(ide_hwif_t *);
-
-struct ide_host *ide_host_alloc(const struct ide_port_info *, struct ide_hw **,
- unsigned int);
-void ide_host_free(struct ide_host *);
-int ide_host_register(struct ide_host *, const struct ide_port_info *,
- struct ide_hw **);
-int ide_host_add(const struct ide_port_info *, struct ide_hw **, unsigned int,
- struct ide_host **);
-void ide_host_remove(struct ide_host *);
-int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
-void ide_port_unregister_devices(ide_hwif_t *);
-void ide_port_scan(ide_hwif_t *);
-
-static inline void *ide_get_hwifdata(ide_hwif_t *hwif)
-{
-	return hwif->hwif_data;
-}
-
-static inline void ide_set_hwifdata(ide_hwif_t *hwif, void *data)
-{
-	hwif->hwif_data = data;
-}
-
-u64 ide_get_lba_addr(struct ide_cmd *, int);
-u8 ide_dump_status(ide_drive_t *, const char *, u8);
-
-struct ide_timing {
- u8 mode;
- u8 setup; /* t1 */
- u16 act8b; /* t2 for 8-bit io */
- u16 rec8b; /* t2i for 8-bit io */
- u16 cyc8b; /* t0 for 8-bit io */
- u16 active; /* t2 or tD */
- u16 recover; /* t2i or tK */
- u16 cycle; /* t0 */
- u16 udma; /* t2CYCTYP/2 */
-};
-
-enum {
- IDE_TIMING_SETUP = BIT(0),
- IDE_TIMING_ACT8B = BIT(1),
- IDE_TIMING_REC8B = BIT(2),
- IDE_TIMING_CYC8B = BIT(3),
- IDE_TIMING_8BIT = IDE_TIMING_ACT8B | IDE_TIMING_REC8B |
- IDE_TIMING_CYC8B,
- IDE_TIMING_ACTIVE = BIT(4),
- IDE_TIMING_RECOVER = BIT(5),
- IDE_TIMING_CYCLE = BIT(6),
- IDE_TIMING_UDMA = BIT(7),
- IDE_TIMING_ALL = IDE_TIMING_SETUP | IDE_TIMING_8BIT |
- IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER |
- IDE_TIMING_CYCLE | IDE_TIMING_UDMA,
-};
-
-struct ide_timing *ide_timing_find_mode(u8);
-u16 ide_pio_cycle_time(ide_drive_t *, u8);
-void ide_timing_merge(struct ide_timing *, struct ide_timing *,
- struct ide_timing *, unsigned int);
-int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int);
-
-#ifdef CONFIG_IDE_XFER_MODE
-int ide_scan_pio_blacklist(char *);
-const char *ide_xfer_verbose(u8);
-int ide_pio_need_iordy(ide_drive_t *, const u8);
-int ide_set_pio_mode(ide_drive_t *, u8);
-int ide_set_dma_mode(ide_drive_t *, u8);
-void ide_set_pio(ide_drive_t *, u8);
-int ide_set_xfer_rate(ide_drive_t *, u8);
-#else
-static inline void ide_set_pio(ide_drive_t *drive, u8 pio) { ; }
-static inline int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) { return -1; }
-#endif
-
-static inline void ide_set_max_pio(ide_drive_t *drive)
-{
- ide_set_pio(drive, 255);
-}
-
-char *ide_media_string(ide_drive_t *);
-
-extern const struct attribute_group *ide_dev_groups[];
-extern struct bus_type ide_bus_type;
-extern struct class *ide_port_class;
-
-static inline void ide_dump_identify(u8 *id)
-{
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 2, id, 512, 0);
-}
-
-static inline int hwif_to_node(ide_hwif_t *hwif)
-{
- return hwif->dev ? dev_to_node(hwif->dev) : -1;
-}
-
-static inline ide_drive_t *ide_get_pair_dev(ide_drive_t *drive)
-{
- ide_drive_t *peer = drive->hwif->devices[(drive->dn ^ 1) & 1];
-
- return (peer->dev_flags & IDE_DFLAG_PRESENT) ? peer : NULL;
-}
-
-static inline void *ide_get_drivedata(ide_drive_t *drive)
-{
- return drive->drive_data;
-}
-
-static inline void ide_set_drivedata(ide_drive_t *drive, void *data)
-{
- drive->drive_data = data;
-}
-
-#define ide_port_for_each_dev(i, dev, port) \
- for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++)
-
-#define ide_port_for_each_present_dev(i, dev, port) \
- for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) \
- if ((dev)->dev_flags & IDE_DFLAG_PRESENT)
-
-#define ide_host_for_each_port(i, port, host) \
- for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++)
-
-
-#endif /* _IDE_H */
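
A usage sketch for the ide_port_for_each_* iterators defined just above
(hwif is assumed to be a valid port, and the name field on ide_drive_t is
assumed from the drivers that use this header); the _present_ variant
filters out slots lacking IDE_DFLAG_PRESENT:

    ide_drive_t *drive;
    int i;

    ide_port_for_each_present_dev(i, drive, hwif) {
            pr_info("%s: media %s\n", drive->name, ide_media_string(drive));
    }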
diff --git a/include/linux/idle_inject.h b/include/linux/idle_inject.h
index a445cd1a36c5..fb88e23a99d3 100644
--- a/include/linux/idle_inject.h
+++ b/include/linux/idle_inject.h
@@ -26,4 +26,8 @@ void idle_inject_set_duration(struct idle_inject_device *ii_dev,
void idle_inject_get_duration(struct idle_inject_device *ii_dev,
unsigned int *run_duration_us,
unsigned int *idle_duration_us);
+
+void idle_inject_set_latency(struct idle_inject_device *ii_dev,
+ unsigned int latency_us);
+
#endif /* __IDLE_INJECT_H__ */
diff --git a/include/linux/idr.h b/include/linux/idr.h
index ac6e946b6767..a0dce14090a9 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -171,7 +171,7 @@ static inline bool idr_is_empty(const struct idr *idr)
*/
static inline void idr_preload_end(void)
{
- preempt_enable();
+ local_unlock(&radix_tree_preloads.lock);
}
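
The preload pairing this hunk touches is typically used as in the sketch
below (my_lock and my_idr are placeholders): idr_preload() pins
preallocated nodes, the allocation itself runs under the caller's lock
with GFP_NOWAIT, and idr_preload_end() now drops the local lock rather
than doing a bare preempt_enable().

    idr_preload(GFP_KERNEL);
    spin_lock(&my_lock);

    id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);

    spin_unlock(&my_lock);
    idr_preload_end();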
/**
@@ -263,7 +263,8 @@ void ida_destroy(struct ida *ida);
*
* Allocate an ID between 0 and %INT_MAX, inclusive.
*
- * Context: Any context.
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
* Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
* or %-ENOSPC if there are no free IDs.
*/
@@ -280,7 +281,8 @@ static inline int ida_alloc(struct ida *ida, gfp_t gfp)
*
* Allocate an ID between @min and %INT_MAX, inclusive.
*
- * Context: Any context.
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
* Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
* or %-ENOSPC if there are no free IDs.
*/
@@ -297,7 +299,8 @@ static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
*
* Allocate an ID between 0 and @max, inclusive.
*
- * Context: Any context.
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
* Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
* or %-ENOSPC if there are no free IDs.
*/
@@ -311,6 +314,10 @@ static inline void ida_init(struct ida *ida)
xa_init_flags(&ida->xa, IDA_INIT_FLAGS);
}
+/*
+ * ida_simple_get() and ida_simple_remove() are deprecated. Use
+ * ida_alloc() and ida_free() instead, respectively.
+ */
#define ida_simple_get(ida, start, end, gfp) \
ida_alloc_range(ida, start, (end) - 1, gfp)
#define ida_simple_remove(ida, id) ida_free(ida, id)
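
A migration sketch for the deprecation note above (my_ida is a
placeholder; ida_simple_get() with end == 0 historically meant "no upper
limit", which ida_alloc() matches):

    /* Before (deprecated): */
    id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
    ida_simple_remove(&my_ida, id);

    /* After: */
    id = ida_alloc(&my_ida, GFP_KERNEL);
    ida_free(&my_ida, id);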
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 7d3f2ced92d1..79690938d9a2 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -9,7 +9,7 @@
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (c) 2018 - 2019 Intel Corporation
+ * Copyright (c) 2018 - 2022 Intel Corporation
*/
#ifndef LINUX_IEEE80211_H
@@ -18,6 +18,7 @@
#include <linux/types.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
+#include <linux/bitfield.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
@@ -75,6 +76,7 @@
#define IEEE80211_STYPE_ACTION 0x00D0
/* control */
+#define IEEE80211_STYPE_TRIGGER 0x0020
#define IEEE80211_STYPE_CTL_EXT 0x0060
#define IEEE80211_STYPE_BACK_REQ 0x0080
#define IEEE80211_STYPE_BACK 0x0090
@@ -105,6 +107,54 @@
/* extension, added by 802.11ad */
#define IEEE80211_STYPE_DMG_BEACON 0x0000
+#define IEEE80211_STYPE_S1G_BEACON 0x0010
+
+/* bits unique to S1G beacon */
+#define IEEE80211_S1G_BCN_NEXT_TBTT 0x100
+
+/* see 802.11ah-2016 9.9 NDP CMAC frames */
+#define IEEE80211_S1G_1MHZ_NDP_BITS 25
+#define IEEE80211_S1G_1MHZ_NDP_BYTES 4
+#define IEEE80211_S1G_2MHZ_NDP_BITS 37
+#define IEEE80211_S1G_2MHZ_NDP_BYTES 5
+
+#define IEEE80211_NDP_FTYPE_CTS 0
+#define IEEE80211_NDP_FTYPE_CF_END 0
+#define IEEE80211_NDP_FTYPE_PS_POLL 1
+#define IEEE80211_NDP_FTYPE_ACK 2
+#define IEEE80211_NDP_FTYPE_PS_POLL_ACK 3
+#define IEEE80211_NDP_FTYPE_BA 4
+#define IEEE80211_NDP_FTYPE_BF_REPORT_POLL 5
+#define IEEE80211_NDP_FTYPE_PAGING 6
+#define IEEE80211_NDP_FTYPE_PREQ 7
+
+#define SM64(f, v) ((((u64)v) << f##_S) & f)
+
+/* NDP CMAC frame fields */
+#define IEEE80211_NDP_FTYPE 0x0000000000000007
+#define IEEE80211_NDP_FTYPE_S 0x0000000000000000
+
+/* 1M Probe Request 11ah 9.9.3.1.1 */
+#define IEEE80211_NDP_1M_PREQ_ANO 0x0000000000000008
+#define IEEE80211_NDP_1M_PREQ_ANO_S 3
+#define IEEE80211_NDP_1M_PREQ_CSSID 0x00000000000FFFF0
+#define IEEE80211_NDP_1M_PREQ_CSSID_S 4
+#define IEEE80211_NDP_1M_PREQ_RTYPE 0x0000000000100000
+#define IEEE80211_NDP_1M_PREQ_RTYPE_S 20
+#define IEEE80211_NDP_1M_PREQ_RSV 0x0000000001E00000
+/* 2M Probe Request 11ah 9.9.3.1.2 */
+#define IEEE80211_NDP_2M_PREQ_ANO 0x0000000000000008
+#define IEEE80211_NDP_2M_PREQ_ANO_S 3
+#define IEEE80211_NDP_2M_PREQ_CSSID 0x0000000FFFFFFFF0
+#define IEEE80211_NDP_2M_PREQ_CSSID_S 4
+#define IEEE80211_NDP_2M_PREQ_RTYPE 0x0000001000000000
+#define IEEE80211_NDP_2M_PREQ_RTYPE_S 36
+
+#define IEEE80211_ANO_NETTYPE_WILD 15
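
A sketch of how SM64() composes the mask/shift pairs above into an NDP
CMAC word (compressed_ssid is a placeholder value supplied by the caller):

    u64 ndp = SM64(IEEE80211_NDP_FTYPE, IEEE80211_NDP_FTYPE_PREQ) |
              SM64(IEEE80211_NDP_1M_PREQ_CSSID, compressed_ssid);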
+
/* control extension - for IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTL_EXT */
#define IEEE80211_CTL_EXT_POLL 0x2000
@@ -121,6 +171,21 @@
#define IEEE80211_MAX_SN IEEE80211_SN_MASK
#define IEEE80211_SN_MODULO (IEEE80211_MAX_SN + 1)
+
+/* PV1 Layout 11ah 9.8.3.1 */
+#define IEEE80211_PV1_FCTL_VERS 0x0003
+#define IEEE80211_PV1_FCTL_FTYPE 0x001c
+#define IEEE80211_PV1_FCTL_STYPE 0x00e0
+#define IEEE80211_PV1_FCTL_TODS 0x0100
+#define IEEE80211_PV1_FCTL_MOREFRAGS 0x0200
+#define IEEE80211_PV1_FCTL_PM 0x0400
+#define IEEE80211_PV1_FCTL_MOREDATA 0x0800
+#define IEEE80211_PV1_FCTL_PROTECTED 0x1000
+#define IEEE80211_PV1_FCTL_END_SP 0x2000
+#define IEEE80211_PV1_FCTL_RELAYED 0x4000
+#define IEEE80211_PV1_FCTL_ACK_POLICY 0x8000
+#define IEEE80211_PV1_FCTL_CTL_EXT 0x0f00
+
static inline bool ieee80211_sn_less(u16 sn1, u16 sn2)
{
return ((sn1 - sn2) & IEEE80211_SN_MASK) > (IEEE80211_SN_MODULO >> 1);
@@ -148,6 +213,7 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
#define IEEE80211_MAX_FRAG_THRESHOLD 2352
#define IEEE80211_MAX_RTS_THRESHOLD 2353
#define IEEE80211_MAX_AID 2007
+#define IEEE80211_MAX_AID_S1G 8191
#define IEEE80211_MAX_TIM_LEN 251
#define IEEE80211_MAX_MESH_PEERINGS 63
/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
@@ -230,12 +296,25 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
#define IEEE80211_HT_CTL_LEN 4
+/* trigger type within common_info of trigger frame */
+#define IEEE80211_TRIGGER_TYPE_MASK 0xf
+#define IEEE80211_TRIGGER_TYPE_BASIC 0x0
+#define IEEE80211_TRIGGER_TYPE_BFRP 0x1
+#define IEEE80211_TRIGGER_TYPE_MU_BAR 0x2
+#define IEEE80211_TRIGGER_TYPE_MU_RTS 0x3
+#define IEEE80211_TRIGGER_TYPE_BSRP 0x4
+#define IEEE80211_TRIGGER_TYPE_GCR_MU_BAR 0x5
+#define IEEE80211_TRIGGER_TYPE_BQRP 0x6
+#define IEEE80211_TRIGGER_TYPE_NFRP 0x7
+
struct ieee80211_hdr {
__le16 frame_control;
__le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
+ struct_group(addrs,
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ );
__le16 seq_ctrl;
u8 addr4[ETH_ALEN];
} __packed __aligned(2);
@@ -259,6 +338,15 @@ struct ieee80211_qos_hdr {
__le16 qos_ctrl;
} __packed __aligned(2);
+struct ieee80211_trigger {
+ __le16 frame_control;
+ __le16 duration;
+ u8 ra[ETH_ALEN];
+ u8 ta[ETH_ALEN];
+ __le64 common_info;
+ u8 variable[];
+} __packed __aligned(2);
+
/**
* ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set
* @fc: frame control bytes in little-endian byteorder
@@ -372,6 +460,17 @@ static inline bool ieee80211_is_data(__le16 fc)
}
/**
+ * ieee80211_is_ext - check if type is IEEE80211_FTYPE_EXT
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee80211_is_ext(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_EXT);
+}
+
+
+/**
* ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set
* @fc: frame control bytes in little-endian byteorder
*/
@@ -470,6 +569,40 @@ static inline bool ieee80211_is_beacon(__le16 fc)
}
/**
+ * ieee80211_is_s1g_beacon - check if IEEE80211_FTYPE_EXT &&
+ * IEEE80211_STYPE_S1G_BEACON
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee80211_is_s1g_beacon(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE |
+ IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON);
+}
+
+/**
+ * ieee80211_next_tbtt_present - check if IEEE80211_FTYPE_EXT &&
+ * IEEE80211_STYPE_S1G_BEACON && IEEE80211_S1G_BCN_NEXT_TBTT
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee80211_next_tbtt_present(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON) &&
+ fc & cpu_to_le16(IEEE80211_S1G_BCN_NEXT_TBTT);
+}
+
+/**
+ * ieee80211_is_s1g_short_beacon - check if next tbtt present bit is set. Only
+ * true for S1G beacons when they're short.
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee80211_is_s1g_short_beacon(__le16 fc)
+{
+ return ieee80211_is_s1g_beacon(fc) && ieee80211_next_tbtt_present(fc);
+}
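
Usage sketch for the frame-control helpers above, assuming an RX path
where skb->data begins with the 802.11 header (frame_control is the first
field of every header variant, so the cast is safe for this test);
handle_s1g_short_beacon() is a hypothetical handler:

    const struct ieee80211_hdr *hdr = (const void *)skb->data;

    if (ieee80211_is_s1g_short_beacon(hdr->frame_control))
            handle_s1g_short_beacon(skb);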
+
+/**
* ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM
* @fc: frame control bytes in little-endian byteorder
*/
@@ -620,6 +753,25 @@ static inline bool ieee80211_is_qos_nullfunc(__le16 fc)
}
/**
+ * ieee80211_is_trigger - check if frame is trigger frame
+ * @fc: frame control field in little-endian byteorder
+ */
+static inline bool ieee80211_is_trigger(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_TRIGGER);
+}
+
+/**
+ * ieee80211_is_any_nullfunc - check if frame is regular or QoS nullfunc frame
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee80211_is_any_nullfunc(__le16 fc)
+{
+ return (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc));
+}
+
+/**
* ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU
* @fc: frame control field in little-endian byteorder
*/
@@ -707,7 +859,7 @@ struct ieee80211_msrment_ie {
u8 token;
u8 mode;
u8 type;
- u8 request[0];
+ u8 request[];
} __packed;
/**
@@ -850,6 +1002,7 @@ enum ieee80211_ht_chanwidth_values {
* @IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: 40 MHz channel width
* @IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ: 80 MHz channel width
* @IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ: 160 MHz or 80+80 MHz channel width
+ * @IEEE80211_OPMODE_NOTIF_BW_160_80P80: 160 / 80+80 MHz indicator flag
* @IEEE80211_OPMODE_NOTIF_RX_NSS_MASK: number of spatial streams mask
* (the NSS value is the value of this field + 1)
* @IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT: number of spatial streams shift
@@ -857,16 +1010,36 @@ enum ieee80211_ht_chanwidth_values {
* using a beamforming steering matrix
*/
enum ieee80211_vht_opmode_bits {
- IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK = 3,
+ IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK = 0x03,
IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ = 0,
IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ = 1,
IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ = 2,
IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ = 3,
+ IEEE80211_OPMODE_NOTIF_BW_160_80P80 = 0x04,
IEEE80211_OPMODE_NOTIF_RX_NSS_MASK = 0x70,
IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT = 4,
IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF = 0x80,
 };
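
A decoding sketch for the NSS subfield above, following the kernel-doc
note that the stored value is the NSS minus one (vht_opmode_rx_nss is a
hypothetical helper name):

    static u8 vht_opmode_rx_nss(u8 opmode)
    {
            return ((opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK) >>
                    IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT) + 1;
    }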
+/**
+ * enum ieee80211_s1g_chanwidth - S1G BSS channel width
+ *
+ * These are defined as the BSS Channel Width in
+ * IEEE Std 802.11ah-2016, Table 10-20.
+ *
+ * @IEEE80211_S1G_CHANWIDTH_1MHZ: 1MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_2MHZ: 2MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_4MHZ: 4MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_8MHZ: 8MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_16MHZ: 16MHz operating channel
+ */
+enum ieee80211_s1g_chanwidth {
+ IEEE80211_S1G_CHANWIDTH_1MHZ = 0,
+ IEEE80211_S1G_CHANWIDTH_2MHZ = 1,
+ IEEE80211_S1G_CHANWIDTH_4MHZ = 3,
+ IEEE80211_S1G_CHANWIDTH_8MHZ = 7,
+ IEEE80211_S1G_CHANWIDTH_16MHZ = 15,
+};
+
#define WLAN_SA_QUERY_TR_ID_LEN 2
#define WLAN_MEMBERSHIP_LEN 8
#define WLAN_USER_POSITION_LEN 16
@@ -884,11 +1057,115 @@ struct ieee80211_tpc_report_ie {
#define IEEE80211_ADDBA_EXT_FRAG_LEVEL_MASK GENMASK(2, 1)
#define IEEE80211_ADDBA_EXT_FRAG_LEVEL_SHIFT 1
#define IEEE80211_ADDBA_EXT_NO_FRAG BIT(0)
+#define IEEE80211_ADDBA_EXT_BUF_SIZE_MASK GENMASK(7, 5)
+#define IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT 10
struct ieee80211_addba_ext_ie {
u8 data;
} __packed;
+/**
+ * struct ieee80211_s1g_bcn_compat_ie
+ *
+ * S1G Beacon Compatibility element
+ */
+struct ieee80211_s1g_bcn_compat_ie {
+ __le16 compat_info;
+ __le16 beacon_int;
+ __le32 tsf_completion;
+} __packed;
+
+/**
+ * struct ieee80211_s1g_oper_ie
+ *
+ * S1G Operation element
+ */
+struct ieee80211_s1g_oper_ie {
+ u8 ch_width;
+ u8 oper_class;
+ u8 primary_ch;
+ u8 oper_ch;
+ __le16 basic_mcs_nss;
+} __packed;
+
+/**
+ * struct ieee80211_aid_response_ie
+ *
+ * AID Response element
+ */
+struct ieee80211_aid_response_ie {
+ __le16 aid;
+ u8 switch_count;
+ __le16 response_int;
+} __packed;
+
+struct ieee80211_s1g_cap {
+ u8 capab_info[10];
+ u8 supp_mcs_nss[5];
+} __packed;
+
+struct ieee80211_ext {
+ __le16 frame_control;
+ __le16 duration;
+ union {
+ struct {
+ u8 sa[ETH_ALEN];
+ __le32 timestamp;
+ u8 change_seq;
+ u8 variable[0];
+ } __packed s1g_beacon;
+ struct {
+ u8 sa[ETH_ALEN];
+ __le32 timestamp;
+ u8 change_seq;
+ u8 next_tbtt[3];
+ u8 variable[0];
+ } __packed s1g_short_beacon;
+ } u;
+} __packed __aligned(2);
+
+#define IEEE80211_TWT_CONTROL_NDP BIT(0)
+#define IEEE80211_TWT_CONTROL_RESP_MODE BIT(1)
+#define IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST BIT(3)
+#define IEEE80211_TWT_CONTROL_RX_DISABLED BIT(4)
+#define IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT BIT(5)
+
+#define IEEE80211_TWT_REQTYPE_REQUEST BIT(0)
+#define IEEE80211_TWT_REQTYPE_SETUP_CMD GENMASK(3, 1)
+#define IEEE80211_TWT_REQTYPE_TRIGGER BIT(4)
+#define IEEE80211_TWT_REQTYPE_IMPLICIT BIT(5)
+#define IEEE80211_TWT_REQTYPE_FLOWTYPE BIT(6)
+#define IEEE80211_TWT_REQTYPE_FLOWID GENMASK(9, 7)
+#define IEEE80211_TWT_REQTYPE_WAKE_INT_EXP GENMASK(14, 10)
+#define IEEE80211_TWT_REQTYPE_PROTECTION BIT(15)
+
+enum ieee80211_twt_setup_cmd {
+ TWT_SETUP_CMD_REQUEST,
+ TWT_SETUP_CMD_SUGGEST,
+ TWT_SETUP_CMD_DEMAND,
+ TWT_SETUP_CMD_GROUPING,
+ TWT_SETUP_CMD_ACCEPT,
+ TWT_SETUP_CMD_ALTERNATE,
+ TWT_SETUP_CMD_DICTATE,
+ TWT_SETUP_CMD_REJECT,
+};
+
+struct ieee80211_twt_params {
+ __le16 req_type;
+ __le64 twt;
+ u8 min_twt_dur;
+ __le16 mantissa;
+ u8 channel;
+} __packed;
+
+struct ieee80211_twt_setup {
+ u8 dialog_token;
+ u8 element_id;
+ u8 length;
+ u8 control;
+ u8 params[];
+} __packed;
+
struct ieee80211_mgmt {
__le16 frame_control;
__le16 duration;
@@ -902,7 +1179,7 @@ struct ieee80211_mgmt {
__le16 auth_transaction;
__le16 status_code;
/* possibly followed by Challenge text */
- u8 variable[0];
+ u8 variable[];
} __packed auth;
struct {
__le16 reason_code;
@@ -911,21 +1188,26 @@ struct ieee80211_mgmt {
__le16 capab_info;
__le16 listen_interval;
/* followed by SSID and Supported rates */
- u8 variable[0];
+ u8 variable[];
} __packed assoc_req;
struct {
__le16 capab_info;
__le16 status_code;
__le16 aid;
/* followed by Supported rates */
- u8 variable[0];
+ u8 variable[];
} __packed assoc_resp, reassoc_resp;
struct {
__le16 capab_info;
+ __le16 status_code;
+ u8 variable[];
+ } __packed s1g_assoc_resp, s1g_reassoc_resp;
+ struct {
+ __le16 capab_info;
__le16 listen_interval;
u8 current_ap[ETH_ALEN];
/* followed by SSID and Supported rates */
- u8 variable[0];
+ u8 variable[];
} __packed reassoc_req;
struct {
__le16 reason_code;
@@ -936,11 +1218,11 @@ struct ieee80211_mgmt {
__le16 capab_info;
/* followed by some of SSID, Supported rates,
* FH Params, DS Params, CF Params, IBSS Params, TIM */
- u8 variable[0];
+ u8 variable[];
} __packed beacon;
struct {
/* only variable items: SSID, Supported rates */
- u8 variable[0];
+ DECLARE_FLEX_ARRAY(u8, variable);
} __packed probe_req;
struct {
__le64 timestamp;
@@ -948,7 +1230,7 @@ struct ieee80211_mgmt {
__le16 capab_info;
/* followed by some of SSID, Supported rates,
* FH Params, DS Params, CF Params, IBSS Params */
- u8 variable[0];
+ u8 variable[];
} __packed probe_resp;
struct {
u8 category;
@@ -957,16 +1239,16 @@ struct ieee80211_mgmt {
u8 action_code;
u8 dialog_token;
u8 status_code;
- u8 variable[0];
+ u8 variable[];
} __packed wme_action;
struct{
u8 action_code;
- u8 variable[0];
+ u8 variable[];
} __packed chan_switch;
struct{
u8 action_code;
struct ieee80211_ext_chansw_ie data;
- u8 variable[0];
+ u8 variable[];
} __packed ext_chan_switch;
struct{
u8 action_code;
@@ -982,7 +1264,7 @@ struct ieee80211_mgmt {
__le16 timeout;
__le16 start_seq_num;
/* followed by BA Extension */
- u8 variable[0];
+ u8 variable[];
} __packed addba_req;
struct{
u8 action_code;
@@ -998,11 +1280,11 @@ struct ieee80211_mgmt {
} __packed delba;
struct {
u8 action_code;
- u8 variable[0];
+ u8 variable[];
} __packed self_prot;
struct{
u8 action_code;
- u8 variable[0];
+ u8 variable[];
} __packed mesh_action;
struct {
u8 action;
@@ -1046,8 +1328,21 @@ struct ieee80211_mgmt {
u8 toa[6];
__le16 tod_error;
__le16 toa_error;
- u8 variable[0];
+ u8 variable[];
} __packed ftm;
+ struct {
+ u8 action_code;
+ u8 variable[];
+ } __packed s1g;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 follow_up;
+ u32 tod;
+ u32 toa;
+ u8 max_tod_error;
+ u8 max_toa_error;
+ } __packed wnm_timing_msr;
} u;
} __packed action;
} u;
@@ -1056,6 +1351,8 @@ struct ieee80211_mgmt {
/* Supported rates membership selectors */
#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127
#define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126
+#define BSS_MEMBERSHIP_SELECTOR_HE_PHY 122
+#define BSS_MEMBERSHIP_SELECTOR_SAE_H2E 123
/* mgmt header + 1 byte category code */
#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
@@ -1445,10 +1742,12 @@ struct ieee80211_ht_operation {
* A-MPDU buffer sizes
* According to HT size varies from 8 to 64 frames
* HE adds the ability to have up to 256 frames.
+ * EHT adds the ability to have up to 1K frames.
*/
#define IEEE80211_MIN_AMPDU_BUF 0x8
#define IEEE80211_MAX_AMPDU_BUF_HT 0x40
-#define IEEE80211_MAX_AMPDU_BUF 0x100
+#define IEEE80211_MAX_AMPDU_BUF_HE 0x100
+#define IEEE80211_MAX_AMPDU_BUF_EHT 0x400
/* Spatial Multiplexing Power Save Modes (for capability) */
@@ -1632,7 +1931,7 @@ struct ieee80211_he_operation {
__le32 he_oper_params;
__le16 he_mcs_nss_set;
/* Optional 0,1,3,4,5,7 or 8 bytes: depends on @he_oper_params */
- u8 optional[0];
+ u8 optional[];
} __packed;
/**
@@ -1644,7 +1943,7 @@ struct ieee80211_he_operation {
struct ieee80211_he_spr {
u8 he_sr_control;
/* Optional 0 to 19 bytes: depends on @he_sr_control */
- u8 optional[0];
+ u8 optional[];
} __packed;
/**
@@ -1673,6 +1972,131 @@ struct ieee80211_mu_edca_param_set {
struct ieee80211_he_mu_edca_param_ac_rec ac_vo;
} __packed;
+#define IEEE80211_EHT_MCS_NSS_RX 0x0f
+#define IEEE80211_EHT_MCS_NSS_TX 0xf0
+
+/**
+ * struct ieee80211_eht_mcs_nss_supp_20mhz_only - EHT 20MHz only station max
+ *	supported NSS per MCS.
+ *
+ * For each field below, bits 0 - 3 indicate the maximal number of spatial
+ * streams for Rx, and bits 4 - 7 indicate the maximal number of spatial streams
+ * for Tx.
+ *
+ * @rx_tx_mcs7_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 0 - 7.
+ * @rx_tx_mcs9_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 8 - 9.
+ * @rx_tx_mcs11_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 10 - 11.
+ * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 12 - 13.
+ */
+struct ieee80211_eht_mcs_nss_supp_20mhz_only {
+ u8 rx_tx_mcs7_max_nss;
+ u8 rx_tx_mcs9_max_nss;
+ u8 rx_tx_mcs11_max_nss;
+ u8 rx_tx_mcs13_max_nss;
+};
+
+/**
+ * struct ieee80211_eht_mcs_nss_supp_bw - EHT max supported NSS per MCS (except
+ * 20MHz only stations).
+ *
+ * For each field below, bits 0 - 3 indicate the maximal number of spatial
+ * streams for Rx, and bits 4 - 7 indicate the maximal number of spatial streams
+ * for Tx.
+ *
+ * @rx_tx_mcs9_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 0 - 9.
+ * @rx_tx_mcs11_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 10 - 11.
+ * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 12 - 13.
+ */
+struct ieee80211_eht_mcs_nss_supp_bw {
+ u8 rx_tx_mcs9_max_nss;
+ u8 rx_tx_mcs11_max_nss;
+ u8 rx_tx_mcs13_max_nss;
+};
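
An unpacking sketch for the nibble layout described above, using the
IEEE80211_EHT_MCS_NSS_RX/TX masks; u8_get_bits() comes from
linux/bitfield.h, which this file now includes (eht_mcs7_nss is a
hypothetical helper name):

    static void eht_mcs7_nss(const struct ieee80211_eht_mcs_nss_supp_20mhz_only *m,
                             u8 *rx_nss, u8 *tx_nss)
    {
            /* bits 0-3: max RX spatial streams; bits 4-7: max TX */
            *rx_nss = u8_get_bits(m->rx_tx_mcs7_max_nss, IEEE80211_EHT_MCS_NSS_RX);
            *tx_nss = u8_get_bits(m->rx_tx_mcs7_max_nss, IEEE80211_EHT_MCS_NSS_TX);
    }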
+
+/**
+ * struct ieee80211_eht_cap_elem_fixed - EHT capabilities fixed data
+ *
+ * This structure is the "EHT Capabilities element" fixed fields as
+ * described in P802.11be_D2.0 section 9.4.2.313.
+ *
+ * @mac_cap_info: MAC capabilities, see IEEE80211_EHT_MAC_CAP*
+ * @phy_cap_info: PHY capabilities, see IEEE80211_EHT_PHY_CAP*
+ */
+struct ieee80211_eht_cap_elem_fixed {
+ u8 mac_cap_info[2];
+ u8 phy_cap_info[9];
+} __packed;
+
+/**
+ * struct ieee80211_eht_cap_elem - EHT capabilities element
+ * @fixed: fixed parts, see &ieee80211_eht_cap_elem_fixed
+ * @optional: optional parts
+ */
+struct ieee80211_eht_cap_elem {
+ struct ieee80211_eht_cap_elem_fixed fixed;
+
+ /*
+ * Followed by:
+ * Supported EHT-MCS And NSS Set field: 4, 3, 6 or 9 octets.
+ * EHT PPE Thresholds field: variable length.
+ */
+ u8 optional[];
+} __packed;
+
+#define IEEE80211_EHT_OPER_INFO_PRESENT 0x01
+#define IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT 0x02
+#define IEEE80211_EHT_OPER_EHT_DEF_PE_DURATION 0x04
+#define IEEE80211_EHT_OPER_GROUP_ADDRESSED_BU_IND_LIMIT 0x08
+#define IEEE80211_EHT_OPER_GROUP_ADDRESSED_BU_IND_EXP_MASK 0x30
+
+/**
+ * struct ieee80211_eht_operation - eht operation element
+ *
+ * This structure is the "EHT Operation Element" fields as
+ * described in P802.11be_D2.0 section 9.4.2.311
+ *
+ * @params: EHT operation element parameters. See &IEEE80211_EHT_OPER_*
+ * @basic_mcs_nss: indicates the EHT-MCSs for each number of spatial streams in
+ * EHT PPDUs that are supported by all EHT STAs in the BSS in transmit and
+ * receive.
+ * @optional: optional parts
+ */
+struct ieee80211_eht_operation {
+ u8 params;
+ __le32 basic_mcs_nss;
+ u8 optional[];
+} __packed;
+
+/**
+ * struct ieee80211_eht_operation_info - eht operation information
+ *
+ * @control: EHT operation information control.
+ * @ccfs0: defines a channel center frequency for a 20, 40, 80, 160, or 320 MHz
+ * EHT BSS.
+ * @ccfs1: defines a channel center frequency for a 160 or 320 MHz EHT BSS.
+ * @optional: optional parts
+ */
+struct ieee80211_eht_operation_info {
+ u8 control;
+ u8 ccfs0;
+ u8 ccfs1;
+ u8 optional[];
+} __packed;
+
/* 802.11ac VHT Capabilities */
#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000
#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001
@@ -1722,6 +2146,9 @@ struct ieee80211_mu_edca_param_set {
* @ext_nss_bw_capable: indicates whether or not the local transmitter
* (rate scaling algorithm) can deal with the new logic
* (dot11VHTExtendedNSSBWCapable)
+ * @max_vht_nss: current maximum NSS as advertised by the STA in
+ * operating mode notification, can be 0 in which case the
+ * capability data will be used to derive this (from MCS support)
*
* Due to the VHT Extended NSS Bandwidth Support, the maximum NSS can
* vary for a given BW/MCS. This function parses the data.
@@ -1730,7 +2157,46 @@ struct ieee80211_mu_edca_param_set {
*/
int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
enum ieee80211_vht_chanwidth bw,
- int mcs, bool ext_nss_bw_capable);
+ int mcs, bool ext_nss_bw_capable,
+ unsigned int max_vht_nss);
+
+/**
+ * enum ieee80211_ap_reg_power - regulatory power for an Access Point
+ *
+ * @IEEE80211_REG_UNSET_AP: Access Point has no regulatory power mode
+ * @IEEE80211_REG_LPI_AP: Indoor Access Point
+ * @IEEE80211_REG_SP_AP: Standard power Access Point
+ * @IEEE80211_REG_VLP_AP: Very low power Access Point
+ * @IEEE80211_REG_AP_POWER_AFTER_LAST: internal
+ * @IEEE80211_REG_AP_POWER_MAX: maximum value
+ */
+enum ieee80211_ap_reg_power {
+ IEEE80211_REG_UNSET_AP,
+ IEEE80211_REG_LPI_AP,
+ IEEE80211_REG_SP_AP,
+ IEEE80211_REG_VLP_AP,
+ IEEE80211_REG_AP_POWER_AFTER_LAST,
+ IEEE80211_REG_AP_POWER_MAX =
+ IEEE80211_REG_AP_POWER_AFTER_LAST - 1,
+};
+
+/**
+ * enum ieee80211_client_reg_power - regulatory power for a client
+ *
+ * @IEEE80211_REG_UNSET_CLIENT: Client has no regulatory power mode
+ * @IEEE80211_REG_DEFAULT_CLIENT: Default Client
+ * @IEEE80211_REG_SUBORDINATE_CLIENT: Subordinate Client
+ * @IEEE80211_REG_CLIENT_POWER_AFTER_LAST: internal
+ * @IEEE80211_REG_CLIENT_POWER_MAX: maximum value
+ */
+enum ieee80211_client_reg_power {
+ IEEE80211_REG_UNSET_CLIENT,
+ IEEE80211_REG_DEFAULT_CLIENT,
+ IEEE80211_REG_SUBORDINATE_CLIENT,
+ IEEE80211_REG_CLIENT_POWER_AFTER_LAST,
+ IEEE80211_REG_CLIENT_POWER_MAX =
+ IEEE80211_REG_CLIENT_POWER_AFTER_LAST - 1,
+};
/* 802.11ax HE MAC capabilities */
#define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01
@@ -1796,10 +2262,10 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
* A-MDPU Length Exponent field in the HT capabilities, VHT capabilities and the
* same field in the HE capabilities.
*/
-#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_USE_VHT 0x00
-#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_1 0x08
-#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2 0x10
-#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED 0x18
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_0 0x00
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_1 0x08
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2 0x10
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3 0x18
#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK 0x18
#define IEEE80211_HE_MAC_CAP3_AMSDU_FRAG 0x20
#define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40
@@ -1808,10 +2274,10 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
#define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01
#define IEEE80211_HE_MAC_CAP4_QTP 0x02
#define IEEE80211_HE_MAC_CAP4_BQR 0x04
-#define IEEE80211_HE_MAC_CAP4_SRP_RESP 0x08
+#define IEEE80211_HE_MAC_CAP4_PSR_RESP 0x08
#define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10
#define IEEE80211_HE_MAC_CAP4_OPS 0x20
-#define IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU 0x40
+#define IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU 0x40
/* Multi TID agg TX is split between byte #4 and #5
* The value is a combination of B39,B40,B41
*/
@@ -1819,18 +2285,24 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 0x01
#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 0x02
-#define IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECVITE_TRANSMISSION 0x04
+#define IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECTIVE_TRANSMISSION 0x04
#define IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU 0x08
#define IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX 0x10
#define IEEE80211_HE_MAC_CAP5_HE_DYNAMIC_SM_PS 0x20
#define IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING 0x40
#define IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX 0x80
+#define IEEE80211_HE_VHT_MAX_AMPDU_FACTOR 20
+#define IEEE80211_HE_HT_MAX_AMPDU_FACTOR 16
+#define IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR 13
+
/* 802.11ax HE PHY capabilities */
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G 0x10
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL 0x1e
+
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G 0x20
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G 0x40
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK 0xfe
@@ -1874,7 +2346,7 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK 0x18
#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 0x00
#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_2 0x20
-#define IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA 0x40
+#define IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU 0x40
#define IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER 0x80
#define IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE 0x01
@@ -1921,15 +2393,15 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU 0x01
#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU 0x02
-#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB 0x04
-#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB 0x08
+#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB 0x04
+#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB 0x08
#define IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB 0x10
#define IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE 0x20
#define IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO 0x40
#define IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT 0x80
-#define IEEE80211_HE_PHY_CAP7_SRP_BASED_SR 0x01
-#define IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR 0x02
+#define IEEE80211_HE_PHY_CAP7_PSR_BASED_SR 0x01
+#define IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP 0x02
#define IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI 0x04
#define IEEE80211_HE_PHY_CAP7_MAX_NC_1 0x08
#define IEEE80211_HE_PHY_CAP7_MAX_NC_2 0x10
@@ -1960,11 +2432,14 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
#define IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU 0x08
#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB 0x10
#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB 0x20
-#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_0US 0x00
-#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_8US 0x40
-#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US 0x80
-#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED 0xc0
-#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK 0xc0
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US 0x0
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US 0x1
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US 0x2
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED 0x3
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_POS 6
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK 0xc0
+
+#define IEEE80211_HE_PHY_CAP10_HE_MU_M1RU_MAX_LTF 0x01
/* 802.11ax HE TX/RX MCS NSS Support */
#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3)
@@ -2008,6 +2483,7 @@ ieee80211_he_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap)
#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK 0x78
#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS (3)
#define IEEE80211_PPE_THRES_INFO_PPET_SIZE (3)
+#define IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE (7)
/*
* Calculate 802.11ax HE capabilities IE PPE field size
@@ -2037,8 +2513,31 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
return n;
}
+static inline bool ieee80211_he_capa_size_ok(const u8 *data, u8 len)
+{
+ const struct ieee80211_he_cap_elem *he_cap_ie_elem = (const void *)data;
+ u8 needed = sizeof(*he_cap_ie_elem);
+
+ if (len < needed)
+ return false;
+
+ needed += ieee80211_he_mcs_nss_size(he_cap_ie_elem);
+ if (len < needed)
+ return false;
+
+ if (he_cap_ie_elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ if (len < needed + 1)
+ return false;
+ needed += ieee80211_he_ppe_size(data[needed],
+ he_cap_ie_elem->phy_cap_info);
+ }
+
+ return len >= needed;
+}
+
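A hedged usage sketch (the helper name and parameters are illustrative;
"data"/"len" describe the bytes following the HE capabilities extension
ID octet):

static void hypothetical_parse_he_capa(const u8 *data, u8 len)
{
	const struct ieee80211_he_cap_elem *he_cap;

	if (!ieee80211_he_capa_size_ok(data, len))
		return;

	/* fixed fields, MCS/NSS set and any PPE thresholds are in bounds */
	he_cap = (const void *)data;
	if (he_cap->phy_cap_info[6] &
	    IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT)
		/* PPE thresholds follow the MCS/NSS set */;
}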
/* HE Operation defines */
-#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000003
+#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000007
#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008
#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0
#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4
@@ -2047,13 +2546,77 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
#define IEEE80211_HE_OPERATION_ER_SU_DISABLE 0x00010000
#define IEEE80211_HE_OPERATION_6GHZ_OP_INFO 0x00020000
#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x3f000000
-#define IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET 24
+#define IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET 24
#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000
#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000
+#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0
+#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1
+
+/**
+ * struct ieee80211_he_6ghz_oper - HE 6 GHz operation Information field
+ * @primary: primary channel
+ * @control: control flags
+ * @ccfs0: channel center frequency segment 0
+ * @ccfs1: channel center frequency segment 1
+ * @minrate: minimum rate (in 1 Mbps units)
+ */
+struct ieee80211_he_6ghz_oper {
+ u8 primary;
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH 0x3
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_20MHZ 0
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_40MHZ 1
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ 2
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ 3
+#define IEEE80211_HE_6GHZ_OPER_CTRL_DUP_BEACON 0x4
+#define IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO 0x38
+ u8 control;
+ u8 ccfs0;
+ u8 ccfs1;
+ u8 minrate;
+} __packed;
+
+/*
+ * "9.4.2.161 Transmit Power Envelope element" of "IEEE Std 802.11ax-2021"
+ * defines four types in "Table 9-275a-Maximum Transmit Power Interpretation
+ * subfield encoding", and two categories for each type in "Table E-12-Regulatory
+ * Info subfield encoding in the United States".
+ * So there can be at most 8 Transmit Power Envelope elements in total.
+ */
+#define IEEE80211_TPE_MAX_IE_COUNT 8
+/*
+ * In "Table 9-277—Meaning of Maximum Transmit Power Count subfield"
+ * of "IEEE Std 802.11ax™‐2021", the max power level is 8.
+ */
+#define IEEE80211_MAX_NUM_PWR_LEVEL 8
+
+#define IEEE80211_TPE_MAX_POWER_COUNT 8
+
+/* transmit power interpretation type of transmit power envelope element */
+enum ieee80211_tx_power_intrpt_type {
+ IEEE80211_TPE_LOCAL_EIRP,
+ IEEE80211_TPE_LOCAL_EIRP_PSD,
+ IEEE80211_TPE_REG_CLIENT_EIRP,
+ IEEE80211_TPE_REG_CLIENT_EIRP_PSD,
+};
+
+/**
+ * struct ieee80211_tx_pwr_env - Transmit Power Envelope element
+ * @tx_power_info: Transmit Power Information field
+ * @tx_power: Maximum Transmit Power field(s)
+ *
+ * This structure represents the "Transmit Power Envelope element"
+ */
+struct ieee80211_tx_pwr_env {
+ u8 tx_power_info;
+ s8 tx_power[IEEE80211_TPE_MAX_POWER_COUNT];
+} __packed;
+
+#define IEEE80211_TX_PWR_ENV_INFO_COUNT 0x7
+#define IEEE80211_TX_PWR_ENV_INFO_INTERPRET 0x38
+#define IEEE80211_TX_PWR_ENV_INFO_CATEGORY 0xC0
+
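A decode sketch for the Transmit Power Information octet, assuming
u8_get_bits() from linux/bitfield.h (the helper name is hypothetical):

static void hypothetical_read_tpe(const struct ieee80211_tx_pwr_env *env)
{
	u8 count = u8_get_bits(env->tx_power_info,
			       IEEE80211_TX_PWR_ENV_INFO_COUNT);
	u8 interpret = u8_get_bits(env->tx_power_info,
				   IEEE80211_TX_PWR_ENV_INFO_INTERPRET);

	/* interpret maps onto enum ieee80211_tx_power_intrpt_type; for the
	 * EIRP interpretations, roughly count + 1 tx_power octets are valid
	 */
	if (interpret == IEEE80211_TPE_LOCAL_EIRP &&
	    count < IEEE80211_TPE_MAX_POWER_COUNT)
		/* read env->tx_power[0..count] */;
}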
/*
* ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size
- * @he_oper_ie: byte data of the He Operations IE, stating from the the byte
+ * @he_oper_ie: byte data of the HE Operations IE, starting from the byte
* after the ext ID byte. It is assumed that he_oper_ie has at least
* sizeof(struct ieee80211_he_operation) bytes, the caller must have
* validated this.
@@ -2062,7 +2625,7 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
static inline u8
ieee80211_he_oper_size(const u8 *he_oper_ie)
{
- struct ieee80211_he_operation *he_oper = (void *)he_oper_ie;
+ const struct ieee80211_he_operation *he_oper = (const void *)he_oper_ie;
u8 oper_len = sizeof(struct ieee80211_he_operation);
u32 he_oper_params;
@@ -2077,7 +2640,7 @@ ieee80211_he_oper_size(const u8 *he_oper_ie)
if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS)
oper_len++;
if (he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO)
- oper_len += 4;
+ oper_len += sizeof(struct ieee80211_he_6ghz_oper);
/* Add the first byte (extension ID) to the total length */
oper_len++;
@@ -2085,13 +2648,44 @@ ieee80211_he_oper_size(const u8 *he_oper_ie)
return oper_len;
}
+/**
+ * ieee80211_he_6ghz_oper - obtain 6 GHz operation field
+ * @he_oper: HE operation element (must be pre-validated for size)
+ * but may be %NULL
+ *
+ * Return: a pointer to the 6 GHz operation field, or %NULL
+ */
+static inline const struct ieee80211_he_6ghz_oper *
+ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper)
+{
+ const u8 *ret = (const void *)&he_oper->optional;
+ u32 he_oper_params;
+
+ if (!he_oper)
+ return NULL;
+
+ he_oper_params = le32_to_cpu(he_oper->he_oper_params);
+
+ if (!(he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO))
+ return NULL;
+ if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
+ ret += 3;
+ if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS)
+ ret++;
+
+ return (const void *)ret;
+}
+
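A usage sketch, assuming he_oper has already been length-checked with
ieee80211_he_oper_size() (the helper name is hypothetical):

static int
hypothetical_6ghz_chanwidth(const struct ieee80211_he_operation *he_oper)
{
	const struct ieee80211_he_6ghz_oper *oper6 =
		ieee80211_he_6ghz_oper(he_oper);

	if (!oper6)
		return -ENOENT;

	/* one of the IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_* values */
	return u8_get_bits(oper6->control,
			   IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH);
}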
/* HE Spatial Reuse defines */
-#define IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT 0x4
-#define IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT 0x8
+#define IEEE80211_HE_SPR_PSR_DISALLOWED BIT(0)
+#define IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED BIT(1)
+#define IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT BIT(2)
+#define IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT BIT(3)
+#define IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED BIT(4)
/*
* ieee80211_he_spr_size - calculate 802.11ax HE Spatial Reuse IE size
- * @he_spr_ie: byte data of the He Spatial Reuse IE, stating from the the byte
+ * @he_spr_ie: byte data of the HE Spatial Reuse IE, starting from the byte
* after the ext ID byte. It is assumed that he_spr_ie has at least
* sizeof(struct ieee80211_he_spr) bytes, the caller must have validated
* this
@@ -2100,16 +2694,16 @@ ieee80211_he_oper_size(const u8 *he_oper_ie)
static inline u8
ieee80211_he_spr_size(const u8 *he_spr_ie)
{
- struct ieee80211_he_spr *he_spr = (void *)he_spr_ie;
+ const struct ieee80211_he_spr *he_spr = (const void *)he_spr_ie;
u8 spr_len = sizeof(struct ieee80211_he_spr);
- u32 he_spr_params;
+ u8 he_spr_params;
/* Make sure the input is not NULL */
if (!he_spr_ie)
return 0;
/* Calc required length */
- he_spr_params = le32_to_cpu(he_spr->he_sr_control);
+ he_spr_params = he_spr->he_sr_control;
if (he_spr_params & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
spr_len++;
if (he_spr_params & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT)
@@ -2121,6 +2715,296 @@ ieee80211_he_spr_size(const u8 *he_spr_ie)
return spr_len;
}
+/* S1G Capabilities Information field */
+#define IEEE80211_S1G_CAPABILITY_LEN 15
+
+#define S1G_CAP0_S1G_LONG BIT(0)
+#define S1G_CAP0_SGI_1MHZ BIT(1)
+#define S1G_CAP0_SGI_2MHZ BIT(2)
+#define S1G_CAP0_SGI_4MHZ BIT(3)
+#define S1G_CAP0_SGI_8MHZ BIT(4)
+#define S1G_CAP0_SGI_16MHZ BIT(5)
+#define S1G_CAP0_SUPP_CH_WIDTH GENMASK(7, 6)
+
+#define S1G_SUPP_CH_WIDTH_2 0
+#define S1G_SUPP_CH_WIDTH_4 1
+#define S1G_SUPP_CH_WIDTH_8 2
+#define S1G_SUPP_CH_WIDTH_16 3
+#define S1G_SUPP_CH_WIDTH_MAX(cap) ((1 << FIELD_GET(S1G_CAP0_SUPP_CH_WIDTH, \
+ cap[0])) << 1)
+
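The width subfield decodes exponentially, so S1G_SUPP_CH_WIDTH_MAX()
maps field values 0, 1, 2, 3 to 2, 4, 8, 16 MHz; a sketch with an
assumed helper name:

static u32 hypothetical_s1g_max_width_mhz(const u8 *s1g_capab)
{
	return S1G_SUPP_CH_WIDTH_MAX(s1g_capab);
}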
+#define S1G_CAP1_RX_LDPC BIT(0)
+#define S1G_CAP1_TX_STBC BIT(1)
+#define S1G_CAP1_RX_STBC BIT(2)
+#define S1G_CAP1_SU_BFER BIT(3)
+#define S1G_CAP1_SU_BFEE BIT(4)
+#define S1G_CAP1_BFEE_STS GENMASK(7, 5)
+
+#define S1G_CAP2_SOUNDING_DIMENSIONS GENMASK(2, 0)
+#define S1G_CAP2_MU_BFER BIT(3)
+#define S1G_CAP2_MU_BFEE BIT(4)
+#define S1G_CAP2_PLUS_HTC_VHT BIT(5)
+#define S1G_CAP2_TRAVELING_PILOT GENMASK(7, 6)
+
+#define S1G_CAP3_RD_RESPONDER BIT(0)
+#define S1G_CAP3_HT_DELAYED_BA BIT(1)
+#define S1G_CAP3_MAX_MPDU_LEN BIT(2)
+#define S1G_CAP3_MAX_AMPDU_LEN_EXP GENMASK(4, 3)
+#define S1G_CAP3_MIN_MPDU_START GENMASK(7, 5)
+
+#define S1G_CAP4_UPLINK_SYNC BIT(0)
+#define S1G_CAP4_DYNAMIC_AID BIT(1)
+#define S1G_CAP4_BAT BIT(2)
+#define S1G_CAP4_TIME_ADE BIT(3)
+#define S1G_CAP4_NON_TIM BIT(4)
+#define S1G_CAP4_GROUP_AID BIT(5)
+#define S1G_CAP4_STA_TYPE GENMASK(7, 6)
+
+#define S1G_CAP5_CENT_AUTH_CONTROL BIT(0)
+#define S1G_CAP5_DIST_AUTH_CONTROL BIT(1)
+#define S1G_CAP5_AMSDU BIT(2)
+#define S1G_CAP5_AMPDU BIT(3)
+#define S1G_CAP5_ASYMMETRIC_BA BIT(4)
+#define S1G_CAP5_FLOW_CONTROL BIT(5)
+#define S1G_CAP5_SECTORIZED_BEAM GENMASK(7, 6)
+
+#define S1G_CAP6_OBSS_MITIGATION BIT(0)
+#define S1G_CAP6_FRAGMENT_BA BIT(1)
+#define S1G_CAP6_NDP_PS_POLL BIT(2)
+#define S1G_CAP6_RAW_OPERATION BIT(3)
+#define S1G_CAP6_PAGE_SLICING BIT(4)
+#define S1G_CAP6_TXOP_SHARING_IMP_ACK BIT(5)
+#define S1G_CAP6_VHT_LINK_ADAPT GENMASK(7, 6)
+
+#define S1G_CAP7_TACK_AS_PS_POLL BIT(0)
+#define S1G_CAP7_DUP_1MHZ BIT(1)
+#define S1G_CAP7_MCS_NEGOTIATION BIT(2)
+#define S1G_CAP7_1MHZ_CTL_RESPONSE_PREAMBLE BIT(3)
+#define S1G_CAP7_NDP_BFING_REPORT_POLL BIT(4)
+#define S1G_CAP7_UNSOLICITED_DYN_AID BIT(5)
+#define S1G_CAP7_SECTOR_TRAINING_OPERATION BIT(6)
+#define S1G_CAP7_TEMP_PS_MODE_SWITCH BIT(7)
+
+#define S1G_CAP8_TWT_GROUPING BIT(0)
+#define S1G_CAP8_BDT BIT(1)
+#define S1G_CAP8_COLOR GENMASK(4, 2)
+#define S1G_CAP8_TWT_REQUEST BIT(5)
+#define S1G_CAP8_TWT_RESPOND BIT(6)
+#define S1G_CAP8_PV1_FRAME BIT(7)
+
+#define S1G_CAP9_LINK_ADAPT_PER_CONTROL_RESPONSE BIT(0)
+
+#define S1G_OPER_CH_WIDTH_PRIMARY_1MHZ BIT(0)
+#define S1G_OPER_CH_WIDTH_OPER GENMASK(4, 1)
+
+/* EHT MAC capabilities as defined in P802.11be_D2.0 section 9.4.2.313.2 */
+#define IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS 0x01
+#define IEEE80211_EHT_MAC_CAP0_OM_CONTROL 0x02
+#define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 0x04
+#define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2 0x08
+#define IEEE80211_EHT_MAC_CAP0_RESTRICTED_TWT 0x10
+#define IEEE80211_EHT_MAC_CAP0_SCS_TRAFFIC_DESC 0x20
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK 0xc0
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_3895 0
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991 1
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454 2
+
+#define IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK 0x01
+
+/* EHT PHY capabilities as defined in P802.11be_D2.0 section 9.4.2.313.3 */
+#define IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ 0x02
+#define IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ 0x04
+#define IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI 0x08
+#define IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO 0x10
+#define IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER 0x20
+#define IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE 0x40
+
+/* EHT beamformee number of spatial streams <= 80MHz is split */
+#define IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK 0x80
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK 0x03
+
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK 0x1c
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK 0xe0
+
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK 0x07
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK 0x38
+
+/* EHT number of sounding dimensions for 320MHz is split */
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK 0xc0
+#define IEEE80211_EHT_PHY_CAP3_SOUNDING_DIM_320MHZ_MASK 0x01
+#define IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK 0x02
+#define IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK 0x04
+#define IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK 0x08
+#define IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK 0x10
+#define IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK 0x20
+#define IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK 0x40
+#define IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK 0x80
+
+#define IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO 0x01
+#define IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP 0x02
+#define IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP 0x04
+#define IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI 0x08
+#define IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK 0xf0
+
+#define IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK 0x01
+#define IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP 0x02
+#define IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP 0x04
+#define IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT 0x08
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK 0x30
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US 0
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US 1
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US 2
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US 3
+
+/* Maximum number of supported EHT LTF is split */
+#define IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK 0xc0
+#define IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK 0x07
+
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK 0x78
+#define IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP 0x80
+
+#define IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW 0x01
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ 0x02
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ 0x04
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ 0x08
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ 0x10
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ 0x20
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ 0x40
+#define IEEE80211_EHT_PHY_CAP7_TB_SOUNDING_FDBK_RATE_LIMIT 0x80
+
+#define IEEE80211_EHT_PHY_CAP8_RX_1024QAM_WIDER_BW_DL_OFDMA 0x01
+#define IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA 0x02
+
+/*
+ * EHT operation channel width as defined in P802.11be_D2.0 section 9.4.2.311
+ */
+#define IEEE80211_EHT_OPER_CHAN_WIDTH 0x7
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_20MHZ 0
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_40MHZ 1
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_80MHZ 2
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_160MHZ 3
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_320MHZ 4
+
+/* Calculate 802.11be EHT capabilities IE Tx/Rx EHT MCS NSS Support Field size */
+static inline u8
+ieee80211_eht_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap,
+ const struct ieee80211_eht_cap_elem_fixed *eht_cap,
+ bool from_ap)
+{
+ u8 count = 0;
+
+ /* on 2.4 GHz, if it supports 40 MHz, the result is 3 */
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)
+ return 3;
+
+ /* on 2.4 GHz, these three bits are reserved, so should be 0 */
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G)
+ count += 3;
+
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ count += 3;
+
+ if (eht_cap->phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
+ count += 3;
+
+ if (count)
+ return count;
+
+ return from_ap ? 3 : 4;
+}
+
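Worked example (illustrative): a 5 GHz STA advertising both the
40/80 MHz and the 160 MHz width bits yields 3 + 3 = 6 octets, while a
20 MHz-only STA with no width bits set falls through to the final
return, i.e. 4 octets, or 3 when the element was sent by an AP.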
+/* 802.11be EHT PPE Thresholds */
+#define IEEE80211_EHT_PPE_THRES_NSS_POS 0
+#define IEEE80211_EHT_PPE_THRES_NSS_MASK 0xf
+#define IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK 0x1f0
+#define IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE 3
+#define IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE 9
+
+/*
+ * Calculate 802.11be EHT capabilities IE PPE field size
+ */
+static inline u8
+ieee80211_eht_ppe_size(u16 ppe_thres_hdr, const u8 *phy_cap_info)
+{
+ u32 n;
+
+ if (!(phy_cap_info[5] &
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT))
+ return 0;
+
+ n = hweight16(ppe_thres_hdr &
+ IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ n *= 1 + u16_get_bits(ppe_thres_hdr, IEEE80211_EHT_PPE_THRES_NSS_MASK);
+
+ /*
+ * Each pair is 6 bits, and we need to add the 9 "header" bits to the
+ * total size.
+ */
+ n = n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2 +
+ IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE;
+ return DIV_ROUND_UP(n, 8);
+}
+
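Worked example (illustrative): an NSS subfield of 1 (two spatial
streams) with two RU index bits set gives n = 2 * (1 + 1) = 4 PPET
pairs, i.e. 4 * 3 * 2 + 9 = 33 bits, so the field occupies
DIV_ROUND_UP(33, 8) = 5 octets.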
+static inline bool
+ieee80211_eht_capa_size_ok(const u8 *he_capa, const u8 *data, u8 len,
+ bool from_ap)
+{
+ const struct ieee80211_eht_cap_elem_fixed *elem = (const void *)data;
+ u8 needed = sizeof(struct ieee80211_eht_cap_elem_fixed);
+
+ if (len < needed || !he_capa)
+ return false;
+
+ needed += ieee80211_eht_mcs_nss_size((const void *)he_capa,
+ (const void *)data,
+ from_ap);
+ if (len < needed)
+ return false;
+
+ if (elem->phy_cap_info[5] &
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) {
+ u16 ppe_thres_hdr;
+
+ if (len < needed + sizeof(ppe_thres_hdr))
+ return false;
+
+ ppe_thres_hdr = get_unaligned_le16(data + needed);
+ needed += ieee80211_eht_ppe_size(ppe_thres_hdr,
+ elem->phy_cap_info);
+ }
+
+ return len >= needed;
+}
+
+static inline bool
+ieee80211_eht_oper_size_ok(const u8 *data, u8 len)
+{
+ const struct ieee80211_eht_operation *elem = (const void *)data;
+ u8 needed = sizeof(*elem);
+
+ if (len < needed)
+ return false;
+
+ if (elem->params & IEEE80211_EHT_OPER_INFO_PRESENT) {
+ needed += 3;
+
+ if (elem->params &
+ IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)
+ needed += 2;
+ }
+
+ return len >= needed;
+}
+
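Because the EHT capabilities can only be sized against the HE element,
a combined validation might look like this hedged sketch (wrapper name
hypothetical):

static bool hypothetical_eht_elems_ok(const u8 *he_capa, u8 he_len,
				      const u8 *eht_capa, u8 eht_len,
				      bool from_ap)
{
	return ieee80211_he_capa_size_ok(he_capa, he_len) &&
	       ieee80211_eht_capa_size_ok(he_capa, eht_capa, eht_len,
					  from_ap);
}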
+#define LISTEN_INT_USF GENMASK(15, 14)
+#define LISTEN_INT_UI GENMASK(13, 0)
+
+#define IEEE80211_MAX_USF FIELD_MAX(LISTEN_INT_USF)
+#define IEEE80211_MAX_UI FIELD_MAX(LISTEN_INT_UI)
+
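A composition sketch using FIELD_PREP() from linux/bitfield.h (helper
name assumed):

static u16 hypothetical_listen_int(u8 usf, u16 ui)
{
	return FIELD_PREP(LISTEN_INT_USF, usf) |
	       FIELD_PREP(LISTEN_INT_UI, ui);
}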
/* Authentication algorithms */
#define WLAN_AUTH_OPEN 0
#define WLAN_AUTH_SHARED_KEY 1
@@ -2273,6 +3157,8 @@ enum ieee80211_statuscode {
/* 802.11ai */
WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 108,
WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109,
+ WLAN_STATUS_SAE_HASH_TO_ELEMENT = 126,
+ WLAN_STATUS_SAE_PK = 127,
};
@@ -2510,19 +3396,29 @@ enum ieee80211_eid {
WLAN_EID_VHT_OPERATION = 192,
WLAN_EID_EXTENDED_BSS_LOAD = 193,
WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194,
- WLAN_EID_VHT_TX_POWER_ENVELOPE = 195,
+ WLAN_EID_TX_POWER_ENVELOPE = 195,
WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196,
WLAN_EID_AID = 197,
WLAN_EID_QUIET_CHANNEL = 198,
WLAN_EID_OPMODE_NOTIF = 199,
+ WLAN_EID_REDUCED_NEIGHBOR_REPORT = 201,
+
+ WLAN_EID_AID_REQUEST = 210,
+ WLAN_EID_AID_RESPONSE = 211,
+ WLAN_EID_S1G_BCN_COMPAT = 213,
+ WLAN_EID_S1G_SHORT_BCN_INTERVAL = 214,
+ WLAN_EID_S1G_TWT = 216,
+ WLAN_EID_S1G_CAPABILITIES = 217,
WLAN_EID_VENDOR_SPECIFIC = 221,
WLAN_EID_QOS_PARAMETER = 222,
+ WLAN_EID_S1G_OPERATION = 232,
WLAN_EID_CAG_NUMBER = 237,
WLAN_EID_AP_CSN = 239,
WLAN_EID_FILS_INDICATION = 240,
WLAN_EID_DILS = 241,
WLAN_EID_FRAGMENT = 242,
+ WLAN_EID_RSNX = 244,
WLAN_EID_EXTENSION = 255
};
@@ -2544,9 +3440,22 @@ enum ieee80211_eid_ext {
WLAN_EID_EXT_UORA = 37,
WLAN_EID_EXT_HE_MU_EDCA = 38,
WLAN_EID_EXT_HE_SPR = 39,
+ WLAN_EID_EXT_NDP_FEEDBACK_REPORT_PARAMSET = 41,
+ WLAN_EID_EXT_BSS_COLOR_CHG_ANN = 42,
+ WLAN_EID_EXT_QUIET_TIME_PERIOD_SETUP = 43,
+ WLAN_EID_EXT_ESS_REPORT = 45,
+ WLAN_EID_EXT_OPS = 46,
+ WLAN_EID_EXT_HE_BSS_LOAD = 47,
WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME = 52,
WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION = 55,
WLAN_EID_EXT_NON_INHERITANCE = 56,
+ WLAN_EID_EXT_KNOWN_BSSID = 57,
+ WLAN_EID_EXT_SHORT_SSID_LIST = 58,
+ WLAN_EID_EXT_HE_6GHZ_CAPA = 59,
+ WLAN_EID_EXT_UL_MU_POWER_CAPA = 60,
+ WLAN_EID_EXT_EHT_OPERATION = 106,
+ WLAN_EID_EXT_EHT_MULTI_LINK = 107,
+ WLAN_EID_EXT_EHT_CAPABILITY = 108,
};
/* Action category code */
@@ -2557,6 +3466,7 @@ enum ieee80211_category {
WLAN_CATEGORY_BACK = 3,
WLAN_CATEGORY_PUBLIC = 4,
WLAN_CATEGORY_RADIO_MEASUREMENT = 5,
+ WLAN_CATEGORY_FAST_BBS_TRANSITION = 6,
WLAN_CATEGORY_HT = 7,
WLAN_CATEGORY_SA_QUERY = 8,
WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9,
@@ -2571,6 +3481,7 @@ enum ieee80211_category {
WLAN_CATEGORY_FST = 18,
WLAN_CATEGORY_UNPROT_DMG = 20,
WLAN_CATEGORY_VHT = 21,
+ WLAN_CATEGORY_S1G = 22,
WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126,
WLAN_CATEGORY_VENDOR_SPECIFIC = 127,
};
@@ -2628,6 +3539,17 @@ enum ieee80211_mesh_actioncode {
WLAN_MESH_ACTION_TBTT_ADJUSTMENT_RESPONSE,
};
+/* Unprotected WNM action codes */
+enum ieee80211_unprotected_wnm_actioncode {
+ WLAN_UNPROTECTED_WNM_ACTION_TIM = 0,
+ WLAN_UNPROTECTED_WNM_ACTION_TIMING_MEASUREMENT_RESPONSE = 1,
+};
+
+/* Public action codes */
+enum ieee80211_public_actioncode {
+ WLAN_PUBLIC_ACTION_FTM_RESPONSE = 33,
+};
+
/* Security key length */
enum ieee80211_key_len {
WLAN_KEY_LEN_WEP40 = 5,
@@ -2644,6 +3566,20 @@ enum ieee80211_key_len {
WLAN_KEY_LEN_BIP_GMAC_256 = 32,
};
+enum ieee80211_s1g_actioncode {
+ WLAN_S1G_AID_SWITCH_REQUEST,
+ WLAN_S1G_AID_SWITCH_RESPONSE,
+ WLAN_S1G_SYNC_CONTROL,
+ WLAN_S1G_STA_INFO_ANNOUNCE,
+ WLAN_S1G_EDCA_PARAM_SET,
+ WLAN_S1G_EL_OPERATION,
+ WLAN_S1G_TWT_SETUP,
+ WLAN_S1G_TWT_TEARDOWN,
+ WLAN_S1G_SECT_GROUP_ID_LIST,
+ WLAN_S1G_SECT_ID_FEEDBACK,
+ WLAN_S1G_TWT_INFORMATION = 11,
+};
+
#define IEEE80211_WEP_IV_LEN 4
#define IEEE80211_WEP_ICV_LEN 4
#define IEEE80211_CCMP_HDR_LEN 8
@@ -2734,7 +3670,12 @@ enum ieee80211_tdls_actioncode {
*/
#define WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT BIT(6)
-/* TDLS capabilities in the the 4th byte of @WLAN_EID_EXT_CAPABILITY */
+/* Timing Measurement protocol for time sync is set in the 7th bit of 3rd byte
+ * of the @WLAN_EID_EXT_CAPABILITY information element
+ */
+#define WLAN_EXT_CAPA3_TIMING_MEASUREMENT_SUPPORT BIT(7)
+
+/* TDLS capabilities in the 4th byte of @WLAN_EID_EXT_CAPABILITY */
#define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4)
#define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5)
#define WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH BIT(6)
@@ -2777,7 +3718,7 @@ enum ieee80211_tdls_actioncode {
#define WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT BIT(7)
 /* Defines support for enhanced multi-bssid advertisement */
-#define WLAN_EXT_CAPA11_EMA_SUPPORT BIT(1)
+#define WLAN_EXT_CAPA11_EMA_SUPPORT BIT(3)
/* TDLS specific payload type in the LLC/SNAP header */
#define WLAN_TDLS_SNAP_RFTYPE 0x2
@@ -3028,12 +3969,19 @@ struct ieee80211_multiple_bssid_configuration {
#define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7)
#define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8)
#define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9)
+#define WLAN_AKM_SUITE_AP_PEER_KEY SUITE(0x000FAC, 10)
#define WLAN_AKM_SUITE_8021X_SUITE_B SUITE(0x000FAC, 11)
#define WLAN_AKM_SUITE_8021X_SUITE_B_192 SUITE(0x000FAC, 12)
+#define WLAN_AKM_SUITE_FT_8021X_SHA384 SUITE(0x000FAC, 13)
#define WLAN_AKM_SUITE_FILS_SHA256 SUITE(0x000FAC, 14)
#define WLAN_AKM_SUITE_FILS_SHA384 SUITE(0x000FAC, 15)
#define WLAN_AKM_SUITE_FT_FILS_SHA256 SUITE(0x000FAC, 16)
#define WLAN_AKM_SUITE_FT_FILS_SHA384 SUITE(0x000FAC, 17)
+#define WLAN_AKM_SUITE_OWE SUITE(0x000FAC, 18)
+#define WLAN_AKM_SUITE_FT_PSK_SHA384 SUITE(0x000FAC, 19)
+#define WLAN_AKM_SUITE_PSK_SHA384 SUITE(0x000FAC, 20)
+
+#define WLAN_AKM_SUITE_WFA_DPP SUITE(WLAN_OUI_WFA, 2)
#define WLAN_MAX_KEY_LEN 32
@@ -3045,6 +3993,7 @@ struct ieee80211_multiple_bssid_configuration {
#define WLAN_OUI_WFA 0x506f9a
#define WLAN_OUI_TYPE_WFA_P2P 9
+#define WLAN_OUI_TYPE_WFA_DPP 0x1A
#define WLAN_OUI_MICROSOFT 0x0050f2
#define WLAN_OUI_TYPE_MICROSOFT_WPA 1
#define WLAN_OUI_TYPE_MICROSOFT_WMM 2
@@ -3088,6 +4037,24 @@ struct ieee80211_tspec_ie {
__le16 medium_time;
} __packed;
+struct ieee80211_he_6ghz_capa {
+ /* uses IEEE80211_HE_6GHZ_CAP_* below */
+ __le16 capa;
+} __packed;
+
+/* HE 6 GHz band capabilities */
+/* uses enum ieee80211_min_mpdu_spacing values */
+#define IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START 0x0007
+/* uses enum ieee80211_vht_max_ampdu_length_exp values */
+#define IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP 0x0038
+/* uses IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_* values */
+#define IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN 0x00c0
+/* WLAN_HT_CAP_SM_PS_* values */
+#define IEEE80211_HE_6GHZ_CAP_SM_PS 0x0600
+#define IEEE80211_HE_6GHZ_CAP_RD_RESPONDER 0x0800
+#define IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS 0x1000
+#define IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS 0x2000
+
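Since @capa is little-endian on the wire, subfields would be extracted
with le16_get_bits() from linux/bitfield.h; a hedged sketch:

static u8
hypothetical_he_6ghz_sm_ps(const struct ieee80211_he_6ghz_capa *capa)
{
	/* one of the WLAN_HT_CAP_SM_PS_* values */
	return le16_get_bits(capa->capa, IEEE80211_HE_6GHZ_CAP_SM_PS);
}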
/**
* ieee80211_get_qos_ctl - get pointer to qos control bytes
* @hdr: the frame
@@ -3182,6 +4149,7 @@ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
*category != WLAN_CATEGORY_SELF_PROTECTED &&
*category != WLAN_CATEGORY_UNPROT_DMG &&
*category != WLAN_CATEGORY_VHT &&
+ *category != WLAN_CATEGORY_S1G &&
*category != WLAN_CATEGORY_VENDOR_SPECIFIC;
}
@@ -3312,6 +4280,18 @@ static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size)
#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024))
#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x))
+/* convert frequencies */
+#define MHZ_TO_KHZ(freq) ((freq) * 1000)
+#define KHZ_TO_MHZ(freq) ((freq) / 1000)
+#define PR_KHZ(f) KHZ_TO_MHZ(f), f % 1000
+#define KHZ_F "%d.%03d"
+
+/* convert powers */
+#define DBI_TO_MBI(gain) ((gain) * 100)
+#define MBI_TO_DBI(gain) ((gain) / 100)
+#define DBM_TO_MBM(gain) ((gain) * 100)
+#define MBM_TO_DBM(gain) ((gain) / 100)
+
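PR_KHZ() pairs with the KHZ_F format string; e.g. (illustrative):

/* prints "center freq: 5955.500 MHz" */
printk(KERN_DEBUG "center freq: " KHZ_F " MHz\n", PR_KHZ(5955500));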
/**
* ieee80211_action_contains_tpc - checks if the frame contains TPC element
* @skb: the skb containing the frame, length will be checked
@@ -3359,6 +4339,40 @@ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb)
return true;
}
+static inline bool ieee80211_is_timing_measurement(struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+ if (skb->len < IEEE80211_MIN_ACTION_SIZE)
+ return false;
+
+ if (!ieee80211_is_action(mgmt->frame_control))
+ return false;
+
+ if (mgmt->u.action.category == WLAN_CATEGORY_WNM_UNPROTECTED &&
+ mgmt->u.action.u.wnm_timing_msr.action_code ==
+ WLAN_UNPROTECTED_WNM_ACTION_TIMING_MEASUREMENT_RESPONSE &&
+ skb->len >= offsetofend(typeof(*mgmt), u.action.u.wnm_timing_msr))
+ return true;
+
+ return false;
+}
+
+static inline bool ieee80211_is_ftm(struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+ if (!ieee80211_is_public_action((void *)mgmt, skb->len))
+ return false;
+
+ if (mgmt->u.action.u.ftm.action_code ==
+ WLAN_PUBLIC_ACTION_FTM_RESPONSE &&
+ skb->len >= offsetofend(typeof(*mgmt), u.action.u.ftm))
+ return true;
+
+ return false;
+}
+
struct element {
u8 id;
u8 datalen;
@@ -3412,4 +4426,269 @@ static inline bool for_each_element_completed(const struct element *element,
return (const u8 *)element == (const u8 *)data + datalen;
}
+/*
+ * RSNX Capabilities:
+ * bits 0-3: Field length (n-1)
+ */
+#define WLAN_RSNX_CAPA_PROTECTED_TWT BIT(4)
+#define WLAN_RSNX_CAPA_SAE_H2E BIT(5)
+
+/*
+ * reduced neighbor report, based on Draft P802.11ax_D6.1,
+ * section 9.4.2.170 and accepted contributions.
+ */
+#define IEEE80211_AP_INFO_TBTT_HDR_TYPE 0x03
+#define IEEE80211_AP_INFO_TBTT_HDR_FILTERED 0x04
+#define IEEE80211_AP_INFO_TBTT_HDR_COLOC 0x08
+#define IEEE80211_AP_INFO_TBTT_HDR_COUNT 0xF0
+#define IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM 9
+#define IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM 13
+
+#define IEEE80211_RNR_TBTT_PARAMS_OCT_RECOMMENDED 0x01
+#define IEEE80211_RNR_TBTT_PARAMS_SAME_SSID 0x02
+#define IEEE80211_RNR_TBTT_PARAMS_MULTI_BSSID 0x04
+#define IEEE80211_RNR_TBTT_PARAMS_TRANSMITTED_BSSID 0x08
+#define IEEE80211_RNR_TBTT_PARAMS_COLOC_ESS 0x10
+#define IEEE80211_RNR_TBTT_PARAMS_PROBE_ACTIVE 0x20
+#define IEEE80211_RNR_TBTT_PARAMS_COLOC_AP 0x40
+
+struct ieee80211_neighbor_ap_info {
+ u8 tbtt_info_hdr;
+ u8 tbtt_info_len;
+ u8 op_class;
+ u8 channel;
+} __packed;
+
+enum ieee80211_range_params_max_total_ltf {
+ IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_4 = 0,
+ IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_8,
+ IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_16,
+ IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_UNSPECIFIED,
+};
+
+/* multi-link device */
+#define IEEE80211_MLD_MAX_NUM_LINKS 15
+
+#define IEEE80211_ML_CONTROL_TYPE 0x0007
+#define IEEE80211_ML_CONTROL_TYPE_BASIC 0
+#define IEEE80211_ML_CONTROL_TYPE_PREQ 1
+#define IEEE80211_ML_CONTROL_TYPE_RECONF 2
+#define IEEE80211_ML_CONTROL_TYPE_TDLS 3
+#define IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS 4
+#define IEEE80211_ML_CONTROL_PRESENCE_MASK 0xfff0
+
+struct ieee80211_multi_link_elem {
+ __le16 control;
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_MLC_BASIC_PRES_LINK_ID 0x0010
+#define IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT 0x0020
+#define IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY 0x0040
+#define IEEE80211_MLC_BASIC_PRES_EML_CAPA 0x0080
+#define IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP 0x0100
+#define IEEE80211_MLC_BASIC_PRES_MLD_ID 0x0200
+
+#define IEEE80211_MED_SYNC_DELAY_DURATION 0x00ff
+#define IEEE80211_MED_SYNC_DELAY_SYNC_OFDM_ED_THRESH 0x0f00
+#define IEEE80211_MED_SYNC_DELAY_SYNC_MAX_NUM_TXOPS 0xf000
+
+#define IEEE80211_EML_CAP_EMLSR_SUPP 0x0001
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY 0x000e
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_0US 0
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_32US 1
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_64US 2
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_128US 3
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US 4
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY 0x0070
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_0US 0
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_16US 1
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_32US 2
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US 3
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_128US 4
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_256US 5
+#define IEEE80211_EML_CAP_EMLMR_SUPPORT 0x0080
+#define IEEE80211_EML_CAP_EMLMR_DELAY 0x0700
+#define IEEE80211_EML_CAP_EMLMR_DELAY_0US 0
+#define IEEE80211_EML_CAP_EMLMR_DELAY_32US 1
+#define IEEE80211_EML_CAP_EMLMR_DELAY_64US 2
+#define IEEE80211_EML_CAP_EMLMR_DELAY_128US 3
+#define IEEE80211_EML_CAP_EMLMR_DELAY_256US 4
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT 0x7800
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_0 0
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128US 1
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_256US 2
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_512US 3
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_1TU 4
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_2TU 5
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_4TU 6
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_8TU 7
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_16TU 8
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_32TU 9
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_64TU 10
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU 11
+
+#define IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS 0x000f
+#define IEEE80211_MLD_CAP_OP_SRS_SUPPORT 0x0010
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP 0x0060
+#define IEEE80211_MLD_CAP_OP_FREQ_SEP_TYPE_IND 0x0f80
+#define IEEE80211_MLD_CAP_OP_AAR_SUPPORT 0x1000
+
+struct ieee80211_mle_basic_common_info {
+ u8 len;
+ u8 mld_mac_addr[ETH_ALEN];
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_MLC_PREQ_PRES_MLD_ID 0x0010
+
+struct ieee80211_mle_preq_common_info {
+ u8 len;
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR 0x0010
+
+/* no fixed fields in RECONF */
+
+struct ieee80211_mle_tdls_common_info {
+ u8 len;
+ u8 ap_mld_mac_addr[ETH_ALEN];
+} __packed;
+
+#define IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR 0x0010
+
+/* no fixed fields in PRIO_ACCESS */
+
+/**
+ * ieee80211_mle_common_size - check multi-link element common size
+ * @data: multi-link element, must already be checked for size using
+ * ieee80211_mle_size_ok()
+ *
+ * Return: the size of the multi-link element's "common" subfield
+ */
+static inline u8 ieee80211_mle_common_size(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ u8 common = 0;
+
+ switch (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE)) {
+ case IEEE80211_ML_CONTROL_TYPE_BASIC:
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_PREQ:
+ common += sizeof(struct ieee80211_mle_preq_common_info);
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_RECONF:
+ if (control & IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR)
+ common += ETH_ALEN;
+ return common;
+ case IEEE80211_ML_CONTROL_TYPE_TDLS:
+ common += sizeof(struct ieee80211_mle_tdls_common_info);
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
+ if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR)
+ common += ETH_ALEN;
+ return common;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+
+ return common + mle->variable[0];
+}
+
+/**
+ * ieee80211_mle_size_ok - validate multi-link element size
+ * @data: pointer to the element data
+ * @len: length of the containing element
+ *
+ * Return: %true if the element size is valid, %false otherwise
+ */
+static inline bool ieee80211_mle_size_ok(const u8 *data, u8 len)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u8 fixed = sizeof(*mle);
+ u8 common = 0;
+ bool check_common_len = false;
+ u16 control;
+
+ if (len < fixed)
+ return false;
+
+ control = le16_to_cpu(mle->control);
+
+ switch (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE)) {
+ case IEEE80211_ML_CONTROL_TYPE_BASIC:
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+ check_common_len = true;
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_ID)
+ common += 1;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_PREQ:
+ common += sizeof(struct ieee80211_mle_preq_common_info);
+ if (control & IEEE80211_MLC_PREQ_PRES_MLD_ID)
+ common += 1;
+ check_common_len = true;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_RECONF:
+ if (control & IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR)
+ common += ETH_ALEN;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_TDLS:
+ common += sizeof(struct ieee80211_mle_tdls_common_info);
+ check_common_len = true;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
+ if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR)
+ common += ETH_ALEN;
+ break;
+ default:
+ /* we don't know this type */
+ return true;
+ }
+
+ if (len < fixed + common)
+ return false;
+
+ if (!check_common_len)
+ return true;
+
+ /* if present, common length is the first octet there */
+ return mle->variable[0] >= common;
+}
+
+enum ieee80211_mle_subelems {
+ IEEE80211_MLE_SUBELEM_PER_STA_PROFILE = 0,
+};
+
+#define IEEE80211_MLE_STA_CONTROL_LINK_ID 0x000f
+#define IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE 0x0010
+#define IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT 0x0020
+#define IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT 0x0040
+#define IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT 0x0080
+#define IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT 0x0100
+#define IEEE80211_MLE_STA_CONTROL_NSTR_LINK_PAIR_PRESENT 0x0200
+#define IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE 0x0400
+#define IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT 0x0800
+
+struct ieee80211_mle_per_sta_profile {
+ __le16 control;
+ u8 sta_info_len;
+ u8 variable[];
+} __packed;
+
+#define for_each_mle_subelement(_elem, _data, _len) \
+ if (ieee80211_mle_size_ok(_data, _len)) \
+ for_each_element(_elem, \
+ _data + ieee80211_mle_common_size(_data),\
+ _len - ieee80211_mle_common_size(_data))
+
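A traversal sketch combining the validation and iteration helpers above
(the function name is hypothetical):

static void hypothetical_walk_mle(const u8 *data, u8 len)
{
	const struct element *sub;

	if (!ieee80211_mle_size_ok(data, len))
		return;

	for_each_mle_subelement(sub, data, len) {
		if (sub->id == IEEE80211_MLE_SUBELEM_PER_STA_PROFILE)
			/* parse the per-STA profile subelement */;
	}
}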
#endif /* LINUX_IEEE80211_H */
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
index 95c831162212..f1f9412b6ac6 100644
--- a/include/linux/ieee802154.h
+++ b/include/linux/ieee802154.h
@@ -134,18 +134,46 @@ enum {
* a successful transmission.
*/
IEEE802154_SUCCESS = 0x0,
-
+ /* The requested operation failed. */
+ IEEE802154_MAC_ERROR = 0x1,
+ /* The requested operation has been cancelled. */
+ IEEE802154_CANCELLED = 0x2,
+ /*
+ * Device is ready to poll the coordinator for data in a
+ * non-beacon-enabled PAN.
+ */
+ IEEE802154_READY_FOR_POLL = 0x3,
+ /* Wrong frame counter. */
+ IEEE802154_COUNTER_ERROR = 0xdb,
+ /*
+ * The frame does not conform to the incoming key usage policy checking
+ * procedure.
+ */
+ IEEE802154_IMPROPER_KEY_TYPE = 0xdc,
+ /*
+ * The frame does not conform to the incoming security level usage
+ * policy checking procedure.
+ */
+ IEEE802154_IMPROPER_SECURITY_LEVEL = 0xdd,
+ /* Secured frame received with an empty Frame Version field. */
+ IEEE802154_UNSUPPORTED_LEGACY = 0xde,
+ /*
+ * A secured frame is received or must be sent but security is not
+ * enabled in the device. Or, the Auxiliary Security Header has security
+ * level of zero in it.
+ */
+ IEEE802154_UNSUPPORTED_SECURITY = 0xdf,
/* The beacon was lost following a synchronization request. */
- IEEE802154_BEACON_LOSS = 0xe0,
+ IEEE802154_BEACON_LOST = 0xe0,
/*
* A transmission could not take place due to activity on the
* channel, i.e., the CSMA-CA mechanism has failed.
*/
- IEEE802154_CHNL_ACCESS_FAIL = 0xe1,
+ IEEE802154_CHANNEL_ACCESS_FAILURE = 0xe1,
/* The GTS request has been denied by the PAN coordinator. */
- IEEE802154_DENINED = 0xe2,
+ IEEE802154_DENIED = 0xe2,
/* The attempt to disable the transceiver has failed. */
- IEEE802154_DISABLE_TRX_FAIL = 0xe3,
+ IEEE802154_DISABLE_TRX_FAILURE = 0xe3,
/*
* The received frame induces a failed security check according to
* the security suite.
@@ -185,9 +213,9 @@ enum {
* A PAN identifier conflict has been detected and communicated to the
* PAN coordinator.
*/
- IEEE802154_PANID_CONFLICT = 0xee,
+ IEEE802154_PAN_ID_CONFLICT = 0xee,
/* A coordinator realignment command has been received. */
- IEEE802154_REALIGMENT = 0xef,
+ IEEE802154_REALIGNMENT = 0xef,
/* The transaction has expired and its information discarded. */
IEEE802154_TRANSACTION_EXPIRED = 0xf0,
/* There is no capacity to store the transaction. */
@@ -203,12 +231,49 @@ enum {
* A SET/GET request was issued with the identifier of a PIB attribute
* that is not supported.
*/
- IEEE802154_UNSUPPORTED_ATTR = 0xf4,
+ IEEE802154_UNSUPPORTED_ATTRIBUTE = 0xf4,
+ /* Missing source or destination address or address mode. */
+ IEEE802154_INVALID_ADDRESS = 0xf5,
+ /*
+ * MLME asked to turn the receiver on, but the on time duration is too
+ * big compared to the macBeaconOrder.
+ */
+ IEEE802154_ON_TIME_TOO_LONG = 0xf6,
+ /*
+ * MLME asked to turn the receiver on, but the request was delayed for
+ * too long before getting processed.
+ */
+ IEEE802154_PAST_TIME = 0xf7,
+ /*
+ * The StartTime parameter is nonzero, and the MLME is not currently
+ * tracking the beacon of the coordinator through which it is
+ * associated.
+ */
+ IEEE802154_TRACKING_OFF = 0xf8,
+ /*
+ * The index inside the hierarchical values in PIBAttribute is out of
+ * range.
+ */
+ IEEE802154_INVALID_INDEX = 0xf9,
+ /*
+ * The limit on the number of PAN descriptors discovered during a scan
+ * has been reached.
+ */
+ IEEE802154_LIMIT_REACHED = 0xfa,
+ /*
+ * The PIBAttribute parameter specifies an attribute that is a read-only
+ * attribute.
+ */
+ IEEE802154_READ_ONLY = 0xfb,
/*
* A request to perform a scan operation failed because the MLME was
* in the process of performing a previously initiated scan operation.
*/
IEEE802154_SCAN_IN_PROGRESS = 0xfc,
+ /* The outgoing superframe overlaps the incoming superframe. */
+ IEEE802154_SUPERFRAME_OVERLAP = 0xfd,
+ /* Any other error situation. */
+ IEEE802154_SYSTEM_ERROR = 0xff,
};
/* frame control handling */
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index bf5c5f32c65e..1ed52441972f 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -48,9 +48,11 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
case ARPHRD_TUNNEL6:
case ARPHRD_SIT:
case ARPHRD_IPGRE:
+ case ARPHRD_IP6GRE:
case ARPHRD_VOID:
case ARPHRD_NONE:
case ARPHRD_RAWIP:
+ case ARPHRD_PIMREG:
return false;
default:
return true;
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 9e57c4411734..d62ef428e3aa 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -19,7 +19,14 @@ struct br_ip {
#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr ip6;
#endif
- } u;
+ } src;
+ union {
+ __be32 ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr ip6;
+#endif
+ unsigned char mac_addr[ETH_ALEN];
+ } dst;
__be16 proto;
__u16 vid;
};
@@ -47,16 +54,27 @@ struct br_ip_list {
#define BR_BCAST_FLOOD BIT(14)
#define BR_NEIGH_SUPPRESS BIT(15)
#define BR_ISOLATED BIT(16)
+#define BR_MRP_AWARE BIT(17)
+#define BR_MRP_LOST_CONT BIT(18)
+#define BR_MRP_LOST_IN_CONT BIT(19)
+#define BR_TX_FWD_OFFLOAD BIT(20)
+#define BR_PORT_LOCKED BIT(21)
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
-extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
+struct net_bridge;
+void brioctl_set(int (*hook)(struct net *net, struct net_bridge *br,
+ unsigned int cmd, struct ifreq *ifr,
+ void __user *uarg));
+int br_ioctl_call(struct net *net, struct net_bridge *br, unsigned int cmd,
+ struct ifreq *ifr, void __user *uarg);
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list);
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto);
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto);
+bool br_multicast_has_router_adjacent(struct net_device *dev, int proto);
bool br_multicast_enabled(const struct net_device *dev);
bool br_multicast_router(const struct net_device *dev);
#else
@@ -75,6 +93,13 @@ static inline bool br_multicast_has_querier_adjacent(struct net_device *dev,
{
return false;
}
+
+static inline bool br_multicast_has_router_adjacent(struct net_device *dev,
+ int proto)
+{
+ return true;
+}
+
static inline bool br_multicast_enabled(const struct net_device *dev)
{
return false;
@@ -92,6 +117,11 @@ int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid);
int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto);
int br_vlan_get_info(const struct net_device *dev, u16 vid,
struct bridge_vlan_info *p_vinfo);
+int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
+ struct bridge_vlan_info *p_vinfo);
+bool br_mst_enabled(const struct net_device *dev);
+int br_mst_get_info(const struct net_device *dev, u16 msti, unsigned long *vids);
+int br_mst_get_state(const struct net_device *dev, u16 msti, u8 *state);
#else
static inline bool br_vlan_enabled(const struct net_device *dev)
{
@@ -118,6 +148,28 @@ static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
{
return -EINVAL;
}
+
+static inline int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
+ struct bridge_vlan_info *p_vinfo)
+{
+ return -EINVAL;
+}
+
+static inline bool br_mst_enabled(const struct net_device *dev)
+{
+ return false;
+}
+
+static inline int br_mst_get_info(const struct net_device *dev, u16 msti,
+ unsigned long *vids)
+{
+ return -EINVAL;
+}
+static inline int br_mst_get_state(const struct net_device *dev, u16 msti,
+ u8 *state)
+{
+ return -EINVAL;
+}
#endif
#if IS_ENABLED(CONFIG_BRIDGE)
@@ -126,6 +178,8 @@ struct net_device *br_fdb_find_port(const struct net_device *br_dev,
__u16 vid);
void br_fdb_clear_offload(const struct net_device *dev, u16 vid);
bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag);
+u8 br_port_get_stp_state(const struct net_device *dev);
+clock_t br_get_ageing_time(const struct net_device *br_dev);
#else
static inline struct net_device *
br_fdb_find_port(const struct net_device *br_dev,
@@ -144,6 +198,16 @@ br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
{
return false;
}
+
+static inline u8 br_port_get_stp_state(const struct net_device *dev)
+{
+ return BR_STATE_DISABLED;
+}
+
+static inline clock_t br_get_ageing_time(const struct net_device *br_dev)
+{
+ return 0;
+}
#endif
#endif
diff --git a/include/linux/if_eql.h b/include/linux/if_eql.h
index d984694c384d..07f9b660b741 100644
--- a/include/linux/if_eql.h
+++ b/include/linux/if_eql.h
@@ -21,11 +21,13 @@
#include <linux/timer.h>
#include <linux/spinlock.h>
+#include <net/net_trackers.h>
#include <uapi/linux/if_eql.h>
typedef struct slave {
struct list_head list;
struct net_device *dev;
+ netdevice_tracker dev_tracker;
long priority;
long priority_bps;
long priority_Bps;
diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h
deleted file mode 100644
index 52224de798aa..000000000000
--- a/include/linux/if_frad.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * DLCI/FRAD Definitions for Frame Relay Access Devices. DLCI devices are
- * created for each DLCI associated with a FRAD. The FRAD driver
- * is not truly a network device, but the lower level device
- * handler. This allows other FRAD manufacturers to use the DLCI
- * code, including its RFC1490 encapsulation alongside the current
- * implementation for the Sangoma cards.
- *
- * Version: @(#)if_ifrad.h 0.15 31 Mar 96
- *
- * Author: Mike McLagan <mike.mclagan@linux.org>
- *
- * Changes:
- * 0.15 Mike McLagan changed structure defs (packed)
- * re-arranged flags
- * added DLCI_RET vars
- */
-#ifndef _FRAD_H_
-#define _FRAD_H_
-
-#include <uapi/linux/if_frad.h>
-
-
-#if defined(CONFIG_DLCI) || defined(CONFIG_DLCI_MODULE)
-
-/* these are the fields of an RFC 1490 header */
-struct frhdr
-{
- unsigned char control;
-
- /* for IP packets, this can be the NLPID */
- unsigned char pad;
-
- unsigned char NLPID;
- unsigned char OUI[3];
- __be16 PID;
-
-#define IP_NLPID pad
-} __packed;
-
-/* see RFC 1490 for the definition of the following */
-#define FRAD_I_UI 0x03
-
-#define FRAD_P_PADDING 0x00
-#define FRAD_P_Q933 0x08
-#define FRAD_P_SNAP 0x80
-#define FRAD_P_CLNP 0x81
-#define FRAD_P_IP 0xCC
-
-struct dlci_local
-{
- struct net_device *master;
- struct net_device *slave;
- struct dlci_conf config;
- int configured;
- struct list_head list;
-
- /* callback function */
- void (*receive)(struct sk_buff *skb, struct net_device *);
-};
-
-struct frad_local
-{
- /* devices which this FRAD is slaved to */
- struct net_device *master[CONFIG_DLCI_MAX];
- short dlci[CONFIG_DLCI_MAX];
-
- struct frad_conf config;
- int configured; /* has this device been configured */
- int initialized; /* mem_start, port, irq set ? */
-
- /* callback functions */
- int (*activate)(struct net_device *, struct net_device *);
- int (*deactivate)(struct net_device *, struct net_device *);
- int (*assoc)(struct net_device *, struct net_device *);
- int (*deassoc)(struct net_device *, struct net_device *);
- int (*dlci_conf)(struct net_device *, struct net_device *, int get);
-
- /* fields that are used by the Sangoma SDLA cards */
- struct timer_list timer;
- struct net_device *dev;
- int type; /* adapter type */
- int state; /* state of the S502/8 control latch */
- int buffer; /* current buffer for S508 firmware */
-};
-
-#endif /* CONFIG_DLCI || CONFIG_DLCI_MODULE */
-
-extern void dlci_ioctl_set(int (*hook)(unsigned int, void __user *));
-
-#endif
diff --git a/include/linux/if_hsr.h b/include/linux/if_hsr.h
new file mode 100644
index 000000000000..0404f5bf4f30
--- /dev/null
+++ b/include/linux/if_hsr.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IF_HSR_H_
+#define _LINUX_IF_HSR_H_
+
+#include <linux/types.h>
+
+struct net_device;
+
+/* used to differentiate various protocols */
+enum hsr_version {
+ HSR_V0 = 0,
+ HSR_V1,
+ PRP_V1,
+};
+
+/* HSR Tag.
+ * As defined in IEC-62439-3:2010, the HSR tag is really { ethertype = 0x88FB,
+ * path, LSDU_size, sequence Nr }. But we let eth_header() create { h_dest,
+ * h_source, h_proto = 0x88FB }, and add { path, LSDU_size, sequence Nr,
+ * encapsulated protocol } instead.
+ *
+ * Field names as defined in the IEC:2010 standard for HSR.
+ */
+struct hsr_tag {
+ __be16 path_and_LSDU_size;
+ __be16 sequence_nr;
+ __be16 encap_proto;
+} __packed;
+
+#define HSR_HLEN 6
+
+#if IS_ENABLED(CONFIG_HSR)
+extern bool is_hsr_master(struct net_device *dev);
+extern int hsr_get_version(struct net_device *dev, enum hsr_version *ver);
+#else
+static inline bool is_hsr_master(struct net_device *dev)
+{
+ return false;
+}
+static inline int hsr_get_version(struct net_device *dev,
+ enum hsr_version *ver)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_HSR */
+
+#endif /*_LINUX_IF_HSR_H_*/
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index a367ead4bf4b..523025106a64 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -21,6 +21,7 @@ struct macvlan_dev {
struct hlist_node hlist;
struct macvlan_port *port;
struct net_device *lowerdev;
+ netdevice_tracker dev_tracker;
void *accel_priv;
struct vlan_pcpu_stats __percpu *pcpu_stats;
@@ -30,6 +31,7 @@ struct macvlan_dev {
enum macvlan_mode mode;
u16 flags;
unsigned int macaddr_count;
+ u32 bc_queue_len_req;
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *netpoll;
#endif
@@ -42,13 +44,14 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
if (likely(success)) {
struct vlan_pcpu_stats *pcpu_stats;
- pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
+ pcpu_stats = get_cpu_ptr(vlan->pcpu_stats);
u64_stats_update_begin(&pcpu_stats->syncp);
- pcpu_stats->rx_packets++;
- pcpu_stats->rx_bytes += len;
+ u64_stats_inc(&pcpu_stats->rx_packets);
+ u64_stats_add(&pcpu_stats->rx_bytes, len);
if (multicast)
- pcpu_stats->rx_multicast++;
+ u64_stats_inc(&pcpu_stats->rx_multicast);
u64_stats_update_end(&pcpu_stats->syncp);
+ put_cpu_ptr(vlan->pcpu_stats);
} else {
this_cpu_inc(vlan->pcpu_stats->rx_errors);
}
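The u64_stats_t counters written above are read back with the
fetch/retry idiom; a hedged reader sketch (helper name assumed):

static u64 hypothetical_rx_packets(struct vlan_pcpu_stats *s)
{
	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		packets = u64_stats_read(&s->rx_packets);
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return packets;
}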
diff --git a/include/linux/if_pppol2tp.h b/include/linux/if_pppol2tp.h
index 96d40942e5a3..c87efd333faa 100644
--- a/include/linux/if_pppol2tp.h
+++ b/include/linux/if_pppol2tp.h
@@ -4,8 +4,6 @@
*
* This file supplies definitions required by the PPP over L2TP driver
* (l2tp_ppp.c). All version information wrt this file is located in l2tp_ppp.c
- *
- * License:
*/
#ifndef __LINUX_IF_PPPOL2TP_H
#define __LINUX_IF_PPPOL2TP_H
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 69e813bcb947..ff3beda1312c 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -5,8 +5,6 @@
*
* This file supplies definitions required by the PPP over Ethernet driver
* (pppox.c). All version information wrt this file is located in pppox.c
- *
- * License:
*/
#ifndef __LINUX_IF_PPPOX_H
#define __LINUX_IF_PPPOX_H
diff --git a/include/linux/if_rmnet.h b/include/linux/if_rmnet.h
index 9661416a9bb4..839d1e48b85e 100644
--- a/include/linux/if_rmnet.h
+++ b/include/linux/if_rmnet.h
@@ -1,55 +1,74 @@
/* SPDX-License-Identifier: GPL-2.0-only
- * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2019, 2021 The Linux Foundation. All rights reserved.
*/
#ifndef _LINUX_IF_RMNET_H_
#define _LINUX_IF_RMNET_H_
+#include <linux/types.h>
+
struct rmnet_map_header {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- u8 pad_len:6;
- u8 reserved_bit:1;
- u8 cd_bit:1;
-#elif defined (__BIG_ENDIAN_BITFIELD)
- u8 cd_bit:1;
- u8 reserved_bit:1;
- u8 pad_len:6;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
- u8 mux_id;
- __be16 pkt_len;
+ u8 flags; /* MAP_CMD_FLAG, MAP_PAD_LEN_MASK */
+ u8 mux_id;
+ __be16 pkt_len; /* Length of packet, including pad */
} __aligned(1);
+/* rmnet_map_header flags field:
+ * PAD_LEN: number of pad bytes following packet data
+ * CMD: 1 = packet contains a MAP command; 0 = packet contains data
+ * NEXT_HEADER: 1 = packet contains V5 CSUM header; 0 = no V5 CSUM header
+ */
+#define MAP_PAD_LEN_MASK GENMASK(5, 0)
+#define MAP_NEXT_HEADER_FLAG BIT(6)
+#define MAP_CMD_FLAG BIT(7)
+
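A decode sketch for the flags octet, assuming u8_get_bits() from
linux/bitfield.h (helper names hypothetical):

static u8 hypothetical_map_pad_len(const struct rmnet_map_header *maph)
{
	return u8_get_bits(maph->flags, MAP_PAD_LEN_MASK);
}

static bool hypothetical_map_is_cmd(const struct rmnet_map_header *maph)
{
	return maph->flags & MAP_CMD_FLAG;
}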
struct rmnet_map_dl_csum_trailer {
- u8 reserved1;
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- u8 valid:1;
- u8 reserved2:7;
-#elif defined (__BIG_ENDIAN_BITFIELD)
- u8 reserved2:7;
- u8 valid:1;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
- u16 csum_start_offset;
- u16 csum_length;
- __be16 csum_value;
+ u8 reserved1;
+ u8 flags; /* MAP_CSUM_DL_VALID_FLAG */
+ __be16 csum_start_offset;
+ __be16 csum_length;
+ __sum16 csum_value;
} __aligned(1);
+/* rmnet_map_dl_csum_trailer flags field:
+ * VALID: 1 = checksum and length valid; 0 = ignore them
+ */
+#define MAP_CSUM_DL_VALID_FLAG BIT(0)
+
struct rmnet_map_ul_csum_header {
__be16 csum_start_offset;
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- u16 csum_insert_offset:14;
- u16 udp_ind:1;
- u16 csum_enabled:1;
-#elif defined (__BIG_ENDIAN_BITFIELD)
- u16 csum_enabled:1;
- u16 udp_ind:1;
- u16 csum_insert_offset:14;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
+ __be16 csum_info; /* MAP_CSUM_UL_* */
} __aligned(1);
+/* csum_info field:
+ * OFFSET: where (offset in bytes) to insert computed checksum
+ * UDP: 1 = UDP checksum (zero checksum means no checksum)
+ * ENABLED: 1 = checksum computation requested
+ */
+#define MAP_CSUM_UL_OFFSET_MASK GENMASK(13, 0)
+#define MAP_CSUM_UL_UDP_FLAG BIT(14)
+#define MAP_CSUM_UL_ENABLED_FLAG BIT(15)
+
+/* MAP CSUM headers */
+struct rmnet_map_v5_csum_header {
+ u8 header_info;
+ u8 csum_info;
+ __be16 reserved;
+} __aligned(1);
+
+/* v5 header_info field
+ * NEXT_HEADER: represents whether there is any next header
+ * HEADER_TYPE: represents the type of this header
+ *
+ * csum_info field
+ * CSUM_VALID_OR_REQ:
+ * 1 = for UL, checksum computation is requested.
+ * 1 = for DL, the checksum has been validated and found valid
+ */
+
+#define MAPV5_HDRINFO_NXT_HDR_FLAG BIT(0)
+#define MAPV5_HDRINFO_HDR_TYPE_FMASK GENMASK(7, 1)
+#define MAPV5_CSUMINFO_VALID_FLAG BIT(7)
+
+#define RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD 2
#endif /* !(_LINUX_IF_RMNET_H_) */
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h
index 915a187cfabd..553552fa635c 100644
--- a/include/linux/if_tap.h
+++ b/include/linux/if_tap.h
@@ -2,14 +2,18 @@
#ifndef _LINUX_IF_TAP_H_
#define _LINUX_IF_TAP_H_
+#include <net/sock.h>
+#include <linux/skb_array.h>
+
+struct file;
+struct socket;
+
#if IS_ENABLED(CONFIG_TAP)
struct socket *tap_get_socket(struct file *);
struct ptr_ring *tap_get_ptr_ring(struct file *file);
#else
#include <linux/err.h>
#include <linux/errno.h>
-struct file;
-struct socket;
static inline struct socket *tap_get_socket(struct file *f)
{
return ERR_PTR(-EINVAL);
@@ -20,9 +24,6 @@ static inline struct ptr_ring *tap_get_ptr_ring(struct file *f)
}
#endif /* CONFIG_TAP */
-#include <net/sock.h>
-#include <linux/skb_array.h>
-
/*
* Maximum times a tap device can be opened. This can be used to
 * configure the number of receive queues, e.g. for multiqueue virtio.
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index ec7e4bd07f82..fc985e5c739d 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -12,11 +12,11 @@
#include <uapi/linux/if_team.h>
struct team_pcpu_stats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 rx_multicast;
- u64 tx_packets;
- u64 tx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_multicast;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
u32 rx_dropped;
u32 tx_dropped;
@@ -67,7 +67,7 @@ struct team_port {
u16 queue_id;
struct list_head qom_list; /* node in queue override mapping list */
struct rcu_head rcu;
- long mode_priv[0];
+ long mode_priv[];
};
static inline struct team_port *team_port_get_rcu(const struct net_device *dev)
@@ -102,10 +102,7 @@ static inline bool team_port_dev_txable(const struct net_device *port_dev)
static inline void team_netpoll_send_skb(struct team_port *port,
struct sk_buff *skb)
{
- struct netpoll *np = port->np;
-
- if (np)
- netpoll_send_skb(np, skb);
+ netpoll_send_skb(port->np, skb);
}
#else
static inline void team_netpoll_send_skb(struct team_port *port,
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index 5bda8cf457b6..2a7660843444 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -27,9 +27,18 @@ struct tun_xdp_hdr {
#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
struct socket *tun_get_socket(struct file *);
struct ptr_ring *tun_get_tx_ring(struct file *file);
-bool tun_is_xdp_frame(void *ptr);
-void *tun_xdp_to_ptr(void *ptr);
-void *tun_ptr_to_xdp(void *ptr);
+static inline bool tun_is_xdp_frame(void *ptr)
+{
+ return (unsigned long)ptr & TUN_XDP_FLAG;
+}
+static inline void *tun_xdp_to_ptr(struct xdp_frame *xdp)
+{
+ return (void *)((unsigned long)xdp | TUN_XDP_FLAG);
+}
+static inline struct xdp_frame *tun_ptr_to_xdp(void *ptr)
+{
+ return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
+}
void tun_ptr_free(void *ptr);
#else
#include <linux/err.h>
@@ -48,11 +57,11 @@ static inline bool tun_is_xdp_frame(void *ptr)
{
return false;
}
-static inline void *tun_xdp_to_ptr(void *ptr)
+static inline void *tun_xdp_to_ptr(struct xdp_frame *xdp)
{
return NULL;
}
-static inline void *tun_ptr_to_xdp(void *ptr)
+static inline struct xdp_frame *tun_ptr_to_xdp(void *ptr)
{
return NULL;
}
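The TUN_XDP_FLAG tag bit lets a single ptr_ring carry both sk_buffs and
xdp_frames; a hedged consumer sketch (function name hypothetical):

static void hypothetical_drain_one(struct ptr_ring *ring)
{
	void *ptr = ptr_ring_consume(ring);

	if (!ptr)
		return;

	if (tun_is_xdp_frame(ptr))
		xdp_return_frame(tun_ptr_to_xdp(ptr));
	else
		kfree_skb(ptr);
}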
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index b05e855f1ddd..e00c4ee81ff7 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -25,6 +25,8 @@
#define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */
#define VLAN_ETH_FRAME_LEN 1518 /* Max. octets in frame sans FCS */
+#define VLAN_MAX_DEPTH 8 /* Max. number of nested VLAN tags parsed */
+
/*
* struct vlan_hdr - vlan header
* @h_vlan_TCI: priority and VLAN ID
@@ -44,8 +46,10 @@ struct vlan_hdr {
* @h_vlan_encapsulated_proto: packet type ID or len
*/
struct vlan_ethhdr {
- unsigned char h_dest[ETH_ALEN];
- unsigned char h_source[ETH_ALEN];
+ struct_group(addrs,
+ unsigned char h_dest[ETH_ALEN];
+ unsigned char h_source[ETH_ALEN];
+ );
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
__be16 h_vlan_encapsulated_proto;
@@ -114,11 +118,11 @@ static inline void vlan_drop_rx_stag_filter_info(struct net_device *dev)
* @tx_dropped: number of tx drops
*/
struct vlan_pcpu_stats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 rx_multicast;
- u64 tx_packets;
- u64 tx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_multicast;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
u32 rx_errors;
u32 tx_dropped;
@@ -160,6 +164,7 @@ struct netpoll;
* @vlan_id: VLAN identifier
* @flags: device flags
* @real_dev: underlying netdevice
+ * @dev_tracker: refcount tracker for @real_dev reference
* @real_dev_addr: address of underlying netdevice
* @dent: proc dir entry
* @vlan_pcpu_stats: ptr to percpu rx stats
@@ -175,6 +180,8 @@ struct vlan_dev_priv {
u16 flags;
struct net_device *real_dev;
+ netdevice_tracker dev_tracker;
+
unsigned char real_dev_addr[ETH_ALEN];
struct proc_dir_entry *dent;
@@ -577,10 +584,10 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
* Returns the EtherType of the packet, regardless of whether it is
* vlan encapsulated (normal or hardware accelerated) or not.
*/
-static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
+static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
int *depth)
{
- unsigned int vlan_depth = skb->mac_len;
+ unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;
/* if type is 802.1Q/AD then the header should already be
* present at mac_len - VLAN_HLEN (if mac_len > 0), or at
@@ -595,13 +602,12 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
vlan_depth = ETH_HLEN;
}
do {
- struct vlan_hdr *vh;
+ struct vlan_hdr vhdr, *vh;
- if (unlikely(!pskb_may_pull(skb,
- vlan_depth + VLAN_HLEN)))
+ vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr);
+ if (unlikely(!vh || !--parse_depth))
return 0;
- vh = (struct vlan_hdr *)(skb->data + vlan_depth);
type = vh->h_vlan_encapsulated_proto;
vlan_depth += VLAN_HLEN;
} while (eth_type_vlan(type));
@@ -620,11 +626,25 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
* Returns the EtherType of the packet, regardless of whether it is
* vlan encapsulated (normal or hardware accelerated) or not.
*/
-static inline __be16 vlan_get_protocol(struct sk_buff *skb)
+static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
{
return __vlan_get_protocol(skb, skb->protocol, NULL);
}
+/* A getter for the SKB protocol field which will handle VLAN tags consistently
+ * whether VLAN acceleration is enabled or not.
+ */
+static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan)
+{
+ if (!skip_vlan)
+ /* VLAN acceleration strips the VLAN header from the skb and
+ * moves it to skb->vlan_proto
+ */
+ return skb_vlan_tag_present(skb) ? skb->vlan_proto : skb->protocol;
+
+ return vlan_get_protocol(skb);
+}
+
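To illustrate the new getter: a caller that needs the real L3 EtherType, whether the tag was stripped by acceleration or is still in-band, could use it as in this sketch (hypothetical helper):

static bool pkt_is_ipv4(const struct sk_buff *skb)
{
	/* skip_vlan=true walks up to VLAN_MAX_DEPTH nested tags */
	return skb_protocol(skb, true) == htons(ETH_P_IP);
}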
static inline void vlan_set_encap_proto(struct sk_buff *skb,
struct vlan_hdr *vhdr)
{
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 463047d0190b..78890143f079 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -38,12 +38,9 @@ struct ip_sf_socklist {
unsigned int sl_max;
unsigned int sl_count;
struct rcu_head rcu;
- __be32 sl_addr[0];
+ __be32 sl_addr[];
};
-#define IP_SFLSIZE(count) (sizeof(struct ip_sf_socklist) + \
- (count) * sizeof(__be32))
-
#define IP_SFBLOCK 10 /* allocate this many at once */
/* ip_mc_socklist is real list now. Speed is not argument;
@@ -121,9 +118,9 @@ extern int ip_mc_source(int add, int omode, struct sock *sk,
struct ip_mreq_source *mreqs, int ifindex);
extern int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf,int ifindex);
extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
- struct ip_msfilter __user *optval, int __user *optlen);
+ sockptr_t optval, sockptr_t optlen);
extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
- struct group_filter __user *optval, int __user *optlen);
+ sockptr_t optval, size_t offset);
extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt,
int dif, int sdif);
extern void ip_mc_init_dev(struct in_device *);
diff --git a/include/linux/ihex.h b/include/linux/ihex.h
index 98cb5ce0b0a0..b824877e6d1b 100644
--- a/include/linux/ihex.h
+++ b/include/linux/ihex.h
@@ -18,7 +18,7 @@
struct ihex_binrec {
__be32 addr;
__be16 len;
- uint8_t data[0];
+ uint8_t data[];
} __attribute__((packed));
static inline uint16_t ihex_binrec_size(const struct ihex_binrec *p)
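The [0] to [] conversions in this series also enable bounds-checked size computation with struct_size() from <linux/overflow.h>. A sketch of the allocation pattern (hypothetical; data_len and src are illustrative, and ihex records normally come from firmware images rather than being built by hand):

struct ihex_binrec *rec;

rec = kzalloc(struct_size(rec, data, data_len), GFP_KERNEL);
if (!rec)
	return -ENOMEM;
rec->len = cpu_to_be16(data_len);
memcpy(rec->data, src, data_len);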
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 5a127c0ed200..7852f6c9a714 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -26,31 +26,40 @@ struct ad_sd_calib_data {
};
struct ad_sigma_delta;
+struct device;
struct iio_dev;
/**
* struct ad_sigma_delta_info - Sigma Delta driver specific callbacks and options
* @set_channel: Will be called to select the current channel, may be NULL.
+ * @append_status: Will be called to enable status append at the end of the sample, may be NULL.
* @set_mode: Will be called to select the current mode, may be NULL.
+ * @disable_all: Will be called to disable all channels, may be NULL.
* @postprocess_sample: Is called for each sampled data word, can be used to
* modify or drop the sample data; may be NULL.
* @has_registers: true if the device has writable and readable registers, false
* if there is just one read-only sample data shift register.
* @addr_shift: Shift of the register address in the communications register.
* @read_mask: Mask for the communications register having the read bit set.
+ * @status_ch_mask: Mask for the channel number stored in status register.
* @data_reg: Address of the data register, if 0 the default address of 0x3 will
* be used.
* @irq_flags: flags for the interrupt used by the triggered buffer
+ * @num_slots: Number of sequencer slots
*/
struct ad_sigma_delta_info {
int (*set_channel)(struct ad_sigma_delta *, unsigned int channel);
+ int (*append_status)(struct ad_sigma_delta *, bool append);
int (*set_mode)(struct ad_sigma_delta *, enum ad_sigma_delta_mode mode);
+ int (*disable_all)(struct ad_sigma_delta *);
int (*postprocess_sample)(struct ad_sigma_delta *, unsigned int raw_sample);
bool has_registers;
unsigned int addr_shift;
unsigned int read_mask;
+ unsigned int status_ch_mask;
unsigned int data_reg;
unsigned long irq_flags;
+ unsigned int num_slots;
};
/**
@@ -75,12 +84,23 @@ struct ad_sigma_delta {
uint8_t comm;
const struct ad_sigma_delta_info *info;
+ unsigned int active_slots;
+ unsigned int current_slot;
+ unsigned int num_slots;
+ bool status_appended;
+ /* map slots to channels in order to know what to expect from devices */
+ unsigned int *slots;
+ uint8_t *samples_buf;
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
+ * 'tx_buf' is up to 32 bits.
+ * 'rx_buf' is up to 32 bits per sample + 64 bit timestamp,
+ * rounded to 16 bytes to take into account padding.
*/
- uint8_t data[4] ____cacheline_aligned;
+ uint8_t tx_buf[4] ____cacheline_aligned;
+ uint8_t rx_buf[16] __aligned(8);
};
static inline int ad_sigma_delta_set_channel(struct ad_sigma_delta *sd,
@@ -92,6 +112,29 @@ static inline int ad_sigma_delta_set_channel(struct ad_sigma_delta *sd,
return 0;
}
+static inline int ad_sigma_delta_append_status(struct ad_sigma_delta *sd, bool append)
+{
+ int ret;
+
+ if (sd->info->append_status) {
+ ret = sd->info->append_status(sd, append);
+ if (ret < 0)
+ return ret;
+
+ sd->status_appended = append;
+ }
+
+ return 0;
+}
+
+static inline int ad_sigma_delta_disable_all(struct ad_sigma_delta *sd)
+{
+ if (sd->info->disable_all)
+ return sd->info->disable_all(sd);
+
+ return 0;
+}
+
static inline int ad_sigma_delta_set_mode(struct ad_sigma_delta *sd,
unsigned int mode)
{
@@ -128,67 +171,8 @@ int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta,
int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev,
struct spi_device *spi, const struct ad_sigma_delta_info *info);
-int ad_sd_setup_buffer_and_trigger(struct iio_dev *indio_dev);
-void ad_sd_cleanup_buffer_and_trigger(struct iio_dev *indio_dev);
+int devm_ad_sd_setup_buffer_and_trigger(struct device *dev, struct iio_dev *indio_dev);
int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig);
-#define __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
- _storagebits, _shift, _extend_name, _type, _mask_all) \
- { \
- .type = (_type), \
- .differential = (_channel2 == -1 ? 0 : 1), \
- .indexed = 1, \
- .channel = (_channel1), \
- .channel2 = (_channel2), \
- .address = (_address), \
- .extend_name = (_extend_name), \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_OFFSET), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .info_mask_shared_by_all = _mask_all, \
- .scan_index = (_si), \
- .scan_type = { \
- .sign = 'u', \
- .realbits = (_bits), \
- .storagebits = (_storagebits), \
- .shift = (_shift), \
- .endianness = IIO_BE, \
- }, \
- }
-
-#define AD_SD_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
- _storagebits, _shift) \
- __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
- _storagebits, _shift, NULL, IIO_VOLTAGE, \
- BIT(IIO_CHAN_INFO_SAMP_FREQ))
-
-#define AD_SD_SHORTED_CHANNEL(_si, _channel, _address, _bits, \
- _storagebits, _shift) \
- __AD_SD_CHANNEL(_si, _channel, _channel, _address, _bits, \
- _storagebits, _shift, "shorted", IIO_VOLTAGE, \
- BIT(IIO_CHAN_INFO_SAMP_FREQ))
-
-#define AD_SD_CHANNEL(_si, _channel, _address, _bits, \
- _storagebits, _shift) \
- __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
- _storagebits, _shift, NULL, IIO_VOLTAGE, \
- BIT(IIO_CHAN_INFO_SAMP_FREQ))
-
-#define AD_SD_CHANNEL_NO_SAMP_FREQ(_si, _channel, _address, _bits, \
- _storagebits, _shift) \
- __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
- _storagebits, _shift, NULL, IIO_VOLTAGE, 0)
-
-#define AD_SD_TEMP_CHANNEL(_si, _address, _bits, _storagebits, _shift) \
- __AD_SD_CHANNEL(_si, 0, -1, _address, _bits, \
- _storagebits, _shift, NULL, IIO_TEMP, \
- BIT(IIO_CHAN_INFO_SAMP_FREQ))
-
-#define AD_SD_SUPPLY_CHANNEL(_si, _channel, _address, _bits, _storagebits, \
- _shift) \
- __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
- _storagebits, _shift, "supply", IIO_VOLTAGE, \
- BIT(IIO_CHAN_INFO_SAMP_FREQ))
-
#endif
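With the devm conversion above, a client driver's probe collapses to allocation, init, buffer setup and registration, with no remove-path teardown. A minimal sketch, assuming a hypothetical my_adc_state embedding struct ad_sigma_delta as member sd, and a hypothetical my_adc_info callback table:

static int my_adc_probe(struct spi_device *spi)
{
	struct my_adc_state *st;
	struct iio_dev *indio_dev;
	int ret;

	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
	if (!indio_dev)
		return -ENOMEM;

	st = iio_priv(indio_dev);

	ret = ad_sd_init(&st->sd, indio_dev, spi, &my_adc_info);
	if (ret)
		return ret;

	ret = devm_ad_sd_setup_buffer_and_trigger(&spi->dev, indio_dev);
	if (ret)
		return ret;

	return devm_iio_device_register(&spi->dev, indio_dev);
}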
diff --git a/include/linux/iio/adc/adi-axi-adc.h b/include/linux/iio/adc/adi-axi-adc.h
new file mode 100644
index 000000000000..52620e5b8052
--- /dev/null
+++ b/include/linux/iio/adc/adi-axi-adc.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Analog Devices Generic AXI ADC IP core driver/library
+ * Link: https://wiki.analog.com/resources/fpga/docs/axi_adc_ip
+ *
+ * Copyright 2012-2020 Analog Devices Inc.
+ */
+#ifndef __ADI_AXI_ADC_H__
+#define __ADI_AXI_ADC_H__
+
+struct device;
+struct iio_chan_spec;
+
+/**
+ * struct adi_axi_adc_chip_info - Chip specific information
+ * @name: Chip name
+ * @id: Chip ID (usually product ID)
+ * @channels: Channel specifications of type @struct iio_chan_spec
+ * @num_channels: Number of @channels
+ * @scale_table: Supported scales by the chip; tuples of 2 ints
+ * @num_scales: Number of scales in the table
+ * @max_rate: Maximum sampling rate supported by the device
+ */
+struct adi_axi_adc_chip_info {
+ const char *name;
+ unsigned int id;
+
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+
+ const unsigned int (*scale_table)[2];
+ int num_scales;
+
+ unsigned long max_rate;
+};
+
+/**
+ * struct adi_axi_adc_conv - data of the ADC attached to the AXI ADC
+ * @chip_info: chip info details for the client ADC
+ * @preenable_setup: op to run in the client before enabling the AXI ADC
+ * @reg_access: IIO debugfs_reg_access hook for the client ADC
+ * @read_raw: IIO read_raw hook for the client ADC
+ * @write_raw: IIO write_raw hook for the client ADC
+ */
+struct adi_axi_adc_conv {
+ const struct adi_axi_adc_chip_info *chip_info;
+
+ int (*preenable_setup)(struct adi_axi_adc_conv *conv);
+ int (*reg_access)(struct adi_axi_adc_conv *conv, unsigned int reg,
+ unsigned int writeval, unsigned int *readval);
+ int (*read_raw)(struct adi_axi_adc_conv *conv,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask);
+ int (*write_raw)(struct adi_axi_adc_conv *conv,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask);
+};
+
+struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
+ size_t sizeof_priv);
+
+void *adi_axi_adc_conv_priv(struct adi_axi_adc_conv *conv);
+
+#endif
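A hypothetical client of this new library would register a conversion device and fill in its hooks roughly as follows, assuming the returned pointer follows the usual ERR_PTR() convention; my_adc_priv, my_adc_chip_info and my_adc_read_raw are illustrative names:

static int my_axi_client_probe(struct device *dev)
{
	struct adi_axi_adc_conv *conv;
	struct my_adc_priv *priv;	/* hypothetical client state */

	conv = devm_adi_axi_adc_conv_register(dev, sizeof(*priv));
	if (IS_ERR(conv))
		return PTR_ERR(conv);

	priv = adi_axi_adc_conv_priv(conv);
	conv->chip_info = &my_adc_chip_info;	/* hypothetical */
	conv->read_raw = my_adc_read_raw;	/* hypothetical */

	return 0;
}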
diff --git a/drivers/iio/adc/qcom-vadc-common.h b/include/linux/iio/adc/qcom-vadc-common.h
index e074902a24cc..aa21b032e861 100644
--- a/drivers/iio/adc/qcom-vadc-common.h
+++ b/include/linux/iio/adc/qcom-vadc-common.h
@@ -6,6 +6,9 @@
#ifndef QCOM_VADC_COMMON_H
#define QCOM_VADC_COMMON_H
+#include <linux/math.h>
+#include <linux/types.h>
+
#define VADC_CONV_TIME_MIN_US 2000
#define VADC_CONV_TIME_MAX_US 2100
@@ -49,16 +52,8 @@
#define ADC5_FULL_SCALE_CODE 0x70e4
#define ADC5_USR_DATA_CHECK 0x8000
-/**
- * struct vadc_map_pt - Map the graph representation for ADC channel
- * @x: Represent the ADC digitized code.
- * @y: Represent the physical data which can be temperature, voltage,
- * resistance.
- */
-struct vadc_map_pt {
- s32 x;
- s32 y;
-};
+#define R_PU_100K 100000
+#define RATIO_MAX_ADC7 BIT(14)
/*
* VADC_CALIB_ABSOLUTE: uses the 625mV and 1.25V as reference channels.
@@ -86,16 +81,6 @@ struct vadc_linear_graph {
};
/**
- * struct vadc_prescale_ratio - Represent scaling ratio for ADC input.
- * @num: the inverse numerator of the gain applied to the input channel.
- * @den: the inverse denominator of the gain applied to the input channel.
- */
-struct vadc_prescale_ratio {
- u32 num;
- u32 den;
-};
-
-/**
* enum vadc_scale_fn_type - Scaling function to convert ADC code to
* physical scaled units for the channel.
* SCALE_DEFAULT: Default scaling to convert raw adc code to voltage (uV).
@@ -110,8 +95,12 @@ struct vadc_prescale_ratio {
* lookup table. The hardware applies offset/slope to adc code.
* SCALE_HW_CALIB_XOTHERM: Returns XO thermistor voltage in millidegC using
* 100k pullup. The hardware applies offset/slope to adc code.
+ * SCALE_HW_CALIB_THERM_100K_PU_PM7: Returns temperature in millidegC using
+ * lookup table for PMIC7. The hardware applies offset/slope to adc code.
* SCALE_HW_CALIB_PMIC_THERM: Returns result in millidegrees Celsius.
* The hardware applies offset/slope to adc code.
+ * SCALE_HW_CALIB_PMIC_THERM_PM7: Returns result in millidegrees Celsius.
+ * The hardware applies offset/slope to adc code. This is for PMIC7.
* SCALE_HW_CALIB_PM5_CHG_TEMP: Returns result in millidegrees for PMIC5
* charger temperature.
* SCALE_HW_CALIB_PM5_SMB_TEMP: Returns result in millidegrees for PMIC5
@@ -126,7 +115,9 @@ enum vadc_scale_fn_type {
SCALE_HW_CALIB_DEFAULT,
SCALE_HW_CALIB_THERM_100K_PULLUP,
SCALE_HW_CALIB_XOTHERM,
+ SCALE_HW_CALIB_THERM_100K_PU_PM7,
SCALE_HW_CALIB_PMIC_THERM,
+ SCALE_HW_CALIB_PMIC_THERM_PM7,
SCALE_HW_CALIB_PM5_CHG_TEMP,
SCALE_HW_CALIB_PM5_SMB_TEMP,
SCALE_HW_CALIB_INVALID,
@@ -136,6 +127,7 @@ struct adc5_data {
const u32 full_scale_code_volt;
const u32 full_scale_code_cur;
const struct adc5_channels *adc_chans;
+ const struct iio_info *info;
unsigned int *decimation;
unsigned int *hw_settle_1;
unsigned int *hw_settle_2;
@@ -143,20 +135,33 @@ struct adc5_data {
int qcom_vadc_scale(enum vadc_scale_fn_type scaletype,
const struct vadc_linear_graph *calib_graph,
- const struct vadc_prescale_ratio *prescale,
+ const struct u32_fract *prescale,
bool absolute,
u16 adc_code, int *result_mdec);
struct qcom_adc5_scale_type {
- int (*scale_fn)(const struct vadc_prescale_ratio *prescale,
+ int (*scale_fn)(const struct u32_fract *prescale,
const struct adc5_data *data, u16 adc_code, int *result);
};
int qcom_adc5_hw_scale(enum vadc_scale_fn_type scaletype,
- const struct vadc_prescale_ratio *prescale,
+ unsigned int prescale_ratio,
const struct adc5_data *data,
u16 adc_code, int *result_mdec);
+u16 qcom_adc_tm5_temp_volt_scale(unsigned int prescale_ratio,
+ u32 full_scale_code_volt, int temp);
+
+u16 qcom_adc_tm5_gen2_temp_res_scale(int temp);
+
+int qcom_adc5_prescaling_from_dt(u32 num, u32 den);
+
+int qcom_adc5_hw_settle_time_from_dt(u32 value, const unsigned int *hw_settle);
+
+int qcom_adc5_avg_samples_from_dt(u32 value);
+
+int qcom_adc5_decimation_from_dt(u32 value, const unsigned int *decimation);
+
int qcom_vadc_decimation_from_dt(u32 value);
#endif /* QCOM_VADC_COMMON_H */
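The switch from vadc_prescale_ratio to the generic struct u32_fract from <linux/math.h> keeps the semantics the removed kernel-doc described: numerator/denominator is the inverse of the gain applied to the input. Undoing the prescale might look like this sketch (hypothetical helper):

static s64 my_vadc_unscale(s64 uv, const struct u32_fract *prescale)
{
	/* the input was divided by num/den, so multiply back */
	return div_s64(uv * prescale->denominator, prescale->numerator);
}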
diff --git a/include/linux/iio/afe/rescale.h b/include/linux/iio/afe/rescale.h
new file mode 100644
index 000000000000..6eecb435488f
--- /dev/null
+++ b/include/linux/iio/afe/rescale.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2018 Axentia Technologies AB
+ */
+
+#ifndef __IIO_RESCALE_H__
+#define __IIO_RESCALE_H__
+
+#include <linux/types.h>
+#include <linux/iio/iio.h>
+
+struct device;
+struct rescale;
+
+struct rescale_cfg {
+ enum iio_chan_type type;
+ int (*props)(struct device *dev, struct rescale *rescale);
+};
+
+struct rescale {
+ const struct rescale_cfg *cfg;
+ struct iio_channel *source;
+ struct iio_chan_spec chan;
+ struct iio_chan_spec_ext_info *ext_info;
+ bool chan_processed;
+ s32 numerator;
+ s32 denominator;
+ s32 offset;
+};
+
+int rescale_process_scale(struct rescale *rescale, int scale_type,
+ int *val, int *val2);
+int rescale_process_offset(struct rescale *rescale, int scale_type,
+ int scale, int scale2, int schan_off,
+ int *val, int *val2);
+#endif /* __IIO_RESCALE_H__ */
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
index 016d8a068353..6564bdcdac66 100644
--- a/include/linux/iio/buffer-dma.h
+++ b/include/linux/iio/buffer-dma.h
@@ -11,17 +11,12 @@
#include <linux/kref.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
-#include <linux/iio/buffer.h>
+#include <linux/iio/buffer_impl.h>
struct iio_dma_buffer_queue;
struct iio_dma_buffer_ops;
struct device;
-struct iio_buffer_block {
- u32 size;
- u32 bytes_used;
-};
-
/**
* enum iio_block_state - State of a struct iio_dma_buffer_block
* @IIO_BLOCK_STATE_DEQUEUED: Block is not queued
diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h
index b3a57444a886..5c355be89814 100644
--- a/include/linux/iio/buffer-dmaengine.h
+++ b/include/linux/iio/buffer-dmaengine.h
@@ -7,11 +7,11 @@
#ifndef __IIO_DMAENGINE_H__
#define __IIO_DMAENGINE_H__
-struct iio_buffer;
+struct iio_dev;
struct device;
-struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
- const char *channel);
-void iio_dmaengine_buffer_free(struct iio_buffer *buffer);
+int devm_iio_dmaengine_buffer_setup(struct device *dev,
+ struct iio_dev *indio_dev,
+ const char *channel);
#endif
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index fbba4093f06c..418b1307d3f2 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -11,11 +11,15 @@
struct iio_buffer;
-void iio_buffer_set_attrs(struct iio_buffer *buffer,
- const struct attribute **attrs);
+enum iio_buffer_direction {
+ IIO_BUFFER_DIRECTION_IN,
+ IIO_BUFFER_DIRECTION_OUT,
+};
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data);
+int iio_pop_from_buffer(struct iio_buffer *buffer, void *data);
+
/**
* iio_push_to_buffers_with_timestamp() - push data and timestamp to buffers
* @indio_dev: iio_dev structure for device.
@@ -41,10 +45,14 @@ static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev,
return iio_push_to_buffers(indio_dev, data);
}
+int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev,
+ const void *data, size_t data_sz,
+ int64_t timestamp);
+
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
const unsigned long *mask);
-void iio_device_attach_buffer(struct iio_dev *indio_dev,
- struct iio_buffer *buffer);
+int iio_device_attach_buffer(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer);
#endif /* _IIO_BUFFER_GENERIC_H_ */
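For reference, the canonical producer for these helpers is a pollfunc bottom half pushing one scan plus timestamp; a minimal sketch assuming a hypothetical my_state whose scan member is large enough and suitably aligned:

static irqreturn_t my_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct my_state *st = iio_priv(indio_dev);	/* hypothetical */

	iio_push_to_buffers_with_timestamp(indio_dev, &st->scan,
					   iio_get_time_ns(indio_dev));

	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}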
diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h
index a4d2d8061ef6..e2ca8ea23e19 100644
--- a/include/linux/iio/buffer_impl.h
+++ b/include/linux/iio/buffer_impl.h
@@ -6,6 +6,9 @@
#ifdef CONFIG_IIO_BUFFER
+#include <uapi/linux/iio/buffer.h>
+#include <linux/iio/buffer.h>
+
struct iio_dev;
struct iio_buffer;
@@ -21,6 +24,10 @@ struct iio_buffer;
* @read: try to get a specified number of bytes (must exist)
* @data_available: indicates how much data is available for reading from
* the buffer.
+ * @remove_from: remove scan from buffer. Drivers should call this to
+ * remove a scan from a buffer.
+ * @write: try to write a number of bytes
+ * @space_available: returns the number of bytes available in a buffer
* @request_update: if a parameter change has been marked, update underlying
* storage.
* @set_bytes_per_datum:set number of bytes per datum
@@ -47,6 +54,9 @@ struct iio_buffer_access_funcs {
int (*store_to)(struct iio_buffer *buffer, const void *data);
int (*read)(struct iio_buffer *buffer, size_t n, char __user *buf);
size_t (*data_available)(struct iio_buffer *buffer);
+ int (*remove_from)(struct iio_buffer *buffer, void *data);
+ int (*write)(struct iio_buffer *buffer, size_t n, const char __user *buf);
+ size_t (*space_available)(struct iio_buffer *buffer);
int (*request_update)(struct iio_buffer *buffer);
@@ -72,9 +82,15 @@ struct iio_buffer {
/** @length: Number of datums in buffer. */
unsigned int length;
+ /** @flags: File ops flags including busy flag. */
+ unsigned long flags;
+
/** @bytes_per_datum: Size of individual datum including timestamp. */
size_t bytes_per_datum;
+ /* @direction: Direction of the data stream (in/out). */
+ enum iio_buffer_direction direction;
+
/**
* @access: Buffer access functions associated with the
* implementation.
@@ -94,29 +110,17 @@ struct iio_buffer {
unsigned int watermark;
/* private: */
- /*
- * @scan_el_attrs: Control of scan elements if that scan mode
- * control method is used.
- */
- struct attribute_group *scan_el_attrs;
-
/* @scan_timestamp: Does the scan mode include a timestamp. */
bool scan_timestamp;
- /* @scan_el_dev_attr_list: List of scan element related attributes. */
- struct list_head scan_el_dev_attr_list;
-
- /* @buffer_group: Attributes of the buffer group. */
- struct attribute_group buffer_group;
+ /* @buffer_attr_list: List of buffer attributes. */
+ struct list_head buffer_attr_list;
/*
- * @scan_el_group: Attribute group for those attributes not
- * created from the iio_chan_info array.
+ * @buffer_group: Attributes of the new buffer group.
+ * Includes scan elements attributes.
*/
- struct attribute_group scan_el_group;
-
- /* @stufftoread: Flag to indicate new data. */
- bool stufftoread;
+ struct attribute_group buffer_group;
/* @attrs: Standard attributes of the buffer. */
const struct attribute **attrs;
@@ -124,6 +128,9 @@ struct iio_buffer {
/* @demux_bounce: Buffer for doing gather from incoming scan. */
void *demux_bounce;
+ /* @attached_entry: Entry in the devices list of buffers attached by the driver. */
+ struct list_head attached_entry;
+
/* @buffer_list: Entry in the devices list of current buffers. */
struct list_head buffer_list;
diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h
index bb331e6356a9..e72167b96d27 100644
--- a/include/linux/iio/common/cros_ec_sensors_core.h
+++ b/include/linux/iio/common/cros_ec_sensors_core.h
@@ -12,6 +12,7 @@
#include <linux/irqreturn.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_ec_sensorhub.h>
enum {
CROS_EC_SENSOR_X,
@@ -29,8 +30,7 @@ enum {
*/
#define CROS_EC_SAMPLE_SIZE (sizeof(s64) * 2)
-/* Minimum sampling period to use when device is suspending */
-#define CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY 1000 /* 1 second */
+typedef irqreturn_t (*cros_ec_sensors_capture_t)(int irq, void *p);
/**
* struct cros_ec_sensors_core_state - state data for EC sensors IIO driver
@@ -41,7 +41,10 @@ enum {
* @param: motion sensor parameters structure
* @resp: motion sensor response structure
* @type: type of motion sensor
- * @loc: location where the motion sensor is placed
+ * @range_updated: True if the range of the sensor has been
+ * updated.
+ * @curr_range: If updated, the current range value.
+ * It will be reapplied at every resume.
* @calib: calibration parameters. Note that trigger
* captured data will always provide the calibrated
* data
@@ -50,7 +53,9 @@ enum {
* the timestamp. The timestamp is always last and
* is always 8-byte aligned.
* @read_ec_sensors_data: function used for accessing sensors values
- * @cuur_sampl_freq: current sampling period
+ * @fifo_max_event_count: Size of the EC sensor FIFO
+ * @frequencies: Table of known available frequencies:
+ * 0, Min and Max in mHz
*/
struct cros_ec_sensors_core_state {
struct cros_ec_device *ec;
@@ -61,113 +66,51 @@ struct cros_ec_sensors_core_state {
struct ec_response_motion_sense *resp;
enum motionsensor_type type;
- enum motionsensor_location loc;
+
+ bool range_updated;
+ int curr_range;
struct calib_data {
s16 offset;
u16 scale;
} calib[CROS_EC_SENSOR_MAX_AXIS];
s8 sign[CROS_EC_SENSOR_MAX_AXIS];
- u8 samples[CROS_EC_SAMPLE_SIZE];
+ u8 samples[CROS_EC_SAMPLE_SIZE] __aligned(8);
int (*read_ec_sensors_data)(struct iio_dev *indio_dev,
unsigned long scan_mask, s16 *data);
- int curr_sampl_freq;
-
- /* Table of known available frequencies : 0, Min and Max in mHz */
- int frequencies[3];
+ u32 fifo_max_event_count;
+ int frequencies[6];
};
-/**
- * cros_ec_sensors_read_lpc() - retrieve data from EC shared memory
- * @indio_dev: pointer to IIO device
- * @scan_mask: bitmap of the sensor indices to scan
- * @data: location to store data
- *
- * This is the safe function for reading the EC data. It guarantees that the
- * data sampled was not modified by the EC while being read.
- *
- * Return: 0 on success, -errno on failure.
- */
int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev, unsigned long scan_mask,
s16 *data);
-/**
- * cros_ec_sensors_read_cmd() - retrieve data using the EC command protocol
- * @indio_dev: pointer to IIO device
- * @scan_mask: bitmap of the sensor indices to scan
- * @data: location to store data
- *
- * Return: 0 on success, -errno on failure.
- */
int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev, unsigned long scan_mask,
s16 *data);
struct platform_device;
-/**
- * cros_ec_sensors_core_init() - basic initialization of the core structure
- * @pdev: platform device created for the sensors
- * @indio_dev: iio device structure of the device
- * @physical_device: true if the device refers to a physical device
- *
- * Return: 0 on success, -errno on failure.
- */
int cros_ec_sensors_core_init(struct platform_device *pdev,
- struct iio_dev *indio_dev, bool physical_device);
+ struct iio_dev *indio_dev, bool physical_device,
+ cros_ec_sensors_capture_t trigger_capture);
+
+int cros_ec_sensors_core_register(struct device *dev,
+ struct iio_dev *indio_dev,
+ cros_ec_sensorhub_push_data_cb_t push_data);
-/**
- * cros_ec_sensors_capture() - the trigger handler function
- * @irq: the interrupt number.
- * @p: a pointer to the poll function.
- *
- * On a trigger event occurring, if the pollfunc is attached then this
- * handler is called as a threaded interrupt (and hence may sleep). It
- * is responsible for grabbing data from the device and pushing it into
- * the associated buffer.
- *
- * Return: IRQ_HANDLED
- */
irqreturn_t cros_ec_sensors_capture(int irq, void *p);
+int cros_ec_sensors_push_data(struct iio_dev *indio_dev,
+ s16 *data,
+ s64 timestamp);
-/**
- * cros_ec_motion_send_host_cmd() - send motion sense host command
- * @st: pointer to state information for device
- * @opt_length: optional length to reduce the response size, useful on the data
- * path. Otherwise, the maximal allowed response size is used
- *
- * When called, the sub-command is assumed to be set in param->cmd.
- *
- * Return: 0 on success, -errno on failure.
- */
int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *st,
u16 opt_length);
-/**
- * cros_ec_sensors_core_read() - function to request a value from the sensor
- * @st: pointer to state information for device
- * @chan: channel specification structure table
- * @val: will contain one element making up the returned value
- * @val2: will contain another element making up the returned value
- * @mask: specifies which values to be requested
- *
- * Return: the type of value returned by the device
- */
int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask);
-/**
- * cros_ec_sensors_core_read_avail() - get available values
- * @indio_dev: pointer to state information for device
- * @chan: channel specification structure table
- * @vals: list of available values
- * @type: type of data returned
- * @length: number of data returned in the array
- * @mask: specifies which values to be requested
- *
- * Return: an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST
- */
int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
const int **vals,
@@ -175,23 +118,13 @@ int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev,
int *length,
long mask);
-/**
- * cros_ec_sensors_core_write() - function to write a value to the sensor
- * @st: pointer to state information for device
- * @chan: channel specification structure table
- * @val: first part of value to write
- * @val2: second part of value to write
- * @mask: specifies which values to write
- *
- * Return: the type of value returned by the device
- */
int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
struct iio_chan_spec const *chan,
int val, int val2, long mask);
extern const struct dev_pm_ops cros_ec_sensors_pm_ops;
-/* List of extended channel specification for all sensors */
+/* List of extended channel specification for all sensors. */
extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[];
#endif /* __CROS_EC_SENSORS_CORE_H */
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 33e939977444..db4a1b260348 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -13,6 +13,7 @@
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/irqreturn.h>
+#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
@@ -20,6 +21,8 @@
#include <linux/platform_data/st_sensors_pdata.h>
+#define LSM9DS0_IMU_DEV_NAME "lsm9ds0"
+
/*
* Buffer size max case: 2bytes per channel, 3 channels in total +
* 8bytes timestamp channel (s64)
@@ -46,8 +49,8 @@
#define ST_SENSORS_MAX_NAME 17
#define ST_SENSORS_MAX_4WAI 8
-#define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \
- ch2, s, endian, rbits, sbits, addr) \
+#define ST_SENSORS_LSM_CHANNELS_EXT(device_type, mask, index, mod, \
+ ch2, s, endian, rbits, sbits, addr, ext) \
{ \
.type = device_type, \
.modified = mod, \
@@ -63,8 +66,14 @@
.storagebits = sbits, \
.endianness = endian, \
}, \
+ .ext_info = ext, \
}
+#define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \
+ ch2, s, endian, rbits, sbits, addr) \
+ ST_SENSORS_LSM_CHANNELS_EXT(device_type, mask, index, mod, \
+ ch2, s, endian, rbits, sbits, addr, NULL)
+
#define ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL() \
IIO_DEV_ATTR_SAMP_FREQ_AVAIL( \
st_sensors_sysfs_sampling_frequency_avail)
@@ -211,8 +220,8 @@ struct st_sensor_settings {
/**
* struct st_sensor_data - ST sensor device status
- * @dev: Pointer to instance of struct device (I2C or SPI).
* @trig: The trigger in use by the core driver.
+ * @mount_matrix: The mounting matrix of the sensor.
* @sensor_settings: Pointer to the specific sensor settings in use.
* @current_fullscale: Maximum range of measure by the sensor.
* @vdd: Pointer to sensor's Vdd power supply
@@ -228,11 +237,11 @@ struct st_sensor_settings {
* @hw_irq_trigger: if we're using the hardware interrupt on the sensor.
* @hw_timestamp: Latest timestamp from the interrupt handler, when in use.
* @buffer_data: Data used by buffer part.
+ * @odr_lock: Local lock for preventing concurrent ODR accesses/changes
*/
struct st_sensor_data {
- struct device *dev;
struct iio_trigger *trig;
- struct iio_mount_matrix *mount_matrix;
+ struct iio_mount_matrix mount_matrix;
struct st_sensor_settings *sensor_settings;
struct st_sensor_fullscale_avl *current_fullscale;
struct regulator *vdd;
@@ -253,6 +262,8 @@ struct st_sensor_data {
s64 hw_timestamp;
char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] ____cacheline_aligned;
+
+ struct mutex odr_lock;
};
#ifdef CONFIG_IIO_BUFFER
@@ -263,7 +274,6 @@ irqreturn_t st_sensors_trigger_handler(int irq, void *p);
int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
const struct iio_trigger_ops *trigger_ops);
-void st_sensors_deallocate_trigger(struct iio_dev *indio_dev);
int st_sensors_validate_device(struct iio_trigger *trig,
struct iio_dev *indio_dev);
#else
@@ -272,10 +282,6 @@ static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
{
return 0;
}
-static inline void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
-{
- return;
-}
#define st_sensors_validate_device NULL
#endif
@@ -288,8 +294,6 @@ int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable);
int st_sensors_power_enable(struct iio_dev *indio_dev);
-void st_sensors_power_disable(struct iio_dev *indio_dev);
-
int st_sensors_debugfs_reg_access(struct iio_dev *indio_dev,
unsigned reg, unsigned writeval,
unsigned *readval);
@@ -317,4 +321,20 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
void st_sensors_dev_name_probe(struct device *dev, char *name, int len);
+/* Accelerometer */
+const struct st_sensor_settings *st_accel_get_settings(const char *name);
+int st_accel_common_probe(struct iio_dev *indio_dev);
+
+/* Gyroscope */
+const struct st_sensor_settings *st_gyro_get_settings(const char *name);
+int st_gyro_common_probe(struct iio_dev *indio_dev);
+
+/* Magnetometer */
+const struct st_sensor_settings *st_magn_get_settings(const char *name);
+int st_magn_common_probe(struct iio_dev *indio_dev);
+
+/* Pressure */
+const struct st_sensor_settings *st_press_get_settings(const char *name);
+int st_press_common_probe(struct iio_dev *indio_dev);
+
#endif /* ST_SENSORS_H */
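The get_settings/common_probe pairs above let bus front-ends stay thin. A sketch of an I2C accelerometer front-end (hypothetical, error handling trimmed):

static int my_accel_probe(struct i2c_client *client)
{
	const struct st_sensor_settings *settings;
	struct st_sensor_data *data;
	struct iio_dev *indio_dev;

	st_sensors_dev_name_probe(&client->dev, client->name,
				  sizeof(client->name));
	settings = st_accel_get_settings(client->name);
	if (!settings)
		return -ENODEV;

	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
	if (!indio_dev)
		return -ENOMEM;

	data = iio_priv(indio_dev);
	data->sensor_settings = (struct st_sensor_settings *)settings;

	return st_accel_common_probe(indio_dev);
}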
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 2bde8c912d4d..6802596b017c 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -13,6 +13,7 @@
struct iio_dev;
struct iio_chan_spec;
struct device;
+struct fwnode_handle;
/**
* struct iio_channel - everything needed for a consumer to use a channel
@@ -64,15 +65,6 @@ void iio_channel_release(struct iio_channel *chan);
struct iio_channel *devm_iio_channel_get(struct device *dev,
const char *consumer_channel);
/**
- * devm_iio_channel_release() - Resource managed version of
- * iio_channel_release().
- * @dev: Pointer to consumer device for which resource
- * is allocared.
- * @chan: The channel to be released.
- */
-void devm_iio_channel_release(struct device *dev, struct iio_channel *chan);
-
-/**
* iio_channel_get_all() - get all channels associated with a client
* @dev: Pointer to consumer device.
*
@@ -107,13 +99,33 @@ void iio_channel_release_all(struct iio_channel *chan);
struct iio_channel *devm_iio_channel_get_all(struct device *dev);
/**
- * devm_iio_channel_release_all() - Resource managed version of
- * iio_channel_release_all().
- * @dev: Pointer to consumer device for which resource
- * is allocared.
- * @chan: Array channel to be released.
+ * fwnode_iio_channel_get_by_name() - get description of all that is needed to access channel.
+ * @fwnode: Pointer to consumer Firmware node
+ * @name: Unique name to identify the channel on the consumer
+ * side. This typically describes the channel's use within
+ * the consumer. E.g. 'battery_voltage'
*/
-void devm_iio_channel_release_all(struct device *dev, struct iio_channel *chan);
+struct iio_channel *fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode,
+ const char *name);
+
+/**
+ * devm_fwnode_iio_channel_get_by_name() - Resource managed version of
+ * fwnode_iio_channel_get_by_name().
+ * @dev: Pointer to consumer device.
+ * @fwnode: Pointer to consumer Firmware node
+ * @consumer_channel: Unique name to identify the channel on the consumer
+ * side. This typically describes the channel's use within
+ * the consumer. E.g. 'battery_voltage'
+ *
+ * Returns an ERR_PTR() encoded negative errno if it is not able to get the
+ * iio channel, otherwise a valid pointer to the iio channel.
+ *
+ * The allocated iio channel is automatically released when the device is
+ * unbound.
+ */
+struct iio_channel *devm_fwnode_iio_channel_get_by_name(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *consumer_channel);
struct iio_cb_buffer;
/**
@@ -224,6 +236,21 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val);
int iio_read_channel_processed(struct iio_channel *chan, int *val);
/**
+ * iio_read_channel_processed_scale() - read and scale a processed value
+ * @chan: The channel being queried.
+ * @val: Value read back.
+ * @scale: Scale factor to apply during the conversion
+ *
+ * Returns an error code or 0.
+ *
+ * This function will read a processed value from a channel. This will work
+ * like @iio_read_channel_processed() but also scale with an additional
+ * scale factor while attempting to minimize any precision loss.
+ */
+int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
+ unsigned int scale);
+
+/**
* iio_write_channel_attribute() - Write values to the device attribute.
* @chan: The channel being queried.
* @val: Value being written.
diff --git a/include/linux/iio/dac/mcp4725.h b/include/linux/iio/dac/mcp4725.h
index e9801c8d49c0..1f7e53c506b6 100644
--- a/include/linux/iio/dac/mcp4725.h
+++ b/include/linux/iio/dac/mcp4725.h
@@ -15,7 +15,7 @@
* @vref_buffered: Controls buffering of the external reference voltage.
*
* Vref related settings are available only on MCP4726. See
- * Documentation/devicetree/bindings/iio/dac/mcp4725.txt for more information.
+ * Documentation/devicetree/bindings/iio/dac/microchip,mcp4725.yaml for more information.
*/
struct mcp4725_platform_data {
bool use_vref;
diff --git a/include/linux/iio/driver.h b/include/linux/iio/driver.h
index 36de60a5da7a..7a157ed218f6 100644
--- a/include/linux/iio/driver.h
+++ b/include/linux/iio/driver.h
@@ -8,6 +8,7 @@
#ifndef _IIO_INKERN_H_
#define _IIO_INKERN_H_
+struct device;
struct iio_dev;
struct iio_map;
@@ -26,4 +27,17 @@ int iio_map_array_register(struct iio_dev *indio_dev,
*/
int iio_map_array_unregister(struct iio_dev *indio_dev);
+/**
+ * devm_iio_map_array_register - device-managed version of iio_map_array_register
+ * @dev: Device object to which to bind the unwinding of this registration
+ * @indio_dev: Pointer to the iio_dev structure
+ * @maps: Pointer to an IIO map object which is to be registered to this IIO device
+ *
+ * This function will call iio_map_array_register() to register an IIO map object
+ * and will also hook a callback to the iio_map_array_unregister() function to
+ * handle de-registration of the IIO map object when the device's refcount goes to
+ * zero.
+ */
+int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev, struct iio_map *maps);
+
#endif
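Typical use of the new devm helper: an ADC driver exposes named channels to in-kernel consumers through a sentinel-terminated iio_map table (names here are hypothetical):

static struct iio_map my_adc_maps[] = {
	{
		.consumer_dev_name = "my-charger",
		.consumer_channel = "battery_voltage",
		.adc_channel_label = "vbat",
	},
	{ }	/* sentinel */
};

/* in probe(), after allocating indio_dev: */
ret = devm_iio_map_array_register(dev, indio_dev, my_adc_maps);
if (ret)
	return ret;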
diff --git a/include/linux/iio/hw-consumer.h b/include/linux/iio/hw-consumer.h
index 44d48bb1d39f..e8255c2e33bc 100644
--- a/include/linux/iio/hw-consumer.h
+++ b/include/linux/iio/hw-consumer.h
@@ -14,7 +14,6 @@ struct iio_hw_consumer;
struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev);
void iio_hw_consumer_free(struct iio_hw_consumer *hwc);
struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev);
-void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc);
int iio_hw_consumer_enable(struct iio_hw_consumer *hwc);
void iio_hw_consumer_disable(struct iio_hw_consumer *hwc);
diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h
new file mode 100644
index 000000000000..d1f8b30a7c8b
--- /dev/null
+++ b/include/linux/iio/iio-opaque.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _INDUSTRIAL_IO_OPAQUE_H_
+#define _INDUSTRIAL_IO_OPAQUE_H_
+
+/**
+ * struct iio_dev_opaque - industrial I/O device opaque information
+ * @indio_dev: public industrial I/O device information
+ * @id: used to identify device internally
+ * @currentmode: operating mode currently in use, may be eventually
+ * checked by device drivers but should be considered
+ * read-only as this is a core internal bit
+ * @driver_module: used to make it harder to undercut users
+ * @mlock_key: lockdep class for iio_dev lock
+ * @info_exist_lock: lock to prevent use during removal
+ * @trig_readonly: mark the current trigger immutable
+ * @event_interface: event chrdevs associated with interrupt lines
+ * @attached_buffers: array of buffers statically attached by the driver
+ * @attached_buffers_cnt: number of buffers in the array of statically attached buffers
+ * @buffer_ioctl_handler: ioctl() handler for this IIO device's buffer interface
+ * @buffer_list: list of all buffers currently attached
+ * @channel_attr_list: keep track of automatically created channel
+ * attributes
+ * @chan_attr_group: group for all attrs in base directory
+ * @ioctl_handlers: ioctl handlers registered with the core handler
+ * @groups: attribute groups
+ * @groupcounter: index of next attribute group
+ * @legacy_scan_el_group: attribute group for legacy scan elements
+ * @legacy_buffer_group: attribute group for legacy buffer attributes
+ * @bounce_buffer: for devices that call iio_push_to_buffers_with_ts_unaligned()
+ * @bounce_buffer_size: size of the currently allocated bounce buffer
+ * @scan_index_timestamp: cache of the index to the timestamp
+ * @clock_id: timestamping clock posix identifier
+ * @chrdev: associated character device
+ * @flags: file ops related flags including busy flag.
+ * @debugfs_dentry: device specific debugfs dentry
+ * @cached_reg_addr: cached register address for debugfs reads
+ * @read_buf: read buffer to be used for the initial reg read
+ * @read_buf_len: data length in @read_buf
+ */
+struct iio_dev_opaque {
+ struct iio_dev indio_dev;
+ int currentmode;
+ int id;
+ struct module *driver_module;
+ struct lock_class_key mlock_key;
+ struct mutex info_exist_lock;
+ bool trig_readonly;
+ struct iio_event_interface *event_interface;
+ struct iio_buffer **attached_buffers;
+ unsigned int attached_buffers_cnt;
+ struct iio_ioctl_handler *buffer_ioctl_handler;
+ struct list_head buffer_list;
+ struct list_head channel_attr_list;
+ struct attribute_group chan_attr_group;
+ struct list_head ioctl_handlers;
+ const struct attribute_group **groups;
+ int groupcounter;
+ struct attribute_group legacy_scan_el_group;
+ struct attribute_group legacy_buffer_group;
+ void *bounce_buffer;
+ size_t bounce_buffer_size;
+
+ unsigned int scan_index_timestamp;
+ clockid_t clock_id;
+ struct cdev chrdev;
+ unsigned long flags;
+
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *debugfs_dentry;
+ unsigned cached_reg_addr;
+ char read_buf[20];
+ unsigned int read_buf_len;
+#endif
+};
+
+#define to_iio_dev_opaque(_indio_dev) \
+ container_of((_indio_dev), struct iio_dev_opaque, indio_dev)
+
+#endif
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 862ce0019eba..f0ec8a5e5a7a 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -9,14 +9,16 @@
#include <linux/device.h>
#include <linux/cdev.h>
+#include <linux/slab.h>
#include <linux/iio/types.h>
-#include <linux/of.h>
/* IIO TODO LIST */
/*
* Provide means of adjusting timer accuracy.
* Currently assumes nano seconds.
*/
+struct fwnode_reference_args;
+
enum iio_shared_by {
IIO_SEPARATE,
IIO_SHARED_BY_TYPE,
@@ -103,15 +105,16 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev,
/**
* IIO_ENUM_AVAILABLE() - Initialize enum available extended channel attribute
* @_name: Attribute name ("_available" will be appended to the name)
+ * @_shared: Whether the attribute is shared between all channels
* @_e: Pointer to an iio_enum struct
*
* Creates a read only attribute which lists all the available enum items in a
* space separated list. This should usually be used together with IIO_ENUM()
*/
-#define IIO_ENUM_AVAILABLE(_name, _e) \
+#define IIO_ENUM_AVAILABLE(_name, _shared, _e) \
{ \
.name = (_name "_available"), \
- .shared = IIO_SHARED_BY_TYPE, \
+ .shared = _shared, \
.read = iio_enum_available_read, \
.private = (uintptr_t)(_e), \
}
@@ -127,8 +130,7 @@ struct iio_mount_matrix {
ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
const struct iio_chan_spec *chan, char *buf);
-int iio_read_mount_matrix(struct device *dev, const char *propname,
- struct iio_mount_matrix *matrix);
+int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix);
typedef const struct iio_mount_matrix *
(iio_get_mount_matrix_t)(const struct iio_dev *indio_dev,
@@ -313,9 +315,55 @@ static inline bool iio_channel_has_available(const struct iio_chan_spec *chan,
}
s64 iio_get_time_ns(const struct iio_dev *indio_dev);
-unsigned int iio_get_time_res(const struct iio_dev *indio_dev);
-/* Device operating modes */
+/*
+ * Device operating modes
+ * @INDIO_DIRECT_MODE: There is an access to either:
+ * a) The last single value available for devices that do not provide
+ * on-demand reads.
+ * b) A new value after performing an on-demand read otherwise.
+ * On most devices, this is a single-shot read. On some devices with data
+ * streams without an 'on-demand' function, this might also be the 'last value'
+ * feature. Above all, this mode internally means that we are not in any of the
+ * other modes, and sysfs reads should work.
+ * Device drivers should inform the core if they support this mode.
+ * @INDIO_BUFFER_TRIGGERED: Common mode when dealing with kfifo buffers.
+ * It indicates that an explicit trigger is required. This requests the core to
+ * attach a poll function when enabling the buffer, which is indicated by the
+ * _TRIGGERED suffix.
+ * The core will ensure this mode is set when registering a triggered buffer
+ * with iio_triggered_buffer_setup().
+ * @INDIO_BUFFER_SOFTWARE: Another kfifo buffer mode, but not event triggered.
+ * No poll function can be attached because there is no triggered infrastructure
+ * we can use to cause capture. There is a kfifo that the driver will fill, but
+ * not "only one scan at a time". Typically, hardware will have a buffer that
+ * can hold multiple scans. Software may read one or more scans at a single time
+ * and push the available data to a kfifo. This means the core will not attach
+ * any poll function when enabling the buffer.
+ * The core will ensure this mode is set when registering a simple kfifo buffer
+ * with devm_iio_kfifo_buffer_setup().
+ * @INDIO_BUFFER_HARDWARE: For specific hardware, if unsure do not use this mode.
+ * Same as above but this time the buffer is not a kfifo where we have direct
+ * access to the data. Instead, the consumer driver must access the data through
+ * non software visible channels (or DMA when there is no demux possible in
+ * software).
+ * The core will ensure this mode is set when registering a dmaengine buffer
+ * with devm_iio_dmaengine_buffer_setup().
+ * @INDIO_EVENT_TRIGGERED: Very unusual mode.
+ * Triggers usually refer to an external event which will start data capture.
+ * Here it is kind of the opposite: a particular state of the data might
+ * produce an event. We don't necessarily have access to the data itself,
+ * but to the event produced. For example, this
+ * can be a threshold detector. The internal path of this mode is very close to
+ * the INDIO_BUFFER_TRIGGERED mode.
+ * The core will ensure this mode is set when registering a triggered event.
+ * @INDIO_HARDWARE_TRIGGERED: Very unusual mode.
+ * Here, triggers can result in data capture and can be routed to multiple
+ * hardware components, which make them close to regular triggers in the way
+ * they must be managed by the core, but without the entire interrupts/poll
+ * functions burden. Interrupts are irrelevant as the data flow is hardware
+ * mediated and distributed.
+ */
#define INDIO_DIRECT_MODE 0x01
#define INDIO_BUFFER_TRIGGERED 0x02
#define INDIO_BUFFER_SOFTWARE 0x04
@@ -362,6 +410,8 @@ struct iio_trigger; /* forward declaration */
* and max. For lists, all possible values are enumerated.
* @write_raw: function to write a value to the device.
* Parameters are the same as for read_raw.
+ * @read_label: function to request label name for a specified label,
+ * for better channel identification.
* @write_raw_get_fmt: callback function to query the expected
* format/precision. If not set by the driver, write_raw
* returns IIO_VAL_INT_PLUS_MICRO.
@@ -379,6 +429,8 @@ struct iio_trigger; /* forward declaration */
* provide a custom of_xlate function that reads the
* *args* and returns the appropriate index in registered
* IIO channels array.
+ * @fwnode_xlate: fwnode based function pointer to obtain channel specifier index.
+ * Functionally the same as @of_xlate.
* @hwfifo_set_watermark: function pointer to set the current hardware
* fifo watermark level; see hwfifo_* entries in
* Documentation/ABI/testing/sysfs-bus-iio for details on
@@ -420,6 +472,10 @@ struct iio_info {
int val2,
long mask);
+ int (*read_label)(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ char *label);
+
int (*write_raw_get_fmt)(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
long mask);
@@ -454,8 +510,8 @@ struct iio_info {
int (*debugfs_reg_access)(struct iio_dev *indio_dev,
unsigned reg, unsigned writeval,
unsigned *readval);
- int (*of_xlate)(struct iio_dev *indio_dev,
- const struct of_phandle_args *iiospec);
+ int (*fwnode_xlate)(struct iio_dev *indio_dev,
+ const struct fwnode_reference_args *iiospec);
int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned val);
int (*hwfifo_flush_to_buffer)(struct iio_dev *indio_dev,
unsigned count);
@@ -482,59 +538,41 @@ struct iio_buffer_setup_ops {
/**
* struct iio_dev - industrial I/O device
- * @id: [INTERN] used to identify device internally
- * @driver_module: [INTERN] used to make it harder to undercut users
- * @modes: [DRIVER] operating modes supported by device
- * @currentmode: [DRIVER] current operating mode
+ * @modes: [DRIVER] bitmask listing all the operating modes
+ * supported by the IIO device. This list should be
+ * initialized before registering the IIO device. It can
+ * also be filled in by the IIO core, as a result of
+ * enabling particular features in the driver
+ * (see iio_triggered_event_setup()).
* @dev: [DRIVER] device structure, should be assigned a parent
* and owner
- * @event_interface: [INTERN] event chrdevs associated with interrupt lines
* @buffer: [DRIVER] any buffer present
- * @buffer_list: [INTERN] list of all buffers currently attached
* @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux
- * @mlock: [DRIVER] lock used to prevent simultaneous device state
+ * @mlock: [INTERN] lock used to prevent simultaneous device state
* changes
* @available_scan_masks: [DRIVER] optional array of allowed bitmasks
* @masklength: [INTERN] the length of the mask established from
* channels
* @active_scan_mask: [INTERN] union of all scan masks requested by buffers
* @scan_timestamp: [INTERN] set if any buffers have requested timestamp
- * @scan_index_timestamp:[INTERN] cache of the index to the timestamp
* @trig: [INTERN] current device trigger (buffer modes)
- * @trig_readonly: [INTERN] mark the current trigger immutable
* @pollfunc: [DRIVER] function run on trigger being received
* @pollfunc_event: [DRIVER] function run on events trigger being received
* @channels: [DRIVER] channel specification structure table
* @num_channels: [DRIVER] number of channels specified in @channels.
- * @channel_attr_list: [INTERN] keep track of automatically created channel
- * attributes
- * @chan_attr_group: [INTERN] group for all attrs in base directory
* @name: [DRIVER] name of the device.
* @label: [DRIVER] unique name to identify which device this is
* @info: [DRIVER] callbacks and constant info from driver
- * @clock_id: [INTERN] timestamping clock posix identifier
- * @info_exist_lock: [INTERN] lock to prevent use during removal
* @setup_ops: [DRIVER] callbacks to call before and after buffer
* enable/disable
- * @chrdev: [INTERN] associated character device
- * @groups: [INTERN] attribute groups
- * @groupcounter: [INTERN] index of next attribute group
- * @flags: [INTERN] file ops related flags including busy flag.
- * @debugfs_dentry: [INTERN] device specific debugfs dentry.
- * @cached_reg_addr: [INTERN] cached register address for debugfs reads.
+ * @priv: [DRIVER] reference to driver's private information
+ * **MUST** be accessed **ONLY** via iio_priv() helper
*/
struct iio_dev {
- int id;
- struct module *driver_module;
-
int modes;
- int currentmode;
struct device dev;
- struct iio_event_interface *event_interface;
-
struct iio_buffer *buffer;
- struct list_head buffer_list;
int scan_bytes;
struct mutex mlock;
@@ -542,35 +580,25 @@ struct iio_dev {
unsigned masklength;
const unsigned long *active_scan_mask;
bool scan_timestamp;
- unsigned scan_index_timestamp;
struct iio_trigger *trig;
- bool trig_readonly;
struct iio_poll_func *pollfunc;
struct iio_poll_func *pollfunc_event;
struct iio_chan_spec const *channels;
int num_channels;
- struct list_head channel_attr_list;
- struct attribute_group chan_attr_group;
const char *name;
const char *label;
const struct iio_info *info;
- clockid_t clock_id;
- struct mutex info_exist_lock;
const struct iio_buffer_setup_ops *setup_ops;
- struct cdev chrdev;
-#define IIO_MAX_GROUPS 6
- const struct attribute_group *groups[IIO_MAX_GROUPS + 1];
- int groupcounter;
- unsigned long flags;
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *debugfs_dentry;
- unsigned cached_reg_addr;
-#endif
+ void *priv;
};
+int iio_device_id(struct iio_dev *indio_dev);
+int iio_device_get_current_mode(struct iio_dev *indio_dev);
+bool iio_buffer_enabled(struct iio_dev *indio_dev);
+
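With currentmode now private, drivers use the accessors just declared, or better, claim direct mode around single-shot reads. A sketch of the common read_raw pattern (my_read_sample is a hypothetical helper):

	ret = iio_device_claim_direct_mode(indio_dev);
	if (ret)
		return ret;	/* -EBUSY if a buffer is currently enabled */

	ret = my_read_sample(st, chan->address, val);	/* hypothetical */

	iio_device_release_direct_mode(indio_dev);
	return ret ? ret : IIO_VAL_INT;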
const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
/**
@@ -591,17 +619,13 @@ void iio_device_unregister(struct iio_dev *indio_dev);
* calls iio_device_register() internally. Refer to that function for more
* information.
*
- * If an iio_dev registered with this function needs to be unregistered
- * separately, devm_iio_device_unregister() must be used.
- *
* RETURNS:
* 0 on success, negative error number on failure.
*/
#define devm_iio_device_register(dev, indio_dev) \
- __devm_iio_device_register((dev), (indio_dev), THIS_MODULE);
+ __devm_iio_device_register((dev), (indio_dev), THIS_MODULE)
int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
struct module *this_mod);
-void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev);
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
int iio_device_claim_direct_mode(struct iio_dev *indio_dev);
void iio_device_release_direct_mode(struct iio_dev *indio_dev);
@@ -618,14 +642,8 @@ static inline void iio_device_put(struct iio_dev *indio_dev)
put_device(&indio_dev->dev);
}
-/**
- * iio_device_get_clock() - Retrieve current timestamping clock for the device
- * @indio_dev: IIO device structure containing the device
- */
-static inline clockid_t iio_device_get_clock(const struct iio_dev *indio_dev)
-{
- return indio_dev->clock_id;
-}
+clockid_t iio_device_get_clock(const struct iio_dev *indio_dev);
+int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id);
/**
* dev_to_iio_dev() - Get IIO device struct from a device struct
@@ -649,6 +667,26 @@ static inline struct iio_dev *iio_device_get(struct iio_dev *indio_dev)
return indio_dev ? dev_to_iio_dev(get_device(&indio_dev->dev)) : NULL;
}
+/**
+ * iio_device_set_parent() - assign parent device to the IIO device object
+ * @indio_dev: IIO device structure
+ * @parent: reference to parent device object
+ *
+ * This utility must be called between IIO device allocation
+ * (via devm_iio_device_alloc()) & IIO device registration
+ * (via iio_device_register() or devm_iio_device_register()).
+ * By default, the device allocation will also assign a parent device to
+ * the IIO device object. In cases where devm_iio_device_alloc() is used,
+ * sometimes the parent device must be different from the device used to
+ * manage the allocation.
+ * In that case, this helper should be used to change the parent, hence the
+ * requirement to call this between allocation & registration.
+ **/
+static inline void iio_device_set_parent(struct iio_dev *indio_dev,
+ struct device *parent)
+{
+ indio_dev->dev.parent = parent;
+}
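A minimal sketch of the required ordering, assuming a hypothetical MFD cell driver (all foo_* names are illustrative):

static int foo_cell_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;

	/* Allocation binds the devm lifetime and default parent to &pdev->dev */
	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct foo_state));
	if (!indio_dev)
		return -ENOMEM;

	/* Re-parent to the MFD device before registration */
	iio_device_set_parent(indio_dev, pdev->dev.parent);

	/* ... fill in indio_dev->name, info and channels here ... */

	return devm_iio_device_register(&pdev->dev, indio_dev);
}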
/**
* iio_device_set_drvdata() - Set device driver data
@@ -669,54 +707,41 @@ static inline void iio_device_set_drvdata(struct iio_dev *indio_dev, void *data)
*
* Returns the data previously set with iio_device_set_drvdata()
*/
-static inline void *iio_device_get_drvdata(struct iio_dev *indio_dev)
+static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev)
{
return dev_get_drvdata(&indio_dev->dev);
}
-/* Can we make this smaller? */
-#define IIO_ALIGN L1_CACHE_BYTES
-struct iio_dev *iio_device_alloc(int sizeof_priv);
+/*
+ * Used to ensure the iio_priv() structure is aligned so that it can in
+ * turn include IIO_DMA_MINALIGN'd elements such as buffers, which
+ * must not share cachelines with the rest of the structure, thus making
+ * them safe for use with non-coherent DMA.
+ */
+#define IIO_DMA_MINALIGN ARCH_KMALLOC_MINALIGN
+struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv);
+/* The information at the returned address is guaranteed to be cacheline aligned */
static inline void *iio_priv(const struct iio_dev *indio_dev)
{
- return (char *)indio_dev + ALIGN(sizeof(struct iio_dev), IIO_ALIGN);
-}
-
-static inline struct iio_dev *iio_priv_to_dev(void *priv)
-{
- return (struct iio_dev *)((char *)priv -
- ALIGN(sizeof(struct iio_dev), IIO_ALIGN));
+ return indio_dev->priv;
}
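A sketch of how a driver is expected to use this guarantee (struct and field names hypothetical): buffers tagged __aligned(IIO_DMA_MINALIGN) at the end of the iio_priv() structure start on their own cacheline and are therefore safe for non-coherent DMA.

struct bar_state {
	struct spi_device *spi;
	struct mutex lock;
	/* DMA-safe: cannot share a cacheline with the fields above */
	u8 rx_buf[16] __aligned(IIO_DMA_MINALIGN);
};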
void iio_device_free(struct iio_dev *indio_dev);
-int devm_iio_device_match(struct device *dev, void *res, void *data);
-struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv);
-void devm_iio_device_free(struct device *dev, struct iio_dev *indio_dev);
-struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
- const char *fmt, ...);
-void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig);
-
-/**
- * iio_buffer_enabled() - helper function to test if the buffer is enabled
- * @indio_dev: IIO device structure for device
- **/
-static inline bool iio_buffer_enabled(struct iio_dev *indio_dev)
-{
- return indio_dev->currentmode
- & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE |
- INDIO_BUFFER_SOFTWARE);
-}
-
+struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv);
+
+#define devm_iio_trigger_alloc(parent, fmt, ...) \
+ __devm_iio_trigger_alloc((parent), THIS_MODULE, (fmt), ##__VA_ARGS__)
+__printf(3, 4)
+struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent,
+ struct module *this_mod,
+ const char *fmt, ...);
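A sketch of the devm trigger flow with the new macro (bar_trigger_ops is a hypothetical ops table); THIS_MODULE is captured at the allocation site by the macro expansion:

static int bar_setup_trigger(struct device *dev, struct iio_dev *indio_dev)
{
	struct iio_trigger *trig;

	trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
				      iio_device_id(indio_dev));
	if (!trig)
		return -ENOMEM;

	trig->ops = &bar_trigger_ops;
	iio_trigger_set_drvdata(trig, indio_dev);

	return devm_iio_trigger_register(dev, trig);
}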
/**
* iio_get_debugfs_dentry() - helper function to get the debugfs_dentry
* @indio_dev: IIO device structure for device
**/
#if defined(CONFIG_DEBUG_FS)
-static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
-{
- return indio_dev->debugfs_dentry;
-}
+struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev);
#else
static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
{
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
index d2fcf45b4cef..515ca09764fe 100644
--- a/include/linux/iio/imu/adis.h
+++ b/include/linux/iio/imu/adis.h
@@ -20,7 +20,6 @@
#define ADIS_REG_PAGE_ID 0x00
struct adis;
-struct adis_burst;
/**
* struct adis_timeouts - ADIS chip variant timeouts
@@ -33,6 +32,7 @@ struct adis_timeout {
u16 sw_reset_ms;
u16 self_test_ms;
};
+
/**
* struct adis_data - ADIS chip variant specific data
* @read_delay: SPI delay for read operations in us
@@ -41,9 +41,23 @@ struct adis_timeout {
* @glob_cmd_reg: Register address of the GLOB_CMD register
* @msc_ctrl_reg: Register address of the MSC_CTRL register
* @diag_stat_reg: Register address of the DIAG_STAT register
- * @status_error_msgs: Array of error messgaes
- * @status_error_mask:
+ * @prod_id_reg: Register address of the PROD_ID register
+ * @prod_id: Product ID code that should be expected when reading @prod_id_reg
+ * @self_test_mask: Bitmask of supported self-test operations
+ * @self_test_reg: Register address to request self test command
+ * @self_test_no_autoclear: True if device's self-test needs clear of ctrl reg
+ * @status_error_msgs: Array of error messages
+ * @status_error_mask: Bitmask of errors supported by the device
* @timeouts: Chip specific delays
+ * @enable_irq: Hook for ADIS devices that have a special IRQ enable/disable
+ * @unmasked_drdy: True for devices that cannot mask/unmask the data ready pin
+ * @has_paging: True if ADIS device has paged registers
+ * @burst_reg_cmd: Register command that triggers burst
+ * @burst_len: Burst size in the SPI RX buffer. If @burst_max_len is defined,
+ * this should be the minimum size supported by the device.
+ * @burst_max_len: Holds the maximum burst size when the device supports
+ * more than one burst mode with different sizes
+ * @burst_max_speed_hz: Maximum SPI speed that can be used in burst mode
*/
struct adis_data {
unsigned int read_delay;
@@ -53,8 +67,12 @@ struct adis_data {
unsigned int glob_cmd_reg;
unsigned int msc_ctrl_reg;
unsigned int diag_stat_reg;
+ unsigned int prod_id_reg;
+
+ unsigned int prod_id;
unsigned int self_test_mask;
+ unsigned int self_test_reg;
bool self_test_no_autoclear;
const struct adis_timeout *timeouts;
@@ -62,29 +80,63 @@ struct adis_data {
unsigned int status_error_mask;
int (*enable_irq)(struct adis *adis, bool enable);
+ bool unmasked_drdy;
bool has_paging;
+
+ unsigned int burst_reg_cmd;
+ unsigned int burst_len;
+ unsigned int burst_max_len;
+ unsigned int burst_max_speed_hz;
};
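For orientation, a hypothetical chip-variant table using the new fields; every address and value below is illustrative (not taken from a real part), and bar_timeouts/bar_status_error_msgs are assumed to be defined elsewhere:

static const struct adis_data bar_data = {
	.glob_cmd_reg = 0x68,
	.diag_stat_reg = 0x08,
	.prod_id_reg = 0x7e,
	.prod_id = 0x4060,
	.self_test_reg = 0x50,
	.self_test_mask = BIT(2),
	.timeouts = &bar_timeouts,
	.burst_reg_cmd = 0x7c00,
	.burst_len = 30,		/* minimum supported burst size */
	.burst_max_len = 46,		/* larger optional burst mode */
	.burst_max_speed_hz = 1000000,
	.status_error_msgs = bar_status_error_msgs,
	.status_error_mask = GENMASK(7, 0),
};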
+/**
+ * struct adis - ADIS device instance data
+ * @spi: Reference to SPI device which owns this ADIS IIO device
+ * @trig: IIO trigger object data
+ * @data: ADIS chip variant specific data
+ * @burst: ADIS burst transfer information
+ * @burst_extra_len: Burst extra length. Should only be used by devices that can
+ * dynamically change their burst mode length.
+ * @state_lock: Lock used by the device to protect state
+ * @msg: SPI message object
+ * @xfer: SPI transfer objects to be used for a @msg
+ * @current_page: Some ADIS devices have paged registers; this selects the current page
+ * @irq_flag: IRQ handling flags as passed to request_irq()
+ * @buffer: Data buffer for information read from the device
+ * @tx: DMA safe TX buffer for SPI transfers
+ * @rx: DMA safe RX buffer for SPI transfers
+ */
struct adis {
struct spi_device *spi;
struct iio_trigger *trig;
const struct adis_data *data;
- struct adis_burst *burst;
-
+ unsigned int burst_extra_len;
+ /**
+ * The state_lock is meant to be used during operations that require
+ * a sequence of SPI R/W in order to protect the SPI transfer
+ * information (fields 'xfer', 'msg' & 'current_page') between
+ * potential concurrent accesses.
+ * This lock is used by all "adis_{functions}" that have to read/write
+ * registers. These functions also have unlocked variants
+ * (see "__adis_{functions}"), which don't hold this lock.
+ * This allows users of the ADIS library to group SPI R/W operations
+ * in their drivers, but they must then manage this lock themselves.
+ */
struct mutex state_lock;
struct spi_message msg;
struct spi_transfer *xfer;
unsigned int current_page;
+ unsigned long irq_flag;
void *buffer;
- uint8_t tx[10] ____cacheline_aligned;
- uint8_t rx[4];
+ u8 tx[10] ____cacheline_aligned;
+ u8 rx[4];
};
int adis_init(struct adis *adis, struct iio_dev *indio_dev,
- struct spi_device *spi, const struct adis_data *data);
+ struct spi_device *spi, const struct adis_data *data);
int __adis_reset(struct adis *adis);
/**
@@ -105,9 +157,9 @@ static inline int adis_reset(struct adis *adis)
}
int __adis_write_reg(struct adis *adis, unsigned int reg,
- unsigned int val, unsigned int size);
+ unsigned int val, unsigned int size);
int __adis_read_reg(struct adis *adis, unsigned int reg,
- unsigned int *val, unsigned int size);
+ unsigned int *val, unsigned int size);
/**
* __adis_write_reg_8() - Write single byte to a register (unlocked)
@@ -116,7 +168,7 @@ int __adis_read_reg(struct adis *adis, unsigned int reg,
* @value: The value to write
*/
static inline int __adis_write_reg_8(struct adis *adis, unsigned int reg,
- uint8_t val)
+ u8 val)
{
return __adis_write_reg(adis, reg, val, 1);
}
@@ -128,7 +180,7 @@ static inline int __adis_write_reg_8(struct adis *adis, unsigned int reg,
* @value: Value to be written
*/
static inline int __adis_write_reg_16(struct adis *adis, unsigned int reg,
- uint16_t val)
+ u16 val)
{
return __adis_write_reg(adis, reg, val, 2);
}
@@ -140,7 +192,7 @@ static inline int __adis_write_reg_16(struct adis *adis, unsigned int reg,
* @value: Value to be written
*/
static inline int __adis_write_reg_32(struct adis *adis, unsigned int reg,
- uint32_t val)
+ u32 val)
{
return __adis_write_reg(adis, reg, val, 4);
}
@@ -152,7 +204,7 @@ static inline int __adis_write_reg_32(struct adis *adis, unsigned int reg,
* @val: The value read back from the device
*/
static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg,
- uint16_t *val)
+ u16 *val)
{
unsigned int tmp;
int ret;
@@ -171,7 +223,7 @@ static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg,
* @val: The value read back from the device
*/
static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg,
- uint32_t *val)
+ u32 *val)
{
unsigned int tmp;
int ret;
@@ -191,7 +243,7 @@ static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg,
* @size: The size of the @value (in bytes)
*/
static inline int adis_write_reg(struct adis *adis, unsigned int reg,
- unsigned int val, unsigned int size)
+ unsigned int val, unsigned int size)
{
int ret;
@@ -210,7 +262,7 @@ static inline int adis_write_reg(struct adis *adis, unsigned int reg,
* @size: The size of the @val buffer
*/
static int adis_read_reg(struct adis *adis, unsigned int reg,
- unsigned int *val, unsigned int size)
+ unsigned int *val, unsigned int size)
{
int ret;
@@ -228,7 +280,7 @@ static int adis_read_reg(struct adis *adis, unsigned int reg,
* @value: The value to write
*/
static inline int adis_write_reg_8(struct adis *adis, unsigned int reg,
- uint8_t val)
+ u8 val)
{
return adis_write_reg(adis, reg, val, 1);
}
@@ -240,7 +292,7 @@ static inline int adis_write_reg_8(struct adis *adis, unsigned int reg,
* @value: Value to be written
*/
static inline int adis_write_reg_16(struct adis *adis, unsigned int reg,
- uint16_t val)
+ u16 val)
{
return adis_write_reg(adis, reg, val, 2);
}
@@ -252,7 +304,7 @@ static inline int adis_write_reg_16(struct adis *adis, unsigned int reg,
* @value: Value to be written
*/
static inline int adis_write_reg_32(struct adis *adis, unsigned int reg,
- uint32_t val)
+ u32 val)
{
return adis_write_reg(adis, reg, val, 4);
}
@@ -264,7 +316,7 @@ static inline int adis_write_reg_32(struct adis *adis, unsigned int reg,
* @val: The value read back from the device
*/
static inline int adis_read_reg_16(struct adis *adis, unsigned int reg,
- uint16_t *val)
+ u16 *val)
{
unsigned int tmp;
int ret;
@@ -283,7 +335,7 @@ static inline int adis_read_reg_16(struct adis *adis, unsigned int reg,
* @val: The value read back from the device
*/
static inline int adis_read_reg_32(struct adis *adis, unsigned int reg,
- uint32_t *val)
+ u32 *val)
{
unsigned int tmp;
int ret;
@@ -295,8 +347,64 @@ static inline int adis_read_reg_32(struct adis *adis, unsigned int reg,
return ret;
}
+int __adis_update_bits_base(struct adis *adis, unsigned int reg, const u32 mask,
+ const u32 val, u8 size);
+/**
+ * adis_update_bits_base() - ADIS Update bits function - Locked version
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ * @size: Size of the register to update
+ *
+ * Updates the desired bits of @reg in accordance with @mask and @val.
+ */
+static inline int adis_update_bits_base(struct adis *adis, unsigned int reg,
+ const u32 mask, const u32 val, u8 size)
+{
+ int ret;
+
+ mutex_lock(&adis->state_lock);
+ ret = __adis_update_bits_base(adis, reg, mask, val, size);
+ mutex_unlock(&adis->state_lock);
+ return ret;
+}
+
+/**
+ * adis_update_bits() - Wrapper macro for adis_update_bits_base() - Locked version
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ *
+ * This macro evaluates the size of @val at compile time and calls
+ * adis_update_bits_base() accordingly. Be aware that using MACROS/DEFINES for
+ * @val can lead to undesired behavior if the register to update is 16 bits.
+ */
+#define adis_update_bits(adis, reg, mask, val) ({ \
+ BUILD_BUG_ON(sizeof(val) != 2 && sizeof(val) != 4); \
+ adis_update_bits_base(adis, reg, mask, val, sizeof(val)); \
+})
+
+/**
+ * __adis_update_bits() - Wrapper macro for __adis_update_bits_base() - Unlocked version
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ *
+ * This macro evaluates the size of @val at compile time and calls
+ * __adis_update_bits_base() accordingly. Be aware that using MACROS/DEFINES for
+ * @val can lead to undesired behavior if the register to update is 16 bits.
+ */
+#define __adis_update_bits(adis, reg, mask, val) ({ \
+ BUILD_BUG_ON(sizeof(val) != 2 && sizeof(val) != 4); \
+ __adis_update_bits_base(adis, reg, mask, val, sizeof(val)); \
+})
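A short sketch of the pitfall flagged above (register and mask defines hypothetical): passing a typed u16 variable makes sizeof(val) == 2, so the 16-bit update path is chosen at compile time rather than by the value of a bare define.

#include <linux/bitfield.h>

#define BAR_REG_FILTER_CTRL	0x38		/* illustrative address */
#define BAR_FILTER_MSK		GENMASK(2, 0)	/* illustrative mask */

static int bar_set_filter(struct adis *adis, unsigned int filt)
{
	u16 val = FIELD_PREP(BAR_FILTER_MSK, filt);

	return adis_update_bits(adis, BAR_REG_FILTER_CTRL, BAR_FILTER_MSK, val);
}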
+
int adis_enable_irq(struct adis *adis, bool enable);
int __adis_check_status(struct adis *adis);
+int __adis_initial_startup(struct adis *adis);
static inline int adis_check_status(struct adis *adis)
{
@@ -309,11 +417,31 @@ static inline int adis_check_status(struct adis *adis)
return ret;
}
-int adis_initial_startup(struct adis *adis);
+/* locked version of __adis_initial_startup() */
+static inline int adis_initial_startup(struct adis *adis)
+{
+ int ret;
+
+ mutex_lock(&adis->state_lock);
+ ret = __adis_initial_startup(adis);
+ mutex_unlock(&adis->state_lock);
+
+ return ret;
+}
+
+static inline void adis_dev_lock(struct adis *adis)
+{
+ mutex_lock(&adis->state_lock);
+}
+
+static inline void adis_dev_unlock(struct adis *adis)
+{
+ mutex_unlock(&adis->state_lock);
+}
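A sketch of pairing the lock helpers with the unlocked __adis variants (register addresses hypothetical): holding the lock across both reads makes the pair atomic with respect to other register access.

static int bar_read_pair(struct adis *adis, u16 *a, u16 *b)
{
	int ret;

	adis_dev_lock(adis);
	ret = __adis_read_reg_16(adis, 0x02, a);
	if (ret)
		goto out_unlock;
	ret = __adis_read_reg_16(adis, 0x04, b);
out_unlock:
	adis_dev_unlock(adis);
	return ret;
}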
int adis_single_conversion(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan, unsigned int error_mask,
- int *val);
+ const struct iio_chan_spec *chan,
+ unsigned int error_mask, int *val);
#define ADIS_VOLTAGE_CHAN(addr, si, chan, name, info_all, bits) { \
.type = IIO_VOLTAGE, \
@@ -362,7 +490,7 @@ int adis_single_conversion(struct iio_dev *indio_dev,
.modified = 1, \
.channel2 = IIO_MOD_ ## mod, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- info_sep, \
+ (info_sep), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.info_mask_shared_by_all = info_all, \
.address = (addr), \
@@ -389,52 +517,30 @@ int adis_single_conversion(struct iio_dev *indio_dev,
#ifdef CONFIG_IIO_ADIS_LIB_BUFFER
-/**
- * struct adis_burst - ADIS data for burst transfers
- * @en burst mode enabled
- * @reg_cmd register command that triggers burst
- * @extra_len extra length to account in the SPI RX buffer
- */
-struct adis_burst {
- bool en;
- unsigned int reg_cmd;
- unsigned int extra_len;
-};
-
-int adis_setup_buffer_and_trigger(struct adis *adis,
- struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *));
-void adis_cleanup_buffer_and_trigger(struct adis *adis,
- struct iio_dev *indio_dev);
+int
+devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
+ irq_handler_t trigger_handler);
-int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev);
-void adis_remove_trigger(struct adis *adis);
+int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev);
int adis_update_scan_mode(struct iio_dev *indio_dev,
- const unsigned long *scan_mask);
+ const unsigned long *scan_mask);
#else /* CONFIG_IIO_BUFFER */
-static inline int adis_setup_buffer_and_trigger(struct adis *adis,
- struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *))
+static inline int
+devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
+ irq_handler_t trigger_handler)
{
return 0;
}
-static inline void adis_cleanup_buffer_and_trigger(struct adis *adis,
- struct iio_dev *indio_dev)
-{
-}
-
-static inline int adis_probe_trigger(struct adis *adis,
- struct iio_dev *indio_dev)
+static inline int devm_adis_probe_trigger(struct adis *adis,
+ struct iio_dev *indio_dev)
{
return 0;
}
-static inline void adis_remove_trigger(struct adis *adis)
-{
-}
-
#define adis_update_scan_mode NULL
#endif /* CONFIG_IIO_BUFFER */
@@ -442,7 +548,8 @@ static inline void adis_remove_trigger(struct adis *adis)
#ifdef CONFIG_DEBUG_FS
int adis_debugfs_reg_access(struct iio_dev *indio_dev,
- unsigned int reg, unsigned int writeval, unsigned int *readval);
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval);
#else
diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h
index 764659e01b68..8a83fb58232d 100644
--- a/include/linux/iio/kfifo_buf.h
+++ b/include/linux/iio/kfifo_buf.h
@@ -3,12 +3,19 @@
#define __LINUX_IIO_KFIFO_BUF_H__
struct iio_buffer;
+struct iio_buffer_setup_ops;
+struct iio_dev;
struct device;
struct iio_buffer *iio_kfifo_allocate(void);
void iio_kfifo_free(struct iio_buffer *r);
-struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev);
-void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r);
+int devm_iio_kfifo_buffer_setup_ext(struct device *dev,
+ struct iio_dev *indio_dev,
+ const struct iio_buffer_setup_ops *setup_ops,
+ const struct attribute **buffer_attrs);
+
+#define devm_iio_kfifo_buffer_setup(dev, indio_dev, setup_ops) \
+ devm_iio_kfifo_buffer_setup_ext((dev), (indio_dev), (setup_ops), NULL)
#endif
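A probe-time sketch of the new devm helper (the ops table name is hypothetical; the _ext variant additionally takes an array of buffer attributes):

static int bar_probe_buffer(struct device *dev, struct iio_dev *indio_dev)
{
	/* Attaches a devm-managed kfifo as the device's data buffer */
	return devm_iio_kfifo_buffer_setup(dev, indio_dev, &bar_setup_ops);
}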
diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h
index b532c875bc24..e51fba66de4b 100644
--- a/include/linux/iio/sysfs.h
+++ b/include/linux/iio/sysfs.h
@@ -9,6 +9,7 @@
#ifndef _INDUSTRIAL_IO_SYSFS_H_
#define _INDUSTRIAL_IO_SYSFS_H_
+struct iio_buffer;
struct iio_chan_spec;
/**
@@ -17,12 +18,14 @@ struct iio_chan_spec;
* @address: associated register address
* @l: list head for maintaining list of dynamically created attrs
* @c: specification for the underlying channel
+ * @buffer: the IIO buffer to which this attribute belongs (if any)
*/
struct iio_dev_attr {
struct device_attribute dev_attr;
u64 address;
struct list_head l;
struct iio_chan_spec const *c;
+ struct iio_buffer *buffer;
};
#define to_iio_dev_attr(_dev_attr) \
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 84995e2967ac..f6360d9a492d 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -21,7 +21,7 @@ struct iio_trigger;
/**
* struct iio_trigger_ops - operations structure for an iio_trigger.
* @set_trigger_state: switch on/off the trigger on demand
- * @try_reenable: function to reenable the trigger when the
+ * @reenable: function to reenable the trigger when the
* use count is zero (may be NULL)
* @validate_device: function to validate the device when the
* current trigger gets changed.
@@ -31,7 +31,7 @@ struct iio_trigger;
**/
struct iio_trigger_ops {
int (*set_trigger_state)(struct iio_trigger *trig, bool state);
- int (*try_reenable)(struct iio_trigger *trig);
+ void (*reenable)(struct iio_trigger *trig);
int (*validate_device)(struct iio_trigger *trig,
struct iio_dev *indio_dev);
};
@@ -55,6 +55,7 @@ struct iio_trigger_ops {
* @attached_own_device:[INTERN] if we are using our own device as trigger,
* i.e. if we registered a poll function to the same
* device as the one providing the trigger.
+ * @reenable_work: [INTERN] work item used to ensure reenable can sleep.
**/
struct iio_trigger {
const struct iio_trigger_ops *ops;
@@ -74,6 +75,7 @@ struct iio_trigger {
unsigned long pool[BITS_TO_LONGS(CONFIG_IIO_CONSUMERS_PER_TRIGGER)];
struct mutex pool_lock;
bool attached_own_device;
+ struct work_struct reenable_work;
};
@@ -91,13 +93,18 @@ static inline void iio_trigger_put(struct iio_trigger *trig)
static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig)
{
get_device(&trig->dev);
+
+ WARN_ONCE(list_empty(&trig->list),
+ "Getting non-registered iio trigger %s is prohibited\n",
+ trig->name);
+
__module_get(trig->owner);
return trig;
}
/**
- * iio_device_set_drvdata() - Set trigger driver data
+ * iio_trigger_set_drvdata() - Set trigger driver data
* @trig: IIO trigger structure
* @data: Driver specific data
*
@@ -124,16 +131,10 @@ static inline void *iio_trigger_get_drvdata(struct iio_trigger *trig)
* iio_trigger_register() - register a trigger with the IIO core
* @trig_info: trigger to be registered
**/
-#define iio_trigger_register(trig_info) \
- __iio_trigger_register((trig_info), THIS_MODULE)
-int __iio_trigger_register(struct iio_trigger *trig_info,
- struct module *this_mod);
+int iio_trigger_register(struct iio_trigger *trig_info);
-#define devm_iio_trigger_register(dev, trig_info) \
- __devm_iio_trigger_register((dev), (trig_info), THIS_MODULE)
-int __devm_iio_trigger_register(struct device *dev,
- struct iio_trigger *trig_info,
- struct module *this_mod);
+int devm_iio_trigger_register(struct device *dev,
+ struct iio_trigger *trig_info);
/**
* iio_trigger_unregister() - unregister a trigger from the core
@@ -141,9 +142,6 @@ int __devm_iio_trigger_register(struct device *dev,
**/
void iio_trigger_unregister(struct iio_trigger *trig_info);
-void devm_iio_trigger_unregister(struct device *dev,
- struct iio_trigger *trig_info);
-
/**
* iio_trigger_set_immutable() - set an immutable trigger on destination
*
@@ -164,7 +162,13 @@ void iio_trigger_poll_chained(struct iio_trigger *trig);
irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private);
-__printf(1, 2) struct iio_trigger *iio_trigger_alloc(const char *fmt, ...);
+#define iio_trigger_alloc(parent, fmt, ...) \
+ __iio_trigger_alloc((parent), THIS_MODULE, (fmt), ##__VA_ARGS__)
+
+__printf(3, 4)
+struct iio_trigger *__iio_trigger_alloc(struct device *parent,
+ struct module *this_mod,
+ const char *fmt, ...);
void iio_trigger_free(struct iio_trigger *trig);
/**
diff --git a/include/linux/iio/trigger_consumer.h b/include/linux/iio/trigger_consumer.h
index c3c6ba5ec423..2c05dfad88d7 100644
--- a/include/linux/iio/trigger_consumer.h
+++ b/include/linux/iio/trigger_consumer.h
@@ -38,7 +38,7 @@ struct iio_poll_func {
};
-struct iio_poll_func
+__printf(5, 6) struct iio_poll_func
*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
irqreturn_t (*thread)(int irq, void *p),
int type,
@@ -50,11 +50,4 @@ irqreturn_t iio_pollfunc_store_time(int irq, void *p);
void iio_trigger_notify_done(struct iio_trigger *trig);
-/*
- * Two functions for common case where all that happens is a pollfunc
- * is attached and detached from a trigger
- */
-int iio_triggered_buffer_postenable(struct iio_dev *indio_dev);
-int iio_triggered_buffer_predisable(struct iio_dev *indio_dev);
-
#endif
diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h
index 238ad30ce166..7490b05fc5b2 100644
--- a/include/linux/iio/triggered_buffer.h
+++ b/include/linux/iio/triggered_buffer.h
@@ -2,23 +2,37 @@
#ifndef _LINUX_IIO_TRIGGERED_BUFFER_H_
#define _LINUX_IIO_TRIGGERED_BUFFER_H_
+#include <linux/iio/buffer.h>
#include <linux/interrupt.h>
+struct attribute;
struct iio_dev;
struct iio_buffer_setup_ops;
-int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
+int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev,
irqreturn_t (*h)(int irq, void *p),
irqreturn_t (*thread)(int irq, void *p),
- const struct iio_buffer_setup_ops *setup_ops);
+ enum iio_buffer_direction direction,
+ const struct iio_buffer_setup_ops *setup_ops,
+ const struct attribute **buffer_attrs);
void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev);
-int devm_iio_triggered_buffer_setup(struct device *dev,
- struct iio_dev *indio_dev,
- irqreturn_t (*h)(int irq, void *p),
- irqreturn_t (*thread)(int irq, void *p),
- const struct iio_buffer_setup_ops *ops);
-void devm_iio_triggered_buffer_cleanup(struct device *dev,
- struct iio_dev *indio_dev);
+#define iio_triggered_buffer_setup(indio_dev, h, thread, setup_ops) \
+ iio_triggered_buffer_setup_ext((indio_dev), (h), (thread), \
+ IIO_BUFFER_DIRECTION_IN, (setup_ops), \
+ NULL)
+
+int devm_iio_triggered_buffer_setup_ext(struct device *dev,
+ struct iio_dev *indio_dev,
+ irqreturn_t (*h)(int irq, void *p),
+ irqreturn_t (*thread)(int irq, void *p),
+ enum iio_buffer_direction direction,
+ const struct iio_buffer_setup_ops *ops,
+ const struct attribute **buffer_attrs);
+
+#define devm_iio_triggered_buffer_setup(dev, indio_dev, h, thread, setup_ops) \
+ devm_iio_triggered_buffer_setup_ext((dev), (indio_dev), (h), (thread), \
+ IIO_BUFFER_DIRECTION_IN, \
+ (setup_ops), NULL)
#endif
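A sketch of the common input-direction case through the wrapper macro (handler and ops names hypothetical; iio_pollfunc_store_time is the stock timestamping top half from trigger_consumer.h):

static int bar_probe_trigbuf(struct device *dev, struct iio_dev *indio_dev)
{
	return devm_iio_triggered_buffer_setup(dev, indio_dev,
					       iio_pollfunc_store_time,
					       bar_trigger_handler,
					       &bar_buffer_setup_ops);
}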
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index e6fd3645963c..82faa98c719a 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -16,6 +16,9 @@ enum iio_event_info {
IIO_EV_INFO_PERIOD,
IIO_EV_INFO_HIGH_PASS_FILTER_3DB,
IIO_EV_INFO_LOW_PASS_FILTER_3DB,
+ IIO_EV_INFO_TIMEOUT,
+ IIO_EV_INFO_RESET_TIMEOUT,
+ IIO_EV_INFO_TAP2_MIN_DELAY,
};
#define IIO_VAL_INT 1
@@ -23,6 +26,7 @@ enum iio_event_info {
#define IIO_VAL_INT_PLUS_NANO 3
#define IIO_VAL_INT_PLUS_MICRO_DB 4
#define IIO_VAL_INT_MULTIPLE 5
+#define IIO_VAL_INT_64 6 /* 64-bit data, val is lower 32 bits */
#define IIO_VAL_FRACTIONAL 10
#define IIO_VAL_FRACTIONAL_LOG2 11
#define IIO_VAL_CHAR 12
@@ -50,6 +54,7 @@ enum iio_chan_info_enum {
IIO_CHAN_INFO_PHASE,
IIO_CHAN_INFO_HARDWAREGAIN,
IIO_CHAN_INFO_HYSTERESIS,
+ IIO_CHAN_INFO_HYSTERESIS_RELATIVE,
IIO_CHAN_INFO_INT_TIME,
IIO_CHAN_INFO_ENABLE,
IIO_CHAN_INFO_CALIBHEIGHT,
@@ -59,6 +64,8 @@ enum iio_chan_info_enum {
IIO_CHAN_INFO_CALIBEMISSIVITY,
IIO_CHAN_INFO_OVERSAMPLING_RATIO,
IIO_CHAN_INFO_THERMOCOUPLE_TYPE,
+ IIO_CHAN_INFO_CALIBAMBIENT,
+ IIO_CHAN_INFO_ZEROPOINT,
};
#endif /* _IIO_TYPES_H_ */
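A sketch of returning the new IIO_VAL_INT_64 code from a read_raw() callback (the hardware helper is hypothetical): per the comment above, the lower 32 bits go in *val and the upper 32 bits in *val2, and the core reassembles the 64-bit value.

static int bar_read_raw(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val, int *val2, long mask)
{
	u64 sample = bar_hw_read64();	/* hypothetical 64-bit register read */

	*val = lower_32_bits(sample);
	*val2 = upper_32_bits(sample);
	return IIO_VAL_INT_64;
}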
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 1659217e9b60..81708ca0ebc7 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -7,46 +7,55 @@
#ifndef _LINUX_IMA_H
#define _LINUX_IMA_H
+#include <linux/kernel_read_file.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/kexec.h>
+#include <crypto/hash_info.h>
struct linux_binprm;
#ifdef CONFIG_IMA
+extern enum hash_algo ima_get_current_hash_algo(void);
extern int ima_bprm_check(struct linux_binprm *bprm);
extern int ima_file_check(struct file *file, int mask);
-extern void ima_post_create_tmpfile(struct inode *inode);
+extern void ima_post_create_tmpfile(struct user_namespace *mnt_userns,
+ struct inode *inode);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
-extern int ima_load_data(enum kernel_load_data_id id);
-extern int ima_read_file(struct file *file, enum kernel_read_file_id id);
+extern int ima_file_mprotect(struct vm_area_struct *vma, unsigned long prot);
+extern int ima_load_data(enum kernel_load_data_id id, bool contents);
+extern int ima_post_load_data(char *buf, loff_t size,
+ enum kernel_load_data_id id, char *description);
+extern int ima_read_file(struct file *file, enum kernel_read_file_id id,
+ bool contents);
extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
enum kernel_read_file_id id);
-extern void ima_post_path_mknod(struct dentry *dentry);
+extern void ima_post_path_mknod(struct user_namespace *mnt_userns,
+ struct dentry *dentry);
extern int ima_file_hash(struct file *file, char *buf, size_t buf_size);
-extern void ima_kexec_cmdline(const void *buf, int size);
+extern int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size);
+extern void ima_kexec_cmdline(int kernel_fd, const void *buf, int size);
+extern int ima_measure_critical_data(const char *event_label,
+ const char *event_name,
+ const void *buf, size_t buf_len,
+ bool hash, u8 *digest, size_t digest_len);
+
+#ifdef CONFIG_IMA_APPRAISE_BOOTPARAM
+extern void ima_appraise_parse_cmdline(void);
+#else
+static inline void ima_appraise_parse_cmdline(void) {}
+#endif
#ifdef CONFIG_IMA_KEXEC
extern void ima_add_kexec_buffer(struct kimage *image);
#endif
-#if (defined(CONFIG_X86) && defined(CONFIG_EFI)) || defined(CONFIG_S390) \
- || defined(CONFIG_PPC_SECURE_BOOT)
-extern bool arch_ima_get_secureboot(void);
-extern const char * const *arch_get_ima_policy(void);
#else
-static inline bool arch_ima_get_secureboot(void)
-{
- return false;
-}
-
-static inline const char * const *arch_get_ima_policy(void)
+static inline enum hash_algo ima_get_current_hash_algo(void)
{
- return NULL;
+ return HASH_ALGO__LAST;
}
-#endif
-#else
static inline int ima_bprm_check(struct linux_binprm *bprm)
{
return 0;
@@ -57,7 +66,8 @@ static inline int ima_file_check(struct file *file, int mask)
return 0;
}
-static inline void ima_post_create_tmpfile(struct inode *inode)
+static inline void ima_post_create_tmpfile(struct user_namespace *mnt_userns,
+ struct inode *inode)
{
}
@@ -71,12 +81,26 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot)
return 0;
}
-static inline int ima_load_data(enum kernel_load_data_id id)
+static inline int ima_file_mprotect(struct vm_area_struct *vma,
+ unsigned long prot)
+{
+ return 0;
+}
+
+static inline int ima_load_data(enum kernel_load_data_id id, bool contents)
{
return 0;
}
-static inline int ima_read_file(struct file *file, enum kernel_read_file_id id)
+static inline int ima_post_load_data(char *buf, loff_t size,
+ enum kernel_load_data_id id,
+ char *description)
+{
+ return 0;
+}
+
+static inline int ima_read_file(struct file *file, enum kernel_read_file_id id,
+ bool contents)
{
return 0;
}
@@ -87,7 +111,8 @@ static inline int ima_post_read_file(struct file *file, void *buf, loff_t size,
return 0;
}
-static inline void ima_post_path_mknod(struct dentry *dentry)
+static inline void ima_post_path_mknod(struct user_namespace *mnt_userns,
+ struct dentry *dentry)
{
return;
}
@@ -97,9 +122,44 @@ static inline int ima_file_hash(struct file *file, char *buf, size_t buf_size)
return -EOPNOTSUPP;
}
-static inline void ima_kexec_cmdline(const void *buf, int size) {}
+static inline int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void ima_kexec_cmdline(int kernel_fd, const void *buf, int size) {}
+
+static inline int ima_measure_critical_data(const char *event_label,
+ const char *event_name,
+ const void *buf, size_t buf_len,
+ bool hash, u8 *digest,
+ size_t digest_len)
+{
+ return -ENOENT;
+}
+
#endif /* CONFIG_IMA */
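A call sketch for the new measurement hook (event label, event name and buffer are hypothetical; hash=false with a NULL digest is used when the caller does not need the digest back):

static int bar_measure_policy(const void *policy_buf, size_t policy_len)
{
	return ima_measure_critical_data("bar_lsm", "bar_policy_update",
					 policy_buf, policy_len,
					 false, NULL, 0);
}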
+#ifdef CONFIG_HAVE_IMA_KEXEC
+int __init ima_free_kexec_buffer(void);
+int __init ima_get_kexec_buffer(void **addr, size_t *size);
+#endif
+
+#ifdef CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT
+extern bool arch_ima_get_secureboot(void);
+extern const char * const *arch_get_ima_policy(void);
+#else
+static inline bool arch_ima_get_secureboot(void)
+{
+ return false;
+}
+
+static inline const char * const *arch_get_ima_policy(void)
+{
+ return NULL;
+}
+#endif
+
#ifndef CONFIG_IMA_KEXEC
struct kimage;
@@ -123,7 +183,8 @@ static inline void ima_post_key_create_or_update(struct key *keyring,
#ifdef CONFIG_IMA_APPRAISE
extern bool is_ima_appraise_enabled(void);
-extern void ima_inode_post_setattr(struct dentry *dentry);
+extern void ima_inode_post_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry);
extern int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
const void *xattr_value, size_t xattr_value_len);
extern int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name);
@@ -133,7 +194,8 @@ static inline bool is_ima_appraise_enabled(void)
return 0;
}
-static inline void ima_inode_post_setattr(struct dentry *dentry)
+static inline void ima_inode_post_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry)
{
return;
}
diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h
index 00d7e8e919c6..c1c76a70a6ce 100644
--- a/include/linux/indirect_call_wrapper.h
+++ b/include/linux/indirect_call_wrapper.h
@@ -23,15 +23,29 @@
likely(f == f2) ? f2(__VA_ARGS__) : \
INDIRECT_CALL_1(f, f1, __VA_ARGS__); \
})
+#define INDIRECT_CALL_3(f, f3, f2, f1, ...) \
+ ({ \
+ likely(f == f3) ? f3(__VA_ARGS__) : \
+ INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__); \
+ })
+#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) \
+ ({ \
+ likely(f == f4) ? f4(__VA_ARGS__) : \
+ INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__); \
+ })
#define INDIRECT_CALLABLE_DECLARE(f) f
#define INDIRECT_CALLABLE_SCOPE
+#define EXPORT_INDIRECT_CALLABLE(f) EXPORT_SYMBOL(f)
#else
#define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__)
#define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALL_3(f, f3, f2, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) f(__VA_ARGS__)
#define INDIRECT_CALLABLE_DECLARE(f)
#define INDIRECT_CALLABLE_SCOPE static
+#define EXPORT_INDIRECT_CALLABLE(f)
#endif
/*
@@ -48,4 +62,10 @@
#define INDIRECT_CALL_INET(f, f2, f1, ...) f(__VA_ARGS__)
#endif
+#if IS_ENABLED(CONFIG_INET)
+#define INDIRECT_CALL_INET_1(f, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
+#else
+#define INDIRECT_CALL_INET_1(f, f1, ...) f(__VA_ARGS__)
+#endif
+
#endif
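A sketch of how the expanded helpers are used on a hot path (function names hypothetical): the macro compares the pointer against its likely targets so the common case becomes a direct call, avoiding a retpoline-penalized indirect branch.

struct sk_buff;

INDIRECT_CALLABLE_DECLARE(int bar_v6_rcv(struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int bar_v4_rcv(struct sk_buff *skb));

static int bar_deliver(int (*handler)(struct sk_buff *), struct sk_buff *skb)
{
	return INDIRECT_CALL_2(handler, bar_v6_rcv, bar_v4_rcv, skb);
}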
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index c91cf2dee12a..84abb30a3fbb 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -10,11 +10,9 @@ struct inet_hashinfo;
struct inet_diag_handler {
void (*dump)(struct sk_buff *skb,
struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r,
- struct nlattr *bc);
+ const struct inet_diag_req_v2 *r);
- int (*dump_one)(struct sk_buff *in_skb,
- const struct nlmsghdr *nlh,
+ int (*dump_one)(struct netlink_callback *cb,
const struct inet_diag_req_v2 *req);
void (*idiag_get_info)(struct sock *sk,
@@ -35,18 +33,25 @@ struct inet_diag_handler {
__u16 idiag_info_size;
};
+struct bpf_sk_storage_diag;
+struct inet_diag_dump_data {
+ struct nlattr *req_nlas[__INET_DIAG_REQ_MAX];
+#define inet_diag_nla_bc req_nlas[INET_DIAG_REQ_BYTECODE]
+#define inet_diag_nla_bpf_stgs req_nlas[INET_DIAG_REQ_SK_BPF_STORAGES]
+
+ struct bpf_sk_storage_diag *bpf_stg_diag;
+};
+
struct inet_connection_sock;
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
- struct sk_buff *skb, const struct inet_diag_req_v2 *req,
- struct user_namespace *user_ns,
- u32 pid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh, bool net_admin);
+ struct sk_buff *skb, struct netlink_callback *cb,
+ const struct inet_diag_req_v2 *req,
+ u16 nlmsg_flags, bool net_admin);
void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r,
- struct nlattr *bc);
+ const struct inet_diag_req_v2 *r);
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
- struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+ struct netlink_callback *cb,
const struct inet_diag_req_v2 *req);
struct sock *inet_diag_find_one_icsk(struct net *net,
@@ -66,7 +71,13 @@ static inline size_t inet_diag_msg_attrs_size(void)
+ nla_total_size(1) /* INET_DIAG_SKV6ONLY */
#endif
+ nla_total_size(4) /* INET_DIAG_MARK */
- + nla_total_size(4); /* INET_DIAG_CLASS_ID */
+ + nla_total_size(4) /* INET_DIAG_CLASS_ID */
+#ifdef CONFIG_SOCK_CGROUP_DATA
+ + nla_total_size_64bit(sizeof(u64)) /* INET_DIAG_CGROUP_ID */
+#endif
+ + nla_total_size(sizeof(struct inet_diag_sockopt))
+ /* INET_DIAG_SOCKOPT */
+ ;
}
int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
struct inet_diag_msg *r, int ext,
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 3515ca64e638..ddb27fc0ee8c 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -24,6 +24,8 @@ struct ipv4_devconf {
struct in_device {
struct net_device *dev;
+ netdevice_tracker dev_tracker;
+
refcount_t refcnt;
int dead;
struct in_ifaddr __rcu *ifa_list;/* IP ifaddr chain */
@@ -41,7 +43,7 @@ struct in_device {
unsigned long mr_qri; /* Query Response Interval */
unsigned char mr_qrv; /* Query Robustness Variable */
unsigned char mr_gq_running;
- unsigned char mr_ifc_count;
+ u32 mr_ifc_count;
struct timer_list mr_gq_timer; /* general query timer */
struct timer_list mr_ifc_timer; /* interface change timer */
@@ -105,7 +107,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_LOG_MARTIANS(in_dev) IN_DEV_ORCONF((in_dev), LOG_MARTIANS)
#define IN_DEV_PROXY_ARP(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP)
-#define IN_DEV_PROXY_ARP_PVLAN(in_dev) IN_DEV_CONF_GET(in_dev, PROXY_ARP_PVLAN)
+#define IN_DEV_PROXY_ARP_PVLAN(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP_PVLAN)
#define IN_DEV_SHARED_MEDIA(in_dev) IN_DEV_ORCONF((in_dev), SHARED_MEDIA)
#define IN_DEV_TX_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), SEND_REDIRECTS)
#define IN_DEV_SEC_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), \
@@ -126,13 +128,15 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS)))
#define IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) \
- IN_DEV_CONF_GET((in_dev), IGNORE_ROUTES_WITH_LINKDOWN)
+ IN_DEV_ORCONF((in_dev), IGNORE_ROUTES_WITH_LINKDOWN)
#define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER)
-#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_ORCONF((in_dev), ARP_ACCEPT)
+#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ACCEPT)
#define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE)
#define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE)
#define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY)
+#define IN_DEV_ARP_EVICT_NOCARRIER(in_dev) IN_DEV_ANDCONF((in_dev), \
+ ARP_EVICT_NOCARRIER)
struct in_ifaddr {
struct hlist_node hash;
@@ -146,6 +150,7 @@ struct in_ifaddr {
__be32 ifa_broadcast;
unsigned char ifa_scope;
unsigned char ifa_prefixlen;
+ unsigned char ifa_proto;
__u32 ifa_flags;
char ifa_label[IFNAMSIZ];
@@ -178,6 +183,15 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *);
+#ifdef CONFIG_INET
+int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size);
+#else
+static inline int inet_gifconf(struct net_device *dev, char __user *buf,
+ int len, int size)
+{
+ return 0;
+}
+#endif
void devinet_init(void);
struct in_device *inetdev_by_index(struct net *, int);
__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
diff --git a/include/linux/init.h b/include/linux/init.h
index 212fc9e2f691..077d7f93b402 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -47,11 +47,11 @@
/* These are for everybody (although not all archs will actually
discard it in modules) */
-#define __init __section(.init.text) __cold __latent_entropy __noinitretpoline
-#define __initdata __section(.init.data)
-#define __initconst __section(.init.rodata)
-#define __exitdata __section(.exit.data)
-#define __exit_call __used __section(.exitcall.exit)
+#define __init __section(".init.text") __cold __latent_entropy __noinitretpoline
+#define __initdata __section(".init.data")
+#define __initconst __section(".init.rodata")
+#define __exitdata __section(".exit.data")
+#define __exit_call __used __section(".exitcall.exit")
/*
* modpost check for section mismatches during the kernel build.
@@ -70,9 +70,9 @@
*
* The markers follow same syntax rules as __init / __initdata.
*/
-#define __ref __section(.ref.text) noinline
-#define __refdata __section(.ref.data)
-#define __refconst __section(.ref.rodata)
+#define __ref __section(".ref.text") noinline
+#define __refdata __section(".ref.data")
+#define __refconst __section(".ref.rodata")
#ifdef MODULE
#define __exitused
@@ -80,16 +80,16 @@
#define __exitused __used
#endif
-#define __exit __section(.exit.text) __exitused __cold notrace
+#define __exit __section(".exit.text") __exitused __cold notrace
/* Used for MEMORY_HOTPLUG */
-#define __meminit __section(.meminit.text) __cold notrace \
+#define __meminit __section(".meminit.text") __cold notrace \
__latent_entropy
-#define __meminitdata __section(.meminit.data)
-#define __meminitconst __section(.meminit.rodata)
-#define __memexit __section(.memexit.text) __exitused __cold notrace
-#define __memexitdata __section(.memexit.data)
-#define __memexitconst __section(.memexit.rodata)
+#define __meminitdata __section(".meminit.data")
+#define __meminitconst __section(".meminit.rodata")
+#define __memexit __section(".memexit.text") __exitused __cold notrace
+#define __memexitdata __section(".memexit.data")
+#define __memexitconst __section(".memexit.rodata")
/* For assembly routines */
#define __HEAD .section ".head.text","ax"
@@ -134,7 +134,7 @@ static inline initcall_t initcall_from_entry(initcall_entry_t *entry)
extern initcall_entry_t __con_initcall_start[], __con_initcall_end[];
-/* Used for contructor calls. */
+/* Used for constructor calls. */
typedef void (*ctor_fn_t)(void);
struct file_system_type;
@@ -184,19 +184,81 @@ extern bool initcall_debug;
* as KEEP() in the linker script.
*/
+/* Format: <modname>__<counter>_<line>_<fn> */
+#define __initcall_id(fn) \
+ __PASTE(__KBUILD_MODNAME, \
+ __PASTE(__, \
+ __PASTE(__COUNTER__, \
+ __PASTE(_, \
+ __PASTE(__LINE__, \
+ __PASTE(_, fn))))))
+
+/* Format: __<prefix>__<iid><id> */
+#define __initcall_name(prefix, __iid, id) \
+ __PASTE(__, \
+ __PASTE(prefix, \
+ __PASTE(__, \
+ __PASTE(__iid, id))))
+
+#ifdef CONFIG_LTO_CLANG
+/*
+ * With LTO, the compiler doesn't necessarily obey link order for
+ * initcalls. In order to preserve the correct order, we add each
+ * variable into its own section and generate a linker script (in
+ * scripts/link-vmlinux.sh) to specify the order of the sections.
+ */
+#define __initcall_section(__sec, __iid) \
+ #__sec ".init.." #__iid
+
+/*
+ * With LTO, the compiler can rename static functions to avoid
+ * global naming collisions. We use a global stub function for
+ * initcalls to create a stable symbol name whose address can be
+ * taken in inline assembly when PREL32 relocations are used.
+ */
+#define __initcall_stub(fn, __iid, id) \
+ __initcall_name(initstub, __iid, id)
+
+#define __define_initcall_stub(__stub, fn) \
+ int __init __stub(void); \
+ int __init __stub(void) \
+ { \
+ return fn(); \
+ } \
+ __ADDRESSABLE(__stub)
+#else
+#define __initcall_section(__sec, __iid) \
+ #__sec ".init"
+
+#define __initcall_stub(fn, __iid, id) fn
+
+#define __define_initcall_stub(__stub, fn) \
+ __ADDRESSABLE(fn)
+#endif
+
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
-#define ___define_initcall(fn, id, __sec) \
- __ADDRESSABLE(fn) \
- asm(".section \"" #__sec ".init\", \"a\" \n" \
- "__initcall_" #fn #id ": \n" \
- ".long " #fn " - . \n" \
- ".previous \n");
+#define ____define_initcall(fn, __stub, __name, __sec) \
+ __define_initcall_stub(__stub, fn) \
+ asm(".section \"" __sec "\", \"a\" \n" \
+ __stringify(__name) ": \n" \
+ ".long " __stringify(__stub) " - . \n" \
+ ".previous \n"); \
+ static_assert(__same_type(initcall_t, &fn));
#else
-#define ___define_initcall(fn, id, __sec) \
- static initcall_t __initcall_##fn##id __used \
- __attribute__((__section__(#__sec ".init"))) = fn;
+#define ____define_initcall(fn, __unused, __name, __sec) \
+ static initcall_t __name __used \
+ __attribute__((__section__(__sec))) = fn;
#endif
+#define __unique_initcall(fn, id, __sec, __iid) \
+ ____define_initcall(fn, \
+ __initcall_stub(fn, __iid, id), \
+ __initcall_name(initcall, __iid, id), \
+ __initcall_section(__sec, __iid))
+
+#define ___define_initcall(fn, id, __sec) \
+ __unique_initcall(fn, id, __sec, __initcall_id(fn))
+
#define __define_initcall(fn, id) ___define_initcall(fn, id, .initcall##id)
/*
@@ -236,7 +298,7 @@ extern bool initcall_debug;
#define __exitcall(fn) \
static exitcall_t __exitcall_##fn __exit_call = fn
-#define console_initcall(fn) ___define_initcall(fn,, .con_initcall)
+#define console_initcall(fn) ___define_initcall(fn, con, .con_initcall)
struct obs_kernel_param {
const char *str;
@@ -254,16 +316,23 @@ struct obs_kernel_param {
static const char __setup_str_##unique_id[] __initconst \
__aligned(1) = str; \
static struct obs_kernel_param __setup_##unique_id \
- __used __section(.init.setup) \
- __attribute__((aligned((sizeof(long))))) \
+ __used __section(".init.setup") \
+ __aligned(__alignof__(struct obs_kernel_param)) \
= { __setup_str_##unique_id, fn, early }
+/*
+ * NOTE: __setup functions return values:
+ * @fn returns 1 (or non-zero) if the option argument is "handled"
+ * and returns 0 if the option argument is "not handled".
+ */
#define __setup(str, fn) \
__setup_param(str, fn, fn, 0)
/*
- * NOTE: fn is as per module_param, not __setup!
- * Emits warning if fn returns non-zero.
+ * NOTE: @fn is as per module_param, not __setup!
+ * I.e., @fn returns 0 for no error or non-zero for error
+ * (possibly @fn returns a -errno value, but it does not matter).
+ * Emits warning if @fn returns non-zero.
*/
#define early_param(str, fn) \
__setup_param(str, fn, fn, 1)
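A side-by-side sketch of the two return conventions documented above (option names hypothetical):

static bool bar_enabled __initdata;

/* __setup(): return 1 to mark the option as handled */
static int __init bar_on_setup(char *arg)
{
	bar_enabled = true;
	return 1;
}
__setup("bar_on", bar_on_setup);

/* early_param(): return 0 on success; non-zero (e.g. -EINVAL) emits a warning */
static int __init bar_mode_param(char *arg)
{
	if (!arg)
		return -EINVAL;
	return 0;
}
early_param("bar_mode", bar_mode_param);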
@@ -277,14 +346,14 @@ struct obs_kernel_param {
var = 1; \
return 0; \
} \
- __setup_param(str_on, parse_##var##_on, parse_##var##_on, 1); \
+ early_param(str_on, parse_##var##_on); \
\
static int __init parse_##var##_off(char *arg) \
{ \
var = 0; \
return 0; \
} \
- __setup_param(str_off, parse_##var##_off, parse_##var##_off, 1)
+ early_param(str_off, parse_##var##_off)
/* Relies on boot_command_line being set */
void __init parse_early_param(void);
@@ -298,7 +367,7 @@ void __init parse_early_options(char *cmdline);
#endif
/* Data marked not to be saved by software suspend */
-#define __nosavedata __section(.data..nosave)
+#define __nosavedata __section(".data..nosave")
#ifdef MODULE
#define __exit_p(x) x
diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h
new file mode 100644
index 000000000000..92045d18cbfc
--- /dev/null
+++ b/include/linux/init_syscalls.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+int __init init_mount(const char *dev_name, const char *dir_name,
+ const char *type_page, unsigned long flags, void *data_page);
+int __init init_umount(const char *name, int flags);
+int __init init_chdir(const char *filename);
+int __init init_chroot(const char *filename);
+int __init init_chown(const char *filename, uid_t user, gid_t group, int flags);
+int __init init_chmod(const char *filename, umode_t mode);
+int __init init_eaccess(const char *filename);
+int __init init_stat(const char *filename, struct kstat *stat, int flags);
+int __init init_mknod(const char *filename, umode_t mode, unsigned int dev);
+int __init init_link(const char *oldname, const char *newname);
+int __init init_symlink(const char *oldname, const char *newname);
+int __init init_unlink(const char *pathname);
+int __init init_mkdir(const char *pathname, umode_t mode);
+int __init init_rmdir(const char *pathname);
+int __init init_utimes(char *filename, struct timespec64 *ts);
+int __init init_dup(struct file *file);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 2c620d7ac432..40fc5813cf93 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -25,7 +25,6 @@
extern struct files_struct init_files;
extern struct fs_struct init_fs;
extern struct nsproxy init_nsproxy;
-extern struct group_info init_groups;
extern struct cred init_cred;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
@@ -40,12 +39,12 @@ extern struct cred init_cred;
/* Attach to the init_task data structure for proper alignment */
#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
-#define __init_task_data __attribute__((__section__(".data..init_task")))
+#define __init_task_data __section(".data..init_task")
#else
#define __init_task_data /**/
#endif
/* Attach to the thread_info data structure for proper alignment */
-#define __init_thread_info __attribute__((__section__(".data..init_thread_info")))
+#define __init_thread_info __section(".data..init_thread_info")
#endif
diff --git a/include/linux/initrd.h b/include/linux/initrd.h
index aa5914355728..f1a1f4c92ded 100644
--- a/include/linux/initrd.h
+++ b/include/linux/initrd.h
@@ -1,12 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
-
-/* 1 = load ramdisk, 0 = don't load */
-extern int rd_doload;
+#ifndef __LINUX_INITRD_H
+#define __LINUX_INITRD_H
-/* 1 = prompt for ramdisk, 0 = don't prompt */
-extern int rd_prompt;
+#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
/* starting block # of image */
extern int rd_image_start;
@@ -21,12 +18,20 @@ extern int initrd_below_start_ok;
extern unsigned long initrd_start, initrd_end;
extern void free_initrd_mem(unsigned long, unsigned long);
+#ifdef CONFIG_BLK_DEV_INITRD
+extern void __init reserve_initrd_mem(void);
+extern void wait_for_initramfs(void);
+#else
+static inline void __init reserve_initrd_mem(void) {}
+static inline void wait_for_initramfs(void) {}
+#endif
+
extern phys_addr_t phys_initrd_start;
extern unsigned long phys_initrd_size;
-extern unsigned int real_root_dev;
-
extern char __initramfs_start[];
extern unsigned long __initramfs_size;
void console_on_rootfs(void);
+
+#endif /* __LINUX_INITRD_H */
diff --git a/include/linux/inotify.h b/include/linux/inotify.h
index 6a24905f6e1e..8d20caa1b268 100644
--- a/include/linux/inotify.h
+++ b/include/linux/inotify.h
@@ -7,11 +7,8 @@
#ifndef _LINUX_INOTIFY_H
#define _LINUX_INOTIFY_H
-#include <linux/sysctl.h>
#include <uapi/linux/inotify.h>
-extern struct ctl_table inotify_table[]; /* for sysctl */
-
#define ALL_INOTIFY_BITS (IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE | \
IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM | \
IN_MOVED_TO | IN_CREATE | IN_DELETE | \
diff --git a/include/linux/input-polldev.h b/include/linux/input-polldev.h
deleted file mode 100644
index 14821fd231c0..000000000000
--- a/include/linux/input-polldev.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef _INPUT_POLLDEV_H
-#define _INPUT_POLLDEV_H
-
-/*
- * Copyright (c) 2007 Dmitry Torokhov
- */
-
-#include <linux/input.h>
-#include <linux/workqueue.h>
-
-/**
- * struct input_polled_dev - simple polled input device
- * @private: private driver data.
- * @open: driver-supplied method that prepares device for polling
- * (enabled the device and maybe flushes device state).
- * @close: driver-supplied method that is called when device is no
- * longer being polled. Used to put device into low power mode.
- * @poll: driver-supplied method that polls the device and posts
- * input events (mandatory).
- * @poll_interval: specifies how often the poll() method should be called.
- * Defaults to 500 msec unless overridden when registering the device.
- * @poll_interval_max: specifies upper bound for the poll interval.
- * Defaults to the initial value of @poll_interval.
- * @poll_interval_min: specifies lower bound for the poll interval.
- * Defaults to 0.
- * @input: input device structure associated with the polled device.
- * Must be properly initialized by the driver (id, name, phys, bits).
- *
- * Polled input device provides a skeleton for supporting simple input
- * devices that do not raise interrupts but have to be periodically
- * scanned or polled to detect changes in their state.
- */
-struct input_polled_dev {
- void *private;
-
- void (*open)(struct input_polled_dev *dev);
- void (*close)(struct input_polled_dev *dev);
- void (*poll)(struct input_polled_dev *dev);
- unsigned int poll_interval; /* msec */
- unsigned int poll_interval_max; /* msec */
- unsigned int poll_interval_min; /* msec */
-
- struct input_dev *input;
-
-/* private: */
- struct delayed_work work;
-
- bool devres_managed;
-};
-
-struct input_polled_dev *input_allocate_polled_device(void);
-struct input_polled_dev *devm_input_allocate_polled_device(struct device *dev);
-void input_free_polled_device(struct input_polled_dev *dev);
-int input_register_polled_device(struct input_polled_dev *dev);
-void input_unregister_polled_device(struct input_polled_dev *dev);
-
-#endif
diff --git a/include/linux/input.h b/include/linux/input.h
index 56f2fd32e609..49790c1bd2c4 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -90,9 +90,11 @@ enum input_clock_type {
* @open: this method is called when the very first user calls
* input_open_device(). The driver must prepare the device
* to start generating events (start polling thread,
- * request an IRQ, submit URB, etc.)
+ * request an IRQ, submit URB, etc.). The meaning of open() is
+ * to start providing events to the input core.
* @close: this method is called when the very last user calls
- * input_close_device().
+ * input_close_device(). The meaning of close() is to stop
+ * providing events to the input core.
* @flush: purges the device. Most commonly used to get rid of force
* feedback effects loaded into the device when disconnecting
* from it
@@ -127,6 +129,10 @@ enum input_clock_type {
* and needs not be explicitly unregistered or freed.
* @timestamp: storage for a timestamp set by input_set_timestamp called
* by a driver
+ * @inhibited: indicates that the input device is inhibited. If that is
+ * the case then the input core ignores any events generated by the device.
+ * The device's close() is called when it is being inhibited and its open()
+ * is called when it is being uninhibited.
*/
struct input_dev {
const char *name;
@@ -201,6 +207,8 @@ struct input_dev {
bool devres_managed;
ktime_t timestamp[INPUT_CLK_MAX];
+
+ bool inhibited;
};
#define to_input_dev(d) container_of(d, struct input_dev, dev)
@@ -467,6 +475,8 @@ static inline void input_set_events_per_packet(struct input_dev *dev, int n_even
void input_alloc_absinfo(struct input_dev *dev);
void input_set_abs_params(struct input_dev *dev, unsigned int axis,
int min, int max, int fuzz, int flat);
+void input_copy_abs(struct input_dev *dst, unsigned int dst_axis,
+ const struct input_dev *src, unsigned int src_axis);
#define INPUT_GENERATE_ABS_ACCESSORS(_suffix, _item) \
static inline int input_abs_get_##_suffix(struct input_dev *dev, \
@@ -502,6 +512,8 @@ bool input_match_device_id(const struct input_dev *dev,
void input_enable_softrepeat(struct input_dev *dev, int delay, int period);
+bool input_device_enabled(struct input_dev *dev);
+
extern struct class input_class;
/**
diff --git a/include/linux/input/adp5589.h b/include/linux/input/adp5589.h
index c0523af96893..0e4742c8c81e 100644
--- a/include/linux/input/adp5589.h
+++ b/include/linux/input/adp5589.h
@@ -175,13 +175,6 @@ struct i2c_client; /* forward declaration */
struct adp5589_gpio_platform_data {
int gpio_start; /* GPIO Chip base # */
- int (*setup)(struct i2c_client *client,
- int gpio, unsigned ngpio,
- void *context);
- int (*teardown)(struct i2c_client *client,
- int gpio, unsigned ngpio,
- void *context);
- void *context;
};
#endif
diff --git a/include/linux/input/auo-pixcir-ts.h b/include/linux/input/auo-pixcir-ts.h
deleted file mode 100644
index ed0776997a7a..000000000000
--- a/include/linux/input/auo-pixcir-ts.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Driver for AUO in-cell touchscreens
- *
- * Copyright (c) 2011 Heiko Stuebner <heiko@sntech.de>
- *
- * based on auo_touch.h from Dell Streak kernel
- *
- * Copyright (c) 2008 QUALCOMM Incorporated.
- * Copyright (c) 2008 QUALCOMM USA, INC.
- */
-
-#ifndef __AUO_PIXCIR_TS_H__
-#define __AUO_PIXCIR_TS_H__
-
-/*
- * Interrupt modes:
- * periodical: interrupt is asserted periodicaly
- * compare coordinates: interrupt is asserted when coordinates change
- * indicate touch: interrupt is asserted during touch
- */
-#define AUO_PIXCIR_INT_PERIODICAL 0x00
-#define AUO_PIXCIR_INT_COMP_COORD 0x01
-#define AUO_PIXCIR_INT_TOUCH_IND 0x02
-
-/*
- * @gpio_int interrupt gpio
- * @int_setting one of AUO_PIXCIR_INT_*
- * @init_hw hardwarespecific init
- * @exit_hw hardwarespecific shutdown
- * @x_max x-resolution
- * @y_max y-resolution
- */
-struct auo_pixcir_ts_platdata {
- int gpio_int;
- int gpio_rst;
-
- int int_setting;
-
- unsigned int x_max;
- unsigned int y_max;
-};
-
-#endif
diff --git a/include/linux/input/cy8ctmg110_pdata.h b/include/linux/input/cy8ctmg110_pdata.h
deleted file mode 100644
index 77582ae1745a..000000000000
--- a/include/linux/input/cy8ctmg110_pdata.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_CY8CTMG110_PDATA_H
-#define _LINUX_CY8CTMG110_PDATA_H
-
-struct cy8ctmg110_pdata
-{
- int reset_pin; /* Reset pin is wired to this GPIO (optional) */
- int irq_pin; /* IRQ pin is wired to this GPIO */
-};
-
-#endif
diff --git a/include/linux/input/cyttsp.h b/include/linux/input/cyttsp.h
deleted file mode 100644
index 118b9af6e01a..000000000000
--- a/include/linux/input/cyttsp.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Header file for:
- * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers.
- * For use with Cypress Txx3xx parts.
- * Supported parts include:
- * CY8CTST341
- * CY8CTMA340
- *
- * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc.
- * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
- *
- * Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com)
- */
-#ifndef _CYTTSP_H_
-#define _CYTTSP_H_
-
-#define CY_SPI_NAME "cyttsp-spi"
-#define CY_I2C_NAME "cyttsp-i2c"
-/* Active Power state scanning/processing refresh interval */
-#define CY_ACT_INTRVL_DFLT 0x00 /* ms */
-/* touch timeout for the Active power */
-#define CY_TCH_TMOUT_DFLT 0xFF /* ms */
-/* Low Power state scanning/processing refresh interval */
-#define CY_LP_INTRVL_DFLT 0x0A /* ms */
-/* Active distance in pixels for a gesture to be reported */
-#define CY_ACT_DIST_DFLT 0xF8 /* pixels */
-
-#endif /* _CYTTSP_H_ */
diff --git a/include/linux/input/elan-i2c-ids.h b/include/linux/input/elan-i2c-ids.h
index 1ecb6b45812c..51cca17ee94c 100644
--- a/include/linux/input/elan-i2c-ids.h
+++ b/include/linux/input/elan-i2c-ids.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Elan I2C/SMBus Touchpad device whitelist
*
@@ -11,10 +12,6 @@
* copyright (c) 2011-2012 Cypress Semiconductor, Inc.
* copyright (c) 2011-2012 Google, Inc.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
* Trademarks are the property of their respective owners.
*/
@@ -67,8 +64,15 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN062B", 0 },
{ "ELAN062C", 0 },
{ "ELAN062D", 0 },
+ { "ELAN062E", 0 }, /* Lenovo V340 Whiskey Lake U */
+ { "ELAN062F", 0 }, /* Lenovo V340 Comet Lake U */
{ "ELAN0631", 0 },
{ "ELAN0632", 0 },
+ { "ELAN0633", 0 }, /* Lenovo S145 */
+ { "ELAN0634", 0 }, /* Lenovo V340 Ice lake */
+ { "ELAN0635", 0 }, /* Lenovo V1415-IIL */
+ { "ELAN0636", 0 }, /* Lenovo V1415-Dali */
+ { "ELAN0637", 0 }, /* Lenovo V1415-IGLR */
{ "ELAN1000", 0 },
{ }
};
diff --git a/include/linux/input/gp2ap002a00f.h b/include/linux/input/gp2ap002a00f.h
deleted file mode 100644
index 3614a13a8297..000000000000
--- a/include/linux/input/gp2ap002a00f.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _GP2AP002A00F_H_
-#define _GP2AP002A00F_H_
-
-#include <linux/i2c.h>
-
-#define GP2A_I2C_NAME "gp2ap002a00f"
-
-/**
- * struct gp2a_platform_data - Sharp gp2ap002a00f proximity platform data
- * @vout_gpio: The gpio connected to the object detected pin (VOUT)
- * @wakeup: Set to true if the proximity can wake the device from suspend
- * @hw_setup: Callback for setting up hardware such as gpios and vregs
- * @hw_shutdown: Callback for properly shutting down hardware
- */
-struct gp2a_platform_data {
- int vout_gpio;
- bool wakeup;
- int (*hw_setup)(struct i2c_client *client);
- int (*hw_shutdown)(struct i2c_client *client);
-};
-
-#endif
diff --git a/include/linux/input/lm8333.h b/include/linux/input/lm8333.h
index 79f918c6e8c5..906da5fc06e0 100644
--- a/include/linux/input/lm8333.h
+++ b/include/linux/input/lm8333.h
@@ -1,6 +1,6 @@
/*
* public include for LM8333 keypad driver - same license as driver
- * Copyright (C) 2012 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
+ * Copyright (C) 2012 Wolfram Sang, Pengutronix <kernel@pengutronix.de>
*/
#ifndef _LM8333_H
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index 9e409bb13642..3b8580bd33c1 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -100,6 +100,11 @@ static inline bool input_is_mt_axis(int axis)
bool input_mt_report_slot_state(struct input_dev *dev,
unsigned int tool_type, bool active);
+static inline void input_mt_report_slot_inactive(struct input_dev *dev)
+{
+ input_mt_report_slot_state(dev, 0, false);
+}
+
void input_mt_report_finger_count(struct input_dev *dev, int count);
void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count);
void input_mt_drop_unused(struct input_dev *dev);
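The new input_mt_report_slot_inactive() helper simply wraps input_mt_report_slot_state() with a zero tool type. A contact-release path might use it like this (a sketch; the slot bookkeeping is the driver's own):

/* Sketch: releasing a contact in a multitouch driver. */
static void example_release_contact(struct input_dev *dev, int slot)
{
	input_mt_slot(dev, slot);
	input_mt_report_slot_inactive(dev);
	input_sync(dev);
}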
diff --git a/include/linux/input/sparse-keymap.h b/include/linux/input/sparse-keymap.h
index d25d1452dc6e..d0dddc14ebc8 100644
--- a/include/linux/input/sparse-keymap.h
+++ b/include/linux/input/sparse-keymap.h
@@ -20,6 +20,7 @@
* private definitions.
* @code: Device-specific data identifying the button/switch
* @keycode: KEY_* code assigned to a key/button
+ * @sw: struct with code/value used by KE_SW and KE_VSW
* @sw.code: SW_* code assigned to a switch
 * @sw.value: Value that should be sent in an input event when KE_SW
* switch is toggled. KE_VSW switches ignore this field and
diff --git a/include/linux/input/vivaldi-fmap.h b/include/linux/input/vivaldi-fmap.h
new file mode 100644
index 000000000000..7e4b7023bf04
--- /dev/null
+++ b/include/linux/input/vivaldi-fmap.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _VIVALDI_FMAP_H
+#define _VIVALDI_FMAP_H
+
+#include <linux/types.h>
+
+#define VIVALDI_MAX_FUNCTION_ROW_KEYS 24
+
+/**
+ * struct vivaldi_data - Function row map data for ChromeOS Vivaldi keyboards
+ * @function_row_physmap: An array of scancodes or their equivalent (HID usage
+ * codes, encoded rows/columns, etc.) for the top
+ * row function keys, in left-to-right order
+ * @num_function_row_keys: The number of top row keys in a custom keyboard
+ *
+ * This structure is used by ChromeOS keyboards that follow the
+ * Vivaldi keyboard function row design.
+ */
+struct vivaldi_data {
+ u32 function_row_physmap[VIVALDI_MAX_FUNCTION_ROW_KEYS];
+ unsigned int num_function_row_keys;
+};
+
+ssize_t vivaldi_function_row_physmap_show(const struct vivaldi_data *data,
+ char *buf);
+
+#endif /* _VIVALDI_FMAP_H */
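A sketch of how a keyboard driver might populate struct vivaldi_data and expose it through the shared show helper; the attribute plumbing and scancodes here are invented for illustration, only the struct and vivaldi_function_row_physmap_show() come from the new header:

#include <linux/input/vivaldi-fmap.h>

static struct vivaldi_data example_vdata;

static void example_init_fmap(void)
{
	/* Hypothetical top-row map, left to right. */
	static const u32 physmap[] = { 0x3b, 0x3c, 0x3d, 0x3e };
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(physmap); i++)
		example_vdata.function_row_physmap[i] = physmap[i];
	example_vdata.num_function_row_keys = ARRAY_SIZE(physmap);
}

/* A sysfs show() then delegates to the common helper: */
static ssize_t function_row_physmap_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return vivaldi_function_row_physmap_show(&example_vdata, buf);
}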
diff --git a/include/linux/instruction_pointer.h b/include/linux/instruction_pointer.h
new file mode 100644
index 000000000000..cda1f706eaeb
--- /dev/null
+++ b/include/linux/instruction_pointer.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_INSTRUCTION_POINTER_H
+#define _LINUX_INSTRUCTION_POINTER_H
+
+#define _RET_IP_ (unsigned long)__builtin_return_address(0)
+#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
+
+#endif /* _LINUX_INSTRUCTION_POINTER_H */
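These two macros yield the caller's return address and the address of the current instruction, respectively; a common use is tagging lock or allocation events with their call site. A minimal sketch:

#include <linux/instruction_pointer.h>
#include <linux/printk.h>

static void example_trace_callsite(void)
{
	/* Who called us, and where are we right now? */
	pr_debug("called from %pS, currently at %pS\n",
		 (void *)_RET_IP_, (void *)_THIS_IP_);
}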
diff --git a/include/linux/instrumentation.h b/include/linux/instrumentation.h
new file mode 100644
index 000000000000..bc7babe91b2e
--- /dev/null
+++ b/include/linux/instrumentation.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_INSTRUMENTATION_H
+#define __LINUX_INSTRUMENTATION_H
+
+#ifdef CONFIG_NOINSTR_VALIDATION
+
+#include <linux/stringify.h>
+
+/* Begin/end of an instrumentation safe region */
+#define __instrumentation_begin(c) ({ \
+ asm volatile(__stringify(c) ": nop\n\t" \
+ ".pushsection .discard.instr_begin\n\t" \
+ ".long " __stringify(c) "b - .\n\t" \
+ ".popsection\n\t" : : "i" (c)); \
+})
+#define instrumentation_begin() __instrumentation_begin(__COUNTER__)
+
+/*
+ * Because instrumentation_{begin,end}() can nest, objtool validation considers
+ * _begin() a +1 and _end() a -1 and computes a sum over the instructions.
+ * When the value is greater than 0, we consider instrumentation allowed.
+ *
+ * There is a problem with code like:
+ *
+ * noinstr void foo()
+ * {
+ * instrumentation_begin();
+ * ...
+ * if (cond) {
+ * instrumentation_begin();
+ * ...
+ * instrumentation_end();
+ * }
+ * bar();
+ * instrumentation_end();
+ * }
+ *
+ * If instrumentation_end() were an empty label, like all the other
+ * annotations, the inner _end(), which is at the end of a conditional block,
+ * would land on the instruction after the block.
+ *
+ * If we then consider the sum over the !cond path, we'll see that the call to
+ * bar() happens with a 0 value, even though we meant it to happen with a
+ * positive value.
+ *
+ * To avoid this, make _end() a NOP instruction; this ensures it remains
+ * part of the conditional block and does not escape.
+ */
+#define __instrumentation_end(c) ({ \
+ asm volatile(__stringify(c) ": nop\n\t" \
+ ".pushsection .discard.instr_end\n\t" \
+ ".long " __stringify(c) "b - .\n\t" \
+ ".popsection\n\t" : : "i" (c)); \
+})
+#define instrumentation_end() __instrumentation_end(__COUNTER__)
+#else /* !CONFIG_NOINSTR_VALIDATION */
+# define instrumentation_begin() do { } while (0)
+# define instrumentation_end() do { } while (0)
+#endif /* CONFIG_NOINSTR_VALIDATION */
+
+#endif /* __LINUX_INSTRUMENTATION_H */
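Usage follows the pattern from the comment above: a noinstr function brackets the region in which calls into instrumented code are permitted. A sketch (the function body is illustrative):

/* Sketch: entry code that must stay uninstrumented except for a
 * bracketed region where instrumented helpers may be called. */
noinstr void example_enter(void)
{
	/* ... fragile, noinstr-only work ... */

	instrumentation_begin();
	/* Instrumented calls (tracing, lockdep, ...) are allowed here. */
	instrumentation_end();

	/* ... back to noinstr-only work ... */
}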
diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h
new file mode 100644
index 000000000000..501fa8486749
--- /dev/null
+++ b/include/linux/instrumented.h
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This header provides generic wrappers for memory access instrumentation
+ * that the compiler cannot emit by itself, for: KASAN, KCSAN, KMSAN.
+ */
+#ifndef _LINUX_INSTRUMENTED_H
+#define _LINUX_INSTRUMENTED_H
+
+#include <linux/compiler.h>
+#include <linux/kasan-checks.h>
+#include <linux/kcsan-checks.h>
+#include <linux/kmsan-checks.h>
+#include <linux/types.h>
+
+/**
+ * instrument_read - instrument regular read access
+ *
+ * Instrument a regular read access. The instrumentation should be inserted
+ * before the actual read happens.
+ *
+ * @v address of access
+ * @size size of access
+ */
+static __always_inline void instrument_read(const volatile void *v, size_t size)
+{
+ kasan_check_read(v, size);
+ kcsan_check_read(v, size);
+}
+
+/**
+ * instrument_write - instrument regular write access
+ *
+ * Instrument a regular write access. The instrumentation should be inserted
+ * before the actual write happens.
+ *
+ * @v address of access
+ * @size size of access
+ */
+static __always_inline void instrument_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_write(v, size);
+}
+
+/**
+ * instrument_read_write - instrument regular read-write access
+ *
+ * Instrument a regular read-write access. The instrumentation should be
+ * inserted before the actual access happens.
+ *
+ * @v address of access
+ * @size size of access
+ */
+static __always_inline void instrument_read_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_read_write(v, size);
+}
+
+/**
+ * instrument_atomic_read - instrument atomic read access
+ *
+ * Instrument an atomic read access. The instrumentation should be inserted
+ * before the actual read happens.
+ *
+ * @v address of access
+ * @size size of access
+ */
+static __always_inline void instrument_atomic_read(const volatile void *v, size_t size)
+{
+ kasan_check_read(v, size);
+ kcsan_check_atomic_read(v, size);
+}
+
+/**
+ * instrument_atomic_write - instrument atomic write access
+ *
+ * Instrument an atomic write access. The instrumentation should be inserted
+ * before the actual write happens.
+ *
+ * @v address of access
+ * @size size of access
+ */
+static __always_inline void instrument_atomic_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_atomic_write(v, size);
+}
+
+/**
+ * instrument_atomic_read_write - instrument atomic read-write access
+ *
+ * Instrument an atomic read-write access. The instrumentation should be
+ * inserted before the actual write happens.
+ *
+ * @v address of access
+ * @size size of access
+ */
+static __always_inline void instrument_atomic_read_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_atomic_read_write(v, size);
+}
+
+/**
+ * instrument_copy_to_user - instrument reads of copy_to_user
+ *
+ * Instrument reads from kernel memory that are due to copy_to_user (and
+ * variants). The instrumentation must be inserted before the accesses.
+ *
+ * @to destination address
+ * @from source address
+ * @n number of bytes to copy
+ */
+static __always_inline void
+instrument_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ kasan_check_read(from, n);
+ kcsan_check_read(from, n);
+ kmsan_copy_to_user(to, from, n, 0);
+}
+
+/**
+ * instrument_copy_from_user_before - add instrumentation before copy_from_user
+ *
+ * Instrument writes to kernel memory that are due to copy_from_user (and
+ * variants). The instrumentation should be inserted before the accesses.
+ *
+ * @to destination address
+ * @from source address
+ * @n number of bytes to copy
+ */
+static __always_inline void
+instrument_copy_from_user_before(const void *to, const void __user *from, unsigned long n)
+{
+ kasan_check_write(to, n);
+ kcsan_check_write(to, n);
+}
+
+/**
+ * instrument_copy_from_user_after - add instrumentation after copy_from_user
+ *
+ * Instrument writes to kernel memory that are due to copy_from_user (and
+ * variants). The instrumentation should be inserted after the accesses.
+ *
+ * @to destination address
+ * @from source address
+ * @n number of bytes to copy
+ * @left number of bytes not copied (as returned by copy_from_user)
+ */
+static __always_inline void
+instrument_copy_from_user_after(const void *to, const void __user *from,
+ unsigned long n, unsigned long left)
+{
+ kmsan_unpoison_memory(to, n - left);
+}
+
+/**
+ * instrument_get_user() - add instrumentation to get_user()-like macros
+ *
+ * get_user() and friends are fragile, so it may depend on the implementation
+ * whether the instrumentation happens before or after the data is copied from
+ * userspace.
+ *
+ * @to destination variable, may not be address-taken
+ */
+#define instrument_get_user(to) \
+({ \
+ u64 __tmp = (u64)(to); \
+ kmsan_unpoison_memory(&__tmp, sizeof(__tmp)); \
+ to = __tmp; \
+})
+
+/**
+ * instrument_put_user() - add instrumentation to put_user()-like macros
+ *
+ * put_user() and friends are fragile, so it may depend on the implementation
+ * whether the instrumentation happens before or after the data is copied to
+ * userspace.
+ *
+ * @from source address
+ * @ptr userspace pointer to copy to
+ * @size number of bytes to copy
+ */
+#define instrument_put_user(from, ptr, size) \
+({ \
+ kmsan_copy_to_user(ptr, &from, sizeof(from), 0); \
+})
+
+#endif /* _LINUX_INSTRUMENTED_H */
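The before/after split for copy_from_user exists because KASAN/KCSAN want to check the destination before the copy, while KMSAN can only unpoison the bytes that were actually copied. A sketch of the intended call pattern around a raw copy, with raw_copy_from_user() standing in for the arch primitive:

static unsigned long
example_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long left;

	instrument_copy_from_user_before(to, from, n);
	left = raw_copy_from_user(to, from, n);	/* arch primitive */
	instrument_copy_from_user_after(to, from, n, left);

	return left;	/* bytes not copied, as copy_from_user() returns */
}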
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
index 2271939c5c31..2ea0f2f65ab6 100644
--- a/include/linux/integrity.h
+++ b/include/linux/integrity.h
@@ -13,6 +13,7 @@ enum integrity_status {
INTEGRITY_PASS = 0,
INTEGRITY_PASS_IMMUTABLE,
INTEGRITY_FAIL,
+ INTEGRITY_FAIL_IMMUTABLE,
INTEGRITY_NOLABEL,
INTEGRITY_NOXATTRS,
INTEGRITY_UNKNOWN,
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
deleted file mode 100644
index 980234ae0312..000000000000
--- a/include/linux/intel-iommu.h
+++ /dev/null
@@ -1,734 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright © 2006-2015, Intel Corporation.
- *
- * Authors: Ashok Raj <ashok.raj@intel.com>
- * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
- * David Woodhouse <David.Woodhouse@intel.com>
- */
-
-#ifndef _INTEL_IOMMU_H_
-#define _INTEL_IOMMU_H_
-
-#include <linux/types.h>
-#include <linux/iova.h>
-#include <linux/io.h>
-#include <linux/idr.h>
-#include <linux/mmu_notifier.h>
-#include <linux/list.h>
-#include <linux/iommu.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/dmar.h>
-
-#include <asm/cacheflush.h>
-#include <asm/iommu.h>
-
-/*
- * VT-d hardware uses 4KiB page size regardless of host page size.
- */
-#define VTD_PAGE_SHIFT (12)
-#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT)
-#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
-#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
-
-#define VTD_STRIDE_SHIFT (9)
-#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT)
-
-#define DMA_PTE_READ BIT_ULL(0)
-#define DMA_PTE_WRITE BIT_ULL(1)
-#define DMA_PTE_LARGE_PAGE BIT_ULL(7)
-#define DMA_PTE_SNP BIT_ULL(11)
-
-#define DMA_FL_PTE_PRESENT BIT_ULL(0)
-#define DMA_FL_PTE_XD BIT_ULL(63)
-
-#define CONTEXT_TT_MULTI_LEVEL 0
-#define CONTEXT_TT_DEV_IOTLB 1
-#define CONTEXT_TT_PASS_THROUGH 2
-#define CONTEXT_PASIDE BIT_ULL(3)
-
-/*
- * Intel IOMMU register specification per version 1.0 public spec.
- */
-#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
-#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
-#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
-#define DMAR_GCMD_REG 0x18 /* Global command register */
-#define DMAR_GSTS_REG 0x1c /* Global status register */
-#define DMAR_RTADDR_REG 0x20 /* Root entry table */
-#define DMAR_CCMD_REG 0x28 /* Context command reg */
-#define DMAR_FSTS_REG 0x34 /* Fault Status register */
-#define DMAR_FECTL_REG 0x38 /* Fault control register */
-#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
-#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
-#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
-#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
-#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
-#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
-#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
-#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
-#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
-#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */
-#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
-#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
-#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
-#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */
-#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
-#define DMAR_PQH_REG 0xc0 /* Page request queue head register */
-#define DMAR_PQT_REG 0xc8 /* Page request queue tail register */
-#define DMAR_PQA_REG 0xd0 /* Page request queue address register */
-#define DMAR_PRS_REG 0xdc /* Page request status register */
-#define DMAR_PECTL_REG 0xe0 /* Page request event control register */
-#define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */
-#define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */
-#define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */
-#define DMAR_MTRRCAP_REG 0x100 /* MTRR capability register */
-#define DMAR_MTRRDEF_REG 0x108 /* MTRR default type register */
-#define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */
-#define DMAR_MTRR_FIX16K_80000_REG 0x128
-#define DMAR_MTRR_FIX16K_A0000_REG 0x130
-#define DMAR_MTRR_FIX4K_C0000_REG 0x138
-#define DMAR_MTRR_FIX4K_C8000_REG 0x140
-#define DMAR_MTRR_FIX4K_D0000_REG 0x148
-#define DMAR_MTRR_FIX4K_D8000_REG 0x150
-#define DMAR_MTRR_FIX4K_E0000_REG 0x158
-#define DMAR_MTRR_FIX4K_E8000_REG 0x160
-#define DMAR_MTRR_FIX4K_F0000_REG 0x168
-#define DMAR_MTRR_FIX4K_F8000_REG 0x170
-#define DMAR_MTRR_PHYSBASE0_REG 0x180 /* MTRR Variable range registers */
-#define DMAR_MTRR_PHYSMASK0_REG 0x188
-#define DMAR_MTRR_PHYSBASE1_REG 0x190
-#define DMAR_MTRR_PHYSMASK1_REG 0x198
-#define DMAR_MTRR_PHYSBASE2_REG 0x1a0
-#define DMAR_MTRR_PHYSMASK2_REG 0x1a8
-#define DMAR_MTRR_PHYSBASE3_REG 0x1b0
-#define DMAR_MTRR_PHYSMASK3_REG 0x1b8
-#define DMAR_MTRR_PHYSBASE4_REG 0x1c0
-#define DMAR_MTRR_PHYSMASK4_REG 0x1c8
-#define DMAR_MTRR_PHYSBASE5_REG 0x1d0
-#define DMAR_MTRR_PHYSMASK5_REG 0x1d8
-#define DMAR_MTRR_PHYSBASE6_REG 0x1e0
-#define DMAR_MTRR_PHYSMASK6_REG 0x1e8
-#define DMAR_MTRR_PHYSBASE7_REG 0x1f0
-#define DMAR_MTRR_PHYSMASK7_REG 0x1f8
-#define DMAR_MTRR_PHYSBASE8_REG 0x200
-#define DMAR_MTRR_PHYSMASK8_REG 0x208
-#define DMAR_MTRR_PHYSBASE9_REG 0x210
-#define DMAR_MTRR_PHYSMASK9_REG 0x218
-#define DMAR_VCCAP_REG 0xe00 /* Virtual command capability register */
-#define DMAR_VCMD_REG 0xe10 /* Virtual command register */
-#define DMAR_VCRSP_REG 0xe20 /* Virtual command response register */
-
-#define OFFSET_STRIDE (9)
-
-#define dmar_readq(a) readq(a)
-#define dmar_writeq(a,v) writeq(v,a)
-#define dmar_readl(a) readl(a)
-#define dmar_writel(a, v) writel(v, a)
-
-#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
-#define DMAR_VER_MINOR(v) ((v) & 0x0f)
-
-/*
- * Decoding Capability Register
- */
-#define cap_5lp_support(c) (((c) >> 60) & 1)
-#define cap_pi_support(c) (((c) >> 59) & 1)
-#define cap_fl1gp_support(c) (((c) >> 56) & 1)
-#define cap_read_drain(c) (((c) >> 55) & 1)
-#define cap_write_drain(c) (((c) >> 54) & 1)
-#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
-#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
-#define cap_pgsel_inv(c) (((c) >> 39) & 1)
-
-#define cap_super_page_val(c) (((c) >> 34) & 0xf)
-#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
- * OFFSET_STRIDE) + 21)
-
-#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
-#define cap_max_fault_reg_offset(c) \
- (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)
-
-#define cap_zlr(c) (((c) >> 22) & 1)
-#define cap_isoch(c) (((c) >> 23) & 1)
-#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1)
-#define cap_sagaw(c) (((c) >> 8) & 0x1f)
-#define cap_caching_mode(c) (((c) >> 7) & 1)
-#define cap_phmr(c) (((c) >> 6) & 1)
-#define cap_plmr(c) (((c) >> 5) & 1)
-#define cap_rwbf(c) (((c) >> 4) & 1)
-#define cap_afl(c) (((c) >> 3) & 1)
-#define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
-/*
- * Extended Capability Register
- */
-
-#define ecap_smpwc(e) (((e) >> 48) & 0x1)
-#define ecap_flts(e) (((e) >> 47) & 0x1)
-#define ecap_slts(e) (((e) >> 46) & 0x1)
-#define ecap_smts(e) (((e) >> 43) & 0x1)
-#define ecap_dit(e) ((e >> 41) & 0x1)
-#define ecap_pasid(e) ((e >> 40) & 0x1)
-#define ecap_pss(e) ((e >> 35) & 0x1f)
-#define ecap_eafs(e) ((e >> 34) & 0x1)
-#define ecap_nwfs(e) ((e >> 33) & 0x1)
-#define ecap_srs(e) ((e >> 31) & 0x1)
-#define ecap_ers(e) ((e >> 30) & 0x1)
-#define ecap_prs(e) ((e >> 29) & 0x1)
-#define ecap_broken_pasid(e) ((e >> 28) & 0x1)
-#define ecap_dis(e) ((e >> 27) & 0x1)
-#define ecap_nest(e) ((e >> 26) & 0x1)
-#define ecap_mts(e) ((e >> 25) & 0x1)
-#define ecap_ecs(e) ((e >> 24) & 0x1)
-#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
-#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
-#define ecap_coherent(e) ((e) & 0x1)
-#define ecap_qis(e) ((e) & 0x2)
-#define ecap_pass_through(e) ((e >> 6) & 0x1)
-#define ecap_eim_support(e) ((e >> 4) & 0x1)
-#define ecap_ir_support(e) ((e >> 3) & 0x1)
-#define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1)
-#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
-#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
-
-/* IOTLB_REG */
-#define DMA_TLB_FLUSH_GRANU_OFFSET 60
-#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
-#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
-#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
-#define DMA_TLB_IIRG(type) ((type >> 60) & 3)
-#define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
-#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
-#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
-#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
-#define DMA_TLB_IVT (((u64)1) << 63)
-#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
-#define DMA_TLB_MAX_SIZE (0x3f)
-
-/* INVALID_DESC */
-#define DMA_CCMD_INVL_GRANU_OFFSET 61
-#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4)
-#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 4)
-#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 4)
-#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7)
-#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
-#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16)))
-#define DMA_ID_TLB_IH_NONLEAF (((u64)1) << 6)
-#define DMA_ID_TLB_ADDR(addr) (addr)
-#define DMA_ID_TLB_ADDR_MASK(mask) (mask)
-
-/* PMEN_REG */
-#define DMA_PMEN_EPM (((u32)1)<<31)
-#define DMA_PMEN_PRS (((u32)1)<<0)
-
-/* GCMD_REG */
-#define DMA_GCMD_TE (((u32)1) << 31)
-#define DMA_GCMD_SRTP (((u32)1) << 30)
-#define DMA_GCMD_SFL (((u32)1) << 29)
-#define DMA_GCMD_EAFL (((u32)1) << 28)
-#define DMA_GCMD_WBF (((u32)1) << 27)
-#define DMA_GCMD_QIE (((u32)1) << 26)
-#define DMA_GCMD_SIRTP (((u32)1) << 24)
-#define DMA_GCMD_IRE (((u32) 1) << 25)
-#define DMA_GCMD_CFI (((u32) 1) << 23)
-
-/* GSTS_REG */
-#define DMA_GSTS_TES (((u32)1) << 31)
-#define DMA_GSTS_RTPS (((u32)1) << 30)
-#define DMA_GSTS_FLS (((u32)1) << 29)
-#define DMA_GSTS_AFLS (((u32)1) << 28)
-#define DMA_GSTS_WBFS (((u32)1) << 27)
-#define DMA_GSTS_QIES (((u32)1) << 26)
-#define DMA_GSTS_IRTPS (((u32)1) << 24)
-#define DMA_GSTS_IRES (((u32)1) << 25)
-#define DMA_GSTS_CFIS (((u32)1) << 23)
-
-/* DMA_RTADDR_REG */
-#define DMA_RTADDR_RTT (((u64)1) << 11)
-#define DMA_RTADDR_SMT (((u64)1) << 10)
-
-/* CCMD_REG */
-#define DMA_CCMD_ICC (((u64)1) << 63)
-#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
-#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
-#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
-#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
-#define DMA_CCMD_MASK_NOBIT 0
-#define DMA_CCMD_MASK_1BIT 1
-#define DMA_CCMD_MASK_2BIT 2
-#define DMA_CCMD_MASK_3BIT 3
-#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
-#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
-
-/* FECTL_REG */
-#define DMA_FECTL_IM (((u32)1) << 31)
-
-/* FSTS_REG */
-#define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */
-#define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */
-#define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */
-#define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */
-#define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */
-#define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */
-#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
-
-/* FRCD_REG, 32 bits access */
-#define DMA_FRCD_F (((u32)1) << 31)
-#define dma_frcd_type(d) ((d >> 30) & 1)
-#define dma_frcd_fault_reason(c) (c & 0xff)
-#define dma_frcd_source_id(c) (c & 0xffff)
-#define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff)
-#define dma_frcd_pasid_present(c) (((c) >> 31) & 1)
-/* low 64 bit */
-#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
-
-/* PRS_REG */
-#define DMA_PRS_PPR ((u32)1)
-
-#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
-do { \
- cycles_t start_time = get_cycles(); \
- while (1) { \
- sts = op(iommu->reg + offset); \
- if (cond) \
- break; \
- if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
- panic("DMAR hardware is malfunctioning\n"); \
- cpu_relax(); \
- } \
-} while (0)
-
-#define QI_LENGTH 256 /* queue length */
-
-enum {
- QI_FREE,
- QI_IN_USE,
- QI_DONE,
- QI_ABORT
-};
-
-#define QI_CC_TYPE 0x1
-#define QI_IOTLB_TYPE 0x2
-#define QI_DIOTLB_TYPE 0x3
-#define QI_IEC_TYPE 0x4
-#define QI_IWD_TYPE 0x5
-#define QI_EIOTLB_TYPE 0x6
-#define QI_PC_TYPE 0x7
-#define QI_DEIOTLB_TYPE 0x8
-#define QI_PGRP_RESP_TYPE 0x9
-#define QI_PSTRM_RESP_TYPE 0xa
-
-#define QI_IEC_SELECTIVE (((u64)1) << 4)
-#define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32))
-#define QI_IEC_IM(m) (((u64)(m & 0x1f) << 27))
-
-#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32)
-#define QI_IWD_STATUS_WRITE (((u64)1) << 5)
-
-#define QI_IOTLB_DID(did) (((u64)did) << 16)
-#define QI_IOTLB_DR(dr) (((u64)dr) << 7)
-#define QI_IOTLB_DW(dw) (((u64)dw) << 6)
-#define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
-#define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK)
-#define QI_IOTLB_IH(ih) (((u64)ih) << 6)
-#define QI_IOTLB_AM(am) (((u8)am))
-
-#define QI_CC_FM(fm) (((u64)fm) << 48)
-#define QI_CC_SID(sid) (((u64)sid) << 32)
-#define QI_CC_DID(did) (((u64)did) << 16)
-#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
-
-#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
-#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
-#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
-#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
- ((u64)((pfsid >> 4) & 0xfff) << 52))
-#define QI_DEV_IOTLB_SIZE 1
-#define QI_DEV_IOTLB_MAX_INVS 32
-
-#define QI_PC_PASID(pasid) (((u64)pasid) << 32)
-#define QI_PC_DID(did) (((u64)did) << 16)
-#define QI_PC_GRAN(gran) (((u64)gran) << 4)
-
-#define QI_PC_ALL_PASIDS (QI_PC_TYPE | QI_PC_GRAN(0))
-#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1))
-
-#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
-#define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
-#define QI_EIOTLB_AM(am) (((u64)am))
-#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32)
-#define QI_EIOTLB_DID(did) (((u64)did) << 16)
-#define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4)
-
-#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK)
-#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
-#define QI_DEV_EIOTLB_GLOB(g) ((u64)g)
-#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
-#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
-#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
-#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
- ((u64)((pfsid >> 4) & 0xfff) << 52))
-#define QI_DEV_EIOTLB_MAX_INVS 32
-
-/* Page group response descriptor QW0 */
-#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4)
-#define QI_PGRP_PDP(p) (((u64)(p)) << 5)
-#define QI_PGRP_RESP_CODE(res) (((u64)(res)) << 12)
-#define QI_PGRP_DID(rid) (((u64)(rid)) << 16)
-#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)
-
-/* Page group response descriptor QW1 */
-#define QI_PGRP_LPIG(x) (((u64)(x)) << 2)
-#define QI_PGRP_IDX(idx) (((u64)(idx)) << 3)
-
-
-#define QI_RESP_SUCCESS 0x0
-#define QI_RESP_INVALID 0x1
-#define QI_RESP_FAILURE 0xf
-
-#define QI_GRAN_NONG_PASID 2
-#define QI_GRAN_PSI_PASID 3
-
-#define qi_shift(iommu) (DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap))
-
-struct qi_desc {
- u64 qw0;
- u64 qw1;
- u64 qw2;
- u64 qw3;
-};
-
-struct q_inval {
- raw_spinlock_t q_lock;
- void *desc; /* invalidation queue */
- int *desc_status; /* desc status */
- int free_head; /* first free entry */
- int free_tail; /* last free entry */
- int free_cnt;
-};
-
-#ifdef CONFIG_IRQ_REMAP
-/* 1MB - maximum possible interrupt remapping table size */
-#define INTR_REMAP_PAGE_ORDER 8
-#define INTR_REMAP_TABLE_REG_SIZE 0xf
-#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf
-
-#define INTR_REMAP_TABLE_ENTRIES 65536
-
-struct irq_domain;
-
-struct ir_table {
- struct irte *base;
- unsigned long *bitmap;
-};
-#endif
-
-struct iommu_flush {
- void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
- u8 fm, u64 type);
- void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type);
-};
-
-enum {
- SR_DMAR_FECTL_REG,
- SR_DMAR_FEDATA_REG,
- SR_DMAR_FEADDR_REG,
- SR_DMAR_FEUADDR_REG,
- MAX_SR_DMAR_REGS
-};
-
-#define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0)
-#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
-#define VTD_FLAG_SVM_CAPABLE (1 << 2)
-
-extern int intel_iommu_sm;
-extern spinlock_t device_domain_lock;
-
-#define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap))
-#define pasid_supported(iommu) (sm_supported(iommu) && \
- ecap_pasid((iommu)->ecap))
-
-struct pasid_entry;
-struct pasid_state_entry;
-struct page_req_dsc;
-
-/*
- * 0: Present
- * 1-11: Reserved
- * 12-63: Context Ptr (12 - (haw-1))
- * 64-127: Reserved
- */
-struct root_entry {
- u64 lo;
- u64 hi;
-};
-
-/*
- * low 64 bits:
- * 0: present
- * 1: fault processing disable
- * 2-3: translation type
- * 12-63: address space root
- * high 64 bits:
- * 0-2: address width
- * 3-6: aval
- * 8-23: domain id
- */
-struct context_entry {
- u64 lo;
- u64 hi;
-};
-
-struct dmar_domain {
- int nid; /* node id */
-
- unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
- /* Refcount of devices per iommu */
-
-
- u16 iommu_did[DMAR_UNITS_SUPPORTED];
- /* Domain ids per IOMMU. Use u16 since
- * domain ids are 16 bit wide according
- * to VT-d spec, section 9.3 */
- unsigned int auxd_refcnt; /* Refcount of auxiliary attaching */
-
- bool has_iotlb_device;
- struct list_head devices; /* all devices' list */
- struct list_head auxd; /* link to device's auxiliary list */
- struct iova_domain iovad; /* iova's that belong to this domain */
-
- struct dma_pte *pgd; /* virtual address */
- int gaw; /* max guest address width */
-
- /* adjusted guest address width, 0 is level 2 30-bit */
- int agaw;
-
- int flags; /* flags to find out type of domain */
-
- int iommu_coherency;/* indicate coherency of iommu access */
- int iommu_snooping; /* indicate snooping control feature*/
- int iommu_count; /* reference count of iommu */
- int iommu_superpage;/* Level of superpages supported:
- 0 == 4KiB (no superpages), 1 == 2MiB,
- 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
- u64 max_addr; /* maximum mapped address */
-
- int default_pasid; /*
- * The default pasid used for non-SVM
- * traffic on mediated devices.
- */
-
- struct iommu_domain domain; /* generic domain data structure for
- iommu core */
-};
-
-struct intel_iommu {
- void __iomem *reg; /* Pointer to hardware regs, virtual addr */
- u64 reg_phys; /* physical address of hw register set */
- u64 reg_size; /* size of hw register set */
- u64 cap;
- u64 ecap;
- u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
- raw_spinlock_t register_lock; /* protect register handling */
- int seq_id; /* sequence id of the iommu */
- int agaw; /* agaw of this iommu */
- int msagaw; /* max sagaw of this iommu */
- unsigned int irq, pr_irq;
- u16 segment; /* PCI segment# */
- unsigned char name[13]; /* Device Name */
-
-#ifdef CONFIG_INTEL_IOMMU
- unsigned long *domain_ids; /* bitmap of domains */
- struct dmar_domain ***domains; /* ptr to domains */
- spinlock_t lock; /* protect context, domain ids */
- struct root_entry *root_entry; /* virtual address */
-
- struct iommu_flush flush;
-#endif
-#ifdef CONFIG_INTEL_IOMMU_SVM
- struct page_req_dsc *prq;
- unsigned char prq_name[16]; /* Name for PRQ interrupt */
-#endif
- struct q_inval *qi; /* Queued invalidation info */
- u32 *iommu_state; /* Store iommu states between suspend and resume.*/
-
-#ifdef CONFIG_IRQ_REMAP
- struct ir_table *ir_table; /* Interrupt remapping info */
- struct irq_domain *ir_domain;
- struct irq_domain *ir_msi_domain;
-#endif
- struct iommu_device iommu; /* IOMMU core code handle */
- int node;
- u32 flags; /* Software defined flags */
-};
-
-/* PCI domain-device relationship */
-struct device_domain_info {
- struct list_head link; /* link to domain siblings */
- struct list_head global; /* link to global list */
- struct list_head table; /* link to pasid table */
- struct list_head auxiliary_domains; /* auxiliary domains
- * attached to this device
- */
- u8 bus; /* PCI bus number */
- u8 devfn; /* PCI devfn number */
- u16 pfsid; /* SRIOV physical function source ID */
- u8 pasid_supported:3;
- u8 pasid_enabled:1;
- u8 pri_supported:1;
- u8 pri_enabled:1;
- u8 ats_supported:1;
- u8 ats_enabled:1;
- u8 auxd_enabled:1; /* Multiple domains per device */
- u8 ats_qdep;
- struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
- struct intel_iommu *iommu; /* IOMMU used by this device */
- struct dmar_domain *domain; /* pointer to domain */
- struct pasid_table *pasid_table; /* pasid table */
-};
-
-static inline void __iommu_flush_cache(
- struct intel_iommu *iommu, void *addr, int size)
-{
- if (!ecap_coherent(iommu->ecap))
- clflush_cache_range(addr, size);
-}
-
-/*
- * 0: readable
- * 1: writable
- * 2-6: reserved
- * 7: super page
- * 8-10: available
- * 11: snoop behavior
- * 12-63: Host physcial address
- */
-struct dma_pte {
- u64 val;
-};
-
-static inline void dma_clear_pte(struct dma_pte *pte)
-{
- pte->val = 0;
-}
-
-static inline u64 dma_pte_addr(struct dma_pte *pte)
-{
-#ifdef CONFIG_64BIT
- return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
-#else
- /* Must have a full atomic 64-bit read */
- return __cmpxchg64(&pte->val, 0ULL, 0ULL) &
- VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
-#endif
-}
-
-static inline bool dma_pte_present(struct dma_pte *pte)
-{
- return (pte->val & 3) != 0;
-}
-
-static inline bool dma_pte_superpage(struct dma_pte *pte)
-{
- return (pte->val & DMA_PTE_LARGE_PAGE);
-}
-
-static inline int first_pte_in_page(struct dma_pte *pte)
-{
- return !((unsigned long)pte & ~VTD_PAGE_MASK);
-}
-
-extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
-extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);
-
-extern int dmar_enable_qi(struct intel_iommu *iommu);
-extern void dmar_disable_qi(struct intel_iommu *iommu);
-extern int dmar_reenable_qi(struct intel_iommu *iommu);
-extern void qi_global_iec(struct intel_iommu *iommu);
-
-extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
- u8 fm, u64 type);
-extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type);
-extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
- u16 qdep, u64 addr, unsigned mask);
-void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
- unsigned long npages, bool ih);
-extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
-
-extern int dmar_ir_support(void);
-
-void *alloc_pgtable_page(int node);
-void free_pgtable_page(void *vaddr);
-struct intel_iommu *domain_get_iommu(struct dmar_domain *domain);
-int for_each_device_domain(int (*fn)(struct device_domain_info *info,
- void *data), void *data);
-void iommu_flush_write_buffer(struct intel_iommu *iommu);
-int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
-struct dmar_domain *find_domain(struct device *dev);
-
-#ifdef CONFIG_INTEL_IOMMU_SVM
-extern void intel_svm_check(struct intel_iommu *iommu);
-extern int intel_svm_enable_prq(struct intel_iommu *iommu);
-extern int intel_svm_finish_prq(struct intel_iommu *iommu);
-
-struct svm_dev_ops;
-
-struct intel_svm_dev {
- struct list_head list;
- struct rcu_head rcu;
- struct device *dev;
- struct svm_dev_ops *ops;
- int users;
- u16 did;
- u16 dev_iotlb:1;
- u16 sid, qdep;
-};
-
-struct intel_svm {
- struct mmu_notifier notifier;
- struct mm_struct *mm;
- struct intel_iommu *iommu;
- int flags;
- int pasid;
- struct list_head devs;
- struct list_head list;
-};
-
-extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
-#else
-static inline void intel_svm_check(struct intel_iommu *iommu) {}
-#endif
-
-#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
-void intel_iommu_debugfs_init(void);
-#else
-static inline void intel_iommu_debugfs_init(void) {}
-#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
-
-extern const struct attribute_group *intel_iommu_groups[];
-bool context_present(struct context_entry *context);
-struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
- u8 devfn, int alloc);
-
-#ifdef CONFIG_INTEL_IOMMU
-extern int iommu_calculate_agaw(struct intel_iommu *iommu);
-extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
-extern int dmar_disabled;
-extern int intel_iommu_enabled;
-extern int intel_iommu_tboot_noforce;
-#else
-static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
-{
- return 0;
-}
-static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
-{
- return 0;
-}
-#define dmar_disabled (1)
-#define intel_iommu_enabled (0)
-#endif
-
-#endif
diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h
index 0d6b4bc191c5..f45f13304add 100644
--- a/include/linux/intel-ish-client-if.h
+++ b/include/linux/intel-ish-client-if.h
@@ -8,11 +8,17 @@
#ifndef _INTEL_ISH_CLIENT_IF_H_
#define _INTEL_ISH_CLIENT_IF_H_
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
struct ishtp_cl_device;
struct ishtp_device;
struct ishtp_cl;
struct ishtp_fw_client;
+typedef __printf(2, 3) void (*ishtp_print_log)(struct ishtp_device *dev,
+ const char *format, ...);
+
/* Client state */
enum cl_state {
ISHTP_CL_INITIALIZING = 0,
@@ -34,9 +40,9 @@ enum cl_state {
struct ishtp_cl_driver {
struct device_driver driver;
const char *name;
- const guid_t *guid;
+ const struct ishtp_device_id *id;
int (*probe)(struct ishtp_cl_device *dev);
- int (*remove)(struct ishtp_cl_device *dev);
+ void (*remove)(struct ishtp_cl_device *dev);
int (*reset)(struct ishtp_cl_device *dev);
const struct dev_pm_ops *pm;
};
@@ -75,8 +81,10 @@ int ishtp_register_event_cb(struct ishtp_cl_device *device,
/* Get the device * from ishtp device instance */
struct device *ishtp_device(struct ishtp_cl_device *cl_device);
+/* wait for IPC resume */
+bool ishtp_wait_resume(struct ishtp_device *dev);
/* Trace interface for clients */
-void *ishtp_trace_callback(struct ishtp_cl_device *cl_device);
+ishtp_print_log ishtp_trace_callback(struct ishtp_cl_device *cl_device);
/* Get device pointer of PCI device for DMA access */
struct device *ishtp_get_pci_device(struct ishtp_cl_device *cl_device);
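With the typed ishtp_print_log return value, a client driver can fetch and call the trace hook without casting through void *. A sketch; how the client obtains its struct ishtp_device * is driver-specific and elided here, and all names other than the two ishtp functions are invented:

static void example_trace(struct ishtp_cl_device *cl_device,
			  struct ishtp_device *ishtp_dev)
{
	ishtp_print_log log = ishtp_trace_callback(cl_device);

	if (log)
		log(ishtp_dev, "client event: %d\n", 42);
}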
diff --git a/include/linux/intel-pti.h b/include/linux/intel-pti.h
deleted file mode 100644
index fcd841a90f2f..000000000000
--- a/include/linux/intel-pti.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) Intel 2011
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * The PTI (Parallel Trace Interface) driver directs trace data routed from
- * various parts in the system out through the Intel Penwell PTI port and
- * out of the mobile device for analysis with a debugging tool
- * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
- * compact JTAG, standard.
- *
- * This header file will allow other parts of the OS to use the
- * interface to write out it's contents for debugging a mobile system.
- */
-
-#ifndef LINUX_INTEL_PTI_H_
-#define LINUX_INTEL_PTI_H_
-
-/* offset for last dword of any PTI message. Part of MIPI P1149.7 */
-#define PTI_LASTDWORD_DTS 0x30
-
-/* basic structure used as a write address to the PTI HW */
-struct pti_masterchannel {
- u8 master;
- u8 channel;
-};
-
-/* the following functions are defined in misc/pti.c */
-void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count);
-struct pti_masterchannel *pti_request_masterchannel(u8 type,
- const char *thread_name);
-void pti_release_masterchannel(struct pti_masterchannel *mc);
-
-#endif /* LINUX_INTEL_PTI_H_ */
diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
index d7c403d0dd27..207ef06ba3e1 100644
--- a/include/linux/intel-svm.h
+++ b/include/linux/intel-svm.h
@@ -8,29 +8,10 @@
#ifndef __INTEL_SVM_H__
#define __INTEL_SVM_H__
-struct device;
-
-struct svm_dev_ops {
- void (*fault_cb)(struct device *dev, int pasid, u64 address,
- void *private, int rwxp, int response);
-};
-
-/* Values for rxwp in fault_cb callback */
-#define SVM_REQ_READ (1<<3)
-#define SVM_REQ_WRITE (1<<2)
-#define SVM_REQ_EXEC (1<<1)
-#define SVM_REQ_PRIV (1<<0)
-
-
-/*
- * The SVM_FLAG_PRIVATE_PASID flag requests a PASID which is *not* the "main"
- * PASID for the current process. Even if a PASID already exists, a new one
- * will be allocated. And the PASID allocated with SVM_FLAG_PRIVATE_PASID
- * will not be given to subsequent callers. This facility allows a driver to
- * disambiguate between multiple device contexts which access the same MM,
- * if there is no other way to do so. It should be used sparingly, if at all.
- */
-#define SVM_FLAG_PRIVATE_PASID (1<<0)
+/* Page Request Queue depth */
+#define PRQ_ORDER 4
+#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
+#define PRQ_DEPTH ((0x1000 << PRQ_ORDER) >> 5)
/*
* The SVM_FLAG_SUPERVISOR_MODE flag requests a PASID which can be used only
@@ -43,91 +24,6 @@ struct svm_dev_ops {
* It is unlikely that we will ever hook into flush_tlb_kernel_range() to
* do such IOTLB flushes automatically.
*/
-#define SVM_FLAG_SUPERVISOR_MODE (1<<1)
-
-#ifdef CONFIG_INTEL_IOMMU_SVM
-
-/**
- * intel_svm_bind_mm() - Bind the current process to a PASID
- * @dev: Device to be granted access
- * @pasid: Address for allocated PASID
- * @flags: Flags. Later for requesting supervisor mode, etc.
- * @ops: Callbacks to device driver
- *
- * This function attempts to enable PASID support for the given device.
- * If the @pasid argument is non-%NULL, a PASID is allocated for access
- * to the MM of the current process.
- *
- * By using a %NULL value for the @pasid argument, this function can
- * be used to simply validate that PASID support is available for the
- * given device — i.e. that it is behind an IOMMU which has the
- * requisite support, and is enabled.
- *
- * Page faults are handled transparently by the IOMMU code, and there
- * should be no need for the device driver to be involved. If a page
- * fault cannot be handled (i.e. is an invalid address rather than
- * just needs paging in), then the page request will be completed by
- * the core IOMMU code with appropriate status, and the device itself
- * can then report the resulting fault to its driver via whatever
- * mechanism is appropriate.
- *
- * Multiple calls from the same process may result in the same PASID
- * being re-used. A reference count is kept.
- */
-extern int intel_svm_bind_mm(struct device *dev, int *pasid, int flags,
- struct svm_dev_ops *ops);
-
-/**
- * intel_svm_unbind_mm() - Unbind a specified PASID
- * @dev: Device for which PASID was allocated
- * @pasid: PASID value to be unbound
- *
- * This function allows a PASID to be retired when the device no
- * longer requires access to the address space of a given process.
- *
- * If the use count for the PASID in question reaches zero, the
- * PASID is revoked and may no longer be used by hardware.
- *
- * Device drivers are required to ensure that no access (including
- * page requests) is currently outstanding for the PASID in question,
- * before calling this function.
- */
-extern int intel_svm_unbind_mm(struct device *dev, int pasid);
-
-/**
- * intel_svm_is_pasid_valid() - check if pasid is valid
- * @dev: Device for which PASID was allocated
- * @pasid: PASID value to be checked
- *
- * This function checks if the specified pasid is still valid. A
- * valid pasid means the backing mm is still having a valid user.
- * For kernel callers init_mm is always valid. for other mm, if mm->mm_users
- * is non-zero, it is valid.
- *
- * returns -EINVAL if invalid pasid, 0 if pasid ref count is invalid
- * 1 if pasid is valid.
- */
-extern int intel_svm_is_pasid_valid(struct device *dev, int pasid);
-
-#else /* CONFIG_INTEL_IOMMU_SVM */
-
-static inline int intel_svm_bind_mm(struct device *dev, int *pasid,
- int flags, struct svm_dev_ops *ops)
-{
- return -ENOSYS;
-}
-
-static inline int intel_svm_unbind_mm(struct device *dev, int pasid)
-{
- BUG();
-}
-
-static inline int intel_svm_is_pasid_valid(struct device *dev, int pasid)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_INTEL_IOMMU_SVM */
-
-#define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL, 0, NULL))
+#define SVM_FLAG_SUPERVISOR_MODE BIT(0)
#endif /* __INTEL_SVM_H__ */
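For reference, the new page-request-queue constants work out as follows: with PRQ_ORDER = 4 the queue occupies 0x1000 << 4 = 64 KiB; each page request descriptor is 32 bytes, so PRQ_DEPTH = 64 KiB >> 5 = 2048 entries, and PRQ_RING_MASK (64 KiB - 0x20) masks an offset down to the start of the last descriptor. A quick compile-time check of that arithmetic (not part of the patch):

#include <linux/build_bug.h>

static_assert(((0x1000 << 4) >> 5) == 2048);		/* PRQ_DEPTH */
static_assert(((0x1000 << 4) - 0x20) == 0xffe0);	/* PRQ_RING_MASK */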
diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h
index efb3ce892c20..9f4b6f5b822f 100644
--- a/include/linux/intel_rapl.h
+++ b/include/linux/intel_rapl.h
@@ -29,21 +29,24 @@ enum rapl_domain_reg_id {
RAPL_DOMAIN_REG_PERF,
RAPL_DOMAIN_REG_POLICY,
RAPL_DOMAIN_REG_INFO,
+ RAPL_DOMAIN_REG_PL4,
RAPL_DOMAIN_REG_MAX,
};
-struct rapl_package;
+struct rapl_domain;
enum rapl_primitives {
ENERGY_COUNTER,
POWER_LIMIT1,
POWER_LIMIT2,
+ POWER_LIMIT4,
FW_LOCK,
PL1_ENABLE, /* power limit 1, aka long term */
PL1_CLAMP, /* allow frequency to go below OS request */
PL2_ENABLE, /* power limit 2, aka short term, instantaneous */
PL2_CLAMP,
+ PL4_ENABLE, /* power limit 4, aka max peak power */
TIME_WINDOW1, /* long term */
TIME_WINDOW2, /* short term */
@@ -55,6 +58,12 @@ enum rapl_primitives {
THROTTLED_TIME,
PRIORITY_LEVEL,
+ PSYS_POWER_LIMIT1,
+ PSYS_POWER_LIMIT2,
+ PSYS_PL1_ENABLE,
+ PSYS_PL2_ENABLE,
+ PSYS_TIME_WINDOW1,
+ PSYS_TIME_WINDOW2,
/* below are not raw primitive data */
AVERAGE_POWER,
NR_RAPL_PRIMITIVES,
@@ -65,7 +74,7 @@ struct rapl_domain_data {
unsigned long timestamp;
};
-#define NR_POWER_LIMITS (2)
+#define NR_POWER_LIMITS (3)
struct rapl_power_limit {
struct powercap_zone_constraint *constraint;
int prim_id; /* primitive ID used to enable */
@@ -76,8 +85,10 @@ struct rapl_power_limit {
struct rapl_package;
+#define RAPL_DOMAIN_NAME_LENGTH 16
+
struct rapl_domain {
- const char *name;
+ char name[RAPL_DOMAIN_NAME_LENGTH];
enum rapl_domain_type id;
u64 regs[RAPL_DOMAIN_REG_MAX];
struct powercap_zone power_zone;
@@ -149,7 +160,4 @@ struct rapl_package *rapl_find_package_domain(int cpu, struct rapl_if_priv *priv
struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv);
void rapl_remove_package(struct rapl_package *rp);
-int rapl_add_platform_domain(struct rapl_if_priv *priv);
-void rapl_remove_platform_domain(struct rapl_if_priv *priv);
-
#endif /* __INTEL_RAPL_H__ */
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
index 0c494534b4d3..cd5c5a27557f 100644
--- a/include/linux/interconnect-provider.h
+++ b/include/linux/interconnect-provider.h
@@ -15,6 +15,17 @@ struct icc_node;
struct of_phandle_args;
/**
+ * struct icc_node_data - icc node data
+ *
+ * @node: icc node
+ * @tag: tag to be applied to the path via icc_set_tag()
+ */
+struct icc_node_data {
+ struct icc_node *node;
+ u32 tag;
+};
+
+/**
* struct icc_onecell_data - driver data for onecell interconnect providers
*
* @num_nodes: number of nodes in this device
@@ -38,9 +49,12 @@ struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
* @aggregate: pointer to device specific aggregate operation function
* @pre_aggregate: pointer to device specific function that is called
* before the aggregation begins (optional)
+ * @get_bw: pointer to device specific function to get current bandwidth
* @xlate: provider-specific callback for mapping nodes from phandle arguments
+ * @xlate_extended: vendor-specific callback for mapping node data from phandle arguments
* @dev: the device this interconnect provider belongs to
* @users: count of active users
+ * @inter_set: whether inter-provider pairs will be configured with @set
* @data: pointer to private data
*/
struct icc_provider {
@@ -50,9 +64,12 @@ struct icc_provider {
int (*aggregate)(struct icc_node *node, u32 tag, u32 avg_bw,
u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
void (*pre_aggregate)(struct icc_node *node);
+ int (*get_bw)(struct icc_node *node, u32 *avg, u32 *peak);
struct icc_node* (*xlate)(struct of_phandle_args *spec, void *data);
+ struct icc_node_data* (*xlate_extended)(struct of_phandle_args *spec, void *data);
struct device *dev;
int users;
+ bool inter_set;
void *data;
};
@@ -71,6 +88,8 @@ struct icc_provider {
* @req_list: a list of QoS constraint requests associated with this node
* @avg_bw: aggregated value of average bandwidth requests from all consumers
* @peak_bw: aggregated value of peak bandwidth requests from all consumers
+ * @init_avg: average bandwidth value that is read from the hardware during init
+ * @init_peak: peak bandwidth value that is read from the hardware during init
* @data: pointer to private data
*/
struct icc_node {
@@ -87,6 +106,8 @@ struct icc_node {
struct hlist_head req_list;
u32 avg_bw;
u32 peak_bw;
+ u32 init_avg;
+ u32 init_peak;
void *data;
};
@@ -102,7 +123,9 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider);
void icc_node_del(struct icc_node *node);
int icc_nodes_remove(struct icc_provider *provider);
int icc_provider_add(struct icc_provider *provider);
-int icc_provider_del(struct icc_provider *provider);
+void icc_provider_del(struct icc_provider *provider);
+struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec);
+void icc_sync_state(struct device *dev);
#else
@@ -117,7 +140,7 @@ static inline struct icc_node *icc_node_create(int id)
return ERR_PTR(-ENOTSUPP);
}
-void icc_node_destroy(int id)
+static inline void icc_node_destroy(int id)
{
}
@@ -126,16 +149,16 @@ static inline int icc_link_create(struct icc_node *node, const int dst_id)
return -ENOTSUPP;
}
-int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
+static inline int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
{
return -ENOTSUPP;
}
-void icc_node_add(struct icc_node *node, struct icc_provider *provider)
+static inline void icc_node_add(struct icc_node *node, struct icc_provider *provider)
{
}
-void icc_node_del(struct icc_node *node)
+static inline void icc_node_del(struct icc_node *node)
{
}
@@ -149,9 +172,13 @@ static inline int icc_provider_add(struct icc_provider *provider)
return -ENOTSUPP;
}
-static inline int icc_provider_del(struct icc_provider *provider)
+static inline void icc_provider_del(struct icc_provider *provider)
{
- return -ENOTSUPP;
+}
+
+static inline struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec)
+{
+ return ERR_PTR(-ENOTSUPP);
}
#endif /* CONFIG_INTERCONNECT */
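A provider adopting the extended translation hook returns an icc_node_data so a tag can ride along with the resolved node. A minimal sketch; the lookup helper and the argument layout are invented for illustration:

static struct icc_node_data *
example_xlate_extended(struct of_phandle_args *spec, void *data)
{
	struct icc_node_data *ndata;
	struct icc_node *node;

	node = example_find_node(data, spec->args[0]);	/* hypothetical */
	if (!node)
		return ERR_PTR(-EINVAL);

	ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
	if (!ndata)
		return ERR_PTR(-ENOMEM);

	ndata->node = node;
	if (spec->args_count >= 2)
		ndata->tag = spec->args[1];	/* optional QoS tag */

	return ndata;
}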
diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h
index d70a914cba11..2b0e784ba771 100644
--- a/include/linux/interconnect.h
+++ b/include/linux/interconnect.h
@@ -23,14 +23,41 @@
struct icc_path;
struct device;
+/**
+ * struct icc_bulk_data - Data used for bulk icc operations.
+ *
+ * @path: reference to the interconnect path (internal use)
+ * @name: the name from the "interconnect-names" DT property
+ * @avg_bw: average bandwidth in icc units
+ * @peak_bw: peak bandwidth in icc units
+ */
+struct icc_bulk_data {
+ struct icc_path *path;
+ const char *name;
+ u32 avg_bw;
+ u32 peak_bw;
+};
+
#if IS_ENABLED(CONFIG_INTERCONNECT)
struct icc_path *icc_get(struct device *dev, const int src_id,
const int dst_id);
struct icc_path *of_icc_get(struct device *dev, const char *name);
+struct icc_path *devm_of_icc_get(struct device *dev, const char *name);
+int devm_of_icc_bulk_get(struct device *dev, int num_paths, struct icc_bulk_data *paths);
+struct icc_path *of_icc_get_by_index(struct device *dev, int idx);
void icc_put(struct icc_path *path);
+int icc_enable(struct icc_path *path);
+int icc_disable(struct icc_path *path);
int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw);
void icc_set_tag(struct icc_path *path, u32 tag);
+const char *icc_get_name(struct icc_path *path);
+int __must_check of_icc_bulk_get(struct device *dev, int num_paths,
+ struct icc_bulk_data *paths);
+void icc_bulk_put(int num_paths, struct icc_bulk_data *paths);
+int icc_bulk_set_bw(int num_paths, const struct icc_bulk_data *paths);
+int icc_bulk_enable(int num_paths, const struct icc_bulk_data *paths);
+void icc_bulk_disable(int num_paths, const struct icc_bulk_data *paths);
#else
@@ -46,10 +73,31 @@ static inline struct icc_path *of_icc_get(struct device *dev,
return NULL;
}
+static inline struct icc_path *devm_of_icc_get(struct device *dev,
+ const char *name)
+{
+ return NULL;
+}
+
+static inline struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
+{
+ return NULL;
+}
+
static inline void icc_put(struct icc_path *path)
{
}
+static inline int icc_enable(struct icc_path *path)
+{
+ return 0;
+}
+
+static inline int icc_disable(struct icc_path *path)
+{
+ return 0;
+}
+
static inline int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
{
return 0;
@@ -59,6 +107,40 @@ static inline void icc_set_tag(struct icc_path *path, u32 tag)
{
}
+static inline const char *icc_get_name(struct icc_path *path)
+{
+ return NULL;
+}
+
+static inline int of_icc_bulk_get(struct device *dev, int num_paths, struct icc_bulk_data *paths)
+{
+ return 0;
+}
+
+static inline int devm_of_icc_bulk_get(struct device *dev, int num_paths,
+ struct icc_bulk_data *paths)
+{
+ return 0;
+}
+
+static inline void icc_bulk_put(int num_paths, struct icc_bulk_data *paths)
+{
+}
+
+static inline int icc_bulk_set_bw(int num_paths, const struct icc_bulk_data *paths)
+{
+ return 0;
+}
+
+static inline int icc_bulk_enable(int num_paths, const struct icc_bulk_data *paths)
+{
+ return 0;
+}
+
+static inline void icc_bulk_disable(int num_paths, const struct icc_bulk_data *paths)
+{
+}
+
#endif /* CONFIG_INTERCONNECT */
#endif /* __LINUX_INTERCONNECT_H */
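The bulk helpers follow the familiar regulator/clk bulk pattern. A consumer sketch, assuming two DT interconnect paths named "mem" and "config" and illustrative bandwidth values:

static struct icc_bulk_data example_paths[] = {
	{ .name = "mem",    .avg_bw = 1000, .peak_bw = 2000 },
	{ .name = "config", .avg_bw = 0,    .peak_bw = 76800 },
};

static int example_probe(struct device *dev)
{
	int ret;

	ret = devm_of_icc_bulk_get(dev, ARRAY_SIZE(example_paths),
				   example_paths);
	if (ret)
		return ret;

	ret = icc_bulk_set_bw(ARRAY_SIZE(example_paths), example_paths);
	if (ret)
		return ret;

	return icc_bulk_enable(ARRAY_SIZE(example_paths), example_paths);
}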
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index c5fe60ec6b84..a92bce40b04b 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -13,6 +13,7 @@
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
+#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/ptrace.h>
@@ -61,6 +62,11 @@
* interrupt handler after suspending interrupts. For system
* wakeup devices users need to implement wakeup detection in
* their interrupt handlers.
+ * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it.
+ * Users will enable it explicitly via enable_irq() or
+ * enable_nmi() later.
+ * IRQF_NO_DEBUG - Exclude from runaway detection for IPI and similar handlers;
+ * depends on IRQF_PERCPU.
*/
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
@@ -74,6 +80,8 @@
#define IRQF_NO_THREAD 0x00010000
#define IRQF_EARLY_RESUME 0x00020000
#define IRQF_COND_SUSPEND 0x00040000
+#define IRQF_NO_AUTOEN 0x00080000
+#define IRQF_NO_DEBUG 0x00100000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
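IRQF_NO_AUTOEN closes the window where a handler could fire between request_irq() and a driver's disable_irq(): the line stays masked until the driver enables it. A sketch, with invented names around the flag itself:

static irqreturn_t example_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_setup_irq(unsigned int irq, void *data)
{
	int ret;

	/* Keep the line masked until the hardware is fully initialized. */
	ret = request_irq(irq, example_handler, IRQF_NO_AUTOEN,
			  "example", data);
	if (ret)
		return ret;

	/* ... finish device init ... */

	enable_irq(irq);	/* now the handler may run */
	return 0;
}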
@@ -214,24 +222,7 @@ devm_request_any_context_irq(struct device *dev, unsigned int irq,
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
-/*
- * On lockdep we dont want to enable hardirqs in hardirq
- * context. Use local_irq_enable_in_hardirq() to annotate
- * kernel code that has to do this nevertheless (pretty much
- * the only valid case is for old/broken hardware that is
- * insanely slow).
- *
- * NOTE: in theory this might break fragile code that relies
- * on hardirq delivery - in practice we dont seem to have such
- * places left. So the only effect should be slightly increased
- * irqs-off latencies.
- */
-#ifdef CONFIG_LOCKDEP
-# define local_irq_enable_in_hardirq() do { } while (0)
-#else
-# define local_irq_enable_in_hardirq() local_irq_enable()
-#endif
-
+bool irq_has_action(unsigned int irq);
extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
@@ -248,6 +239,8 @@ extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
extern int prepare_percpu_nmi(unsigned int irq);
extern void teardown_percpu_nmi(unsigned int irq);
+extern int irq_inject_interrupt(unsigned int irq);
+
/* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
@@ -312,44 +305,54 @@ struct irq_affinity_desc {
extern cpumask_var_t irq_default_affinity;
-/* Internal implementation. Use the helpers below */
-extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
- bool force);
+extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
+extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);
+
+extern int irq_can_set_affinity(unsigned int irq);
+extern int irq_select_affinity(unsigned int irq);
+
+extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
+ bool setaffinity);
/**
- * irq_set_affinity - Set the irq affinity of a given irq
- * @irq: Interrupt to set affinity
- * @cpumask: cpumask
+ * irq_update_affinity_hint - Update the affinity hint
+ * @irq: Interrupt to update
+ * @m: cpumask pointer (NULL to clear the hint)
*
- * Fails if cpumask does not contain an online CPU
+ * Updates the affinity hint, but does not change the affinity of the interrupt.
*/
static inline int
-irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
{
- return __irq_set_affinity(irq, cpumask, false);
+ return __irq_apply_affinity_hint(irq, m, false);
}
/**
- * irq_force_affinity - Force the irq affinity of a given irq
- * @irq: Interrupt to set affinity
- * @cpumask: cpumask
- *
- * Same as irq_set_affinity, but without checking the mask against
- * online cpus.
+ * irq_set_affinity_and_hint - Update the affinity hint and apply the provided
+ * cpumask to the interrupt
+ * @irq: Interrupt to update
+ * @m: cpumask pointer (NULL to clear the hint)
*
- * Solely for low level cpu hotplug code, where we need to make per
- * cpu interrupts affine before the cpu becomes online.
+ * Updates the affinity hint and if @m is not NULL it applies it as the
+ * affinity of that interrupt.
*/
static inline int
-irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
{
- return __irq_set_affinity(irq, cpumask, true);
+ return __irq_apply_affinity_hint(irq, m, true);
}
-extern int irq_can_set_affinity(unsigned int irq);
-extern int irq_select_affinity(unsigned int irq);
+/*
+ * Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint()
+ * instead.
+ */
+static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+{
+ return irq_set_affinity_and_hint(irq, m);
+}
-extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
+extern int irq_update_affinity_desc(unsigned int irq,
+ struct irq_affinity_desc *affinity);
extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
@@ -379,12 +382,30 @@ static inline int irq_can_set_affinity(unsigned int irq)
static inline int irq_select_affinity(unsigned int irq) { return 0; }
+static inline int irq_update_affinity_hint(unsigned int irq,
+ const struct cpumask *m)
+{
+ return -EINVAL;
+}
+
+static inline int irq_set_affinity_and_hint(unsigned int irq,
+ const struct cpumask *m)
+{
+ return -EINVAL;
+}
+
static inline int irq_set_affinity_hint(unsigned int irq,
const struct cpumask *m)
{
return -EINVAL;
}
+static inline int irq_update_affinity_desc(unsigned int irq,
+ struct irq_affinity_desc *affinity)
+{
+ return -EINVAL;
+}
+
static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
@@ -487,12 +508,13 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
#ifdef CONFIG_IRQ_FORCED_THREADING
# ifdef CONFIG_PREEMPT_RT
-# define force_irqthreads (true)
+# define force_irqthreads() (true)
# else
-extern bool force_irqthreads;
+DECLARE_STATIC_KEY_FALSE(force_irqthreads_key);
+# define force_irqthreads() (static_branch_unlikely(&force_irqthreads_key))
# endif
#else
-#define force_irqthreads (0)
+#define force_irqthreads() (false)
#endif
#ifndef local_softirq_pending
@@ -539,7 +561,16 @@ enum
NR_SOFTIRQS
};
-#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+/*
+ * The following vectors can be safely ignored after ksoftirqd is parked:
+ *
+ * _ RCU:
+ * 1) rcutree_migrate_callbacks() migrates the queue.
+ * 2) rcu_report_dead() reports the final quiescent states.
+ *
+ * _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue
+ */
+#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(RCU_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ))
/* map softirq index to softirq name. update 'softirq_to_name' in
* kernel/softirq.c when adding a new softirq.
@@ -558,12 +589,12 @@ struct softirq_action
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
-#ifdef __ARCH_HAS_DO_SOFTIRQ
-void do_softirq_own_stack(void);
+#ifdef CONFIG_PREEMPT_RT
+extern void do_softirq_post_smp_call_flush(unsigned int was_pending);
#else
-static inline void do_softirq_own_stack(void)
+static inline void do_softirq_post_smp_call_flush(unsigned int unused)
{
- __do_softirq();
+ do_softirq();
}
#endif
@@ -583,6 +614,9 @@ static inline struct task_struct *this_cpu_ksoftirqd(void)
/* Tasklets --- multithreaded analogue of BHs.
+ This API is deprecated. Please consider using threaded IRQs instead:
+ https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de
+
The main feature distinguishing them from generic softirqs: a tasklet
runs on only one CPU at a time.
@@ -606,16 +640,42 @@ struct tasklet_struct
struct tasklet_struct *next;
unsigned long state;
atomic_t count;
- void (*func)(unsigned long);
+ bool use_callback;
+ union {
+ void (*func)(unsigned long data);
+ void (*callback)(struct tasklet_struct *t);
+ };
unsigned long data;
};
-#define DECLARE_TASKLET(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
+#define DECLARE_TASKLET(name, _callback) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(0), \
+ .callback = _callback, \
+ .use_callback = true, \
+}
+
+#define DECLARE_TASKLET_DISABLED(name, _callback) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(1), \
+ .callback = _callback, \
+ .use_callback = true, \
+}
-#define DECLARE_TASKLET_DISABLED(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
+#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \
+ container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
+#define DECLARE_TASKLET_OLD(name, _func) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(0), \
+ .func = _func, \
+}
+
+#define DECLARE_TASKLET_DISABLED_OLD(name, _func) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(1), \
+ .func = _func, \
+}
enum
{
@@ -623,26 +683,21 @@ enum
TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
};
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
static inline int tasklet_trylock(struct tasklet_struct *t)
{
return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}
-static inline void tasklet_unlock(struct tasklet_struct *t)
-{
- smp_mb__before_atomic();
- clear_bit(TASKLET_STATE_RUN, &(t)->state);
-}
+void tasklet_unlock(struct tasklet_struct *t);
+void tasklet_unlock_wait(struct tasklet_struct *t);
+void tasklet_unlock_spin_wait(struct tasklet_struct *t);
-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
-{
- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
-}
#else
-#define tasklet_trylock(t) 1
-#define tasklet_unlock_wait(t) do { } while (0)
-#define tasklet_unlock(t) do { } while (0)
+static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
+static inline void tasklet_unlock(struct tasklet_struct *t) { }
+static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
+static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
#endif
extern void __tasklet_schedule(struct tasklet_struct *t);
@@ -667,6 +722,17 @@ static inline void tasklet_disable_nosync(struct tasklet_struct *t)
smp_mb__after_atomic();
}
+/*
+ * Do not use in new code. Disabling tasklets from atomic contexts is
+ * error prone and should be avoided.
+ */
+static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
+{
+ tasklet_disable_nosync(t);
+ tasklet_unlock_spin_wait(t);
+ smp_mb();
+}
+
static inline void tasklet_disable(struct tasklet_struct *t)
{
tasklet_disable_nosync(t);
@@ -681,9 +747,10 @@ static inline void tasklet_enable(struct tasklet_struct *t)
}
extern void tasklet_kill(struct tasklet_struct *t);
-extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data);
+extern void tasklet_setup(struct tasklet_struct *t,
+ void (*callback)(struct tasklet_struct *));
/*
* Autoprobing for irqs:
@@ -758,8 +825,10 @@ extern int arch_early_irq_init(void);
/*
* We want to know which function is an entrypoint of a hardirq or a softirq.
*/
-#define __irq_entry __attribute__((__section__(".irqentry.text")))
-#define __softirq_entry \
- __attribute__((__section__(".softirqentry.text")))
+#ifndef __irq_entry
+# define __irq_entry __section(".irqentry.text")
+#endif
+
+#define __softirq_entry __section(".softirqentry.text")
#endif
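
The tasklet changes above replace the old (unsigned long data) callbacks with ones that receive the tasklet itself, so per-device state is recovered with from_tasklet() instead of a cast. A minimal sketch (struct foo_dev and the handler are hypothetical; note the API is marked deprecated above in favour of threaded IRQs):

	struct foo_dev {
		struct tasklet_struct tasklet;
		/* ... device state ... */
	};

	static void foo_tasklet_fn(struct tasklet_struct *t)
	{
		/* container_of() back to the embedding structure */
		struct foo_dev *foo = from_tasklet(foo, t, tasklet);

		/* ... bottom-half work on foo ... */
	}

	static void foo_init(struct foo_dev *foo)
	{
		tasklet_setup(&foo->tasklet, foo_tasklet_fn);
		/* later, typically from the hard irq handler: */
		tasklet_schedule(&foo->tasklet);
	}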
diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h
index ae21b72cce85..f32522bb3aa5 100644
--- a/include/linux/io-64-nonatomic-hi-lo.h
+++ b/include/linux/io-64-nonatomic-hi-lo.h
@@ -57,7 +57,7 @@ static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr)
#ifndef ioread64_hi_lo
#define ioread64_hi_lo ioread64_hi_lo
-static inline u64 ioread64_hi_lo(void __iomem *addr)
+static inline u64 ioread64_hi_lo(const void __iomem *addr)
{
u32 low, high;
@@ -79,7 +79,7 @@ static inline void iowrite64_hi_lo(u64 val, void __iomem *addr)
#ifndef ioread64be_hi_lo
#define ioread64be_hi_lo ioread64be_hi_lo
-static inline u64 ioread64be_hi_lo(void __iomem *addr)
+static inline u64 ioread64be_hi_lo(const void __iomem *addr)
{
u32 low, high;
diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h
index faaa842dbdb9..448a21435dba 100644
--- a/include/linux/io-64-nonatomic-lo-hi.h
+++ b/include/linux/io-64-nonatomic-lo-hi.h
@@ -57,7 +57,7 @@ static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr)
#ifndef ioread64_lo_hi
#define ioread64_lo_hi ioread64_lo_hi
-static inline u64 ioread64_lo_hi(void __iomem *addr)
+static inline u64 ioread64_lo_hi(const void __iomem *addr)
{
u32 low, high;
@@ -79,7 +79,7 @@ static inline void iowrite64_lo_hi(u64 val, void __iomem *addr)
#ifndef ioread64be_lo_hi
#define ioread64be_lo_hi ioread64be_lo_hi
-static inline u64 ioread64be_lo_hi(void __iomem *addr)
+static inline u64 ioread64be_lo_hi(const void __iomem *addr)
{
u32 low, high;
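
These hunks only constify the address argument; the helpers themselves are unchanged. For reference, a lo-hi accessor boils down to two 32-bit reads combined in low-then-high order, roughly (a sketch of the existing pattern, not a new API):

	static inline u64 example_ioread64_lo_hi(const void __iomem *addr)
	{
		u32 low, high;

		low = ioread32(addr);
		high = ioread32(addr + sizeof(u32));
		return low + ((u64)high << 32);
	}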
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 837058bc1c9f..66a774d2710e 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -10,13 +10,14 @@
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/io.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
/*
* The io_mapping mechanism provides an abstraction for mapping
* individual pages from an io device to the CPU in an efficient fashion.
*
- * See Documentation/io-mapping.txt
+ * See Documentation/driver-api/io-mapping.rst
*/
struct io_mapping {
@@ -68,13 +69,32 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
BUG_ON(offset >= mapping->size);
phys_addr = mapping->base + offset;
- return iomap_atomic_prot_pfn(PHYS_PFN(phys_addr), mapping->prot);
+ preempt_disable();
+ pagefault_disable();
+ return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
}
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
- iounmap_atomic(vaddr);
+ kunmap_local_indexed((void __force *)vaddr);
+ pagefault_enable();
+ preempt_enable();
+}
+
+static inline void __iomem *
+io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
+{
+ resource_size_t phys_addr;
+
+ BUG_ON(offset >= mapping->size);
+ phys_addr = mapping->base + offset;
+ return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
+}
+
+static inline void io_mapping_unmap_local(void __iomem *vaddr)
+{
+ kunmap_local_indexed((void __force *)vaddr);
}
static inline void __iomem *
@@ -96,10 +116,9 @@ io_mapping_unmap(void __iomem *vaddr)
iounmap(vaddr);
}
-#else
+#else /* HAVE_ATOMIC_IOMAP */
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
/* Create the io_mapping object*/
static inline struct io_mapping *
@@ -107,16 +126,13 @@ io_mapping_init_wc(struct io_mapping *iomap,
resource_size_t base,
unsigned long size)
{
+ iomap->iomem = ioremap_wc(base, size);
+ if (!iomap->iomem)
+ return NULL;
+
iomap->base = base;
iomap->size = size;
- iomap->iomem = ioremap_wc(base, size);
-#if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */
- iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
-#elif defined(pgprot_writecombine)
iomap->prot = pgprot_writecombine(PAGE_KERNEL);
-#else
- iomap->prot = pgprot_noncached(PAGE_KERNEL);
-#endif
return iomap;
}
@@ -159,7 +175,18 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
preempt_enable();
}
-#endif /* HAVE_ATOMIC_IOMAP */
+static inline void __iomem *
+io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
+{
+ return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
+}
+
+static inline void io_mapping_unmap_local(void __iomem *vaddr)
+{
+ io_mapping_unmap(vaddr);
+}
+
+#endif /* !HAVE_ATOMIC_IOMAP */
static inline struct io_mapping *
io_mapping_create_wc(resource_size_t base,
@@ -187,3 +214,6 @@ io_mapping_free(struct io_mapping *iomap)
}
#endif /* _LINUX_IO_MAPPING_H */
+
+int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn, unsigned long size);
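
The new io_mapping_map_local_wc()/io_mapping_unmap_local() pair above is the migration target for the atomic variants: mappings are CPU-local, must be unmapped in the same context and nest in reverse order, but unlike the atomic variant they do not disable pagefaults or preemption. A minimal usage sketch (foo_poke and the offset math are hypothetical):

	static void foo_poke(struct io_mapping *mapping, unsigned long offset, u32 val)
	{
		void __iomem *vaddr;

		vaddr = io_mapping_map_local_wc(mapping, offset & PAGE_MASK);
		writel(val, vaddr + offset_in_page(offset));
		io_mapping_unmap_local(vaddr);
	}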
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 53d53c6c2be9..1f068dfdb140 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -15,6 +15,10 @@ enum io_pgtable_fmt {
ARM_64_LPAE_S2,
ARM_V7S,
ARM_MALI_LPAE,
+ AMD_IOMMU_V1,
+ AMD_IOMMU_V2,
+ APPLE_DART,
+ APPLE_DART2,
IO_PGTABLE_NUM_FMTS,
};
@@ -25,13 +29,11 @@ enum io_pgtable_fmt {
* @tlb_flush_walk: Synchronously invalidate all intermediate TLB state
* (sometimes referred to as the "walk cache") for a virtual
* address range.
- * @tlb_flush_leaf: Synchronously invalidate all leaf TLB state for a virtual
- * address range.
* @tlb_add_page: Optional callback to queue up leaf TLB invalidation for a
* single page. IOMMUs that cannot batch TLB invalidation
* operations efficiently will typically issue them here, but
* others may decide to update the iommu_iotlb_gather structure
- * and defer the invalidation until iommu_tlb_sync() instead.
+ * and defer the invalidation until iommu_iotlb_sync() instead.
*
* Note that these can all be called in atomic context and must therefore
* not block.
@@ -40,8 +42,6 @@ struct iommu_flush_ops {
void (*tlb_flush_all)(void *cookie);
void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
void *cookie);
- void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule,
- void *cookie);
void (*tlb_add_page)(struct iommu_iotlb_gather *gather,
unsigned long iova, size_t granule, void *cookie);
};
@@ -72,27 +72,26 @@ struct io_pgtable_cfg {
* hardware which does not implement the permissions of a given
* format, and/or requires some format-specific default value.
*
- * IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
- * (unmapped) entries but the hardware might do so anyway, perform
- * TLB maintenance when mapping as well as when unmapping.
- *
* IO_PGTABLE_QUIRK_ARM_MTK_EXT: (ARM v7s format) MediaTek IOMMUs extend
- * to support up to 34 bits PA where the bit32 and bit33 are
- * encoded in the bit9 and bit4 of the PTE respectively.
+ * to support up to 35 bits PA, where bits 32, 33 and 34 are
+ * encoded in bits 9, 4 and 5 of the PTE respectively.
*
- * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
- * on unmap, for DMA domains using the flush queue mechanism for
- * delayed invalidation.
+ * IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT: (ARM v7s format) MediaTek IOMMUs
+ * extend the translation table base support up to 35 bits PA; the
+ * encoding format is the same as IO_PGTABLE_QUIRK_ARM_MTK_EXT.
*
* IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table
* for use in the upper half of a split address space.
+ *
+ * IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability
+ * attributes set in the TCR for a non-coherent page-table walker.
*/
- #define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
- #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
- #define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2)
- #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3)
- #define IO_PGTABLE_QUIRK_NON_STRICT BIT(4)
- #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5)
+ #define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
+ #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
+ #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3)
+ #define IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT BIT(4)
+ #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5)
+ #define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6)
unsigned long quirks;
unsigned long pgsize_bitmap;
unsigned int ias;
@@ -140,6 +139,11 @@ struct io_pgtable_cfg {
u64 transtab;
u64 memattr;
} arm_mali_lpae_cfg;
+
+ struct {
+ u64 ttbr[4];
+ u32 n_ttbrs;
+ } apple_dart_cfg;
};
};
@@ -147,7 +151,9 @@ struct io_pgtable_cfg {
* struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
*
* @map: Map a physically contiguous memory region.
+ * @map_pages: Map a physically contiguous range of pages of the same size.
* @unmap: Unmap a physically contiguous memory region.
+ * @unmap_pages: Unmap a range of virtually contiguous pages of the same size.
* @iova_to_phys: Translate iova to physical address.
*
* These functions map directly onto the iommu_ops member functions with
@@ -155,9 +161,15 @@ struct io_pgtable_cfg {
*/
struct io_pgtable_ops {
int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot);
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
+ int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped);
size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
size_t size, struct iommu_iotlb_gather *gather);
+ size_t (*unmap_pages)(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather);
phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
unsigned long iova);
};
@@ -210,21 +222,16 @@ struct io_pgtable {
static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
{
- iop->cfg.tlb->tlb_flush_all(iop->cookie);
+ if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_all)
+ iop->cfg.tlb->tlb_flush_all(iop->cookie);
}
static inline void
io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova,
size_t size, size_t granule)
{
- iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
-}
-
-static inline void
-io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova,
- size_t size, size_t granule)
-{
- iop->cfg.tlb->tlb_flush_leaf(iova, size, granule, iop->cookie);
+ if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_walk)
+ iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
}
static inline void
@@ -232,7 +239,7 @@ io_pgtable_tlb_add_page(struct io_pgtable *iop,
struct iommu_iotlb_gather * gather, unsigned long iova,
size_t granule)
{
- if (iop->cfg.tlb->tlb_add_page)
+ if (iop->cfg.tlb && iop->cfg.tlb->tlb_add_page)
iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie);
}
@@ -254,5 +261,8 @@ extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns;
#endif /* __IO_PGTABLE_H */
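
Putting the extended hooks together: an IOMMU driver instantiates a table through alloc_io_pgtable_ops() (declared earlier in this header) and can now map a run of same-size pages in a single call via map_pages(). A rough sketch, with hypothetical IOVA/PA values and ARM_64_LPAE_S1 chosen arbitrarily:

	static int foo_map_range(struct io_pgtable_cfg *cfg, void *cookie)
	{
		struct io_pgtable_ops *ops;
		size_t mapped = 0;
		int ret;

		ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, cfg, cookie);
		if (!ops)
			return -ENOMEM;

		/* map 16 contiguous 4K pages at IOVA 0x100000 in one call */
		ret = ops->map_pages(ops, 0x100000, 0x80000000, SZ_4K, 16,
				     IOMMU_READ | IOMMU_WRITE, GFP_KERNEL, &mapped);
		if (ret)
			free_io_pgtable_ops(ops);
		return ret;
	}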
diff --git a/include/linux/io.h b/include/linux/io.h
index b1c44bb4b2d7..308f4f0cfb93 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -31,15 +31,6 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end,
}
#endif
-#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-void __init ioremap_huge_init(void);
-int arch_ioremap_p4d_supported(void);
-int arch_ioremap_pud_supported(void);
-int arch_ioremap_pmd_supported(void);
-#else
-static inline void ioremap_huge_init(void) { }
-#endif
-
/*
* Managed iomap interface
*/
@@ -77,25 +68,23 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
size_t size, unsigned long flags);
void devm_memunmap(struct device *dev, void *addr);
-void *__devm_memremap_pages(struct device *dev, struct resource *res);
-
#ifdef CONFIG_PCI
/*
* The PCI specifications (Rev 3.0, 3.2.5 "Transaction Ordering and
- * Posting") mandate non-posted configuration transactions. There is
- * no ioremap API in the kernel that can guarantee non-posted write
- * semantics across arches so provide a default implementation for
- * mapping PCI config space that defaults to ioremap(); arches
- * should override it if they have memory mapping implementations that
- * guarantee non-posted writes semantics to make the memory mapping
- * compliant with the PCI specification.
+ * Posting") mandate non-posted configuration transactions. This default
+ * implementation attempts to use the ioremap_np() API to provide this
+ * on arches that support it, and falls back to ioremap() on those that
+ * don't. Overriding this function is deprecated; arches that properly
+ * support non-posted accesses should implement ioremap_np() instead, which
+ * this default implementation can then use to return mappings compliant with
+ * the PCI specification.
*/
#ifndef pci_remap_cfgspace
#define pci_remap_cfgspace pci_remap_cfgspace
static inline void __iomem *pci_remap_cfgspace(phys_addr_t offset,
size_t size)
{
- return ioremap(offset, size);
+ return ioremap_np(offset, size) ?: ioremap(offset, size);
}
#endif
#endif
@@ -141,6 +130,8 @@ static inline int arch_phys_wc_index(int handle)
#endif
#endif
+int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size);
+
enum {
/* See memremap() kernel-doc for usage description... */
MEMREMAP_WB = 1 << 0,
@@ -175,4 +166,7 @@ static inline void arch_io_free_memtype_wc(resource_size_t base,
}
#endif
+int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
+ resource_size_t size);
+
#endif /* _LINUX_IO_H */
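
The two devm_* additions above give drivers managed counterparts for write-combining setup, so teardown happens automatically on unbind. A sketch for a PCI BAR (the probe shape and BAR index are hypothetical):

	static int foo_pci_probe(struct pci_dev *pdev)
	{
		resource_size_t base = pci_resource_start(pdev, 0);
		resource_size_t size = pci_resource_len(pdev, 0);
		int ret;

		ret = devm_arch_io_reserve_memtype_wc(&pdev->dev, base, size);
		if (ret)
			return ret;

		/* MTRR handle (or <= 0) is tracked by devres; no manual free */
		devm_arch_phys_wc_add(&pdev->dev, base, size);
		return 0;
	}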
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
new file mode 100644
index 000000000000..43bc8a2edccf
--- /dev/null
+++ b/include/linux/io_uring.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_IO_URING_H
+#define _LINUX_IO_URING_H
+
+#include <linux/sched.h>
+#include <linux/xarray.h>
+#include <uapi/linux/io_uring.h>
+
+enum io_uring_cmd_flags {
+ IO_URING_F_COMPLETE_DEFER = 1,
+ IO_URING_F_UNLOCKED = 2,
+ /* int's last bit, sign checks are usually faster than a bit test */
+ IO_URING_F_NONBLOCK = INT_MIN,
+
+ /* ctx state flags, for URING_CMD */
+ IO_URING_F_SQE128 = 4,
+ IO_URING_F_CQE32 = 8,
+ IO_URING_F_IOPOLL = 16,
+};
+
+struct io_uring_cmd {
+ struct file *file;
+ const void *cmd;
+ union {
+ /* callback to defer completions to task context */
+ void (*task_work_cb)(struct io_uring_cmd *cmd);
+ /* used for polled completion */
+ void *cookie;
+ };
+ u32 cmd_op;
+ u32 flags;
+ u8 pdu[32]; /* available inline for free use */
+};
+
+#if defined(CONFIG_IO_URING)
+int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ struct iov_iter *iter, void *ioucmd);
+void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2);
+void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
+ void (*task_work_cb)(struct io_uring_cmd *));
+struct sock *io_uring_get_socket(struct file *file);
+void __io_uring_cancel(bool cancel_all);
+void __io_uring_free(struct task_struct *tsk);
+void io_uring_unreg_ringfd(void);
+const char *io_uring_get_opcode(u8 opcode);
+
+static inline void io_uring_files_cancel(void)
+{
+ if (current->io_uring) {
+ io_uring_unreg_ringfd();
+ __io_uring_cancel(false);
+ }
+}
+static inline void io_uring_task_cancel(void)
+{
+ if (current->io_uring)
+ __io_uring_cancel(true);
+}
+static inline void io_uring_free(struct task_struct *tsk)
+{
+ if (tsk->io_uring)
+ __io_uring_free(tsk);
+}
+#else
+static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ struct iov_iter *iter, void *ioucmd)
+{
+ return -EOPNOTSUPP;
+}
+static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
+ ssize_t ret2)
+{
+}
+static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
+ void (*task_work_cb)(struct io_uring_cmd *))
+{
+}
+static inline struct sock *io_uring_get_socket(struct file *file)
+{
+ return NULL;
+}
+static inline void io_uring_task_cancel(void)
+{
+}
+static inline void io_uring_files_cancel(void)
+{
+}
+static inline void io_uring_free(struct task_struct *tsk)
+{
+}
+static inline const char *io_uring_get_opcode(u8 opcode)
+{
+ return "";
+}
+#endif
+
+#endif
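
This header is what a driver includes to service IORING_OP_URING_CMD requests. A rough sketch of a handler wired into file_operations->uring_cmd(), with the opcode and helper names hypothetical; returning -EIOCBQUEUED defers completion to io_uring_cmd_done():

	static void foo_cmd_tw(struct io_uring_cmd *ioucmd)
	{
		/* runs in task context via io_uring_cmd_complete_in_task() */
		io_uring_cmd_done(ioucmd, 0, 0);
	}

	static int foo_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
	{
		if (issue_flags & IO_URING_F_NONBLOCK)
			return -EAGAIN;	/* retry from a context that may block */

		switch (ioucmd->cmd_op) {
		case 0x1234:			/* hypothetical command */
			io_uring_cmd_complete_in_task(ioucmd, foo_cmd_tw);
			return -EIOCBQUEUED;
		default:
			return -ENOTTY;
		}
	}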
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
new file mode 100644
index 000000000000..f5b687a787a3
--- /dev/null
+++ b/include/linux/io_uring_types.h
@@ -0,0 +1,580 @@
+#ifndef IO_URING_TYPES_H
+#define IO_URING_TYPES_H
+
+#include <linux/blkdev.h>
+#include <linux/task_work.h>
+#include <linux/bitmap.h>
+#include <linux/llist.h>
+#include <uapi/linux/io_uring.h>
+
+struct io_wq_work_node {
+ struct io_wq_work_node *next;
+};
+
+struct io_wq_work_list {
+ struct io_wq_work_node *first;
+ struct io_wq_work_node *last;
+};
+
+struct io_wq_work {
+ struct io_wq_work_node list;
+ unsigned flags;
+ /* place it here instead of io_kiocb as it fills padding and saves 4B */
+ int cancel_seq;
+};
+
+struct io_fixed_file {
+ /* file * with additional FFS_* flags */
+ unsigned long file_ptr;
+};
+
+struct io_file_table {
+ struct io_fixed_file *files;
+ unsigned long *bitmap;
+ unsigned int alloc_hint;
+};
+
+struct io_hash_bucket {
+ spinlock_t lock;
+ struct hlist_head list;
+} ____cacheline_aligned_in_smp;
+
+struct io_hash_table {
+ struct io_hash_bucket *hbs;
+ unsigned hash_bits;
+};
+
+/*
+ * Arbitrary limit, can be raised if need be
+ */
+#define IO_RINGFD_REG_MAX 16
+
+struct io_uring_task {
+ /* submission side */
+ int cached_refs;
+ const struct io_ring_ctx *last;
+ struct io_wq *io_wq;
+ struct file *registered_rings[IO_RINGFD_REG_MAX];
+
+ struct xarray xa;
+ struct wait_queue_head wait;
+ atomic_t in_idle;
+ atomic_t inflight_tracked;
+ struct percpu_counter inflight;
+
+ struct { /* task_work */
+ struct llist_head task_list;
+ struct callback_head task_work;
+ } ____cacheline_aligned_in_smp;
+};
+
+struct io_uring {
+ u32 head ____cacheline_aligned_in_smp;
+ u32 tail ____cacheline_aligned_in_smp;
+};
+
+/*
+ * This data is shared with the application through the mmap at offsets
+ * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
+ *
+ * The offsets to the member fields are published through struct
+ * io_sqring_offsets when calling io_uring_setup.
+ */
+struct io_rings {
+ /*
+ * Head and tail offsets into the ring; the offsets need to be
+ * masked to get valid indices.
+ *
+ * The kernel controls head of the sq ring and the tail of the cq ring,
+ * and the application controls tail of the sq ring and the head of the
+ * cq ring.
+ */
+ struct io_uring sq, cq;
+ /*
+ * Bitmasks to apply to head and tail offsets (constant, equals
+ * ring_entries - 1)
+ */
+ u32 sq_ring_mask, cq_ring_mask;
+ /* Ring sizes (constant, power of 2) */
+ u32 sq_ring_entries, cq_ring_entries;
+ /*
+ * Number of invalid entries dropped by the kernel due to
+ * invalid index stored in array
+ *
+ * Written by the kernel, shouldn't be modified by the
+ * application (i.e. get number of "new events" by comparing to
+ * cached value).
+ *
+ * After a new SQ head value was read by the application this
+ * counter includes all submissions that were dropped reaching
+ * the new SQ head (and possibly more).
+ */
+ u32 sq_dropped;
+ /*
+ * Runtime SQ flags
+ *
+ * Written by the kernel, shouldn't be modified by the
+ * application.
+ *
+ * The application needs a full memory barrier before checking
+ * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
+ */
+ atomic_t sq_flags;
+ /*
+ * Runtime CQ flags
+ *
+ * Written by the application, shouldn't be modified by the
+ * kernel.
+ */
+ u32 cq_flags;
+ /*
+ * Number of completion events lost because the queue was full;
+ * this should be avoided by the application by making sure
+ * there are not more requests pending than there is space in
+ * the completion queue.
+ *
+ * Written by the kernel, shouldn't be modified by the
+ * application (i.e. get number of "new events" by comparing to
+ * cached value).
+ *
+ * As completion events come in out of order this counter is not
+ * ordered with any other data.
+ */
+ u32 cq_overflow;
+ /*
+ * Ring buffer of completion events.
+ *
+ * The kernel writes completion events fresh every time they are
+ * produced, so the application is allowed to modify pending
+ * entries.
+ */
+ struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
+};
+
+struct io_restriction {
+ DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
+ DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
+ u8 sqe_flags_allowed;
+ u8 sqe_flags_required;
+ bool registered;
+};
+
+struct io_submit_link {
+ struct io_kiocb *head;
+ struct io_kiocb *last;
+};
+
+struct io_submit_state {
+ /* inline/task_work completion list, under ->uring_lock */
+ struct io_wq_work_node free_list;
+ /* batch completion logic */
+ struct io_wq_work_list compl_reqs;
+ struct io_submit_link link;
+
+ bool plug_started;
+ bool need_plug;
+ unsigned short submit_nr;
+ struct blk_plug plug;
+};
+
+struct io_ev_fd {
+ struct eventfd_ctx *cq_ev_fd;
+ unsigned int eventfd_async: 1;
+ struct rcu_head rcu;
+ atomic_t refs;
+ atomic_t ops;
+};
+
+struct io_alloc_cache {
+ struct hlist_head list;
+ unsigned int nr_cached;
+};
+
+struct io_ring_ctx {
+ /* const or read-mostly hot data */
+ struct {
+ struct percpu_ref refs;
+
+ struct io_rings *rings;
+ unsigned int flags;
+ enum task_work_notify_mode notify_method;
+ unsigned int compat: 1;
+ unsigned int drain_next: 1;
+ unsigned int restricted: 1;
+ unsigned int off_timeout_used: 1;
+ unsigned int drain_active: 1;
+ unsigned int drain_disabled: 1;
+ unsigned int has_evfd: 1;
+ unsigned int syscall_iopoll: 1;
+ } ____cacheline_aligned_in_smp;
+
+ /* submission data */
+ struct {
+ struct mutex uring_lock;
+
+ /*
+ * Ring buffer of indices into array of io_uring_sqe, which is
+ * mmapped by the application using the IORING_OFF_SQES offset.
+ *
+ * This indirection could e.g. be used to assign fixed
+ * io_uring_sqe entries to operations and only submit them to
+ * the queue when needed.
+ *
+ * The kernel modifies neither the indices array nor the entries
+ * array.
+ */
+ u32 *sq_array;
+ struct io_uring_sqe *sq_sqes;
+ unsigned cached_sq_head;
+ unsigned sq_entries;
+
+ /*
+ * Fixed resources fast path, should be accessed only under
+ * uring_lock, and updated through io_uring_register(2)
+ */
+ struct io_rsrc_node *rsrc_node;
+ int rsrc_cached_refs;
+ atomic_t cancel_seq;
+ struct io_file_table file_table;
+ unsigned nr_user_files;
+ unsigned nr_user_bufs;
+ struct io_mapped_ubuf **user_bufs;
+
+ struct io_submit_state submit_state;
+
+ struct io_buffer_list *io_bl;
+ struct xarray io_bl_xa;
+ struct list_head io_buffers_cache;
+
+ struct io_hash_table cancel_table_locked;
+ struct list_head cq_overflow_list;
+ struct io_alloc_cache apoll_cache;
+ struct io_alloc_cache netmsg_cache;
+ } ____cacheline_aligned_in_smp;
+
+ /* IRQ completion list, under ->completion_lock */
+ struct io_wq_work_list locked_free_list;
+ unsigned int locked_free_nr;
+
+ const struct cred *sq_creds; /* cred used for __io_sq_thread() */
+ struct io_sq_data *sq_data; /* if using sq thread polling */
+
+ struct wait_queue_head sqo_sq_wait;
+ struct list_head sqd_list;
+
+ unsigned long check_cq;
+
+ unsigned int file_alloc_start;
+ unsigned int file_alloc_end;
+
+ struct xarray personalities;
+ u32 pers_next;
+
+ struct {
+ /*
+ * We cache a range of free CQEs we can use, once exhausted it
+ * should go through a slower range setup, see __io_get_cqe()
+ */
+ struct io_uring_cqe *cqe_cached;
+ struct io_uring_cqe *cqe_sentinel;
+
+ unsigned cached_cq_tail;
+ unsigned cq_entries;
+ struct io_ev_fd __rcu *io_ev_fd;
+ struct wait_queue_head cq_wait;
+ unsigned cq_extra;
+ } ____cacheline_aligned_in_smp;
+
+ struct {
+ spinlock_t completion_lock;
+
+ /*
+ * ->iopoll_list is protected by the ctx->uring_lock for
+ * io_uring instances that don't use IORING_SETUP_SQPOLL.
+ * For SQPOLL, only the single threaded io_sq_thread() will
+ * manipulate the list, hence no extra locking is needed there.
+ */
+ struct io_wq_work_list iopoll_list;
+ struct io_hash_table cancel_table;
+ bool poll_multi_queue;
+
+ struct llist_head work_llist;
+
+ struct list_head io_buffers_comp;
+ } ____cacheline_aligned_in_smp;
+
+ /* timeouts */
+ struct {
+ spinlock_t timeout_lock;
+ atomic_t cq_timeouts;
+ struct list_head timeout_list;
+ struct list_head ltimeout_list;
+ unsigned cq_last_tm_flush;
+ } ____cacheline_aligned_in_smp;
+
+ /* Keep this last, we don't need it for the fast path */
+
+ struct io_restriction restrictions;
+ struct task_struct *submitter_task;
+
+ /* slow path rsrc auxiliary data, used by update/register */
+ struct io_rsrc_node *rsrc_backup_node;
+ struct io_mapped_ubuf *dummy_ubuf;
+ struct io_rsrc_data *file_data;
+ struct io_rsrc_data *buf_data;
+
+ struct delayed_work rsrc_put_work;
+ struct llist_head rsrc_put_llist;
+ struct list_head rsrc_ref_list;
+ spinlock_t rsrc_ref_lock;
+
+ struct list_head io_buffers_pages;
+
+ #if defined(CONFIG_UNIX)
+ struct socket *ring_sock;
+ #endif
+ /* hashed buffered write serialization */
+ struct io_wq_hash *hash_map;
+
+ /* Only used for accounting purposes */
+ struct user_struct *user;
+ struct mm_struct *mm_account;
+
+ /* ctx exit and cancelation */
+ struct llist_head fallback_llist;
+ struct delayed_work fallback_work;
+ struct work_struct exit_work;
+ struct list_head tctx_list;
+ struct completion ref_comp;
+
+ /* io-wq management, e.g. thread count */
+ u32 iowq_limits[2];
+ bool iowq_limits_set;
+
+ struct list_head defer_list;
+ unsigned sq_thread_idle;
+ /* protected by ->completion_lock */
+ unsigned evfd_last_cq_tail;
+};
+
+enum {
+ REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
+ REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
+ REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
+ REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
+ REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
+ REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
+ REQ_F_CQE_SKIP_BIT = IOSQE_CQE_SKIP_SUCCESS_BIT,
+
+ /* first byte is taken by user flags, shift it to not overlap */
+ REQ_F_FAIL_BIT = 8,
+ REQ_F_INFLIGHT_BIT,
+ REQ_F_CUR_POS_BIT,
+ REQ_F_NOWAIT_BIT,
+ REQ_F_LINK_TIMEOUT_BIT,
+ REQ_F_NEED_CLEANUP_BIT,
+ REQ_F_POLLED_BIT,
+ REQ_F_BUFFER_SELECTED_BIT,
+ REQ_F_BUFFER_RING_BIT,
+ REQ_F_REISSUE_BIT,
+ REQ_F_CREDS_BIT,
+ REQ_F_REFCOUNT_BIT,
+ REQ_F_ARM_LTIMEOUT_BIT,
+ REQ_F_ASYNC_DATA_BIT,
+ REQ_F_SKIP_LINK_CQES_BIT,
+ REQ_F_SINGLE_POLL_BIT,
+ REQ_F_DOUBLE_POLL_BIT,
+ REQ_F_PARTIAL_IO_BIT,
+ REQ_F_CQE32_INIT_BIT,
+ REQ_F_APOLL_MULTISHOT_BIT,
+ REQ_F_CLEAR_POLLIN_BIT,
+ REQ_F_HASH_LOCKED_BIT,
+ /* keep async read/write and isreg together and in order */
+ REQ_F_SUPPORT_NOWAIT_BIT,
+ REQ_F_ISREG_BIT,
+
+ /* not a real bit, just to check we're not overflowing the space */
+ __REQ_F_LAST_BIT,
+};
+
+enum {
+ /* ctx owns file */
+ REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
+ /* drain existing IO first */
+ REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
+ /* linked sqes */
+ REQ_F_LINK = BIT(REQ_F_LINK_BIT),
+ /* doesn't sever on completion < 0 */
+ REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
+ /* IOSQE_ASYNC */
+ REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
+ /* IOSQE_BUFFER_SELECT */
+ REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),
+ /* IOSQE_CQE_SKIP_SUCCESS */
+ REQ_F_CQE_SKIP = BIT(REQ_F_CQE_SKIP_BIT),
+
+ /* fail rest of links */
+ REQ_F_FAIL = BIT(REQ_F_FAIL_BIT),
+ /* on inflight list, should be cancelled and waited on exit reliably */
+ REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
+ /* read/write uses file position */
+ REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
+ /* must not punt to workers */
+ REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
+ /* has or had linked timeout */
+ REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
+ /* needs cleanup */
+ REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
+ /* already went through poll handler */
+ REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
+ /* buffer already selected */
+ REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
+ /* buffer selected from ring, needs commit */
+ REQ_F_BUFFER_RING = BIT(REQ_F_BUFFER_RING_BIT),
+ /* caller should reissue async */
+ REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT),
+ /* supports async reads/writes */
+ REQ_F_SUPPORT_NOWAIT = BIT(REQ_F_SUPPORT_NOWAIT_BIT),
+ /* regular file */
+ REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
+ /* has creds assigned */
+ REQ_F_CREDS = BIT(REQ_F_CREDS_BIT),
+ /* skip refcounting if not set */
+ REQ_F_REFCOUNT = BIT(REQ_F_REFCOUNT_BIT),
+ /* there is a linked timeout that has to be armed */
+ REQ_F_ARM_LTIMEOUT = BIT(REQ_F_ARM_LTIMEOUT_BIT),
+ /* ->async_data allocated */
+ REQ_F_ASYNC_DATA = BIT(REQ_F_ASYNC_DATA_BIT),
+ /* don't post CQEs while failing linked requests */
+ REQ_F_SKIP_LINK_CQES = BIT(REQ_F_SKIP_LINK_CQES_BIT),
+ /* single poll may be active */
+ REQ_F_SINGLE_POLL = BIT(REQ_F_SINGLE_POLL_BIT),
+ /* double poll may be active */
+ REQ_F_DOUBLE_POLL = BIT(REQ_F_DOUBLE_POLL_BIT),
+ /* request has already done partial IO */
+ REQ_F_PARTIAL_IO = BIT(REQ_F_PARTIAL_IO_BIT),
+ /* fast poll multishot mode */
+ REQ_F_APOLL_MULTISHOT = BIT(REQ_F_APOLL_MULTISHOT_BIT),
+ /* ->extra1 and ->extra2 are initialised */
+ REQ_F_CQE32_INIT = BIT(REQ_F_CQE32_INIT_BIT),
+ /* recvmsg special flag, clear EPOLLIN */
+ REQ_F_CLEAR_POLLIN = BIT(REQ_F_CLEAR_POLLIN_BIT),
+ /* hashed into ->cancel_hash_locked, protected by ->uring_lock */
+ REQ_F_HASH_LOCKED = BIT(REQ_F_HASH_LOCKED_BIT),
+};
+
+typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);
+
+struct io_task_work {
+ struct llist_node node;
+ io_req_tw_func_t func;
+};
+
+struct io_cqe {
+ __u64 user_data;
+ __s32 res;
+ /* fd initially, then cflags for completion */
+ union {
+ __u32 flags;
+ int fd;
+ };
+};
+
+/*
+ * Each request type overlays its private data structure on top of this one.
+ * They must not exceed this one in size.
+ */
+struct io_cmd_data {
+ struct file *file;
+ /* each command gets 56 bytes of data */
+ __u8 data[56];
+};
+
+static inline void io_kiocb_cmd_sz_check(size_t cmd_sz)
+{
+ BUILD_BUG_ON(cmd_sz > sizeof(struct io_cmd_data));
+}
+#define io_kiocb_to_cmd(req, cmd_type) ( \
+ io_kiocb_cmd_sz_check(sizeof(cmd_type)) , \
+ ((cmd_type *)&(req)->cmd) \
+)
+#define cmd_to_io_kiocb(ptr) ((struct io_kiocb *) ptr)
+
+struct io_kiocb {
+ union {
+ /*
+ * NOTE! Each of the io_kiocb union members has the file pointer
+ * as the first entry in their struct definition. So you can
+ * access the file pointer through any of the sub-structs,
+ * or directly as just 'file' in this struct.
+ */
+ struct file *file;
+ struct io_cmd_data cmd;
+ };
+
+ u8 opcode;
+ /* polled IO has completed */
+ u8 iopoll_completed;
+ /*
+ * Can be either a fixed buffer index, or used with provided buffers.
+ * For the latter, before issue it points to the buffer group ID,
+ * and after selection it points to the buffer ID itself.
+ */
+ u16 buf_index;
+ unsigned int flags;
+
+ struct io_cqe cqe;
+
+ struct io_ring_ctx *ctx;
+ struct task_struct *task;
+
+ struct io_rsrc_node *rsrc_node;
+
+ union {
+ /* store used ubuf, so we can prevent reloading */
+ struct io_mapped_ubuf *imu;
+
+ /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
+ struct io_buffer *kbuf;
+
+ /*
+ * stores buffer ID for ring provided buffers, valid IFF
+ * REQ_F_BUFFER_RING is set.
+ */
+ struct io_buffer_list *buf_list;
+ };
+
+ union {
+ /* used by request caches, completion batching and iopoll */
+ struct io_wq_work_node comp_list;
+ /* cache ->apoll->events */
+ __poll_t apoll_events;
+ };
+ atomic_t refs;
+ atomic_t poll_refs;
+ struct io_task_work io_task_work;
+ /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
+ union {
+ struct hlist_node hash_node;
+ struct {
+ u64 extra1;
+ u64 extra2;
+ };
+ };
+ /* internal polling, see IORING_FEAT_FAST_POLL */
+ struct async_poll *apoll;
+ /* opcode allocated if it needs to store data for async defer */
+ void *async_data;
+ /* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
+ struct io_kiocb *link;
+ /* custom credentials, valid IFF REQ_F_CREDS is set */
+ const struct cred *creds;
+ struct io_wq_work work;
+};
+
+struct io_overflow_cqe {
+ struct list_head list;
+ struct io_uring_cqe cqe;
+};
+
+#endif
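
The io_cmd_data overlay above is how opcode handlers get typed per-request storage: each opcode defines a struct whose first member is the file pointer and whose size stays within io_cmd_data, then retrieves it with io_kiocb_to_cmd() (whose BUILD_BUG_ON rejects oversized types). A hypothetical opcode struct as a sketch:

	struct io_foo {
		struct file *file;	/* must mirror io_cmd_data's first member */
		u64 user_value;
		u32 flags;
	};

	static int io_foo_prep(struct io_kiocb *req)
	{
		struct io_foo *foo = io_kiocb_to_cmd(req, struct io_foo);

		foo->user_value = 0;
		foo->flags = 0;
		return 0;
	}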
diff --git a/include/linux/ioam6.h b/include/linux/ioam6.h
new file mode 100644
index 000000000000..94a24b36998f
--- /dev/null
+++ b/include/linux/ioam6.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * IPv6 IOAM
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+#ifndef _LINUX_IOAM6_H
+#define _LINUX_IOAM6_H
+
+#include <uapi/linux/ioam6.h>
+
+#endif /* _LINUX_IOAM6_H */
diff --git a/include/linux/ioam6_genl.h b/include/linux/ioam6_genl.h
new file mode 100644
index 000000000000..176e67919de3
--- /dev/null
+++ b/include/linux/ioam6_genl.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * IPv6 IOAM Generic Netlink API
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+#ifndef _LINUX_IOAM6_GENL_H
+#define _LINUX_IOAM6_GENL_H
+
+#include <uapi/linux/ioam6_genl.h>
+
+#endif /* _LINUX_IOAM6_GENL_H */
diff --git a/include/linux/ioam6_iptunnel.h b/include/linux/ioam6_iptunnel.h
new file mode 100644
index 000000000000..07d9dfedd29d
--- /dev/null
+++ b/include/linux/ioam6_iptunnel.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * IPv6 IOAM Lightweight Tunnel API
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+#ifndef _LINUX_IOAM6_IPTUNNEL_H
+#define _LINUX_IOAM6_IPTUNNEL_H
+
+#include <uapi/linux/ioam6_iptunnel.h>
+
+#endif /* _LINUX_IOAM6_IPTUNNEL_H */
diff --git a/include/linux/ioasid.h b/include/linux/ioasid.h
index 6f000d7a0ddc..af1c9d62e642 100644
--- a/include/linux/ioasid.h
+++ b/include/linux/ioasid.h
@@ -40,6 +40,10 @@ void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
int ioasid_register_allocator(struct ioasid_allocator_ops *allocator);
void ioasid_unregister_allocator(struct ioasid_allocator_ops *allocator);
int ioasid_set_data(ioasid_t ioasid, void *data);
+static inline bool pasid_valid(ioasid_t ioasid)
+{
+ return ioasid != INVALID_IOASID;
+}
#else /* !CONFIG_IOASID */
static inline ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min,
@@ -48,9 +52,7 @@ static inline ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min,
return INVALID_IOASID;
}
-static inline void ioasid_free(ioasid_t ioasid)
-{
-}
+static inline void ioasid_free(ioasid_t ioasid) { }
static inline void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
bool (*getter)(void *))
@@ -72,5 +74,10 @@ static inline int ioasid_set_data(ioasid_t ioasid, void *data)
return -ENOTSUPP;
}
+static inline bool pasid_valid(ioasid_t ioasid)
+{
+ return false;
+}
+
#endif /* CONFIG_IOASID */
#endif /* __LINUX_IOASID_H */
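
With pasid_valid() available in both configurations, allocation sites can test the returned ID uniformly. A sketch, assuming the full ioasid_alloc() signature (a min/max range plus a private pointer; the hunk above truncates it) and hypothetical bounds:

	static ioasid_t foo_alloc_pasid(struct ioasid_set *set, void *priv)
	{
		ioasid_t pasid = ioasid_alloc(set, 1, 0xfffff, priv);

		if (!pasid_valid(pasid))
			return INVALID_IOASID;
		return pasid;
	}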
diff --git a/include/linux/ioc3.h b/include/linux/ioc3.h
deleted file mode 100644
index 38b286e9a46c..000000000000
--- a/include/linux/ioc3.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2005 Stanislaw Skowronek <skylark@linux-mips.org>
- */
-
-#ifndef _LINUX_IOC3_H
-#define _LINUX_IOC3_H
-
-#include <asm/sn/ioc3.h>
-
-#define IOC3_MAX_SUBMODULES 32
-
-#define IOC3_CLASS_NONE 0
-#define IOC3_CLASS_BASE_IP27 1
-#define IOC3_CLASS_BASE_IP30 2
-#define IOC3_CLASS_MENET_123 3
-#define IOC3_CLASS_MENET_4 4
-#define IOC3_CLASS_CADDUO 5
-#define IOC3_CLASS_SERIAL 6
-
-/* One of these per IOC3 */
-struct ioc3_driver_data {
- struct list_head list;
- int id; /* IOC3 sequence number */
- /* PCI mapping */
- unsigned long pma; /* physical address */
- struct ioc3 __iomem *vma; /* pointer to registers */
- struct pci_dev *pdev; /* PCI device */
- /* IRQ stuff */
- int dual_irq; /* set if separate IRQs are used */
- int irq_io, irq_eth; /* IRQ numbers */
- /* GPIO magic */
- spinlock_t gpio_lock;
- unsigned int gpdr_shadow;
- /* NIC identifiers */
- char nic_part[32];
- char nic_serial[16];
- char nic_mac[6];
- /* submodule set */
- int class;
- void *data[IOC3_MAX_SUBMODULES]; /* for submodule use */
- int active[IOC3_MAX_SUBMODULES]; /* set if probe succeeds */
- /* is_ir_lock must be held while
- * modifying sio_ie values, so
- * we can be sure that sio_ie is
- * not changing when we read it
- * along with sio_ir.
- */
- spinlock_t ir_lock; /* SIO_IE[SC] mod lock */
-};
-
-/* One per submodule */
-struct ioc3_submodule {
- char *name; /* descriptive submodule name */
- struct module *owner; /* owning kernel module */
- int ethernet; /* set for ethernet drivers */
- int (*probe) (struct ioc3_submodule *, struct ioc3_driver_data *);
- int (*remove) (struct ioc3_submodule *, struct ioc3_driver_data *);
- int id; /* assigned by IOC3, index for the "data" array */
- /* IRQ stuff */
- unsigned int irq_mask; /* IOC3 IRQ mask, leave clear for Ethernet */
- int reset_mask; /* non-zero if you want the ioc3.c module to reset interrupts */
- int (*intr) (struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
- /* private submodule data */
- void *data; /* assigned by submodule */
-};
-
-/**********************************
- * Functions needed by submodules *
- **********************************/
-
-#define IOC3_W_IES 0
-#define IOC3_W_IEC 1
-
-/* registers a submodule for all existing and future IOC3 chips */
-extern int ioc3_register_submodule(struct ioc3_submodule *);
-/* unregisters a submodule */
-extern void ioc3_unregister_submodule(struct ioc3_submodule *);
-/* enables IRQs indicated by irq_mask for a specified IOC3 chip */
-extern void ioc3_enable(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
-/* ackowledges specified IRQs */
-extern void ioc3_ack(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
-/* disables IRQs indicated by irq_mask for a specified IOC3 chip */
-extern void ioc3_disable(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
-/* atomically sets GPCR bits */
-extern void ioc3_gpcr_set(struct ioc3_driver_data *, unsigned int);
-/* general ireg writer */
-extern void ioc3_write_ireg(struct ioc3_driver_data *idd, uint32_t value, int reg);
-
-#endif
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index dba15ca8e60b..14f7eaf1b443 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -8,6 +8,7 @@
enum {
ICQ_EXITED = 1 << 2,
+ ICQ_DESTROYED = 1 << 3,
};
/*
@@ -98,61 +99,40 @@ struct io_cq {
struct io_context {
atomic_long_t refcount;
atomic_t active_ref;
- atomic_t nr_tasks;
-
- /* all the fields below are protected by this lock */
- spinlock_t lock;
unsigned short ioprio;
- /*
- * For request batching
- */
- int nr_batch_requests; /* Number of requests left in the batch */
- unsigned long last_waited; /* Time last woken after wait for request */
+#ifdef CONFIG_BLK_ICQ
+ /* all the fields below are protected by this lock */
+ spinlock_t lock;
struct radix_tree_root icq_tree;
struct io_cq __rcu *icq_hint;
struct hlist_head icq_list;
struct work_struct release_work;
+#endif /* CONFIG_BLK_ICQ */
};
-/**
- * get_io_context_active - get active reference on ioc
- * @ioc: ioc of interest
- *
- * Only iocs with active reference can issue new IOs. This function
- * acquires an active reference on @ioc. The caller must already have an
- * active reference on @ioc.
- */
-static inline void get_io_context_active(struct io_context *ioc)
-{
- WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
- WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
- atomic_long_inc(&ioc->refcount);
- atomic_inc(&ioc->active_ref);
-}
-
-static inline void ioc_task_link(struct io_context *ioc)
-{
- get_io_context_active(ioc);
-
- WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
- atomic_inc(&ioc->nr_tasks);
-}
-
struct task_struct;
#ifdef CONFIG_BLOCK
void put_io_context(struct io_context *ioc);
-void put_io_context_active(struct io_context *ioc);
void exit_io_context(struct task_struct *task);
-struct io_context *get_task_io_context(struct task_struct *task,
- gfp_t gfp_flags, int node);
+int __copy_io(unsigned long clone_flags, struct task_struct *tsk);
+static inline int copy_io(unsigned long clone_flags, struct task_struct *tsk)
+{
+ if (!current->io_context)
+ return 0;
+ return __copy_io(clone_flags, tsk);
+}
#else
struct io_context;
static inline void put_io_context(struct io_context *ioc) { }
static inline void exit_io_context(struct task_struct *task) { }
-#endif
+static inline int copy_io(unsigned long clone_flags, struct task_struct *tsk)
+{
+ return 0;
+}
+#endif /* CONFIG_BLOCK */
-#endif
+#endif /* IOCONTEXT_H */
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 8b09463dae0d..238a03087e17 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -13,6 +13,7 @@
struct address_space;
struct fiemap_extent_info;
struct inode;
+struct iomap_dio;
struct iomap_writepage_ctx;
struct iov_iter;
struct kiocb;
@@ -54,6 +55,7 @@ struct vm_fault;
#define IOMAP_F_SHARED 0x04
#define IOMAP_F_MERGED 0x08
#define IOMAP_F_BUFFER_HEAD 0x10
+#define IOMAP_F_ZONE_APPEND 0x20
/*
* Flags set by the core iomap code during operations:
@@ -89,13 +91,30 @@ struct iomap {
const struct iomap_page_ops *page_ops;
};
-static inline sector_t
-iomap_sector(struct iomap *iomap, loff_t pos)
+static inline sector_t iomap_sector(const struct iomap *iomap, loff_t pos)
{
return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}
/*
+ * Returns the inline data pointer for logical offset @pos.
+ */
+static inline void *iomap_inline_data(const struct iomap *iomap, loff_t pos)
+{
+ return iomap->inline_data + pos - iomap->offset;
+}
+
+/*
+ * Check if the mapping's length is within the valid range for inline data.
+ * This is used to guard against accessing data beyond the page inline_data
+ * points at.
+ */
+static inline bool iomap_inline_data_valid(const struct iomap *iomap)
+{
+ return iomap->length <= PAGE_SIZE - offset_in_page(iomap->inline_data);
+}
+
+/*
* When a filesystem sets page_ops in an iomap mapping it returns, page_prepare
* and page_done will be called for each page written to. This only applies to
* buffered writes as unbuffered writes will not typically have pages
@@ -106,10 +125,9 @@ iomap_sector(struct iomap *iomap, loff_t pos)
* associated page could not be obtained.
*/
struct iomap_page_ops {
- int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len,
- struct iomap *iomap);
+ int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len);
void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
- struct page *page, struct iomap *iomap);
+ struct page *page);
};
/*
@@ -121,6 +139,13 @@ struct iomap_page_ops {
#define IOMAP_FAULT (1 << 3) /* mapping for page fault */
#define IOMAP_DIRECT (1 << 4) /* direct I/O */
#define IOMAP_NOWAIT (1 << 5) /* do not block */
+#define IOMAP_OVERWRITE_ONLY (1 << 6) /* only pure overwrites allowed */
+#define IOMAP_UNSHARE (1 << 7) /* unshare_file_range */
+#ifdef CONFIG_FS_DAX
+#define IOMAP_DAX (1 << 8) /* DAX mapping */
+#else
+#define IOMAP_DAX 0
+#endif /* CONFIG_FS_DAX */
struct iomap_ops {
/*
@@ -142,33 +167,70 @@ struct iomap_ops {
ssize_t written, unsigned flags, struct iomap *iomap);
};
-/*
- * Main iomap iterator function.
+/**
+ * struct iomap_iter - Iterate through a range of a file
+ * @inode: Set at the start of the iteration and should not change.
+ * @pos: The current file position we are operating on. It is updated by
+ * calls to iomap_iter(). Treat as read-only in the body.
+ * @len: The remaining length of the file segment we're operating on.
+ * It is updated at the same time as @pos.
+ * @processed: The number of bytes processed by the body in the most recent
+ * iteration, or a negative errno. 0 causes the iteration to stop.
+ * @flags: Zero or more of the iomap_begin flags above.
+ * @iomap: Map describing the I/O iteration
+ * @srcmap: Source map for COW operations
+ */
+struct iomap_iter {
+ struct inode *inode;
+ loff_t pos;
+ u64 len;
+ s64 processed;
+ unsigned flags;
+ struct iomap iomap;
+ struct iomap srcmap;
+ void *private;
+};
+
+int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops);
+
+/**
+ * iomap_length - length of the current iomap iteration
+ * @iter: iteration structure
+ *
+ * Returns the length that the operation applies to for the current iteration.
*/
-typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len,
- void *data, struct iomap *iomap, struct iomap *srcmap);
+static inline u64 iomap_length(const struct iomap_iter *iter)
+{
+ u64 end = iter->iomap.offset + iter->iomap.length;
-loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
- unsigned flags, const struct iomap_ops *ops, void *data,
- iomap_actor_t actor);
+ if (iter->srcmap.type != IOMAP_HOLE)
+ end = min(end, iter->srcmap.offset + iter->srcmap.length);
+ return min(iter->len, end - iter->pos);
+}
+
+/**
+ * iomap_iter_srcmap - return the source map for the current iomap iteration
+ * @i: iteration structure
+ *
+ * Write operations on file systems with reflink support might require a
+ * source and a destination map. This function returns the source map
+ * for a given operation, which may or may not be identical to the destination
+ * map in &i->iomap.
+ */
+static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i)
+{
+ if (i->srcmap.type != IOMAP_HOLE)
+ return &i->srcmap;
+ return &i->iomap;
+}
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops);
-int iomap_readpage(struct page *page, const struct iomap_ops *ops);
-int iomap_readpages(struct address_space *mapping, struct list_head *pages,
- unsigned nr_pages, const struct iomap_ops *ops);
-int iomap_set_page_dirty(struct page *page);
-int iomap_is_partially_uptodate(struct page *page, unsigned long from,
- unsigned long count);
-int iomap_releasepage(struct page *page, gfp_t gfp_mask);
-void iomap_invalidatepage(struct page *page, unsigned int offset,
- unsigned int len);
-#ifdef CONFIG_MIGRATION
-int iomap_migrate_page(struct address_space *mapping, struct page *newpage,
- struct page *page, enum migrate_mode mode);
-#else
-#define iomap_migrate_page NULL
-#endif
+int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
+void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
+bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
+bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
+void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
const struct iomap_ops *ops);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
@@ -178,7 +240,7 @@ int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf,
const struct iomap_ops *ops);
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
- loff_t start, loff_t len, const struct iomap_ops *ops);
+ u64 start, u64 len, const struct iomap_ops *ops);
loff_t iomap_seek_hole(struct inode *inode, loff_t offset,
const struct iomap_ops *ops);
loff_t iomap_seek_data(struct inode *inode, loff_t offset,
@@ -193,10 +255,11 @@ struct iomap_ioend {
struct list_head io_list; /* next ioend in chain */
u16 io_type;
u16 io_flags; /* IOMAP_F_* */
+ u32 io_folios; /* folios added to ioend */
struct inode *io_inode; /* file being written to */
size_t io_size; /* size of the extent */
loff_t io_offset; /* offset in the file */
- void *io_private; /* file system private data */
+ sector_t io_sector; /* start sector of ioend */
struct bio *io_bio; /* bio being built */
struct bio io_inline_bio; /* MUST BE LAST! */
};
@@ -221,7 +284,7 @@ struct iomap_writeback_ops {
* Optional, allows the file system to discard state on a page where
* we failed to submit any I/O.
*/
- void (*discard_page)(struct page *page);
+ void (*discard_folio)(struct folio *folio, loff_t pos);
};
struct iomap_writepage_ctx {
@@ -232,13 +295,8 @@ struct iomap_writepage_ctx {
void iomap_finish_ioends(struct iomap_ioend *ioend, int error);
void iomap_ioend_try_merge(struct iomap_ioend *ioend,
- struct list_head *more_ioends,
- void (*merge_private)(struct iomap_ioend *ioend,
- struct iomap_ioend *next));
+ struct list_head *more_ioends);
void iomap_sort_ioends(struct list_head *ioend_list);
-int iomap_writepage(struct page *page, struct writeback_control *wbc,
- struct iomap_writepage_ctx *wpc,
- const struct iomap_writeback_ops *ops);
int iomap_writepages(struct address_space *mapping,
struct writeback_control *wbc, struct iomap_writepage_ctx *wpc,
const struct iomap_writeback_ops *ops);
@@ -252,12 +310,54 @@ int iomap_writepages(struct address_space *mapping,
struct iomap_dio_ops {
int (*end_io)(struct kiocb *iocb, ssize_t size, int error,
unsigned flags);
+ void (*submit_io)(const struct iomap_iter *iter, struct bio *bio,
+ loff_t file_offset);
+
+ /*
+ * Filesystems wishing to attach private information to a direct io bio
+ * must provide a ->submit_io method that attaches the additional
+ * information to the bio and changes the ->bi_end_io callback to a
+ * custom function. This function should, at a minimum, perform any
+ * relevant post-processing of the bio and end with a call to
+ * iomap_dio_bio_end_io.
+ */
+ struct bio_set *bio_set;
};
+/*
+ * Wait for the I/O to complete in iomap_dio_rw even if the kiocb is not
+ * synchronous.
+ */
+#define IOMAP_DIO_FORCE_WAIT (1 << 0)
+
+/*
+ * Do not allocate blocks or zero partial blocks, but instead fall back to
+ * the caller by returning -EAGAIN. Used to optimize direct I/O writes that
+ * are not aligned to the file system block size.
+ */
+#define IOMAP_DIO_OVERWRITE_ONLY (1 << 1)
+
+/*
+ * When a page fault occurs, return a partial synchronous result and allow
+ * the caller to retry the rest of the operation after dealing with the page
+ * fault.
+ */
+#define IOMAP_DIO_PARTIAL (1 << 2)
+
+/*
+ * The caller will sync the write if needed; do not sync it within
+ * iomap_dio_rw. Overrides IOMAP_DIO_FORCE_WAIT.
+ */
+#define IOMAP_DIO_NOSYNC (1 << 3)
+
ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
- bool wait_for_completion);
-int iomap_dio_iopoll(struct kiocb *kiocb, bool spin);
+ unsigned int dio_flags, void *private, size_t done_before);
+struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
+ const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
+ unsigned int dio_flags, void *private, size_t done_before);
+ssize_t iomap_dio_complete(struct iomap_dio *dio);
+void iomap_dio_bio_end_io(struct bio *bio);
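A hedged sketch of a filesystem write path using the reworked entry point; fs_iomap_ops and fs_dio_ops are hypothetical op tables, not kernel symbols:

	static ssize_t fs_dio_write(struct kiocb *iocb, struct iov_iter *from)
	{
		/*
		 * IOMAP_DIO_PARTIAL lets a page fault produce a short result;
		 * the caller can fault the pages in and retry, passing the
		 * bytes already written as done_before so the final return
		 * value is accounted correctly.
		 */
		return iomap_dio_rw(iocb, from, &fs_iomap_ops, &fs_dio_ops,
				    IOMAP_DIO_PARTIAL, NULL, 0);
	}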
#ifdef CONFIG_SWAP
struct file;
diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h
index 70d01edcbf8b..74be34f3a20a 100644
--- a/include/linux/iommu-helper.h
+++ b/include/linux/iommu-helper.h
@@ -3,7 +3,9 @@
#define _LINUX_IOMMU_HELPER_H
#include <linux/bug.h>
-#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/math.h>
+#include <linux/types.h>
static inline unsigned long iommu_device_max_index(unsigned long size,
unsigned long offset,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index d1b5f4d98569..3c9da1f8979e 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -31,21 +31,17 @@
* if the IOMMU page table format is equivalent.
*/
#define IOMMU_PRIV (1 << 5)
-/*
- * Non-coherent masters can use this page protection flag to set cacheable
- * memory attributes for only a transparent outer level of cache, also known as
- * the last-level or system cache.
- */
-#define IOMMU_SYS_CACHE_ONLY (1 << 6)
struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
+struct iommu_domain_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;
+struct iommu_dma_cookie;
/* iommu fault flags */
#define IOMMU_FAULT_READ 0x0
@@ -53,8 +49,6 @@ struct iommu_fault_event;
typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
struct device *, unsigned long, int, void *);
-typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *,
- void *);
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);
struct iommu_domain_geometry {
@@ -68,6 +62,7 @@ struct iommu_domain_geometry {
#define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
implementation */
#define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */
+#define __IOMMU_DOMAIN_DMA_FQ (1U << 3) /* DMA-API uses flush queue */
/*
* This are the possible domain-types
@@ -80,53 +75,39 @@ struct iommu_domain_geometry {
* IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations.
* This flag allows IOMMU drivers to implement
* certain optimizations for these domains
+ * IOMMU_DOMAIN_DMA_FQ - As above, but definitely using batched TLB
+ * invalidation.
*/
#define IOMMU_DOMAIN_BLOCKED (0U)
#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
__IOMMU_DOMAIN_DMA_API)
+#define IOMMU_DOMAIN_DMA_FQ (__IOMMU_DOMAIN_PAGING | \
+ __IOMMU_DOMAIN_DMA_API | \
+ __IOMMU_DOMAIN_DMA_FQ)
struct iommu_domain {
unsigned type;
- const struct iommu_ops *ops;
+ const struct iommu_domain_ops *ops;
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
iommu_fault_handler_t handler;
void *handler_token;
struct iommu_domain_geometry geometry;
- void *iova_cookie;
+ struct iommu_dma_cookie *iova_cookie;
};
+static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
+{
+ return domain->type & __IOMMU_DOMAIN_DMA_API;
+}
+
enum iommu_cap {
- IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA
- transactions */
+ IOMMU_CAP_CACHE_COHERENCY, /* IOMMU_CACHE is supported */
IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */
-};
-
-/*
- * Following constraints are specifc to FSL_PAMUV1:
- * -aperture must be power of 2, and naturally aligned
- * -number of windows must be power of 2, and address space size
- * of each window is determined by aperture size / # of windows
- * -the actual size of the mapped region of a window must be power
- * of 2 starting with 4KB and physical address must be naturally
- * aligned.
- * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned contraints.
- * The caller can invoke iommu_domain_get_attr to check if the underlying
- * iommu implementation supports these constraints.
- */
-
-enum iommu_attr {
- DOMAIN_ATTR_GEOMETRY,
- DOMAIN_ATTR_PAGING,
- DOMAIN_ATTR_WINDOWS,
- DOMAIN_ATTR_FSL_PAMU_STASH,
- DOMAIN_ATTR_FSL_PAMU_ENABLE,
- DOMAIN_ATTR_FSL_PAMUV1,
- DOMAIN_ATTR_NESTING, /* two stages of translation */
- DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
- DOMAIN_ATTR_MAX,
+ IOMMU_CAP_PRE_BOOT_PROTECTION, /* Firmware says it used the IOMMU for
+ DMA protection and we should too */
};
/* These are the possible reserved region types */
@@ -154,6 +135,7 @@ enum iommu_resv_type {
* @length: Length of the region in bytes
* @prot: IOMMU Protection flags (READ/WRITE/...)
* @type: Type of the reserved region
+ * @free: Callback to free associated memory allocations
*/
struct iommu_resv_region {
struct list_head list;
@@ -161,168 +143,180 @@ struct iommu_resv_region {
size_t length;
int prot;
enum iommu_resv_type type;
+ void (*free)(struct device *dev, struct iommu_resv_region *region);
};
-/* Per device IOMMU features */
-enum iommu_dev_features {
- IOMMU_DEV_FEAT_AUX, /* Aux-domain feature */
- IOMMU_DEV_FEAT_SVA, /* Shared Virtual Addresses */
-};
+struct iommu_iort_rmr_data {
+ struct iommu_resv_region rr;
-#define IOMMU_PASID_INVALID (-1U)
+ /* Stream IDs associated with IORT RMR entry */
+ const u32 *sids;
+ u32 num_sids;
+};
/**
- * struct iommu_sva_ops - device driver callbacks for an SVA context
- *
- * @mm_exit: called when the mm is about to be torn down by exit_mmap. After
- * @mm_exit returns, the device must not issue any more transaction
- * with the PASID given as argument.
- *
- * The @mm_exit handler is allowed to sleep. Be careful about the
- * locks taken in @mm_exit, because they might lead to deadlocks if
- * they are also held when dropping references to the mm. Consider the
- * following call chain:
- * mutex_lock(A); mmput(mm) -> exit_mm() -> @mm_exit() -> mutex_lock(A)
- * Using mmput_async() prevents this scenario.
+ * enum iommu_dev_features - Per device IOMMU features
+ * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
+ * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
+ * enabling %IOMMU_DEV_FEAT_SVA requires
+ * %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page
+ * Faults themselves instead of relying on the IOMMU. When
+ * supported, this feature must be enabled before and
+ * disabled after %IOMMU_DEV_FEAT_SVA.
*
+ * Device drivers enable a feature using iommu_dev_enable_feature().
*/
-struct iommu_sva_ops {
- iommu_mm_exit_handler_t mm_exit;
+enum iommu_dev_features {
+ IOMMU_DEV_FEAT_SVA,
+ IOMMU_DEV_FEAT_IOPF,
};
+#define IOMMU_PASID_INVALID (-1U)
+
#ifdef CONFIG_IOMMU_API
/**
* struct iommu_iotlb_gather - Range information for a pending IOTLB flush
*
* @start: IOVA representing the start of the range to be flushed
- * @end: IOVA representing the end of the range to be flushed (exclusive)
+ * @end: IOVA representing the end of the range to be flushed (inclusive)
* @pgsize: The interval at which to perform the flush
+ * @freelist: Removed pages to free after sync
+ * @queued: Indicates that the flush will be queued
*
* This structure is intended to be updated by multiple calls to the
* ->unmap() function in struct iommu_ops before eventually being passed
- * into ->iotlb_sync().
+ * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
+ * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
+ * them. @queued is set to indicate when ->iotlb_flush_all() will be called
+ * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
*/
struct iommu_iotlb_gather {
unsigned long start;
unsigned long end;
size_t pgsize;
+ struct list_head freelist;
+ bool queued;
};
/**
* struct iommu_ops - iommu ops and capabilities
* @capable: check capability
* @domain_alloc: allocate iommu domain
- * @domain_free: free iommu domain
- * @attach_dev: attach device to an iommu domain
- * @detach_dev: detach device from an iommu domain
- * @map: map a physically contiguous memory region to an iommu domain
- * @unmap: unmap a physically contiguous memory region from an iommu domain
- * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
- * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
- * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
- * queue
- * @iova_to_phys: translate iova to physical address
- * @add_device: add device to iommu grouping
- * @remove_device: remove device from iommu grouping
+ * @probe_device: Add device to iommu driver handling
+ * @release_device: Remove device from iommu driver handling
+ * @probe_finalize: Do final setup work after the device is added to an IOMMU
+ * group and attached to the group's domain
* @device_group: find iommu group for a particular device
- * @domain_get_attr: Query domain attributes
- * @domain_set_attr: Change domain attributes
* @get_resv_regions: Request list of reserved regions for a device
- * @put_resv_regions: Free list of reserved regions for a device
- * @apply_resv_region: Temporary helper call-back for iova reserved ranges
- * @domain_window_enable: Configure and enable a particular window for a domain
- * @domain_window_disable: Disable a particular window for a domain
* @of_xlate: add OF master IDs to iommu grouping
* @is_attach_deferred: Check if domain attach should be deferred from iommu
* driver init to device driver init (default no)
- * @dev_has/enable/disable_feat: per device entries to check/enable/disable
+ * @dev_enable/disable_feat: per device entries to enable/disable
* iommu specific features.
- * @dev_feat_enabled: check enabled feature
- * @aux_attach/detach_dev: aux-domain specific attach/detach entries.
- * @aux_get_pasid: get the pasid given an aux-domain
* @sva_bind: Bind process address space to device
* @sva_unbind: Unbind process address space from device
* @sva_get_pasid: Get PASID associated to a SVA handle
* @page_response: handle page request response
- * @cache_invalidate: invalidate translation caches
- * @sva_bind_gpasid: bind guest pasid and mm
- * @sva_unbind_gpasid: unbind guest pasid and mm
+ * @def_domain_type: device default domain type, return value:
+ * - IOMMU_DOMAIN_IDENTITY: must use an identity domain
+ * - IOMMU_DOMAIN_DMA: must use a dma domain
+ * - 0: use the default setting
+ * @default_domain_ops: the default ops for domains
* @pgsize_bitmap: bitmap of all possible supported page sizes
* @owner: Driver module providing these ops
*/
struct iommu_ops {
- bool (*capable)(enum iommu_cap);
+ bool (*capable)(struct device *dev, enum iommu_cap);
/* Domain allocation and freeing by the iommu driver */
struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
- void (*domain_free)(struct iommu_domain *);
- int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
- void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
- int (*map)(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
- size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *iotlb_gather);
- void (*flush_iotlb_all)(struct iommu_domain *domain);
- void (*iotlb_sync_map)(struct iommu_domain *domain);
- void (*iotlb_sync)(struct iommu_domain *domain,
- struct iommu_iotlb_gather *iotlb_gather);
- phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
- int (*add_device)(struct device *dev);
- void (*remove_device)(struct device *dev);
+ struct iommu_device *(*probe_device)(struct device *dev);
+ void (*release_device)(struct device *dev);
+ void (*probe_finalize)(struct device *dev);
struct iommu_group *(*device_group)(struct device *dev);
- int (*domain_get_attr)(struct iommu_domain *domain,
- enum iommu_attr attr, void *data);
- int (*domain_set_attr)(struct iommu_domain *domain,
- enum iommu_attr attr, void *data);
/* Request/Free a list of reserved regions for a device */
void (*get_resv_regions)(struct device *dev, struct list_head *list);
- void (*put_resv_regions)(struct device *dev, struct list_head *list);
- void (*apply_resv_region)(struct device *dev,
- struct iommu_domain *domain,
- struct iommu_resv_region *region);
-
- /* Window handling functions */
- int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
- phys_addr_t paddr, u64 size, int prot);
- void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);
int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
- bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);
+ bool (*is_attach_deferred)(struct device *dev);
/* Per device IOMMU features */
- bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
- bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);
- /* Aux-domain specific attach/detach entries */
- int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev);
- void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
- int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);
-
struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
void *drvdata);
void (*sva_unbind)(struct iommu_sva *handle);
- int (*sva_get_pasid)(struct iommu_sva *handle);
+ u32 (*sva_get_pasid)(struct iommu_sva *handle);
int (*page_response)(struct device *dev,
struct iommu_fault_event *evt,
struct iommu_page_response *msg);
- int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev,
- struct iommu_cache_invalidate_info *inv_info);
- int (*sva_bind_gpasid)(struct iommu_domain *domain,
- struct device *dev, struct iommu_gpasid_bind_data *data);
- int (*sva_unbind_gpasid)(struct device *dev, int pasid);
+ int (*def_domain_type)(struct device *dev);
+ const struct iommu_domain_ops *default_domain_ops;
unsigned long pgsize_bitmap;
struct module *owner;
};
/**
+ * struct iommu_domain_ops - domain specific operations
+ * @attach_dev: attach an iommu domain to a device
+ * @detach_dev: detach an iommu domain from a device
+ * @map: map a physically contiguous memory region to an iommu domain
+ * @map_pages: map a physically contiguous set of pages of the same size to
+ * an iommu domain.
+ * @unmap: unmap a physically contiguous memory region from an iommu domain
+ * @unmap_pages: unmap a number of pages of the same size from an iommu domain
+ * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
+ * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
+ * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
+ * queue
+ * @iova_to_phys: translate iova to physical address
+ * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
+ * including no-snoop TLPs on PCIe or other platform
+ * specific mechanisms.
+ * @enable_nesting: Enable nesting
+ * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
+ * @free: Release the domain after use.
+ */
+struct iommu_domain_ops {
+ int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
+ void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
+
+ int (*map)(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
+ int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped);
+ size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
+ size_t size, struct iommu_iotlb_gather *iotlb_gather);
+ size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *iotlb_gather);
+
+ void (*flush_iotlb_all)(struct iommu_domain *domain);
+ void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
+ size_t size);
+ void (*iotlb_sync)(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *iotlb_gather);
+
+ phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
+ dma_addr_t iova);
+
+ bool (*enforce_cache_coherency)(struct iommu_domain *domain);
+ int (*enable_nesting)(struct iommu_domain *domain);
+ int (*set_pgtable_quirks)(struct iommu_domain *domain,
+ unsigned long quirks);
+
+ void (*free)(struct iommu_domain *domain);
+};
+
+/**
* struct iommu_device - IOMMU core representation of one IOMMU hardware
* instance
* @list: Used by the iommu-core to keep a list of registered iommus
@@ -365,20 +359,29 @@ struct iommu_fault_param {
};
/**
- * struct iommu_param - collection of per-device IOMMU data
+ * struct dev_iommu - Collection of per-device IOMMU data
*
* @fault_param: IOMMU detected device fault reporting data
+ * @iopf_param: I/O Page Fault queue and data
+ * @fwspec: IOMMU fwspec data
+ * @iommu_dev: IOMMU device this device is linked to
+ * @priv: IOMMU Driver private data
*
* TODO: migrate other per device data pointers under iommu_dev_data, e.g.
* struct iommu_group *iommu_group;
- * struct iommu_fwspec *iommu_fwspec;
*/
-struct iommu_param {
+struct dev_iommu {
struct mutex lock;
- struct iommu_fault_param *fault_param;
+ struct iommu_fault_param *fault_param;
+ struct iopf_device_param *iopf_param;
+ struct iommu_fwspec *fwspec;
+ struct iommu_device *iommu_dev;
+ void *priv;
};
-int iommu_device_register(struct iommu_device *iommu);
+int iommu_device_register(struct iommu_device *iommu,
+ const struct iommu_ops *ops,
+ struct device *hwdev);
void iommu_device_unregister(struct iommu_device *iommu);
int iommu_device_sysfs_add(struct iommu_device *iommu,
struct device *parent,
@@ -387,25 +390,7 @@ int iommu_device_sysfs_add(struct iommu_device *iommu,
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
-
-static inline void __iommu_device_set_ops(struct iommu_device *iommu,
- const struct iommu_ops *ops)
-{
- iommu->ops = ops;
-}
-
-#define iommu_device_set_ops(iommu, ops) \
-do { \
- struct iommu_ops *__ops = (struct iommu_ops *)(ops); \
- __ops->owner = THIS_MODULE; \
- __iommu_device_set_ops(iommu, __ops); \
-} while (0)
-
-static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
- struct fwnode_handle *fwnode)
-{
- iommu->fwnode = fwnode;
-}
+int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);
static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
@@ -416,19 +401,24 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
*gather = (struct iommu_iotlb_gather) {
.start = ULONG_MAX,
+ .freelist = LIST_HEAD_INIT(gather->freelist),
};
}
-#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
-#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
-#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
-#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER 4 /* Post Driver bind */
-#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER 5 /* Pre Driver unbind */
-#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */
+static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
+{
+ /*
+ * Assume that valid ops must be installed if iommu_probe_device()
+ * has succeeded. The device ops are essentially for internal use
+ * within the IOMMU subsystem itself, so we should be able to trust
+ * ourselves not to misuse the helper.
+ */
+ return dev->iommu->iommu_dev->ops;
+}
-extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
+extern int bus_iommu_probe(struct bus_type *bus);
extern bool iommu_present(struct bus_type *bus);
-extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
+extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id);
extern void iommu_domain_free(struct iommu_domain *domain);
@@ -436,13 +426,8 @@ extern int iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
-extern int iommu_cache_invalidate(struct iommu_domain *domain,
- struct device *dev,
- struct iommu_cache_invalidate_info *inv_info);
-extern int iommu_sva_bind_gpasid(struct iommu_domain *domain,
- struct device *dev, struct iommu_gpasid_bind_data *data);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
- struct device *dev, ioasid_t pasid);
+ struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
@@ -454,27 +439,23 @@ extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather);
-extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg,unsigned int nents, int prot);
-extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
- unsigned long iova, struct scatterlist *sg,
- unsigned int nents, int prot);
+extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot);
+extern ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
-extern void generic_iommu_put_resv_regions(struct device *dev,
- struct list_head *list);
-extern int iommu_request_dm_for_dev(struct device *dev);
-extern int iommu_request_dma_domain_for_dev(struct device *dev);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
- enum iommu_resv_type type);
+ enum iommu_resv_type type, gfp_t gfp);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
struct list_head *head);
@@ -496,10 +477,6 @@ extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
-extern int iommu_group_register_notifier(struct iommu_group *group,
- struct notifier_block *nb);
-extern int iommu_group_unregister_notifier(struct iommu_group *group,
- struct notifier_block *nb);
extern int iommu_register_device_fault_handler(struct device *dev,
iommu_dev_fault_handler_t handler,
void *data);
@@ -512,30 +489,24 @@ extern int iommu_page_response(struct device *dev,
struct iommu_page_response *msg);
extern int iommu_group_id(struct iommu_group *group);
-extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
-extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
- void *data);
-extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
- void *data);
+int iommu_enable_nesting(struct iommu_domain *domain);
+int iommu_set_pgtable_quirks(struct iommu_domain *domain,
+ unsigned long quirks);
-/* Window handling function prototypes */
-extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
- phys_addr_t offset, u64 size,
- int prot);
-extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);
+void iommu_set_dma_strict(void);
extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags);
-static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
+static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
if (domain->ops->flush_iotlb_all)
domain->ops->flush_iotlb_all(domain);
}
-static inline void iommu_tlb_sync(struct iommu_domain *domain,
+static inline void iommu_iotlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather)
{
if (domain->ops->iotlb_sync)
@@ -544,29 +515,80 @@ static inline void iommu_tlb_sync(struct iommu_domain *domain,
iommu_iotlb_gather_init(iotlb_gather);
}
+/**
+ * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
+ *
+ * @gather: TLB gather data
+ * @iova: start of page to invalidate
+ * @size: size of page to invalidate
+ *
+ * Helper for IOMMU drivers to check whether a new range and the gathered range
+ * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
+ * than merging the two, which might lead to unnecessary invalidations.
+ */
+static inline
+bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t size)
+{
+ unsigned long start = iova, end = start + size - 1;
+
+ return gather->end != 0 &&
+ (end + 1 < gather->start || start > gather->end + 1);
+}
+
+
+/**
+ * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
+ * @gather: TLB gather data
+ * @iova: start of page to invalidate
+ * @size: size of page to invalidate
+ *
+ * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
+ * where only the address range matters, and simply minimising intermediate
+ * syncs is preferred.
+ */
+static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t size)
+{
+ unsigned long end = iova + size - 1;
+
+ if (gather->start > iova)
+ gather->start = iova;
+ if (gather->end < end)
+ gather->end = end;
+}
+
+/**
+ * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
+ * @domain: IOMMU domain to be invalidated
+ * @gather: TLB gather data
+ * @iova: start of page to invalidate
+ * @size: size of page to invalidate
+ *
+ * Helper for IOMMU drivers to build invalidation commands based on individual
+ * pages, or with page size/table level hints which cannot be gathered if they
+ * differ.
+ */
static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather,
unsigned long iova, size_t size)
{
- unsigned long start = iova, end = start + size;
-
/*
* If the new page is disjoint from the current range or is mapped at
* a different granularity, then sync the TLB so that the gather
* structure can be rewritten.
*/
- if (gather->pgsize != size ||
- end < gather->start || start > gather->end) {
- if (gather->pgsize)
- iommu_tlb_sync(domain, gather);
- gather->pgsize = size;
- }
+ if ((gather->pgsize && gather->pgsize != size) ||
+ iommu_iotlb_gather_is_disjoint(gather, iova, size))
+ iommu_iotlb_sync(domain, gather);
- if (gather->end < end)
- gather->end = end;
+ gather->pgsize = size;
+ iommu_iotlb_gather_add_range(gather, iova, size);
+}
- if (gather->start > start)
- gather->start = start;
+static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
+{
+ return gather && gather->queued;
}
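To show how these helpers are meant to be driven, a sketch of a driver's ->unmap implementation under assumed names (my_unmap, with the page-table teardown elided):

	static size_t my_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size, struct iommu_iotlb_gather *gather)
	{
		/* ... tear down the PTEs covering [iova, iova + size) ... */

		/*
		 * Record the range: iommu_iotlb_gather_add_page() syncs first
		 * if the new page is disjoint from, or sized differently than,
		 * what has been gathered so far.
		 */
		iommu_iotlb_gather_add_page(domain, gather, iova, size);
		return size;
	}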
/* PCI device grouping function */
@@ -580,19 +602,16 @@ struct iommu_group *fsl_mc_device_group(struct device *dev);
* struct iommu_fwspec - per-device IOMMU instance data
* @ops: ops for this device's IOMMU
* @iommu_fwnode: firmware handle for this device's IOMMU
- * @iommu_priv: IOMMU driver private data for this device
- * @num_pasid_bits: number of PASID bits supported by this device
+ * @flags: IOMMU_FWSPEC_* flags
* @num_ids: number of associated device IDs
* @ids: IDs which this device may present to the IOMMU
*/
struct iommu_fwspec {
const struct iommu_ops *ops;
struct fwnode_handle *iommu_fwnode;
- void *iommu_priv;
u32 flags;
- u32 num_pasid_bits;
unsigned int num_ids;
- u32 ids[1];
+ u32 ids[];
};
/* ATS is supported */
@@ -603,7 +622,6 @@ struct iommu_fwspec {
*/
struct iommu_sva {
struct device *dev;
- const struct iommu_sva_ops *ops;
};
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
@@ -614,33 +632,49 @@ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
- return dev->iommu_fwspec;
+ if (dev->iommu)
+ return dev->iommu->fwspec;
+ else
+ return NULL;
}
static inline void dev_iommu_fwspec_set(struct device *dev,
struct iommu_fwspec *fwspec)
{
- dev->iommu_fwspec = fwspec;
+ dev->iommu->fwspec = fwspec;
+}
+
+static inline void *dev_iommu_priv_get(struct device *dev)
+{
+ if (dev->iommu)
+ return dev->iommu->priv;
+ else
+ return NULL;
+}
+
+static inline void dev_iommu_priv_set(struct device *dev, void *priv)
+{
+ dev->iommu->priv = priv;
}
int iommu_probe_device(struct device *dev);
void iommu_release_device(struct device *dev);
-bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
-bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);
-int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev);
-void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev);
-int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev);
struct iommu_sva *iommu_sva_bind_device(struct device *dev,
struct mm_struct *mm,
void *drvdata);
void iommu_sva_unbind_device(struct iommu_sva *handle);
-int iommu_sva_set_ops(struct iommu_sva *handle,
- const struct iommu_sva_ops *ops);
-int iommu_sva_get_pasid(struct iommu_sva *handle);
+u32 iommu_sva_get_pasid(struct iommu_sva *handle);
+
+int iommu_device_use_default_domain(struct device *dev);
+void iommu_device_unuse_default_domain(struct device *dev);
+
+int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
+void iommu_group_release_dma_owner(struct iommu_group *group);
+bool iommu_group_dma_owner_claimed(struct iommu_group *group);
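A sketch of the ownership handshake a VFIO-like user might follow; my_enable_user_dma is hypothetical:

	static int my_enable_user_dma(struct iommu_group *group, void *cookie)
	{
		int ret;

		/* Fails if the group is already owned or in kernel-DMA use. */
		ret = iommu_group_claim_dma_owner(group, cookie);
		if (ret)
			return ret;

		/* ... attach an IOMMU_DOMAIN_UNMANAGED domain, run user DMA ... */

		iommu_group_release_dma_owner(group);
		return 0;
	}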
#else /* CONFIG_IOMMU_API */
@@ -656,7 +690,7 @@ static inline bool iommu_present(struct bus_type *bus)
return false;
}
-static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
+static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
return false;
}
@@ -717,41 +751,29 @@ static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
return 0;
}
-static inline size_t iommu_map_sg(struct iommu_domain *domain,
- unsigned long iova, struct scatterlist *sg,
- unsigned int nents, int prot)
+static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot)
{
- return 0;
+ return -ENODEV;
}
-static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
+static inline ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
unsigned int nents, int prot)
{
- return 0;
+ return -ENODEV;
}
-static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
+static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
}
-static inline void iommu_tlb_sync(struct iommu_domain *domain,
+static inline void iommu_iotlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather)
{
}
-static inline int iommu_domain_window_enable(struct iommu_domain *domain,
- u32 wnd_nr, phys_addr_t paddr,
- u64 size, int prot)
-{
- return -ENODEV;
-}
-
-static inline void iommu_domain_window_disable(struct iommu_domain *domain,
- u32 wnd_nr)
-{
-}
-
static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
return 0;
@@ -778,16 +800,6 @@ static inline int iommu_get_group_resv_regions(struct iommu_group *group,
return -ENODEV;
}
-static inline int iommu_request_dm_for_dev(struct device *dev)
-{
- return -ENODEV;
-}
-
-static inline int iommu_request_dma_domain_for_dev(struct device *dev)
-{
- return -ENODEV;
-}
-
static inline void iommu_set_default_passthrough(bool cmd_line)
{
}
@@ -860,18 +872,6 @@ static inline void iommu_group_put(struct iommu_group *group)
{
}
-static inline int iommu_group_register_notifier(struct iommu_group *group,
- struct notifier_block *nb)
-{
- return -ENODEV;
-}
-
-static inline int iommu_group_unregister_notifier(struct iommu_group *group,
- struct notifier_block *nb)
-{
- return 0;
-}
-
static inline
int iommu_register_device_fault_handler(struct device *dev,
iommu_dev_fault_handler_t handler,
@@ -902,33 +902,19 @@ static inline int iommu_group_id(struct iommu_group *group)
return -ENODEV;
}
-static inline int iommu_domain_get_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
+static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
+ unsigned long quirks)
{
- return -EINVAL;
-}
-
-static inline int iommu_domain_set_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
-{
- return -EINVAL;
+ return 0;
}
-static inline int iommu_device_register(struct iommu_device *iommu)
+static inline int iommu_device_register(struct iommu_device *iommu,
+ const struct iommu_ops *ops,
+ struct device *hwdev)
{
return -ENODEV;
}
-static inline void iommu_device_set_ops(struct iommu_device *iommu,
- const struct iommu_ops *ops)
-{
-}
-
-static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
- struct fwnode_handle *fwnode)
-{
-}
-
static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
return NULL;
@@ -944,6 +930,11 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
{
}
+static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
+{
+ return false;
+}
+
static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}
@@ -992,18 +983,6 @@ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
return NULL;
}
-static inline bool
-iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
-{
- return false;
-}
-
-static inline bool
-iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
-{
- return false;
-}
-
static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
@@ -1016,65 +995,67 @@ iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
return -ENODEV;
}
-static inline int
-iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
+static inline struct iommu_sva *
+iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
- return -ENODEV;
+ return NULL;
}
-static inline void
-iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
+static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}
-static inline int
-iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
+static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
- return -ENODEV;
+ return IOMMU_PASID_INVALID;
}
-static inline struct iommu_sva *
-iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
+static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
return NULL;
}
-static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
-{
-}
-
-static inline int iommu_sva_set_ops(struct iommu_sva *handle,
- const struct iommu_sva_ops *ops)
+static inline int iommu_device_use_default_domain(struct device *dev)
{
- return -EINVAL;
+ return 0;
}
-static inline int iommu_sva_get_pasid(struct iommu_sva *handle)
+static inline void iommu_device_unuse_default_domain(struct device *dev)
{
- return IOMMU_PASID_INVALID;
}
static inline int
-iommu_cache_invalidate(struct iommu_domain *domain,
- struct device *dev,
- struct iommu_cache_invalidate_info *inv_info)
+iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
{
return -ENODEV;
}
-static inline int iommu_sva_bind_gpasid(struct iommu_domain *domain,
- struct device *dev, struct iommu_gpasid_bind_data *data)
+
+static inline void iommu_group_release_dma_owner(struct iommu_group *group)
{
- return -ENODEV;
}
-static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
- struct device *dev, int pasid)
+static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
- return -ENODEV;
+ return false;
}
-
#endif /* CONFIG_IOMMU_API */
+/**
+ * iommu_map_sgtable - Map the given buffer to the IOMMU domain
+ * @domain: The IOMMU domain to perform the mapping
+ * @iova: The start address to map the buffer
+ * @sgt: The sg_table object describing the buffer
+ * @prot: IOMMU protection bits
+ *
+ * Creates a mapping at @iova for the buffer described by a scatterlist
+ * stored in the given sg_table object in the provided IOMMU domain.
+ */
+static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
+ unsigned long iova, struct sg_table *sgt, int prot)
+{
+ return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
+}
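Usage sketch, assuming the sg_table was built beforehand (for instance with sg_alloc_table_from_pages()); map_buffer is illustrative:

	static int map_buffer(struct iommu_domain *domain, unsigned long iova,
			      struct sg_table *sgt)
	{
		ssize_t mapped;

		/* On success this is the total number of bytes mapped. */
		mapped = iommu_map_sgtable(domain, iova, sgt,
					   IOMMU_READ | IOMMU_WRITE);
		return mapped < 0 ? mapped : 0;
	}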
+
#ifdef CONFIG_IOMMU_DEBUGFS
extern struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
@@ -1082,4 +1063,40 @@ void iommu_debugfs_setup(void);
static inline void iommu_debugfs_setup(void) {}
#endif
+#ifdef CONFIG_IOMMU_DMA
+#include <linux/msi.h>
+
+/* Setup call for arch DMA mapping code */
+void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);
+
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
+
+int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
+void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg);
+
+#else /* CONFIG_IOMMU_DMA */
+
+struct msi_desc;
+struct msi_msg;
+
+static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
+{
+}
+
+static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
+{
+ return -ENODEV;
+}
+
+static inline int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
+{
+ return 0;
+}
+
+static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+}
+
+#endif /* CONFIG_IOMMU_DMA */
+
#endif /* __LINUX_IOMMU_H */
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
index 35e15dfd4155..2c8860e406bd 100644
--- a/include/linux/iopoll.h
+++ b/include/linux/iopoll.h
@@ -14,36 +14,41 @@
#include <linux/io.h>
/**
- * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
- * @op: accessor function (takes @addr as its only argument)
- * @addr: Address to poll
+ * read_poll_timeout - Periodically poll an address until a condition is
+ * met or a timeout occurs
+ * @op: accessor function (takes @args as its arguments)
* @val: Variable to read the value into
* @cond: Break condition (usually involving @val)
* @sleep_us: Maximum time to sleep between reads in us (0
* tight-loops). Should be less than ~20ms since usleep_range
* is used (see Documentation/timers/timers-howto.rst).
* @timeout_us: Timeout in us, 0 means never timeout
+ * @sleep_before_read: if it is true, sleep for @sleep_us before the first read.
+ * @args: arguments for @op poll
*
* Returns 0 on success and -ETIMEDOUT upon a timeout. In either
- * case, the last read value at @addr is stored in @val. Must not
+ * case, the last read value at @args is stored in @val. Must not
* be called from atomic context if sleep_us or timeout_us are used.
*
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
*/
-#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \
+#define read_poll_timeout(op, val, cond, sleep_us, timeout_us, \
+ sleep_before_read, args...) \
({ \
u64 __timeout_us = (timeout_us); \
unsigned long __sleep_us = (sleep_us); \
ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
might_sleep_if((__sleep_us) != 0); \
+ if (sleep_before_read && __sleep_us) \
+ usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
for (;;) { \
- (val) = op(addr); \
+ (val) = op(args); \
if (cond) \
break; \
if (__timeout_us && \
ktime_compare(ktime_get(), __timeout) > 0) { \
- (val) = op(addr); \
+ (val) = op(args); \
break; \
} \
if (__sleep_us) \
@@ -53,42 +58,87 @@
})
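As a usage sketch of the generalized form, with @args forwarded to the accessor; MY_STATUS, MY_READY and base are made up:

	u32 val;
	int err;

	err = read_poll_timeout(readl, val, val & MY_READY, 100, 10000,
				false, base + MY_STATUS);
	if (err)
		return err;	/* -ETIMEDOUT; val holds the last value read */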
/**
- * readx_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs
- * @op: accessor function (takes @addr as its only argument)
- * @addr: Address to poll
+ * read_poll_timeout_atomic - Periodically poll an address until a condition is
+ * met or a timeout occurs
+ * @op: accessor function (takes @args as its arguments)
* @val: Variable to read the value into
* @cond: Break condition (usually involving @val)
* @delay_us: Time to udelay between reads in us (0 tight-loops). Should
* be less than ~10us since udelay is used (see
* Documentation/timers/timers-howto.rst).
* @timeout_us: Timeout in us, 0 means never timeout
+ * @delay_before_read: if it is true, delay for @delay_us before the first read.
+ * @args: arguments for @op poll
*
* Returns 0 on success and -ETIMEDOUT upon a timeout. In either
- * case, the last read value at @addr is stored in @val.
+ * case, the last read value at @args is stored in @val.
*
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
*/
-#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
+#define read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, \
+ delay_before_read, args...) \
({ \
u64 __timeout_us = (timeout_us); \
unsigned long __delay_us = (delay_us); \
ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
+ if (delay_before_read && __delay_us) \
+ udelay(__delay_us); \
for (;;) { \
- (val) = op(addr); \
+ (val) = op(args); \
if (cond) \
break; \
if (__timeout_us && \
ktime_compare(ktime_get(), __timeout) > 0) { \
- (val) = op(addr); \
+ (val) = op(args); \
break; \
} \
if (__delay_us) \
- udelay(__delay_us); \
+ udelay(__delay_us); \
} \
(cond) ? 0 : -ETIMEDOUT; \
})
+/**
+ * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
+ * @op: accessor function (takes @addr as its only argument)
+ * @addr: Address to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0
+ * tight-loops). Should be less than ~20ms since usleep_range
+ * is used (see Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
+ *
+ * When available, you'll probably want to use one of the specialized
+ * macros defined below rather than this macro directly.
+ */
+#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \
+ read_poll_timeout(op, val, cond, sleep_us, timeout_us, false, addr)
+
+/**
+ * readx_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs
+ * @op: accessor function (takes @addr as its only argument)
+ * @addr: Address to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @delay_us: Time to udelay between reads in us (0 tight-loops). Should
+ * be less than ~10us since udelay is used (see
+ * Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val.
+ *
+ * When available, you'll probably want to use one of the specialized
+ * macros defined below rather than this macro directly.
+ */
+#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
+ read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, false, addr)
#define readb_poll_timeout(addr, val, cond, delay_us, timeout_us) \
readx_poll_timeout(readb, addr, val, cond, delay_us, timeout_us)
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index a9b9170b5dd2..27642ca15d93 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -10,9 +10,10 @@
#define _LINUX_IOPORT_H
#ifndef __ASSEMBLY__
+#include <linux/bits.h>
#include <linux/compiler.h>
+#include <linux/minmax.h>
#include <linux/types.h>
-#include <linux/bits.h>
/*
* Resources are tree-like, allowing
* nesting etc..
@@ -58,6 +59,10 @@ struct resource {
#define IORESOURCE_EXT_TYPE_BITS 0x01000000 /* Resource extended types */
#define IORESOURCE_SYSRAM 0x01000000 /* System RAM (modifier) */
+/* IORESOURCE_SYSRAM specific bits. */
+#define IORESOURCE_SYSRAM_DRIVER_MANAGED 0x02000000 /* Always detected via a driver. */
+#define IORESOURCE_SYSRAM_MERGEABLE 0x04000000 /* Resource can be merged. */
+
#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
#define IORESOURCE_DISABLED 0x10000000
@@ -74,7 +79,8 @@ struct resource {
#define IORESOURCE_IRQ_HIGHLEVEL (1<<2)
#define IORESOURCE_IRQ_LOWLEVEL (1<<3)
#define IORESOURCE_IRQ_SHAREABLE (1<<4)
-#define IORESOURCE_IRQ_OPTIONAL (1<<5)
+#define IORESOURCE_IRQ_OPTIONAL (1<<5)
+#define IORESOURCE_IRQ_WAKECAPABLE (1<<6)
/* PnP DMA specific bits (IORESOURCE_BITS) */
#define IORESOURCE_DMA_TYPE_MASK (3<<0)
@@ -103,6 +109,7 @@ struct resource {
#define IORESOURCE_MEM_32BIT (3<<3)
#define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */
#define IORESOURCE_MEM_EXPANSIONROM (1<<6)
+#define IORESOURCE_MEM_NONPOSTED (1<<7)
/* PnP I/O specific bits (IORESOURCE_BITS) */
#define IORESOURCE_IO_16BIT_ADDR (1<<0)
@@ -135,6 +142,7 @@ enum {
IORES_DESC_DEVICE_PRIVATE_MEMORY = 6,
IORES_DESC_RESERVED = 7,
IORES_DESC_SOFT_RESERVED = 8,
+ IORES_DESC_CXL = 9,
};
/*
@@ -165,6 +173,11 @@ enum {
#define DEFINE_RES_MEM(_start, _size) \
DEFINE_RES_MEM_NAMED((_start), (_size), NULL)
+#define DEFINE_RES_REG_NAMED(_start, _size, _name) \
+ DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_REG)
+#define DEFINE_RES_REG(_start, _size) \
+ DEFINE_RES_REG_NAMED((_start), (_size), NULL)
+
#define DEFINE_RES_IRQ_NAMED(_irq, _name) \
DEFINE_RES_NAMED((_irq), 1, (_name), IORESOURCE_IRQ)
#define DEFINE_RES_IRQ(_irq) \
@@ -225,12 +238,39 @@ static inline bool resource_contains(struct resource *r1, struct resource *r2)
return r1->start <= r2->start && r1->end >= r2->end;
}
+/* True if any part of r1 overlaps r2 */
+static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
+{
+ return r1->start <= r2->end && r1->end >= r2->start;
+}
+
+static inline bool
+resource_intersection(struct resource *r1, struct resource *r2, struct resource *r)
+{
+ if (!resource_overlaps(r1, r2))
+ return false;
+ r->start = max(r1->start, r2->start);
+ r->end = min(r1->end, r2->end);
+ return true;
+}
+
+static inline bool
+resource_union(struct resource *r1, struct resource *r2, struct resource *r)
+{
+ if (!resource_overlaps(r1, r2))
+ return false;
+ r->start = min(r1->start, r2->start);
+ r->end = max(r1->end, r2->end);
+ return true;
+}
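A quick worked example of the new helpers (values are arbitrary):

	struct resource r1 = DEFINE_RES_MEM(0x1000, 0x1000);	/* [0x1000, 0x1fff] */
	struct resource r2 = DEFINE_RES_MEM(0x1800, 0x1000);	/* [0x1800, 0x27ff] */
	struct resource r;

	if (resource_intersection(&r1, &r2, &r))
		;	/* r spans [0x1800, 0x1fff] */
	if (resource_union(&r1, &r2, &r))
		;	/* r spans [0x1000, 0x27ff] */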
/* Convenience shorthand with allocation */
#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0)
#define request_muxed_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED)
#define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl)
#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0)
+#define request_mem_region_muxed(start, n, name) \
+ __request_region(&iomem_resource, (start), (n), (name), IORESOURCE_MUXED)
#define request_mem_region_exclusive(start,n,name) \
__request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE)
#define rename_region(region, newname) do { (region)->name = (newname); } while (0)
@@ -247,8 +287,10 @@ extern struct resource * __request_region(struct resource *,
extern void __release_region(struct resource *, resource_size_t,
resource_size_t);
#ifdef CONFIG_MEMORY_HOTREMOVE
-extern int release_mem_region_adjustable(struct resource *, resource_size_t,
- resource_size_t);
+extern void release_mem_region_adjustable(resource_size_t, resource_size_t);
+#endif
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern void merge_system_ram_resource(struct resource *res);
#endif
/* Wrappers for managed devices */
@@ -290,16 +332,21 @@ extern int
walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
void *arg, int (*func)(struct resource *, void *));
-/* True if any part of r1 overlaps r2 */
-static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
-{
- return (r1->start <= r2->end && r1->end >= r2->start);
-}
-
struct resource *devm_request_free_mem_region(struct device *dev,
struct resource *base, unsigned long size);
struct resource *request_free_mem_region(struct resource *base,
unsigned long size, const char *name);
+struct resource *alloc_free_mem_region(struct resource *base,
+ unsigned long size, unsigned long align, const char *name);
+
+static inline void irqresource_disabled(struct resource *res, u32 irq)
+{
+ res->start = irq;
+ res->end = irq;
+ res->flags |= IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
+}
+
+extern struct address_space *iomem_get_mapping(void);
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_IOPORT_H */
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index e9bfe6972aed..7578d4f6a969 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -6,46 +6,22 @@
#include <linux/sched/rt.h>
#include <linux/iocontext.h>
-/*
- * Gives us 8 prio classes with 13-bits of data for each class
- */
-#define IOPRIO_CLASS_SHIFT (13)
-#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1)
-
-#define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT)
-#define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK)
-#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | data)
-
-#define ioprio_valid(mask) (IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE)
+#include <uapi/linux/ioprio.h>
/*
- * These are the io priority groups as implemented by CFQ. RT is the realtime
- * class, it always gets premium service. BE is the best-effort scheduling
- * class, the default for any process. IDLE is the idle scheduling class, it
- * is only served when no one else is using the disk.
+ * Default IO priority.
*/
-enum {
- IOPRIO_CLASS_NONE,
- IOPRIO_CLASS_RT,
- IOPRIO_CLASS_BE,
- IOPRIO_CLASS_IDLE,
-};
+#define IOPRIO_DEFAULT IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0)
/*
- * 8 best effort priority levels are supported
+ * Check that a priority value has a valid class.
*/
-#define IOPRIO_BE_NR (8)
-
-enum {
- IOPRIO_WHO_PROCESS = 1,
- IOPRIO_WHO_PGRP,
- IOPRIO_WHO_USER,
-};
+static inline bool ioprio_valid(unsigned short ioprio)
+{
+ unsigned short class = IOPRIO_PRIO_CLASS(ioprio);
-/*
- * Fallback BE priority
- */
-#define IOPRIO_NORM (4)
+ return class > IOPRIO_CLASS_NONE && class <= IOPRIO_CLASS_IDLE;
+}
/*
* if process has set io priority explicitly, use that. if not, convert
@@ -70,23 +46,19 @@ static inline int task_nice_ioclass(struct task_struct *task)
return IOPRIO_CLASS_BE;
}
-/*
- * If the calling process has set an I/O priority, use that. Otherwise, return
- * the default I/O priority.
- */
-static inline int get_current_ioprio(void)
+#ifdef CONFIG_BLOCK
+int __get_task_ioprio(struct task_struct *p);
+#else
+static inline int __get_task_ioprio(struct task_struct *p)
{
- struct io_context *ioc = current->io_context;
-
- if (ioc)
- return ioc->ioprio;
- return IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+ return IOPRIO_DEFAULT;
}
+#endif /* CONFIG_BLOCK */
-/*
- * For inheritance, return the highest of the two given priorities
- */
-extern int ioprio_best(unsigned short aprio, unsigned short bprio);
+static inline int get_current_ioprio(void)
+{
+ return __get_task_ioprio(current);
+}
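A small sketch of the consolidated interface; this assumes IOPRIO_NORM now comes from the uapi header:

	int prio = get_current_ioprio();

	/* Substitute an explicit best-effort value if none was ever set. */
	if (!ioprio_valid(prio))
		prio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);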
extern int set_task_ioprio(struct task_struct *task, int ioprio);
diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h
new file mode 100644
index 000000000000..cb71aa616bd3
--- /dev/null
+++ b/include/linux/iosys-map.h
@@ -0,0 +1,516 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Pointer abstraction for IO/system memory
+ */
+
+#ifndef __IOSYS_MAP_H__
+#define __IOSYS_MAP_H__
+
+#include <linux/compiler_types.h>
+#include <linux/io.h>
+#include <linux/string.h>
+
+/**
+ * DOC: overview
+ *
+ * When accessing a memory region, depending on its location, users may have to
+ * access it with I/O operations or memory load/store operations. For example,
+ * copying to system memory could be done with memcpy(), while copying to
+ * I/O memory would be done with memcpy_toio().
+ *
+ * .. code-block:: c
+ *
+ * void *vaddr = ...; // pointer to system memory
+ * memcpy(vaddr, src, len);
+ *
+ * void __iomem *vaddr_iomem = ...; // pointer to I/O memory
+ * memcpy_toio(vaddr_iomem, src, len);
+ *
+ * The user of such a pointer may not have information about the mapping of that
+ * region or may want to have a single code path to handle operations on that
+ * buffer, regardless of whether it's located in system or I/O memory. The type
+ * :c:type:`struct iosys_map <iosys_map>` and its helpers abstract that so the
+ * buffer can be passed around to other drivers or have separate duties inside
+ * the same driver for allocation, read and write operations.
+ *
+ * Open-coding access to :c:type:`struct iosys_map <iosys_map>` is considered
+ * bad style. Rather than accessing its fields directly, use one of the provided
+ * helper functions, or implement your own. For example, instances of
+ * :c:type:`struct iosys_map <iosys_map>` can be initialized statically with
+ * IOSYS_MAP_INIT_VADDR(), or at runtime with iosys_map_set_vaddr(). These
+ * helpers will set an address in system memory.
+ *
+ * .. code-block:: c
+ *
+ * struct iosys_map map = IOSYS_MAP_INIT_VADDR(0xdeadbeef);
+ *
+ * iosys_map_set_vaddr(&map, 0xdeadbeef);
+ *
+ * To set an address in I/O memory, use IOSYS_MAP_INIT_VADDR_IOMEM() or
+ * iosys_map_set_vaddr_iomem().
+ *
+ * .. code-block:: c
+ *
+ * struct iosys_map map = IOSYS_MAP_INIT_VADDR_IOMEM(0xdeadbeef);
+ *
+ * iosys_map_set_vaddr_iomem(&map, 0xdeadbeef);
+ *
+ * Instances of struct iosys_map do not have to be cleaned up, but
+ * can be cleared to NULL with iosys_map_clear(). Cleared mappings
+ * always refer to system memory.
+ *
+ * .. code-block:: c
+ *
+ * iosys_map_clear(&map);
+ *
+ * Test if a mapping is valid with either iosys_map_is_set() or
+ * iosys_map_is_null().
+ *
+ * .. code-block:: c
+ *
+ * if (iosys_map_is_set(&map) != iosys_map_is_null(&map))
+ * // always true
+ *
+ * Instances of :c:type:`struct iosys_map <iosys_map>` can be compared for
+ * equality with iosys_map_is_equal(). Mappings that point to different memory
+ * spaces, system or I/O, are never equal. That's even true if both spaces are
+ * located in the same address space, both mappings contain the same address
+ * value, or both mappings refer to NULL.
+ *
+ * .. code-block:: c
+ *
+ * struct iosys_map sys_map; // refers to system memory
+ * struct iosys_map io_map; // refers to I/O memory
+ *
+ * if (iosys_map_is_equal(&sys_map, &io_map))
+ * // always false
+ *
+ * A set up instance of struct iosys_map can be used to access or manipulate the
+ * buffer memory. Depending on the location of the memory, the provided helpers
+ * will pick the correct operations. Data can be copied into the memory with
+ * iosys_map_memcpy_to(). The address can be manipulated with iosys_map_incr().
+ *
+ * .. code-block:: c
+ *
+ * const void *src = ...; // source buffer
+ * size_t len = ...; // length of src
+ *
+ * iosys_map_memcpy_to(&map, src, len);
+ * iosys_map_incr(&map, len); // go to first byte after the memcpy
+ */
+
+/**
+ * struct iosys_map - Pointer to IO/system memory
+ * @vaddr_iomem: The buffer's address if in I/O memory
+ * @vaddr: The buffer's address if in system memory
+ * @is_iomem: True if the buffer is located in I/O memory, or false
+ * otherwise.
+ */
+struct iosys_map {
+ union {
+ void __iomem *vaddr_iomem;
+ void *vaddr;
+ };
+ bool is_iomem;
+};
+
+/**
+ * IOSYS_MAP_INIT_VADDR - Initializes struct iosys_map to an address in system memory
+ * @vaddr_: A system-memory address
+ */
+#define IOSYS_MAP_INIT_VADDR(vaddr_) \
+ { \
+ .vaddr = (vaddr_), \
+ .is_iomem = false, \
+ }
+
+/**
+ * IOSYS_MAP_INIT_VADDR_IOMEM - Initializes struct iosys_map to an address in I/O memory
+ * @vaddr_iomem_: An I/O-memory address
+ */
+#define IOSYS_MAP_INIT_VADDR_IOMEM(vaddr_iomem_) \
+ { \
+ .vaddr_iomem = (vaddr_iomem_), \
+ .is_iomem = true, \
+ }
+
+/**
+ * IOSYS_MAP_INIT_OFFSET - Initializes struct iosys_map from another iosys_map
+ * @map_: The iosys_map mapping structure to copy from
+ * @offset_: Offset to add to the other mapping
+ *
+ * Initializes a new iosys_map struct based on another passed as argument. It
+ * does a shallow copy of the struct so it's possible to update the back storage
+ * without changing where the original map points to. It is the equivalent of
+ * doing:
+ *
+ * .. code-block:: c
+ *
+ * struct iosys_map map = *other_map;
+ * iosys_map_incr(&map, offset);
+ *
+ * Example usage:
+ *
+ * .. code-block:: c
+ *
+ * void foo(struct device *dev, struct iosys_map *base_map)
+ * {
+ * ...
+ * struct iosys_map map = IOSYS_MAP_INIT_OFFSET(base_map, FIELD_OFFSET);
+ * ...
+ * }
+ *
+ * The advantage of using the initializer over just increasing the offset with
+ * iosys_map_incr() like above is that the new map will always point to the
+ * right place in the buffer for its whole scope. It reduces the risk of
+ * updating the wrong part of the buffer with no compiler warning about it. If
+ * the assignment to IOSYS_MAP_INIT_OFFSET() is forgotten, the compiler can warn
+ * about the use of an uninitialized variable.
+ */
+#define IOSYS_MAP_INIT_OFFSET(map_, offset_) ({ \
+ struct iosys_map copy = *map_; \
+ iosys_map_incr(&copy, offset_); \
+ copy; \
+})
+
+/**
+ * iosys_map_set_vaddr - Sets an iosys mapping structure to an address in system memory
+ * @map: The iosys_map structure
+ * @vaddr: A system-memory address
+ *
+ * Sets the address and clears the I/O-memory flag.
+ */
+static inline void iosys_map_set_vaddr(struct iosys_map *map, void *vaddr)
+{
+ map->vaddr = vaddr;
+ map->is_iomem = false;
+}
+
+/**
+ * iosys_map_set_vaddr_iomem - Sets an iosys mapping structure to an address in I/O memory
+ * @map: The iosys_map structure
+ * @vaddr_iomem: An I/O-memory address
+ *
+ * Sets the address and the I/O-memory flag.
+ */
+static inline void iosys_map_set_vaddr_iomem(struct iosys_map *map,
+ void __iomem *vaddr_iomem)
+{
+ map->vaddr_iomem = vaddr_iomem;
+ map->is_iomem = true;
+}
+
+/**
+ * iosys_map_is_equal - Compares two iosys mapping structures for equality
+ * @lhs: The iosys_map structure
+ * @rhs: An iosys_map structure to compare with
+ *
+ * Two iosys mapping structures are equal if they both refer to the same type of memory
+ * and to the same address within that memory.
+ *
+ * Returns:
+ * True if both structures are equal, or false otherwise.
+ */
+static inline bool iosys_map_is_equal(const struct iosys_map *lhs,
+ const struct iosys_map *rhs)
+{
+ if (lhs->is_iomem != rhs->is_iomem)
+ return false;
+ else if (lhs->is_iomem)
+ return lhs->vaddr_iomem == rhs->vaddr_iomem;
+ else
+ return lhs->vaddr == rhs->vaddr;
+}
+
+/**
+ * iosys_map_is_null - Tests if an iosys mapping is NULL
+ * @map: The iosys_map structure
+ *
+ * Depending on the state of struct iosys_map.is_iomem, tests if the
+ * mapping is NULL.
+ *
+ * Returns:
+ * True if the mapping is NULL, or false otherwise.
+ */
+static inline bool iosys_map_is_null(const struct iosys_map *map)
+{
+ if (map->is_iomem)
+ return !map->vaddr_iomem;
+ return !map->vaddr;
+}
+
+/**
+ * iosys_map_is_set - Tests if the iosys mapping has been set
+ * @map: The iosys_map structure
+ *
+ * Depending on the state of struct iosys_map.is_iomem, tests if the
+ * mapping has been set.
+ *
+ * Returns:
+ * True if the mapping has been set, or false otherwise.
+ */
+static inline bool iosys_map_is_set(const struct iosys_map *map)
+{
+ return !iosys_map_is_null(map);
+}
+
+/**
+ * iosys_map_clear - Clears an iosys mapping structure
+ * @map: The iosys_map structure
+ *
+ * Clears all fields to zero, including struct iosys_map.is_iomem, so
+ * mapping structures that were set to point to I/O memory are reset to
+ * refer to system memory. Pointers are cleared to NULL, which is the default.
+ */
+static inline void iosys_map_clear(struct iosys_map *map)
+{
+ if (map->is_iomem) {
+ map->vaddr_iomem = NULL;
+ map->is_iomem = false;
+ } else {
+ map->vaddr = NULL;
+ }
+}
+
+/**
+ * iosys_map_memcpy_to - Memcpy into offset of iosys_map
+ * @dst: The iosys_map structure
+ * @dst_offset: The offset in @dst at which to start copying
+ * @src: The source buffer
+ * @len: The number of bytes to copy from src
+ *
+ * Copies data into an iosys_map at an offset. The source buffer is in
+ * system memory. Depending on the buffer's location, the helper picks the
+ * correct method of accessing the memory.
+ */
+static inline void iosys_map_memcpy_to(struct iosys_map *dst, size_t dst_offset,
+ const void *src, size_t len)
+{
+ if (dst->is_iomem)
+ memcpy_toio(dst->vaddr_iomem + dst_offset, src, len);
+ else
+ memcpy(dst->vaddr + dst_offset, src, len);
+}
+
+/**
+ * iosys_map_memcpy_from - Memcpy from iosys_map into system memory
+ * @dst: Destination in system memory
+ * @src: The iosys_map structure
+ * @src_offset: The offset from which to copy
+ * @len: The number of bytes to copy from src
+ *
+ * Copies data from an iosys_map at an offset. The destination buffer is in
+ * system memory. Depending on the mapping location, the helper picks the
+ * correct method of accessing the memory.
+ */
+static inline void iosys_map_memcpy_from(void *dst, const struct iosys_map *src,
+ size_t src_offset, size_t len)
+{
+ if (src->is_iomem)
+ memcpy_fromio(dst, src->vaddr_iomem + src_offset, len);
+ else
+ memcpy(dst, src->vaddr + src_offset, len);
+}
+
+/**
+ * iosys_map_incr - Increments the address stored in an iosys mapping
+ * @map: The iosys_map structure
+ * @incr: The number of bytes to increment
+ *
+ * Increments the address stored in an iosys mapping. Depending on the
+ * buffer's location, the correct pointer field is updated.
+ */
+static inline void iosys_map_incr(struct iosys_map *map, size_t incr)
+{
+ if (map->is_iomem)
+ map->vaddr_iomem += incr;
+ else
+ map->vaddr += incr;
+}
+
+/**
+ * iosys_map_memset - Memset iosys_map
+ * @dst: The iosys_map structure
+ * @offset: Offset in @dst at which to start setting @value
+ * @value: The value to set
+ * @len: The number of bytes to set in dst
+ *
+ * Sets @value in the iosys_map. Depending on the buffer's location, the helper
+ * picks the correct method of accessing the memory.
+ */
+static inline void iosys_map_memset(struct iosys_map *dst, size_t offset,
+ int value, size_t len)
+{
+ if (dst->is_iomem)
+ memset_io(dst->vaddr_iomem + offset, value, len);
+ else
+ memset(dst->vaddr + offset, value, len);
+}
+
+#ifdef CONFIG_64BIT
+#define __iosys_map_rd_io_u64_case(val_, vaddr_iomem_) \
+ u64: val_ = readq(vaddr_iomem_)
+#define __iosys_map_wr_io_u64_case(val_, vaddr_iomem_) \
+ u64: writeq(val_, vaddr_iomem_)
+#else
+#define __iosys_map_rd_io_u64_case(val_, vaddr_iomem_) \
+ u64: memcpy_fromio(&(val_), vaddr_iomem_, sizeof(u64))
+#define __iosys_map_wr_io_u64_case(val_, vaddr_iomem_) \
+ u64: memcpy_toio(vaddr_iomem_, &(val_), sizeof(u64))
+#endif
+
+#define __iosys_map_rd_io(val__, vaddr_iomem__, type__) _Generic(val__, \
+ u8: val__ = readb(vaddr_iomem__), \
+ u16: val__ = readw(vaddr_iomem__), \
+ u32: val__ = readl(vaddr_iomem__), \
+ __iosys_map_rd_io_u64_case(val__, vaddr_iomem__))
+
+#define __iosys_map_rd_sys(val__, vaddr__, type__) \
+ val__ = READ_ONCE(*(type__ *)(vaddr__))
+
+#define __iosys_map_wr_io(val__, vaddr_iomem__, type__) _Generic(val__, \
+ u8: writeb(val__, vaddr_iomem__), \
+ u16: writew(val__, vaddr_iomem__), \
+ u32: writel(val__, vaddr_iomem__), \
+ __iosys_map_wr_io_u64_case(val__, vaddr_iomem__))
+
+#define __iosys_map_wr_sys(val__, vaddr__, type__) \
+ WRITE_ONCE(*(type__ *)(vaddr__), val__)
+
+/**
+ * iosys_map_rd - Read a C-type value from the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @offset__: The offset from which to read
+ * @type__: Type of the value being read
+ *
+ * Read a C type value (u8, u16, u32 or u64) from the iosys_map. For other
+ * types, or if the pointer may be unaligned (which is problematic on some
+ * architectures), use iosys_map_memcpy_from().
+ *
+ * Returns:
+ * The value read from the mapping.
+ */
+#define iosys_map_rd(map__, offset__, type__) ({ \
+ type__ val; \
+ if ((map__)->is_iomem) { \
+ __iosys_map_rd_io(val, (map__)->vaddr_iomem + (offset__), type__);\
+ } else { \
+ __iosys_map_rd_sys(val, (map__)->vaddr + (offset__), type__); \
+ } \
+ val; \
+})
+
+/**
+ * iosys_map_wr - Write a C-type value to the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @offset__: The offset from the mapping to write to
+ * @type__: Type of the value being written
+ * @val__: Value to write
+ *
+ * Write a C type value (u8, u16, u32 or u64) to the iosys_map. For other
+ * types, or if the pointer may be unaligned (which is problematic on some
+ * architectures), use iosys_map_memcpy_to().
+ */
+#define iosys_map_wr(map__, offset__, type__, val__) ({ \
+ type__ val = (val__); \
+ if ((map__)->is_iomem) { \
+ __iosys_map_wr_io(val, (map__)->vaddr_iomem + (offset__), type__);\
+ } else { \
+ __iosys_map_wr_sys(val, (map__)->vaddr + (offset__), type__); \
+ } \
+})
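+
+/*
+ * Example: register-style access through the map, assuming an offset
+ * that is naturally aligned for the access size:
+ *
+ *	u32 status = iosys_map_rd(&map, 0x04, u32);
+ *	iosys_map_wr(&map, 0x04, u32, status | BIT(0));
+ */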
+
+/**
+ * iosys_map_rd_field - Read a member from a struct in the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @struct_offset__: Offset from the beginning of the map, where the struct
+ * is located
+ * @struct_type__: The struct describing the layout of the mapping
+ * @field__: Member of the struct to read
+ *
+ * Read a value from the iosys_map, whose layout is described by a C struct
+ * starting at @struct_offset__. The field offset and size are calculated and
+ * its value read. If the field access would incur an unaligned access, then
+ * either iosys_map_memcpy_from() needs to be used or the architecture must
+ * support it.
+ * For example: suppose there is a @struct foo defined as below and the value
+ * ``foo.field2.inner2`` needs to be read from the iosys_map:
+ *
+ * .. code-block:: c
+ *
+ * struct foo {
+ * int field1;
+ * struct {
+ * int inner1;
+ * int inner2;
+ * } field2;
+ * int field3;
+ * } __packed;
+ *
+ * This is the expected memory layout of a buffer using iosys_map_rd_field():
+ *
+ * +------------------------------+--------------------------+
+ * | Address | Content |
+ * +==============================+==========================+
+ * | buffer + 0000 | start of mmapped buffer |
+ * | | pointed by iosys_map |
+ * +------------------------------+--------------------------+
+ * | ... | ... |
+ * +------------------------------+--------------------------+
+ * | buffer + ``struct_offset__`` | start of ``struct foo`` |
+ * +------------------------------+--------------------------+
+ * | ... | ... |
+ * +------------------------------+--------------------------+
+ * | buffer + wwww | ``foo.field2.inner2`` |
+ * +------------------------------+--------------------------+
+ * | ... | ... |
+ * +------------------------------+--------------------------+
+ * | buffer + yyyy | end of ``struct foo`` |
+ * +------------------------------+--------------------------+
+ * | ... | ... |
+ * +------------------------------+--------------------------+
+ * | buffer + zzzz | end of mmapped buffer |
+ * +------------------------------+--------------------------+
+ *
+ * Values automatically calculated by this macro or not needed are denoted by
+ * wwww, yyyy and zzzz. This is the code to read that value:
+ *
+ * .. code-block:: c
+ *
+ * x = iosys_map_rd_field(&map, offset, struct foo, field2.inner2);
+ *
+ * Returns:
+ * The value read from the mapping.
+ */
+#define iosys_map_rd_field(map__, struct_offset__, struct_type__, field__) ({ \
+ struct_type__ *s; \
+ iosys_map_rd(map__, struct_offset__ + offsetof(struct_type__, field__), \
+ typeof(s->field__)); \
+})
+
+/**
+ * iosys_map_wr_field - Write to a member of a struct in the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @struct_offset__: Offset from the beginning of the map, where the struct
+ * is located
+ * @struct_type__: The struct describing the layout of the mapping
+ * @field__: Member of the struct to write to
+ * @val__: Value to write
+ *
+ * Write a value to the iosys_map, whose layout is described by a C
+ * struct starting at @struct_offset__. The field offset and size are
+ * calculated and @val__ is written. If the field access would incur an
+ * unaligned access, then either iosys_map_memcpy_to() needs to be used or the
+ * architecture must support it. Refer to iosys_map_rd_field() for expected
+ * usage and memory layout.
+ */
+#define iosys_map_wr_field(map__, struct_offset__, struct_type__, field__, val__) ({ \
+ struct_type__ *s; \
+ iosys_map_wr(map__, struct_offset__ + offsetof(struct_type__, field__), \
+ typeof(s->field__), val__); \
+})
+
+#endif /* __IOSYS_MAP_H__ */
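For illustration, a sketch of a round trip through the helpers above; the
struct and function names are made up:

	struct example_hdr {
		u32 magic;
		u32 version;
	};

	static u32 example_roundtrip(struct iosys_map *map)
	{
		struct example_hdr hdr = { .magic = 0x12345678, .version = 2 };

		/* Works for both system- and I/O-memory backings. */
		iosys_map_memcpy_to(map, 0, &hdr, sizeof(hdr));
		return iosys_map_rd_field(map, 0, struct example_hdr, version);
	}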
diff --git a/include/linux/iova.h b/include/linux/iova.h
index a0637abffee8..83c00fac2acb 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -12,7 +12,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>
-#include <linux/atomic.h>
#include <linux/dma-mapping.h>
/* iova structure */
@@ -22,47 +21,8 @@ struct iova {
unsigned long pfn_lo; /* Lowest allocated pfn */
};
-struct iova_magazine;
-struct iova_cpu_rcache;
-#define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */
-#define MAX_GLOBAL_MAGS 32 /* magazines per bin */
-
-struct iova_rcache {
- spinlock_t lock;
- unsigned long depot_size;
- struct iova_magazine *depot[MAX_GLOBAL_MAGS];
- struct iova_cpu_rcache __percpu *cpu_rcaches;
-};
-
-struct iova_domain;
-
-/* Call-Back from IOVA code into IOMMU drivers */
-typedef void (* iova_flush_cb)(struct iova_domain *domain);
-
-/* Destructor for per-entry data */
-typedef void (* iova_entry_dtor)(unsigned long data);
-
-/* Number of entries per Flush Queue */
-#define IOVA_FQ_SIZE 256
-
-/* Timeout (in ms) after which entries are flushed from the Flush-Queue */
-#define IOVA_FQ_TIMEOUT 10
-
-/* Flush Queue entry for defered flushing */
-struct iova_fq_entry {
- unsigned long iova_pfn;
- unsigned long pages;
- unsigned long data;
- u64 counter; /* Flush counter when this entrie was added */
-};
-
-/* Per-CPU Flush Queue structure */
-struct iova_fq {
- struct iova_fq_entry entries[IOVA_FQ_SIZE];
- unsigned head, tail;
- spinlock_t lock;
-};
+struct iova_rcache;
/* holds all the iova translations for a domain */
struct iova_domain {
@@ -74,27 +34,10 @@ struct iova_domain {
unsigned long start_pfn; /* Lower limit for this domain */
unsigned long dma_32bit_pfn;
unsigned long max32_alloc_size; /* Size of last failed allocation */
- struct iova_fq __percpu *fq; /* Flush Queue */
-
- atomic64_t fq_flush_start_cnt; /* Number of TLB flushes that
- have been started */
-
- atomic64_t fq_flush_finish_cnt; /* Number of TLB flushes that
- have been finished */
-
struct iova anchor; /* rbtree lookup anchor */
- struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
-
- iova_flush_cb flush_cb; /* Call-Back function to flush IOMMU
- TLBs */
-
- iova_entry_dtor entry_dtor; /* IOMMU driver specific destructor for
- iova entry */
- struct timer_list fq_timer; /* Timer to regularily empty the
- flush-queues */
- atomic_t fq_timer_on; /* 1 when timer is active, 0
- when not */
+ struct iova_rcache *rcaches;
+ struct hlist_node cpuhp_dead;
};
static inline unsigned long iova_size(struct iova *iova)
@@ -132,12 +75,12 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
return iova >> iova_shift(iovad);
}
-#if IS_ENABLED(CONFIG_IOMMU_IOVA)
+#if IS_REACHABLE(CONFIG_IOMMU_IOVA)
int iova_cache_get(void);
void iova_cache_put(void);
-struct iova *alloc_iova_mem(void);
-void free_iova_mem(struct iova *iova);
+unsigned long iova_rcache_range(void);
+
void free_iova(struct iova_domain *iovad, unsigned long pfn);
void __free_iova(struct iova_domain *iovad, struct iova *iova);
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
@@ -145,24 +88,15 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
bool size_aligned);
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
unsigned long size);
-void queue_iova(struct iova_domain *iovad,
- unsigned long pfn, unsigned long pages,
- unsigned long data);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
unsigned long limit_pfn, bool flush_rcache);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
unsigned long pfn_hi);
-void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn);
-bool has_iova_flush_queue(struct iova_domain *iovad);
-int init_iova_flush_queue(struct iova_domain *iovad,
- iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
+int iova_domain_init_rcaches(struct iova_domain *iovad);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
-struct iova *split_and_remove_iova(struct iova_domain *iovad,
- struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
-void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
#else
static inline int iova_cache_get(void)
{
@@ -173,15 +107,6 @@ static inline void iova_cache_put(void)
{
}
-static inline struct iova *alloc_iova_mem(void)
-{
- return NULL;
-}
-
-static inline void free_iova_mem(struct iova *iova)
-{
-}
-
static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
{
}
@@ -204,12 +129,6 @@ static inline void free_iova_fast(struct iova_domain *iovad,
{
}
-static inline void queue_iova(struct iova_domain *iovad,
- unsigned long pfn, unsigned long pages,
- unsigned long data)
-{
-}
-
static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
unsigned long size,
unsigned long limit_pfn,
@@ -225,29 +144,12 @@ static inline struct iova *reserve_iova(struct iova_domain *iovad,
return NULL;
}
-static inline void copy_reserved_iova(struct iova_domain *from,
- struct iova_domain *to)
-{
-}
-
static inline void init_iova_domain(struct iova_domain *iovad,
unsigned long granule,
unsigned long start_pfn)
{
}
-static inline bool has_iova_flush_queue(struct iova_domain *iovad)
-{
- return false;
-}
-
-static inline int init_iova_flush_queue(struct iova_domain *iovad,
- iova_flush_cb flush_cb,
- iova_entry_dtor entry_dtor)
-{
- return -ENODEV;
-}
-
static inline struct iova *find_iova(struct iova_domain *iovad,
unsigned long pfn)
{
@@ -258,18 +160,6 @@ static inline void put_iova_domain(struct iova_domain *iovad)
{
}
-static inline struct iova *split_and_remove_iova(struct iova_domain *iovad,
- struct iova *iova,
- unsigned long pfn_lo,
- unsigned long pfn_hi)
-{
- return NULL;
-}
-
-static inline void free_cpu_cached_iovas(unsigned int cpu,
- struct iova_domain *iovad)
-{
-}
#endif
#endif
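With the flush-queue machinery moved out of this header, the per-domain
setup expected from an IOMMU driver reduces to roughly this sketch (the
granule and start pfn are illustrative):

	static int example_iova_setup(struct iova_domain *iovad)
	{
		init_iova_domain(iovad, PAGE_SIZE, 1);
		return iova_domain_init_rcaches(iovad);
	}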
diff --git a/include/linux/iova_bitmap.h b/include/linux/iova_bitmap.h
new file mode 100644
index 000000000000..c006cf0a25f3
--- /dev/null
+++ b/include/linux/iova_bitmap.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Oracle and/or its affiliates.
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+#ifndef _IOVA_BITMAP_H_
+#define _IOVA_BITMAP_H_
+
+#include <linux/types.h>
+
+struct iova_bitmap;
+
+typedef int (*iova_bitmap_fn_t)(struct iova_bitmap *bitmap,
+ unsigned long iova, size_t length,
+ void *opaque);
+
+struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,
+ unsigned long page_size,
+ u64 __user *data);
+void iova_bitmap_free(struct iova_bitmap *bitmap);
+int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque,
+ iova_bitmap_fn_t fn);
+void iova_bitmap_set(struct iova_bitmap *bitmap,
+ unsigned long iova, size_t length);
+
+#endif
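A sketch of the intended iteration pattern, using only the declarations
above; the callback name is made up:

	static int example_mark_dirty(struct iova_bitmap *bitmap,
				      unsigned long iova, size_t length,
				      void *opaque)
	{
		iova_bitmap_set(bitmap, iova, length);
		return 0;
	}

	/* later: ret = iova_bitmap_for_each(bitmap, NULL, example_mark_dirty); */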
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index c309f43bde45..e8240cf2611a 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -10,6 +10,8 @@
#include <linux/ns_common.h>
#include <linux/refcount.h>
#include <linux/rhashtable-types.h>
+#include <linux/sysctl.h>
+#include <linux/percpu_counter.h>
struct user_namespace;
@@ -27,7 +29,6 @@ struct ipc_ids {
};
struct ipc_namespace {
- refcount_t count;
struct ipc_ids ids[3];
int sem_ctls[4];
@@ -36,8 +37,8 @@ struct ipc_namespace {
unsigned int msg_ctlmax;
unsigned int msg_ctlmnb;
unsigned int msg_ctlmni;
- atomic_t msg_bytes;
- atomic_t msg_hdrs;
+ struct percpu_counter percpu_msg_bytes;
+ struct percpu_counter percpu_msg_hdrs;
size_t shm_ctlmax;
size_t shm_ctlall;
@@ -64,10 +65,18 @@ struct ipc_namespace {
unsigned int mq_msg_default;
unsigned int mq_msgsize_default;
+ struct ctl_table_set mq_set;
+ struct ctl_table_header *mq_sysctls;
+
+ struct ctl_table_set ipc_set;
+ struct ctl_table_header *ipc_sysctls;
+
/* user_ns which owns the ipc ns */
struct user_namespace *user_ns;
struct ucounts *ucounts;
+ struct llist_node mnt_llist;
+
struct ns_common ns;
} __randomize_layout;
@@ -126,10 +135,20 @@ extern struct ipc_namespace *copy_ipcs(unsigned long flags,
static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
{
if (ns)
- refcount_inc(&ns->count);
+ refcount_inc(&ns->ns.count);
return ns;
}
+static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
+{
+ if (ns) {
+ if (refcount_inc_not_zero(&ns->ns.count))
+ return ns;
+ }
+
+ return NULL;
+}
+
extern void put_ipc_ns(struct ipc_namespace *ns);
#else
static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
@@ -146,6 +165,11 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
return ns;
}
+static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
+{
+ return ns;
+}
+
static inline void put_ipc_ns(struct ipc_namespace *ns)
{
}
@@ -153,15 +177,37 @@ static inline void put_ipc_ns(struct ipc_namespace *ns)
#ifdef CONFIG_POSIX_MQUEUE_SYSCTL
-struct ctl_table_header;
-extern struct ctl_table_header *mq_register_sysctl_table(void);
+void retire_mq_sysctls(struct ipc_namespace *ns);
+bool setup_mq_sysctls(struct ipc_namespace *ns);
#else /* CONFIG_POSIX_MQUEUE_SYSCTL */
-static inline struct ctl_table_header *mq_register_sysctl_table(void)
+static inline void retire_mq_sysctls(struct ipc_namespace *ns)
{
- return NULL;
+}
+
+static inline bool setup_mq_sysctls(struct ipc_namespace *ns)
+{
+ return true;
}
#endif /* CONFIG_POSIX_MQUEUE_SYSCTL */
+
+#ifdef CONFIG_SYSVIPC_SYSCTL
+
+bool setup_ipc_sysctls(struct ipc_namespace *ns);
+void retire_ipc_sysctls(struct ipc_namespace *ns);
+
+#else /* CONFIG_SYSVIPC_SYSCTL */
+
+static inline void retire_ipc_sysctls(struct ipc_namespace *ns)
+{
+}
+
+static inline bool setup_ipc_sysctls(struct ipc_namespace *ns)
+{
+ return true;
+}
+
+#endif /* CONFIG_SYSVIPC_SYSCTL */
#endif
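A sketch of how the new conditional-reference helper is meant to be used,
e.g. by a lockless reader that must not revive a dying namespace; the
function name is made up:

	static bool example_try_use(struct ipc_namespace *candidate)
	{
		struct ipc_namespace *ns = get_ipc_ns_not_zero(candidate);

		if (!ns)
			return false;	/* namespace already being torn down */
		/* ... inspect ns->shm_ctlmax and friends ... */
		put_ipc_ns(ns);
		return true;
	}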
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index ef61676cfe05..a1c9c0d48ebf 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -72,6 +72,11 @@ struct ipmi_recv_msg {
unsigned char msg_data[IPMI_MAX_MSG_LENGTH];
};
+#define INIT_IPMI_RECV_MSG(done_handler) \
+{ \
+ .done = done_handler \
+}
+
/* Allocate and free the receive message. */
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg);
@@ -333,4 +338,9 @@ struct ipmi_smi_info {
/* This is to get the private info of struct ipmi_smi */
extern int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data);
+#define GET_DEVICE_ID_MAX_RETRY 5
+
+/* Helper function for computing the IPMB checksum of some data. */
+unsigned char ipmb_checksum(unsigned char *data, int size);
+
#endif /* __LINUX_IPMI_H */
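A sketch of static initialization with the new macro; the handler name is
made up:

	static void example_done(struct ipmi_recv_msg *msg)
	{
		ipmi_free_recv_msg(msg);
	}

	static struct ipmi_recv_msg example_msg = INIT_IPMI_RECV_MSG(example_done);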
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index deec18b8944a..5d69820d8b02 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -39,6 +39,59 @@ struct ipmi_smi;
#define IPMI_WATCH_MASK_CHECK_COMMANDS (1 << 2)
/*
+ * SMI messages
+ *
+ * When communicating with an SMI, messages come in two formats:
+ *
+ * * Normal (to a BMC over a BMC interface)
+ *
+ * * IPMB (over an IPMB to another MC)
+ *
+ * When normal, commands are sent using the standard message format
+ * over KCS (NetFn must be even):
+ *
+ * +-----------+-----+------+
+ * | NetFn/LUN | Cmd | Data |
+ * +-----------+-----+------+
+ *
+ * And responses, similarly, with a completion code added (NetFn must
+ * be odd):
+ *
+ * +-----------+-----+------+------+
+ * | NetFn/LUN | Cmd | CC | Data |
+ * +-----------+-----+------+------+
+ *
+ * With normal messages, only commands are sent and only responses are
+ * received.
+ *
+ * In IPMB mode, we are acting as an IPMB device. Commands will be in
+ * the following format (NetFn must be even):
+ *
+ * +-------------+------+-------------+-----+------+
+ * | NetFn/rsLUN | Addr | rqSeq/rqLUN | Cmd | Data |
+ * +-------------+------+-------------+-----+------+
+ *
+ * Responses use the following format:
+ *
+ * +-------------+------+-------------+-----+------+------+
+ * | NetFn/rqLUN | Addr | rqSeq/rsLUN | Cmd | CC | Data |
+ * +-------------+------+-------------+-----+------+------+
+ *
+ * This is similar to the format defined in the IPMB manual section
+ * 2.11.1 with the checksums and the first address removed. Also, the
+ * address is always the remote address.
+ *
+ * IPMB messages can be commands and responses in both directions.
+ * Received commands are delivered to the upper layer like any other
+ * received command, through the receive message queue.
+ */
+
+enum ipmi_smi_msg_type {
+ IPMI_SMI_MSG_TYPE_NORMAL = 0,
+ IPMI_SMI_MSG_TYPE_IPMB_DIRECT
+};
+
+/*
* Messages to/from the lower layer. The smi interface will take one
* of these to send. After the send has occurred and a response has
* been received, it will report this same data structure back up to
@@ -54,6 +107,8 @@ struct ipmi_smi;
struct ipmi_smi_msg {
struct list_head link;
+ enum ipmi_smi_msg_type type;
+
long msgid;
void *user_data;
@@ -70,9 +125,19 @@ struct ipmi_smi_msg {
void (*done)(struct ipmi_smi_msg *msg);
};
+#define INIT_IPMI_SMI_MSG(done_handler) \
+{ \
+ .done = done_handler, \
+ .type = IPMI_SMI_MSG_TYPE_NORMAL \
+}
+
struct ipmi_smi_handlers {
struct module *owner;
+ /* Capabilities of the SMI. */
+#define IPMI_SMI_CAN_HANDLE_IPMB_DIRECT (1 << 0)
+ unsigned int flags;
+
/*
* The low-level interface cannot start sending messages to
* the upper layer until this function is called. This may
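A sketch of an SMI driver advertising the new direct-IPMB capability and
statically initializing a message; everything except the flag and the
initializer macro is made up:

	static void example_smi_done(struct ipmi_smi_msg *msg)
	{
		/* return the message to the driver's free pool */
	}

	static struct ipmi_smi_msg example_smi_msg =
		INIT_IPMI_SMI_MSG(example_smi_done);

	static const struct ipmi_smi_handlers example_handlers = {
		.owner	= THIS_MODULE,
		.flags	= IPMI_SMI_CAN_HANDLE_IPMB_DIRECT,
		/* .start_processing, .sender, etc. */
	};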
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index ea7c7906591e..37dfdcfcdd54 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -31,6 +31,7 @@ struct ipv6_devconf {
__s32 max_desync_factor;
__s32 max_addresses;
__s32 accept_ra_defrtr;
+ __u32 ra_defrtr_metric;
__s32 accept_ra_min_hop_limit;
__s32 accept_ra_pinfo;
__s32 ignore_routes_with_linkdown;
@@ -50,7 +51,7 @@ struct ipv6_devconf {
__s32 use_optimistic;
#endif
#ifdef CONFIG_IPV6_MROUTE
- __s32 mc_forwarding;
+ atomic_t mc_forwarding;
#endif
__s32 disable_ipv6;
__s32 drop_unicast_in_l2_multicast;
@@ -60,6 +61,7 @@ struct ipv6_devconf {
__s32 suppress_frag_ndisc;
__s32 accept_ra_mtu;
__s32 drop_unsolicited_na;
+ __s32 accept_untracked_na;
struct ipv6_stable_secret {
bool initialized;
struct in6_addr secret;
@@ -74,6 +76,11 @@ struct ipv6_devconf {
__u32 addr_gen_mode;
__s32 disable_policy;
__s32 ndisc_tclass;
+ __s32 rpl_seg_enabled;
+ __u32 ioam6_id;
+ __u32 ioam6_id_wide;
+ __u8 ioam6_enabled;
+ __u8 ndisc_evict_nocarrier;
struct ctl_table_header *sysctl_header;
};
@@ -83,7 +90,6 @@ struct ipv6_params {
__s32 autoconf;
};
extern struct ipv6_params ipv6_defaults;
-#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
@@ -128,6 +134,7 @@ struct inet6_skb_parm {
__u16 dsthao;
#endif
__u16 frag_max_size;
+ __u16 srhoff;
#define IP6SKB_XFRM_TRANSFORMED 1
#define IP6SKB_FORWARDED 2
@@ -137,6 +144,8 @@ struct inet6_skb_parm {
#define IP6SKB_HOPBYHOP 32
#define IP6SKB_L3SLAVE 64
#define IP6SKB_JUMBOGRAM 128
+#define IP6SKB_SEG6 256
+#define IP6SKB_FAKEJUMBO 512
};
#if defined(CONFIG_NET_L3_MASTER_DEV)
@@ -176,17 +185,6 @@ static inline int inet6_sdif(const struct sk_buff *skb)
return 0;
}
-/* can not be used in TCP layer after tcp_v6_fill_cb */
-static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
-{
-#if defined(CONFIG_NET_L3_MASTER_DEV)
- if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
- skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
- return true;
-#endif
- return false;
-}
-
struct tcp6_request_sock {
struct tcp_request_sock tcp6rsk_tcp;
};
@@ -222,7 +220,7 @@ struct ipv6_pinfo {
/*
* Packed in 16bits.
- * Omit one shift by by putting the signed field at MSB.
+ * Omit one shift by putting the signed field at MSB.
*/
#if defined(__BIG_ENDIAN_BITFIELD)
__s16 hop_limit:9;
@@ -282,13 +280,13 @@ struct ipv6_pinfo {
autoflowlabel:1,
autoflowlabel_set:1,
mc_all:1,
+ recverr_rfc4884:1,
rtalert_isolate:1;
__u8 min_hopcount;
__u8 tclass;
__be32 rcv_flowinfo;
__u32 dst_cookie;
- __u32 rx_dst_cookie;
struct ipv6_mc_socklist __rcu *ipv6_mc_list;
struct ipv6_ac_socklist *ipv6_ac_list;
@@ -343,19 +341,7 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
return (struct raw6_sock *)sk;
}
-static inline void inet_sk_copy_descendant(struct sock *sk_to,
- const struct sock *sk_from)
-{
- int ancestor_size = sizeof(struct inet_sock);
-
- if (sk_from->sk_family == PF_INET6)
- ancestor_size += sizeof(struct ipv6_pinfo);
-
- __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
-}
-
-#define __ipv6_only_sock(sk) (sk->sk_ipv6only)
-#define ipv6_only_sock(sk) (__ipv6_only_sock(sk))
+#define ipv6_only_sock(sk) (sk->sk_ipv6only)
#define ipv6_sk_rxinfo(sk) ((sk)->sk_family == PF_INET6 && \
inet6_sk(sk)->rxopt.bits.rxinfo)
@@ -372,7 +358,6 @@ static inline int inet_v6_ipv6only(const struct sock *sk)
return ipv6_only_sock(sk);
}
#else
-#define __ipv6_only_sock(sk) 0
#define ipv6_only_sock(sk) 0
#define ipv6_sk_rxinfo(sk) 0
@@ -386,19 +371,12 @@ static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
return NULL;
}
-static inline struct inet6_request_sock *
- inet6_rsk(const struct request_sock *rsk)
-{
- return NULL;
-}
-
static inline struct raw6_sock *raw6_sk(const struct sock *sk)
{
return NULL;
}
#define inet6_rcv_saddr(__sk) NULL
-#define tcp_twsk_ipv6only(__sk) 0
#define inet_v6_ipv6only(__sk) 0
#endif /* IS_ENABLED(CONFIG_IPV6) */
#endif /* _IPV6_H */
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 3ed5a055b5f4..c3eb89606c2b 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -71,6 +71,8 @@ enum irqchip_irq_state;
* it from the spurious interrupt detection
* mechanism and from core side polling.
* IRQ_DISABLE_UNLAZY - Disable lazy irq disable
+ * IRQ_HIDDEN - Don't show up in /proc/interrupts
+ * IRQ_NO_DEBUG - Exclude from note_interrupt() debugging
*/
enum {
IRQ_TYPE_NONE = 0x00000000,
@@ -97,13 +99,15 @@ enum {
IRQ_PER_CPU_DEVID = (1 << 17),
IRQ_IS_POLLED = (1 << 18),
IRQ_DISABLE_UNLAZY = (1 << 19),
+ IRQ_HIDDEN = (1 << 20),
+ IRQ_NO_DEBUG = (1 << 21),
};
#define IRQF_MODIFY_MASK \
(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
- IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)
+ IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_HIDDEN)
#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
@@ -114,7 +118,7 @@ enum {
* IRQ_SET_MASK_OK_NOCOPY - OK, chip did update irq_common_data.affinity
* IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to
* support stacked irqchips, which indicates skipping
- * all descendent irqchips.
+ * all descendant irqchips.
*/
enum {
IRQ_SET_MASK_OK = 0,
@@ -147,7 +151,9 @@ struct irq_common_data {
#endif
void *handler_data;
struct msi_desc *msi_desc;
+#ifdef CONFIG_SMP
cpumask_var_t affinity;
+#endif
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
cpumask_var_t effective_affinity;
#endif
@@ -211,6 +217,12 @@ struct irq_data {
* IRQD_CAN_RESERVE - Can use reservation mode
* IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change
* required
+ * IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked
+ * from actual interrupt context.
+ * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call
+ * irq_chip::irq_set_affinity() when deactivated.
+ * IRQD_IRQ_ENABLED_ON_SUSPEND - Interrupt is enabled on suspend by irq pm if
+ * the irqchip has the IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND flag set.
*/
enum {
IRQD_TRIGGER_MASK = 0xf,
@@ -234,6 +246,9 @@ enum {
IRQD_DEFAULT_TRIGGER_SET = (1 << 25),
IRQD_CAN_RESERVE = (1 << 26),
IRQD_MSI_NOMASK_QUIRK = (1 << 27),
+ IRQD_HANDLE_ENFORCE_IRQCTX = (1 << 28),
+ IRQD_AFFINITY_ON_ACTIVATE = (1 << 29),
+ IRQD_IRQ_ENABLED_ON_SUSPEND = (1 << 30),
};
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -291,7 +306,7 @@ static inline bool irqd_is_level_type(struct irq_data *d)
/*
* Must only be called from irqchip.irq_set_affinity() or low level
- * hieararchy domain allocation functions.
+ * hierarchy domain allocation functions.
*/
static inline void irqd_set_single_target(struct irq_data *d)
{
@@ -303,6 +318,21 @@ static inline bool irqd_is_single_target(struct irq_data *d)
return __irqd_to_state(d) & IRQD_SINGLE_TARGET;
}
+static inline void irqd_set_handle_enforce_irqctx(struct irq_data *d)
+{
+ __irqd_to_state(d) |= IRQD_HANDLE_ENFORCE_IRQCTX;
+}
+
+static inline bool irqd_is_handle_enforce_irqctx(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_HANDLE_ENFORCE_IRQCTX;
+}
+
+static inline bool irqd_is_enabled_on_suspend(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_IRQ_ENABLED_ON_SUSPEND;
+}
+
static inline bool irqd_is_wakeup_set(struct irq_data *d)
{
return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
@@ -408,6 +438,16 @@ static inline bool irqd_msi_nomask_quirk(struct irq_data *d)
return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK;
}
+static inline void irqd_set_affinity_on_activate(struct irq_data *d)
+{
+ __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
+}
+
+static inline bool irqd_affinity_on_activate(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE;
+}
+
#undef __irqd_to_state
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
@@ -418,7 +458,6 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
/**
* struct irq_chip - hardware interrupt chip descriptor
*
- * @parent_device: pointer to parent device for irqchip
* @name: name for /proc/interrupts
* @irq_startup: start up the interrupt (defaults to ->enable if NULL)
* @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL)
@@ -465,7 +504,6 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @flags: chip specific flags
*/
struct irq_chip {
- struct device *parent_device;
const char *name;
unsigned int (*irq_startup)(struct irq_data *data);
void (*irq_shutdown)(struct irq_data *data);
@@ -486,9 +524,10 @@ struct irq_chip {
void (*irq_bus_lock)(struct irq_data *data);
void (*irq_bus_sync_unlock)(struct irq_data *data);
+#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
void (*irq_cpu_online)(struct irq_data *data);
void (*irq_cpu_offline)(struct irq_data *data);
-
+#endif
void (*irq_suspend)(struct irq_data *data);
void (*irq_resume)(struct irq_data *data);
void (*irq_pm_shutdown)(struct irq_data *data);
@@ -519,27 +558,34 @@ struct irq_chip {
/*
* irq_chip specific flags
*
- * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type()
- * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled
- * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path
- * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks
- * when irq enabled
- * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip
- * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask
- * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode
- * IRQCHIP_SUPPORTS_LEVEL_MSI Chip can provide two doorbells for Level MSIs
- * IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips
+ * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type()
+ * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled
+ * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path
+ * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks
+ * when irq enabled
+ * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip
+ * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask
+ * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode
+ * IRQCHIP_SUPPORTS_LEVEL_MSI: Chip can provide two doorbells for Level MSIs
+ * IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips
+ * IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND: Invokes __enable_irq()/__disable_irq() for wake irqs
+ * in the suspend path if they are in a disabled state
+ * IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup
+ * IRQCHIP_IMMUTABLE: Don't ever change anything in this chip
*/
enum {
- IRQCHIP_SET_TYPE_MASKED = (1 << 0),
- IRQCHIP_EOI_IF_HANDLED = (1 << 1),
- IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
- IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
- IRQCHIP_SKIP_SET_WAKE = (1 << 4),
- IRQCHIP_ONESHOT_SAFE = (1 << 5),
- IRQCHIP_EOI_THREADED = (1 << 6),
- IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7),
- IRQCHIP_SUPPORTS_NMI = (1 << 8),
+ IRQCHIP_SET_TYPE_MASKED = (1 << 0),
+ IRQCHIP_EOI_IF_HANDLED = (1 << 1),
+ IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
+ IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
+ IRQCHIP_SKIP_SET_WAKE = (1 << 4),
+ IRQCHIP_ONESHOT_SAFE = (1 << 5),
+ IRQCHIP_EOI_THREADED = (1 << 6),
+ IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7),
+ IRQCHIP_SUPPORTS_NMI = (1 << 8),
+ IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = (1 << 9),
+ IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10),
+ IRQCHIP_IMMUTABLE = (1 << 11),
};
#include <linux/irqdesc.h>
@@ -560,13 +606,13 @@ enum {
#define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS
struct irqaction;
-extern int setup_irq(unsigned int irq, struct irqaction *new);
-extern void remove_irq(unsigned int irq, struct irqaction *act);
extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
+#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
+#endif
extern int irq_set_affinity_locked(struct irq_data *data,
const struct cpumask *cpumask, bool force);
extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);
@@ -668,10 +714,11 @@ extern struct irq_chip no_irq_chip;
extern struct irq_chip dummy_irq_chip;
extern void
-irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
+irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip,
irq_flow_handler_t handle, const char *name);
-static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip,
+static inline void irq_set_chip_and_handler(unsigned int irq,
+ const struct irq_chip *chip,
irq_flow_handler_t handle)
{
irq_set_chip_and_handler_name(irq, chip, handle, NULL);
@@ -761,7 +808,7 @@ static inline void irq_set_percpu_devid_flags(unsigned int irq)
}
/* Set/get chip/data for an IRQ: */
-extern int irq_set_chip(unsigned int irq, struct irq_chip *chip);
+extern int irq_set_chip(unsigned int irq, const struct irq_chip *chip);
extern int irq_set_handler_data(unsigned int irq, void *data);
extern int irq_set_chip_data(unsigned int irq, void *data);
extern int irq_set_irq_type(unsigned int irq, unsigned int type);
@@ -834,21 +881,34 @@ static inline int irq_data_get_node(struct irq_data *d)
return irq_common_data_get_node(d->common);
}
-static inline struct cpumask *irq_get_affinity_mask(int irq)
+static inline
+const struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
{
- struct irq_data *d = irq_get_irq_data(irq);
+#ifdef CONFIG_SMP
+ return d->common->affinity;
+#else
+ return cpumask_of(0);
+#endif
+}
- return d ? d->common->affinity : NULL;
+static inline void irq_data_update_affinity(struct irq_data *d,
+ const struct cpumask *m)
+{
+#ifdef CONFIG_SMP
+ cpumask_copy(d->common->affinity, m);
+#endif
}
-static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
+static inline const struct cpumask *irq_get_affinity_mask(int irq)
{
- return d->common->affinity;
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ return d ? irq_data_get_affinity_mask(d) : NULL;
}
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static inline
-struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
+const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
return d->common->effective_affinity;
}
@@ -863,12 +923,20 @@ static inline void irq_data_update_effective_affinity(struct irq_data *d,
{
}
static inline
-struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
+const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
- return d->common->affinity;
+ return irq_data_get_affinity_mask(d);
}
#endif
+static inline
+const struct cpumask *irq_get_effective_affinity_mask(unsigned int irq)
+{
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ return d ? irq_data_get_effective_affinity_mask(d) : NULL;
+}
+
unsigned int arch_dynirq_lower_bound(unsigned int from);
int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
@@ -884,7 +952,7 @@ int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
__irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)
#define irq_alloc_desc(node) \
- irq_alloc_descs(-1, 0, 1, node)
+ irq_alloc_descs(-1, 1, 1, node)
#define irq_alloc_desc_at(at, node) \
irq_alloc_descs(at, at, 1, node)
@@ -899,7 +967,7 @@ int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
__devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL)
#define devm_irq_alloc_desc(dev, node) \
- devm_irq_alloc_descs(dev, -1, 0, 1, node)
+ devm_irq_alloc_descs(dev, -1, 1, 1, node)
#define devm_irq_alloc_desc_at(dev, at, node) \
devm_irq_alloc_descs(dev, at, at, 1, node)
@@ -916,21 +984,6 @@ static inline void irq_free_desc(unsigned int irq)
irq_free_descs(irq, 1);
}
-#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
-unsigned int irq_alloc_hwirqs(int cnt, int node);
-static inline unsigned int irq_alloc_hwirq(int node)
-{
- return irq_alloc_hwirqs(1, node);
-}
-void irq_free_hwirqs(unsigned int from, int cnt);
-static inline void irq_free_hwirq(unsigned int irq)
-{
- return irq_free_hwirqs(irq, 1);
-}
-int arch_setup_hwirq(unsigned int irq, int node);
-void arch_teardown_hwirq(unsigned int irq);
-#endif
-
#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq);
#endif
@@ -1030,7 +1083,7 @@ struct irq_chip_generic {
unsigned long unused;
struct irq_domain *domain;
struct list_head list;
- struct irq_chip_type chip_types[0];
+ struct irq_chip_type chip_types[];
};
/**
@@ -1066,7 +1119,7 @@ struct irq_domain_chip_generic {
unsigned int irq_flags_to_clear;
unsigned int irq_flags_to_set;
enum irq_gc_flags gc_flags;
- struct irq_chip_generic *gc[0];
+ struct irq_chip_generic *gc[];
};
/* Generic chip callback functions */
@@ -1084,6 +1137,7 @@ int irq_gc_set_wake(struct irq_data *d, unsigned int on);
/* Setup functions for irq_chip_generic */
int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw_irq);
+void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq);
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
void __iomem *reg_base, irq_flow_handler_t handler);
@@ -1228,6 +1282,15 @@ int __init set_handle_irq(void (*handle_irq)(struct pt_regs *));
* top-level IRQ handler.
*/
extern void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;
+asmlinkage void generic_handle_arch_irq(struct pt_regs *regs);
+#else
+#ifndef set_handle_irq
+#define set_handle_irq(handle_irq) \
+ do { \
+ (void)handle_irq; \
+ WARN_ON(1); \
+ } while (0)
+#endif
#endif
#endif /* _LINUX_IRQ_H */
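With the setter prototypes above taking const struct irq_chip *, a chip
can now live in rodata; a sketch with made-up callbacks:

	static void example_mask(struct irq_data *d) { /* ... */ }
	static void example_unmask(struct irq_data *d) { /* ... */ }

	static const struct irq_chip example_chip = {
		.name		= "example",
		.irq_mask	= example_mask,
		.irq_unmask	= example_unmask,
		.flags		= IRQCHIP_IMMUTABLE,
	};

	/* irq_set_chip_and_handler(irq, &example_chip, handle_level_irq); */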
diff --git a/include/linux/irq_cpustat.h b/include/linux/irq_cpustat.h
deleted file mode 100644
index 6e8895cd4d92..000000000000
--- a/include/linux/irq_cpustat.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __irq_cpustat_h
-#define __irq_cpustat_h
-
-/*
- * Contains default mappings for irq_cpustat_t, used by almost every
- * architecture. Some arch (like s390) have per cpu hardware pages and
- * they define their own mappings for irq_stat.
- *
- * Keith Owens <kaos@ocs.com.au> July 2000.
- */
-
-
-/*
- * Simple wrappers reducing source bloat. Define all irq_stat fields
- * here, even ones that are arch dependent. That way we get common
- * definitions instead of differing sets for each arch.
- */
-
-#ifndef __ARCH_IRQ_STAT
-DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat); /* defined in asm/hardirq.h */
-#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat.member, cpu))
-#endif
-
-/* arch dependent irq_stat fields */
-#define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count) /* i386 */
-
-#endif /* __irq_cpustat_h */
diff --git a/include/linux/irq_sim.h b/include/linux/irq_sim.h
index 4500d453a63e..ab831e5ae748 100644
--- a/include/linux/irq_sim.h
+++ b/include/linux/irq_sim.h
@@ -1,41 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2017-2018 Bartosz Golaszewski <brgl@bgdev.pl>
+ * Copyright (C) 2020 Bartosz Golaszewski <bgolaszewski@baylibre.com>
*/
#ifndef _LINUX_IRQ_SIM_H
#define _LINUX_IRQ_SIM_H
-#include <linux/irq_work.h>
#include <linux/device.h>
+#include <linux/fwnode.h>
+#include <linux/irqdomain.h>
/*
* Provides a framework for allocating simulated interrupts which can be
* requested like normal irqs and enqueued from process context.
*/
-struct irq_sim_work_ctx {
- struct irq_work work;
- unsigned long *pending;
-};
-
-struct irq_sim_irq_ctx {
- int irqnum;
- bool enabled;
-};
-
-struct irq_sim {
- struct irq_sim_work_ctx work_ctx;
- int irq_base;
- unsigned int irq_count;
- struct irq_sim_irq_ctx *irqs;
-};
-
-int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs);
-int devm_irq_sim_init(struct device *dev, struct irq_sim *sim,
- unsigned int num_irqs);
-void irq_sim_fini(struct irq_sim *sim);
-void irq_sim_fire(struct irq_sim *sim, unsigned int offset);
-int irq_sim_irqnum(struct irq_sim *sim, unsigned int offset);
+struct irq_domain *irq_domain_create_sim(struct fwnode_handle *fwnode,
+ unsigned int num_irqs);
+struct irq_domain *devm_irq_domain_create_sim(struct device *dev,
+ struct fwnode_handle *fwnode,
+ unsigned int num_irqs);
+void irq_domain_remove_sim(struct irq_domain *domain);
#endif /* _LINUX_IRQ_SIM_H */
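A sketch of the domain-based flow that replaces the old struct irq_sim
API; it assumes irq_create_mapping() from linux/irqdomain.h and that the
constructors return ERR_PTR() on failure:

	static int example_sim_setup(struct device *dev,
				     struct fwnode_handle *fwnode)
	{
		struct irq_domain *domain;
		int virq;

		domain = devm_irq_domain_create_sim(dev, fwnode, 4);
		if (IS_ERR(domain))
			return PTR_ERR(domain);

		virq = irq_create_mapping(domain, 0);	/* hwirq 0 */
		return virq ? 0 : -EINVAL;
	}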
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 02da997ad12c..8cd11a223260 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -2,7 +2,8 @@
#ifndef _LINUX_IRQ_WORK_H
#define _LINUX_IRQ_WORK_H
-#include <linux/llist.h>
+#include <linux/smp_types.h>
+#include <linux/rcuwait.h>
/*
* An entry can be in one of four states:
@@ -13,32 +14,45 @@
* busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed
*/
-#define IRQ_WORK_PENDING BIT(0)
-#define IRQ_WORK_BUSY BIT(1)
-
-/* Doesn't want IPI, wait for tick: */
-#define IRQ_WORK_LAZY BIT(2)
-
-#define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY)
-
struct irq_work {
- atomic_t flags;
- struct llist_node llnode;
+ struct __call_single_node node;
void (*func)(struct irq_work *);
+ struct rcuwait irqwait;
};
+#define __IRQ_WORK_INIT(_func, _flags) (struct irq_work){ \
+ .node = { .u_flags = (_flags), }, \
+ .func = (_func), \
+ .irqwait = __RCUWAIT_INITIALIZER(irqwait), \
+}
+
+#define IRQ_WORK_INIT(_func) __IRQ_WORK_INIT(_func, 0)
+#define IRQ_WORK_INIT_LAZY(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_LAZY)
+#define IRQ_WORK_INIT_HARD(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_HARD_IRQ)
+
+#define DEFINE_IRQ_WORK(name, _f) \
+ struct irq_work name = IRQ_WORK_INIT(_f)
+
static inline
void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
{
- atomic_set(&work->flags, 0);
- work->func = func;
+ *work = IRQ_WORK_INIT(func);
}
-#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { \
- .flags = ATOMIC_INIT(0), \
- .func = (_f) \
+static inline bool irq_work_is_pending(struct irq_work *work)
+{
+ return atomic_read(&work->node.a_flags) & IRQ_WORK_PENDING;
+}
+
+static inline bool irq_work_is_busy(struct irq_work *work)
+{
+ return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY;
}
+static inline bool irq_work_is_hard(struct irq_work *work)
+{
+ return atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ;
+}
bool irq_work_queue(struct irq_work *work);
bool irq_work_queue_on(struct irq_work *work, int cpu);
@@ -51,9 +65,11 @@ void irq_work_sync(struct irq_work *work);
void irq_work_run(void);
bool irq_work_needs_cpu(void);
+void irq_work_single(void *arg);
#else
static inline bool irq_work_needs_cpu(void) { return false; }
static inline void irq_work_run(void) { }
+static inline void irq_work_single(void *arg) { }
#endif
#endif /* _LINUX_IRQ_WORK_H */
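A sketch of the static-initializer path added above; the callback name is
made up:

	static void example_irq_work_fn(struct irq_work *work)
	{
		/* runs from the irq_work interrupt, or the tick if lazy */
	}

	static DEFINE_IRQ_WORK(example_work, example_irq_work_fn);

	/* from any atomic context: irq_work_queue(&example_work); */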
diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h
index 950e4b2458f0..d5e6024cb2a8 100644
--- a/include/linux/irqchip.h
+++ b/include/linux/irqchip.h
@@ -12,7 +12,16 @@
#define _LINUX_IRQCHIP_H
#include <linux/acpi.h>
+#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+/* Undefined on purpose */
+extern of_irq_init_cb_t typecheck_irq_init_cb;
+
+#define typecheck_irq_init_cb(fn) \
+ (__typecheck(typecheck_irq_init_cb, &fn) ? fn : fn)
/*
* This macro must be used by the different irqchip drivers to declare
@@ -21,10 +30,37 @@
*
* @name: name that must be unique across all IRQCHIP_DECLARE of the
* same file.
- * @compstr: compatible string of the irqchip driver
+ * @compat: compatible string of the irqchip driver
* @fn: initialization function
*/
-#define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn)
+#define IRQCHIP_DECLARE(name, compat, fn) \
+ OF_DECLARE_2(irqchip, name, compat, typecheck_irq_init_cb(fn))
+
+extern int platform_irqchip_probe(struct platform_device *pdev);
+
+#define IRQCHIP_PLATFORM_DRIVER_BEGIN(drv_name) \
+static const struct of_device_id drv_name##_irqchip_match_table[] = {
+
+#define IRQCHIP_MATCH(compat, fn) { .compatible = compat, \
+ .data = typecheck_irq_init_cb(fn), },
+
+
+#define IRQCHIP_PLATFORM_DRIVER_END(drv_name, ...) \
+ {}, \
+}; \
+MODULE_DEVICE_TABLE(of, drv_name##_irqchip_match_table); \
+static struct platform_driver drv_name##_driver = { \
+ .probe = IS_ENABLED(CONFIG_IRQCHIP) ? \
+ platform_irqchip_probe : NULL, \
+ .driver = { \
+ .name = #drv_name, \
+ .owner = THIS_MODULE, \
+ .of_match_table = drv_name##_irqchip_match_table, \
+ .suppress_bind_attrs = true, \
+ __VA_ARGS__ \
+ }, \
+}; \
+builtin_platform_driver(drv_name##_driver)
/*
* This macro must be used by the different irqchip drivers to declare
@@ -39,8 +75,9 @@
* @fn: initialization function
*/
#define IRQCHIP_ACPI_DECLARE(name, subtable, validate, data, fn) \
- ACPI_DECLARE_PROBE_ENTRY(irqchip, name, ACPI_SIG_MADT, \
- subtable, validate, data, fn)
+ ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(irqchip, name, \
+ ACPI_SIG_MADT, subtable, \
+ validate, data, fn)
#ifdef CONFIG_IRQCHIP
void irqchip_init(void);
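A sketch of a modular irqchip driver built on the new platform-driver
helpers; the compatible string and the init function body are made up:

	static int example_irqchip_init(struct device_node *node,
					struct device_node *parent)
	{
		/* map registers, create the irq domain, ... */
		return 0;
	}

	IRQCHIP_PLATFORM_DRIVER_BEGIN(example_intc)
	IRQCHIP_MATCH("vendor,example-intc", example_irqchip_init)
	IRQCHIP_PLATFORM_DRIVER_END(example_intc)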
diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h
index b9850f5f1906..1177f3a1aed5 100644
--- a/include/linux/irqchip/arm-gic-common.h
+++ b/include/linux/irqchip/arm-gic-common.h
@@ -7,8 +7,7 @@
#ifndef __LINUX_IRQCHIP_ARM_GIC_COMMON_H
#define __LINUX_IRQCHIP_ARM_GIC_COMMON_H
-#include <linux/types.h>
-#include <linux/ioport.h>
+#include <linux/irqchip/arm-vgic-info.h>
#define GICD_INT_DEF_PRI 0xa0
#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
@@ -16,26 +15,6 @@
(GICD_INT_DEF_PRI << 8) |\
GICD_INT_DEF_PRI)
-enum gic_type {
- GIC_V2,
- GIC_V3,
-};
-
-struct gic_kvm_info {
- /* GIC type */
- enum gic_type type;
- /* Virtual CPU interface */
- struct resource vcpu;
- /* Interrupt number */
- unsigned int maint_irq;
- /* Virtual control interface */
- struct resource vctrl;
- /* vlpi support */
- bool has_v4;
-};
-
-const struct gic_kvm_info *gic_get_kvm_info(void);
-
struct irq_domain;
struct fwnode_handle;
int gicv2m_init(struct fwnode_handle *parent_handle,
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 83439bfb6c5b..728691365464 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -19,7 +19,6 @@
#define GICD_CLRSPI_NSR 0x0048
#define GICD_SETSPI_SR 0x0050
#define GICD_CLRSPI_SR 0x0058
-#define GICD_SEIR 0x0068
#define GICD_IGROUPR 0x0080
#define GICD_ISENABLER 0x0100
#define GICD_ICENABLER 0x0180
@@ -57,6 +56,7 @@
#define GICD_SPENDSGIR 0x0F20
#define GICD_CTLR_RWP (1U << 31)
+#define GICD_CTLR_nASSGIreq (1U << 8)
#define GICD_CTLR_DS (1U << 6)
#define GICD_CTLR_ARE_NS (1U << 4)
#define GICD_CTLR_ENABLE_G1A (1U << 1)
@@ -90,6 +90,7 @@
#define GICD_TYPER_ESPIS(typer) \
(((typer) & GICD_TYPER_ESPI) ? GICD_TYPER_SPIS((typer) >> 27) : 0)
+#define GICD_TYPER2_nASSGIcap (1U << 8)
#define GICD_TYPER2_VIL (1U << 7)
#define GICD_TYPER2_VID GENMASK(4, 0)
@@ -117,18 +118,17 @@
#define GICR_WAKER 0x0014
#define GICR_SETLPIR 0x0040
#define GICR_CLRLPIR 0x0048
-#define GICR_SEIR GICD_SEIR
#define GICR_PROPBASER 0x0070
#define GICR_PENDBASER 0x0078
#define GICR_INVLPIR 0x00A0
#define GICR_INVALLR 0x00B0
#define GICR_SYNCR 0x00C0
-#define GICR_MOVLPIR 0x0100
-#define GICR_MOVALLR 0x0110
#define GICR_IDREGS GICD_IDREGS
#define GICR_PIDR2 GICD_PIDR2
#define GICR_CTLR_ENABLE_LPIS (1UL << 0)
+#define GICR_CTLR_CES (1UL << 1)
+#define GICR_CTLR_IR (1UL << 2)
#define GICR_CTLR_RWP (1UL << 3)
#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff)
@@ -241,6 +241,7 @@
#define GICR_TYPER_PLPIS (1U << 0)
#define GICR_TYPER_VLPIS (1U << 1)
+#define GICR_TYPER_DIRTY (1U << 2)
#define GICR_TYPER_DirectLPIS (1U << 3)
#define GICR_TYPER_LAST (1U << 4)
#define GICR_TYPER_RVPEID (1U << 7)
@@ -320,6 +321,9 @@
#define GICR_VPENDBASER_NonShareable \
GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable)
+#define GICR_VPENDBASER_InnerShareable \
+ GIC_BASER_SHAREABILITY(GICR_VPENDBASER, InnerShareable)
+
#define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB)
#define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC)
#define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt)
@@ -343,6 +347,15 @@
#define GICR_VPENDBASER_4_1_VGRP1EN (1ULL << 58)
#define GICR_VPENDBASER_4_1_VPEID GENMASK_ULL(15, 0)
+#define GICR_VSGIR 0x0080
+
+#define GICR_VSGIR_VPEID GENMASK(15, 0)
+
+#define GICR_VSGIPENDR 0x0088
+
+#define GICR_VSGIPENDR_BUSY (1U << 31)
+#define GICR_VSGIPENDR_PENDING GENMASK(15, 0)
+
/*
* ITS registers, offsets from ITS_base
*/
@@ -366,6 +379,11 @@
#define GITS_TRANSLATER 0x10040
+#define GITS_SGIR 0x20020
+
+#define GITS_SGIR_VPEID GENMASK_ULL(47, 32)
+#define GITS_SGIR_VINTID GENMASK_ULL(3, 0)
+
#define GITS_CTLR_ENABLE (1U << 0)
#define GITS_CTLR_ImDe (1U << 1)
#define GITS_CTLR_ITS_NUMBER_SHIFT 4
@@ -500,8 +518,9 @@
#define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI)
#define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI)
#define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC)
-/* VMOVP and INVDB are the odd ones, as they dont have a physical counterpart */
+/* VMOVP, VSGI and INVDB are the odd ones, as they don't have a physical counterpart */
#define GITS_CMD_VMOVP GITS_CMD_GICv4(2)
+#define GITS_CMD_VSGI GITS_CMD_GICv4(3)
#define GITS_CMD_INVDB GITS_CMD_GICv4(0xe)
/*
@@ -558,67 +577,11 @@
#define ICC_SRE_EL1_DFB (1U << 1)
#define ICC_SRE_EL1_SRE (1U << 0)
-/*
- * Hypervisor interface registers (SRE only)
- */
-#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1)
-
-#define ICH_LR_EOI (1ULL << 41)
-#define ICH_LR_GROUP (1ULL << 60)
-#define ICH_LR_HW (1ULL << 61)
-#define ICH_LR_STATE (3ULL << 62)
-#define ICH_LR_PENDING_BIT (1ULL << 62)
-#define ICH_LR_ACTIVE_BIT (1ULL << 63)
-#define ICH_LR_PHYS_ID_SHIFT 32
-#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT)
-#define ICH_LR_PRIORITY_SHIFT 48
-#define ICH_LR_PRIORITY_MASK (0xffULL << ICH_LR_PRIORITY_SHIFT)
-
/* These are for GICv2 emulation only */
#define GICH_LR_VIRTUALID (0x3ffUL << 0)
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
-#define ICH_MISR_EOI (1 << 0)
-#define ICH_MISR_U (1 << 1)
-
-#define ICH_HCR_EN (1 << 0)
-#define ICH_HCR_UIE (1 << 1)
-#define ICH_HCR_NPIE (1 << 3)
-#define ICH_HCR_TC (1 << 10)
-#define ICH_HCR_TALL0 (1 << 11)
-#define ICH_HCR_TALL1 (1 << 12)
-#define ICH_HCR_EOIcount_SHIFT 27
-#define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT)
-
-#define ICH_VMCR_ACK_CTL_SHIFT 2
-#define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT)
-#define ICH_VMCR_FIQ_EN_SHIFT 3
-#define ICH_VMCR_FIQ_EN_MASK (1 << ICH_VMCR_FIQ_EN_SHIFT)
-#define ICH_VMCR_CBPR_SHIFT 4
-#define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT)
-#define ICH_VMCR_EOIM_SHIFT 9
-#define ICH_VMCR_EOIM_MASK (1 << ICH_VMCR_EOIM_SHIFT)
-#define ICH_VMCR_BPR1_SHIFT 18
-#define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT)
-#define ICH_VMCR_BPR0_SHIFT 21
-#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT)
-#define ICH_VMCR_PMR_SHIFT 24
-#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT)
-#define ICH_VMCR_ENG0_SHIFT 0
-#define ICH_VMCR_ENG0_MASK (1 << ICH_VMCR_ENG0_SHIFT)
-#define ICH_VMCR_ENG1_SHIFT 1
-#define ICH_VMCR_ENG1_MASK (1 << ICH_VMCR_ENG1_SHIFT)
-
-#define ICH_VTR_PRI_BITS_SHIFT 29
-#define ICH_VTR_PRI_BITS_MASK (7 << ICH_VTR_PRI_BITS_SHIFT)
-#define ICH_VTR_ID_BITS_SHIFT 23
-#define ICH_VTR_ID_BITS_MASK (7 << ICH_VTR_ID_BITS_SHIFT)
-#define ICH_VTR_SEIS_SHIFT 22
-#define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT)
-#define ICH_VTR_A3V_SHIFT 21
-#define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT)
-
#define ICC_IAR1_EL1_SPURIOUS 0x3ff
#define ICC_SRE_EL2_SRE (1 << 0)
@@ -650,10 +613,11 @@
struct rdists {
struct {
+ raw_spinlock_t rd_lock;
void __iomem *rd_base;
struct page *pend_page;
phys_addr_t phys_base;
- bool lpi_enabled;
+ u64 flags;
cpumask_t *vpe_table_mask;
void *vpe_l1_base;
} __percpu *rdist;
@@ -662,13 +626,16 @@ struct rdists {
u64 flags;
u32 gicd_typer;
u32 gicd_typer2;
+ int cpuhp_memreserve_state;
bool has_vlpis;
bool has_rvpeid;
bool has_direct_lpi;
+ bool has_vpend_valid_dirty;
};
struct irq_domain;
struct fwnode_handle;
+int __init its_lpi_memreserve_init(void);
int its_cpu_init(void);
int its_init(struct fwnode_handle *handle, struct rdists *rdists,
struct irq_domain *domain);
diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
index d9c34968467a..2c63375bbd43 100644
--- a/include/linux/irqchip/arm-gic-v4.h
+++ b/include/linux/irqchip/arm-gic-v4.h
@@ -39,6 +39,8 @@ struct its_vpe {
irq_hw_number_t vpe_db_lpi;
/* VPE resident */
bool resident;
+ /* VPT parse complete */
+ bool ready;
union {
/* GICv4.0 implementations */
struct {
@@ -49,11 +51,23 @@ struct its_vpe {
};
/* GICv4.1 implementations */
struct {
+ struct fwnode_handle *fwnode;
+ struct irq_domain *sgi_domain;
+ struct {
+ u8 priority;
+ bool enabled;
+ bool group;
+ } sgi_config[16];
atomic_t vmapp_count;
};
};
/*
+ * Ensures mutual exclusion between affinity setting of the
+ * vPE and vLPI operations using vpe->col_idx.
+ */
+ raw_spinlock_t vpe_lock;
+ /*
* This collection ID is used to indirect the target
* redistributor for this VPE. The ID itself isn't involved in
* programming of the ITS.
@@ -92,7 +106,9 @@ enum its_vcpu_info_cmd_type {
PROP_UPDATE_AND_INV_VLPI,
SCHEDULE_VPE,
DESCHEDULE_VPE,
+ COMMIT_VPE,
INVALL_VPE,
+ PROP_UPDATE_VSGI,
};
struct its_cmd_info {
@@ -105,19 +121,30 @@ struct its_cmd_info {
bool g0en;
bool g1en;
};
+ struct {
+ u8 priority;
+ bool group;
+ };
};
};
int its_alloc_vcpu_irqs(struct its_vm *vm);
void its_free_vcpu_irqs(struct its_vm *vm);
-int its_schedule_vpe(struct its_vpe *vpe, bool on);
+int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en);
+int its_make_vpe_non_resident(struct its_vpe *vpe, bool db);
+int its_commit_vpe(struct its_vpe *vpe);
int its_invall_vpe(struct its_vpe *vpe);
int its_map_vlpi(int irq, struct its_vlpi_map *map);
int its_get_vlpi(int irq, struct its_vlpi_map *map);
int its_unmap_vlpi(int irq);
int its_prop_update_vlpi(int irq, u8 config, bool inv);
+int its_prop_update_vsgi(int irq, u8 priority, bool group);
struct irq_domain_ops;
-int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops);
+int its_init_v4(struct irq_domain *domain,
+ const struct irq_domain_ops *vpe_ops,
+ const struct irq_domain_ops *sgi_ops);
+
+bool gic_cpuif_has_vsgi(void);
#endif
diff --git a/include/linux/irqchip/arm-vgic-info.h b/include/linux/irqchip/arm-vgic-info.h
new file mode 100644
index 000000000000..a75b2c7de69d
--- /dev/null
+++ b/include/linux/irqchip/arm-vgic-info.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * include/linux/irqchip/arm-vgic-info.h
+ *
+ * Copyright (C) 2016 ARM Limited, All Rights Reserved.
+ */
+#ifndef __LINUX_IRQCHIP_ARM_VGIC_INFO_H
+#define __LINUX_IRQCHIP_ARM_VGIC_INFO_H
+
+#include <linux/types.h>
+#include <linux/ioport.h>
+
+enum gic_type {
+ /* Full GICv2 */
+ GIC_V2,
+ /* Full GICv3, optionally with v2 compat */
+ GIC_V3,
+};
+
+struct gic_kvm_info {
+ /* GIC type */
+ enum gic_type type;
+ /* Virtual CPU interface */
+ struct resource vcpu;
+ /* Interrupt number */
+ unsigned int maint_irq;
+ /* No interrupt mask, no need to use the above field */
+ bool no_maint_irq_mask;
+ /* Virtual control interface */
+ struct resource vctrl;
+ /* vlpi support */
+ bool has_v4;
+ /* rvpeid support */
+ bool has_v4_1;
+ /* Deactivation impaired, subpar stuff */
+ bool no_hw_deactivation;
+};
+
+#ifdef CONFIG_KVM
+void vgic_set_kvm_info(const struct gic_kvm_info *info);
+#else
+static inline void vgic_set_kvm_info(const struct gic_kvm_info *info) {}
+#endif
+
+#endif
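As a sketch of the consumer side (not part of this diff), a GIC driver fills in the relocated structure and hands it to KVM; the acme_* names and the SZ_8K window are illustrative:

#include <linux/irqchip/arm-vgic-info.h>
#include <linux/sizes.h>

static struct gic_kvm_info acme_gic_info __initdata = {
        .type = GIC_V3,
};

static void __init acme_gic_register_kvm(int maint_irq, phys_addr_t vcpu_base)
{
        acme_gic_info.maint_irq  = maint_irq;
        acme_gic_info.vcpu.start = vcpu_base;
        acme_gic_info.vcpu.end   = vcpu_base + SZ_8K - 1;

        /* Compiles to a no-op when CONFIG_KVM is disabled. */
        vgic_set_kvm_info(&acme_gic_info);
}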
diff --git a/include/linux/irqchip/arm-vic.h b/include/linux/irqchip/arm-vic.h
index a158b97242c7..f2b11d1df23d 100644
--- a/include/linux/irqchip/arm-vic.h
+++ b/include/linux/irqchip/arm-vic.h
@@ -9,17 +9,6 @@
#include <linux/types.h>
-#define VIC_RAW_STATUS 0x08
-#define VIC_INT_ENABLE 0x10 /* 1 = enable, 0 = disable */
-#define VIC_INT_ENABLE_CLEAR 0x14
-
-struct device_node;
-struct pt_regs;
-
-void __vic_init(void __iomem *base, int parent_irq, int irq_start,
- u32 vic_sources, u32 resume_sources, struct device_node *node);
void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources);
-int vic_init_cascaded(void __iomem *base, unsigned int parent_irq,
- u32 vic_sources, u32 resume_sources);
#endif
diff --git a/include/linux/irqchip/irq-bcm2836.h b/include/linux/irqchip/irq-bcm2836.h
index f224d6f9e550..ac5719d8f56b 100644
--- a/include/linux/irqchip/irq-bcm2836.h
+++ b/include/linux/irqchip/irq-bcm2836.h
@@ -30,7 +30,7 @@
*/
#define LOCAL_MAILBOX_INT_CONTROL0 0x050
/*
- * The CPU's interrupt status register. Bits are defined by the the
+ * The CPU's interrupt status register. Bits are defined by the
* LOCAL_IRQ_* bits below.
*/
#define LOCAL_IRQ_PENDING0 0x060
diff --git a/include/linux/irqchip/irq-ixp4xx.h b/include/linux/irqchip/irq-ixp4xx.h
deleted file mode 100644
index 9395917d6936..000000000000
--- a/include/linux/irqchip/irq-ixp4xx.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IRQ_IXP4XX_H
-#define __IRQ_IXP4XX_H
-
-#include <linux/ioport.h>
-struct irq_domain;
-
-void ixp4xx_irq_init(resource_size_t irqbase,
- bool is_356);
-struct irq_domain *ixp4xx_get_irq_domain(void);
-
-#endif /* __IRQ_IXP4XX_H */
diff --git a/include/linux/irqchip/irq-omap-intc.h b/include/linux/irqchip/irq-omap-intc.h
index 216e5adf80ce..dca379c0d7eb 100644
--- a/include/linux/irqchip/irq-omap-intc.h
+++ b/include/linux/irqchip/irq-omap-intc.h
@@ -2,7 +2,7 @@
/**
* irq-omap-intc.h - INTC Idle Functions
*
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com
*
* Author: Felipe Balbi <balbi@ti.com>
*/
diff --git a/include/linux/irqchip/mmp.h b/include/linux/irqchip/mmp.h
index cb8455c87c8a..aa1813749a4f 100644
--- a/include/linux/irqchip/mmp.h
+++ b/include/linux/irqchip/mmp.h
@@ -4,4 +4,7 @@
extern struct irq_chip icu_irq_chip;
+extern void icu_init_irq(void);
+extern void mmp2_init_icu(void);
+
#endif /* __IRQCHIP_MMP_H */
diff --git a/include/linux/irqchip/versatile-fpga.h b/include/linux/irqchip/versatile-fpga.h
deleted file mode 100644
index a978fc8c7996..000000000000
--- a/include/linux/irqchip/versatile-fpga.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef PLAT_FPGA_IRQ_H
-#define PLAT_FPGA_IRQ_H
-
-struct device_node;
-struct pt_regs;
-
-void fpga_handle_irq(struct pt_regs *regs);
-void fpga_irq_init(void __iomem *, const char *, int, int, u32,
- struct device_node *node);
-int fpga_irq_of_init(struct device_node *node,
- struct device_node *parent);
-
-#endif
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index d6e2ab538ef2..844a8e30e6de 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -22,9 +22,8 @@ struct pt_regs;
* @irq_common_data: per irq and chip data passed down to chip functions
* @kstat_irqs: irq stats per cpu
* @handle_irq: highlevel irq-events handler
- * @preflow_handler: handler called before the flow handler (currently used by sparc)
* @action: the irq action chain
- * @status: status information
+ * @status_use_accessors: status information
* @core_internal_state__do_not_mess_with_it: core internal status information
* @depth: disable-depth, for nested irq_disable() calls
* @wake_depth: enable depth, for multiple irq_set_irq_wake() callers
@@ -33,7 +32,7 @@ struct pt_regs;
* @last_unhandled: aging timer for unhandled count
* @irqs_unhandled: stats field for spurious unhandled interrupts
* @threads_handled: stats field for deferred spurious detection of threaded handlers
- * @threads_handled_last: comparator field for deferred spurious detection of theraded handlers
+ * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
* @lock: locking for SMP
* @affinity_hint: hint to user space for preferred irq affinity
* @affinity_notify: context for notification of affinity changes
@@ -58,9 +57,6 @@ struct irq_desc {
struct irq_data irq_data;
unsigned int __percpu *kstat_irqs;
irq_flow_handler_t handle_irq;
-#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
- irq_preflow_handler_t preflow_handler;
-#endif
struct irqaction *action; /* IRQ action list */
unsigned int status_use_accessors;
unsigned int core_internal_state__do_not_mess_with_it;
@@ -117,6 +113,12 @@ static inline void irq_unlock_sparse(void) { }
extern struct irq_desc irq_desc[NR_IRQS];
#endif
+static inline unsigned int irq_desc_kstat_cpu(struct irq_desc *desc,
+ unsigned int cpu)
+{
+ return desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
+}
+
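A small usage sketch of the new helper: summing the per-CPU counters for one descriptor, roughly what the /proc/interrupts code does (the function name is invented):

#include <linux/cpumask.h>
#include <linux/irqdesc.h>

static unsigned int acme_irq_total(struct irq_desc *desc)
{
        unsigned int sum = 0;
        int cpu;

        /* irq_desc_kstat_cpu() treats a NULL kstat_irqs pointer as zero. */
        for_each_possible_cpu(cpu)
                sum += irq_desc_kstat_cpu(desc, cpu);
        return sum;
}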
static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
{
return container_of(data->common, struct irq_desc, irq_common_data);
@@ -156,39 +158,25 @@ static inline void generic_handle_irq_desc(struct irq_desc *desc)
desc->handle_irq(desc);
}
+int handle_irq_desc(struct irq_desc *desc);
int generic_handle_irq(unsigned int irq);
+int generic_handle_irq_safe(unsigned int irq);
-#ifdef CONFIG_HANDLE_DOMAIN_IRQ
+#ifdef CONFIG_IRQ_DOMAIN
/*
* Convert a HW interrupt number to a logical one using a IRQ domain,
* and handle the result interrupt number. Return -EINVAL if
- * conversion failed. Providing a NULL domain indicates that the
- * conversion has already been done.
+ * conversion failed.
*/
-int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
- bool lookup, struct pt_regs *regs);
-
-static inline int handle_domain_irq(struct irq_domain *domain,
- unsigned int hwirq, struct pt_regs *regs)
-{
- return __handle_domain_irq(domain, hwirq, true, regs);
-}
-
-#ifdef CONFIG_IRQ_DOMAIN
-int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
- struct pt_regs *regs);
-#endif
+int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq);
+int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq);
+int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq);
#endif
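To illustrate the replacement for the removed handle_domain_irq() path, a sketch of a chained demultiplex handler using generic_handle_domain_irq(); the acme_mux structure and register offset are invented:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>

struct acme_mux {                               /* hypothetical driver state */
        void __iomem            *base;
        struct irq_domain       *domain;
};

#define ACME_PENDING    0x10                    /* hypothetical register */

static void acme_mux_handle_irq(struct irq_desc *desc)
{
        struct acme_mux *mux = irq_desc_get_handler_data(desc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned long pending;
        int hwirq;

        chained_irq_enter(chip, desc);
        pending = readl_relaxed(mux->base + ACME_PENDING);
        /* Hardware-number-to-descriptor lookup and dispatch in one call. */
        for_each_set_bit(hwirq, &pending, 32)
                generic_handle_domain_irq(mux->domain, hwirq);
        chained_irq_exit(chip, desc);
}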
/* Test to see if a driver has successfully requested an irq */
static inline int irq_desc_has_action(struct irq_desc *desc)
{
- return desc->action != NULL;
-}
-
-static inline int irq_has_action(unsigned int irq)
-{
- return irq_desc_has_action(irq_to_desc(irq));
+ return desc && desc->action != NULL;
}
/**
@@ -222,61 +210,42 @@ static inline void irq_set_handler_locked(struct irq_data *data,
* Must be called with irq_desc locked and valid parameters.
*/
static inline void
-irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip,
+irq_set_chip_handler_name_locked(struct irq_data *data,
+ const struct irq_chip *chip,
irq_flow_handler_t handler, const char *name)
{
struct irq_desc *desc = irq_data_to_desc(data);
desc->handle_irq = handler;
desc->name = name;
- data->chip = chip;
+ data->chip = (struct irq_chip *)chip;
}
+bool irq_check_status_bit(unsigned int irq, unsigned int bitmask);
+
static inline bool irq_balancing_disabled(unsigned int irq)
{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
- return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
+ return irq_check_status_bit(irq, IRQ_NO_BALANCING_MASK);
}
static inline bool irq_is_percpu(unsigned int irq)
{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
- return desc->status_use_accessors & IRQ_PER_CPU;
+ return irq_check_status_bit(irq, IRQ_PER_CPU);
}
static inline bool irq_is_percpu_devid(unsigned int irq)
{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
- return desc->status_use_accessors & IRQ_PER_CPU_DEVID;
+ return irq_check_status_bit(irq, IRQ_PER_CPU_DEVID);
}
+void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
+ struct lock_class_key *request_class);
static inline void
irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
struct lock_class_key *request_class)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
- if (desc) {
- lockdep_set_class(&desc->lock, lock_class);
- lockdep_set_class(&desc->request_mutex, request_class);
- }
-}
-
-#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
-static inline void
-__irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler)
-{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
- desc->preflow_handler = handler;
+ if (IS_ENABLED(CONFIG_LOCKDEP))
+ __irq_set_lockdep_class(irq, lock_class, request_class);
}
-#endif
#endif
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 8d062e86d954..00d577f90883 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -37,17 +37,15 @@
#include <linux/radix-tree.h>
struct device_node;
+struct fwnode_handle;
struct irq_domain;
-struct of_device_id;
struct irq_chip;
struct irq_data;
+struct irq_desc;
struct cpumask;
struct seq_file;
struct irq_affinity_desc;
-/* Number of irqs reserved for a legacy isa controller */
-#define NUM_ISA_INTERRUPTS 16
-
#define IRQ_DOMAIN_IRQ_SPEC_PARAMS 16
/**
@@ -66,6 +64,10 @@ struct irq_fwspec {
u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS];
};
+/* Conversion function from of_phandle_args fields to fwspec */
+void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
+ unsigned int count, struct irq_fwspec *fwspec);
+
/*
* Should several domains have the same device node, but serve
* different purposes (for example one domain is for PCI/MSI, and the
@@ -84,6 +86,7 @@ enum irq_domain_bus_token {
DOMAIN_BUS_FSL_MC_MSI,
DOMAIN_BUS_TI_SCI_INTA_MSI,
DOMAIN_BUS_WAKEUP,
+ DOMAIN_BUS_VMD_MSI,
};
/**
@@ -128,7 +131,7 @@ struct irq_domain_ops {
#endif
};
-extern struct irq_domain_ops irq_generic_chip_ops;
+extern const struct irq_domain_ops irq_generic_chip_ops;
struct irq_domain_chip_generic;
@@ -148,15 +151,15 @@ struct irq_domain_chip_generic;
* @gc: Pointer to a list of generic chips. There is a helper function for
* setting up one or more generic chips for interrupt controllers
* drivers using the generic chip library which uses this pointer.
+ * @dev: Pointer to a device that the domain represents, and that will be
+ * used for power management purposes.
* @parent: Pointer to parent irq_domain to support hierarchy irq_domains
- * @debugfs_file: dentry for the domain debugfs file
*
* Revmap data, used internally by irq_domain
- * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that
- * support direct mapping
- * @revmap_size: Size of the linear map table @linear_revmap[]
+ * @revmap_size: Size of the linear map table @revmap[]
* @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map
- * @linear_revmap: Linear table of hwirq->virq reverse mappings
+ * @revmap_mutex: Lock for the revmap
+ * @revmap: Linear table of irq_data pointers
*/
struct irq_domain {
struct list_head link;
@@ -170,20 +173,17 @@ struct irq_domain {
struct fwnode_handle *fwnode;
enum irq_domain_bus_token bus_token;
struct irq_domain_chip_generic *gc;
+ struct device *dev;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
struct irq_domain *parent;
#endif
-#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
- struct dentry *debugfs_file;
-#endif
/* reverse map data. The linear map gets appended to the irq_domain */
irq_hw_number_t hwirq_max;
- unsigned int revmap_direct_max_irq;
unsigned int revmap_size;
struct radix_tree_root revmap_tree;
- struct mutex revmap_tree_mutex;
- unsigned int linear_revmap[];
+ struct mutex revmap_mutex;
+ struct irq_data __rcu *revmap[];
};
/* Irq domain flags */
@@ -213,6 +213,9 @@ enum {
*/
IRQ_DOMAIN_MSI_NOMASK_QUIRK = (1 << 6),
+ /* Irq domain doesn't translate anything */
+ IRQ_DOMAIN_FLAG_NO_MAP = (1 << 7),
+
/*
* Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
* for implementation specific purposes and ignored by the
@@ -226,6 +229,13 @@ static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d)
return to_of_node(d->fwnode);
}
+static inline void irq_domain_set_pm_device(struct irq_domain *d,
+ struct device *dev)
+{
+ if (d)
+ d->dev = dev;
+}
+
#ifdef CONFIG_IRQ_DOMAIN
struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
const char *name, phys_addr_t *pa);
@@ -255,21 +265,27 @@ static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa)
}
void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
+struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
irq_hw_number_t hwirq_max, int direct_max,
const struct irq_domain_ops *ops,
void *host_data);
-struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
- unsigned int size,
- unsigned int first_irq,
- const struct irq_domain_ops *ops,
- void *host_data);
+struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode,
+ unsigned int size,
+ unsigned int first_irq,
+ const struct irq_domain_ops *ops,
+ void *host_data);
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
unsigned int size,
unsigned int first_irq,
irq_hw_number_t first_hwirq,
const struct irq_domain_ops *ops,
void *host_data);
+struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode,
+ unsigned int size,
+ unsigned int first_irq,
+ irq_hw_number_t first_hwirq,
+ const struct irq_domain_ops *ops,
+ void *host_data);
extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
enum irq_domain_bus_token bus_token);
extern bool irq_domain_check_msi_remap(void);
@@ -322,6 +338,15 @@ static inline struct irq_domain *irq_find_host(struct device_node *node)
return d;
}
+static inline struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
+ unsigned int size,
+ unsigned int first_irq,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ return irq_domain_create_simple(of_node_to_fwnode(of_node), size, first_irq, ops, host_data);
+}
+
/**
* irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
* @of_node: pointer to interrupt controller's device tree node.
@@ -336,6 +361,8 @@ static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_no
{
return __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
}
+
+#ifdef CONFIG_IRQ_DOMAIN_NOMAP
static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
unsigned int max_irq,
const struct irq_domain_ops *ops,
@@ -343,14 +370,10 @@ static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_nod
{
return __irq_domain_add(of_node_to_fwnode(of_node), 0, max_irq, max_irq, ops, host_data);
}
-static inline struct irq_domain *irq_domain_add_legacy_isa(
- struct device_node *of_node,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops,
- host_data);
-}
+
+extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
+#endif
+
static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
const struct irq_domain_ops *ops,
void *host_data)
@@ -380,40 +403,49 @@ extern int irq_domain_associate(struct irq_domain *domain, unsigned int irq,
extern void irq_domain_associate_many(struct irq_domain *domain,
unsigned int irq_base,
irq_hw_number_t hwirq_base, int count);
-extern void irq_domain_disassociate(struct irq_domain *domain,
- unsigned int irq);
-extern unsigned int irq_create_mapping(struct irq_domain *host,
- irq_hw_number_t hwirq);
+extern unsigned int irq_create_mapping_affinity(struct irq_domain *host,
+ irq_hw_number_t hwirq,
+ const struct irq_affinity_desc *affinity);
extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec);
extern void irq_dispose_mapping(unsigned int virq);
+static inline unsigned int irq_create_mapping(struct irq_domain *host,
+ irq_hw_number_t hwirq)
+{
+ return irq_create_mapping_affinity(host, hwirq, NULL);
+}
+
+extern struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq,
+ unsigned int *irq);
+
+static inline struct irq_desc *irq_resolve_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq)
+{
+ return __irq_resolve_mapping(domain, hwirq, NULL);
+}
+
/**
- * irq_linear_revmap() - Find a linux irq from a hw irq number.
+ * irq_find_mapping() - Find a linux irq from a hw irq number.
* @domain: domain owning this hardware interrupt
* @hwirq: hardware irq number in that domain space
- *
- * This is a fast path alternative to irq_find_mapping() that can be
- * called directly by irq controller code to save a handful of
- * instructions. It is always safe to call, but won't find irqs mapped
- * using the radix tree.
*/
-static inline unsigned int irq_linear_revmap(struct irq_domain *domain,
- irq_hw_number_t hwirq)
+static inline unsigned int irq_find_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq)
{
- return hwirq < domain->revmap_size ? domain->linear_revmap[hwirq] : 0;
+ unsigned int irq;
+
+ if (__irq_resolve_mapping(domain, hwirq, &irq))
+ return irq;
+
+ return 0;
}
-extern unsigned int irq_find_mapping(struct irq_domain *host,
- irq_hw_number_t hwirq);
-extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
-extern int irq_create_strict_mappings(struct irq_domain *domain,
- unsigned int irq_base,
- irq_hw_number_t hwirq_base, int count);
-static inline int irq_create_identity_mapping(struct irq_domain *host,
- irq_hw_number_t hwirq)
+static inline unsigned int irq_linear_revmap(struct irq_domain *domain,
+ irq_hw_number_t hwirq)
{
- return irq_create_strict_mappings(host, hwirq, hwirq, 1);
+ return irq_find_mapping(domain, hwirq);
}
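Putting the reworked mapping API together, a sketch of a driver creating a linear domain and resolving mappings; every acme_* name is illustrative and error handling is trimmed:

#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static int acme_domain_map(struct irq_domain *d, unsigned int virq,
                           irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
        return 0;
}

static const struct irq_domain_ops acme_domain_ops = {
        .map    = acme_domain_map,
        .xlate  = irq_domain_xlate_onecell,
};

static int acme_setup_domain(struct device_node *np, void *priv)
{
        struct irq_domain *domain;
        unsigned int virq;

        domain = irq_domain_add_linear(np, 32, &acme_domain_ops, priv);
        if (!domain)
                return -ENOMEM;

        virq = irq_create_mapping(domain, 5);   /* NULL-affinity wrapper */
        if (!virq)
                return -EINVAL;

        /* Both lookups now resolve through the RCU-protected revmap. */
        WARN_ON(irq_find_mapping(domain, 5) != virq);
        return 0;
}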
extern const struct irq_domain_ops irq_domain_simple_ops;
@@ -447,9 +479,11 @@ int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest);
extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
unsigned int virq);
extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
- irq_hw_number_t hwirq, struct irq_chip *chip,
+ irq_hw_number_t hwirq,
+ const struct irq_chip *chip,
void *chip_data, irq_flow_handler_t handler,
void *handler_data, const char *handler_name);
+extern void irq_domain_reset_irq_data(struct irq_data *irq_data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
extern struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
unsigned int flags, unsigned int size,
@@ -489,9 +523,8 @@ extern int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
unsigned int virq,
irq_hw_number_t hwirq,
- struct irq_chip *chip,
+ const struct irq_chip *chip,
void *chip_data);
-extern void irq_domain_reset_irq_data(struct irq_data *irq_data);
extern void irq_domain_free_irqs_common(struct irq_domain *domain,
unsigned int virq,
unsigned int nr_irqs);
@@ -509,6 +542,9 @@ extern void irq_domain_free_irqs_parent(struct irq_domain *domain,
unsigned int irq_base,
unsigned int nr_irqs);
+extern int irq_domain_disconnect_hierarchy(struct irq_domain *domain,
+ unsigned int virq);
+
static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
{
return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY;
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 21619c92c377..5ec0fa71399e 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -14,35 +14,125 @@
#include <linux/typecheck.h>
#include <asm/irqflags.h>
+#include <asm/percpu.h>
-/* Currently trace_softirqs_on/off is used only by lockdep */
+/* Currently lockdep_softirqs_on/off is used only by lockdep */
#ifdef CONFIG_PROVE_LOCKING
- extern void trace_softirqs_on(unsigned long ip);
- extern void trace_softirqs_off(unsigned long ip);
+ extern void lockdep_softirqs_on(unsigned long ip);
+ extern void lockdep_softirqs_off(unsigned long ip);
+ extern void lockdep_hardirqs_on_prepare(void);
extern void lockdep_hardirqs_on(unsigned long ip);
extern void lockdep_hardirqs_off(unsigned long ip);
#else
- static inline void trace_softirqs_on(unsigned long ip) { }
- static inline void trace_softirqs_off(unsigned long ip) { }
+ static inline void lockdep_softirqs_on(unsigned long ip) { }
+ static inline void lockdep_softirqs_off(unsigned long ip) { }
+ static inline void lockdep_hardirqs_on_prepare(void) { }
static inline void lockdep_hardirqs_on(unsigned long ip) { }
static inline void lockdep_hardirqs_off(unsigned long ip) { }
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
- extern void trace_hardirqs_on(void);
- extern void trace_hardirqs_off(void);
-# define trace_hardirq_context(p) ((p)->hardirq_context)
-# define trace_softirq_context(p) ((p)->softirq_context)
-# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled)
-# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
-# define trace_hardirq_enter() \
+
+/* Per-task IRQ trace events information. */
+struct irqtrace_events {
+ unsigned int irq_events;
+ unsigned long hardirq_enable_ip;
+ unsigned long hardirq_disable_ip;
+ unsigned int hardirq_enable_event;
+ unsigned int hardirq_disable_event;
+ unsigned long softirq_disable_ip;
+ unsigned long softirq_enable_ip;
+ unsigned int softirq_disable_event;
+ unsigned int softirq_enable_event;
+};
+
+DECLARE_PER_CPU(int, hardirqs_enabled);
+DECLARE_PER_CPU(int, hardirq_context);
+
+extern void trace_hardirqs_on_prepare(void);
+extern void trace_hardirqs_off_finish(void);
+extern void trace_hardirqs_on(void);
+extern void trace_hardirqs_off(void);
+
+# define lockdep_hardirq_context() (raw_cpu_read(hardirq_context))
+# define lockdep_softirq_context(p) ((p)->softirq_context)
+# define lockdep_hardirqs_enabled() (this_cpu_read(hardirqs_enabled))
+# define lockdep_softirqs_enabled(p) ((p)->softirqs_enabled)
+# define lockdep_hardirq_enter() \
+do { \
+ if (__this_cpu_inc_return(hardirq_context) == 1)\
+ current->hardirq_threaded = 0; \
+} while (0)
+# define lockdep_hardirq_threaded() \
do { \
- current->hardirq_context++; \
+ current->hardirq_threaded = 1; \
} while (0)
-# define trace_hardirq_exit() \
+# define lockdep_hardirq_exit() \
do { \
- current->hardirq_context--; \
+ __this_cpu_dec(hardirq_context); \
} while (0)
+
+# define lockdep_hrtimer_enter(__hrtimer) \
+({ \
+ bool __expires_hardirq = true; \
+ \
+ if (!__hrtimer->is_hard) { \
+ current->irq_config = 1; \
+ __expires_hardirq = false; \
+ } \
+ __expires_hardirq; \
+})
+
+# define lockdep_hrtimer_exit(__expires_hardirq) \
+ do { \
+ if (!__expires_hardirq) \
+ current->irq_config = 0; \
+ } while (0)
+
+# define lockdep_posixtimer_enter() \
+ do { \
+ current->irq_config = 1; \
+ } while (0)
+
+# define lockdep_posixtimer_exit() \
+ do { \
+ current->irq_config = 0; \
+ } while (0)
+
+# define lockdep_irq_work_enter(_flags) \
+ do { \
+ if (!((_flags) & IRQ_WORK_HARD_IRQ)) \
+ current->irq_config = 1; \
+ } while (0)
+# define lockdep_irq_work_exit(_flags) \
+ do { \
+ if (!((_flags) & IRQ_WORK_HARD_IRQ)) \
+ current->irq_config = 0; \
+ } while (0)
+
+#else
+# define trace_hardirqs_on_prepare() do { } while (0)
+# define trace_hardirqs_off_finish() do { } while (0)
+# define trace_hardirqs_on() do { } while (0)
+# define trace_hardirqs_off() do { } while (0)
+# define lockdep_hardirq_context() 0
+# define lockdep_softirq_context(p) 0
+# define lockdep_hardirqs_enabled() 0
+# define lockdep_softirqs_enabled(p) 0
+# define lockdep_hardirq_enter() do { } while (0)
+# define lockdep_hardirq_threaded() do { } while (0)
+# define lockdep_hardirq_exit() do { } while (0)
+# define lockdep_softirq_enter() do { } while (0)
+# define lockdep_softirq_exit() do { } while (0)
+# define lockdep_hrtimer_enter(__hrtimer) false
+# define lockdep_hrtimer_exit(__context) do { } while (0)
+# define lockdep_posixtimer_enter() do { } while (0)
+# define lockdep_posixtimer_exit() do { } while (0)
+# define lockdep_irq_work_enter(__work) do { } while (0)
+# define lockdep_irq_work_exit(__work) do { } while (0)
+#endif
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT)
# define lockdep_softirq_enter() \
do { \
current->softirq_context++; \
@@ -51,17 +141,10 @@ do { \
do { \
current->softirq_context--; \
} while (0)
+
#else
-# define trace_hardirqs_on() do { } while (0)
-# define trace_hardirqs_off() do { } while (0)
-# define trace_hardirq_context(p) 0
-# define trace_softirq_context(p) 0
-# define trace_hardirqs_enabled(p) 0
-# define trace_softirqs_enabled(p) 0
-# define trace_hardirq_enter() do { } while (0)
-# define trace_hardirq_exit() do { } while (0)
-# define lockdep_softirq_enter() do { } while (0)
-# define lockdep_softirq_exit() do { } while (0)
+# define lockdep_softirq_enter() do { } while (0)
+# define lockdep_softirq_exit() do { } while (0)
#endif
#if defined(CONFIG_IRQSOFF_TRACER) || \
@@ -73,6 +156,17 @@ do { \
# define start_critical_timings() do { } while (0)
#endif
+#ifdef CONFIG_DEBUG_IRQFLAGS
+extern void warn_bogus_irq_restore(void);
+#define raw_check_bogus_irq_restore() \
+ do { \
+ if (unlikely(!arch_irqs_disabled())) \
+ warn_bogus_irq_restore(); \
+ } while (0)
+#else
+#define raw_check_bogus_irq_restore() do { } while (0)
+#endif
+
/*
* Wrap the arch provided IRQ routines to provide appropriate checks.
*/
@@ -86,6 +180,7 @@ do { \
#define raw_local_irq_restore(flags) \
do { \
typecheck(unsigned long, flags); \
+ raw_check_bogus_irq_restore(); \
arch_local_irq_restore(flags); \
} while (0)
#define raw_local_save_flags(flags) \
@@ -106,26 +201,33 @@ do { \
* if !TRACE_IRQFLAGS.
*/
#ifdef CONFIG_TRACE_IRQFLAGS
-#define local_irq_enable() \
- do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
-#define local_irq_disable() \
- do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
+
+#define local_irq_enable() \
+ do { \
+ trace_hardirqs_on(); \
+ raw_local_irq_enable(); \
+ } while (0)
+
+#define local_irq_disable() \
+ do { \
+ bool was_disabled = raw_irqs_disabled();\
+ raw_local_irq_disable(); \
+ if (!was_disabled) \
+ trace_hardirqs_off(); \
+ } while (0)
+
#define local_irq_save(flags) \
do { \
raw_local_irq_save(flags); \
- trace_hardirqs_off(); \
+ if (!raw_irqs_disabled_flags(flags)) \
+ trace_hardirqs_off(); \
} while (0)
-
#define local_irq_restore(flags) \
do { \
- if (raw_irqs_disabled_flags(flags)) { \
- raw_local_irq_restore(flags); \
- trace_hardirqs_off(); \
- } else { \
+ if (!raw_irqs_disabled_flags(flags)) \
trace_hardirqs_on(); \
- raw_local_irq_restore(flags); \
- } \
+ raw_local_irq_restore(flags); \
} while (0)
#define safe_halt() \
@@ -139,10 +241,7 @@ do { \
#define local_irq_enable() do { raw_local_irq_enable(); } while (0)
#define local_irq_disable() do { raw_local_irq_disable(); } while (0)
-#define local_irq_save(flags) \
- do { \
- raw_local_irq_save(flags); \
- } while (0)
+#define local_irq_save(flags) do { raw_local_irq_save(flags); } while (0)
#define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0)
#define safe_halt() do { raw_safe_halt(); } while (0)
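For reference, the pattern these wrappers instrument, with the changed semantics noted inline (the acme_* names are invented):

#include <linux/irqflags.h>

static unsigned long acme_counter;

static void acme_bump(void)
{
        unsigned long flags;

        /* Emits the hardirqs-off trace event only if IRQs were enabled. */
        local_irq_save(flags);
        acme_counter++;
        /*
         * With CONFIG_DEBUG_IRQFLAGS, restoring while IRQs are already
         * enabled triggers warn_bogus_irq_restore().
         */
        local_irq_restore(flags);
}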
diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h
index 1e6f4e7123d6..c30f454a9518 100644
--- a/include/linux/irqhandler.h
+++ b/include/linux/irqhandler.h
@@ -10,6 +10,5 @@
struct irq_desc;
struct irq_data;
typedef void (*irq_flow_handler_t)(struct irq_desc *desc);
-typedef void (*irq_preflow_handler_t)(struct irq_data *data);
#endif
diff --git a/include/linux/isa-dma.h b/include/linux/isa-dma.h
new file mode 100644
index 000000000000..61504a8c1b9e
--- /dev/null
+++ b/include/linux/isa-dma.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_ISA_DMA_H
+#define __LINUX_ISA_DMA_H
+
+#include <asm/dma.h>
+
+#if defined(CONFIG_PCI) && defined(CONFIG_X86_32)
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+
+#endif /* __LINUX_ISA_DMA_H */
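A brief sketch of the consumer side of the relocated flag; the helper name is invented:

#include <linux/errno.h>
#include <linux/isa-dma.h>

static int acme_enable_isa_dma(unsigned int dmachan)
{
        /* Refuse ISA DMA behind bridges known to corrupt transfers. */
        if (isa_dma_bridge_buggy)
                return -ENODEV;

        /* ... claim_dma_lock(), set_dma_mode(dmachan, ...), etc. ... */
        return 0;
}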
diff --git a/include/linux/isa.h b/include/linux/isa.h
index 41336da0f4e7..4fbbf5e36e08 100644
--- a/include/linux/isa.h
+++ b/include/linux/isa.h
@@ -13,7 +13,7 @@
struct isa_driver {
int (*match)(struct device *, unsigned int);
int (*probe)(struct device *, unsigned int);
- int (*remove)(struct device *, unsigned int);
+ void (*remove)(struct device *, unsigned int);
void (*shutdown)(struct device *, unsigned int);
int (*suspend)(struct device *, unsigned int, pm_message_t);
int (*resume)(struct device *, unsigned int);
@@ -38,6 +38,32 @@ static inline void isa_unregister_driver(struct isa_driver *d)
}
#endif
+#define module_isa_driver_init(__isa_driver, __num_isa_dev) \
+static int __init __isa_driver##_init(void) \
+{ \
+ return isa_register_driver(&(__isa_driver), __num_isa_dev); \
+} \
+module_init(__isa_driver##_init)
+
+#define module_isa_driver_with_irq_init(__isa_driver, __num_isa_dev, __num_irq) \
+static int __init __isa_driver##_init(void) \
+{ \
+ if (__num_irq != __num_isa_dev) { \
+ pr_err("%s: Number of irq (%u) does not match number of base (%u)\n", \
+ __isa_driver.driver.name, __num_irq, __num_isa_dev); \
+ return -EINVAL; \
+ } \
+ return isa_register_driver(&(__isa_driver), __num_isa_dev); \
+} \
+module_init(__isa_driver##_init)
+
+#define module_isa_driver_exit(__isa_driver) \
+static void __exit __isa_driver##_exit(void) \
+{ \
+ isa_unregister_driver(&(__isa_driver)); \
+} \
+module_exit(__isa_driver##_exit)
+
/**
 * module_isa_driver() - Helper macro for registering an ISA driver
* @__isa_driver: isa_driver struct
@@ -48,16 +74,22 @@ static inline void isa_unregister_driver(struct isa_driver *d)
* use this macro once, and calling it replaces module_init and module_exit.
*/
#define module_isa_driver(__isa_driver, __num_isa_dev) \
-static int __init __isa_driver##_init(void) \
-{ \
- return isa_register_driver(&(__isa_driver), __num_isa_dev); \
-} \
-module_init(__isa_driver##_init); \
-static void __exit __isa_driver##_exit(void) \
-{ \
- isa_unregister_driver(&(__isa_driver)); \
-} \
-module_exit(__isa_driver##_exit);
+module_isa_driver_init(__isa_driver, __num_isa_dev); \
+module_isa_driver_exit(__isa_driver)
+
+/**
+ * module_isa_driver_with_irq() - Helper macro for registering an ISA driver with irq
+ * @__isa_driver: isa_driver struct
+ * @__num_isa_dev: number of devices to register
+ * @__num_irq: number of IRQ to register
+ *
+ * Helper macro for ISA drivers with irq that do not do anything special in
+ * module init/exit. Each module may only use this macro once, and calling it
+ * replaces module_init and module_exit.
+ */
+#define module_isa_driver_with_irq(__isa_driver, __num_isa_dev, __num_irq) \
+module_isa_driver_with_irq_init(__isa_driver, __num_isa_dev, __num_irq); \
+module_isa_driver_exit(__isa_driver)
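A hedged usage sketch of the split helpers; the module parameters and the no-op callbacks are illustrative:

#include <linux/device.h>
#include <linux/isa.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static unsigned long base[2];
static unsigned int num_base;
module_param_hw_array(base, ulong, ioport, &num_base, 0444);

static unsigned int irq[2];
static unsigned int num_irq;
module_param_hw_array(irq, uint, irq, &num_irq, 0444);

static int acme_match(struct device *dev, unsigned int id)   { return 1; }
static int acme_probe(struct device *dev, unsigned int id)   { return 0; }
static void acme_remove(struct device *dev, unsigned int id) { }

static struct isa_driver acme_isa_driver = {
        .match  = acme_match,
        .probe  = acme_probe,
        .remove = acme_remove,          /* note: ->remove now returns void */
        .driver = { .name = "acme_isa" },
};

/* Fails with -EINVAL at init time if the irq count differs from the base count. */
module_isa_driver_with_irq(acme_isa_driver, num_base, num_irq);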
/**
 * max_num_isa_dev() - Maximum possible number of registered instances of an ISA device
diff --git a/include/linux/isapnp.h b/include/linux/isapnp.h
index 11edb2109a68..dba18c95844b 100644
--- a/include/linux/isapnp.h
+++ b/include/linux/isapnp.h
@@ -75,9 +75,6 @@ static inline int isapnp_proc_done(void) { return 0; }
#endif
/* compat */
-struct pnp_card *pnp_find_card(unsigned short vendor,
- unsigned short device,
- struct pnp_card *from);
struct pnp_dev *pnp_find_dev(struct pnp_card *card,
unsigned short vendor,
unsigned short function,
@@ -92,9 +89,6 @@ static inline int isapnp_cfg_end(void) { return -ENODEV; }
static inline unsigned char isapnp_read_byte(unsigned char idx) { return 0xff; }
static inline void isapnp_write_byte(unsigned char idx, unsigned char val) { ; }
-static inline struct pnp_card *pnp_find_card(unsigned short vendor,
- unsigned short device,
- struct pnp_card *from) { return NULL; }
static inline struct pnp_dev *pnp_find_dev(struct pnp_card *card,
unsigned short vendor,
unsigned short function,
diff --git a/include/linux/iscsi_ibft.h b/include/linux/iscsi_ibft.h
index b7b45ca82bea..790e7fcfc1a6 100644
--- a/include/linux/iscsi_ibft.h
+++ b/include/linux/iscsi_ibft.h
@@ -13,26 +13,22 @@
#ifndef ISCSI_IBFT_H
#define ISCSI_IBFT_H
-#include <linux/acpi.h>
+#include <linux/types.h>
/*
- * Logical location of iSCSI Boot Format Table.
- * If the value is NULL there is no iBFT on the machine.
+ * Physical location of iSCSI Boot Format Table.
+ * If the value is 0 there is no iBFT on the machine.
*/
-extern struct acpi_table_ibft *ibft_addr;
+extern phys_addr_t ibft_phys_addr;
/*
* Routine used to find and reserve the iSCSI Boot Format Table. The
- * mapped address is set in the ibft_addr variable.
+ * physical address is set in the ibft_phys_addr variable.
*/
#ifdef CONFIG_ISCSI_IBFT_FIND
-unsigned long find_ibft_region(unsigned long *sizep);
+void reserve_ibft_region(void);
#else
-static inline unsigned long find_ibft_region(unsigned long *sizep)
-{
- *sizep = 0;
- return 0;
-}
+static inline void reserve_ibft_region(void) {}
#endif
#endif /* ISCSI_IBFT_H */
diff --git a/include/linux/isicom.h b/include/linux/isicom.h
deleted file mode 100644
index 7de6822d7b1a..000000000000
--- a/include/linux/isicom.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_ISICOM_H
-#define _LINUX_ISICOM_H
-
-#define YES 1
-#define NO 0
-
-/*
- * ISICOM Driver definitions ...
- *
- */
-
-#define ISICOM_NAME "ISICom"
-
-/*
- * PCI definitions
- */
-
-#define DEVID_COUNT 9
-#define VENDOR_ID 0x10b5
-
-/*
- * These are now officially allocated numbers
- */
-
-#define ISICOM_NMAJOR 112 /* normal */
-#define ISICOM_CMAJOR 113 /* callout */
-#define ISICOM_MAGIC (('M' << 8) | 'T')
-
-#define WAKEUP_CHARS 256 /* hard coded for now */
-#define TX_SIZE 254
-
-#define BOARD_COUNT 4
-#define PORT_COUNT (BOARD_COUNT*16)
-
-/* character sizes */
-
-#define ISICOM_CS5 0x0000
-#define ISICOM_CS6 0x0001
-#define ISICOM_CS7 0x0002
-#define ISICOM_CS8 0x0003
-
-/* stop bits */
-
-#define ISICOM_1SB 0x0000
-#define ISICOM_2SB 0x0004
-
-/* parity */
-
-#define ISICOM_NOPAR 0x0000
-#define ISICOM_ODPAR 0x0008
-#define ISICOM_EVPAR 0x0018
-
-/* flow control */
-
-#define ISICOM_CTSRTS 0x03
-#define ISICOM_INITIATE_XONXOFF 0x04
-#define ISICOM_RESPOND_XONXOFF 0x08
-
-#define BOARD(line) (((line) >> 4) & 0x3)
-
- /* isi kill queue bitmap */
-
-#define ISICOM_KILLTX 0x01
-#define ISICOM_KILLRX 0x02
-
- /* isi_board status bitmap */
-
-#define FIRMWARE_LOADED 0x0001
-#define BOARD_ACTIVE 0x0002
-#define BOARD_INIT 0x0004
-
- /* isi_port status bitmap */
-
-#define ISI_CTS 0x1000
-#define ISI_DSR 0x2000
-#define ISI_RI 0x4000
-#define ISI_DCD 0x8000
-#define ISI_DTR 0x0100
-#define ISI_RTS 0x0200
-
-
-#define ISI_TXOK 0x0001
-
-#endif /* ISICOM_H */
diff --git a/include/linux/iversion.h b/include/linux/iversion.h
index 2917ef990d43..e27bd4f55d84 100644
--- a/include/linux/iversion.h
+++ b/include/linux/iversion.h
@@ -123,17 +123,12 @@ inode_peek_iversion_raw(const struct inode *inode)
static inline void
inode_set_max_iversion_raw(struct inode *inode, u64 val)
{
- u64 cur, old;
+ u64 cur = inode_peek_iversion_raw(inode);
- cur = inode_peek_iversion_raw(inode);
- for (;;) {
+ do {
if (cur > val)
break;
- old = atomic64_cmpxchg(&inode->i_version, cur, val);
- if (likely(old == cur))
- break;
- cur = old;
- }
+ } while (!atomic64_try_cmpxchg(&inode->i_version, &cur, val));
}
/**
@@ -177,56 +172,7 @@ inode_set_iversion_queried(struct inode *inode, u64 val)
I_VERSION_QUERIED);
}
-/**
- * inode_maybe_inc_iversion - increments i_version
- * @inode: inode with the i_version that should be updated
- * @force: increment the counter even if it's not necessary?
- *
- * Every time the inode is modified, the i_version field must be seen to have
- * changed by any observer.
- *
- * If "force" is set or the QUERIED flag is set, then ensure that we increment
- * the value, and clear the queried flag.
- *
- * In the common case where neither is set, then we can return "false" without
- * updating i_version.
- *
- * If this function returns false, and no other metadata has changed, then we
- * can avoid logging the metadata.
- */
-static inline bool
-inode_maybe_inc_iversion(struct inode *inode, bool force)
-{
- u64 cur, old, new;
-
- /*
- * The i_version field is not strictly ordered with any other inode
- * information, but the legacy inode_inc_iversion code used a spinlock
- * to serialize increments.
- *
- * Here, we add full memory barriers to ensure that any de-facto
- * ordering with other info is preserved.
- *
- * This barrier pairs with the barrier in inode_query_iversion()
- */
- smp_mb();
- cur = inode_peek_iversion_raw(inode);
- for (;;) {
- /* If flag is clear then we needn't do anything */
- if (!force && !(cur & I_VERSION_QUERIED))
- return false;
-
- /* Since lowest bit is flag, add 2 to avoid it */
- new = (cur & ~I_VERSION_QUERIED) + I_VERSION_INCREMENT;
-
- old = atomic64_cmpxchg(&inode->i_version, cur, new);
- if (likely(old == cur))
- break;
- cur = old;
- }
- return true;
-}
-
+bool inode_maybe_inc_iversion(struct inode *inode, bool force);
/**
* inode_inc_iversion - forcibly increment i_version
@@ -304,10 +250,10 @@ inode_peek_iversion(const struct inode *inode)
static inline u64
inode_query_iversion(struct inode *inode)
{
- u64 cur, old, new;
+ u64 cur, new;
cur = inode_peek_iversion_raw(inode);
- for (;;) {
+ do {
/* If flag is already set, then no need to swap */
if (cur & I_VERSION_QUERIED) {
/*
@@ -320,14 +266,23 @@ inode_query_iversion(struct inode *inode)
}
new = cur | I_VERSION_QUERIED;
- old = atomic64_cmpxchg(&inode->i_version, cur, new);
- if (likely(old == cur))
- break;
- cur = old;
- }
+ } while (!atomic64_try_cmpxchg(&inode->i_version, &cur, new));
return cur >> I_VERSION_QUERIED_SHIFT;
}
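Both conversions above follow the standard try_cmpxchg() loop shape, where the helper refreshes the expected value on failure instead of requiring an explicit re-read; schematically:

#include <linux/atomic.h>

static s64 acme_advance(atomic64_t *v, s64 (*next)(s64))
{
        s64 cur = atomic64_read(v), new;

        do {
                new = next(cur);
                /* On failure, try_cmpxchg updates 'cur' to the live value. */
        } while (!atomic64_try_cmpxchg(v, &cur, new));

        return new;
}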
+/*
+ * For filesystems without any sort of change attribute, the best we can
+ * do is fake one up from the ctime:
+ */
+static inline u64 time_to_chattr(struct timespec64 *t)
+{
+ u64 chattr = t->tv_sec;
+
+ chattr <<= 32;
+ chattr += t->tv_nsec;
+ return chattr;
+}
+
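A quick worked example of the fake change attribute: for a ctime of 100 s and 25 ns,

struct timespec64 ts = { .tv_sec = 100, .tv_nsec = 25 };
u64 chattr = time_to_chattr(&ts);       /* (100ULL << 32) + 25 = 429496729625 */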
/**
* inode_eq_iversion_raw - check whether the raw i_version counter has changed
* @inode: inode to check
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index f613d8529863..0b7242370b56 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -27,6 +27,7 @@
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/bit_spinlock.h>
+#include <linux/blkdev.h>
#include <crypto/hash.h>
#endif
@@ -53,20 +54,20 @@
* CONFIG_JBD2_DEBUG is on.
*/
#define JBD2_EXPENSIVE_CHECKING
-extern ushort jbd2_journal_enable_debug;
void __jbd2_debug(int level, const char *file, const char *func,
unsigned int line, const char *fmt, ...);
-#define jbd_debug(n, fmt, a...) \
+#define jbd2_debug(n, fmt, a...) \
__jbd2_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
#else
-#define jbd_debug(n, fmt, a...) /**/
+#define jbd2_debug(n, fmt, a...) no_printk(fmt, ##a)
#endif
extern void *jbd2_alloc(size_t size, gfp_t flags);
extern void jbd2_free(void *ptr, size_t size);
#define JBD2_MIN_JOURNAL_BLOCKS 1024
+#define JBD2_DEFAULT_FAST_COMMIT_BLOCKS 256
#ifdef __KERNEL__
@@ -262,7 +263,10 @@ typedef struct journal_superblock_s
/* 0x0050 */
__u8 s_checksum_type; /* checksum type */
__u8 s_padding2[3];
- __u32 s_padding[42];
+/* 0x0054 */
+ __be32 s_num_fc_blks; /* Number of fast commit blocks */
+/* 0x0058 */
+ __u32 s_padding[41];
__be32 s_checksum; /* crc32c(superblock) */
/* 0x0100 */
@@ -288,6 +292,7 @@ typedef struct journal_superblock_s
#define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004
#define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008
#define JBD2_FEATURE_INCOMPAT_CSUM_V3 0x00000010
+#define JBD2_FEATURE_INCOMPAT_FAST_COMMIT 0x00000020
/* See "journal feature predicate functions" below */
@@ -298,7 +303,8 @@ typedef struct journal_superblock_s
JBD2_FEATURE_INCOMPAT_64BIT | \
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \
JBD2_FEATURE_INCOMPAT_CSUM_V2 | \
- JBD2_FEATURE_INCOMPAT_CSUM_V3)
+ JBD2_FEATURE_INCOMPAT_CSUM_V3 | \
+ JBD2_FEATURE_INCOMPAT_FAST_COMMIT)
#ifdef __KERNEL__
@@ -394,7 +400,7 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
#define JI_WAIT_DATA (1 << __JI_WAIT_DATA)
/**
- * struct jbd_inode - The jbd_inode type is the structure linking inodes in
+ * struct jbd2_inode - The jbd_inode type is the structure linking inodes in
* ordered mode present in a transaction so that we can sync them during commit.
*/
struct jbd2_inode {
@@ -451,8 +457,8 @@ struct jbd2_inode {
struct jbd2_revoke_table_s;
/**
- * struct handle_s - The handle_s type is the concrete type associated with
- * handle_t.
+ * struct jbd2_journal_handle - The jbd2_journal_handle type is the concrete
+ * type associated with handle_t.
* @h_transaction: Which compound transaction is this update a part of?
* @h_journal: Which journal handle belongs to - used iff h_reserved set.
* @h_rsv_handle: Handle reserved for finishing the logical operation.
@@ -531,6 +537,7 @@ struct transaction_chp_stats_s {
* The transaction keeps track of all of the buffers modified by a
* running transaction, and all of the buffers committed but not yet
* flushed to home for finished transactions.
+ * (Locking Documentation improved by LockDoc)
*/
/*
@@ -546,9 +553,6 @@ struct transaction_chp_stats_s {
* ->j_list_lock
*
* j_state_lock
- * ->t_handle_lock
- *
- * j_state_lock
* ->j_list_lock (journal_unmap_buffer)
*
*/
@@ -586,18 +590,22 @@ struct transaction_s
*/
unsigned long t_log_start;
- /* Number of buffers on the t_buffers list [j_list_lock] */
+ /*
+ * Number of buffers on the t_buffers list [j_list_lock, no locks
+ * needed for jbd2 thread]
+ */
int t_nr_buffers;
/*
* Doubly-linked circular list of all buffers reserved but not yet
- * modified by this transaction [j_list_lock]
+ * modified by this transaction [j_list_lock, no locks needed for
+ * jbd2 thread]
*/
struct journal_head *t_reserved_list;
/*
* Doubly-linked circular list of all metadata buffers owned by this
- * transaction [j_list_lock]
+ * transaction [j_list_lock, no locks needed for jbd2 thread]
*/
struct journal_head *t_buffers;
@@ -621,14 +629,18 @@ struct transaction_s
struct journal_head *t_checkpoint_io_list;
/*
- * Doubly-linked circular list of metadata buffers being shadowed by log
- * IO. The IO buffers on the iobuf list and the shadow buffers on this
- * list match each other one for one at all times. [j_list_lock]
+ * Doubly-linked circular list of metadata buffers being
+ * shadowed by log IO. The IO buffers on the iobuf list and
+ * the shadow buffers on this list match each other one for
+ * one at all times. [j_list_lock, no locks needed for jbd2
+ * thread]
*/
struct journal_head *t_shadow_list;
/*
- * List of inodes whose data we've modified in data=ordered mode.
+ * List of inodes associated with the transaction; e.g., ext4 uses
+ * this to track inodes in data=ordered and data=journal mode that
+ * need special handling on transaction commit; also used by ocfs2.
* [j_list_lock]
*/
struct list_head t_inode_list;
@@ -649,12 +661,12 @@ struct transaction_s
unsigned long t_start;
/*
- * When commit was requested
+ * When commit was requested [j_state_lock]
*/
unsigned long t_requested;
/*
- * Checkpointing stats [j_checkpoint_sem]
+ * Checkpointing stats [j_list_lock]
*/
struct transaction_chp_stats_s t_chp_stats;
@@ -746,6 +758,11 @@ jbd2_time_diff(unsigned long start, unsigned long end)
#define JBD2_NR_BATCH 64
+enum passtype {PASS_SCAN, PASS_REVOKE, PASS_REPLAY};
+
+#define JBD2_FC_REPLAY_STOP 0
+#define JBD2_FC_REPLAY_CONTINUE 1
+
/**
* struct journal_s - The journal_s type is the concrete type associated with
* journal_t.
@@ -753,11 +770,17 @@ jbd2_time_diff(unsigned long start, unsigned long end)
struct journal_s
{
/**
- * @j_flags: General journaling state flags [j_state_lock]
+ * @j_flags: General journaling state flags [j_state_lock,
+ * no lock for quick racy checks]
*/
unsigned long j_flags;
/**
+ * @j_atomic_flags: Atomic journaling state flags.
+ */
+ unsigned long j_atomic_flags;
+
+ /**
* @j_errno:
*
* Is there an outstanding uncleared error on the journal (from a prior
@@ -766,6 +789,11 @@ struct journal_s
int j_errno;
/**
+ * @j_abort_mutex: Lock the whole aborting procedure.
+ */
+ struct mutex j_abort_mutex;
+
+ /**
* @j_sb_buffer: The first part of the superblock buffer.
*/
struct buffer_head *j_sb_buffer;
@@ -788,7 +816,8 @@ struct journal_s
/**
* @j_barrier_count:
*
- * Number of processes waiting to create a barrier lock [j_state_lock]
+ * Number of processes waiting to create a barrier lock [j_state_lock,
+ * no lock for quick racy checks]
*/
int j_barrier_count;
@@ -801,7 +830,8 @@ struct journal_s
* @j_running_transaction:
*
* Transactions: The current running transaction...
- * [j_state_lock] [caller holding open handle]
+ * [j_state_lock, no lock for quick racy checks] [caller holding
+ * open handle]
*/
transaction_t *j_running_transaction;
@@ -852,6 +882,13 @@ struct journal_s
wait_queue_head_t j_wait_reserved;
/**
+ * @j_fc_wait:
+ *
+ * Wait queue to wait for completion of async fast commits.
+ */
+ wait_queue_head_t j_fc_wait;
+
+ /**
* @j_checkpoint_mutex:
*
* Semaphore for locking against concurrent checkpoints.
@@ -869,6 +906,29 @@ struct journal_s
struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH];
/**
+ * @j_shrinker:
+ *
+ * Journal head shrinker; reclaims a buffer's journal head once the
+ * buffer has been written back.
+ */
+ struct shrinker j_shrinker;
+
+ /**
+ * @j_checkpoint_jh_count:
+ *
+ * Number of journal buffers on the checkpoint list. [j_list_lock]
+ */
+ struct percpu_counter j_checkpoint_jh_count;
+
+ /**
+ * @j_shrink_transaction:
+ *
+ * Records the next transaction to be shrunk on the checkpoint list.
+ * [j_list_lock]
+ */
+ transaction_t *j_shrink_transaction;
+
+ /**
* @j_head:
*
* Journal head: identifies the first unused block in the journal.
@@ -909,6 +969,31 @@ struct journal_s
unsigned long j_last;
/**
+ * @j_fc_first:
+ *
+ * The block number of the first fast commit block in the journal
+ * [j_state_lock].
+ */
+ unsigned long j_fc_first;
+
+ /**
+ * @j_fc_off:
+ *
+ * Number of fast commit blocks currently allocated. Accessed only
+ * during fast commit. Currently only one process can do fast commit, so
+ * this field is not protected by any lock.
+ */
+ unsigned long j_fc_off;
+
+ /**
+ * @j_fc_last:
+ *
+ * The block number one beyond the last fast commit block in the journal
+ * [j_state_lock].
+ */
+ unsigned long j_fc_last;
+
+ /**
* @j_dev: Device where we store the journal.
*/
struct block_device *j_dev;
@@ -939,9 +1024,9 @@ struct journal_s
struct block_device *j_fs_dev;
/**
- * @j_maxlen: Total maximum capacity of the journal region on disk.
+ * @j_total_len: Total maximum capacity of the journal region on disk.
*/
- unsigned int j_maxlen;
+ unsigned int j_total_len;
/**
* @j_reserved_credits:
@@ -981,7 +1066,7 @@ struct journal_s
* @j_commit_sequence:
*
* Sequence number of the most recently committed transaction
- * [j_state_lock].
+ * [j_state_lock, no lock for quick racy checks]
*/
tid_t j_commit_sequence;
@@ -989,7 +1074,7 @@ struct journal_s
* @j_commit_request:
*
* Sequence number of the most recent transaction wanting commit
- * [j_state_lock]
+ * [j_state_lock, no lock for quick racy checks]
*/
tid_t j_commit_request;
@@ -1059,6 +1144,13 @@ struct journal_s
struct buffer_head **j_wbuf;
/**
+ * @j_fc_wbuf: Array of fast commit bhs for fast commit. Accessed only
+ * during a fast commit. Currently only one process can do fast commit, so
+ * this field is not protected by any lock.
+ */
+ struct buffer_head **j_fc_wbuf;
+
+ /**
* @j_wbufsize:
*
* Size of @j_wbuf array.
@@ -1066,6 +1158,13 @@ struct journal_s
int j_wbufsize;
/**
+ * @j_fc_wbufsize:
+ *
+ * Size of @j_fc_wbuf array.
+ */
+ int j_fc_wbufsize;
+
+ /**
* @j_last_sync_writer:
*
* The pid of the last person to run a synchronous operation
@@ -1105,6 +1204,27 @@ struct journal_s
void (*j_commit_callback)(journal_t *,
transaction_t *);
+ /**
+ * @j_submit_inode_data_buffers:
+ *
+ * This function is called for all inodes associated with the
+ * committing transaction marked with the JI_WRITE_DATA flag
+ * before we start to write out the transaction to the journal.
+ */
+ int (*j_submit_inode_data_buffers)
+ (struct jbd2_inode *);
+
+ /**
+ * @j_finish_inode_data_buffers:
+ *
+ * This function is called for all inodes associated with the
+ * committing transaction marked with the JI_WAIT_DATA flag
+ * after we have written the transaction to the journal
+ * but before we write out the commit block.
+ */
+ int (*j_finish_inode_data_buffers)
+ (struct jbd2_inode *);
+
/*
* Journal statistics
*/
@@ -1164,6 +1284,30 @@ struct journal_s
*/
struct lockdep_map j_trans_commit_map;
#endif
+
+ /**
+ * @j_fc_cleanup_callback:
+ *
+ * Clean up after a fast commit or a full commit. JBD2 calls this
+ * function after every commit operation.
+ */
+ void (*j_fc_cleanup_callback)(struct journal_s *journal, int full, tid_t tid);
+
+ /**
+ * @j_fc_replay_callback:
+ *
+ * File-system specific function that performs replay of a fast
+ * commit. JBD2 calls this function for each fast commit block found in
+ * the journal. This function should return JBD2_FC_REPLAY_CONTINUE
+ * to indicate that the block was processed correctly and more fast
+ * commit replay should continue. Return value of JBD2_FC_REPLAY_STOP
+ * indicates the end of replay (no more blocks remaining). A negative
+ * return value indicates error.
+ */
+ int (*j_fc_replay_callback)(struct journal_s *journal,
+ struct buffer_head *bh,
+ enum passtype pass, int off,
+ tid_t expected_commit_id);
};
#define jbd2_might_wait_for_commit(j) \
@@ -1234,6 +1378,7 @@ JBD2_FEATURE_INCOMPAT_FUNCS(64bit, 64BIT)
JBD2_FEATURE_INCOMPAT_FUNCS(async_commit, ASYNC_COMMIT)
JBD2_FEATURE_INCOMPAT_FUNCS(csum2, CSUM_V2)
JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3)
+JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT)
/*
* Journal flag definitions
@@ -1247,7 +1392,18 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3)
#define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
* data write error in ordered
* mode */
-#define JBD2_REC_ERR 0x080 /* The errno in the sb has been recorded */
+#define JBD2_FAST_COMMIT_ONGOING 0x100 /* Fast commit is ongoing */
+#define JBD2_FULL_COMMIT_ONGOING 0x200 /* Full commit is ongoing */
+#define JBD2_JOURNAL_FLUSH_DISCARD 0x0001
+#define JBD2_JOURNAL_FLUSH_ZEROOUT 0x0002
+#define JBD2_JOURNAL_FLUSH_VALID (JBD2_JOURNAL_FLUSH_DISCARD | \
+ JBD2_JOURNAL_FLUSH_ZEROOUT)
+
+/*
+ * Journal atomic flag definitions
+ */
+#define JBD2_CHECKPOINT_IO_ERROR 0x001 /* Detect an I/O error while writing
+ * a buffer back to disk */
/*
* Function declarations for the journaling transaction and buffer
@@ -1259,9 +1415,7 @@ extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
extern bool __jbd2_journal_refile_buffer(struct journal_head *);
extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *);
extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_free_buffer(struct journal_head *bh);
extern void jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_clean_data_list(transaction_t *transaction);
static inline void jbd2_file_log_bh(struct list_head *head, struct buffer_head *bh)
{
list_add_tail(&bh->b_assoc_buffers, head);
@@ -1285,6 +1439,7 @@ extern void jbd2_journal_commit_transaction(journal_t *);
/* Checkpoint list management */
void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
+unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal, unsigned long *nr_to_scan);
int __jbd2_journal_remove_checkpoint(struct journal_head *);
void jbd2_journal_destroy_checkpoint(journal_t *journal);
void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
@@ -1325,9 +1480,6 @@ extern int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
struct buffer_head **bh_out,
sector_t blocknr);
-/* Transaction locking */
-extern void __wait_on_journal (journal_t *);
-
/* Transaction cache support */
extern void jbd2_journal_destroy_transaction_cache(void);
extern int __init jbd2_journal_init_transaction_cache(void);
@@ -1374,14 +1526,16 @@ void jbd2_journal_set_triggers(struct buffer_head *,
struct jbd2_buffer_trigger_type *type);
extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
extern int jbd2_journal_forget (handle_t *, struct buffer_head *);
-extern int jbd2_journal_invalidatepage(journal_t *,
- struct page *, unsigned int, unsigned int);
-extern int jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
+int jbd2_journal_invalidate_folio(journal_t *, struct folio *,
+ size_t offset, size_t length);
+bool jbd2_journal_try_to_free_buffers(journal_t *journal, struct folio *folio);
extern int jbd2_journal_stop(handle_t *);
-extern int jbd2_journal_flush (journal_t *);
+extern int jbd2_journal_flush(journal_t *journal, unsigned int flags);
extern void jbd2_journal_lock_updates (journal_t *);
extern void jbd2_journal_unlock_updates (journal_t *);
+void jbd2_journal_wait_updates(journal_t *);
+
extern journal_t * jbd2_journal_init_dev(struct block_device *bdev,
struct block_device *fs_dev,
unsigned long long start, int len, int bsize);
@@ -1402,7 +1556,7 @@ extern int jbd2_journal_wipe (journal_t *, int);
extern int jbd2_journal_skip_recovery (journal_t *);
extern void jbd2_journal_update_sb_errno(journal_t *);
extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t,
- unsigned long, int);
+ unsigned long, blk_opf_t);
extern void jbd2_journal_abort (journal_t *, int);
extern int jbd2_journal_errno (journal_t *);
extern void jbd2_journal_ack_err (journal_t *);
@@ -1416,6 +1570,10 @@ extern int jbd2_journal_inode_ranged_write(handle_t *handle,
extern int jbd2_journal_inode_ranged_wait(handle_t *handle,
struct jbd2_inode *inode, loff_t start_byte,
loff_t length);
+extern int jbd2_journal_submit_inode_data_buffers(
+ struct jbd2_inode *jinode);
+extern int jbd2_journal_finish_inode_data_buffers(
+ struct jbd2_inode *jinode);
extern int jbd2_journal_begin_ordered_truncate(journal_t *journal,
struct jbd2_inode *inode, loff_t new_size);
extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode);
@@ -1488,7 +1646,6 @@ extern void jbd2_clear_buffer_revoked_flags(journal_t *journal);
*/
int jbd2_log_start_commit(journal_t *journal, tid_t tid);
-int __jbd2_log_start_commit(journal_t *journal, tid_t tid);
int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
int jbd2_transaction_committed(journal_t *journal, tid_t tid);
@@ -1500,6 +1657,21 @@ void __jbd2_log_wait_for_space(journal_t *journal);
extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
extern int jbd2_cleanup_journal_tail(journal_t *);
+/* Fast commit related APIs */
+int jbd2_fc_begin_commit(journal_t *journal, tid_t tid);
+int jbd2_fc_end_commit(journal_t *journal);
+int jbd2_fc_end_commit_fallback(journal_t *journal);
+int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out);
+int jbd2_submit_inode_data(struct jbd2_inode *jinode);
+int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode);
+int jbd2_fc_wait_bufs(journal_t *journal, int num_blks);
+int jbd2_fc_release_bufs(journal_t *journal);
+
+static inline int jbd2_journal_get_max_txn_bufs(journal_t *journal)
+{
+ return (journal->j_total_len - journal->j_fc_wbufsize) / 4;
+}
+
/*
* is_journal_abort
*
@@ -1560,6 +1732,13 @@ static inline int jbd2_journal_has_csum_v2or3(journal_t *journal)
return journal->j_chksum_driver != NULL;
}
+static inline int jbd2_journal_get_num_fc_blks(journal_superblock_t *jsb)
+{
+ int num_fc_blocks = be32_to_cpu(jsb->s_num_fc_blks);
+
+ return num_fc_blocks ? num_fc_blocks : JBD2_DEFAULT_FAST_COMMIT_BLOCKS;
+}
+
/*
* Return number of free blocks in the log. Must be called under j_state_lock.
*/
@@ -1587,8 +1766,6 @@ static inline unsigned long jbd2_log_space_left(journal_t *journal)
#define BJ_Reserved 4 /* Buffer is reserved for access by journal */
#define BJ_Types 5
-extern int jbd_blocks_per_page(struct inode *inode);
-
/* JBD uses a CRC32 checksum */
#define JBD_MAX_CHECKSUM_SIZE 4
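To make the replay contract above concrete, here is a minimal sketch of a j_fc_replay_callback implementation. All myfs_* names and MYFS_FC_TAG_* tags are hypothetical; the in-tree consumer is ext4's fs/ext4/fast_commit.c.

static int myfs_fc_replay(struct journal_s *journal, struct buffer_head *bh,
			  enum passtype pass, int off, tid_t expected_commit_id)
{
	switch (myfs_parse_fc_block(bh, off)) {	/* hypothetical tag parser */
	case MYFS_FC_TAG_TAIL:
		/* Last block of this fast commit: stop scanning. */
		return JBD2_FC_REPLAY_STOP;
	case MYFS_FC_TAG_ADD_RANGE:
		if (myfs_replay_add_range(journal, bh, off) < 0)
			return -EIO;	/* a negative value aborts replay */
		return JBD2_FC_REPLAY_CONTINUE;
	default:
		/* Unknown tag: skip it and ask for the next block. */
		return JBD2_FC_REPLAY_CONTINUE;
	}
}

With j_fc_wbufsize set, jbd2_journal_get_max_txn_bufs() above then caps a transaction at a quarter of the journal space left after the fast-commit area, i.e. (j_total_len - j_fc_wbufsize) / 4 blocks.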
diff --git a/include/linux/jhash.h b/include/linux/jhash.h
index ba2f6a9776b6..ab7f8c152b89 100644
--- a/include/linux/jhash.h
+++ b/include/linux/jhash.h
@@ -5,7 +5,7 @@
*
* Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
*
- * http://burtleburtle.net/bob/hash/
+ * https://burtleburtle.net/bob/hash/
*
* These are the credits from Bob's sources:
*
@@ -86,19 +86,20 @@ static inline u32 jhash(const void *key, u32 length, u32 initval)
}
/* Last block: affect all 32 bits of (c) */
switch (length) {
- case 12: c += (u32)k[11]<<24; /* fall through */
- case 11: c += (u32)k[10]<<16; /* fall through */
- case 10: c += (u32)k[9]<<8; /* fall through */
- case 9: c += k[8]; /* fall through */
- case 8: b += (u32)k[7]<<24; /* fall through */
- case 7: b += (u32)k[6]<<16; /* fall through */
- case 6: b += (u32)k[5]<<8; /* fall through */
- case 5: b += k[4]; /* fall through */
- case 4: a += (u32)k[3]<<24; /* fall through */
- case 3: a += (u32)k[2]<<16; /* fall through */
- case 2: a += (u32)k[1]<<8; /* fall through */
+ case 12: c += (u32)k[11]<<24; fallthrough;
+ case 11: c += (u32)k[10]<<16; fallthrough;
+ case 10: c += (u32)k[9]<<8; fallthrough;
+ case 9: c += k[8]; fallthrough;
+ case 8: b += (u32)k[7]<<24; fallthrough;
+ case 7: b += (u32)k[6]<<16; fallthrough;
+ case 6: b += (u32)k[5]<<8; fallthrough;
+ case 5: b += k[4]; fallthrough;
+ case 4: a += (u32)k[3]<<24; fallthrough;
+ case 3: a += (u32)k[2]<<16; fallthrough;
+ case 2: a += (u32)k[1]<<8; fallthrough;
case 1: a += k[0];
__jhash_final(a, b, c);
+ break;
case 0: /* Nothing left to add */
break;
}
@@ -132,10 +133,11 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
/* Handle the last 3 u32's */
switch (length) {
- case 3: c += k[2]; /* fall through */
- case 2: b += k[1]; /* fall through */
+ case 3: c += k[2]; fallthrough;
+ case 2: b += k[1]; fallthrough;
case 1: a += k[0];
__jhash_final(a, b, c);
+ break;
case 0: /* Nothing left to add */
break;
}
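The fallthrough conversion above changes no hash values; it only replaces comment-style annotations the compiler cannot see with the fallthrough attribute. For reference, a minimal caller of jhash() — the struct and bucket count here are illustrative:

#include <linux/jhash.h>

struct flow_key {
	u32 saddr, daddr;
	u16 sport, dport;
};

static u32 flow_bucket(const struct flow_key *key, u32 seed)
{
	/* jhash() handles any byte length; jhash2() is the faster
	 * variant when the key is a whole number of u32 words. */
	return jhash(key, sizeof(*key), seed) & 1023;	/* 1024 buckets */
}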
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index e3279ef24d28..5e13f801c902 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -3,11 +3,13 @@
#define _LINUX_JIFFIES_H
#include <linux/cache.h>
+#include <linux/limits.h>
#include <linux/math64.h>
-#include <linux/kernel.h>
+#include <linux/minmax.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/timex.h>
+#include <vdso/jiffies.h>
#include <asm/param.h> /* for HZ */
#include <generated/timeconst.h>
@@ -59,9 +61,6 @@
extern int register_refined_jiffies(long clock_tick_rate);
-/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */
-#define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ)
-
/* TICK_USEC is the time between ticks in usec assuming SHIFTED_HZ */
#define TICK_USEC ((USEC_PER_SEC + HZ/2) / HZ)
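The surviving TICK_USEC macro is plain round-to-nearest integer division: adding HZ/2 before dividing rounds instead of truncating (the removed TICK_NSEC now comes from <vdso/jiffies.h>, included above). Two worked values with USEC_PER_SEC = 1000000:

HZ = 1000: (1000000 + 500) / 1000 = 1000 microseconds per tick (exact)
HZ = 300:  (1000000 + 150) / 300  = 3333 microseconds per tick (true value 3333.33...)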
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 3526c0aee954..570831ca9951 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -68,7 +68,7 @@
* Lacking toolchain and/or architecture support, static keys fall back to a
* simple conditional branch.
*
- * Additional babbling in: Documentation/static-keys.txt
+ * Additional babbling in: Documentation/staging/static-keys.rst
*/
#ifndef __ASSEMBLY__
@@ -82,10 +82,9 @@ extern bool static_key_initialized;
"%s(): static key '%pS' used before call to jump_label_init()", \
__func__, (key))
-#ifdef CONFIG_JUMP_LABEL
-
struct static_key {
atomic_t enabled;
+#ifdef CONFIG_JUMP_LABEL
/*
* Note:
* To make anonymous unions work with old compilers, the static
@@ -104,13 +103,9 @@ struct static_key {
struct jump_entry *entries;
struct static_key_mod *next;
};
+#endif /* CONFIG_JUMP_LABEL */
};
-#else
-struct static_key {
- atomic_t enabled;
-};
-#endif /* CONFIG_JUMP_LABEL */
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_JUMP_LABEL
@@ -171,9 +166,21 @@ static inline bool jump_entry_is_init(const struct jump_entry *entry)
return (unsigned long)entry->key & 2UL;
}
-static inline void jump_entry_set_init(struct jump_entry *entry)
+static inline void jump_entry_set_init(struct jump_entry *entry, bool set)
+{
+ if (set)
+ entry->key |= 2;
+ else
+ entry->key &= ~2;
+}
+
+static inline int jump_entry_size(struct jump_entry *entry)
{
- entry->key |= 2;
+#ifdef JUMP_LABEL_NOP_SIZE
+ return JUMP_LABEL_NOP_SIZE;
+#else
+ return arch_jump_entry_size(entry);
+#endif
}
#endif
@@ -213,8 +220,6 @@ extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type);
-extern void arch_jump_label_transform_static(struct jump_entry *entry,
- enum jump_label_type type);
extern bool arch_jump_label_transform_queue(struct jump_entry *entry,
enum jump_label_type type);
extern void arch_jump_label_transform_apply(void);
@@ -223,12 +228,12 @@ extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
extern void static_key_slow_inc_cpuslocked(struct static_key *key);
extern void static_key_slow_dec_cpuslocked(struct static_key *key);
-extern void jump_label_apply_nops(struct module *mod);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
extern void static_key_disable(struct static_key *key);
extern void static_key_enable_cpuslocked(struct static_key *key);
extern void static_key_disable_cpuslocked(struct static_key *key);
+extern enum jump_label_type jump_label_init_type(struct jump_entry *entry);
/*
* We should be using ATOMIC_INIT() for initializing .enabled, but
@@ -239,19 +244,19 @@ extern void static_key_disable_cpuslocked(struct static_key *key);
*/
#define STATIC_KEY_INIT_TRUE \
{ .enabled = { 1 }, \
- { .entries = (void *)JUMP_TYPE_TRUE } }
+ { .type = JUMP_TYPE_TRUE } }
#define STATIC_KEY_INIT_FALSE \
{ .enabled = { 0 }, \
- { .entries = (void *)JUMP_TYPE_FALSE } }
+ { .type = JUMP_TYPE_FALSE } }
#else /* !CONFIG_JUMP_LABEL */
#include <linux/atomic.h>
#include <linux/bug.h>
-static inline int static_key_count(struct static_key *key)
+static __always_inline int static_key_count(struct static_key *key)
{
- return atomic_read(&key->enabled);
+ return arch_atomic_read(&key->enabled);
}
static __always_inline void jump_label_init(void)
@@ -261,14 +266,14 @@ static __always_inline void jump_label_init(void)
static __always_inline bool static_key_false(struct static_key *key)
{
- if (unlikely(static_key_count(key) > 0))
+ if (unlikely_notrace(static_key_count(key) > 0))
return true;
return false;
}
static __always_inline bool static_key_true(struct static_key *key)
{
- if (likely(static_key_count(key) > 0))
+ if (likely_notrace(static_key_count(key) > 0))
return true;
return false;
}
@@ -296,11 +301,6 @@ static inline int jump_label_text_reserved(void *start, void *end)
static inline void jump_label_lock(void) {}
static inline void jump_label_unlock(void) {}
-static inline int jump_label_apply_nops(struct module *mod)
-{
- return 0;
-}
-
static inline void static_key_enable(struct static_key *key)
{
STATIC_KEY_CHECK_USE(key);
@@ -382,6 +382,21 @@ struct static_key_false {
[0 ... (count) - 1] = STATIC_KEY_FALSE_INIT, \
}
+#define _DEFINE_STATIC_KEY_1(name) DEFINE_STATIC_KEY_TRUE(name)
+#define _DEFINE_STATIC_KEY_0(name) DEFINE_STATIC_KEY_FALSE(name)
+#define DEFINE_STATIC_KEY_MAYBE(cfg, name) \
+ __PASTE(_DEFINE_STATIC_KEY_, IS_ENABLED(cfg))(name)
+
+#define _DEFINE_STATIC_KEY_RO_1(name) DEFINE_STATIC_KEY_TRUE_RO(name)
+#define _DEFINE_STATIC_KEY_RO_0(name) DEFINE_STATIC_KEY_FALSE_RO(name)
+#define DEFINE_STATIC_KEY_MAYBE_RO(cfg, name) \
+ __PASTE(_DEFINE_STATIC_KEY_RO_, IS_ENABLED(cfg))(name)
+
+#define _DECLARE_STATIC_KEY_1(name) DECLARE_STATIC_KEY_TRUE(name)
+#define _DECLARE_STATIC_KEY_0(name) DECLARE_STATIC_KEY_FALSE(name)
+#define DECLARE_STATIC_KEY_MAYBE(cfg, name) \
+ __PASTE(_DECLARE_STATIC_KEY_, IS_ENABLED(cfg))(name)
+
extern bool ____wrong_branch_error(void);
#define static_key_enabled(x) \
@@ -460,7 +475,7 @@ extern bool ____wrong_branch_error(void);
branch = !arch_static_branch_jump(&(x)->key, true); \
else \
branch = ____wrong_branch_error(); \
- likely(branch); \
+ likely_notrace(branch); \
})
#define static_branch_unlikely(x) \
@@ -472,16 +487,20 @@ extern bool ____wrong_branch_error(void);
branch = arch_static_branch(&(x)->key, false); \
else \
branch = ____wrong_branch_error(); \
- unlikely(branch); \
+ unlikely_notrace(branch); \
})
#else /* !CONFIG_JUMP_LABEL */
-#define static_branch_likely(x) likely(static_key_enabled(&(x)->key))
-#define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key))
+#define static_branch_likely(x) likely_notrace(static_key_enabled(&(x)->key))
+#define static_branch_unlikely(x) unlikely_notrace(static_key_enabled(&(x)->key))
#endif /* CONFIG_JUMP_LABEL */
+#define static_branch_maybe(config, x) \
+ (IS_ENABLED(config) ? static_branch_likely(x) \
+ : static_branch_unlikely(x))
+
/*
* Advanced usage; refcount, branch is enabled when: count != 0
*/
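A minimal sketch of the static-key pattern these declarations serve; the key and helper names are illustrative:

#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(my_feature_key);	/* branch is a NOP by default */

void my_hot_path(void)
{
	/* No load or compare here: the compiler emits a NOP that is
	 * patched to a jump when the key is enabled. */
	if (static_branch_unlikely(&my_feature_key))
		my_slow_feature();	/* hypothetical slow path */
}

void my_feature_setup(bool on)
{
	if (on)
		static_branch_enable(&my_feature_key);
	else
		static_branch_disable(&my_feature_key);
}

DEFINE_STATIC_KEY_MAYBE(CONFIG_FOO, key), added above, simply picks the true or false initializer from the Kconfig symbol so that the common case becomes the patched-in fast path.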
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 657a83b943f0..649faac31ddb 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -7,6 +7,7 @@
#define _LINUX_KALLSYMS_H
#include <linux/errno.h>
+#include <linux/buildid.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/mm.h>
@@ -14,31 +15,25 @@
#include <asm/sections.h>
-#define KSYM_NAME_LEN 128
-#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
- 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
+#define KSYM_NAME_LEN 512
+#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s %s]") + \
+ (KSYM_NAME_LEN - 1) + \
+ 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + \
+ (BUILD_ID_SIZE_MAX * 2) + 1)
+struct cred;
struct module;
-static inline int is_kernel_inittext(unsigned long addr)
-{
- if (addr >= (unsigned long)_sinittext
- && addr <= (unsigned long)_einittext)
- return 1;
- return 0;
-}
-
static inline int is_kernel_text(unsigned long addr)
{
- if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
- arch_is_kernel_text(addr))
+ if (__is_kernel_text(addr))
return 1;
return in_gate_area_no_mm(addr);
}
static inline int is_kernel(unsigned long addr)
{
- if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
+ if (__is_kernel(addr))
return 1;
return in_gate_area_no_mm(addr);
}
@@ -53,7 +48,7 @@ static inline int is_ksym_addr(unsigned long addr)
static inline void *dereference_symbol_descriptor(void *ptr)
{
-#ifdef HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR
+#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS
struct module *mod;
ptr = dereference_kernel_function_descriptor(ptr);
@@ -71,14 +66,13 @@ static inline void *dereference_symbol_descriptor(void *ptr)
}
#ifdef CONFIG_KALLSYMS
-/* Lookup the address for a symbol. Returns 0 if not found. */
-unsigned long kallsyms_lookup_name(const char *name);
-
-/* Call a function on each kallsyms symbol in the core kernel */
int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
unsigned long),
void *data);
+/* Lookup the address for a symbol. Returns 0 if not found. */
+unsigned long kallsyms_lookup_name(const char *name);
+
extern int kallsyms_lookup_size_offset(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset);
@@ -91,14 +85,16 @@ const char *kallsyms_lookup(unsigned long addr,
/* Look up a kernel symbol and return it in a text buffer. */
extern int sprint_symbol(char *buffer, unsigned long address);
+extern int sprint_symbol_build_id(char *buffer, unsigned long address);
extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
extern int sprint_backtrace(char *buffer, unsigned long address);
+extern int sprint_backtrace_build_id(char *buffer, unsigned long address);
int lookup_symbol_name(unsigned long addr, char *symname);
int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
/* How and when do we show kallsyms values? */
-extern int kallsyms_show_value(void);
+extern bool kallsyms_show_value(const struct cred *cred);
#else /* !CONFIG_KALLSYMS */
@@ -107,14 +103,6 @@ static inline unsigned long kallsyms_lookup_name(const char *name)
return 0;
}
-static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
- struct module *,
- unsigned long),
- void *data)
-{
- return 0;
-}
-
static inline int kallsyms_lookup_size_offset(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset)
@@ -136,6 +124,12 @@ static inline int sprint_symbol(char *buffer, unsigned long addr)
return 0;
}
+static inline int sprint_symbol_build_id(char *buffer, unsigned long address)
+{
+ *buffer = '\0';
+ return 0;
+}
+
static inline int sprint_symbol_no_offset(char *buffer, unsigned long addr)
{
*buffer = '\0';
@@ -148,6 +142,12 @@ static inline int sprint_backtrace(char *buffer, unsigned long addr)
return 0;
}
+static inline int sprint_backtrace_build_id(char *buffer, unsigned long addr)
+{
+ *buffer = '\0';
+ return 0;
+}
+
static inline int lookup_symbol_name(unsigned long addr, char *symname)
{
return -ERANGE;
@@ -158,16 +158,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
return -ERANGE;
}
-static inline int kallsyms_show_value(void)
+static inline bool kallsyms_show_value(const struct cred *cred)
{
return false;
}
+static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
+ unsigned long), void *data)
+{
+ return -EOPNOTSUPP;
+}
#endif /*CONFIG_KALLSYMS*/
-static inline void print_ip_sym(unsigned long ip)
+static inline void print_ip_sym(const char *loglvl, unsigned long ip)
{
- printk("[<%px>] %pS\n", (void *) ip, (void *) ip);
+ printk("%s[<%px>] %pS\n", loglvl, (void *) ip, (void *) ip);
}
#endif /*_LINUX_KALLSYMS_H*/
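A sketch of the lookup path these declarations serve; the buffer is sized with KSYM_SYMBOL_LEN, which now also accommodates the "[module buildid]" suffix of the build-id variants. The reporting function is illustrative:

#include <linux/kallsyms.h>
#include <linux/printk.h>

static void report_ip(const char *loglvl, unsigned long ip)
{
	char sym[KSYM_SYMBOL_LEN];

	/* Formats "name+0xoff/0xsize [module]" into sym. */
	sprint_symbol(sym, ip);
	printk("%s%s\n", loglvl, sym);
}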
diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h
index ac6aba632f2d..3d6d22a25bdc 100644
--- a/include/linux/kasan-checks.h
+++ b/include/linux/kasan-checks.h
@@ -5,11 +5,17 @@
#include <linux/types.h>
/*
+ * The annotations present in this file are only relevant for the software
+ * KASAN modes that rely on compiler instrumentation, and will be optimized
+ * away for the hardware tag-based KASAN mode. Use kasan_check_byte() instead.
+ */
+
+/*
* __kasan_check_*: Always available when KASAN is enabled. This may be used
* even in compilation units that selectively disable KASAN, but must use KASAN
* to validate access to an address. Never use these in header files!
*/
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
bool __kasan_check_read(const volatile void *p, unsigned int size);
bool __kasan_check_write(const volatile void *p, unsigned int size);
#else
diff --git a/include/linux/kasan-enabled.h b/include/linux/kasan-enabled.h
new file mode 100644
index 000000000000..6f612d69ea0c
--- /dev/null
+++ b/include/linux/kasan-enabled.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KASAN_ENABLED_H
+#define _LINUX_KASAN_ENABLED_H
+
+#include <linux/static_key.h>
+
+#ifdef CONFIG_KASAN_HW_TAGS
+
+DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
+
+static __always_inline bool kasan_enabled(void)
+{
+ return static_branch_likely(&kasan_flag_enabled);
+}
+
+static inline bool kasan_hw_tags_enabled(void)
+{
+ return kasan_enabled();
+}
+
+#else /* CONFIG_KASAN_HW_TAGS */
+
+static inline bool kasan_enabled(void)
+{
+ return IS_ENABLED(CONFIG_KASAN);
+}
+
+static inline bool kasan_hw_tags_enabled(void)
+{
+ return false;
+}
+
+#endif /* CONFIG_KASAN_HW_TAGS */
+
+#endif /* LINUX_KASAN_ENABLED_H */
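The point of the static key is that HW_TAGS hooks cost one patched branch when KASAN is compiled in but left disabled at boot. A sketch of the intended call pattern, with a hypothetical hook name:

#include <linux/kasan-enabled.h>
#include <linux/types.h>

void __my_kasan_hook(const void *addr, size_t size);	/* hypothetical slow path */

static __always_inline void my_kasan_hook(const void *addr, size_t size)
{
	/* Under CONFIG_KASAN_HW_TAGS this is a static branch flipped
	 * once at boot; otherwise it folds to a compile-time constant. */
	if (kasan_enabled())
		__my_kasan_hook(addr, size);
}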
diff --git a/include/linux/kasan-tags.h b/include/linux/kasan-tags.h
new file mode 100644
index 000000000000..4f85f562512c
--- /dev/null
+++ b/include/linux/kasan-tags.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KASAN_TAGS_H
+#define _LINUX_KASAN_TAGS_H
+
+#define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */
+#define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */
+#define KASAN_TAG_MAX 0xFD /* maximum value for random tags */
+
+#ifdef CONFIG_KASAN_HW_TAGS
+#define KASAN_TAG_MIN 0xF0 /* minimum value for random tags */
+#else
+#define KASAN_TAG_MIN 0x00 /* minimum value for random tags */
+#endif
+
+#endif /* LINUX_KASAN_TAGS_H */
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5cde9e7c2664..d811b3d7d2a1 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -2,22 +2,53 @@
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H
+#include <linux/bug.h>
+#include <linux/kasan-enabled.h>
+#include <linux/kernel.h>
+#include <linux/static_key.h>
#include <linux/types.h>
struct kmem_cache;
struct page;
+struct slab;
struct vm_struct;
struct task_struct;
#ifdef CONFIG_KASAN
+#include <linux/linkage.h>
#include <asm/kasan.h>
-#include <asm/pgtable.h>
+
+#endif
+
+typedef unsigned int __bitwise kasan_vmalloc_flags_t;
+
+#define KASAN_VMALLOC_NONE ((__force kasan_vmalloc_flags_t)0x00u)
+#define KASAN_VMALLOC_INIT ((__force kasan_vmalloc_flags_t)0x01u)
+#define KASAN_VMALLOC_VM_ALLOC ((__force kasan_vmalloc_flags_t)0x02u)
+#define KASAN_VMALLOC_PROT_NORMAL ((__force kasan_vmalloc_flags_t)0x04u)
+
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+
+#include <linux/pgtable.h>
+
+/* Software KASAN implementations use shadow memory. */
+
+#ifdef CONFIG_KASAN_SW_TAGS
+/* This matches KASAN_TAG_INVALID. */
+#define KASAN_SHADOW_INIT 0xFE
+#else
+#define KASAN_SHADOW_INIT 0
+#endif
+
+#ifndef PTE_HWTABLE_PTRS
+#define PTE_HWTABLE_PTRS 0
+#endif
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
-extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
-extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
-extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
+extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
+extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
+extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
int kasan_populate_early_shadow(const void *shadow_start,
@@ -29,91 +60,199 @@ static inline void *kasan_mem_to_shadow(const void *addr)
+ KASAN_SHADOW_OFFSET;
}
+int kasan_add_zero_shadow(void *start, unsigned long size);
+void kasan_remove_zero_shadow(void *start, unsigned long size);
+
/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);
/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);
-void kasan_unpoison_shadow(const void *address, size_t size);
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
-void kasan_unpoison_task_stack(struct task_struct *task);
-void kasan_unpoison_stack_above_sp_to(const void *watermark);
+static inline int kasan_add_zero_shadow(void *start, unsigned long size)
+{
+ return 0;
+}
+static inline void kasan_remove_zero_shadow(void *start,
+ unsigned long size)
+{}
-void kasan_alloc_pages(struct page *page, unsigned int order);
-void kasan_free_pages(struct page *page, unsigned int order);
+static inline void kasan_enable_current(void) {}
+static inline void kasan_disable_current(void) {}
-void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
- slab_flags_t *flags);
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
-void kasan_poison_slab(struct page *page);
-void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
-void kasan_poison_object_data(struct kmem_cache *cache, void *object);
-void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
- const void *object);
-
-void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
- gfp_t flags);
-void kasan_kfree_large(void *ptr, unsigned long ip);
-void kasan_poison_kfree(void *ptr, unsigned long ip);
-void * __must_check kasan_kmalloc(struct kmem_cache *s, const void *object,
- size_t size, gfp_t flags);
-void * __must_check kasan_krealloc(const void *object, size_t new_size,
- gfp_t flags);
-
-void * __must_check kasan_slab_alloc(struct kmem_cache *s, void *object,
- gfp_t flags);
-bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
+#ifdef CONFIG_KASAN_HW_TAGS
+
+#else /* CONFIG_KASAN_HW_TAGS */
+
+#endif /* CONFIG_KASAN_HW_TAGS */
+
+static inline bool kasan_has_integrated_init(void)
+{
+ return kasan_hw_tags_enabled();
+}
+
+#ifdef CONFIG_KASAN
struct kasan_cache {
+#ifdef CONFIG_KASAN_GENERIC
int alloc_meta_offset;
int free_meta_offset;
+#endif
+ bool is_kmalloc;
};
-/*
- * These functions provide a special case to support backing module
- * allocations with real shadow memory. With KASAN vmalloc, the special
- * case is unnecessary, as the work is handled in the generic case.
- */
-#ifndef CONFIG_KASAN_VMALLOC
-int kasan_module_alloc(void *addr, size_t size);
-void kasan_free_shadow(const struct vm_struct *vm);
-#else
-static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
-static inline void kasan_free_shadow(const struct vm_struct *vm) {}
-#endif
+void __kasan_unpoison_range(const void *addr, size_t size);
+static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
+{
+ if (kasan_enabled())
+ __kasan_unpoison_range(addr, size);
+}
-int kasan_add_zero_shadow(void *start, unsigned long size);
-void kasan_remove_zero_shadow(void *start, unsigned long size);
+void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline void kasan_poison_pages(struct page *page,
+ unsigned int order, bool init)
+{
+ if (kasan_enabled())
+ __kasan_poison_pages(page, order, init);
+}
-size_t __ksize(const void *);
-static inline void kasan_unpoison_slab(const void *ptr)
+void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline void kasan_unpoison_pages(struct page *page,
+ unsigned int order, bool init)
{
- kasan_unpoison_shadow(ptr, __ksize(ptr));
+ if (kasan_enabled())
+ __kasan_unpoison_pages(page, order, init);
}
-size_t kasan_metadata_size(struct kmem_cache *cache);
-bool kasan_save_enable_multi_shot(void);
-void kasan_restore_multi_shot(bool enabled);
+void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
+static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
+{
+ if (kasan_enabled())
+ __kasan_cache_create_kmalloc(cache);
+}
-#else /* CONFIG_KASAN */
+void __kasan_poison_slab(struct slab *slab);
+static __always_inline void kasan_poison_slab(struct slab *slab)
+{
+ if (kasan_enabled())
+ __kasan_poison_slab(slab);
+}
-static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
+void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
+static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+ void *object)
+{
+ if (kasan_enabled())
+ __kasan_unpoison_object_data(cache, object);
+}
-static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
-static inline void kasan_unpoison_stack_above_sp_to(const void *watermark) {}
+void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
+static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
+ void *object)
+{
+ if (kasan_enabled())
+ __kasan_poison_object_data(cache, object);
+}
-static inline void kasan_enable_current(void) {}
-static inline void kasan_disable_current(void) {}
+void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
+ const void *object);
+static __always_inline void * __must_check kasan_init_slab_obj(
+ struct kmem_cache *cache, const void *object)
+{
+ if (kasan_enabled())
+ return __kasan_init_slab_obj(cache, object);
+ return (void *)object;
+}
-static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
-static inline void kasan_free_pages(struct page *page, unsigned int order) {}
+bool __kasan_slab_free(struct kmem_cache *s, void *object,
+ unsigned long ip, bool init);
+static __always_inline bool kasan_slab_free(struct kmem_cache *s,
+ void *object, bool init)
+{
+ if (kasan_enabled())
+ return __kasan_slab_free(s, object, _RET_IP_, init);
+ return false;
+}
-static inline void kasan_cache_create(struct kmem_cache *cache,
- unsigned int *size,
- slab_flags_t *flags) {}
+void __kasan_kfree_large(void *ptr, unsigned long ip);
+static __always_inline void kasan_kfree_large(void *ptr)
+{
+ if (kasan_enabled())
+ __kasan_kfree_large(ptr, _RET_IP_);
+}
+
+void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
+static __always_inline void kasan_slab_free_mempool(void *ptr)
+{
+ if (kasan_enabled())
+ __kasan_slab_free_mempool(ptr, _RET_IP_);
+}
-static inline void kasan_poison_slab(struct page *page) {}
+void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
+ void *object, gfp_t flags, bool init);
+static __always_inline void * __must_check kasan_slab_alloc(
+ struct kmem_cache *s, void *object, gfp_t flags, bool init)
+{
+ if (kasan_enabled())
+ return __kasan_slab_alloc(s, object, flags, init);
+ return object;
+}
+
+void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
+ size_t size, gfp_t flags);
+static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
+ const void *object, size_t size, gfp_t flags)
+{
+ if (kasan_enabled())
+ return __kasan_kmalloc(s, object, size, flags);
+ return (void *)object;
+}
+
+void * __must_check __kasan_kmalloc_large(const void *ptr,
+ size_t size, gfp_t flags);
+static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
+ size_t size, gfp_t flags)
+{
+ if (kasan_enabled())
+ return __kasan_kmalloc_large(ptr, size, flags);
+ return (void *)ptr;
+}
+
+void * __must_check __kasan_krealloc(const void *object,
+ size_t new_size, gfp_t flags);
+static __always_inline void * __must_check kasan_krealloc(const void *object,
+ size_t new_size, gfp_t flags)
+{
+ if (kasan_enabled())
+ return __kasan_krealloc(object, new_size, flags);
+ return (void *)object;
+}
+
+/*
+ * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
+ * the hardware tag-based mode that doesn't rely on compiler instrumentation.
+ */
+bool __kasan_check_byte(const void *addr, unsigned long ip);
+static __always_inline bool kasan_check_byte(const void *addr)
+{
+ if (kasan_enabled())
+ return __kasan_check_byte(addr, _RET_IP_);
+ return true;
+}
+
+#else /* CONFIG_KASAN */
+
+static inline void kasan_unpoison_range(const void *address, size_t size) {}
+static inline void kasan_poison_pages(struct page *page, unsigned int order,
+ bool init) {}
+static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
+ bool init) {}
+static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
+static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
@@ -123,110 +262,214 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
{
return (void *)object;
}
-
-static inline void *kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags)
+static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
- return ptr;
+ return false;
+}
+static inline void kasan_kfree_large(void *ptr) {}
+static inline void kasan_slab_free_mempool(void *ptr) {}
+static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
+ gfp_t flags, bool init)
+{
+ return object;
}
-static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
-static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
size_t size, gfp_t flags)
{
return (void *)object;
}
+static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
+{
+ return (void *)ptr;
+}
static inline void *kasan_krealloc(const void *object, size_t new_size,
gfp_t flags)
{
return (void *)object;
}
-
-static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
- gfp_t flags)
-{
- return object;
-}
-static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
- unsigned long ip)
+static inline bool kasan_check_byte(const void *address)
{
- return false;
+ return true;
}
-static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
-static inline void kasan_free_shadow(const struct vm_struct *vm) {}
-
-static inline int kasan_add_zero_shadow(void *start, unsigned long size)
-{
- return 0;
-}
-static inline void kasan_remove_zero_shadow(void *start,
- unsigned long size)
-{}
-
-static inline void kasan_unpoison_slab(const void *ptr) { }
-static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
-
#endif /* CONFIG_KASAN */
+#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
+void kasan_unpoison_task_stack(struct task_struct *task);
+#else
+static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+#endif
+
#ifdef CONFIG_KASAN_GENERIC
-#define KASAN_SHADOW_INIT 0
+size_t kasan_metadata_size(struct kmem_cache *cache);
+slab_flags_t kasan_never_merge(void);
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
+ slab_flags_t *flags);
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
+void kasan_record_aux_stack(void *ptr);
+void kasan_record_aux_stack_noalloc(void *ptr);
#else /* CONFIG_KASAN_GENERIC */
+/* Tag-based KASAN modes do not use per-object metadata. */
+static inline size_t kasan_metadata_size(struct kmem_cache *cache)
+{
+ return 0;
+}
+/* And thus nothing prevents cache merging. */
+static inline slab_flags_t kasan_never_merge(void)
+{
+ return 0;
+}
+/* And no cache-related metadata initialization is required. */
+static inline void kasan_cache_create(struct kmem_cache *cache,
+ unsigned int *size,
+ slab_flags_t *flags) {}
+
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
+static inline void kasan_record_aux_stack(void *ptr) {}
+static inline void kasan_record_aux_stack_noalloc(void *ptr) {}
#endif /* CONFIG_KASAN_GENERIC */
-#ifdef CONFIG_KASAN_SW_TAGS
-
-#define KASAN_SHADOW_INIT 0xFF
+#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
-void kasan_init_tags(void);
-
-void *kasan_reset_tag(const void *addr);
+static inline void *kasan_reset_tag(const void *addr)
+{
+ return (void *)arch_kasan_reset_tag(addr);
+}
-void kasan_report(unsigned long addr, size_t size,
+/**
+ * kasan_report - print a report about a bad memory access detected by KASAN
+ * @addr: address of the bad access
+ * @size: size of the bad access
+ * @is_write: whether the bad access is a write or a read
+ * @ip: instruction pointer for the accessibility check or the bad access itself
+ */
+bool kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip);
-#else /* CONFIG_KASAN_SW_TAGS */
-
-static inline void kasan_init_tags(void) { }
+#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
static inline void *kasan_reset_tag(const void *addr)
{
return (void *)addr;
}
-#endif /* CONFIG_KASAN_SW_TAGS */
+#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS*/
+
+#ifdef CONFIG_KASAN_HW_TAGS
+
+void kasan_report_async(void);
+
+#endif /* CONFIG_KASAN_HW_TAGS */
+
+#ifdef CONFIG_KASAN_SW_TAGS
+void __init kasan_init_sw_tags(void);
+#else
+static inline void kasan_init_sw_tags(void) { }
+#endif
+
+#ifdef CONFIG_KASAN_HW_TAGS
+void kasan_init_hw_tags_cpu(void);
+void __init kasan_init_hw_tags(void);
+#else
+static inline void kasan_init_hw_tags_cpu(void) { }
+static inline void kasan_init_hw_tags(void) { }
+#endif
#ifdef CONFIG_KASAN_VMALLOC
+
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+
+void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
-void kasan_poison_vmalloc(const void *start, unsigned long size);
-void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
unsigned long free_region_start,
unsigned long free_region_end);
-#else
+
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+
+static inline void kasan_populate_early_vm_area_shadow(void *start,
+ unsigned long size)
+{ }
static inline int kasan_populate_vmalloc(unsigned long start,
unsigned long size)
{
return 0;
}
+static inline void kasan_release_vmalloc(unsigned long start,
+ unsigned long end,
+ unsigned long free_region_start,
+ unsigned long free_region_end) { }
-static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
-{ }
-static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
-{ }
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+
+void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
+ kasan_vmalloc_flags_t flags);
+static __always_inline void *kasan_unpoison_vmalloc(const void *start,
+ unsigned long size,
+ kasan_vmalloc_flags_t flags)
+{
+ if (kasan_enabled())
+ return __kasan_unpoison_vmalloc(start, size, flags);
+ return (void *)start;
+}
+
+void __kasan_poison_vmalloc(const void *start, unsigned long size);
+static __always_inline void kasan_poison_vmalloc(const void *start,
+ unsigned long size)
+{
+ if (kasan_enabled())
+ __kasan_poison_vmalloc(start, size);
+}
+
+#else /* CONFIG_KASAN_VMALLOC */
+
+static inline void kasan_populate_early_vm_area_shadow(void *start,
+ unsigned long size) { }
+static inline int kasan_populate_vmalloc(unsigned long start,
+ unsigned long size)
+{
+ return 0;
+}
static inline void kasan_release_vmalloc(unsigned long start,
unsigned long end,
unsigned long free_region_start,
- unsigned long free_region_end) {}
-#endif
+ unsigned long free_region_end) { }
+
+static inline void *kasan_unpoison_vmalloc(const void *start,
+ unsigned long size,
+ kasan_vmalloc_flags_t flags)
+{
+ return (void *)start;
+}
+static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
+{ }
+
+#endif /* CONFIG_KASAN_VMALLOC */
+
+#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+ !defined(CONFIG_KASAN_VMALLOC)
+
+/*
+ * These functions allocate and free shadow memory for kernel modules.
+ * They are only required when KASAN_VMALLOC is not supported, as otherwise
+ * shadow memory is allocated by the generic vmalloc handlers.
+ */
+int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
+void kasan_free_module_shadow(const struct vm_struct *vm);
+
+#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
+
+static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
+static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}
+
+#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
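A sketch of how an allocator combines the kasan_vmalloc_flags_t bits introduced above. The wrapper below is illustrative; the authoritative caller is mm/vmalloc.c:

#include <linux/kasan.h>

static void *finish_vm_alloc(void *addr, unsigned long size, bool zeroed)
{
	kasan_vmalloc_flags_t flags = KASAN_VMALLOC_VM_ALLOC;

	if (zeroed)
		flags |= KASAN_VMALLOC_INIT;	/* memory already initialized */

	/* HW_TAGS may return a newly tagged pointer, so the return
	 * value must replace the original address. */
	return kasan_unpoison_vmalloc(addr, size, flags);
}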
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index bb2246c8ec13..c40811d79769 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -6,12 +6,7 @@
#include <linux/interrupt.h>
#include <linux/keyboard.h>
-extern struct tasklet_struct keyboard_tasklet;
-
extern char *func_table[MAX_NR_FUNC];
-extern char func_buf[];
-extern char *funcbufptr;
-extern int funcbufsize, funcbufleft;
/*
* kbd->xxx contains the VC-local things (flag settings etc..)
@@ -74,12 +69,6 @@ extern void (*kbd_ledfunc)(unsigned int led);
extern int set_console(int nr);
extern void schedule_console_callback(void);
-/* FIXME: review locking for vt.c callers */
-static inline void set_leds(void)
-{
- tasklet_schedule(&keyboard_tasklet);
-}
-
static inline int vc_kbd_mode(struct kbd_struct * kbd, int flag)
{
return ((kbd->modeflags >> flag) & 1);
@@ -138,7 +127,7 @@ static inline void chg_vc_kbd_led(struct kbd_struct * kbd, int flag)
struct console;
-void compute_shiftstate(void);
+void vt_set_leds_compute_shiftstate(void);
/* defkeymap.c */
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index cc8fa109cfa3..20d1079e92b4 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -51,7 +51,8 @@
/*
* IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
- * otherwise.
+ * otherwise. CONFIG_FOO=m results in "#define CONFIG_FOO_MODULE 1" in
+ * autoconf.h.
*/
#define IS_MODULE(option) __is_defined(option##_MODULE)
@@ -66,7 +67,8 @@
/*
* IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
- * 0 otherwise.
+ * 0 otherwise. Note that CONFIG_FOO=y results in "#define CONFIG_FOO 1" in
+ * autoconf.h, while CONFIG_FOO=m results in "#define CONFIG_FOO_MODULE 1".
*/
#define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option))
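Concretely, the three macros test for the generated defines as follows; foo_init() is illustrative:

#include <linux/kconfig.h>

static int maybe_init_foo(void)
{
	/* .config value         generated autoconf.h line
	 * CONFIG_FOO=y     ->   #define CONFIG_FOO 1
	 * CONFIG_FOO=m     ->   #define CONFIG_FOO_MODULE 1
	 * CONFIG_FOO unset ->   (no define at all)
	 */
	if (IS_ENABLED(CONFIG_FOO))	/* 1 for y or m, 0 otherwise */
		return foo_init();	/* dead-code eliminated when unset */
	return 0;
}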
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
index da676cdbd727..86c0f1d18998 100644
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -11,14 +11,11 @@ enum kcore_type {
KCORE_RAM,
KCORE_VMEMMAP,
KCORE_USER,
- KCORE_OTHER,
- KCORE_REMAP,
};
struct kcore_list {
struct list_head list;
unsigned long addr;
- unsigned long vaddr;
size_t size;
int type;
};
diff --git a/include/linux/kcov.h b/include/linux/kcov.h
index a10e84707d82..55dc338f6bcd 100644
--- a/include/linux/kcov.h
+++ b/include/linux/kcov.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_KCOV_H
#define _LINUX_KCOV_H
+#include <linux/sched.h>
#include <uapi/linux/kcov.h>
struct task_struct;
@@ -52,6 +53,25 @@ static inline void kcov_remote_start_usb(u64 id)
kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_USB, id));
}
+/*
+ * The softirq flavor of kcov_remote_*() functions is introduced as a temporary
+ * workaround for kcov's lack of support for nested remote coverage sections
+ * in task context. Adding support for nested sections is tracked in:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=210337
+ */
+
+static inline void kcov_remote_start_usb_softirq(u64 id)
+{
+ if (in_serving_softirq())
+ kcov_remote_start_usb(id);
+}
+
+static inline void kcov_remote_stop_softirq(void)
+{
+ if (in_serving_softirq())
+ kcov_remote_stop();
+}
+
#else
static inline void kcov_task_init(struct task_struct *t) {}
@@ -66,6 +86,8 @@ static inline u64 kcov_common_handle(void)
}
static inline void kcov_remote_start_common(u64 id) {}
static inline void kcov_remote_start_usb(u64 id) {}
+static inline void kcov_remote_start_usb_softirq(u64 id) {}
+static inline void kcov_remote_stop_softirq(void) {}
#endif /* CONFIG_KCOV */
#endif /* _LINUX_KCOV_H */
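A sketch of the softirq helpers in a completion handler that may run in either task or softirq context; the handler and its body are illustrative (the in-tree user is the USB HCD giveback path):

#include <linux/kcov.h>

static void my_giveback(u64 bus_id)
{
	/* Opens a remote coverage section only when actually running
	 * in a softirq, sidestepping the nesting limitation above. */
	kcov_remote_start_usb_softirq(bus_id);
	handle_completion();	/* hypothetical handler body */
	kcov_remote_stop_softirq();
}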
diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
new file mode 100644
index 000000000000..92f3843d9ebb
--- /dev/null
+++ b/include/linux/kcsan-checks.h
@@ -0,0 +1,533 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KCSAN access checks and modifiers. These can be used to explicitly check
+ * uninstrumented accesses, or change KCSAN checking behaviour of accesses.
+ *
+ * Copyright (C) 2019, Google LLC.
+ */
+
+#ifndef _LINUX_KCSAN_CHECKS_H
+#define _LINUX_KCSAN_CHECKS_H
+
+/* Note: Only include what is already included by compiler.h. */
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
+
+/* Access types -- if KCSAN_ACCESS_WRITE is not set, the access is a read. */
+#define KCSAN_ACCESS_WRITE (1 << 0) /* Access is a write. */
+#define KCSAN_ACCESS_COMPOUND (1 << 1) /* Compounded read-write instrumentation. */
+#define KCSAN_ACCESS_ATOMIC (1 << 2) /* Access is atomic. */
+/* The following are special, and never due to compiler instrumentation. */
+#define KCSAN_ACCESS_ASSERT (1 << 3) /* Access is an assertion. */
+#define KCSAN_ACCESS_SCOPED (1 << 4) /* Access is a scoped access. */
+
+/*
+ * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used
+ * even in compilation units that selectively disable KCSAN, but must use KCSAN
+ * to validate access to an address. Never use these in header files!
+ */
+#ifdef CONFIG_KCSAN
+/**
+ * __kcsan_check_access - check generic access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ * @type: access type modifier
+ */
+void __kcsan_check_access(const volatile void *ptr, size_t size, int type);
+
+/*
+ * See definition of __tsan_atomic_signal_fence() in kernel/kcsan/core.c.
+ * Note: The mappings are arbitrary, and do not reflect any real mappings of C11
+ * memory orders to the LKMM memory orders and vice-versa!
+ */
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_mb __ATOMIC_SEQ_CST
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_wmb __ATOMIC_ACQ_REL
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_rmb __ATOMIC_ACQUIRE
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_release __ATOMIC_RELEASE
+
+/**
+ * __kcsan_mb - full memory barrier instrumentation
+ */
+void __kcsan_mb(void);
+
+/**
+ * __kcsan_wmb - write memory barrier instrumentation
+ */
+void __kcsan_wmb(void);
+
+/**
+ * __kcsan_rmb - read memory barrier instrumentation
+ */
+void __kcsan_rmb(void);
+
+/**
+ * __kcsan_release - release barrier instrumentation
+ */
+void __kcsan_release(void);
+
+/**
+ * kcsan_disable_current - disable KCSAN for the current context
+ *
+ * Supports nesting.
+ */
+void kcsan_disable_current(void);
+
+/**
+ * kcsan_enable_current - re-enable KCSAN for the current context
+ *
+ * Supports nesting.
+ */
+void kcsan_enable_current(void);
+void kcsan_enable_current_nowarn(void); /* Safe in uaccess regions. */
+
+/**
+ * kcsan_nestable_atomic_begin - begin nestable atomic region
+ *
+ * Accesses within the atomic region may appear to race with other accesses but
+ * should be considered atomic.
+ */
+void kcsan_nestable_atomic_begin(void);
+
+/**
+ * kcsan_nestable_atomic_end - end nestable atomic region
+ */
+void kcsan_nestable_atomic_end(void);
+
+/**
+ * kcsan_flat_atomic_begin - begin flat atomic region
+ *
+ * Accesses within the atomic region may appear to race with other accesses but
+ * should be considered atomic.
+ */
+void kcsan_flat_atomic_begin(void);
+
+/**
+ * kcsan_flat_atomic_end - end flat atomic region
+ */
+void kcsan_flat_atomic_end(void);
+
+/**
+ * kcsan_atomic_next - consider following accesses as atomic
+ *
+ * Force treating the next n memory accesses for the current context as atomic
+ * operations.
+ *
+ * @n: number of following memory accesses to treat as atomic.
+ */
+void kcsan_atomic_next(int n);
+
+/**
+ * kcsan_set_access_mask - set access mask
+ *
+ * Set the access mask for all accesses for the current context if non-zero.
+ * Only value changes to bits set in the mask will be reported.
+ *
+ * @mask: bitmask
+ */
+void kcsan_set_access_mask(unsigned long mask);
+
+/* Scoped access information. */
+struct kcsan_scoped_access {
+ union {
+ struct list_head list; /* scoped_accesses list */
+ /*
+ * Not an entry in scoped_accesses list; stack depth from where
+ * the access was initialized.
+ */
+ int stack_depth;
+ };
+
+ /* Access information. */
+ const volatile void *ptr;
+ size_t size;
+ int type;
+ /* Location where scoped access was set up. */
+ unsigned long ip;
+};
+/*
+ * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
+ * out of scope; relies on attribute "cleanup", which is supported by all
+ * compilers that support KCSAN.
+ */
+#define __kcsan_cleanup_scoped \
+ __maybe_unused __attribute__((__cleanup__(kcsan_end_scoped_access)))
+
+/**
+ * kcsan_begin_scoped_access - begin scoped access
+ *
+ * Begin scoped access and initialize @sa, which will cause KCSAN to
+ * continuously check the memory range in the current thread until
+ * kcsan_end_scoped_access() is called for @sa.
+ *
+ * Scoped accesses are implemented by appending @sa to an internal list for the
+ * current execution context, and then checked on every call into the KCSAN
+ * runtime.
+ *
+ * @ptr: address of access
+ * @size: size of access
+ * @type: access type modifier
+ * @sa: struct kcsan_scoped_access to use for the scope of the access
+ */
+struct kcsan_scoped_access *
+kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
+ struct kcsan_scoped_access *sa);
+
+/**
+ * kcsan_end_scoped_access - end scoped access
+ *
+ * End a scoped access, which will stop KCSAN checking the memory range.
+ * Requires that kcsan_begin_scoped_access() was previously called once for @sa.
+ *
+ * @sa: a previously initialized struct kcsan_scoped_access
+ */
+void kcsan_end_scoped_access(struct kcsan_scoped_access *sa);
+
+
+#else /* CONFIG_KCSAN */
+
+static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
+ int type) { }
+
+static inline void __kcsan_mb(void) { }
+static inline void __kcsan_wmb(void) { }
+static inline void __kcsan_rmb(void) { }
+static inline void __kcsan_release(void) { }
+static inline void kcsan_disable_current(void) { }
+static inline void kcsan_enable_current(void) { }
+static inline void kcsan_enable_current_nowarn(void) { }
+static inline void kcsan_nestable_atomic_begin(void) { }
+static inline void kcsan_nestable_atomic_end(void) { }
+static inline void kcsan_flat_atomic_begin(void) { }
+static inline void kcsan_flat_atomic_end(void) { }
+static inline void kcsan_atomic_next(int n) { }
+static inline void kcsan_set_access_mask(unsigned long mask) { }
+
+struct kcsan_scoped_access { };
+#define __kcsan_cleanup_scoped __maybe_unused
+static inline struct kcsan_scoped_access *
+kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
+ struct kcsan_scoped_access *sa) { return sa; }
+static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { }
+
+#endif /* CONFIG_KCSAN */
+
+#ifdef __SANITIZE_THREAD__
+/*
+ * Only calls into the runtime when the particular compilation unit has KCSAN
+ * instrumentation enabled. May be used in header files.
+ */
+#define kcsan_check_access __kcsan_check_access
+
+/*
+ * Only use these to disable KCSAN for accesses in the current compilation unit;
+ * calls into libraries may still perform KCSAN checks.
+ */
+#define __kcsan_disable_current kcsan_disable_current
+#define __kcsan_enable_current kcsan_enable_current_nowarn
+#else /* __SANITIZE_THREAD__ */
+static inline void kcsan_check_access(const volatile void *ptr, size_t size,
+ int type) { }
+static inline void __kcsan_enable_current(void) { }
+static inline void __kcsan_disable_current(void) { }
+#endif /* __SANITIZE_THREAD__ */
+
+#if defined(CONFIG_KCSAN_WEAK_MEMORY) && defined(__SANITIZE_THREAD__)
+/*
+ * Normal barrier instrumentation is not done via explicit calls, but by mapping
+ * to a repurposed __atomic_signal_fence(), which normally does not generate any
+ * real instructions, but is still intercepted by -fsanitize=thread. This means,
+ * like any other compile-time instrumentation, barrier instrumentation can be
+ * disabled with the __no_kcsan function attribute.
+ *
+ * Also see definition of __tsan_atomic_signal_fence() in kernel/kcsan/core.c.
+ *
+ * These are all macros, like <asm/barrier.h>, since some architectures use them
+ * in non-static inline functions.
+ */
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE(name) \
+ do { \
+ barrier(); \
+ __atomic_signal_fence(__KCSAN_BARRIER_TO_SIGNAL_FENCE_##name); \
+ barrier(); \
+ } while (0)
+#define kcsan_mb() __KCSAN_BARRIER_TO_SIGNAL_FENCE(mb)
+#define kcsan_wmb() __KCSAN_BARRIER_TO_SIGNAL_FENCE(wmb)
+#define kcsan_rmb() __KCSAN_BARRIER_TO_SIGNAL_FENCE(rmb)
+#define kcsan_release() __KCSAN_BARRIER_TO_SIGNAL_FENCE(release)
+#elif defined(CONFIG_KCSAN_WEAK_MEMORY) && defined(__KCSAN_INSTRUMENT_BARRIERS__)
+#define kcsan_mb __kcsan_mb
+#define kcsan_wmb __kcsan_wmb
+#define kcsan_rmb __kcsan_rmb
+#define kcsan_release __kcsan_release
+#else /* CONFIG_KCSAN_WEAK_MEMORY && ... */
+#define kcsan_mb() do { } while (0)
+#define kcsan_wmb() do { } while (0)
+#define kcsan_rmb() do { } while (0)
+#define kcsan_release() do { } while (0)
+#endif /* CONFIG_KCSAN_WEAK_MEMORY && ... */
+
+/**
+ * __kcsan_check_read - check regular read access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0)
+
+/**
+ * __kcsan_check_write - check regular write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define __kcsan_check_write(ptr, size) \
+ __kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
+
+/**
+ * __kcsan_check_read_write - check regular read-write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define __kcsan_check_read_write(ptr, size) \
+ __kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
+
+/**
+ * kcsan_check_read - check regular read access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0)
+
+/**
+ * kcsan_check_write - check regular write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define kcsan_check_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
+
+/**
+ * kcsan_check_read_write - check regular read-write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define kcsan_check_read_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
+
+/*
+ * Check for atomic accesses: if atomic accesses are not ignored, this simply
+ * aliases to kcsan_check_access(), otherwise becomes a no-op.
+ */
+#ifdef CONFIG_KCSAN_IGNORE_ATOMICS
+#define kcsan_check_atomic_read(...) do { } while (0)
+#define kcsan_check_atomic_write(...) do { } while (0)
+#define kcsan_check_atomic_read_write(...) do { } while (0)
+#else
+#define kcsan_check_atomic_read(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC)
+#define kcsan_check_atomic_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
+#define kcsan_check_atomic_read_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND)
+#endif
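To illustrate the checking macros above, a hypothetical marked-access wrapper (my_atomic_read() is not a kernel API) could pair the check with the access:

	/* Hypothetical wrapper: check for races first, then do the marked access. */
	static __always_inline int my_atomic_read(const atomic_t *v)
	{
		kcsan_check_atomic_read(v, sizeof(*v)); /* no-op if atomics are ignored */
		return READ_ONCE(v->counter);
	}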
+
+/**
+ * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var
+ *
+ * Assert that there are no concurrent writes to @var; other readers are
+ * allowed. This assertion can be used to specify properties of concurrent code,
+ * where violation cannot be detected as a normal data race.
+ *
+ * For example, if we only have a single writer, but multiple concurrent
+ * readers, to avoid data races, all these accesses must be marked; even
+ * concurrent marked writes racing with the single writer are bugs.
+ * Unfortunately, due to being marked, they are no longer data races. For cases
+ * like these, we can use the macro as follows:
+ *
+ * .. code-block:: c
+ *
+ * void writer(void) {
+ * spin_lock(&update_foo_lock);
+ * ASSERT_EXCLUSIVE_WRITER(shared_foo);
+ * WRITE_ONCE(shared_foo, ...);
+ * spin_unlock(&update_foo_lock);
+ * }
+ * void reader(void) {
+ * // update_foo_lock does not need to be held!
+ * ... = READ_ONCE(shared_foo);
+ * }
+ *
+ * Note: If there is a clear scope in which no concurrent writes are expected,
+ * ASSERT_EXCLUSIVE_WRITER_SCOPED() performs more thorough checking.
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_WRITER(var) \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)
+
+/*
+ * Helper macros for the implementation of ASSERT_EXCLUSIVE_*_SCOPED(). @id is
+ * expected to be unique for the scope in which instances of kcsan_scoped_access
+ * are declared.
+ */
+#define __kcsan_scoped_name(c, suffix) __kcsan_scoped_##c##suffix
+#define __ASSERT_EXCLUSIVE_SCOPED(var, type, id) \
+ struct kcsan_scoped_access __kcsan_scoped_name(id, _) \
+ __kcsan_cleanup_scoped; \
+ struct kcsan_scoped_access *__kcsan_scoped_name(id, _dummy_p) \
+ __maybe_unused = kcsan_begin_scoped_access( \
+ &(var), sizeof(var), KCSAN_ACCESS_SCOPED | (type), \
+ &__kcsan_scoped_name(id, _))
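For context, when CONFIG_KCSAN=y the cleanup hook used above is defined in a part of this header not shown in this hunk (the stub earlier in this file is the !CONFIG_KCSAN variant); roughly, as a sketch, it uses the compiler's cleanup attribute to end the scoped access automatically when the variable leaves scope:

	/* Sketch of the CONFIG_KCSAN=y definition. */
	#define __kcsan_cleanup_scoped \
		__maybe_unused __attribute__((__cleanup__(kcsan_end_scoped_access)))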
+
+/**
+ * ASSERT_EXCLUSIVE_WRITER_SCOPED - assert no concurrent writes to @var in scope
+ *
+ * Scoped variant of ASSERT_EXCLUSIVE_WRITER().
+ *
+ * Assert that there are no concurrent writes to @var for the duration of the
+ * scope in which it is introduced. This provides a better way to fully cover
+ * the enclosing scope, compared to multiple ASSERT_EXCLUSIVE_WRITER(), and
+ * increases the likelihood for KCSAN to detect racing accesses.
+ *
+ * For example, it allows finding race-condition bugs that only occur due to
+ * state changes within the scope itself:
+ *
+ * .. code-block:: c
+ *
+ * void writer(void) {
+ * spin_lock(&update_foo_lock);
+ * {
+ * ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_foo);
+ * WRITE_ONCE(shared_foo, 42);
+ * ...
+ * // shared_foo should still be 42 here!
+ * }
+ * spin_unlock(&update_foo_lock);
+ * }
+ * void buggy(void) {
+ * if (READ_ONCE(shared_foo) == 42)
+ * WRITE_ONCE(shared_foo, 1); // bug!
+ * }
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_WRITER_SCOPED(var) \
+ __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_ASSERT, __COUNTER__)
+
+/**
+ * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var
+ *
+ * Assert that there are no concurrent accesses to @var (no readers nor
+ * writers). This assertion can be used to specify properties of concurrent
+ * code, where violation cannot be detected as a normal data race.
+ *
+ * For example, where exclusive access is expected after determining no other
+ * users of an object are left, but the object is not actually freed. We can
+ * check that this property actually holds as follows:
+ *
+ * .. code-block:: c
+ *
+ * if (refcount_dec_and_test(&obj->refcnt)) {
+ * ASSERT_EXCLUSIVE_ACCESS(*obj);
+ * do_some_cleanup(obj);
+ * release_for_reuse(obj);
+ * }
+ *
+ * Note:
+ *
+ * 1. If there is a clear scope in which no concurrent accesses are expected,
+ * ASSERT_EXCLUSIVE_ACCESS_SCOPED() performs more thorough checking.
+ *
+ * 2. For cases where the object is freed, `KASAN <kasan.html>`_ is a better
+ * fit to detect use-after-free bugs.
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_ACCESS(var) \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)
+
+/**
+ * ASSERT_EXCLUSIVE_ACCESS_SCOPED - assert no concurrent accesses to @var in scope
+ *
+ * Scoped variant of ASSERT_EXCLUSIVE_ACCESS().
+ *
+ * Assert that there are no concurrent accesses to @var (no readers nor writers)
+ * for the entire duration of the scope in which it is introduced. This provides
+ * a better way to fully cover the enclosing scope, compared to multiple
+ * ASSERT_EXCLUSIVE_ACCESS(), and increases the likelihood for KCSAN to detect
+ * racing accesses.
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_ACCESS_SCOPED(var) \
+ __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, __COUNTER__)
+
+/**
+ * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var
+ *
+ * Bit-granular variant of ASSERT_EXCLUSIVE_WRITER().
+ *
+ * Assert that there are no concurrent writes to a subset of bits in @var;
+ * concurrent readers are permitted. This assertion captures more detailed
+ * bit-level properties, compared to the other (word granularity) assertions.
+ * Only the bits set in @mask are checked for concurrent modifications;
+ * concurrent writes (or reads) to the ~mask bits are ignored.
+ *
+ * Use this for variables where some bits must not be modified concurrently,
+ * yet other bits are expected to be modified concurrently.
+ *
+ * For example, variables where, after initialization, some bits are read-only,
+ * but other bits may still be modified concurrently. A reader may wish to
+ * assert that this is true as follows:
+ *
+ * .. code-block:: c
+ *
+ * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
+ * foo = (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
+ *
+ * Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is assumed
+ * to access the masked bits only; KCSAN optimistically assumes it is therefore
+ * safe, even in the presence of data races, so marking it with READ_ONCE() is
+ * optional from KCSAN's point-of-view. We caution, however, that it may still
+ * be advisable to do so, since we cannot reason about all compiler
+ * optimizations when it comes to bit manipulations (on the reader and writer
+ * side). If you are sure nothing can go wrong, the above can be written simply
+ * as:
+ *
+ * .. code-block:: c
+ *
+ * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
+ * foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
+ *
+ * Another example where this may be used is when certain bits of @var may
+ * only be modified when holding the appropriate lock, while other bits may
+ * still change concurrently. A writer holding the lock could then use the
+ * assertion as follows:
+ *
+ * .. code-block:: c
+ *
+ * spin_lock(&foo_lock);
+ * ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
+ * old_flags = flags;
+ * new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT);
+ * if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... }
+ * spin_unlock(&foo_lock);
+ *
+ * @var: variable to assert on
+ * @mask: only check for modifications to bits set in @mask
+ */
+#define ASSERT_EXCLUSIVE_BITS(var, mask) \
+ do { \
+ kcsan_set_access_mask(mask); \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);\
+ kcsan_set_access_mask(0); \
+ kcsan_atomic_next(1); \
+ } while (0)
+
+#endif /* _LINUX_KCSAN_CHECKS_H */
diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h
new file mode 100644
index 000000000000..c07c71f5ba4f
--- /dev/null
+++ b/include/linux/kcsan.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The Kernel Concurrency Sanitizer (KCSAN) infrastructure. Public interface and
+ * data structures to set up runtime. See kcsan-checks.h for explicit checks and
+ * modifiers. For more info please see Documentation/dev-tools/kcsan.rst.
+ *
+ * Copyright (C) 2019, Google LLC.
+ */
+
+#ifndef _LINUX_KCSAN_H
+#define _LINUX_KCSAN_H
+
+#include <linux/kcsan-checks.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_KCSAN
+
+/*
+ * Context for each thread of execution: for tasks, this is stored in
+ * task_struct, and interrupts access internal per-CPU storage.
+ */
+struct kcsan_ctx {
+ int disable_count; /* disable counter */
+ int disable_scoped; /* disable scoped access counter */
+ int atomic_next; /* number of following atomic ops */
+
+ /*
+ * We distinguish between: (a) nestable atomic regions that may contain
+ * other nestable regions; and (b) flat atomic regions that do not keep
+ * track of nesting. Both (a) and (b) are entirely independent of each
+ * other, and a flat region may be started in a nestable region or
+ * vice-versa.
+ *
+ * This is required because, for example, in the annotations for
+ * seqlocks, we declare seqlock writer critical sections as (a) nestable
+ * atomic regions, but reader critical sections as (b) flat atomic
+ * regions, yet we have encountered cases where seqlock reader critical
+ * sections are contained within writer critical sections (the opposite
+ * may be possible, too).
+ *
+ * To support these cases, we independently track the depth of nesting
+ * for (a), and whether the leaf level is flat for (b).
+ */
+ int atomic_nest_count;
+ bool in_flat_atomic;
+
+ /*
+ * Access mask for all accesses if non-zero.
+ */
+ unsigned long access_mask;
+
+ /* List of scoped accesses; likely to be empty. */
+ struct list_head scoped_accesses;
+
+#ifdef CONFIG_KCSAN_WEAK_MEMORY
+ /*
+ * Scoped access for modeling access reordering to detect missing memory
+ * barriers; only keep 1 to keep fast-path complexity manageable.
+ */
+ struct kcsan_scoped_access reorder_access;
+#endif
+};
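A sketch of how the two region kinds are driven, using the begin/end helpers declared in kcsan-checks.h (the example functions are illustrative, not kernel APIs):

	static void example_writer_section(void)
	{
		kcsan_nestable_atomic_begin();	/* (a): may contain nested regions */
		/* ... seqlock writer critical section ... */
		kcsan_nestable_atomic_end();
	}

	static void example_reader_section(void)
	{
		kcsan_flat_atomic_begin();	/* (b): nesting depth not tracked */
		/* ... seqlock reader critical section ... */
		kcsan_flat_atomic_end();
	}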
+
+/**
+ * kcsan_init - initialize KCSAN runtime
+ */
+void kcsan_init(void);
+
+#else /* CONFIG_KCSAN */
+
+static inline void kcsan_init(void) { }
+
+#endif /* CONFIG_KCSAN */
+
+#endif /* _LINUX_KCSAN_H */
diff --git a/include/linux/kd.h b/include/linux/kd.h
deleted file mode 100644
index b130a18f860f..000000000000
--- a/include/linux/kd.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_KD_H
-#define _LINUX_KD_H
-
-#include <uapi/linux/kd.h>
-
-#define KD_FONT_FLAG_OLD 0x80000000 /* Invoked via old interface [compat] */
-#endif /* _LINUX_KD_H */
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 24cd447659e0..07dfb6a20a1c 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -13,6 +13,8 @@
* Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com>
*/
+#include <linux/list.h>
+
/* Shifted versions of the command enable bits are used if the command
* has no arguments (see kdb_check_flags). This allows commands, such as
* go, to have different permissions depending upon whether it is called
@@ -64,6 +66,17 @@ typedef enum {
typedef int (*kdb_func_t)(int, const char **);
+/* The KDB shell command table */
+typedef struct _kdbtab {
+ char *name; /* Command name */
+ kdb_func_t func; /* Function to execute command */
+ char *usage; /* Usage String for this command */
+ char *help; /* Help message for this command */
+ short minlen; /* Minimum legal # cmd chars required */
+ kdb_cmdflags_t flags; /* Command behaviour flags */
+ struct list_head list_node; /* Command list */
+} kdbtab_t;
+
#ifdef CONFIG_KGDB_KDB
#include <linux/init.h>
#include <linux/sched.h>
@@ -125,7 +138,7 @@ extern const char *kdb_diemsg;
#define KDB_FLAG_NO_I8042 (1 << 7) /* No i8042 chip is available, do
* not use keyboard */
-extern int kdb_flags; /* Global flags, see kdb_state for per cpu state */
+extern unsigned int kdb_flags; /* Global flags, see kdb_state for per cpu state */
extern void kdb_save_flags(void);
extern void kdb_restore_flags(void);
@@ -193,19 +206,13 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos)
#endif /* ! CONFIG_KALLSYMS */
/* Dynamic kdb shell command registration */
-extern int kdb_register(char *, kdb_func_t, char *, char *, short);
-extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
- short, kdb_cmdflags_t);
-extern int kdb_unregister(char *);
+extern int kdb_register(kdbtab_t *cmd);
+extern void kdb_unregister(kdbtab_t *cmd);
#else /* ! CONFIG_KGDB_KDB */
static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
static inline void kdb_init(int level) {}
-static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
- char *help, short minlen) { return 0; }
-static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage,
- char *help, short minlen,
- kdb_cmdflags_t flags) { return 0; }
-static inline int kdb_unregister(char *cmd) { return 0; }
+static inline int kdb_register(kdbtab_t *cmd) { return 0; }
+static inline void kdb_unregister(kdbtab_t *cmd) {}
#endif /* CONFIG_KGDB_KDB */
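With the table-based API above, callers now fill in a kdbtab_t and pass its address to kdb_register()/kdb_unregister(); a hedged usage sketch (command name and handler are hypothetical):

	static int kdb_mycmd(int argc, const char **argv)
	{
		kdb_printf("mycmd: %d argument(s)\n", argc);	/* hypothetical handler */
		return 0;
	}

	static kdbtab_t mycmd_tab = {
		.name	= "mycmd",
		.func	= kdb_mycmd,
		.usage	= "mycmd <arg>",
		.help	= "example command",
	};

	/* kdb_register(&mycmd_tab); ... kdb_unregister(&mycmd_tab); */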
enum {
KDB_NOT_INITIALIZED,
@@ -215,5 +222,6 @@ enum {
extern int kdbgetintenv(const char *, int *);
extern int kdb_set(int, const char **);
+int kdb_lsmod(int argc, const char **argv);
#endif /* !_KDB_H */
diff --git a/include/linux/kdev_t.h b/include/linux/kdev_t.h
index 85b5151911cf..4856706fbfeb 100644
--- a/include/linux/kdev_t.h
+++ b/include/linux/kdev_t.h
@@ -21,61 +21,61 @@
})
/* acceptable for old filesystems */
-static inline bool old_valid_dev(dev_t dev)
+static __always_inline bool old_valid_dev(dev_t dev)
{
return MAJOR(dev) < 256 && MINOR(dev) < 256;
}
-static inline u16 old_encode_dev(dev_t dev)
+static __always_inline u16 old_encode_dev(dev_t dev)
{
return (MAJOR(dev) << 8) | MINOR(dev);
}
-static inline dev_t old_decode_dev(u16 val)
+static __always_inline dev_t old_decode_dev(u16 val)
{
return MKDEV((val >> 8) & 255, val & 255);
}
-static inline u32 new_encode_dev(dev_t dev)
+static __always_inline u32 new_encode_dev(dev_t dev)
{
unsigned major = MAJOR(dev);
unsigned minor = MINOR(dev);
return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
}
-static inline dev_t new_decode_dev(u32 dev)
+static __always_inline dev_t new_decode_dev(u32 dev)
{
unsigned major = (dev & 0xfff00) >> 8;
unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
return MKDEV(major, minor);
}
-static inline u64 huge_encode_dev(dev_t dev)
+static __always_inline u64 huge_encode_dev(dev_t dev)
{
return new_encode_dev(dev);
}
-static inline dev_t huge_decode_dev(u64 dev)
+static __always_inline dev_t huge_decode_dev(u64 dev)
{
return new_decode_dev(dev);
}
-static inline int sysv_valid_dev(dev_t dev)
+static __always_inline int sysv_valid_dev(dev_t dev)
{
return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18);
}
-static inline u32 sysv_encode_dev(dev_t dev)
+static __always_inline u32 sysv_encode_dev(dev_t dev)
{
return MINOR(dev) | (MAJOR(dev) << 18);
}
-static inline unsigned sysv_major(u32 dev)
+static __always_inline unsigned sysv_major(u32 dev)
{
return (dev >> 18) & 0x3fff;
}
-static inline unsigned sysv_minor(u32 dev)
+static __always_inline unsigned sysv_minor(u32 dev)
{
return dev & 0x3ffff;
}
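A worked example (not part of the patch) of the "new" encoding round-trip for MKDEV(0x123, 0x45678):

	/*
	 * new_encode_dev:  (0x45678 & 0xff)          = 0x00000078
	 *                  (0x123 << 8)              = 0x00012300
	 *                  ((0x45678 & ~0xff) << 12) = 0x45600000
	 *                  encoded value             = 0x45612378
	 *
	 * new_decode_dev(0x45612378) recovers MAJOR 0x123 and MINOR 0x45678.
	 */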
diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
index abd20ef93c98..eee1877a354e 100644
--- a/include/linux/kernel-page-flags.h
+++ b/include/linux/kernel-page-flags.h
@@ -17,5 +17,6 @@
#define KPF_ARCH 38
#define KPF_UNCACHED 39
#define KPF_SOFTDIRTY 40
+#define KPF_ARCH_2 41
#endif /* LINUX_KERNEL_PAGE_FLAGS_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 0d9db2a14f44..fe6efb24d151 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -1,23 +1,38 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * NOTE:
+ *
+ * This header has accumulated a lot of unrelated stuff.
+ * The process of splitting its content up is in progress while keeping
+ * backward compatibility. That's why it's highly recommended NOT to
+ * include this header inside another header file, especially under a
+ * generic or architecture-specific include/ directory.
+ */
#ifndef _LINUX_KERNEL_H
#define _LINUX_KERNEL_H
-
-#include <stdarg.h>
+#include <linux/stdarg.h>
+#include <linux/align.h>
#include <linux/limits.h>
#include <linux/linkage.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/compiler.h>
+#include <linux/container_of.h>
#include <linux/bitops.h>
+#include <linux/kstrtox.h>
#include <linux/log2.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
#include <linux/typecheck.h>
+#include <linux/panic.h>
#include <linux/printk.h>
#include <linux/build_bug.h>
+#include <linux/static_call_types.h>
+#include <linux/instruction_pointer.h>
#include <asm/byteorder.h>
-#include <asm/div64.h>
+
#include <uapi/linux/kernel.h>
-#include <asm/div64.h>
#define STACK_MAGIC 0xdeadbeef
@@ -29,13 +44,6 @@
*/
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
-/* @a is a power of 2 value */
-#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
-#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
-#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
-#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
-#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
-
/* generic data direction definitions */
#define READ 0
#define WRITE 1
@@ -46,6 +54,8 @@
*/
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+#define PTR_IF(cond, ptr) ((cond) ? (ptr) : NULL)
+
#define u64_to_user_ptr(x) ( \
{ \
typecheck(u64, (x)); \
@@ -53,156 +63,69 @@
} \
)
-/*
- * This looks more complex than it should be. But we need to
- * get the type for the ~ right in round_down (it needs to be
- * as wide as the result!), and we want to evaluate the macro
- * arguments just once each.
- */
-#define __round_mask(x, y) ((__typeof__(x))((y)-1))
/**
- * round_up - round up to next specified power of 2
- * @x: the value to round
- * @y: multiple to round up to (must be a power of 2)
+ * upper_32_bits - return bits 32-63 of a number
+ * @n: the number we're accessing
*
- * Rounds @x up to next multiple of @y (which must be a power of 2).
- * To perform arbitrary rounding up, use roundup() below.
+ * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
+ * the "right shift count >= width of type" warning when that quantity is
+ * 32 bits.
*/
-#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
+
/**
- * round_down - round down to next specified power of 2
- * @x: the value to round
- * @y: multiple to round down to (must be a power of 2)
- *
- * Rounds @x down to next multiple of @y (which must be a power of 2).
- * To perform arbitrary rounding down, use rounddown() below.
+ * lower_32_bits - return bits 0-31 of a number
+ * @n: the number we're accessing
*/
-#define round_down(x, y) ((x) & ~__round_mask(x, y))
-
-#define typeof_member(T, m) typeof(((T*)0)->m)
-
-#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
-
-#define DIV_ROUND_DOWN_ULL(ll, d) \
- ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
-
-#define DIV_ROUND_UP_ULL(ll, d) \
- DIV_ROUND_DOWN_ULL((unsigned long long)(ll) + (d) - 1, (d))
-
-#if BITS_PER_LONG == 32
-# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
-#else
-# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
-#endif
+#define lower_32_bits(n) ((u32)((n) & 0xffffffff))
/**
- * roundup - round up to the next specified multiple
- * @x: the value to up
- * @y: multiple to round up to
- *
- * Rounds @x up to next multiple of @y. If @y will always be a power
- * of 2, consider using the faster round_up().
+ * upper_16_bits - return bits 16-31 of a number
+ * @n: the number we're accessing
*/
-#define roundup(x, y) ( \
-{ \
- typeof(y) __y = y; \
- (((x) + (__y - 1)) / __y) * __y; \
-} \
-)
+#define upper_16_bits(n) ((u16)((n) >> 16))
+
/**
- * rounddown - round down to next specified multiple
- * @x: the value to round
- * @y: multiple to round down to
- *
- * Rounds @x down to next multiple of @y. If @y will always be a power
- * of 2, consider using the faster round_down().
+ * lower_16_bits - return bits 0-15 of a number
+ * @n: the number we're accessing
*/
-#define rounddown(x, y) ( \
-{ \
- typeof(x) __x = (x); \
- __x - (__x % (y)); \
-} \
-)
+#define lower_16_bits(n) ((u16)((n) & 0xffff))
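A typical use of the 32-bit halves (illustrative; dma_addr and the register offsets are hypothetical) is programming a 64-bit address into a pair of 32-bit device registers:

	/* Split a 64-bit DMA address across two 32-bit MMIO registers. */
	writel(lower_32_bits(dma_addr), base + REG_DMA_ADDR_LO);
	writel(upper_32_bits(dma_addr), base + REG_DMA_ADDR_HI);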
-/*
- * Divide positive or negative dividend by positive or negative divisor
- * and round to closest integer. Result is undefined for negative
- * divisors if the dividend variable type is unsigned and for negative
- * dividends if the divisor variable type is unsigned.
- */
-#define DIV_ROUND_CLOSEST(x, divisor)( \
-{ \
- typeof(x) __x = x; \
- typeof(divisor) __d = divisor; \
- (((typeof(x))-1) > 0 || \
- ((typeof(divisor))-1) > 0 || \
- (((__x) > 0) == ((__d) > 0))) ? \
- (((__x) + ((__d) / 2)) / (__d)) : \
- (((__x) - ((__d) / 2)) / (__d)); \
-} \
-)
-/*
- * Same as above but for u64 dividends. divisor must be a 32-bit
- * number.
- */
-#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
-{ \
- typeof(divisor) __d = divisor; \
- unsigned long long _tmp = (x) + (__d) / 2; \
- do_div(_tmp, __d); \
- _tmp; \
-} \
-)
+struct completion;
+struct user;
-/*
- * Multiplies an integer by a fraction, while avoiding unnecessary
- * overflow or loss of precision.
- */
-#define mult_frac(x, numer, denom)( \
-{ \
- typeof(x) quot = (x) / (denom); \
- typeof(x) rem = (x) % (denom); \
- (quot * (numer)) + ((rem * (numer)) / (denom)); \
-} \
-)
+#ifdef CONFIG_PREEMPT_VOLUNTARY_BUILD
+extern int __cond_resched(void);
+# define might_resched() __cond_resched()
-#define _RET_IP_ (unsigned long)__builtin_return_address(0)
-#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
+#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-#define sector_div(a, b) do_div(a, b)
+extern int __cond_resched(void);
-/**
- * upper_32_bits - return bits 32-63 of a number
- * @n: the number we're accessing
- *
- * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
- * the "right shift count >= width of type" warning when that quantity is
- * 32-bits.
- */
-#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
+DECLARE_STATIC_CALL(might_resched, __cond_resched);
-/**
- * lower_32_bits - return bits 0-31 of a number
- * @n: the number we're accessing
- */
-#define lower_32_bits(n) ((u32)(n))
+static __always_inline void might_resched(void)
+{
+ static_call_mod(might_resched)();
+}
-struct completion;
-struct pt_regs;
-struct user;
+#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+
+extern int dynamic_might_resched(void);
+# define might_resched() dynamic_might_resched()
-#ifdef CONFIG_PREEMPT_VOLUNTARY
-extern int _cond_resched(void);
-# define might_resched() _cond_resched()
#else
+
# define might_resched() do { } while (0)
-#endif
+
+#endif /* CONFIG_PREEMPT_* */
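For reference, a sketch of the scheduler-side setup (outside this diff): with HAVE_PREEMPT_DYNAMIC_CALL, selecting full preemption at boot retargets the static call so voluntary resched points become no-ops:

	/*
	 * Sketch, based on kernel/sched/core.c:
	 *	static_call_update(might_resched,
	 *			   (typeof(&__cond_resched))__static_call_return0);
	 */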
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-extern void ___might_sleep(const char *file, int line, int preempt_offset);
-extern void __might_sleep(const char *file, int line, int preempt_offset);
+extern void __might_resched(const char *file, int line, unsigned int offsets);
+extern void __might_sleep(const char *file, int line);
extern void __cant_sleep(const char *file, int line, int preempt_offset);
+extern void __cant_migrate(const char *file, int line);
/**
* might_sleep - annotation for functions that can sleep
@@ -217,7 +140,7 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
* supposed to.
*/
# define might_sleep() \
- do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
+ do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)
/**
* cant_sleep - annotation for functions that cannot sleep
*
@@ -226,6 +149,18 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
# define cant_sleep() \
do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)
# define sched_annotate_sleep() (current->task_state_change = 0)
+
+/**
+ * cant_migrate - annotation for functions that cannot migrate
+ *
+ * Will print a stack trace if executed in code which is migratable
+ */
+# define cant_migrate() \
+ do { \
+ if (IS_ENABLED(CONFIG_SMP)) \
+ __cant_migrate(__FILE__, __LINE__); \
+ } while (0)
+
/**
* non_block_start - annotate the start of section where sleeping is prohibited
*
@@ -244,12 +179,12 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
*/
# define non_block_end() WARN_ON(current->non_block_count-- == 0)
#else
- static inline void ___might_sleep(const char *file, int line,
- int preempt_offset) { }
- static inline void __might_sleep(const char *file, int line,
- int preempt_offset) { }
+ static inline void __might_resched(const char *file, int line,
+ unsigned int offsets) { }
+static inline void __might_sleep(const char *file, int line) { }
# define might_sleep() do { might_resched(); } while (0)
# define cant_sleep() do { } while (0)
+# define cant_migrate() do { } while (0)
# define sched_annotate_sleep() do { } while (0)
# define non_block_start() do { } while (0)
# define non_block_end() do { } while (0)
@@ -257,48 +192,6 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
-/**
- * abs - return absolute value of an argument
- * @x: the value. If it is unsigned type, it is converted to signed type first.
- * char is treated as if it was signed (regardless of whether it really is)
- * but the macro's return type is preserved as char.
- *
- * Return: an absolute value of x.
- */
-#define abs(x) __abs_choose_expr(x, long long, \
- __abs_choose_expr(x, long, \
- __abs_choose_expr(x, int, \
- __abs_choose_expr(x, short, \
- __abs_choose_expr(x, char, \
- __builtin_choose_expr( \
- __builtin_types_compatible_p(typeof(x), char), \
- (char)({ signed char __x = (x); __x<0?-__x:__x; }), \
- ((void)0)))))))
-
-#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \
- __builtin_types_compatible_p(typeof(x), signed type) || \
- __builtin_types_compatible_p(typeof(x), unsigned type), \
- ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
-
-/**
- * reciprocal_scale - "scale" a value into range [0, ep_ro)
- * @val: value
- * @ep_ro: right open interval endpoint
- *
- * Perform a "reciprocal multiplication" in order to "scale" a value into
- * range [0, @ep_ro), where the upper interval endpoint is right-open.
- * This is useful, e.g. for accessing a index of an array containing
- * @ep_ro elements, for example. Think of it as sort of modulus, only that
- * the result isn't that of modulo. ;) Note that if initial input is a
- * small value, then result will return 0.
- *
- * Return: a result based on @val in interval [0, @ep_ro).
- */
-static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
-{
- return (u32)(((u64) val * ep_ro) >> 32);
-}
-
#if defined(CONFIG_MMU) && \
(defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
#define might_fault() __might_fault(__FILE__, __LINE__)
@@ -307,159 +200,7 @@ void __might_fault(const char *file, int line);
static inline void might_fault(void) { }
#endif
-extern struct atomic_notifier_head panic_notifier_list;
-extern long (*panic_blink)(int state);
-__printf(1, 2)
-void panic(const char *fmt, ...) __noreturn __cold;
-void nmi_panic(struct pt_regs *regs, const char *msg);
-extern void oops_enter(void);
-extern void oops_exit(void);
-void print_oops_end_marker(void);
-extern int oops_may_print(void);
void do_exit(long error_code) __noreturn;
-void complete_and_exit(struct completion *, long) __noreturn;
-
-/* Internal, do not use. */
-int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
-int __must_check _kstrtol(const char *s, unsigned int base, long *res);
-
-int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
-int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
-
-/**
- * kstrtoul - convert a string to an unsigned long
- * @s: The start of the string. The string must be null-terminated, and may also
- * include a single newline before its terminating null. The first character
- * may also be a plus sign, but not a minus sign.
- * @base: The number base to use. The maximum supported base is 16. If base is
- * given as 0, then the base of the string is automatically detected with the
- * conventional semantics - If it begins with 0x the number will be parsed as a
- * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
- * parsed as an octal number. Otherwise it will be parsed as a decimal.
- * @res: Where to write the result of the conversion on success.
- *
- * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the simple_strtoull. Return code must be checked.
-*/
-static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
-{
- /*
- * We want to shortcut function call, but
- * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0.
- */
- if (sizeof(unsigned long) == sizeof(unsigned long long) &&
- __alignof__(unsigned long) == __alignof__(unsigned long long))
- return kstrtoull(s, base, (unsigned long long *)res);
- else
- return _kstrtoul(s, base, res);
-}
-
-/**
- * kstrtol - convert a string to a long
- * @s: The start of the string. The string must be null-terminated, and may also
- * include a single newline before its terminating null. The first character
- * may also be a plus sign or a minus sign.
- * @base: The number base to use. The maximum supported base is 16. If base is
- * given as 0, then the base of the string is automatically detected with the
- * conventional semantics - If it begins with 0x the number will be parsed as a
- * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
- * parsed as an octal number. Otherwise it will be parsed as a decimal.
- * @res: Where to write the result of the conversion on success.
- *
- * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the simple_strtoull. Return code must be checked.
- */
-static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
-{
- /*
- * We want to shortcut function call, but
- * __builtin_types_compatible_p(long, long long) = 0.
- */
- if (sizeof(long) == sizeof(long long) &&
- __alignof__(long) == __alignof__(long long))
- return kstrtoll(s, base, (long long *)res);
- else
- return _kstrtol(s, base, res);
-}
-
-int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
-int __must_check kstrtoint(const char *s, unsigned int base, int *res);
-
-static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
-{
- return kstrtoull(s, base, res);
-}
-
-static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
-{
- return kstrtoll(s, base, res);
-}
-
-static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
-{
- return kstrtouint(s, base, res);
-}
-
-static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
-{
- return kstrtoint(s, base, res);
-}
-
-int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
-int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
-int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
-int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
-int __must_check kstrtobool(const char *s, bool *res);
-
-int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
-int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
-int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
-int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
-int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
-int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
-int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
-int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
-int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
-int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
-int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
-
-static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
-{
- return kstrtoull_from_user(s, count, base, res);
-}
-
-static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
-{
- return kstrtoll_from_user(s, count, base, res);
-}
-
-static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
-{
- return kstrtouint_from_user(s, count, base, res);
-}
-
-static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
-{
- return kstrtoint_from_user(s, count, base, res);
-}
-
-/*
- * Use kstrto<foo> instead.
- *
- * NOTE: simple_strto<foo> does not check for the range overflow and,
- * depending on the input, may give interesting results.
- *
- * Use these functions if and only if you cannot use kstrto<foo>, because
- * the conversion ends on the first non-digit character, which may be far
- * beyond the supported range. It might be useful to parse the strings like
- * 10x50 or 12:21 without altering original string or temporary buffer in use.
- * Keep in mind above caveat.
- */
-
-extern unsigned long simple_strtoul(const char *,char **,unsigned int);
-extern long simple_strtol(const char *,char **,unsigned int);
-extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
-extern long long simple_strtoll(const char *,char **,unsigned int);
extern int num_to_str(char *buf, int size,
unsigned long long num, unsigned int width);
@@ -488,6 +229,8 @@ int sscanf(const char *, const char *, ...);
extern __scanf(2, 0)
int vsscanf(const char *, const char *, va_list);
+extern int no_hash_pointers_enable(char *str);
+
extern int get_option(char **str, int *pint);
extern char *get_options(const char *str, int nints, int *ints);
extern unsigned long long memparse(const char *ptr, char **retptr);
@@ -495,62 +238,12 @@ extern bool parse_option_str(const char *str, const char *option);
extern char *next_arg(char *args, char **param, char **val);
extern int core_kernel_text(unsigned long addr);
-extern int init_kernel_text(unsigned long addr);
-extern int core_kernel_data(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
extern int func_ptr_is_kernel_text(void *ptr);
-u64 int_pow(u64 base, unsigned int exp);
-unsigned long int_sqrt(unsigned long);
-
-#if BITS_PER_LONG < 64
-u32 int_sqrt64(u64 x);
-#else
-static inline u32 int_sqrt64(u64 x)
-{
- return (u32)int_sqrt(x);
-}
-#endif
-
extern void bust_spinlocks(int yes);
-extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */
-extern int panic_timeout;
-extern unsigned long panic_print;
-extern int panic_on_oops;
-extern int panic_on_unrecovered_nmi;
-extern int panic_on_io_nmi;
-extern int panic_on_warn;
-extern int sysctl_panic_on_rcu_stall;
-extern int sysctl_panic_on_stackoverflow;
-
-extern bool crash_kexec_post_notifiers;
-
-/*
- * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It
- * holds a CPU number which is executing panic() currently. A value of
- * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec().
- */
-extern atomic_t panic_cpu;
-#define PANIC_CPU_INVALID -1
-/*
- * Only to be used by arch init code. If the user over-wrote the default
- * CONFIG_PANIC_TIMEOUT, honor it.
- */
-static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
-{
- if (panic_timeout == arch_default_timeout)
- panic_timeout = timeout;
-}
-extern const char *print_tainted(void);
-enum lockdep_ok {
- LOCKDEP_STILL_OK,
- LOCKDEP_NOW_UNRELIABLE
-};
-extern void add_taint(unsigned flag, enum lockdep_ok);
-extern int test_taint(unsigned flag);
-extern unsigned long get_taint(void);
extern int root_mountflags;
extern bool early_boot_irqs_disabled;
@@ -562,6 +255,7 @@ extern bool early_boot_irqs_disabled;
extern enum system_states {
SYSTEM_BOOTING,
SYSTEM_SCHEDULING,
+ SYSTEM_FREEING_INITMEM,
SYSTEM_RUNNING,
SYSTEM_HALT,
SYSTEM_POWER_OFF,
@@ -569,35 +263,6 @@ extern enum system_states {
SYSTEM_SUSPEND,
} system_state;
-/* This cannot be an enum because some may be used in assembly source. */
-#define TAINT_PROPRIETARY_MODULE 0
-#define TAINT_FORCED_MODULE 1
-#define TAINT_CPU_OUT_OF_SPEC 2
-#define TAINT_FORCED_RMMOD 3
-#define TAINT_MACHINE_CHECK 4
-#define TAINT_BAD_PAGE 5
-#define TAINT_USER 6
-#define TAINT_DIE 7
-#define TAINT_OVERRIDDEN_ACPI_TABLE 8
-#define TAINT_WARN 9
-#define TAINT_CRAP 10
-#define TAINT_FIRMWARE_WORKAROUND 11
-#define TAINT_OOT_MODULE 12
-#define TAINT_UNSIGNED_MODULE 13
-#define TAINT_SOFTLOCKUP 14
-#define TAINT_LIVEPATCH 15
-#define TAINT_AUX 16
-#define TAINT_RANDSTRUCT 17
-#define TAINT_FLAGS_COUNT 18
-
-struct taint_flag {
- char c_true; /* character printed when tainted */
- char c_false; /* character printed when not tainted */
- bool module; /* also show as a per-module taint flag */
-};
-
-extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT];
-
extern const char hex_asc[];
#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
@@ -620,7 +285,7 @@ static inline char *hex_byte_pack_upper(char *buf, u8 byte)
return buf;
}
-extern int hex_to_bin(char ch);
+extern int hex_to_bin(unsigned char ch);
extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
extern char *bin2hex(char *dst, const void *src, size_t count);
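Since hex_to_bin() returns -1 on invalid input, callers can combine two nibbles and rely on sign propagation to detect errors; a minimal sketch:

	int hi = hex_to_bin(src[0]);
	int lo = hex_to_bin(src[1]);
	int byte = (hi << 4) | lo;	/* negative iff either nibble was invalid */

	if (byte < 0)
		return -EINVAL;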
@@ -714,7 +379,7 @@ do { \
#define do_trace_printk(fmt, args...) \
do { \
static const char *trace_printk_fmt __used \
- __attribute__((section("__trace_printk_fmt"))) = \
+ __section("__trace_printk_fmt") = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
__trace_printk_check_format(fmt, ##args); \
@@ -758,7 +423,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
#define trace_puts(str) ({ \
static const char *trace_printk_fmt __used \
- __attribute__((section("__trace_printk_fmt"))) = \
+ __section("__trace_printk_fmt") = \
__builtin_constant_p(str) ? str : NULL; \
\
if (__builtin_constant_p(str)) \
@@ -780,7 +445,7 @@ extern void trace_dump_stack(int skip);
do { \
if (__builtin_constant_p(fmt)) { \
static const char *trace_printk_fmt __used \
- __attribute__((section("__trace_printk_fmt"))) = \
+ __section("__trace_printk_fmt") = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
__ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
@@ -819,155 +484,6 @@ ftrace_vprintk(const char *fmt, va_list ap)
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif /* CONFIG_TRACING */
-/*
- * min()/max()/clamp() macros must accomplish three things:
- *
- * - avoid multiple evaluations of the arguments (so side-effects like
- * "x++" happen only once) when non-constant.
- * - perform strict type-checking (to generate warnings instead of
- * nasty runtime surprises). See the "unnecessary" pointer comparison
- * in __typecheck().
- * - retain result as a constant expressions when called with only
- * constant expressions (to avoid tripping VLA warnings in stack
- * allocation usage).
- */
-#define __typecheck(x, y) \
- (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
-
-/*
- * This returns a constant expression while determining if an argument is
- * a constant expression, most importantly without evaluating the argument.
- * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
- */
-#define __is_constexpr(x) \
- (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
-
-#define __no_side_effects(x, y) \
- (__is_constexpr(x) && __is_constexpr(y))
-
-#define __safe_cmp(x, y) \
- (__typecheck(x, y) && __no_side_effects(x, y))
-
-#define __cmp(x, y, op) ((x) op (y) ? (x) : (y))
-
-#define __cmp_once(x, y, unique_x, unique_y, op) ({ \
- typeof(x) unique_x = (x); \
- typeof(y) unique_y = (y); \
- __cmp(unique_x, unique_y, op); })
-
-#define __careful_cmp(x, y, op) \
- __builtin_choose_expr(__safe_cmp(x, y), \
- __cmp(x, y, op), \
- __cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op))
-
-/**
- * min - return minimum of two values of the same or compatible types
- * @x: first value
- * @y: second value
- */
-#define min(x, y) __careful_cmp(x, y, <)
-
-/**
- * max - return maximum of two values of the same or compatible types
- * @x: first value
- * @y: second value
- */
-#define max(x, y) __careful_cmp(x, y, >)
-
-/**
- * min3 - return minimum of three values
- * @x: first value
- * @y: second value
- * @z: third value
- */
-#define min3(x, y, z) min((typeof(x))min(x, y), z)
-
-/**
- * max3 - return maximum of three values
- * @x: first value
- * @y: second value
- * @z: third value
- */
-#define max3(x, y, z) max((typeof(x))max(x, y), z)
-
-/**
- * min_not_zero - return the minimum that is _not_ zero, unless both are zero
- * @x: value1
- * @y: value2
- */
-#define min_not_zero(x, y) ({ \
- typeof(x) __x = (x); \
- typeof(y) __y = (y); \
- __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
-
-/**
- * clamp - return a value clamped to a given range with strict typechecking
- * @val: current value
- * @lo: lowest allowable value
- * @hi: highest allowable value
- *
- * This macro does strict typechecking of @lo/@hi to make sure they are of the
- * same type as @val. See the unnecessary pointer comparisons.
- */
-#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
-
-/*
- * ..and if you can't take the strict
- * types, you can specify one yourself.
- *
- * Or not use min/max/clamp at all, of course.
- */
-
-/**
- * min_t - return minimum of two values, using the specified type
- * @type: data type to use
- * @x: first value
- * @y: second value
- */
-#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <)
-
-/**
- * max_t - return maximum of two values, using the specified type
- * @type: data type to use
- * @x: first value
- * @y: second value
- */
-#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >)
-
-/**
- * clamp_t - return a value clamped to a given range using a given type
- * @type: the type of variable to use
- * @val: current value
- * @lo: minimum allowable value
- * @hi: maximum allowable value
- *
- * This macro does no typechecking and uses temporary variables of type
- * @type to make all the comparisons.
- */
-#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
-
-/**
- * clamp_val - return a value clamped to a given range using val's type
- * @val: current value
- * @lo: minimum allowable value
- * @hi: maximum allowable value
- *
- * This macro does no typechecking and uses temporary variables of whatever
- * type the input argument @val is. This is useful when @val is an unsigned
- * type and @lo and @hi are literals that will otherwise be assigned a signed
- * integer type.
- */
-#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
-
-
-/**
- * swap - swap values of @a and @b
- * @a: first value
- * @b: second value
- */
-#define swap(a, b) \
- do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
-
/* This counts to 12. Any more, it will return 13th argument. */
#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
@@ -975,36 +491,6 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#define __CONCAT(a, b) a ## b
#define CONCATENATE(a, b) __CONCAT(a, b)
-/**
- * container_of - cast a member of a structure out to the containing structure
- * @ptr: the pointer to the member.
- * @type: the type of the container struct this is embedded in.
- * @member: the name of the member within the struct.
- *
- */
-#define container_of(ptr, type, member) ({ \
- void *__mptr = (void *)(ptr); \
- BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
- !__same_type(*(ptr), void), \
- "pointer type mismatch in container_of()"); \
- ((type *)(__mptr - offsetof(type, member))); })
-
-/**
- * container_of_safe - cast a member of a structure out to the containing structure
- * @ptr: the pointer to the member.
- * @type: the type of the container struct this is embedded in.
- * @member: the name of the member within the struct.
- *
- * If IS_ERR_OR_NULL(ptr), ptr is returned unchanged.
- */
-#define container_of_safe(ptr, type, member) ({ \
- void *__mptr = (void *)(ptr); \
- BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
- !__same_type(*(ptr), void), \
- "pointer type mismatch in container_of()"); \
- IS_ERR_OR_NULL(__mptr) ? ERR_CAST(__mptr) : \
- ((type *)(__mptr - offsetof(type, member))); })
-
/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
diff --git a/include/linux/kernel_read_file.h b/include/linux/kernel_read_file.h
new file mode 100644
index 000000000000..90451e2e12bd
--- /dev/null
+++ b/include/linux/kernel_read_file.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KERNEL_READ_FILE_H
+#define _LINUX_KERNEL_READ_FILE_H
+
+#include <linux/file.h>
+#include <linux/types.h>
+
+/* This is a list of *what* is being read, not *how* nor *where*. */
+#define __kernel_read_file_id(id) \
+ id(UNKNOWN, unknown) \
+ id(FIRMWARE, firmware) \
+ id(MODULE, kernel-module) \
+ id(KEXEC_IMAGE, kexec-image) \
+ id(KEXEC_INITRAMFS, kexec-initramfs) \
+ id(POLICY, security-policy) \
+ id(X509_CERTIFICATE, x509-certificate) \
+ id(MAX_ID, )
+
+#define __fid_enumify(ENUM, dummy) READING_ ## ENUM,
+#define __fid_stringify(dummy, str) #str,
+
+enum kernel_read_file_id {
+ __kernel_read_file_id(__fid_enumify)
+};
+
+static const char * const kernel_read_file_str[] = {
+ __kernel_read_file_id(__fid_stringify)
+};
+
+static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id)
+{
+ if ((unsigned int)id >= READING_MAX_ID)
+ return kernel_read_file_str[READING_UNKNOWN];
+
+ return kernel_read_file_str[id];
+}
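Abbreviated expansion of the X-macro above, to show what the enum and the string table end up containing (note the MAX_ID entry maps to an empty string):

	/*
	 * enum kernel_read_file_id { READING_UNKNOWN, READING_FIRMWARE,
	 *			      READING_MODULE, ..., READING_MAX_ID };
	 * static const char * const kernel_read_file_str[] =
	 *	{ "unknown", "firmware", "kernel-module", ..., "" };
	 */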
+
+ssize_t kernel_read_file(struct file *file, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
+ssize_t kernel_read_file_from_path(const char *path, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
+ssize_t kernel_read_file_from_path_initns(const char *path, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
+ssize_t kernel_read_file_from_fd(int fd, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
+
+#endif /* _LINUX_KERNEL_READ_FILE_H */
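A hedged usage sketch of the helpers above (the path is hypothetical; this assumes the helper allocates the buffer when *buf is NULL, to be freed by the caller):

	void *buf = NULL;	/* NULL asks the helper to allocate */
	size_t file_size;
	ssize_t ret;

	ret = kernel_read_file_from_path("/lib/firmware/example.bin", 0, &buf,
					 INT_MAX, &file_size, READING_FIRMWARE);
	if (ret < 0)
		return ret;
	/* ... consume ret bytes at buf ... */
	vfree(buf);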
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 89f0745c096d..ddb5a358fd82 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -28,6 +28,9 @@ enum cpu_usage_stat {
CPUTIME_STEAL,
CPUTIME_GUEST,
CPUTIME_GUEST_NICE,
+#ifdef CONFIG_SCHED_CORE
+ CPUTIME_FORCEIDLE,
+#endif
NR_STATS,
};
@@ -67,7 +70,6 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
/*
* Number of interrupts per specific IRQ source, since bootup
*/
-extern unsigned int kstat_irqs(unsigned int irq);
extern unsigned int kstat_irqs_usr(unsigned int irq);
/*
@@ -103,6 +105,7 @@ extern void account_system_index_time(struct task_struct *, u64,
enum cpu_usage_stat);
extern void account_steal_time(u64);
extern void account_idle_time(u64);
+extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
static inline void account_process_tick(struct task_struct *tsk, int user)
@@ -115,4 +118,8 @@ extern void account_process_tick(struct task_struct *, int user);
extern void account_idle_ticks(unsigned long ticks);
+#ifdef CONFIG_SCHED_CORE
+extern void __account_forceidle_time(struct task_struct *tsk, u64 delta);
+#endif
+
#endif /* _LINUX_KERNEL_STAT_H */
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index dded2e5a9f42..73f5c120def8 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -6,7 +6,6 @@
#ifndef __LINUX_KERNFS_H
#define __LINUX_KERNFS_H
-#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -14,14 +13,19 @@
#include <linux/lockdep.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/types.h>
#include <linux/uidgid.h>
#include <linux/wait.h>
+#include <linux/rwsem.h>
+#include <linux/cache.h>
struct file;
struct dentry;
struct iattr;
struct seq_file;
struct vm_area_struct;
+struct vm_operations_struct;
struct super_block;
struct file_system_type;
struct poll_table_struct;
@@ -31,14 +35,72 @@ struct kernfs_fs_context;
struct kernfs_open_node;
struct kernfs_iattrs;
+/*
+ * NR_KERNFS_LOCK_BITS determines the size (NR_KERNFS_LOCKS) of the
+ * hash table of locks.
+ * Having a small hash table would impact scalability, since more and
+ * more kernfs_node objects would end up sharing the same lock, while
+ * having a very large hash table would waste memory.
+ *
+ * At the moment size of hash table of locks is being set based on
+ * the number of CPUs as follows:
+ *
+ * NR_CPU NR_KERNFS_LOCK_BITS NR_KERNFS_LOCKS
+ * 1 1 2
+ * 2-3 2 4
+ * 4-7 4 16
+ * 8-15 6 64
+ * 16-31 8 256
+ * 32 and more 10 1024
+ *
+ * The above relation between NR_CPUS and the number of locks is based
+ * on internal experimentation: booting qemu with different -smp values,
+ * performing sysfs operations on all CPUs, and observing how the number
+ * of locks affects the completion time of those operations on each CPU.
+ */
+#ifdef CONFIG_SMP
+#define NR_KERNFS_LOCK_BITS (2 * (ilog2(NR_CPUS < 32 ? NR_CPUS : 32)))
+#else
+#define NR_KERNFS_LOCK_BITS 1
+#endif
+
+#define NR_KERNFS_LOCKS (1 << NR_KERNFS_LOCK_BITS)
+
+/*
+ * There's one kernfs_open_file for each open file and one kernfs_open_node
+ * for each kernfs_node with one or more open files.
+ *
+ * filp->private_data points to seq_file whose ->private points to
+ * kernfs_open_file.
+ *
+ * kernfs_open_files are chained at kernfs_open_node->files, which is
+ * protected by kernfs_global_locks.open_file_mutex[i].
+ *
+ * To reduce contention in sysfs access that would arise from a single
+ * global lock, an array of locks (e.g. open_file_mutex) is used, with the
+ * kernfs_node object address hashed to select an index into the array.
+ *
+ * Hashed mutexes are safe to use here because operations using these don't
+ * rely on global exclusion.
+ *
+ * In the future we intend to replace other global locks with hashed ones as well.
+ * kernfs_global_locks acts as a holder for all such hash tables.
+ */
+struct kernfs_global_locks {
+ struct mutex open_file_mutex[NR_KERNFS_LOCKS];
+};
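A sketch of how an index into the array is derived, mirroring the accessor on the fs/kernfs side (assumes the global kernfs_locks instance set up by kernfs init, which is not shown in this diff):

	static inline struct mutex *kernfs_open_file_mutex_ptr(struct kernfs_node *kn)
	{
		int idx = hash_ptr(kn, NR_KERNFS_LOCK_BITS);

		return &kernfs_locks->open_file_mutex[idx];
	}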
+
enum kernfs_node_type {
KERNFS_DIR = 0x0001,
KERNFS_FILE = 0x0002,
KERNFS_LINK = 0x0004,
};
-#define KERNFS_TYPE_MASK 0x000f
-#define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK
+#define KERNFS_TYPE_MASK 0x000f
+#define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK
+#define KERNFS_MAX_USER_XATTRS 128
+#define KERNFS_USER_XATTR_SIZE_LIMIT (128 << 10)
enum kernfs_node_flag {
KERNFS_ACTIVATED = 0x0010,
@@ -46,10 +108,12 @@ enum kernfs_node_flag {
KERNFS_HAS_SEQ_SHOW = 0x0040,
KERNFS_HAS_MMAP = 0x0080,
KERNFS_LOCKDEP = 0x0100,
+ KERNFS_HIDDEN = 0x0200,
KERNFS_SUICIDAL = 0x0400,
KERNFS_SUICIDED = 0x0800,
KERNFS_EMPTY_DIR = 0x1000,
KERNFS_HAS_RELEASE = 0x2000,
+ KERNFS_REMOVING = 0x4000,
};
/* @flags for kernfs_create_root() */
@@ -78,6 +142,11 @@ enum kernfs_root_flag {
* fhandle to access nodes of the fs.
*/
KERNFS_ROOT_SUPPORT_EXPORTOP = 0x0004,
+
+ /*
+ * Support user xattrs to be written to nodes rooted at this root.
+ */
+ KERNFS_ROOT_SUPPORT_USER_XATTR = 0x0008,
};
/* type-specific structures for kernfs_node union members */
@@ -91,6 +160,11 @@ struct kernfs_elem_dir {
* better directly in kernfs_node but is here to save space.
*/
struct kernfs_root *root;
+ /*
+ * Monotonic revision counter, used to identify if a directory
+ * node has changed during negative dentry revalidation.
+ */
+ unsigned long rev;
};
struct kernfs_elem_symlink {
@@ -99,7 +173,7 @@ struct kernfs_elem_symlink {
struct kernfs_elem_attr {
const struct kernfs_ops *ops;
- struct kernfs_open_node *open;
+ struct kernfs_open_node __rcu *open;
loff_t size;
struct kernfs_node *notify_next; /* for kernfs_notify() */
};
@@ -109,7 +183,7 @@ struct kernfs_elem_attr {
* kernfs node is represented by single kernfs_node. Most fields are
* private to kernfs and shouldn't be accessed directly by kernfs users.
*
- * As long as s_count reference is held, the kernfs_node itself is
+ * As long as count reference is held, the kernfs_node itself is
* accessible. Dereferencing elem or any other outer entity requires
* active reference.
*/
@@ -170,22 +244,7 @@ struct kernfs_syscall_ops {
struct kernfs_root *root);
};
-struct kernfs_root {
- /* published fields */
- struct kernfs_node *kn;
- unsigned int flags; /* KERNFS_ROOT_* flags */
-
- /* private fields, do not use outside kernfs proper */
- struct idr ino_idr;
- u32 last_id_lowbits;
- u32 id_highbits;
- struct kernfs_syscall_ops *syscall_ops;
-
- /* list of kernfs_super_info of this root, protected by kernfs_mutex */
- struct list_head supers;
-
- wait_queue_head_t deactivate_waitq;
-};
+struct kernfs_node *kernfs_root_to_node(struct kernfs_root *root);
struct kernfs_open_file {
/* published fields */
@@ -257,10 +316,6 @@ struct kernfs_ops {
struct poll_table_struct *pt);
int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma);
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lock_class_key lockdep_key;
-#endif
};
/*
@@ -376,6 +431,7 @@ struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
const char *name,
struct kernfs_node *target);
void kernfs_activate(struct kernfs_node *kn);
+void kernfs_show(struct kernfs_node *kn, bool show);
void kernfs_remove(struct kernfs_node *kn);
void kernfs_break_active_protection(struct kernfs_node *kn);
void kernfs_unbreak_active_protection(struct kernfs_node *kn);
@@ -556,30 +612,6 @@ kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode,
priv, NULL);
}
-static inline struct kernfs_node *
-kernfs_create_file_ns(struct kernfs_node *parent, const char *name,
- umode_t mode, kuid_t uid, kgid_t gid,
- loff_t size, const struct kernfs_ops *ops,
- void *priv, const void *ns)
-{
- struct lock_class_key *key = NULL;
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- key = (struct lock_class_key *)&ops->lockdep_key;
-#endif
- return __kernfs_create_file(parent, name, mode, uid, gid,
- size, ops, priv, ns, key);
-}
-
-static inline struct kernfs_node *
-kernfs_create_file(struct kernfs_node *parent, const char *name, umode_t mode,
- loff_t size, const struct kernfs_ops *ops, void *priv)
-{
- return kernfs_create_file_ns(parent, name, mode,
- GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
- size, ops, priv, NULL);
-}
-
static inline int kernfs_remove_by_name(struct kernfs_node *parent,
const char *name)
{
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 1776eb2e43a4..41a686996aaa 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -19,6 +19,13 @@
#include <asm/io.h>
#include <uapi/linux/kexec.h>
+#include <linux/verification.h>
+
+/* Location of a reserved region to hold the crash kernel.
+ */
+extern struct resource crashk_res;
+extern struct resource crashk_low_res;
+extern note_buf_t __percpu *crash_notes;
#ifdef CONFIG_KEXEC_CORE
#include <linux/list.h>
@@ -182,22 +189,54 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
void *buf, unsigned int size,
bool get_value);
void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
+void *kexec_image_load_default(struct kimage *image);
-int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
- unsigned long buf_len);
-void * __weak arch_kexec_kernel_image_load(struct kimage *image);
-int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi,
- Elf_Shdr *section,
- const Elf_Shdr *relsec,
- const Elf_Shdr *symtab);
-int __weak arch_kexec_apply_relocations(struct purgatory_info *pi,
- Elf_Shdr *section,
- const Elf_Shdr *relsec,
- const Elf_Shdr *symtab);
+#ifndef arch_kexec_kernel_image_probe
+static inline int
+arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len)
+{
+ return kexec_image_probe_default(image, buf, buf_len);
+}
+#endif
+
+#ifndef arch_kimage_file_post_load_cleanup
+static inline int arch_kimage_file_post_load_cleanup(struct kimage *image)
+{
+ return kexec_image_post_load_cleanup_default(image);
+}
+#endif
+
+#ifndef arch_kexec_kernel_image_load
+static inline void *arch_kexec_kernel_image_load(struct kimage *image)
+{
+ return kexec_image_load_default(image);
+}
+#endif
+
+#ifdef CONFIG_KEXEC_SIG
+#ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION
+int kexec_kernel_verify_pe_sig(const char *kernel, unsigned long kernel_len);
+#endif
+#endif
extern int kexec_add_buffer(struct kexec_buf *kbuf);
int kexec_locate_mem_hole(struct kexec_buf *kbuf);
+#ifndef arch_kexec_locate_mem_hole
+/**
+ * arch_kexec_locate_mem_hole - Find free memory to place the segments.
+ * @kbuf: Parameters for the memory search.
+ *
+ * On success, kbuf->mem will have the start address of the memory region found.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static inline int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
+{
+ return kexec_locate_mem_hole(kbuf);
+}
+#endif
+
/* Alignment required for elf header segment */
#define ELF_CORE_HEADER_ALIGN 4096
@@ -208,14 +247,52 @@ struct crash_mem_range {
struct crash_mem {
unsigned int max_nr_ranges;
unsigned int nr_ranges;
- struct crash_mem_range ranges[0];
+ struct crash_mem_range ranges[];
};
extern int crash_exclude_mem_range(struct crash_mem *mem,
unsigned long long mstart,
unsigned long long mend);
-extern int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
+extern int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
void **addr, unsigned long *sz);
+
+#ifndef arch_kexec_apply_relocations_add
+/*
+ * arch_kexec_apply_relocations_add - apply relocations of type RELA
+ * @pi: Purgatory to be relocated.
+ * @section: Section the relocations apply to.
+ * @relsec: Section containing RELAs.
+ * @symtab: Corresponding symtab.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static inline int
+arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
+ const Elf_Shdr *relsec, const Elf_Shdr *symtab)
+{
+ pr_err("RELA relocation unsupported.\n");
+ return -ENOEXEC;
+}
+#endif
+
+#ifndef arch_kexec_apply_relocations
+/*
+ * arch_kexec_apply_relocations - apply relocations of type REL
+ * @pi: Purgatory to be relocated.
+ * @section: Section the relocations apply to.
+ * @relsec: Section containing RELs.
+ * @symtab: Corresponding symtab.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static inline int
+arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
+ const Elf_Shdr *relsec, const Elf_Shdr *symtab)
+{
+ pr_err("REL relocation unsupported.\n");
+ return -ENOEXEC;
+}
+#endif
#endif /* CONFIG_KEXEC_FILE */
#ifdef CONFIG_KEXEC_ELF
@@ -293,6 +370,19 @@ struct kimage {
/* Information for loading purgatory */
struct purgatory_info purgatory_info;
#endif
+
+#ifdef CONFIG_IMA_KEXEC
+ /* Virtual address of IMA measurement buffer for kexec syscall */
+ void *ima_buffer;
+
+ phys_addr_t ima_buffer_addr;
+ size_t ima_buffer_size;
+#endif
+
+ /* Core ELF header buffer */
+ void *elf_headers;
+ unsigned long elf_headers_sz;
+ unsigned long elf_load_addr;
};
/* kexec interface functions */
@@ -302,6 +392,11 @@ extern void machine_kexec_cleanup(struct kimage *image);
extern int kernel_kexec(void);
extern struct page *kimage_alloc_control_pages(struct kimage *image,
unsigned int order);
+
+#ifndef machine_kexec_post_load
+static inline int machine_kexec_post_load(struct kimage *image) { return 0; }
+#endif
+
extern void __crash_kexec(struct pt_regs *);
extern void crash_kexec(struct pt_regs *);
int kexec_should_crash(struct task_struct *);
@@ -328,21 +423,26 @@ extern int kexec_load_disabled;
#define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \
KEXEC_FILE_NO_INITRAMFS)
-/* Location of a reserved region to hold the crash kernel.
- */
-extern struct resource crashk_res;
-extern struct resource crashk_low_res;
-extern note_buf_t __percpu *crash_notes;
-
/* flag to track if kexec reboot is in progress */
extern bool kexec_in_progress;
int crash_shrink_memory(unsigned long new_size);
-size_t crash_get_memory_size(void);
-void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
+ssize_t crash_get_memory_size(void);
+
+#ifndef arch_kexec_protect_crashkres
+/*
+ * Protection mechanism for crashkernel reserved memory after
+ * the kdump kernel is loaded.
+ *
+ * Provide an empty default implementation here -- architecture
+ * code may override this
+ */
+static inline void arch_kexec_protect_crashkres(void) { }
+#endif
-void arch_kexec_protect_crashkres(void);
-void arch_kexec_unprotect_crashkres(void);
+#ifndef arch_kexec_unprotect_crashkres
+static inline void arch_kexec_unprotect_crashkres(void) { }
+#endif
#ifndef page_to_boot_pfn
static inline unsigned long page_to_boot_pfn(struct page *page)
@@ -372,6 +472,16 @@ static inline phys_addr_t boot_phys_to_phys(unsigned long boot_phys)
}
#endif
+#ifndef crash_free_reserved_phys_range
+static inline void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
+{
+ unsigned long addr;
+
+ for (addr = begin; addr < end; addr += PAGE_SIZE)
+ free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
+}
+#endif
+
static inline unsigned long virt_to_boot_phys(void *addr)
{
return phys_to_boot_phys(__pa((unsigned long)addr));
@@ -400,6 +510,12 @@ static inline int kexec_crash_loaded(void) { return 0; }
#define kexec_in_progress false
#endif /* CONFIG_KEXEC_CORE */
+#ifdef CONFIG_KEXEC_SIG
+void set_kexec_sig_enforced(void);
+#else
+static inline void set_kexec_sig_enforced(void) {}
+#endif
+
#endif /* !defined(__ASSEMBLY__) */
#endif /* LINUX_KEXEC_H */
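
The __weak arch hooks are replaced by #ifndef-guarded static inline defaults, so an architecture opts out by defining a macro with the hook's name before supplying its own prototype. A sketch of the override side, assuming a hypothetical <asm/kexec.h>:

/* Hypothetical <asm/kexec.h> snippet: defining the macro suppresses the
 * generic static inline default in <linux/kexec.h>. */
#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres
void arch_kexec_protect_crashkres(void);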
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 4ded94bcf274..7d985a1dfe4a 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -29,6 +29,7 @@ struct kernel_pkey_params;
* clear the contents.
*/
struct key_preparsed_payload {
+ const char *orig_description; /* Actual or proposed description (may be NULL) */
char *description; /* Proposed key description (or NULL) */
union key_payload payload; /* Proposed payload */
const void *data; /* Raw data */
@@ -127,7 +128,7 @@ struct key_type {
* much is copied into the buffer
* - shouldn't do the copy if the buffer is NULL
*/
- long (*read)(const struct key *key, char __user *buffer, size_t buflen);
+ long (*read)(const struct key *key, char *buffer, size_t buflen);
/* handle request_key() for this type instead of invoking
* /sbin/request-key (optional)
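
Since ->read() now receives a kernel-space buffer, implementations copy with plain memcpy() rather than copy_to_user(). A minimal sketch under a hypothetical payload layout, with locking elided:

/* Sketch of a ->read() op against the new in-kernel buffer signature.
 * The payload layout is hypothetical; locking is elided for brevity. */
static long example_key_read(const struct key *key, char *buffer,
			     size_t buflen)
{
	size_t len = key->datalen;

	if (buffer && buflen) {
		if (buflen > len)
			buflen = len;
		memcpy(buffer, key->payload.data[0], buflen);
	}
	return len;	/* always report the full payload size */
}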
diff --git a/include/linux/key.h b/include/linux/key.h
index 6cf8e71cf8b7..d27477faf00d 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -71,6 +71,29 @@ struct net;
#define KEY_PERM_UNDEF 0xffffffff
+/*
+ * The permissions required on a key that we're looking up.
+ */
+enum key_need_perm {
+ KEY_NEED_UNSPECIFIED, /* Needed permission unspecified */
+ KEY_NEED_VIEW, /* Require permission to view attributes */
+ KEY_NEED_READ, /* Require permission to read content */
+ KEY_NEED_WRITE, /* Require permission to update / modify */
+ KEY_NEED_SEARCH, /* Require permission to search (keyring) or find (key) */
+ KEY_NEED_LINK, /* Require permission to link */
+ KEY_NEED_SETATTR, /* Require permission to change attributes */
+ KEY_NEED_UNLINK, /* Require permission to unlink key */
+ KEY_SYSADMIN_OVERRIDE, /* Special: override by CAP_SYS_ADMIN */
+ KEY_AUTHTOKEN_OVERRIDE, /* Special: override by possession of auth token */
+ KEY_DEFER_PERM_CHECK, /* Special: permission check is deferred */
+};
+
+enum key_lookup_flag {
+ KEY_LOOKUP_CREATE = 0x01,
+ KEY_LOOKUP_PARTIAL = 0x02,
+ KEY_LOOKUP_ALL = (KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL),
+};
+
struct seq_file;
struct user_struct;
struct signal_struct;
@@ -176,6 +199,9 @@ struct key {
struct list_head graveyard_link;
struct rb_node serial_node;
};
+#ifdef CONFIG_KEY_NOTIFICATIONS
+ struct watch_list *watchers; /* Entities watching this key for changes */
+#endif
struct rw_semaphore sem; /* change vs change sem */
struct key_user *user; /* owner of this key */
void *security; /* security data for this key */
@@ -269,6 +295,7 @@ extern struct key *key_alloc(struct key_type *type,
#define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */
#define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */
#define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */
+#define KEY_ALLOC_SET_KEEP 0x0020 /* Set the KEEP flag on the key/keyring */
extern void key_revoke(struct key *key);
extern void key_invalidate(struct key *key);
@@ -340,7 +367,7 @@ static inline struct key *request_key(struct key_type *type,
* completion of keys undergoing construction with a non-interruptible wait.
*/
#define request_key_net(type, description, net, callout_info) \
- request_key_tag(type, description, net->key_domain, callout_info);
+ request_key_tag(type, description, net->key_domain, callout_info)
/**
* request_key_net_rcu - Request a key for a net namespace under RCU conditions
@@ -352,7 +379,7 @@ static inline struct key *request_key(struct key_type *type,
* network namespace are used.
*/
#define request_key_net_rcu(type, description, net) \
- request_key_rcu(type, description, net->key_domain);
+ request_key_rcu(type, description, net->key_domain)
#endif /* CONFIG_NET */
extern int wait_for_key_construction(struct key *key, bool intr);
@@ -417,20 +444,9 @@ static inline key_serial_t key_serial(const struct key *key)
extern void key_set_timeout(struct key *, unsigned);
extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags,
- key_perm_t perm);
+ enum key_need_perm need_perm);
extern void key_free_user_ns(struct user_namespace *);
-/*
- * The permissions required on a key that we're looking up.
- */
-#define KEY_NEED_VIEW 0x01 /* Require permission to view attributes */
-#define KEY_NEED_READ 0x02 /* Require permission to read content */
-#define KEY_NEED_WRITE 0x04 /* Require permission to update / modify */
-#define KEY_NEED_SEARCH 0x08 /* Require permission to search (keyring) or find (key) */
-#define KEY_NEED_LINK 0x10 /* Require permission to link */
-#define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */
-#define KEY_NEED_ALL 0x3f /* All the above permissions */
-
static inline short key_read_state(const struct key *key)
{
/* Barrier versus mark_key_instantiated(). */
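
Callers of lookup_user_key() now pass a single enum key_need_perm value plus the new lookup flags, rather than OR-ing the old bitmask defines. A hedged usage sketch, with the surrounding function hypothetical:

/* Sketch: one enum key_need_perm value per lookup. */
static long example_read_key(key_serial_t id)
{
	key_ref_t kref;

	kref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_READ);
	if (IS_ERR(kref))
		return PTR_ERR(kref);
	/* ... use key_ref_to_ptr(kref) ... */
	key_ref_put(kref);
	return 0;
}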
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
new file mode 100644
index 000000000000..726857a4b680
--- /dev/null
+++ b/include/linux/kfence.h
@@ -0,0 +1,250 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Kernel Electric-Fence (KFENCE). Public interface for allocator and fault
+ * handler integration. For more info see Documentation/dev-tools/kfence.rst.
+ *
+ * Copyright (C) 2020, Google LLC.
+ */
+
+#ifndef _LINUX_KFENCE_H
+#define _LINUX_KFENCE_H
+
+#include <linux/mm.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_KFENCE
+
+#include <linux/atomic.h>
+#include <linux/static_key.h>
+
+extern unsigned long kfence_sample_interval;
+
+/*
+ * We allocate an even number of pages, as it simplifies calculations to map
+ * address to metadata indices; effectively, the very first page serves as an
+ * extended guard page, but otherwise has no special purpose.
+ */
+#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
+extern char *__kfence_pool;
+
+DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
+extern atomic_t kfence_allocation_gate;
+
+/**
+ * is_kfence_address() - check if an address belongs to KFENCE pool
+ * @addr: address to check
+ *
+ * Return: true or false depending on whether the address is within the KFENCE
+ * object range.
+ *
+ * KFENCE objects live in a separate page range and are not to be intermixed
+ * with regular heap objects (e.g. KFENCE objects must never be added to the
+ * allocator freelists). Failing to do so may and will result in heap
+ * corruption, therefore is_kfence_address() must be used to check whether
+ * an object requires specific handling.
+ *
+ * Note: This function may be used in fast-paths, and is performance critical.
+ * Future changes should take this into account; for instance, we want to avoid
+ * introducing another load and therefore need to keep KFENCE_POOL_SIZE a
+ * constant (until immediate patching support is added to the kernel).
+ */
+static __always_inline bool is_kfence_address(const void *addr)
+{
+ /*
+ * The __kfence_pool != NULL check is required to deal with the case
+ * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in
+ * the slow-path after the range-check!
+ */
+ return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool);
+}
+
+/**
+ * kfence_alloc_pool() - allocate the KFENCE pool via memblock
+ */
+void __init kfence_alloc_pool(void);
+
+/**
+ * kfence_init() - perform KFENCE initialization at boot time
+ *
+ * Requires that kfence_alloc_pool() was called before. This sets up the
+ * allocation gate timer, and requires that workqueues are available.
+ */
+void __init kfence_init(void);
+
+/**
+ * kfence_shutdown_cache() - handle shutdown_cache() for KFENCE objects
+ * @s: cache being shut down
+ *
+ * Before shutting down a cache, one must ensure there are no remaining objects
+ * allocated from it. Because KFENCE objects are not referenced from the cache
+ * directly, we need to check them here.
+ *
+ * Note that shutdown_cache() is internal to SL*B, and kmem_cache_destroy() does
+ * not return if allocated objects still exist: it prints an error message and
+ * simply aborts destruction of a cache, leaking memory.
+ *
+ * If the only such objects are KFENCE objects, we will not leak the entire
+ * cache, but instead try to provide more useful debug info by making allocated
+ * objects "zombie allocations". Objects may then still be used or freed (which
+ * is handled gracefully), but usage will result in showing KFENCE error reports
+ * which include stack traces to the user of the object, the original allocation
+ * site, and caller to shutdown_cache().
+ */
+void kfence_shutdown_cache(struct kmem_cache *s);
+
+/*
+ * Allocate a KFENCE object. Allocators must not call this function directly,
+ * use kfence_alloc() instead.
+ */
+void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags);
+
+/**
+ * kfence_alloc() - allocate a KFENCE object with a low probability
+ * @s: struct kmem_cache with object requirements
+ * @size: exact size of the object to allocate (can be less than @s->size
+ * e.g. for kmalloc caches)
+ * @flags: GFP flags
+ *
+ * Return:
+ * * NULL - must proceed with allocating as usual,
+ * * non-NULL - pointer to a KFENCE object.
+ *
+ * kfence_alloc() should be inserted into the heap allocation fast path,
+ * allowing it to transparently return KFENCE-allocated objects with a low
+ * probability using a static branch (the probability is controlled by the
+ * kfence.sample_interval boot parameter).
+ */
+static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
+{
+#if defined(CONFIG_KFENCE_STATIC_KEYS) || CONFIG_KFENCE_SAMPLE_INTERVAL == 0
+ if (!static_branch_unlikely(&kfence_allocation_key))
+ return NULL;
+#else
+ if (!static_branch_likely(&kfence_allocation_key))
+ return NULL;
+#endif
+ if (likely(atomic_read(&kfence_allocation_gate)))
+ return NULL;
+ return __kfence_alloc(s, size, flags);
+}
+
+/**
+ * kfence_ksize() - get actual amount of memory allocated for a KFENCE object
+ * @addr: pointer to a heap object
+ *
+ * Return:
+ * * 0 - not a KFENCE object, must call __ksize() instead,
+ * * non-0 - this many bytes can be accessed without causing a memory error.
+ *
+ * kfence_ksize() returns the number of bytes requested for a KFENCE object at
+ * allocation time. This number may be less than the object size of the
+ * corresponding struct kmem_cache.
+ */
+size_t kfence_ksize(const void *addr);
+
+/**
+ * kfence_object_start() - find the beginning of a KFENCE object
+ * @addr: address within a KFENCE-allocated object
+ *
+ * Return: address of the beginning of the object.
+ *
+ * SL[AU]B-allocated objects are laid out within a page one by one, so it is
+ * easy to calculate the beginning of an object given a pointer inside it and
+ * the object size. The same is not true for KFENCE, which places a single
+ * object at either end of the page. This helper function is used to find the
+ * beginning of a KFENCE-allocated object.
+ */
+void *kfence_object_start(const void *addr);
+
+/**
+ * __kfence_free() - release a KFENCE heap object to KFENCE pool
+ * @addr: object to be freed
+ *
+ * Requires: is_kfence_address(addr)
+ *
+ * Release a KFENCE object and mark it as freed.
+ */
+void __kfence_free(void *addr);
+
+/**
+ * kfence_free() - try to release an arbitrary heap object to KFENCE pool
+ * @addr: object to be freed
+ *
+ * Return:
+ * * false - object doesn't belong to KFENCE pool and was ignored,
+ * * true - object was released to KFENCE pool.
+ *
+ * Release a KFENCE object and mark it as freed. May be called on any object,
+ * even non-KFENCE objects, to simplify integration of the hooks into the
+ * allocator's free codepath. The allocator must check the return value to
+ * determine if it was a KFENCE object or not.
+ */
+static __always_inline __must_check bool kfence_free(void *addr)
+{
+ if (!is_kfence_address(addr))
+ return false;
+ __kfence_free(addr);
+ return true;
+}
+
+/**
+ * kfence_handle_page_fault() - perform page fault handling for KFENCE pages
+ * @addr: faulting address
+ * @is_write: is access a write
+ * @regs: current struct pt_regs (can be NULL; if provided, a full stack trace is shown)
+ *
+ * Return:
+ * * false - address outside KFENCE pool,
+ * * true - page fault handled by KFENCE, no additional handling required.
+ *
+ * A page fault inside KFENCE pool indicates a memory error, such as an
+ * out-of-bounds access, a use-after-free or an invalid memory access. In these
+ * cases KFENCE prints an error message and marks the offending page as
+ * present, so that the kernel can proceed.
+ */
+bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);
+
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+/**
+ * __kfence_obj_info() - fill kmem_obj_info struct
+ * @kpp: kmem_obj_info to be filled
+ * @object: the object
+ *
+ * Return:
+ * * false - not a KFENCE object
+ * * true - a KFENCE object, filled @kpp
+ *
+ * Copies information to @kpp for KFENCE objects.
+ */
+bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
+#endif
+
+#else /* CONFIG_KFENCE */
+
+static inline bool is_kfence_address(const void *addr) { return false; }
+static inline void kfence_alloc_pool(void) { }
+static inline void kfence_init(void) { }
+static inline void kfence_shutdown_cache(struct kmem_cache *s) { }
+static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; }
+static inline size_t kfence_ksize(const void *addr) { return 0; }
+static inline void *kfence_object_start(const void *addr) { return NULL; }
+static inline void __kfence_free(void *addr) { }
+static inline bool __must_check kfence_free(void *addr) { return false; }
+static inline bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write,
+ struct pt_regs *regs)
+{
+ return false;
+}
+
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+ return false;
+}
+#endif
+
+#endif
+
+#endif /* _LINUX_KFENCE_H */
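
kfence_alloc() and kfence_free() are designed to sit directly in the allocator fast paths. A sketch of that wiring, where slab_alloc() and slab_free_backend() are hypothetical stand-ins for the allocator's own routines:

/* Sketch of allocator integration; slab_alloc()/slab_free_backend()
 * are hypothetical placeholders for the real allocator paths. */
static void *example_cache_alloc(struct kmem_cache *s, size_t size,
				 gfp_t flags)
{
	void *obj = kfence_alloc(s, size, flags);

	if (obj)		/* rare: KFENCE took this allocation */
		return obj;
	return slab_alloc(s, size, flags);
}

static void example_cache_free(void *obj)
{
	if (kfence_free(obj))	/* KFENCE object: fully handled */
		return;
	slab_free_backend(obj);
}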
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index fc4b0b10210f..0b35a41440ff 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -247,6 +247,37 @@ __kfifo_int_must_check_helper(int val)
})
/**
+ * kfifo_is_empty_spinlocked - returns true if the fifo is empty using
+ * a spinlock for locking
+ * @fifo: address of the fifo to be used
+ * @lock: spinlock to be used for locking
+ */
+#define kfifo_is_empty_spinlocked(fifo, lock) \
+({ \
+ unsigned long __flags; \
+ bool __ret; \
+ spin_lock_irqsave(lock, __flags); \
+ __ret = kfifo_is_empty(fifo); \
+ spin_unlock_irqrestore(lock, __flags); \
+ __ret; \
+})
+
+/**
+ * kfifo_is_empty_spinlocked_noirqsave - returns true if the fifo is empty
+ * using a spinlock for locking, doesn't disable interrupts
+ * @fifo: address of the fifo to be used
+ * @lock: spinlock to be used for locking
+ */
+#define kfifo_is_empty_spinlocked_noirqsave(fifo, lock) \
+({ \
+ bool __ret; \
+ spin_lock(lock); \
+ __ret = kfifo_is_empty(fifo); \
+ spin_unlock(lock); \
+ __ret; \
+})
+
+/**
* kfifo_is_full - returns true if the fifo is full
* @fifo: address of the fifo to be used
*/
@@ -517,6 +548,26 @@ __kfifo_uint_must_check_helper( \
__ret; \
})
+/**
+ * kfifo_in_spinlocked_noirqsave - put data into fifo using a spinlock for
+ * locking, don't disable interrupts
+ * @fifo: address of the fifo to be used
+ * @buf: the data to be added
+ * @n: number of elements to be added
+ * @lock: pointer to the spinlock to use for locking
+ *
+ * This is a variant of kfifo_in_spinlocked() but uses spin_lock/unlock()
+ * for locking and doesn't disable interrupts.
+ */
+#define kfifo_in_spinlocked_noirqsave(fifo, buf, n, lock) \
+({ \
+ unsigned int __ret; \
+ spin_lock(lock); \
+ __ret = kfifo_in(fifo, buf, n); \
+ spin_unlock(lock); \
+ __ret; \
+})
+
/* alias for kfifo_in_spinlocked, will be removed in a future release */
#define kfifo_in_locked(fifo, buf, n, lock) \
kfifo_in_spinlocked(fifo, buf, n, lock)
@@ -569,6 +620,28 @@ __kfifo_uint_must_check_helper( \
}) \
)
+/**
+ * kfifo_out_spinlocked_noirqsave - get data from the fifo using a spinlock
+ * for locking, don't disable interrupts
+ * @fifo: address of the fifo to be used
+ * @buf: pointer to the storage buffer
+ * @n: max. number of elements to get
+ * @lock: pointer to the spinlock to use for locking
+ *
+ * This is a variant of kfifo_out_spinlocked() which uses spin_lock/unlock()
+ * for locking and doesn't disable interrupts.
+ */
+#define kfifo_out_spinlocked_noirqsave(fifo, buf, n, lock) \
+__kfifo_uint_must_check_helper( \
+({ \
+ unsigned int __ret; \
+ spin_lock(lock); \
+ __ret = kfifo_out(fifo, buf, n); \
+ spin_unlock(lock); \
+ __ret; \
+}) \
+)
+
/* alias for kfifo_out_spinlocked, will be removed in a future release */
#define kfifo_out_locked(fifo, buf, n, lock) \
kfifo_out_spinlocked(fifo, buf, n, lock)
@@ -615,7 +688,7 @@ __kfifo_uint_must_check_helper( \
* writer, you don't need extra locking to use these macros.
*/
#define kfifo_to_user(fifo, to, len, copied) \
-__kfifo_uint_must_check_helper( \
+__kfifo_int_must_check_helper( \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
void __user *__to = (to); \
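
The *_noirqsave variants suit contexts where interrupts are already disabled, such as a hard-IRQ handler, avoiding a redundant flags save/restore. A sketch; example_read_hw_byte() is purely hypothetical:

/* Sketch: producer in hard-IRQ context, where interrupts are already
 * off, so plain spin_lock() suffices. Needs <linux/interrupt.h> and
 * <linux/kfifo.h>; example_read_hw_byte() is hypothetical. */
static DEFINE_KFIFO(example_fifo, unsigned char, 128);
static DEFINE_SPINLOCK(example_lock);

static irqreturn_t example_irq(int irq, void *dev_id)
{
	unsigned char byte = example_read_hw_byte(dev_id);

	kfifo_in_spinlocked_noirqsave(&example_fifo, &byte, 1,
				      &example_lock);
	return IRQ_HANDLED;
}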
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index b072aeb1fd78..258cdde8d356 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -16,6 +16,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/atomic.h>
+#include <linux/kprobes.h>
#ifdef CONFIG_HAVE_ARCH_KGDB
#include <asm/kgdb.h>
#endif
@@ -104,9 +105,9 @@ extern int dbg_set_reg(int regno, void *mem, struct pt_regs *regs);
*/
/**
- * kgdb_arch_init - Perform any architecture specific initalization.
+ * kgdb_arch_init - Perform any architecture specific initialization.
*
- * This function will handle the initalization of any architecture
+ * This function will handle the initialization of any architecture
* specific callbacks.
*/
extern int kgdb_arch_init(void);
@@ -177,6 +178,17 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code,
struct pt_regs *regs);
/**
+ * kgdb_arch_handle_qxfer_pkt - Handle architecture specific GDB XML
+ * packets.
+ * @remcom_in_buffer: The buffer of the packet we have read.
+ * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
+ */
+
+extern void
+kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
+ char *remcom_out_buffer);
+
+/**
* kgdb_call_nmi_hook - Call kgdb_nmicallback() on the current CPU
* @ignored: This parameter is only here to match the prototype.
*
@@ -217,9 +229,9 @@ extern int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt);
extern int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt);
/**
- * kgdb_arch_late - Perform any architecture specific initalization.
+ * kgdb_arch_late - Perform any architecture specific initialization.
*
- * This function will handle the late initalization of any
+ * This function will handle the late initialization of any
* architecture specific callbacks. This is an optional function for
* handling things like late initialization of hw breakpoints. The
* default implementation does nothing.
@@ -269,12 +281,14 @@ struct kgdb_arch {
* @write_char: Pointer to a function that will write one char.
* @flush: Pointer to a function that will flush any pending writes.
* @init: Pointer to a function that will initialize the device.
+ * @deinit: Pointer to a function that will deinit the device. Implies that
+ * this I/O driver is temporary and expects to be replaced. Called when
+ * an I/O driver is replaced or explicitly unregistered.
* @pre_exception: Pointer to a function that will do any prep work for
* the I/O driver.
* @post_exception: Pointer to a function that will do any cleanup work
* for the I/O driver.
- * @is_console: 1 if the end device is a console 0 if the I/O device is
- * not a console
+ * @cons: valid if the I/O device is a console; else NULL.
*/
struct kgdb_io {
const char *name;
@@ -282,9 +296,10 @@ struct kgdb_io {
void (*write_char) (u8);
void (*flush) (void);
int (*init) (void);
+ void (*deinit) (void);
void (*pre_exception) (void);
void (*post_exception) (void);
- int is_console;
+ struct console *cons;
};
extern const struct kgdb_arch arch_kgdb_ops;
@@ -298,7 +313,7 @@ extern bool kgdb_nmi_poll_knock(void);
#else
static inline int kgdb_register_nmi_console(void) { return 0; }
static inline int kgdb_unregister_nmi_console(void) { return 0; }
-static inline bool kgdb_nmi_poll_knock(void) { return 1; }
+static inline bool kgdb_nmi_poll_knock(void) { return true; }
#endif
extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
@@ -310,7 +325,7 @@ extern char *kgdb_mem2hex(char *mem, char *buf, int count);
extern int kgdb_hex2mem(char *buf, char *mem, int count);
extern int kgdb_isremovedbreak(unsigned long addr);
-extern void kgdb_schedule_breakpoint(void);
+extern int kgdb_has_hit_break(unsigned long addr);
extern int
kgdb_handle_exception(int ex_vector, int signo, int err_code,
@@ -320,16 +335,35 @@ extern int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
atomic_t *snd_rdy);
extern void gdbstub_exit(int status);
+/*
+ * kgdb and kprobes both use the same (kprobe) blocklist (which makes sense
+ * given they are both typically hooked up to the same trap meaning on most
+ * architectures one cannot be used to debug the other)
+ *
+ * However on architectures where kprobes is not (yet) implemented we permit
+ * breakpoints everywhere rather than blocking everything by default.
+ */
+static inline bool kgdb_within_blocklist(unsigned long addr)
+{
+#ifdef CONFIG_KGDB_HONOUR_BLOCKLIST
+ return within_kprobe_blacklist(addr);
+#else
+ return false;
+#endif
+}
+
extern int kgdb_single_step;
extern atomic_t kgdb_active;
#define in_dbg_master() \
- (raw_smp_processor_id() == atomic_read(&kgdb_active))
+ (irqs_disabled() && (smp_processor_id() == atomic_read(&kgdb_active)))
extern bool dbg_is_early;
extern void __init dbg_late_init(void);
extern void kgdb_panic(const char *msg);
+extern void kgdb_free_init_mem(void);
#else /* ! CONFIG_KGDB */
#define in_dbg_master() (0)
#define dbg_late_init()
static inline void kgdb_panic(const char *msg) {}
+static inline void kgdb_free_init_mem(void) { }
#endif /* ! CONFIG_KGDB */
#endif /* _KGDB_H_ */
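
A sketch of an I/O driver filled in against the reworked struct kgdb_io, using the new deinit and cons members; all example_* symbols are hypothetical:

/* Hypothetical registration using the new fields. */
static int example_get_char(void);
static void example_put_char(u8 c);
static void example_deinit(void);
static struct console example_console;

static struct kgdb_io example_kgdb_io = {
	.name		= "example_kgdbiod",
	.read_char	= example_get_char,
	.write_char	= example_put_char,
	.deinit		= example_deinit,	/* called when replaced */
	.cons		= &example_console,	/* non-NULL: we are a console */
};

/* registered via kgdb_register_io_module(&example_kgdb_io); */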
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index bc45ea1efbf7..70162d707caf 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -4,45 +4,32 @@
#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern struct attribute_group khugepaged_attr_group;
extern int khugepaged_init(void);
extern void khugepaged_destroy(void);
extern int start_stop_khugepaged(void);
-extern int __khugepaged_enter(struct mm_struct *mm);
+extern void __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
-extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
- unsigned long vm_flags);
+extern void khugepaged_enter_vma(struct vm_area_struct *vma,
+ unsigned long vm_flags);
+extern void khugepaged_min_free_kbytes_update(void);
#ifdef CONFIG_SHMEM
-extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
+extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
+ bool install_pmd);
#else
-static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
- unsigned long addr)
+static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
+ unsigned long addr, bool install_pmd)
{
+ return 0;
}
#endif
-#define khugepaged_enabled() \
- (transparent_hugepage_flags & \
- ((1<<TRANSPARENT_HUGEPAGE_FLAG) | \
- (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
-#define khugepaged_always() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_FLAG))
-#define khugepaged_req_madv() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
-#define khugepaged_defrag() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
-
-static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
- return __khugepaged_enter(mm);
- return 0;
+ __khugepaged_enter(mm);
}
static inline void khugepaged_exit(struct mm_struct *mm)
@@ -50,39 +37,24 @@ static inline void khugepaged_exit(struct mm_struct *mm)
if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
__khugepaged_exit(mm);
}
-
-static inline int khugepaged_enter(struct vm_area_struct *vma,
- unsigned long vm_flags)
-{
- if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
- if ((khugepaged_always() ||
- (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
- !(vm_flags & VM_NOHUGEPAGE) &&
- !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
- if (__khugepaged_enter(vma->vm_mm))
- return -ENOMEM;
- return 0;
-}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
- return 0;
}
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
-static inline int khugepaged_enter(struct vm_area_struct *vma,
- unsigned long vm_flags)
+static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
+ unsigned long vm_flags)
{
- return 0;
}
-static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
- unsigned long vm_flags)
+static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
+ unsigned long addr, bool install_pmd)
{
return 0;
}
-static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
- unsigned long addr)
+
+static inline void khugepaged_min_free_kbytes_update(void)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
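
Since khugepaged_enter_vma() now returns void, callers no longer need an -ENOMEM error path. A minimal sketch of a hypothetical caller invoked after a VMA is created or merged:

/* Hypothetical caller: fire-and-forget after growing an anon VMA. */
static void example_vma_grown(struct vm_area_struct *vma)
{
	khugepaged_enter_vma(vma, vma->vm_flags);
}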
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 34684b2026ab..6a3cd1bf4680 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -29,10 +29,9 @@ extern void kmemleak_not_leak(const void *ptr) __ref;
extern void kmemleak_ignore(const void *ptr) __ref;
extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
extern void kmemleak_no_scan(const void *ptr) __ref;
-extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
+extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
gfp_t gfp) __ref;
extern void kmemleak_free_part_phys(phys_addr_t phys, size_t size) __ref;
-extern void kmemleak_not_leak_phys(phys_addr_t phys) __ref;
extern void kmemleak_ignore_phys(phys_addr_t phys) __ref;
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
@@ -107,15 +106,12 @@ static inline void kmemleak_no_scan(const void *ptr)
{
}
static inline void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
- int min_count, gfp_t gfp)
+ gfp_t gfp)
{
}
static inline void kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
}
-static inline void kmemleak_not_leak_phys(phys_addr_t phys)
-{
-}
static inline void kmemleak_ignore_phys(phys_addr_t phys)
{
}
diff --git a/include/linux/kmsan-checks.h b/include/linux/kmsan-checks.h
new file mode 100644
index 000000000000..c4cae333deec
--- /dev/null
+++ b/include/linux/kmsan-checks.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KMSAN checks to be used for one-off annotations in subsystems.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+
+#ifndef _LINUX_KMSAN_CHECKS_H
+#define _LINUX_KMSAN_CHECKS_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_KMSAN
+
+/**
+ * kmsan_poison_memory() - Mark the memory range as uninitialized.
+ * @address: address to start with.
+ * @size: size of buffer to poison.
+ * @flags: GFP flags for allocations done by this function.
+ *
+ * Until other data is written to this range, KMSAN will treat it as
+ * uninitialized. Error reports for this memory will reference the call site of
+ * kmsan_poison_memory() as origin.
+ */
+void kmsan_poison_memory(const void *address, size_t size, gfp_t flags);
+
+/**
+ * kmsan_unpoison_memory() - Mark the memory range as initialized.
+ * @address: address to start with.
+ * @size: size of buffer to unpoison.
+ *
+ * Until other data is written to this range, KMSAN will treat it as
+ * initialized.
+ */
+void kmsan_unpoison_memory(const void *address, size_t size);
+
+/**
+ * kmsan_check_memory() - Check the memory range for being initialized.
+ * @address: address to start with.
+ * @size: size of buffer to check.
+ *
+ * If any piece of the given range is marked as uninitialized, KMSAN will report
+ * an error.
+ */
+void kmsan_check_memory(const void *address, size_t size);
+
+/**
+ * kmsan_copy_to_user() - Notify KMSAN about a data transfer to userspace.
+ * @to: destination address in the userspace.
+ * @from: source address in the kernel.
+ * @to_copy: number of bytes to copy.
+ * @left: number of bytes not copied.
+ *
+ * If this is a real userspace data transfer, KMSAN checks the bytes that were
+ * actually copied to ensure there was no information leak. If @to belongs to
+ * the kernel space (which is possible for compat syscalls), KMSAN just copies
+ * the metadata.
+ */
+void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
+ size_t left);
+
+#else
+
+static inline void kmsan_poison_memory(const void *address, size_t size,
+ gfp_t flags)
+{
+}
+static inline void kmsan_unpoison_memory(const void *address, size_t size)
+{
+}
+static inline void kmsan_check_memory(const void *address, size_t size)
+{
+}
+static inline void kmsan_copy_to_user(void __user *to, const void *from,
+ size_t to_copy, size_t left)
+{
+}
+
+#endif
+
+#endif /* _LINUX_KMSAN_CHECKS_H */
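
A typical use of these annotations: a buffer filled by hardware (e.g. via DMA) is invisible to KMSAN's instrumentation, so the driver unpoisons it explicitly. A sketch with a hypothetical example_hw_fill():

/* Sketch: mark a hardware-written buffer as initialized before the
 * rest of the kernel consumes it. example_hw_fill() is hypothetical. */
static void example_receive(void *buf, size_t len)
{
	example_hw_fill(buf, len);		/* hardware writes buf */
	kmsan_unpoison_memory(buf, len);	/* tell KMSAN it's valid */
}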
diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h
new file mode 100644
index 000000000000..e38ae3c34618
--- /dev/null
+++ b/include/linux/kmsan.h
@@ -0,0 +1,330 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KMSAN API for subsystems.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+#ifndef _LINUX_KMSAN_H
+#define _LINUX_KMSAN_H
+
+#include <linux/dma-direction.h>
+#include <linux/gfp.h>
+#include <linux/kmsan-checks.h>
+#include <linux/types.h>
+
+struct page;
+struct kmem_cache;
+struct task_struct;
+struct scatterlist;
+struct urb;
+
+#ifdef CONFIG_KMSAN
+
+/**
+ * kmsan_task_create() - Initialize KMSAN state for the task.
+ * @task: task to initialize.
+ */
+void kmsan_task_create(struct task_struct *task);
+
+/**
+ * kmsan_task_exit() - Notify KMSAN that a task has exited.
+ * @task: task about to finish.
+ */
+void kmsan_task_exit(struct task_struct *task);
+
+/**
+ * kmsan_init_shadow() - Initialize KMSAN shadow at boot time.
+ *
+ * Allocate and initialize KMSAN metadata for early allocations.
+ */
+void __init kmsan_init_shadow(void);
+
+/**
+ * kmsan_init_runtime() - Initialize KMSAN state and enable KMSAN.
+ */
+void __init kmsan_init_runtime(void);
+
+/**
+ * kmsan_memblock_free_pages() - handle freeing of memblock pages.
+ * @page: struct page to free.
+ * @order: order of @page.
+ *
+ * Freed pages are either returned to buddy allocator or held back to be used
+ * as metadata pages.
+ */
+bool __init kmsan_memblock_free_pages(struct page *page, unsigned int order);
+
+/**
+ * kmsan_alloc_page() - Notify KMSAN about an alloc_pages() call.
+ * @page: struct page pointer returned by alloc_pages().
+ * @order: order of allocated struct page.
+ * @flags: GFP flags used by alloc_pages()
+ *
+ * KMSAN marks 1<<@order pages starting at @page as uninitialized, unless
+ * @flags contain __GFP_ZERO.
+ */
+void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags);
+
+/**
+ * kmsan_free_page() - Notify KMSAN about a free_pages() call.
+ * @page: struct page pointer passed to free_pages().
+ * @order: order of deallocated struct page.
+ *
+ * KMSAN marks freed memory as uninitialized.
+ */
+void kmsan_free_page(struct page *page, unsigned int order);
+
+/**
+ * kmsan_copy_page_meta() - Copy KMSAN metadata between two pages.
+ * @dst: destination page.
+ * @src: source page.
+ *
+ * KMSAN copies the contents of metadata pages for @src into the metadata pages
+ * for @dst. If @dst has no associated metadata pages, nothing happens.
+ * If @src has no associated metadata pages, @dst metadata pages are unpoisoned.
+ */
+void kmsan_copy_page_meta(struct page *dst, struct page *src);
+
+/**
+ * kmsan_slab_alloc() - Notify KMSAN about a slab allocation.
+ * @s: slab cache the object belongs to.
+ * @object: object pointer.
+ * @flags: GFP flags passed to the allocator.
+ *
+ * Depending on cache flags and GFP flags, KMSAN sets up the metadata of the
+ * newly created object, marking it as initialized or uninitialized.
+ */
+void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
+
+/**
+ * kmsan_slab_free() - Notify KMSAN about a slab deallocation.
+ * @s: slab cache the object belongs to.
+ * @object: object pointer.
+ *
+ * KMSAN marks the freed object as uninitialized.
+ */
+void kmsan_slab_free(struct kmem_cache *s, void *object);
+
+/**
+ * kmsan_kmalloc_large() - Notify KMSAN about a large slab allocation.
+ * @ptr: object pointer.
+ * @size: object size.
+ * @flags: GFP flags passed to the allocator.
+ *
+ * Similar to kmsan_slab_alloc(), but for large allocations.
+ */
+void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
+
+/**
+ * kmsan_kfree_large() - Notify KMSAN about a large slab deallocation.
+ * @ptr: object pointer.
+ *
+ * Similar to kmsan_slab_free(), but for large allocations.
+ */
+void kmsan_kfree_large(const void *ptr);
+
+/**
+ * kmsan_vmap_pages_range_noflush() - Notify KMSAN about a vmap.
+ * @start: start of vmapped range.
+ * @end: end of vmapped range.
+ * @prot: page protection flags used for vmap.
+ * @pages: array of pages.
+ * @page_shift: page_shift passed to vmap_range_noflush().
+ *
+ * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
+ * vmalloc metadata address range.
+ */
+void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+ pgprot_t prot, struct page **pages,
+ unsigned int page_shift);
+
+/**
+ * kmsan_vunmap_range_noflush() - Notify KMSAN about a vunmap.
+ * @start: start of vunmapped range.
+ * @end: end of vunmapped range.
+ *
+ * KMSAN unmaps the contiguous metadata ranges created by
+ * kmsan_vmap_pages_range_noflush().
+ */
+void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
+
+/**
+ * kmsan_ioremap_page_range() - Notify KMSAN about an ioremap_page_range() call.
+ * @addr: range start.
+ * @end: range end.
+ * @phys_addr: physical range start.
+ * @prot: page protection flags used for ioremap_page_range().
+ * @page_shift: page_shift argument passed to vmap_range_noflush().
+ *
+ * KMSAN creates new metadata pages for the physical pages mapped into the
+ * virtual memory.
+ */
+void kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot,
+ unsigned int page_shift);
+
+/**
+ * kmsan_iounmap_page_range() - Notify KMSAN about an iounmap_page_range() call.
+ * @start: range start.
+ * @end: range end.
+ *
+ * KMSAN unmaps the metadata pages for the given range and, unlike for
+ * vunmap_page_range(), also deallocates them.
+ */
+void kmsan_iounmap_page_range(unsigned long start, unsigned long end);
+
+/**
+ * kmsan_handle_dma() - Handle a DMA data transfer.
+ * @page: first page of the buffer.
+ * @offset: offset of the buffer within the first page.
+ * @size: buffer size.
+ * @dir: one of possible dma_data_direction values.
+ *
+ * Depending on @dir, KMSAN:
+ * * checks the buffer, if it is copied to device;
+ * * initializes the buffer, if it is copied from device;
+ * * does both, if this is a DMA_BIDIRECTIONAL transfer.
+ */
+void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+ enum dma_data_direction dir);
+
+/**
+ * kmsan_handle_dma_sg() - Handle a DMA transfer using scatterlist.
+ * @sg: scatterlist holding DMA buffers.
+ * @nents: number of scatterlist entries.
+ * @dir: one of possible dma_data_direction values.
+ *
+ * Depending on @dir, KMSAN:
+ * * checks the buffers in the scatterlist, if they are copied to device;
+ * * initializes the buffers, if they are copied from device;
+ * * does both, if this is a DMA_BIDIRECTIONAL transfer.
+ */
+void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+
+/**
+ * kmsan_handle_urb() - Handle a USB data transfer.
+ * @urb: struct urb pointer.
+ * @is_out: data transfer direction (true means output to hardware).
+ *
+ * If @is_out is true, KMSAN checks the transfer buffer of @urb. Otherwise,
+ * KMSAN initializes the transfer buffer.
+ */
+void kmsan_handle_urb(const struct urb *urb, bool is_out);
+
+/**
+ * kmsan_unpoison_entry_regs() - Handle pt_regs in low-level entry code.
+ * @regs: struct pt_regs pointer received from assembly code.
+ *
+ * KMSAN unpoisons the contents of the passed pt_regs, preventing potential
+ * false positive reports. Unlike kmsan_unpoison_memory(),
+ * kmsan_unpoison_entry_regs() can be called from the regions where
+ * kmsan_in_runtime() returns true, which is the case in early entry code.
+ */
+void kmsan_unpoison_entry_regs(const struct pt_regs *regs);
+
+#else
+
+static inline void kmsan_init_shadow(void)
+{
+}
+
+static inline void kmsan_init_runtime(void)
+{
+}
+
+static inline bool kmsan_memblock_free_pages(struct page *page,
+ unsigned int order)
+{
+ return true;
+}
+
+static inline void kmsan_task_create(struct task_struct *task)
+{
+}
+
+static inline void kmsan_task_exit(struct task_struct *task)
+{
+}
+
+static inline void kmsan_alloc_page(struct page *page, unsigned int order,
+ gfp_t flags)
+{
+}
+
+static inline void kmsan_free_page(struct page *page, unsigned int order)
+{
+}
+
+static inline void kmsan_copy_page_meta(struct page *dst, struct page *src)
+{
+}
+
+static inline void kmsan_slab_alloc(struct kmem_cache *s, void *object,
+ gfp_t flags)
+{
+}
+
+static inline void kmsan_slab_free(struct kmem_cache *s, void *object)
+{
+}
+
+static inline void kmsan_kmalloc_large(const void *ptr, size_t size,
+ gfp_t flags)
+{
+}
+
+static inline void kmsan_kfree_large(const void *ptr)
+{
+}
+
+static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
+ unsigned long end,
+ pgprot_t prot,
+ struct page **pages,
+ unsigned int page_shift)
+{
+}
+
+static inline void kmsan_vunmap_range_noflush(unsigned long start,
+ unsigned long end)
+{
+}
+
+static inline void kmsan_ioremap_page_range(unsigned long start,
+ unsigned long end,
+ phys_addr_t phys_addr,
+ pgprot_t prot,
+ unsigned int page_shift)
+{
+}
+
+static inline void kmsan_iounmap_page_range(unsigned long start,
+ unsigned long end)
+{
+}
+
+static inline void kmsan_handle_dma(struct page *page, size_t offset,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static inline void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+}
+
+static inline void kmsan_handle_urb(const struct urb *urb, bool is_out)
+{
+}
+
+static inline void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
+{
+}
+
+#endif
+
+#endif /* _LINUX_KMSAN_H */
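
A sketch of how a DMA mapping helper might invoke the hook, so device-bound data is checked and device-produced data is unpoisoned; the wrapper itself is hypothetical:

/* Hypothetical map helper informing KMSAN about the transfer. */
static void example_map_single(struct page *page, size_t offset,
			       size_t size, enum dma_data_direction dir)
{
	kmsan_handle_dma(page, offset, size, dir);
	/* ... perform the actual mapping ... */
}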
diff --git a/include/linux/kmsan_string.h b/include/linux/kmsan_string.h
new file mode 100644
index 000000000000..7287da6f52ef
--- /dev/null
+++ b/include/linux/kmsan_string.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KMSAN string functions API used in other headers.
+ *
+ * Copyright (C) 2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+#ifndef _LINUX_KMSAN_STRING_H
+#define _LINUX_KMSAN_STRING_H
+
+/*
+ * KMSAN overrides the default memcpy/memset/memmove implementations in the
+ * kernel, which requires having __msan_XXX function prototypes in several other
+ * headers. Keep them in one place instead of open-coding.
+ */
+void *__msan_memcpy(void *dst, const void *src, size_t size);
+void *__msan_memset(void *s, int c, size_t n);
+void *__msan_memmove(void *dest, const void *src, size_t len);
+
+#endif /* _LINUX_KMSAN_STRING_H */
diff --git a/include/linux/kmsan_types.h b/include/linux/kmsan_types.h
new file mode 100644
index 000000000000..8bfa6c98176d
--- /dev/null
+++ b/include/linux/kmsan_types.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A minimal header declaring types added by KMSAN to existing kernel structs.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+#ifndef _LINUX_KMSAN_TYPES_H
+#define _LINUX_KMSAN_TYPES_H
+
+/* These constants are defined in the MSan LLVM instrumentation pass. */
+#define KMSAN_RETVAL_SIZE 800
+#define KMSAN_PARAM_SIZE 800
+
+struct kmsan_context_state {
+ char param_tls[KMSAN_PARAM_SIZE];
+ char retval_tls[KMSAN_RETVAL_SIZE];
+ char va_arg_tls[KMSAN_PARAM_SIZE];
+ char va_arg_origin_tls[KMSAN_PARAM_SIZE];
+ u64 va_arg_overflow_size_tls;
+ char param_origin_tls[KMSAN_PARAM_SIZE];
+ u32 retval_origin_tls;
+};
+
+#undef KMSAN_PARAM_SIZE
+#undef KMSAN_RETVAL_SIZE
+
+struct kmsan_ctx {
+ struct kmsan_context_state cstate;
+ int kmsan_in_runtime;
+ bool allow_reporting;
+};
+
+#endif /* _LINUX_KMSAN_TYPES_H */
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index 2e7a1e032c71..906521c2329c 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -25,9 +25,18 @@ enum kmsg_dump_reason {
KMSG_DUMP_PANIC,
KMSG_DUMP_OOPS,
KMSG_DUMP_EMERG,
- KMSG_DUMP_RESTART,
- KMSG_DUMP_HALT,
- KMSG_DUMP_POWEROFF,
+ KMSG_DUMP_SHUTDOWN,
+ KMSG_DUMP_MAX
+};
+
+/**
+ * struct kmsg_dump_iter - iterator for retrieving kernel messages
+ * @cur_seq: Points to the oldest message to dump
+ * @next_seq: Points after the newest message to dump
+ */
+struct kmsg_dump_iter {
+ u64 cur_seq;
+ u64 next_seq;
};
/**
@@ -42,64 +51,43 @@ struct kmsg_dumper {
struct list_head list;
void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason);
enum kmsg_dump_reason max_reason;
- bool active;
bool registered;
-
- /* private state of the kmsg iterator */
- u32 cur_idx;
- u32 next_idx;
- u64 cur_seq;
- u64 next_seq;
};
#ifdef CONFIG_PRINTK
void kmsg_dump(enum kmsg_dump_reason reason);
-bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
- char *line, size_t size, size_t *len);
-
-bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
char *line, size_t size, size_t *len);
-bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
- char *buf, size_t size, size_t *len);
-
-void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper);
+bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
+ char *buf, size_t size, size_t *len_out);
-void kmsg_dump_rewind(struct kmsg_dumper *dumper);
+void kmsg_dump_rewind(struct kmsg_dump_iter *iter);
int kmsg_dump_register(struct kmsg_dumper *dumper);
int kmsg_dump_unregister(struct kmsg_dumper *dumper);
+
+const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason);
#else
static inline void kmsg_dump(enum kmsg_dump_reason reason)
{
}
-static inline bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper,
- bool syslog, const char *line,
- size_t size, size_t *len)
-{
- return false;
-}
-
-static inline bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+static inline bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
const char *line, size_t size, size_t *len)
{
return false;
}
-static inline bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+static inline bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
char *buf, size_t size, size_t *len)
{
return false;
}
-static inline void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
-{
-}
-
-static inline void kmsg_dump_rewind(struct kmsg_dumper *dumper)
+static inline void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
{
}
@@ -112,6 +100,11 @@ static inline int kmsg_dump_unregister(struct kmsg_dumper *dumper)
{
return -EINVAL;
}
+
+static inline const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
+{
+ return "Disabled";
+}
#endif
#endif /* _LINUX_KMSG_DUMP_H */
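
With the iterator state moved out of struct kmsg_dumper onto the caller's stack, a dump callback now drives a local kmsg_dump_iter. A sketch, with example_emit() hypothetical:

/* Sketch of a dumper using the new on-stack iterator. */
static void example_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason)
{
	struct kmsg_dump_iter iter;
	static char line[1024];	/* static: keep it off the stack */
	size_t len;

	kmsg_dump_rewind(&iter);
	while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len))
		example_emit(line, len);
}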
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index e2ca0a292e21..57fb972fea05 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -7,7 +7,7 @@
* Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (c) 2006-2008 Novell Inc.
*
- * Please read Documentation/kobject.txt before using the kobject
+ * Please read Documentation/core-api/kobject.rst before using the kobject
* interface, ESPECIALLY the parts about reference counts and object
* destructors.
*/
@@ -19,17 +19,17 @@
#include <linux/list.h>
#include <linux/sysfs.h>
#include <linux/compiler.h>
+#include <linux/container_of.h>
#include <linux/spinlock.h>
#include <linux/kref.h>
#include <linux/kobject_ns.h>
-#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/uidgid.h>
#define UEVENT_HELPER_PATH_LEN 256
-#define UEVENT_NUM_ENVP 32 /* number of env pointers */
+#define UEVENT_NUM_ENVP 64 /* number of env pointers */
#define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */
#ifdef CONFIG_UEVENT_HELPER
@@ -59,7 +59,6 @@ enum kobject_action {
KOBJ_OFFLINE,
KOBJ_BIND,
KOBJ_UNBIND,
- KOBJ_MAX
};
struct kobject {
@@ -67,7 +66,7 @@ struct kobject {
struct list_head entry;
struct kobject *parent;
struct kset *kset;
- struct kobj_type *ktype;
+ const struct kobj_type *ktype;
struct kernfs_node *sd; /* sysfs directory entry */
struct kref kref;
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
@@ -91,18 +90,17 @@ static inline const char *kobject_name(const struct kobject *kobj)
return kobj->name;
}
-extern void kobject_init(struct kobject *kobj, struct kobj_type *ktype);
+extern void kobject_init(struct kobject *kobj, const struct kobj_type *ktype);
extern __printf(3, 4) __must_check
int kobject_add(struct kobject *kobj, struct kobject *parent,
const char *fmt, ...);
extern __printf(4, 5) __must_check
int kobject_init_and_add(struct kobject *kobj,
- struct kobj_type *ktype, struct kobject *parent,
+ const struct kobj_type *ktype, struct kobject *parent,
const char *fmt, ...);
extern void kobject_del(struct kobject *kobj);
-extern struct kobject * __must_check kobject_create(void);
extern struct kobject * __must_check kobject_create_and_add(const char *name,
struct kobject *parent);
@@ -119,27 +117,9 @@ extern void kobject_get_ownership(struct kobject *kobj,
kuid_t *uid, kgid_t *gid);
extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
-/**
- * kobject_has_children - Returns whether a kobject has children.
- * @kobj: the object to test
- *
- * This will return whether a kobject has other kobjects as children.
- *
- * It does NOT account for the presence of attribute files, only sub
- * directories. It also assumes there is no concurrent addition or
- * removal of such children, and thus relies on external locking.
- */
-static inline bool kobject_has_children(struct kobject *kobj)
-{
- WARN_ON_ONCE(kref_read(&kobj->kref) == 0);
-
- return kobj->sd && kobj->sd->dir.subdirs;
-}
-
struct kobj_type {
void (*release)(struct kobject *kobj);
const struct sysfs_ops *sysfs_ops;
- struct attribute **default_attrs; /* use default_groups instead */
const struct attribute_group **default_groups;
const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
const void *(*namespace)(struct kobject *kobj);
@@ -155,10 +135,9 @@ struct kobj_uevent_env {
};
struct kset_uevent_ops {
- int (* const filter)(struct kset *kset, struct kobject *kobj);
- const char *(* const name)(struct kset *kset, struct kobject *kobj);
- int (* const uevent)(struct kset *kset, struct kobject *kobj,
- struct kobj_uevent_env *env);
+ int (* const filter)(struct kobject *kobj);
+ const char *(* const name)(struct kobject *kobj);
+ int (* const uevent)(struct kobject *kobj, struct kobj_uevent_env *env);
};
struct kobj_attribute {
@@ -219,7 +198,7 @@ static inline void kset_put(struct kset *k)
kobject_put(&k->kobj);
}
-static inline struct kobj_type *get_ktype(struct kobject *kobj)
+static inline const struct kobj_type *get_ktype(struct kobject *kobj)
{
return kobj->ktype;
}
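
With ktype pointers const throughout, a kobj_type instance can now be declared const end to end. A minimal sketch; example_release() is hypothetical:

/* Hypothetical const ktype, accepted by kobject_init() after this change. */
static void example_release(struct kobject *kobj);

static const struct kobj_type example_ktype = {
	.release	= example_release,
	.sysfs_ops	= &kobj_sysfs_ops,
};

/* kobject_init(&obj->kobj, &example_ktype); */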
diff --git a/include/linux/kobject_api.h b/include/linux/kobject_api.h
new file mode 100644
index 000000000000..6e36a054c2d6
--- /dev/null
+++ b/include/linux/kobject_api.h
@@ -0,0 +1 @@
+#include <linux/kobject.h>
diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
index 069aa2ebef90..2b5b64256cf4 100644
--- a/include/linux/kobject_ns.h
+++ b/include/linux/kobject_ns.h
@@ -8,7 +8,7 @@
*
* Split from kobject.h by David Howells (dhowells@redhat.com)
*
- * Please read Documentation/kobject.txt before using the kobject
+ * Please read Documentation/core-api/kobject.rst before using the kobject
* interface, ESPECIALLY the parts about reference counts and object
* destructors.
*/
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 04bdaf01112c..a0b92be98984 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -3,7 +3,6 @@
#define _LINUX_KPROBES_H
/*
* Kernel Probes (KProbes)
- * include/linux/kprobes.h
*
* Copyright (C) IBM Corporation, 2002, 2004
*
@@ -27,6 +26,9 @@
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>
+#include <linux/refcount.h>
+#include <linux/freelist.h>
+#include <linux/rethook.h>
#include <asm/kprobes.h>
#ifdef CONFIG_KPROBES
@@ -37,7 +39,7 @@
#define KPROBE_REENTER 0x00000004
#define KPROBE_HIT_SSDONE 0x00000008
-#else /* CONFIG_KPROBES */
+#else /* !CONFIG_KPROBES */
#include <asm-generic/kprobes.h>
typedef int kprobe_opcode_t;
struct arch_specific_insn {
@@ -52,8 +54,6 @@ struct kretprobe_instance;
typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
unsigned long flags);
-typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *,
- int trapnr);
typedef int (*kretprobe_handler_t) (struct kretprobe_instance *,
struct pt_regs *);
@@ -81,12 +81,6 @@ struct kprobe {
/* Called after addr is executed, unless... */
kprobe_post_handler_t post_handler;
- /*
- * ... called if executing addr causes a fault (eg. page fault).
- * Return 1 if it handled fault, otherwise kernel will see it.
- */
- kprobe_fault_handler_t fault_handler;
-
/* Saved opcode (which has been replaced with breakpoint) */
kprobe_opcode_t opcode;
@@ -109,27 +103,28 @@ struct kprobe {
* this flag is only for optimized_kprobe.
*/
#define KPROBE_FLAG_FTRACE 8 /* probe is using ftrace */
+#define KPROBE_FLAG_ON_FUNC_ENTRY 16 /* probe is on the function entry */
/* Has this kprobe gone? */
-static inline int kprobe_gone(struct kprobe *p)
+static inline bool kprobe_gone(struct kprobe *p)
{
return p->flags & KPROBE_FLAG_GONE;
}
/* Is this kprobe disabled? */
-static inline int kprobe_disabled(struct kprobe *p)
+static inline bool kprobe_disabled(struct kprobe *p)
{
return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE);
}
/* Is this kprobe really running the optimized path? */
-static inline int kprobe_optimized(struct kprobe *p)
+static inline bool kprobe_optimized(struct kprobe *p)
{
return p->flags & KPROBE_FLAG_OPTIMIZED;
}
/* Does this kprobe use ftrace? */
-static inline int kprobe_ftrace(struct kprobe *p)
+static inline bool kprobe_ftrace(struct kprobe *p)
{
return p->flags & KPROBE_FLAG_FTRACE;
}
@@ -144,6 +139,11 @@ static inline int kprobe_ftrace(struct kprobe *p)
* ignored, due to maxactive being too low.
*
*/
+struct kretprobe_holder {
+ struct kretprobe *rp;
+ refcount_t ref;
+};
+
struct kretprobe {
struct kprobe kp;
kretprobe_handler_t handler;
@@ -151,17 +151,30 @@ struct kretprobe {
int maxactive;
int nmissed;
size_t data_size;
- struct hlist_head free_instances;
- raw_spinlock_t lock;
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+ struct rethook *rh;
+#else
+ struct freelist_head freelist;
+ struct kretprobe_holder *rph;
+#endif
};
+#define KRETPROBE_MAX_DATA_SIZE 4096
+
struct kretprobe_instance {
- struct hlist_node hlist;
- struct kretprobe *rp;
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+ struct rethook_node node;
+#else
+ union {
+ struct freelist_node freelist;
+ struct rcu_head rcu;
+ };
+ struct llist_node llist;
+ struct kretprobe_holder *rph;
kprobe_opcode_t *ret_addr;
- struct task_struct *task;
void *fp;
- char data[0];
+#endif
+ char data[];
};
struct kretprobe_blackpoint {
@@ -179,19 +192,77 @@ struct kprobe_blacklist_entry {
DECLARE_PER_CPU(struct kprobe *, current_kprobe);
DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+extern void kprobe_busy_begin(void);
+extern void kprobe_busy_end(void);
+
+#ifdef CONFIG_KRETPROBES
+/* Check whether @p is used for implementing a trampoline. */
+extern int arch_trampoline_kprobe(struct kprobe *p);
+
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
+ "Kretprobe is accessed from instance under preemptive context");
+
+ return (struct kretprobe *)READ_ONCE(ri->node.rethook->data);
+}
+static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
+{
+ return ri->node.ret_addr;
+}
+#else
+extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs);
+void arch_kretprobe_fixup_return(struct pt_regs *regs,
+ kprobe_opcode_t *correct_ret_addr);
+
+void __kretprobe_trampoline(void);
/*
- * For #ifdef avoidance:
+ * Since some architectures use structured function pointers,
+ * use dereference_function_descriptor() to get the real function address.
*/
-static inline int kprobes_built_in(void)
+static nokprobe_inline void *kretprobe_trampoline_addr(void)
{
- return 1;
+ return dereference_kernel_function_descriptor(__kretprobe_trampoline);
}
-#ifdef CONFIG_KRETPROBES
-extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
- struct pt_regs *regs);
-extern int arch_trampoline_kprobe(struct kprobe *p);
-#else /* CONFIG_KRETPROBES */
+/* If the trampoline handler is called from a kprobe, use this version */
+unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
+ void *frame_pointer);
+
+static nokprobe_inline
+unsigned long kretprobe_trampoline_handler(struct pt_regs *regs,
+ void *frame_pointer)
+{
+ unsigned long ret;
+ /*
+ * Set a dummy kprobe to avoid kretprobe recursion. Since a kretprobe
+ * never runs inside a kprobe handler, no kprobe can be running at
+ * this point.
+ */
+ kprobe_busy_begin();
+ ret = __kretprobe_trampoline_handler(regs, frame_pointer);
+ kprobe_busy_end();
+
+ return ret;
+}
+
+static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
+ "Kretprobe is accessed from instance under preemptive context");
+
+ return READ_ONCE(ri->rph->rp);
+}
+
+static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
+{
+ return (unsigned long)ri->ret_addr;
+}
+#endif /* CONFIG_KRETPROBE_ON_RETHOOK */
+
+#else /* !CONFIG_KRETPROBES */
static inline void arch_prepare_kretprobe(struct kretprobe *rp,
struct pt_regs *regs)
{
@@ -202,21 +273,15 @@ static inline int arch_trampoline_kprobe(struct kprobe *p)
}
#endif /* CONFIG_KRETPROBES */
-extern struct kretprobe_blackpoint kretprobe_blacklist[];
+/* Markers of '_kprobe_blacklist' section */
+extern unsigned long __start_kprobe_blacklist[];
+extern unsigned long __stop_kprobe_blacklist[];
-static inline void kretprobe_assert(struct kretprobe_instance *ri,
- unsigned long orig_ret_address, unsigned long trampoline_address)
-{
- if (!orig_ret_address || (orig_ret_address == trampoline_address)) {
- printk("kretprobe BUG!: Processing kretprobe %p @ %p\n",
- ri->rp, ri->rp->kp.addr);
- BUG();
- }
-}
+extern struct kretprobe_blackpoint kretprobe_blacklist[];
#ifdef CONFIG_KPROBES_SANITY_TEST
extern int init_test_probes(void);
-#else
+#else /* !CONFIG_KPROBES_SANITY_TEST */
static inline int init_test_probes(void)
{
return 0;
@@ -227,12 +292,10 @@ extern int arch_prepare_kprobe(struct kprobe *p);
extern void arch_arm_kprobe(struct kprobe *p);
extern void arch_disarm_kprobe(struct kprobe *p);
extern int arch_init_kprobes(void);
-extern void show_registers(struct pt_regs *regs);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
extern int arch_populate_kprobe_blacklist(void);
-extern bool arch_kprobe_on_func_entry(unsigned long offset);
-extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
+extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
extern bool within_kprobe_blacklist(unsigned long addr);
extern int kprobe_add_ksym_blacklist(unsigned long entry);
@@ -242,6 +305,7 @@ struct kprobe_insn_cache {
struct mutex mutex;
void *(*alloc)(void); /* allocate insn page */
void (*free)(void *); /* free insn page */
+ const char *sym; /* symbol for insn pages */
struct list_head pages; /* list of kprobe_insn_page */
size_t insn_size; /* size of instruction slot */
int nr_garbage;
@@ -272,7 +336,11 @@ static inline bool is_kprobe_##__name##_slot(unsigned long addr) \
{ \
return __is_insn_slot_addr(&kprobe_##__name##_slots, addr); \
}
-#else /* __ARCH_WANT_KPROBES_INSN_SLOT */
+#define KPROBE_INSN_PAGE_SYM "kprobe_insn_page"
+#define KPROBE_OPTINSN_PAGE_SYM "kprobe_optinsn_page"
+int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
+ unsigned long *value, char *type, char *sym);
+#else /* !__ARCH_WANT_KPROBES_INSN_SLOT */
#define DEFINE_INSN_CACHE_OPS(__name) \
static inline bool is_kprobe_##__name##_slot(unsigned long addr) \
{ \
@@ -303,41 +371,35 @@ extern void arch_unoptimize_kprobes(struct list_head *oplist,
struct list_head *done_list);
extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
- unsigned long addr);
+ kprobe_opcode_t *addr);
extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
DEFINE_INSN_CACHE_OPS(optinsn);
-#ifdef CONFIG_SYSCTL
-extern int sysctl_kprobes_optimization;
-extern int proc_kprobes_optimization_handler(struct ctl_table *table,
- int write, void __user *buffer,
- size_t *length, loff_t *ppos);
-#endif
extern void wait_for_kprobe_optimizer(void);
-#else
+#else /* !CONFIG_OPTPROBES */
static inline void wait_for_kprobe_optimizer(void) { }
#endif /* CONFIG_OPTPROBES */
+
#ifdef CONFIG_KPROBES_ON_FTRACE
extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *ops, struct pt_regs *regs);
+ struct ftrace_ops *ops, struct ftrace_regs *fregs);
extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
-#endif
-
-int arch_check_ftrace_location(struct kprobe *p);
+#else
+static inline int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_KPROBES_ON_FTRACE */
/* Get the kprobe at this addr (if any) - called with preemption disabled */
struct kprobe *get_kprobe(void *addr);
-void kretprobe_hash_lock(struct task_struct *tsk,
- struct hlist_head **head, unsigned long *flags);
-void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags);
-struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk);
/* kprobe_running() will just return the current_kprobe on this CPU */
static inline struct kprobe *kprobe_running(void)
{
- return (__this_cpu_read(current_kprobe));
+ return __this_cpu_read(current_kprobe);
}
static inline void reset_current_kprobe(void)
@@ -351,19 +413,25 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
}
kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset);
+kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset, bool *on_func_entry);
+
int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
int register_kprobes(struct kprobe **kps, int num);
void unregister_kprobes(struct kprobe **kps, int num);
-unsigned long arch_deref_entry_point(void *);
int register_kretprobe(struct kretprobe *rp);
void unregister_kretprobe(struct kretprobe *rp);
int register_kretprobes(struct kretprobe **rps, int num);
void unregister_kretprobes(struct kretprobe **rps, int num);
+#if defined(CONFIG_KRETPROBE_ON_RETHOOK) || !defined(CONFIG_KRETPROBES)
+#define kprobe_flush_task(tk) do {} while (0)
+#else
void kprobe_flush_task(struct task_struct *tk);
-void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
+#endif
+
+void kprobe_free_init_mem(void);
int disable_kprobe(struct kprobe *kp);
int enable_kprobe(struct kprobe *kp);
@@ -371,14 +439,17 @@ int enable_kprobe(struct kprobe *kp);
void dump_kprobe(struct kprobe *kp);
void *alloc_insn_page(void);
-void free_insn_page(void *page);
+void *alloc_optinsn_page(void);
+void free_optinsn_page(void *page);
+
+int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+ char *sym);
+
+int arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
+ char *type, char *sym);
#else /* !CONFIG_KPROBES: */
-static inline int kprobes_built_in(void)
-{
- return 0;
-}
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
return 0;
@@ -391,13 +462,16 @@ static inline struct kprobe *kprobe_running(void)
{
return NULL;
}
+#define kprobe_busy_begin() do {} while (0)
+#define kprobe_busy_end() do {} while (0)
+
static inline int register_kprobe(struct kprobe *p)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int register_kprobes(struct kprobe **kps, int num)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline void unregister_kprobe(struct kprobe *p)
{
@@ -407,11 +481,11 @@ static inline void unregister_kprobes(struct kprobe **kps, int num)
}
static inline int register_kretprobe(struct kretprobe *rp)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int register_kretprobes(struct kretprobe **rps, int num)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline void unregister_kretprobe(struct kretprobe *rp)
{
@@ -422,20 +496,29 @@ static inline void unregister_kretprobes(struct kretprobe **rps, int num)
static inline void kprobe_flush_task(struct task_struct *tk)
{
}
+static inline void kprobe_free_init_mem(void)
+{
+}
static inline int disable_kprobe(struct kprobe *kp)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int enable_kprobe(struct kprobe *kp)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline bool within_kprobe_blacklist(unsigned long addr)
{
return true;
}
+static inline int kprobe_get_kallsym(unsigned int symnum, unsigned long *value,
+ char *type, char *sym)
+{
+ return -ERANGE;
+}
#endif /* CONFIG_KPROBES */
+
static inline int disable_kretprobe(struct kretprobe *rp)
{
return disable_kprobe(&rp->kp);
@@ -450,19 +533,56 @@ static inline bool is_kprobe_insn_slot(unsigned long addr)
{
return false;
}
-#endif
+#endif /* !CONFIG_KPROBES */
+
#ifndef CONFIG_OPTPROBES
static inline bool is_kprobe_optinsn_slot(unsigned long addr)
{
return false;
}
+#endif /* !CONFIG_OPTPROBES */
+
+#ifdef CONFIG_KRETPROBES
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
+{
+ return is_rethook_trampoline(addr);
+}
+
+static nokprobe_inline
+unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
+ struct llist_node **cur)
+{
+ return rethook_find_ret_addr(tsk, (unsigned long)fp, cur);
+}
+#else
+static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
+{
+ return (void *)addr == kretprobe_trampoline_addr();
+}
+
+unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
+ struct llist_node **cur);
+#endif
+#else
+static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
+{
+ return false;
+}
+
+static nokprobe_inline
+unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
+ struct llist_node **cur)
+{
+ return 0;
+}
#endif
/* Returns true if kprobes handled the fault */
static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs,
unsigned int trap)
{
- if (!kprobes_built_in())
+ if (!IS_ENABLED(CONFIG_KPROBES))
return false;
if (user_mode(regs))
return false;
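As a minimal registration sketch against this interface (the probed symbol and all example_* names are placeholders, not part of this patch; note that the fault_handler member removed above no longer exists in struct kprobe):

#include <linux/kprobes.h>
#include <linux/module.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("pre-handler: hit %s\n", p->symbol_name);
	return 0;	/* 0 = execute the original instruction as usual */
}

static struct kprobe example_kp = {
	.symbol_name = "kernel_clone",	/* assumed symbol; pick any traceable one */
	.pre_handler = example_pre,
};

static int __init example_init(void)
{
	/* Returns -EOPNOTSUPP (no longer -ENOSYS) when kprobes are not built in. */
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");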
diff --git a/include/linux/kref_api.h b/include/linux/kref_api.h
new file mode 100644
index 000000000000..d67e554721d2
--- /dev/null
+++ b/include/linux/kref_api.h
@@ -0,0 +1 @@
+#include <linux/kref.h>
diff --git a/include/linux/ks0108.h b/include/linux/ks0108.h
index 0738389b42b6..1a37a664f915 100644
--- a/include/linux/ks0108.h
+++ b/include/linux/ks0108.h
@@ -4,7 +4,7 @@
* Version: 0.1.0
* Description: ks0108 LCD Controller driver header
*
- * Author: Copyright (C) Miguel Ojeda Sandonis
+ * Author: Copyright (C) Miguel Ojeda <ojeda@kernel.org>
* Date: 2006-10-31
*/
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index e48b1e453ff5..7e232ba59b86 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -15,9 +15,6 @@
#include <linux/sched.h>
#include <linux/sched/coredump.h>
-struct stable_node;
-struct mem_cgroup;
-
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, unsigned long *vm_flags);
@@ -51,10 +48,8 @@ static inline void ksm_exit(struct mm_struct *mm)
struct page *ksm_might_need_to_copy(struct page *page,
struct vm_area_struct *vma, unsigned long address);
-void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
-void ksm_migrate_page(struct page *newpage, struct page *oldpage);
-bool reuse_ksm_page(struct page *page,
- struct vm_area_struct *vma, unsigned long address);
+void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
+void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
#else /* !CONFIG_KSM */
@@ -80,18 +75,13 @@ static inline struct page *ksm_might_need_to_copy(struct page *page,
return page;
}
-static inline void rmap_walk_ksm(struct page *page,
+static inline void rmap_walk_ksm(struct folio *folio,
struct rmap_walk_control *rwc)
{
}
-static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
-{
-}
-static inline bool reuse_ksm_page(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
+static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
- return false;
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */
diff --git a/include/linux/kstrtox.h b/include/linux/kstrtox.h
new file mode 100644
index 000000000000..529974e22ea7
--- /dev/null
+++ b/include/linux/kstrtox.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KSTRTOX_H
+#define _LINUX_KSTRTOX_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+/* Internal, do not use. */
+int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
+int __must_check _kstrtol(const char *s, unsigned int base, long *res);
+
+int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
+int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
+
+/**
+ * kstrtoul - convert a string to an unsigned long
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign, but not a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics: if it begins with 0x the number is parsed as
+ * hexadecimal (case insensitive); if it otherwise begins with 0, it is parsed
+ * as octal; otherwise it is parsed as decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Preferred over simple_strtoul(). Return code must be checked.
+ */
+static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
+{
+ /*
+ * We want to shortcut function call, but
+ * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0.
+ */
+ if (sizeof(unsigned long) == sizeof(unsigned long long) &&
+ __alignof__(unsigned long) == __alignof__(unsigned long long))
+ return kstrtoull(s, base, (unsigned long long *)res);
+ else
+ return _kstrtoul(s, base, res);
+}
+
+/**
+ * kstrtol - convert a string to a long
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign or a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics: if it begins with 0x the number is parsed as
+ * hexadecimal (case insensitive); if it otherwise begins with 0, it is parsed
+ * as octal; otherwise it is parsed as decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Preferred over simple_strtol(). Return code must be checked.
+ */
+static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
+{
+ /*
+ * We want to shortcut function call, but
+ * __builtin_types_compatible_p(long, long long) = 0.
+ */
+ if (sizeof(long) == sizeof(long long) &&
+ __alignof__(long) == __alignof__(long long))
+ return kstrtoll(s, base, (long long *)res);
+ else
+ return _kstrtol(s, base, res);
+}
+
+int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
+int __must_check kstrtoint(const char *s, unsigned int base, int *res);
+
+static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
+{
+ return kstrtoull(s, base, res);
+}
+
+static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
+{
+ return kstrtoll(s, base, res);
+}
+
+static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
+{
+ return kstrtouint(s, base, res);
+}
+
+static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
+{
+ return kstrtoint(s, base, res);
+}
+
+int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
+int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
+int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
+int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
+int __must_check kstrtobool(const char *s, bool *res);
+
+int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
+int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
+int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
+int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
+int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
+int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
+int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
+int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
+int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
+int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
+int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
+
+static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
+{
+ return kstrtoull_from_user(s, count, base, res);
+}
+
+static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
+{
+ return kstrtoll_from_user(s, count, base, res);
+}
+
+static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
+{
+ return kstrtouint_from_user(s, count, base, res);
+}
+
+static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
+{
+ return kstrtoint_from_user(s, count, base, res);
+}
+
+/*
+ * Use kstrto<foo> instead.
+ *
+ * NOTE: simple_strto<foo> does not check for range overflow and,
+ * depending on the input, may give interesting results.
+ *
+ * Use these functions if and only if you cannot use kstrto<foo>, because
+ * the conversion ends on the first non-digit character, which may be far
+ * beyond the supported range. They can be useful for parsing strings like
+ * 10x50 or 12:21 without altering the original string or a temporary
+ * buffer in use. Keep the above caveat in mind.
+ */
+
+extern unsigned long simple_strtoul(const char *,char **,unsigned int);
+extern long simple_strtol(const char *,char **,unsigned int);
+extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
+extern long long simple_strtoll(const char *,char **,unsigned int);
+
+static inline int strtobool(const char *s, bool *res)
+{
+ return kstrtobool(s, res);
+}
+
+#endif /* _LINUX_KSTRTOX_H */
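A brief caller sketch for the interface above (parse_limit is an invented helper):

#include <linux/kstrtox.h>

static int parse_limit(const char *buf, unsigned long *limit)
{
	/*
	 * base 0 auto-detects: "0x.." is hex, a leading "0" is octal,
	 * anything else is decimal.
	 */
	int ret = kstrtoul(buf, 0, limit);

	if (ret)
		return ret;	/* -EINVAL on parse error, -ERANGE on overflow */
	return 0;
}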
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 0f9da966934e..30e5bec81d2b 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -5,6 +5,8 @@
#include <linux/err.h>
#include <linux/sched.h>
+struct mm_struct;
+
__printf(4, 5)
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
void *data,
@@ -16,7 +18,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
* @threadfn: the function to run in the thread
* @data: data pointer for @threadfn()
* @namefmt: printf-style format string for the thread name
- * @arg...: arguments for @namefmt.
+ * @arg: arguments for @namefmt.
*
* This macro will create a kthread on the current node, leaving it in
* the stopped state. This is just a helper for kthread_create_on_node();
@@ -31,6 +33,12 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
unsigned int cpu,
const char *namefmt);
+void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk);
+bool set_kthread_struct(struct task_struct *p);
+
+void kthread_set_per_cpu(struct task_struct *k, int cpu);
+bool kthread_is_per_cpu(struct task_struct *k);
+
/**
* kthread_run - create and wake a thread.
* @threadfn: the function to run until signal_pending(current).
@@ -49,6 +57,31 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
__k; \
})
+/**
+ * kthread_run_on_cpu - create and wake a cpu bound thread.
+ * @threadfn: the function to run until signal_pending(current).
+ * @data: data ptr for @threadfn.
+ * @cpu: The cpu to which the thread should be bound.
+ * @namefmt: printf-style name for the thread. Format is restricted
+ * to "name.*%u". Code fills in cpu number.
+ *
+ * Description: Convenient wrapper for kthread_create_on_cpu()
+ * followed by wake_up_process(). Returns the kthread or
+ * ERR_PTR(-ENOMEM).
+ */
+static inline struct task_struct *
+kthread_run_on_cpu(int (*threadfn)(void *data), void *data,
+ unsigned int cpu, const char *namefmt)
+{
+ struct task_struct *p;
+
+ p = kthread_create_on_cpu(threadfn, data, cpu, namefmt);
+ if (!IS_ERR(p))
+ wake_up_process(p);
+
+ return p;
+}
+
void free_kthread_struct(struct task_struct *k);
void kthread_bind(struct task_struct *k, unsigned int cpu);
void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
@@ -57,11 +90,14 @@ bool kthread_should_stop(void);
bool kthread_should_park(void);
bool __kthread_should_park(struct task_struct *k);
bool kthread_freezable_should_stop(bool *was_frozen);
+void *kthread_func(struct task_struct *k);
void *kthread_data(struct task_struct *k);
void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);
+void kthread_exit(long result) __noreturn;
+void kthread_complete_and_exit(struct completion *, long) __noreturn;
int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
@@ -105,12 +141,6 @@ struct kthread_delayed_work {
struct timer_list timer;
};
-#define KTHREAD_WORKER_INIT(worker) { \
- .lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock), \
- .work_list = LIST_HEAD_INIT((worker).work_list), \
- .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
- }
-
#define KTHREAD_WORK_INIT(work, fn) { \
.node = LIST_HEAD_INIT((work).node), \
.func = (fn), \
@@ -122,9 +152,6 @@ struct kthread_delayed_work {
TIMER_IRQSAFE), \
}
-#define DEFINE_KTHREAD_WORKER(worker) \
- struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)
-
#define DEFINE_KTHREAD_WORK(work, fn) \
struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
@@ -132,19 +159,6 @@ struct kthread_delayed_work {
struct kthread_delayed_work dwork = \
KTHREAD_DELAYED_WORK_INIT(dwork, fn)
-/*
- * kthread_worker.lock needs its own lockdep class key when defined on
- * stack with lockdep enabled. Use the following macros in such cases.
- */
-#ifdef CONFIG_LOCKDEP
-# define KTHREAD_WORKER_INIT_ONSTACK(worker) \
- ({ kthread_init_worker(&worker); worker; })
-# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \
- struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
-#else
-# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
-#endif
-
extern void __kthread_init_worker(struct kthread_worker *worker,
const char *name, struct lock_class_key *key);
@@ -165,7 +179,8 @@ extern void __kthread_init_worker(struct kthread_worker *worker,
do { \
kthread_init_work(&(dwork)->work, (fn)); \
timer_setup(&(dwork)->timer, \
- kthread_delayed_work_timer_fn, 0); \
+ kthread_delayed_work_timer_fn, \
+ TIMER_IRQSAFE); \
} while (0)
int kthread_worker_fn(void *worker_ptr);
@@ -197,6 +212,9 @@ bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);
void kthread_destroy_worker(struct kthread_worker *worker);
+void kthread_use_mm(struct mm_struct *mm);
+void kthread_unuse_mm(struct mm_struct *mm);
+
struct cgroup_subsys_state;
#ifdef CONFIG_BLK_CGROUP
@@ -204,9 +222,5 @@ void kthread_associate_blkcg(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *kthread_blkcg(void);
#else
static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { }
-static inline struct cgroup_subsys_state *kthread_blkcg(void)
-{
- return NULL;
-}
#endif
#endif /* _LINUX_KTHREAD_H */
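A usage sketch for the new kthread_run_on_cpu() wrapper (worker_fn and the thread name are invented; <linux/kthread.h> is assumed):

static int worker_fn(void *data)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return 0;
}

static struct task_struct *start_cpu_worker(unsigned int cpu)
{
	/* namefmt must contain %u, which is filled in with the cpu number */
	return kthread_run_on_cpu(worker_fn, NULL, cpu, "example/%u");
}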
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index d1fb05135665..73f20deb497d 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -23,6 +23,7 @@
#include <linux/time.h>
#include <linux/jiffies.h>
+#include <asm/bug.h>
/* Nanosecond scalar representation for kernel time values */
typedef s64 ktime_t;
@@ -216,14 +217,7 @@ static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
}
}
-/*
- * The resolution of the clocks. The resolution value is returned in
- * the clock_getres() system call to give application programmers an
- * idea of the (in)accuracy of timers. Timer values are rounded up to
- * this resolution values.
- */
-#define LOW_RES_NSEC TICK_NSEC
-#define KTIME_LOW_RES (LOW_RES_NSEC)
+#include <vdso/ktime.h>
static inline ktime_t ns_to_ktime(u64 ns)
{
@@ -236,6 +230,5 @@ static inline ktime_t ms_to_ktime(u64 ms)
}
# include <linux/timekeeping.h>
-# include <linux/timekeeping32.h>
#endif
diff --git a/include/linux/ktime_api.h b/include/linux/ktime_api.h
new file mode 100644
index 000000000000..f697d493960f
--- /dev/null
+++ b/include/linux/ktime_api.h
@@ -0,0 +1 @@
+#include <linux/ktime.h>
diff --git a/include/linux/kvm_dirty_ring.h b/include/linux/kvm_dirty_ring.h
new file mode 100644
index 000000000000..906f899813dc
--- /dev/null
+++ b/include/linux/kvm_dirty_ring.h
@@ -0,0 +1,97 @@
+#ifndef KVM_DIRTY_RING_H
+#define KVM_DIRTY_RING_H
+
+#include <linux/kvm.h>
+
+/**
+ * kvm_dirty_ring: KVM internal dirty ring structure
+ *
+ * @dirty_index: free running counter that points to the next slot in
+ * dirty_ring->dirty_gfns, where a new dirty page should go
+ * @reset_index: free running counter that points to the next dirty page
+ * in dirty_ring->dirty_gfns for which dirty trap needs to
+ * be reenabled
+ * @size: size of the compact list, dirty_ring->dirty_gfns
+ * @soft_limit: when the number of dirty pages in the list reaches this
+ * limit, the vcpu that owns this ring should exit to userspace
+ * to allow userspace to harvest all the dirty pages
+ * @dirty_gfns: the array to keep the dirty gfns
+ * @index: index of this dirty ring
+ */
+struct kvm_dirty_ring {
+ u32 dirty_index;
+ u32 reset_index;
+ u32 size;
+ u32 soft_limit;
+ struct kvm_dirty_gfn *dirty_gfns;
+ int index;
+};
+
+#ifndef CONFIG_HAVE_KVM_DIRTY_RING
+/*
+ * If CONFIG_HAVE_KVM_DIRTY_RING is not defined, kvm_dirty_ring.o should
+ * not be built either, so define these nop functions for the arch.
+ */
+static inline u32 kvm_dirty_ring_get_rsvd_entries(void)
+{
+ return 0;
+}
+
+static inline int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring,
+ int index, u32 size)
+{
+ return 0;
+}
+
+static inline int kvm_dirty_ring_reset(struct kvm *kvm,
+ struct kvm_dirty_ring *ring)
+{
+ return 0;
+}
+
+static inline void kvm_dirty_ring_push(struct kvm_dirty_ring *ring,
+ u32 slot, u64 offset)
+{
+}
+
+static inline struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring,
+ u32 offset)
+{
+ return NULL;
+}
+
+static inline void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
+{
+}
+
+static inline bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
+{
+ return true;
+}
+
+#else /* CONFIG_HAVE_KVM_DIRTY_RING */
+
+u32 kvm_dirty_ring_get_rsvd_entries(void);
+int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size);
+
+/*
+ * Called with kvm->slots_lock held; returns the number of
+ * processed pages.
+ */
+int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring);
+
+/*
+ * Pushes a dirty slot/offset pair onto the ring. Note that the function
+ * returns void, so the caller must ensure there is room beforehand, e.g.
+ * by checking kvm_dirty_ring_soft_full().
+ */
+void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, u32 slot, u64 offset);
+
+/* for use in vm_operations_struct */
+struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset);
+
+void kvm_dirty_ring_free(struct kvm_dirty_ring *ring);
+bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring);
+
+#endif /* CONFIG_HAVE_KVM_DIRTY_RING */
+
+#endif /* KVM_DIRTY_RING_H */
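As a sketch of the intended flow (the helper name is invented; only the declarations above are from this header): before re-entering the guest, a vCPU is expected to check the soft limit and, if it has been reached, exit to userspace with KVM_EXIT_DIRTY_RING_FULL so the ring can be harvested and reset:

static bool example_need_dirty_ring_exit(struct kvm_vcpu *vcpu)
{
	/* True once the number of un-harvested entries reaches soft_limit. */
	return kvm_dirty_ring_soft_full(&vcpu->dirty_ring);
}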
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index bcb9b2ac0791..18592bdf4c1b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -10,7 +10,9 @@
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
+#include <linux/sched/stat.h>
#include <linux/bug.h>
+#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
@@ -23,9 +25,16 @@
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
-#include <linux/swait.h>
+#include <linux/rcuwait.h>
#include <linux/refcount.h>
#include <linux/nospec.h>
+#include <linux/notifier.h>
+#include <linux/ftrace.h>
+#include <linux/hashtable.h>
+#include <linux/instrumentation.h>
+#include <linux/interval_tree.h>
+#include <linux/rbtree.h>
+#include <linux/xarray.h>
#include <asm/signal.h>
#include <linux/kvm.h>
@@ -34,9 +43,10 @@
#include <linux/kvm_types.h>
#include <asm/kvm_host.h>
+#include <linux/kvm_dirty_ring.h>
-#ifndef KVM_MAX_VCPU_ID
-#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
+#ifndef KVM_MAX_VCPU_IDS
+#define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS
#endif
/*
@@ -138,22 +148,40 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQUEST_MASK GENMASK(7,0)
#define KVM_REQUEST_NO_WAKEUP BIT(8)
#define KVM_REQUEST_WAIT BIT(9)
+#define KVM_REQUEST_NO_ACTION BIT(10)
/*
* Architecture-independent vcpu->requests bit members
- * Bits 4-7 are reserved for more arch-independent bits.
+ * Bits 3-7 are reserved for more arch-independent bits.
*/
#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_PENDING_TIMER 2
-#define KVM_REQ_UNHALT 3
+#define KVM_REQ_VM_DEAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_UNBLOCK 2
#define KVM_REQUEST_ARCH_BASE 8
+/*
+ * KVM_REQ_OUTSIDE_GUEST_MODE exists purely as a way to force the vCPU to
+ * OUTSIDE_GUEST_MODE. KVM_REQ_OUTSIDE_GUEST_MODE differs from a vCPU "kick"
+ * in that it ensures the vCPU has reached OUTSIDE_GUEST_MODE before continuing
+ * on. A kick only guarantees that the vCPU is on its way out, e.g. a previous
+ * kick may have set vcpu->mode to EXITING_GUEST_MODE, and so there's no
+ * guarantee the vCPU received an IPI and has actually exited guest mode.
+ */
+#define KVM_REQ_OUTSIDE_GUEST_MODE (KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+
#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
(unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0)
+bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
+ unsigned long *vcpu_bitmap);
+bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
+bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
+ struct kvm_vcpu *except);
+bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
+ unsigned long *vcpu_bitmap);
+
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -190,8 +218,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev);
-void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
- struct kvm_io_device *dev);
+int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
gpa_t addr);
@@ -206,15 +234,30 @@ struct kvm_async_pf {
unsigned long addr;
struct kvm_arch_async_pf arch;
bool wakeup_all;
+ bool notpresent_injected;
};
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
- unsigned long hva, struct kvm_arch_async_pf *arch);
+bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ unsigned long hva, struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
+#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+struct kvm_gfn_range {
+ struct kvm_memory_slot *slot;
+ gfn_t start;
+ gfn_t end;
+ pte_t pte;
+ bool may_block;
+};
+bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
+bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
+bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
+bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
+#endif
+
enum {
OUTSIDE_GUEST_MODE,
IN_GUEST_MODE,
@@ -248,6 +291,11 @@ static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
return !!map->hva;
}
+static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
+{
+ return single_task_running() && !need_resched() && ktime_before(cur, stop);
+}
+
/*
* Sometimes a large or cross-page mmio needs to be broken up into separate
* exits for userspace servicing.
@@ -266,22 +314,23 @@ struct kvm_vcpu {
int cpu;
int vcpu_id; /* id given by userspace at creation */
int vcpu_idx; /* index in kvm->vcpus array */
- int srcu_idx;
+ int ____srcu_idx; /* Don't use this directly. You've been warned. */
+#ifdef CONFIG_PROVE_RCU
+ int srcu_depth;
+#endif
int mode;
u64 requests;
unsigned long guest_debug;
- int pre_pcpu;
- struct list_head blocked_vcpu_list;
-
struct mutex mutex;
struct kvm_run *run;
- struct swait_queue_head wq;
+#ifndef __KVM_HAVE_ARCH_WQP
+ struct rcuwait wait;
+#endif
struct pid __rcu *pid;
int sigset_active;
sigset_t sigset;
- struct kvm_vcpu_stat stat;
unsigned int halt_poll_ns;
bool valid_wakeup;
@@ -318,9 +367,169 @@ struct kvm_vcpu {
bool preempted;
bool ready;
struct kvm_vcpu_arch arch;
- struct dentry *debugfs_dentry;
+ struct kvm_vcpu_stat stat;
+ char stats_id[KVM_STATS_NAME_SIZE];
+ struct kvm_dirty_ring dirty_ring;
+
+ /*
+ * The most recently used memslot by this vCPU and the slots generation
+ * for which it is valid.
+ * No wraparound protection is needed since generations won't overflow in
+ * thousands of years, even assuming 1M memslot operations per second.
+ */
+ struct kvm_memory_slot *last_used_slot;
+ u64 last_used_slot_gen;
};
+/*
+ * Start accounting time towards a guest.
+ * Must be called before entering guest context.
+ */
+static __always_inline void guest_timing_enter_irqoff(void)
+{
+ /*
+ * This is running in ioctl context, so it's safe to assume that it's
+ * the pending stime cputime to flush.
+ */
+ instrumentation_begin();
+ vtime_account_guest_enter();
+ instrumentation_end();
+}
+
+/*
+ * Enter guest context and enter an RCU extended quiescent state.
+ *
+ * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
+ * unsafe to use any code which may directly or indirectly use RCU, tracing
+ * (including IRQ flag tracing), or lockdep. All code in this period must be
+ * non-instrumentable.
+ */
+static __always_inline void guest_context_enter_irqoff(void)
+{
+ /*
+ * KVM does not hold any references to RCU-protected data when it
+ * switches the CPU into guest mode. In fact, switching to guest mode
+ * is very similar to exiting to userspace from an RCU point of view.
+ * In addition, the CPU may stay in guest mode for quite a long time
+ * (up to one time slice). Let's treat guest mode as a quiescent
+ * state, just like we do with user-mode execution.
+ */
+ if (!context_tracking_guest_enter()) {
+ instrumentation_begin();
+ rcu_virt_note_context_switch(smp_processor_id());
+ instrumentation_end();
+ }
+}
+
+/*
+ * Deprecated. Architectures should move to guest_timing_enter_irqoff() and
+ * guest_state_enter_irqoff().
+ */
+static __always_inline void guest_enter_irqoff(void)
+{
+ guest_timing_enter_irqoff();
+ guest_context_enter_irqoff();
+}
+
+/**
+ * guest_state_enter_irqoff - Fixup state when entering a guest
+ *
+ * Entry to a guest will enable interrupts, but the kernel is in an
+ * interrupts-disabled state when this is invoked. Also tell RCU about it.
+ *
+ * 1) Trace interrupts on state
+ * 2) Invoke context tracking if enabled to adjust RCU state
+ * 3) Tell lockdep that interrupts are enabled
+ *
+ * Invoked from architecture specific code before entering a guest.
+ * Must be called with interrupts disabled and the caller must be
+ * non-instrumentable.
+ * The caller has to invoke guest_timing_enter_irqoff() before this.
+ *
+ * Note: this is analogous to exit_to_user_mode().
+ */
+static __always_inline void guest_state_enter_irqoff(void)
+{
+ instrumentation_begin();
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare();
+ instrumentation_end();
+
+ guest_context_enter_irqoff();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
+/*
+ * Exit guest context and exit an RCU extended quiescent state.
+ *
+ * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
+ * unsafe to use any code which may directly or indirectly use RCU, tracing
+ * (including IRQ flag tracing), or lockdep. All code in this period must be
+ * non-instrumentable.
+ */
+static __always_inline void guest_context_exit_irqoff(void)
+{
+ context_tracking_guest_exit();
+}
+
+/*
+ * Stop accounting time towards a guest.
+ * Must be called after exiting guest context.
+ */
+static __always_inline void guest_timing_exit_irqoff(void)
+{
+ instrumentation_begin();
+ /* Flush the guest cputime we spent on the guest */
+ vtime_account_guest_exit();
+ instrumentation_end();
+}
+
+/*
+ * Deprecated. Architectures should move to guest_state_exit_irqoff() and
+ * guest_timing_exit_irqoff().
+ */
+static __always_inline void guest_exit_irqoff(void)
+{
+ guest_context_exit_irqoff();
+ guest_timing_exit_irqoff();
+}
+
+static inline void guest_exit(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ guest_exit_irqoff();
+ local_irq_restore(flags);
+}
+
+/**
+ * guest_state_exit_irqoff - Establish state when returning from guest mode
+ *
+ * Entry from a guest disables interrupts, but guest mode is traced as
+ * interrupts enabled. Also, with NO_HZ_FULL, RCU might be idle.
+ *
+ * 1) Tell lockdep that interrupts are disabled
+ * 2) Invoke context tracking if enabled to reactivate RCU
+ * 3) Trace interrupts off state
+ *
+ * Invoked from architecture specific code after exiting a guest.
+ * Must be invoked with interrupts disabled and the caller must be
+ * non-instrumentable.
+ * The caller has to invoke guest_timing_exit_irqoff() after this.
+ *
+ * Note: this is analogous to enter_from_user_mode().
+ */
+static __always_inline void guest_state_exit_irqoff(void)
+{
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ guest_context_exit_irqoff();
+
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+ instrumentation_end();
+}
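Read together, the kerneldoc above implies the following call order for an architecture's vcpu-run loop (a schematic sketch only, not taken from any real architecture):

static void example_arch_run_vcpu(struct kvm_vcpu *vcpu)
{
	local_irq_disable();

	guest_timing_enter_irqoff();	/* start guest time accounting */
	guest_state_enter_irqoff();	/* IRQ tracing + RCU quiescent state */

	/*
	 * ... low-level guest entry/exit goes here; everything between
	 * these pairs must be non-instrumentable ...
	 */

	guest_state_exit_irqoff();	/* reactivate RCU, trace IRQs off */
	guest_timing_exit_irqoff();	/* flush guest cputime */

	local_irq_enable();
}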
+
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
/*
@@ -338,7 +547,26 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
*/
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
+/*
+ * Since at idle each memslot belongs to two memslot sets, it has to
+ * contain two embedded nodes for each data structure that it forms a
+ * part of.
+ *
+ * Two memslot sets (one active and one inactive) are necessary so the VM
+ * continues to run on one memslot set while the other is being modified.
+ *
+ * These two memslot sets normally point to the same set of memslots.
+ * They can, however, be desynchronized when performing a memslot management
+ * operation by replacing the memslot to be modified by its copy.
+ * After the operation is complete, both memslot sets once again point to
+ * the same, common set of memslot data.
+ *
+ * The memslots themselves are independent of each other so they can be
+ * individually added or deleted.
+ */
struct kvm_memory_slot {
+ struct hlist_node id_node[2];
+ struct interval_tree_node hva_node[2];
+ struct rb_node gfn_node[2];
gfn_t base_gfn;
unsigned long npages;
unsigned long *dirty_bitmap;
@@ -346,8 +574,14 @@ struct kvm_memory_slot {
unsigned long userspace_addr;
u32 flags;
short id;
+ u16 as_id;
};
+static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
+{
+ return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
+}
+
static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
@@ -360,6 +594,10 @@ static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *mem
return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
}
+#ifndef KVM_DIRTY_LOG_MANUAL_CAPS
+#define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
+#endif
+
struct kvm_s390_adapter_int {
u64 ind_addr;
u64 summary_addr;
@@ -373,6 +611,13 @@ struct kvm_hv_sint {
u32 sint;
};
+struct kvm_xen_evtchn {
+ u32 port;
+ u32 vcpu_id;
+ int vcpu_idx;
+ u32 priority;
+};
+
struct kvm_kernel_irq_routing_entry {
u32 gsi;
u32 type;
@@ -393,6 +638,7 @@ struct kvm_kernel_irq_routing_entry {
} msi;
struct kvm_s390_adapter_int adapter;
struct kvm_hv_sint hv_sint;
+ struct kvm_xen_evtchn xen_evtchn;
};
struct hlist_node link;
};
@@ -405,17 +651,16 @@ struct kvm_irq_routing_table {
* Array indexed by gsi. Each entry contains list of irq chips
* the gsi is connected to.
*/
- struct hlist_head map[0];
+ struct hlist_head map[];
};
#endif
-#ifndef KVM_PRIVATE_MEM_SLOTS
-#define KVM_PRIVATE_MEM_SLOTS 0
+#ifndef KVM_INTERNAL_MEM_SLOTS
+#define KVM_INTERNAL_MEM_SLOTS 0
#endif
-#ifndef KVM_MEM_SLOTS_NUM
-#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
-#endif
+#define KVM_MEM_SLOTS_NUM SHRT_MAX
+#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS)
#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
@@ -424,26 +669,56 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
}
#endif
-/*
- * Note:
- * memslots are not sorted by id anymore, please use id_to_memslot()
- * to get the memslot by its id.
- */
struct kvm_memslots {
u64 generation;
- struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
- /* The mapping table from slot id to the index in memslots[]. */
- short id_to_index[KVM_MEM_SLOTS_NUM];
- atomic_t lru_slot;
- int used_slots;
+ atomic_long_t last_used_slot;
+ struct rb_root_cached hva_tree;
+ struct rb_root gfn_tree;
+ /*
+ * The mapping table from slot id to memslot.
+ *
+ * 7-bit bucket count matches the size of the old id to index array for
+ * 512 slots, while giving good performance with this slot count.
+ * Higher bucket counts bring only small performance improvements but
+ * always result in higher memory usage (even for lower memslot counts).
+ */
+ DECLARE_HASHTABLE(id_hash, 7);
+ int node_idx;
};
struct kvm {
+#ifdef KVM_HAVE_MMU_RWLOCK
+ rwlock_t mmu_lock;
+#else
spinlock_t mmu_lock;
+#endif /* KVM_HAVE_MMU_RWLOCK */
+
struct mutex slots_lock;
+
+ /*
+ * Protects the arch-specific fields of struct kvm_memory_slots in
+ * use by the VM. To be used under the slots_lock (above) or in a
+ * kvm->srcu critical section where acquiring the slots_lock would
+ * lead to deadlock with the synchronize_srcu in
+ * install_new_memslots.
+ */
+ struct mutex slots_arch_lock;
struct mm_struct *mm; /* userspace tied to this vm */
+ unsigned long nr_memslot_pages;
+ /* The two memslot sets - active and inactive (per address space) */
+ struct kvm_memslots __memslots[KVM_ADDRESS_SPACE_NUM][2];
+ /* The current active memslot set for each address space */
struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
- struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+ struct xarray vcpu_array;
+
+ /* Used to wait for completion of MMU notifiers. */
+ spinlock_t mn_invalidate_lock;
+ unsigned long mn_active_invalidate_count;
+ struct rcuwait mn_memslots_update_rcuwait;
+
+ /* For management / invalidation of gfn_to_pfn_caches */
+ spinlock_t gpc_lock;
+ struct list_head gpc_list;
/*
* created_vcpus is protected by kvm->lock, and is incremented
@@ -452,6 +727,7 @@ struct kvm {
* and is accessed atomically.
*/
atomic_t online_vcpus;
+ int max_vcpus;
int created_vcpus;
int last_boosted_vcpu;
struct list_head vm_list;
@@ -488,17 +764,27 @@ struct kvm {
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
struct mmu_notifier mmu_notifier;
- unsigned long mmu_notifier_seq;
- long mmu_notifier_count;
+ unsigned long mmu_invalidate_seq;
+ long mmu_invalidate_in_progress;
+ unsigned long mmu_invalidate_range_start;
+ unsigned long mmu_invalidate_range_end;
#endif
- long tlbs_dirty;
struct list_head devices;
- bool manual_dirty_log_protect;
+ u64 manual_dirty_log_protect;
struct dentry *debugfs_dentry;
struct kvm_stat_data **debugfs_stat_data;
struct srcu_struct srcu;
struct srcu_struct irq_srcu;
pid_t userspace_pid;
+ unsigned int max_halt_poll_ns;
+ u32 dirty_ring_size;
+ bool vm_bugged;
+ bool vm_dead;
+
+#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
+ struct notifier_block pm_notifier;
+#endif
+ char stats_id[KVM_STATS_NAME_SIZE];
};
#define kvm_err(fmt, ...) \
@@ -527,6 +813,61 @@ struct kvm {
#define vcpu_err(vcpu, fmt, ...) \
kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+static inline void kvm_vm_dead(struct kvm *kvm)
+{
+ kvm->vm_dead = true;
+ kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD);
+}
+
+static inline void kvm_vm_bugged(struct kvm *kvm)
+{
+ kvm->vm_bugged = true;
+ kvm_vm_dead(kvm);
+}
+
+
+#define KVM_BUG(cond, kvm, fmt...) \
+({ \
+ int __ret = (cond); \
+ \
+ if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \
+ kvm_vm_bugged(kvm); \
+ unlikely(__ret); \
+})
+
+#define KVM_BUG_ON(cond, kvm) \
+({ \
+ int __ret = (cond); \
+ \
+ if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
+ kvm_vm_bugged(kvm); \
+ unlikely(__ret); \
+})
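A caller sketch for the macros above (the condition and helper are invented): unlike BUG_ON(), the VM is merely marked dead and the caller can unwind gracefully:

static int example_check_state(struct kvm *kvm, int state)
{
	if (KVM_BUG_ON(state < 0, kvm))
		return -EIO;	/* VM is now dead; fail gracefully */
	return 0;
}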
+
+static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PROVE_RCU
+ WARN_ONCE(vcpu->srcu_depth++,
+ "KVM: Illegal vCPU srcu_idx LOCK, depth=%d", vcpu->srcu_depth - 1);
+#endif
+ vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+}
+
+static inline void kvm_vcpu_srcu_read_unlock(struct kvm_vcpu *vcpu)
+{
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx);
+
+#ifdef CONFIG_PROVE_RCU
+ WARN_ONCE(--vcpu->srcu_depth,
+ "KVM: Illegal vCPU srcu_idx UNLOCK, depth=%d", vcpu->srcu_depth);
+#endif
+}
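These helpers are meant to bracket any section that dereferences SRCU-protected VM state, replacing open-coded srcu_read_lock() on the old vcpu->srcu_idx; a minimal sketch (the function name is invented):

static void example_access_memslots(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_srcu_read_lock(vcpu);
	/* ... safe to use kvm_vcpu_memslots(vcpu) in here ... */
	kvm_vcpu_srcu_read_unlock(vcpu);
}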
+
+static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
+{
+ return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
+}
+
static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
@@ -541,19 +882,17 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */
smp_rmb();
- return kvm->vcpus[i];
+ return xa_load(&kvm->vcpu_array, i);
}
-#define kvm_for_each_vcpu(idx, vcpup, kvm) \
- for (idx = 0; \
- idx < atomic_read(&kvm->online_vcpus) && \
- (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
- idx++)
+#define kvm_for_each_vcpu(idx, vcpup, kvm) \
+ xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
+ (atomic_read(&kvm->online_vcpus) - 1))
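With the xarray-based macro the index variable must now be an unsigned long; a minimal caller sketch (example_kick_all is invented):

static void example_kick_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;	/* xa_for_each_range() requires unsigned long */

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vcpu_kick(vcpu);
}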
static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
struct kvm_vcpu *vcpu = NULL;
- int i;
+ unsigned long i;
if (id < 0)
return NULL;
@@ -567,17 +906,7 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
return NULL;
}
-static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
-{
- return vcpu->vcpu_idx;
-}
-
-#define kvm_for_each_memslot(memslot, slots) \
- for (memslot = &slots->memslots[0]; \
- memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
- memslot++)
-
-void kvm_vcpu_destroy(struct kvm_vcpu *vcpu);
+void kvm_destroy_vcpus(struct kvm *kvm);
void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
@@ -612,7 +941,9 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
void kvm_exit(void);
void kvm_get_kvm(struct kvm *kvm);
+bool kvm_get_kvm_safe(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
+bool file_is_kvm(struct file *file);
void kvm_put_kvm_no_destroy(struct kvm *kvm);
static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
@@ -635,18 +966,124 @@ static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
return __kvm_memslots(vcpu->kvm, as_id);
}
-static inline struct kvm_memory_slot *
-id_to_memslot(struct kvm_memslots *slots, int id)
+static inline bool kvm_memslots_empty(struct kvm_memslots *slots)
+{
+ return RB_EMPTY_ROOT(&slots->gfn_tree);
+}
+
+#define kvm_for_each_memslot(memslot, bkt, slots) \
+ hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
+ if (WARN_ON_ONCE(!memslot->npages)) { \
+ } else
+
+static inline
+struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
- int index = slots->id_to_index[id];
struct kvm_memory_slot *slot;
+ int idx = slots->node_idx;
- slot = &slots->memslots[index];
+ hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) {
+ if (slot->id == id)
+ return slot;
+ }
- WARN_ON(slot->id != id);
- return slot;
+ return NULL;
}
+/* Iterator used for walking memslots that overlap a gfn range. */
+struct kvm_memslot_iter {
+ struct kvm_memslots *slots;
+ struct rb_node *node;
+ struct kvm_memory_slot *slot;
+};
+
+static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter)
+{
+ iter->node = rb_next(iter->node);
+ if (!iter->node)
+ return;
+
+ iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]);
+}
+
+static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter,
+ struct kvm_memslots *slots,
+ gfn_t start)
+{
+ int idx = slots->node_idx;
+ struct rb_node *tmp;
+ struct kvm_memory_slot *slot;
+
+ iter->slots = slots;
+
+ /*
+ * Find the so-called "upper bound" of a key - the first node whose key
+ * is strictly greater than the searched one (the start gfn in our case).
+ */
+ iter->node = NULL;
+ for (tmp = slots->gfn_tree.rb_node; tmp; ) {
+ slot = container_of(tmp, struct kvm_memory_slot, gfn_node[idx]);
+ if (start < slot->base_gfn) {
+ iter->node = tmp;
+ tmp = tmp->rb_left;
+ } else {
+ tmp = tmp->rb_right;
+ }
+ }
+
+ /*
+ * Find the slot with the lowest gfn that can possibly intersect with
+ * the range, so we'll ideally have slot start <= range start.
+ */
+ if (iter->node) {
+ /*
+ * A NULL previous node means that the very first slot
+ * already has a higher start gfn.
+ * In this case slot start > range start.
+ */
+ tmp = rb_prev(iter->node);
+ if (tmp)
+ iter->node = tmp;
+ } else {
+ /* a NULL node below means no slots */
+ iter->node = rb_last(&slots->gfn_tree);
+ }
+
+ if (iter->node) {
+ iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]);
+
+ /*
+ * It is possible in the slot start < range start case that the
+ * found slot ends before or at range start (slot end <= range start)
+ * and so it does not overlap the requested range.
+ *
+ * In such non-overlapping case the next slot (if it exists) will
+ * already have slot start > range start, otherwise the logic above
+ * would have found it instead of the current slot.
+ */
+ if (iter->slot->base_gfn + iter->slot->npages <= start)
+ kvm_memslot_iter_next(iter);
+ }
+}
+
+static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end)
+{
+ if (!iter->node)
+ return false;
+
+ /*
+ * If this slot starts at or beyond the end of the range, so does
+ * every following one.
+ */
+ return iter->slot->base_gfn < end;
+}
+
+/* Iterate over each memslot at least partially intersecting [start, end) range */
+#define kvm_for_each_memslot_in_gfn_range(iter, slots, start, end) \
+ for (kvm_memslot_iter_start(iter, slots, start); \
+ kvm_memslot_iter_is_valid(iter, end); \
+ kvm_memslot_iter_next(iter))
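A caller sketch for the range iterator (the walk body is invented; note the iterator is passed by address):

static void example_walk_gfn_range(struct kvm_memslots *slots,
				   gfn_t start, gfn_t end)
{
	struct kvm_memslot_iter iter;

	/* Visits, in gfn order, every slot overlapping [start, end). */
	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
		/* iter.slot points at the current overlapping memslot */
		pr_debug("slot %d overlaps the range\n", iter.slot->id);
	}
}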
+
/*
* KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
* - create a new memory slot
@@ -669,22 +1106,16 @@ int kvm_set_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem);
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont);
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
- unsigned long npages);
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change);
-bool kvm_largepages_enabled(void);
-void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
@@ -702,30 +1133,30 @@ unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
-void kvm_set_page_accessed(struct page *page);
-kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
bool *writable);
-kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
-kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
-kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
+kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn);
+kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn);
+kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
bool atomic, bool *async, bool write_fault,
- bool *writable);
+ bool *writable, hva_t *hva);
void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_release_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);
-void kvm_get_pfn(kvm_pfn_t pfn);
-void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len);
+int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, unsigned int offset,
+ unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
@@ -738,32 +1169,54 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len);
-#define __kvm_put_guest(kvm, gfn, offset, value, type) \
+#define __kvm_get_guest(kvm, gfn, offset, v) \
+({ \
+ unsigned long __addr = gfn_to_hva(kvm, gfn); \
+ typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
+ int __ret = -EFAULT; \
+ \
+ if (!kvm_is_error_hva(__addr)) \
+ __ret = get_user(v, __uaddr); \
+ __ret; \
+})
+
+#define kvm_get_guest(kvm, gpa, v) \
+({ \
+ gpa_t __gpa = gpa; \
+ struct kvm *__kvm = kvm; \
+ \
+ __kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT, \
+ offset_in_page(__gpa), v); \
+})
+
+#define __kvm_put_guest(kvm, gfn, offset, v) \
({ \
unsigned long __addr = gfn_to_hva(kvm, gfn); \
- type __user *__uaddr = (type __user *)(__addr + offset); \
+ typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
int __ret = -EFAULT; \
\
if (!kvm_is_error_hva(__addr)) \
- __ret = put_user(value, __uaddr); \
+ __ret = put_user(v, __uaddr); \
if (!__ret) \
mark_page_dirty(kvm, gfn); \
__ret; \
})
-#define kvm_put_guest(kvm, gpa, value, type) \
+#define kvm_put_guest(kvm, gpa, v) \
({ \
gpa_t __gpa = gpa; \
struct kvm *__kvm = kvm; \
+ \
__kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \
- offset_in_page(__gpa), (value), type); \
+ offset_in_page(__gpa), v); \
})
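/*
 * Editor's sketch, not part of this patch: the reworked macros infer the
 * access size from the type of @v instead of taking an explicit type
 * argument.  The helper below is hypothetical.
 */
static int bump_guest_counter(struct kvm *kvm, gpa_t gpa)
{
	u32 val;
	int ret;

	ret = kvm_get_guest(kvm, gpa, val);	/* 4-byte access: sizeof(val) */
	if (ret)
		return ret;

	val++;
	return kvm_put_guest(kvm, gpa, val);	/* dirties the page on success */
}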
-int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
+bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
+void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
@@ -771,12 +1224,7 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
- struct gfn_to_pfn_cache *cache, bool atomic);
-struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
- struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
@@ -791,10 +1239,116 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
+/**
+ * kvm_gpc_init - initialize gfn_to_pfn_cache.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ *
+ * This sets up a gfn_to_pfn_cache by initializing locks. Note, the cache must
+ * be zero-allocated (or zeroed by the caller before init).
+ */
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc);
+
+/**
+ * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
+ * physical address.
+ *
+ * @kvm: pointer to kvm instance.
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @vcpu: vCPU to be used for marking pages dirty and to be woken on
+ * invalidation.
+ * @usage: indicates if the resulting host physical PFN is used while
+ * the @vcpu is IN_GUEST_MODE (in which case invalidation of
+ * the cache from MMU notifiers---but not for KVM memslot
+ * changes!---will also force @vcpu to exit the guest and
+ * refresh the cache); and/or if the PFN used directly
+ *		   refresh the cache); and/or if the PFN is used directly
+ * @gpa: guest physical address to map.
+ * @len:	   sanity check; the range being accessed must fit within a single page.
+ *
+ * @return: 0 for success.
+ * -EINVAL for a mapping which would cross a page boundary.
+ * -EFAULT for an untranslatable guest physical address.
+ *
+ * This primes a gfn_to_pfn_cache and links it into the @kvm's list for
+ * invalidations to be processed. Callers are required to use
+ * kvm_gfn_to_pfn_cache_check() to ensure that the cache is valid before
+ * accessing the target page.
+ */
+int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
+ struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
+ gpa_t gpa, unsigned long len);
+
+/**
+ * kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
+ *
+ * @kvm: pointer to kvm instance.
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @gpa: current guest physical address to map.
+ * @len:	   sanity check; the range being accessed must fit within a single page.
+ *
+ * @return: %true if the cache is still valid and the address matches.
+ * %false if the cache is not valid.
+ *
+ * Callers outside IN_GUEST_MODE context should hold a read lock on @gpc->lock
+ * while calling this function, and then continue to hold the lock until the
+ * access is complete.
+ *
+ * Callers in IN_GUEST_MODE may do so without locking, although they should
+ * still hold a read lock on kvm->srcu for the memslot checks.
+ */
+bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
+ gpa_t gpa, unsigned long len);
+
+/**
+ * kvm_gfn_to_pfn_cache_refresh - update a previously initialized cache.
+ *
+ * @kvm: pointer to kvm instance.
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @gpa: updated guest physical address to map.
+ * @len:	   sanity check; the range being accessed must fit within a single page.
+ *
+ * @return: 0 for success.
+ * -EINVAL for a mapping which would cross a page boundary.
+ * -EFAULT for an untranslatable guest physical address.
+ *
+ * This will attempt to refresh a gfn_to_pfn_cache. Note that a successful
+ * return from this function does not mean the page can be immediately
+ * accessed because it may have raced with an invalidation. Callers must
+ * still lock and check the cache status, as this function does not return
+ * with the lock still held to permit access.
+ */
+int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
+ gpa_t gpa, unsigned long len);
+
+/**
+ * kvm_gfn_to_pfn_cache_unmap - temporarily unmap a gfn_to_pfn_cache.
+ *
+ * @kvm: pointer to kvm instance.
+ * @gpc: struct gfn_to_pfn_cache object.
+ *
+ * This unmaps the referenced page. The cache is left in the invalid state
+ * but at least the mapping from GPA to userspace HVA will remain cached
+ * and can be reused on a subsequent refresh.
+ */
+void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
+
+/**
+ * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.
+ *
+ * @kvm: pointer to kvm instance.
+ * @gpc: struct gfn_to_pfn_cache object.
+ *
+ * This removes a cache from the @kvm's list to be processed on MMU notifier
+ * invocation.
+ */
+void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
+
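/*
 * Editor's sketch, not part of this patch: the access pattern implied by
 * the kvm_gpc_* kernel-doc above.  Assumes kvm_gpc_init() already ran;
 * KVM_HOST_USES_PFN is just one possible @usage.
 */
static int read_u64_via_gpc(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
			    gpa_t gpa, u64 *val)
{
	int ret;

	ret = kvm_gpc_activate(kvm, gpc, NULL, KVM_HOST_USES_PFN,
			       gpa, sizeof(*val));
	if (ret)
		return ret;

	read_lock(&gpc->lock);
	while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpa, sizeof(*val))) {
		read_unlock(&gpc->lock);

		/* Does not return with the lock held; recheck afterwards. */
		ret = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, sizeof(*val));
		if (ret)
			goto out;

		read_lock(&gpc->lock);
	}

	*val = *(u64 *)gpc->khva;
	read_unlock(&gpc->lock);
out:
	kvm_gpc_deactivate(kvm, gpc);
	return ret;
}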
void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
-void kvm_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_vcpu_halt(struct kvm_vcpu *vcpu);
+bool kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
@@ -803,13 +1357,19 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
void kvm_flush_remote_tlbs(struct kvm *kvm);
-void kvm_reload_remote_mmus(struct kvm *kvm);
-bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
- unsigned long *vcpu_bitmap, cpumask_var_t tmp);
-bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
-bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
- unsigned long *vcpu_bitmap);
+#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
+int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
+int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min);
+int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
+void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
+void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
+#endif
+
+void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
+ unsigned long end);
+void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
+ unsigned long end);
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
@@ -819,23 +1379,20 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
-int kvm_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log, int *is_dirty);
-
-int kvm_get_dirty_log_protect(struct kvm *kvm,
- struct kvm_dirty_log *log, bool *flush);
-int kvm_clear_dirty_log_protect(struct kvm *kvm,
- struct kvm_clear_dirty_log *log, bool *flush);
-
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset,
unsigned long mask);
-
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log);
-int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
- struct kvm_clear_dirty_log *log);
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
+
+#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot);
+#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
+int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
+ int *is_dirty, struct kvm_memory_slot **memslot);
+#endif
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
bool line_status);
@@ -843,6 +1400,8 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
struct kvm_enable_cap *cap);
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
+long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long arg);
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
@@ -862,7 +1421,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg);
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);
int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);
@@ -876,21 +1435,29 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
+#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
+int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state);
+#endif
+
#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
-void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);
+void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
+#else
+static inline void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
#endif
int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
-int kvm_arch_hardware_setup(void);
+int kvm_arch_hardware_setup(void *opaque);
void kvm_arch_hardware_unsetup(void);
-int kvm_arch_check_processor_compat(void);
+int kvm_arch_check_processor_compat(void *opaque);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
+bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_post_init_vm(struct kvm *kvm);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);
+int kvm_arch_create_vm_debugfs(struct kvm *kvm);
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
@@ -901,10 +1468,17 @@ static inline struct kvm *kvm_arch_alloc_vm(void)
{
return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}
+#endif
+
+static inline void __kvm_arch_free_vm(struct kvm *kvm)
+{
+ kvfree(kvm);
+}
+#ifndef __KVM_HAVE_ARCH_VM_FREE
static inline void kvm_arch_free_vm(struct kvm *kvm)
{
- kfree(kvm);
+ __kvm_arch_free_vm(kvm);
}
#endif
@@ -946,21 +1520,35 @@ static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}
-static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
+static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
return false;
}
#endif
-static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
+static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
- return vcpu->arch.wqp;
+ return vcpu->arch.waitp;
#else
- return &vcpu->wq;
+ return &vcpu->wait;
#endif
}
+/*
+ * Wake a vCPU if necessary, but don't do any stats/metadata updates. Returns
+ * true if the vCPU was blocking and was awakened, false otherwise.
+ */
+static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
+{
+ return !!rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
+}
+
+static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu)
+{
+ return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu));
+}
+
#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
* returns true if the virtual interrupt controller is initialized and
@@ -975,15 +1563,24 @@ static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
}
#endif
+#ifdef CONFIG_GUEST_PERF_EVENTS
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);
+
+void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void));
+void kvm_unregister_perf_callbacks(void);
+#else
+static inline void kvm_register_perf_callbacks(void *ign) {}
+static inline void kvm_unregister_perf_callbacks(void) {}
+#endif /* CONFIG_GUEST_PERF_EVENTS */
+
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
-bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
-bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
-bool kvm_is_transparent_hugepage(kvm_pfn_t pfn);
+struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn);
+bool kvm_is_zone_device_page(struct page *page);
struct kvm_irq_ack_notifier {
struct hlist_node link;
@@ -1014,50 +1611,91 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
/*
- * search_memslots() and __gfn_to_memslot() are here because they are
- * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
- * gfn_to_memslot() itself isn't here as an inline because that would
- * bloat other code too much.
+ * Returns a pointer to the memslot if it contains gfn.
+ * Otherwise returns NULL.
*/
static inline struct kvm_memory_slot *
-search_memslots(struct kvm_memslots *slots, gfn_t gfn)
+try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
- int start = 0, end = slots->used_slots;
- int slot = atomic_read(&slots->lru_slot);
- struct kvm_memory_slot *memslots = slots->memslots;
-
- if (gfn >= memslots[slot].base_gfn &&
- gfn < memslots[slot].base_gfn + memslots[slot].npages)
- return &memslots[slot];
+ if (!slot)
+ return NULL;
- while (start < end) {
- slot = start + (end - start) / 2;
+ if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
+ return slot;
+ else
+ return NULL;
+}
- if (gfn >= memslots[slot].base_gfn)
- end = slot;
- else
- start = slot + 1;
+/*
+ * Returns a pointer to the memslot that contains gfn. Otherwise returns NULL.
+ *
+ * With "approx" set, a memslot is returned even when the address falls
+ * in a hole; in that case, one of the memslots bordering the hole is
+ * returned.
+ */
+static inline struct kvm_memory_slot *
+search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx)
+{
+ struct kvm_memory_slot *slot;
+ struct rb_node *node;
+ int idx = slots->node_idx;
+
+ slot = NULL;
+ for (node = slots->gfn_tree.rb_node; node; ) {
+ slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]);
+ if (gfn >= slot->base_gfn) {
+ if (gfn < slot->base_gfn + slot->npages)
+ return slot;
+ node = node->rb_right;
+ } else
+ node = node->rb_left;
}
- if (gfn >= memslots[start].base_gfn &&
- gfn < memslots[start].base_gfn + memslots[start].npages) {
- atomic_set(&slots->lru_slot, start);
- return &memslots[start];
+ return approx ? slot : NULL;
+}
+
+static inline struct kvm_memory_slot *
+____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx)
+{
+ struct kvm_memory_slot *slot;
+
+ slot = (struct kvm_memory_slot *)atomic_long_read(&slots->last_used_slot);
+ slot = try_get_memslot(slot, gfn);
+ if (slot)
+ return slot;
+
+ slot = search_memslots(slots, gfn, approx);
+ if (slot) {
+ atomic_long_set(&slots->last_used_slot, (unsigned long)slot);
+ return slot;
}
return NULL;
}
+/*
+ * __gfn_to_memslot() and its descendants are here to allow arch code to inline
+ * the lookups in hot paths. gfn_to_memslot() itself isn't here as an inline
+ * because that would bloat other code too much.
+ */
static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
- return search_memslots(slots, gfn);
+ return ____gfn_to_memslot(slots, gfn, false);
}
static inline unsigned long
-__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
+__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{
- return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+ /*
+	 * The index was checked originally in search_memslots.  To prevent
+	 * a malicious guest from building a Spectre gadget out of e.g. page
+	 * table walks, do not let the processor speculate loads outside
+ * the guest's registered memslots.
+ */
+ unsigned long offset = gfn - slot->base_gfn;
+ offset = array_index_nospec(offset, slot->npages);
+ return slot->userspace_addr + offset * PAGE_SIZE;
}
static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
@@ -1088,12 +1726,6 @@ static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
return (hpa_t)pfn << PAGE_SHIFT;
}
-static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu,
- gpa_t gpa)
-{
- return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa));
-}
-
static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
@@ -1108,39 +1740,222 @@ enum kvm_stat_kind {
struct kvm_stat_data {
struct kvm *kvm;
- struct kvm_stats_debugfs_item *dbgfs_item;
+ const struct _kvm_stats_desc *desc;
+ enum kvm_stat_kind kind;
};
-struct kvm_stats_debugfs_item {
- const char *name;
- int offset;
- enum kvm_stat_kind kind;
- int mode;
+struct _kvm_stats_desc {
+ struct kvm_stats_desc desc;
+ char name[KVM_STATS_NAME_SIZE];
};
-#define KVM_DBGFS_GET_MODE(dbgfs_item) \
- ((dbgfs_item)->mode ? (dbgfs_item)->mode : 0644)
+#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \
+ .flags = type | unit | base | \
+ BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \
+ BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \
+ BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \
+ .exponent = exp, \
+ .size = sz, \
+ .bucket_size = bsz
+
+#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vm_stat, generic.stat) \
+ }, \
+ .name = #stat, \
+ }
+#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \
+ }, \
+ .name = #stat, \
+ }
+#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vm_stat, stat) \
+ }, \
+ .name = #stat, \
+ }
+#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vcpu_stat, stat) \
+ }, \
+ .name = #stat, \
+ }
+/* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */
+#define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz) \
+ SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz)
+
+#define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE, \
+ unit, base, exponent, 1, 0)
+#define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT, \
+ unit, base, exponent, 1, 0)
+#define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK, \
+ unit, base, exponent, 1, 0)
+#define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LINEAR_HIST, \
+ unit, base, exponent, sz, bsz)
+#define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LOG_HIST, \
+ unit, base, exponent, sz, 0)
+
+/* Cumulative counter, read/write */
+#define STATS_DESC_COUNTER(SCOPE, name) \
+ STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE, \
+ KVM_STATS_BASE_POW10, 0)
+/* Instantaneous counter, read only */
+#define STATS_DESC_ICOUNTER(SCOPE, name) \
+ STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE, \
+ KVM_STATS_BASE_POW10, 0)
+/* Peak counter, read/write */
+#define STATS_DESC_PCOUNTER(SCOPE, name) \
+ STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE, \
+ KVM_STATS_BASE_POW10, 0)
+
+/* Instantaneous boolean value, read only */
+#define STATS_DESC_IBOOLEAN(SCOPE, name) \
+ STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \
+ KVM_STATS_BASE_POW10, 0)
+/* Peak (sticky) boolean value, read/write */
+#define STATS_DESC_PBOOLEAN(SCOPE, name) \
+ STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \
+ KVM_STATS_BASE_POW10, 0)
+
+/* Cumulative time in nanoseconds */
+#define STATS_DESC_TIME_NSEC(SCOPE, name) \
+ STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
+ KVM_STATS_BASE_POW10, -9)
+/* Linear histogram for time in nanoseconds */
+#define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz) \
+ STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
+ KVM_STATS_BASE_POW10, -9, sz, bsz)
+/* Logarithmic histogram for time in nanoseconds */
+#define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz) \
+ STATS_DESC_LOG_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
+ KVM_STATS_BASE_POW10, -9, sz)
+
+#define KVM_GENERIC_VM_STATS() \
+ STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush), \
+ STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush_requests)
+
+#define KVM_GENERIC_VCPU_STATS() \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll), \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll), \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid), \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup), \
+ STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns), \
+ STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns), \
+ STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_wait_ns), \
+ STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_success_hist, \
+ HALT_POLL_HIST_COUNT), \
+ STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist, \
+ HALT_POLL_HIST_COUNT), \
+ STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist, \
+ HALT_POLL_HIST_COUNT), \
+ STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking)
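/*
 * Editor's sketch, not part of this patch: an architecture composes its
 * descriptor table from the helpers above.  The two arch-specific fields
 * are invented; real entries must match fields in struct kvm_vm_stat.
 */
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, mmu_flushes),		/* hypothetical field */
	STATS_DESC_ICOUNTER(VM, nr_memslot_pages)	/* hypothetical field */
};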
-extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;
+ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
+ const struct _kvm_stats_desc *desc,
+ void *stats, size_t size_stats,
+ char __user *user_buffer, size_t size, loff_t *offset);
+
+/**
+ * kvm_stats_linear_hist_update() - Update bucket value for linear histogram
+ * statistics data.
+ *
+ * @data: start address of the stats data
+ * @size: the number of buckets in the stats data
+ * @value: the new value used to update the linear histogram's bucket
+ * @bucket_size: the size (width) of a bucket
+ */
+static inline void kvm_stats_linear_hist_update(u64 *data, size_t size,
+ u64 value, size_t bucket_size)
+{
+ size_t index = div64_u64(value, bucket_size);
+
+ index = min(index, size - 1);
+ ++data[index];
+}
+
+/**
+ * kvm_stats_log_hist_update() - Update bucket value for logarithmic histogram
+ * statistics data.
+ *
+ * @data: start address of the stats data
+ * @size: the number of buckets in the stats data
+ * @value: the new value used to update the logarithmic histogram's bucket
+ */
+static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value)
+{
+ size_t index = fls64(value);
+
+ index = min(index, size - 1);
+ ++data[index];
+}
+
+#define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize) \
+ kvm_stats_linear_hist_update(array, ARRAY_SIZE(array), value, bsize)
+#define KVM_STATS_LOG_HIST_UPDATE(array, value) \
+ kvm_stats_log_hist_update(array, ARRAY_SIZE(array), value)
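/*
 * Editor's sketch, not part of this patch: the ARRAY_SIZE-based wrappers
 * are the intended entry points, e.g. feeding a halt-poll duration into
 * the per-vCPU log2 histogram declared via KVM_GENERIC_VCPU_STATS().
 */
static void record_halt_poll_success(struct kvm_vcpu *vcpu, u64 poll_ns)
{
	KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_poll_success_hist,
				  poll_ns);
}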
+
+
+extern const struct kvm_stats_header kvm_vm_stats_header;
+extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
+extern const struct kvm_stats_header kvm_vcpu_stats_header;
+extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];
+
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
-static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
+static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
{
- if (unlikely(kvm->mmu_notifier_count))
+ if (unlikely(kvm->mmu_invalidate_in_progress))
return 1;
/*
- * Ensure the read of mmu_notifier_count happens before the read
- * of mmu_notifier_seq. This interacts with the smp_wmb() in
- * mmu_notifier_invalidate_range_end to make sure that the caller
- * either sees the old (non-zero) value of mmu_notifier_count or
- * the new (incremented) value of mmu_notifier_seq.
- * PowerPC Book3s HV KVM calls this under a per-page lock
- * rather than under kvm->mmu_lock, for scalability, so
- * can't rely on kvm->mmu_lock to keep things ordered.
+ * Ensure the read of mmu_invalidate_in_progress happens before
+ * the read of mmu_invalidate_seq. This interacts with the
+ * smp_wmb() in mmu_notifier_invalidate_range_end to make sure
+ * that the caller either sees the old (non-zero) value of
+ * mmu_invalidate_in_progress or the new (incremented) value of
+ * mmu_invalidate_seq.
+ *
+ * PowerPC Book3s HV KVM calls this under a per-page lock rather
+ * than under kvm->mmu_lock, for scalability, so can't rely on
+ * kvm->mmu_lock to keep things ordered.
*/
smp_rmb();
- if (kvm->mmu_notifier_seq != mmu_seq)
+ if (kvm->mmu_invalidate_seq != mmu_seq)
+ return 1;
+ return 0;
+}
+
+static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
+ unsigned long mmu_seq,
+ unsigned long hva)
+{
+ lockdep_assert_held(&kvm->mmu_lock);
+ /*
+ * If mmu_invalidate_in_progress is non-zero, then the range maintained
+ * by kvm_mmu_notifier_invalidate_range_start contains all addresses
+ * that might be being invalidated. Note that it may include some false
+	 * positives, due to shortcuts when handling concurrent invalidations.
+ */
+ if (unlikely(kvm->mmu_invalidate_in_progress) &&
+ hva >= kvm->mmu_invalidate_range_start &&
+ hva < kvm->mmu_invalidate_range_end)
+ return 1;
+ if (kvm->mmu_invalidate_seq != mmu_seq)
return 1;
return 0;
}
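/*
 * Editor's sketch, not part of this patch: the canonical fault-path
 * pattern for the retry helpers - snapshot the sequence count, resolve
 * the pfn outside the lock, then recheck before installing the mapping.
 * Assumes the rwlock flavour of mmu_lock; some architectures use a
 * spinlock.  The -EAGAIN retry convention is hypothetical.
 */
static int example_map_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
			   gfn_t gfn)
{
	unsigned long mmu_seq = kvm->mmu_invalidate_seq;
	kvm_pfn_t pfn;

	smp_rmb();	/* pairs with the write side in the invalidate path */

	pfn = gfn_to_pfn_memslot(slot, gfn);	/* may sleep */

	write_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry(kvm, mmu_seq)) {
		write_unlock(&kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		return -EAGAIN;
	}
	/* ... install the mapping ... */
	write_unlock(&kvm->mmu_lock);

	return 0;
}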
@@ -1212,7 +2027,7 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
void kvm_arch_irq_routing_update(struct kvm *kvm);
-static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
+static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
/*
* Ensure the rest of the request is published to kvm_check_request's
@@ -1222,6 +2037,19 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}
+static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
+{
+ /*
+	 * Requests that don't require vCPU action should never be logged in
+ * vcpu->requests. The vCPU won't clear the request, so it will stay
+ * logged indefinitely and prevent the vCPU from entering the guest.
+ */
+ BUILD_BUG_ON(!__builtin_constant_p(req) ||
+ (req & KVM_REQUEST_NO_ACTION));
+
+ __kvm_make_request(req, vcpu);
+}
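/*
 * Editor's sketch, not part of this patch: the producer/consumer pairing.
 * Passing a compile-time-constant req lets the BUILD_BUG_ON above reject
 * KVM_REQUEST_NO_ACTION requests at build time.
 */
static void example_request_tlb_flush(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	kvm_vcpu_kick(vcpu);
}

/* In the vCPU run loop, before reentering the guest: */
static void example_service_requests(struct kvm_vcpu *vcpu)
{
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/* ... arch-specific TLB flush for this vCPU ... */
	}
}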
+
static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
return READ_ONCE(vcpu->requests);
@@ -1343,6 +2171,12 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
+static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
+{
+ return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
+ !(memslot->flags & KVM_MEMSLOT_INVALID));
+}
+
struct kvm_vcpu *kvm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
@@ -1356,6 +2190,8 @@ void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
+bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *,
+ struct kvm_kernel_irq_routing_entry *);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
@@ -1394,8 +2230,10 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
}
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
-int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end, bool blockable);
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+ unsigned long start, unsigned long end);
+
+void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
@@ -1412,4 +2250,35 @@ int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
uintptr_t data, const char *name,
struct task_struct **thread_ptr);
+#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
+static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
+{
+ vcpu->run->exit_reason = KVM_EXIT_INTR;
+ vcpu->stat.signal_exits++;
+}
+#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
+
+/*
+ * If more than one page is being (un)accounted, @virt must be the address of
+ * the first page of a block of pages that were allocated together (i.e.
+ * accounted together).
+ *
+ * kvm_account_pgtable_pages() is thread-safe because mod_lruvec_page_state()
+ * is thread-safe.
+ */
+static inline void kvm_account_pgtable_pages(void *virt, int nr)
+{
+ mod_lruvec_page_state(virt_to_page(virt), NR_SECONDARY_PAGETABLE, nr);
+}
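/*
 * Editor's sketch, not part of this patch: the intended pairing, one page
 * accounted at allocation and unaccounted before the free.  Helper names
 * are hypothetical.
 */
static void *example_alloc_pt_page(void)
{
	void *virt = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);

	if (virt)
		kvm_account_pgtable_pages(virt, 1);

	return virt;
}

static void example_free_pt_page(void *virt)
{
	kvm_account_pgtable_pages(virt, -1);
	free_page((unsigned long)virt);
}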
+
+/*
+ * This defines how many reserved entries we want to keep before we
+ * kick the vCPU out to userspace to avoid the dirty ring becoming full.
+ * This value can be tuned higher if e.g. PML is enabled on the host.
+ */
+#define KVM_DIRTY_RING_RSVD_ENTRIES 64
+
+/* Max number of entries allowed for each kvm dirty ring */
+#define KVM_DIRTY_RING_MAX_ENTRIES 65536
+
#endif
diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h
index dc1da020305b..dac047abdba7 100644
--- a/include/linux/kvm_irqfd.h
+++ b/include/linux/kvm_irqfd.h
@@ -42,7 +42,7 @@ struct kvm_kernel_irqfd {
wait_queue_entry_t wait;
/* Update side is protected by irqfds.lock */
struct kvm_kernel_irq_routing_entry irq_entry;
- seqcount_t irq_entry_sc;
+ seqcount_spinlock_t irq_entry_sc;
/* Used for level IRQ fast-path */
int gsi;
struct work_struct inject;
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 68e84cf42a3f..3ca3db020e0e 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -18,7 +18,12 @@ struct kvm_memslots;
enum kvm_mr_change;
+#include <linux/bits.h>
+#include <linux/mutex.h>
#include <linux/types.h>
+#include <linux/spinlock_types.h>
+
+#include <asm/kvm_types.h>
/*
* Address types:
@@ -43,6 +48,12 @@ typedef u64 hfn_t;
typedef hfn_t kvm_pfn_t;
+enum pfn_cache_usage {
+ KVM_GUEST_USES_PFN = BIT(0),
+ KVM_HOST_USES_PFN = BIT(1),
+ KVM_GUEST_AND_HOST_USE_PFN = KVM_GUEST_USES_PFN | KVM_HOST_USES_PFN,
+};
+
struct gfn_to_hva_cache {
u64 generation;
gpa_t gpa;
@@ -53,9 +64,62 @@ struct gfn_to_hva_cache {
struct gfn_to_pfn_cache {
u64 generation;
- gfn_t gfn;
+ gpa_t gpa;
+ unsigned long uhva;
+ struct kvm_memory_slot *memslot;
+ struct kvm_vcpu *vcpu;
+ struct list_head list;
+ rwlock_t lock;
+ struct mutex refresh_lock;
+ void *khva;
kvm_pfn_t pfn;
- bool dirty;
+ enum pfn_cache_usage usage;
+ bool active;
+ bool valid;
+};
+
+#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
+/*
+ * Memory caches are used to preallocate memory ahead of various MMU flows,
+ * e.g. page fault handlers. Gracefully handling allocation failures deep in
+ * MMU flows is problematic, as is triggering reclaim, I/O, etc... while
+ * holding MMU locks. Note, these caches act more like prefetch buffers than
+ * classical caches, i.e. objects are not returned to the cache on being freed.
+ *
+ * The @capacity field and @objects array are lazily initialized when the cache
+ * is topped up (__kvm_mmu_topup_memory_cache()).
+ */
+struct kvm_mmu_memory_cache {
+ int nobjs;
+ gfp_t gfp_zero;
+ gfp_t gfp_custom;
+ struct kmem_cache *kmem_cache;
+ int capacity;
+ void **objects;
+};
+#endif
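/*
 * Editor's sketch, not part of this patch: since the caches are prefetch
 * buffers, the usual pattern is to top up in sleepable context and then
 * consume with allocation failure ruled out.  The minimum of 4 is
 * arbitrary, and mmu_lock's flavour varies by architecture.
 */
static int example_prepare_and_consume(struct kvm *kvm,
				       struct kvm_mmu_memory_cache *mc)
{
	void *obj;
	int ret;

	/* May reclaim/sleep, so run before taking mmu_lock. */
	ret = kvm_mmu_topup_memory_cache(mc, 4);
	if (ret)
		return ret;

	write_lock(&kvm->mmu_lock);
	obj = kvm_mmu_memory_cache_alloc(mc);	/* cannot fail after topup */
	/* ... install obj, e.g. as a new page-table node ... */
	write_unlock(&kvm->mmu_lock);

	return 0;
}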
+
+#define HALT_POLL_HIST_COUNT 32
+
+struct kvm_vm_stat_generic {
+ u64 remote_tlb_flush;
+ u64 remote_tlb_flush_requests;
+};
+
+struct kvm_vcpu_stat_generic {
+ u64 halt_successful_poll;
+ u64 halt_attempted_poll;
+ u64 halt_poll_invalid;
+ u64 halt_wakeup;
+ u64 halt_poll_success_ns;
+ u64 halt_poll_fail_ns;
+ u64 halt_wait_ns;
+ u64 halt_poll_success_hist[HALT_POLL_HIST_COUNT];
+ u64 halt_poll_fail_hist[HALT_POLL_HIST_COUNT];
+ u64 halt_wait_hist[HALT_POLL_HIST_COUNT];
+ u64 blocking;
};
+#define KVM_STATS_NAME_SIZE 48
+
#endif /* __KVM_TYPES_H__ */
diff --git a/include/linux/lapb.h b/include/linux/lapb.h
index eb56472f23b2..b5333f9413dc 100644
--- a/include/linux/lapb.h
+++ b/include/linux/lapb.h
@@ -6,6 +6,11 @@
#ifndef LAPB_KERNEL_H
#define LAPB_KERNEL_H
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+
+struct net_device;
+
#define LAPB_OK 0
#define LAPB_BADTOKEN 1
#define LAPB_INVALUE 2
diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h
index 9022f0c2e2e4..84f1053cf2a8 100644
--- a/include/linux/latencytop.h
+++ b/include/linux/latencytop.h
@@ -38,9 +38,6 @@ account_scheduler_latency(struct task_struct *task, int usecs, int inter)
void clear_tsk_latency_tracing(struct task_struct *p);
-extern int sysctl_latencytop(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
-
#else
static inline void
diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h
index 21a3358a1731..612b4cab3819 100644
--- a/include/linux/led-class-flash.h
+++ b/include/linux/led-class-flash.h
@@ -85,6 +85,7 @@ static inline struct led_classdev_flash *lcdev_to_flcdev(
return container_of(lcdev, struct led_classdev_flash, led_cdev);
}
+#if IS_ENABLED(CONFIG_LEDS_CLASS_FLASH)
/**
* led_classdev_flash_register_ext - register a new object of LED class with
* init data and with support for flash LEDs
@@ -98,12 +99,6 @@ int led_classdev_flash_register_ext(struct device *parent,
struct led_classdev_flash *fled_cdev,
struct led_init_data *init_data);
-static inline int led_classdev_flash_register(struct device *parent,
- struct led_classdev_flash *fled_cdev)
-{
- return led_classdev_flash_register_ext(parent, fled_cdev, NULL);
-}
-
/**
* led_classdev_flash_unregister - unregisters an object of led_classdev class
* with support for flash LEDs
@@ -118,15 +113,44 @@ int devm_led_classdev_flash_register_ext(struct device *parent,
struct led_init_data *init_data);
+void devm_led_classdev_flash_unregister(struct device *parent,
+ struct led_classdev_flash *fled_cdev);
+
+#else
+
+static inline int led_classdev_flash_register_ext(struct device *parent,
+ struct led_classdev_flash *fled_cdev,
+ struct led_init_data *init_data)
+{
+ return 0;
+}
+
+static inline void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev) {};
+static inline int devm_led_classdev_flash_register_ext(struct device *parent,
+ struct led_classdev_flash *fled_cdev,
+ struct led_init_data *init_data)
+{
+ return 0;
+}
+
+static inline void devm_led_classdev_flash_unregister(struct device *parent,
+ struct led_classdev_flash *fled_cdev)
+{};
+
+#endif /* IS_ENABLED(CONFIG_LEDS_CLASS_FLASH) */
+
+static inline int led_classdev_flash_register(struct device *parent,
+ struct led_classdev_flash *fled_cdev)
+{
+ return led_classdev_flash_register_ext(parent, fled_cdev, NULL);
+}
+
static inline int devm_led_classdev_flash_register(struct device *parent,
struct led_classdev_flash *fled_cdev)
{
return devm_led_classdev_flash_register_ext(parent, fled_cdev, NULL);
}
-void devm_led_classdev_flash_unregister(struct device *parent,
- struct led_classdev_flash *fled_cdev);
-
/**
* led_set_flash_strobe - setup flash strobe
* @fled_cdev: the flash LED to set strobe on
diff --git a/include/linux/led-class-multicolor.h b/include/linux/led-class-multicolor.h
new file mode 100644
index 000000000000..210d57bcd767
--- /dev/null
+++ b/include/linux/led-class-multicolor.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* LED Multicolor class interface
+ * Copyright (C) 2019-20 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#ifndef _LINUX_MULTICOLOR_LEDS_H_INCLUDED
+#define _LINUX_MULTICOLOR_LEDS_H_INCLUDED
+
+#include <linux/leds.h>
+#include <dt-bindings/leds/common.h>
+
+struct mc_subled {
+ unsigned int color_index;
+ unsigned int brightness;
+ unsigned int intensity;
+ unsigned int channel;
+};
+
+struct led_classdev_mc {
+ /* led class device */
+ struct led_classdev led_cdev;
+ unsigned int num_colors;
+
+ struct mc_subled *subled_info;
+};
+
+static inline struct led_classdev_mc *lcdev_to_mccdev(
+ struct led_classdev *led_cdev)
+{
+ return container_of(led_cdev, struct led_classdev_mc, led_cdev);
+}
+
+#if IS_ENABLED(CONFIG_LEDS_CLASS_MULTICOLOR)
+/**
+ * led_classdev_multicolor_register_ext - register a new object of led_classdev
+ * class with support for multicolor LEDs
+ * @parent: the multicolor LED to register
+ * @mcled_cdev: the led_classdev_mc structure for this device
+ * @init_data: the LED class multicolor device initialization data
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+int led_classdev_multicolor_register_ext(struct device *parent,
+ struct led_classdev_mc *mcled_cdev,
+ struct led_init_data *init_data);
+
+/**
+ * led_classdev_multicolor_unregister - unregisters an object of led_classdev
+ * class with support for multicolor LEDs
+ * @mcled_cdev: the multicolor LED to unregister
+ *
+ * Unregisters an object previously registered via
+ * led_classdev_multicolor_register().
+ */
+void led_classdev_multicolor_unregister(struct led_classdev_mc *mcled_cdev);
+
+/* Calculate brightness for the monochrome LED cluster */
+int led_mc_calc_color_components(struct led_classdev_mc *mcled_cdev,
+ enum led_brightness brightness);
+
+int devm_led_classdev_multicolor_register_ext(struct device *parent,
+ struct led_classdev_mc *mcled_cdev,
+ struct led_init_data *init_data);
+
+void devm_led_classdev_multicolor_unregister(struct device *parent,
+ struct led_classdev_mc *mcled_cdev);
+#else
+
+static inline int led_classdev_multicolor_register_ext(struct device *parent,
+ struct led_classdev_mc *mcled_cdev,
+ struct led_init_data *init_data)
+{
+ return 0;
+}
+
+static inline void led_classdev_multicolor_unregister(struct led_classdev_mc *mcled_cdev) {};
+static inline int led_mc_calc_color_components(struct led_classdev_mc *mcled_cdev,
+ enum led_brightness brightness)
+{
+ return 0;
+}
+
+static inline int devm_led_classdev_multicolor_register_ext(struct device *parent,
+ struct led_classdev_mc *mcled_cdev,
+ struct led_init_data *init_data)
+{
+ return 0;
+}
+
+static inline void devm_led_classdev_multicolor_unregister(struct device *parent,
+ struct led_classdev_mc *mcled_cdev)
+{};
+
+#endif /* IS_ENABLED(CONFIG_LEDS_CLASS_MULTICOLOR) */
+
+static inline int led_classdev_multicolor_register(struct device *parent,
+ struct led_classdev_mc *mcled_cdev)
+{
+ return led_classdev_multicolor_register_ext(parent, mcled_cdev, NULL);
+}
+
+static inline int devm_led_classdev_multicolor_register(struct device *parent,
+ struct led_classdev_mc *mcled_cdev)
+{
+ return devm_led_classdev_multicolor_register_ext(parent, mcled_cdev,
+ NULL);
+}
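/*
 * Editor's sketch, not part of this patch: registering a three-channel
 * RGB device.  The name and channel mapping are invented, and a real
 * driver would also supply brightness ops on the embedded led_cdev.
 */
static int example_rgb_probe(struct device *dev)
{
	static struct mc_subled subleds[] = {
		{ .color_index = LED_COLOR_ID_RED,   .channel = 0 },
		{ .color_index = LED_COLOR_ID_GREEN, .channel = 1 },
		{ .color_index = LED_COLOR_ID_BLUE,  .channel = 2 },
	};
	static struct led_classdev_mc mc_cdev = {
		.num_colors = ARRAY_SIZE(subleds),
		.subled_info = subleds,
		.led_cdev = {
			.name = "example:rgb:status",
			.max_brightness = 255,
		},
	};

	return devm_led_classdev_multicolor_register(dev, &mc_cdev);
}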
+
+#endif /* _LINUX_MULTICOLOR_LEDS_H_INCLUDED */
diff --git a/include/linux/leds-tca6507.h b/include/linux/leds-tca6507.h
deleted file mode 100644
index 50d330ed1100..000000000000
--- a/include/linux/leds-tca6507.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * TCA6507 LED chip driver.
- *
- * Copyright (C) 2011 Neil Brown <neil@brown.name>
- */
-
-#ifndef __LINUX_TCA6507_H
-#define __LINUX_TCA6507_H
-#include <linux/leds.h>
-
-struct tca6507_platform_data {
- struct led_platform_data leds;
-#ifdef CONFIG_GPIOLIB
- int gpio_base;
- void (*setup)(unsigned gpio_base, unsigned ngpio);
-#endif
-};
-
-#define TCA6507_MAKE_GPIO 1
-#endif /* __LINUX_TCA6507_H*/
diff --git a/include/linux/leds-ti-lmu-common.h b/include/linux/leds-ti-lmu-common.h
index 5eb111f38803..420b61e5a213 100644
--- a/include/linux/leds-ti-lmu-common.h
+++ b/include/linux/leds-ti-lmu-common.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
// TI LMU Common Core
-// Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+// Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/
#ifndef _TI_LMU_COMMON_H_
#define _TI_LMU_COMMON_H_
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 75353e5f9d13..ba4861ec73d3 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -25,6 +25,7 @@ struct device_node;
* LED Core
*/
+/* This is obsolete/useless. We now support variable maximum brightness. */
enum led_brightness {
LED_OFF = 0,
LED_ON = 1,
@@ -32,6 +33,12 @@ enum led_brightness {
LED_FULL = 255,
};
+enum led_default_state {
+ LEDS_DEFSTATE_OFF = 0,
+ LEDS_DEFSTATE_ON = 1,
+ LEDS_DEFSTATE_KEEP = 2,
+};
+
struct led_init_data {
/* device fwnode handle */
struct fwnode_handle *fwnode;
@@ -56,10 +63,14 @@ struct led_init_data {
bool devname_mandatory;
};
+struct led_hw_trigger_type {
+ int dummy;
+};
+
struct led_classdev {
const char *name;
- enum led_brightness brightness;
- enum led_brightness max_brightness;
+ unsigned int brightness;
+ unsigned int max_brightness;
int flags;
/* Lower 16 bits reflect status */
@@ -140,6 +151,9 @@ struct led_classdev {
void *trigger_data;
/* true if activated - deactivate routine uses it to do cleanup */
bool activated;
+
+ /* LEDs that have private triggers have this set */
+ struct led_hw_trigger_type *trigger_type;
#endif
#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
@@ -245,8 +259,7 @@ void led_blink_set_oneshot(struct led_classdev *led_cdev,
* software blink timer that implements blinking when the
* hardware doesn't. This function is guaranteed not to sleep.
*/
-void led_set_brightness(struct led_classdev *led_cdev,
- enum led_brightness brightness);
+void led_set_brightness(struct led_classdev *led_cdev, unsigned int brightness);
/**
* led_set_brightness_sync - set LED brightness synchronously
@@ -259,8 +272,7 @@ void led_set_brightness(struct led_classdev *led_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
-int led_set_brightness_sync(struct led_classdev *led_cdev,
- enum led_brightness value);
+int led_set_brightness_sync(struct led_classdev *led_cdev, unsigned int value);
/**
* led_update_brightness - update LED brightness
@@ -344,8 +356,11 @@ struct led_trigger {
int (*activate)(struct led_classdev *led_cdev);
void (*deactivate)(struct led_classdev *led_cdev);
+ /* LED-private triggers have this set */
+ struct led_hw_trigger_type *trigger_type;
+
/* LEDs under control by this trigger (for simple triggers) */
- rwlock_t leddev_list_lock;
+ spinlock_t leddev_list_lock;
struct list_head led_cdevs;
/* Link to next registered trigger */
@@ -511,9 +526,9 @@ struct gpio_led {
/* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */
struct gpio_desc *gpiod;
};
-#define LEDS_GPIO_DEFSTATE_OFF 0
-#define LEDS_GPIO_DEFSTATE_ON 1
-#define LEDS_GPIO_DEFSTATE_KEEP 2
+#define LEDS_GPIO_DEFSTATE_OFF LEDS_DEFSTATE_OFF
+#define LEDS_GPIO_DEFSTATE_ON LEDS_DEFSTATE_ON
+#define LEDS_GPIO_DEFSTATE_KEEP LEDS_DEFSTATE_KEEP
struct gpio_led_platform_data {
int num_leds;
@@ -554,7 +569,7 @@ static inline void ledtrig_cpu(enum cpu_led_event evt)
#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
void led_classdev_notify_brightness_hw_changed(
- struct led_classdev *led_cdev, enum led_brightness brightness);
+ struct led_classdev *led_cdev, unsigned int brightness);
#else
static inline void led_classdev_notify_brightness_hw_changed(
struct led_classdev *led_cdev, enum led_brightness brightness) { }
diff --git a/include/linux/leds_pwm.h b/include/linux/leds_pwm.h
deleted file mode 100644
index 93d101d28943..000000000000
--- a/include/linux/leds_pwm.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * PWM LED driver data - see drivers/leds/leds-pwm.c
- */
-#ifndef __LINUX_LEDS_PWM_H
-#define __LINUX_LEDS_PWM_H
-
-struct led_pwm {
- const char *name;
- const char *default_trigger;
- unsigned pwm_id __deprecated;
- u8 active_low;
- unsigned max_brightness;
- unsigned pwm_period_ns;
-};
-
-struct led_pwm_platform_data {
- int num_leds;
- struct led_pwm *leds;
-};
-
-#endif
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 2ca9b7056a82..fe990176e6ee 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -22,6 +22,7 @@
#include <linux/acpi.h>
#include <linux/cdrom.h>
#include <linux/sched.h>
+#include <linux/async.h>
/*
* Define if arch has non-standard setup. This is a _PCI_ standard
@@ -38,27 +39,9 @@
* compile-time options: to be removed as soon as all the drivers are
* converted to the new debugging mechanism
*/
-#undef ATA_DEBUG /* debugging output */
-#undef ATA_VERBOSE_DEBUG /* yet more debugging output */
#undef ATA_IRQ_TRAP /* define to ack screaming irqs */
-#undef ATA_NDEBUG /* define to disable quick runtime checks */
-/* note: prints function name for you */
-#ifdef ATA_DEBUG
-#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-#ifdef ATA_VERBOSE_DEBUG
-#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-#else
-#define VPRINTK(fmt, args...)
-#endif /* ATA_VERBOSE_DEBUG */
-#else
-#define DPRINTK(fmt, args...)
-#define VPRINTK(fmt, args...)
-#endif /* ATA_DEBUG */
-
-#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-
#define ata_print_version_once(dev, version) \
({ \
static bool __print_once; \
@@ -69,38 +52,6 @@
} \
})
-/* NEW: debug levels */
-#define HAVE_LIBATA_MSG 1
-
-enum {
- ATA_MSG_DRV = 0x0001,
- ATA_MSG_INFO = 0x0002,
- ATA_MSG_PROBE = 0x0004,
- ATA_MSG_WARN = 0x0008,
- ATA_MSG_MALLOC = 0x0010,
- ATA_MSG_CTL = 0x0020,
- ATA_MSG_INTR = 0x0040,
- ATA_MSG_ERR = 0x0080,
-};
-
-#define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV)
-#define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO)
-#define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE)
-#define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN)
-#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
-#define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL)
-#define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR)
-#define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR)
-
-static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
-{
- if (dval < 0 || dval >= (sizeof(u32) * 8))
- return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
- if (!dval)
- return 0;
- return (1 << dval) - 1;
-}
-
/* defines only for the constants which don't work well as enums */
#define ATA_TAG_POISON 0xfafbfcfdU
@@ -150,7 +101,7 @@ enum {
ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */
ATA_DFLAG_NCQ_SEND_RECV = (1 << 19), /* device supports NCQ SEND and RECV */
ATA_DFLAG_NCQ_PRIO = (1 << 20), /* device supports NCQ priority */
- ATA_DFLAG_NCQ_PRIO_ENABLE = (1 << 21), /* Priority cmds sent to dev */
+ ATA_DFLAG_NCQ_PRIO_ENABLED = (1 << 21), /* Priority cmds sent to dev */
ATA_DFLAG_INIT_MASK = (1 << 24) - 1,
ATA_DFLAG_DETACH = (1 << 24),
@@ -162,6 +113,10 @@ enum {
ATA_DFLAG_D_SENSE = (1 << 29), /* Descriptor sense requested */
ATA_DFLAG_ZAC = (1 << 30), /* ZAC device */
+ ATA_DFLAG_FEATURES_MASK = ATA_DFLAG_TRUSTED | ATA_DFLAG_DA | \
+ ATA_DFLAG_DEVSLP | ATA_DFLAG_NCQ_SEND_RECV | \
+ ATA_DFLAG_NCQ_PRIO,
+
ATA_DEV_UNKNOWN = 0, /* unknown device */
ATA_DEV_ATA = 1, /* ATA device */
ATA_DEV_ATA_UNSUP = 2, /* ATA device (unsupported) */
@@ -176,6 +131,7 @@ enum {
ATA_DEV_NONE = 11, /* no device */
/* struct ata_link flags */
+ /* NOTE: struct ata_force_param currently stores lflags in u16 */
ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */
ATA_LFLAG_NO_SRST = (1 << 2), /* avoid softreset */
ATA_LFLAG_ASSUME_ATA = (1 << 3), /* assume ATA class */
@@ -187,7 +143,7 @@ enum {
ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */
ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */
ATA_LFLAG_CHANGED = (1 << 10), /* LPM state changed on this link */
- ATA_LFLAG_NO_DB_DELAY = (1 << 11), /* no debounce delay on link resume */
+ ATA_LFLAG_NO_DEBOUNCE_DELAY = (1 << 11), /* no debounce delay on link resume */
/* struct ata_port flags */
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
@@ -319,7 +275,7 @@ enum {
PORT_DISABLED = 2,
/* encoding various smaller bitmaps into a single
- * unsigned long bitmap
+ * unsigned int bitmap
*/
ATA_NR_PIO_MODES = 7,
ATA_NR_MWDMA_MODES = 5,
@@ -390,7 +346,7 @@ enum {
/* This should match the actual table size of
* ata_eh_cmd_timeout_table in libata-eh.c.
*/
- ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 6,
+ ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 7,
/* Horkage types. May be set by libata or controller on drives
(some horkage may be drive/controller pair dependent */
@@ -421,6 +377,10 @@ enum {
ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */
ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
+ ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */
+ ATA_HORKAGE_NO_NCQ_ON_ATI = (1 << 27), /* Disable NCQ on ATI chipset */
+ ATA_HORKAGE_NO_ID_DEV_LOG = (1 << 28), /* Identify device log missing */
+ ATA_HORKAGE_NO_LOG_DIR = (1 << 29), /* Do not read log directory */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
@@ -466,12 +426,9 @@ enum {
};
enum ata_xfer_mask {
- ATA_MASK_PIO = ((1LU << ATA_NR_PIO_MODES) - 1)
- << ATA_SHIFT_PIO,
- ATA_MASK_MWDMA = ((1LU << ATA_NR_MWDMA_MODES) - 1)
- << ATA_SHIFT_MWDMA,
- ATA_MASK_UDMA = ((1LU << ATA_NR_UDMA_MODES) - 1)
- << ATA_SHIFT_UDMA,
+ ATA_MASK_PIO = ((1U << ATA_NR_PIO_MODES) - 1) << ATA_SHIFT_PIO,
+ ATA_MASK_MWDMA = ((1U << ATA_NR_MWDMA_MODES) - 1) << ATA_SHIFT_MWDMA,
+ ATA_MASK_UDMA = ((1U << ATA_NR_UDMA_MODES) - 1) << ATA_SHIFT_UDMA,
};
enum hsm_task_states {
@@ -531,12 +488,15 @@ typedef int (*ata_reset_fn_t)(struct ata_link *link, unsigned int *classes,
unsigned long deadline);
typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes);
-extern struct device_attribute dev_attr_link_power_management_policy;
extern struct device_attribute dev_attr_unload_heads;
+#ifdef CONFIG_SATA_HOST
+extern struct device_attribute dev_attr_link_power_management_policy;
+extern struct device_attribute dev_attr_ncq_prio_supported;
extern struct device_attribute dev_attr_ncq_prio_enable;
extern struct device_attribute dev_attr_em_message_type;
extern struct device_attribute dev_attr_em_message;
extern struct device_attribute dev_attr_sw_activity;
+#endif
enum sw_activity {
OFF,
@@ -556,7 +516,10 @@ struct ata_taskfile {
u8 hob_lbam;
u8 hob_lbah;
- u8 feature;
+ union {
+ u8 error;
+ u8 feature;
+ };
u8 nsect;
u8 lbal;
u8 lbam;
@@ -564,7 +527,10 @@ struct ata_taskfile {
u8 device;
- u8 command; /* IO operation */
+ union {
+ u8 status;
+ u8 command;
+ };
u32 auxiliary; /* auxiliary field */
/* from SATA 3.1 and */
@@ -608,7 +574,7 @@ struct ata_host {
struct task_struct *eh_owner;
struct ata_port *simplex_claimed; /* channel owning the DMA */
- struct ata_port *ports[0];
+ struct ata_port *ports[];
};
struct ata_queued_cmd {
@@ -667,6 +633,18 @@ struct ata_ering {
struct ata_ering_entry ring[ATA_ERING_SIZE];
};
+struct ata_cpr {
+ u8 num;
+ u8 num_storage_elements;
+ u64 start_lba;
+ u64 num_lbas;
+};
+
+struct ata_cpr_log {
+ u8 nr_cpr;
+ struct ata_cpr cpr[];
+};
+
struct ata_device {
struct ata_link *link;
unsigned int devno; /* 0 or 1 */
@@ -699,9 +677,9 @@ struct ata_device {
unsigned int cdb_len;
/* per-dev xfer mask */
- unsigned long pio_mask;
- unsigned long mwdma_mask;
- unsigned long udma_mask;
+ unsigned int pio_mask;
+ unsigned int mwdma_mask;
+ unsigned int udma_mask;
/* for CHS addressing */
u16 cylinders; /* Number of cylinders */
@@ -726,6 +704,9 @@ struct ata_device {
u32 zac_zones_optimal_nonseq;
u32 zac_zones_max_open;
+ /* Concurrent positioning ranges */
+ struct ata_cpr_log *cpr_log;
+
/* error history */
int spdn_cnt;
/* ering is CLEAR_END, read comment above CLEAR_END */
@@ -836,10 +817,8 @@ struct ata_port {
unsigned int cbl; /* cable type; ATA_CBL_xxx */
struct ata_queued_cmd qcmd[ATA_MAX_QUEUE + 1];
- unsigned long sas_tag_allocated; /* for sas tag allocation only */
u64 qc_active;
int nr_active_links; /* #links with active qcs */
- unsigned int sas_last_tag; /* track next tag hw expects */
struct ata_link link; /* host default link */
struct ata_link *slave_link; /* see ata_slave_link_init() */
@@ -859,7 +838,6 @@ struct ata_port {
unsigned int hsm_task_state;
- u32 msg_enable;
struct list_head eh_done_q;
wait_queue_head_t eh_wait_q;
int eh_tries;
@@ -869,7 +847,9 @@ struct ata_port {
enum ata_lpm_policy target_lpm_policy;
struct timer_list fastdrain_timer;
- unsigned long fastdrain_cnt;
+ unsigned int fastdrain_cnt;
+
+ async_cookie_t cookie;
int em_message_type;
void *private_data;
@@ -902,11 +882,12 @@ struct ata_port_operations {
* Configuration and exception handling
*/
int (*cable_detect)(struct ata_port *ap);
- unsigned long (*mode_filter)(struct ata_device *dev, unsigned long xfer_mask);
+ unsigned int (*mode_filter)(struct ata_device *dev, unsigned int xfer_mask);
void (*set_piomode)(struct ata_port *ap, struct ata_device *dev);
void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev);
int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev);
- unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf, u16 *id);
+ unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf,
+ __le16 *id);
void (*dev_config)(struct ata_device *dev);
@@ -997,9 +978,9 @@ struct ata_port_operations {
struct ata_port_info {
unsigned long flags;
unsigned long link_flags;
- unsigned long pio_mask;
- unsigned long mwdma_mask;
- unsigned long udma_mask;
+ unsigned int pio_mask;
+ unsigned int mwdma_mask;
+ unsigned int udma_mask;
struct ata_port_operations *port_ops;
void *private_data;
};
@@ -1020,10 +1001,6 @@ struct ata_timing {
/*
* Core layer - drivers/ata/libata-core.c
*/
-extern const unsigned long sata_deb_timing_normal[];
-extern const unsigned long sata_deb_timing_hotplug[];
-extern const unsigned long sata_deb_timing_long[];
-
extern struct ata_port_operations ata_dummy_port_ops;
extern const struct ata_port_info ata_dummy_port_info;
@@ -1061,33 +1038,14 @@ static inline int is_multi_taskfile(struct ata_taskfile *tf)
(tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT);
}
-static inline const unsigned long *
-sata_ehc_deb_timing(struct ata_eh_context *ehc)
-{
- if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
- return sata_deb_timing_hotplug;
- else
- return sata_deb_timing_normal;
-}
-
static inline int ata_port_is_dummy(struct ata_port *ap)
{
return ap->ops == &ata_dummy_port_ops;
}
-extern int sata_set_spd(struct ata_link *link);
extern int ata_std_prereset(struct ata_link *link, unsigned long deadline);
extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link));
-extern int sata_link_debounce(struct ata_link *link,
- const unsigned long *params, unsigned long deadline);
-extern int sata_link_resume(struct ata_link *link, const unsigned long *params,
- unsigned long deadline);
-extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
- bool spm_wakeup);
-extern int sata_link_hardreset(struct ata_link *link,
- const unsigned long *timing, unsigned long deadline,
- bool *online, int (*check_ready)(struct ata_link *));
extern int sata_std_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
extern void ata_std_postreset(struct ata_link *link, unsigned int *classes);
@@ -1095,7 +1053,6 @@ extern void ata_std_postreset(struct ata_link *link, unsigned int *classes);
extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
const struct ata_port_info * const * ppi, int n_ports);
-extern int ata_slave_link_init(struct ata_port *ap);
extern void ata_host_get(struct ata_host *host);
extern void ata_host_put(struct ata_host *host);
extern int ata_host_start(struct ata_host *host);
@@ -1115,28 +1072,17 @@ extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd,
#define ATA_SCSI_COMPAT_IOCTL /* empty */
#endif
extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd);
+#if IS_REACHABLE(CONFIG_ATA)
+bool ata_scsi_dma_need_drain(struct request *rq);
+#else
+#define ata_scsi_dma_need_drain NULL
+#endif
extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev,
unsigned int cmd, void __user *arg);
-extern void ata_sas_port_destroy(struct ata_port *);
-extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
- struct ata_port_info *, struct Scsi_Host *);
-extern void ata_sas_async_probe(struct ata_port *ap);
-extern int ata_sas_sync_probe(struct ata_port *ap);
-extern int ata_sas_port_init(struct ata_port *);
-extern int ata_sas_port_start(struct ata_port *ap);
-extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
-extern void ata_sas_tport_delete(struct ata_port *ap);
-extern void ata_sas_port_stop(struct ata_port *ap);
-extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
-extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
-extern int sata_scr_valid(struct ata_link *link);
-extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
-extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
-extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
extern bool ata_link_online(struct ata_link *link);
extern bool ata_link_offline(struct ata_link *link);
#ifdef CONFIG_PM
-extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
+extern void ata_host_suspend(struct ata_host *host, pm_message_t mesg);
extern void ata_host_resume(struct ata_host *host);
extern void ata_sas_port_suspend(struct ata_port *ap);
extern void ata_sas_port_resume(struct ata_port *ap);
@@ -1153,33 +1099,33 @@ extern void ata_msleep(struct ata_port *ap, unsigned int msecs);
extern u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask,
u32 val, unsigned long interval, unsigned long timeout);
extern int atapi_cmd_type(u8 opcode);
-extern void ata_tf_to_fis(const struct ata_taskfile *tf,
- u8 pmp, int is_cmd, u8 *fis);
-extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
-extern unsigned long ata_pack_xfermask(unsigned long pio_mask,
- unsigned long mwdma_mask, unsigned long udma_mask);
-extern void ata_unpack_xfermask(unsigned long xfer_mask,
- unsigned long *pio_mask, unsigned long *mwdma_mask,
- unsigned long *udma_mask);
-extern u8 ata_xfer_mask2mode(unsigned long xfer_mask);
-extern unsigned long ata_xfer_mode2mask(u8 xfer_mode);
-extern int ata_xfer_mode2shift(unsigned long xfer_mode);
-extern const char *ata_mode_string(unsigned long xfer_mask);
-extern unsigned long ata_id_xfermask(const u16 *id);
+extern unsigned int ata_pack_xfermask(unsigned int pio_mask,
+ unsigned int mwdma_mask,
+ unsigned int udma_mask);
+extern void ata_unpack_xfermask(unsigned int xfer_mask,
+ unsigned int *pio_mask,
+ unsigned int *mwdma_mask,
+ unsigned int *udma_mask);
+extern u8 ata_xfer_mask2mode(unsigned int xfer_mask);
+extern unsigned int ata_xfer_mode2mask(u8 xfer_mode);
+extern int ata_xfer_mode2shift(u8 xfer_mode);
+extern const char *ata_mode_string(unsigned int xfer_mask);
+extern unsigned int ata_id_xfermask(const u16 *id);
extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem);
extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
+extern unsigned int ata_port_classify(struct ata_port *ap,
+ const struct ata_taskfile *tf);
extern void ata_dev_disable(struct ata_device *adev);
extern void ata_id_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len);
extern void ata_id_c_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len);
extern unsigned int ata_do_dev_read_id(struct ata_device *dev,
- struct ata_taskfile *tf, u16 *id);
+ struct ata_taskfile *tf, __le16 *id);
extern void ata_qc_complete(struct ata_queued_cmd *qc);
-extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active);
extern u64 ata_qc_get_active(struct ata_port *ap);
extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd);
extern int ata_std_bios_param(struct scsi_device *sdev,
@@ -1190,13 +1136,102 @@ extern int ata_scsi_slave_config(struct scsi_device *sdev);
extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
int queue_depth);
-extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
- int queue_depth);
+extern int ata_change_queue_depth(struct ata_port *ap, struct ata_device *dev,
+ struct scsi_device *sdev, int queue_depth);
extern struct ata_device *ata_dev_pair(struct ata_device *adev);
extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q);
+
+/*
+ * SATA specific code - drivers/ata/libata-sata.c
+ */
+#ifdef CONFIG_SATA_HOST
+extern const unsigned long sata_deb_timing_normal[];
+extern const unsigned long sata_deb_timing_hotplug[];
+extern const unsigned long sata_deb_timing_long[];
+
+static inline const unsigned long *
+sata_ehc_deb_timing(struct ata_eh_context *ehc)
+{
+ if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
+ return sata_deb_timing_hotplug;
+ else
+ return sata_deb_timing_normal;
+}
+
+extern int sata_scr_valid(struct ata_link *link);
+extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
+extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
+extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
+extern int sata_set_spd(struct ata_link *link);
+extern int sata_link_hardreset(struct ata_link *link,
+ const unsigned long *timing, unsigned long deadline,
+ bool *online, int (*check_ready)(struct ata_link *));
+extern int sata_link_resume(struct ata_link *link, const unsigned long *params,
+ unsigned long deadline);
+extern void ata_eh_analyze_ncq_error(struct ata_link *link);
+#else
+static inline const unsigned long *
+sata_ehc_deb_timing(struct ata_eh_context *ehc)
+{
+ return NULL;
+}
+static inline int sata_scr_valid(struct ata_link *link) { return 0; }
+static inline int sata_scr_read(struct ata_link *link, int reg, u32 *val)
+{
+ return -EOPNOTSUPP;
+}
+static inline int sata_scr_write(struct ata_link *link, int reg, u32 val)
+{
+ return -EOPNOTSUPP;
+}
+static inline int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
+{
+ return -EOPNOTSUPP;
+}
+static inline int sata_set_spd(struct ata_link *link) { return -EOPNOTSUPP; }
+static inline int sata_link_hardreset(struct ata_link *link,
+ const unsigned long *timing,
+ unsigned long deadline,
+ bool *online,
+ int (*check_ready)(struct ata_link *))
+{
+ if (online)
+ *online = false;
+ return -EOPNOTSUPP;
+}
+static inline int sata_link_resume(struct ata_link *link,
+ const unsigned long *params,
+ unsigned long deadline)
+{
+ return -EOPNOTSUPP;
+}
+static inline void ata_eh_analyze_ncq_error(struct ata_link *link) { }
+#endif
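With CONFIG_SATA_HOST=n the static inline stubs keep PATA-only builds compiling and linking without drivers/ata/libata-sata.o. A minimal sketch of a caller that works either way (my_hardreset is hypothetical):

	static int my_hardreset(struct ata_link *link, unsigned long deadline)
	{
		const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
		bool online;

		/* resolves to the stub returning -EOPNOTSUPP when SATA is off */
		return sata_link_hardreset(link, timing, deadline, &online, NULL);
	}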
+extern int sata_link_debounce(struct ata_link *link,
+ const unsigned long *params, unsigned long deadline);
+extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
+ bool spm_wakeup);
+extern int ata_slave_link_init(struct ata_port *ap);
+extern void ata_sas_port_destroy(struct ata_port *);
+extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
+ struct ata_port_info *, struct Scsi_Host *);
+extern void ata_sas_async_probe(struct ata_port *ap);
+extern int ata_sas_sync_probe(struct ata_port *ap);
+extern int ata_sas_port_init(struct ata_port *);
+extern int ata_sas_port_start(struct ata_port *ap);
+extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
+extern void ata_sas_tport_delete(struct ata_port *ap);
+extern void ata_sas_port_stop(struct ata_port *ap);
+extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
+extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
+extern void ata_tf_to_fis(const struct ata_taskfile *tf,
+ u8 pmp, int is_cmd, u8 *fis);
+extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
+extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active);
extern bool sata_lpm_ignore_phy_events(struct ata_link *link);
+extern int sata_async_notification(struct ata_port *ap);
extern int ata_cable_40wire(struct ata_port *ap);
extern int ata_cable_80wire(struct ata_port *ap);
@@ -1206,12 +1241,6 @@ extern int ata_cable_unknown(struct ata_port *ap);
/* Timing helpers */
extern unsigned int ata_pio_need_iordy(const struct ata_device *);
-extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode);
-extern int ata_timing_compute(struct ata_device *, unsigned short,
- struct ata_timing *, int, int);
-extern void ata_timing_merge(const struct ata_timing *,
- const struct ata_timing *, struct ata_timing *,
- unsigned int);
extern u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle);
/* PCI */
@@ -1253,8 +1282,8 @@ static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
}
int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm);
int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm);
-unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev,
- const struct ata_acpi_gtm *gtm);
+unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
+ const struct ata_acpi_gtm *gtm);
int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm);
#else
static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
@@ -1295,14 +1324,12 @@ extern void ata_port_wait_eh(struct ata_port *ap);
extern int ata_link_abort(struct ata_link *link);
extern int ata_port_abort(struct ata_port *ap);
extern int ata_port_freeze(struct ata_port *ap);
-extern int sata_async_notification(struct ata_port *ap);
extern void ata_eh_freeze_port(struct ata_port *ap);
extern void ata_eh_thaw_port(struct ata_port *ap);
extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
-extern void ata_eh_analyze_ncq_error(struct ata_link *link);
extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
@@ -1335,7 +1362,7 @@ extern int ata_link_nr_enabled(struct ata_link *link);
*/
extern const struct ata_port_operations ata_base_port_ops;
extern const struct ata_port_operations sata_port_ops;
-extern struct device_attribute *ata_common_sdev_attrs[];
+extern const struct attribute_group *ata_common_sdev_groups[];
/*
* All sht initializers (BASE, PIO, BMDMA, NCQ) must be instantiated
@@ -1343,27 +1370,51 @@ extern struct device_attribute *ata_common_sdev_attrs[];
* edge driver's module reference, otherwise the driver can be unloaded
* even if the scsi_device is being accessed.
*/
-#define ATA_BASE_SHT(drv_name) \
+#define __ATA_BASE_SHT(drv_name) \
.module = THIS_MODULE, \
.name = drv_name, \
.ioctl = ata_scsi_ioctl, \
ATA_SCSI_COMPAT_IOCTL \
.queuecommand = ata_scsi_queuecmd, \
- .can_queue = ATA_DEF_QUEUE, \
- .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
+ .dma_need_drain = ata_scsi_dma_need_drain, \
.this_id = ATA_SHT_THIS_ID, \
.emulated = ATA_SHT_EMULATED, \
.proc_name = drv_name, \
- .slave_configure = ata_scsi_slave_config, \
.slave_destroy = ata_scsi_slave_destroy, \
.bios_param = ata_std_bios_param, \
- .unlock_native_capacity = ata_scsi_unlock_native_capacity, \
- .sdev_attrs = ata_common_sdev_attrs
+ .unlock_native_capacity = ata_scsi_unlock_native_capacity,\
+ .max_sectors = ATA_MAX_SECTORS_LBA48
+
+#define ATA_SUBBASE_SHT(drv_name) \
+ __ATA_BASE_SHT(drv_name), \
+ .can_queue = ATA_DEF_QUEUE, \
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
+ .slave_configure = ata_scsi_slave_config
+
+#define ATA_SUBBASE_SHT_QD(drv_name, drv_qd) \
+ __ATA_BASE_SHT(drv_name), \
+ .can_queue = drv_qd, \
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
+ .slave_configure = ata_scsi_slave_config
+
+#define ATA_BASE_SHT(drv_name) \
+ ATA_SUBBASE_SHT(drv_name), \
+ .sdev_groups = ata_common_sdev_groups
+
+#ifdef CONFIG_SATA_HOST
+extern const struct attribute_group *ata_ncq_sdev_groups[];
#define ATA_NCQ_SHT(drv_name) \
- ATA_BASE_SHT(drv_name), \
+ ATA_SUBBASE_SHT(drv_name), \
+ .sdev_groups = ata_ncq_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth
+#define ATA_NCQ_SHT_QD(drv_name, drv_qd) \
+ ATA_SUBBASE_SHT_QD(drv_name, drv_qd), \
+ .sdev_groups = ata_ncq_sdev_groups, \
+ .change_queue_depth = ata_scsi_change_queue_depth
+#endif
+
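The split into __ATA_BASE_SHT/ATA_SUBBASE_SHT lets drivers that know their hardware queue depth use the _QD variants instead of the ATA_DEF_QUEUE default. A minimal sketch of a hypothetical NCQ-capable driver's template:

	static struct scsi_host_template my_sht = {
		ATA_NCQ_SHT_QD("my_driver", 31),	/* fixed 31-deep queue */
		.sg_tablesize	= 128,
		.dma_boundary	= ATA_DMA_BOUNDARY,
	};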
/*
* PMP helpers
*/
@@ -1395,7 +1446,7 @@ static inline bool sata_pmp_attached(struct ata_port *ap)
static inline bool ata_is_host_link(const struct ata_link *link)
{
- return 1;
+ return true;
}
#endif /* CONFIG_SATA_PMP */
@@ -1406,51 +1457,61 @@ static inline int sata_srst_pmp(struct ata_link *link)
return link->pmp;
}
-/*
- * printk helpers
- */
-__printf(3, 4)
-void ata_port_printk(const struct ata_port *ap, const char *level,
- const char *fmt, ...);
-__printf(3, 4)
-void ata_link_printk(const struct ata_link *link, const char *level,
- const char *fmt, ...);
-__printf(3, 4)
-void ata_dev_printk(const struct ata_device *dev, const char *level,
- const char *fmt, ...);
+#define ata_port_printk(level, ap, fmt, ...) \
+ pr_ ## level ("ata%u: " fmt, (ap)->print_id, ##__VA_ARGS__)
#define ata_port_err(ap, fmt, ...) \
- ata_port_printk(ap, KERN_ERR, fmt, ##__VA_ARGS__)
+ ata_port_printk(err, ap, fmt, ##__VA_ARGS__)
#define ata_port_warn(ap, fmt, ...) \
- ata_port_printk(ap, KERN_WARNING, fmt, ##__VA_ARGS__)
+ ata_port_printk(warn, ap, fmt, ##__VA_ARGS__)
#define ata_port_notice(ap, fmt, ...) \
- ata_port_printk(ap, KERN_NOTICE, fmt, ##__VA_ARGS__)
+ ata_port_printk(notice, ap, fmt, ##__VA_ARGS__)
#define ata_port_info(ap, fmt, ...) \
- ata_port_printk(ap, KERN_INFO, fmt, ##__VA_ARGS__)
+ ata_port_printk(info, ap, fmt, ##__VA_ARGS__)
#define ata_port_dbg(ap, fmt, ...) \
- ata_port_printk(ap, KERN_DEBUG, fmt, ##__VA_ARGS__)
+ ata_port_printk(debug, ap, fmt, ##__VA_ARGS__)
+
+#define ata_link_printk(level, link, fmt, ...) \
+do { \
+ if (sata_pmp_attached((link)->ap) || \
+ (link)->ap->slave_link) \
+ pr_ ## level ("ata%u.%02u: " fmt, \
+ (link)->ap->print_id, \
+ (link)->pmp, \
+ ##__VA_ARGS__); \
+ else \
+ pr_ ## level ("ata%u: " fmt, \
+ (link)->ap->print_id, \
+ ##__VA_ARGS__); \
+} while (0)
#define ata_link_err(link, fmt, ...) \
- ata_link_printk(link, KERN_ERR, fmt, ##__VA_ARGS__)
+ ata_link_printk(err, link, fmt, ##__VA_ARGS__)
#define ata_link_warn(link, fmt, ...) \
- ata_link_printk(link, KERN_WARNING, fmt, ##__VA_ARGS__)
+ ata_link_printk(warn, link, fmt, ##__VA_ARGS__)
#define ata_link_notice(link, fmt, ...) \
- ata_link_printk(link, KERN_NOTICE, fmt, ##__VA_ARGS__)
+ ata_link_printk(notice, link, fmt, ##__VA_ARGS__)
#define ata_link_info(link, fmt, ...) \
- ata_link_printk(link, KERN_INFO, fmt, ##__VA_ARGS__)
+ ata_link_printk(info, link, fmt, ##__VA_ARGS__)
#define ata_link_dbg(link, fmt, ...) \
- ata_link_printk(link, KERN_DEBUG, fmt, ##__VA_ARGS__)
+ ata_link_printk(debug, link, fmt, ##__VA_ARGS__)
+
+#define ata_dev_printk(level, dev, fmt, ...) \
+ pr_ ## level("ata%u.%02u: " fmt, \
+ (dev)->link->ap->print_id, \
+ (dev)->link->pmp + (dev)->devno, \
+ ##__VA_ARGS__)
#define ata_dev_err(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_ERR, fmt, ##__VA_ARGS__)
+ ata_dev_printk(err, dev, fmt, ##__VA_ARGS__)
#define ata_dev_warn(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_WARNING, fmt, ##__VA_ARGS__)
+ ata_dev_printk(warn, dev, fmt, ##__VA_ARGS__)
#define ata_dev_notice(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_NOTICE, fmt, ##__VA_ARGS__)
+ ata_dev_printk(notice, dev, fmt, ##__VA_ARGS__)
#define ata_dev_info(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)
+ ata_dev_printk(info, dev, fmt, ##__VA_ARGS__)
#define ata_dev_dbg(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_DEBUG, fmt, ##__VA_ARGS__)
+ ata_dev_printk(debug, dev, fmt, ##__VA_ARGS__)
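The out-of-line printk helpers become token-pasting macros over pr_err()/pr_warn()/etc. Roughly, ata_dev_warn(dev, "NCQ disabled\n") now expands to:

	pr_warn("ata%u.%02u: " "NCQ disabled\n",
		dev->link->ap->print_id,
		dev->link->pmp + dev->devno);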
void ata_print_version(const struct device *dev, const char *version);
@@ -1635,6 +1696,8 @@ extern struct ata_device *ata_dev_next(struct ata_device *dev,
*/
static inline int ata_ncq_enabled(struct ata_device *dev)
{
+ if (!IS_ENABLED(CONFIG_SATA_HOST))
+ return 0;
return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ;
}
@@ -1804,6 +1867,16 @@ static inline int ata_dma_enabled(struct ata_device *adev)
}
/**************************************************************************
+ * PATA timings - drivers/ata/libata-pata-timings.c
+ */
+extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode);
+extern int ata_timing_compute(struct ata_device *, unsigned short,
+ struct ata_timing *, int, int);
+extern void ata_timing_merge(const struct ata_timing *,
+ const struct ata_timing *, struct ata_timing *,
+ unsigned int);
+
+/**************************************************************************
* PMP - drivers/ata/libata-pmp.c
*/
#ifdef CONFIG_SATA_PMP
@@ -1972,11 +2045,8 @@ static inline u8 ata_wait_idle(struct ata_port *ap)
{
u8 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
-#ifdef ATA_DEBUG
if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ)))
- ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n",
- status);
-#endif
+ ata_port_dbg(ap, "abnormal Status 0x%X\n", status);
return status;
}
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 9df091bd30ba..c74acfa1a3fe 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -25,8 +25,6 @@ struct badrange {
};
enum {
- /* when a dimm supports both PMEM and BLK access a label is required */
- NDD_ALIASING = 0,
/* unarmed memory devices may not persist writes */
NDD_UNARMED = 1,
/* locked memory devices should not be accessed */
@@ -35,8 +33,8 @@ enum {
NDD_SECURITY_OVERWRITE = 3,
/* tracking whether or not there is a pending device reference */
NDD_WORK_PENDING = 4,
- /* ignore / filter NSLABEL_FLAG_LOCAL for this DIMM, i.e. no aliasing */
- NDD_NOBLK = 5,
+ /* dimm supports namespace labels */
+ NDD_LABELING = 6,
/* need to set a limit somewhere, but yes, this is likely overkill */
ND_IOCTL_MAX_BUFLEN = SZ_4M,
@@ -61,6 +59,9 @@ enum {
/* Platform provides asynchronous flush mechanism */
ND_REGION_ASYNC = 3,
+ /* Region was created by CXL subsystem */
+ ND_REGION_CXL = 4,
+
/* mark newly adjusted resources as requiring a label update */
DPA_RESOURCE_ADJUSTED = 1 << 0,
};
@@ -74,8 +75,9 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
struct device_node;
struct nvdimm_bus_descriptor {
const struct attribute_group **attr_groups;
- unsigned long bus_dsm_mask;
unsigned long cmd_mask;
+ unsigned long dimm_family_mask;
+ unsigned long bus_family_mask;
struct module *module;
char *provider_name;
struct device_node *of_node;
@@ -83,6 +85,7 @@ struct nvdimm_bus_descriptor {
int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc);
int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd, void *data);
+ const struct nvdimm_bus_fw_ops *fw_ops;
};
struct nd_cmd_desc {
@@ -122,6 +125,7 @@ struct nd_region_desc {
int numa_node;
int target_node;
unsigned long flags;
+ int memregion;
struct device_node *of_node;
int (*flush)(struct nd_region *nd_region, struct bio *bio);
};
@@ -136,22 +140,6 @@ static inline void __iomem *devm_nvdimm_ioremap(struct device *dev,
}
struct nvdimm_bus;
-struct module;
-struct device;
-struct nd_blk_region;
-struct nd_blk_region_desc {
- int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
- int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
- void *iobuf, u64 len, int rw);
- struct nd_region_desc ndr_desc;
-};
-
-static inline struct nd_blk_region_desc *to_blk_region_desc(
- struct nd_region_desc *ndr_desc)
-{
- return container_of(ndr_desc, struct nd_blk_region_desc, ndr_desc);
-
-}
/*
* Note that separate bits for locked + unlocked are defined so that
@@ -197,6 +185,49 @@ struct nvdimm_security_ops {
int (*query_overwrite)(struct nvdimm *nvdimm);
};
+enum nvdimm_fwa_state {
+ NVDIMM_FWA_INVALID,
+ NVDIMM_FWA_IDLE,
+ NVDIMM_FWA_ARMED,
+ NVDIMM_FWA_BUSY,
+ NVDIMM_FWA_ARM_OVERFLOW,
+};
+
+enum nvdimm_fwa_trigger {
+ NVDIMM_FWA_ARM,
+ NVDIMM_FWA_DISARM,
+};
+
+enum nvdimm_fwa_capability {
+ NVDIMM_FWA_CAP_INVALID,
+ NVDIMM_FWA_CAP_NONE,
+ NVDIMM_FWA_CAP_QUIESCE,
+ NVDIMM_FWA_CAP_LIVE,
+};
+
+enum nvdimm_fwa_result {
+ NVDIMM_FWA_RESULT_INVALID,
+ NVDIMM_FWA_RESULT_NONE,
+ NVDIMM_FWA_RESULT_SUCCESS,
+ NVDIMM_FWA_RESULT_NOTSTAGED,
+ NVDIMM_FWA_RESULT_NEEDRESET,
+ NVDIMM_FWA_RESULT_FAIL,
+};
+
+struct nvdimm_bus_fw_ops {
+ enum nvdimm_fwa_state (*activate_state)
+ (struct nvdimm_bus_descriptor *nd_desc);
+ enum nvdimm_fwa_capability (*capability)
+ (struct nvdimm_bus_descriptor *nd_desc);
+ int (*activate)(struct nvdimm_bus_descriptor *nd_desc);
+};
+
+struct nvdimm_fw_ops {
+ enum nvdimm_fwa_state (*activate_state)(struct nvdimm *nvdimm);
+ enum nvdimm_fwa_result (*activate_result)(struct nvdimm *nvdimm);
+ int (*arm)(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arg);
+};
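A provider opts in by implementing these ops and pointing the fw_ops hooks at them (the descriptor member was added above; all my_* names here are hypothetical). A minimal sketch for the bus side:

	static enum nvdimm_fwa_state
	my_activate_state(struct nvdimm_bus_descriptor *nd_desc)
	{
		return NVDIMM_FWA_IDLE;		/* would query the platform */
	}

	static enum nvdimm_fwa_capability
	my_capability(struct nvdimm_bus_descriptor *nd_desc)
	{
		return NVDIMM_FWA_CAP_QUIESCE;
	}

	static int my_activate(struct nvdimm_bus_descriptor *nd_desc)
	{
		return 0;			/* kick off the activation */
	}

	static const struct nvdimm_bus_fw_ops my_bus_fw_ops = {
		.activate_state	= my_activate_state,
		.capability	= my_capability,
		.activate	= my_activate,
	};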
+
void badrange_init(struct badrange *badrange);
int badrange_add(struct badrange *badrange, u64 addr, u64 length);
void badrange_forget(struct badrange *badrange, phys_addr_t start,
@@ -211,7 +242,6 @@ struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm);
struct nvdimm *to_nvdimm(struct device *dev);
struct nd_region *to_nd_region(struct device *dev);
struct device *nd_region_dev(struct nd_region *nd_region);
-struct nd_blk_region *to_nd_blk_region(struct device *dev);
struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus);
struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus);
const char *nvdimm_name(struct nvdimm *nvdimm);
@@ -222,15 +252,18 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
void *provider_data, const struct attribute_group **groups,
unsigned long flags, unsigned long cmd_mask, int num_flush,
struct resource *flush_wpq, const char *dimm_id,
- const struct nvdimm_security_ops *sec_ops);
+ const struct nvdimm_security_ops *sec_ops,
+ const struct nvdimm_fw_ops *fw_ops);
static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus,
void *provider_data, const struct attribute_group **groups,
unsigned long flags, unsigned long cmd_mask, int num_flush,
struct resource *flush_wpq)
{
return __nvdimm_create(nvdimm_bus, provider_data, groups, flags,
- cmd_mask, num_flush, flush_wpq, NULL, NULL);
+ cmd_mask, num_flush, flush_wpq, NULL, NULL, NULL);
}
+void nvdimm_delete(struct nvdimm *nvdimm);
+void nvdimm_region_delete(struct nd_region *nd_region);
const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
@@ -247,10 +280,6 @@ struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_region_desc *ndr_desc);
void *nd_region_provider_data(struct nd_region *nd_region);
-void *nd_blk_region_provider_data(struct nd_blk_region *ndbr);
-void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data);
-struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr);
-unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr);
unsigned int nd_region_acquire_lane(struct nd_region *nd_region);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
u64 nd_fletcher64(void *addr, size_t len, bool le);
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
deleted file mode 100644
index ee8ec2e68055..000000000000
--- a/include/linux/lightnvm.h
+++ /dev/null
@@ -1,700 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef NVM_H
-#define NVM_H
-
-#include <linux/blkdev.h>
-#include <linux/types.h>
-#include <uapi/linux/lightnvm.h>
-
-enum {
- NVM_IO_OK = 0,
- NVM_IO_REQUEUE = 1,
- NVM_IO_DONE = 2,
- NVM_IO_ERR = 3,
-
- NVM_IOTYPE_NONE = 0,
- NVM_IOTYPE_GC = 1,
-};
-
-/* common format */
-#define NVM_GEN_CH_BITS (8)
-#define NVM_GEN_LUN_BITS (8)
-#define NVM_GEN_BLK_BITS (16)
-#define NVM_GEN_RESERVED (32)
-
-/* 1.2 format */
-#define NVM_12_PG_BITS (16)
-#define NVM_12_PL_BITS (4)
-#define NVM_12_SEC_BITS (4)
-#define NVM_12_RESERVED (8)
-
-/* 2.0 format */
-#define NVM_20_SEC_BITS (24)
-#define NVM_20_RESERVED (8)
-
-enum {
- NVM_OCSSD_SPEC_12 = 12,
- NVM_OCSSD_SPEC_20 = 20,
-};
-
-struct ppa_addr {
- /* Generic structure for all addresses */
- union {
- /* generic device format */
- struct {
- u64 ch : NVM_GEN_CH_BITS;
- u64 lun : NVM_GEN_LUN_BITS;
- u64 blk : NVM_GEN_BLK_BITS;
- u64 reserved : NVM_GEN_RESERVED;
- } a;
-
- /* 1.2 device format */
- struct {
- u64 ch : NVM_GEN_CH_BITS;
- u64 lun : NVM_GEN_LUN_BITS;
- u64 blk : NVM_GEN_BLK_BITS;
- u64 pg : NVM_12_PG_BITS;
- u64 pl : NVM_12_PL_BITS;
- u64 sec : NVM_12_SEC_BITS;
- u64 reserved : NVM_12_RESERVED;
- } g;
-
- /* 2.0 device format */
- struct {
- u64 grp : NVM_GEN_CH_BITS;
- u64 pu : NVM_GEN_LUN_BITS;
- u64 chk : NVM_GEN_BLK_BITS;
- u64 sec : NVM_20_SEC_BITS;
- u64 reserved : NVM_20_RESERVED;
- } m;
-
- struct {
- u64 line : 63;
- u64 is_cached : 1;
- } c;
-
- u64 ppa;
- };
-};
-
-struct nvm_rq;
-struct nvm_id;
-struct nvm_dev;
-struct nvm_tgt_dev;
-struct nvm_chk_meta;
-
-typedef int (nvm_id_fn)(struct nvm_dev *);
-typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
-typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
-typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int,
- struct nvm_chk_meta *);
-typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *, void *);
-typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int);
-typedef void (nvm_destroy_dma_pool_fn)(void *);
-typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
- dma_addr_t *);
-typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
-
-struct nvm_dev_ops {
- nvm_id_fn *identity;
- nvm_op_bb_tbl_fn *get_bb_tbl;
- nvm_op_set_bb_fn *set_bb_tbl;
-
- nvm_get_chk_meta_fn *get_chk_meta;
-
- nvm_submit_io_fn *submit_io;
-
- nvm_create_dma_pool_fn *create_dma_pool;
- nvm_destroy_dma_pool_fn *destroy_dma_pool;
- nvm_dev_dma_alloc_fn *dev_dma_alloc;
- nvm_dev_dma_free_fn *dev_dma_free;
-};
-
-#ifdef CONFIG_NVM
-
-#include <linux/blkdev.h>
-#include <linux/file.h>
-#include <linux/dmapool.h>
-#include <uapi/linux/lightnvm.h>
-
-enum {
- /* HW Responsibilities */
- NVM_RSP_L2P = 1 << 0,
- NVM_RSP_ECC = 1 << 1,
-
- /* Physical Addressing Mode */
- NVM_ADDRMODE_LINEAR = 0,
- NVM_ADDRMODE_CHANNEL = 1,
-
- /* Plane programming mode for LUN */
- NVM_PLANE_SINGLE = 1,
- NVM_PLANE_DOUBLE = 2,
- NVM_PLANE_QUAD = 4,
-
- /* Status codes */
- NVM_RSP_SUCCESS = 0x0,
- NVM_RSP_NOT_CHANGEABLE = 0x1,
- NVM_RSP_ERR_FAILWRITE = 0x40ff,
- NVM_RSP_ERR_EMPTYPAGE = 0x42ff,
- NVM_RSP_ERR_FAILECC = 0x4281,
- NVM_RSP_ERR_FAILCRC = 0x4004,
- NVM_RSP_WARN_HIGHECC = 0x4700,
-
- /* Device opcodes */
- NVM_OP_PWRITE = 0x91,
- NVM_OP_PREAD = 0x92,
- NVM_OP_ERASE = 0x90,
-
- /* PPA Command Flags */
- NVM_IO_SNGL_ACCESS = 0x0,
- NVM_IO_DUAL_ACCESS = 0x1,
- NVM_IO_QUAD_ACCESS = 0x2,
-
- /* NAND Access Modes */
- NVM_IO_SUSPEND = 0x80,
- NVM_IO_SLC_MODE = 0x100,
- NVM_IO_SCRAMBLE_ENABLE = 0x200,
-
- /* Block Types */
- NVM_BLK_T_FREE = 0x0,
- NVM_BLK_T_BAD = 0x1,
- NVM_BLK_T_GRWN_BAD = 0x2,
- NVM_BLK_T_DEV = 0x4,
- NVM_BLK_T_HOST = 0x8,
-
- /* Memory capabilities */
- NVM_ID_CAP_SLC = 0x1,
- NVM_ID_CAP_CMD_SUSPEND = 0x2,
- NVM_ID_CAP_SCRAMBLE = 0x4,
- NVM_ID_CAP_ENCRYPT = 0x8,
-
- /* Memory types */
- NVM_ID_FMTYPE_SLC = 0,
- NVM_ID_FMTYPE_MLC = 1,
-
- /* Device capabilities */
- NVM_ID_DCAP_BBLKMGMT = 0x1,
- NVM_UD_DCAP_ECC = 0x2,
-};
-
-struct nvm_id_lp_mlc {
- u16 num_pairs;
- u8 pairs[886];
-};
-
-struct nvm_id_lp_tbl {
- __u8 id[8];
- struct nvm_id_lp_mlc mlc;
-};
-
-struct nvm_addrf_12 {
- u8 ch_len;
- u8 lun_len;
- u8 blk_len;
- u8 pg_len;
- u8 pln_len;
- u8 sec_len;
-
- u8 ch_offset;
- u8 lun_offset;
- u8 blk_offset;
- u8 pg_offset;
- u8 pln_offset;
- u8 sec_offset;
-
- u64 ch_mask;
- u64 lun_mask;
- u64 blk_mask;
- u64 pg_mask;
- u64 pln_mask;
- u64 sec_mask;
-};
-
-struct nvm_addrf {
- u8 ch_len;
- u8 lun_len;
- u8 chk_len;
- u8 sec_len;
- u8 rsv_len[2];
-
- u8 ch_offset;
- u8 lun_offset;
- u8 chk_offset;
- u8 sec_offset;
- u8 rsv_off[2];
-
- u64 ch_mask;
- u64 lun_mask;
- u64 chk_mask;
- u64 sec_mask;
- u64 rsv_mask[2];
-};
-
-enum {
- /* Chunk states */
- NVM_CHK_ST_FREE = 1 << 0,
- NVM_CHK_ST_CLOSED = 1 << 1,
- NVM_CHK_ST_OPEN = 1 << 2,
- NVM_CHK_ST_OFFLINE = 1 << 3,
-
- /* Chunk types */
- NVM_CHK_TP_W_SEQ = 1 << 0,
- NVM_CHK_TP_W_RAN = 1 << 1,
- NVM_CHK_TP_SZ_SPEC = 1 << 4,
-};
-
-/*
- * Note: The structure size is linked to nvme_nvm_chk_meta such that the same
- * buffer can be used when converting from little endian to cpu addressing.
- */
-struct nvm_chk_meta {
- u8 state;
- u8 type;
- u8 wi;
- u8 rsvd[5];
- u64 slba;
- u64 cnlb;
- u64 wp;
-};
-
-struct nvm_target {
- struct list_head list;
- struct nvm_tgt_dev *dev;
- struct nvm_tgt_type *type;
- struct gendisk *disk;
-};
-
-#define ADDR_EMPTY (~0ULL)
-
-#define NVM_TARGET_DEFAULT_OP (101)
-#define NVM_TARGET_MIN_OP (3)
-#define NVM_TARGET_MAX_OP (80)
-
-#define NVM_VERSION_MAJOR 1
-#define NVM_VERSION_MINOR 0
-#define NVM_VERSION_PATCH 0
-
-#define NVM_MAX_VLBA (64) /* max logical blocks in a vector command */
-
-struct nvm_rq;
-typedef void (nvm_end_io_fn)(struct nvm_rq *);
-
-struct nvm_rq {
- struct nvm_tgt_dev *dev;
-
- struct bio *bio;
-
- union {
- struct ppa_addr ppa_addr;
- dma_addr_t dma_ppa_list;
- };
-
- struct ppa_addr *ppa_list;
-
- void *meta_list;
- dma_addr_t dma_meta_list;
-
- nvm_end_io_fn *end_io;
-
- uint8_t opcode;
- uint16_t nr_ppas;
- uint16_t flags;
-
- u64 ppa_status; /* ppa media status */
- int error;
-
- int is_seq; /* Sequential hint flag. 1.2 only */
-
- void *private;
-};
-
-static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
-{
- return pdu - sizeof(struct nvm_rq);
-}
-
-static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
-{
- return rqdata + 1;
-}
-
-static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
-{
- return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
-}
-
-enum {
- NVM_BLK_ST_FREE = 0x1, /* Free block */
- NVM_BLK_ST_TGT = 0x2, /* Block in use by target */
- NVM_BLK_ST_BAD = 0x8, /* Bad block */
-};
-
-/* Instance geometry */
-struct nvm_geo {
- /* device reported version */
- u8 major_ver_id;
- u8 minor_ver_id;
-
- /* kernel short version */
- u8 version;
-
- /* instance specific geometry */
- int num_ch;
- int num_lun; /* per channel */
-
- /* calculated values */
- int all_luns; /* across channels */
- int all_chunks; /* across channels */
-
- int op; /* over-provision in instance */
-
- sector_t total_secs; /* across channels */
-
- /* chunk geometry */
- u32 num_chk; /* chunks per lun */
- u32 clba; /* sectors per chunk */
- u16 csecs; /* sector size */
- u16 sos; /* out-of-band area size */
- bool ext; /* metadata in extended data buffer */
- u32 mdts; /* Max data transfer size*/
-
- /* device write constraints */
- u32 ws_min; /* minimum write size */
- u32 ws_opt; /* optimal write size */
- u32 mw_cunits; /* distance required for successful read */
- u32 maxoc; /* maximum open chunks */
- u32 maxocpu; /* maximum open chunks per parallel unit */
-
- /* device capabilities */
- u32 mccap;
-
- /* device timings */
- u32 trdt; /* Avg. Tread (ns) */
- u32 trdm; /* Max Tread (ns) */
- u32 tprt; /* Avg. Tprog (ns) */
- u32 tprm; /* Max Tprog (ns) */
- u32 tbet; /* Avg. Terase (ns) */
- u32 tbem; /* Max Terase (ns) */
-
- /* generic address format */
- struct nvm_addrf addrf;
-
- /* 1.2 compatibility */
- u8 vmnt;
- u32 cap;
- u32 dom;
-
- u8 mtype;
- u8 fmtype;
-
- u16 cpar;
- u32 mpos;
-
- u8 num_pln;
- u8 pln_mode;
- u16 num_pg;
- u16 fpg_sz;
-};
-
-/* sub-device structure */
-struct nvm_tgt_dev {
- /* Device information */
- struct nvm_geo geo;
-
- /* Base ppas for target LUNs */
- struct ppa_addr *luns;
-
- struct request_queue *q;
-
- struct nvm_dev *parent;
- void *map;
-};
-
-struct nvm_dev {
- struct nvm_dev_ops *ops;
-
- struct list_head devices;
-
- /* Device information */
- struct nvm_geo geo;
-
- unsigned long *lun_map;
- void *dma_pool;
-
- /* Backend device */
- struct request_queue *q;
- char name[DISK_NAME_LEN];
- void *private_data;
-
- struct kref ref;
- void *rmap;
-
- struct mutex mlock;
- spinlock_t lock;
-
- /* target management */
- struct list_head area_list;
- struct list_head targets;
-};
-
-static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
- struct ppa_addr r)
-{
- struct nvm_geo *geo = &dev->geo;
- struct ppa_addr l;
-
- if (geo->version == NVM_OCSSD_SPEC_12) {
- struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;
-
- l.ppa = ((u64)r.g.ch) << ppaf->ch_offset;
- l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset;
- l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset;
- l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset;
- l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset;
- l.ppa |= ((u64)r.g.sec) << ppaf->sec_offset;
- } else {
- struct nvm_addrf *lbaf = &geo->addrf;
-
- l.ppa = ((u64)r.m.grp) << lbaf->ch_offset;
- l.ppa |= ((u64)r.m.pu) << lbaf->lun_offset;
- l.ppa |= ((u64)r.m.chk) << lbaf->chk_offset;
- l.ppa |= ((u64)r.m.sec) << lbaf->sec_offset;
- }
-
- return l;
-}
-
-static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
- struct ppa_addr r)
-{
- struct nvm_geo *geo = &dev->geo;
- struct ppa_addr l;
-
- l.ppa = 0;
-
- if (geo->version == NVM_OCSSD_SPEC_12) {
- struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;
-
- l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset;
- l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset;
- l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset;
- l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset;
- l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset;
- l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sec_offset;
- } else {
- struct nvm_addrf *lbaf = &geo->addrf;
-
- l.m.grp = (r.ppa & lbaf->ch_mask) >> lbaf->ch_offset;
- l.m.pu = (r.ppa & lbaf->lun_mask) >> lbaf->lun_offset;
- l.m.chk = (r.ppa & lbaf->chk_mask) >> lbaf->chk_offset;
- l.m.sec = (r.ppa & lbaf->sec_mask) >> lbaf->sec_offset;
- }
-
- return l;
-}
-
-static inline u64 dev_to_chunk_addr(struct nvm_dev *dev, void *addrf,
- struct ppa_addr p)
-{
- struct nvm_geo *geo = &dev->geo;
- u64 caddr;
-
- if (geo->version == NVM_OCSSD_SPEC_12) {
- struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)addrf;
-
- caddr = (u64)p.g.pg << ppaf->pg_offset;
- caddr |= (u64)p.g.pl << ppaf->pln_offset;
- caddr |= (u64)p.g.sec << ppaf->sec_offset;
- } else {
- caddr = p.m.sec;
- }
-
- return caddr;
-}
-
-static inline struct ppa_addr nvm_ppa32_to_ppa64(struct nvm_dev *dev,
- void *addrf, u32 ppa32)
-{
- struct ppa_addr ppa64;
-
- ppa64.ppa = 0;
-
- if (ppa32 == -1) {
- ppa64.ppa = ADDR_EMPTY;
- } else if (ppa32 & (1U << 31)) {
- ppa64.c.line = ppa32 & ((~0U) >> 1);
- ppa64.c.is_cached = 1;
- } else {
- struct nvm_geo *geo = &dev->geo;
-
- if (geo->version == NVM_OCSSD_SPEC_12) {
- struct nvm_addrf_12 *ppaf = addrf;
-
- ppa64.g.ch = (ppa32 & ppaf->ch_mask) >>
- ppaf->ch_offset;
- ppa64.g.lun = (ppa32 & ppaf->lun_mask) >>
- ppaf->lun_offset;
- ppa64.g.blk = (ppa32 & ppaf->blk_mask) >>
- ppaf->blk_offset;
- ppa64.g.pg = (ppa32 & ppaf->pg_mask) >>
- ppaf->pg_offset;
- ppa64.g.pl = (ppa32 & ppaf->pln_mask) >>
- ppaf->pln_offset;
- ppa64.g.sec = (ppa32 & ppaf->sec_mask) >>
- ppaf->sec_offset;
- } else {
- struct nvm_addrf *lbaf = addrf;
-
- ppa64.m.grp = (ppa32 & lbaf->ch_mask) >>
- lbaf->ch_offset;
- ppa64.m.pu = (ppa32 & lbaf->lun_mask) >>
- lbaf->lun_offset;
- ppa64.m.chk = (ppa32 & lbaf->chk_mask) >>
- lbaf->chk_offset;
- ppa64.m.sec = (ppa32 & lbaf->sec_mask) >>
- lbaf->sec_offset;
- }
- }
-
- return ppa64;
-}
-
-static inline u32 nvm_ppa64_to_ppa32(struct nvm_dev *dev,
- void *addrf, struct ppa_addr ppa64)
-{
- u32 ppa32 = 0;
-
- if (ppa64.ppa == ADDR_EMPTY) {
- ppa32 = ~0U;
- } else if (ppa64.c.is_cached) {
- ppa32 |= ppa64.c.line;
- ppa32 |= 1U << 31;
- } else {
- struct nvm_geo *geo = &dev->geo;
-
- if (geo->version == NVM_OCSSD_SPEC_12) {
- struct nvm_addrf_12 *ppaf = addrf;
-
- ppa32 |= ppa64.g.ch << ppaf->ch_offset;
- ppa32 |= ppa64.g.lun << ppaf->lun_offset;
- ppa32 |= ppa64.g.blk << ppaf->blk_offset;
- ppa32 |= ppa64.g.pg << ppaf->pg_offset;
- ppa32 |= ppa64.g.pl << ppaf->pln_offset;
- ppa32 |= ppa64.g.sec << ppaf->sec_offset;
- } else {
- struct nvm_addrf *lbaf = addrf;
-
- ppa32 |= ppa64.m.grp << lbaf->ch_offset;
- ppa32 |= ppa64.m.pu << lbaf->lun_offset;
- ppa32 |= ppa64.m.chk << lbaf->chk_offset;
- ppa32 |= ppa64.m.sec << lbaf->sec_offset;
- }
- }
-
- return ppa32;
-}
-
-static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev,
- struct ppa_addr *ppa)
-{
- struct nvm_geo *geo = &dev->geo;
- int last = 0;
-
- if (geo->version == NVM_OCSSD_SPEC_12) {
- int sec = ppa->g.sec;
-
- sec++;
- if (sec == geo->ws_min) {
- int pg = ppa->g.pg;
-
- sec = 0;
- pg++;
- if (pg == geo->num_pg) {
- int pl = ppa->g.pl;
-
- pg = 0;
- pl++;
- if (pl == geo->num_pln)
- last = 1;
-
- ppa->g.pl = pl;
- }
- ppa->g.pg = pg;
- }
- ppa->g.sec = sec;
- } else {
- ppa->m.sec++;
- if (ppa->m.sec == geo->clba)
- last = 1;
- }
-
- return last;
-}
-
-typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
-typedef sector_t (nvm_tgt_capacity_fn)(void *);
-typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
- int flags);
-typedef void (nvm_tgt_exit_fn)(void *, bool);
-typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
-typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);
-
-enum {
- NVM_TGT_F_DEV_L2P = 0,
- NVM_TGT_F_HOST_L2P = 1 << 0,
-};
-
-struct nvm_tgt_type {
- const char *name;
- unsigned int version[3];
- int flags;
-
- /* target entry points */
- nvm_tgt_make_rq_fn *make_rq;
- nvm_tgt_capacity_fn *capacity;
-
- /* module-specific init/teardown */
- nvm_tgt_init_fn *init;
- nvm_tgt_exit_fn *exit;
-
- /* sysfs */
- nvm_tgt_sysfs_init_fn *sysfs_init;
- nvm_tgt_sysfs_exit_fn *sysfs_exit;
-
- /* For internal use */
- struct list_head list;
- struct module *owner;
-};
-
-extern int nvm_register_tgt_type(struct nvm_tgt_type *);
-extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);
-
-extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
-extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);
-
-extern struct nvm_dev *nvm_alloc_dev(int);
-extern int nvm_register(struct nvm_dev *);
-extern void nvm_unregister(struct nvm_dev *);
-
-extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr,
- int, struct nvm_chk_meta *);
-extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *,
- int, int);
-extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *, void *);
-extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *, void *);
-extern void nvm_end_io(struct nvm_rq *);
-
-#else /* CONFIG_NVM */
-struct nvm_dev_ops;
-
-static inline struct nvm_dev *nvm_alloc_dev(int node)
-{
- return ERR_PTR(-EINVAL);
-}
-static inline int nvm_register(struct nvm_dev *dev)
-{
- return -EINVAL;
-}
-static inline void nvm_unregister(struct nvm_dev *dev) {}
-#endif /* CONFIG_NVM */
-#endif /* LIGHTNVM.H */
diff --git a/include/linux/limits.h b/include/linux/limits.h
index 76afcd24ff8c..f6bcc9369010 100644
--- a/include/linux/limits.h
+++ b/include/linux/limits.h
@@ -4,20 +4,10 @@
#include <uapi/linux/limits.h>
#include <linux/types.h>
+#include <vdso/limits.h>
-#define USHRT_MAX ((unsigned short)~0U)
-#define SHRT_MAX ((short)(USHRT_MAX >> 1))
-#define SHRT_MIN ((short)(-SHRT_MAX - 1))
-#define INT_MAX ((int)(~0U >> 1))
-#define INT_MIN (-INT_MAX - 1)
-#define UINT_MAX (~0U)
-#define LONG_MAX ((long)(~0UL >> 1))
-#define LONG_MIN (-LONG_MAX - 1)
-#define ULONG_MAX (~0UL)
-#define LLONG_MAX ((long long)(~0ULL >> 1))
-#define LLONG_MIN (-LLONG_MAX - 1)
-#define ULLONG_MAX (~0ULL)
#define SIZE_MAX (~(size_t)0)
+#define SSIZE_MAX ((ssize_t)(SIZE_MAX >> 1))
#define PHYS_ADDR_MAX (~(phys_addr_t)0)
#define U8_MAX ((u8)~0U)
@@ -27,6 +17,7 @@
#define S16_MAX ((s16)(U16_MAX >> 1))
#define S16_MIN ((s16)(-S16_MAX - 1))
#define U32_MAX ((u32)~0U)
+#define U32_MIN ((u32)0)
#define S32_MAX ((s32)(U32_MAX >> 1))
#define S32_MIN ((s32)(-S32_MAX - 1))
#define U64_MAX ((u64)~0ULL)
diff --git a/include/linux/linear_range.h b/include/linux/linear_range.h
new file mode 100644
index 000000000000..2e4f4c3539c0
--- /dev/null
+++ b/include/linux/linear_range.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 ROHM Semiconductors */
+
+#ifndef LINEAR_RANGE_H
+#define LINEAR_RANGE_H
+
+#include <linux/types.h>
+
+/**
+ * struct linear_range - table of selector/value pairs
+ *
+ * Define a lookup table for a range of values. Intended to help when looking
+ * for a register value matching a certain physical measure (like voltage).
+ * Usable when an increment of one in the register always results in a
+ * constant increment of the physical measure (like voltage).
+ *
+ * @min: Lowest value in range
+ * @min_sel: Lowest selector for range
+ * @max_sel: Highest selector for range
+ * @step: Value step size
+ */
+struct linear_range {
+ unsigned int min;
+ unsigned int min_sel;
+ unsigned int max_sel;
+ unsigned int step;
+};
+
+#define LINEAR_RANGE(_min, _min_sel, _max_sel, _step) \
+ { \
+ .min = _min, \
+ .min_sel = _min_sel, \
+ .max_sel = _max_sel, \
+ .step = _step, \
+ }
+
+#define LINEAR_RANGE_IDX(_idx, _min, _min_sel, _max_sel, _step) \
+ [_idx] = LINEAR_RANGE(_min, _min_sel, _max_sel, _step)
+
+unsigned int linear_range_values_in_range(const struct linear_range *r);
+unsigned int linear_range_values_in_range_array(const struct linear_range *r,
+ int ranges);
+unsigned int linear_range_get_max_value(const struct linear_range *r);
+
+int linear_range_get_value(const struct linear_range *r, unsigned int selector,
+ unsigned int *val);
+int linear_range_get_value_array(const struct linear_range *r, int ranges,
+ unsigned int selector, unsigned int *val);
+int linear_range_get_selector_low(const struct linear_range *r,
+ unsigned int val, unsigned int *selector,
+ bool *found);
+int linear_range_get_selector_high(const struct linear_range *r,
+ unsigned int val, unsigned int *selector,
+ bool *found);
+void linear_range_get_selector_within(const struct linear_range *r,
+ unsigned int val, unsigned int *selector);
+int linear_range_get_selector_low_array(const struct linear_range *r,
+ int ranges, unsigned int val,
+ unsigned int *selector, bool *found);
+
+#endif
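A minimal usage sketch (the 0.9 V regulator with 12.5 mV steps is hypothetical): selector min_sel maps to min, and each further selector adds one step:

	static const struct linear_range buck_range =
		LINEAR_RANGE(900000, 0, 15, 12500);

	unsigned int uV;
	int ret = linear_range_get_value(&buck_range, 7, &uV);
	/* on success (ret == 0): uV == 900000 + 7 * 12500 == 987500 */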
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 9280209d1f62..1feab6136b5b 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -36,8 +36,8 @@
__stringify(name))
#endif
-#define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
-#define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
+#define __page_aligned_data __section(".data..page_aligned") __aligned(PAGE_SIZE)
+#define __page_aligned_bss __section(".bss..page_aligned") __aligned(PAGE_SIZE)
/*
* For assembly routines.
@@ -105,7 +105,7 @@
/* === DEPRECATED annotations === */
-#ifndef CONFIG_X86
+#ifndef CONFIG_ARCH_USE_SYM_ANNOTATIONS
#ifndef GLOBAL
/* deprecated, use SYM_DATA*, SYM_ENTRY, or similar */
#define GLOBAL(name) \
@@ -118,10 +118,10 @@
#define ENTRY(name) \
SYM_FUNC_START(name)
#endif
-#endif /* CONFIG_X86 */
+#endif /* CONFIG_ARCH_USE_SYM_ANNOTATIONS */
#endif /* LINKER_SCRIPT */
-#ifndef CONFIG_X86
+#ifndef CONFIG_ARCH_USE_SYM_ANNOTATIONS
#ifndef WEAK
/* deprecated, use SYM_FUNC_START_WEAK* */
#define WEAK(name) \
@@ -143,7 +143,7 @@
#define ENDPROC(name) \
SYM_FUNC_END(name)
#endif
-#endif /* CONFIG_X86 */
+#endif /* CONFIG_ARCH_USE_SYM_ANNOTATIONS */
/* === generic annotations === */
@@ -165,7 +165,15 @@
#ifndef SYM_END
#define SYM_END(name, sym_type) \
.type name sym_type ASM_NL \
- .size name, .-name
+ .set .L__sym_size_##name, .-name ASM_NL \
+ .size name, .L__sym_size_##name
+#endif
+
+/* SYM_ALIAS -- use only if you have to */
+#ifndef SYM_ALIAS
+#define SYM_ALIAS(alias, name, linkage) \
+ linkage(alias) ASM_NL \
+ .set alias, name ASM_NL
#endif
/* === code annotations === */
@@ -178,6 +186,11 @@
* Objtool generates debug info for both FUNC & CODE, but needs special
* annotations for each CODE's start (to describe the actual stack frame).
*
+ * Objtool requires that all code be contained in an ELF symbol. Symbols
+ * with a .L prefix do not get symbol table entries, so .L-prefixed
+ * symbols can be used within a code region, but should be avoided for
+ * denoting a range of code via ``SYM_*_START/END`` annotations.
+ *
* ALIAS -- does not generate debug info -- the aliased function will
*/
@@ -195,30 +208,8 @@
SYM_ENTRY(name, linkage, SYM_A_NONE)
#endif
-/*
- * SYM_FUNC_START_LOCAL_ALIAS -- use where there are two local names for one
- * function
- */
-#ifndef SYM_FUNC_START_LOCAL_ALIAS
-#define SYM_FUNC_START_LOCAL_ALIAS(name) \
- SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
-#endif
-
-/*
- * SYM_FUNC_START_ALIAS -- use where there are two global names for one
- * function
- */
-#ifndef SYM_FUNC_START_ALIAS
-#define SYM_FUNC_START_ALIAS(name) \
- SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
-#endif
-
/* SYM_FUNC_START -- use for global functions */
#ifndef SYM_FUNC_START
-/*
- * The same as SYM_FUNC_START_ALIAS, but we will need to distinguish these two
- * later.
- */
#define SYM_FUNC_START(name) \
SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
#endif
@@ -231,7 +222,6 @@
/* SYM_FUNC_START_LOCAL -- use for local functions */
#ifndef SYM_FUNC_START_LOCAL
-/* the same as SYM_FUNC_START_LOCAL_ALIAS, see comment near SYM_FUNC_START */
#define SYM_FUNC_START_LOCAL(name) \
SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
#endif
@@ -254,22 +244,39 @@
SYM_START(name, SYM_L_WEAK, SYM_A_NONE)
#endif
-/* SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function */
-#ifndef SYM_FUNC_END_ALIAS
-#define SYM_FUNC_END_ALIAS(name) \
- SYM_END(name, SYM_T_FUNC)
-#endif
-
/*
* SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START,
* SYM_FUNC_START_WEAK, ...
*/
#ifndef SYM_FUNC_END
-/* the same as SYM_FUNC_END_ALIAS, see comment near SYM_FUNC_START */
#define SYM_FUNC_END(name) \
SYM_END(name, SYM_T_FUNC)
#endif
+/*
+ * SYM_FUNC_ALIAS -- define a global alias for an existing function
+ */
+#ifndef SYM_FUNC_ALIAS
+#define SYM_FUNC_ALIAS(alias, name) \
+ SYM_ALIAS(alias, name, SYM_L_GLOBAL)
+#endif
+
+/*
+ * SYM_FUNC_ALIAS_LOCAL -- define a local alias for an existing function
+ */
+#ifndef SYM_FUNC_ALIAS_LOCAL
+#define SYM_FUNC_ALIAS_LOCAL(alias, name) \
+ SYM_ALIAS(alias, name, SYM_L_LOCAL)
+#endif
+
+/*
+ * SYM_FUNC_ALIAS_WEAK -- define a weak global alias for an existing function
+ */
+#ifndef SYM_FUNC_ALIAS_WEAK
+#define SYM_FUNC_ALIAS_WEAK(alias, name) \
+ SYM_ALIAS(alias, name, SYM_L_WEAK)
+#endif
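Instead of opening a second symbol with SYM_FUNC_START_ALIAS, the body is now defined once and the alias attached afterwards, so the .type/.size bookkeeping stays with the real symbol. A minimal sketch as it would appear in an x86 .S file (names hypothetical):

	SYM_FUNC_START(__my_func)
		ret
	SYM_FUNC_END(__my_func)

	SYM_FUNC_ALIAS(my_func, __my_func)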
+
/* SYM_CODE_START -- use for non-C (special) functions */
#ifndef SYM_CODE_START
#define SYM_CODE_START(name) \
diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h
index fe740031339d..15e0e0209da4 100644
--- a/include/linux/linkmode.h
+++ b/include/linux/linkmode.h
@@ -66,12 +66,7 @@ static inline void linkmode_mod_bit(int nr, volatile unsigned long *addr,
linkmode_clear_bit(nr, addr);
}
-static inline void linkmode_change_bit(int nr, volatile unsigned long *addr)
-{
- __change_bit(nr, addr);
-}
-
-static inline int linkmode_test_bit(int nr, volatile unsigned long *addr)
+static inline int linkmode_test_bit(int nr, const volatile unsigned long *addr)
{
return test_bit(nr, addr);
}
@@ -82,10 +77,22 @@ static inline int linkmode_equal(const unsigned long *src1,
return bitmap_equal(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
+static inline int linkmode_intersects(const unsigned long *src1,
+ const unsigned long *src2)
+{
+ return bitmap_intersects(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
static inline int linkmode_subset(const unsigned long *src1,
const unsigned long *src2)
{
return bitmap_subset(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
+void linkmode_resolve_pause(const unsigned long *local_adv,
+ const unsigned long *partner_adv,
+ bool *tx_pause, bool *rx_pause);
+
+void linkmode_set_pause(unsigned long *advertisement, bool tx, bool rx);
+
#endif /* __LINKMODE_H */
diff --git a/include/linux/list.h b/include/linux/list.h
index 884216db3246..61762054b4be 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -2,14 +2,16 @@
#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H
+#include <linux/container_of.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/poison.h>
#include <linux/const.h>
-#include <linux/kernel.h>
+
+#include <asm/barrier.h>
/*
- * Simple doubly linked list implementation.
+ * Circular doubly linked list implementation.
*
* Some of the internal functions ("__xxx") are useful when
* manipulating whole lists rather than single entries, as
@@ -33,7 +35,7 @@
static inline void INIT_LIST_HEAD(struct list_head *list)
{
WRITE_ONCE(list->next, list);
- list->prev = list;
+ WRITE_ONCE(list->prev, list);
}
#ifdef CONFIG_DEBUG_LIST
@@ -256,8 +258,7 @@ static inline void list_bulk_move_tail(struct list_head *head,
* @list: the entry to test
* @head: the head of the list
*/
-static inline int list_is_first(const struct list_head *list,
- const struct list_head *head)
+static inline int list_is_first(const struct list_head *list, const struct list_head *head)
{
return list->prev == head;
}
@@ -267,13 +268,22 @@ static inline int list_is_first(const struct list_head *list,
* @list: the entry to test
* @head: the head of the list
*/
-static inline int list_is_last(const struct list_head *list,
- const struct list_head *head)
+static inline int list_is_last(const struct list_head *list, const struct list_head *head)
{
return list->next == head;
}
/**
+ * list_is_head - tests whether @list is the list @head
+ * @list: the entry to test
+ * @head: the head of the list
+ */
+static inline int list_is_head(const struct list_head *list, const struct list_head *head)
+{
+ return list == head;
+}
+
+/**
* list_empty - tests whether a list is empty
* @head: the list to test.
*/
@@ -283,6 +293,24 @@ static inline int list_empty(const struct list_head *head)
}
/**
+ * list_del_init_careful - deletes entry from list and reinitializes it.
+ * @entry: the element to delete from the list.
+ *
+ * This is the same as list_del_init(), except designed to be used
+ * together with list_empty_careful() in a way to guarantee ordering
+ * of other memory operations.
+ *
+ * Any memory operations done before a list_del_init_careful() are
+ * guaranteed to be visible after a list_empty_careful() test.
+ */
+static inline void list_del_init_careful(struct list_head *entry)
+{
+ __list_del_entry(entry);
+ WRITE_ONCE(entry->prev, entry);
+ smp_store_release(&entry->next, entry);
+}
+
+/**
* list_empty_careful - tests whether a list is empty and not being modified
* @head: the list to test
*
@@ -297,8 +325,8 @@ static inline int list_empty(const struct list_head *head)
*/
static inline int list_empty_careful(const struct list_head *head)
{
- struct list_head *next = head->next;
- return (next == head) && (next == head->prev);
+ struct list_head *next = smp_load_acquire(&head->next);
+ return list_is_head(next, head) && (next == READ_ONCE(head->prev));
}
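list_del_init_careful() and list_empty_careful() form a release/acquire pair: writes made before the deletion are visible to anyone who subsequently observes the entry as empty. A minimal sketch of the pairing (simplified from the wait-queue style usage; struct my_work and the helpers are hypothetical):

	struct my_work {
		struct list_head node;
		int result;
	};

	/* producer: publish the result, then signal by unlinking */
	static void my_complete(struct my_work *w)
	{
		w->result = 42;				/* ordinary write ...   */
		list_del_init_careful(&w->node);	/* ... released here    */
	}

	/* consumer: the acquire in list_empty_careful() orders the read */
	static bool my_done(struct my_work *w)
	{
		return list_empty_careful(&w->node);	/* w->result now safe */
	}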
/**
@@ -373,10 +401,9 @@ static inline void list_cut_position(struct list_head *list,
{
if (list_empty(head))
return;
- if (list_is_singular(head) &&
- (head->next != entry && head != entry))
+ if (list_is_singular(head) && !list_is_head(entry, head) && (entry != head->next))
return;
- if (entry == head)
+ if (list_is_head(entry, head))
INIT_LIST_HEAD(list);
else
__list_cut_position(list, head, entry);
@@ -537,6 +564,19 @@ static inline void list_splice_tail_init(struct list_head *list,
list_entry((pos)->member.next, typeof(*(pos)), member)
/**
+ * list_next_entry_circular - get the next element in list
+ * @pos: the type * to cursor.
+ * @head: the list head to take the element from.
+ * @member: the name of the list_head within the struct.
+ *
+ * Wraps around if pos is the last element (returns the first element).
+ * Note that the list is expected to be non-empty.
+ */
+#define list_next_entry_circular(pos, head, member) \
+ (list_is_last(&(pos)->member, head) ? \
+ list_first_entry(head, typeof(*(pos)), member) : list_next_entry(pos, member))
+
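A minimal sketch of round-robin traversal with the new helper (struct worker is hypothetical; the list must be non-empty):

	struct worker {
		struct list_head node;
		int id;
	};

	static struct worker *next_worker(struct worker *cur, struct list_head *head)
	{
		/* wraps back to the first entry when cur is the last one */
		return list_next_entry_circular(cur, head, node);
	}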
+/**
* list_prev_entry - get the prev element in list
* @pos: the type * to cursor
* @member: the name of the list_head within the struct.
@@ -545,12 +585,35 @@ static inline void list_splice_tail_init(struct list_head *list,
list_entry((pos)->member.prev, typeof(*(pos)), member)
/**
+ * list_prev_entry_circular - get the prev element in list
+ * @pos: the type * to cursor.
+ * @head: the list head to take the element from.
+ * @member: the name of the list_head within the struct.
+ *
+ * Wraps around if @pos is the first element, returning the last element.
+ * Note that the list is expected to be non-empty.
+ */
+#define list_prev_entry_circular(pos, head, member) \
+ (list_is_first(&(pos)->member, head) ? \
+ list_last_entry(head, typeof(*(pos)), member) : list_prev_entry(pos, member))
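
As a usage sketch for the two circular helpers, consider a hypothetical playlist whose cursor should never fall off either end (struct song and next_track()/prev_track() are illustrative only):

        struct song {
                struct list_head node;
                const char *title;
        };

        /* Wraps from the last track back to the first; the playlist
         * must be non-empty, as the kerneldoc requires. */
        static struct song *next_track(struct list_head *playlist,
                                       struct song *cur)
        {
                return list_next_entry_circular(cur, playlist, node);
        }

        static struct song *prev_track(struct list_head *playlist,
                                       struct song *cur)
        {
                return list_prev_entry_circular(cur, playlist, node);
        }
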
+
+/**
* list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
*/
#define list_for_each(pos, head) \
- for (pos = (head)->next; pos != (head); pos = pos->next)
+ for (pos = (head)->next; !list_is_head(pos, (head)); pos = pos->next)
+
+/**
+ * list_for_each_rcu - Iterate over a list in an RCU-safe fashion
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ */
+#define list_for_each_rcu(pos, head) \
+ for (pos = rcu_dereference((head)->next); \
+ !list_is_head(pos, (head)); \
+ pos = rcu_dereference(pos->next))
/**
* list_for_each_continue - continue iteration over a list
@@ -560,7 +623,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* Continue to iterate over a list, continuing after the current position.
*/
#define list_for_each_continue(pos, head) \
- for (pos = pos->next; pos != (head); pos = pos->next)
+ for (pos = pos->next; !list_is_head(pos, (head)); pos = pos->next)
/**
* list_for_each_prev - iterate over a list backwards
@@ -568,7 +631,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @head: the head for your list.
*/
#define list_for_each_prev(pos, head) \
- for (pos = (head)->prev; pos != (head); pos = pos->prev)
+ for (pos = (head)->prev; !list_is_head(pos, (head)); pos = pos->prev)
/**
* list_for_each_safe - iterate over a list safe against removal of list entry
@@ -577,8 +640,9 @@ static inline void list_splice_tail_init(struct list_head *list,
* @head: the head for your list.
*/
#define list_for_each_safe(pos, n, head) \
- for (pos = (head)->next, n = pos->next; pos != (head); \
- pos = n, n = pos->next)
+ for (pos = (head)->next, n = pos->next; \
+ !list_is_head(pos, (head)); \
+ pos = n, n = pos->next)
/**
* list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
@@ -588,10 +652,19 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_prev_safe(pos, n, head) \
for (pos = (head)->prev, n = pos->prev; \
- pos != (head); \
+ !list_is_head(pos, (head)); \
pos = n, n = pos->prev)
/**
+ * list_entry_is_head - test if the entry points to the head of the list
+ * @pos: the type * to cursor
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ */
+#define list_entry_is_head(pos, head, member) \
+ (&pos->member == (head))
+
+/**
* list_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
@@ -599,7 +672,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry(pos, head, member) \
for (pos = list_first_entry(head, typeof(*pos), member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = list_next_entry(pos, member))
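
For reference, a typical caller looks like the following; struct foo and sum_foos() are illustrative, not taken from this patch:

        struct foo {
                int val;
                struct list_head list;
        };

        static LIST_HEAD(foo_list);

        static int sum_foos(void)
        {
                struct foo *f;
                int sum = 0;

                /* the loop terminates once list_entry_is_head() sees
                 * the cursor wrap back to &foo_list */
                list_for_each_entry(f, &foo_list, list)
                        sum += f->val;
                return sum;
        }
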
/**
@@ -610,7 +683,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry_reverse(pos, head, member) \
for (pos = list_last_entry(head, typeof(*pos), member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = list_prev_entry(pos, member))
/**
@@ -635,7 +708,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry_continue(pos, head, member) \
for (pos = list_next_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = list_next_entry(pos, member))
/**
@@ -649,7 +722,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry_continue_reverse(pos, head, member) \
for (pos = list_prev_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = list_prev_entry(pos, member))
/**
@@ -661,7 +734,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* Iterate over list of given type, continuing from current position.
*/
#define list_for_each_entry_from(pos, head, member) \
- for (; &pos->member != (head); \
+ for (; !list_entry_is_head(pos, head, member); \
pos = list_next_entry(pos, member))
/**
@@ -674,7 +747,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* Iterate backwards over list of given type, continuing from current position.
*/
#define list_for_each_entry_from_reverse(pos, head, member) \
- for (; &pos->member != (head); \
+ for (; !list_entry_is_head(pos, head, member); \
pos = list_prev_entry(pos, member))
/**
@@ -687,7 +760,7 @@ static inline void list_splice_tail_init(struct list_head *list,
#define list_for_each_entry_safe(pos, n, head, member) \
for (pos = list_first_entry(head, typeof(*pos), member), \
n = list_next_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = n, n = list_next_entry(n, member))
/**
@@ -703,7 +776,7 @@ static inline void list_splice_tail_init(struct list_head *list,
#define list_for_each_entry_safe_continue(pos, n, head, member) \
for (pos = list_next_entry(pos, member), \
n = list_next_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = n, n = list_next_entry(n, member))
/**
@@ -718,7 +791,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry_safe_from(pos, n, head, member) \
for (n = list_next_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = n, n = list_next_entry(n, member))
/**
@@ -734,7 +807,7 @@ static inline void list_splice_tail_init(struct list_head *list,
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
for (pos = list_last_entry(head, typeof(*pos), member), \
n = list_prev_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = n, n = list_prev_entry(n, member))
/**
@@ -874,7 +947,7 @@ static inline void hlist_add_before(struct hlist_node *n,
}
/**
- * hlist_add_behing - add a new entry after the one specified
+ * hlist_add_behind - add a new entry after the one specified
* @n: new entry to be added
* @prev: hlist node to add it after, which must be non-NULL
*/
@@ -989,7 +1062,7 @@ static inline void hlist_move_list(struct hlist_head *old,
/**
* hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
* @pos: the type * to use as a loop cursor.
- * @n: another &struct hlist_node to use as temporary storage
+ * @n: a &struct hlist_node to use as temporary storage
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*/
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index d5ceb2839a2d..b35968ee9fb5 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -11,6 +11,7 @@
#include <linux/list.h>
#include <linux/nodemask.h>
#include <linux/shrinker.h>
+#include <linux/xarray.h>
struct mem_cgroup;
@@ -33,8 +34,8 @@ struct list_lru_one {
struct list_lru_memcg {
struct rcu_head rcu;
- /* array of per cgroup lists, indexed by memcg_cache_id */
- struct list_lru_one *lru[0];
+ /* array of per cgroup per node lists, indexed by node id */
+ struct list_lru_one node[];
};
struct list_lru_node {
@@ -42,11 +43,7 @@ struct list_lru_node {
spinlock_t lock;
/* global list, used for the root cgroup in cgroup aware lrus */
struct list_lru_one lru;
-#ifdef CONFIG_MEMCG_KMEM
- /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
- struct list_lru_memcg __rcu *memcg_lrus;
-#endif
- long nr_items;
+ long nr_items;
} ____cacheline_aligned_in_smp;
struct list_lru {
@@ -55,6 +52,7 @@ struct list_lru {
struct list_head list;
int shrinker_id;
bool memcg_aware;
+ struct xarray xa;
#endif
};
@@ -69,8 +67,9 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
#define list_lru_init_memcg(lru, shrinker) \
__list_lru_init((lru), true, NULL, shrinker)
-int memcg_update_all_list_lrus(int num_memcgs);
-void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg);
+int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
+ gfp_t gfp);
+void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent);
/**
* list_lru_add: add an element to the lru list's tail
@@ -146,7 +145,7 @@ typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
* @lru: the lru pointer.
* @nid: the node id to scan from.
* @memcg: the cgroup to scan from.
- * @isolate: callback function that is resposible for deciding what to do with
+ * @isolate: callback function that is responsible for deciding what to do with
* the item currently being scanned
* @cb_arg: opaque type that will be passed to @isolate
* @nr_to_walk: how many items to scan.
@@ -172,7 +171,7 @@ unsigned long list_lru_walk_one(struct list_lru *lru,
* @lru: the lru pointer.
* @nid: the node id to scan from.
* @memcg: the cgroup to scan from.
- * @isolate: callback function that is resposible for deciding what to do with
+ * @isolate: callback function that is responsible for deciding what to do with
* the item currently being scanned
* @cb_arg: opaque type that will be passed to @isolate
* @nr_to_walk: how many items to scan.
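
For context, a plain (non-memcg-aware) consumer of this interface pairs list_lru_init() with list_lru_add()/list_lru_del(); struct cached_obj and the helpers below are hypothetical:

        struct cached_obj {
                struct list_head lru_node;
                /* ... payload ... */
        };

        static struct list_lru obj_lru;

        static int __init obj_cache_init(void)
        {
                return list_lru_init(&obj_lru);
        }

        /* returns false if the item was already on an lru list */
        static bool cache_obj(struct cached_obj *obj)
        {
                return list_lru_add(&obj_lru, &obj->lru_node);
        }
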
diff --git a/include/linux/list_sort.h b/include/linux/list_sort.h
index 20f178c24e9d..453105f74e05 100644
--- a/include/linux/list_sort.h
+++ b/include/linux/list_sort.h
@@ -6,8 +6,9 @@
struct list_head;
+typedef int __attribute__((nonnull(2,3))) (*list_cmp_func_t)(void *,
+ const struct list_head *, const struct list_head *);
+
__attribute__((nonnull(2,3)))
-void list_sort(void *priv, struct list_head *head,
- int (*cmp)(void *priv, struct list_head *a,
- struct list_head *b));
+void list_sort(void *priv, struct list_head *head, list_cmp_func_t cmp);
#endif
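
A comparison callback matching the new list_cmp_func_t signature might look like this; struct item is illustrative:

        struct item {
                int key;
                struct list_head list;
        };

        static int item_cmp(void *priv, const struct list_head *a,
                            const struct list_head *b)
        {
                const struct item *ia = list_entry(a, struct item, list);
                const struct item *ib = list_entry(b, struct item, list);

                /* return <0, 0 or >0, memcmp()-style */
                return ia->key - ib->key;
        }

        /* ... list_sort(NULL, &item_list, item_cmp); ... */
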
diff --git a/include/linux/litex.h b/include/linux/litex.h
new file mode 100644
index 000000000000..f2edb86d5f44
--- /dev/null
+++ b/include/linux/litex.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common LiteX header providing
+ * helper functions for accessing CSRs.
+ *
+ * Copyright (C) 2019-2020 Antmicro <www.antmicro.com>
+ */
+
+#ifndef _LINUX_LITEX_H
+#define _LINUX_LITEX_H
+
+#include <linux/io.h>
+
+static inline void _write_litex_subregister(u32 val, void __iomem *addr)
+{
+ writel((u32 __force)cpu_to_le32(val), addr);
+}
+
+static inline u32 _read_litex_subregister(void __iomem *addr)
+{
+ return le32_to_cpu((__le32 __force)readl(addr));
+}
+
+/*
+ * LiteX SoC Generator, depending on the configuration, can split a single
+ * logical CSR (Control&Status Register) into a series of consecutive physical
+ * registers.
+ *
+ * For example, in the configuration with 8-bit CSR Bus, a 32-bit aligned,
+ * 32-bit wide logical CSR will be laid out as four 32-bit physical
+ * subregisters, each one containing one byte of meaningful data.
+ *
+ * For Linux support, upstream LiteX enforces a 32-bit wide CSR bus, which
+ * means that only larger-than-32-bit CSRs will be split across multiple
+ * subregisters (e.g., a 64-bit CSR will be spread across two consecutive
+ * 32-bit subregisters).
+ *
+ * For details see: https://github.com/enjoy-digital/litex/wiki/CSR-Bus
+ */
+
+static inline void litex_write8(void __iomem *reg, u8 val)
+{
+ _write_litex_subregister(val, reg);
+}
+
+static inline void litex_write16(void __iomem *reg, u16 val)
+{
+ _write_litex_subregister(val, reg);
+}
+
+static inline void litex_write32(void __iomem *reg, u32 val)
+{
+ _write_litex_subregister(val, reg);
+}
+
+static inline void litex_write64(void __iomem *reg, u64 val)
+{
+ _write_litex_subregister(val >> 32, reg);
+ _write_litex_subregister(val, reg + 4);
+}
+
+static inline u8 litex_read8(void __iomem *reg)
+{
+ return _read_litex_subregister(reg);
+}
+
+static inline u16 litex_read16(void __iomem *reg)
+{
+ return _read_litex_subregister(reg);
+}
+
+static inline u32 litex_read32(void __iomem *reg)
+{
+ return _read_litex_subregister(reg);
+}
+
+static inline u64 litex_read64(void __iomem *reg)
+{
+ return ((u64)_read_litex_subregister(reg) << 32) |
+ _read_litex_subregister(reg + 4);
+}
+
+#endif /* _LINUX_LITEX_H */
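
As a usage sketch, a driver would hand these accessors the ioremap()ed CSR base plus a register offset; the 0x10 timer offset below is an assumption for illustration:

        /* 64-bit CSR = two consecutive 32-bit subregisters */
        static u64 read_timer(void __iomem *csr_base)
        {
                return litex_read64(csr_base + 0x10);
        }
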
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index e894e74905f3..293e29960c6e 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -16,8 +16,6 @@
#if IS_ENABLED(CONFIG_LIVEPATCH)
-#include <asm/livepatch.h>
-
/* task patch states */
#define KLP_UNDEFINED -1
#define KLP_UNPATCHED 0
@@ -195,9 +193,6 @@ struct klp_patch {
int klp_enable_patch(struct klp_patch *);
-void arch_klp_init_object_loaded(struct klp_patch *patch,
- struct klp_object *obj);
-
/* Called from the module loader during module coming/going states */
int klp_module_coming(struct module *mod);
void klp_module_going(struct module *mod);
@@ -234,6 +229,11 @@ void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);
struct klp_state *klp_get_state(struct klp_patch *patch, unsigned long id);
struct klp_state *klp_get_prev_state(unsigned long id);
+int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
+ const char *shstrtab, const char *strtab,
+ unsigned int symindex, unsigned int secindex,
+ const char *objname);
+
#else /* !CONFIG_LIVEPATCH */
static inline int klp_module_coming(struct module *mod) { return 0; }
@@ -242,6 +242,15 @@ static inline bool klp_patch_pending(struct task_struct *task) { return false; }
static inline void klp_update_patch_state(struct task_struct *task) {}
static inline void klp_copy_process(struct task_struct *child) {}
+static inline
+int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
+ const char *shstrtab, const char *strtab,
+ unsigned int symindex, unsigned int secindex,
+ const char *objname)
+{
+ return 0;
+}
+
#endif /* CONFIG_LIVEPATCH */
#endif /* _LINUX_LIVEPATCH_H_ */
diff --git a/include/linux/llist.h b/include/linux/llist.h
index 2e9c7215882b..85bda2d02d65 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -49,7 +49,9 @@
*/
#include <linux/atomic.h>
-#include <linux/kernel.h>
+#include <linux/container_of.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
struct llist_head {
struct llist_node *first;
@@ -197,6 +199,16 @@ static inline struct llist_node *llist_next(struct llist_node *node)
extern bool llist_add_batch(struct llist_node *new_first,
struct llist_node *new_last,
struct llist_head *head);
+
+static inline bool __llist_add_batch(struct llist_node *new_first,
+ struct llist_node *new_last,
+ struct llist_head *head)
+{
+ new_last->next = head->first;
+ head->first = new_first;
+ return new_last->next == NULL;
+}
+
/**
* llist_add - add a new entry
* @new: new entry to be added
@@ -209,6 +221,11 @@ static inline bool llist_add(struct llist_node *new, struct llist_head *head)
return llist_add_batch(new, new, head);
}
+static inline bool __llist_add(struct llist_node *new, struct llist_head *head)
+{
+ return __llist_add_batch(new, new, head);
+}
+
/**
* llist_del_all - delete all entries from lock-less list
* @head: the head of lock-less list to delete all entries
@@ -222,6 +239,14 @@ static inline struct llist_node *llist_del_all(struct llist_head *head)
return xchg(&head->first, NULL);
}
+static inline struct llist_node *__llist_del_all(struct llist_head *head)
+{
+ struct llist_node *first = head->first;
+
+ head->first = NULL;
+ return first;
+}
+
extern struct llist_node *llist_del_first(struct llist_head *head);
struct llist_node *llist_reverse_order(struct llist_node *head);
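
The double-underscore variants added above skip the cmpxchg for callers that already hold a lock or own the list exclusively. The usual produce/drain pattern with the locked variants looks like this sketch; struct work_item is hypothetical:

        struct work_item {
                struct llist_node node;
                void (*fn)(struct work_item *);
        };

        static void queue_item(struct llist_head *q, struct work_item *w)
        {
                /* cmpxchg-based, safe against concurrent producers */
                llist_add(&w->node, q);
        }

        static void drain_items(struct llist_head *q)
        {
                struct llist_node *batch = llist_del_all(q);
                struct work_item *w, *tmp;

                llist_for_each_entry_safe(w, tmp, batch, node)
                        w->fn(w);
        }
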
diff --git a/include/linux/llist_api.h b/include/linux/llist_api.h
new file mode 100644
index 000000000000..625bec0393a1
--- /dev/null
+++ b/include/linux/llist_api.h
@@ -0,0 +1 @@
+#include <linux/llist.h>
diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
new file mode 100644
index 000000000000..e55010fa7329
--- /dev/null
+++ b/include/linux/local_lock.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_LOCAL_LOCK_H
+#define _LINUX_LOCAL_LOCK_H
+
+#include <linux/local_lock_internal.h>
+
+/**
+ * local_lock_init - Runtime initialize a lock instance
+ */
+#define local_lock_init(lock) __local_lock_init(lock)
+
+/**
+ * local_lock - Acquire a per CPU local lock
+ * @lock: The lock variable
+ */
+#define local_lock(lock) __local_lock(lock)
+
+/**
+ * local_lock_irq - Acquire a per CPU local lock and disable interrupts
+ * @lock: The lock variable
+ */
+#define local_lock_irq(lock) __local_lock_irq(lock)
+
+/**
+ * local_lock_irqsave - Acquire a per CPU local lock, save and disable
+ * interrupts
+ * @lock: The lock variable
+ * @flags: Storage for interrupt flags
+ */
+#define local_lock_irqsave(lock, flags) \
+ __local_lock_irqsave(lock, flags)
+
+/**
+ * local_unlock - Release a per CPU local lock
+ * @lock: The lock variable
+ */
+#define local_unlock(lock) __local_unlock(lock)
+
+/**
+ * local_unlock_irq - Release a per CPU local lock and enable interrupts
+ * @lock: The lock variable
+ */
+#define local_unlock_irq(lock) __local_unlock_irq(lock)
+
+/**
+ * local_unlock_irqrestore - Release a per CPU local lock and restore
+ * interrupt flags
+ * @lock: The lock variable
+ * @flags: Interrupt flags to restore
+ */
+#define local_unlock_irqrestore(lock, flags) \
+ __local_unlock_irqrestore(lock, flags)
+
+#endif
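
Typical usage wraps per-CPU data: the macros take the address of the per-CPU lock member and resolve this_cpu_ptr() internally, so callers pass the variable itself rather than a per-CPU pointer. The counter below is illustrative:

        struct pcpu_counter {
                local_lock_t lock;
                unsigned long count;
        };

        static DEFINE_PER_CPU(struct pcpu_counter, pcpu_counter) = {
                .lock = INIT_LOCAL_LOCK(lock),
        };

        static void counter_inc(void)
        {
                /* disables preemption; on PREEMPT_RT this becomes a
                 * per-CPU spinlock and stays preemptible */
                local_lock(&pcpu_counter.lock);
                __this_cpu_inc(pcpu_counter.count);
                local_unlock(&pcpu_counter.lock);
        }
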
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
new file mode 100644
index 000000000000..975e33b793a7
--- /dev/null
+++ b/include/linux/local_lock_internal.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_LOCAL_LOCK_H
+# error "Do not include directly, include linux/local_lock.h"
+#endif
+
+#include <linux/percpu-defs.h>
+#include <linux/lockdep.h>
+
+#ifndef CONFIG_PREEMPT_RT
+
+typedef struct {
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+ struct task_struct *owner;
+#endif
+} local_lock_t;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCAL_LOCK_DEBUG_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_CONFIG, \
+ .lock_type = LD_LOCK_PERCPU, \
+ }, \
+ .owner = NULL,
+
+static inline void local_lock_acquire(local_lock_t *l)
+{
+ lock_map_acquire(&l->dep_map);
+ DEBUG_LOCKS_WARN_ON(l->owner);
+ l->owner = current;
+}
+
+static inline void local_lock_release(local_lock_t *l)
+{
+ DEBUG_LOCKS_WARN_ON(l->owner != current);
+ l->owner = NULL;
+ lock_map_release(&l->dep_map);
+}
+
+static inline void local_lock_debug_init(local_lock_t *l)
+{
+ l->owner = NULL;
+}
+#else /* CONFIG_DEBUG_LOCK_ALLOC */
+# define LOCAL_LOCK_DEBUG_INIT(lockname)
+static inline void local_lock_acquire(local_lock_t *l) { }
+static inline void local_lock_release(local_lock_t *l) { }
+static inline void local_lock_debug_init(local_lock_t *l) { }
+#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
+
+#define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) }
+
+#define __local_lock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
+ lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
+ 0, LD_WAIT_CONFIG, LD_WAIT_INV, \
+ LD_LOCK_PERCPU); \
+ local_lock_debug_init(lock); \
+} while (0)
+
+#define __local_lock(lock) \
+ do { \
+ preempt_disable(); \
+ local_lock_acquire(this_cpu_ptr(lock)); \
+ } while (0)
+
+#define __local_lock_irq(lock) \
+ do { \
+ local_irq_disable(); \
+ local_lock_acquire(this_cpu_ptr(lock)); \
+ } while (0)
+
+#define __local_lock_irqsave(lock, flags) \
+ do { \
+ local_irq_save(flags); \
+ local_lock_acquire(this_cpu_ptr(lock)); \
+ } while (0)
+
+#define __local_unlock(lock) \
+ do { \
+ local_lock_release(this_cpu_ptr(lock)); \
+ preempt_enable(); \
+ } while (0)
+
+#define __local_unlock_irq(lock) \
+ do { \
+ local_lock_release(this_cpu_ptr(lock)); \
+ local_irq_enable(); \
+ } while (0)
+
+#define __local_unlock_irqrestore(lock, flags) \
+ do { \
+ local_lock_release(this_cpu_ptr(lock)); \
+ local_irq_restore(flags); \
+ } while (0)
+
+#else /* !CONFIG_PREEMPT_RT */
+
+/*
+ * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
+ * critical section while staying preemptible.
+ */
+typedef spinlock_t local_lock_t;
+
+#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
+
+#define __local_lock_init(l) \
+ do { \
+ local_spin_lock_init((l)); \
+ } while (0)
+
+#define __local_lock(__lock) \
+ do { \
+ migrate_disable(); \
+ spin_lock(this_cpu_ptr((__lock))); \
+ } while (0)
+
+#define __local_lock_irq(lock) __local_lock(lock)
+
+#define __local_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ __local_lock(lock); \
+ } while (0)
+
+#define __local_unlock(__lock) \
+ do { \
+ spin_unlock(this_cpu_ptr((__lock))); \
+ migrate_enable(); \
+ } while (0)
+
+#define __local_unlock_irq(lock) __local_unlock(lock)
+
+#define __local_unlock_irqrestore(lock, flags) __local_unlock(lock)
+
+#endif /* CONFIG_PREEMPT_RT */
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
index 0520c0cd73f4..3bc9f7410e21 100644
--- a/include/linux/lockd/bind.h
+++ b/include/linux/lockd/bind.h
@@ -27,7 +27,8 @@ struct rpc_task;
struct nlmsvc_binding {
__be32 (*fopen)(struct svc_rqst *,
struct nfs_fh *,
- struct file **);
+ struct file **,
+ int mode);
void (*fclose)(struct file *);
};
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 666f5f310a04..70ce419e2709 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -10,6 +10,8 @@
#ifndef LINUX_LOCKD_LOCKD_H
#define LINUX_LOCKD_LOCKD_H
+/* XXX: a lot of this should really be under fs/lockd. */
+
#include <linux/in.h>
#include <linux/in6.h>
#include <net/ipv6.h>
@@ -154,7 +156,8 @@ struct nlm_rqst {
struct nlm_file {
struct hlist_node f_list; /* linked list */
struct nfs_fh f_handle; /* NFS file handle */
- struct file * f_file; /* VFS file pointer */
+ struct file * f_file[2]; /* VFS file pointers,
+ indexed by O_ flags */
struct nlm_share * f_shares; /* DOS shares */
struct list_head f_blocks; /* blocked locks */
unsigned int f_locks; /* guesstimate # of locks */
@@ -267,6 +270,7 @@ typedef int (*nlm_host_match_fn_t)(void *cur, struct nlm_host *ref);
/*
* Server-side lock handling
*/
+int lock_to_openmode(struct file_lock *);
__be32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *,
struct nlm_host *, struct nlm_lock *, int,
struct nlm_cookie *, int);
@@ -286,8 +290,9 @@ void nlmsvc_locks_init_private(struct file_lock *, struct nlm_host *, pid_t);
* File handling for the server personality
*/
__be32 nlm_lookup_file(struct svc_rqst *, struct nlm_file **,
- struct nfs_fh *);
+ struct nlm_lock *);
void nlm_release_file(struct nlm_file *);
+void nlmsvc_put_lockowner(struct nlm_lockowner *);
void nlmsvc_release_lockowner(struct nlm_lock *);
void nlmsvc_mark_resources(struct net *);
void nlmsvc_free_host_resources(struct nlm_host *);
@@ -299,9 +304,15 @@ void nlmsvc_invalidate_all(void);
int nlmsvc_unlock_all_by_sb(struct super_block *sb);
int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr);
+static inline struct file *nlmsvc_file_file(struct nlm_file *file)
+{
+ return file->f_file[O_RDONLY] ?
+ file->f_file[O_RDONLY] : file->f_file[O_WRONLY];
+}
+
static inline struct inode *nlmsvc_file_inode(struct nlm_file *file)
{
- return locks_inode(file->f_file);
+ return locks_inode(nlmsvc_file_file(file));
}
static inline int __nlm_privileged_request4(const struct sockaddr *sap)
diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h
index 7ab9f264313f..67e4a2c5500b 100644
--- a/include/linux/lockd/xdr.h
+++ b/include/linux/lockd/xdr.h
@@ -41,6 +41,8 @@ struct nlm_lock {
struct nfs_fh fh;
struct xdr_netobj oh;
u32 svid;
+ u64 lock_start;
+ u64 lock_len;
struct file_lock fl;
};
@@ -96,24 +98,19 @@ struct nlm_reboot {
*/
#define NLMSVC_XDRSIZE sizeof(struct nlm_args)
-int nlmsvc_decode_testargs(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_testres(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_lockargs(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_cancargs(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_unlockargs(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_res(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_res(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_void(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_void(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_shareargs(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_shareres(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_notify(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_reboot(struct svc_rqst *, __be32 *);
-/*
-int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
- */
+bool nlmsvc_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_cancargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_reboot(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_shareargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_notify(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+
+bool nlmsvc_encode_testres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_encode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_encode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_encode_shareres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
#endif /* LOCKD_XDR_H */
diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h
index e709fe5924f2..9a6b55da8fd6 100644
--- a/include/linux/lockd/xdr4.h
+++ b/include/linux/lockd/xdr4.h
@@ -22,27 +22,21 @@
#define nlm4_fbig cpu_to_be32(NLM_FBIG)
#define nlm4_failed cpu_to_be32(NLM_FAILED)
+bool nlm4svc_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_cancargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_reboot(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_shareargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_notify(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_encode_testres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_encode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_encode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_encode_shareres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
-int nlm4svc_decode_testargs(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_testres(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_lockargs(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_cancargs(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_unlockargs(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_res(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_res(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_void(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_void(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_shareargs(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_shareres(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_notify(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_reboot(struct svc_rqst *, __be32 *);
-/*
-int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
- */
extern const struct rpc_version nlm_version4;
#endif /* LOCKD_XDR4_H */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 664f52c6dd4c..1f1099dac3f0 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -10,16 +10,11 @@
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H
-struct task_struct;
-struct lockdep_map;
-
-/* for sysctl */
-extern int prove_locking;
-extern int lock_stat;
-
-#define MAX_LOCKDEP_SUBCLASSES 8UL
+#include <linux/lockdep_types.h>
+#include <linux/smp.h>
+#include <asm/percpu.h>
-#include <linux/types.h>
+struct task_struct;
#ifdef CONFIG_LOCKDEP
@@ -28,142 +23,6 @@ extern int lock_stat;
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>
-/*
- * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
- * the total number of states... :-(
- */
-#define XXX_LOCK_USAGE_STATES (1+2*4)
-
-/*
- * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
- * cached in the instance of lockdep_map
- *
- * Currently main class (subclass == 0) and signle depth subclass
- * are cached in lockdep_map. This optimization is mainly targeting
- * on rq->lock. double_rq_lock() acquires this highly competitive with
- * single depth.
- */
-#define NR_LOCKDEP_CACHING_CLASSES 2
-
-/*
- * A lockdep key is associated with each lock object. For static locks we use
- * the lock address itself as the key. Dynamically allocated lock objects can
- * have a statically or dynamically allocated key. Dynamically allocated lock
- * keys must be registered before being used and must be unregistered before
- * the key memory is freed.
- */
-struct lockdep_subclass_key {
- char __one_byte;
-} __attribute__ ((__packed__));
-
-/* hash_entry is used to keep track of dynamically allocated keys. */
-struct lock_class_key {
- union {
- struct hlist_node hash_entry;
- struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
- };
-};
-
-extern struct lock_class_key __lockdep_no_validate__;
-
-struct lock_trace;
-
-#define LOCKSTAT_POINTS 4
-
-/*
- * The lock-class itself. The order of the structure members matters.
- * reinit_class() zeroes the key member and all subsequent members.
- */
-struct lock_class {
- /*
- * class-hash:
- */
- struct hlist_node hash_entry;
-
- /*
- * Entry in all_lock_classes when in use. Entry in free_lock_classes
- * when not in use. Instances that are being freed are on one of the
- * zapped_classes lists.
- */
- struct list_head lock_entry;
-
- /*
- * These fields represent a directed graph of lock dependencies,
- * to every node we attach a list of "forward" and a list of
- * "backward" graph nodes.
- */
- struct list_head locks_after, locks_before;
-
- const struct lockdep_subclass_key *key;
- unsigned int subclass;
- unsigned int dep_gen_id;
-
- /*
- * IRQ/softirq usage tracking bits:
- */
- unsigned long usage_mask;
- const struct lock_trace *usage_traces[XXX_LOCK_USAGE_STATES];
-
- /*
- * Generation counter, when doing certain classes of graph walking,
- * to ensure that we check one node only once:
- */
- int name_version;
- const char *name;
-
-#ifdef CONFIG_LOCK_STAT
- unsigned long contention_point[LOCKSTAT_POINTS];
- unsigned long contending_point[LOCKSTAT_POINTS];
-#endif
-} __no_randomize_layout;
-
-#ifdef CONFIG_LOCK_STAT
-struct lock_time {
- s64 min;
- s64 max;
- s64 total;
- unsigned long nr;
-};
-
-enum bounce_type {
- bounce_acquired_write,
- bounce_acquired_read,
- bounce_contended_write,
- bounce_contended_read,
- nr_bounce_types,
-
- bounce_acquired = bounce_acquired_write,
- bounce_contended = bounce_contended_write,
-};
-
-struct lock_class_stats {
- unsigned long contention_point[LOCKSTAT_POINTS];
- unsigned long contending_point[LOCKSTAT_POINTS];
- struct lock_time read_waittime;
- struct lock_time write_waittime;
- struct lock_time read_holdtime;
- struct lock_time write_holdtime;
- unsigned long bounces[nr_bounce_types];
-};
-
-struct lock_class_stats lock_stats(struct lock_class *class);
-void clear_lock_stats(struct lock_class *class);
-#endif
-
-/*
- * Map the lock object (the lock instance) to the lock-class object.
- * This is embedded into specific lock instances:
- */
-struct lockdep_map {
- struct lock_class_key *key;
- struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
- const char *name;
-#ifdef CONFIG_LOCK_STAT
- int cpu;
- unsigned long ip;
-#endif
-};
-
static inline void lockdep_copy_map(struct lockdep_map *to,
struct lockdep_map *from)
{
@@ -191,7 +50,11 @@ struct lock_list {
struct lock_class *class;
struct lock_class *links_to;
const struct lock_trace *trace;
- int distance;
+ u16 distance;
+ /* bitmap of different dependencies from head to this */
+ u8 dep;
+ /* used by BFS to record whether "prev -> this" only has -(*R)-> */
+ u8 only_xr;
/*
* The parent field is used to implement breadth-first search, and the
@@ -287,8 +150,27 @@ extern void lockdep_set_selftest_task(struct task_struct *task);
extern void lockdep_init_task(struct task_struct *task);
-extern void lockdep_off(void);
-extern void lockdep_on(void);
+/*
+ * Split the recursion counter in two to readily detect 'off' vs recursion.
+ */
+#define LOCKDEP_RECURSION_BITS 16
+#define LOCKDEP_OFF (1U << LOCKDEP_RECURSION_BITS)
+#define LOCKDEP_RECURSION_MASK (LOCKDEP_OFF - 1)
+
+/*
+ * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
+ * to header dependencies.
+ */
+
+#define lockdep_off() \
+do { \
+ current->lockdep_recursion += LOCKDEP_OFF; \
+} while (0)
+
+#define lockdep_on() \
+do { \
+ current->lockdep_recursion -= LOCKDEP_OFF; \
+} while (0)
extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);
@@ -299,8 +181,28 @@ extern void lockdep_unregister_key(struct lock_class_key *key);
* to lockdep:
*/
-extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
- struct lock_class_key *key, int subclass);
+extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);
+
+static inline void
+lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass, u8 inner, u8 outer)
+{
+ lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
+}
+
+static inline void
+lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass, u8 inner)
+{
+ lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
+}
+
+static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass)
+{
+ lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
+}
/*
* Reinitialize a lock key - for cases where there is special locking or
@@ -308,18 +210,33 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
* of dependencies wrong: they are either too broad (they need a class-split)
* or they are too narrow (they suffer from a false class-split):
*/
-#define lockdep_set_class(lock, key) \
- lockdep_init_map(&(lock)->dep_map, #key, key, 0)
-#define lockdep_set_class_and_name(lock, key, name) \
- lockdep_init_map(&(lock)->dep_map, name, key, 0)
-#define lockdep_set_class_and_subclass(lock, key, sub) \
- lockdep_init_map(&(lock)->dep_map, #key, key, sub)
-#define lockdep_set_subclass(lock, sub) \
- lockdep_init_map(&(lock)->dep_map, #lock, \
- (lock)->dep_map.key, sub)
+#define lockdep_set_class(lock, key) \
+ lockdep_init_map_type(&(lock)->dep_map, #key, key, 0, \
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer, \
+ (lock)->dep_map.lock_type)
+
+#define lockdep_set_class_and_name(lock, key, name) \
+ lockdep_init_map_type(&(lock)->dep_map, name, key, 0, \
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer, \
+ (lock)->dep_map.lock_type)
+
+#define lockdep_set_class_and_subclass(lock, key, sub) \
+ lockdep_init_map_type(&(lock)->dep_map, #key, key, sub, \
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer, \
+ (lock)->dep_map.lock_type)
+
+#define lockdep_set_subclass(lock, sub) \
+ lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer, \
+ (lock)->dep_map.lock_type)
#define lockdep_set_novalidate_class(lock) \
lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
+
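
A typical class-split uses one of these setters right after init, so that lockdep can distinguish nesting levels of otherwise identical locks; struct node and the key name are illustrative:

        struct node {
                struct mutex lock;
                /* ... */
        };

        static struct lock_class_key node_parent_key;

        static void node_init(struct node *n, bool is_parent)
        {
                mutex_init(&n->lock);
                /* parents and children nest; give parents their own
                 * class so the nesting is not a false positive */
                if (is_parent)
                        lockdep_set_class(&n->lock, &node_parent_key);
        }
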
/*
* Compare locking classes
*/
@@ -351,6 +268,11 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
extern void lock_release(struct lockdep_map *lock, unsigned long ip);
+/* lock_is_held_type() returns */
+#define LOCK_STATE_UNKNOWN -1
+#define LOCK_STATE_NOT_HELD 0
+#define LOCK_STATE_HELD 1
+
/*
* Same "read" as for lock_acquire(), except -1 means any.
*/
@@ -368,6 +290,9 @@ extern void lock_set_class(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, unsigned int subclass,
unsigned long ip);
+#define lock_set_novalidate_class(l, n, i) \
+ lock_set_class(l, n, &__lockdep_no_validate__, 0, i)
+
static inline void lock_set_subclass(struct lockdep_map *lock,
unsigned int subclass, unsigned long ip)
{
@@ -376,8 +301,6 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);
-struct pin_cookie { unsigned int val; };
-
#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }
extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
@@ -386,21 +309,29 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
-#define lockdep_assert_held(l) do { \
- WARN_ON(debug_locks && !lockdep_is_held(l)); \
- } while (0)
+#define lockdep_assert(cond) \
+ do { WARN_ON(debug_locks && !(cond)); } while (0)
+
+#define lockdep_assert_once(cond) \
+ do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0)
+
+#define lockdep_assert_held(l) \
+ lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
+
+#define lockdep_assert_not_held(l) \
+ lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)
-#define lockdep_assert_held_write(l) do { \
- WARN_ON(debug_locks && !lockdep_is_held_type(l, 0)); \
- } while (0)
+#define lockdep_assert_held_write(l) \
+ lockdep_assert(lockdep_is_held_type(l, 0))
-#define lockdep_assert_held_read(l) do { \
- WARN_ON(debug_locks && !lockdep_is_held_type(l, 1)); \
- } while (0)
+#define lockdep_assert_held_read(l) \
+ lockdep_assert(lockdep_is_held_type(l, 1))
-#define lockdep_assert_held_once(l) do { \
- WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \
- } while (0)
+#define lockdep_assert_held_once(l) \
+ lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
+
+#define lockdep_assert_none_held_once() \
+ lockdep_assert_once(!current->lockdep_depth)
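
These asserts are meant to document locking preconditions at function entry; a sketch with an illustrative structure:

        struct foo {
                spinlock_t lock;
                int a, b;
        };

        static void frob_counters(struct foo *f)
        {
                /* caller must hold f->lock for write */
                lockdep_assert_held_write(&f->lock);
                f->a++;
                f->b++;
        }
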
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
@@ -429,9 +360,16 @@ static inline void lockdep_set_selftest_task(struct task_struct *task)
# define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
# define lock_release(l, i) do { } while (0)
# define lock_downgrade(l, i) do { } while (0)
-# define lock_set_class(l, n, k, s, i) do { } while (0)
+# define lock_set_class(l, n, key, s, i) do { (void)(key); } while (0)
+# define lock_set_novalidate_class(l, n, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
# define lockdep_init() do { } while (0)
+# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
+ do { (void)(name); (void)(key); } while (0)
+# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
+ do { (void)(name); (void)(key); } while (0)
+# define lockdep_init_map_wait(lock, name, key, sub, inner) \
+ do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key) do { (void)(key); } while (0)
@@ -452,10 +390,6 @@ static inline void lockdep_set_selftest_task(struct task_struct *task)
# define lockdep_reset() do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size) do { } while (0)
# define lockdep_sys_exit() do { } while (0)
-/*
- * The class key takes no space if lockdep is disabled:
- */
-struct lock_class_key { };
static inline void lockdep_register_key(struct lock_class_key *key)
{
@@ -465,24 +399,28 @@ static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}
-/*
- * The lockdep_map takes no space if lockdep is disabled:
- */
-struct lockdep_map { };
-
#define lockdep_depth(tsk) (0)
+/*
+ * Dummy forward declarations, allow users to write less ifdef-y code
+ * and depend on dead code elimination.
+ */
+extern int lock_is_held(const void *);
+extern int lockdep_is_held(const void *);
#define lockdep_is_held_type(l, r) (1)
+#define lockdep_assert(c) do { } while (0)
+#define lockdep_assert_once(c) do { } while (0)
+
#define lockdep_assert_held(l) do { (void)(l); } while (0)
-#define lockdep_assert_held_write(l) do { (void)(l); } while (0)
+#define lockdep_assert_not_held(l) do { (void)(l); } while (0)
+#define lockdep_assert_held_write(l) do { (void)(l); } while (0)
#define lockdep_assert_held_read(l) do { (void)(l); } while (0)
#define lockdep_assert_held_once(l) do { (void)(l); } while (0)
+#define lockdep_assert_none_held_once() do { } while (0)
#define lockdep_recursing(tsk) (0)
-struct pin_cookie { };
-
#define NIL_COOKIE (struct pin_cookie){ }
#define lockdep_pin_lock(l) ({ struct pin_cookie cookie = { }; cookie; })
@@ -547,23 +485,6 @@ do { \
#endif /* CONFIG_LOCK_STAT */
-#ifdef CONFIG_LOCKDEP
-
-/*
- * On lockdep we dont want the hand-coded irq-enable of
- * _raw_*_lock_flags() code, because lockdep assumes
- * that interrupts are not re-enabled during lock-acquire:
- */
-#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
- LOCK_CONTENDED((_lock), (try), (lock))
-
-#else /* CONFIG_LOCKDEP */
-
-#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
- lockfl((_lock), (flags))
-
-#endif /* CONFIG_LOCKDEP */
-
#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
@@ -572,6 +493,20 @@ static inline void print_irqtrace_events(struct task_struct *curr)
}
#endif
+/* Variable used to make lockdep treat read_lock() as recursive in selftests */
+#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
+extern unsigned int force_read_lock_recursive;
+#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
+#define force_read_lock_recursive 0
+#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
+
+#ifdef CONFIG_LOCKDEP
+extern bool read_lock_is_recursive(void);
+#else /* CONFIG_LOCKDEP */
+/* If !LOCKDEP, the value is meaningless */
+#define read_lock_is_recursive() 0
+#endif
+
/*
* For trivial one-depth nesting of a lock-class, the following
* global define can be used. (Subsystems with multiple levels
@@ -593,7 +528,14 @@ static inline void print_irqtrace_events(struct task_struct *curr)
#define spin_release(l, i) lock_release(l, i)
#define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
-#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
+#define rwlock_acquire_read(l, s, t, i) \
+do { \
+ if (read_lock_is_recursive()) \
+ lock_acquire_shared_recursive(l, s, t, NULL, i); \
+ else \
+ lock_acquire_shared(l, s, t, NULL, i); \
+} while (0)
+
#define rwlock_release(l, i) lock_release(l, i)
#define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
@@ -615,19 +557,19 @@ static inline void print_irqtrace_events(struct task_struct *curr)
#define lock_map_release(l) lock_release(l, _THIS_IP_)
#ifdef CONFIG_PROVE_LOCKING
-# define might_lock(lock) \
+# define might_lock(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \
lock_release(&(lock)->dep_map, _THIS_IP_); \
} while (0)
-# define might_lock_read(lock) \
+# define might_lock_read(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
lock_release(&(lock)->dep_map, _THIS_IP_); \
} while (0)
-# define might_lock_nested(lock, subclass) \
+# define might_lock_nested(lock, subclass) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL, \
@@ -635,31 +577,80 @@ do { \
lock_release(&(lock)->dep_map, _THIS_IP_); \
} while (0)
-#define lockdep_assert_irqs_enabled() do { \
- WARN_ONCE(debug_locks && !current->lockdep_recursion && \
- !current->hardirqs_enabled, \
- "IRQs not enabled as expected\n"); \
- } while (0)
+DECLARE_PER_CPU(int, hardirqs_enabled);
+DECLARE_PER_CPU(int, hardirq_context);
+DECLARE_PER_CPU(unsigned int, lockdep_recursion);
-#define lockdep_assert_irqs_disabled() do { \
- WARN_ONCE(debug_locks && !current->lockdep_recursion && \
- current->hardirqs_enabled, \
- "IRQs not disabled as expected\n"); \
- } while (0)
+#define __lockdep_enabled (debug_locks && !this_cpu_read(lockdep_recursion))
-#define lockdep_assert_in_irq() do { \
- WARN_ONCE(debug_locks && !current->lockdep_recursion && \
- !current->hardirq_context, \
- "Not in hardirq as expected\n"); \
- } while (0)
+#define lockdep_assert_irqs_enabled() \
+do { \
+ WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
+} while (0)
+
+#define lockdep_assert_irqs_disabled() \
+do { \
+ WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
+} while (0)
+
+#define lockdep_assert_in_irq() \
+do { \
+ WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
+} while (0)
+
+#define lockdep_assert_preemption_enabled() \
+do { \
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
+ __lockdep_enabled && \
+ (preempt_count() != 0 || \
+ !this_cpu_read(hardirqs_enabled))); \
+} while (0)
+
+#define lockdep_assert_preemption_disabled() \
+do { \
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
+ __lockdep_enabled && \
+ (preempt_count() == 0 && \
+ this_cpu_read(hardirqs_enabled))); \
+} while (0)
+
+/*
+ * Acceptable for protecting per-CPU resources accessed from BH.
+ * Much like in_softirq() - semantics are ambiguous, use carefully.
+ */
+#define lockdep_assert_in_softirq() \
+do { \
+ WARN_ON_ONCE(__lockdep_enabled && \
+ (!in_softirq() || in_irq() || in_nmi())); \
+} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)
+
# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)
+
+# define lockdep_assert_preemption_enabled() do { } while (0)
+# define lockdep_assert_preemption_disabled() do { } while (0)
+# define lockdep_assert_in_softirq() do { } while (0)
+#endif
+
+#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
+
+# define lockdep_assert_RT_in_threaded_ctx() do { \
+ WARN_ONCE(debug_locks && !current->lockdep_recursion && \
+ lockdep_hardirq_context() && \
+ !(current->hardirq_threaded || current->irq_config), \
+ "Not in threaded context on PREEMPT_RT as expected\n"); \
+} while (0)
+
+#else
+
+# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)
+
#endif
#ifdef CONFIG_LOCKDEP
diff --git a/include/linux/lockdep_api.h b/include/linux/lockdep_api.h
new file mode 100644
index 000000000000..907e66979ab2
--- /dev/null
+++ b/include/linux/lockdep_api.h
@@ -0,0 +1 @@
+#include <linux/lockdep.h>
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
new file mode 100644
index 000000000000..d22430840b53
--- /dev/null
+++ b/include/linux/lockdep_types.h
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Runtime locking correctness validator
+ *
+ * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
+ *
+ * see Documentation/locking/lockdep-design.rst for more details.
+ */
+#ifndef __LINUX_LOCKDEP_TYPES_H
+#define __LINUX_LOCKDEP_TYPES_H
+
+#include <linux/types.h>
+
+#define MAX_LOCKDEP_SUBCLASSES 8UL
+
+enum lockdep_wait_type {
+ LD_WAIT_INV = 0, /* not checked, catch all */
+
+ LD_WAIT_FREE, /* wait free, rcu etc.. */
+ LD_WAIT_SPIN, /* spin loops, raw_spinlock_t etc.. */
+
+#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
+ LD_WAIT_CONFIG, /* preemptible in PREEMPT_RT, spinlock_t etc.. */
+#else
+ LD_WAIT_CONFIG = LD_WAIT_SPIN,
+#endif
+ LD_WAIT_SLEEP, /* sleeping locks, mutex_t etc.. */
+
+ LD_WAIT_MAX, /* must be last */
+};
+
+enum lockdep_lock_type {
+ LD_LOCK_NORMAL = 0, /* normal, catch all */
+ LD_LOCK_PERCPU, /* percpu */
+ LD_LOCK_MAX,
+};
+
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
+ * the total number of states... :-(
+ *
+ * XXX_LOCK_USAGE_STATES is the number of lines in lockdep_states.h; for each
+ * of those we generate 4 states. Additionally we report on USED and USED_READ.
+ */
+#define XXX_LOCK_USAGE_STATES 2
+#define LOCK_TRACE_STATES (XXX_LOCK_USAGE_STATES*4 + 2)
+
+/*
+ * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
+ * cached in the instance of lockdep_map
+ *
+ * Currently the main class (subclass == 0) and the single depth subclass
+ * are cached in lockdep_map. This optimization mainly targets rq->lock:
+ * double_rq_lock() acquires that highly contended lock at a single
+ * subclass depth.
+ */
+#define NR_LOCKDEP_CACHING_CLASSES 2
+
+/*
+ * A lockdep key is associated with each lock object. For static locks we use
+ * the lock address itself as the key. Dynamically allocated lock objects can
+ * have a statically or dynamically allocated key. Dynamically allocated lock
+ * keys must be registered before being used and must be unregistered before
+ * the key memory is freed.
+ */
+struct lockdep_subclass_key {
+ char __one_byte;
+} __attribute__ ((__packed__));
+
+/* hash_entry is used to keep track of dynamically allocated keys. */
+struct lock_class_key {
+ union {
+ struct hlist_node hash_entry;
+ struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
+ };
+};
+
+extern struct lock_class_key __lockdep_no_validate__;
+
+struct lock_trace;
+
+#define LOCKSTAT_POINTS 4
+
+/*
+ * The lock-class itself. The order of the structure members matters.
+ * reinit_class() zeroes the key member and all subsequent members.
+ */
+struct lock_class {
+ /*
+ * class-hash:
+ */
+ struct hlist_node hash_entry;
+
+ /*
+ * Entry in all_lock_classes when in use. Entry in free_lock_classes
+ * when not in use. Instances that are being freed are on one of the
+ * zapped_classes lists.
+ */
+ struct list_head lock_entry;
+
+ /*
+ * These fields represent a directed graph of lock dependencies,
+ * to every node we attach a list of "forward" and a list of
+ * "backward" graph nodes.
+ */
+ struct list_head locks_after, locks_before;
+
+ const struct lockdep_subclass_key *key;
+ unsigned int subclass;
+ unsigned int dep_gen_id;
+
+ /*
+ * IRQ/softirq usage tracking bits:
+ */
+ unsigned long usage_mask;
+ const struct lock_trace *usage_traces[LOCK_TRACE_STATES];
+
+ /*
+ * Generation counter, when doing certain classes of graph walking,
+ * to ensure that we check one node only once:
+ */
+ int name_version;
+ const char *name;
+
+ u8 wait_type_inner;
+ u8 wait_type_outer;
+ u8 lock_type;
+ /* u8 hole; */
+
+#ifdef CONFIG_LOCK_STAT
+ unsigned long contention_point[LOCKSTAT_POINTS];
+ unsigned long contending_point[LOCKSTAT_POINTS];
+#endif
+} __no_randomize_layout;
+
+#ifdef CONFIG_LOCK_STAT
+struct lock_time {
+ s64 min;
+ s64 max;
+ s64 total;
+ unsigned long nr;
+};
+
+enum bounce_type {
+ bounce_acquired_write,
+ bounce_acquired_read,
+ bounce_contended_write,
+ bounce_contended_read,
+ nr_bounce_types,
+
+ bounce_acquired = bounce_acquired_write,
+ bounce_contended = bounce_contended_write,
+};
+
+struct lock_class_stats {
+ unsigned long contention_point[LOCKSTAT_POINTS];
+ unsigned long contending_point[LOCKSTAT_POINTS];
+ struct lock_time read_waittime;
+ struct lock_time write_waittime;
+ struct lock_time read_holdtime;
+ struct lock_time write_holdtime;
+ unsigned long bounces[nr_bounce_types];
+};
+
+struct lock_class_stats lock_stats(struct lock_class *class);
+void clear_lock_stats(struct lock_class *class);
+#endif
+
+/*
+ * Map the lock object (the lock instance) to the lock-class object.
+ * This is embedded into specific lock instances:
+ */
+struct lockdep_map {
+ struct lock_class_key *key;
+ struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
+ const char *name;
+ u8 wait_type_outer; /* can be taken in this context */
+ u8 wait_type_inner; /* presents this context */
+ u8 lock_type;
+ /* u8 hole; */
+#ifdef CONFIG_LOCK_STAT
+ int cpu;
+ unsigned long ip;
+#endif
+};
+
+struct pin_cookie { unsigned int val; };
+
+#else /* !CONFIG_LOCKDEP */
+
+/*
+ * The class key takes no space if lockdep is disabled:
+ */
+struct lock_class_key { };
+
+/*
+ * The lockdep_map takes no space if lockdep is disabled:
+ */
+struct lockdep_map { };
+
+struct pin_cookie { };
+
+#endif /* !LOCKDEP */
+
+#endif /* __LINUX_LOCKDEP_TYPES_H */
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 99f17cc8e163..c3a1f78bc884 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -38,7 +38,6 @@ extern void lockref_get(struct lockref *);
extern int lockref_put_return(struct lockref *);
extern int lockref_get_not_zero(struct lockref *);
extern int lockref_put_not_zero(struct lockref *);
-extern int lockref_get_or_lock(struct lockref *);
extern int lockref_put_or_lock(struct lockref *);
extern void lockref_mark_dead(struct lockref *);
diff --git a/include/linux/log2.h b/include/linux/log2.h
index 83a4a3ca3e8a..9f30d087a128 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -18,7 +18,7 @@
* - the arch is not required to handle n==0 if implementing the fallback
*/
#ifndef CONFIG_ARCH_HAS_ILOG2_U32
-static inline __attribute__((const))
+static __always_inline __attribute__((const))
int __ilog2_u32(u32 n)
{
return fls(n) - 1;
@@ -26,7 +26,7 @@ int __ilog2_u32(u32 n)
#endif
#ifndef CONFIG_ARCH_HAS_ILOG2_U64
-static inline __attribute__((const))
+static __always_inline __attribute__((const))
int __ilog2_u64(u64 n)
{
return fls64(n) - 1;
@@ -156,7 +156,8 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
#define ilog2(n) \
( \
__builtin_constant_p(n) ? \
- const_ilog2(n) : \
+ ((n) < 2 ? 0 : \
+ 63 - __builtin_clzll(n)) : \
(sizeof(n) <= 4) ? \
__ilog2_u32(n) : \
__ilog2_u64(n) \
@@ -173,7 +174,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
#define roundup_pow_of_two(n) \
( \
__builtin_constant_p(n) ? ( \
- (n == 1) ? 1 : \
+ ((n) == 1) ? 1 : \
(1UL << (ilog2((n) - 1) + 1)) \
) : \
__roundup_pow_of_two(n) \
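
With the constant-folding branch above, ilog2() of a compile-time constant reduces to a plain integer constant expression; a quick self-check (compile-time only):

        BUILD_BUG_ON(ilog2(1) != 0);
        BUILD_BUG_ON(ilog2(64) != 6);
        BUILD_BUG_ON(roundup_pow_of_two(33) != 64);
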
diff --git a/include/linux/logic_iomem.h b/include/linux/logic_iomem.h
new file mode 100644
index 000000000000..3fa65c964379
--- /dev/null
+++ b/include/linux/logic_iomem.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: johannes@sipsolutions.net
+ */
+#ifndef __LOGIC_IOMEM_H
+#define __LOGIC_IOMEM_H
+#include <linux/types.h>
+#include <linux/ioport.h>
+
+/**
+ * struct logic_iomem_ops - emulated IO memory ops
+ * @read: read an 8, 16, 32 or 64 bit quantity from the given offset,
+ * size is given in bytes (1, 2, 4 or 8)
+ * (64-bit only necessary if CONFIG_64BIT is set)
+ * @write: write an 8, 16, 32 or 64 bit quantity to the given offset,
+ * size is given in bytes (1, 2, 4 or 8)
+ * (64-bit only necessary if CONFIG_64BIT is set)
+ * @set: optional, for memset_io()
+ * @copy_from: optional, for memcpy_fromio()
+ * @copy_to: optional, for memcpy_toio()
+ * @unmap: optional, this region is getting unmapped
+ */
+struct logic_iomem_ops {
+ unsigned long (*read)(void *priv, unsigned int offset, int size);
+ void (*write)(void *priv, unsigned int offset, int size,
+ unsigned long val);
+
+ void (*set)(void *priv, unsigned int offset, u8 value, int size);
+ void (*copy_from)(void *priv, void *buffer, unsigned int offset,
+ int size);
+ void (*copy_to)(void *priv, unsigned int offset, const void *buffer,
+ int size);
+
+ void (*unmap)(void *priv);
+};
+
+/**
+ * struct logic_iomem_region_ops - ops for an IO memory handler
+ * @map: map a range in the registered IO memory region, must
+ * fill *ops with the ops and may fill *priv to be passed
+ * to the ops. The offset is given as the offset into the
+ * registered resource region.
+ * The return value is negative for errors, or >= 0 for
+ * success. On success, the return value is added to the
+ * offset for later ops, to allow for partial mappings.
+ */
+struct logic_iomem_region_ops {
+ long (*map)(unsigned long offset, size_t size,
+ const struct logic_iomem_ops **ops,
+ void **priv);
+};
+
+/**
+ * logic_iomem_add_region - register an IO memory region
+ * @resource: the resource description for this region
+ * @ops: the IO memory mapping ops for this resource
+ */
+int logic_iomem_add_region(struct resource *resource,
+ const struct logic_iomem_region_ops *ops);
+
+#endif /* __LOGIC_IOMEM_H */
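A hedged sketch of a backend for this interface (all names hypothetical): map() hands back a logic_iomem_ops that emulates a small register file in an ordinary buffer, using native-endian memcpy for the 1/2/4/8-byte accesses:

	#include <linux/logic_iomem.h>
	#include <linux/string.h>

	static u8 my_regs[256];		/* hypothetical emulated registers */

	static unsigned long my_read(void *priv, unsigned int offset, int size)
	{
		unsigned long val = 0;

		memcpy(&val, my_regs + offset, size);	/* size is 1, 2, 4 or 8 */
		return val;
	}

	static void my_write(void *priv, unsigned int offset, int size,
			     unsigned long val)
	{
		memcpy(my_regs + offset, &val, size);
	}

	static const struct logic_iomem_ops my_ops = {
		.read	= my_read,
		.write	= my_write,
		/* .set/.copy_from/.copy_to are optional per the kernel-doc */
	};

	static long my_map(unsigned long offset, size_t size,
			   const struct logic_iomem_ops **ops, void **priv)
	{
		*ops = &my_ops;
		*priv = NULL;
		return 0;	/* no extra offset added for later ops */
	}

	static const struct logic_iomem_region_ops my_region_ops = {
		.map = my_map,
	};

The region would then be registered with logic_iomem_add_region(&my_resource, &my_region_ops), where my_resource is a struct resource describing the emulated range; the expectation is that on configurations which opt into this emulation (user-mode Linux being the likely consumer), MMIO accessors on a mapping of that range are routed through these ops.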
diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h
index 429d67d815ce..07add7882a5d 100644
--- a/include/linux/lru_cache.h
+++ b/include/linux/lru_cache.h
@@ -32,7 +32,7 @@ This header file (and its .c file; kernel-doc of functions see there)
Because of this later property, it is called "lru_cache".
As it actually Tracks Objects in an Active SeT, we could also call it
toast (incidentally that is what may happen to the data on the
- backend storage uppon next resync, if we don't get it right).
+ backend storage upon next resync, if we don't get it right).
What for?
@@ -152,7 +152,7 @@ struct lc_element {
* for paranoia, and for "lc_element_to_index" */
unsigned lc_index;
/* if we want to track a larger set of objects,
- * it needs to become arch independend u64 */
+ * it needs to become an architecture independent u64 */
unsigned lc_number;
/* special label when on free list */
#define LC_FREE (~0U)
@@ -263,7 +263,7 @@ extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char
*
* Allows (expects) the set to be "dirty". Note that the reference counts and
* order on the active and lru lists may still change. Used to serialize
- * changing transactions. Returns true if we aquired the lock.
+ * changing transactions. Returns true if we acquired the lock.
*/
static inline int lc_try_lock_for_transaction(struct lru_cache *lc)
{
@@ -275,7 +275,7 @@ static inline int lc_try_lock_for_transaction(struct lru_cache *lc)
* @lc: the lru cache to operate on
*
* Note that the reference counts and order on the active and lru lists may
- * still change. Only works on a "clean" set. Returns true if we aquired the
+ * still change. Only works on a "clean" set. Returns true if we acquired the
* lock, which means there are no pending changes, and any further attempt to
* change the set will not succeed until the next lc_unlock().
*/
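The two try-lock helpers documented above differ only in whether a dirty set is acceptable; both serialize against concurrent transactions through the same lock bit. A minimal usage sketch of the pattern the comments describe:

	/* Sketch: commit pending changes only if we win the try-lock. */
	if (lc_try_lock_for_transaction(lc)) {
		/* ... record and write out the transaction ... */
		lc_unlock(lc);
	}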
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index 99d629fd9944..97a8b21eb033 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -26,7 +26,7 @@
struct lsm_network_audit {
int netif;
- struct sock *sk;
+ const struct sock *sk;
u16 family;
__be16 dport;
__be16 sport;
@@ -48,13 +48,13 @@ struct lsm_ioctlop_audit {
};
struct lsm_ibpkey_audit {
- u64 subnet_prefix;
- u16 pkey;
+ u64 subnet_prefix;
+ u16 pkey;
};
struct lsm_ibendport_audit {
- char dev_name[IB_DEVICE_NAME_MAX];
- u8 port;
+ const char *dev_name;
+ u8 port;
};
/* Auxiliary data to use in generating the audit record. */
@@ -75,6 +75,8 @@ struct common_audit_data {
#define LSM_AUDIT_DATA_IBPKEY 13
#define LSM_AUDIT_DATA_IBENDPORT 14
#define LSM_AUDIT_DATA_LOCKDOWN 15
+#define LSM_AUDIT_DATA_NOTIFICATION 16
+#define LSM_AUDIT_DATA_ANONINODE 17
union {
struct path path;
struct dentry *dentry;
@@ -95,6 +97,7 @@ struct common_audit_data {
struct lsm_ibpkey_audit *ibpkey;
struct lsm_ibendport_audit *ibendport;
int reason;
+ const char *anonclass;
} u;
/* this union contains LSM specific data */
union {
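A hedged sketch of how an LSM might use the new LSM_AUDIT_DATA_ANONINODE case when auditing a denial for an anonymous inode (the class string here is a hypothetical example):

	struct common_audit_data ad;

	ad.type = LSM_AUDIT_DATA_ANONINODE;
	ad.u.anonclass = "[userfaultfd]";	/* hypothetical anon inode class */
	/* ... then hand &ad to common_lsm_audit() with the module's
	 * pre/post callbacks to emit the record ... */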
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
new file mode 100644
index 000000000000..ec119da1d89b
--- /dev/null
+++ b/include/linux/lsm_hook_defs.h
@@ -0,0 +1,412 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Linux Security Module Hook declarations.
+ *
+ * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com>
+ * Copyright (C) 2001 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com>
+ * Copyright (C) 2001 James Morris <jmorris@intercode.com.au>
+ * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group)
+ * Copyright (C) 2015 Intel Corporation.
+ * Copyright (C) 2015 Casey Schaufler <casey@schaufler-ca.com>
+ * Copyright (C) 2016 Mellanox Technologies
+ * Copyright (C) 2020 Google LLC.
+ */
+
+/*
+ * The macro LSM_HOOK is used to define the data structures required by
+ * the LSM framework using the pattern:
+ *
+ * LSM_HOOK(<return_type>, <default_value>, <hook_name>, args...)
+ *
+ * struct security_hook_heads {
+ * #define LSM_HOOK(RET, DEFAULT, NAME, ...) struct hlist_head NAME;
+ * #include <linux/lsm_hook_defs.h>
+ * #undef LSM_HOOK
+ * };
+ */
+LSM_HOOK(int, 0, binder_set_context_mgr, const struct cred *mgr)
+LSM_HOOK(int, 0, binder_transaction, const struct cred *from,
+ const struct cred *to)
+LSM_HOOK(int, 0, binder_transfer_binder, const struct cred *from,
+ const struct cred *to)
+LSM_HOOK(int, 0, binder_transfer_file, const struct cred *from,
+ const struct cred *to, struct file *file)
+LSM_HOOK(int, 0, ptrace_access_check, struct task_struct *child,
+ unsigned int mode)
+LSM_HOOK(int, 0, ptrace_traceme, struct task_struct *parent)
+LSM_HOOK(int, 0, capget, struct task_struct *target, kernel_cap_t *effective,
+ kernel_cap_t *inheritable, kernel_cap_t *permitted)
+LSM_HOOK(int, 0, capset, struct cred *new, const struct cred *old,
+ const kernel_cap_t *effective, const kernel_cap_t *inheritable,
+ const kernel_cap_t *permitted)
+LSM_HOOK(int, 0, capable, const struct cred *cred, struct user_namespace *ns,
+ int cap, unsigned int opts)
+LSM_HOOK(int, 0, quotactl, int cmds, int type, int id, struct super_block *sb)
+LSM_HOOK(int, 0, quota_on, struct dentry *dentry)
+LSM_HOOK(int, 0, syslog, int type)
+LSM_HOOK(int, 0, settime, const struct timespec64 *ts,
+ const struct timezone *tz)
+LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages)
+LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm)
+LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, struct file *file)
+LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
+LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm)
+LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm)
+LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc,
+ struct fs_context *src_sc)
+LSM_HOOK(int, -ENOPARAM, fs_context_parse_param, struct fs_context *fc,
+ struct fs_parameter *param)
+LSM_HOOK(int, 0, sb_alloc_security, struct super_block *sb)
+LSM_HOOK(void, LSM_RET_VOID, sb_delete, struct super_block *sb)
+LSM_HOOK(void, LSM_RET_VOID, sb_free_security, struct super_block *sb)
+LSM_HOOK(void, LSM_RET_VOID, sb_free_mnt_opts, void *mnt_opts)
+LSM_HOOK(int, 0, sb_eat_lsm_opts, char *orig, void **mnt_opts)
+LSM_HOOK(int, 0, sb_mnt_opts_compat, struct super_block *sb, void *mnt_opts)
+LSM_HOOK(int, 0, sb_remount, struct super_block *sb, void *mnt_opts)
+LSM_HOOK(int, 0, sb_kern_mount, struct super_block *sb)
+LSM_HOOK(int, 0, sb_show_options, struct seq_file *m, struct super_block *sb)
+LSM_HOOK(int, 0, sb_statfs, struct dentry *dentry)
+LSM_HOOK(int, 0, sb_mount, const char *dev_name, const struct path *path,
+ const char *type, unsigned long flags, void *data)
+LSM_HOOK(int, 0, sb_umount, struct vfsmount *mnt, int flags)
+LSM_HOOK(int, 0, sb_pivotroot, const struct path *old_path,
+ const struct path *new_path)
+LSM_HOOK(int, 0, sb_set_mnt_opts, struct super_block *sb, void *mnt_opts,
+ unsigned long kern_flags, unsigned long *set_kern_flags)
+LSM_HOOK(int, 0, sb_clone_mnt_opts, const struct super_block *oldsb,
+ struct super_block *newsb, unsigned long kern_flags,
+ unsigned long *set_kern_flags)
+LSM_HOOK(int, 0, move_mount, const struct path *from_path,
+ const struct path *to_path)
+LSM_HOOK(int, -EOPNOTSUPP, dentry_init_security, struct dentry *dentry,
+ int mode, const struct qstr *name, const char **xattr_name,
+ void **ctx, u32 *ctxlen)
+LSM_HOOK(int, 0, dentry_create_files_as, struct dentry *dentry, int mode,
+ struct qstr *name, const struct cred *old, struct cred *new)
+
+#ifdef CONFIG_SECURITY_PATH
+LSM_HOOK(int, 0, path_unlink, const struct path *dir, struct dentry *dentry)
+LSM_HOOK(int, 0, path_mkdir, const struct path *dir, struct dentry *dentry,
+ umode_t mode)
+LSM_HOOK(int, 0, path_rmdir, const struct path *dir, struct dentry *dentry)
+LSM_HOOK(int, 0, path_mknod, const struct path *dir, struct dentry *dentry,
+ umode_t mode, unsigned int dev)
+LSM_HOOK(int, 0, path_truncate, const struct path *path)
+LSM_HOOK(int, 0, path_symlink, const struct path *dir, struct dentry *dentry,
+ const char *old_name)
+LSM_HOOK(int, 0, path_link, struct dentry *old_dentry,
+ const struct path *new_dir, struct dentry *new_dentry)
+LSM_HOOK(int, 0, path_rename, const struct path *old_dir,
+ struct dentry *old_dentry, const struct path *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
+LSM_HOOK(int, 0, path_chmod, const struct path *path, umode_t mode)
+LSM_HOOK(int, 0, path_chown, const struct path *path, kuid_t uid, kgid_t gid)
+LSM_HOOK(int, 0, path_chroot, const struct path *path)
+#endif /* CONFIG_SECURITY_PATH */
+
+/* Needed for inode based security check */
+LSM_HOOK(int, 0, path_notify, const struct path *path, u64 mask,
+ unsigned int obj_type)
+LSM_HOOK(int, 0, inode_alloc_security, struct inode *inode)
+LSM_HOOK(void, LSM_RET_VOID, inode_free_security, struct inode *inode)
+LSM_HOOK(int, 0, inode_init_security, struct inode *inode,
+ struct inode *dir, const struct qstr *qstr, const char **name,
+ void **value, size_t *len)
+LSM_HOOK(int, 0, inode_init_security_anon, struct inode *inode,
+ const struct qstr *name, const struct inode *context_inode)
+LSM_HOOK(int, 0, inode_create, struct inode *dir, struct dentry *dentry,
+ umode_t mode)
+LSM_HOOK(int, 0, inode_link, struct dentry *old_dentry, struct inode *dir,
+ struct dentry *new_dentry)
+LSM_HOOK(int, 0, inode_unlink, struct inode *dir, struct dentry *dentry)
+LSM_HOOK(int, 0, inode_symlink, struct inode *dir, struct dentry *dentry,
+ const char *old_name)
+LSM_HOOK(int, 0, inode_mkdir, struct inode *dir, struct dentry *dentry,
+ umode_t mode)
+LSM_HOOK(int, 0, inode_rmdir, struct inode *dir, struct dentry *dentry)
+LSM_HOOK(int, 0, inode_mknod, struct inode *dir, struct dentry *dentry,
+ umode_t mode, dev_t dev)
+LSM_HOOK(int, 0, inode_rename, struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+LSM_HOOK(int, 0, inode_readlink, struct dentry *dentry)
+LSM_HOOK(int, 0, inode_follow_link, struct dentry *dentry, struct inode *inode,
+ bool rcu)
+LSM_HOOK(int, 0, inode_permission, struct inode *inode, int mask)
+LSM_HOOK(int, 0, inode_setattr, struct dentry *dentry, struct iattr *attr)
+LSM_HOOK(int, 0, inode_getattr, const struct path *path)
+LSM_HOOK(int, 0, inode_setxattr, struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags)
+LSM_HOOK(void, LSM_RET_VOID, inode_post_setxattr, struct dentry *dentry,
+ const char *name, const void *value, size_t size, int flags)
+LSM_HOOK(int, 0, inode_getxattr, struct dentry *dentry, const char *name)
+LSM_HOOK(int, 0, inode_listxattr, struct dentry *dentry)
+LSM_HOOK(int, 0, inode_removexattr, struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name)
+LSM_HOOK(int, 0, inode_need_killpriv, struct dentry *dentry)
+LSM_HOOK(int, 0, inode_killpriv, struct user_namespace *mnt_userns,
+ struct dentry *dentry)
+LSM_HOOK(int, -EOPNOTSUPP, inode_getsecurity, struct user_namespace *mnt_userns,
+ struct inode *inode, const char *name, void **buffer, bool alloc)
+LSM_HOOK(int, -EOPNOTSUPP, inode_setsecurity, struct inode *inode,
+ const char *name, const void *value, size_t size, int flags)
+LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer,
+ size_t buffer_size)
+LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid)
+LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new)
+LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, const char *name)
+LSM_HOOK(int, 0, kernfs_init_security, struct kernfs_node *kn_dir,
+ struct kernfs_node *kn)
+LSM_HOOK(int, 0, file_permission, struct file *file, int mask)
+LSM_HOOK(int, 0, file_alloc_security, struct file *file)
+LSM_HOOK(void, LSM_RET_VOID, file_free_security, struct file *file)
+LSM_HOOK(int, 0, file_ioctl, struct file *file, unsigned int cmd,
+ unsigned long arg)
+LSM_HOOK(int, 0, mmap_addr, unsigned long addr)
+LSM_HOOK(int, 0, mmap_file, struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags)
+LSM_HOOK(int, 0, file_mprotect, struct vm_area_struct *vma,
+ unsigned long reqprot, unsigned long prot)
+LSM_HOOK(int, 0, file_lock, struct file *file, unsigned int cmd)
+LSM_HOOK(int, 0, file_fcntl, struct file *file, unsigned int cmd,
+ unsigned long arg)
+LSM_HOOK(void, LSM_RET_VOID, file_set_fowner, struct file *file)
+LSM_HOOK(int, 0, file_send_sigiotask, struct task_struct *tsk,
+ struct fown_struct *fown, int sig)
+LSM_HOOK(int, 0, file_receive, struct file *file)
+LSM_HOOK(int, 0, file_open, struct file *file)
+LSM_HOOK(int, 0, task_alloc, struct task_struct *task,
+ unsigned long clone_flags)
+LSM_HOOK(void, LSM_RET_VOID, task_free, struct task_struct *task)
+LSM_HOOK(int, 0, cred_alloc_blank, struct cred *cred, gfp_t gfp)
+LSM_HOOK(void, LSM_RET_VOID, cred_free, struct cred *cred)
+LSM_HOOK(int, 0, cred_prepare, struct cred *new, const struct cred *old,
+ gfp_t gfp)
+LSM_HOOK(void, LSM_RET_VOID, cred_transfer, struct cred *new,
+ const struct cred *old)
+LSM_HOOK(void, LSM_RET_VOID, cred_getsecid, const struct cred *c, u32 *secid)
+LSM_HOOK(int, 0, kernel_act_as, struct cred *new, u32 secid)
+LSM_HOOK(int, 0, kernel_create_files_as, struct cred *new, struct inode *inode)
+LSM_HOOK(int, 0, kernel_module_request, char *kmod_name)
+LSM_HOOK(int, 0, kernel_load_data, enum kernel_load_data_id id, bool contents)
+LSM_HOOK(int, 0, kernel_post_load_data, char *buf, loff_t size,
+ enum kernel_load_data_id id, char *description)
+LSM_HOOK(int, 0, kernel_read_file, struct file *file,
+ enum kernel_read_file_id id, bool contents)
+LSM_HOOK(int, 0, kernel_post_read_file, struct file *file, char *buf,
+ loff_t size, enum kernel_read_file_id id)
+LSM_HOOK(int, 0, task_fix_setuid, struct cred *new, const struct cred *old,
+ int flags)
+LSM_HOOK(int, 0, task_fix_setgid, struct cred *new, const struct cred * old,
+ int flags)
+LSM_HOOK(int, 0, task_fix_setgroups, struct cred *new, const struct cred * old)
+LSM_HOOK(int, 0, task_setpgid, struct task_struct *p, pid_t pgid)
+LSM_HOOK(int, 0, task_getpgid, struct task_struct *p)
+LSM_HOOK(int, 0, task_getsid, struct task_struct *p)
+LSM_HOOK(void, LSM_RET_VOID, current_getsecid_subj, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, task_getsecid_obj,
+ struct task_struct *p, u32 *secid)
+LSM_HOOK(int, 0, task_setnice, struct task_struct *p, int nice)
+LSM_HOOK(int, 0, task_setioprio, struct task_struct *p, int ioprio)
+LSM_HOOK(int, 0, task_getioprio, struct task_struct *p)
+LSM_HOOK(int, 0, task_prlimit, const struct cred *cred,
+ const struct cred *tcred, unsigned int flags)
+LSM_HOOK(int, 0, task_setrlimit, struct task_struct *p, unsigned int resource,
+ struct rlimit *new_rlim)
+LSM_HOOK(int, 0, task_setscheduler, struct task_struct *p)
+LSM_HOOK(int, 0, task_getscheduler, struct task_struct *p)
+LSM_HOOK(int, 0, task_movememory, struct task_struct *p)
+LSM_HOOK(int, 0, task_kill, struct task_struct *p, struct kernel_siginfo *info,
+ int sig, const struct cred *cred)
+LSM_HOOK(int, -ENOSYS, task_prctl, int option, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4, unsigned long arg5)
+LSM_HOOK(void, LSM_RET_VOID, task_to_inode, struct task_struct *p,
+ struct inode *inode)
+LSM_HOOK(int, 0, userns_create, const struct cred *cred)
+LSM_HOOK(int, 0, ipc_permission, struct kern_ipc_perm *ipcp, short flag)
+LSM_HOOK(void, LSM_RET_VOID, ipc_getsecid, struct kern_ipc_perm *ipcp,
+ u32 *secid)
+LSM_HOOK(int, 0, msg_msg_alloc_security, struct msg_msg *msg)
+LSM_HOOK(void, LSM_RET_VOID, msg_msg_free_security, struct msg_msg *msg)
+LSM_HOOK(int, 0, msg_queue_alloc_security, struct kern_ipc_perm *perm)
+LSM_HOOK(void, LSM_RET_VOID, msg_queue_free_security,
+ struct kern_ipc_perm *perm)
+LSM_HOOK(int, 0, msg_queue_associate, struct kern_ipc_perm *perm, int msqflg)
+LSM_HOOK(int, 0, msg_queue_msgctl, struct kern_ipc_perm *perm, int cmd)
+LSM_HOOK(int, 0, msg_queue_msgsnd, struct kern_ipc_perm *perm,
+ struct msg_msg *msg, int msqflg)
+LSM_HOOK(int, 0, msg_queue_msgrcv, struct kern_ipc_perm *perm,
+ struct msg_msg *msg, struct task_struct *target, long type, int mode)
+LSM_HOOK(int, 0, shm_alloc_security, struct kern_ipc_perm *perm)
+LSM_HOOK(void, LSM_RET_VOID, shm_free_security, struct kern_ipc_perm *perm)
+LSM_HOOK(int, 0, shm_associate, struct kern_ipc_perm *perm, int shmflg)
+LSM_HOOK(int, 0, shm_shmctl, struct kern_ipc_perm *perm, int cmd)
+LSM_HOOK(int, 0, shm_shmat, struct kern_ipc_perm *perm, char __user *shmaddr,
+ int shmflg)
+LSM_HOOK(int, 0, sem_alloc_security, struct kern_ipc_perm *perm)
+LSM_HOOK(void, LSM_RET_VOID, sem_free_security, struct kern_ipc_perm *perm)
+LSM_HOOK(int, 0, sem_associate, struct kern_ipc_perm *perm, int semflg)
+LSM_HOOK(int, 0, sem_semctl, struct kern_ipc_perm *perm, int cmd)
+LSM_HOOK(int, 0, sem_semop, struct kern_ipc_perm *perm, struct sembuf *sops,
+ unsigned nsops, int alter)
+LSM_HOOK(int, 0, netlink_send, struct sock *sk, struct sk_buff *skb)
+LSM_HOOK(void, LSM_RET_VOID, d_instantiate, struct dentry *dentry,
+ struct inode *inode)
+LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, const char *name,
+ char **value)
+LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size)
+LSM_HOOK(int, 0, ismaclabel, const char *name)
+LSM_HOOK(int, -EOPNOTSUPP, secid_to_secctx, u32 secid, char **secdata,
+ u32 *seclen)
+LSM_HOOK(int, 0, secctx_to_secid, const char *secdata, u32 seclen, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen)
+LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode)
+LSM_HOOK(int, 0, inode_notifysecctx, struct inode *inode, void *ctx, u32 ctxlen)
+LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen)
+LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx,
+ u32 *ctxlen)
+
+#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
+LSM_HOOK(int, 0, post_notification, const struct cred *w_cred,
+ const struct cred *cred, struct watch_notification *n)
+#endif /* CONFIG_SECURITY && CONFIG_WATCH_QUEUE */
+
+#if defined(CONFIG_SECURITY) && defined(CONFIG_KEY_NOTIFICATIONS)
+LSM_HOOK(int, 0, watch_key, struct key *key)
+#endif /* CONFIG_SECURITY && CONFIG_KEY_NOTIFICATIONS */
+
+#ifdef CONFIG_SECURITY_NETWORK
+LSM_HOOK(int, 0, unix_stream_connect, struct sock *sock, struct sock *other,
+ struct sock *newsk)
+LSM_HOOK(int, 0, unix_may_send, struct socket *sock, struct socket *other)
+LSM_HOOK(int, 0, socket_create, int family, int type, int protocol, int kern)
+LSM_HOOK(int, 0, socket_post_create, struct socket *sock, int family, int type,
+ int protocol, int kern)
+LSM_HOOK(int, 0, socket_socketpair, struct socket *socka, struct socket *sockb)
+LSM_HOOK(int, 0, socket_bind, struct socket *sock, struct sockaddr *address,
+ int addrlen)
+LSM_HOOK(int, 0, socket_connect, struct socket *sock, struct sockaddr *address,
+ int addrlen)
+LSM_HOOK(int, 0, socket_listen, struct socket *sock, int backlog)
+LSM_HOOK(int, 0, socket_accept, struct socket *sock, struct socket *newsock)
+LSM_HOOK(int, 0, socket_sendmsg, struct socket *sock, struct msghdr *msg,
+ int size)
+LSM_HOOK(int, 0, socket_recvmsg, struct socket *sock, struct msghdr *msg,
+ int size, int flags)
+LSM_HOOK(int, 0, socket_getsockname, struct socket *sock)
+LSM_HOOK(int, 0, socket_getpeername, struct socket *sock)
+LSM_HOOK(int, 0, socket_getsockopt, struct socket *sock, int level, int optname)
+LSM_HOOK(int, 0, socket_setsockopt, struct socket *sock, int level, int optname)
+LSM_HOOK(int, 0, socket_shutdown, struct socket *sock, int how)
+LSM_HOOK(int, 0, socket_sock_rcv_skb, struct sock *sk, struct sk_buff *skb)
+LSM_HOOK(int, 0, socket_getpeersec_stream, struct socket *sock,
+ char __user *optval, int __user *optlen, unsigned len)
+LSM_HOOK(int, 0, socket_getpeersec_dgram, struct socket *sock,
+ struct sk_buff *skb, u32 *secid)
+LSM_HOOK(int, 0, sk_alloc_security, struct sock *sk, int family, gfp_t priority)
+LSM_HOOK(void, LSM_RET_VOID, sk_free_security, struct sock *sk)
+LSM_HOOK(void, LSM_RET_VOID, sk_clone_security, const struct sock *sk,
+ struct sock *newsk)
+LSM_HOOK(void, LSM_RET_VOID, sk_getsecid, struct sock *sk, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, sock_graft, struct sock *sk, struct socket *parent)
+LSM_HOOK(int, 0, inet_conn_request, const struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req)
+LSM_HOOK(void, LSM_RET_VOID, inet_csk_clone, struct sock *newsk,
+ const struct request_sock *req)
+LSM_HOOK(void, LSM_RET_VOID, inet_conn_established, struct sock *sk,
+ struct sk_buff *skb)
+LSM_HOOK(int, 0, secmark_relabel_packet, u32 secid)
+LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_inc, void)
+LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_dec, void)
+LSM_HOOK(void, LSM_RET_VOID, req_classify_flow, const struct request_sock *req,
+ struct flowi_common *flic)
+LSM_HOOK(int, 0, tun_dev_alloc_security, void **security)
+LSM_HOOK(void, LSM_RET_VOID, tun_dev_free_security, void *security)
+LSM_HOOK(int, 0, tun_dev_create, void)
+LSM_HOOK(int, 0, tun_dev_attach_queue, void *security)
+LSM_HOOK(int, 0, tun_dev_attach, struct sock *sk, void *security)
+LSM_HOOK(int, 0, tun_dev_open, void *security)
+LSM_HOOK(int, 0, sctp_assoc_request, struct sctp_association *asoc,
+ struct sk_buff *skb)
+LSM_HOOK(int, 0, sctp_bind_connect, struct sock *sk, int optname,
+ struct sockaddr *address, int addrlen)
+LSM_HOOK(void, LSM_RET_VOID, sctp_sk_clone, struct sctp_association *asoc,
+ struct sock *sk, struct sock *newsk)
+LSM_HOOK(int, 0, sctp_assoc_established, struct sctp_association *asoc,
+ struct sk_buff *skb)
+#endif /* CONFIG_SECURITY_NETWORK */
+
+#ifdef CONFIG_SECURITY_INFINIBAND
+LSM_HOOK(int, 0, ib_pkey_access, void *sec, u64 subnet_prefix, u16 pkey)
+LSM_HOOK(int, 0, ib_endport_manage_subnet, void *sec, const char *dev_name,
+ u8 port_num)
+LSM_HOOK(int, 0, ib_alloc_security, void **sec)
+LSM_HOOK(void, LSM_RET_VOID, ib_free_security, void *sec)
+#endif /* CONFIG_SECURITY_INFINIBAND */
+
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+LSM_HOOK(int, 0, xfrm_policy_alloc_security, struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp)
+LSM_HOOK(int, 0, xfrm_policy_clone_security, struct xfrm_sec_ctx *old_ctx,
+ struct xfrm_sec_ctx **new_ctx)
+LSM_HOOK(void, LSM_RET_VOID, xfrm_policy_free_security,
+ struct xfrm_sec_ctx *ctx)
+LSM_HOOK(int, 0, xfrm_policy_delete_security, struct xfrm_sec_ctx *ctx)
+LSM_HOOK(int, 0, xfrm_state_alloc, struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *sec_ctx)
+LSM_HOOK(int, 0, xfrm_state_alloc_acquire, struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec, u32 secid)
+LSM_HOOK(void, LSM_RET_VOID, xfrm_state_free_security, struct xfrm_state *x)
+LSM_HOOK(int, 0, xfrm_state_delete_security, struct xfrm_state *x)
+LSM_HOOK(int, 0, xfrm_policy_lookup, struct xfrm_sec_ctx *ctx, u32 fl_secid)
+LSM_HOOK(int, 1, xfrm_state_pol_flow_match, struct xfrm_state *x,
+ struct xfrm_policy *xp, const struct flowi_common *flic)
+LSM_HOOK(int, 0, xfrm_decode_session, struct sk_buff *skb, u32 *secid,
+ int ckall)
+#endif /* CONFIG_SECURITY_NETWORK_XFRM */
+
+/* key management security hooks */
+#ifdef CONFIG_KEYS
+LSM_HOOK(int, 0, key_alloc, struct key *key, const struct cred *cred,
+ unsigned long flags)
+LSM_HOOK(void, LSM_RET_VOID, key_free, struct key *key)
+LSM_HOOK(int, 0, key_permission, key_ref_t key_ref, const struct cred *cred,
+ enum key_need_perm need_perm)
+LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **_buffer)
+#endif /* CONFIG_KEYS */
+
+#ifdef CONFIG_AUDIT
+LSM_HOOK(int, 0, audit_rule_init, u32 field, u32 op, char *rulestr,
+ void **lsmrule)
+LSM_HOOK(int, 0, audit_rule_known, struct audit_krule *krule)
+LSM_HOOK(int, 0, audit_rule_match, u32 secid, u32 field, u32 op, void *lsmrule)
+LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule)
+#endif /* CONFIG_AUDIT */
+
+#ifdef CONFIG_BPF_SYSCALL
+LSM_HOOK(int, 0, bpf, int cmd, union bpf_attr *attr, unsigned int size)
+LSM_HOOK(int, 0, bpf_map, struct bpf_map *map, fmode_t fmode)
+LSM_HOOK(int, 0, bpf_prog, struct bpf_prog *prog)
+LSM_HOOK(int, 0, bpf_map_alloc_security, struct bpf_map *map)
+LSM_HOOK(void, LSM_RET_VOID, bpf_map_free_security, struct bpf_map *map)
+LSM_HOOK(int, 0, bpf_prog_alloc_security, struct bpf_prog_aux *aux)
+LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free_security, struct bpf_prog_aux *aux)
+#endif /* CONFIG_BPF_SYSCALL */
+
+LSM_HOOK(int, 0, locked_down, enum lockdown_reason what)
+
+#ifdef CONFIG_PERF_EVENTS
+LSM_HOOK(int, 0, perf_event_open, struct perf_event_attr *attr, int type)
+LSM_HOOK(int, 0, perf_event_alloc, struct perf_event *event)
+LSM_HOOK(void, LSM_RET_VOID, perf_event_free, struct perf_event *event)
+LSM_HOOK(int, 0, perf_event_read, struct perf_event *event)
+LSM_HOOK(int, 0, perf_event_write, struct perf_event *event)
+#endif /* CONFIG_PERF_EVENTS */
+
+#ifdef CONFIG_IO_URING
+LSM_HOOK(int, 0, uring_override_creds, const struct cred *new)
+LSM_HOOK(int, 0, uring_sqpoll, void)
+LSM_HOOK(int, 0, uring_cmd, struct io_uring_cmd *ioucmd)
+#endif /* CONFIG_IO_URING */
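This header is only half of the mechanism: the lsm_hooks.h diff below deletes the long hand-written member lists, which are regenerated from these LSM_HOOK() lines. The additions of that (truncated) hunk are not shown here, but the replacement follows the same pattern the header comment demonstrates for security_hook_heads, roughly:

	/* Sketch of the companion expansion: each LSM_HOOK() line
	 * becomes one function-pointer member of the union. */
	union security_list_options {
		#define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
		#include "lsm_hook_defs.h"
		#undef LSM_HOOK
	};

The DEFAULT argument is what the dispatch macros in security.c return when no module registers the hook, which is why, for example, fs_context_parse_param defaults to -ENOPARAM rather than 0.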
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 20d8cf194fb7..4ec80b96c22e 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -34,40 +34,48 @@
*
* Security hooks for program execution operations.
*
- * @bprm_set_creds:
- * Save security information in the bprm->security field, typically based
- * on information about the bprm->file, for later use by the apply_creds
- * hook. This hook may also optionally check permissions (e.g. for
- * transitions between security domains).
- * This hook may be called multiple times during a single execve, e.g. for
- * interpreters. The hook can tell whether it has already been called by
- * checking to see if @bprm->security is non-NULL. If so, then the hook
- * may decide either to retain the security information saved earlier or
- * to replace it. The hook must set @bprm->secureexec to 1 if a "secure
- * exec" has happened as a result of this hook call. The flag is used to
- * indicate the need for a sanitized execution environment, and is also
- * passed in the ELF auxiliary table on the initial stack to indicate
- * whether libc should enable secure mode.
+ * @bprm_creds_for_exec:
+ * If the setup in prepare_exec_creds did not set up @bprm->cred->security
+ * properly for executing @bprm->file, update the LSM's portion of
+ * @bprm->cred->security to be what commit_creds needs to install for the
+ * new program. This hook may also optionally check permissions
+ * (e.g. for transitions between security domains).
+ * The hook must set @bprm->secureexec to 1 if AT_SECURE should be set to
+ * request libc enable secure mode.
+ * @bprm contains the linux_binprm structure.
+ * Return 0 if the hook is successful and permission is granted.
+ * @bprm_creds_from_file:
+ * If @file is setpcap, suid, sgid or otherwise marked to change
+ * privilege upon exec, update @bprm->cred to reflect that change.
+ * This is called after finding the binary that will be executed
+ * without an interpreter. This ensures that the credentials will not
+ * be derived from a script that the binary will need to reopen, which
+ * when reopened may end up being a completely different file. This
+ * hook may also optionally check permissions (e.g. for transitions
+ * between security domains).
+ * The hook must set @bprm->secureexec to 1 if AT_SECURE should be set to
+ * request libc enable secure mode.
+ * The hook must add to @bprm->per_clear any personality flags that
+ * should be cleared from current->personality.
* @bprm contains the linux_binprm structure.
* Return 0 if the hook is successful and permission is granted.
* @bprm_check_security:
* This hook mediates the point when a search for a binary handler will
- * begin. It allows a check the @bprm->security value which is set in the
- * preceding set_creds call. The primary difference from set_creds is
- * that the argv list and envp list are reliably available in @bprm. This
- * hook may be called multiple times during a single execve; and in each
- * pass set_creds is called first.
+ * begin. It allows a check against the @bprm->cred->security value
+ * which was set in the preceding creds_for_exec call. The argv list and
+ * envp list are reliably available in @bprm. This hook may be called
+ * multiple times during a single execve.
* @bprm contains the linux_binprm structure.
* Return 0 if the hook is successful and permission is granted.
* @bprm_committing_creds:
* Prepare to install the new security attributes of a process being
* transformed by an execve operation, based on the old credentials
* pointed to by @current->cred and the information set in @bprm->cred by
- * the bprm_set_creds hook. @bprm points to the linux_binprm structure.
- * This hook is a good place to perform state changes on the process such
- * as closing open file descriptors to which access will no longer be
- * granted when the attributes are changed. This is called immediately
- * before commit_creds().
+ * the bprm_creds_for_exec hook. @bprm points to the linux_binprm
+ * structure. This hook is a good place to perform state changes on the
+ * process such as closing open file descriptors to which access will no
+ * longer be granted when the attributes are changed. This is called
+ * immediately before commit_creds().
* @bprm_committed_creds:
* Tidy up after the installation of the new security attributes of a
* process being transformed by an execve operation. The new credentials
@@ -77,7 +85,7 @@
* state. This is called immediately after commit_creds().
*
* Security hooks for mount using fs_context.
- * [See also Documentation/filesystems/mount_api.txt]
+ * [See also Documentation/filesystems/mount_api.rst]
*
* @fs_context_dup:
* Allocate and attach a security structure to sc->security. This pointer
@@ -100,9 +108,16 @@
* allocated.
* @sb contains the super_block structure to be modified.
* Return 0 if operation was successful.
+ * @sb_delete:
+ * Release objects tied to a superblock (e.g. inodes).
+ * @sb contains the super_block structure being released.
* @sb_free_security:
* Deallocate and clear the sb->s_security field.
* @sb contains the super_block structure to be modified.
+ * @sb_free_mnt_opts:
+ * Free memory associated with @mnt_opts.
+ * @sb_eat_lsm_opts:
+ * Eat (scan @orig options) and save them in @mnt_opts.
* @sb_statfs:
* Check permission before obtaining filesystem statistics for the @mnt
* mountpoint.
@@ -130,12 +145,22 @@
* @orig the original mount data copied from userspace.
* @copy copied data which will be passed to the security module.
* Returns 0 if the copy was successful.
+ * @sb_mnt_opts_compat:
+ * Determine if the new mount options in @mnt_opts are allowed given
+ * the existing mounted filesystem at @sb.
+ * @sb superblock being compared
+ * @mnt_opts new mount options
+ * Return 0 if options are compatible.
* @sb_remount:
* Extracts security system specific mount options and verifies no changes
* are being made to those options.
* @sb superblock being remounted
* @data contains the filesystem-specific data.
* Return 0 if permission is granted.
+ * @sb_kern_mount:
+ * Mount this @sb if allowed by permissions.
+ * @sb_show_options:
+ * Show (print on @m) mount options for this @sb.
* @sb_umount:
* Check permission before the @mnt file system is unmounted.
* @mnt contains the mounted file system.
@@ -169,6 +194,9 @@
* @dentry dentry to use in calculating the context.
* @mode mode used to determine resource type.
* @name name of the last path component used to create file
+ * @xattr_name pointer to place the pointer to the security xattr name.
+ * The caller does not have to free the resulting pointer; it
+ * points to a static string.
* @ctx pointer to place the pointer to the resulting context in.
* @ctxlen point to place the length of the resulting context.
* @dentry_create_files_as:
@@ -215,6 +243,15 @@
* Returns 0 if @name and @value have been successfully set,
* -EOPNOTSUPP if no security attribute is needed, or
* -ENOMEM on memory allocation failure.
+ * @inode_init_security_anon:
+ * Set up the incore security field for the new anonymous inode
+ * and return whether the inode creation is permitted by the security
+ * module or not.
+ * @inode contains the inode structure
+ * @name name of the anonymous inode class
+ * @context_inode optional related inode
+ * Returns 0 on success, -EACCES if the security module denies the
+ * creation of this inode, or another -errno upon other errors.
* @inode_create:
* Check permission to create a regular file.
* @dir contains inode structure of the parent of the new file.
@@ -321,6 +358,7 @@
* @old_dentry contains the dentry structure of the old link.
* @new_dir contains the path structure for parent of the new link.
* @new_dentry contains the dentry structure of the new link.
+ * @flags may contain rename options such as RENAME_EXCHANGE.
* Return 0 if permission is granted.
* @path_chmod:
* Check for permission to change a mode of the file @path. The new
@@ -426,6 +464,7 @@
* @inode_killpriv:
* The setuid bit is being removed. Remove similar security labels.
* Called with the dentry->d_inode->i_mutex held.
+ * @mnt_userns: user namespace of the mount
* @dentry is the dentry being changed.
* Return 0 on success. If error is returned, then the operation
* causing setuid bit removal is failed.
@@ -451,6 +490,12 @@
* security module does not know about attribute or a negative error code
* to abort the copy up. Note that the caller is responsible for reading
* and writing the xattrs as this hook is merely a filter.
+ * @d_instantiate:
+ * Fill in @inode security information for a @dentry if allowed.
+ * @getprocattr:
+ * Read attribute @name for process @p and store it into @value if allowed.
+ * @setprocattr:
+ * Write (set) attribute @name to @value, size @size if allowed.
*
* Security hooks for kernfs node operations
*
@@ -611,12 +656,23 @@
* @kernel_load_data:
* Load data provided by userspace.
* @id kernel load data identifier
+ * @contents if a subsequent @kernel_post_load_data will be called.
+ * Return 0 if permission is granted.
+ * @kernel_post_load_data:
+ * Load data provided by a non-file source (usually userspace buffer).
+ * @buf pointer to buffer containing the data contents.
+ * @size length of the data contents.
+ * @id kernel load data identifier
+ * @description a text description of what was loaded, @id-specific
* Return 0 if permission is granted.
+ * This must be paired with a prior @kernel_load_data call that had
+ * @contents set to true.
* @kernel_read_file:
* Read a file specified by userspace.
* @file contains the file structure pointing to the file being read
* by the kernel.
* @id kernel read file identifier
+ * @contents if a subsequent @kernel_post_read_file will be called.
* Return 0 if permission is granted.
* @kernel_post_read_file:
* Read a file specified by userspace.
@@ -625,6 +681,8 @@
* @buf pointer to buffer containing the file contents.
* @size length of the file contents.
* @id kernel read file identifier
+ * This must be paired with a prior @kernel_read_file call that had
+ * @contents set to true.
* Return 0 if permission is granted.
* @task_fix_setuid:
* Update the module's state after setting one or more of the user
@@ -635,6 +693,22 @@
* @old is the set of credentials that are being replaces
* @flags contains one of the LSM_SETID_* values.
* Return 0 on success.
+ * @task_fix_setgid:
+ * Update the module's state after setting one or more of the group
+ * identity attributes of the current process. The @flags parameter
+ * indicates which of the set*gid system calls invoked this hook.
+ * @new is the set of credentials that will be installed. Modifications
+ * should be made to this rather than to @current->cred.
+ * @old is the set of credentials that are being replaced.
+ * @flags contains one of the LSM_SETID_* values.
+ * Return 0 on success.
+ * @task_fix_setgroups:
+ * Update the module's state after setting the supplementary group
+ * identity attributes of the current process.
+ * @new is the set of credentials that will be installed. Modifications
+ * should be made to this rather than to @current->cred.
+ * @old is the set of credentials that are being replaced.
+ * Return 0 on success.
* @task_setpgid:
* Check permission before setting the process group identifier of the
* process @p to @pgid.
@@ -651,9 +725,13 @@
* @p.
* @p contains the task_struct for the process.
* Return 0 if permission is granted.
- * @task_getsecid:
- * Retrieve the security identifier of the process @p.
- * @p contains the task_struct for the process and place is into @secid.
+ * @current_getsecid_subj:
+ * Retrieve the subjective security identifier of the current task and
+ * return it in @secid.
+ * In case of failure, @secid will be set to zero.
+ * @task_getsecid_obj:
+ * Retrieve the objective security identifier of the task_struct in @p
+ * and return it in @secid.
* In case of failure, @secid will be set to zero.
*
* @task_setnice:
@@ -728,6 +806,10 @@
* security attributes, e.g. for /proc/pid inodes.
* @p contains the task_struct for the task.
* @inode contains the inode structure for the inode.
+ * @userns_create:
+ * Check permission prior to creating a new user namespace.
+ * @cred points to prepared creds.
+ * Return 0 if successful, otherwise < 0 error code.
*
* Security hooks for Netlink messaging.
*
@@ -782,7 +864,7 @@
* structure. Note that the security field was not added directly to the
* socket structure, but rather, the socket security information is stored
* in the associated inode. Typically, the inode alloc_security hook will
- * allocate and and attach security information to
+ * allocate and attach security information to
* SOCK_INODE(sock)->i_security. This hook may be used to update the
* SOCK_INODE(sock)->i_security field with additional information that
* wasn't available when the inode was allocated.
@@ -953,9 +1035,9 @@
* Security hooks for SCTP
*
* @sctp_assoc_request:
- * Passes the @ep and @chunk->skb of the association INIT packet to
+ * Passes the @asoc and @chunk->skb of the association INIT packet to
* the security module.
- * @ep pointer to sctp endpoint structure.
+ * @asoc pointer to sctp association structure.
* @skb pointer to skbuff of association packet.
* Return 0 on success, error on failure.
* @sctp_bind_connect:
@@ -973,9 +1055,14 @@
* Called whenever a new socket is created by accept(2) (i.e. a TCP
* style socket) or when a socket is 'peeled off' e.g. userspace
* calls sctp_peeloff(3).
- * @ep pointer to current sctp endpoint structure.
+ * @asoc pointer to current sctp association structure.
* @sk pointer to current sock structure.
- * @sk pointer to new sock structure.
+ * @newsk pointer to new sock structure.
+ * @sctp_assoc_established:
+ * Passes the @asoc and @chunk->skb of the association COOKIE_ACK packet
+ * to the security module.
+ * @asoc pointer to sctp association structure.
+ * @skb pointer to skbuff of association packet.
*
* Security hooks for Infiniband
*
@@ -1059,7 +1146,7 @@
* @xfrm_state_pol_flow_match:
* @x contains the state to match.
* @xp contains the policy to check for a match.
- * @fl contains the flow to check for a match.
+ * @flic contains the flowi_common struct to check for a match.
* Return 1 if there is a match.
* @xfrm_decode_session:
* @skb points to skb to decode.
@@ -1113,6 +1200,7 @@
* In case of failure, @secid will be set to zero.
*
* Security hooks for individual messages held in System V IPC message queues
+ *
* @msg_msg_alloc_security:
* Allocate and attach a security structure to the msg->security field.
* The security field is initialized to NULL when the structure is first
@@ -1241,22 +1329,22 @@
*
* @binder_set_context_mgr:
* Check whether @mgr is allowed to be the binder context manager.
- * @mgr contains the task_struct for the task being registered.
+ * @mgr contains the struct cred for the current binder process.
* Return 0 if permission is granted.
* @binder_transaction:
* Check whether @from is allowed to invoke a binder transaction call
* to @to.
- * @from contains the task_struct for the sending task.
- * @to contains the task_struct for the receiving task.
+ * @from contains the struct cred for the sending process.
+ * @to contains the struct cred for the receiving process.
* @binder_transfer_binder:
* Check whether @from is allowed to transfer a binder reference to @to.
- * @from contains the task_struct for the sending task.
- * @to contains the task_struct for the receiving task.
+ * @from contains the struct cred for the sending process.
+ * @to contains the struct cred for the receiving process.
* @binder_transfer_file:
* Check whether @from is allowed to transfer @file to @to.
- * @from contains the task_struct for the sending task.
+ * @from contains the struct cred for the sending process.
* @file contains the struct file being transferred.
- * @to contains the task_struct for the receiving task.
+ * @to contains the struct cred for the receiving process.
*
* @ptrace_access_check:
* Check permission before allowing the current process to trace the
@@ -1302,6 +1390,10 @@
* @cap contains the capability <include/linux/capability.h>.
* @opts contains options for the capable check <include/linux/security.h>
* Return 0 if the capability is granted for @tsk.
+ * @quotactl:
+ * Check whether the quotactl syscall is allowed for this @sb.
+ * @quota_on:
+ * Check whether QUOTAON is allowed for this @dentry.
* @syslog:
* Check permission before accessing the kernel message ring or changing
* logging to the console.
@@ -1416,6 +1508,20 @@
* @ctx is a pointer in which to place the allocated security context.
* @ctxlen points to the place to put the length of @ctx.
*
+ * Security hooks for the general notification queue:
+ *
+ * @post_notification:
+ * Check to see if a watch notification can be posted to a particular
+ * queue.
+ * @w_cred: The credentials of whoever set the watch.
+ * @cred: The event-triggerer's credentials
+ * @n: The notification being posted
+ *
+ * @watch_key:
+ * Check to see if a process is allowed to watch for event notifications
+ * from a key or keyring.
+ * @key: The key to watch.
+ *
* Security hooks for using the eBPF maps and programs functionalities through
* eBPF syscalls.
*
@@ -1449,632 +1555,51 @@
* @bpf_prog_free_security:
* Clean up the security information stored inside bpf prog.
*
- * @locked_down
+ * @locked_down:
* Determine whether a kernel feature that potentially enables arbitrary
* code execution in kernel space should be permitted.
*
* @what: kernel feature being accessed
+ *
+ * Security hooks for perf events
+ *
+ * @perf_event_open:
+ * Check whether the @type of perf_event_open syscall is allowed.
+ * @perf_event_alloc:
+ * Allocate and save perf_event security info.
+ * @perf_event_free:
+ * Release (free) perf_event security info.
+ * @perf_event_read:
+ * Read perf_event security info if allowed.
+ * @perf_event_write:
+ * Write perf_event security info if allowed.
+ *
+ * Security hooks for io_uring
+ *
+ * @uring_override_creds:
+ * Check if the current task, executing an io_uring operation, is allowed
+ * to override its credentials with @new.
+ *
+ * @new: the new creds to use
+ *
+ * @uring_sqpoll:
+ * Check whether the current task is allowed to spawn an io_uring polling
+ * thread (IORING_SETUP_SQPOLL).
+ *
+ * @uring_cmd:
+ * Check whether the file_operations uring_cmd is allowed to run.
+ *
*/
union security_list_options {
- int (*binder_set_context_mgr)(struct task_struct *mgr);
- int (*binder_transaction)(struct task_struct *from,
- struct task_struct *to);
- int (*binder_transfer_binder)(struct task_struct *from,
- struct task_struct *to);
- int (*binder_transfer_file)(struct task_struct *from,
- struct task_struct *to,
- struct file *file);
-
- int (*ptrace_access_check)(struct task_struct *child,
- unsigned int mode);
- int (*ptrace_traceme)(struct task_struct *parent);
- int (*capget)(struct task_struct *target, kernel_cap_t *effective,
- kernel_cap_t *inheritable, kernel_cap_t *permitted);
- int (*capset)(struct cred *new, const struct cred *old,
- const kernel_cap_t *effective,
- const kernel_cap_t *inheritable,
- const kernel_cap_t *permitted);
- int (*capable)(const struct cred *cred,
- struct user_namespace *ns,
- int cap,
- unsigned int opts);
- int (*quotactl)(int cmds, int type, int id, struct super_block *sb);
- int (*quota_on)(struct dentry *dentry);
- int (*syslog)(int type);
- int (*settime)(const struct timespec64 *ts, const struct timezone *tz);
- int (*vm_enough_memory)(struct mm_struct *mm, long pages);
-
- int (*bprm_set_creds)(struct linux_binprm *bprm);
- int (*bprm_check_security)(struct linux_binprm *bprm);
- void (*bprm_committing_creds)(struct linux_binprm *bprm);
- void (*bprm_committed_creds)(struct linux_binprm *bprm);
-
- int (*fs_context_dup)(struct fs_context *fc, struct fs_context *src_sc);
- int (*fs_context_parse_param)(struct fs_context *fc, struct fs_parameter *param);
-
- int (*sb_alloc_security)(struct super_block *sb);
- void (*sb_free_security)(struct super_block *sb);
- void (*sb_free_mnt_opts)(void *mnt_opts);
- int (*sb_eat_lsm_opts)(char *orig, void **mnt_opts);
- int (*sb_remount)(struct super_block *sb, void *mnt_opts);
- int (*sb_kern_mount)(struct super_block *sb);
- int (*sb_show_options)(struct seq_file *m, struct super_block *sb);
- int (*sb_statfs)(struct dentry *dentry);
- int (*sb_mount)(const char *dev_name, const struct path *path,
- const char *type, unsigned long flags, void *data);
- int (*sb_umount)(struct vfsmount *mnt, int flags);
- int (*sb_pivotroot)(const struct path *old_path, const struct path *new_path);
- int (*sb_set_mnt_opts)(struct super_block *sb,
- void *mnt_opts,
- unsigned long kern_flags,
- unsigned long *set_kern_flags);
- int (*sb_clone_mnt_opts)(const struct super_block *oldsb,
- struct super_block *newsb,
- unsigned long kern_flags,
- unsigned long *set_kern_flags);
- int (*sb_add_mnt_opt)(const char *option, const char *val, int len,
- void **mnt_opts);
- int (*move_mount)(const struct path *from_path, const struct path *to_path);
- int (*dentry_init_security)(struct dentry *dentry, int mode,
- const struct qstr *name, void **ctx,
- u32 *ctxlen);
- int (*dentry_create_files_as)(struct dentry *dentry, int mode,
- struct qstr *name,
- const struct cred *old,
- struct cred *new);
-
-
-#ifdef CONFIG_SECURITY_PATH
- int (*path_unlink)(const struct path *dir, struct dentry *dentry);
- int (*path_mkdir)(const struct path *dir, struct dentry *dentry,
- umode_t mode);
- int (*path_rmdir)(const struct path *dir, struct dentry *dentry);
- int (*path_mknod)(const struct path *dir, struct dentry *dentry,
- umode_t mode, unsigned int dev);
- int (*path_truncate)(const struct path *path);
- int (*path_symlink)(const struct path *dir, struct dentry *dentry,
- const char *old_name);
- int (*path_link)(struct dentry *old_dentry, const struct path *new_dir,
- struct dentry *new_dentry);
- int (*path_rename)(const struct path *old_dir, struct dentry *old_dentry,
- const struct path *new_dir,
- struct dentry *new_dentry);
- int (*path_chmod)(const struct path *path, umode_t mode);
- int (*path_chown)(const struct path *path, kuid_t uid, kgid_t gid);
- int (*path_chroot)(const struct path *path);
-#endif
- /* Needed for inode based security check */
- int (*path_notify)(const struct path *path, u64 mask,
- unsigned int obj_type);
- int (*inode_alloc_security)(struct inode *inode);
- void (*inode_free_security)(struct inode *inode);
- int (*inode_init_security)(struct inode *inode, struct inode *dir,
- const struct qstr *qstr,
- const char **name, void **value,
- size_t *len);
- int (*inode_create)(struct inode *dir, struct dentry *dentry,
- umode_t mode);
- int (*inode_link)(struct dentry *old_dentry, struct inode *dir,
- struct dentry *new_dentry);
- int (*inode_unlink)(struct inode *dir, struct dentry *dentry);
- int (*inode_symlink)(struct inode *dir, struct dentry *dentry,
- const char *old_name);
- int (*inode_mkdir)(struct inode *dir, struct dentry *dentry,
- umode_t mode);
- int (*inode_rmdir)(struct inode *dir, struct dentry *dentry);
- int (*inode_mknod)(struct inode *dir, struct dentry *dentry,
- umode_t mode, dev_t dev);
- int (*inode_rename)(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir,
- struct dentry *new_dentry);
- int (*inode_readlink)(struct dentry *dentry);
- int (*inode_follow_link)(struct dentry *dentry, struct inode *inode,
- bool rcu);
- int (*inode_permission)(struct inode *inode, int mask);
- int (*inode_setattr)(struct dentry *dentry, struct iattr *attr);
- int (*inode_getattr)(const struct path *path);
- int (*inode_setxattr)(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags);
- void (*inode_post_setxattr)(struct dentry *dentry, const char *name,
- const void *value, size_t size,
- int flags);
- int (*inode_getxattr)(struct dentry *dentry, const char *name);
- int (*inode_listxattr)(struct dentry *dentry);
- int (*inode_removexattr)(struct dentry *dentry, const char *name);
- int (*inode_need_killpriv)(struct dentry *dentry);
- int (*inode_killpriv)(struct dentry *dentry);
- int (*inode_getsecurity)(struct inode *inode, const char *name,
- void **buffer, bool alloc);
- int (*inode_setsecurity)(struct inode *inode, const char *name,
- const void *value, size_t size,
- int flags);
- int (*inode_listsecurity)(struct inode *inode, char *buffer,
- size_t buffer_size);
- void (*inode_getsecid)(struct inode *inode, u32 *secid);
- int (*inode_copy_up)(struct dentry *src, struct cred **new);
- int (*inode_copy_up_xattr)(const char *name);
-
- int (*kernfs_init_security)(struct kernfs_node *kn_dir,
- struct kernfs_node *kn);
-
- int (*file_permission)(struct file *file, int mask);
- int (*file_alloc_security)(struct file *file);
- void (*file_free_security)(struct file *file);
- int (*file_ioctl)(struct file *file, unsigned int cmd,
- unsigned long arg);
- int (*mmap_addr)(unsigned long addr);
- int (*mmap_file)(struct file *file, unsigned long reqprot,
- unsigned long prot, unsigned long flags);
- int (*file_mprotect)(struct vm_area_struct *vma, unsigned long reqprot,
- unsigned long prot);
- int (*file_lock)(struct file *file, unsigned int cmd);
- int (*file_fcntl)(struct file *file, unsigned int cmd,
- unsigned long arg);
- void (*file_set_fowner)(struct file *file);
- int (*file_send_sigiotask)(struct task_struct *tsk,
- struct fown_struct *fown, int sig);
- int (*file_receive)(struct file *file);
- int (*file_open)(struct file *file);
-
- int (*task_alloc)(struct task_struct *task, unsigned long clone_flags);
- void (*task_free)(struct task_struct *task);
- int (*cred_alloc_blank)(struct cred *cred, gfp_t gfp);
- void (*cred_free)(struct cred *cred);
- int (*cred_prepare)(struct cred *new, const struct cred *old,
- gfp_t gfp);
- void (*cred_transfer)(struct cred *new, const struct cred *old);
- void (*cred_getsecid)(const struct cred *c, u32 *secid);
- int (*kernel_act_as)(struct cred *new, u32 secid);
- int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
- int (*kernel_module_request)(char *kmod_name);
- int (*kernel_load_data)(enum kernel_load_data_id id);
- int (*kernel_read_file)(struct file *file, enum kernel_read_file_id id);
- int (*kernel_post_read_file)(struct file *file, char *buf, loff_t size,
- enum kernel_read_file_id id);
- int (*task_fix_setuid)(struct cred *new, const struct cred *old,
- int flags);
- int (*task_setpgid)(struct task_struct *p, pid_t pgid);
- int (*task_getpgid)(struct task_struct *p);
- int (*task_getsid)(struct task_struct *p);
- void (*task_getsecid)(struct task_struct *p, u32 *secid);
- int (*task_setnice)(struct task_struct *p, int nice);
- int (*task_setioprio)(struct task_struct *p, int ioprio);
- int (*task_getioprio)(struct task_struct *p);
- int (*task_prlimit)(const struct cred *cred, const struct cred *tcred,
- unsigned int flags);
- int (*task_setrlimit)(struct task_struct *p, unsigned int resource,
- struct rlimit *new_rlim);
- int (*task_setscheduler)(struct task_struct *p);
- int (*task_getscheduler)(struct task_struct *p);
- int (*task_movememory)(struct task_struct *p);
- int (*task_kill)(struct task_struct *p, struct kernel_siginfo *info,
- int sig, const struct cred *cred);
- int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3,
- unsigned long arg4, unsigned long arg5);
- void (*task_to_inode)(struct task_struct *p, struct inode *inode);
-
- int (*ipc_permission)(struct kern_ipc_perm *ipcp, short flag);
- void (*ipc_getsecid)(struct kern_ipc_perm *ipcp, u32 *secid);
-
- int (*msg_msg_alloc_security)(struct msg_msg *msg);
- void (*msg_msg_free_security)(struct msg_msg *msg);
-
- int (*msg_queue_alloc_security)(struct kern_ipc_perm *perm);
- void (*msg_queue_free_security)(struct kern_ipc_perm *perm);
- int (*msg_queue_associate)(struct kern_ipc_perm *perm, int msqflg);
- int (*msg_queue_msgctl)(struct kern_ipc_perm *perm, int cmd);
- int (*msg_queue_msgsnd)(struct kern_ipc_perm *perm, struct msg_msg *msg,
- int msqflg);
- int (*msg_queue_msgrcv)(struct kern_ipc_perm *perm, struct msg_msg *msg,
- struct task_struct *target, long type,
- int mode);
-
- int (*shm_alloc_security)(struct kern_ipc_perm *perm);
- void (*shm_free_security)(struct kern_ipc_perm *perm);
- int (*shm_associate)(struct kern_ipc_perm *perm, int shmflg);
- int (*shm_shmctl)(struct kern_ipc_perm *perm, int cmd);
- int (*shm_shmat)(struct kern_ipc_perm *perm, char __user *shmaddr,
- int shmflg);
-
- int (*sem_alloc_security)(struct kern_ipc_perm *perm);
- void (*sem_free_security)(struct kern_ipc_perm *perm);
- int (*sem_associate)(struct kern_ipc_perm *perm, int semflg);
- int (*sem_semctl)(struct kern_ipc_perm *perm, int cmd);
- int (*sem_semop)(struct kern_ipc_perm *perm, struct sembuf *sops,
- unsigned nsops, int alter);
-
- int (*netlink_send)(struct sock *sk, struct sk_buff *skb);
-
- void (*d_instantiate)(struct dentry *dentry, struct inode *inode);
-
- int (*getprocattr)(struct task_struct *p, char *name, char **value);
- int (*setprocattr)(const char *name, void *value, size_t size);
- int (*ismaclabel)(const char *name);
- int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen);
- int (*secctx_to_secid)(const char *secdata, u32 seclen, u32 *secid);
- void (*release_secctx)(char *secdata, u32 seclen);
-
- void (*inode_invalidate_secctx)(struct inode *inode);
- int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen);
- int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen);
- int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen);
-
-#ifdef CONFIG_SECURITY_NETWORK
- int (*unix_stream_connect)(struct sock *sock, struct sock *other,
- struct sock *newsk);
- int (*unix_may_send)(struct socket *sock, struct socket *other);
-
- int (*socket_create)(int family, int type, int protocol, int kern);
- int (*socket_post_create)(struct socket *sock, int family, int type,
- int protocol, int kern);
- int (*socket_socketpair)(struct socket *socka, struct socket *sockb);
- int (*socket_bind)(struct socket *sock, struct sockaddr *address,
- int addrlen);
- int (*socket_connect)(struct socket *sock, struct sockaddr *address,
- int addrlen);
- int (*socket_listen)(struct socket *sock, int backlog);
- int (*socket_accept)(struct socket *sock, struct socket *newsock);
- int (*socket_sendmsg)(struct socket *sock, struct msghdr *msg,
- int size);
- int (*socket_recvmsg)(struct socket *sock, struct msghdr *msg,
- int size, int flags);
- int (*socket_getsockname)(struct socket *sock);
- int (*socket_getpeername)(struct socket *sock);
- int (*socket_getsockopt)(struct socket *sock, int level, int optname);
- int (*socket_setsockopt)(struct socket *sock, int level, int optname);
- int (*socket_shutdown)(struct socket *sock, int how);
- int (*socket_sock_rcv_skb)(struct sock *sk, struct sk_buff *skb);
- int (*socket_getpeersec_stream)(struct socket *sock,
- char __user *optval,
- int __user *optlen, unsigned len);
- int (*socket_getpeersec_dgram)(struct socket *sock,
- struct sk_buff *skb, u32 *secid);
- int (*sk_alloc_security)(struct sock *sk, int family, gfp_t priority);
- void (*sk_free_security)(struct sock *sk);
- void (*sk_clone_security)(const struct sock *sk, struct sock *newsk);
- void (*sk_getsecid)(struct sock *sk, u32 *secid);
- void (*sock_graft)(struct sock *sk, struct socket *parent);
- int (*inet_conn_request)(struct sock *sk, struct sk_buff *skb,
- struct request_sock *req);
- void (*inet_csk_clone)(struct sock *newsk,
- const struct request_sock *req);
- void (*inet_conn_established)(struct sock *sk, struct sk_buff *skb);
- int (*secmark_relabel_packet)(u32 secid);
- void (*secmark_refcount_inc)(void);
- void (*secmark_refcount_dec)(void);
- void (*req_classify_flow)(const struct request_sock *req,
- struct flowi *fl);
- int (*tun_dev_alloc_security)(void **security);
- void (*tun_dev_free_security)(void *security);
- int (*tun_dev_create)(void);
- int (*tun_dev_attach_queue)(void *security);
- int (*tun_dev_attach)(struct sock *sk, void *security);
- int (*tun_dev_open)(void *security);
- int (*sctp_assoc_request)(struct sctp_endpoint *ep,
- struct sk_buff *skb);
- int (*sctp_bind_connect)(struct sock *sk, int optname,
- struct sockaddr *address, int addrlen);
- void (*sctp_sk_clone)(struct sctp_endpoint *ep, struct sock *sk,
- struct sock *newsk);
-#endif /* CONFIG_SECURITY_NETWORK */
-
-#ifdef CONFIG_SECURITY_INFINIBAND
- int (*ib_pkey_access)(void *sec, u64 subnet_prefix, u16 pkey);
- int (*ib_endport_manage_subnet)(void *sec, const char *dev_name,
- u8 port_num);
- int (*ib_alloc_security)(void **sec);
- void (*ib_free_security)(void *sec);
-#endif /* CONFIG_SECURITY_INFINIBAND */
-
-#ifdef CONFIG_SECURITY_NETWORK_XFRM
- int (*xfrm_policy_alloc_security)(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *sec_ctx,
- gfp_t gfp);
- int (*xfrm_policy_clone_security)(struct xfrm_sec_ctx *old_ctx,
- struct xfrm_sec_ctx **new_ctx);
- void (*xfrm_policy_free_security)(struct xfrm_sec_ctx *ctx);
- int (*xfrm_policy_delete_security)(struct xfrm_sec_ctx *ctx);
- int (*xfrm_state_alloc)(struct xfrm_state *x,
- struct xfrm_user_sec_ctx *sec_ctx);
- int (*xfrm_state_alloc_acquire)(struct xfrm_state *x,
- struct xfrm_sec_ctx *polsec,
- u32 secid);
- void (*xfrm_state_free_security)(struct xfrm_state *x);
- int (*xfrm_state_delete_security)(struct xfrm_state *x);
- int (*xfrm_policy_lookup)(struct xfrm_sec_ctx *ctx, u32 fl_secid,
- u8 dir);
- int (*xfrm_state_pol_flow_match)(struct xfrm_state *x,
- struct xfrm_policy *xp,
- const struct flowi *fl);
- int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall);
-#endif /* CONFIG_SECURITY_NETWORK_XFRM */
-
- /* key management security hooks */
-#ifdef CONFIG_KEYS
- int (*key_alloc)(struct key *key, const struct cred *cred,
- unsigned long flags);
- void (*key_free)(struct key *key);
- int (*key_permission)(key_ref_t key_ref, const struct cred *cred,
- unsigned perm);
- int (*key_getsecurity)(struct key *key, char **_buffer);
-#endif /* CONFIG_KEYS */
-
-#ifdef CONFIG_AUDIT
- int (*audit_rule_init)(u32 field, u32 op, char *rulestr,
- void **lsmrule);
- int (*audit_rule_known)(struct audit_krule *krule);
- int (*audit_rule_match)(u32 secid, u32 field, u32 op, void *lsmrule);
- void (*audit_rule_free)(void *lsmrule);
-#endif /* CONFIG_AUDIT */
-
-#ifdef CONFIG_BPF_SYSCALL
- int (*bpf)(int cmd, union bpf_attr *attr,
- unsigned int size);
- int (*bpf_map)(struct bpf_map *map, fmode_t fmode);
- int (*bpf_prog)(struct bpf_prog *prog);
- int (*bpf_map_alloc_security)(struct bpf_map *map);
- void (*bpf_map_free_security)(struct bpf_map *map);
- int (*bpf_prog_alloc_security)(struct bpf_prog_aux *aux);
- void (*bpf_prog_free_security)(struct bpf_prog_aux *aux);
-#endif /* CONFIG_BPF_SYSCALL */
- int (*locked_down)(enum lockdown_reason what);
-#ifdef CONFIG_PERF_EVENTS
- int (*perf_event_open)(struct perf_event_attr *attr, int type);
- int (*perf_event_alloc)(struct perf_event *event);
- void (*perf_event_free)(struct perf_event *event);
- int (*perf_event_read)(struct perf_event *event);
- int (*perf_event_write)(struct perf_event *event);
-
-#endif
+ #define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
+ #include "lsm_hook_defs.h"
+ #undef LSM_HOOK
};
struct security_hook_heads {
- struct hlist_head binder_set_context_mgr;
- struct hlist_head binder_transaction;
- struct hlist_head binder_transfer_binder;
- struct hlist_head binder_transfer_file;
- struct hlist_head ptrace_access_check;
- struct hlist_head ptrace_traceme;
- struct hlist_head capget;
- struct hlist_head capset;
- struct hlist_head capable;
- struct hlist_head quotactl;
- struct hlist_head quota_on;
- struct hlist_head syslog;
- struct hlist_head settime;
- struct hlist_head vm_enough_memory;
- struct hlist_head bprm_set_creds;
- struct hlist_head bprm_check_security;
- struct hlist_head bprm_committing_creds;
- struct hlist_head bprm_committed_creds;
- struct hlist_head fs_context_dup;
- struct hlist_head fs_context_parse_param;
- struct hlist_head sb_alloc_security;
- struct hlist_head sb_free_security;
- struct hlist_head sb_free_mnt_opts;
- struct hlist_head sb_eat_lsm_opts;
- struct hlist_head sb_remount;
- struct hlist_head sb_kern_mount;
- struct hlist_head sb_show_options;
- struct hlist_head sb_statfs;
- struct hlist_head sb_mount;
- struct hlist_head sb_umount;
- struct hlist_head sb_pivotroot;
- struct hlist_head sb_set_mnt_opts;
- struct hlist_head sb_clone_mnt_opts;
- struct hlist_head sb_add_mnt_opt;
- struct hlist_head move_mount;
- struct hlist_head dentry_init_security;
- struct hlist_head dentry_create_files_as;
-#ifdef CONFIG_SECURITY_PATH
- struct hlist_head path_unlink;
- struct hlist_head path_mkdir;
- struct hlist_head path_rmdir;
- struct hlist_head path_mknod;
- struct hlist_head path_truncate;
- struct hlist_head path_symlink;
- struct hlist_head path_link;
- struct hlist_head path_rename;
- struct hlist_head path_chmod;
- struct hlist_head path_chown;
- struct hlist_head path_chroot;
-#endif
- /* Needed for inode based modules as well */
- struct hlist_head path_notify;
- struct hlist_head inode_alloc_security;
- struct hlist_head inode_free_security;
- struct hlist_head inode_init_security;
- struct hlist_head inode_create;
- struct hlist_head inode_link;
- struct hlist_head inode_unlink;
- struct hlist_head inode_symlink;
- struct hlist_head inode_mkdir;
- struct hlist_head inode_rmdir;
- struct hlist_head inode_mknod;
- struct hlist_head inode_rename;
- struct hlist_head inode_readlink;
- struct hlist_head inode_follow_link;
- struct hlist_head inode_permission;
- struct hlist_head inode_setattr;
- struct hlist_head inode_getattr;
- struct hlist_head inode_setxattr;
- struct hlist_head inode_post_setxattr;
- struct hlist_head inode_getxattr;
- struct hlist_head inode_listxattr;
- struct hlist_head inode_removexattr;
- struct hlist_head inode_need_killpriv;
- struct hlist_head inode_killpriv;
- struct hlist_head inode_getsecurity;
- struct hlist_head inode_setsecurity;
- struct hlist_head inode_listsecurity;
- struct hlist_head inode_getsecid;
- struct hlist_head inode_copy_up;
- struct hlist_head inode_copy_up_xattr;
- struct hlist_head kernfs_init_security;
- struct hlist_head file_permission;
- struct hlist_head file_alloc_security;
- struct hlist_head file_free_security;
- struct hlist_head file_ioctl;
- struct hlist_head mmap_addr;
- struct hlist_head mmap_file;
- struct hlist_head file_mprotect;
- struct hlist_head file_lock;
- struct hlist_head file_fcntl;
- struct hlist_head file_set_fowner;
- struct hlist_head file_send_sigiotask;
- struct hlist_head file_receive;
- struct hlist_head file_open;
- struct hlist_head task_alloc;
- struct hlist_head task_free;
- struct hlist_head cred_alloc_blank;
- struct hlist_head cred_free;
- struct hlist_head cred_prepare;
- struct hlist_head cred_transfer;
- struct hlist_head cred_getsecid;
- struct hlist_head kernel_act_as;
- struct hlist_head kernel_create_files_as;
- struct hlist_head kernel_load_data;
- struct hlist_head kernel_read_file;
- struct hlist_head kernel_post_read_file;
- struct hlist_head kernel_module_request;
- struct hlist_head task_fix_setuid;
- struct hlist_head task_setpgid;
- struct hlist_head task_getpgid;
- struct hlist_head task_getsid;
- struct hlist_head task_getsecid;
- struct hlist_head task_setnice;
- struct hlist_head task_setioprio;
- struct hlist_head task_getioprio;
- struct hlist_head task_prlimit;
- struct hlist_head task_setrlimit;
- struct hlist_head task_setscheduler;
- struct hlist_head task_getscheduler;
- struct hlist_head task_movememory;
- struct hlist_head task_kill;
- struct hlist_head task_prctl;
- struct hlist_head task_to_inode;
- struct hlist_head ipc_permission;
- struct hlist_head ipc_getsecid;
- struct hlist_head msg_msg_alloc_security;
- struct hlist_head msg_msg_free_security;
- struct hlist_head msg_queue_alloc_security;
- struct hlist_head msg_queue_free_security;
- struct hlist_head msg_queue_associate;
- struct hlist_head msg_queue_msgctl;
- struct hlist_head msg_queue_msgsnd;
- struct hlist_head msg_queue_msgrcv;
- struct hlist_head shm_alloc_security;
- struct hlist_head shm_free_security;
- struct hlist_head shm_associate;
- struct hlist_head shm_shmctl;
- struct hlist_head shm_shmat;
- struct hlist_head sem_alloc_security;
- struct hlist_head sem_free_security;
- struct hlist_head sem_associate;
- struct hlist_head sem_semctl;
- struct hlist_head sem_semop;
- struct hlist_head netlink_send;
- struct hlist_head d_instantiate;
- struct hlist_head getprocattr;
- struct hlist_head setprocattr;
- struct hlist_head ismaclabel;
- struct hlist_head secid_to_secctx;
- struct hlist_head secctx_to_secid;
- struct hlist_head release_secctx;
- struct hlist_head inode_invalidate_secctx;
- struct hlist_head inode_notifysecctx;
- struct hlist_head inode_setsecctx;
- struct hlist_head inode_getsecctx;
-#ifdef CONFIG_SECURITY_NETWORK
- struct hlist_head unix_stream_connect;
- struct hlist_head unix_may_send;
- struct hlist_head socket_create;
- struct hlist_head socket_post_create;
- struct hlist_head socket_socketpair;
- struct hlist_head socket_bind;
- struct hlist_head socket_connect;
- struct hlist_head socket_listen;
- struct hlist_head socket_accept;
- struct hlist_head socket_sendmsg;
- struct hlist_head socket_recvmsg;
- struct hlist_head socket_getsockname;
- struct hlist_head socket_getpeername;
- struct hlist_head socket_getsockopt;
- struct hlist_head socket_setsockopt;
- struct hlist_head socket_shutdown;
- struct hlist_head socket_sock_rcv_skb;
- struct hlist_head socket_getpeersec_stream;
- struct hlist_head socket_getpeersec_dgram;
- struct hlist_head sk_alloc_security;
- struct hlist_head sk_free_security;
- struct hlist_head sk_clone_security;
- struct hlist_head sk_getsecid;
- struct hlist_head sock_graft;
- struct hlist_head inet_conn_request;
- struct hlist_head inet_csk_clone;
- struct hlist_head inet_conn_established;
- struct hlist_head secmark_relabel_packet;
- struct hlist_head secmark_refcount_inc;
- struct hlist_head secmark_refcount_dec;
- struct hlist_head req_classify_flow;
- struct hlist_head tun_dev_alloc_security;
- struct hlist_head tun_dev_free_security;
- struct hlist_head tun_dev_create;
- struct hlist_head tun_dev_attach_queue;
- struct hlist_head tun_dev_attach;
- struct hlist_head tun_dev_open;
- struct hlist_head sctp_assoc_request;
- struct hlist_head sctp_bind_connect;
- struct hlist_head sctp_sk_clone;
-#endif /* CONFIG_SECURITY_NETWORK */
-#ifdef CONFIG_SECURITY_INFINIBAND
- struct hlist_head ib_pkey_access;
- struct hlist_head ib_endport_manage_subnet;
- struct hlist_head ib_alloc_security;
- struct hlist_head ib_free_security;
-#endif /* CONFIG_SECURITY_INFINIBAND */
-#ifdef CONFIG_SECURITY_NETWORK_XFRM
- struct hlist_head xfrm_policy_alloc_security;
- struct hlist_head xfrm_policy_clone_security;
- struct hlist_head xfrm_policy_free_security;
- struct hlist_head xfrm_policy_delete_security;
- struct hlist_head xfrm_state_alloc;
- struct hlist_head xfrm_state_alloc_acquire;
- struct hlist_head xfrm_state_free_security;
- struct hlist_head xfrm_state_delete_security;
- struct hlist_head xfrm_policy_lookup;
- struct hlist_head xfrm_state_pol_flow_match;
- struct hlist_head xfrm_decode_session;
-#endif /* CONFIG_SECURITY_NETWORK_XFRM */
-#ifdef CONFIG_KEYS
- struct hlist_head key_alloc;
- struct hlist_head key_free;
- struct hlist_head key_permission;
- struct hlist_head key_getsecurity;
-#endif /* CONFIG_KEYS */
-#ifdef CONFIG_AUDIT
- struct hlist_head audit_rule_init;
- struct hlist_head audit_rule_known;
- struct hlist_head audit_rule_match;
- struct hlist_head audit_rule_free;
-#endif /* CONFIG_AUDIT */
-#ifdef CONFIG_BPF_SYSCALL
- struct hlist_head bpf;
- struct hlist_head bpf_map;
- struct hlist_head bpf_prog;
- struct hlist_head bpf_map_alloc_security;
- struct hlist_head bpf_map_free_security;
- struct hlist_head bpf_prog_alloc_security;
- struct hlist_head bpf_prog_free_security;
-#endif /* CONFIG_BPF_SYSCALL */
- struct hlist_head locked_down;
-#ifdef CONFIG_PERF_EVENTS
- struct hlist_head perf_event_open;
- struct hlist_head perf_event_alloc;
- struct hlist_head perf_event_free;
- struct hlist_head perf_event_read;
- struct hlist_head perf_event_write;
-#endif
+ #define LSM_HOOK(RET, DEFAULT, NAME, ...) struct hlist_head NAME;
+ #include "lsm_hook_defs.h"
+ #undef LSM_HOOK
} __randomize_layout;
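
Both structures above are now generated from one X-macro list, so each hook is declared in a single place. As an illustrative sketch (the authoritative list lives in include/linux/lsm_hook_defs.h), an entry of the following shape expands once per include:

/* entry in lsm_hook_defs.h (shape illustrative): */
LSM_HOOK(int, 0, file_open, struct file *file)

/* expansion inside union security_list_options: */
int (*file_open)(struct file *file);

/* expansion inside struct security_hook_heads: */
struct hlist_head file_open;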
/*
@@ -2085,7 +1610,7 @@ struct security_hook_list {
struct hlist_node list;
struct hlist_head *head;
union security_list_options hook;
- char *lsm;
+ const char *lsm;
} __randomize_layout;
/*
@@ -2095,12 +1620,19 @@ struct lsm_blob_sizes {
int lbs_cred;
int lbs_file;
int lbs_inode;
+ int lbs_superblock;
int lbs_ipc;
int lbs_msg_msg;
int lbs_task;
};
/*
+ * LSM_RET_VOID is used as the default value in LSM_HOOK definitions for void
+ * LSM hooks (in include/linux/lsm_hook_defs.h).
+ */
+#define LSM_RET_VOID ((void) 0)
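
Void hooks have no return value to default to, so their DEFAULT column carries LSM_RET_VOID; an entry of this shape (illustrative) reads:

LSM_HOOK(void, LSM_RET_VOID, inode_free_security, struct inode *inode)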
+
+/*
* Initializing a security_hook_list structure takes
* up a lot of space in a source file. This macro takes
* care of the common case and reduces the amount of
@@ -2113,7 +1645,7 @@ extern struct security_hook_heads security_hook_heads;
extern char *lsm_names;
extern void security_add_hooks(struct security_hook_list *hooks, int count,
- char *lsm);
+ const char *lsm);
#define LSM_FLAG_LEGACY_MAJOR BIT(0)
#define LSM_FLAG_EXCLUSIVE BIT(1)
@@ -2137,12 +1669,12 @@ extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[];
#define DEFINE_LSM(lsm) \
static struct lsm_info __lsm_##lsm \
- __used __section(.lsm_info.init) \
+ __used __section(".lsm_info.init") \
__aligned(sizeof(unsigned long))
#define DEFINE_EARLY_LSM(lsm) \
static struct lsm_info __early_lsm_##lsm \
- __used __section(.early_lsm_info.init) \
+ __used __section(".early_lsm_info.init") \
__aligned(sizeof(unsigned long))
#ifdef CONFIG_SECURITY_SELINUX_DISABLE
diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h
index a7330eb3ec64..7dd1f01ec4f9 100644
--- a/include/linux/mISDNif.h
+++ b/include/linux/mISDNif.h
@@ -18,7 +18,6 @@
#ifndef mISDNIF_H
#define mISDNIF_H
-#include <stdarg.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/socket.h>
diff --git a/include/linux/mailbox/arm_mhuv2_message.h b/include/linux/mailbox/arm_mhuv2_message.h
new file mode 100644
index 000000000000..821b9d96daa4
--- /dev/null
+++ b/include/linux/mailbox/arm_mhuv2_message.h
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM MHUv2 Mailbox Message
+ *
+ * Copyright (C) 2020 Arm Ltd.
+ * Copyright (C) 2020 Linaro Ltd.
+ */
+
+#ifndef _LINUX_ARM_MHUV2_MESSAGE_H_
+#define _LINUX_ARM_MHUV2_MESSAGE_H_
+
+#include <linux/types.h>
+
+/* Data structure for data-transfer protocol */
+struct arm_mhuv2_mbox_msg {
+ void *data;
+ size_t len;
+};
+
+#endif /* _LINUX_ARM_MHUV2_MESSAGE_H_ */
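
A mailbox client using the MHUv2 data-transfer protocol wraps its payload in this struct before handing it to the framework. A minimal sketch, assuming chan was obtained earlier via mbox_request_channel():

#include <linux/mailbox_client.h>
#include <linux/mailbox/arm_mhuv2_message.h>

static int send_payload(struct mbox_chan *chan, void *buf, size_t len)
{
	struct arm_mhuv2_mbox_msg msg = {
		.data = buf,	/* payload to transfer */
		.len  = len,	/* payload length in bytes */
	};

	/* queue the message on the channel; returns < 0 on error */
	return mbox_send_message(chan, &msg);
}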
diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
index a4dc45fbec0a..a8f0070c7aa9 100644
--- a/include/linux/mailbox/mtk-cmdq-mailbox.h
+++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
@@ -17,6 +17,7 @@
#define CMDQ_JUMP_PASS CMDQ_INST_SIZE
#define CMDQ_WFE_UPDATE BIT(31)
+#define CMDQ_WFE_UPDATE_VALUE BIT(16)
#define CMDQ_WFE_WAIT BIT(15)
#define CMDQ_WFE_WAIT_VALUE 0x1
@@ -27,8 +28,7 @@
* bit 16-27: update value
* bit 31: 1 - update, 0 - no update
*/
-#define CMDQ_WFE_OPTION (CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | \
- CMDQ_WFE_WAIT_VALUE)
+#define CMDQ_WFE_OPTION (CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE)
/** cmdq event maximum */
#define CMDQ_MAX_EVENT 0x3ff
@@ -59,23 +59,15 @@ enum cmdq_code {
CMDQ_CODE_JUMP = 0x10,
CMDQ_CODE_WFE = 0x20,
CMDQ_CODE_EOC = 0x40,
-};
-
-enum cmdq_cb_status {
- CMDQ_CB_NORMAL = 0,
- CMDQ_CB_ERROR
+ CMDQ_CODE_READ_S = 0x80,
+ CMDQ_CODE_WRITE_S = 0x90,
+ CMDQ_CODE_WRITE_S_MASK = 0x91,
+ CMDQ_CODE_LOGIC = 0xa0,
};
struct cmdq_cb_data {
- enum cmdq_cb_status sta;
- void *data;
-};
-
-typedef void (*cmdq_async_flush_cb)(struct cmdq_cb_data data);
-
-struct cmdq_task_cb {
- cmdq_async_flush_cb cb;
- void *data;
+ int sta;
+ struct cmdq_pkt *pkt;
};
struct cmdq_pkt {
@@ -83,9 +75,9 @@ struct cmdq_pkt {
dma_addr_t pa_base;
size_t cmd_buf_size; /* command occupied size */
size_t buf_size; /* real buffer size */
- struct cmdq_task_cb cb;
- struct cmdq_task_cb async_cb;
void *cl;
};
+u8 cmdq_get_shift_pa(struct mbox_chan *chan);
+
#endif /* __MTK_CMDQ_MAILBOX_H__ */
diff --git a/include/linux/mailbox/zynqmp-ipi-message.h b/include/linux/mailbox/zynqmp-ipi-message.h
index 9542b41eacfd..35ce84c8ca02 100644
--- a/include/linux/mailbox/zynqmp-ipi-message.h
+++ b/include/linux/mailbox/zynqmp-ipi-message.h
@@ -14,7 +14,7 @@
*/
struct zynqmp_ipi_message {
size_t len;
- u8 data[0];
+ u8 data[];
};
#endif /* _LINUX_ZYNQMP_IPI_MESSAGE_H_ */
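
With the flexible array member, the payload is allocated together with the header. A sketch using struct_size() from <linux/overflow.h> (payload and payload_len are caller-provided):

struct zynqmp_ipi_message *msg;

msg = kzalloc(struct_size(msg, data, payload_len), GFP_KERNEL);
if (!msg)
	return -ENOMEM;
msg->len = payload_len;
memcpy(msg->data, payload, payload_len);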
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
index 36d6ce673503..6fee33cb52f5 100644
--- a/include/linux/mailbox_controller.h
+++ b/include/linux/mailbox_controller.h
@@ -83,6 +83,7 @@ struct mbox_controller {
const struct of_phandle_args *sp);
/* Internal to API */
struct hrtimer poll_hrt;
+ spinlock_t poll_hrt_lock;
struct list_head node;
};
diff --git a/include/linux/map_benchmark.h b/include/linux/map_benchmark.h
new file mode 100644
index 000000000000..62674c83bde4
--- /dev/null
+++ b/include/linux/map_benchmark.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 HiSilicon Limited.
+ */
+
+#ifndef _KERNEL_DMA_BENCHMARK_H
+#define _KERNEL_DMA_BENCHMARK_H
+
+#define DMA_MAP_BENCHMARK _IOWR('d', 1, struct map_benchmark)
+#define DMA_MAP_MAX_THREADS 1024
+#define DMA_MAP_MAX_SECONDS 300
+#define DMA_MAP_MAX_TRANS_DELAY (10 * NSEC_PER_MSEC)
+
+#define DMA_MAP_BIDIRECTIONAL 0
+#define DMA_MAP_TO_DEVICE 1
+#define DMA_MAP_FROM_DEVICE 2
+
+struct map_benchmark {
+ __u64 avg_map_100ns; /* average map latency in 100ns */
+ __u64 map_stddev; /* standard deviation of map latency */
+	__u64 avg_unmap_100ns;		/* average unmap latency in 100ns */
+ __u64 unmap_stddev;
+ __u32 threads; /* how many threads will do map/unmap in parallel */
+ __u32 seconds; /* how long the test will last */
+ __s32 node; /* which numa node this benchmark will run on */
+ __u32 dma_bits; /* DMA addressing capability */
+ __u32 dma_dir; /* DMA data direction */
+ __u32 dma_trans_ns; /* time for DMA transmission in ns */
+	__u32 granule;	/* how many PAGE_SIZE-sized chunks to map/unmap at a time */
+};
+#endif /* _KERNEL_DMA_BENCHMARK_H */
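
This struct is shared with userspace: a test program fills in the tunables and issues the ioctl against the debugfs node exposed by the benchmark driver. A minimal sketch, assuming the node is /sys/kernel/debug/dma_map_benchmark as in the in-tree selftest:

struct map_benchmark map = {
	.threads  = 8,			/* 8 parallel map/unmap threads */
	.seconds  = 20,			/* run for 20 seconds */
	.node     = -1,			/* no NUMA node restriction */
	.dma_bits = 32,			/* test 32-bit DMA addressing */
	.dma_dir  = DMA_MAP_BIDIRECTIONAL,
	.granule  = 1,			/* one page per map/unmap */
};
int fd = open("/sys/kernel/debug/dma_map_benchmark", O_RDWR);

if (ioctl(fd, DMA_MAP_BENCHMARK, &map) == 0)
	printf("avg map latency: %llu x 100ns\n", map.avg_map_100ns);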
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
new file mode 100644
index 000000000000..e594db58a0f1
--- /dev/null
+++ b/include/linux/maple_tree.h
@@ -0,0 +1,692 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _LINUX_MAPLE_TREE_H
+#define _LINUX_MAPLE_TREE_H
+/*
+ * Maple Tree - An RCU-safe adaptive tree for storing ranges
+ * Copyright (c) 2018-2022 Oracle
+ * Authors: Liam R. Howlett <Liam.Howlett@Oracle.com>
+ * Matthew Wilcox <willy@infradead.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+/* #define CONFIG_MAPLE_RCU_DISABLED */
+/* #define CONFIG_DEBUG_MAPLE_TREE_VERBOSE */
+
+/*
+ * Allocated nodes are mutable until they have been inserted into the tree,
+ * at which time they cannot change their type until they have been removed
+ * from the tree and an RCU grace period has passed.
+ *
+ * Removed nodes have their ->parent set to point to themselves. RCU readers
+ * check ->parent before relying on the value that they loaded from the
+ * slots array. This lets us reuse the slots array for the RCU head.
+ *
+ * Nodes in the tree point to their parent unless bit 0 is set.
+ */
+#if defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64)
+/* 64bit sizes */
+#define MAPLE_NODE_SLOTS 31 /* 256 bytes including ->parent */
+#define MAPLE_RANGE64_SLOTS 16 /* 256 bytes */
+#define MAPLE_ARANGE64_SLOTS 10 /* 240 bytes */
+#define MAPLE_ARANGE64_META_MAX 15 /* Out of range for metadata */
+#define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 1)
+#else
+/* 32bit sizes */
+#define MAPLE_NODE_SLOTS 63 /* 256 bytes including ->parent */
+#define MAPLE_RANGE64_SLOTS 32 /* 256 bytes */
+#define MAPLE_ARANGE64_SLOTS 21 /* 240 bytes */
+#define MAPLE_ARANGE64_META_MAX 31 /* Out of range for metadata */
+#define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 2)
+#endif /* defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64) */
+
+#define MAPLE_NODE_MASK 255UL
+
+/*
+ * The node->parent of the root node has bit 0 set and the rest of the pointer
+ * is a pointer to the tree itself. No more bits are available in this pointer
+ * (on m68k, the data structure may only be 2-byte aligned).
+ *
+ * Internal non-root nodes can only have maple_range_* nodes as parents. The
+ * parent pointer is 256B aligned like all other tree nodes. When storing 32
+ * or 64 bit values, the offset can fit into 4 bits. The 16 bit values need an
+ * extra bit to store the offset. This extra bit comes from a reuse of the last
+ * bit in the node type. This is possible by using bit 1 to indicate if bit 2
+ * is part of the type or the slot.
+ *
+ * Once the type is decided, the decision of an allocation range type or a range
+ * type is done by examining the immutable tree flag for the MAPLE_ALLOC_RANGE
+ * flag.
+ *
+ * Node types:
+ * 0x??1 = Root
+ * 0x?00 = 16 bit nodes
+ * 0x010 = 32 bit nodes
+ * 0x110 = 64 bit nodes
+ *
+ * Slot size and location in the parent pointer:
+ * type : slot location
+ * 0x??1 : Root
+ * 0x?00 : 16 bit values, type in 0-1, slot in 2-6
+ * 0x010 : 32 bit values, type in 0-2, slot in 3-6
+ * 0x110 : 64 bit values, type in 0-2, slot in 3-6
+ */
+
+/*
+ * This metadata is used to optimize the gap updating code and in reverse
+ * searching for gaps or any other code that needs to find the end of the data.
+ */
+struct maple_metadata {
+ unsigned char end;
+ unsigned char gap;
+};
+
+/*
+ * Leaf nodes do not store pointers to nodes, they store user data. Users may
+ * store almost any bit pattern. As noted above, the optimisation of storing an
+ * entry at 0 in the root pointer cannot be done for data which have the bottom
+ * two bits set to '10'. We also reserve values with the bottom two bits set to
+ * '10' which are below 4096 (ie 2, 6, 10 .. 4094) for internal use. Some APIs
+ * return errnos as a negative errno shifted right by two bits and the bottom
+ * two bits set to '10', and while choosing to store these values in the array
+ * is not an error, it may lead to confusion if you're testing for an error with
+ * mas_is_err().
+ *
+ * Non-leaf nodes store the type of the node pointed to (enum maple_type in bits
+ * 3-6), bit 2 is reserved. That leaves bits 0-1 unused for now.
+ *
+ * In regular B-Tree terms, pivots are called keys. The term pivot is used to
+ * indicate that the tree is specifying ranges. Pivots may appear in the
+ * subtree with an entry attached to the value whereas keys are unique to a
+ * specific position of a B-tree. Pivot values are inclusive of the slot with
+ * the same index.
+ */
+
+struct maple_range_64 {
+ struct maple_pnode *parent;
+ unsigned long pivot[MAPLE_RANGE64_SLOTS - 1];
+ union {
+ void __rcu *slot[MAPLE_RANGE64_SLOTS];
+ struct {
+ void __rcu *pad[MAPLE_RANGE64_SLOTS - 1];
+ struct maple_metadata meta;
+ };
+ };
+};
+
+/*
+ * At tree creation time, the user can specify that they're willing to trade off
+ * storing fewer entries in a tree in return for storing more information in
+ * each node.
+ *
+ * The maple tree supports recording the largest range of NULL entries available
+ * in this node, also called gaps. This optimises the tree for allocating a
+ * range.
+ */
+struct maple_arange_64 {
+ struct maple_pnode *parent;
+ unsigned long pivot[MAPLE_ARANGE64_SLOTS - 1];
+ void __rcu *slot[MAPLE_ARANGE64_SLOTS];
+ unsigned long gap[MAPLE_ARANGE64_SLOTS];
+ struct maple_metadata meta;
+};
+
+struct maple_alloc {
+ unsigned long total;
+ unsigned char node_count;
+ unsigned int request_count;
+ struct maple_alloc *slot[MAPLE_ALLOC_SLOTS];
+};
+
+struct maple_topiary {
+ struct maple_pnode *parent;
+ struct maple_enode *next; /* Overlaps the pivot */
+};
+
+enum maple_type {
+ maple_dense,
+ maple_leaf_64,
+ maple_range_64,
+ maple_arange_64,
+};
+
+
+/**
+ * DOC: Maple tree flags
+ *
+ * * MT_FLAGS_ALLOC_RANGE - Track gaps in this tree
+ * * MT_FLAGS_USE_RCU - Operate in RCU mode
+ * * MT_FLAGS_HEIGHT_OFFSET - The position of the tree height in the flags
+ * * MT_FLAGS_HEIGHT_MASK - The mask for the maple tree height value
+ * * MT_FLAGS_LOCK_MASK - How the mt_lock is used
+ * * MT_FLAGS_LOCK_IRQ - Acquired irq-safe
+ * * MT_FLAGS_LOCK_BH - Acquired bh-safe
+ * * MT_FLAGS_LOCK_EXTERN - mt_lock is not used
+ *
+ * * MAPLE_HEIGHT_MAX - The largest height that can be stored
+ */
+#define MT_FLAGS_ALLOC_RANGE 0x01
+#define MT_FLAGS_USE_RCU 0x02
+#define MT_FLAGS_HEIGHT_OFFSET 0x02
+#define MT_FLAGS_HEIGHT_MASK 0x7C
+#define MT_FLAGS_LOCK_MASK 0x300
+#define MT_FLAGS_LOCK_IRQ 0x100
+#define MT_FLAGS_LOCK_BH 0x200
+#define MT_FLAGS_LOCK_EXTERN 0x300
+
+#define MAPLE_HEIGHT_MAX 31
+
+
+#define MAPLE_NODE_TYPE_MASK 0x0F
+#define MAPLE_NODE_TYPE_SHIFT 0x03
+
+#define MAPLE_RESERVED_RANGE 4096
+
+#ifdef CONFIG_LOCKDEP
+typedef struct lockdep_map *lockdep_map_p;
+#define mt_lock_is_held(mt) lock_is_held(mt->ma_external_lock)
+#define mt_set_external_lock(mt, lock) \
+ (mt)->ma_external_lock = &(lock)->dep_map
+#else
+typedef struct { /* nothing */ } lockdep_map_p;
+#define mt_lock_is_held(mt) 1
+#define mt_set_external_lock(mt, lock) do { } while (0)
+#endif
+
+/*
+ * If the tree contains a single entry at index 0, it is usually stored in
+ * tree->ma_root. To optimise for the page cache, an entry which ends in '00',
+ * '01' or '11' is stored in the root, but an entry which ends in '10' will be
+ * stored in a node. Bits 3-6 are used to store enum maple_type.
+ *
+ * The flags are used both to store some immutable information about this tree
+ * (set at tree creation time) and dynamic information set under the spinlock.
+ *
+ * Another use of flags is to indicate global states of the tree. This is the
+ * case with the MAPLE_USE_RCU flag, which indicates the tree is currently in
+ * RCU mode. This mode was added to allow the tree to reuse nodes instead of
+ * re-allocating and RCU freeing nodes when there is a single user.
+ */
+struct maple_tree {
+ union {
+ spinlock_t ma_lock;
+ lockdep_map_p ma_external_lock;
+ };
+ void __rcu *ma_root;
+ unsigned int ma_flags;
+};
+
+/**
+ * MTREE_INIT() - Initialize a maple tree
+ * @name: The maple tree name
+ * @__flags: The maple tree flags
+ *
+ */
+#define MTREE_INIT(name, __flags) { \
+ .ma_lock = __SPIN_LOCK_UNLOCKED((name).ma_lock), \
+ .ma_flags = __flags, \
+ .ma_root = NULL, \
+}
+
+/**
+ * MTREE_INIT_EXT() - Initialize a maple tree with an external lock.
+ * @name: The tree name
+ * @__flags: The maple tree flags
+ * @__lock: The external lock
+ */
+#ifdef CONFIG_LOCKDEP
+#define MTREE_INIT_EXT(name, __flags, __lock) { \
+ .ma_external_lock = &(__lock).dep_map, \
+ .ma_flags = (__flags), \
+ .ma_root = NULL, \
+}
+#else
+#define MTREE_INIT_EXT(name, __flags, __lock) MTREE_INIT(name, __flags)
+#endif
+
+#define DEFINE_MTREE(name) \
+ struct maple_tree name = MTREE_INIT(name, 0)
+
+#define mtree_lock(mt) spin_lock((&(mt)->ma_lock))
+#define mtree_unlock(mt) spin_unlock((&(mt)->ma_lock))
+
+/*
+ * The Maple Tree squeezes various bits in at various points which aren't
+ * necessarily obvious. Usually, this is done by observing that pointers are
+ * N-byte aligned and thus the bottom log_2(N) bits are available for use. We
+ * don't use the high bits of pointers to store additional information because
+ * we don't know what bits are unused on any given architecture.
+ *
+ * Nodes are 256 bytes in size and are also aligned to 256 bytes, giving us 8
+ * low bits for our own purposes. Nodes are currently of 4 types:
+ * 1. Single pointer (Range is 0-0)
+ * 2. Non-leaf Allocation Range nodes
+ * 3. Non-leaf Range nodes
+ * 4. Leaf Range nodes
+ *
+ * All nodes consist of a number of node slots, pivots, and a parent pointer.
+ */
+
+struct maple_node {
+ union {
+ struct {
+ struct maple_pnode *parent;
+ void __rcu *slot[MAPLE_NODE_SLOTS];
+ };
+ struct {
+ void *pad;
+ struct rcu_head rcu;
+ struct maple_enode *piv_parent;
+ unsigned char parent_slot;
+ enum maple_type type;
+ unsigned char slot_len;
+ unsigned int ma_flags;
+ };
+ struct maple_range_64 mr64;
+ struct maple_arange_64 ma64;
+ struct maple_alloc alloc;
+ };
+};
+
+/*
+ * More complicated stores can cause two nodes to become one or three and
+ * potentially alter the height of the tree. Either half of the tree may need
+ * to be rebalanced against the other. The ma_topiary struct is used to track
+ * which nodes have been 'cut' from the tree so that the change can be done
+ * safely at a later date. This is done to support RCU.
+ */
+struct ma_topiary {
+ struct maple_enode *head;
+ struct maple_enode *tail;
+ struct maple_tree *mtree;
+};
+
+void *mtree_load(struct maple_tree *mt, unsigned long index);
+
+int mtree_insert(struct maple_tree *mt, unsigned long index,
+ void *entry, gfp_t gfp);
+int mtree_insert_range(struct maple_tree *mt, unsigned long first,
+ unsigned long last, void *entry, gfp_t gfp);
+int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long size, unsigned long min,
+ unsigned long max, gfp_t gfp);
+int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long size, unsigned long min,
+ unsigned long max, gfp_t gfp);
+
+int mtree_store_range(struct maple_tree *mt, unsigned long first,
+ unsigned long last, void *entry, gfp_t gfp);
+int mtree_store(struct maple_tree *mt, unsigned long index,
+ void *entry, gfp_t gfp);
+void *mtree_erase(struct maple_tree *mt, unsigned long index);
+
+void mtree_destroy(struct maple_tree *mt);
+void __mt_destroy(struct maple_tree *mt);
+
+/**
+ * mtree_empty() - Determine if a tree has any present entries.
+ * @mt: Maple Tree.
+ *
+ * Context: Any context.
+ * Return: %true if the tree contains only NULL pointers.
+ */
+static inline bool mtree_empty(const struct maple_tree *mt)
+{
+ return mt->ma_root == NULL;
+}
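
The normal (locked) API above is self-contained: the tree manages its own spinlock, so the calls can be used directly. A minimal usage sketch under those assumptions:

static DEFINE_MTREE(tree);	/* flags = 0: internal lock, no gap tracking */

static int maple_example(void)
{
	static int data;
	void *entry;
	int ret;

	/* associate &data with every index in the range [10, 19] */
	ret = mtree_store_range(&tree, 10, 19, &data, GFP_KERNEL);
	if (ret)
		return ret;

	entry = mtree_load(&tree, 15);	/* returns &data */
	mtree_erase(&tree, 15);		/* removes the whole [10, 19] entry */
	mtree_destroy(&tree);
	return entry == &data ? 0 : -EINVAL;
}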
+
+/* Advanced API */
+
+/*
+ * The maple state is defined in the struct ma_state and is used to keep track
+ * of information during operations, and even between operations when using the
+ * advanced API.
+ *
+ * If state->node has bit 0 set then it references a tree location which is not
+ * a node (eg the root). If bit 1 is set, the rest of the bits are a negative
+ * errno. Bit 2 (the 'unallocated slots' bit) is clear. Bits 3-6 indicate the
+ * node type.
+ *
+ * state->alloc either has a requested number of nodes or an allocated node. If
+ * state->alloc has a requested number of nodes, the first bit will be set (0x1)
+ * and the remaining bits are the value. If state->alloc is a node, then the
+ * node will be of type maple_alloc. maple_alloc has MAPLE_NODE_SLOTS - 1 for
+ * storing more allocated nodes, a total number of nodes allocated, and the
+ * node_count in this node. node_count is the number of allocated nodes in this
+ * node. The scaling beyond MAPLE_NODE_SLOTS - 1 is handled by storing further
+ * nodes into state->alloc->slot[0]'s node. Nodes are taken from state->alloc
+ * by removing a node from the state->alloc node until state->alloc->node_count
+ * is 1, when state->alloc is returned and the state->alloc->slot[0] is promoted
+ * to state->alloc. Nodes are pushed onto state->alloc by putting the current
+ * state->alloc into the pushed node's slot[0].
+ *
+ * The state also contains the implied min/max of the state->node, the depth of
+ * this search, and the offset. The implied min/max are either from the parent
+ * node or are 0-oo for the root node. The depth is incremented or decremented
+ * every time a node is walked down or up. The offset is the slot/pivot of
+ * interest in the node - either for reading or writing.
+ *
+ * When returning a value the maple state index and last respectively contain
+ * the start and end of the range for the entry. Ranges are inclusive in the
+ * Maple Tree.
+ */
+struct ma_state {
+ struct maple_tree *tree; /* The tree we're operating in */
+ unsigned long index; /* The index we're operating on - range start */
+ unsigned long last; /* The last index we're operating on - range end */
+ struct maple_enode *node; /* The node containing this entry */
+ unsigned long min; /* The minimum index of this node - implied pivot min */
+ unsigned long max; /* The maximum index of this node - implied pivot max */
+ struct maple_alloc *alloc; /* Allocated nodes for this operation */
+ unsigned char depth; /* depth of tree descent during write */
+ unsigned char offset;
+ unsigned char mas_flags;
+};
+
+struct ma_wr_state {
+ struct ma_state *mas;
+ struct maple_node *node; /* Decoded mas->node */
+ unsigned long r_min; /* range min */
+ unsigned long r_max; /* range max */
+ enum maple_type type; /* mas->node type */
+ unsigned char offset_end; /* The offset where the write ends */
+ unsigned char node_end; /* mas->node end */
+ unsigned long *pivots; /* mas->node->pivots pointer */
+ unsigned long end_piv; /* The pivot at the offset end */
+ void __rcu **slots; /* mas->node->slots pointer */
+ void *entry; /* The entry to write */
+ void *content; /* The existing entry that is being overwritten */
+};
+
+#define mas_lock(mas) spin_lock(&((mas)->tree->ma_lock))
+#define mas_unlock(mas) spin_unlock(&((mas)->tree->ma_lock))
+
+
+/*
+ * Special values for ma_state.node.
+ * MAS_START means we have not searched the tree.
+ * MAS_ROOT means we have searched the tree and the entry we found lives in
+ * the root of the tree (ie it has index 0, length 1 and is the only entry in
+ * the tree).
+ * MAS_NONE means we have searched the tree and there is no node in the
+ * tree for this entry. For example, we searched for index 1 in an empty
+ * tree. Or we have a tree which points to a full leaf node and we
+ * searched for an entry which is larger than can be contained in that
+ * leaf node.
+ * MA_ERROR represents an errno. After dropping the lock and attempting
+ * to resolve the error, the walk would have to be restarted from the
+ * top of the tree as the tree may have been modified.
+ */
+#define MAS_START ((struct maple_enode *)1UL)
+#define MAS_ROOT ((struct maple_enode *)5UL)
+#define MAS_NONE ((struct maple_enode *)9UL)
+#define MAS_PAUSE ((struct maple_enode *)17UL)
+#define MA_ERROR(err) \
+ ((struct maple_enode *)(((unsigned long)err << 2) | 2UL))
+
+#define MA_STATE(name, mt, first, end) \
+ struct ma_state name = { \
+ .tree = mt, \
+ .index = first, \
+ .last = end, \
+ .node = MAS_START, \
+ .min = 0, \
+ .max = ULONG_MAX, \
+ .alloc = NULL, \
+ }
+
+#define MA_WR_STATE(name, ma_state, wr_entry) \
+ struct ma_wr_state name = { \
+ .mas = ma_state, \
+ .content = NULL, \
+ .entry = wr_entry, \
+ }
+
+#define MA_TOPIARY(name, tree) \
+ struct ma_topiary name = { \
+ .head = NULL, \
+ .tail = NULL, \
+ .mtree = tree, \
+ }
+
+void *mas_walk(struct ma_state *mas);
+void *mas_store(struct ma_state *mas, void *entry);
+void *mas_erase(struct ma_state *mas);
+int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp);
+void mas_store_prealloc(struct ma_state *mas, void *entry);
+void *mas_find(struct ma_state *mas, unsigned long max);
+void *mas_find_rev(struct ma_state *mas, unsigned long min);
+int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp);
+bool mas_is_err(struct ma_state *mas);
+
+bool mas_nomem(struct ma_state *mas, gfp_t gfp);
+void mas_pause(struct ma_state *mas);
+void maple_tree_init(void);
+void mas_destroy(struct ma_state *mas);
+int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries);
+
+void *mas_prev(struct ma_state *mas, unsigned long min);
+void *mas_next(struct ma_state *mas, unsigned long max);
+
+int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned long max,
+ unsigned long size);
+
+/* Checks if a mas has not found anything */
+static inline bool mas_is_none(struct ma_state *mas)
+{
+ return mas->node == MAS_NONE;
+}
+
+/* Checks if a mas has been paused */
+static inline bool mas_is_paused(struct ma_state *mas)
+{
+ return mas->node == MAS_PAUSE;
+}
+
+void mas_dup_tree(struct ma_state *oldmas, struct ma_state *mas);
+void mas_dup_store(struct ma_state *mas, void *entry);
+
+/*
+ * This finds an empty area from the highest address to the lowest.
+ * AKA "Topdown" version,
+ */
+int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
+ unsigned long max, unsigned long size);
+/**
+ * mas_reset() - Reset a Maple Tree operation state.
+ * @mas: Maple Tree operation state.
+ *
+ * Resets the error or walk state of the @mas so future walks of the
+ * array will start from the root. Use this if you have dropped the
+ * lock and want to reuse the ma_state.
+ *
+ * Context: Any context.
+ */
+static inline void mas_reset(struct ma_state *mas)
+{
+ mas->node = MAS_START;
+}
+
+/**
+ * mas_for_each() - Iterate over a range of the maple tree.
+ * @__mas: Maple Tree operation state (maple_state)
+ * @__entry: Entry retrieved from the tree
+ * @__max: maximum index to retrieve from the tree
+ *
+ * When returned, mas->index and mas->last will hold the entire range for the
+ * entry.
+ *
+ * Note: may return the zero entry.
+ *
+ */
+#define mas_for_each(__mas, __entry, __max) \
+ while (((__entry) = mas_find((__mas), (__max))) != NULL)
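
Putting the advanced API together, a read-side walk typically declares its state with MA_STATE() and iterates under rcu_read_lock(). A sketch, assuming tree is a struct maple_tree initialised elsewhere:

MA_STATE(mas, &tree, 0, 0);	/* start the walk at index 0 */
void *entry;

rcu_read_lock();
mas_for_each(&mas, entry, ULONG_MAX) {
	/* mas.index and mas.last bound the range occupied by entry */
	pr_debug("entry spans [%lu, %lu]\n", mas.index, mas.last);
}
rcu_read_unlock();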
+
+
+/**
+ * mas_set_range() - Set up Maple Tree operation state for a different index.
+ * @mas: Maple Tree operation state.
+ * @start: New start of range in the Maple Tree.
+ * @last: New end of range in the Maple Tree.
+ *
+ * Move the operation state to refer to a different range. This will
+ * have the effect of starting a walk from the top; see mas_next()
+ * to move to an adjacent index.
+ */
+static inline
+void mas_set_range(struct ma_state *mas, unsigned long start, unsigned long last)
+{
+ mas->index = start;
+ mas->last = last;
+ mas->node = MAS_START;
+}
+
+/**
+ * mas_set() - Set up Maple Tree operation state for a different index.
+ * @mas: Maple Tree operation state.
+ * @index: New index into the Maple Tree.
+ *
+ * Move the operation state to refer to a different index. This will
+ * have the effect of starting a walk from the top; see mas_next()
+ * to move to an adjacent index.
+ */
+static inline void mas_set(struct ma_state *mas, unsigned long index)
+{
+ mas_set_range(mas, index, index);
+}
+
+static inline bool mt_external_lock(const struct maple_tree *mt)
+{
+ return (mt->ma_flags & MT_FLAGS_LOCK_MASK) == MT_FLAGS_LOCK_EXTERN;
+}
+
+/**
+ * mt_init_flags() - Initialise an empty maple tree with flags.
+ * @mt: Maple Tree
+ * @flags: maple tree flags.
+ *
+ * If you need to initialise a Maple Tree with special flags (eg, an
+ * allocation tree), use this function.
+ *
+ * Context: Any context.
+ */
+static inline void mt_init_flags(struct maple_tree *mt, unsigned int flags)
+{
+ mt->ma_flags = flags;
+ if (!mt_external_lock(mt))
+ spin_lock_init(&mt->ma_lock);
+ rcu_assign_pointer(mt->ma_root, NULL);
+}
+
+/**
+ * mt_init() - Initialise an empty maple tree.
+ * @mt: Maple Tree
+ *
+ * Initialise @mt as an empty Maple Tree.
+ *
+ * Context: Any context.
+ */
+static inline void mt_init(struct maple_tree *mt)
+{
+ mt_init_flags(mt, 0);
+}
+
+static inline bool mt_in_rcu(struct maple_tree *mt)
+{
+#ifdef CONFIG_MAPLE_RCU_DISABLED
+ return false;
+#endif
+ return mt->ma_flags & MT_FLAGS_USE_RCU;
+}
+
+/**
+ * mt_clear_in_rcu() - Switch the tree to non-RCU mode.
+ * @mt: The Maple Tree
+ */
+static inline void mt_clear_in_rcu(struct maple_tree *mt)
+{
+ if (!mt_in_rcu(mt))
+ return;
+
+ if (mt_external_lock(mt)) {
+ BUG_ON(!mt_lock_is_held(mt));
+ mt->ma_flags &= ~MT_FLAGS_USE_RCU;
+ } else {
+ mtree_lock(mt);
+ mt->ma_flags &= ~MT_FLAGS_USE_RCU;
+ mtree_unlock(mt);
+ }
+}
+
+/**
+ * mt_set_in_rcu() - Switch the tree to RCU safe mode.
+ * @mt: The Maple Tree
+ */
+static inline void mt_set_in_rcu(struct maple_tree *mt)
+{
+ if (mt_in_rcu(mt))
+ return;
+
+ if (mt_external_lock(mt)) {
+ BUG_ON(!mt_lock_is_held(mt));
+ mt->ma_flags |= MT_FLAGS_USE_RCU;
+ } else {
+ mtree_lock(mt);
+ mt->ma_flags |= MT_FLAGS_USE_RCU;
+ mtree_unlock(mt);
+ }
+}
+
+static inline unsigned int mt_height(const struct maple_tree *mt)
+{
+ return (mt->ma_flags & MT_FLAGS_HEIGHT_MASK) >> MT_FLAGS_HEIGHT_OFFSET;
+}
+
+void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max);
+void *mt_find_after(struct maple_tree *mt, unsigned long *index,
+ unsigned long max);
+void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min);
+void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max);
+
+/**
+ * mt_for_each - Iterate over each entry starting at index until max.
+ * @__tree: The Maple Tree
+ * @__entry: The current entry
+ * @__index: The index to update to track the location in the tree
+ * @__max: The maximum limit for @index
+ *
+ * Note: Will not return the zero entry.
+ */
+#define mt_for_each(__tree, __entry, __index, __max) \
+ for (__entry = mt_find(__tree, &(__index), __max); \
+ __entry; __entry = mt_find_after(__tree, &(__index), __max))
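
The simple iterator tracks its position in a caller-supplied index rather than a full ma_state. A sketch of walking every entry (tree assumed initialised):

unsigned long index = 0;
void *entry;

mt_for_each(&tree, entry, index, ULONG_MAX) {
	/* index is advanced to the location where entry was found */
	pr_debug("entry found at index %lu\n", index);
}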
+
+
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+extern atomic_t maple_tree_tests_run;
+extern atomic_t maple_tree_tests_passed;
+
+void mt_dump(const struct maple_tree *mt);
+void mt_validate(struct maple_tree *mt);
+void mt_cache_shrink(void);
+#define MT_BUG_ON(__tree, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mt_dump(__tree); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+#else
+#define MT_BUG_ON(__tree, __x) BUG_ON(__x)
+#endif /* CONFIG_DEBUG_MAPLE_TREE */
+
+#endif /*_LINUX_MAPLE_TREE_H */
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index af6b11d4d673..0f06c2287b52 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -15,19 +15,27 @@
#define MARVELL_PHY_ID_88E1149R 0x01410e50
#define MARVELL_PHY_ID_88E1240 0x01410e30
#define MARVELL_PHY_ID_88E1318S 0x01410e90
+#define MARVELL_PHY_ID_88E1340S 0x01410dc0
#define MARVELL_PHY_ID_88E1116R 0x01410e40
#define MARVELL_PHY_ID_88E1510 0x01410dd0
#define MARVELL_PHY_ID_88E1540 0x01410eb0
#define MARVELL_PHY_ID_88E1545 0x01410ea0
+#define MARVELL_PHY_ID_88E1548P 0x01410ec0
#define MARVELL_PHY_ID_88E3016 0x01410e60
#define MARVELL_PHY_ID_88X3310 0x002b09a0
#define MARVELL_PHY_ID_88E2110 0x002b09b0
+#define MARVELL_PHY_ID_88X2222 0x01410f10
-/* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do
+/* Marvell 88E1111 in Finisar SFP module with modified PHY ID */
+#define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0
+
+/* These Ethernet switch families contain embedded PHYs, but they do
* not have a model ID. So the switch driver traps reads to the ID2
* register and returns the switch family ID
*/
-#define MARVELL_PHY_ID_88E6390 0x01410f90
+#define MARVELL_PHY_ID_88E6341_FAMILY 0x01410f41
+#define MARVELL_PHY_ID_88E6390_FAMILY 0x01410f90
+#define MARVELL_PHY_ID_88E6393_FAMILY 0x002b0b9b
#define MARVELL_PHY_FAMILY_ID(id) ((id) >> 4)
diff --git a/include/linux/math.h b/include/linux/math.h
new file mode 100644
index 000000000000..439b8f0b9ebd
--- /dev/null
+++ b/include/linux/math.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MATH_H
+#define _LINUX_MATH_H
+
+#include <linux/types.h>
+#include <asm/div64.h>
+#include <uapi/linux/kernel.h>
+
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+
+/**
+ * round_up - round up to next specified power of 2
+ * @x: the value to round
+ * @y: multiple to round up to (must be a power of 2)
+ *
+ * Rounds @x up to next multiple of @y (which must be a power of 2).
+ * To perform arbitrary rounding up, use roundup() below.
+ */
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+
+/**
+ * round_down - round down to next specified power of 2
+ * @x: the value to round
+ * @y: multiple to round down to (must be a power of 2)
+ *
+ * Rounds @x down to next multiple of @y (which must be a power of 2).
+ * To perform arbitrary rounding down, use rounddown() below.
+ */
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
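
Because @y is a power of 2, both macros reduce to a mask operation rather than a division, for example:

round_up(100, 16);	/* ((100 - 1) | 15) + 1 == 112 */
round_down(100, 16);	/* 100 & ~15           ==  96 */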
+
+#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
+
+#define DIV_ROUND_DOWN_ULL(ll, d) \
+ ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
+
+#define DIV_ROUND_UP_ULL(ll, d) \
+ DIV_ROUND_DOWN_ULL((unsigned long long)(ll) + (d) - 1, (d))
+
+#if BITS_PER_LONG == 32
+# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
+#else
+# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
+#endif
+
+/**
+ * roundup - round up to the next specified multiple
+ * @x: the value to round up
+ * @y: multiple to round up to
+ *
+ * Rounds @x up to next multiple of @y. If @y will always be a power
+ * of 2, consider using the faster round_up().
+ */
+#define roundup(x, y) ( \
+{ \
+ typeof(y) __y = y; \
+ (((x) + (__y - 1)) / __y) * __y; \
+} \
+)
+/**
+ * rounddown - round down to next specified multiple
+ * @x: the value to round
+ * @y: multiple to round down to
+ *
+ * Rounds @x down to next multiple of @y. If @y will always be a power
+ * of 2, consider using the faster round_down().
+ */
+#define rounddown(x, y) ( \
+{ \
+ typeof(x) __x = (x); \
+ __x - (__x % (y)); \
+} \
+)
+
+/*
+ * Divide positive or negative dividend by positive or negative divisor
+ * and round to closest integer. Result is undefined for negative
+ * divisors if the dividend variable type is unsigned and for negative
+ * dividends if the divisor variable type is unsigned.
+ */
+#define DIV_ROUND_CLOSEST(x, divisor)( \
+{ \
+ typeof(x) __x = x; \
+ typeof(divisor) __d = divisor; \
+ (((typeof(x))-1) > 0 || \
+ ((typeof(divisor))-1) > 0 || \
+ (((__x) > 0) == ((__d) > 0))) ? \
+ (((__x) + ((__d) / 2)) / (__d)) : \
+ (((__x) - ((__d) / 2)) / (__d)); \
+} \
+)
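
The sign check selects whether half the divisor is added or subtracted before the truncating division, for example:

DIV_ROUND_CLOSEST(17, 5);	/* (17 + 2) / 5  ==  3 (3.4 rounds down) */
DIV_ROUND_CLOSEST(18, 5);	/* (18 + 2) / 5  ==  4 (3.6 rounds up) */
DIV_ROUND_CLOSEST(-17, 5);	/* (-17 - 2) / 5 == -3 */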
+/*
+ * Same as above but for u64 dividends. divisor must be a 32-bit
+ * number.
+ */
+#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
+{ \
+ typeof(divisor) __d = divisor; \
+ unsigned long long _tmp = (x) + (__d) / 2; \
+ do_div(_tmp, __d); \
+ _tmp; \
+} \
+)
+
+#define __STRUCT_FRACT(type) \
+struct type##_fract { \
+ __##type numerator; \
+ __##type denominator; \
+};
+__STRUCT_FRACT(s16)
+__STRUCT_FRACT(u16)
+__STRUCT_FRACT(s32)
+__STRUCT_FRACT(u32)
+#undef __STRUCT_FRACT
+
+/*
+ * Multiplies an integer by a fraction, while avoiding unnecessary
+ * overflow or loss of precision.
+ */
+#define mult_frac(x, numer, denom)( \
+{ \
+ typeof(x) quot = (x) / (denom); \
+ typeof(x) rem = (x) % (denom); \
+ (quot * (numer)) + ((rem * (numer)) / (denom)); \
+} \
+)
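
Splitting @x into quotient and remainder keeps the intermediate products small. For example, scaling by 3/7:

mult_frac(1000000, 3, 7);
/*
 * quot = 1000000 / 7 = 142857, rem = 1000000 % 7 = 1, so the result is
 * 142857 * 3 + (1 * 3) / 7 == 428571, while computing (1000000 * 3) / 7
 * directly could already overflow a narrower type.
 */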
+
+#define sector_div(a, b) do_div(a, b)
+
+/**
+ * abs - return absolute value of an argument
+ * @x: the value. If it is of an unsigned type, it is converted to a signed type first.
+ * char is treated as if it were signed (regardless of whether it really is)
+ * but the macro's return type is preserved as char.
+ *
+ * Return: an absolute value of x.
+ */
+#define abs(x) __abs_choose_expr(x, long long, \
+ __abs_choose_expr(x, long, \
+ __abs_choose_expr(x, int, \
+ __abs_choose_expr(x, short, \
+ __abs_choose_expr(x, char, \
+ __builtin_choose_expr( \
+ __builtin_types_compatible_p(typeof(x), char), \
+ (char)({ signed char __x = (x); __x<0?-__x:__x; }), \
+ ((void)0)))))))
+
+#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \
+ __builtin_types_compatible_p(typeof(x), signed type) || \
+ __builtin_types_compatible_p(typeof(x), unsigned type), \
+ ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
+
+/**
+ * reciprocal_scale - "scale" a value into range [0, ep_ro)
+ * @val: value
+ * @ep_ro: right open interval endpoint
+ *
+ * Perform a "reciprocal multiplication" in order to "scale" a value into
+ * range [0, @ep_ro), where the upper interval endpoint is right-open.
+ * This is useful, e.g. for accessing an index of an array containing
+ * @ep_ro elements. Think of it as a sort of modulus, only that the
+ * result isn't that of modulo. ;) Note that if the initial input is a
+ * small value, the result will be 0.
+ *
+ * Return: a result based on @val in interval [0, @ep_ro).
+ */
+static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
+{
+ return (u32)(((u64) val * ep_ro) >> 32);
+}
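
A common use is mapping a full-range 32-bit hash onto an arbitrary bucket count without a division; a sketch (hash and nr_buckets are caller state):

u32 bucket = reciprocal_scale(hash, nr_buckets);
/* e.g. reciprocal_scale(0x80000000, 10) == (0x80000000ULL * 10) >> 32 == 5 */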
+
+u64 int_pow(u64 base, unsigned int exp);
+unsigned long int_sqrt(unsigned long);
+
+#if BITS_PER_LONG < 64
+u32 int_sqrt64(u64 x);
+#else
+static inline u32 int_sqrt64(u64 x)
+{
+ return (u32)int_sqrt(x);
+}
+#endif
+
+#endif /* _LINUX_MATH_H */
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 65bef21cdddb..a14f40de1dca 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -3,6 +3,8 @@
#define _LINUX_MATH64_H
#include <linux/types.h>
+#include <linux/math.h>
+#include <vdso/math64.h>
#include <asm/div64.h>
#if BITS_PER_LONG == 64
@@ -27,7 +29,7 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
return dividend / divisor;
}
-/**
+/*
* div_s64_rem - signed 64bit divide with 32bit divisor with remainder
* @dividend: signed 64bit dividend
* @divisor: signed 32bit divisor
@@ -41,7 +43,7 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
return dividend / divisor;
}
-/**
+/*
* div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
* @dividend: unsigned 64bit dividend
* @divisor: unsigned 64bit divisor
@@ -55,7 +57,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
return dividend / divisor;
}
-/**
+/*
* div64_u64 - unsigned 64bit divide with 64bit divisor
* @dividend: unsigned 64bit dividend
* @divisor: unsigned 64bit divisor
@@ -67,7 +69,7 @@ static inline u64 div64_u64(u64 dividend, u64 divisor)
return dividend / divisor;
}
-/**
+/*
* div64_s64 - signed 64bit divide with 64bit divisor
* @dividend: signed 64bit dividend
* @divisor: signed 64bit divisor
@@ -142,25 +144,6 @@ static inline s64 div_s64(s64 dividend, s32 divisor)
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
-static __always_inline u32
-__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
-{
- u32 ret = 0;
-
- while (dividend >= divisor) {
- /* The following asm() prevents the compiler from
- optimising this loop into a modulo operation. */
- asm("" : "+rm"(dividend));
-
- dividend -= divisor;
- ret++;
- }
-
- *remainder = dividend;
-
- return ret;
-}
-
#ifndef mul_u32_u32
/*
* Many a GCC version messes this up and generates a 64x64 mult :-(
@@ -252,6 +235,24 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
#endif
+#ifndef mul_s64_u64_shr
+static inline u64 mul_s64_u64_shr(s64 a, u64 b, unsigned int shift)
+{
+ u64 ret;
+
+ /*
+ * Extract the sign before the multiplication and put it back
+ * afterwards if needed.
+ */
+ ret = mul_u64_u64_shr(abs(a), b, shift);
+
+ if (a < 0)
+ ret = -((s64) ret);
+
+ return ret;
+}
+#endif /* mul_s64_u64_shr */
+
#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
@@ -281,6 +282,8 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
}
#endif /* mul_u64_u32_div */
+u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
+
#define DIV64_U64_ROUND_UP(ll, d) \
({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
@@ -297,4 +300,36 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
#define DIV64_U64_ROUND_CLOSEST(dividend, divisor) \
({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); })
+/*
+ * DIV_U64_ROUND_CLOSEST - unsigned 64bit divide with 32bit divisor rounded to nearest integer
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 32bit divisor
+ *
+ * Divide unsigned 64bit dividend by unsigned 32bit divisor
+ * and round to closest integer.
+ *
+ * Return: dividend / divisor rounded to nearest integer
+ */
+#define DIV_U64_ROUND_CLOSEST(dividend, divisor) \
+ ({ u32 _tmp = (divisor); div_u64((u64)(dividend) + _tmp / 2, _tmp); })
+
+/*
+ * DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer
+ * @dividend: signed 64bit dividend
+ * @divisor: signed 32bit divisor
+ *
+ * Divide signed 64bit dividend by signed 32bit divisor
+ * and round to closest integer.
+ *
+ * Return: dividend / divisor rounded to nearest integer
+ */
+#define DIV_S64_ROUND_CLOSEST(dividend, divisor)( \
+{ \
+ s64 __x = (dividend); \
+ s32 __d = (divisor); \
+ ((__x > 0) == (__d > 0)) ? \
+ div_s64((__x + (__d / 2)), __d) : \
+ div_s64((__x - (__d / 2)), __d); \
+} \
+)
#endif /* _LINUX_MATH64_H */
diff --git a/include/linux/max17040_battery.h b/include/linux/max17040_battery.h
deleted file mode 100644
index 593602fc9317..000000000000
--- a/include/linux/max17040_battery.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2009 Samsung Electronics
- * Minkyu Kang <mk7.kang@samsung.com>
- */
-
-#ifndef __MAX17040_BATTERY_H_
-#define __MAX17040_BATTERY_H_
-
-struct max17040_platform_data {
- int (*battery_online)(void);
- int (*charger_online)(void);
- int (*charger_enable)(void);
-};
-
-#endif
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 20f1e3ff6013..2da63fd7b98f 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -13,8 +13,16 @@ struct mb_cache;
struct mb_cache_entry {
/* List of entries in cache - protected by cache->c_list_lock */
struct list_head e_list;
- /* Hash table list - protected by hash chain bitlock */
+ /*
+ * Hash table list - protected by hash chain bitlock. The entry is
+ * guaranteed to be hashed while e_refcnt > 0.
+ */
struct hlist_bl_node e_hash_list;
+ /*
+ * Entry refcount. Once it reaches zero, entry is unhashed and freed.
+ * While refcount > 0, the entry is guaranteed to stay in the hash and
+	 * e.g. mb_cache_entry_delete_or_get() will not remove it from the hash.
+ */
atomic_t e_refcnt;
/* Key in hash - stable during lifetime of the entry */
u32 e_key;
@@ -29,17 +37,24 @@ void mb_cache_destroy(struct mb_cache *cache);
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
u64 value, bool reusable);
-void __mb_cache_entry_free(struct mb_cache_entry *entry);
-static inline int mb_cache_entry_put(struct mb_cache *cache,
- struct mb_cache_entry *entry)
+void __mb_cache_entry_free(struct mb_cache *cache,
+ struct mb_cache_entry *entry);
+void mb_cache_entry_wait_unused(struct mb_cache_entry *entry);
+static inline void mb_cache_entry_put(struct mb_cache *cache,
+ struct mb_cache_entry *entry)
{
- if (!atomic_dec_and_test(&entry->e_refcnt))
- return 0;
- __mb_cache_entry_free(entry);
- return 1;
+ unsigned int cnt = atomic_dec_return(&entry->e_refcnt);
+
+ if (cnt > 0) {
+ if (cnt <= 2)
+ wake_up_var(&entry->e_refcnt);
+ return;
+ }
+ __mb_cache_entry_free(cache, entry);
}
-void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value);
+struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
+ u32 key, u64 value);
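
The refcount doubling as a busy marker enables a reliable delete-once-unused pattern. A sketch of the intended calling sequence (cache, key and value are caller state):

struct mb_cache_entry *entry;

entry = mb_cache_entry_delete_or_get(cache, key, value);
if (entry) {
	/* entry was busy: wait out the other users, then drop our ref */
	mb_cache_entry_wait_unused(entry);
	mb_cache_entry_put(cache, entry);
}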
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
u64 value);
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
index 0661af17a758..b0da04fe087b 100644
--- a/include/linux/mc146818rtc.h
+++ b/include/linux/mc146818rtc.h
@@ -86,6 +86,8 @@ struct cmos_rtc_board_info {
/* 2 values for divider stage reset, others for "testing purposes only" */
# define RTC_DIV_RESET1 0x60
# define RTC_DIV_RESET2 0x70
+ /* In the AMD BKDG, bits 5 and 6 are reserved; bit 4 selects the DV0 bank */
+# define RTC_AMD_BANK_SELECT 0x10
/* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */
# define RTC_RATE_SELECT 0x0F
@@ -123,7 +125,11 @@ struct cmos_rtc_board_info {
#define RTC_IO_EXTENT_USED RTC_IO_EXTENT
#endif /* ARCH_RTC_LOCATION */
-unsigned int mc146818_get_time(struct rtc_time *time);
+bool mc146818_does_rtc_work(void);
+int mc146818_get_time(struct rtc_time *time);
int mc146818_set_time(struct rtc_time *time);
+bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
+ void *param);
+
#endif /* _MC146818RTC_H */
diff --git a/include/linux/mcb.h b/include/linux/mcb.h
index 71dd10a3d928..f6efb16f9d1b 100644
--- a/include/linux/mcb.h
+++ b/include/linux/mcb.h
@@ -120,7 +120,7 @@ extern int __must_check __mcb_register_driver(struct mcb_driver *drv,
__mcb_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
extern void mcb_unregister_driver(struct mcb_driver *driver);
#define module_mcb_driver(__mcb_driver) \
- module_driver(__mcb_driver, mcb_register_driver, mcb_unregister_driver);
+ module_driver(__mcb_driver, mcb_register_driver, mcb_unregister_driver)
extern void mcb_bus_add_devices(const struct mcb_bus *bus);
extern int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev);
extern struct mcb_bus *mcb_alloc_bus(struct device *carrier);
diff --git a/include/linux/mdev.h b/include/linux/mdev.h
index 0ce30ca78db0..139d05b26f82 100644
--- a/include/linux/mdev.h
+++ b/include/linux/mdev.h
@@ -10,139 +10,80 @@
#ifndef MDEV_H
#define MDEV_H
-struct mdev_device;
+#include <linux/device.h>
+#include <linux/uuid.h>
-/*
- * Called by the parent device driver to set the device which represents
- * this mdev in iommu protection scope. By default, the iommu device is
- * NULL, that indicates using vendor defined isolation.
- *
- * @dev: the mediated device that iommu will isolate.
- * @iommu_device: a pci device which represents the iommu for @dev.
- *
- * Return 0 for success, otherwise negative error value.
- */
-int mdev_set_iommu_device(struct device *dev, struct device *iommu_device);
+struct mdev_type;
-struct device *mdev_get_iommu_device(struct device *dev);
+struct mdev_device {
+ struct device dev;
+ guid_t uuid;
+ struct list_head next;
+ struct mdev_type *type;
+ bool active;
+};
-/**
- * struct mdev_parent_ops - Structure to be registered for each parent device to
- * register the device to mdev module.
- *
- * @owner: The module owner.
- * @dev_attr_groups: Attributes of the parent device.
- * @mdev_attr_groups: Attributes of the mediated device.
- * @supported_type_groups: Attributes to define supported types. It is mandatory
- * to provide supported types.
- * @create: Called to allocate basic resources in parent device's
- * driver for a particular mediated device. It is
- * mandatory to provide create ops.
- * @kobj: kobject of type for which 'create' is called.
- * @mdev: mdev_device structure on of mediated device
- * that is being created
- * Returns integer: success (0) or error (< 0)
- * @remove: Called to free resources in parent device's driver for a
- * a mediated device. It is mandatory to provide 'remove'
- * ops.
- * @mdev: mdev_device device structure which is being
- * destroyed
- * Returns integer: success (0) or error (< 0)
- * @open: Open mediated device.
- * @mdev: mediated device.
- * Returns integer: success (0) or error (< 0)
- * @release: release mediated device
- * @mdev: mediated device.
- * @read: Read emulation callback
- * @mdev: mediated device structure
- * @buf: read buffer
- * @count: number of bytes to read
- * @ppos: address.
- * Retuns number on bytes read on success or error.
- * @write: Write emulation callback
- * @mdev: mediated device structure
- * @buf: write buffer
- * @count: number of bytes to be written
- * @ppos: address.
- * Retuns number on bytes written on success or error.
- * @ioctl: IOCTL callback
- * @mdev: mediated device structure
- * @cmd: ioctl command
- * @arg: arguments to ioctl
- * @mmap: mmap callback
- * @mdev: mediated device structure
- * @vma: vma structure
- * Parent device that support mediated device should be registered with mdev
- * module with mdev_parent_ops structure.
- **/
-struct mdev_parent_ops {
- struct module *owner;
- const struct attribute_group **dev_attr_groups;
- const struct attribute_group **mdev_attr_groups;
- struct attribute_group **supported_type_groups;
+struct mdev_type {
+ /* set by the driver before calling mdev_register_parent(): */
+ const char *sysfs_name;
+ const char *pretty_name;
+
+ /* set by the core, can be used by drivers */
+ struct mdev_parent *parent;
- int (*create)(struct kobject *kobj, struct mdev_device *mdev);
- int (*remove)(struct mdev_device *mdev);
- int (*open)(struct mdev_device *mdev);
- void (*release)(struct mdev_device *mdev);
- ssize_t (*read)(struct mdev_device *mdev, char __user *buf,
- size_t count, loff_t *ppos);
- ssize_t (*write)(struct mdev_device *mdev, const char __user *buf,
- size_t count, loff_t *ppos);
- long (*ioctl)(struct mdev_device *mdev, unsigned int cmd,
- unsigned long arg);
- int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma);
+ /* internal only */
+ struct kobject kobj;
+ struct kobject *devices_kobj;
};
-/* interface for exporting mdev supported type attributes */
-struct mdev_type_attribute {
- struct attribute attr;
- ssize_t (*show)(struct kobject *kobj, struct device *dev, char *buf);
- ssize_t (*store)(struct kobject *kobj, struct device *dev,
- const char *buf, size_t count);
+/* embedded into the struct device that the mdev devices hang off */
+struct mdev_parent {
+ struct device *dev;
+ struct mdev_driver *mdev_driver;
+ struct kset *mdev_types_kset;
+ /* Synchronize device creation/removal with parent unregistration */
+ struct rw_semaphore unreg_sem;
+ struct mdev_type **types;
+ unsigned int nr_types;
+ atomic_t available_instances;
};
-#define MDEV_TYPE_ATTR(_name, _mode, _show, _store) \
-struct mdev_type_attribute mdev_type_attr_##_name = \
- __ATTR(_name, _mode, _show, _store)
-#define MDEV_TYPE_ATTR_RW(_name) \
- struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RW(_name)
-#define MDEV_TYPE_ATTR_RO(_name) \
- struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RO(_name)
-#define MDEV_TYPE_ATTR_WO(_name) \
- struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_WO(_name)
+static inline struct mdev_device *to_mdev_device(struct device *dev)
+{
+ return container_of(dev, struct mdev_device, dev);
+}
/**
* struct mdev_driver - Mediated device driver
- * @name: driver name
+ * @device_api: string to return for the device_api sysfs
+ * @max_instances: maximum number of instances supported (optional)
* @probe: called when new device created
* @remove: called when device removed
+ * @get_available: Return the max number of instances that can be created
+ * @show_description: Print a description of the mtype
* @driver: device driver structure
- *
**/
struct mdev_driver {
- const char *name;
- int (*probe)(struct device *dev);
- void (*remove)(struct device *dev);
+ const char *device_api;
+ unsigned int max_instances;
+ int (*probe)(struct mdev_device *dev);
+ void (*remove)(struct mdev_device *dev);
+ unsigned int (*get_available)(struct mdev_type *mtype);
+ ssize_t (*show_description)(struct mdev_type *mtype, char *buf);
struct device_driver driver;
};
-#define to_mdev_driver(drv) container_of(drv, struct mdev_driver, driver)
-
-void *mdev_get_drvdata(struct mdev_device *mdev);
-void mdev_set_drvdata(struct mdev_device *mdev, void *data);
-const guid_t *mdev_uuid(struct mdev_device *mdev);
-
-extern struct bus_type mdev_bus_type;
-
-int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops);
-void mdev_unregister_device(struct device *dev);
+int mdev_register_parent(struct mdev_parent *parent, struct device *dev,
+ struct mdev_driver *mdev_driver, struct mdev_type **types,
+ unsigned int nr_types);
+void mdev_unregister_parent(struct mdev_parent *parent);
-int mdev_register_driver(struct mdev_driver *drv, struct module *owner);
+int mdev_register_driver(struct mdev_driver *drv);
void mdev_unregister_driver(struct mdev_driver *drv);
-struct device *mdev_parent_dev(struct mdev_device *mdev);
-struct device *mdev_dev(struct mdev_device *mdev);
-struct mdev_device *mdev_from_dev(struct device *dev);
+static inline struct device *mdev_dev(struct mdev_device *mdev)
+{
+ return &mdev->dev;
+}
#endif /* MDEV_H */
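To show how the reworked API above fits together, here is a hypothetical driver-side sketch; every foo_* name is invented, and the device_api string is a placeholder rather than a real VFIO device API.

#include <linux/mdev.h>
#include <linux/module.h>

static int foo_probe(struct mdev_device *mdev)
{
	return 0;	/* allocate per-instance state here */
}

static void foo_remove(struct mdev_device *mdev)
{
}

static struct mdev_type foo_type = {
	.sysfs_name	= "foo-small",
	.pretty_name	= "Small foo device",
};
static struct mdev_type *foo_types[] = { &foo_type };

static struct mdev_driver foo_mdev_driver = {
	.device_api	= "vfio-foo",	/* placeholder device_api string */
	.probe		= foo_probe,
	.remove		= foo_remove,
	.driver = {
		.name	= "foo_mdev",
		.owner	= THIS_MODULE,
	},
};

static struct mdev_parent foo_parent;

static int foo_register(struct device *dev)
{
	int ret;

	ret = mdev_register_driver(&foo_mdev_driver);
	if (ret)
		return ret;

	ret = mdev_register_parent(&foo_parent, dev, &foo_mdev_driver,
				   foo_types, ARRAY_SIZE(foo_types));
	if (ret)
		mdev_unregister_driver(&foo_mdev_driver);
	return ret;
}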
diff --git a/include/linux/mdio-bitbang.h b/include/linux/mdio-bitbang.h
index 5d71e8a8500f..373630fe5c28 100644
--- a/include/linux/mdio-bitbang.h
+++ b/include/linux/mdio-bitbang.h
@@ -33,8 +33,14 @@ struct mdiobb_ops {
struct mdiobb_ctrl {
const struct mdiobb_ops *ops;
+ unsigned int override_op_c22;
+ u8 op_c22_read;
+ u8 op_c22_write;
};
+int mdiobb_read(struct mii_bus *bus, int phy, int reg);
+int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val);
+
/* The returned bus is not yet registered with the phy layer. */
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl);
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index a7604248777b..00177567cfef 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -7,10 +7,20 @@
#define __LINUX_MDIO_H__
#include <uapi/linux/mdio.h>
+#include <linux/bitfield.h>
#include <linux/mod_devicetable.h>
+/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
+ * IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips.
+ */
+#define MII_ADDR_C45 (1<<30)
+#define MII_DEVADDR_C45_SHIFT 16
+#define MII_DEVADDR_C45_MASK GENMASK(20, 16)
+#define MII_REGADDR_C45_MASK GENMASK(15, 0)
+
struct gpio_desc;
struct mii_bus;
+struct reset_control;
/* Multiple levels of nesting are possible. However typically this is
* limited to nested DSA like layer, a MUX layer, and the normal
@@ -41,7 +51,11 @@ struct mdio_device {
unsigned int reset_assert_delay;
unsigned int reset_deassert_delay;
};
-#define to_mdio_device(d) container_of(d, struct mdio_device, dev)
+
+static inline struct mdio_device *to_mdio_device(const struct device *dev)
+{
+ return container_of(dev, struct mdio_device, dev);
+}
/* struct mdio_driver_common: Common to all MDIO drivers */
struct mdio_driver_common {
@@ -49,8 +63,12 @@ struct mdio_driver_common {
int flags;
};
#define MDIO_DEVICE_FLAG_PHY 1
-#define to_mdio_common_driver(d) \
- container_of(d, struct mdio_driver_common, driver)
+
+static inline struct mdio_driver_common *
+to_mdio_common_driver(const struct device_driver *driver)
+{
+ return container_of(driver, struct mdio_driver_common, driver);
+}
/* struct mdio_driver: Generic MDIO driver */
struct mdio_driver {
@@ -64,9 +82,17 @@ struct mdio_driver {
/* Clears up any memory if needed */
void (*remove)(struct mdio_device *mdiodev);
+
+ /* Quiesces the device on system shutdown, turns off interrupts etc */
+ void (*shutdown)(struct mdio_device *mdiodev);
};
-#define to_mdio_driver(d) \
- container_of(to_mdio_common_driver(d), struct mdio_driver, mdiodrv)
+
+static inline struct mdio_driver *
+to_mdio_driver(const struct device_driver *driver)
+{
+ return container_of(to_mdio_common_driver(driver), struct mdio_driver,
+ mdiodrv);
+}
/* device driver data */
static inline void mdiodev_set_drvdata(struct mdio_device *mdio, void *data)
@@ -298,7 +324,7 @@ static inline u32 linkmode_adv_to_mii_10gbt_adv_t(unsigned long *advertising)
/**
* mii_10gbt_stat_mod_linkmode_lpa_t
* @advertising: target the linkmode advertisement settings
- * @adv: value of the C45 10GBASE-T AN STATUS register
+ * @lpa: value of the C45 10GBASE-T AN STATUS register
*
* A small helper function that translates C45 10GBASE-T AN STATUS register bits
* to linkmode advertisement settings. Other bits in advertising aren't changed.
@@ -314,13 +340,153 @@ static inline void mii_10gbt_stat_mod_linkmode_lpa_t(unsigned long *advertising,
advertising, lpa & MDIO_AN_10GBT_STAT_LP10G);
}
+/**
+ * mii_t1_adv_l_mod_linkmode_t
+ * @advertising: target the linkmode advertisement settings
+ * @lpa: value of the BASE-T1 Autonegotiation Advertisement [15:0] Register
+ *
+ * A small helper function that translates BASE-T1 Autonegotiation
+ * Advertisement [15:0] Register bits to linkmode advertisement settings.
+ * Other bits in advertising aren't changed.
+ */
+static inline void mii_t1_adv_l_mod_linkmode_t(unsigned long *advertising, u32 lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising,
+ lpa & MDIO_AN_T1_ADV_L_PAUSE_CAP);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising,
+ lpa & MDIO_AN_T1_ADV_L_PAUSE_ASYM);
+}
+
+/**
+ * mii_t1_adv_m_mod_linkmode_t
+ * @advertising: target the linkmode advertisement settings
+ * @lpa: value of the BASE-T1 Autonegotiation Advertisement [31:16] Register
+ *
+ * A small helper function that translates BASE-T1 Autonegotiation
+ * Advertisement [31:16] Register bits to linkmode advertisement settings.
+ * Other bits in advertising aren't changed.
+ */
+static inline void mii_t1_adv_m_mod_linkmode_t(unsigned long *advertising, u32 lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ advertising, lpa & MDIO_AN_T1_ADV_M_B10L);
+}
+
+/**
+ * linkmode_adv_to_mii_t1_adv_l_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the
+ * BASE-T1 Autonegotiation Advertisement [15:0] Register.
+ */
+static inline u32 linkmode_adv_to_mii_t1_adv_l_t(unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_L_PAUSE_CAP;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_L_PAUSE_ASYM;
+
+ return result;
+}
+
+/**
+ * linkmode_adv_to_mii_t1_adv_m_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the
+ * BASE-T1 Autonegotiation Advertisement [31:16] Register.
+ */
+static inline u32 linkmode_adv_to_mii_t1_adv_m_t(unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_M_B10L;
+
+ return result;
+}
+
int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+int __mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
+ u16 mask, u16 set);
int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum);
int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask,
+ u16 set);
+int mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
+ u16 mask, u16 set);
+
+static inline int mdiodev_read(struct mdio_device *mdiodev, u32 regnum)
+{
+ return mdiobus_read(mdiodev->bus, mdiodev->addr, regnum);
+}
+
+static inline int mdiodev_write(struct mdio_device *mdiodev, u32 regnum,
+ u16 val)
+{
+ return mdiobus_write(mdiodev->bus, mdiodev->addr, regnum, val);
+}
+
+static inline int mdiodev_modify(struct mdio_device *mdiodev, u32 regnum,
+ u16 mask, u16 set)
+{
+ return mdiobus_modify(mdiodev->bus, mdiodev->addr, regnum, mask, set);
+}
+
+static inline int mdiodev_modify_changed(struct mdio_device *mdiodev,
+ u32 regnum, u16 mask, u16 set)
+{
+ return mdiobus_modify_changed(mdiodev->bus, mdiodev->addr, regnum,
+ mask, set);
+}
+
+static inline u32 mdiobus_c45_addr(int devad, u16 regnum)
+{
+ return MII_ADDR_C45 | devad << MII_DEVADDR_C45_SHIFT | regnum;
+}
+
+static inline u16 mdiobus_c45_regad(u32 regnum)
+{
+ return FIELD_GET(MII_REGADDR_C45_MASK, regnum);
+}
+
+static inline u16 mdiobus_c45_devad(u32 regnum)
+{
+ return FIELD_GET(MII_DEVADDR_C45_MASK, regnum);
+}
+
+static inline int __mdiobus_c45_read(struct mii_bus *bus, int prtad, int devad,
+ u16 regnum)
+{
+ return __mdiobus_read(bus, prtad, mdiobus_c45_addr(devad, regnum));
+}
+
+static inline int __mdiobus_c45_write(struct mii_bus *bus, int prtad, int devad,
+ u16 regnum, u16 val)
+{
+ return __mdiobus_write(bus, prtad, mdiobus_c45_addr(devad, regnum),
+ val);
+}
+
+static inline int mdiobus_c45_read(struct mii_bus *bus, int prtad, int devad,
+ u16 regnum)
+{
+ return mdiobus_read(bus, prtad, mdiobus_c45_addr(devad, regnum));
+}
+
+static inline int mdiobus_c45_write(struct mii_bus *bus, int prtad, int devad,
+ u16 regnum, u16 val)
+{
+ return mdiobus_write(bus, prtad, mdiobus_c45_addr(devad, regnum), val);
+}
int mdiobus_register_device(struct mdio_device *mdiodev);
int mdiobus_unregister_device(struct mdio_device *mdiodev);
@@ -329,6 +495,7 @@ struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr);
/**
* mdio_module_driver() - Helper macro for registering mdio drivers
+ * @_mdio_driver: driver to register
*
* Helper macro for MDIO drivers which do not do anything special in module
* init/exit. Each module may only use this macro once, and calling it
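The clause-45 helpers added above are pure bit packing: bit 30 marks a C45 access, bits 20:16 carry the MMD device address, and bits 15:0 the register address. A standalone sketch of the same layout (devad 7 happens to match MDIO_MMD_AN, used here only as a familiar value):

#include <assert.h>
#include <stdint.h>

#define C45_FLAG		(1U << 30)
#define C45_DEVADDR_SHIFT	16

static uint32_t c45_addr(unsigned int devad, uint16_t regnum)
{
	return C45_FLAG | devad << C45_DEVADDR_SHIFT | regnum;
}

int main(void)
{
	uint32_t reg = c45_addr(7, 0x1234);

	assert(reg & C45_FLAG);
	assert(((reg >> C45_DEVADDR_SHIFT) & 0x1f) == 7);
	assert((reg & 0xffff) == 0x1234);
	return 0;
}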
diff --git a/include/linux/mdio/mdio-i2c.h b/include/linux/mdio/mdio-i2c.h
new file mode 100644
index 000000000000..65b550a6fc32
--- /dev/null
+++ b/include/linux/mdio/mdio-i2c.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MDIO I2C bridge
+ *
+ * Copyright (C) 2015 Russell King
+ */
+#ifndef MDIO_I2C_H
+#define MDIO_I2C_H
+
+struct device;
+struct i2c_adapter;
+struct mii_bus;
+
+enum mdio_i2c_proto {
+ MDIO_I2C_NONE,
+ MDIO_I2C_MARVELL_C22,
+ MDIO_I2C_C45,
+ MDIO_I2C_ROLLBALL,
+};
+
+struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c,
+ enum mdio_i2c_proto protocol);
+
+#endif
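A hedged sketch of allocating the bridge declared above from an SFP-style driver; the probe context and the choice of MDIO_I2C_MARVELL_C22 are assumptions, and the caller is still responsible for naming and registering the returned bus.

#include <linux/mdio/mdio-i2c.h>

static struct mii_bus *foo_make_bus(struct device *dev,
				    struct i2c_adapter *i2c)
{
	return mdio_i2c_alloc(dev, i2c, MDIO_I2C_MARVELL_C22);
}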
diff --git a/include/linux/mdio/mdio-mscc-miim.h b/include/linux/mdio/mdio-mscc-miim.h
new file mode 100644
index 000000000000..5b4ed2c3cbb9
--- /dev/null
+++ b/include/linux/mdio/mdio-mscc-miim.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Driver for the MDIO interface of Microsemi network switches.
+ *
+ * Author: Colin Foster <colin.foster@in-advantage.com>
+ * Copyright (C) 2021 Innovative Advantage
+ */
+#ifndef MDIO_MSCC_MIIM_H
+#define MDIO_MSCC_MIIM_H
+
+#include <linux/device.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+
+int mscc_miim_setup(struct device *device, struct mii_bus **bus,
+ const char *name, struct regmap *mii_regmap,
+ int status_offset);
+
+#endif
diff --git a/drivers/net/phy/mdio-xgene.h b/include/linux/mdio/mdio-xgene.h
index 8af93ada8b64..9e588965dc83 100644
--- a/drivers/net/phy/mdio-xgene.h
+++ b/include/linux/mdio/mdio-xgene.h
@@ -8,6 +8,10 @@
#ifndef __MDIO_XGENE_H__
#define __MDIO_XGENE_H__
+#include <linux/bits.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
#define BLOCK_XG_MDIO_CSR_OFFSET 0x5000
#define BLOCK_DIAG_CSR_OFFSET 0xd000
#define XGENET_CONFIG_REG_ADDR 0x20
diff --git a/include/linux/mei_aux.h b/include/linux/mei_aux.h
new file mode 100644
index 000000000000..506912ad363b
--- /dev/null
+++ b/include/linux/mei_aux.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Intel Corporation. All rights reserved.
+ */
+#ifndef _LINUX_MEI_AUX_H
+#define _LINUX_MEI_AUX_H
+
+#include <linux/auxiliary_bus.h>
+
+/**
+ * struct mei_aux_device - mei auxiliary device
+ * @aux_dev: - auxiliary device object
+ * @irq: interrupt driving the mei auxiliary device
+ * @bar: mmio resource bar reserved to mei auxiliary device
+ * @ext_op_mem: resource for extended operational memory
+ * used in graphics PXP mode.
+ * @slow_firmware: The device has slow underlying firmware.
+ * Such firmware requires larger operation timeouts.
+ */
+struct mei_aux_device {
+ struct auxiliary_device aux_dev;
+ int irq;
+ struct resource bar;
+ struct resource ext_op_mem;
+ bool slow_firmware;
+};
+
+#define auxiliary_dev_to_mei_aux_dev(auxiliary_dev) \
+ container_of(auxiliary_dev, struct mei_aux_device, aux_dev)
+
+#endif /* _LINUX_MEI_AUX_H */
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index 52aa4821093a..df1fab44ea5c 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -68,7 +68,7 @@ struct mei_cl_driver {
int (*probe)(struct mei_cl_device *cldev,
const struct mei_cl_device_id *id);
- int (*remove)(struct mei_cl_device *cldev);
+ void (*remove)(struct mei_cl_device *cldev);
};
int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
@@ -91,10 +91,17 @@ void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv);
mei_cldev_driver_register,\
mei_cldev_driver_unregister)
-ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length);
+ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf,
+ size_t length);
ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length);
ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
size_t length);
+ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
+ size_t length, u8 vtag);
+ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
+ u8 *vtag);
+ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
+ size_t length, u8 *vtag);
int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb);
int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
@@ -108,6 +115,9 @@ void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data);
int mei_cldev_enable(struct mei_cl_device *cldev);
int mei_cldev_disable(struct mei_cl_device *cldev);
-bool mei_cldev_enabled(struct mei_cl_device *cldev);
+bool mei_cldev_enabled(const struct mei_cl_device *cldev);
+
+void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size);
+int mei_cldev_dma_unmap(struct mei_cl_device *cldev);
#endif /* _LINUX_MEI_CL_BUS_H */
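With remove() now returning void, teardown failures can no longer be propagated to the bus core. A minimal client-driver sketch against the prototypes above; the foo_* names are placeholders and the id_table is elided.

#include <linux/mei_cl_bus.h>
#include <linux/module.h>

static int foo_probe(struct mei_cl_device *cldev,
		     const struct mei_cl_device_id *id)
{
	return mei_cldev_enable(cldev);
}

static void foo_remove(struct mei_cl_device *cldev)
{
	/* best effort only: the void prototype swallows errors */
	mei_cldev_disable(cldev);
}

static struct mei_cl_driver foo_driver = {
	.name	= KBUILD_MODNAME,
	.probe	= foo_probe,
	.remove	= foo_remove,
};
module_mei_cl_driver(foo_driver);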
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 5c4a18a91f89..ae4526389261 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -16,10 +16,6 @@
#include <asm/mem_encrypt.h>
-#else /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
-
-static inline bool mem_encrypt_active(void) { return false; }
-
#endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */
#ifdef CONFIG_AMD_MEM_ENCRYPT
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 079d17d96410..50ad19662a32 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
-#ifdef __KERNEL__
/*
* Logical memory blocks.
@@ -28,20 +27,31 @@ extern unsigned long long max_possible_pfn;
/**
* enum memblock_flags - definition of memory region attributes
* @MEMBLOCK_NONE: no special request
- * @MEMBLOCK_HOTPLUG: hotpluggable region
+ * @MEMBLOCK_HOTPLUG: memory region indicated in the firmware-provided memory
+ * map during early boot as hot(un)pluggable system RAM (e.g., memory range
+ * that might get hotunplugged later). With "movable_node" set on the kernel
+ * command line, the kernel tries to keep this memory region hotunpluggable.
+ * Does not apply to memblocks added ("hotplugged") after early boot.
* @MEMBLOCK_MIRROR: mirrored region
- * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
+ * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as
+ * reserved in the memory map; refer to memblock_mark_nomap() description
+ * for further details
+ * @MEMBLOCK_DRIVER_MANAGED: memory region that is always detected and added
+ * via a driver, and never indicated in the firmware-provided memory map as
+ * system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the
+ * kernel resource tree.
*/
enum memblock_flags {
MEMBLOCK_NONE = 0x0, /* No special request */
MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */
MEMBLOCK_MIRROR = 0x2, /* mirrored region */
MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */
+ MEMBLOCK_DRIVER_MANAGED = 0x8, /* always detected via a driver */
};
/**
* struct memblock_region - represents a memory region
- * @base: physical address of the region
+ * @base: base address of the region
* @size: size of the region
* @flags: memory region attributes
* @nid: NUMA node id
@@ -50,7 +60,7 @@ struct memblock_region {
phys_addr_t base;
phys_addr_t size;
enum memblock_flags flags;
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+#ifdef CONFIG_NUMA
int nid;
#endif
};
@@ -75,22 +85,17 @@ struct memblock_type {
* struct memblock - memblock allocator metadata
* @bottom_up: is bottom up direction?
* @current_limit: physical address of the current allocation limit
- * @memory: usabe memory regions
+ * @memory: usable memory regions
* @reserved: reserved memory regions
- * @physmem: all physical memory
*/
struct memblock {
bool bottom_up; /* is bottom up direction? */
phys_addr_t current_limit;
struct memblock_type memory;
struct memblock_type reserved;
-#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
- struct memblock_type physmem;
-#endif
};
extern struct memblock memblock;
-extern int memblock_debug;
#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
@@ -102,16 +107,12 @@ void memblock_discard(void);
static inline void memblock_discard(void) {}
#endif
-#define memblock_dbg(fmt, ...) \
- if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
-
-phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
- phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
-int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
+int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid,
+ enum memblock_flags flags);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
-int memblock_free(phys_addr_t base, phys_addr_t size);
+int memblock_phys_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
@@ -125,7 +126,8 @@ int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
-unsigned long memblock_free_all(void);
+void memblock_free_all(void);
+void memblock_free(void *ptr, size_t size);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);
@@ -140,13 +142,34 @@ void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
struct memblock_type *type_b, phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid);
-void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
- phys_addr_t *out_end);
+void memblock_free_late(phys_addr_t base, phys_addr_t size);
+
+#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
+static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
+ phys_addr_t *out_start,
+ phys_addr_t *out_end)
+{
+ extern struct memblock_type physmem;
-void __memblock_free_late(phys_addr_t base, phys_addr_t size);
+ __next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,
+ out_start, out_end, NULL);
+}
/**
- * for_each_mem_range - iterate through memblock areas from type_a and not
+ * for_each_physmem_range - iterate through physmem areas not included in type.
+ * @i: u64 used as loop variable
+ * @type: ptr to memblock_type which excludes from the iteration, can be %NULL
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ */
+#define for_each_physmem_range(i, type, p_start, p_end) \
+ for (i = 0, __next_physmem_range(&i, type, p_start, p_end); \
+ i != (u64)ULLONG_MAX; \
+ __next_physmem_range(&i, type, p_start, p_end))
+#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */
+
+/**
+ * __for_each_mem_range - iterate through memblock areas from type_a and not
* included in type_b. Or just type_a if type_b is NULL.
* @i: u64 used as loop variable
* @type_a: ptr to memblock_type to iterate
@@ -157,7 +180,7 @@ void __memblock_free_late(phys_addr_t base, phys_addr_t size);
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL
*/
-#define for_each_mem_range(i, type_a, type_b, nid, flags, \
+#define __for_each_mem_range(i, type_a, type_b, nid, flags, \
p_start, p_end, p_nid) \
for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid); \
@@ -166,7 +189,7 @@ void __memblock_free_late(phys_addr_t base, phys_addr_t size);
p_start, p_end, p_nid))
/**
- * for_each_mem_range_rev - reverse iterate through memblock areas from
+ * __for_each_mem_range_rev - reverse iterate through memblock areas from
* type_a and not included in type_b. Or just type_a if type_b is NULL.
* @i: u64 used as loop variable
* @type_a: ptr to memblock_type to iterate
@@ -177,17 +200,40 @@ void __memblock_free_late(phys_addr_t base, phys_addr_t size);
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL
*/
-#define for_each_mem_range_rev(i, type_a, type_b, nid, flags, \
- p_start, p_end, p_nid) \
+#define __for_each_mem_range_rev(i, type_a, type_b, nid, flags, \
+ p_start, p_end, p_nid) \
for (i = (u64)ULLONG_MAX, \
- __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
+ __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid); \
i != (u64)ULLONG_MAX; \
__next_mem_range_rev(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid))
/**
- * for_each_reserved_mem_region - iterate over all reserved memblock areas
+ * for_each_mem_range - iterate through memory areas.
+ * @i: u64 used as loop variable
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ */
+#define for_each_mem_range(i, p_start, p_end) \
+ __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, \
+ MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED, \
+ p_start, p_end, NULL)
+
+/**
+ * for_each_mem_range_rev - reverse iterate through memblock areas from
+ * type_a and not included in type_b. Or just type_a if type_b is NULL.
+ * @i: u64 used as loop variable
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ */
+#define for_each_mem_range_rev(i, p_start, p_end) \
+ __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
+ MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED,\
+ p_start, p_end, NULL)
+
+/**
+ * for_each_reserved_mem_range - iterate over all reserved memblock areas
* @i: u64 used as loop variable
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
@@ -195,10 +241,9 @@ void __memblock_free_late(phys_addr_t base, phys_addr_t size);
* Walks over reserved areas of memblock. Available as soon as memblock
* is initialized.
*/
-#define for_each_reserved_mem_region(i, p_start, p_end) \
- for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end); \
- i != (u64)ULLONG_MAX; \
- __next_reserved_mem_region(&i, p_start, p_end))
+#define for_each_reserved_mem_range(i, p_start, p_end) \
+ __for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE, \
+ MEMBLOCK_NONE, p_start, p_end, NULL)
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
@@ -215,7 +260,11 @@ static inline bool memblock_is_nomap(struct memblock_region *m)
return m->flags & MEMBLOCK_NOMAP;
}
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+static inline bool memblock_is_driver_managed(struct memblock_region *m)
+{
+ return m->flags & MEMBLOCK_DRIVER_MANAGED;
+}
+
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
@@ -234,14 +283,13 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \
for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
unsigned long *out_spfn,
unsigned long *out_epfn);
/**
- * for_each_free_mem_range_in_zone - iterate through zone specific free
+ * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
* memblock areas
* @i: u64 used as loop variable
* @zone: zone in which all of the memory blocks reside
@@ -261,7 +309,7 @@ void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
__next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
/**
- * for_each_free_mem_range_in_zone_from - iterate through zone specific
+ * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
* free memblock areas from a given point
* @i: u64 used as loop variable
* @zone: zone in which all of the memory blocks reside
@@ -275,6 +323,9 @@ void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
for (; i != U64_MAX; \
__next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
+
+int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);
+
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
/**
@@ -290,8 +341,8 @@ void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
* soon as memblock is initialized.
*/
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid) \
- for_each_mem_range(i, &memblock.memory, &memblock.reserved, \
- nid, flags, p_start, p_end, p_nid)
+ __for_each_mem_range(i, &memblock.memory, &memblock.reserved, \
+ nid, flags, p_start, p_end, p_nid)
/**
* for_each_free_mem_range_reverse - rev-iterate through free memblock areas
@@ -307,13 +358,13 @@ void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
*/
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end, \
p_nid) \
- for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
- nid, flags, p_start, p_end, p_nid)
+ __for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
+ nid, flags, p_start, p_end, p_nid)
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
struct memblock_type *type, int nid);
+#ifdef CONFIG_NUMA
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
r->nid = nid;
@@ -332,12 +383,12 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
{
return 0;
}
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+#endif /* CONFIG_NUMA */
/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
-#define MEMBLOCK_ALLOC_KASAN 1
+#define MEMBLOCK_ALLOC_NOLEAKTRACE 1
/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0
@@ -348,10 +399,13 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
phys_addr_t start, phys_addr_t end);
+phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
+ phys_addr_t align, phys_addr_t start,
+ phys_addr_t end, int nid, bool exact_nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
-static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
- phys_addr_t align)
+static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
+ phys_addr_t align)
{
return memblock_phys_alloc_range(size, align, 0,
MEMBLOCK_ALLOC_ACCESSIBLE);
@@ -367,13 +421,13 @@ void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr,
int nid);
-static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align)
+static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
{
return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
-static inline void * __init memblock_alloc_raw(phys_addr_t size,
+static inline void *memblock_alloc_raw(phys_addr_t size,
phys_addr_t align)
{
return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
@@ -381,7 +435,7 @@ static inline void * __init memblock_alloc_raw(phys_addr_t size,
NUMA_NO_NODE);
}
-static inline void * __init memblock_alloc_from(phys_addr_t size,
+static inline void *memblock_alloc_from(phys_addr_t size,
phys_addr_t align,
phys_addr_t min_addr)
{
@@ -389,41 +443,24 @@ static inline void * __init memblock_alloc_from(phys_addr_t size,
MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
-static inline void * __init memblock_alloc_low(phys_addr_t size,
+static inline void *memblock_alloc_low(phys_addr_t size,
phys_addr_t align)
{
return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}
-static inline void * __init memblock_alloc_node(phys_addr_t size,
+static inline void *memblock_alloc_node(phys_addr_t size,
phys_addr_t align, int nid)
{
return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
-static inline void __init memblock_free_early(phys_addr_t base,
- phys_addr_t size)
-{
- memblock_free(base, size);
-}
-
-static inline void __init memblock_free_early_nid(phys_addr_t base,
- phys_addr_t size, int nid)
-{
- memblock_free(base, size);
-}
-
-static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
-{
- __memblock_free_late(base, size);
-}
-
/*
* Set the allocation direction to bottom-up or top-down.
*/
-static inline void __init memblock_set_bottom_up(bool enable)
+static inline __init_memblock void memblock_set_bottom_up(bool enable)
{
memblock.bottom_up = enable;
}
@@ -433,14 +470,13 @@ static inline void __init memblock_set_bottom_up(bool enable)
* if this is true, that said, memblock will allocate memory
* in bottom-up direction.
*/
-static inline bool memblock_bottom_up(void)
+static inline __init_memblock bool memblock_bottom_up(void)
{
return memblock.bottom_up;
}
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
-phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
@@ -452,13 +488,7 @@ bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
-extern void __memblock_dump_all(void);
-
-static inline void memblock_dump_all(void)
-{
- if (memblock_debug)
- __memblock_dump_all();
-}
+void memblock_dump_all(void);
/**
* memblock_set_current_limit - Set the current allocation limit to allow
@@ -523,15 +553,23 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
return PFN_UP(reg->base + reg->size);
}
-#define for_each_memblock(memblock_type, region) \
- for (region = memblock.memblock_type.regions; \
- region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
+/**
+ * for_each_mem_region - iterate over memory regions
+ * @region: loop variable
+ */
+#define for_each_mem_region(region) \
+ for (region = memblock.memory.regions; \
+ region < (memblock.memory.regions + memblock.memory.cnt); \
region++)
-#define for_each_memblock_type(i, memblock_type, rgn) \
- for (i = 0, rgn = &memblock_type->regions[0]; \
- i < memblock_type->cnt; \
- i++, rgn = &memblock_type->regions[i])
+/**
+ * for_each_reserved_mem_region - iterate over reserved memory regions
+ * @region: loop variable
+ */
+#define for_each_reserved_mem_region(region) \
+ for (region = memblock.reserved.regions; \
+ region < (memblock.reserved.regions + memblock.reserved.cnt); \
+ region++)
extern void *alloc_large_system_hash(const char *tablename,
unsigned long bucketsize,
@@ -566,6 +604,5 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
}
#endif
-#endif /* __KERNEL__ */
#endif /* _LINUX_MEMBLOCK_H */
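The simplified iterators above drop the explicit type/nid/flags plumbing for the common case. A minimal early-boot sketch, assuming only the macros declared in this header:

#include <linux/memblock.h>
#include <linux/printk.h>

static void __init foo_dump_usable_memory(void)
{
	phys_addr_t start, end;
	u64 i;

	/* skips hotpluggable and driver-managed ranges by construction */
	for_each_mem_range(i, &start, &end)
		pr_info("usable: %pa - %pa\n", &start, &end);
}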
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index a7a0a1a5c8d5..e1644a24009c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -23,19 +23,20 @@
#include <linux/page-flags.h>
struct mem_cgroup;
+struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;
/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
- MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
- MEMCG_RSS,
- MEMCG_RSS_HUGE,
- MEMCG_SWAP,
+ MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
MEMCG_SOCK,
- /* XXX: why are these zone and not node counters? */
- MEMCG_KERNEL_STACK_KB,
+ MEMCG_PERCPU_B,
+ MEMCG_VMALLOC,
+ MEMCG_KMEM,
+ MEMCG_ZSWAP_B,
+ MEMCG_ZSWAPPED,
MEMCG_NR_STAT,
};
@@ -45,17 +46,13 @@ enum memcg_memory_event {
MEMCG_MAX,
MEMCG_OOM,
MEMCG_OOM_KILL,
+ MEMCG_OOM_GROUP_KILL,
+ MEMCG_SWAP_HIGH,
MEMCG_SWAP_MAX,
MEMCG_SWAP_FAIL,
MEMCG_NR_MEMORY_EVENTS,
};
-enum mem_cgroup_protection {
- MEMCG_PROT_NONE,
- MEMCG_PROT_LOW,
- MEMCG_PROT_MIN,
-};
-
struct mem_cgroup_reclaim_cookie {
pg_data_t *pgdat;
unsigned int generation;
@@ -73,8 +70,8 @@ struct mem_cgroup_id {
/*
* Per memcg event counter is incremented at every pagein/pageout. With THP,
- * it will be incremated by the number of pages. This counter is used for
- * for trigger some periodic events. This is straightforward and better
+ * it will be incremented by the number of pages. This counter is used
+ * to trigger some periodic events. This is straightforward and better
* than using jiffies etc. to handle periodic memcg event.
*/
enum mem_cgroup_events_target {
@@ -83,12 +80,8 @@ enum mem_cgroup_events_target {
MEM_CGROUP_NTARGETS,
};
-struct memcg_vmstats_percpu {
- long stat[MEMCG_NR_STAT];
- unsigned long events[NR_VM_EVENT_ITEMS];
- unsigned long nr_page_events;
- unsigned long targets[MEM_CGROUP_NTARGETS];
-};
+struct memcg_vmstats_percpu;
+struct memcg_vmstats;
struct mem_cgroup_reclaim_iter {
struct mem_cgroup *position;
@@ -96,17 +89,30 @@ struct mem_cgroup_reclaim_iter {
unsigned int generation;
};
-struct lruvec_stat {
- long count[NR_VM_NODE_STAT_ITEMS];
-};
-
/*
- * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
- * which have elements charged to this memcg.
+ * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
+ * shrinkers, which have elements charged to this memcg.
*/
-struct memcg_shrinker_map {
+struct shrinker_info {
struct rcu_head rcu;
- unsigned long map[0];
+ atomic_long_t *nr_deferred;
+ unsigned long *map;
+};
+
+struct lruvec_stats_percpu {
+ /* Local (CPU and cgroup) state */
+ long state[NR_VM_NODE_STAT_ITEMS];
+
+ /* Delta calculation for lockless upward propagation */
+ long state_prev[NR_VM_NODE_STAT_ITEMS];
+};
+
+struct lruvec_stats {
+ /* Aggregated (CPU and subtree) state */
+ long state[NR_VM_NODE_STAT_ITEMS];
+
+ /* Pending child counts during tree propagation */
+ long state_pending[NR_VM_NODE_STAT_ITEMS];
};
/*
@@ -115,18 +121,14 @@ struct memcg_shrinker_map {
struct mem_cgroup_per_node {
struct lruvec lruvec;
- /* Legacy local VM stats */
- struct lruvec_stat __percpu *lruvec_stat_local;
-
- /* Subtree VM stats (batched updates) */
- struct lruvec_stat __percpu *lruvec_stat_cpu;
- atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];
+ struct lruvec_stats_percpu __percpu *lruvec_stats_percpu;
+ struct lruvec_stats lruvec_stats;
unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
struct mem_cgroup_reclaim_iter iter;
- struct memcg_shrinker_map __rcu *shrinker_map;
+ struct shrinker_info __rcu *shrinker_info;
struct rb_node tree_node; /* RB tree node */
unsigned long usage_in_excess;/* Set to the value by which */
@@ -148,7 +150,7 @@ struct mem_cgroup_threshold_ary {
/* Size of entries[] */
unsigned int size;
/* Array of thresholds */
- struct mem_cgroup_threshold entries[0];
+ struct mem_cgroup_threshold entries[];
};
struct mem_cgroup_thresholds {
@@ -162,21 +164,6 @@ struct mem_cgroup_thresholds {
struct mem_cgroup_threshold_ary *spare;
};
-enum memcg_kmem_state {
- KMEM_NONE,
- KMEM_ALLOCATED,
- KMEM_ONLINE,
-};
-
-#if defined(CONFIG_SMP)
-struct memcg_padding {
- char x[0];
-} ____cacheline_internodealigned_in_smp;
-#define MEMCG_PADDING(name) struct memcg_padding name;
-#else
-#define MEMCG_PADDING(name)
-#endif
-
/*
* Remember four most recent foreign writebacks with dirty pages in this
* cgroup. Inode sharing is expected to be uncommon and, even if we miss
@@ -195,6 +182,22 @@ struct memcg_cgwb_frn {
};
/*
+ * Bucket for arbitrarily byte-sized objects charged to a memory
+ * cgroup. The bucket can be reparented in one piece when the cgroup
+ * is destroyed, without having to round up the individual references
+ * of all live memory objects in the wild.
+ */
+struct obj_cgroup {
+ struct percpu_ref refcnt;
+ struct mem_cgroup *memcg;
+ atomic_t nr_charged_bytes;
+ union {
+ struct list_head list; /* protected by objcg_lock */
+ struct rcu_head rcu;
+ };
+};
+
+/*
* The memory controller data structure. The memory controller controls both
* page cache and RSS per cgroup. We would eventually like to provide
* statistics based on the statistics developed by Rik Van Riel for clock-pro,
@@ -207,31 +210,30 @@ struct mem_cgroup {
struct mem_cgroup_id id;
/* Accounted resources */
- struct page_counter memory;
- struct page_counter swap;
+ struct page_counter memory; /* Both v1 & v2 */
- /* Legacy consumer-oriented counters */
- struct page_counter memsw;
- struct page_counter kmem;
- struct page_counter tcpmem;
+ union {
+ struct page_counter swap; /* v2 only */
+ struct page_counter memsw; /* v1 only */
+ };
- /* Upper bound of normal memory consumption range */
- unsigned long high;
+ /* Legacy consumer-oriented counters */
+ struct page_counter kmem; /* v1 only */
+ struct page_counter tcpmem; /* v1 only */
/* Range enforcement for interrupt charges */
struct work_struct high_work;
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+ unsigned long zswap_max;
+#endif
+
unsigned long soft_limit;
/* vmpressure notifications */
struct vmpressure vmpressure;
/*
- * Should the accounting and control be hierarchical, per subtree?
- */
- bool use_hierarchy;
-
- /*
* Should the OOM killer kill all belonging tasks, had it kill one?
*/
bool oom_group;
@@ -272,24 +274,10 @@ struct mem_cgroup {
spinlock_t move_lock;
unsigned long move_lock_flags;
- MEMCG_PADDING(_pad1_);
-
- /*
- * set > 0 if pages under this cgroup are moving to other cgroup.
- */
- atomic_t moving_account;
- struct task_struct *move_lock_task;
-
- /* Legacy local VM stats and events */
- struct memcg_vmstats_percpu __percpu *vmstats_local;
-
- /* Subtree VM stats and events (batched updates) */
- struct memcg_vmstats_percpu __percpu *vmstats_percpu;
-
- MEMCG_PADDING(_pad2_);
+ CACHELINE_PADDING(_pad1_);
- atomic_long_t vmstats[MEMCG_NR_STAT];
- atomic_long_t vmevents[NR_VM_EVENT_ITEMS];
+ /* memory.stat */
+ struct memcg_vmstats *vmstats;
/* memory.events */
atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
@@ -302,12 +290,22 @@ struct mem_cgroup {
int tcpmem_pressure;
#ifdef CONFIG_MEMCG_KMEM
- /* Index in the kmem_cache->memcg_params.memcg_caches array */
int kmemcg_id;
- enum memcg_kmem_state kmem_state;
- struct list_head kmem_caches;
+ struct obj_cgroup __rcu *objcg;
+ /* list of inherited objcgs, protected by objcg_lock */
+ struct list_head objcg_list;
#endif
+ CACHELINE_PADDING(_pad2_);
+
+ /*
+ * set > 0 if pages under this cgroup are moving to other cgroup.
+ */
+ atomic_t moving_account;
+ struct task_struct *move_lock_task;
+
+ struct memcg_vmstats_percpu __percpu *vmstats_percpu;
+
#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
@@ -322,18 +320,238 @@ struct mem_cgroup {
struct deferred_split deferred_split_queue;
#endif
- struct mem_cgroup_per_node *nodeinfo[0];
- /* WARNING: nodeinfo must be the last member here */
+#ifdef CONFIG_LRU_GEN
+ /* per-memcg mm_struct list */
+ struct lru_gen_mm_list mm_list;
+#endif
+
+ struct mem_cgroup_per_node *nodeinfo[];
};
/*
- * size of first charge trial. "32" comes from vmscan.c's magic value.
- * TODO: maybe necessary to use big numbers in big irons.
+ * size of first charge trial.
+ * TODO: it may be necessary to use larger numbers on big iron, or to size
+ * this batch dynamically based on the workload.
*/
-#define MEMCG_CHARGE_BATCH 32U
+#define MEMCG_CHARGE_BATCH 64U
extern struct mem_cgroup *root_mem_cgroup;
+enum page_memcg_data_flags {
+ /* page->memcg_data is a pointer to an objcgs vector */
+ MEMCG_DATA_OBJCGS = (1UL << 0),
+ /* page has been accounted as a non-slab kernel page */
+ MEMCG_DATA_KMEM = (1UL << 1),
+ /* the next bit after the last actual flag */
+ __NR_MEMCG_DATA_FLAGS = (1UL << 2),
+};
+
+#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)
+
+static inline bool folio_memcg_kmem(struct folio *folio);
+
+/*
+ * After the initialization objcg->memcg is always pointing at
+ * a valid memcg, but can be atomically swapped to the parent memcg.
+ *
+ * The caller must ensure that the returned memcg won't be released:
+ * e.g. acquire the rcu_read_lock or css_set_lock.
+ */
+static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
+{
+ return READ_ONCE(objcg->memcg);
+}
+
+/*
+ * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
+ * @folio: Pointer to the folio.
+ *
+ * Returns a pointer to the memory cgroup associated with the folio,
+ * or NULL. This function assumes that the folio is known to have a
+ * proper memory cgroup pointer. It's not safe to call this function
+ * against some types of folios, e.g. slab folios or ex-slab folios or
+ * kmem folios.
+ */
+static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
+{
+ unsigned long memcg_data = folio->memcg_data;
+
+ VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
+ VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
+ VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);
+
+ return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+}
+
+/*
+ * __folio_objcg - get the object cgroup associated with a kmem folio.
+ * @folio: Pointer to the folio.
+ *
+ * Returns a pointer to the object cgroup associated with the folio,
+ * or NULL. This function assumes that the folio is known to have a
+ * proper object cgroup pointer. It's not safe to call this function
+ * against some types of folios, e.g. slab folios or ex-slab folios or
+ * LRU folios.
+ */
+static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
+{
+ unsigned long memcg_data = folio->memcg_data;
+
+ VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
+ VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
+ VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);
+
+ return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+}
+
+/*
+ * folio_memcg - Get the memory cgroup associated with a folio.
+ * @folio: Pointer to the folio.
+ *
+ * Returns a pointer to the memory cgroup associated with the folio,
+ * or NULL. This function assumes that the folio is known to have a
+ * proper memory cgroup pointer. It's not safe to call this function
+ * against some types of folios, e.g. slab folios or ex-slab folios.
+ *
+ * For a non-kmem folio any of the following ensures folio and memcg binding
+ * stability:
+ *
+ * - the folio lock
+ * - LRU isolation
+ * - lock_page_memcg()
+ * - exclusive reference
+ * - mem_cgroup_trylock_pages()
+ *
+ * For a kmem folio a caller should hold an rcu read lock to protect memcg
+ * associated with a kmem folio from being released.
+ */
+static inline struct mem_cgroup *folio_memcg(struct folio *folio)
+{
+ if (folio_memcg_kmem(folio))
+ return obj_cgroup_memcg(__folio_objcg(folio));
+ return __folio_memcg(folio);
+}
+
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+ return folio_memcg(page_folio(page));
+}
+
+/**
+ * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
+ * @folio: Pointer to the folio.
+ *
+ * This function assumes that the folio is known to have a
+ * proper memory cgroup pointer. It's not safe to call this function
+ * against some types of folios, e.g. slab folios or ex-slab folios.
+ *
+ * Return: A pointer to the memory cgroup associated with the folio,
+ * or NULL.
+ */
+static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
+{
+ unsigned long memcg_data = READ_ONCE(folio->memcg_data);
+
+ VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (memcg_data & MEMCG_DATA_KMEM) {
+ struct obj_cgroup *objcg;
+
+ objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+ return obj_cgroup_memcg(objcg);
+ }
+
+ return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+}
+
+/*
+ * page_memcg_check - get the memory cgroup associated with a page
+ * @page: a pointer to the page struct
+ *
+ * Returns a pointer to the memory cgroup associated with the page,
+ * or NULL. This function unlike page_memcg() can take any page
+ * as an argument. It has to be used in cases when it's not known if a page
+ * has an associated memory cgroup pointer or an object cgroups vector or
+ * an object cgroup.
+ *
+ * For a non-kmem page any of the following ensures page and memcg binding
+ * stability:
+ *
+ * - the page lock
+ * - LRU isolation
+ * - lock_page_memcg()
+ * - exclusive reference
+ * - mem_cgroup_trylock_pages()
+ *
+ * For a kmem page a caller should hold an rcu read lock to protect memcg
+ * associated with a kmem page from being released.
+ */
+static inline struct mem_cgroup *page_memcg_check(struct page *page)
+{
+ /*
+ * Because page->memcg_data might be changed asynchronously
+ * for slab pages, READ_ONCE() should be used here.
+ */
+ unsigned long memcg_data = READ_ONCE(page->memcg_data);
+
+ if (memcg_data & MEMCG_DATA_OBJCGS)
+ return NULL;
+
+ if (memcg_data & MEMCG_DATA_KMEM) {
+ struct obj_cgroup *objcg;
+
+ objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+ return obj_cgroup_memcg(objcg);
+ }
+
+ return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+}
+
+static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
+{
+ struct mem_cgroup *memcg;
+
+ rcu_read_lock();
+retry:
+ memcg = obj_cgroup_memcg(objcg);
+ if (unlikely(!css_tryget(&memcg->css)))
+ goto retry;
+ rcu_read_unlock();
+
+ return memcg;
+}
+
+#ifdef CONFIG_MEMCG_KMEM
+/*
+ * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
+ * @folio: Pointer to the folio.
+ *
+ * Checks if the folio has the MemcgKmem flag set. The caller must ensure
+ * that the folio has an associated memory cgroup. It's not safe to call
+ * this function against some types of folios, e.g. slab folios.
+ */
+static inline bool folio_memcg_kmem(struct folio *folio)
+{
+ VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
+ VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
+ return folio->memcg_data & MEMCG_DATA_KMEM;
+}
+
+
+static inline bool folio_memcg_kmem(struct folio *folio)
+{
+ return false;
+}
+
+#endif
+
+static inline bool PageMemcgKmem(struct page *page)
+{
+ return folio_memcg_kmem(page_folio(page));
+}
+
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
return (memcg == root_mem_cgroup);
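The flag scheme above works because page->memcg_data tags a pointer with its two low bits, which are guaranteed free by the alignment of the pointed-to objects. A userspace sketch of the same arithmetic, with stand-in names:

#include <assert.h>
#include <stdint.h>

#define DATA_OBJCGS	(1UL << 0)	/* low-bit flags, as above */
#define DATA_KMEM	(1UL << 1)
#define FLAGS_MASK	((1UL << 2) - 1)

int main(void)
{
	static uint64_t memcg;	/* stands in for struct mem_cgroup; 8-byte aligned */
	uintptr_t data = (uintptr_t)&memcg | DATA_KMEM;

	assert(data & DATA_KMEM);		/* accounted as a kmem page */
	assert(!(data & DATA_OBJCGS));		/* not a slab objcg vector */
	assert((void *)(data & ~FLAGS_MASK) == (void *)&memcg);
	return 0;
}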
@@ -344,49 +562,147 @@ static inline bool mem_cgroup_disabled(void)
return !cgroup_subsys_enabled(memory_cgrp_subsys);
}
-static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
- bool in_low_reclaim)
+static inline void mem_cgroup_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg,
+ unsigned long *min,
+ unsigned long *low)
{
+ *min = *low = 0;
+
if (mem_cgroup_disabled())
- return 0;
+ return;
+
+ /*
+ * There is no reclaim protection applied to a targeted reclaim.
+ * We are special casing this specific case here because
+ * mem_cgroup_protected calculation is not robust enough to keep
+ * the protection invariant for calculated effective values for
+ * parallel reclaimers with different reclaim target. This is
+ * especially a problem for tail memcgs (as they have pages on LRU)
+ * which would want to have effective values 0 for targeted reclaim
+ * but a different value for external reclaim.
+ *
+ * Example
+ * Let's have global and A's reclaim in parallel:
+ * |
+ * A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
+ * |\
+ * | C (low = 1G, usage = 2.5G)
+ * B (low = 1G, usage = 0.5G)
+ *
+ * For the global reclaim
+ * A.elow = A.low
+ * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
+ * C.elow = min(C.usage, C.low)
+ *
+ * With the effective values resetting we have A reclaim
+ * A.elow = 0
+ * B.elow = B.low
+ * C.elow = C.low
+ *
+ * If the global reclaim races with A's reclaim then
+ * B.elow = C.elow = 0 because children_low_usage > A.elow
+ * is possible and reclaiming B would violate the protection.
+ */
+ if (root == memcg)
+ return;
+
+ *min = READ_ONCE(memcg->memory.emin);
+ *low = READ_ONCE(memcg->memory.elow);
+}
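
The old single-value helper is replaced by out-parameters so a reclaimer can see both thresholds at once. A minimal reclaim-side sketch, assuming the caller only wants a yes/no answer (function name hypothetical):

/* Sketch: is @memcg still fully protected from reclaim under @root? */
static bool memcg_fully_protected(struct mem_cgroup *root,
				  struct mem_cgroup *memcg)
{
	unsigned long min, low;

	mem_cgroup_protection(root, memcg, &min, &low);
	return page_counter_read(&memcg->memory) <= max(min, low);
}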
+
+void mem_cgroup_calculate_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg);
+
+static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
+{
+ /*
+ * The root memcg doesn't account charges, and doesn't support
+ * protection.
+ */
+ return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
+}
- if (in_low_reclaim)
- return READ_ONCE(memcg->memory.emin);
+static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
+{
+ if (!mem_cgroup_supports_protection(memcg))
+ return false;
- return max(READ_ONCE(memcg->memory.emin),
- READ_ONCE(memcg->memory.elow));
+ return READ_ONCE(memcg->memory.elow) >=
+ page_counter_read(&memcg->memory);
}
-enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
- struct mem_cgroup *memcg);
+static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
+{
+ if (!mem_cgroup_supports_protection(memcg))
+ return false;
-int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask, struct mem_cgroup **memcgp,
- bool compound);
-int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask, struct mem_cgroup **memcgp,
- bool compound);
-void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
- bool lrucare, bool compound);
-void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
- bool compound);
-void mem_cgroup_uncharge(struct page *page);
-void mem_cgroup_uncharge_list(struct list_head *page_list);
+ return READ_ONCE(memcg->memory.emin) >=
+ page_counter_read(&memcg->memory);
+}
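
A hedged sketch of how a reclaimer might combine the two predicates; the "low reclaim" override flag is hypothetical and stands in for state that reclaim code tracks itself:

/* memory.min is a hard floor; memory.low is best-effort. */
static bool may_reclaim_from(struct mem_cgroup *memcg, bool low_reclaim)
{
	if (mem_cgroup_below_min(memcg))
		return false;		/* never dip below memory.min */
	if (mem_cgroup_below_low(memcg))
		return low_reclaim;	/* only when overriding memory.low */
	return true;
}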
-void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
+int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
-static struct mem_cgroup_per_node *
-mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
+/**
+ * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
+ * @folio: Folio to charge.
+ * @mm: mm context of the allocating task.
+ * @gfp: Reclaim mode.
+ *
+ * Try to charge @folio to the memcg that @mm belongs to, reclaiming
+ * pages according to @gfp if necessary. If @mm is NULL, try to
+ * charge to the active memcg.
+ *
+ * Do not use this for folios allocated for swapin.
+ *
+ * Return: 0 on success. Otherwise, an error code is returned.
+ */
+static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
+ gfp_t gfp)
{
- return memcg->nodeinfo[nid];
+ if (mem_cgroup_disabled())
+ return 0;
+ return __mem_cgroup_charge(folio, mm, gfp);
}
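
A hedged allocation-path sketch, assuming folio_alloc()/folio_put() are available: charge the freshly allocated folio before publishing it, and free it again if the charge fails.

/* Illustrative only; error handling reduced to the charge step. */
static struct folio *alloc_charged_folio(struct mm_struct *mm, gfp_t gfp)
{
	struct folio *folio = folio_alloc(gfp, 0);

	if (!folio)
		return NULL;
	if (mem_cgroup_charge(folio, mm, gfp)) {
		folio_put(folio);	/* still uncharged, just free it */
		return NULL;
	}
	return folio;
}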
+int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
+ gfp_t gfp, swp_entry_t entry);
+void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
+
+void __mem_cgroup_uncharge(struct folio *folio);
+
+/**
+ * mem_cgroup_uncharge - Uncharge a folio.
+ * @folio: Folio to uncharge.
+ *
+ * Uncharge a folio previously charged with mem_cgroup_charge().
+ */
+static inline void mem_cgroup_uncharge(struct folio *folio)
+{
+ if (mem_cgroup_disabled())
+ return;
+ __mem_cgroup_uncharge(folio);
+}
+
+void __mem_cgroup_uncharge_list(struct list_head *page_list);
+static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
+{
+ if (mem_cgroup_disabled())
+ return;
+ __mem_cgroup_uncharge_list(page_list);
+}
+
+void mem_cgroup_migrate(struct folio *old, struct folio *new);
+
/**
* mem_cgroup_lruvec - get the lru list vector for a memcg & node
* @memcg: memcg of the wanted lruvec
+ * @pgdat: pglist_data of the wanted node.
*
* Returns the lru list vector holding pages for a given @memcg &
- * @node combination. This can be the node lruvec, if the memory
+ * @pgdat combination. This can be the node lruvec, if the memory
* controller is disabled.
*/
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
@@ -403,7 +719,7 @@ static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
if (!memcg)
memcg = root_mem_cgroup;
- mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
+ mz = memcg->nodeinfo[pgdat->node_id];
lruvec = &mz->lruvec;
out:
/*
@@ -416,19 +732,64 @@ out:
return lruvec;
}
-struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
+/**
+ * folio_lruvec - return lruvec for isolating/putting an LRU folio
+ * @folio: Pointer to the folio.
+ *
+ * This function relies on folio->memcg_data being stable.
+ */
+static inline struct lruvec *folio_lruvec(struct folio *folio)
+{
+ struct mem_cgroup *memcg = folio_memcg(folio);
+
+ VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
+ return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
+}
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
-struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);
+struct lruvec *folio_lruvec_lock(struct folio *folio);
+struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
+struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
+ unsigned long *flags);
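
A minimal sketch of the pairing these lock helpers imply; the matching unlock_page_lruvec_*() helpers are defined further down in this header:

/* Touch LRU state for one folio under its lruvec lock (sketch only). */
static void touch_folio_lruvec(struct folio *folio)
{
	unsigned long flags;
	struct lruvec *lruvec = folio_lruvec_lock_irqsave(folio, &flags);

	/* ... manipulate LRU state for @folio ... */
	unlock_page_lruvec_irqrestore(lruvec, flags);
}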
+
+#ifdef CONFIG_DEBUG_VM
+void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
+#else
+static inline
+void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
+{
+}
+#endif
static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
return css ? container_of(css, struct mem_cgroup, css) : NULL;
}
+static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
+{
+ return percpu_ref_tryget(&objcg->refcnt);
+}
+
+static inline void obj_cgroup_get(struct obj_cgroup *objcg)
+{
+ percpu_ref_get(&objcg->refcnt);
+}
+
+static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
+ unsigned long nr)
+{
+ percpu_ref_get_many(&objcg->refcnt, nr);
+}
+
+static inline void obj_cgroup_put(struct obj_cgroup *objcg)
+{
+ percpu_ref_put(&objcg->refcnt);
+}
+
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
if (memcg)
@@ -454,6 +815,15 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
+#ifdef CONFIG_SHRINKER_DEBUG
+static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
+{
+ return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
+}
+
+struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
+#endif
+
static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
return mem_cgroup_from_css(seq_css(m));
@@ -479,9 +849,7 @@ static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
*/
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
- if (!memcg->memory.parent)
- return NULL;
- return mem_cgroup_from_counter(memcg->memory.parent, memory);
+ return mem_cgroup_from_css(memcg->css.parent);
}
static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
@@ -489,8 +857,6 @@ static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
{
if (root == memcg)
return true;
- if (!root->use_hierarchy)
- return false;
return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}
@@ -518,11 +884,6 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
return !!(memcg->css.flags & CSS_ONLINE);
}
-/*
- * For memory reclaim.
- */
-int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
-
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
int zid, int nr_pages);
@@ -533,7 +894,7 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
struct mem_cgroup_per_node *mz;
mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- return mz->lru_zone_size[zone_idx][lru];
+ return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}
void mem_cgroup_handle_over_high(void);
@@ -569,48 +930,29 @@ struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
-#ifdef CONFIG_MEMCG_SWAP
-extern int do_swap_account;
-#endif
-
-struct mem_cgroup *lock_page_memcg(struct page *page);
-void __unlock_page_memcg(struct mem_cgroup *memcg);
+void folio_memcg_lock(struct folio *folio);
+void folio_memcg_unlock(struct folio *folio);
+void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);
-/*
- * idx can be of type enum memcg_stat_item or node_stat_item.
- * Keep in sync with memcg_exact_page_state().
- */
-static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
-{
- long x = atomic_long_read(&memcg->vmstats[idx]);
-#ifdef CONFIG_SMP
- if (x < 0)
- x = 0;
-#endif
- return x;
-}
+void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
-/*
- * idx can be of type enum memcg_stat_item or node_stat_item.
- * Keep in sync with memcg_exact_page_state().
- */
-static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
- int idx)
+/* Try to stabilize folio_memcg() for all the pages in a memcg. */
+static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
- long x = 0;
- int cpu;
+ rcu_read_lock();
- for_each_possible_cpu(cpu)
- x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
-#ifdef CONFIG_SMP
- if (x < 0)
- x = 0;
-#endif
- return x;
+ if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
+ return true;
+
+ rcu_read_unlock();
+ return false;
}
-void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
+static inline void mem_cgroup_unlock_pages(void)
+{
+ rcu_read_unlock();
+}
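
Note the asymmetric locking: a successful trylock returns with the RCU read lock held, which is what stabilizes folio_memcg(), and only mem_cgroup_unlock_pages() drops it. A hedged caller sketch:

/* Illustrative pairing of trylock/unlock. */
static void walk_memcg_pages(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_trylock_pages(memcg))
		return;		/* pages may be moving; back off */
	/* folio_memcg() results are stable in this section */
	mem_cgroup_unlock_pages();
}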
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
@@ -623,37 +965,23 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg,
local_irq_restore(flags);
}
-/**
- * mod_memcg_page_state - update page state statistics
- * @page: the page
- * @idx: page state item to account
- * @val: number of pages (positive or negative)
- *
- * The @page must be locked or the caller must use lock_page_memcg()
- * to prevent double accounting when the page is concurrently being
- * moved to another memcg:
- *
- * lock_page(page) or lock_page_memcg(page)
- * if (TestClearPageState(page))
- * mod_memcg_page_state(page, state, -1);
- * unlock_page(page) or unlock_page_memcg(page)
- *
- * Kernel pages are an exception to this, since they'll never move.
- */
-static inline void __mod_memcg_page_state(struct page *page,
- int idx, int val)
-{
- if (page->mem_cgroup)
- __mod_memcg_state(page->mem_cgroup, idx, val);
-}
-
static inline void mod_memcg_page_state(struct page *page,
int idx, int val)
{
- if (page->mem_cgroup)
- mod_memcg_state(page->mem_cgroup, idx, val);
+ struct mem_cgroup *memcg;
+
+ if (mem_cgroup_disabled())
+ return;
+
+ rcu_read_lock();
+ memcg = page_memcg(page);
+ if (memcg)
+ mod_memcg_state(memcg, idx, val);
+ rcu_read_unlock();
}
+unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
+
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
enum node_stat_item idx)
{
@@ -664,7 +992,7 @@ static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
return node_page_state(lruvec_pgdat(lruvec), idx);
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- x = atomic_long_read(&pn->lruvec_stat[idx]);
+ x = READ_ONCE(pn->lruvec_stats.state[idx]);
#ifdef CONFIG_SMP
if (x < 0)
x = 0;
@@ -684,7 +1012,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
for_each_possible_cpu(cpu)
- x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
+ x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu);
#ifdef CONFIG_SMP
if (x < 0)
x = 0;
@@ -692,50 +1020,33 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
return x;
}
-void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
- int val);
-void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
+void mem_cgroup_flush_stats(void);
+void mem_cgroup_flush_stats_delayed(void);
+
+void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+ int val);
+void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
-static inline void mod_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx, int val)
+static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
+ int val)
{
unsigned long flags;
local_irq_save(flags);
- __mod_lruvec_state(lruvec, idx, val);
+ __mod_lruvec_kmem_state(p, idx, val);
local_irq_restore(flags);
}
-static inline void __mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
-{
- pg_data_t *pgdat = page_pgdat(page);
- struct lruvec *lruvec;
-
- /* Untracked pages have no memcg, no lruvec. Update only the node */
- if (!page->mem_cgroup) {
- __mod_node_page_state(pgdat, idx, val);
- return;
- }
-
- lruvec = mem_cgroup_lruvec(page->mem_cgroup, pgdat);
- __mod_lruvec_state(lruvec, idx, val);
-}
-
-static inline void mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
+static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
{
unsigned long flags;
local_irq_save(flags);
- __mod_lruvec_page_state(page, idx, val);
+ __mod_memcg_lruvec_state(lruvec, idx, val);
local_irq_restore(flags);
}
-unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
- gfp_t gfp_mask,
- unsigned long *total_scanned);
-
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
unsigned long count);
@@ -753,8 +1064,19 @@ static inline void count_memcg_events(struct mem_cgroup *memcg,
static inline void count_memcg_page_event(struct page *page,
enum vm_event_item idx)
{
- if (page->mem_cgroup)
- count_memcg_events(page->mem_cgroup, idx, 1);
+ struct mem_cgroup *memcg = page_memcg(page);
+
+ if (memcg)
+ count_memcg_events(memcg, idx, 1);
+}
+
+static inline void count_memcg_folio_events(struct folio *folio,
+ enum vm_event_item idx, unsigned long nr)
+{
+ struct mem_cgroup *memcg = folio_memcg(folio);
+
+ if (memcg)
+ count_memcg_events(memcg, idx, nr);
}
static inline void count_memcg_event_mm(struct mm_struct *mm,
@@ -775,13 +1097,22 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
static inline void memcg_memory_event(struct mem_cgroup *memcg,
enum memcg_memory_event event)
{
+ bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
+ event == MEMCG_SWAP_FAIL;
+
atomic_long_inc(&memcg->memory_events_local[event]);
- cgroup_file_notify(&memcg->events_local_file);
+ if (!swap_event)
+ cgroup_file_notify(&memcg->events_local_file);
do {
atomic_long_inc(&memcg->memory_events[event]);
- cgroup_file_notify(&memcg->events_file);
+ if (swap_event)
+ cgroup_file_notify(&memcg->swap_events_file);
+ else
+ cgroup_file_notify(&memcg->events_file);
+ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ break;
if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
break;
} while ((memcg = parent_mem_cgroup(memcg)) &&
@@ -803,16 +1134,47 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
rcu_read_unlock();
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-void mem_cgroup_split_huge_fixup(struct page *head);
-#endif
+void split_page_memcg(struct page *head, unsigned int nr);
+
+unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
+ gfp_t gfp_mask,
+ unsigned long *total_scanned);
#else /* CONFIG_MEMCG */
#define MEM_CGROUP_ID_SHIFT 0
#define MEM_CGROUP_ID_MAX 0
-struct mem_cgroup;
+static inline struct mem_cgroup *folio_memcg(struct folio *folio)
+{
+ return NULL;
+}
+
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+ return NULL;
+}
+
+static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
+{
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ return NULL;
+}
+
+static inline struct mem_cgroup *page_memcg_check(struct page *page)
+{
+ return NULL;
+}
+
+static inline bool folio_memcg_kmem(struct folio *folio)
+{
+ return false;
+}
+
+static inline bool PageMemcgKmem(struct page *page)
+{
+ return false;
+}
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
@@ -834,50 +1196,46 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
{
}
-static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
- bool in_low_reclaim)
+static inline void mem_cgroup_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg,
+ unsigned long *min,
+ unsigned long *low)
{
- return 0;
+ *min = *low = 0;
}
-static inline enum mem_cgroup_protection mem_cgroup_protected(
- struct mem_cgroup *root, struct mem_cgroup *memcg)
+static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg)
{
- return MEMCG_PROT_NONE;
}
-static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask,
- struct mem_cgroup **memcgp,
- bool compound)
+static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
- *memcgp = NULL;
- return 0;
+ return false;
+}
+
+static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
+{
+ return false;
}
-static inline int mem_cgroup_try_charge_delay(struct page *page,
- struct mm_struct *mm,
- gfp_t gfp_mask,
- struct mem_cgroup **memcgp,
- bool compound)
+static inline int mem_cgroup_charge(struct folio *folio,
+ struct mm_struct *mm, gfp_t gfp)
{
- *memcgp = NULL;
return 0;
}
-static inline void mem_cgroup_commit_charge(struct page *page,
- struct mem_cgroup *memcg,
- bool lrucare, bool compound)
+static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
+ struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
+ return 0;
}
-static inline void mem_cgroup_cancel_charge(struct page *page,
- struct mem_cgroup *memcg,
- bool compound)
+static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
}
-static inline void mem_cgroup_uncharge(struct page *page)
+static inline void mem_cgroup_uncharge(struct folio *folio)
{
}
@@ -885,7 +1243,7 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}
-static inline void mem_cgroup_migrate(struct page *old, struct page *new)
+static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}
@@ -895,12 +1253,17 @@ static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
return &pgdat->__lruvec;
}
-static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
- struct pglist_data *pgdat)
+static inline struct lruvec *folio_lruvec(struct folio *folio)
{
+ struct pglist_data *pgdat = folio_pgdat(folio);
return &pgdat->__lruvec;
}
+static inline
+void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
+{
+}
+
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
return NULL;
@@ -917,15 +1280,45 @@ static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
return NULL;
}
-static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
+static inline
+struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
return NULL;
}
+static inline void obj_cgroup_put(struct obj_cgroup *objcg)
+{
+}
+
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}
+static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
+{
+ struct pglist_data *pgdat = folio_pgdat(folio);
+
+ spin_lock(&pgdat->__lruvec.lru_lock);
+ return &pgdat->__lruvec;
+}
+
+static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
+{
+ struct pglist_data *pgdat = folio_pgdat(folio);
+
+ spin_lock_irq(&pgdat->__lruvec.lru_lock);
+ return &pgdat->__lruvec;
+}
+
+static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
+ unsigned long *flagsp)
+{
+ struct pglist_data *pgdat = folio_pgdat(folio);
+
+ spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
+ return &pgdat->__lruvec;
+}
+
static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
struct mem_cgroup *prev,
@@ -957,6 +1350,18 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
return NULL;
}
+#ifdef CONFIG_SHRINKER_DEBUG
+static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
+{
+ return 0;
+}
+
+static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
+{
+ return NULL;
+}
+#endif
+
static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
return NULL;
@@ -999,17 +1404,32 @@ mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}
-static inline struct mem_cgroup *lock_page_memcg(struct page *page)
+static inline void lock_page_memcg(struct page *page)
{
- return NULL;
}
-static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
+static inline void unlock_page_memcg(struct page *page)
{
}
-static inline void unlock_page_memcg(struct page *page)
+static inline void folio_memcg_lock(struct folio *folio)
+{
+}
+
+static inline void folio_memcg_unlock(struct folio *folio)
+{
+}
+
+static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
+{
+ /* to match folio_memcg_rcu() */
+ rcu_read_lock();
+ return true;
+}
+
+static inline void mem_cgroup_unlock_pages(void)
{
+ rcu_read_unlock();
}
static inline void mem_cgroup_handle_over_high(void)
@@ -1044,17 +1464,6 @@ static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}
-static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
-{
- return 0;
-}
-
-static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
- int idx)
-{
- return 0;
-}
-
static inline void __mod_memcg_state(struct mem_cgroup *memcg,
int idx,
int nr)
@@ -1067,16 +1476,14 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg,
{
}
-static inline void __mod_memcg_page_state(struct page *page,
- int idx,
- int nr)
+static inline void mod_memcg_page_state(struct page *page,
+ int idx, int val)
{
}
-static inline void mod_memcg_page_state(struct page *page,
- int idx,
- int nr)
+static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
+ return 0;
}
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
@@ -1091,31 +1498,20 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
return node_page_state(lruvec_pgdat(lruvec), idx);
}
-static inline void __mod_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx, int val)
+static inline void mem_cgroup_flush_stats(void)
{
- __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}
-static inline void mod_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx, int val)
+static inline void mem_cgroup_flush_stats_delayed(void)
{
- mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}
-static inline void __mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
+static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
{
- __mod_node_page_state(page_pgdat(page), idx, val);
}
-static inline void mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
-{
- mod_node_page_state(page_pgdat(page), idx, val);
-}
-
-static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
+static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
int val)
{
struct page *page = virt_to_head_page(p);
@@ -1123,16 +1519,12 @@ static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
__mod_node_page_state(page_pgdat(page), idx, val);
}
-static inline
-unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
- gfp_t gfp_mask,
- unsigned long *total_scanned)
+static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
+ int val)
{
- return 0;
-}
+ struct page *page = virt_to_head_page(p);
-static inline void mem_cgroup_split_huge_fixup(struct page *head)
-{
+ mod_node_page_state(page_pgdat(page), idx, val);
}
static inline void count_memcg_events(struct mem_cgroup *memcg,
@@ -1152,124 +1544,102 @@ static inline void count_memcg_page_event(struct page *page,
{
}
-static inline
-void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
-{
-}
-#endif /* CONFIG_MEMCG */
-
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void __inc_memcg_state(struct mem_cgroup *memcg,
- int idx)
+static inline void count_memcg_folio_events(struct folio *folio,
+ enum vm_event_item idx, unsigned long nr)
{
- __mod_memcg_state(memcg, idx, 1);
}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void __dec_memcg_state(struct mem_cgroup *memcg,
- int idx)
+static inline
+void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
- __mod_memcg_state(memcg, idx, -1);
}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void __inc_memcg_page_state(struct page *page,
- int idx)
+static inline void split_page_memcg(struct page *head, unsigned int nr)
{
- __mod_memcg_page_state(page, idx, 1);
}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void __dec_memcg_page_state(struct page *page,
- int idx)
+static inline
+unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
+ gfp_t gfp_mask,
+ unsigned long *total_scanned)
{
- __mod_memcg_page_state(page, idx, -1);
+ return 0;
}
+#endif /* CONFIG_MEMCG */
-static inline void __inc_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx)
+static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
- __mod_lruvec_state(lruvec, idx, 1);
+ __mod_lruvec_kmem_state(p, idx, 1);
}
-static inline void __dec_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx)
+static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
- __mod_lruvec_state(lruvec, idx, -1);
+ __mod_lruvec_kmem_state(p, idx, -1);
}
-static inline void __inc_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
+static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
- __mod_lruvec_page_state(page, idx, 1);
-}
+ struct mem_cgroup *memcg;
-static inline void __dec_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
-{
- __mod_lruvec_page_state(page, idx, -1);
+ memcg = lruvec_memcg(lruvec);
+ if (!memcg)
+ return NULL;
+ memcg = parent_mem_cgroup(memcg);
+ if (!memcg)
+ return NULL;
+ return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}
-static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
+static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
- __mod_lruvec_slab_state(p, idx, 1);
+ spin_unlock(&lruvec->lru_lock);
}
-static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
+static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
- __mod_lruvec_slab_state(p, idx, -1);
+ spin_unlock_irq(&lruvec->lru_lock);
}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void inc_memcg_state(struct mem_cgroup *memcg,
- int idx)
+static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
+ unsigned long flags)
{
- mod_memcg_state(memcg, idx, 1);
+ spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void dec_memcg_state(struct mem_cgroup *memcg,
- int idx)
+/* Test requires a stable folio->memcg binding, see folio_memcg() */
+static inline bool folio_matches_lruvec(struct folio *folio,
+ struct lruvec *lruvec)
{
- mod_memcg_state(memcg, idx, -1);
+ return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
+ lruvec_memcg(lruvec) == folio_memcg(folio);
}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void inc_memcg_page_state(struct page *page,
- int idx)
+/* Don't take the lock again if the folio's lruvec is already locked */
+static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
+ struct lruvec *locked_lruvec)
{
- mod_memcg_page_state(page, idx, 1);
-}
+ if (locked_lruvec) {
+ if (folio_matches_lruvec(folio, locked_lruvec))
+ return locked_lruvec;
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void dec_memcg_page_state(struct page *page,
- int idx)
-{
- mod_memcg_page_state(page, idx, -1);
-}
+ unlock_page_lruvec_irq(locked_lruvec);
+ }
-static inline void inc_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx)
-{
- mod_lruvec_state(lruvec, idx, 1);
+ return folio_lruvec_lock_irq(folio);
}
-static inline void dec_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx)
+/* Don't take the lock again if the folio's lruvec is already locked */
+static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
+ struct lruvec *locked_lruvec, unsigned long *flags)
{
- mod_lruvec_state(lruvec, idx, -1);
-}
+ if (locked_lruvec) {
+ if (folio_matches_lruvec(folio, locked_lruvec))
+ return locked_lruvec;
-static inline void inc_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
-{
- mod_lruvec_page_state(page, idx, 1);
-}
+ unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
+ }
-static inline void dec_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
-{
- mod_lruvec_page_state(page, idx, -1);
+ return folio_lruvec_lock_irqsave(folio, flags);
}
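
The relock helpers exist for batched LRU walks: keep the current lock while consecutive folios share a lruvec and switch only at a boundary. A hedged loop sketch over a private folio list:

/* Illustrative batching pattern; @list holds folios linked via ->lru. */
static void drain_folio_list(struct list_head *list)
{
	struct folio *folio, *next;
	struct lruvec *lruvec = NULL;
	unsigned long flags;

	list_for_each_entry_safe(folio, next, list, lru) {
		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
		/* ... move @folio on/off this lruvec ... */
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);
}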
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -1279,17 +1649,17 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
unsigned long *pheadroom, unsigned long *pdirty,
unsigned long *pwriteback);
-void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
+void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
struct bdi_writeback *wb);
-static inline void mem_cgroup_track_foreign_dirty(struct page *page,
+static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
struct bdi_writeback *wb)
{
if (mem_cgroup_disabled())
return;
- if (unlikely(&page->mem_cgroup->css != wb->memcg_css))
- mem_cgroup_track_foreign_dirty_slowpath(page, wb);
+ if (unlikely(&folio_memcg(folio)->css != wb->memcg_css))
+ mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}
void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
@@ -1309,7 +1679,7 @@ static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
{
}
-static inline void mem_cgroup_track_foreign_dirty(struct page *page,
+static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
struct bdi_writeback *wb)
{
}
@@ -1321,7 +1691,8 @@ static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
#endif /* CONFIG_CGROUP_WRITEBACK */
struct sock;
-bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
+bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
+ gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
@@ -1333,16 +1704,16 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
return true;
do {
- if (time_before(jiffies, memcg->socket_pressure))
+ if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
return true;
} while ((memcg = parent_mem_cgroup(memcg)));
return false;
}
-extern int memcg_expand_shrinker_maps(int new_id);
-
-extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
- int nid, int shrinker_id);
+int alloc_shrinker_info(struct mem_cgroup *memcg);
+void free_shrinker_info(struct mem_cgroup *memcg);
+void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
+void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
@@ -1352,122 +1723,145 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
return false;
}
-static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
- int nid, int shrinker_id)
+static inline void set_shrinker_bit(struct mem_cgroup *memcg,
+ int nid, int shrinker_id)
{
}
#endif
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
-void memcg_kmem_put_cache(struct kmem_cache *cachep);
-
#ifdef CONFIG_MEMCG_KMEM
-int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
-void __memcg_kmem_uncharge(struct page *page, int order);
-int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
- struct mem_cgroup *memcg);
-void __memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg,
- unsigned int nr_pages);
+bool mem_cgroup_kmem_disabled(void);
+int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
+void __memcg_kmem_uncharge_page(struct page *page, int order);
-extern struct static_key_false memcg_kmem_enabled_key;
-extern struct workqueue_struct *memcg_kmem_cache_wq;
+struct obj_cgroup *get_obj_cgroup_from_current(void);
+struct obj_cgroup *get_obj_cgroup_from_page(struct page *page);
-extern int memcg_nr_cache_ids;
-void memcg_get_cache_ids(void);
-void memcg_put_cache_ids(void);
+int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
+void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
-/*
- * Helper macro to loop through all memcg-specific caches. Callers must still
- * check if the cache is valid (it is either valid or NULL).
- * the slab_mutex must be held when looping through those caches
- */
-#define for_each_memcg_cache_index(_idx) \
- for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
+extern struct static_key_false memcg_kmem_enabled_key;
static inline bool memcg_kmem_enabled(void)
{
- return static_branch_unlikely(&memcg_kmem_enabled_key);
+ return static_branch_likely(&memcg_kmem_enabled_key);
}
-static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
+static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
+ int order)
{
if (memcg_kmem_enabled())
- return __memcg_kmem_charge(page, gfp, order);
+ return __memcg_kmem_charge_page(page, gfp, order);
return 0;
}
-static inline void memcg_kmem_uncharge(struct page *page, int order)
+static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
if (memcg_kmem_enabled())
- __memcg_kmem_uncharge(page, order);
+ __memcg_kmem_uncharge_page(page, order);
}
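
In-tree, the page allocator performs this charge itself for __GFP_ACCOUNT allocations; the sketch below only illustrates the charge/uncharge pairing these wrappers expose (function name hypothetical):

/* Sketch: explicitly account one order-0 page to the current memcg. */
static struct page *alloc_accounted_page(gfp_t gfp)
{
	struct page *page = alloc_pages(gfp, 0);

	if (page && memcg_kmem_charge_page(page, gfp, 0)) {
		__free_pages(page, 0);	/* charge failed: return the page */
		return NULL;
	}
	return page;
}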
-static inline int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp,
- int order, struct mem_cgroup *memcg)
+/*
+ * A helper for accessing a memcg's kmem_id, used for getting the
+ * corresponding LRU lists.
+ */
+static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
- if (memcg_kmem_enabled())
- return __memcg_kmem_charge_memcg(page, gfp, order, memcg);
- return 0;
+ return memcg ? memcg->kmemcg_id : -1;
}
-static inline void memcg_kmem_uncharge_memcg(struct page *page, int order,
- struct mem_cgroup *memcg)
-{
- if (memcg_kmem_enabled())
- __memcg_kmem_uncharge_memcg(memcg, 1 << order);
-}
+struct mem_cgroup *mem_cgroup_from_obj(void *p);
+struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
-/*
- * helper for accessing a memcg's index. It will be used as an index in the
- * child cache array in kmem_cache, and also to derive its name. This function
- * will return -1 when this is not a kmem-limited memcg.
- */
-static inline int memcg_cache_id(struct mem_cgroup *memcg)
+static inline void count_objcg_event(struct obj_cgroup *objcg,
+ enum vm_event_item idx)
{
- return memcg ? memcg->kmemcg_id : -1;
+ struct mem_cgroup *memcg;
+
+ if (!memcg_kmem_enabled())
+ return;
+
+ rcu_read_lock();
+ memcg = obj_cgroup_memcg(objcg);
+ count_memcg_events(memcg, idx, 1);
+ rcu_read_unlock();
}
#else
+static inline bool mem_cgroup_kmem_disabled(void)
+{
+ return true;
+}
-static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
+static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
+ int order)
{
return 0;
}
-static inline void memcg_kmem_uncharge(struct page *page, int order)
+static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}
-static inline int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
+static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
+ int order)
{
return 0;
}
-static inline void __memcg_kmem_uncharge(struct page *page, int order)
+static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}
-#define for_each_memcg_cache_index(_idx) \
- for (; NULL; )
+static inline struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
+{
+ return NULL;
+}
static inline bool memcg_kmem_enabled(void)
{
return false;
}
-static inline int memcg_cache_id(struct mem_cgroup *memcg)
+static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
return -1;
}
-static inline void memcg_get_cache_ids(void)
+static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
+ return NULL;
+}
+
+static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
+{
+ return NULL;
}
-static inline void memcg_put_cache_ids(void)
+static inline void count_objcg_event(struct obj_cgroup *objcg,
+ enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG_KMEM */
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
+void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
+void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
+#else
+static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
+{
+ return true;
+}
+static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
+ size_t size)
+{
+}
+static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
+ size_t size)
+{
+}
+#endif
+
#endif /* _LINUX_MEMCONTROL_H */
diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h
new file mode 100644
index 000000000000..965009aa01d7
--- /dev/null
+++ b/include/linux/memory-tiers.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MEMORY_TIERS_H
+#define _LINUX_MEMORY_TIERS_H
+
+#include <linux/types.h>
+#include <linux/nodemask.h>
+#include <linux/kref.h>
+#include <linux/mmzone.h>
+/*
+ * Each tier covers an abstract distance chunk of size 128.
+ */
+#define MEMTIER_CHUNK_BITS 7
+#define MEMTIER_CHUNK_SIZE (1 << MEMTIER_CHUNK_BITS)
+/*
+ * Smaller abstract distance values imply faster (higher) memory tiers. Offset
+ * the DRAM adistance so that we can accommodate devices with a slightly lower
+ * adistance value (slightly faster) than default DRAM adistance to be part of
+ * the same memory tier.
+ */
+#define MEMTIER_ADISTANCE_DRAM ((4 * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1))
+#define MEMTIER_HOTPLUG_PRIO 100
+
+struct memory_tier;
+struct memory_dev_type {
+ /* list of memory types that are part of same tier as this type */
+ struct list_head tier_sibiling;
+ /* abstract distance for this specific memory type */
+ int adistance;
+ /* Nodes of same abstract distance */
+ nodemask_t nodes;
+ struct kref kref;
+};
+
+#ifdef CONFIG_NUMA
+extern bool numa_demotion_enabled;
+struct memory_dev_type *alloc_memory_type(int adistance);
+void destroy_memory_type(struct memory_dev_type *memtype);
+void init_node_memory_type(int node, struct memory_dev_type *default_type);
+void clear_node_memory_type(int node, struct memory_dev_type *memtype);
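
A hedged sketch of how a memory driver might use these hooks: allocate a device type at some abstract distance, bind a node to it, and tear down in reverse (error handling simplified; a real caller would also check for ERR_PTR()):

/* Illustrative: register node @nid as a slower-than-DRAM tier. */
static struct memory_dev_type *example_type;

static int example_register(int nid)
{
	example_type = alloc_memory_type(MEMTIER_ADISTANCE_DRAM +
					 MEMTIER_CHUNK_SIZE);
	if (!example_type)
		return -ENOMEM;
	init_node_memory_type(nid, example_type);
	return 0;
}

static void example_unregister(int nid)
{
	clear_node_memory_type(nid, example_type);
	destroy_memory_type(example_type);
}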
+#ifdef CONFIG_MIGRATION
+int next_demotion_node(int node);
+void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets);
+bool node_is_toptier(int node);
+#else
+static inline int next_demotion_node(int node)
+{
+ return NUMA_NO_NODE;
+}
+
+static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
+{
+ *targets = NODE_MASK_NONE;
+}
+
+static inline bool node_is_toptier(int node)
+{
+ return true;
+}
+#endif
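
A hedged sketch of the demotion walk these helpers enable, following next_demotion_node() until the chain ends:

/* Print the demotion chain starting at @nid (illustrative only). */
static void dump_demotion_chain(int nid)
{
	while ((nid = next_demotion_node(nid)) != NUMA_NO_NODE)
		pr_info("demotes to node %d%s\n", nid,
			node_is_toptier(nid) ? " (top tier)" : "");
}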
+
+#else
+
+#define numa_demotion_enabled false
+/*
+ * The CONFIG_NUMA implementation of alloc_memory_type() returns a
+ * non-NULL pointer or an error; this stub simply returns NULL.
+ */
+static inline struct memory_dev_type *alloc_memory_type(int adistance)
+{
+ return NULL;
+}
+
+static inline void destroy_memory_type(struct memory_dev_type *memtype)
+{
+}
+
+static inline void init_node_memory_type(int node, struct memory_dev_type *default_type)
+{
+}
+
+static inline void clear_node_memory_type(int node, struct memory_dev_type *memtype)
+{
+}
+
+static inline int next_demotion_node(int node)
+{
+ return NUMA_NO_NODE;
+}
+
+static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
+{
+ *targets = NODE_MASK_NONE;
+}
+
+static inline bool node_is_toptier(int node)
+{
+ return true;
+}
+#endif /* CONFIG_NUMA */
+#endif /* _LINUX_MEMORY_TIERS_H */
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 0b8d791b6669..aa619464a1df 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -23,14 +23,68 @@
#define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS)
+/**
+ * struct memory_group - a logical group of memory blocks
+ * @nid: The node id for all memory blocks inside the memory group.
+ * @blocks: List of all memory blocks belonging to this memory group.
+ * @present_kernel_pages: Present (online) memory outside ZONE_MOVABLE of this
+ * memory group.
+ * @present_movable_pages: Present (online) memory in ZONE_MOVABLE of this
+ * memory group.
+ * @is_dynamic: The memory group type: static vs. dynamic
+ * @s.max_pages: Valid with &memory_group.is_dynamic == false. The maximum
+ * number of pages we'll have in this static memory group.
+ * @d.unit_pages: Valid with &memory_group.is_dynamic == true. Unit in pages
+ * in which memory is added/removed in this dynamic memory group.
+ * This granularity defines the alignment of a unit in physical
+ * address space; it has to be at least as big as a single
+ * memory block.
+ *
+ * A memory group logically groups memory blocks; each memory block
+ * belongs to at most one memory group. A memory group corresponds to
+ * a memory device, such as a DIMM or a NUMA node, which spans multiple
+ * memory blocks and might even span multiple non-contiguous physical memory
+ * ranges.
+ *
+ * Modification of members after registration is serialized by memory
+ * hot(un)plug code.
+ */
+struct memory_group {
+ int nid;
+ struct list_head memory_blocks;
+ unsigned long present_kernel_pages;
+ unsigned long present_movable_pages;
+ bool is_dynamic;
+ union {
+ struct {
+ unsigned long max_pages;
+ } s;
+ struct {
+ unsigned long unit_pages;
+ } d;
+ };
+};
+
struct memory_block {
unsigned long start_section_nr;
unsigned long state; /* serialized by the dev->lock */
- int section_count; /* serialized by mem_sysfs_mutex */
int online_type; /* for passing data to online routine */
- int phys_device; /* to which fru does this belong? */
- struct device dev;
int nid; /* NID for this memory block */
+ /*
+ * The single zone of this memory block if all PFNs of this memory block
+ * that are System RAM (not a memory hole, not ZONE_DEVICE ranges) are
+ * managed by a single zone. NULL if multiple zones (including nodes)
+ * apply.
+ */
+ struct zone *zone;
+ struct device dev;
+ /*
+ * Number of vmemmap pages. These pages
+ * lay at the beginning of the memory block.
+ */
+ unsigned long nr_vmemmap_pages;
+ struct memory_group *group; /* group (if any) for this block */
+ struct list_head group_next; /* next block inside memory group */
};
int arch_get_memory_phys_device(unsigned long start_pfn);
@@ -49,7 +103,6 @@ struct memory_notify {
unsigned long start_pfn;
unsigned long nr_pages;
int status_change_nid_normal;
- int status_change_nid_high;
int status_change_nid;
};
@@ -63,7 +116,7 @@ struct mem_section;
#define SLAB_CALLBACK_PRI 1
#define IPC_CALLBACK_PRI 10
-#ifndef CONFIG_MEMORY_HOTPLUG_SPARSE
+#ifndef CONFIG_MEMORY_HOTPLUG
static inline void memory_dev_init(void)
{
return;
@@ -79,22 +132,35 @@ static inline int memory_notify(unsigned long val, void *v)
{
return 0;
}
-#else
+static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri)
+{
+ return 0;
+}
+/* These aren't inline functions due to a GCC bug. */
+#define register_hotmemory_notifier(nb) ({ (void)(nb); 0; })
+#define unregister_hotmemory_notifier(nb) ({ (void)(nb); })
+#else /* CONFIG_MEMORY_HOTPLUG */
extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
-int create_memory_block_devices(unsigned long start, unsigned long size);
+int create_memory_block_devices(unsigned long start, unsigned long size,
+ unsigned long vmemmap_pages,
+ struct memory_group *group);
void remove_memory_block_devices(unsigned long start, unsigned long size);
extern void memory_dev_init(void);
extern int memory_notify(unsigned long val, void *v);
-extern struct memory_block *find_memory_block(struct mem_section *);
+extern struct memory_block *find_memory_block(unsigned long section_nr);
typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
extern int walk_memory_blocks(unsigned long start, unsigned long size,
void *arg, walk_memory_blocks_func_t func);
extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func);
-#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
-#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
-#ifdef CONFIG_MEMORY_HOTPLUG
+extern int memory_group_register_static(int nid, unsigned long max_pages);
+extern int memory_group_register_dynamic(int nid, unsigned long unit_pages);
+extern int memory_group_unregister(int mgid);
+struct memory_group *memory_group_find_by_id(int mgid);
+typedef int (*walk_memory_groups_func_t)(struct memory_group *, void *);
+int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
+ struct memory_group *excluded, void *arg);
#define hotplug_memory_notifier(fn, pri) ({ \
static __meminitdata struct notifier_block fn##_mem_nb =\
{ .notifier_call = fn, .priority = pri };\
@@ -102,12 +168,12 @@ extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func);
})
#define register_hotmemory_notifier(nb) register_memory_notifier(nb)
#define unregister_hotmemory_notifier(nb) unregister_memory_notifier(nb)
-#else
-#define hotplug_memory_notifier(fn, pri) ({ 0; })
-/* These aren't inline functions due to a GCC bug. */
-#define register_hotmemory_notifier(nb) ({ (void)(nb); 0; })
-#define unregister_hotmemory_notifier(nb) ({ (void)(nb); })
-#endif
+
+#ifdef CONFIG_NUMA
+void memory_block_add_nid(struct memory_block *mem, int nid,
+ enum meminit_context context);
+#endif /* CONFIG_NUMA */
+#endif /* CONFIG_MEMORY_HOTPLUG */
/*
* Kernel text modification mutex, used for code patching. Users of this lock
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index f4d59155f3d4..9fcbf5706595 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -11,58 +11,115 @@ struct page;
struct zone;
struct pglist_data;
struct mem_section;
-struct memory_block;
+struct memory_group;
struct resource;
struct vmem_altmap;
+struct dev_pagemap;
-#ifdef CONFIG_MEMORY_HOTPLUG
+#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
- * Return page for the valid pfn only if the page is online. All pfn
- * walkers which rely on the fully initialized page->flags and others
- * should use this rather than pfn_valid && pfn_to_page
+ * To support node hot-add, we have to allocate a new pgdat.
+ *
+ * If an arch has a generic-style NODE_DATA(),
+ * node_data[nid] = kzalloc() works well, but this depends on the
+ * architecture.
+ *
+ * In general, generic_alloc_nodedata() is used.
*/
-#define pfn_to_online_page(pfn) \
-({ \
- struct page *___page = NULL; \
- unsigned long ___pfn = pfn; \
- unsigned long ___nr = pfn_to_section_nr(___pfn); \
- \
- if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
- pfn_valid_within(___pfn)) \
- ___page = pfn_to_page(___pfn); \
- ___page; \
-})
+extern pg_data_t *arch_alloc_nodedata(int nid);
+extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
+
+#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
+
+#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid)
+#ifdef CONFIG_NUMA
/*
- * Types for free bootmem stored in page->lru.next. These have to be in
- * some random range in unsigned long space for debugging purposes.
+ * XXX: node-aware allocation can't be used to get the new node's memory
+ * at this time, because the pgdat for the new node is not
+ * allocated/initialized yet. Using the new node's own memory will need
+ * more consideration.
*/
-enum {
- MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
- SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
- MIX_SECTION_INFO,
- NODE_INFO,
- MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
-};
+#define generic_alloc_nodedata(nid) \
+({ \
+ memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES); \
+})
+
+extern pg_data_t *node_data[];
+static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
+{
+ node_data[nid] = pgdat;
+}
+
+#else /* !CONFIG_NUMA */
+
+/* never called */
+static inline pg_data_t *generic_alloc_nodedata(int nid)
+{
+ BUG();
+ return NULL;
+}
+static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
+{
+}
+#endif /* CONFIG_NUMA */
+#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+struct page *pfn_to_online_page(unsigned long pfn);
/* Types for control the zone type of onlined and offlined memory */
enum {
- MMOP_OFFLINE = -1,
- MMOP_ONLINE_KEEP,
+ /* Offline the memory. */
+ MMOP_OFFLINE = 0,
+ /* Online the memory. Zone depends, see default_zone_for_pfn(). */
+ MMOP_ONLINE,
+ /* Online the memory to ZONE_NORMAL. */
MMOP_ONLINE_KERNEL,
+ /* Online the memory to ZONE_MOVABLE. */
MMOP_ONLINE_MOVABLE,
};
+/* Flags for add_memory() and friends to specify memory hotplug details. */
+typedef int __bitwise mhp_t;
+
+/* No special request */
+#define MHP_NONE ((__force mhp_t)0)
+/*
+ * Allow merging of the added System RAM resource with adjacent,
+ * mergeable resources. After a successful call to add_memory_resource()
+ * with this flag set, the resource pointer must no longer be used as it
+ * might be stale, or the resource might have changed.
+ */
+#define MHP_MERGE_RESOURCE ((__force mhp_t)BIT(0))
+
/*
- * Restrictions for the memory hotplug:
- * flags: MHP_ flags
- * altmap: alternative allocator for memmap array
+ * We want memmap (struct page array) to be self contained.
+ * To do so, we will use the beginning of the hot-added range to build
+ * the page tables for the memmap array that describes the entire range.
+ * Only selected architectures support it with SPARSE_VMEMMAP.
*/
-struct mhp_restrictions {
- unsigned long flags;
+#define MHP_MEMMAP_ON_MEMORY ((__force mhp_t)BIT(1))
+/*
+ * The nid field specifies a memory group id (mgid) instead. The memory group
+ * implies the node id (nid).
+ */
+#define MHP_NID_IS_MGID ((__force mhp_t)BIT(2))
+
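
A hedged sketch of how a driver might combine the flags; with MHP_NID_IS_MGID, the nid argument of add_memory() (declared later in this file) carries a memory group id:

/* Illustrative: hot add one unit of a dynamic memory group. */
static int example_add_unit(int mgid, u64 start, u64 unit_size)
{
	return add_memory(mgid, start, unit_size,
			  MHP_NID_IS_MGID | MHP_MERGE_RESOURCE);
}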
+/*
+ * Extended parameters for memory hotplug:
+ * altmap: alternative allocator for memmap array (optional)
+ * pgprot: page protection flags to apply to newly created page tables
+ * (required)
+ */
+struct mhp_params {
struct vmem_altmap *altmap;
+ pgprot_t pgprot;
+ struct dev_pagemap *pgmap;
};
+bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
+struct range mhp_get_pluggable_range(bool need_mapping);
+
/*
* Zone resizing functions
*
@@ -90,16 +147,17 @@ static inline void zone_seqlock_init(struct zone *zone)
{
seqlock_init(&zone->span_seqlock);
}
-extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
-extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
-extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
+extern void adjust_present_page_count(struct page *page,
+ struct memory_group *group,
+ long nr_pages);
/* VM interface that may be used by firmware interface */
+extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
+ struct zone *zone);
+extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
- int online_type, int nid);
-extern struct zone *test_pages_in_a_zone(unsigned long start_pfn,
- unsigned long end_pfn);
-extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
- unsigned long end_pfn);
+ struct zone *zone, struct memory_group *group);
+extern void __offline_isolated_pages(unsigned long start_pfn,
+ unsigned long end_pfn);
typedef void (*online_page_callback_t)(struct page *page, unsigned int order);
@@ -110,10 +168,13 @@ extern int restore_online_page_callback(online_page_callback_t callback);
extern int try_online_node(int nid);
extern int arch_add_memory(int nid, u64 start, u64 size,
- struct mhp_restrictions *restrictions);
+ struct mhp_params *params);
extern u64 max_mem_size;
-extern bool memhp_auto_online;
+extern int mhp_online_type_from_str(const char *str);
+
+/* Default online_type (MMOP_*) when new memory blocks are added. */
+extern int mhp_default_online_type;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
@@ -121,111 +182,46 @@ static inline bool movable_node_is_enabled(void)
return movable_node_enabled;
}
-extern void arch_remove_memory(int nid, u64 start, u64 size,
- struct vmem_altmap *altmap);
+extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
struct vmem_altmap *altmap);
/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
- struct mhp_restrictions *restrictions);
+ struct mhp_params *params);
#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
- unsigned long nr_pages, struct mhp_restrictions *restrictions)
+ unsigned long nr_pages, struct mhp_params *params)
{
- return __add_pages(nid, start_pfn, nr_pages, restrictions);
+ return __add_pages(nid, start_pfn, nr_pages, params);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
- struct mhp_restrictions *restrictions);
+ struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */
-#ifdef CONFIG_NUMA
-extern int memory_add_physaddr_to_nid(u64 start);
-#else
-static inline int memory_add_physaddr_to_nid(u64 start)
-{
- return 0;
-}
-#endif
-
-#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
-/*
- * For supporting node-hotadd, we have to allocate a new pgdat.
- *
- * If an arch has generic style NODE_DATA(),
- * node_data[nid] = kzalloc() works well. But it depends on the architecture.
- *
- * In general, generic_alloc_nodedata() is used.
- * Now, arch_free_nodedata() is just defined for error path of node_hot_add.
- *
- */
-extern pg_data_t *arch_alloc_nodedata(int nid);
-extern void arch_free_nodedata(pg_data_t *pgdat);
-extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
-
-#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
-
-#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid)
-#define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat)
+void get_online_mems(void);
+void put_online_mems(void);
-#ifdef CONFIG_NUMA
-/*
- * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat.
- * XXX: kmalloc_node() can't work well to get new node's memory at this time.
- * Because, pgdat for the new node is not allocated/initialized yet itself.
- * To use new node's memory, more consideration will be necessary.
- */
-#define generic_alloc_nodedata(nid) \
-({ \
- kzalloc(sizeof(pg_data_t), GFP_KERNEL); \
-})
-/*
- * This definition is just for error path in node hotadd.
- * For node hotremove, we have to replace this.
- */
-#define generic_free_nodedata(pgdat) kfree(pgdat)
+void mem_hotplug_begin(void);
+void mem_hotplug_done(void);
-extern pg_data_t *node_data[];
-static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
+/* See kswapd_is_running() */
+static inline void pgdat_kswapd_lock(pg_data_t *pgdat)
{
- node_data[nid] = pgdat;
+ mutex_lock(&pgdat->kswapd_lock);
}
-#else /* !CONFIG_NUMA */
-
-/* never called */
-static inline pg_data_t *generic_alloc_nodedata(int nid)
-{
- BUG();
- return NULL;
-}
-static inline void generic_free_nodedata(pg_data_t *pgdat)
+static inline void pgdat_kswapd_unlock(pg_data_t *pgdat)
{
+ mutex_unlock(&pgdat->kswapd_lock);
}
-static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
-{
-}
-#endif /* CONFIG_NUMA */
-#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
-#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
-extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
-#else
-static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
+static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat)
{
+ mutex_init(&pgdat->kswapd_lock);
}
-#endif
-extern void put_page_bootmem(struct page *page);
-extern void get_page_bootmem(unsigned long ingo, struct page *page,
- unsigned long type);
-
-void get_online_mems(void);
-void put_online_mems(void);
-
-void mem_hotplug_begin(void);
-void mem_hotplug_done(void);
#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn) \
@@ -248,17 +244,6 @@ static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}
-static inline int mhp_notimplemented(const char *func)
-{
- printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
- dump_stack();
- return -ENOSYS;
-}
-
-static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
-{
-}
-
static inline int try_online_node(int nid)
{
return 0;
@@ -274,8 +259,19 @@ static inline bool movable_node_is_enabled(void)
{
return false;
}
+
+static inline void pgdat_kswapd_lock(pg_data_t *pgdat) {}
+static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) {}
+static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) {}
#endif /* ! CONFIG_MEMORY_HOTPLUG */
+/*
+ * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
+ * platforms might override and use arch_get_mappable_range()
+ * for internal, non-memory-hotplug purposes.
+ */
+struct range arch_get_mappable_range(void);
+
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
* pgdat resizing functions
@@ -306,56 +302,63 @@ static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#ifdef CONFIG_MEMORY_HOTREMOVE
-extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
-extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
-extern int remove_memory(int nid, u64 start, u64 size);
-extern void __remove_memory(int nid, u64 start, u64 size);
+extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct zone *zone, struct memory_group *group);
+extern int remove_memory(u64 start, u64 size);
+extern void __remove_memory(u64 start, u64 size);
+extern int offline_and_remove_memory(u64 start, u64 size);
#else
-static inline bool is_mem_section_removable(unsigned long pfn,
- unsigned long nr_pages)
-{
- return false;
-}
-
static inline void try_offline_node(int nid) {}
-static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
+static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct zone *zone, struct memory_group *group)
{
return -EINVAL;
}
-static inline int remove_memory(int nid, u64 start, u64 size)
+static inline int remove_memory(u64 start, u64 size)
{
return -EBUSY;
}
-static inline void __remove_memory(int nid, u64 start, u64 size) {}
+static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);
-extern void __ref free_area_init_core_hotplug(int nid);
-extern int __add_memory(int nid, u64 start, u64 size);
-extern int add_memory(int nid, u64 start, u64 size);
-extern int add_memory_resource(int nid, struct resource *resource);
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat);
+extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
+extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
+extern int add_memory_resource(int nid, struct resource *resource,
+ mhp_t mhp_flags);
+extern int add_memory_driver_managed(int nid, u64 start, u64 size,
+ const char *resource_name,
+ mhp_t mhp_flags);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
- unsigned long nr_pages, struct vmem_altmap *altmap);
+ unsigned long nr_pages,
+ struct vmem_altmap *altmap, int migratetype);
extern void remove_pfn_range_from_zone(struct zone *zone,
unsigned long start_pfn,
unsigned long nr_pages);
-extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_section(int nid, unsigned long pfn,
- unsigned long nr_pages, struct vmem_altmap *altmap);
+ unsigned long nr_pages, struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap);
extern void sparse_remove_section(struct mem_section *ms,
unsigned long pfn, unsigned long nr_pages,
unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);
-extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
- int online_type);
-extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
+extern struct zone *zone_for_pfn_range(int online_type, int nid,
+ struct memory_group *group, unsigned long start_pfn,
unsigned long nr_pages);
+extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
+ struct mhp_params *params);
+void arch_remove_linear_mapping(u64 start, u64 size);
+extern bool mhp_supports_memmap_on_memory(unsigned long size);
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
#endif /* __LINUX_MEMORY_HOTPLUG_H */
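
The memory_hotplug.h hunks above change the hot-add/hot-remove entry points: callers now pass an mhp_t flag word on addition, and removal is keyed on the physical range alone rather than on a nid. A minimal driver-side sketch against the new signatures follows — the nid/start/size values and the resource name are illustrative assumptions, not values from this diff:

/*
 * Hedged sketch only: exercises the post-change hotplug API declared
 * above. nid/start/size and the resource name are illustrative
 * assumptions; "(example driver)" stands in for a real driver name.
 */
#include <linux/memory_hotplug.h>

static int example_hotplug_range(int nid, u64 start, u64 size)
{
	mhp_t flags = MHP_NONE;
	int rc;

	/* Opt in to placing the memmap on the hotplugged range itself. */
	if (mhp_supports_memmap_on_memory(size))
		flags |= MHP_MEMMAP_ON_MEMORY;

	rc = add_memory_driver_managed(nid, start, size,
				       "System RAM (example driver)", flags);
	if (rc)
		return rc;

	/* Removal is now keyed on the range alone; no nid argument. */
	return offline_and_remove_memory(start, size);
}

dax/kmem-style drivers follow roughly this pattern; the memmap-on-memory opt-in is only honoured when the range satisfies the constraints checked by mhp_supports_memmap_on_memory().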
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 5228c62af416..d232de7cdc56 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -6,9 +6,8 @@
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1
-
+#include <linux/sched.h>
#include <linux/mmzone.h>
-#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
@@ -28,10 +27,10 @@ struct mm_struct;
* the process policy is used. Interrupts ignore the memory policy
* of the current process.
*
- * Locking policy for interlave:
+ * Locking policy for interleave:
* In process context there is no locking because only the process accesses
* its own state. All vma manipulation is somewhat protected by a down_read on
- * mmap_sem.
+ * mmap_lock.
*
* Freeing policy:
* Mempolicy objects are reference counted. A mempolicy will be freed when
@@ -46,11 +45,9 @@ struct mempolicy {
atomic_t refcnt;
unsigned short mode; /* See MPOL_* above */
unsigned short flags; /* See set_mempolicy() MPOL_F_* above */
- union {
- short preferred_node; /* preferred */
- nodemask_t nodes; /* interleave/bind */
- /* undefined for default */
- } v;
+ nodemask_t nodes; /* interleave/bind/prefer */
+ int home_node; /* Home node to use for MPOL_BIND and MPOL_PREFERRED_MANY */
+
union {
nodemask_t cpuset_mems_allowed; /* relative to these nodes */
nodemask_t user_nodemask; /* nodemask passed by user */
@@ -150,8 +147,10 @@ extern int huge_node(struct vm_area_struct *vma,
unsigned long addr, gfp_t gfp_flags,
struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
-extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
+extern bool mempolicy_in_oom_domain(struct task_struct *tsk,
const nodemask_t *mask);
+extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy);
+
extern unsigned int mempolicy_slab_node(void);
extern enum zone_type policy_zone;
@@ -173,38 +172,18 @@ extern int mpol_parse_str(char *str, struct mempolicy **mpol);
extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
/* Check if a vma is migratable */
-static inline bool vma_migratable(struct vm_area_struct *vma)
-{
- if (vma->vm_flags & (VM_IO | VM_PFNMAP))
- return false;
-
- /*
- * DAX device mappings require predictable access latency, so avoid
- * incurring periodic faults.
- */
- if (vma_is_dax(vma))
- return false;
-
-#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
- if (vma->vm_flags & VM_HUGETLB)
- return false;
-#endif
-
- /*
- * Migration allocates pages in the highest zone. If we cannot
- * do so then migration (at least from node to node) is not
- * possible.
- */
- if (vma->vm_file &&
- gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
- < policy_zone)
- return false;
- return true;
-}
+extern bool vma_migratable(struct vm_area_struct *vma);
extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);
+static inline bool mpol_is_preferred_many(struct mempolicy *pol)
+{
+ return (pol->mode == MPOL_PREFERRED_MANY);
+}
+
+extern bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone);
+
#else
struct mempolicy {};
@@ -308,5 +287,11 @@ static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
static inline void mpol_put_task_policy(struct task_struct *task)
{
}
+
+static inline bool mpol_is_preferred_many(struct mempolicy *pol)
+{
+ return false;
+}
+
#endif /* CONFIG_NUMA */
#endif
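
mpol_is_preferred_many() above is defined in both the CONFIG_NUMA and stub halves, so allocation paths can branch on MPOL_PREFERRED_MANY without preprocessor guards. A hedged sketch of the two-pass allocation such a policy implies — example_alloc() and its retry logic are illustrative; only mpol_is_preferred_many() and policy_nodemask() come from the header:

/*
 * Hedged sketch: a first, quiet attempt restricted to the preferred
 * nodes, then an unrestricted fallback. Not the kernel's actual
 * MPOL_PREFERRED_MANY implementation.
 */
#include <linux/gfp.h>
#include <linux/mempolicy.h>
#include <linux/topology.h>

static struct page *example_alloc(gfp_t gfp, unsigned int order,
				  struct mempolicy *pol)
{
	if (mpol_is_preferred_many(pol)) {
		struct page *page;

		/* First pass: try only the preferred nodes, quietly. */
		page = __alloc_pages(gfp | __GFP_NOWARN, order,
				     numa_node_id(),
				     policy_nodemask(gfp, pol));
		if (page)
			return page;
	}

	/* Fall back to an unrestricted allocation. */
	return __alloc_pages(gfp, order, numa_node_id(), NULL);
}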
diff --git a/include/linux/memregion.h b/include/linux/memregion.h
index e11595256cac..c04c4fd2e209 100644
--- a/include/linux/memregion.h
+++ b/include/linux/memregion.h
@@ -16,7 +16,7 @@ static inline int memregion_alloc(gfp_t gfp)
{
return -ENOMEM;
}
-void memregion_free(int id)
+static inline void memregion_free(int id)
{
}
#endif
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 6fefb09af7c3..7fcaf3180a5b 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -1,6 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_
+
+#include <linux/mmzone.h>
+#include <linux/range.h>
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>
@@ -16,7 +19,7 @@ struct device;
* @alloc: track pages consumed, private to vmemmap_populate()
*/
struct vmem_altmap {
- const unsigned long base_pfn;
+ unsigned long base_pfn;
const unsigned long end_pfn;
const unsigned long reserve;
unsigned long free;
@@ -25,7 +28,7 @@ struct vmem_altmap {
};
/*
- * Specialize ZONE_DEVICE memory into multiple types each having differents
+ * Specialize ZONE_DEVICE memory into multiple types, each with a different
* usage.
*
* MEMORY_DEVICE_PRIVATE:
@@ -36,7 +39,14 @@ struct vmem_altmap {
* must be treated as an opaque object, rather than a "normal" struct page.
*
* A more complete discussion of unaddressable memory may be found in
- * include/linux/hmm.h and Documentation/vm/hmm.rst.
+ * include/linux/hmm.h and Documentation/mm/hmm.rst.
+ *
+ * MEMORY_DEVICE_COHERENT:
+ * Device memory that is cache coherent from device and CPU point of view. This
+ * is used on platforms that have an advanced system bus (like CAPI or CXL). A
+ * driver can hotplug the device memory using ZONE_DEVICE and with that memory
+ * type. Any page of a process can be migrated to such memory. However, no one
+ * should be allowed to pin such memory so that it can always be evicted.
*
* MEMORY_DEVICE_FS_DAX:
* Host memory that has similar access semantics as System RAM i.e. DMA
@@ -46,11 +56,10 @@ struct vmem_altmap {
* wakeup is used to coordinate physical address space management (ex:
* fs truncate/hole punch) vs pinned pages (ex: device dma).
*
- * MEMORY_DEVICE_DEVDAX:
+ * MEMORY_DEVICE_GENERIC:
* Host memory that has similar access semantics as System RAM i.e. DMA
- * coherent and supports page pinning. In contrast to
- * MEMORY_DEVICE_FS_DAX, this memory is access via a device-dax
- * character device.
+ * coherent and supports page pinning. This is for example used by DAX devices
+ * that expose memory using a character device.
*
* MEMORY_DEVICE_PCI_P2PDMA:
* Device memory residing in a PCI BAR intended for use with Peer-to-Peer
@@ -59,34 +68,37 @@ struct vmem_altmap {
enum memory_type {
/* 0 is reserved to catch uninitialized type fields */
MEMORY_DEVICE_PRIVATE = 1,
+ MEMORY_DEVICE_COHERENT,
MEMORY_DEVICE_FS_DAX,
- MEMORY_DEVICE_DEVDAX,
+ MEMORY_DEVICE_GENERIC,
MEMORY_DEVICE_PCI_P2PDMA,
};
struct dev_pagemap_ops {
/*
- * Called once the page refcount reaches 1. (ZONE_DEVICE pages never
- * reach 0 refcount unless there is a refcount bug. This allows the
- * device driver to implement its own memory management.)
+ * Called once the page refcount reaches 0. The reference count will be
+ * reset to one by the core code after the method is called to prepare
+ * for handing out the page again.
*/
void (*page_free)(struct page *page);
/*
- * Transition the refcount in struct dev_pagemap to the dead state.
- */
- void (*kill)(struct dev_pagemap *pgmap);
-
- /*
- * Wait for refcount in struct dev_pagemap to be idle and reap it.
- */
- void (*cleanup)(struct dev_pagemap *pgmap);
-
- /*
* Used for private (un-addressable) device memory only. Must migrate
* the page back to a CPU accessible page.
*/
vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
+
+ /*
+ * Handle the memory failure happens on a range of pfns. Notify the
+ * processes who are using these pfns, and try to recover the data on
+ * them if necessary. The mf_flags is finally passed to the recover
+ * function through the whole notify routine.
+ *
+ * When this is not implemented, or it returns -EOPNOTSUPP, the caller
+ * will fall back to a common handler called mf_generic_kill_procs().
+ */
+ int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn,
+ unsigned long nr_pages, int mf_flags);
};
#define PGMAP_ALTMAP_VALID (1 << 0)
@@ -94,27 +106,44 @@ struct dev_pagemap_ops {
/**
* struct dev_pagemap - metadata for ZONE_DEVICE mappings
* @altmap: pre-allocated/reserved memory for vmemmap allocations
- * @res: physical address range covered by @ref
* @ref: reference count that pins the devm_memremap_pages() mapping
- * @internal_ref: internal reference if @ref is not provided by the caller
- * @done: completion for @internal_ref
- * @dev: host device of the mapping for debug
- * @data: private data pointer for page_free()
+ * @done: completion for @ref
* @type: memory type: see MEMORY_* in memory_hotplug.h
* @flags: PGMAP_* flags to specify detailed behavior
+ * @vmemmap_shift: structural definition of how the vmemmap page metadata
+ * is populated, specifically the metadata page order.
+ * A zero value (default) uses base pages as the vmemmap metadata
+ * representation. A bigger value will set up compound struct pages
+ * of the requested order value.
* @ops: method table
+ * @owner: an opaque pointer identifying the entity that manages this
+ * instance. Used by various helpers to make sure that no
+ * foreign ZONE_DEVICE memory is accessed.
+ * @nr_range: number of ranges to be mapped
+ * @range: range to be mapped when nr_range == 1
+ * @ranges: array of ranges to be mapped when nr_range > 1
*/
struct dev_pagemap {
struct vmem_altmap altmap;
- struct resource res;
- struct percpu_ref *ref;
- struct percpu_ref internal_ref;
+ struct percpu_ref ref;
struct completion done;
enum memory_type type;
unsigned int flags;
+ unsigned long vmemmap_shift;
const struct dev_pagemap_ops *ops;
+ void *owner;
+ int nr_range;
+ union {
+ struct range range;
+ struct range ranges[0];
+ };
};
+static inline bool pgmap_has_memory_failure(struct dev_pagemap *pgmap)
+{
+ return pgmap->ops && pgmap->ops->memory_failure;
+}
+
static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
{
if (pgmap->flags & PGMAP_ALTMAP_VALID)
@@ -122,16 +151,54 @@ static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
return NULL;
}
+static inline unsigned long pgmap_vmemmap_nr(struct dev_pagemap *pgmap)
+{
+ return 1 << pgmap->vmemmap_shift;
+}
+
+static inline bool is_device_private_page(const struct page *page)
+{
+ return IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
+ is_zone_device_page(page) &&
+ page->pgmap->type == MEMORY_DEVICE_PRIVATE;
+}
+
+static inline bool folio_is_device_private(const struct folio *folio)
+{
+ return is_device_private_page(&folio->page);
+}
+
+static inline bool is_pci_p2pdma_page(const struct page *page)
+{
+ return IS_ENABLED(CONFIG_PCI_P2PDMA) &&
+ is_zone_device_page(page) &&
+ page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
+}
+
+static inline bool is_device_coherent_page(const struct page *page)
+{
+ return is_zone_device_page(page) &&
+ page->pgmap->type == MEMORY_DEVICE_COHERENT;
+}
+
+static inline bool folio_is_device_coherent(const struct folio *folio)
+{
+ return is_device_coherent_page(&folio->page);
+}
+
#ifdef CONFIG_ZONE_DEVICE
+void zone_device_page_init(struct page *page);
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
void memunmap_pages(struct dev_pagemap *pgmap);
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
struct dev_pagemap *pgmap);
+bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
+unsigned long memremap_compat_align(void);
#else
static inline void *devm_memremap_pages(struct device *dev,
struct dev_pagemap *pgmap)
@@ -156,6 +223,11 @@ static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
return NULL;
}
+static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
+{
+ return false;
+}
+
static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
return 0;
@@ -165,11 +237,18 @@ static inline void vmem_altmap_free(struct vmem_altmap *altmap,
unsigned long nr_pfns)
{
}
+
+/* when memremap_pages() is disabled all archs can remap a single page */
+static inline unsigned long memremap_compat_align(void)
+{
+ return PAGE_SIZE;
+}
#endif /* CONFIG_ZONE_DEVICE */
static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
if (pgmap)
- percpu_ref_put(pgmap->ref);
+ percpu_ref_put(&pgmap->ref);
}
+
#endif /* _LINUX_MEMREMAP_H_ */
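
The struct dev_pagemap rework above drops the caller-supplied percpu_ref and struct resource in favour of an embedded ref plus a range/nr_range pair. A hedged sketch of filling the reworked structure for a single range — res_start/res_size, example_page_free() and the choice of MEMORY_DEVICE_GENERIC are illustrative assumptions:

/*
 * Hedged sketch: fills the reworked struct dev_pagemap for one range.
 * The helper names and the physical range are illustrative; only the
 * struct fields, ops and devm_memremap_pages() come from this header.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/memremap.h>

static void example_page_free(struct page *page)
{
	/* Invoked once a ZONE_DEVICE page's refcount drops to zero. */
}

static const struct dev_pagemap_ops example_ops = {
	.page_free = example_page_free,
};

static void *example_map(struct device *dev, u64 res_start, u64 res_size)
{
	struct dev_pagemap *pgmap;

	pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return ERR_PTR(-ENOMEM);

	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->nr_range = 1;		/* single range: use @range */
	pgmap->range.start = res_start;
	pgmap->range.end = res_start + res_size - 1;
	pgmap->ops = &example_ops;

	/* The embedded percpu_ref is initialized by the core. */
	return devm_memremap_pages(dev, pgmap);
}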
diff --git a/include/linux/memstick.h b/include/linux/memstick.h
index 216a713bef7f..ebf73d4ee969 100644
--- a/include/linux/memstick.h
+++ b/include/linux/memstick.h
@@ -281,6 +281,7 @@ struct memstick_host {
struct memstick_dev *card;
unsigned int retries;
+ bool removing;
/* Notify the host that some requests are pending. */
void (*request)(struct memstick_host *host);
@@ -288,7 +289,7 @@ struct memstick_host {
int (*set_param)(struct memstick_host *host,
enum memstick_param param,
int value);
- unsigned long private[0] ____cacheline_aligned;
+ unsigned long private[] ____cacheline_aligned;
};
struct memstick_driver {
diff --git a/include/linux/mfd/ab3100.h b/include/linux/mfd/ab3100.h
deleted file mode 100644
index a881d8495186..000000000000
--- a/include/linux/mfd/ab3100.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2007-2009 ST-Ericsson AB
- * AB3100 core access functions
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- */
-
-#include <linux/regulator/machine.h>
-
-struct device;
-
-#ifndef MFD_AB3100_H
-#define MFD_AB3100_H
-
-
-#define AB3100_P1A 0xc0
-#define AB3100_P1B 0xc1
-#define AB3100_P1C 0xc2
-#define AB3100_P1D 0xc3
-#define AB3100_P1E 0xc4
-#define AB3100_P1F 0xc5
-#define AB3100_P1G 0xc6
-#define AB3100_R2A 0xc7
-#define AB3100_R2B 0xc8
-
-/*
- * AB3100, EVENTA1, A2 and A3 event register flags
- * these are catenated into a single 32-bit flag in the code
- * for event notification broadcasts.
- */
-#define AB3100_EVENTA1_ONSWA (0x01<<16)
-#define AB3100_EVENTA1_ONSWB (0x02<<16)
-#define AB3100_EVENTA1_ONSWC (0x04<<16)
-#define AB3100_EVENTA1_DCIO (0x08<<16)
-#define AB3100_EVENTA1_OVER_TEMP (0x10<<16)
-#define AB3100_EVENTA1_SIM_OFF (0x20<<16)
-#define AB3100_EVENTA1_VBUS (0x40<<16)
-#define AB3100_EVENTA1_VSET_USB (0x80<<16)
-
-#define AB3100_EVENTA2_READY_TX (0x01<<8)
-#define AB3100_EVENTA2_READY_RX (0x02<<8)
-#define AB3100_EVENTA2_OVERRUN_ERROR (0x04<<8)
-#define AB3100_EVENTA2_FRAMING_ERROR (0x08<<8)
-#define AB3100_EVENTA2_CHARG_OVERCURRENT (0x10<<8)
-#define AB3100_EVENTA2_MIDR (0x20<<8)
-#define AB3100_EVENTA2_BATTERY_REM (0x40<<8)
-#define AB3100_EVENTA2_ALARM (0x80<<8)
-
-#define AB3100_EVENTA3_ADC_TRIG5 (0x01)
-#define AB3100_EVENTA3_ADC_TRIG4 (0x02)
-#define AB3100_EVENTA3_ADC_TRIG3 (0x04)
-#define AB3100_EVENTA3_ADC_TRIG2 (0x08)
-#define AB3100_EVENTA3_ADC_TRIGVBAT (0x10)
-#define AB3100_EVENTA3_ADC_TRIGVTX (0x20)
-#define AB3100_EVENTA3_ADC_TRIG1 (0x40)
-#define AB3100_EVENTA3_ADC_TRIG0 (0x80)
-
-/* AB3100, STR register flags */
-#define AB3100_STR_ONSWA (0x01)
-#define AB3100_STR_ONSWB (0x02)
-#define AB3100_STR_ONSWC (0x04)
-#define AB3100_STR_DCIO (0x08)
-#define AB3100_STR_BOOT_MODE (0x10)
-#define AB3100_STR_SIM_OFF (0x20)
-#define AB3100_STR_BATT_REMOVAL (0x40)
-#define AB3100_STR_VBUS (0x80)
-
-/*
- * AB3100 contains 8 regulators, one external regulator controller
- * and a buck converter, further the LDO E and buck converter can
- * have separate settings if they are in sleep mode, this is
- * modeled as a separate regulator.
- */
-#define AB3100_NUM_REGULATORS 10
-
-/**
- * struct ab3100
- * @access_mutex: lock out concurrent accesses to the AB3100 registers
- * @dev: pointer to the containing device
- * @i2c_client: I2C client for this chip
- * @testreg_client: secondary client for test registers
- * @chip_name: name of this chip variant
- * @chip_id: 8 bit chip ID for this chip variant
- * @event_subscribers: event subscribers are listed here
- * @startup_events: a copy of the first reading of the event registers
- * @startup_events_read: whether the first events have been read
- *
- * This struct is PRIVATE and devices using it should NOT
- * access ANY fields. It is used as a token for calling the
- * AB3100 functions.
- */
-struct ab3100 {
- struct mutex access_mutex;
- struct device *dev;
- struct i2c_client *i2c_client;
- struct i2c_client *testreg_client;
- char chip_name[32];
- u8 chip_id;
- struct blocking_notifier_head event_subscribers;
- u8 startup_events[3];
- bool startup_events_read;
-};
-
-/**
- * struct ab3100_platform_data
- * Data supplied to initialize board connections to the AB3100
- * @reg_constraints: regulator constraints for target board
- * the order of these constraints are: LDO A, C, D, E,
- * F, G, H, K, EXT and BUCK.
- * @reg_initvals: initial values for the regulator registers
- * plus two sleep settings for LDO E and the BUCK converter.
- * exactly AB3100_NUM_REGULATORS+2 values must be sent in.
- * Order: LDO A, C, E, E sleep, F, G, H, K, EXT, BUCK,
- * BUCK sleep, LDO D. (LDO D need to be initialized last.)
- * @external_voltage: voltage level of the external regulator.
- */
-struct ab3100_platform_data {
- struct regulator_init_data reg_constraints[AB3100_NUM_REGULATORS];
- u8 reg_initvals[AB3100_NUM_REGULATORS+2];
- int external_voltage;
-};
-
-int ab3100_event_register(struct ab3100 *ab3100,
- struct notifier_block *nb);
-int ab3100_event_unregister(struct ab3100 *ab3100,
- struct notifier_block *nb);
-
-#endif /* MFD_AB3100_H */
diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
index 23040b6f1615..7f07cfe44753 100644
--- a/include/linux/mfd/abx500.h
+++ b/include/linux/mfd/abx500.h
@@ -28,282 +28,6 @@ struct abx500_init_settings {
u8 setting;
};
-/* Battery driver related data */
-/*
- * ADC for the battery thermistor.
- * When using the ABx500_ADC_THERM_BATCTRL the battery ID resistor is combined
- * with an NTC resistor to both identify the battery and to measure its
- * temperature. Different phone manufacturers use different techniques to both
- * identify the battery and to read its temperature.
- */
-enum abx500_adc_therm {
- ABx500_ADC_THERM_BATCTRL,
- ABx500_ADC_THERM_BATTEMP,
-};
-
-/**
- * struct abx500_res_to_temp - defines one point in a temp to res curve. To
- * be used in battery packs that combine the identification resistor with an
- * NTC resistor.
- * @temp: battery pack temperature in Celsius
- * @resist: NTC resistor net total resistance
- */
-struct abx500_res_to_temp {
- int temp;
- int resist;
-};
-
-/**
- * struct abx500_v_to_cap - Table for translating voltage to capacity
- * @voltage: Voltage in mV
- * @capacity: Capacity in percent
- */
-struct abx500_v_to_cap {
- int voltage;
- int capacity;
-};
-
-/* Forward declaration */
-struct abx500_fg;
-
-/**
- * struct abx500_fg_parameters - Fuel gauge algorithm parameters, in seconds
- * if not specified
- * @recovery_sleep_timer: Time between measurements while recovering
- * @recovery_total_time: Total recovery time
- * @init_timer: Measurement interval during startup
- * @init_discard_time: Time we discard voltage measurement at startup
- * @init_total_time: Total init time during startup
- * @high_curr_time: Time current has to be high to go to recovery
- * @accu_charging: FG accumulation time while charging
- * @accu_high_curr: FG accumulation time in high current mode
- * @high_curr_threshold: High current threshold, in mA
- * @lowbat_threshold: Low battery threshold, in mV
- * @overbat_threshold: Over battery threshold, in mV
- * @battok_falling_th_sel0 Threshold in mV for battOk signal sel0
- * Resolution in 50 mV step.
- * @battok_raising_th_sel1 Threshold in mV for battOk signal sel1
- * Resolution in 50 mV step.
- * @user_cap_limit Capacity reported from user must be within this
- * limit to be considered as sane, in percentage
- * points.
- * @maint_thres This is the threshold where we stop reporting
- * battery full while in maintenance, in per cent
- * @pcut_enable: Enable power cut feature in ab8505
- * @pcut_max_time: Max time threshold
- * @pcut_flag_time: Flagtime threshold
- * @pcut_max_restart: Max number of restarts
- * @pcut_debounce_time: Sets battery debounce time
- */
-struct abx500_fg_parameters {
- int recovery_sleep_timer;
- int recovery_total_time;
- int init_timer;
- int init_discard_time;
- int init_total_time;
- int high_curr_time;
- int accu_charging;
- int accu_high_curr;
- int high_curr_threshold;
- int lowbat_threshold;
- int overbat_threshold;
- int battok_falling_th_sel0;
- int battok_raising_th_sel1;
- int user_cap_limit;
- int maint_thres;
- bool pcut_enable;
- u8 pcut_max_time;
- u8 pcut_flag_time;
- u8 pcut_max_restart;
- u8 pcut_debounce_time;
-};
-
-/**
- * struct abx500_charger_maximization - struct used by the board config.
- * @use_maxi: Enable maximization for this battery type
- * @maxi_chg_curr: Maximum charger current allowed
- * @maxi_wait_cycles: cycles to wait before setting charger current
- * @charger_curr_step delta between two charger current settings (mA)
- */
-struct abx500_maxim_parameters {
- bool ena_maxi;
- int chg_curr;
- int wait_cycles;
- int charger_curr_step;
-};
-
-/**
- * struct abx500_battery_type - different batteries supported
- * @name: battery technology
- * @resis_high: battery upper resistance limit
- * @resis_low: battery lower resistance limit
- * @charge_full_design: Maximum battery capacity in mAh
- * @nominal_voltage: Nominal voltage of the battery in mV
- * @termination_vol: max voltage up to which battery can be charged
- * @termination_curr battery charging termination current in mA
- * @recharge_cap battery capacity limit that will trigger a new
- * full charging cycle in the case where maintenance
- * charging has been disabled
- * @normal_cur_lvl: charger current in normal state in mA
- * @normal_vol_lvl: charger voltage in normal state in mV
- * @maint_a_cur_lvl: charger current in maintenance A state in mA
- * @maint_a_vol_lvl: charger voltage in maintenance A state in mV
- * @maint_a_chg_timer_h: charge time in maintenance A state
- * @maint_b_cur_lvl: charger current in maintenance B state in mA
- * @maint_b_vol_lvl: charger voltage in maintenance B state in mV
- * @maint_b_chg_timer_h: charge time in maintenance B state
- * @low_high_cur_lvl: charger current in temp low/high state in mA
- * @low_high_vol_lvl: charger voltage in temp low/high state in mV'
- * @battery_resistance: battery inner resistance in mOhm.
- * @n_r_t_tbl_elements: number of elements in r_to_t_tbl
- * @r_to_t_tbl: table containing resistance to temp points
- * @n_v_cap_tbl_elements: number of elements in v_to_cap_tbl
- * @v_to_cap_tbl: Voltage to capacity (in %) table
- * @n_batres_tbl_elements number of elements in the batres_tbl
- * @batres_tbl battery internal resistance vs temperature table
- */
-struct abx500_battery_type {
- int name;
- int resis_high;
- int resis_low;
- int charge_full_design;
- int nominal_voltage;
- int termination_vol;
- int termination_curr;
- int recharge_cap;
- int normal_cur_lvl;
- int normal_vol_lvl;
- int maint_a_cur_lvl;
- int maint_a_vol_lvl;
- int maint_a_chg_timer_h;
- int maint_b_cur_lvl;
- int maint_b_vol_lvl;
- int maint_b_chg_timer_h;
- int low_high_cur_lvl;
- int low_high_vol_lvl;
- int battery_resistance;
- int n_temp_tbl_elements;
- const struct abx500_res_to_temp *r_to_t_tbl;
- int n_v_cap_tbl_elements;
- const struct abx500_v_to_cap *v_to_cap_tbl;
- int n_batres_tbl_elements;
- const struct batres_vs_temp *batres_tbl;
-};
-
-/**
- * struct abx500_bm_capacity_levels - abx500 capacity level data
- * @critical: critical capacity level in percent
- * @low: low capacity level in percent
- * @normal: normal capacity level in percent
- * @high: high capacity level in percent
- * @full: full capacity level in percent
- */
-struct abx500_bm_capacity_levels {
- int critical;
- int low;
- int normal;
- int high;
- int full;
-};
-
-/**
- * struct abx500_bm_charger_parameters - Charger specific parameters
- * @usb_volt_max: maximum allowed USB charger voltage in mV
- * @usb_curr_max: maximum allowed USB charger current in mA
- * @ac_volt_max: maximum allowed AC charger voltage in mV
- * @ac_curr_max: maximum allowed AC charger current in mA
- */
-struct abx500_bm_charger_parameters {
- int usb_volt_max;
- int usb_curr_max;
- int ac_volt_max;
- int ac_curr_max;
-};
-
-/**
- * struct abx500_bm_data - abx500 battery management data
- * @temp_under under this temp, charging is stopped
- * @temp_low between this temp and temp_under charging is reduced
- * @temp_high between this temp and temp_over charging is reduced
- * @temp_over over this temp, charging is stopped
- * @temp_now present battery temperature
- * @temp_interval_chg temperature measurement interval in s when charging
- * @temp_interval_nochg temperature measurement interval in s when not charging
- * @main_safety_tmr_h safety timer for main charger
- * @usb_safety_tmr_h safety timer for usb charger
- * @bkup_bat_v voltage which we charge the backup battery with
- * @bkup_bat_i current which we charge the backup battery with
- * @no_maintenance indicates that maintenance charging is disabled
- * @capacity_scaling indicates whether capacity scaling is to be used
- * @abx500_adc_therm placement of thermistor, batctrl or battemp adc
- * @chg_unknown_bat flag to enable charging of unknown batteries
- * @enable_overshoot flag to enable VBAT overshoot control
- * @auto_trig flag to enable auto adc trigger
- * @fg_res resistance of FG resistor in 0.1mOhm
- * @n_btypes number of elements in array bat_type
- * @batt_id index of the identified battery in array bat_type
- * @interval_charging charge alg cycle period time when charging (sec)
- * @interval_not_charging charge alg cycle period time when not charging (sec)
- * @temp_hysteresis temperature hysteresis
- * @gnd_lift_resistance Battery ground to phone ground resistance (mOhm)
- * @n_chg_out_curr number of elements in array chg_output_curr
- * @n_chg_in_curr number of elements in array chg_input_curr
- * @chg_output_curr charger output current level map
- * @chg_input_curr charger input current level map
- * @maxi maximization parameters
- * @cap_levels capacity in percent for the different capacity levels
- * @bat_type table of supported battery types
- * @chg_params charger parameters
- * @fg_params fuel gauge parameters
- */
-struct abx500_bm_data {
- int temp_under;
- int temp_low;
- int temp_high;
- int temp_over;
- int temp_now;
- int temp_interval_chg;
- int temp_interval_nochg;
- int main_safety_tmr_h;
- int usb_safety_tmr_h;
- int bkup_bat_v;
- int bkup_bat_i;
- bool autopower_cfg;
- bool ac_enabled;
- bool usb_enabled;
- bool no_maintenance;
- bool capacity_scaling;
- bool chg_unknown_bat;
- bool enable_overshoot;
- bool auto_trig;
- enum abx500_adc_therm adc_therm;
- int fg_res;
- int n_btypes;
- int batt_id;
- int interval_charging;
- int interval_not_charging;
- int temp_hysteresis;
- int gnd_lift_resistance;
- int n_chg_out_curr;
- int n_chg_in_curr;
- int *chg_output_curr;
- int *chg_input_curr;
- const struct abx500_maxim_parameters *maxi;
- const struct abx500_bm_capacity_levels *cap_levels;
- struct abx500_battery_type *bat_type;
- const struct abx500_bm_charger_parameters *chg_params;
- const struct abx500_fg_parameters *fg_params;
-};
-
-enum {
- NTC_EXTERNAL = 0,
- NTC_INTERNAL,
-};
-
-int ab8500_bm_of_probe(struct device *dev,
- struct device_node *np,
- struct abx500_bm_data *bm);
-
int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg,
u8 value);
int abx500_get_register_interruptible(struct device *dev, u8 bank, u8 reg,
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index 524a7e4702c2..302a330c5c84 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -368,7 +368,6 @@ struct ab8500 {
int it_latchhier_num;
};
-struct ab8500_regulator_platform_data;
struct ab8500_codec_platform_data;
struct ab8500_sysctrl_platform_data;
@@ -376,11 +375,9 @@ struct ab8500_sysctrl_platform_data;
* struct ab8500_platform_data - AB8500 platform data
* @irq_base: start of AB8500 IRQs, AB8500_NR_IRQS will be used
* @init: board-specific initialization after detection of ab8500
- * @regulator: machine-specific constraints for regulators
*/
struct ab8500_platform_data {
void (*init) (struct ab8500 *);
- struct ab8500_regulator_platform_data *regulator;
struct ab8500_codec_platform_data *codec;
struct ab8500_sysctrl_platform_data *sysctrl;
};
diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
deleted file mode 100644
index 9b97d284d0ce..000000000000
--- a/include/linux/mfd/abx500/ux500_chargalg.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2012
- * Author: Johan Gardsmark <johan.gardsmark@stericsson.com> for ST-Ericsson.
- */
-
-#ifndef _UX500_CHARGALG_H
-#define _UX500_CHARGALG_H
-
-#include <linux/power_supply.h>
-
-/*
- * Valid only for supplies of type:
- * - POWER_SUPPLY_TYPE_MAINS,
- * - POWER_SUPPLY_TYPE_USB,
- * because only they store a pointer to struct ux500_charger as drv_data.
- */
-#define psy_to_ux500_charger(x) power_supply_get_drvdata(psy)
-
-/* Forward declaration */
-struct ux500_charger;
-
-struct ux500_charger_ops {
- int (*enable) (struct ux500_charger *, int, int, int);
- int (*check_enable) (struct ux500_charger *, int, int);
- int (*kick_wd) (struct ux500_charger *);
- int (*update_curr) (struct ux500_charger *, int);
-};
-
-/**
- * struct ux500_charger - power supply ux500 charger sub class
- * @psy power supply base class
- * @ops ux500 charger operations
- * @max_out_volt maximum output charger voltage in mV
- * @max_out_curr maximum output charger current in mA
- * @enabled indicates if this charger is used or not
- * @external external charger unit (pm2xxx)
- */
-struct ux500_charger {
- struct power_supply *psy;
- struct ux500_charger_ops ops;
- int max_out_volt;
- int max_out_curr;
- int wdt_refresh;
- bool enabled;
- bool external;
-};
-
-extern struct blocking_notifier_head charger_notifier_list;
-
-#endif
diff --git a/include/linux/mfd/atc260x/atc2603c.h b/include/linux/mfd/atc260x/atc2603c.h
new file mode 100644
index 000000000000..07ac640ef3e1
--- /dev/null
+++ b/include/linux/mfd/atc260x/atc2603c.h
@@ -0,0 +1,281 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ATC2603C PMIC register definitions
+ *
+ * Copyright (C) 2020 Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
+ */
+
+#ifndef __LINUX_MFD_ATC260X_ATC2603C_H
+#define __LINUX_MFD_ATC260X_ATC2603C_H
+
+enum atc2603c_irq_def {
+ ATC2603C_IRQ_AUDIO = 0,
+ ATC2603C_IRQ_OV,
+ ATC2603C_IRQ_OC,
+ ATC2603C_IRQ_OT,
+ ATC2603C_IRQ_UV,
+ ATC2603C_IRQ_ALARM,
+ ATC2603C_IRQ_ONOFF,
+ ATC2603C_IRQ_SGPIO,
+ ATC2603C_IRQ_IR,
+ ATC2603C_IRQ_REMCON,
+ ATC2603C_IRQ_POWER_IN,
+};
+
+/* PMU Registers */
+#define ATC2603C_PMU_SYS_CTL0 0x00
+#define ATC2603C_PMU_SYS_CTL1 0x01
+#define ATC2603C_PMU_SYS_CTL2 0x02
+#define ATC2603C_PMU_SYS_CTL3 0x03
+#define ATC2603C_PMU_SYS_CTL4 0x04
+#define ATC2603C_PMU_SYS_CTL5 0x05
+#define ATC2603C_PMU_SYS_CTL6 0x06
+#define ATC2603C_PMU_SYS_CTL7 0x07
+#define ATC2603C_PMU_SYS_CTL8 0x08
+#define ATC2603C_PMU_SYS_CTL9 0x09
+#define ATC2603C_PMU_BAT_CTL0 0x0A
+#define ATC2603C_PMU_BAT_CTL1 0x0B
+#define ATC2603C_PMU_VBUS_CTL0 0x0C
+#define ATC2603C_PMU_VBUS_CTL1 0x0D
+#define ATC2603C_PMU_WALL_CTL0 0x0E
+#define ATC2603C_PMU_WALL_CTL1 0x0F
+#define ATC2603C_PMU_SYS_PENDING 0x10
+#define ATC2603C_PMU_DC1_CTL0 0x11
+#define ATC2603C_PMU_DC1_CTL1 0x12 // Undocumented
+#define ATC2603C_PMU_DC1_CTL2 0x13 // Undocumented
+#define ATC2603C_PMU_DC2_CTL0 0x14
+#define ATC2603C_PMU_DC2_CTL1 0x15 // Undocumented
+#define ATC2603C_PMU_DC2_CTL2 0x16 // Undocumented
+#define ATC2603C_PMU_DC3_CTL0 0x17
+#define ATC2603C_PMU_DC3_CTL1 0x18 // Undocumented
+#define ATC2603C_PMU_DC3_CTL2 0x19 // Undocumented
+#define ATC2603C_PMU_DC4_CTL0 0x1A // Undocumented
+#define ATC2603C_PMU_DC4_CTL1 0x1B // Undocumented
+#define ATC2603C_PMU_DC5_CTL0 0x1C // Undocumented
+#define ATC2603C_PMU_DC5_CTL1 0x1D // Undocumented
+#define ATC2603C_PMU_LDO1_CTL 0x1E
+#define ATC2603C_PMU_LDO2_CTL 0x1F
+#define ATC2603C_PMU_LDO3_CTL 0x20
+#define ATC2603C_PMU_LDO4_CTL 0x21 // Undocumented
+#define ATC2603C_PMU_LDO5_CTL 0x22
+#define ATC2603C_PMU_LDO6_CTL 0x23
+#define ATC2603C_PMU_LDO7_CTL 0x24
+#define ATC2603C_PMU_LDO8_CTL 0x25 // Undocumented
+#define ATC2603C_PMU_LDO9_CTL 0x26 // Undocumented
+#define ATC2603C_PMU_LDO10_CTL 0x27 // Undocumented
+#define ATC2603C_PMU_LDO11_CTL 0x28
+#define ATC2603C_PMU_SWITCH_CTL 0x29
+#define ATC2603C_PMU_OV_CTL0 0x2A
+#define ATC2603C_PMU_OV_CTL1 0x2B
+#define ATC2603C_PMU_OV_STATUS 0x2C
+#define ATC2603C_PMU_OV_EN 0x2D
+#define ATC2603C_PMU_OV_INT_EN 0x2E
+#define ATC2603C_PMU_OC_CTL 0x2F
+#define ATC2603C_PMU_OC_STATUS 0x30
+#define ATC2603C_PMU_OC_EN 0x31
+#define ATC2603C_PMU_OC_INT_EN 0x32
+#define ATC2603C_PMU_UV_CTL0 0x33
+#define ATC2603C_PMU_UV_CTL1 0x34
+#define ATC2603C_PMU_UV_STATUS 0x35
+#define ATC2603C_PMU_UV_EN 0x36
+#define ATC2603C_PMU_UV_INT_EN 0x37
+#define ATC2603C_PMU_OT_CTL 0x38
+#define ATC2603C_PMU_CHARGER_CTL0 0x39
+#define ATC2603C_PMU_CHARGER_CTL1 0x3A
+#define ATC2603C_PMU_CHARGER_CTL2 0x3B
+#define ATC2603C_PMU_BAKCHARGER_CTL 0x3C // Undocumented
+#define ATC2603C_PMU_APDS_CTL 0x3D
+#define ATC2603C_PMU_AUXADC_CTL0 0x3E
+#define ATC2603C_PMU_AUXADC_CTL1 0x3F
+#define ATC2603C_PMU_BATVADC 0x40
+#define ATC2603C_PMU_BATIADC 0x41
+#define ATC2603C_PMU_WALLVADC 0x42
+#define ATC2603C_PMU_WALLIADC 0x43
+#define ATC2603C_PMU_VBUSVADC 0x44
+#define ATC2603C_PMU_VBUSIADC 0x45
+#define ATC2603C_PMU_SYSPWRADC 0x46
+#define ATC2603C_PMU_REMCONADC 0x47
+#define ATC2603C_PMU_SVCCADC 0x48
+#define ATC2603C_PMU_CHGIADC 0x49
+#define ATC2603C_PMU_IREFADC 0x4A
+#define ATC2603C_PMU_BAKBATADC 0x4B
+#define ATC2603C_PMU_ICTEMPADC 0x4C
+#define ATC2603C_PMU_AUXADC0 0x4D
+#define ATC2603C_PMU_AUXADC1 0x4E
+#define ATC2603C_PMU_AUXADC2 0x4F
+#define ATC2603C_PMU_ICMADC 0x50
+#define ATC2603C_PMU_BDG_CTL 0x51 // Undocumented
+#define ATC2603C_RTC_CTL 0x52
+#define ATC2603C_RTC_MSALM 0x53
+#define ATC2603C_RTC_HALM 0x54
+#define ATC2603C_RTC_YMDALM 0x55
+#define ATC2603C_RTC_MS 0x56
+#define ATC2603C_RTC_H 0x57
+#define ATC2603C_RTC_DC 0x58
+#define ATC2603C_RTC_YMD 0x59
+#define ATC2603C_EFUSE_DAT 0x5A // Undocumented
+#define ATC2603C_EFUSECRTL1 0x5B // Undocumented
+#define ATC2603C_EFUSECRTL2 0x5C // Undocumented
+#define ATC2603C_PMU_FW_USE0 0x5D // Undocumented
+#define ATC2603C_PMU_FW_USE1 0x5E // Undocumented
+#define ATC2603C_PMU_FW_USE2 0x5F // Undocumented
+#define ATC2603C_PMU_FW_USE3 0x60 // Undocumented
+#define ATC2603C_PMU_FW_USE4 0x61 // Undocumented
+#define ATC2603C_PMU_ABNORMAL_STATUS 0x62
+#define ATC2603C_PMU_WALL_APDS_CTL 0x63
+#define ATC2603C_PMU_REMCON_CTL0 0x64
+#define ATC2603C_PMU_REMCON_CTL1 0x65
+#define ATC2603C_PMU_MUX_CTL0 0x66
+#define ATC2603C_PMU_SGPIO_CTL0 0x67
+#define ATC2603C_PMU_SGPIO_CTL1 0x68
+#define ATC2603C_PMU_SGPIO_CTL2 0x69
+#define ATC2603C_PMU_SGPIO_CTL3 0x6A
+#define ATC2603C_PMU_SGPIO_CTL4 0x6B
+#define ATC2603C_PWMCLK_CTL 0x6C
+#define ATC2603C_PWM0_CTL 0x6D
+#define ATC2603C_PWM1_CTL 0x6E
+#define ATC2603C_PMU_ADC_DBG0 0x70
+#define ATC2603C_PMU_ADC_DBG1 0x71
+#define ATC2603C_PMU_ADC_DBG2 0x72
+#define ATC2603C_PMU_ADC_DBG3 0x73
+#define ATC2603C_PMU_ADC_DBG4 0x74
+#define ATC2603C_IRC_CTL 0x80
+#define ATC2603C_IRC_STAT 0x81
+#define ATC2603C_IRC_CC 0x82
+#define ATC2603C_IRC_KDC 0x83
+#define ATC2603C_IRC_WK 0x84
+#define ATC2603C_IRC_RCC 0x85
+#define ATC2603C_IRC_FILTER 0x86
+
+/* AUDIO_OUT Registers */
+#define ATC2603C_AUDIOINOUT_CTL 0xA0
+#define ATC2603C_AUDIO_DEBUGOUTCTL 0xA1
+#define ATC2603C_DAC_DIGITALCTL 0xA2
+#define ATC2603C_DAC_VOLUMECTL0 0xA3
+#define ATC2603C_DAC_ANALOG0 0xA4
+#define ATC2603C_DAC_ANALOG1 0xA5
+#define ATC2603C_DAC_ANALOG2 0xA6
+#define ATC2603C_DAC_ANALOG3 0xA7
+
+/* AUDIO_IN Registers */
+#define ATC2603C_ADC_DIGITALCTL 0xA8
+#define ATC2603C_ADC_HPFCTL 0xA9
+#define ATC2603C_ADC_CTL 0xAA
+#define ATC2603C_AGC_CTL0 0xAB
+#define ATC2603C_AGC_CTL1 0xAC // Undocumented
+#define ATC2603C_AGC_CTL2 0xAD
+#define ATC2603C_ADC_ANALOG0 0xAE
+#define ATC2603C_ADC_ANALOG1 0xAF
+
+/* PCM_IF Registers */
+#define ATC2603C_PCM0_CTL 0xB0 // Undocumented
+#define ATC2603C_PCM1_CTL 0xB1 // Undocumented
+#define ATC2603C_PCM2_CTL 0xB2 // Undocumented
+#define ATC2603C_PCMIF_CTL 0xB3 // Undocumented
+
+/* CMU_CONTROL Registers */
+#define ATC2603C_CMU_DEVRST 0xC1 // Undocumented
+
+/* INTS Registers */
+#define ATC2603C_INTS_PD 0xC8
+#define ATC2603C_INTS_MSK 0xC9
+
+/* MFP Registers */
+#define ATC2603C_MFP_CTL 0xD0
+#define ATC2603C_PAD_VSEL 0xD1 // Undocumented
+#define ATC2603C_GPIO_OUTEN 0xD2
+#define ATC2603C_GPIO_INEN 0xD3
+#define ATC2603C_GPIO_DAT 0xD4
+#define ATC2603C_PAD_DRV 0xD5
+#define ATC2603C_PAD_EN 0xD6
+#define ATC2603C_DEBUG_SEL 0xD7 // Undocumented
+#define ATC2603C_DEBUG_IE 0xD8 // Undocumented
+#define ATC2603C_DEBUG_OE 0xD9 // Undocumented
+#define ATC2603C_BIST_START 0x0A // Undocumented
+#define ATC2603C_BIST_RESULT 0x0B // Undocumented
+#define ATC2603C_CHIP_VER 0xDC
+
+/* TWSI Registers */
+#define ATC2603C_SADDR 0xFF
+
+/* PMU_SYS_CTL0 Register Mask Bits */
+#define ATC2603C_PMU_SYS_CTL0_IR_WK_EN BIT(5)
+#define ATC2603C_PMU_SYS_CTL0_RESET_WK_EN BIT(6)
+#define ATC2603C_PMU_SYS_CTL0_HDSW_WK_EN BIT(7)
+#define ATC2603C_PMU_SYS_CTL0_ALARM_WK_EN BIT(8)
+#define ATC2603C_PMU_SYS_CTL0_REM_CON_WK_EN BIT(9)
+#define ATC2603C_PMU_SYS_CTL0_RESTART_EN BIT(10)
+#define ATC2603C_PMU_SYS_CTL0_SGPIOIRQ_WK_EN BIT(11)
+#define ATC2603C_PMU_SYS_CTL0_ONOFF_SHORT_WK_EN BIT(12)
+#define ATC2603C_PMU_SYS_CTL0_ONOFF_LONG_WK_EN BIT(13)
+#define ATC2603C_PMU_SYS_CTL0_WALL_WK_EN BIT(14)
+#define ATC2603C_PMU_SYS_CTL0_USB_WK_EN BIT(15)
+#define ATC2603C_PMU_SYS_CTL0_WK_ALL (GENMASK(15, 5) & (~BIT(10)))
+
+/* PMU_SYS_CTL1 Register Mask Bits */
+#define ATC2603C_PMU_SYS_CTL1_EN_S1 BIT(0)
+#define ATC2603C_PMU_SYS_CTL1_LB_S4_EN BIT(2)
+#define ATC2603C_PMU_SYS_CTL1_LB_S4 GENMASK(4, 3)
+#define ATC2603C_PMU_SYS_CTL1_LB_S4_3_1V BIT(4)
+#define ATC2603C_PMU_SYS_CTL1_IR_WK_FLAG BIT(5)
+#define ATC2603C_PMU_SYS_CTL1_RESET_WK_FLAG BIT(6)
+#define ATC2603C_PMU_SYS_CTL1_HDSW_WK_FLAG BIT(7)
+#define ATC2603C_PMU_SYS_CTL1_ALARM_WK_FLAG BIT(8)
+#define ATC2603C_PMU_SYS_CTL1_REM_CON_WK_FLAG BIT(9)
+#define ATC2603C_PMU_SYS_CTL1_ONOFF_PRESS_RESET_IRQ_PD BIT(10)
+#define ATC2603C_PMU_SYS_CTL1_SGPIOIRQ_WK_FLAG BIT(11)
+#define ATC2603C_PMU_SYS_CTL1_ONOFF_SHORT_WK_FLAG BIT(12)
+#define ATC2603C_PMU_SYS_CTL1_ONOFF_LONG_WK_FLAG BIT(13)
+#define ATC2603C_PMU_SYS_CTL1_WALL_WK_FLAG BIT(14)
+#define ATC2603C_PMU_SYS_CTL1_USB_WK_FLAG BIT(15)
+
+/* PMU_SYS_CTL2 Register Mask Bits */
+#define ATC2603C_PMU_SYS_CTL2_PMU_A_EN BIT(0)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_INT_EN BIT(1)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_PD BIT(2)
+#define ATC2603C_PMU_SYS_CTL2_S2TIMER GENMASK(5, 3)
+#define ATC2603C_PMU_SYS_CTL2_S2_TIMER_EN BIT(6)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_RESET_TIME_SEL GENMASK(8, 7)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_RESET_EN BIT(9)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_TIME GENMASK(11, 10)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_INT_EN BIT(12)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_LONG_PRESS BIT(13)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_SHORT_PRESS BIT(14)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS BIT(15)
+
+/* PMU_SYS_CTL3 Register Mask Bits */
+#define ATC2603C_PMU_SYS_CTL3_S2S3TOS1_TIMER GENMASK(8, 7)
+#define ATC2603C_PMU_SYS_CTL3_S2S3TOS1_TIMER_EN BIT(9)
+#define ATC2603C_PMU_SYS_CTL3_S3_TIMER GENMASK(12, 10)
+#define ATC2603C_PMU_SYS_CTL3_S3_TIMER_EN BIT(13)
+#define ATC2603C_PMU_SYS_CTL3_EN_S3 BIT(14)
+#define ATC2603C_PMU_SYS_CTL3_EN_S2 BIT(15)
+
+/* PMU_SYS_CTL5 Register Mask Bits */
+#define ATC2603C_PMU_SYS_CTL5_WALLWKDTEN BIT(7)
+#define ATC2603C_PMU_SYS_CTL5_VBUSWKDTEN BIT(8)
+#define ATC2603C_PMU_SYS_CTL5_REMCON_DECT_EN BIT(9)
+#define ATC2603C_PMU_SYS_CTL5_ONOFF_8S_SEL BIT(10)
+
+/* INTS_MSK Register Mask Bits */
+#define ATC2603C_INTS_MSK_AUDIO BIT(0)
+#define ATC2603C_INTS_MSK_OV BIT(1)
+#define ATC2603C_INTS_MSK_OC BIT(2)
+#define ATC2603C_INTS_MSK_OT BIT(3)
+#define ATC2603C_INTS_MSK_UV BIT(4)
+#define ATC2603C_INTS_MSK_ALARM BIT(5)
+#define ATC2603C_INTS_MSK_ONOFF BIT(6)
+#define ATC2603C_INTS_MSK_SGPIO BIT(7)
+#define ATC2603C_INTS_MSK_IR BIT(8)
+#define ATC2603C_INTS_MSK_REMCON BIT(9)
+#define ATC2603C_INTS_MSK_POWERIN BIT(10)
+
+/* CMU_DEVRST Register Mask Bits */
+#define ATC2603C_CMU_DEVRST_MFP BIT(1)
+#define ATC2603C_CMU_DEVRST_INTS BIT(2)
+#define ATC2603C_CMU_DEVRST_AUDIO BIT(4)
+
+/* PAD_EN Register Mask Bits */
+#define ATC2603C_PAD_EN_EXTIRQ BIT(0)
+
+#endif /* __LINUX_MFD_ATC260X_ATC2603C_H */
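
Most multi-bit fields above are GENMASK() masks, so they compose with FIELD_PREP() and regmap_update_bits(). A hedged sketch of programming one such field — the meaning of the raw press-time encoding is an assumption; only the register offset and mask names come from this header:

#include <linux/bitfield.h>
#include <linux/mfd/atc260x/atc2603c.h>
#include <linux/regmap.h>

/* Hedged sketch: @val is assumed to be the raw 2-bit field encoding. */
static int example_set_onoff_press_time(struct regmap *map, unsigned int val)
{
	return regmap_update_bits(map, ATC2603C_PMU_SYS_CTL2,
				  ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_TIME,
				  FIELD_PREP(ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_TIME,
					     val));
}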
diff --git a/include/linux/mfd/atc260x/atc2609a.h b/include/linux/mfd/atc260x/atc2609a.h
new file mode 100644
index 000000000000..b957d7bd73e9
--- /dev/null
+++ b/include/linux/mfd/atc260x/atc2609a.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ATC2609A PMIC register definitions
+ *
+ * Copyright (C) 2019 Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#ifndef __LINUX_MFD_ATC260X_ATC2609A_H
+#define __LINUX_MFD_ATC260X_ATC2609A_H
+
+enum atc2609a_irq_def {
+ ATC2609A_IRQ_AUDIO = 0,
+ ATC2609A_IRQ_OV,
+ ATC2609A_IRQ_OC,
+ ATC2609A_IRQ_OT,
+ ATC2609A_IRQ_UV,
+ ATC2609A_IRQ_ALARM,
+ ATC2609A_IRQ_ONOFF,
+ ATC2609A_IRQ_WKUP,
+ ATC2609A_IRQ_IR,
+ ATC2609A_IRQ_REMCON,
+ ATC2609A_IRQ_POWER_IN,
+};
+
+/* PMU Registers */
+#define ATC2609A_PMU_SYS_CTL0 0x00
+#define ATC2609A_PMU_SYS_CTL1 0x01
+#define ATC2609A_PMU_SYS_CTL2 0x02
+#define ATC2609A_PMU_SYS_CTL3 0x03
+#define ATC2609A_PMU_SYS_CTL4 0x04
+#define ATC2609A_PMU_SYS_CTL5 0x05
+#define ATC2609A_PMU_SYS_CTL6 0x06
+#define ATC2609A_PMU_SYS_CTL7 0x07
+#define ATC2609A_PMU_SYS_CTL8 0x08
+#define ATC2609A_PMU_SYS_CTL9 0x09
+#define ATC2609A_PMU_BAT_CTL0 0x0A
+#define ATC2609A_PMU_BAT_CTL1 0x0B
+#define ATC2609A_PMU_VBUS_CTL0 0x0C
+#define ATC2609A_PMU_VBUS_CTL1 0x0D
+#define ATC2609A_PMU_WALL_CTL0 0x0E
+#define ATC2609A_PMU_WALL_CTL1 0x0F
+#define ATC2609A_PMU_SYS_PENDING 0x10
+#define ATC2609A_PMU_APDS_CTL0 0x11
+#define ATC2609A_PMU_APDS_CTL1 0x12
+#define ATC2609A_PMU_APDS_CTL2 0x13
+#define ATC2609A_PMU_CHARGER_CTL 0x14
+#define ATC2609A_PMU_BAKCHARGER_CTL 0x15
+#define ATC2609A_PMU_SWCHG_CTL0 0x16
+#define ATC2609A_PMU_SWCHG_CTL1 0x17
+#define ATC2609A_PMU_SWCHG_CTL2 0x18
+#define ATC2609A_PMU_SWCHG_CTL3 0x19
+#define ATC2609A_PMU_SWCHG_CTL4 0x1A
+#define ATC2609A_PMU_DC_OSC 0x1B
+#define ATC2609A_PMU_DC0_CTL0 0x1C
+#define ATC2609A_PMU_DC0_CTL1 0x1D
+#define ATC2609A_PMU_DC0_CTL2 0x1E
+#define ATC2609A_PMU_DC0_CTL3 0x1F
+#define ATC2609A_PMU_DC0_CTL4 0x20
+#define ATC2609A_PMU_DC0_CTL5 0x21
+#define ATC2609A_PMU_DC0_CTL6 0x22
+#define ATC2609A_PMU_DC1_CTL0 0x23
+#define ATC2609A_PMU_DC1_CTL1 0x24
+#define ATC2609A_PMU_DC1_CTL2 0x25
+#define ATC2609A_PMU_DC1_CTL3 0x26
+#define ATC2609A_PMU_DC1_CTL4 0x27
+#define ATC2609A_PMU_DC1_CTL5 0x28
+#define ATC2609A_PMU_DC1_CTL6 0x29
+#define ATC2609A_PMU_DC2_CTL0 0x2A
+#define ATC2609A_PMU_DC2_CTL1 0x2B
+#define ATC2609A_PMU_DC2_CTL2 0x2C
+#define ATC2609A_PMU_DC2_CTL3 0x2D
+#define ATC2609A_PMU_DC2_CTL4 0x2E
+#define ATC2609A_PMU_DC2_CTL5 0x2F
+#define ATC2609A_PMU_DC2_CTL6 0x30
+#define ATC2609A_PMU_DC3_CTL0 0x31
+#define ATC2609A_PMU_DC3_CTL1 0x32
+#define ATC2609A_PMU_DC3_CTL2 0x33
+#define ATC2609A_PMU_DC3_CTL3 0x34
+#define ATC2609A_PMU_DC3_CTL4 0x35
+#define ATC2609A_PMU_DC3_CTL5 0x36
+#define ATC2609A_PMU_DC3_CTL6 0x37
+#define ATC2609A_PMU_DC_ZR 0x38
+#define ATC2609A_PMU_LDO0_CTL0 0x39
+#define ATC2609A_PMU_LDO0_CTL1 0x3A
+#define ATC2609A_PMU_LDO1_CTL0 0x3B
+#define ATC2609A_PMU_LDO1_CTL1 0x3C
+#define ATC2609A_PMU_LDO2_CTL0 0x3D
+#define ATC2609A_PMU_LDO2_CTL1 0x3E
+#define ATC2609A_PMU_LDO3_CTL0 0x3F
+#define ATC2609A_PMU_LDO3_CTL1 0x40
+#define ATC2609A_PMU_LDO4_CTL0 0x41
+#define ATC2609A_PMU_LDO4_CTL1 0x42
+#define ATC2609A_PMU_LDO5_CTL0 0x43
+#define ATC2609A_PMU_LDO5_CTL1 0x44
+#define ATC2609A_PMU_LDO6_CTL0 0x45
+#define ATC2609A_PMU_LDO6_CTL1 0x46
+#define ATC2609A_PMU_LDO7_CTL0 0x47
+#define ATC2609A_PMU_LDO7_CTL1 0x48
+#define ATC2609A_PMU_LDO8_CTL0 0x49
+#define ATC2609A_PMU_LDO8_CTL1 0x4A
+#define ATC2609A_PMU_LDO9_CTL 0x4B
+#define ATC2609A_PMU_OV_INT_EN 0x4C
+#define ATC2609A_PMU_OV_STATUS 0x4D
+#define ATC2609A_PMU_UV_INT_EN 0x4E
+#define ATC2609A_PMU_UV_STATUS 0x4F
+#define ATC2609A_PMU_OC_INT_EN 0x50
+#define ATC2609A_PMU_OC_STATUS 0x51
+#define ATC2609A_PMU_OT_CTL 0x52
+#define ATC2609A_PMU_CM_CTL0 0x53
+#define ATC2609A_PMU_FW_USE0 0x54
+#define ATC2609A_PMU_FW_USE1 0x55
+#define ATC2609A_PMU_ADC12B_I 0x56
+#define ATC2609A_PMU_ADC12B_V 0x57
+#define ATC2609A_PMU_ADC12B_DUMMY 0x58
+#define ATC2609A_PMU_AUXADC_CTL0 0x59
+#define ATC2609A_PMU_AUXADC_CTL1 0x5A
+#define ATC2609A_PMU_BATVADC 0x5B
+#define ATC2609A_PMU_BATIADC 0x5C
+#define ATC2609A_PMU_WALLVADC 0x5D
+#define ATC2609A_PMU_WALLIADC 0x5E
+#define ATC2609A_PMU_VBUSVADC 0x5F
+#define ATC2609A_PMU_VBUSIADC 0x60
+#define ATC2609A_PMU_SYSPWRADC 0x61
+#define ATC2609A_PMU_REMCONADC 0x62
+#define ATC2609A_PMU_SVCCADC 0x63
+#define ATC2609A_PMU_CHGIADC 0x64
+#define ATC2609A_PMU_IREFADC 0x65
+#define ATC2609A_PMU_BAKBATADC 0x66
+#define ATC2609A_PMU_ICTEMPADC 0x67
+#define ATC2609A_PMU_AUXADC0 0x68
+#define ATC2609A_PMU_AUXADC1 0x69
+#define ATC2609A_PMU_AUXADC2 0x6A
+#define ATC2609A_PMU_AUXADC3 0x6B
+#define ATC2609A_PMU_ICTEMPADC_ADJ 0x6C
+#define ATC2609A_PMU_BDG_CTL 0x6D
+#define ATC2609A_RTC_CTL 0x6E
+#define ATC2609A_RTC_MSALM 0x6F
+#define ATC2609A_RTC_HALM 0x70
+#define ATC2609A_RTC_YMDALM 0x71
+#define ATC2609A_RTC_MS 0x72
+#define ATC2609A_RTC_H 0x73
+#define ATC2609A_RTC_DC 0x74
+#define ATC2609A_RTC_YMD 0x75
+#define ATC2609A_EFUSE_DAT 0x76
+#define ATC2609A_EFUSECRTL1 0x77
+#define ATC2609A_EFUSECRTL2 0x78
+#define ATC2609A_PMU_DC4_CTL0 0x79
+#define ATC2609A_PMU_DC4_CTL1 0x7A
+#define ATC2609A_PMU_DC4_CTL2 0x7B
+#define ATC2609A_PMU_DC4_CTL3 0x7C
+#define ATC2609A_PMU_DC4_CTL4 0x7D
+#define ATC2609A_PMU_DC4_CTL5 0x7E
+#define ATC2609A_PMU_DC4_CTL6 0x7F
+#define ATC2609A_PMU_PWR_STATUS 0x80
+#define ATC2609A_PMU_S2_PWR 0x81
+#define ATC2609A_CLMT_CTL0 0x82
+#define ATC2609A_CLMT_DATA0 0x83
+#define ATC2609A_CLMT_DATA1 0x84
+#define ATC2609A_CLMT_DATA2 0x85
+#define ATC2609A_CLMT_DATA3 0x86
+#define ATC2609A_CLMT_ADD0 0x87
+#define ATC2609A_CLMT_ADD1 0x88
+#define ATC2609A_CLMT_OCV_TABLE 0x89
+#define ATC2609A_CLMT_R_TABLE 0x8A
+#define ATC2609A_PMU_PWRON_CTL0 0x8D
+#define ATC2609A_PMU_PWRON_CTL1 0x8E
+#define ATC2609A_PMU_PWRON_CTL2 0x8F
+#define ATC2609A_IRC_CTL 0x90
+#define ATC2609A_IRC_STAT 0x91
+#define ATC2609A_IRC_CC 0x92
+#define ATC2609A_IRC_KDC 0x93
+#define ATC2609A_IRC_WK 0x94
+#define ATC2609A_IRC_RCC 0x95
+
+/* AUDIO_OUT Registers */
+#define ATC2609A_AUDIOINOUT_CTL 0xA0
+#define ATC2609A_AUDIO_DEBUGOUTCTL 0xA1
+#define ATC2609A_DAC_DIGITALCTL 0xA2
+#define ATC2609A_DAC_VOLUMECTL0 0xA3
+#define ATC2609A_DAC_ANALOG0 0xA4
+#define ATC2609A_DAC_ANALOG1 0xA5
+#define ATC2609A_DAC_ANALOG2 0xA6
+#define ATC2609A_DAC_ANALOG3 0xA7
+
+/* AUDIO_IN Registers */
+#define ATC2609A_ADC_DIGITALCTL 0xA8
+#define ATC2609A_ADC_HPFCTL 0xA9
+#define ATC2609A_ADC_CTL 0xAA
+#define ATC2609A_AGC_CTL0 0xAB
+#define ATC2609A_AGC_CTL1 0xAC
+#define ATC2609A_AGC_CTL2 0xAD
+#define ATC2609A_ADC_ANALOG0 0xAE
+#define ATC2609A_ADC_ANALOG1 0xAF
+
+/* PCM_IF Registers */
+#define ATC2609A_PCM0_CTL 0xB0
+#define ATC2609A_PCM1_CTL 0xB1
+#define ATC2609A_PCM2_CTL 0xB2
+#define ATC2609A_PCMIF_CTL 0xB3
+
+/* CMU_CONTROL Registers */
+#define ATC2609A_CMU_DEVRST 0xC1
+
+/* INTS Registers */
+#define ATC2609A_INTS_PD 0xC8
+#define ATC2609A_INTS_MSK 0xC9
+
+/* MFP Registers */
+#define ATC2609A_MFP_CTL 0xD0
+#define ATC2609A_PAD_VSEL 0xD1
+#define ATC2609A_GPIO_OUTEN 0xD2
+#define ATC2609A_GPIO_INEN 0xD3
+#define ATC2609A_GPIO_DAT 0xD4
+#define ATC2609A_PAD_DRV 0xD5
+#define ATC2609A_PAD_EN 0xD6
+#define ATC2609A_DEBUG_SEL 0xD7
+#define ATC2609A_DEBUG_IE 0xD8
+#define ATC2609A_DEBUG_OE 0xD9
+#define ATC2609A_CHIP_VER 0xDC
+
+/* PWSI Registers */
+#define ATC2609A_PWSI_CTL 0xF0
+#define ATC2609A_PWSI_STATUS 0xF1
+
+/* TWSI Registers */
+#define ATC2609A_SADDR 0xFF
+
+/* PMU_SYS_CTL0 Register Mask Bits */
+#define ATC2609A_PMU_SYS_CTL0_IR_WK_EN BIT(5)
+#define ATC2609A_PMU_SYS_CTL0_RESET_WK_EN BIT(6)
+#define ATC2609A_PMU_SYS_CTL0_HDSW_WK_EN BIT(7)
+#define ATC2609A_PMU_SYS_CTL0_ALARM_WK_EN BIT(8)
+#define ATC2609A_PMU_SYS_CTL0_REM_CON_WK_EN BIT(9)
+#define ATC2609A_PMU_SYS_CTL0_RESTART_EN BIT(10)
+#define ATC2609A_PMU_SYS_CTL0_WKIRQ_WK_EN BIT(11)
+#define ATC2609A_PMU_SYS_CTL0_ONOFF_SHORT_WK_EN BIT(12)
+#define ATC2609A_PMU_SYS_CTL0_ONOFF_LONG_WK_EN BIT(13)
+#define ATC2609A_PMU_SYS_CTL0_WALL_WK_EN BIT(14)
+#define ATC2609A_PMU_SYS_CTL0_USB_WK_EN BIT(15)
+#define ATC2609A_PMU_SYS_CTL0_WK_ALL (GENMASK(15, 5) & (~BIT(10)))
+
+/* PMU_SYS_CTL1 Register Mask Bits */
+#define ATC2609A_PMU_SYS_CTL1_EN_S1 BIT(0)
+#define ATC2609A_PMU_SYS_CTL1_LB_S4_EN BIT(2)
+#define ATC2609A_PMU_SYS_CTL1_LB_S4 GENMASK(4, 3)
+#define ATC2609A_PMU_SYS_CTL1_LB_S4_3_1V BIT(4)
+#define ATC2609A_PMU_SYS_CTL1_IR_WK_FLAG BIT(5)
+#define ATC2609A_PMU_SYS_CTL1_RESET_WK_FLAG BIT(6)
+#define ATC2609A_PMU_SYS_CTL1_HDSW_WK_FLAG BIT(7)
+#define ATC2609A_PMU_SYS_CTL1_ALARM_WK_FLAG BIT(8)
+#define ATC2609A_PMU_SYS_CTL1_REM_CON_WK_FLAG BIT(9)
+#define ATC2609A_PMU_SYS_CTL1_RESTART_WK_FLAG BIT(10)
+#define ATC2609A_PMU_SYS_CTL1_WKIRQ_WK_FLAG BIT(11)
+#define ATC2609A_PMU_SYS_CTL1_ONOFF_SHORT_WK_FLAG BIT(12)
+#define ATC2609A_PMU_SYS_CTL1_ONOFF_LONG_WK_FLAG BIT(13)
+#define ATC2609A_PMU_SYS_CTL1_WALL_WK_FLAG BIT(14)
+#define ATC2609A_PMU_SYS_CTL1_USB_WK_FLAG BIT(15)
+
+/* PMU_SYS_CTL2 Register Mask Bits */
+#define ATC2609A_PMU_SYS_CTL2_PMU_A_EN BIT(0)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS_INT_EN BIT(1)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS_PD BIT(2)
+#define ATC2609A_PMU_SYS_CTL2_S2TIMER GENMASK(5, 3)
+#define ATC2609A_PMU_SYS_CTL2_S2_TIMER_EN BIT(6)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_RESET_TIME_SEL GENMASK(8, 7)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_RESET_EN BIT(9)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS_TIME GENMASK(11, 10)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_LSP_INT_EN BIT(12)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_LONG_PRESS BIT(13)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_SHORT_PRESS BIT(14)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS BIT(15)
+
+/* PMU_SYS_CTL3 Register Mask Bits */
+#define ATC2609A_PMU_SYS_CTL3_S2S3TOS1_TIMER GENMASK(8, 7)
+#define ATC2609A_PMU_SYS_CTL3_S2S3TOS1_TIMER_EN BIT(9)
+#define ATC2609A_PMU_SYS_CTL3_S3_TIMER GENMASK(12, 10)
+#define ATC2609A_PMU_SYS_CTL3_S3_TIMER_EN BIT(13)
+#define ATC2609A_PMU_SYS_CTL3_EN_S3 BIT(14)
+#define ATC2609A_PMU_SYS_CTL3_EN_S2 BIT(15)
+
+/* PMU_SYS_CTL5 Register Mask Bits */
+#define ATC2609A_PMU_SYS_CTL5_WALLWKDTEN BIT(7)
+#define ATC2609A_PMU_SYS_CTL5_VBUSWKDTEN BIT(8)
+#define ATC2609A_PMU_SYS_CTL5_REMCON_DECT_EN BIT(9)
+#define ATC2609A_PMU_SYS_CTL5_ONOFF_8S_SEL BIT(10)
+
+/* INTS_MSK Register Mask Bits */
+#define ATC2609A_INTS_MSK_AUDIO BIT(0)
+#define ATC2609A_INTS_MSK_OV BIT(1)
+#define ATC2609A_INTS_MSK_OC BIT(2)
+#define ATC2609A_INTS_MSK_OT BIT(3)
+#define ATC2609A_INTS_MSK_UV BIT(4)
+#define ATC2609A_INTS_MSK_ALARM BIT(5)
+#define ATC2609A_INTS_MSK_ONOFF BIT(6)
+#define ATC2609A_INTS_MSK_WKUP BIT(7)
+#define ATC2609A_INTS_MSK_IR BIT(8)
+#define ATC2609A_INTS_MSK_REMCON BIT(9)
+#define ATC2609A_INTS_MSK_POWERIN BIT(10)
+
+/* CMU_DEVRST Register Mask Bits */
+#define ATC2609A_CMU_DEVRST_AUDIO BIT(0)
+#define ATC2609A_CMU_DEVRST_MFP BIT(1)
+#define ATC2609A_CMU_DEVRST_INTS BIT(2)
+
+/* PAD_EN Register Mask Bits */
+#define ATC2609A_PAD_EN_EXTIRQ BIT(0)
+
+#endif /* __LINUX_MFD_ATC260X_ATC2609A_H */
diff --git a/include/linux/mfd/atc260x/core.h b/include/linux/mfd/atc260x/core.h
new file mode 100644
index 000000000000..777b6c345d44
--- /dev/null
+++ b/include/linux/mfd/atc260x/core.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Core MFD defines for ATC260x PMICs
+ *
+ * Copyright (C) 2019 Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ * Copyright (C) 2020 Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
+ */
+
+#ifndef __LINUX_MFD_ATC260X_CORE_H
+#define __LINUX_MFD_ATC260X_CORE_H
+
+#include <linux/mfd/atc260x/atc2603c.h>
+#include <linux/mfd/atc260x/atc2609a.h>
+
+enum atc260x_type {
+ ATC2603A = 0,
+ ATC2603C,
+ ATC2609A,
+};
+
+enum atc260x_ver {
+ ATC260X_A = 0,
+ ATC260X_B,
+ ATC260X_C,
+ ATC260X_D,
+ ATC260X_E,
+ ATC260X_F,
+ ATC260X_G,
+ ATC260X_H,
+};
+
+struct atc260x {
+ struct device *dev;
+
+ struct regmap *regmap;
+ const struct regmap_irq_chip *regmap_irq_chip;
+ struct regmap_irq_chip_data *irq_data;
+
+ struct mutex *regmap_mutex; /* mutex for custom regmap locking */
+
+ const struct mfd_cell *cells;
+ int nr_cells;
+ int irq;
+
+ enum atc260x_type ic_type;
+ enum atc260x_ver ic_ver;
+ const char *type_name;
+ unsigned int rev_reg;
+
+ const struct atc260x_init_regs *init_regs; /* regs for device init */
+};
+
+struct regmap_config;
+
+int atc260x_match_device(struct atc260x *atc260x, struct regmap_config *regmap_cfg);
+int atc260x_device_probe(struct atc260x *atc260x);
+
+#endif /* __LINUX_MFD_ATC260X_CORE_H */
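
The two declarations above imply a two-phase bring-up: atc260x_match_device() identifies the variant and fills in the regmap configuration, then the transport driver creates the regmap and finishes via atc260x_device_probe(). A sketch of an I2C glue probe under those assumptions (the call order is inferred from the declarations, not stated by the header):

#include <linux/i2c.h>
#include <linux/mfd/atc260x/core.h>
#include <linux/regmap.h>

static int atc260x_i2c_probe_sketch(struct i2c_client *client)
{
	struct atc260x *atc260x;
	struct regmap_config regmap_cfg = { };
	int ret;

	atc260x = devm_kzalloc(&client->dev, sizeof(*atc260x), GFP_KERNEL);
	if (!atc260x)
		return -ENOMEM;

	atc260x->dev = &client->dev;
	atc260x->irq = client->irq;
	/* Assumed: the core retrieves its state through driver data. */
	i2c_set_clientdata(client, atc260x);

	/* Fills ic_type/type_name and tailors regmap_cfg to the variant. */
	ret = atc260x_match_device(atc260x, &regmap_cfg);
	if (ret)
		return ret;

	atc260x->regmap = devm_regmap_init_i2c(client, &regmap_cfg);
	if (IS_ERR(atc260x->regmap))
		return PTR_ERR(atc260x->regmap);

	/* Registers the IRQ chip and the variant's MFD cells. */
	return atc260x_device_probe(atc260x);
}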
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index fd5957c042da..9ab0e2fca7ea 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -696,6 +696,6 @@ int axp20x_device_probe(struct axp20x_dev *axp20x);
*
* This tells the axp20x core to remove the associated mfd devices
*/
-int axp20x_device_remove(struct axp20x_dev *axp20x);
+void axp20x_device_remove(struct axp20x_dev *axp20x);
#endif /* __LINUX_MFD_AXP20X_H */
diff --git a/include/linux/mfd/bcm2835-pm.h b/include/linux/mfd/bcm2835-pm.h
index ed37dc40e82a..f70a810c55f7 100644
--- a/include/linux/mfd/bcm2835-pm.h
+++ b/include/linux/mfd/bcm2835-pm.h
@@ -9,6 +9,7 @@ struct bcm2835_pm {
struct device *dev;
void __iomem *base;
void __iomem *asb;
+ void __iomem *rpivid_asb;
};
#endif /* BCM2835_MFD_PM_H */
diff --git a/include/linux/mfd/bd9571mwv.h b/include/linux/mfd/bd9571mwv.h
index eb05569f752b..8efd99d07c9e 100644
--- a/include/linux/mfd/bd9571mwv.h
+++ b/include/linux/mfd/bd9571mwv.h
@@ -1,16 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * ROHM BD9571MWV-M driver
+ * ROHM BD9571MWV-M and BD9574MWF-M driver
*
* Copyright (C) 2017 Marek Vasut <marek.vasut+renesas@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
+ * Copyright (C) 2020 Renesas Electronics Corporation
*
* Based on the TPS65086 driver
*/
@@ -21,11 +14,12 @@
#include <linux/device.h>
#include <linux/regmap.h>
-/* List of registers for BD9571MWV */
+/* List of registers for BD9571MWV and BD9574MWF */
#define BD9571MWV_VENDOR_CODE 0x00
#define BD9571MWV_VENDOR_CODE_VAL 0xdb
#define BD9571MWV_PRODUCT_CODE 0x01
-#define BD9571MWV_PRODUCT_CODE_VAL 0x60
+#define BD9571MWV_PRODUCT_CODE_BD9571MWV 0x60
+#define BD9571MWV_PRODUCT_CODE_BD9574MWF 0x74
#define BD9571MWV_PRODUCT_REVISION 0x02
#define BD9571MWV_I2C_FUSA_MODE 0x10
@@ -55,6 +49,7 @@
#define BD9571MWV_VD33_VID 0x44
#define BD9571MWV_DVFS_VINIT 0x50
+#define BD9574MWF_VD09_VINIT 0x51
#define BD9571MWV_DVFS_SETVMAX 0x52
#define BD9571MWV_DVFS_BOOSTVID 0x53
#define BD9571MWV_DVFS_SETVID 0x54
@@ -68,6 +63,7 @@
#define BD9571MWV_GPIO_INT_SET 0x64
#define BD9571MWV_GPIO_INT 0x65
#define BD9571MWV_GPIO_INTMASK 0x66
+#define BD9574MWF_GPIO_MUX 0x67
#define BD9571MWV_REG_KEEP(n) (0x70 + (n))
@@ -77,6 +73,8 @@
#define BD9571MWV_PROT_ERROR_STATUS2 0x83
#define BD9571MWV_PROT_ERROR_STATUS3 0x84
#define BD9571MWV_PROT_ERROR_STATUS4 0x85
+#define BD9574MWF_PROT_ERROR_STATUS5 0x86
+#define BD9574MWF_SYSTEM_ERROR_STATUS 0x87
#define BD9571MWV_INT_INTREQ 0x90
#define BD9571MWV_INT_INTREQ_MD1_INT BIT(0)
@@ -89,6 +87,12 @@
#define BD9571MWV_INT_INTREQ_BKUP_TRG_INT BIT(7)
#define BD9571MWV_INT_INTMASK 0x91
+#define BD9574MWF_SSCG_CNT 0xA0
+#define BD9574MWF_POFFB_MRB 0xA1
+#define BD9574MWF_SMRB_WR_PROT 0xA2
+#define BD9574MWF_SMRB_ASSERT 0xA3
+#define BD9574MWF_SMRB_STATUS 0xA4
+
#define BD9571MWV_ACCESS_KEY 0xff
/* Define the BD9571MWV IRQ numbers */
@@ -98,23 +102,8 @@ enum bd9571mwv_irqs {
BD9571MWV_IRQ_MD2_E2,
BD9571MWV_IRQ_PROT_ERR,
BD9571MWV_IRQ_GP,
- BD9571MWV_IRQ_128H_OF,
+ BD9571MWV_IRQ_128H_OF, /* BKUP_HOLD on BD9574MWF */
BD9571MWV_IRQ_WDT_OF,
BD9571MWV_IRQ_BKUP_TRG,
};
-
-/**
- * struct bd9571mwv - state holder for the bd9571mwv driver
- *
- * Device data may be used to access the BD9571MWV chip
- */
-struct bd9571mwv {
- struct device *dev;
- struct regmap *regmap;
-
- /* IRQ Data */
- int irq;
- struct regmap_irq_chip_data *irq_data;
-};
-
#endif /* __LINUX_MFD_BD9571MWV_H */
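
With BD9571MWV_PRODUCT_CODE_VAL split into per-chip product codes, a probe routine can now tell the two PMICs apart by reading BD9571MWV_PRODUCT_CODE. A minimal sketch, assuming a regmap over the PMIC registers:

#include <linux/mfd/bd9571mwv.h>

static int bd957x_identify(struct device *dev, struct regmap *regmap)
{
	unsigned int val;
	int ret;

	ret = regmap_read(regmap, BD9571MWV_PRODUCT_CODE, &val);
	if (ret)
		return ret;

	switch (val) {
	case BD9571MWV_PRODUCT_CODE_BD9571MWV:
		dev_info(dev, "BD9571MWV detected\n");
		return 0;
	case BD9571MWV_PRODUCT_CODE_BD9574MWF:
		dev_info(dev, "BD9574MWF detected\n");
		return 0;
	default:
		return -ENODEV;
	}
}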
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index d01d1299e49d..0bc7cba798a3 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -14,7 +14,7 @@
#define MFD_RES_SIZE(arr) (sizeof(arr) / sizeof(struct resource))
-#define MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _match)\
+#define MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg, _use_of_reg, _match) \
{ \
.name = (_name), \
.resources = (_res), \
@@ -22,27 +22,35 @@
.platform_data = (_pdata), \
.pdata_size = (_pdsize), \
.of_compatible = (_compat), \
+ .of_reg = (_of_reg), \
+ .use_of_reg = (_use_of_reg), \
.acpi_match = (_match), \
.id = (_id), \
}
-#define OF_MFD_CELL(_name, _res, _pdata, _pdsize,_id, _compat) \
- MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, NULL) \
+#define MFD_CELL_OF_REG(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg) \
+ MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg, true, NULL)
-#define ACPI_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _match) \
- MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, _match) \
+#define MFD_CELL_OF(_name, _res, _pdata, _pdsize, _id, _compat) \
+ MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, 0, false, NULL)
-#define MFD_CELL_BASIC(_name, _res, _pdata, _pdsize, _id) \
- MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, NULL) \
+#define MFD_CELL_ACPI(_name, _res, _pdata, _pdsize, _id, _match) \
+ MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, 0, false, _match)
-#define MFD_CELL_RES(_name, _res) \
- MFD_CELL_ALL(_name, _res, NULL, 0, 0, NULL, NULL) \
+#define MFD_CELL_BASIC(_name, _res, _pdata, _pdsize, _id) \
+ MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, 0, false, NULL)
-#define MFD_CELL_NAME(_name) \
- MFD_CELL_ALL(_name, NULL, NULL, 0, 0, NULL, NULL) \
+#define MFD_CELL_RES(_name, _res) \
+ MFD_CELL_ALL(_name, _res, NULL, 0, 0, NULL, 0, false, NULL)
+
+#define MFD_CELL_NAME(_name) \
+ MFD_CELL_ALL(_name, NULL, NULL, 0, 0, NULL, 0, false, NULL)
+
+#define MFD_DEP_LEVEL_NORMAL 0
+#define MFD_DEP_LEVEL_HIGH 1
struct irq_domain;
-struct property_entry;
+struct software_node;
/* Matches ACPI PNP id, either _HID or _CID, or ACPI _ADR */
struct mfd_cell_acpi_match {
@@ -58,6 +66,7 @@ struct mfd_cell_acpi_match {
struct mfd_cell {
const char *name;
int id;
+ int level;
int (*enable)(struct platform_device *dev);
int (*disable)(struct platform_device *dev);
@@ -69,15 +78,25 @@ struct mfd_cell {
void *platform_data;
size_t pdata_size;
- /* device properties passed to the sub devices drivers */
- struct property_entry *properties;
+ /* Software node for the device. */
+ const struct software_node *swnode;
/*
* Device Tree compatible string
- * See: Documentation/devicetree/usage-model.txt Chapter 2.2 for details
+ * See: Documentation/devicetree/usage-model.rst Chapter 2.2 for details
*/
const char *of_compatible;
+ /*
+	 * Address as defined in Device Tree. Used to complement 'of_compatible'
+	 * (above) when matching OF nodes with devices that have identical
+	 * compatible strings.
+ */
+ const u64 of_reg;
+
+ /* Set to 'true' to use 'of_reg' (above) - allows for of_reg=0 */
+ bool use_of_reg;
+
/* Matches ACPI */
const struct mfd_cell_acpi_match *acpi_match;
@@ -135,6 +154,7 @@ static inline int mfd_add_hotplug_devices(struct device *parent,
}
extern void mfd_remove_devices(struct device *parent);
+extern void mfd_remove_devices_late(struct device *parent);
extern int devm_mfd_add_devices(struct device *dev, int id,
const struct mfd_cell *cells, int n_devs,
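
The new of_reg/use_of_reg pair targets sub-devices whose DT nodes share one compatible string: the MFD core matches each cell to the node with the corresponding unit address, and use_of_reg (set implicitly by MFD_CELL_OF_REG) keeps an of_reg of 0 from reading as "not set". A sketch with a hypothetical "acme,block" compatible:

#include <linux/mfd/core.h>

/*
 * Sketch only: two instances of the same function, distinguished in
 * DT solely by unit address (e.g. block@0 and block@1).
 */
static const struct mfd_cell acme_cells[] = {
	MFD_CELL_OF_REG("acme-block", NULL, NULL, 0, 0, "acme,block", 0),
	MFD_CELL_OF_REG("acme-block", NULL, NULL, 0, 1, "acme,block", 1),
};

Relatedly, the new MFD_DEP_LEVEL_* constants and mfd_remove_devices_late() suggest a two-stage teardown in which cells marked MFD_DEP_LEVEL_HIGH outlive a plain mfd_remove_devices() call.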
diff --git a/include/linux/mfd/da9055/pdata.h b/include/linux/mfd/da9055/pdata.h
index eac48e483190..d3f126990ad0 100644
--- a/include/linux/mfd/da9055/pdata.h
+++ b/include/linux/mfd/da9055/pdata.h
@@ -35,7 +35,7 @@ struct da9055_pdata {
int *gpio_rsel;
/*
* Regulator mode control bits value (GPI offset) that
- * that controls the regulator state, 0 if not available.
+ * controls the regulator state, 0 if not available.
*/
enum gpio_select *reg_ren;
/*
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
index 5cd06ab26352..8db52324f416 100644
--- a/include/linux/mfd/da9063/core.h
+++ b/include/linux/mfd/da9063/core.h
@@ -35,6 +35,8 @@ enum da9063_variant_codes {
PMIC_DA9063_AD = 0x3,
PMIC_DA9063_BB = 0x5,
PMIC_DA9063_CA = 0x6,
+ PMIC_DA9063_DA = 0x7,
+ PMIC_DA9063_EA = 0x8,
};
/* Interrupts */
diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
index ba706b0e28c2..6e0f66a2e727 100644
--- a/include/linux/mfd/da9063/registers.h
+++ b/include/linux/mfd/da9063/registers.h
@@ -292,8 +292,10 @@
#define DA9063_BB_REG_GP_ID_19 0x134
/* Chip ID and variant */
-#define DA9063_REG_CHIP_ID 0x181
-#define DA9063_REG_CHIP_VARIANT 0x182
+#define DA9063_REG_DEVICE_ID 0x181
+#define DA9063_REG_VARIANT_ID 0x182
+#define DA9063_REG_CUSTOMER_ID 0x183
+#define DA9063_REG_CONFIG_ID 0x184
/*
* PMIC registers bits
@@ -929,9 +931,6 @@
#define DA9063_RTC_CLOCK 0x40
#define DA9063_OUT_32K_EN 0x80
-/* DA9063_REG_CHIP_VARIANT */
-#define DA9063_CHIP_VARIANT_SHIFT 4
-
/* DA9063_REG_BUCK_ILIM_A (addr=0x9A) */
#define DA9063_BIO_ILIM_MASK 0x0F
#define DA9063_BMEM_ILIM_MASK 0xF0
@@ -1038,6 +1037,9 @@
#define DA9063_NONKEY_PIN_AUTODOWN 0x02
#define DA9063_NONKEY_PIN_AUTOFLPRT 0x03
+/* DA9063_REG_CONFIG_J (addr=0x10F) */
+#define DA9063_TWOWIRE_TO 0x40
+
/* DA9063_REG_MON_REG_5 (addr=0x116) */
#define DA9063_MON_A8_IDX_MASK 0x07
#define DA9063_MON_A8_IDX_NONE 0x00
@@ -1065,4 +1067,10 @@
#define DA9063_MON_A10_IDX_LDO9 0x04
#define DA9063_MON_A10_IDX_LDO10 0x05
+/* DA9063_REG_VARIANT_ID (addr=0x182) */
+#define DA9063_VARIANT_ID_VRC_SHIFT 0
+#define DA9063_VARIANT_ID_VRC_MASK 0x0F
+#define DA9063_VARIANT_ID_MRC_SHIFT 4
+#define DA9063_VARIANT_ID_MRC_MASK 0xF0
+
#endif /* _DA9063_REG_H */
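
The single CHIP_VARIANT register with its bare 4-bit shift gives way to a VARIANT_ID register carrying a minor revision code (VRC, bits 3..0) and a major revision code (MRC, bits 7..4). A decode sketch using the new masks, assuming the regmap comes from the DA9063 core driver:

#include <linux/mfd/da9063/registers.h>
#include <linux/regmap.h>

static int da9063_report_variant(struct regmap *regmap)
{
	unsigned int val, mrc, vrc;
	int ret;

	ret = regmap_read(regmap, DA9063_REG_VARIANT_ID, &val);
	if (ret)
		return ret;

	mrc = (val & DA9063_VARIANT_ID_MRC_MASK) >> DA9063_VARIANT_ID_MRC_SHIFT;
	vrc = (val & DA9063_VARIANT_ID_VRC_MASK) >> DA9063_VARIANT_ID_VRC_SHIFT;

	pr_info("DA9063 variant: MRC=%#x VRC=%#x\n", mrc, vrc);
	return 0;
}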
diff --git a/include/linux/mfd/db8500-prcmu.h b/include/linux/mfd/db8500-prcmu.h
index 4b63d3ecdcff..a62de3d155ed 100644
--- a/include/linux/mfd/db8500-prcmu.h
+++ b/include/linux/mfd/db8500-prcmu.h
@@ -720,7 +720,7 @@ static inline int db8500_prcmu_load_a9wdog(u8 id, u32 val)
static inline bool db8500_prcmu_is_ac_wake_requested(void)
{
- return 0;
+ return false;
}
static inline int db8500_prcmu_set_arm_opp(u8 opp)
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index e6ee2ec35de9..e7a7e70fdb38 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -186,10 +186,11 @@ enum ddr_pwrst {
#define PRCMU_FW_PROJECT_U8500_C3 8
#define PRCMU_FW_PROJECT_U8500_C4 9
#define PRCMU_FW_PROJECT_U9500_MBL 10
-#define PRCMU_FW_PROJECT_U8500_MBL 11 /* Customer specific */
+#define PRCMU_FW_PROJECT_U8500_SSG1 11 /* Samsung specific */
#define PRCMU_FW_PROJECT_U8500_MBL2 12 /* Customer specific */
#define PRCMU_FW_PROJECT_U8520 13
#define PRCMU_FW_PROJECT_U8420 14
+#define PRCMU_FW_PROJECT_U8500_SSG2 15 /* Samsung specific */
#define PRCMU_FW_PROJECT_U8420_SYSCLK 17
#define PRCMU_FW_PROJECT_A9420 20
/* [32..63] 9540 and derivatives */
@@ -555,31 +556,11 @@ static inline void prcmu_clear(unsigned int reg, u32 bits)
#define PRCMU_QOS_ARM_OPP 3
#define PRCMU_QOS_DEFAULT_VALUE -1
-#ifdef CONFIG_DBX500_PRCMU_QOS_POWER
-
-unsigned long prcmu_qos_get_cpufreq_opp_delay(void);
-void prcmu_qos_set_cpufreq_opp_delay(unsigned long);
-void prcmu_qos_force_opp(int, s32);
-int prcmu_qos_requirement(int pm_qos_class);
-int prcmu_qos_add_requirement(int pm_qos_class, char *name, s32 value);
-int prcmu_qos_update_requirement(int pm_qos_class, char *name, s32 new_value);
-void prcmu_qos_remove_requirement(int pm_qos_class, char *name);
-int prcmu_qos_add_notifier(int prcmu_qos_class,
- struct notifier_block *notifier);
-int prcmu_qos_remove_notifier(int prcmu_qos_class,
- struct notifier_block *notifier);
-
-#else
-
static inline unsigned long prcmu_qos_get_cpufreq_opp_delay(void)
{
return 0;
}
-static inline void prcmu_qos_set_cpufreq_opp_delay(unsigned long n) {}
-
-static inline void prcmu_qos_force_opp(int prcmu_qos_class, s32 i) {}
-
static inline int prcmu_qos_requirement(int prcmu_qos_class)
{
return 0;
@@ -612,6 +593,4 @@ static inline int prcmu_qos_remove_notifier(int prcmu_qos_class,
return 0;
}
-#endif
-
#endif /* __MACH_PRCMU_H */
diff --git a/include/linux/mfd/gsc.h b/include/linux/mfd/gsc.h
new file mode 100644
index 000000000000..6bd639c285b4
--- /dev/null
+++ b/include/linux/mfd/gsc.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Gateworks Corporation
+ */
+#ifndef __LINUX_MFD_GSC_H_
+#define __LINUX_MFD_GSC_H_
+
+#include <linux/regmap.h>
+
+/* Device Addresses */
+#define GSC_MISC 0x20
+#define GSC_UPDATE 0x21
+#define GSC_GPIO 0x23
+#define GSC_HWMON 0x29
+#define GSC_EEPROM0 0x50
+#define GSC_EEPROM1 0x51
+#define GSC_EEPROM2 0x52
+#define GSC_EEPROM3 0x53
+#define GSC_RTC 0x68
+
+/* Register offsets */
+enum {
+ GSC_CTRL_0 = 0x00,
+ GSC_CTRL_1 = 0x01,
+ GSC_TIME = 0x02,
+ GSC_TIME_ADD = 0x06,
+ GSC_IRQ_STATUS = 0x0A,
+ GSC_IRQ_ENABLE = 0x0B,
+ GSC_FW_CRC = 0x0C,
+ GSC_FW_VER = 0x0E,
+ GSC_WP = 0x0F,
+};
+
+/* Bit definitions */
+#define GSC_CTRL_0_PB_HARD_RESET 0
+#define GSC_CTRL_0_PB_CLEAR_SECURE_KEY 1
+#define GSC_CTRL_0_PB_SOFT_POWER_DOWN 2
+#define GSC_CTRL_0_PB_BOOT_ALTERNATE 3
+#define GSC_CTRL_0_PERFORM_CRC 4
+#define GSC_CTRL_0_TAMPER_DETECT 5
+#define GSC_CTRL_0_SWITCH_HOLD 6
+
+#define GSC_CTRL_1_SLEEP_ENABLE 0
+#define GSC_CTRL_1_SLEEP_ACTIVATE 1
+#define GSC_CTRL_1_SLEEP_ADD 2
+#define GSC_CTRL_1_SLEEP_NOWAKEPB 3
+#define GSC_CTRL_1_WDT_TIME 4
+#define GSC_CTRL_1_WDT_ENABLE 5
+#define GSC_CTRL_1_SWITCH_BOOT_ENABLE 6
+#define GSC_CTRL_1_SWITCH_BOOT_CLEAR 7
+
+#define GSC_IRQ_PB 0
+#define GSC_IRQ_KEY_ERASED 1
+#define GSC_IRQ_EEPROM_WP 2
+#define GSC_IRQ_RESV 3
+#define GSC_IRQ_GPIO 4
+#define GSC_IRQ_TAMPER 5
+#define GSC_IRQ_WDT_TIMEOUT 6
+#define GSC_IRQ_SWITCH_HOLD 7
+
+int gsc_read(void *context, unsigned int reg, unsigned int *val);
+int gsc_write(void *context, unsigned int reg, unsigned int val);
+
+struct gsc_dev {
+ struct device *dev;
+
+ struct i2c_client *i2c; /* 0x20: interrupt controller, WDT */
+ struct i2c_client *i2c_hwmon; /* 0x29: hwmon, fan controller */
+
+ struct regmap *regmap;
+
+ unsigned int fwver;
+ unsigned short fwcrc;
+};
+
+#endif /* __LINUX_MFD_GSC_H_ */
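
gsc_read() and gsc_write() are plain register accessors for the controller behind the 0x20 client. A sketch that fills the fwver/fwcrc fields of struct gsc_dev; it assumes the context argument takes the regmap, that each call moves one 8-bit register, and that the 16-bit CRC at GSC_FW_CRC is little-endian, none of which the header itself states:

#include <linux/mfd/gsc.h>

static int gsc_read_fw_info(struct gsc_dev *gsc)
{
	unsigned int ver, crc_lo, crc_hi;
	int ret;

	ret = gsc_read(gsc->regmap, GSC_FW_VER, &ver);
	if (ret)
		return ret;

	ret = gsc_read(gsc->regmap, GSC_FW_CRC, &crc_lo);
	if (!ret)
		ret = gsc_read(gsc->regmap, GSC_FW_CRC + 1, &crc_hi);
	if (ret)
		return ret;

	gsc->fwver = ver;
	gsc->fwcrc = crc_lo | (crc_hi << 8);	/* assumed little-endian */
	return 0;
}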
diff --git a/include/linux/mfd/hi6421-pmic.h b/include/linux/mfd/hi6421-pmic.h
index bbc64484c021..2cadf8897c64 100644
--- a/include/linux/mfd/hi6421-pmic.h
+++ b/include/linux/mfd/hi6421-pmic.h
@@ -5,7 +5,7 @@
* Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd.
* http://www.hisilicon.com
* Copyright (c) <2013-2014> Linaro Ltd.
- * http://www.linaro.org
+ * https://www.linaro.org
*
* Author: Guodong Xu <guodong.xu@linaro.org>
*/
diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h
index b06171322178..6a012784dd1b 100644
--- a/include/linux/mfd/hi655x-pmic.h
+++ b/include/linux/mfd/hi655x-pmic.h
@@ -2,7 +2,7 @@
/*
* Device driver for regulators in hi655x IC
*
- * Copyright (c) 2016 Hisilicon.
+ * Copyright (c) 2016 HiSilicon Ltd.
*
* Authors:
* Chen Feng <puck.chen@hisilicon.com>
@@ -12,6 +12,8 @@
#ifndef __HI655X_PMIC_H
#define __HI655X_PMIC_H
+#include <linux/gpio/consumer.h>
+
/* Hi655x registers are mapped to memory bus in 4 bytes stride */
#define HI655X_STRIDE 4
#define HI655X_BUS_ADDR(x) ((x) << 2)
@@ -53,7 +55,7 @@ struct hi655x_pmic {
struct resource *res;
struct device *dev;
struct regmap *regmap;
- int gpio;
+ struct gpio_desc *gpio;
unsigned int ver;
struct regmap_irq_chip_data *irq_data;
};
diff --git a/include/linux/mfd/idt82p33_reg.h b/include/linux/mfd/idt82p33_reg.h
new file mode 100644
index 000000000000..1db532feeb91
--- /dev/null
+++ b/include/linux/mfd/idt82p33_reg.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Register Map - Based on AN888_SMUforIEEE_SynchEther_82P33xxx_RevH.pdf
+ *
+ * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef HAVE_IDT82P33_REG
+#define HAVE_IDT82P33_REG
+
+#define REG_ADDR(page, offset) (((page) << 0x7) | ((offset) & 0x7f))
+
+/* Register address */
+#define DPLL1_TOD_CNFG 0x134
+#define DPLL2_TOD_CNFG 0x1B4
+
+#define DPLL1_TOD_STS 0x10B
+#define DPLL2_TOD_STS 0x18B
+
+#define DPLL1_TOD_TRIGGER 0x115
+#define DPLL2_TOD_TRIGGER 0x195
+
+#define DPLL1_OPERATING_MODE_CNFG 0x120
+#define DPLL2_OPERATING_MODE_CNFG 0x1A0
+
+#define DPLL1_HOLDOVER_FREQ_CNFG 0x12C
+#define DPLL2_HOLDOVER_FREQ_CNFG 0x1AC
+
+#define DPLL1_PHASE_OFFSET_CNFG 0x143
+#define DPLL2_PHASE_OFFSET_CNFG 0x1C3
+
+#define DPLL1_SYNC_EDGE_CNFG 0x140
+#define DPLL2_SYNC_EDGE_CNFG 0x1C0
+
+#define DPLL1_INPUT_MODE_CNFG 0x116
+#define DPLL2_INPUT_MODE_CNFG 0x196
+
+#define DPLL1_OPERATING_STS 0x102
+#define DPLL2_OPERATING_STS 0x182
+
+#define DPLL1_CURRENT_FREQ_STS 0x103
+#define DPLL2_CURRENT_FREQ_STS 0x183
+
+#define REG_SOFT_RESET 0x381
+
+#define OUT_MUX_CNFG(outn) REG_ADDR(0x6, (0xC * (outn)))
+#define TOD_TRIGGER(wr_trig, rd_trig) ((((wr_trig) & 0xf) << 4) | ((rd_trig) & 0xf))
+
+/* Register bit definitions */
+#define SYNC_TOD BIT(1)
+#define PH_OFFSET_EN BIT(7)
+#define SQUELCH_ENABLE BIT(5)
+
+/* Bit definitions for the DPLL_MODE register */
+#define PLL_MODE_SHIFT (0)
+#define PLL_MODE_MASK (0x1F)
+#define COMBO_MODE_EN BIT(5)
+#define COMBO_MODE_SHIFT (6)
+#define COMBO_MODE_MASK (0x3)
+
+/* Bit definitions for DPLL_OPERATING_STS register */
+#define OPERATING_STS_MASK (0x7)
+#define OPERATING_STS_SHIFT (0x0)
+
+/* Bit definitions for DPLL_TOD_TRIGGER register */
+#define READ_TRIGGER_MASK (0xF)
+#define READ_TRIGGER_SHIFT (0x0)
+#define WRITE_TRIGGER_MASK (0xF0)
+#define WRITE_TRIGGER_SHIFT (0x4)
+
+/* Bit definitions for REG_SOFT_RESET register */
+#define SOFT_RESET_EN BIT(7)
+
+enum pll_mode {
+ PLL_MODE_MIN = 0,
+ PLL_MODE_AUTOMATIC = PLL_MODE_MIN,
+ PLL_MODE_FORCE_FREERUN = 1,
+ PLL_MODE_FORCE_HOLDOVER = 2,
+ PLL_MODE_FORCE_LOCKED = 4,
+ PLL_MODE_FORCE_PRE_LOCKED2 = 5,
+ PLL_MODE_FORCE_PRE_LOCKED = 6,
+ PLL_MODE_FORCE_LOST_PHASE = 7,
+ PLL_MODE_DCO = 10,
+ PLL_MODE_WPH = 18,
+ PLL_MODE_MAX = PLL_MODE_WPH,
+};
+
+enum hw_tod_trig_sel {
+ HW_TOD_TRIG_SEL_MIN = 0,
+ HW_TOD_TRIG_SEL_NO_WRITE = HW_TOD_TRIG_SEL_MIN,
+ HW_TOD_TRIG_SEL_NO_READ = HW_TOD_TRIG_SEL_MIN,
+ HW_TOD_TRIG_SEL_SYNC_SEL = 1,
+ HW_TOD_TRIG_SEL_IN12 = 2,
+ HW_TOD_TRIG_SEL_IN13 = 3,
+ HW_TOD_TRIG_SEL_IN14 = 4,
+ HW_TOD_TRIG_SEL_TOD_PPS = 5,
+ HW_TOD_TRIG_SEL_TIMER_INTERVAL = 6,
+ HW_TOD_TRIG_SEL_MSB_PHASE_OFFSET_CNFG = 7,
+ HW_TOD_TRIG_SEL_MSB_HOLDOVER_FREQ_CNFG = 8,
+ HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG = 9,
+ HW_TOD_RD_TRIG_SEL_LSB_TOD_STS = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+ WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+};
+
+/** @brief Enumerated type listing DPLL operational states */
+enum dpll_state {
+ DPLL_STATE_FREERUN = 1,
+ DPLL_STATE_HOLDOVER = 2,
+ DPLL_STATE_LOCKED = 4,
+ DPLL_STATE_PRELOCKED2 = 5,
+ DPLL_STATE_PRELOCKED = 6,
+ DPLL_STATE_LOSTPHASE = 7,
+ DPLL_STATE_MAX
+};
+
+#endif
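
The flat constants above decompose into (page, offset) pairs through REG_ADDR(), with each DPLL2 register sitting one page above its DPLL1 counterpart at the same 7-bit offset: DPLL1_TOD_CNFG 0x134 is page 2, offset 0x34, and DPLL2_TOD_CNFG 0x1B4 is page 3, offset 0x34. A compile-time sanity-check sketch:

#include <linux/build_bug.h>

static inline void idt82p33_addr_layout_check(void)
{
	BUILD_BUG_ON(REG_ADDR(2, 0x34) != DPLL1_TOD_CNFG);
	BUILD_BUG_ON(REG_ADDR(3, 0x34) != DPLL2_TOD_CNFG);
	/* TOD_TRIGGER() packs the write trigger into the high nibble. */
	BUILD_BUG_ON(TOD_TRIGGER(HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
				 HW_TOD_RD_TRIG_SEL_LSB_TOD_STS) != 0x99);
}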
diff --git a/include/linux/mfd/idt8a340_reg.h b/include/linux/mfd/idt8a340_reg.h
new file mode 100644
index 000000000000..0c706085c205
--- /dev/null
+++ b/include/linux/mfd/idt8a340_reg.h
@@ -0,0 +1,768 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Based on 5.2.0, Family Programming Guide (Sept 30, 2020)
+ *
+ * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef HAVE_IDT8A340_REG
+#define HAVE_IDT8A340_REG
+
+#define PAGE_ADDR_BASE 0x0000
+#define PAGE_ADDR 0x00fc
+
+#define HW_REVISION 0x8180
+#define REV_ID 0x007a
+
+#define HW_DPLL_0 (0x8a00)
+#define HW_DPLL_1 (0x8b00)
+#define HW_DPLL_2 (0x8c00)
+#define HW_DPLL_3 (0x8d00)
+#define HW_DPLL_4 (0x8e00)
+#define HW_DPLL_5 (0x8f00)
+#define HW_DPLL_6 (0x9000)
+#define HW_DPLL_7 (0x9100)
+
+#define HW_DPLL_TOD_SW_TRIG_ADDR__0 (0x080)
+#define HW_DPLL_TOD_CTRL_1 (0x089)
+#define HW_DPLL_TOD_CTRL_2 (0x08A)
+#define HW_DPLL_TOD_OVR__0 (0x098)
+#define HW_DPLL_TOD_OUT_0__0 (0x0B0)
+
+#define HW_Q0_Q1_CH_SYNC_CTRL_0 (0xa740)
+#define HW_Q0_Q1_CH_SYNC_CTRL_1 (0xa741)
+#define HW_Q2_Q3_CH_SYNC_CTRL_0 (0xa742)
+#define HW_Q2_Q3_CH_SYNC_CTRL_1 (0xa743)
+#define HW_Q4_Q5_CH_SYNC_CTRL_0 (0xa744)
+#define HW_Q4_Q5_CH_SYNC_CTRL_1 (0xa745)
+#define HW_Q6_Q7_CH_SYNC_CTRL_0 (0xa746)
+#define HW_Q6_Q7_CH_SYNC_CTRL_1 (0xa747)
+#define HW_Q8_CH_SYNC_CTRL_0 (0xa748)
+#define HW_Q8_CH_SYNC_CTRL_1 (0xa749)
+#define HW_Q9_CH_SYNC_CTRL_0 (0xa74a)
+#define HW_Q9_CH_SYNC_CTRL_1 (0xa74b)
+#define HW_Q10_CH_SYNC_CTRL_0 (0xa74c)
+#define HW_Q10_CH_SYNC_CTRL_1 (0xa74d)
+#define HW_Q11_CH_SYNC_CTRL_0 (0xa74e)
+#define HW_Q11_CH_SYNC_CTRL_1 (0xa74f)
+
+#define SYNC_SOURCE_DPLL0_TOD_PPS 0x14
+#define SYNC_SOURCE_DPLL1_TOD_PPS 0x15
+#define SYNC_SOURCE_DPLL2_TOD_PPS 0x16
+#define SYNC_SOURCE_DPLL3_TOD_PPS 0x17
+
+#define SYNCTRL1_MASTER_SYNC_RST BIT(7)
+#define SYNCTRL1_MASTER_SYNC_TRIG BIT(5)
+#define SYNCTRL1_TOD_SYNC_TRIG BIT(4)
+#define SYNCTRL1_FBDIV_FRAME_SYNC_TRIG BIT(3)
+#define SYNCTRL1_FBDIV_SYNC_TRIG BIT(2)
+#define SYNCTRL1_Q1_DIV_SYNC_TRIG BIT(1)
+#define SYNCTRL1_Q0_DIV_SYNC_TRIG BIT(0)
+
+#define HW_Q8_CTRL_SPARE (0xa7d4)
+#define HW_Q11_CTRL_SPARE (0xa7ec)
+
+/**
+ * Select FOD5 as the sync_trigger for the Q8 divider.
+ * A transition from logic zero to one triggers
+ * a sync of the Q8 divider.
+ *
+ * Unused when FOD4 is driving Q8 divider (normal operation).
+ */
+#define Q9_TO_Q8_SYNC_TRIG BIT(1)
+
+/**
+ * Enable FOD5 as the clock and sync driver for the Q8 divider,
+ * and enable the fanout buffer for FOD5.
+ *
+ * Unused when FOD4 is driving Q8 divider (normal operation).
+ */
+#define Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
+
+/**
+ * Select FOD6 as the sync_trigger for the Q11 divider.
+ * A transition from logic zero to one triggers
+ * a sync of the Q11 divider.
+ *
+ * Unused when FOD7 is driving Q11 divider (normal operation).
+ */
+#define Q10_TO_Q11_SYNC_TRIG BIT(1)
+
+/**
+ * Enable FOD6 as the clock and sync driver for the Q11 divider,
+ * and enable the fanout buffer for FOD6.
+ *
+ * Unused when FOD7 is driving Q11 divider (normal operation).
+ */
+#define Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
+
+#define RESET_CTRL 0xc000
+#define SM_RESET 0x0012
+#define SM_RESET_V520 0x0013
+#define SM_RESET_CMD 0x5A
+
+#define GENERAL_STATUS 0xc014
+#define BOOT_STATUS 0x0000
+#define HW_REV_ID 0x000A
+#define BOND_ID 0x000B
+#define HW_CSR_ID 0x000C
+#define HW_IRQ_ID 0x000E
+#define MAJ_REL 0x0010
+#define MIN_REL 0x0011
+#define HOTFIX_REL 0x0012
+#define PIPELINE_ID 0x0014
+#define BUILD_ID 0x0018
+#define JTAG_DEVICE_ID 0x001c
+#define PRODUCT_ID 0x001e
+#define OTP_SCSR_CONFIG_SELECT 0x0022
+
+#define STATUS 0xc03c
+#define DPLL0_STATUS 0x0018
+#define DPLL1_STATUS 0x0019
+#define DPLL2_STATUS 0x001a
+#define DPLL3_STATUS 0x001b
+#define DPLL4_STATUS 0x001c
+#define DPLL5_STATUS 0x001d
+#define DPLL6_STATUS 0x001e
+#define DPLL7_STATUS 0x001f
+#define DPLL_SYS_STATUS 0x0020
+#define DPLL_SYS_APLL_STATUS 0x0021
+#define DPLL0_FILTER_STATUS 0x0044
+#define DPLL1_FILTER_STATUS 0x004c
+#define DPLL2_FILTER_STATUS 0x0054
+#define DPLL3_FILTER_STATUS 0x005c
+#define DPLL4_FILTER_STATUS 0x0064
+#define DPLL5_FILTER_STATUS 0x006c
+#define DPLL6_FILTER_STATUS 0x0074
+#define DPLL7_FILTER_STATUS 0x007c
+#define DPLLSYS_FILTER_STATUS 0x0084
+#define USER_GPIO0_TO_7_STATUS 0x008a
+#define USER_GPIO8_TO_15_STATUS 0x008b
+
+#define GPIO_USER_CONTROL 0xc160
+#define GPIO0_TO_7_OUT 0x0000
+#define GPIO8_TO_15_OUT 0x0001
+#define GPIO0_TO_7_OUT_V520 0x0002
+#define GPIO8_TO_15_OUT_V520 0x0003
+
+#define STICKY_STATUS_CLEAR 0xc164
+
+#define GPIO_TOD_NOTIFICATION_CLEAR 0xc16c
+
+#define ALERT_CFG 0xc188
+
+#define SYS_DPLL_XO 0xc194
+
+#define SYS_APLL 0xc19c
+
+#define INPUT_0 0xc1b0
+#define INPUT_1 0xc1c0
+#define INPUT_2 0xc1d0
+#define INPUT_3 0xc200
+#define INPUT_4 0xc210
+#define INPUT_5 0xc220
+#define INPUT_6 0xc230
+#define INPUT_7 0xc240
+#define INPUT_8 0xc250
+#define INPUT_9 0xc260
+#define INPUT_10 0xc280
+#define INPUT_11 0xc290
+#define INPUT_12 0xc2a0
+#define INPUT_13 0xc2b0
+#define INPUT_14 0xc2c0
+#define INPUT_15 0xc2d0
+
+#define REF_MON_0 0xc2e0
+#define REF_MON_1 0xc2ec
+#define REF_MON_2 0xc300
+#define REF_MON_3 0xc30c
+#define REF_MON_4 0xc318
+#define REF_MON_5 0xc324
+#define REF_MON_6 0xc330
+#define REF_MON_7 0xc33c
+#define REF_MON_8 0xc348
+#define REF_MON_9 0xc354
+#define REF_MON_10 0xc360
+#define REF_MON_11 0xc36c
+#define REF_MON_12 0xc380
+#define REF_MON_13 0xc38c
+#define REF_MON_14 0xc398
+#define REF_MON_15 0xc3a4
+
+#define DPLL_0 0xc3b0
+#define DPLL_CTRL_REG_0 0x0002
+#define DPLL_CTRL_REG_1 0x0003
+#define DPLL_CTRL_REG_2 0x0004
+#define DPLL_TOD_SYNC_CFG 0x0031
+#define DPLL_COMBO_SLAVE_CFG_0 0x0032
+#define DPLL_COMBO_SLAVE_CFG_1 0x0033
+#define DPLL_SLAVE_REF_CFG 0x0034
+#define DPLL_REF_MODE 0x0035
+#define DPLL_PHASE_MEASUREMENT_CFG 0x0036
+#define DPLL_MODE 0x0037
+#define DPLL_MODE_V520 0x003B
+#define DPLL_1 0xc400
+#define DPLL_2 0xc438
+#define DPLL_2_V520 0xc43c
+#define DPLL_3 0xc480
+#define DPLL_4 0xc4b8
+#define DPLL_4_V520 0xc4bc
+#define DPLL_5 0xc500
+#define DPLL_6 0xc538
+#define DPLL_6_V520 0xc53c
+#define DPLL_7 0xc580
+#define SYS_DPLL 0xc5b8
+#define SYS_DPLL_V520 0xc5bc
+
+#define DPLL_CTRL_0 0xc600
+#define DPLL_CTRL_DPLL_MANU_REF_CFG 0x0001
+#define DPLL_CTRL_DPLL_FOD_FREQ 0x001c
+#define DPLL_CTRL_COMBO_MASTER_CFG 0x003a
+#define DPLL_CTRL_1 0xc63c
+#define DPLL_CTRL_2 0xc680
+#define DPLL_CTRL_3 0xc6bc
+#define DPLL_CTRL_4 0xc700
+#define DPLL_CTRL_5 0xc73c
+#define DPLL_CTRL_6 0xc780
+#define DPLL_CTRL_7 0xc7bc
+#define SYS_DPLL_CTRL 0xc800
+
+#define DPLL_PHASE_0 0xc818
+/* Signed 42-bit FFO in units of 2^(-53) */
+#define DPLL_WR_PHASE 0x0000
+#define DPLL_PHASE_1 0xc81c
+#define DPLL_PHASE_2 0xc820
+#define DPLL_PHASE_3 0xc824
+#define DPLL_PHASE_4 0xc828
+#define DPLL_PHASE_5 0xc82c
+#define DPLL_PHASE_6 0xc830
+#define DPLL_PHASE_7 0xc834
+
+#define DPLL_FREQ_0 0xc838
+/* Signed 42-bit FFO in units of 2^(-53) */
+#define DPLL_WR_FREQ 0x0000
+#define DPLL_FREQ_1 0xc840
+#define DPLL_FREQ_2 0xc848
+#define DPLL_FREQ_3 0xc850
+#define DPLL_FREQ_4 0xc858
+#define DPLL_FREQ_5 0xc860
+#define DPLL_FREQ_6 0xc868
+#define DPLL_FREQ_7 0xc870
+
+#define DPLL_PHASE_PULL_IN_0 0xc880
+#define PULL_IN_OFFSET 0x0000 /* Signed 32 bit */
+#define PULL_IN_SLOPE_LIMIT 0x0004 /* Unsigned 24 bit */
+#define PULL_IN_CTRL 0x0007
+#define DPLL_PHASE_PULL_IN_1 0xc888
+#define DPLL_PHASE_PULL_IN_2 0xc890
+#define DPLL_PHASE_PULL_IN_3 0xc898
+#define DPLL_PHASE_PULL_IN_4 0xc8a0
+#define DPLL_PHASE_PULL_IN_5 0xc8a8
+#define DPLL_PHASE_PULL_IN_6 0xc8b0
+#define DPLL_PHASE_PULL_IN_7 0xc8b8
+
+#define GPIO_CFG 0xc8c0
+#define GPIO_CFG_GBL 0x0000
+#define GPIO_0 0xc8c2
+#define GPIO_DCO_INC_DEC 0x0000
+#define GPIO_OUT_CTRL_0 0x0001
+#define GPIO_OUT_CTRL_1 0x0002
+#define GPIO_TOD_TRIG 0x0003
+#define GPIO_DPLL_INDICATOR 0x0004
+#define GPIO_LOS_INDICATOR 0x0005
+#define GPIO_REF_INPUT_DSQ_0 0x0006
+#define GPIO_REF_INPUT_DSQ_1 0x0007
+#define GPIO_REF_INPUT_DSQ_2 0x0008
+#define GPIO_REF_INPUT_DSQ_3 0x0009
+#define GPIO_MAN_CLK_SEL_0 0x000a
+#define GPIO_MAN_CLK_SEL_1 0x000b
+#define GPIO_MAN_CLK_SEL_2 0x000c
+#define GPIO_SLAVE 0x000d
+#define GPIO_ALERT_OUT_CFG 0x000e
+#define GPIO_TOD_NOTIFICATION_CFG 0x000f
+#define GPIO_CTRL 0x0010
+#define GPIO_CTRL_V520 0x0011
+#define GPIO_1 0xc8d4
+#define GPIO_2 0xc8e6
+#define GPIO_3 0xc900
+#define GPIO_4 0xc912
+#define GPIO_5 0xc924
+#define GPIO_6 0xc936
+#define GPIO_7 0xc948
+#define GPIO_8 0xc95a
+#define GPIO_9 0xc980
+#define GPIO_10 0xc992
+#define GPIO_11 0xc9a4
+#define GPIO_12 0xc9b6
+#define GPIO_13 0xc9c8
+#define GPIO_14 0xc9da
+#define GPIO_15 0xca00
+
+#define OUT_DIV_MUX 0xca12
+#define OUTPUT_0 0xca14
+#define OUTPUT_0_V520 0xca20
+/* FOD frequency output divider value */
+#define OUT_DIV 0x0000
+#define OUT_DUTY_CYCLE_HIGH 0x0004
+#define OUT_CTRL_0 0x0008
+#define OUT_CTRL_1 0x0009
+/* Phase adjustment in FOD cycles */
+#define OUT_PHASE_ADJ 0x000c
+#define OUTPUT_1 0xca24
+#define OUTPUT_1_V520 0xca30
+#define OUTPUT_2 0xca34
+#define OUTPUT_2_V520 0xca40
+#define OUTPUT_3 0xca44
+#define OUTPUT_3_V520 0xca50
+#define OUTPUT_4 0xca54
+#define OUTPUT_4_V520 0xca60
+#define OUTPUT_5 0xca64
+#define OUTPUT_5_V520 0xca80
+#define OUTPUT_6 0xca80
+#define OUTPUT_6_V520 0xca90
+#define OUTPUT_7 0xca90
+#define OUTPUT_7_V520 0xcaa0
+#define OUTPUT_8 0xcaa0
+#define OUTPUT_8_V520 0xcab0
+#define OUTPUT_9 0xcab0
+#define OUTPUT_9_V520 0xcac0
+#define OUTPUT_10 0xcac0
+#define OUTPUT_10_V520 0xcad0
+#define OUTPUT_11 0xcad0
+#define OUTPUT_11_V520 0xcae0
+
+#define SERIAL 0xcae0
+#define SERIAL_V520 0xcaf0
+
+#define PWM_ENCODER_0 0xcb00
+#define PWM_ENCODER_1 0xcb08
+#define PWM_ENCODER_2 0xcb10
+#define PWM_ENCODER_3 0xcb18
+#define PWM_ENCODER_4 0xcb20
+#define PWM_ENCODER_5 0xcb28
+#define PWM_ENCODER_6 0xcb30
+#define PWM_ENCODER_7 0xcb38
+#define PWM_DECODER_0 0xcb40
+#define PWM_DECODER_1 0xcb48
+#define PWM_DECODER_1_V520 0xcb4a
+#define PWM_DECODER_2 0xcb50
+#define PWM_DECODER_2_V520 0xcb54
+#define PWM_DECODER_3 0xcb58
+#define PWM_DECODER_3_V520 0xcb5e
+#define PWM_DECODER_4 0xcb60
+#define PWM_DECODER_4_V520 0xcb68
+#define PWM_DECODER_5 0xcb68
+#define PWM_DECODER_5_V520 0xcb80
+#define PWM_DECODER_6 0xcb70
+#define PWM_DECODER_6_V520 0xcb8a
+#define PWM_DECODER_7 0xcb80
+#define PWM_DECODER_7_V520 0xcb94
+#define PWM_DECODER_8 0xcb88
+#define PWM_DECODER_8_V520 0xcb9e
+#define PWM_DECODER_9 0xcb90
+#define PWM_DECODER_9_V520 0xcba8
+#define PWM_DECODER_10 0xcb98
+#define PWM_DECODER_10_V520 0xcbb2
+#define PWM_DECODER_11 0xcba0
+#define PWM_DECODER_11_V520 0xcbbc
+#define PWM_DECODER_12 0xcba8
+#define PWM_DECODER_12_V520 0xcbc6
+#define PWM_DECODER_13 0xcbb0
+#define PWM_DECODER_13_V520 0xcbd0
+#define PWM_DECODER_14 0xcbb8
+#define PWM_DECODER_14_V520 0xcbda
+#define PWM_DECODER_15 0xcbc0
+#define PWM_DECODER_15_V520 0xcbe4
+#define PWM_USER_DATA 0xcbc8
+#define PWM_USER_DATA_V520 0xcbf0
+
+#define TOD_0 0xcbcc
+#define TOD_0_V520 0xcc00
+/* Enable TOD counter, output channel sync and even-PPS mode */
+#define TOD_CFG 0x0000
+#define TOD_CFG_V520 0x0001
+#define TOD_1 0xcbce
+#define TOD_1_V520 0xcc02
+#define TOD_2 0xcbd0
+#define TOD_2_V520 0xcc04
+#define TOD_3 0xcbd2
+#define TOD_3_V520 0xcc06
+
+#define TOD_WRITE_0 0xcc00
+#define TOD_WRITE_0_V520 0xcc10
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_WRITE 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_WRITE_COUNTER 0x000c
+/* TOD write trigger configuration */
+#define TOD_WRITE_SELECT_CFG_0 0x000d
+/* TOD write trigger selection */
+#define TOD_WRITE_CMD 0x000f
+#define TOD_WRITE_1 0xcc10
+#define TOD_WRITE_1_V520 0xcc20
+#define TOD_WRITE_2 0xcc20
+#define TOD_WRITE_2_V520 0xcc30
+#define TOD_WRITE_3 0xcc30
+#define TOD_WRITE_3_V520 0xcc40
+
+#define TOD_READ_PRIMARY_0 0xcc40
+#define TOD_READ_PRIMARY_0_V520 0xcc50
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_READ_PRIMARY_BASE 0x0000
+/* Counter increments after the TOD read is completed */
+#define TOD_READ_PRIMARY_COUNTER 0x000b
+/* Read trigger configuration */
+#define TOD_READ_PRIMARY_SEL_CFG_0 0x000c
+/* Read trigger selection */
+#define TOD_READ_PRIMARY_CMD 0x000e
+#define TOD_READ_PRIMARY_CMD_V520 0x000f
+#define TOD_READ_PRIMARY_1 0xcc50
+#define TOD_READ_PRIMARY_1_V520 0xcc60
+#define TOD_READ_PRIMARY_2 0xcc60
+#define TOD_READ_PRIMARY_2_V520 0xcc80
+#define TOD_READ_PRIMARY_3 0xcc80
+#define TOD_READ_PRIMARY_3_V520 0xcc90
+
+#define TOD_READ_SECONDARY_0 0xcc90
+#define TOD_READ_SECONDARY_0_V520 0xcca0
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_READ_SECONDARY_BASE 0x0000
+/* Counter increments after the TOD read is completed */
+#define TOD_READ_SECONDARY_COUNTER 0x000b
+/* Read trigger configuration */
+#define TOD_READ_SECONDARY_SEL_CFG_0 0x000c
+/* Read trigger selection */
+#define TOD_READ_SECONDARY_CMD 0x000e
+#define TOD_READ_SECONDARY_CMD_V520 0x000f
+
+#define TOD_READ_SECONDARY_1 0xcca0
+#define TOD_READ_SECONDARY_1_V520 0xccb0
+#define TOD_READ_SECONDARY_2 0xccb0
+#define TOD_READ_SECONDARY_2_V520 0xccc0
+#define TOD_READ_SECONDARY_3 0xccc0
+#define TOD_READ_SECONDARY_3_V520 0xccd0
+
+#define OUTPUT_TDC_CFG 0xccd0
+#define OUTPUT_TDC_CFG_V520 0xcce0
+#define OUTPUT_TDC_0 0xcd00
+#define OUTPUT_TDC_1 0xcd08
+#define OUTPUT_TDC_2 0xcd10
+#define OUTPUT_TDC_3 0xcd18
+#define INPUT_TDC 0xcd20
+
+#define SCRATCH 0xcf50
+#define SCRATCH_V520 0xcf4c
+
+#define EEPROM 0xcf68
+#define EEPROM_V520 0xcf64
+
+#define OTP 0xcf70
+
+#define BYTE 0xcf80
+
+/* Bit definitions for the MAJ_REL register */
+#define MAJOR_SHIFT (1)
+#define MAJOR_MASK (0x7f)
+#define PR_BUILD BIT(0)
+
+/* Bit definitions for the USER_GPIO0_TO_7_STATUS register */
+#define GPIO0_LEVEL BIT(0)
+#define GPIO1_LEVEL BIT(1)
+#define GPIO2_LEVEL BIT(2)
+#define GPIO3_LEVEL BIT(3)
+#define GPIO4_LEVEL BIT(4)
+#define GPIO5_LEVEL BIT(5)
+#define GPIO6_LEVEL BIT(6)
+#define GPIO7_LEVEL BIT(7)
+
+/* Bit definitions for the USER_GPIO8_TO_15_STATUS register */
+#define GPIO8_LEVEL BIT(0)
+#define GPIO9_LEVEL BIT(1)
+#define GPIO10_LEVEL BIT(2)
+#define GPIO11_LEVEL BIT(3)
+#define GPIO12_LEVEL BIT(4)
+#define GPIO13_LEVEL BIT(5)
+#define GPIO14_LEVEL BIT(6)
+#define GPIO15_LEVEL BIT(7)
+
+/* Bit definitions for the GPIO0_TO_7_OUT register */
+#define GPIO0_DRIVE_LEVEL BIT(0)
+#define GPIO1_DRIVE_LEVEL BIT(1)
+#define GPIO2_DRIVE_LEVEL BIT(2)
+#define GPIO3_DRIVE_LEVEL BIT(3)
+#define GPIO4_DRIVE_LEVEL BIT(4)
+#define GPIO5_DRIVE_LEVEL BIT(5)
+#define GPIO6_DRIVE_LEVEL BIT(6)
+#define GPIO7_DRIVE_LEVEL BIT(7)
+
+/* Bit definitions for the GPIO8_TO_15_OUT register */
+#define GPIO8_DRIVE_LEVEL BIT(0)
+#define GPIO9_DRIVE_LEVEL BIT(1)
+#define GPIO10_DRIVE_LEVEL BIT(2)
+#define GPIO11_DRIVE_LEVEL BIT(3)
+#define GPIO12_DRIVE_LEVEL BIT(4)
+#define GPIO13_DRIVE_LEVEL BIT(5)
+#define GPIO14_DRIVE_LEVEL BIT(6)
+#define GPIO15_DRIVE_LEVEL BIT(7)
+
+/* Bit definitions for the DPLL_TOD_SYNC_CFG register */
+#define TOD_SYNC_SOURCE_SHIFT (1)
+#define TOD_SYNC_SOURCE_MASK (0x3)
+#define TOD_SYNC_EN BIT(0)
+
+/* Bit definitions for the DPLL_MODE register */
+#define WRITE_TIMER_MODE BIT(6)
+#define PLL_MODE_SHIFT (3)
+#define PLL_MODE_MASK (0x7)
+#define STATE_MODE_SHIFT (0)
+#define STATE_MODE_MASK (0x7)
+
+/* Bit definitions for the DPLL_MANU_REF_CFG register */
+#define MANUAL_REFERENCE_SHIFT (0)
+#define MANUAL_REFERENCE_MASK (0x1f)
+
+/* Bit definitions for the GPIO_CFG_GBL register */
+#define SUPPLY_MODE_SHIFT (0)
+#define SUPPLY_MODE_MASK (0x3)
+
+/* Bit definitions for the GPIO_DCO_INC_DEC register */
+#define INCDEC_DPLL_INDEX_SHIFT (0)
+#define INCDEC_DPLL_INDEX_MASK (0x7)
+
+/* Bit definitions for the GPIO_OUT_CTRL_0 register */
+#define CTRL_OUT_0 BIT(0)
+#define CTRL_OUT_1 BIT(1)
+#define CTRL_OUT_2 BIT(2)
+#define CTRL_OUT_3 BIT(3)
+#define CTRL_OUT_4 BIT(4)
+#define CTRL_OUT_5 BIT(5)
+#define CTRL_OUT_6 BIT(6)
+#define CTRL_OUT_7 BIT(7)
+
+/* Bit definitions for the GPIO_OUT_CTRL_1 register */
+#define CTRL_OUT_8 BIT(0)
+#define CTRL_OUT_9 BIT(1)
+#define CTRL_OUT_10 BIT(2)
+#define CTRL_OUT_11 BIT(3)
+#define CTRL_OUT_12 BIT(4)
+#define CTRL_OUT_13 BIT(5)
+#define CTRL_OUT_14 BIT(6)
+#define CTRL_OUT_15 BIT(7)
+
+/* Bit definitions for the GPIO_TOD_TRIG register */
+#define TOD_TRIG_0 BIT(0)
+#define TOD_TRIG_1 BIT(1)
+#define TOD_TRIG_2 BIT(2)
+#define TOD_TRIG_3 BIT(3)
+
+/* Bit definitions for the GPIO_DPLL_INDICATOR register */
+#define IND_DPLL_INDEX_SHIFT (0)
+#define IND_DPLL_INDEX_MASK (0x7)
+
+/* Bit definitions for the GPIO_LOS_INDICATOR register */
+#define REFMON_INDEX_SHIFT (0)
+#define REFMON_INDEX_MASK (0xf)
+/* Active level of LOS indicator, 0=low 1=high */
+#define ACTIVE_LEVEL BIT(4)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_0 register */
+#define DSQ_INP_0 BIT(0)
+#define DSQ_INP_1 BIT(1)
+#define DSQ_INP_2 BIT(2)
+#define DSQ_INP_3 BIT(3)
+#define DSQ_INP_4 BIT(4)
+#define DSQ_INP_5 BIT(5)
+#define DSQ_INP_6 BIT(6)
+#define DSQ_INP_7 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_1 register */
+#define DSQ_INP_8 BIT(0)
+#define DSQ_INP_9 BIT(1)
+#define DSQ_INP_10 BIT(2)
+#define DSQ_INP_11 BIT(3)
+#define DSQ_INP_12 BIT(4)
+#define DSQ_INP_13 BIT(5)
+#define DSQ_INP_14 BIT(6)
+#define DSQ_INP_15 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_2 register */
+#define DSQ_DPLL_0 BIT(0)
+#define DSQ_DPLL_1 BIT(1)
+#define DSQ_DPLL_2 BIT(2)
+#define DSQ_DPLL_3 BIT(3)
+#define DSQ_DPLL_4 BIT(4)
+#define DSQ_DPLL_5 BIT(5)
+#define DSQ_DPLL_6 BIT(6)
+#define DSQ_DPLL_7 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_3 register */
+#define DSQ_DPLL_SYS BIT(0)
+#define GPIO_DSQ_LEVEL BIT(1)
+
+/* Bit definitions for the GPIO_TOD_NOTIFICATION_CFG register */
+#define DPLL_TOD_SHIFT (0)
+#define DPLL_TOD_MASK (0x3)
+#define TOD_READ_SECONDARY BIT(2)
+#define GPIO_ASSERT_LEVEL BIT(3)
+
+/* Bit definitions for the GPIO_CTRL register */
+#define GPIO_FUNCTION_EN BIT(0)
+#define GPIO_CMOS_OD_MODE BIT(1)
+#define GPIO_CONTROL_DIR BIT(2)
+#define GPIO_PU_PD_MODE BIT(3)
+#define GPIO_FUNCTION_SHIFT (4)
+#define GPIO_FUNCTION_MASK (0xf)
+
+/* Bit definitions for the OUT_CTRL_1 register */
+#define OUT_SYNC_DISABLE BIT(7)
+#define SQUELCH_VALUE BIT(6)
+#define SQUELCH_DISABLE BIT(5)
+#define PAD_VDDO_SHIFT (2)
+#define PAD_VDDO_MASK (0x7)
+#define PAD_CMOSDRV_SHIFT (0)
+#define PAD_CMOSDRV_MASK (0x3)
+
+/* Bit definitions for the TOD_CFG register */
+#define TOD_EVEN_PPS_MODE BIT(2)
+#define TOD_OUT_SYNC_ENABLE BIT(1)
+#define TOD_ENABLE BIT(0)
+
+/* Bit definitions for the TOD_WRITE_SELECT_CFG_0 register */
+#define WR_PWM_DECODER_INDEX_SHIFT (4)
+#define WR_PWM_DECODER_INDEX_MASK (0xf)
+#define WR_REF_INDEX_SHIFT (0)
+#define WR_REF_INDEX_MASK (0xf)
+
+/* Bit definitions for the TOD_WRITE_CMD register */
+#define TOD_WRITE_SELECTION_SHIFT (0)
+#define TOD_WRITE_SELECTION_MASK (0xf)
+/* 4.8.7 */
+#define TOD_WRITE_TYPE_SHIFT (4)
+#define TOD_WRITE_TYPE_MASK (0x3)
+
+/* Bit definitions for the TOD_READ_PRIMARY_SEL_CFG_0 register */
+#define RD_PWM_DECODER_INDEX_SHIFT (4)
+#define RD_PWM_DECODER_INDEX_MASK (0xf)
+#define RD_REF_INDEX_SHIFT (0)
+#define RD_REF_INDEX_MASK (0xf)
+
+/* Bit definitions for the TOD_READ_PRIMARY_CMD register */
+#define TOD_READ_TRIGGER_MODE BIT(4)
+#define TOD_READ_TRIGGER_SHIFT (0)
+#define TOD_READ_TRIGGER_MASK (0xf)
+
+/* Bit definitions for the DPLL_CTRL_COMBO_MASTER_CFG register */
+#define COMBO_MASTER_HOLD BIT(0)
+
+/* Bit definitions for DPLL_SYS_STATUS register */
+#define DPLL_SYS_STATE_MASK (0xf)
+
+/* Bit definitions for SYS_APLL_STATUS register */
+#define SYS_APLL_LOSS_LOCK_LIVE_MASK BIT(0)
+#define SYS_APLL_LOSS_LOCK_LIVE_LOCKED 0
+#define SYS_APLL_LOSS_LOCK_LIVE_UNLOCKED 1
+
+/* Bit definitions for the DPLL0_STATUS register */
+#define DPLL_STATE_MASK (0xf)
+#define DPLL_STATE_SHIFT (0x0)
+
+/* Values of DPLL_N.DPLL_MODE.PLL_MODE */
+enum pll_mode {
+ PLL_MODE_MIN = 0,
+ PLL_MODE_PLL = PLL_MODE_MIN,
+ PLL_MODE_WRITE_PHASE = 1,
+ PLL_MODE_WRITE_FREQUENCY = 2,
+ PLL_MODE_GPIO_INC_DEC = 3,
+ PLL_MODE_SYNTHESIS = 4,
+ PLL_MODE_PHASE_MEASUREMENT = 5,
+ PLL_MODE_DISABLED = 6,
+ PLL_MODE_MAX = PLL_MODE_DISABLED,
+};
+
+/* Values of DPLL_CTRL_n.DPLL_MANU_REF_CFG.MANUAL_REFERENCE */
+enum manual_reference {
+ MANU_REF_MIN = 0,
+ MANU_REF_CLK0 = MANU_REF_MIN,
+ MANU_REF_CLK1,
+ MANU_REF_CLK2,
+ MANU_REF_CLK3,
+ MANU_REF_CLK4,
+ MANU_REF_CLK5,
+ MANU_REF_CLK6,
+ MANU_REF_CLK7,
+ MANU_REF_CLK8,
+ MANU_REF_CLK9,
+ MANU_REF_CLK10,
+ MANU_REF_CLK11,
+ MANU_REF_CLK12,
+ MANU_REF_CLK13,
+ MANU_REF_CLK14,
+ MANU_REF_CLK15,
+ MANU_REF_WRITE_PHASE,
+ MANU_REF_WRITE_FREQUENCY,
+ MANU_REF_XO_DPLL,
+ MANU_REF_MAX = MANU_REF_XO_DPLL,
+};
+
+enum hw_tod_write_trig_sel {
+ HW_TOD_WR_TRIG_SEL_MIN = 0,
+ HW_TOD_WR_TRIG_SEL_MSB = HW_TOD_WR_TRIG_SEL_MIN,
+ HW_TOD_WR_TRIG_SEL_RESERVED = 1,
+ HW_TOD_WR_TRIG_SEL_TOD_PPS = 2,
+ HW_TOD_WR_TRIG_SEL_IRIGB_PPS = 3,
+ HW_TOD_WR_TRIG_SEL_PWM_PPS = 4,
+ HW_TOD_WR_TRIG_SEL_GPIO = 5,
+ HW_TOD_WR_TRIG_SEL_FOD_SYNC = 6,
+ WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_FOD_SYNC,
+};
+
+enum scsr_read_trig_sel {
+ /* CANCEL CURRENT TOD READ; MODULE BECOMES IDLE - NO TRIGGER OCCURS */
+ SCSR_TOD_READ_TRIG_SEL_DISABLE = 0,
+ /* TRIGGER IMMEDIATELY */
+ SCSR_TOD_READ_TRIG_SEL_IMMEDIATE = 1,
+ /* TRIGGER ON RISING EDGE OF INTERNAL TOD PPS SIGNAL */
+ SCSR_TOD_READ_TRIG_SEL_TODPPS = 2,
+ /* TRIGGER ON RISING EDGE OF SELECTED REFERENCE INPUT */
+ SCSR_TOD_READ_TRIG_SEL_REFCLK = 3,
+ /* TRIGGER ON RISING EDGE OF SELECTED PWM DECODER 1PPS OUTPUT */
+ SCSR_TOD_READ_TRIG_SEL_PWMPPS = 4,
+ SCSR_TOD_READ_TRIG_SEL_RESERVED = 5,
+ /* TRIGGER WHEN WRITE FREQUENCY EVENT OCCURS */
+ SCSR_TOD_READ_TRIG_SEL_WRITEFREQUENCYEVENT = 6,
+ /* TRIGGER ON SELECTED GPIO */
+ SCSR_TOD_READ_TRIG_SEL_GPIO = 7,
+ SCSR_TOD_READ_TRIG_SEL_MAX = SCSR_TOD_READ_TRIG_SEL_GPIO,
+};
+
+/* Values STATUS.DPLL_SYS_STATUS.DPLL_SYS_STATE */
+enum dpll_state {
+ DPLL_STATE_MIN = 0,
+ DPLL_STATE_FREERUN = DPLL_STATE_MIN,
+ DPLL_STATE_LOCKACQ = 1,
+ DPLL_STATE_LOCKREC = 2,
+ DPLL_STATE_LOCKED = 3,
+ DPLL_STATE_HOLDOVER = 4,
+ DPLL_STATE_OPEN_LOOP = 5,
+ DPLL_STATE_MAX = DPLL_STATE_OPEN_LOOP,
+};
+
+/* 4.8.7 only */
+enum scsr_tod_write_trig_sel {
+ SCSR_TOD_WR_TRIG_SEL_DISABLE = 0,
+ SCSR_TOD_WR_TRIG_SEL_IMMEDIATE = 1,
+ SCSR_TOD_WR_TRIG_SEL_REFCLK = 2,
+ SCSR_TOD_WR_TRIG_SEL_PWMPPS = 3,
+ SCSR_TOD_WR_TRIG_SEL_TODPPS = 4,
+ SCSR_TOD_WR_TRIG_SEL_SYNCFOD = 5,
+ SCSR_TOD_WR_TRIG_SEL_GPIO = 6,
+ SCSR_TOD_WR_TRIG_SEL_MAX = SCSR_TOD_WR_TRIG_SEL_GPIO,
+};
+
+/* 4.8.7 only */
+enum scsr_tod_write_type_sel {
+ SCSR_TOD_WR_TYPE_SEL_ABSOLUTE = 0,
+ SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS = 1,
+ SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS = 2,
+ SCSR_TOD_WR_TYPE_SEL_MAX = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS,
+};
+#endif
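
On the layout the "4.8.7 only" comments refer to, a TOD write is armed by composing a trigger selection and a write type into the TOD_WRITE_CMD byte. A small helper sketch built purely from the field definitions above; register I/O is left to the caller:

#include <linux/types.h>

static inline u8 idt8a340_tod_write_cmd(enum scsr_tod_write_trig_sel trig,
					enum scsr_tod_write_type_sel type)
{
	return ((trig & TOD_WRITE_SELECTION_MASK) << TOD_WRITE_SELECTION_SHIFT) |
	       ((type & TOD_WRITE_TYPE_MASK) << TOD_WRITE_TYPE_SHIFT);
}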
diff --git a/include/linux/mfd/intel-m10-bmc.h b/include/linux/mfd/intel-m10-bmc.h
new file mode 100644
index 000000000000..f0044b14136e
--- /dev/null
+++ b/include/linux/mfd/intel-m10-bmc.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel MAX 10 Board Management Controller chip.
+ *
+ * Copyright (C) 2018-2020 Intel Corporation, Inc.
+ */
+#ifndef __MFD_INTEL_M10_BMC_H
+#define __MFD_INTEL_M10_BMC_H
+
+#include <linux/regmap.h>
+
+#define M10BMC_LEGACY_BUILD_VER 0x300468
+#define M10BMC_SYS_BASE 0x300800
+#define M10BMC_SYS_END 0x300fff
+#define M10BMC_FLASH_BASE 0x10000000
+#define M10BMC_FLASH_END 0x1fffffff
+#define M10BMC_MEM_END M10BMC_FLASH_END
+
+#define M10BMC_STAGING_BASE 0x18000000
+#define M10BMC_STAGING_SIZE 0x3800000
+
+/* Register offset of system registers */
+#define NIOS2_FW_VERSION 0x0
+#define M10BMC_MAC_LOW 0x10
+#define M10BMC_MAC_BYTE4 GENMASK(7, 0)
+#define M10BMC_MAC_BYTE3 GENMASK(15, 8)
+#define M10BMC_MAC_BYTE2 GENMASK(23, 16)
+#define M10BMC_MAC_BYTE1 GENMASK(31, 24)
+#define M10BMC_MAC_HIGH 0x14
+#define M10BMC_MAC_BYTE6 GENMASK(7, 0)
+#define M10BMC_MAC_BYTE5 GENMASK(15, 8)
+#define M10BMC_MAC_COUNT GENMASK(23, 16)
+#define M10BMC_TEST_REG 0x3c
+#define M10BMC_BUILD_VER 0x68
+#define M10BMC_VER_MAJOR_MSK GENMASK(23, 16)
+#define M10BMC_VER_PCB_INFO_MSK GENMASK(31, 24)
+#define M10BMC_VER_LEGACY_INVALID 0xffffffff
+
+/* Secure update doorbell register, in system register region */
+#define M10BMC_DOORBELL 0x400
+
+/* Authorization Result register, in system register region */
+#define M10BMC_AUTH_RESULT 0x404
+
+/* Doorbell register fields */
+#define DRBL_RSU_REQUEST BIT(0)
+#define DRBL_RSU_PROGRESS GENMASK(7, 4)
+#define DRBL_HOST_STATUS GENMASK(11, 8)
+#define DRBL_RSU_STATUS GENMASK(23, 16)
+#define DRBL_PKVL_EEPROM_LOAD_SEC BIT(24)
+#define DRBL_PKVL1_POLL_EN BIT(25)
+#define DRBL_PKVL2_POLL_EN BIT(26)
+#define DRBL_CONFIG_SEL BIT(28)
+#define DRBL_REBOOT_REQ BIT(29)
+#define DRBL_REBOOT_DISABLED BIT(30)
+
+/* Progress states */
+#define RSU_PROG_IDLE 0x0
+#define RSU_PROG_PREPARE 0x1
+#define RSU_PROG_READY 0x3
+#define RSU_PROG_AUTHENTICATING 0x4
+#define RSU_PROG_COPYING 0x5
+#define RSU_PROG_UPDATE_CANCEL 0x6
+#define RSU_PROG_PROGRAM_KEY_HASH 0x7
+#define RSU_PROG_RSU_DONE 0x8
+#define RSU_PROG_PKVL_PROM_DONE 0x9
+
+/* Device and error states */
+#define RSU_STAT_NORMAL 0x0
+#define RSU_STAT_TIMEOUT 0x1
+#define RSU_STAT_AUTH_FAIL 0x2
+#define RSU_STAT_COPY_FAIL 0x3
+#define RSU_STAT_FATAL 0x4
+#define RSU_STAT_PKVL_REJECT 0x5
+#define RSU_STAT_NON_INC 0x6
+#define RSU_STAT_ERASE_FAIL 0x7
+#define RSU_STAT_WEAROUT 0x8
+#define RSU_STAT_NIOS_OK 0x80
+#define RSU_STAT_USER_OK 0x81
+#define RSU_STAT_FACTORY_OK 0x82
+#define RSU_STAT_USER_FAIL 0x83
+#define RSU_STAT_FACTORY_FAIL 0x84
+#define RSU_STAT_NIOS_FLASH_ERR 0x85
+#define RSU_STAT_FPGA_FLASH_ERR 0x86
+
+#define HOST_STATUS_IDLE 0x0
+#define HOST_STATUS_WRITE_DONE 0x1
+#define HOST_STATUS_ABORT_RSU 0x2
+
+#define rsu_prog(doorbell) FIELD_GET(DRBL_RSU_PROGRESS, doorbell)
+#define rsu_stat(doorbell) FIELD_GET(DRBL_RSU_STATUS, doorbell)
+
+/* interval 100ms and timeout 5s */
+#define NIOS_HANDSHAKE_INTERVAL_US (100 * 1000)
+#define NIOS_HANDSHAKE_TIMEOUT_US (5 * 1000 * 1000)
+
+/* RSU PREP Timeout (2 minutes) to erase flash staging area */
+#define RSU_PREP_INTERVAL_MS 100
+#define RSU_PREP_TIMEOUT_MS (2 * 60 * 1000)
+
+/* RSU Complete Timeout (40 minutes) for full flash update */
+#define RSU_COMPLETE_INTERVAL_MS 1000
+#define RSU_COMPLETE_TIMEOUT_MS (40 * 60 * 1000)
+
+/* Addresses for security related data in FLASH */
+#define BMC_REH_ADDR 0x17ffc004
+#define BMC_PROG_ADDR 0x17ffc000
+#define BMC_PROG_MAGIC 0x5746
+
+#define SR_REH_ADDR 0x17ffd004
+#define SR_PROG_ADDR 0x17ffd000
+#define SR_PROG_MAGIC 0x5253
+
+#define PR_REH_ADDR 0x17ffe004
+#define PR_PROG_ADDR 0x17ffe000
+#define PR_PROG_MAGIC 0x5250
+
+/* Address of 4KB inverted bit vector containing staging area FLASH count */
+#define STAGING_FLASH_COUNT 0x17ffb000
+
+/**
+ * struct intel_m10bmc - Intel MAX 10 BMC parent driver data structure
+ * @dev: this device
+ * @regmap: the regmap used by the m10bmc core to access its registers
+ */
+struct intel_m10bmc {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+/*
+ * Register access helper functions.
+ *
+ * m10bmc_raw_read - read an m10bmc register at an absolute address
+ * m10bmc_sys_read - read an m10bmc system register by its offset
+ */
+static inline int
+m10bmc_raw_read(struct intel_m10bmc *m10bmc, unsigned int addr,
+ unsigned int *val)
+{
+ int ret;
+
+ ret = regmap_read(m10bmc->regmap, addr, val);
+ if (ret)
+ dev_err(m10bmc->dev, "failed to read raw reg %x: %d\n",
+ addr, ret);
+
+ return ret;
+}
+
+/*
+ * The base address of the system registers is configurable by hardware
+ * developers, and the hardware spec lists system register addresses
+ * without the base added in.
+ *
+ * This macro simplifies access to the system registers. If the base is
+ * reconfigured in hardware, only M10BMC_SYS_BASE needs to be updated.
+ */
+#define m10bmc_sys_read(m10bmc, offset, val) \
+ m10bmc_raw_read(m10bmc, M10BMC_SYS_BASE + (offset), val)
+
+#endif /* __MFD_INTEL_M10_BMC_H */
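
Combining m10bmc_sys_read() with the doorbell field helpers gives a compact progress poll for secure updates. A sketch; note that rsu_prog()/rsu_stat() expand to FIELD_GET() and therefore need <linux/bitfield.h>:

#include <linux/bitfield.h>
#include <linux/mfd/intel-m10-bmc.h>

static int m10bmc_rsu_snapshot(struct intel_m10bmc *m10bmc)
{
	unsigned int doorbell;
	int ret;

	ret = m10bmc_sys_read(m10bmc, M10BMC_DOORBELL, &doorbell);
	if (ret)
		return ret;

	dev_info(m10bmc->dev, "RSU progress %lu, status %lu\n",
		 rsu_prog(doorbell), rsu_stat(doorbell));

	return rsu_stat(doorbell) == RSU_STAT_NORMAL ? 0 : -EIO;
}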
diff --git a/include/linux/mfd/intel_msic.h b/include/linux/mfd/intel_msic.h
deleted file mode 100644
index 317e8608cf41..000000000000
--- a/include/linux/mfd/intel_msic.h
+++ /dev/null
@@ -1,453 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Core interface for Intel MSIC
- *
- * Copyright (C) 2011, Intel Corporation
- * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- */
-
-#ifndef __LINUX_MFD_INTEL_MSIC_H__
-#define __LINUX_MFD_INTEL_MSIC_H__
-
-/* ID */
-#define INTEL_MSIC_ID0 0x000 /* RO */
-#define INTEL_MSIC_ID1 0x001 /* RO */
-
-/* IRQ */
-#define INTEL_MSIC_IRQLVL1 0x002
-#define INTEL_MSIC_ADC1INT 0x003
-#define INTEL_MSIC_CCINT 0x004
-#define INTEL_MSIC_PWRSRCINT 0x005
-#define INTEL_MSIC_PWRSRCINT1 0x006
-#define INTEL_MSIC_CHRINT 0x007
-#define INTEL_MSIC_CHRINT1 0x008
-#define INTEL_MSIC_RTCIRQ 0x009
-#define INTEL_MSIC_GPIO0LVIRQ 0x00a
-#define INTEL_MSIC_GPIO1LVIRQ 0x00b
-#define INTEL_MSIC_GPIOHVIRQ 0x00c
-#define INTEL_MSIC_VRINT 0x00d
-#define INTEL_MSIC_OCAUDIO 0x00e
-#define INTEL_MSIC_ACCDET 0x00f
-#define INTEL_MSIC_RESETIRQ1 0x010
-#define INTEL_MSIC_RESETIRQ2 0x011
-#define INTEL_MSIC_MADC1INT 0x012
-#define INTEL_MSIC_MCCINT 0x013
-#define INTEL_MSIC_MPWRSRCINT 0x014
-#define INTEL_MSIC_MPWRSRCINT1 0x015
-#define INTEL_MSIC_MCHRINT 0x016
-#define INTEL_MSIC_MCHRINT1 0x017
-#define INTEL_MSIC_RTCIRQMASK 0x018
-#define INTEL_MSIC_GPIO0LVIRQMASK 0x019
-#define INTEL_MSIC_GPIO1LVIRQMASK 0x01a
-#define INTEL_MSIC_GPIOHVIRQMASK 0x01b
-#define INTEL_MSIC_VRINTMASK 0x01c
-#define INTEL_MSIC_OCAUDIOMASK 0x01d
-#define INTEL_MSIC_ACCDETMASK 0x01e
-#define INTEL_MSIC_RESETIRQ1MASK 0x01f
-#define INTEL_MSIC_RESETIRQ2MASK 0x020
-#define INTEL_MSIC_IRQLVL1MSK 0x021
-#define INTEL_MSIC_PBCONFIG 0x03e
-#define INTEL_MSIC_PBSTATUS 0x03f /* RO */
-
-/* GPIO */
-#define INTEL_MSIC_GPIO0LV7CTLO 0x040
-#define INTEL_MSIC_GPIO0LV6CTLO 0x041
-#define INTEL_MSIC_GPIO0LV5CTLO 0x042
-#define INTEL_MSIC_GPIO0LV4CTLO 0x043
-#define INTEL_MSIC_GPIO0LV3CTLO 0x044
-#define INTEL_MSIC_GPIO0LV2CTLO 0x045
-#define INTEL_MSIC_GPIO0LV1CTLO 0x046
-#define INTEL_MSIC_GPIO0LV0CTLO 0x047
-#define INTEL_MSIC_GPIO1LV7CTLOS 0x048
-#define INTEL_MSIC_GPIO1LV6CTLO 0x049
-#define INTEL_MSIC_GPIO1LV5CTLO 0x04a
-#define INTEL_MSIC_GPIO1LV4CTLO 0x04b
-#define INTEL_MSIC_GPIO1LV3CTLO 0x04c
-#define INTEL_MSIC_GPIO1LV2CTLO 0x04d
-#define INTEL_MSIC_GPIO1LV1CTLO 0x04e
-#define INTEL_MSIC_GPIO1LV0CTLO 0x04f
-#define INTEL_MSIC_GPIO0LV7CTLI 0x050
-#define INTEL_MSIC_GPIO0LV6CTLI 0x051
-#define INTEL_MSIC_GPIO0LV5CTLI 0x052
-#define INTEL_MSIC_GPIO0LV4CTLI 0x053
-#define INTEL_MSIC_GPIO0LV3CTLI 0x054
-#define INTEL_MSIC_GPIO0LV2CTLI 0x055
-#define INTEL_MSIC_GPIO0LV1CTLI 0x056
-#define INTEL_MSIC_GPIO0LV0CTLI 0x057
-#define INTEL_MSIC_GPIO1LV7CTLIS 0x058
-#define INTEL_MSIC_GPIO1LV6CTLI 0x059
-#define INTEL_MSIC_GPIO1LV5CTLI 0x05a
-#define INTEL_MSIC_GPIO1LV4CTLI 0x05b
-#define INTEL_MSIC_GPIO1LV3CTLI 0x05c
-#define INTEL_MSIC_GPIO1LV2CTLI 0x05d
-#define INTEL_MSIC_GPIO1LV1CTLI 0x05e
-#define INTEL_MSIC_GPIO1LV0CTLI 0x05f
-#define INTEL_MSIC_PWM0CLKDIV1 0x061
-#define INTEL_MSIC_PWM0CLKDIV0 0x062
-#define INTEL_MSIC_PWM1CLKDIV1 0x063
-#define INTEL_MSIC_PWM1CLKDIV0 0x064
-#define INTEL_MSIC_PWM2CLKDIV1 0x065
-#define INTEL_MSIC_PWM2CLKDIV0 0x066
-#define INTEL_MSIC_PWM0DUTYCYCLE 0x067
-#define INTEL_MSIC_PWM1DUTYCYCLE 0x068
-#define INTEL_MSIC_PWM2DUTYCYCLE 0x069
-#define INTEL_MSIC_GPIO0HV3CTLO 0x06d
-#define INTEL_MSIC_GPIO0HV2CTLO 0x06e
-#define INTEL_MSIC_GPIO0HV1CTLO 0x06f
-#define INTEL_MSIC_GPIO0HV0CTLO 0x070
-#define INTEL_MSIC_GPIO1HV3CTLO 0x071
-#define INTEL_MSIC_GPIO1HV2CTLO 0x072
-#define INTEL_MSIC_GPIO1HV1CTLO 0x073
-#define INTEL_MSIC_GPIO1HV0CTLO 0x074
-#define INTEL_MSIC_GPIO0HV3CTLI 0x075
-#define INTEL_MSIC_GPIO0HV2CTLI 0x076
-#define INTEL_MSIC_GPIO0HV1CTLI 0x077
-#define INTEL_MSIC_GPIO0HV0CTLI 0x078
-#define INTEL_MSIC_GPIO1HV3CTLI 0x079
-#define INTEL_MSIC_GPIO1HV2CTLI 0x07a
-#define INTEL_MSIC_GPIO1HV1CTLI 0x07b
-#define INTEL_MSIC_GPIO1HV0CTLI 0x07c
-
-/* SVID */
-#define INTEL_MSIC_SVIDCTRL0 0x080
-#define INTEL_MSIC_SVIDCTRL1 0x081
-#define INTEL_MSIC_SVIDCTRL2 0x082
-#define INTEL_MSIC_SVIDTXLASTPKT3 0x083 /* RO */
-#define INTEL_MSIC_SVIDTXLASTPKT2 0x084 /* RO */
-#define INTEL_MSIC_SVIDTXLASTPKT1 0x085 /* RO */
-#define INTEL_MSIC_SVIDTXLASTPKT0 0x086 /* RO */
-#define INTEL_MSIC_SVIDPKTOUTBYTE3 0x087
-#define INTEL_MSIC_SVIDPKTOUTBYTE2 0x088
-#define INTEL_MSIC_SVIDPKTOUTBYTE1 0x089
-#define INTEL_MSIC_SVIDPKTOUTBYTE0 0x08a
-#define INTEL_MSIC_SVIDRXVPDEBUG1 0x08b
-#define INTEL_MSIC_SVIDRXVPDEBUG0 0x08c
-#define INTEL_MSIC_SVIDRXLASTPKT3 0x08d /* RO */
-#define INTEL_MSIC_SVIDRXLASTPKT2 0x08e /* RO */
-#define INTEL_MSIC_SVIDRXLASTPKT1 0x08f /* RO */
-#define INTEL_MSIC_SVIDRXLASTPKT0 0x090 /* RO */
-#define INTEL_MSIC_SVIDRXCHKSTATUS3 0x091 /* RO */
-#define INTEL_MSIC_SVIDRXCHKSTATUS2 0x092 /* RO */
-#define INTEL_MSIC_SVIDRXCHKSTATUS1 0x093 /* RO */
-#define INTEL_MSIC_SVIDRXCHKSTATUS0 0x094 /* RO */
-
-/* VREG */
-#define INTEL_MSIC_VCCLATCH 0x0c0
-#define INTEL_MSIC_VNNLATCH 0x0c1
-#define INTEL_MSIC_VCCCNT 0x0c2
-#define INTEL_MSIC_SMPSRAMP 0x0c3
-#define INTEL_MSIC_VNNCNT 0x0c4
-#define INTEL_MSIC_VNNAONCNT 0x0c5
-#define INTEL_MSIC_VCC122AONCNT 0x0c6
-#define INTEL_MSIC_V180AONCNT 0x0c7
-#define INTEL_MSIC_V500CNT 0x0c8
-#define INTEL_MSIC_VIHFCNT 0x0c9
-#define INTEL_MSIC_LDORAMP1 0x0ca
-#define INTEL_MSIC_LDORAMP2 0x0cb
-#define INTEL_MSIC_VCC108AONCNT 0x0cc
-#define INTEL_MSIC_VCC108ASCNT 0x0cd
-#define INTEL_MSIC_VCC108CNT 0x0ce
-#define INTEL_MSIC_VCCA100ASCNT 0x0cf
-#define INTEL_MSIC_VCCA100CNT 0x0d0
-#define INTEL_MSIC_VCC180AONCNT 0x0d1
-#define INTEL_MSIC_VCC180CNT 0x0d2
-#define INTEL_MSIC_VCC330CNT 0x0d3
-#define INTEL_MSIC_VUSB330CNT 0x0d4
-#define INTEL_MSIC_VCCSDIOCNT 0x0d5
-#define INTEL_MSIC_VPROG1CNT 0x0d6
-#define INTEL_MSIC_VPROG2CNT 0x0d7
-#define INTEL_MSIC_VEMMCSCNT 0x0d8
-#define INTEL_MSIC_VEMMC1CNT 0x0d9
-#define INTEL_MSIC_VEMMC2CNT 0x0da
-#define INTEL_MSIC_VAUDACNT 0x0db
-#define INTEL_MSIC_VHSPCNT 0x0dc
-#define INTEL_MSIC_VHSNCNT 0x0dd
-#define INTEL_MSIC_VHDMICNT 0x0de
-#define INTEL_MSIC_VOTGCNT 0x0df
-#define INTEL_MSIC_V1P35CNT 0x0e0
-#define INTEL_MSIC_V330AONCNT 0x0e1
-
-/* RESET */
-#define INTEL_MSIC_CHIPCNTRL 0x100 /* WO */
-#define INTEL_MSIC_ERCONFIG 0x101
-
-/* BURST */
-#define INTEL_MSIC_BATCURRENTLIMIT12 0x102
-#define INTEL_MSIC_BATTIMELIMIT12 0x103
-#define INTEL_MSIC_BATTIMELIMIT3 0x104
-#define INTEL_MSIC_BATTIMEDB 0x105
-#define INTEL_MSIC_BRSTCONFIGOUTPUTS 0x106
-#define INTEL_MSIC_BRSTCONFIGACTIONS 0x107
-#define INTEL_MSIC_BURSTCONTROLSTATUS 0x108
-
-/* RTC */
-#define INTEL_MSIC_RTCB1 0x140 /* RO */
-#define INTEL_MSIC_RTCB2 0x141 /* RO */
-#define INTEL_MSIC_RTCB3 0x142 /* RO */
-#define INTEL_MSIC_RTCB4 0x143 /* RO */
-#define INTEL_MSIC_RTCOB1 0x144
-#define INTEL_MSIC_RTCOB2 0x145
-#define INTEL_MSIC_RTCOB3 0x146
-#define INTEL_MSIC_RTCOB4 0x147
-#define INTEL_MSIC_RTCAB1 0x148
-#define INTEL_MSIC_RTCAB2 0x149
-#define INTEL_MSIC_RTCAB3 0x14a
-#define INTEL_MSIC_RTCAB4 0x14b
-#define INTEL_MSIC_RTCWAB1 0x14c
-#define INTEL_MSIC_RTCWAB2 0x14d
-#define INTEL_MSIC_RTCWAB3 0x14e
-#define INTEL_MSIC_RTCWAB4 0x14f
-#define INTEL_MSIC_RTCSC1 0x150
-#define INTEL_MSIC_RTCSC2 0x151
-#define INTEL_MSIC_RTCSC3 0x152
-#define INTEL_MSIC_RTCSC4 0x153
-#define INTEL_MSIC_RTCSTATUS 0x154 /* RO */
-#define INTEL_MSIC_RTCCONFIG1 0x155
-#define INTEL_MSIC_RTCCONFIG2 0x156
-
-/* CHARGER */
-#define INTEL_MSIC_BDTIMER 0x180
-#define INTEL_MSIC_BATTRMV 0x181
-#define INTEL_MSIC_VBUSDET 0x182
-#define INTEL_MSIC_VBUSDET1 0x183
-#define INTEL_MSIC_ADPHVDET 0x184
-#define INTEL_MSIC_ADPLVDET 0x185
-#define INTEL_MSIC_ADPDETDBDM 0x186
-#define INTEL_MSIC_LOWBATTDET 0x187
-#define INTEL_MSIC_CHRCTRL 0x188
-#define INTEL_MSIC_CHRCVOLTAGE 0x189
-#define INTEL_MSIC_CHRCCURRENT 0x18a
-#define INTEL_MSIC_SPCHARGER 0x18b
-#define INTEL_MSIC_CHRTTIME 0x18c
-#define INTEL_MSIC_CHRCTRL1 0x18d
-#define INTEL_MSIC_PWRSRCLMT 0x18e
-#define INTEL_MSIC_CHRSTWDT 0x18f
-#define INTEL_MSIC_WDTWRITE 0x190 /* WO */
-#define INTEL_MSIC_CHRSAFELMT 0x191
-#define INTEL_MSIC_SPWRSRCINT 0x192 /* RO */
-#define INTEL_MSIC_SPWRSRCINT1 0x193 /* RO */
-#define INTEL_MSIC_CHRLEDPWM 0x194
-#define INTEL_MSIC_CHRLEDCTRL 0x195
-
-/* ADC */
-#define INTEL_MSIC_ADC1CNTL1 0x1c0
-#define INTEL_MSIC_ADC1CNTL2 0x1c1
-#define INTEL_MSIC_ADC1CNTL3 0x1c2
-#define INTEL_MSIC_ADC1OFFSETH 0x1c3 /* RO */
-#define INTEL_MSIC_ADC1OFFSETL 0x1c4 /* RO */
-#define INTEL_MSIC_ADC1ADDR0 0x1c5
-#define INTEL_MSIC_ADC1ADDR1 0x1c6
-#define INTEL_MSIC_ADC1ADDR2 0x1c7
-#define INTEL_MSIC_ADC1ADDR3 0x1c8
-#define INTEL_MSIC_ADC1ADDR4 0x1c9
-#define INTEL_MSIC_ADC1ADDR5 0x1ca
-#define INTEL_MSIC_ADC1ADDR6 0x1cb
-#define INTEL_MSIC_ADC1ADDR7 0x1cc
-#define INTEL_MSIC_ADC1ADDR8 0x1cd
-#define INTEL_MSIC_ADC1ADDR9 0x1ce
-#define INTEL_MSIC_ADC1ADDR10 0x1cf
-#define INTEL_MSIC_ADC1ADDR11 0x1d0
-#define INTEL_MSIC_ADC1ADDR12 0x1d1
-#define INTEL_MSIC_ADC1ADDR13 0x1d2
-#define INTEL_MSIC_ADC1ADDR14 0x1d3
-#define INTEL_MSIC_ADC1SNS0H 0x1d4 /* RO */
-#define INTEL_MSIC_ADC1SNS0L 0x1d5 /* RO */
-#define INTEL_MSIC_ADC1SNS1H 0x1d6 /* RO */
-#define INTEL_MSIC_ADC1SNS1L 0x1d7 /* RO */
-#define INTEL_MSIC_ADC1SNS2H 0x1d8 /* RO */
-#define INTEL_MSIC_ADC1SNS2L 0x1d9 /* RO */
-#define INTEL_MSIC_ADC1SNS3H 0x1da /* RO */
-#define INTEL_MSIC_ADC1SNS3L 0x1db /* RO */
-#define INTEL_MSIC_ADC1SNS4H 0x1dc /* RO */
-#define INTEL_MSIC_ADC1SNS4L 0x1dd /* RO */
-#define INTEL_MSIC_ADC1SNS5H 0x1de /* RO */
-#define INTEL_MSIC_ADC1SNS5L 0x1df /* RO */
-#define INTEL_MSIC_ADC1SNS6H 0x1e0 /* RO */
-#define INTEL_MSIC_ADC1SNS6L 0x1e1 /* RO */
-#define INTEL_MSIC_ADC1SNS7H 0x1e2 /* RO */
-#define INTEL_MSIC_ADC1SNS7L 0x1e3 /* RO */
-#define INTEL_MSIC_ADC1SNS8H 0x1e4 /* RO */
-#define INTEL_MSIC_ADC1SNS8L 0x1e5 /* RO */
-#define INTEL_MSIC_ADC1SNS9H 0x1e6 /* RO */
-#define INTEL_MSIC_ADC1SNS9L 0x1e7 /* RO */
-#define INTEL_MSIC_ADC1SNS10H 0x1e8 /* RO */
-#define INTEL_MSIC_ADC1SNS10L 0x1e9 /* RO */
-#define INTEL_MSIC_ADC1SNS11H 0x1ea /* RO */
-#define INTEL_MSIC_ADC1SNS11L 0x1eb /* RO */
-#define INTEL_MSIC_ADC1SNS12H 0x1ec /* RO */
-#define INTEL_MSIC_ADC1SNS12L 0x1ed /* RO */
-#define INTEL_MSIC_ADC1SNS13H 0x1ee /* RO */
-#define INTEL_MSIC_ADC1SNS13L 0x1ef /* RO */
-#define INTEL_MSIC_ADC1SNS14H 0x1f0 /* RO */
-#define INTEL_MSIC_ADC1SNS14L 0x1f1 /* RO */
-#define INTEL_MSIC_ADC1BV0H 0x1f2 /* RO */
-#define INTEL_MSIC_ADC1BV0L 0x1f3 /* RO */
-#define INTEL_MSIC_ADC1BV1H 0x1f4 /* RO */
-#define INTEL_MSIC_ADC1BV1L 0x1f5 /* RO */
-#define INTEL_MSIC_ADC1BV2H 0x1f6 /* RO */
-#define INTEL_MSIC_ADC1BV2L 0x1f7 /* RO */
-#define INTEL_MSIC_ADC1BV3H 0x1f8 /* RO */
-#define INTEL_MSIC_ADC1BV3L 0x1f9 /* RO */
-#define INTEL_MSIC_ADC1BI0H 0x1fa /* RO */
-#define INTEL_MSIC_ADC1BI0L 0x1fb /* RO */
-#define INTEL_MSIC_ADC1BI1H 0x1fc /* RO */
-#define INTEL_MSIC_ADC1BI1L 0x1fd /* RO */
-#define INTEL_MSIC_ADC1BI2H 0x1fe /* RO */
-#define INTEL_MSIC_ADC1BI2L 0x1ff /* RO */
-#define INTEL_MSIC_ADC1BI3H 0x200 /* RO */
-#define INTEL_MSIC_ADC1BI3L 0x201 /* RO */
-#define INTEL_MSIC_CCCNTL 0x202
-#define INTEL_MSIC_CCOFFSETH 0x203 /* RO */
-#define INTEL_MSIC_CCOFFSETL 0x204 /* RO */
-#define INTEL_MSIC_CCADCHA 0x205 /* RO */
-#define INTEL_MSIC_CCADCLA 0x206 /* RO */
-
-/* AUDIO */
-#define INTEL_MSIC_AUDPLLCTRL 0x240
-#define INTEL_MSIC_DMICBUF0123 0x241
-#define INTEL_MSIC_DMICBUF45 0x242
-#define INTEL_MSIC_DMICGPO 0x244
-#define INTEL_MSIC_DMICMUX 0x245
-#define INTEL_MSIC_DMICCLK 0x246
-#define INTEL_MSIC_MICBIAS 0x247
-#define INTEL_MSIC_ADCCONFIG 0x248
-#define INTEL_MSIC_MICAMP1 0x249
-#define INTEL_MSIC_MICAMP2 0x24a
-#define INTEL_MSIC_NOISEMUX 0x24b
-#define INTEL_MSIC_AUDIOMUX12 0x24c
-#define INTEL_MSIC_AUDIOMUX34 0x24d
-#define INTEL_MSIC_AUDIOSINC 0x24e
-#define INTEL_MSIC_AUDIOTXEN 0x24f
-#define INTEL_MSIC_HSEPRXCTRL 0x250
-#define INTEL_MSIC_IHFRXCTRL 0x251
-#define INTEL_MSIC_VOICETXVOL 0x252
-#define INTEL_MSIC_SIDETONEVOL 0x253
-#define INTEL_MSIC_MUSICSHARVOL 0x254
-#define INTEL_MSIC_VOICETXCTRL 0x255
-#define INTEL_MSIC_HSMIXER 0x256
-#define INTEL_MSIC_DACCONFIG 0x257
-#define INTEL_MSIC_SOFTMUTE 0x258
-#define INTEL_MSIC_HSLVOLCTRL 0x259
-#define INTEL_MSIC_HSRVOLCTRL 0x25a
-#define INTEL_MSIC_IHFLVOLCTRL 0x25b
-#define INTEL_MSIC_IHFRVOLCTRL 0x25c
-#define INTEL_MSIC_DRIVEREN 0x25d
-#define INTEL_MSIC_LINEOUTCTRL 0x25e
-#define INTEL_MSIC_VIB1CTRL1 0x25f
-#define INTEL_MSIC_VIB1CTRL2 0x260
-#define INTEL_MSIC_VIB1CTRL3 0x261
-#define INTEL_MSIC_VIB1SPIPCM_1 0x262
-#define INTEL_MSIC_VIB1SPIPCM_2 0x263
-#define INTEL_MSIC_VIB1CTRL5 0x264
-#define INTEL_MSIC_VIB2CTRL1 0x265
-#define INTEL_MSIC_VIB2CTRL2 0x266
-#define INTEL_MSIC_VIB2CTRL3 0x267
-#define INTEL_MSIC_VIB2SPIPCM_1 0x268
-#define INTEL_MSIC_VIB2SPIPCM_2 0x269
-#define INTEL_MSIC_VIB2CTRL5 0x26a
-#define INTEL_MSIC_BTNCTRL1 0x26b
-#define INTEL_MSIC_BTNCTRL2 0x26c
-#define INTEL_MSIC_PCM1TXSLOT01 0x26d
-#define INTEL_MSIC_PCM1TXSLOT23 0x26e
-#define INTEL_MSIC_PCM1TXSLOT45 0x26f
-#define INTEL_MSIC_PCM1RXSLOT0123 0x270
-#define INTEL_MSIC_PCM1RXSLOT045 0x271
-#define INTEL_MSIC_PCM2TXSLOT01 0x272
-#define INTEL_MSIC_PCM2TXSLOT23 0x273
-#define INTEL_MSIC_PCM2TXSLOT45 0x274
-#define INTEL_MSIC_PCM2RXSLOT01 0x275
-#define INTEL_MSIC_PCM2RXSLOT23 0x276
-#define INTEL_MSIC_PCM2RXSLOT45 0x277
-#define INTEL_MSIC_PCM1CTRL1 0x278
-#define INTEL_MSIC_PCM1CTRL2 0x279
-#define INTEL_MSIC_PCM1CTRL3 0x27a
-#define INTEL_MSIC_PCM2CTRL1 0x27b
-#define INTEL_MSIC_PCM2CTRL2 0x27c
-
-/* HDMI */
-#define INTEL_MSIC_HDMIPUEN 0x280
-#define INTEL_MSIC_HDMISTATUS 0x281 /* RO */
-
-/* Physical address of the start of the MSIC interrupt tree in SRAM */
-#define INTEL_MSIC_IRQ_PHYS_BASE 0xffff7fc0
-
-/**
- * struct intel_msic_gpio_pdata - platform data for the MSIC GPIO driver
- * @gpio_base: base number for the GPIOs
- */
-struct intel_msic_gpio_pdata {
- unsigned gpio_base;
-};
-
-/**
- * struct intel_msic_ocd_pdata - platform data for the MSIC OCD driver
- * @gpio: GPIO number used for OCD interrupts
- *
- * The MSIC MFD driver converts @gpio into an IRQ number and passes it to
- * the OCD driver as %IORESOURCE_IRQ.
- */
-struct intel_msic_ocd_pdata {
- unsigned gpio;
-};
-
-/* MSIC embedded blocks (subdevices) */
-enum intel_msic_block {
- INTEL_MSIC_BLOCK_TOUCH,
- INTEL_MSIC_BLOCK_ADC,
- INTEL_MSIC_BLOCK_BATTERY,
- INTEL_MSIC_BLOCK_GPIO,
- INTEL_MSIC_BLOCK_AUDIO,
- INTEL_MSIC_BLOCK_HDMI,
- INTEL_MSIC_BLOCK_THERMAL,
- INTEL_MSIC_BLOCK_POWER_BTN,
- INTEL_MSIC_BLOCK_OCD,
-
- INTEL_MSIC_BLOCK_LAST,
-};
-
-/**
- * struct intel_msic_platform_data - platform data for the MSIC driver
- * @irq: array of interrupt numbers, one per device. If @irq is set to %0
- * for a given block, the corresponding platform device is not
- * created. For devices which don't have an interrupt, use %0xff
- * (this is the same as in the SFI spec).
- * @gpio: platform data for the MSIC GPIO driver
- * @ocd: platform data for the MSIC OCD driver
- *
- * Once the MSIC driver is initialized, the register interface is ready to
- * use. All the platform devices for subdevices are created after the
- * register interface is ready so that we can guarantee its availability to
- * the subdevice drivers.
- *
- * Interrupt numbers are passed to the subdevices via %IORESOURCE_IRQ
- * resources of the created platform device.
- */
-struct intel_msic_platform_data {
- int irq[INTEL_MSIC_BLOCK_LAST];
- struct intel_msic_gpio_pdata *gpio;
- struct intel_msic_ocd_pdata *ocd;
-};
-
-struct intel_msic;
-
-extern int intel_msic_reg_read(unsigned short reg, u8 *val);
-extern int intel_msic_reg_write(unsigned short reg, u8 val);
-extern int intel_msic_reg_update(unsigned short reg, u8 val, u8 mask);
-extern int intel_msic_bulk_read(unsigned short *reg, u8 *buf, size_t count);
-extern int intel_msic_bulk_write(unsigned short *reg, u8 *buf, size_t count);
-
-/*
- * pdev_to_intel_msic - gets an MSIC instance from the platform device
- * @pdev: platform device pointer
- *
- * The client drivers need to have a pointer to the MSIC instance if they
- * want to call intel_msic_irq_read(). This macro can be used for
- * convenience to get the MSIC pointer from @pdev where needed. This is
- * _only_ valid for devices which are managed by the MSIC.
- */
-#define pdev_to_intel_msic(pdev) (dev_get_drvdata(pdev->dev.parent))
-
-extern int intel_msic_irq_read(struct intel_msic *msic, unsigned short reg,
- u8 *val);
-
-#endif /* __LINUX_MFD_INTEL_MSIC_H__ */
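
For context on the interface being removed above: a minimal sketch of how an MSIC subdevice driver consumed these helpers. The probe function and the register choices are illustrative, not taken from an in-tree driver; only the helper signatures come from the header itself.

#include <linux/platform_device.h>
#include <linux/mfd/intel_msic.h>

/* Hypothetical subdevice probe: fetch the MSIC instance from the parent
 * MFD device, read one ordinary register and one interrupt-tree register. */
static int example_msic_probe(struct platform_device *pdev)
{
	struct intel_msic *msic = pdev_to_intel_msic(pdev);
	u8 val;
	int ret;

	/* Plain register access goes through the global helpers */
	ret = intel_msic_reg_read(INTEL_MSIC_RTCB1, &val);
	if (ret)
		return ret;

	/* Interrupt registers live in SRAM and need the MSIC pointer */
	return intel_msic_irq_read(msic, INTEL_MSIC_SPWRSRCINT, &val);
}
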
diff --git a/include/linux/mfd/intel_pmc_bxt.h b/include/linux/mfd/intel_pmc_bxt.h
new file mode 100644
index 000000000000..f51a43d25ffd
--- /dev/null
+++ b/include/linux/mfd/intel_pmc_bxt.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef MFD_INTEL_PMC_BXT_H
+#define MFD_INTEL_PMC_BXT_H
+
+/* GCR reg offsets from GCR base */
+#define PMC_GCR_PMC_CFG_REG 0x08
+#define PMC_GCR_TELEM_DEEP_S0IX_REG 0x78
+#define PMC_GCR_TELEM_SHLW_S0IX_REG 0x80
+
+/* PMC_CFG_REG bit masks */
+#define PMC_CFG_NO_REBOOT_EN BIT(4)
+
+/**
+ * struct intel_pmc_dev - Intel PMC device structure
+ * @dev: Pointer to the parent PMC device
+ * @scu: Pointer to the SCU IPC device data structure
+ * @gcr_mem_base: Virtual base address of GCR (Global Configuration Registers)
+ * @gcr_lock: Lock used to serialize access to GCR registers
+ * @telem_base: Pointer to telemetry SSRAM base resource or %NULL if not
+ * available
+ */
+struct intel_pmc_dev {
+ struct device *dev;
+ struct intel_scu_ipc_dev *scu;
+ void __iomem *gcr_mem_base;
+ spinlock_t gcr_lock;
+ struct resource *telem_base;
+};
+
+#if IS_ENABLED(CONFIG_MFD_INTEL_PMC_BXT)
+int intel_pmc_gcr_read64(struct intel_pmc_dev *pmc, u32 offset, u64 *data);
+int intel_pmc_gcr_update(struct intel_pmc_dev *pmc, u32 offset, u32 mask, u32 val);
+int intel_pmc_s0ix_counter_read(struct intel_pmc_dev *pmc, u64 *data);
+#else
+static inline int intel_pmc_gcr_read64(struct intel_pmc_dev *pmc, u32 offset,
+ u64 *data)
+{
+ return -ENOTSUPP;
+}
+
+static inline int intel_pmc_gcr_update(struct intel_pmc_dev *pmc, u32 offset,
+ u32 mask, u32 val)
+{
+ return -ENOTSUPP;
+}
+
+static inline int intel_pmc_s0ix_counter_read(struct intel_pmc_dev *pmc, u64 *data)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+#endif /* MFD_INTEL_PMC_BXT_H */
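
When CONFIG_MFD_INTEL_PMC_BXT is disabled, the inline stubs above return -ENOTSUPP, so callers can use the API unconditionally. A minimal sketch under that pattern; the helper name is hypothetical:

#include <linux/types.h>
#include <linux/mfd/intel_pmc_bxt.h>

/* Check whether the "no reboot" bit is set in the PMC configuration
 * register; compiles to the -ENOTSUPP stub when the driver is off. */
static int example_no_reboot_enabled(struct intel_pmc_dev *pmc, bool *enabled)
{
	u64 cfg;
	int ret;

	ret = intel_pmc_gcr_read64(pmc, PMC_GCR_PMC_CFG_REG, &cfg);
	if (ret)
		return ret;

	*enabled = !!(cfg & PMC_CFG_NO_REBOOT_EN);
	return 0;
}
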
diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h
index bfecd6bd4990..945bde1fe55c 100644
--- a/include/linux/mfd/intel_soc_pmic.h
+++ b/include/linux/mfd/intel_soc_pmic.h
@@ -13,6 +13,27 @@
#include <linux/regmap.h>
+enum intel_cht_wc_models {
+ INTEL_CHT_WC_UNKNOWN,
+ INTEL_CHT_WC_GPD_WIN_POCKET,
+ INTEL_CHT_WC_XIAOMI_MIPAD2,
+ INTEL_CHT_WC_LENOVO_YOGABOOK1,
+};
+
+/**
+ * struct intel_soc_pmic - Intel SoC PMIC data
+ * @irq: Master interrupt number of the parent PMIC device
+ * @regmap: Pointer to the parent PMIC device regmap structure
+ * @irq_chip_data: IRQ chip data for the PMIC itself
+ * @irq_chip_data_pwrbtn: Chained IRQ chip data for the Power Button
+ * @irq_chip_data_tmu: Chained IRQ chip data for the Time Management Unit
+ * @irq_chip_data_bcu: Chained IRQ chip data for the Burst Control Unit
+ * @irq_chip_data_adc: Chained IRQ chip data for the General Purpose ADC
+ * @irq_chip_data_chgr: Chained IRQ chip data for the External Charger
+ * @irq_chip_data_crit: Chained IRQ chip data for the Critical Event Handler
+ * @dev: Pointer to the parent PMIC device
+ * @scu: Pointer to the SCU IPC device data structure
+ */
struct intel_soc_pmic {
int irq;
struct regmap *regmap;
@@ -24,6 +45,8 @@ struct intel_soc_pmic {
struct regmap_irq_chip_data *irq_chip_data_chgr;
struct regmap_irq_chip_data *irq_chip_data_crit;
struct device *dev;
+ struct intel_scu_ipc_dev *scu;
+ enum intel_cht_wc_models cht_wc_model;
};
int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address,
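
The new cht_wc_model field lets cell drivers key board-specific behavior off a single enum instead of re-matching DMI data themselves. A hypothetical consumer (both the function and the policy it encodes are illustrative):

#include <linux/mfd/intel_soc_pmic.h>

static bool example_needs_board_quirk(struct intel_soc_pmic *pmic)
{
	switch (pmic->cht_wc_model) {
	case INTEL_CHT_WC_GPD_WIN_POCKET:
	case INTEL_CHT_WC_XIAOMI_MIPAD2:
		return true;
	default:
		return false;
	}
}
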
diff --git a/include/linux/mfd/ipaq-micro.h b/include/linux/mfd/ipaq-micro.h
index ee48a4321c57..d5caa4c86ecc 100644
--- a/include/linux/mfd/ipaq-micro.h
+++ b/include/linux/mfd/ipaq-micro.h
@@ -75,8 +75,8 @@ struct ipaq_micro_rxdev {
* @id: 4-bit ID of the message
* @tx_len: length of TX data
* @tx_data: TX data to send
- * @rx_len: length of receieved RX data
- * @rx_data: RX data to recieve
+ * @rx_len: length of received RX data
+ * @rx_data: RX data to receive
* @ack: a completion that will be completed when RX is complete
* @node: list node if message gets queued
*/
diff --git a/include/linux/mfd/iqs62x.h b/include/linux/mfd/iqs62x.h
new file mode 100644
index 000000000000..ffc86010af74
--- /dev/null
+++ b/include/linux/mfd/iqs62x.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Azoteq IQS620A/621/622/624/625 Multi-Function Sensors
+ *
+ * Copyright (C) 2019 Jeff LaBundy <jeff@labundy.com>
+ */
+
+#ifndef __LINUX_MFD_IQS62X_H
+#define __LINUX_MFD_IQS62X_H
+
+#define IQS620_PROD_NUM 0x41
+#define IQS621_PROD_NUM 0x46
+#define IQS622_PROD_NUM 0x42
+#define IQS624_PROD_NUM 0x43
+#define IQS625_PROD_NUM 0x4E
+
+#define IQS620_HW_NUM_V0 0x82
+#define IQS620_HW_NUM_V1 IQS620_HW_NUM_V0
+#define IQS620_HW_NUM_V2 IQS620_HW_NUM_V0
+#define IQS620_HW_NUM_V3 0x92
+
+#define IQS621_ALS_FLAGS 0x16
+#define IQS622_ALS_FLAGS 0x14
+
+#define IQS624_HALL_UI 0x70
+#define IQS624_HALL_UI_WHL_EVENT BIT(4)
+#define IQS624_HALL_UI_INT_EVENT BIT(3)
+#define IQS624_HALL_UI_AUTO_CAL BIT(2)
+
+#define IQS624_INTERVAL_DIV 0x7D
+
+#define IQS620_GLBL_EVENT_MASK 0xD7
+#define IQS620_GLBL_EVENT_MASK_PMU BIT(6)
+
+#define IQS62X_NUM_KEYS 16
+#define IQS62X_NUM_EVENTS (IQS62X_NUM_KEYS + 6)
+
+#define IQS62X_EVENT_SIZE 10
+
+enum iqs62x_ui_sel {
+ IQS62X_UI_PROX,
+ IQS62X_UI_SAR1,
+};
+
+enum iqs62x_event_reg {
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_SYS,
+ IQS62X_EVENT_PROX,
+ IQS62X_EVENT_HYST,
+ IQS62X_EVENT_HALL,
+ IQS62X_EVENT_ALS,
+ IQS62X_EVENT_IR,
+ IQS62X_EVENT_WHEEL,
+ IQS62X_EVENT_INTER,
+ IQS62X_EVENT_UI_LO,
+ IQS62X_EVENT_UI_HI,
+};
+
+enum iqs62x_event_flag {
+ /* keys */
+ IQS62X_EVENT_PROX_CH0_T,
+ IQS62X_EVENT_PROX_CH0_P,
+ IQS62X_EVENT_PROX_CH1_T,
+ IQS62X_EVENT_PROX_CH1_P,
+ IQS62X_EVENT_PROX_CH2_T,
+ IQS62X_EVENT_PROX_CH2_P,
+ IQS62X_EVENT_HYST_POS_T,
+ IQS62X_EVENT_HYST_POS_P,
+ IQS62X_EVENT_HYST_NEG_T,
+ IQS62X_EVENT_HYST_NEG_P,
+ IQS62X_EVENT_SAR1_ACT,
+ IQS62X_EVENT_SAR1_QRD,
+ IQS62X_EVENT_SAR1_MOVE,
+ IQS62X_EVENT_SAR1_HALT,
+ IQS62X_EVENT_WHEEL_UP,
+ IQS62X_EVENT_WHEEL_DN,
+
+ /* switches */
+ IQS62X_EVENT_HALL_N_T,
+ IQS62X_EVENT_HALL_N_P,
+ IQS62X_EVENT_HALL_S_T,
+ IQS62X_EVENT_HALL_S_P,
+
+ /* everything else */
+ IQS62X_EVENT_SYS_RESET,
+ IQS62X_EVENT_SYS_ATI,
+};
+
+struct iqs62x_event_data {
+ u16 ui_data;
+ u8 als_flags;
+ u8 ir_flags;
+ u8 interval;
+};
+
+struct iqs62x_event_desc {
+ enum iqs62x_event_reg reg;
+ u8 mask;
+ u8 val;
+};
+
+struct iqs62x_dev_desc {
+ const char *dev_name;
+ const struct mfd_cell *sub_devs;
+ int num_sub_devs;
+ u8 prod_num;
+ u8 sw_num;
+ const u8 *cal_regs;
+ int num_cal_regs;
+ u8 prox_mask;
+ u8 sar_mask;
+ u8 hall_mask;
+ u8 hyst_mask;
+ u8 temp_mask;
+ u8 als_mask;
+ u8 ir_mask;
+ u8 prox_settings;
+ u8 als_flags;
+ u8 hall_flags;
+ u8 hyst_shift;
+ u8 interval;
+ u8 interval_div;
+ const char *fw_name;
+ const enum iqs62x_event_reg (*event_regs)[IQS62X_EVENT_SIZE];
+};
+
+struct iqs62x_core {
+ const struct iqs62x_dev_desc *dev_desc;
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct blocking_notifier_head nh;
+ struct list_head fw_blk_head;
+ struct completion ati_done;
+ struct completion fw_done;
+ enum iqs62x_ui_sel ui_sel;
+ unsigned long event_cache;
+ u8 sw_num;
+ u8 hw_num;
+};
+
+extern const struct iqs62x_event_desc iqs62x_events[IQS62X_NUM_EVENTS];
+
+#endif /* __LINUX_MFD_IQS62X_H */
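
Child drivers receive events through the blocking notifier head in struct iqs62x_core. A sketch, assuming the core passes the fired event flags as the notifier value and a struct iqs62x_event_data pointer as the context argument; the handler and registration helper names are hypothetical:

#include <linux/bits.h>
#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/mfd/iqs62x.h>

static int example_iqs62x_notifier(struct notifier_block *nb,
				   unsigned long event_flags, void *context)
{
	struct iqs62x_event_data *event_data = context;

	/* Bail out early on device reset, otherwise consume the ALS flags */
	if (event_flags & BIT(IQS62X_EVENT_SYS_RESET))
		return NOTIFY_OK;

	pr_debug("ALS flags: 0x%02x\n", event_data->als_flags);
	return NOTIFY_OK;
}

/* Registration against the core's notifier head */
static int example_iqs62x_register(struct iqs62x_core *iqs62x,
				   struct notifier_block *nb)
{
	nb->notifier_call = example_iqs62x_notifier;
	return blocking_notifier_chain_register(&iqs62x->nh, nb);
}
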
diff --git a/include/linux/mfd/khadas-mcu.h b/include/linux/mfd/khadas-mcu.h
new file mode 100644
index 000000000000..a99ba2ed0e4e
--- /dev/null
+++ b/include/linux/mfd/khadas-mcu.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Khadas System control Microcontroller Register map
+ *
+ * Copyright (C) 2020 BayLibre SAS
+ *
+ * Author(s): Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef MFD_KHADAS_MCU_H
+#define MFD_KHADAS_MCU_H
+
+#define KHADAS_MCU_PASSWD_VEN_0_REG 0x00 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_1_REG 0x01 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_2_REG 0x02 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_3_REG 0x03 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_4_REG 0x04 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_5_REG 0x05 /* RO */
+#define KHADAS_MCU_MAC_0_REG 0x06 /* RO */
+#define KHADAS_MCU_MAC_1_REG 0x07 /* RO */
+#define KHADAS_MCU_MAC_2_REG 0x08 /* RO */
+#define KHADAS_MCU_MAC_3_REG 0x09 /* RO */
+#define KHADAS_MCU_MAC_4_REG 0x0a /* RO */
+#define KHADAS_MCU_MAC_5_REG 0x0b /* RO */
+#define KHADAS_MCU_USID_0_REG 0x0c /* RO */
+#define KHADAS_MCU_USID_1_REG 0x0d /* RO */
+#define KHADAS_MCU_USID_2_REG 0x0e /* RO */
+#define KHADAS_MCU_USID_3_REG 0x0f /* RO */
+#define KHADAS_MCU_USID_4_REG 0x10 /* RO */
+#define KHADAS_MCU_USID_5_REG 0x11 /* RO */
+#define KHADAS_MCU_VERSION_0_REG 0x12 /* RO */
+#define KHADAS_MCU_VERSION_1_REG 0x13 /* RO */
+#define KHADAS_MCU_DEVICE_NO_0_REG 0x14 /* RO */
+#define KHADAS_MCU_DEVICE_NO_1_REG 0x15 /* RO */
+#define KHADAS_MCU_FACTORY_TEST_REG 0x16 /* RO */
+#define KHADAS_MCU_BOOT_MODE_REG 0x20 /* RW */
+#define KHADAS_MCU_BOOT_EN_WOL_REG 0x21 /* RW */
+#define KHADAS_MCU_BOOT_EN_RTC_REG 0x22 /* RW */
+#define KHADAS_MCU_BOOT_EN_EXP_REG 0x23 /* RW */
+#define KHADAS_MCU_BOOT_EN_IR_REG 0x24 /* RW */
+#define KHADAS_MCU_BOOT_EN_DCIN_REG 0x25 /* RW */
+#define KHADAS_MCU_BOOT_EN_KEY_REG 0x26 /* RW */
+#define KHADAS_MCU_KEY_MODE_REG 0x27 /* RW */
+#define KHADAS_MCU_LED_MODE_ON_REG 0x28 /* RW */
+#define KHADAS_MCU_LED_MODE_OFF_REG 0x29 /* RW */
+#define KHADAS_MCU_SHUTDOWN_NORMAL_REG 0x2c /* RW */
+#define KHADAS_MCU_MAC_SWITCH_REG 0x2d /* RW */
+#define KHADAS_MCU_MCU_SLEEP_MODE_REG 0x2e /* RW */
+#define KHADAS_MCU_IR_CODE1_0_REG 0x2f /* RW */
+#define KHADAS_MCU_IR_CODE1_1_REG 0x30 /* RW */
+#define KHADAS_MCU_IR_CODE1_2_REG 0x31 /* RW */
+#define KHADAS_MCU_IR_CODE1_3_REG 0x32 /* RW */
+#define KHADAS_MCU_USB_PCIE_SWITCH_REG 0x33 /* RW */
+#define KHADAS_MCU_IR_CODE2_0_REG 0x34 /* RW */
+#define KHADAS_MCU_IR_CODE2_1_REG 0x35 /* RW */
+#define KHADAS_MCU_IR_CODE2_2_REG 0x36 /* RW */
+#define KHADAS_MCU_IR_CODE2_3_REG 0x37 /* RW */
+#define KHADAS_MCU_PASSWD_USER_0_REG 0x40 /* RW */
+#define KHADAS_MCU_PASSWD_USER_1_REG 0x41 /* RW */
+#define KHADAS_MCU_PASSWD_USER_2_REG 0x42 /* RW */
+#define KHADAS_MCU_PASSWD_USER_3_REG 0x43 /* RW */
+#define KHADAS_MCU_PASSWD_USER_4_REG 0x44 /* RW */
+#define KHADAS_MCU_PASSWD_USER_5_REG 0x45 /* RW */
+#define KHADAS_MCU_USER_DATA_0_REG 0x46 /* RW 56 bytes */
+#define KHADAS_MCU_PWR_OFF_CMD_REG 0x80 /* WO */
+#define KHADAS_MCU_PASSWD_START_REG 0x81 /* WO */
+#define KHADAS_MCU_CHECK_VEN_PASSWD_REG 0x82 /* WO */
+#define KHADAS_MCU_CHECK_USER_PASSWD_REG 0x83 /* WO */
+#define KHADAS_MCU_SHUTDOWN_NORMAL_STATUS_REG 0x86 /* RO */
+#define KHADAS_MCU_WOL_INIT_START_REG 0x87 /* WO */
+#define KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG 0x88 /* WO */
+
+enum {
+ KHADAS_BOARD_VIM1 = 0x1,
+ KHADAS_BOARD_VIM2,
+ KHADAS_BOARD_VIM3,
+ KHADAS_BOARD_EDGE = 0x11,
+ KHADAS_BOARD_EDGE_V,
+};
+
+/**
+ * struct khadas_mcu - Khadas MCU structure
+ * @dev: device reference used for logs
+ * @regmap: register map
+ */
+struct khadas_mcu {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+#endif /* MFD_KHADAS_MCU_H */
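
Consumers talk to the MCU purely through the regmap. A minimal sketch reading the two firmware-version bytes; the helper name and the assembly order (VERSION_1 as the high byte) are assumptions:

#include <linux/regmap.h>
#include <linux/mfd/khadas-mcu.h>

static int example_mcu_version(struct khadas_mcu *mcu, unsigned int *version)
{
	unsigned int lo, hi;
	int ret;

	ret = regmap_read(mcu->regmap, KHADAS_MCU_VERSION_0_REG, &lo);
	if (ret)
		return ret;

	ret = regmap_read(mcu->regmap, KHADAS_MCU_VERSION_1_REG, &hi);
	if (ret)
		return ret;

	*version = (hi << 8) | lo;	/* assumed byte order */
	return 0;
}
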
diff --git a/include/linux/mfd/lp873x.h b/include/linux/mfd/lp873x.h
index edbec8350a49..fe8174cc8637 100644
--- a/include/linux/mfd/lp873x.h
+++ b/include/linux/mfd/lp873x.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Functions to access LP873X power management chip.
*
- * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __LINUX_MFD_LP873X_H
diff --git a/include/linux/mfd/lp87565.h b/include/linux/mfd/lp87565.h
index ce965354bbad..4c895072d91b 100644
--- a/include/linux/mfd/lp87565.h
+++ b/include/linux/mfd/lp87565.h
@@ -2,7 +2,7 @@
/*
* Functions to access LP87565 power management chip.
*
- * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __LINUX_MFD_LP87565_H
@@ -14,6 +14,7 @@
enum lp87565_device_type {
LP87565_DEVICE_TYPE_UNKNOWN = 0,
+ LP87565_DEVICE_TYPE_LP87524_Q1,
LP87565_DEVICE_TYPE_LP87561_Q1,
LP87565_DEVICE_TYPE_LP87565_Q1,
};
@@ -221,34 +222,20 @@ enum lp87565_device_type {
#define LP87565_GPIO2_SEL BIT(1)
#define LP87565_GPIO1_SEL BIT(0)
-#define LP87565_GOIO3_OD BIT(6)
-#define LP87565_GOIO2_OD BIT(5)
-#define LP87565_GOIO1_OD BIT(4)
-#define LP87565_GOIO3_DIR BIT(2)
-#define LP87565_GOIO2_DIR BIT(1)
-#define LP87565_GOIO1_DIR BIT(0)
-
-#define LP87565_GOIO3_IN BIT(2)
-#define LP87565_GOIO2_IN BIT(1)
-#define LP87565_GOIO1_IN BIT(0)
-
-#define LP87565_GOIO3_OUT BIT(2)
-#define LP87565_GOIO2_OUT BIT(1)
-#define LP87565_GOIO1_OUT BIT(0)
-
-/* Number of step-down converters available */
-#define LP87565_NUM_BUCK 6
-
-enum LP87565_regulator_id {
- /* BUCK's */
- LP87565_BUCK_0,
- LP87565_BUCK_1,
- LP87565_BUCK_2,
- LP87565_BUCK_3,
- LP87565_BUCK_10,
- LP87565_BUCK_23,
- LP87565_BUCK_3210,
-};
+#define LP87565_GPIO3_OD BIT(6)
+#define LP87565_GPIO2_OD BIT(5)
+#define LP87565_GPIO1_OD BIT(4)
+#define LP87565_GPIO3_DIR BIT(2)
+#define LP87565_GPIO2_DIR BIT(1)
+#define LP87565_GPIO1_DIR BIT(0)
+
+#define LP87565_GPIO3_IN BIT(2)
+#define LP87565_GPIO2_IN BIT(1)
+#define LP87565_GPIO1_IN BIT(0)
+
+#define LP87565_GPIO3_OUT BIT(2)
+#define LP87565_GPIO2_OUT BIT(1)
+#define LP87565_GPIO1_OUT BIT(0)
/**
* struct LP87565 - state holder for the LP87565 driver
@@ -265,5 +252,6 @@ struct lp87565 {
u8 rev;
u8 dev_type;
struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
};
#endif /* __LINUX_MFD_LP87565_H */
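
The GOIO-to-GPIO rename above makes the GPIO bit macros usable by name. A sketch of driving GPIO1 as an output with regmap_update_bits(); the LP87565_REG_GPIO_CONFIG and LP87565_REG_GPIO_OUT register names are not shown in this hunk and should be treated as assumptions, as should the helper name:

#include <linux/regmap.h>
#include <linux/mfd/lp87565.h>

/* Hypothetical helper: configure GPIO1 as an output and drive it high. */
static int example_lp87565_gpio1_high(struct lp87565 *lp)
{
	int ret;

	ret = regmap_update_bits(lp->regmap, LP87565_REG_GPIO_CONFIG,
				 LP87565_GPIO1_DIR, LP87565_GPIO1_DIR);
	if (ret)
		return ret;

	return regmap_update_bits(lp->regmap, LP87565_REG_GPIO_OUT,
				  LP87565_GPIO1_OUT, LP87565_GPIO1_OUT);
}
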
diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h
index 6ddca2bbb3a8..ea4a4b1b246a 100644
--- a/include/linux/mfd/lpc_ich.h
+++ b/include/linux/mfd/lpc_ich.h
@@ -8,7 +8,7 @@
#ifndef LPC_ICH_H
#define LPC_ICH_H
-#include <linux/platform_data/intel-spi.h>
+#include <linux/platform_data/x86/spi-intel.h>
/* GPIO resources */
#define ICH_RES_GPIO 0
diff --git a/include/linux/mfd/madera/core.h b/include/linux/mfd/madera/core.h
index ad2c138105d4..03a8a788424a 100644
--- a/include/linux/mfd/madera/core.h
+++ b/include/linux/mfd/madera/core.h
@@ -186,6 +186,7 @@ struct madera {
struct regulator_bulk_data core_supplies[MADERA_MAX_CORE_SUPPLIES];
struct regulator *dcvdd;
bool internal_dcvdd;
+ bool reset_errata;
struct madera_pdata pdata;
diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h
index fa9595dd42ba..32e3470708ed 100644
--- a/include/linux/mfd/madera/pdata.h
+++ b/include/linux/mfd/madera/pdata.h
@@ -21,7 +21,6 @@
struct gpio_desc;
struct pinctrl_map;
-struct madera_codec_pdata;
/**
* struct madera_pdata - Configuration data for Madera devices
@@ -32,7 +31,7 @@ struct madera_codec_pdata;
* @irq_flags: Mode for primary IRQ (defaults to active low)
* @gpio_base: Base GPIO number
* @gpio_configs: Array of GPIO configurations (See
- * Documentation/driver-api/pinctl.rst)
+ * Documentation/driver-api/pin-control.rst)
* @n_gpio_configs: Number of entries in gpio_configs
* @gpsw: General purpose switch mode setting. Depends on the external
* hardware connected to the switch. (See the SW1_MODE field
diff --git a/include/linux/mfd/madera/registers.h b/include/linux/mfd/madera/registers.h
index fe909d177762..b44aeb461d0c 100644
--- a/include/linux/mfd/madera/registers.h
+++ b/include/linux/mfd/madera/registers.h
@@ -1286,566 +1286,438 @@
/* (0x0000) Software_Reset */
#define MADERA_SW_RST_DEV_ID1_MASK 0xFFFF
#define MADERA_SW_RST_DEV_ID1_SHIFT 0
-#define MADERA_SW_RST_DEV_ID1_WIDTH 16
/* (0x0001) Hardware_Revision */
#define MADERA_HW_REVISION_MASK 0x00FF
#define MADERA_HW_REVISION_SHIFT 0
-#define MADERA_HW_REVISION_WIDTH 8
/* (0x0020) Tone_Generator_1 */
#define MADERA_TONE2_ENA 0x0002
#define MADERA_TONE2_ENA_MASK 0x0002
#define MADERA_TONE2_ENA_SHIFT 1
-#define MADERA_TONE2_ENA_WIDTH 1
#define MADERA_TONE1_ENA 0x0001
#define MADERA_TONE1_ENA_MASK 0x0001
#define MADERA_TONE1_ENA_SHIFT 0
-#define MADERA_TONE1_ENA_WIDTH 1
/* (0x0021) Tone_Generator_2 */
#define MADERA_TONE1_LVL_0_MASK 0xFFFF
#define MADERA_TONE1_LVL_0_SHIFT 0
-#define MADERA_TONE1_LVL_0_WIDTH 16
/* (0x0022) Tone_Generator_3 */
#define MADERA_TONE1_LVL_MASK 0x00FF
#define MADERA_TONE1_LVL_SHIFT 0
-#define MADERA_TONE1_LVL_WIDTH 8
/* (0x0023) Tone_Generator_4 */
#define MADERA_TONE2_LVL_0_MASK 0xFFFF
#define MADERA_TONE2_LVL_0_SHIFT 0
-#define MADERA_TONE2_LVL_0_WIDTH 16
/* (0x0024) Tone_Generator_5 */
#define MADERA_TONE2_LVL_MASK 0x00FF
#define MADERA_TONE2_LVL_SHIFT 0
-#define MADERA_TONE2_LVL_WIDTH 8
/* (0x0030) PWM_Drive_1 */
#define MADERA_PWM2_ENA 0x0002
#define MADERA_PWM2_ENA_MASK 0x0002
#define MADERA_PWM2_ENA_SHIFT 1
-#define MADERA_PWM2_ENA_WIDTH 1
#define MADERA_PWM1_ENA 0x0001
#define MADERA_PWM1_ENA_MASK 0x0001
#define MADERA_PWM1_ENA_SHIFT 0
-#define MADERA_PWM1_ENA_WIDTH 1
/* (0x00A0) Comfort_Noise_Generator */
#define MADERA_NOISE_GEN_ENA 0x0020
#define MADERA_NOISE_GEN_ENA_MASK 0x0020
#define MADERA_NOISE_GEN_ENA_SHIFT 5
-#define MADERA_NOISE_GEN_ENA_WIDTH 1
#define MADERA_NOISE_GEN_GAIN_MASK 0x001F
#define MADERA_NOISE_GEN_GAIN_SHIFT 0
-#define MADERA_NOISE_GEN_GAIN_WIDTH 5
/* (0x0100) Clock_32k_1 */
#define MADERA_CLK_32K_ENA 0x0040
#define MADERA_CLK_32K_ENA_MASK 0x0040
#define MADERA_CLK_32K_ENA_SHIFT 6
-#define MADERA_CLK_32K_ENA_WIDTH 1
#define MADERA_CLK_32K_SRC_MASK 0x0003
#define MADERA_CLK_32K_SRC_SHIFT 0
-#define MADERA_CLK_32K_SRC_WIDTH 2
/* (0x0101) System_Clock_1 */
#define MADERA_SYSCLK_FRAC 0x8000
#define MADERA_SYSCLK_FRAC_MASK 0x8000
#define MADERA_SYSCLK_FRAC_SHIFT 15
-#define MADERA_SYSCLK_FRAC_WIDTH 1
#define MADERA_SYSCLK_FREQ_MASK 0x0700
#define MADERA_SYSCLK_FREQ_SHIFT 8
-#define MADERA_SYSCLK_FREQ_WIDTH 3
#define MADERA_SYSCLK_ENA 0x0040
#define MADERA_SYSCLK_ENA_MASK 0x0040
#define MADERA_SYSCLK_ENA_SHIFT 6
-#define MADERA_SYSCLK_ENA_WIDTH 1
#define MADERA_SYSCLK_SRC_MASK 0x000F
#define MADERA_SYSCLK_SRC_SHIFT 0
-#define MADERA_SYSCLK_SRC_WIDTH 4
/* (0x0102) Sample_rate_1 */
#define MADERA_SAMPLE_RATE_1_MASK 0x001F
#define MADERA_SAMPLE_RATE_1_SHIFT 0
-#define MADERA_SAMPLE_RATE_1_WIDTH 5
/* (0x0103) Sample_rate_2 */
#define MADERA_SAMPLE_RATE_2_MASK 0x001F
#define MADERA_SAMPLE_RATE_2_SHIFT 0
-#define MADERA_SAMPLE_RATE_2_WIDTH 5
/* (0x0104) Sample_rate_3 */
#define MADERA_SAMPLE_RATE_3_MASK 0x001F
#define MADERA_SAMPLE_RATE_3_SHIFT 0
-#define MADERA_SAMPLE_RATE_3_WIDTH 5
/* (0x0112) Async_clock_1 */
#define MADERA_ASYNC_CLK_FREQ_MASK 0x0700
#define MADERA_ASYNC_CLK_FREQ_SHIFT 8
-#define MADERA_ASYNC_CLK_FREQ_WIDTH 3
#define MADERA_ASYNC_CLK_ENA 0x0040
#define MADERA_ASYNC_CLK_ENA_MASK 0x0040
#define MADERA_ASYNC_CLK_ENA_SHIFT 6
-#define MADERA_ASYNC_CLK_ENA_WIDTH 1
#define MADERA_ASYNC_CLK_SRC_MASK 0x000F
#define MADERA_ASYNC_CLK_SRC_SHIFT 0
-#define MADERA_ASYNC_CLK_SRC_WIDTH 4
/* (0x0113) Async_sample_rate_1 */
#define MADERA_ASYNC_SAMPLE_RATE_1_MASK 0x001F
#define MADERA_ASYNC_SAMPLE_RATE_1_SHIFT 0
-#define MADERA_ASYNC_SAMPLE_RATE_1_WIDTH 5
/* (0x0114) Async_sample_rate_2 */
#define MADERA_ASYNC_SAMPLE_RATE_2_MASK 0x001F
#define MADERA_ASYNC_SAMPLE_RATE_2_SHIFT 0
-#define MADERA_ASYNC_SAMPLE_RATE_2_WIDTH 5
/* (0x0120) DSP_Clock_1 */
#define MADERA_DSP_CLK_FREQ_LEGACY 0x0700
#define MADERA_DSP_CLK_FREQ_LEGACY_MASK 0x0700
#define MADERA_DSP_CLK_FREQ_LEGACY_SHIFT 8
-#define MADERA_DSP_CLK_FREQ_LEGACY_WIDTH 3
#define MADERA_DSP_CLK_ENA 0x0040
#define MADERA_DSP_CLK_ENA_MASK 0x0040
#define MADERA_DSP_CLK_ENA_SHIFT 6
-#define MADERA_DSP_CLK_ENA_WIDTH 1
#define MADERA_DSP_CLK_SRC 0x000F
#define MADERA_DSP_CLK_SRC_MASK 0x000F
#define MADERA_DSP_CLK_SRC_SHIFT 0
-#define MADERA_DSP_CLK_SRC_WIDTH 4
/* (0x0122) DSP_Clock_2 */
#define MADERA_DSP_CLK_FREQ_MASK 0x03FF
#define MADERA_DSP_CLK_FREQ_SHIFT 0
-#define MADERA_DSP_CLK_FREQ_WIDTH 10
/* (0x0149) Output_system_clock */
#define MADERA_OPCLK_ENA 0x8000
#define MADERA_OPCLK_ENA_MASK 0x8000
#define MADERA_OPCLK_ENA_SHIFT 15
-#define MADERA_OPCLK_ENA_WIDTH 1
#define MADERA_OPCLK_DIV_MASK 0x00F8
#define MADERA_OPCLK_DIV_SHIFT 3
-#define MADERA_OPCLK_DIV_WIDTH 5
#define MADERA_OPCLK_SEL_MASK 0x0007
#define MADERA_OPCLK_SEL_SHIFT 0
-#define MADERA_OPCLK_SEL_WIDTH 3
/* (0x014A) Output_async_clock */
#define MADERA_OPCLK_ASYNC_ENA 0x8000
#define MADERA_OPCLK_ASYNC_ENA_MASK 0x8000
#define MADERA_OPCLK_ASYNC_ENA_SHIFT 15
-#define MADERA_OPCLK_ASYNC_ENA_WIDTH 1
#define MADERA_OPCLK_ASYNC_DIV_MASK 0x00F8
#define MADERA_OPCLK_ASYNC_DIV_SHIFT 3
-#define MADERA_OPCLK_ASYNC_DIV_WIDTH 5
#define MADERA_OPCLK_ASYNC_SEL_MASK 0x0007
#define MADERA_OPCLK_ASYNC_SEL_SHIFT 0
-#define MADERA_OPCLK_ASYNC_SEL_WIDTH 3
/* (0x0171) FLL1_Control_1 */
#define CS47L92_FLL1_REFCLK_SRC_MASK 0xF000
#define CS47L92_FLL1_REFCLK_SRC_SHIFT 12
-#define CS47L92_FLL1_REFCLK_SRC_WIDTH 4
#define MADERA_FLL1_HOLD_MASK 0x0004
#define MADERA_FLL1_HOLD_SHIFT 2
-#define MADERA_FLL1_HOLD_WIDTH 1
#define MADERA_FLL1_FREERUN 0x0002
#define MADERA_FLL1_FREERUN_MASK 0x0002
#define MADERA_FLL1_FREERUN_SHIFT 1
-#define MADERA_FLL1_FREERUN_WIDTH 1
#define MADERA_FLL1_ENA 0x0001
#define MADERA_FLL1_ENA_MASK 0x0001
#define MADERA_FLL1_ENA_SHIFT 0
-#define MADERA_FLL1_ENA_WIDTH 1
/* (0x0172) FLL1_Control_2 */
#define MADERA_FLL1_CTRL_UPD 0x8000
#define MADERA_FLL1_CTRL_UPD_MASK 0x8000
#define MADERA_FLL1_CTRL_UPD_SHIFT 15
-#define MADERA_FLL1_CTRL_UPD_WIDTH 1
#define MADERA_FLL1_N_MASK 0x03FF
#define MADERA_FLL1_N_SHIFT 0
-#define MADERA_FLL1_N_WIDTH 10
/* (0x0173) FLL1_Control_3 */
#define MADERA_FLL1_THETA_MASK 0xFFFF
#define MADERA_FLL1_THETA_SHIFT 0
-#define MADERA_FLL1_THETA_WIDTH 16
/* (0x0174) FLL1_Control_4 */
#define MADERA_FLL1_LAMBDA_MASK 0xFFFF
#define MADERA_FLL1_LAMBDA_SHIFT 0
-#define MADERA_FLL1_LAMBDA_WIDTH 16
/* (0x0175) FLL1_Control_5 */
#define MADERA_FLL1_FRATIO_MASK 0x0F00
#define MADERA_FLL1_FRATIO_SHIFT 8
-#define MADERA_FLL1_FRATIO_WIDTH 4
#define MADERA_FLL1_FB_DIV_MASK 0x03FF
#define MADERA_FLL1_FB_DIV_SHIFT 0
-#define MADERA_FLL1_FB_DIV_WIDTH 10
/* (0x0176) FLL1_Control_6 */
#define MADERA_FLL1_REFCLK_DIV_MASK 0x00C0
#define MADERA_FLL1_REFCLK_DIV_SHIFT 6
-#define MADERA_FLL1_REFCLK_DIV_WIDTH 2
#define MADERA_FLL1_REFCLK_SRC_MASK 0x000F
#define MADERA_FLL1_REFCLK_SRC_SHIFT 0
-#define MADERA_FLL1_REFCLK_SRC_WIDTH 4
/* (0x0179) FLL1_Control_7 */
#define MADERA_FLL1_GAIN_MASK 0x003c
#define MADERA_FLL1_GAIN_SHIFT 2
-#define MADERA_FLL1_GAIN_WIDTH 4
/* (0x017A) FLL1_EFS_2 */
#define MADERA_FLL1_PHASE_GAIN_MASK 0xF000
#define MADERA_FLL1_PHASE_GAIN_SHIFT 12
-#define MADERA_FLL1_PHASE_GAIN_WIDTH 4
#define MADERA_FLL1_PHASE_ENA_MASK 0x0800
#define MADERA_FLL1_PHASE_ENA_SHIFT 11
-#define MADERA_FLL1_PHASE_ENA_WIDTH 1
/* (0x017A) FLL1_Control_10 */
#define MADERA_FLL1_HP_MASK 0xC000
#define MADERA_FLL1_HP_SHIFT 14
-#define MADERA_FLL1_HP_WIDTH 2
#define MADERA_FLL1_PHASEDET_ENA_MASK 0x1000
#define MADERA_FLL1_PHASEDET_ENA_SHIFT 12
-#define MADERA_FLL1_PHASEDET_ENA_WIDTH 1
/* (0x017B) FLL1_Control_11 */
#define MADERA_FLL1_LOCKDET_THR_MASK 0x001E
#define MADERA_FLL1_LOCKDET_THR_SHIFT 1
-#define MADERA_FLL1_LOCKDET_THR_WIDTH 4
#define MADERA_FLL1_LOCKDET_MASK 0x0001
#define MADERA_FLL1_LOCKDET_SHIFT 0
-#define MADERA_FLL1_LOCKDET_WIDTH 1
/* (0x017D) FLL1_Digital_Test_1 */
#define MADERA_FLL1_SYNC_EFS_ENA_MASK 0x0100
#define MADERA_FLL1_SYNC_EFS_ENA_SHIFT 8
-#define MADERA_FLL1_SYNC_EFS_ENA_WIDTH 1
#define MADERA_FLL1_CLK_VCO_FAST_SRC_MASK 0x0003
#define MADERA_FLL1_CLK_VCO_FAST_SRC_SHIFT 0
-#define MADERA_FLL1_CLK_VCO_FAST_SRC_WIDTH 2
/* (0x0181) FLL1_Synchroniser_1 */
#define MADERA_FLL1_SYNC_ENA 0x0001
#define MADERA_FLL1_SYNC_ENA_MASK 0x0001
#define MADERA_FLL1_SYNC_ENA_SHIFT 0
-#define MADERA_FLL1_SYNC_ENA_WIDTH 1
/* (0x0182) FLL1_Synchroniser_2 */
#define MADERA_FLL1_SYNC_N_MASK 0x03FF
#define MADERA_FLL1_SYNC_N_SHIFT 0
-#define MADERA_FLL1_SYNC_N_WIDTH 10
/* (0x0183) FLL1_Synchroniser_3 */
#define MADERA_FLL1_SYNC_THETA_MASK 0xFFFF
#define MADERA_FLL1_SYNC_THETA_SHIFT 0
-#define MADERA_FLL1_SYNC_THETA_WIDTH 16
/* (0x0184) FLL1_Synchroniser_4 */
#define MADERA_FLL1_SYNC_LAMBDA_MASK 0xFFFF
#define MADERA_FLL1_SYNC_LAMBDA_SHIFT 0
-#define MADERA_FLL1_SYNC_LAMBDA_WIDTH 16
/* (0x0185) FLL1_Synchroniser_5 */
#define MADERA_FLL1_SYNC_FRATIO_MASK 0x0700
#define MADERA_FLL1_SYNC_FRATIO_SHIFT 8
-#define MADERA_FLL1_SYNC_FRATIO_WIDTH 3
/* (0x0186) FLL1_Synchroniser_6 */
#define MADERA_FLL1_SYNCCLK_DIV_MASK 0x00C0
#define MADERA_FLL1_SYNCCLK_DIV_SHIFT 6
-#define MADERA_FLL1_SYNCCLK_DIV_WIDTH 2
#define MADERA_FLL1_SYNCCLK_SRC_MASK 0x000F
#define MADERA_FLL1_SYNCCLK_SRC_SHIFT 0
-#define MADERA_FLL1_SYNCCLK_SRC_WIDTH 4
/* (0x0187) FLL1_Synchroniser_7 */
#define MADERA_FLL1_SYNC_GAIN_MASK 0x003c
#define MADERA_FLL1_SYNC_GAIN_SHIFT 2
-#define MADERA_FLL1_SYNC_GAIN_WIDTH 4
#define MADERA_FLL1_SYNC_DFSAT 0x0001
#define MADERA_FLL1_SYNC_DFSAT_MASK 0x0001
#define MADERA_FLL1_SYNC_DFSAT_SHIFT 0
-#define MADERA_FLL1_SYNC_DFSAT_WIDTH 1
/* (0x01D1) FLL_AO_Control_1 */
#define MADERA_FLL_AO_HOLD 0x0004
#define MADERA_FLL_AO_HOLD_MASK 0x0004
#define MADERA_FLL_AO_HOLD_SHIFT 2
-#define MADERA_FLL_AO_HOLD_WIDTH 1
#define MADERA_FLL_AO_FREERUN 0x0002
#define MADERA_FLL_AO_FREERUN_MASK 0x0002
#define MADERA_FLL_AO_FREERUN_SHIFT 1
-#define MADERA_FLL_AO_FREERUN_WIDTH 1
#define MADERA_FLL_AO_ENA 0x0001
#define MADERA_FLL_AO_ENA_MASK 0x0001
#define MADERA_FLL_AO_ENA_SHIFT 0
-#define MADERA_FLL_AO_ENA_WIDTH 1
/* (0x01D2) FLL_AO_Control_2 */
#define MADERA_FLL_AO_CTRL_UPD 0x8000
#define MADERA_FLL_AO_CTRL_UPD_MASK 0x8000
#define MADERA_FLL_AO_CTRL_UPD_SHIFT 15
-#define MADERA_FLL_AO_CTRL_UPD_WIDTH 1
/* (0x01D6) FLL_AO_Control_6 */
#define MADERA_FLL_AO_REFCLK_SRC_MASK 0x000F
#define MADERA_FLL_AO_REFCLK_SRC_SHIFT 0
-#define MADERA_FLL_AO_REFCLK_SRC_WIDTH 4
/* (0x0200) Mic_Charge_Pump_1 */
#define MADERA_CPMIC_BYPASS 0x0002
#define MADERA_CPMIC_BYPASS_MASK 0x0002
#define MADERA_CPMIC_BYPASS_SHIFT 1
-#define MADERA_CPMIC_BYPASS_WIDTH 1
#define MADERA_CPMIC_ENA 0x0001
#define MADERA_CPMIC_ENA_MASK 0x0001
#define MADERA_CPMIC_ENA_SHIFT 0
-#define MADERA_CPMIC_ENA_WIDTH 1
/* (0x0210) LDO1_Control_1 */
#define MADERA_LDO1_VSEL_MASK 0x07E0
#define MADERA_LDO1_VSEL_SHIFT 5
-#define MADERA_LDO1_VSEL_WIDTH 6
#define MADERA_LDO1_FAST 0x0010
#define MADERA_LDO1_FAST_MASK 0x0010
#define MADERA_LDO1_FAST_SHIFT 4
-#define MADERA_LDO1_FAST_WIDTH 1
#define MADERA_LDO1_DISCH 0x0004
#define MADERA_LDO1_DISCH_MASK 0x0004
#define MADERA_LDO1_DISCH_SHIFT 2
-#define MADERA_LDO1_DISCH_WIDTH 1
#define MADERA_LDO1_BYPASS 0x0002
#define MADERA_LDO1_BYPASS_MASK 0x0002
#define MADERA_LDO1_BYPASS_SHIFT 1
-#define MADERA_LDO1_BYPASS_WIDTH 1
#define MADERA_LDO1_ENA 0x0001
#define MADERA_LDO1_ENA_MASK 0x0001
#define MADERA_LDO1_ENA_SHIFT 0
-#define MADERA_LDO1_ENA_WIDTH 1
/* (0x0213) LDO2_Control_1 */
#define MADERA_LDO2_VSEL_MASK 0x07E0
#define MADERA_LDO2_VSEL_SHIFT 5
-#define MADERA_LDO2_VSEL_WIDTH 6
#define MADERA_LDO2_FAST 0x0010
#define MADERA_LDO2_FAST_MASK 0x0010
#define MADERA_LDO2_FAST_SHIFT 4
-#define MADERA_LDO2_FAST_WIDTH 1
#define MADERA_LDO2_DISCH 0x0004
#define MADERA_LDO2_DISCH_MASK 0x0004
#define MADERA_LDO2_DISCH_SHIFT 2
-#define MADERA_LDO2_DISCH_WIDTH 1
#define MADERA_LDO2_BYPASS 0x0002
#define MADERA_LDO2_BYPASS_MASK 0x0002
#define MADERA_LDO2_BYPASS_SHIFT 1
-#define MADERA_LDO2_BYPASS_WIDTH 1
#define MADERA_LDO2_ENA 0x0001
#define MADERA_LDO2_ENA_MASK 0x0001
#define MADERA_LDO2_ENA_SHIFT 0
-#define MADERA_LDO2_ENA_WIDTH 1
/* (0x0218) Mic_Bias_Ctrl_1 */
#define MADERA_MICB1_EXT_CAP 0x8000
#define MADERA_MICB1_EXT_CAP_MASK 0x8000
#define MADERA_MICB1_EXT_CAP_SHIFT 15
-#define MADERA_MICB1_EXT_CAP_WIDTH 1
#define MADERA_MICB1_LVL_MASK 0x01E0
#define MADERA_MICB1_LVL_SHIFT 5
-#define MADERA_MICB1_LVL_WIDTH 4
#define MADERA_MICB1_ENA 0x0001
#define MADERA_MICB1_ENA_MASK 0x0001
#define MADERA_MICB1_ENA_SHIFT 0
-#define MADERA_MICB1_ENA_WIDTH 1
/* (0x021C) Mic_Bias_Ctrl_5 */
#define MADERA_MICB1D_ENA 0x1000
#define MADERA_MICB1D_ENA_MASK 0x1000
#define MADERA_MICB1D_ENA_SHIFT 12
-#define MADERA_MICB1D_ENA_WIDTH 1
#define MADERA_MICB1C_ENA 0x0100
#define MADERA_MICB1C_ENA_MASK 0x0100
#define MADERA_MICB1C_ENA_SHIFT 8
-#define MADERA_MICB1C_ENA_WIDTH 1
#define MADERA_MICB1B_ENA 0x0010
#define MADERA_MICB1B_ENA_MASK 0x0010
#define MADERA_MICB1B_ENA_SHIFT 4
-#define MADERA_MICB1B_ENA_WIDTH 1
#define MADERA_MICB1A_ENA 0x0001
#define MADERA_MICB1A_ENA_MASK 0x0001
#define MADERA_MICB1A_ENA_SHIFT 0
-#define MADERA_MICB1A_ENA_WIDTH 1
/* (0x021E) Mic_Bias_Ctrl_6 */
#define MADERA_MICB2D_ENA 0x1000
#define MADERA_MICB2D_ENA_MASK 0x1000
#define MADERA_MICB2D_ENA_SHIFT 12
-#define MADERA_MICB2D_ENA_WIDTH 1
#define MADERA_MICB2C_ENA 0x0100
#define MADERA_MICB2C_ENA_MASK 0x0100
#define MADERA_MICB2C_ENA_SHIFT 8
-#define MADERA_MICB2C_ENA_WIDTH 1
#define MADERA_MICB2B_ENA 0x0010
#define MADERA_MICB2B_ENA_MASK 0x0010
#define MADERA_MICB2B_ENA_SHIFT 4
-#define MADERA_MICB2B_ENA_WIDTH 1
#define MADERA_MICB2A_ENA 0x0001
#define MADERA_MICB2A_ENA_MASK 0x0001
#define MADERA_MICB2A_ENA_SHIFT 0
-#define MADERA_MICB2A_ENA_WIDTH 1
/* (0x0225) - HP Ctrl 1L */
#define MADERA_RMV_SHRT_HP1L 0x4000
#define MADERA_RMV_SHRT_HP1L_MASK 0x4000
#define MADERA_RMV_SHRT_HP1L_SHIFT 14
-#define MADERA_RMV_SHRT_HP1L_WIDTH 1
#define MADERA_HP1L_FLWR 0x0004
#define MADERA_HP1L_FLWR_MASK 0x0004
#define MADERA_HP1L_FLWR_SHIFT 2
-#define MADERA_HP1L_FLWR_WIDTH 1
#define MADERA_HP1L_SHRTI 0x0002
#define MADERA_HP1L_SHRTI_MASK 0x0002
#define MADERA_HP1L_SHRTI_SHIFT 1
-#define MADERA_HP1L_SHRTI_WIDTH 1
#define MADERA_HP1L_SHRTO 0x0001
#define MADERA_HP1L_SHRTO_MASK 0x0001
#define MADERA_HP1L_SHRTO_SHIFT 0
-#define MADERA_HP1L_SHRTO_WIDTH 1
/* (0x0226) - HP Ctrl 1R */
#define MADERA_RMV_SHRT_HP1R 0x4000
#define MADERA_RMV_SHRT_HP1R_MASK 0x4000
#define MADERA_RMV_SHRT_HP1R_SHIFT 14
-#define MADERA_RMV_SHRT_HP1R_WIDTH 1
#define MADERA_HP1R_FLWR 0x0004
#define MADERA_HP1R_FLWR_MASK 0x0004
#define MADERA_HP1R_FLWR_SHIFT 2
-#define MADERA_HP1R_FLWR_WIDTH 1
#define MADERA_HP1R_SHRTI 0x0002
#define MADERA_HP1R_SHRTI_MASK 0x0002
#define MADERA_HP1R_SHRTI_SHIFT 1
-#define MADERA_HP1R_SHRTI_WIDTH 1
#define MADERA_HP1R_SHRTO 0x0001
#define MADERA_HP1R_SHRTO_MASK 0x0001
#define MADERA_HP1R_SHRTO_SHIFT 0
-#define MADERA_HP1R_SHRTO_WIDTH 1
/* (0x0293) Accessory_Detect_Mode_1 */
#define MADERA_ACCDET_SRC 0x2000
#define MADERA_ACCDET_SRC_MASK 0x2000
#define MADERA_ACCDET_SRC_SHIFT 13
-#define MADERA_ACCDET_SRC_WIDTH 1
#define MADERA_ACCDET_POLARITY_INV_ENA 0x0080
#define MADERA_ACCDET_POLARITY_INV_ENA_MASK 0x0080
#define MADERA_ACCDET_POLARITY_INV_ENA_SHIFT 7
-#define MADERA_ACCDET_POLARITY_INV_ENA_WIDTH 1
#define MADERA_ACCDET_MODE_MASK 0x0007
#define MADERA_ACCDET_MODE_SHIFT 0
-#define MADERA_ACCDET_MODE_WIDTH 3
/* (0x0299) Headphone_Detect_0 */
#define MADERA_HPD_GND_SEL 0x0007
#define MADERA_HPD_GND_SEL_MASK 0x0007
#define MADERA_HPD_GND_SEL_SHIFT 0
-#define MADERA_HPD_GND_SEL_WIDTH 3
#define MADERA_HPD_SENSE_SEL 0x00F0
#define MADERA_HPD_SENSE_SEL_MASK 0x00F0
#define MADERA_HPD_SENSE_SEL_SHIFT 4
-#define MADERA_HPD_SENSE_SEL_WIDTH 4
#define MADERA_HPD_FRC_SEL 0x0F00
#define MADERA_HPD_FRC_SEL_MASK 0x0F00
#define MADERA_HPD_FRC_SEL_SHIFT 8
-#define MADERA_HPD_FRC_SEL_WIDTH 4
#define MADERA_HPD_OUT_SEL 0x7000
#define MADERA_HPD_OUT_SEL_MASK 0x7000
#define MADERA_HPD_OUT_SEL_SHIFT 12
-#define MADERA_HPD_OUT_SEL_WIDTH 3
#define MADERA_HPD_OVD_ENA_SEL 0x8000
#define MADERA_HPD_OVD_ENA_SEL_MASK 0x8000
#define MADERA_HPD_OVD_ENA_SEL_SHIFT 15
-#define MADERA_HPD_OVD_ENA_SEL_WIDTH 1
/* (0x029B) Headphone_Detect_1 */
#define MADERA_HP_IMPEDANCE_RANGE_MASK 0x0600
#define MADERA_HP_IMPEDANCE_RANGE_SHIFT 9
-#define MADERA_HP_IMPEDANCE_RANGE_WIDTH 2
#define MADERA_HP_STEP_SIZE 0x0100
#define MADERA_HP_STEP_SIZE_MASK 0x0100
#define MADERA_HP_STEP_SIZE_SHIFT 8
-#define MADERA_HP_STEP_SIZE_WIDTH 1
#define MADERA_HP_CLK_DIV_MASK 0x0018
#define MADERA_HP_CLK_DIV_SHIFT 3
-#define MADERA_HP_CLK_DIV_WIDTH 2
#define MADERA_HP_RATE_MASK 0x0006
#define MADERA_HP_RATE_SHIFT 1
-#define MADERA_HP_RATE_WIDTH 2
#define MADERA_HP_POLL 0x0001
#define MADERA_HP_POLL_MASK 0x0001
#define MADERA_HP_POLL_SHIFT 0
-#define MADERA_HP_POLL_WIDTH 1
/* (0x029C) Headphone_Detect_2 */
#define MADERA_HP_DONE_MASK 0x8000
#define MADERA_HP_DONE_SHIFT 15
-#define MADERA_HP_DONE_WIDTH 1
#define MADERA_HP_LVL_MASK 0x7FFF
#define MADERA_HP_LVL_SHIFT 0
-#define MADERA_HP_LVL_WIDTH 15
/* (0x029D) Headphone_Detect_3 */
#define MADERA_HP_DACVAL_MASK 0x03FF
#define MADERA_HP_DACVAL_SHIFT 0
-#define MADERA_HP_DACVAL_WIDTH 10
/* (0x029F) - Headphone Detect 5 */
#define MADERA_HP_DACVAL_DOWN_MASK 0x03FF
#define MADERA_HP_DACVAL_DOWN_SHIFT 0
-#define MADERA_HP_DACVAL_DOWN_WIDTH 10
/* (0x02A2) Mic_Detect_1_Control_0 */
#define MADERA_MICD1_GND_MASK 0x0007
#define MADERA_MICD1_GND_SHIFT 0
-#define MADERA_MICD1_GND_WIDTH 3
#define MADERA_MICD1_SENSE_MASK 0x00F0
#define MADERA_MICD1_SENSE_SHIFT 4
-#define MADERA_MICD1_SENSE_WIDTH 4
#define MADERA_MICD1_ADC_MODE_MASK 0x8000
#define MADERA_MICD1_ADC_MODE_SHIFT 15
-#define MADERA_MICD1_ADC_MODE_WIDTH 1
/* (0x02A3) Mic_Detect_1_Control_1 */
#define MADERA_MICD_BIAS_STARTTIME_MASK 0xF000
#define MADERA_MICD_BIAS_STARTTIME_SHIFT 12
-#define MADERA_MICD_BIAS_STARTTIME_WIDTH 4
#define MADERA_MICD_RATE_MASK 0x0F00
#define MADERA_MICD_RATE_SHIFT 8
-#define MADERA_MICD_RATE_WIDTH 4
#define MADERA_MICD_BIAS_SRC_MASK 0x00F0
#define MADERA_MICD_BIAS_SRC_SHIFT 4
-#define MADERA_MICD_BIAS_SRC_WIDTH 4
#define MADERA_MICD_DBTIME 0x0002
#define MADERA_MICD_DBTIME_MASK 0x0002
#define MADERA_MICD_DBTIME_SHIFT 1
-#define MADERA_MICD_DBTIME_WIDTH 1
#define MADERA_MICD_ENA 0x0001
#define MADERA_MICD_ENA_MASK 0x0001
#define MADERA_MICD_ENA_SHIFT 0
-#define MADERA_MICD_ENA_WIDTH 1
/* (0x02A4) Mic_Detect_1_Control_2 */
#define MADERA_MICD_LVL_SEL_MASK 0x00FF
#define MADERA_MICD_LVL_SEL_SHIFT 0
-#define MADERA_MICD_LVL_SEL_WIDTH 8
/* (0x02A5) Mic_Detect_1_Control_3 */
#define MADERA_MICD_LVL_0 0x0004
@@ -1859,1746 +1731,1341 @@
#define MADERA_MICD_LVL_8 0x0400
#define MADERA_MICD_LVL_MASK 0x07FC
#define MADERA_MICD_LVL_SHIFT 2
-#define MADERA_MICD_LVL_WIDTH 9
#define MADERA_MICD_VALID 0x0002
#define MADERA_MICD_VALID_MASK 0x0002
#define MADERA_MICD_VALID_SHIFT 1
-#define MADERA_MICD_VALID_WIDTH 1
#define MADERA_MICD_STS 0x0001
#define MADERA_MICD_STS_MASK 0x0001
#define MADERA_MICD_STS_SHIFT 0
-#define MADERA_MICD_STS_WIDTH 1
/* (0x02AB) Mic_Detect_1_Control_4 */
#define MADERA_MICDET_ADCVAL_DIFF_MASK 0xFF00
#define MADERA_MICDET_ADCVAL_DIFF_SHIFT 8
-#define MADERA_MICDET_ADCVAL_DIFF_WIDTH 8
#define MADERA_MICDET_ADCVAL_MASK 0x007F
#define MADERA_MICDET_ADCVAL_SHIFT 0
-#define MADERA_MICDET_ADCVAL_WIDTH 7
/* (0x02C6) Micd_Clamp_control */
#define MADERA_MICD_CLAMP_OVD 0x0010
#define MADERA_MICD_CLAMP_OVD_MASK 0x0010
#define MADERA_MICD_CLAMP_OVD_SHIFT 4
-#define MADERA_MICD_CLAMP_OVD_WIDTH 1
#define MADERA_MICD_CLAMP_MODE_MASK 0x000F
#define MADERA_MICD_CLAMP_MODE_SHIFT 0
-#define MADERA_MICD_CLAMP_MODE_WIDTH 4
/* (0x02C8) GP_Switch_1 */
#define MADERA_SW2_MODE_MASK 0x000C
#define MADERA_SW2_MODE_SHIFT 2
-#define MADERA_SW2_MODE_WIDTH 2
#define MADERA_SW1_MODE_MASK 0x0003
#define MADERA_SW1_MODE_SHIFT 0
-#define MADERA_SW1_MODE_WIDTH 2
/* (0x02D3) Jack_detect_analogue */
#define MADERA_JD2_ENA 0x0002
#define MADERA_JD2_ENA_MASK 0x0002
#define MADERA_JD2_ENA_SHIFT 1
-#define MADERA_JD2_ENA_WIDTH 1
#define MADERA_JD1_ENA 0x0001
#define MADERA_JD1_ENA_MASK 0x0001
#define MADERA_JD1_ENA_SHIFT 0
-#define MADERA_JD1_ENA_WIDTH 1
/* (0x0300) Input_Enables */
#define MADERA_IN6L_ENA 0x0800
#define MADERA_IN6L_ENA_MASK 0x0800
#define MADERA_IN6L_ENA_SHIFT 11
-#define MADERA_IN6L_ENA_WIDTH 1
#define MADERA_IN6R_ENA 0x0400
#define MADERA_IN6R_ENA_MASK 0x0400
#define MADERA_IN6R_ENA_SHIFT 10
-#define MADERA_IN6R_ENA_WIDTH 1
#define MADERA_IN5L_ENA 0x0200
#define MADERA_IN5L_ENA_MASK 0x0200
#define MADERA_IN5L_ENA_SHIFT 9
-#define MADERA_IN5L_ENA_WIDTH 1
#define MADERA_IN5R_ENA 0x0100
#define MADERA_IN5R_ENA_MASK 0x0100
#define MADERA_IN5R_ENA_SHIFT 8
-#define MADERA_IN5R_ENA_WIDTH 1
#define MADERA_IN4L_ENA 0x0080
#define MADERA_IN4L_ENA_MASK 0x0080
#define MADERA_IN4L_ENA_SHIFT 7
-#define MADERA_IN4L_ENA_WIDTH 1
#define MADERA_IN4R_ENA 0x0040
#define MADERA_IN4R_ENA_MASK 0x0040
#define MADERA_IN4R_ENA_SHIFT 6
-#define MADERA_IN4R_ENA_WIDTH 1
#define MADERA_IN3L_ENA 0x0020
#define MADERA_IN3L_ENA_MASK 0x0020
#define MADERA_IN3L_ENA_SHIFT 5
-#define MADERA_IN3L_ENA_WIDTH 1
#define MADERA_IN3R_ENA 0x0010
#define MADERA_IN3R_ENA_MASK 0x0010
#define MADERA_IN3R_ENA_SHIFT 4
-#define MADERA_IN3R_ENA_WIDTH 1
#define MADERA_IN2L_ENA 0x0008
#define MADERA_IN2L_ENA_MASK 0x0008
#define MADERA_IN2L_ENA_SHIFT 3
-#define MADERA_IN2L_ENA_WIDTH 1
#define MADERA_IN2R_ENA 0x0004
#define MADERA_IN2R_ENA_MASK 0x0004
#define MADERA_IN2R_ENA_SHIFT 2
-#define MADERA_IN2R_ENA_WIDTH 1
#define MADERA_IN1L_ENA 0x0002
#define MADERA_IN1L_ENA_MASK 0x0002
#define MADERA_IN1L_ENA_SHIFT 1
-#define MADERA_IN1L_ENA_WIDTH 1
#define MADERA_IN1R_ENA 0x0001
#define MADERA_IN1R_ENA_MASK 0x0001
#define MADERA_IN1R_ENA_SHIFT 0
-#define MADERA_IN1R_ENA_WIDTH 1
/* (0x0308) Input_Rate */
#define MADERA_IN_RATE_MASK 0xF800
#define MADERA_IN_RATE_SHIFT 11
-#define MADERA_IN_RATE_WIDTH 5
#define MADERA_IN_MODE_MASK 0x0400
#define MADERA_IN_MODE_SHIFT 10
-#define MADERA_IN_MODE_WIDTH 1
/* (0x0309) Input_Volume_Ramp */
#define MADERA_IN_VD_RAMP_MASK 0x0070
#define MADERA_IN_VD_RAMP_SHIFT 4
-#define MADERA_IN_VD_RAMP_WIDTH 3
#define MADERA_IN_VI_RAMP_MASK 0x0007
#define MADERA_IN_VI_RAMP_SHIFT 0
-#define MADERA_IN_VI_RAMP_WIDTH 3
/* (0x030C) HPF_Control */
#define MADERA_IN_HPF_CUT_MASK 0x0007
#define MADERA_IN_HPF_CUT_SHIFT 0
-#define MADERA_IN_HPF_CUT_WIDTH 3
/* (0x0310) IN1L_Control */
#define MADERA_IN1L_HPF_MASK 0x8000
#define MADERA_IN1L_HPF_SHIFT 15
-#define MADERA_IN1L_HPF_WIDTH 1
#define MADERA_IN1_DMIC_SUP_MASK 0x1800
#define MADERA_IN1_DMIC_SUP_SHIFT 11
-#define MADERA_IN1_DMIC_SUP_WIDTH 2
#define MADERA_IN1_MODE_MASK 0x0400
#define MADERA_IN1_MODE_SHIFT 10
-#define MADERA_IN1_MODE_WIDTH 1
#define MADERA_IN1L_PGA_VOL_MASK 0x00FE
#define MADERA_IN1L_PGA_VOL_SHIFT 1
-#define MADERA_IN1L_PGA_VOL_WIDTH 7
/* (0x0311) ADC_Digital_Volume_1L */
#define MADERA_IN1L_SRC_MASK 0x4000
#define MADERA_IN1L_SRC_SHIFT 14
-#define MADERA_IN1L_SRC_WIDTH 1
#define MADERA_IN1L_SRC_SE_MASK 0x2000
#define MADERA_IN1L_SRC_SE_SHIFT 13
-#define MADERA_IN1L_SRC_SE_WIDTH 1
#define MADERA_IN1L_LP_MODE 0x0800
#define MADERA_IN1L_LP_MODE_MASK 0x0800
#define MADERA_IN1L_LP_MODE_SHIFT 11
-#define MADERA_IN1L_LP_MODE_WIDTH 1
#define MADERA_IN_VU 0x0200
#define MADERA_IN_VU_MASK 0x0200
#define MADERA_IN_VU_SHIFT 9
-#define MADERA_IN_VU_WIDTH 1
#define MADERA_IN1L_MUTE 0x0100
#define MADERA_IN1L_MUTE_MASK 0x0100
#define MADERA_IN1L_MUTE_SHIFT 8
-#define MADERA_IN1L_MUTE_WIDTH 1
#define MADERA_IN1L_DIG_VOL_MASK 0x00FF
#define MADERA_IN1L_DIG_VOL_SHIFT 0
-#define MADERA_IN1L_DIG_VOL_WIDTH 8
/* (0x0312) DMIC1L_Control */
#define MADERA_IN1_OSR_MASK 0x0700
#define MADERA_IN1_OSR_SHIFT 8
-#define MADERA_IN1_OSR_WIDTH 3
/* (0x0313) IN1L_Rate_Control */
#define MADERA_IN1L_RATE_MASK 0xF800
#define MADERA_IN1L_RATE_SHIFT 11
-#define MADERA_IN1L_RATE_WIDTH 5
/* (0x0314) IN1R_Control */
#define MADERA_IN1R_HPF_MASK 0x8000
#define MADERA_IN1R_HPF_SHIFT 15
-#define MADERA_IN1R_HPF_WIDTH 1
#define MADERA_IN1R_PGA_VOL_MASK 0x00FE
#define MADERA_IN1R_PGA_VOL_SHIFT 1
-#define MADERA_IN1R_PGA_VOL_WIDTH 7
#define MADERA_IN1_DMICCLK_SRC_MASK 0x1800
#define MADERA_IN1_DMICCLK_SRC_SHIFT 11
-#define MADERA_IN1_DMICCLK_SRC_WIDTH 2
/* (0x0315) ADC_Digital_Volume_1R */
#define MADERA_IN1R_SRC_MASK 0x4000
#define MADERA_IN1R_SRC_SHIFT 14
-#define MADERA_IN1R_SRC_WIDTH 1
#define MADERA_IN1R_SRC_SE_MASK 0x2000
#define MADERA_IN1R_SRC_SE_SHIFT 13
-#define MADERA_IN1R_SRC_SE_WIDTH 1
#define MADERA_IN1R_LP_MODE 0x0800
#define MADERA_IN1R_LP_MODE_MASK 0x0800
#define MADERA_IN1R_LP_MODE_SHIFT 11
-#define MADERA_IN1R_LP_MODE_WIDTH 1
#define MADERA_IN1R_MUTE 0x0100
#define MADERA_IN1R_MUTE_MASK 0x0100
#define MADERA_IN1R_MUTE_SHIFT 8
-#define MADERA_IN1R_MUTE_WIDTH 1
#define MADERA_IN1R_DIG_VOL_MASK 0x00FF
#define MADERA_IN1R_DIG_VOL_SHIFT 0
-#define MADERA_IN1R_DIG_VOL_WIDTH 8
/* (0x0317) IN1R_Rate_Control */
#define MADERA_IN1R_RATE_MASK 0xF800
#define MADERA_IN1R_RATE_SHIFT 11
-#define MADERA_IN1R_RATE_WIDTH 5
/* (0x0318) IN2L_Control */
#define MADERA_IN2L_HPF_MASK 0x8000
#define MADERA_IN2L_HPF_SHIFT 15
-#define MADERA_IN2L_HPF_WIDTH 1
#define MADERA_IN2_DMIC_SUP_MASK 0x1800
#define MADERA_IN2_DMIC_SUP_SHIFT 11
-#define MADERA_IN2_DMIC_SUP_WIDTH 2
#define MADERA_IN2_MODE_MASK 0x0400
#define MADERA_IN2_MODE_SHIFT 10
-#define MADERA_IN2_MODE_WIDTH 1
#define MADERA_IN2L_PGA_VOL_MASK 0x00FE
#define MADERA_IN2L_PGA_VOL_SHIFT 1
-#define MADERA_IN2L_PGA_VOL_WIDTH 7
/* (0x0319) ADC_Digital_Volume_2L */
#define MADERA_IN2L_SRC_MASK 0x4000
#define MADERA_IN2L_SRC_SHIFT 14
-#define MADERA_IN2L_SRC_WIDTH 1
#define MADERA_IN2L_SRC_SE_MASK 0x2000
#define MADERA_IN2L_SRC_SE_SHIFT 13
-#define MADERA_IN2L_SRC_SE_WIDTH 1
#define MADERA_IN2L_LP_MODE 0x0800
#define MADERA_IN2L_LP_MODE_MASK 0x0800
#define MADERA_IN2L_LP_MODE_SHIFT 11
-#define MADERA_IN2L_LP_MODE_WIDTH 1
#define MADERA_IN2L_MUTE 0x0100
#define MADERA_IN2L_MUTE_MASK 0x0100
#define MADERA_IN2L_MUTE_SHIFT 8
-#define MADERA_IN2L_MUTE_WIDTH 1
#define MADERA_IN2L_DIG_VOL_MASK 0x00FF
#define MADERA_IN2L_DIG_VOL_SHIFT 0
-#define MADERA_IN2L_DIG_VOL_WIDTH 8
/* (0x031A) DMIC2L_Control */
#define MADERA_IN2_OSR_MASK 0x0700
#define MADERA_IN2_OSR_SHIFT 8
-#define MADERA_IN2_OSR_WIDTH 3
/* (0x031C) IN2R_Control */
#define MADERA_IN2R_HPF_MASK 0x8000
#define MADERA_IN2R_HPF_SHIFT 15
-#define MADERA_IN2R_HPF_WIDTH 1
#define MADERA_IN2R_PGA_VOL_MASK 0x00FE
#define MADERA_IN2R_PGA_VOL_SHIFT 1
-#define MADERA_IN2R_PGA_VOL_WIDTH 7
#define MADERA_IN2_DMICCLK_SRC_MASK 0x1800
#define MADERA_IN2_DMICCLK_SRC_SHIFT 11
-#define MADERA_IN2_DMICCLK_SRC_WIDTH 2
/* (0x031D) ADC_Digital_Volume_2R */
#define MADERA_IN2R_SRC_MASK 0x4000
#define MADERA_IN2R_SRC_SHIFT 14
-#define MADERA_IN2R_SRC_WIDTH 1
#define MADERA_IN2R_SRC_SE_MASK 0x2000
#define MADERA_IN2R_SRC_SE_SHIFT 13
-#define MADERA_IN2R_SRC_SE_WIDTH 1
#define MADERA_IN2R_LP_MODE 0x0800
#define MADERA_IN2R_LP_MODE_MASK 0x0800
#define MADERA_IN2R_LP_MODE_SHIFT 11
-#define MADERA_IN2R_LP_MODE_WIDTH 1
#define MADERA_IN2R_MUTE 0x0100
#define MADERA_IN2R_MUTE_MASK 0x0100
#define MADERA_IN2R_MUTE_SHIFT 8
-#define MADERA_IN2R_MUTE_WIDTH 1
#define MADERA_IN2R_DIG_VOL_MASK 0x00FF
#define MADERA_IN2R_DIG_VOL_SHIFT 0
-#define MADERA_IN2R_DIG_VOL_WIDTH 8
/* (0x0320) IN3L_Control */
#define MADERA_IN3L_HPF_MASK 0x8000
#define MADERA_IN3L_HPF_SHIFT 15
-#define MADERA_IN3L_HPF_WIDTH 1
#define MADERA_IN3_DMIC_SUP_MASK 0x1800
#define MADERA_IN3_DMIC_SUP_SHIFT 11
-#define MADERA_IN3_DMIC_SUP_WIDTH 2
#define MADERA_IN3_MODE_MASK 0x0400
#define MADERA_IN3_MODE_SHIFT 10
-#define MADERA_IN3_MODE_WIDTH 1
#define MADERA_IN3L_PGA_VOL_MASK 0x00FE
#define MADERA_IN3L_PGA_VOL_SHIFT 1
-#define MADERA_IN3L_PGA_VOL_WIDTH 7
/* (0x0321) ADC_Digital_Volume_3L */
#define MADERA_IN3L_MUTE 0x0100
#define MADERA_IN3L_MUTE_MASK 0x0100
#define MADERA_IN3L_MUTE_SHIFT 8
-#define MADERA_IN3L_MUTE_WIDTH 1
#define MADERA_IN3L_DIG_VOL_MASK 0x00FF
#define MADERA_IN3L_DIG_VOL_SHIFT 0
-#define MADERA_IN3L_DIG_VOL_WIDTH 8
/* (0x0322) DMIC3L_Control */
#define MADERA_IN3_OSR_MASK 0x0700
#define MADERA_IN3_OSR_SHIFT 8
-#define MADERA_IN3_OSR_WIDTH 3
/* (0x0324) IN3R_Control */
#define MADERA_IN3R_HPF_MASK 0x8000
#define MADERA_IN3R_HPF_SHIFT 15
-#define MADERA_IN3R_HPF_WIDTH 1
#define MADERA_IN3R_PGA_VOL_MASK 0x00FE
#define MADERA_IN3R_PGA_VOL_SHIFT 1
-#define MADERA_IN3R_PGA_VOL_WIDTH 7
#define MADERA_IN3_DMICCLK_SRC_MASK 0x1800
#define MADERA_IN3_DMICCLK_SRC_SHIFT 11
-#define MADERA_IN3_DMICCLK_SRC_WIDTH 2
/* (0x0325) ADC_Digital_Volume_3R */
#define MADERA_IN3R_MUTE 0x0100
#define MADERA_IN3R_MUTE_MASK 0x0100
#define MADERA_IN3R_MUTE_SHIFT 8
-#define MADERA_IN3R_MUTE_WIDTH 1
#define MADERA_IN3R_DIG_VOL_MASK 0x00FF
#define MADERA_IN3R_DIG_VOL_SHIFT 0
-#define MADERA_IN3R_DIG_VOL_WIDTH 8
/* (0x0328) IN4L_Control */
#define MADERA_IN4L_HPF_MASK 0x8000
#define MADERA_IN4L_HPF_SHIFT 15
-#define MADERA_IN4L_HPF_WIDTH 1
#define MADERA_IN4_DMIC_SUP_MASK 0x1800
#define MADERA_IN4_DMIC_SUP_SHIFT 11
-#define MADERA_IN4_DMIC_SUP_WIDTH 2
/* (0x0329) ADC_Digital_Volume_4L */
#define MADERA_IN4L_MUTE 0x0100
#define MADERA_IN4L_MUTE_MASK 0x0100
#define MADERA_IN4L_MUTE_SHIFT 8
-#define MADERA_IN4L_MUTE_WIDTH 1
#define MADERA_IN4L_DIG_VOL_MASK 0x00FF
#define MADERA_IN4L_DIG_VOL_SHIFT 0
-#define MADERA_IN4L_DIG_VOL_WIDTH 8
/* (0x032A) DMIC4L_Control */
#define MADERA_IN4_OSR_MASK 0x0700
#define MADERA_IN4_OSR_SHIFT 8
-#define MADERA_IN4_OSR_WIDTH 3
/* (0x032C) IN4R_Control */
#define MADERA_IN4R_HPF_MASK 0x8000
#define MADERA_IN4R_HPF_SHIFT 15
-#define MADERA_IN4R_HPF_WIDTH 1
#define MADERA_IN4_DMICCLK_SRC_MASK 0x1800
#define MADERA_IN4_DMICCLK_SRC_SHIFT 11
-#define MADERA_IN4_DMICCLK_SRC_WIDTH 2
/* (0x032D) ADC_Digital_Volume_4R */
#define MADERA_IN4R_MUTE 0x0100
#define MADERA_IN4R_MUTE_MASK 0x0100
#define MADERA_IN4R_MUTE_SHIFT 8
-#define MADERA_IN4R_MUTE_WIDTH 1
#define MADERA_IN4R_DIG_VOL_MASK 0x00FF
#define MADERA_IN4R_DIG_VOL_SHIFT 0
-#define MADERA_IN4R_DIG_VOL_WIDTH 8
/* (0x0330) IN5L_Control */
#define MADERA_IN5L_HPF_MASK 0x8000
#define MADERA_IN5L_HPF_SHIFT 15
-#define MADERA_IN5L_HPF_WIDTH 1
#define MADERA_IN5_DMIC_SUP_MASK 0x1800
#define MADERA_IN5_DMIC_SUP_SHIFT 11
-#define MADERA_IN5_DMIC_SUP_WIDTH 2
/* (0x0331) ADC_Digital_Volume_5L */
#define MADERA_IN5L_MUTE 0x0100
#define MADERA_IN5L_MUTE_MASK 0x0100
#define MADERA_IN5L_MUTE_SHIFT 8
-#define MADERA_IN5L_MUTE_WIDTH 1
#define MADERA_IN5L_DIG_VOL_MASK 0x00FF
#define MADERA_IN5L_DIG_VOL_SHIFT 0
-#define MADERA_IN5L_DIG_VOL_WIDTH 8
/* (0x0332) DMIC5L_Control */
#define MADERA_IN5_OSR_MASK 0x0700
#define MADERA_IN5_OSR_SHIFT 8
-#define MADERA_IN5_OSR_WIDTH 3
/* (0x0334) IN5R_Control */
#define MADERA_IN5R_HPF_MASK 0x8000
#define MADERA_IN5R_HPF_SHIFT 15
-#define MADERA_IN5R_HPF_WIDTH 1
#define MADERA_IN5_DMICCLK_SRC_MASK 0x1800
#define MADERA_IN5_DMICCLK_SRC_SHIFT 11
-#define MADERA_IN5_DMICCLK_SRC_WIDTH 2
/* (0x0335) ADC_Digital_Volume_5R */
#define MADERA_IN5R_MUTE 0x0100
#define MADERA_IN5R_MUTE_MASK 0x0100
#define MADERA_IN5R_MUTE_SHIFT 8
-#define MADERA_IN5R_MUTE_WIDTH 1
#define MADERA_IN5R_DIG_VOL_MASK 0x00FF
#define MADERA_IN5R_DIG_VOL_SHIFT 0
-#define MADERA_IN5R_DIG_VOL_WIDTH 8
/* (0x0338) IN6L_Control */
#define MADERA_IN6L_HPF_MASK 0x8000
#define MADERA_IN6L_HPF_SHIFT 15
-#define MADERA_IN6L_HPF_WIDTH 1
#define MADERA_IN6_DMIC_SUP_MASK 0x1800
#define MADERA_IN6_DMIC_SUP_SHIFT 11
-#define MADERA_IN6_DMIC_SUP_WIDTH 2
/* (0x0339) ADC_Digital_Volume_6L */
#define MADERA_IN6L_MUTE 0x0100
#define MADERA_IN6L_MUTE_MASK 0x0100
#define MADERA_IN6L_MUTE_SHIFT 8
-#define MADERA_IN6L_MUTE_WIDTH 1
#define MADERA_IN6L_DIG_VOL_MASK 0x00FF
#define MADERA_IN6L_DIG_VOL_SHIFT 0
-#define MADERA_IN6L_DIG_VOL_WIDTH 8
/* (0x033A) DMIC6L_Control */
#define MADERA_IN6_OSR_MASK 0x0700
#define MADERA_IN6_OSR_SHIFT 8
-#define MADERA_IN6_OSR_WIDTH 3
/* (0x033C) IN6R_Control */
#define MADERA_IN6R_HPF_MASK 0x8000
#define MADERA_IN6R_HPF_SHIFT 15
-#define MADERA_IN6R_HPF_WIDTH 1
/* (0x033D) ADC_Digital_Volume_6R */
#define MADERA_IN6R_MUTE 0x0100
#define MADERA_IN6R_MUTE_MASK 0x0100
#define MADERA_IN6R_MUTE_SHIFT 8
-#define MADERA_IN6R_MUTE_WIDTH 1
#define MADERA_IN6R_DIG_VOL_MASK 0x00FF
#define MADERA_IN6R_DIG_VOL_SHIFT 0
-#define MADERA_IN6R_DIG_VOL_WIDTH 8
/* (0x033E) DMIC6R_Control */
#define MADERA_IN6_DMICCLK_SRC_MASK 0x1800
#define MADERA_IN6_DMICCLK_SRC_SHIFT 11
-#define MADERA_IN6_DMICCLK_SRC_WIDTH 2
/* (0x0400) Output_Enables_1 */
#define MADERA_EP_SEL 0x8000
#define MADERA_EP_SEL_MASK 0x8000
#define MADERA_EP_SEL_SHIFT 15
-#define MADERA_EP_SEL_WIDTH 1
#define MADERA_OUT6L_ENA 0x0800
#define MADERA_OUT6L_ENA_MASK 0x0800
#define MADERA_OUT6L_ENA_SHIFT 11
-#define MADERA_OUT6L_ENA_WIDTH 1
#define MADERA_OUT6R_ENA 0x0400
#define MADERA_OUT6R_ENA_MASK 0x0400
#define MADERA_OUT6R_ENA_SHIFT 10
-#define MADERA_OUT6R_ENA_WIDTH 1
#define MADERA_OUT5L_ENA 0x0200
#define MADERA_OUT5L_ENA_MASK 0x0200
#define MADERA_OUT5L_ENA_SHIFT 9
-#define MADERA_OUT5L_ENA_WIDTH 1
#define MADERA_OUT5R_ENA 0x0100
#define MADERA_OUT5R_ENA_MASK 0x0100
#define MADERA_OUT5R_ENA_SHIFT 8
-#define MADERA_OUT5R_ENA_WIDTH 1
#define MADERA_OUT4L_ENA 0x0080
#define MADERA_OUT4L_ENA_MASK 0x0080
#define MADERA_OUT4L_ENA_SHIFT 7
-#define MADERA_OUT4L_ENA_WIDTH 1
#define MADERA_OUT4R_ENA 0x0040
#define MADERA_OUT4R_ENA_MASK 0x0040
#define MADERA_OUT4R_ENA_SHIFT 6
-#define MADERA_OUT4R_ENA_WIDTH 1
#define MADERA_OUT3L_ENA 0x0020
#define MADERA_OUT3L_ENA_MASK 0x0020
#define MADERA_OUT3L_ENA_SHIFT 5
-#define MADERA_OUT3L_ENA_WIDTH 1
#define MADERA_OUT3R_ENA 0x0010
#define MADERA_OUT3R_ENA_MASK 0x0010
#define MADERA_OUT3R_ENA_SHIFT 4
-#define MADERA_OUT3R_ENA_WIDTH 1
#define MADERA_OUT2L_ENA 0x0008
#define MADERA_OUT2L_ENA_MASK 0x0008
#define MADERA_OUT2L_ENA_SHIFT 3
-#define MADERA_OUT2L_ENA_WIDTH 1
#define MADERA_OUT2R_ENA 0x0004
#define MADERA_OUT2R_ENA_MASK 0x0004
#define MADERA_OUT2R_ENA_SHIFT 2
-#define MADERA_OUT2R_ENA_WIDTH 1
#define MADERA_OUT1L_ENA 0x0002
#define MADERA_OUT1L_ENA_MASK 0x0002
#define MADERA_OUT1L_ENA_SHIFT 1
-#define MADERA_OUT1L_ENA_WIDTH 1
#define MADERA_OUT1R_ENA 0x0001
#define MADERA_OUT1R_ENA_MASK 0x0001
#define MADERA_OUT1R_ENA_SHIFT 0
-#define MADERA_OUT1R_ENA_WIDTH 1
/* (0x0408) Output_Rate_1 */
#define MADERA_CP_DAC_MODE_MASK 0x0040
#define MADERA_CP_DAC_MODE_SHIFT 6
-#define MADERA_CP_DAC_MODE_WIDTH 1
#define MADERA_OUT_EXT_CLK_DIV_MASK 0x0030
#define MADERA_OUT_EXT_CLK_DIV_SHIFT 4
-#define MADERA_OUT_EXT_CLK_DIV_WIDTH 2
#define MADERA_OUT_CLK_SRC_MASK 0x0007
#define MADERA_OUT_CLK_SRC_SHIFT 0
-#define MADERA_OUT_CLK_SRC_WIDTH 3
/* (0x0409) Output_Volume_Ramp */
#define MADERA_OUT_VD_RAMP_MASK 0x0070
#define MADERA_OUT_VD_RAMP_SHIFT 4
-#define MADERA_OUT_VD_RAMP_WIDTH 3
#define MADERA_OUT_VI_RAMP_MASK 0x0007
#define MADERA_OUT_VI_RAMP_SHIFT 0
-#define MADERA_OUT_VI_RAMP_WIDTH 3
/* (0x0410) Output_Path_Config_1L */
#define MADERA_OUT1_MONO 0x1000
#define MADERA_OUT1_MONO_MASK 0x1000
#define MADERA_OUT1_MONO_SHIFT 12
-#define MADERA_OUT1_MONO_WIDTH 1
#define MADERA_OUT1L_ANC_SRC_MASK 0x0C00
#define MADERA_OUT1L_ANC_SRC_SHIFT 10
-#define MADERA_OUT1L_ANC_SRC_WIDTH 2
/* (0x0411) DAC_Digital_Volume_1L */
#define MADERA_OUT1L_VU 0x0200
#define MADERA_OUT1L_VU_MASK 0x0200
#define MADERA_OUT1L_VU_SHIFT 9
-#define MADERA_OUT1L_VU_WIDTH 1
#define MADERA_OUT1L_MUTE 0x0100
#define MADERA_OUT1L_MUTE_MASK 0x0100
#define MADERA_OUT1L_MUTE_SHIFT 8
-#define MADERA_OUT1L_MUTE_WIDTH 1
#define MADERA_OUT1L_VOL_MASK 0x00FF
#define MADERA_OUT1L_VOL_SHIFT 0
-#define MADERA_OUT1L_VOL_WIDTH 8
/* (0x0412) Output_Path_Config_1 */
#define MADERA_HP1_GND_SEL_MASK 0x0007
#define MADERA_HP1_GND_SEL_SHIFT 0
-#define MADERA_HP1_GND_SEL_WIDTH 3
/* (0x0414) Output_Path_Config_1R */
#define MADERA_OUT1R_ANC_SRC_MASK 0x0C00
#define MADERA_OUT1R_ANC_SRC_SHIFT 10
-#define MADERA_OUT1R_ANC_SRC_WIDTH 2
/* (0x0415) DAC_Digital_Volume_1R */
#define MADERA_OUT1R_MUTE 0x0100
#define MADERA_OUT1R_MUTE_MASK 0x0100
#define MADERA_OUT1R_MUTE_SHIFT 8
-#define MADERA_OUT1R_MUTE_WIDTH 1
#define MADERA_OUT1R_VOL_MASK 0x00FF
#define MADERA_OUT1R_VOL_SHIFT 0
-#define MADERA_OUT1R_VOL_WIDTH 8
/* (0x0418) Output_Path_Config_2L */
#define MADERA_OUT2L_ANC_SRC_MASK 0x0C00
#define MADERA_OUT2L_ANC_SRC_SHIFT 10
-#define MADERA_OUT2L_ANC_SRC_WIDTH 2
/* (0x0419) DAC_Digital_Volume_2L */
#define MADERA_OUT2L_MUTE 0x0100
#define MADERA_OUT2L_MUTE_MASK 0x0100
#define MADERA_OUT2L_MUTE_SHIFT 8
-#define MADERA_OUT2L_MUTE_WIDTH 1
#define MADERA_OUT2L_VOL_MASK 0x00FF
#define MADERA_OUT2L_VOL_SHIFT 0
-#define MADERA_OUT2L_VOL_WIDTH 8
/* (0x041A) Output_Path_Config_2 */
#define MADERA_HP2_GND_SEL_MASK 0x0007
#define MADERA_HP2_GND_SEL_SHIFT 0
-#define MADERA_HP2_GND_SEL_WIDTH 3
/* (0x041C) Output_Path_Config_2R */
#define MADERA_OUT2R_ANC_SRC_MASK 0x0C00
#define MADERA_OUT2R_ANC_SRC_SHIFT 10
-#define MADERA_OUT2R_ANC_SRC_WIDTH 2
/* (0x041D) DAC_Digital_Volume_2R */
#define MADERA_OUT2R_MUTE 0x0100
#define MADERA_OUT2R_MUTE_MASK 0x0100
#define MADERA_OUT2R_MUTE_SHIFT 8
-#define MADERA_OUT2R_MUTE_WIDTH 1
#define MADERA_OUT2R_VOL_MASK 0x00FF
#define MADERA_OUT2R_VOL_SHIFT 0
-#define MADERA_OUT2R_VOL_WIDTH 8
/* (0x0420) Output_Path_Config_3L */
#define MADERA_OUT3L_ANC_SRC_MASK 0x0C00
#define MADERA_OUT3L_ANC_SRC_SHIFT 10
-#define MADERA_OUT3L_ANC_SRC_WIDTH 2
/* (0x0421) DAC_Digital_Volume_3L */
#define MADERA_OUT3L_MUTE 0x0100
#define MADERA_OUT3L_MUTE_MASK 0x0100
#define MADERA_OUT3L_MUTE_SHIFT 8
-#define MADERA_OUT3L_MUTE_WIDTH 1
#define MADERA_OUT3L_VOL_MASK 0x00FF
#define MADERA_OUT3L_VOL_SHIFT 0
-#define MADERA_OUT3L_VOL_WIDTH 8
/* (0x0424) Output_Path_Config_3R */
#define MADERA_OUT3R_ANC_SRC_MASK 0x0C00
#define MADERA_OUT3R_ANC_SRC_SHIFT 10
-#define MADERA_OUT3R_ANC_SRC_WIDTH 2
/* (0x0425) DAC_Digital_Volume_3R */
#define MADERA_OUT3R_MUTE 0x0100
#define MADERA_OUT3R_MUTE_MASK 0x0100
#define MADERA_OUT3R_MUTE_SHIFT 8
-#define MADERA_OUT3R_MUTE_WIDTH 1
#define MADERA_OUT3R_VOL_MASK 0x00FF
#define MADERA_OUT3R_VOL_SHIFT 0
-#define MADERA_OUT3R_VOL_WIDTH 8
/* (0x0428) Output_Path_Config_4L */
#define MADERA_OUT4L_ANC_SRC_MASK 0x0C00
#define MADERA_OUT4L_ANC_SRC_SHIFT 10
-#define MADERA_OUT4L_ANC_SRC_WIDTH 2
/* (0x0429) DAC_Digital_Volume_4L */
#define MADERA_OUT4L_MUTE 0x0100
#define MADERA_OUT4L_MUTE_MASK 0x0100
#define MADERA_OUT4L_MUTE_SHIFT 8
-#define MADERA_OUT4L_MUTE_WIDTH 1
#define MADERA_OUT4L_VOL_MASK 0x00FF
#define MADERA_OUT4L_VOL_SHIFT 0
-#define MADERA_OUT4L_VOL_WIDTH 8
/* (0x042C) Output_Path_Config_4R */
#define MADERA_OUT4R_ANC_SRC_MASK 0x0C00
#define MADERA_OUT4R_ANC_SRC_SHIFT 10
-#define MADERA_OUT4R_ANC_SRC_WIDTH 2
/* (0x042D) DAC_Digital_Volume_4R */
#define MADERA_OUT4R_MUTE 0x0100
#define MADERA_OUT4R_MUTE_MASK 0x0100
#define MADERA_OUT4R_MUTE_SHIFT 8
-#define MADERA_OUT4R_MUTE_WIDTH 1
#define MADERA_OUT4R_VOL_MASK 0x00FF
#define MADERA_OUT4R_VOL_SHIFT 0
-#define MADERA_OUT4R_VOL_WIDTH 8
/* (0x0430) Output_Path_Config_5L */
#define MADERA_OUT5_OSR 0x2000
#define MADERA_OUT5_OSR_MASK 0x2000
#define MADERA_OUT5_OSR_SHIFT 13
-#define MADERA_OUT5_OSR_WIDTH 1
#define MADERA_OUT5L_ANC_SRC_MASK 0x0C00
#define MADERA_OUT5L_ANC_SRC_SHIFT 10
-#define MADERA_OUT5L_ANC_SRC_WIDTH 2
/* (0x0431) DAC_Digital_Volume_5L */
#define MADERA_OUT5L_MUTE 0x0100
#define MADERA_OUT5L_MUTE_MASK 0x0100
#define MADERA_OUT5L_MUTE_SHIFT 8
-#define MADERA_OUT5L_MUTE_WIDTH 1
#define MADERA_OUT5L_VOL_MASK 0x00FF
#define MADERA_OUT5L_VOL_SHIFT 0
-#define MADERA_OUT5L_VOL_WIDTH 8
/* (0x0434) Output_Path_Config_5R */
#define MADERA_OUT5R_ANC_SRC_MASK 0x0C00
#define MADERA_OUT5R_ANC_SRC_SHIFT 10
-#define MADERA_OUT5R_ANC_SRC_WIDTH 2
/* (0x0435) DAC_Digital_Volume_5R */
#define MADERA_OUT5R_MUTE 0x0100
#define MADERA_OUT5R_MUTE_MASK 0x0100
#define MADERA_OUT5R_MUTE_SHIFT 8
-#define MADERA_OUT5R_MUTE_WIDTH 1
#define MADERA_OUT5R_VOL_MASK 0x00FF
#define MADERA_OUT5R_VOL_SHIFT 0
-#define MADERA_OUT5R_VOL_WIDTH 8
/* (0x0438) Output_Path_Config_6L */
#define MADERA_OUT6_OSR 0x2000
#define MADERA_OUT6_OSR_MASK 0x2000
#define MADERA_OUT6_OSR_SHIFT 13
-#define MADERA_OUT6_OSR_WIDTH 1
#define MADERA_OUT6L_ANC_SRC_MASK 0x0C00
#define MADERA_OUT6L_ANC_SRC_SHIFT 10
-#define MADERA_OUT6L_ANC_SRC_WIDTH 2
/* (0x0439) DAC_Digital_Volume_6L */
#define MADERA_OUT6L_MUTE 0x0100
#define MADERA_OUT6L_MUTE_MASK 0x0100
#define MADERA_OUT6L_MUTE_SHIFT 8
-#define MADERA_OUT6L_MUTE_WIDTH 1
#define MADERA_OUT6L_VOL_MASK 0x00FF
#define MADERA_OUT6L_VOL_SHIFT 0
-#define MADERA_OUT6L_VOL_WIDTH 8
/* (0x043C) Output_Path_Config_6R */
#define MADERA_OUT6R_ANC_SRC_MASK 0x0C00
#define MADERA_OUT6R_ANC_SRC_SHIFT 10
-#define MADERA_OUT6R_ANC_SRC_WIDTH 2
/* (0x043D) DAC_Digital_Volume_6R */
#define MADERA_OUT6R_MUTE 0x0100
#define MADERA_OUT6R_MUTE_MASK 0x0100
#define MADERA_OUT6R_MUTE_SHIFT 8
-#define MADERA_OUT6R_MUTE_WIDTH 1
#define MADERA_OUT6R_VOL_MASK 0x00FF
#define MADERA_OUT6R_VOL_SHIFT 0
-#define MADERA_OUT6R_VOL_WIDTH 8
/* (0x0450) - DAC AEC Control 1 */
#define MADERA_AEC1_LOOPBACK_SRC_MASK 0x003C
#define MADERA_AEC1_LOOPBACK_SRC_SHIFT 2
-#define MADERA_AEC1_LOOPBACK_SRC_WIDTH 4
#define MADERA_AEC1_ENA_STS 0x0002
#define MADERA_AEC1_ENA_STS_MASK 0x0002
#define MADERA_AEC1_ENA_STS_SHIFT 1
-#define MADERA_AEC1_ENA_STS_WIDTH 1
#define MADERA_AEC1_LOOPBACK_ENA 0x0001
#define MADERA_AEC1_LOOPBACK_ENA_MASK 0x0001
#define MADERA_AEC1_LOOPBACK_ENA_SHIFT 0
-#define MADERA_AEC1_LOOPBACK_ENA_WIDTH 1
/* (0x0451) DAC_AEC_Control_2 */
#define MADERA_AEC2_LOOPBACK_SRC_MASK 0x003C
#define MADERA_AEC2_LOOPBACK_SRC_SHIFT 2
-#define MADERA_AEC2_LOOPBACK_SRC_WIDTH 4
#define MADERA_AEC2_ENA_STS 0x0002
#define MADERA_AEC2_ENA_STS_MASK 0x0002
#define MADERA_AEC2_ENA_STS_SHIFT 1
-#define MADERA_AEC2_ENA_STS_WIDTH 1
#define MADERA_AEC2_LOOPBACK_ENA 0x0001
#define MADERA_AEC2_LOOPBACK_ENA_MASK 0x0001
#define MADERA_AEC2_LOOPBACK_ENA_SHIFT 0
-#define MADERA_AEC2_LOOPBACK_ENA_WIDTH 1
/* (0x0458) Noise_Gate_Control */
#define MADERA_NGATE_HOLD_MASK 0x0030
#define MADERA_NGATE_HOLD_SHIFT 4
-#define MADERA_NGATE_HOLD_WIDTH 2
#define MADERA_NGATE_THR_MASK 0x000E
#define MADERA_NGATE_THR_SHIFT 1
-#define MADERA_NGATE_THR_WIDTH 3
#define MADERA_NGATE_ENA 0x0001
#define MADERA_NGATE_ENA_MASK 0x0001
#define MADERA_NGATE_ENA_SHIFT 0
-#define MADERA_NGATE_ENA_WIDTH 1
/* (0x0490) PDM_SPK1_CTRL_1 */
#define MADERA_SPK1R_MUTE 0x2000
#define MADERA_SPK1R_MUTE_MASK 0x2000
#define MADERA_SPK1R_MUTE_SHIFT 13
-#define MADERA_SPK1R_MUTE_WIDTH 1
#define MADERA_SPK1L_MUTE 0x1000
#define MADERA_SPK1L_MUTE_MASK 0x1000
#define MADERA_SPK1L_MUTE_SHIFT 12
-#define MADERA_SPK1L_MUTE_WIDTH 1
#define MADERA_SPK1_MUTE_ENDIAN 0x0100
#define MADERA_SPK1_MUTE_ENDIAN_MASK 0x0100
#define MADERA_SPK1_MUTE_ENDIAN_SHIFT 8
-#define MADERA_SPK1_MUTE_ENDIAN_WIDTH 1
#define MADERA_SPK1_MUTE_SEQ1_MASK 0x00FF
#define MADERA_SPK1_MUTE_SEQ1_SHIFT 0
-#define MADERA_SPK1_MUTE_SEQ1_WIDTH 8
/* (0x0491) PDM_SPK1_CTRL_2 */
#define MADERA_SPK1_FMT 0x0001
#define MADERA_SPK1_FMT_MASK 0x0001
#define MADERA_SPK1_FMT_SHIFT 0
-#define MADERA_SPK1_FMT_WIDTH 1
/* (0x0492) PDM_SPK2_CTRL_1 */
#define MADERA_SPK2R_MUTE 0x2000
#define MADERA_SPK2R_MUTE_MASK 0x2000
#define MADERA_SPK2R_MUTE_SHIFT 13
-#define MADERA_SPK2R_MUTE_WIDTH 1
#define MADERA_SPK2L_MUTE 0x1000
#define MADERA_SPK2L_MUTE_MASK 0x1000
#define MADERA_SPK2L_MUTE_SHIFT 12
-#define MADERA_SPK2L_MUTE_WIDTH 1
/* (0x04A0) - HP1 Short Circuit Ctrl */
#define MADERA_HP1_SC_ENA 0x1000
#define MADERA_HP1_SC_ENA_MASK 0x1000
#define MADERA_HP1_SC_ENA_SHIFT 12
-#define MADERA_HP1_SC_ENA_WIDTH 1
/* (0x04A1) - HP2 Short Circuit Ctrl */
#define MADERA_HP2_SC_ENA 0x1000
#define MADERA_HP2_SC_ENA_MASK 0x1000
#define MADERA_HP2_SC_ENA_SHIFT 12
-#define MADERA_HP2_SC_ENA_WIDTH 1
/* (0x04A2) - HP3 Short Circuit Ctrl */
#define MADERA_HP3_SC_ENA 0x1000
#define MADERA_HP3_SC_ENA_MASK 0x1000
#define MADERA_HP3_SC_ENA_SHIFT 12
-#define MADERA_HP3_SC_ENA_WIDTH 1
/* (0x04A8) - HP_Test_Ctrl_5 */
#define MADERA_HP1L_ONEFLT 0x0100
#define MADERA_HP1L_ONEFLT_MASK 0x0100
#define MADERA_HP1L_ONEFLT_SHIFT 8
-#define MADERA_HP1L_ONEFLT_WIDTH 1
/* (0x04A9) - HP_Test_Ctrl_6 */
#define MADERA_HP1R_ONEFLT 0x0100
#define MADERA_HP1R_ONEFLT_MASK 0x0100
#define MADERA_HP1R_ONEFLT_SHIFT 8
-#define MADERA_HP1R_ONEFLT_WIDTH 1
/* (0x0500) AIF1_BCLK_Ctrl */
#define MADERA_AIF1_BCLK_INV 0x0080
#define MADERA_AIF1_BCLK_INV_MASK 0x0080
#define MADERA_AIF1_BCLK_INV_SHIFT 7
-#define MADERA_AIF1_BCLK_INV_WIDTH 1
#define MADERA_AIF1_BCLK_MSTR 0x0020
#define MADERA_AIF1_BCLK_MSTR_MASK 0x0020
#define MADERA_AIF1_BCLK_MSTR_SHIFT 5
-#define MADERA_AIF1_BCLK_MSTR_WIDTH 1
#define MADERA_AIF1_BCLK_FREQ_MASK 0x001F
#define MADERA_AIF1_BCLK_FREQ_SHIFT 0
-#define MADERA_AIF1_BCLK_FREQ_WIDTH 5
/* (0x0501) AIF1_Tx_Pin_Ctrl */
#define MADERA_AIF1TX_LRCLK_SRC 0x0008
#define MADERA_AIF1TX_LRCLK_SRC_MASK 0x0008
#define MADERA_AIF1TX_LRCLK_SRC_SHIFT 3
-#define MADERA_AIF1TX_LRCLK_SRC_WIDTH 1
#define MADERA_AIF1TX_LRCLK_INV 0x0004
#define MADERA_AIF1TX_LRCLK_INV_MASK 0x0004
#define MADERA_AIF1TX_LRCLK_INV_SHIFT 2
-#define MADERA_AIF1TX_LRCLK_INV_WIDTH 1
#define MADERA_AIF1TX_LRCLK_MSTR 0x0001
#define MADERA_AIF1TX_LRCLK_MSTR_MASK 0x0001
#define MADERA_AIF1TX_LRCLK_MSTR_SHIFT 0
-#define MADERA_AIF1TX_LRCLK_MSTR_WIDTH 1
/* (0x0502) AIF1_Rx_Pin_Ctrl */
#define MADERA_AIF1RX_LRCLK_INV 0x0004
#define MADERA_AIF1RX_LRCLK_INV_MASK 0x0004
#define MADERA_AIF1RX_LRCLK_INV_SHIFT 2
-#define MADERA_AIF1RX_LRCLK_INV_WIDTH 1
#define MADERA_AIF1RX_LRCLK_FRC 0x0002
#define MADERA_AIF1RX_LRCLK_FRC_MASK 0x0002
#define MADERA_AIF1RX_LRCLK_FRC_SHIFT 1
-#define MADERA_AIF1RX_LRCLK_FRC_WIDTH 1
#define MADERA_AIF1RX_LRCLK_MSTR 0x0001
#define MADERA_AIF1RX_LRCLK_MSTR_MASK 0x0001
#define MADERA_AIF1RX_LRCLK_MSTR_SHIFT 0
-#define MADERA_AIF1RX_LRCLK_MSTR_WIDTH 1
/* (0x0503) AIF1_Rate_Ctrl */
#define MADERA_AIF1_RATE_MASK 0xF800
#define MADERA_AIF1_RATE_SHIFT 11
-#define MADERA_AIF1_RATE_WIDTH 5
#define MADERA_AIF1_TRI 0x0040
#define MADERA_AIF1_TRI_MASK 0x0040
#define MADERA_AIF1_TRI_SHIFT 6
-#define MADERA_AIF1_TRI_WIDTH 1
/* (0x0504) AIF1_Format */
#define MADERA_AIF1_FMT_MASK 0x0007
#define MADERA_AIF1_FMT_SHIFT 0
-#define MADERA_AIF1_FMT_WIDTH 3
/* (0x0506) AIF1_Rx_BCLK_Rate */
#define MADERA_AIF1RX_BCPF_MASK 0x1FFF
#define MADERA_AIF1RX_BCPF_SHIFT 0
-#define MADERA_AIF1RX_BCPF_WIDTH 13
/* (0x0507) AIF1_Frame_Ctrl_1 */
#define MADERA_AIF1TX_WL_MASK 0x3F00
#define MADERA_AIF1TX_WL_SHIFT 8
-#define MADERA_AIF1TX_WL_WIDTH 6
#define MADERA_AIF1TX_SLOT_LEN_MASK 0x00FF
#define MADERA_AIF1TX_SLOT_LEN_SHIFT 0
-#define MADERA_AIF1TX_SLOT_LEN_WIDTH 8
/* (0x0508) AIF1_Frame_Ctrl_2 */
#define MADERA_AIF1RX_WL_MASK 0x3F00
#define MADERA_AIF1RX_WL_SHIFT 8
-#define MADERA_AIF1RX_WL_WIDTH 6
#define MADERA_AIF1RX_SLOT_LEN_MASK 0x00FF
#define MADERA_AIF1RX_SLOT_LEN_SHIFT 0
-#define MADERA_AIF1RX_SLOT_LEN_WIDTH 8
/* (0x0509) AIF1_Frame_Ctrl_3 */
#define MADERA_AIF1TX1_SLOT_MASK 0x003F
#define MADERA_AIF1TX1_SLOT_SHIFT 0
-#define MADERA_AIF1TX1_SLOT_WIDTH 6
/* (0x0519) AIF1_Tx_Enables */
#define MADERA_AIF1TX8_ENA 0x0080
#define MADERA_AIF1TX8_ENA_MASK 0x0080
#define MADERA_AIF1TX8_ENA_SHIFT 7
-#define MADERA_AIF1TX8_ENA_WIDTH 1
#define MADERA_AIF1TX7_ENA 0x0040
#define MADERA_AIF1TX7_ENA_MASK 0x0040
#define MADERA_AIF1TX7_ENA_SHIFT 6
-#define MADERA_AIF1TX7_ENA_WIDTH 1
#define MADERA_AIF1TX6_ENA 0x0020
#define MADERA_AIF1TX6_ENA_MASK 0x0020
#define MADERA_AIF1TX6_ENA_SHIFT 5
-#define MADERA_AIF1TX6_ENA_WIDTH 1
#define MADERA_AIF1TX5_ENA 0x0010
#define MADERA_AIF1TX5_ENA_MASK 0x0010
#define MADERA_AIF1TX5_ENA_SHIFT 4
-#define MADERA_AIF1TX5_ENA_WIDTH 1
#define MADERA_AIF1TX4_ENA 0x0008
#define MADERA_AIF1TX4_ENA_MASK 0x0008
#define MADERA_AIF1TX4_ENA_SHIFT 3
-#define MADERA_AIF1TX4_ENA_WIDTH 1
#define MADERA_AIF1TX3_ENA 0x0004
#define MADERA_AIF1TX3_ENA_MASK 0x0004
#define MADERA_AIF1TX3_ENA_SHIFT 2
-#define MADERA_AIF1TX3_ENA_WIDTH 1
#define MADERA_AIF1TX2_ENA 0x0002
#define MADERA_AIF1TX2_ENA_MASK 0x0002
#define MADERA_AIF1TX2_ENA_SHIFT 1
-#define MADERA_AIF1TX2_ENA_WIDTH 1
#define MADERA_AIF1TX1_ENA 0x0001
#define MADERA_AIF1TX1_ENA_MASK 0x0001
#define MADERA_AIF1TX1_ENA_SHIFT 0
-#define MADERA_AIF1TX1_ENA_WIDTH 1
/* (0x051A) AIF1_Rx_Enables */
#define MADERA_AIF1RX8_ENA 0x0080
#define MADERA_AIF1RX8_ENA_MASK 0x0080
#define MADERA_AIF1RX8_ENA_SHIFT 7
-#define MADERA_AIF1RX8_ENA_WIDTH 1
#define MADERA_AIF1RX7_ENA 0x0040
#define MADERA_AIF1RX7_ENA_MASK 0x0040
#define MADERA_AIF1RX7_ENA_SHIFT 6
-#define MADERA_AIF1RX7_ENA_WIDTH 1
#define MADERA_AIF1RX6_ENA 0x0020
#define MADERA_AIF1RX6_ENA_MASK 0x0020
#define MADERA_AIF1RX6_ENA_SHIFT 5
-#define MADERA_AIF1RX6_ENA_WIDTH 1
#define MADERA_AIF1RX5_ENA 0x0010
#define MADERA_AIF1RX5_ENA_MASK 0x0010
#define MADERA_AIF1RX5_ENA_SHIFT 4
-#define MADERA_AIF1RX5_ENA_WIDTH 1
#define MADERA_AIF1RX4_ENA 0x0008
#define MADERA_AIF1RX4_ENA_MASK 0x0008
#define MADERA_AIF1RX4_ENA_SHIFT 3
-#define MADERA_AIF1RX4_ENA_WIDTH 1
#define MADERA_AIF1RX3_ENA 0x0004
#define MADERA_AIF1RX3_ENA_MASK 0x0004
#define MADERA_AIF1RX3_ENA_SHIFT 2
-#define MADERA_AIF1RX3_ENA_WIDTH 1
#define MADERA_AIF1RX2_ENA 0x0002
#define MADERA_AIF1RX2_ENA_MASK 0x0002
#define MADERA_AIF1RX2_ENA_SHIFT 1
-#define MADERA_AIF1RX2_ENA_WIDTH 1
#define MADERA_AIF1RX1_ENA 0x0001
#define MADERA_AIF1RX1_ENA_MASK 0x0001
#define MADERA_AIF1RX1_ENA_SHIFT 0
-#define MADERA_AIF1RX1_ENA_WIDTH 1
/* (0x0559) AIF2_Tx_Enables */
#define MADERA_AIF2TX8_ENA 0x0080
#define MADERA_AIF2TX8_ENA_MASK 0x0080
#define MADERA_AIF2TX8_ENA_SHIFT 7
-#define MADERA_AIF2TX8_ENA_WIDTH 1
#define MADERA_AIF2TX7_ENA 0x0040
#define MADERA_AIF2TX7_ENA_MASK 0x0040
#define MADERA_AIF2TX7_ENA_SHIFT 6
-#define MADERA_AIF2TX7_ENA_WIDTH 1
#define MADERA_AIF2TX6_ENA 0x0020
#define MADERA_AIF2TX6_ENA_MASK 0x0020
#define MADERA_AIF2TX6_ENA_SHIFT 5
-#define MADERA_AIF2TX6_ENA_WIDTH 1
#define MADERA_AIF2TX5_ENA 0x0010
#define MADERA_AIF2TX5_ENA_MASK 0x0010
#define MADERA_AIF2TX5_ENA_SHIFT 4
-#define MADERA_AIF2TX5_ENA_WIDTH 1
#define MADERA_AIF2TX4_ENA 0x0008
#define MADERA_AIF2TX4_ENA_MASK 0x0008
#define MADERA_AIF2TX4_ENA_SHIFT 3
-#define MADERA_AIF2TX4_ENA_WIDTH 1
#define MADERA_AIF2TX3_ENA 0x0004
#define MADERA_AIF2TX3_ENA_MASK 0x0004
#define MADERA_AIF2TX3_ENA_SHIFT 2
-#define MADERA_AIF2TX3_ENA_WIDTH 1
#define MADERA_AIF2TX2_ENA 0x0002
#define MADERA_AIF2TX2_ENA_MASK 0x0002
#define MADERA_AIF2TX2_ENA_SHIFT 1
-#define MADERA_AIF2TX2_ENA_WIDTH 1
#define MADERA_AIF2TX1_ENA 0x0001
#define MADERA_AIF2TX1_ENA_MASK 0x0001
#define MADERA_AIF2TX1_ENA_SHIFT 0
-#define MADERA_AIF2TX1_ENA_WIDTH 1
/* (0x055A) AIF2_Rx_Enables */
#define MADERA_AIF2RX8_ENA 0x0080
#define MADERA_AIF2RX8_ENA_MASK 0x0080
#define MADERA_AIF2RX8_ENA_SHIFT 7
-#define MADERA_AIF2RX8_ENA_WIDTH 1
#define MADERA_AIF2RX7_ENA 0x0040
#define MADERA_AIF2RX7_ENA_MASK 0x0040
#define MADERA_AIF2RX7_ENA_SHIFT 6
-#define MADERA_AIF2RX7_ENA_WIDTH 1
#define MADERA_AIF2RX6_ENA 0x0020
#define MADERA_AIF2RX6_ENA_MASK 0x0020
#define MADERA_AIF2RX6_ENA_SHIFT 5
-#define MADERA_AIF2RX6_ENA_WIDTH 1
#define MADERA_AIF2RX5_ENA 0x0010
#define MADERA_AIF2RX5_ENA_MASK 0x0010
#define MADERA_AIF2RX5_ENA_SHIFT 4
-#define MADERA_AIF2RX5_ENA_WIDTH 1
#define MADERA_AIF2RX4_ENA 0x0008
#define MADERA_AIF2RX4_ENA_MASK 0x0008
#define MADERA_AIF2RX4_ENA_SHIFT 3
-#define MADERA_AIF2RX4_ENA_WIDTH 1
#define MADERA_AIF2RX3_ENA 0x0004
#define MADERA_AIF2RX3_ENA_MASK 0x0004
#define MADERA_AIF2RX3_ENA_SHIFT 2
-#define MADERA_AIF2RX3_ENA_WIDTH 1
#define MADERA_AIF2RX2_ENA 0x0002
#define MADERA_AIF2RX2_ENA_MASK 0x0002
#define MADERA_AIF2RX2_ENA_SHIFT 1
-#define MADERA_AIF2RX2_ENA_WIDTH 1
#define MADERA_AIF2RX1_ENA 0x0001
#define MADERA_AIF2RX1_ENA_MASK 0x0001
#define MADERA_AIF2RX1_ENA_SHIFT 0
-#define MADERA_AIF2RX1_ENA_WIDTH 1
/* (0x0599) AIF3_Tx_Enables */
#define MADERA_AIF3TX8_ENA 0x0080
#define MADERA_AIF3TX8_ENA_MASK 0x0080
#define MADERA_AIF3TX8_ENA_SHIFT 7
-#define MADERA_AIF3TX8_ENA_WIDTH 1
#define MADERA_AIF3TX7_ENA 0x0040
#define MADERA_AIF3TX7_ENA_MASK 0x0040
#define MADERA_AIF3TX7_ENA_SHIFT 6
-#define MADERA_AIF3TX7_ENA_WIDTH 1
#define MADERA_AIF3TX6_ENA 0x0020
#define MADERA_AIF3TX6_ENA_MASK 0x0020
#define MADERA_AIF3TX6_ENA_SHIFT 5
-#define MADERA_AIF3TX6_ENA_WIDTH 1
#define MADERA_AIF3TX5_ENA 0x0010
#define MADERA_AIF3TX5_ENA_MASK 0x0010
#define MADERA_AIF3TX5_ENA_SHIFT 4
-#define MADERA_AIF3TX5_ENA_WIDTH 1
#define MADERA_AIF3TX4_ENA 0x0008
#define MADERA_AIF3TX4_ENA_MASK 0x0008
#define MADERA_AIF3TX4_ENA_SHIFT 3
-#define MADERA_AIF3TX4_ENA_WIDTH 1
#define MADERA_AIF3TX3_ENA 0x0004
#define MADERA_AIF3TX3_ENA_MASK 0x0004
#define MADERA_AIF3TX3_ENA_SHIFT 2
-#define MADERA_AIF3TX3_ENA_WIDTH 1
#define MADERA_AIF3TX2_ENA 0x0002
#define MADERA_AIF3TX2_ENA_MASK 0x0002
#define MADERA_AIF3TX2_ENA_SHIFT 1
-#define MADERA_AIF3TX2_ENA_WIDTH 1
#define MADERA_AIF3TX1_ENA 0x0001
#define MADERA_AIF3TX1_ENA_MASK 0x0001
#define MADERA_AIF3TX1_ENA_SHIFT 0
-#define MADERA_AIF3TX1_ENA_WIDTH 1
/* (0x059A) AIF3_Rx_Enables */
#define MADERA_AIF3RX8_ENA 0x0080
#define MADERA_AIF3RX8_ENA_MASK 0x0080
#define MADERA_AIF3RX8_ENA_SHIFT 7
-#define MADERA_AIF3RX8_ENA_WIDTH 1
#define MADERA_AIF3RX7_ENA 0x0040
#define MADERA_AIF3RX7_ENA_MASK 0x0040
#define MADERA_AIF3RX7_ENA_SHIFT 6
-#define MADERA_AIF3RX7_ENA_WIDTH 1
#define MADERA_AIF3RX6_ENA 0x0020
#define MADERA_AIF3RX6_ENA_MASK 0x0020
#define MADERA_AIF3RX6_ENA_SHIFT 5
-#define MADERA_AIF3RX6_ENA_WIDTH 1
#define MADERA_AIF3RX5_ENA 0x0010
#define MADERA_AIF3RX5_ENA_MASK 0x0010
#define MADERA_AIF3RX5_ENA_SHIFT 4
-#define MADERA_AIF3RX5_ENA_WIDTH 1
#define MADERA_AIF3RX4_ENA 0x0008
#define MADERA_AIF3RX4_ENA_MASK 0x0008
#define MADERA_AIF3RX4_ENA_SHIFT 3
-#define MADERA_AIF3RX4_ENA_WIDTH 1
#define MADERA_AIF3RX3_ENA 0x0004
#define MADERA_AIF3RX3_ENA_MASK 0x0004
#define MADERA_AIF3RX3_ENA_SHIFT 2
-#define MADERA_AIF3RX3_ENA_WIDTH 1
#define MADERA_AIF3RX2_ENA 0x0002
#define MADERA_AIF3RX2_ENA_MASK 0x0002
#define MADERA_AIF3RX2_ENA_SHIFT 1
-#define MADERA_AIF3RX2_ENA_WIDTH 1
#define MADERA_AIF3RX1_ENA 0x0001
#define MADERA_AIF3RX1_ENA_MASK 0x0001
#define MADERA_AIF3RX1_ENA_SHIFT 0
-#define MADERA_AIF3RX1_ENA_WIDTH 1
/* (0x05B9) AIF4_Tx_Enables */
#define MADERA_AIF4TX2_ENA 0x0002
#define MADERA_AIF4TX2_ENA_MASK 0x0002
#define MADERA_AIF4TX2_ENA_SHIFT 1
-#define MADERA_AIF4TX2_ENA_WIDTH 1
#define MADERA_AIF4TX1_ENA 0x0001
#define MADERA_AIF4TX1_ENA_MASK 0x0001
#define MADERA_AIF4TX1_ENA_SHIFT 0
-#define MADERA_AIF4TX1_ENA_WIDTH 1
/* (0x05BA) AIF4_Rx_Enables */
#define MADERA_AIF4RX2_ENA 0x0002
#define MADERA_AIF4RX2_ENA_MASK 0x0002
#define MADERA_AIF4RX2_ENA_SHIFT 1
-#define MADERA_AIF4RX2_ENA_WIDTH 1
#define MADERA_AIF4RX1_ENA 0x0001
#define MADERA_AIF4RX1_ENA_MASK 0x0001
#define MADERA_AIF4RX1_ENA_SHIFT 0
-#define MADERA_AIF4RX1_ENA_WIDTH 1
/* (0x05C2) SPD1_TX_Control */
#define MADERA_SPD1_VAL2 0x2000
#define MADERA_SPD1_VAL2_MASK 0x2000
#define MADERA_SPD1_VAL2_SHIFT 13
-#define MADERA_SPD1_VAL2_WIDTH 1
#define MADERA_SPD1_VAL1 0x1000
#define MADERA_SPD1_VAL1_MASK 0x1000
#define MADERA_SPD1_VAL1_SHIFT 12
-#define MADERA_SPD1_VAL1_WIDTH 1
#define MADERA_SPD1_RATE_MASK 0x00F0
#define MADERA_SPD1_RATE_SHIFT 4
-#define MADERA_SPD1_RATE_WIDTH 4
#define MADERA_SPD1_ENA 0x0001
#define MADERA_SPD1_ENA_MASK 0x0001
#define MADERA_SPD1_ENA_SHIFT 0
-#define MADERA_SPD1_ENA_WIDTH 1
/* (0x05F5) SLIMbus_RX_Channel_Enable */
#define MADERA_SLIMRX8_ENA 0x0080
#define MADERA_SLIMRX8_ENA_MASK 0x0080
#define MADERA_SLIMRX8_ENA_SHIFT 7
-#define MADERA_SLIMRX8_ENA_WIDTH 1
#define MADERA_SLIMRX7_ENA 0x0040
#define MADERA_SLIMRX7_ENA_MASK 0x0040
#define MADERA_SLIMRX7_ENA_SHIFT 6
-#define MADERA_SLIMRX7_ENA_WIDTH 1
#define MADERA_SLIMRX6_ENA 0x0020
#define MADERA_SLIMRX6_ENA_MASK 0x0020
#define MADERA_SLIMRX6_ENA_SHIFT 5
-#define MADERA_SLIMRX6_ENA_WIDTH 1
#define MADERA_SLIMRX5_ENA 0x0010
#define MADERA_SLIMRX5_ENA_MASK 0x0010
#define MADERA_SLIMRX5_ENA_SHIFT 4
-#define MADERA_SLIMRX5_ENA_WIDTH 1
#define MADERA_SLIMRX4_ENA 0x0008
#define MADERA_SLIMRX4_ENA_MASK 0x0008
#define MADERA_SLIMRX4_ENA_SHIFT 3
-#define MADERA_SLIMRX4_ENA_WIDTH 1
#define MADERA_SLIMRX3_ENA 0x0004
#define MADERA_SLIMRX3_ENA_MASK 0x0004
#define MADERA_SLIMRX3_ENA_SHIFT 2
-#define MADERA_SLIMRX3_ENA_WIDTH 1
#define MADERA_SLIMRX2_ENA 0x0002
#define MADERA_SLIMRX2_ENA_MASK 0x0002
#define MADERA_SLIMRX2_ENA_SHIFT 1
-#define MADERA_SLIMRX2_ENA_WIDTH 1
#define MADERA_SLIMRX1_ENA 0x0001
#define MADERA_SLIMRX1_ENA_MASK 0x0001
#define MADERA_SLIMRX1_ENA_SHIFT 0
-#define MADERA_SLIMRX1_ENA_WIDTH 1
/* (0x05F6) SLIMbus_TX_Channel_Enable */
#define MADERA_SLIMTX8_ENA 0x0080
#define MADERA_SLIMTX8_ENA_MASK 0x0080
#define MADERA_SLIMTX8_ENA_SHIFT 7
-#define MADERA_SLIMTX8_ENA_WIDTH 1
#define MADERA_SLIMTX7_ENA 0x0040
#define MADERA_SLIMTX7_ENA_MASK 0x0040
#define MADERA_SLIMTX7_ENA_SHIFT 6
-#define MADERA_SLIMTX7_ENA_WIDTH 1
#define MADERA_SLIMTX6_ENA 0x0020
#define MADERA_SLIMTX6_ENA_MASK 0x0020
#define MADERA_SLIMTX6_ENA_SHIFT 5
-#define MADERA_SLIMTX6_ENA_WIDTH 1
#define MADERA_SLIMTX5_ENA 0x0010
#define MADERA_SLIMTX5_ENA_MASK 0x0010
#define MADERA_SLIMTX5_ENA_SHIFT 4
-#define MADERA_SLIMTX5_ENA_WIDTH 1
#define MADERA_SLIMTX4_ENA 0x0008
#define MADERA_SLIMTX4_ENA_MASK 0x0008
#define MADERA_SLIMTX4_ENA_SHIFT 3
-#define MADERA_SLIMTX4_ENA_WIDTH 1
#define MADERA_SLIMTX3_ENA 0x0004
#define MADERA_SLIMTX3_ENA_MASK 0x0004
#define MADERA_SLIMTX3_ENA_SHIFT 2
-#define MADERA_SLIMTX3_ENA_WIDTH 1
#define MADERA_SLIMTX2_ENA 0x0002
#define MADERA_SLIMTX2_ENA_MASK 0x0002
#define MADERA_SLIMTX2_ENA_SHIFT 1
-#define MADERA_SLIMTX2_ENA_WIDTH 1
#define MADERA_SLIMTX1_ENA 0x0001
#define MADERA_SLIMTX1_ENA_MASK 0x0001
#define MADERA_SLIMTX1_ENA_SHIFT 0
-#define MADERA_SLIMTX1_ENA_WIDTH 1
/* (0x0E10) EQ1_1 */
#define MADERA_EQ1_B1_GAIN_MASK 0xF800
#define MADERA_EQ1_B1_GAIN_SHIFT 11
-#define MADERA_EQ1_B1_GAIN_WIDTH 5
#define MADERA_EQ1_B2_GAIN_MASK 0x07C0
#define MADERA_EQ1_B2_GAIN_SHIFT 6
-#define MADERA_EQ1_B2_GAIN_WIDTH 5
#define MADERA_EQ1_B3_GAIN_MASK 0x003E
#define MADERA_EQ1_B3_GAIN_SHIFT 1
-#define MADERA_EQ1_B3_GAIN_WIDTH 5
#define MADERA_EQ1_ENA 0x0001
#define MADERA_EQ1_ENA_MASK 0x0001
#define MADERA_EQ1_ENA_SHIFT 0
-#define MADERA_EQ1_ENA_WIDTH 1
/* (0x0E11) EQ1_2 */
#define MADERA_EQ1_B4_GAIN_MASK 0xF800
#define MADERA_EQ1_B4_GAIN_SHIFT 11
-#define MADERA_EQ1_B4_GAIN_WIDTH 5
#define MADERA_EQ1_B5_GAIN_MASK 0x07C0
#define MADERA_EQ1_B5_GAIN_SHIFT 6
-#define MADERA_EQ1_B5_GAIN_WIDTH 5
#define MADERA_EQ1_B1_MODE 0x0001
#define MADERA_EQ1_B1_MODE_MASK 0x0001
#define MADERA_EQ1_B1_MODE_SHIFT 0
-#define MADERA_EQ1_B1_MODE_WIDTH 1
/* (0x0E26) EQ2_1 */
#define MADERA_EQ2_B1_GAIN_MASK 0xF800
#define MADERA_EQ2_B1_GAIN_SHIFT 11
-#define MADERA_EQ2_B1_GAIN_WIDTH 5
#define MADERA_EQ2_B2_GAIN_MASK 0x07C0
#define MADERA_EQ2_B2_GAIN_SHIFT 6
-#define MADERA_EQ2_B2_GAIN_WIDTH 5
#define MADERA_EQ2_B3_GAIN_MASK 0x003E
#define MADERA_EQ2_B3_GAIN_SHIFT 1
-#define MADERA_EQ2_B3_GAIN_WIDTH 5
#define MADERA_EQ2_ENA 0x0001
#define MADERA_EQ2_ENA_MASK 0x0001
#define MADERA_EQ2_ENA_SHIFT 0
-#define MADERA_EQ2_ENA_WIDTH 1
/* (0x0E27) EQ2_2 */
#define MADERA_EQ2_B4_GAIN_MASK 0xF800
#define MADERA_EQ2_B4_GAIN_SHIFT 11
-#define MADERA_EQ2_B4_GAIN_WIDTH 5
#define MADERA_EQ2_B5_GAIN_MASK 0x07C0
#define MADERA_EQ2_B5_GAIN_SHIFT 6
-#define MADERA_EQ2_B5_GAIN_WIDTH 5
#define MADERA_EQ2_B1_MODE 0x0001
#define MADERA_EQ2_B1_MODE_MASK 0x0001
#define MADERA_EQ2_B1_MODE_SHIFT 0
-#define MADERA_EQ2_B1_MODE_WIDTH 1
/* (0x0E3C) EQ3_1 */
#define MADERA_EQ3_B1_GAIN_MASK 0xF800
#define MADERA_EQ3_B1_GAIN_SHIFT 11
-#define MADERA_EQ3_B1_GAIN_WIDTH 5
#define MADERA_EQ3_B2_GAIN_MASK 0x07C0
#define MADERA_EQ3_B2_GAIN_SHIFT 6
-#define MADERA_EQ3_B2_GAIN_WIDTH 5
#define MADERA_EQ3_B3_GAIN_MASK 0x003E
#define MADERA_EQ3_B3_GAIN_SHIFT 1
-#define MADERA_EQ3_B3_GAIN_WIDTH 5
#define MADERA_EQ3_ENA 0x0001
#define MADERA_EQ3_ENA_MASK 0x0001
#define MADERA_EQ3_ENA_SHIFT 0
-#define MADERA_EQ3_ENA_WIDTH 1
/* (0x0E3D) EQ3_2 */
#define MADERA_EQ3_B4_GAIN_MASK 0xF800
#define MADERA_EQ3_B4_GAIN_SHIFT 11
-#define MADERA_EQ3_B4_GAIN_WIDTH 5
#define MADERA_EQ3_B5_GAIN_MASK 0x07C0
#define MADERA_EQ3_B5_GAIN_SHIFT 6
-#define MADERA_EQ3_B5_GAIN_WIDTH 5
#define MADERA_EQ3_B1_MODE 0x0001
#define MADERA_EQ3_B1_MODE_MASK 0x0001
#define MADERA_EQ3_B1_MODE_SHIFT 0
-#define MADERA_EQ3_B1_MODE_WIDTH 1
/* (0x0E52) EQ4_1 */
#define MADERA_EQ4_B1_GAIN_MASK 0xF800
#define MADERA_EQ4_B1_GAIN_SHIFT 11
-#define MADERA_EQ4_B1_GAIN_WIDTH 5
#define MADERA_EQ4_B2_GAIN_MASK 0x07C0
#define MADERA_EQ4_B2_GAIN_SHIFT 6
-#define MADERA_EQ4_B2_GAIN_WIDTH 5
#define MADERA_EQ4_B3_GAIN_MASK 0x003E
#define MADERA_EQ4_B3_GAIN_SHIFT 1
-#define MADERA_EQ4_B3_GAIN_WIDTH 5
#define MADERA_EQ4_ENA 0x0001
#define MADERA_EQ4_ENA_MASK 0x0001
#define MADERA_EQ4_ENA_SHIFT 0
-#define MADERA_EQ4_ENA_WIDTH 1
/* (0x0E53) EQ4_2 */
#define MADERA_EQ4_B4_GAIN_MASK 0xF800
#define MADERA_EQ4_B4_GAIN_SHIFT 11
-#define MADERA_EQ4_B4_GAIN_WIDTH 5
#define MADERA_EQ4_B5_GAIN_MASK 0x07C0
#define MADERA_EQ4_B5_GAIN_SHIFT 6
-#define MADERA_EQ4_B5_GAIN_WIDTH 5
#define MADERA_EQ4_B1_MODE 0x0001
#define MADERA_EQ4_B1_MODE_MASK 0x0001
#define MADERA_EQ4_B1_MODE_SHIFT 0
-#define MADERA_EQ4_B1_MODE_WIDTH 1
/* (0x0E80) DRC1_ctrl1 */
#define MADERA_DRC1L_ENA 0x0002
#define MADERA_DRC1L_ENA_MASK 0x0002
#define MADERA_DRC1L_ENA_SHIFT 1
-#define MADERA_DRC1L_ENA_WIDTH 1
#define MADERA_DRC1R_ENA 0x0001
#define MADERA_DRC1R_ENA_MASK 0x0001
#define MADERA_DRC1R_ENA_SHIFT 0
-#define MADERA_DRC1R_ENA_WIDTH 1
/* (0x0E88) DRC2_ctrl1 */
#define MADERA_DRC2L_ENA 0x0002
#define MADERA_DRC2L_ENA_MASK 0x0002
#define MADERA_DRC2L_ENA_SHIFT 1
-#define MADERA_DRC2L_ENA_WIDTH 1
#define MADERA_DRC2R_ENA 0x0001
#define MADERA_DRC2R_ENA_MASK 0x0001
#define MADERA_DRC2R_ENA_SHIFT 0
-#define MADERA_DRC2R_ENA_WIDTH 1
/* (0x0EC0) HPLPF1_1 */
#define MADERA_LHPF1_MODE 0x0002
#define MADERA_LHPF1_MODE_MASK 0x0002
#define MADERA_LHPF1_MODE_SHIFT 1
-#define MADERA_LHPF1_MODE_WIDTH 1
#define MADERA_LHPF1_ENA 0x0001
#define MADERA_LHPF1_ENA_MASK 0x0001
#define MADERA_LHPF1_ENA_SHIFT 0
-#define MADERA_LHPF1_ENA_WIDTH 1
/* (0x0EC1) HPLPF1_2 */
#define MADERA_LHPF1_COEFF_MASK 0xFFFF
#define MADERA_LHPF1_COEFF_SHIFT 0
-#define MADERA_LHPF1_COEFF_WIDTH 16
/* (0x0EC4) HPLPF2_1 */
#define MADERA_LHPF2_MODE 0x0002
#define MADERA_LHPF2_MODE_MASK 0x0002
#define MADERA_LHPF2_MODE_SHIFT 1
-#define MADERA_LHPF2_MODE_WIDTH 1
#define MADERA_LHPF2_ENA 0x0001
#define MADERA_LHPF2_ENA_MASK 0x0001
#define MADERA_LHPF2_ENA_SHIFT 0
-#define MADERA_LHPF2_ENA_WIDTH 1
/* (0x0EC5) HPLPF2_2 */
#define MADERA_LHPF2_COEFF_MASK 0xFFFF
#define MADERA_LHPF2_COEFF_SHIFT 0
-#define MADERA_LHPF2_COEFF_WIDTH 16
/* (0x0EC8) HPLPF3_1 */
#define MADERA_LHPF3_MODE 0x0002
#define MADERA_LHPF3_MODE_MASK 0x0002
#define MADERA_LHPF3_MODE_SHIFT 1
-#define MADERA_LHPF3_MODE_WIDTH 1
#define MADERA_LHPF3_ENA 0x0001
#define MADERA_LHPF3_ENA_MASK 0x0001
#define MADERA_LHPF3_ENA_SHIFT 0
-#define MADERA_LHPF3_ENA_WIDTH 1
/* (0x0EC9) HPLPF3_2 */
#define MADERA_LHPF3_COEFF_MASK 0xFFFF
#define MADERA_LHPF3_COEFF_SHIFT 0
-#define MADERA_LHPF3_COEFF_WIDTH 16
/* (0x0ECC) HPLPF4_1 */
#define MADERA_LHPF4_MODE 0x0002
#define MADERA_LHPF4_MODE_MASK 0x0002
#define MADERA_LHPF4_MODE_SHIFT 1
-#define MADERA_LHPF4_MODE_WIDTH 1
#define MADERA_LHPF4_ENA 0x0001
#define MADERA_LHPF4_ENA_MASK 0x0001
#define MADERA_LHPF4_ENA_SHIFT 0
-#define MADERA_LHPF4_ENA_WIDTH 1
/* (0x0ECD) HPLPF4_2 */
#define MADERA_LHPF4_COEFF_MASK 0xFFFF
#define MADERA_LHPF4_COEFF_SHIFT 0
-#define MADERA_LHPF4_COEFF_WIDTH 16
/* (0x0ED0) ASRC2_ENABLE */
#define MADERA_ASRC2_IN2L_ENA 0x0008
#define MADERA_ASRC2_IN2L_ENA_MASK 0x0008
#define MADERA_ASRC2_IN2L_ENA_SHIFT 3
-#define MADERA_ASRC2_IN2L_ENA_WIDTH 1
#define MADERA_ASRC2_IN2R_ENA 0x0004
#define MADERA_ASRC2_IN2R_ENA_MASK 0x0004
#define MADERA_ASRC2_IN2R_ENA_SHIFT 2
-#define MADERA_ASRC2_IN2R_ENA_WIDTH 1
#define MADERA_ASRC2_IN1L_ENA 0x0002
#define MADERA_ASRC2_IN1L_ENA_MASK 0x0002
#define MADERA_ASRC2_IN1L_ENA_SHIFT 1
-#define MADERA_ASRC2_IN1L_ENA_WIDTH 1
#define MADERA_ASRC2_IN1R_ENA 0x0001
#define MADERA_ASRC2_IN1R_ENA_MASK 0x0001
#define MADERA_ASRC2_IN1R_ENA_SHIFT 0
-#define MADERA_ASRC2_IN1R_ENA_WIDTH 1
/* (0x0ED2) ASRC2_RATE1 */
#define MADERA_ASRC2_RATE1_MASK 0xF800
#define MADERA_ASRC2_RATE1_SHIFT 11
-#define MADERA_ASRC2_RATE1_WIDTH 5
/* (0x0ED3) ASRC2_RATE2 */
#define MADERA_ASRC2_RATE2_MASK 0xF800
#define MADERA_ASRC2_RATE2_SHIFT 11
-#define MADERA_ASRC2_RATE2_WIDTH 5
/* (0x0EE0) ASRC1_ENABLE */
#define MADERA_ASRC1_IN2L_ENA 0x0008
#define MADERA_ASRC1_IN2L_ENA_MASK 0x0008
#define MADERA_ASRC1_IN2L_ENA_SHIFT 3
-#define MADERA_ASRC1_IN2L_ENA_WIDTH 1
#define MADERA_ASRC1_IN2R_ENA 0x0004
#define MADERA_ASRC1_IN2R_ENA_MASK 0x0004
#define MADERA_ASRC1_IN2R_ENA_SHIFT 2
-#define MADERA_ASRC1_IN2R_ENA_WIDTH 1
#define MADERA_ASRC1_IN1L_ENA 0x0002
#define MADERA_ASRC1_IN1L_ENA_MASK 0x0002
#define MADERA_ASRC1_IN1L_ENA_SHIFT 1
-#define MADERA_ASRC1_IN1L_ENA_WIDTH 1
#define MADERA_ASRC1_IN1R_ENA 0x0001
#define MADERA_ASRC1_IN1R_ENA_MASK 0x0001
#define MADERA_ASRC1_IN1R_ENA_SHIFT 0
-#define MADERA_ASRC1_IN1R_ENA_WIDTH 1
/* (0x0EE2) ASRC1_RATE1 */
#define MADERA_ASRC1_RATE1_MASK 0xF800
#define MADERA_ASRC1_RATE1_SHIFT 11
-#define MADERA_ASRC1_RATE1_WIDTH 5
/* (0x0EE3) ASRC1_RATE2 */
#define MADERA_ASRC1_RATE2_MASK 0xF800
#define MADERA_ASRC1_RATE2_SHIFT 11
-#define MADERA_ASRC1_RATE2_WIDTH 5
/* (0x0EF0) - ISRC1 CTRL 1 */
#define MADERA_ISRC1_FSH_MASK 0xF800
#define MADERA_ISRC1_FSH_SHIFT 11
-#define MADERA_ISRC1_FSH_WIDTH 5
#define MADERA_ISRC1_CLK_SEL_MASK 0x0700
#define MADERA_ISRC1_CLK_SEL_SHIFT 8
-#define MADERA_ISRC1_CLK_SEL_WIDTH 3
/* (0x0EF1) ISRC1_CTRL_2 */
#define MADERA_ISRC1_FSL_MASK 0xF800
#define MADERA_ISRC1_FSL_SHIFT 11
-#define MADERA_ISRC1_FSL_WIDTH 5
/* (0x0EF2) ISRC1_CTRL_3 */
#define MADERA_ISRC1_INT1_ENA 0x8000
#define MADERA_ISRC1_INT1_ENA_MASK 0x8000
#define MADERA_ISRC1_INT1_ENA_SHIFT 15
-#define MADERA_ISRC1_INT1_ENA_WIDTH 1
#define MADERA_ISRC1_INT2_ENA 0x4000
#define MADERA_ISRC1_INT2_ENA_MASK 0x4000
#define MADERA_ISRC1_INT2_ENA_SHIFT 14
-#define MADERA_ISRC1_INT2_ENA_WIDTH 1
#define MADERA_ISRC1_INT3_ENA 0x2000
#define MADERA_ISRC1_INT3_ENA_MASK 0x2000
#define MADERA_ISRC1_INT3_ENA_SHIFT 13
-#define MADERA_ISRC1_INT3_ENA_WIDTH 1
#define MADERA_ISRC1_INT4_ENA 0x1000
#define MADERA_ISRC1_INT4_ENA_MASK 0x1000
#define MADERA_ISRC1_INT4_ENA_SHIFT 12
-#define MADERA_ISRC1_INT4_ENA_WIDTH 1
#define MADERA_ISRC1_DEC1_ENA 0x0200
#define MADERA_ISRC1_DEC1_ENA_MASK 0x0200
#define MADERA_ISRC1_DEC1_ENA_SHIFT 9
-#define MADERA_ISRC1_DEC1_ENA_WIDTH 1
#define MADERA_ISRC1_DEC2_ENA 0x0100
#define MADERA_ISRC1_DEC2_ENA_MASK 0x0100
#define MADERA_ISRC1_DEC2_ENA_SHIFT 8
-#define MADERA_ISRC1_DEC2_ENA_WIDTH 1
#define MADERA_ISRC1_DEC3_ENA 0x0080
#define MADERA_ISRC1_DEC3_ENA_MASK 0x0080
#define MADERA_ISRC1_DEC3_ENA_SHIFT 7
-#define MADERA_ISRC1_DEC3_ENA_WIDTH 1
#define MADERA_ISRC1_DEC4_ENA 0x0040
#define MADERA_ISRC1_DEC4_ENA_MASK 0x0040
#define MADERA_ISRC1_DEC4_ENA_SHIFT 6
-#define MADERA_ISRC1_DEC4_ENA_WIDTH 1
#define MADERA_ISRC1_NOTCH_ENA 0x0001
#define MADERA_ISRC1_NOTCH_ENA_MASK 0x0001
#define MADERA_ISRC1_NOTCH_ENA_SHIFT 0
-#define MADERA_ISRC1_NOTCH_ENA_WIDTH 1
/* (0x0EF3) ISRC2_CTRL_1 */
#define MADERA_ISRC2_FSH_MASK 0xF800
#define MADERA_ISRC2_FSH_SHIFT 11
-#define MADERA_ISRC2_FSH_WIDTH 5
#define MADERA_ISRC2_CLK_SEL_MASK 0x0700
#define MADERA_ISRC2_CLK_SEL_SHIFT 8
-#define MADERA_ISRC2_CLK_SEL_WIDTH 3
/* (0x0EF4) ISRC2_CTRL_2 */
#define MADERA_ISRC2_FSL_MASK 0xF800
#define MADERA_ISRC2_FSL_SHIFT 11
-#define MADERA_ISRC2_FSL_WIDTH 5
/* (0x0EF5) ISRC2_CTRL_3 */
#define MADERA_ISRC2_INT1_ENA 0x8000
#define MADERA_ISRC2_INT1_ENA_MASK 0x8000
#define MADERA_ISRC2_INT1_ENA_SHIFT 15
-#define MADERA_ISRC2_INT1_ENA_WIDTH 1
#define MADERA_ISRC2_INT2_ENA 0x4000
#define MADERA_ISRC2_INT2_ENA_MASK 0x4000
#define MADERA_ISRC2_INT2_ENA_SHIFT 14
-#define MADERA_ISRC2_INT2_ENA_WIDTH 1
#define MADERA_ISRC2_INT3_ENA 0x2000
#define MADERA_ISRC2_INT3_ENA_MASK 0x2000
#define MADERA_ISRC2_INT3_ENA_SHIFT 13
-#define MADERA_ISRC2_INT3_ENA_WIDTH 1
#define MADERA_ISRC2_INT4_ENA 0x1000
#define MADERA_ISRC2_INT4_ENA_MASK 0x1000
#define MADERA_ISRC2_INT4_ENA_SHIFT 12
-#define MADERA_ISRC2_INT4_ENA_WIDTH 1
#define MADERA_ISRC2_DEC1_ENA 0x0200
#define MADERA_ISRC2_DEC1_ENA_MASK 0x0200
#define MADERA_ISRC2_DEC1_ENA_SHIFT 9
-#define MADERA_ISRC2_DEC1_ENA_WIDTH 1
#define MADERA_ISRC2_DEC2_ENA 0x0100
#define MADERA_ISRC2_DEC2_ENA_MASK 0x0100
#define MADERA_ISRC2_DEC2_ENA_SHIFT 8
-#define MADERA_ISRC2_DEC2_ENA_WIDTH 1
#define MADERA_ISRC2_DEC3_ENA 0x0080
#define MADERA_ISRC2_DEC3_ENA_MASK 0x0080
#define MADERA_ISRC2_DEC3_ENA_SHIFT 7
-#define MADERA_ISRC2_DEC3_ENA_WIDTH 1
#define MADERA_ISRC2_DEC4_ENA 0x0040
#define MADERA_ISRC2_DEC4_ENA_MASK 0x0040
#define MADERA_ISRC2_DEC4_ENA_SHIFT 6
-#define MADERA_ISRC2_DEC4_ENA_WIDTH 1
#define MADERA_ISRC2_NOTCH_ENA 0x0001
#define MADERA_ISRC2_NOTCH_ENA_MASK 0x0001
#define MADERA_ISRC2_NOTCH_ENA_SHIFT 0
-#define MADERA_ISRC2_NOTCH_ENA_WIDTH 1
/* (0x0EF6) ISRC3_CTRL_1 */
#define MADERA_ISRC3_FSH_MASK 0xF800
#define MADERA_ISRC3_FSH_SHIFT 11
-#define MADERA_ISRC3_FSH_WIDTH 5
#define MADERA_ISRC3_CLK_SEL_MASK 0x0700
#define MADERA_ISRC3_CLK_SEL_SHIFT 8
-#define MADERA_ISRC3_CLK_SEL_WIDTH 3
/* (0x0EF7) ISRC3_CTRL_2 */
#define MADERA_ISRC3_FSL_MASK 0xF800
#define MADERA_ISRC3_FSL_SHIFT 11
-#define MADERA_ISRC3_FSL_WIDTH 5
/* (0x0EF8) ISRC3_CTRL_3 */
#define MADERA_ISRC3_INT1_ENA 0x8000
#define MADERA_ISRC3_INT1_ENA_MASK 0x8000
#define MADERA_ISRC3_INT1_ENA_SHIFT 15
-#define MADERA_ISRC3_INT1_ENA_WIDTH 1
#define MADERA_ISRC3_INT2_ENA 0x4000
#define MADERA_ISRC3_INT2_ENA_MASK 0x4000
#define MADERA_ISRC3_INT2_ENA_SHIFT 14
-#define MADERA_ISRC3_INT2_ENA_WIDTH 1
#define MADERA_ISRC3_INT3_ENA 0x2000
#define MADERA_ISRC3_INT3_ENA_MASK 0x2000
#define MADERA_ISRC3_INT3_ENA_SHIFT 13
-#define MADERA_ISRC3_INT3_ENA_WIDTH 1
#define MADERA_ISRC3_INT4_ENA 0x1000
#define MADERA_ISRC3_INT4_ENA_MASK 0x1000
#define MADERA_ISRC3_INT4_ENA_SHIFT 12
-#define MADERA_ISRC3_INT4_ENA_WIDTH 1
#define MADERA_ISRC3_DEC1_ENA 0x0200
#define MADERA_ISRC3_DEC1_ENA_MASK 0x0200
#define MADERA_ISRC3_DEC1_ENA_SHIFT 9
-#define MADERA_ISRC3_DEC1_ENA_WIDTH 1
#define MADERA_ISRC3_DEC2_ENA 0x0100
#define MADERA_ISRC3_DEC2_ENA_MASK 0x0100
#define MADERA_ISRC3_DEC2_ENA_SHIFT 8
-#define MADERA_ISRC3_DEC2_ENA_WIDTH 1
#define MADERA_ISRC3_DEC3_ENA 0x0080
#define MADERA_ISRC3_DEC3_ENA_MASK 0x0080
#define MADERA_ISRC3_DEC3_ENA_SHIFT 7
-#define MADERA_ISRC3_DEC3_ENA_WIDTH 1
#define MADERA_ISRC3_DEC4_ENA 0x0040
#define MADERA_ISRC3_DEC4_ENA_MASK 0x0040
#define MADERA_ISRC3_DEC4_ENA_SHIFT 6
-#define MADERA_ISRC3_DEC4_ENA_WIDTH 1
#define MADERA_ISRC3_NOTCH_ENA 0x0001
#define MADERA_ISRC3_NOTCH_ENA_MASK 0x0001
#define MADERA_ISRC3_NOTCH_ENA_SHIFT 0
-#define MADERA_ISRC3_NOTCH_ENA_WIDTH 1
/* (0x0EF9) ISRC4_CTRL_1 */
#define MADERA_ISRC4_FSH_MASK 0xF800
#define MADERA_ISRC4_FSH_SHIFT 11
-#define MADERA_ISRC4_FSH_WIDTH 5
#define MADERA_ISRC4_CLK_SEL_MASK 0x0700
#define MADERA_ISRC4_CLK_SEL_SHIFT 8
-#define MADERA_ISRC4_CLK_SEL_WIDTH 3
/* (0x0EFA) ISRC4_CTRL_2 */
#define MADERA_ISRC4_FSL_MASK 0xF800
#define MADERA_ISRC4_FSL_SHIFT 11
-#define MADERA_ISRC4_FSL_WIDTH 5
/* (0x0EFB) ISRC4_CTRL_3 */
#define MADERA_ISRC4_INT1_ENA 0x8000
#define MADERA_ISRC4_INT1_ENA_MASK 0x8000
#define MADERA_ISRC4_INT1_ENA_SHIFT 15
-#define MADERA_ISRC4_INT1_ENA_WIDTH 1
#define MADERA_ISRC4_INT2_ENA 0x4000
#define MADERA_ISRC4_INT2_ENA_MASK 0x4000
#define MADERA_ISRC4_INT2_ENA_SHIFT 14
-#define MADERA_ISRC4_INT2_ENA_WIDTH 1
#define MADERA_ISRC4_INT3_ENA 0x2000
#define MADERA_ISRC4_INT3_ENA_MASK 0x2000
#define MADERA_ISRC4_INT3_ENA_SHIFT 13
-#define MADERA_ISRC4_INT3_ENA_WIDTH 1
#define MADERA_ISRC4_INT4_ENA 0x1000
#define MADERA_ISRC4_INT4_ENA_MASK 0x1000
#define MADERA_ISRC4_INT4_ENA_SHIFT 12
-#define MADERA_ISRC4_INT4_ENA_WIDTH 1
#define MADERA_ISRC4_DEC1_ENA 0x0200
#define MADERA_ISRC4_DEC1_ENA_MASK 0x0200
#define MADERA_ISRC4_DEC1_ENA_SHIFT 9
-#define MADERA_ISRC4_DEC1_ENA_WIDTH 1
#define MADERA_ISRC4_DEC2_ENA 0x0100
#define MADERA_ISRC4_DEC2_ENA_MASK 0x0100
#define MADERA_ISRC4_DEC2_ENA_SHIFT 8
-#define MADERA_ISRC4_DEC2_ENA_WIDTH 1
#define MADERA_ISRC4_DEC3_ENA 0x0080
#define MADERA_ISRC4_DEC3_ENA_MASK 0x0080
#define MADERA_ISRC4_DEC3_ENA_SHIFT 7
-#define MADERA_ISRC4_DEC3_ENA_WIDTH 1
#define MADERA_ISRC4_DEC4_ENA 0x0040
#define MADERA_ISRC4_DEC4_ENA_MASK 0x0040
#define MADERA_ISRC4_DEC4_ENA_SHIFT 6
-#define MADERA_ISRC4_DEC4_ENA_WIDTH 1
#define MADERA_ISRC4_NOTCH_ENA 0x0001
#define MADERA_ISRC4_NOTCH_ENA_MASK 0x0001
#define MADERA_ISRC4_NOTCH_ENA_SHIFT 0
-#define MADERA_ISRC4_NOTCH_ENA_WIDTH 1
/* (0x0F00) Clock_Control */
#define MADERA_EXT_NG_SEL_CLR 0x0080
#define MADERA_EXT_NG_SEL_CLR_MASK 0x0080
#define MADERA_EXT_NG_SEL_CLR_SHIFT 7
-#define MADERA_EXT_NG_SEL_CLR_WIDTH 1
#define MADERA_EXT_NG_SEL_SET 0x0040
#define MADERA_EXT_NG_SEL_SET_MASK 0x0040
#define MADERA_EXT_NG_SEL_SET_SHIFT 6
-#define MADERA_EXT_NG_SEL_SET_WIDTH 1
#define MADERA_CLK_R_ENA_CLR 0x0020
#define MADERA_CLK_R_ENA_CLR_MASK 0x0020
#define MADERA_CLK_R_ENA_CLR_SHIFT 5
-#define MADERA_CLK_R_ENA_CLR_WIDTH 1
#define MADERA_CLK_R_ENA_SET 0x0010
#define MADERA_CLK_R_ENA_SET_MASK 0x0010
#define MADERA_CLK_R_ENA_SET_SHIFT 4
-#define MADERA_CLK_R_ENA_SET_WIDTH 1
#define MADERA_CLK_NG_ENA_CLR 0x0008
#define MADERA_CLK_NG_ENA_CLR_MASK 0x0008
#define MADERA_CLK_NG_ENA_CLR_SHIFT 3
-#define MADERA_CLK_NG_ENA_CLR_WIDTH 1
#define MADERA_CLK_NG_ENA_SET 0x0004
#define MADERA_CLK_NG_ENA_SET_MASK 0x0004
#define MADERA_CLK_NG_ENA_SET_SHIFT 2
-#define MADERA_CLK_NG_ENA_SET_WIDTH 1
#define MADERA_CLK_L_ENA_CLR 0x0002
#define MADERA_CLK_L_ENA_CLR_MASK 0x0002
#define MADERA_CLK_L_ENA_CLR_SHIFT 1
-#define MADERA_CLK_L_ENA_CLR_WIDTH 1
#define MADERA_CLK_L_ENA_SET 0x0001
#define MADERA_CLK_L_ENA_SET_MASK 0x0001
#define MADERA_CLK_L_ENA_SET_SHIFT 0
-#define MADERA_CLK_L_ENA_SET_WIDTH 1
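/*
 * Illustrative note (not part of the original header): the paired _SET/_CLR
 * bits above imply a write-one-to-set / write-one-to-clear register, so a
 * single write can enable one clock and disable another without a
 * read-modify-write cycle. A minimal sketch, assuming the register-address
 * define MADERA_CLOCK_CONTROL (0x0F00) and the device's regmap handle:
 *
 *	regmap_write(madera->regmap, MADERA_CLOCK_CONTROL,
 *		     MADERA_CLK_L_ENA_SET | MADERA_CLK_R_ENA_CLR);
 */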
/* (0x0F01) ANC_SRC */
#define MADERA_IN_RXANCR_SEL_MASK 0x0070
#define MADERA_IN_RXANCR_SEL_SHIFT 4
-#define MADERA_IN_RXANCR_SEL_WIDTH 3
#define MADERA_IN_RXANCL_SEL_MASK 0x0007
#define MADERA_IN_RXANCL_SEL_SHIFT 0
-#define MADERA_IN_RXANCL_SEL_WIDTH 3
/* (0x0F17) FCL_ADC_reformatter_control */
#define MADERA_FCL_MIC_MODE_SEL 0x000C
#define MADERA_FCL_MIC_MODE_SEL_SHIFT 2
-#define MADERA_FCL_MIC_MODE_SEL_WIDTH 2
/* (0x0F73) FCR_ADC_reformatter_control */
#define MADERA_FCR_MIC_MODE_SEL 0x000C
#define MADERA_FCR_MIC_MODE_SEL_SHIFT 2
-#define MADERA_FCR_MIC_MODE_SEL_WIDTH 2
/* (0x10C0) AUXPDM1_CTRL_0 */
#define MADERA_AUXPDM1_SRC_MASK 0x0F00
#define MADERA_AUXPDM1_SRC_SHIFT 8
-#define MADERA_AUXPDM1_SRC_WIDTH 4
#define MADERA_AUXPDM1_TXEDGE_MASK 0x0010
#define MADERA_AUXPDM1_TXEDGE_SHIFT 4
-#define MADERA_AUXPDM1_TXEDGE_WIDTH 1
#define MADERA_AUXPDM1_MSTR_MASK 0x0008
#define MADERA_AUXPDM1_MSTR_SHIFT 3
-#define MADERA_AUXPDM1_MSTR_WIDTH 1
#define MADERA_AUXPDM1_ENABLE_MASK 0x0001
#define MADERA_AUXPDM1_ENABLE_SHIFT 0
-#define MADERA_AUXPDM1_ENABLE_WIDTH 1
/* (0x10C1) AUXPDM1_CTRL_1 */
#define MADERA_AUXPDM1_CLK_FREQ_MASK 0xC000
#define MADERA_AUXPDM1_CLK_FREQ_SHIFT 14
-#define MADERA_AUXPDM1_CLK_FREQ_WIDTH 2
/* (0x1480) DFC1_CTRL_W0 */
#define MADERA_DFC1_RATE_MASK 0x007C
#define MADERA_DFC1_RATE_SHIFT 2
-#define MADERA_DFC1_RATE_WIDTH 5
#define MADERA_DFC1_DITH_ENA 0x0002
#define MADERA_DFC1_DITH_ENA_MASK 0x0002
#define MADERA_DFC1_DITH_ENA_SHIFT 1
-#define MADERA_DFC1_DITH_ENA_WIDTH 1
#define MADERA_DFC1_ENA 0x0001
#define MADERA_DFC1_ENA_MASK 0x0001
#define MADERA_DFC1_ENA_SHIFT 0
-#define MADERA_DFC1_ENA_WIDTH 1
/* (0x1482) DFC1_RX_W0 */
#define MADERA_DFC1_RX_DATA_WIDTH_MASK 0x1F00
#define MADERA_DFC1_RX_DATA_WIDTH_SHIFT 8
-#define MADERA_DFC1_RX_DATA_WIDTH_WIDTH 5
#define MADERA_DFC1_RX_DATA_TYPE_MASK 0x0007
#define MADERA_DFC1_RX_DATA_TYPE_SHIFT 0
-#define MADERA_DFC1_RX_DATA_TYPE_WIDTH 3
/* (0x1484) DFC1_TX_W0 */
#define MADERA_DFC1_TX_DATA_WIDTH_MASK 0x1F00
#define MADERA_DFC1_TX_DATA_WIDTH_SHIFT 8
-#define MADERA_DFC1_TX_DATA_WIDTH_WIDTH 5
#define MADERA_DFC1_TX_DATA_TYPE_MASK 0x0007
#define MADERA_DFC1_TX_DATA_TYPE_SHIFT 0
-#define MADERA_DFC1_TX_DATA_TYPE_WIDTH 3
/* (0x1600) ADSP2_IRQ0 */
#define MADERA_DSP_IRQ2 0x0002
@@ -3636,449 +3103,347 @@
#define MADERA_GP1_LVL 0x8000
#define MADERA_GP1_LVL_MASK 0x8000
#define MADERA_GP1_LVL_SHIFT 15
-#define MADERA_GP1_LVL_WIDTH 1
#define MADERA_GP1_OP_CFG 0x4000
#define MADERA_GP1_OP_CFG_MASK 0x4000
#define MADERA_GP1_OP_CFG_SHIFT 14
-#define MADERA_GP1_OP_CFG_WIDTH 1
#define MADERA_GP1_DB 0x2000
#define MADERA_GP1_DB_MASK 0x2000
#define MADERA_GP1_DB_SHIFT 13
-#define MADERA_GP1_DB_WIDTH 1
#define MADERA_GP1_POL 0x1000
#define MADERA_GP1_POL_MASK 0x1000
#define MADERA_GP1_POL_SHIFT 12
-#define MADERA_GP1_POL_WIDTH 1
#define MADERA_GP1_IP_CFG 0x0800
#define MADERA_GP1_IP_CFG_MASK 0x0800
#define MADERA_GP1_IP_CFG_SHIFT 11
-#define MADERA_GP1_IP_CFG_WIDTH 1
#define MADERA_GP1_FN_MASK 0x03FF
#define MADERA_GP1_FN_SHIFT 0
-#define MADERA_GP1_FN_WIDTH 10
/* (0x1701) GPIO1_CTRL_2 */
#define MADERA_GP1_DIR 0x8000
#define MADERA_GP1_DIR_MASK 0x8000
#define MADERA_GP1_DIR_SHIFT 15
-#define MADERA_GP1_DIR_WIDTH 1
#define MADERA_GP1_PU 0x4000
#define MADERA_GP1_PU_MASK 0x4000
#define MADERA_GP1_PU_SHIFT 14
-#define MADERA_GP1_PU_WIDTH 1
#define MADERA_GP1_PD 0x2000
#define MADERA_GP1_PD_MASK 0x2000
#define MADERA_GP1_PD_SHIFT 13
-#define MADERA_GP1_PD_WIDTH 1
#define MADERA_GP1_DRV_STR_MASK 0x1800
#define MADERA_GP1_DRV_STR_SHIFT 11
-#define MADERA_GP1_DRV_STR_WIDTH 2
/* (0x1800) IRQ1_Status_1 */
#define MADERA_CTRLIF_ERR_EINT1 0x1000
#define MADERA_CTRLIF_ERR_EINT1_MASK 0x1000
#define MADERA_CTRLIF_ERR_EINT1_SHIFT 12
-#define MADERA_CTRLIF_ERR_EINT1_WIDTH 1
#define MADERA_SYSCLK_FAIL_EINT1 0x0200
#define MADERA_SYSCLK_FAIL_EINT1_MASK 0x0200
#define MADERA_SYSCLK_FAIL_EINT1_SHIFT 9
-#define MADERA_SYSCLK_FAIL_EINT1_WIDTH 1
#define MADERA_CLOCK_DETECT_EINT1 0x0100
#define MADERA_CLOCK_DETECT_EINT1_MASK 0x0100
#define MADERA_CLOCK_DETECT_EINT1_SHIFT 8
-#define MADERA_CLOCK_DETECT_EINT1_WIDTH 1
#define MADERA_BOOT_DONE_EINT1 0x0080
#define MADERA_BOOT_DONE_EINT1_MASK 0x0080
#define MADERA_BOOT_DONE_EINT1_SHIFT 7
-#define MADERA_BOOT_DONE_EINT1_WIDTH 1
/* (0x1801) IRQ1_Status_2 */
#define MADERA_FLLAO_LOCK_EINT1 0x0800
#define MADERA_FLLAO_LOCK_EINT1_MASK 0x0800
#define MADERA_FLLAO_LOCK_EINT1_SHIFT 11
-#define MADERA_FLLAO_LOCK_EINT1_WIDTH 1
#define MADERA_FLL3_LOCK_EINT1 0x0400
#define MADERA_FLL3_LOCK_EINT1_MASK 0x0400
#define MADERA_FLL3_LOCK_EINT1_SHIFT 10
-#define MADERA_FLL3_LOCK_EINT1_WIDTH 1
#define MADERA_FLL2_LOCK_EINT1 0x0200
#define MADERA_FLL2_LOCK_EINT1_MASK 0x0200
#define MADERA_FLL2_LOCK_EINT1_SHIFT 9
-#define MADERA_FLL2_LOCK_EINT1_WIDTH 1
#define MADERA_FLL1_LOCK_EINT1 0x0100
#define MADERA_FLL1_LOCK_EINT1_MASK 0x0100
#define MADERA_FLL1_LOCK_EINT1_SHIFT 8
-#define MADERA_FLL1_LOCK_EINT1_WIDTH 1
/* (0x1805) IRQ1_Status_6 */
#define MADERA_MICDET2_EINT1 0x0200
#define MADERA_MICDET2_EINT1_MASK 0x0200
#define MADERA_MICDET2_EINT1_SHIFT 9
-#define MADERA_MICDET2_EINT1_WIDTH 1
#define MADERA_MICDET1_EINT1 0x0100
#define MADERA_MICDET1_EINT1_MASK 0x0100
#define MADERA_MICDET1_EINT1_SHIFT 8
-#define MADERA_MICDET1_EINT1_WIDTH 1
#define MADERA_HPDET_EINT1 0x0001
#define MADERA_HPDET_EINT1_MASK 0x0001
#define MADERA_HPDET_EINT1_SHIFT 0
-#define MADERA_HPDET_EINT1_WIDTH 1
/* (0x1806) IRQ1_Status_7 */
#define MADERA_MICD_CLAMP_FALL_EINT1 0x0020
#define MADERA_MICD_CLAMP_FALL_EINT1_MASK 0x0020
#define MADERA_MICD_CLAMP_FALL_EINT1_SHIFT 5
-#define MADERA_MICD_CLAMP_FALL_EINT1_WIDTH 1
#define MADERA_MICD_CLAMP_RISE_EINT1 0x0010
#define MADERA_MICD_CLAMP_RISE_EINT1_MASK 0x0010
#define MADERA_MICD_CLAMP_RISE_EINT1_SHIFT 4
-#define MADERA_MICD_CLAMP_RISE_EINT1_WIDTH 1
#define MADERA_JD2_FALL_EINT1 0x0008
#define MADERA_JD2_FALL_EINT1_MASK 0x0008
#define MADERA_JD2_FALL_EINT1_SHIFT 3
-#define MADERA_JD2_FALL_EINT1_WIDTH 1
#define MADERA_JD2_RISE_EINT1 0x0004
#define MADERA_JD2_RISE_EINT1_MASK 0x0004
#define MADERA_JD2_RISE_EINT1_SHIFT 2
-#define MADERA_JD2_RISE_EINT1_WIDTH 1
#define MADERA_JD1_FALL_EINT1 0x0002
#define MADERA_JD1_FALL_EINT1_MASK 0x0002
#define MADERA_JD1_FALL_EINT1_SHIFT 1
-#define MADERA_JD1_FALL_EINT1_WIDTH 1
#define MADERA_JD1_RISE_EINT1 0x0001
#define MADERA_JD1_RISE_EINT1_MASK 0x0001
#define MADERA_JD1_RISE_EINT1_SHIFT 0
-#define MADERA_JD1_RISE_EINT1_WIDTH 1
/* (0x1808) IRQ1_Status_9 */
#define MADERA_ASRC2_IN2_LOCK_EINT1 0x0800
#define MADERA_ASRC2_IN2_LOCK_EINT1_MASK 0x0800
#define MADERA_ASRC2_IN2_LOCK_EINT1_SHIFT 11
-#define MADERA_ASRC2_IN2_LOCK_EINT1_WIDTH 1
#define MADERA_ASRC2_IN1_LOCK_EINT1 0x0400
#define MADERA_ASRC2_IN1_LOCK_EINT1_MASK 0x0400
#define MADERA_ASRC2_IN1_LOCK_EINT1_SHIFT 10
-#define MADERA_ASRC2_IN1_LOCK_EINT1_WIDTH 1
#define MADERA_ASRC1_IN2_LOCK_EINT1 0x0200
#define MADERA_ASRC1_IN2_LOCK_EINT1_MASK 0x0200
#define MADERA_ASRC1_IN2_LOCK_EINT1_SHIFT 9
-#define MADERA_ASRC1_IN2_LOCK_EINT1_WIDTH 1
#define MADERA_ASRC1_IN1_LOCK_EINT1 0x0100
#define MADERA_ASRC1_IN1_LOCK_EINT1_MASK 0x0100
#define MADERA_ASRC1_IN1_LOCK_EINT1_SHIFT 8
-#define MADERA_ASRC1_IN1_LOCK_EINT1_WIDTH 1
#define MADERA_DRC2_SIG_DET_EINT1 0x0002
#define MADERA_DRC2_SIG_DET_EINT1_MASK 0x0002
#define MADERA_DRC2_SIG_DET_EINT1_SHIFT 1
-#define MADERA_DRC2_SIG_DET_EINT1_WIDTH 1
#define MADERA_DRC1_SIG_DET_EINT1 0x0001
#define MADERA_DRC1_SIG_DET_EINT1_MASK 0x0001
#define MADERA_DRC1_SIG_DET_EINT1_SHIFT 0
-#define MADERA_DRC1_SIG_DET_EINT1_WIDTH 1
/* (0x180A) IRQ1_Status_11 */
#define MADERA_DSP_IRQ16_EINT1 0x8000
#define MADERA_DSP_IRQ16_EINT1_MASK 0x8000
#define MADERA_DSP_IRQ16_EINT1_SHIFT 15
-#define MADERA_DSP_IRQ16_EINT1_WIDTH 1
#define MADERA_DSP_IRQ15_EINT1 0x4000
#define MADERA_DSP_IRQ15_EINT1_MASK 0x4000
#define MADERA_DSP_IRQ15_EINT1_SHIFT 14
-#define MADERA_DSP_IRQ15_EINT1_WIDTH 1
#define MADERA_DSP_IRQ14_EINT1 0x2000
#define MADERA_DSP_IRQ14_EINT1_MASK 0x2000
#define MADERA_DSP_IRQ14_EINT1_SHIFT 13
-#define MADERA_DSP_IRQ14_EINT1_WIDTH 1
#define MADERA_DSP_IRQ13_EINT1 0x1000
#define MADERA_DSP_IRQ13_EINT1_MASK 0x1000
#define MADERA_DSP_IRQ13_EINT1_SHIFT 12
-#define MADERA_DSP_IRQ13_EINT1_WIDTH 1
#define MADERA_DSP_IRQ12_EINT1 0x0800
#define MADERA_DSP_IRQ12_EINT1_MASK 0x0800
#define MADERA_DSP_IRQ12_EINT1_SHIFT 11
-#define MADERA_DSP_IRQ12_EINT1_WIDTH 1
#define MADERA_DSP_IRQ11_EINT1 0x0400
#define MADERA_DSP_IRQ11_EINT1_MASK 0x0400
#define MADERA_DSP_IRQ11_EINT1_SHIFT 10
-#define MADERA_DSP_IRQ11_EINT1_WIDTH 1
#define MADERA_DSP_IRQ10_EINT1 0x0200
#define MADERA_DSP_IRQ10_EINT1_MASK 0x0200
#define MADERA_DSP_IRQ10_EINT1_SHIFT 9
-#define MADERA_DSP_IRQ10_EINT1_WIDTH 1
#define MADERA_DSP_IRQ9_EINT1 0x0100
#define MADERA_DSP_IRQ9_EINT1_MASK 0x0100
#define MADERA_DSP_IRQ9_EINT1_SHIFT 8
-#define MADERA_DSP_IRQ9_EINT1_WIDTH 1
#define MADERA_DSP_IRQ8_EINT1 0x0080
#define MADERA_DSP_IRQ8_EINT1_MASK 0x0080
#define MADERA_DSP_IRQ8_EINT1_SHIFT 7
-#define MADERA_DSP_IRQ8_EINT1_WIDTH 1
#define MADERA_DSP_IRQ7_EINT1 0x0040
#define MADERA_DSP_IRQ7_EINT1_MASK 0x0040
#define MADERA_DSP_IRQ7_EINT1_SHIFT 6
-#define MADERA_DSP_IRQ7_EINT1_WIDTH 1
#define MADERA_DSP_IRQ6_EINT1 0x0020
#define MADERA_DSP_IRQ6_EINT1_MASK 0x0020
#define MADERA_DSP_IRQ6_EINT1_SHIFT 5
-#define MADERA_DSP_IRQ6_EINT1_WIDTH 1
#define MADERA_DSP_IRQ5_EINT1 0x0010
#define MADERA_DSP_IRQ5_EINT1_MASK 0x0010
#define MADERA_DSP_IRQ5_EINT1_SHIFT 4
-#define MADERA_DSP_IRQ5_EINT1_WIDTH 1
#define MADERA_DSP_IRQ4_EINT1 0x0008
#define MADERA_DSP_IRQ4_EINT1_MASK 0x0008
#define MADERA_DSP_IRQ4_EINT1_SHIFT 3
-#define MADERA_DSP_IRQ4_EINT1_WIDTH 1
#define MADERA_DSP_IRQ3_EINT1 0x0004
#define MADERA_DSP_IRQ3_EINT1_MASK 0x0004
#define MADERA_DSP_IRQ3_EINT1_SHIFT 2
-#define MADERA_DSP_IRQ3_EINT1_WIDTH 1
#define MADERA_DSP_IRQ2_EINT1 0x0002
#define MADERA_DSP_IRQ2_EINT1_MASK 0x0002
#define MADERA_DSP_IRQ2_EINT1_SHIFT 1
-#define MADERA_DSP_IRQ2_EINT1_WIDTH 1
#define MADERA_DSP_IRQ1_EINT1 0x0001
#define MADERA_DSP_IRQ1_EINT1_MASK 0x0001
#define MADERA_DSP_IRQ1_EINT1_SHIFT 0
-#define MADERA_DSP_IRQ1_EINT1_WIDTH 1
/* (0x180B) IRQ1_Status_12 */
#define MADERA_SPKOUTR_SC_EINT1 0x0080
#define MADERA_SPKOUTR_SC_EINT1_MASK 0x0080
#define MADERA_SPKOUTR_SC_EINT1_SHIFT 7
-#define MADERA_SPKOUTR_SC_EINT1_WIDTH 1
#define MADERA_SPKOUTL_SC_EINT1 0x0040
#define MADERA_SPKOUTL_SC_EINT1_MASK 0x0040
#define MADERA_SPKOUTL_SC_EINT1_SHIFT 6
-#define MADERA_SPKOUTL_SC_EINT1_WIDTH 1
#define MADERA_HP3R_SC_EINT1 0x0020
#define MADERA_HP3R_SC_EINT1_MASK 0x0020
#define MADERA_HP3R_SC_EINT1_SHIFT 5
-#define MADERA_HP3R_SC_EINT1_WIDTH 1
#define MADERA_HP3L_SC_EINT1 0x0010
#define MADERA_HP3L_SC_EINT1_MASK 0x0010
#define MADERA_HP3L_SC_EINT1_SHIFT 4
-#define MADERA_HP3L_SC_EINT1_WIDTH 1
#define MADERA_HP2R_SC_EINT1 0x0008
#define MADERA_HP2R_SC_EINT1_MASK 0x0008
#define MADERA_HP2R_SC_EINT1_SHIFT 3
-#define MADERA_HP2R_SC_EINT1_WIDTH 1
#define MADERA_HP2L_SC_EINT1 0x0004
#define MADERA_HP2L_SC_EINT1_MASK 0x0004
#define MADERA_HP2L_SC_EINT1_SHIFT 2
-#define MADERA_HP2L_SC_EINT1_WIDTH 1
#define MADERA_HP1R_SC_EINT1 0x0002
#define MADERA_HP1R_SC_EINT1_MASK 0x0002
#define MADERA_HP1R_SC_EINT1_SHIFT 1
-#define MADERA_HP1R_SC_EINT1_WIDTH 1
#define MADERA_HP1L_SC_EINT1 0x0001
#define MADERA_HP1L_SC_EINT1_MASK 0x0001
#define MADERA_HP1L_SC_EINT1_SHIFT 0
-#define MADERA_HP1L_SC_EINT1_WIDTH 1
/* (0x180E) IRQ1_Status_15 */
#define MADERA_SPK_OVERHEAT_WARN_EINT1 0x0004
#define MADERA_SPK_OVERHEAT_WARN_EINT1_MASK 0x0004
#define MADERA_SPK_OVERHEAT_WARN_EINT1_SHIFT 2
-#define MADERA_SPK_OVERHEAT_WARN_EINT1_WIDTH 1
#define MADERA_SPK_OVERHEAT_EINT1 0x0002
#define MADERA_SPK_OVERHEAT_EINT1_MASK 0x0002
#define MADERA_SPK_OVERHEAT_EINT1_SHIFT 1
-#define MADERA_SPK_OVERHEAT_EINT1_WIDTH 1
#define MADERA_SPK_SHUTDOWN_EINT1 0x0001
#define MADERA_SPK_SHUTDOWN_EINT1_MASK 0x0001
#define MADERA_SPK_SHUTDOWN_EINT1_SHIFT 0
-#define MADERA_SPK_SHUTDOWN_EINT1_WIDTH 1
/* (0x1820) - IRQ1 Status 33 */
#define MADERA_DSP7_BUS_ERR_EINT1 0x0040
#define MADERA_DSP7_BUS_ERR_EINT1_MASK 0x0040
#define MADERA_DSP7_BUS_ERR_EINT1_SHIFT 6
-#define MADERA_DSP7_BUS_ERR_EINT1_WIDTH 1
#define MADERA_DSP6_BUS_ERR_EINT1 0x0020
#define MADERA_DSP6_BUS_ERR_EINT1_MASK 0x0020
#define MADERA_DSP6_BUS_ERR_EINT1_SHIFT 5
-#define MADERA_DSP6_BUS_ERR_EINT1_WIDTH 1
#define MADERA_DSP5_BUS_ERR_EINT1 0x0010
#define MADERA_DSP5_BUS_ERR_EINT1_MASK 0x0010
#define MADERA_DSP5_BUS_ERR_EINT1_SHIFT 4
-#define MADERA_DSP5_BUS_ERR_EINT1_WIDTH 1
#define MADERA_DSP4_BUS_ERR_EINT1 0x0008
#define MADERA_DSP4_BUS_ERR_EINT1_MASK 0x0008
#define MADERA_DSP4_BUS_ERR_EINT1_SHIFT 3
-#define MADERA_DSP4_BUS_ERR_EINT1_WIDTH 1
#define MADERA_DSP3_BUS_ERR_EINT1 0x0004
#define MADERA_DSP3_BUS_ERR_EINT1_MASK 0x0004
#define MADERA_DSP3_BUS_ERR_EINT1_SHIFT 2
-#define MADERA_DSP3_BUS_ERR_EINT1_WIDTH 1
#define MADERA_DSP2_BUS_ERR_EINT1 0x0002
#define MADERA_DSP2_BUS_ERR_EINT1_MASK 0x0002
#define MADERA_DSP2_BUS_ERR_EINT1_SHIFT 1
-#define MADERA_DSP2_BUS_ERR_EINT1_WIDTH 1
#define MADERA_DSP1_BUS_ERR_EINT1 0x0001
#define MADERA_DSP1_BUS_ERR_EINT1_MASK 0x0001
#define MADERA_DSP1_BUS_ERR_EINT1_SHIFT 0
-#define MADERA_DSP1_BUS_ERR_EINT1_WIDTH 1
/* (0x1845) IRQ1_Mask_6 */
#define MADERA_IM_MICDET2_EINT1 0x0200
#define MADERA_IM_MICDET2_EINT1_MASK 0x0200
#define MADERA_IM_MICDET2_EINT1_SHIFT 9
-#define MADERA_IM_MICDET2_EINT1_WIDTH 1
#define MADERA_IM_MICDET1_EINT1 0x0100
#define MADERA_IM_MICDET1_EINT1_MASK 0x0100
#define MADERA_IM_MICDET1_EINT1_SHIFT 8
-#define MADERA_IM_MICDET1_EINT1_WIDTH 1
#define MADERA_IM_HPDET_EINT1 0x0001
#define MADERA_IM_HPDET_EINT1_MASK 0x0001
#define MADERA_IM_HPDET_EINT1_SHIFT 0
-#define MADERA_IM_HPDET_EINT1_WIDTH 1
/* (0x184E) IRQ1_Mask_15 */
#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1 0x0004
#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1_MASK 0x0004
#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1_SHIFT 2
-#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1_WIDTH 1
#define MADERA_IM_SPK_OVERHEAT_EINT1 0x0002
#define MADERA_IM_SPK_OVERHEAT_EINT1_MASK 0x0002
#define MADERA_IM_SPK_OVERHEAT_EINT1_SHIFT 1
-#define MADERA_IM_SPK_OVERHEAT_EINT1_WIDTH 1
#define MADERA_IM_SPK_SHUTDOWN_EINT1 0x0001
#define MADERA_IM_SPK_SHUTDOWN_EINT1_MASK 0x0001
#define MADERA_IM_SPK_SHUTDOWN_EINT1_SHIFT 0
-#define MADERA_IM_SPK_SHUTDOWN_EINT1_WIDTH 1
/* (0x1880) - IRQ1 Raw Status 1 */
#define MADERA_CTRLIF_ERR_STS1 0x1000
#define MADERA_CTRLIF_ERR_STS1_MASK 0x1000
#define MADERA_CTRLIF_ERR_STS1_SHIFT 12
-#define MADERA_CTRLIF_ERR_STS1_WIDTH 1
#define MADERA_SYSCLK_FAIL_STS1 0x0200
#define MADERA_SYSCLK_FAIL_STS1_MASK 0x0200
#define MADERA_SYSCLK_FAIL_STS1_SHIFT 9
-#define MADERA_SYSCLK_FAIL_STS1_WIDTH 1
#define MADERA_CLOCK_DETECT_STS1 0x0100
#define MADERA_CLOCK_DETECT_STS1_MASK 0x0100
#define MADERA_CLOCK_DETECT_STS1_SHIFT 8
-#define MADERA_CLOCK_DETECT_STS1_WIDTH 1
#define MADERA_BOOT_DONE_STS1 0x0080
#define MADERA_BOOT_DONE_STS1_MASK 0x0080
#define MADERA_BOOT_DONE_STS1_SHIFT 7
-#define MADERA_BOOT_DONE_STS1_WIDTH 1
/* (0x1881) - IRQ1 Raw Status 2 */
#define MADERA_FLL3_LOCK_STS1 0x0400
#define MADERA_FLL3_LOCK_STS1_MASK 0x0400
#define MADERA_FLL3_LOCK_STS1_SHIFT 10
-#define MADERA_FLL3_LOCK_STS1_WIDTH 1
#define MADERA_FLL2_LOCK_STS1 0x0200
#define MADERA_FLL2_LOCK_STS1_MASK 0x0200
#define MADERA_FLL2_LOCK_STS1_SHIFT 9
-#define MADERA_FLL2_LOCK_STS1_WIDTH 1
#define MADERA_FLL1_LOCK_STS1 0x0100
#define MADERA_FLL1_LOCK_STS1_MASK 0x0100
#define MADERA_FLL1_LOCK_STS1_SHIFT 8
-#define MADERA_FLL1_LOCK_STS1_WIDTH 1
/* (0x1886) - IRQ1 Raw Status 7 */
#define MADERA_MICD_CLAMP_FALL_STS1 0x0020
#define MADERA_MICD_CLAMP_FALL_STS1_MASK 0x0020
#define MADERA_MICD_CLAMP_FALL_STS1_SHIFT 5
-#define MADERA_MICD_CLAMP_FALL_STS1_WIDTH 1
#define MADERA_MICD_CLAMP_RISE_STS1 0x0010
#define MADERA_MICD_CLAMP_RISE_STS1_MASK 0x0010
#define MADERA_MICD_CLAMP_RISE_STS1_SHIFT 4
-#define MADERA_MICD_CLAMP_RISE_STS1_WIDTH 1
#define MADERA_JD2_FALL_STS1 0x0008
#define MADERA_JD2_FALL_STS1_MASK 0x0008
#define MADERA_JD2_FALL_STS1_SHIFT 3
-#define MADERA_JD2_FALL_STS1_WIDTH 1
#define MADERA_JD2_RISE_STS1 0x0004
#define MADERA_JD2_RISE_STS1_MASK 0x0004
#define MADERA_JD2_RISE_STS1_SHIFT 2
-#define MADERA_JD2_RISE_STS1_WIDTH 1
#define MADERA_JD1_FALL_STS1 0x0002
#define MADERA_JD1_FALL_STS1_MASK 0x0002
#define MADERA_JD1_FALL_STS1_SHIFT 1
-#define MADERA_JD1_FALL_STS1_WIDTH 1
#define MADERA_JD1_RISE_STS1 0x0001
#define MADERA_JD1_RISE_STS1_MASK 0x0001
#define MADERA_JD1_RISE_STS1_SHIFT 0
-#define MADERA_JD1_RISE_STS1_WIDTH 1
/* (0x188E) - IRQ1 Raw Status 15 */
#define MADERA_SPK_OVERHEAT_WARN_STS1 0x0004
#define MADERA_SPK_OVERHEAT_WARN_STS1_MASK 0x0004
#define MADERA_SPK_OVERHEAT_WARN_STS1_SHIFT 2
-#define MADERA_SPK_OVERHEAT_WARN_STS1_WIDTH 1
#define MADERA_SPK_OVERHEAT_STS1 0x0002
#define MADERA_SPK_OVERHEAT_STS1_MASK 0x0002
#define MADERA_SPK_OVERHEAT_STS1_SHIFT 1
-#define MADERA_SPK_OVERHEAT_STS1_WIDTH 1
#define MADERA_SPK_SHUTDOWN_STS1 0x0001
#define MADERA_SPK_SHUTDOWN_STS1_MASK 0x0001
#define MADERA_SPK_SHUTDOWN_STS1_SHIFT 0
-#define MADERA_SPK_SHUTDOWN_STS1_WIDTH 1
/* (0x1A06) Interrupt_Debounce_7 */
#define MADERA_MICD_CLAMP_DB 0x0010
#define MADERA_MICD_CLAMP_DB_MASK 0x0010
#define MADERA_MICD_CLAMP_DB_SHIFT 4
-#define MADERA_MICD_CLAMP_DB_WIDTH 1
#define MADERA_JD2_DB 0x0004
#define MADERA_JD2_DB_MASK 0x0004
#define MADERA_JD2_DB_SHIFT 2
-#define MADERA_JD2_DB_WIDTH 1
#define MADERA_JD1_DB 0x0001
#define MADERA_JD1_DB_MASK 0x0001
#define MADERA_JD1_DB_SHIFT 0
-#define MADERA_JD1_DB_WIDTH 1
/* (0x1A0E) Interrupt_Debounce_15 */
#define MADERA_SPK_OVERHEAT_WARN_DB 0x0004
#define MADERA_SPK_OVERHEAT_WARN_DB_MASK 0x0004
#define MADERA_SPK_OVERHEAT_WARN_DB_SHIFT 2
-#define MADERA_SPK_OVERHEAT_WARN_DB_WIDTH 1
#define MADERA_SPK_OVERHEAT_DB 0x0002
#define MADERA_SPK_OVERHEAT_DB_MASK 0x0002
#define MADERA_SPK_OVERHEAT_DB_SHIFT 1
-#define MADERA_SPK_OVERHEAT_DB_WIDTH 1
/* (0x1A80) IRQ1_CTRL */
#define MADERA_IM_IRQ1 0x0800
#define MADERA_IM_IRQ1_MASK 0x0800
#define MADERA_IM_IRQ1_SHIFT 11
-#define MADERA_IM_IRQ1_WIDTH 1
#define MADERA_IRQ_POL 0x0400
#define MADERA_IRQ_POL_MASK 0x0400
#define MADERA_IRQ_POL_SHIFT 10
-#define MADERA_IRQ_POL_WIDTH 1
/* (0x20004) OTP_HPDET_Cal_1 */
#define MADERA_OTP_HPDET_CALIB_OFFSET_11 0xFF000000
#define MADERA_OTP_HPDET_CALIB_OFFSET_11_MASK 0xFF000000
#define MADERA_OTP_HPDET_CALIB_OFFSET_11_SHIFT 24
-#define MADERA_OTP_HPDET_CALIB_OFFSET_11_WIDTH 8
#define MADERA_OTP_HPDET_CALIB_OFFSET_10 0x00FF0000
#define MADERA_OTP_HPDET_CALIB_OFFSET_10_MASK 0x00FF0000
#define MADERA_OTP_HPDET_CALIB_OFFSET_10_SHIFT 16
-#define MADERA_OTP_HPDET_CALIB_OFFSET_10_WIDTH 8
#define MADERA_OTP_HPDET_CALIB_OFFSET_01 0x0000FF00
#define MADERA_OTP_HPDET_CALIB_OFFSET_01_MASK 0x0000FF00
#define MADERA_OTP_HPDET_CALIB_OFFSET_01_SHIFT 8
-#define MADERA_OTP_HPDET_CALIB_OFFSET_01_WIDTH 8
#define MADERA_OTP_HPDET_CALIB_OFFSET_00 0x000000FF
#define MADERA_OTP_HPDET_CALIB_OFFSET_00_MASK 0x000000FF
#define MADERA_OTP_HPDET_CALIB_OFFSET_00_SHIFT 0
-#define MADERA_OTP_HPDET_CALIB_OFFSET_00_WIDTH 8
/* (0x20006) OTP_HPDET_Cal_2 */
#define MADERA_OTP_HPDET_GRADIENT_1X 0x0000FF00
#define MADERA_OTP_HPDET_GRADIENT_1X_MASK 0x0000FF00
#define MADERA_OTP_HPDET_GRADIENT_1X_SHIFT 8
-#define MADERA_OTP_HPDET_GRADIENT_1X_WIDTH 8
#define MADERA_OTP_HPDET_GRADIENT_0X 0x000000FF
#define MADERA_OTP_HPDET_GRADIENT_0X_MASK 0x000000FF
#define MADERA_OTP_HPDET_GRADIENT_0X_SHIFT 0
-#define MADERA_OTP_HPDET_GRADIENT_0X_WIDTH 8
#endif
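
The surviving value/_MASK/_SHIFT triplets are all a driver needs to read or pack these fields, which is why the redundant single-bit _WIDTH defines are being dropped above. A minimal sketch of the usual access pattern, assuming a 16-bit register value already read from the chip (the helper names are illustrative, not part of the header):

/* Extract the HPDET interrupt-mask bit from an IRQ1_Mask_6 value. */
static inline unsigned int madera_hpdet_masked(unsigned int val)
{
	return (val & MADERA_IM_HPDET_EINT1_MASK) >> MADERA_IM_HPDET_EINT1_SHIFT;
}

/* Set or clear the same bit before writing the register back. */
static inline unsigned int madera_set_hpdet_mask(unsigned int val, unsigned int mask_it)
{
	val &= ~MADERA_IM_HPDET_EINT1_MASK;
	if (mask_it)
		val |= MADERA_IM_HPDET_EINT1;
	return val;
}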
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h
index 833e578e051e..3acceeedbaba 100644
--- a/include/linux/mfd/max77686-private.h
+++ b/include/linux/mfd/max77686-private.h
@@ -133,35 +133,35 @@ enum max77686_pmic_reg {
/* Reserved: 0x7A-0x7D */
MAX77686_REG_BBAT_CHG = 0x7E,
- MAX77686_REG_32KHZ = 0x7F,
+ MAX77686_REG_32KHZ = 0x7F,
MAX77686_REG_PMIC_END = 0x80,
};
enum max77686_rtc_reg {
- MAX77686_RTC_INT = 0x00,
- MAX77686_RTC_INTM = 0x01,
+ MAX77686_RTC_INT = 0x00,
+ MAX77686_RTC_INTM = 0x01,
MAX77686_RTC_CONTROLM = 0x02,
MAX77686_RTC_CONTROL = 0x03,
MAX77686_RTC_UPDATE0 = 0x04,
/* Reserved: 0x5 */
MAX77686_WTSR_SMPL_CNTL = 0x06,
- MAX77686_RTC_SEC = 0x07,
- MAX77686_RTC_MIN = 0x08,
- MAX77686_RTC_HOUR = 0x09,
+ MAX77686_RTC_SEC = 0x07,
+ MAX77686_RTC_MIN = 0x08,
+ MAX77686_RTC_HOUR = 0x09,
MAX77686_RTC_WEEKDAY = 0x0A,
- MAX77686_RTC_MONTH = 0x0B,
- MAX77686_RTC_YEAR = 0x0C,
- MAX77686_RTC_DATE = 0x0D,
- MAX77686_ALARM1_SEC = 0x0E,
- MAX77686_ALARM1_MIN = 0x0F,
+ MAX77686_RTC_MONTH = 0x0B,
+ MAX77686_RTC_YEAR = 0x0C,
+ MAX77686_RTC_MONTHDAY = 0x0D,
+ MAX77686_ALARM1_SEC = 0x0E,
+ MAX77686_ALARM1_MIN = 0x0F,
MAX77686_ALARM1_HOUR = 0x10,
MAX77686_ALARM1_WEEKDAY = 0x11,
MAX77686_ALARM1_MONTH = 0x12,
MAX77686_ALARM1_YEAR = 0x13,
MAX77686_ALARM1_DATE = 0x14,
- MAX77686_ALARM2_SEC = 0x15,
- MAX77686_ALARM2_MIN = 0x16,
+ MAX77686_ALARM2_SEC = 0x15,
+ MAX77686_ALARM2_MIN = 0x16,
MAX77686_ALARM2_HOUR = 0x17,
MAX77686_ALARM2_WEEKDAY = 0x18,
MAX77686_ALARM2_MONTH = 0x19,
@@ -352,7 +352,7 @@ enum max77802_rtc_reg {
MAX77802_RTC_WEEKDAY = 0xCA,
MAX77802_RTC_MONTH = 0xCB,
MAX77802_RTC_YEAR = 0xCC,
- MAX77802_RTC_DATE = 0xCD,
+ MAX77802_RTC_MONTHDAY = 0xCD,
MAX77802_RTC_AE1 = 0xCE,
MAX77802_ALARM1_SEC = 0xCF,
MAX77802_ALARM1_MIN = 0xD0,
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index e798c81aec31..311f7d3d2323 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -131,7 +131,7 @@ enum max77693_pmic_reg {
#define FLASH_INT_FLED1_SHORT BIT(3)
#define FLASH_INT_OVER_CURRENT BIT(4)
-/* Fast charge timer in in hours */
+/* Fast charge timer in hours */
#define DEFAULT_FAST_CHARGE_TIMER 4
/* microamps */
#define DEFAULT_TOP_OFF_THRESHOLD_CURRENT 150000
diff --git a/include/linux/mfd/max77714.h b/include/linux/mfd/max77714.h
new file mode 100644
index 000000000000..7947e0d697a5
--- /dev/null
+++ b/include/linux/mfd/max77714.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Maxim MAX77714 register and data structure definitions.
+ *
+ * Copyright (C) 2022 Luca Ceresoli
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
+ */
+
+#ifndef __LINUX_MFD_MAX77714_H_
+#define __LINUX_MFD_MAX77714_H_
+
+#include <linux/bits.h>
+
+#define MAX77714_INT_TOP 0x00
+#define MAX77714_INT_TOPM 0x07 /* Datasheet says "read only", but it is RW */
+
+#define MAX77714_INT_TOP_ONOFF BIT(1)
+#define MAX77714_INT_TOP_RTC BIT(3)
+#define MAX77714_INT_TOP_GPIO BIT(4)
+#define MAX77714_INT_TOP_LDO BIT(5)
+#define MAX77714_INT_TOP_SD BIT(6)
+#define MAX77714_INT_TOP_GLBL BIT(7)
+
+#define MAX77714_32K_STATUS 0x30
+#define MAX77714_32K_STATUS_SIOSCOK BIT(5)
+#define MAX77714_32K_STATUS_XOSCOK BIT(4)
+#define MAX77714_32K_STATUS_32KSOURCE BIT(3)
+#define MAX77714_32K_STATUS_32KLOAD_MSK 0x3
+#define MAX77714_32K_STATUS_32KLOAD_SHF 1
+#define MAX77714_32K_STATUS_CRYSTAL_CFG BIT(0)
+
+#define MAX77714_32K_CONFIG 0x31
+#define MAX77714_32K_CONFIG_XOSC_RETRY BIT(4)
+
+#define MAX77714_CNFG_GLBL2 0x91
+#define MAX77714_WDTEN BIT(2)
+#define MAX77714_WDTSLPC BIT(3)
+#define MAX77714_TWD_MASK 0x3
+#define MAX77714_TWD_2s 0x0
+#define MAX77714_TWD_16s 0x1
+#define MAX77714_TWD_64s 0x2
+#define MAX77714_TWD_128s 0x3
+
+#define MAX77714_CNFG_GLBL3 0x92
+#define MAX77714_WDTC BIT(0)
+
+#define MAX77714_CNFG2_ONOFF 0x94
+#define MAX77714_WD_RST_WK BIT(5)
+
+/* Interrupts */
+enum {
+ MAX77714_IRQ_TOP_ONOFF,
+ MAX77714_IRQ_TOP_RTC, /* Real-time clock */
+ MAX77714_IRQ_TOP_GPIO, /* GPIOs */
+ MAX77714_IRQ_TOP_LDO, /* Low-dropout regulators */
+ MAX77714_IRQ_TOP_SD, /* Step-down regulators */
+ MAX77714_IRQ_TOP_GLBL, /* "Global resources": Low-Battery, overtemp... */
+};
+
+#endif /* __LINUX_MFD_MAX77714_H_ */
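
The watchdog bits above combine in the standard regmap pattern: pick a timeout with the TWD field in CNFG_GLBL2, enable with WDTEN, and service the dog by writing WDTC in CNFG_GLBL3. A minimal sketch, assuming the caller already holds the chip's regmap (the function names are hypothetical):

#include <linux/regmap.h>

/* Program a 16 s timeout and enable the watchdog (illustrative only). */
static int max77714_wdt_start(struct regmap *rmp)
{
	int ret;

	ret = regmap_update_bits(rmp, MAX77714_CNFG_GLBL2,
				 MAX77714_TWD_MASK, MAX77714_TWD_16s);
	if (ret)
		return ret;

	return regmap_update_bits(rmp, MAX77714_CNFG_GLBL2,
				  MAX77714_WDTEN, MAX77714_WDTEN);
}

/* Kick the running watchdog by setting the clear bit. */
static int max77714_wdt_ping(struct regmap *rmp)
{
	return regmap_update_bits(rmp, MAX77714_CNFG_GLBL3,
				  MAX77714_WDTC, MAX77714_WDTC);
}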
diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h
index e955e2f0a2cc..6c98edcf4b0b 100644
--- a/include/linux/mfd/max8997.h
+++ b/include/linux/mfd/max8997.h
@@ -14,13 +14,13 @@
* others and b) it can be enabled simply by using MAX17042 driver.
*/
-#ifndef __LINUX_MFD_MAX8998_H
-#define __LINUX_MFD_MAX8998_H
+#ifndef __LINUX_MFD_MAX8997_H
+#define __LINUX_MFD_MAX8997_H
#include <linux/regulator/consumer.h>
/* MAX8997/8966 regulator IDs */
-enum max8998_regulators {
+enum max8997_regulators {
MAX8997_LDO1 = 0,
MAX8997_LDO2,
MAX8997_LDO3,
@@ -207,4 +207,4 @@ struct max8997_platform_data {
struct max8997_led_platform_data *led_pdata;
};
-#endif /* __LINUX_MFD_MAX8998_H */
+#endif /* __LINUX_MFD_MAX8997_H */
diff --git a/include/linux/mfd/max8998.h b/include/linux/mfd/max8998.h
index 061af220dcd3..79c020bd0c70 100644
--- a/include/linux/mfd/max8998.h
+++ b/include/linux/mfd/max8998.h
@@ -39,6 +39,7 @@ enum {
MAX8998_ENVICHG,
MAX8998_ESAFEOUT1,
MAX8998_ESAFEOUT2,
+ MAX8998_CHARGER,
};
/**
diff --git a/include/linux/mfd/mp2629.h b/include/linux/mfd/mp2629.h
new file mode 100644
index 000000000000..89b706900b57
--- /dev/null
+++ b/include/linux/mfd/mp2629.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2020 Monolithic Power Systems, Inc
+ */
+
+#ifndef __MP2629_H__
+#define __MP2629_H__
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+struct mp2629_data {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+enum mp2629_adc_chan {
+ MP2629_BATT_VOLT,
+ MP2629_SYSTEM_VOLT,
+ MP2629_INPUT_VOLT,
+ MP2629_BATT_CURRENT,
+ MP2629_INPUT_CURRENT,
+ MP2629_ADC_CHAN_END
+};
+
+#endif
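
struct mp2629_data is the glue between the MFD parent and its cells: the parent probe fills in dev and regmap once, and the ADC and charger cell drivers fetch the same struct back rather than opening their own register map. A minimal consumer-side sketch, assuming the parent stored the struct with dev_set_drvdata (the cell probe and the register address are illustrative):

#include <linux/device.h>
#include <linux/mfd/mp2629.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

/* Illustrative cell probe: reuse the parent's regmap instead of creating one. */
static int mp2629_cell_probe(struct platform_device *pdev)
{
	struct mp2629_data *ddata = dev_get_drvdata(pdev->dev.parent);
	unsigned int val;

	/* 0x00 is a placeholder register address, not taken from this header. */
	return regmap_read(ddata->regmap, 0x00, &val);
}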
diff --git a/include/linux/mfd/mt6331/core.h b/include/linux/mfd/mt6331/core.h
new file mode 100644
index 000000000000..df8e6b1e4bc1
--- /dev/null
+++ b/include/linux/mfd/mt6331/core.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6331_CORE_H__
+#define __MFD_MT6331_CORE_H__
+
+enum mt6331_irq_status_numbers {
+ MT6331_IRQ_STATUS_PWRKEY = 0,
+ MT6331_IRQ_STATUS_HOMEKEY,
+ MT6331_IRQ_STATUS_CHRDET,
+ MT6331_IRQ_STATUS_THR_H,
+ MT6331_IRQ_STATUS_THR_L,
+ MT6331_IRQ_STATUS_BAT_H,
+ MT6331_IRQ_STATUS_BAT_L,
+ MT6331_IRQ_STATUS_RTC,
+ MT6331_IRQ_STATUS_AUDIO,
+ MT6331_IRQ_STATUS_MAD,
+ MT6331_IRQ_STATUS_ACCDET,
+ MT6331_IRQ_STATUS_ACCDET_EINT,
+ MT6331_IRQ_STATUS_ACCDET_NEGV = 12,
+ MT6331_IRQ_STATUS_VDVFS11_OC = 16,
+ MT6331_IRQ_STATUS_VDVFS12_OC,
+ MT6331_IRQ_STATUS_VDVFS13_OC,
+ MT6331_IRQ_STATUS_VDVFS14_OC,
+ MT6331_IRQ_STATUS_GPU_OC,
+ MT6331_IRQ_STATUS_VCORE1_OC,
+ MT6331_IRQ_STATUS_VCORE2_OC,
+ MT6331_IRQ_STATUS_VIO18_OC,
+ MT6331_IRQ_STATUS_LDO_OC,
+ MT6331_IRQ_STATUS_NR,
+};
+
+#define MT6331_IRQ_CON0_BASE MT6331_IRQ_STATUS_PWRKEY
+#define MT6331_IRQ_CON0_BITS (MT6331_IRQ_STATUS_ACCDET_NEGV + 1)
+#define MT6331_IRQ_CON1_BASE MT6331_IRQ_STATUS_VDVFS11_OC
+#define MT6331_IRQ_CON1_BITS (MT6331_IRQ_STATUS_LDO_OC - MT6331_IRQ_STATUS_VDVFS11_OC + 1)
+
+#endif /* __MFD_MT6331_CORE_H__ */
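
The two CON macros partition the sparse enum across the chip's interrupt-control registers: INT_CON0 covers hwirqs 0-12, so CON0_BITS works out to 13, and INT_CON1 covers hwirqs 16-24 for 9 bits. A short sketch of the register/bit mapping an IRQ driver can derive from the bases (pure arithmetic on the defines; the helper itself is illustrative):

/* Map a hwirq to its control-register index and bit position. */
static inline void mt6331_irq_to_reg_bit(unsigned int hwirq,
					 unsigned int *reg, unsigned int *bit)
{
	if (hwirq < MT6331_IRQ_CON1_BASE) {
		*reg = 0;	/* maps to MT6331_INT_CON0 */
		*bit = hwirq - MT6331_IRQ_CON0_BASE;
	} else {
		*reg = 1;	/* maps to MT6331_INT_CON1 */
		*bit = hwirq - MT6331_IRQ_CON1_BASE;
	}
}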
diff --git a/include/linux/mfd/mt6331/registers.h b/include/linux/mfd/mt6331/registers.h
new file mode 100644
index 000000000000..e2be6bccd1a7
--- /dev/null
+++ b/include/linux/mfd/mt6331/registers.h
@@ -0,0 +1,584 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6331_REGISTERS_H__
+#define __MFD_MT6331_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6331_STRUP_CON0 0x0
+#define MT6331_STRUP_CON2 0x2
+#define MT6331_STRUP_CON3 0x4
+#define MT6331_STRUP_CON4 0x6
+#define MT6331_STRUP_CON5 0x8
+#define MT6331_STRUP_CON6 0xA
+#define MT6331_STRUP_CON7 0xC
+#define MT6331_STRUP_CON8 0xE
+#define MT6331_STRUP_CON9 0x10
+#define MT6331_STRUP_CON10 0x12
+#define MT6331_STRUP_CON11 0x14
+#define MT6331_STRUP_CON12 0x16
+#define MT6331_STRUP_CON13 0x18
+#define MT6331_STRUP_CON14 0x1A
+#define MT6331_STRUP_CON15 0x1C
+#define MT6331_STRUP_CON16 0x1E
+#define MT6331_STRUP_CON17 0x20
+#define MT6331_STRUP_CON18 0x22
+#define MT6331_HWCID 0x100
+#define MT6331_SWCID 0x102
+#define MT6331_EXT_PMIC_STATUS 0x104
+#define MT6331_TOP_CON 0x106
+#define MT6331_TEST_OUT 0x108
+#define MT6331_TEST_CON0 0x10A
+#define MT6331_TEST_CON1 0x10C
+#define MT6331_TESTMODE_SW 0x10E
+#define MT6331_EN_STATUS0 0x110
+#define MT6331_EN_STATUS1 0x112
+#define MT6331_EN_STATUS2 0x114
+#define MT6331_OCSTATUS0 0x116
+#define MT6331_OCSTATUS1 0x118
+#define MT6331_OCSTATUS2 0x11A
+#define MT6331_PGSTATUS 0x11C
+#define MT6331_TOPSTATUS 0x11E
+#define MT6331_TDSEL_CON 0x120
+#define MT6331_RDSEL_CON 0x122
+#define MT6331_SMT_CON0 0x124
+#define MT6331_SMT_CON1 0x126
+#define MT6331_SMT_CON2 0x128
+#define MT6331_DRV_CON0 0x12A
+#define MT6331_DRV_CON1 0x12C
+#define MT6331_DRV_CON2 0x12E
+#define MT6331_DRV_CON3 0x130
+#define MT6331_TOP_STATUS 0x132
+#define MT6331_TOP_STATUS_SET 0x134
+#define MT6331_TOP_STATUS_CLR 0x136
+#define MT6331_TOP_CKPDN_CON0 0x138
+#define MT6331_TOP_CKPDN_CON0_SET 0x13A
+#define MT6331_TOP_CKPDN_CON0_CLR 0x13C
+#define MT6331_TOP_CKPDN_CON1 0x13E
+#define MT6331_TOP_CKPDN_CON1_SET 0x140
+#define MT6331_TOP_CKPDN_CON1_CLR 0x142
+#define MT6331_TOP_CKPDN_CON2 0x144
+#define MT6331_TOP_CKPDN_CON2_SET 0x146
+#define MT6331_TOP_CKPDN_CON2_CLR 0x148
+#define MT6331_TOP_CKSEL_CON 0x14A
+#define MT6331_TOP_CKSEL_CON_SET 0x14C
+#define MT6331_TOP_CKSEL_CON_CLR 0x14E
+#define MT6331_TOP_CKHWEN_CON 0x150
+#define MT6331_TOP_CKHWEN_CON_SET 0x152
+#define MT6331_TOP_CKHWEN_CON_CLR 0x154
+#define MT6331_TOP_CKTST_CON0 0x156
+#define MT6331_TOP_CKTST_CON1 0x158
+#define MT6331_TOP_CLKSQ 0x15A
+#define MT6331_TOP_CLKSQ_SET 0x15C
+#define MT6331_TOP_CLKSQ_CLR 0x15E
+#define MT6331_TOP_RST_CON 0x160
+#define MT6331_TOP_RST_CON_SET 0x162
+#define MT6331_TOP_RST_CON_CLR 0x164
+#define MT6331_TOP_RST_MISC 0x166
+#define MT6331_TOP_RST_MISC_SET 0x168
+#define MT6331_TOP_RST_MISC_CLR 0x16A
+#define MT6331_INT_CON0 0x16C
+#define MT6331_INT_CON0_SET 0x16E
+#define MT6331_INT_CON0_CLR 0x170
+#define MT6331_INT_CON1 0x172
+#define MT6331_INT_CON1_SET 0x174
+#define MT6331_INT_CON1_CLR 0x176
+#define MT6331_INT_MISC_CON 0x178
+#define MT6331_INT_MISC_CON_SET 0x17A
+#define MT6331_INT_MISC_CON_CLR 0x17C
+#define MT6331_INT_STATUS_CON0 0x17E
+#define MT6331_INT_STATUS_CON1 0x180
+#define MT6331_OC_GEAR_0 0x182
+#define MT6331_FQMTR_CON0 0x184
+#define MT6331_FQMTR_CON1 0x186
+#define MT6331_FQMTR_CON2 0x188
+#define MT6331_RG_SPI_CON 0x18A
+#define MT6331_DEW_DIO_EN 0x18C
+#define MT6331_DEW_READ_TEST 0x18E
+#define MT6331_DEW_WRITE_TEST 0x190
+#define MT6331_DEW_CRC_SWRST 0x192
+#define MT6331_DEW_CRC_EN 0x194
+#define MT6331_DEW_CRC_VAL 0x196
+#define MT6331_DEW_DBG_MON_SEL 0x198
+#define MT6331_DEW_CIPHER_KEY_SEL 0x19A
+#define MT6331_DEW_CIPHER_IV_SEL 0x19C
+#define MT6331_DEW_CIPHER_EN 0x19E
+#define MT6331_DEW_CIPHER_RDY 0x1A0
+#define MT6331_DEW_CIPHER_MODE 0x1A2
+#define MT6331_DEW_CIPHER_SWRST 0x1A4
+#define MT6331_DEW_RDDMY_NO 0x1A6
+#define MT6331_INT_TYPE_CON0 0x1A8
+#define MT6331_INT_TYPE_CON0_SET 0x1AA
+#define MT6331_INT_TYPE_CON0_CLR 0x1AC
+#define MT6331_INT_TYPE_CON1 0x1AE
+#define MT6331_INT_TYPE_CON1_SET 0x1B0
+#define MT6331_INT_TYPE_CON1_CLR 0x1B2
+#define MT6331_INT_STA 0x1B4
+#define MT6331_BUCK_ALL_CON0 0x200
+#define MT6331_BUCK_ALL_CON1 0x202
+#define MT6331_BUCK_ALL_CON2 0x204
+#define MT6331_BUCK_ALL_CON3 0x206
+#define MT6331_BUCK_ALL_CON4 0x208
+#define MT6331_BUCK_ALL_CON5 0x20A
+#define MT6331_BUCK_ALL_CON6 0x20C
+#define MT6331_BUCK_ALL_CON7 0x20E
+#define MT6331_BUCK_ALL_CON8 0x210
+#define MT6331_BUCK_ALL_CON9 0x212
+#define MT6331_BUCK_ALL_CON10 0x214
+#define MT6331_BUCK_ALL_CON11 0x216
+#define MT6331_BUCK_ALL_CON12 0x218
+#define MT6331_BUCK_ALL_CON13 0x21A
+#define MT6331_BUCK_ALL_CON14 0x21C
+#define MT6331_BUCK_ALL_CON15 0x21E
+#define MT6331_BUCK_ALL_CON16 0x220
+#define MT6331_BUCK_ALL_CON17 0x222
+#define MT6331_BUCK_ALL_CON18 0x224
+#define MT6331_BUCK_ALL_CON19 0x226
+#define MT6331_BUCK_ALL_CON20 0x228
+#define MT6331_BUCK_ALL_CON21 0x22A
+#define MT6331_BUCK_ALL_CON22 0x22C
+#define MT6331_BUCK_ALL_CON23 0x22E
+#define MT6331_BUCK_ALL_CON24 0x230
+#define MT6331_BUCK_ALL_CON25 0x232
+#define MT6331_BUCK_ALL_CON26 0x234
+#define MT6331_VDVFS11_CON0 0x236
+#define MT6331_VDVFS11_CON1 0x238
+#define MT6331_VDVFS11_CON2 0x23A
+#define MT6331_VDVFS11_CON3 0x23C
+#define MT6331_VDVFS11_CON4 0x23E
+#define MT6331_VDVFS11_CON5 0x240
+#define MT6331_VDVFS11_CON6 0x242
+#define MT6331_VDVFS11_CON7 0x244
+#define MT6331_VDVFS11_CON8 0x246
+#define MT6331_VDVFS11_CON9 0x248
+#define MT6331_VDVFS11_CON10 0x24A
+#define MT6331_VDVFS11_CON11 0x24C
+#define MT6331_VDVFS11_CON12 0x24E
+#define MT6331_VDVFS11_CON13 0x250
+#define MT6331_VDVFS11_CON14 0x252
+#define MT6331_VDVFS11_CON18 0x25A
+#define MT6331_VDVFS11_CON19 0x25C
+#define MT6331_VDVFS11_CON20 0x25E
+#define MT6331_VDVFS11_CON21 0x260
+#define MT6331_VDVFS11_CON22 0x262
+#define MT6331_VDVFS11_CON23 0x264
+#define MT6331_VDVFS11_CON24 0x266
+#define MT6331_VDVFS11_CON25 0x268
+#define MT6331_VDVFS11_CON26 0x26A
+#define MT6331_VDVFS11_CON27 0x26C
+#define MT6331_VDVFS12_CON0 0x26E
+#define MT6331_VDVFS12_CON1 0x270
+#define MT6331_VDVFS12_CON2 0x272
+#define MT6331_VDVFS12_CON3 0x274
+#define MT6331_VDVFS12_CON4 0x276
+#define MT6331_VDVFS12_CON5 0x278
+#define MT6331_VDVFS12_CON6 0x27A
+#define MT6331_VDVFS12_CON7 0x27C
+#define MT6331_VDVFS12_CON8 0x27E
+#define MT6331_VDVFS12_CON9 0x280
+#define MT6331_VDVFS12_CON10 0x282
+#define MT6331_VDVFS12_CON11 0x284
+#define MT6331_VDVFS12_CON12 0x286
+#define MT6331_VDVFS12_CON13 0x288
+#define MT6331_VDVFS12_CON14 0x28A
+#define MT6331_VDVFS12_CON18 0x292
+#define MT6331_VDVFS12_CON19 0x294
+#define MT6331_VDVFS12_CON20 0x296
+#define MT6331_VDVFS13_CON0 0x298
+#define MT6331_VDVFS13_CON1 0x29A
+#define MT6331_VDVFS13_CON2 0x29C
+#define MT6331_VDVFS13_CON3 0x29E
+#define MT6331_VDVFS13_CON4 0x2A0
+#define MT6331_VDVFS13_CON5 0x2A2
+#define MT6331_VDVFS13_CON6 0x2A4
+#define MT6331_VDVFS13_CON7 0x2A6
+#define MT6331_VDVFS13_CON8 0x2A8
+#define MT6331_VDVFS13_CON9 0x2AA
+#define MT6331_VDVFS13_CON10 0x2AC
+#define MT6331_VDVFS13_CON11 0x2AE
+#define MT6331_VDVFS13_CON12 0x2B0
+#define MT6331_VDVFS13_CON13 0x2B2
+#define MT6331_VDVFS13_CON14 0x2B4
+#define MT6331_VDVFS13_CON18 0x2BC
+#define MT6331_VDVFS13_CON19 0x2BE
+#define MT6331_VDVFS13_CON20 0x2C0
+#define MT6331_VDVFS14_CON0 0x2C2
+#define MT6331_VDVFS14_CON1 0x2C4
+#define MT6331_VDVFS14_CON2 0x2C6
+#define MT6331_VDVFS14_CON3 0x2C8
+#define MT6331_VDVFS14_CON4 0x2CA
+#define MT6331_VDVFS14_CON5 0x2CC
+#define MT6331_VDVFS14_CON6 0x2CE
+#define MT6331_VDVFS14_CON7 0x2D0
+#define MT6331_VDVFS14_CON8 0x2D2
+#define MT6331_VDVFS14_CON9 0x2D4
+#define MT6331_VDVFS14_CON10 0x2D6
+#define MT6331_VDVFS14_CON11 0x2D8
+#define MT6331_VDVFS14_CON12 0x2DA
+#define MT6331_VDVFS14_CON13 0x2DC
+#define MT6331_VDVFS14_CON14 0x2DE
+#define MT6331_VDVFS14_CON18 0x2E6
+#define MT6331_VDVFS14_CON19 0x2E8
+#define MT6331_VDVFS14_CON20 0x2EA
+#define MT6331_VGPU_CON0 0x300
+#define MT6331_VGPU_CON1 0x302
+#define MT6331_VGPU_CON2 0x304
+#define MT6331_VGPU_CON3 0x306
+#define MT6331_VGPU_CON4 0x308
+#define MT6331_VGPU_CON5 0x30A
+#define MT6331_VGPU_CON6 0x30C
+#define MT6331_VGPU_CON7 0x30E
+#define MT6331_VGPU_CON8 0x310
+#define MT6331_VGPU_CON9 0x312
+#define MT6331_VGPU_CON10 0x314
+#define MT6331_VGPU_CON11 0x316
+#define MT6331_VGPU_CON12 0x318
+#define MT6331_VGPU_CON13 0x31A
+#define MT6331_VGPU_CON14 0x31C
+#define MT6331_VGPU_CON15 0x31E
+#define MT6331_VGPU_CON16 0x320
+#define MT6331_VGPU_CON17 0x322
+#define MT6331_VGPU_CON18 0x324
+#define MT6331_VGPU_CON19 0x326
+#define MT6331_VGPU_CON20 0x328
+#define MT6331_VCORE1_CON0 0x32A
+#define MT6331_VCORE1_CON1 0x32C
+#define MT6331_VCORE1_CON2 0x32E
+#define MT6331_VCORE1_CON3 0x330
+#define MT6331_VCORE1_CON4 0x332
+#define MT6331_VCORE1_CON5 0x334
+#define MT6331_VCORE1_CON6 0x336
+#define MT6331_VCORE1_CON7 0x338
+#define MT6331_VCORE1_CON8 0x33A
+#define MT6331_VCORE1_CON9 0x33C
+#define MT6331_VCORE1_CON10 0x33E
+#define MT6331_VCORE1_CON11 0x340
+#define MT6331_VCORE1_CON12 0x342
+#define MT6331_VCORE1_CON13 0x344
+#define MT6331_VCORE1_CON14 0x346
+#define MT6331_VCORE1_CON15 0x348
+#define MT6331_VCORE1_CON16 0x34A
+#define MT6331_VCORE1_CON17 0x34C
+#define MT6331_VCORE1_CON18 0x34E
+#define MT6331_VCORE1_CON19 0x350
+#define MT6331_VCORE1_CON20 0x352
+#define MT6331_VCORE2_CON0 0x354
+#define MT6331_VCORE2_CON1 0x356
+#define MT6331_VCORE2_CON2 0x358
+#define MT6331_VCORE2_CON3 0x35A
+#define MT6331_VCORE2_CON4 0x35C
+#define MT6331_VCORE2_CON5 0x35E
+#define MT6331_VCORE2_CON6 0x360
+#define MT6331_VCORE2_CON7 0x362
+#define MT6331_VCORE2_CON8 0x364
+#define MT6331_VCORE2_CON9 0x366
+#define MT6331_VCORE2_CON10 0x368
+#define MT6331_VCORE2_CON11 0x36A
+#define MT6331_VCORE2_CON12 0x36C
+#define MT6331_VCORE2_CON13 0x36E
+#define MT6331_VCORE2_CON14 0x370
+#define MT6331_VCORE2_CON15 0x372
+#define MT6331_VCORE2_CON16 0x374
+#define MT6331_VCORE2_CON17 0x376
+#define MT6331_VCORE2_CON18 0x378
+#define MT6331_VCORE2_CON19 0x37A
+#define MT6331_VCORE2_CON20 0x37C
+#define MT6331_VCORE2_CON21 0x37E
+#define MT6331_VIO18_CON0 0x380
+#define MT6331_VIO18_CON1 0x382
+#define MT6331_VIO18_CON2 0x384
+#define MT6331_VIO18_CON3 0x386
+#define MT6331_VIO18_CON4 0x388
+#define MT6331_VIO18_CON5 0x38A
+#define MT6331_VIO18_CON6 0x38C
+#define MT6331_VIO18_CON7 0x38E
+#define MT6331_VIO18_CON8 0x390
+#define MT6331_VIO18_CON9 0x392
+#define MT6331_VIO18_CON10 0x394
+#define MT6331_VIO18_CON11 0x396
+#define MT6331_VIO18_CON12 0x398
+#define MT6331_VIO18_CON13 0x39A
+#define MT6331_VIO18_CON14 0x39C
+#define MT6331_VIO18_CON15 0x39E
+#define MT6331_VIO18_CON16 0x3A0
+#define MT6331_VIO18_CON17 0x3A2
+#define MT6331_VIO18_CON18 0x3A4
+#define MT6331_VIO18_CON19 0x3A6
+#define MT6331_VIO18_CON20 0x3A8
+#define MT6331_BUCK_K_CON0 0x3AA
+#define MT6331_BUCK_K_CON1 0x3AC
+#define MT6331_BUCK_K_CON2 0x3AE
+#define MT6331_BUCK_K_CON3 0x3B0
+#define MT6331_ZCD_CON0 0x400
+#define MT6331_ZCD_CON1 0x402
+#define MT6331_ZCD_CON2 0x404
+#define MT6331_ZCD_CON3 0x406
+#define MT6331_ZCD_CON4 0x408
+#define MT6331_ZCD_CON5 0x40A
+#define MT6331_ISINK0_CON0 0x40C
+#define MT6331_ISINK0_CON1 0x40E
+#define MT6331_ISINK0_CON2 0x410
+#define MT6331_ISINK0_CON3 0x412
+#define MT6331_ISINK0_CON4 0x414
+#define MT6331_ISINK1_CON0 0x416
+#define MT6331_ISINK1_CON1 0x418
+#define MT6331_ISINK1_CON2 0x41A
+#define MT6331_ISINK1_CON3 0x41C
+#define MT6331_ISINK1_CON4 0x41E
+#define MT6331_ISINK2_CON0 0x420
+#define MT6331_ISINK2_CON1 0x422
+#define MT6331_ISINK2_CON2 0x424
+#define MT6331_ISINK2_CON3 0x426
+#define MT6331_ISINK2_CON4 0x428
+#define MT6331_ISINK3_CON0 0x42A
+#define MT6331_ISINK3_CON1 0x42C
+#define MT6331_ISINK3_CON2 0x42E
+#define MT6331_ISINK3_CON3 0x430
+#define MT6331_ISINK3_CON4 0x432
+#define MT6331_ISINK_ANA0 0x434
+#define MT6331_ISINK_ANA1 0x436
+#define MT6331_ISINK_PHASE_DLY 0x438
+#define MT6331_ISINK_EN_CTRL 0x43A
+#define MT6331_ANALDO_CON0 0x500
+#define MT6331_ANALDO_CON1 0x502
+#define MT6331_ANALDO_CON2 0x504
+#define MT6331_ANALDO_CON3 0x506
+#define MT6331_ANALDO_CON4 0x508
+#define MT6331_ANALDO_CON5 0x50A
+#define MT6331_ANALDO_CON6 0x50C
+#define MT6331_ANALDO_CON7 0x50E
+#define MT6331_ANALDO_CON8 0x510
+#define MT6331_ANALDO_CON9 0x512
+#define MT6331_ANALDO_CON10 0x514
+#define MT6331_ANALDO_CON11 0x516
+#define MT6331_ANALDO_CON12 0x518
+#define MT6331_ANALDO_CON13 0x51A
+#define MT6331_SYSLDO_CON0 0x51C
+#define MT6331_SYSLDO_CON1 0x51E
+#define MT6331_SYSLDO_CON2 0x520
+#define MT6331_SYSLDO_CON3 0x522
+#define MT6331_SYSLDO_CON4 0x524
+#define MT6331_SYSLDO_CON5 0x526
+#define MT6331_SYSLDO_CON6 0x528
+#define MT6331_SYSLDO_CON7 0x52A
+#define MT6331_SYSLDO_CON8 0x52C
+#define MT6331_SYSLDO_CON9 0x52E
+#define MT6331_SYSLDO_CON10 0x530
+#define MT6331_SYSLDO_CON11 0x532
+#define MT6331_SYSLDO_CON12 0x534
+#define MT6331_SYSLDO_CON13 0x536
+#define MT6331_SYSLDO_CON14 0x538
+#define MT6331_SYSLDO_CON15 0x53A
+#define MT6331_SYSLDO_CON16 0x53C
+#define MT6331_SYSLDO_CON17 0x53E
+#define MT6331_SYSLDO_CON18 0x540
+#define MT6331_SYSLDO_CON19 0x542
+#define MT6331_SYSLDO_CON20 0x544
+#define MT6331_SYSLDO_CON21 0x546
+#define MT6331_DIGLDO_CON0 0x548
+#define MT6331_DIGLDO_CON1 0x54A
+#define MT6331_DIGLDO_CON2 0x54C
+#define MT6331_DIGLDO_CON3 0x54E
+#define MT6331_DIGLDO_CON4 0x550
+#define MT6331_DIGLDO_CON5 0x552
+#define MT6331_DIGLDO_CON6 0x554
+#define MT6331_DIGLDO_CON7 0x556
+#define MT6331_DIGLDO_CON8 0x558
+#define MT6331_DIGLDO_CON9 0x55A
+#define MT6331_DIGLDO_CON10 0x55C
+#define MT6331_DIGLDO_CON11 0x55E
+#define MT6331_DIGLDO_CON12 0x560
+#define MT6331_DIGLDO_CON13 0x562
+#define MT6331_DIGLDO_CON14 0x564
+#define MT6331_DIGLDO_CON15 0x566
+#define MT6331_DIGLDO_CON16 0x568
+#define MT6331_DIGLDO_CON17 0x56A
+#define MT6331_DIGLDO_CON18 0x56C
+#define MT6331_DIGLDO_CON19 0x56E
+#define MT6331_DIGLDO_CON20 0x570
+#define MT6331_DIGLDO_CON21 0x572
+#define MT6331_DIGLDO_CON22 0x574
+#define MT6331_DIGLDO_CON23 0x576
+#define MT6331_DIGLDO_CON24 0x578
+#define MT6331_DIGLDO_CON25 0x57A
+#define MT6331_DIGLDO_CON26 0x57C
+#define MT6331_DIGLDO_CON27 0x57E
+#define MT6331_DIGLDO_CON28 0x580
+#define MT6331_OTP_CON0 0x600
+#define MT6331_OTP_CON1 0x602
+#define MT6331_OTP_CON2 0x604
+#define MT6331_OTP_CON3 0x606
+#define MT6331_OTP_CON4 0x608
+#define MT6331_OTP_CON5 0x60A
+#define MT6331_OTP_CON6 0x60C
+#define MT6331_OTP_CON7 0x60E
+#define MT6331_OTP_CON8 0x610
+#define MT6331_OTP_CON9 0x612
+#define MT6331_OTP_CON10 0x614
+#define MT6331_OTP_CON11 0x616
+#define MT6331_OTP_CON12 0x618
+#define MT6331_OTP_CON13 0x61A
+#define MT6331_OTP_CON14 0x61C
+#define MT6331_OTP_DOUT_0_15 0x61E
+#define MT6331_OTP_DOUT_16_31 0x620
+#define MT6331_OTP_DOUT_32_47 0x622
+#define MT6331_OTP_DOUT_48_63 0x624
+#define MT6331_OTP_DOUT_64_79 0x626
+#define MT6331_OTP_DOUT_80_95 0x628
+#define MT6331_OTP_DOUT_96_111 0x62A
+#define MT6331_OTP_DOUT_112_127 0x62C
+#define MT6331_OTP_DOUT_128_143 0x62E
+#define MT6331_OTP_DOUT_144_159 0x630
+#define MT6331_OTP_DOUT_160_175 0x632
+#define MT6331_OTP_DOUT_176_191 0x634
+#define MT6331_OTP_DOUT_192_207 0x636
+#define MT6331_OTP_DOUT_208_223 0x638
+#define MT6331_OTP_DOUT_224_239 0x63A
+#define MT6331_OTP_DOUT_240_255 0x63C
+#define MT6331_OTP_VAL_0_15 0x63E
+#define MT6331_OTP_VAL_16_31 0x640
+#define MT6331_OTP_VAL_32_47 0x642
+#define MT6331_OTP_VAL_48_63 0x644
+#define MT6331_OTP_VAL_64_79 0x646
+#define MT6331_OTP_VAL_80_95 0x648
+#define MT6331_OTP_VAL_96_111 0x64A
+#define MT6331_OTP_VAL_112_127 0x64C
+#define MT6331_OTP_VAL_128_143 0x64E
+#define MT6331_OTP_VAL_144_159 0x650
+#define MT6331_OTP_VAL_160_175 0x652
+#define MT6331_OTP_VAL_176_191 0x654
+#define MT6331_OTP_VAL_192_207 0x656
+#define MT6331_OTP_VAL_208_223 0x658
+#define MT6331_OTP_VAL_224_239 0x65A
+#define MT6331_OTP_VAL_240_255 0x65C
+#define MT6331_RTC_MIX_CON0 0x65E
+#define MT6331_RTC_MIX_CON1 0x660
+#define MT6331_AUDDAC_CFG0 0x662
+#define MT6331_AUDBUF_CFG0 0x664
+#define MT6331_AUDBUF_CFG1 0x666
+#define MT6331_AUDBUF_CFG2 0x668
+#define MT6331_AUDBUF_CFG3 0x66A
+#define MT6331_AUDBUF_CFG4 0x66C
+#define MT6331_AUDBUF_CFG5 0x66E
+#define MT6331_AUDBUF_CFG6 0x670
+#define MT6331_AUDBUF_CFG7 0x672
+#define MT6331_AUDBUF_CFG8 0x674
+#define MT6331_IBIASDIST_CFG0 0x676
+#define MT6331_AUDCLKGEN_CFG0 0x678
+#define MT6331_AUDLDO_CFG0 0x67A
+#define MT6331_AUDDCDC_CFG0 0x67C
+#define MT6331_AUDDCDC_CFG1 0x67E
+#define MT6331_AUDNVREGGLB_CFG0 0x680
+#define MT6331_AUD_NCP0 0x682
+#define MT6331_AUD_ZCD_CFG0 0x684
+#define MT6331_AUDPREAMP_CFG0 0x686
+#define MT6331_AUDPREAMP_CFG1 0x688
+#define MT6331_AUDPREAMP_CFG2 0x68A
+#define MT6331_AUDADC_CFG0 0x68C
+#define MT6331_AUDADC_CFG1 0x68E
+#define MT6331_AUDADC_CFG2 0x690
+#define MT6331_AUDADC_CFG3 0x692
+#define MT6331_AUDADC_CFG4 0x694
+#define MT6331_AUDADC_CFG5 0x696
+#define MT6331_AUDDIGMI_CFG0 0x698
+#define MT6331_AUDDIGMI_CFG1 0x69A
+#define MT6331_AUDMICBIAS_CFG0 0x69C
+#define MT6331_AUDMICBIAS_CFG1 0x69E
+#define MT6331_AUDENCSPARE_CFG0 0x6A0
+#define MT6331_AUDPREAMPGAIN_CFG0 0x6A2
+#define MT6331_AUDMADPLL_CFG0 0x6A4
+#define MT6331_AUDMADPLL_CFG1 0x6A6
+#define MT6331_AUDMADPLL_CFG2 0x6A8
+#define MT6331_AUDLDO_NVREG_CFG0 0x6AA
+#define MT6331_AUDLDO_NVREG_CFG1 0x6AC
+#define MT6331_AUDLDO_NVREG_CFG2 0x6AE
+#define MT6331_AUXADC_ADC0 0x700
+#define MT6331_AUXADC_ADC1 0x702
+#define MT6331_AUXADC_ADC2 0x704
+#define MT6331_AUXADC_ADC3 0x706
+#define MT6331_AUXADC_ADC4 0x708
+#define MT6331_AUXADC_ADC5 0x70A
+#define MT6331_AUXADC_ADC6 0x70C
+#define MT6331_AUXADC_ADC7 0x70E
+#define MT6331_AUXADC_ADC8 0x710
+#define MT6331_AUXADC_ADC9 0x712
+#define MT6331_AUXADC_ADC10 0x714
+#define MT6331_AUXADC_ADC11 0x716
+#define MT6331_AUXADC_ADC12 0x718
+#define MT6331_AUXADC_ADC13 0x71A
+#define MT6331_AUXADC_ADC14 0x71C
+#define MT6331_AUXADC_ADC15 0x71E
+#define MT6331_AUXADC_ADC16 0x720
+#define MT6331_AUXADC_ADC17 0x722
+#define MT6331_AUXADC_ADC18 0x724
+#define MT6331_AUXADC_ADC19 0x726
+#define MT6331_AUXADC_STA0 0x728
+#define MT6331_AUXADC_STA1 0x72A
+#define MT6331_AUXADC_RQST0 0x72C
+#define MT6331_AUXADC_RQST0_SET 0x72E
+#define MT6331_AUXADC_RQST0_CLR 0x730
+#define MT6331_AUXADC_RQST1 0x732
+#define MT6331_AUXADC_RQST1_SET 0x734
+#define MT6331_AUXADC_RQST1_CLR 0x736
+#define MT6331_AUXADC_CON0 0x738
+#define MT6331_AUXADC_CON1 0x73A
+#define MT6331_AUXADC_CON2 0x73C
+#define MT6331_AUXADC_CON3 0x73E
+#define MT6331_AUXADC_CON4 0x740
+#define MT6331_AUXADC_CON5 0x742
+#define MT6331_AUXADC_CON6 0x744
+#define MT6331_AUXADC_CON7 0x746
+#define MT6331_AUXADC_CON8 0x748
+#define MT6331_AUXADC_CON9 0x74A
+#define MT6331_AUXADC_CON10 0x74C
+#define MT6331_AUXADC_CON11 0x74E
+#define MT6331_AUXADC_CON12 0x750
+#define MT6331_AUXADC_CON13 0x752
+#define MT6331_AUXADC_CON14 0x754
+#define MT6331_AUXADC_CON15 0x756
+#define MT6331_AUXADC_CON16 0x758
+#define MT6331_AUXADC_CON17 0x75A
+#define MT6331_AUXADC_CON18 0x75C
+#define MT6331_AUXADC_CON19 0x75E
+#define MT6331_AUXADC_CON20 0x760
+#define MT6331_AUXADC_CON21 0x762
+#define MT6331_AUXADC_CON22 0x764
+#define MT6331_AUXADC_CON23 0x766
+#define MT6331_AUXADC_CON24 0x768
+#define MT6331_AUXADC_CON25 0x76A
+#define MT6331_AUXADC_CON26 0x76C
+#define MT6331_AUXADC_CON27 0x76E
+#define MT6331_AUXADC_CON28 0x770
+#define MT6331_AUXADC_CON29 0x772
+#define MT6331_AUXADC_CON30 0x774
+#define MT6331_AUXADC_CON31 0x776
+#define MT6331_AUXADC_CON32 0x778
+#define MT6331_ACCDET_CON0 0x77A
+#define MT6331_ACCDET_CON1 0x77C
+#define MT6331_ACCDET_CON2 0x77E
+#define MT6331_ACCDET_CON3 0x780
+#define MT6331_ACCDET_CON4 0x782
+#define MT6331_ACCDET_CON5 0x784
+#define MT6331_ACCDET_CON6 0x786
+#define MT6331_ACCDET_CON7 0x788
+#define MT6331_ACCDET_CON8 0x78A
+#define MT6331_ACCDET_CON9 0x78C
+#define MT6331_ACCDET_CON10 0x78E
+#define MT6331_ACCDET_CON11 0x790
+#define MT6331_ACCDET_CON12 0x792
+#define MT6331_ACCDET_CON13 0x794
+#define MT6331_ACCDET_CON14 0x796
+#define MT6331_ACCDET_CON15 0x798
+#define MT6331_ACCDET_CON16 0x79A
+#define MT6331_ACCDET_CON17 0x79C
+#define MT6331_ACCDET_CON18 0x79E
+#define MT6331_ACCDET_CON19 0x7A0
+#define MT6331_ACCDET_CON20 0x7A2
+#define MT6331_ACCDET_CON21 0x7A4
+#define MT6331_ACCDET_CON22 0x7A6
+#define MT6331_ACCDET_CON23 0x7A8
+#define MT6331_ACCDET_CON24 0x7AA
+
+#endif /* __MFD_MT6331_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6332/core.h b/include/linux/mfd/mt6332/core.h
new file mode 100644
index 000000000000..cd6013eb82d9
--- /dev/null
+++ b/include/linux/mfd/mt6332/core.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6332_CORE_H__
+#define __MFD_MT6332_CORE_H__
+
+enum mt6332_irq_status_numbers {
+ MT6332_IRQ_STATUS_CHR_COMPLETE = 0,
+ MT6332_IRQ_STATUS_THERMAL_SD,
+ MT6332_IRQ_STATUS_THERMAL_REG_IN,
+ MT6332_IRQ_STATUS_THERMAL_REG_OUT,
+ MT6332_IRQ_STATUS_OTG_OC,
+ MT6332_IRQ_STATUS_CHR_OC,
+ MT6332_IRQ_STATUS_OTG_THERMAL,
+ MT6332_IRQ_STATUS_CHRIN_SHORT,
+ MT6332_IRQ_STATUS_DRVCDT_SHORT,
+ MT6332_IRQ_STATUS_PLUG_IN_FLASH,
+ MT6332_IRQ_STATUS_CHRWDT_FLAG,
+ MT6332_IRQ_STATUS_FLASH_EN_TIMEOUT,
+ MT6332_IRQ_STATUS_FLASH_VLED1_SHORT,
+ MT6332_IRQ_STATUS_FLASH_VLED1_OPEN = 13,
+ MT6332_IRQ_STATUS_OV = 16,
+ MT6332_IRQ_STATUS_BVALID_DET,
+ MT6332_IRQ_STATUS_VBATON_UNDET,
+ MT6332_IRQ_STATUS_CHR_PLUG_IN,
+ MT6332_IRQ_STATUS_CHR_PLUG_OUT,
+ MT6332_IRQ_STATUS_BC11_TIMEOUT,
+ MT6332_IRQ_STATUS_FLASH_VLED2_SHORT,
+ MT6332_IRQ_STATUS_FLASH_VLED2_OPEN = 23,
+ MT6332_IRQ_STATUS_THR_H = 32,
+ MT6332_IRQ_STATUS_THR_L,
+ MT6332_IRQ_STATUS_BAT_H,
+ MT6332_IRQ_STATUS_BAT_L,
+ MT6332_IRQ_STATUS_M3_H,
+ MT6332_IRQ_STATUS_M3_L,
+ MT6332_IRQ_STATUS_FG_BAT_H,
+ MT6332_IRQ_STATUS_FG_BAT_L,
+ MT6332_IRQ_STATUS_FG_CUR_H,
+ MT6332_IRQ_STATUS_FG_CUR_L,
+ MT6332_IRQ_STATUS_SPKL_D,
+ MT6332_IRQ_STATUS_SPKL_AB,
+ MT6332_IRQ_STATUS_BIF,
+ MT6332_IRQ_STATUS_VWLED_OC = 45,
+ MT6332_IRQ_STATUS_VDRAM_OC = 48,
+ MT6332_IRQ_STATUS_VDVFS2_OC,
+ MT6332_IRQ_STATUS_VRF1_OC,
+ MT6332_IRQ_STATUS_VRF2_OC,
+ MT6332_IRQ_STATUS_VPA_OC,
+ MT6332_IRQ_STATUS_VSBST_OC,
+ MT6332_IRQ_STATUS_LDO_OC,
+ MT6332_IRQ_STATUS_NR,
+};
+
+#define MT6332_IRQ_CON0_BASE MT6332_IRQ_STATUS_CHR_COMPLETE
+#define MT6332_IRQ_CON0_BITS (MT6332_IRQ_STATUS_FLASH_VLED1_OPEN + 1)
+#define MT6332_IRQ_CON1_BASE MT6332_IRQ_STATUS_OV
+#define MT6332_IRQ_CON1_BITS (MT6332_IRQ_STATUS_FLASH_VLED2_OPEN - MT6332_IRQ_STATUS_OV + 1)
+#define MT6332_IRQ_CON2_BASE MT6332_IRQ_STATUS_THR_H
+#define MT6332_IRQ_CON2_BITS (MT6332_IRQ_STATUS_VWLED_OC - MT6332_IRQ_STATUS_THR_H + 1)
+#define MT6332_IRQ_CON3_BASE MT6332_IRQ_STATUS_VDRAM_OC
+#define MT6332_IRQ_CON3_BITS (MT6332_IRQ_STATUS_LDO_OC - MT6332_IRQ_STATUS_VDRAM_OC + 1)
+
+#endif /* __MFD_MT6332_CORE_H__ */
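
As a quick consistency check of the four CON widths: CON0 spans hwirqs 0-13, giving 13 + 1 = 14 bits; CON1 spans 16-23 for 23 - 16 + 1 = 8 bits; CON2 spans 32-45 for 14 bits; and CON3 spans 48-54 for 7 bits, so each group fits comfortably within one 16-bit PMIC register.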
diff --git a/include/linux/mfd/mt6332/registers.h b/include/linux/mfd/mt6332/registers.h
new file mode 100644
index 000000000000..65e0b86fceac
--- /dev/null
+++ b/include/linux/mfd/mt6332/registers.h
@@ -0,0 +1,642 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6332_REGISTERS_H__
+#define __MFD_MT6332_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6332_HWCID 0x8000
+#define MT6332_SWCID 0x8002
+#define MT6332_TOP_CON 0x8004
+#define MT6332_DDR_VREF_AP_CON 0x8006
+#define MT6332_DDR_VREF_DQ_CON 0x8008
+#define MT6332_DDR_VREF_CA_CON 0x800A
+#define MT6332_TEST_OUT 0x800C
+#define MT6332_TEST_CON0 0x800E
+#define MT6332_TEST_CON1 0x8010
+#define MT6332_TESTMODE_SW 0x8012
+#define MT6332_TESTMODE_ANA 0x8014
+#define MT6332_TDSEL_CON 0x8016
+#define MT6332_RDSEL_CON 0x8018
+#define MT6332_SMT_CON0 0x801A
+#define MT6332_SMT_CON1 0x801C
+#define MT6332_DRV_CON0 0x801E
+#define MT6332_DRV_CON1 0x8020
+#define MT6332_DRV_CON2 0x8022
+#define MT6332_EN_STATUS0 0x8024
+#define MT6332_OCSTATUS0 0x8026
+#define MT6332_TOP_STATUS 0x8028
+#define MT6332_TOP_STATUS_SET 0x802A
+#define MT6332_TOP_STATUS_CLR 0x802C
+#define MT6332_FLASH_CON0 0x802E
+#define MT6332_FLASH_CON1 0x8030
+#define MT6332_FLASH_CON2 0x8032
+#define MT6332_CORE_CON0 0x8034
+#define MT6332_CORE_CON1 0x8036
+#define MT6332_CORE_CON2 0x8038
+#define MT6332_CORE_CON3 0x803A
+#define MT6332_CORE_CON4 0x803C
+#define MT6332_CORE_CON5 0x803E
+#define MT6332_CORE_CON6 0x8040
+#define MT6332_CORE_CON7 0x8042
+#define MT6332_CORE_CON8 0x8044
+#define MT6332_CORE_CON9 0x8046
+#define MT6332_CORE_CON10 0x8048
+#define MT6332_CORE_CON11 0x804A
+#define MT6332_CORE_CON12 0x804C
+#define MT6332_CORE_CON13 0x804E
+#define MT6332_CORE_CON14 0x8050
+#define MT6332_CORE_CON15 0x8052
+#define MT6332_STA_CON0 0x8054
+#define MT6332_STA_CON1 0x8056
+#define MT6332_STA_CON2 0x8058
+#define MT6332_STA_CON3 0x805A
+#define MT6332_STA_CON4 0x805C
+#define MT6332_STA_CON5 0x805E
+#define MT6332_STA_CON6 0x8060
+#define MT6332_STA_CON7 0x8062
+#define MT6332_CHR_CON0 0x8064
+#define MT6332_CHR_CON1 0x8066
+#define MT6332_CHR_CON2 0x8068
+#define MT6332_CHR_CON3 0x806A
+#define MT6332_CHR_CON4 0x806C
+#define MT6332_CHR_CON5 0x806E
+#define MT6332_CHR_CON6 0x8070
+#define MT6332_CHR_CON7 0x8072
+#define MT6332_CHR_CON8 0x8074
+#define MT6332_CHR_CON9 0x8076
+#define MT6332_CHR_CON10 0x8078
+#define MT6332_CHR_CON11 0x807A
+#define MT6332_CHR_CON12 0x807C
+#define MT6332_CHR_CON13 0x807E
+#define MT6332_CHR_CON14 0x8080
+#define MT6332_CHR_CON15 0x8082
+#define MT6332_BOOST_CON0 0x8084
+#define MT6332_BOOST_CON1 0x8086
+#define MT6332_BOOST_CON2 0x8088
+#define MT6332_BOOST_CON3 0x808A
+#define MT6332_BOOST_CON4 0x808C
+#define MT6332_BOOST_CON5 0x808E
+#define MT6332_BOOST_CON6 0x8090
+#define MT6332_BOOST_CON7 0x8092
+#define MT6332_TOP_CKPDN_CON0 0x8094
+#define MT6332_TOP_CKPDN_CON0_SET 0x8096
+#define MT6332_TOP_CKPDN_CON0_CLR 0x8098
+#define MT6332_TOP_CKPDN_CON1 0x809A
+#define MT6332_TOP_CKPDN_CON1_SET 0x809C
+#define MT6332_TOP_CKPDN_CON1_CLR 0x809E
+#define MT6332_TOP_CKPDN_CON2 0x80A0
+#define MT6332_TOP_CKPDN_CON2_SET 0x80A2
+#define MT6332_TOP_CKPDN_CON2_CLR 0x80A4
+#define MT6332_TOP_CKSEL_CON0 0x80A6
+#define MT6332_TOP_CKSEL_CON0_SET 0x80A8
+#define MT6332_TOP_CKSEL_CON0_CLR 0x80AA
+#define MT6332_TOP_CKSEL_CON1 0x80AC
+#define MT6332_TOP_CKSEL_CON1_SET 0x80AE
+#define MT6332_TOP_CKSEL_CON1_CLR 0x80B0
+#define MT6332_TOP_CKHWEN_CON 0x80B2
+#define MT6332_TOP_CKHWEN_CON_SET 0x80B4
+#define MT6332_TOP_CKHWEN_CON_CLR 0x80B6
+#define MT6332_TOP_CKTST_CON0 0x80B8
+#define MT6332_TOP_CKTST_CON1 0x80BA
+#define MT6332_TOP_RST_CON 0x80BC
+#define MT6332_TOP_RST_CON_SET 0x80BE
+#define MT6332_TOP_RST_CON_CLR 0x80C0
+#define MT6332_TOP_RST_MISC 0x80C2
+#define MT6332_TOP_RST_MISC_SET 0x80C4
+#define MT6332_TOP_RST_MISC_CLR 0x80C6
+#define MT6332_INT_CON0 0x80C8
+#define MT6332_INT_CON0_SET 0x80CA
+#define MT6332_INT_CON0_CLR 0x80CC
+#define MT6332_INT_CON1 0x80CE
+#define MT6332_INT_CON1_SET 0x80D0
+#define MT6332_INT_CON1_CLR 0x80D2
+#define MT6332_INT_CON2 0x80D4
+#define MT6332_INT_CON2_SET 0x80D6
+#define MT6332_INT_CON2_CLR 0x80D8
+#define MT6332_INT_CON3 0x80DA
+#define MT6332_INT_CON3_SET 0x80DC
+#define MT6332_INT_CON3_CLR 0x80DE
+#define MT6332_CHRWDT_CON0 0x80E0
+#define MT6332_CHRWDT_STATUS0 0x80E2
+#define MT6332_INT_STATUS0 0x80E4
+#define MT6332_INT_STATUS1 0x80E6
+#define MT6332_INT_STATUS2 0x80E8
+#define MT6332_INT_STATUS3 0x80EA
+#define MT6332_OC_GEAR_0 0x80EC
+#define MT6332_OC_GEAR_1 0x80EE
+#define MT6332_OC_GEAR_2 0x80F0
+#define MT6332_INT_MISC_CON 0x80F2
+#define MT6332_RG_SPI_CON 0x80F4
+#define MT6332_DEW_DIO_EN 0x80F6
+#define MT6332_DEW_READ_TEST 0x80F8
+#define MT6332_DEW_WRITE_TEST 0x80FA
+#define MT6332_DEW_CRC_SWRST 0x80FC
+#define MT6332_DEW_CRC_EN 0x80FE
+#define MT6332_DEW_CRC_VAL 0x8100
+#define MT6332_DEW_DBG_MON_SEL 0x8102
+#define MT6332_DEW_CIPHER_KEY_SEL 0x8104
+#define MT6332_DEW_CIPHER_IV_SEL 0x8106
+#define MT6332_DEW_CIPHER_EN 0x8108
+#define MT6332_DEW_CIPHER_RDY 0x810A
+#define MT6332_DEW_CIPHER_MODE 0x810C
+#define MT6332_DEW_CIPHER_SWRST 0x810E
+#define MT6332_DEW_RDDMY_NO 0x8110
+#define MT6332_INT_STA 0x8112
+#define MT6332_BIF_CON0 0x8114
+#define MT6332_BIF_CON1 0x8116
+#define MT6332_BIF_CON2 0x8118
+#define MT6332_BIF_CON3 0x811A
+#define MT6332_BIF_CON4 0x811C
+#define MT6332_BIF_CON5 0x811E
+#define MT6332_BIF_CON6 0x8120
+#define MT6332_BIF_CON7 0x8122
+#define MT6332_BIF_CON8 0x8124
+#define MT6332_BIF_CON9 0x8126
+#define MT6332_BIF_CON10 0x8128
+#define MT6332_BIF_CON11 0x812A
+#define MT6332_BIF_CON12 0x812C
+#define MT6332_BIF_CON13 0x812E
+#define MT6332_BIF_CON14 0x8130
+#define MT6332_BIF_CON15 0x8132
+#define MT6332_BIF_CON16 0x8134
+#define MT6332_BIF_CON17 0x8136
+#define MT6332_BIF_CON18 0x8138
+#define MT6332_BIF_CON19 0x813A
+#define MT6332_BIF_CON20 0x813C
+#define MT6332_BIF_CON21 0x813E
+#define MT6332_BIF_CON22 0x8140
+#define MT6332_BIF_CON23 0x8142
+#define MT6332_BIF_CON24 0x8144
+#define MT6332_BIF_CON25 0x8146
+#define MT6332_BIF_CON26 0x8148
+#define MT6332_BIF_CON27 0x814A
+#define MT6332_BIF_CON28 0x814C
+#define MT6332_BIF_CON29 0x814E
+#define MT6332_BIF_CON30 0x8150
+#define MT6332_BIF_CON31 0x8152
+#define MT6332_BIF_CON32 0x8154
+#define MT6332_BIF_CON33 0x8156
+#define MT6332_BIF_CON34 0x8158
+#define MT6332_BIF_CON35 0x815A
+#define MT6332_BIF_CON36 0x815C
+#define MT6332_BATON_CON0 0x815E
+#define MT6332_BIF_CON37 0x8160
+#define MT6332_BIF_CON38 0x8162
+#define MT6332_CHR_CON16 0x8164
+#define MT6332_CHR_CON17 0x8166
+#define MT6332_CHR_CON18 0x8168
+#define MT6332_CHR_CON19 0x816A
+#define MT6332_CHR_CON20 0x816C
+#define MT6332_CHR_CON21 0x816E
+#define MT6332_CHR_CON22 0x8170
+#define MT6332_CHR_CON23 0x8172
+#define MT6332_CHR_CON24 0x8174
+#define MT6332_CHR_CON25 0x8176
+#define MT6332_STA_CON8 0x8178
+#define MT6332_BUCK_ALL_CON0 0x8400
+#define MT6332_BUCK_ALL_CON1 0x8402
+#define MT6332_BUCK_ALL_CON2 0x8404
+#define MT6332_BUCK_ALL_CON3 0x8406
+#define MT6332_BUCK_ALL_CON4 0x8408
+#define MT6332_BUCK_ALL_CON5 0x840A
+#define MT6332_BUCK_ALL_CON6 0x840C
+#define MT6332_BUCK_ALL_CON7 0x840E
+#define MT6332_BUCK_ALL_CON8 0x8410
+#define MT6332_BUCK_ALL_CON9 0x8412
+#define MT6332_BUCK_ALL_CON10 0x8414
+#define MT6332_BUCK_ALL_CON11 0x8416
+#define MT6332_BUCK_ALL_CON12 0x8418
+#define MT6332_BUCK_ALL_CON13 0x841A
+#define MT6332_BUCK_ALL_CON14 0x841C
+#define MT6332_BUCK_ALL_CON15 0x841E
+#define MT6332_BUCK_ALL_CON16 0x8420
+#define MT6332_BUCK_ALL_CON17 0x8422
+#define MT6332_BUCK_ALL_CON18 0x8424
+#define MT6332_BUCK_ALL_CON19 0x8426
+#define MT6332_BUCK_ALL_CON20 0x8428
+#define MT6332_BUCK_ALL_CON21 0x842A
+#define MT6332_BUCK_ALL_CON22 0x842C
+#define MT6332_BUCK_ALL_CON23 0x842E
+#define MT6332_BUCK_ALL_CON24 0x8430
+#define MT6332_BUCK_ALL_CON25 0x8432
+#define MT6332_BUCK_ALL_CON26 0x8434
+#define MT6332_BUCK_ALL_CON27 0x8436
+#define MT6332_VDRAM_CON0 0x8438
+#define MT6332_VDRAM_CON1 0x843A
+#define MT6332_VDRAM_CON2 0x843C
+#define MT6332_VDRAM_CON3 0x843E
+#define MT6332_VDRAM_CON4 0x8440
+#define MT6332_VDRAM_CON5 0x8442
+#define MT6332_VDRAM_CON6 0x8444
+#define MT6332_VDRAM_CON7 0x8446
+#define MT6332_VDRAM_CON8 0x8448
+#define MT6332_VDRAM_CON9 0x844A
+#define MT6332_VDRAM_CON10 0x844C
+#define MT6332_VDRAM_CON11 0x844E
+#define MT6332_VDRAM_CON12 0x8450
+#define MT6332_VDRAM_CON13 0x8452
+#define MT6332_VDRAM_CON14 0x8454
+#define MT6332_VDRAM_CON15 0x8456
+#define MT6332_VDRAM_CON16 0x8458
+#define MT6332_VDRAM_CON17 0x845A
+#define MT6332_VDRAM_CON18 0x845C
+#define MT6332_VDRAM_CON19 0x845E
+#define MT6332_VDRAM_CON20 0x8460
+#define MT6332_VDRAM_CON21 0x8462
+#define MT6332_VDVFS2_CON0 0x8464
+#define MT6332_VDVFS2_CON1 0x8466
+#define MT6332_VDVFS2_CON2 0x8468
+#define MT6332_VDVFS2_CON3 0x846A
+#define MT6332_VDVFS2_CON4 0x846C
+#define MT6332_VDVFS2_CON5 0x846E
+#define MT6332_VDVFS2_CON6 0x8470
+#define MT6332_VDVFS2_CON7 0x8472
+#define MT6332_VDVFS2_CON8 0x8474
+#define MT6332_VDVFS2_CON9 0x8476
+#define MT6332_VDVFS2_CON10 0x8478
+#define MT6332_VDVFS2_CON11 0x847A
+#define MT6332_VDVFS2_CON12 0x847C
+#define MT6332_VDVFS2_CON13 0x847E
+#define MT6332_VDVFS2_CON14 0x8480
+#define MT6332_VDVFS2_CON15 0x8482
+#define MT6332_VDVFS2_CON16 0x8484
+#define MT6332_VDVFS2_CON17 0x8486
+#define MT6332_VDVFS2_CON18 0x8488
+#define MT6332_VDVFS2_CON19 0x848A
+#define MT6332_VDVFS2_CON20 0x848C
+#define MT6332_VDVFS2_CON21 0x848E
+#define MT6332_VDVFS2_CON22 0x8490
+#define MT6332_VDVFS2_CON23 0x8492
+#define MT6332_VDVFS2_CON24 0x8494
+#define MT6332_VDVFS2_CON25 0x8496
+#define MT6332_VDVFS2_CON26 0x8498
+#define MT6332_VDVFS2_CON27 0x849A
+#define MT6332_VRF1_CON0 0x849C
+#define MT6332_VRF1_CON1 0x849E
+#define MT6332_VRF1_CON2 0x84A0
+#define MT6332_VRF1_CON3 0x84A2
+#define MT6332_VRF1_CON4 0x84A4
+#define MT6332_VRF1_CON5 0x84A6
+#define MT6332_VRF1_CON6 0x84A8
+#define MT6332_VRF1_CON7 0x84AA
+#define MT6332_VRF1_CON8 0x84AC
+#define MT6332_VRF1_CON9 0x84AE
+#define MT6332_VRF1_CON10 0x84B0
+#define MT6332_VRF1_CON11 0x84B2
+#define MT6332_VRF1_CON12 0x84B4
+#define MT6332_VRF1_CON13 0x84B6
+#define MT6332_VRF1_CON14 0x84B8
+#define MT6332_VRF1_CON15 0x84BA
+#define MT6332_VRF1_CON16 0x84BC
+#define MT6332_VRF1_CON17 0x84BE
+#define MT6332_VRF1_CON18 0x84C0
+#define MT6332_VRF1_CON19 0x84C2
+#define MT6332_VRF1_CON20 0x84C4
+#define MT6332_VRF1_CON21 0x84C6
+#define MT6332_VRF2_CON0 0x84C8
+#define MT6332_VRF2_CON1 0x84CA
+#define MT6332_VRF2_CON2 0x84CC
+#define MT6332_VRF2_CON3 0x84CE
+#define MT6332_VRF2_CON4 0x84D0
+#define MT6332_VRF2_CON5 0x84D2
+#define MT6332_VRF2_CON6 0x84D4
+#define MT6332_VRF2_CON7 0x84D6
+#define MT6332_VRF2_CON8 0x84D8
+#define MT6332_VRF2_CON9 0x84DA
+#define MT6332_VRF2_CON10 0x84DC
+#define MT6332_VRF2_CON11 0x84DE
+#define MT6332_VRF2_CON12 0x84E0
+#define MT6332_VRF2_CON13 0x84E2
+#define MT6332_VRF2_CON14 0x84E4
+#define MT6332_VRF2_CON15 0x84E6
+#define MT6332_VRF2_CON16 0x84E8
+#define MT6332_VRF2_CON17 0x84EA
+#define MT6332_VRF2_CON18 0x84EC
+#define MT6332_VRF2_CON19 0x84EE
+#define MT6332_VRF2_CON20 0x84F0
+#define MT6332_VRF2_CON21 0x84F2
+#define MT6332_VPA_CON0 0x84F4
+#define MT6332_VPA_CON1 0x84F6
+#define MT6332_VPA_CON2 0x84F8
+#define MT6332_VPA_CON3 0x84FC
+#define MT6332_VPA_CON4 0x84FE
+#define MT6332_VPA_CON5 0x8500
+#define MT6332_VPA_CON6 0x8502
+#define MT6332_VPA_CON7 0x8504
+#define MT6332_VPA_CON8 0x8506
+#define MT6332_VPA_CON9 0x8508
+#define MT6332_VPA_CON10 0x850A
+#define MT6332_VPA_CON11 0x850C
+#define MT6332_VPA_CON12 0x850E
+#define MT6332_VPA_CON13 0x8510
+#define MT6332_VPA_CON14 0x8512
+#define MT6332_VPA_CON15 0x8514
+#define MT6332_VPA_CON16 0x8516
+#define MT6332_VPA_CON17 0x8518
+#define MT6332_VPA_CON18 0x851A
+#define MT6332_VPA_CON19 0x851C
+#define MT6332_VPA_CON20 0x851E
+#define MT6332_VPA_CON21 0x8520
+#define MT6332_VPA_CON22 0x8522
+#define MT6332_VPA_CON23 0x8524
+#define MT6332_VPA_CON24 0x8526
+#define MT6332_VPA_CON25 0x8528
+#define MT6332_VSBST_CON0 0x852A
+#define MT6332_VSBST_CON1 0x852C
+#define MT6332_VSBST_CON2 0x852E
+#define MT6332_VSBST_CON3 0x8530
+#define MT6332_VSBST_CON4 0x8532
+#define MT6332_VSBST_CON5 0x8534
+#define MT6332_VSBST_CON6 0x8536
+#define MT6332_VSBST_CON7 0x8538
+#define MT6332_VSBST_CON8 0x853A
+#define MT6332_VSBST_CON9 0x853C
+#define MT6332_VSBST_CON10 0x853E
+#define MT6332_VSBST_CON11 0x8540
+#define MT6332_VSBST_CON12 0x8542
+#define MT6332_VSBST_CON13 0x8544
+#define MT6332_VSBST_CON14 0x8546
+#define MT6332_VSBST_CON15 0x8548
+#define MT6332_VSBST_CON16 0x854A
+#define MT6332_VSBST_CON17 0x854C
+#define MT6332_VSBST_CON18 0x854E
+#define MT6332_VSBST_CON19 0x8550
+#define MT6332_VSBST_CON20 0x8552
+#define MT6332_VSBST_CON21 0x8554
+#define MT6332_BUCK_K_CON0 0x8556
+#define MT6332_BUCK_K_CON1 0x8558
+#define MT6332_BUCK_K_CON2 0x855A
+#define MT6332_BUCK_K_CON3 0x855C
+#define MT6332_BUCK_K_CON4 0x855E
+#define MT6332_BUCK_K_CON5 0x8560
+#define MT6332_AUXADC_ADC0 0x8800
+#define MT6332_AUXADC_ADC1 0x8802
+#define MT6332_AUXADC_ADC2 0x8804
+#define MT6332_AUXADC_ADC3 0x8806
+#define MT6332_AUXADC_ADC4 0x8808
+#define MT6332_AUXADC_ADC5 0x880A
+#define MT6332_AUXADC_ADC6 0x880C
+#define MT6332_AUXADC_ADC7 0x880E
+#define MT6332_AUXADC_ADC8 0x8810
+#define MT6332_AUXADC_ADC9 0x8812
+#define MT6332_AUXADC_ADC10 0x8814
+#define MT6332_AUXADC_ADC11 0x8816
+#define MT6332_AUXADC_ADC12 0x8818
+#define MT6332_AUXADC_ADC13 0x881A
+#define MT6332_AUXADC_ADC14 0x881C
+#define MT6332_AUXADC_ADC15 0x881E
+#define MT6332_AUXADC_ADC16 0x8820
+#define MT6332_AUXADC_ADC17 0x8822
+#define MT6332_AUXADC_ADC18 0x8824
+#define MT6332_AUXADC_ADC19 0x8826
+#define MT6332_AUXADC_ADC20 0x8828
+#define MT6332_AUXADC_ADC21 0x882A
+#define MT6332_AUXADC_ADC22 0x882C
+#define MT6332_AUXADC_ADC23 0x882E
+#define MT6332_AUXADC_ADC24 0x8830
+#define MT6332_AUXADC_ADC25 0x8832
+#define MT6332_AUXADC_ADC26 0x8834
+#define MT6332_AUXADC_ADC27 0x8836
+#define MT6332_AUXADC_ADC28 0x8838
+#define MT6332_AUXADC_ADC29 0x883A
+#define MT6332_AUXADC_ADC30 0x883C
+#define MT6332_AUXADC_ADC31 0x883E
+#define MT6332_AUXADC_ADC32 0x8840
+#define MT6332_AUXADC_ADC33 0x8842
+#define MT6332_AUXADC_ADC34 0x8844
+#define MT6332_AUXADC_ADC35 0x8846
+#define MT6332_AUXADC_ADC36 0x8848
+#define MT6332_AUXADC_ADC37 0x884A
+#define MT6332_AUXADC_ADC38 0x884C
+#define MT6332_AUXADC_ADC39 0x884E
+#define MT6332_AUXADC_ADC40 0x8850
+#define MT6332_AUXADC_ADC41 0x8852
+#define MT6332_AUXADC_ADC42 0x8854
+#define MT6332_AUXADC_ADC43 0x8856
+#define MT6332_AUXADC_STA0 0x8858
+#define MT6332_AUXADC_STA1 0x885A
+#define MT6332_AUXADC_RQST0 0x885C
+#define MT6332_AUXADC_RQST0_SET 0x885E
+#define MT6332_AUXADC_RQST0_CLR 0x8860
+#define MT6332_AUXADC_RQST1 0x8862
+#define MT6332_AUXADC_RQST1_SET 0x8864
+#define MT6332_AUXADC_RQST1_CLR 0x8866
+#define MT6332_AUXADC_CON0 0x8868
+#define MT6332_AUXADC_CON1 0x886A
+#define MT6332_AUXADC_CON2 0x886C
+#define MT6332_AUXADC_CON3 0x886E
+#define MT6332_AUXADC_CON4 0x8870
+#define MT6332_AUXADC_CON5 0x8872
+#define MT6332_AUXADC_CON6 0x8874
+#define MT6332_AUXADC_CON7 0x8876
+#define MT6332_AUXADC_CON8 0x8878
+#define MT6332_AUXADC_CON9 0x887A
+#define MT6332_AUXADC_CON10 0x887C
+#define MT6332_AUXADC_CON11 0x887E
+#define MT6332_AUXADC_CON12 0x8880
+#define MT6332_AUXADC_CON13 0x8882
+#define MT6332_AUXADC_CON14 0x8884
+#define MT6332_AUXADC_CON15 0x8886
+#define MT6332_AUXADC_CON16 0x8888
+#define MT6332_AUXADC_CON17 0x888A
+#define MT6332_AUXADC_CON18 0x888C
+#define MT6332_AUXADC_CON19 0x888E
+#define MT6332_AUXADC_CON20 0x8890
+#define MT6332_AUXADC_CON21 0x8892
+#define MT6332_AUXADC_CON22 0x8894
+#define MT6332_AUXADC_CON23 0x8896
+#define MT6332_AUXADC_CON24 0x8898
+#define MT6332_AUXADC_CON25 0x889A
+#define MT6332_AUXADC_CON26 0x889C
+#define MT6332_AUXADC_CON27 0x889E
+#define MT6332_AUXADC_CON28 0x88A0
+#define MT6332_AUXADC_CON29 0x88A2
+#define MT6332_AUXADC_CON30 0x88A4
+#define MT6332_AUXADC_CON31 0x88A6
+#define MT6332_AUXADC_CON32 0x88A8
+#define MT6332_AUXADC_CON33 0x88AA
+#define MT6332_AUXADC_CON34 0x88AC
+#define MT6332_AUXADC_CON35 0x88AE
+#define MT6332_AUXADC_CON36 0x88B0
+#define MT6332_AUXADC_CON37 0x88B2
+#define MT6332_AUXADC_CON38 0x88B4
+#define MT6332_AUXADC_CON39 0x88B6
+#define MT6332_AUXADC_CON40 0x88B8
+#define MT6332_AUXADC_CON41 0x88BA
+#define MT6332_AUXADC_CON42 0x88BC
+#define MT6332_AUXADC_CON43 0x88BE
+#define MT6332_AUXADC_CON44 0x88C0
+#define MT6332_AUXADC_CON45 0x88C2
+#define MT6332_AUXADC_CON46 0x88C4
+#define MT6332_AUXADC_CON47 0x88C6
+#define MT6332_STRUP_CONA0 0x8C00
+#define MT6332_STRUP_CONA1 0x8C02
+#define MT6332_STRUP_CONA2 0x8C04
+#define MT6332_STRUP_CON0 0x8C06
+#define MT6332_STRUP_CON2 0x8C08
+#define MT6332_STRUP_CON3 0x8C0A
+#define MT6332_STRUP_CON4 0x8C0C
+#define MT6332_STRUP_CON5 0x8C0E
+#define MT6332_STRUP_CON6 0x8C10
+#define MT6332_STRUP_CON7 0x8C12
+#define MT6332_STRUP_CON8 0x8C14
+#define MT6332_STRUP_CON9 0x8C16
+#define MT6332_STRUP_CON10 0x8C18
+#define MT6332_STRUP_CON11 0x8C1A
+#define MT6332_STRUP_CON12 0x8C1C
+#define MT6332_STRUP_CON13 0x8C1E
+#define MT6332_STRUP_CON14 0x8C20
+#define MT6332_STRUP_CON15 0x8C22
+#define MT6332_STRUP_CON16 0x8C24
+#define MT6332_STRUP_CON17 0x8C26
+#define MT6332_FGADC_CON0 0x8C28
+#define MT6332_FGADC_CON1 0x8C2A
+#define MT6332_FGADC_CON2 0x8C2C
+#define MT6332_FGADC_CON3 0x8C2E
+#define MT6332_FGADC_CON4 0x8C30
+#define MT6332_FGADC_CON5 0x8C32
+#define MT6332_FGADC_CON6 0x8C34
+#define MT6332_FGADC_CON7 0x8C36
+#define MT6332_FGADC_CON8 0x8C38
+#define MT6332_FGADC_CON9 0x8C3A
+#define MT6332_FGADC_CON10 0x8C3C
+#define MT6332_FGADC_CON11 0x8C3E
+#define MT6332_FGADC_CON12 0x8C40
+#define MT6332_FGADC_CON13 0x8C42
+#define MT6332_FGADC_CON14 0x8C44
+#define MT6332_FGADC_CON15 0x8C46
+#define MT6332_FGADC_CON16 0x8C48
+#define MT6332_FGADC_CON17 0x8C4A
+#define MT6332_FGADC_CON18 0x8C4C
+#define MT6332_FGADC_CON19 0x8C4E
+#define MT6332_FGADC_CON20 0x8C50
+#define MT6332_FGADC_CON21 0x8C52
+#define MT6332_FGADC_CON22 0x8C54
+#define MT6332_OTP_CON0 0x8C56
+#define MT6332_OTP_CON1 0x8C58
+#define MT6332_OTP_CON2 0x8C5A
+#define MT6332_OTP_CON3 0x8C5C
+#define MT6332_OTP_CON4 0x8C5E
+#define MT6332_OTP_CON5 0x8C60
+#define MT6332_OTP_CON6 0x8C62
+#define MT6332_OTP_CON7 0x8C64
+#define MT6332_OTP_CON8 0x8C66
+#define MT6332_OTP_CON9 0x8C68
+#define MT6332_OTP_CON10 0x8C6A
+#define MT6332_OTP_CON11 0x8C6C
+#define MT6332_OTP_CON12 0x8C6E
+#define MT6332_OTP_CON13 0x8C70
+#define MT6332_OTP_CON14 0x8C72
+#define MT6332_OTP_DOUT_0_15 0x8C74
+#define MT6332_OTP_DOUT_16_31 0x8C76
+#define MT6332_OTP_DOUT_32_47 0x8C78
+#define MT6332_OTP_DOUT_48_63 0x8C7A
+#define MT6332_OTP_DOUT_64_79 0x8C7C
+#define MT6332_OTP_DOUT_80_95 0x8C7E
+#define MT6332_OTP_DOUT_96_111 0x8C80
+#define MT6332_OTP_DOUT_112_127 0x8C82
+#define MT6332_OTP_DOUT_128_143 0x8C84
+#define MT6332_OTP_DOUT_144_159 0x8C86
+#define MT6332_OTP_DOUT_160_175 0x8C88
+#define MT6332_OTP_DOUT_176_191 0x8C8A
+#define MT6332_OTP_DOUT_192_207 0x8C8C
+#define MT6332_OTP_DOUT_208_223 0x8C8E
+#define MT6332_OTP_DOUT_224_239 0x8C90
+#define MT6332_OTP_DOUT_240_255 0x8C92
+#define MT6332_OTP_VAL_0_15 0x8C94
+#define MT6332_OTP_VAL_16_31 0x8C96
+#define MT6332_OTP_VAL_32_47 0x8C98
+#define MT6332_OTP_VAL_48_63 0x8C9A
+#define MT6332_OTP_VAL_64_79 0x8C9C
+#define MT6332_OTP_VAL_80_95 0x8C9E
+#define MT6332_OTP_VAL_96_111 0x8CA0
+#define MT6332_OTP_VAL_112_127 0x8CA2
+#define MT6332_OTP_VAL_128_143 0x8CA4
+#define MT6332_OTP_VAL_144_159 0x8CA6
+#define MT6332_OTP_VAL_160_175 0x8CA8
+#define MT6332_OTP_VAL_176_191 0x8CAA
+#define MT6332_OTP_VAL_192_207 0x8CAC
+#define MT6332_OTP_VAL_208_223 0x8CAE
+#define MT6332_OTP_VAL_224_239 0x8CB0
+#define MT6332_OTP_VAL_240_255 0x8CB2
+#define MT6332_LDO_CON0 0x8CB4
+#define MT6332_LDO_CON1 0x8CB6
+#define MT6332_LDO_CON2 0x8CB8
+#define MT6332_LDO_CON3 0x8CBA
+#define MT6332_LDO_CON5 0x8CBC
+#define MT6332_LDO_CON6 0x8CBE
+#define MT6332_LDO_CON7 0x8CC0
+#define MT6332_LDO_CON8 0x8CC2
+#define MT6332_LDO_CON9 0x8CC4
+#define MT6332_LDO_CON10 0x8CC6
+#define MT6332_LDO_CON11 0x8CC8
+#define MT6332_LDO_CON12 0x8CCA
+#define MT6332_LDO_CON13 0x8CCC
+#define MT6332_FQMTR_CON0 0x8CCE
+#define MT6332_FQMTR_CON1 0x8CD0
+#define MT6332_FQMTR_CON2 0x8CD2
+#define MT6332_IWLED_CON0 0x8CD4
+#define MT6332_IWLED_DEG 0x8CD6
+#define MT6332_IWLED_STATUS 0x8CD8
+#define MT6332_IWLED_EN_CTRL 0x8CDA
+#define MT6332_IWLED_CON1 0x8CDC
+#define MT6332_IWLED_CON2 0x8CDE
+#define MT6332_IWLED_TRIM0 0x8CE0
+#define MT6332_IWLED_TRIM1 0x8CE2
+#define MT6332_IWLED_CON3 0x8CE4
+#define MT6332_IWLED_CON4 0x8CE6
+#define MT6332_IWLED_CON5 0x8CE8
+#define MT6332_IWLED_CON6 0x8CEA
+#define MT6332_IWLED_CON7 0x8CEC
+#define MT6332_IWLED_CON8 0x8CEE
+#define MT6332_IWLED_CON9 0x8CF0
+#define MT6332_SPK_CON0 0x8CF2
+#define MT6332_SPK_CON1 0x8CF4
+#define MT6332_SPK_CON2 0x8CF6
+#define MT6332_SPK_CON3 0x8CF8
+#define MT6332_SPK_CON4 0x8CFA
+#define MT6332_SPK_CON5 0x8CFC
+#define MT6332_SPK_CON6 0x8CFE
+#define MT6332_SPK_CON7 0x8D00
+#define MT6332_SPK_CON8 0x8D02
+#define MT6332_SPK_CON9 0x8D04
+#define MT6332_SPK_CON10 0x8D06
+#define MT6332_SPK_CON11 0x8D08
+#define MT6332_SPK_CON12 0x8D0A
+#define MT6332_SPK_CON13 0x8D0C
+#define MT6332_SPK_CON14 0x8D0E
+#define MT6332_SPK_CON15 0x8D10
+#define MT6332_SPK_CON16 0x8D12
+#define MT6332_TESTI_CON0 0x8D14
+#define MT6332_TESTI_CON1 0x8D16
+#define MT6332_TESTI_CON2 0x8D18
+#define MT6332_TESTI_CON3 0x8D1A
+#define MT6332_TESTI_CON4 0x8D1C
+#define MT6332_TESTI_CON5 0x8D1E
+#define MT6332_TESTI_CON6 0x8D20
+#define MT6332_TESTI_MUX_CON0 0x8D22
+#define MT6332_TESTI_MUX_CON1 0x8D24
+#define MT6332_TESTI_MUX_CON2 0x8D26
+#define MT6332_TESTI_MUX_CON3 0x8D28
+#define MT6332_TESTI_MUX_CON4 0x8D2A
+#define MT6332_TESTI_MUX_CON5 0x8D2C
+#define MT6332_TESTI_MUX_CON6 0x8D2E
+#define MT6332_TESTO_CON0 0x8D30
+#define MT6332_TESTO_CON1 0x8D32
+#define MT6332_TEST_OMUX_CON0 0x8D34
+#define MT6332_TEST_OMUX_CON1 0x8D36
+#define MT6332_DEBUG_CON0 0x8D38
+#define MT6332_DEBUG_CON1 0x8D3A
+#define MT6332_DEBUG_CON2 0x8D3C
+#define MT6332_FGADC_CON23 0x8D3E
+#define MT6332_FGADC_CON24 0x8D40
+#define MT6332_FGADC_CON25 0x8D42
+#define MT6332_TOP_RST_STATUS 0x8D44
+#define MT6332_TOP_RST_STATUS_SET 0x8D46
+#define MT6332_TOP_RST_STATUS_CLR 0x8D48
+#define MT6332_VDVFS2_CON28 0x8D4A
+
+#endif /* __MFD_MT6332_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6357/core.h b/include/linux/mfd/mt6357/core.h
new file mode 100644
index 000000000000..2441611264fd
--- /dev/null
+++ b/include/linux/mfd/mt6357/core.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 BayLibre, SAS
+ * Author: Fabien Parent <fparent@baylibre.com>
+ */
+
+#ifndef __MFD_MT6357_CORE_H__
+#define __MFD_MT6357_CORE_H__
+
+enum mt6357_irq_top_status_shift {
+ MT6357_BUCK_TOP = 0,
+ MT6357_LDO_TOP,
+ MT6357_PSC_TOP,
+ MT6357_SCK_TOP,
+ MT6357_BM_TOP,
+ MT6357_HK_TOP,
+ MT6357_XPP_TOP,
+ MT6357_AUD_TOP,
+ MT6357_MISC_TOP,
+};
+
+enum mt6357_irq_numbers {
+ MT6357_IRQ_VPROC_OC = 0,
+ MT6357_IRQ_VCORE_OC,
+ MT6357_IRQ_VMODEM_OC,
+ MT6357_IRQ_VS1_OC,
+ MT6357_IRQ_VPA_OC,
+ MT6357_IRQ_VCORE_PREOC,
+ MT6357_IRQ_VFE28_OC = 16,
+ MT6357_IRQ_VXO22_OC,
+ MT6357_IRQ_VRF18_OC,
+ MT6357_IRQ_VRF12_OC,
+ MT6357_IRQ_VEFUSE_OC,
+ MT6357_IRQ_VCN33_OC,
+ MT6357_IRQ_VCN28_OC,
+ MT6357_IRQ_VCN18_OC,
+ MT6357_IRQ_VCAMA_OC,
+ MT6357_IRQ_VCAMD_OC,
+ MT6357_IRQ_VCAMIO_OC,
+ MT6357_IRQ_VLDO28_OC,
+ MT6357_IRQ_VUSB33_OC,
+ MT6357_IRQ_VAUX18_OC,
+ MT6357_IRQ_VAUD28_OC,
+ MT6357_IRQ_VIO28_OC,
+ MT6357_IRQ_VIO18_OC,
+ MT6357_IRQ_VSRAM_PROC_OC,
+ MT6357_IRQ_VSRAM_OTHERS_OC,
+ MT6357_IRQ_VIBR_OC,
+ MT6357_IRQ_VDRAM_OC,
+ MT6357_IRQ_VMC_OC,
+ MT6357_IRQ_VMCH_OC,
+ MT6357_IRQ_VEMC_OC,
+ MT6357_IRQ_VSIM1_OC,
+ MT6357_IRQ_VSIM2_OC,
+ MT6357_IRQ_PWRKEY = 48,
+ MT6357_IRQ_HOMEKEY,
+ MT6357_IRQ_PWRKEY_R,
+ MT6357_IRQ_HOMEKEY_R,
+ MT6357_IRQ_NI_LBAT_INT,
+ MT6357_IRQ_CHRDET,
+ MT6357_IRQ_CHRDET_EDGE,
+ MT6357_IRQ_VCDT_HV_DET,
+ MT6357_IRQ_WATCHDOG,
+ MT6357_IRQ_VBATON_UNDET,
+ MT6357_IRQ_BVALID_DET,
+ MT6357_IRQ_OV,
+ MT6357_IRQ_RTC = 64,
+ MT6357_IRQ_FG_BAT0_H = 80,
+ MT6357_IRQ_FG_BAT0_L,
+ MT6357_IRQ_FG_CUR_H,
+ MT6357_IRQ_FG_CUR_L,
+ MT6357_IRQ_FG_ZCV,
+ MT6357_IRQ_BATON_LV = 96,
+ MT6357_IRQ_BATON_HT,
+ MT6357_IRQ_BAT_H = 112,
+ MT6357_IRQ_BAT_L,
+ MT6357_IRQ_AUXADC_IMP,
+ MT6357_IRQ_NAG_C_DLTV,
+ MT6357_IRQ_AUDIO = 128,
+ MT6357_IRQ_ACCDET = 133,
+ MT6357_IRQ_ACCDET_EINT0,
+ MT6357_IRQ_ACCDET_EINT1,
+ MT6357_IRQ_SPI_CMD_ALERT = 144,
+ MT6357_IRQ_NR,
+};
+
+#define MT6357_IRQ_BUCK_BASE MT6357_IRQ_VPROC_OC
+#define MT6357_IRQ_LDO_BASE MT6357_IRQ_VFE28_OC
+#define MT6357_IRQ_PSC_BASE MT6357_IRQ_PWRKEY
+#define MT6357_IRQ_SCK_BASE MT6357_IRQ_RTC
+#define MT6357_IRQ_BM_BASE MT6357_IRQ_FG_BAT0_H
+#define MT6357_IRQ_HK_BASE MT6357_IRQ_BAT_H
+#define MT6357_IRQ_AUD_BASE MT6357_IRQ_AUDIO
+#define MT6357_IRQ_MISC_BASE MT6357_IRQ_SPI_CMD_ALERT
+
+#define MT6357_IRQ_BUCK_BITS (MT6357_IRQ_VCORE_PREOC - MT6357_IRQ_BUCK_BASE + 1)
+#define MT6357_IRQ_LDO_BITS (MT6357_IRQ_VSIM2_OC - MT6357_IRQ_LDO_BASE + 1)
+#define MT6357_IRQ_PSC_BITS (MT6357_IRQ_VCDT_HV_DET - MT6357_IRQ_PSC_BASE + 1)
+#define MT6357_IRQ_SCK_BITS (MT6357_IRQ_RTC - MT6357_IRQ_SCK_BASE + 1)
+#define MT6357_IRQ_BM_BITS (MT6357_IRQ_BATON_HT - MT6357_IRQ_BM_BASE + 1)
+#define MT6357_IRQ_HK_BITS (MT6357_IRQ_NAG_C_DLTV - MT6357_IRQ_HK_BASE + 1)
+#define MT6357_IRQ_AUD_BITS (MT6357_IRQ_ACCDET_EINT1 - MT6357_IRQ_AUD_BASE + 1)
+#define MT6357_IRQ_MISC_BITS \
+ (MT6357_IRQ_SPI_CMD_ALERT - MT6357_IRQ_MISC_BASE + 1)
+
+#define MT6357_TOP_GEN(sp) \
+{ \
+ .hwirq_base = MT6357_IRQ_##sp##_BASE, \
+ .num_int_regs = \
+ ((MT6357_IRQ_##sp##_BITS - 1) / \
+ MTK_PMIC_REG_WIDTH) + 1, \
+ .en_reg = MT6357_##sp##_TOP_INT_CON0, \
+ .en_reg_shift = 0x6, \
+ .sta_reg = MT6357_##sp##_TOP_INT_STATUS0, \
+ .sta_reg_shift = 0x2, \
+ .top_offset = MT6357_##sp##_TOP, \
+}
+
+#endif /* __MFD_MT6357_CORE_H__ */
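
[Editor's note, not part of the patch] MT6357_TOP_GEN() expands to one designated initializer per interrupt "top" domain, so an irq driver can describe every domain in a single table and loop over it in a generic handler. A minimal sketch of such a table; the struct irq_top_t layout and MTK_PMIC_REG_WIDTH value are assumptions modeled on the existing MT6358 irq code, not guaranteed by this patch:

	#include <linux/mfd/mt6357/core.h>
	#include <linux/mfd/mt6357/registers.h>

	/* Assumed; the MT6358 core header uses 16-bit PMIC registers. */
	#define MTK_PMIC_REG_WIDTH 16

	/* Assumed layout; fields match the initializers in MT6357_TOP_GEN(). */
	struct irq_top_t {
		int hwirq_base;
		int num_int_regs;
		int en_reg;
		int en_reg_shift;
		int sta_reg;
		int sta_reg_shift;
		int top_offset;
	};

	static const struct irq_top_t mt6357_ints[] = {
		MT6357_TOP_GEN(BUCK),
		MT6357_TOP_GEN(LDO),
		MT6357_TOP_GEN(PSC),
		MT6357_TOP_GEN(SCK),
		MT6357_TOP_GEN(BM),
		MT6357_TOP_GEN(HK),
		MT6357_TOP_GEN(AUD),
		MT6357_TOP_GEN(MISC),
	};

Each entry resolves against the MT6357_*_TOP_INT_CON0/STATUS0 offsets defined in the registers header that follows, and num_int_regs rounds the domain's bit count up to whole 16-bit status registers.
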
diff --git a/include/linux/mfd/mt6357/registers.h b/include/linux/mfd/mt6357/registers.h
new file mode 100644
index 000000000000..e24af83b618d
--- /dev/null
+++ b/include/linux/mfd/mt6357/registers.h
@@ -0,0 +1,1574 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6357_REGISTERS_H__
+#define __MFD_MT6357_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6357_TOP0_ID 0x0
+#define MT6357_TOP0_REV0 0x2
+#define MT6357_TOP0_DSN_DBI 0x4
+#define MT6357_TOP0_DSN_DXI 0x6
+#define MT6357_HWCID 0x8
+#define MT6357_SWCID 0xa
+#define MT6357_PONSTS 0xc
+#define MT6357_POFFSTS 0xe
+#define MT6357_PSTSCTL 0x10
+#define MT6357_PG_DEB_STS0 0x12
+#define MT6357_PG_SDN_STS0 0x14
+#define MT6357_OC_SDN_STS0 0x16
+#define MT6357_THERMALSTATUS 0x18
+#define MT6357_TOP_CON 0x1a
+#define MT6357_TEST_OUT 0x1c
+#define MT6357_TEST_CON0 0x1e
+#define MT6357_TEST_CON1 0x20
+#define MT6357_TESTMODE_SW 0x22
+#define MT6357_TOPSTATUS 0x24
+#define MT6357_TDSEL_CON 0x26
+#define MT6357_RDSEL_CON 0x28
+#define MT6357_SMT_CON0 0x2a
+#define MT6357_SMT_CON1 0x2c
+#define MT6357_TOP_RSV0 0x2e
+#define MT6357_TOP_RSV1 0x30
+#define MT6357_DRV_CON0 0x32
+#define MT6357_DRV_CON1 0x34
+#define MT6357_DRV_CON2 0x36
+#define MT6357_DRV_CON3 0x38
+#define MT6357_FILTER_CON0 0x3a
+#define MT6357_FILTER_CON1 0x3c
+#define MT6357_FILTER_CON2 0x3e
+#define MT6357_FILTER_CON3 0x40
+#define MT6357_TOP_STATUS 0x42
+#define MT6357_TOP_STATUS_SET 0x44
+#define MT6357_TOP_STATUS_CLR 0x46
+#define MT6357_TOP_TRAP 0x48
+#define MT6357_TOP1_ID 0x80
+#define MT6357_TOP1_REV0 0x82
+#define MT6357_TOP1_DSN_DBI 0x84
+#define MT6357_TOP1_DSN_DXI 0x86
+#define MT6357_GPIO_DIR0 0x88
+#define MT6357_GPIO_DIR0_SET 0x8a
+#define MT6357_GPIO_DIR0_CLR 0x8c
+#define MT6357_GPIO_PULLEN0 0x8e
+#define MT6357_GPIO_PULLEN0_SET 0x90
+#define MT6357_GPIO_PULLEN0_CLR 0x92
+#define MT6357_GPIO_PULLSEL0 0x94
+#define MT6357_GPIO_PULLSEL0_SET 0x96
+#define MT6357_GPIO_PULLSEL0_CLR 0x98
+#define MT6357_GPIO_DINV0 0x9a
+#define MT6357_GPIO_DINV0_SET 0x9c
+#define MT6357_GPIO_DINV0_CLR 0x9e
+#define MT6357_GPIO_DOUT0 0xa0
+#define MT6357_GPIO_DOUT0_SET 0xa2
+#define MT6357_GPIO_DOUT0_CLR 0xa4
+#define MT6357_GPIO_PI0 0xa6
+#define MT6357_GPIO_POE0 0xa8
+#define MT6357_GPIO_MODE0 0xaa
+#define MT6357_GPIO_MODE0_SET 0xac
+#define MT6357_GPIO_MODE0_CLR 0xae
+#define MT6357_GPIO_MODE1 0xb0
+#define MT6357_GPIO_MODE1_SET 0xb2
+#define MT6357_GPIO_MODE1_CLR 0xb4
+#define MT6357_GPIO_MODE2 0xb6
+#define MT6357_GPIO_MODE2_SET 0xb8
+#define MT6357_GPIO_MODE2_CLR 0xba
+#define MT6357_GPIO_MODE3 0xbc
+#define MT6357_GPIO_MODE3_SET 0xbe
+#define MT6357_GPIO_MODE3_CLR 0xc0
+#define MT6357_GPIO_RSV 0xc2
+#define MT6357_TOP2_ID 0x100
+#define MT6357_TOP2_REV0 0x102
+#define MT6357_TOP2_DSN_DBI 0x104
+#define MT6357_TOP2_DSN_DXI 0x106
+#define MT6357_TOP_PAM0 0x108
+#define MT6357_TOP_PAM1 0x10a
+#define MT6357_TOP_CKPDN_CON0 0x10c
+#define MT6357_TOP_CKPDN_CON0_SET 0x10e
+#define MT6357_TOP_CKPDN_CON0_CLR 0x110
+#define MT6357_TOP_CKPDN_CON1 0x112
+#define MT6357_TOP_CKPDN_CON1_SET 0x114
+#define MT6357_TOP_CKPDN_CON1_CLR 0x116
+#define MT6357_TOP_CKSEL_CON0 0x118
+#define MT6357_TOP_CKSEL_CON0_SET 0x11a
+#define MT6357_TOP_CKSEL_CON0_CLR 0x11c
+#define MT6357_TOP_CKSEL_CON1 0x11e
+#define MT6357_TOP_CKSEL_CON1_SET 0x120
+#define MT6357_TOP_CKSEL_CON1_CLR 0x122
+#define MT6357_TOP_CKDIVSEL_CON0 0x124
+#define MT6357_TOP_CKDIVSEL_CON0_SET 0x126
+#define MT6357_TOP_CKDIVSEL_CON0_CLR 0x128
+#define MT6357_TOP_CKHWEN_CON0 0x12a
+#define MT6357_TOP_CKHWEN_CON0_SET 0x12c
+#define MT6357_TOP_CKHWEN_CON0_CLR 0x12e
+#define MT6357_TOP_CKTST_CON0 0x130
+#define MT6357_TOP_CKTST_CON1 0x132
+#define MT6357_TOP_CLK_CON0 0x134
+#define MT6357_TOP_CLK_CON0_SET 0x136
+#define MT6357_TOP_CLK_CON0_CLR 0x138
+#define MT6357_TOP_DCM_CON0 0x13a
+#define MT6357_TOP_HANDOVER_DEBUG0 0x13c
+#define MT6357_TOP_RST_CON0 0x13e
+#define MT6357_TOP_RST_CON0_SET 0x140
+#define MT6357_TOP_RST_CON0_CLR 0x142
+#define MT6357_TOP_RST_CON1 0x144
+#define MT6357_TOP_RST_CON1_SET 0x146
+#define MT6357_TOP_RST_CON1_CLR 0x148
+#define MT6357_TOP_RST_CON2 0x14a
+#define MT6357_TOP_RST_MISC 0x14c
+#define MT6357_TOP_RST_MISC_SET 0x14e
+#define MT6357_TOP_RST_MISC_CLR 0x150
+#define MT6357_TOP_RST_STATUS 0x152
+#define MT6357_TOP_RST_STATUS_SET 0x154
+#define MT6357_TOP_RST_STATUS_CLR 0x156
+#define MT6357_TOP2_ELR_NUM 0x158
+#define MT6357_TOP2_ELR0 0x15a
+#define MT6357_TOP2_ELR1 0x15c
+#define MT6357_TOP3_ID 0x180
+#define MT6357_TOP3_REV0 0x182
+#define MT6357_TOP3_DSN_DBI 0x184
+#define MT6357_TOP3_DSN_DXI 0x186
+#define MT6357_MISC_TOP_INT_CON0 0x188
+#define MT6357_MISC_TOP_INT_CON0_SET 0x18a
+#define MT6357_MISC_TOP_INT_CON0_CLR 0x18c
+#define MT6357_MISC_TOP_INT_MASK_CON0 0x18e
+#define MT6357_MISC_TOP_INT_MASK_CON0_SET 0x190
+#define MT6357_MISC_TOP_INT_MASK_CON0_CLR 0x192
+#define MT6357_MISC_TOP_INT_STATUS0 0x194
+#define MT6357_MISC_TOP_INT_RAW_STATUS0 0x196
+#define MT6357_TOP_INT_MASK_CON0 0x198
+#define MT6357_TOP_INT_MASK_CON0_SET 0x19a
+#define MT6357_TOP_INT_MASK_CON0_CLR 0x19c
+#define MT6357_TOP_INT_STATUS0 0x19e
+#define MT6357_TOP_INT_RAW_STATUS0 0x1a0
+#define MT6357_TOP_INT_CON0 0x1a2
+#define MT6357_PLT0_ID 0x380
+#define MT6357_PLT0_REV0 0x382
+#define MT6357_PLT0_REV1 0x384
+#define MT6357_PLT0_DSN_DXI 0x386
+#define MT6357_FQMTR_CON0 0x388
+#define MT6357_FQMTR_CON1 0x38a
+#define MT6357_FQMTR_CON2 0x38c
+#define MT6357_TOP_CLK_TRIM 0x38e
+#define MT6357_OTP_CON0 0x390
+#define MT6357_OTP_CON1 0x392
+#define MT6357_OTP_CON2 0x394
+#define MT6357_OTP_CON3 0x396
+#define MT6357_OTP_CON4 0x398
+#define MT6357_OTP_CON5 0x39a
+#define MT6357_OTP_CON6 0x39c
+#define MT6357_OTP_CON7 0x39e
+#define MT6357_OTP_CON8 0x3a0
+#define MT6357_OTP_CON9 0x3a2
+#define MT6357_OTP_CON10 0x3a4
+#define MT6357_OTP_CON11 0x3a6
+#define MT6357_OTP_CON12 0x3a8
+#define MT6357_OTP_CON13 0x3aa
+#define MT6357_OTP_CON14 0x3ac
+#define MT6357_TOP_TMA_KEY 0x3ae
+#define MT6357_TOP_MDB_CONF0 0x3b0
+#define MT6357_TOP_MDB_CONF1 0x3b2
+#define MT6357_TOP_MDB_CONF2 0x3b4
+#define MT6357_PLT0_ELR_NUM 0x3b6
+#define MT6357_PLT0_ELR0 0x3b8
+#define MT6357_PLT0_ELR1 0x3ba
+#define MT6357_SPISLV_ID 0x400
+#define MT6357_SPISLV_REV0 0x402
+#define MT6357_SPISLV_REV1 0x404
+#define MT6357_SPISLV_DSN_DXI 0x406
+#define MT6357_RG_SPI_CON0 0x408
+#define MT6357_DEW_DIO_EN 0x40a
+#define MT6357_DEW_READ_TEST 0x40c
+#define MT6357_DEW_WRITE_TEST 0x40e
+#define MT6357_DEW_CRC_SWRST 0x410
+#define MT6357_DEW_CRC_EN 0x412
+#define MT6357_DEW_CRC_VAL 0x414
+#define MT6357_DEW_DBG_MON_SEL 0x416
+#define MT6357_DEW_CIPHER_KEY_SEL 0x418
+#define MT6357_DEW_CIPHER_IV_SEL 0x41a
+#define MT6357_DEW_CIPHER_EN 0x41c
+#define MT6357_DEW_CIPHER_RDY 0x41e
+#define MT6357_DEW_CIPHER_MODE 0x420
+#define MT6357_DEW_CIPHER_SWRST 0x422
+#define MT6357_DEW_RDDMY_NO 0x424
+#define MT6357_INT_TYPE_CON0 0x426
+#define MT6357_INT_TYPE_CON0_SET 0x428
+#define MT6357_INT_TYPE_CON0_CLR 0x42a
+#define MT6357_INT_STA 0x42c
+#define MT6357_RG_SPI_CON1 0x42e
+#define MT6357_RG_SPI_CON2 0x430
+#define MT6357_RG_SPI_CON3 0x432
+#define MT6357_RG_SPI_CON4 0x434
+#define MT6357_RG_SPI_CON5 0x436
+#define MT6357_RG_SPI_CON6 0x438
+#define MT6357_RG_SPI_CON7 0x43a
+#define MT6357_RG_SPI_CON8 0x43c
+#define MT6357_RG_SPI_CON9 0x43e
+#define MT6357_RG_SPI_CON10 0x440
+#define MT6357_RG_SPI_CON11 0x442
+#define MT6357_RG_SPI_CON12 0x444
+#define MT6357_RG_SPI_CON13 0x446
+#define MT6357_TOP_SPI_CON0 0x448
+#define MT6357_TOP_SPI_CON1 0x44a
+#define MT6357_SCK_TOP_DSN_ID 0x500
+#define MT6357_SCK_TOP_DSN_REV0 0x502
+#define MT6357_SCK_TOP_DBI 0x504
+#define MT6357_SCK_TOP_DXI 0x506
+#define MT6357_SCK_TOP_TPM0 0x508
+#define MT6357_SCK_TOP_TPM1 0x50a
+#define MT6357_SCK_TOP_CON0 0x50c
+#define MT6357_SCK_TOP_CON1 0x50e
+#define MT6357_SCK_TOP_TEST_OUT 0x510
+#define MT6357_SCK_TOP_TEST_CON0 0x512
+#define MT6357_SCK_TOP_CKPDN_CON0 0x514
+#define MT6357_SCK_TOP_CKPDN_CON0_SET 0x516
+#define MT6357_SCK_TOP_CKPDN_CON0_CLR 0x518
+#define MT6357_SCK_TOP_CKHWEN_CON0 0x51a
+#define MT6357_SCK_TOP_CKHWEN_CON0_SET 0x51c
+#define MT6357_SCK_TOP_CKHWEN_CON0_CLR 0x51e
+#define MT6357_SCK_TOP_CKTST_CON 0x520
+#define MT6357_SCK_TOP_RST_CON0 0x522
+#define MT6357_SCK_TOP_RST_CON0_SET 0x524
+#define MT6357_SCK_TOP_RST_CON0_CLR 0x526
+#define MT6357_SCK_TOP_INT_CON0 0x528
+#define MT6357_SCK_TOP_INT_CON0_SET 0x52a
+#define MT6357_SCK_TOP_INT_CON0_CLR 0x52c
+#define MT6357_SCK_TOP_INT_MASK_CON0 0x52e
+#define MT6357_SCK_TOP_INT_MASK_CON0_SET 0x530
+#define MT6357_SCK_TOP_INT_MASK_CON0_CLR 0x532
+#define MT6357_SCK_TOP_INT_STATUS0 0x534
+#define MT6357_SCK_TOP_INT_RAW_STATUS0 0x536
+#define MT6357_SCK_TOP_INT_MISC_CON 0x538
+#define MT6357_EOSC_CALI_CON0 0x53a
+#define MT6357_EOSC_CALI_CON1 0x53c
+#define MT6357_RTC_MIX_CON0 0x53e
+#define MT6357_RTC_MIX_CON1 0x540
+#define MT6357_RTC_MIX_CON2 0x542
+#define MT6357_RTC_DSN_ID 0x580
+#define MT6357_RTC_DSN_REV0 0x582
+#define MT6357_RTC_DBI 0x584
+#define MT6357_RTC_DXI 0x586
+#define MT6357_RTC_BBPU 0x588
+#define MT6357_RTC_IRQ_STA 0x58a
+#define MT6357_RTC_IRQ_EN 0x58c
+#define MT6357_RTC_CII_EN 0x58e
+#define MT6357_RTC_AL_MASK 0x590
+#define MT6357_RTC_TC_SEC 0x592
+#define MT6357_RTC_TC_MIN 0x594
+#define MT6357_RTC_TC_HOU 0x596
+#define MT6357_RTC_TC_DOM 0x598
+#define MT6357_RTC_TC_DOW 0x59a
+#define MT6357_RTC_TC_MTH 0x59c
+#define MT6357_RTC_TC_YEA 0x59e
+#define MT6357_RTC_AL_SEC 0x5a0
+#define MT6357_RTC_AL_MIN 0x5a2
+#define MT6357_RTC_AL_HOU 0x5a4
+#define MT6357_RTC_AL_DOM 0x5a6
+#define MT6357_RTC_AL_DOW 0x5a8
+#define MT6357_RTC_AL_MTH 0x5aa
+#define MT6357_RTC_AL_YEA 0x5ac
+#define MT6357_RTC_OSC32CON 0x5ae
+#define MT6357_RTC_POWERKEY1 0x5b0
+#define MT6357_RTC_POWERKEY2 0x5b2
+#define MT6357_RTC_PDN1 0x5b4
+#define MT6357_RTC_PDN2 0x5b6
+#define MT6357_RTC_SPAR0 0x5b8
+#define MT6357_RTC_SPAR1 0x5ba
+#define MT6357_RTC_PROT 0x5bc
+#define MT6357_RTC_DIFF 0x5be
+#define MT6357_RTC_CALI 0x5c0
+#define MT6357_RTC_WRTGR 0x5c2
+#define MT6357_RTC_CON 0x5c4
+#define MT6357_RTC_SEC_CTRL 0x5c6
+#define MT6357_RTC_INT_CNT 0x5c8
+#define MT6357_RTC_SEC_DAT0 0x5ca
+#define MT6357_RTC_SEC_DAT1 0x5cc
+#define MT6357_RTC_SEC_DAT2 0x5ce
+#define MT6357_RTC_SEC_DSN_ID 0x600
+#define MT6357_RTC_SEC_DSN_REV0 0x602
+#define MT6357_RTC_SEC_DBI 0x604
+#define MT6357_RTC_SEC_DXI 0x606
+#define MT6357_RTC_TC_SEC_SEC 0x608
+#define MT6357_RTC_TC_MIN_SEC 0x60a
+#define MT6357_RTC_TC_HOU_SEC 0x60c
+#define MT6357_RTC_TC_DOM_SEC 0x60e
+#define MT6357_RTC_TC_DOW_SEC 0x610
+#define MT6357_RTC_TC_MTH_SEC 0x612
+#define MT6357_RTC_TC_YEA_SEC 0x614
+#define MT6357_RTC_SEC_CK_PDN 0x616
+#define MT6357_RTC_SEC_WRTGR 0x618
+#define MT6357_DCXO_DSN_ID 0x780
+#define MT6357_DCXO_DSN_REV0 0x782
+#define MT6357_DCXO_DSN_DBI 0x784
+#define MT6357_DCXO_DSN_DXI 0x786
+#define MT6357_DCXO_CW00 0x788
+#define MT6357_DCXO_CW00_SET 0x78a
+#define MT6357_DCXO_CW00_CLR 0x78c
+#define MT6357_DCXO_CW01 0x78e
+#define MT6357_DCXO_CW02 0x790
+#define MT6357_DCXO_CW03 0x792
+#define MT6357_DCXO_CW04 0x794
+#define MT6357_DCXO_CW05 0x796
+#define MT6357_DCXO_CW06 0x798
+#define MT6357_DCXO_CW07 0x79a
+#define MT6357_DCXO_CW08 0x79c
+#define MT6357_DCXO_CW09 0x79e
+#define MT6357_DCXO_CW10 0x7a0
+#define MT6357_DCXO_CW11 0x7a2
+#define MT6357_DCXO_CW11_SET 0x7a4
+#define MT6357_DCXO_CW11_CLR 0x7a6
+#define MT6357_DCXO_CW12 0x7a8
+#define MT6357_DCXO_CW13 0x7aa
+#define MT6357_DCXO_CW14 0x7ac
+#define MT6357_DCXO_CW15 0x7ae
+#define MT6357_DCXO_CW16 0x7b0
+#define MT6357_DCXO_CW17 0x7b2
+#define MT6357_DCXO_CW18 0x7b4
+#define MT6357_DCXO_CW19 0x7b6
+#define MT6357_DCXO_CW20 0x7b8
+#define MT6357_DCXO_CW21 0x7ba
+#define MT6357_DCXO_CW22 0x7bc
+#define MT6357_DCXO_ELR_NUM 0x7be
+#define MT6357_DCXO_ELR0 0x7c0
+#define MT6357_PSC_TOP_ID 0x900
+#define MT6357_PSC_TOP_REV0 0x902
+#define MT6357_PSC_TOP_DBI 0x904
+#define MT6357_PSC_TOP_DXI 0x906
+#define MT6357_PSC_TPM0 0x908
+#define MT6357_PSC_TPM1 0x90a
+#define MT6357_PSC_TOP_RSTCTL_0 0x90c
+#define MT6357_PSC_TOP_INT_CON0 0x90e
+#define MT6357_PSC_TOP_INT_CON0_SET 0x910
+#define MT6357_PSC_TOP_INT_CON0_CLR 0x912
+#define MT6357_PSC_TOP_INT_MASK_CON0 0x914
+#define MT6357_PSC_TOP_INT_MASK_CON0_SET 0x916
+#define MT6357_PSC_TOP_INT_MASK_CON0_CLR 0x918
+#define MT6357_PSC_TOP_INT_STATUS0 0x91a
+#define MT6357_PSC_TOP_INT_RAW_STATUS0 0x91c
+#define MT6357_PSC_TOP_INT_MISC_CON 0x91e
+#define MT6357_PSC_TOP_INT_MISC_CON_SET 0x920
+#define MT6357_PSC_TOP_INT_MISC_CON_CLR 0x922
+#define MT6357_PSC_TOP_MON_CTL 0x924
+#define MT6357_STRUP_ID 0x980
+#define MT6357_STRUP_REV0 0x982
+#define MT6357_STRUP_DBI 0x984
+#define MT6357_STRUP_DXI 0x986
+#define MT6357_STRUP_ANA_CON0 0x988
+#define MT6357_STRUP_ANA_CON1 0x98a
+#define MT6357_STRUP_ANA_CON2 0x98c
+#define MT6357_STRUP_ELR_NUM 0x98e
+#define MT6357_STRUP_ELR_0 0x990
+#define MT6357_PSEQ_ID 0xa00
+#define MT6357_PSEQ_REV0 0xa02
+#define MT6357_PSEQ_DBI 0xa04
+#define MT6357_PSEQ_DXI 0xa06
+#define MT6357_PPCCTL0 0xa08
+#define MT6357_PPCCTL1 0xa0a
+#define MT6357_PPCCTL2 0xa0c
+#define MT6357_PPCCFG0 0xa0e
+#define MT6357_PPCTST0 0xa10
+#define MT6357_PORFLAG 0xa12
+#define MT6357_STRUP_CON0 0xa14
+#define MT6357_STRUP_CON1 0xa16
+#define MT6357_STRUP_CON2 0xa18
+#define MT6357_STRUP_CON3 0xa1a
+#define MT6357_STRUP_CON4 0xa1c
+#define MT6357_STRUP_CON5 0xa1e
+#define MT6357_STRUP_CON6 0xa20
+#define MT6357_STRUP_CON7 0xa22
+#define MT6357_CPSCFG0 0xa24
+#define MT6357_STRUP_CON9 0xa26
+#define MT6357_STRUP_CON10 0xa28
+#define MT6357_STRUP_CON11 0xa2a
+#define MT6357_STRUP_CON12 0xa2c
+#define MT6357_STRUP_CON13 0xa2e
+#define MT6357_STRUP_CON14 0xa30
+#define MT6357_STRUP_CON15 0xa32
+#define MT6357_STRUP_CON16 0xa34
+#define MT6357_STRUP_CON19 0xa36
+#define MT6357_PSEQ_ELR_NUM 0xa38
+#define MT6357_PSEQ_ELR7 0xa3a
+#define MT6357_PSEQ_ELR8 0xa3c
+#define MT6357_PCHR_DIG_DSN_ID 0xa80
+#define MT6357_PCHR_DIG_DSN_REV0 0xa82
+#define MT6357_PCHR_DIG_DSN_DBI 0xa84
+#define MT6357_PCHR_DIG_DSN_DXI 0xa86
+#define MT6357_CHR_TOP_CON0 0xa88
+#define MT6357_CHR_TOP_CON1 0xa8a
+#define MT6357_CHR_TOP_CON2 0xa8c
+#define MT6357_CHR_TOP_CON3 0xa8e
+#define MT6357_CHR_TOP_CON4 0xa90
+#define MT6357_CHR_TOP_CON5 0xa92
+#define MT6357_CHR_TOP_CON6 0xa94
+#define MT6357_PCHR_DIG_ELR_NUM 0xa96
+#define MT6357_PCHR_ELR0 0xa98
+#define MT6357_PCHR_ELR1 0xa9a
+#define MT6357_PCHR_MACRO_DSN_ID 0xb80
+#define MT6357_PCHR_MACRO_DSN_REV0 0xb82
+#define MT6357_PCHR_MACRO_DSN_DBI 0xb84
+#define MT6357_PCHR_MACRO_DSN_DXI 0xb86
+#define MT6357_CHR_CON0 0xb88
+#define MT6357_CHR_CON1 0xb8a
+#define MT6357_CHR_CON2 0xb8c
+#define MT6357_CHR_CON3 0xb8e
+#define MT6357_CHR_CON4 0xb90
+#define MT6357_CHR_CON5 0xb92
+#define MT6357_CHR_CON6 0xb94
+#define MT6357_CHR_CON7 0xb96
+#define MT6357_CHR_CON8 0xb98
+#define MT6357_CHR_CON9 0xb9a
+#define MT6357_BM_TOP_DSN_ID 0xc00
+#define MT6357_BM_TOP_DSN_REV0 0xc02
+#define MT6357_BM_TOP_DBI 0xc04
+#define MT6357_BM_TOP_DXI 0xc06
+#define MT6357_BM_TPM0 0xc08
+#define MT6357_BM_TPM1 0xc0a
+#define MT6357_BM_TOP_CKPDN_CON0 0xc0c
+#define MT6357_BM_TOP_CKPDN_CON0_SET 0xc0e
+#define MT6357_BM_TOP_CKPDN_CON0_CLR 0xc10
+#define MT6357_BM_TOP_CKSEL_CON0 0xc12
+#define MT6357_BM_TOP_CKSEL_CON0_SET 0xc14
+#define MT6357_BM_TOP_CKSEL_CON0_CLR 0xc16
+#define MT6357_BM_TOP_CKTST_CON0 0xc18
+#define MT6357_BM_TOP_RST_CON0 0xc1a
+#define MT6357_BM_TOP_RST_CON0_SET 0xc1c
+#define MT6357_BM_TOP_RST_CON0_CLR 0xc1e
+#define MT6357_BM_TOP_INT_CON0 0xc20
+#define MT6357_BM_TOP_INT_CON0_SET 0xc22
+#define MT6357_BM_TOP_INT_CON0_CLR 0xc24
+#define MT6357_BM_TOP_INT_CON1 0xc26
+#define MT6357_BM_TOP_INT_CON1_SET 0xc28
+#define MT6357_BM_TOP_INT_CON1_CLR 0xc2a
+#define MT6357_BM_TOP_INT_MASK_CON0 0xc2c
+#define MT6357_BM_TOP_INT_MASK_CON0_SET 0xc2e
+#define MT6357_BM_TOP_INT_MASK_CON0_CLR 0xc30
+#define MT6357_BM_TOP_INT_MASK_CON1 0xc32
+#define MT6357_BM_TOP_INT_MASK_CON1_SET 0xc34
+#define MT6357_BM_TOP_INT_MASK_CON1_CLR 0xc36
+#define MT6357_BM_TOP_INT_STATUS0 0xc38
+#define MT6357_BM_TOP_INT_STATUS1 0xc3a
+#define MT6357_BM_TOP_INT_RAW_STATUS0 0xc3c
+#define MT6357_BM_TOP_INT_RAW_STATUS1 0xc3e
+#define MT6357_BM_TOP_INT_MISC_CON 0xc40
+#define MT6357_BM_TOP_DBG_CON 0xc42
+#define MT6357_BM_TOP_RSV0 0xc44
+#define MT6357_FGADC_ANA_DSN_ID 0xc80
+#define MT6357_FGADC_ANA_DSN_REV0 0xc82
+#define MT6357_FGADC_ANA_DSN_DBI 0xc84
+#define MT6357_FGADC_ANA_DSN_DXI 0xc86
+#define MT6357_FGADC_ANA_CON0 0xc88
+#define MT6357_FGADC_ANA_TEST_CON0 0xc8a
+#define MT6357_FGADC_ANA_ELR_NUM 0xc8c
+#define MT6357_FGADC_ANA_ELR0 0xc8e
+#define MT6357_FGADC_ANA_ELR1 0xc90
+#define MT6357_FGADC0_DSN_ID 0xd00
+#define MT6357_FGADC0_DSN_REV0 0xd02
+#define MT6357_FGADC0_DSN_DBI 0xd04
+#define MT6357_FGADC0_DSN_DXI 0xd06
+#define MT6357_FGADC_CON0 0xd08
+#define MT6357_FGADC_CON1 0xd0a
+#define MT6357_FGADC_CON2 0xd0c
+#define MT6357_FGADC_CON3 0xd0e
+#define MT6357_FGADC_CON4 0xd10
+#define MT6357_FGADC_CAR_CON0 0xd12
+#define MT6357_FGADC_CAR_CON1 0xd14
+#define MT6357_FGADC_CAR_CON2 0xd16
+#define MT6357_FGADC_CARTH_CON0 0xd18
+#define MT6357_FGADC_CARTH_CON1 0xd1a
+#define MT6357_FGADC_CARTH_CON2 0xd1c
+#define MT6357_FGADC_CARTH_CON3 0xd1e
+#define MT6357_FGADC_NTER_CON0 0xd20
+#define MT6357_FGADC_NTER_CON1 0xd22
+#define MT6357_FGADC_NTER_CON2 0xd24
+#define MT6357_FGADC_SON_CON0 0xd26
+#define MT6357_FGADC_SON_CON1 0xd28
+#define MT6357_FGADC_SON_CON2 0xd2a
+#define MT6357_FGADC_SON_CON3 0xd2c
+#define MT6357_FGADC_ZCV_CON0 0xd2e
+#define MT6357_FGADC_ZCV_CON1 0xd30
+#define MT6357_FGADC_ZCV_CON2 0xd32
+#define MT6357_FGADC_ZCV_CON3 0xd34
+#define MT6357_FGADC_ZCV_CON4 0xd36
+#define MT6357_FGADC_ZCVTH_CON0 0xd38
+#define MT6357_FGADC_ZCVTH_CON1 0xd3a
+#define MT6357_FGADC_ZCVTH_CON2 0xd3c
+#define MT6357_FGADC1_DSN_ID 0xd80
+#define MT6357_FGADC1_DSN_REV0 0xd82
+#define MT6357_FGADC1_DSN_DBI 0xd84
+#define MT6357_FGADC1_DSN_DXI 0xd86
+#define MT6357_FGADC_R_CON0 0xd88
+#define MT6357_FGADC_CUR_CON0 0xd8a
+#define MT6357_FGADC_CUR_CON1 0xd8c
+#define MT6357_FGADC_CUR_CON2 0xd8e
+#define MT6357_FGADC_CUR_CON3 0xd90
+#define MT6357_FGADC_OFFSET_CON0 0xd92
+#define MT6357_FGADC_OFFSET_CON1 0xd94
+#define MT6357_FGADC_GAIN_CON0 0xd96
+#define MT6357_FGADC_TEST_CON0 0xd98
+#define MT6357_SYSTEM_INFO_CON0 0xd9a
+#define MT6357_SYSTEM_INFO_CON1 0xd9c
+#define MT6357_SYSTEM_INFO_CON2 0xd9e
+#define MT6357_SYSTEM_INFO_CON3 0xda0
+#define MT6357_SYSTEM_INFO_CON4 0xda2
+#define MT6357_BATON_ANA_DSN_ID 0xe00
+#define MT6357_BATON_ANA_DSN_REV0 0xe02
+#define MT6357_BATON_ANA_DSN_DBI 0xe04
+#define MT6357_BATON_ANA_DSN_DXI 0xe06
+#define MT6357_BATON_ANA_CON0 0xe08
+#define MT6357_BATON_ANA_ELR_NUM 0xe0a
+#define MT6357_BATON_ANA_ELR0 0xe0c
+#define MT6357_HK_TOP_ID 0xf80
+#define MT6357_HK_TOP_REV0 0xf82
+#define MT6357_HK_TOP_DBI 0xf84
+#define MT6357_HK_TOP_DXI 0xf86
+#define MT6357_HK_TPM0 0xf88
+#define MT6357_HK_TPM1 0xf8a
+#define MT6357_HK_TOP_CLK_CON0 0xf8c
+#define MT6357_HK_TOP_CLK_CON1 0xf8e
+#define MT6357_HK_TOP_RST_CON0 0xf90
+#define MT6357_HK_TOP_INT_CON0 0xf92
+#define MT6357_HK_TOP_INT_CON0_SET 0xf94
+#define MT6357_HK_TOP_INT_CON0_CLR 0xf96
+#define MT6357_HK_TOP_INT_MASK_CON0 0xf98
+#define MT6357_HK_TOP_INT_MASK_CON0_SET 0xf9a
+#define MT6357_HK_TOP_INT_MASK_CON0_CLR 0xf9c
+#define MT6357_HK_TOP_INT_STATUS0 0xf9e
+#define MT6357_HK_TOP_INT_RAW_STATUS0 0xfa0
+#define MT6357_HK_TOP_MON_CON0 0xfa2
+#define MT6357_HK_TOP_MON_CON1 0xfa4
+#define MT6357_HK_TOP_MON_CON2 0xfa6
+#define MT6357_AUXADC_DSN_ID 0x1000
+#define MT6357_AUXADC_DSN_REV0 0x1002
+#define MT6357_AUXADC_DSN_DBI 0x1004
+#define MT6357_AUXADC_DSN_DXI 0x1006
+#define MT6357_AUXADC_ANA_CON0 0x1008
+#define MT6357_AUXADC_DIG_1_DSN_ID 0x1080
+#define MT6357_AUXADC_DIG_1_DSN_REV0 0x1082
+#define MT6357_AUXADC_DIG_1_DSN_DBI 0x1084
+#define MT6357_AUXADC_DIG_1_DSN_DXI 0x1086
+#define MT6357_AUXADC_ADC0 0x1088
+#define MT6357_AUXADC_ADC1 0x108a
+#define MT6357_AUXADC_ADC2 0x108c
+#define MT6357_AUXADC_ADC3 0x108e
+#define MT6357_AUXADC_ADC4 0x1090
+#define MT6357_AUXADC_ADC5 0x1092
+#define MT6357_AUXADC_ADC6 0x1094
+#define MT6357_AUXADC_ADC7 0x1096
+#define MT6357_AUXADC_ADC8 0x1098
+#define MT6357_AUXADC_ADC9 0x109a
+#define MT6357_AUXADC_ADC10 0x109c
+#define MT6357_AUXADC_ADC11 0x109e
+#define MT6357_AUXADC_ADC12 0x10a0
+#define MT6357_AUXADC_ADC14 0x10a2
+#define MT6357_AUXADC_ADC16 0x10a4
+#define MT6357_AUXADC_ADC17 0x10a6
+#define MT6357_AUXADC_ADC18 0x10a8
+#define MT6357_AUXADC_ADC19 0x10aa
+#define MT6357_AUXADC_ADC20 0x10ac
+#define MT6357_AUXADC_ADC21 0x10ae
+#define MT6357_AUXADC_ADC22 0x10b0
+#define MT6357_AUXADC_ADC23 0x10b2
+#define MT6357_AUXADC_ADC24 0x10b4
+#define MT6357_AUXADC_ADC25 0x10b6
+#define MT6357_AUXADC_ADC26 0x10b8
+#define MT6357_AUXADC_ADC27 0x10ba
+#define MT6357_AUXADC_ADC29 0x10bc
+#define MT6357_AUXADC_ADC30 0x10be
+#define MT6357_AUXADC_ADC31 0x10c0
+#define MT6357_AUXADC_ADC32 0x10c2
+#define MT6357_AUXADC_ADC33 0x10c4
+#define MT6357_AUXADC_ADC34 0x10c6
+#define MT6357_AUXADC_ADC35 0x10c8
+#define MT6357_AUXADC_ADC36 0x10ca
+#define MT6357_AUXADC_ADC38 0x10cc
+#define MT6357_AUXADC_ADC39 0x10ce
+#define MT6357_AUXADC_ADC40 0x10d0
+#define MT6357_AUXADC_ADC41 0x10d2
+#define MT6357_AUXADC_ADC42 0x10d4
+#define MT6357_AUXADC_ADC43 0x10d6
+#define MT6357_AUXADC_ADC46 0x10d8
+#define MT6357_AUXADC_ADC47 0x10da
+#define MT6357_AUXADC_DIG_1_ELR_NUM 0x10dc
+#define MT6357_AUXADC_DIG_1_ELR0 0x10de
+#define MT6357_AUXADC_DIG_1_ELR1 0x10e0
+#define MT6357_AUXADC_DIG_2_DSN_ID 0x1100
+#define MT6357_AUXADC_DIG_2_DSN_REV0 0x1102
+#define MT6357_AUXADC_DIG_2_DSN_DBI 0x1104
+#define MT6357_AUXADC_DIG_2_DSN_DXI 0x1106
+#define MT6357_AUXADC_STA0 0x1108
+#define MT6357_AUXADC_STA1 0x110a
+#define MT6357_AUXADC_STA2 0x110c
+#define MT6357_AUXADC_RQST0 0x110e
+#define MT6357_AUXADC_RQST0_SET 0x1110
+#define MT6357_AUXADC_RQST0_CLR 0x1112
+#define MT6357_AUXADC_RQST2 0x1114
+#define MT6357_AUXADC_RQST2_SET 0x1116
+#define MT6357_AUXADC_RQST2_CLR 0x1118
+#define MT6357_AUXADC_RQST1 0x111a
+#define MT6357_AUXADC_RQST1_SET 0x111c
+#define MT6357_AUXADC_RQST1_CLR 0x111e
+#define MT6357_AUXADC_CON0 0x1120
+#define MT6357_AUXADC_CON0_SET 0x1122
+#define MT6357_AUXADC_CON0_CLR 0x1124
+#define MT6357_AUXADC_CON1 0x1126
+#define MT6357_AUXADC_CON2 0x1128
+#define MT6357_AUXADC_CON3 0x112a
+#define MT6357_AUXADC_CON4 0x112c
+#define MT6357_AUXADC_CON5 0x112e
+#define MT6357_AUXADC_CON6 0x1130
+#define MT6357_AUXADC_CON7 0x1132
+#define MT6357_AUXADC_CON8 0x1134
+#define MT6357_AUXADC_CON9 0x1136
+#define MT6357_AUXADC_CON10 0x1138
+#define MT6357_AUXADC_CON11 0x113a
+#define MT6357_AUXADC_CON12 0x113c
+#define MT6357_AUXADC_CON13 0x113e
+#define MT6357_AUXADC_CON14 0x1140
+#define MT6357_AUXADC_CON15 0x1142
+#define MT6357_AUXADC_CON16 0x1144
+#define MT6357_AUXADC_CON17 0x1146
+#define MT6357_AUXADC_CON18 0x1148
+#define MT6357_AUXADC_CON19 0x114a
+#define MT6357_AUXADC_CON20 0x114c
+#define MT6357_AUXADC_DIG_3_DSN_ID 0x1180
+#define MT6357_AUXADC_DIG_3_DSN_REV0 0x1182
+#define MT6357_AUXADC_DIG_3_DSN_DBI 0x1184
+#define MT6357_AUXADC_DIG_3_DSN_DXI 0x1186
+#define MT6357_AUXADC_AUTORPT0 0x1188
+#define MT6357_AUXADC_LBAT0 0x118a
+#define MT6357_AUXADC_LBAT1 0x118c
+#define MT6357_AUXADC_LBAT2 0x118e
+#define MT6357_AUXADC_LBAT3 0x1190
+#define MT6357_AUXADC_LBAT4 0x1192
+#define MT6357_AUXADC_LBAT5 0x1194
+#define MT6357_AUXADC_LBAT6 0x1196
+#define MT6357_AUXADC_ACCDET 0x1198
+#define MT6357_AUXADC_DBG0 0x119a
+#define MT6357_AUXADC_IMP0 0x119c
+#define MT6357_AUXADC_IMP1 0x119e
+#define MT6357_AUXADC_DIG_3_ELR_NUM 0x11a0
+#define MT6357_AUXADC_DIG_3_ELR0 0x11a2
+#define MT6357_AUXADC_DIG_3_ELR1 0x11a4
+#define MT6357_AUXADC_DIG_3_ELR2 0x11a6
+#define MT6357_AUXADC_DIG_3_ELR3 0x11a8
+#define MT6357_AUXADC_DIG_3_ELR4 0x11aa
+#define MT6357_AUXADC_DIG_3_ELR5 0x11ac
+#define MT6357_AUXADC_DIG_3_ELR6 0x11ae
+#define MT6357_AUXADC_DIG_3_ELR7 0x11b0
+#define MT6357_AUXADC_DIG_3_ELR8 0x11b2
+#define MT6357_AUXADC_DIG_3_ELR9 0x11b4
+#define MT6357_AUXADC_DIG_3_ELR10 0x11b6
+#define MT6357_AUXADC_DIG_3_ELR11 0x11b8
+#define MT6357_AUXADC_DIG_4_DSN_ID 0x1200
+#define MT6357_AUXADC_DIG_4_DSN_REV0 0x1202
+#define MT6357_AUXADC_DIG_4_DSN_DBI 0x1204
+#define MT6357_AUXADC_DIG_4_DSN_DXI 0x1206
+#define MT6357_AUXADC_MDRT_0 0x1208
+#define MT6357_AUXADC_MDRT_1 0x120a
+#define MT6357_AUXADC_MDRT_2 0x120c
+#define MT6357_AUXADC_MDRT_3 0x120e
+#define MT6357_AUXADC_MDRT_4 0x1210
+#define MT6357_AUXADC_DCXO_MDRT_0 0x1212
+#define MT6357_AUXADC_DCXO_MDRT_1 0x1214
+#define MT6357_AUXADC_DCXO_MDRT_2 0x1216
+#define MT6357_AUXADC_NAG_0 0x1218
+#define MT6357_AUXADC_NAG_1 0x121a
+#define MT6357_AUXADC_NAG_2 0x121c
+#define MT6357_AUXADC_NAG_3 0x121e
+#define MT6357_AUXADC_NAG_4 0x1220
+#define MT6357_AUXADC_NAG_5 0x1222
+#define MT6357_AUXADC_NAG_6 0x1224
+#define MT6357_AUXADC_NAG_7 0x1226
+#define MT6357_AUXADC_NAG_8 0x1228
+#define MT6357_AUXADC_RSV_1 0x122a
+#define MT6357_AUXADC_ANA_0 0x122c
+#define MT6357_AUXADC_IMP_CG0 0x122e
+#define MT6357_AUXADC_LBAT_CG0 0x1230
+#define MT6357_AUXADC_NAG_CG0 0x1232
+#define MT6357_AUXADC_PRI_NEW 0x1234
+#define MT6357_AUXADC_CHR_TOP_CON2 0x1236
+#define MT6357_BUCK_TOP_DSN_ID 0x1400
+#define MT6357_BUCK_TOP_DSN_REV0 0x1402
+#define MT6357_BUCK_TOP_DBI 0x1404
+#define MT6357_BUCK_TOP_DXI 0x1406
+#define MT6357_BUCK_TOP_PAM0 0x1408
+#define MT6357_BUCK_TOP_PAM1 0x140a
+#define MT6357_BUCK_TOP_CLK_CON0 0x140c
+#define MT6357_BUCK_TOP_CLK_CON0_SET 0x140e
+#define MT6357_BUCK_TOP_CLK_CON0_CLR 0x1410
+#define MT6357_BUCK_TOP_CLK_HWEN_CON0 0x1412
+#define MT6357_BUCK_TOP_CLK_HWEN_CON0_SET 0x1414
+#define MT6357_BUCK_TOP_CLK_HWEN_CON0_CLR 0x1416
+#define MT6357_BUCK_TOP_CLK_MISC_CON0 0x1418
+#define MT6357_BUCK_TOP_INT_CON0 0x141a
+#define MT6357_BUCK_TOP_INT_CON0_SET 0x141c
+#define MT6357_BUCK_TOP_INT_CON0_CLR 0x141e
+#define MT6357_BUCK_TOP_INT_MASK_CON0 0x1420
+#define MT6357_BUCK_TOP_INT_MASK_CON0_SET 0x1422
+#define MT6357_BUCK_TOP_INT_MASK_CON0_CLR 0x1424
+#define MT6357_BUCK_TOP_INT_STATUS0 0x1426
+#define MT6357_BUCK_TOP_INT_RAW_STATUS0 0x1428
+#define MT6357_BUCK_TOP_STB_CON 0x142a
+#define MT6357_BUCK_TOP_SLP_CON0 0x142c
+#define MT6357_BUCK_TOP_SLP_CON1 0x142e
+#define MT6357_BUCK_TOP_SLP_CON2 0x1430
+#define MT6357_BUCK_TOP_MINFREQ_CON 0x1432
+#define MT6357_BUCK_TOP_OC_CON0 0x1434
+#define MT6357_BUCK_TOP_K_CON0 0x1436
+#define MT6357_BUCK_TOP_K_CON1 0x1438
+#define MT6357_BUCK_TOP_K_CON2 0x143a
+#define MT6357_BUCK_TOP_WDTDBG0 0x143c
+#define MT6357_BUCK_TOP_WDTDBG1 0x143e
+#define MT6357_BUCK_TOP_WDTDBG2 0x1440
+#define MT6357_BUCK_TOP_ELR_NUM 0x1442
+#define MT6357_BUCK_TOP_ELR0 0x1444
+#define MT6357_BUCK_TOP_ELR1 0x1446
+#define MT6357_BUCK_VPROC_DSN_ID 0x1480
+#define MT6357_BUCK_VPROC_DSN_REV0 0x1482
+#define MT6357_BUCK_VPROC_DSN_DBI 0x1484
+#define MT6357_BUCK_VPROC_DSN_DXI 0x1486
+#define MT6357_BUCK_VPROC_CON0 0x1488
+#define MT6357_BUCK_VPROC_CON1 0x148a
+#define MT6357_BUCK_VPROC_CFG0 0x148c
+#define MT6357_BUCK_VPROC_CFG1 0x148e
+#define MT6357_BUCK_VPROC_OP_EN 0x1490
+#define MT6357_BUCK_VPROC_OP_EN_SET 0x1492
+#define MT6357_BUCK_VPROC_OP_EN_CLR 0x1494
+#define MT6357_BUCK_VPROC_OP_CFG 0x1496
+#define MT6357_BUCK_VPROC_OP_CFG_SET 0x1498
+#define MT6357_BUCK_VPROC_OP_CFG_CLR 0x149a
+#define MT6357_BUCK_VPROC_SP_CON 0x149c
+#define MT6357_BUCK_VPROC_SP_CFG 0x149e
+#define MT6357_BUCK_VPROC_OC_CFG 0x14a0
+#define MT6357_BUCK_VPROC_DBG0 0x14a2
+#define MT6357_BUCK_VPROC_DBG1 0x14a4
+#define MT6357_BUCK_VPROC_DBG2 0x14a6
+#define MT6357_BUCK_VPROC_ELR_NUM 0x14a8
+#define MT6357_BUCK_VPROC_ELR0 0x14aa
+#define MT6357_BUCK_VCORE_DSN_ID 0x1500
+#define MT6357_BUCK_VCORE_DSN_REV0 0x1502
+#define MT6357_BUCK_VCORE_DSN_DBI 0x1504
+#define MT6357_BUCK_VCORE_DSN_DXI 0x1506
+#define MT6357_BUCK_VCORE_CON0 0x1508
+#define MT6357_BUCK_VCORE_CON1 0x150a
+#define MT6357_BUCK_VCORE_CFG0 0x150c
+#define MT6357_BUCK_VCORE_CFG1 0x150e
+#define MT6357_BUCK_VCORE_OP_EN 0x1510
+#define MT6357_BUCK_VCORE_OP_EN_SET 0x1512
+#define MT6357_BUCK_VCORE_OP_EN_CLR 0x1514
+#define MT6357_BUCK_VCORE_OP_CFG 0x1516
+#define MT6357_BUCK_VCORE_OP_CFG_SET 0x1518
+#define MT6357_BUCK_VCORE_OP_CFG_CLR 0x151a
+#define MT6357_BUCK_VCORE_SP_CON 0x151c
+#define MT6357_BUCK_VCORE_SP_CFG 0x151e
+#define MT6357_BUCK_VCORE_OC_CFG 0x1520
+#define MT6357_BUCK_VCORE_DBG0 0x1522
+#define MT6357_BUCK_VCORE_DBG1 0x1524
+#define MT6357_BUCK_VCORE_DBG2 0x1526
+#define MT6357_BUCK_VCORE_ELR_NUM 0x1528
+#define MT6357_BUCK_VCORE_ELR0 0x152a
+#define MT6357_BUCK_VMODEM_DSN_ID 0x1580
+#define MT6357_BUCK_VMODEM_DSN_REV0 0x1582
+#define MT6357_BUCK_VMODEM_DSN_DBI 0x1584
+#define MT6357_BUCK_VMODEM_DSN_DXI 0x1586
+#define MT6357_BUCK_VMODEM_CON0 0x1588
+#define MT6357_BUCK_VMODEM_CON1 0x158a
+#define MT6357_BUCK_VMODEM_CFG0 0x158c
+#define MT6357_BUCK_VMODEM_CFG1 0x158e
+#define MT6357_BUCK_VMODEM_OP_EN 0x1590
+#define MT6357_BUCK_VMODEM_OP_EN_SET 0x1592
+#define MT6357_BUCK_VMODEM_OP_EN_CLR 0x1594
+#define MT6357_BUCK_VMODEM_OP_CFG 0x1596
+#define MT6357_BUCK_VMODEM_OP_CFG_SET 0x1598
+#define MT6357_BUCK_VMODEM_OP_CFG_CLR 0x159a
+#define MT6357_BUCK_VMODEM_SP_CON 0x159c
+#define MT6357_BUCK_VMODEM_SP_CFG 0x159e
+#define MT6357_BUCK_VMODEM_OC_CFG 0x15a0
+#define MT6357_BUCK_VMODEM_DBG0 0x15a2
+#define MT6357_BUCK_VMODEM_DBG1 0x15a4
+#define MT6357_BUCK_VMODEM_DBG2 0x15a6
+#define MT6357_BUCK_VMODEM_ELR_NUM 0x15a8
+#define MT6357_BUCK_VMODEM_ELR0 0x15aa
+#define MT6357_BUCK_VS1_DSN_ID 0x1600
+#define MT6357_BUCK_VS1_DSN_REV0 0x1602
+#define MT6357_BUCK_VS1_DSN_DBI 0x1604
+#define MT6357_BUCK_VS1_DSN_DXI 0x1606
+#define MT6357_BUCK_VS1_CON0 0x1608
+#define MT6357_BUCK_VS1_CON1 0x160a
+#define MT6357_BUCK_VS1_CFG0 0x160c
+#define MT6357_BUCK_VS1_CFG1 0x160e
+#define MT6357_BUCK_VS1_OP_EN 0x1610
+#define MT6357_BUCK_VS1_OP_EN_SET 0x1612
+#define MT6357_BUCK_VS1_OP_EN_CLR 0x1614
+#define MT6357_BUCK_VS1_OP_CFG 0x1616
+#define MT6357_BUCK_VS1_OP_CFG_SET 0x1618
+#define MT6357_BUCK_VS1_OP_CFG_CLR 0x161a
+#define MT6357_BUCK_VS1_SP_CON 0x161c
+#define MT6357_BUCK_VS1_SP_CFG 0x161e
+#define MT6357_BUCK_VS1_OC_CFG 0x1620
+#define MT6357_BUCK_VS1_DBG0 0x1622
+#define MT6357_BUCK_VS1_DBG1 0x1624
+#define MT6357_BUCK_VS1_DBG2 0x1626
+#define MT6357_BUCK_VS1_VOTER 0x1628
+#define MT6357_BUCK_VS1_VOTER_SET 0x162a
+#define MT6357_BUCK_VS1_VOTER_CLR 0x162c
+#define MT6357_BUCK_VS1_VOTER_CFG 0x162e
+#define MT6357_BUCK_VS1_ELR_NUM 0x1630
+#define MT6357_BUCK_VS1_ELR0 0x1632
+#define MT6357_BUCK_VPA_DSN_ID 0x1680
+#define MT6357_BUCK_VPA_DSN_REV0 0x1682
+#define MT6357_BUCK_VPA_DSN_DBI 0x1684
+#define MT6357_BUCK_VPA_DSN_DXI 0x1686
+#define MT6357_BUCK_VPA_CON0 0x1688
+#define MT6357_BUCK_VPA_CON1 0x168a
+#define MT6357_BUCK_VPA_CFG0 0x168c
+#define MT6357_BUCK_VPA_CFG1 0x168e
+#define MT6357_BUCK_VPA_OC_CFG 0x1690
+#define MT6357_BUCK_VPA_DBG0 0x1692
+#define MT6357_BUCK_VPA_DBG1 0x1694
+#define MT6357_BUCK_VPA_DBG2 0x1696
+#define MT6357_BUCK_VPA_DLC_CON0 0x1698
+#define MT6357_BUCK_VPA_DLC_CON1 0x169a
+#define MT6357_BUCK_VPA_DLC_CON2 0x169c
+#define MT6357_BUCK_VPA_MSFG_CON0 0x169e
+#define MT6357_BUCK_VPA_MSFG_CON1 0x16a0
+#define MT6357_BUCK_VPA_MSFG_RRATE0 0x16a2
+#define MT6357_BUCK_VPA_MSFG_RRATE1 0x16a4
+#define MT6357_BUCK_VPA_MSFG_RRATE2 0x16a6
+#define MT6357_BUCK_VPA_MSFG_RTHD0 0x16a8
+#define MT6357_BUCK_VPA_MSFG_RTHD1 0x16aa
+#define MT6357_BUCK_VPA_MSFG_RTHD2 0x16ac
+#define MT6357_BUCK_VPA_MSFG_FRATE0 0x16ae
+#define MT6357_BUCK_VPA_MSFG_FRATE1 0x16b0
+#define MT6357_BUCK_VPA_MSFG_FRATE2 0x16b2
+#define MT6357_BUCK_VPA_MSFG_FTHD0 0x16b4
+#define MT6357_BUCK_VPA_MSFG_FTHD1 0x16b6
+#define MT6357_BUCK_VPA_MSFG_FTHD2 0x16b8
+#define MT6357_BUCK_ANA_DSN_ID 0x1700
+#define MT6357_BUCK_ANA_DSN_REV0 0x1702
+#define MT6357_BUCK_ANA_DSN_DBI 0x1704
+#define MT6357_BUCK_ANA_DSN_FPI 0x1706
+#define MT6357_SMPS_ANA_CON0 0x1708
+#define MT6357_SMPS_ANA_CON1 0x170a
+#define MT6357_SMPS_ANA_CON2 0x170c
+#define MT6357_VCORE_VPROC_ANA_CON0 0x170e
+#define MT6357_VCORE_VPROC_ANA_CON1 0x1710
+#define MT6357_VCORE_VPROC_ANA_CON2 0x1712
+#define MT6357_VCORE_VPROC_ANA_CON3 0x1714
+#define MT6357_VCORE_VPROC_ANA_CON4 0x1716
+#define MT6357_VCORE_VPROC_ANA_CON5 0x1718
+#define MT6357_VCORE_VPROC_ANA_CON6 0x171a
+#define MT6357_VCORE_VPROC_ANA_CON7 0x171c
+#define MT6357_VCORE_VPROC_ANA_CON8 0x171e
+#define MT6357_VCORE_VPROC_ANA_CON9 0x1720
+#define MT6357_VCORE_VPROC_ANA_CON10 0x1722
+#define MT6357_VCORE_VPROC_ANA_CON11 0x1724
+#define MT6357_VMODEM_ANA_CON0 0x1726
+#define MT6357_VMODEM_ANA_CON1 0x1728
+#define MT6357_VMODEM_ANA_CON2 0x172a
+#define MT6357_VMODEM_ANA_CON3 0x172c
+#define MT6357_VMODEM_ANA_CON4 0x172e
+#define MT6357_VMODEM_ANA_CON5 0x1730
+#define MT6357_VS1_ANA_CON0 0x1732
+#define MT6357_VS1_ANA_CON1 0x1734
+#define MT6357_VS1_ANA_CON2 0x1736
+#define MT6357_VS1_ANA_CON3 0x1738
+#define MT6357_VS1_ANA_CON4 0x173a
+#define MT6357_VS1_ANA_CON5 0x173c
+#define MT6357_VPA_ANA_CON0 0x173e
+#define MT6357_VPA_ANA_CON1 0x1740
+#define MT6357_VPA_ANA_CON2 0x1742
+#define MT6357_VPA_ANA_CON3 0x1744
+#define MT6357_VPA_ANA_CON4 0x1746
+#define MT6357_VPA_ANA_CON5 0x1748
+#define MT6357_BUCK_ANA_ELR_NUM 0x174a
+#define MT6357_SMPS_ELR_0 0x174c
+#define MT6357_SMPS_ELR_1 0x174e
+#define MT6357_SMPS_ELR_2 0x1750
+#define MT6357_SMPS_ELR_3 0x1752
+#define MT6357_SMPS_ELR_4 0x1754
+#define MT6357_SMPS_ELR_5 0x1756
+#define MT6357_VCORE_VPROC_ELR_0 0x1758
+#define MT6357_VCORE_VPROC_ELR_1 0x175a
+#define MT6357_VCORE_VPROC_ELR_2 0x175c
+#define MT6357_VCORE_VPROC_ELR_3 0x175e
+#define MT6357_VCORE_VPROC_ELR_4 0x1760
+#define MT6357_VMODEM_ELR_0 0x1762
+#define MT6357_VMODEM_ELR_1 0x1764
+#define MT6357_VMODEM_ELR_2 0x1766
+#define MT6357_VS1_ELR_0 0x1768
+#define MT6357_VS1_ELR_1 0x176a
+#define MT6357_VPA_ELR_0 0x176c
+#define MT6357_LDO_TOP_ID 0x1880
+#define MT6357_LDO_TOP_REV0 0x1882
+#define MT6357_LDO_TOP_DBI 0x1884
+#define MT6357_LDO_TOP_DXI 0x1886
+#define MT6357_LDO_TPM0 0x1888
+#define MT6357_LDO_TPM1 0x188a
+#define MT6357_LDO_TOP_CLK_DCM_CON0 0x188c
+#define MT6357_LDO_TOP_CLK_VIO28_CON0 0x188e
+#define MT6357_LDO_TOP_CLK_VIO18_CON0 0x1890
+#define MT6357_LDO_TOP_CLK_VAUD28_CON0 0x1892
+#define MT6357_LDO_TOP_CLK_VDRAM_CON0 0x1894
+#define MT6357_LDO_TOP_CLK_VSRAM_PROC_CON0 0x1896
+#define MT6357_LDO_TOP_CLK_VSRAM_OTHERS_CON0 0x1898
+#define MT6357_LDO_TOP_CLK_VAUX18_CON0 0x189a
+#define MT6357_LDO_TOP_CLK_VUSB33_CON0 0x189c
+#define MT6357_LDO_TOP_CLK_VEMC_CON0 0x189e
+#define MT6357_LDO_TOP_CLK_VXO22_CON0 0x18a0
+#define MT6357_LDO_TOP_CLK_VSIM1_CON0 0x18a2
+#define MT6357_LDO_TOP_CLK_VSIM2_CON0 0x18a4
+#define MT6357_LDO_TOP_CLK_VCAMD_CON0 0x18a6
+#define MT6357_LDO_TOP_CLK_VCAMIO_CON0 0x18a8
+#define MT6357_LDO_TOP_CLK_VEFUSE_CON0 0x18aa
+#define MT6357_LDO_TOP_CLK_VCN33_CON0 0x18ac
+#define MT6357_LDO_TOP_CLK_VCN18_CON0 0x18ae
+#define MT6357_LDO_TOP_CLK_VCN28_CON0 0x18b0
+#define MT6357_LDO_TOP_CLK_VIBR_CON0 0x18b2
+#define MT6357_LDO_TOP_CLK_VFE28_CON0 0x18b4
+#define MT6357_LDO_TOP_CLK_VMCH_CON0 0x18b6
+#define MT6357_LDO_TOP_CLK_VMC_CON0 0x18b8
+#define MT6357_LDO_TOP_CLK_VRF18_CON0 0x18ba
+#define MT6357_LDO_TOP_CLK_VLDO28_CON0 0x18bc
+#define MT6357_LDO_TOP_CLK_VRF12_CON0 0x18be
+#define MT6357_LDO_TOP_CLK_VCAMA_CON0 0x18c0
+#define MT6357_LDO_TOP_CLK_TREF_CON0 0x18c2
+#define MT6357_LDO_TOP_INT_CON0 0x18c4
+#define MT6357_LDO_TOP_INT_CON0_SET 0x18c6
+#define MT6357_LDO_TOP_INT_CON0_CLR 0x18c8
+#define MT6357_LDO_TOP_INT_CON1 0x18ca
+#define MT6357_LDO_TOP_INT_CON1_SET 0x18cc
+#define MT6357_LDO_TOP_INT_CON1_CLR 0x18ce
+#define MT6357_LDO_TOP_INT_MASK_CON0 0x18d0
+#define MT6357_LDO_TOP_INT_MASK_CON0_SET 0x18d2
+#define MT6357_LDO_TOP_INT_MASK_CON0_CLR 0x18d4
+#define MT6357_LDO_TOP_INT_MASK_CON1 0x18d6
+#define MT6357_LDO_TOP_INT_MASK_CON1_SET 0x18d8
+#define MT6357_LDO_TOP_INT_MASK_CON1_CLR 0x18da
+#define MT6357_LDO_TOP_INT_STATUS0 0x18dc
+#define MT6357_LDO_TOP_INT_STATUS1 0x18de
+#define MT6357_LDO_TOP_INT_RAW_STATUS0 0x18e0
+#define MT6357_LDO_TOP_INT_RAW_STATUS1 0x18e2
+#define MT6357_LDO_TEST_CON0 0x18e4
+#define MT6357_LDO_TOP_WDT_CON0 0x18e6
+#define MT6357_LDO_TOP_RSV_CON0 0x18e8
+#define MT6357_LDO_TOP_RSV_CON1 0x18ea
+#define MT6357_LDO_OCFB0 0x18ec
+#define MT6357_LDO_LP_PROTECTION 0x18ee
+#define MT6357_LDO_DUMMY_LOAD_GATED 0x18f0
+#define MT6357_LDO_GON0_DSN_ID 0x1900
+#define MT6357_LDO_GON0_DSN_REV0 0x1902
+#define MT6357_LDO_GON0_DSN_DBI 0x1904
+#define MT6357_LDO_GON0_DSN_DXI 0x1906
+#define MT6357_LDO_VXO22_CON0 0x1908
+#define MT6357_LDO_VXO22_OP_EN 0x190a
+#define MT6357_LDO_VXO22_OP_EN_SET 0x190c
+#define MT6357_LDO_VXO22_OP_EN_CLR 0x190e
+#define MT6357_LDO_VXO22_OP_CFG 0x1910
+#define MT6357_LDO_VXO22_OP_CFG_SET 0x1912
+#define MT6357_LDO_VXO22_OP_CFG_CLR 0x1914
+#define MT6357_LDO_VXO22_CON1 0x1916
+#define MT6357_LDO_VXO22_CON2 0x1918
+#define MT6357_LDO_VXO22_CON3 0x191a
+#define MT6357_LDO_VAUX18_CON0 0x191c
+#define MT6357_LDO_VAUX18_OP_EN 0x191e
+#define MT6357_LDO_VAUX18_OP_EN_SET 0x1920
+#define MT6357_LDO_VAUX18_OP_EN_CLR 0x1922
+#define MT6357_LDO_VAUX18_OP_CFG 0x1924
+#define MT6357_LDO_VAUX18_OP_CFG_SET 0x1926
+#define MT6357_LDO_VAUX18_OP_CFG_CLR 0x1928
+#define MT6357_LDO_VAUX18_CON1 0x192a
+#define MT6357_LDO_VAUX18_CON2 0x192c
+#define MT6357_LDO_VAUX18_CON3 0x192e
+#define MT6357_LDO_VAUD28_CON0 0x1930
+#define MT6357_LDO_VAUD28_OP_EN 0x1932
+#define MT6357_LDO_VAUD28_OP_EN_SET 0x1934
+#define MT6357_LDO_VAUD28_OP_EN_CLR 0x1936
+#define MT6357_LDO_VAUD28_OP_CFG 0x1938
+#define MT6357_LDO_VAUD28_OP_CFG_SET 0x193a
+#define MT6357_LDO_VAUD28_OP_CFG_CLR 0x193c
+#define MT6357_LDO_VAUD28_CON1 0x193e
+#define MT6357_LDO_VAUD28_CON2 0x1940
+#define MT6357_LDO_VAUD28_CON3 0x1942
+#define MT6357_LDO_VIO28_CON0 0x1944
+#define MT6357_LDO_VIO28_OP_EN 0x1946
+#define MT6357_LDO_VIO28_OP_EN_SET 0x1948
+#define MT6357_LDO_VIO28_OP_EN_CLR 0x194a
+#define MT6357_LDO_VIO28_OP_CFG 0x194c
+#define MT6357_LDO_VIO28_OP_CFG_SET 0x194e
+#define MT6357_LDO_VIO28_OP_CFG_CLR 0x1950
+#define MT6357_LDO_VIO28_CON1 0x1952
+#define MT6357_LDO_VIO28_CON2 0x1954
+#define MT6357_LDO_VIO28_CON3 0x1956
+#define MT6357_LDO_VIO18_CON0 0x1958
+#define MT6357_LDO_VIO18_OP_EN 0x195a
+#define MT6357_LDO_VIO18_OP_EN_SET 0x195c
+#define MT6357_LDO_VIO18_OP_EN_CLR 0x195e
+#define MT6357_LDO_VIO18_OP_CFG 0x1960
+#define MT6357_LDO_VIO18_OP_CFG_SET 0x1962
+#define MT6357_LDO_VIO18_OP_CFG_CLR 0x1964
+#define MT6357_LDO_VIO18_CON1 0x1966
+#define MT6357_LDO_VIO18_CON2 0x1968
+#define MT6357_LDO_VIO18_CON3 0x196a
+#define MT6357_LDO_VDRAM_CON0 0x196c
+#define MT6357_LDO_VDRAM_OP_EN 0x196e
+#define MT6357_LDO_VDRAM_OP_EN_SET 0x1970
+#define MT6357_LDO_VDRAM_OP_EN_CLR 0x1972
+#define MT6357_LDO_VDRAM_OP_CFG 0x1974
+#define MT6357_LDO_VDRAM_OP_CFG_SET 0x1976
+#define MT6357_LDO_VDRAM_OP_CFG_CLR 0x1978
+#define MT6357_LDO_VDRAM_CON1 0x197a
+#define MT6357_LDO_VDRAM_CON2 0x197c
+#define MT6357_LDO_VDRAM_CON3 0x197e
+#define MT6357_LDO_GON1_DSN_ID 0x1980
+#define MT6357_LDO_GON1_DSN_REV0 0x1982
+#define MT6357_LDO_GON1_DSN_DBI 0x1984
+#define MT6357_LDO_GON1_DSN_DXI 0x1986
+#define MT6357_LDO_VEMC_CON0 0x1988
+#define MT6357_LDO_VEMC_OP_EN 0x198a
+#define MT6357_LDO_VEMC_OP_EN_SET 0x198c
+#define MT6357_LDO_VEMC_OP_EN_CLR 0x198e
+#define MT6357_LDO_VEMC_OP_CFG 0x1990
+#define MT6357_LDO_VEMC_OP_CFG_SET 0x1992
+#define MT6357_LDO_VEMC_OP_CFG_CLR 0x1994
+#define MT6357_LDO_VEMC_CON1 0x1996
+#define MT6357_LDO_VEMC_CON2 0x1998
+#define MT6357_LDO_VEMC_CON3 0x199a
+#define MT6357_LDO_VUSB33_CON0_0 0x199c
+#define MT6357_LDO_VUSB33_OP_EN 0x199e
+#define MT6357_LDO_VUSB33_OP_EN_SET 0x19a0
+#define MT6357_LDO_VUSB33_OP_EN_CLR 0x19a2
+#define MT6357_LDO_VUSB33_OP_CFG 0x19a4
+#define MT6357_LDO_VUSB33_OP_CFG_SET 0x19a6
+#define MT6357_LDO_VUSB33_OP_CFG_CLR 0x19a8
+#define MT6357_LDO_VUSB33_CON0_1 0x19aa
+#define MT6357_LDO_VUSB33_CON1 0x19ac
+#define MT6357_LDO_VUSB33_CON2 0x19ae
+#define MT6357_LDO_VUSB33_CON3 0x19b0
+#define MT6357_LDO_VSRAM_PROC_CON0 0x19b2
+#define MT6357_LDO_VSRAM_PROC_CON2 0x19b4
+#define MT6357_LDO_VSRAM_PROC_CFG0 0x19b6
+#define MT6357_LDO_VSRAM_PROC_CFG1 0x19b8
+#define MT6357_LDO_VSRAM_PROC_OP_EN 0x19ba
+#define MT6357_LDO_VSRAM_PROC_OP_EN_SET 0x19bc
+#define MT6357_LDO_VSRAM_PROC_OP_EN_CLR 0x19be
+#define MT6357_LDO_VSRAM_PROC_OP_CFG 0x19c0
+#define MT6357_LDO_VSRAM_PROC_OP_CFG_SET 0x19c2
+#define MT6357_LDO_VSRAM_PROC_OP_CFG_CLR 0x19c4
+#define MT6357_LDO_VSRAM_PROC_CON3 0x19c6
+#define MT6357_LDO_VSRAM_PROC_CON4 0x19c8
+#define MT6357_LDO_VSRAM_PROC_CON5 0x19ca
+#define MT6357_LDO_VSRAM_PROC_DBG0 0x19cc
+#define MT6357_LDO_VSRAM_PROC_DBG1 0x19ce
+#define MT6357_LDO_VSRAM_OTHERS_CON0 0x19d0
+#define MT6357_LDO_VSRAM_OTHERS_CON2 0x19d2
+#define MT6357_LDO_VSRAM_OTHERS_CFG0 0x19d4
+#define MT6357_LDO_VSRAM_OTHERS_CFG1 0x19d6
+#define MT6357_LDO_VSRAM_OTHERS_OP_EN 0x19d8
+#define MT6357_LDO_VSRAM_OTHERS_OP_EN_SET 0x19da
+#define MT6357_LDO_VSRAM_OTHERS_OP_EN_CLR 0x19dc
+#define MT6357_LDO_VSRAM_OTHERS_OP_CFG 0x19de
+#define MT6357_LDO_VSRAM_OTHERS_OP_CFG_SET 0x19e0
+#define MT6357_LDO_VSRAM_OTHERS_OP_CFG_CLR 0x19e2
+#define MT6357_LDO_VSRAM_OTHERS_CON3 0x19e4
+#define MT6357_LDO_VSRAM_OTHERS_CON4 0x19e6
+#define MT6357_LDO_VSRAM_OTHERS_CON5 0x19e8
+#define MT6357_LDO_VSRAM_OTHERS_DBG0 0x19ea
+#define MT6357_LDO_VSRAM_OTHERS_DBG1 0x19ec
+#define MT6357_LDO_VSRAM_PROC_SP 0x19ee
+#define MT6357_LDO_VSRAM_OTHERS_SP 0x19f0
+#define MT6357_LDO_VSRAM_PROC_R2R_PDN_DIS 0x19f2
+#define MT6357_LDO_VSRAM_OTHERS_R2R_PDN_DIS 0x19f4
+#define MT6357_LDO_VSRAM_WDT_DBG0 0x19f6
+#define MT6357_LDO_GON1_ELR_NUM 0x19f8
+#define MT6357_LDO_VSRAM_CON0 0x19fa
+#define MT6357_LDO_VSRAM_CON1 0x19fc
+#define MT6357_LDO_VSRAM_CON2 0x19fe
+#define MT6357_LDO_GOFF0_DSN_ID 0x1a00
+#define MT6357_LDO_GOFF0_DSN_REV0 0x1a02
+#define MT6357_LDO_GOFF0_DSN_DBI 0x1a04
+#define MT6357_LDO_GOFF0_DSN_DXI 0x1a06
+#define MT6357_LDO_VFE28_CON0 0x1a08
+#define MT6357_LDO_VFE28_OP_EN 0x1a0a
+#define MT6357_LDO_VFE28_OP_EN_SET 0x1a0c
+#define MT6357_LDO_VFE28_OP_EN_CLR 0x1a0e
+#define MT6357_LDO_VFE28_OP_CFG 0x1a10
+#define MT6357_LDO_VFE28_OP_CFG_SET 0x1a12
+#define MT6357_LDO_VFE28_OP_CFG_CLR 0x1a14
+#define MT6357_LDO_VFE28_CON1 0x1a16
+#define MT6357_LDO_VFE28_CON2 0x1a18
+#define MT6357_LDO_VFE28_CON3 0x1a1a
+#define MT6357_LDO_VRF18_CON0 0x1a1c
+#define MT6357_LDO_VRF18_OP_EN 0x1a1e
+#define MT6357_LDO_VRF18_OP_EN_SET 0x1a20
+#define MT6357_LDO_VRF18_OP_EN_CLR 0x1a22
+#define MT6357_LDO_VRF18_OP_CFG 0x1a24
+#define MT6357_LDO_VRF18_OP_CFG_SET 0x1a26
+#define MT6357_LDO_VRF18_OP_CFG_CLR 0x1a28
+#define MT6357_LDO_VRF18_CON1 0x1a2a
+#define MT6357_LDO_VRF18_CON2 0x1a2c
+#define MT6357_LDO_VRF18_CON3 0x1a2e
+#define MT6357_LDO_VRF12_CON0 0x1a30
+#define MT6357_LDO_VRF12_OP_EN 0x1a32
+#define MT6357_LDO_VRF12_OP_EN_SET 0x1a34
+#define MT6357_LDO_VRF12_OP_EN_CLR 0x1a36
+#define MT6357_LDO_VRF12_OP_CFG 0x1a38
+#define MT6357_LDO_VRF12_OP_CFG_SET 0x1a3a
+#define MT6357_LDO_VRF12_OP_CFG_CLR 0x1a3c
+#define MT6357_LDO_VRF12_CON1 0x1a3e
+#define MT6357_LDO_VRF12_CON2 0x1a40
+#define MT6357_LDO_VRF12_CON3 0x1a42
+#define MT6357_LDO_VEFUSE_CON0 0x1a44
+#define MT6357_LDO_VEFUSE_OP_EN 0x1a46
+#define MT6357_LDO_VEFUSE_OP_EN_SET 0x1a48
+#define MT6357_LDO_VEFUSE_OP_EN_CLR 0x1a4a
+#define MT6357_LDO_VEFUSE_OP_CFG 0x1a4c
+#define MT6357_LDO_VEFUSE_OP_CFG_SET 0x1a4e
+#define MT6357_LDO_VEFUSE_OP_CFG_CLR 0x1a50
+#define MT6357_LDO_VEFUSE_CON1 0x1a52
+#define MT6357_LDO_VEFUSE_CON2 0x1a54
+#define MT6357_LDO_VEFUSE_CON3 0x1a56
+#define MT6357_LDO_VCN18_CON0 0x1a58
+#define MT6357_LDO_VCN18_OP_EN 0x1a5a
+#define MT6357_LDO_VCN18_OP_EN_SET 0x1a5c
+#define MT6357_LDO_VCN18_OP_EN_CLR 0x1a5e
+#define MT6357_LDO_VCN18_OP_CFG 0x1a60
+#define MT6357_LDO_VCN18_OP_CFG_SET 0x1a62
+#define MT6357_LDO_VCN18_OP_CFG_CLR 0x1a64
+#define MT6357_LDO_VCN18_CON1 0x1a66
+#define MT6357_LDO_VCN18_CON2 0x1a68
+#define MT6357_LDO_VCN18_CON3 0x1a6a
+#define MT6357_LDO_VCAMA_CON0 0x1a6c
+#define MT6357_LDO_VCAMA_OP_EN 0x1a6e
+#define MT6357_LDO_VCAMA_OP_EN_SET 0x1a70
+#define MT6357_LDO_VCAMA_OP_EN_CLR 0x1a72
+#define MT6357_LDO_VCAMA_OP_CFG 0x1a74
+#define MT6357_LDO_VCAMA_OP_CFG_SET 0x1a76
+#define MT6357_LDO_VCAMA_OP_CFG_CLR 0x1a78
+#define MT6357_LDO_VCAMA_CON1 0x1a7a
+#define MT6357_LDO_VCAMA_CON2 0x1a7c
+#define MT6357_LDO_VCAMA_CON3 0x1a7e
+#define MT6357_LDO_GOFF1_DSN_ID 0x1a80
+#define MT6357_LDO_GOFF1_DSN_REV0 0x1a82
+#define MT6357_LDO_GOFF1_DSN_DBI 0x1a84
+#define MT6357_LDO_GOFF1_DSN_DXI 0x1a86
+#define MT6357_LDO_VCAMD_CON0 0x1a88
+#define MT6357_LDO_VCAMD_OP_EN 0x1a8a
+#define MT6357_LDO_VCAMD_OP_EN_SET 0x1a8c
+#define MT6357_LDO_VCAMD_OP_EN_CLR 0x1a8e
+#define MT6357_LDO_VCAMD_OP_CFG 0x1a90
+#define MT6357_LDO_VCAMD_OP_CFG_SET 0x1a92
+#define MT6357_LDO_VCAMD_OP_CFG_CLR 0x1a94
+#define MT6357_LDO_VCAMD_CON1 0x1a96
+#define MT6357_LDO_VCAMD_CON2 0x1a98
+#define MT6357_LDO_VCAMD_CON3 0x1a9a
+#define MT6357_LDO_VCAMIO_CON0 0x1a9c
+#define MT6357_LDO_VCAMIO_OP_EN 0x1a9e
+#define MT6357_LDO_VCAMIO_OP_EN_SET 0x1aa0
+#define MT6357_LDO_VCAMIO_OP_EN_CLR 0x1aa2
+#define MT6357_LDO_VCAMIO_OP_CFG 0x1aa4
+#define MT6357_LDO_VCAMIO_OP_CFG_SET 0x1aa6
+#define MT6357_LDO_VCAMIO_OP_CFG_CLR 0x1aa8
+#define MT6357_LDO_VCAMIO_CON1 0x1aaa
+#define MT6357_LDO_VCAMIO_CON2 0x1aac
+#define MT6357_LDO_VCAMIO_CON3 0x1aae
+#define MT6357_LDO_VMC_CON0 0x1ab0
+#define MT6357_LDO_VMC_OP_EN 0x1ab2
+#define MT6357_LDO_VMC_OP_EN_SET 0x1ab4
+#define MT6357_LDO_VMC_OP_EN_CLR 0x1ab6
+#define MT6357_LDO_VMC_OP_CFG 0x1ab8
+#define MT6357_LDO_VMC_OP_CFG_SET 0x1aba
+#define MT6357_LDO_VMC_OP_CFG_CLR 0x1abc
+#define MT6357_LDO_VMC_CON1 0x1abe
+#define MT6357_LDO_VMC_CON2 0x1ac0
+#define MT6357_LDO_VMC_CON3 0x1ac2
+#define MT6357_LDO_VMCH_CON0 0x1ac4
+#define MT6357_LDO_VMCH_OP_EN 0x1ac6
+#define MT6357_LDO_VMCH_OP_EN_SET 0x1ac8
+#define MT6357_LDO_VMCH_OP_EN_CLR 0x1aca
+#define MT6357_LDO_VMCH_OP_CFG 0x1acc
+#define MT6357_LDO_VMCH_OP_CFG_SET 0x1ace
+#define MT6357_LDO_VMCH_OP_CFG_CLR 0x1ad0
+#define MT6357_LDO_VMCH_CON1 0x1ad2
+#define MT6357_LDO_VMCH_CON2 0x1ad4
+#define MT6357_LDO_VMCH_CON3 0x1ad6
+#define MT6357_LDO_VSIM1_CON0 0x1ad8
+#define MT6357_LDO_VSIM1_OP_EN 0x1ada
+#define MT6357_LDO_VSIM1_OP_EN_SET 0x1adc
+#define MT6357_LDO_VSIM1_OP_EN_CLR 0x1ade
+#define MT6357_LDO_VSIM1_OP_CFG 0x1ae0
+#define MT6357_LDO_VSIM1_OP_CFG_SET 0x1ae2
+#define MT6357_LDO_VSIM1_OP_CFG_CLR 0x1ae4
+#define MT6357_LDO_VSIM1_CON1 0x1ae6
+#define MT6357_LDO_VSIM1_CON2 0x1ae8
+#define MT6357_LDO_VSIM1_CON3 0x1aea
+#define MT6357_LDO_VSIM2_CON0 0x1aec
+#define MT6357_LDO_VSIM2_OP_EN 0x1aee
+#define MT6357_LDO_VSIM2_OP_EN_SET 0x1af0
+#define MT6357_LDO_VSIM2_OP_EN_CLR 0x1af2
+#define MT6357_LDO_VSIM2_OP_CFG 0x1af4
+#define MT6357_LDO_VSIM2_OP_CFG_SET 0x1af6
+#define MT6357_LDO_VSIM2_OP_CFG_CLR 0x1af8
+#define MT6357_LDO_VSIM2_CON1 0x1afa
+#define MT6357_LDO_VSIM2_CON2 0x1afc
+#define MT6357_LDO_VSIM2_CON3 0x1afe
+#define MT6357_LDO_GOFF2_DSN_ID 0x1b00
+#define MT6357_LDO_GOFF2_DSN_REV0 0x1b02
+#define MT6357_LDO_GOFF2_DSN_DBI 0x1b04
+#define MT6357_LDO_GOFF2_DSN_DXI 0x1b06
+#define MT6357_LDO_VIBR_CON0 0x1b08
+#define MT6357_LDO_VIBR_OP_EN 0x1b0a
+#define MT6357_LDO_VIBR_OP_EN_SET 0x1b0c
+#define MT6357_LDO_VIBR_OP_EN_CLR 0x1b0e
+#define MT6357_LDO_VIBR_OP_CFG 0x1b10
+#define MT6357_LDO_VIBR_OP_CFG_SET 0x1b12
+#define MT6357_LDO_VIBR_OP_CFG_CLR 0x1b14
+#define MT6357_LDO_VIBR_CON1 0x1b16
+#define MT6357_LDO_VIBR_CON2 0x1b18
+#define MT6357_LDO_VIBR_CON3 0x1b1a
+#define MT6357_LDO_VCN33_CON0_0 0x1b1c
+#define MT6357_LDO_VCN33_OP_EN 0x1b1e
+#define MT6357_LDO_VCN33_OP_EN_SET 0x1b20
+#define MT6357_LDO_VCN33_OP_EN_CLR 0x1b22
+#define MT6357_LDO_VCN33_OP_CFG 0x1b24
+#define MT6357_LDO_VCN33_OP_CFG_SET 0x1b26
+#define MT6357_LDO_VCN33_OP_CFG_CLR 0x1b28
+#define MT6357_LDO_VCN33_CON0_1 0x1b2a
+#define MT6357_LDO_VCN33_CON1 0x1b2c
+#define MT6357_LDO_VCN33_CON2 0x1b2e
+#define MT6357_LDO_VCN33_CON3 0x1b30
+#define MT6357_LDO_VLDO28_CON0_0 0x1b32
+#define MT6357_LDO_VLDO28_OP_EN 0x1b34
+#define MT6357_LDO_VLDO28_OP_EN_SET 0x1b36
+#define MT6357_LDO_VLDO28_OP_EN_CLR 0x1b38
+#define MT6357_LDO_VLDO28_OP_CFG 0x1b3a
+#define MT6357_LDO_VLDO28_OP_CFG_SET 0x1b3c
+#define MT6357_LDO_VLDO28_OP_CFG_CLR 0x1b3e
+#define MT6357_LDO_VLDO28_CON0_1 0x1b40
+#define MT6357_LDO_VLDO28_CON1 0x1b42
+#define MT6357_LDO_VLDO28_CON2 0x1b44
+#define MT6357_LDO_VLDO28_CON3 0x1b46
+#define MT6357_LDO_GOFF2_RSV_CON0 0x1b48
+#define MT6357_LDO_GOFF2_RSV_CON1 0x1b4a
+#define MT6357_LDO_GOFF3_DSN_ID 0x1b80
+#define MT6357_LDO_GOFF3_DSN_REV0 0x1b82
+#define MT6357_LDO_GOFF3_DSN_DBI 0x1b84
+#define MT6357_LDO_GOFF3_DSN_DXI 0x1b86
+#define MT6357_LDO_VCN28_CON0 0x1b88
+#define MT6357_LDO_VCN28_OP_EN 0x1b8a
+#define MT6357_LDO_VCN28_OP_EN_SET 0x1b8c
+#define MT6357_LDO_VCN28_OP_EN_CLR 0x1b8e
+#define MT6357_LDO_VCN28_OP_CFG 0x1b90
+#define MT6357_LDO_VCN28_OP_CFG_SET 0x1b92
+#define MT6357_LDO_VCN28_OP_CFG_CLR 0x1b94
+#define MT6357_LDO_VCN28_CON1 0x1b96
+#define MT6357_LDO_VCN28_CON2 0x1b98
+#define MT6357_LDO_VCN28_CON3 0x1b9a
+#define MT6357_VRTC_CON0 0x1b9c
+#define MT6357_LDO_TREF_CON0 0x1b9e
+#define MT6357_LDO_TREF_OP_EN 0x1ba0
+#define MT6357_LDO_TREF_OP_EN_SET 0x1ba2
+#define MT6357_LDO_TREF_OP_EN_CLR 0x1ba4
+#define MT6357_LDO_TREF_OP_CFG 0x1ba6
+#define MT6357_LDO_TREF_OP_CFG_SET 0x1ba8
+#define MT6357_LDO_TREF_OP_CFG_CLR 0x1baa
+#define MT6357_LDO_TREF_CON1 0x1bac
+#define MT6357_LDO_GOFF3_RSV_CON0 0x1bae
+#define MT6357_LDO_GOFF3_RSV_CON1 0x1bb0
+#define MT6357_LDO_ANA0_DSN_ID 0x1c00
+#define MT6357_LDO_ANA0_DSN_REV0 0x1c02
+#define MT6357_LDO_ANA0_DSN_DBI 0x1c04
+#define MT6357_LDO_ANA0_DSN_DXI 0x1c06
+#define MT6357_VFE28_ANA_CON0 0x1c08
+#define MT6357_VFE28_ANA_CON1 0x1c0a
+#define MT6357_VCN28_ANA_CON0 0x1c0c
+#define MT6357_VCN28_ANA_CON1 0x1c0e
+#define MT6357_VAUD28_ANA_CON0 0x1c10
+#define MT6357_VAUD28_ANA_CON1 0x1c12
+#define MT6357_VAUX18_ANA_CON0 0x1c14
+#define MT6357_VAUX18_ANA_CON1 0x1c16
+#define MT6357_VXO22_ANA_CON0 0x1c18
+#define MT6357_VXO22_ANA_CON1 0x1c1a
+#define MT6357_VCN33_ANA_CON0 0x1c1c
+#define MT6357_VCN33_ANA_CON1 0x1c1e
+#define MT6357_VEMC_ANA_CON0 0x1c20
+#define MT6357_VEMC_ANA_CON1 0x1c22
+#define MT6357_VLDO28_ANA_CON0 0x1c24
+#define MT6357_VLDO28_ANA_CON1 0x1c26
+#define MT6357_VIO28_ANA_CON0 0x1c28
+#define MT6357_VIO28_ANA_CON1 0x1c2a
+#define MT6357_VIBR_ANA_CON0 0x1c2c
+#define MT6357_VIBR_ANA_CON1 0x1c2e
+#define MT6357_VSIM1_ANA_CON0 0x1c30
+#define MT6357_VSIM1_ANA_CON1 0x1c32
+#define MT6357_VSIM2_ANA_CON0 0x1c34
+#define MT6357_VSIM2_ANA_CON1 0x1c36
+#define MT6357_VMCH_ANA_CON0 0x1c38
+#define MT6357_VMCH_ANA_CON1 0x1c3a
+#define MT6357_VMC_ANA_CON0 0x1c3c
+#define MT6357_VMC_ANA_CON1 0x1c3e
+#define MT6357_VCAMIO_ANA_CON0 0x1c40
+#define MT6357_VCAMIO_ANA_CON1 0x1c42
+#define MT6357_VCN18_ANA_CON0 0x1c44
+#define MT6357_VCN18_ANA_CON1 0x1c46
+#define MT6357_VRF18_ANA_CON0 0x1c48
+#define MT6357_VRF18_ANA_CON1 0x1c4a
+#define MT6357_VIO18_ANA_CON0 0x1c4c
+#define MT6357_VIO18_ANA_CON1 0x1c4e
+#define MT6357_VDRAM_ANA_CON1 0x1c50
+#define MT6357_VRF12_ANA_CON0 0x1c52
+#define MT6357_VRF12_ANA_CON1 0x1c54
+#define MT6357_VSRAM_PROC_ANA_CON0 0x1c56
+#define MT6357_VSRAM_OTHERS_ANA_CON0 0x1c58
+#define MT6357_LDO_ANA0_ELR_NUM 0x1c5a
+#define MT6357_VFE28_ELR_0 0x1c5c
+#define MT6357_VCN28_ELR_0 0x1c5e
+#define MT6357_VAUD28_ELR_0 0x1c60
+#define MT6357_VAUX18_ELR_0 0x1c62
+#define MT6357_VXO22_ELR_0 0x1c64
+#define MT6357_VCN33_ELR_0 0x1c66
+#define MT6357_VEMC_ELR_0 0x1c68
+#define MT6357_VLDO28_ELR_0 0x1c6a
+#define MT6357_VIO28_ELR_0 0x1c6c
+#define MT6357_VIBR_ELR_0 0x1c6e
+#define MT6357_VSIM1_ELR_0 0x1c70
+#define MT6357_VSIM2_ELR_0 0x1c72
+#define MT6357_VMCH_ELR_0 0x1c74
+#define MT6357_VMC_ELR_0 0x1c76
+#define MT6357_VCAMIO_ELR_0 0x1c78
+#define MT6357_VCN18_ELR_0 0x1c7a
+#define MT6357_VRF18_ELR_0 0x1c7c
+#define MT6357_LDO_ANA1_DSN_ID 0x1c80
+#define MT6357_LDO_ANA1_DSN_REV0 0x1c82
+#define MT6357_LDO_ANA1_DSN_DBI 0x1c84
+#define MT6357_LDO_ANA1_DSN_DXI 0x1c86
+#define MT6357_VUSB33_ANA_CON0 0x1c88
+#define MT6357_VUSB33_ANA_CON1 0x1c8a
+#define MT6357_VCAMA_ANA_CON0 0x1c8c
+#define MT6357_VCAMA_ANA_CON1 0x1c8e
+#define MT6357_VEFUSE_ANA_CON0 0x1c90
+#define MT6357_VEFUSE_ANA_CON1 0x1c92
+#define MT6357_VCAMD_ANA_CON0 0x1c94
+#define MT6357_VCAMD_ANA_CON1 0x1c96
+#define MT6357_LDO_ANA1_ELR_NUM 0x1c98
+#define MT6357_VUSB33_ELR_0 0x1c9a
+#define MT6357_VCAMA_ELR_0 0x1c9c
+#define MT6357_VEFUSE_ELR_0 0x1c9e
+#define MT6357_VCAMD_ELR_0 0x1ca0
+#define MT6357_VIO18_ELR_0 0x1ca2
+#define MT6357_VDRAM_ELR_0 0x1ca4
+#define MT6357_VRF12_ELR_0 0x1ca6
+#define MT6357_VRTC_ELR_0 0x1ca8
+#define MT6357_VDRAM_ELR_1 0x1caa
+#define MT6357_VDRAM_ELR_2 0x1cac
+#define MT6357_XPP_TOP_ID 0x1e00
+#define MT6357_XPP_TOP_REV0 0x1e02
+#define MT6357_XPP_TOP_DBI 0x1e04
+#define MT6357_XPP_TOP_DXI 0x1e06
+#define MT6357_XPP_TPM0 0x1e08
+#define MT6357_XPP_TPM1 0x1e0a
+#define MT6357_XPP_TOP_TEST_OUT 0x1e0c
+#define MT6357_XPP_TOP_TEST_CON0 0x1e0e
+#define MT6357_XPP_TOP_CKPDN_CON0 0x1e10
+#define MT6357_XPP_TOP_CKPDN_CON0_SET 0x1e12
+#define MT6357_XPP_TOP_CKPDN_CON0_CLR 0x1e14
+#define MT6357_XPP_TOP_CKSEL_CON0 0x1e16
+#define MT6357_XPP_TOP_CKSEL_CON0_SET 0x1e18
+#define MT6357_XPP_TOP_CKSEL_CON0_CLR 0x1e1a
+#define MT6357_XPP_TOP_RST_CON0 0x1e1c
+#define MT6357_XPP_TOP_RST_CON0_SET 0x1e1e
+#define MT6357_XPP_TOP_RST_CON0_CLR 0x1e20
+#define MT6357_XPP_TOP_RST_BANK_CON0 0x1e22
+#define MT6357_XPP_TOP_RST_BANK_CON0_SET 0x1e24
+#define MT6357_XPP_TOP_RST_BANK_CON0_CLR 0x1e26
+#define MT6357_DRIVER_BL_DSN_ID 0x1e80
+#define MT6357_DRIVER_BL_DSN_REV0 0x1e82
+#define MT6357_DRIVER_BL_DSN_DBI 0x1e84
+#define MT6357_DRIVER_BL_DSN_DXI 0x1e86
+#define MT6357_ISINK1_CON0 0x1e88
+#define MT6357_ISINK1_CON1 0x1e8a
+#define MT6357_ISINK1_CON2 0x1e8c
+#define MT6357_ISINK1_CON3 0x1e8e
+#define MT6357_ISINK_ANA1 0x1e90
+#define MT6357_ISINK_PHASE_DLY 0x1e92
+#define MT6357_ISINK_SFSTR 0x1e94
+#define MT6357_ISINK_EN_CTRL 0x1e96
+#define MT6357_ISINK_MODE_CTRL 0x1e98
+#define MT6357_DRIVER_ANA_CON0 0x1e9a
+#define MT6357_ISINK_ANA_CON0 0x1e9c
+#define MT6357_ISINK_ANA_CON1 0x1e9e
+#define MT6357_DRIVER_BL_ELR_NUM 0x1ea0
+#define MT6357_DRIVER_BL_ELR_0 0x1ea2
+#define MT6357_DRIVER_CI_DSN_ID 0x1f00
+#define MT6357_DRIVER_CI_DSN_REV0 0x1f02
+#define MT6357_DRIVER_CI_DSN_DBI 0x1f04
+#define MT6357_DRIVER_CI_DSN_DXI 0x1f06
+#define MT6357_CHRIND_CON0 0x1f08
+#define MT6357_CHRIND_CON1 0x1f0a
+#define MT6357_CHRIND_CON2 0x1f0c
+#define MT6357_CHRIND_CON3 0x1f0e
+#define MT6357_CHRIND_CON4 0x1f10
+#define MT6357_CHRIND_EN_CTRL 0x1f12
+#define MT6357_CHRIND_ANA_CON0 0x1f14
+#define MT6357_DRIVER_DL_DSN_ID 0x1f80
+#define MT6357_DRIVER_DL_DSN_REV0 0x1f82
+#define MT6357_DRIVER_DL_DSN_DBI 0x1f84
+#define MT6357_DRIVER_DL_DSN_DXI 0x1f86
+#define MT6357_ISINK2_CON0 0x1f88
+#define MT6357_ISINK3_CON0 0x1f8a
+#define MT6357_ISINK_EN_CTRL_SMPL 0x1f8c
+#define MT6357_AUD_TOP_ID 0x2080
+#define MT6357_AUD_TOP_REV0 0x2082
+#define MT6357_AUD_TOP_DBI 0x2084
+#define MT6357_AUD_TOP_DXI 0x2086
+#define MT6357_AUD_TOP_CKPDN_TPM0 0x2088
+#define MT6357_AUD_TOP_CKPDN_TPM1 0x208a
+#define MT6357_AUD_TOP_CKPDN_CON0 0x208c
+#define MT6357_AUD_TOP_CKPDN_CON0_SET 0x208e
+#define MT6357_AUD_TOP_CKPDN_CON0_CLR 0x2090
+#define MT6357_AUD_TOP_CKSEL_CON0 0x2092
+#define MT6357_AUD_TOP_CKSEL_CON0_SET 0x2094
+#define MT6357_AUD_TOP_CKSEL_CON0_CLR 0x2096
+#define MT6357_AUD_TOP_CKTST_CON0 0x2098
+#define MT6357_AUD_TOP_RST_CON0 0x209a
+#define MT6357_AUD_TOP_RST_CON0_SET 0x209c
+#define MT6357_AUD_TOP_RST_CON0_CLR 0x209e
+#define MT6357_AUD_TOP_RST_BANK_CON0 0x20a0
+#define MT6357_AUD_TOP_INT_CON0 0x20a2
+#define MT6357_AUD_TOP_INT_CON0_SET 0x20a4
+#define MT6357_AUD_TOP_INT_CON0_CLR 0x20a6
+#define MT6357_AUD_TOP_INT_MASK_CON0 0x20a8
+#define MT6357_AUD_TOP_INT_MASK_CON0_SET 0x20aa
+#define MT6357_AUD_TOP_INT_MASK_CON0_CLR 0x20ac
+#define MT6357_AUD_TOP_INT_STATUS0 0x20ae
+#define MT6357_AUD_TOP_INT_RAW_STATUS0 0x20b0
+#define MT6357_AUD_TOP_INT_MISC_CON0 0x20b2
+#define MT6357_AUDNCP_CLKDIV_CON0 0x20b4
+#define MT6357_AUDNCP_CLKDIV_CON1 0x20b6
+#define MT6357_AUDNCP_CLKDIV_CON2 0x20b8
+#define MT6357_AUDNCP_CLKDIV_CON3 0x20ba
+#define MT6357_AUDNCP_CLKDIV_CON4 0x20bc
+#define MT6357_AUD_TOP_MON_CON0 0x20be
+#define MT6357_AUDIO_DIG_DSN_ID 0x2100
+#define MT6357_AUDIO_DIG_DSN_REV0 0x2102
+#define MT6357_AUDIO_DIG_DSN_DBI 0x2104
+#define MT6357_AUDIO_DIG_DSN_DXI 0x2106
+#define MT6357_AFE_UL_DL_CON0 0x2108
+#define MT6357_AFE_DL_SRC2_CON0_L 0x210a
+#define MT6357_AFE_UL_SRC_CON0_H 0x210c
+#define MT6357_AFE_UL_SRC_CON0_L 0x210e
+#define MT6357_AFE_TOP_CON0 0x2110
+#define MT6357_AUDIO_TOP_CON0 0x2112
+#define MT6357_AFE_MON_DEBUG0 0x2114
+#define MT6357_AFUNC_AUD_CON0 0x2116
+#define MT6357_AFUNC_AUD_CON1 0x2118
+#define MT6357_AFUNC_AUD_CON2 0x211a
+#define MT6357_AFUNC_AUD_CON3 0x211c
+#define MT6357_AFUNC_AUD_CON4 0x211e
+#define MT6357_AFUNC_AUD_CON5 0x2120
+#define MT6357_AFUNC_AUD_CON6 0x2122
+#define MT6357_AFUNC_AUD_MON0 0x2124
+#define MT6357_AUDRC_TUNE_MON0 0x2126
+#define MT6357_AFE_ADDA_MTKAIF_FIFO_CFG0 0x2128
+#define MT6357_AFE_ADDA_MTKAIF_FIFO_LOG_MON1 0x212a
+#define MT6357_AFE_ADDA_MTKAIF_MON0 0x212c
+#define MT6357_AFE_ADDA_MTKAIF_MON1 0x212e
+#define MT6357_AFE_ADDA_MTKAIF_MON2 0x2130
+#define MT6357_AFE_ADDA_MTKAIF_MON3 0x2132
+#define MT6357_AFE_ADDA_MTKAIF_CFG0 0x2134
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG0 0x2136
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG1 0x2138
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG2 0x213a
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG3 0x213c
+#define MT6357_AFE_ADDA_MTKAIF_TX_CFG1 0x213e
+#define MT6357_AFE_SGEN_CFG0 0x2140
+#define MT6357_AFE_SGEN_CFG1 0x2142
+#define MT6357_AFE_ADC_ASYNC_FIFO_CFG 0x2144
+#define MT6357_AFE_DCCLK_CFG0 0x2146
+#define MT6357_AFE_DCCLK_CFG1 0x2148
+#define MT6357_AUDIO_DIG_CFG 0x214a
+#define MT6357_AFE_AUD_PAD_TOP 0x214c
+#define MT6357_AFE_AUD_PAD_TOP_MON 0x214e
+#define MT6357_AFE_AUD_PAD_TOP_MON1 0x2150
+#define MT6357_AUDENC_DSN_ID 0x2180
+#define MT6357_AUDENC_DSN_REV0 0x2182
+#define MT6357_AUDENC_DSN_DBI 0x2184
+#define MT6357_AUDENC_DSN_FPI 0x2186
+#define MT6357_AUDENC_ANA_CON0 0x2188
+#define MT6357_AUDENC_ANA_CON1 0x218a
+#define MT6357_AUDENC_ANA_CON2 0x218c
+#define MT6357_AUDENC_ANA_CON3 0x218e
+#define MT6357_AUDENC_ANA_CON4 0x2190
+#define MT6357_AUDENC_ANA_CON5 0x2192
+#define MT6357_AUDENC_ANA_CON6 0x2194
+#define MT6357_AUDENC_ANA_CON7 0x2196
+#define MT6357_AUDENC_ANA_CON8 0x2198
+#define MT6357_AUDENC_ANA_CON9 0x219a
+#define MT6357_AUDENC_ANA_CON10 0x219c
+#define MT6357_AUDENC_ANA_CON11 0x219e
+#define MT6357_AUDDEC_DSN_ID 0x2200
+#define MT6357_AUDDEC_DSN_REV0 0x2202
+#define MT6357_AUDDEC_DSN_DBI 0x2204
+#define MT6357_AUDDEC_DSN_FPI 0x2206
+#define MT6357_AUDDEC_ANA_CON0 0x2208
+#define MT6357_AUDDEC_ANA_CON1 0x220a
+#define MT6357_AUDDEC_ANA_CON2 0x220c
+#define MT6357_AUDDEC_ANA_CON3 0x220e
+#define MT6357_AUDDEC_ANA_CON4 0x2210
+#define MT6357_AUDDEC_ANA_CON5 0x2212
+#define MT6357_AUDDEC_ANA_CON6 0x2214
+#define MT6357_AUDDEC_ANA_CON7 0x2216
+#define MT6357_AUDDEC_ANA_CON8 0x2218
+#define MT6357_AUDDEC_ANA_CON9 0x221a
+#define MT6357_AUDDEC_ANA_CON10 0x221c
+#define MT6357_AUDDEC_ANA_CON11 0x221e
+#define MT6357_AUDDEC_ANA_CON12 0x2220
+#define MT6357_AUDDEC_ANA_CON13 0x2222
+#define MT6357_AUDDEC_ELR_NUM 0x2224
+#define MT6357_AUDDEC_ELR_0 0x2226
+#define MT6357_AUDZCD_DSN_ID 0x2280
+#define MT6357_AUDZCD_DSN_REV0 0x2282
+#define MT6357_AUDZCD_DSN_DBI 0x2284
+#define MT6357_AUDZCD_DSN_FPI 0x2286
+#define MT6357_ZCD_CON0 0x2288
+#define MT6357_ZCD_CON1 0x228a
+#define MT6357_ZCD_CON2 0x228c
+#define MT6357_ZCD_CON3 0x228e
+#define MT6357_ZCD_CON4 0x2290
+#define MT6357_ZCD_CON5 0x2292
+#define MT6357_ACCDET_DSN_DIG_ID 0x2300
+#define MT6357_ACCDET_DSN_DIG_REV0 0x2302
+#define MT6357_ACCDET_DSN_DBI 0x2304
+#define MT6357_ACCDET_DSN_FPI 0x2306
+#define MT6357_ACCDET_CON0 0x2308
+#define MT6357_ACCDET_CON1 0x230a
+#define MT6357_ACCDET_CON2 0x230c
+#define MT6357_ACCDET_CON3 0x230e
+#define MT6357_ACCDET_CON4 0x2310
+#define MT6357_ACCDET_CON5 0x2312
+#define MT6357_ACCDET_CON6 0x2314
+#define MT6357_ACCDET_CON7 0x2316
+#define MT6357_ACCDET_CON8 0x2318
+#define MT6357_ACCDET_CON9 0x231a
+#define MT6357_ACCDET_CON10 0x231c
+#define MT6357_ACCDET_CON11 0x231e
+#define MT6357_ACCDET_CON12 0x2320
+#define MT6357_ACCDET_CON13 0x2322
+#define MT6357_ACCDET_CON14 0x2324
+#define MT6357_ACCDET_CON15 0x2326
+#define MT6357_ACCDET_CON16 0x2328
+#define MT6357_ACCDET_CON17 0x232a
+#define MT6357_ACCDET_CON18 0x232c
+#define MT6357_ACCDET_CON19 0x232e
+#define MT6357_ACCDET_CON20 0x2330
+#define MT6357_ACCDET_CON21 0x2332
+#define MT6357_ACCDET_CON22 0x2334
+#define MT6357_ACCDET_CON23 0x2336
+#define MT6357_ACCDET_CON24 0x2338
+#define MT6357_ACCDET_CON25 0x233a
+#define MT6357_ACCDET_CON26 0x233c
+#define MT6357_ACCDET_CON27 0x233e
+#define MT6357_ACCDET_CON28 0x2340
+
+#endif /* __MFD_MT6357_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6358/core.h b/include/linux/mfd/mt6358/core.h
new file mode 100644
index 000000000000..68578e2019b0
--- /dev/null
+++ b/include/linux/mfd/mt6358/core.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6358_CORE_H__
+#define __MFD_MT6358_CORE_H__
+
+struct irq_top_t {
+ int hwirq_base;
+ unsigned int num_int_regs;
+ unsigned int en_reg;
+ unsigned int en_reg_shift;
+ unsigned int sta_reg;
+ unsigned int sta_reg_shift;
+ unsigned int top_offset;
+};
+
+struct pmic_irq_data {
+ unsigned int num_top;
+ unsigned int num_pmic_irqs;
+ unsigned short top_int_status_reg;
+ bool *enable_hwirq;
+ bool *cache_hwirq;
+ const struct irq_top_t *pmic_ints;
+};
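+
+/*
+ * How these two structures fit together (a reading of the matching PMIC
+ * irq driver, not something defined in this header): each irq_top_t
+ * describes one "top" domain, i.e. a run of hardware interrupts behind
+ * its own bank of enable/status registers; pmic_irq_data then holds one
+ * irq_top_t per top plus the shared top-level status register used to
+ * find out which top fired.
+ */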
+
+enum mt6358_irq_top_status_shift {
+ MT6358_BUCK_TOP = 0,
+ MT6358_LDO_TOP,
+ MT6358_PSC_TOP,
+ MT6358_SCK_TOP,
+ MT6358_BM_TOP,
+ MT6358_HK_TOP,
+ MT6358_AUD_TOP,
+ MT6358_MISC_TOP,
+};
+
+enum mt6358_irq_numbers {
+ MT6358_IRQ_VPROC11_OC = 0,
+ MT6358_IRQ_VPROC12_OC,
+ MT6358_IRQ_VCORE_OC,
+ MT6358_IRQ_VGPU_OC,
+ MT6358_IRQ_VMODEM_OC,
+ MT6358_IRQ_VDRAM1_OC,
+ MT6358_IRQ_VS1_OC,
+ MT6358_IRQ_VS2_OC,
+ MT6358_IRQ_VPA_OC,
+ MT6358_IRQ_VCORE_PREOC,
+ MT6358_IRQ_VFE28_OC = 16,
+ MT6358_IRQ_VXO22_OC,
+ MT6358_IRQ_VRF18_OC,
+ MT6358_IRQ_VRF12_OC,
+ MT6358_IRQ_VEFUSE_OC,
+ MT6358_IRQ_VCN33_OC,
+ MT6358_IRQ_VCN28_OC,
+ MT6358_IRQ_VCN18_OC,
+ MT6358_IRQ_VCAMA1_OC,
+ MT6358_IRQ_VCAMA2_OC,
+ MT6358_IRQ_VCAMD_OC,
+ MT6358_IRQ_VCAMIO_OC,
+ MT6358_IRQ_VLDO28_OC,
+ MT6358_IRQ_VA12_OC,
+ MT6358_IRQ_VAUX18_OC,
+ MT6358_IRQ_VAUD28_OC,
+ MT6358_IRQ_VIO28_OC,
+ MT6358_IRQ_VIO18_OC,
+ MT6358_IRQ_VSRAM_PROC11_OC,
+ MT6358_IRQ_VSRAM_PROC12_OC,
+ MT6358_IRQ_VSRAM_OTHERS_OC,
+ MT6358_IRQ_VSRAM_GPU_OC,
+ MT6358_IRQ_VDRAM2_OC,
+ MT6358_IRQ_VMC_OC,
+ MT6358_IRQ_VMCH_OC,
+ MT6358_IRQ_VEMC_OC,
+ MT6358_IRQ_VSIM1_OC,
+ MT6358_IRQ_VSIM2_OC,
+ MT6358_IRQ_VIBR_OC,
+ MT6358_IRQ_VUSB_OC,
+ MT6358_IRQ_VBIF28_OC,
+ MT6358_IRQ_PWRKEY = 48,
+ MT6358_IRQ_HOMEKEY,
+ MT6358_IRQ_PWRKEY_R,
+ MT6358_IRQ_HOMEKEY_R,
+ MT6358_IRQ_NI_LBAT_INT,
+ MT6358_IRQ_CHRDET,
+ MT6358_IRQ_CHRDET_EDGE,
+ MT6358_IRQ_VCDT_HV_DET,
+ MT6358_IRQ_RTC = 64,
+ MT6358_IRQ_FG_BAT0_H = 80,
+ MT6358_IRQ_FG_BAT0_L,
+ MT6358_IRQ_FG_CUR_H,
+ MT6358_IRQ_FG_CUR_L,
+ MT6358_IRQ_FG_ZCV,
+ MT6358_IRQ_FG_BAT1_H,
+ MT6358_IRQ_FG_BAT1_L,
+ MT6358_IRQ_FG_N_CHARGE_L,
+ MT6358_IRQ_FG_IAVG_H,
+ MT6358_IRQ_FG_IAVG_L,
+ MT6358_IRQ_FG_TIME_H,
+ MT6358_IRQ_FG_DISCHARGE,
+ MT6358_IRQ_FG_CHARGE,
+ MT6358_IRQ_BATON_LV = 96,
+ MT6358_IRQ_BATON_HT,
+ MT6358_IRQ_BATON_BAT_IN,
+ MT6358_IRQ_BATON_BAT_OUT,
+ MT6358_IRQ_BIF,
+ MT6358_IRQ_BAT_H = 112,
+ MT6358_IRQ_BAT_L,
+ MT6358_IRQ_BAT2_H,
+ MT6358_IRQ_BAT2_L,
+ MT6358_IRQ_BAT_TEMP_H,
+ MT6358_IRQ_BAT_TEMP_L,
+ MT6358_IRQ_AUXADC_IMP,
+ MT6358_IRQ_NAG_C_DLTV,
+ MT6358_IRQ_AUDIO = 128,
+ MT6358_IRQ_ACCDET = 133,
+ MT6358_IRQ_ACCDET_EINT0,
+ MT6358_IRQ_ACCDET_EINT1,
+ MT6358_IRQ_SPI_CMD_ALERT = 144,
+ MT6358_IRQ_NR,
+};
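+
+/*
+ * The explicit initializers above mirror the hardware status-register
+ * layout: each top starts on a 16-bit register boundary (BUCK at 0,
+ * LDO at 16, PSC at 48, SCK at 64, BM at 80, HK at 112, AUD at 128,
+ * MISC at 144), and the gaps correspond to status bits this chip
+ * leaves unused. The *_BASE and *_BITS macros below rely on that
+ * layout.
+ */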
+
+#define MT6358_IRQ_BUCK_BASE MT6358_IRQ_VPROC11_OC
+#define MT6358_IRQ_LDO_BASE MT6358_IRQ_VFE28_OC
+#define MT6358_IRQ_PSC_BASE MT6358_IRQ_PWRKEY
+#define MT6358_IRQ_SCK_BASE MT6358_IRQ_RTC
+#define MT6358_IRQ_BM_BASE MT6358_IRQ_FG_BAT0_H
+#define MT6358_IRQ_HK_BASE MT6358_IRQ_BAT_H
+#define MT6358_IRQ_AUD_BASE MT6358_IRQ_AUDIO
+#define MT6358_IRQ_MISC_BASE MT6358_IRQ_SPI_CMD_ALERT
+
+#define MT6358_IRQ_BUCK_BITS (MT6358_IRQ_VCORE_PREOC - MT6358_IRQ_BUCK_BASE + 1)
+#define MT6358_IRQ_LDO_BITS (MT6358_IRQ_VBIF28_OC - MT6358_IRQ_LDO_BASE + 1)
+#define MT6358_IRQ_PSC_BITS (MT6358_IRQ_VCDT_HV_DET - MT6358_IRQ_PSC_BASE + 1)
+#define MT6358_IRQ_SCK_BITS (MT6358_IRQ_RTC - MT6358_IRQ_SCK_BASE + 1)
+#define MT6358_IRQ_BM_BITS (MT6358_IRQ_BIF - MT6358_IRQ_BM_BASE + 1)
+#define MT6358_IRQ_HK_BITS (MT6358_IRQ_NAG_C_DLTV - MT6358_IRQ_HK_BASE + 1)
+#define MT6358_IRQ_AUD_BITS (MT6358_IRQ_ACCDET_EINT1 - MT6358_IRQ_AUD_BASE + 1)
+#define MT6358_IRQ_MISC_BITS \
+ (MT6358_IRQ_SPI_CMD_ALERT - MT6358_IRQ_MISC_BASE + 1)
+
+#define MT6358_TOP_GEN(sp) \
+{ \
+ .hwirq_base = MT6358_IRQ_##sp##_BASE, \
+ .num_int_regs = \
+ ((MT6358_IRQ_##sp##_BITS - 1) / \
+ MTK_PMIC_REG_WIDTH) + 1, \
+ .en_reg = MT6358_##sp##_TOP_INT_CON0, \
+ .en_reg_shift = 0x6, \
+ .sta_reg = MT6358_##sp##_TOP_INT_STATUS0, \
+ .sta_reg_shift = 0x2, \
+ .top_offset = MT6358_##sp##_TOP, \
+}
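+
+/*
+ * MT6358_TOP_GEN() expands to an irq_top_t initializer. num_int_regs is
+ * the number of 16-bit registers needed to cover the top's bit range
+ * (MTK_PMIC_REG_WIDTH is assumed to be the 16-bit register width from
+ * the MT6397 core support); en_reg_shift (0x6) and sta_reg_shift (0x2)
+ * are the address strides between consecutive CON/STATUS registers in
+ * a bank, which matches the register map above (e.g. LDO_TOP_INT_CON0
+ * at 0x1a50 and CON1 at 0x1a56, STATUS0 at 0x1a68 and STATUS1 at
+ * 0x1a6a). A sketch of the table the irq driver builds from it:
+ *
+ *	static const struct irq_top_t mt6358_ints[] = {
+ *		MT6358_TOP_GEN(BUCK),
+ *		MT6358_TOP_GEN(LDO),
+ *		MT6358_TOP_GEN(PSC),
+ *		MT6358_TOP_GEN(SCK),
+ *		MT6358_TOP_GEN(BM),
+ *		MT6358_TOP_GEN(HK),
+ *		MT6358_TOP_GEN(AUD),
+ *		MT6358_TOP_GEN(MISC),
+ *	};
+ */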
+
+#endif /* __MFD_MT6358_CORE_H__ */
diff --git a/include/linux/mfd/mt6358/registers.h b/include/linux/mfd/mt6358/registers.h
new file mode 100644
index 000000000000..3d33517f178c
--- /dev/null
+++ b/include/linux/mfd/mt6358/registers.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6358_REGISTERS_H__
+#define __MFD_MT6358_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6358_SWCID 0xa
+#define MT6358_TOPSTATUS 0x28
+#define MT6358_TOP_RST_MISC 0x14c
+#define MT6358_MISC_TOP_INT_CON0 0x188
+#define MT6358_MISC_TOP_INT_STATUS0 0x194
+#define MT6358_TOP_INT_STATUS0 0x19e
+#define MT6358_SCK_TOP_INT_CON0 0x52e
+#define MT6358_SCK_TOP_INT_STATUS0 0x53a
+#define MT6358_EOSC_CALI_CON0 0x540
+#define MT6358_EOSC_CALI_CON1 0x542
+#define MT6358_RTC_MIX_CON0 0x544
+#define MT6358_RTC_MIX_CON1 0x546
+#define MT6358_RTC_MIX_CON2 0x548
+#define MT6358_RTC_DSN_ID 0x580
+#define MT6358_RTC_DSN_REV0 0x582
+#define MT6358_RTC_DBI 0x584
+#define MT6358_RTC_DXI 0x586
+#define MT6358_RTC_BBPU 0x588
+#define MT6358_RTC_IRQ_STA 0x58a
+#define MT6358_RTC_IRQ_EN 0x58c
+#define MT6358_RTC_CII_EN 0x58e
+#define MT6358_RTC_AL_MASK 0x590
+#define MT6358_RTC_TC_SEC 0x592
+#define MT6358_RTC_TC_MIN 0x594
+#define MT6358_RTC_TC_HOU 0x596
+#define MT6358_RTC_TC_DOM 0x598
+#define MT6358_RTC_TC_DOW 0x59a
+#define MT6358_RTC_TC_MTH 0x59c
+#define MT6358_RTC_TC_YEA 0x59e
+#define MT6358_RTC_AL_SEC 0x5a0
+#define MT6358_RTC_AL_MIN 0x5a2
+#define MT6358_RTC_AL_HOU 0x5a4
+#define MT6358_RTC_AL_DOM 0x5a6
+#define MT6358_RTC_AL_DOW 0x5a8
+#define MT6358_RTC_AL_MTH 0x5aa
+#define MT6358_RTC_AL_YEA 0x5ac
+#define MT6358_RTC_OSC32CON 0x5ae
+#define MT6358_RTC_POWERKEY1 0x5b0
+#define MT6358_RTC_POWERKEY2 0x5b2
+#define MT6358_RTC_PDN1 0x5b4
+#define MT6358_RTC_PDN2 0x5b6
+#define MT6358_RTC_SPAR0 0x5b8
+#define MT6358_RTC_SPAR1 0x5ba
+#define MT6358_RTC_PROT 0x5bc
+#define MT6358_RTC_DIFF 0x5be
+#define MT6358_RTC_CALI 0x5c0
+#define MT6358_RTC_WRTGR 0x5c2
+#define MT6358_RTC_CON 0x5c4
+#define MT6358_RTC_SEC_CTRL 0x5c6
+#define MT6358_RTC_INT_CNT 0x5c8
+#define MT6358_RTC_SEC_DAT0 0x5ca
+#define MT6358_RTC_SEC_DAT1 0x5cc
+#define MT6358_RTC_SEC_DAT2 0x5ce
+#define MT6358_RTC_SEC_DSN_ID 0x600
+#define MT6358_RTC_SEC_DSN_REV0 0x602
+#define MT6358_RTC_SEC_DBI 0x604
+#define MT6358_RTC_SEC_DXI 0x606
+#define MT6358_RTC_TC_SEC_SEC 0x608
+#define MT6358_RTC_TC_MIN_SEC 0x60a
+#define MT6358_RTC_TC_HOU_SEC 0x60c
+#define MT6358_RTC_TC_DOM_SEC 0x60e
+#define MT6358_RTC_TC_DOW_SEC 0x610
+#define MT6358_RTC_TC_MTH_SEC 0x612
+#define MT6358_RTC_TC_YEA_SEC 0x614
+#define MT6358_RTC_SEC_CK_PDN 0x616
+#define MT6358_RTC_SEC_WRTGR 0x618
+#define MT6358_PSC_TOP_INT_CON0 0x910
+#define MT6358_PSC_TOP_INT_STATUS0 0x91c
+#define MT6358_BM_TOP_INT_CON0 0xc32
+#define MT6358_BM_TOP_INT_CON1 0xc38
+#define MT6358_BM_TOP_INT_STATUS0 0xc4a
+#define MT6358_BM_TOP_INT_STATUS1 0xc4c
+#define MT6358_HK_TOP_INT_CON0 0xf92
+#define MT6358_HK_TOP_INT_STATUS0 0xf9e
+#define MT6358_BUCK_TOP_INT_CON0 0x1318
+#define MT6358_BUCK_TOP_INT_STATUS0 0x1324
+#define MT6358_BUCK_VPROC11_CON0 0x1388
+#define MT6358_BUCK_VPROC11_DBG0 0x139e
+#define MT6358_BUCK_VPROC11_DBG1 0x13a0
+#define MT6358_BUCK_VPROC11_ELR0 0x13a6
+#define MT6358_BUCK_VPROC12_CON0 0x1408
+#define MT6358_BUCK_VPROC12_DBG0 0x141e
+#define MT6358_BUCK_VPROC12_DBG1 0x1420
+#define MT6358_BUCK_VPROC12_ELR0 0x1426
+#define MT6358_BUCK_VCORE_CON0 0x1488
+#define MT6358_BUCK_VCORE_DBG0 0x149e
+#define MT6358_BUCK_VCORE_DBG1 0x14a0
+#define MT6358_BUCK_VCORE_SSHUB_CON0 0x14a4
+#define MT6358_BUCK_VCORE_SSHUB_CON1 0x14a6
+#define MT6358_BUCK_VCORE_SSHUB_ELR0 MT6358_BUCK_VCORE_SSHUB_CON1
+#define MT6358_BUCK_VCORE_SSHUB_DBG1 MT6358_BUCK_VCORE_DBG1
+#define MT6358_BUCK_VCORE_ELR0 0x14aa
+#define MT6358_BUCK_VGPU_CON0 0x1508
+#define MT6358_BUCK_VGPU_DBG0 0x151e
+#define MT6358_BUCK_VGPU_DBG1 0x1520
+#define MT6358_BUCK_VGPU_ELR0 0x1526
+#define MT6358_BUCK_VMODEM_CON0 0x1588
+#define MT6358_BUCK_VMODEM_DBG0 0x159e
+#define MT6358_BUCK_VMODEM_DBG1 0x15a0
+#define MT6358_BUCK_VMODEM_ELR0 0x15a6
+#define MT6358_BUCK_VDRAM1_CON0 0x1608
+#define MT6358_BUCK_VDRAM1_DBG0 0x161e
+#define MT6358_BUCK_VDRAM1_DBG1 0x1620
+#define MT6358_BUCK_VDRAM1_ELR0 0x1626
+#define MT6358_BUCK_VS1_CON0 0x1688
+#define MT6358_BUCK_VS1_DBG0 0x169e
+#define MT6358_BUCK_VS1_DBG1 0x16a0
+#define MT6358_BUCK_VS1_ELR0 0x16ae
+#define MT6358_BUCK_VS2_CON0 0x1708
+#define MT6358_BUCK_VS2_DBG0 0x171e
+#define MT6358_BUCK_VS2_DBG1 0x1720
+#define MT6358_BUCK_VS2_ELR0 0x172e
+#define MT6358_BUCK_VPA_CON0 0x1788
+#define MT6358_BUCK_VPA_CON1 0x178a
+#define MT6358_BUCK_VPA_ELR0 MT6358_BUCK_VPA_CON1
+#define MT6358_BUCK_VPA_DBG0 0x1792
+#define MT6358_BUCK_VPA_DBG1 0x1794
+#define MT6358_VPROC_ANA_CON0 0x180c
+#define MT6358_VCORE_VGPU_ANA_CON0 0x1828
+#define MT6358_VMODEM_ANA_CON0 0x1888
+#define MT6358_VDRAM1_ANA_CON0 0x1896
+#define MT6358_VS1_ANA_CON0 0x18a2
+#define MT6358_VS2_ANA_CON0 0x18ae
+#define MT6358_VPA_ANA_CON0 0x18ba
+#define MT6358_LDO_TOP_INT_CON0 0x1a50
+#define MT6358_LDO_TOP_INT_CON1 0x1a56
+#define MT6358_LDO_TOP_INT_STATUS0 0x1a68
+#define MT6358_LDO_TOP_INT_STATUS1 0x1a6a
+#define MT6358_LDO_VXO22_CON0 0x1a88
+#define MT6358_LDO_VXO22_CON1 0x1a96
+#define MT6358_LDO_VA12_CON0 0x1a9c
+#define MT6358_LDO_VA12_CON1 0x1aaa
+#define MT6358_LDO_VAUX18_CON0 0x1ab0
+#define MT6358_LDO_VAUX18_CON1 0x1abe
+#define MT6358_LDO_VAUD28_CON0 0x1ac4
+#define MT6358_LDO_VAUD28_CON1 0x1ad2
+#define MT6358_LDO_VIO28_CON0 0x1ad8
+#define MT6358_LDO_VIO28_CON1 0x1ae6
+#define MT6358_LDO_VIO18_CON0 0x1aec
+#define MT6358_LDO_VIO18_CON1 0x1afa
+#define MT6358_LDO_VDRAM2_CON0 0x1b08
+#define MT6358_LDO_VDRAM2_CON1 0x1b16
+#define MT6358_LDO_VEMC_CON0 0x1b1c
+#define MT6358_LDO_VEMC_CON1 0x1b2a
+#define MT6358_LDO_VUSB_CON0_0 0x1b30
+#define MT6358_LDO_VUSB_CON1 0x1b40
+#define MT6358_LDO_VSRAM_PROC11_CON0 0x1b46
+#define MT6358_LDO_VSRAM_PROC11_DBG0 0x1b60
+#define MT6358_LDO_VSRAM_PROC11_DBG1 0x1b62
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON0 0x1b64
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON1 0x1b66
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON2 0x1b68
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON3 0x1b6a
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON0 0x1b6c
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON1 0x1b6e
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON2 0x1b70
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON3 0x1b72
+#define MT6358_LDO_VSRAM_WAKEUP_CON0 0x1b74
+#define MT6358_LDO_GON1_ELR_NUM 0x1b76
+#define MT6358_LDO_VDRAM2_ELR0 0x1b78
+#define MT6358_LDO_VSRAM_PROC12_CON0 0x1b88
+#define MT6358_LDO_VSRAM_PROC12_DBG0 0x1ba2
+#define MT6358_LDO_VSRAM_PROC12_DBG1 0x1ba4
+#define MT6358_LDO_VSRAM_OTHERS_CON0 0x1ba6
+#define MT6358_LDO_VSRAM_OTHERS_DBG0 0x1bc0
+#define MT6358_LDO_VSRAM_OTHERS_DBG1 0x1bc2
+#define MT6358_LDO_VSRAM_OTHERS_SSHUB_CON0 0x1bc4
+#define MT6358_LDO_VSRAM_OTHERS_SSHUB_CON1 0x1bc6
+#define MT6358_LDO_VSRAM_OTHERS_SSHUB_DBG1 MT6358_LDO_VSRAM_OTHERS_DBG1
+#define MT6358_LDO_VSRAM_GPU_CON0 0x1bc8
+#define MT6358_LDO_VSRAM_GPU_DBG0 0x1be2
+#define MT6358_LDO_VSRAM_GPU_DBG1 0x1be4
+#define MT6358_LDO_VSRAM_CON0 0x1bee
+#define MT6358_LDO_VSRAM_CON1 0x1bf0
+#define MT6358_LDO_VSRAM_CON2 0x1bf2
+#define MT6358_LDO_VSRAM_CON3 0x1bf4
+#define MT6358_LDO_VFE28_CON0 0x1c08
+#define MT6358_LDO_VFE28_CON1 0x1c16
+#define MT6358_LDO_VFE28_CON2 0x1c18
+#define MT6358_LDO_VFE28_CON3 0x1c1a
+#define MT6358_LDO_VRF18_CON0 0x1c1c
+#define MT6358_LDO_VRF18_CON1 0x1c2a
+#define MT6358_LDO_VRF18_CON2 0x1c2c
+#define MT6358_LDO_VRF18_CON3 0x1c2e
+#define MT6358_LDO_VRF12_CON0 0x1c30
+#define MT6358_LDO_VRF12_CON1 0x1c3e
+#define MT6358_LDO_VRF12_CON2 0x1c40
+#define MT6358_LDO_VRF12_CON3 0x1c42
+#define MT6358_LDO_VEFUSE_CON0 0x1c44
+#define MT6358_LDO_VEFUSE_CON1 0x1c52
+#define MT6358_LDO_VEFUSE_CON2 0x1c54
+#define MT6358_LDO_VEFUSE_CON3 0x1c56
+#define MT6358_LDO_VCN18_CON0 0x1c58
+#define MT6358_LDO_VCN18_CON1 0x1c66
+#define MT6358_LDO_VCN18_CON2 0x1c68
+#define MT6358_LDO_VCN18_CON3 0x1c6a
+#define MT6358_LDO_VCAMA1_CON0 0x1c6c
+#define MT6358_LDO_VCAMA1_CON1 0x1c7a
+#define MT6358_LDO_VCAMA1_CON2 0x1c7c
+#define MT6358_LDO_VCAMA1_CON3 0x1c7e
+#define MT6358_LDO_VCAMA2_CON0 0x1c88
+#define MT6358_LDO_VCAMA2_CON1 0x1c96
+#define MT6358_LDO_VCAMA2_CON2 0x1c98
+#define MT6358_LDO_VCAMA2_CON3 0x1c9a
+#define MT6358_LDO_VCAMD_CON0 0x1c9c
+#define MT6358_LDO_VCAMD_CON1 0x1caa
+#define MT6358_LDO_VCAMD_CON2 0x1cac
+#define MT6358_LDO_VCAMD_CON3 0x1cae
+#define MT6358_LDO_VCAMIO_CON0 0x1cb0
+#define MT6358_LDO_VCAMIO_CON1 0x1cbe
+#define MT6358_LDO_VCAMIO_CON2 0x1cc0
+#define MT6358_LDO_VCAMIO_CON3 0x1cc2
+#define MT6358_LDO_VMC_CON0 0x1cc4
+#define MT6358_LDO_VMC_CON1 0x1cd2
+#define MT6358_LDO_VMC_CON2 0x1cd4
+#define MT6358_LDO_VMC_CON3 0x1cd6
+#define MT6358_LDO_VMCH_CON0 0x1cd8
+#define MT6358_LDO_VMCH_CON1 0x1ce6
+#define MT6358_LDO_VMCH_CON2 0x1ce8
+#define MT6358_LDO_VMCH_CON3 0x1cea
+#define MT6358_LDO_VIBR_CON0 0x1d08
+#define MT6358_LDO_VIBR_CON1 0x1d16
+#define MT6358_LDO_VIBR_CON2 0x1d18
+#define MT6358_LDO_VIBR_CON3 0x1d1a
+#define MT6358_LDO_VCN33_CON0_0 0x1d1c
+#define MT6358_LDO_VCN33_CON0_1 0x1d2a
+#define MT6358_LDO_VCN33_CON1 0x1d2c
+#define MT6358_LDO_VCN33_BT_CON1 MT6358_LDO_VCN33_CON1
+#define MT6358_LDO_VCN33_WIFI_CON1 MT6358_LDO_VCN33_CON1
+#define MT6358_LDO_VCN33_CON2 0x1d2e
+#define MT6358_LDO_VCN33_CON3 0x1d30
+#define MT6358_LDO_VLDO28_CON0_0 0x1d32
+#define MT6358_LDO_VLDO28_CON0_1 0x1d40
+#define MT6358_LDO_VLDO28_CON1 0x1d42
+#define MT6358_LDO_VLDO28_CON2 0x1d44
+#define MT6358_LDO_VLDO28_CON3 0x1d46
+#define MT6358_LDO_VSIM1_CON0 0x1d48
+#define MT6358_LDO_VSIM1_CON1 0x1d56
+#define MT6358_LDO_VSIM1_CON2 0x1d58
+#define MT6358_LDO_VSIM1_CON3 0x1d5a
+#define MT6358_LDO_VSIM2_CON0 0x1d5c
+#define MT6358_LDO_VSIM2_CON1 0x1d6a
+#define MT6358_LDO_VSIM2_CON2 0x1d6c
+#define MT6358_LDO_VSIM2_CON3 0x1d6e
+#define MT6358_LDO_VCN28_CON0 0x1d88
+#define MT6358_LDO_VCN28_CON1 0x1d96
+#define MT6358_LDO_VCN28_CON2 0x1d98
+#define MT6358_LDO_VCN28_CON3 0x1d9a
+#define MT6358_VRTC28_CON0 0x1d9c
+#define MT6358_LDO_VBIF28_CON0 0x1d9e
+#define MT6358_LDO_VBIF28_CON1 0x1dac
+#define MT6358_LDO_VBIF28_CON2 0x1dae
+#define MT6358_LDO_VBIF28_CON3 0x1db0
+#define MT6358_VCAMA1_ANA_CON0 0x1e08
+#define MT6358_VCAMA2_ANA_CON0 0x1e0c
+#define MT6358_VCN33_ANA_CON0 0x1e28
+#define MT6358_VSIM1_ANA_CON0 0x1e2c
+#define MT6358_VSIM2_ANA_CON0 0x1e30
+#define MT6358_VUSB_ANA_CON0 0x1e34
+#define MT6358_VEMC_ANA_CON0 0x1e38
+#define MT6358_VLDO28_ANA_CON0 0x1e3c
+#define MT6358_VIO28_ANA_CON0 0x1e40
+#define MT6358_VIBR_ANA_CON0 0x1e44
+#define MT6358_VMCH_ANA_CON0 0x1e48
+#define MT6358_VMC_ANA_CON0 0x1e4c
+#define MT6358_VRF18_ANA_CON0 0x1e88
+#define MT6358_VCN18_ANA_CON0 0x1e8c
+#define MT6358_VCAMIO_ANA_CON0 0x1e90
+#define MT6358_VIO18_ANA_CON0 0x1e94
+#define MT6358_VEFUSE_ANA_CON0 0x1e98
+#define MT6358_VRF12_ANA_CON0 0x1e9c
+#define MT6358_VSRAM_PROC11_ANA_CON0 0x1ea0
+#define MT6358_VSRAM_PROC12_ANA_CON0 0x1ea4
+#define MT6358_VSRAM_OTHERS_ANA_CON0 0x1ea6
+#define MT6358_VSRAM_GPU_ANA_CON0 0x1ea8
+#define MT6358_VDRAM2_ANA_CON0 0x1eaa
+#define MT6358_VCAMD_ANA_CON0 0x1eae
+#define MT6358_VA12_ANA_CON0 0x1eb2
+#define MT6358_AUD_TOP_INT_CON0 0x2228
+#define MT6358_AUD_TOP_INT_STATUS0 0x2234
+
+#endif /* __MFD_MT6358_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6359/core.h b/include/linux/mfd/mt6359/core.h
new file mode 100644
index 000000000000..8d298868126d
--- /dev/null
+++ b/include/linux/mfd/mt6359/core.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6359_CORE_H__
+#define __MFD_MT6359_CORE_H__
+
+enum mt6359_irq_top_status_shift {
+ MT6359_BUCK_TOP = 0,
+ MT6359_LDO_TOP,
+ MT6359_PSC_TOP,
+ MT6359_SCK_TOP,
+ MT6359_BM_TOP,
+ MT6359_HK_TOP,
+ MT6359_AUD_TOP = 7,
+ MT6359_MISC_TOP,
+};
+
+enum mt6359_irq_numbers {
+ MT6359_IRQ_VCORE_OC = 1,
+ MT6359_IRQ_VGPU11_OC,
+ MT6359_IRQ_VGPU12_OC,
+ MT6359_IRQ_VMODEM_OC,
+ MT6359_IRQ_VPROC1_OC,
+ MT6359_IRQ_VPROC2_OC,
+ MT6359_IRQ_VS1_OC,
+ MT6359_IRQ_VS2_OC,
+ MT6359_IRQ_VPA_OC = 9,
+ MT6359_IRQ_VFE28_OC = 16,
+ MT6359_IRQ_VXO22_OC,
+ MT6359_IRQ_VRF18_OC,
+ MT6359_IRQ_VRF12_OC,
+ MT6359_IRQ_VEFUSE_OC,
+ MT6359_IRQ_VCN33_1_OC,
+ MT6359_IRQ_VCN33_2_OC,
+ MT6359_IRQ_VCN13_OC,
+ MT6359_IRQ_VCN18_OC,
+ MT6359_IRQ_VA09_OC,
+ MT6359_IRQ_VCAMIO_OC,
+ MT6359_IRQ_VA12_OC,
+ MT6359_IRQ_VAUX18_OC,
+ MT6359_IRQ_VAUD18_OC,
+ MT6359_IRQ_VIO18_OC,
+ MT6359_IRQ_VSRAM_PROC1_OC,
+ MT6359_IRQ_VSRAM_PROC2_OC,
+ MT6359_IRQ_VSRAM_OTHERS_OC,
+ MT6359_IRQ_VSRAM_MD_OC,
+ MT6359_IRQ_VEMC_OC,
+ MT6359_IRQ_VSIM1_OC,
+ MT6359_IRQ_VSIM2_OC,
+ MT6359_IRQ_VUSB_OC,
+ MT6359_IRQ_VRFCK_OC,
+ MT6359_IRQ_VBBCK_OC,
+ MT6359_IRQ_VBIF28_OC,
+ MT6359_IRQ_VIBR_OC,
+ MT6359_IRQ_VIO28_OC,
+ MT6359_IRQ_VM18_OC,
+ MT6359_IRQ_VUFS_OC = 45,
+ MT6359_IRQ_PWRKEY = 48,
+ MT6359_IRQ_HOMEKEY,
+ MT6359_IRQ_PWRKEY_R,
+ MT6359_IRQ_HOMEKEY_R,
+ MT6359_IRQ_NI_LBAT_INT,
+ MT6359_IRQ_CHRDET_EDGE = 53,
+ MT6359_IRQ_RTC = 64,
+ MT6359_IRQ_FG_BAT_H = 80,
+ MT6359_IRQ_FG_BAT_L,
+ MT6359_IRQ_FG_CUR_H,
+ MT6359_IRQ_FG_CUR_L,
+ MT6359_IRQ_FG_ZCV = 84,
+ MT6359_IRQ_FG_N_CHARGE_L = 87,
+ MT6359_IRQ_FG_IAVG_H,
+ MT6359_IRQ_FG_IAVG_L = 89,
+ MT6359_IRQ_FG_DISCHARGE = 91,
+ MT6359_IRQ_FG_CHARGE,
+ MT6359_IRQ_BATON_LV = 96,
+ MT6359_IRQ_BATON_BAT_IN = 98,
+ MT6359_IRQ_BATON_BAT_OU,
+ MT6359_IRQ_BIF = 100,
+ MT6359_IRQ_BAT_H = 112,
+ MT6359_IRQ_BAT_L,
+ MT6359_IRQ_BAT2_H,
+ MT6359_IRQ_BAT2_L,
+ MT6359_IRQ_BAT_TEMP_H,
+ MT6359_IRQ_BAT_TEMP_L,
+ MT6359_IRQ_THR_H,
+ MT6359_IRQ_THR_L,
+ MT6359_IRQ_AUXADC_IMP,
+ MT6359_IRQ_NAG_C_DLTV = 121,
+ MT6359_IRQ_AUDIO = 128,
+ MT6359_IRQ_ACCDET = 133,
+ MT6359_IRQ_ACCDET_EINT0,
+ MT6359_IRQ_ACCDET_EINT1,
+ MT6359_IRQ_SPI_CMD_ALERT = 144,
+ MT6359_IRQ_NR,
+};
+
+#define MT6359_IRQ_BUCK_BASE MT6359_IRQ_VCORE_OC
+#define MT6359_IRQ_LDO_BASE MT6359_IRQ_VFE28_OC
+#define MT6359_IRQ_PSC_BASE MT6359_IRQ_PWRKEY
+#define MT6359_IRQ_SCK_BASE MT6359_IRQ_RTC
+#define MT6359_IRQ_BM_BASE MT6359_IRQ_FG_BAT_H
+#define MT6359_IRQ_HK_BASE MT6359_IRQ_BAT_H
+#define MT6359_IRQ_AUD_BASE MT6359_IRQ_AUDIO
+#define MT6359_IRQ_MISC_BASE MT6359_IRQ_SPI_CMD_ALERT
+
+#define MT6359_IRQ_BUCK_BITS (MT6359_IRQ_VPA_OC - MT6359_IRQ_BUCK_BASE + 1)
+#define MT6359_IRQ_LDO_BITS (MT6359_IRQ_VUFS_OC - MT6359_IRQ_LDO_BASE + 1)
+#define MT6359_IRQ_PSC_BITS \
+ (MT6359_IRQ_CHRDET_EDGE - MT6359_IRQ_PSC_BASE + 1)
+#define MT6359_IRQ_SCK_BITS (MT6359_IRQ_RTC - MT6359_IRQ_SCK_BASE + 1)
+#define MT6359_IRQ_BM_BITS (MT6359_IRQ_BIF - MT6359_IRQ_BM_BASE + 1)
+#define MT6359_IRQ_HK_BITS (MT6359_IRQ_NAG_C_DLTV - MT6359_IRQ_HK_BASE + 1)
+#define MT6359_IRQ_AUD_BITS \
+ (MT6359_IRQ_ACCDET_EINT1 - MT6359_IRQ_AUD_BASE + 1)
+#define MT6359_IRQ_MISC_BITS \
+ (MT6359_IRQ_SPI_CMD_ALERT - MT6359_IRQ_MISC_BASE + 1)
+
+#define MT6359_TOP_GEN(sp) \
+{ \
+ .hwirq_base = MT6359_IRQ_##sp##_BASE, \
+ .num_int_regs = \
+ ((MT6359_IRQ_##sp##_BITS - 1) / \
+ MTK_PMIC_REG_WIDTH) + 1, \
+ .en_reg = MT6359_##sp##_TOP_INT_CON0, \
+ .en_reg_shift = 0x6, \
+ .sta_reg = MT6359_##sp##_TOP_INT_STATUS0, \
+ .sta_reg_shift = 0x2, \
+ .top_offset = MT6359_##sp##_TOP, \
+}
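+
+/*
+ * Same convention as MT6358_TOP_GEN() in mt6358/core.h: this expands to
+ * an irq_top_t initializer. Note this header defines no struct of its
+ * own, so the irq_top_t/pmic_irq_data definitions from mt6358/core.h
+ * are presumably shared, letting one irq handler drive both chips.
+ */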
+
+#endif /* __MFD_MT6359_CORE_H__ */
diff --git a/include/linux/mfd/mt6359/registers.h b/include/linux/mfd/mt6359/registers.h
new file mode 100644
index 000000000000..2a4394a27b1c
--- /dev/null
+++ b/include/linux/mfd/mt6359/registers.h
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6359_REGISTERS_H__
+#define __MFD_MT6359_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6359_SWCID 0xa
+#define MT6359_TOPSTATUS 0x2a
+#define MT6359_TOP_RST_MISC 0x14c
+#define MT6359_MISC_TOP_INT_CON0 0x188
+#define MT6359_MISC_TOP_INT_STATUS0 0x194
+#define MT6359_TOP_INT_STATUS0 0x19e
+#define MT6359_SCK_TOP_INT_CON0 0x528
+#define MT6359_SCK_TOP_INT_STATUS0 0x534
+#define MT6359_EOSC_CALI_CON0 0x53a
+#define MT6359_EOSC_CALI_CON1 0x53c
+#define MT6359_RTC_MIX_CON0 0x53e
+#define MT6359_RTC_MIX_CON1 0x540
+#define MT6359_RTC_MIX_CON2 0x542
+#define MT6359_RTC_DSN_ID 0x580
+#define MT6359_RTC_DSN_REV0 0x582
+#define MT6359_RTC_DBI 0x584
+#define MT6359_RTC_DXI 0x586
+#define MT6359_RTC_BBPU 0x588
+#define MT6359_RTC_IRQ_STA 0x58a
+#define MT6359_RTC_IRQ_EN 0x58c
+#define MT6359_RTC_CII_EN 0x58e
+#define MT6359_RTC_AL_MASK 0x590
+#define MT6359_RTC_TC_SEC 0x592
+#define MT6359_RTC_TC_MIN 0x594
+#define MT6359_RTC_TC_HOU 0x596
+#define MT6359_RTC_TC_DOM 0x598
+#define MT6359_RTC_TC_DOW 0x59a
+#define MT6359_RTC_TC_MTH 0x59c
+#define MT6359_RTC_TC_YEA 0x59e
+#define MT6359_RTC_AL_SEC 0x5a0
+#define MT6359_RTC_AL_MIN 0x5a2
+#define MT6359_RTC_AL_HOU 0x5a4
+#define MT6359_RTC_AL_DOM 0x5a6
+#define MT6359_RTC_AL_DOW 0x5a8
+#define MT6359_RTC_AL_MTH 0x5aa
+#define MT6359_RTC_AL_YEA 0x5ac
+#define MT6359_RTC_OSC32CON 0x5ae
+#define MT6359_RTC_POWERKEY1 0x5b0
+#define MT6359_RTC_POWERKEY2 0x5b2
+#define MT6359_RTC_PDN1 0x5b4
+#define MT6359_RTC_PDN2 0x5b6
+#define MT6359_RTC_SPAR0 0x5b8
+#define MT6359_RTC_SPAR1 0x5ba
+#define MT6359_RTC_PROT 0x5bc
+#define MT6359_RTC_DIFF 0x5be
+#define MT6359_RTC_CALI 0x5c0
+#define MT6359_RTC_WRTGR 0x5c2
+#define MT6359_RTC_CON 0x5c4
+#define MT6359_RTC_SEC_CTRL 0x5c6
+#define MT6359_RTC_INT_CNT 0x5c8
+#define MT6359_RTC_SEC_DAT0 0x5ca
+#define MT6359_RTC_SEC_DAT1 0x5cc
+#define MT6359_RTC_SEC_DAT2 0x5ce
+#define MT6359_RTC_SEC_DSN_ID 0x600
+#define MT6359_RTC_SEC_DSN_REV0 0x602
+#define MT6359_RTC_SEC_DBI 0x604
+#define MT6359_RTC_SEC_DXI 0x606
+#define MT6359_RTC_TC_SEC_SEC 0x608
+#define MT6359_RTC_TC_MIN_SEC 0x60a
+#define MT6359_RTC_TC_HOU_SEC 0x60c
+#define MT6359_RTC_TC_DOM_SEC 0x60e
+#define MT6359_RTC_TC_DOW_SEC 0x610
+#define MT6359_RTC_TC_MTH_SEC 0x612
+#define MT6359_RTC_TC_YEA_SEC 0x614
+#define MT6359_RTC_SEC_CK_PDN 0x616
+#define MT6359_RTC_SEC_WRTGR 0x618
+#define MT6359_PSC_TOP_INT_CON0 0x910
+#define MT6359_PSC_TOP_INT_STATUS0 0x91c
+#define MT6359_BM_TOP_INT_CON0 0xc32
+#define MT6359_BM_TOP_INT_CON1 0xc38
+#define MT6359_BM_TOP_INT_STATUS0 0xc4a
+#define MT6359_BM_TOP_INT_STATUS1 0xc4c
+#define MT6359_HK_TOP_INT_CON0 0xf92
+#define MT6359_HK_TOP_INT_STATUS0 0xf9e
+#define MT6359_BUCK_TOP_INT_CON0 0x1418
+#define MT6359_BUCK_TOP_INT_STATUS0 0x1424
+#define MT6359_BUCK_VPU_CON0 0x1488
+#define MT6359_BUCK_VPU_DBG0 0x14a6
+#define MT6359_BUCK_VPU_DBG1 0x14a8
+#define MT6359_BUCK_VPU_ELR0 0x14ac
+#define MT6359_BUCK_VCORE_CON0 0x1508
+#define MT6359_BUCK_VCORE_DBG0 0x1526
+#define MT6359_BUCK_VCORE_DBG1 0x1528
+#define MT6359_BUCK_VCORE_SSHUB_CON0 0x152a
+#define MT6359_BUCK_VCORE_ELR0 0x1534
+#define MT6359_BUCK_VGPU11_CON0 0x1588
+#define MT6359_BUCK_VGPU11_DBG0 0x15a6
+#define MT6359_BUCK_VGPU11_DBG1 0x15a8
+#define MT6359_BUCK_VGPU11_ELR0 0x15ac
+#define MT6359_BUCK_VMODEM_CON0 0x1688
+#define MT6359_BUCK_VMODEM_DBG0 0x16a6
+#define MT6359_BUCK_VMODEM_DBG1 0x16a8
+#define MT6359_BUCK_VMODEM_ELR0 0x16ae
+#define MT6359_BUCK_VPROC1_CON0 0x1708
+#define MT6359_BUCK_VPROC1_DBG0 0x1726
+#define MT6359_BUCK_VPROC1_DBG1 0x1728
+#define MT6359_BUCK_VPROC1_ELR0 0x172e
+#define MT6359_BUCK_VPROC2_CON0 0x1788
+#define MT6359_BUCK_VPROC2_DBG0 0x17a6
+#define MT6359_BUCK_VPROC2_DBG1 0x17a8
+#define MT6359_BUCK_VPROC2_ELR0 0x17b2
+#define MT6359_BUCK_VS1_CON0 0x1808
+#define MT6359_BUCK_VS1_DBG0 0x1826
+#define MT6359_BUCK_VS1_DBG1 0x1828
+#define MT6359_BUCK_VS1_ELR0 0x1834
+#define MT6359_BUCK_VS2_CON0 0x1888
+#define MT6359_BUCK_VS2_DBG0 0x18a6
+#define MT6359_BUCK_VS2_DBG1 0x18a8
+#define MT6359_BUCK_VS2_ELR0 0x18b4
+#define MT6359_BUCK_VPA_CON0 0x1908
+#define MT6359_BUCK_VPA_CON1 0x190e
+#define MT6359_BUCK_VPA_CFG0 0x1910
+#define MT6359_BUCK_VPA_CFG1 0x1912
+#define MT6359_BUCK_VPA_DBG0 0x1914
+#define MT6359_BUCK_VPA_DBG1 0x1916
+#define MT6359_VGPUVCORE_ANA_CON2 0x198e
+#define MT6359_VGPUVCORE_ANA_CON13 0x19a4
+#define MT6359_VPROC1_ANA_CON3 0x19b2
+#define MT6359_VPROC2_ANA_CON3 0x1a0e
+#define MT6359_VMODEM_ANA_CON3 0x1a1a
+#define MT6359_VPU_ANA_CON3 0x1a26
+#define MT6359_VS1_ANA_CON0 0x1a2c
+#define MT6359_VS2_ANA_CON0 0x1a34
+#define MT6359_VPA_ANA_CON0 0x1a3c
+#define MT6359_LDO_TOP_INT_CON0 0x1b14
+#define MT6359_LDO_TOP_INT_CON1 0x1b1a
+#define MT6359_LDO_TOP_INT_STATUS0 0x1b28
+#define MT6359_LDO_TOP_INT_STATUS1 0x1b2a
+#define MT6359_LDO_VSRAM_PROC1_ELR 0x1b40
+#define MT6359_LDO_VSRAM_PROC2_ELR 0x1b42
+#define MT6359_LDO_VSRAM_OTHERS_ELR 0x1b44
+#define MT6359_LDO_VSRAM_MD_ELR 0x1b46
+#define MT6359_LDO_VFE28_CON0 0x1b88
+#define MT6359_LDO_VFE28_MON 0x1b8a
+#define MT6359_LDO_VXO22_CON0 0x1b98
+#define MT6359_LDO_VXO22_MON 0x1b9a
+#define MT6359_LDO_VRF18_CON0 0x1ba8
+#define MT6359_LDO_VRF18_MON 0x1baa
+#define MT6359_LDO_VRF12_CON0 0x1bb8
+#define MT6359_LDO_VRF12_MON 0x1bba
+#define MT6359_LDO_VEFUSE_CON0 0x1bc8
+#define MT6359_LDO_VEFUSE_MON 0x1bca
+#define MT6359_LDO_VCN33_1_CON0 0x1bd8
+#define MT6359_LDO_VCN33_1_MON 0x1bda
+#define MT6359_LDO_VCN33_1_MULTI_SW 0x1be8
+#define MT6359_LDO_VCN33_2_CON0 0x1c08
+#define MT6359_LDO_VCN33_2_MON 0x1c0a
+#define MT6359_LDO_VCN33_2_MULTI_SW 0x1c18
+#define MT6359_LDO_VCN13_CON0 0x1c1a
+#define MT6359_LDO_VCN13_MON 0x1c1c
+#define MT6359_LDO_VCN18_CON0 0x1c2a
+#define MT6359_LDO_VCN18_MON 0x1c2c
+#define MT6359_LDO_VA09_CON0 0x1c3a
+#define MT6359_LDO_VA09_MON 0x1c3c
+#define MT6359_LDO_VCAMIO_CON0 0x1c4a
+#define MT6359_LDO_VCAMIO_MON 0x1c4c
+#define MT6359_LDO_VA12_CON0 0x1c5a
+#define MT6359_LDO_VA12_MON 0x1c5c
+#define MT6359_LDO_VAUX18_CON0 0x1c88
+#define MT6359_LDO_VAUX18_MON 0x1c8a
+#define MT6359_LDO_VAUD18_CON0 0x1c98
+#define MT6359_LDO_VAUD18_MON 0x1c9a
+#define MT6359_LDO_VIO18_CON0 0x1ca8
+#define MT6359_LDO_VIO18_MON 0x1caa
+#define MT6359_LDO_VEMC_CON0 0x1cb8
+#define MT6359_LDO_VEMC_MON 0x1cba
+#define MT6359_LDO_VSIM1_CON0 0x1cc8
+#define MT6359_LDO_VSIM1_MON 0x1cca
+#define MT6359_LDO_VSIM2_CON0 0x1cd8
+#define MT6359_LDO_VSIM2_MON 0x1cda
+#define MT6359_LDO_VUSB_CON0 0x1d08
+#define MT6359_LDO_VUSB_MON 0x1d0a
+#define MT6359_LDO_VUSB_MULTI_SW 0x1d18
+#define MT6359_LDO_VRFCK_CON0 0x1d1a
+#define MT6359_LDO_VRFCK_MON 0x1d1c
+#define MT6359_LDO_VBBCK_CON0 0x1d2a
+#define MT6359_LDO_VBBCK_MON 0x1d2c
+#define MT6359_LDO_VBIF28_CON0 0x1d3a
+#define MT6359_LDO_VBIF28_MON 0x1d3c
+#define MT6359_LDO_VIBR_CON0 0x1d4a
+#define MT6359_LDO_VIBR_MON 0x1d4c
+#define MT6359_LDO_VIO28_CON0 0x1d5a
+#define MT6359_LDO_VIO28_MON 0x1d5c
+#define MT6359_LDO_VM18_CON0 0x1d88
+#define MT6359_LDO_VM18_MON 0x1d8a
+#define MT6359_LDO_VUFS_CON0 0x1d98
+#define MT6359_LDO_VUFS_MON 0x1d9a
+#define MT6359_LDO_VSRAM_PROC1_CON0 0x1e88
+#define MT6359_LDO_VSRAM_PROC1_MON 0x1e8a
+#define MT6359_LDO_VSRAM_PROC1_VOSEL1 0x1e8e
+#define MT6359_LDO_VSRAM_PROC2_CON0 0x1ea6
+#define MT6359_LDO_VSRAM_PROC2_MON 0x1ea8
+#define MT6359_LDO_VSRAM_PROC2_VOSEL1 0x1eac
+#define MT6359_LDO_VSRAM_OTHERS_CON0 0x1f08
+#define MT6359_LDO_VSRAM_OTHERS_MON 0x1f0a
+#define MT6359_LDO_VSRAM_OTHERS_VOSEL1 0x1f0e
+#define MT6359_LDO_VSRAM_OTHERS_SSHUB 0x1f26
+#define MT6359_LDO_VSRAM_MD_CON0 0x1f2c
+#define MT6359_LDO_VSRAM_MD_MON 0x1f2e
+#define MT6359_LDO_VSRAM_MD_VOSEL1 0x1f32
+#define MT6359_VFE28_ANA_CON0 0x1f88
+#define MT6359_VAUX18_ANA_CON0 0x1f8c
+#define MT6359_VUSB_ANA_CON0 0x1f90
+#define MT6359_VBIF28_ANA_CON0 0x1f94
+#define MT6359_VCN33_1_ANA_CON0 0x1f98
+#define MT6359_VCN33_2_ANA_CON0 0x1f9c
+#define MT6359_VEMC_ANA_CON0 0x1fa0
+#define MT6359_VSIM1_ANA_CON0 0x1fa4
+#define MT6359_VSIM2_ANA_CON0 0x1fa8
+#define MT6359_VIO28_ANA_CON0 0x1fac
+#define MT6359_VIBR_ANA_CON0 0x1fb0
+#define MT6359_VRF18_ANA_CON0 0x2008
+#define MT6359_VEFUSE_ANA_CON0 0x200c
+#define MT6359_VCN18_ANA_CON0 0x2010
+#define MT6359_VCAMIO_ANA_CON0 0x2014
+#define MT6359_VAUD18_ANA_CON0 0x2018
+#define MT6359_VIO18_ANA_CON0 0x201c
+#define MT6359_VM18_ANA_CON0 0x2020
+#define MT6359_VUFS_ANA_CON0 0x2024
+#define MT6359_VRF12_ANA_CON0 0x202a
+#define MT6359_VCN13_ANA_CON0 0x202e
+#define MT6359_VA09_ANA_CON0 0x2032
+#define MT6359_VA12_ANA_CON0 0x2036
+#define MT6359_VXO22_ANA_CON0 0x2088
+#define MT6359_VRFCK_ANA_CON0 0x208c
+#define MT6359_VBBCK_ANA_CON0 0x2094
+#define MT6359_AUD_TOP_INT_CON0 0x2328
+#define MT6359_AUD_TOP_INT_STATUS0 0x2334
+
+#define MT6359_RG_BUCK_VPU_EN_ADDR MT6359_BUCK_VPU_CON0
+#define MT6359_RG_BUCK_VPU_LP_ADDR MT6359_BUCK_VPU_CON0
+#define MT6359_RG_BUCK_VPU_LP_SHIFT 1
+#define MT6359_DA_VPU_VOSEL_ADDR MT6359_BUCK_VPU_DBG0
+#define MT6359_DA_VPU_VOSEL_MASK 0x7F
+#define MT6359_DA_VPU_VOSEL_SHIFT 0
+#define MT6359_DA_VPU_EN_ADDR MT6359_BUCK_VPU_DBG1
+#define MT6359_RG_BUCK_VPU_VOSEL_ADDR MT6359_BUCK_VPU_ELR0
+#define MT6359_RG_BUCK_VPU_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VPU_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VCORE_EN_ADDR MT6359_BUCK_VCORE_CON0
+#define MT6359_RG_BUCK_VCORE_LP_ADDR MT6359_BUCK_VCORE_CON0
+#define MT6359_RG_BUCK_VCORE_LP_SHIFT 1
+#define MT6359_DA_VCORE_VOSEL_ADDR MT6359_BUCK_VCORE_DBG0
+#define MT6359_DA_VCORE_VOSEL_MASK 0x7F
+#define MT6359_DA_VCORE_VOSEL_SHIFT 0
+#define MT6359_DA_VCORE_EN_ADDR MT6359_BUCK_VCORE_DBG1
+#define MT6359_RG_BUCK_VCORE_SSHUB_EN_ADDR MT6359_BUCK_VCORE_SSHUB_CON0
+#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_ADDR MT6359_BUCK_VCORE_SSHUB_CON0
+#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_SHIFT 4
+#define MT6359_RG_BUCK_VCORE_VOSEL_ADDR MT6359_BUCK_VCORE_ELR0
+#define MT6359_RG_BUCK_VCORE_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VCORE_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VGPU11_EN_ADDR MT6359_BUCK_VGPU11_CON0
+#define MT6359_RG_BUCK_VGPU11_LP_ADDR MT6359_BUCK_VGPU11_CON0
+#define MT6359_RG_BUCK_VGPU11_LP_SHIFT 1
+#define MT6359_DA_VGPU11_VOSEL_ADDR MT6359_BUCK_VGPU11_DBG0
+#define MT6359_DA_VGPU11_VOSEL_MASK 0x7F
+#define MT6359_DA_VGPU11_VOSEL_SHIFT 0
+#define MT6359_DA_VGPU11_EN_ADDR MT6359_BUCK_VGPU11_DBG1
+#define MT6359_RG_BUCK_VGPU11_VOSEL_ADDR MT6359_BUCK_VGPU11_ELR0
+#define MT6359_RG_BUCK_VGPU11_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VGPU11_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VMODEM_EN_ADDR MT6359_BUCK_VMODEM_CON0
+#define MT6359_RG_BUCK_VMODEM_LP_ADDR MT6359_BUCK_VMODEM_CON0
+#define MT6359_RG_BUCK_VMODEM_LP_SHIFT 1
+#define MT6359_DA_VMODEM_VOSEL_ADDR MT6359_BUCK_VMODEM_DBG0
+#define MT6359_DA_VMODEM_VOSEL_MASK 0x7F
+#define MT6359_DA_VMODEM_VOSEL_SHIFT 0
+#define MT6359_DA_VMODEM_EN_ADDR MT6359_BUCK_VMODEM_DBG1
+#define MT6359_RG_BUCK_VMODEM_VOSEL_ADDR MT6359_BUCK_VMODEM_ELR0
+#define MT6359_RG_BUCK_VMODEM_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VMODEM_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VPROC1_EN_ADDR MT6359_BUCK_VPROC1_CON0
+#define MT6359_RG_BUCK_VPROC1_LP_ADDR MT6359_BUCK_VPROC1_CON0
+#define MT6359_RG_BUCK_VPROC1_LP_SHIFT 1
+#define MT6359_DA_VPROC1_VOSEL_ADDR MT6359_BUCK_VPROC1_DBG0
+#define MT6359_DA_VPROC1_VOSEL_MASK 0x7F
+#define MT6359_DA_VPROC1_VOSEL_SHIFT 0
+#define MT6359_DA_VPROC1_EN_ADDR MT6359_BUCK_VPROC1_DBG1
+#define MT6359_RG_BUCK_VPROC1_VOSEL_ADDR MT6359_BUCK_VPROC1_ELR0
+#define MT6359_RG_BUCK_VPROC1_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VPROC1_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VPROC2_EN_ADDR MT6359_BUCK_VPROC2_CON0
+#define MT6359_RG_BUCK_VPROC2_LP_ADDR MT6359_BUCK_VPROC2_CON0
+#define MT6359_RG_BUCK_VPROC2_LP_SHIFT 1
+#define MT6359_DA_VPROC2_VOSEL_ADDR MT6359_BUCK_VPROC2_DBG0
+#define MT6359_DA_VPROC2_VOSEL_MASK 0x7F
+#define MT6359_DA_VPROC2_VOSEL_SHIFT 0
+#define MT6359_DA_VPROC2_EN_ADDR MT6359_BUCK_VPROC2_DBG1
+#define MT6359_RG_BUCK_VPROC2_VOSEL_ADDR MT6359_BUCK_VPROC2_ELR0
+#define MT6359_RG_BUCK_VPROC2_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VPROC2_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VS1_EN_ADDR MT6359_BUCK_VS1_CON0
+#define MT6359_RG_BUCK_VS1_LP_ADDR MT6359_BUCK_VS1_CON0
+#define MT6359_RG_BUCK_VS1_LP_SHIFT 1
+#define MT6359_DA_VS1_VOSEL_ADDR MT6359_BUCK_VS1_DBG0
+#define MT6359_DA_VS1_VOSEL_MASK 0x7F
+#define MT6359_DA_VS1_VOSEL_SHIFT 0
+#define MT6359_DA_VS1_EN_ADDR MT6359_BUCK_VS1_DBG1
+#define MT6359_RG_BUCK_VS1_VOSEL_ADDR MT6359_BUCK_VS1_ELR0
+#define MT6359_RG_BUCK_VS1_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VS1_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VS2_EN_ADDR MT6359_BUCK_VS2_CON0
+#define MT6359_RG_BUCK_VS2_LP_ADDR MT6359_BUCK_VS2_CON0
+#define MT6359_RG_BUCK_VS2_LP_SHIFT 1
+#define MT6359_DA_VS2_VOSEL_ADDR MT6359_BUCK_VS2_DBG0
+#define MT6359_DA_VS2_VOSEL_MASK 0x7F
+#define MT6359_DA_VS2_VOSEL_SHIFT 0
+#define MT6359_DA_VS2_EN_ADDR MT6359_BUCK_VS2_DBG1
+#define MT6359_RG_BUCK_VS2_VOSEL_ADDR MT6359_BUCK_VS2_ELR0
+#define MT6359_RG_BUCK_VS2_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VS2_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VPA_EN_ADDR MT6359_BUCK_VPA_CON0
+#define MT6359_RG_BUCK_VPA_LP_ADDR MT6359_BUCK_VPA_CON0
+#define MT6359_RG_BUCK_VPA_LP_SHIFT 1
+#define MT6359_RG_BUCK_VPA_VOSEL_ADDR MT6359_BUCK_VPA_CON1
+#define MT6359_RG_BUCK_VPA_VOSEL_MASK 0x3F
+#define MT6359_RG_BUCK_VPA_VOSEL_SHIFT 0
+#define MT6359_DA_VPA_VOSEL_ADDR MT6359_BUCK_VPA_DBG0
+#define MT6359_DA_VPA_VOSEL_MASK 0x3F
+#define MT6359_DA_VPA_VOSEL_SHIFT 0
+#define MT6359_DA_VPA_EN_ADDR MT6359_BUCK_VPA_DBG1
+#define MT6359_RG_VGPU11_FCCM_ADDR MT6359_VGPUVCORE_ANA_CON2
+#define MT6359_RG_VGPU11_FCCM_SHIFT 9
+#define MT6359_RG_VCORE_FCCM_ADDR MT6359_VGPUVCORE_ANA_CON13
+#define MT6359_RG_VCORE_FCCM_SHIFT 5
+#define MT6359_RG_VPROC1_FCCM_ADDR MT6359_VPROC1_ANA_CON3
+#define MT6359_RG_VPROC1_FCCM_SHIFT 1
+#define MT6359_RG_VPROC2_FCCM_ADDR MT6359_VPROC2_ANA_CON3
+#define MT6359_RG_VPROC2_FCCM_SHIFT 1
+#define MT6359_RG_VMODEM_FCCM_ADDR MT6359_VMODEM_ANA_CON3
+#define MT6359_RG_VMODEM_FCCM_SHIFT 1
+#define MT6359_RG_VPU_FCCM_ADDR MT6359_VPU_ANA_CON3
+#define MT6359_RG_VPU_FCCM_SHIFT 1
+#define MT6359_RG_VS1_FPWM_ADDR MT6359_VS1_ANA_CON0
+#define MT6359_RG_VS1_FPWM_SHIFT 3
+#define MT6359_RG_VS2_FPWM_ADDR MT6359_VS2_ANA_CON0
+#define MT6359_RG_VS2_FPWM_SHIFT 3
+#define MT6359_RG_VPA_MODESET_ADDR MT6359_VPA_ANA_CON0
+#define MT6359_RG_VPA_MODESET_SHIFT 1
+#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_ADDR MT6359_LDO_VSRAM_PROC1_ELR
+#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_ADDR MT6359_LDO_VSRAM_PROC2_ELR
+#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_ELR
+#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VSRAM_MD_VOSEL_ADDR MT6359_LDO_VSRAM_MD_ELR
+#define MT6359_RG_LDO_VSRAM_MD_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_MD_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VFE28_EN_ADDR MT6359_LDO_VFE28_CON0
+#define MT6359_DA_VFE28_B_EN_ADDR MT6359_LDO_VFE28_MON
+#define MT6359_RG_LDO_VXO22_EN_ADDR MT6359_LDO_VXO22_CON0
+#define MT6359_RG_LDO_VXO22_EN_SHIFT 0
+#define MT6359_DA_VXO22_B_EN_ADDR MT6359_LDO_VXO22_MON
+#define MT6359_RG_LDO_VRF18_EN_ADDR MT6359_LDO_VRF18_CON0
+#define MT6359_RG_LDO_VRF18_EN_SHIFT 0
+#define MT6359_DA_VRF18_B_EN_ADDR MT6359_LDO_VRF18_MON
+#define MT6359_RG_LDO_VRF12_EN_ADDR MT6359_LDO_VRF12_CON0
+#define MT6359_RG_LDO_VRF12_EN_SHIFT 0
+#define MT6359_DA_VRF12_B_EN_ADDR MT6359_LDO_VRF12_MON
+#define MT6359_RG_LDO_VEFUSE_EN_ADDR MT6359_LDO_VEFUSE_CON0
+#define MT6359_RG_LDO_VEFUSE_EN_SHIFT 0
+#define MT6359_DA_VEFUSE_B_EN_ADDR MT6359_LDO_VEFUSE_MON
+#define MT6359_RG_LDO_VCN33_1_EN_0_ADDR MT6359_LDO_VCN33_1_CON0
+#define MT6359_RG_LDO_VCN33_1_EN_0_MASK 0x1
+#define MT6359_RG_LDO_VCN33_1_EN_0_SHIFT 0
+#define MT6359_DA_VCN33_1_B_EN_ADDR MT6359_LDO_VCN33_1_MON
+#define MT6359_RG_LDO_VCN33_1_EN_1_ADDR MT6359_LDO_VCN33_1_MULTI_SW
+#define MT6359_RG_LDO_VCN33_1_EN_1_SHIFT 15
+#define MT6359_RG_LDO_VCN33_2_EN_0_ADDR MT6359_LDO_VCN33_2_CON0
+#define MT6359_RG_LDO_VCN33_2_EN_0_SHIFT 0
+#define MT6359_DA_VCN33_2_B_EN_ADDR MT6359_LDO_VCN33_2_MON
+#define MT6359_RG_LDO_VCN33_2_EN_1_ADDR MT6359_LDO_VCN33_2_MULTI_SW
+#define MT6359_RG_LDO_VCN33_2_EN_1_MASK 0x1
+#define MT6359_RG_LDO_VCN33_2_EN_1_SHIFT 15
+#define MT6359_RG_LDO_VCN13_EN_ADDR MT6359_LDO_VCN13_CON0
+#define MT6359_RG_LDO_VCN13_EN_SHIFT 0
+#define MT6359_DA_VCN13_B_EN_ADDR MT6359_LDO_VCN13_MON
+#define MT6359_RG_LDO_VCN18_EN_ADDR MT6359_LDO_VCN18_CON0
+#define MT6359_DA_VCN18_B_EN_ADDR MT6359_LDO_VCN18_MON
+#define MT6359_RG_LDO_VA09_EN_ADDR MT6359_LDO_VA09_CON0
+#define MT6359_RG_LDO_VA09_EN_SHIFT 0
+#define MT6359_DA_VA09_B_EN_ADDR MT6359_LDO_VA09_MON
+#define MT6359_RG_LDO_VCAMIO_EN_ADDR MT6359_LDO_VCAMIO_CON0
+#define MT6359_RG_LDO_VCAMIO_EN_SHIFT 0
+#define MT6359_DA_VCAMIO_B_EN_ADDR MT6359_LDO_VCAMIO_MON
+#define MT6359_RG_LDO_VA12_EN_ADDR MT6359_LDO_VA12_CON0
+#define MT6359_RG_LDO_VA12_EN_SHIFT 0
+#define MT6359_DA_VA12_B_EN_ADDR MT6359_LDO_VA12_MON
+#define MT6359_RG_LDO_VAUX18_EN_ADDR MT6359_LDO_VAUX18_CON0
+#define MT6359_DA_VAUX18_B_EN_ADDR MT6359_LDO_VAUX18_MON
+#define MT6359_RG_LDO_VAUD18_EN_ADDR MT6359_LDO_VAUD18_CON0
+#define MT6359_DA_VAUD18_B_EN_ADDR MT6359_LDO_VAUD18_MON
+#define MT6359_RG_LDO_VIO18_EN_ADDR MT6359_LDO_VIO18_CON0
+#define MT6359_RG_LDO_VIO18_EN_SHIFT 0
+#define MT6359_DA_VIO18_B_EN_ADDR MT6359_LDO_VIO18_MON
+#define MT6359_RG_LDO_VEMC_EN_ADDR MT6359_LDO_VEMC_CON0
+#define MT6359_RG_LDO_VEMC_EN_SHIFT 0
+#define MT6359_DA_VEMC_B_EN_ADDR MT6359_LDO_VEMC_MON
+#define MT6359_RG_LDO_VSIM1_EN_ADDR MT6359_LDO_VSIM1_CON0
+#define MT6359_RG_LDO_VSIM1_EN_SHIFT 0
+#define MT6359_DA_VSIM1_B_EN_ADDR MT6359_LDO_VSIM1_MON
+#define MT6359_RG_LDO_VSIM2_EN_ADDR MT6359_LDO_VSIM2_CON0
+#define MT6359_RG_LDO_VSIM2_EN_SHIFT 0
+#define MT6359_DA_VSIM2_B_EN_ADDR MT6359_LDO_VSIM2_MON
+#define MT6359_RG_LDO_VUSB_EN_0_ADDR MT6359_LDO_VUSB_CON0
+#define MT6359_RG_LDO_VUSB_EN_0_MASK 0x1
+#define MT6359_RG_LDO_VUSB_EN_0_SHIFT 0
+#define MT6359_DA_VUSB_B_EN_ADDR MT6359_LDO_VUSB_MON
+#define MT6359_RG_LDO_VUSB_EN_1_ADDR MT6359_LDO_VUSB_MULTI_SW
+#define MT6359_RG_LDO_VUSB_EN_1_MASK 0x1
+#define MT6359_RG_LDO_VUSB_EN_1_SHIFT 15
+#define MT6359_RG_LDO_VRFCK_EN_ADDR MT6359_LDO_VRFCK_CON0
+#define MT6359_RG_LDO_VRFCK_EN_SHIFT 0
+#define MT6359_DA_VRFCK_B_EN_ADDR MT6359_LDO_VRFCK_MON
+#define MT6359_RG_LDO_VBBCK_EN_ADDR MT6359_LDO_VBBCK_CON0
+#define MT6359_RG_LDO_VBBCK_EN_SHIFT 0
+#define MT6359_DA_VBBCK_B_EN_ADDR MT6359_LDO_VBBCK_MON
+#define MT6359_RG_LDO_VBIF28_EN_ADDR MT6359_LDO_VBIF28_CON0
+#define MT6359_DA_VBIF28_B_EN_ADDR MT6359_LDO_VBIF28_MON
+#define MT6359_RG_LDO_VIBR_EN_ADDR MT6359_LDO_VIBR_CON0
+#define MT6359_RG_LDO_VIBR_EN_SHIFT 0
+#define MT6359_DA_VIBR_B_EN_ADDR MT6359_LDO_VIBR_MON
+#define MT6359_RG_LDO_VIO28_EN_ADDR MT6359_LDO_VIO28_CON0
+#define MT6359_RG_LDO_VIO28_EN_SHIFT 0
+#define MT6359_DA_VIO28_B_EN_ADDR MT6359_LDO_VIO28_MON
+#define MT6359_RG_LDO_VM18_EN_ADDR MT6359_LDO_VM18_CON0
+#define MT6359_RG_LDO_VM18_EN_SHIFT 0
+#define MT6359_DA_VM18_B_EN_ADDR MT6359_LDO_VM18_MON
+#define MT6359_RG_LDO_VUFS_EN_ADDR MT6359_LDO_VUFS_CON0
+#define MT6359_RG_LDO_VUFS_EN_SHIFT 0
+#define MT6359_DA_VUFS_B_EN_ADDR MT6359_LDO_VUFS_MON
+#define MT6359_RG_LDO_VSRAM_PROC1_EN_ADDR MT6359_LDO_VSRAM_PROC1_CON0
+#define MT6359_DA_VSRAM_PROC1_B_EN_ADDR MT6359_LDO_VSRAM_PROC1_MON
+#define MT6359_DA_VSRAM_PROC1_VOSEL_ADDR MT6359_LDO_VSRAM_PROC1_VOSEL1
+#define MT6359_DA_VSRAM_PROC1_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_PROC1_VOSEL_SHIFT 8
+#define MT6359_RG_LDO_VSRAM_PROC2_EN_ADDR MT6359_LDO_VSRAM_PROC2_CON0
+#define MT6359_DA_VSRAM_PROC2_B_EN_ADDR MT6359_LDO_VSRAM_PROC2_MON
+#define MT6359_DA_VSRAM_PROC2_VOSEL_ADDR MT6359_LDO_VSRAM_PROC2_VOSEL1
+#define MT6359_DA_VSRAM_PROC2_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_PROC2_VOSEL_SHIFT 8
+#define MT6359_RG_LDO_VSRAM_OTHERS_EN_ADDR MT6359_LDO_VSRAM_OTHERS_CON0
+#define MT6359_DA_VSRAM_OTHERS_B_EN_ADDR MT6359_LDO_VSRAM_OTHERS_MON
+#define MT6359_DA_VSRAM_OTHERS_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_VOSEL1
+#define MT6359_DA_VSRAM_OTHERS_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_OTHERS_VOSEL_SHIFT 8
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_EN_ADDR MT6359_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_SHIFT 1
+#define MT6359_RG_LDO_VSRAM_MD_EN_ADDR MT6359_LDO_VSRAM_MD_CON0
+#define MT6359_DA_VSRAM_MD_B_EN_ADDR MT6359_LDO_VSRAM_MD_MON
+#define MT6359_DA_VSRAM_MD_VOSEL_ADDR MT6359_LDO_VSRAM_MD_VOSEL1
+#define MT6359_DA_VSRAM_MD_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_MD_VOSEL_SHIFT 8
+#define MT6359_RG_VCN33_1_VOSEL_ADDR MT6359_VCN33_1_ANA_CON0
+#define MT6359_RG_VCN33_1_VOSEL_MASK 0xF
+#define MT6359_RG_VCN33_1_VOSEL_SHIFT 8
+#define MT6359_RG_VCN33_2_VOSEL_ADDR MT6359_VCN33_2_ANA_CON0
+#define MT6359_RG_VCN33_2_VOSEL_MASK 0xF
+#define MT6359_RG_VCN33_2_VOSEL_SHIFT 8
+#define MT6359_RG_VEMC_VOSEL_ADDR MT6359_VEMC_ANA_CON0
+#define MT6359_RG_VEMC_VOSEL_MASK 0xF
+#define MT6359_RG_VEMC_VOSEL_SHIFT 8
+#define MT6359_RG_VSIM1_VOSEL_ADDR MT6359_VSIM1_ANA_CON0
+#define MT6359_RG_VSIM1_VOSEL_MASK 0xF
+#define MT6359_RG_VSIM1_VOSEL_SHIFT 8
+#define MT6359_RG_VSIM2_VOSEL_ADDR MT6359_VSIM2_ANA_CON0
+#define MT6359_RG_VSIM2_VOSEL_MASK 0xF
+#define MT6359_RG_VSIM2_VOSEL_SHIFT 8
+#define MT6359_RG_VIO28_VOSEL_ADDR MT6359_VIO28_ANA_CON0
+#define MT6359_RG_VIO28_VOSEL_MASK 0xF
+#define MT6359_RG_VIO28_VOSEL_SHIFT 8
+#define MT6359_RG_VIBR_VOSEL_ADDR MT6359_VIBR_ANA_CON0
+#define MT6359_RG_VIBR_VOSEL_MASK 0xF
+#define MT6359_RG_VIBR_VOSEL_SHIFT 8
+#define MT6359_RG_VRF18_VOSEL_ADDR MT6359_VRF18_ANA_CON0
+#define MT6359_RG_VRF18_VOSEL_MASK 0xF
+#define MT6359_RG_VRF18_VOSEL_SHIFT 8
+#define MT6359_RG_VEFUSE_VOSEL_ADDR MT6359_VEFUSE_ANA_CON0
+#define MT6359_RG_VEFUSE_VOSEL_MASK 0xF
+#define MT6359_RG_VEFUSE_VOSEL_SHIFT 8
+#define MT6359_RG_VCAMIO_VOSEL_ADDR MT6359_VCAMIO_ANA_CON0
+#define MT6359_RG_VCAMIO_VOSEL_MASK 0xF
+#define MT6359_RG_VCAMIO_VOSEL_SHIFT 8
+#define MT6359_RG_VIO18_VOSEL_ADDR MT6359_VIO18_ANA_CON0
+#define MT6359_RG_VIO18_VOSEL_MASK 0xF
+#define MT6359_RG_VIO18_VOSEL_SHIFT 8
+#define MT6359_RG_VM18_VOSEL_ADDR MT6359_VM18_ANA_CON0
+#define MT6359_RG_VM18_VOSEL_MASK 0xF
+#define MT6359_RG_VM18_VOSEL_SHIFT 8
+#define MT6359_RG_VUFS_VOSEL_ADDR MT6359_VUFS_ANA_CON0
+#define MT6359_RG_VUFS_VOSEL_MASK 0xF
+#define MT6359_RG_VUFS_VOSEL_SHIFT 8
+#define MT6359_RG_VRF12_VOSEL_ADDR MT6359_VRF12_ANA_CON0
+#define MT6359_RG_VRF12_VOSEL_MASK 0xF
+#define MT6359_RG_VRF12_VOSEL_SHIFT 8
+#define MT6359_RG_VCN13_VOSEL_ADDR MT6359_VCN13_ANA_CON0
+#define MT6359_RG_VCN13_VOSEL_MASK 0xF
+#define MT6359_RG_VCN13_VOSEL_SHIFT 8
+#define MT6359_RG_VA09_VOSEL_ADDR MT6359_VA09_ANA_CON0
+#define MT6359_RG_VA09_VOSEL_MASK 0xF
+#define MT6359_RG_VA09_VOSEL_SHIFT 8
+#define MT6359_RG_VA12_VOSEL_ADDR MT6359_VA12_ANA_CON0
+#define MT6359_RG_VA12_VOSEL_MASK 0xF
+#define MT6359_RG_VA12_VOSEL_SHIFT 8
+#define MT6359_RG_VXO22_VOSEL_ADDR MT6359_VXO22_ANA_CON0
+#define MT6359_RG_VXO22_VOSEL_MASK 0xF
+#define MT6359_RG_VXO22_VOSEL_SHIFT 8
+#define MT6359_RG_VRFCK_VOSEL_ADDR MT6359_VRFCK_ANA_CON0
+#define MT6359_RG_VRFCK_VOSEL_MASK 0xF
+#define MT6359_RG_VRFCK_VOSEL_SHIFT 8
+#define MT6359_RG_VBBCK_VOSEL_ADDR MT6359_VBBCK_ANA_CON0
+#define MT6359_RG_VBBCK_VOSEL_MASK 0xF
+#define MT6359_RG_VBBCK_VOSEL_SHIFT 8
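+
+/*
+ * The *_ADDR/*_MASK/*_SHIFT triples above describe register bitfields
+ * for the regulator drivers: _MASK is the unshifted field mask and
+ * _SHIFT its bit position. A minimal sketch of the intended access
+ * pattern (assuming the chip's regmap handle; not a helper from this
+ * header):
+ *
+ *	regmap_update_bits(regmap, MT6359_RG_BUCK_VPU_VOSEL_ADDR,
+ *			   MT6359_RG_BUCK_VPU_VOSEL_MASK <<
+ *			   MT6359_RG_BUCK_VPU_VOSEL_SHIFT,
+ *			   vosel << MT6359_RG_BUCK_VPU_VOSEL_SHIFT);
+ */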
+
+#endif /* __MFD_MT6359_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6359p/registers.h b/include/linux/mfd/mt6359p/registers.h
new file mode 100644
index 000000000000..3d97c1885171
--- /dev/null
+++ b/include/linux/mfd/mt6359p/registers.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6359P_REGISTERS_H__
+#define __MFD_MT6359P_REGISTERS_H__
+
+#define MT6359P_CHIP_VER 0x5930
+
+/* PMIC Registers */
+#define MT6359P_HWCID 0x8
+#define MT6359P_TOP_TRAP 0x50
+#define MT6359P_TOP_TMA_KEY 0x3a8
+#define MT6359P_BUCK_VCORE_ELR_NUM 0x152a
+#define MT6359P_BUCK_VCORE_ELR0 0x152c
+#define MT6359P_BUCK_VGPU11_SSHUB_CON0 0x15aa
+#define MT6359P_BUCK_VGPU11_ELR0 0x15b4
+#define MT6359P_LDO_VSRAM_PROC1_ELR 0x1b44
+#define MT6359P_LDO_VSRAM_PROC2_ELR 0x1b46
+#define MT6359P_LDO_VSRAM_OTHERS_ELR 0x1b48
+#define MT6359P_LDO_VSRAM_MD_ELR 0x1b4a
+#define MT6359P_LDO_VEMC_ELR_0 0x1b4c
+#define MT6359P_LDO_VFE28_CON0 0x1b88
+#define MT6359P_LDO_VFE28_MON 0x1b8c
+#define MT6359P_LDO_VXO22_CON0 0x1b9a
+#define MT6359P_LDO_VXO22_MON 0x1b9e
+#define MT6359P_LDO_VRF18_CON0 0x1bac
+#define MT6359P_LDO_VRF18_MON 0x1bb0
+#define MT6359P_LDO_VRF12_CON0 0x1bbe
+#define MT6359P_LDO_VRF12_MON 0x1bc2
+#define MT6359P_LDO_VEFUSE_CON0 0x1bd0
+#define MT6359P_LDO_VEFUSE_MON 0x1bd4
+#define MT6359P_LDO_VCN33_1_CON0 0x1be2
+#define MT6359P_LDO_VCN33_1_MON 0x1be6
+#define MT6359P_LDO_VCN33_1_MULTI_SW 0x1bf4
+#define MT6359P_LDO_VCN33_2_CON0 0x1c08
+#define MT6359P_LDO_VCN33_2_MON 0x1c0c
+#define MT6359P_LDO_VCN33_2_MULTI_SW 0x1c1a
+#define MT6359P_LDO_VCN13_CON0 0x1c1c
+#define MT6359P_LDO_VCN13_MON 0x1c20
+#define MT6359P_LDO_VCN18_CON0 0x1c2e
+#define MT6359P_LDO_VCN18_MON 0x1c32
+#define MT6359P_LDO_VA09_CON0 0x1c40
+#define MT6359P_LDO_VA09_MON 0x1c44
+#define MT6359P_LDO_VCAMIO_CON0 0x1c52
+#define MT6359P_LDO_VCAMIO_MON 0x1c56
+#define MT6359P_LDO_VA12_CON0 0x1c64
+#define MT6359P_LDO_VA12_MON 0x1c68
+#define MT6359P_LDO_VAUX18_CON0 0x1c88
+#define MT6359P_LDO_VAUX18_MON 0x1c8c
+#define MT6359P_LDO_VAUD18_CON0 0x1c9a
+#define MT6359P_LDO_VAUD18_MON 0x1c9e
+#define MT6359P_LDO_VIO18_CON0 0x1cac
+#define MT6359P_LDO_VIO18_MON 0x1cb0
+#define MT6359P_LDO_VEMC_CON0 0x1cbe
+#define MT6359P_LDO_VEMC_MON 0x1cc2
+#define MT6359P_LDO_VSIM1_CON0 0x1cd0
+#define MT6359P_LDO_VSIM1_MON 0x1cd4
+#define MT6359P_LDO_VSIM2_CON0 0x1ce2
+#define MT6359P_LDO_VSIM2_MON 0x1ce6
+#define MT6359P_LDO_VUSB_CON0 0x1d08
+#define MT6359P_LDO_VUSB_MON 0x1d0c
+#define MT6359P_LDO_VUSB_MULTI_SW 0x1d1a
+#define MT6359P_LDO_VRFCK_CON0 0x1d1c
+#define MT6359P_LDO_VRFCK_MON 0x1d20
+#define MT6359P_LDO_VBBCK_CON0 0x1d2e
+#define MT6359P_LDO_VBBCK_MON 0x1d32
+#define MT6359P_LDO_VBIF28_CON0 0x1d40
+#define MT6359P_LDO_VBIF28_MON 0x1d44
+#define MT6359P_LDO_VIBR_CON0 0x1d52
+#define MT6359P_LDO_VIBR_MON 0x1d56
+#define MT6359P_LDO_VIO28_CON0 0x1d64
+#define MT6359P_LDO_VIO28_MON 0x1d68
+#define MT6359P_LDO_VM18_CON0 0x1d88
+#define MT6359P_LDO_VM18_MON 0x1d8c
+#define MT6359P_LDO_VUFS_CON0 0x1d9a
+#define MT6359P_LDO_VUFS_MON 0x1d9e
+#define MT6359P_LDO_VSRAM_PROC1_CON0 0x1e88
+#define MT6359P_LDO_VSRAM_PROC1_MON 0x1e8c
+#define MT6359P_LDO_VSRAM_PROC1_VOSEL1 0x1e90
+#define MT6359P_LDO_VSRAM_PROC2_CON0 0x1ea8
+#define MT6359P_LDO_VSRAM_PROC2_MON 0x1eac
+#define MT6359P_LDO_VSRAM_PROC2_VOSEL1 0x1eb0
+#define MT6359P_LDO_VSRAM_OTHERS_CON0 0x1f08
+#define MT6359P_LDO_VSRAM_OTHERS_MON 0x1f0c
+#define MT6359P_LDO_VSRAM_OTHERS_VOSEL1 0x1f10
+#define MT6359P_LDO_VSRAM_OTHERS_SSHUB 0x1f28
+#define MT6359P_LDO_VSRAM_MD_CON0 0x1f2e
+#define MT6359P_LDO_VSRAM_MD_MON 0x1f32
+#define MT6359P_LDO_VSRAM_MD_VOSEL1 0x1f36
+#define MT6359P_VFE28_ANA_CON0 0x1f88
+#define MT6359P_VAUX18_ANA_CON0 0x1f8c
+#define MT6359P_VUSB_ANA_CON0 0x1f90
+#define MT6359P_VBIF28_ANA_CON0 0x1f94
+#define MT6359P_VCN33_1_ANA_CON0 0x1f98
+#define MT6359P_VCN33_2_ANA_CON0 0x1f9c
+#define MT6359P_VEMC_ANA_CON0 0x1fa0
+#define MT6359P_VSIM1_ANA_CON0 0x1fa2
+#define MT6359P_VSIM2_ANA_CON0 0x1fa6
+#define MT6359P_VIO28_ANA_CON0 0x1faa
+#define MT6359P_VIBR_ANA_CON0 0x1fae
+#define MT6359P_VFE28_ELR_4 0x1fc0
+#define MT6359P_VRF18_ANA_CON0 0x2008
+#define MT6359P_VEFUSE_ANA_CON0 0x200c
+#define MT6359P_VCN18_ANA_CON0 0x2010
+#define MT6359P_VCAMIO_ANA_CON0 0x2014
+#define MT6359P_VAUD18_ANA_CON0 0x2018
+#define MT6359P_VIO18_ANA_CON0 0x201c
+#define MT6359P_VM18_ANA_CON0 0x2020
+#define MT6359P_VUFS_ANA_CON0 0x2024
+#define MT6359P_VRF12_ANA_CON0 0x202a
+#define MT6359P_VCN13_ANA_CON0 0x202e
+#define MT6359P_VA09_ANA_CON0 0x2032
+#define MT6359P_VRF18_ELR_3 0x204e
+#define MT6359P_VXO22_ANA_CON0 0x2088
+#define MT6359P_VRFCK_ANA_CON0 0x208c
+#define MT6359P_VBBCK_ANA_CON0 0x2096
+
+#define MT6359P_RG_BUCK_VCORE_VOSEL_ADDR MT6359P_BUCK_VCORE_ELR0
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_EN_ADDR MT6359P_BUCK_VGPU11_SSHUB_CON0
+#define MT6359P_RG_BUCK_VGPU11_VOSEL_ADDR MT6359P_BUCK_VGPU11_ELR0
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_ADDR MT6359P_BUCK_VGPU11_SSHUB_CON0
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_MASK 0x7F
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_SHIFT 4
+#define MT6359P_RG_LDO_VSRAM_PROC1_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC1_ELR
+#define MT6359P_RG_LDO_VSRAM_PROC2_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC2_ELR
+#define MT6359P_RG_LDO_VSRAM_OTHERS_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_ELR
+#define MT6359P_RG_LDO_VSRAM_MD_VOSEL_ADDR MT6359P_LDO_VSRAM_MD_ELR
+#define MT6359P_RG_LDO_VEMC_VOSEL_0_ADDR MT6359P_LDO_VEMC_ELR_0
+#define MT6359P_RG_LDO_VEMC_VOSEL_0_MASK 0xF
+#define MT6359P_RG_LDO_VEMC_VOSEL_0_SHIFT 0
+#define MT6359P_RG_LDO_VFE28_EN_ADDR MT6359P_LDO_VFE28_CON0
+#define MT6359P_DA_VFE28_B_EN_ADDR MT6359P_LDO_VFE28_MON
+#define MT6359P_RG_LDO_VXO22_EN_ADDR MT6359P_LDO_VXO22_CON0
+#define MT6359P_RG_LDO_VXO22_EN_SHIFT 0
+#define MT6359P_DA_VXO22_B_EN_ADDR MT6359P_LDO_VXO22_MON
+#define MT6359P_RG_LDO_VRF18_EN_ADDR MT6359P_LDO_VRF18_CON0
+#define MT6359P_RG_LDO_VRF18_EN_SHIFT 0
+#define MT6359P_DA_VRF18_B_EN_ADDR MT6359P_LDO_VRF18_MON
+#define MT6359P_RG_LDO_VRF12_EN_ADDR MT6359P_LDO_VRF12_CON0
+#define MT6359P_RG_LDO_VRF12_EN_SHIFT 0
+#define MT6359P_DA_VRF12_B_EN_ADDR MT6359P_LDO_VRF12_MON
+#define MT6359P_RG_LDO_VEFUSE_EN_ADDR MT6359P_LDO_VEFUSE_CON0
+#define MT6359P_RG_LDO_VEFUSE_EN_SHIFT 0
+#define MT6359P_DA_VEFUSE_B_EN_ADDR MT6359P_LDO_VEFUSE_MON
+#define MT6359P_RG_LDO_VCN33_1_EN_0_ADDR MT6359P_LDO_VCN33_1_CON0
+#define MT6359P_DA_VCN33_1_B_EN_ADDR MT6359P_LDO_VCN33_1_MON
+#define MT6359P_RG_LDO_VCN33_1_EN_1_ADDR MT6359P_LDO_VCN33_1_MULTI_SW
+#define MT6359P_RG_LDO_VCN33_1_EN_1_SHIFT 15
+#define MT6359P_RG_LDO_VCN33_2_EN_0_ADDR MT6359P_LDO_VCN33_2_CON0
+#define MT6359P_RG_LDO_VCN33_2_EN_0_SHIFT 0
+#define MT6359P_DA_VCN33_2_B_EN_ADDR MT6359P_LDO_VCN33_2_MON
+#define MT6359P_RG_LDO_VCN33_2_EN_1_ADDR MT6359P_LDO_VCN33_2_MULTI_SW
+#define MT6359P_RG_LDO_VCN13_EN_ADDR MT6359P_LDO_VCN13_CON0
+#define MT6359P_RG_LDO_VCN13_EN_SHIFT 0
+#define MT6359P_DA_VCN13_B_EN_ADDR MT6359P_LDO_VCN13_MON
+#define MT6359P_RG_LDO_VCN18_EN_ADDR MT6359P_LDO_VCN18_CON0
+#define MT6359P_DA_VCN18_B_EN_ADDR MT6359P_LDO_VCN18_MON
+#define MT6359P_RG_LDO_VA09_EN_ADDR MT6359P_LDO_VA09_CON0
+#define MT6359P_RG_LDO_VA09_EN_SHIFT 0
+#define MT6359P_DA_VA09_B_EN_ADDR MT6359P_LDO_VA09_MON
+#define MT6359P_RG_LDO_VCAMIO_EN_ADDR MT6359P_LDO_VCAMIO_CON0
+#define MT6359P_RG_LDO_VCAMIO_EN_SHIFT 0
+#define MT6359P_DA_VCAMIO_B_EN_ADDR MT6359P_LDO_VCAMIO_MON
+#define MT6359P_RG_LDO_VA12_EN_ADDR MT6359P_LDO_VA12_CON0
+#define MT6359P_RG_LDO_VA12_EN_SHIFT 0
+#define MT6359P_DA_VA12_B_EN_ADDR MT6359P_LDO_VA12_MON
+#define MT6359P_RG_LDO_VAUX18_EN_ADDR MT6359P_LDO_VAUX18_CON0
+#define MT6359P_DA_VAUX18_B_EN_ADDR MT6359P_LDO_VAUX18_MON
+#define MT6359P_RG_LDO_VAUD18_EN_ADDR MT6359P_LDO_VAUD18_CON0
+#define MT6359P_DA_VAUD18_B_EN_ADDR MT6359P_LDO_VAUD18_MON
+#define MT6359P_RG_LDO_VIO18_EN_ADDR MT6359P_LDO_VIO18_CON0
+#define MT6359P_RG_LDO_VIO18_EN_SHIFT 0
+#define MT6359P_DA_VIO18_B_EN_ADDR MT6359P_LDO_VIO18_MON
+#define MT6359P_RG_LDO_VEMC_EN_ADDR MT6359P_LDO_VEMC_CON0
+#define MT6359P_RG_LDO_VEMC_EN_SHIFT 0
+#define MT6359P_DA_VEMC_B_EN_ADDR MT6359P_LDO_VEMC_MON
+#define MT6359P_RG_LDO_VSIM1_EN_ADDR MT6359P_LDO_VSIM1_CON0
+#define MT6359P_RG_LDO_VSIM1_EN_SHIFT 0
+#define MT6359P_DA_VSIM1_B_EN_ADDR MT6359P_LDO_VSIM1_MON
+#define MT6359P_RG_LDO_VSIM2_EN_ADDR MT6359P_LDO_VSIM2_CON0
+#define MT6359P_RG_LDO_VSIM2_EN_SHIFT 0
+#define MT6359P_DA_VSIM2_B_EN_ADDR MT6359P_LDO_VSIM2_MON
+#define MT6359P_RG_LDO_VUSB_EN_0_ADDR MT6359P_LDO_VUSB_CON0
+#define MT6359P_DA_VUSB_B_EN_ADDR MT6359P_LDO_VUSB_MON
+#define MT6359P_RG_LDO_VUSB_EN_1_ADDR MT6359P_LDO_VUSB_MULTI_SW
+#define MT6359P_RG_LDO_VRFCK_EN_ADDR MT6359P_LDO_VRFCK_CON0
+#define MT6359P_RG_LDO_VRFCK_EN_SHIFT 0
+#define MT6359P_DA_VRFCK_B_EN_ADDR MT6359P_LDO_VRFCK_MON
+#define MT6359P_RG_LDO_VBBCK_EN_ADDR MT6359P_LDO_VBBCK_CON0
+#define MT6359P_RG_LDO_VBBCK_EN_SHIFT 0
+#define MT6359P_DA_VBBCK_B_EN_ADDR MT6359P_LDO_VBBCK_MON
+#define MT6359P_RG_LDO_VBIF28_EN_ADDR MT6359P_LDO_VBIF28_CON0
+#define MT6359P_DA_VBIF28_B_EN_ADDR MT6359P_LDO_VBIF28_MON
+#define MT6359P_RG_LDO_VIBR_EN_ADDR MT6359P_LDO_VIBR_CON0
+#define MT6359P_RG_LDO_VIBR_EN_SHIFT 0
+#define MT6359P_DA_VIBR_B_EN_ADDR MT6359P_LDO_VIBR_MON
+#define MT6359P_RG_LDO_VIO28_EN_ADDR MT6359P_LDO_VIO28_CON0
+#define MT6359P_RG_LDO_VIO28_EN_SHIFT 0
+#define MT6359P_DA_VIO28_B_EN_ADDR MT6359P_LDO_VIO28_MON
+#define MT6359P_RG_LDO_VM18_EN_ADDR MT6359P_LDO_VM18_CON0
+#define MT6359P_RG_LDO_VM18_EN_SHIFT 0
+#define MT6359P_DA_VM18_B_EN_ADDR MT6359P_LDO_VM18_MON
+#define MT6359P_RG_LDO_VUFS_EN_ADDR MT6359P_LDO_VUFS_CON0
+#define MT6359P_RG_LDO_VUFS_EN_SHIFT 0
+#define MT6359P_DA_VUFS_B_EN_ADDR MT6359P_LDO_VUFS_MON
+#define MT6359P_RG_LDO_VSRAM_PROC1_EN_ADDR MT6359P_LDO_VSRAM_PROC1_CON0
+#define MT6359P_DA_VSRAM_PROC1_B_EN_ADDR MT6359P_LDO_VSRAM_PROC1_MON
+#define MT6359P_DA_VSRAM_PROC1_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC1_VOSEL1
+#define MT6359P_RG_LDO_VSRAM_PROC2_EN_ADDR MT6359P_LDO_VSRAM_PROC2_CON0
+#define MT6359P_DA_VSRAM_PROC2_B_EN_ADDR MT6359P_LDO_VSRAM_PROC2_MON
+#define MT6359P_DA_VSRAM_PROC2_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC2_VOSEL1
+#define MT6359P_RG_LDO_VSRAM_OTHERS_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_CON0
+#define MT6359P_DA_VSRAM_OTHERS_B_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_MON
+#define MT6359P_DA_VSRAM_OTHERS_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_VOSEL1
+#define MT6359P_RG_LDO_VSRAM_OTHERS_SSHUB_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359P_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359P_RG_LDO_VSRAM_MD_EN_ADDR MT6359P_LDO_VSRAM_MD_CON0
+#define MT6359P_DA_VSRAM_MD_B_EN_ADDR MT6359P_LDO_VSRAM_MD_MON
+#define MT6359P_DA_VSRAM_MD_VOSEL_ADDR MT6359P_LDO_VSRAM_MD_VOSEL1
+#define MT6359P_RG_VCN33_1_VOSEL_ADDR MT6359P_VCN33_1_ANA_CON0
+#define MT6359P_RG_VCN33_2_VOSEL_ADDR MT6359P_VCN33_2_ANA_CON0
+#define MT6359P_RG_VEMC_VOSEL_ADDR MT6359P_VEMC_ANA_CON0
+#define MT6359P_RG_VSIM1_VOSEL_ADDR MT6359P_VSIM1_ANA_CON0
+#define MT6359P_RG_VSIM2_VOSEL_ADDR MT6359P_VSIM2_ANA_CON0
+#define MT6359P_RG_VIO28_VOSEL_ADDR MT6359P_VIO28_ANA_CON0
+#define MT6359P_RG_VIBR_VOSEL_ADDR MT6359P_VIBR_ANA_CON0
+#define MT6359P_RG_VRF18_VOSEL_ADDR MT6359P_VRF18_ANA_CON0
+#define MT6359P_RG_VEFUSE_VOSEL_ADDR MT6359P_VEFUSE_ANA_CON0
+#define MT6359P_RG_VCAMIO_VOSEL_ADDR MT6359P_VCAMIO_ANA_CON0
+#define MT6359P_RG_VIO18_VOSEL_ADDR MT6359P_VIO18_ANA_CON0
+#define MT6359P_RG_VM18_VOSEL_ADDR MT6359P_VM18_ANA_CON0
+#define MT6359P_RG_VUFS_VOSEL_ADDR MT6359P_VUFS_ANA_CON0
+#define MT6359P_RG_VRF12_VOSEL_ADDR MT6359P_VRF12_ANA_CON0
+#define MT6359P_RG_VCN13_VOSEL_ADDR MT6359P_VCN13_ANA_CON0
+#define MT6359P_RG_VA09_VOSEL_ADDR MT6359P_VRF18_ELR_3
+#define MT6359P_RG_VA12_VOSEL_ADDR MT6359P_VFE28_ELR_4
+#define MT6359P_RG_VXO22_VOSEL_ADDR MT6359P_VXO22_ANA_CON0
+#define MT6359P_RG_VRFCK_VOSEL_ADDR MT6359P_VRFCK_ANA_CON0
+#define MT6359P_RG_VBBCK_VOSEL_ADDR MT6359P_VBBCK_ANA_CON0
+#define MT6359P_RG_VBBCK_VOSEL_MASK 0xF
+#define MT6359P_RG_VBBCK_VOSEL_SHIFT 4
+#define MT6359P_VM_MODE_ADDR MT6359P_TOP_TRAP
+#define MT6359P_TMA_KEY_ADDR MT6359P_TOP_TMA_KEY
+
+#define TMA_KEY 0x9CA6
+
+#endif /* __MFD_MT6359P_REGISTERS_H__ */
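
The TMA key above gates write access to protected MT6359P registers. A minimal
sketch of the unlock/re-lock sequence a driver might wrap around such writes
(the exact sequence is an assumption inferred from these defines, not stated by
this header; verify against the datasheet and the regulator driver):

#include <linux/mfd/mt6359p/registers.h>
#include <linux/regmap.h>

static int example_write_protected(struct regmap *regmap,
				   unsigned int reg, unsigned int val)
{
	int ret;

	/* Assumed: writing TMA_KEY to the key register unlocks the region. */
	ret = regmap_write(regmap, MT6359P_TMA_KEY_ADDR, TMA_KEY);
	if (ret)
		return ret;

	ret = regmap_write(regmap, reg, val);

	/* Assumed: writing any other value (here 0) re-locks it. */
	regmap_write(regmap, MT6359P_TMA_KEY_ADDR, 0);
	return ret;
}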
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
index fc88d315bdde..627487e26287 100644
--- a/include/linux/mfd/mt6397/core.h
+++ b/include/linux/mfd/mt6397/core.h
@@ -8,9 +8,16 @@
#define __MFD_MT6397_CORE_H__
#include <linux/mutex.h>
+#include <linux/notifier.h>
enum chip_id {
MT6323_CHIP_ID = 0x23,
+ MT6331_CHIP_ID = 0x20,
+ MT6332_CHIP_ID = 0x20,
+ MT6357_CHIP_ID = 0x57,
+ MT6358_CHIP_ID = 0x58,
+ MT6359_CHIP_ID = 0x59,
+ MT6366_CHIP_ID = 0x66,
MT6391_CHIP_ID = 0x91,
MT6397_CHIP_ID = 0x97,
};
@@ -54,6 +61,7 @@ enum mt6397_irq_numbers {
struct mt6397_chip {
struct device *dev;
struct regmap *regmap;
+ struct notifier_block pm_nb;
int irq;
struct irq_domain *irq_domain;
struct mutex irqlock;
@@ -63,8 +71,10 @@ struct mt6397_chip {
u16 int_con[2];
u16 int_status[2];
u16 chip_id;
+ void *irq_data;
};
+int mt6358_irq_init(struct mt6397_chip *chip);
int mt6397_irq_init(struct mt6397_chip *chip);
#endif /* __MFD_MT6397_CORE_H__ */
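
The hunk above adds new chip IDs and a second IRQ init flavour. A hedged
sketch of how a core driver might pick between them at probe time (the case
grouping is an illustrative assumption, not taken from this header):

#include <linux/mfd/mt6397/core.h>

static int example_init_irq(struct mt6397_chip *chip)
{
	switch (chip->chip_id) {
	case MT6358_CHIP_ID:
	case MT6359_CHIP_ID:
	case MT6366_CHIP_ID:
		/* Assumed here to share the MT6358-style IRQ layout. */
		return mt6358_irq_init(chip);
	default:
		return mt6397_irq_init(chip);
	}
}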
diff --git a/include/linux/mfd/mt6397/rtc.h b/include/linux/mfd/mt6397/rtc.h
index 7dfb63b81373..068ae1c0f0e8 100644
--- a/include/linux/mfd/mt6397/rtc.h
+++ b/include/linux/mfd/mt6397/rtc.h
@@ -18,7 +18,9 @@
#define RTC_BBPU_CBUSY BIT(6)
#define RTC_BBPU_KEY (0x43 << 8)
-#define RTC_WRTGR 0x003c
+#define RTC_WRTGR_MT6358 0x003a
+#define RTC_WRTGR_MT6397 0x003c
+#define RTC_WRTGR_MT6323 RTC_WRTGR_MT6397
#define RTC_IRQ_STA 0x0002
#define RTC_IRQ_STA_AL BIT(0)
@@ -34,6 +36,7 @@
#define RTC_AL_MASK_DOW BIT(4)
#define RTC_TC_SEC 0x000a
+#define RTC_TC_MTH_MASK 0x000f
/* Min, Hour, Dom... register offset to RTC_TC_SEC */
#define RTC_OFFSET_SEC 0
#define RTC_OFFSET_MIN 1
@@ -65,8 +68,11 @@
#define MTK_RTC_POLL_DELAY_US 10
#define MTK_RTC_POLL_TIMEOUT (jiffies_to_usecs(HZ))
+struct mtk_rtc_data {
+ u32 wrtgr;
+};
+
struct mt6397_rtc {
- struct device *dev;
struct rtc_device *rtc_dev;
/* Protect register access from multiple tasks */
@@ -74,6 +80,7 @@ struct mt6397_rtc {
struct regmap *regmap;
int irq;
u32 addr_base;
+ const struct mtk_rtc_data *data;
};
#endif /* _LINUX_MFD_MT6397_RTC_H_ */
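
With the write-trigger offset moved into the per-variant mtk_rtc_data, the RTC
driver can stay chip-agnostic. A minimal sketch (the helper name and the
written value are illustrative assumptions):

#include <linux/mfd/mt6397/rtc.h>
#include <linux/regmap.h>

static const struct mtk_rtc_data example_mt6358_rtc_data = {
	.wrtgr = RTC_WRTGR_MT6358,
};

/* Kick the hardware to commit previously written RTC registers. */
static int example_write_trigger(struct mt6397_rtc *rtc)
{
	return regmap_write(rtc->regmap, rtc->addr_base + rtc->data->wrtgr, 1);
}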
diff --git a/include/linux/mfd/ntxec.h b/include/linux/mfd/ntxec.h
new file mode 100644
index 000000000000..cc6f07bfa2b3
--- /dev/null
+++ b/include/linux/mfd/ntxec.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Jonathan Neuschäfer
+ *
+ * Register access and version information for the Netronix embedded
+ * controller.
+ */
+
+#ifndef NTXEC_H
+#define NTXEC_H
+
+#include <linux/types.h>
+
+struct device;
+struct regmap;
+
+struct ntxec {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+/*
+ * Some registers, such as the battery status register (0x41), are
+ * big-endian, but others have only eight significant bits, which are in the
+ * first byte transmitted over I2C (the MSB of the big-endian value).
+ * This convenience function converts an 8-bit value to 16-bit for use in the
+ * second kind of register.
+ */
+static inline u16 ntxec_reg8(u8 value)
+{
+ return value << 8;
+}
+
+/* Known firmware versions */
+#define NTXEC_VERSION_KOBO_AURA 0xd726 /* found in Kobo Aura */
+#define NTXEC_VERSION_TOLINO_SHINE2 0xf110 /* found in Tolino Shine 2 HD */
+
+#endif
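
A usage sketch for ntxec_reg8() (the register address is a placeholder, not
taken from this header):

#include <linux/mfd/ntxec.h>
#include <linux/regmap.h>

#define EXAMPLE_NTXEC_REG 0x30 /* hypothetical 8-bit-style register */

static int example_ntxec_write8(struct ntxec *ec, u8 val)
{
	/* Place the eight significant bits in the MSB, as the EC expects. */
	return regmap_write(ec->regmap, EXAMPLE_NTXEC_REG, ntxec_reg8(val));
}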
diff --git a/include/linux/mfd/ocelot.h b/include/linux/mfd/ocelot.h
new file mode 100644
index 000000000000..dd72073d2d4f
--- /dev/null
+++ b/include/linux/mfd/ocelot.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2022 Innovative Advantage Inc. */
+
+#ifndef _LINUX_MFD_OCELOT_H
+#define _LINUX_MFD_OCELOT_H
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+struct resource;
+
+static inline struct regmap *
+ocelot_regmap_from_resource_optional(struct platform_device *pdev,
+ unsigned int index,
+ const struct regmap_config *config)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *regs;
+
+ /*
+ * Don't use _get_and_ioremap_resource() here, since that would print
+ * "invalid resource" messages, which would simply add confusion.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, index);
+ if (res) {
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return ERR_CAST(regs);
+ return devm_regmap_init_mmio(dev, regs, config);
+ }
+
+ /*
+ * Fall back to using REG and getting the resource from the parent
+ * device, which is possible in an MFD configuration.
+ */
+ if (dev->parent) {
+ res = platform_get_resource(pdev, IORESOURCE_REG, index);
+ if (!res)
+ return NULL;
+
+ return dev_get_regmap(dev->parent, res->name);
+ }
+
+ return NULL;
+}
+
+static inline struct regmap *
+ocelot_regmap_from_resource(struct platform_device *pdev, unsigned int index,
+ const struct regmap_config *config)
+{
+ struct regmap *map;
+
+ map = ocelot_regmap_from_resource_optional(pdev, index, config);
+ return map ?: ERR_PTR(-ENOENT);
+}
+
+#endif
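
A probe-time sketch of the non-optional helper above; resource index 0 may be
either a direct MMIO window or a parent-provided REG resource (the
regmap_config values and function names are illustrative assumptions):

#include <linux/mfd/ocelot.h>

static const struct regmap_config example_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

static int example_probe(struct platform_device *pdev)
{
	struct regmap *map;

	map = ocelot_regmap_from_resource(pdev, 0, &example_regmap_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	return 0;
}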
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
index a59bf323f713..9af1f3105f80 100644
--- a/include/linux/mfd/rk808.h
+++ b/include/linux/mfd/rk808.h
@@ -373,6 +373,7 @@ enum rk805_reg {
#define SWITCH2_EN BIT(6)
#define SWITCH1_EN BIT(5)
#define DEV_OFF_RST BIT(3)
+#define DEV_RST BIT(2)
#define DEV_OFF BIT(0)
#define RTC_STOP BIT(0)
@@ -437,6 +438,158 @@ enum rk809_reg_id {
#define RK817_RTC_COMP_LSB_REG 0x10
#define RK817_RTC_COMP_MSB_REG 0x11
+/* RK817 Codec Registers */
+#define RK817_CODEC_DTOP_VUCTL 0x12
+#define RK817_CODEC_DTOP_VUCTIME 0x13
+#define RK817_CODEC_DTOP_LPT_SRST 0x14
+#define RK817_CODEC_DTOP_DIGEN_CLKE 0x15
+#define RK817_CODEC_AREF_RTCFG0 0x16
+#define RK817_CODEC_AREF_RTCFG1 0x17
+#define RK817_CODEC_AADC_CFG0 0x18
+#define RK817_CODEC_AADC_CFG1 0x19
+#define RK817_CODEC_DADC_VOLL 0x1a
+#define RK817_CODEC_DADC_VOLR 0x1b
+#define RK817_CODEC_DADC_SR_ACL0 0x1e
+#define RK817_CODEC_DADC_ALC1 0x1f
+#define RK817_CODEC_DADC_ALC2 0x20
+#define RK817_CODEC_DADC_NG 0x21
+#define RK817_CODEC_DADC_HPF 0x22
+#define RK817_CODEC_DADC_RVOLL 0x23
+#define RK817_CODEC_DADC_RVOLR 0x24
+#define RK817_CODEC_AMIC_CFG0 0x27
+#define RK817_CODEC_AMIC_CFG1 0x28
+#define RK817_CODEC_DMIC_PGA_GAIN 0x29
+#define RK817_CODEC_DMIC_LMT1 0x2a
+#define RK817_CODEC_DMIC_LMT2 0x2b
+#define RK817_CODEC_DMIC_NG1 0x2c
+#define RK817_CODEC_DMIC_NG2 0x2d
+#define RK817_CODEC_ADAC_CFG0 0x2e
+#define RK817_CODEC_ADAC_CFG1 0x2f
+#define RK817_CODEC_DDAC_POPD_DACST 0x30
+#define RK817_CODEC_DDAC_VOLL 0x31
+#define RK817_CODEC_DDAC_VOLR 0x32
+#define RK817_CODEC_DDAC_SR_LMT0 0x35
+#define RK817_CODEC_DDAC_LMT1 0x36
+#define RK817_CODEC_DDAC_LMT2 0x37
+#define RK817_CODEC_DDAC_MUTE_MIXCTL 0x38
+#define RK817_CODEC_DDAC_RVOLL 0x39
+#define RK817_CODEC_DDAC_RVOLR 0x3a
+#define RK817_CODEC_AHP_ANTI0 0x3b
+#define RK817_CODEC_AHP_ANTI1 0x3c
+#define RK817_CODEC_AHP_CFG0 0x3d
+#define RK817_CODEC_AHP_CFG1 0x3e
+#define RK817_CODEC_AHP_CP 0x3f
+#define RK817_CODEC_ACLASSD_CFG1 0x40
+#define RK817_CODEC_ACLASSD_CFG2 0x41
+#define RK817_CODEC_APLL_CFG0 0x42
+#define RK817_CODEC_APLL_CFG1 0x43
+#define RK817_CODEC_APLL_CFG2 0x44
+#define RK817_CODEC_APLL_CFG3 0x45
+#define RK817_CODEC_APLL_CFG4 0x46
+#define RK817_CODEC_APLL_CFG5 0x47
+#define RK817_CODEC_DI2S_CKM 0x48
+#define RK817_CODEC_DI2S_RSD 0x49
+#define RK817_CODEC_DI2S_RXCR1 0x4a
+#define RK817_CODEC_DI2S_RXCR2 0x4b
+#define RK817_CODEC_DI2S_RXCMD_TSD 0x4c
+#define RK817_CODEC_DI2S_TXCR1 0x4d
+#define RK817_CODEC_DI2S_TXCR2 0x4e
+#define RK817_CODEC_DI2S_TXCR3_TXCMD 0x4f
+
+/* RK817_CODEC_DI2S_CKM */
+#define RK817_I2S_MODE_MASK (0x1 << 0)
+#define RK817_I2S_MODE_MST (0x1 << 0)
+#define RK817_I2S_MODE_SLV (0x0 << 0)
+
+/* RK817_CODEC_DDAC_MUTE_MIXCTL */
+#define DACMT_MASK (0x1 << 0)
+#define DACMT_ENABLE (0x1 << 0)
+#define DACMT_DISABLE (0x0 << 0)
+
+/* RK817_CODEC_DI2S_RXCR2 */
+#define VDW_RX_24BITS (0x17)
+#define VDW_RX_16BITS (0x0f)
+
+/* RK817_CODEC_DI2S_TXCR2 */
+#define VDW_TX_24BITS (0x17)
+#define VDW_TX_16BITS (0x0f)
+
+/* RK817_CODEC_AMIC_CFG0 */
+#define MIC_DIFF_MASK (0x1 << 7)
+#define MIC_DIFF_DIS (0x0 << 7)
+#define MIC_DIFF_EN (0x1 << 7)
+
+/* RK817 Battery Registers */
+#define RK817_GAS_GAUGE_ADC_CONFIG0 0x50
+#define RK817_GG_EN (0x1 << 7)
+#define RK817_SYS_VOL_ADC_EN (0x1 << 6)
+#define RK817_TS_ADC_EN (0x1 << 5)
+#define RK817_USB_VOL_ADC_EN (0x1 << 4)
+#define RK817_BAT_VOL_ADC_EN (0x1 << 3)
+#define RK817_BAT_CUR_ADC_EN (0x1 << 2)
+
+#define RK817_GAS_GAUGE_ADC_CONFIG1 0x55
+
+#define RK817_VOL_CUR_CALIB_UPD BIT(7)
+
+#define RK817_GAS_GAUGE_GG_CON 0x56
+#define RK817_GAS_GAUGE_GG_STS 0x57
+
+#define RK817_BAT_CON (0x1 << 4)
+#define RK817_RELAX_VOL_UPD (0x3 << 2)
+#define RK817_RELAX_STS (0x1 << 1)
+
+#define RK817_GAS_GAUGE_RELAX_THRE_H 0x58
+#define RK817_GAS_GAUGE_RELAX_THRE_L 0x59
+#define RK817_GAS_GAUGE_OCV_THRE_VOL 0x62
+#define RK817_GAS_GAUGE_OCV_VOL_H 0x63
+#define RK817_GAS_GAUGE_OCV_VOL_L 0x64
+#define RK817_GAS_GAUGE_PWRON_VOL_H 0x6b
+#define RK817_GAS_GAUGE_PWRON_VOL_L 0x6c
+#define RK817_GAS_GAUGE_PWRON_CUR_H 0x6d
+#define RK817_GAS_GAUGE_PWRON_CUR_L 0x6e
+#define RK817_GAS_GAUGE_OFF_CNT 0x6f
+#define RK817_GAS_GAUGE_Q_INIT_H3 0x70
+#define RK817_GAS_GAUGE_Q_INIT_H2 0x71
+#define RK817_GAS_GAUGE_Q_INIT_L1 0x72
+#define RK817_GAS_GAUGE_Q_INIT_L0 0x73
+#define RK817_GAS_GAUGE_Q_PRES_H3 0x74
+#define RK817_GAS_GAUGE_Q_PRES_H2 0x75
+#define RK817_GAS_GAUGE_Q_PRES_L1 0x76
+#define RK817_GAS_GAUGE_Q_PRES_L0 0x77
+#define RK817_GAS_GAUGE_BAT_VOL_H 0x78
+#define RK817_GAS_GAUGE_BAT_VOL_L 0x79
+#define RK817_GAS_GAUGE_BAT_CUR_H 0x7a
+#define RK817_GAS_GAUGE_BAT_CUR_L 0x7b
+#define RK817_GAS_GAUGE_USB_VOL_H 0x7e
+#define RK817_GAS_GAUGE_USB_VOL_L 0x7f
+#define RK817_GAS_GAUGE_SYS_VOL_H 0x80
+#define RK817_GAS_GAUGE_SYS_VOL_L 0x81
+#define RK817_GAS_GAUGE_Q_MAX_H3 0x82
+#define RK817_GAS_GAUGE_Q_MAX_H2 0x83
+#define RK817_GAS_GAUGE_Q_MAX_L1 0x84
+#define RK817_GAS_GAUGE_Q_MAX_L0 0x85
+#define RK817_GAS_GAUGE_SLEEP_CON_SAMP_CUR_H 0x8f
+#define RK817_GAS_GAUGE_SLEEP_CON_SAMP_CUR_L 0x90
+#define RK817_GAS_GAUGE_CAL_OFFSET_H 0x91
+#define RK817_GAS_GAUGE_CAL_OFFSET_L 0x92
+#define RK817_GAS_GAUGE_VCALIB0_H 0x93
+#define RK817_GAS_GAUGE_VCALIB0_L 0x94
+#define RK817_GAS_GAUGE_VCALIB1_H 0x95
+#define RK817_GAS_GAUGE_VCALIB1_L 0x96
+#define RK817_GAS_GAUGE_IOFFSET_H 0x97
+#define RK817_GAS_GAUGE_IOFFSET_L 0x98
+#define RK817_GAS_GAUGE_BAT_R1 0x9a
+#define RK817_GAS_GAUGE_BAT_R2 0x9b
+#define RK817_GAS_GAUGE_BAT_R3 0x9c
+#define RK817_GAS_GAUGE_DATA0 0x9d
+#define RK817_GAS_GAUGE_DATA1 0x9e
+#define RK817_GAS_GAUGE_DATA2 0x9f
+#define RK817_GAS_GAUGE_DATA3 0xa0
+#define RK817_GAS_GAUGE_DATA4 0xa1
+#define RK817_GAS_GAUGE_DATA5 0xa2
+#define RK817_GAS_GAUGE_CUR_ADC_K0 0xb0
+
#define RK817_POWER_EN_REG(i) (0xb1 + (i))
#define RK817_POWER_SLP_EN_REG(i) (0xb5 + (i))
@@ -462,10 +615,30 @@ enum rk809_reg_id {
#define RK817_LDO_ON_VSEL_REG(idx) (0xcc + (idx) * 2)
#define RK817_BOOST_OTG_CFG (0xde)
+#define RK817_PMIC_CHRG_OUT 0xe4
+#define RK817_CHRG_VOL_SEL (0x07 << 4)
+#define RK817_CHRG_CUR_SEL (0x07 << 0)
+
+#define RK817_PMIC_CHRG_IN 0xe5
+#define RK817_USB_VLIM_EN (0x01 << 7)
+#define RK817_USB_VLIM_SEL (0x07 << 4)
+#define RK817_USB_ILIM_EN (0x01 << 3)
+#define RK817_USB_ILIM_SEL (0x07 << 0)
+#define RK817_PMIC_CHRG_TERM 0xe6
+#define RK817_CHRG_TERM_ANA_DIG (0x01 << 2)
+#define RK817_CHRG_TERM_ANA_SEL (0x03 << 0)
+#define RK817_CHRG_EN (0x01 << 6)
+
+#define RK817_PMIC_CHRG_STS 0xeb
+#define RK817_BAT_EXS BIT(7)
+#define RK817_CHG_STS (0x07 << 4)
+
#define RK817_ID_MSB 0xed
#define RK817_ID_LSB 0xee
#define RK817_SYS_STS 0xf0
+#define RK817_PLUG_IN_STS (0x1 << 6)
+
#define RK817_SYS_CFG(i) (0xf1 + (i))
#define RK817_ON_SOURCE_REG 0xf5
@@ -620,7 +793,5 @@ struct rk808 {
long variant;
const struct regmap_config *regmap_cfg;
const struct regmap_irq_chip *regmap_irq_chip;
- void (*pm_pwroff_fn)(void);
- void (*pm_pwroff_prep_fn)(void);
};
#endif /* __LINUX_REGULATOR_RK808_H */
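
The gas gauge block added above exposes 16-bit quantities as H/L register
pairs. A hedged sketch of reassembling one of them (the helper name is
illustrative; raw value only, no scaling):

#include <linux/mfd/rk808.h>
#include <linux/regmap.h>

static int example_read_bat_volt_raw(struct regmap *regmap, u16 *raw)
{
	unsigned int hi, lo;
	int ret;

	ret = regmap_read(regmap, RK817_GAS_GAUGE_BAT_VOL_H, &hi);
	if (ret)
		return ret;
	ret = regmap_read(regmap, RK817_GAS_GAUGE_BAT_VOL_L, &lo);
	if (ret)
		return ret;

	*raw = (hi << 8) | lo;
	return 0;
}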
diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h
index d62ef48060b5..8aa0bda1af4f 100644
--- a/include/linux/mfd/rn5t618.h
+++ b/include/linux/mfd/rn5t618.h
@@ -139,6 +139,17 @@
#define RN5T618_INTPOL 0x9c
#define RN5T618_INTEN 0x9d
#define RN5T618_INTMON 0x9e
+
+#define RN5T618_RTC_SECONDS 0xA0
+#define RN5T618_RTC_MDAY 0xA4
+#define RN5T618_RTC_MONTH 0xA5
+#define RN5T618_RTC_YEAR 0xA6
+#define RN5T618_RTC_ADJUST 0xA7
+#define RN5T618_RTC_ALARM_Y_SEC 0xA8
+#define RN5T618_RTC_DAL_MONTH 0xAC
+#define RN5T618_RTC_CTRL1 0xAE
+#define RN5T618_RTC_CTRL2 0xAF
+
#define RN5T618_PREVINDAC 0xb0
#define RN5T618_BATDAC 0xb1
#define RN5T618_CHGCTL1 0xb3
@@ -177,6 +188,7 @@
#define RN5T618_CHGOSCSCORESET3 0xd7
#define RN5T618_CHGOSCFREQSET1 0xd8
#define RN5T618_CHGOSCFREQSET2 0xd9
+#define RN5T618_GCHGDET 0xda
#define RN5T618_CONTROL 0xe0
#define RN5T618_SOC 0xe1
#define RN5T618_RE_CAP_H 0xe2
@@ -242,9 +254,24 @@ enum {
RC5T619,
};
+/* RN5T618 IRQ definitions */
+enum {
+ RN5T618_IRQ_SYS = 0,
+ RN5T618_IRQ_DCDC,
+ RN5T618_IRQ_RTC,
+ RN5T618_IRQ_ADC,
+ RN5T618_IRQ_GPIO,
+ RN5T618_IRQ_CHG,
+ RN5T618_NR_IRQS,
+};
+
struct rn5t618 {
struct regmap *regmap;
+ struct device *dev;
long variant;
+
+ int irq;
+ struct regmap_irq_chip_data *irq_data;
};
#endif /* __LINUX_MFD_RN5T618_H */
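
With the IRQ list and regmap_irq_chip_data now in the shared struct, a
sub-driver can map one of the lines to a Linux virq. A minimal sketch (the
names are from this header; the flow is an assumption):

#include <linux/mfd/rn5t618.h>
#include <linux/regmap.h>

static int example_get_rtc_virq(struct rn5t618 *priv)
{
	return regmap_irq_get_virq(priv->irq_data, RN5T618_IRQ_RTC);
}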
diff --git a/include/linux/mfd/rohm-bd70528.h b/include/linux/mfd/rohm-bd70528.h
deleted file mode 100644
index a57af878fd0c..000000000000
--- a/include/linux/mfd/rohm-bd70528.h
+++ /dev/null
@@ -1,391 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* Copyright (C) 2018 ROHM Semiconductors */
-
-#ifndef __LINUX_MFD_BD70528_H__
-#define __LINUX_MFD_BD70528_H__
-
-#include <linux/bits.h>
-#include <linux/device.h>
-#include <linux/mfd/rohm-generic.h>
-#include <linux/mfd/rohm-shared.h>
-#include <linux/regmap.h>
-
-enum {
- BD70528_BUCK1,
- BD70528_BUCK2,
- BD70528_BUCK3,
- BD70528_LDO1,
- BD70528_LDO2,
- BD70528_LDO3,
- BD70528_LED1,
- BD70528_LED2,
-};
-
-struct bd70528_data {
- struct rohm_regmap_dev chip;
- struct mutex rtc_timer_lock;
-};
-
-#define BD70528_BUCK_VOLTS 17
-#define BD70528_BUCK_VOLTS 17
-#define BD70528_BUCK_VOLTS 17
-#define BD70528_LDO_VOLTS 0x20
-
-#define BD70528_REG_BUCK1_EN 0x0F
-#define BD70528_REG_BUCK1_VOLT 0x15
-#define BD70528_REG_BUCK2_EN 0x10
-#define BD70528_REG_BUCK2_VOLT 0x16
-#define BD70528_REG_BUCK3_EN 0x11
-#define BD70528_REG_BUCK3_VOLT 0x17
-#define BD70528_REG_LDO1_EN 0x1b
-#define BD70528_REG_LDO1_VOLT 0x1e
-#define BD70528_REG_LDO2_EN 0x1c
-#define BD70528_REG_LDO2_VOLT 0x1f
-#define BD70528_REG_LDO3_EN 0x1d
-#define BD70528_REG_LDO3_VOLT 0x20
-#define BD70528_REG_LED_CTRL 0x2b
-#define BD70528_REG_LED_VOLT 0x29
-#define BD70528_REG_LED_EN 0x2a
-
-/* main irq registers */
-#define BD70528_REG_INT_MAIN 0x7E
-#define BD70528_REG_INT_MAIN_MASK 0x74
-
-/* 'sub irq' registers */
-#define BD70528_REG_INT_SHDN 0x7F
-#define BD70528_REG_INT_PWR_FLT 0x80
-#define BD70528_REG_INT_VR_FLT 0x81
-#define BD70528_REG_INT_MISC 0x82
-#define BD70528_REG_INT_BAT1 0x83
-#define BD70528_REG_INT_BAT2 0x84
-#define BD70528_REG_INT_RTC 0x85
-#define BD70528_REG_INT_GPIO 0x86
-#define BD70528_REG_INT_OP_FAIL 0x87
-
-#define BD70528_REG_INT_SHDN_MASK 0x75
-#define BD70528_REG_INT_PWR_FLT_MASK 0x76
-#define BD70528_REG_INT_VR_FLT_MASK 0x77
-#define BD70528_REG_INT_MISC_MASK 0x78
-#define BD70528_REG_INT_BAT1_MASK 0x79
-#define BD70528_REG_INT_BAT2_MASK 0x7a
-#define BD70528_REG_INT_RTC_MASK 0x7b
-#define BD70528_REG_INT_GPIO_MASK 0x7c
-#define BD70528_REG_INT_OP_FAIL_MASK 0x7d
-
-/* Reset related 'magic' registers */
-#define BD70528_REG_SHIPMODE 0x03
-#define BD70528_REG_HWRESET 0x04
-#define BD70528_REG_WARMRESET 0x05
-#define BD70528_REG_STANDBY 0x06
-
-/* GPIO registers */
-#define BD70528_REG_GPIO_STATE 0x8F
-
-#define BD70528_REG_GPIO1_IN 0x4d
-#define BD70528_REG_GPIO2_IN 0x4f
-#define BD70528_REG_GPIO3_IN 0x51
-#define BD70528_REG_GPIO4_IN 0x53
-#define BD70528_REG_GPIO1_OUT 0x4e
-#define BD70528_REG_GPIO2_OUT 0x50
-#define BD70528_REG_GPIO3_OUT 0x52
-#define BD70528_REG_GPIO4_OUT 0x54
-
-/* RTC */
-
-#define BD70528_REG_RTC_COUNT_H 0x2d
-#define BD70528_REG_RTC_COUNT_L 0x2e
-#define BD70528_REG_RTC_SEC 0x2f
-#define BD70528_REG_RTC_MINUTE 0x30
-#define BD70528_REG_RTC_HOUR 0x31
-#define BD70528_REG_RTC_WEEK 0x32
-#define BD70528_REG_RTC_DAY 0x33
-#define BD70528_REG_RTC_MONTH 0x34
-#define BD70528_REG_RTC_YEAR 0x35
-
-#define BD70528_REG_RTC_ALM_SEC 0x36
-#define BD70528_REG_RTC_ALM_START BD70528_REG_RTC_ALM_SEC
-#define BD70528_REG_RTC_ALM_MINUTE 0x37
-#define BD70528_REG_RTC_ALM_HOUR 0x38
-#define BD70528_REG_RTC_ALM_WEEK 0x39
-#define BD70528_REG_RTC_ALM_DAY 0x3a
-#define BD70528_REG_RTC_ALM_MONTH 0x3b
-#define BD70528_REG_RTC_ALM_YEAR 0x3c
-#define BD70528_REG_RTC_ALM_MASK 0x3d
-#define BD70528_REG_RTC_ALM_REPEAT 0x3e
-#define BD70528_REG_RTC_START BD70528_REG_RTC_SEC
-
-#define BD70528_REG_RTC_WAKE_SEC 0x43
-#define BD70528_REG_RTC_WAKE_START BD70528_REG_RTC_WAKE_SEC
-#define BD70528_REG_RTC_WAKE_MIN 0x44
-#define BD70528_REG_RTC_WAKE_HOUR 0x45
-#define BD70528_REG_RTC_WAKE_CTRL 0x46
-
-#define BD70528_REG_ELAPSED_TIMER_EN 0x42
-#define BD70528_REG_WAKE_EN 0x46
-
-/* WDT registers */
-#define BD70528_REG_WDT_CTRL 0x4A
-#define BD70528_REG_WDT_HOUR 0x49
-#define BD70528_REG_WDT_MINUTE 0x48
-#define BD70528_REG_WDT_SEC 0x47
-
-/* Charger / Battery */
-#define BD70528_REG_CHG_CURR_STAT 0x59
-#define BD70528_REG_CHG_BAT_STAT 0x57
-#define BD70528_REG_CHG_BAT_TEMP 0x58
-#define BD70528_REG_CHG_IN_STAT 0x56
-#define BD70528_REG_CHG_DCIN_ILIM 0x5d
-#define BD70528_REG_CHG_CHG_CURR_WARM 0x61
-#define BD70528_REG_CHG_CHG_CURR_COLD 0x62
-
-/* Masks for main IRQ register bits */
-enum {
- BD70528_INT_SHDN,
-#define BD70528_INT_SHDN_MASK BIT(BD70528_INT_SHDN)
- BD70528_INT_PWR_FLT,
-#define BD70528_INT_PWR_FLT_MASK BIT(BD70528_INT_PWR_FLT)
- BD70528_INT_VR_FLT,
-#define BD70528_INT_VR_FLT_MASK BIT(BD70528_INT_VR_FLT)
- BD70528_INT_MISC,
-#define BD70528_INT_MISC_MASK BIT(BD70528_INT_MISC)
- BD70528_INT_BAT1,
-#define BD70528_INT_BAT1_MASK BIT(BD70528_INT_BAT1)
- BD70528_INT_RTC,
-#define BD70528_INT_RTC_MASK BIT(BD70528_INT_RTC)
- BD70528_INT_GPIO,
-#define BD70528_INT_GPIO_MASK BIT(BD70528_INT_GPIO)
- BD70528_INT_OP_FAIL,
-#define BD70528_INT_OP_FAIL_MASK BIT(BD70528_INT_OP_FAIL)
-};
-
-/* IRQs */
-enum {
- /* Shutdown register IRQs */
- BD70528_INT_LONGPUSH,
- BD70528_INT_WDT,
- BD70528_INT_HWRESET,
- BD70528_INT_RSTB_FAULT,
- BD70528_INT_VBAT_UVLO,
- BD70528_INT_TSD,
- BD70528_INT_RSTIN,
- /* Power failure register IRQs */
- BD70528_INT_BUCK1_FAULT,
- BD70528_INT_BUCK2_FAULT,
- BD70528_INT_BUCK3_FAULT,
- BD70528_INT_LDO1_FAULT,
- BD70528_INT_LDO2_FAULT,
- BD70528_INT_LDO3_FAULT,
- BD70528_INT_LED1_FAULT,
- BD70528_INT_LED2_FAULT,
- /* VR FAULT register IRQs */
- BD70528_INT_BUCK1_OCP,
- BD70528_INT_BUCK2_OCP,
- BD70528_INT_BUCK3_OCP,
- BD70528_INT_LED1_OCP,
- BD70528_INT_LED2_OCP,
- BD70528_INT_BUCK1_FULLON,
- BD70528_INT_BUCK2_FULLON,
- /* PMU register interrupts */
- BD70528_INT_SHORTPUSH,
- BD70528_INT_AUTO_WAKEUP,
- BD70528_INT_STATE_CHANGE,
- /* Charger 1 register IRQs */
- BD70528_INT_BAT_OV_RES,
- BD70528_INT_BAT_OV_DET,
- BD70528_INT_DBAT_DET,
- BD70528_INT_BATTSD_COLD_RES,
- BD70528_INT_BATTSD_COLD_DET,
- BD70528_INT_BATTSD_HOT_RES,
- BD70528_INT_BATTSD_HOT_DET,
- BD70528_INT_CHG_TSD,
- /* Charger 2 register IRQs */
- BD70528_INT_BAT_RMV,
- BD70528_INT_BAT_DET,
- BD70528_INT_DCIN2_OV_RES,
- BD70528_INT_DCIN2_OV_DET,
- BD70528_INT_DCIN2_RMV,
- BD70528_INT_DCIN2_DET,
- BD70528_INT_DCIN1_RMV,
- BD70528_INT_DCIN1_DET,
- /* RTC register IRQs */
- BD70528_INT_RTC_ALARM,
- BD70528_INT_ELPS_TIM,
- /* GPIO register IRQs */
- BD70528_INT_GPIO0,
- BD70528_INT_GPIO1,
- BD70528_INT_GPIO2,
- BD70528_INT_GPIO3,
- /* Invalid operation register IRQs */
- BD70528_INT_BUCK1_DVS_OPFAIL,
- BD70528_INT_BUCK2_DVS_OPFAIL,
- BD70528_INT_BUCK3_DVS_OPFAIL,
- BD70528_INT_LED1_VOLT_OPFAIL,
- BD70528_INT_LED2_VOLT_OPFAIL,
-};
-
-/* Masks */
-#define BD70528_INT_LONGPUSH_MASK 0x1
-#define BD70528_INT_WDT_MASK 0x2
-#define BD70528_INT_HWRESET_MASK 0x4
-#define BD70528_INT_RSTB_FAULT_MASK 0x8
-#define BD70528_INT_VBAT_UVLO_MASK 0x10
-#define BD70528_INT_TSD_MASK 0x20
-#define BD70528_INT_RSTIN_MASK 0x40
-
-#define BD70528_INT_BUCK1_FAULT_MASK 0x1
-#define BD70528_INT_BUCK2_FAULT_MASK 0x2
-#define BD70528_INT_BUCK3_FAULT_MASK 0x4
-#define BD70528_INT_LDO1_FAULT_MASK 0x8
-#define BD70528_INT_LDO2_FAULT_MASK 0x10
-#define BD70528_INT_LDO3_FAULT_MASK 0x20
-#define BD70528_INT_LED1_FAULT_MASK 0x40
-#define BD70528_INT_LED2_FAULT_MASK 0x80
-
-#define BD70528_INT_BUCK1_OCP_MASK 0x1
-#define BD70528_INT_BUCK2_OCP_MASK 0x2
-#define BD70528_INT_BUCK3_OCP_MASK 0x4
-#define BD70528_INT_LED1_OCP_MASK 0x8
-#define BD70528_INT_LED2_OCP_MASK 0x10
-#define BD70528_INT_BUCK1_FULLON_MASK 0x20
-#define BD70528_INT_BUCK2_FULLON_MASK 0x40
-
-#define BD70528_INT_SHORTPUSH_MASK 0x1
-#define BD70528_INT_AUTO_WAKEUP_MASK 0x2
-#define BD70528_INT_STATE_CHANGE_MASK 0x10
-
-#define BD70528_INT_BAT_OV_RES_MASK 0x1
-#define BD70528_INT_BAT_OV_DET_MASK 0x2
-#define BD70528_INT_DBAT_DET_MASK 0x4
-#define BD70528_INT_BATTSD_COLD_RES_MASK 0x8
-#define BD70528_INT_BATTSD_COLD_DET_MASK 0x10
-#define BD70528_INT_BATTSD_HOT_RES_MASK 0x20
-#define BD70528_INT_BATTSD_HOT_DET_MASK 0x40
-#define BD70528_INT_CHG_TSD_MASK 0x80
-
-#define BD70528_INT_BAT_RMV_MASK 0x1
-#define BD70528_INT_BAT_DET_MASK 0x2
-#define BD70528_INT_DCIN2_OV_RES_MASK 0x4
-#define BD70528_INT_DCIN2_OV_DET_MASK 0x8
-#define BD70528_INT_DCIN2_RMV_MASK 0x10
-#define BD70528_INT_DCIN2_DET_MASK 0x20
-#define BD70528_INT_DCIN1_RMV_MASK 0x40
-#define BD70528_INT_DCIN1_DET_MASK 0x80
-
-#define BD70528_INT_RTC_ALARM_MASK 0x1
-#define BD70528_INT_ELPS_TIM_MASK 0x2
-
-#define BD70528_INT_GPIO0_MASK 0x1
-#define BD70528_INT_GPIO1_MASK 0x2
-#define BD70528_INT_GPIO2_MASK 0x4
-#define BD70528_INT_GPIO3_MASK 0x8
-
-#define BD70528_INT_BUCK1_DVS_OPFAIL_MASK 0x1
-#define BD70528_INT_BUCK2_DVS_OPFAIL_MASK 0x2
-#define BD70528_INT_BUCK3_DVS_OPFAIL_MASK 0x4
-#define BD70528_INT_LED1_VOLT_OPFAIL_MASK 0x10
-#define BD70528_INT_LED2_VOLT_OPFAIL_MASK 0x20
-
-#define BD70528_DEBOUNCE_MASK 0x3
-
-#define BD70528_DEBOUNCE_DISABLE 0
-#define BD70528_DEBOUNCE_15MS 1
-#define BD70528_DEBOUNCE_30MS 2
-#define BD70528_DEBOUNCE_50MS 3
-
-#define BD70528_GPIO_DRIVE_MASK 0x2
-#define BD70528_GPIO_PUSH_PULL 0x0
-#define BD70528_GPIO_OPEN_DRAIN 0x2
-
-#define BD70528_GPIO_OUT_EN_MASK 0x80
-#define BD70528_GPIO_OUT_ENABLE 0x80
-#define BD70528_GPIO_OUT_DISABLE 0x0
-
-#define BD70528_GPIO_OUT_HI 0x1
-#define BD70528_GPIO_OUT_LO 0x0
-#define BD70528_GPIO_OUT_MASK 0x1
-
-#define BD70528_GPIO_IN_STATE_BASE 1
-
-/* RTC masks to mask out reserved bits */
-
-#define BD70528_MASK_ELAPSED_TIMER_EN 0x1
-/* Mask second, min and hour fields
- * HW would support ALM irq for over 24h
- * (by setting day, month and year too)
- * but as we wish to keep this same as for
- * wake-up we limit ALM to 24H and only
- * unmask sec, min and hour
- */
-#define BD70528_MASK_WAKE_EN 0x1
-
-/* WDT masks */
-#define BD70528_MASK_WDT_EN 0x1
-#define BD70528_MASK_WDT_HOUR 0x1
-#define BD70528_MASK_WDT_MINUTE 0x7f
-#define BD70528_MASK_WDT_SEC 0x7f
-
-#define BD70528_WDT_STATE_BIT 0x1
-#define BD70528_ELAPSED_STATE_BIT 0x2
-#define BD70528_WAKE_STATE_BIT 0x4
-
-/* Charger masks */
-#define BD70528_MASK_CHG_STAT 0x7f
-#define BD70528_MASK_CHG_BAT_TIMER 0x20
-#define BD70528_MASK_CHG_BAT_OVERVOLT 0x10
-#define BD70528_MASK_CHG_BAT_DETECT 0x1
-#define BD70528_MASK_CHG_DCIN1_UVLO 0x1
-#define BD70528_MASK_CHG_DCIN_ILIM 0x3f
-#define BD70528_MASK_CHG_CHG_CURR 0x1f
-#define BD70528_MASK_CHG_TRICKLE_CURR 0x10
-
-/*
- * Note, external battery register is the lonely rider at
- * address 0xc5. See how to stuff that in the regmap
- */
-#define BD70528_MAX_REGISTER 0x94
-
-/* Buck control masks */
-#define BD70528_MASK_RUN_EN 0x4
-#define BD70528_MASK_STBY_EN 0x2
-#define BD70528_MASK_IDLE_EN 0x1
-#define BD70528_MASK_LED1_EN 0x1
-#define BD70528_MASK_LED2_EN 0x10
-
-#define BD70528_MASK_BUCK_VOLT 0xf
-#define BD70528_MASK_LDO_VOLT 0x1f
-#define BD70528_MASK_LED1_VOLT 0x1
-#define BD70528_MASK_LED2_VOLT 0x10
-
-/* Misc irq masks */
-#define BD70528_INT_MASK_SHORT_PUSH 1
-#define BD70528_INT_MASK_AUTO_WAKE 2
-#define BD70528_INT_MASK_POWER_STATE 4
-
-#define BD70528_MASK_BUCK_RAMP 0x10
-#define BD70528_SIFT_BUCK_RAMP 4
-
-#if IS_ENABLED(CONFIG_BD70528_WATCHDOG)
-
-int bd70528_wdt_set(struct rohm_regmap_dev *data, int enable, int *old_state);
-void bd70528_wdt_lock(struct rohm_regmap_dev *data);
-void bd70528_wdt_unlock(struct rohm_regmap_dev *data);
-
-#else /* CONFIG_BD70528_WATCHDOG */
-
-static inline int bd70528_wdt_set(struct rohm_regmap_dev *data, int enable,
- int *old_state)
-{
- return 0;
-}
-
-static inline void bd70528_wdt_lock(struct rohm_regmap_dev *data)
-{
-}
-
-static inline void bd70528_wdt_unlock(struct rohm_regmap_dev *data)
-{
-}
-
-#endif /* CONFIG_BD70528_WATCHDOG */
-
-#endif /* __LINUX_MFD_BD70528_H__ */
diff --git a/include/linux/mfd/rohm-bd71815.h b/include/linux/mfd/rohm-bd71815.h
new file mode 100644
index 000000000000..ec6d9612bebe
--- /dev/null
+++ b/include/linux/mfd/rohm-bd71815.h
@@ -0,0 +1,562 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2021 ROHM Semiconductors.
+ *
+ * Author: Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+ *
+ * Copyright 2014 Embest Technology Co. Ltd. Inc.
+ *
+ * Author: yanglsh@embest-tech.com
+ */
+
+#ifndef _MFD_BD71815_H
+#define _MFD_BD71815_H
+
+#include <linux/regmap.h>
+
+enum {
+ BD71815_BUCK1 = 0,
+ BD71815_BUCK2,
+ BD71815_BUCK3,
+ BD71815_BUCK4,
+ BD71815_BUCK5,
+ /* General Purpose */
+ BD71815_LDO1,
+ BD71815_LDO2,
+ BD71815_LDO3,
+ /* LDOs for SD Card and SD Card Interface */
+ BD71815_LDO4,
+ BD71815_LDO5,
+ /* LDO for DDR Reference Voltage */
+ BD71815_LDODVREF,
+ /* LDO for Low-Power State Retention */
+ BD71815_LDOLPSR,
+ BD71815_WLED,
+ BD71815_REGULATOR_CNT,
+};
+
+#define BD71815_SUPPLY_STATE_ENABLED 0x1
+
+enum {
+ BD71815_REG_DEVICE = 0,
+ BD71815_REG_PWRCTRL,
+ BD71815_REG_BUCK1_MODE,
+ BD71815_REG_BUCK2_MODE,
+ BD71815_REG_BUCK3_MODE,
+ BD71815_REG_BUCK4_MODE,
+ BD71815_REG_BUCK5_MODE,
+ BD71815_REG_BUCK1_VOLT_H,
+ BD71815_REG_BUCK1_VOLT_L,
+ BD71815_REG_BUCK2_VOLT_H,
+ BD71815_REG_BUCK2_VOLT_L,
+ BD71815_REG_BUCK3_VOLT,
+ BD71815_REG_BUCK4_VOLT,
+ BD71815_REG_BUCK5_VOLT,
+ BD71815_REG_LED_CTRL,
+ BD71815_REG_LED_DIMM,
+ BD71815_REG_LDO_MODE1,
+ BD71815_REG_LDO_MODE2,
+ BD71815_REG_LDO_MODE3,
+ BD71815_REG_LDO_MODE4,
+ BD71815_REG_LDO1_VOLT,
+ BD71815_REG_LDO2_VOLT,
+ BD71815_REG_LDO3_VOLT,
+ BD71815_REG_LDO4_VOLT,
+ BD71815_REG_LDO5_VOLT_H,
+ BD71815_REG_LDO5_VOLT_L,
+ BD71815_REG_BUCK_PD_DIS,
+ BD71815_REG_LDO_PD_DIS,
+ BD71815_REG_GPO,
+ BD71815_REG_OUT32K,
+ BD71815_REG_SEC,
+ BD71815_REG_MIN,
+ BD71815_REG_HOUR,
+ BD71815_REG_WEEK,
+ BD71815_REG_DAY,
+ BD71815_REG_MONTH,
+ BD71815_REG_YEAR,
+ BD71815_REG_ALM0_SEC,
+
+ BD71815_REG_ALM1_SEC = 0x2C,
+
+ BD71815_REG_ALM0_MASK = 0x33,
+ BD71815_REG_ALM1_MASK,
+ BD71815_REG_ALM2,
+ BD71815_REG_TRIM,
+ BD71815_REG_CONF,
+ BD71815_REG_SYS_INIT,
+ BD71815_REG_CHG_STATE,
+ BD71815_REG_CHG_LAST_STATE,
+ BD71815_REG_BAT_STAT,
+ BD71815_REG_DCIN_STAT,
+ BD71815_REG_VSYS_STAT,
+ BD71815_REG_CHG_STAT,
+ BD71815_REG_CHG_WDT_STAT,
+ BD71815_REG_BAT_TEMP,
+ BD71815_REG_IGNORE_0,
+ BD71815_REG_INHIBIT_0,
+ BD71815_REG_DCIN_CLPS,
+ BD71815_REG_VSYS_REG,
+ BD71815_REG_VSYS_MAX,
+ BD71815_REG_VSYS_MIN,
+ BD71815_REG_CHG_SET1,
+ BD71815_REG_CHG_SET2,
+ BD71815_REG_CHG_WDT_PRE,
+ BD71815_REG_CHG_WDT_FST,
+ BD71815_REG_CHG_IPRE,
+ BD71815_REG_CHG_IFST,
+ BD71815_REG_CHG_IFST_TERM,
+ BD71815_REG_CHG_VPRE,
+ BD71815_REG_CHG_VBAT_1,
+ BD71815_REG_CHG_VBAT_2,
+ BD71815_REG_CHG_VBAT_3,
+ BD71815_REG_CHG_LED_1,
+ BD71815_REG_VF_TH,
+ BD71815_REG_BAT_SET_1,
+ BD71815_REG_BAT_SET_2,
+ BD71815_REG_BAT_SET_3,
+ BD71815_REG_ALM_VBAT_TH_U,
+ BD71815_REG_ALM_VBAT_TH_L,
+ BD71815_REG_ALM_DCIN_TH,
+ BD71815_REG_ALM_VSYS_TH,
+ BD71815_REG_VM_IBAT_U,
+ BD71815_REG_VM_IBAT_L,
+ BD71815_REG_VM_VBAT_U,
+ BD71815_REG_VM_VBAT_L,
+ BD71815_REG_VM_BTMP,
+ BD71815_REG_VM_VTH,
+ BD71815_REG_VM_DCIN_U,
+ BD71815_REG_VM_DCIN_L,
+ BD71815_REG_VM_VSYS,
+ BD71815_REG_VM_VF,
+ BD71815_REG_VM_OCI_PRE_U,
+ BD71815_REG_VM_OCI_PRE_L,
+ BD71815_REG_VM_OCV_PRE_U,
+ BD71815_REG_VM_OCV_PRE_L,
+ BD71815_REG_VM_OCI_PST_U,
+ BD71815_REG_VM_OCI_PST_L,
+ BD71815_REG_VM_OCV_PST_U,
+ BD71815_REG_VM_OCV_PST_L,
+ BD71815_REG_VM_SA_VBAT_U,
+ BD71815_REG_VM_SA_VBAT_L,
+ BD71815_REG_VM_SA_IBAT_U,
+ BD71815_REG_VM_SA_IBAT_L,
+ BD71815_REG_CC_CTRL,
+ BD71815_REG_CC_BATCAP1_TH_U,
+ BD71815_REG_CC_BATCAP1_TH_L,
+ BD71815_REG_CC_BATCAP2_TH_U,
+ BD71815_REG_CC_BATCAP2_TH_L,
+ BD71815_REG_CC_BATCAP3_TH_U,
+ BD71815_REG_CC_BATCAP3_TH_L,
+ BD71815_REG_CC_STAT,
+ BD71815_REG_CC_CCNTD_3,
+ BD71815_REG_CC_CCNTD_2,
+ BD71815_REG_CC_CCNTD_1,
+ BD71815_REG_CC_CCNTD_0,
+ BD71815_REG_CC_CURCD_U,
+ BD71815_REG_CC_CURCD_L,
+ BD71815_REG_VM_OCUR_THR_1,
+ BD71815_REG_VM_OCUR_DUR_1,
+ BD71815_REG_VM_OCUR_THR_2,
+ BD71815_REG_VM_OCUR_DUR_2,
+ BD71815_REG_VM_OCUR_THR_3,
+ BD71815_REG_VM_OCUR_DUR_3,
+ BD71815_REG_VM_OCUR_MON,
+ BD71815_REG_VM_BTMP_OV_THR,
+ BD71815_REG_VM_BTMP_OV_DUR,
+ BD71815_REG_VM_BTMP_LO_THR,
+ BD71815_REG_VM_BTMP_LO_DUR,
+ BD71815_REG_VM_BTMP_MON,
+ BD71815_REG_INT_EN_01,
+
+ BD71815_REG_INT_EN_11 = 0x95,
+ BD71815_REG_INT_EN_12,
+ BD71815_REG_INT_STAT,
+ BD71815_REG_INT_STAT_01,
+ BD71815_REG_INT_STAT_02,
+ BD71815_REG_INT_STAT_03,
+ BD71815_REG_INT_STAT_04,
+ BD71815_REG_INT_STAT_05,
+ BD71815_REG_INT_STAT_06,
+ BD71815_REG_INT_STAT_07,
+ BD71815_REG_INT_STAT_08,
+ BD71815_REG_INT_STAT_09,
+ BD71815_REG_INT_STAT_10,
+ BD71815_REG_INT_STAT_11,
+ BD71815_REG_INT_STAT_12,
+ BD71815_REG_INT_UPDATE,
+
+ BD71815_REG_VM_VSYS_U = 0xC0,
+ BD71815_REG_VM_VSYS_L,
+ BD71815_REG_VM_SA_VSYS_U,
+ BD71815_REG_VM_SA_VSYS_L,
+
+ BD71815_REG_VM_SA_IBAT_MIN_U = 0xD0,
+ BD71815_REG_VM_SA_IBAT_MIN_L,
+ BD71815_REG_VM_SA_IBAT_MAX_U,
+ BD71815_REG_VM_SA_IBAT_MAX_L,
+ BD71815_REG_VM_SA_VBAT_MIN_U,
+ BD71815_REG_VM_SA_VBAT_MIN_L,
+ BD71815_REG_VM_SA_VBAT_MAX_U,
+ BD71815_REG_VM_SA_VBAT_MAX_L,
+ BD71815_REG_VM_SA_VSYS_MIN_U,
+ BD71815_REG_VM_SA_VSYS_MIN_L,
+ BD71815_REG_VM_SA_VSYS_MAX_U,
+ BD71815_REG_VM_SA_VSYS_MAX_L,
+ BD71815_REG_VM_SA_MINMAX_CLR,
+
+ BD71815_REG_REX_CCNTD_3 = 0xE0,
+ BD71815_REG_REX_CCNTD_2,
+ BD71815_REG_REX_CCNTD_1,
+ BD71815_REG_REX_CCNTD_0,
+ BD71815_REG_REX_SA_VBAT_U,
+ BD71815_REG_REX_SA_VBAT_L,
+ BD71815_REG_REX_CTRL_1,
+ BD71815_REG_REX_CTRL_2,
+ BD71815_REG_FULL_CCNTD_3,
+ BD71815_REG_FULL_CCNTD_2,
+ BD71815_REG_FULL_CCNTD_1,
+ BD71815_REG_FULL_CCNTD_0,
+ BD71815_REG_FULL_CTRL,
+
+ BD71815_REG_CCNTD_CHG_3 = 0xF0,
+ BD71815_REG_CCNTD_CHG_2,
+
+ BD71815_REG_TEST_MODE = 0xFE,
+ BD71815_MAX_REGISTER,
+};
+
+/* BD71815_REG_BUCK1_MODE bits */
+#define BD71815_BUCK_RAMPRATE_MASK 0xC0
+#define BD71815_BUCK_RAMPRATE_10P00MV 0x0
+#define BD71815_BUCK_RAMPRATE_5P00MV 0x01
+#define BD71815_BUCK_RAMPRATE_2P50MV 0x02
+#define BD71815_BUCK_RAMPRATE_1P25MV 0x03
+
+#define BD71815_BUCK_PWM_FIXED BIT(4)
+#define BD71815_BUCK_SNVS_ON BIT(3)
+#define BD71815_BUCK_RUN_ON BIT(2)
+#define BD71815_BUCK_LPSR_ON BIT(1)
+#define BD71815_BUCK_SUSP_ON BIT(0)
+
+/* BD71815_REG_BUCK1_VOLT_H bits */
+#define BD71815_BUCK_DVSSEL BIT(7)
+#define BD71815_BUCK_STBY_DVS BIT(6)
+#define BD71815_VOLT_MASK 0x3F
+#define BD71815_BUCK1_H_DEFAULT 0x14
+#define BD71815_BUCK1_L_DEFAULT 0x14
+
+/* BD71815_REG_BUCK2_VOLT_H bits */
+#define BD71815_BUCK2_H_DEFAULT 0x14
+#define BD71815_BUCK2_L_DEFAULT 0x14
+
+/* WLED output */
+/* current register mask */
+#define LED_DIMM_MASK 0x3f
+/* LED enable bits at LED_CTRL reg */
+#define LED_CHGDONE_EN BIT(4)
+#define LED_RUN_ON BIT(2)
+#define LED_LPSR_ON BIT(1)
+#define LED_SUSP_ON BIT(0)
+
+/* BD71815_REG_LDO1_CTRL bits */
+#define LDO1_EN BIT(0)
+#define LDO2_EN BIT(1)
+#define LDO3_EN BIT(2)
+#define DVREF_EN BIT(3)
+#define VOSNVS_SW_EN BIT(4)
+
+/* LDO_MODE1_register */
+#define LDO1_SNVS_ON BIT(7)
+#define LDO1_RUN_ON BIT(6)
+#define LDO1_LPSR_ON BIT(5)
+#define LDO1_SUSP_ON BIT(4)
+/* set => register control, unset => GPIO control */
+#define LDO4_MODE_MASK BIT(3)
+#define LDO4_MODE_I2C BIT(3)
+#define LDO4_MODE_GPIO 0
+/* set => register control, unset => start when DCIN connected */
+#define LDO3_MODE_MASK BIT(2)
+#define LDO3_MODE_I2C BIT(2)
+#define LDO3_MODE_DCIN 0
+
+/* LDO_MODE2 register */
+#define LDO3_SNVS_ON BIT(7)
+#define LDO3_RUN_ON BIT(6)
+#define LDO3_LPSR_ON BIT(5)
+#define LDO3_SUSP_ON BIT(4)
+#define LDO2_SNVS_ON BIT(3)
+#define LDO2_RUN_ON BIT(2)
+#define LDO2_LPSR_ON BIT(1)
+#define LDO2_SUSP_ON BIT(0)
+
+
+/* LDO_MODE3 register */
+#define LDO5_SNVS_ON BIT(7)
+#define LDO5_RUN_ON BIT(6)
+#define LDO5_LPSR_ON BIT(5)
+#define LDO5_SUSP_ON BIT(4)
+#define LDO4_SNVS_ON BIT(3)
+#define LDO4_RUN_ON BIT(2)
+#define LDO4_LPSR_ON BIT(1)
+#define LDO4_SUSP_ON BIT(0)
+
+/* LDO_MODE4 register */
+#define DVREF_SNVS_ON BIT(7)
+#define DVREF_RUN_ON BIT(6)
+#define DVREF_LPSR_ON BIT(5)
+#define DVREF_SUSP_ON BIT(4)
+#define LDO_LPSR_SNVS_ON BIT(3)
+#define LDO_LPSR_RUN_ON BIT(2)
+#define LDO_LPSR_LPSR_ON BIT(1)
+#define LDO_LPSR_SUSP_ON BIT(0)
+
+/* BD71815_REG_OUT32K bits */
+#define OUT32K_EN BIT(0)
+#define OUT32K_MODE BIT(1)
+#define OUT32K_MODE_CMOS BIT(1)
+#define OUT32K_MODE_OPEN_DRAIN 0
+
+/* BD71815_REG_BAT_STAT bits */
+#define BAT_DET BIT(5)
+#define BAT_DET_OFFSET 5
+#define BAT_DET_DONE BIT(4)
+#define VBAT_OV BIT(3)
+#define DBAT_DET BIT(0)
+
+/* BD71815_REG_VBUS_STAT bits */
+#define VBUS_DET BIT(0)
+
+#define BD71815_REG_RTC_START BD71815_REG_SEC
+#define BD71815_REG_RTC_ALM_START BD71815_REG_ALM0_SEC
+
+/* BD71815_REG_ALM0_MASK bits */
+#define A0_ONESEC BIT(7)
+
+/* BD71815_REG_INT_EN_00 bits */
+#define ALMALE BIT(0)
+
+/* BD71815_REG_INT_STAT_03 bits */
+#define DCIN_MON_DET BIT(1)
+#define DCIN_MON_RES BIT(0)
+#define POWERON_LONG BIT(2)
+#define POWERON_MID BIT(3)
+#define POWERON_SHORT BIT(4)
+#define POWERON_PRESS BIT(5)
+
+/* BD71805_REG_INT_STAT_08 bits */
+#define VBAT_MON_DET BIT(1)
+#define VBAT_MON_RES BIT(0)
+
+/* BD71805_REG_INT_STAT_11 bits */
+#define INT_STAT_11_VF_DET BIT(7)
+#define INT_STAT_11_VF_RES BIT(6)
+#define INT_STAT_11_VF125_DET BIT(5)
+#define INT_STAT_11_VF125_RES BIT(4)
+#define INT_STAT_11_OVTMP_DET BIT(3)
+#define INT_STAT_11_OVTMP_RES BIT(2)
+#define INT_STAT_11_LOTMP_DET BIT(1)
+#define INT_STAT_11_LOTMP_RES BIT(0)
+
+#define VBAT_MON_DET BIT(1)
+#define VBAT_MON_RES BIT(0)
+
+/* BD71815_REG_PWRCTRL bits */
+#define RESTARTEN BIT(0)
+
+/* BD71815_REG_GPO bits */
+#define READY_FORCE_LOW BIT(2)
+#define BD71815_GPIO_DRIVE_MASK BIT(4)
+#define BD71815_GPIO_OPEN_DRAIN 0
+#define BD71815_GPIO_CMOS BIT(4)
+
+/* BD71815 interrupt masks */
+enum {
+ BD71815_INT_EN_01_BUCKAST_MASK = 0x0F,
+ BD71815_INT_EN_02_DCINAST_MASK = 0x3E,
+ BD71815_INT_EN_03_DCINAST_MASK = 0x3F,
+ BD71815_INT_EN_04_VSYSAST_MASK = 0xCF,
+ BD71815_INT_EN_05_CHGAST_MASK = 0xFC,
+ BD71815_INT_EN_06_BATAST_MASK = 0xF3,
+ BD71815_INT_EN_07_BMONAST_MASK = 0xFE,
+ BD71815_INT_EN_08_BMONAST_MASK = 0x03,
+ BD71815_INT_EN_09_BMONAST_MASK = 0x07,
+ BD71815_INT_EN_10_BMONAST_MASK = 0x3F,
+ BD71815_INT_EN_11_TMPAST_MASK = 0xFF,
+ BD71815_INT_EN_12_ALMAST_MASK = 0x07,
+};
+/* BD71815 interrupt irqs */
+enum {
+ /* BUCK reg interrupts */
+ BD71815_INT_BUCK1_OCP,
+ BD71815_INT_BUCK2_OCP,
+ BD71815_INT_BUCK3_OCP,
+ BD71815_INT_BUCK4_OCP,
+ BD71815_INT_BUCK5_OCP,
+ BD71815_INT_LED_OVP,
+ BD71815_INT_LED_OCP,
+ BD71815_INT_LED_SCP,
+ /* DCIN1 interrupts */
+ BD71815_INT_DCIN_RMV,
+ BD71815_INT_CLPS_OUT,
+ BD71815_INT_CLPS_IN,
+ BD71815_INT_DCIN_OVP_RES,
+ BD71815_INT_DCIN_OVP_DET,
+ /* DCIN2 interrupts */
+ BD71815_INT_DCIN_MON_RES,
+ BD71815_INT_DCIN_MON_DET,
+ BD71815_INT_WDOG,
+ /* Vsys INT_STAT_04 */
+ BD71815_INT_VSYS_UV_RES,
+ BD71815_INT_VSYS_UV_DET,
+ BD71815_INT_VSYS_LOW_RES,
+ BD71815_INT_VSYS_LOW_DET,
+ BD71815_INT_VSYS_MON_RES,
+ BD71815_INT_VSYS_MON_DET,
+ /* Charger INT_STAT_05 */
+ BD71815_INT_CHG_WDG_TEMP,
+ BD71815_INT_CHG_WDG_TIME,
+ BD71815_INT_CHG_RECHARGE_RES,
+ BD71815_INT_CHG_RECHARGE_DET,
+ BD71815_INT_CHG_RANGED_TEMP_TRANSITION,
+ BD71815_INT_CHG_STATE_TRANSITION,
+ /* Battery INT_STAT_06 */
+ BD71815_INT_BAT_TEMP_NORMAL,
+ BD71815_INT_BAT_TEMP_ERANGE,
+ BD71815_INT_BAT_REMOVED,
+ BD71815_INT_BAT_DETECTED,
+ BD71815_INT_THERM_REMOVED,
+ BD71815_INT_THERM_DETECTED,
+ /* Battery Mon 1 INT_STAT_07 */
+ BD71815_INT_BAT_DEAD,
+ BD71815_INT_BAT_SHORTC_RES,
+ BD71815_INT_BAT_SHORTC_DET,
+ BD71815_INT_BAT_LOW_VOLT_RES,
+ BD71815_INT_BAT_LOW_VOLT_DET,
+ BD71815_INT_BAT_OVER_VOLT_RES,
+ BD71815_INT_BAT_OVER_VOLT_DET,
+ /* Battery Mon 2 INT_STAT_08 */
+ BD71815_INT_BAT_MON_RES,
+ BD71815_INT_BAT_MON_DET,
+ /* Battery Mon 3 (Coulomb counter) INT_STAT_09 */
+ BD71815_INT_BAT_CC_MON1,
+ BD71815_INT_BAT_CC_MON2,
+ BD71815_INT_BAT_CC_MON3,
+ /* Battery Mon 4 INT_STAT_10 */
+ BD71815_INT_BAT_OVER_CURR_1_RES,
+ BD71815_INT_BAT_OVER_CURR_1_DET,
+ BD71815_INT_BAT_OVER_CURR_2_RES,
+ BD71815_INT_BAT_OVER_CURR_2_DET,
+ BD71815_INT_BAT_OVER_CURR_3_RES,
+ BD71815_INT_BAT_OVER_CURR_3_DET,
+ /* Temperature INT_STAT_11 */
+ BD71815_INT_TEMP_BAT_LOW_RES,
+ BD71815_INT_TEMP_BAT_LOW_DET,
+ BD71815_INT_TEMP_BAT_HI_RES,
+ BD71815_INT_TEMP_BAT_HI_DET,
+ BD71815_INT_TEMP_CHIP_OVER_125_RES,
+ BD71815_INT_TEMP_CHIP_OVER_125_DET,
+ BD71815_INT_TEMP_CHIP_OVER_VF_RES,
+ BD71815_INT_TEMP_CHIP_OVER_VF_DET,
+ /* RTC Alarm INT_STAT_12 */
+ BD71815_INT_RTC0,
+ BD71815_INT_RTC1,
+ BD71815_INT_RTC2,
+};
+
+#define BD71815_INT_BUCK1_OCP_MASK BIT(0)
+#define BD71815_INT_BUCK2_OCP_MASK BIT(1)
+#define BD71815_INT_BUCK3_OCP_MASK BIT(2)
+#define BD71815_INT_BUCK4_OCP_MASK BIT(3)
+#define BD71815_INT_BUCK5_OCP_MASK BIT(4)
+#define BD71815_INT_LED_OVP_MASK BIT(5)
+#define BD71815_INT_LED_OCP_MASK BIT(6)
+#define BD71815_INT_LED_SCP_MASK BIT(7)
+
+#define BD71815_INT_DCIN_RMV_MASK BIT(1)
+#define BD71815_INT_CLPS_OUT_MASK BIT(2)
+#define BD71815_INT_CLPS_IN_MASK BIT(3)
+#define BD71815_INT_DCIN_OVP_RES_MASK BIT(4)
+#define BD71815_INT_DCIN_OVP_DET_MASK BIT(5)
+
+#define BD71815_INT_DCIN_MON_RES_MASK BIT(0)
+#define BD71815_INT_DCIN_MON_DET_MASK BIT(1)
+#define BD71815_INT_WDOG_MASK BIT(6)
+
+#define BD71815_INT_VSYS_UV_RES_MASK BIT(0)
+#define BD71815_INT_VSYS_UV_DET_MASK BIT(1)
+#define BD71815_INT_VSYS_LOW_RES_MASK BIT(2)
+#define BD71815_INT_VSYS_LOW_DET_MASK BIT(3)
+#define BD71815_INT_VSYS_MON_RES_MASK BIT(6)
+#define BD71815_INT_VSYS_MON_DET_MASK BIT(7)
+
+#define BD71815_INT_CHG_WDG_TEMP_MASK BIT(2)
+#define BD71815_INT_CHG_WDG_TIME_MASK BIT(3)
+#define BD71815_INT_CHG_RECHARGE_RES_MASK BIT(4)
+#define BD71815_INT_CHG_RECHARGE_DET_MASK BIT(5)
+#define BD71815_INT_CHG_RANGED_TEMP_TRANSITION_MASK BIT(6)
+#define BD71815_INT_CHG_STATE_TRANSITION_MASK BIT(7)
+
+#define BD71815_INT_BAT_TEMP_NORMAL_MASK BIT(0)
+#define BD71815_INT_BAT_TEMP_ERANGE_MASK BIT(1)
+#define BD71815_INT_BAT_REMOVED_MASK BIT(4)
+#define BD71815_INT_BAT_DETECTED_MASK BIT(5)
+#define BD71815_INT_THERM_REMOVED_MASK BIT(6)
+#define BD71815_INT_THERM_DETECTED_MASK BIT(7)
+
+#define BD71815_INT_BAT_DEAD_MASK BIT(1)
+#define BD71815_INT_BAT_SHORTC_RES_MASK BIT(2)
+#define BD71815_INT_BAT_SHORTC_DET_MASK BIT(3)
+#define BD71815_INT_BAT_LOW_VOLT_RES_MASK BIT(4)
+#define BD71815_INT_BAT_LOW_VOLT_DET_MASK BIT(5)
+#define BD71815_INT_BAT_OVER_VOLT_RES_MASK BIT(6)
+#define BD71815_INT_BAT_OVER_VOLT_DET_MASK BIT(7)
+
+#define BD71815_INT_BAT_MON_RES_MASK BIT(0)
+#define BD71815_INT_BAT_MON_DET_MASK BIT(1)
+
+#define BD71815_INT_BAT_CC_MON1_MASK BIT(0)
+#define BD71815_INT_BAT_CC_MON2_MASK BIT(1)
+#define BD71815_INT_BAT_CC_MON3_MASK BIT(2)
+
+#define BD71815_INT_BAT_OVER_CURR_1_RES_MASK BIT(0)
+#define BD71815_INT_BAT_OVER_CURR_1_DET_MASK BIT(1)
+#define BD71815_INT_BAT_OVER_CURR_2_RES_MASK BIT(2)
+#define BD71815_INT_BAT_OVER_CURR_2_DET_MASK BIT(3)
+#define BD71815_INT_BAT_OVER_CURR_3_RES_MASK BIT(4)
+#define BD71815_INT_BAT_OVER_CURR_3_DET_MASK BIT(5)
+
+#define BD71815_INT_TEMP_BAT_LOW_RES_MASK BIT(0)
+#define BD71815_INT_TEMP_BAT_LOW_DET_MASK BIT(1)
+#define BD71815_INT_TEMP_BAT_HI_RES_MASK BIT(2)
+#define BD71815_INT_TEMP_BAT_HI_DET_MASK BIT(3)
+#define BD71815_INT_TEMP_CHIP_OVER_125_RES_MASK BIT(4)
+#define BD71815_INT_TEMP_CHIP_OVER_125_DET_MASK BIT(5)
+#define BD71815_INT_TEMP_CHIP_OVER_VF_RES_MASK BIT(6)
+#define BD71815_INT_TEMP_CHIP_OVER_VF_DET_MASK BIT(7)
+
+#define BD71815_INT_RTC0_MASK BIT(0)
+#define BD71815_INT_RTC1_MASK BIT(1)
+#define BD71815_INT_RTC2_MASK BIT(2)
+
+/* BD71815_REG_CC_CTRL bits */
+#define CCNTRST 0x80
+#define CCNTENB 0x40
+#define CCCALIB 0x20
+
+/* BD71815_REG_CC_CURCD */
+#define CURDIR_Discharging 0x8000
+
+/* BD71815_REG_VM_SA_IBAT */
+#define IBAT_SA_DIR_Discharging 0x8000
+
+/* BD71815_REG_REX_CTRL_1 bits */
+#define REX_CLR BIT(4)
+
+/* BD71815_REG_REX_CTRL_1 bits */
+#define REX_PMU_STATE_MASK BIT(2)
+
+/* BD71815_REG_LED_CTRL bits */
+#define CHGDONE_LED_EN BIT(4)
+
+#endif /* __LINUX_MFD_BD71815_H */
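
A small sketch against the register map above: the RTC time block runs from
BD71815_REG_SEC (aliased as BD71815_REG_RTC_START) through BD71815_REG_YEAR,
seven consecutive registers, so it can be fetched in one bulk read (the helper
name and buffer handling are illustrative):

#include <linux/mfd/rohm-bd71815.h>

#define EXAMPLE_BD71815_RTC_NREGS 7 /* SEC..YEAR */

static int example_read_time(struct regmap *regmap,
			     u8 buf[EXAMPLE_BD71815_RTC_NREGS])
{
	return regmap_bulk_read(regmap, BD71815_REG_RTC_START, buf,
				EXAMPLE_BD71815_RTC_NREGS);
}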
diff --git a/include/linux/mfd/rohm-bd71828.h b/include/linux/mfd/rohm-bd71828.h
index 017a4c01cb31..3b5f3a7db4bd 100644
--- a/include/linux/mfd/rohm-bd71828.h
+++ b/include/linux/mfd/rohm-bd71828.h
@@ -26,11 +26,11 @@ enum {
BD71828_REGULATOR_AMOUNT,
};
-#define BD71828_BUCK1267_VOLTS 0xEF
-#define BD71828_BUCK3_VOLTS 0x10
-#define BD71828_BUCK4_VOLTS 0x20
-#define BD71828_BUCK5_VOLTS 0x10
-#define BD71828_LDO_VOLTS 0x32
+#define BD71828_BUCK1267_VOLTS 0x100
+#define BD71828_BUCK3_VOLTS 0x20
+#define BD71828_BUCK4_VOLTS 0x40
+#define BD71828_BUCK5_VOLTS 0x20
+#define BD71828_LDO_VOLTS 0x40
/* LDO6 is a fixed 1.8V voltage */
#define BD71828_LDO_6_VOLTAGE 1800000
@@ -151,6 +151,9 @@ enum {
#define BD71828_REG_GPIO_CTRL3 0x49
#define BD71828_REG_IO_STAT 0xed
+/* clk */
+#define BD71828_REG_OUT32K 0x4b
+
/* RTC */
#define BD71828_REG_RTC_SEC 0x4c
#define BD71828_REG_RTC_MINUTE 0x4d
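
The corrected *_VOLTS values read as selector counts (0x100 = 256 selectors
for an 8-bit VSEL field) rather than highest-selector values, which is what
regulator_desc.n_voltages expects. A partial, illustrative descriptor (a real
one also needs ops, voltage ranges and VSEL register fields):

#include <linux/mfd/rohm-bd71828.h>
#include <linux/regulator/driver.h>

static const struct regulator_desc example_buck1_desc = {
	.name = "buck1",
	.id = BD71828_BUCK1,
	.n_voltages = BD71828_BUCK1267_VOLTS,
};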
diff --git a/include/linux/mfd/rohm-bd718x7.h b/include/linux/mfd/rohm-bd718x7.h
index bee2474a8f9f..df2918198d37 100644
--- a/include/linux/mfd/rohm-bd718x7.h
+++ b/include/linux/mfd/rohm-bd718x7.h
@@ -310,17 +310,4 @@ enum {
BD718XX_PWRBTN_LONG_PRESS_15S
};
-struct bd718xx {
- /*
- * Please keep this as the first member here as some
- * drivers (clk) supporting more than one chip may only know this
- * generic struct 'struct rohm_regmap_dev' and assume it is
- * the first chunk of parent device's private data.
- */
- struct rohm_regmap_dev chip;
-
- int chip_irq;
- struct regmap_irq_chip_data *irq_data;
-};
-
#endif /* __LINUX_MFD_BD718XX_H__ */
diff --git a/include/linux/mfd/rohm-bd957x.h b/include/linux/mfd/rohm-bd957x.h
new file mode 100644
index 000000000000..acc920b64f75
--- /dev/null
+++ b/include/linux/mfd/rohm-bd957x.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2021 ROHM Semiconductors */
+
+#ifndef __LINUX_MFD_BD957X_H__
+#define __LINUX_MFD_BD957X_H__
+
+enum {
+ BD957X_VD50,
+ BD957X_VD18,
+ BD957X_VDDDR,
+ BD957X_VD10,
+ BD957X_VOUTL1,
+ BD957X_VOUTS1,
+};
+
+/*
+ * The BD9576 has its own IRQ 'blocks' for:
+ * - I2C/thermal
+ * - Over voltage protection
+ * - Short-circuit protection
+ * - Over current protection
+ * - Over voltage detection
+ * - Under voltage detection
+ * - Under voltage protection
+ * - 'system interrupt'.
+ *
+ * Each of the blocks has a status register giving more accurate IRQ source
+ * information - for example, which of the regulators is in over-voltage.
+ *
+ * On top of this, there is a "main IRQ" status register where each bit
+ * indicates which of the sub-blocks have active IRQs. Fine. That would fit
+ * regmap-irq main status handling. Except that:
+ * - Only some sub-IRQs can be masked.
+ * - The IRQ informs us about a fault condition, not about fault state changes.
+ * The IRQ line is kept asserted until the detected condition is acked
+ * AND cleared in HW. This is annoying for IRQs like the one signalling high
+ * temperature, because if the IRQ is not disabled it keeps the CPU in an
+ * IRQ handling loop.
+ *
+ * For now we just use the main IRQ register as the source of our IRQ
+ * information and bind regmap-irq to it. We leave fine-grained sub-IRQ
+ * register handling to handlers in the sub-devices. The regulator driver
+ * shall read which regulators are the source of the problem - or whether the
+ * detected error is a regulator temperature error. The sub-drivers also
+ * handle masking of "sub-IRQs" where this is supported/needed.
+ *
+ * To overcome the problem of the HW keeping the IRQ asserted, we call
+ * disable_irq_nosync() from the sub-device handler and add a delayed work
+ * to re-enable the IRQ roughly 1 second later. This should keep our CPU
+ * out of a busy-loop.
+ */
+#define IRQS_SILENT_MS 1000
+
+enum {
+ BD9576_INT_THERM,
+ BD9576_INT_OVP,
+ BD9576_INT_SCP,
+ BD9576_INT_OCP,
+ BD9576_INT_OVD,
+ BD9576_INT_UVD,
+ BD9576_INT_UVP,
+ BD9576_INT_SYS,
+};
+
+#define BD957X_REG_SMRB_ASSERT 0x15
+#define BD957X_REG_PMIC_INTERNAL_STAT 0x20
+#define BD957X_REG_INT_THERM_STAT 0x23
+#define BD957X_REG_INT_THERM_MASK 0x24
+#define BD957X_REG_INT_OVP_STAT 0x25
+#define BD957X_REG_INT_SCP_STAT 0x26
+#define BD957X_REG_INT_OCP_STAT 0x27
+#define BD957X_REG_INT_OVD_STAT 0x28
+#define BD957X_REG_INT_UVD_STAT 0x29
+#define BD957X_REG_INT_UVP_STAT 0x2a
+#define BD957X_REG_INT_SYS_STAT 0x2b
+#define BD957X_REG_INT_SYS_MASK 0x2c
+#define BD957X_REG_INT_MAIN_STAT 0x30
+#define BD957X_REG_INT_MAIN_MASK 0x31
+
+#define UVD_IRQ_VALID_MASK 0x6F
+#define OVD_IRQ_VALID_MASK 0x2F
+
+#define BD957X_MASK_INT_MAIN_THERM BIT(0)
+#define BD957X_MASK_INT_MAIN_OVP BIT(1)
+#define BD957X_MASK_INT_MAIN_SCP BIT(2)
+#define BD957X_MASK_INT_MAIN_OCP BIT(3)
+#define BD957X_MASK_INT_MAIN_OVD BIT(4)
+#define BD957X_MASK_INT_MAIN_UVD BIT(5)
+#define BD957X_MASK_INT_MAIN_UVP BIT(6)
+#define BD957X_MASK_INT_MAIN_SYS BIT(7)
+#define BD957X_MASK_INT_ALL 0xff
+
+#define BD957X_REG_WDT_CONF 0x16
+
+#define BD957X_REG_POW_TRIGGER1 0x41
+#define BD957X_REG_POW_TRIGGER2 0x42
+#define BD957X_REG_POW_TRIGGER3 0x43
+#define BD957X_REG_POW_TRIGGER4 0x44
+#define BD957X_REG_POW_TRIGGERL1 0x45
+#define BD957X_REG_POW_TRIGGERS1 0x46
+
+#define BD957X_REGULATOR_EN_MASK 0xff
+#define BD957X_REGULATOR_DIS_VAL 0xff
+
+#define BD957X_VSEL_REG_MASK 0xff
+
+#define BD957X_MASK_VOUT1_TUNE 0x87
+#define BD957X_MASK_VOUT2_TUNE 0x87
+#define BD957X_MASK_VOUT3_TUNE 0x1f
+#define BD957X_MASK_VOUT4_TUNE 0x1f
+#define BD957X_MASK_VOUTL1_TUNE 0x87
+
+#define BD957X_REG_VOUT1_TUNE 0x50
+#define BD957X_REG_VOUT2_TUNE 0x53
+#define BD957X_REG_VOUT3_TUNE 0x56
+#define BD957X_REG_VOUT4_TUNE 0x59
+#define BD957X_REG_VOUTL1_TUNE 0x5c
+
+#define BD9576_REG_VOUT1_OVD 0x51
+#define BD9576_REG_VOUT1_UVD 0x52
+#define BD9576_REG_VOUT2_OVD 0x54
+#define BD9576_REG_VOUT2_UVD 0x55
+#define BD9576_REG_VOUT3_OVD 0x57
+#define BD9576_REG_VOUT3_UVD 0x58
+#define BD9576_REG_VOUT4_OVD 0x5a
+#define BD9576_REG_VOUT4_UVD 0x5b
+#define BD9576_REG_VOUTL1_OVD 0x5d
+#define BD9576_REG_VOUTL1_UVD 0x5e
+
+#define BD9576_MASK_XVD 0x7f
+
+#define BD9576_REG_VOUT1S_OCW 0x5f
+#define BD9576_REG_VOUT1S_OCP 0x60
+
+#define BD9576_MASK_VOUT1S_OCW 0x3f
+#define BD9576_MASK_VOUT1S_OCP 0x3f
+
+#define BD957X_MAX_REGISTER 0x61
+
+#endif
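
A sketch of the silencing pattern the comment block above describes: disable
the IRQ in the handler and re-enable it from delayed work after IRQS_SILENT_MS
(the context struct and function names are illustrative assumptions):

#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/mfd/rohm-bd957x.h>
#include <linux/workqueue.h>

struct example_irq_ctx {
	int irq;
	struct delayed_work reenable;
};

static void example_reenable(struct work_struct *work)
{
	struct example_irq_ctx *ctx =
		container_of(work, struct example_irq_ctx, reenable.work);

	enable_irq(ctx->irq);
}

static irqreturn_t example_fault_handler(int irq, void *data)
{
	struct example_irq_ctx *ctx = data;

	/* The line stays asserted until the HW is cleared; back off a while. */
	disable_irq_nosync(irq);
	schedule_delayed_work(&ctx->reenable, msecs_to_jiffies(IRQS_SILENT_MS));
	return IRQ_HANDLED;
}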
diff --git a/include/linux/mfd/rohm-generic.h b/include/linux/mfd/rohm-generic.h
index 4283b5b33e04..4eeb22876bad 100644
--- a/include/linux/mfd/rohm-generic.h
+++ b/include/linux/mfd/rohm-generic.h
@@ -8,10 +8,14 @@
#include <linux/regulator/driver.h>
enum rohm_chip_type {
- ROHM_CHIP_TYPE_BD71837 = 0,
- ROHM_CHIP_TYPE_BD71847,
- ROHM_CHIP_TYPE_BD70528,
+ ROHM_CHIP_TYPE_BD9571,
+ ROHM_CHIP_TYPE_BD9573,
+ ROHM_CHIP_TYPE_BD9574,
+ ROHM_CHIP_TYPE_BD9576,
+ ROHM_CHIP_TYPE_BD71815,
ROHM_CHIP_TYPE_BD71828,
+ ROHM_CHIP_TYPE_BD71837,
+ ROHM_CHIP_TYPE_BD71847,
ROHM_CHIP_TYPE_AMOUNT
};
@@ -20,14 +24,13 @@ struct rohm_regmap_dev {
struct regmap *regmap;
};
-enum {
- ROHM_DVS_LEVEL_UNKNOWN,
- ROHM_DVS_LEVEL_RUN,
- ROHM_DVS_LEVEL_IDLE,
- ROHM_DVS_LEVEL_SUSPEND,
- ROHM_DVS_LEVEL_LPSR,
- ROHM_DVS_LEVEL_MAX = ROHM_DVS_LEVEL_LPSR,
-};
+#define ROHM_DVS_LEVEL_RUN BIT(0)
+#define ROHM_DVS_LEVEL_IDLE BIT(1)
+#define ROHM_DVS_LEVEL_SUSPEND BIT(2)
+#define ROHM_DVS_LEVEL_LPSR BIT(3)
+#define ROHM_DVS_LEVEL_SNVS BIT(4)
+#define ROHM_DVS_LEVEL_VALID_AMOUNT 5
+#define ROHM_DVS_LEVEL_UNKNOWN 0
/**
* struct rohm_dvs_config - dynamic voltage scaling register descriptions
@@ -65,6 +68,9 @@ struct rohm_dvs_config {
unsigned int lpsr_reg;
unsigned int lpsr_mask;
unsigned int lpsr_on_mask;
+ unsigned int snvs_reg;
+ unsigned int snvs_mask;
+ unsigned int snvs_on_mask;
};
#if IS_ENABLED(CONFIG_REGULATOR_ROHM)
@@ -73,14 +79,8 @@ int rohm_regulator_set_dvs_levels(const struct rohm_dvs_config *dvs,
const struct regulator_desc *desc,
struct regmap *regmap);
-#else
-static inline int rohm_regulator_set_dvs_levels(const struct rohm_dvs_config *dvs,
- struct device_node *np,
- const struct regulator_desc *desc,
- struct regmap *regmap)
-{
- return 0;
-}
+int rohm_regulator_set_voltage_sel_restricted(struct regulator_dev *rdev,
+ unsigned int sel);
#endif
#endif
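
Since the DVS levels above are now one-hot BIT() flags instead of sequential
enum values, several supported levels can be OR'ed into a single mask and
tested individually. A minimal illustrative sketch:

#include <linux/mfd/rohm-generic.h>
#include <linux/types.h>

static bool example_dvs_level_supported(unsigned int supported,
					unsigned int level)
{
	return supported & level;
}

For instance, example_dvs_level_supported(ROHM_DVS_LEVEL_RUN |
ROHM_DVS_LEVEL_SUSPEND, ROHM_DVS_LEVEL_IDLE) evaluates to false.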
diff --git a/include/linux/mfd/rsmu.h b/include/linux/mfd/rsmu.h
new file mode 100644
index 000000000000..6870de608233
--- /dev/null
+++ b/include/linux/mfd/rsmu.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Core interface for Renesas Synchronization Management Unit (SMU) devices.
+ *
+ * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company.
+ */
+
+#ifndef __LINUX_MFD_RSMU_H
+#define __LINUX_MFD_RSMU_H
+
+/* The supported devices are ClockMatrix, Sabre and SnowLotus */
+enum rsmu_type {
+ RSMU_CM = 0x34000,
+ RSMU_SABRE = 0x33810,
+ RSMU_SL = 0x19850,
+};
+
+/**
+ * struct rsmu_ddata - device data structure for sub devices.
+ *
+ * @dev: i2c/spi device.
+ * @regmap: i2c/spi bus access.
+ * @lock: mutex used by sub devices to make sure a series of
+ * bus access requests are not interrupted.
+ * @type: RSMU device type.
+ * @page: i2c/spi bus driver internal use only.
+ */
+struct rsmu_ddata {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex lock;
+ enum rsmu_type type;
+ u16 page;
+};
+#endif /* __LINUX_MFD_RSMU_H */
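
A sketch of how a sub-device might use @lock to keep a multi-register
transaction uninterrupted (the register offsets are placeholders, not taken
from this header):

#include <linux/mfd/rsmu.h>
#include <linux/mutex.h>
#include <linux/regmap.h>

static int example_rsmu_read_pair(struct rsmu_ddata *rsmu, u16 *out)
{
	unsigned int hi, lo;
	int ret;

	mutex_lock(&rsmu->lock);
	ret = regmap_read(rsmu->regmap, 0x10, &hi); /* placeholder offsets */
	if (!ret)
		ret = regmap_read(rsmu->regmap, 0x11, &lo);
	mutex_unlock(&rsmu->lock);

	if (ret)
		return ret;

	*out = (hi << 8) | lo;
	return 0;
}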
diff --git a/include/linux/mfd/rt5033-private.h b/include/linux/mfd/rt5033-private.h
index f812105c538c..2d1895c3efbf 100644
--- a/include/linux/mfd/rt5033-private.h
+++ b/include/linux/mfd/rt5033-private.h
@@ -91,14 +91,14 @@ enum rt5033_reg {
#define RT5033_RT_HZ_MASK 0x01
/* RT5033 control register */
-#define RT5033_CTRL_FCCM_BUCK_MASK 0x00
-#define RT5033_CTRL_BUCKOMS_MASK 0x01
-#define RT5033_CTRL_LDOOMS_MASK 0x02
-#define RT5033_CTRL_SLDOOMS_MASK 0x03
-#define RT5033_CTRL_EN_BUCK_MASK 0x04
-#define RT5033_CTRL_EN_LDO_MASK 0x05
-#define RT5033_CTRL_EN_SAFE_LDO_MASK 0x06
-#define RT5033_CTRL_LDO_SLEEP_MASK 0x07
+#define RT5033_CTRL_FCCM_BUCK_MASK BIT(0)
+#define RT5033_CTRL_BUCKOMS_MASK BIT(1)
+#define RT5033_CTRL_LDOOMS_MASK BIT(2)
+#define RT5033_CTRL_SLDOOMS_MASK BIT(3)
+#define RT5033_CTRL_EN_BUCK_MASK BIT(4)
+#define RT5033_CTRL_EN_LDO_MASK BIT(5)
+#define RT5033_CTRL_EN_SAFE_LDO_MASK BIT(6)
+#define RT5033_CTRL_LDO_SLEEP_MASK BIT(7)
/* RT5033 BUCK control register */
#define RT5033_BUCK_CTRL_MASK 0x1f
@@ -247,11 +247,11 @@ enum rt5033_fuel_reg {
#define RT5033_FUEL_BAT_PRESENT 0x02
/* RT5033 PMIC interrupts */
-#define RT5033_PMIC_IRQ_BUCKOCP 2
-#define RT5033_PMIC_IRQ_BUCKLV 3
-#define RT5033_PMIC_IRQ_SAFELDOLV 4
-#define RT5033_PMIC_IRQ_LDOLV 5
-#define RT5033_PMIC_IRQ_OT 6
-#define RT5033_PMIC_IRQ_VDDA_UV 7
+#define RT5033_PMIC_IRQ_BUCKOCP BIT(2)
+#define RT5033_PMIC_IRQ_BUCKLV BIT(3)
+#define RT5033_PMIC_IRQ_SAFELDOLV BIT(4)
+#define RT5033_PMIC_IRQ_LDOLV BIT(5)
+#define RT5033_PMIC_IRQ_OT BIT(6)
+#define RT5033_PMIC_IRQ_VDDA_UV BIT(7)
#endif /* __RT5033_PRIVATE_H__ */
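
The conversion above matters because the old values look like bit positions
(with FCCM at 0x00, which is no mask at all), so using them directly as masks
was broken. As BIT() masks they can go straight into regmap_update_bits(). An
illustrative sketch (RT5033_REG_CTRL is assumed from this header's register
enum):

#include <linux/mfd/rt5033-private.h>
#include <linux/regmap.h>

static int example_enable_buck(struct regmap *regmap)
{
	/* Mask and value are now the same ready-to-use BIT() constant. */
	return regmap_update_bits(regmap, RT5033_REG_CTRL,
				  RT5033_CTRL_EN_BUCK_MASK,
				  RT5033_CTRL_EN_BUCK_MASK);
}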
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index f1631a39acfc..f92fe090473d 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -67,11 +67,8 @@ struct sec_pmic_dev {
struct i2c_client *i2c;
unsigned long device_type;
- int irq_base;
int irq;
struct regmap_irq_chip_data *irq_data;
-
- bool wakeup;
};
int sec_irq_init(struct sec_pmic_dev *sec_pmic);
@@ -81,15 +78,8 @@ int sec_irq_resume(struct sec_pmic_dev *sec_pmic);
struct sec_platform_data {
struct sec_regulator_data *regulators;
struct sec_opmode_data *opmode;
- int device_type;
int num_regulators;
- int irq_base;
- int (*cfg_pmic_irq)(void);
-
- bool wakeup;
- bool buck_voltage_lock;
-
int buck_gpios[3];
int buck_ds[3];
unsigned int buck2_voltage[8];
@@ -99,35 +89,12 @@ struct sec_platform_data {
unsigned int buck4_voltage[8];
bool buck4_gpiodvs;
- int buck_set1;
- int buck_set2;
- int buck_set3;
- int buck2_enable;
- int buck3_enable;
- int buck4_enable;
int buck_default_idx;
- int buck2_default_idx;
- int buck3_default_idx;
- int buck4_default_idx;
-
int buck_ramp_delay;
- int buck2_ramp_delay;
- int buck34_ramp_delay;
- int buck5_ramp_delay;
- int buck16_ramp_delay;
- int buck7810_ramp_delay;
- int buck9_ramp_delay;
- int buck24_ramp_delay;
- int buck3_ramp_delay;
- int buck7_ramp_delay;
- int buck8910_ramp_delay;
-
- bool buck1_ramp_enable;
bool buck2_ramp_enable;
bool buck3_ramp_enable;
bool buck4_ramp_enable;
- bool buck6_ramp_enable;
int buck2_init;
int buck3_init;
diff --git a/include/linux/mfd/sc27xx-pmic.h b/include/linux/mfd/sc27xx-pmic.h
new file mode 100644
index 000000000000..57e45c0b3ae2
--- /dev/null
+++ b/include/linux/mfd/sc27xx-pmic.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_MFD_SC27XX_PMIC_H
+#define __LINUX_MFD_SC27XX_PMIC_H
+
+extern enum usb_charger_type sprd_pmic_detect_charger_type(struct device *dev);
+
+#endif /* __LINUX_MFD_SC27XX_PMIC_H */
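
The one exported helper returns an enum usb_charger_type, which comes from
<linux/usb/charger.h>, so callers need that include alongside this header. A
sketch, with 'dev' assumed to be the SC27xx PMIC device:

    #include <linux/usb/charger.h>
    #include <linux/mfd/sc27xx-pmic.h>

    enum usb_charger_type type = sprd_pmic_detect_charger_type(dev);

    /* e.g. pick an input current limit per BC1.2 charger type */
    if (type == DCP_TYPE)
            dev_info(dev, "dedicated charging port detected\n");
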
diff --git a/include/linux/mfd/si476x-core.h b/include/linux/mfd/si476x-core.h
index 4708c2b8512a..dd95c37ca134 100644
--- a/include/linux/mfd/si476x-core.h
+++ b/include/linux/mfd/si476x-core.h
@@ -57,7 +57,7 @@ enum si476x_mfd_cells {
* @SI476X_POWER_DOWN: In this state all regulators are turned off
* and the reset line is pulled low. The device is completely
* inactive.
- * @SI476X_POWER_UP_FULL: In this state all the power regualtors are
+ * @SI476X_POWER_UP_FULL: In this state all the power regulators are
* turned on, reset line pulled high, IRQ line is enabled(polling is
* active for polling use scenario) and device is turned on with
* POWER_UP command. The device is ready to be used.
diff --git a/include/linux/mfd/sky81452.h b/include/linux/mfd/sky81452.h
index d469aa481243..b08570ff34df 100644
--- a/include/linux/mfd/sky81452.h
+++ b/include/linux/mfd/sky81452.h
@@ -9,11 +9,9 @@
#ifndef _SKY81452_H
#define _SKY81452_H
-#include <linux/platform_data/sky81452-backlight.h>
#include <linux/regulator/machine.h>
struct sky81452_platform_data {
- struct sky81452_bl_platform_data *bl_pdata;
struct regulator_init_data *regulator_init_data;
};
diff --git a/include/linux/mfd/smsc.h b/include/linux/mfd/smsc.h
deleted file mode 100644
index 83944124e886..000000000000
--- a/include/linux/mfd/smsc.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * SMSC ECE1099
- *
- * Copyright 2012 Texas Instruments Inc.
- *
- * Author: Sourav Poddar <sourav.poddar@ti.com>
- */
-
-#ifndef __LINUX_MFD_SMSC_H
-#define __LINUX_MFD_SMSC_H
-
-#include <linux/regmap.h>
-
-#define SMSC_ID_ECE1099 1
-#define SMSC_NUM_CLIENTS 2
-
-#define SMSC_BASE_ADDR 0x38
-#define OMAP_GPIO_SMSC_IRQ 151
-
-#define SMSC_MAXGPIO 32
-#define SMSC_BANK(offs) ((offs) >> 3)
-#define SMSC_BIT(offs) (1u << ((offs) & 0x7))
-
-struct smsc {
- struct device *dev;
- struct i2c_client *i2c_clients[SMSC_NUM_CLIENTS];
- struct regmap *regmap;
- int clk;
- /* Stored chip id */
- int id;
-};
-
-struct smsc_gpio;
-struct smsc_keypad;
-
-static inline int smsc_read(struct device *child, unsigned int reg,
- unsigned int *dest)
-{
- struct smsc *smsc = dev_get_drvdata(child->parent);
-
- return regmap_read(smsc->regmap, reg, dest);
-}
-
-static inline int smsc_write(struct device *child, unsigned int reg,
- unsigned int value)
-{
- struct smsc *smsc = dev_get_drvdata(child->parent);
-
- return regmap_write(smsc->regmap, reg, value);
-}
-
-/* Registers for SMSC */
-#define SMSC_RESET 0xF5
-#define SMSC_GRP_INT 0xF9
-#define SMSC_CLK_CTRL 0xFA
-#define SMSC_WKUP_CTRL 0xFB
-#define SMSC_DEV_ID 0xFC
-#define SMSC_DEV_REV 0xFD
-#define SMSC_VEN_ID_L 0xFE
-#define SMSC_VEN_ID_H 0xFF
-
-/* CLK VALUE */
-#define SMSC_CLK_VALUE 0x13
-
-/* Registers for function GPIO INPUT */
-#define SMSC_GPIO_DATA_IN_START 0x00
-
-/* Registers for function GPIO OUPUT */
-#define SMSC_GPIO_DATA_OUT_START 0x05
-
-/* Definitions for SMSC GPIO CONFIGURATION REGISTER*/
-#define SMSC_GPIO_INPUT_LOW 0x01
-#define SMSC_GPIO_INPUT_RISING 0x09
-#define SMSC_GPIO_INPUT_FALLING 0x11
-#define SMSC_GPIO_INPUT_BOTH_EDGE 0x19
-#define SMSC_GPIO_OUTPUT_PP 0x21
-#define SMSC_GPIO_OUTPUT_OP 0x31
-
-#define GRP_INT_STAT 0xf9
-#define SMSC_GPI_INT 0x0f
-#define SMSC_CFG_START 0x0A
-
-/* Registers for SMSC GPIO INTERRUPT STATUS REGISTER*/
-#define SMSC_GPIO_INT_STAT_START 0x32
-
-/* Registers for SMSC GPIO INTERRUPT MASK REGISTER*/
-#define SMSC_GPIO_INT_MASK_START 0x37
-
-/* Registers for SMSC function KEYPAD*/
-#define SMSC_KP_OUT 0x40
-#define SMSC_KP_IN 0x41
-#define SMSC_KP_INT_STAT 0x42
-#define SMSC_KP_INT_MASK 0x43
-
-/* Definitions for keypad */
-#define SMSC_KP_KSO 0x70
-#define SMSC_KP_KSI 0x51
-#define SMSC_KSO_ALL_LOW 0x20
-#define SMSC_KP_SET_LOW_PWR 0x0B
-#define SMSC_KP_SET_HIGH 0xFF
-#define SMSC_KSO_EVAL 0x00
-
-#endif /* __LINUX_MFD_SMSC_H */
diff --git a/include/linux/mfd/stm32-lptimer.h b/include/linux/mfd/stm32-lptimer.h
index 605f62264825..06d3f11dc3c9 100644
--- a/include/linux/mfd/stm32-lptimer.h
+++ b/include/linux/mfd/stm32-lptimer.h
@@ -27,10 +27,15 @@
#define STM32_LPTIM_CMPOK BIT(3)
/* STM32_LPTIM_ICR - bit fields */
+#define STM32_LPTIM_ARRMCF BIT(1)
#define STM32_LPTIM_CMPOKCF_ARROKCF GENMASK(4, 3)
+/* STM32_LPTIM_IER - bit fields */
+#define STM32_LPTIM_ARRMIE BIT(1)
+
/* STM32_LPTIM_CR - bit fields */
#define STM32_LPTIM_CNTSTRT BIT(2)
+#define STM32_LPTIM_SNGSTRT BIT(1)
#define STM32_LPTIM_ENABLE BIT(0)
/* STM32_LPTIM_CFGR - bit fields */
@@ -40,6 +45,11 @@
#define STM32_LPTIM_PRESC GENMASK(11, 9)
#define STM32_LPTIM_CKPOL GENMASK(2, 1)
+/* STM32_LPTIM_CKPOL */
+#define STM32_LPTIM_CKPOL_RISING_EDGE 0
+#define STM32_LPTIM_CKPOL_FALLING_EDGE 1
+#define STM32_LPTIM_CKPOL_BOTH_EDGES 2
+
/* STM32_LPTIM_ARR */
#define STM32_LPTIM_MAX_ARR 0xFFFF
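
STM32_LPTIM_CKPOL is a two-bit field (GENMASK(2, 1)) and the three new values
are plain field values, not pre-shifted masks, so they are meant to be placed
with FIELD_PREP(). A sketch, with the regmap handle assumed from the lptimer
MFD core:

    #include <linux/bitfield.h>

    /* Clock the counter on both input edges (encoder-style counting) */
    regmap_update_bits(regmap, STM32_LPTIM_CFGR, STM32_LPTIM_CKPOL,
                       FIELD_PREP(STM32_LPTIM_CKPOL,
                                  STM32_LPTIM_CKPOL_BOTH_EDGES));
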
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
index f8db83aedb2b..5f5c43fd69dd 100644
--- a/include/linux/mfd/stm32-timers.h
+++ b/include/linux/mfd/stm32-timers.h
@@ -82,6 +82,10 @@
#define MAX_TIM_ICPSC 0x3
#define TIM_CR2_MMS_SHIFT 4
#define TIM_CR2_MMS2_SHIFT 20
+#define TIM_SMCR_SMS_SLAVE_MODE_DISABLED 0 /* counts on internal clock when CEN=1 */
+#define TIM_SMCR_SMS_ENCODER_MODE_1 1 /* counts TI1FP1 edges, depending on TI2FP2 level */
+#define TIM_SMCR_SMS_ENCODER_MODE_2 2 /* counts TI2FP2 edges, depending on TI1FP1 level */
+#define TIM_SMCR_SMS_ENCODER_MODE_3 3 /* counts on both TI1FP1 and TI2FP2 edges */
#define TIM_SMCR_TS_SHIFT 4
#define TIM_BDTR_BKF_MASK 0xF
#define TIM_BDTR_BKF_SHIFT(x) (16 + (x) * 4)
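
The four SMS values select how the counter advances; quadrature decoding on
both inputs is mode 3. The SMS field mask itself is not part of this hunk, so
the sketch below assumes it occupies the low three bits of TIM_SMCR:

    /* Sketch: count on both TI1FP1 and TI2FP2 edges (encoder mode 3).
     * GENMASK(2, 0) as the SMS field mask is an assumption here. */
    regmap_update_bits(regmap, TIM_SMCR, GENMASK(2, 0),
                       TIM_SMCR_SMS_ENCODER_MODE_3);
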
diff --git a/include/linux/mfd/stmfx.h b/include/linux/mfd/stmfx.h
index 3c67983678ec..744dce63946e 100644
--- a/include/linux/mfd/stmfx.h
+++ b/include/linux/mfd/stmfx.h
@@ -109,6 +109,7 @@ struct stmfx {
struct device *dev;
struct regmap *map;
struct regulator *vdd;
+ int irq;
struct irq_domain *irq_domain;
struct mutex lock; /* IRQ bus lock */
u8 irq_src;
diff --git a/include/linux/mfd/sy7636a.h b/include/linux/mfd/sy7636a.h
new file mode 100644
index 000000000000..22f03b2f851e
--- /dev/null
+++ b/include/linux/mfd/sy7636a.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Functions to access SY7636A power management chip.
+ *
+ * Copyright (C) 2021 reMarkable AS - http://www.remarkable.com/
+ */
+
+#ifndef __MFD_SY7636A_H
+#define __MFD_SY7636A_H
+
+#define SY7636A_REG_OPERATION_MODE_CRL 0x00
+/* It is set if a gpio is used to control the regulator */
+#define SY7636A_OPERATION_MODE_CRL_VCOMCTL BIT(6)
+#define SY7636A_OPERATION_MODE_CRL_ONOFF BIT(7)
+#define SY7636A_REG_VCOM_ADJUST_CTRL_L 0x01
+#define SY7636A_REG_VCOM_ADJUST_CTRL_H 0x02
+#define SY7636A_REG_VCOM_ADJUST_CTRL_MASK 0x01ff
+#define SY7636A_REG_VLDO_VOLTAGE_ADJULST_CTRL 0x03
+#define SY7636A_REG_POWER_ON_DELAY_TIME 0x06
+#define SY7636A_REG_FAULT_FLAG 0x07
+#define SY7636A_FAULT_FLAG_PG BIT(0)
+#define SY7636A_REG_TERMISTOR_READOUT 0x08
+
+#define SY7636A_REG_MAX 0x08
+
+#define VCOM_ADJUST_CTRL_MASK 0x1ff
+/* Used to shift the high byte */
+#define VCOM_ADJUST_CTRL_SHIFT 8
+/* Used to scale from VCOM_ADJUST_CTRL to mV */
+#define VCOM_ADJUST_CTRL_SCAL 10000
+
+#define FAULT_FLAG_SHIFT 1
+
+#endif /* __MFD_SY7636A_H */
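
Reading VCOM back spans the two adjust-control registers. A sketch of
recombining them using only the constants above; how the high register packs
the ninth bit, and the exact millivolt conversion through
VCOM_ADJUST_CTRL_SCAL, are assumptions here:

    unsigned int lo, hi, vcom;

    regmap_read(regmap, SY7636A_REG_VCOM_ADJUST_CTRL_L, &lo);
    regmap_read(regmap, SY7636A_REG_VCOM_ADJUST_CTRL_H, &hi);
    /* Assumption: high byte contributes via VCOM_ADJUST_CTRL_SHIFT */
    vcom = ((hi << VCOM_ADJUST_CTRL_SHIFT) | lo) & VCOM_ADJUST_CTRL_MASK;
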
diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h
index 7f20e9b502a5..fecc2fa2a364 100644
--- a/include/linux/mfd/syscon.h
+++ b/include/linux/mfd/syscon.h
@@ -28,6 +28,9 @@ extern struct regmap *syscon_regmap_lookup_by_phandle_args(
const char *property,
int arg_count,
unsigned int *out_args);
+extern struct regmap *syscon_regmap_lookup_by_phandle_optional(
+ struct device_node *np,
+ const char *property);
#else
static inline struct regmap *device_node_to_regmap(struct device_node *np)
{
@@ -59,6 +62,14 @@ static inline struct regmap *syscon_regmap_lookup_by_phandle_args(
{
return ERR_PTR(-ENOTSUPP);
}
+
+static inline struct regmap *syscon_regmap_lookup_by_phandle_optional(
+ struct device_node *np,
+ const char *property)
+{
+ return NULL;
+}
+
#endif
#endif /* __LINUX_MFD_SYSCON_H__ */
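
The new _optional variant differs from syscon_regmap_lookup_by_phandle() in
its contract: a missing property yields NULL rather than an error pointer, so
callers can treat the syscon as genuinely optional. A sketch, where
"vendor,sysctl" is a hypothetical DT property and 0x10 an example offset:

    struct regmap *map;

    map = syscon_regmap_lookup_by_phandle_optional(np, "vendor,sysctl");
    if (IS_ERR(map))
            return PTR_ERR(map);    /* a real lookup failure */
    if (map)                        /* NULL simply means "not wired up" */
            regmap_update_bits(map, 0x10, BIT(0), BIT(0));
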
diff --git a/include/linux/mfd/syscon/xlnx-vcu.h b/include/linux/mfd/syscon/xlnx-vcu.h
new file mode 100644
index 000000000000..ff7bc3656f6e
--- /dev/null
+++ b/include/linux/mfd/syscon/xlnx-vcu.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Pengutronix, Michael Tretter <kernel@pengutronix.de>
+ */
+
+#ifndef __XLNX_VCU_H
+#define __XLNX_VCU_H
+
+#define VCU_ECODER_ENABLE 0x00
+#define VCU_DECODER_ENABLE 0x04
+#define VCU_MEMORY_DEPTH 0x08
+#define VCU_ENC_COLOR_DEPTH 0x0c
+#define VCU_ENC_VERTICAL_RANGE 0x10
+#define VCU_ENC_FRAME_SIZE_X 0x14
+#define VCU_ENC_FRAME_SIZE_Y 0x18
+#define VCU_ENC_COLOR_FORMAT 0x1c
+#define VCU_ENC_FPS 0x20
+#define VCU_MCU_CLK 0x24
+#define VCU_CORE_CLK 0x28
+#define VCU_PLL_BYPASS 0x2c
+#define VCU_ENC_CLK 0x30
+#define VCU_PLL_CLK 0x34
+#define VCU_ENC_VIDEO_STANDARD 0x38
+#define VCU_STATUS 0x3c
+#define VCU_AXI_ENC_CLK 0x40
+#define VCU_AXI_DEC_CLK 0x44
+#define VCU_AXI_MCU_CLK 0x48
+#define VCU_DEC_VIDEO_STANDARD 0x4c
+#define VCU_DEC_FRAME_SIZE_X 0x50
+#define VCU_DEC_FRAME_SIZE_Y 0x54
+#define VCU_DEC_FPS 0x58
+#define VCU_BUFFER_B_FRAME 0x5c
+#define VCU_WPP_EN 0x60
+#define VCU_PLL_CLK_DEC 0x64
+#define VCU_NUM_CORE 0x6c
+#define VCU_GASKET_INIT 0x74
+#define VCU_GASKET_VALUE 0x03
+
+#endif /* __XLNX_VCU_H */
diff --git a/include/linux/mfd/t7l66xb.h b/include/linux/mfd/t7l66xb.h
index 69632c1b07bd..ae3e7a5c5219 100644
--- a/include/linux/mfd/t7l66xb.h
+++ b/include/linux/mfd/t7l66xb.h
@@ -12,7 +12,6 @@
struct t7l66xb_platform_data {
int (*enable)(struct platform_device *dev);
- int (*disable)(struct platform_device *dev);
int (*suspend)(struct platform_device *dev);
int (*resume)(struct platform_device *dev);
diff --git a/include/linux/mfd/tc3589x.h b/include/linux/mfd/tc3589x.h
index bb2b19599761..b84955410e03 100644
--- a/include/linux/mfd/tc3589x.h
+++ b/include/linux/mfd/tc3589x.h
@@ -19,6 +19,9 @@ enum tx3589x_block {
#define TC3589x_RSTCTRL_KBDRST (1 << 1)
#define TC3589x_RSTCTRL_GPIRST (1 << 0)
+#define TC3589x_DKBDMSK_ELINT (1 << 1)
+#define TC3589x_DKBDMSK_EINT (1 << 0)
+
/* Keyboard Configuration Registers */
#define TC3589x_KBDSETTLE_REG 0x01
#define TC3589x_KBDBOUNCE 0x02
@@ -101,6 +104,9 @@ enum tx3589x_block {
#define TC3589x_GPIOODM2 0xE4
#define TC3589x_GPIOODE2 0xE5
+#define TC3589x_DIRECT0 0xEC
+#define TC3589x_DKBDMSK 0xF3
+
#define TC3589x_INT_GPIIRQ 0
#define TC3589x_INT_TI0IRQ 1
#define TC3589x_INT_TI1IRQ 2
diff --git a/include/linux/mfd/tc6387xb.h b/include/linux/mfd/tc6387xb.h
index b4888209494a..aacf1dcc86b9 100644
--- a/include/linux/mfd/tc6387xb.h
+++ b/include/linux/mfd/tc6387xb.h
@@ -12,7 +12,6 @@
struct tc6387xb_platform_data {
int (*enable)(struct platform_device *dev);
- int (*disable)(struct platform_device *dev);
int (*suspend)(struct platform_device *dev);
int (*resume)(struct platform_device *dev);
};
diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h
index fcc8e74f0e8d..d17807f2d0c9 100644
--- a/include/linux/mfd/tc6393xb.h
+++ b/include/linux/mfd/tc6393xb.h
@@ -22,14 +22,11 @@ struct tc6393xb_platform_data {
u16 scr_gper; /* GP Enable */
int (*enable)(struct platform_device *dev);
- int (*disable)(struct platform_device *dev);
+ void (*disable)(struct platform_device *dev);
int (*suspend)(struct platform_device *dev);
int (*resume)(struct platform_device *dev);
int irq_base; /* base for subdevice irqs */
- int gpio_base;
- int (*setup)(struct platform_device *dev);
- void (*teardown)(struct platform_device *dev);
struct tmio_nand_data *nand_data;
struct tmio_fb_data *fb_data;
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index 483168403ae5..4063b0614d90 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -1,22 +1,16 @@
-#ifndef __LINUX_TI_AM335X_TSCADC_MFD_H
-#define __LINUX_TI_AM335X_TSCADC_MFD_H
-
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI Touch Screen / ADC MFD driver
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
*/
+#ifndef __LINUX_TI_AM335X_TSCADC_MFD_H
+#define __LINUX_TI_AM335X_TSCADC_MFD_H
+
+#include <linux/bitfield.h>
#include <linux/mfd/core.h>
+#include <linux/units.h>
#define REG_RAWIRQSTATUS 0x024
#define REG_IRQSTATUS 0x028
@@ -46,13 +40,6 @@
/* IRQ wakeup enable */
#define IRQWKUP_ENB BIT(0)
-/* Step Enable */
-#define STEPENB_MASK (0x1FFFF << 0)
-#define STEPENB(val) ((val) << 0)
-#define ENB(val) (1 << (val))
-#define STPENB_STEPENB STEPENB(0x1FFFF)
-#define STPENB_STEPENB_TC STEPENB(0x1FFF)
-
/* IRQ enable */
#define IRQENB_HW_PEN BIT(0)
#define IRQENB_EOS BIT(1)
@@ -65,12 +52,10 @@
#define IRQENB_PENUP BIT(9)
/* Step Configuration */
-#define STEPCONFIG_MODE_MASK (3 << 0)
-#define STEPCONFIG_MODE(val) ((val) << 0)
+#define STEPCONFIG_MODE(val) FIELD_PREP(GENMASK(1, 0), (val))
#define STEPCONFIG_MODE_SWCNT STEPCONFIG_MODE(1)
#define STEPCONFIG_MODE_HWSYNC STEPCONFIG_MODE(2)
-#define STEPCONFIG_AVG_MASK (7 << 2)
-#define STEPCONFIG_AVG(val) ((val) << 2)
+#define STEPCONFIG_AVG(val) FIELD_PREP(GENMASK(4, 2), (val))
#define STEPCONFIG_AVG_16 STEPCONFIG_AVG(4)
#define STEPCONFIG_XPP BIT(5)
#define STEPCONFIG_XNN BIT(6)
@@ -78,70 +63,67 @@
#define STEPCONFIG_YNN BIT(8)
#define STEPCONFIG_XNP BIT(9)
#define STEPCONFIG_YPN BIT(10)
-#define STEPCONFIG_RFP(val) ((val) << 12)
-#define STEPCONFIG_RFP_VREFP (0x3 << 12)
-#define STEPCONFIG_INM_MASK (0xF << 15)
-#define STEPCONFIG_INM(val) ((val) << 15)
+#define STEPCONFIG_RFP(val) FIELD_PREP(GENMASK(13, 12), (val))
+#define STEPCONFIG_RFP_VREFP STEPCONFIG_RFP(3)
+#define STEPCONFIG_INM(val) FIELD_PREP(GENMASK(18, 15), (val))
#define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8)
-#define STEPCONFIG_INP_MASK (0xF << 19)
-#define STEPCONFIG_INP(val) ((val) << 19)
+#define STEPCONFIG_INP(val) FIELD_PREP(GENMASK(22, 19), (val))
#define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4)
#define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8)
#define STEPCONFIG_FIFO1 BIT(26)
-#define STEPCONFIG_RFM(val) ((val) << 23)
-#define STEPCONFIG_RFM_VREFN (0x3 << 23)
+#define STEPCONFIG_RFM(val) FIELD_PREP(GENMASK(24, 23), (val))
+#define STEPCONFIG_RFM_VREFN STEPCONFIG_RFM(3)
/* Delay register */
-#define STEPDELAY_OPEN_MASK (0x3FFFF << 0)
-#define STEPDELAY_OPEN(val) ((val) << 0)
+#define STEPDELAY_OPEN(val) FIELD_PREP(GENMASK(17, 0), (val))
#define STEPCONFIG_OPENDLY STEPDELAY_OPEN(0x098)
-#define STEPDELAY_SAMPLE_MASK (0xFF << 24)
-#define STEPDELAY_SAMPLE(val) ((val) << 24)
+#define STEPCONFIG_MAX_OPENDLY GENMASK(17, 0)
+#define STEPDELAY_SAMPLE(val) FIELD_PREP(GENMASK(31, 24), (val))
#define STEPCONFIG_SAMPLEDLY STEPDELAY_SAMPLE(0)
+#define STEPCONFIG_MAX_SAMPLE GENMASK(7, 0)
/* Charge Config */
-#define STEPCHARGE_RFP_MASK (7 << 12)
-#define STEPCHARGE_RFP(val) ((val) << 12)
+#define STEPCHARGE_RFP(val) FIELD_PREP(GENMASK(14, 12), (val))
#define STEPCHARGE_RFP_XPUL STEPCHARGE_RFP(1)
-#define STEPCHARGE_INM_MASK (0xF << 15)
-#define STEPCHARGE_INM(val) ((val) << 15)
+#define STEPCHARGE_INM(val) FIELD_PREP(GENMASK(18, 15), (val))
#define STEPCHARGE_INM_AN1 STEPCHARGE_INM(1)
-#define STEPCHARGE_INP_MASK (0xF << 19)
-#define STEPCHARGE_INP(val) ((val) << 19)
-#define STEPCHARGE_RFM_MASK (3 << 23)
-#define STEPCHARGE_RFM(val) ((val) << 23)
+#define STEPCHARGE_INP(val) FIELD_PREP(GENMASK(22, 19), (val))
+#define STEPCHARGE_RFM(val) FIELD_PREP(GENMASK(24, 23), (val))
#define STEPCHARGE_RFM_XNUR STEPCHARGE_RFM(1)
/* Charge delay */
-#define CHARGEDLY_OPEN_MASK (0x3FFFF << 0)
-#define CHARGEDLY_OPEN(val) ((val) << 0)
+#define CHARGEDLY_OPEN(val) FIELD_PREP(GENMASK(17, 0), (val))
#define CHARGEDLY_OPENDLY CHARGEDLY_OPEN(0x400)
/* Control register */
-#define CNTRLREG_TSCSSENB BIT(0)
+#define CNTRLREG_SSENB BIT(0)
#define CNTRLREG_STEPID BIT(1)
-#define CNTRLREG_STEPCONFIGWRT BIT(2)
+#define CNTRLREG_TSC_STEPCONFIGWRT BIT(2)
#define CNTRLREG_POWERDOWN BIT(4)
-#define CNTRLREG_AFE_CTRL_MASK (3 << 5)
-#define CNTRLREG_AFE_CTRL(val) ((val) << 5)
-#define CNTRLREG_4WIRE CNTRLREG_AFE_CTRL(1)
-#define CNTRLREG_5WIRE CNTRLREG_AFE_CTRL(2)
-#define CNTRLREG_8WIRE CNTRLREG_AFE_CTRL(3)
-#define CNTRLREG_TSCENB BIT(7)
+#define CNTRLREG_TSC_AFE_CTRL(val) FIELD_PREP(GENMASK(6, 5), (val))
+#define CNTRLREG_TSC_4WIRE CNTRLREG_TSC_AFE_CTRL(1)
+#define CNTRLREG_TSC_5WIRE CNTRLREG_TSC_AFE_CTRL(2)
+#define CNTRLREG_TSC_ENB BIT(7)
+
+/* Control register bitfields for the MAGADC IP */
+#define CNTRLREG_MAGADCENB BIT(0)
+#define CNTRLREG_MAG_PREAMP_PWRDOWN BIT(5)
+#define CNTRLREG_MAG_PREAMP_BYPASS BIT(6)
/* FIFO READ Register */
-#define FIFOREAD_DATA_MASK (0xfff << 0)
-#define FIFOREAD_CHNLID_MASK (0xf << 16)
+#define FIFOREAD_DATA_MASK GENMASK(11, 0)
+#define FIFOREAD_CHNLID_MASK GENMASK(19, 16)
/* DMA ENABLE/CLEAR Register */
#define DMA_FIFO0 BIT(0)
#define DMA_FIFO1 BIT(1)
/* Sequencer Status */
-#define SEQ_STATUS BIT(5)
+#define SEQ_STATUS BIT(5)
#define CHARGE_STEP 0x11
-#define ADC_CLK 3000000
+#define TSC_ADC_CLK (3 * HZ_PER_MHZ)
+#define MAG_ADC_CLK (13 * HZ_PER_MHZ)
#define TOTAL_STEPS 16
#define TOTAL_CHANNELS 8
#define FIFO1_THRESHOLD 19
@@ -158,21 +140,27 @@
*
* max processing time: 266431 * 308ns = 83ms(approx)
*/
-#define IDLE_TIMEOUT 83 /* milliseconds */
+#define IDLE_TIMEOUT_MS 83 /* milliseconds */
#define TSCADC_CELLS 2
+struct ti_tscadc_data {
+ char *adc_feature_name;
+ char *adc_feature_compatible;
+ char *secondary_feature_name;
+ char *secondary_feature_compatible;
+ unsigned int target_clk_rate;
+};
+
struct ti_tscadc_dev {
struct device *dev;
struct regmap *regmap;
void __iomem *tscadc_base;
phys_addr_t tscadc_phys_base;
+ const struct ti_tscadc_data *data;
int irq;
- int used_cells; /* 1-2 */
- int tsc_wires;
- int tsc_cell; /* -1 if not used */
- int adc_cell; /* -1 if not used */
struct mfd_cell cells[TSCADC_CELLS];
+ u32 ctrl;
u32 reg_se_cache;
bool adc_waiting;
bool adc_in_use;
@@ -194,6 +182,12 @@ static inline struct ti_tscadc_dev *ti_tscadc_dev_get(struct platform_device *p)
return *tscadc_dev;
}
+static inline bool ti_adc_with_touchscreen(struct ti_tscadc_dev *tscadc)
+{
+ return of_device_is_compatible(tscadc->dev->of_node,
+ "ti,am3359-tscadc");
+}
+
void am335x_tsc_se_set_cache(struct ti_tscadc_dev *tsadc, u32 val);
void am335x_tsc_se_set_once(struct ti_tscadc_dev *tsadc, u32 val);
void am335x_tsc_se_clr(struct ti_tscadc_dev *tsadc, u32 val);
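
The wholesale move from open-coded shifts to FIELD_PREP() above is
behavior-preserving: FIELD_PREP(mask, val) shifts val to the mask's lowest
set bit and masks it, which also removes the need for the deleted *_MASK
companions. For instance:

    #include <linux/bitfield.h>

    /* FIELD_PREP(GENMASK(18, 15), 8) == 8 << 15, i.e. the old
     * STEPCONFIG_INM(8); constant values out of field range now fail
     * to build instead of silently corrupting neighbouring fields. */
    u32 stepconfig = STEPCONFIG_MODE_HWSYNC | STEPCONFIG_AVG_16 |
                     STEPCONFIG_INM_ADCREFM | STEPCONFIG_RFP_VREFP;
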
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 8ba042430d8e..27264fe4b3b9 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -55,7 +55,12 @@
*/
#define TMIO_MMC_HAS_IDLE_WAIT BIT(4)
-/* BIT(5) is unused */
+/*
+ * Use the busy timeout feature. Probably all TMIO versions support it. Yet,
+ * we don't have documentation for old variants, so we enable only known good
+ * variants with this flag. Can be removed once all variants are known good.
+ */
+#define TMIO_MMC_USE_BUSY_TIMEOUT BIT(5)
/*
* Some controllers have CMD12 automatically
diff --git a/include/linux/mfd/tps65086.h b/include/linux/mfd/tps65086.h
index a228ae4c88d9..16f87cccc003 100644
--- a/include/linux/mfd/tps65086.h
+++ b/include/linux/mfd/tps65086.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
* Based on the TPS65912 driver
*/
diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h
index b5dd108421c8..877d9c41c53d 100644
--- a/include/linux/mfd/tps65217.h
+++ b/include/linux/mfd/tps65217.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/mfd/tps65217.h
*
* Functions to access TPS65217 power management chip.
*
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __LINUX_MFD_TPS65217_H
diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h
index b0470c35162d..2946be2f15f3 100644
--- a/include/linux/mfd/tps65218.h
+++ b/include/linux/mfd/tps65218.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/mfd/tps65218.h
*
- * Functions to access TPS65219 power management chip.
+ * Functions to access TPS65218 power management chip.
*
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
+ * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __LINUX_MFD_TPS65218_H
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
index ce4b9e743f7c..701925db75b3 100644
--- a/include/linux/mfd/tps65910.h
+++ b/include/linux/mfd/tps65910.h
@@ -890,11 +890,6 @@ struct tps65910 {
struct regmap *regmap;
unsigned long id;
- /* Client devices */
- struct tps65910_pmic *pmic;
- struct tps65910_rtc *rtc;
- struct tps65910_power *power;
-
/* Device node parsed board data */
struct tps65910_board *of_plat_data;
@@ -913,39 +908,4 @@ static inline int tps65910_chip_id(struct tps65910 *tps65910)
return tps65910->id;
}
-static inline int tps65910_reg_read(struct tps65910 *tps65910, u8 reg,
- unsigned int *val)
-{
- return regmap_read(tps65910->regmap, reg, val);
-}
-
-static inline int tps65910_reg_write(struct tps65910 *tps65910, u8 reg,
- unsigned int val)
-{
- return regmap_write(tps65910->regmap, reg, val);
-}
-
-static inline int tps65910_reg_set_bits(struct tps65910 *tps65910, u8 reg,
- u8 mask)
-{
- return regmap_update_bits(tps65910->regmap, reg, mask, mask);
-}
-
-static inline int tps65910_reg_clear_bits(struct tps65910 *tps65910, u8 reg,
- u8 mask)
-{
- return regmap_update_bits(tps65910->regmap, reg, mask, 0);
-}
-
-static inline int tps65910_reg_update_bits(struct tps65910 *tps65910, u8 reg,
- u8 mask, u8 val)
-{
- return regmap_update_bits(tps65910->regmap, reg, mask, val);
-}
-
-static inline int tps65910_irq_get_virq(struct tps65910 *tps65910, int irq)
-{
- return regmap_irq_get_virq(tps65910->irq_data, irq);
-}
-
#endif /* __LINUX_MFD_TPS65910_H */
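
The deleted tps65910_reg_* helpers were one-line regmap shims, so their
callers are expected to use regmap directly; for example, the old set-bits
and virq helpers become:

    /* was tps65910_reg_set_bits(tps65910, reg, mask) */
    regmap_update_bits(tps65910->regmap, reg, mask, mask);

    /* was tps65910_irq_get_virq(tps65910, irq) */
    regmap_irq_get_virq(tps65910->irq_data, irq);
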
diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h
index b25d0297ba88..860ec0a16c96 100644
--- a/include/linux/mfd/tps65912.h
+++ b/include/linux/mfd/tps65912.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
* Based on the TPS65218 driver and the previous TPS65912 driver by
* Margarita Olaya Cabrera <magi@slimlogic.co.uk>
*/
@@ -322,6 +314,6 @@ struct tps65912 {
extern const struct regmap_config tps65912_regmap_config;
int tps65912_device_init(struct tps65912 *tps);
-int tps65912_device_exit(struct tps65912 *tps);
+void tps65912_device_exit(struct tps65912 *tps);
#endif /* __LINUX_MFD_TPS65912_H */
diff --git a/include/linux/mfd/tps68470.h b/include/linux/mfd/tps68470.h
index ffe81127d91c..7807fa329db0 100644
--- a/include/linux/mfd/tps68470.h
+++ b/include/linux/mfd/tps68470.h
@@ -75,6 +75,17 @@
#define TPS68470_CLKCFG1_MODE_A_MASK GENMASK(1, 0)
#define TPS68470_CLKCFG1_MODE_B_MASK GENMASK(3, 2)
+#define TPS68470_CLKCFG2_DRV_STR_2MA 0x05
+#define TPS68470_PLL_OUTPUT_ENABLE 0x02
+#define TPS68470_CLK_SRC_XTAL BIT(0)
+#define TPS68470_PLLSWR_DEFAULT GENMASK(1, 0)
+#define TPS68470_OSC_EXT_CAP_DEFAULT 0x05
+
+#define TPS68470_OUTPUT_A_SHIFT 0x00
+#define TPS68470_OUTPUT_B_SHIFT 0x02
+#define TPS68470_CLK_SRC_SHIFT GENMASK(2, 0)
+#define TPS68470_OSC_EXT_CAP_SHIFT BIT(2)
+
#define TPS68470_GPIO_CTL_REG_A(x) (TPS68470_REG_GPCTL0A + (x) * 2)
#define TPS68470_GPIO_CTL_REG_B(x) (TPS68470_REG_GPCTL0B + (x) * 2)
#define TPS68470_GPIO_MODE_MASK GENMASK(1, 0)
diff --git a/include/linux/mfd/tps80031.h b/include/linux/mfd/tps80031.h
deleted file mode 100644
index 2c75c9c9318f..000000000000
--- a/include/linux/mfd/tps80031.h
+++ /dev/null
@@ -1,637 +0,0 @@
-/*
- * tps80031.h -- TI TPS80031 and TI TPS80032 PMIC driver.
- *
- * Copyright (c) 2012, NVIDIA Corporation.
- *
- * Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307, USA
- */
-
-#ifndef __LINUX_MFD_TPS80031_H
-#define __LINUX_MFD_TPS80031_H
-
-#include <linux/device.h>
-#include <linux/regmap.h>
-
-/* Pull-ups/Pull-downs */
-#define TPS80031_CFG_INPUT_PUPD1 0xF0
-#define TPS80031_CFG_INPUT_PUPD2 0xF1
-#define TPS80031_CFG_INPUT_PUPD3 0xF2
-#define TPS80031_CFG_INPUT_PUPD4 0xF3
-#define TPS80031_CFG_LDO_PD1 0xF4
-#define TPS80031_CFG_LDO_PD2 0xF5
-#define TPS80031_CFG_SMPS_PD 0xF6
-
-/* Real Time Clock */
-#define TPS80031_SECONDS_REG 0x00
-#define TPS80031_MINUTES_REG 0x01
-#define TPS80031_HOURS_REG 0x02
-#define TPS80031_DAYS_REG 0x03
-#define TPS80031_MONTHS_REG 0x04
-#define TPS80031_YEARS_REG 0x05
-#define TPS80031_WEEKS_REG 0x06
-#define TPS80031_ALARM_SECONDS_REG 0x08
-#define TPS80031_ALARM_MINUTES_REG 0x09
-#define TPS80031_ALARM_HOURS_REG 0x0A
-#define TPS80031_ALARM_DAYS_REG 0x0B
-#define TPS80031_ALARM_MONTHS_REG 0x0C
-#define TPS80031_ALARM_YEARS_REG 0x0D
-#define TPS80031_RTC_CTRL_REG 0x10
-#define TPS80031_RTC_STATUS_REG 0x11
-#define TPS80031_RTC_INTERRUPTS_REG 0x12
-#define TPS80031_RTC_COMP_LSB_REG 0x13
-#define TPS80031_RTC_COMP_MSB_REG 0x14
-#define TPS80031_RTC_RESET_STATUS_REG 0x16
-
-/*PMC Master Module */
-#define TPS80031_PHOENIX_START_CONDITION 0x1F
-#define TPS80031_PHOENIX_MSK_TRANSITION 0x20
-#define TPS80031_STS_HW_CONDITIONS 0x21
-#define TPS80031_PHOENIX_LAST_TURNOFF_STS 0x22
-#define TPS80031_VSYSMIN_LO_THRESHOLD 0x23
-#define TPS80031_VSYSMIN_HI_THRESHOLD 0x24
-#define TPS80031_PHOENIX_DEV_ON 0x25
-#define TPS80031_STS_PWR_GRP_STATE 0x27
-#define TPS80031_PH_CFG_VSYSLOW 0x28
-#define TPS80031_PH_STS_BOOT 0x29
-#define TPS80031_PHOENIX_SENS_TRANSITION 0x2A
-#define TPS80031_PHOENIX_SEQ_CFG 0x2B
-#define TPS80031_PRIMARY_WATCHDOG_CFG 0X2C
-#define TPS80031_KEY_PRESS_DUR_CFG 0X2D
-#define TPS80031_SMPS_LDO_SHORT_STS 0x2E
-
-/* PMC Slave Module - Broadcast */
-#define TPS80031_BROADCAST_ADDR_ALL 0x31
-#define TPS80031_BROADCAST_ADDR_REF 0x32
-#define TPS80031_BROADCAST_ADDR_PROV 0x33
-#define TPS80031_BROADCAST_ADDR_CLK_RST 0x34
-
-/* PMC Slave Module SMPS Regulators */
-#define TPS80031_SMPS4_CFG_TRANS 0x41
-#define TPS80031_SMPS4_CFG_STATE 0x42
-#define TPS80031_SMPS4_CFG_VOLTAGE 0x44
-#define TPS80031_VIO_CFG_TRANS 0x47
-#define TPS80031_VIO_CFG_STATE 0x48
-#define TPS80031_VIO_CFG_FORCE 0x49
-#define TPS80031_VIO_CFG_VOLTAGE 0x4A
-#define TPS80031_VIO_CFG_STEP 0x48
-#define TPS80031_SMPS1_CFG_TRANS 0x53
-#define TPS80031_SMPS1_CFG_STATE 0x54
-#define TPS80031_SMPS1_CFG_FORCE 0x55
-#define TPS80031_SMPS1_CFG_VOLTAGE 0x56
-#define TPS80031_SMPS1_CFG_STEP 0x57
-#define TPS80031_SMPS2_CFG_TRANS 0x59
-#define TPS80031_SMPS2_CFG_STATE 0x5A
-#define TPS80031_SMPS2_CFG_FORCE 0x5B
-#define TPS80031_SMPS2_CFG_VOLTAGE 0x5C
-#define TPS80031_SMPS2_CFG_STEP 0x5D
-#define TPS80031_SMPS3_CFG_TRANS 0x65
-#define TPS80031_SMPS3_CFG_STATE 0x66
-#define TPS80031_SMPS3_CFG_VOLTAGE 0x68
-
-/* PMC Slave Module LDO Regulators */
-#define TPS80031_VANA_CFG_TRANS 0x81
-#define TPS80031_VANA_CFG_STATE 0x82
-#define TPS80031_VANA_CFG_VOLTAGE 0x83
-#define TPS80031_LDO2_CFG_TRANS 0x85
-#define TPS80031_LDO2_CFG_STATE 0x86
-#define TPS80031_LDO2_CFG_VOLTAGE 0x87
-#define TPS80031_LDO4_CFG_TRANS 0x89
-#define TPS80031_LDO4_CFG_STATE 0x8A
-#define TPS80031_LDO4_CFG_VOLTAGE 0x8B
-#define TPS80031_LDO3_CFG_TRANS 0x8D
-#define TPS80031_LDO3_CFG_STATE 0x8E
-#define TPS80031_LDO3_CFG_VOLTAGE 0x8F
-#define TPS80031_LDO6_CFG_TRANS 0x91
-#define TPS80031_LDO6_CFG_STATE 0x92
-#define TPS80031_LDO6_CFG_VOLTAGE 0x93
-#define TPS80031_LDOLN_CFG_TRANS 0x95
-#define TPS80031_LDOLN_CFG_STATE 0x96
-#define TPS80031_LDOLN_CFG_VOLTAGE 0x97
-#define TPS80031_LDO5_CFG_TRANS 0x99
-#define TPS80031_LDO5_CFG_STATE 0x9A
-#define TPS80031_LDO5_CFG_VOLTAGE 0x9B
-#define TPS80031_LDO1_CFG_TRANS 0x9D
-#define TPS80031_LDO1_CFG_STATE 0x9E
-#define TPS80031_LDO1_CFG_VOLTAGE 0x9F
-#define TPS80031_LDOUSB_CFG_TRANS 0xA1
-#define TPS80031_LDOUSB_CFG_STATE 0xA2
-#define TPS80031_LDOUSB_CFG_VOLTAGE 0xA3
-#define TPS80031_LDO7_CFG_TRANS 0xA5
-#define TPS80031_LDO7_CFG_STATE 0xA6
-#define TPS80031_LDO7_CFG_VOLTAGE 0xA7
-
-/* PMC Slave Module External Control */
-#define TPS80031_REGEN1_CFG_TRANS 0xAE
-#define TPS80031_REGEN1_CFG_STATE 0xAF
-#define TPS80031_REGEN2_CFG_TRANS 0xB1
-#define TPS80031_REGEN2_CFG_STATE 0xB2
-#define TPS80031_SYSEN_CFG_TRANS 0xB4
-#define TPS80031_SYSEN_CFG_STATE 0xB5
-
-/* PMC Slave Module Internal Control */
-#define TPS80031_NRESPWRON_CFG_TRANS 0xB7
-#define TPS80031_NRESPWRON_CFG_STATE 0xB8
-#define TPS80031_CLK32KAO_CFG_TRANS 0xBA
-#define TPS80031_CLK32KAO_CFG_STATE 0xBB
-#define TPS80031_CLK32KG_CFG_TRANS 0xBD
-#define TPS80031_CLK32KG_CFG_STATE 0xBE
-#define TPS80031_CLK32KAUDIO_CFG_TRANS 0xC0
-#define TPS80031_CLK32KAUDIO_CFG_STATE 0xC1
-#define TPS80031_VRTC_CFG_TRANS 0xC3
-#define TPS80031_VRTC_CFG_STATE 0xC4
-#define TPS80031_BIAS_CFG_TRANS 0xC6
-#define TPS80031_BIAS_CFG_STATE 0xC7
-#define TPS80031_VSYSMIN_HI_CFG_TRANS 0xC9
-#define TPS80031_VSYSMIN_HI_CFG_STATE 0xCA
-#define TPS80031_RC6MHZ_CFG_TRANS 0xCC
-#define TPS80031_RC6MHZ_CFG_STATE 0xCD
-#define TPS80031_TMP_CFG_TRANS 0xCF
-#define TPS80031_TMP_CFG_STATE 0xD0
-
-/* PMC Slave Module resources assignment */
-#define TPS80031_PREQ1_RES_ASS_A 0xD7
-#define TPS80031_PREQ1_RES_ASS_B 0xD8
-#define TPS80031_PREQ1_RES_ASS_C 0xD9
-#define TPS80031_PREQ2_RES_ASS_A 0xDA
-#define TPS80031_PREQ2_RES_ASS_B 0xDB
-#define TPS80031_PREQ2_RES_ASS_C 0xDC
-#define TPS80031_PREQ3_RES_ASS_A 0xDD
-#define TPS80031_PREQ3_RES_ASS_B 0xDE
-#define TPS80031_PREQ3_RES_ASS_C 0xDF
-
-/* PMC Slave Module Miscellaneous */
-#define TPS80031_SMPS_OFFSET 0xE0
-#define TPS80031_SMPS_MULT 0xE3
-#define TPS80031_MISC1 0xE4
-#define TPS80031_MISC2 0xE5
-#define TPS80031_BBSPOR_CFG 0xE6
-#define TPS80031_TMP_CFG 0xE7
-
-/* Battery Charging Controller and Indicator LED */
-#define TPS80031_CONTROLLER_CTRL2 0xDA
-#define TPS80031_CONTROLLER_VSEL_COMP 0xDB
-#define TPS80031_CHARGERUSB_VSYSREG 0xDC
-#define TPS80031_CHARGERUSB_VICHRG_PC 0xDD
-#define TPS80031_LINEAR_CHRG_STS 0xDE
-#define TPS80031_CONTROLLER_INT_MASK 0xE0
-#define TPS80031_CONTROLLER_CTRL1 0xE1
-#define TPS80031_CONTROLLER_WDG 0xE2
-#define TPS80031_CONTROLLER_STAT1 0xE3
-#define TPS80031_CHARGERUSB_INT_STATUS 0xE4
-#define TPS80031_CHARGERUSB_INT_MASK 0xE5
-#define TPS80031_CHARGERUSB_STATUS_INT1 0xE6
-#define TPS80031_CHARGERUSB_STATUS_INT2 0xE7
-#define TPS80031_CHARGERUSB_CTRL1 0xE8
-#define TPS80031_CHARGERUSB_CTRL2 0xE9
-#define TPS80031_CHARGERUSB_CTRL3 0xEA
-#define TPS80031_CHARGERUSB_STAT1 0xEB
-#define TPS80031_CHARGERUSB_VOREG 0xEC
-#define TPS80031_CHARGERUSB_VICHRG 0xED
-#define TPS80031_CHARGERUSB_CINLIMIT 0xEE
-#define TPS80031_CHARGERUSB_CTRLLIMIT1 0xEF
-#define TPS80031_CHARGERUSB_CTRLLIMIT2 0xF0
-#define TPS80031_LED_PWM_CTRL1 0xF4
-#define TPS80031_LED_PWM_CTRL2 0xF5
-
-/* USB On-The-Go */
-#define TPS80031_BACKUP_REG 0xFA
-#define TPS80031_USB_VENDOR_ID_LSB 0x00
-#define TPS80031_USB_VENDOR_ID_MSB 0x01
-#define TPS80031_USB_PRODUCT_ID_LSB 0x02
-#define TPS80031_USB_PRODUCT_ID_MSB 0x03
-#define TPS80031_USB_VBUS_CTRL_SET 0x04
-#define TPS80031_USB_VBUS_CTRL_CLR 0x05
-#define TPS80031_USB_ID_CTRL_SET 0x06
-#define TPS80031_USB_ID_CTRL_CLR 0x07
-#define TPS80031_USB_VBUS_INT_SRC 0x08
-#define TPS80031_USB_VBUS_INT_LATCH_SET 0x09
-#define TPS80031_USB_VBUS_INT_LATCH_CLR 0x0A
-#define TPS80031_USB_VBUS_INT_EN_LO_SET 0x0B
-#define TPS80031_USB_VBUS_INT_EN_LO_CLR 0x0C
-#define TPS80031_USB_VBUS_INT_EN_HI_SET 0x0D
-#define TPS80031_USB_VBUS_INT_EN_HI_CLR 0x0E
-#define TPS80031_USB_ID_INT_SRC 0x0F
-#define TPS80031_USB_ID_INT_LATCH_SET 0x10
-#define TPS80031_USB_ID_INT_LATCH_CLR 0x11
-#define TPS80031_USB_ID_INT_EN_LO_SET 0x12
-#define TPS80031_USB_ID_INT_EN_LO_CLR 0x13
-#define TPS80031_USB_ID_INT_EN_HI_SET 0x14
-#define TPS80031_USB_ID_INT_EN_HI_CLR 0x15
-#define TPS80031_USB_OTG_ADP_CTRL 0x16
-#define TPS80031_USB_OTG_ADP_HIGH 0x17
-#define TPS80031_USB_OTG_ADP_LOW 0x18
-#define TPS80031_USB_OTG_ADP_RISE 0x19
-#define TPS80031_USB_OTG_REVISION 0x1A
-
-/* Gas Gauge */
-#define TPS80031_FG_REG_00 0xC0
-#define TPS80031_FG_REG_01 0xC1
-#define TPS80031_FG_REG_02 0xC2
-#define TPS80031_FG_REG_03 0xC3
-#define TPS80031_FG_REG_04 0xC4
-#define TPS80031_FG_REG_05 0xC5
-#define TPS80031_FG_REG_06 0xC6
-#define TPS80031_FG_REG_07 0xC7
-#define TPS80031_FG_REG_08 0xC8
-#define TPS80031_FG_REG_09 0xC9
-#define TPS80031_FG_REG_10 0xCA
-#define TPS80031_FG_REG_11 0xCB
-
-/* General Purpose ADC */
-#define TPS80031_GPADC_CTRL 0x2E
-#define TPS80031_GPADC_CTRL2 0x2F
-#define TPS80031_RTSELECT_LSB 0x32
-#define TPS80031_RTSELECT_ISB 0x33
-#define TPS80031_RTSELECT_MSB 0x34
-#define TPS80031_GPSELECT_ISB 0x35
-#define TPS80031_CTRL_P1 0x36
-#define TPS80031_RTCH0_LSB 0x37
-#define TPS80031_RTCH0_MSB 0x38
-#define TPS80031_RTCH1_LSB 0x39
-#define TPS80031_RTCH1_MSB 0x3A
-#define TPS80031_GPCH0_LSB 0x3B
-#define TPS80031_GPCH0_MSB 0x3C
-
-/* SIM, MMC and Battery Detection */
-#define TPS80031_SIMDEBOUNCING 0xEB
-#define TPS80031_SIMCTRL 0xEC
-#define TPS80031_MMCDEBOUNCING 0xED
-#define TPS80031_MMCCTRL 0xEE
-#define TPS80031_BATDEBOUNCING 0xEF
-
-/* Vibrator Driver and PWMs */
-#define TPS80031_VIBCTRL 0x9B
-#define TPS80031_VIBMODE 0x9C
-#define TPS80031_PWM1ON 0xBA
-#define TPS80031_PWM1OFF 0xBB
-#define TPS80031_PWM2ON 0xBD
-#define TPS80031_PWM2OFF 0xBE
-
-/* Control Interface */
-#define TPS80031_INT_STS_A 0xD0
-#define TPS80031_INT_STS_B 0xD1
-#define TPS80031_INT_STS_C 0xD2
-#define TPS80031_INT_MSK_LINE_A 0xD3
-#define TPS80031_INT_MSK_LINE_B 0xD4
-#define TPS80031_INT_MSK_LINE_C 0xD5
-#define TPS80031_INT_MSK_STS_A 0xD6
-#define TPS80031_INT_MSK_STS_B 0xD7
-#define TPS80031_INT_MSK_STS_C 0xD8
-#define TPS80031_TOGGLE1 0x90
-#define TPS80031_TOGGLE2 0x91
-#define TPS80031_TOGGLE3 0x92
-#define TPS80031_PWDNSTATUS1 0x93
-#define TPS80031_PWDNSTATUS2 0x94
-#define TPS80031_VALIDITY0 0x17
-#define TPS80031_VALIDITY1 0x18
-#define TPS80031_VALIDITY2 0x19
-#define TPS80031_VALIDITY3 0x1A
-#define TPS80031_VALIDITY4 0x1B
-#define TPS80031_VALIDITY5 0x1C
-#define TPS80031_VALIDITY6 0x1D
-#define TPS80031_VALIDITY7 0x1E
-
-/* Version number related register */
-#define TPS80031_JTAGVERNUM 0x87
-#define TPS80031_EPROM_REV 0xDF
-
-/* GPADC Trimming Bits. */
-#define TPS80031_GPADC_TRIM0 0xCC
-#define TPS80031_GPADC_TRIM1 0xCD
-#define TPS80031_GPADC_TRIM2 0xCE
-#define TPS80031_GPADC_TRIM3 0xCF
-#define TPS80031_GPADC_TRIM4 0xD0
-#define TPS80031_GPADC_TRIM5 0xD1
-#define TPS80031_GPADC_TRIM6 0xD2
-#define TPS80031_GPADC_TRIM7 0xD3
-#define TPS80031_GPADC_TRIM8 0xD4
-#define TPS80031_GPADC_TRIM9 0xD5
-#define TPS80031_GPADC_TRIM10 0xD6
-#define TPS80031_GPADC_TRIM11 0xD7
-#define TPS80031_GPADC_TRIM12 0xD8
-#define TPS80031_GPADC_TRIM13 0xD9
-#define TPS80031_GPADC_TRIM14 0xDA
-#define TPS80031_GPADC_TRIM15 0xDB
-#define TPS80031_GPADC_TRIM16 0xDC
-#define TPS80031_GPADC_TRIM17 0xDD
-#define TPS80031_GPADC_TRIM18 0xDE
-
-/* TPS80031_CONTROLLER_STAT1 bit fields */
-#define TPS80031_CONTROLLER_STAT1_BAT_TEMP 0
-#define TPS80031_CONTROLLER_STAT1_BAT_REMOVED 1
-#define TPS80031_CONTROLLER_STAT1_VBUS_DET 2
-#define TPS80031_CONTROLLER_STAT1_VAC_DET 3
-#define TPS80031_CONTROLLER_STAT1_FAULT_WDG 4
-#define TPS80031_CONTROLLER_STAT1_LINCH_GATED 6
-/* TPS80031_CONTROLLER_INT_MASK bit filed */
-#define TPS80031_CONTROLLER_INT_MASK_MVAC_DET 0
-#define TPS80031_CONTROLLER_INT_MASK_MVBUS_DET 1
-#define TPS80031_CONTROLLER_INT_MASK_MBAT_TEMP 2
-#define TPS80031_CONTROLLER_INT_MASK_MFAULT_WDG 3
-#define TPS80031_CONTROLLER_INT_MASK_MBAT_REMOVED 4
-#define TPS80031_CONTROLLER_INT_MASK_MLINCH_GATED 5
-
-#define TPS80031_CHARGE_CONTROL_SUB_INT_MASK 0x3F
-
-/* TPS80031_PHOENIX_DEV_ON bit field */
-#define TPS80031_DEVOFF 0x1
-
-#define TPS80031_EXT_CONTROL_CFG_TRANS 0
-#define TPS80031_EXT_CONTROL_CFG_STATE 1
-
-/* State register field */
-#define TPS80031_STATE_OFF 0x00
-#define TPS80031_STATE_ON 0x01
-#define TPS80031_STATE_MASK 0x03
-
-/* Trans register field */
-#define TPS80031_TRANS_ACTIVE_OFF 0x00
-#define TPS80031_TRANS_ACTIVE_ON 0x01
-#define TPS80031_TRANS_ACTIVE_MASK 0x03
-#define TPS80031_TRANS_SLEEP_OFF 0x00
-#define TPS80031_TRANS_SLEEP_ON 0x04
-#define TPS80031_TRANS_SLEEP_MASK 0x0C
-#define TPS80031_TRANS_OFF_OFF 0x00
-#define TPS80031_TRANS_OFF_ACTIVE 0x10
-#define TPS80031_TRANS_OFF_MASK 0x30
-
-#define TPS80031_EXT_PWR_REQ (TPS80031_PWR_REQ_INPUT_PREQ1 | \
- TPS80031_PWR_REQ_INPUT_PREQ2 | \
- TPS80031_PWR_REQ_INPUT_PREQ3)
-
-/* TPS80031_BBSPOR_CFG bit field */
-#define TPS80031_BBSPOR_CHG_EN 0x8
-#define TPS80031_MAX_REGISTER 0xFF
-
-struct i2c_client;
-
-/* Supported chips */
-enum chips {
- TPS80031 = 0x00000001,
- TPS80032 = 0x00000002,
-};
-
-enum {
- TPS80031_INT_PWRON,
- TPS80031_INT_RPWRON,
- TPS80031_INT_SYS_VLOW,
- TPS80031_INT_RTC_ALARM,
- TPS80031_INT_RTC_PERIOD,
- TPS80031_INT_HOT_DIE,
- TPS80031_INT_VXX_SHORT,
- TPS80031_INT_SPDURATION,
- TPS80031_INT_WATCHDOG,
- TPS80031_INT_BAT,
- TPS80031_INT_SIM,
- TPS80031_INT_MMC,
- TPS80031_INT_RES,
- TPS80031_INT_GPADC_RT,
- TPS80031_INT_GPADC_SW2_EOC,
- TPS80031_INT_CC_AUTOCAL,
- TPS80031_INT_ID_WKUP,
- TPS80031_INT_VBUSS_WKUP,
- TPS80031_INT_ID,
- TPS80031_INT_VBUS,
- TPS80031_INT_CHRG_CTRL,
- TPS80031_INT_EXT_CHRG,
- TPS80031_INT_INT_CHRG,
- TPS80031_INT_RES2,
- TPS80031_INT_BAT_TEMP_OVRANGE,
- TPS80031_INT_BAT_REMOVED,
- TPS80031_INT_VBUS_DET,
- TPS80031_INT_VAC_DET,
- TPS80031_INT_FAULT_WDG,
- TPS80031_INT_LINCH_GATED,
-
- /* Last interrupt id to get the end number */
- TPS80031_INT_NR,
-};
-
-/* TPS80031 Slave IDs */
-#define TPS80031_NUM_SLAVES 4
-#define TPS80031_SLAVE_ID0 0
-#define TPS80031_SLAVE_ID1 1
-#define TPS80031_SLAVE_ID2 2
-#define TPS80031_SLAVE_ID3 3
-
-/* TPS80031 I2C addresses */
-#define TPS80031_I2C_ID0_ADDR 0x12
-#define TPS80031_I2C_ID1_ADDR 0x48
-#define TPS80031_I2C_ID2_ADDR 0x49
-#define TPS80031_I2C_ID3_ADDR 0x4A
-
-enum {
- TPS80031_REGULATOR_VIO,
- TPS80031_REGULATOR_SMPS1,
- TPS80031_REGULATOR_SMPS2,
- TPS80031_REGULATOR_SMPS3,
- TPS80031_REGULATOR_SMPS4,
- TPS80031_REGULATOR_VANA,
- TPS80031_REGULATOR_LDO1,
- TPS80031_REGULATOR_LDO2,
- TPS80031_REGULATOR_LDO3,
- TPS80031_REGULATOR_LDO4,
- TPS80031_REGULATOR_LDO5,
- TPS80031_REGULATOR_LDO6,
- TPS80031_REGULATOR_LDO7,
- TPS80031_REGULATOR_LDOLN,
- TPS80031_REGULATOR_LDOUSB,
- TPS80031_REGULATOR_VBUS,
- TPS80031_REGULATOR_REGEN1,
- TPS80031_REGULATOR_REGEN2,
- TPS80031_REGULATOR_SYSEN,
- TPS80031_REGULATOR_MAX,
-};
-
-/* Different configurations for the rails */
-enum {
- /* USBLDO input selection */
- TPS80031_USBLDO_INPUT_VSYS = 0x00000001,
- TPS80031_USBLDO_INPUT_PMID = 0x00000002,
-
- /* LDO3 output mode */
- TPS80031_LDO3_OUTPUT_VIB = 0x00000004,
-
- /* VBUS configuration */
- TPS80031_VBUS_DISCHRG_EN_PDN = 0x00000004,
- TPS80031_VBUS_SW_ONLY = 0x00000008,
- TPS80031_VBUS_SW_N_ID = 0x00000010,
-};
-
-/* External controls requests */
-enum tps80031_ext_control {
- TPS80031_PWR_REQ_INPUT_NONE = 0x00000000,
- TPS80031_PWR_REQ_INPUT_PREQ1 = 0x00000001,
- TPS80031_PWR_REQ_INPUT_PREQ2 = 0x00000002,
- TPS80031_PWR_REQ_INPUT_PREQ3 = 0x00000004,
- TPS80031_PWR_OFF_ON_SLEEP = 0x00000008,
- TPS80031_PWR_ON_ON_SLEEP = 0x00000010,
-};
-
-enum tps80031_pupd_pins {
- TPS80031_PREQ1 = 0,
- TPS80031_PREQ2A,
- TPS80031_PREQ2B,
- TPS80031_PREQ2C,
- TPS80031_PREQ3,
- TPS80031_NRES_WARM,
- TPS80031_PWM_FORCE,
- TPS80031_CHRG_EXT_CHRG_STATZ,
- TPS80031_SIM,
- TPS80031_MMC,
- TPS80031_GPADC_START,
- TPS80031_DVSI2C_SCL,
- TPS80031_DVSI2C_SDA,
- TPS80031_CTLI2C_SCL,
- TPS80031_CTLI2C_SDA,
-};
-
-enum tps80031_pupd_settings {
- TPS80031_PUPD_NORMAL,
- TPS80031_PUPD_PULLDOWN,
- TPS80031_PUPD_PULLUP,
-};
-
-struct tps80031 {
- struct device *dev;
- unsigned long chip_info;
- int es_version;
- struct i2c_client *clients[TPS80031_NUM_SLAVES];
- struct regmap *regmap[TPS80031_NUM_SLAVES];
- struct regmap_irq_chip_data *irq_data;
-};
-
-struct tps80031_pupd_init_data {
- int input_pin;
- int setting;
-};
-
-/*
- * struct tps80031_regulator_platform_data - tps80031 regulator platform data.
- *
- * @reg_init_data: The regulator init data.
- * @ext_ctrl_flag: External control flag for sleep/power request control.
- * @config_flags: Configuration flag to configure the rails.
- * It should be ORed of config enums.
- */
-
-struct tps80031_regulator_platform_data {
- struct regulator_init_data *reg_init_data;
- unsigned int ext_ctrl_flag;
- unsigned int config_flags;
-};
-
-struct tps80031_platform_data {
- int irq_base;
- bool use_power_off;
- struct tps80031_pupd_init_data *pupd_init_data;
- int pupd_init_data_size;
- struct tps80031_regulator_platform_data
- *regulator_pdata[TPS80031_REGULATOR_MAX];
-};
-
-static inline int tps80031_write(struct device *dev, int sid,
- int reg, uint8_t val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_write(tps80031->regmap[sid], reg, val);
-}
-
-static inline int tps80031_writes(struct device *dev, int sid, int reg,
- int len, uint8_t *val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_bulk_write(tps80031->regmap[sid], reg, val, len);
-}
-
-static inline int tps80031_read(struct device *dev, int sid,
- int reg, uint8_t *val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
- unsigned int ival;
- int ret;
-
- ret = regmap_read(tps80031->regmap[sid], reg, &ival);
- if (ret < 0) {
- dev_err(dev, "failed reading from reg 0x%02x\n", reg);
- return ret;
- }
-
- *val = ival;
- return ret;
-}
-
-static inline int tps80031_reads(struct device *dev, int sid,
- int reg, int len, uint8_t *val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_bulk_read(tps80031->regmap[sid], reg, val, len);
-}
-
-static inline int tps80031_set_bits(struct device *dev, int sid,
- int reg, uint8_t bit_mask)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_update_bits(tps80031->regmap[sid], reg,
- bit_mask, bit_mask);
-}
-
-static inline int tps80031_clr_bits(struct device *dev, int sid,
- int reg, uint8_t bit_mask)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_update_bits(tps80031->regmap[sid], reg, bit_mask, 0);
-}
-
-static inline int tps80031_update(struct device *dev, int sid,
- int reg, uint8_t val, uint8_t mask)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_update_bits(tps80031->regmap[sid], reg, mask, val);
-}
-
-static inline unsigned long tps80031_get_chip_info(struct device *dev)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return tps80031->chip_info;
-}
-
-static inline int tps80031_get_pmu_version(struct device *dev)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return tps80031->es_version;
-}
-
-static inline int tps80031_irq_get_virq(struct device *dev, int irq)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_irq_get_virq(tps80031->irq_data, irq);
-}
-
-extern int tps80031_ext_power_req_config(struct device *dev,
- unsigned long ext_ctrl_flag, int preq_bit,
- int state_reg_add, int trans_reg_add);
-#endif /*__LINUX_MFD_TPS80031_H */
diff --git a/include/linux/mfd/twl.h b/include/linux/mfd/twl.h
index 089e8942223a..eaa233038254 100644
--- a/include/linux/mfd/twl.h
+++ b/include/linux/mfd/twl.h
@@ -594,8 +594,6 @@ struct twl4030_gpio_platform_data {
int (*setup)(struct device *dev,
unsigned gpio, unsigned ngpio);
- int (*teardown)(struct device *dev,
- unsigned gpio, unsigned ngpio);
};
struct twl4030_madc_platform_data {
@@ -694,61 +692,6 @@ struct twl4030_audio_data {
unsigned int irq_base;
};
-struct twl4030_platform_data {
- struct twl4030_clock_init_data *clock;
- struct twl4030_bci_platform_data *bci;
- struct twl4030_gpio_platform_data *gpio;
- struct twl4030_madc_platform_data *madc;
- struct twl4030_keypad_data *keypad;
- struct twl4030_usb_data *usb;
- struct twl4030_power_data *power;
- struct twl4030_audio_data *audio;
-
- /* Common LDO regulators for TWL4030/TWL6030 */
- struct regulator_init_data *vdac;
- struct regulator_init_data *vaux1;
- struct regulator_init_data *vaux2;
- struct regulator_init_data *vaux3;
- struct regulator_init_data *vdd1;
- struct regulator_init_data *vdd2;
- struct regulator_init_data *vdd3;
- /* TWL4030 LDO regulators */
- struct regulator_init_data *vpll1;
- struct regulator_init_data *vpll2;
- struct regulator_init_data *vmmc1;
- struct regulator_init_data *vmmc2;
- struct regulator_init_data *vsim;
- struct regulator_init_data *vaux4;
- struct regulator_init_data *vio;
- struct regulator_init_data *vintana1;
- struct regulator_init_data *vintana2;
- struct regulator_init_data *vintdig;
- /* TWL6030 LDO regulators */
- struct regulator_init_data *vmmc;
- struct regulator_init_data *vpp;
- struct regulator_init_data *vusim;
- struct regulator_init_data *vana;
- struct regulator_init_data *vcxio;
- struct regulator_init_data *vusb;
- struct regulator_init_data *clk32kg;
- struct regulator_init_data *v1v8;
- struct regulator_init_data *v2v1;
- /* TWL6032 LDO regulators */
- struct regulator_init_data *ldo1;
- struct regulator_init_data *ldo2;
- struct regulator_init_data *ldo3;
- struct regulator_init_data *ldo4;
- struct regulator_init_data *ldo5;
- struct regulator_init_data *ldo6;
- struct regulator_init_data *ldo7;
- struct regulator_init_data *ldoln;
- struct regulator_init_data *ldousb;
- /* TWL6032 DCDC regulators */
- struct regulator_init_data *smps3;
- struct regulator_init_data *smps4;
- struct regulator_init_data *vio6025;
-};
-
struct twl_regulator_driver_data {
int (*set_voltage)(void *data, int target_uV);
int (*get_voltage)(void *data);
@@ -781,8 +724,6 @@ int twl4030_sih_setup(struct device *dev, int module, int irq_base);
#define TWL4030_VAUX3_DEV_GRP 0x1F
#define TWL4030_VAUX3_DEDICATED 0x22
-static inline int twl4030charger_usb_en(int enable) { return 0; }
-
/*----------------------------------------------------------------------*/
/* Linux-specific regulator identifiers ... for now, we only support
diff --git a/include/linux/mfd/wcd934x/registers.h b/include/linux/mfd/wcd934x/registers.h
index bb8d2e276668..76a943c83c63 100644
--- a/include/linux/mfd/wcd934x/registers.h
+++ b/include/linux/mfd/wcd934x/registers.h
@@ -18,6 +18,8 @@
#define WCD934X_EFUSE_SENSE_STATE_DEF 0x10
#define WCD934X_EFUSE_SENSE_EN_MASK BIT(0)
#define WCD934X_EFUSE_SENSE_ENABLE BIT(0)
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT1 0x002a
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT2 0x002b
#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT14 0x0037
#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT15 0x0038
#define WCD934X_CHIP_TIER_CTRL_EFUSE_STATUS 0x0039
@@ -103,21 +105,58 @@
#define WCD934X_ANA_AMIC3 0x0610
#define WCD934X_ANA_AMIC4 0x0611
#define WCD934X_ANA_MBHC_MECH 0x0614
+#define WCD934X_MBHC_L_DET_EN_MASK BIT(7)
+#define WCD934X_MBHC_L_DET_EN BIT(7)
+#define WCD934X_MBHC_GND_DET_EN_MASK BIT(6)
+#define WCD934X_MBHC_MECH_DETECT_TYPE_MASK BIT(5)
+#define WCD934X_MBHC_MECH_DETECT_TYPE_INS 1
+#define WCD934X_MBHC_HPHL_PLUG_TYPE_MASK BIT(4)
+#define WCD934X_MBHC_HPHL_PLUG_TYPE_NO 1
+#define WCD934X_MBHC_GND_PLUG_TYPE_MASK BIT(3)
+#define WCD934X_MBHC_GND_PLUG_TYPE_NO 1
+#define WCD934X_MBHC_HSL_PULLUP_COMP_EN BIT(2)
+#define WCD934X_MBHC_HSG_PULLUP_COMP_EN BIT(1)
+#define WCD934X_MBHC_HPHL_100K_TO_GND_EN BIT(0)
#define WCD934X_ANA_MBHC_ELECT 0x0615
+#define WCD934X_ANA_MBHC_BIAS_EN_MASK BIT(0)
+#define WCD934X_ANA_MBHC_BIAS_EN BIT(0)
#define WCD934X_ANA_MBHC_ZDET 0x0616
#define WCD934X_ANA_MBHC_RESULT_1 0x0617
#define WCD934X_ANA_MBHC_RESULT_2 0x0618
#define WCD934X_ANA_MBHC_RESULT_3 0x0619
+#define WCD934X_ANA_MBHC_BTN0 0x061a
+#define WCD934X_VTH_MASK GENMASK(7, 2)
+#define WCD934X_ANA_MBHC_BTN1 0x061b
+#define WCD934X_ANA_MBHC_BTN2 0x061c
+#define WCD934X_ANA_MBHC_BTN3 0x061d
+#define WCD934X_ANA_MBHC_BTN4 0x061e
+#define WCD934X_ANA_MBHC_BTN5 0x061f
+#define WCD934X_ANA_MBHC_BTN6 0x0620
+#define WCD934X_ANA_MBHC_BTN7 0x0621
+#define WCD934X_MBHC_BTN_VTH_MASK GENMASK(7, 2)
#define WCD934X_ANA_MICB1 0x0622
#define WCD934X_MICB_VAL_MASK GENMASK(5, 0)
#define WCD934X_ANA_MICB_EN_MASK GENMASK(7, 6)
+#define WCD934X_MICB_DISABLE 0
+#define WCD934X_MICB_ENABLE 1
+#define WCD934X_MICB_PULL_UP 2
+#define WCD934X_MICB_PULL_DOWN 3
#define WCD934X_ANA_MICB_PULL_UP 0x80
#define WCD934X_ANA_MICB_ENABLE 0x40
#define WCD934X_ANA_MICB_DISABLE 0x0
#define WCD934X_ANA_MICB2 0x0623
+#define WCD934X_ANA_MICB2_ENABLE BIT(6)
+#define WCD934X_ANA_MICB2_ENABLE_MASK GENMASK(7, 6)
+#define WCD934X_ANA_MICB2_VOUT_MASK GENMASK(5, 0)
+#define WCD934X_ANA_MICB2_RAMP 0x0624
+#define WCD934X_RAMP_EN_MASK BIT(7)
+#define WCD934X_RAMP_SHIFT_CTRL_MASK GENMASK(4, 2)
#define WCD934X_ANA_MICB3 0x0625
#define WCD934X_ANA_MICB4 0x0626
#define WCD934X_BIAS_VBG_FINE_ADJ 0x0629
+#define WCD934X_MBHC_CTL_CLK 0x0656
+#define WCD934X_MBHC_CTL_BCS 0x065a
+#define WCD934X_MBHC_STATUS_SPARE_1 0x065b
#define WCD934X_MICB1_TEST_CTL_1 0x066b
#define WCD934X_MICB1_TEST_CTL_2 0x066c
#define WCD934X_MICB2_TEST_CTL_1 0x066e
@@ -141,7 +180,11 @@
#define WCD934X_HPH_CNP_WG_CTL 0x06cc
#define WCD934X_HPH_GM3_BOOST_EN_MASK BIT(7)
#define WCD934X_HPH_GM3_BOOST_ENABLE BIT(7)
+#define WCD934X_HPH_CNP_WG_TIME 0x06cd
#define WCD934X_HPH_OCP_CTL 0x06ce
+#define WCD934X_HPH_PA_CTL2 0x06d2
+#define WCD934X_HPHPA_GND_R_MASK BIT(6)
+#define WCD934X_HPHPA_GND_L_MASK BIT(4)
#define WCD934X_HPH_L_EN 0x06d3
#define WCD934X_HPH_GAIN_SRC_SEL_MASK BIT(5)
#define WCD934X_HPH_GAIN_SRC_SEL_COMPANDER 0
@@ -152,6 +195,8 @@
#define WCD934X_HPH_OCP_DET_MASK BIT(0)
#define WCD934X_HPH_OCP_DET_ENABLE BIT(0)
#define WCD934X_HPH_OCP_DET_DISABLE 0
+#define WCD934X_HPH_R_ATEST 0x06d8
+#define WCD934X_HPHPA_GND_OVR_MASK BIT(1)
#define WCD934X_DIFF_LO_LO2_COMPANDER 0x06ea
#define WCD934X_DIFF_LO_LO1_COMPANDER 0x06eb
#define WCD934X_CLK_SYS_MCLK_PRG 0x0711
@@ -172,7 +217,19 @@
#define WCD934X_SIDO_NEW_VOUT_D_FREQ2 0x071e
#define WCD934X_SIDO_RIPPLE_FREQ_EN_MASK BIT(0)
#define WCD934X_SIDO_RIPPLE_FREQ_ENABLE BIT(0)
+#define WCD934X_MBHC_NEW_CTL_1 0x0720
+#define WCD934X_MBHC_CTL_RCO_EN_MASK BIT(7)
+#define WCD934X_MBHC_CTL_RCO_EN BIT(7)
#define WCD934X_MBHC_NEW_CTL_2 0x0721
+#define WCD934X_M_RTH_CTL_MASK GENMASK(3, 2)
+#define WCD934X_MBHC_NEW_PLUG_DETECT_CTL 0x0722
+#define WCD934X_HSDET_PULLUP_C_MASK GENMASK(7, 6)
+#define WCD934X_MBHC_NEW_ZDET_ANA_CTL 0x0723
+#define WCD934X_ZDET_RANGE_CTL_MASK GENMASK(3, 0)
+#define WCD934X_ZDET_MAXV_CTL_MASK GENMASK(6, 4)
+#define WCD934X_MBHC_NEW_ZDET_RAMP_CTL 0x0724
+#define WCD934X_MBHC_NEW_FSM_STATUS 0x0725
+#define WCD934X_MBHC_NEW_ADC_RESULT 0x0726
#define WCD934X_TX_NEW_AMIC_4_5_SEL 0x0727
#define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_L 0x0733
#define WCD934X_HPH_NEW_INT_RDAC_OVERRIDE_CTL 0x0735
diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h
index 986986fe4e4e..75aa94dadf1c 100644
--- a/include/linux/mfd/wm831x/pdata.h
+++ b/include/linux/mfd/wm831x/pdata.h
@@ -89,7 +89,6 @@ enum wm831x_watchdog_action {
struct wm831x_watchdog_pdata {
enum wm831x_watchdog_action primary, secondary;
- int update_gpio;
unsigned int software:1;
};
diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h
index 81e7dcbd94df..6e2962ef5b81 100644
--- a/include/linux/mfd/wm8994/pdata.h
+++ b/include/linux/mfd/wm8994/pdata.h
@@ -33,7 +33,7 @@ struct wm8994_ldo_pdata {
* DRC configurations are specified with a label and a set of register
* values to write (the enable bits will be ignored). At runtime an
* enumerated control will be presented for each DRC block allowing
- * the user to choose the configration to use.
+ * the user to choose the configuration to use.
*
* Configurations may be generated by hand or by using the DRC control
* panel provided by the WISCE - see http://www.wolfsonmicro.com/wisce/
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
new file mode 100644
index 000000000000..a5441ad33c74
--- /dev/null
+++ b/include/linux/mhi.h
@@ -0,0 +1,817 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+#ifndef _MHI_H_
+#define _MHI_H_
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#define MHI_MAX_OEM_PK_HASH_SEGMENTS 16
+
+struct mhi_chan;
+struct mhi_event;
+struct mhi_ctxt;
+struct mhi_cmd;
+struct mhi_buf_info;
+
+/**
+ * enum mhi_callback - MHI callback
+ * @MHI_CB_IDLE: MHI entered idle state
+ * @MHI_CB_PENDING_DATA: New data available for client to process
+ * @MHI_CB_LPM_ENTER: MHI host entered low power mode
+ * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
+ * @MHI_CB_EE_RDDM: MHI device entered RDDM exec env
+ * @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode exec env
+ * @MHI_CB_SYS_ERROR: MHI device entered error state (may recover)
+ * @MHI_CB_FATAL_ERROR: MHI device entered fatal error state
+ * @MHI_CB_BW_REQ: Received a bandwidth switch request from device
+ */
+enum mhi_callback {
+ MHI_CB_IDLE,
+ MHI_CB_PENDING_DATA,
+ MHI_CB_LPM_ENTER,
+ MHI_CB_LPM_EXIT,
+ MHI_CB_EE_RDDM,
+ MHI_CB_EE_MISSION_MODE,
+ MHI_CB_SYS_ERROR,
+ MHI_CB_FATAL_ERROR,
+ MHI_CB_BW_REQ,
+};
+
+/**
+ * enum mhi_flags - Transfer flags
+ * @MHI_EOB: End of buffer for bulk transfer
+ * @MHI_EOT: End of transfer
+ * @MHI_CHAIN: Linked transfer
+ */
+enum mhi_flags {
+ MHI_EOB = BIT(0),
+ MHI_EOT = BIT(1),
+ MHI_CHAIN = BIT(2),
+};
+
+/**
+ * enum mhi_device_type - Device types
+ * @MHI_DEVICE_XFER: Handles data transfer
+ * @MHI_DEVICE_CONTROLLER: Control device
+ */
+enum mhi_device_type {
+ MHI_DEVICE_XFER,
+ MHI_DEVICE_CONTROLLER,
+};
+
+/**
+ * enum mhi_ch_type - Channel types
+ * @MHI_CH_TYPE_INVALID: Invalid channel type
+ * @MHI_CH_TYPE_OUTBOUND: Outbound channel to the device
+ * @MHI_CH_TYPE_INBOUND: Inbound channel from the device
+ * @MHI_CH_TYPE_INBOUND_COALESCED: Coalesced channel for the device to combine
+ * multiple packets and send them as a single
+ * large packet to reduce CPU consumption
+ */
+enum mhi_ch_type {
+ MHI_CH_TYPE_INVALID = 0,
+ MHI_CH_TYPE_OUTBOUND = DMA_TO_DEVICE,
+ MHI_CH_TYPE_INBOUND = DMA_FROM_DEVICE,
+ MHI_CH_TYPE_INBOUND_COALESCED = 3,
+};
+
+/**
+ * struct image_info - Firmware and RDDM table
+ * @mhi_buf: Buffer for firmware and RDDM table
+ * @entries: # of entries in table
+ */
+struct image_info {
+ struct mhi_buf *mhi_buf;
+ /* private: from internal.h */
+ struct bhi_vec_entry *bhi_vec;
+ /* public: */
+ u32 entries;
+};
+
+/**
+ * struct mhi_link_info - BW requirement
+ * @target_link_speed: Link speed as defined by TLS bits in LinkControl reg
+ * @target_link_width: Link width as defined by NLW bits in LinkStatus reg
+ */
+struct mhi_link_info {
+ unsigned int target_link_speed;
+ unsigned int target_link_width;
+};
+
+/**
+ * enum mhi_ee_type - Execution environment types
+ * @MHI_EE_PBL: Primary Bootloader
+ * @MHI_EE_SBL: Secondary Bootloader
+ * @MHI_EE_AMSS: Modem, aka the primary runtime EE
+ * @MHI_EE_RDDM: Ram dump download mode
+ * @MHI_EE_WFW: WLAN firmware mode
+ * @MHI_EE_PTHRU: Passthrough
+ * @MHI_EE_EDL: Embedded downloader
+ * @MHI_EE_FP: Flash Programmer Environment
+ */
+enum mhi_ee_type {
+ MHI_EE_PBL,
+ MHI_EE_SBL,
+ MHI_EE_AMSS,
+ MHI_EE_RDDM,
+ MHI_EE_WFW,
+ MHI_EE_PTHRU,
+ MHI_EE_EDL,
+ MHI_EE_FP,
+ MHI_EE_MAX_SUPPORTED = MHI_EE_FP,
+ MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */
+ MHI_EE_NOT_SUPPORTED,
+ MHI_EE_MAX,
+};
+
+/**
+ * enum mhi_state - MHI states
+ * @MHI_STATE_RESET: Reset state
+ * @MHI_STATE_READY: Ready state
+ * @MHI_STATE_M0: M0 state
+ * @MHI_STATE_M1: M1 state
+ * @MHI_STATE_M2: M2 state
+ * @MHI_STATE_M3: M3 state
+ * @MHI_STATE_M3_FAST: M3 Fast state
+ * @MHI_STATE_BHI: BHI state
+ * @MHI_STATE_SYS_ERR: System Error state
+ */
+enum mhi_state {
+ MHI_STATE_RESET = 0x0,
+ MHI_STATE_READY = 0x1,
+ MHI_STATE_M0 = 0x2,
+ MHI_STATE_M1 = 0x3,
+ MHI_STATE_M2 = 0x4,
+ MHI_STATE_M3 = 0x5,
+ MHI_STATE_M3_FAST = 0x6,
+ MHI_STATE_BHI = 0x7,
+ MHI_STATE_SYS_ERR = 0xFF,
+ MHI_STATE_MAX,
+};
+
+/**
+ * enum mhi_ch_ee_mask - Execution environment mask for channel
+ * @MHI_CH_EE_PBL: Allow channel to be used in PBL EE
+ * @MHI_CH_EE_SBL: Allow channel to be used in SBL EE
+ * @MHI_CH_EE_AMSS: Allow channel to be used in AMSS EE
+ * @MHI_CH_EE_RDDM: Allow channel to be used in RDDM EE
+ * @MHI_CH_EE_PTHRU: Allow channel to be used in PTHRU EE
+ * @MHI_CH_EE_WFW: Allow channel to be used in WFW EE
+ * @MHI_CH_EE_EDL: Allow channel to be used in EDL EE
+ */
+enum mhi_ch_ee_mask {
+ MHI_CH_EE_PBL = BIT(MHI_EE_PBL),
+ MHI_CH_EE_SBL = BIT(MHI_EE_SBL),
+ MHI_CH_EE_AMSS = BIT(MHI_EE_AMSS),
+ MHI_CH_EE_RDDM = BIT(MHI_EE_RDDM),
+ MHI_CH_EE_PTHRU = BIT(MHI_EE_PTHRU),
+ MHI_CH_EE_WFW = BIT(MHI_EE_WFW),
+ MHI_CH_EE_EDL = BIT(MHI_EE_EDL),
+};
+
+/**
+ * enum mhi_er_data_type - Event ring data types
+ * @MHI_ER_DATA: Only client data over this ring
+ * @MHI_ER_CTRL: MHI control data and client data
+ */
+enum mhi_er_data_type {
+ MHI_ER_DATA,
+ MHI_ER_CTRL,
+};
+
+/**
+ * enum mhi_db_brst_mode - Doorbell mode
+ * @MHI_DB_BRST_DISABLE: Burst mode disable
+ * @MHI_DB_BRST_ENABLE: Burst mode enable
+ */
+enum mhi_db_brst_mode {
+ MHI_DB_BRST_DISABLE = 0x2,
+ MHI_DB_BRST_ENABLE = 0x3,
+};
+
+/**
+ * struct mhi_channel_config - Channel configuration structure for controller
+ * @name: The name of this channel
+ * @num: The number assigned to this channel
+ * @num_elements: The number of elements that can be queued to this channel
+ * @local_elements: The local ring length of the channel
+ * @event_ring: The event ring index that services this channel
+ * @dir: Direction that data may flow on this channel
+ * @type: Channel type
+ * @ee_mask: Execution Environment mask for this channel
+ * @pollcfg: Polling configuration for burst mode. 0 is default. Milliseconds
+ *           for UL channels, multiple of 8 ring elements for DL channels
+ * @doorbell: Doorbell mode
+ * @lpm_notify: The channel master requires low power mode notifications
+ * @offload_channel: The client manages the channel completely
+ * @doorbell_mode_switch: Channel switches to doorbell mode on M0 transition
+ * @auto_queue: Framework will automatically queue buffers for DL traffic
+ * @wake_capable: Channel capable of waking up the system
+ */
+struct mhi_channel_config {
+ char *name;
+ u32 num;
+ u32 num_elements;
+ u32 local_elements;
+ u32 event_ring;
+ enum dma_data_direction dir;
+ enum mhi_ch_type type;
+ u32 ee_mask;
+ u32 pollcfg;
+ enum mhi_db_brst_mode doorbell;
+ bool lpm_notify;
+ bool offload_channel;
+ bool doorbell_mode_switch;
+ bool auto_queue;
+ bool wake_capable;
+};
+
+/**
+ * struct mhi_event_config - Event ring configuration structure for controller
+ * @num_elements: The number of elements that can be queued to this ring
+ * @irq_moderation_ms: Delay irq for additional events to be aggregated
+ * @irq: IRQ associated with this ring
+ * @channel: Dedicated channel number. U32_MAX indicates a non-dedicated ring
+ * @priority: Priority of this ring. Use 1 for now
+ * @mode: Doorbell mode
+ * @data_type: Type of data this ring will process
+ * @hardware_event: This ring is associated with hardware channels
+ * @client_managed: This ring is client managed
+ * @offload_channel: This ring is associated with an offloaded channel
+ */
+struct mhi_event_config {
+ u32 num_elements;
+ u32 irq_moderation_ms;
+ u32 irq;
+ u32 channel;
+ u32 priority;
+ enum mhi_db_brst_mode mode;
+ enum mhi_er_data_type data_type;
+ bool hardware_event;
+ bool client_managed;
+ bool offload_channel;
+};
+
+/**
+ * struct mhi_controller_config - Root MHI controller configuration
+ * @max_channels: Maximum number of channels supported
+ * @timeout_ms: Timeout value for operations. 0 means use default
+ * @buf_len: Size of automatically allocated buffers. 0 means use default
+ * @num_channels: Number of channels defined in @ch_cfg
+ * @ch_cfg: Array of defined channels
+ * @num_events: Number of event rings defined in @event_cfg
+ * @event_cfg: Array of defined event rings
+ * @use_bounce_buf: Use a bounce buffer pool due to limited DDR access
+ * @m2_no_db: Host is not allowed to ring DB in M2 state
+ */
+struct mhi_controller_config {
+ u32 max_channels;
+ u32 timeout_ms;
+ u32 buf_len;
+ u32 num_channels;
+ const struct mhi_channel_config *ch_cfg;
+ u32 num_events;
+ struct mhi_event_config *event_cfg;
+ bool use_bounce_buf;
+ bool m2_no_db;
+};
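As a rough illustration of how a controller driver might populate these tables, here is a minimal hypothetical sketch; the channel names, numbers, and ring sizes are invented for the example rather than taken from any real controller:

/* Hypothetical sketch: one UL/DL channel pair served by a single event ring. */
static const struct mhi_channel_config example_channels[] = {
	{
		.name = "EXAMPLE_UL",		/* invented channel name */
		.num = 0,
		.num_elements = 64,
		.event_ring = 0,
		.dir = DMA_TO_DEVICE,
		.ee_mask = MHI_CH_EE_AMSS,
		.doorbell = MHI_DB_BRST_DISABLE,
	},
	{
		.name = "EXAMPLE_DL",		/* invented channel name */
		.num = 1,
		.num_elements = 64,
		.event_ring = 0,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = MHI_CH_EE_AMSS,
		.doorbell = MHI_DB_BRST_DISABLE,
		.auto_queue = true,		/* core queues DL buffers itself */
	},
};

static struct mhi_event_config example_events[] = {
	{
		.num_elements = 128,
		.irq_moderation_ms = 1,
		.irq = 1,
		.channel = U32_MAX,		/* not dedicated to one channel */
		.priority = 1,			/* "use 1 for now" per the doc */
		.mode = MHI_DB_BRST_DISABLE,
		.data_type = MHI_ER_CTRL,
	},
};

static const struct mhi_controller_config example_config = {
	.max_channels = 128,
	.timeout_ms = 2000,
	.num_channels = ARRAY_SIZE(example_channels),
	.ch_cfg = example_channels,
	.num_events = ARRAY_SIZE(example_events),
	.event_cfg = example_events,
};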
+
+/**
+ * struct mhi_controller - Master MHI controller structure
+ * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
+ * controller (required)
+ * @mhi_dev: MHI device instance for the controller
+ * @debugfs_dentry: MHI controller debugfs directory
+ * @regs: Base address of MHI MMIO register space (required)
+ * @bhi: Points to base of MHI BHI register space
+ * @bhie: Points to base of MHI BHIe register space
+ * @wake_db: MHI WAKE doorbell register address
+ * @iova_start: IOMMU starting address for data (required)
+ * @iova_stop: IOMMU stop address for data (required)
+ * @fw_image: Firmware image name for normal booting (optional)
+ * @edl_image: Firmware image name for emergency download mode (optional)
+ * @rddm_size: RAM dump size that host should allocate for debugging purpose
+ * @sbl_size: SBL image size downloaded through BHIe (optional)
+ * @seg_len: BHIe vector size (optional)
+ * @reg_len: Length of the MHI MMIO region (required)
+ * @fbc_image: Points to firmware image buffer
+ * @rddm_image: Points to RAM dump buffer
+ * @mhi_chan: Points to the channel configuration table
+ * @lpm_chans: List of channels that require LPM notifications
+ * @irq: base irq # to request (required)
+ * @max_chan: Maximum number of channels the controller supports
+ * @total_ev_rings: Total # of event rings allocated
+ * @hw_ev_rings: Number of hardware event rings
+ * @sw_ev_rings: Number of software event rings
+ * @nr_irqs: Number of IRQs allocated by bus master (required)
+ * @family_number: MHI controller family number
+ * @device_number: MHI controller device number
+ * @major_version: MHI controller major revision number
+ * @minor_version: MHI controller minor revision number
+ * @serial_number: MHI controller serial number obtained from BHI
+ * @oem_pk_hash: MHI controller OEM PK Hash obtained from BHI
+ * @mhi_event: MHI event ring configurations table
+ * @mhi_cmd: MHI command ring configurations table
+ * @mhi_ctxt: MHI device context, shared memory between host and device
+ * @pm_mutex: Mutex for suspend/resume operation
+ * @pm_lock: Lock for protecting MHI power management state
+ * @timeout_ms: Timeout in ms for state transitions
+ * @pm_state: MHI power management state
+ * @db_access: DB access states
+ * @ee: MHI device execution environment
+ * @dev_state: MHI device state
+ * @dev_wake: Device wakeup count
+ * @pending_pkts: Pending packets for the controller
+ * @M0, M2, M3: Counters to track number of device MHI state changes
+ * @transition_list: List of MHI state transitions
+ * @transition_lock: Lock for protecting MHI state transition list
+ * @wlock: Lock for protecting device wakeup
+ * @mhi_link_info: Device bandwidth info
+ * @st_worker: State transition worker
+ * @hiprio_wq: High priority workqueue for MHI work such as state transitions
+ * @state_event: State change event
+ * @status_cb: CB function to notify power states of the device (required)
+ * @wake_get: CB function to assert device wake (optional)
+ * @wake_put: CB function to de-assert device wake (optional)
+ * @wake_toggle: CB function to assert and de-assert device wake (optional)
+ * @runtime_get: CB function to request controller runtime resume (required)
+ * @runtime_put: CB function to decrement pm usage (required)
+ * @map_single: CB function to create TRE buffer
+ * @unmap_single: CB function to destroy TRE buffer
+ * @read_reg: Read a MHI register via the physical link (required)
+ * @write_reg: Write a MHI register via the physical link (required)
+ * @reset: Controller specific reset function (optional)
+ * @buffer_len: Bounce buffer length
+ * @index: Index of the MHI controller instance
+ * @bounce_buf: Use of bounce buffer
+ * @fbc_download: MHI host needs to do complete image transfer (optional)
+ * @wake_set: Device wakeup set flag
+ * @irq_flags: irq flags passed to request_irq (optional)
+ * @mru: the default MRU for the MHI device
+ *
+ * Fields marked as (required) need to be populated by the controller driver
+ * before calling mhi_register_controller(). The fields marked as (optional)
+ * can be populated depending on the use case.
+ *
+ * The following fields are present for the purpose of implementing any device
+ * specific quirks or customizations for specific MHI revisions used in the
+ * device by the controller drivers. The MHI stack will simply populate these
+ * fields during mhi_register_controller():
+ * family_number
+ * device_number
+ * major_version
+ * minor_version
+ */
+struct mhi_controller {
+ struct device *cntrl_dev;
+ struct mhi_device *mhi_dev;
+ struct dentry *debugfs_dentry;
+ void __iomem *regs;
+ void __iomem *bhi;
+ void __iomem *bhie;
+ void __iomem *wake_db;
+
+ dma_addr_t iova_start;
+ dma_addr_t iova_stop;
+ const char *fw_image;
+ const char *edl_image;
+ size_t rddm_size;
+ size_t sbl_size;
+ size_t seg_len;
+ size_t reg_len;
+ struct image_info *fbc_image;
+ struct image_info *rddm_image;
+ struct mhi_chan *mhi_chan;
+ struct list_head lpm_chans;
+ int *irq;
+ u32 max_chan;
+ u32 total_ev_rings;
+ u32 hw_ev_rings;
+ u32 sw_ev_rings;
+ u32 nr_irqs;
+ u32 family_number;
+ u32 device_number;
+ u32 major_version;
+ u32 minor_version;
+ u32 serial_number;
+ u32 oem_pk_hash[MHI_MAX_OEM_PK_HASH_SEGMENTS];
+
+ struct mhi_event *mhi_event;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_ctxt *mhi_ctxt;
+
+ struct mutex pm_mutex;
+ rwlock_t pm_lock;
+ u32 timeout_ms;
+ u32 pm_state;
+ u32 db_access;
+ enum mhi_ee_type ee;
+ enum mhi_state dev_state;
+ atomic_t dev_wake;
+ atomic_t pending_pkts;
+ u32 M0, M2, M3;
+ struct list_head transition_list;
+ spinlock_t transition_lock;
+ spinlock_t wlock;
+ struct mhi_link_info mhi_link_info;
+ struct work_struct st_worker;
+ struct workqueue_struct *hiprio_wq;
+ wait_queue_head_t state_event;
+
+ void (*status_cb)(struct mhi_controller *mhi_cntrl,
+ enum mhi_callback cb);
+ void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override);
+ void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override);
+ void (*wake_toggle)(struct mhi_controller *mhi_cntrl);
+ int (*runtime_get)(struct mhi_controller *mhi_cntrl);
+ void (*runtime_put)(struct mhi_controller *mhi_cntrl);
+ int (*map_single)(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf);
+ void (*unmap_single)(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf);
+ int (*read_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
+ u32 *out);
+ void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
+ u32 val);
+ void (*reset)(struct mhi_controller *mhi_cntrl);
+
+ size_t buffer_len;
+ int index;
+ bool bounce_buf;
+ bool fbc_download;
+ bool wake_set;
+ unsigned long irq_flags;
+ u32 mru;
+};
+
+/**
+ * struct mhi_device - Structure representing an MHI device which binds
+ * to channels or is associated with controllers
+ * @id: Pointer to MHI device ID struct
+ * @name: Name of the associated MHI device
+ * @mhi_cntrl: Controller the device belongs to
+ * @ul_chan: UL channel for the device
+ * @dl_chan: DL channel for the device
+ * @dev: Driver model device node for the MHI device
+ * @dev_type: MHI device type
+ * @ul_chan_id: MHI channel id for UL transfer
+ * @dl_chan_id: MHI channel id for DL transfer
+ * @dev_wake: Device wakeup counter
+ */
+struct mhi_device {
+ const struct mhi_device_id *id;
+ const char *name;
+ struct mhi_controller *mhi_cntrl;
+ struct mhi_chan *ul_chan;
+ struct mhi_chan *dl_chan;
+ struct device dev;
+ enum mhi_device_type dev_type;
+ int ul_chan_id;
+ int dl_chan_id;
+ u32 dev_wake;
+};
+
+/**
+ * struct mhi_result - Completed buffer information
+ * @buf_addr: Address of data buffer
+ * @bytes_xferd: # of bytes transferred
+ * @dir: Channel direction
+ * @transaction_status: Status of last transaction
+ */
+struct mhi_result {
+ void *buf_addr;
+ size_t bytes_xferd;
+ enum dma_data_direction dir;
+ int transaction_status;
+};
+
+/**
+ * struct mhi_buf - MHI Buffer description
+ * @buf: Virtual address of the buffer
+ * @name: Buffer label. For offload channel configurations, the name must be:
+ * ECA - Event context array data
+ * CCA - Channel context array data
+ * @dma_addr: IOMMU address of the buffer
+ * @len: # of bytes
+ */
+struct mhi_buf {
+ void *buf;
+ const char *name;
+ dma_addr_t dma_addr;
+ size_t len;
+};
+
+/**
+ * struct mhi_driver - Structure representing an MHI client driver
+ * @id_table: Pointer to MHI device ID table
+ * @probe: CB function for client driver probe function
+ * @remove: CB function for client driver remove function
+ * @ul_xfer_cb: CB function for UL data transfer
+ * @dl_xfer_cb: CB function for DL data transfer
+ * @status_cb: CB functions for asynchronous status
+ * @driver: Device driver model driver
+ */
+struct mhi_driver {
+ const struct mhi_device_id *id_table;
+ int (*probe)(struct mhi_device *mhi_dev,
+ const struct mhi_device_id *id);
+ void (*remove)(struct mhi_device *mhi_dev);
+ void (*ul_xfer_cb)(struct mhi_device *mhi_dev,
+ struct mhi_result *result);
+ void (*dl_xfer_cb)(struct mhi_device *mhi_dev,
+ struct mhi_result *result);
+ void (*status_cb)(struct mhi_device *mhi_dev, enum mhi_callback mhi_cb);
+ struct device_driver driver;
+};
+
+#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver)
+#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
+
+/**
+ * mhi_alloc_controller - Allocate the MHI Controller structure
+ * Allocate the mhi_controller structure using zero-initialized memory
+ */
+struct mhi_controller *mhi_alloc_controller(void);
+
+/**
+ * mhi_free_controller - Free the MHI Controller structure
+ * Free the mhi_controller structure which was previously allocated
+ */
+void mhi_free_controller(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_register_controller - Register MHI controller
+ * @mhi_cntrl: MHI controller to register
+ * @config: Configuration to use for the controller
+ */
+int mhi_register_controller(struct mhi_controller *mhi_cntrl,
+ const struct mhi_controller_config *config);
+
+/**
+ * mhi_unregister_controller - Unregister MHI controller
+ * @mhi_cntrl: MHI controller to unregister
+ */
+void mhi_unregister_controller(struct mhi_controller *mhi_cntrl);
+
+/*
+ * module_mhi_driver() - Helper macro for drivers that don't do
+ * anything special other than using default mhi_driver_register() and
+ * mhi_driver_unregister(). This eliminates a lot of boilerplate.
+ * Each module may only use this macro once.
+ */
+#define module_mhi_driver(mhi_drv) \
+ module_driver(mhi_drv, mhi_driver_register, \
+ mhi_driver_unregister)
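A skeletal client driver built on these hooks might look like the following sketch; the driver name, channel name, and callback bodies are purely illustrative:

/* Illustrative sketch of an MHI client driver; all names are invented. */
static int example_probe(struct mhi_device *mhi_dev,
			 const struct mhi_device_id *id)
{
	/* Start the UL/DL channels bound to this device. */
	return mhi_prepare_for_transfer(mhi_dev);
}

static void example_remove(struct mhi_device *mhi_dev)
{
	/* Reset the channels and free the channel context. */
	mhi_unprepare_from_transfer(mhi_dev);
}

static void example_ul_cb(struct mhi_device *mhi_dev,
			  struct mhi_result *result)
{
	/* UL transfer completed; result->buf_addr may be reused or freed. */
}

static void example_dl_cb(struct mhi_device *mhi_dev,
			  struct mhi_result *result)
{
	/* result->buf_addr holds result->bytes_xferd bytes of DL data. */
}

static const struct mhi_device_id example_id_table[] = {
	{ .chan = "EXAMPLE_DL" },	/* invented channel name */
	{},
};

static struct mhi_driver example_driver = {
	.id_table = example_id_table,
	.probe = example_probe,
	.remove = example_remove,
	.ul_xfer_cb = example_ul_cb,
	.dl_xfer_cb = example_dl_cb,
	.driver = {
		.name = "mhi_example",
	},
};
module_mhi_driver(example_driver);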
+
+/*
+ * Macro to avoid include chaining to get THIS_MODULE
+ */
+#define mhi_driver_register(mhi_drv) \
+ __mhi_driver_register(mhi_drv, THIS_MODULE)
+
+/**
+ * __mhi_driver_register - Register driver with MHI framework
+ * @mhi_drv: Driver associated with the device
+ * @owner: The module owner
+ */
+int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner);
+
+/**
+ * mhi_driver_unregister - Unregister a driver for mhi_devices
+ * @mhi_drv: Driver associated with the device
+ */
+void mhi_driver_unregister(struct mhi_driver *mhi_drv);
+
+/**
+ * mhi_set_mhi_state - Set MHI device state
+ * @mhi_cntrl: MHI controller
+ * @state: State to set
+ */
+void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
+ enum mhi_state state);
+
+/**
+ * mhi_notify - Notify the MHI client driver about client device status
+ * @mhi_dev: MHI device instance
+ * @cb_reason: MHI callback reason
+ */
+void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason);
+
+/**
+ * mhi_get_free_desc_count - Get transfer ring length
+ * Get # of TDs available to queue buffers
+ * @mhi_dev: Device associated with the channels
+ * @dir: Direction of the channel
+ */
+int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
+ enum dma_data_direction dir);
+
+/**
+ * mhi_prepare_for_power_up - Do pre-initialization before power up.
+ * This is optional; call this before power up if
+ * the controller does not want the bus framework
+ * to automatically free any allocated memory
+ * during the shutdown process.
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_async_power_up - Start MHI power up sequence
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_async_power_up(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_sync_power_up - Start MHI power up sequence and wait till the device
+ * enters valid EE state
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_sync_power_up(struct mhi_controller *mhi_cntrl);
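Putting the registration and power-up calls together, a controller driver's bring-up path might be sketched as below; error handling is abbreviated, and the bus-specific fields plus the example_config table from the earlier sketch are assumed to be in place:

/* Sketch of a typical controller bring-up; details vary per bus driver. */
static int example_controller_init(struct mhi_controller *mhi_cntrl)
{
	int ret;

	/* regs, irq, nr_irqs, read_reg, write_reg, status_cb and the
	 * runtime_get/runtime_put callbacks are assumed populated already.
	 */
	ret = mhi_register_controller(mhi_cntrl, &example_config);
	if (ret)
		return ret;

	/* Optional: keep allocations across power cycles. */
	ret = mhi_prepare_for_power_up(mhi_cntrl);
	if (ret)
		goto err_unregister;

	/* Boot the device and wait for a valid EE state. */
	ret = mhi_sync_power_up(mhi_cntrl);
	if (ret)
		goto err_unprepare;

	return 0;

err_unprepare:
	mhi_unprepare_after_power_down(mhi_cntrl);
err_unregister:
	mhi_unregister_controller(mhi_cntrl);
	return ret;
}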
+
+/**
+ * mhi_power_down - Start MHI power down sequence
+ * @mhi_cntrl: MHI controller
+ * @graceful: Link is still accessible, so do a graceful shutdown process
+ */
+void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);
+
+/**
+ * mhi_unprepare_after_power_down - Free any allocated memory after power down
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_pm_suspend - Move MHI into a suspended state
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_pm_resume - Resume MHI from suspended state
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_pm_resume_force - Force resume MHI from suspended state
+ * @mhi_cntrl: MHI controller
+ *
+ * Resume the device irrespective of its MHI state. As per the MHI spec, devices
+ * have to be in M3 state during resume. But some devices seem to be in an MHI
+ * state other than M3, yet they continue working fine if allowed. This API is
+ * intended to be used for such devices.
+ *
+ * Return: 0 if the resume succeeds, a negative error code otherwise
+ */
+int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_download_rddm_image - Download ramdump image from device for
+ * debugging purpose.
+ * @mhi_cntrl: MHI controller
+ * @in_panic: Download rddm image during kernel panic
+ */
+int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic);
+
+/**
+ * mhi_force_rddm_mode - Force device into rddm mode
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_get_exec_env - Get BHI execution environment of the device
+ * @mhi_cntrl: MHI controller
+ */
+enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_get_mhi_state - Get MHI state of the device
+ * @mhi_cntrl: MHI controller
+ */
+enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_soc_reset - Trigger a device reset. This can be used as a last resort
+ * to reset and recover a device.
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_soc_reset(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_device_get - Disable device low power mode
+ * @mhi_dev: Device associated with the channel
+ */
+void mhi_device_get(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_device_get_sync - Disable device low power mode. Synchronously
+ * take the controller out of suspended state
+ * @mhi_dev: Device associated with the channel
+ */
+int mhi_device_get_sync(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_device_put - Re-enable device low power mode
+ * @mhi_dev: Device associated with the channel
+ */
+void mhi_device_put(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_prepare_for_transfer - Setup UL and DL channels for data transfer.
+ * @mhi_dev: Device associated with the channels
+ *
+ * Allocate and initialize the channel context and also issue the START channel
+ * command to both channels. Channels can be started only if both host and
+ * device execution environments match and channels are in a DISABLED state.
+ */
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_prepare_for_transfer_autoqueue - Setup UL and DL channels with auto queue
+ * buffers for DL traffic
+ * @mhi_dev: Device associated with the channels
+ *
+ * Allocate and initialize the channel context and also issue the START channel
+ * command to both channels. Channels can be started only if both host and
+ * device execution environments match and channels are in a DISABLED state.
+ * The MHI core will automatically allocate and queue buffers for the DL traffic.
+ */
+int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer.
+ * Issue the RESET channel command and let the
+ * device clean up the context so no incoming
+ * transfers are seen on the host. Free memory
+ * associated with the context on the host. If the device
+ * is unresponsive, only perform a host side
+ * clean-up. Channels can be reset only if both
+ * host and device execution environments match
+ * and channels are in an ENABLED, STOPPED or
+ * SUSPENDED state.
+ * @mhi_dev: Device associated with the channels
+ */
+void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_poll - Poll for any available data in DL direction
+ * @mhi_dev: Device associated with the channels
+ * @budget: # of events to process
+ */
+int mhi_poll(struct mhi_device *mhi_dev, u32 budget);
+
+/**
+ * mhi_queue_dma - Send or receive DMA mapped buffers from client device
+ * over MHI channel
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ * @mhi_buf: Buffer for holding the DMA mapped data
+ * @len: Buffer length
+ * @mflags: MHI transfer flags used for the transfer
+ */
+int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags);
+
+/**
+ * mhi_queue_buf - Send or receive raw buffers from client device over MHI
+ * channel
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ * @buf: Buffer for holding the data
+ * @len: Buffer length
+ * @mflags: MHI transfer flags used for the transfer
+ */
+int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ void *buf, size_t len, enum mhi_flags mflags);
+
+/**
+ * mhi_queue_skb - Send or receive SKBs from client device over MHI channel
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ * @skb: Buffer for holding SKBs
+ * @len: Buffer length
+ * @mflags: MHI transfer flags used for the transfer
+ */
+int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ struct sk_buff *skb, size_t len, enum mhi_flags mflags);
+
+/**
+ * mhi_queue_is_full - Determine whether queueing new elements is possible
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ */
+bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir);
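A client might combine the queueing helpers as in this short sketch; the fullness check and the MHI_EOT flag are one reasonable choice rather than the only one:

/* Sketch: queue one UL buffer if the transfer ring has room. */
static int example_send(struct mhi_device *mhi_dev, void *buf, size_t len)
{
	if (mhi_queue_is_full(mhi_dev, DMA_TO_DEVICE))
		return -EAGAIN;

	/* MHI_EOT marks this buffer as the end of the transfer. */
	return mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, buf, len, MHI_EOT);
}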
+
+#endif /* _MHI_H_ */
diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
new file mode 100644
index 000000000000..478aece17046
--- /dev/null
+++ b/include/linux/mhi_ep.h
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Linaro Ltd.
+ *
+ */
+#ifndef _MHI_EP_H_
+#define _MHI_EP_H_
+
+#include <linux/dma-direction.h>
+#include <linux/mhi.h>
+
+#define MHI_EP_DEFAULT_MTU 0x8000
+
+/**
+ * struct mhi_ep_channel_config - Channel configuration structure for controller
+ * @name: The name of this channel
+ * @num: The number assigned to this channel
+ * @num_elements: The number of elements that can be queued to this channel
+ * @dir: Direction that data may flow on this channel
+ */
+struct mhi_ep_channel_config {
+ char *name;
+ u32 num;
+ u32 num_elements;
+ enum dma_data_direction dir;
+};
+
+/**
+ * struct mhi_ep_cntrl_config - MHI Endpoint controller configuration
+ * @mhi_version: MHI spec version supported by the controller
+ * @max_channels: Maximum number of channels supported
+ * @num_channels: Number of channels defined in @ch_cfg
+ * @ch_cfg: Array of defined channels
+ */
+struct mhi_ep_cntrl_config {
+ u32 mhi_version;
+ u32 max_channels;
+ u32 num_channels;
+ const struct mhi_ep_channel_config *ch_cfg;
+};
+
+/**
+ * struct mhi_ep_db_info - MHI Endpoint doorbell info
+ * @mask: Mask of the doorbell interrupt
+ * @status: Status of the doorbell interrupt
+ */
+struct mhi_ep_db_info {
+ u32 mask;
+ u32 status;
+};
+
+/**
+ * struct mhi_ep_cntrl - MHI Endpoint controller structure
+ * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
+ * Endpoint controller
+ * @mhi_dev: MHI Endpoint device instance for the controller
+ * @mmio: MMIO region containing the MHI registers
+ * @mhi_chan: Points to the channel configuration table
+ * @mhi_event: Points to the event ring configurations table
+ * @mhi_cmd: Points to the command ring configurations table
+ * @sm: MHI Endpoint state machine
+ * @ch_ctx_cache: Cache of host channel context data structure
+ * @ev_ctx_cache: Cache of host event context data structure
+ * @cmd_ctx_cache: Cache of host command context data structure
+ * @ch_ctx_host_pa: Physical address of host channel context data structure
+ * @ev_ctx_host_pa: Physical address of host event context data structure
+ * @cmd_ctx_host_pa: Physical address of host command context data structure
+ * @ch_ctx_cache_phys: Physical address of the host channel context cache
+ * @ev_ctx_cache_phys: Physical address of the host event context cache
+ * @cmd_ctx_cache_phys: Physical address of the host command context cache
+ * @chdb: Array of channel doorbell interrupt info
+ * @event_lock: Lock for protecting event rings
+ * @list_lock: Lock for protecting state transition and channel doorbell lists
+ * @state_lock: Lock for protecting state transitions
+ * @st_transition_list: List of state transitions
+ * @ch_db_list: List of queued channel doorbells
+ * @wq: Dedicated workqueue for handling rings and state changes
+ * @state_work: State transition worker
+ * @reset_work: Worker for MHI Endpoint reset
+ * @cmd_ring_work: Worker for processing command rings
+ * @ch_ring_work: Worker for processing channel rings
+ * @raise_irq: CB function for raising IRQ to the host
+ * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it
+ * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context
+ * @read_from_host: CB function for reading from host memory from endpoint
+ * @write_to_host: CB function for writing to host memory from endpoint
+ * @mhi_state: MHI Endpoint state
+ * @max_chan: Maximum channels supported by the endpoint controller
+ * @mru: MRU (Maximum Receive Unit) value of the endpoint controller
+ * @event_rings: Number of event rings supported by the endpoint controller
+ * @hw_event_rings: Number of hardware event rings supported by the endpoint controller
+ * @chdb_offset: Channel doorbell offset set by the host
+ * @erdb_offset: Event ring doorbell offset set by the host
+ * @index: MHI Endpoint controller index
+ * @irq: IRQ used by the endpoint controller
+ * @enabled: Whether the endpoint controller is enabled or not
+ */
+struct mhi_ep_cntrl {
+ struct device *cntrl_dev;
+ struct mhi_ep_device *mhi_dev;
+ void __iomem *mmio;
+
+ struct mhi_ep_chan *mhi_chan;
+ struct mhi_ep_event *mhi_event;
+ struct mhi_ep_cmd *mhi_cmd;
+ struct mhi_ep_sm *sm;
+
+ struct mhi_chan_ctxt *ch_ctx_cache;
+ struct mhi_event_ctxt *ev_ctx_cache;
+ struct mhi_cmd_ctxt *cmd_ctx_cache;
+ u64 ch_ctx_host_pa;
+ u64 ev_ctx_host_pa;
+ u64 cmd_ctx_host_pa;
+ phys_addr_t ch_ctx_cache_phys;
+ phys_addr_t ev_ctx_cache_phys;
+ phys_addr_t cmd_ctx_cache_phys;
+
+ struct mhi_ep_db_info chdb[4];
+ struct mutex event_lock;
+ spinlock_t list_lock;
+ spinlock_t state_lock;
+
+ struct list_head st_transition_list;
+ struct list_head ch_db_list;
+
+ struct workqueue_struct *wq;
+ struct work_struct state_work;
+ struct work_struct reset_work;
+ struct work_struct cmd_ring_work;
+ struct work_struct ch_ring_work;
+
+ void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector);
+ int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr,
+ void __iomem **virt, size_t size);
+ void (*unmap_free)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t phys,
+ void __iomem *virt, size_t size);
+ int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl, u64 from, void *to, size_t size);
+ int (*write_to_host)(struct mhi_ep_cntrl *mhi_cntrl, void *from, u64 to, size_t size);
+
+ enum mhi_state mhi_state;
+
+ u32 max_chan;
+ u32 mru;
+ u32 event_rings;
+ u32 hw_event_rings;
+ u32 chdb_offset;
+ u32 erdb_offset;
+ u32 index;
+ int irq;
+ bool enabled;
+};
+
+/**
+ * struct mhi_ep_device - Structure representing an MHI Endpoint device that binds
+ * to channels or is associated with controllers
+ * @dev: Driver model device node for the MHI Endpoint device
+ * @mhi_cntrl: Controller the device belongs to
+ * @id: Pointer to MHI Endpoint device ID struct
+ * @name: Name of the associated MHI Endpoint device
+ * @ul_chan: UL (from host to endpoint) channel for the device
+ * @dl_chan: DL (from endpoint to host) channel for the device
+ * @dev_type: MHI device type
+ */
+struct mhi_ep_device {
+ struct device dev;
+ struct mhi_ep_cntrl *mhi_cntrl;
+ const struct mhi_device_id *id;
+ const char *name;
+ struct mhi_ep_chan *ul_chan;
+ struct mhi_ep_chan *dl_chan;
+ enum mhi_device_type dev_type;
+};
+
+/**
+ * struct mhi_ep_driver - Structure representing an MHI Endpoint client driver
+ * @id_table: Pointer to MHI Endpoint device ID table
+ * @driver: Device driver model driver
+ * @probe: CB function for client driver probe function
+ * @remove: CB function for client driver remove function
+ * @ul_xfer_cb: CB function for UL (from host to endpoint) data transfer
+ * @dl_xfer_cb: CB function for DL (from endpoint to host) data transfer
+ */
+struct mhi_ep_driver {
+ const struct mhi_device_id *id_table;
+ struct device_driver driver;
+ int (*probe)(struct mhi_ep_device *mhi_ep,
+ const struct mhi_device_id *id);
+ void (*remove)(struct mhi_ep_device *mhi_ep);
+ void (*ul_xfer_cb)(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *result);
+ void (*dl_xfer_cb)(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *result);
+};
+
+#define to_mhi_ep_device(dev) container_of(dev, struct mhi_ep_device, dev)
+#define to_mhi_ep_driver(drv) container_of(drv, struct mhi_ep_driver, driver)
+
+/*
+ * module_mhi_ep_driver() - Helper macro for drivers that don't do
+ * anything special other than using default mhi_ep_driver_register() and
+ * mhi_ep_driver_unregister(). This eliminates a lot of boilerplate.
+ * Each module may only use this macro once.
+ */
+#define module_mhi_ep_driver(mhi_drv) \
+ module_driver(mhi_drv, mhi_ep_driver_register, \
+ mhi_ep_driver_unregister)
+
+/*
+ * Macro to avoid include chaining to get THIS_MODULE
+ */
+#define mhi_ep_driver_register(mhi_drv) \
+ __mhi_ep_driver_register(mhi_drv, THIS_MODULE)
+
+/**
+ * __mhi_ep_driver_register - Register a driver with MHI Endpoint bus
+ * @mhi_drv: Driver to be associated with the device
+ * @owner: The module owner
+ *
+ * Return: 0 if driver registration succeeds, a negative error code otherwise.
+ */
+int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner);
+
+/**
+ * mhi_ep_driver_unregister - Unregister a driver from MHI Endpoint bus
+ * @mhi_drv: Driver associated with the device
+ */
+void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv);
+
+/**
+ * mhi_ep_register_controller - Register MHI Endpoint controller
+ * @mhi_cntrl: MHI Endpoint controller to register
+ * @config: Configuration to use for the controller
+ *
+ * Return: 0 if controller registration succeeds, a negative error code otherwise.
+ */
+int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
+ const struct mhi_ep_cntrl_config *config);
+
+/**
+ * mhi_ep_unregister_controller - Unregister MHI Endpoint controller
+ * @mhi_cntrl: MHI Endpoint controller to unregister
+ */
+void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_power_up - Power up the MHI endpoint stack
+ * @mhi_cntrl: MHI Endpoint controller
+ *
+ * Return: 0 if power up succeeds, a negative error code otherwise.
+ */
+int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl);
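A hypothetical endpoint-side registration could be sketched as follows; the channel table and the MHI version encoding are assumptions made for illustration:

/* Sketch: registering and powering up an MHI endpoint controller. */
static const struct mhi_ep_channel_config ep_channels[] = {
	{ .name = "EXAMPLE_UL", .num = 0, .num_elements = 64,
	  .dir = DMA_TO_DEVICE },	/* invented channel */
	{ .name = "EXAMPLE_DL", .num = 1, .num_elements = 64,
	  .dir = DMA_FROM_DEVICE },	/* invented channel */
};

static const struct mhi_ep_cntrl_config ep_config = {
	.mhi_version = 0x1000000,	/* assumed encoding for MHI v1.0 */
	.max_channels = 128,
	.num_channels = ARRAY_SIZE(ep_channels),
	.ch_cfg = ep_channels,
};

static int example_ep_init(struct mhi_ep_cntrl *mhi_cntrl)
{
	int ret;

	/* mmio, irq and the host-access callbacks (read_from_host,
	 * write_to_host, alloc_map, unmap_free, raise_irq) are assumed
	 * to have been filled in by the bus-specific code.
	 */
	ret = mhi_ep_register_controller(mhi_cntrl, &ep_config);
	if (ret)
		return ret;

	return mhi_ep_power_up(mhi_cntrl);
}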
+
+/**
+ * mhi_ep_power_down - Power down the MHI endpoint stack
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_queue_is_empty - Determine whether the transfer queue is empty
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ *
+ * Return: true if the queue is empty, false otherwise.
+ */
+bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir);
+
+/**
+ * mhi_ep_queue_skb - Send SKBs to host over MHI Endpoint
+ * @mhi_dev: Device associated with the DL channel
+ * @skb: SKB to be queued
+ *
+ * Return: 0 if the SKB has been sent successfully, a negative error code otherwise.
+ */
+int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb);
+
+#endif
diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h
deleted file mode 100644
index 491156a2359f..000000000000
--- a/include/linux/mic_bus.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel MIC Bus driver.
- *
- * This implementation is very similar to the virtio bus driver
- * implementation @ include/linux/virtio.h.
- */
-#ifndef _MIC_BUS_H_
-#define _MIC_BUS_H_
-/*
- * Everything a mbus driver needs to work with any particular mbus
- * implementation.
- */
-#include <linux/interrupt.h>
-#include <linux/dma-mapping.h>
-
-struct mbus_device_id {
- __u32 device;
- __u32 vendor;
-};
-
-#define MBUS_DEV_DMA_HOST 2
-#define MBUS_DEV_DMA_MIC 3
-#define MBUS_DEV_ANY_ID 0xffffffff
-
-/**
- * mbus_device - representation of a device using mbus
- * @mmio_va: virtual address of mmio space
- * @hw_ops: the hardware ops supported by this device.
- * @id: the device type identification (used to match it with a driver).
- * @dev: underlying device.
- * @index: unique position on the mbus bus
- */
-struct mbus_device {
- void __iomem *mmio_va;
- struct mbus_hw_ops *hw_ops;
- struct mbus_device_id id;
- struct device dev;
- int index;
-};
-
-/**
- * mbus_driver - operations for a mbus I/O driver
- * @driver: underlying device driver (populate name and owner).
- * @id_table: the ids serviced by this driver.
- * @probe: the function to call when a device is found. Returns 0 or -errno.
- * @remove: the function to call when a device is removed.
- */
-struct mbus_driver {
- struct device_driver driver;
- const struct mbus_device_id *id_table;
- int (*probe)(struct mbus_device *dev);
- void (*scan)(struct mbus_device *dev);
- void (*remove)(struct mbus_device *dev);
-};
-
-/**
- * struct mic_irq - opaque pointer used as cookie
- */
-struct mic_irq;
-
-/**
- * mbus_hw_ops - Hardware operations for accessing a MIC device on the MIC bus.
- */
-struct mbus_hw_ops {
- struct mic_irq* (*request_threaded_irq)(struct mbus_device *mbdev,
- irq_handler_t handler,
- irq_handler_t thread_fn,
- const char *name, void *data,
- int intr_src);
- void (*free_irq)(struct mbus_device *mbdev,
- struct mic_irq *cookie, void *data);
- void (*ack_interrupt)(struct mbus_device *mbdev, int num);
-};
-
-struct mbus_device *
-mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops,
- struct mbus_hw_ops *hw_ops, int index,
- void __iomem *mmio_va);
-void mbus_unregister_device(struct mbus_device *mbdev);
-
-int mbus_register_driver(struct mbus_driver *drv);
-void mbus_unregister_driver(struct mbus_driver *drv);
-
-static inline struct mbus_device *dev_to_mbus(struct device *_dev)
-{
- return container_of(_dev, struct mbus_device, dev);
-}
-
-static inline struct mbus_driver *drv_to_mbus(struct device_driver *drv)
-{
- return container_of(drv, struct mbus_driver, driver);
-}
-
-#endif /* _MIC_BUS_H */
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 75f880c25bb8..1f7c33b2f5a3 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -27,6 +27,8 @@
#define PHY_ID_KSZ8061 0x00221570
#define PHY_ID_KSZ9031 0x00221620
#define PHY_ID_KSZ9131 0x00221640
+#define PHY_ID_LAN8814 0x00221660
+#define PHY_ID_LAN8804 0x00221670
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
@@ -38,10 +40,26 @@
/* struct phy_device dev_flags definitions */
#define MICREL_PHY_50MHZ_CLK 0x00000001
#define MICREL_PHY_FXEN 0x00000002
+#define MICREL_KSZ8_P1_ERRATA 0x00000003
#define MICREL_KSZ9021_EXTREG_CTRL 0xB
#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC
#define MICREL_KSZ9021_RGMII_CLK_CTRL_PAD_SCEW 0x104
#define MICREL_KSZ9021_RGMII_RX_DATA_PAD_SCEW 0x105
+/* Device specific MII_BMCR (Reg 0) bits */
+/* 1 = HP Auto MDI/MDI-X mode, 0 = Microchip Auto MDI/MDI-X mode */
+#define KSZ886X_BMCR_HP_MDIX BIT(5)
+/* 1 = Force MDI (transmit on RXP/RXM pins), 0 = Normal operation
+ * (transmit on TXP/TXM pins)
+ */
+#define KSZ886X_BMCR_FORCE_MDI BIT(4)
+/* 1 = Disable auto MDI-X */
+#define KSZ886X_BMCR_DISABLE_AUTO_MDIX BIT(3)
+#define KSZ886X_BMCR_DISABLE_FAR_END_FAULT BIT(2)
+#define KSZ886X_BMCR_DISABLE_TRANSMIT BIT(1)
+#define KSZ886X_BMCR_DISABLE_LED BIT(0)
+
+#define KSZ886X_CTRL_MDIX_STAT BIT(4)
+
#endif /* _MICREL_PHY_H */
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 72120061b7d4..3ef77f52a4f0 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -10,6 +10,8 @@
typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);
+struct migration_target_control;
+
/*
 * Return values from address_space_operations.migratepage():
* - negative errno on page migration failure;
@@ -17,89 +19,83 @@ typedef void free_page_t(struct page *page, unsigned long private);
*/
#define MIGRATEPAGE_SUCCESS 0
-enum migrate_reason {
- MR_COMPACTION,
- MR_MEMORY_FAILURE,
- MR_MEMORY_HOTPLUG,
- MR_SYSCALL, /* also applies to cpusets */
- MR_MEMPOLICY_MBIND,
- MR_NUMA_MISPLACED,
- MR_CONTIG_RANGE,
- MR_TYPES
+/**
+ * struct movable_operations - Driver page migration
+ * @isolate_page:
+ * The VM calls this function to prepare the page to be moved. The page
+ * is locked and the driver should not unlock it. The driver should
+ * return ``true`` if the page is movable and ``false`` if it is not
+ * currently movable. After this function returns, the VM uses the
+ * page->lru field, so the driver must preserve any information which
+ * is usually stored here.
+ *
+ * @migrate_page:
+ * After isolation, the VM calls this function with the isolated
+ * @src page. The driver should copy the contents of the
+ * @src page to the @dst page and set up the fields of @dst page.
+ * Both pages are locked.
+ * If page migration is successful, the driver should call
+ * __ClearPageMovable(@src) and return MIGRATEPAGE_SUCCESS.
+ * If the driver cannot migrate the page at the moment, it can return
+ * -EAGAIN. The VM interprets this as a temporary migration failure and
+ * will retry it later. Any other error value is a permanent migration
+ * failure and migration will not be retried.
+ * The driver shouldn't touch the @src->lru field while in the
+ * migrate_page() function. It may write to @dst->lru.
+ *
+ * @putback_page:
+ * If migration fails on the isolated page, the VM informs the driver
+ * that the page is no longer a candidate for migration by calling
+ * this function. The driver should put the isolated page back into
+ * its own data structure.
+ */
+struct movable_operations {
+ bool (*isolate_page)(struct page *, isolate_mode_t);
+ int (*migrate_page)(struct page *dst, struct page *src,
+ enum migrate_mode);
+ void (*putback_page)(struct page *);
};
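To make the contract concrete, a driver's implementation of these hooks might look roughly like this sketch (assuming pages outside highmem; the driver's own bookkeeping is elided):

/* Illustrative skeleton of a movable_operations implementation. */
static bool example_isolate_page(struct page *page, isolate_mode_t mode)
{
	/* Pin the page in driver structures, but leave it locked as the
	 * VM expects. Return false if it cannot be moved right now.
	 */
	return true;
}

static int example_migrate_page(struct page *dst, struct page *src,
				enum migrate_mode mode)
{
	/* Copy the payload and repoint driver metadata at dst. */
	memcpy(page_address(dst), page_address(src), PAGE_SIZE);

	__ClearPageMovable(src);
	return MIGRATEPAGE_SUCCESS;
}

static void example_putback_page(struct page *page)
{
	/* Migration failed: reinsert the page into driver structures. */
}

static const struct movable_operations example_mops = {
	.isolate_page	= example_isolate_page,
	.migrate_page	= example_migrate_page,
	.putback_page	= example_putback_page,
};

/* Pages are then tagged with: __SetPageMovable(page, &example_mops); */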
-/* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
+/* Defined in mm/debug.c: */
extern const char *migrate_reason_names[MR_TYPES];
-static inline struct page *new_page_nodemask(struct page *page,
- int preferred_nid, nodemask_t *nodemask)
-{
- gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
- unsigned int order = 0;
- struct page *new_page = NULL;
-
- if (PageHuge(page))
- return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
- preferred_nid, nodemask);
-
- if (PageTransHuge(page)) {
- gfp_mask |= GFP_TRANSHUGE;
- order = HPAGE_PMD_ORDER;
- }
-
- if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
- gfp_mask |= __GFP_HIGHMEM;
-
- new_page = __alloc_pages_nodemask(gfp_mask, order,
- preferred_nid, nodemask);
-
- if (new_page && PageTransHuge(new_page))
- prep_transhuge_page(new_page);
-
- return new_page;
-}
-
#ifdef CONFIG_MIGRATION
extern void putback_movable_pages(struct list_head *l);
-extern int migrate_page(struct address_space *mapping,
- struct page *newpage, struct page *page,
- enum migrate_mode mode);
+int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
+ struct folio *src, enum migrate_mode mode, int extra_count);
+int migrate_folio(struct address_space *mapping, struct folio *dst,
+ struct folio *src, enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
- unsigned long private, enum migrate_mode mode, int reason);
+ unsigned long private, enum migrate_mode mode, int reason,
+ unsigned int *ret_succeeded);
+extern struct page *alloc_migration_target(struct page *page, unsigned long private);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
-extern void putback_movable_page(struct page *page);
-
-extern int migrate_prep(void);
-extern int migrate_prep_local(void);
-extern void migrate_page_states(struct page *newpage, struct page *page);
-extern void migrate_page_copy(struct page *newpage, struct page *page);
-extern int migrate_huge_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page);
-extern int migrate_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page, int extra_count);
+
+int migrate_huge_page_move_mapping(struct address_space *mapping,
+ struct folio *dst, struct folio *src);
+void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
+ spinlock_t *ptl);
+void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
+void folio_migrate_copy(struct folio *newfolio, struct folio *folio);
+int folio_migrate_mapping(struct address_space *mapping,
+ struct folio *newfolio, struct folio *folio, int extra_count);
+
#else
static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
free_page_t free, unsigned long private, enum migrate_mode mode,
- int reason)
+ int reason, unsigned int *ret_succeeded)
{ return -ENOSYS; }
+static inline struct page *alloc_migration_target(struct page *page,
+ unsigned long private)
+ { return NULL; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
{ return -EBUSY; }
-static inline int migrate_prep(void) { return -ENOSYS; }
-static inline int migrate_prep_local(void) { return -ENOSYS; }
-
-static inline void migrate_page_states(struct page *newpage, struct page *page)
-{
-}
-
-static inline void migrate_page_copy(struct page *newpage,
- struct page *page) {}
-
static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page)
+ struct folio *dst, struct folio *src)
{
return -ENOSYS;
}
@@ -107,13 +103,13 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_COMPACTION
-extern int PageMovable(struct page *page);
-extern void __SetPageMovable(struct page *page, struct address_space *mapping);
-extern void __ClearPageMovable(struct page *page);
+bool PageMovable(struct page *page);
+void __SetPageMovable(struct page *page, const struct movable_operations *ops);
+void __ClearPageMovable(struct page *page);
#else
-static inline int PageMovable(struct page *page) { return 0; };
+static inline bool PageMovable(struct page *page) { return false; }
static inline void __SetPageMovable(struct page *page,
- struct address_space *mapping)
+ const struct movable_operations *ops)
{
}
static inline void __ClearPageMovable(struct page *page)
@@ -121,15 +117,24 @@ static inline void __ClearPageMovable(struct page *page)
}
#endif
+static inline bool folio_test_movable(struct folio *folio)
+{
+ return PageMovable(&folio->page);
+}
+
+static inline
+const struct movable_operations *page_movable_ops(struct page *page)
+{
+ VM_BUG_ON(!__PageMovable(page));
+
+ return (const struct movable_operations *)
+ ((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);
+}
+
#ifdef CONFIG_NUMA_BALANCING
-extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
struct vm_area_struct *vma, int node);
#else
-static inline bool pmd_trans_migrating(pmd_t pmd)
-{
- return false;
-}
static inline int migrate_misplaced_page(struct page *page,
struct vm_area_struct *vma, int node)
{
@@ -137,24 +142,6 @@ static inline int migrate_misplaced_page(struct page *page,
}
#endif /* CONFIG_NUMA_BALANCING */
-#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
-extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
- struct vm_area_struct *vma,
- pmd_t *pmd, pmd_t entry,
- unsigned long address,
- struct page *page, int node);
-#else
-static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
- struct vm_area_struct *vma,
- pmd_t *pmd, pmd_t entry,
- unsigned long address,
- struct page *page, int node)
-{
- return -EAGAIN;
-}
-#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE*/
-
-
#ifdef CONFIG_MIGRATION
/*
@@ -164,7 +151,6 @@ static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
*/
#define MIGRATE_PFN_VALID (1UL << 0)
#define MIGRATE_PFN_MIGRATE (1UL << 1)
-#define MIGRATE_PFN_LOCKED (1UL << 2)
#define MIGRATE_PFN_WRITE (1UL << 3)
#define MIGRATE_PFN_SHIFT 6
@@ -180,6 +166,12 @@ static inline unsigned long migrate_pfn(unsigned long pfn)
return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
+enum migrate_vma_direction {
+ MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
+ MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
+ MIGRATE_VMA_SELECT_DEVICE_COHERENT = 1 << 2,
+};
+
struct migrate_vma {
struct vm_area_struct *vma;
/*
@@ -196,11 +188,34 @@ struct migrate_vma {
unsigned long npages;
unsigned long start;
unsigned long end;
+
+ /*
+ * Set to the owner value also stored in page->pgmap->owner for
+ * migrating out of device private memory. The flags also need to
+ * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
+ * The caller should always set this field when using mmu notifier
+ * callbacks to avoid device MMU invalidations for device private
+ * pages that are not being migrated.
+ */
+ void *pgmap_owner;
+ unsigned long flags;
+
+ /*
+ * Set to vmf->page if this is being called to migrate a page as part of
+ * a migrate_to_ram() callback.
+ */
+ struct page *fault_page;
};
int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
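A hedged sketch of the resulting three-phase flow, as it might appear in a device driver's migrate_to_ram() path; the src/dst PFN arrays belong to the full structure (elided from this hunk), and destination-page allocation is only indicated by a comment:

/* Sketch: migrate one device-private page back to system memory. */
static int example_migrate_one(struct vm_area_struct *vma,
			       unsigned long addr, void *owner,
			       struct page *fault_page)
{
	unsigned long src = 0, dst = 0;
	struct migrate_vma args = {
		.vma		= vma,
		.src		= &src,
		.dst		= &dst,
		.start		= addr,
		.end		= addr + PAGE_SIZE,
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
		.pgmap_owner	= owner,
		.fault_page	= fault_page,
	};

	if (migrate_vma_setup(&args))	/* collect and isolate the page */
		return -EBUSY;

	if (src & MIGRATE_PFN_MIGRATE) {
		/* Allocate a destination page, copy the data into it,
		 * then publish it: dst = migrate_pfn(page_to_pfn(page));
		 */
	}

	migrate_vma_pages(&args);	/* switch the page tables over */
	migrate_vma_finalize(&args);	/* drop references, unlock pages */
	return 0;
}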
+int migrate_device_range(unsigned long *src_pfns, unsigned long start,
+ unsigned long npages);
+void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
+ unsigned long npages);
+void migrate_device_finalize(unsigned long *src_pfns,
+ unsigned long *dst_pfns, unsigned long npages);
#endif /* CONFIG_MIGRATION */
diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h
index 883c99249033..f37cc03f9369 100644
--- a/include/linux/migrate_mode.h
+++ b/include/linux/migrate_mode.h
@@ -19,4 +19,17 @@ enum migrate_mode {
MIGRATE_SYNC_NO_COPY,
};
+enum migrate_reason {
+ MR_COMPACTION,
+ MR_MEMORY_FAILURE,
+ MR_MEMORY_HOTPLUG,
+ MR_SYSCALL, /* also applies to cpusets */
+ MR_MEMPOLICY_MBIND,
+ MR_NUMA_MISPLACED,
+ MR_CONTIG_RANGE,
+ MR_LONGTERM_PIN,
+ MR_DEMOTION,
+ MR_TYPES
+};
+
#endif /* MIGRATE_MODE_H_INCLUDED */
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 18c6208f56fc..d5a959ce4877 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -32,7 +32,7 @@ struct mii_if_info {
extern int mii_link_ok (struct mii_if_info *mii);
extern int mii_nway_restart (struct mii_if_info *mii);
-extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
+extern void mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
extern void mii_ethtool_get_link_ksettings(
struct mii_if_info *mii, struct ethtool_link_ksettings *cmd);
extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
@@ -355,74 +355,6 @@ static inline u32 mii_adv_to_ethtool_adv_x(u32 adv)
}
/**
- * mii_lpa_to_ethtool_lpa_x
- * @adv: value of the MII_LPA register
- *
- * A small helper function that translates MII_LPA
- * bits, when in 1000Base-X mode, to ethtool
- * LP advertisement settings.
- */
-static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
-{
- u32 result = 0;
-
- if (lpa & LPA_LPACK)
- result |= ADVERTISED_Autoneg;
-
- return result | mii_adv_to_ethtool_adv_x(lpa);
-}
-
-/**
- * mii_lpa_mod_linkmode_adv_sgmii
- * @lp_advertising: pointer to destination link mode.
- * @lpa: value of the MII_LPA register
- *
- * A small helper function that translates MII_LPA bits to
- * linkmode advertisement settings for SGMII.
- * Leaves other bits unchanged.
- */
-static inline void
-mii_lpa_mod_linkmode_lpa_sgmii(unsigned long *lp_advertising, u32 lpa)
-{
- u32 speed_duplex = lpa & LPA_SGMII_DPX_SPD_MASK;
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, lp_advertising,
- speed_duplex == LPA_SGMII_1000HALF);
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, lp_advertising,
- speed_duplex == LPA_SGMII_1000FULL);
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, lp_advertising,
- speed_duplex == LPA_SGMII_100HALF);
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, lp_advertising,
- speed_duplex == LPA_SGMII_100FULL);
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, lp_advertising,
- speed_duplex == LPA_SGMII_10HALF);
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, lp_advertising,
- speed_duplex == LPA_SGMII_10FULL);
-}
-
-/**
- * mii_lpa_to_linkmode_adv_sgmii
- * @advertising: pointer to destination link mode.
- * @lpa: value of the MII_LPA register
- *
- * A small helper function that translates MII_ADVERTISE bits
- * to linkmode advertisement settings when in SGMII mode.
- * Clears the old value of advertising.
- */
-static inline void mii_lpa_to_linkmode_lpa_sgmii(unsigned long *lp_advertising,
- u32 lpa)
-{
- linkmode_zero(lp_advertising);
-
- mii_lpa_mod_linkmode_lpa_sgmii(lp_advertising, lpa);
-}
-
-/**
* mii_adv_mod_linkmode_adv_t
 * @advertising: pointer to destination link mode.
* @adv: value of the MII_ADVERTISE register
@@ -536,6 +468,45 @@ static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising)
}
/**
+ * mii_lpa_mod_linkmode_x - decode the link partner's config_reg to linkmodes
+ * @linkmodes: link modes array
+ * @lpa: config_reg word from link partner
+ * @fd_bit: link mode for 1000XFULL bit
+ */
+static inline void mii_lpa_mod_linkmode_x(unsigned long *linkmodes, u16 lpa,
+ int fd_bit)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, linkmodes,
+ lpa & LPA_LPACK);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes,
+ lpa & LPA_1000XPAUSE);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes,
+ lpa & LPA_1000XPAUSE_ASYM);
+ linkmode_mod_bit(fd_bit, linkmodes,
+ lpa & LPA_1000XFULL);
+}
+
+/**
+ * linkmode_adv_to_mii_adv_x - encode a linkmode to config_reg
+ * @linkmodes: linkmodes
+ * @fd_bit: full duplex bit
+ */
+static inline u16 linkmode_adv_to_mii_adv_x(const unsigned long *linkmodes,
+ int fd_bit)
+{
+ u16 adv = 0;
+
+ if (linkmode_test_bit(fd_bit, linkmodes))
+ adv |= ADVERTISE_1000XFULL;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes))
+ adv |= ADVERTISE_1000XPAUSE;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes))
+ adv |= ADVERTISE_1000XPSE_ASYM;
+
+ return adv;
+}
+
+/**
* mii_advertise_flowctrl - get flow control advertisement flags
* @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both)
*/
@@ -574,4 +545,39 @@ static inline u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv)
return cap;
}
+/**
+ * mii_bmcr_encode_fixed - encode fixed speed/duplex settings to a BMCR value
+ * @speed: a SPEED_* value
+ * @duplex: a DUPLEX_* value
+ *
+ * Encode the speed and duplex to a BMCR value. 2500, 1000, 100 and 10 Mbps are
+ * supported. 2500Mbps is encoded to 1000Mbps. Other speeds are encoded as 10
+ * Mbps. Unknown duplex values are encoded to half-duplex.
+ */
+static inline u16 mii_bmcr_encode_fixed(int speed, int duplex)
+{
+ u16 bmcr;
+
+ switch (speed) {
+ case SPEED_2500:
+ case SPEED_1000:
+ bmcr = BMCR_SPEED1000;
+ break;
+
+ case SPEED_100:
+ bmcr = BMCR_SPEED100;
+ break;
+
+ case SPEED_10:
+ default:
+ bmcr = BMCR_SPEED10;
+ break;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ bmcr |= BMCR_FULLDPLX;
+
+ return bmcr;
+}
+
#endif /* __LINUX_MII_H__ */
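[Editorial note: a minimal usage sketch for the helpers introduced above — not
part of the patch; it assumes the linux/mii.h declarations shown in this diff
plus the standard SPEED_*/DUPLEX_*/ETHTOOL_LINK_MODE_* definitions from
linux/ethtool.h. "example_mii_helpers" is an illustrative name.]

	#include <linux/ethtool.h>
	#include <linux/mii.h>

	/* Decode a 1000Base-X link partner word and encode a fixed link. */
	static u16 example_mii_helpers(unsigned long *lp_advertising, u16 lpa)
	{
		/* Map LPA_1000XFULL onto the 1000baseX/Full link mode,
		 * along with the autoneg and pause bits. */
		mii_lpa_mod_linkmode_x(lp_advertising, lpa,
				       ETHTOOL_LINK_MODE_1000baseX_Full_BIT);

		/* A fixed 1000M full-duplex link encodes to
		 * BMCR_SPEED1000 | BMCR_FULLDPLX. */
		return mii_bmcr_encode_fixed(SPEED_1000, DUPLEX_FULL);
	}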
diff --git a/include/linux/min_heap.h b/include/linux/min_heap.h
new file mode 100644
index 000000000000..44077837385f
--- /dev/null
+++ b/include/linux/min_heap.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MIN_HEAP_H
+#define _LINUX_MIN_HEAP_H
+
+#include <linux/bug.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+/**
+ * struct min_heap - Data structure to hold a min-heap.
+ * @data: Start of array holding the heap elements.
+ * @nr: Number of elements currently in the heap.
+ * @size: Maximum number of elements that can be held in current storage.
+ */
+struct min_heap {
+ void *data;
+ int nr;
+ int size;
+};
+
+/**
+ * struct min_heap_callbacks - Data/functions to customise the min_heap.
+ * @elem_size: The size of each element in bytes.
+ * @less: Partial order function for this heap.
+ * @swp: Swap elements function.
+ */
+struct min_heap_callbacks {
+ int elem_size;
+ bool (*less)(const void *lhs, const void *rhs);
+ void (*swp)(void *lhs, void *rhs);
+};
+
+/* Sift the element at pos down the heap. */
+static __always_inline
+void min_heapify(struct min_heap *heap, int pos,
+ const struct min_heap_callbacks *func)
+{
+ void *left, *right, *parent, *smallest;
+ void *data = heap->data;
+
+ for (;;) {
+ if (pos * 2 + 1 >= heap->nr)
+ break;
+
+ left = data + ((pos * 2 + 1) * func->elem_size);
+ parent = data + (pos * func->elem_size);
+ smallest = parent;
+ if (func->less(left, smallest))
+ smallest = left;
+
+ if (pos * 2 + 2 < heap->nr) {
+ right = data + ((pos * 2 + 2) * func->elem_size);
+ if (func->less(right, smallest))
+ smallest = right;
+ }
+ if (smallest == parent)
+ break;
+ func->swp(smallest, parent);
+ if (smallest == left)
+ pos = (pos * 2) + 1;
+ else
+ pos = (pos * 2) + 2;
+ }
+}
+
+/* Floyd's approach to heapification that is O(nr). */
+static __always_inline
+void min_heapify_all(struct min_heap *heap,
+ const struct min_heap_callbacks *func)
+{
+ int i;
+
+ for (i = heap->nr / 2; i >= 0; i--)
+ min_heapify(heap, i, func);
+}
+
+/* Remove minimum element from the heap, O(log2(nr)). */
+static __always_inline
+void min_heap_pop(struct min_heap *heap,
+ const struct min_heap_callbacks *func)
+{
+ void *data = heap->data;
+
+ if (WARN_ONCE(heap->nr <= 0, "Popping an empty heap"))
+ return;
+
+ /* Place last element at the root (position 0) and then sift down. */
+ heap->nr--;
+ memcpy(data, data + (heap->nr * func->elem_size), func->elem_size);
+ min_heapify(heap, 0, func);
+}
+
+/*
+ * Remove the minimum element and then push the given element. The
+ * implementation performs one sift (O(log2(nr))) and is therefore more
+ * efficient than a pop followed by a push, which performs two.
+ */
+static __always_inline
+void min_heap_pop_push(struct min_heap *heap,
+ const void *element,
+ const struct min_heap_callbacks *func)
+{
+ memcpy(heap->data, element, func->elem_size);
+ min_heapify(heap, 0, func);
+}
+
+/* Push an element on to the heap, O(log2(nr)). */
+static __always_inline
+void min_heap_push(struct min_heap *heap, const void *element,
+ const struct min_heap_callbacks *func)
+{
+ void *data = heap->data;
+ void *child, *parent;
+ int pos;
+
+ if (WARN_ONCE(heap->nr >= heap->size, "Pushing on a full heap"))
+ return;
+
+ /* Place at the end of data. */
+ pos = heap->nr;
+ memcpy(data + (pos * func->elem_size), element, func->elem_size);
+ heap->nr++;
+
+ /* Sift child at pos up. */
+ for (; pos > 0; pos = (pos - 1) / 2) {
+ child = data + (pos * func->elem_size);
+ parent = data + ((pos - 1) / 2) * func->elem_size;
+ if (func->less(parent, child))
+ break;
+ func->swp(parent, child);
+ }
+}
+
+#endif /* _LINUX_MIN_HEAP_H */
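[Editorial note: a hedged sketch of driving this API with plain ints — not part
of the patch; "less_than", "swap_ints" and "example_min_heap" are illustrative
names built only on the declarations above.]

	#include <linux/kernel.h>
	#include <linux/min_heap.h>

	static bool less_than(const void *lhs, const void *rhs)
	{
		return *(const int *)lhs < *(const int *)rhs;
	}

	static void swap_ints(void *lhs, void *rhs)
	{
		int tmp = *(int *)lhs;

		*(int *)lhs = *(int *)rhs;
		*(int *)rhs = tmp;
	}

	static int example_min_heap(void)
	{
		int values[] = { 3, 1, 4, 1, 5 };
		struct min_heap heap = {
			.data = values,
			.nr = ARRAY_SIZE(values),
			.size = ARRAY_SIZE(values),
		};
		const struct min_heap_callbacks funcs = {
			.elem_size = sizeof(int),
			.less = less_than,
			.swp = swap_ints,
		};

		/* Floyd build is O(nr); the root then holds the minimum. */
		min_heapify_all(&heap, &funcs);
		return *(int *)heap.data;	/* 1 for this input */
	}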
diff --git a/include/linux/minmax.h b/include/linux/minmax.h
new file mode 100644
index 000000000000..5433c08fcc68
--- /dev/null
+++ b/include/linux/minmax.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MINMAX_H
+#define _LINUX_MINMAX_H
+
+#include <linux/const.h>
+
+/*
+ * min()/max()/clamp() macros must accomplish three things:
+ *
+ * - avoid multiple evaluations of the arguments (so side-effects like
+ * "x++" happen only once) when non-constant.
+ * - perform strict type-checking (to generate warnings instead of
+ * nasty runtime surprises). See the "unnecessary" pointer comparison
+ * in __typecheck().
+ * - retain result as a constant expressions when called with only
+ * constant expressions (to avoid tripping VLA warnings in stack
+ * allocation usage).
+ */
+#define __typecheck(x, y) \
+ (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
+
+#define __no_side_effects(x, y) \
+ (__is_constexpr(x) && __is_constexpr(y))
+
+#define __safe_cmp(x, y) \
+ (__typecheck(x, y) && __no_side_effects(x, y))
+
+#define __cmp(x, y, op) ((x) op (y) ? (x) : (y))
+
+#define __cmp_once(x, y, unique_x, unique_y, op) ({ \
+ typeof(x) unique_x = (x); \
+ typeof(y) unique_y = (y); \
+ __cmp(unique_x, unique_y, op); })
+
+#define __careful_cmp(x, y, op) \
+ __builtin_choose_expr(__safe_cmp(x, y), \
+ __cmp(x, y, op), \
+ __cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op))
+
+/**
+ * min - return minimum of two values of the same or compatible types
+ * @x: first value
+ * @y: second value
+ */
+#define min(x, y) __careful_cmp(x, y, <)
+
+/**
+ * max - return maximum of two values of the same or compatible types
+ * @x: first value
+ * @y: second value
+ */
+#define max(x, y) __careful_cmp(x, y, >)
+
+/**
+ * min3 - return minimum of three values
+ * @x: first value
+ * @y: second value
+ * @z: third value
+ */
+#define min3(x, y, z) min((typeof(x))min(x, y), z)
+
+/**
+ * max3 - return maximum of three values
+ * @x: first value
+ * @y: second value
+ * @z: third value
+ */
+#define max3(x, y, z) max((typeof(x))max(x, y), z)
+
+/**
+ * min_not_zero - return the minimum that is _not_ zero, unless both are zero
+ * @x: value1
+ * @y: value2
+ */
+#define min_not_zero(x, y) ({ \
+ typeof(x) __x = (x); \
+ typeof(y) __y = (y); \
+ __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
+
+/**
+ * clamp - return a value clamped to a given range with strict typechecking
+ * @val: current value
+ * @lo: lowest allowable value
+ * @hi: highest allowable value
+ *
+ * This macro does strict typechecking of @lo/@hi to make sure they are of the
+ * same type as @val. See the unnecessary pointer comparisons.
+ */
+#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
+
+/*
+ * ..and if you can't take the strict
+ * types, you can specify one yourself.
+ *
+ * Or not use min/max/clamp at all, of course.
+ */
+
+/**
+ * min_t - return minimum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
+#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <)
+
+/**
+ * max_t - return maximum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
+#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >)
+
+/**
+ * clamp_t - return a value clamped to a given range using a given type
+ * @type: the type of variable to use
+ * @val: current value
+ * @lo: minimum allowable value
+ * @hi: maximum allowable value
+ *
+ * This macro does no typechecking and uses temporary variables of type
+ * @type to make all the comparisons.
+ */
+#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
+
+/**
+ * clamp_val - return a value clamped to a given range using val's type
+ * @val: current value
+ * @lo: minimum allowable value
+ * @hi: maximum allowable value
+ *
+ * This macro does no typechecking and uses temporary variables of whatever
+ * type the input argument @val is. This is useful when @val is an unsigned
+ * type and @lo and @hi are literals that will otherwise be assigned a signed
+ * integer type.
+ */
+#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
+
+/**
+ * swap - swap values of @a and @b
+ * @a: first value
+ * @b: second value
+ */
+#define swap(a, b) \
+ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+
+#endif /* _LINUX_MINMAX_H */
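[Editorial note: a short sketch of the typing rules this header enforces — not
part of the patch; "example_clamp" and "example_clamp_val" are illustrative
names.]

	#include <linux/minmax.h>
	#include <linux/types.h>

	static u32 example_clamp(u32 req_len)
	{
		/* clamp() typechecks: both bounds must be u32 like @req_len,
		 * and each argument is evaluated exactly once. */
		return clamp(req_len, (u32)64, (u32)4096);
	}

	static u8 example_clamp_val(u8 level)
	{
		/* clamp_val() compares in @level's own type (u8), so the
		 * signed integer literals cannot force a signed compare. */
		return clamp_val(level, 16, 240);
	}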
diff --git a/include/linux/misc_cgroup.h b/include/linux/misc_cgroup.h
new file mode 100644
index 000000000000..c238207d1615
--- /dev/null
+++ b/include/linux/misc_cgroup.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Miscellaneous cgroup controller.
+ *
+ * Copyright 2020 Google LLC
+ * Author: Vipin Sharma <vipinsh@google.com>
+ */
+#ifndef _MISC_CGROUP_H_
+#define _MISC_CGROUP_H_
+
+/**
+ * enum misc_res_type - Types of misc cgroup entries supported by the host.
+ */
+enum misc_res_type {
+#ifdef CONFIG_KVM_AMD_SEV
+ /* AMD SEV ASIDs resource */
+ MISC_CG_RES_SEV,
+ /* AMD SEV-ES ASIDs resource */
+ MISC_CG_RES_SEV_ES,
+#endif
+ MISC_CG_RES_TYPES
+};
+
+struct misc_cg;
+
+#ifdef CONFIG_CGROUP_MISC
+
+#include <linux/cgroup.h>
+
+/**
+ * struct misc_res: Per cgroup per misc type resource
+ * @max: Maximum limit on the resource.
+ * @usage: Current usage of the resource.
+ * @failed: True if charged failed for the resource in a cgroup.
+ */
+struct misc_res {
+ unsigned long max;
+ atomic_long_t usage;
+ atomic_long_t events;
+};
+
+/**
+ * struct misc_cg - Miscellaneous controller's cgroup structure.
+ * @css: cgroup subsys state object.
+ * @res: Array of misc resources usage in the cgroup.
+ */
+struct misc_cg {
+ struct cgroup_subsys_state css;
+
+ /* misc.events */
+ struct cgroup_file events_file;
+
+ struct misc_res res[MISC_CG_RES_TYPES];
+};
+
+unsigned long misc_cg_res_total_usage(enum misc_res_type type);
+int misc_cg_set_capacity(enum misc_res_type type, unsigned long capacity);
+int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg,
+ unsigned long amount);
+void misc_cg_uncharge(enum misc_res_type type, struct misc_cg *cg,
+ unsigned long amount);
+
+/**
+ * css_misc() - Get misc cgroup from the css.
+ * @css: cgroup subsys state object.
+ *
+ * Context: Any context.
+ * Return:
+ * * %NULL - If @css is null.
+ * * struct misc_cg* - misc cgroup pointer of the passed css.
+ */
+static inline struct misc_cg *css_misc(struct cgroup_subsys_state *css)
+{
+ return css ? container_of(css, struct misc_cg, css) : NULL;
+}
+
+/**
+ * get_current_misc_cg() - Find and get the misc cgroup of the current task.
+ *
+ * Returned cgroup has its ref count increased by 1. Caller must call
+ * put_misc_cg() to return the reference.
+ *
+ * Return: Misc cgroup to which the current task belongs.
+ */
+static inline struct misc_cg *get_current_misc_cg(void)
+{
+ return css_misc(task_get_css(current, misc_cgrp_id));
+}
+
+/**
+ * put_misc_cg() - Put the misc cgroup and reduce its ref count.
+ * @cg: cgroup to put.
+ */
+static inline void put_misc_cg(struct misc_cg *cg)
+{
+ if (cg)
+ css_put(&cg->css);
+}
+
+#else /* !CONFIG_CGROUP_MISC */
+
+static inline unsigned long misc_cg_res_total_usage(enum misc_res_type type)
+{
+ return 0;
+}
+
+static inline int misc_cg_set_capacity(enum misc_res_type type,
+ unsigned long capacity)
+{
+ return 0;
+}
+
+static inline int misc_cg_try_charge(enum misc_res_type type,
+ struct misc_cg *cg,
+ unsigned long amount)
+{
+ return 0;
+}
+
+static inline void misc_cg_uncharge(enum misc_res_type type,
+ struct misc_cg *cg,
+ unsigned long amount)
+{
+}
+
+static inline struct misc_cg *get_current_misc_cg(void)
+{
+ return NULL;
+}
+
+static inline void put_misc_cg(struct misc_cg *cg)
+{
+}
+
+#endif /* CONFIG_CGROUP_MISC */
+#endif /* _MISC_CGROUP_H_ */
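[Editorial note: a sketch of the intended charge/uncharge pairing — not part of
the patch; "example_charge_one" is an illustrative name built on the
declarations above.]

	static int example_charge_one(enum misc_res_type type)
	{
		struct misc_cg *cg = get_current_misc_cg();
		int ret;

		ret = misc_cg_try_charge(type, cg, 1);
		if (ret) {
			put_misc_cg(cg);
			return ret;
		}

		/* ... use one unit of the resource; on release: */
		misc_cg_uncharge(type, cg, 1);
		put_misc_cg(cg);
		return 0;
	}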
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index becde6981a95..c0fea6ca5076 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -7,9 +7,9 @@
#include <linux/device.h>
/*
- * These allocations are managed by device@lanana.org. If you use an
- * entry that is not in assigned your entry may well be moved and
- * reassigned, or set dynamic if a fixed value is not justified.
+ * These allocations are managed by device@lanana.org. If you use
+ * an entry that is not assigned here, it may be moved and
+ * reassigned, or made dynamic if a fixed value is not justified.
*/
#define PSMOUSE_MINOR 1
@@ -25,20 +25,31 @@
#define TEMP_MINOR 131 /* Temperature Sensor */
#define APM_MINOR_DEV 134
#define RTC_MINOR 135
-#define EFI_RTC_MINOR 136 /* EFI Time services */
+/*#define EFI_RTC_MINOR 136 was EFI Time services */
#define VHCI_MINOR 137
#define SUN_OPENPROM_MINOR 139
#define DMAPI_MINOR 140 /* unused */
#define NVRAM_MINOR 144
+#define SBUS_FLASH_MINOR 152
#define SGI_MMTIMER 153
+#define PMU_MINOR 154
#define STORE_QUEUE_MINOR 155 /* unused */
+#define LCD_MINOR 156
+#define AC_MINOR 157
+#define BUTTON_MINOR 158 /* Major 10, Minor 158, /dev/nwbutton */
+#define NWFLASH_MINOR 160 /* MAJOR is 10 - miscdevice */
+#define ENVCTRL_MINOR 162
#define I2O_MINOR 166
+#define UCTRL_MINOR 174
#define AGPGART_MINOR 175
+#define TOSH_MINOR_DEV 181
#define HWRNG_MINOR 183
-#define MICROCODE_MINOR 184
+/*#define MICROCODE_MINOR 184 unused */
+#define KEYPAD_MINOR 185
#define IRNET_MINOR 187
#define D7S_MINOR 193
#define VFIO_MINOR 196
+#define PXA3XX_GCU_MINOR 197
#define TUN_MINOR 200
#define CUSE_MINOR 203
#define MWAVE_MINOR 219 /* ACP/Mwave Modem */
@@ -49,6 +60,7 @@
#define MISC_MCELOG_MINOR 227
#define HPET_MINOR 228
#define FUSE_MINOR 229
+#define SNAPSHOT_MINOR 231
#define KVM_MINOR 232
#define BTRFS_MINOR 234
#define AUTOFS_MINOR 235
@@ -81,14 +93,14 @@ extern void misc_deregister(struct miscdevice *misc);
/*
* Helper macro for drivers that don't do anything special in the initcall.
- * This helps in eleminating of boilerplate code.
+ * This helps to eliminate boilerplate code.
*/
#define builtin_misc_device(__misc_device) \
builtin_driver(__misc_device, misc_register)
/*
* Helper macro for drivers that don't do anything special in module init / exit
- * call. This helps in eleminating of boilerplate code.
+ * call. This helps to eliminate boilerplate code.
*/
#define module_misc_device(__misc_device) \
module_driver(__misc_device, misc_register, misc_deregister)
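[Editorial note: a sketch of the one-line registration these macros enable —
not part of the patch; the "example" names are illustrative.]

	#include <linux/fs.h>
	#include <linux/miscdevice.h>
	#include <linux/module.h>

	static const struct file_operations example_fops = {
		.owner = THIS_MODULE,
	};

	static struct miscdevice example_misc = {
		.minor = MISC_DYNAMIC_MINOR,	/* no fixed minor justified */
		.name = "example",
		.fops = &example_fops,
	};
	/* Expands to module init/exit that call misc_register/deregister. */
	module_misc_device(example_misc);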
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 20372de0b587..6646634a0b9d 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -46,7 +46,6 @@
#define DEFAULT_UAR_PAGE_SHIFT 12
-#define MAX_MSIX_P_PORT 17
#define MAX_MSIX 128
#define MIN_MSIX_P_PORT 5
#define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \
@@ -573,7 +572,6 @@ struct mlx4_caps {
int reserved_eqs;
int num_comp_vectors;
int num_mpts;
- int max_fmr_maps;
int num_mtts;
int fmr_reserved_mtts;
int reserved_mtts;
@@ -632,6 +630,7 @@ struct mlx4_caps {
bool wol_port[MLX4_MAX_PORTS + 1];
struct mlx4_rate_limit_caps rl_caps;
u32 health_buffer_addrs;
+ bool map_clock_to_user;
};
struct mlx4_buf_list {
@@ -707,17 +706,6 @@ struct mlx4_mw {
int enabled;
};
-struct mlx4_fmr {
- struct mlx4_mr mr;
- struct mlx4_mpt_entry *mpt;
- __be64 *mtts;
- dma_addr_t dma_handle;
- int max_pages;
- int max_maps;
- int maps;
- u8 page_shift;
-};
-
struct mlx4_uar {
unsigned long pfn;
int index;
@@ -1412,14 +1400,6 @@ int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
-int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
- int npages, u64 iova, u32 *lkey, u32 *rkey);
-int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
- int max_maps, u8 page_shift, struct mlx4_fmr *fmr);
-int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
-void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
- u32 *lkey, u32 *rkey);
-int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
int mlx4_test_interrupt(struct mlx4_dev *dev, int vector);
int mlx4_test_async(struct mlx4_dev *dev);
@@ -1456,7 +1436,7 @@ int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
enum mlx4_net_trans_rule_id id);
int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
-int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, const unsigned char *addr,
int port, int qpn, u16 prio, u64 *reg_id);
void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
@@ -1522,6 +1502,8 @@ int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port);
int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port);
int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
int enable);
+
+struct mlx4_mpt_entry;
int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
struct mlx4_mpt_entry ***mpt_entry);
int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index a858bcb6220b..1834c8fad12e 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -92,26 +92,4 @@ void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int
struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port);
-static inline u64 mlx4_mac_to_u64(u8 *addr)
-{
- u64 mac = 0;
- int i;
-
- for (i = 0; i < ETH_ALEN; i++) {
- mac <<= 8;
- mac |= addr[i];
- }
- return mac;
-}
-
-static inline void mlx4_u64_to_mac(u8 *addr, u64 mac)
-{
- int i;
-
- for (i = ETH_ALEN; i > 0; i--) {
- addr[i - 1] = mac & 0xFF;
- mac >>= 8;
- }
-}
-
#endif /* MLX4_DRIVER_H */
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 8e2828d48d7f..9db93e487496 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -362,7 +362,7 @@ struct mlx4_wqe_datagram_seg {
struct mlx4_wqe_lso_seg {
__be32 mss_hdr_size;
- __be32 header[0];
+ __be32 header[];
};
enum mlx4_wqe_bind_seg_flags2 {
diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h
deleted file mode 100644
index 5613e677a5f9..000000000000
--- a/include/linux/mlx5/accel.h
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5_ACCEL_H__
-#define __MLX5_ACCEL_H__
-
-#include <linux/mlx5/driver.h>
-
-enum mlx5_accel_esp_aes_gcm_keymat_iv_algo {
- MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ,
-};
-
-enum mlx5_accel_esp_flags {
- MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0, /* Default */
- MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0,
- MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1,
- MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2,
-};
-
-enum mlx5_accel_esp_action {
- MLX5_ACCEL_ESP_ACTION_DECRYPT,
- MLX5_ACCEL_ESP_ACTION_ENCRYPT,
-};
-
-enum mlx5_accel_esp_keymats {
- MLX5_ACCEL_ESP_KEYMAT_AES_NONE,
- MLX5_ACCEL_ESP_KEYMAT_AES_GCM,
-};
-
-enum mlx5_accel_esp_replay {
- MLX5_ACCEL_ESP_REPLAY_NONE,
- MLX5_ACCEL_ESP_REPLAY_BMP,
-};
-
-struct aes_gcm_keymat {
- u64 seq_iv;
- enum mlx5_accel_esp_aes_gcm_keymat_iv_algo iv_algo;
-
- u32 salt;
- u32 icv_len;
-
- u32 key_len;
- u32 aes_key[256 / 32];
-};
-
-struct mlx5_accel_esp_xfrm_attrs {
- enum mlx5_accel_esp_action action;
- u32 esn;
- u32 spi;
- u32 seq;
- u32 tfc_pad;
- u32 flags;
- u32 sa_handle;
- enum mlx5_accel_esp_replay replay_type;
- union {
- struct {
- u32 size;
-
- } bmp;
- } replay;
- enum mlx5_accel_esp_keymats keymat_type;
- union {
- struct aes_gcm_keymat aes_gcm;
- } keymat;
-};
-
-struct mlx5_accel_esp_xfrm {
- struct mlx5_core_dev *mdev;
- struct mlx5_accel_esp_xfrm_attrs attrs;
-};
-
-enum {
- MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA = 1UL << 0,
-};
-
-enum mlx5_accel_ipsec_cap {
- MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0,
- MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA = 1 << 1,
- MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 2,
- MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 3,
- MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 4,
- MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER = 1 << 5,
- MLX5_ACCEL_IPSEC_CAP_ESN = 1 << 6,
- MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7,
-};
-
-#ifdef CONFIG_MLX5_FPGA_IPSEC
-
-u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev);
-
-struct mlx5_accel_esp_xfrm *
-mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags);
-void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm);
-int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs);
-
-#else
-
-static inline u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; }
-
-static inline struct mlx5_accel_esp_xfrm *
-mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags) { return ERR_PTR(-EOPNOTSUPP); }
-static inline void
-mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) {}
-static inline int
-mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs) { return -EOPNOTSUPP; }
-
-#endif
-#endif
diff --git a/include/linux/mlx5/cmd.h b/include/linux/mlx5/cmd.h
deleted file mode 100644
index 68cd08f02c2f..000000000000
--- a/include/linux/mlx5/cmd.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX5_CMD_H
-#define MLX5_CMD_H
-
-#include <linux/types.h>
-
-struct manage_pages_layout {
- u64 ptr;
- u32 reserved;
- u16 num_entries;
- u16 func_id;
-};
-
-
-struct mlx5_cmd_alloc_uar_imm_out {
- u32 rsvd[3];
- u32 uarn;
-};
-
-#endif /* MLX5_CMD_H */
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 40748fc1b11b..cb15308b5cb0 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -33,7 +33,6 @@
#ifndef MLX5_CORE_CQ_H
#define MLX5_CORE_CQ_H
-#include <rdma/ib_verbs.h>
#include <linux/mlx5/driver.h>
#include <linux/refcount.h>
@@ -184,11 +183,13 @@ static inline void mlx5_cq_put(struct mlx5_core_cq *cq)
complete(&cq->free);
}
+int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u32 *in, int inlen, u32 *out, int outlen);
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen, u32 *out, int outlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- u32 *out, int outlen);
+ u32 *out);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen);
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 0e62c3db45e5..1ff91cb79ded 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -276,7 +276,9 @@ enum {
MLX5_MKEY_MASK_RW = 1ull << 20,
MLX5_MKEY_MASK_A = 1ull << 21,
MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23,
- MLX5_MKEY_MASK_FREE = 1ull << 29,
+ MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE = 1ull << 25,
+ MLX5_MKEY_MASK_FREE = 1ull << 29,
+ MLX5_MKEY_MASK_RELAXED_ORDERING_READ = 1ull << 47,
};
enum {
@@ -288,6 +290,7 @@ enum {
MLX5_UMR_INLINE = (1 << 7),
};
+#define MLX5_UMR_KLM_ALIGNMENT 4
#define MLX5_UMR_MTT_ALIGNMENT 0x40
#define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
@@ -322,6 +325,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,
MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12,
+ MLX5_EVENT_TYPE_OBJECT_CHANGE = 0x27,
MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08,
MLX5_EVENT_TYPE_PORT_CHANGE = 0x09,
@@ -344,6 +348,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe,
+ MLX5_EVENT_TYPE_VHCA_STATE_CHANGE = 0xf,
MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d,
@@ -356,6 +361,10 @@ enum mlx5_event {
MLX5_EVENT_TYPE_MAX = 0x100,
};
+enum mlx5_driver_event {
+ MLX5_DRIVER_EVENT_TYPE_TRAP = 0,
+};
+
enum {
MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
@@ -364,6 +373,8 @@ enum {
enum {
MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT = 0x5,
+ MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT = 0x7,
+ MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT = 0x8,
};
enum {
@@ -377,21 +388,6 @@ enum {
};
enum {
- MLX5_DEV_CAP_FLAG_XRC = 1LL << 3,
- MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
- MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
- MLX5_DEV_CAP_FLAG_APM = 1LL << 17,
- MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18,
- MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23,
- MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24,
- MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29,
- MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30,
- MLX5_DEV_CAP_FLAG_DCT = 1LL << 37,
- MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
- MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46,
-};
-
-enum {
MLX5_ROCE_VERSION_1 = 0,
MLX5_ROCE_VERSION_2 = 2,
};
@@ -445,14 +441,26 @@ enum {
MLX5_OPCODE_UMR = 0x25,
+ MLX5_OPCODE_ACCESS_ASO = 0x2d,
};
enum {
MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1,
+ MLX5_OPC_MOD_TLS_TIR_STATIC_PARAMS = 0x2,
};
enum {
MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1,
+ MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS = 0x2,
+};
+
+struct mlx5_wqe_tls_static_params_seg {
+ u8 ctx[MLX5_ST_SZ_BYTES(tls_static_params)];
+};
+
+struct mlx5_wqe_tls_progress_params_seg {
+ __be32 tis_tir_num;
+ u8 ctx[MLX5_ST_SZ_BYTES(tls_progress_params)];
};
enum {
@@ -475,10 +483,6 @@ enum {
};
enum {
- MLX5_CAP_OFF_CMDIF_CSUM = 46,
-};
-
-enum {
/*
* Max wqe size for rdma read is 512 bytes, so this
* limits our max_sge_rd as the wqe needs to fit:
@@ -521,19 +525,21 @@ struct mlx5_cmd_layout {
u8 status_own;
};
-enum mlx5_fatal_assert_bit_offsets {
- MLX5_RFR_OFFSET = 31,
+enum mlx5_rfr_severity_bit_offsets {
+ MLX5_RFR_BIT_OFFSET = 0x7,
};
struct health_buffer {
- __be32 assert_var[5];
- __be32 rsvd0[3];
+ __be32 assert_var[6];
+ __be32 rsvd0[2];
__be32 assert_exit_ptr;
__be32 assert_callra;
- __be32 rsvd1[2];
+ __be32 rsvd1[1];
+ __be32 time;
__be32 fw_ver;
__be32 hw_id;
- __be32 rfr;
+ u8 rfr_severity;
+ u8 rsvd2[3];
u8 irisc_index;
u8 synd;
__be16 ext_synd;
@@ -557,12 +563,17 @@ struct mlx5_init_seg {
__be32 rsvd1[120];
__be32 initializing;
struct health_buffer health;
- __be32 rsvd2[880];
+ __be32 rsvd2[878];
+ __be32 cmd_exec_to;
+ __be32 cmd_q_init_to;
__be32 internal_timer_h;
__be32 internal_timer_l;
__be32 rsvd3[2];
__be32 health_counter;
- __be32 rsvd4[1019];
+ __be32 rsvd4[11];
+ __be32 real_time_h;
+ __be32 real_time_l;
+ __be32 rsvd5[1006];
__be64 ieee1588_clk;
__be32 ieee1588_clk_type;
__be32 clr_intx;
@@ -689,6 +700,30 @@ struct mlx5_eqe_temp_warning {
__be64 sensor_warning_lsb;
} __packed;
+struct mlx5_eqe_obj_change {
+ u8 rsvd0[2];
+ __be16 obj_type;
+ __be32 obj_id;
+} __packed;
+
+#define SYNC_RST_STATE_MASK 0xf
+
+enum sync_rst_state_type {
+ MLX5_SYNC_RST_STATE_RESET_REQUEST = 0x0,
+ MLX5_SYNC_RST_STATE_RESET_NOW = 0x1,
+ MLX5_SYNC_RST_STATE_RESET_ABORT = 0x2,
+};
+
+struct mlx5_eqe_sync_fw_update {
+ u8 reserved_at_0[3];
+ u8 sync_rst_state;
+};
+
+struct mlx5_eqe_vhca_state {
+ __be16 ec_function;
+ __be16 function_id;
+} __packed;
+
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
@@ -707,6 +742,9 @@ union ev_data {
struct mlx5_eqe_dct dct;
struct mlx5_eqe_temp_warning temp_warning;
struct mlx5_eqe_xrq_err xrq_err;
+ struct mlx5_eqe_sync_fw_update sync_fw_update;
+ struct mlx5_eqe_vhca_state vhca_state;
+ struct mlx5_eqe_obj_change obj_change;
} __packed;
struct mlx5_eqe {
@@ -749,13 +787,26 @@ struct mlx5_err_cqe {
};
struct mlx5_cqe64 {
- u8 outer_l3_tunneled;
+ u8 tls_outer_l3_tunneled;
u8 rsvd0;
__be16 wqe_id;
- u8 lro_tcppsh_abort_dupack;
- u8 lro_min_ttl;
- __be16 lro_tcp_win;
- __be32 lro_ack_seq_num;
+ union {
+ struct {
+ u8 tcppsh_abort_dupack;
+ u8 min_ttl;
+ __be16 tcp_win;
+ __be32 ack_seq_num;
+ } lro;
+ struct {
+ u8 reserved0:1;
+ u8 match:1;
+ u8 flush:1;
+ u8 reserved3:5;
+ u8 header_size;
+ __be16 header_entry_index;
+ __be32 data_offset;
+ } shampo;
+ };
__be32 rss_hash_result;
u8 rss_hash_type;
u8 ml_path;
@@ -767,14 +818,22 @@ struct mlx5_cqe64 {
u8 l4_l3_hdr_type;
__be16 vlan_info;
__be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
- __be32 imm_inval_pkey;
+ union {
+ __be32 immediate;
+ __be32 inval_rkey;
+ __be32 pkey;
+ __be32 ft_metadata;
+ };
u8 rsvd40[4];
__be32 byte_cnt;
__be32 timestamp_h;
__be32 timestamp_l;
__be32 sop_drop_qpn;
__be16 wqe_counter;
- u8 signature;
+ union {
+ u8 signature;
+ u8 validity_iteration_count;
+ };
u8 op_own;
};
@@ -783,7 +842,7 @@ struct mlx5_mini_cqe8 {
__be32 rx_hash_result;
struct {
__be16 checksum;
- __be16 rsvd;
+ __be16 stridx;
};
struct {
__be16 wqe_counter;
@@ -803,6 +862,12 @@ enum {
enum {
MLX5_CQE_FORMAT_CSUM = 0x1,
+ MLX5_CQE_FORMAT_CSUM_STRIDX = 0x3,
+};
+
+enum {
+ MLX5_CQE_COMPRESS_LAYOUT_BASIC = 0,
+ MLX5_CQE_COMPRESS_LAYOUT_ENHANCED = 1,
};
#define MLX5_MINI_CQE_ARRAY_SIZE 8
@@ -819,7 +884,7 @@ static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
- return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
+ return (cqe->lro.tcppsh_abort_dupack >> 6) & 1;
}
static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
@@ -827,14 +892,14 @@ static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
return (cqe->l4_l3_hdr_type >> 4) & 0x7;
}
-static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
+static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
{
- return (cqe->l4_l3_hdr_type >> 2) & 0x3;
+ return cqe->tls_outer_l3_tunneled & 0x1;
}
-static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
+static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe)
{
- return cqe->outer_l3_tunneled & 0x1;
+ return (cqe->tls_outer_l3_tunneled >> 3) & 0x3;
}
static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
@@ -852,8 +917,16 @@ static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
return (u64)lo | ((u64)hi << 32);
}
-#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE (9)
-#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE (6)
+static inline u16 get_cqe_flow_tag(struct mlx5_cqe64 *cqe)
+{
+ return be32_to_cpu(cqe->sop_drop_qpn) & 0xFFF;
+}
+
+#define MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE 3
+#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE 9
+#define MLX5_MPWQE_LOG_NUM_STRIDES_MAX 16
+#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE 6
+#define MLX5_MPWQE_LOG_STRIDE_SZ_MAX 13
struct mpwrq_cqe_bc {
__be16 filler_consumed_strides;
@@ -922,6 +995,13 @@ enum {
CQE_L4_OK = 1 << 2,
};
+enum {
+ CQE_TLS_OFFLOAD_NOT_DECRYPTED = 0x0,
+ CQE_TLS_OFFLOAD_DECRYPTED = 0x1,
+ CQE_TLS_OFFLOAD_RESYNC = 0x2,
+ CQE_TLS_OFFLOAD_ERROR = 0x3,
+};
+
struct mlx5_sig_err_cqe {
u8 rsvd0[16];
__be32 expected_trans_sig;
@@ -964,13 +1044,12 @@ enum {
MLX5_MKEY_REMOTE_INVAL = 1 << 24,
MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
MLX5_MKEY_BSF_EN = 1 << 30,
- MLX5_MKEY_LEN64 = 1 << 31,
};
struct mlx5_mkey_seg {
/* This is a two bit field occupying bits 31-30.
* bit 31 is always 0,
- * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have tanslation
+ * bit 30 is zero for regular MRs and 1 (e.g. free) for UMRs that do not have translation
*/
u8 status;
u8 pcie_control;
@@ -1030,6 +1109,8 @@ enum {
MLX5_MATCH_INNER_HEADERS = 1 << 2,
MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3,
MLX5_MATCH_MISC_PARAMETERS_3 = 1 << 4,
+ MLX5_MATCH_MISC_PARAMETERS_4 = 1 << 5,
+ MLX5_MATCH_MISC_PARAMETERS_5 = 1 << 6,
};
enum {
@@ -1076,6 +1157,8 @@ enum mlx5_flex_parser_protos {
MLX5_FLEX_PROTO_GENEVE = 1 << 3,
MLX5_FLEX_PROTO_CW_MPLS_GRE = 1 << 4,
MLX5_FLEX_PROTO_CW_MPLS_UDP = 1 << 5,
+ MLX5_FLEX_PROTO_ICMP = 1 << 8,
+ MLX5_FLEX_PROTO_ICMPV6 = 1 << 9,
};
/* MLX5 DEV CAPs */
@@ -1086,6 +1169,9 @@ enum mlx5_cap_mode {
HCA_CAP_OPMOD_GET_CUR = 1,
};
+/* Any new cap addition must update mlx5_hca_caps_alloc() to allocate
+ * capability memory.
+ */
enum mlx5_cap_type {
MLX5_CAP_GENERAL = 0,
MLX5_CAP_ETHERNET_OFFLOADS,
@@ -1107,6 +1193,12 @@ enum mlx5_cap_type {
MLX5_CAP_TLS,
MLX5_CAP_VDPA_EMULATION = 0x13,
MLX5_CAP_DEV_EVENT = 0x14,
+ MLX5_CAP_IPSEC,
+ MLX5_CAP_DEV_SHAMPO = 0x1d,
+ MLX5_CAP_MACSEC = 0x1f,
+ MLX5_CAP_GENERAL_2 = 0x20,
+ MLX5_CAP_PORT_SELECTION = 0x25,
+ MLX5_CAP_ADV_VIRTUALIZATION = 0x26,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1140,46 +1232,55 @@ enum mlx5_qcam_feature_groups {
/* GET Dev Caps macros */
#define MLX5_CAP_GEN(mdev, cap) \
- MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
+ MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)
#define MLX5_CAP_GEN_64(mdev, cap) \
- MLX5_GET64(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
+ MLX5_GET64(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)
#define MLX5_CAP_GEN_MAX(mdev, cap) \
- MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)
+ MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->max, cap)
+
+#define MLX5_CAP_GEN_2(mdev, cap) \
+ MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)
+
+#define MLX5_CAP_GEN_2_64(mdev, cap) \
+ MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)
+
+#define MLX5_CAP_GEN_2_MAX(mdev, cap) \
+ MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->max, cap)
#define MLX5_CAP_ETH(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+ mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->cur, cap)
#define MLX5_CAP_ETH_MAX(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+ mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->max, cap)
#define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->caps.hca_cur[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS], cap)
+ mdev->caps.hca[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS]->cur, cap)
#define MLX5_CAP_ROCE(mdev, cap) \
- MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap)
+ MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->cur, cap)
#define MLX5_CAP_ROCE_MAX(mdev, cap) \
- MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap)
+ MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->max, cap)
#define MLX5_CAP_ATOMIC(mdev, cap) \
- MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap)
+ MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->cur, cap)
#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
- MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap)
+ MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->max, cap)
#define MLX5_CAP_FLOWTABLE(mdev, cap) \
- MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
+ MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)
#define MLX5_CAP64_FLOWTABLE(mdev, cap) \
- MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
+ MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)
#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
- MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap)
+ MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->max, cap)
#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
@@ -1211,13 +1312,19 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap)
+#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap)
+
+#define MLX5_CAP_FLOWTABLE_RDMA_TX_MAX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_rdma.cap)
+
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
- mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+ mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
- mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+ mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->max, cap)
#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
@@ -1239,31 +1346,53 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_ESW(mdev, cap) \
MLX5_GET(e_switch_cap, \
- mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap)
+ mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap)
#define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET64(flow_table_eswitch_cap, \
- (mdev)->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+ (mdev)->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
#define MLX5_CAP_ESW_MAX(mdev, cap) \
MLX5_GET(e_switch_cap, \
- mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap)
+ mdev->caps.hca[MLX5_CAP_ESWITCH]->max, cap)
+
+#define MLX5_CAP_PORT_SELECTION(mdev, cap) \
+ MLX5_GET(port_selection_cap, \
+ mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur, cap)
+
+#define MLX5_CAP_PORT_SELECTION_MAX(mdev, cap) \
+ MLX5_GET(port_selection_cap, \
+ mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->max, cap)
+
+#define MLX5_CAP_ADV_VIRTUALIZATION(mdev, cap) \
+ MLX5_GET(adv_virtualization_cap, \
+ mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->cur, cap)
+
+#define MLX5_CAP_ADV_VIRTUALIZATION_MAX(mdev, cap) \
+ MLX5_GET(adv_virtualization_cap, \
+ mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->max, cap)
+
+#define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
+ MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)
+
+#define MLX5_CAP_FLOWTABLE_PORT_SELECTION_MAX(mdev, cap) \
+ MLX5_CAP_PORT_SELECTION_MAX(mdev, flow_table_properties_port_selection.cap)
#define MLX5_CAP_ODP(mdev, cap)\
- MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap)
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
#define MLX5_CAP_ODP_MAX(mdev, cap)\
- MLX5_GET(odp_cap, mdev->caps.hca_max[MLX5_CAP_ODP], cap)
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap)
#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
MLX5_GET(vector_calc_cap, \
- mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap)
+ mdev->caps.hca[MLX5_CAP_VECTOR_CALC]->cur, cap)
#define MLX5_CAP_QOS(mdev, cap)\
- MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)
+ MLX5_GET(qos_cap, mdev->caps.hca[MLX5_CAP_QOS]->cur, cap)
#define MLX5_CAP_DEBUG(mdev, cap)\
- MLX5_GET(debug_cap, mdev->caps.hca_cur[MLX5_CAP_DEBUG], cap)
+ MLX5_GET(debug_cap, mdev->caps.hca[MLX5_CAP_DEBUG]->cur, cap)
#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
@@ -1299,24 +1428,33 @@ enum mlx5_qcam_feature_groups {
MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)
#define MLX5_CAP_DEV_MEM(mdev, cap)\
- MLX5_GET(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)
+ MLX5_GET(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)
#define MLX5_CAP64_DEV_MEM(mdev, cap)\
- MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)
+ MLX5_GET64(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)
#define MLX5_CAP_TLS(mdev, cap) \
- MLX5_GET(tls_cap, (mdev)->caps.hca_cur[MLX5_CAP_TLS], cap)
+ MLX5_GET(tls_cap, (mdev)->caps.hca[MLX5_CAP_TLS]->cur, cap)
#define MLX5_CAP_DEV_EVENT(mdev, cap)\
- MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap)
+ MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca[MLX5_CAP_DEV_EVENT]->cur, cap)
#define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
- MLX5_GET(device_virtio_emulation_cap, \
- (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
+ MLX5_GET(virtio_emulation_cap, \
+ (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)
#define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
- MLX5_GET64(device_virtio_emulation_cap, \
- (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
+ MLX5_GET64(virtio_emulation_cap, \
+ (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)
+
+#define MLX5_CAP_IPSEC(mdev, cap)\
+ MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap)
+
+#define MLX5_CAP_DEV_SHAMPO(mdev, cap)\
+ MLX5_GET(shampo_cap, mdev->caps.hca[MLX5_CAP_DEV_SHAMPO]->cur, cap)
+
+#define MLX5_CAP_MACSEC(mdev, cap)\
+ MLX5_GET(macsec_cap, (mdev)->caps.hca[MLX5_CAP_MACSEC]->cur, cap)
enum {
MLX5_CMD_STAT_OK = 0x0,
@@ -1362,6 +1500,8 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}
+#define MLX5_RDMA_RX_NUM_COUNTERS_PRIOS 2
+#define MLX5_RDMA_TX_NUM_COUNTERS_PRIOS 1
#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
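[Editorial note: a sketch of reading capabilities through the reworked
accessors above — not part of the patch; "example_csum_offload_supported" is
an illustrative name, while eth_net_offloads and csum_cap are existing
mlx5_ifc capability fields.]

	static bool example_csum_offload_supported(struct mlx5_core_dev *mdev)
	{
		/* Each macro expands to an MLX5_GET() on the per-type
		 * capability page now held in caps.hca[type]->cur. */
		return MLX5_CAP_GEN(mdev, eth_net_offloads) &&
		       MLX5_CAP_ETH(mdev, csum_cap);
	}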
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 277a51d3ec40..af2ceb4160bc 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -48,6 +48,7 @@
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/refcount.h>
+#include <linux/auxiliary_bus.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
@@ -56,15 +57,15 @@
#include <linux/ptp_clock_kernel.h>
#include <net/devlink.h>
+#define MLX5_ADEV_NAME "mlx5_core"
+
+#define MLX5_IRQ_EQ_CTRL (U8_MAX)
+
enum {
MLX5_BOARD_ID_LEN = 64,
};
enum {
- /* one minute for the sake of bringup. Generally, commands must always
- * complete and we may need to increase this timeout value
- */
- MLX5_CMD_TIMEOUT_MSEC = 60 * 1000,
MLX5_CMD_WQ_MAX_NAME = 32,
};
@@ -83,7 +84,7 @@ enum mlx5_sqp_t {
};
enum {
- MLX5_MAX_PORTS = 2,
+ MLX5_MAX_PORTS = 4,
};
enum {
@@ -124,13 +125,16 @@ enum {
MLX5_REG_PELC = 0x500e,
MLX5_REG_PVLC = 0x500f,
MLX5_REG_PCMR = 0x5041,
+ MLX5_REG_PDDR = 0x5031,
MLX5_REG_PMLP = 0x5002,
MLX5_REG_PPLM = 0x5023,
MLX5_REG_PCAM = 0x507f,
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
MLX5_REG_MCIA = 0x9014,
+ MLX5_REG_MFRL = 0x9028,
MLX5_REG_MLCR = 0x902b,
+ MLX5_REG_MRTC = 0x902d,
MLX5_REG_MTRC_CAP = 0x9040,
MLX5_REG_MTRC_CONF = 0x9041,
MLX5_REG_MTRC_STDB = 0x9042,
@@ -139,6 +143,7 @@ enum {
MLX5_REG_MPCNT = 0x9051,
MLX5_REG_MTPPS = 0x9053,
MLX5_REG_MTPPSE = 0x9054,
+ MLX5_REG_MTUTC = 0x9055,
MLX5_REG_MPEGC = 0x9056,
MLX5_REG_MCQS = 0x9060,
MLX5_REG_MCQI = 0x9061,
@@ -146,7 +151,9 @@ enum {
MLX5_REG_MCDA = 0x9063,
MLX5_REG_MCAM = 0x907f,
MLX5_REG_MIRC = 0x9162,
+ MLX5_REG_SBCAM = 0xB01F,
MLX5_REG_RESOURCE_DUMP = 0xC000,
+ MLX5_REG_DTOR = 0xC00E,
};
enum mlx5_qpts_trust_state {
@@ -188,7 +195,8 @@ enum port_state_policy {
enum mlx5_coredev_type {
MLX5_COREDEV_PF,
- MLX5_COREDEV_VF
+ MLX5_COREDEV_VF,
+ MLX5_COREDEV_SF,
};
struct mlx5_field_desc {
@@ -200,7 +208,7 @@ struct mlx5_rsc_debug {
void *object;
enum dbg_rsc_type type;
struct dentry *root;
- struct mlx5_field_desc fields[0];
+ struct mlx5_field_desc fields[];
};
enum mlx5_dev_event {
@@ -213,21 +221,10 @@ enum mlx5_port_status {
MLX5_PORT_DOWN = 2,
};
-struct mlx5_bfreg_info {
- u32 *sys_pages;
- int num_low_latency_bfregs;
- unsigned int *count;
-
- /*
- * protect bfreg allocation data structs
- */
- struct mutex lock;
- u32 ver;
- bool lib_uar_4k;
- u32 num_sys_pages;
- u32 num_static_sys_pages;
- u32 total_num_bfregs;
- u32 num_dyn_bfregs;
+enum mlx5_cmdif_state {
+ MLX5_CMDIF_STATE_UNINITIALIZED,
+ MLX5_CMDIF_STATE_UP,
+ MLX5_CMDIF_STATE_DOWN,
};
struct mlx5_cmd_first {
@@ -267,6 +264,16 @@ enum {
struct mlx5_cmd_stats {
u64 sum;
u64 n;
+ /* number of times command failed */
+ u64 failed;
+ /* number of times command failed on bad status returned by FW */
+ u64 failed_mbox_status;
+ /* last command failed returned errno */
+ u32 last_failed_errno;
+ /* last bad status returned by FW */
+ u8 last_failed_mbox_status;
+ /* last command failed syndrome returned by FW */
+ u32 last_failed_syndrome;
struct dentry *root;
/* protect command average calculations */
spinlock_t lock;
@@ -275,6 +282,7 @@ struct mlx5_cmd_stats {
struct mlx5_cmd {
struct mlx5_nb nb;
+ enum mlx5_cmdif_state state;
void *cmd_alloc_buf;
dma_addr_t alloc_dma;
int alloc_size;
@@ -301,19 +309,13 @@ struct mlx5_cmd {
struct semaphore sem;
struct semaphore pages_sem;
int mode;
+ u16 allowed_opcode;
struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
struct dma_pool *pool;
struct mlx5_cmd_debug dbg;
struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
int checksum_disabled;
- struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
-};
-
-struct mlx5_port_caps {
- int gid_table_len;
- int pkey_table_len;
- u8 ext_port_cap;
- bool has_smi;
+ struct mlx5_cmd_stats *stats;
};
struct mlx5_cmd_mailbox {
@@ -365,20 +367,6 @@ struct mlx5_core_sig_ctx {
u32 sigerr_count;
};
-enum {
- MLX5_MKEY_MR = 1,
- MLX5_MKEY_MW,
- MLX5_MKEY_INDIRECT_DEVX,
-};
-
-struct mlx5_core_mkey {
- u64 iova;
- u64 size;
- u32 key;
- u32 pd;
- u32 type;
-};
-
#define MLX5_24BIT_MASK ((1 << 24) - 1)
enum mlx5_res_type {
@@ -445,9 +433,9 @@ struct mlx5_core_health {
unsigned long flags;
struct work_struct fatal_report_work;
struct work_struct report_work;
- struct delayed_work recover_work;
struct devlink_health_reporter *fw_reporter;
struct devlink_health_reporter *fw_fatal_reporter;
+ struct delayed_work update_fw_log_ts_work;
};
struct mlx5_qp_table {
@@ -459,6 +447,11 @@ struct mlx5_qp_table {
struct radix_tree_root tree;
};
+enum {
+ MLX5_PF_NOTIFY_DISABLE_VF,
+ MLX5_PF_NOTIFY_ENABLE_VF,
+};
+
struct mlx5_vf_context {
int enabled;
u64 port_guid;
@@ -469,6 +462,7 @@ struct mlx5_vf_context {
u8 port_guid_valid:1;
u8 node_guid_valid:1;
enum port_state_policy policy;
+ struct blocking_notifier_head notifier;
};
struct mlx5_core_sriov {
@@ -500,6 +494,10 @@ struct mlx5_fc_stats {
unsigned long next_query;
unsigned long sampling_interval; /* jiffies */
u32 *bulk_query_out;
+ int bulk_query_len;
+ size_t num_counters;
+ bool bulk_query_alloc_failed;
+ unsigned long next_bulk_query_alloc;
struct mlx5_fc_pool fc_pool;
};
@@ -508,8 +506,13 @@ struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
struct mlx5_devcom;
+struct mlx5_fw_reset;
struct mlx5_eq_table;
struct mlx5_irq_table;
+struct mlx5_vhca_state_notifier;
+struct mlx5_sf_dev_table;
+struct mlx5_sf_hw_table;
+struct mlx5_sf_table;
struct mlx5_rate_limit {
u32 rate;
@@ -518,9 +521,11 @@ struct mlx5_rate_limit {
};
struct mlx5_rl_entry {
- struct mlx5_rate_limit rl;
- u16 index;
- u16 refcount;
+ u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)];
+ u64 refcount;
+ u16 index;
+ u16 uid;
+ u8 dedicated : 1;
};
struct mlx5_rl_table {
@@ -530,6 +535,7 @@ struct mlx5_rl_table {
u32 max_rate;
u32 min_rate;
struct mlx5_rl_entry *rl_entry;
+ u64 refcount;
};
struct mlx5_core_roce {
@@ -538,6 +544,36 @@ struct mlx5_core_roce {
struct mlx5_flow_handle *allow_rule;
};
+enum {
+ MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0,
+ MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1,
+ /* Set during device detach to block any further devices
+ * creation/deletion on drivers rescan. Unset during device attach.
+ */
+ MLX5_PRIV_FLAGS_DETACH = 1 << 2,
+ /* Distinguish between mlx5e_probe/remove called by module init/cleanup
+ * and called by other flows which can already hold devlink lock
+ */
+ MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW = 1 << 3,
+};
+
+struct mlx5_adev {
+ struct auxiliary_device adev;
+ struct mlx5_core_dev *mdev;
+ int idx;
+};
+
+struct mlx5_debugfs_entries {
+ struct dentry *dbg_root;
+ struct dentry *qp_debugfs;
+ struct dentry *eq_debugfs;
+ struct dentry *cq_debugfs;
+ struct dentry *cmdif_debugfs;
+ struct dentry *pages_debugfs;
+ struct dentry *lag_debugfs;
+};
+
+struct mlx5_ft_pool;
struct mlx5_priv {
/* IRQ table valid only for real pci devices PF or VF */
struct mlx5_irq_table *irq_table;
@@ -546,40 +582,35 @@ struct mlx5_priv {
/* pages stuff */
struct mlx5_nb pg_nb;
struct workqueue_struct *pg_wq;
- struct rb_root page_root;
- int fw_pages;
+ struct xarray page_root_xa;
+ u32 fw_pages;
atomic_t reg_pages;
struct list_head free_list;
- int vfs_pages;
- int peer_pf_pages;
+ u32 vfs_pages;
+ u32 host_pf_pages;
+ u32 fw_pages_alloc_failed;
+ u32 give_pages_dropped;
+ u32 reclaim_pages_discard;
struct mlx5_core_health health;
+ struct list_head traps;
- /* start: qp staff */
- struct mlx5_qp_table qp_table;
- struct dentry *qp_debugfs;
- struct dentry *eq_debugfs;
- struct dentry *cq_debugfs;
- struct dentry *cmdif_debugfs;
- /* end: qp staff */
+ struct mlx5_debugfs_entries dbg;
 /* start: alloc stuff */
- /* protect buffer alocation according to numa node */
+ /* protect buffer allocation according to numa node */
struct mutex alloc_mutex;
int numa_node;
struct mutex pgdir_mutex;
struct list_head pgdir_list;
 /* end: alloc stuff */
- struct dentry *dbg_root;
- /* protect mkey key part */
- spinlock_t mkey_lock;
- u8 mkey_key;
-
- struct list_head dev_list;
struct list_head ctx_list;
spinlock_t ctx_lock;
+ struct mlx5_adev **adev;
+ int adev_idx;
+ int sw_vhca_id;
struct mlx5_events *events;
struct mlx5_flow_steering *steering;
@@ -587,23 +618,35 @@ struct mlx5_priv {
struct mlx5_eswitch *eswitch;
struct mlx5_core_sriov sriov;
struct mlx5_lag *lag;
+ u32 flags;
struct mlx5_devcom *devcom;
+ struct mlx5_fw_reset *fw_reset;
struct mlx5_core_roce roce;
struct mlx5_fc_stats fc_stats;
struct mlx5_rl_table rl_table;
+ struct mlx5_ft_pool *ft_pool;
struct mlx5_bfreg_data bfregs;
struct mlx5_uars_page *uar;
+#ifdef CONFIG_MLX5_SF
+ struct mlx5_vhca_state_notifier *vhca_state_notifier;
+ struct mlx5_sf_dev_table *sf_dev_table;
+ struct mlx5_core_dev *parent_mdev;
+#endif
+#ifdef CONFIG_MLX5_SF_MANAGER
+ struct mlx5_sf_hw_table *sf_hw_table;
+ struct mlx5_sf_table *sf_table;
+#endif
};
enum mlx5_device_state {
- MLX5_DEVICE_STATE_UNINITIALIZED,
- MLX5_DEVICE_STATE_UP,
+ MLX5_DEVICE_STATE_UP = 1,
MLX5_DEVICE_STATE_INTERNAL_ERROR,
};
enum mlx5_interface_state {
MLX5_INTERFACE_STATE_UP = BIT(0),
+ MLX5_BREAK_FW_WAIT = BIT(1),
};
enum mlx5_pci_status {
@@ -625,15 +668,20 @@ struct mlx5_td {
};
struct mlx5e_resources {
- u32 pdn;
- struct mlx5_td td;
- struct mlx5_core_mkey mkey;
- struct mlx5_sq_bfreg bfreg;
+ struct mlx5e_hw_objs {
+ u32 pdn;
+ struct mlx5_td td;
+ u32 mkey;
+ struct mlx5_sq_bfreg bfreg;
+ } hw_objs;
+ struct devlink_port dl_port;
+ struct net_device *uplink_netdev;
};
enum mlx5_sw_icm_type {
MLX5_SW_ICM_TYPE_STEERING,
MLX5_SW_ICM_TYPE_HEADER_MODIFY,
+ MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN,
};
#define MLX5_MAX_RESERVED_GIDS 8
@@ -650,21 +698,26 @@ struct mlx5_pps {
struct work_struct out_work;
u64 start[MAX_PIN_NUM];
u8 enabled;
+ u64 min_npps_period;
+ u64 min_out_pulse_duration_ns;
};
-struct mlx5_clock {
- struct mlx5_core_dev *mdev;
- struct mlx5_nb pps_nb;
- seqlock_t lock;
+struct mlx5_timer {
struct cyclecounter cycles;
struct timecounter tc;
- struct hwtstamp_config hwtstamp_config;
u32 nominal_c_mult;
unsigned long overflow_period;
struct delayed_work overflow_work;
+};
+
+struct mlx5_clock {
+ struct mlx5_nb pps_nb;
+ seqlock_t lock;
+ struct hwtstamp_config hwtstamp_config;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
struct mlx5_pps pps_info;
+ struct mlx5_timer timer;
};
struct mlx5_dm;
@@ -676,6 +729,32 @@ struct mlx5_hv_vhca;
#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
+enum {
+ MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
+ MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
+};
+
+enum {
+ MKEY_CACHE_LAST_STD_ENTRY = 20,
+ MLX5_IMR_MTT_CACHE_ENTRY,
+ MLX5_IMR_KSM_CACHE_ENTRY,
+ MAX_MKEY_CACHE_ENTRIES
+};
+
+struct mlx5_profile {
+ u64 mask;
+ u8 log_max_qp;
+ struct {
+ int size;
+ int limit;
+ } mr_cache[MAX_MKEY_CACHE_ENTRIES];
+};
+
+struct mlx5_hca_cap {
+ u32 cur[MLX5_UN_SZ_DW(hca_cap_union)];
+ u32 max[MLX5_UN_SZ_DW(hca_cap_union)];
+};
+
struct mlx5_core_dev {
struct device *device;
enum mlx5_coredev_type coredev_type;
@@ -686,16 +765,15 @@ struct mlx5_core_dev {
u8 rev_id;
char board_id[MLX5_BOARD_ID_LEN];
struct mlx5_cmd cmd;
- struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
struct {
- u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
- u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ struct mlx5_hca_cap *hca[MLX5_CAP_NUM];
u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)];
u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
u8 embedded_cpu;
} caps;
+ struct mlx5_timeouts *timeouts;
u64 sys_image_guid;
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
@@ -703,10 +781,10 @@ struct mlx5_core_dev {
enum mlx5_device_state state;
/* sync interface state */
struct mutex intf_state_mutex;
+ struct lock_class_key lock_key;
unsigned long intf_state;
struct mlx5_priv priv;
- struct mlx5_profile *profile;
- atomic_t num_qps;
+ struct mlx5_profile profile;
u32 issi;
struct mlx5e_resources mlx5e_res;
struct mlx5_dm *dm;
@@ -722,6 +800,7 @@ struct mlx5_core_dev {
struct mlx5_clock clock;
struct mlx5_ib_clock_info *clock_info;
struct mlx5_fw_tracer *tracer;
+ struct mlx5_rsc_dump *rsc_dump;
u32 vsc_addr;
struct mlx5_hv_vhca *hv_vhca;
};
@@ -761,6 +840,7 @@ struct mlx5_cmd_work_ent {
struct delayed_work cb_timeout_work;
void *context;
int idx;
+ struct completion handling;
struct completion done;
struct mlx5_cmd *cmd;
struct work_struct work;
@@ -773,11 +853,8 @@ struct mlx5_cmd_work_ent {
u64 ts2;
u16 op;
bool polling;
-};
-
-struct mlx5_pas {
- u64 pa;
- u8 log_sz;
+ /* Track the max comp handlers */
+ refcount_t refcnt;
};
enum phy_port_state {
@@ -811,20 +888,10 @@ struct mlx5_hca_vport_context {
bool grh_required;
};
-static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset)
-{
- return buf->frags->buf + offset;
-}
-
#define STRUCT_FIELD(header, field) \
.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
.struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field
-static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
-{
- return pci_get_drvdata(pdev);
-}
-
extern struct dentry *mlx5_debugfs_root;
static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
@@ -842,14 +909,14 @@ static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}
-static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
+static inline u32 mlx5_base_mkey(const u32 key)
{
- return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
+ return key & 0xffffff00u;
}
-static inline u32 mlx5_base_mkey(const u32 key)
+static inline u32 wq_get_byte_sz(u8 log_sz, u8 log_stride)
{
- return key & 0xffffff00u;
+ return ((u32)1 << log_sz) << log_stride;
}
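
As a quick worked example of the helper above: a WQ of 2^8 = 256 entries with a 2^6 = 64-byte stride gives

	u32 sz = wq_get_byte_sz(8, 6);	/* (1 << 8) << 6 = 16384 bytes */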
static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags,
@@ -892,15 +959,18 @@ mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
}
-int mlx5_cmd_init(struct mlx5_core_dev *dev);
-void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
+enum {
+ CMD_ALLOWED_OPCODE_ALL,
+};
+
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
+void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);
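
mlx5_cmd_allowed_opcode() narrows the command interface to a single opcode and CMD_ALLOWED_OPCODE_ALL reopens it; a hedged sketch of the teardown-style usage (the opcode choice is illustrative):

	/* let only page-reclaim commands through while draining */
	mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_MANAGE_PAGES);
	/* ... reclaim pages ... */
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);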
struct mlx5_async_ctx {
struct mlx5_core_dev *dev;
atomic_t num_inflight;
- struct wait_queue_head wait;
+ struct completion inflight_done;
};
struct mlx5_async_work;
@@ -910,6 +980,8 @@ typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);
struct mlx5_async_work {
struct mlx5_async_ctx *ctx;
mlx5_async_cbk_t user_callback;
+ u16 opcode; /* cmd opcode */
+ void *out; /* pointer to the cmd output buffer */
};
void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
@@ -918,26 +990,36 @@ void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
void *out, int out_size, mlx5_async_cbk_t callback,
struct mlx5_async_work *work);
-
+void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out);
+int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size);
+int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
+
+#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out) \
+ ({ \
+ mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out, \
+ MLX5_ST_SZ_BYTES(ifc_cmd##_out)); \
+ })
+
+#define mlx5_cmd_exec_in(dev, ifc_cmd, in) \
+ ({ \
+ u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {}; \
+ mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out); \
+ })
+
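A minimal usage sketch of the size-inferring wrappers above, with the caller building the command via the MLX5_SET accessors (destroy_mkey is chosen purely as an illustration):

static int example_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey)
{
	u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {};

	MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
	MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey));
	/* in/out sizes are derived from the destroy_mkey ifc layouts */
	return mlx5_cmd_exec_in(dev, destroy_mkey, in);
}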
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
void *out, int out_size);
-void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
+bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
-int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
-int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
-void mlx5_health_flush(struct mlx5_core_dev *dev);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
+void mlx5_start_health_fw_log_up(struct mlx5_core_dev *dev);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
-int mlx5_buf_alloc(struct mlx5_core_dev *dev,
- int size, struct mlx5_frag_buf *buf);
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
@@ -945,25 +1027,19 @@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
struct mlx5_cmd_mailbox *head);
-int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey,
- struct mlx5_async_ctx *async_ctx, u32 *in,
- int inlen, u32 *out, int outlen,
- mlx5_async_cbk_t callback,
- struct mlx5_async_work *context);
-int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey,
- u32 *in, int inlen);
-int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey);
-int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
- u32 *out, int outlen);
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in,
+ int inlen);
+int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey);
+int mlx5_core_query_mkey(struct mlx5_core_dev *dev, u32 mkey, u32 *out,
+ int outlen);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
+void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
s32 npages, bool ec_function);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
@@ -971,22 +1047,30 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
-void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
+void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
- unsigned int *irqn);
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
+struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
+int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in,
+ void *data_out, int size_out, u16 reg_id, int arg,
+ int write, bool verbose);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
u16 reg_num, int arg, int write);
-int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
int node);
+
+static inline int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+{
+ return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
+}
+
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
const char *mlx5_command_str(int command);
@@ -998,8 +1082,6 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
struct mlx5_odp_caps *odp_caps);
-int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
- u8 port_num, void *out, size_t sz);
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
@@ -1007,6 +1089,9 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
struct mlx5_rate_limit *rl);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
+int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
+ bool dedicated_entry, u16 *index);
+void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index);
bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
struct mlx5_rate_limit *rl_1);
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
@@ -1021,11 +1106,6 @@ int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
u8 roce_version, u8 roce_l3_type, const u8 *gid,
const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);
-static inline int fw_initializing(struct mlx5_core_dev *dev)
-{
- return ioread32be(&dev->iseg->initializing) >> 31;
-}
-
static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
return mkey >> 8;
@@ -1041,59 +1121,62 @@ static inline u8 mlx5_mkey_variant(u32 mkey)
return mkey & 0xff;
}
-enum {
- MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
- MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
-};
-
-enum {
- MR_CACHE_LAST_STD_ENTRY = 20,
- MLX5_IMR_MTT_CACHE_ENTRY,
- MLX5_IMR_KSM_CACHE_ENTRY,
- MAX_MR_CACHE_ENTRIES
-};
-
-enum {
- MLX5_INTERFACE_PROTOCOL_IB = 0,
- MLX5_INTERFACE_PROTOCOL_ETH = 1,
-};
-
-struct mlx5_interface {
- void * (*add)(struct mlx5_core_dev *dev);
- void (*remove)(struct mlx5_core_dev *dev, void *context);
- int (*attach)(struct mlx5_core_dev *dev, void *context);
- void (*detach)(struct mlx5_core_dev *dev, void *context);
- int protocol;
- struct list_head list;
-};
-
-int mlx5_register_interface(struct mlx5_interface *intf);
-void mlx5_unregister_interface(struct mlx5_interface *intf);
+/* Async-atomic event notifier used by mlx5 core to forward FW
+ * events received from the event queue to mlx5 consumers.
+ * Optimises event queue dispatching.
+ */
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
+
+/* Async-atomic event notifier used for forwarding
+ * events from the event queue to the mlx5 events dispatcher,
+ * eswitch, clock and others.
+ */
int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
+/* Blocking event notifier used to forward SW events, used for slow path */
+int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event,
+ void *data);
+
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
-bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
+bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
+u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
+ struct net_device *slave);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
u64 *values,
int num_counters,
size_t *offsets);
+struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev);
+u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
- u64 length, u16 uid, phys_addr_t *addr, u32 *obj_id);
+ u64 length, u32 log_alignment, u16 uid,
+ phys_addr_t *addr, u32 *obj_id);
int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
u64 length, u16 uid, phys_addr_t addr, u32 obj_id);
+struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev);
+void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev);
+
+int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev,
+ int vf_id,
+ struct notifier_block *nb);
+void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev,
+ int vf_id,
+ struct notifier_block *nb);
#ifdef CONFIG_MLX5_CORE_IPOIB
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
struct ib_device *ibdev,
@@ -1104,15 +1187,6 @@ int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
struct ib_device *device,
struct rdma_netdev_alloc_params *params);
-struct mlx5_profile {
- u64 mask;
- u8 log_max_qp;
- struct {
- int size;
- int limit;
- } mr_cache[MAX_MR_CACHE_ENTRIES];
-};
-
enum {
MLX5_PCI_DEV_IS_VF = 1 << 0,
};
@@ -1127,7 +1201,7 @@ static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
return dev->coredev_type == MLX5_COREDEV_VF;
}
-static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev)
+static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
{
return dev->caps.embedded_cpu;
}
@@ -1188,19 +1262,35 @@ static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
return MLX5_CAP_GEN(dev, native_port_num);
}
+static inline int mlx5_get_dev_index(struct mlx5_core_dev *dev)
+{
+ int idx = MLX5_CAP_GEN(dev, native_port_num);
+
+ if (idx >= 1 && idx <= MLX5_MAX_PORTS)
+ return idx - 1;
+ else
+ return PCI_FUNC(dev->pdev->devfn);
+}
+
enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
-static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
+bool mlx5_is_roce_on(struct mlx5_core_dev *dev);
+
+static inline bool mlx5_get_roce_state(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
- union devlink_param_value val;
+ if (MLX5_CAP_GEN(dev, roce_rw_supported))
+ return MLX5_CAP_GEN(dev, roce);
- devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
- &val);
- return val.vbool;
+ /* If RoCE cap is read-only in FW, get RoCE state from devlink
+ * in order to support RoCE enable/disable feature
+ */
+ return mlx5_is_roce_on(dev);
}
+enum {
+ MLX5_OCTWORD = 16,
+};
+
#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/eq.h b/include/linux/mlx5/eq.h
index e49d8c0d4f26..3705a382276b 100644
--- a/include/linux/mlx5/eq.h
+++ b/include/linux/mlx5/eq.h
@@ -4,18 +4,18 @@
#ifndef MLX5_CORE_EQ_H
#define MLX5_CORE_EQ_H
-#define MLX5_IRQ_VEC_COMP_BASE 1
#define MLX5_NUM_CMD_EQE (32)
#define MLX5_NUM_ASYNC_EQE (0x1000)
#define MLX5_NUM_SPARE_EQE (0x80)
struct mlx5_eq;
+struct mlx5_irq;
struct mlx5_core_dev;
struct mlx5_eq_param {
- u8 irq_index;
int nent;
u64 mask[4];
+ struct mlx5_irq *irq;
};
struct mlx5_eq *
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index 98e667b176ef..e2701ed0200e 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -12,7 +12,6 @@
#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
enum {
- MLX5_ESWITCH_NONE,
MLX5_ESWITCH_LEGACY,
MLX5_ESWITCH_OFFLOADS
};
@@ -29,11 +28,20 @@ enum {
REP_LOADED,
};
+enum mlx5_switchdev_event {
+ MLX5_SWITCHDEV_EVENT_PAIR,
+ MLX5_SWITCHDEV_EVENT_UNPAIR,
+};
+
struct mlx5_eswitch_rep;
struct mlx5_eswitch_rep_ops {
int (*load)(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep);
void (*unload)(struct mlx5_eswitch_rep *rep);
void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
+ int (*event)(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep,
+ enum mlx5_switchdev_event event,
+ void *data);
};
struct mlx5_eswitch_rep_data {
@@ -48,6 +56,7 @@ struct mlx5_eswitch_rep {
/* Only IB rep is using vport_index */
u16 vport_index;
u32 vlan_refcount;
+ struct mlx5_eswitch *esw;
};
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
@@ -61,24 +70,89 @@ struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
u16 vport_num);
void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type);
struct mlx5_flow_handle *
-mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw,
- u16 vport_num, u32 sqn);
-
-u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
+mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
+ struct mlx5_eswitch *from_esw,
+ struct mlx5_eswitch_rep *rep, u32 sqn);
#ifdef CONFIG_MLX5_ESWITCH
enum devlink_eswitch_encap_mode
mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev);
+bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw);
bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw);
-u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
+
+/* Reg C0 usage:
+ * Reg C0 = < ESW_PFNUM_BITS(4) | ESW_VPORT_BITS(12) | ESW_REG_C0_OBJ(16) >
+ *
+ * The highest 4 bits of reg c0 are the PF_NUM (range 0-15), followed by
+ * 12 bits of unique non-zero vport id (range 1-4095). The rest (lowest
+ * 16 bits) is left for user data objects managed by a common mapping
+ * context. PFNUM + VPORT comprise the SOURCE_PORT matching.
+ */
+#define ESW_VPORT_BITS 12
+#define ESW_PFNUM_BITS 4
+#define ESW_SOURCE_PORT_METADATA_BITS (ESW_PFNUM_BITS + ESW_VPORT_BITS)
+#define ESW_SOURCE_PORT_METADATA_OFFSET (32 - ESW_SOURCE_PORT_METADATA_BITS)
+#define ESW_REG_C0_USER_DATA_METADATA_BITS (32 - ESW_SOURCE_PORT_METADATA_BITS)
+#define ESW_REG_C0_USER_DATA_METADATA_MASK GENMASK(ESW_REG_C0_USER_DATA_METADATA_BITS - 1, 0)
+
+static inline u32 mlx5_eswitch_get_vport_metadata_mask(void)
+{
+ return GENMASK(31, 32 - ESW_SOURCE_PORT_METADATA_BITS);
+}
+
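A minimal sketch, assuming the 4/12-bit split documented above, of how a PF number and vport id compose the SOURCE_PORT half of reg c0 (the helper name is hypothetical):

static inline u32 esw_example_source_port_metadata(u8 pfnum, u16 vport)
{
	u32 md = ((u32)(pfnum & 0xf) << ESW_VPORT_BITS) | (vport & 0xfff);

	/* place PFNUM|VPORT in the high 16 bits; low 16 stay for user data */
	return md << ESW_SOURCE_PORT_METADATA_OFFSET;
}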
+u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
u16 vport_num);
-u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw);
+u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
+ u16 vport_num);
+
+/* Reg C1 usage:
+ * Reg C1 = < Reserved(1) | ESW_TUN_ID(12) | ESW_TUN_OPTS(11) | ESW_ZONE_ID(8) >
+ *
+ * The highest bit is reserved as a marker bit for other offloads; the next
+ * 12 bits of reg c1 are the encapsulation tunnel id, the next 11 bits are
+ * the encapsulation tunnel options, and the lowest 8 bits are used for the
+ * zone id.
+ *
+ * The zone id is used to restore the CT flow when a packet misses on a chain.
+ *
+ * The tunnel id and options are used together to restore the tunnel info
+ * metadata on miss and to support inner header rewrite by means of implicit
+ * chain 0 flows.
+ */
+#define ESW_RESERVED_BITS 1
+#define ESW_ZONE_ID_BITS 8
+#define ESW_TUN_OPTS_BITS 11
+#define ESW_TUN_ID_BITS 12
+#define ESW_TUN_OPTS_OFFSET ESW_ZONE_ID_BITS
+#define ESW_TUN_OFFSET ESW_TUN_OPTS_OFFSET
+#define ESW_ZONE_ID_MASK GENMASK(ESW_ZONE_ID_BITS - 1, 0)
+#define ESW_TUN_OPTS_MASK GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, ESW_TUN_OPTS_OFFSET)
+#define ESW_TUN_MASK GENMASK(31 - ESW_RESERVED_BITS, ESW_TUN_OFFSET)
+#define ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT 0 /* 0 is not a valid tunnel id */
+#define ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT
+/* 0x7FF is a reserved mapping */
+#define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
+#define ESW_TUN_SLOW_TABLE_GOTO_VPORT ((ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT << ESW_TUN_OPTS_BITS) | \
+ ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT)
+#define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK
+/* 0x7FE is a reserved mapping for bridge ingress push vlan mark */
+#define ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN (ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT - 1)
+#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN ((ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN << \
+ ESW_TUN_OPTS_BITS) | \
+ ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN)
+#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK \
+ GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, \
+ ESW_TUN_OPTS_OFFSET + 1)
+
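Conversely, a sketch (hypothetical helper names) of pulling the zone id and tunnel options back out of a reg c1 value with the masks above:

static inline u8 esw_example_reg_c1_zone_id(u32 reg_c1)
{
	return reg_c1 & ESW_ZONE_ID_MASK;
}

static inline u16 esw_example_reg_c1_tun_opts(u32 reg_c1)
{
	return (reg_c1 & ESW_TUN_OPTS_MASK) >> ESW_TUN_OPTS_OFFSET;
}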
+u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev);
+u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
+struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw);
+
#else /* CONFIG_MLX5_ESWITCH */
-static inline u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
+static inline u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
{
- return MLX5_ESWITCH_NONE;
+ return MLX5_ESWITCH_LEGACY;
}
static inline enum devlink_eswitch_encap_mode
@@ -88,17 +162,49 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
}
static inline bool
+mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
+{
+ return false;
+};
+
+static inline bool
mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
return false;
};
static inline u32
-mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
- int vport_num)
+mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, u16 vport_num)
{
return 0;
};
+
+static inline u32
+mlx5_eswitch_get_vport_metadata_mask(void)
+{
+ return 0;
+}
+
+static inline u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
+static inline struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw)
+{
+ return NULL;
+}
+
#endif /* CONFIG_MLX5_ESWITCH */
+static inline bool is_mdev_legacy_mode(struct mlx5_core_dev *dev)
+{
+ return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_LEGACY;
+}
+
+static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev)
+{
+ return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS;
+}
+
#endif
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 4cae16016b2b..c7a91981cd5a 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -38,10 +38,25 @@
#define MLX5_FS_DEFAULT_FLOW_TAG 0x0
+#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+
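A sketch of the intended MLX5_SET_CFG usage when filling create_flow_group_in ahead of mlx5_create_flow_group() (field values are illustrative; error handling elided):

	u32 *in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
	struct mlx5_flow_group *fg;

	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, 0);
	MLX5_SET_CFG(in, end_flow_index, 0);	/* a single-entry group */
	fg = mlx5_create_flow_group(ft, in);
	kvfree(in);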
+enum mlx5_flow_destination_type {
+ MLX5_FLOW_DESTINATION_TYPE_NONE,
+ MLX5_FLOW_DESTINATION_TYPE_VPORT,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
+ MLX5_FLOW_DESTINATION_TYPE_TIR,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER,
+ MLX5_FLOW_DESTINATION_TYPE_UPLINK,
+ MLX5_FLOW_DESTINATION_TYPE_PORT,
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM,
+};
+
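A hedged sketch of steering a rule to another flow table with the relocated destination enum (ft, next_ft, spec and flow_act are assumed to exist):

	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *rule;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = next_ft;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);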
enum {
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16,
MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 17,
MLX5_FLOW_CONTEXT_ACTION_DECRYPT = 1 << 18,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS = 1 << 19,
};
enum {
@@ -49,6 +64,7 @@ enum {
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1),
MLX5_FLOW_TABLE_TERMINATION = BIT(2),
MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
+ MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4),
};
#define LEFTOVERS_RULE_NUM 2
@@ -63,31 +79,43 @@ static inline void build_leftovers_ft_param(int *priority,
enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_BYPASS,
+ MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
MLX5_FLOW_NAMESPACE_LAG,
MLX5_FLOW_NAMESPACE_OFFLOADS,
MLX5_FLOW_NAMESPACE_ETHTOOL,
MLX5_FLOW_NAMESPACE_KERNEL,
MLX5_FLOW_NAMESPACE_LEFTOVERS,
MLX5_FLOW_NAMESPACE_ANCHOR,
+ MLX5_FLOW_NAMESPACE_FDB_BYPASS,
MLX5_FLOW_NAMESPACE_FDB,
MLX5_FLOW_NAMESPACE_ESW_EGRESS,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
MLX5_FLOW_NAMESPACE_SNIFFER_RX,
MLX5_FLOW_NAMESPACE_SNIFFER_TX,
MLX5_FLOW_NAMESPACE_EGRESS,
+ MLX5_FLOW_NAMESPACE_EGRESS_IPSEC,
+ MLX5_FLOW_NAMESPACE_EGRESS_MACSEC,
MLX5_FLOW_NAMESPACE_RDMA_RX,
MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL,
+ MLX5_FLOW_NAMESPACE_RDMA_TX,
+ MLX5_FLOW_NAMESPACE_PORT_SEL,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS,
+ MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS,
};
enum {
FDB_BYPASS_PATH,
FDB_TC_OFFLOAD,
FDB_FT_OFFLOAD,
+ FDB_TC_MISS,
+ FDB_BR_OFFLOAD,
FDB_SLOW_PATH,
+ FDB_PER_VPORT,
};
struct mlx5_pkt_reformat;
struct mlx5_modify_hdr;
+struct mlx5_flow_definer;
struct mlx5_flow_table;
struct mlx5_flow_group;
struct mlx5_flow_namespace;
@@ -128,6 +156,7 @@ struct mlx5_flow_destination {
struct mlx5_pkt_reformat *pkt_reformat;
u8 flags;
} vport;
+ u32 sampler_id;
};
};
@@ -151,6 +180,7 @@ struct mlx5_flow_table_attr {
int max_fte;
u32 level;
u32 flags;
+ u16 uid;
struct mlx5_flow_table *next_ft;
struct {
@@ -169,9 +199,7 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
- int prio,
- int num_flow_table_entries,
- u32 level, u16 vport);
+ struct mlx5_flow_table_attr *ft_attr, u16 vport);
struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
struct mlx5_flow_namespace *ns,
int prio, u32 level);
@@ -187,6 +215,19 @@ struct mlx5_flow_group *
mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
+struct mlx5_exe_aso {
+ u32 object_id;
+ u8 type;
+ u8 return_reg_id;
+ union {
+ u32 ctrl_data;
+ struct {
+ u8 meter_idx;
+ u8 init_color;
+ } flow_meter;
+ };
+};
+
struct mlx5_fs_vlan {
u16 ethtype;
u16 vid;
@@ -204,10 +245,15 @@ struct mlx5_flow_act {
u32 action;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
- uintptr_t esp_id;
+ struct mlx5_flow_act_crypto_params {
+ u8 type;
+ u32 obj_id;
+ } crypto;
u32 flags;
struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
struct ib_counters *counters;
+ struct mlx5_flow_group *fg;
+ struct mlx5_exe_aso exe_aso;
};
#define MLX5_DECLARE_FLOW_ACT(name) \
@@ -230,6 +276,10 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
struct mlx5_flow_destination *old_dest);
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
+
+/* As mlx5_fc_create() but doesn't queue the counter for periodic stats refresh. */
+struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging);
+
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
@@ -246,13 +296,27 @@ struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
void *modify_actions);
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
struct mlx5_modify_hdr *modify_hdr);
+struct mlx5_flow_definer *
+mlx5_create_match_definer(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type ns_type, u16 format_id,
+ u32 *match_mask);
+void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
+ struct mlx5_flow_definer *definer);
+int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer);
+
+struct mlx5_pkt_reformat_params {
+ int type;
+ u8 param_0;
+ u8 param_1;
+ size_t size;
+ void *data;
+};
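
The allocator now takes the parameter bundle above instead of a flat argument list; a minimal caller sketch (the reformat type and encap buffer are illustrative):

	struct mlx5_pkt_reformat_params params = {
		.type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN,
		.size = encap_size,
		.data = encap_header,
	};
	struct mlx5_pkt_reformat *reformat;

	reformat = mlx5_packet_reformat_alloc(dev, &params,
					      MLX5_FLOW_NAMESPACE_FDB);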
struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
- int reformat_type,
- size_t size,
- void *reformat_data,
+ struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type ns_type);
void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
struct mlx5_pkt_reformat *reformat);
+u32 mlx5_flow_table_id(struct mlx5_flow_table *ft);
#endif
diff --git a/include/linux/mlx5/fs_helpers.h b/include/linux/mlx5/fs_helpers.h
index 9db21cd0e92c..bc5125bc0561 100644
--- a/include/linux/mlx5/fs_helpers.h
+++ b/include/linux/mlx5/fs_helpers.h
@@ -38,46 +38,6 @@
#define MLX5_FS_IPV4_VERSION 4
#define MLX5_FS_IPV6_VERSION 6
-static inline bool mlx5_fs_is_ipsec_flow(const u32 *match_c)
-{
- void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
- misc_parameters);
-
- return MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
-}
-
-static inline bool _mlx5_fs_is_outer_ipproto_flow(const u32 *match_c,
- const u32 *match_v, u8 match)
-{
- const void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
- outer_headers);
- const void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
- outer_headers);
-
- return MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_protocol) == 0xff &&
- MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol) == match;
-}
-
-static inline bool mlx5_fs_is_outer_tcp_flow(const u32 *match_c,
- const u32 *match_v)
-{
- return _mlx5_fs_is_outer_ipproto_flow(match_c, match_v, IPPROTO_TCP);
-}
-
-static inline bool mlx5_fs_is_outer_udp_flow(const u32 *match_c,
- const u32 *match_v)
-{
- return _mlx5_fs_is_outer_ipproto_flow(match_c, match_v, IPPROTO_UDP);
-}
-
-static inline bool mlx5_fs_is_vxlan_flow(const u32 *match_c)
-{
- void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
- misc_parameters);
-
- return MLX5_GET(fte_match_set_misc, misc_params_c, vxlan_vni);
-}
-
static inline bool _mlx5_fs_is_outer_ipv_flow(struct mlx5_core_dev *mdev,
const u32 *match_c,
const u32 *match_v, int version)
@@ -131,12 +91,4 @@ mlx5_fs_is_outer_ipv6_flow(struct mlx5_core_dev *mdev, const u32 *match_c,
MLX5_FS_IPV6_VERSION);
}
-static inline bool mlx5_fs_is_outer_ipsec_flow(const u32 *match_c)
-{
- void *misc_params_c =
- MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters);
-
- return MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
-}
-
#endif
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index bfdf41537cf1..5a4e914e2a6f 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -64,16 +64,11 @@ enum {
};
enum {
- MLX5_MODIFY_TIR_BITMASK_LRO = 0x0,
- MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE = 0x1,
- MLX5_MODIFY_TIR_BITMASK_HASH = 0x2,
- MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN = 0x3
-};
-
-enum {
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0,
MLX5_SET_HCA_CAP_OP_MOD_ODP = 0x2,
MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3,
+ MLX5_SET_HCA_CAP_OP_MOD_ROCE = 0x4,
+ MLX5_SET_HCA_CAP_OP_MODE_PORT_SELECTION = 0x25,
};
enum {
@@ -88,10 +83,15 @@ enum {
MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM),
MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11),
MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q = (1ULL << 13),
+ MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD = (1ULL << 39),
};
enum {
MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
+ MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d,
+ MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS = 0x001c,
+ MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018,
+ MLX5_OBJ_TYPE_PAGE_TRACK = 0x46,
MLX5_OBJ_TYPE_MKEY = 0xff01,
MLX5_OBJ_TYPE_QP = 0xff02,
MLX5_OBJ_TYPE_PSV = 0xff03,
@@ -124,6 +124,11 @@ enum {
MLX5_CMD_OP_QUERY_SF_PARTITION = 0x111,
MLX5_CMD_OP_ALLOC_SF = 0x113,
MLX5_CMD_OP_DEALLOC_SF = 0x114,
+ MLX5_CMD_OP_SUSPEND_VHCA = 0x115,
+ MLX5_CMD_OP_RESUME_VHCA = 0x116,
+ MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE = 0x117,
+ MLX5_CMD_OP_SAVE_VHCA_STATE = 0x118,
+ MLX5_CMD_OP_LOAD_VHCA_STATE = 0x119,
MLX5_CMD_OP_CREATE_MKEY = 0x200,
MLX5_CMD_OP_QUERY_MKEY = 0x201,
MLX5_CMD_OP_DESTROY_MKEY = 0x202,
@@ -131,6 +136,7 @@ enum {
MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204,
MLX5_CMD_OP_ALLOC_MEMIC = 0x205,
MLX5_CMD_OP_DEALLOC_MEMIC = 0x206,
+ MLX5_CMD_OP_MODIFY_MEMIC = 0x207,
MLX5_CMD_OP_CREATE_EQ = 0x301,
MLX5_CMD_OP_DESTROY_EQ = 0x302,
MLX5_CMD_OP_QUERY_EQ = 0x303,
@@ -297,6 +303,8 @@ enum {
MLX5_CMD_OP_CREATE_UMEM = 0xa08,
MLX5_CMD_OP_DESTROY_UMEM = 0xa0a,
MLX5_CMD_OP_SYNC_STEERING = 0xb00,
+ MLX5_CMD_OP_QUERY_VHCA_STATE = 0xb0d,
+ MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e,
MLX5_CMD_OP_MAX
};
@@ -337,7 +345,7 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 outer_geneve_oam[0x1];
u8 outer_geneve_protocol_type[0x1];
u8 outer_geneve_opt_len[0x1];
- u8 reserved_at_1e[0x1];
+ u8 source_vhca_port[0x1];
u8 source_eswitch_port[0x1];
u8 inner_dmac[0x1];
@@ -366,7 +374,8 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 reserved_at_37[0x9];
u8 geneve_tlv_option_0_data[0x1];
- u8 reserved_at_41[0x4];
+ u8 geneve_tlv_option_0_exist[0x1];
+ u8 reserved_at_42[0x3];
u8 outer_first_mpls_over_udp[0x4];
u8 outer_first_mpls_over_gre[0x4];
u8 inner_first_mpls[0x4];
@@ -388,6 +397,14 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 metadata_reg_c_0[0x1];
};
+struct mlx5_ifc_flow_table_fields_supported_2_bits {
+ u8 reserved_at_0[0xe];
+ u8 bth_opcode[0x1];
+ u8 reserved_at_f[0x11];
+
+ u8 reserved_at_20[0x60];
+};
+
struct mlx5_ifc_flow_table_prop_layout_bits {
u8 ft_support[0x1];
u8 reserved_at_1[0x1];
@@ -414,23 +431,40 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reserved_at_16[0x1];
u8 table_miss_action_domain[0x1];
u8 termination_table[0x1];
- u8 reserved_at_19[0x7];
- u8 reserved_at_20[0x2];
+ u8 reformat_and_fwd_to_table[0x1];
+ u8 reserved_at_1a[0x2];
+ u8 ipsec_encrypt[0x1];
+ u8 ipsec_decrypt[0x1];
+ u8 sw_owner_v2[0x1];
+ u8 reserved_at_1f[0x1];
+
+ u8 termination_table_raw_traffic[0x1];
+ u8 reserved_at_21[0x1];
u8 log_max_ft_size[0x6];
u8 log_max_modify_header_context[0x8];
u8 max_modify_header_actions[0x8];
u8 max_ft_level[0x8];
- u8 reserved_at_40[0x20];
+ u8 reserved_at_40[0x6];
+ u8 execute_aso[0x1];
+ u8 reserved_at_47[0x19];
- u8 reserved_at_60[0x18];
+ u8 reserved_at_60[0x2];
+ u8 reformat_insert[0x1];
+ u8 reformat_remove[0x1];
+ u8 macsec_encrypt[0x1];
+ u8 macsec_decrypt[0x1];
+ u8 reserved_at_66[0x2];
+ u8 reformat_add_macsec[0x1];
+ u8 reformat_remove_macsec[0x1];
+ u8 reserved_at_6a[0xe];
u8 log_max_ft_num[0x8];
- u8 reserved_at_80[0x18];
+ u8 reserved_at_80[0x10];
+ u8 log_max_flow_counter[0x8];
u8 log_max_destination[0x8];
- u8 log_max_flow_counter[0x8];
- u8 reserved_at_a8[0x10];
+ u8 reserved_at_a0[0x18];
u8 log_max_flow[0x8];
u8 reserved_at_c0[0x40];
@@ -450,6 +484,22 @@ struct mlx5_ifc_odp_per_transport_service_cap_bits {
u8 reserved_at_6[0x1a];
};
+struct mlx5_ifc_ipv4_layout_bits {
+ u8 reserved_at_0[0x60];
+
+ u8 ipv4[0x20];
+};
+
+struct mlx5_ifc_ipv6_layout_bits {
+ u8 ipv6[16][0x8];
+};
+
+union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
+ struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
+ struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
+ u8 reserved_at_0[0x80];
+};
+
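A sketch of how this union is addressed through the accessor macros when matching an exact outer IPv4 destination, assuming headers_c/headers_v point at the criteria and value copies of fte_match_set_lyr_2_4 (whose dst_ipv4_dst_ipv6 member carries the union type above):

	MLX5_SET(fte_match_set_lyr_2_4, headers_c,
		 dst_ipv4_dst_ipv6.ipv4_layout.ipv4, 0xffffffff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v,
		 dst_ipv4_dst_ipv6.ipv4_layout.ipv4, be32_to_cpu(daddr));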
struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 smac_47_16[0x20];
@@ -475,7 +525,10 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 tcp_sport[0x10];
u8 tcp_dport[0x10];
- u8 reserved_at_c0[0x18];
+ u8 reserved_at_c0[0x10];
+ u8 ipv4_ihl[0x4];
+ u8 reserved_at_c4[0x4];
+
u8 ttl_hoplimit[0x8];
u8 udp_sport[0x10];
@@ -524,10 +577,11 @@ struct mlx5_ifc_fte_match_set_misc_bits {
union mlx5_ifc_gre_key_bits gre_key;
u8 vxlan_vni[0x18];
- u8 reserved_at_b8[0x8];
+ u8 bth_opcode[0x8];
u8 geneve_vni[0x18];
- u8 reserved_at_d8[0x7];
+ u8 reserved_at_d8[0x6];
+ u8 geneve_tlv_option_0_exist[0x1];
u8 geneve_oam[0x1];
u8 reserved_at_e0[0xc];
@@ -581,9 +635,11 @@ struct mlx5_ifc_fte_match_set_misc2_bits {
u8 metadata_reg_a[0x20];
- u8 metadata_reg_b[0x20];
+ u8 reserved_at_1a0[0x8];
- u8 reserved_at_1c0[0x40];
+ u8 macsec_syndrome[0x8];
+
+ u8 reserved_at_1b0[0x50];
};
struct mlx5_ifc_fte_match_set_misc3_bits {
@@ -613,7 +669,59 @@ struct mlx5_ifc_fte_match_set_misc3_bits {
u8 geneve_tlv_option_0_data[0x20];
- u8 reserved_at_140[0xc0];
+ u8 gtpu_teid[0x20];
+
+ u8 gtpu_msg_type[0x8];
+ u8 gtpu_msg_flags[0x8];
+ u8 reserved_at_170[0x10];
+
+ u8 gtpu_dw_2[0x20];
+
+ u8 gtpu_first_ext_dw_0[0x20];
+
+ u8 gtpu_dw_0[0x20];
+
+ u8 reserved_at_1e0[0x20];
+};
+
+struct mlx5_ifc_fte_match_set_misc4_bits {
+ u8 prog_sample_field_value_0[0x20];
+
+ u8 prog_sample_field_id_0[0x20];
+
+ u8 prog_sample_field_value_1[0x20];
+
+ u8 prog_sample_field_id_1[0x20];
+
+ u8 prog_sample_field_value_2[0x20];
+
+ u8 prog_sample_field_id_2[0x20];
+
+ u8 prog_sample_field_value_3[0x20];
+
+ u8 prog_sample_field_id_3[0x20];
+
+ u8 reserved_at_100[0x100];
+};
+
+struct mlx5_ifc_fte_match_set_misc5_bits {
+ u8 macsec_tag_0[0x20];
+
+ u8 macsec_tag_1[0x20];
+
+ u8 macsec_tag_2[0x20];
+
+ u8 macsec_tag_3[0x20];
+
+ u8 tunnel_header_0[0x20];
+
+ u8 tunnel_header_1[0x20];
+
+ u8 tunnel_header_2[0x20];
+
+ u8 tunnel_header_3[0x20];
+
+ u8 reserved_at_100[0x100];
};
struct mlx5_ifc_cmd_pas_bits {
@@ -707,11 +815,19 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit;
- u8 reserved_at_a00[0x200];
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_rdma;
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
- u8 reserved_at_e00[0x1200];
+ u8 reserved_at_e00[0x700];
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_receive_rdma;
+
+ u8 reserved_at_1580[0x280];
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_transmit_rdma;
+
+ u8 reserved_at_1880[0x780];
u8 sw_steering_nic_rx_action_drop_icm_address[0x40];
@@ -722,6 +838,20 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
u8 reserved_at_20c0[0x5f40];
};
+struct mlx5_ifc_port_selection_cap_bits {
+ u8 reserved_at_0[0x10];
+ u8 port_select_flow_table[0x1];
+ u8 reserved_at_11[0x1];
+ u8 port_select_flow_table_bypass[0x1];
+ u8 reserved_at_13[0xd];
+
+ u8 reserved_at_20[0x1e0];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_port_selection;
+
+ u8 reserved_at_400[0x7c00];
+};
+
enum {
MLX5_FDB_TO_VPORT_REG_C_0 = 0x01,
MLX5_FDB_TO_VPORT_REG_C_1 = 0x02,
@@ -737,11 +867,11 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 fdb_to_vport_reg_c_id[0x8];
u8 reserved_at_8[0xd];
u8 fdb_modify_header_fwd_to_table[0x1];
- u8 reserved_at_16[0x1];
+ u8 fdb_ipv4_ttl_modify[0x1];
u8 flow_source[0x1];
u8 reserved_at_18[0x2];
u8 multi_fdb_encap[0x1];
- u8 reserved_at_1b[0x1];
+ u8 egress_acl_forward_to_vport[0x1];
u8 fdb_multi_path_to_table[0x1];
u8 reserved_at_1d[0x3];
@@ -777,9 +907,11 @@ struct mlx5_ifc_e_switch_cap_bits {
u8 vport_svlan_insert[0x1];
u8 vport_cvlan_insert_if_not_exist[0x1];
u8 vport_cvlan_insert_overwrite[0x1];
- u8 reserved_at_5[0x3];
+ u8 reserved_at_5[0x2];
+ u8 esw_shared_ingress_acl[0x1];
u8 esw_uplink_ingress_acl[0x1];
- u8 reserved_at_9[0x10];
+ u8 root_ft_on_other_esw[0x1];
+ u8 reserved_at_a[0xf];
u8 esw_functions_changed[0x1];
u8 reserved_at_1a[0x1];
u8 ecpf_vport_exists[0x1];
@@ -813,9 +945,17 @@ struct mlx5_ifc_qos_cap_bits {
u8 reserved_at_4[0x1];
u8 packet_pacing_burst_bound[0x1];
u8 packet_pacing_typical_size[0x1];
- u8 reserved_at_7[0x19];
+ u8 reserved_at_7[0x1];
+ u8 nic_sq_scheduling[0x1];
+ u8 nic_bw_share[0x1];
+ u8 nic_rate_limit[0x1];
+ u8 packet_pacing_uid[0x1];
+ u8 log_esw_max_sched_depth[0x4];
+ u8 reserved_at_10[0x10];
- u8 reserved_at_20[0x20];
+ u8 reserved_at_20[0xb];
+ u8 log_max_qos_nic_queue_group[0x5];
+ u8 reserved_at_30[0x10];
u8 packet_pacing_max_rate[0x20];
@@ -832,7 +972,17 @@ struct mlx5_ifc_qos_cap_bits {
u8 max_tsar_bw_share[0x20];
- u8 reserved_at_100[0x700];
+ u8 reserved_at_100[0x20];
+
+ u8 reserved_at_120[0x3];
+ u8 log_meter_aso_granularity[0x5];
+ u8 reserved_at_128[0x3];
+ u8 log_meter_aso_max_alloc[0x5];
+ u8 reserved_at_130[0x3];
+ u8 log_max_num_meter_aso[0x5];
+ u8 reserved_at_138[0x8];
+
+ u8 reserved_at_140[0x6c0];
};
struct mlx5_ifc_debug_cap_bits {
@@ -867,7 +1017,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 scatter_fcs[0x1];
u8 enhanced_multi_pkt_send_wqe[0x1];
u8 tunnel_lso_const_out_ip_id[0x1];
- u8 reserved_at_1c[0x2];
+ u8 tunnel_lro_gre[0x1];
+ u8 tunnel_lro_vxlan[0x1];
u8 tunnel_stateless_gre[0x1];
u8 tunnel_stateless_vxlan[0x1];
@@ -875,9 +1026,17 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 swp_csum[0x1];
u8 swp_lso[0x1];
u8 cqe_checksum_full[0x1];
- u8 reserved_at_24[0x5];
+ u8 tunnel_stateless_geneve_tx[0x1];
+ u8 tunnel_stateless_mpls_over_udp[0x1];
+ u8 tunnel_stateless_mpls_over_gre[0x1];
+ u8 tunnel_stateless_vxlan_gpe[0x1];
+ u8 tunnel_stateless_ipv4_over_vxlan[0x1];
u8 tunnel_stateless_ip_over_ip[0x1];
- u8 reserved_at_2a[0x6];
+ u8 insert_trailer[0x1];
+ u8 reserved_at_2b[0x1];
+ u8 tunnel_stateless_ip_over_ip_rx[0x1];
+ u8 tunnel_stateless_ip_over_ip_tx[0x1];
+ u8 reserved_at_2e[0x2];
u8 max_vxlan_udp_ports[0x8];
u8 reserved_at_38[0x6];
u8 max_geneve_opt_len[0x1];
@@ -893,9 +1052,20 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 reserved_at_200[0x600];
};
+enum {
+ MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0,
+ MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1,
+ MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2,
+};
+
struct mlx5_ifc_roce_cap_bits {
u8 roce_apm[0x1];
- u8 reserved_at_1[0x1f];
+ u8 reserved_at_1[0x3];
+ u8 sw_r_roce_src_udp_port[0x1];
+ u8 fl_rc_qp_when_roce_disabled[0x1];
+ u8 fl_rc_qp_when_roce_enabled[0x1];
+ u8 reserved_at_7[0x17];
+ u8 qp_ts_format[0x2];
u8 reserved_at_20[0x60];
@@ -958,11 +1128,18 @@ struct mlx5_ifc_device_mem_cap_bits {
u8 log_sw_icm_alloc_granularity[0x6];
u8 log_steering_sw_icm_size[0x8];
- u8 reserved_at_120[0x20];
+ u8 reserved_at_120[0x18];
+ u8 log_header_modify_pattern_sw_icm_size[0x8];
u8 header_modify_sw_icm_start_address[0x40];
- u8 reserved_at_180[0x680];
+ u8 reserved_at_180[0x40];
+
+ u8 header_modify_pattern_sw_icm_start_address[0x40];
+
+ u8 memic_operations[0x20];
+
+ u8 reserved_at_220[0x5e0];
};
struct mlx5_ifc_device_event_cap_bits {
@@ -971,17 +1148,40 @@ struct mlx5_ifc_device_event_cap_bits {
u8 user_unaffiliated_events[4][0x40];
};
-struct mlx5_ifc_device_virtio_emulation_cap_bits {
- u8 reserved_at_0[0x20];
+struct mlx5_ifc_virtio_emulation_cap_bits {
+ u8 desc_tunnel_offload_type[0x1];
+ u8 eth_frame_offload_type[0x1];
+ u8 virtio_version_1_0[0x1];
+ u8 device_features_bits_mask[0xd];
+ u8 event_mode[0x8];
+ u8 virtio_queue_type[0x8];
- u8 reserved_at_20[0x13];
+ u8 max_tunnel_desc[0x10];
+ u8 reserved_at_30[0x3];
u8 log_doorbell_stride[0x5];
u8 reserved_at_38[0x3];
u8 log_doorbell_bar_size[0x5];
u8 doorbell_bar_offset[0x40];
- u8 reserved_at_80[0x780];
+ u8 max_emulated_devices[0x8];
+ u8 max_num_virtio_queues[0x18];
+
+ u8 reserved_at_a0[0x60];
+
+ u8 umem_1_buffer_param_a[0x20];
+
+ u8 umem_1_buffer_param_b[0x20];
+
+ u8 umem_2_buffer_param_a[0x20];
+
+ u8 umem_2_buffer_param_b[0x20];
+
+ u8 umem_3_buffer_param_a[0x20];
+
+ u8 umem_3_buffer_param_b[0x20];
+
+ u8 reserved_at_1c0[0x640];
};
enum {
@@ -1089,6 +1289,41 @@ struct mlx5_ifc_tls_cap_bits {
u8 reserved_at_20[0x7e0];
};
+struct mlx5_ifc_ipsec_cap_bits {
+ u8 ipsec_full_offload[0x1];
+ u8 ipsec_crypto_offload[0x1];
+ u8 ipsec_esn[0x1];
+ u8 ipsec_crypto_esp_aes_gcm_256_encrypt[0x1];
+ u8 ipsec_crypto_esp_aes_gcm_128_encrypt[0x1];
+ u8 ipsec_crypto_esp_aes_gcm_256_decrypt[0x1];
+ u8 ipsec_crypto_esp_aes_gcm_128_decrypt[0x1];
+ u8 reserved_at_7[0x4];
+ u8 log_max_ipsec_offload[0x5];
+ u8 reserved_at_10[0x10];
+
+ u8 min_log_ipsec_full_replay_window[0x8];
+ u8 max_log_ipsec_full_replay_window[0x8];
+ u8 reserved_at_30[0x7d0];
+};
+
+struct mlx5_ifc_macsec_cap_bits {
+ u8 macsec_epn[0x1];
+ u8 reserved_at_1[0x2];
+ u8 macsec_crypto_esp_aes_gcm_256_encrypt[0x1];
+ u8 macsec_crypto_esp_aes_gcm_128_encrypt[0x1];
+ u8 macsec_crypto_esp_aes_gcm_256_decrypt[0x1];
+ u8 macsec_crypto_esp_aes_gcm_128_decrypt[0x1];
+ u8 reserved_at_7[0x4];
+ u8 log_max_macsec_offload[0x5];
+ u8 reserved_at_10[0x10];
+
+ u8 min_log_macsec_full_replay_window[0x8];
+ u8 max_log_macsec_full_replay_window[0x8];
+ u8 reserved_at_30[0x10];
+
+ u8 reserved_at_40[0x7c0];
+};
+
enum {
MLX5_WQ_TYPE_LINKED_LIST = 0x0,
MLX5_WQ_TYPE_CYCLIC = 0x1,
@@ -1142,9 +1377,17 @@ enum {
enum {
MLX5_FLEX_PARSER_GENEVE_ENABLED = 1 << 3,
+ MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED = 1 << 4,
+ MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED = 1 << 5,
MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7,
MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8,
MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9,
+ MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED = 1 << 10,
+ MLX5_FLEX_PARSER_GTPU_ENABLED = 1 << 11,
+ MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED = 1 << 16,
+ MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED = 1 << 17,
+ MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED = 1 << 18,
+ MLX5_FLEX_PARSER_GTPU_TEID_ENABLED = 1 << 19,
};
enum {
@@ -1167,8 +1410,29 @@ enum mlx5_fc_bulk_alloc_bitmask {
#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
+#define MLX5_FT_MAX_MULTIPATH_LEVEL 63
+
+enum {
+ MLX5_STEERING_FORMAT_CONNECTX_5 = 0,
+ MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
+ MLX5_STEERING_FORMAT_CONNECTX_7 = 2,
+};
+
struct mlx5_ifc_cmd_hca_cap_bits {
- u8 reserved_at_0[0x30];
+ u8 reserved_at_0[0x10];
+ u8 shared_object_to_user_object_allowed[0x1];
+ u8 reserved_at_13[0xe];
+ u8 vhca_resource_manager[0x1];
+
+ u8 hca_cap_2[0x1];
+ u8 create_lag_when_not_master_up[0x1];
+ u8 dtor[0x1];
+ u8 event_on_vhca_state_teardown_request[0x1];
+ u8 event_on_vhca_state_in_use[0x1];
+ u8 event_on_vhca_state_active[0x1];
+ u8 event_on_vhca_state_allocated[0x1];
+ u8 event_on_vhca_state_invalid[0x1];
+ u8 reserved_at_28[0x8];
u8 vhca_id[0x10];
u8 reserved_at_40[0x40];
@@ -1176,18 +1440,33 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_srq_sz[0x8];
u8 log_max_qp_sz[0x8];
u8 event_cap[0x1];
- u8 reserved_at_91[0x7];
+ u8 reserved_at_91[0x2];
+ u8 isolate_vl_tc_new[0x1];
+ u8 reserved_at_94[0x4];
u8 prio_tag_required[0x1];
u8 reserved_at_99[0x2];
u8 log_max_qp[0x5];
- u8 reserved_at_a0[0xb];
+ u8 reserved_at_a0[0x3];
+ u8 ece_support[0x1];
+ u8 reserved_at_a4[0x5];
+ u8 reg_c_preserve[0x1];
+ u8 reserved_at_aa[0x1];
u8 log_max_srq[0x5];
- u8 reserved_at_b0[0x10];
+ u8 reserved_at_b0[0x1];
+ u8 uplink_follow[0x1];
+ u8 ts_cqe_to_dest_cqn[0x1];
+ u8 reserved_at_b3[0x7];
+ u8 shampo[0x1];
+ u8 reserved_at_bb[0x5];
u8 max_sgl_for_optimized_performance[0x8];
u8 log_max_cq_sz[0x8];
- u8 reserved_at_d0[0xb];
+ u8 relaxed_ordering_write_umr[0x1];
+ u8 relaxed_ordering_read_umr[0x1];
+ u8 reserved_at_d2[0x7];
+ u8 virtio_net_device_emualtion_manager[0x1];
+ u8 virtio_blk_device_emualtion_manager[0x1];
u8 log_max_cq[0x5];
u8 log_max_eq_sz[0x8];
@@ -1212,10 +1491,16 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_120[0xa];
u8 log_max_ra_req_dc[0x6];
- u8 reserved_at_130[0xa];
+ u8 reserved_at_130[0x2];
+ u8 eth_wqe_too_small[0x1];
+ u8 reserved_at_133[0x6];
+ u8 vnic_env_cq_overrun[0x1];
u8 log_max_ra_res_dc[0x6];
- u8 reserved_at_140[0x9];
+ u8 reserved_at_140[0x5];
+ u8 release_all_pages[0x1];
+ u8 must_not_use[0x1];
+ u8 reserved_at_147[0x2];
u8 roce_accl[0x1];
u8 log_max_ra_req_qp[0x6];
u8 reserved_at_150[0xa];
@@ -1288,7 +1573,11 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 wol_p[0x1];
u8 stat_rate_support[0x10];
- u8 reserved_at_1f0[0xc];
+ u8 reserved_at_1f0[0x1];
+ u8 pci_sync_for_fw_update_event[0x1];
+ u8 reserved_at_1f2[0x6];
+ u8 init2_lag_tx_port_affinity[0x1];
+ u8 reserved_at_1fa[0x3];
u8 cqe_version[0x4];
u8 compact_address_vector[0x1];
@@ -1351,18 +1640,26 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 uar_4k[0x1];
u8 reserved_at_241[0x9];
u8 uar_sz[0x6];
- u8 reserved_at_250[0x8];
+ u8 port_selection_cap[0x1];
+ u8 reserved_at_248[0x1];
+ u8 umem_uid_0[0x1];
+ u8 reserved_at_250[0x5];
u8 log_pg_sz[0x8];
u8 bf[0x1];
u8 driver_version[0x1];
u8 pad_tx_eth_packet[0x1];
- u8 reserved_at_263[0x8];
+ u8 reserved_at_263[0x3];
+ u8 mkey_by_name[0x1];
+ u8 reserved_at_267[0x4];
+
u8 log_bf_reg_size[0x5];
- u8 reserved_at_270[0x8];
+ u8 reserved_at_270[0x6];
+ u8 lag_dct[0x2];
u8 lag_tx_port_affinity[0x1];
- u8 reserved_at_279[0x2];
+ u8 lag_native_fdb_selection[0x1];
+ u8 reserved_at_27a[0x1];
u8 lag_master[0x1];
u8 num_lag_ports[0x4];
@@ -1392,7 +1689,11 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 nic_receive_steering_discard[0x1];
u8 receive_discard_vport_down[0x1];
u8 transmit_discard_vport_down[0x1];
- u8 reserved_at_343[0x5];
+ u8 eq_overrun_count[0x1];
+ u8 reserved_at_344[0x1];
+ u8 invalid_command_count[0x1];
+ u8 quota_exceeded_count[0x1];
+ u8 reserved_at_347[0x1];
u8 log_max_flow_counter_bulk[0x8];
u8 max_flow_counter_15_0[0x10];
@@ -1417,7 +1718,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_tis_per_sq[0x5];
u8 ext_stride_num_range[0x1];
- u8 reserved_at_3a1[0x2];
+ u8 roce_rw_supported[0x1];
+ u8 log_max_current_uc_list_wr_supported[0x1];
u8 log_max_stride_sz_rq[0x5];
u8 reserved_at_3a8[0x3];
u8 log_min_stride_sz_rq[0x5];
@@ -1440,7 +1742,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 disable_local_lb_uc[0x1];
u8 disable_local_lb_mc[0x1];
u8 log_min_hairpin_wq_data_sz[0x5];
- u8 reserved_at_3e8[0x3];
+ u8 reserved_at_3e8[0x2];
+ u8 vhca_state[0x1];
u8 log_max_vlan_list[0x5];
u8 reserved_at_3f0[0x3];
u8 log_max_current_mc_list[0x5];
@@ -1449,17 +1752,23 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 general_obj_types[0x40];
- u8 reserved_at_440[0x20];
+ u8 sq_ts_format[0x2];
+ u8 rq_ts_format[0x2];
+ u8 steering_format_version[0x4];
+ u8 create_qp_start_hint[0x18];
- u8 reserved_at_460[0x3];
+ u8 reserved_at_460[0x1];
+ u8 ats[0x1];
+ u8 reserved_at_462[0x1];
u8 log_max_uctx[0x5];
- u8 reserved_at_468[0x3];
+ u8 reserved_at_468[0x2];
+ u8 ipsec_offload[0x1];
u8 log_max_umem[0x5];
u8 max_num_eqs[0x10];
u8 reserved_at_480[0x1];
u8 tls_tx[0x1];
- u8 reserved_at_482[0x1];
+ u8 tls_rx[0x1];
u8 log_max_l2_table[0x5];
u8 reserved_at_488[0x8];
u8 log_uar_page_sz[0x10];
@@ -1476,9 +1785,19 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 max_geneve_tlv_options[0x8];
u8 reserved_at_568[0x3];
u8 max_geneve_tlv_option_data_len[0x5];
- u8 reserved_at_570[0x10];
-
- u8 reserved_at_580[0x33];
+ u8 reserved_at_570[0x9];
+ u8 adv_virtualization[0x1];
+ u8 reserved_at_57a[0x6];
+
+ u8 reserved_at_580[0xb];
+ u8 log_max_dci_stream_channels[0x5];
+ u8 reserved_at_590[0x3];
+ u8 log_max_dci_errored_streams[0x5];
+ u8 reserved_at_598[0x8];
+
+ u8 reserved_at_5a0[0x10];
+ u8 enhanced_cqe_compression[0x1];
+ u8 reserved_at_5b1[0x2];
u8 log_max_dek[0x5];
u8 reserved_at_5b8[0x4];
u8 mini_cqe_resp_stride_index[0x1];
@@ -1489,7 +1808,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 cqe_compression_timeout[0x10];
u8 cqe_compression_max_num[0x10];
- u8 reserved_at_5e0[0x10];
+ u8 reserved_at_5e0[0x8];
+ u8 flex_parser_id_gtpu_dw_0[0x4];
+ u8 reserved_at_5ec[0x4];
u8 tag_matching[0x1];
u8 rndv_offload_rc[0x1];
u8 rndv_offload_dc[0x1];
@@ -1500,14 +1821,15 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 affiliate_nic_vport_criteria[0x8];
u8 native_port_num[0x8];
u8 num_vhca_ports[0x8];
- u8 reserved_at_618[0x6];
+ u8 flex_parser_id_gtpu_teid[0x4];
+ u8 reserved_at_61c[0x2];
u8 sw_owner_id[0x1];
u8 reserved_at_61f[0x1];
u8 max_num_of_monitor_counters[0x10];
u8 num_ppcnt_monitor_counters[0x10];
- u8 reserved_at_640[0x10];
+ u8 max_num_sf[0x10];
u8 num_q_monitor_counters[0x10];
u8 reserved_at_660[0x20];
@@ -1516,7 +1838,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 sf_set_partition[0x1];
u8 reserved_at_682[0x1];
u8 log_max_sf[0x5];
- u8 reserved_at_688[0x8];
+ u8 apu[0x1];
+ u8 reserved_at_689[0x4];
+ u8 migration[0x1];
+ u8 reserved_at_68e[0x2];
u8 log_min_sf_size[0x8];
u8 max_num_sf_partitions[0x8];
@@ -1531,22 +1856,58 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 flex_parser_id_outer_first_mpls_over_gre[0x4];
u8 flex_parser_id_outer_first_mpls_over_udp_label[0x4];
- u8 reserved_at_6e0[0x10];
+ u8 max_num_match_definer[0x10];
u8 sf_base_id[0x10];
- u8 reserved_at_700[0x80];
+ u8 flex_parser_id_gtpu_dw_2[0x4];
+ u8 flex_parser_id_gtpu_first_ext_dw_0[0x4];
+ u8 num_total_dynamic_vf_msix[0x18];
+ u8 reserved_at_720[0x14];
+ u8 dynamic_msix_table_size[0xc];
+ u8 reserved_at_740[0xc];
+ u8 min_dynamic_vf_msix_table_size[0x4];
+ u8 reserved_at_750[0x4];
+ u8 max_dynamic_vf_msix_table_size[0xc];
+
+ u8 reserved_at_760[0x20];
u8 vhca_tunnel_commands[0x40];
- u8 reserved_at_7c0[0x40];
+ u8 match_definer_format_supported[0x40];
};
-enum mlx5_flow_destination_type {
- MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
- MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
- MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
+struct mlx5_ifc_cmd_hca_cap_2_bits {
+ u8 reserved_at_0[0xa0];
- MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99,
- MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100,
- MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM = 0x101,
+ u8 max_reformat_insert_size[0x8];
+ u8 max_reformat_insert_offset[0x8];
+ u8 max_reformat_remove_size[0x8];
+ u8 max_reformat_remove_offset[0x8];
+
+ u8 reserved_at_c0[0xe0];
+
+ u8 reserved_at_1a0[0xb];
+ u8 log_min_mkey_entity_size[0x5];
+ u8 reserved_at_1b0[0x10];
+
+ u8 reserved_at_1c0[0x60];
+
+ u8 reserved_at_220[0x1];
+ u8 sw_vhca_id_valid[0x1];
+ u8 sw_vhca_id[0xe];
+ u8 reserved_at_230[0x10];
+
+ u8 reserved_at_240[0xb];
+ u8 ts_cqe_metadata_size2wqe_counter[0x5];
+ u8 reserved_at_250[0x10];
+
+ u8 reserved_at_260[0x5a0];
+};
+
+enum mlx5_ifc_flow_destination_type {
+ MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT = 0x0,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_TIR = 0x2,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK = 0x8,
};
enum mlx5_flow_table_miss_action {
@@ -1595,7 +1956,11 @@ struct mlx5_ifc_fte_match_param_bits {
struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3;
- u8 reserved_at_a00[0x600];
+ struct mlx5_ifc_fte_match_set_misc4_bits misc_parameters_4;
+
+ struct mlx5_ifc_fte_match_set_misc5_bits misc_parameters_5;
+
+ u8 reserved_at_e00[0x200];
};
enum {
@@ -1667,9 +2032,23 @@ struct mlx5_ifc_wq_bits {
u8 reserved_at_139[0x4];
u8 log_wqe_stride_size[0x3];
- u8 reserved_at_140[0x4c0];
+ u8 reserved_at_140[0x80];
+
+ u8 headers_mkey[0x20];
+
+ u8 shampo_enable[0x1];
+ u8 reserved_at_1e1[0x4];
+ u8 log_reservation_size[0x3];
+ u8 reserved_at_1e8[0x5];
+ u8 log_max_num_of_packets_per_reservation[0x3];
+ u8 reserved_at_1f0[0x6];
+ u8 log_headers_entry_size[0x2];
+ u8 reserved_at_1f8[0x4];
+ u8 log_headers_buffer_entry_num[0x4];
+
+ u8 reserved_at_200[0x400];
- struct mlx5_ifc_cmd_pas_bits pas[0];
+ struct mlx5_ifc_cmd_pas_bits pas[];
};
struct mlx5_ifc_rq_num_bits {
@@ -1887,7 +2266,7 @@ struct mlx5_ifc_resource_dump_menu_segment_bits {
u8 reserved_at_20[0x10];
u8 num_of_records[0x10];
- struct mlx5_ifc_resource_dump_menu_record_bits record[0];
+ struct mlx5_ifc_resource_dump_menu_record_bits record[];
};
struct mlx5_ifc_resource_dump_resource_segment_bits {
@@ -1899,7 +2278,7 @@ struct mlx5_ifc_resource_dump_resource_segment_bits {
u8 index2[0x20];
- u8 payload[0][0x20];
+ u8 payload[][0x20];
};
struct mlx5_ifc_resource_dump_terminate_segment_bits {
@@ -2598,6 +2977,40 @@ struct mlx5_ifc_dropped_packet_logged_bits {
u8 reserved_at_0[0xe0];
};
+struct mlx5_ifc_default_timeout_bits {
+ u8 to_multiplier[0x3];
+ u8 reserved_at_3[0x9];
+ u8 to_value[0x14];
+};
+
+struct mlx5_ifc_dtor_reg_bits {
+ u8 reserved_at_0[0x20];
+
+ struct mlx5_ifc_default_timeout_bits pcie_toggle_to;
+
+ u8 reserved_at_40[0x60];
+
+ struct mlx5_ifc_default_timeout_bits health_poll_to;
+
+ struct mlx5_ifc_default_timeout_bits full_crdump_to;
+
+ struct mlx5_ifc_default_timeout_bits fw_reset_to;
+
+ struct mlx5_ifc_default_timeout_bits flush_on_err_to;
+
+ struct mlx5_ifc_default_timeout_bits pci_sync_update_to;
+
+ struct mlx5_ifc_default_timeout_bits tear_down_to;
+
+ struct mlx5_ifc_default_timeout_bits fsm_reactivate_to;
+
+ struct mlx5_ifc_default_timeout_bits reclaim_pages_to;
+
+ struct mlx5_ifc_default_timeout_bits reclaim_vfs_pages_to;
+
+ u8 reserved_at_1c0[0x40];
+};
+
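A hedged sketch of reading one of these firmware default timeouts after querying the DTOR register (out is assumed to hold the dtor_reg layout above):

	void *field = MLX5_ADDR_OF(dtor_reg, out, fw_reset_to);
	u32 to_value = MLX5_GET(default_timeout, field, to_value);
	u32 to_mult = MLX5_GET(default_timeout, field, to_multiplier);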
enum {
MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1,
MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2,
@@ -2747,11 +3160,18 @@ enum {
MLX5_QPC_CS_RES_UP_TO_64B = 0x2,
};
+enum {
+ MLX5_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0,
+ MLX5_TIMESTAMP_FORMAT_DEFAULT = 0x1,
+ MLX5_TIMESTAMP_FORMAT_REAL_TIME = 0x2,
+};
+
struct mlx5_ifc_qpc_bits {
u8 state[0x4];
u8 lag_tx_port_affinity[0x4];
u8 st[0x8];
- u8 reserved_at_10[0x3];
+ u8 reserved_at_10[0x2];
+ u8 isolate_vl_tc[0x1];
u8 pm_state[0x2];
u8 reserved_at_15[0x1];
u8 req_e2e_credit_mode[0x2];
@@ -2775,7 +3195,9 @@ struct mlx5_ifc_qpc_bits {
u8 log_rq_stride[0x3];
u8 no_sq[0x1];
u8 log_sq_size[0x4];
- u8 reserved_at_55[0x6];
+ u8 reserved_at_55[0x3];
+ u8 ts_format[0x2];
+ u8 reserved_at_5a[0x1];
u8 rlky[0x1];
u8 ulp_stateless_offload_mode[0x4];
@@ -2810,10 +3232,12 @@ struct mlx5_ifc_qpc_bits {
u8 reserved_at_3c0[0x8];
u8 next_send_psn[0x18];
- u8 reserved_at_3e0[0x8];
+ u8 reserved_at_3e0[0x3];
+ u8 log_num_dci_stream_channels[0x5];
u8 cqn_snd[0x18];
- u8 reserved_at_400[0x8];
+ u8 reserved_at_400[0x3];
+ u8 log_num_dci_errored_streams[0x5];
u8 deth_sqpn[0x18];
u8 reserved_at_420[0x20];
@@ -2898,8 +3322,23 @@ struct mlx5_ifc_roce_addr_layout_bits {
u8 reserved_at_e0[0x20];
};
+struct mlx5_ifc_shampo_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 shampo_log_max_reservation_size[0x5];
+ u8 reserved_at_8[0x3];
+ u8 shampo_log_min_reservation_size[0x5];
+ u8 shampo_min_mss_size[0x10];
+
+ u8 reserved_at_20[0x3];
+ u8 shampo_max_log_headers_entry_size[0x5];
+ u8 reserved_at_28[0x18];
+
+ u8 reserved_at_40[0x7c0];
+};
+
union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
+ struct mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2;
struct mlx5_ifc_odp_cap_bits odp_cap;
struct mlx5_ifc_atomic_caps_bits atomic_caps;
struct mlx5_ifc_roce_cap_bits roce_cap;
@@ -2907,13 +3346,16 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
+ struct mlx5_ifc_port_selection_cap_bits port_selection_cap;
struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
struct mlx5_ifc_qos_cap_bits qos_cap;
struct mlx5_ifc_debug_cap_bits debug_cap;
struct mlx5_ifc_fpga_cap_bits fpga_cap;
struct mlx5_ifc_tls_cap_bits tls_cap;
struct mlx5_ifc_device_mem_cap_bits device_mem_cap;
- struct mlx5_ifc_device_virtio_emulation_cap_bits virtio_emulation_cap;
+ struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap;
+ struct mlx5_ifc_shampo_cap_bits shampo_cap;
+ struct mlx5_ifc_macsec_cap_bits macsec_cap;
u8 reserved_at_0[0x8000];
};
@@ -2929,6 +3371,9 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100,
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400,
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800,
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT = 0x1000,
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT = 0x2000,
+ MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO = 0x4000,
};
enum {
@@ -2937,6 +3382,11 @@ enum {
MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT = 0x2,
};
+enum {
+ MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC = 0x0,
+ MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC = 0x1,
+};
+
struct mlx5_ifc_vlan_bits {
u8 ethtype[0x10];
u8 prio[0x3];
@@ -2944,6 +3394,38 @@ struct mlx5_ifc_vlan_bits {
u8 vid[0xc];
};
+enum {
+ MLX5_FLOW_METER_COLOR_RED = 0x0,
+ MLX5_FLOW_METER_COLOR_YELLOW = 0x1,
+ MLX5_FLOW_METER_COLOR_GREEN = 0x2,
+ MLX5_FLOW_METER_COLOR_UNDEFINED = 0x3,
+};
+
+enum {
+ MLX5_EXE_ASO_FLOW_METER = 0x2,
+};
+
+struct mlx5_ifc_exe_aso_ctrl_flow_meter_bits {
+ u8 return_reg_id[0x4];
+ u8 aso_type[0x4];
+ u8 reserved_at_8[0x14];
+ u8 action[0x1];
+ u8 init_color[0x2];
+ u8 meter_id[0x1];
+};
+
+union mlx5_ifc_exe_aso_ctrl {
+ struct mlx5_ifc_exe_aso_ctrl_flow_meter_bits exe_aso_ctrl_flow_meter;
+};
+
+struct mlx5_ifc_execute_aso_bits {
+ u8 valid[0x1];
+ u8 reserved_at_1[0x7];
+ u8 aso_object_id[0x18];
+
+ union mlx5_ifc_exe_aso_ctrl exe_aso_ctrl;
+};
+
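/* Sketch: arming a flow-meter execute_aso slot inside a flow context,
 * mirroring how a set_fte command is expected to consume the new
 * layout; flow_ctx and the meter object id come from the caller.
 */
static void flow_ctx_set_meter_aso(void *flow_ctx, u32 meter_obj_id)
{
        void *aso = MLX5_ADDR_OF(flow_context, flow_ctx, execute_aso[0]);
        void *ctrl = MLX5_ADDR_OF(execute_aso, aso, exe_aso_ctrl);

        MLX5_SET(execute_aso, aso, valid, 1);
        MLX5_SET(execute_aso, aso, aso_object_id, meter_obj_id);
        MLX5_SET(exe_aso_ctrl_flow_meter, ctrl, aso_type,
                 MLX5_EXE_ASO_FLOW_METER);
        MLX5_SET(exe_aso_ctrl_flow_meter, ctrl, init_color,
                 MLX5_FLOW_METER_COLOR_GREEN);
}
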
struct mlx5_ifc_flow_context_bits {
struct mlx5_ifc_vlan_bits push_vlan;
@@ -2958,7 +3440,7 @@ struct mlx5_ifc_flow_context_bits {
u8 extended_destination[0x1];
u8 reserved_at_81[0x1];
u8 flow_source[0x2];
- u8 reserved_at_84[0x4];
+ u8 encrypt_decrypt_type[0x4];
u8 destination_list_size[0x18];
u8 reserved_at_a0[0x8];
@@ -2970,13 +3452,16 @@ struct mlx5_ifc_flow_context_bits {
struct mlx5_ifc_vlan_bits push_vlan_2;
- u8 reserved_at_120[0xe0];
+ u8 encrypt_decrypt_obj_id[0x20];
+ u8 reserved_at_140[0xc0];
struct mlx5_ifc_fte_match_param_bits match_value;
- u8 reserved_at_1200[0x600];
+ struct mlx5_ifc_execute_aso_bits execute_aso[4];
- union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[0];
+ u8 reserved_at_1300[0x500];
+
+ union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[];
};
enum {
@@ -3042,11 +3527,23 @@ struct mlx5_ifc_vnic_diagnostic_statistics_bits {
u8 transmit_discard_vport_down[0x40];
- u8 reserved_at_140[0xa0];
+ u8 async_eq_overrun[0x20];
+
+ u8 comp_eq_overrun[0x20];
+
+ u8 reserved_at_180[0x20];
+
+ u8 invalid_command[0x20];
+
+ u8 quota_exceeded_command[0x20];
u8 internal_rq_out_of_buffer[0x20];
- u8 reserved_at_200[0xe00];
+ u8 cq_overrun[0x20];
+
+ u8 eth_wqe_too_small[0x20];
+
+ u8 reserved_at_220[0xdc0];
};
struct mlx5_ifc_traffic_counter_bits {
@@ -3085,8 +3582,8 @@ enum {
};
enum {
- MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1,
- MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2,
+ MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO = BIT(0),
+ MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO = BIT(1),
};
enum {
@@ -3104,13 +3601,14 @@ struct mlx5_ifc_tirc_bits {
u8 reserved_at_0[0x20];
u8 disp_type[0x4];
- u8 reserved_at_24[0x1c];
+ u8 tls_en[0x1];
+ u8 reserved_at_25[0x1b];
u8 reserved_at_40[0x40];
u8 reserved_at_80[0x4];
u8 lro_timeout_period_usecs[0x10];
- u8 lro_enable_mask[0x4];
+ u8 packet_merge_mask[0x4];
u8 lro_max_ip_payload_size[0x8];
u8 reserved_at_a0[0x40];
@@ -3198,7 +3696,9 @@ struct mlx5_ifc_sqc_bits {
u8 reg_umr[0x1];
u8 allow_swp[0x1];
u8 hairpin[0x1];
- u8 reserved_at_f[0x11];
+ u8 reserved_at_f[0xb];
+ u8 ts_format[0x2];
+ u8 reserved_at_1c[0x4];
u8 reserved_at_20[0x8];
u8 user_index[0x18];
@@ -3212,11 +3712,15 @@ struct mlx5_ifc_sqc_bits {
u8 reserved_at_80[0x10];
u8 hairpin_peer_vhca[0x10];
- u8 reserved_at_a0[0x50];
+ u8 reserved_at_a0[0x20];
+
+ u8 reserved_at_c0[0x8];
+ u8 ts_cqe_to_dest_cqn[0x18];
+ u8 reserved_at_e0[0x10];
u8 packet_pacing_rate_limit_index[0x10];
u8 tis_lst_sz[0x10];
- u8 reserved_at_110[0x10];
+ u8 qos_queue_group_id[0x10];
u8 reserved_at_120[0x40];
@@ -3231,6 +3735,7 @@ enum {
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT = 0x1,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2,
SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP = 0x4,
};
enum {
@@ -3258,17 +3763,20 @@ struct mlx5_ifc_scheduling_context_bits {
};
struct mlx5_ifc_rqtc_bits {
- u8 reserved_at_0[0xa0];
+ u8 reserved_at_0[0xa0];
- u8 reserved_at_a0[0x10];
- u8 rqt_max_size[0x10];
+ u8 reserved_at_a0[0x5];
+ u8 list_q_type[0x3];
+ u8 reserved_at_a8[0x8];
+ u8 rqt_max_size[0x10];
- u8 reserved_at_c0[0x10];
- u8 rqt_actual_size[0x10];
+ u8 rq_vhca_id_format[0x1];
+ u8 reserved_at_c1[0xf];
+ u8 rqt_actual_size[0x10];
- u8 reserved_at_e0[0x6a0];
+ u8 reserved_at_e0[0x6a0];
- struct mlx5_ifc_rq_num_bits rq_num[0];
+ struct mlx5_ifc_rq_num_bits rq_num[];
};
enum {
@@ -3282,6 +3790,18 @@ enum {
MLX5_RQC_STATE_ERR = 0x3,
};
+enum {
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_BYTE = 0x0,
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE = 0x1,
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_PAGE = 0x2,
+};
+
+enum {
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_NO_MATCH = 0x0,
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED = 0x1,
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_FIVE_TUPLE = 0x2,
+};
+
struct mlx5_ifc_rqc_bits {
u8 rlky[0x1];
u8 delay_drop_en[0x1];
@@ -3292,7 +3812,9 @@ struct mlx5_ifc_rqc_bits {
u8 reserved_at_c[0x1];
u8 flush_in_error_en[0x1];
u8 hairpin[0x1];
- u8 reserved_at_f[0x11];
+ u8 reserved_at_f[0xb];
+ u8 ts_format[0x2];
+ u8 reserved_at_1c[0x4];
u8 reserved_at_20[0x8];
u8 user_index[0x18];
@@ -3312,7 +3834,13 @@ struct mlx5_ifc_rqc_bits {
u8 reserved_at_c0[0x10];
u8 hairpin_peer_vhca[0x10];
- u8 reserved_at_e0[0xa0];
+ u8 reserved_at_e0[0x46];
+ u8 shampo_no_match_alignment_granularity[0x2];
+ u8 reserved_at_128[0x6];
+ u8 shampo_match_criteria_type[0x2];
+ u8 reservation_timeout[0x10];
+
+ u8 reserved_at_140[0x40];
struct mlx5_ifc_wq_bits wq;
};
@@ -3335,6 +3863,11 @@ struct mlx5_ifc_rmpc_bits {
struct mlx5_ifc_wq_bits wq;
};
+enum {
+ VHCA_ID_TYPE_HW = 0,
+ VHCA_ID_TYPE_SW = 1,
+};
+
struct mlx5_ifc_nic_vport_context_bits {
u8 reserved_at_0[0x5];
u8 min_wqe_inline_mode[0x3];
@@ -3351,8 +3884,8 @@ struct mlx5_ifc_nic_vport_context_bits {
u8 event_on_mc_address_change[0x1];
u8 event_on_uc_address_change[0x1];
- u8 reserved_at_40[0xc];
-
+ u8 vhca_id_type[0x1];
+ u8 reserved_at_41[0xb];
u8 affiliation_criteria[0x4];
u8 affiliated_vhca_id[0x10];
@@ -3380,7 +3913,7 @@ struct mlx5_ifc_nic_vport_context_bits {
u8 reserved_at_7e0[0x20];
- u8 current_uc_mac_address[0][0x40];
+ u8 current_uc_mac_address[][0x40];
};
enum {
@@ -3408,7 +3941,9 @@ struct mlx5_ifc_mkc_bits {
u8 lw[0x1];
u8 lr[0x1];
u8 access_mode_1_0[0x2];
- u8 reserved_at_18[0x8];
+ u8 reserved_at_18[0x2];
+ u8 ma_translation_mode[0x2];
+ u8 reserved_at_1c[0x4];
u8 qpn[0x18];
u8 mkey_7_0[0x8];
@@ -3557,8 +4092,8 @@ struct mlx5_ifc_eqc_bits {
u8 reserved_at_80[0x20];
- u8 reserved_at_a0[0x18];
- u8 intr[0x8];
+ u8 reserved_at_a0[0x14];
+ u8 intr[0xc];
u8 reserved_at_c0[0x3];
u8 log_page_size[0x5];
@@ -3653,7 +4188,8 @@ struct mlx5_ifc_dctc_bits {
u8 ecn[0x2];
u8 dscp[0x6];
- u8 reserved_at_1c0[0x40];
+ u8 reserved_at_1c0[0x20];
+ u8 ece[0x20];
};
enum {
@@ -3683,7 +4219,7 @@ struct mlx5_ifc_cqc_bits {
u8 status[0x4];
u8 reserved_at_4[0x2];
u8 dbr_umem_valid[0x1];
- u8 reserved_at_7[0x1];
+ u8 apu_cq[0x1];
u8 cqe_sz[0x3];
u8 cc[0x1];
u8 reserved_at_c[0x1];
@@ -3693,7 +4229,8 @@ struct mlx5_ifc_cqc_bits {
u8 cqe_comp_en[0x1];
u8 mini_cqe_res_format[0x2];
u8 st[0x4];
- u8 reserved_at_18[0x8];
+ u8 reserved_at_18[0x6];
+ u8 cqe_compression_layout[0x2];
u8 reserved_at_20[0x20];
@@ -3709,8 +4246,7 @@ struct mlx5_ifc_cqc_bits {
u8 cq_period[0xc];
u8 cq_max_count[0x10];
- u8 reserved_at_a0[0x18];
- u8 c_eqn[0x8];
+ u8 c_eqn_or_apu_element[0x20];
u8 reserved_at_c0[0x3];
u8 log_page_size[0x5];
@@ -3860,13 +4396,19 @@ struct mlx5_ifc_health_buffer_bits {
u8 assert_callra[0x20];
- u8 reserved_at_140[0x40];
+ u8 reserved_at_140[0x20];
+
+ u8 time[0x20];
u8 fw_version[0x20];
u8 hw_id[0x20];
- u8 reserved_at_1c0[0x20];
+ u8 rfr[0x1];
+ u8 reserved_at_1c1[0x3];
+ u8 valid[0x1];
+ u8 severity[0x3];
+ u8 reserved_at_1c8[0x18];
u8 irisc_index[0x8];
u8 synd[0x8];
@@ -4123,7 +4665,11 @@ struct mlx5_ifc_set_hca_cap_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 other_function[0x1];
+ u8 reserved_at_41[0xf];
+ u8 function_id[0x10];
+
+ u8 reserved_at_60[0x20];
union mlx5_ifc_hca_cap_union_bits capability;
};
@@ -4132,7 +4678,8 @@ enum {
MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION = 0x0,
MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_TAG = 0x1,
MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST = 0x2,
- MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3,
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_IPSEC_OBJ_ID = 0x4
};
struct mlx5_ifc_set_fte_out_bits {
@@ -4182,7 +4729,8 @@ struct mlx5_ifc_rts2rts_qp_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_rts2rts_qp_in_bits {
@@ -4199,7 +4747,7 @@ struct mlx5_ifc_rts2rts_qp_in_bits {
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
@@ -4212,7 +4760,8 @@ struct mlx5_ifc_rtr2rts_qp_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_rtr2rts_qp_in_bits {
@@ -4229,7 +4778,7 @@ struct mlx5_ifc_rtr2rts_qp_in_bits {
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
@@ -4242,7 +4791,8 @@ struct mlx5_ifc_rst2init_qp_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_rst2init_qp_in_bits {
@@ -4259,7 +4809,7 @@ struct mlx5_ifc_rst2init_qp_in_bits {
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
@@ -4302,7 +4852,7 @@ struct mlx5_ifc_query_xrc_srq_out_bits {
u8 reserved_at_280[0x600];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_query_xrc_srq_in_bits {
@@ -4339,6 +4889,7 @@ struct mlx5_ifc_query_vport_state_out_bits {
enum {
MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1,
+ MLX5_VPORT_STATE_OP_MOD_UPLINK = 0x2,
};
struct mlx5_ifc_arm_monitor_counter_in_bits {
@@ -4580,7 +5131,7 @@ struct mlx5_ifc_query_srq_out_bits {
u8 reserved_at_280[0x600];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_query_srq_in_bits {
@@ -4661,6 +5212,7 @@ struct mlx5_ifc_query_scheduling_element_out_bits {
enum {
SCHEDULING_HIERARCHY_E_SWITCH = 0x2,
+ SCHEDULING_HIERARCHY_NIC = 0x3,
};
struct mlx5_ifc_query_scheduling_element_in_bits {
@@ -4785,13 +5337,13 @@ struct mlx5_ifc_query_qp_out_bits {
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
u8 reserved_at_800[0x80];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_query_qp_in_bits {
@@ -5124,7 +5676,7 @@ struct mlx5_ifc_query_hca_vport_pkey_out_bits {
u8 reserved_at_40[0x40];
- struct mlx5_ifc_pkey_bits pkey[0];
+ struct mlx5_ifc_pkey_bits pkey[];
};
struct mlx5_ifc_query_hca_vport_pkey_in_bits {
@@ -5160,7 +5712,7 @@ struct mlx5_ifc_query_hca_vport_gid_out_bits {
u8 gids_num[0x10];
u8 reserved_at_70[0x10];
- struct mlx5_ifc_array128_auto_bits gid[0];
+ struct mlx5_ifc_array128_auto_bits gid[];
};
struct mlx5_ifc_query_hca_vport_gid_in_bits {
@@ -5368,12 +5920,246 @@ struct mlx5_ifc_query_fte_in_bits {
u8 reserved_at_120[0xe0];
};
+struct mlx5_ifc_match_definer_format_0_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 metadata_reg_c_0[0x20];
+
+ u8 metadata_reg_c_1[0x20];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_dmac_15_0[0x10];
+ u8 outer_ethertype[0x10];
+
+ u8 reserved_at_180[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 outer_ip_frag[0x1];
+ u8 outer_qp_type[0x2];
+ u8 outer_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 outer_l3_type[0x2];
+ u8 outer_l4_type[0x2];
+ u8 outer_first_vlan_type[0x2];
+ u8 outer_first_vlan_prio[0x3];
+ u8 outer_first_vlan_cfi[0x1];
+ u8 outer_first_vlan_vid[0xc];
+
+ u8 outer_l4_type_ext[0x4];
+ u8 reserved_at_1a4[0x2];
+ u8 outer_ipsec_layer[0x2];
+ u8 outer_l2_type[0x2];
+ u8 force_lb[0x1];
+ u8 outer_l2_ok[0x1];
+ u8 outer_l3_ok[0x1];
+ u8 outer_l4_ok[0x1];
+ u8 outer_second_vlan_type[0x2];
+ u8 outer_second_vlan_prio[0x3];
+ u8 outer_second_vlan_cfi[0x1];
+ u8 outer_second_vlan_vid[0xc];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 inner_ipv4_checksum_ok[0x1];
+ u8 inner_l4_checksum_ok[0x1];
+ u8 outer_ipv4_checksum_ok[0x1];
+ u8 outer_l4_checksum_ok[0x1];
+ u8 inner_l3_ok[0x1];
+ u8 inner_l4_ok[0x1];
+ u8 outer_l3_ok_duplicate[0x1];
+ u8 outer_l4_ok_duplicate[0x1];
+ u8 outer_tcp_cwr[0x1];
+ u8 outer_tcp_ece[0x1];
+ u8 outer_tcp_urg[0x1];
+ u8 outer_tcp_ack[0x1];
+ u8 outer_tcp_psh[0x1];
+ u8 outer_tcp_rst[0x1];
+ u8 outer_tcp_syn[0x1];
+ u8 outer_tcp_fin[0x1];
+};
+
+struct mlx5_ifc_match_definer_format_22_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 outer_ip_src_addr[0x20];
+
+ u8 outer_ip_dest_addr[0x20];
+
+ u8 outer_l4_sport[0x10];
+ u8 outer_l4_dport[0x10];
+
+ u8 reserved_at_160[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 outer_ip_frag[0x1];
+ u8 outer_qp_type[0x2];
+ u8 outer_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 outer_l3_type[0x2];
+ u8 outer_l4_type[0x2];
+ u8 outer_first_vlan_type[0x2];
+ u8 outer_first_vlan_prio[0x3];
+ u8 outer_first_vlan_cfi[0x1];
+ u8 outer_first_vlan_vid[0xc];
+
+ u8 metadata_reg_c_0[0x20];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 outer_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_format_23_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 inner_ip_src_addr[0x20];
+
+ u8 inner_ip_dest_addr[0x20];
+
+ u8 inner_l4_sport[0x10];
+ u8 inner_l4_dport[0x10];
+
+ u8 reserved_at_160[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 inner_ip_frag[0x1];
+ u8 inner_qp_type[0x2];
+ u8 inner_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 inner_l3_type[0x2];
+ u8 inner_l4_type[0x2];
+ u8 inner_first_vlan_type[0x2];
+ u8 inner_first_vlan_prio[0x3];
+ u8 inner_first_vlan_cfi[0x1];
+ u8 inner_first_vlan_vid[0xc];
+
+ u8 tunnel_header_0[0x20];
+
+ u8 inner_dmac_47_16[0x20];
+
+ u8 inner_smac_47_16[0x20];
+
+ u8 inner_smac_15_0[0x10];
+ u8 inner_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_format_29_bits {
+ u8 reserved_at_0[0xc0];
+
+ u8 outer_ip_dest_addr[0x80];
+
+ u8 outer_ip_src_addr[0x80];
+
+ u8 outer_l4_sport[0x10];
+ u8 outer_l4_dport[0x10];
+
+ u8 reserved_at_1e0[0x20];
+};
+
+struct mlx5_ifc_match_definer_format_30_bits {
+ u8 reserved_at_0[0xa0];
+
+ u8 outer_ip_dest_addr[0x80];
+
+ u8 outer_ip_src_addr[0x80];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 outer_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_format_31_bits {
+ u8 reserved_at_0[0xc0];
+
+ u8 inner_ip_dest_addr[0x80];
+
+ u8 inner_ip_src_addr[0x80];
+
+ u8 inner_l4_sport[0x10];
+ u8 inner_l4_dport[0x10];
+
+ u8 reserved_at_1e0[0x20];
+};
+
+struct mlx5_ifc_match_definer_format_32_bits {
+ u8 reserved_at_0[0xa0];
+
+ u8 inner_ip_dest_addr[0x80];
+
+ u8 inner_ip_src_addr[0x80];
+
+ u8 inner_dmac_47_16[0x20];
+
+ u8 inner_smac_47_16[0x20];
+
+ u8 inner_smac_15_0[0x10];
+ u8 inner_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x40];
+
+ u8 reserved_at_80[0x10];
+ u8 format_id[0x10];
+
+ u8 reserved_at_a0[0x160];
+
+ u8 match_mask[16][0x20];
+};
+
+struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 vhca_tunnel_id[0x10];
+ u8 obj_type[0x10];
+
+ u8 obj_id[0x20];
+
+ u8 reserved_at_60[0x3];
+ u8 log_obj_range[0x5];
+ u8 reserved_at_68[0x18];
+};
+
+struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 obj_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_match_definer_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+ struct mlx5_ifc_match_definer_bits obj_context;
+};
+
+struct mlx5_ifc_create_match_definer_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
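/* Sketch: creating a match definer through the general object command
 * set. MLX5_CMD_OP_CREATE_GENERAL_OBJECT and the 0x18 definer object
 * type are assumptions based on the PRM's general object interface;
 * format 22 (outer 5-tuple plus MACs) is picked for illustration.
 */
static int create_definer(struct mlx5_core_dev *mdev, u32 *obj_id)
{
        u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
        u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
        void *ctx = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
        int err;

        MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
                 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
        MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, 0x18);
        MLX5_SET(match_definer, ctx, format_id, 22);

        err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
        if (!err)
                *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
        return err;
}
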
enum {
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_3 = 0x4,
+ MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_4 = 0x5,
+ MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_5 = 0x6,
};
struct mlx5_ifc_query_flow_group_out_bits {
@@ -5428,7 +6214,7 @@ struct mlx5_ifc_query_flow_counter_out_bits {
u8 reserved_at_40[0x40];
- struct mlx5_ifc_traffic_counter_bits flow_statistics[0];
+ struct mlx5_ifc_traffic_counter_bits flow_statistics[];
};
struct mlx5_ifc_query_flow_counter_in_bits {
@@ -5522,7 +6308,7 @@ struct mlx5_ifc_query_eq_out_bits {
u8 reserved_at_300[0x580];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_query_eq_in_bits {
@@ -5539,15 +6325,17 @@ struct mlx5_ifc_query_eq_in_bits {
};
struct mlx5_ifc_packet_reformat_context_in_bits {
- u8 reserved_at_0[0x5];
- u8 reformat_type[0x3];
- u8 reserved_at_8[0xe];
+ u8 reformat_type[0x8];
+ u8 reserved_at_8[0x4];
+ u8 reformat_param_0[0x4];
+ u8 reserved_at_10[0x6];
u8 reformat_data_size[0xa];
- u8 reserved_at_20[0x10];
+ u8 reformat_param_1[0x8];
+ u8 reserved_at_28[0x8];
u8 reformat_data[2][0x8];
- u8 more_reformat_data[0][0x8];
+ u8 more_reformat_data[][0x8];
};
struct mlx5_ifc_query_packet_reformat_context_out_bits {
@@ -5558,7 +6346,7 @@ struct mlx5_ifc_query_packet_reformat_context_out_bits {
u8 reserved_at_40[0xa0];
- struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context[0];
+ struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context[];
};
struct mlx5_ifc_query_packet_reformat_context_in_bits {
@@ -5584,12 +6372,22 @@ struct mlx5_ifc_alloc_packet_reformat_context_out_bits {
u8 reserved_at_60[0x20];
};
+enum {
+ MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START = 0x1,
+ MLX5_REFORMAT_CONTEXT_ANCHOR_IP_START = 0x7,
+ MLX5_REFORMAT_CONTEXT_ANCHOR_TCP_UDP_START = 0x9,
+};
+
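/* Sketch: describing an INSERT_HDR reformat at the TCP/UDP anchor.
 * Carrying the anchor in reformat_param_0 and the byte offset in
 * reformat_param_1 is an assumption about the firmware convention;
 * ctx addresses a packet_reformat_context_in layout.
 */
static void fill_insert_hdr(void *ctx, u16 hdr_len)
{
        MLX5_SET(packet_reformat_context_in, ctx, reformat_type,
                 MLX5_REFORMAT_TYPE_INSERT_HDR);
        MLX5_SET(packet_reformat_context_in, ctx, reformat_param_0,
                 MLX5_REFORMAT_CONTEXT_ANCHOR_TCP_UDP_START);
        MLX5_SET(packet_reformat_context_in, ctx, reformat_param_1, 0);
        MLX5_SET(packet_reformat_context_in, ctx, reformat_data_size,
                 hdr_len);
}
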
enum mlx5_reformat_ctx_type {
MLX5_REFORMAT_TYPE_L2_TO_VXLAN = 0x0,
MLX5_REFORMAT_TYPE_L2_TO_NVGRE = 0x1,
MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x3,
MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
+ MLX5_REFORMAT_TYPE_INSERT_HDR = 0xf,
+ MLX5_REFORMAT_TYPE_REMOVE_HDR = 0x10,
+ MLX5_REFORMAT_TYPE_ADD_MACSEC = 0x11,
+ MLX5_REFORMAT_TYPE_DEL_MACSEC = 0x12,
};
struct mlx5_ifc_alloc_packet_reformat_context_in_bits {
@@ -5659,9 +6457,9 @@ struct mlx5_ifc_copy_action_in_bits {
u8 reserved_at_38[0x8];
};
-union mlx5_ifc_set_action_in_add_action_in_auto_bits {
- struct mlx5_ifc_set_action_in_bits set_action_in;
- struct mlx5_ifc_add_action_in_bits add_action_in;
+union mlx5_ifc_set_add_copy_action_in_auto_bits {
+ struct mlx5_ifc_set_action_in_bits set_action_in;
+ struct mlx5_ifc_add_action_in_bits add_action_in;
struct mlx5_ifc_copy_action_in_bits copy_action_in;
u8 reserved_at_0[0x40];
};
@@ -5709,6 +6507,9 @@ enum {
MLX5_ACTION_IN_FIELD_METADATA_REG_C_7 = 0x58,
MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59,
MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B,
+ MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME = 0x5D,
+ MLX5_ACTION_IN_FIELD_OUT_EMD_47_32 = 0x6F,
+ MLX5_ACTION_IN_FIELD_OUT_EMD_31_0 = 0x70,
};
struct mlx5_ifc_alloc_modify_header_context_out_bits {
@@ -5735,7 +6536,7 @@ struct mlx5_ifc_alloc_modify_header_context_in_bits {
u8 reserved_at_68[0x10];
u8 num_of_actions[0x8];
- union mlx5_ifc_set_action_in_add_action_in_auto_bits actions[0];
+ union mlx5_ifc_set_add_copy_action_in_auto_bits actions[];
};
struct mlx5_ifc_dealloc_modify_header_context_out_bits {
@@ -5759,6 +6560,18 @@ struct mlx5_ifc_dealloc_modify_header_context_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_query_modify_header_context_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 modify_header_id[0x20];
+
+ u8 reserved_at_60[0xa0];
+};
+
struct mlx5_ifc_query_dct_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -5797,7 +6610,7 @@ struct mlx5_ifc_query_cq_out_bits {
u8 reserved_at_280[0x600];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_query_cq_in_bits {
@@ -6095,7 +6908,7 @@ struct mlx5_ifc_modify_tir_bitmask_bits {
u8 reserved_at_3c[0x1];
u8 hash[0x1];
u8 reserved_at_3e[0x1];
- u8 lro[0x1];
+ u8 packet_merge[0x1];
};
struct mlx5_ifc_modify_tir_out_bits {
@@ -6404,7 +7217,7 @@ struct mlx5_ifc_modify_cq_in_bits {
u8 reserved_at_300[0x580];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_modify_cong_status_out_bits {
@@ -6468,7 +7281,7 @@ struct mlx5_ifc_manage_pages_out_bits {
u8 reserved_at_60[0x20];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
enum {
@@ -6490,7 +7303,7 @@ struct mlx5_ifc_manage_pages_in_bits {
u8 input_num_entries[0x20];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_mad_ifc_out_bits {
@@ -6536,7 +7349,12 @@ struct mlx5_ifc_init_hca_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x2];
+ u8 sw_vhca_id[0xe];
+ u8 reserved_at_70[0x10];
+
u8 sw_owner_id[4][0x20];
};
@@ -6546,7 +7364,8 @@ struct mlx5_ifc_init2rtr_qp_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_init2rtr_qp_in_bits {
@@ -6563,7 +7382,7 @@ struct mlx5_ifc_init2rtr_qp_in_bits {
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
@@ -6576,7 +7395,8 @@ struct mlx5_ifc_init2init_qp_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_init2init_qp_in_bits {
@@ -6593,7 +7413,7 @@ struct mlx5_ifc_init2init_qp_in_bits {
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
@@ -7038,7 +7858,7 @@ struct mlx5_ifc_destroy_mkey_out_bits {
struct mlx5_ifc_destroy_mkey_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7293,7 +8113,7 @@ struct mlx5_ifc_dealloc_uar_out_bits {
struct mlx5_ifc_dealloc_uar_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7445,7 +8265,7 @@ struct mlx5_ifc_create_xrc_srq_in_bits {
u8 reserved_at_300[0x580];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_create_tis_out_bits {
@@ -7521,7 +8341,7 @@ struct mlx5_ifc_create_srq_in_bits {
u8 reserved_at_280[0x600];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_create_sq_out_bits {
@@ -7659,7 +8479,7 @@ struct mlx5_ifc_create_qp_out_bits {
u8 reserved_at_40[0x8];
u8 qpn[0x18];
- u8 reserved_at_60[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_create_qp_in_bits {
@@ -7669,11 +8489,13 @@ struct mlx5_ifc_create_qp_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x8];
+ u8 input_qpn[0x18];
+ u8 reserved_at_60[0x20];
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
@@ -7682,7 +8504,7 @@ struct mlx5_ifc_create_qp_in_bits {
u8 wq_umem_valid[0x1];
u8 reserved_at_861[0x1f];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_create_psv_out_bits {
@@ -7734,7 +8556,7 @@ struct mlx5_ifc_create_mkey_out_bits {
struct mlx5_ifc_create_mkey_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7753,7 +8575,7 @@ struct mlx5_ifc_create_mkey_in_bits {
u8 reserved_at_320[0x560];
- u8 klm_pas_mtt[0][0x20];
+ u8 klm_pas_mtt[][0x20];
};
enum {
@@ -7780,7 +8602,7 @@ struct mlx5_ifc_create_flow_table_out_bits {
struct mlx5_ifc_create_flow_table_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7812,6 +8634,11 @@ struct mlx5_ifc_create_flow_group_out_bits {
};
enum {
+ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_TCAM_SUBTABLE = 0x0,
+ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT = 0x1,
+};
+
+enum {
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
@@ -7832,7 +8659,9 @@ struct mlx5_ifc_create_flow_group_in_bits {
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x4];
+ u8 group_type[0x4];
+ u8 reserved_at_90[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -7847,7 +8676,10 @@ struct mlx5_ifc_create_flow_group_in_bits {
u8 end_flow_index[0x20];
- u8 reserved_at_140[0xa0];
+ u8 reserved_at_140[0x10];
+ u8 match_definer_id[0x10];
+
+ u8 reserved_at_160[0x80];
u8 reserved_at_1e0[0x18];
u8 match_criteria_enable[0x8];
@@ -7886,7 +8718,7 @@ struct mlx5_ifc_create_eq_in_bits {
u8 reserved_at_3c0[0x4c0];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_create_dct_out_bits {
@@ -7898,7 +8730,7 @@ struct mlx5_ifc_create_dct_out_bits {
u8 reserved_at_40[0x8];
u8 dctn[0x18];
- u8 reserved_at_60[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_create_dct_in_bits {
@@ -7943,7 +8775,7 @@ struct mlx5_ifc_create_cq_in_bits {
u8 cq_umem_valid[0x1];
u8 reserved_at_2e1[0x59f];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_config_int_moderation_out_bits {
@@ -8138,7 +8970,7 @@ struct mlx5_ifc_alloc_uar_out_bits {
struct mlx5_ifc_alloc_uar_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -8265,9 +9097,20 @@ struct mlx5_ifc_set_pp_rate_limit_out_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_set_pp_rate_limit_context_bits {
+ u8 rate_limit[0x20];
+
+ u8 burst_upper_bound[0x20];
+
+ u8 reserved_at_40[0x10];
+ u8 typical_packet_size[0x10];
+
+ u8 reserved_at_60[0x120];
+};
+
struct mlx5_ifc_set_pp_rate_limit_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -8277,14 +9120,7 @@ struct mlx5_ifc_set_pp_rate_limit_in_bits {
u8 reserved_at_60[0x20];
- u8 rate_limit[0x20];
-
- u8 burst_upper_bound[0x20];
-
- u8 reserved_at_c0[0x10];
- u8 typical_packet_size[0x10];
-
- u8 reserved_at_e0[0x120];
+ struct mlx5_ifc_set_pp_rate_limit_context_bits ctx;
};
struct mlx5_ifc_access_register_out_bits {
@@ -8295,7 +9131,7 @@ struct mlx5_ifc_access_register_out_bits {
u8 reserved_at_40[0x40];
- u8 register_data[0][0x20];
+ u8 register_data[][0x20];
};
enum {
@@ -8315,7 +9151,7 @@ struct mlx5_ifc_access_register_in_bits {
u8 argument[0x20];
- u8 register_data[0][0x20];
+ u8 register_data[][0x20];
};
struct mlx5_ifc_sltp_reg_bits {
@@ -8420,7 +9256,8 @@ struct mlx5_ifc_ptys_reg_bits {
u8 proto_mask[0x3];
u8 an_status[0x4];
- u8 reserved_at_24[0x1c];
+ u8 reserved_at_24[0xc];
+ u8 data_rate_oper[0x10];
u8 ext_eth_proto_capability[0x20];
@@ -8615,6 +9452,8 @@ struct mlx5_ifc_pplm_reg_bits {
u8 fec_override_admin_100g_2x[0x10];
u8 fec_override_admin_50g_1x[0x10];
+
+ u8 reserved_at_140[0x140];
};
struct mlx5_ifc_ppcnt_reg_bits {
@@ -8940,6 +9779,28 @@ struct mlx5_ifc_mpegc_reg_bits {
u8 reserved_at_60[0x100];
};
+enum {
+ MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE = 0x1,
+ MLX5_MTUTC_OPERATION_ADJUST_TIME = 0x2,
+ MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC = 0x3,
+};
+
+struct mlx5_ifc_mtutc_reg_bits {
+ u8 reserved_at_0[0x1c];
+ u8 operation[0x4];
+
+ u8 freq_adjustment[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ u8 utc_sec[0x20];
+
+ u8 reserved_at_a0[0x2];
+ u8 utc_nsec[0x1e];
+
+ u8 time_adjustment[0x20];
+};
+
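/* Sketch: an immediate UTC time set through the new MTUTC register via
 * the generic access-register path. MLX5_REG_MTUTC is assumed to be the
 * register id exposed in <linux/mlx5/driver.h>.
 */
static int mtutc_set_time(struct mlx5_core_dev *mdev, u32 sec, u32 nsec)
{
        u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
        u32 out[MLX5_ST_SZ_DW(mtutc_reg)];

        MLX5_SET(mtutc_reg, in, operation,
                 MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE);
        MLX5_SET(mtutc_reg, in, utc_sec, sec);
        MLX5_SET(mtutc_reg, in, utc_nsec, nsec);

        return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
                                    MLX5_REG_MTUTC, 0, 1);
}
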
struct mlx5_ifc_pcam_enhanced_features_bits {
u8 reserved_at_0[0x68];
u8 fec_50G_per_lane_in_pplm[0x1];
@@ -8998,7 +9859,14 @@ struct mlx5_ifc_pcam_reg_bits {
};
struct mlx5_ifc_mcam_enhanced_features_bits {
- u8 reserved_at_0[0x6e];
+ u8 reserved_at_0[0x5d];
+ u8 mcia_32dwords[0x1];
+ u8 out_pulse_duration_ns[0x1];
+ u8 npps_period[0x1];
+ u8 reserved_at_60[0xa];
+ u8 reset_state[0x1];
+ u8 ptpcyc2realtime_modify[0x1];
+ u8 reserved_at_6c[0x2];
u8 pci_status_and_power[0x1];
u8 reserved_at_6f[0x5];
u8 mark_tx_action_cnp[0x1];
@@ -9021,10 +9889,14 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 regs_95_to_87[0x9];
u8 mpegc[0x1];
- u8 regs_85_to_68[0x12];
+ u8 mtutc[0x1];
+ u8 regs_84_to_68[0x11];
u8 tracer_registers[0x4];
- u8 regs_63_to_32[0x20];
+ u8 regs_63_to_46[0x12];
+ u8 mrtc[0x1];
+ u8 regs_44_to_32[0xd];
+
u8 regs_31_to_0[0x20];
};
@@ -9165,25 +10037,31 @@ struct mlx5_ifc_pcmr_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
u8 reserved_at_10[0x10];
+
u8 entropy_force_cap[0x1];
u8 entropy_calc_cap[0x1];
u8 entropy_gre_calc_cap[0x1];
- u8 reserved_at_23[0x1b];
+ u8 reserved_at_23[0xf];
+ u8 rx_ts_over_crc_cap[0x1];
+ u8 reserved_at_33[0xb];
u8 fcs_cap[0x1];
u8 reserved_at_3f[0x1];
+
u8 entropy_force[0x1];
u8 entropy_calc[0x1];
u8 entropy_gre_calc[0x1];
- u8 reserved_at_43[0x1b];
+ u8 reserved_at_43[0xf];
+ u8 rx_ts_over_crc[0x1];
+ u8 reserved_at_53[0xb];
u8 fcs_chk[0x1];
u8 reserved_at_5f[0x1];
};
struct mlx5_ifc_lane_2_module_mapping_bits {
- u8 reserved_at_0[0x6];
- u8 rx_lane[0x2];
- u8 reserved_at_8[0x6];
- u8 tx_lane[0x2];
+ u8 reserved_at_0[0x4];
+ u8 rx_lane[0x4];
+ u8 reserved_at_8[0x4];
+ u8 tx_lane[0x4];
u8 reserved_at_10[0x8];
u8 module[0x8];
};
@@ -9192,8 +10070,8 @@ struct mlx5_ifc_bufferx_reg_bits {
u8 reserved_at_0[0x6];
u8 lossy[0x1];
u8 epsb[0x1];
- u8 reserved_at_8[0xc];
- u8 size[0xc];
+ u8 reserved_at_8[0x8];
+ u8 size[0x10];
u8 xoff_threshold[0x10];
u8 xon_threshold[0x10];
@@ -9331,7 +10209,7 @@ struct mlx5_ifc_cmd_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 command[0][0x20];
+ u8 command[][0x20];
};
struct mlx5_ifc_cmd_if_box_bits {
@@ -9485,7 +10363,12 @@ struct mlx5_ifc_mtpps_reg_bits {
u8 reserved_at_18[0x4];
u8 cap_max_num_of_pps_out_pins[0x4];
- u8 reserved_at_20[0x24];
+ u8 reserved_at_20[0x13];
+ u8 cap_log_min_npps_period[0x5];
+ u8 reserved_at_38[0x3];
+ u8 cap_log_min_out_pulse_duration_ns[0x5];
+
+ u8 reserved_at_40[0x4];
u8 cap_pin_3_mode[0x4];
u8 reserved_at_48[0x4];
u8 cap_pin_2_mode[0x4];
@@ -9504,7 +10387,9 @@ struct mlx5_ifc_mtpps_reg_bits {
u8 cap_pin_4_mode[0x4];
u8 field_select[0x20];
- u8 reserved_at_a0[0x60];
+ u8 reserved_at_a0[0x20];
+
+ u8 npps_period[0x40];
u8 enable[0x1];
u8 reserved_at_101[0xb];
@@ -9513,7 +10398,8 @@ struct mlx5_ifc_mtpps_reg_bits {
u8 pin_mode[0x4];
u8 pin[0x8];
- u8 reserved_at_120[0x20];
+ u8 reserved_at_120[0x2];
+ u8 out_pulse_duration_ns[0x1e];
u8 time_stamp[0x40];
@@ -9625,7 +10511,7 @@ struct mlx5_ifc_mcqi_reg_bits {
u8 reserved_at_a0[0x10];
u8 data_size[0x10];
- union mlx5_ifc_mcqi_reg_data_bits data[0];
+ union mlx5_ifc_mcqi_reg_data_bits data[];
};
struct mlx5_ifc_mcc_reg_bits {
@@ -9664,7 +10550,39 @@ struct mlx5_ifc_mcda_reg_bits {
u8 reserved_at_60[0x20];
- u8 data[0][0x20];
+ u8 data[][0x20];
+};
+
+enum {
+ MLX5_MFRL_REG_RESET_STATE_IDLE = 0,
+ MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION = 1,
+ MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS = 2,
+ MLX5_MFRL_REG_RESET_STATE_TIMEOUT = 3,
+ MLX5_MFRL_REG_RESET_STATE_NACK = 4,
+};
+
+enum {
+ MLX5_MFRL_REG_RESET_TYPE_FULL_CHIP = BIT(0),
+ MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE = BIT(1),
+};
+
+enum {
+ MLX5_MFRL_REG_RESET_LEVEL0 = BIT(0),
+ MLX5_MFRL_REG_RESET_LEVEL3 = BIT(3),
+ MLX5_MFRL_REG_RESET_LEVEL6 = BIT(6),
+};
+
+struct mlx5_ifc_mfrl_reg_bits {
+ u8 reserved_at_0[0x20];
+
+ u8 reserved_at_20[0x2];
+ u8 pci_sync_for_fw_update_start[0x1];
+ u8 pci_sync_for_fw_update_resp[0x2];
+ u8 rst_type_sel[0x3];
+ u8 reserved_at_28[0x4];
+ u8 reset_state[0x4];
+ u8 reset_type[0x8];
+ u8 reset_level[0x8];
};
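/* Sketch: requesting a PCI-synchronized level-3 firmware reset, the
 * flow the new MFRL fields enable. MLX5_REG_MFRL is assumed to be the
 * register id from <linux/mlx5/driver.h>; error handling is trimmed.
 */
static int request_fw_reset_level3(struct mlx5_core_dev *mdev)
{
        u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
        u32 out[MLX5_ST_SZ_DW(mfrl_reg)];

        MLX5_SET(mfrl_reg, in, reset_level, MLX5_MFRL_REG_RESET_LEVEL3);
        MLX5_SET(mfrl_reg, in, rst_type_sel, 0); /* full chip reset type */
        MLX5_SET(mfrl_reg, in, pci_sync_for_fw_update_start, 1);

        return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
                                    MLX5_REG_MFRL, 0, 1);
}
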
struct mlx5_ifc_mirc_reg_bits {
@@ -9674,6 +10592,64 @@ struct mlx5_ifc_mirc_reg_bits {
u8 reserved_at_20[0x20];
};
+struct mlx5_ifc_pddr_monitor_opcode_bits {
+ u8 reserved_at_0[0x10];
+ u8 monitor_opcode[0x10];
+};
+
+union mlx5_ifc_pddr_troubleshooting_page_status_opcode_auto_bits {
+ struct mlx5_ifc_pddr_monitor_opcode_bits pddr_monitor_opcode;
+ u8 reserved_at_0[0x20];
+};
+
+enum {
+ /* Monitor opcodes */
+ MLX5_PDDR_REG_TRBLSH_GROUP_OPCODE_MONITOR = 0x0,
+};
+
+struct mlx5_ifc_pddr_troubleshooting_page_bits {
+ u8 reserved_at_0[0x10];
+ u8 group_opcode[0x10];
+
+ union mlx5_ifc_pddr_troubleshooting_page_status_opcode_auto_bits status_opcode;
+
+ u8 reserved_at_40[0x20];
+
+ u8 status_message[59][0x20];
+};
+
+union mlx5_ifc_pddr_reg_page_data_auto_bits {
+ struct mlx5_ifc_pddr_troubleshooting_page_bits pddr_troubleshooting_page;
+ u8 reserved_at_0[0x7c0];
+};
+
+enum {
+ MLX5_PDDR_REG_PAGE_SELECT_TROUBLESHOOTING_INFO_PAGE = 0x1,
+};
+
+struct mlx5_ifc_pddr_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 reserved_at_12[0xe];
+
+ u8 reserved_at_20[0x18];
+ u8 page_select[0x8];
+
+ union mlx5_ifc_pddr_reg_page_data_auto_bits page_data;
+};
+
+struct mlx5_ifc_mrtc_reg_bits {
+ u8 time_synced[0x1];
+ u8 reserved_at_1[0x1f];
+
+ u8 reserved_at_20[0x20];
+
+ u8 time_h[0x20];
+
+ u8 time_l[0x20];
+};
+
union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
@@ -9688,6 +10664,9 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_pamp_reg_bits pamp_reg;
struct mlx5_ifc_paos_reg_bits paos_reg;
struct mlx5_ifc_pcap_reg_bits pcap_reg;
+ struct mlx5_ifc_pddr_monitor_opcode_bits pddr_monitor_opcode;
+ struct mlx5_ifc_pddr_reg_bits pddr_reg;
+ struct mlx5_ifc_pddr_troubleshooting_page_bits pddr_troubleshooting_page;
struct mlx5_ifc_peir_reg_bits peir_reg;
struct mlx5_ifc_pelc_reg_bits pelc_reg;
struct mlx5_ifc_pfcc_reg_bits pfcc_reg;
@@ -9730,6 +10709,9 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_mcc_reg_bits mcc_reg;
struct mlx5_ifc_mcda_reg_bits mcda_reg;
struct mlx5_ifc_mirc_reg_bits mirc_reg;
+ struct mlx5_ifc_mfrl_reg_bits mfrl_reg;
+ struct mlx5_ifc_mtutc_reg_bits mtutc_reg;
+ struct mlx5_ifc_mrtc_reg_bits mrtc_reg;
u8 reserved_at_0[0x60e0];
};
@@ -9766,14 +10748,19 @@ struct mlx5_ifc_set_flow_table_root_in_bits {
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x7];
+ u8 table_of_other_vport[0x1];
+ u8 table_vport_number[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
u8 reserved_at_c0[0x8];
u8 underlay_qpn[0x18];
- u8 reserved_at_e0[0x120];
+ u8 table_eswitch_owner_vhca_id_valid[0x1];
+ u8 reserved_at_e1[0xf];
+ u8 table_eswitch_owner_vhca_id[0x10];
+ u8 reserved_at_100[0x100];
};
enum {
@@ -9886,6 +10873,34 @@ struct mlx5_ifc_pptb_reg_bits {
u8 untagged_buff[0x4];
};
+struct mlx5_ifc_sbcam_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 feature_group[0x8];
+ u8 reserved_at_10[0x8];
+ u8 access_reg_group[0x8];
+
+ u8 reserved_at_20[0x20];
+
+ u8 sb_access_reg_cap_mask[4][0x20];
+
+ u8 reserved_at_c0[0x80];
+
+ u8 sb_feature_cap_mask[4][0x20];
+
+ u8 reserved_at_1c0[0x40];
+
+ u8 cap_total_buffer_size[0x20];
+
+ u8 cap_cell_size[0x10];
+ u8 cap_max_pg_buffers[0x8];
+ u8 cap_num_pool_supported[0x8];
+
+ u8 reserved_at_240[0x8];
+ u8 cap_sbsr_stat_size[0x8];
+ u8 cap_max_tclass_data[0x8];
+ u8 cap_max_cpu_ingress_tclass_sb[0x8];
+};
+
struct mlx5_ifc_pbmc_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
@@ -9900,7 +10915,7 @@ struct mlx5_ifc_pbmc_reg_bits {
struct mlx5_ifc_bufferx_reg_bits buffer[10];
- u8 reserved_at_2e0[0x40];
+ u8 reserved_at_2e0[0x80];
};
struct mlx5_ifc_qtct_reg_bits {
@@ -9974,11 +10989,22 @@ struct mlx5_ifc_dcbx_param_bits {
u8 reserved_at_a0[0x160];
};
+enum {
+ MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY = 0,
+ MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT = 1,
+ MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_MPESW = 2,
+};
+
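/* Sketch: opting into the hash-based (port_select_ft) LAG mode; the
 * lag_ctx pointer is assumed to address a lagc layout inside the
 * CREATE_LAG command input.
 */
static void lagc_set_hash_mode(void *lag_ctx)
{
        MLX5_SET(lagc, lag_ctx, port_select_mode,
                 MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT);
}
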
struct mlx5_ifc_lagc_bits {
- u8 reserved_at_0[0x1d];
+ u8 fdb_selection_mode[0x1];
+ u8 reserved_at_1[0x14];
+ u8 port_select_mode[0x3];
+ u8 reserved_at_18[0x5];
u8 lag_state[0x3];
- u8 reserved_at_20[0x14];
+ u8 reserved_at_20[0xc];
+ u8 active_port[0x4];
+ u8 reserved_at_30[0x4];
u8 tx_remap_affinity_2[0x4];
u8 reserved_at_38[0x4];
u8 tx_remap_affinity_1[0x4];
@@ -10101,6 +11127,41 @@ struct mlx5_ifc_destroy_vport_lag_in_bits {
u8 reserved_at_40[0x40];
};
+enum {
+ MLX5_MODIFY_MEMIC_OP_MOD_ALLOC,
+ MLX5_MODIFY_MEMIC_OP_MOD_DEALLOC,
+};
+
+struct mlx5_ifc_modify_memic_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x18];
+ u8 memic_operation_type[0x8];
+
+ u8 memic_start_addr[0x40];
+
+ u8 reserved_at_c0[0x140];
+};
+
+struct mlx5_ifc_modify_memic_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ u8 memic_operation_addr[0x40];
+
+ u8 reserved_at_c0[0x140];
+};
+
struct mlx5_ifc_alloc_memic_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
@@ -10154,40 +11215,18 @@ struct mlx5_ifc_dealloc_memic_out_bits {
u8 reserved_at_40[0x40];
};
-struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
- u8 opcode[0x10];
- u8 uid[0x10];
-
- u8 vhca_tunnel_id[0x10];
- u8 obj_type[0x10];
-
- u8 obj_id[0x20];
-
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
-
- u8 syndrome[0x20];
-
- u8 obj_id[0x20];
-
- u8 reserved_at_60[0x20];
-};
-
struct mlx5_ifc_umem_bits {
u8 reserved_at_0[0x80];
- u8 reserved_at_80[0x1b];
+ u8 ats[0x1];
+ u8 reserved_at_81[0x1a];
u8 log_page_size[0x5];
u8 page_offset[0x20];
u8 num_of_mtt[0x40];
- struct mlx5_ifc_mtt_bits mtt[0];
+ struct mlx5_ifc_mtt_bits mtt[];
};
struct mlx5_ifc_uctx_bits {
@@ -10235,6 +11274,40 @@ struct mlx5_ifc_create_umem_in_bits {
struct mlx5_ifc_umem_bits umem;
};
+struct mlx5_ifc_create_umem_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x8];
+ u8 umem_id[0x18];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_umem_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x8];
+ u8 umem_id[0x18];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_umem_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_create_uctx_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
@@ -10247,6 +11320,18 @@ struct mlx5_ifc_create_uctx_in_bits {
struct mlx5_ifc_uctx_bits uctx;
};
+struct mlx5_ifc_create_uctx_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
struct mlx5_ifc_destroy_uctx_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
@@ -10260,6 +11345,15 @@ struct mlx5_ifc_destroy_uctx_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_destroy_uctx_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_create_sw_icm_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
struct mlx5_ifc_sw_icm_bits sw_icm;
@@ -10312,7 +11406,7 @@ struct mlx5_ifc_mtrc_stdb_bits {
u8 reserved_at_4[0x4];
u8 read_size[0x18];
u8 start_offset[0x20];
- u8 string_db_data[0];
+ u8 string_db_data[];
};
struct mlx5_ifc_mtrc_ctrl_bits {
@@ -10366,7 +11460,7 @@ struct mlx5_ifc_query_esw_functions_out_bits {
struct mlx5_ifc_host_params_context_bits host_params_context;
u8 reserved_at_280[0x180];
- u8 host_sf_enable[0][0x40];
+ u8 host_sf_enable[][0x40];
};
struct mlx5_ifc_sf_partition_bits {
@@ -10386,7 +11480,7 @@ struct mlx5_ifc_query_sf_partitions_out_bits {
u8 reserved_at_60[0x20];
- struct mlx5_ifc_sf_partition_bits sf_partition[0];
+ struct mlx5_ifc_sf_partition_bits sf_partition[];
};
struct mlx5_ifc_query_sf_partitions_in_bits {
@@ -10451,11 +11545,156 @@ struct mlx5_ifc_affiliated_event_header_bits {
};
enum {
- MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT(0xc),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT_ULL(0xc),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = BIT_ULL(0x13),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER = BIT_ULL(0x20),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = BIT_ULL(0x24),
};
enum {
MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc,
+ MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13,
+ MLX5_GENERAL_OBJECT_TYPES_SAMPLER = 0x20,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = 0x24,
+ MLX5_GENERAL_OBJECT_TYPES_MACSEC = 0x27,
+};
+
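/* Sketch: gating object creation on the 64-bit general_obj_types
 * capability mask, read with the MLX5_CAP_GEN_64 accessor from
 * <linux/mlx5/device.h>.
 */
static bool sampler_obj_supported(struct mlx5_core_dev *mdev)
{
        return MLX5_CAP_GEN_64(mdev, general_obj_types) &
               MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER;
}
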
+enum {
+ MLX5_IPSEC_OBJECT_ICV_LEN_16B,
+};
+
+struct mlx5_ifc_ipsec_obj_bits {
+ u8 modify_field_select[0x40];
+ u8 full_offload[0x1];
+ u8 reserved_at_41[0x1];
+ u8 esn_en[0x1];
+ u8 esn_overlap[0x1];
+ u8 reserved_at_44[0x2];
+ u8 icv_length[0x2];
+ u8 reserved_at_48[0x4];
+ u8 aso_return_reg[0x4];
+ u8 reserved_at_50[0x10];
+
+ u8 esn_msb[0x20];
+
+ u8 reserved_at_80[0x8];
+ u8 dekn[0x18];
+
+ u8 salt[0x20];
+
+ u8 implicit_iv[0x40];
+
+ u8 reserved_at_100[0x700];
+};
+
+struct mlx5_ifc_create_ipsec_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_ipsec_obj_bits ipsec_object;
+};
+
+enum {
+ MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP = BIT(0),
+ MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB = BIT(1),
+};
+
+struct mlx5_ifc_query_ipsec_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_ipsec_obj_bits ipsec_object;
+};
+
+struct mlx5_ifc_modify_ipsec_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_ipsec_obj_bits ipsec_object;
+};
+
+enum {
+ MLX5_MACSEC_ASO_REPLAY_PROTECTION = 0x1,
+};
+
+enum {
+ MLX5_MACSEC_ASO_REPLAY_WIN_32BIT = 0x0,
+ MLX5_MACSEC_ASO_REPLAY_WIN_64BIT = 0x1,
+ MLX5_MACSEC_ASO_REPLAY_WIN_128BIT = 0x2,
+ MLX5_MACSEC_ASO_REPLAY_WIN_256BIT = 0x3,
+};
+
+#define MLX5_MACSEC_ASO_INC_SN 0x2
+#define MLX5_MACSEC_ASO_REG_C_4_5 0x2
+
+struct mlx5_ifc_macsec_aso_bits {
+ u8 valid[0x1];
+ u8 reserved_at_1[0x1];
+ u8 mode[0x2];
+ u8 window_size[0x2];
+ u8 soft_lifetime_arm[0x1];
+ u8 hard_lifetime_arm[0x1];
+ u8 remove_flow_enable[0x1];
+ u8 epn_event_arm[0x1];
+ u8 reserved_at_a[0x16];
+
+ u8 remove_flow_packet_count[0x20];
+
+ u8 remove_flow_soft_lifetime[0x20];
+
+ u8 reserved_at_60[0x80];
+
+ u8 mode_parameter[0x20];
+
+ u8 replay_protection_window[8][0x20];
+};
+
+struct mlx5_ifc_macsec_offload_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 confidentiality_en[0x1];
+ u8 reserved_at_41[0x1];
+ u8 epn_en[0x1];
+ u8 epn_overlap[0x1];
+ u8 reserved_at_44[0x2];
+ u8 confidentiality_offset[0x2];
+ u8 reserved_at_48[0x4];
+ u8 aso_return_reg[0x4];
+ u8 reserved_at_50[0x10];
+
+ u8 epn_msb[0x20];
+
+ u8 reserved_at_80[0x8];
+ u8 dekn[0x18];
+
+ u8 reserved_at_a0[0x20];
+
+ u8 sci[0x40];
+
+ u8 reserved_at_100[0x8];
+ u8 macsec_aso_access_pd[0x18];
+
+ u8 reserved_at_120[0x60];
+
+ u8 salt[3][0x20];
+
+ u8 reserved_at_1e0[0x20];
+
+ struct mlx5_ifc_macsec_aso_bits macsec_aso;
+};
+
+struct mlx5_ifc_create_macsec_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
+};
+
+struct mlx5_ifc_modify_macsec_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
+};
+
+enum {
+ MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP = BIT(0),
+ MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB = BIT(1),
+};
+
+struct mlx5_ifc_query_macsec_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
};
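/* Sketch: enabling replay protection with a 128-entry window on a
 * MACsec offload object before it is created; obj is assumed to
 * address a macsec_offload_obj layout in the CREATE input.
 */
static void macsec_obj_enable_replay(void *obj)
{
        void *aso = MLX5_ADDR_OF(macsec_offload_obj, obj, macsec_aso);

        MLX5_SET(macsec_aso, aso, valid, 1);
        MLX5_SET(macsec_aso, aso, mode, MLX5_MACSEC_ASO_REPLAY_PROTECTION);
        MLX5_SET(macsec_aso, aso, window_size,
                 MLX5_MACSEC_ASO_REPLAY_WIN_128BIT);
}
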
struct mlx5_ifc_encryption_key_obj_bits {
@@ -10481,12 +11720,101 @@ struct mlx5_ifc_create_encryption_key_in_bits {
};
enum {
+ MLX5_FLOW_METER_MODE_BYTES_IP_LENGTH = 0x0,
+ MLX5_FLOW_METER_MODE_BYTES_CALC_WITH_L2 = 0x1,
+ MLX5_FLOW_METER_MODE_BYTES_CALC_WITH_L2_IPG = 0x2,
+ MLX5_FLOW_METER_MODE_NUM_PACKETS = 0x3,
+};
+
+struct mlx5_ifc_flow_meter_parameters_bits {
+ u8 valid[0x1];
+ u8 bucket_overflow[0x1];
+ u8 start_color[0x2];
+ u8 both_buckets_on_green[0x1];
+ u8 reserved_at_5[0x1];
+ u8 meter_mode[0x2];
+ u8 reserved_at_8[0x18];
+
+ u8 reserved_at_20[0x20];
+
+ u8 reserved_at_40[0x3];
+ u8 cbs_exponent[0x5];
+ u8 cbs_mantissa[0x8];
+ u8 reserved_at_50[0x3];
+ u8 cir_exponent[0x5];
+ u8 cir_mantissa[0x8];
+
+ u8 reserved_at_60[0x20];
+
+ u8 reserved_at_80[0x3];
+ u8 ebs_exponent[0x5];
+ u8 ebs_mantissa[0x8];
+ u8 reserved_at_90[0x3];
+ u8 eir_exponent[0x5];
+ u8 eir_mantissa[0x8];
+
+ u8 reserved_at_a0[0x60];
+};
+
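/* Sketch: decoding the mantissa/exponent rate fields. The convention
 * assumed here follows the driver's flow-meter code, where the
 * committed rate is cir = 8 * 10^9 * cir_mantissa / 2^cir_exponent,
 * in bits per second.
 */
static u64 meter_cir_bps(u8 cir_mantissa, u8 cir_exponent)
{
        return (8ULL * 1000000000ULL * cir_mantissa) >> cir_exponent;
}
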
+struct mlx5_ifc_flow_meter_aso_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x40];
+
+ u8 reserved_at_80[0x8];
+ u8 meter_aso_access_pd[0x18];
+
+ u8 reserved_at_a0[0x160];
+
+ struct mlx5_ifc_flow_meter_parameters_bits flow_meter_parameters[2];
+};
+
+struct mlx5_ifc_create_flow_meter_aso_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_flow_meter_aso_obj_bits flow_meter_aso_obj;
+};
+
+struct mlx5_ifc_sampler_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 table_type[0x8];
+ u8 level[0x8];
+ u8 reserved_at_50[0xf];
+ u8 ignore_flow_level[0x1];
+
+ u8 sample_ratio[0x20];
+
+ u8 reserved_at_80[0x8];
+ u8 sample_table_id[0x18];
+
+ u8 reserved_at_a0[0x8];
+ u8 default_table_id[0x18];
+
+ u8 sw_steering_icm_address_rx[0x40];
+ u8 sw_steering_icm_address_tx[0x40];
+
+ u8 reserved_at_140[0xa0];
+};
+
+struct mlx5_ifc_create_sampler_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_sampler_obj_bits sampler_object;
+};
+
+struct mlx5_ifc_query_sampler_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_sampler_obj_bits sampler_object;
+};
+
+enum {
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128 = 0x0,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256 = 0x1,
};
enum {
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK = 0x1,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS = 0x1,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC = 0x2,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_MACSEC = 0x4,
};
struct mlx5_ifc_tls_static_params_bits {
@@ -10513,17 +11841,236 @@ struct mlx5_ifc_tls_static_params_bits {
};
struct mlx5_ifc_tls_progress_params_bits {
- u8 reserved_at_0[0x8];
- u8 tisn[0x18];
-
u8 next_record_tcp_sn[0x20];
u8 hw_resync_tcp_sn[0x20];
u8 record_tracker_state[0x2];
u8 auth_state[0x2];
- u8 reserved_at_64[0x4];
+ u8 reserved_at_44[0x4];
u8 hw_offset_record_number[0x18];
};
+enum {
+ MLX5_MTT_PERM_READ = 1 << 0,
+ MLX5_MTT_PERM_WRITE = 1 << 1,
+ MLX5_MTT_PERM_RW = MLX5_MTT_PERM_READ | MLX5_MTT_PERM_WRITE,
+};
+
+enum {
+ MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_INITIATOR = 0x0,
+ MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_RESPONDER = 0x1,
+};
+
+struct mlx5_ifc_suspend_vhca_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_suspend_vhca_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+enum {
+ MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_RESPONDER = 0x0,
+ MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_INITIATOR = 0x1,
+};
+
+struct mlx5_ifc_resume_vhca_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_resume_vhca_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_query_vhca_migration_state_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_vhca_migration_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ u8 required_umem_size[0x20];
+
+ u8 reserved_at_a0[0x160];
+};
+
+struct mlx5_ifc_save_vhca_state_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ u8 va[0x40];
+
+ u8 mkey[0x20];
+
+ u8 size[0x20];
+};
+
+struct mlx5_ifc_save_vhca_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 actual_image_size[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
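/* Sketch: driving SAVE_VHCA_STATE for live migration. The opcode
 * constant is assumed to sit with the other command opcodes; va/mkey
 * describe a caller-registered destination buffer.
 */
static int save_vhca_state(struct mlx5_core_dev *mdev, u16 vhca_id,
                           u64 va, u32 mkey, u32 size, u32 *img_size)
{
        u32 out[MLX5_ST_SZ_DW(save_vhca_state_out)] = {};
        u32 in[MLX5_ST_SZ_DW(save_vhca_state_in)] = {};
        int err;

        MLX5_SET(save_vhca_state_in, in, opcode,
                 MLX5_CMD_OP_SAVE_VHCA_STATE);
        MLX5_SET(save_vhca_state_in, in, vhca_id, vhca_id);
        MLX5_SET64(save_vhca_state_in, in, va, va);
        MLX5_SET(save_vhca_state_in, in, mkey, mkey);
        MLX5_SET(save_vhca_state_in, in, size, size);

        err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
        if (!err)
                *img_size = MLX5_GET(save_vhca_state_out, out,
                                     actual_image_size);
        return err;
}
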
+struct mlx5_ifc_load_vhca_state_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ u8 va[0x40];
+
+ u8 mkey[0x20];
+
+ u8 size[0x20];
+};
+
+struct mlx5_ifc_load_vhca_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_adv_virtualization_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 pg_track_log_max_num[0x5];
+ u8 pg_track_max_num_range[0x8];
+ u8 pg_track_log_min_addr_space[0x8];
+ u8 pg_track_log_max_addr_space[0x8];
+
+ u8 reserved_at_20[0x3];
+ u8 pg_track_log_min_msg_size[0x5];
+ u8 reserved_at_28[0x3];
+ u8 pg_track_log_max_msg_size[0x5];
+ u8 reserved_at_30[0x3];
+ u8 pg_track_log_min_page_size[0x5];
+ u8 reserved_at_38[0x3];
+ u8 pg_track_log_max_page_size[0x5];
+
+ u8 reserved_at_40[0x7c0];
+};
+
+struct mlx5_ifc_page_track_report_entry_bits {
+ u8 dirty_address_high[0x20];
+
+ u8 dirty_address_low[0x20];
+};
+
+enum {
+ MLX5_PAGE_TRACK_STATE_TRACKING,
+ MLX5_PAGE_TRACK_STATE_REPORTING,
+ MLX5_PAGE_TRACK_STATE_ERROR,
+};
+
+struct mlx5_ifc_page_track_range_bits {
+ u8 start_address[0x40];
+
+ u8 length[0x40];
+};
+
+struct mlx5_ifc_page_track_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x10];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ u8 state[0x4];
+ u8 track_type[0x4];
+ u8 log_addr_space_size[0x8];
+ u8 reserved_at_90[0x3];
+ u8 log_page_size[0x5];
+ u8 reserved_at_98[0x3];
+ u8 log_msg_size[0x5];
+
+ u8 reserved_at_a0[0x8];
+ u8 reporting_qpn[0x18];
+
+ u8 reserved_at_c0[0x18];
+ u8 num_ranges[0x8];
+
+ u8 reserved_at_e0[0x20];
+
+ u8 range_start_address[0x40];
+
+ u8 length[0x40];
+
+ struct mlx5_ifc_page_track_range_bits track_range[];
+};
+
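/* Sketch: reassembling a dirty-page address from a report entry as
 * delivered on the tracker's reporting QP.
 */
static u64 page_track_entry_addr(void *entry)
{
        u64 hi = MLX5_GET(page_track_report_entry, entry,
                          dirty_address_high);

        return (hi << 32) |
               MLX5_GET(page_track_report_entry, entry, dirty_address_low);
}
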
+struct mlx5_ifc_create_page_track_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_page_track_bits obj_context;
+};
+
+struct mlx5_ifc_modify_page_track_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_page_track_bits obj_context;
+};
+
#endif /* MLX5_IFC_H */
diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h
index 37e065a80a43..0596472923ad 100644
--- a/include/linux/mlx5/mlx5_ifc_fpga.h
+++ b/include/linux/mlx5/mlx5_ifc_fpga.h
@@ -32,31 +32,6 @@
#ifndef MLX5_IFC_FPGA_H
#define MLX5_IFC_FPGA_H
-struct mlx5_ifc_ipv4_layout_bits {
- u8 reserved_at_0[0x60];
-
- u8 ipv4[0x20];
-};
-
-struct mlx5_ifc_ipv6_layout_bits {
- u8 ipv6[16][0x8];
-};
-
-union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
- struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
- struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
- u8 reserved_at_0[0x80];
-};
-
-enum {
- MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX = 0x2c9,
-};
-
-enum {
- MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC = 0x2,
- MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS = 0x3,
-};
-
struct mlx5_ifc_fpga_shell_caps_bits {
u8 max_num_qps[0x10];
u8 reserved_at_10[0x8];
@@ -387,89 +362,6 @@ struct mlx5_ifc_fpga_destroy_qp_out_bits {
u8 reserved_at_40[0x40];
};
-struct mlx5_ifc_tls_extended_cap_bits {
- u8 aes_gcm_128[0x1];
- u8 aes_gcm_256[0x1];
- u8 reserved_at_2[0x1e];
- u8 reserved_at_20[0x20];
- u8 context_capacity_total[0x20];
- u8 context_capacity_rx[0x20];
- u8 context_capacity_tx[0x20];
- u8 reserved_at_a0[0x10];
- u8 tls_counter_size[0x10];
- u8 tls_counters_addr_low[0x20];
- u8 tls_counters_addr_high[0x20];
- u8 rx[0x1];
- u8 tx[0x1];
- u8 tls_v12[0x1];
- u8 tls_v13[0x1];
- u8 lro[0x1];
- u8 ipv6[0x1];
- u8 reserved_at_106[0x1a];
-};
-
-struct mlx5_ifc_ipsec_extended_cap_bits {
- u8 encapsulation[0x20];
-
- u8 reserved_0[0x12];
- u8 v2_command[0x1];
- u8 udp_encap[0x1];
- u8 rx_no_trailer[0x1];
- u8 ipv4_fragment[0x1];
- u8 ipv6[0x1];
- u8 esn[0x1];
- u8 lso[0x1];
- u8 transport_and_tunnel_mode[0x1];
- u8 tunnel_mode[0x1];
- u8 transport_mode[0x1];
- u8 ah_esp[0x1];
- u8 esp[0x1];
- u8 ah[0x1];
- u8 ipv4_options[0x1];
-
- u8 auth_alg[0x20];
-
- u8 enc_alg[0x20];
-
- u8 sa_cap[0x20];
-
- u8 reserved_1[0x10];
- u8 number_of_ipsec_counters[0x10];
-
- u8 ipsec_counters_addr_low[0x20];
- u8 ipsec_counters_addr_high[0x20];
-};
-
-struct mlx5_ifc_ipsec_counters_bits {
- u8 dec_in_packets[0x40];
-
- u8 dec_out_packets[0x40];
-
- u8 dec_bypass_packets[0x40];
-
- u8 enc_in_packets[0x40];
-
- u8 enc_out_packets[0x40];
-
- u8 enc_bypass_packets[0x40];
-
- u8 drop_dec_packets[0x40];
-
- u8 failed_auth_dec_packets[0x40];
-
- u8 drop_enc_packets[0x40];
-
- u8 success_add_sa[0x40];
-
- u8 fail_add_sa[0x40];
-
- u8 success_delete_sa[0x40];
-
- u8 fail_delete_sa[0x40];
-
- u8 dropped_cmd[0x40];
-};
-
enum {
MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RETRY_COUNTER_EXPIRED = 0x1,
MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RNR_EXPIRED = 0x2,
@@ -486,131 +378,4 @@ struct mlx5_ifc_fpga_qp_error_event_bits {
u8 reserved_at_c0[0x8];
u8 fpga_qpn[0x18];
};
-enum mlx5_ifc_fpga_ipsec_response_syndrome {
- MLX5_FPGA_IPSEC_RESPONSE_SUCCESS = 0,
- MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1,
- MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE = 2,
- MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3,
-};
-
-struct mlx5_ifc_fpga_ipsec_cmd_resp {
- __be32 syndrome;
- union {
- __be32 sw_sa_handle;
- __be32 flags;
- };
- u8 reserved[24];
-} __packed;
-
-enum mlx5_ifc_fpga_ipsec_cmd_opcode {
- MLX5_FPGA_IPSEC_CMD_OP_ADD_SA = 0,
- MLX5_FPGA_IPSEC_CMD_OP_DEL_SA = 1,
- MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 = 2,
- MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 = 3,
- MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2 = 4,
- MLX5_FPGA_IPSEC_CMD_OP_SET_CAP = 5,
-};
-
-enum mlx5_ifc_fpga_ipsec_cap {
- MLX5_FPGA_IPSEC_CAP_NO_TRAILER = BIT(0),
-};
-
-struct mlx5_ifc_fpga_ipsec_cmd_cap {
- __be32 cmd;
- __be32 flags;
- u8 reserved[24];
-} __packed;
-
-enum mlx5_ifc_fpga_ipsec_sa_flags {
- MLX5_FPGA_IPSEC_SA_ESN_EN = BIT(0),
- MLX5_FPGA_IPSEC_SA_ESN_OVERLAP = BIT(1),
- MLX5_FPGA_IPSEC_SA_IPV6 = BIT(2),
- MLX5_FPGA_IPSEC_SA_DIR_SX = BIT(3),
- MLX5_FPGA_IPSEC_SA_SPI_EN = BIT(4),
- MLX5_FPGA_IPSEC_SA_SA_VALID = BIT(5),
- MLX5_FPGA_IPSEC_SA_IP_ESP = BIT(6),
- MLX5_FPGA_IPSEC_SA_IP_AH = BIT(7),
-};
-
-enum mlx5_ifc_fpga_ipsec_sa_enc_mode {
- MLX5_FPGA_IPSEC_SA_ENC_MODE_NONE = 0,
- MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128 = 1,
- MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128 = 3,
-};
-
-struct mlx5_ifc_fpga_ipsec_sa_v1 {
- __be32 cmd;
- u8 key_enc[32];
- u8 key_auth[32];
- __be32 sip[4];
- __be32 dip[4];
- union {
- struct {
- __be32 reserved;
- u8 salt_iv[8];
- __be32 salt;
- } __packed gcm;
- struct {
- u8 salt[16];
- } __packed cbc;
- };
- __be32 spi;
- __be32 sw_sa_handle;
- __be16 tfclen;
- u8 enc_mode;
- u8 reserved1[2];
- u8 flags;
- u8 reserved2[2];
-};
-
-struct mlx5_ifc_fpga_ipsec_sa {
- struct mlx5_ifc_fpga_ipsec_sa_v1 ipsec_sa_v1;
- __be16 udp_sp;
- __be16 udp_dp;
- u8 reserved1[4];
- __be32 esn;
- __be16 vid; /* only 12 bits, rest is reserved */
- __be16 reserved2;
-} __packed;
-
-enum fpga_tls_cmds {
- CMD_SETUP_STREAM = 0x1001,
- CMD_TEARDOWN_STREAM = 0x1002,
- CMD_RESYNC_RX = 0x1003,
-};
-
-#define MLX5_TLS_1_2 (0)
-
-#define MLX5_TLS_ALG_AES_GCM_128 (0)
-#define MLX5_TLS_ALG_AES_GCM_256 (1)
-
-struct mlx5_ifc_tls_cmd_bits {
- u8 command_type[0x20];
- u8 ipv6[0x1];
- u8 direction_sx[0x1];
- u8 tls_version[0x2];
- u8 reserved[0x1c];
- u8 swid[0x20];
- u8 src_port[0x10];
- u8 dst_port[0x10];
- union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
- union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
- u8 tls_rcd_sn[0x40];
- u8 tcp_sn[0x20];
- u8 tls_implicit_iv[0x20];
- u8 tls_xor_iv[0x40];
- u8 encryption_key[0x100];
- u8 alg[4];
- u8 reserved2[0x1c];
- u8 reserved3[0x4a0];
-};
-
-struct mlx5_ifc_tls_resp_bits {
- u8 syndrome[0x20];
- u8 stream_id[0x20];
- u8 reserverd[0x40];
-};
-
-#define MLX5_TLS_COMMAND_SIZE (0x100)
-
#endif /* MLX5_IFC_FPGA_H */
diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h
new file mode 100644
index 000000000000..9becdc3fa503
--- /dev/null
+++ b/include/linux/mlx5/mlx5_ifc_vdpa.h
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#ifndef __MLX5_IFC_VDPA_H_
+#define __MLX5_IFC_VDPA_H_
+
+enum {
+ MLX5_VIRTIO_Q_EVENT_MODE_NO_MSIX_MODE = 0x0,
+ MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE = 0x1,
+ MLX5_VIRTIO_Q_EVENT_MODE_MSIX_MODE = 0x2,
+};
+
+enum {
+ MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0,
+ MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1,
+};
+
+enum {
+ MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT =
+ BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT),
+ MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED =
+ BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED),
+};
+
+struct mlx5_ifc_virtio_q_bits {
+ u8 virtio_q_type[0x8];
+ u8 reserved_at_8[0x5];
+ u8 event_mode[0x3];
+ u8 queue_index[0x10];
+
+ u8 full_emulation[0x1];
+ u8 virtio_version_1_0[0x1];
+ u8 reserved_at_22[0x2];
+ u8 offload_type[0x4];
+ u8 event_qpn_or_msix[0x18];
+
+ u8 doorbell_stride_index[0x10];
+ u8 queue_size[0x10];
+
+ u8 device_emulation_id[0x20];
+
+ u8 desc_addr[0x40];
+
+ u8 used_addr[0x40];
+
+ u8 available_addr[0x40];
+
+ u8 virtio_q_mkey[0x20];
+
+ u8 max_tunnel_desc[0x10];
+ u8 reserved_at_170[0x8];
+ u8 error_type[0x8];
+
+ u8 umem_1_id[0x20];
+
+ u8 umem_1_size[0x20];
+
+ u8 umem_1_offset[0x40];
+
+ u8 umem_2_id[0x20];
+
+ u8 umem_2_size[0x20];
+
+ u8 umem_2_offset[0x40];
+
+ u8 umem_3_id[0x20];
+
+ u8 umem_3_size[0x20];
+
+ u8 umem_3_offset[0x40];
+
+ u8 counter_set_id[0x20];
+
+ u8 reserved_at_320[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_at_340[0xc0];
+};
+
+struct mlx5_ifc_virtio_net_q_object_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x20];
+
+ u8 vhca_id[0x10];
+ u8 reserved_at_70[0x10];
+
+ u8 queue_feature_bit_mask_12_3[0xa];
+ u8 dirty_bitmap_dump_enable[0x1];
+ u8 vhost_log_page[0x5];
+ u8 reserved_at_90[0xc];
+ u8 state[0x4];
+
+ u8 reserved_at_a0[0x5];
+ u8 queue_feature_bit_mask_2_0[0x3];
+ u8 tisn_or_qpn[0x18];
+
+ u8 dirty_bitmap_mkey[0x20];
+
+ u8 dirty_bitmap_size[0x20];
+
+ u8 dirty_bitmap_addr[0x40];
+
+ u8 hw_available_index[0x10];
+ u8 hw_used_index[0x10];
+
+ u8 reserved_at_160[0xa0];
+
+ struct mlx5_ifc_virtio_q_bits virtio_q_context;
+};
+
+struct mlx5_ifc_create_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+ struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+struct mlx5_ifc_create_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_query_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+};
+
+struct mlx5_ifc_query_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+
+ struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+enum {
+ MLX5_VIRTQ_MODIFY_MASK_STATE = (u64)1 << 0,
+ MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_PARAMS = (u64)1 << 3,
+ MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_DUMP_ENABLE = (u64)1 << 4,
+};
+
+enum {
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT = 0x0,
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY = 0x1,
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND = 0x2,
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR = 0x3,
+};
+
+/* This indicates that the object was not created or has already
+ * been destroyed. It is safe to assume that this object will never
+ * have so many states
+ */
+enum {
+ MLX5_VIRTIO_NET_Q_OBJECT_NONE = 0xffffffff
+};
+
+enum {
+ MLX5_RQTC_LIST_Q_TYPE_RQ = 0x0,
+ MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q = 0x1,
+};
+
+struct mlx5_ifc_modify_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+ struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+struct mlx5_ifc_modify_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_virtio_q_counters_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x40];
+ u8 received_desc[0x40];
+ u8 completed_desc[0x40];
+ u8 error_cqes[0x20];
+ u8 bad_desc_errors[0x20];
+ u8 exceed_max_chain[0x20];
+ u8 invalid_buffer[0x20];
+ u8 reserved_at_180[0x280];
+};
+
+struct mlx5_ifc_create_virtio_q_counters_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_virtio_q_counters_bits virtio_q_counters;
+};
+
+struct mlx5_ifc_create_virtio_q_counters_out_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_virtio_q_counters_bits virtio_q_counters;
+};
+
+struct mlx5_ifc_destroy_virtio_q_counters_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_q_counters_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_query_virtio_q_counters_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_query_virtio_q_counters_out_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_virtio_q_counters_bits counters;
+};
+
+#endif /* __MLX5_IFC_VDPA_H_ */
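A brief aside on the convention used throughout this new header, with a hedged sketch: in mlx5 *_bits layouts, a member such as u8 queue_size[0x10] describes a 16-bit field in the device's big-endian PRM format, not a byte array, and drivers are expected to go through the MLX5_SET()/MLX5_GET()/MLX5_SET64() accessors and the MLX5_ST_SZ_DW() sizing macro from include/linux/mlx5/device.h rather than touch the struct directly. The field values and the desc_dma_addr variable below are illustrative only.

        /* Sketch: program a virtio queue context through the PRM accessors.
         * 'desc_dma_addr' is a hypothetical DMA address of the descriptor
         * ring; the numeric values are examples, not recommendations.
         */
        u32 in[MLX5_ST_SZ_DW(virtio_q)] = {};

        MLX5_SET(virtio_q, in, queue_size, 256);              /* 16-bit field */
        MLX5_SET(virtio_q, in, event_mode,
                 MLX5_VIRTIO_Q_EVENT_MODE_MSIX_MODE);         /*  3-bit field */
        MLX5_SET64(virtio_q, in, desc_addr, desc_dma_addr);   /* 64-bit field */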
diff --git a/include/linux/mlx5/mpfs.h b/include/linux/mlx5/mpfs.h
new file mode 100644
index 000000000000..bf700c8d5516
--- /dev/null
+++ b/include/linux/mlx5/mpfs.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+ * Copyright (c) 2021 Mellanox Technologies Ltd.
+ */
+
+#ifndef _MLX5_MPFS_
+#define _MLX5_MPFS_
+
+struct mlx5_core_dev;
+
+#ifdef CONFIG_MLX5_MPFS
+int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac);
+int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac);
+#else /* #ifndef CONFIG_MLX5_MPFS */
+static inline int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
+static inline int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
+#endif
+
+#endif
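For orientation, a minimal sketch of how the MPFS helpers above are meant to be used; where dev and the MAC come from is driver plumbing and is assumed here:

        /* Sketch: register a unicast MAC in the eswitch MPFS L2 table so
         * traffic to it reaches this function. When CONFIG_MLX5_MPFS is
         * off, the stubs above turn both calls into no-ops returning 0.
         */
        static int example_set_mac(struct mlx5_core_dev *dev, u8 *mac)
        {
                int err;

                err = mlx5_mpfs_add_mac(dev, mac);
                if (err)
                        return err;
                /* ... MAC in use; on teardown, drop the entry again: */
                return mlx5_mpfs_del_mac(dev, mac);
        }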
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index de9a272c9f3d..e96ee1e348cb 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -45,6 +45,7 @@ enum mlx5_module_id {
MLX5_MODULE_ID_QSFP = 0xC,
MLX5_MODULE_ID_QSFP_PLUS = 0xD,
MLX5_MODULE_ID_QSFP28 = 0x11,
+ MLX5_MODULE_ID_DSFP = 0x1B,
};
enum mlx5_an_status {
@@ -55,13 +56,20 @@ enum mlx5_an_status {
MLX5_AN_LINK_DOWN = 4,
};
-#define MLX5_EEPROM_MAX_BYTES 32
-#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff
#define MLX5_I2C_ADDR_LOW 0x50
#define MLX5_I2C_ADDR_HIGH 0x51
#define MLX5_EEPROM_PAGE_LENGTH 256
#define MLX5_EEPROM_HIGH_PAGE_LENGTH 128
+struct mlx5_module_eeprom_query_params {
+ u16 size;
+ u16 offset;
+ u16 i2c_address;
+ u32 page;
+ u32 bank;
+ u32 module_number;
+};
+
enum mlx5e_link_mode {
MLX5E_1000BASE_CX_SGMII = 0,
MLX5E_1000BASE_KX = 1,
@@ -104,8 +112,11 @@ enum mlx5e_ext_link_mode {
MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR = 8,
MLX5E_CAUI_4_100GBASE_CR4_KR4 = 9,
MLX5E_100GAUI_2_100GBASE_CR2_KR2 = 10,
+ MLX5E_100GAUI_1_100GBASE_CR_KR = 11,
MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12,
+ MLX5E_200GAUI_2_200GBASE_CR2_KR2 = 13,
MLX5E_400GAUI_8 = 15,
+ MLX5E_400GAUI_4_400GBASE_CR4_KR4 = 16,
MLX5E_EXT_LINK_MODES_NUMBER,
};
@@ -122,7 +133,15 @@ enum mlx5e_connector_type {
MLX5E_CONNECTOR_TYPE_NUMBER,
};
-#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+enum mlx5_ptys_width {
+ MLX5_PTYS_WIDTH_1X = 1 << 0,
+ MLX5_PTYS_WIDTH_2X = 1 << 1,
+ MLX5_PTYS_WIDTH_4X = 1 << 2,
+ MLX5_PTYS_WIDTH_8X = 1 << 3,
+ MLX5_PTYS_WIDTH_12X = 1 << 4,
+};
+
+#define MLX5E_PROT_MASK(link_mode) (1U << link_mode)
#define MLX5_GET_ETH_PROTO(reg, out, ext, field) \
(ext ? MLX5_GET(reg, out, ext_##field) : \
MLX5_GET(reg, out, field))
@@ -130,10 +149,9 @@ enum mlx5e_connector_type {
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port);
-int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
- u8 *link_width_oper, u8 local_port);
-int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
- u8 *proto_oper, u8 local_port);
+
+int mlx5_query_ib_port_oper(struct mlx5_core_dev *dev, u16 *link_width_oper,
+ u16 *proto_oper, u8 local_port);
void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status);
@@ -190,6 +208,8 @@ void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
bool *enabled);
int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
u16 offset, u16 size, u8 *data);
+int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
+ struct mlx5_module_eeprom_query_params *params, u8 *data);
int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
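A hedged sketch of the new by-page EEPROM query, using the parameter block introduced above; the page/bank/module values are illustrative:

        /* Sketch: read 128 bytes from the upper half of page 1 of a
         * module EEPROM (for example a CMIS/DSFP module). All parameter
         * values here are examples only.
         */
        static int example_read_eeprom(struct mlx5_core_dev *dev, u8 *data)
        {
                struct mlx5_module_eeprom_query_params params = {
                        .size          = 128,
                        .offset        = 128,   /* upper half of the page */
                        .i2c_address   = MLX5_I2C_ADDR_LOW,
                        .page          = 1,
                        .bank          = 0,
                        .module_number = 0,
                };

                return mlx5_query_module_eeprom_by_page(dev, &params, data);
        }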
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index ae63b1ae9004..4657d5c54abe 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -66,6 +66,7 @@ enum mlx5_qp_optpar {
MLX5_QP_OPTPAR_RETRY_COUNT = 1 << 12,
MLX5_QP_OPTPAR_RNR_RETRY = 1 << 13,
MLX5_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
+ MLX5_QP_OPTPAR_LAG_TX_AFF = 1 << 15,
MLX5_QP_OPTPAR_PRI_PORT = 1 << 16,
MLX5_QP_OPTPAR_SRQN = 1 << 18,
MLX5_QP_OPTPAR_CQN_RCV = 1 << 19,
@@ -161,6 +162,8 @@ enum {
MLX5_SEND_WQE_MAX_WQEBBS = 16,
};
+#define MLX5_SEND_WQE_MAX_SIZE (MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQE_BB)
+
enum {
MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
@@ -201,6 +204,9 @@ struct mlx5_wqe_fmr_seg {
struct mlx5_wqe_ctrl_seg {
__be32 opmod_idx_opcode;
__be32 qpn_ds;
+
+ struct_group(trailer,
+
u8 signature;
u8 rsvd[2];
u8 fm_ce_se;
@@ -208,8 +214,10 @@ struct mlx5_wqe_ctrl_seg {
__be32 general_id;
__be32 imm;
__be32 umr_mkey;
- __be32 tisn;
+ __be32 tis_tir_num;
};
+
+ ); /* end of trailer group */
};
#define MLX5_WQE_CTRL_DS_MASK 0x3f
@@ -229,6 +237,11 @@ enum {
enum {
MLX5_ETH_WQE_SVLAN = 1 << 0,
+ MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC = 1 << 26,
+ MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC = 1 << 27,
+ MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC = 3 << 26,
+ MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC = 1 << 28,
+ MLX5_ETH_WQE_INSERT_TRAILER = 1 << 30,
MLX5_ETH_WQE_INSERT_VLAN = 1 << 15,
};
@@ -239,6 +252,11 @@ enum {
MLX5_ETH_WQE_SWP_OUTER_L4_UDP = 1 << 5,
};
+enum {
+ MLX5_ETH_WQE_FT_META_IPSEC = BIT(0),
+ MLX5_ETH_WQE_FT_META_MACSEC = BIT(1),
+};
+
struct mlx5_wqe_eth_seg {
u8 swp_outer_l4_offset;
u8 swp_outer_l3_offset;
@@ -247,7 +265,7 @@ struct mlx5_wqe_eth_seg {
u8 cs_flags;
u8 swp_flags;
__be16 mss;
- __be32 rsvd2;
+ __be32 flow_table_metadata;
union {
struct {
__be16 sz;
@@ -257,6 +275,7 @@ struct mlx5_wqe_eth_seg {
__be16 type;
__be16 vlan_tci;
} insert;
+ __be32 trailer;
};
};
@@ -315,6 +334,7 @@ struct mlx5_av {
struct mlx5_ib_ah {
struct ib_ah ibah;
struct mlx5_av av;
+ u8 xmit_port;
};
static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
@@ -402,7 +422,7 @@ struct mlx5_wqe_signature_seg {
struct mlx5_wqe_inline_seg {
__be32 byte_count;
- __be32 data[0];
+ __be32 data[];
};
enum mlx5_sig_type {
@@ -458,6 +478,12 @@ struct mlx5_klm {
__be64 va;
};
+struct mlx5_ksm {
+ __be32 reserved;
+ __be32 key;
+ __be64 va;
+};
+
struct mlx5_stride_block_entry {
__be16 stride;
__be16 bcount;
@@ -487,123 +513,8 @@ struct mlx5_core_dct {
struct completion drained;
};
-struct mlx5_qp_path {
- u8 fl_free_ar;
- u8 rsvd3;
- __be16 pkey_index;
- u8 rsvd0;
- u8 grh_mlid;
- __be16 rlid;
- u8 ackto_lt;
- u8 mgid_index;
- u8 static_rate;
- u8 hop_limit;
- __be32 tclass_flowlabel;
- union {
- u8 rgid[16];
- u8 rip[16];
- };
- u8 f_dscp_ecn_prio;
- u8 ecn_dscp;
- __be16 udp_sport;
- u8 dci_cfi_prio_sl;
- u8 port;
- u8 rmac[6];
-};
-
-/* FIXME: use mlx5_ifc.h qpc */
-struct mlx5_qp_context {
- __be32 flags;
- __be32 flags_pd;
- u8 mtu_msgmax;
- u8 rq_size_stride;
- __be16 sq_crq_size;
- __be32 qp_counter_set_usr_page;
- __be32 wire_qpn;
- __be32 log_pg_sz_remote_qpn;
- struct mlx5_qp_path pri_path;
- struct mlx5_qp_path alt_path;
- __be32 params1;
- u8 reserved2[4];
- __be32 next_send_psn;
- __be32 cqn_send;
- __be32 deth_sqpn;
- u8 reserved3[4];
- __be32 last_acked_psn;
- __be32 ssn;
- __be32 params2;
- __be32 rnr_nextrecvpsn;
- __be32 xrcd;
- __be32 cqn_recv;
- __be64 db_rec_addr;
- __be32 qkey;
- __be32 rq_type_srqn;
- __be32 rmsn;
- __be16 hw_sq_wqe_counter;
- __be16 sw_sq_wqe_counter;
- __be16 hw_rcyclic_byte_counter;
- __be16 hw_rq_counter;
- __be16 sw_rcyclic_byte_counter;
- __be16 sw_rq_counter;
- u8 rsvd0[5];
- u8 cgs;
- u8 cs_req;
- u8 cs_res;
- __be64 dc_access_key;
- u8 rsvd1[24];
-};
-
-static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
-{
- return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
-}
-
-int mlx5_core_create_dct(struct mlx5_core_dev *dev,
- struct mlx5_core_dct *qp,
- u32 *in, int inlen,
- u32 *out, int outlen);
-int mlx5_core_create_qp(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp,
- u32 *in,
- int inlen);
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
- u32 opt_param_mask, void *qpc,
- struct mlx5_core_qp *qp);
-int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp);
-int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
- struct mlx5_core_dct *dct);
-int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
- u32 *out, int outlen);
-int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
- u32 *out, int outlen);
-
-int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
- u32 timeout_usec);
-
-int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
-int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
-void mlx5_init_qp_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
-int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
- struct mlx5_core_qp *rq);
-void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *rq);
-int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
- struct mlx5_core_qp *sq);
-void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *sq);
-int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id);
-int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id);
-int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
- int reset, void *out, int out_size);
-
-struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev,
- int res_num,
- enum mlx5_res_type res_type);
-void mlx5_core_res_put(struct mlx5_core_rsc_common *res);
static inline const char *mlx5_qp_type_str(int type)
{
@@ -650,4 +561,11 @@ static inline const char *mlx5_qp_state_str(int state)
}
}
+static inline int mlx5_get_qp_default_ts(struct mlx5_core_dev *dev)
+{
+ return !MLX5_CAP_ROCE(dev, qp_ts_format) ?
+ MLX5_TIMESTAMP_FORMAT_FREE_RUNNING :
+ MLX5_TIMESTAMP_FORMAT_DEFAULT;
+}
+
#endif /* MLX5_QP_H */
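On the struct_group() change to mlx5_wqe_ctrl_seg above: the wrapper (from <linux/stddef.h>) emits the members unchanged, so the wire layout is identical, while also naming the span so callers can address it as one object, which keeps fortified memset()/memcpy() from flagging writes that cross member boundaries. A minimal sketch:

        /* Sketch: fill the first two words of a control segment and zero
         * everything after them in a single, bounds-checkable operation.
         */
        static void example_init_ctrl(struct mlx5_wqe_ctrl_seg *ctrl,
                                      u32 opmod_idx_opcode, u32 qpn_ds)
        {
                ctrl->opmod_idx_opcode = cpu_to_be32(opmod_idx_opcode);
                ctrl->qpn_ds           = cpu_to_be32(qpn_ds);
                memset(&ctrl->trailer, 0, sizeof(ctrl->trailer));
        }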
diff --git a/include/linux/mlx5/rsc_dump.h b/include/linux/mlx5/rsc_dump.h
new file mode 100644
index 000000000000..d11c0b228620
--- /dev/null
+++ b/include/linux/mlx5/rsc_dump.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies inc. */
+
+#include <linux/mlx5/driver.h>
+
+#ifndef __MLX5_RSC_DUMP
+#define __MLX5_RSC_DUMP
+
+enum mlx5_sgmt_type {
+ MLX5_SGMT_TYPE_HW_CQPC,
+ MLX5_SGMT_TYPE_HW_SQPC,
+ MLX5_SGMT_TYPE_HW_RQPC,
+ MLX5_SGMT_TYPE_FULL_SRQC,
+ MLX5_SGMT_TYPE_FULL_CQC,
+ MLX5_SGMT_TYPE_FULL_EQC,
+ MLX5_SGMT_TYPE_FULL_QPC,
+ MLX5_SGMT_TYPE_SND_BUFF,
+ MLX5_SGMT_TYPE_RCV_BUFF,
+ MLX5_SGMT_TYPE_SRQ_BUFF,
+ MLX5_SGMT_TYPE_CQ_BUFF,
+ MLX5_SGMT_TYPE_EQ_BUFF,
+ MLX5_SGMT_TYPE_SX_SLICE,
+ MLX5_SGMT_TYPE_SX_SLICE_ALL,
+ MLX5_SGMT_TYPE_RDB,
+ MLX5_SGMT_TYPE_RX_SLICE_ALL,
+ MLX5_SGMT_TYPE_PRM_QUERY_QP,
+ MLX5_SGMT_TYPE_PRM_QUERY_CQ,
+ MLX5_SGMT_TYPE_PRM_QUERY_MKEY,
+ MLX5_SGMT_TYPE_MENU,
+ MLX5_SGMT_TYPE_TERMINATE,
+
+ MLX5_SGMT_TYPE_NUM, /* Keep last */
+};
+
+struct mlx5_rsc_key {
+ enum mlx5_sgmt_type rsc;
+ int index1;
+ int index2;
+ int num_of_obj1;
+ int num_of_obj2;
+ int size;
+};
+
+struct mlx5_rsc_dump_cmd;
+
+struct mlx5_rsc_dump_cmd *mlx5_rsc_dump_cmd_create(struct mlx5_core_dev *dev,
+ struct mlx5_rsc_key *key);
+void mlx5_rsc_dump_cmd_destroy(struct mlx5_rsc_dump_cmd *cmd);
+int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
+ struct page *page, int *size);
+#endif /* __MLX5_RSC_DUMP */
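A sketch of the intended calling sequence for this dump API, assuming the convention that mlx5_rsc_dump_next() returns a positive value while more data is pending, 0 on completion, and a negative errno on error (this convention is an assumption here, not stated by the header):

        /* Sketch: dump a QP context page by page. 'page' is a page the
         * device can DMA into; allocation, error paths and consumption
         * of the data are elided. 'qpn' is a hypothetical QP number.
         */
        struct mlx5_rsc_key key = {
                .rsc    = MLX5_SGMT_TYPE_FULL_QPC,
                .index1 = qpn,
        };
        struct mlx5_rsc_dump_cmd *cmd;
        int size, err;

        cmd = mlx5_rsc_dump_cmd_create(dev, &key);
        if (IS_ERR(cmd))
                return PTR_ERR(cmd);

        do {
                err = mlx5_rsc_dump_next(dev, cmd, page, &size);
                /* consume 'size' bytes from 'page' here */
        } while (err > 0);

        mlx5_rsc_dump_cmd_destroy(cmd);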
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index dc6b1e7cb8c4..60ffeb6b67ae 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -39,27 +39,20 @@ int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn);
void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn);
int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rqn);
-int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen);
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in);
void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);
int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out);
int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *sqn);
-int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in);
void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out);
int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state);
-int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *tirn);
-int mlx5_core_create_tir_out(struct mlx5_core_dev *dev,
- u32 *in, int inlen,
- u32 *out, int outlen);
-int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
- int inlen);
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, u32 *tirn);
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in);
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
-int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *tisn);
-int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in,
- int inlen);
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, u32 *tisn);
+int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in);
void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rqtn);
@@ -92,4 +85,5 @@ mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
struct mlx5_hairpin_params *params);
void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair);
+void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp);
#endif /* __TRANSOBJ_H__ */
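The dropped inlen arguments above reflect that the create/modify TIR and TIS inputs are fixed-size PRM structures, so the helpers can size them internally. A hedged usage sketch, assuming the MLX5_ST_SZ_DW()/MLX5_ADDR_OF()/MLX5_SET() helpers from <linux/mlx5/device.h> and a transport domain 'tdn' allocated earlier:

        /* Sketch: create a TIS bound to a transport domain. */
        u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
        void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
        u32 tisn;
        int err;

        MLX5_SET(tisc, tisc, transport_domain, tdn);
        err = mlx5_core_create_tis(dev, in, &tisn);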
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 16060fb9b5e5..aad53cb72f17 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -36,14 +36,6 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
-#define MLX5_VPORT_PF_PLACEHOLDER (1u)
-#define MLX5_VPORT_UPLINK_PLACEHOLDER (1u)
-#define MLX5_VPORT_ECPF_PLACEHOLDER(mdev) (mlx5_ecpf_vport_exists(mdev))
-
-#define MLX5_SPECIAL_VPORTS(mdev) (MLX5_VPORT_PF_PLACEHOLDER + \
- MLX5_VPORT_UPLINK_PLACEHOLDER + \
- MLX5_VPORT_ECPF_PLACEHOLDER(mdev))
-
#define MLX5_VPORT_MANAGER(mdev) \
(MLX5_CAP_GEN(mdev, vport_group_manager) && \
(MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
@@ -75,7 +67,7 @@ void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline);
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 min_inline);
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
- u16 vport, u8 *addr);
+ u16 vport, const u8 *addr);
int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
@@ -127,8 +119,7 @@ int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
u8 other_vport, u64 *rx_discard_vport_down,
u64 *tx_discard_vport_down);
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
- int vf, u8 port_num, void *out,
- size_t out_sz);
+ int vf, u8 port_num, void *out);
int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
u8 other_vport, u8 port_num,
int vf,
diff --git a/include/linux/mm-arch-hooks.h b/include/linux/mm-arch-hooks.h
deleted file mode 100644
index 9c4bedc95504..000000000000
--- a/include/linux/mm-arch-hooks.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Generic mm no-op hooks.
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- */
-#ifndef _LINUX_MM_ARCH_HOOKS_H
-#define _LINUX_MM_ARCH_HOOKS_H
-
-#include <asm/mm-arch-hooks.h>
-
-#ifndef arch_remap
-static inline void arch_remap(struct mm_struct *mm,
- unsigned long old_start, unsigned long old_end,
- unsigned long new_start, unsigned long new_end)
-{
-}
-#define arch_remap arch_remap
-#endif
-
-#endif /* _LINUX_MM_ARCH_HOOKS_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c54fb96cb1e6..8bbcccbc5565 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3,9 +3,6 @@
#define _LINUX_MM_H
#include <linux/errno.h>
-
-#ifdef __KERNEL__
-
#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
@@ -15,6 +12,7 @@
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
+#include <linux/mmap_lock.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
@@ -23,22 +21,26 @@
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
+#include <linux/page-flags.h>
#include <linux/page_ref.h>
-#include <linux/memremap.h>
#include <linux/overflow.h>
#include <linux/sizes.h>
+#include <linux/sched.h>
+#include <linux/pgtable.h>
+#include <linux/kasan.h>
+#include <linux/memremap.h>
struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
-struct file_ra_state;
struct user_struct;
-struct writeback_control;
-struct bdi_writeback;
+struct pt_regs;
+
+extern int sysctl_page_lock_unfairness;
void init_mm_internals(void);
-#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
+#ifndef CONFIG_NUMA /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
static inline void set_max_mapnr(unsigned long limit)
@@ -91,7 +93,6 @@ extern int mmap_rnd_compat_bits __read_mostly;
#endif
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
/*
@@ -99,7 +100,7 @@ extern int mmap_rnd_compat_bits __read_mostly;
* embedding these tags into addresses that point to these memory regions, and
* checking that the memory and the pointer tags match on memory accesses)
* redefine this macro to strip tags from pointers.
- * It's defined as noop for arcitectures that don't support memory tagging.
+ * It's defined as noop for architectures that don't support memory tagging.
*/
#ifndef untagged_addr
#define untagged_addr(addr) (addr)
@@ -138,7 +139,7 @@ extern int mmap_rnd_compat_bits __read_mostly;
/* This function must be updated when the size of struct page grows above 80
* or reduces below 56. The idea that compiler optimizes out switch()
* statement, and only leaves move/store instructions. Also the compiler can
- * combine write statments if they are both assignments and can be reordered,
+ * combine write statements if they are both assignments and can be reordered,
* this can result in several of the writes here being dropped.
*/
#define mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
@@ -153,11 +154,14 @@ static inline void __mm_zero_struct_page(struct page *page)
switch (sizeof(struct page)) {
case 80:
- _pp[9] = 0; /* fallthrough */
+ _pp[9] = 0;
+ fallthrough;
case 72:
- _pp[8] = 0; /* fallthrough */
+ _pp[8] = 0;
+ fallthrough;
case 64:
- _pp[7] = 0; /* fallthrough */
+ _pp[7] = 0;
+ fallthrough;
case 56:
_pp[6] = 0;
_pp[5] = 0;
@@ -200,20 +204,38 @@ extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;
-extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
- size_t *, loff_t *);
-extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
- size_t *, loff_t *);
+int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *,
+ loff_t *);
+int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *,
+ loff_t *);
+int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
+ loff_t *);
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
+#define folio_page_idx(folio, p) (page_to_pfn(p) - folio_pfn(folio))
+#else
+#define nth_page(page,n) ((page) + (n))
+#define folio_page_idx(folio, p) ((p) - &(folio)->page)
+#endif
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+/* to align the pointer to the (prev) page boundary */
+#define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)
+
/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
+static inline struct folio *lru_to_folio(struct list_head *head)
+{
+ return list_entry((head)->prev, struct folio, lru);
+}
+
+void setup_initial_init_mm(void *start_code, void *end_code,
+ void *end_data, void *brk);
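A quick worked example for the alignment macros above, assuming 4 KiB pages:

        /* With PAGE_SIZE == 4096:
         *   PAGE_ALIGN(0x1234)      == 0x2000   (round up to next page)
         *   PAGE_ALIGN_DOWN(0x1234) == 0x1000   (truncate to this page)
         *   PAGE_ALIGNED(0x2000)    == true
         */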
/*
* Linux kernel virtual memory manager primitives.
@@ -255,7 +277,6 @@ extern unsigned int kobjsize(const void *objp);
#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
#define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */
#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
-#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
#define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */
#define VM_LOCKED 0x00002000
@@ -324,24 +345,49 @@ extern unsigned int kobjsize(const void *objp);
#elif defined(CONFIG_SPARC64)
# define VM_SPARC_ADI VM_ARCH_1 /* Uses ADI tag for access control */
# define VM_ARCH_CLEAR VM_SPARC_ADI
+#elif defined(CONFIG_ARM64)
+# define VM_ARM64_BTI VM_ARCH_1 /* BTI guarded page, a.k.a. GP bit */
+# define VM_ARCH_CLEAR VM_ARM64_BTI
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */
#endif
-#if defined(CONFIG_X86_INTEL_MPX)
-/* MPX specific bounds table or bounds directory */
-# define VM_MPX VM_HIGH_ARCH_4
+#if defined(CONFIG_ARM64_MTE)
+# define VM_MTE VM_HIGH_ARCH_0 /* Use Tagged memory for access control */
+# define VM_MTE_ALLOWED VM_HIGH_ARCH_1 /* Tagged memory permitted */
#else
-# define VM_MPX VM_NONE
+# define VM_MTE VM_NONE
+# define VM_MTE_ALLOWED VM_NONE
#endif
#ifndef VM_GROWSUP
# define VM_GROWSUP VM_NONE
#endif
+#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
+# define VM_UFFD_MINOR_BIT 37
+# define VM_UFFD_MINOR BIT(VM_UFFD_MINOR_BIT) /* UFFD minor faults */
+#else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
+# define VM_UFFD_MINOR VM_NONE
+#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
+
/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ)
+#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
+
+/* Common data flag combinations */
+#define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_DATA_FLAGS_NON_EXEC (VM_READ | VM_WRITE | VM_MAYREAD | \
+ VM_MAYWRITE | VM_MAYEXEC)
+#define VM_DATA_FLAGS_EXEC (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#ifndef VM_DATA_DEFAULT_FLAGS /* arch can override this */
+#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_EXEC
+#endif
+
#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif
@@ -354,12 +400,18 @@ extern unsigned int kobjsize(const void *objp);
#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+/* VMA basic access permission flags */
+#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
+
+
/*
* Special vmas that are non-mergable, non-mlock()able.
- * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
*/
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
+/* This mask prevents VMA from being scanned with khugepaged */
+#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
+
/* This mask defines which mm->def_flags a process can inherit its parent */
#define VM_INIT_DEF_MASK VM_NOHUGEPAGE
@@ -376,17 +428,33 @@ extern unsigned int kobjsize(const void *objp);
* mapping from the currently active vm_flags protection bits (the
* low four bits) to a page protection mask..
*/
-extern pgprot_t protection_map[16];
-#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */
-#define FAULT_FLAG_MKWRITE 0x02 /* Fault was mkwrite of existing pte */
-#define FAULT_FLAG_ALLOW_RETRY 0x04 /* Retry fault if blocking */
-#define FAULT_FLAG_RETRY_NOWAIT 0x08 /* Don't drop mmap_sem and wait when retrying */
-#define FAULT_FLAG_KILLABLE 0x10 /* The fault task is in SIGKILL killable region */
-#define FAULT_FLAG_TRIED 0x20 /* Second try */
-#define FAULT_FLAG_USER 0x40 /* The fault originated in userspace */
-#define FAULT_FLAG_REMOTE 0x80 /* faulting for non current tsk/mm */
-#define FAULT_FLAG_INSTRUCTION 0x100 /* The fault was during an instruction fetch */
+/*
+ * The default fault flags that should be used by most of the
+ * arch-specific page fault handlers.
+ */
+#define FAULT_FLAG_DEFAULT (FAULT_FLAG_ALLOW_RETRY | \
+ FAULT_FLAG_KILLABLE | \
+ FAULT_FLAG_INTERRUPTIBLE)
+
+/**
+ * fault_flag_allow_retry_first - check ALLOW_RETRY the first time
+ * @flags: Fault flags.
+ *
+ * This is mostly used for places where we want to try to avoid taking
+ * the mmap_lock for too long a time when waiting for another condition
+ * to change, in which case we can try to be polite to release the
+ * mmap_lock in the first round to avoid potential starvation of other
+ * processes that would also want the mmap_lock.
+ *
+ * Return: true if the page fault allows retry and this is the first
+ * attempt of the fault handling; false otherwise.
+ */
+static inline bool fault_flag_allow_retry_first(enum fault_flag flags)
+{
+ return (flags & FAULT_FLAG_ALLOW_RETRY) &&
+ (!(flags & FAULT_FLAG_TRIED));
+}
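A simplified sketch of the arch fault-handler pattern the defaults above are meant for; real handlers also check for fatal signals and re-find the VMA after re-taking the lock, and 'mm', 'vma', 'address' and 'regs' are assumed to be set up already:

        enum fault_flag flags = FAULT_FLAG_DEFAULT;
        vm_fault_t fault;

retry:
        fault = handle_mm_fault(vma, address, flags, regs);
        if (fault & VM_FAULT_RETRY) {
                /* The core dropped mmap_lock. Only the first attempt may
                 * retry, so fault_flag_allow_retry_first() returns false
                 * once FAULT_FLAG_TRIED is set.
                 */
                flags |= FAULT_FLAG_TRIED;
                mmap_read_lock(mm);
                goto retry;
        }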
#define FAULT_FLAG_TRACE \
{ FAULT_FLAG_WRITE, "WRITE" }, \
@@ -397,10 +465,11 @@ extern pgprot_t protection_map[16];
{ FAULT_FLAG_TRIED, "TRIED" }, \
{ FAULT_FLAG_USER, "USER" }, \
{ FAULT_FLAG_REMOTE, "REMOTE" }, \
- { FAULT_FLAG_INSTRUCTION, "INSTRUCTION" }
+ { FAULT_FLAG_INSTRUCTION, "INSTRUCTION" }, \
+ { FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" }
/*
- * vm_fault is filled by the the pagefault handler and passed to the vma's
+ * vm_fault is filled by the pagefault handler and passed to the vma's
* ->fault function. The vma's ->fault is responsible for returning a bitmask
* of VM_FAULT_xxx flags that give details about how the fault was handled.
*
@@ -410,20 +479,28 @@ extern pgprot_t protection_map[16];
* pgoff should be used in favour of virtual_address, if possible.
*/
struct vm_fault {
- struct vm_area_struct *vma; /* Target VMA */
- unsigned int flags; /* FAULT_FLAG_xxx flags */
- gfp_t gfp_mask; /* gfp mask to be used for allocations */
- pgoff_t pgoff; /* Logical page offset based on vma */
- unsigned long address; /* Faulting virtual address */
+ const struct {
+ struct vm_area_struct *vma; /* Target VMA */
+ gfp_t gfp_mask; /* gfp mask to be used for allocations */
+ pgoff_t pgoff; /* Logical page offset based on vma */
+ unsigned long address; /* Faulting virtual address - masked */
+ unsigned long real_address; /* Faulting virtual address - unmasked */
+ };
+ enum fault_flag flags; /* FAULT_FLAG_xxx flags
+ * XXX: should really be 'const' */
pmd_t *pmd; /* Pointer to pmd entry matching
* the 'address' */
pud_t *pud; /* Pointer to pud entry matching
* the 'address'
*/
- pte_t orig_pte; /* Value of PTE at the time of fault */
+ union {
+ pte_t orig_pte; /* Value of PTE at the time of fault */
+ pmd_t orig_pmd; /* Value of PMD at the time of fault,
+ * used by PMD fault only.
+ */
+ };
struct page *cow_page; /* Page handler may use for COW fault */
- struct mem_cgroup *memcg; /* Cgroup cow_page belongs to */
struct page *page; /* ->fault handlers should return a
* page here, unless VM_FAULT_NOPAGE
* is set (which is also implied by
@@ -439,8 +516,8 @@ struct vm_fault {
* is not NULL, otherwise pmd.
*/
pgtable_t prealloc_pte; /* Pre-allocated pte page table.
- * vm_ops->map_pages() calls
- * alloc_set_pte() from atomic context.
+ * vm_ops->map_pages() sets up a page
+ * table from atomic context.
* do_fault_around() pre-allocates
* page table to avoid allocation from
* atomic context.
@@ -461,13 +538,25 @@ enum page_entry_size {
*/
struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
+ /**
+ * @close: Called when the VMA is being removed from the MM.
+ * Context: User context. May sleep. Caller holds mmap_lock.
+ */
void (*close)(struct vm_area_struct * area);
- int (*split)(struct vm_area_struct * area, unsigned long addr);
- int (*mremap)(struct vm_area_struct * area);
+ /* Called any time before splitting to check if it's allowed */
+ int (*may_split)(struct vm_area_struct *area, unsigned long addr);
+ int (*mremap)(struct vm_area_struct *area);
+ /*
+ * Called by mprotect() to make driver-specific permission
+ * checks before mprotect() is finalised. The VMA must not
+ * be modified. Returns 0 if mprotect() can proceed.
+ */
+ int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, unsigned long newflags);
vm_fault_t (*fault)(struct vm_fault *vmf);
vm_fault_t (*huge_fault)(struct vm_fault *vmf,
enum page_entry_size pe_size);
- void (*map_pages)(struct vm_fault *vmf,
+ vm_fault_t (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
unsigned long (*pagesize)(struct vm_area_struct * area);
@@ -479,7 +568,8 @@ struct vm_operations_struct {
vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
/* called by access_process_vm when get_user_pages() fails, typically
- * for use by special VMAs that can switch between memory and hardware
+ * for use by special VMAs. See also generic_access_phys() for a generic
+ * implementation useful for any iomem mapping.
*/
int (*access)(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
@@ -504,7 +594,7 @@ struct vm_operations_struct {
* (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
* in mm/mempolicy.c will do this automatically.
* get_policy() must NOT add a ref if the policy at (vma,addr) is not
- * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
+ * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
* If no [shared/vma] mempolicy exists at the addr, get_policy() op
* must return NULL--i.e., do not "fallback" to task or system default
* policy.
@@ -541,6 +631,68 @@ static inline bool vma_is_anonymous(struct vm_area_struct *vma)
return !vma->vm_ops;
}
+static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
+{
+ int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
+
+ if (!maybe_stack)
+ return false;
+
+ if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
+ VM_STACK_INCOMPLETE_SETUP)
+ return true;
+
+ return false;
+}
+
+static inline bool vma_is_foreign(struct vm_area_struct *vma)
+{
+ if (!current->mm)
+ return true;
+
+ if (current->mm != vma->vm_mm)
+ return true;
+
+ return false;
+}
+
+static inline bool vma_is_accessible(struct vm_area_struct *vma)
+{
+ return vma->vm_flags & VM_ACCESS_FLAGS;
+}
+
+static inline
+struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
+{
+ return mas_find(&vmi->mas, max);
+}
+
+static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
+{
+ /*
+ * Uses vma_find() to get the first VMA when the iterator starts.
+ * Calling mas_next() could skip the first entry.
+ */
+ return vma_find(vmi, ULONG_MAX);
+}
+
+static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
+{
+ return mas_prev(&vmi->mas, 0);
+}
+
+static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
+{
+ return vmi->mas.index;
+}
+
+#define for_each_vma(__vmi, __vma) \
+ while (((__vma) = vma_next(&(__vmi))) != NULL)
+
+/* The MM code likes to work with exclusive end addresses */
+#define for_each_vma_range(__vmi, __vma, __end) \
+ while (((__vma) = vma_find(&(__vmi), (__end) - 1)) != NULL)
+
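A hedged sketch of the iterator API above; VMA_ITERATOR() is assumed to come from <linux/mm_types.h>, and the walk must run under the mmap_lock:

        /* Sketch: walk every VMA in an mm with the maple-tree iterator. */
        struct vm_area_struct *vma;
        VMA_ITERATOR(vmi, mm, 0);       /* start the walk at address 0 */

        mmap_read_lock(mm);
        for_each_vma(vmi, vma)
                pr_info("vma [%lx, %lx)\n", vma->vm_start, vma->vm_end);
        mmap_read_unlock(mm);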
#ifdef CONFIG_SHMEM
/*
* The vma_is_shmem is not inline because it is used only by slow
@@ -559,11 +711,29 @@ int vma_is_stack_for_current(struct vm_area_struct *vma);
struct mmu_gather;
struct inode;
-/*
- * FIXME: take this include out, include page-flags.h in
- * files which need it (119 of them)
+static inline unsigned int compound_order(struct page *page)
+{
+ if (!PageHead(page))
+ return 0;
+ return page[1].compound_order;
+}
+
+/**
+ * folio_order - The allocation order of a folio.
+ * @folio: The folio.
+ *
+ * A folio is composed of 2^order pages. See get_order() for the definition
+ * of order.
+ *
+ * Return: The order of the folio.
*/
-#include <linux/page-flags.h>
+static inline unsigned int folio_order(struct folio *folio)
+{
+ if (!folio_test_large(folio))
+ return 0;
+ return folio->_folio_order;
+}
+
#include <linux/huge_mm.h>
/*
@@ -588,13 +758,18 @@ static inline int put_page_testzero(struct page *page)
return page_ref_dec_and_test(page);
}
+static inline int folio_put_testzero(struct folio *folio)
+{
+ return put_page_testzero(&folio->page);
+}
+
/*
* Try to grab a ref unless the page has a refcount of zero, return false if
* that is the case.
* This can be called when MMU is off so it must not access
* any of the virtual mappings.
*/
-static inline int get_page_unless_zero(struct page *page)
+static inline bool get_page_unless_zero(struct page *page)
{
return page_ref_add_unless(page, 1, 0);
}
@@ -639,42 +814,26 @@ static inline int is_vmalloc_or_module_addr(const void *x)
}
#endif
-extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
-static inline void *kvmalloc(size_t size, gfp_t flags)
-{
- return kvmalloc_node(size, flags, NUMA_NO_NODE);
-}
-static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
-{
- return kvmalloc_node(size, flags | __GFP_ZERO, node);
-}
-static inline void *kvzalloc(size_t size, gfp_t flags)
-{
- return kvmalloc(size, flags | __GFP_ZERO);
-}
-
-static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
-{
- size_t bytes;
-
- if (unlikely(check_mul_overflow(n, size, &bytes)))
- return NULL;
-
- return kvmalloc(bytes, flags);
-}
-
-static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
+/*
+ * How many times the entire folio is mapped as a single unit (eg by a
+ * PMD or PUD entry). This is probably not what you want, except for
+ * debugging purposes; look at folio_mapcount() or page_mapcount()
+ * instead.
+ */
+static inline int folio_entire_mapcount(struct folio *folio)
{
- return kvmalloc_array(n, size, flags | __GFP_ZERO);
+ VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+ return atomic_read(folio_mapcount_ptr(folio)) + 1;
}
-extern void kvfree(const void *addr);
-
+/*
+ * Mapcount of compound page as a whole, does not include mapped sub-pages.
+ *
+ * Must be called only for compound pages.
+ */
static inline int compound_mapcount(struct page *page)
{
- VM_BUG_ON_PAGE(!PageCompound(page), page);
- page = compound_head(page);
- return atomic_read(compound_mapcount_ptr(page)) + 1;
+ return folio_entire_mapcount(page_folio(page));
}
/*
@@ -689,30 +848,33 @@ static inline void page_mapcount_reset(struct page *page)
int __page_mapcount(struct page *page);
+/*
+ * Mapcount of 0-order page; when compound sub-page, includes
+ * compound_mapcount().
+ *
+ * Result is undefined for pages which cannot be mapped into userspace.
+ * For example SLAB or special types of pages. See function page_has_type().
+ * They use this place in struct page differently.
+ */
static inline int page_mapcount(struct page *page)
{
- VM_BUG_ON_PAGE(PageSlab(page), page);
-
if (unlikely(PageCompound(page)))
return __page_mapcount(page);
return atomic_read(&page->_mapcount) + 1;
}
+int folio_mapcount(struct folio *folio);
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-int total_mapcount(struct page *page);
-int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
-#else
static inline int total_mapcount(struct page *page)
{
- return page_mapcount(page);
+ return folio_mapcount(page_folio(page));
}
-static inline int page_trans_huge_mapcount(struct page *page,
- int *total_mapcount)
+
+#else
+static inline int total_mapcount(struct page *page)
{
- int mapcount = page_mapcount(page);
- if (total_mapcount)
- *total_mapcount = mapcount;
- return mapcount;
+ return page_mapcount(page);
}
#endif
@@ -723,11 +885,21 @@ static inline struct page *virt_to_head_page(const void *x)
return compound_head(page);
}
-void __put_page(struct page *page);
+static inline struct folio *virt_to_folio(const void *x)
+{
+ struct page *page = virt_to_page(x);
+
+ return page_folio(page);
+}
+
+void __folio_put(struct folio *folio);
void put_pages_list(struct list_head *pages);
void split_page(struct page *page, unsigned int order);
+void folio_copy(struct folio *dst, struct folio *src);
+
+unsigned long nr_free_buffer_pages(void);
/*
* Compound pages have a destructor function. Provide a
@@ -748,7 +920,7 @@ enum compound_dtor_id {
#endif
NR_COMPOUND_DTORS,
};
-extern compound_page_dtor * const compound_page_dtors[];
+extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS];
static inline void set_compound_page_dtor(struct page *page,
enum compound_dtor_id compound_dtor)
@@ -757,28 +929,31 @@ static inline void set_compound_page_dtor(struct page *page,
page[1].compound_dtor = compound_dtor;
}
-static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
-{
- VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
- return compound_page_dtors[page[1].compound_dtor];
-}
+void destroy_large_folio(struct folio *folio);
-static inline unsigned int compound_order(struct page *page)
+static inline int head_compound_pincount(struct page *head)
{
- if (!PageHead(page))
- return 0;
- return page[1].compound_order;
+ return atomic_read(compound_pincount_ptr(head));
}
static inline void set_compound_order(struct page *page, unsigned int order)
{
page[1].compound_order = order;
+#ifdef CONFIG_64BIT
+ page[1].compound_nr = 1U << order;
+#endif
}
/* Returns the number of pages in this potentially compound page. */
static inline unsigned long compound_nr(struct page *page)
{
+ if (!PageHead(page))
+ return 1;
+#ifdef CONFIG_64BIT
+ return page[1].compound_nr;
+#else
return 1UL << compound_order(page);
+#endif
}
/* Returns the number of bytes in this potentially compound page. */
@@ -793,6 +968,37 @@ static inline unsigned int page_shift(struct page *page)
return PAGE_SHIFT + compound_order(page);
}
+/**
+ * thp_order - Order of a transparent huge page.
+ * @page: Head page of a transparent huge page.
+ */
+static inline unsigned int thp_order(struct page *page)
+{
+ VM_BUG_ON_PGFLAGS(PageTail(page), page);
+ return compound_order(page);
+}
+
+/**
+ * thp_nr_pages - The number of regular pages in this huge page.
+ * @page: The head page of a huge page.
+ */
+static inline int thp_nr_pages(struct page *page)
+{
+ VM_BUG_ON_PGFLAGS(PageTail(page), page);
+ return compound_nr(page);
+}
+
+/**
+ * thp_size - Size of a transparent huge page.
+ * @page: Head page of a transparent huge page.
+ *
+ * Return: Number of bytes in this page.
+ */
+static inline unsigned long thp_size(struct page *page)
+{
+ return PAGE_SIZE << thp_order(page);
+}
+
void free_compound_page(struct page *page);
#ifdef CONFIG_MMU
@@ -809,8 +1015,9 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
return pte;
}
-vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
- struct page *page);
+vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
+void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr);
+
vm_fault_t finish_fault(struct vm_fault *vmf);
vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#endif
@@ -875,132 +1082,55 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
* back into memory.
*/
-/*
- * The zone field is never updated after free_area_init_core()
- * sets it, so none of the operations on it need to be atomic.
- */
-
-/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
-#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
-#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
-#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
-#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
-#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
-
-/*
- * Define the bit shifts to access each section. For non-existent
- * sections we define the shift as 0; that plus a 0 mask ensures
- * the compiler will optimise away reference to them.
- */
-#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
-#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
-#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
-#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
-#define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))
-
-/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
-#ifdef NODE_NOT_IN_PAGE_FLAGS
-#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
-#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \
- SECTIONS_PGOFF : ZONES_PGOFF)
-#else
-#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
-#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? \
- NODES_PGOFF : ZONES_PGOFF)
-#endif
-
-#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
-
-#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
-#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
-#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
-#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
-#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1)
-#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
-
-static inline enum zone_type page_zonenum(const struct page *page)
-{
- return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
-}
-
-#ifdef CONFIG_ZONE_DEVICE
-static inline bool is_zone_device_page(const struct page *page)
-{
- return page_zonenum(page) == ZONE_DEVICE;
-}
-extern void memmap_init_zone_device(struct zone *, unsigned long,
- unsigned long, struct dev_pagemap *);
-#else
-static inline bool is_zone_device_page(const struct page *page)
-{
- return false;
-}
-#endif
-
-#ifdef CONFIG_DEV_PAGEMAP_OPS
-void free_devmap_managed_page(struct page *page);
+#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX)
DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
-static inline bool page_is_devmap_managed(struct page *page)
+bool __put_devmap_managed_page_refs(struct page *page, int refs);
+static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
{
if (!static_branch_unlikely(&devmap_managed_key))
return false;
if (!is_zone_device_page(page))
return false;
- switch (page->pgmap->type) {
- case MEMORY_DEVICE_PRIVATE:
- case MEMORY_DEVICE_FS_DAX:
- return true;
- default:
- break;
- }
- return false;
+ return __put_devmap_managed_page_refs(page, refs);
}
-
-void put_devmap_managed_page(struct page *page);
-
-#else /* CONFIG_DEV_PAGEMAP_OPS */
-static inline bool page_is_devmap_managed(struct page *page)
+#else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
+static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
{
return false;
}
+#endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
-static inline void put_devmap_managed_page(struct page *page)
+static inline bool put_devmap_managed_page(struct page *page)
{
+ return put_devmap_managed_page_refs(page, 1);
}
-#endif /* CONFIG_DEV_PAGEMAP_OPS */
-static inline bool is_device_private_page(const struct page *page)
-{
- return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
- IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
- is_zone_device_page(page) &&
- page->pgmap->type == MEMORY_DEVICE_PRIVATE;
-}
+/* 127: arbitrary random number, small enough to assemble well */
+#define folio_ref_zero_or_close_to_overflow(folio) \
+ ((unsigned int) folio_ref_count(folio) + 127u <= 127u)
-static inline bool is_pci_p2pdma_page(const struct page *page)
+/**
+ * folio_get - Increment the reference count on a folio.
+ * @folio: The folio.
+ *
+ * Context: May be called in any context, as long as you know that
+ * you have a refcount on the folio. If you do not already have one,
+ * folio_try_get() may be the right interface for you to use.
+ */
+static inline void folio_get(struct folio *folio)
{
- return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
- IS_ENABLED(CONFIG_PCI_P2PDMA) &&
- is_zone_device_page(page) &&
- page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
+ VM_BUG_ON_FOLIO(folio_ref_zero_or_close_to_overflow(folio), folio);
+ folio_ref_inc(folio);
}
-/* 127: arbitrary random number, small enough to assemble well */
-#define page_ref_zero_or_close_to_overflow(page) \
- ((unsigned int) page_ref_count(page) + 127u <= 127u)
-
static inline void get_page(struct page *page)
{
- page = compound_head(page);
- /*
- * Getting a normal page or the head of a compound page
- * requires to already have an elevated page->_refcount.
- */
- VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
- page_ref_inc(page);
+ folio_get(page_folio(page));
}
+bool __must_check try_grab_page(struct page *page, unsigned int flags);
+
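A small sketch tying the pieces above together: get_page() is now a thin wrapper that resolves the folio once and takes the reference there, and callers that already work in folio units can pair folio_get() with folio_put() (documented just below) directly:

        /* Sketch: take and drop an extra reference through the folio API.
         * Per the folio_get() context rules, the caller must already own
         * a reference to 'page'.
         */
        struct folio *folio = page_folio(page);

        folio_get(folio);
        /* ... use the memory ... */
        folio_put(folio);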
static inline __must_check bool try_get_page(struct page *page)
{
page = compound_head(page);
@@ -1010,48 +1140,121 @@ static inline __must_check bool try_get_page(struct page *page)
return true;
}
+/**
+ * folio_put - Decrement the reference count on a folio.
+ * @folio: The folio.
+ *
+ * If the folio's reference count reaches zero, the memory will be
+ * released back to the page allocator and may be used by another
+ * allocation immediately. Do not access the memory or the struct folio
+ * after calling folio_put() unless you can be sure that it wasn't the
+ * last reference.
+ *
+ * Context: May be called in process or interrupt context, but not in NMI
+ * context. May be called while holding a spinlock.
+ */
+static inline void folio_put(struct folio *folio)
+{
+ if (folio_put_testzero(folio))
+ __folio_put(folio);
+}
+
+/**
+ * folio_put_refs - Reduce the reference count on a folio.
+ * @folio: The folio.
+ * @refs: The amount to subtract from the folio's reference count.
+ *
+ * If the folio's reference count reaches zero, the memory will be
+ * released back to the page allocator and may be used by another
+ * allocation immediately. Do not access the memory or the struct folio
+ * after calling folio_put_refs() unless you can be sure that these weren't
+ * the last references.
+ *
+ * Context: May be called in process or interrupt context, but not in NMI
+ * context. May be called while holding a spinlock.
+ */
+static inline void folio_put_refs(struct folio *folio, int refs)
+{
+ if (folio_ref_sub_and_test(folio, refs))
+ __folio_put(folio);
+}
+
+void release_pages(struct page **pages, int nr);
+
+/**
+ * folios_put - Decrement the reference count on an array of folios.
+ * @folios: The folios.
+ * @nr: How many folios there are.
+ *
+ * Like folio_put(), but for an array of folios. This is more efficient
+ * than writing the loop yourself as it will optimise the locks which
+ * need to be taken if the folios are freed.
+ *
+ * Context: May be called in process or interrupt context, but not in NMI
+ * context. May be called while holding a spinlock.
+ */
+static inline void folios_put(struct folio **folios, unsigned int nr)
+{
+ release_pages((struct page **)folios, nr);
+}
+
static inline void put_page(struct page *page)
{
- page = compound_head(page);
+ struct folio *folio = page_folio(page);
/*
- * For devmap managed pages we need to catch refcount transition from
- * 2 to 1, when refcount reach one it means the page is free and we
- * need to inform the device driver through callback. See
- * include/linux/memremap.h and HMM for details.
+ * For some devmap managed pages we need to catch refcount transition
+ * from 2 to 1:
*/
- if (page_is_devmap_managed(page)) {
- put_devmap_managed_page(page);
+ if (put_devmap_managed_page(&folio->page))
return;
- }
-
- if (put_page_testzero(page))
- __put_page(page);
+ folio_put(folio);
}
-/**
- * unpin_user_page() - release a gup-pinned page
- * @page: pointer to page to be released
+/*
+ * GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload
+ * the page's refcount so that two separate items are tracked: the original page
+ * reference count, and also a new count of how many pin_user_pages() calls were
+ * made against the page. ("gup-pinned" is another term for the latter).
+ *
+ * With this scheme, pin_user_pages() becomes special: such pages are marked as
+ * distinct from normal pages. As such, the unpin_user_page() call (and its
+ * variants) must be used in order to release gup-pinned pages.
+ *
+ * Choice of value:
*
- * Pages that were pinned via pin_user_pages*() must be released via either
- * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
- * that eventually such pages can be separately tracked and uniquely handled. In
- * particular, interactions with RDMA and filesystems need special handling.
+ * By making GUP_PIN_COUNTING_BIAS a power of two, debugging of page reference
+ * counts with respect to pin_user_pages() and unpin_user_page() becomes
+ * simpler, due to the fact that adding an even power of two to the page
+ * refcount has the effect of using only the upper N bits, for the code that
+ * counts up using the bias value. This means that the lower bits are left for
+ * the exclusive use of the original code that increments and decrements by one
+ * (or at least, by much smaller values than the bias value).
*
- * unpin_user_page() and put_page() are not interchangeable, despite this early
- * implementation that makes them look the same. unpin_user_page() calls must
- * be perfectly matched up with pin*() calls.
+ * Of course, once the lower bits overflow into the upper bits (and this is
+ * OK, because subtraction recovers the original values), then visual inspection
+ * no longer suffices to directly view the separate counts. However, for normal
+ * applications that don't have huge page reference counts, this won't be an
+ * issue.
+ *
+ * Locking: the lockless algorithm described in folio_try_get_rcu()
+ * provides safe operation for get_user_pages(), page_mkclean() and
+ * other calls that race to set up page table entries.
*/
-static inline void unpin_user_page(struct page *page)
-{
- put_page(page);
-}
+#define GUP_PIN_COUNTING_BIAS (1U << 10)
+void unpin_user_page(struct page *page);
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
bool make_dirty);
-
+void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
+ bool make_dirty);
void unpin_user_pages(struct page **pages, unsigned long npages);
+static inline bool is_cow_mapping(vm_flags_t flags)
+{
+ return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
+}
+
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif
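A worked illustration of the bias arithmetic described above, using the value just defined (GUP_PIN_COUNTING_BIAS == 1 << 10 == 1024):

        /* One allocation reference, one get_page(), two pin_user_pages()
         * pins on the same page:
         *
         *   refcount = 1 + 1 + 2 * 1024 = 2050
         *
         * Reading it back: refcount / 1024 == 2 pins (upper bits),
         * refcount % 1024 == 2 ordinary references (lower bits), until
         * the lower bits overflow into the pin count, which the comment
         * above explains is acceptable since subtraction still recovers
         * the original values.
         */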
@@ -1080,7 +1283,24 @@ static inline int page_to_nid(const struct page *page)
}
#endif
+static inline int folio_nid(const struct folio *folio)
+{
+ return page_to_nid(&folio->page);
+}
+
#ifdef CONFIG_NUMA_BALANCING
+/* page access time bits needs to hold at least 4 seconds */
+#define PAGE_ACCESS_TIME_MIN_BITS 12
+#if LAST_CPUPID_SHIFT < PAGE_ACCESS_TIME_MIN_BITS
+#define PAGE_ACCESS_TIME_BUCKETS \
+ (PAGE_ACCESS_TIME_MIN_BITS - LAST_CPUPID_SHIFT)
+#else
+#define PAGE_ACCESS_TIME_BUCKETS 0
+#endif
+
+#define PAGE_ACCESS_TIME_MASK \
+ (LAST_CPUPID_MASK << PAGE_ACCESS_TIME_BUCKETS)
+
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
@@ -1144,12 +1364,25 @@ static inline void page_cpupid_reset_last(struct page *page)
page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
+
+static inline int xchg_page_access_time(struct page *page, int time)
+{
+ int last_time;
+
+ last_time = page_cpupid_xchg_last(page, time >> PAGE_ACCESS_TIME_BUCKETS);
+ return last_time << PAGE_ACCESS_TIME_BUCKETS;
+}
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
return page_to_nid(page); /* XXX */
}
+static inline int xchg_page_access_time(struct page *page, int time)
+{
+ return 0;
+}
+
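A worked example of the bucket arithmetic above: with HZ == 1000, four seconds is 4000 jiffies, just under 2^12, which is where PAGE_ACCESS_TIME_MIN_BITS == 12 comes from. The LAST_CPUPID_SHIFT value below is hypothetical, chosen only for illustration:

        /* Assuming LAST_CPUPID_SHIFT == 8:
         *   PAGE_ACCESS_TIME_BUCKETS = 12 - 8 = 4
         *   stored  = time >> 4       (granularity of 16 jiffies)
         *   readout = stored << 4     (time recovered to within 15 jiffies)
         */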
static inline int page_cpupid_last(struct page *page)
{
return page_to_nid(page); /* XXX */
@@ -1177,7 +1410,7 @@ static inline int cpu_pid_to_cpupid(int nid, int pid)
static inline bool cpupid_pid_unset(int cpupid)
{
- return 1;
+ return true;
}
static inline void page_cpupid_reset_last(struct page *page)
@@ -1190,23 +1423,50 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
}
#endif /* CONFIG_NUMA_BALANCING */
-#ifdef CONFIG_KASAN_SW_TAGS
+#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
+
+/*
+ * KASAN per-page tags are stored xor'ed with 0xff. This avoids having to
+ * set the tag for every page to the native kernel tag value 0xff, as the
+ * default value 0x00 already maps to 0xff.
+ */
+
static inline u8 page_kasan_tag(const struct page *page)
{
- return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+ u8 tag = 0xff;
+
+ if (kasan_enabled()) {
+ tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+ tag ^= 0xff;
+ }
+
+ return tag;
}
static inline void page_kasan_tag_set(struct page *page, u8 tag)
{
- page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
- page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
+ unsigned long old_flags, flags;
+
+ if (!kasan_enabled())
+ return;
+
+ tag ^= 0xff;
+ old_flags = READ_ONCE(page->flags);
+ do {
+ flags = old_flags;
+ flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
+ flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
+ } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
}
static inline void page_kasan_tag_reset(struct page *page)
{
- page_kasan_tag_set(page, 0xff);
+ if (kasan_enabled())
+ page_kasan_tag_set(page, 0xff);
}
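An illustrative round trip of the xor-with-0xff encoding used above (not part of the patch): freshly zeroed flag bits decode to the native tag, and any stored tag decodes back to itself.

	u8 stored = 0x00;		/* tag field of a just-cleared page->flags */
	u8 tag = stored ^ 0xff;		/* == 0xff, the native kernel tag */

	stored = 0x2a ^ 0xff;		/* setting tag 0x2a stores 0xd5 */
	tag = stored ^ 0xff;		/* reading back yields 0x2a again */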
-#else
+
+#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
+
static inline u8 page_kasan_tag(const struct page *page)
{
return 0xff;
@@ -1214,7 +1474,8 @@ static inline u8 page_kasan_tag(const struct page *page)
static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
static inline void page_kasan_tag_reset(struct page *page) { }
-#endif
+
+#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
static inline struct zone *page_zone(const struct page *page)
{
@@ -1226,6 +1487,16 @@ static inline pg_data_t *page_pgdat(const struct page *page)
return NODE_DATA(page_to_nid(page));
}
+static inline struct zone *folio_zone(const struct folio *folio)
+{
+ return page_zone(&folio->page);
+}
+
+static inline pg_data_t *folio_pgdat(const struct folio *folio)
+{
+ return page_pgdat(&folio->page);
+}
+
#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
@@ -1239,6 +1510,127 @@ static inline unsigned long page_to_section(const struct page *page)
}
#endif
+/**
+ * folio_pfn - Return the Page Frame Number of a folio.
+ * @folio: The folio.
+ *
+ * A folio may contain multiple pages. The pages have consecutive
+ * Page Frame Numbers.
+ *
+ * Return: The Page Frame Number of the first page in the folio.
+ */
+static inline unsigned long folio_pfn(struct folio *folio)
+{
+ return page_to_pfn(&folio->page);
+}
+
+static inline struct folio *pfn_folio(unsigned long pfn)
+{
+ return page_folio(pfn_to_page(pfn));
+}
+
+static inline atomic_t *folio_pincount_ptr(struct folio *folio)
+{
+ return &folio_page(folio, 1)->compound_pincount;
+}
+
+/**
+ * folio_maybe_dma_pinned - Report if a folio may be pinned for DMA.
+ * @folio: The folio.
+ *
+ * This function checks if a folio has been pinned via a call to
+ * a function in the pin_user_pages() family.
+ *
+ * For small folios, the return value is partially fuzzy: false is not fuzzy,
+ * because it means "definitely not pinned for DMA", but true means "probably
+ * pinned for DMA, but possibly a false positive due to having at least
+ * GUP_PIN_COUNTING_BIAS worth of normal folio references".
+ *
+ * False positives are OK, because: a) it's unlikely for a folio to
+ * accumulate that many references, and b) all the callers of this routine are
+ * expected to be able to deal gracefully with a false positive.
+ *
+ * For large folios, the result will be exactly correct. That's because
+ * we have more tracking data available: the compound_pincount is used
+ * instead of the GUP_PIN_COUNTING_BIAS scheme.
+ *
+ * For more information, please see Documentation/core-api/pin_user_pages.rst.
+ *
+ * Return: True, if it is likely that the page has been "dma-pinned".
+ * False, if the page is definitely not dma-pinned.
+ */
+static inline bool folio_maybe_dma_pinned(struct folio *folio)
+{
+ if (folio_test_large(folio))
+ return atomic_read(folio_pincount_ptr(folio)) > 0;
+
+ /*
+ * folio_ref_count() is signed. If that refcount overflows, then
+ * folio_ref_count() returns a negative value, and callers will avoid
+ * further incrementing the refcount.
+ *
+ * Here, for that overflow case, use the sign bit to count a little
+ * bit higher via unsigned math, and thus still get an accurate result.
+ */
+ return ((unsigned int)folio_ref_count(folio)) >=
+ GUP_PIN_COUNTING_BIAS;
+}
+
+static inline bool page_maybe_dma_pinned(struct page *page)
+{
+ return folio_maybe_dma_pinned(page_folio(page));
+}
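To see why the unsigned cast above matters, consider a hypothetical saturated refcount (illustrative numbers only): the signed view is negative, but the unsigned view still compares as at least GUP_PIN_COUNTING_BIAS, so the page keeps reporting as maybe-pinned.

	int refs = -2147483000;		/* overflowed signed refcount */
	bool pinned = (unsigned int)refs >= GUP_PIN_COUNTING_BIAS;	/* true */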
+
+/*
+ * This should most likely only be called during fork() to see whether we
+ * should break the cow immediately for an anon page on the src mm.
+ *
+ * The caller has to hold the PT lock and the vma->vm_mm->write_protect_seq.
+ */
+static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
+ struct page *page)
+{
+ VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
+
+ if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
+ return false;
+
+ return page_maybe_dma_pinned(page);
+}
+
+/* MIGRATE_CMA and ZONE_MOVABLE do not allow pinning pages */
+#ifdef CONFIG_MIGRATION
+static inline bool is_longterm_pinnable_page(struct page *page)
+{
+#ifdef CONFIG_CMA
+ int mt = get_pageblock_migratetype(page);
+
+ if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
+ return false;
+#endif
+ /* The zero page may always be pinned */
+ if (is_zero_pfn(page_to_pfn(page)))
+ return true;
+
+ /* Coherent device memory must always allow eviction. */
+ if (is_device_coherent_page(page))
+ return false;
+
+ /* Otherwise, non-movable zone pages can be pinned. */
+ return !is_zone_movable_page(page);
+}
+#else
+static inline bool is_longterm_pinnable_page(struct page *page)
+{
+ return true;
+}
+#endif
+
+static inline bool folio_is_longterm_pinnable(struct folio *folio)
+{
+ return is_longterm_pinnable_page(&folio->page);
+}
+
static inline void set_page_zone(struct page *page, enum zone_type zone)
{
page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
@@ -1261,25 +1653,92 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
#endif
}
-#ifdef CONFIG_MEMCG
-static inline struct mem_cgroup *page_memcg(struct page *page)
+/**
+ * folio_nr_pages - The number of pages in the folio.
+ * @folio: The folio.
+ *
+ * Return: A positive power of two.
+ */
+static inline long folio_nr_pages(struct folio *folio)
{
- return page->mem_cgroup;
+ if (!folio_test_large(folio))
+ return 1;
+#ifdef CONFIG_64BIT
+ return folio->_folio_nr_pages;
+#else
+ return 1L << folio->_folio_order;
+#endif
}
-static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
+
+/**
+ * folio_next - Move to the next physical folio.
+ * @folio: The folio we're currently operating on.
+ *
+ * If you have physically contiguous memory which may span more than
+ * one folio (eg a &struct bio_vec), use this function to move from one
+ * folio to the next. Do not use it if the memory is only virtually
+ * contiguous as the folios are almost certainly not adjacent to each
+ * other. This is the folio equivalent to writing ``page++``.
+ *
+ * Context: We assume that the folios are refcounted and/or locked at a
+ * higher level and do not adjust the reference counts.
+ * Return: The next struct folio.
+ */
+static inline struct folio *folio_next(struct folio *folio)
{
- WARN_ON_ONCE(!rcu_read_lock_held());
- return READ_ONCE(page->mem_cgroup);
+ return (struct folio *)folio_page(folio, folio_nr_pages(folio));
}
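A hedged usage sketch (the helper name is invented for illustration): walking the folios that back a physically contiguous range, as the kernel-doc above suggests for something like a bio_vec.

	static void visit_each_folio(struct folio *first, unsigned long nr_pages)
	{
		struct folio *folio = first;
		unsigned long seen = 0;

		while (seen < nr_pages) {
			/* ... operate on folio ... */
			seen += folio_nr_pages(folio);
			folio = folio_next(folio);
		}
	}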
-#else
-static inline struct mem_cgroup *page_memcg(struct page *page)
+
+/**
+ * folio_shift - The size of the memory described by this folio.
+ * @folio: The folio.
+ *
+ * A folio represents a number of bytes which is a power-of-two in size.
+ * This function tells you which power-of-two the folio is. See also
+ * folio_size() and folio_order().
+ *
+ * Context: The caller should have a reference on the folio to prevent
+ * it from being split. It is not necessary for the folio to be locked.
+ * Return: The base-2 logarithm of the size of this folio.
+ */
+static inline unsigned int folio_shift(struct folio *folio)
{
- return NULL;
+ return PAGE_SHIFT + folio_order(folio);
}
-static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
+
+/**
+ * folio_size - The number of bytes in a folio.
+ * @folio: The folio.
+ *
+ * Context: The caller should have a reference on the folio to prevent
+ * it from being split. It is not necessary for the folio to be locked.
+ * Return: The number of bytes in this folio.
+ */
+static inline size_t folio_size(struct folio *folio)
{
- WARN_ON_ONCE(!rcu_read_lock_held());
- return NULL;
+ return PAGE_SIZE << folio_order(folio);
+}
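Tying the three helpers together with a concrete, illustrative case: an order-2 folio on a 4KiB-page configuration.

	/* folio_order(folio) == 2                      */
	/* folio_nr_pages(folio) == 4                   */
	/* folio_shift(folio) == PAGE_SHIFT + 2 == 14   */
	/* folio_size(folio) == 1 << 14 == 16384 bytes  */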
+
+#ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
+static inline int arch_make_page_accessible(struct page *page)
+{
+ return 0;
+}
+#endif
+
+#ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
+static inline int arch_make_folio_accessible(struct folio *folio)
+{
+ int ret;
+ long i, nr = folio_nr_pages(folio);
+
+ for (i = 0; i < nr; i++) {
+ ret = arch_make_page_accessible(folio_page(folio, i));
+ if (ret)
+ break;
+ }
+
+ return ret;
}
#endif
@@ -1321,21 +1780,12 @@ void page_address_init(void);
#define page_address_init() do { } while(0)
#endif
-extern void *page_rmapping(struct page *page);
-extern struct anon_vma *page_anon_vma(struct page *page);
-extern struct address_space *page_mapping(struct page *page);
-
-extern struct address_space *__page_file_mapping(struct page *);
-
-static inline
-struct address_space *page_file_mapping(struct page *page)
+static inline void *folio_address(const struct folio *folio)
{
- if (unlikely(PageSwapCache(page)))
- return __page_file_mapping(page);
-
- return page->mapping;
+ return page_address(&folio->page);
}
+extern void *page_rmapping(struct page *page);
extern pgoff_t __page_file_index(struct page *page);
/*
@@ -1350,21 +1800,21 @@ static inline pgoff_t page_index(struct page *page)
}
bool page_mapped(struct page *page);
-struct address_space *page_mapping(struct page *page);
-struct address_space *page_mapping_file(struct page *page);
+bool folio_mapped(struct folio *folio);
/*
* Return true only if the page has been allocated with
* ALLOC_NO_WATERMARKS and the low watermark was not
* met implying that the system is under some pressure.
*/
-static inline bool page_is_pfmemalloc(struct page *page)
+static inline bool page_is_pfmemalloc(const struct page *page)
{
/*
- * Page index cannot be this large so this must be
- * a pfmemalloc page.
+ * lru.next has bit 1 set if the page is allocated from the
+ * pfmemalloc reserves. Callers may simply overwrite it if
+ * they do not need to preserve that information.
*/
- return page->index == -1UL;
+ return (uintptr_t)page->lru.next & BIT(1);
}
/*
@@ -1373,12 +1823,12 @@ static inline bool page_is_pfmemalloc(struct page *page)
*/
static inline void set_page_pfmemalloc(struct page *page)
{
- page->index = -1UL;
+ page->lru.next = (void *)BIT(1);
}
static inline void clear_page_pfmemalloc(struct page *page)
{
- page->index = 0;
+ page->lru.next = NULL;
}
/*
@@ -1387,6 +1837,8 @@ static inline void clear_page_pfmemalloc(struct page *page)
extern void pagefault_out_of_memory(void);
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
+#define offset_in_thp(page, p) ((unsigned long)(p) & (thp_size(page) - 1))
+#define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1))
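A worked example of offset_in_folio() (illustrative, assuming a 16KiB folio): the mask is folio_size() - 1 rather than PAGE_SIZE - 1, so offsets are taken relative to the whole folio.

	/* addr == folio start + 0x5000, folio_size(folio) == 0x4000 */
	/* offset_in_folio(folio, addr) == 0x5000 & 0x3fff == 0x1000 */
	/* offset_in_page(addr)         == 0x5000 & 0x0fff == 0x0    */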
/*
* Flags passed to show_mem() and show_free_areas() to suppress output in
@@ -1394,24 +1846,19 @@ extern void pagefault_out_of_memory(void);
*/
#define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */
-extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
+extern void __show_free_areas(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
+static void __maybe_unused show_free_areas(unsigned int flags, nodemask_t *nodemask)
+{
+ __show_free_areas(flags, nodemask, MAX_NR_ZONES - 1);
+}
#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
#else
static inline bool can_do_mlock(void) { return false; }
#endif
-extern int user_shm_lock(size_t, struct user_struct *);
-extern void user_shm_unlock(size_t, struct user_struct *);
-
-/*
- * Parameter block passed down to zap_pte_range in exceptional cases.
- */
-struct zap_details {
- struct address_space *check_mapping; /* Check page->mapping if set */
- pgoff_t first_index; /* Lowest page->index to unmap */
- pgoff_t last_index; /* Highest page->index to unmap */
-};
+extern int user_shm_lock(size_t, struct ucounts *);
+extern void user_shm_unlock(size_t, struct ucounts *);
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
@@ -1422,18 +1869,18 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
-void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
- unsigned long start, unsigned long end);
+void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
+ struct vm_area_struct *start_vma, unsigned long start,
+ unsigned long end);
struct mmu_notifier_range;
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor, unsigned long ceiling);
-int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
- struct vm_area_struct *vma);
-int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
- struct mmu_notifier_range *range,
- pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
+int
+copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
+int follow_pte(struct mm_struct *mm, unsigned long address,
+ pte_t **ptepp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
@@ -1445,14 +1892,13 @@ extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
-int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
-int invalidate_inode_page(struct page *page);
#ifdef CONFIG_MMU
extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
- unsigned long address, unsigned int flags);
-extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long address, unsigned int flags,
+ struct pt_regs *regs);
+extern int fixup_user_fault(struct mm_struct *mm,
unsigned long address, unsigned int fault_flags,
bool *unlocked);
void unmap_mapping_pages(struct address_space *mapping,
@@ -1461,14 +1907,14 @@ void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows);
#else
static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
- unsigned long address, unsigned int flags)
+ unsigned long address, unsigned int flags,
+ struct pt_regs *regs)
{
/* should never happen if there's no MMU */
BUG();
return VM_FAULT_SIGBUS;
}
-static inline int fixup_user_fault(struct task_struct *tsk,
- struct mm_struct *mm, unsigned long address,
+static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
unsigned int fault_flags, bool *unlocked)
{
/* should never happen if there's no MMU */
@@ -1491,14 +1937,14 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
-extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long addr, void *buf, int len, unsigned int gup_flags);
+extern int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
+ void *buf, int len, unsigned int gup_flags);
-long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+long get_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *locked);
-long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+long pin_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *locked);
@@ -1508,10 +1954,10 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
long pin_user_pages(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas);
-long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages, int *locked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);
+long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
+ struct page **pages, unsigned int gup_flags);
int get_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
@@ -1522,76 +1968,14 @@ int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
struct task_struct *task, bool bypass_rlim);
-/* Container for pinned pfns / pages */
-struct frame_vector {
- unsigned int nr_allocated; /* Number of frames we have space for */
- unsigned int nr_frames; /* Number of frames stored in ptrs array */
- bool got_ref; /* Did we pin pages by getting page ref? */
- bool is_pfns; /* Does array contain pages or pfns? */
- void *ptrs[0]; /* Array of pinned pfns / pages. Use
- * pfns_vector_pages() or pfns_vector_pfns()
- * for access */
-};
-
-struct frame_vector *frame_vector_create(unsigned int nr_frames);
-void frame_vector_destroy(struct frame_vector *vec);
-int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
- unsigned int gup_flags, struct frame_vector *vec);
-void put_vaddr_frames(struct frame_vector *vec);
-int frame_vector_to_pages(struct frame_vector *vec);
-void frame_vector_to_pfns(struct frame_vector *vec);
-
-static inline unsigned int frame_vector_count(struct frame_vector *vec)
-{
- return vec->nr_frames;
-}
-
-static inline struct page **frame_vector_pages(struct frame_vector *vec)
-{
- if (vec->is_pfns) {
- int err = frame_vector_to_pages(vec);
-
- if (err)
- return ERR_PTR(err);
- }
- return (struct page **)(vec->ptrs);
-}
-
-static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
-{
- if (!vec->is_pfns)
- frame_vector_to_pfns(vec);
- return (unsigned long *)(vec->ptrs);
-}
-
struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
struct page **pages);
-int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);
-extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
-extern void do_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length);
-
-void __set_page_dirty(struct page *, struct address_space *, int warn);
-int __set_page_dirty_nobuffers(struct page *page);
-int __set_page_dirty_no_writeback(struct page *page);
-int redirty_page_for_writepage(struct writeback_control *wbc,
- struct page *page);
-void account_page_dirtied(struct page *page, struct address_space *mapping);
-void account_page_cleaned(struct page *page, struct address_space *mapping,
- struct bdi_writeback *wb);
-int set_page_dirty(struct page *page);
+bool folio_mark_dirty(struct folio *folio);
+bool set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
-void __cancel_dirty_page(struct page *page);
-static inline void cancel_dirty_page(struct page *page)
-{
- /* Avoid atomic ops, locking, etc. when not actually needed. */
- if (PageDirty(page))
- __cancel_dirty_page(page);
-}
-int clear_page_dirty_for_io(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
@@ -1599,18 +1983,48 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
unsigned long new_addr, unsigned long len,
bool need_rmap_locks);
-extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
+
+/*
+ * Flags used by change_protection(). For now we make it a bitmap so
+ * that we can pass in multiple flags, just like parameters. However,
+ * for now, all callers only use one of the flags at a time.
+ */
+/*
+ * Whether we should manually check if we can map individual PTEs writable,
+ * because something (e.g., COW, uffd-wp) blocks that from happening for all
+ * PTEs automatically in a writable mapping.
+ */
+#define MM_CP_TRY_CHANGE_WRITABLE (1UL << 0)
+/* Whether this protection change is for NUMA hints */
+#define MM_CP_PROT_NUMA (1UL << 1)
+/* Whether this change is for write protecting */
+#define MM_CP_UFFD_WP (1UL << 2) /* do wp */
+#define MM_CP_UFFD_WP_RESOLVE (1UL << 3) /* Resolve wp */
+#define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \
+ MM_CP_UFFD_WP_RESOLVE)
+
+extern unsigned long change_protection(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, unsigned long start,
unsigned long end, pgprot_t newprot,
- int dirty_accountable, int prot_numa);
-extern int mprotect_fixup(struct vm_area_struct *vma,
+ unsigned long cp_flags);
+extern int mprotect_fixup(struct mmu_gather *tlb, struct vm_area_struct *vma,
struct vm_area_struct **pprev, unsigned long start,
unsigned long end, unsigned long newflags);
/*
* doesn't attempt to fault and will return short.
*/
-int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages);
+int get_user_pages_fast_only(unsigned long start, int nr_pages,
+ unsigned int gup_flags, struct page **pages);
+int pin_user_pages_fast_only(unsigned long start, int nr_pages,
+ unsigned int gup_flags, struct page **pages);
+
+static inline bool get_user_page_fast_only(unsigned long addr,
+ unsigned int gup_flags, struct page **pagep)
+{
+ return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
+}
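A hedged usage sketch of the helper above ('addr' is an assumed user address): try the non-faulting fast path first, then fall back to the sleeping API.

	struct page *page;

	if (get_user_page_fast_only(addr, FOLL_WRITE, &page)) {
		/* got a reference; drop it with put_page() when done */
		put_page(page);
	} else {
		/* fall back to a full, sleeping get_user_pages() call */
	}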
/*
* per-process(per-mm_struct) statistics.
*/
@@ -1720,6 +2134,18 @@ static inline void sync_mm_rss(struct mm_struct *mm)
}
#endif
+#ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
+static inline int pte_special(pte_t pte)
+{
+ return 0;
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ return pte;
+}
+#endif
+
#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
@@ -1841,11 +2267,6 @@ int __pte_alloc_kernel(pmd_t *pmd);
#if defined(CONFIG_MMU)
-/*
- * The following ifdef needed to get the 5level-fixup.h header to work.
- * Remove it when 5level-fixup.h has been removed.
- */
-#ifndef __ARCH_HAS_5LEVEL_HACK
static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
unsigned long address)
{
@@ -1859,7 +2280,6 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
NULL : pud_offset(p4d, address);
}
-#endif /* !__ARCH_HAS_5LEVEL_HACK */
static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
@@ -1943,7 +2363,7 @@ static inline bool pgtable_pte_page_ctor(struct page *page)
if (!ptlock_init(page))
return false;
__SetPageTable(page);
- inc_zone_page_state(page, NR_PAGETABLE);
+ inc_lruvec_page_state(page, NR_PAGETABLE);
return true;
}
@@ -1951,7 +2371,7 @@ static inline void pgtable_pte_page_dtor(struct page *page)
{
ptlock_free(page);
__ClearPageTable(page);
- dec_zone_page_state(page, NR_PAGETABLE);
+ dec_lruvec_page_state(page, NR_PAGETABLE);
}
#define pte_offset_map_lock(mm, pmd, address, ptlp) \
@@ -1994,7 +2414,7 @@ static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
return ptlock_ptr(pmd_to_page(pmd));
}
-static inline bool pgtable_pmd_page_ctor(struct page *page)
+static inline bool pmd_ptlock_init(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
page->pmd_huge_pte = NULL;
@@ -2002,7 +2422,7 @@ static inline bool pgtable_pmd_page_ctor(struct page *page)
return ptlock_init(page);
}
-static inline void pgtable_pmd_page_dtor(struct page *page)
+static inline void pmd_ptlock_free(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
@@ -2019,8 +2439,8 @@ static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
return &mm->page_table_lock;
}
-static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
-static inline void pgtable_pmd_page_dtor(struct page *page) {}
+static inline bool pmd_ptlock_init(struct page *page) { return true; }
+static inline void pmd_ptlock_free(struct page *page) {}
#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
@@ -2033,6 +2453,22 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
return ptl;
}
+static inline bool pgtable_pmd_page_ctor(struct page *page)
+{
+ if (!pmd_ptlock_init(page))
+ return false;
+ __SetPageTable(page);
+ inc_lruvec_page_state(page, NR_PAGETABLE);
+ return true;
+}
+
+static inline void pgtable_pmd_page_dtor(struct page *page)
+{
+ pmd_ptlock_free(page);
+ __ClearPageTable(page);
+ dec_lruvec_page_state(page, NR_PAGETABLE);
+}
+
/*
* No scalability reason to split PUD locks yet, but follow the same pattern
* as the PMD locks to make it easier if we decide to. The VM should not be
@@ -2053,9 +2489,6 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
}
extern void __init pagecache_init(void);
-extern void free_area_init(unsigned long * zones_size);
-extern void __init free_area_init_node(int nid, unsigned long * zones_size,
- unsigned long zone_start_pfn, unsigned long *zholes_size);
extern void free_initmem(void);
/*
@@ -2067,32 +2500,20 @@ extern void free_initmem(void);
extern unsigned long free_reserved_area(void *start, void *end,
int poison, const char *s);
-#ifdef CONFIG_HIGHMEM
-/*
- * Free a highmem page into the buddy system, adjusting totalhigh_pages
- * and totalram_pages.
- */
-extern void free_highmem_page(struct page *page);
-#endif
-
extern void adjust_managed_page_count(struct page *page, long count);
-extern void mem_init_print_info(const char *str);
+extern void mem_init_print_info(void);
extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
/* Free the reserved page into the buddy system, so it gets managed. */
-static inline void __free_reserved_page(struct page *page)
+static inline void free_reserved_page(struct page *page)
{
ClearPageReserved(page);
init_page_count(page);
__free_page(page);
-}
-
-static inline void free_reserved_page(struct page *page)
-{
- __free_reserved_page(page);
adjust_managed_page_count(page, 1);
}
+#define free_highmem_page(page) free_reserved_page(page)
static inline void mark_page_reserved(struct page *page)
{
@@ -2111,7 +2532,7 @@ static inline unsigned long free_initmem_default(int poison)
extern char __init_begin[], __init_end[];
return free_reserved_area(&__init_begin, &__init_end,
- poison, "unused kernel");
+ poison, "unused kernel image (initmem)");
}
static inline unsigned long get_num_physpages(void)
@@ -2125,34 +2546,23 @@ static inline unsigned long get_num_physpages(void)
return phys_pages;
}
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
- * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
- * zones, allocate the backing mem_map and account for memory holes in a more
- * architecture independent manner. This is a substitute for creating the
- * zone_sizes[] and zholes_size[] arrays and passing them to
- * free_area_init_node()
+ * Using memblock node mappings, an architecture may initialise its
+ * zones, allocate the backing mem_map and account for memory holes in an
+ * architecture independent manner.
*
* An architecture is expected to register range of page frames backed by
* physical memory with memblock_add[_node]() before calling
- * free_area_init_nodes() passing in the PFN each zone ends at. At a basic
+ * free_area_init() passing in the PFN each zone ends at. At a basic
* usage, an architecture is expected to do something like
*
* unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
* max_highmem_pfn};
* for_each_valid_physical_page_range()
- * memblock_add_node(base, size, nid)
- * free_area_init_nodes(max_zone_pfns);
- *
- * free_bootmem_with_active_regions() calls free_bootmem_node() for each
- * registered physical page range. Similarly
- * sparse_memory_present_with_active_regions() calls memory_present() for
- * each range when SPARSEMEM is enabled.
- *
- * See mm/page_alloc.c for more information on each function exposed by
- * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
+ * memblock_add_node(base, size, nid, MEMBLOCK_NONE)
+ * free_area_init(max_zone_pfns);
*/
-extern void free_area_init_nodes(unsigned long *max_zone_pfn);
+void free_area_init(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
unsigned long end_pfn);
@@ -2160,36 +2570,32 @@ extern unsigned long absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn);
-extern unsigned long find_min_pfn_with_active_regions(void);
-extern void free_bootmem_with_active_regions(int nid,
- unsigned long max_low_pfn);
-extern void sparse_memory_present_with_active_regions(int nid);
-
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
- !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
-static inline int __early_pfn_to_nid(unsigned long pfn,
- struct mminit_pfnnid_cache *state)
+#ifndef CONFIG_NUMA
+static inline int early_pfn_to_nid(unsigned long pfn)
{
return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
-/* there is a per-arch backend function. */
-extern int __meminit __early_pfn_to_nid(unsigned long pfn,
- struct mminit_pfnnid_cache *state);
#endif
extern void set_dma_reserve(unsigned long new_dma_reserve);
-extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
- enum memmap_context, struct vmem_altmap *);
+extern void memmap_init_range(unsigned long, int, unsigned long,
+ unsigned long, unsigned long, enum meminit_context,
+ struct vmem_altmap *, int migratetype);
extern void setup_per_zone_wmarks(void);
+extern void calculate_min_free_kbytes(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
-extern void show_mem(unsigned int flags, nodemask_t *nodemask);
+
+extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
+static inline void show_mem(unsigned int flags, nodemask_t *nodemask)
+{
+ __show_mem(flags, nodemask, MAX_NR_ZONES - 1);
+}
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
@@ -2206,6 +2612,7 @@ extern void setup_per_cpu_pageset(void);
extern int min_free_kbytes;
extern int watermark_boost_factor;
extern int watermark_scale_factor;
+extern bool arch_has_descending_max_zone_pfns(void);
/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
@@ -2258,21 +2665,22 @@ static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
extern struct vm_area_struct *vma_merge(struct mm_struct *,
struct vm_area_struct *prev, unsigned long addr, unsigned long end,
unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
- struct mempolicy *, struct vm_userfaultfd_ctx);
+ struct mempolicy *, struct vm_userfaultfd_ctx, struct anon_vma_name *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
unsigned long addr, int new_below);
extern int split_vma(struct mm_struct *, struct vm_area_struct *,
unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
-extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
- struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
unsigned long addr, unsigned long len, pgoff_t pgoff,
bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);
+void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas);
+void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas);
+
static inline int check_data_rlimit(unsigned long rlim,
unsigned long new,
unsigned long start,
@@ -2290,7 +2698,8 @@ static inline int check_data_rlimit(unsigned long rlim,
extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);
-extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
+extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
+extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
extern struct file *get_task_exe_file(struct task_struct *task);
@@ -2309,6 +2718,7 @@ extern int install_special_mapping(struct mm_struct *mm,
unsigned long flags, struct page **pages);
unsigned long randomize_stack_top(unsigned long stack_top);
+unsigned long randomize_page(unsigned long start, unsigned long range);
extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
@@ -2317,22 +2727,13 @@ extern unsigned long mmap_region(struct file *file, unsigned long addr,
struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
- vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
- struct list_head *uf);
-extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
- struct list_head *uf, bool downgrade);
+ unsigned long pgoff, unsigned long *populate, struct list_head *uf);
+extern int do_mas_munmap(struct ma_state *mas, struct mm_struct *mm,
+ unsigned long start, size_t len, struct list_head *uf,
+ bool downgrade);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
-extern int do_madvise(unsigned long start, size_t len_in, int behavior);
-
-static inline unsigned long
-do_mmap_pgoff(struct file *file, unsigned long addr,
- unsigned long len, unsigned long prot, unsigned long flags,
- unsigned long pgoff, unsigned long *populate,
- struct list_head *uf)
-{
- return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
-}
+extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
@@ -2364,26 +2765,7 @@ struct vm_unmapped_area_info {
unsigned long align_offset;
};
-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
-
-/*
- * Search for an unmapped address range.
- *
- * We are looking for a range that:
- * - does not intersect with any VMA;
- * - is contained within the [low_limit, high_limit) interval;
- * - is at least the desired size.
- * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
- */
-static inline unsigned long
-vm_unmapped_area(struct vm_unmapped_area_info *info)
-{
- if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
- return unmapped_area_topdown(info);
- else
- return unmapped_area(info);
-}
+extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
@@ -2393,38 +2775,15 @@ extern void truncate_inode_pages_final(struct address_space *);
/* generic vm_area_ops exported for stackable file systems */
extern vm_fault_t filemap_fault(struct vm_fault *vmf);
-extern void filemap_map_pages(struct vm_fault *vmf,
+extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
-/* mm/page-writeback.c */
-int __must_check write_one_page(struct page *page);
-void task_dirty_inc(struct task_struct *tsk);
-
-/* readahead.c */
-#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE)
-
-int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
- pgoff_t offset, unsigned long nr_to_read);
-
-void page_cache_sync_readahead(struct address_space *mapping,
- struct file_ra_state *ra,
- struct file *filp,
- pgoff_t offset,
- unsigned long size);
-
-void page_cache_async_readahead(struct address_space *mapping,
- struct file_ra_state *ra,
- struct file *filp,
- struct page *pg,
- pgoff_t offset,
- unsigned long size);
-
extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
-/* CONFIG_STACK_GROWSUP still needs to to grow downwards at some places */
+/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
unsigned long address);
#if VM_GROWSUP
@@ -2438,15 +2797,24 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
struct vm_area_struct **pprev);
-/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
- NULL if none. Assume start_addr < end_addr. */
-static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
-{
- struct vm_area_struct * vma = find_vma(mm,start_addr);
+/*
+ * Look up the first VMA which intersects the interval [start_addr, end_addr).
+ * Returns NULL if none. Assumes start_addr < end_addr.
+ */
+struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
+ unsigned long start_addr, unsigned long end_addr);
- if (vma && end_addr <= vma->vm_start)
- vma = NULL;
- return vma;
+/**
+ * vma_lookup() - Find a VMA at a specific address
+ * @mm: The process address space.
+ * @addr: The user address.
+ *
+ * Return: The vm_area_struct at the given address, %NULL otherwise.
+ */
+static inline
+struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
+{
+ return mtree_load(&mm->mm_mt, addr);
}
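A hedged usage sketch (assumed variables mm and addr): unlike find_vma(), which may return the next VMA above addr, vma_lookup() returns a VMA only if addr actually falls inside it.

	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, addr);
	if (vma && !(vma->vm_flags & VM_WRITE)) {
		/* addr is mapped, but read-only */
	}
	mmap_read_unlock(mm);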
static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
@@ -2482,7 +2850,7 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
unsigned long vm_start, unsigned long vm_end)
{
- struct vm_area_struct *vma = find_vma(mm, vm_start);
+ struct vm_area_struct *vma = vma_lookup(mm, vm_start);
if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
vma = NULL;
@@ -2510,6 +2878,8 @@ static inline void vma_set_page_prot(struct vm_area_struct *vma)
}
#endif
+void vma_set_file(struct vm_area_struct *vma, struct file *file);
+
#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
@@ -2518,7 +2888,11 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
+int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t prot);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
+int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
+ struct page **pages, unsigned long *num);
int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
unsigned long num);
int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
@@ -2548,6 +2922,15 @@ static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
return VM_FAULT_NOPAGE;
}
+#ifndef io_remap_pfn_range
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn,
+ unsigned long size, pgprot_t prot)
+{
+ return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
+}
+#endif
+
static inline vm_fault_t vmf_error(int err)
{
if (err == -ENOMEM)
@@ -2565,19 +2948,16 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO
* and return without waiting upon it */
-#define FOLL_POPULATE 0x40 /* fault in page */
-#define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */
+#define FOLL_NOFAULT 0x80 /* do not fault in pages */
#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
-#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */
#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
-#define FOLL_MLOCK 0x1000 /* lock present pages */
#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
-#define FOLL_COW 0x4000 /* internal GUP flag */
#define FOLL_ANON 0x8000 /* don't do file mappings */
#define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite: see below */
#define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */
#define FOLL_PIN 0x40000 /* pages must be released via unpin_user_page */
+#define FOLL_FAST_ONLY 0x80000 /* gup_fast: prevent fall-back to slow gup */
/*
* FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
@@ -2632,7 +3012,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
* releasing pages: get_user_pages*() pages must be released via put_page(),
* while pin_user_pages*() pages must be released via unpin_user_page().
*
- * Please see Documentation/vm/pin_user_pages.rst for more information.
+ * Please see Documentation/core-api/pin_user_pages.rst for more information.
*/
static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
@@ -2646,6 +3026,65 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
return 0;
}
+/*
+ * Indicates for which pages that are write-protected in the page table,
+ * whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
+ * GUP pin will remain consistent with the pages mapped into the page tables
+ * of the MM.
+ *
+ * Temporary unmapping of PageAnonExclusive() pages or clearing of
+ * PageAnonExclusive() has to protect against concurrent GUP:
+ * * Ordinary GUP: Using the PT lock
+ * * GUP-fast and fork(): mm->write_protect_seq
+ * * GUP-fast and KSM or temporary unmapping (swap, migration): see
+ * page_try_share_anon_rmap()
+ *
+ * Must be called with the (sub)page that's actually referenced via the
+ * page table entry, which might not necessarily be the head page for a
+ * PTE-mapped THP.
+ */
+static inline bool gup_must_unshare(unsigned int flags, struct page *page)
+{
+ /*
+ * FOLL_WRITE is implicitly handled correctly as the page table entry
+ * has to be writable -- and if it references (part of) an anonymous
+ * folio, that part is required to be marked exclusive.
+ */
+ if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
+ return false;
+ /*
+ * Note: PageAnon(page) is stable until the page is actually getting
+ * freed.
+ */
+ if (!PageAnon(page))
+ return false;
+
+ /* Paired with a memory barrier in page_try_share_anon_rmap(). */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ smp_rmb();
+
+ /*
+ * Note that PageKsm() pages cannot be exclusive, and consequently,
+ * cannot get pinned.
+ */
+ return !PageAnonExclusive(page);
+}
+
+/*
+ * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
+ * a (NUMA hinting) fault is required.
+ */
+static inline bool gup_can_follow_protnone(unsigned int flags)
+{
+ /*
+ * FOLL_FORCE has to be able to make progress even if the VMA is
+ * inaccessible. Further, FOLL_FORCE access usually does not represent
+ * application behaviour and we should avoid triggering NUMA hinting
+ * faults.
+ */
+ return flags & FOLL_FORCE;
+}
+
typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
unsigned long size, pte_fn_t fn, void *data);
@@ -2653,44 +3092,58 @@ extern int apply_to_existing_page_range(struct mm_struct *mm,
unsigned long address, unsigned long size,
pte_fn_t fn, void *data);
+extern void __init init_mem_debugging_and_hardening(void);
#ifdef CONFIG_PAGE_POISONING
-extern bool page_poisoning_enabled(void);
-extern void kernel_poison_pages(struct page *page, int numpages, int enable);
+extern void __kernel_poison_pages(struct page *page, int numpages);
+extern void __kernel_unpoison_pages(struct page *page, int numpages);
+extern bool _page_poisoning_enabled_early;
+DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
+static inline bool page_poisoning_enabled(void)
+{
+ return _page_poisoning_enabled_early;
+}
+/*
+ * For use in fast paths after init_mem_debugging_and_hardening() has run,
+ * or when a false negative result is not harmful when called too early.
+ */
+static inline bool page_poisoning_enabled_static(void)
+{
+ return static_branch_unlikely(&_page_poisoning_enabled);
+}
+static inline void kernel_poison_pages(struct page *page, int numpages)
+{
+ if (page_poisoning_enabled_static())
+ __kernel_poison_pages(page, numpages);
+}
+static inline void kernel_unpoison_pages(struct page *page, int numpages)
+{
+ if (page_poisoning_enabled_static())
+ __kernel_unpoison_pages(page, numpages);
+}
#else
static inline bool page_poisoning_enabled(void) { return false; }
-static inline void kernel_poison_pages(struct page *page, int numpages,
- int enable) { }
+static inline bool page_poisoning_enabled_static(void) { return false; }
+static inline void __kernel_poison_pages(struct page *page, int numpages) { }
+static inline void kernel_poison_pages(struct page *page, int numpages) { }
+static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
#endif
-#ifdef CONFIG_INIT_ON_ALLOC_DEFAULT_ON
-DECLARE_STATIC_KEY_TRUE(init_on_alloc);
-#else
-DECLARE_STATIC_KEY_FALSE(init_on_alloc);
-#endif
+DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
static inline bool want_init_on_alloc(gfp_t flags)
{
- if (static_branch_unlikely(&init_on_alloc) &&
- !page_poisoning_enabled())
+ if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
+ &init_on_alloc))
return true;
return flags & __GFP_ZERO;
}
-#ifdef CONFIG_INIT_ON_FREE_DEFAULT_ON
-DECLARE_STATIC_KEY_TRUE(init_on_free);
-#else
-DECLARE_STATIC_KEY_FALSE(init_on_free);
-#endif
+DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
static inline bool want_init_on_free(void)
{
- return static_branch_unlikely(&init_on_free) &&
- !page_poisoning_enabled();
+ return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
+ &init_on_free);
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
-extern void init_debug_pagealloc(void);
-#else
-static inline void init_debug_pagealloc(void) {}
-#endif
extern bool _debug_pagealloc_enabled_early;
DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
@@ -2712,28 +3165,28 @@ static inline bool debug_pagealloc_enabled_static(void)
return static_branch_unlikely(&_debug_pagealloc_enabled);
}
-#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
-extern void __kernel_map_pages(struct page *page, int numpages, int enable);
-
+#ifdef CONFIG_DEBUG_PAGEALLOC
/*
- * When called in DEBUG_PAGEALLOC context, the call should most likely be
- * guarded by debug_pagealloc_enabled() or debug_pagealloc_enabled_static()
+ * To support DEBUG_PAGEALLOC, an architecture must ensure that
+ * __kernel_map_pages() never fails
*/
-static inline void
-kernel_map_pages(struct page *page, int numpages, int enable)
+extern void __kernel_map_pages(struct page *page, int numpages, int enable);
+
+static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
{
- __kernel_map_pages(page, numpages, enable);
+ if (debug_pagealloc_enabled_static())
+ __kernel_map_pages(page, numpages, 1);
}
-#ifdef CONFIG_HIBERNATION
-extern bool kernel_page_present(struct page *page);
-#endif /* CONFIG_HIBERNATION */
-#else /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
-static inline void
-kernel_map_pages(struct page *page, int numpages, int enable) {}
-#ifdef CONFIG_HIBERNATION
-static inline bool kernel_page_present(struct page *page) { return true; }
-#endif /* CONFIG_HIBERNATION */
-#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
+
+static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
+{
+ if (debug_pagealloc_enabled_static())
+ __kernel_map_pages(page, numpages, 0);
+}
+#else /* CONFIG_DEBUG_PAGEALLOC */
+static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
+static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
+#endif /* CONFIG_DEBUG_PAGEALLOC */
#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
@@ -2755,12 +3208,11 @@ extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
-int drop_caches_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
+int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *,
+ loff_t *);
#endif
void drop_slab(void);
-void drop_slab_node(int nid);
#ifndef CONFIG_MMU
#define randomize_va_space 0
@@ -2779,19 +3231,21 @@ static inline void print_vma_addr(char *prefix, unsigned long rip)
void *sparse_buffer_alloc(unsigned long size);
struct page * __populate_section_memmap(unsigned long pfn,
- unsigned long nr_pages, int nid, struct vmem_altmap *altmap);
+ unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
-pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
+pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
+ struct vmem_altmap *altmap, struct page *reuse);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
-void *vmemmap_alloc_block_buf(unsigned long size, int node);
-void *altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap);
+void *vmemmap_alloc_block_buf(unsigned long size, int node,
+ struct vmem_altmap *altmap);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
- int node);
+ int node, struct vmem_altmap *altmap);
int vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap);
void vmemmap_populate_print_last(void);
@@ -2807,18 +3261,43 @@ enum mf_flags {
MF_ACTION_REQUIRED = 1 << 1,
MF_MUST_KILL = 1 << 2,
MF_SOFT_OFFLINE = 1 << 3,
+ MF_UNPOISON = 1 << 4,
+ MF_SW_SIMULATED = 1 << 5,
+ MF_NO_RETRY = 1 << 6,
};
+int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
+ unsigned long count, int mf_flags);
extern int memory_failure(unsigned long pfn, int flags);
extern void memory_failure_queue(unsigned long pfn, int flags);
+extern void memory_failure_queue_kick(int cpu);
extern int unpoison_memory(unsigned long pfn);
-extern int get_hwpoison_page(struct page *page);
-#define put_hwpoison_page(page) put_page(page)
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
-extern void shake_page(struct page *p, int access);
+extern void shake_page(struct page *p);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(unsigned long pfn, int flags);
+#ifdef CONFIG_MEMORY_FAILURE
+extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags);
+#else
+static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags)
+{
+ return 0;
+}
+#endif
+
+#ifndef arch_memory_failure
+static inline int arch_memory_failure(unsigned long pfn, int flags)
+{
+ return -ENXIO;
+}
+#endif
+#ifndef arch_is_platform_page
+static inline bool arch_is_platform_page(u64 paddr)
+{
+ return false;
+}
+#endif
/*
* Error handlers for various types of pages.
@@ -2835,10 +3314,8 @@ enum mf_action_page_type {
MF_MSG_KERNEL_HIGH_ORDER,
MF_MSG_SLAB,
MF_MSG_DIFFERENT_COMPOUND,
- MF_MSG_POISONED_HUGE,
MF_MSG_HUGE,
MF_MSG_FREE_HUGE,
- MF_MSG_NON_PMD_HUGE,
MF_MSG_UNMAP_FAILED,
MF_MSG_DIRTY_SWAPCACHE,
MF_MSG_CLEAN_SWAPCACHE,
@@ -2850,8 +3327,8 @@ enum mf_action_page_type {
MF_MSG_CLEAN_LRU,
MF_MSG_TRUNCATED_LRU,
MF_MSG_BUDDY,
- MF_MSG_BUDDY_2ND,
MF_MSG_DAX,
+ MF_MSG_UNSPLIT_THP,
MF_MSG_UNKNOWN,
};
@@ -2867,6 +3344,23 @@ extern long copy_huge_page_from_user(struct page *dst_page,
const void __user *usr_src,
unsigned int pages_per_huge_page,
bool allow_pagefault);
+
+/**
+ * vma_is_special_huge - Are transhuge page-table entries considered special?
+ * @vma: Pointer to the struct vm_area_struct to consider
+ *
+ * Whether transhuge page-table entries are considered "special" following
+ * the definition in vm_normal_page().
+ *
+ * Return: true if transhuge page-table entries should be considered special,
+ * false otherwise.
+ */
+static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
+{
+ return vma_is_dax(vma) || (vma->vm_file &&
+ (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
+}
+
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
#ifdef CONFIG_DEBUG_PAGEALLOC
@@ -2921,5 +3415,64 @@ unsigned long wp_shared_mapping_range(struct address_space *mapping,
pgoff_t first_index, pgoff_t nr);
#endif
-#endif /* __KERNEL__ */
+extern int sysctl_nr_trim_pages;
+
+#ifdef CONFIG_PRINTK
+void mem_dump_obj(void *object);
+#else
+static inline void mem_dump_obj(void *object) {}
+#endif
+
+/**
+ * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
+ * @seals: the seals to check
+ * @vma: the vma to operate on
+ *
+ * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on
+ * the vma flags. Returns 0 if the check passes, or <0 on error.
+ */
+static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
+{
+ if (seals & F_SEAL_FUTURE_WRITE) {
+ /*
+ * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
+ * the "future write" seal is active.
+ */
+ if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
+ return -EPERM;
+
+ /*
+ * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
+ * MAP_SHARED and read-only, take care to not allow mprotect to
+ * revert protections on such mappings. Do this only for shared
+ * mappings. For private mappings, we don't need to mask
+ * VM_MAYWRITE as we still want them to be COW-writable.
+ */
+ if (vma->vm_flags & VM_SHARED)
+ vma->vm_flags &= ~(VM_MAYWRITE);
+ }
+
+ return 0;
+}
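A hedged sketch of how a filesystem ->mmap() handler might use this helper (example_file_mmap() and example_file_seals() are made-up names for illustration):

	static int example_file_mmap(struct file *file, struct vm_area_struct *vma)
	{
		int ret = seal_check_future_write(example_file_seals(file), vma);

		if (ret)
			return ret;
		/* ... normal per-filesystem mmap setup ... */
		return 0;
	}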
+
+#ifdef CONFIG_ANON_VMA_NAME
+int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
+ unsigned long len_in,
+ struct anon_vma_name *anon_name);
+#else
+static inline int
+madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
+ unsigned long len_in, struct anon_vma_name *anon_name)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Whether to drop the pte markers, for example, the uffd-wp information for
+ * file-backed memory. This should only be specified when we will completely
+ * drop the page in the mm, either by truncation or unmapping of the vma. By
+ * default, the flag is not set.
+ */
+#define ZAP_FLAG_DROP_MARKER ((__force zap_flags_t) BIT(0))
+
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_api.h b/include/linux/mm_api.h
new file mode 100644
index 000000000000..a5ace2b198b8
--- /dev/null
+++ b/include/linux/mm_api.h
@@ -0,0 +1 @@
+#include <linux/mm.h>
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 6f2fef7b0784..e8ed225d8f7c 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -2,33 +2,47 @@
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H
+#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/swap.h>
+#include <linux/string.h>
+#include <linux/userfaultfd_k.h>
+#include <linux/swapops.h>
/**
- * page_is_file_cache - should the page be on a file LRU or anon LRU?
- * @page: the page to test
- *
- * Returns 1 if @page is page cache page backed by a regular filesystem,
- * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.
- * Used by functions that manipulate the LRU lists, to sort a page
- * onto the right LRU list.
+ * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
+ * @folio: The folio to test.
*
* We would like to get this info without a page flag, but the state
- * needs to survive until the page is last deleted from the LRU, which
+ * needs to survive until the folio is last deleted from the LRU, which
* could be as far down as __page_cache_release.
+ *
+ * Return: An integer (not a boolean!) used to sort a folio onto the
+ * right LRU list and to account folios correctly.
+ * 1 if @folio is a regular filesystem backed page cache folio
+ * or a lazily freed anonymous folio (e.g. via MADV_FREE).
+ * 0 if @folio is a normal anonymous folio, a tmpfs folio or otherwise
+ * ram or swap backed folio.
*/
-static inline int page_is_file_cache(struct page *page)
+static inline int folio_is_file_lru(struct folio *folio)
+{
+ return !folio_test_swapbacked(folio);
+}
+
+static inline int page_is_file_lru(struct page *page)
{
- return !PageSwapBacked(page);
+ return folio_is_file_lru(page_folio(page));
}
static __always_inline void __update_lru_size(struct lruvec *lruvec,
enum lru_list lru, enum zone_type zid,
- int nr_pages)
+ long nr_pages)
{
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+ lockdep_assert_held(&lruvec->lru_lock);
+ WARN_ON_ONCE(nr_pages != (int)nr_pages);
+
__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
__mod_zone_page_state(&pgdat->node_zones[zid],
NR_ZONE_LRU_BASE + lru, nr_pages);
@@ -36,7 +50,7 @@ static __always_inline void __update_lru_size(struct lruvec *lruvec,
static __always_inline void update_lru_size(struct lruvec *lruvec,
enum lru_list lru, enum zone_type zid,
- int nr_pages)
+ long nr_pages)
{
__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
@@ -44,84 +58,524 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
#endif
}
+/**
+ * __folio_clear_lru_flags - Clear the LRU flags of a folio before releasing it.
+ * @folio: The folio that was on lru and now has a zero reference.
+ */
+static __always_inline void __folio_clear_lru_flags(struct folio *folio)
+{
+ VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);
+
+ __folio_clear_lru(folio);
+
+ /* this shouldn't happen, so leave the flags to bad_page() */
+ if (folio_test_active(folio) && folio_test_unevictable(folio))
+ return;
+
+ __folio_clear_active(folio);
+ __folio_clear_unevictable(folio);
+}
+
+/**
+ * folio_lru_list - Which LRU list should a folio be on?
+ * @folio: The folio to test.
+ *
+ * Return: The LRU list a folio should be on, as an index
+ * into the array of LRU lists.
+ */
+static __always_inline enum lru_list folio_lru_list(struct folio *folio)
+{
+ enum lru_list lru;
+
+ VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);
+
+ if (folio_test_unevictable(folio))
+ return LRU_UNEVICTABLE;
+
+ lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
+ if (folio_test_active(folio))
+ lru += LRU_ACTIVE;
+
+ return lru;
+}
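+
+/*
+ * Worked example (illustrative): an evictable, active, file-backed folio
+ * starts from LRU_INACTIVE_FILE and has LRU_ACTIVE added, so it lands on
+ * LRU_ACTIVE_FILE; an inactive anonymous folio lands on LRU_INACTIVE_ANON.
+ */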
+
+#ifdef CONFIG_LRU_GEN
+
+#ifdef CONFIG_LRU_GEN_ENABLED
+static inline bool lru_gen_enabled(void)
+{
+ DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]);
+
+ return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]);
+}
+#else
+static inline bool lru_gen_enabled(void)
+{
+ DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]);
+
+ return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]);
+}
+#endif
+
+static inline bool lru_gen_in_fault(void)
+{
+ return current->in_lru_fault;
+}
+
+static inline int lru_gen_from_seq(unsigned long seq)
+{
+ return seq % MAX_NR_GENS;
+}
+
+static inline int lru_hist_from_seq(unsigned long seq)
+{
+ return seq % NR_HIST_GENS;
+}
+
+static inline int lru_tier_from_refs(int refs)
+{
+ VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));
+
+ /* see the comment in folio_lru_refs() */
+ return order_base_2(refs + 1);
+}
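+
+/*
+ * Worked example (illustrative): order_base_2() rounds up, and
+ * folio_lru_refs() already folded in the off-by-one, so refs=0 maps to
+ * tier 0, refs=1 to tier 1, refs=2..3 to tier 2 and refs=4..7 to tier 3.
+ */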
+
+static inline int folio_lru_refs(struct folio *folio)
+{
+ unsigned long flags = READ_ONCE(folio->flags);
+ bool workingset = flags & BIT(PG_workingset);
+
+ /*
+ * Return the number of accesses beyond PG_referenced, i.e., N-1 if the
+ * total number of accesses is N>1, since N=0,1 both map to the first
+ * tier. lru_tier_from_refs() will account for this off-by-one. Also see
+ * the comment on MAX_NR_TIERS.
+ */
+ return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset;
+}
+
+static inline int folio_lru_gen(struct folio *folio)
+{
+ unsigned long flags = READ_ONCE(folio->flags);
+
+ return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
+}
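+
+/*
+ * Illustrative note: LRU_GEN_MASK stores gen + 1, so a stored value of 0
+ * decodes to -1 here, meaning the folio is not on any multi-gen LRU list.
+ */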
+
+static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
+{
+ unsigned long max_seq = lruvec->lrugen.max_seq;
+
+ VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);
+
+ /* see the comment on MIN_NR_GENS */
+ return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
+}
+
+static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,
+ int old_gen, int new_gen)
+{
+ int type = folio_is_file_lru(folio);
+ int zone = folio_zonenum(folio);
+ int delta = folio_nr_pages(folio);
+ enum lru_list lru = type * LRU_INACTIVE_FILE;
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+
+ VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
+ VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
+ VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);
+
+ if (old_gen >= 0)
+ WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
+ lrugen->nr_pages[old_gen][type][zone] - delta);
+ if (new_gen >= 0)
+ WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
+ lrugen->nr_pages[new_gen][type][zone] + delta);
+
+ /* addition */
+ if (old_gen < 0) {
+ if (lru_gen_is_active(lruvec, new_gen))
+ lru += LRU_ACTIVE;
+ __update_lru_size(lruvec, lru, zone, delta);
+ return;
+ }
+
+ /* deletion */
+ if (new_gen < 0) {
+ if (lru_gen_is_active(lruvec, old_gen))
+ lru += LRU_ACTIVE;
+ __update_lru_size(lruvec, lru, zone, -delta);
+ return;
+ }
+
+ /* promotion */
+ if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
+ __update_lru_size(lruvec, lru, zone, -delta);
+ __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
+ }
+
+ /* demotion requires isolation, e.g., lru_deactivate_fn() */
+ VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
+}
+
+static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ unsigned long seq;
+ unsigned long flags;
+ int gen = folio_lru_gen(folio);
+ int type = folio_is_file_lru(folio);
+ int zone = folio_zonenum(folio);
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+
+ VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);
+
+ if (folio_test_unevictable(folio) || !lrugen->enabled)
+ return false;
+ /*
+	 * There are three common cases for this folio:
+ * 1. If it's hot, e.g., freshly faulted in or previously hot and
+ * migrated, add it to the youngest generation.
+ * 2. If it's cold but can't be evicted immediately, i.e., an anon page
+ * not in swapcache or a dirty page pending writeback, add it to the
+ * second oldest generation.
+ * 3. Everything else (clean, cold) is added to the oldest generation.
+ */
+ if (folio_test_active(folio))
+ seq = lrugen->max_seq;
+ else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
+ (folio_test_reclaim(folio) &&
+ (folio_test_dirty(folio) || folio_test_writeback(folio))))
+ seq = lrugen->min_seq[type] + 1;
+ else
+ seq = lrugen->min_seq[type];
+
+ gen = lru_gen_from_seq(seq);
+ flags = (gen + 1UL) << LRU_GEN_PGOFF;
+ /* see the comment on MIN_NR_GENS about PG_active */
+ set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags);
+
+ lru_gen_update_size(lruvec, folio, -1, gen);
+ /* for folio_rotate_reclaimable() */
+ if (reclaiming)
+ list_add_tail(&folio->lru, &lrugen->lists[gen][type][zone]);
+ else
+ list_add(&folio->lru, &lrugen->lists[gen][type][zone]);
+
+ return true;
+}
+
+static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ unsigned long flags;
+ int gen = folio_lru_gen(folio);
+
+ if (gen < 0)
+ return false;
+
+ VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
+
+ /* for folio_migrate_flags() */
+ flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
+ flags = set_mask_bits(&folio->flags, LRU_GEN_MASK, flags);
+ gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
+
+ lru_gen_update_size(lruvec, folio, gen, -1);
+ list_del(&folio->lru);
+
+ return true;
+}
+
+#else /* !CONFIG_LRU_GEN */
+
+static inline bool lru_gen_enabled(void)
+{
+ return false;
+}
+
+static inline bool lru_gen_in_fault(void)
+{
+ return false;
+}
+
+static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ return false;
+}
+
+static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ return false;
+}
+
+#endif /* CONFIG_LRU_GEN */
+
+static __always_inline
+void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
+{
+ enum lru_list lru = folio_lru_list(folio);
+
+ if (lru_gen_add_folio(lruvec, folio, false))
+ return;
+
+ update_lru_size(lruvec, lru, folio_zonenum(folio),
+ folio_nr_pages(folio));
+ if (lru != LRU_UNEVICTABLE)
+ list_add(&folio->lru, &lruvec->lists[lru]);
+}
+
static __always_inline void add_page_to_lru_list(struct page *page,
- struct lruvec *lruvec, enum lru_list lru)
+ struct lruvec *lruvec)
{
- update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
- list_add(&page->lru, &lruvec->lists[lru]);
+ lruvec_add_folio(lruvec, page_folio(page));
}
-static __always_inline void add_page_to_lru_list_tail(struct page *page,
- struct lruvec *lruvec, enum lru_list lru)
+static __always_inline
+void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
- update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
- list_add_tail(&page->lru, &lruvec->lists[lru]);
+ enum lru_list lru = folio_lru_list(folio);
+
+ if (lru_gen_add_folio(lruvec, folio, true))
+ return;
+
+ update_lru_size(lruvec, lru, folio_zonenum(folio),
+ folio_nr_pages(folio));
+ /* This is not expected to be used on LRU_UNEVICTABLE */
+ list_add_tail(&folio->lru, &lruvec->lists[lru]);
+}
+
+static __always_inline
+void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
+{
+ enum lru_list lru = folio_lru_list(folio);
+
+ if (lru_gen_del_folio(lruvec, folio, false))
+ return;
+
+ if (lru != LRU_UNEVICTABLE)
+ list_del(&folio->lru);
+ update_lru_size(lruvec, lru, folio_zonenum(folio),
+ -folio_nr_pages(folio));
}
static __always_inline void del_page_from_lru_list(struct page *page,
- struct lruvec *lruvec, enum lru_list lru)
+ struct lruvec *lruvec)
{
- list_del(&page->lru);
- update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
+ lruvec_del_folio(lruvec, page_folio(page));
}
-/**
- * page_lru_base_type - which LRU list type should a page be on?
- * @page: the page to test
- *
- * Used for LRU list index arithmetic.
- *
- * Returns the base LRU type - file or anon - @page should be on.
+#ifdef CONFIG_ANON_VMA_NAME
+/*
+ * mmap_lock should be read-locked when calling anon_vma_name(). The caller
+ * should either keep holding the lock while using the returned pointer, or
+ * raise the anon_vma_name refcount before releasing the lock.
*/
-static inline enum lru_list page_lru_base_type(struct page *page)
+extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
+extern struct anon_vma_name *anon_vma_name_alloc(const char *name);
+extern void anon_vma_name_free(struct kref *kref);
+
+/* mmap_lock should be read-locked */
+static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
- if (page_is_file_cache(page))
- return LRU_INACTIVE_FILE;
- return LRU_INACTIVE_ANON;
+ if (anon_name)
+ kref_get(&anon_name->kref);
}
-/**
- * page_off_lru - which LRU list was page on? clearing its lru flags.
- * @page: the page to test
- *
- * Returns the LRU list a page was on, as an index into the array of LRU
- * lists; and clears its Unevictable or Active flags, ready for freeing.
- */
-static __always_inline enum lru_list page_off_lru(struct page *page)
+static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
- enum lru_list lru;
+ if (anon_name)
+ kref_put(&anon_name->kref, anon_vma_name_free);
+}
+
+static inline
+struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
+{
+ /* Prevent anon_name refcount saturation early on */
+ if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
+ anon_vma_name_get(anon_name);
+ return anon_name;
- if (PageUnevictable(page)) {
- __ClearPageUnevictable(page);
- lru = LRU_UNEVICTABLE;
- } else {
- lru = page_lru_base_type(page);
- if (PageActive(page)) {
- __ClearPageActive(page);
- lru += LRU_ACTIVE;
- }
}
- return lru;
+ return anon_vma_name_alloc(anon_name->name);
}
-/**
- * page_lru - which LRU list should a page be on?
- * @page: the page to test
+static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
+ struct vm_area_struct *new_vma)
+{
+ struct anon_vma_name *anon_name = anon_vma_name(orig_vma);
+
+ if (anon_name)
+ new_vma->anon_name = anon_vma_name_reuse(anon_name);
+}
+
+static inline void free_anon_vma_name(struct vm_area_struct *vma)
+{
+ /*
+ * Not using anon_vma_name because it generates a warning if mmap_lock
+ * is not held, which might be the case here.
+ */
+ if (!vma->vm_file)
+ anon_vma_name_put(vma->anon_name);
+}
+
+static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
+ struct anon_vma_name *anon_name2)
+{
+ if (anon_name1 == anon_name2)
+ return true;
+
+ return anon_name1 && anon_name2 &&
+ !strcmp(anon_name1->name, anon_name2->name);
+}
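+
+/*
+ * Illustrative only: userspace attaches these names with prctl(), e.g.
+ *
+ *	prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, len,
+ *	      (unsigned long)"my-heap");
+ *
+ * after which the range shows up as [anon:my-heap] in /proc/<pid>/maps.
+ */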
+
+#else /* CONFIG_ANON_VMA_NAME */
+static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
+{
+ return NULL;
+}
+
+static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
+{
+ return NULL;
+}
+
+static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
+static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
+static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
+ struct vm_area_struct *new_vma) {}
+static inline void free_anon_vma_name(struct vm_area_struct *vma) {}
+
+static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
+ struct anon_vma_name *anon_name2)
+{
+ return true;
+}
+
+#endif /* CONFIG_ANON_VMA_NAME */
+
+static inline void init_tlb_flush_pending(struct mm_struct *mm)
+{
+ atomic_set(&mm->tlb_flush_pending, 0);
+}
+
+static inline void inc_tlb_flush_pending(struct mm_struct *mm)
+{
+ atomic_inc(&mm->tlb_flush_pending);
+ /*
+ * The only time this value is relevant is when there are indeed pages
+ * to flush. And we'll only flush pages after changing them, which
+ * requires the PTL.
+ *
+ * So the ordering here is:
+ *
+ * atomic_inc(&mm->tlb_flush_pending);
+ * spin_lock(&ptl);
+ * ...
+ * set_pte_at();
+ * spin_unlock(&ptl);
+ *
+ * spin_lock(&ptl)
+ * mm_tlb_flush_pending();
+ * ....
+ * spin_unlock(&ptl);
+ *
+ * flush_tlb_range();
+ * atomic_dec(&mm->tlb_flush_pending);
+ *
+	 * Since the increment is constrained by the PTL unlock, this
+	 * ensures that the increment is visible if the PTE modification is
+ * visible. After all, if there is no PTE modification, nobody cares
+ * about TLB flushes either.
+ *
+ * This very much relies on users (mm_tlb_flush_pending() and
+ * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
+ * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
+ * locks (PPC) the unlock of one doesn't order against the lock of
+ * another PTL.
+ *
+ * The decrement is ordered by the flush_tlb_range(), such that
+ * mm_tlb_flush_pending() will not return false unless all flushes have
+ * completed.
+ */
+}
+
+static inline void dec_tlb_flush_pending(struct mm_struct *mm)
+{
+ /*
+ * See inc_tlb_flush_pending().
+ *
+ * This cannot be smp_mb__before_atomic() because smp_mb() simply does
+ * not order against TLB invalidate completion, which is what we need.
+ *
+ * Therefore we must rely on tlb_flush_*() to guarantee order.
+ */
+ atomic_dec(&mm->tlb_flush_pending);
+}
+
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+ /*
+ * Must be called after having acquired the PTL; orders against that
+ * PTLs release and therefore ensures that if we observe the modified
+ * PTE we must also observe the increment from inc_tlb_flush_pending().
+ *
+ * That is, it only guarantees to return true if there is a flush
+ * pending for _this_ PTL.
+ */
+ return atomic_read(&mm->tlb_flush_pending);
+}
+
+static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
+{
+ /*
+ * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
+ * for which there is a TLB flush pending in order to guarantee
+ * we've seen both that PTE modification and the increment.
+ *
+ * (no requirement on actually still holding the PTL, that is irrelevant)
+ */
+ return atomic_read(&mm->tlb_flush_pending) > 1;
+}
+
+/*
+ * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
+ * replace a none pte. NOTE! This should only be called when *pte is already
+ * cleared so we will never accidentally replace something valuable. Meanwhile
+ * a none pte also means we are not demoting the pte, so no tlb flush is
+ * needed; e.g., when the pte was cleared, the caller should have taken care
+ * of the tlb flush.
*
- * Returns the LRU list a page should be on, as an index
- * into the array of LRU lists.
+ * Must be called with the pgtable lock held so that no other thread will see
+ * an unstable none pte; any thread that does see it will fault and serialize
+ * at the pgtable lock.
+ *
+ * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled.
*/
-static __always_inline enum lru_list page_lru(struct page *page)
+static inline void
+pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *pte, pte_t pteval)
{
- enum lru_list lru;
+#ifdef CONFIG_PTE_MARKER_UFFD_WP
+ bool arm_uffd_pte = false;
- if (PageUnevictable(page))
- lru = LRU_UNEVICTABLE;
- else {
- lru = page_lru_base_type(page);
- if (PageActive(page))
- lru += LRU_ACTIVE;
- }
- return lru;
+ /* The current status of the pte should be "cleared" before calling */
+ WARN_ON_ONCE(!pte_none(*pte));
+
+ if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
+ return;
+
+ /* A uffd-wp wr-protected normal pte */
+ if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
+ arm_uffd_pte = true;
+
+ /*
+ * A uffd-wp wr-protected swap pte. Note: this should even cover an
+ * existing pte marker with uffd-wp bit set.
+ */
+ if (unlikely(pte_swp_uffd_wp_any(pteval)))
+ arm_uffd_pte = true;
+
+ if (unlikely(arm_uffd_pte))
+ set_pte_at(vma->vm_mm, addr, pte,
+ make_pte_marker(PTE_MARKER_UFFD_WP));
+#endif
}
+
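+/*
+ * Illustrative note (not from this patch): a typical caller is the zap
+ * path, which clears a pte and then calls this so that a later write to a
+ * uffd-wp protected range still traps; callers that drop the page for good
+ * (truncation, unmap) skip it via ZAP_FLAG_DROP_MARKER instead.
+ */
+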
#endif
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index c28911c3afa8..500e536796ca 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -5,15 +5,19 @@
#include <linux/mm_types_task.h>
#include <linux/auxvec.h>
+#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
+#include <linux/maple_tree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
+#include <linux/rcupdate.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>
+#include <linux/seqlock.h>
#include <asm/mmu.h>
@@ -22,6 +26,7 @@
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
+#define INIT_PASID 0
struct address_space;
struct mem_cgroup;
@@ -53,11 +58,11 @@ struct mem_cgroup;
* in each subpage, but you may need to restore some of their values
* afterwards.
*
- * SLUB uses cmpxchg_double() to atomically update its freelist and
- * counters. That requires that freelist & counters be adjacent and
- * double-word aligned. We align all struct pages to double-word
- * boundaries, and ensure that 'freelist' is aligned within the
- * struct.
+ * SLUB uses cmpxchg_double() to atomically update its freelist and counters.
+ * That requires that freelist & counters in struct slab be adjacent and
+ * double-word aligned. Because struct slab currently just reinterprets the
+ * bits of struct page, we align all struct pages to double-word boundaries,
+ * and ensure that 'freelist' is aligned within struct slab.
*/
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment __aligned(2 * sizeof(unsigned long))
@@ -78,10 +83,24 @@ struct page {
struct { /* Page cache and anonymous pages */
/**
* @lru: Pageout list, eg. active_list protected by
- * pgdat->lru_lock. Sometimes used as a generic list
+ * lruvec->lru_lock. Sometimes used as a generic list
* by the page owner.
*/
- struct list_head lru;
+ union {
+ struct list_head lru;
+
+ /* Or, for the Unevictable "LRU list" slot */
+ struct {
+ /* Always even, to negate PageTail */
+ void *__filler;
+ /* Count page's or folio's mlocks */
+ unsigned int mlock_count;
+ };
+
+ /* Or, free page */
+ struct list_head buddy_list;
+ struct list_head pcp_list;
+ };
/* See page-flags.h for PAGE_MAPPING_FLAGS */
struct address_space *mapping;
pgoff_t index; /* Our offset within mapping. */
@@ -95,36 +114,24 @@ struct page {
};
struct { /* page_pool used by netstack */
/**
- * @dma_addr: might require a 64-bit value even on
- * 32-bit architectures.
+			 * @pp_magic: magic value to avoid recycling
+			 * non-page_pool-allocated pages.
*/
- dma_addr_t dma_addr;
- };
- struct { /* slab, slob and slub */
+ unsigned long pp_magic;
+ struct page_pool *pp;
+ unsigned long _pp_mapping_pad;
+ unsigned long dma_addr;
union {
- struct list_head slab_list;
- struct { /* Partial pages */
- struct page *next;
-#ifdef CONFIG_64BIT
- int pages; /* Nr of pages left */
- int pobjects; /* Approximate count */
-#else
- short int pages;
- short int pobjects;
-#endif
- };
- };
- struct kmem_cache *slab_cache; /* not slob */
- /* Double-word boundary */
- void *freelist; /* first free object */
- union {
- void *s_mem; /* slab: first object */
- unsigned long counters; /* SLUB */
- struct { /* SLUB */
- unsigned inuse:16;
- unsigned objects:15;
- unsigned frozen:1;
- };
+ /**
+ * dma_addr_upper: might require a 64-bit
+ * value on 32-bit architectures.
+ */
+ unsigned long dma_addr_upper;
+ /**
+				 * For frag page support; not supported on
+				 * 32-bit architectures with 64-bit DMA.
+ */
+ atomic_long_t pp_frag_count;
};
};
struct { /* Tail pages of compound page */
@@ -134,6 +141,10 @@ struct page {
unsigned char compound_dtor;
unsigned char compound_order;
atomic_t compound_mapcount;
+ atomic_t compound_pincount;
+#ifdef CONFIG_64BIT
+ unsigned int compound_nr; /* 1 << compound_order */
+#endif
};
struct { /* Second tail page of compound page */
unsigned long _compound_pad_1; /* compound_head */
@@ -189,16 +200,13 @@ struct page {
* which are currently stored here.
*/
unsigned int page_type;
-
- unsigned int active; /* SLAB */
- int units; /* SLOB */
};
/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
atomic_t _refcount;
#ifdef CONFIG_MEMCG
- struct mem_cgroup *mem_cgroup;
+ unsigned long memcg_data;
#endif
/*
@@ -216,16 +224,139 @@ struct page {
not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
+#ifdef CONFIG_KMSAN
+ /*
+ * KMSAN metadata for this page:
+ * - shadow page: every bit indicates whether the corresponding
+ * bit of the original page is initialized (0) or not (1);
+ * - origin page: every 4 bytes contain an id of the stack trace
+ * where the uninitialized value was created.
+ */
+ struct page *kmsan_shadow;
+ struct page *kmsan_origin;
+#endif
+
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
int _last_cpupid;
#endif
} _struct_page_alignment;
+/**
+ * struct folio - Represents a contiguous set of bytes.
+ * @flags: Identical to the page flags.
+ * @lru: Least Recently Used list; tracks how recently this folio was used.
+ * @mlock_count: Number of times this folio has been pinned by mlock().
+ * @mapping: The file this folio belongs to, or refers to the anon_vma for
+ * anonymous memory.
+ * @index: Offset within the file, in units of pages. For anonymous memory,
+ * this is the index from the beginning of the mmap.
+ * @private: Filesystem per-folio data (see folio_attach_private()).
+ * Used for swp_entry_t if folio_test_swapcache().
+ * @_mapcount: Do not access this member directly. Use folio_mapcount() to
+ * find out how many times this folio is mapped by userspace.
+ * @_refcount: Do not access this member directly. Use folio_ref_count()
+ * to find how many references there are to this folio.
+ * @memcg_data: Memory Control Group data.
+ * @_flags_1: For large folios, additional page flags.
+ * @__head: Points to the folio. Do not use.
+ * @_folio_dtor: Which destructor to use for this folio.
+ * @_folio_order: Do not use directly, call folio_order().
+ * @_total_mapcount: Do not use directly, call folio_entire_mapcount().
+ * @_pincount: Do not use directly, call folio_maybe_dma_pinned().
+ * @_folio_nr_pages: Do not use directly, call folio_nr_pages().
+ *
+ * A folio is a physically, virtually and logically contiguous set
+ * of bytes. It is a power-of-two in size, and it is aligned to that
+ * same power-of-two. It is at least as large as %PAGE_SIZE. If it is
+ * in the page cache, it is at a file offset which is a multiple of that
+ * power-of-two. It may be mapped into userspace at an address which is
+ * at an arbitrary page offset, but its kernel virtual address is aligned
+ * to its size.
+ */
+struct folio {
+ /* private: don't document the anon union */
+ union {
+ struct {
+ /* public: */
+ unsigned long flags;
+ union {
+ struct list_head lru;
+ /* private: avoid cluttering the output */
+ struct {
+ void *__filler;
+ /* public: */
+ unsigned int mlock_count;
+ /* private: */
+ };
+ /* public: */
+ };
+ struct address_space *mapping;
+ pgoff_t index;
+ void *private;
+ atomic_t _mapcount;
+ atomic_t _refcount;
+#ifdef CONFIG_MEMCG
+ unsigned long memcg_data;
+#endif
+ /* private: the union with struct page is transitional */
+ };
+ struct page page;
+ };
+ unsigned long _flags_1;
+ unsigned long __head;
+ unsigned char _folio_dtor;
+ unsigned char _folio_order;
+ atomic_t _total_mapcount;
+ atomic_t _pincount;
+#ifdef CONFIG_64BIT
+ unsigned int _folio_nr_pages;
+#endif
+};
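+
+/*
+ * Illustrative only: code converts between the two views with page_folio()
+ * and &folio->page, e.g.
+ *
+ *	struct folio *folio = page_folio(page);
+ *
+ *	if (folio_test_dirty(folio))
+ *		...
+ */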
+
+#define FOLIO_MATCH(pg, fl) \
+ static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl))
+FOLIO_MATCH(flags, flags);
+FOLIO_MATCH(lru, lru);
+FOLIO_MATCH(mapping, mapping);
+FOLIO_MATCH(compound_head, lru);
+FOLIO_MATCH(index, index);
+FOLIO_MATCH(private, private);
+FOLIO_MATCH(_mapcount, _mapcount);
+FOLIO_MATCH(_refcount, _refcount);
+#ifdef CONFIG_MEMCG
+FOLIO_MATCH(memcg_data, memcg_data);
+#endif
+#undef FOLIO_MATCH
+#define FOLIO_MATCH(pg, fl) \
+ static_assert(offsetof(struct folio, fl) == \
+ offsetof(struct page, pg) + sizeof(struct page))
+FOLIO_MATCH(flags, _flags_1);
+FOLIO_MATCH(compound_head, __head);
+FOLIO_MATCH(compound_dtor, _folio_dtor);
+FOLIO_MATCH(compound_order, _folio_order);
+FOLIO_MATCH(compound_mapcount, _total_mapcount);
+FOLIO_MATCH(compound_pincount, _pincount);
+#ifdef CONFIG_64BIT
+FOLIO_MATCH(compound_nr, _folio_nr_pages);
+#endif
+#undef FOLIO_MATCH
+
+static inline atomic_t *folio_mapcount_ptr(struct folio *folio)
+{
+	struct page *tail = &folio->page + 1;
+
+	return &tail->compound_mapcount;
+}
+
static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
return &page[1].compound_mapcount;
}
+static inline atomic_t *compound_pincount_ptr(struct page *page)
+{
+ return &page[1].compound_pincount;
+}
+
/*
* Used for sizing the vmemmap region on some architectures
*/
@@ -234,8 +365,23 @@ static inline atomic_t *compound_mapcount_ptr(struct page *page)
#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
+/*
+ * page_private can be used on tail pages. However, PagePrivate is only
+ * checked by the VM on the head page. So page_private on the tail pages
+ * should be used for data that's ancillary to the head page (e.g. attaching
+ * buffer heads to tail pages after attaching buffer heads to the head page).
+ */
#define page_private(page) ((page)->private)
-#define set_page_private(page, v) ((page)->private = (v))
+
+static inline void set_page_private(struct page *page, unsigned long private)
+{
+ page->private = private;
+}
+
+static inline void *folio_get_private(struct folio *folio)
+{
+ return folio->private;
+}
struct page_frag_cache {
void * va;
@@ -283,9 +429,15 @@ struct vm_userfaultfd_ctx {
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */
+struct anon_vma_name {
+ struct kref kref;
+ /* The name needs to be at the end because it is dynamically sized. */
+ char name[];
+};
+
/*
- * This struct defines a memory VMM memory area. There is one of these
- * per VM-area/task. A VM area is any part of the process virtual memory
+ * This struct describes a virtual memory area. There is one of these
+ * per VM-area/task. A VM area is any part of the process virtual memory
* space that has a special rule for the page-fault handlers (ie a shared
* library, the executable area etc).
*/
@@ -296,21 +448,6 @@ struct vm_area_struct {
unsigned long vm_end; /* The first byte after our end address
within vm_mm. */
- /* linked list of VM areas per task, sorted by address */
- struct vm_area_struct *vm_next, *vm_prev;
-
- struct rb_node vm_rb;
-
- /*
- * Largest free memory gap in bytes to the left of this VMA.
- * Either between this VMA and vma->vm_prev, or between one of the
- * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
- * get_unmapped_area find a free area of the right size.
- */
- unsigned long rb_subtree_gap;
-
- /* Second cache line starts here. */
-
struct mm_struct *vm_mm; /* The address space we belong to. */
/*
@@ -323,11 +460,22 @@ struct vm_area_struct {
/*
* For areas with an address space and backing store,
* linkage into the address_space->i_mmap interval tree.
+ *
+	 * For private anonymous mappings, a pointer to a null-terminated string
+ * containing the name given to the vma, or NULL if unnamed.
*/
- struct {
- struct rb_node rb;
- unsigned long rb_subtree_last;
- } shared;
+
+ union {
+ struct {
+ struct rb_node rb;
+ unsigned long rb_subtree_last;
+ } shared;
+ /*
+		 * Serialized by mmap_lock. Never use directly because it is
+ * valid only when vm_file is NULL. Use anon_vma_name instead.
+ */
+ struct anon_vma_name *anon_name;
+ };
/*
* A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
@@ -335,7 +483,7 @@ struct vm_area_struct {
* can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
* or brk vma (with NULL file) can only be in an anon_vma list.
*/
- struct list_head anon_vma_chain; /* Serialized by mmap_sem &
+ struct list_head anon_vma_chain; /* Serialized by mmap_lock &
* page_table_lock */
struct anon_vma *anon_vma; /* Serialized by page_table_lock */
@@ -360,23 +508,10 @@ struct vm_area_struct {
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;
-struct core_thread {
- struct task_struct *task;
- struct core_thread *next;
-};
-
-struct core_state {
- atomic_t nr_threads;
- struct core_thread dumper;
- struct completion startup;
-};
-
struct kioctx_table;
struct mm_struct {
struct {
- struct vm_area_struct *mmap; /* list of VMAs */
- struct rb_root mm_rb;
- u64 vmacache_seqnum; /* per-thread vmacache */
+ struct maple_tree mm_mt;
#ifdef CONFIG_MMU
unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
@@ -385,12 +520,11 @@ struct mm_struct {
unsigned long mmap_base; /* base of mmap area */
unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
- /* Base adresses for compatible mmap() */
+ /* Base addresses for compatible mmap() */
unsigned long mmap_compat_base;
unsigned long mmap_compat_legacy_base;
#endif
unsigned long task_size; /* size of task vm space */
- unsigned long highest_vm_end; /* highest vma end address */
pgd_t * pgd;
#ifdef CONFIG_MEMBARRIER
@@ -431,7 +565,19 @@ struct mm_struct {
spinlock_t page_table_lock; /* Protects page tables and some
* counters
*/
- struct rw_semaphore mmap_sem;
+ /*
+		 * With some kernel configs, mmap_lock currently sits at
+		 * offset 0x120 inside 'mm_struct', which is optimal: its
+		 * two hot fields 'count' and 'owner' land in two different
+		 * cachelines, and when mmap_lock is highly contended both
+		 * fields are accessed frequently, so the current layout
+		 * reduces cache bouncing.
+		 *
+		 * So please be careful when adding new fields before
+		 * mmap_lock, which can easily push the two fields into one
+		 * cacheline.
+ */
+ struct rw_semaphore mmap_lock;
struct list_head mmlist; /* List of maybe swapped mm's. These
* are globally strung together off
@@ -451,7 +597,15 @@ struct mm_struct {
unsigned long stack_vm; /* VM_STACK */
unsigned long def_flags;
+ /**
+ * @write_protect_seq: Locked when any thread is write
+ * protecting pages mapped by this mm to enforce a later COW,
+ * for instance during page table copying for fork().
+ */
+ seqcount_t write_protect_seq;
+
spinlock_t arg_lock; /* protect the below fields */
+
unsigned long start_code, end_code, start_data, end_data;
unsigned long start_brk, brk, start_stack;
unsigned long arg_start, arg_end, env_start, env_end;
@@ -471,8 +625,6 @@ struct mm_struct {
unsigned long flags; /* Must use atomic bitops to access */
- struct core_state *core_state; /* coredumping support */
-
#ifdef CONFIG_AIO
spinlock_t ioctx_lock;
struct kioctx_table __rcu *ioctx_table;
@@ -502,33 +654,68 @@ struct mm_struct {
#endif
#ifdef CONFIG_NUMA_BALANCING
/*
- * numa_next_scan is the next time that the PTEs will be marked
- * pte_numa. NUMA hinting faults will gather statistics and
- * migrate pages to new nodes if necessary.
+ * numa_next_scan is the next time that PTEs will be remapped
+ * PROT_NONE to trigger NUMA hinting faults; such faults gather
+ * statistics and migrate pages to new nodes if necessary.
*/
unsigned long numa_next_scan;
- /* Restart point for scanning and setting pte_numa */
+ /* Restart point for scanning and remapping PTEs. */
unsigned long numa_scan_offset;
- /* numa_scan_seq prevents two threads setting pte_numa */
+ /* numa_scan_seq prevents two threads remapping PTEs. */
int numa_scan_seq;
#endif
/*
* An operation with batched TLB flushing is going on. Anything
* that can move process memory needs to flush the TLB when
- * moving a PROT_NONE or PROT_NUMA mapped page.
+ * moving a PROT_NONE mapped page.
*/
atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/* See flush_tlb_batched_pending() */
- bool tlb_flush_batched;
+ atomic_t tlb_flush_batched;
#endif
struct uprobes_state uprobes_state;
+#ifdef CONFIG_PREEMPT_RT
+ struct rcu_head delayed_drop;
+#endif
#ifdef CONFIG_HUGETLB_PAGE
atomic_long_t hugetlb_usage;
#endif
struct work_struct async_put_work;
+
+#ifdef CONFIG_IOMMU_SVA
+ u32 pasid;
+#endif
+#ifdef CONFIG_KSM
+ /*
+ * Represent how many pages of this process are involved in KSM
+ * merging.
+ */
+ unsigned long ksm_merging_pages;
+ /*
+ * Represent how many pages are checked for ksm merging
+ * including merged and not merged.
+ */
+ unsigned long ksm_rmap_items;
+#endif
+#ifdef CONFIG_LRU_GEN
+ struct {
+ /* this mm_struct is on lru_gen_mm_list */
+ struct list_head list;
+ /*
+ * Set when switching to this mm_struct, as a hint of
+ * whether it has been used since the last time per-node
+ * page table walkers cleared the corresponding bits.
+ */
+ unsigned long bitmap;
+#ifdef CONFIG_MEMCG
+ /* points to the memcg of "owner" above */
+ struct mem_cgroup *memcg;
+#endif
+ } lru_gen;
+#endif /* CONFIG_LRU_GEN */
} __randomize_layout;
/*
@@ -538,6 +725,7 @@ struct mm_struct {
unsigned long cpu_bitmap[];
};
+#define MM_MT_FLAGS (MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN)
extern struct mm_struct init_mm;
/* Pointer magic because the dynamic array size confuses some compilers. */
@@ -555,96 +743,92 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
return (struct cpumask *)&mm->cpu_bitmap;
}
-struct mmu_gather;
-extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
- unsigned long start, unsigned long end);
-extern void tlb_finish_mmu(struct mmu_gather *tlb,
- unsigned long start, unsigned long end);
+#ifdef CONFIG_LRU_GEN
-static inline void init_tlb_flush_pending(struct mm_struct *mm)
+struct lru_gen_mm_list {
+ /* mm_struct list for page table walkers */
+ struct list_head fifo;
+ /* protects the list above */
+ spinlock_t lock;
+};
+
+void lru_gen_add_mm(struct mm_struct *mm);
+void lru_gen_del_mm(struct mm_struct *mm);
+#ifdef CONFIG_MEMCG
+void lru_gen_migrate_mm(struct mm_struct *mm);
+#endif
+
+static inline void lru_gen_init_mm(struct mm_struct *mm)
{
- atomic_set(&mm->tlb_flush_pending, 0);
+ INIT_LIST_HEAD(&mm->lru_gen.list);
+ mm->lru_gen.bitmap = 0;
+#ifdef CONFIG_MEMCG
+ mm->lru_gen.memcg = NULL;
+#endif
}
-static inline void inc_tlb_flush_pending(struct mm_struct *mm)
+static inline void lru_gen_use_mm(struct mm_struct *mm)
{
- atomic_inc(&mm->tlb_flush_pending);
/*
- * The only time this value is relevant is when there are indeed pages
- * to flush. And we'll only flush pages after changing them, which
- * requires the PTL.
- *
- * So the ordering here is:
- *
- * atomic_inc(&mm->tlb_flush_pending);
- * spin_lock(&ptl);
- * ...
- * set_pte_at();
- * spin_unlock(&ptl);
- *
- * spin_lock(&ptl)
- * mm_tlb_flush_pending();
- * ....
- * spin_unlock(&ptl);
- *
- * flush_tlb_range();
- * atomic_dec(&mm->tlb_flush_pending);
- *
- * Where the increment if constrained by the PTL unlock, it thus
- * ensures that the increment is visible if the PTE modification is
- * visible. After all, if there is no PTE modification, nobody cares
- * about TLB flushes either.
- *
- * This very much relies on users (mm_tlb_flush_pending() and
- * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
- * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
- * locks (PPC) the unlock of one doesn't order against the lock of
- * another PTL.
- *
- * The decrement is ordered by the flush_tlb_range(), such that
- * mm_tlb_flush_pending() will not return false unless all flushes have
- * completed.
+ * When the bitmap is set, page reclaim knows this mm_struct has been
+ * used since the last time it cleared the bitmap. So it might be worth
+ * walking the page tables of this mm_struct to clear the accessed bit.
*/
+ WRITE_ONCE(mm->lru_gen.bitmap, -1);
}
-static inline void dec_tlb_flush_pending(struct mm_struct *mm)
+#else /* !CONFIG_LRU_GEN */
+
+static inline void lru_gen_add_mm(struct mm_struct *mm)
{
- /*
- * See inc_tlb_flush_pending().
- *
- * This cannot be smp_mb__before_atomic() because smp_mb() simply does
- * not order against TLB invalidate completion, which is what we need.
- *
- * Therefore we must rely on tlb_flush_*() to guarantee order.
- */
- atomic_dec(&mm->tlb_flush_pending);
}
-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+static inline void lru_gen_del_mm(struct mm_struct *mm)
{
- /*
- * Must be called after having acquired the PTL; orders against that
- * PTLs release and therefore ensures that if we observe the modified
- * PTE we must also observe the increment from inc_tlb_flush_pending().
- *
- * That is, it only guarantees to return true if there is a flush
- * pending for _this_ PTL.
- */
- return atomic_read(&mm->tlb_flush_pending);
}
-static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
+#ifdef CONFIG_MEMCG
+static inline void lru_gen_migrate_mm(struct mm_struct *mm)
{
- /*
- * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
- * for which there is a TLB flush pending in order to guarantee
- * we've seen both that PTE modification and the increment.
- *
- * (no requirement on actually still holding the PTL, that is irrelevant)
- */
- return atomic_read(&mm->tlb_flush_pending) > 1;
+}
+#endif
+
+static inline void lru_gen_init_mm(struct mm_struct *mm)
+{
+}
+
+static inline void lru_gen_use_mm(struct mm_struct *mm)
+{
+}
+
+#endif /* CONFIG_LRU_GEN */
+
+struct vma_iterator {
+ struct ma_state mas;
+};
+
+#define VMA_ITERATOR(name, __mm, __addr) \
+ struct vma_iterator name = { \
+ .mas = { \
+ .tree = &(__mm)->mm_mt, \
+ .index = __addr, \
+ .node = MAS_START, \
+ }, \
+ }
+
+static inline void vma_iter_init(struct vma_iterator *vmi,
+ struct mm_struct *mm, unsigned long addr)
+{
+ vmi->mas.tree = &mm->mm_mt;
+ vmi->mas.index = addr;
+ vmi->mas.node = MAS_START;
}
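+
+/*
+ * Illustrative only, assuming the maple tree's mas_find() API: walking
+ * every VMA in an mm with the iterator defined above (inspect() is a
+ * placeholder):
+ *
+ *	VMA_ITERATOR(vmi, mm, 0);
+ *	struct vm_area_struct *vma;
+ *
+ *	mmap_read_lock(mm);
+ *	while ((vma = mas_find(&vmi.mas, ULONG_MAX)) != NULL)
+ *		inspect(vma);
+ *	mmap_read_unlock(mm);
+ */
+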
+struct mmu_gather;
+extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
+extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
+extern void tlb_finish_mmu(struct mmu_gather *tlb);
+
struct vm_fault;
/**
@@ -676,6 +860,7 @@ typedef __bitwise unsigned int vm_fault_t;
* @VM_FAULT_NEEDDSYNC: ->fault did not modify page tables and needs
* fsync() to complete (for synchronous page faults
* in DAX)
+ * @VM_FAULT_COMPLETED: ->fault completed, meanwhile mmap lock released
* @VM_FAULT_HINDEX_MASK: mask HINDEX value
*
*/
@@ -693,6 +878,7 @@ enum vm_fault_reason {
VM_FAULT_FALLBACK = (__force vm_fault_t)0x000800,
VM_FAULT_DONE_COW = (__force vm_fault_t)0x001000,
VM_FAULT_NEEDDSYNC = (__force vm_fault_t)0x002000,
+ VM_FAULT_COMPLETED = (__force vm_fault_t)0x004000,
VM_FAULT_HINDEX_MASK = (__force vm_fault_t)0x0f0000,
};
@@ -759,4 +945,62 @@ typedef struct {
unsigned long val;
} swp_entry_t;
+/**
+ * enum fault_flag - Fault flag definitions.
+ * @FAULT_FLAG_WRITE: Fault was a write fault.
+ * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
+ * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
+ * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
+ * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
+ * @FAULT_FLAG_TRIED: The fault has been tried once.
+ * @FAULT_FLAG_USER: The fault originated in userspace.
+ * @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
+ * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
+ * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
+ * @FAULT_FLAG_UNSHARE: The fault is an unsharing request to unshare (and mark
+ * exclusive) a possibly shared anonymous page that is
+ * mapped R/O.
+ * @FAULT_FLAG_ORIG_PTE_VALID: whether the fault has vmf->orig_pte cached.
+ *                        We should only access orig_pte if this flag is set.
+ *
+ * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: together these two
+ * flags control whether a page fault may be retried. Currently there are
+ * three legal combinations:
+ *
+ * (a) ALLOW_RETRY and !TRIED: this means the page fault allows retry, and
+ * this is the first try
+ *
+ * (b) ALLOW_RETRY and TRIED: this means the page fault allows retry, and
+ * we've already tried at least once
+ *
+ * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
+ *
+ * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never
+ * be used. Note that page faults can be allowed to retry multiple times,
+ * in which case we'll have an initial fault with flags (a) then later on
+ * continuous faults with flags (b). We should always try to detect pending
+ * signals before a retry to make sure the continuous page faults can still be
+ * interrupted if necessary.
+ *
+ * The combination FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE is illegal.
+ * FAULT_FLAG_UNSHARE is ignored and treated like an ordinary read fault when
+ * no existing R/O-mapped anonymous page is encountered.
+ */
+enum fault_flag {
+ FAULT_FLAG_WRITE = 1 << 0,
+ FAULT_FLAG_MKWRITE = 1 << 1,
+ FAULT_FLAG_ALLOW_RETRY = 1 << 2,
+ FAULT_FLAG_RETRY_NOWAIT = 1 << 3,
+ FAULT_FLAG_KILLABLE = 1 << 4,
+ FAULT_FLAG_TRIED = 1 << 5,
+ FAULT_FLAG_USER = 1 << 6,
+ FAULT_FLAG_REMOTE = 1 << 7,
+ FAULT_FLAG_INSTRUCTION = 1 << 8,
+ FAULT_FLAG_INTERRUPTIBLE = 1 << 9,
+ FAULT_FLAG_UNSHARE = 1 << 10,
+ FAULT_FLAG_ORIG_PTE_VALID = 1 << 11,
+};
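+
+/*
+ * Illustrative only: a fault handler can distinguish a first attempt from
+ * a retry like this:
+ *
+ *	if ((flags & FAULT_FLAG_ALLOW_RETRY) && !(flags & FAULT_FLAG_TRIED))
+ *		;	// first attempt - may drop mmap_lock and retry later
+ */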
+
+typedef unsigned int __bitwise zap_flags_t;
+
#endif /* _LINUX_MM_TYPES_H */
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index c1bc6731125c..0bb4b6da9993 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -25,18 +25,6 @@
#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
/*
- * The per task VMA cache array:
- */
-#define VMACACHE_BITS 2
-#define VMACACHE_SIZE (1U << VMACACHE_BITS)
-#define VMACACHE_MASK (VMACACHE_SIZE - 1)
-
-struct vmacache {
- u64 seqnum;
- struct vm_area_struct *vmas[VMACACHE_SIZE];
-};
-
-/*
* When updating this, please also update struct resident_page_types[] in
* kernel/fork.c
*/
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 4b08e9c9c538..58b3abd457a3 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -31,6 +31,9 @@
/*
* The historical set of flags that all mmap implementations implicitly
* support when a ->mmap_validate() op is not provided in file_operations.
+ *
+ * MAP_EXECUTABLE and MAP_DENYWRITE are completely ignored throughout the
+ * kernel.
*/
#define LEGACY_MAP_MASK (MAP_SHARED \
| MAP_PRIVATE \
@@ -57,8 +60,12 @@ extern struct percpu_counter vm_committed_as;
#ifdef CONFIG_SMP
extern s32 vm_committed_as_batch;
+extern void mm_compute_batch(int overcommit_policy);
#else
#define vm_committed_as_batch 0
+static inline void mm_compute_batch(int overcommit_policy)
+{
+}
#endif
unsigned long vm_memory_committed(void);
@@ -74,15 +81,16 @@ static inline void vm_unacct_memory(long pages)
}
/*
- * Allow architectures to handle additional protection bits
+ * Allow architectures to handle additional protection and flag bits. The
+ * overriding macros must be defined in the arch-specific asm/mman.h file.
*/
#ifndef arch_calc_vm_prot_bits
#define arch_calc_vm_prot_bits(prot, pkey) 0
#endif
-#ifndef arch_vm_get_page_prot
-#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
+#ifndef arch_calc_vm_flag_bits
+#define arch_calc_vm_flag_bits(flags) 0
#endif
#ifndef arch_validate_prot
@@ -99,6 +107,19 @@ static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
#define arch_validate_prot arch_validate_prot
#endif
+#ifndef arch_validate_flags
+/*
+ * This is called from mmap() and mprotect() with the updated vma->vm_flags.
+ *
+ * Returns true if the VM_* flags are valid.
+ */
+static inline bool arch_validate_flags(unsigned long flags)
+{
+ return true;
+}
+#define arch_validate_flags arch_validate_flags
+#endif
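+
+/*
+ * Illustrative arch override (a sketch, close to what arm64 does for MTE):
+ * an architecture can reject flag combinations it cannot support, e.g.
+ *
+ *	static inline bool arch_validate_flags(unsigned long vm_flags)
+ *	{
+ *		// VM_MTE only valid if userspace set VM_MTE_ALLOWED first
+ *		return !(vm_flags & VM_MTE) || (vm_flags & VM_MTE_ALLOWED);
+ *	}
+ *	#define arch_validate_flags arch_validate_flags
+ */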
+
/*
* Optimisation macro. It is equivalent to:
* (x & bit1) ? bit2 : 0
@@ -129,9 +150,9 @@ static inline unsigned long
calc_vm_flag_bits(unsigned long flags)
{
return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
- _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) |
_calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) |
- _calc_vm_trans(flags, MAP_SYNC, VM_SYNC );
+ _calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) |
+ arch_calc_vm_flag_bits(flags);
}
unsigned long vm_commit_limit(void);
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
new file mode 100644
index 000000000000..96e113e23d04
--- /dev/null
+++ b/include/linux/mmap_lock.h
@@ -0,0 +1,170 @@
+#ifndef _LINUX_MMAP_LOCK_H
+#define _LINUX_MMAP_LOCK_H
+
+#include <linux/lockdep.h>
+#include <linux/mm_types.h>
+#include <linux/mmdebug.h>
+#include <linux/rwsem.h>
+#include <linux/tracepoint-defs.h>
+#include <linux/types.h>
+
+#define MMAP_LOCK_INITIALIZER(name) \
+ .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
+
+DECLARE_TRACEPOINT(mmap_lock_start_locking);
+DECLARE_TRACEPOINT(mmap_lock_acquire_returned);
+DECLARE_TRACEPOINT(mmap_lock_released);
+
+#ifdef CONFIG_TRACING
+
+void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
+void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
+ bool success);
+void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);
+
+static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
+ bool write)
+{
+ if (tracepoint_enabled(mmap_lock_start_locking))
+ __mmap_lock_do_trace_start_locking(mm, write);
+}
+
+static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
+ bool write, bool success)
+{
+ if (tracepoint_enabled(mmap_lock_acquire_returned))
+ __mmap_lock_do_trace_acquire_returned(mm, write, success);
+}
+
+static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
+{
+ if (tracepoint_enabled(mmap_lock_released))
+ __mmap_lock_do_trace_released(mm, write);
+}
+
+#else /* !CONFIG_TRACING */
+
+static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
+ bool write)
+{
+}
+
+static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
+ bool write, bool success)
+{
+}
+
+static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
+{
+}
+
+#endif /* CONFIG_TRACING */
+
+static inline void mmap_init_lock(struct mm_struct *mm)
+{
+ init_rwsem(&mm->mmap_lock);
+}
+
+static inline void mmap_write_lock(struct mm_struct *mm)
+{
+ __mmap_lock_trace_start_locking(mm, true);
+ down_write(&mm->mmap_lock);
+ __mmap_lock_trace_acquire_returned(mm, true, true);
+}
+
+static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
+{
+ __mmap_lock_trace_start_locking(mm, true);
+ down_write_nested(&mm->mmap_lock, subclass);
+ __mmap_lock_trace_acquire_returned(mm, true, true);
+}
+
+static inline int mmap_write_lock_killable(struct mm_struct *mm)
+{
+ int ret;
+
+ __mmap_lock_trace_start_locking(mm, true);
+ ret = down_write_killable(&mm->mmap_lock);
+ __mmap_lock_trace_acquire_returned(mm, true, ret == 0);
+ return ret;
+}
+
+static inline bool mmap_write_trylock(struct mm_struct *mm)
+{
+ bool ret;
+
+ __mmap_lock_trace_start_locking(mm, true);
+ ret = down_write_trylock(&mm->mmap_lock) != 0;
+ __mmap_lock_trace_acquire_returned(mm, true, ret);
+ return ret;
+}
+
+static inline void mmap_write_unlock(struct mm_struct *mm)
+{
+ __mmap_lock_trace_released(mm, true);
+ up_write(&mm->mmap_lock);
+}
+
+static inline void mmap_write_downgrade(struct mm_struct *mm)
+{
+ __mmap_lock_trace_acquire_returned(mm, false, true);
+ downgrade_write(&mm->mmap_lock);
+}
+
+static inline void mmap_read_lock(struct mm_struct *mm)
+{
+ __mmap_lock_trace_start_locking(mm, false);
+ down_read(&mm->mmap_lock);
+ __mmap_lock_trace_acquire_returned(mm, false, true);
+}
+
+static inline int mmap_read_lock_killable(struct mm_struct *mm)
+{
+ int ret;
+
+ __mmap_lock_trace_start_locking(mm, false);
+ ret = down_read_killable(&mm->mmap_lock);
+ __mmap_lock_trace_acquire_returned(mm, false, ret == 0);
+ return ret;
+}
+
+static inline bool mmap_read_trylock(struct mm_struct *mm)
+{
+ bool ret;
+
+ __mmap_lock_trace_start_locking(mm, false);
+ ret = down_read_trylock(&mm->mmap_lock) != 0;
+ __mmap_lock_trace_acquire_returned(mm, false, ret);
+ return ret;
+}
+
+static inline void mmap_read_unlock(struct mm_struct *mm)
+{
+ __mmap_lock_trace_released(mm, false);
+ up_read(&mm->mmap_lock);
+}
+
+static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
+{
+ __mmap_lock_trace_released(mm, false);
+ up_read_non_owner(&mm->mmap_lock);
+}
+
+static inline void mmap_assert_locked(struct mm_struct *mm)
+{
+ lockdep_assert_held(&mm->mmap_lock);
+ VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
+}
+
+static inline void mmap_assert_write_locked(struct mm_struct *mm)
+{
+ lockdep_assert_held_write(&mm->mmap_lock);
+ VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
+}
+
+static inline int mmap_lock_is_contended(struct mm_struct *mm)
+{
+ return rwsem_is_contended(&mm->mmap_lock);
+}
+
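+/*
+ * Illustrative only: the canonical usage pattern. Readers that walk VMAs
+ * take the read side; anything changing the VMA layout takes the write
+ * side.
+ *
+ *	mmap_read_lock(mm);
+ *	vma = find_vma(mm, addr);	// find_vma() is declared in <linux/mm.h>
+ *	... inspect vma ...
+ *	mmap_read_unlock(mm);
+ */
+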
+#endif /* _LINUX_MMAP_LOCK_H */
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index cf3780a6ccc4..c726ea781255 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -48,6 +48,7 @@ struct mmc_ext_csd {
u8 sec_feature_support;
u8 rel_sectors;
u8 rel_param;
+ bool enhanced_rpmb_supported;
u8 part_config;
u8 cache_ctrl;
u8 rst_n_function;
@@ -108,6 +109,7 @@ struct mmc_ext_csd {
u8 raw_hc_erase_gap_size; /* 221 */
u8 raw_erase_timeout_mult; /* 223 */
u8 raw_hc_erase_grp_size; /* 224 */
+ u8 raw_boot_mult; /* 226 */
u8 raw_sec_trim_mult; /* 229 */
u8 raw_sec_erase_mult; /* 230 */
u8 raw_sec_feature_support;/* 231 */
@@ -138,6 +140,8 @@ struct sd_scr {
unsigned char cmds;
#define SD_SCR_CMD20_SUPPORT (1<<0)
#define SD_SCR_CMD23_SUPPORT (1<<1)
+#define SD_SCR_CMD48_SUPPORT (1<<2)
+#define SD_SCR_CMD58_SUPPORT (1<<3)
};
struct sd_ssr {
@@ -188,6 +192,25 @@ struct sd_switch_caps {
#define SD_MAX_CURRENT_800 (1 << SD_SET_CURRENT_LIMIT_800)
};
+struct sd_ext_reg {
+ u8 fno;
+ u8 page;
+ u16 offset;
+ u8 rev;
+ u8 feature_enabled;
+ u8 feature_support;
+/* Power Management Function. */
+#define SD_EXT_POWER_OFF_NOTIFY (1<<0)
+#define SD_EXT_POWER_SUSTENANCE (1<<1)
+#define SD_EXT_POWER_DOWN_MODE (1<<2)
+/* Performance Enhancement Function. */
+#define SD_EXT_PERF_FX_EVENT (1<<0)
+#define SD_EXT_PERF_CARD_MAINT (1<<1)
+#define SD_EXT_PERF_HOST_MAINT (1<<2)
+#define SD_EXT_PERF_CACHE (1<<3)
+#define SD_EXT_PERF_CMD_QUEUE (1<<4)
+};
+
struct sdio_cccr {
unsigned int sdio_vsn;
unsigned int sd_vsn;
@@ -196,7 +219,8 @@ struct sdio_cccr {
wide_bus:1,
high_power:1,
high_speed:1,
- disable_cd:1;
+ disable_cd:1,
+ enable_async_irq:1;
};
struct sdio_cis {
@@ -269,6 +293,7 @@ struct mmc_card {
#define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */
#define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */
#define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */
+#define MMC_QUIRK_BROKEN_SD_DISCARD (1<<14) /* Disable broken SD discard support */
bool reenable_cmdq; /* Re-enable Command Queue */
@@ -289,6 +314,8 @@ struct mmc_card {
struct sd_scr scr; /* extra SD information */
struct sd_ssr ssr; /* yet more SD information */
struct sd_switch_caps sw_caps; /* switch (CMD6) caps */
+ struct sd_ext_reg ext_power; /* SD extension reg for PM */
+ struct sd_ext_reg ext_perf; /* SD extension reg for PERF */
unsigned int sdio_funcs; /* number of SDIO functions */
atomic_t sdio_funcs_probed; /* number of probed SDIO funcs */
@@ -296,6 +323,8 @@ struct mmc_card {
struct sdio_cis cis; /* common tuple info */
struct sdio_func *sdio_func[SDIO_MAX_FUNCS]; /* SDIO functions (devices) */
struct sdio_func *sdio_single_irq; /* SDIO function when only one IRQ active */
+ u8 major_rev; /* major revision number */
+ u8 minor_rev; /* minor revision number */
unsigned num_info; /* number of info strings */
const char **info; /* info strings */
struct sdio_func_tuple *tuples; /* unknown common tuples */
@@ -308,7 +337,6 @@ struct mmc_card {
struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
unsigned int nr_parts;
- unsigned int bouncesz; /* Bounce buffer size */
struct workqueue_struct *complete_wq; /* Private workqueue */
};
@@ -317,10 +345,16 @@ static inline bool mmc_large_sector(struct mmc_card *card)
return card->ext_csd.data_sector_size == 4096;
}
+static inline int mmc_card_enable_async_irq(struct mmc_card *card)
+{
+ return card->cccr.enable_async_irq;
+}
+
bool mmc_card_is_blockaddr(struct mmc_card *card);
#define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC)
#define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD)
#define mmc_card_sdio(c) ((c)->type == MMC_TYPE_SDIO)
+#define mmc_card_sd_combo(c) ((c)->type == MMC_TYPE_SD_COMBO)
#endif /* LINUX_MMC_CARD_H */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index b7ba8810a3b5..6efec0b9820c 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -107,9 +107,6 @@ struct mmc_command {
*/
unsigned int busy_timeout; /* busy detect timeout in ms */
- /* Set this flag only for blocking sanitize request */
- bool sanitize_busy;
-
struct mmc_data *data; /* data segment associated with cmd */
struct mmc_request *mrq; /* associated request */
};
@@ -165,6 +162,11 @@ struct mmc_request {
bool cap_cmd_during_tfr;
int tag;
+
+#ifdef CONFIG_MMC_CRYPTO
+ const struct bio_crypt_ctx *crypto_ctx;
+ int crypto_key_slot;
+#endif
};
struct mmc_card;
@@ -173,8 +175,8 @@ void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq);
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd,
int retries);
-int mmc_hw_reset(struct mmc_host *host);
-int mmc_sw_reset(struct mmc_host *host);
+int mmc_hw_reset(struct mmc_card *card);
+int mmc_sw_reset(struct mmc_card *card);
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card);
#endif /* LINUX_MMC_CORE_H */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 4c5eb3aa8e72..8fdd3cf971a3 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -15,6 +15,7 @@
#include <linux/mmc/card.h>
#include <linux/mmc/pm.h>
#include <linux/dma-direction.h>
+#include <linux/blk-crypto-profile.h>
struct mmc_ios {
unsigned int clock; /* clock rate */
@@ -60,6 +61,8 @@ struct mmc_ios {
#define MMC_TIMING_MMC_DDR52 8
#define MMC_TIMING_MMC_HS200 9
#define MMC_TIMING_MMC_HS400 10
+#define MMC_TIMING_SD_EXP 11
+#define MMC_TIMING_SD_EXP_1_2V 12
unsigned char signal_voltage; /* signalling voltage (1.8V or 3.3V) */
@@ -77,8 +80,38 @@ struct mmc_ios {
bool enhanced_strobe; /* hs400es selection */
};
+struct mmc_clk_phase {
+ bool valid;
+ u16 in_deg;
+ u16 out_deg;
+};
+
+#define MMC_NUM_CLK_PHASES (MMC_TIMING_MMC_HS400 + 1)
+struct mmc_clk_phase_map {
+ struct mmc_clk_phase phase[MMC_NUM_CLK_PHASES];
+};
+
struct mmc_host;
+enum mmc_err_stat {
+ MMC_ERR_CMD_TIMEOUT,
+ MMC_ERR_CMD_CRC,
+ MMC_ERR_DAT_TIMEOUT,
+ MMC_ERR_DAT_CRC,
+ MMC_ERR_AUTO_CMD,
+ MMC_ERR_ADMA,
+ MMC_ERR_TUNING,
+ MMC_ERR_CMDQ_RED,
+ MMC_ERR_CMDQ_GCE,
+ MMC_ERR_CMDQ_ICCE,
+ MMC_ERR_REQ_TIMEOUT,
+ MMC_ERR_CMDQ_REQ_TIMEOUT,
+ MMC_ERR_ICE_CFG,
+ MMC_ERR_CTRL_TIMEOUT,
+ MMC_ERR_UNEXPECTED_IRQ,
+ MMC_ERR_MAX,
+};
+
struct mmc_host_ops {
/*
* It is optional for the host to implement pre_req and post_req in
@@ -92,6 +125,9 @@ struct mmc_host_ops {
int err);
void (*pre_req)(struct mmc_host *host, struct mmc_request *req);
void (*request)(struct mmc_host *host, struct mmc_request *req);
+ /* Submit one request to host in atomic context. */
+ int (*request_atomic)(struct mmc_host *host,
+ struct mmc_request *req);
/*
* Avoid calling the next three functions too often or in a "fast
@@ -136,7 +172,7 @@ struct mmc_host_ops {
int (*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios);
- /* Check if the card is pulling dat[0:3] low */
+ /* Check if the card is pulling dat[0] low */
int (*card_busy)(struct mmc_host *host);
/* The tuning command opcode value is different for SD and eMMC cards */
@@ -145,6 +181,9 @@ struct mmc_host_ops {
/* Prepare HS400 target operating frequency depending host driver */
int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);
+ /* Execute HS400 tuning depending on the host driver */
+ int (*execute_hs400_tuning)(struct mmc_host *host, struct mmc_card *card);
+
/* Prepare switch to DDR during the HS400 init sequence */
int (*hs400_prepare_ddr)(struct mmc_host *host);
@@ -160,7 +199,8 @@ struct mmc_host_ops {
int (*select_drive_strength)(struct mmc_card *card,
unsigned int max_dtr, int host_drv,
int card_drv, int *drv_type);
- void (*hw_reset)(struct mmc_host *host);
+ /* Reset the eMMC card via RST_n */
+ void (*card_hw_reset)(struct mmc_host *host);
void (*card_event)(struct mmc_host *host);
/*
@@ -169,6 +209,9 @@ struct mmc_host_ops {
*/
int (*multi_io_quirk)(struct mmc_card *card,
unsigned int direction, int blk_size);
+
+ /* Initialize an SD express card, mandatory for MMC_CAP2_SD_EXP. */
+ int (*init_sd_express)(struct mmc_host *host, struct mmc_ios *ios);
};
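A sketch of a host driver adopting the renamed reset hook; the GPIO field, delays, and struct layout are illustrative assumptions (needs <linux/gpio/consumer.h> and <linux/delay.h>), not part of this patch:

struct example_host {
        struct gpio_desc *reset_gpio;   /* wired to the eMMC RST_n pin */
};

static void example_card_hw_reset(struct mmc_host *host)
{
        struct example_host *eh = mmc_priv(host);

        gpiod_set_value_cansleep(eh->reset_gpio, 1);
        usleep_range(10, 20);           /* hold RST_n asserted */
        gpiod_set_value_cansleep(eh->reset_gpio, 0);
        usleep_range(300, 500);         /* give the card time to recover */
}

static const struct mmc_host_ops example_ops = {
        .card_hw_reset  = example_card_hw_reset,        /* formerly .hw_reset */
};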
struct mmc_cqe_ops {
@@ -281,9 +324,7 @@ struct mmc_host {
u32 ocr_avail_sdio; /* SDIO-specific OCR */
u32 ocr_avail_sd; /* SD-specific OCR */
u32 ocr_avail_mmc; /* MMC-specific OCR */
-#ifdef CONFIG_PM_SLEEP
- struct notifier_block pm_notify;
-#endif
+ struct wakeup_source *ws; /* Enables consumption of uevents */
u32 max_current_330;
u32 max_current_300;
u32 max_current_180;
@@ -318,10 +359,11 @@ struct mmc_host {
#define MMC_CAP_AGGRESSIVE_PM (1 << 7) /* Suspend (e)MMC/SD at idle */
#define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. eMMC */
#define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */
-#define MMC_CAP_ERASE (1 << 10) /* Allow erase/trim commands */
#define MMC_CAP_3_3V_DDR (1 << 11) /* Host supports eMMC DDR 3.3V */
#define MMC_CAP_1_8V_DDR (1 << 12) /* Host supports eMMC DDR 1.8V */
#define MMC_CAP_1_2V_DDR (1 << 13) /* Host supports eMMC DDR 1.2V */
+#define MMC_CAP_DDR (MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR | \
+ MMC_CAP_1_2V_DDR)
#define MMC_CAP_POWER_OFF_CARD (1 << 14) /* Can power off after boot */
#define MMC_CAP_BUS_WIDTH_TEST (1 << 15) /* CMD14/CMD19 bus width ok */
#define MMC_CAP_UHS_SDR12 (1 << 16) /* Host supports UHS SDR12 mode */
@@ -341,16 +383,19 @@ struct mmc_host {
#define MMC_CAP_CD_WAKE (1 << 28) /* Enable card detect wake */
#define MMC_CAP_CMD_DURING_TFR (1 << 29) /* Commands during data transfer */
#define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */
-#define MMC_CAP_HW_RESET (1 << 31) /* Hardware reset */
+#define MMC_CAP_HW_RESET (1 << 31) /* Reset the eMMC card via RST_n */
u32 caps2; /* More host capabilities */
#define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */
#define MMC_CAP2_FULL_PWR_CYCLE (1 << 2) /* Can do full power cycle */
+#define MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND (1 << 3) /* Can do full power cycle in suspend */
#define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */
#define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */
#define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \
MMC_CAP2_HS200_1_2V_SDR)
+#define MMC_CAP2_SD_EXP (1 << 7) /* SD express via PCIe */
+#define MMC_CAP2_SD_EXP_1_2V (1 << 8) /* SD express 1.2V */
#define MMC_CAP2_CD_ACTIVE_HIGH (1 << 10) /* Card-detect signal active high */
#define MMC_CAP2_RO_ACTIVE_HIGH (1 << 11) /* Write-protect signal active high */
#define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */
@@ -370,6 +415,12 @@ struct mmc_host {
#define MMC_CAP2_CQE_DCMD (1 << 24) /* CQE can issue a direct command */
#define MMC_CAP2_AVOID_3_3V (1 << 25) /* Host must negotiate down from 3.3V */
#define MMC_CAP2_MERGE_CAPABLE (1 << 26) /* Host can merge a segment over the segment size */
+#ifdef CONFIG_MMC_CRYPTO
+#define MMC_CAP2_CRYPTO (1 << 27) /* Host supports inline encryption */
+#else
+#define MMC_CAP2_CRYPTO 0
+#endif
+#define MMC_CAP2_ALT_GPT_TEGRA (1 << 28) /* Host with eMMC that has GPT entry at a non-standard location */
int fixed_drv_type; /* fixed driver type for non-removable media */
@@ -392,12 +443,11 @@ struct mmc_host {
/* group bitfields together to minimize padding */
unsigned int use_spi_crc:1;
unsigned int claimed:1; /* host exclusively claimed */
- unsigned int bus_dead:1; /* bus has been released */
+ unsigned int doing_init_tune:1; /* initial tuning in progress */
unsigned int can_retune:1; /* re-tuning can be used */
unsigned int doing_retune:1; /* re-tuning in progress */
unsigned int retune_now:1; /* do re-tuning at next req */
unsigned int retune_paused:1; /* re-tuning is temporarily disabled */
- unsigned int use_blk_mq:1; /* use blk-mq */
unsigned int retune_crc_disable:1; /* don't trigger retune upon crc */
unsigned int can_dma_map_merge:1; /* merging can be used */
@@ -423,11 +473,10 @@ struct mmc_host {
struct mmc_slot slot;
const struct mmc_bus_ops *bus_ops; /* current bus driver */
- unsigned int bus_refs; /* reference counter */
unsigned int sdio_irqs;
struct task_struct *sdio_irq_thread;
- struct delayed_work sdio_irq_work;
+ struct work_struct sdio_irq_work;
bool sdio_irq_pending;
atomic_t sdio_irq_thread_abort;
@@ -463,7 +512,16 @@ struct mmc_host {
bool cqe_enabled;
bool cqe_on;
- unsigned long private[0] ____cacheline_aligned;
+ /* Inline encryption support */
+#ifdef CONFIG_MMC_CRYPTO
+ struct blk_crypto_profile crypto_profile;
+#endif
+
+ /* Host Software Queue support */
+ bool hsq_enabled;
+
+ u32 err_stats[MMC_ERR_MAX];
+ unsigned long private[] ____cacheline_aligned;
};
struct device_node;
@@ -472,8 +530,10 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *);
int mmc_add_host(struct mmc_host *);
void mmc_remove_host(struct mmc_host *);
void mmc_free_host(struct mmc_host *);
+void mmc_of_parse_clk_phase(struct mmc_host *host,
+ struct mmc_clk_phase_map *map);
int mmc_of_parse(struct mmc_host *host);
-int mmc_of_parse_voltage(struct device_node *np, u32 *mask);
+int mmc_of_parse_voltage(struct mmc_host *host, u32 *mask);
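A probe-time sketch combining the new clock-phase parser with the changed mmc_of_parse_voltage() signature, which now takes the host; example_set_clk_phase() is a hypothetical host-specific setter:

static int example_probe_parse(struct mmc_host *host)
{
        struct mmc_clk_phase_map map;
        u32 ocr_mask;
        int ret;

        ret = mmc_of_parse(host);
        if (ret)
                return ret;

        ret = mmc_of_parse_voltage(host, &ocr_mask);    /* host, not a device_node */
        if (ret < 0)
                return ret;

        mmc_of_parse_clk_phase(host, &map);
        if (map.phase[MMC_TIMING_MMC_HS200].valid)
                example_set_clk_phase(host, &map.phase[MMC_TIMING_MMC_HS200]);

        return 0;
}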
static inline void *mmc_priv(struct mmc_host *host)
{
@@ -585,12 +645,24 @@ static inline bool mmc_doing_retune(struct mmc_host *host)
return host->doing_retune == 1;
}
+static inline bool mmc_doing_tune(struct mmc_host *host)
+{
+ return host->doing_retune == 1 || host->doing_init_tune == 1;
+}
+
static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data)
{
return data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}
+static inline void mmc_debugfs_err_stats_inc(struct mmc_host *host,
+ enum mmc_err_stat stat)
+{
+ host->err_stats[stat] += 1;
+}
+
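A sketch of the intended call site: a host driver's interrupt error path bumping the per-host counters (the status-bit names are made up):

static void example_handle_irq_error(struct mmc_host *host, u32 intmask)
{
        if (intmask & EXAMPLE_INT_CMD_TIMEOUT)          /* hypothetical bit */
                mmc_debugfs_err_stats_inc(host, MMC_ERR_CMD_TIMEOUT);
        if (intmask & EXAMPLE_INT_DAT_CRC)              /* hypothetical bit */
                mmc_debugfs_err_stats_inc(host, MMC_ERR_DAT_CRC);
}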
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
-int mmc_abort_tuning(struct mmc_host *host, u32 opcode);
+int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode);
+int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
#endif /* LINUX_MMC_HOST_H */
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 897a87c4c827..9c50bc40f8ff 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -99,6 +99,12 @@ static inline bool mmc_op_multi(u32 opcode)
opcode == MMC_READ_MULTIPLE_BLOCK;
}
+static inline bool mmc_op_tuning(u32 opcode)
+{
+ return opcode == MMC_SEND_TUNING_BLOCK ||
+ opcode == MMC_SEND_TUNING_BLOCK_HS200;
+}
+
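A sketch of the kind of check this helper enables: tuning commands are expected to fail while the sample point is swept, so a driver can skip its usual error handling for them:

static bool example_should_log_error(struct mmc_command *cmd)
{
        /* CMD19/CMD21 failures during tuning are routine, not errors. */
        return !mmc_op_tuning(cmd->opcode);
}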
/*
* MMC_SWITCH argument format:
*
@@ -161,6 +167,16 @@ static inline bool mmc_op_multi(u32 opcode)
#define R1_STATE_PRG 7
#define R1_STATE_DIS 8
+static inline bool mmc_ready_for_data(u32 status)
+{
+ /*
+ * Some cards mishandle the status bits, so make sure to check both the
+ * busy indication and the card state.
+ */
+ return status & R1_READY_FOR_DATA &&
+ R1_CURRENT_STATE(status) == R1_STATE_TRAN;
+}
+
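A sketch of the usual consumer: polling CMD13 until the card reports both R1_READY_FOR_DATA and the TRAN state. A real caller would bound the loop with a timeout; that is omitted here for brevity:

static int example_poll_busy(struct mmc_card *card)
{
        struct mmc_command cmd = {};
        int err;

        cmd.opcode = MMC_SEND_STATUS;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

        do {
                err = mmc_wait_for_cmd(card->host, &cmd, 0);
                if (err)
                        return err;
        } while (!mmc_ready_for_data(cmd.resp[0]));

        return 0;
}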
/*
* MMC/SD in SPI mode reports R1 status always, and R2 for SEND_STATUS
* R1 is the low order byte; R2 is the next highest byte, when present.
@@ -315,6 +331,7 @@ static inline bool mmc_op_multi(u32 opcode)
*/
#define EXT_CSD_WR_REL_PARAM_EN (1<<2)
+#define EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR (1<<4)
#define EXT_CSD_BOOT_WP_B_PWR_WP_DIS (0x40)
#define EXT_CSD_BOOT_WP_B_PERM_WP_DIS (0x10)
diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h
index 2236aa540faa..6727576a8755 100644
--- a/include/linux/mmc/sd.h
+++ b/include/linux/mmc/sd.h
@@ -29,6 +29,10 @@
#define SD_APP_OP_COND 41 /* bcr [31:0] OCR R3 */
#define SD_APP_SEND_SCR 51 /* adtc R1 */
+ /* class 11 */
+#define SD_READ_EXTR_SINGLE 48 /* adtc [31:0] R1 */
+#define SD_WRITE_EXTR_SINGLE 49 /* adtc [31:0] R1 */
+
/* OCR bit definitions */
#define SD_OCR_S18R (1 << 24) /* 1.8V switching request */
#define SD_ROCR_S18A SD_OCR_S18R /* 1.8V switching accepted by card */
diff --git a/include/linux/mmc/sdhci-pci-data.h b/include/linux/mmc/sdhci-pci-data.h
deleted file mode 100644
index 1d42872d22f3..000000000000
--- a/include/linux/mmc/sdhci-pci-data.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef LINUX_MMC_SDHCI_PCI_DATA_H
-#define LINUX_MMC_SDHCI_PCI_DATA_H
-
-struct pci_dev;
-
-struct sdhci_pci_data {
- struct pci_dev *pdev;
- int slotno;
- int rst_n_gpio; /* Set to -EINVAL if unused */
- int cd_gpio; /* Set to -EINVAL if unused */
- int (*setup)(struct sdhci_pci_data *data);
- void (*cleanup)(struct sdhci_pci_data *data);
-};
-
-extern struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev,
- int slotno);
-#endif
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h
index e28769991e82..1ef400f28642 100644
--- a/include/linux/mmc/sdio.h
+++ b/include/linux/mmc/sdio.h
@@ -82,7 +82,7 @@
#define SDIO_SD_REV_1_01 0 /* SD Physical Spec Version 1.01 */
#define SDIO_SD_REV_1_10 1 /* SD Physical Spec Version 1.10 */
#define SDIO_SD_REV_2_00 2 /* SD Physical Spec Version 2.00 */
-#define SDIO_SD_REV_3_00 3 /* SD Physical Spev Version 3.00 */
+#define SDIO_SD_REV_3_00 3 /* SD Physical Spec Version 3.00 */
#define SDIO_CCCR_IOEx 0x02
#define SDIO_CCCR_IORx 0x03
@@ -159,6 +159,11 @@
#define SDIO_DTSx_SET_TYPE_A (1 << SDIO_DRIVE_DTSx_SHIFT)
#define SDIO_DTSx_SET_TYPE_C (2 << SDIO_DRIVE_DTSx_SHIFT)
#define SDIO_DTSx_SET_TYPE_D (3 << SDIO_DRIVE_DTSx_SHIFT)
+
+#define SDIO_CCCR_INTERRUPT_EXT 0x16
+#define SDIO_INTERRUPT_EXT_SAI (1 << 0)
+#define SDIO_INTERRUPT_EXT_EAI (1 << 1)
+
/*
* Function Basic Registers (FBR)
*/
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index 5a177f7a83c3..478855b8e406 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -25,7 +25,7 @@ struct sdio_func_tuple {
struct sdio_func_tuple *next;
unsigned char code;
unsigned char size;
- unsigned char data[0];
+ unsigned char data[];
};
/*
@@ -51,6 +51,8 @@ struct sdio_func {
u8 *tmpbuf; /* DMA:able scratch buffer */
+ u8 major_rev; /* major revision number */
+ u8 minor_rev; /* minor revision number */
unsigned num_info; /* number of info strings */
const char **info; /* info strings */
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 2e9a6e4634eb..74f9d9a6d330 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -24,59 +24,111 @@
/*
* Vendors and devices. Sort key: vendor first, device next.
*/
+
+#define SDIO_VENDOR_ID_STE 0x0020
+#define SDIO_DEVICE_ID_STE_CW1200 0x2280
+
+#define SDIO_VENDOR_ID_INTEL 0x0089
+#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402
+#define SDIO_DEVICE_ID_INTEL_IWMC3200WIFI 0x1403
+#define SDIO_DEVICE_ID_INTEL_IWMC3200TOP 0x1404
+#define SDIO_DEVICE_ID_INTEL_IWMC3200GPS 0x1405
+#define SDIO_DEVICE_ID_INTEL_IWMC3200BT 0x1406
+#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5 0x1407
+
+#define SDIO_VENDOR_ID_CGUYS 0x0092
+#define SDIO_DEVICE_ID_CGUYS_EW_CG1102GC 0x0004
+
+#define SDIO_VENDOR_ID_TI 0x0097
+#define SDIO_DEVICE_ID_TI_WL1271 0x4076
+
+#define SDIO_VENDOR_ID_ATHEROS 0x0271
+#define SDIO_DEVICE_ID_ATHEROS_AR6003_00 0x0300
+#define SDIO_DEVICE_ID_ATHEROS_AR6003_01 0x0301
+#define SDIO_DEVICE_ID_ATHEROS_AR6004_00 0x0400
+#define SDIO_DEVICE_ID_ATHEROS_AR6004_01 0x0401
+#define SDIO_DEVICE_ID_ATHEROS_AR6004_02 0x0402
+#define SDIO_DEVICE_ID_ATHEROS_AR6004_18 0x0418
+#define SDIO_DEVICE_ID_ATHEROS_AR6004_19 0x0419
+#define SDIO_DEVICE_ID_ATHEROS_AR6005 0x050A
+#define SDIO_DEVICE_ID_ATHEROS_QCA9377 0x0701
+
#define SDIO_VENDOR_ID_BROADCOM 0x02d0
-#define SDIO_DEVICE_ID_BROADCOM_43143 0xa887
+#define SDIO_DEVICE_ID_BROADCOM_NINTENDO_WII 0x044b
#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
-#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c
-#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d
#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
#define SDIO_DEVICE_ID_BROADCOM_4339 0x4339
-#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
-#define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4
-#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345
-#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
+#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_89359 0x4355
#define SDIO_DEVICE_ID_BROADCOM_4356 0x4356
#define SDIO_DEVICE_ID_BROADCOM_4359 0x4359
-#define SDIO_DEVICE_ID_CYPRESS_4373 0x4373
-#define SDIO_DEVICE_ID_CYPRESS_43012 43012
-#define SDIO_DEVICE_ID_CYPRESS_89359 0x4355
-
-#define SDIO_VENDOR_ID_INTEL 0x0089
-#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402
-#define SDIO_DEVICE_ID_INTEL_IWMC3200WIFI 0x1403
-#define SDIO_DEVICE_ID_INTEL_IWMC3200TOP 0x1404
-#define SDIO_DEVICE_ID_INTEL_IWMC3200GPS 0x1405
-#define SDIO_DEVICE_ID_INTEL_IWMC3200BT 0x1406
-#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5 0x1407
+#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373 0x4373
+#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43012 0xa804
+#define SDIO_DEVICE_ID_BROADCOM_43143 0xa887
+#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c
+#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d
+#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
+#define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4
+#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
+#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43439 0xa9af
+#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
+#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752 0xaae8
#define SDIO_VENDOR_ID_MARVELL 0x02df
#define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103
-#define SDIO_DEVICE_ID_MARVELL_8688WLAN 0x9104
-#define SDIO_DEVICE_ID_MARVELL_8688BT 0x9105
+#define SDIO_DEVICE_ID_MARVELL_8688_WLAN 0x9104
+#define SDIO_DEVICE_ID_MARVELL_8688_BT 0x9105
+#define SDIO_DEVICE_ID_MARVELL_8786_WLAN 0x9116
+#define SDIO_DEVICE_ID_MARVELL_8787_WLAN 0x9119
+#define SDIO_DEVICE_ID_MARVELL_8787_BT 0x911a
+#define SDIO_DEVICE_ID_MARVELL_8787_BT_AMP 0x911b
#define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128
-#define SDIO_DEVICE_ID_MARVELL_8887WLAN 0x9134
+#define SDIO_DEVICE_ID_MARVELL_8797_WLAN 0x9129
+#define SDIO_DEVICE_ID_MARVELL_8797_BT 0x912a
+#define SDIO_DEVICE_ID_MARVELL_8897_WLAN 0x912d
+#define SDIO_DEVICE_ID_MARVELL_8897_BT 0x912e
+#define SDIO_DEVICE_ID_MARVELL_8887_F0 0x9134
+#define SDIO_DEVICE_ID_MARVELL_8887_WLAN 0x9135
+#define SDIO_DEVICE_ID_MARVELL_8887_BT 0x9136
+#define SDIO_DEVICE_ID_MARVELL_8801_WLAN 0x9139
+#define SDIO_DEVICE_ID_MARVELL_8997_F0 0x9140
+#define SDIO_DEVICE_ID_MARVELL_8997_WLAN 0x9141
+#define SDIO_DEVICE_ID_MARVELL_8997_BT 0x9142
+#define SDIO_DEVICE_ID_MARVELL_8977_WLAN 0x9145
+#define SDIO_DEVICE_ID_MARVELL_8977_BT 0x9146
+#define SDIO_DEVICE_ID_MARVELL_8987_WLAN 0x9149
+#define SDIO_DEVICE_ID_MARVELL_8987_BT 0x914a
#define SDIO_VENDOR_ID_MEDIATEK 0x037a
+#define SDIO_DEVICE_ID_MEDIATEK_MT7663 0x7663
+#define SDIO_DEVICE_ID_MEDIATEK_MT7668 0x7668
+#define SDIO_DEVICE_ID_MEDIATEK_MT7961 0x7961
+
+#define SDIO_VENDOR_ID_MICROCHIP_WILC 0x0296
+#define SDIO_DEVICE_ID_MICROCHIP_WILC1000 0x5347
#define SDIO_VENDOR_ID_SIANO 0x039a
#define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201
#define SDIO_DEVICE_ID_SIANO_NICE 0x0202
#define SDIO_DEVICE_ID_SIANO_VEGA_A0 0x0300
#define SDIO_DEVICE_ID_SIANO_VENICE 0x0301
+#define SDIO_DEVICE_ID_SIANO_MING 0x0302
+#define SDIO_DEVICE_ID_SIANO_PELE 0x0500
+#define SDIO_DEVICE_ID_SIANO_RIO 0x0600
+#define SDIO_DEVICE_ID_SIANO_DENVER_2160 0x0700
+#define SDIO_DEVICE_ID_SIANO_DENVER_1530 0x0800
#define SDIO_DEVICE_ID_SIANO_NOVA_A0 0x1100
#define SDIO_DEVICE_ID_SIANO_STELLAR 0x5347
-#define SDIO_VENDOR_ID_TI 0x0097
-#define SDIO_DEVICE_ID_TI_WL1271 0x4076
+#define SDIO_VENDOR_ID_RSI 0x041b
+#define SDIO_DEVICE_ID_RSI_9113 0x9330
+#define SDIO_DEVICE_ID_RSI_9116 0x9116
+
#define SDIO_VENDOR_ID_TI_WL1251 0x104c
#define SDIO_DEVICE_ID_TI_WL1251 0x9066
-#define SDIO_VENDOR_ID_STE 0x0020
-#define SDIO_DEVICE_ID_STE_CW1200 0x2280
-
#endif /* LINUX_MMC_SDIO_IDS_H */
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 2ad72d2c8cc5..b8728d11c949 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -9,8 +9,7 @@ struct page;
struct vm_area_struct;
struct mm_struct;
-extern void dump_page(struct page *page, const char *reason);
-extern void __dump_page(struct page *page, const char *reason);
+void dump_page(struct page *page, const char *reason);
void dump_vma(const struct vm_area_struct *vma);
void dump_mm(const struct mm_struct *mm);
@@ -23,6 +22,13 @@ void dump_mm(const struct mm_struct *mm);
BUG(); \
} \
} while (0)
+#define VM_BUG_ON_FOLIO(cond, folio) \
+ do { \
+ if (unlikely(cond)) { \
+ dump_page(&folio->page, "VM_BUG_ON_FOLIO(" __stringify(cond)")");\
+ BUG(); \
+ } \
+ } while (0)
#define VM_BUG_ON_VMA(cond, vma) \
do { \
if (unlikely(cond)) { \
@@ -37,6 +43,38 @@ void dump_mm(const struct mm_struct *mm);
BUG(); \
} \
} while (0)
+#define VM_WARN_ON_ONCE_PAGE(cond, page) ({ \
+ static bool __section(".data.once") __warned; \
+ int __ret_warn_once = !!(cond); \
+ \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ dump_page(page, "VM_WARN_ON_ONCE_PAGE(" __stringify(cond)")");\
+ __warned = true; \
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn_once); \
+})
+#define VM_WARN_ON_FOLIO(cond, folio) ({ \
+ int __ret_warn = !!(cond); \
+ \
+ if (unlikely(__ret_warn)) { \
+ dump_page(&folio->page, "VM_WARN_ON_FOLIO(" __stringify(cond)")");\
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn); \
+})
+#define VM_WARN_ON_ONCE_FOLIO(cond, folio) ({ \
+ static bool __section(".data.once") __warned; \
+ int __ret_warn_once = !!(cond); \
+ \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ dump_page(&folio->page, "VM_WARN_ON_ONCE_FOLIO(" __stringify(cond)")");\
+ __warned = true; \
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn_once); \
+})
+
#define VM_WARN_ON(cond) (void)WARN_ON(cond)
#define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
#define VM_WARN_ONCE(cond, format...) (void)WARN_ONCE(cond, format)
@@ -44,14 +82,24 @@ void dump_mm(const struct mm_struct *mm);
#else
#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
+#define VM_BUG_ON_FOLIO(cond, folio) VM_BUG_ON(cond)
#define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond)
#define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond)
#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
#endif
+#ifdef CONFIG_DEBUG_VM_IRQSOFF
+#define VM_WARN_ON_IRQS_ENABLED() WARN_ON_ONCE(!irqs_disabled())
+#else
+#define VM_WARN_ON_IRQS_ENABLED() do { } while (0)
+#endif
+
#ifdef CONFIG_DEBUG_VIRTUAL
#define VIRTUAL_BUG_ON(cond) BUG_ON(cond)
#else
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
index d9a543a9e1cc..b9b970f7ab45 100644
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -3,15 +3,29 @@
#define _LINUX_MMU_CONTEXT_H
#include <asm/mmu_context.h>
-
-struct mm_struct;
-
-void use_mm(struct mm_struct *mm);
-void unuse_mm(struct mm_struct *mm);
+#include <asm/mmu.h>
/* Architectures that care about IRQ state in switch_mm can override this. */
#ifndef switch_mm_irqs_off
# define switch_mm_irqs_off switch_mm
#endif
+#ifndef leave_mm
+static inline void leave_mm(int cpu) { }
+#endif
+
+/*
+ * CPUs that are capable of running user task @p. Must contain at least one
+ * active CPU. It is assumed that the kernel can run on all CPUs, so calling
+ * this for a kernel thread is pointless.
+ *
+ * By default, we assume a sane, homogeneous system.
+ */
+#ifndef task_cpu_possible_mask
+# define task_cpu_possible_mask(p) cpu_possible_mask
+# define task_cpu_possible(cpu, p) true
+#else
+# define task_cpu_possible(cpu, p) cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
+#endif
+
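A sketch of the intended use: generic code asking which CPUs may run a given user task without caring whether the system is heterogeneous (needs <linux/cpumask.h> and <linux/sched.h>):

static unsigned int example_nr_runnable_cpus(struct task_struct *p)
{
        /* degenerates to all possible CPUs on a homogeneous system */
        return cpumask_weight(task_cpu_possible_mask(p));
}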
#endif
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 736f6918335e..d6c06e140277 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -5,6 +5,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
+#include <linux/mmap_lock.h>
#include <linux/srcu.h>
#include <linux/interval_tree.h>
@@ -32,11 +33,20 @@ struct mmu_interval_notifier;
*
* @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still same page and same
* access flags). User should soft dirty the page in the end callback to make
- * sure that anyone relying on soft dirtyness catch pages that might be written
+ * sure that anyone relying on soft dirtiness catches pages that might be written
* through non CPU mappings.
*
* @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
* that the mm refcount is zero and the range is no longer accessible.
+ *
+ * @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal
+ * a device driver to possibly ignore the invalidation if the
+ * owner field matches the driver's device private pgmap owner.
+ *
+ * @MMU_NOTIFY_EXCLUSIVE: to signal a device driver that the device will no
+ * longer have exclusive access to the page. When sent during creation of an
+ * exclusive range the owner will be initialised to the value provided by the
+ * caller of make_device_exclusive_range(), otherwise the owner will be NULL.
*/
enum mmu_notifier_event {
MMU_NOTIFY_UNMAP = 0,
@@ -45,6 +55,8 @@ enum mmu_notifier_event {
MMU_NOTIFY_PROTECTION_PAGE,
MMU_NOTIFY_SOFT_DIRTY,
MMU_NOTIFY_RELEASE,
+ MMU_NOTIFY_MIGRATE,
+ MMU_NOTIFY_EXCLUSIVE,
};
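A sketch of a device driver using the two new events together with the owner field (added further below in this diff): it ignores invalidations that its own migration code initiated. struct example_dev and example_flush_device_tlb() are hypothetical:

struct example_dev {
        struct mmu_notifier mn;
        /* ... device state ... */
};

static int example_invalidate_range_start(struct mmu_notifier *mn,
                                const struct mmu_notifier_range *range)
{
        struct example_dev *dev = container_of(mn, struct example_dev, mn);

        /* Skip invalidations our own migrate path triggered. */
        if (range->event == MMU_NOTIFY_MIGRATE && range->owner == dev)
                return 0;

        example_flush_device_tlb(dev, range->start, range->end);
        return 0;
}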
#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
@@ -121,7 +133,7 @@ struct mmu_notifier_ops {
/*
* invalidate_range_start() and invalidate_range_end() must be
- * paired and are called only when the mmap_sem and/or the
+ * paired and are called only when the mmap_lock and/or the
* locks protecting the reverse maps are held. If the subsystem
* can't guarantee that no additional references are taken to
* the pages in the range, it has to implement the
@@ -155,7 +167,7 @@ struct mmu_notifier_ops {
* decrease the refcount. If the refcount is decreased on
* invalidate_range_start() then the VM can free pages as page
* table entries are removed. If the refcount is only
- * droppped on invalidate_range_end() then the driver itself
+ * dropped on invalidate_range_end() then the driver itself
* will drop the last refcount but it must take care to flush
* any secondary tlb before doing the final free on the
* page. Pages will no longer be referenced by the linux
@@ -163,11 +175,11 @@ struct mmu_notifier_ops {
* the last refcount is dropped.
*
* If blockable argument is set to false then the callback cannot
- * sleep and has to return with -EAGAIN. 0 should be returned
- * otherwise. Please note that if invalidate_range_start approves
- * a non-blocking behavior then the same applies to
- * invalidate_range_end.
- *
+ * sleep and has to return with -EAGAIN if sleeping would be required.
+ * 0 should be returned otherwise. Please note that notifiers that can
+ * fail invalidate_range_start are not allowed to implement
+ * invalidate_range_end, as there is no mechanism for informing the
+ * notifier that its start failed.
*/
int (*invalidate_range_start)(struct mmu_notifier *subscription,
const struct mmu_notifier_range *range);
@@ -184,9 +196,9 @@ struct mmu_notifier_ops {
* If invalidate_range() is used to manage a non-CPU TLB with
* shared page-tables, it is not necessary to implement the
* invalidate_range_start()/end() notifiers, as
- * invalidate_range() alread catches the points in time when an
+ * invalidate_range() already catches the points in time when an
* external TLB range needs to be flushed. For more in depth
- * discussion on this see Documentation/vm/mmu_notifier.rst
+ * discussion on this see Documentation/mm/mmu_notifier.rst
*
* Note that this function might be called with just a sub-range
* of what was passed to invalidate_range_start()/end(), if
@@ -212,13 +224,13 @@ struct mmu_notifier_ops {
};
/*
- * The notifier chains are protected by mmap_sem and/or the reverse map
+ * The notifier chains are protected by mmap_lock and/or the reverse map
* semaphores. Notifier chains are only changed when all reverse maps and
- * the mmap_sem locks are taken.
+ * the mmap_lock locks are taken.
*
* Therefore notifier chains can only be traversed when either
*
- * 1. mmap_sem is held.
+ * 1. mmap_lock is held.
* 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
* 3. No other concurrent thread can access the list (release)
*/
@@ -263,6 +275,7 @@ struct mmu_notifier_range {
unsigned long end;
unsigned flags;
enum mmu_notifier_event event;
+ void *owner;
};
static inline int mm_has_notifiers(struct mm_struct *mm)
@@ -277,9 +290,9 @@ mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
struct mmu_notifier *ret;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
ret = mmu_notifier_get_locked(ops, mm);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
void mmu_notifier_put(struct mmu_notifier *subscription);
@@ -356,7 +369,7 @@ mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
* mmu_interval_read_retry() will return true.
*
* False is not reliable and only suggests a collision may not have
- * occured. It can be called many times and does not have to hold the user
+ * occurred. It can be called many times and does not have to hold the user
* provided lock.
*
* This call can be used as part of loops and other expensive operations to
@@ -514,6 +527,16 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
range->flags = flags;
}
+static inline void mmu_notifier_range_init_owner(
+ struct mmu_notifier_range *range,
+ enum mmu_notifier_event event, unsigned int flags,
+ struct vm_area_struct *vma, struct mm_struct *mm,
+ unsigned long start, unsigned long end, void *owner)
+{
+ mmu_notifier_range_init(range, event, flags, vma, mm, start, end);
+ range->owner = owner;
+}
+
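And the producing side, sketched: the driver tags the range it is about to invalidate with itself as owner, so the notifier callback above can recognise and skip it:

static void example_migrate_range(struct example_dev *dev,
                                struct vm_area_struct *vma,
                                unsigned long start, unsigned long end)
{
        struct mmu_notifier_range range;

        mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
                                      vma, vma->vm_mm, start, end, dev);
        mmu_notifier_invalidate_range_start(&range);
        /* ... collect and migrate the pages ... */
        mmu_notifier_invalidate_range_end(&range);
}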
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
({ \
int __young; \
@@ -638,6 +661,9 @@ static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \
_mmu_notifier_range_init(range, start, end)
+#define mmu_notifier_range_init_owner(range, event, flags, vma, mm, start, \
+ end, owner) \
+ _mmu_notifier_range_init(range, start, end)
static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 462f6873905a..5f74891556f3 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -20,13 +20,14 @@
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
+#include <linux/local_lock.h>
#include <asm/page.h>
/* Free memory management - zoned buddy allocator. */
-#ifndef CONFIG_FORCE_MAX_ZONEORDER
+#ifndef CONFIG_ARCH_FORCE_MAX_ORDER
#define MAX_ORDER 11
#else
-#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
+#define MAX_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
@@ -53,10 +54,7 @@ enum migratetype {
*
* The way to use it is to change migratetype of a range of
* pageblocks to MIGRATE_CMA which can be done by
- * __free_pageblock_cma() function. What is important though
- * is that a range of pageblocks must be aligned to
- * MAX_ORDER_NR_PAGES should biggest page be bigger then
- * a single pageblock.
+ * __free_pageblock_cma() function.
*/
MIGRATE_CMA,
#endif
@@ -82,59 +80,33 @@ static inline bool is_migrate_movable(int mt)
return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}
+/*
+ * Check whether a migratetype can be merged with another migratetype.
+ *
+ * It is only mergeable when it can fall back to other migratetypes for
+ * allocation. See fallbacks[MIGRATE_TYPES][3] in page_alloc.c.
+ */
+static inline bool migratetype_is_mergeable(int mt)
+{
+ return mt < MIGRATE_PCPTYPES;
+}
+
#define for_each_migratetype_order(order, type) \
for (order = 0; order < MAX_ORDER; order++) \
for (type = 0; type < MIGRATE_TYPES; type++)
extern int page_group_by_mobility_disabled;
-#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
-#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)
+#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)
#define get_pageblock_migratetype(page) \
- get_pfnblock_flags_mask(page, page_to_pfn(page), \
- PB_migrate_end, MIGRATETYPE_MASK)
+ get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
struct free_area {
struct list_head free_list[MIGRATE_TYPES];
unsigned long nr_free;
};
-/* Used for pages not on another list */
-static inline void add_to_free_area(struct page *page, struct free_area *area,
- int migratetype)
-{
- list_add(&page->lru, &area->free_list[migratetype]);
- area->nr_free++;
-}
-
-/* Used for pages not on another list */
-static inline void add_to_free_area_tail(struct page *page, struct free_area *area,
- int migratetype)
-{
- list_add_tail(&page->lru, &area->free_list[migratetype]);
- area->nr_free++;
-}
-
-#ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR
-/* Used to preserve page allocation order entropy */
-void add_to_free_area_random(struct page *page, struct free_area *area,
- int migratetype);
-#else
-static inline void add_to_free_area_random(struct page *page,
- struct free_area *area, int migratetype)
-{
- add_to_free_area(page, area, migratetype);
-}
-#endif
-
-/* Used for pages which are on another list */
-static inline void move_to_free_area(struct page *page, struct free_area *area,
- int migratetype)
-{
- list_move(&page->lru, &area->free_list[migratetype]);
-}
-
static inline struct page *get_page_from_free_area(struct free_area *area,
int migratetype)
{
@@ -142,15 +114,6 @@ static inline struct page *get_page_from_free_area(struct free_area *area,
struct page, lru);
}
-static inline void del_page_from_free_area(struct page *page,
- struct free_area *area)
-{
- list_del(&page->lru);
- __ClearPageBuddy(page);
- set_page_private(page, 0);
- area->nr_free--;
-}
-
static inline bool free_area_empty(struct free_area *area, int migratetype)
{
return list_empty(&area->free_list[migratetype]);
@@ -158,21 +121,6 @@ static inline bool free_area_empty(struct free_area *area, int migratetype)
struct pglist_data;
-/*
- * zone->lock and the zone lru_lock are two of the hottest locks in the kernel.
- * So add a wild amount of padding here to ensure that they fall into separate
- * cachelines. There are very few zone structures in the machine, so space
- * consumption is not a concern here.
- */
-#if defined(CONFIG_SMP)
-struct zone_padding {
- char x[0];
-} ____cacheline_internodealigned_in_smp;
-#define ZONE_PADDING(name) struct zone_padding name;
-#else
-#define ZONE_PADDING(name)
-#endif
-
#ifdef CONFIG_NUMA
enum numa_stat_item {
NUMA_HIT, /* allocated in intended node */
@@ -181,10 +129,10 @@ enum numa_stat_item {
NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */
NUMA_LOCAL, /* allocation from local node */
NUMA_OTHER, /* allocation from other node */
- NR_VM_NUMA_STAT_ITEMS
+ NR_VM_NUMA_EVENT_ITEMS
};
#else
-#define NR_VM_NUMA_STAT_ITEMS 0
+#define NR_VM_NUMA_EVENT_ITEMS 0
#endif
enum zone_stat_item {
@@ -198,8 +146,6 @@ enum zone_stat_item {
NR_ZONE_UNEVICTABLE,
NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */
NR_MLOCK, /* mlock()ed pages found and moved off LRU */
- NR_PAGETABLE, /* used for pagetables */
- NR_KERNEL_STACK_KB, /* measured in KiB */
/* Second 128 byte cacheline */
NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
@@ -215,14 +161,20 @@ enum node_stat_item {
NR_INACTIVE_FILE, /* " " " " " */
NR_ACTIVE_FILE, /* " " " " " */
NR_UNEVICTABLE, /* " " " " " */
- NR_SLAB_RECLAIMABLE,
- NR_SLAB_UNRECLAIMABLE,
+ NR_SLAB_RECLAIMABLE_B,
+ NR_SLAB_UNRECLAIMABLE_B,
NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
WORKINGSET_NODES,
- WORKINGSET_REFAULT,
- WORKINGSET_ACTIVATE,
- WORKINGSET_RESTORE,
+ WORKINGSET_REFAULT_BASE,
+ WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE,
+ WORKINGSET_REFAULT_FILE,
+ WORKINGSET_ACTIVATE_BASE,
+ WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE,
+ WORKINGSET_ACTIVATE_FILE,
+ WORKINGSET_RESTORE_BASE,
+ WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE,
+ WORKINGSET_RESTORE_FILE,
WORKINGSET_NODERECLAIM,
NR_ANON_MAPPED, /* Mapped anonymous pages */
NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
@@ -237,16 +189,68 @@ enum node_stat_item {
NR_FILE_THPS,
NR_FILE_PMDMAPPED,
NR_ANON_THPS,
- NR_UNSTABLE_NFS, /* NFS unstable pages */
NR_VMSCAN_WRITE,
NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */
NR_DIRTIED, /* page dirtyings since bootup */
NR_WRITTEN, /* page writings since bootup */
+ NR_THROTTLED_WRITTEN, /* NR_WRITTEN while reclaim throttled */
NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */
+ NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */
+ NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */
+ NR_KERNEL_STACK_KB, /* measured in KiB */
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+ NR_KERNEL_SCS_KB, /* measured in KiB */
+#endif
+ NR_PAGETABLE, /* used for pagetables */
+ NR_SECONDARY_PAGETABLE, /* secondary pagetables, e.g. KVM pagetables */
+#ifdef CONFIG_SWAP
+ NR_SWAPCACHE,
+#endif
+#ifdef CONFIG_NUMA_BALANCING
+ PGPROMOTE_SUCCESS, /* promote successfully */
+ PGPROMOTE_CANDIDATE, /* candidate pages to promote */
+#endif
NR_VM_NODE_STAT_ITEMS
};
/*
+ * Returns true if the item should be printed in THPs (/proc/vmstat
+ * currently prints number of anon, file and shmem THPs. But the item
+ * is charged in pages).
+ */
+static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item)
+{
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return false;
+
+ return item == NR_ANON_THPS ||
+ item == NR_FILE_THPS ||
+ item == NR_SHMEM_THPS ||
+ item == NR_SHMEM_PMDMAPPED ||
+ item == NR_FILE_PMDMAPPED;
+}
+
+/*
+ * Returns true if the value is measured in bytes (most vmstat values are
+ * measured in pages). This defines the API part, the internal representation
+ * might be different.
+ */
+static __always_inline bool vmstat_item_in_bytes(int idx)
+{
+ /*
+ * Global and per-node slab counters track slab pages.
+ * It's expected that changes are multiples of PAGE_SIZE.
+ * Internally values are stored in pages.
+ *
+ * Per-memcg and per-lruvec counters track memory, consumed
+ * by individual slab objects. These counters are actually
+ * byte-precise.
+ */
+ return (idx == NR_SLAB_RECLAIMABLE_B ||
+ idx == NR_SLAB_UNRECLAIMABLE_B);
+}
+
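A sketch of what readers on the byte-precise side do before reporting, converting back to pages; the helper name is hypothetical:

static unsigned long example_stat_in_pages(int idx, unsigned long val)
{
        /* Slab counters are byte-precise at the API level; everything
         * else is already counted in pages. */
        if (vmstat_item_in_bytes(idx))
                val >>= PAGE_SHIFT;
        return val;
}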
+/*
* We do arithmetic on the LRU lists in various places in the code,
* so it is important to keep the active lists LRU_ACTIVE higher in
* the array than the corresponding inactive lists, and to keep
@@ -268,6 +272,14 @@ enum lru_list {
NR_LRU_LISTS
};
+enum vmscan_throttle_state {
+ VMSCAN_THROTTLE_WRITEBACK,
+ VMSCAN_THROTTLE_ISOLATED,
+ VMSCAN_THROTTLE_NOPROGRESS,
+ VMSCAN_THROTTLE_CONGESTED,
+ NR_VMSCAN_THROTTLE,
+};
+
#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
@@ -282,18 +294,9 @@ static inline bool is_active_lru(enum lru_list lru)
return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
-struct zone_reclaim_stat {
- /*
- * The pageout code in vmscan.c keeps track of how many of the
- * mem/swap backed and file backed pages are referenced.
- * The higher the rotated/scanned ratio, the more valuable
- * that cache is.
- *
- * The anon LRU stats live in [0], file LRU stats in [1]
- */
- unsigned long recent_rotated[2];
- unsigned long recent_scanned[2];
-};
+#define WORKINGSET_ANON 0
+#define WORKINGSET_FILE 1
+#define ANON_AND_FILE 2
enum lruvec_flags {
LRUVEC_CONGESTED, /* lruvec has many dirty pages
@@ -301,15 +304,230 @@ enum lruvec_flags {
*/
};
+#endif /* !__GENERATING_BOUNDS_H */
+
+/*
+ * Evictable pages are divided into multiple generations. The youngest and the
+ * oldest generation numbers, max_seq and min_seq, are monotonically increasing.
+ * They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An
+ * offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the
+ * corresponding generation. The gen counter in folio->flags stores gen+1 while
+ * a page is on one of lrugen->lists[]. Otherwise it stores 0.
+ *
+ * A page is added to the youngest generation on faulting. The aging needs to
+ * check the accessed bit at least twice before handing this page over to the
+ * eviction. The first check takes care of the accessed bit set on the initial
+ * fault; the second check makes sure this page hasn't been used since then.
+ * This process, AKA second chance, requires a minimum of two generations,
+ * hence MIN_NR_GENS. And to maintain ABI compatibility with the active/inactive
+ * LRU, e.g., /proc/vmstat, these two generations are considered active; the
+ * rest of generations, if they exist, are considered inactive. See
+ * lru_gen_is_active().
+ *
+ * PG_active is always cleared while a page is on one of lrugen->lists[] so that
+ * the aging need not worry about it. And it's set again when a page
+ * considered active is isolated for non-reclaiming purposes, e.g., migration.
+ * See lru_gen_add_folio() and lru_gen_del_folio().
+ *
+ * MAX_NR_GENS is set to 4 so that the multi-gen LRU can support twice the
+ * number of categories of the active/inactive LRU when keeping track of
+ * accesses through page tables. This requires order_base_2(MAX_NR_GENS+1) bits
+ * in folio->flags.
+ */
+#define MIN_NR_GENS 2U
+#define MAX_NR_GENS 4U
+
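The window logic above, reduced to a sketch: the list index for a sequence number is seq % MAX_NR_GENS, and the two youngest generations count as active:

static int example_gen_from_seq(unsigned long seq)
{
        return seq % MAX_NR_GENS;       /* index into lrugen->lists[] */
}

static bool example_seq_is_active(unsigned long max_seq, unsigned long seq)
{
        return max_seq - seq < MIN_NR_GENS;
}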
+/*
+ * Each generation is divided into multiple tiers. A page accessed N times
+ * through file descriptors is in tier order_base_2(N). A page in the first tier
+ * (N=0,1) is marked by PG_referenced unless it was faulted in through page
+ * tables or read ahead. A page in any other tier (N>1) is marked by
+ * PG_referenced and PG_workingset. This implies a minimum of two tiers is
+ * supported without using additional bits in folio->flags.
+ *
+ * In contrast to moving across generations which requires the LRU lock, moving
+ * across tiers only involves atomic operations on folio->flags and therefore
+ * has a negligible cost in the buffered access path. In the eviction path,
+ * comparisons of refaulted/(evicted+protected) from the first tier and the
+ * rest infer whether pages accessed multiple times through file descriptors
+ * are statistically hot and thus worth protecting.
+ *
+ * MAX_NR_TIERS is set to 4 so that the multi-gen LRU can support twice the
+ * number of categories of the active/inactive LRU when keeping track of
+ * accesses through file descriptors. This uses MAX_NR_TIERS-2 spare bits in
+ * folio->flags.
+ */
+#define MAX_NR_TIERS 4U
+
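Worked example of the tier mapping above: a page read once through a file descriptor (N=1) sits in tier order_base_2(1) = 0 and carries only PG_referenced; after a second read (N=2) it moves to tier order_base_2(2) = 1 and gains PG_workingset; a fourth access (N=4) puts it in tier order_base_2(4) = 2, whose refault statistics feed the protection decision described above.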
+#ifndef __GENERATING_BOUNDS_H
+
+struct lruvec;
+struct page_vma_mapped_walk;
+
+#define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
+#define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF)
+
+#ifdef CONFIG_LRU_GEN
+
+enum {
+ LRU_GEN_ANON,
+ LRU_GEN_FILE,
+};
+
+enum {
+ LRU_GEN_CORE,
+ LRU_GEN_MM_WALK,
+ LRU_GEN_NONLEAF_YOUNG,
+ NR_LRU_GEN_CAPS
+};
+
+#define MIN_LRU_BATCH BITS_PER_LONG
+#define MAX_LRU_BATCH (MIN_LRU_BATCH * 64)
+
+/* whether to keep historical stats from evicted generations */
+#ifdef CONFIG_LRU_GEN_STATS
+#define NR_HIST_GENS MAX_NR_GENS
+#else
+#define NR_HIST_GENS 1U
+#endif
+
+/*
+ * The youngest generation number is stored in max_seq for both anon and file
+ * types as they are aged on an equal footing. The oldest generation numbers are
+ * stored in min_seq[] separately for anon and file types as clean file pages
+ * can be evicted regardless of swap constraints.
+ *
+ * Normally anon and file min_seq are in sync. But if swapping is constrained,
+ * e.g., out of swap space, file min_seq is allowed to advance and leave anon
+ * min_seq behind.
+ *
+ * The number of pages in each generation is eventually consistent and therefore
+ * can be transiently negative when reset_batch_size() is pending.
+ */
+struct lru_gen_struct {
+ /* the aging increments the youngest generation number */
+ unsigned long max_seq;
+ /* the eviction increments the oldest generation numbers */
+ unsigned long min_seq[ANON_AND_FILE];
+ /* the birth time of each generation in jiffies */
+ unsigned long timestamps[MAX_NR_GENS];
+ /* the multi-gen LRU lists, lazily sorted on eviction */
+ struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ /* the multi-gen LRU sizes, eventually consistent */
+ long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ /* the exponential moving average of refaulted */
+ unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS];
+ /* the exponential moving average of evicted+protected */
+ unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS];
+ /* the first tier doesn't need protection, hence the minus one */
+ unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS - 1];
+ /* can be modified without holding the LRU lock */
+ atomic_long_t evicted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
+ atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
+ /* whether the multi-gen LRU is enabled */
+ bool enabled;
+};
+
+enum {
+ MM_LEAF_TOTAL, /* total leaf entries */
+ MM_LEAF_OLD, /* old leaf entries */
+ MM_LEAF_YOUNG, /* young leaf entries */
+ MM_NONLEAF_TOTAL, /* total non-leaf entries */
+ MM_NONLEAF_FOUND, /* non-leaf entries found in Bloom filters */
+ MM_NONLEAF_ADDED, /* non-leaf entries added to Bloom filters */
+ NR_MM_STATS
+};
+
+/* double-buffering Bloom filters */
+#define NR_BLOOM_FILTERS 2
+
+struct lru_gen_mm_state {
+ /* set to max_seq after each iteration */
+ unsigned long seq;
+ /* where the current iteration continues (inclusive) */
+ struct list_head *head;
+ /* where the last iteration ended (exclusive) */
+ struct list_head *tail;
+ /* to wait for the last page table walker to finish */
+ struct wait_queue_head wait;
+ /* Bloom filters flip after each iteration */
+ unsigned long *filters[NR_BLOOM_FILTERS];
+ /* the mm stats for debugging */
+ unsigned long stats[NR_HIST_GENS][NR_MM_STATS];
+ /* the number of concurrent page table walkers */
+ int nr_walkers;
+};
+
+struct lru_gen_mm_walk {
+ /* the lruvec under reclaim */
+ struct lruvec *lruvec;
+ /* unstable max_seq from lru_gen_struct */
+ unsigned long max_seq;
+ /* the next address within an mm to scan */
+ unsigned long next_addr;
+ /* to batch promoted pages */
+ int nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ /* to batch the mm stats */
+ int mm_stats[NR_MM_STATS];
+ /* total batched items */
+ int batched;
+ bool can_swap;
+ bool force_scan;
+};
+
+void lru_gen_init_lruvec(struct lruvec *lruvec);
+void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
+
+#ifdef CONFIG_MEMCG
+void lru_gen_init_memcg(struct mem_cgroup *memcg);
+void lru_gen_exit_memcg(struct mem_cgroup *memcg);
+#endif
+
+#else /* !CONFIG_LRU_GEN */
+
+static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
+{
+}
+
+static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
+{
+}
+
+#ifdef CONFIG_MEMCG
+static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg)
+{
+}
+#endif
+
+#endif /* CONFIG_LRU_GEN */
+
struct lruvec {
struct list_head lists[NR_LRU_LISTS];
- struct zone_reclaim_stat reclaim_stat;
- /* Evictions & activations on the inactive file list */
- atomic_long_t inactive_age;
+ /* per lruvec lru_lock for memcg */
+ spinlock_t lru_lock;
+ /*
+ * These track the cost of reclaiming one LRU - file or anon -
+ * over the other. As the observed cost of reclaiming one LRU
+ * increases, the reclaim scan balance tips toward the other.
+ */
+ unsigned long anon_cost;
+ unsigned long file_cost;
+ /* Non-resident age, driven by LRU movement */
+ atomic_long_t nonresident_age;
/* Refaults at the time of last reclaim cycle */
- unsigned long refaults;
+ unsigned long refaults[ANON_AND_FILE];
/* Various lruvec state flags (enum lruvec_flags) */
unsigned long flags;
+#ifdef CONFIG_LRU_GEN
+ /* evictable pages divided into generations */
+ struct lru_gen_struct lrugen;
+ /* to concurrently iterate lru_gen_mm_list */
+ struct lru_gen_mm_state mm_state;
+#endif
#ifdef CONFIG_MEMCG
struct pglist_data *pgdat;
#endif
@@ -329,32 +547,56 @@ enum zone_watermarks {
WMARK_MIN,
WMARK_LOW,
WMARK_HIGH,
+ WMARK_PROMO,
NR_WMARK
};
+/*
+ * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. One additional list
+ * for THP which will usually be GFP_MOVABLE. Even if it is another type,
+ * it should not contribute to serious fragmentation causing THP allocation
+ * failures.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define NR_PCP_THP 1
+#else
+#define NR_PCP_THP 0
+#endif
+#define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1))
+#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP)
+
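Worked example, assuming the usual MIGRATE_PCPTYPES of 3 and PAGE_ALLOC_COSTLY_ORDER of 3: NR_LOWORDER_PCP_LISTS = 3 * (3 + 1) = 12 per-order, per-migratetype lists, so NR_PCP_LISTS is 13 with CONFIG_TRANSPARENT_HUGEPAGE and 12 without.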
#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
+/* Fields and list protected by pagesets local_lock in page_alloc.c */
struct per_cpu_pages {
+ spinlock_t lock; /* Protects lists field */
int count; /* number of pages in the list */
int high; /* high watermark, emptying needed */
int batch; /* chunk size for buddy add/remove */
+ short free_factor; /* batch scaling factor during free */
+#ifdef CONFIG_NUMA
+ short expire; /* When 0, remote pagesets are drained */
+#endif
/* Lists of pages, one per migrate type stored on the pcp-lists */
- struct list_head lists[MIGRATE_PCPTYPES];
-};
+ struct list_head lists[NR_PCP_LISTS];
+} ____cacheline_aligned_in_smp;
-struct per_cpu_pageset {
- struct per_cpu_pages pcp;
-#ifdef CONFIG_NUMA
- s8 expire;
- u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
-#endif
+struct per_cpu_zonestat {
#ifdef CONFIG_SMP
- s8 stat_threshold;
s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
+ s8 stat_threshold;
+#endif
+#ifdef CONFIG_NUMA
+ /*
+ * Low priority inaccurate counters that are only folded
+ * on demand. Use a large type to avoid the overhead of
+ * folding during refresh_cpu_vm_stats.
+ */
+ unsigned long vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
#endif
};
@@ -375,26 +617,6 @@ enum zone_type {
* DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
* platforms may need both zones as they support peripherals with
* different DMA addressing limitations.
- *
- * Some examples:
- *
- * - i386 and x86_64 have a fixed 16M ZONE_DMA and ZONE_DMA32 for the
- * rest of the lower 4G.
- *
- * - arm only uses ZONE_DMA, the size, up to 4G, may vary depending on
- * the specific device.
- *
- * - arm64 has a fixed 1G ZONE_DMA and ZONE_DMA32 for the rest of the
- * lower 4G.
- *
- * - powerpc only uses ZONE_DMA, the size, up to 2G, may vary
- * depending on the specific device.
- *
- * - s390 uses ZONE_DMA fixed to the lower 2G.
- *
- * - ia64 and riscv only use ZONE_DMA32.
- *
- * - parisc uses neither.
*/
#ifdef CONFIG_ZONE_DMA
ZONE_DMA,
@@ -419,6 +641,55 @@ enum zone_type {
*/
ZONE_HIGHMEM,
#endif
+ /*
+ * ZONE_MOVABLE is similar to ZONE_NORMAL, except that it contains
+ * movable pages with few exceptional cases described below. Main use
+ * cases for ZONE_MOVABLE are to make memory offlining/unplug more
+ * likely to succeed, and to locally limit unmovable allocations - e.g.,
+ * to increase the number of THP/huge pages. Notable special cases are:
+ *
+ * 1. Pinned pages: (long-term) pinning of movable pages might
+ * essentially turn such pages unmovable. Therefore, we do not allow
+ * pinning long-term pages in ZONE_MOVABLE. When pages are pinned and
+ * faulted, they come from the right zone right away. However, it is
+ * still possible that the address space already has pages in
+ * ZONE_MOVABLE at the time when pages are pinned (i.e. the user has
+ * touched that memory before pinning). In such a case we migrate them
+ * to a different zone. When migration fails - pinning fails.
+ * 2. memblock allocations: kernelcore/movablecore setups might create
+ * situations where ZONE_MOVABLE contains unmovable allocations
+ * after boot. Memory offlining and allocations fail early.
+ * 3. Memory holes: kernelcore/movablecore setups might create very rare
+ * situations where ZONE_MOVABLE contains memory holes after boot,
+ * for example, if we have sections that are only partially
+ * populated. Memory offlining and allocations fail early.
+ * 4. PG_hwpoison pages: while poisoned pages can be skipped during
+ * memory offlining, such pages cannot be allocated.
+ * 5. Unmovable PG_offline pages: in paravirtualized environments,
+ * hotplugged memory blocks might only partially be managed by the
+ * buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The
+ * parts not managed by the buddy are unmovable PG_offline pages. In
+ * some cases (virtio-mem), such pages can be skipped during
+ * memory offlining, however, cannot be moved/allocated. These
+ * techniques might use alloc_contig_range() to hide previously
+ * exposed pages from the buddy again (e.g., to implement some sort
+ * of memory unplug in virtio-mem).
+ * 6. ZERO_PAGE(0): kernelcore/movablecore setups might create
+ * situations where ZERO_PAGE(0) which is allocated differently
+ * on different platforms may end up in a movable zone. ZERO_PAGE(0)
+ * cannot be migrated.
+ * 7. Memory-hotplug: when using memmap_on_memory and onlining the
+ * memory to the MOVABLE zone, the vmemmap pages are also placed in
+ * such zone. Such pages cannot be really moved around as they are
+ * self-stored in the range, but they are treated as movable when
+ * the range they describe is about to be offlined.
+ *
+ * In general, no unmovable allocations that degrade memory offlining
+ * should end up in ZONE_MOVABLE. Allocators (like alloc_contig_range())
+ * have to expect that migrating pages in ZONE_MOVABLE can fail (even
+ * if has_unmovable_pages() states that there are no unmovable pages,
+ * there can be false negatives).
+ */
ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
ZONE_DEVICE,
@@ -429,6 +700,8 @@ enum zone_type {
#ifndef __GENERATING_BOUNDS_H
+#define ASYNC_AND_SYNC 2
+
struct zone {
/* Read-mostly fields */
@@ -453,7 +726,14 @@ struct zone {
int node;
#endif
struct pglist_data *zone_pgdat;
- struct per_cpu_pageset __percpu *pageset;
+ struct per_cpu_pages __percpu *per_cpu_pageset;
+ struct per_cpu_zonestat __percpu *per_cpu_zonestats;
+ /*
+ * the high and batch values are copied to individual pagesets for
+ * faster access
+ */
+ int pageset_high;
+ int pageset_batch;
#ifndef CONFIG_SPARSEMEM
/*
@@ -475,11 +755,18 @@ struct zone {
* is calculated as:
* present_pages = spanned_pages - absent_pages(pages in holes);
*
+ * present_early_pages is present pages existing within the zone
+ * located on memory available since early boot, excluding hotplugged
+ * memory.
+ *
* managed_pages is present pages managed by the buddy system, which
* is calculated as (reserved_pages includes pages allocated by the
* bootmem allocator):
* managed_pages = present_pages - reserved_pages;
*
+ * cma_pages is the number of present pages assigned for CMA use
+ * (MIGRATE_CMA).
+ *
* So present_pages may be used by memory hotplug or memory power
* management logic to figure out unmanaged pages by checking
* (present_pages - managed_pages). And managed_pages should be used
@@ -498,12 +785,18 @@ struct zone {
* give them a chance of being in the same cacheline.
*
* Write access to present_pages at runtime should be protected by
- * mem_hotplug_begin/end(). Any reader who can't tolerant drift of
- * present_pages should get_online_mems() to get a stable value.
+ * mem_hotplug_begin/done(). Any reader who can't tolerate drift of
+ * present_pages should use get_online_mems() to get a stable value.
*/
atomic_long_t managed_pages;
unsigned long spanned_pages;
unsigned long present_pages;
+#if defined(CONFIG_MEMORY_HOTPLUG)
+ unsigned long present_early_pages;
+#endif
+#ifdef CONFIG_CMA
+ unsigned long cma_pages;
+#endif
const char *name;
@@ -524,7 +817,7 @@ struct zone {
int initialized;
/* Write-intensive fields used from the page allocator */
- ZONE_PADDING(_pad1_)
+ CACHELINE_PADDING(_pad1_);
/* free areas of different sizes */
struct free_area free_area[MAX_ORDER];
@@ -536,7 +829,7 @@ struct zone {
spinlock_t lock;
/* Write-intensive fields used by compaction and vmstats. */
- ZONE_PADDING(_pad2_)
+ CACHELINE_PADDING(_pad2_);
/*
* When free pages are below this point, additional steps are taken
@@ -548,8 +841,8 @@ struct zone {
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
/* pfn where compaction free scanner should start */
unsigned long compact_cached_free_pfn;
- /* pfn where async and sync compaction migration scanner should start */
- unsigned long compact_cached_migrate_pfn[2];
+ /* pfn where compaction migration scanner should start */
+ unsigned long compact_cached_migrate_pfn[ASYNC_AND_SYNC];
unsigned long compact_init_migrate_pfn;
unsigned long compact_init_free_pfn;
#endif
@@ -559,6 +852,7 @@ struct zone {
* On compaction failure, 1<<compact_defer_shift compactions
* are skipped before trying again. The number attempted since
* last failure is tracked with compact_considered.
+ * compact_order_failed is the minimum compaction failed order.
*/
unsigned int compact_considered;
unsigned int compact_defer_shift;
@@ -572,10 +866,10 @@ struct zone {
bool contiguous;
- ZONE_PADDING(_pad3_)
+ CACHELINE_PADDING(_pad3_);
/* Zone statistics */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
- atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
+ atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
} ____cacheline_internodealigned_in_smp;
enum pgdat_flags {
@@ -593,6 +887,7 @@ enum zone_flags {
ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks.
* Cleared when kswapd is woken.
*/
+ ZONE_RECLAIM_ACTIVE, /* kswapd may be scanning the zone. */
};
static inline unsigned long zone_managed_pages(struct zone *zone)
@@ -600,6 +895,15 @@ static inline unsigned long zone_managed_pages(struct zone *zone)
return (unsigned long)atomic_long_read(&zone->managed_pages);
}
+static inline unsigned long zone_cma_pages(struct zone *zone)
+{
+#ifdef CONFIG_CMA
+ return zone->cma_pages;
+#else
+ return 0;
+#endif
+}
+
static inline unsigned long zone_end_pfn(const struct zone *zone)
{
return zone->zone_start_pfn + zone->spanned_pages;
@@ -620,6 +924,88 @@ static inline bool zone_is_empty(struct zone *zone)
return zone->spanned_pages == 0;
}
+#ifndef BUILD_VDSO32_64
+/*
+ * The zone field is never updated after free_area_init_core()
+ * sets it, so none of the operations on it need to be atomic.
+ */
+
+/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
+#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
+#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
+#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
+#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
+#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
+#define LRU_GEN_PGOFF (KASAN_TAG_PGOFF - LRU_GEN_WIDTH)
+#define LRU_REFS_PGOFF (LRU_GEN_PGOFF - LRU_REFS_WIDTH)
+
+/*
+ * Define the bit shifts to access each section. For non-existent
+ * sections we define the shift as 0; that plus a 0 mask ensures
+ * the compiler will optimise away references to them.
+ */
+#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
+#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
+#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
+#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
+#define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))
+
+/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
+#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF) ? \
+ SECTIONS_PGOFF : ZONES_PGOFF)
+#else
+#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
+#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF) ? \
+ NODES_PGOFF : ZONES_PGOFF)
+#endif
+
+#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
+
+#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
+#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
+#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
+#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
+#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1)
+#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
+
+static inline enum zone_type page_zonenum(const struct page *page)
+{
+ ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
+ return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
+}
+
+static inline enum zone_type folio_zonenum(const struct folio *folio)
+{
+ return page_zonenum(&folio->page);
+}
+
+#ifdef CONFIG_ZONE_DEVICE
+static inline bool is_zone_device_page(const struct page *page)
+{
+ return page_zonenum(page) == ZONE_DEVICE;
+}
+extern void memmap_init_zone_device(struct zone *, unsigned long,
+ unsigned long, struct dev_pagemap *);
+#else
+static inline bool is_zone_device_page(const struct page *page)
+{
+ return false;
+}
+#endif
+
+static inline bool folio_is_zone_device(const struct folio *folio)
+{
+ return is_zone_device_page(&folio->page);
+}
+
+static inline bool is_zone_movable_page(const struct page *page)
+{
+ return page_zonenum(page) == ZONE_MOVABLE;
+}
+#endif
+
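To make the layout concrete, here is a minimal standalone sketch of the same shift-and-mask arithmetic. The 2-bit zone and 6-bit node widths and all DEMO_* names are hypothetical, since the real widths depend on the kernel configuration:

#include <assert.h>

#define DEMO_ZONES_WIDTH	2
#define DEMO_NODES_WIDTH	6
#define DEMO_BITS		(sizeof(unsigned long) * 8)
#define DEMO_NODES_PGOFF	(DEMO_BITS - DEMO_NODES_WIDTH)
#define DEMO_ZONES_PGOFF	(DEMO_NODES_PGOFF - DEMO_ZONES_WIDTH)
#define DEMO_ZONES_MASK		((1UL << DEMO_ZONES_WIDTH) - 1)
#define DEMO_NODES_MASK		((1UL << DEMO_NODES_WIDTH) - 1)

int main(void)
{
	unsigned long flags = 0;

	/* encode node 3, zone 2 into the upper bits, as set_page_links() would */
	flags |= 3UL << DEMO_NODES_PGOFF;
	flags |= 2UL << DEMO_ZONES_PGOFF;

	/* decode them back, mirroring page_zonenum()/page_to_nid() */
	assert(((flags >> DEMO_ZONES_PGOFF) & DEMO_ZONES_MASK) == 2);
	assert(((flags >> DEMO_NODES_PGOFF) & DEMO_NODES_MASK) == 3);
	return 0;
}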
/*
* Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
* intersection with the given zone
@@ -685,10 +1071,12 @@ struct zonelist {
struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};
-#ifndef CONFIG_DISCONTIGMEM
-/* The array of struct pages - for discontigmem use pgdat->lmem_map */
+/*
+ * The array of struct pages for flatmem.
+ * It must be declared for SPARSEMEM as well because there are configurations
+ * that rely on that.
+ */
extern struct page *mem_map;
-#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct deferred_split {
@@ -706,12 +1094,23 @@ struct deferred_split {
* Memory statistics and page replacement data structures are maintained on a
* per-zone basis.
*/
-struct bootmem_data;
typedef struct pglist_data {
+ /*
+ * node_zones contains just the zones for THIS node. Not all of the
+ * zones may be populated, but it is the full list. It is referenced by
+ * this node's node_zonelists as well as other nodes' node_zonelists.
+ */
struct zone node_zones[MAX_NR_ZONES];
+
+ /*
+ * node_zonelists contains references to all zones in all nodes.
+ * Generally the first zones will be references to this node's
+ * node_zones.
+ */
struct zonelist node_zonelists[MAX_ZONELISTS];
- int nr_zones;
-#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
+
+ int nr_zones; /* number of populated zones in this node */
+#ifdef CONFIG_FLATMEM /* means !SPARSEMEM */
struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
struct page_ext *node_page_ext;
@@ -721,6 +1120,8 @@ typedef struct pglist_data {
/*
* Must be held any time you expect node_start_pfn,
* node_present_pages, node_spanned_pages or nr_zones to stay constant.
+ * Also synchronizes pgdat->first_deferred_pfn during deferred page
+ * init.
*
* pgdat_resize_lock() and pgdat_resize_unlock() are provided to
* manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
@@ -737,18 +1138,28 @@ typedef struct pglist_data {
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
- struct task_struct *kswapd; /* Protected by
- mem_hotplug_begin/end() */
+
+ /* workqueues for throttling reclaim for different reasons. */
+ wait_queue_head_t reclaim_wait[NR_VMSCAN_THROTTLE];
+
+ atomic_t nr_writeback_throttled; /* nr of writeback-throttled tasks */
+ unsigned long nr_reclaim_start; /* nr of pages written when
+ * throttling started */
+#ifdef CONFIG_MEMORY_HOTPLUG
+ struct mutex kswapd_lock;
+#endif
+ struct task_struct *kswapd; /* Protected by kswapd_lock */
int kswapd_order;
- enum zone_type kswapd_classzone_idx;
+ enum zone_type kswapd_highest_zoneidx;
int kswapd_failures; /* Number of 'reclaimed == 0' runs */
#ifdef CONFIG_COMPACTION
int kcompactd_max_order;
- enum zone_type kcompactd_classzone_idx;
+ enum zone_type kcompactd_highest_zoneidx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
+ bool proactive_compact_trigger;
#endif
/*
* This is a per-node reserve of pages that are not available
@@ -765,8 +1176,7 @@ typedef struct pglist_data {
#endif /* CONFIG_NUMA */
/* Write-intensive fields used by page reclaim */
- ZONE_PADDING(_pad1_)
- spinlock_t lru_lock;
+ CACHELINE_PADDING(_pad1_);
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
@@ -780,6 +1190,21 @@ typedef struct pglist_data {
struct deferred_split deferred_split_queue;
#endif
+#ifdef CONFIG_NUMA_BALANCING
+ /* start time in ms of current promote rate limit period */
+ unsigned int nbp_rl_start;
+ /* number of promote candidate pages at start time of current rate limit period */
+ unsigned long nbp_rl_nr_cand;
+ /* promote threshold in ms */
+ unsigned int nbp_threshold;
+ /* start time in ms of current promote threshold adjustment period */
+ unsigned int nbp_th_start;
+ /*
+ * number of promote candidate pages at start time of current promote
+ * threshold adjustment period
+ */
+ unsigned long nbp_th_nr_cand;
+#endif
/* Fields commonly accessed by the page reclaim scanner */
/*
@@ -791,21 +1216,23 @@ typedef struct pglist_data {
unsigned long flags;
- ZONE_PADDING(_pad2_)
+#ifdef CONFIG_LRU_GEN
+ /* kswapd mm walk data */
+ struct lru_gen_mm_walk mm_walk;
+#endif
+
+ CACHELINE_PADDING(_pad2_);
/* Per-node vmstats */
struct per_cpu_nodestat __percpu *per_cpu_nodestats;
atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS];
+#ifdef CONFIG_NUMA
+ struct memory_tier __rcu *memtier;
+#endif
} pg_data_t;
#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
-#ifdef CONFIG_FLAT_NODE_MEM_MAP
-#define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr))
-#else
-#define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr))
-#endif
-#define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr))
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
@@ -815,28 +1242,28 @@ static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}
-static inline bool pgdat_is_empty(pg_data_t *pgdat)
-{
- return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
-}
-
#include <linux/memory_hotplug.h>
void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
- enum zone_type classzone_idx);
+ enum zone_type highest_zoneidx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
- int classzone_idx, unsigned int alloc_flags,
+ int highest_zoneidx, unsigned int alloc_flags,
long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
- unsigned long mark, int classzone_idx,
+ unsigned long mark, int highest_zoneidx,
unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
- unsigned long mark, int classzone_idx);
-enum memmap_context {
- MEMMAP_EARLY,
- MEMMAP_HOTPLUG,
+ unsigned long mark, int highest_zoneidx);
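As a usage sketch (not part of this patch), a caller could combine these helpers as follows; min_wmark_pages() and zone_idx() are existing mmzone.h helpers, and demo_check_zone() is hypothetical:

/* sketch: wake kswapd when an order-0 allocation would breach the
 * min watermark of @zone (illustrative caller, no special alloc flags) */
static void demo_check_zone(struct zone *zone)
{
	if (!zone_watermark_ok(zone, 0, min_wmark_pages(zone),
			       zone_idx(zone), 0))
		wakeup_kswapd(zone, GFP_KERNEL, 0, zone_idx(zone));
}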
+/*
+ * Memory initialization context, used to differentiate memory added
+ * statically by the platform from memory added via the memory hotplug
+ * interface.
+ */
+enum meminit_context {
+ MEMINIT_EARLY,
+ MEMINIT_HOTPLUG,
};
+
extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
unsigned long size);
@@ -851,20 +1278,6 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
#endif
}
-extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);
-
-#ifdef CONFIG_HAVE_MEMORY_PRESENT
-void memory_present(int nid, unsigned long start, unsigned long end);
-#else
-static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
-#endif
-
-#if defined(CONFIG_SPARSEMEM)
-void memblocks_present(void);
-#else
-static inline void memblocks_present(void) {}
-#endif
-
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
@@ -876,6 +1289,18 @@ static inline int local_memory_node(int node_id) { return node_id; };
*/
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
+#ifdef CONFIG_ZONE_DEVICE
+static inline bool zone_is_zone_device(struct zone *zone)
+{
+ return zone_idx(zone) == ZONE_DEVICE;
+}
+#else
+static inline bool zone_is_zone_device(struct zone *zone)
+{
+ return false;
+}
+#endif
+
/*
* Returns true if a zone has pages managed by the buddy allocator.
* All the reclaim decisions have to use this function rather than
@@ -914,22 +1339,11 @@ static inline void zone_set_nid(struct zone *zone, int nid) {}
extern int movable_zone;
-#ifdef CONFIG_HIGHMEM
-static inline int zone_movable_is_highmem(void)
-{
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
- return movable_zone == ZONE_HIGHMEM;
-#else
- return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
-#endif
-}
-#endif
-
static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
return (idx == ZONE_HIGHMEM ||
- (idx == ZONE_MOVABLE && zone_movable_is_highmem()));
+ (idx == ZONE_MOVABLE && movable_zone == ZONE_HIGHMEM));
#else
return 0;
#endif
@@ -939,51 +1353,58 @@ static inline int is_highmem_idx(enum zone_type idx)
* is_highmem - helper function to quickly check if a struct zone is a
* highmem zone or not. This is an attempt to keep references
* to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
- * @zone - pointer to struct zone variable
+ * @zone: pointer to struct zone variable
+ * Return: 1 for a highmem zone, 0 otherwise
*/
static inline int is_highmem(struct zone *zone)
{
-#ifdef CONFIG_HIGHMEM
return is_highmem_idx(zone_idx(zone));
+}
+
+#ifdef CONFIG_ZONE_DMA
+bool has_managed_dma(void);
#else
- return 0;
-#endif
+static inline bool has_managed_dma(void)
+{
+ return false;
}
+#endif
/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
-int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-int watermark_boost_factor_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
+
+int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *,
+ loff_t *);
+int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *,
+ size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
-int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
+int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *,
+ size_t *, loff_t *);
+int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *, int,
+ void *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
+ void *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-
-extern int numa_zonelist_order_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
+ void *, size_t *, loff_t *);
+int numa_zonelist_order_handler(struct ctl_table *, int,
+ void *, size_t *, loff_t *);
+extern int percpu_pagelist_high_fraction;
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
extern struct pglist_data contig_page_data;
-#define NODE_DATA(nid) (&contig_page_data)
-#define NODE_MEM_MAP(nid) mem_map
+static inline struct pglist_data *NODE_DATA(int nid)
+{
+ return &contig_page_data;
+}
-#else /* CONFIG_NEED_MULTIPLE_NODES */
+#else /* CONFIG_NUMA */
#include <asm/mmzone.h>
-#endif /* !CONFIG_NEED_MULTIPLE_NODES */
+#endif /* !CONFIG_NUMA */
extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
@@ -991,7 +1412,7 @@ extern struct zone *next_zone(struct zone *zone);
/**
* for_each_online_pgdat - helper macro to iterate over all online nodes
- * @pgdat - pointer to a pg_data_t variable
+ * @pgdat: pointer to a pg_data_t variable
*/
#define for_each_online_pgdat(pgdat) \
for (pgdat = first_online_pgdat(); \
@@ -999,7 +1420,7 @@ extern struct zone *next_zone(struct zone *zone);
pgdat = next_online_pgdat(pgdat))
/**
* for_each_zone - helper macro to iterate over all memory zones
- * @zone - pointer to struct zone variable
+ * @zone: pointer to struct zone variable
*
* The user only needs to declare the zone variable, for_each_zone
* fills it in.
@@ -1038,15 +1459,18 @@ struct zoneref *__next_zones_zonelist(struct zoneref *z,
/**
* next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
- * @z - The cursor used as a starting point for the search
- * @highest_zoneidx - The zone index of the highest zone to return
- * @nodes - An optional nodemask to filter the zonelist with
+ * @z: The cursor used as a starting point for the search
+ * @highest_zoneidx: The zone index of the highest zone to return
+ * @nodes: An optional nodemask to filter the zonelist with
*
* This function returns the next zone at or below a given zone index that is
* within the allowed nodemask using a cursor as the starting point for the
* search. The zoneref returned is a cursor that represents the current zone
* being examined. It should be advanced by one before calling
* next_zones_zonelist again.
+ *
+ * Return: the next zone at or below highest_zoneidx within the allowed
+ * nodemask using a cursor within a zonelist as a starting point
*/
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
enum zone_type highest_zoneidx,
@@ -1059,10 +1483,9 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
/**
* first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
- * @zonelist - The zonelist to search for a suitable zone
- * @highest_zoneidx - The zone index of the highest zone to return
- * @nodes - An optional nodemask to filter the zonelist with
- * @return - Zoneref pointer for the first suitable zone found (see below)
+ * @zonelist: The zonelist to search for a suitable zone
+ * @highest_zoneidx: The zone index of the highest zone to return
+ * @nodes: An optional nodemask to filter the zonelist with
*
* This function returns the first zone at or below a given zone index that is
* within the allowed nodemask. The zoneref returned is a cursor that can be
@@ -1072,6 +1495,8 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
* When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
* never NULL). This may happen either genuinely, or due to concurrent nodemask
* update due to cpuset modification.
+ *
+ * Return: Zoneref pointer for the first suitable zone found
*/
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
enum zone_type highest_zoneidx,
@@ -1083,11 +1508,11 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
/**
* for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
- * @zone - The current zone in the iterator
- * @z - The current pointer within zonelist->_zonerefs being iterated
- * @zlist - The zonelist being iterated
- * @highidx - The zone index of the highest zone to return
- * @nodemask - Nodemask allowed by the allocator
+ * @zone: The current zone in the iterator
+ * @z: The current pointer within zonelist->_zonerefs being iterated
+ * @zlist: The zonelist being iterated
+ * @highidx: The zone index of the highest zone to return
+ * @nodemask: Nodemask allowed by the allocator
*
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask.
@@ -1098,7 +1523,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
z = next_zones_zonelist(++z, highidx, nodemask), \
zone = zonelist_zone(z))
-#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
+#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
for (zone = z->zone; \
zone; \
z = next_zones_zonelist(++z, highidx, nodemask), \
@@ -1107,27 +1532,40 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
/**
* for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
- * @zone - The current zone in the iterator
- * @z - The current pointer within zonelist->zones being iterated
- * @zlist - The zonelist being iterated
- * @highidx - The zone index of the highest zone to return
+ * @zone: The current zone in the iterator
+ * @z: The current pointer within zonelist->zones being iterated
+ * @zlist: The zonelist being iterated
+ * @highidx: The zone index of the highest zone to return
*
 * This iterator iterates through all zones at or below a given zone index.
*/
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
-#ifdef CONFIG_SPARSEMEM
-#include <asm/sparsemem.h>
-#endif
-
-#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
- !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
-static inline unsigned long early_pfn_to_nid(unsigned long pfn)
+/* Whether the 'nodes' are all movable nodes */
+static inline bool movable_only_nodes(nodemask_t *nodes)
{
- BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA));
- return 0;
+ struct zonelist *zonelist;
+ struct zoneref *z;
+ int nid;
+
+ if (nodes_empty(*nodes))
+ return false;
+
+ /*
+ * We can choose an arbitrary node from the nodemask to get a
+ * zonelist, as they are interlinked. We just need to find
+ * at least one zone that can satisfy kernel allocations.
+ */
+ nid = first_node(*nodes);
+ zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
+ z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
+ return !z->zone;
}
+
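A hedged usage sketch: policy code deciding whether a set of nodes can hold kernel data might use the helper like this (demo_can_host_kernel_data() is hypothetical):

/* sketch: bail out early if the candidate nodes can only satisfy
 * movable (userspace) allocations */
static int demo_can_host_kernel_data(nodemask_t *demo_target_nodes)
{
	if (movable_only_nodes(demo_target_nodes))
		return 0;	/* no ZONE_NORMAL or below available */
	return 1;
}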
+#ifdef CONFIG_SPARSEMEM
+#include <asm/sparsemem.h>
#endif
#ifdef CONFIG_FLATMEM
@@ -1137,8 +1575,6 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
#ifdef CONFIG_SPARSEMEM
/*
- * SECTION_SHIFT #bits space required to store a section #
- *
* PA_SECTION_SHIFT physical address to/from section number
* PFN_SECTION_SHIFT pfn to/from section number
*/
@@ -1170,6 +1606,7 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec)
#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
#define SUBSECTION_SHIFT 21
+#define SUBSECTION_SIZE (1UL << SUBSECTION_SHIFT)
#define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
#define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT)
@@ -1185,7 +1622,9 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec)
#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
struct mem_section_usage {
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
+#endif
/* See declaration of similar field in struct zone */
unsigned long pageblock_flags[0];
};
@@ -1247,15 +1686,17 @@ static inline unsigned long *section_to_usemap(struct mem_section *ms)
static inline struct mem_section *__nr_to_section(unsigned long nr)
{
+ unsigned long root = SECTION_NR_TO_ROOT(nr);
+
+ if (unlikely(root >= NR_SECTION_ROOTS))
+ return NULL;
+
#ifdef CONFIG_SPARSEMEM_EXTREME
- if (!mem_section)
+ if (!mem_section || !mem_section[root])
return NULL;
#endif
- if (!mem_section[SECTION_NR_TO_ROOT(nr)])
- return NULL;
- return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
+ return &mem_section[root][nr & SECTION_ROOT_MASK];
}
-extern unsigned long __section_nr(struct mem_section *ms);
extern size_t mem_section_usage_size(void);
/*
@@ -1269,15 +1710,32 @@ extern size_t mem_section_usage_size(void);
* (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the
* worst combination is powerpc with 256k pages,
* which results in PFN_SECTION_SHIFT equal 6.
- * To sum it up, at least 6 bits are available.
+ * To sum it up, at least 6 bits are available on all architectures.
+ * More bits are available on architectures other than powerpc: e.g. 15
+ * bits on x86_64, and 13 bits on arm64 in the worst case of 64K pages,
+ * provided any bit beyond the sixth is never used on powerpc.
*/
-#define SECTION_MARKED_PRESENT (1UL<<0)
-#define SECTION_HAS_MEM_MAP (1UL<<1)
-#define SECTION_IS_ONLINE (1UL<<2)
-#define SECTION_IS_EARLY (1UL<<3)
-#define SECTION_MAP_LAST_BIT (1UL<<4)
-#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
-#define SECTION_NID_SHIFT 3
+enum {
+ SECTION_MARKED_PRESENT_BIT,
+ SECTION_HAS_MEM_MAP_BIT,
+ SECTION_IS_ONLINE_BIT,
+ SECTION_IS_EARLY_BIT,
+#ifdef CONFIG_ZONE_DEVICE
+ SECTION_TAINT_ZONE_DEVICE_BIT,
+#endif
+ SECTION_MAP_LAST_BIT,
+};
+
+#define SECTION_MARKED_PRESENT BIT(SECTION_MARKED_PRESENT_BIT)
+#define SECTION_HAS_MEM_MAP BIT(SECTION_HAS_MEM_MAP_BIT)
+#define SECTION_IS_ONLINE BIT(SECTION_IS_ONLINE_BIT)
+#define SECTION_IS_EARLY BIT(SECTION_IS_EARLY_BIT)
+#ifdef CONFIG_ZONE_DEVICE
+#define SECTION_TAINT_ZONE_DEVICE BIT(SECTION_TAINT_ZONE_DEVICE_BIT)
+#endif
+#define SECTION_MAP_MASK (~(BIT(SECTION_MAP_LAST_BIT) - 1))
+#define SECTION_NID_SHIFT SECTION_MAP_LAST_BIT
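Since SECTION_NID_SHIFT now equals SECTION_MAP_LAST_BIT, the node id occupies all bits above the flags. A minimal sketch of the resulting encode/decode arithmetic, mirroring what sparse_encode_early_nid() does (the demo_* values are hypothetical):

/* pack node id 3 above the flag bits, then recover both halves */
unsigned long demo_map = (3UL << SECTION_NID_SHIFT) | SECTION_MARKED_PRESENT;
unsigned long demo_nid = demo_map >> SECTION_NID_SHIFT;	/* == 3 */
unsigned long demo_flags = demo_map & ~SECTION_MAP_MASK;	/* == SECTION_MARKED_PRESENT */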
static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
@@ -1316,6 +1774,20 @@ static inline int online_section(struct mem_section *section)
return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}
+#ifdef CONFIG_ZONE_DEVICE
+static inline int online_device_section(struct mem_section *section)
+{
+ unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;
+
+ return section && ((section->section_mem_map & flags) == flags);
+}
+#else
+static inline int online_device_section(struct mem_section *section)
+{
+ return 0;
+}
+#endif
+
static inline int online_section_nr(unsigned long nr)
{
return online_section(__nr_to_section(nr));
@@ -1323,10 +1795,8 @@ static inline int online_section_nr(unsigned long nr)
#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
-#ifdef CONFIG_MEMORY_HOTREMOVE
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif
-#endif
static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
@@ -1355,13 +1825,33 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
#endif
#ifndef CONFIG_HAVE_ARCH_PFN_VALID
+/**
+ * pfn_valid - check if there is a valid memory map entry for a PFN
+ * @pfn: the page frame number to check
+ *
+ * Check if there is a valid memory map entry, i.e. a struct page, for @pfn.
+ * Note that availability of the memory map entry does not imply that
+ * there is actual usable memory at that @pfn. The struct page may
+ * represent a hole or an unusable page frame.
+ *
+ * Return: 1 for PFNs that have memory map entries and 0 otherwise
+ */
static inline int pfn_valid(unsigned long pfn)
{
struct mem_section *ms;
+ /*
+ * Ensure the upper PAGE_SHIFT bits are clear in the
+ * pfn. Else it might lead to false positives when
+ * some of the upper bits are set, but the lower bits
+ * match a valid pfn.
+ */
+ if (PHYS_PFN(PFN_PHYS(pfn)) != pfn)
+ return 0;
+
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
- ms = __nr_to_section(pfn_to_section_nr(pfn));
+ ms = __pfn_to_section(pfn);
if (!valid_section(ms))
return 0;
/*
@@ -1372,11 +1862,11 @@ static inline int pfn_valid(unsigned long pfn)
}
#endif
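A usage illustration (a sketch, not from the patch): a walker that counts PFNs backed by a memory map entry, keeping in mind that a valid entry does not mean usable or online memory:

/* sketch: count PFNs in [start, end) that have a memory map entry */
static unsigned long demo_count_valid(unsigned long start, unsigned long end)
{
	unsigned long pfn, valid = 0;

	for (pfn = start; pfn < end; pfn++)
		if (pfn_valid(pfn))
			valid++;
	return valid;
}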
-static inline int pfn_present(unsigned long pfn)
+static inline int pfn_in_present_section(unsigned long pfn)
{
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
- return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
+ return present_section(__pfn_to_section(pfn));
}
static inline unsigned long next_present_section_nr(unsigned long section_nr)
@@ -1404,75 +1894,14 @@ static inline unsigned long next_present_section_nr(unsigned long section_nr)
#define pfn_to_nid(pfn) (0)
#endif
-#define early_pfn_valid(pfn) pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init() do {} while (0)
#define sparse_index_init(_sec, _nid) do {} while (0)
-#define pfn_present pfn_valid
+#define pfn_in_present_section pfn_valid
#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
#endif /* CONFIG_SPARSEMEM */
-/*
- * During memory init memblocks map pfns to nids. The search is expensive and
- * this caches recent lookups. The implementation of __early_pfn_to_nid
- * may treat start/end as pfns or sections.
- */
-struct mminit_pfnnid_cache {
- unsigned long last_start;
- unsigned long last_end;
- int last_nid;
-};
-
-#ifndef early_pfn_valid
-#define early_pfn_valid(pfn) (1)
-#endif
-
-void memory_present(int nid, unsigned long start, unsigned long end);
-
-/*
- * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
- * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
- * pfn_valid_within() should be used in this case; we optimise this away
- * when we have no holes within a MAX_ORDER_NR_PAGES block.
- */
-#ifdef CONFIG_HOLES_IN_ZONE
-#define pfn_valid_within(pfn) pfn_valid(pfn)
-#else
-#define pfn_valid_within(pfn) (1)
-#endif
-
-#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
-/*
- * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
- * associated with it or not. This means that a struct page exists for this
- * pfn. The caller cannot assume the page is fully initialized in general.
- * Hotplugable pages might not have been onlined yet. pfn_to_online_page()
- * will ensure the struct page is fully online and initialized. Special pages
- * (e.g. ZONE_DEVICE) are never onlined and should be treated accordingly.
- *
- * In FLATMEM, it is expected that holes always have valid memmap as long as
- * there is valid PFNs either side of the hole. In SPARSEMEM, it is assumed
- * that a valid section has a memmap for the entire section.
- *
- * However, an ARM, and maybe other embedded architectures in the future
- * free memmap backing holes to save memory on the assumption the memmap is
- * never used. The page_zone linkages are then broken even though pfn_valid()
- * returns true. A walker of the full memmap must then do this additional
- * check to ensure the memmap they are looking at is sane by making sure
- * the zone and PFN linkages are still valid. This is expensive, but walkers
- * of the full memmap are extremely rare.
- */
-bool memmap_valid_within(unsigned long pfn,
- struct page *page, struct zone *zone);
-#else
-static inline bool memmap_valid_within(unsigned long pfn,
- struct page *page, struct zone *zone)
-{
- return true;
-}
-#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
-
#endif /* !__GENERATING_BOUNDS.H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */
diff --git a/include/linux/mnt_idmapping.h b/include/linux/mnt_idmapping.h
new file mode 100644
index 000000000000..f6e5369d2928
--- /dev/null
+++ b/include/linux/mnt_idmapping.h
@@ -0,0 +1,475 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MNT_IDMAPPING_H
+#define _LINUX_MNT_IDMAPPING_H
+
+#include <linux/types.h>
+#include <linux/uidgid.h>
+
+struct user_namespace;
+/*
+ * Carries the initial idmapping of 0:0:4294967295 which is an identity
+ * mapping. This means that {g,u}id 0 is mapped to {g,u}id 0, {g,u}id 1 is
+ * mapped to {g,u}id 1, [...], {g,u}id 1000 to {g,u}id 1000, [...].
+ */
+extern struct user_namespace init_user_ns;
+
+typedef struct {
+ uid_t val;
+} vfsuid_t;
+
+typedef struct {
+ gid_t val;
+} vfsgid_t;
+
+static_assert(sizeof(vfsuid_t) == sizeof(kuid_t));
+static_assert(sizeof(vfsgid_t) == sizeof(kgid_t));
+static_assert(offsetof(vfsuid_t, val) == offsetof(kuid_t, val));
+static_assert(offsetof(vfsgid_t, val) == offsetof(kgid_t, val));
+
+#ifdef CONFIG_MULTIUSER
+static inline uid_t __vfsuid_val(vfsuid_t uid)
+{
+ return uid.val;
+}
+
+static inline gid_t __vfsgid_val(vfsgid_t gid)
+{
+ return gid.val;
+}
+#else
+static inline uid_t __vfsuid_val(vfsuid_t uid)
+{
+ return 0;
+}
+
+static inline gid_t __vfsgid_val(vfsgid_t gid)
+{
+ return 0;
+}
+#endif
+
+static inline bool vfsuid_valid(vfsuid_t uid)
+{
+ return __vfsuid_val(uid) != (uid_t)-1;
+}
+
+static inline bool vfsgid_valid(vfsgid_t gid)
+{
+ return __vfsgid_val(gid) != (gid_t)-1;
+}
+
+static inline bool vfsuid_eq(vfsuid_t left, vfsuid_t right)
+{
+ return vfsuid_valid(left) && __vfsuid_val(left) == __vfsuid_val(right);
+}
+
+static inline bool vfsgid_eq(vfsgid_t left, vfsgid_t right)
+{
+ return vfsgid_valid(left) && __vfsgid_val(left) == __vfsgid_val(right);
+}
+
+/**
+ * vfsuid_eq_kuid - check whether kuid and vfsuid have the same value
+ * @vfsuid: the vfsuid to compare
+ * @kuid: the kuid to compare
+ *
+ * Check whether @vfsuid and @kuid have the same values.
+ *
+ * Return: true if @vfsuid and @kuid have the same value, false if not.
+ * Comparison between two invalid uids returns false.
+ */
+static inline bool vfsuid_eq_kuid(vfsuid_t vfsuid, kuid_t kuid)
+{
+ return vfsuid_valid(vfsuid) && __vfsuid_val(vfsuid) == __kuid_val(kuid);
+}
+
+/**
+ * vfsgid_eq_kgid - check whether kgid and vfsgid have the same value
+ * @vfsgid: the vfsgid to compare
+ * @kgid: the kgid to compare
+ *
+ * Check whether @vfsgid and @kgid have the same values.
+ *
+ * Return: true if @vfsgid and @kgid have the same value, false if not.
+ * Comparison between two invalid gids returns false.
+ */
+static inline bool vfsgid_eq_kgid(vfsgid_t vfsgid, kgid_t kgid)
+{
+ return vfsgid_valid(vfsgid) && __vfsgid_val(vfsgid) == __kgid_val(kgid);
+}
+
+/*
+ * vfs{g,u}ids are created from k{g,u}ids.
+ * We don't allow them to be created from regular {u,g}id.
+ */
+#define VFSUIDT_INIT(val) (vfsuid_t){ __kuid_val(val) }
+#define VFSGIDT_INIT(val) (vfsgid_t){ __kgid_val(val) }
+
+#define INVALID_VFSUID VFSUIDT_INIT(INVALID_UID)
+#define INVALID_VFSGID VFSGIDT_INIT(INVALID_GID)
+
+/*
+ * Allow a vfs{g,u}id to be used as a k{g,u}id where we want to compare
+ * whether the mapped value is identical to value of a k{g,u}id.
+ */
+#define AS_KUIDT(val) (kuid_t){ __vfsuid_val(val) }
+#define AS_KGIDT(val) (kgid_t){ __vfsgid_val(val) }
+
+#ifdef CONFIG_MULTIUSER
+/**
+ * vfsgid_in_group_p() - check whether a vfsgid matches the caller's groups
+ * @vfsgid: the mnt gid to match
+ *
+ * This function can be used to determine whether @vfsgid matches any of the
+ * caller's groups.
+ *
+ * Return: 1 if @vfsgid matches the caller's groups, 0 if not.
+ */
+static inline int vfsgid_in_group_p(vfsgid_t vfsgid)
+{
+ return in_group_p(AS_KGIDT(vfsgid));
+}
+#else
+static inline int vfsgid_in_group_p(vfsgid_t vfsgid)
+{
+ return 1;
+}
+#endif
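A hedged sketch of the intended call pattern, loosely modeled on POSIX mode checking; the surrounding permission logic and demo_class_bits() are illustrative, not part of this header:

/* sketch: pick the group class of a POSIX mode when the caller is
 * in the file's (mount-mapped) group */
static umode_t demo_class_bits(vfsgid_t vfsgid, umode_t mode)
{
	if (vfsgid_in_group_p(vfsgid))
		mode >>= 3;	/* use the group rwx bits */
	return mode & S_IRWXO;
}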
+
+/**
+ * initial_idmapping - check whether this is the initial mapping
+ * @ns: idmapping to check
+ *
+ * Check whether this is the initial mapping, mapping 0 to 0, 1 to 1,
+ * [...], 1000 to 1000 [...].
+ *
+ * Return: true if this is the initial mapping, false if not.
+ */
+static inline bool initial_idmapping(const struct user_namespace *ns)
+{
+ return ns == &init_user_ns;
+}
+
+/**
+ * no_idmapping - check whether we can skip remapping a kuid/gid
+ * @mnt_userns: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ *
+ * This function can be used to check whether a remapping between two
+ * idmappings is required.
+ * An idmapped mount is a mount that has an idmapping attached to it that
+ * is different from the filesystem's idmapping and the initial idmapping.
+ * If the initial mapping is used or the idmapping of the mount and the
+ * filesystem are identical no remapping is required.
+ *
+ * Return: true if remapping can be skipped, false if not.
+ */
+static inline bool no_idmapping(const struct user_namespace *mnt_userns,
+ const struct user_namespace *fs_userns)
+{
+ return initial_idmapping(mnt_userns) || mnt_userns == fs_userns;
+}
+
+/**
+ * make_vfsuid - map a filesystem kuid into a mnt_userns
+ * @mnt_userns: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ * @kuid: kuid to be mapped
+ *
+ * Take a @kuid and remap it from @fs_userns into @mnt_userns. Use this
+ * function when preparing a @kuid to be reported to userspace.
+ *
+ * If no_idmapping() determines that this is not an idmapped mount we can
+ * simply return @kuid unchanged.
+ * If initial_idmapping() tells us that the filesystem is not mounted with an
+ * idmapping we know the value of @kuid won't change when calling
+ * from_kuid() so we can simply retrieve the value via __kuid_val()
+ * directly.
+ *
+ * Return: @kuid mapped according to @mnt_userns.
+ * If @kuid has no mapping in either @mnt_userns or @fs_userns INVALID_UID is
+ * returned.
+ */
+static inline vfsuid_t make_vfsuid(struct user_namespace *mnt_userns,
+ struct user_namespace *fs_userns,
+ kuid_t kuid)
+{
+ uid_t uid;
+
+ if (no_idmapping(mnt_userns, fs_userns))
+ return VFSUIDT_INIT(kuid);
+ if (initial_idmapping(fs_userns))
+ uid = __kuid_val(kuid);
+ else
+ uid = from_kuid(fs_userns, kuid);
+ if (uid == (uid_t)-1)
+ return INVALID_VFSUID;
+ return VFSUIDT_INIT(make_kuid(mnt_userns, uid));
+}
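A worked numeric example may help; the mount idmapping below (0:1000:1, so on-disk uid 0 appears as uid 1000) is hypothetical:

/*
 * sketch: filesystem mounted with the initial idmapping, mount idmapped
 * with 0:1000:1:
 *
 *   kuid 0 (the on-disk owner)
 *   -> no_idmapping() is false, initial_idmapping(fs_userns) is true,
 *      so uid = __kuid_val(kuid) = 0
 *   -> make_kuid(mnt_userns, 0) = 1000
 *   => stat(2) through the idmapped mount reports uid 1000.
 */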
+
+static inline kuid_t mapped_kuid_fs(struct user_namespace *mnt_userns,
+ struct user_namespace *fs_userns,
+ kuid_t kuid)
+{
+ return AS_KUIDT(make_vfsuid(mnt_userns, fs_userns, kuid));
+}
+
+/**
+ * make_vfsgid - map a filesystem kgid into a mnt_userns
+ * @mnt_userns: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ * @kgid: kgid to be mapped
+ *
+ * Take a @kgid and remap it from @fs_userns into @mnt_userns. Use this
+ * function when preparing a @kgid to be reported to userspace.
+ *
+ * If no_idmapping() determines that this is not an idmapped mount we can
+ * simply return @kgid unchanged.
+ * If initial_idmapping() tells us that the filesystem is not mounted with an
+ * idmapping we know the value of @kgid won't change when calling
+ * from_kgid() so we can simply retrieve the value via __kgid_val()
+ * directly.
+ *
+ * Return: @kgid mapped according to @mnt_userns.
+ * If @kgid has no mapping in either @mnt_userns or @fs_userns INVALID_GID is
+ * returned.
+ */
+static inline vfsgid_t make_vfsgid(struct user_namespace *mnt_userns,
+ struct user_namespace *fs_userns,
+ kgid_t kgid)
+{
+ gid_t gid;
+
+ if (no_idmapping(mnt_userns, fs_userns))
+ return VFSGIDT_INIT(kgid);
+ if (initial_idmapping(fs_userns))
+ gid = __kgid_val(kgid);
+ else
+ gid = from_kgid(fs_userns, kgid);
+ if (gid == (gid_t)-1)
+ return INVALID_VFSGID;
+ return VFSGIDT_INIT(make_kgid(mnt_userns, gid));
+}
+
+static inline kgid_t mapped_kgid_fs(struct user_namespace *mnt_userns,
+ struct user_namespace *fs_userns,
+ kgid_t kgid)
+{
+ return AS_KGIDT(make_vfsgid(mnt_userns, fs_userns, kgid));
+}
+
+/**
+ * from_vfsuid - map a vfsuid into the filesystem idmapping
+ * @mnt_userns: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ * @vfsuid: vfsuid to be mapped
+ *
+ * Map @vfsuid into the filesystem idmapping. This function has to be used in
+ * order to e.g. write @vfsuid to inode->i_uid.
+ *
+ * Return: @vfsuid mapped into the filesystem idmapping
+ */
+static inline kuid_t from_vfsuid(struct user_namespace *mnt_userns,
+ struct user_namespace *fs_userns,
+ vfsuid_t vfsuid)
+{
+ uid_t uid;
+
+ if (no_idmapping(mnt_userns, fs_userns))
+ return AS_KUIDT(vfsuid);
+ uid = from_kuid(mnt_userns, AS_KUIDT(vfsuid));
+ if (uid == (uid_t)-1)
+ return INVALID_UID;
+ if (initial_idmapping(fs_userns))
+ return KUIDT_INIT(uid);
+ return make_kuid(fs_userns, uid);
+}
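And the reverse direction as a code sketch: committing a vfsuid to an inode. demo_commit_uid(), the inode parameter, and the error choice are illustrative only:

/* sketch: map a userspace-supplied vfsuid into the filesystem idmapping
 * before storing it; struct inode is assumed from <linux/fs.h> */
static int demo_commit_uid(struct user_namespace *mnt_userns,
			   struct user_namespace *fs_userns,
			   struct inode *demo_inode, vfsuid_t vfsuid)
{
	kuid_t kuid = from_vfsuid(mnt_userns, fs_userns, vfsuid);

	if (!uid_valid(kuid))
		return -EOVERFLOW;	/* no mapping in the filesystem */
	demo_inode->i_uid = kuid;
	return 0;
}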
+
+/**
+ * mapped_kuid_user - map a user kuid into a mnt_userns
+ * @mnt_userns: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ * @kuid : kuid to be mapped
+ *
+ * Use the idmapping of @mnt_userns to remap a @kuid into @fs_userns. Use this
+ * function when preparing a @kuid to be written to disk or inode.
+ *
+ * If no_idmapping() determines that this is not an idmapped mount we can
+ * simply return @kuid unchanged.
+ * If initial_idmapping() tells us that the filesystem is not mounted with an
+ * idmapping we know the value of @kuid won't change when calling
+ * make_kuid() so we can simply retrieve the value via KUIDT_INIT()
+ * directly.
+ *
+ * Return: @kuid mapped according to @mnt_userns.
+ * If @kuid has no mapping in either @mnt_userns or @fs_userns INVALID_UID is
+ * returned.
+ */
+static inline kuid_t mapped_kuid_user(struct user_namespace *mnt_userns,
+ struct user_namespace *fs_userns,
+ kuid_t kuid)
+{
+ return from_vfsuid(mnt_userns, fs_userns, VFSUIDT_INIT(kuid));
+}
+
+/**
+ * vfsuid_has_fsmapping - check whether a vfsuid maps into the filesystem
+ * @mnt_userns: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ * @vfsuid: vfsuid to be mapped
+ *
+ * Check whether @vfsuid has a mapping in the filesystem idmapping. Use this
+ * function to check whether the filesystem idmapping has a mapping for
+ * @vfsuid.
+ *
+ * Return: true if @vfsuid has a mapping in the filesystem, false if not.
+ */
+static inline bool vfsuid_has_fsmapping(struct user_namespace *mnt_userns,
+ struct user_namespace *fs_userns,
+ vfsuid_t vfsuid)
+{
+ return uid_valid(from_vfsuid(mnt_userns, fs_userns, vfsuid));
+}
+
+/**
+ * vfsuid_into_kuid - convert vfsuid into kuid
+ * @vfsuid: the vfsuid to convert
+ *
+ * This can be used when a vfsuid is committed as a kuid.
+ *
+ * Return: a kuid with the value of @vfsuid
+ */
+static inline kuid_t vfsuid_into_kuid(vfsuid_t vfsuid)
+{
+ return AS_KUIDT(vfsuid);
+}
+
+/**
+ * from_vfsgid - map a vfsgid into the filesystem idmapping
+ * @mnt_userns: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ * @vfsgid: vfsgid to be mapped
+ *
+ * Map @vfsgid into the filesystem idmapping. This function has to be used in
+ * order to e.g. write @vfsgid to inode->i_gid.
+ *
+ * Return: @vfsgid mapped into the filesystem idmapping
+ */
+static inline kgid_t from_vfsgid(struct user_namespace *mnt_userns,
+ struct user_namespace *fs_userns,
+ vfsgid_t vfsgid)
+{
+ gid_t gid;
+
+ if (no_idmapping(mnt_userns, fs_userns))
+ return AS_KGIDT(vfsgid);
+ gid = from_kgid(mnt_userns, AS_KGIDT(vfsgid));
+ if (gid == (gid_t)-1)
+ return INVALID_GID;
+ if (initial_idmapping(fs_userns))
+ return KGIDT_INIT(gid);
+ return make_kgid(fs_userns, gid);
+}
+
+/**
+ * mapped_kgid_user - map a user kgid into a mnt_userns
+ * @mnt_userns: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ * @kgid: kgid to be mapped
+ *
+ * Use the idmapping of @mnt_userns to remap a @kgid into @fs_userns. Use this
+ * function when preparing a @kgid to be written to disk or inode.
+ *
+ * If no_idmapping() determines that this is not an idmapped mount we can
+ * simply return @kgid unchanged.
+ * If initial_idmapping() tells us that the filesystem is not mounted with an
+ * idmapping we know the value of @kgid won't change when calling
+ * make_kgid() so we can simply retrieve the value via KGIDT_INIT()
+ * directly.
+ *
+ * Return: @kgid mapped according to @mnt_userns.
+ * If @kgid has no mapping in either @mnt_userns or @fs_userns INVALID_GID is
+ * returned.
+ */
+static inline kgid_t mapped_kgid_user(struct user_namespace *mnt_userns,
+ struct user_namespace *fs_userns,
+ kgid_t kgid)
+{
+ return from_vfsgid(mnt_userns, fs_userns, VFSGIDT_INIT(kgid));
+}
+
+/**
+ * vfsgid_has_fsmapping - check whether a vfsgid maps into the filesystem
+ * @mnt_userns: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ * @vfsgid: vfsgid to be mapped
+ *
+ * Check whether @vfsgid has a mapping in the filesystem idmapping. Use this
+ * function to check whether the filesystem idmapping has a mapping for
+ * @vfsgid.
+ *
+ * Return: true if @vfsgid has a mapping in the filesystem, false if not.
+ */
+static inline bool vfsgid_has_fsmapping(struct user_namespace *mnt_userns,
+ struct user_namespace *fs_userns,
+ vfsgid_t vfsgid)
+{
+ return gid_valid(from_vfsgid(mnt_userns, fs_userns, vfsgid));
+}
+
+/**
+ * vfsgid_into_kgid - convert vfsgid into kgid
+ * @vfsgid: the vfsgid to convert
+ *
+ * This can be used when a vfsgid is committed as a kgid.
+ *
+ * Return: a kgid with the value of @vfsgid
+ */
+static inline kgid_t vfsgid_into_kgid(vfsgid_t vfsgid)
+{
+ return AS_KGIDT(vfsgid);
+}
+
+/**
+ * mapped_fsuid - return caller's fsuid mapped up into a mnt_userns
+ * @mnt_userns: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ *
+ * Use this helper to initialize a new vfs or filesystem object based on
+ * the caller's fsuid. A common example is initializing the i_uid field of
+ * a newly allocated inode triggered by a creation event such as mkdir or
+ * O_CREAT. Other examples include the allocation of quotas for a specific
+ * user.
+ *
+ * Return: the caller's current fsuid mapped up according to @mnt_userns.
+ */
+static inline kuid_t mapped_fsuid(struct user_namespace *mnt_userns,
+ struct user_namespace *fs_userns)
+{
+ return from_vfsuid(mnt_userns, fs_userns,
+ VFSUIDT_INIT(current_fsuid()));
+}
+
+/**
+ * mapped_fsgid - return caller's fsgid mapped up into a mnt_userns
+ * @mnt_userns: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ *
+ * Use this helper to initialize a new vfs or filesystem object based on
+ * the caller's fsgid. A common example is initializing the i_gid field of
+ * a newly allocated inode triggered by a creation event such as mkdir or
+ * O_CREAT. Other examples include the allocation of quotas for a specific
+ * user.
+ *
+ * Return: the caller's current fsgid mapped up according to @mnt_userns.
+ */
+static inline kgid_t mapped_fsgid(struct user_namespace *mnt_userns,
+ struct user_namespace *fs_userns)
+{
+ return from_vfsgid(mnt_userns, fs_userns,
+ VFSGIDT_INIT(current_fsgid()));
+}
+
+#endif /* _LINUX_MNT_IDMAPPING_H */
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index 35942084cd40..8f882f5881e8 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -6,10 +6,12 @@
struct mnt_namespace;
struct fs_struct;
struct user_namespace;
+struct ns_common;
extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *,
struct user_namespace *, struct fs_struct *);
extern void put_mnt_ns(struct mnt_namespace *ns);
+extern struct ns_common *from_mnt_ns(struct mnt_namespace *);
extern const struct file_operations proc_mounts_operations;
extern const struct file_operations proc_mountinfo_operations;
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index e3596db077dc..549590e9c644 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -16,6 +16,10 @@ typedef unsigned long kernel_ulong_t;
#define PCI_ANY_ID (~0)
+enum {
+ PCI_ID_F_VFIO_DRIVER_OVERRIDE = 1,
+};
+
/**
* struct pci_device_id - PCI device ID structure
* @vendor: Vendor ID to match (or PCI_ANY_ID)
@@ -34,12 +38,14 @@ typedef unsigned long kernel_ulong_t;
* Best practice is to use driver_data as an index
* into a static list of equivalent device types,
* instead of using it as a pointer.
+ * @override_only: Match only when dev->driver_override is this driver.
*/
struct pci_device_id {
__u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
__u32 subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */
__u32 class, class_mask; /* (class,subclass,prog-if) triplet */
kernel_ulong_t driver_data; /* Data private to the driver */
+ __u32 override_only;
};
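For illustration, a variant driver's ID table using the new field might look like the following; the IDs are made up, and PCI_DEVICE_DRIVER_OVERRIDE() is assumed from <linux/pci.h>:

/* sketch: match 0xabcd:0x1234 only when userspace sets driver_override */
static const struct pci_device_id demo_vfio_ids[] = {
	{ PCI_DEVICE_DRIVER_OVERRIDE(0xabcd, 0x1234,
				     PCI_ID_F_VFIO_DRIVER_OVERRIDE) },
	{}
};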
@@ -205,7 +211,7 @@ struct css_device_id {
kernel_ulong_t driver_data;
};
-#define ACPI_ID_LEN 9
+#define ACPI_ID_LEN 16
struct acpi_device_id {
__u8 id[ACPI_ID_LEN];
@@ -251,6 +257,8 @@ struct hda_device_id {
struct sdw_device_id {
__u16 mfg_id;
__u16 part_id;
+ __u8 sdw_version;
+ __u8 class_id;
kernel_ulong_t driver_data;
};
@@ -318,7 +326,7 @@ struct pcmcia_device_id {
#define INPUT_DEVICE_ID_LED_MAX 0x0f
#define INPUT_DEVICE_ID_SND_MAX 0x07
#define INPUT_DEVICE_ID_FF_MAX 0x7f
-#define INPUT_DEVICE_ID_SW_MAX 0x0f
+#define INPUT_DEVICE_ID_SW_MAX 0x10
#define INPUT_DEVICE_ID_PROP_MAX 0x1f
#define INPUT_DEVICE_ID_MATCH_BUS 1
@@ -434,7 +442,7 @@ struct virtio_device_id {
* For Hyper-V devices we use the device guid as the id.
*/
struct hv_vmbus_device_id {
- uuid_le guid;
+ guid_t guid;
kernel_ulong_t driver_data; /* Data private to the driver */
};
@@ -445,6 +453,7 @@ struct hv_vmbus_device_id {
struct rpmsg_device_id {
char name[RPMSG_NAME_SIZE];
+ kernel_ulong_t driver_data;
};
/* i2c */
@@ -532,6 +541,8 @@ enum dmi_field {
DMI_BIOS_VENDOR,
DMI_BIOS_VERSION,
DMI_BIOS_DATE,
+ DMI_BIOS_RELEASE,
+ DMI_EC_FIRMWARE_RELEASE,
DMI_SYS_VENDOR,
DMI_PRODUCT_NAME,
DMI_PRODUCT_VERSION,
@@ -663,16 +674,16 @@ struct x86_cpu_id {
__u16 vendor;
__u16 family;
__u16 model;
+ __u16 steppings;
__u16 feature; /* bit index */
kernel_ulong_t driver_data;
};
-#define X86_FEATURE_MATCH(x) \
- { X86_VENDOR_ANY, X86_FAMILY_ANY, X86_MODEL_ANY, x }
-
+/* Wild cards for x86_cpu_id::vendor, family, model and feature */
#define X86_VENDOR_ANY 0xffff
#define X86_FAMILY_ANY 0
#define X86_MODEL_ANY 0
+#define X86_STEPPING_ANY 0
#define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */
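A hedged example entry that exercises the wildcards and the new steppings field (initializer order follows the struct above; the family/model values are made up):

/* sketch: match family 6, model 0x8e, any stepping, any feature */
static const struct x86_cpu_id demo_cpu_ids[] = {
	{ X86_VENDOR_ANY, 6, 0x8e, X86_STEPPING_ANY, X86_FEATURE_ANY, 0 },
	{}
};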
/*
@@ -821,4 +832,83 @@ struct wmi_device_id {
const void *context;
};
+#define MHI_DEVICE_MODALIAS_FMT "mhi:%s"
+#define MHI_NAME_SIZE 32
+
+#define MHI_EP_DEVICE_MODALIAS_FMT "mhi_ep:%s"
+
+/**
+ * struct mhi_device_id - MHI device identification
+ * @chan: MHI channel name
+ * @driver_data: driver data;
+ */
+struct mhi_device_id {
+ const char chan[MHI_NAME_SIZE];
+ kernel_ulong_t driver_data;
+};
+
+#define AUXILIARY_NAME_SIZE 32
+#define AUXILIARY_MODULE_PREFIX "auxiliary:"
+
+struct auxiliary_device_id {
+ char name[AUXILIARY_NAME_SIZE];
+ kernel_ulong_t driver_data;
+};
+
+/* Surface System Aggregator Module */
+
+#define SSAM_MATCH_TARGET 0x1
+#define SSAM_MATCH_INSTANCE 0x2
+#define SSAM_MATCH_FUNCTION 0x4
+
+struct ssam_device_id {
+ __u8 match_flags;
+
+ __u8 domain;
+ __u8 category;
+ __u8 target;
+ __u8 instance;
+ __u8 function;
+
+ kernel_ulong_t driver_data;
+};
+
+/*
+ * DFL (Device Feature List)
+ *
+ * DFL defines a linked list of feature headers within the device MMIO space to
+ * provide an extensible way of adding features. Software can walk through these
+ * predefined data structures to enumerate features. It is currently used in
+ * FPGA devices. See Documentation/fpga/dfl.rst for more information.
+ *
+ * The dfl bus type is introduced to match the individual feature devices (dfl
+ * devices) for specific dfl drivers.
+ */
+
+/**
+ * struct dfl_device_id - dfl device identifier
+ * @type: DFL FIU type of the device. See enum dfl_id_type.
+ * @feature_id: feature identifier local to its DFL FIU type.
+ * @driver_data: driver specific data.
+ */
+struct dfl_device_id {
+ __u16 type;
+ __u16 feature_id;
+ kernel_ulong_t driver_data;
+};
+
+/* ISHTP (Integrated Sensor Hub Transport Protocol) */
+
+#define ISHTP_MODULE_PREFIX "ishtp:"
+
+/**
+ * struct ishtp_device_id - ISHTP device identifier
+ * @guid: GUID of the device.
+ * @driver_data: driver specific data
+ */
+struct ishtp_device_id {
+ guid_t guid;
+ kernel_ulong_t driver_data;
+};
+
#endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index 1ad393e62bef..ec61fb53979a 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -11,6 +11,7 @@
#include <linux/list.h>
#include <linux/stat.h>
+#include <linux/buildid.h>
#include <linux/compiler.h>
#include <linux/cache.h>
#include <linux/kmod.h>
@@ -25,13 +26,11 @@
#include <linux/error-injection.h>
#include <linux/tracepoint-defs.h>
#include <linux/srcu.h>
+#include <linux/static_call_types.h>
#include <linux/percpu.h>
#include <asm/module.h>
-/* Not Yet Implemented */
-#define MODULE_SUPPORTED_DEVICE(name)
-
#define MODULE_NAME_LEN MAX_PARAM_PREFIX_LEN
struct modversion_info {
@@ -65,7 +64,7 @@ struct module_version_attribute {
struct module_attribute mattr;
const char *module_name;
const char *version;
-} __attribute__ ((__aligned__(sizeof(void *))));
+};
extern ssize_t __modver_version_show(struct module_attribute *,
struct module_kobject *, char *);
@@ -130,13 +129,17 @@ extern void cleanup_module(void);
#define module_init(initfn) \
static inline initcall_t __maybe_unused __inittest(void) \
{ return initfn; } \
- int init_module(void) __copy(initfn) __attribute__((alias(#initfn)));
+ int init_module(void) __copy(initfn) \
+ __attribute__((alias(#initfn))); \
+ ___ADDRESSABLE(init_module, __initdata);
/* This is only required if you want to be unloadable. */
#define module_exit(exitfn) \
static inline exitcall_t __maybe_unused __exittest(void) \
{ return exitfn; } \
- void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn)));
+ void cleanup_module(void) __copy(exitfn) \
+ __attribute__((alias(#exitfn))); \
+ ___ADDRESSABLE(cleanup_module, __exitdata);
#endif
@@ -265,20 +268,20 @@ extern typeof(name) __mod_##type##__##name##_device_table \
#else
#define MODULE_VERSION(_version) \
MODULE_INFO(version, _version); \
- static struct module_version_attribute ___modver_attr = { \
- .mattr = { \
- .attr = { \
- .name = "version", \
- .mode = S_IRUGO, \
+ static struct module_version_attribute __modver_attr \
+ __used __section("__modver") \
+ __aligned(__alignof__(struct module_version_attribute)) \
+ = { \
+ .mattr = { \
+ .attr = { \
+ .name = "version", \
+ .mode = S_IRUGO, \
+ }, \
+ .show = __modver_version_show, \
}, \
- .show = __modver_version_show, \
- }, \
- .module_name = KBUILD_MODNAME, \
- .version = _version, \
- }; \
- static const struct module_version_attribute \
- __used __attribute__ ((__section__ ("__modver"))) \
- * __moduleparam_const __modver_attr = &___modver_attr
+ .module_name = KBUILD_MODNAME, \
+ .version = _version, \
+ }
#endif
/* Optional firmware file (or files) needed by the module
@@ -286,7 +289,7 @@ extern typeof(name) __mod_##type##__##name##_device_table \
* files require multiple MODULE_FIRMWARE() specifiers */
#define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware)
-#define MODULE_IMPORT_NS(ns) MODULE_INFO(import_ns, #ns)
+#define MODULE_IMPORT_NS(ns) MODULE_INFO(import_ns, __stringify(ns))
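With the __stringify() form, callers pass the bare namespace identifier, e.g.:

MODULE_IMPORT_NS(DMA_BUF);	/* sketch: imports the DMA_BUF symbol namespace */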
struct notifier_block;
@@ -366,6 +369,11 @@ struct module {
/* Unique handle for this module */
char name[MODULE_NAME_LEN];
+#ifdef CONFIG_STACKTRACE_BUILD_ID
+ /* Module build ID */
+ unsigned char build_id[BUILD_ID_SIZE_MAX];
+#endif
+
/* Sysfs stuff. */
struct module_kobject mkobj;
struct module_attribute *modinfo_attrs;
@@ -378,6 +386,11 @@ struct module {
const s32 *crcs;
unsigned int num_syms;
+#ifdef CONFIG_ARCH_USES_CFI_TRAPS
+ s32 *kcfi_traps;
+ s32 *kcfi_traps_end;
+#endif
+
/* Kernel parameters. */
#ifdef CONFIG_SYSFS
struct mutex param_lock;
@@ -389,18 +402,7 @@ struct module {
unsigned int num_gpl_syms;
const struct kernel_symbol *gpl_syms;
const s32 *gpl_crcs;
-
-#ifdef CONFIG_UNUSED_SYMBOLS
- /* unused exported symbols. */
- const struct kernel_symbol *unused_syms;
- const s32 *unused_crcs;
- unsigned int num_unused_syms;
-
- /* GPL-only, unused exported symbols. */
- unsigned int num_unused_gpl_syms;
- const struct kernel_symbol *unused_gpl_syms;
- const s32 *unused_gpl_crcs;
-#endif
+ bool using_gplonly_symbols;
#ifdef CONFIG_MODULE_SIG
/* Signature was verified. */
@@ -409,11 +411,6 @@ struct module {
bool async_probe_requested;
- /* symbols that will be GPL-only in the near future. */
- const struct kernel_symbol *gpl_future_syms;
- const s32 *gpl_future_crcs;
- unsigned int num_gpl_future_syms;
-
/* Exception table */
unsigned int num_exentries;
struct exception_table_entry *extable;
@@ -424,6 +421,9 @@ struct module {
/* Core layout: rbtree is accessed frequently, so keep together. */
struct module_layout core_layout __module_layout_align;
struct module_layout init_layout;
+#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
+ struct module_layout data_layout;
+#endif
/* Arch-specific module values */
struct mod_arch_specific arch;
@@ -458,6 +458,8 @@ struct module {
void __percpu *percpu;
unsigned int percpu_size;
#endif
+ void *noinstr_text_start;
+ unsigned int noinstr_text_size;
#ifdef CONFIG_TRACEPOINTS
unsigned int num_tracepoints;
@@ -471,6 +473,10 @@ struct module {
unsigned int num_bpf_raw_events;
struct bpf_raw_event_map *bpf_raw_events;
#endif
+#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
+ unsigned int btf_data_size;
+ void *btf_data;
+#endif
#ifdef CONFIG_JUMP_LABEL
struct jump_entry *jump_entries;
unsigned int num_jump_entries;
@@ -489,6 +495,21 @@ struct module {
unsigned int num_ftrace_callsites;
unsigned long *ftrace_callsites;
#endif
+#ifdef CONFIG_KPROBES
+ void *kprobes_text_start;
+ unsigned int kprobes_text_size;
+ unsigned long *kprobe_blacklist;
+ unsigned int num_kprobe_blacklist;
+#endif
+#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+ int num_static_call_sites;
+ struct static_call_site *static_call_sites;
+#endif
+#if IS_ENABLED(CONFIG_KUNIT)
+ int num_kunit_suites;
+ struct kunit_suite **kunit_suites;
+#endif
+
#ifdef CONFIG_LIVEPATCH
bool klp; /* Is this a livepatch module? */
@@ -498,6 +519,11 @@ struct module {
struct klp_modinfo *klp_info;
#endif
+#ifdef CONFIG_PRINTK_INDEX
+ unsigned int printk_index_size;
+ struct pi_entry **printk_index_start;
+#endif
+
#ifdef CONFIG_MODULE_UNLOAD
/* What modules depend on me? */
struct list_head source_list;
@@ -532,8 +558,6 @@ static inline unsigned long kallsyms_symbol_value(const Elf_Sym *sym)
}
#endif
-extern struct mutex module_mutex;
-
/* FIXME: It'd be nice to isolate modules during init, too, so they
aren't used before they (may) fail. But presently too much code
(IDE & SCSI) require entry into the module during init.*/
@@ -552,6 +576,11 @@ bool is_module_text_address(unsigned long addr);
static inline bool within_module_core(unsigned long addr,
const struct module *mod)
{
+#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
+ if ((unsigned long)mod->data_layout.base <= addr &&
+ addr < (unsigned long)mod->data_layout.base + mod->data_layout.size)
+ return true;
+#endif
return (unsigned long)mod->core_layout.base <= addr &&
addr < (unsigned long)mod->core_layout.base + mod->core_layout.size;
}
@@ -568,40 +597,9 @@ static inline bool within_module(unsigned long addr, const struct module *mod)
return within_module_init(addr, mod) || within_module_core(addr, mod);
}
-/* Search for module by name: must hold module_mutex. */
+/* Search for module by name: must be in a RCU-sched critical section. */
struct module *find_module(const char *name);
-struct symsearch {
- const struct kernel_symbol *start, *stop;
- const s32 *crcs;
- enum {
- NOT_GPL_ONLY,
- GPL_ONLY,
- WILL_BE_GPL_ONLY,
- } licence;
- bool unused;
-};
-
-/*
- * Search for an exported symbol by name.
- *
- * Must be called with module_mutex held or preemption disabled.
- */
-const struct kernel_symbol *find_symbol(const char *name,
- struct module **owner,
- const s32 **crc,
- bool gplok,
- bool warn);
-
-/*
- * Walk the exported symbol table
- *
- * Must be called with module_mutex held or preemption disabled.
- */
-bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
- struct module *owner,
- void *data), void *data);
-
/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if
symnum out of range. */
int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
@@ -610,13 +608,9 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
/* Look for this name: can be of form module:name. */
unsigned long module_kallsyms_lookup_name(const char *name);
-int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
- struct module *, unsigned long),
- void *data);
-
-extern void __noreturn __module_put_and_exit(struct module *mod,
+extern void __noreturn __module_put_and_kthread_exit(struct module *mod,
long code);
-#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code)
+#define module_put_and_kthread_exit(code) __module_put_and_kthread_exit(THIS_MODULE, code)
#ifdef CONFIG_MODULE_UNLOAD
int module_refcount(struct module *mod);
@@ -649,7 +643,6 @@ static inline void __module_get(struct module *module)
#define symbol_put_addr(p) do { } while (0)
#endif /* CONFIG_MODULE_UNLOAD */
-int ref_module(struct module *a, struct module *b);
/* This is a #define so the string doesn't get put in every .o file */
#define module_name(mod) \
@@ -667,7 +660,7 @@ void *dereference_module_function_descriptor(struct module *mod, void *ptr);
const char *module_address_lookup(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset,
- char **modname,
+ char **modname, const unsigned char **modbuildid,
char *namebuf);
int lookup_module_symbol_name(unsigned long addr, char *symname);
int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
@@ -682,19 +675,15 @@ static inline bool module_requested_async_probing(struct module *module)
return module && module->async_probe_requested;
}
-#ifdef CONFIG_LIVEPATCH
static inline bool is_livepatch_module(struct module *mod)
{
+#ifdef CONFIG_LIVEPATCH
return mod->klp;
-}
-#else /* !CONFIG_LIVEPATCH */
-static inline bool is_livepatch_module(struct module *mod)
-{
+#else
return false;
+#endif
}
-#endif /* CONFIG_LIVEPATCH */
-bool is_module_sig_enforced(void);
void set_module_sig_enforced(void);
#else /* !CONFIG_MODULES... */
@@ -747,7 +736,7 @@ static inline bool within_module(unsigned long addr, const struct module *mod)
}
/* Get/put a kernel symbol (calls should be symmetric) */
-#define symbol_get(x) ({ extern typeof(x) x __attribute__((weak)); &(x); })
+#define symbol_get(x) ({ extern typeof(x) x __attribute__((weak,visibility("hidden"))); &(x); })
#define symbol_put(x) do { } while (0)
#define symbol_put_addr(x) do { } while (0)
@@ -771,6 +760,7 @@ static inline const char *module_address_lookup(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset,
char **modname,
+ const unsigned char **modbuildid,
char *namebuf)
{
return NULL;
@@ -798,14 +788,6 @@ static inline unsigned long module_kallsyms_lookup_name(const char *name)
return 0;
}
-static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
- struct module *,
- unsigned long),
- void *data)
-{
- return 0;
-}
-
static inline int register_module_notifier(struct notifier_block *nb)
{
/* no events will happen anyway, so this can always succeed */
@@ -817,7 +799,7 @@ static inline int unregister_module_notifier(struct notifier_block *nb)
return 0;
}
-#define module_put_and_exit(code) do_exit(code)
+#define module_put_and_kthread_exit(code) kthread_exit(code)
static inline void print_modules(void)
{
@@ -828,10 +810,6 @@ static inline bool module_requested_async_probing(struct module *module)
return false;
}
-static inline bool is_module_sig_enforced(void)
-{
- return false;
-}
static inline void set_module_sig_enforced(void)
{
@@ -858,14 +836,6 @@ extern int module_sysfs_initialized;
#define __MODULE_STRING(x) __stringify(x)
-#ifdef CONFIG_STRICT_MODULE_RWX
-extern void module_enable_ro(const struct module *mod, bool after_init);
-extern void module_disable_ro(const struct module *mod);
-#else
-static inline void module_enable_ro(const struct module *mod, bool after_init) { }
-static inline void module_disable_ro(const struct module *mod) { }
-#endif
-
#ifdef CONFIG_GENERIC_BUG
void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
struct module *);
@@ -891,15 +861,26 @@ static inline bool retpoline_module_ok(bool has_retpoline)
#endif
#ifdef CONFIG_MODULE_SIG
+bool is_module_sig_enforced(void);
+
static inline bool module_sig_ok(struct module *module)
{
return module->sig_ok;
}
#else /* !CONFIG_MODULE_SIG */
+static inline bool is_module_sig_enforced(void)
+{
+ return false;
+}
+
static inline bool module_sig_ok(struct module *module)
{
return true;
}
#endif /* CONFIG_MODULE_SIG */
+int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+ struct module *, unsigned long),
+ void *data);
+
#endif /* _LINUX_MODULE_H */
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
index ca92aea8a6bd..9e09d11ffe5b 100644
--- a/include/linux/moduleloader.h
+++ b/include/linux/moduleloader.h
@@ -29,6 +29,11 @@ void *module_alloc(unsigned long size);
/* Free memory returned from module_alloc. */
void module_memfree(void *module_region);
+/* Determines if the section name is an init section (that is only used during
+ * module loading).
+ */
+bool module_init_section(const char *name);
+
/* Determines if the section name is an exit section (that is only used during
* module unloading)
*/
@@ -91,7 +96,8 @@ void module_arch_cleanup(struct module *mod);
/* Any cleanup before freeing mod->module_init */
void module_arch_freeing_init(struct module *mod);
-#if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC)
+#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+ !defined(CONFIG_KASAN_VMALLOC)
#include <linux/kasan.h>
#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 3ef917ff0964..962cd41a2cb5 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -21,12 +21,12 @@
#define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long))
#define __MODULE_INFO(tag, name, info) \
-static const char __UNIQUE_ID(name)[] \
- __used __attribute__((section(".modinfo"), unused, aligned(1))) \
- = __MODULE_INFO_PREFIX __stringify(tag) "=" info
+ static const char __UNIQUE_ID(name)[] \
+ __used __section(".modinfo") __aligned(1) \
+ = __MODULE_INFO_PREFIX __stringify(tag) "=" info
#define __MODULE_PARM_TYPE(name, _type) \
- __MODULE_INFO(parmtype, name##type, #name ":" _type)
+ __MODULE_INFO(parmtype, name##type, #name ":" _type)
/* One for each parameter, describing how to use it. Some files do
multiple of these per line, so can't just use MODULE_INFO. */
@@ -108,7 +108,7 @@ struct kparam_array
* ".") the kernel commandline parameter. Note that - is changed to _, so
* the user can use "foo-bar=1" even for variable "foo_bar".
*
- * @perm is 0 if the the variable is not to appear in sysfs, or 0444
+ * @perm is 0 if the variable is not to appear in sysfs, or 0444
* for world-readable, 0644 for root-writable, etc. Note that if it
* is writable, you may need to use kernel_param_lock() around
* accesses (esp. charp, which can be kfreed when it changes).
@@ -118,7 +118,7 @@ struct kparam_array
* you can create your own by defining those variables.
*
* Standard types are:
- * byte, short, ushort, int, uint, long, ulong
+ * byte, hexint, short, ushort, int, uint, long, ulong
* charp: a character pointer
* bool: a bool, values 0/1, y/n, Y/N.
* invbool: the above, only sense-reversed (N = true).
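/*
 * Declaration-side sketch (hypothetical driver, not part of this patch),
 * using the hexint type documented above:
 *
 *	static unsigned int debug_mask = 0xff;		// made-up parameter
 *	module_param(debug_mask, hexint, 0644);		// root-writable in sysfs
 *	MODULE_PARM_DESC(debug_mask, "debug bitmask (parsed and shown in hex)");
 */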
@@ -288,8 +288,8 @@ struct kparam_array
/* Default value instead of permissions? */ \
static const char __param_str_##name[] = prefix #name; \
static struct kernel_param __moduleparam_const __param_##name \
- __used \
- __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \
+ __used __section("__param") \
+ __aligned(__alignof__(struct kernel_param)) \
= { __param_str_##name, THIS_MODULE, ops, \
VERIFY_OCTAL_PERMISSIONS(perm), level, flags, { arg } }
@@ -431,6 +431,8 @@ extern int param_get_int(char *buffer, const struct kernel_param *kp);
extern const struct kernel_param_ops param_ops_uint;
extern int param_set_uint(const char *val, const struct kernel_param *kp);
extern int param_get_uint(char *buffer, const struct kernel_param *kp);
+int param_set_uint_minmax(const char *val, const struct kernel_param *kp,
+ unsigned int min, unsigned int max);
#define param_check_uint(name, p) __param_check(name, p, unsigned int)
extern const struct kernel_param_ops param_ops_long;
@@ -448,6 +450,11 @@ extern int param_set_ullong(const char *val, const struct kernel_param *kp);
extern int param_get_ullong(char *buffer, const struct kernel_param *kp);
#define param_check_ullong(name, p) __param_check(name, p, unsigned long long)
+extern const struct kernel_param_ops param_ops_hexint;
+extern int param_set_hexint(const char *val, const struct kernel_param *kp);
+extern int param_get_hexint(char *buffer, const struct kernel_param *kp);
+#define param_check_hexint(name, p) param_check_uint(name, p)
+
extern const struct kernel_param_ops param_ops_charp;
extern int param_set_charp(const char *val, const struct kernel_param *kp);
extern int param_get_charp(char *buffer, const struct kernel_param *kp);
diff --git a/drivers/staging/most/most.h b/include/linux/most.h
index 232e01b7f5d2..232e01b7f5d2 100644
--- a/drivers/staging/most/most.h
+++ b/include/linux/most.h
diff --git a/include/linux/mount.h b/include/linux/mount.h
index bf8cc4108b8f..55a4abaf6715 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -11,17 +11,15 @@
#define _LINUX_MOUNT_H
#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/nodemask.h>
-#include <linux/spinlock.h>
-#include <linux/seqlock.h>
-#include <linux/atomic.h>
+#include <asm/barrier.h>
struct super_block;
-struct vfsmount;
struct dentry;
-struct mnt_namespace;
+struct user_namespace;
+struct file_system_type;
struct fs_context;
+struct file;
+struct path;
#define MNT_NOSUID 0x01
#define MNT_NODEV 0x02
@@ -30,6 +28,7 @@ struct fs_context;
#define MNT_NODIRATIME 0x10
#define MNT_RELATIME 0x20
#define MNT_READONLY 0x40 /* does the user want this to be r/o? */
+#define MNT_NOSYMFOLLOW 0x80
#define MNT_SHRINKABLE 0x100
#define MNT_WRITE_HOLD 0x200
@@ -46,11 +45,12 @@ struct fs_context;
#define MNT_SHARED_MASK (MNT_UNBINDABLE)
#define MNT_USER_SETTABLE_MASK (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC \
| MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME \
- | MNT_READONLY)
+ | MNT_READONLY | MNT_NOSYMFOLLOW)
#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
- MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED)
+ MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | \
+ MNT_CURSOR)
#define MNT_INTERNAL 0x4000
@@ -64,19 +64,23 @@ struct fs_context;
#define MNT_SYNC_UMOUNT 0x2000000
#define MNT_MARKED 0x4000000
#define MNT_UMOUNT 0x8000000
+#define MNT_CURSOR 0x10000000
struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
struct super_block *mnt_sb; /* pointer to superblock */
int mnt_flags;
+ struct user_namespace *mnt_userns;
} __randomize_layout;
-struct file; /* forward dec */
-struct path;
+static inline struct user_namespace *mnt_user_ns(const struct vfsmount *mnt)
+{
+ /* Pairs with smp_store_release() in do_idmap_mount(). */
+ return smp_load_acquire(&mnt->mnt_userns);
+}
extern int mnt_want_write(struct vfsmount *mnt);
extern int mnt_want_write_file(struct file *file);
-extern int mnt_clone_write(struct vfsmount *mnt);
extern void mnt_drop_write(struct vfsmount *mnt);
extern void mnt_drop_write_file(struct file *file);
extern void mntput(struct vfsmount *mnt);
@@ -85,12 +89,10 @@ extern struct vfsmount *mnt_clone_internal(const struct path *path);
extern bool __mnt_is_readonly(struct vfsmount *mnt);
extern bool mnt_may_suid(struct vfsmount *mnt);
-struct path;
extern struct vfsmount *clone_private_mount(const struct path *path);
extern int __mnt_want_write(struct vfsmount *);
extern void __mnt_drop_write(struct vfsmount *);
-struct file_system_type;
extern struct vfsmount *fc_mount(struct fs_context *fc);
extern struct vfsmount *vfs_create_mount(struct fs_context *fc);
extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
@@ -104,9 +106,20 @@ extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list);
extern void mark_mounts_for_expiry(struct list_head *mounts);
extern dev_t name_to_dev_t(const char *name);
-
-extern unsigned int sysctl_mount_max;
-
extern bool path_is_mountpoint(const struct path *path);
+extern bool our_mnt(struct vfsmount *mnt);
+
+extern struct vfsmount *kern_mount(struct file_system_type *);
+extern void kern_unmount(struct vfsmount *mnt);
+extern int may_umount_tree(struct vfsmount *);
+extern int may_umount(struct vfsmount *);
+extern long do_mount(const char *, const char __user *,
+ const char *, unsigned long, void *);
+extern struct vfsmount *collect_mounts(const struct path *);
+extern void drop_collected_mounts(struct vfsmount *);
+extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
+ struct vfsmount *);
+extern void kern_unmount_array(struct vfsmount *mnt[], unsigned int num);
+
#endif /* _LINUX_MOUNT_H */
diff --git a/include/linux/moxtet.h b/include/linux/moxtet.h
index 490db6886dcc..79184948fab4 100644
--- a/include/linux/moxtet.h
+++ b/include/linux/moxtet.h
@@ -2,7 +2,7 @@
/*
* Turris Mox module configuration bus driver
*
- * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
*/
#ifndef __LINUX_MOXTET_H
diff --git a/include/linux/mpage.h b/include/linux/mpage.h
index 001f1fcf9836..1bdc39daac0a 100644
--- a/include/linux/mpage.h
+++ b/include/linux/mpage.h
@@ -13,13 +13,11 @@
#ifdef CONFIG_BLOCK
struct writeback_control;
+struct readahead_control;
-int mpage_readpages(struct address_space *mapping, struct list_head *pages,
- unsigned nr_pages, get_block_t get_block);
-int mpage_readpage(struct page *page, get_block_t get_block);
+void mpage_readahead(struct readahead_control *, get_block_t get_block);
+int mpage_read_folio(struct folio *folio, get_block_t get_block);
int mpage_writepages(struct address_space *mapping,
struct writeback_control *wbc, get_block_t get_block);
-int mpage_writepage(struct page *page, get_block_t *get_block,
- struct writeback_control *wbc);
#endif
diff --git a/include/linux/mpi.h b/include/linux/mpi.h
index 7bd6d8af0004..eb0d1c1db208 100644
--- a/include/linux/mpi.h
+++ b/include/linux/mpi.h
@@ -40,21 +40,79 @@ struct gcry_mpi {
typedef struct gcry_mpi *MPI;
#define mpi_get_nlimbs(a) ((a)->nlimbs)
+#define mpi_has_sign(a) ((a)->sign)
/*-- mpiutil.c --*/
MPI mpi_alloc(unsigned nlimbs);
+void mpi_clear(MPI a);
void mpi_free(MPI a);
int mpi_resize(MPI a, unsigned nlimbs);
+static inline MPI mpi_new(unsigned int nbits)
+{
+ return mpi_alloc((nbits + BITS_PER_MPI_LIMB - 1) / BITS_PER_MPI_LIMB);
+}
+
+MPI mpi_copy(MPI a);
+MPI mpi_alloc_like(MPI a);
+void mpi_snatch(MPI w, MPI u);
+MPI mpi_set(MPI w, MPI u);
+MPI mpi_set_ui(MPI w, unsigned long u);
+MPI mpi_alloc_set_ui(unsigned long u);
+void mpi_swap_cond(MPI a, MPI b, unsigned long swap);
+
+/* Constants used to return constant MPIs. See mpi_init if you
+ * want to add more constants.
+ */
+#define MPI_NUMBER_OF_CONSTANTS 6
+enum gcry_mpi_constants {
+ MPI_C_ZERO,
+ MPI_C_ONE,
+ MPI_C_TWO,
+ MPI_C_THREE,
+ MPI_C_FOUR,
+ MPI_C_EIGHT
+};
+
+MPI mpi_const(enum gcry_mpi_constants no);
+
/*-- mpicoder.c --*/
+
+/* Different formats of external big integer representation. */
+enum gcry_mpi_format {
+ GCRYMPI_FMT_NONE = 0,
+ GCRYMPI_FMT_STD = 1, /* Twos complement stored without length. */
+ GCRYMPI_FMT_PGP = 2, /* As used by OpenPGP (unsigned only). */
+ GCRYMPI_FMT_SSH = 3, /* As used by SSH (like STD but with length). */
+ GCRYMPI_FMT_HEX = 4, /* Hex format. */
+ GCRYMPI_FMT_USG = 5, /* Like STD but unsigned. */
+ GCRYMPI_FMT_OPAQUE = 8 /* Opaque format (some functions only). */
+};
+
MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes);
MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread);
+int mpi_fromstr(MPI val, const char *str);
+MPI mpi_scanval(const char *string);
MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len);
void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign);
int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
int *sign);
int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned nbytes,
int *sign);
+int mpi_print(enum gcry_mpi_format format, unsigned char *buffer,
+ size_t buflen, size_t *nwritten, MPI a);
+
+/*-- mpi-mod.c --*/
+void mpi_mod(MPI rem, MPI dividend, MPI divisor);
+
+/* Context used with Barrett reduction. */
+struct barrett_ctx_s;
+typedef struct barrett_ctx_s *mpi_barrett_t;
+
+mpi_barrett_t mpi_barrett_init(MPI m, int copy);
+void mpi_barrett_free(mpi_barrett_t ctx);
+void mpi_mod_barrett(MPI r, MPI x, mpi_barrett_t ctx);
+void mpi_mul_barrett(MPI w, MPI u, MPI v, mpi_barrett_t ctx);
/*-- mpi-pow.c --*/
int mpi_powm(MPI res, MPI base, MPI exp, MPI mod);
@@ -62,17 +120,154 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod);
/*-- mpi-cmp.c --*/
int mpi_cmp_ui(MPI u, ulong v);
int mpi_cmp(MPI u, MPI v);
+int mpi_cmpabs(MPI u, MPI v);
+
+/*-- mpi-sub-ui.c --*/
+int mpi_sub_ui(MPI w, MPI u, unsigned long vval);
/*-- mpi-bit.c --*/
void mpi_normalize(MPI a);
unsigned mpi_get_nbits(MPI a);
+int mpi_test_bit(MPI a, unsigned int n);
+void mpi_set_bit(MPI a, unsigned int n);
+void mpi_set_highbit(MPI a, unsigned int n);
+void mpi_clear_highbit(MPI a, unsigned int n);
+void mpi_clear_bit(MPI a, unsigned int n);
+void mpi_rshift_limbs(MPI a, unsigned int count);
+void mpi_rshift(MPI x, MPI a, unsigned int n);
+void mpi_lshift_limbs(MPI a, unsigned int count);
+void mpi_lshift(MPI x, MPI a, unsigned int n);
+
+/*-- mpi-add.c --*/
+void mpi_add_ui(MPI w, MPI u, unsigned long v);
+void mpi_add(MPI w, MPI u, MPI v);
+void mpi_sub(MPI w, MPI u, MPI v);
+void mpi_addm(MPI w, MPI u, MPI v, MPI m);
+void mpi_subm(MPI w, MPI u, MPI v, MPI m);
+
+/*-- mpi-mul.c --*/
+void mpi_mul(MPI w, MPI u, MPI v);
+void mpi_mulm(MPI w, MPI u, MPI v, MPI m);
+
+/*-- mpi-div.c --*/
+void mpi_tdiv_r(MPI rem, MPI num, MPI den);
+void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor);
+void mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor);
+
+/*-- mpi-inv.c --*/
+int mpi_invm(MPI x, MPI a, MPI n);
+
+/*-- ec.c --*/
+
+/* Object to represent a point in projective coordinates */
+struct gcry_mpi_point {
+ MPI x;
+ MPI y;
+ MPI z;
+};
+
+typedef struct gcry_mpi_point *MPI_POINT;
+
+/* Models describing an elliptic curve */
+enum gcry_mpi_ec_models {
+ /* The Short Weierstrass equation is
+ * y^2 = x^3 + ax + b
+ */
+ MPI_EC_WEIERSTRASS = 0,
+ /* The Montgomery equation is
+ * by^2 = x^3 + ax^2 + x
+ */
+ MPI_EC_MONTGOMERY,
+ /* The Twisted Edwards equation is
+ * ax^2 + y^2 = 1 + bx^2y^2
+ * Note that we use 'b' instead of the commonly used 'd'.
+ */
+ MPI_EC_EDWARDS
+};
+
+/* Dialects used with elliptic curves */
+enum ecc_dialects {
+ ECC_DIALECT_STANDARD = 0,
+ ECC_DIALECT_ED25519,
+ ECC_DIALECT_SAFECURVE
+};
+
+/* This context is used with all our EC functions. */
+struct mpi_ec_ctx {
+ enum gcry_mpi_ec_models model; /* The model describing this curve. */
+ enum ecc_dialects dialect; /* The ECC dialect used with the curve. */
+ int flags; /* Public key flags (not always used). */
+ unsigned int nbits; /* Number of bits. */
+
+ /* Domain parameters. Note that they may not all be set and, if set,
+ * the MPIs may be flagged as constant.
+ */
+ MPI p; /* Prime specifying the field GF(p). */
+ MPI a; /* First coefficient of the Weierstrass equation. */
+ MPI b; /* Second coefficient of the Weierstrass equation. */
+ MPI_POINT G; /* Base point (generator). */
+ MPI n; /* Order of G. */
+ unsigned int h; /* Cofactor. */
+
+ /* The actual key. May not be set. */
+ MPI_POINT Q; /* Public key. */
+ MPI d; /* Private key. */
+
+ const char *name; /* Name of the curve. */
+
+ /* This structure is private to mpi/ec.c! */
+ struct {
+ struct {
+ unsigned int a_is_pminus3:1;
+ unsigned int two_inv_p:1;
+ } valid; /* Flags to help setting the helper vars below. */
+
+ int a_is_pminus3; /* True if A = P - 3. */
+
+ MPI two_inv_p;
+
+ mpi_barrett_t p_barrett;
+
+ /* Scratch variables. */
+ MPI scratch[11];
+
+ /* Helper for fast reduction. */
+ /* int nist_nbits; /\* If this is a NIST curve, the # of bits. *\/ */
+ /* MPI s[10]; */
+ /* MPI c; */
+ } t;
+
+ /* Curve specific computation routines for the field. */
+ void (*addm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
+ void (*subm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ec);
+ void (*mulm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
+ void (*pow2)(MPI w, const MPI b, struct mpi_ec_ctx *ctx);
+ void (*mul2)(MPI w, MPI u, struct mpi_ec_ctx *ctx);
+};
+
+void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model,
+ enum ecc_dialects dialect,
+ int flags, MPI p, MPI a, MPI b);
+void mpi_ec_deinit(struct mpi_ec_ctx *ctx);
+MPI_POINT mpi_point_new(unsigned int nbits);
+void mpi_point_release(MPI_POINT p);
+void mpi_point_init(MPI_POINT p);
+void mpi_point_free_parts(MPI_POINT p);
+int mpi_ec_get_affine(MPI x, MPI y, MPI_POINT point, struct mpi_ec_ctx *ctx);
+void mpi_ec_add_points(MPI_POINT result,
+ MPI_POINT p1, MPI_POINT p2,
+ struct mpi_ec_ctx *ctx);
+void mpi_ec_mul_point(MPI_POINT result,
+ MPI scalar, MPI_POINT point,
+ struct mpi_ec_ctx *ctx);
+int mpi_ec_curve_point(MPI_POINT point, struct mpi_ec_ctx *ctx);
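/*
 * Rough composition sketch for the EC helpers above (assumes curve
 * parameters p/a/b, base point G and scalar d are already set up as
 * MPIs/points; error handling omitted):
 *
 *	struct mpi_ec_ctx ctx;
 *	MPI_POINT Q = mpi_point_new(0);
 *
 *	mpi_ec_init(&ctx, MPI_EC_WEIERSTRASS, ECC_DIALECT_STANDARD,
 *		    0, p, a, b);
 *	mpi_ec_mul_point(Q, d, G, &ctx);	// Q = d * G
 *	mpi_ec_get_affine(x, y, Q, &ctx);	// projective -> affine
 *	mpi_point_release(Q);
 *	mpi_ec_deinit(&ctx);
 */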
/* inline functions */
/**
* mpi_get_size() - returns max size required to store the number
*
- * @a: A multi precision integer for which we want to allocate a bufer
+ * @a: A multi precision integer for which we want to allocate a buffer
*
* Return: size required to store the number
*/
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index 9a36fad9e068..80b8400ab8b2 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -8,6 +8,7 @@
#include <net/fib_notifier.h>
#include <uapi/linux/mroute.h>
#include <linux/mroute_base.h>
+#include <linux/sockptr.h>
#ifdef CONFIG_IP_MROUTE
static inline int ip_mroute_opt(int opt)
@@ -15,21 +16,21 @@ static inline int ip_mroute_opt(int opt)
return opt >= MRT_BASE && opt <= MRT_MAX;
}
-int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
-int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
+int ip_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int);
+int ip_mroute_getsockopt(struct sock *, int, sockptr_t, sockptr_t);
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
int ip_mr_init(void);
bool ipmr_rule_default(const struct fib_rule *rule);
#else
static inline int ip_mroute_setsockopt(struct sock *sock, int optname,
- char __user *optval, unsigned int optlen)
+ sockptr_t optval, unsigned int optlen)
{
return -ENOPROTOOPT;
}
-static inline int ip_mroute_getsockopt(struct sock *sock, int optname,
- char __user *optval, int __user *optlen)
+static inline int ip_mroute_getsockopt(struct sock *sk, int optname,
+ sockptr_t optval, sockptr_t optlen)
{
return -ENOPROTOOPT;
}
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index c4a45859f586..8f2b307fb124 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -8,6 +8,7 @@
#include <net/net_namespace.h>
#include <uapi/linux/mroute6.h>
#include <linux/mroute_base.h>
+#include <linux/sockptr.h>
#include <net/fib_rules.h>
#ifdef CONFIG_IPV6_MROUTE
@@ -25,24 +26,23 @@ static inline int ip6_mroute_opt(int opt)
struct sock;
#ifdef CONFIG_IPV6_MROUTE
-extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
-extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
+extern int ip6_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int);
+extern int ip6_mroute_getsockopt(struct sock *, int, sockptr_t, sockptr_t);
extern int ip6_mr_input(struct sk_buff *skb);
extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg);
extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
extern int ip6_mr_init(void);
extern void ip6_mr_cleanup(void);
#else
-static inline
-int ip6_mroute_setsockopt(struct sock *sock,
- int optname, char __user *optval, unsigned int optlen)
+static inline int ip6_mroute_setsockopt(struct sock *sock, int optname,
+ sockptr_t optval, unsigned int optlen)
{
return -ENOPROTOOPT;
}
static inline
int ip6_mroute_getsockopt(struct sock *sock,
- int optname, char __user *optval, int __user *optlen)
+ int optname, sockptr_t optval, sockptr_t optlen)
{
return -ENOPROTOOPT;
}
diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
index 8071148f29a6..9dd4bf157255 100644
--- a/include/linux/mroute_base.h
+++ b/include/linux/mroute_base.h
@@ -12,6 +12,7 @@
/**
* struct vif_device - interface representor for multicast routing
* @dev: network device being used
+ * @dev_tracker: refcount tracker for @dev reference
* @bytes_in: statistic; bytes ingressing
 * @bytes_out: statistic; bytes egressing
* @pkt_in: statistic; packets ingressing
@@ -25,7 +26,8 @@
* @remote: Remote address for tunnels
*/
struct vif_device {
- struct net_device *dev;
+ struct net_device __rcu *dev;
+ netdevice_tracker dev_tracker;
unsigned long bytes_in, bytes_out;
unsigned long pkt_in, pkt_out;
unsigned long rate_limit;
@@ -50,6 +52,7 @@ static inline int mr_call_vif_notifier(struct notifier_block *nb,
unsigned short family,
enum fib_event_type event_type,
struct vif_device *vif,
+ struct net_device *vif_dev,
unsigned short vif_index, u32 tb_id,
struct netlink_ext_ack *extack)
{
@@ -58,7 +61,7 @@ static inline int mr_call_vif_notifier(struct notifier_block *nb,
.family = family,
.extack = extack,
},
- .dev = vif->dev,
+ .dev = vif_dev,
.vif_index = vif_index,
.vif_flags = vif->flags,
.tb_id = tb_id,
@@ -71,6 +74,7 @@ static inline int mr_call_vif_notifiers(struct net *net,
unsigned short family,
enum fib_event_type event_type,
struct vif_device *vif,
+ struct net_device *vif_dev,
unsigned short vif_index, u32 tb_id,
unsigned int *ipmr_seq)
{
@@ -78,7 +82,7 @@ static inline int mr_call_vif_notifiers(struct net *net,
.info = {
.family = family,
},
- .dev = vif->dev,
+ .dev = vif_dev,
.vif_index = vif_index,
.vif_flags = vif->flags,
.tb_id = tb_id,
@@ -96,7 +100,8 @@ static inline int mr_call_vif_notifiers(struct net *net,
#define MAXVIFS 32
#endif
-#define VIF_EXISTS(_mrt, _idx) (!!((_mrt)->vif_table[_idx].dev))
+/* Note: This helper is deprecated. */
+#define VIF_EXISTS(_mrt, _idx) (!!rcu_access_pointer((_mrt)->vif_table[_idx].dev))
/* mfc_flags:
* MFC_STATIC - the entry was added statically (not by a routing daemon)
@@ -303,7 +308,7 @@ int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
struct netlink_ext_ack *extack),
struct mr_table *(*mr_iter)(struct net *net,
struct mr_table *mrt),
- rwlock_t *mrt_lock, struct netlink_ext_ack *extack);
+ struct netlink_ext_ack *extack);
#else
static inline void vif_device_init(struct vif_device *v,
struct net_device *dev,
@@ -358,7 +363,7 @@ static inline int mr_dump(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack),
struct mr_table *(*mr_iter)(struct net *net,
struct mr_table *mrt),
- rwlock_t *mrt_lock, struct netlink_ext_ack *extack)
+ struct netlink_ext_ack *extack)
{
return -EINVAL;
}
diff --git a/include/linux/msdos_partition.h b/include/linux/msdos_partition.h
new file mode 100644
index 000000000000..2cb82db2a43c
--- /dev/null
+++ b/include/linux/msdos_partition.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MSDOS_PARTITION_H
+#define _LINUX_MSDOS_PARTITION_H
+
+#define MSDOS_LABEL_MAGIC 0xAA55
+
+struct msdos_partition {
+ u8 boot_ind; /* 0x80 - active */
+ u8 head; /* starting head */
+ u8 sector; /* starting sector */
+ u8 cyl; /* starting cylinder */
+ u8 sys_ind; /* What partition type */
+ u8 end_head; /* end head */
+ u8 end_sector; /* end sector */
+ u8 end_cyl; /* end cylinder */
+ __le32 start_sect; /* starting sector counting from 0 */
+ __le32 nr_sects; /* nr of sectors in partition */
+} __packed;
+
+enum msdos_sys_ind {
+ /*
+ * These three have identical behaviour; use the second one if DOS FDISK
+ * gets confused about extended/logical partitions starting past
+ * cylinder 1023.
+ */
+ DOS_EXTENDED_PARTITION = 5,
+ LINUX_EXTENDED_PARTITION = 0x85,
+ WIN98_EXTENDED_PARTITION = 0x0f,
+
+ LINUX_DATA_PARTITION = 0x83,
+ LINUX_LVM_PARTITION = 0x8e,
+ LINUX_RAID_PARTITION = 0xfd, /* autodetect RAID partition */
+
+ SOLARIS_X86_PARTITION = 0x82, /* also Linux swap partitions */
+ NEW_SOLARIS_X86_PARTITION = 0xbf,
+
+ DM6_AUX1PARTITION = 0x51, /* no DDO: use xlated geom */
+ DM6_AUX3PARTITION = 0x53, /* no DDO: use xlated geom */
+ DM6_PARTITION = 0x54, /* has DDO: use xlated geom & offset */
+ EZD_PARTITION = 0x55, /* EZ-DRIVE */
+
+ FREEBSD_PARTITION = 0xa5, /* FreeBSD Partition ID */
+ OPENBSD_PARTITION = 0xa6, /* OpenBSD Partition ID */
+ NETBSD_PARTITION = 0xa9, /* NetBSD Partition ID */
+ BSDI_PARTITION = 0xb7, /* BSDI Partition ID */
+ MINIX_PARTITION = 0x81, /* Minix Partition ID */
+ UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */
+};
+
+#endif /* _LINUX_MSDOS_PARTITION_H */
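/*
 * Usage sketch (hypothetical caller, not part of this patch): a classic
 * MBR stores four of these entries at byte offset 446 of sector 0, with
 * the 16-bit label magic at offset 510:
 *
 *	const u8 *sect = ...;			// 512-byte sector 0 buffer
 *	const struct msdos_partition *p =
 *		(const struct msdos_partition *)(sect + 446);
 *	int i;
 *
 *	if (get_unaligned_le16(sect + 510) != MSDOS_LABEL_MAGIC)
 *		return;				// no DOS partition table
 *	for (i = 0; i < 4; i++, p++)
 *		pr_info("part %d: type 0x%02x, %u sectors from %u\n", i,
 *			p->sys_ind, le32_to_cpu(p->nr_sects),
 *			le32_to_cpu(p->start_sect));
 */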
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8ad679e9d9c0..fc918a658d48 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -2,13 +2,68 @@
#ifndef LINUX_MSI_H
#define LINUX_MSI_H
-#include <linux/kobject.h>
+/*
+ * This header file contains MSI data structures and functions which are
+ * only relevant for:
+ * - Interrupt core code
+ * - PCI/MSI core code
+ * - MSI interrupt domain implementations
+ * - IOMMU, low level VFIO, NTB and other justified exceptions
+ * dealing with low level MSI details.
+ *
+ * Regular device drivers have no business with any of these functions and
+ * especially storing MSI descriptor pointers in random code is considered
+ * abuse. The only function which is relevant for drivers is msi_get_virq().
+ */
+
+#include <linux/cpumask.h>
+#include <linux/xarray.h>
+#include <linux/mutex.h>
#include <linux/list.h>
+#include <asm/msi.h>
+
+/* Dummy shadow structures if an architecture does not define them */
+#ifndef arch_msi_msg_addr_lo
+typedef struct arch_msi_msg_addr_lo {
+ u32 address_lo;
+} __attribute__ ((packed)) arch_msi_msg_addr_lo_t;
+#endif
+
+#ifndef arch_msi_msg_addr_hi
+typedef struct arch_msi_msg_addr_hi {
+ u32 address_hi;
+} __attribute__ ((packed)) arch_msi_msg_addr_hi_t;
+#endif
+
+#ifndef arch_msi_msg_data
+typedef struct arch_msi_msg_data {
+ u32 data;
+} __attribute__ ((packed)) arch_msi_msg_data_t;
+#endif
+/**
+ * msi_msg - Representation of an MSI message
+ * @address_lo: Low 32 bits of msi message address
+ * @arch_addrlo: Architecture specific shadow of @address_lo
+ * @address_hi: High 32 bits of msi message address
+ * (only used when device supports it)
+ * @arch_addrhi: Architecture specific shadow of @address_hi
+ * @data: MSI message data (usually 16 bits)
+ * @arch_data: Architecture specific shadow of @data
+ */
struct msi_msg {
- u32 address_lo; /* low 32 bits of msi message address */
- u32 address_hi; /* high 32 bits of msi message address */
- u32 data; /* 16 bits of msi message data */
+ union {
+ u32 address_lo;
+ arch_msi_msg_addr_lo_t arch_addr_lo;
+ };
+ union {
+ u32 address_hi;
+ arch_msi_msg_addr_hi_t arch_addr_hi;
+ };
+ union {
+ u32 data;
+ arch_msi_msg_data_t arch_data;
+ };
};
extern int pci_msi_ignore_mask;
@@ -17,6 +72,8 @@ struct irq_data;
struct msi_desc;
struct pci_dev;
struct platform_msi_priv_data;
+struct device_attribute;
+
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
#ifdef CONFIG_GENERIC_MSI_IRQ
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
@@ -30,61 +87,59 @@ typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
struct msi_msg *msg);
/**
- * platform_msi_desc - Platform device specific msi descriptor data
- * @msi_priv_data: Pointer to platform private data
- * @msi_index: The index of the MSI descriptor for multi MSI
- */
-struct platform_msi_desc {
- struct platform_msi_priv_data *msi_priv_data;
- u16 msi_index;
-};
-
-/**
- * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data
- * @msi_index: The index of the MSI descriptor
+ * pci_msi_desc - PCI/MSI specific MSI descriptor data
+ *
+ * @msi_mask: [PCI MSI] MSI cached mask bits
+ * @msix_ctrl: [PCI MSI-X] MSI-X cached per vector control bits
+ * @is_msix: [PCI MSI/X] True if MSI-X
+ * @multiple: [PCI MSI/X] log2 num of messages allocated
+ * @multi_cap: [PCI MSI/X] log2 num of messages supported
+ * @can_mask: [PCI MSI/X] Masking supported?
+ * @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit
+ * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
+ * @mask_pos: [PCI MSI] Mask register position
+ * @mask_base: [PCI MSI-X] Mask register base address
*/
-struct fsl_mc_msi_desc {
- u16 msi_index;
+struct pci_msi_desc {
+ union {
+ u32 msi_mask;
+ u32 msix_ctrl;
+ };
+ struct {
+ u8 is_msix : 1;
+ u8 multiple : 3;
+ u8 multi_cap : 3;
+ u8 can_mask : 1;
+ u8 is_64 : 1;
+ u8 is_virtual : 1;
+ unsigned default_irq;
+ } msi_attrib;
+ union {
+ u8 mask_pos;
+ void __iomem *mask_base;
+ };
};
-/**
- * ti_sci_inta_msi_desc - TISCI based INTA specific msi descriptor data
- * @dev_index: TISCI device index
- */
-struct ti_sci_inta_msi_desc {
- u16 dev_index;
-};
+#define MSI_MAX_INDEX ((unsigned int)USHRT_MAX)
/**
* struct msi_desc - Descriptor structure for MSI based interrupts
- * @list: List head for management
* @irq: The base interrupt number
* @nvec_used: The number of vectors used
* @dev: Pointer to the device which uses this descriptor
* @msg: The last set MSI message cached for reuse
* @affinity: Optional pointer to a cpu affinity mask for this descriptor
+ * @sysfs_attr: Pointer to sysfs device attribute
*
* @write_msi_msg: Callback that may be called when the MSI message
* address or data changes
* @write_msi_msg_data: Data parameter for the callback.
*
- * @masked: [PCI MSI/X] Mask bits
- * @is_msix: [PCI MSI/X] True if MSI-X
- * @multiple: [PCI MSI/X] log2 num of messages allocated
- * @multi_cap: [PCI MSI/X] log2 num of messages supported
- * @maskbit: [PCI MSI/X] Mask-Pending bit supported?
- * @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit
- * @entry_nr: [PCI MSI/X] Entry which is described by this descriptor
- * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
- * @mask_pos: [PCI MSI] Mask register position
- * @mask_base: [PCI MSI-X] Mask register base address
- * @platform: [platform] Platform device specific msi descriptor data
- * @fsl_mc: [fsl-mc] FSL MC device specific msi descriptor data
- * @inta: [INTA] TISCI based INTA specific msi descriptor data
+ * @msi_index: Index of the msi descriptor
+ * @pci: PCI specific msi descriptor data
*/
struct msi_desc {
/* Shared device/bus type independent data */
- struct list_head list;
unsigned int irq;
unsigned int nvec_used;
struct device *dev;
@@ -93,52 +148,71 @@ struct msi_desc {
#ifdef CONFIG_IRQ_MSI_IOMMU
const void *iommu_cookie;
#endif
+#ifdef CONFIG_SYSFS
+ struct device_attribute *sysfs_attrs;
+#endif
void (*write_msi_msg)(struct msi_desc *entry, void *data);
void *write_msi_msg_data;
- union {
- /* PCI MSI/X specific data */
- struct {
- u32 masked;
- struct {
- u8 is_msix : 1;
- u8 multiple : 3;
- u8 multi_cap : 3;
- u8 maskbit : 1;
- u8 is_64 : 1;
- u8 is_virtual : 1;
- u16 entry_nr;
- unsigned default_irq;
- } msi_attrib;
- union {
- u8 mask_pos;
- void __iomem *mask_base;
- };
- };
-
- /*
- * Non PCI variants add their data structure here. New
- * entries need to use a named structure. We want
- * proper name spaces for this. The PCI part is
- * anonymous for now as it would require an immediate
- * tree wide cleanup.
- */
- struct platform_msi_desc platform;
- struct fsl_mc_msi_desc fsl_mc;
- struct ti_sci_inta_msi_desc inta;
- };
+ u16 msi_index;
+ struct pci_msi_desc pci;
+};
+
+/*
+ * Filter values for the MSI descriptor iterators and accessor functions.
+ */
+enum msi_desc_filter {
+ /* All descriptors */
+ MSI_DESC_ALL,
+ /* Descriptors which have no interrupt associated */
+ MSI_DESC_NOTASSOCIATED,
+ /* Descriptors which have an interrupt associated */
+ MSI_DESC_ASSOCIATED,
+};
+
+/**
+ * msi_device_data - MSI per-device data
+ * @properties: MSI properties which are interesting to drivers
+ * @platform_data: Platform-MSI specific data
+ * @mutex: Mutex protecting the MSI descriptor store
+ * @__store: Xarray for storing MSI descriptor pointers
+ * @__iter_idx: Index to search the next entry for iterators
+ */
+struct msi_device_data {
+ unsigned long properties;
+ struct platform_msi_priv_data *platform_data;
+ struct mutex mutex;
+ struct xarray __store;
+ unsigned long __iter_idx;
};
-/* Helpers to hide struct msi_desc implementation details */
+int msi_setup_device_data(struct device *dev);
+
+unsigned int msi_get_virq(struct device *dev, unsigned int index);
+void msi_lock_descs(struct device *dev);
+void msi_unlock_descs(struct device *dev);
+
+struct msi_desc *msi_first_desc(struct device *dev, enum msi_desc_filter filter);
+struct msi_desc *msi_next_desc(struct device *dev, enum msi_desc_filter filter);
+
+/**
+ * msi_for_each_desc - Iterate the MSI descriptors
+ *
+ * @desc: struct msi_desc pointer used as iterator
+ * @dev: struct device pointer - device to iterate
+ * @filter: Filter for descriptor selection
+ *
+ * Notes:
+ * - The loop must be protected with a msi_lock_descs()/msi_unlock_descs()
+ * pair.
+ * - It is safe to remove a retrieved MSI descriptor in the loop.
+ */
+#define msi_for_each_desc(desc, dev, filter) \
+ for ((desc) = msi_first_desc((dev), (filter)); (desc); \
+ (desc) = msi_next_desc((dev), (filter)))
+
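/*
 * Iteration sketch (hypothetical caller, not part of this patch),
 * following the locking rule from the notes above:
 *
 *	struct msi_desc *desc;
 *
 *	msi_lock_descs(dev);
 *	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED)
 *		pr_debug("index %u -> Linux irq %u\n",
 *			 desc->msi_index, desc->irq);
 *	msi_unlock_descs(dev);
 */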
#define msi_desc_to_dev(desc) ((desc)->dev)
-#define dev_to_msi_list(dev) (&(dev)->msi_list)
-#define first_msi_entry(dev) \
- list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
-#define for_each_msi_entry(desc, dev) \
- list_for_each_entry((desc), dev_to_msi_list((dev)), list)
-#define for_each_msi_entry_safe(desc, tmp, dev) \
- list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
#ifdef CONFIG_IRQ_MSI_IOMMU
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
@@ -164,65 +238,64 @@ static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
#endif
#ifdef CONFIG_PCI_MSI
-#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
-#define for_each_pci_msi_entry(desc, pdev) \
- for_each_msi_entry((desc), &(pdev)->dev)
-
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
-void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
#else /* CONFIG_PCI_MSI */
-static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
-{
- return NULL;
-}
static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
#endif /* CONFIG_PCI_MSI */
-struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
- const struct irq_affinity_desc *affinity);
-void free_msi_entry(struct msi_desc *entry);
+int msi_add_msi_desc(struct device *dev, struct msi_desc *init_desc);
+void msi_free_msi_descs_range(struct device *dev, enum msi_desc_filter filter,
+ unsigned int first_index, unsigned int last_index);
+
+/**
+ * msi_free_msi_descs - Free MSI descriptors of a device
+ * @dev: Device to free the descriptors
+ */
+static inline void msi_free_msi_descs(struct device *dev)
+{
+ msi_free_msi_descs_range(dev, MSI_DESC_ALL, 0, MSI_MAX_INDEX);
+}
+
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
-u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
-u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);
/*
- * The arch hooks to setup up msi irqs. Those functions are
- * implemented as weak symbols so that they /can/ be overriden by
- * architecture specific code if needed.
+ * The arch hooks to set up MSI irqs. Default functions are implemented
+ * as weak symbols so that they /can/ be overridden by architecture-specific
+ * code if needed. These hooks can only be enabled by the architecture.
+ *
+ * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by
+ * stubs with warnings.
*/
+#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
-void arch_restore_msi_irqs(struct pci_dev *dev);
-
-void default_teardown_msi_irqs(struct pci_dev *dev);
-void default_restore_msi_irqs(struct pci_dev *dev);
-
-struct msi_controller {
- struct module *owner;
- struct device *dev;
- struct device_node *of_node;
- struct list_head list;
-
- int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
- struct msi_desc *desc);
- int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev,
- int nvec, int type);
- void (*teardown_irq)(struct msi_controller *chip, unsigned int irq);
-};
+#ifdef CONFIG_SYSFS
+int msi_device_populate_sysfs(struct device *dev);
+void msi_device_destroy_sysfs(struct device *dev);
+#else /* CONFIG_SYSFS */
+static inline int msi_device_populate_sysfs(struct device *dev) { return 0; }
+static inline void msi_device_destroy_sysfs(struct device *dev) { }
+#endif /* !CONFIG_SYSFS */
+#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
+
+/*
+ * The restore hook is still available even for fully irq domain based
+ * setups. Courtesy to XEN/X86.
+ */
+bool arch_restore_msi_irqs(struct pci_dev *dev);
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
#include <linux/irqhandler.h>
-#include <asm/msi.h>
struct irq_domain;
struct irq_domain_ops;
@@ -238,16 +311,33 @@ struct msi_domain_info;
* @msi_free: Domain specific function to free a MSI interrupts
* @msi_check: Callback for verification of the domain/info/dev data
* @msi_prepare: Prepare the allocation of the interrupts in the domain
- * @msi_finish: Optional callback to finalize the allocation
* @set_desc: Set the msi descriptor for an interrupt
- * @handle_error: Optional error handler if the allocation fails
+ * @domain_alloc_irqs: Optional function to override the default allocation
+ * function.
+ * @domain_free_irqs: Optional function to override the default free
+ * function.
+ *
+ * @get_hwirq, @msi_init and @msi_free are callbacks used by the underlying
+ * irqdomain.
+ *
+ * @msi_check, @msi_prepare and @set_desc are callbacks used by
+ * msi_domain_alloc/free_irqs().
+ *
+ * @domain_alloc_irqs, @domain_free_irqs can be used to override the
+ * default allocation/free functions (__msi_domain_alloc/free_irqs). This
+ * is initially for a wrapper around XEN's separate MSI universe, which can't
+ * be wrapped into the regular irq domain concepts by mere mortals. This
+ * allows msi_domain_alloc/free_irqs() to be used universally without having
+ * to special-case XEN all over the place.
*
- * @get_hwirq, @msi_init and @msi_free are callbacks used by
- * msi_create_irq_domain() and related interfaces
+ * Contrary to other operations, @domain_alloc_irqs and @domain_free_irqs
+ * are set to the default implementation if NULL, even when
+ * MSI_FLAG_USE_DEF_DOM_OPS is not set, to avoid breaking existing users and
+ * because these callbacks are obviously mandatory.
*
- * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
- * are callbacks used by msi_domain_alloc_irqs() and related
- * interfaces which are based on msi_desc.
+ * This is NOT meant to be abused, but it can be useful to build wrappers
+ * for specialized MSI irq domains which need extra work before and after
+ * calling __msi_domain_alloc_irqs()/__msi_domain_free_irqs().
*/
struct msi_domain_ops {
irq_hw_number_t (*get_hwirq)(struct msi_domain_info *info,
@@ -265,11 +355,12 @@ struct msi_domain_ops {
int (*msi_prepare)(struct irq_domain *domain,
struct device *dev, int nvec,
msi_alloc_info_t *arg);
- void (*msi_finish)(msi_alloc_info_t *arg, int retval);
void (*set_desc)(msi_alloc_info_t *arg,
struct msi_desc *desc);
- int (*handle_error)(struct irq_domain *domain,
- struct msi_desc *desc, int error);
+ int (*domain_alloc_irqs)(struct irq_domain *domain,
+ struct device *dev, int nvec);
+ void (*domain_free_irqs)(struct irq_domain *domain,
+ struct device *dev);
};
/**
@@ -319,6 +410,14 @@ enum {
MSI_FLAG_MUST_REACTIVATE = (1 << 5),
/* Is level-triggered capable, using two messages */
MSI_FLAG_LEVEL_CAPABLE = (1 << 6),
+ /* Populate sysfs on alloc() and destroy it on free() */
+ MSI_FLAG_DEV_SYSFS = (1 << 7),
+ /* MSI-X entries must be contiguous */
+ MSI_FLAG_MSIX_CONTIGUOUS = (1 << 8),
+ /* Allocate simple MSI descriptors */
+ MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS = (1 << 9),
+ /* Free MSI descriptors */
+ MSI_FLAG_FREE_MSI_DESCS = (1 << 10),
};
int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
@@ -327,8 +426,14 @@ int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent);
+int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
+ int nvec);
+int msi_domain_alloc_irqs_descs_locked(struct irq_domain *domain, struct device *dev,
+ int nvec);
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
int nvec);
+void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
+void msi_domain_free_irqs_descs_locked(struct irq_domain *domain, struct device *dev);
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
@@ -357,24 +462,20 @@ __platform_msi_create_device_domain(struct device *dev,
#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
__platform_msi_create_device_domain(dev, nvec, true, write, ops, data)
-int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs);
-void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nvec);
+int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs);
+void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nvec);
void *platform_msi_get_host_data(struct irq_domain *domain);
#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
-void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent);
-irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
- struct msi_desc *desc);
-int pci_msi_domain_check_cap(struct irq_domain *domain,
- struct msi_domain_info *info, struct device *dev);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
+bool pci_dev_has_special_msi_domain(struct pci_dev *pdev);
#else
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 886e30441c90..d890805f5494 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -98,7 +98,7 @@ struct nand_bbt_descr {
/*
* Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr
- * was allocated dynamicaly and must be freed in nand_release(). Has no meaning
+ * was allocated dynamically and must be freed in nand_cleanup(). Has no meaning
* in nand_chip.bbt_options.
*/
#define NAND_BBT_DYNAMICSTRUCT 0x80000000
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
index 3c668cb1e344..15cc9b95e32b 100644
--- a/include/linux/mtd/blktrans.h
+++ b/include/linux/mtd/blktrans.h
@@ -77,5 +77,16 @@ extern int add_mtd_blktrans_dev(struct mtd_blktrans_dev *dev);
extern int del_mtd_blktrans_dev(struct mtd_blktrans_dev *dev);
extern int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev);
+/**
+ * module_mtd_blktrans() - Helper macro for registering an mtd blktrans driver
+ * @__mtd_blktrans: mtd_blktrans_ops struct
+ *
+ * Helper macro for mtd blktrans drivers which do not do anything special in
+ * module init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit().
+ */
+#define module_mtd_blktrans(__mtd_blktrans) \
+ module_driver(__mtd_blktrans, register_mtd_blktrans, \
+ deregister_mtd_blktrans)
#endif /* __MTD_TRANS_H__ */
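/*
 * Registration sketch (hypothetical driver, not part of this patch):
 *
 *	static struct mtd_blktrans_ops example_tr = {	// made-up ops
 *		.name = "example",
 *		// ... remaining callbacks ...
 *	};
 *	module_mtd_blktrans(example_tr);
 *
 * The macro expands to module_init()/module_exit() stubs that call
 * register_mtd_blktrans(&example_tr) and deregister_mtd_blktrans(&example_tr).
 */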
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index c98a21108688..d88bb56c18e2 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -138,7 +138,7 @@ struct cfi_ident {
uint16_t InterfaceDesc;
uint16_t MaxBufWriteSize;
uint8_t NumEraseRegions;
- uint32_t EraseRegionInfo[0]; /* Not host ordered */
+ uint32_t EraseRegionInfo[]; /* Not host ordered */
} __packed;
/* Extended Query Structure for both PRI and ALT */
@@ -165,7 +165,7 @@ struct cfi_pri_intelext {
uint16_t ProtRegAddr;
uint8_t FactProtRegSize;
uint8_t UserProtRegSize;
- uint8_t extra[0];
+ uint8_t extra[];
} __packed;
struct cfi_intelext_otpinfo {
@@ -286,7 +286,8 @@ struct cfi_private {
map_word sector_erase_cmd;
unsigned long chipshift; /* Because they're of the same type */
const char *im_name; /* inter_module name for cmdset_setup */
- struct flchip chips[0]; /* per-chip data structure for each chip */
+ unsigned long quirks;
+ struct flchip chips[]; /* per-chip data structure for each chip */
};
uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
diff --git a/include/linux/mtd/hyperbus.h b/include/linux/mtd/hyperbus.h
index 2dfe65964f6e..bb6b7121a542 100644
--- a/include/linux/mtd/hyperbus.h
+++ b/include/linux/mtd/hyperbus.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __LINUX_MTD_HYPERBUS_H__
@@ -8,6 +8,17 @@
#include <linux/mtd/map.h>
+/* HyperBus command bits */
+#define HYPERBUS_RW 0x80 /* R/W# */
+#define HYPERBUS_RW_WRITE 0
+#define HYPERBUS_RW_READ 0x80
+#define HYPERBUS_AS 0x40 /* Address Space */
+#define HYPERBUS_AS_MEM 0
+#define HYPERBUS_AS_REG 0x40
+#define HYPERBUS_BT 0x20 /* Burst Type */
+#define HYPERBUS_BT_WRAPPED 0
+#define HYPERBUS_BT_LINEAR 0x20
+
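/*
 * For instance, the command/address byte that carries these bits would be
 * built as follows for a linear read from register space (illustration
 * only):
 *
 *	u8 cab = HYPERBUS_RW_READ | HYPERBUS_AS_REG | HYPERBUS_BT_LINEAR;
 */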
enum hyperbus_memtype {
HYPERFLASH,
HYPERRAM,
@@ -20,6 +31,7 @@ enum hyperbus_memtype {
* @mtd: pointer to MTD struct
* @ctlr: pointer to HyperBus controller struct
* @memtype: type of memory device: HyperFlash or HyperRAM
+ * @priv: pointer to controller-specific per-device private data
*/
struct hyperbus_device {
@@ -28,6 +40,7 @@ struct hyperbus_device {
struct mtd_info *mtd;
struct hyperbus_ctlr *ctlr;
enum hyperbus_memtype memtype;
+ void *priv;
};
/**
@@ -76,9 +89,7 @@ int hyperbus_register_device(struct hyperbus_device *hbdev);
/**
* hyperbus_unregister_device - deregister HyperBus slave memory device
* @hbdev: hyperbus_device to be unregistered
- *
- * Return: 0 for success, others for failure.
*/
-int hyperbus_unregister_device(struct hyperbus_device *hbdev);
+void hyperbus_unregister_device(struct hyperbus_device *hbdev);
#endif /* __LINUX_MTD_HYPERBUS_H__ */
diff --git a/include/linux/mtd/latch-addr-flash.h b/include/linux/mtd/latch-addr-flash.h
deleted file mode 100644
index e94b8e128074..000000000000
--- a/include/linux/mtd/latch-addr-flash.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Interface for NOR flash driver whose high address lines are latched
- *
- * Copyright © 2008 MontaVista Software, Inc. <source@mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-#ifndef __LATCH_ADDR_FLASH__
-#define __LATCH_ADDR_FLASH__
-
-struct map_info;
-struct mtd_partition;
-
-struct latch_addr_flash_data {
- unsigned int width;
- unsigned int size;
-
- int (*init)(void *data, int cs);
- void (*done)(void *data);
- void (*set_window)(unsigned long offset, void *data);
- void *data;
-
- unsigned int nr_parts;
- struct mtd_partition *parts;
-};
-
-#endif
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 249e8d9bfbcd..7c58c44662b8 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <linux/uio.h>
+#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/of.h>
@@ -39,6 +40,12 @@ struct mtd_erase_region_info {
unsigned long *lockmap; /* If keeping bitmap of locks */
};
+struct mtd_req_stats {
+ unsigned int uncorrectable_errors;
+ unsigned int corrected_bitflips;
+ unsigned int max_bitflips;
+};
+
/**
* struct mtd_oob_ops - oob operation operands
* @mode: operation mode
@@ -69,10 +76,9 @@ struct mtd_oob_ops {
uint32_t ooboffs;
uint8_t *datbuf;
uint8_t *oobbuf;
+ struct mtd_req_stats *stats;
};
-#define MTD_MAX_OOBFREE_ENTRIES_LARGE 32
-#define MTD_MAX_ECCPOS_ENTRIES_LARGE 640
/**
* struct mtd_oob_region - oob region definition
* @offset: region offset
@@ -189,15 +195,49 @@ struct module; /* only needed for owner field in mtd_info */
*/
struct mtd_debug_info {
struct dentry *dfs_dir;
+};
+
+/**
+ * struct mtd_part - MTD partition specific fields
+ *
+ * @node: list node used to add an MTD partition to the parent partition list
+ * @offset: offset of the partition relative to the parent offset
+ * @size: partition size. Should be equal to mtd->size unless
+ * MTD_SLC_ON_MLC_EMULATION is set
+ * @flags: original flags (before the mtdpart logic decided to tweak them based
+ * on flash constraints, like eraseblock/pagesize alignment)
+ *
+ * This struct is embedded in mtd_info and contains partition-specific
+ * properties/fields.
+ */
+struct mtd_part {
+ struct list_head node;
+ u64 offset;
+ u64 size;
+ u32 flags;
+};
- const char *partname;
- const char *partid;
+/**
+ * struct mtd_master - MTD master specific fields
+ *
+ * @partitions_lock: lock protecting accesses to the partition list. Protects
+ * not only the master partition list, but also all
+ * sub-partitions.
+ * @suspended: set to 1 when the device is suspended, 0 otherwise
+ *
+ * This struct is embedded in mtd_info and contains master-specific
+ * properties/fields. The master is the root MTD device from the MTD partition
+ * point of view.
+ */
+struct mtd_master {
+ struct mutex partitions_lock;
+ struct mutex chrdev_lock;
+ unsigned int suspended : 1;
};
struct mtd_info {
u_char type;
uint32_t flags;
- uint32_t orig_flags; /* Flags as before running mtd checks */
uint64_t size; // Total size of the MTD
/* "Major" erase size for the device. Naïve users may take this
@@ -296,9 +336,12 @@ struct mtd_info {
int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf);
int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to,
- size_t len, size_t *retlen, u_char *buf);
+ size_t len, size_t *retlen,
+ const u_char *buf);
int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
size_t len);
+ int (*_erase_user_prot_reg) (struct mtd_info *mtd, loff_t from,
+ size_t len);
int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen);
void (*_sync) (struct mtd_info *mtd);
@@ -339,8 +382,52 @@ struct mtd_info {
int usecount;
struct mtd_debug_info dbg;
struct nvmem_device *nvmem;
+ struct nvmem_device *otp_user_nvmem;
+ struct nvmem_device *otp_factory_nvmem;
+
+ /*
+ * Parent device from the MTD partition point of view.
+ *
+ * MTD masters do not have any parent; MTD partitions do. The parent
+ * MTD device can itself be a partition.
+ */
+ struct mtd_info *parent;
+
+ /* List of partitions attached to this MTD device */
+ struct list_head partitions;
+
+ struct mtd_part part;
+ struct mtd_master master;
};
+static inline struct mtd_info *mtd_get_master(struct mtd_info *mtd)
+{
+ while (mtd->parent)
+ mtd = mtd->parent;
+
+ return mtd;
+}
+
+static inline u64 mtd_get_master_ofs(struct mtd_info *mtd, u64 ofs)
+{
+ while (mtd->parent) {
+ ofs += mtd->part.offset;
+ mtd = mtd->parent;
+ }
+
+ return ofs;
+}
+
+static inline bool mtd_is_partition(const struct mtd_info *mtd)
+{
+ return mtd->parent;
+}
+
+static inline bool mtd_has_partitions(const struct mtd_info *mtd)
+{
+ return !list_empty(&mtd->partitions);
+}
+
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobecc);
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
@@ -392,13 +479,16 @@ static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
static inline int mtd_max_bad_blocks(struct mtd_info *mtd,
loff_t ofs, size_t len)
{
- if (!mtd->_max_bad_blocks)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_max_bad_blocks)
return -ENOTSUPP;
if (mtd->size < (len + ofs) || ofs < 0)
return -EINVAL;
- return mtd->_max_bad_blocks(mtd, ofs, len);
+ return master->_max_bad_blocks(master, mtd_get_master_ofs(mtd, ofs),
+ len);
}
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
@@ -431,16 +521,19 @@ int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf);
int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, u_char *buf);
+ size_t *retlen, const u_char *buf);
int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len);
+int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len);
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen);
static inline void mtd_sync(struct mtd_info *mtd)
{
- if (mtd->_sync)
- mtd->_sync(mtd);
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (master->_sync)
+ master->_sync(master);
}
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
@@ -452,13 +545,31 @@ int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs);
static inline int mtd_suspend(struct mtd_info *mtd)
{
- return mtd->_suspend ? mtd->_suspend(mtd) : 0;
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ret;
+
+ if (master->master.suspended)
+ return 0;
+
+ ret = master->_suspend ? master->_suspend(master) : 0;
+ if (ret)
+ return ret;
+
+ master->master.suspended = 1;
+ return 0;
}
static inline void mtd_resume(struct mtd_info *mtd)
{
- if (mtd->_resume)
- mtd->_resume(mtd);
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->master.suspended)
+ return;
+
+ if (master->_resume)
+ master->_resume(master);
+
+ master->master.suspended = 0;
}
static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
@@ -521,7 +632,9 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
static inline int mtd_wunit_per_eb(struct mtd_info *mtd)
{
- return mtd->erasesize / mtd->writesize;
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ return master->erasesize / mtd->writesize;
}
static inline int mtd_offset_to_wunit(struct mtd_info *mtd, loff_t offs)
@@ -538,7 +651,9 @@ static inline loff_t mtd_wunit_to_offset(struct mtd_info *mtd, loff_t base,
static inline int mtd_has_oob(const struct mtd_info *mtd)
{
- return mtd->_read_oob && mtd->_write_oob;
+ struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd);
+
+ return master->_read_oob && master->_write_oob;
}
static inline int mtd_type_is_nand(const struct mtd_info *mtd)
@@ -548,7 +663,9 @@ static inline int mtd_type_is_nand(const struct mtd_info *mtd)
static inline int mtd_can_have_bb(const struct mtd_info *mtd)
{
- return !!mtd->_block_isbad;
+ struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd);
+
+ return !!master->_block_isbad;
}
/* Kernel-side ioctl definitions */
@@ -567,6 +684,7 @@ extern int mtd_device_unregister(struct mtd_info *master);
extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
extern int __get_mtd_device(struct mtd_info *mtd);
extern void __put_mtd_device(struct mtd_info *mtd);
+extern struct mtd_info *of_get_mtd_device_by_node(struct device_node *np);
extern struct mtd_info *get_mtd_device_nm(const char *name);
extern void put_mtd_device(struct mtd_info *mtd);
@@ -596,4 +714,11 @@ static inline int mtd_is_bitflip_or_eccerr(int err) {
unsigned mtd_mmap_capabilities(struct mtd_info *mtd);
+#ifdef CONFIG_DEBUG_FS
+bool mtd_check_expert_analysis_mode(void);
+#else
+static inline bool mtd_check_expert_analysis_mode(void) { return false; }
+#endif
+
#endif /* __MTD_MTD_H__ */
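
A minimal sketch of how the new master helpers and the per-request statistics hook fit together. The example_read_with_stats() wrapper is hypothetical, and it assumes the underlying driver actually fills the stats structure during the read:

#include <linux/mtd/mtd.h>

static int example_read_with_stats(struct mtd_info *mtd, loff_t ofs,
                                   size_t len, u_char *buf)
{
        struct mtd_info *master = mtd_get_master(mtd);
        struct mtd_req_stats stats = {};
        struct mtd_oob_ops ops = {
                .mode = MTD_OPS_AUTO_OOB,
                .len = len,
                .datbuf = buf,
                .stats = &stats,
        };
        int ret;

        ret = mtd_read_oob(mtd, ofs, &ops);
        if (ret && !mtd_is_bitflip_or_eccerr(ret))
                return ret;

        /* Partition offsets are translated into the master address space. */
        pr_info("%s: master %s, offset %llu in the master\n", mtd->name,
                master->name,
                (unsigned long long)mtd_get_master_ofs(mtd, ofs));
        pr_info("%u bitflips corrected (max %u per step), %u uncorrectable\n",
                stats.corrected_bitflips, stats.max_bitflips,
                stats.uncorrectable_errors);
        return 0;
}
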
diff --git a/drivers/mtd/nand/raw/mtk_ecc.h b/include/linux/mtd/nand-ecc-mtk.h
index 0e48c36e6ca0..0e48c36e6ca0 100644
--- a/drivers/mtd/nand/raw/mtk_ecc.h
+++ b/include/linux/mtd/nand-ecc-mtk.h
diff --git a/include/linux/mtd/nand-ecc-mxic.h b/include/linux/mtd/nand-ecc-mxic.h
new file mode 100644
index 000000000000..b125926e458c
--- /dev/null
+++ b/include/linux/mtd/nand-ecc-mxic.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2019 Macronix
+ * Author: Miquèl Raynal <miquel.raynal@bootlin.com>
+ *
+ * Header for the Macronix external ECC engine.
+ */
+
+#ifndef __MTD_NAND_ECC_MXIC_H__
+#define __MTD_NAND_ECC_MXIC_H__
+
+#include <linux/platform_device.h>
+#include <linux/device.h>
+
+struct mxic_ecc_engine;
+
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_MXIC) && IS_REACHABLE(CONFIG_MTD_NAND_CORE)
+
+struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void);
+struct nand_ecc_engine *mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev);
+void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng);
+int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng,
+ unsigned int direction, dma_addr_t dirmap);
+
+#else /* !CONFIG_MTD_NAND_ECC_MXIC */
+
+static inline struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void)
+{
+ return NULL;
+}
+
+static inline struct nand_ecc_engine *
+mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng) {}
+
+static inline int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng,
+ unsigned int direction,
+ dma_addr_t dirmap)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_MTD_NAND_ECC_MXIC */
+
+#endif /* __MTD_NAND_ECC_MXIC_H__ */
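
As a usage sketch (the example_bind_mxic_engine() wrapper is hypothetical), a SPI controller driver would typically request the pipelined engine and treat the stubbed -EOPNOTSUPP return like any other error:

#include <linux/err.h>
#include <linux/mtd/nand-ecc-mxic.h>

static int example_bind_mxic_engine(struct platform_device *spi_pdev,
                                    struct nand_ecc_engine **out)
{
        struct nand_ecc_engine *eng = mxic_ecc_get_pipelined_engine(spi_pdev);

        if (IS_ERR(eng))
                return PTR_ERR(eng);    /* -EOPNOTSUPP when stubbed out */

        *out = eng;
        return 0;
}

The engine would later be released with mxic_ecc_put_pipelined_engine().
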
diff --git a/include/linux/mtd/nand-ecc-sw-bch.h b/include/linux/mtd/nand-ecc-sw-bch.h
new file mode 100644
index 000000000000..9da9969505a8
--- /dev/null
+++ b/include/linux/mtd/nand-ecc-sw-bch.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com>
+ *
+ * This file is the header for the NAND BCH ECC implementation.
+ */
+
+#ifndef __MTD_NAND_ECC_SW_BCH_H__
+#define __MTD_NAND_ECC_SW_BCH_H__
+
+#include <linux/mtd/nand.h>
+#include <linux/bch.h>
+
+/**
+ * struct nand_ecc_sw_bch_conf - private software BCH ECC engine structure
+ * @req_ctx: Save request context and tweak the original request to fit the
+ * engine needs
+ * @code_size: Number of bytes needed to store a code (one code per step)
+ * @calc_buf: Buffer to use when calculating ECC bytes
+ * @code_buf: Buffer to use when reading (raw) ECC bytes from the chip
+ * @bch: BCH control structure
+ * @errloc: error location array
+ * @eccmask: XOR ecc mask, allows erased pages to be decoded as valid
+ */
+struct nand_ecc_sw_bch_conf {
+ struct nand_ecc_req_tweak_ctx req_ctx;
+ unsigned int code_size;
+ u8 *calc_buf;
+ u8 *code_buf;
+ struct bch_control *bch;
+ unsigned int *errloc;
+ unsigned char *eccmask;
+};
+
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)
+
+int nand_ecc_sw_bch_calculate(struct nand_device *nand,
+ const unsigned char *buf, unsigned char *code);
+int nand_ecc_sw_bch_correct(struct nand_device *nand, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc);
+int nand_ecc_sw_bch_init_ctx(struct nand_device *nand);
+void nand_ecc_sw_bch_cleanup_ctx(struct nand_device *nand);
+struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void);
+
+#else /* !CONFIG_MTD_NAND_ECC_SW_BCH */
+
+static inline int nand_ecc_sw_bch_calculate(struct nand_device *nand,
+ const unsigned char *buf,
+ unsigned char *code)
+{
+ return -ENOTSUPP;
+}
+
+static inline int nand_ecc_sw_bch_correct(struct nand_device *nand,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ return -ENOTSUPP;
+}
+
+static inline int nand_ecc_sw_bch_init_ctx(struct nand_device *nand)
+{
+ return -ENOTSUPP;
+}
+
+static inline void nand_ecc_sw_bch_cleanup_ctx(struct nand_device *nand) {}
+
+#endif /* CONFIG_MTD_NAND_ECC_SW_BCH */
+
+#endif /* __MTD_NAND_ECC_SW_BCH_H__ */
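
A sketch of a single-step round trip with this API; example_bch_roundtrip() is hypothetical and assumes nand_ecc_sw_bch_init_ctx() has already succeeded on @nand:

#include <linux/mtd/nand-ecc-sw-bch.h>

static int example_bch_roundtrip(struct nand_device *nand,
                                 unsigned char *buf,
                                 unsigned char *read_ecc,
                                 unsigned char *calc_ecc)
{
        int ret = nand_ecc_sw_bch_calculate(nand, buf, calc_ecc);

        if (ret)
                return ret;

        /* Negative on uncorrectable data, otherwise the bitflip count. */
        return nand_ecc_sw_bch_correct(nand, buf, read_ecc, calc_ecc);
}
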
diff --git a/include/linux/mtd/nand-ecc-sw-hamming.h b/include/linux/mtd/nand-ecc-sw-hamming.h
new file mode 100644
index 000000000000..c6c71894c575
--- /dev/null
+++ b/include/linux/mtd/nand-ecc-sw-hamming.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2000-2010 Steven J. Hill <sjhill@realitydiluted.com>
+ * David Woodhouse <dwmw2@infradead.org>
+ * Thomas Gleixner <tglx@linutronix.de>
+ *
+ * This file is the header for the NAND Hamming ECC implementation.
+ */
+
+#ifndef __MTD_NAND_ECC_SW_HAMMING_H__
+#define __MTD_NAND_ECC_SW_HAMMING_H__
+
+#include <linux/mtd/nand.h>
+
+/**
+ * struct nand_ecc_sw_hamming_conf - private software Hamming ECC engine structure
+ * @req_ctx: Save request context and tweak the original request to fit the
+ * engine needs
+ * @code_size: Number of bytes needed to store a code (one code per step)
+ * @calc_buf: Buffer to use when calculating ECC bytes
+ * @code_buf: Buffer to use when reading (raw) ECC bytes from the chip
+ * @sm_order: Smart Media special ordering
+ */
+struct nand_ecc_sw_hamming_conf {
+ struct nand_ecc_req_tweak_ctx req_ctx;
+ unsigned int code_size;
+ u8 *calc_buf;
+ u8 *code_buf;
+ unsigned int sm_order;
+};
+
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING)
+
+int nand_ecc_sw_hamming_init_ctx(struct nand_device *nand);
+void nand_ecc_sw_hamming_cleanup_ctx(struct nand_device *nand);
+int ecc_sw_hamming_calculate(const unsigned char *buf, unsigned int step_size,
+ unsigned char *code, bool sm_order);
+int nand_ecc_sw_hamming_calculate(struct nand_device *nand,
+ const unsigned char *buf,
+ unsigned char *code);
+int ecc_sw_hamming_correct(unsigned char *buf, unsigned char *read_ecc,
+ unsigned char *calc_ecc, unsigned int step_size,
+ bool sm_order);
+int nand_ecc_sw_hamming_correct(struct nand_device *nand, unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc);
+
+#else /* !CONFIG_MTD_NAND_ECC_SW_HAMMING */
+
+static inline int nand_ecc_sw_hamming_init_ctx(struct nand_device *nand)
+{
+ return -ENOTSUPP;
+}
+
+static inline void nand_ecc_sw_hamming_cleanup_ctx(struct nand_device *nand) {}
+
+static inline int ecc_sw_hamming_calculate(const unsigned char *buf,
+ unsigned int step_size,
+ unsigned char *code, bool sm_order)
+{
+ return -ENOTSUPP;
+}
+
+static inline int nand_ecc_sw_hamming_calculate(struct nand_device *nand,
+ const unsigned char *buf,
+ unsigned char *code)
+{
+ return -ENOTSUPP;
+}
+
+static inline int ecc_sw_hamming_correct(unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc,
+ unsigned int step_size, bool sm_order)
+{
+ return -ENOTSUPP;
+}
+
+static inline int nand_ecc_sw_hamming_correct(struct nand_device *nand,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ return -ENOTSUPP;
+}
+
+#endif /* CONFIG_MTD_NAND_ECC_SW_HAMMING */
+
+#endif /* __MTD_NAND_ECC_SW_HAMMING_H__ */
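
The bare ecc_sw_hamming_*() helpers can also be used without a nand_device. Below is a hypothetical single-step check (example_hamming_check()) over the classic 256-byte block protected by 3 ECC bytes:

#include <linux/mtd/nand-ecc-sw-hamming.h>

static int example_hamming_check(unsigned char *data,
                                 unsigned char *read_ecc, bool sm_order)
{
        unsigned char calc_ecc[3];
        int ret = ecc_sw_hamming_calculate(data, 256, calc_ecc, sm_order);

        if (ret)
                return ret;

        /* Fixes at most one bitflip in place; -EBADMSG beyond that. */
        return ecc_sw_hamming_correct(data, read_ecc, calc_ecc, 256,
                                      sm_order);
}
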
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 0c7483843a32..c3693bb87b4c 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -12,6 +12,8 @@
#include <linux/mtd/mtd.h>
+struct nand_device;
+
/**
* struct nand_memory_organization - Memory organization structure
* @bits_per_cell: number of bits per NAND cell
@@ -81,7 +83,18 @@ struct nand_pos {
};
/**
+ * enum nand_page_io_req_type - Direction of an I/O request
+ * @NAND_PAGE_READ: from the chip, to the controller
+ * @NAND_PAGE_WRITE: from the controller, to the chip
+ */
+enum nand_page_io_req_type {
+ NAND_PAGE_READ = 0,
+ NAND_PAGE_WRITE,
+};
+
+/**
* struct nand_page_io_req - NAND I/O request object
+ * @type: the type of page I/O: read or write
* @pos: the position this I/O request is targeting
* @dataoffs: the offset within the page
* @datalen: number of data bytes to read from/write to this page
@@ -97,6 +110,7 @@ struct nand_pos {
* specific commands/operations.
*/
struct nand_page_io_req {
+ enum nand_page_io_req_type type;
struct nand_pos pos;
unsigned int dataoffs;
unsigned int datalen;
@@ -113,18 +127,77 @@ struct nand_page_io_req {
int mode;
};
+const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void);
+const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void);
+const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void);
+
+/**
+ * enum nand_ecc_engine_type - NAND ECC engine type
+ * @NAND_ECC_ENGINE_TYPE_INVALID: Invalid value
+ * @NAND_ECC_ENGINE_TYPE_NONE: No ECC correction
+ * @NAND_ECC_ENGINE_TYPE_SOFT: Software ECC correction
+ * @NAND_ECC_ENGINE_TYPE_ON_HOST: On host hardware ECC correction
+ * @NAND_ECC_ENGINE_TYPE_ON_DIE: On chip hardware ECC correction
+ */
+enum nand_ecc_engine_type {
+ NAND_ECC_ENGINE_TYPE_INVALID,
+ NAND_ECC_ENGINE_TYPE_NONE,
+ NAND_ECC_ENGINE_TYPE_SOFT,
+ NAND_ECC_ENGINE_TYPE_ON_HOST,
+ NAND_ECC_ENGINE_TYPE_ON_DIE,
+};
+
+/**
+ * enum nand_ecc_placement - NAND ECC bytes placement
+ * @NAND_ECC_PLACEMENT_UNKNOWN: The actual position of the ECC bytes is unknown
+ * @NAND_ECC_PLACEMENT_OOB: The ECC bytes are located in the OOB area
+ * @NAND_ECC_PLACEMENT_INTERLEAVED: Syndrome layout, there are ECC bytes
+ * interleaved with regular data in the main
+ * area
+ */
+enum nand_ecc_placement {
+ NAND_ECC_PLACEMENT_UNKNOWN,
+ NAND_ECC_PLACEMENT_OOB,
+ NAND_ECC_PLACEMENT_INTERLEAVED,
+};
+
+/**
+ * enum nand_ecc_algo - NAND ECC algorithm
+ * @NAND_ECC_ALGO_UNKNOWN: Unknown algorithm
+ * @NAND_ECC_ALGO_HAMMING: Hamming algorithm
+ * @NAND_ECC_ALGO_BCH: Bose-Chaudhuri-Hocquenghem algorithm
+ * @NAND_ECC_ALGO_RS: Reed-Solomon algorithm
+ */
+enum nand_ecc_algo {
+ NAND_ECC_ALGO_UNKNOWN,
+ NAND_ECC_ALGO_HAMMING,
+ NAND_ECC_ALGO_BCH,
+ NAND_ECC_ALGO_RS,
+};
+
/**
- * struct nand_ecc_req - NAND ECC requirements
+ * struct nand_ecc_props - NAND ECC properties
+ * @engine_type: ECC engine type
+ * @placement: OOB placement (if relevant)
+ * @algo: ECC algorithm (if relevant)
* @strength: ECC strength
- * @step_size: ECC step/block size
+ * @step_size: Number of bytes per step
+ * @flags: Misc properties
*/
-struct nand_ecc_req {
+struct nand_ecc_props {
+ enum nand_ecc_engine_type engine_type;
+ enum nand_ecc_placement placement;
+ enum nand_ecc_algo algo;
unsigned int strength;
unsigned int step_size;
+ unsigned int flags;
};
#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }
+/* NAND ECC misc flags */
+#define NAND_ECC_MAXIMIZE_STRENGTH BIT(0)
+
/**
* struct nand_bbt - bad block table object
* @cache: in memory BBT cache
@@ -133,8 +206,6 @@ struct nand_bbt {
unsigned long *cache;
};
-struct nand_device;
-
/**
* struct nand_ops - NAND operations
* @erase: erase a specific block. No need to check if the block is bad before
@@ -158,10 +229,177 @@ struct nand_ops {
};
/**
+ * struct nand_ecc_context - Context for the ECC engine
+ * @conf: basic ECC engine parameters
+ * @nsteps: number of ECC steps
+ * @total: total number of bytes used for storing ECC codes; this is used by
+ * generic OOB layouts
+ * @priv: ECC engine driver private data
+ */
+struct nand_ecc_context {
+ struct nand_ecc_props conf;
+ unsigned int nsteps;
+ unsigned int total;
+ void *priv;
+};
+
+/**
+ * struct nand_ecc_engine_ops - ECC engine operations
+ * @init_ctx: given a desired user configuration for the targeted NAND device,
+ *            requests the ECC engine driver to set up a configuration with
+ *            values it supports.
+ * @cleanup_ctx: clean the context initialized by @init_ctx.
+ * @prepare_io_req: is called before reading/writing a page to prepare the I/O
+ * request to be performed with ECC correction.
+ * @finish_io_req: is called after reading/writing a page to terminate the I/O
+ * request and ensure proper ECC correction.
+ */
+struct nand_ecc_engine_ops {
+ int (*init_ctx)(struct nand_device *nand);
+ void (*cleanup_ctx)(struct nand_device *nand);
+ int (*prepare_io_req)(struct nand_device *nand,
+ struct nand_page_io_req *req);
+ int (*finish_io_req)(struct nand_device *nand,
+ struct nand_page_io_req *req);
+};
+
+/**
+ * enum nand_ecc_engine_integration - How the NAND ECC engine is integrated
+ * @NAND_ECC_ENGINE_INTEGRATION_INVALID: Invalid value
+ * @NAND_ECC_ENGINE_INTEGRATION_PIPELINED: Pipelined engine, performs on-the-fly
+ * correction, does not need to copy
+ * data around
+ * @NAND_ECC_ENGINE_INTEGRATION_EXTERNAL: External engine, needs to bring the
+ * data into its own area before use
+ */
+enum nand_ecc_engine_integration {
+ NAND_ECC_ENGINE_INTEGRATION_INVALID,
+ NAND_ECC_ENGINE_INTEGRATION_PIPELINED,
+ NAND_ECC_ENGINE_INTEGRATION_EXTERNAL,
+};
+
+/**
+ * struct nand_ecc_engine - ECC engine abstraction for NAND devices
+ * @dev: Host device
+ * @node: Private field for registration time
+ * @ops: ECC engine operations
+ * @integration: How the engine is integrated with the host
+ * (only relevant on %NAND_ECC_ENGINE_TYPE_ON_HOST engines)
+ * @priv: Private data
+ */
+struct nand_ecc_engine {
+ struct device *dev;
+ struct list_head node;
+ struct nand_ecc_engine_ops *ops;
+ enum nand_ecc_engine_integration integration;
+ void *priv;
+};
+
+void of_get_nand_ecc_user_config(struct nand_device *nand);
+int nand_ecc_init_ctx(struct nand_device *nand);
+void nand_ecc_cleanup_ctx(struct nand_device *nand);
+int nand_ecc_prepare_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req);
+int nand_ecc_finish_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req);
+bool nand_ecc_is_strong_enough(struct nand_device *nand);
+
+#if IS_REACHABLE(CONFIG_MTD_NAND_CORE)
+int nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine);
+int nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine);
+#else
+static inline int
+nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine)
+{
+ return -ENOTSUPP;
+}
+static inline int
+nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand);
+struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand);
+struct nand_ecc_engine *nand_ecc_get_on_host_hw_engine(struct nand_device *nand);
+void nand_ecc_put_on_host_hw_engine(struct nand_device *nand);
+struct device *nand_ecc_get_engine_dev(struct device *host);
+
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING)
+struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void);
+#else
+static inline struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void)
+{
+ return NULL;
+}
+#endif /* CONFIG_MTD_NAND_ECC_SW_HAMMING */
+
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)
+struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void);
+#else
+static inline struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void)
+{
+ return NULL;
+}
+#endif /* CONFIG_MTD_NAND_ECC_SW_BCH */
+
+/**
+ * struct nand_ecc_req_tweak_ctx - Help for automatically tweaking requests
+ * @orig_req: Pointer to the original IO request
+ * @nand: Related NAND device, to have access to its memory organization
+ * @page_buffer_size: Real size of the page buffer to use (can be set by the
+ * user before the tweaking mechanism initialization)
+ * @oob_buffer_size: Real size of the OOB buffer to use (can be set by the
+ * user before the tweaking mechanism initialization)
+ * @spare_databuf: Data bounce buffer
+ * @spare_oobbuf: OOB bounce buffer
+ * @bounce_data: Flag indicating a data bounce buffer is used
+ * @bounce_oob: Flag indicating an OOB bounce buffer is used
+ */
+struct nand_ecc_req_tweak_ctx {
+ struct nand_page_io_req orig_req;
+ struct nand_device *nand;
+ unsigned int page_buffer_size;
+ unsigned int oob_buffer_size;
+ void *spare_databuf;
+ void *spare_oobbuf;
+ bool bounce_data;
+ bool bounce_oob;
+};
+
+int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx,
+ struct nand_device *nand);
+void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx);
+void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx,
+ struct nand_page_io_req *req);
+void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx *ctx,
+ struct nand_page_io_req *req);
+
+/**
+ * struct nand_ecc - Information relative to the ECC
+ * @defaults: Default values, depend on the underlying subsystem
+ * @requirements: ECC requirements from the NAND chip perspective
+ * @user_conf: User desires in terms of ECC parameters
+ * @ctx: ECC context for the ECC engine, derived from the device @requirements
+ * the @user_conf and the @defaults
+ * @ondie_engine: On-die ECC engine reference, if any
+ * @engine: ECC engine actually bound
+ */
+struct nand_ecc {
+ struct nand_ecc_props defaults;
+ struct nand_ecc_props requirements;
+ struct nand_ecc_props user_conf;
+ struct nand_ecc_context ctx;
+ struct nand_ecc_engine *ondie_engine;
+ struct nand_ecc_engine *engine;
+};
+
+/**
* struct nand_device - NAND device
* @mtd: MTD instance attached to the NAND device
* @memorg: memory layout
- * @eccreq: ECC requirements
+ * @ecc: NAND ECC object attached to the NAND device
* @rowconv: position to row address converter
* @bbt: bad block table info
* @ops: NAND operations attached to the NAND device
@@ -169,8 +407,8 @@ struct nand_ops {
* Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND)
* should declare their own NAND object embedding a nand_device struct (that's
* how inheritance is done).
- * struct_nand_device->memorg and struct_nand_device->eccreq should be filled
- * at device detection time to reflect the NAND device
+ * struct_nand_device->memorg and struct_nand_device->ecc.requirements should
+ * be filled at device detection time to reflect the NAND device
* capabilities/requirements. Once this is done nanddev_init() can be called.
* It will take care of converting NAND information into MTD ones, which means
* the specialized NAND layers should never manually tweak
@@ -179,7 +417,7 @@ struct nand_ops {
struct nand_device {
struct mtd_info mtd;
struct nand_memory_organization memorg;
- struct nand_ecc_req eccreq;
+ struct nand_ecc ecc;
struct nand_row_converter rowconv;
struct nand_bbt bbt;
const struct nand_ops *ops;
@@ -383,6 +621,60 @@ nanddev_get_memorg(struct nand_device *nand)
return &nand->memorg;
}
+/**
+ * nanddev_get_ecc_conf() - Extract the ECC configuration from a NAND device
+ * @nand: NAND device
+ */
+static inline const struct nand_ecc_props *
+nanddev_get_ecc_conf(struct nand_device *nand)
+{
+ return &nand->ecc.ctx.conf;
+}
+
+/**
+ * nanddev_get_ecc_nsteps() - Extract the number of ECC steps
+ * @nand: NAND device
+ */
+static inline unsigned int
+nanddev_get_ecc_nsteps(struct nand_device *nand)
+{
+ return nand->ecc.ctx.nsteps;
+}
+
+/**
+ * nanddev_get_ecc_bytes_per_step() - Extract the number of ECC bytes per step
+ * @nand: NAND device
+ */
+static inline unsigned int
+nanddev_get_ecc_bytes_per_step(struct nand_device *nand)
+{
+ return nand->ecc.ctx.total / nand->ecc.ctx.nsteps;
+}
+
+/**
+ * nanddev_get_ecc_requirements() - Extract the ECC requirements from a NAND
+ * device
+ * @nand: NAND device
+ */
+static inline const struct nand_ecc_props *
+nanddev_get_ecc_requirements(struct nand_device *nand)
+{
+ return &nand->ecc.requirements;
+}
+
+/**
+ * nanddev_set_ecc_requirements() - Assign the ECC requirements of a NAND
+ * device
+ * @nand: NAND device
+ * @reqs: Requirements
+ */
+static inline void
+nanddev_set_ecc_requirements(struct nand_device *nand,
+ const struct nand_ecc_props *reqs)
+{
+ nand->ecc.requirements = *reqs;
+}
+
int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
struct module *owner);
void nanddev_cleanup(struct nand_device *nand);
@@ -624,11 +916,13 @@ static inline void nanddev_pos_next_page(struct nand_device *nand,
* layer.
*/
static inline void nanddev_io_iter_init(struct nand_device *nand,
+ enum nand_page_io_req_type reqtype,
loff_t offs, struct mtd_oob_ops *req,
struct nand_io_iter *iter)
{
struct mtd_info *mtd = nanddev_to_mtd(nand);
+ iter->req.type = reqtype;
iter->req.mode = req->mode;
iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
iter->req.ooboffs = req->ooboffs;
@@ -698,8 +992,8 @@ static inline bool nanddev_io_iter_end(struct nand_device *nand,
*
* Should be used for iterate over pages that are contained in an MTD request.
*/
-#define nanddev_io_for_each_page(nand, start, req, iter) \
- for (nanddev_io_iter_init(nand, start, req, iter); \
+#define nanddev_io_for_each_page(nand, type, start, req, iter) \
+ for (nanddev_io_iter_init(nand, type, start, req, iter); \
!nanddev_io_iter_end(nand, iter); \
nanddev_io_iter_next_page(nand, iter))
@@ -708,6 +1002,15 @@ bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
+/* ECC related functions */
+int nanddev_ecc_engine_init(struct nand_device *nand);
+void nanddev_ecc_engine_cleanup(struct nand_device *nand);
+
+static inline void *nand_to_ecc_ctx(struct nand_device *nand)
+{
+ return nand->ecc.ctx.priv;
+}
+
/* BBT related functions */
enum nand_bbt_block_status {
NAND_BBT_BLOCK_STATUS_UNKNOWN,
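
To illustrate the new request-type parameter taken by the page iterator, here is a hypothetical read-path fragment (example_iterate_pages()); the actual page I/O would be issued by the specialized layer:

#include <linux/mtd/nand.h>

static void example_iterate_pages(struct nand_device *nand, loff_t offs,
                                  struct mtd_oob_ops *req)
{
        struct nand_io_iter iter;

        nanddev_io_for_each_page(nand, NAND_PAGE_READ, offs, req, &iter) {
                /* iter.req.type is now NAND_PAGE_READ, so ECC engines can
                 * tell reads from writes in their prepare/finish hooks. */
        }
}
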
diff --git a/include/linux/mtd/nand_bch.h b/include/linux/mtd/nand_bch.h
deleted file mode 100644
index d5956cc48ba9..000000000000
--- a/include/linux/mtd/nand_bch.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com>
- *
- * This file is the header for the NAND BCH ECC implementation.
- */
-
-#ifndef __MTD_NAND_BCH_H__
-#define __MTD_NAND_BCH_H__
-
-struct mtd_info;
-struct nand_chip;
-struct nand_bch_control;
-
-#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)
-
-static inline int mtd_nand_has_bch(void) { return 1; }
-
-/*
- * Calculate BCH ecc code
- */
-int nand_bch_calculate_ecc(struct nand_chip *chip, const u_char *dat,
- u_char *ecc_code);
-
-/*
- * Detect and correct bit errors
- */
-int nand_bch_correct_data(struct nand_chip *chip, u_char *dat,
- u_char *read_ecc, u_char *calc_ecc);
-/*
- * Initialize BCH encoder/decoder
- */
-struct nand_bch_control *nand_bch_init(struct mtd_info *mtd);
-/*
- * Release BCH encoder/decoder resources
- */
-void nand_bch_free(struct nand_bch_control *nbc);
-
-#else /* !CONFIG_MTD_NAND_ECC_SW_BCH */
-
-static inline int mtd_nand_has_bch(void) { return 0; }
-
-static inline int
-nand_bch_calculate_ecc(struct nand_chip *chip, const u_char *dat,
- u_char *ecc_code)
-{
- return -1;
-}
-
-static inline int
-nand_bch_correct_data(struct nand_chip *chip, unsigned char *buf,
- unsigned char *read_ecc, unsigned char *calc_ecc)
-{
- return -ENOTSUPP;
-}
-
-static inline struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
-{
- return NULL;
-}
-
-static inline void nand_bch_free(struct nand_bch_control *nbc) {}
-
-#endif /* CONFIG_MTD_NAND_ECC_SW_BCH */
-
-#endif /* __MTD_NAND_BCH_H__ */
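
For out-of-tree code, a rough sketch of the correspondence with the replacement API from nand-ecc-sw-bch.h above (the nand_device handle takes over from the old mtd_info/nand_bch_control pair):

/*
 * nand_bch_init(mtd)          ->  nand_ecc_sw_bch_init_ctx(nand)
 * nand_bch_free(nbc)          ->  nand_ecc_sw_bch_cleanup_ctx(nand)
 * nand_bch_calculate_ecc(...) ->  nand_ecc_sw_bch_calculate(...)
 * nand_bch_correct_data(...)  ->  nand_ecc_sw_bch_correct(...)
 */
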
diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h
deleted file mode 100644
index d423916b94f0..000000000000
--- a/include/linux/mtd/nand_ecc.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2000-2010 Steven J. Hill <sjhill@realitydiluted.com>
- * David Woodhouse <dwmw2@infradead.org>
- * Thomas Gleixner <tglx@linutronix.de>
- *
- * This file is the header for the ECC algorithm.
- */
-
-#ifndef __MTD_NAND_ECC_H__
-#define __MTD_NAND_ECC_H__
-
-struct nand_chip;
-
-/*
- * Calculate 3 byte ECC code for eccsize byte block
- */
-void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize,
- u_char *ecc_code, bool sm_order);
-
-/*
- * Calculate 3 byte ECC code for 256/512 byte block
- */
-int nand_calculate_ecc(struct nand_chip *chip, const u_char *dat,
- u_char *ecc_code);
-
-/*
- * Detect and correct a 1 bit error for eccsize byte block
- */
-int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc,
- unsigned int eccsize, bool sm_order);
-
-/*
- * Detect and correct a 1 bit error for 256/512 byte block
- */
-int nand_correct_data(struct nand_chip *chip, u_char *dat, u_char *read_ecc,
- u_char *calc_ecc);
-
-#endif /* __MTD_NAND_ECC_H__ */
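
The same migration sketch applies to the removed Hamming helpers, whose replacements live in nand-ecc-sw-hamming.h above:

/*
 * __nand_calculate_ecc(...) ->  ecc_sw_hamming_calculate(...)
 * __nand_correct_data(...)  ->  ecc_sw_hamming_correct(...)
 * nand_calculate_ecc(...)   ->  nand_ecc_sw_hamming_calculate(...)
 * nand_correct_data(...)    ->  nand_ecc_sw_hamming_correct(...)
 */
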
diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h
index 339ac798568e..a7376f9beddf 100644
--- a/include/linux/mtd/onfi.h
+++ b/include/linux/mtd/onfi.h
@@ -11,6 +11,7 @@
#define __LINUX_MTD_ONFI_H
#include <linux/types.h>
+#include <linux/bitfield.h>
/* ONFI version bits */
#define ONFI_VERSION_1_0 BIT(1)
@@ -24,17 +25,22 @@
#define ONFI_VERSION_4_0 BIT(9)
/* ONFI features */
-#define ONFI_FEATURE_16_BIT_BUS (1 << 0)
-#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7)
+#define ONFI_FEATURE_16_BIT_BUS BIT(0)
+#define ONFI_FEATURE_NV_DDR BIT(5)
+#define ONFI_FEATURE_EXT_PARAM_PAGE BIT(7)
/* ONFI timing mode, used in both asynchronous and synchronous mode */
-#define ONFI_TIMING_MODE_0 (1 << 0)
-#define ONFI_TIMING_MODE_1 (1 << 1)
-#define ONFI_TIMING_MODE_2 (1 << 2)
-#define ONFI_TIMING_MODE_3 (1 << 3)
-#define ONFI_TIMING_MODE_4 (1 << 4)
-#define ONFI_TIMING_MODE_5 (1 << 5)
-#define ONFI_TIMING_MODE_UNKNOWN (1 << 6)
+#define ONFI_DATA_INTERFACE_SDR 0
+#define ONFI_DATA_INTERFACE_NVDDR BIT(4)
+#define ONFI_DATA_INTERFACE_NVDDR2 BIT(5)
+#define ONFI_TIMING_MODE_0 BIT(0)
+#define ONFI_TIMING_MODE_1 BIT(1)
+#define ONFI_TIMING_MODE_2 BIT(2)
+#define ONFI_TIMING_MODE_3 BIT(3)
+#define ONFI_TIMING_MODE_4 BIT(4)
+#define ONFI_TIMING_MODE_5 BIT(5)
+#define ONFI_TIMING_MODE_UNKNOWN BIT(6)
+#define ONFI_TIMING_MODE_PARAM(x) FIELD_GET(GENMASK(3, 0), (x))
/* ONFI feature number/address */
#define ONFI_FEATURE_NUMBER 256
@@ -49,7 +55,7 @@
#define ONFI_SUBFEATURE_PARAM_LEN 4
/* ONFI optional commands SET/GET FEATURES supported? */
-#define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2)
+#define ONFI_OPT_CMD_SET_GET_FEATURES BIT(2)
struct nand_onfi_params {
/* rev info and features block */
@@ -93,14 +99,15 @@ struct nand_onfi_params {
/* electrical parameter block */
u8 io_pin_capacitance_max;
- __le16 async_timing_mode;
+ __le16 sdr_timing_modes;
__le16 program_cache_timing_mode;
__le16 t_prog;
__le16 t_bers;
__le16 t_r;
__le16 t_ccs;
- __le16 src_sync_timing_mode;
- u8 src_ssync_features;
+ u8 nvddr_timing_modes;
+ u8 nvddr2_timing_modes;
+ u8 nvddr_nvddr2_features;
__le16 clk_pin_capacitance_typ;
__le16 io_pin_capacitance_typ;
__le16 input_pin_capacitance_typ;
@@ -160,7 +167,9 @@ struct onfi_ext_param_page {
* @tBERS: Block erase time
* @tR: Page read time
* @tCCS: Change column setup time
- * @async_timing_mode: Supported asynchronous timing mode
+ * @fast_tCAD: Command/Address/Data slow or fast delay (NV-DDR only)
+ * @sdr_timing_modes: Supported asynchronous/SDR timing modes
+ * @nvddr_timing_modes: Supported source synchronous/NV-DDR timing modes
* @vendor_revision: Vendor specific revision number
* @vendor: Vendor specific data
*/
@@ -170,7 +179,9 @@ struct onfi_params {
u16 tBERS;
u16 tR;
u16 tCCS;
- u16 async_timing_mode;
+ bool fast_tCAD;
+ u16 sdr_timing_modes;
+ u16 nvddr_timing_modes;
u16 vendor_revision;
u8 vendor[88];
};
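
With the renamed sdr_timing_modes field, a driver can still pick the fastest advertised mode. example_best_sdr_mode() below is a hypothetical helper (ONFI makes mode 0 support mandatory, so the fallback should not trigger on compliant parts):

#include <linux/bits.h>
#include <linux/mtd/onfi.h>

static int example_best_sdr_mode(const struct onfi_params *params)
{
        int mode;

        for (mode = 5; mode >= 0; mode--)
                if (params->sdr_timing_modes & BIT(mode))
                        return mode;

        return -EINVAL;
}
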
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 11cb0c50cd84..b74a539ec581 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -37,6 +37,7 @@
* master MTD flag set for the corresponding MTD partition.
* For example, to force a read-only partition, simply adding
* MTD_WRITEABLE to the mask_flags will do the trick.
+ * add_flags: contains flags to add to the parent flags
*
* Note: writeable partitions require their size and offset be
* erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK).
@@ -48,6 +49,7 @@ struct mtd_partition {
uint64_t size; /* partition size */
uint64_t offset; /* offset within the master MTD space */
uint32_t mask_flags; /* master MTD flags to mask out for this partition */
+ uint32_t add_flags; /* flags to add to the partition */
struct device_node *of_node;
};
@@ -105,7 +107,6 @@ extern void deregister_mtd_parser(struct mtd_part_parser *parser);
module_driver(__mtd_part_parser, register_mtd_parser, \
deregister_mtd_parser)
-int mtd_is_partition(const struct mtd_info *mtd);
int mtd_add_partition(struct mtd_info *master, const char *name,
long long offset, long long length);
int mtd_del_partition(struct mtd_info *master, int partno);
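
A hypothetical board-file fragment showing add_flags as the mirror image of mask_flags (partition names and sizes are made up):

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/sizes.h>

static const struct mtd_partition example_parts[] = {
        {
                .name = "boot",
                .offset = 0,
                .size = SZ_1M,
                .mask_flags = MTD_WRITEABLE,    /* force read-only */
        },
        {
                .name = "data",
                .offset = MTDPART_OFS_NXTBLK,
                .size = MTDPART_SIZ_FULL,
                .add_flags = MTD_WRITEABLE,     /* force writeable */
        },
};
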
diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h
index 122f3439e1af..146413d4bdb7 100644
--- a/include/linux/mtd/pfow.h
+++ b/include/linux/mtd/pfow.h
@@ -19,7 +19,7 @@
/* Identification info for LPDDR chip */
#define PFOW_MANUFACTURER_ID 0x0020
#define PFOW_DEVICE_ID 0x0022
-/* Address in PFOW where prog buffer can can be found */
+/* Address in PFOW where prog buffer can be found */
#define PFOW_PROGRAM_BUFFER_OFFSET 0x0040
/* Size of program buffer in words */
#define PFOW_PROGRAM_BUFFER_SIZE 0x0042
@@ -121,37 +121,4 @@ static inline void send_pfow_command(struct map_info *map,
map_write(map, CMD(LPDDR_START_EXECUTION),
map->pfow_base + PFOW_COMMAND_EXECUTE);
}
-
-static inline void print_drs_error(unsigned dsr)
-{
- int prog_status = (dsr & DSR_RPS) >> 8;
-
- if (!(dsr & DSR_AVAILABLE))
- printk(KERN_NOTICE"DSR.15: (0) Device not Available\n");
- if (prog_status & 0x03)
- printk(KERN_NOTICE"DSR.9,8: (11) Attempt to program invalid "
- "half with 41h command\n");
- else if (prog_status & 0x02)
- printk(KERN_NOTICE"DSR.9,8: (10) Object Mode Program attempt "
- "in region with Control Mode data\n");
- else if (prog_status & 0x01)
- printk(KERN_NOTICE"DSR.9,8: (01) Program attempt in region "
- "with Object Mode data\n");
- if (!(dsr & DSR_READY_STATUS))
- printk(KERN_NOTICE"DSR.7: (0) Device is Busy\n");
- if (dsr & DSR_ESS)
- printk(KERN_NOTICE"DSR.6: (1) Erase Suspended\n");
- if (dsr & DSR_ERASE_STATUS)
- printk(KERN_NOTICE"DSR.5: (1) Erase/Blank check error\n");
- if (dsr & DSR_PROGRAM_STATUS)
- printk(KERN_NOTICE"DSR.4: (1) Program Error\n");
- if (dsr & DSR_VPPS)
- printk(KERN_NOTICE"DSR.3: (1) Vpp low detect, operation "
- "aborted\n");
- if (dsr & DSR_PSS)
- printk(KERN_NOTICE"DSR.2: (1) Program suspended\n");
- if (dsr & DSR_DPS)
- printk(KERN_NOTICE"DSR.1: (1) Aborted Erase/Program attempt "
- "on locked block\n");
-}
#endif /* __LINUX_MTD_PFOW_H */
diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h
index df5b9fddea16..2e3f43788d48 100644
--- a/include/linux/mtd/qinfo.h
+++ b/include/linux/mtd/qinfo.h
@@ -24,7 +24,7 @@ struct lpddr_private {
struct qinfo_chip *qinfo;
int numchips;
unsigned long chipshift;
- struct flchip chips[0];
+ struct flchip chips[];
};
/* qinfo_query_info structure contains request information for
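
The chips[0] to chips[] change converts a GCC-style one-element array into a C99 flexible array member; a hypothetical allocation (example_alloc_lpddr()) can then size it safely with struct_size():

#include <linux/mtd/qinfo.h>
#include <linux/overflow.h>
#include <linux/slab.h>

static struct lpddr_private *example_alloc_lpddr(int numchips)
{
        struct lpddr_private *lpddr;

        lpddr = kzalloc(struct_size(lpddr, chips, numchips), GFP_KERNEL);
        if (lpddr)
                lpddr->numchips = numchips;
        return lpddr;
}
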
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 4ab9bccfcde0..dcf90144d70b 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -14,16 +14,17 @@
#define __LINUX_MTD_RAWNAND_H
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/bbm.h>
#include <linux/mtd/jedec.h>
-#include <linux/mtd/nand.h>
#include <linux/mtd/onfi.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/types.h>
struct nand_chip;
+struct gpio_desc;
/* The maximum number of NAND chips in an array */
#define NAND_MAX_CHIPS 8
@@ -81,25 +82,6 @@ struct nand_chip;
#define NAND_DATA_IFACE_CHECK_ONLY -1
/*
- * Constants for ECC_MODES
- */
-typedef enum {
- NAND_ECC_NONE,
- NAND_ECC_SOFT,
- NAND_ECC_HW,
- NAND_ECC_HW_SYNDROME,
- NAND_ECC_HW_OOB_FIRST,
- NAND_ECC_ON_DIE,
-} nand_ecc_modes_t;
-
-enum nand_ecc_algo {
- NAND_ECC_UNKNOWN,
- NAND_ECC_HAMMING,
- NAND_ECC_BCH,
- NAND_ECC_RS,
-};
-
-/*
* Constants for Hardware ECC
*/
/* Reset Hardware ECC for read */
@@ -116,7 +98,14 @@ enum nand_ecc_algo {
* pages and you want to rely on the default implementation.
*/
#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0)
-#define NAND_ECC_MAXIMIZE BIT(1)
+
+/*
+ * Option constants for bizarre dysfunctionality and real
+ * features.
+ */
+
+/* Buswidth is 16 bit */
+#define NAND_BUSWIDTH_16 BIT(1)
/*
* When using software implementation of Hamming, we can specify which byte
@@ -124,80 +113,60 @@ enum nand_ecc_algo {
*/
#define NAND_ECC_SOFT_HAMMING_SM_ORDER BIT(2)
-/*
- * Option constants for bizarre disfunctionality and real
- * features.
- */
-/* Buswidth is 16 bit */
-#define NAND_BUSWIDTH_16 0x00000002
/* Chip has cache program function */
-#define NAND_CACHEPRG 0x00000008
+#define NAND_CACHEPRG BIT(3)
+/* Options valid for Samsung large page devices */
+#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
+
/*
* Chip requires ready check on read (for auto-incremented sequential read).
* True only for small page devices; large page devices do not support
* autoincrement.
*/
-#define NAND_NEED_READRDY 0x00000100
+#define NAND_NEED_READRDY BIT(8)
/* Chip does not allow subpage writes */
-#define NAND_NO_SUBPAGE_WRITE 0x00000200
+#define NAND_NO_SUBPAGE_WRITE BIT(9)
/* Device is one of 'new' xD cards that expose fake nand command set */
-#define NAND_BROKEN_XD 0x00000400
+#define NAND_BROKEN_XD BIT(10)
/* Device behaves just like nand, but is readonly */
-#define NAND_ROM 0x00000800
+#define NAND_ROM BIT(11)
/* Device supports subpage reads */
-#define NAND_SUBPAGE_READ 0x00001000
+#define NAND_SUBPAGE_READ BIT(12)
+/* Macros to identify the above */
+#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
/*
* Some MLC NANDs need data scrambling to limit bitflips caused by repeated
* patterns.
*/
-#define NAND_NEED_SCRAMBLING 0x00002000
+#define NAND_NEED_SCRAMBLING BIT(13)
/* Device needs 3rd row address cycle */
-#define NAND_ROW_ADDR_3 0x00004000
-
-/* Options valid for Samsung large page devices */
-#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
-
-/* Macros to identify the above */
-#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
-
-/*
- * There are different places where the manufacturer stores the factory bad
- * block markers.
- *
- * Position within the block: Each of these pages needs to be checked for a
- * bad block marking pattern.
- */
-#define NAND_BBM_FIRSTPAGE 0x01000000
-#define NAND_BBM_SECONDPAGE 0x02000000
-#define NAND_BBM_LASTPAGE 0x04000000
-
-/* Position within the OOB data of the page */
-#define NAND_BBM_POS_SMALL 5
-#define NAND_BBM_POS_LARGE 0
+#define NAND_ROW_ADDR_3 BIT(14)
/* Non chip related options */
/* This option skips the bbt scan during initialization. */
-#define NAND_SKIP_BBTSCAN 0x00010000
+#define NAND_SKIP_BBTSCAN BIT(16)
/* Chip may not exist, so silence any errors in scan */
-#define NAND_SCAN_SILENT_NODEV 0x00040000
+#define NAND_SCAN_SILENT_NODEV BIT(18)
+
/*
* Autodetect nand buswidth with readid/onfi.
* This suppose the driver will configure the hardware in 8 bits mode
* when calling nand_scan_ident, and update its configuration
* before calling nand_scan_tail.
*/
-#define NAND_BUSWIDTH_AUTO 0x00080000
+#define NAND_BUSWIDTH_AUTO BIT(19)
+
/*
* This option could be defined by controller drivers to protect against
* kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
*/
-#define NAND_USE_BOUNCE_BUFFER 0x00100000
+#define NAND_USES_DMA BIT(20)
/*
* In case your controller is implementing ->legacy.cmd_ctrl() and is relying
@@ -207,26 +176,49 @@ enum nand_ecc_algo {
* If your controller already takes care of this delay, you don't need to set
* this flag.
*/
-#define NAND_WAIT_TCCS 0x00200000
+#define NAND_WAIT_TCCS BIT(21)
/*
* Whether the NAND chip is a boot medium. Drivers might use this information
* to select ECC algorithms supported by the boot ROM or similar restrictions.
*/
-#define NAND_IS_BOOT_MEDIUM 0x00400000
+#define NAND_IS_BOOT_MEDIUM BIT(22)
/*
* Do not try to tweak the timings at runtime. This is needed when the
* controller initializes the timings on itself or when it relies on
* configuration done by the bootloader.
*/
-#define NAND_KEEP_TIMINGS 0x00800000
+#define NAND_KEEP_TIMINGS BIT(23)
+
+/*
+ * There are different places where the manufacturer stores the factory bad
+ * block markers.
+ *
+ * Position within the block: Each of these pages needs to be checked for a
+ * bad block marking pattern.
+ */
+#define NAND_BBM_FIRSTPAGE BIT(24)
+#define NAND_BBM_SECONDPAGE BIT(25)
+#define NAND_BBM_LASTPAGE BIT(26)
+
+/*
+ * Some controllers with pipelined ECC engines override the BBM marker with
+ * data or ECC bytes, thus making bad block detection through bad block marker
+ * impossible. Let's flag those chips so the core knows it shouldn't check the
+ * BBM and consider all blocks good.
+ */
+#define NAND_NO_BBM_QUIRK BIT(27)
/* Cell info constants */
#define NAND_CI_CHIPNR_MSK 0x03
#define NAND_CI_CELLTYPE_MSK 0x0C
#define NAND_CI_CELLTYPE_SHIFT 2
+/* Position within the OOB data of the page */
+#define NAND_BBM_POS_SMALL 5
+#define NAND_BBM_POS_LARGE 0
+
/**
* struct nand_parameters - NAND generic parameters from the parameter page
* @model: Model name
@@ -299,7 +291,8 @@ static const struct nand_ecc_caps __name = { \
/**
* struct nand_ecc_ctrl - Control structure for ECC
- * @mode: ECC mode
+ * @engine_type: ECC engine type
+ * @placement: OOB bytes placement
* @algo: ECC algorithm
* @steps: number of ECC steps per page
* @size: data bytes per ECC step
@@ -309,7 +302,6 @@ static const struct nand_ecc_caps __name = { \
* @prepad: padding information for syndrome based ECC generators
* @postpad: padding information for syndrome based ECC generators
* @options: ECC specific options (see NAND_ECC_XXX flags defined above)
- * @priv: pointer to private ECC control data
* @calc_buf: buffer for calculated ECC, size is oobsize.
* @code_buf: buffer for ECC read from flash, size is oobsize.
* @hwctl: function to control hardware ECC generator. Must only
@@ -327,7 +319,7 @@ static const struct nand_ecc_caps __name = { \
* controller and always return contiguous in-band and
* out-of-band data even if they're not stored
* contiguously on the NAND chip (e.g.
- * NAND_ECC_HW_SYNDROME interleaves in-band and
+ * NAND_ECC_PLACEMENT_INTERLEAVED interleaves in-band and
* out-of-band data).
* @write_page_raw: function to write a raw page without ECC. This function
* should hide the specific layout used by the ECC
@@ -335,7 +327,7 @@ static const struct nand_ecc_caps __name = { \
* in-band and out-of-band data. ECC controller is
* responsible for doing the appropriate transformations
* to adapt to its specific layout (e.g.
- * NAND_ECC_HW_SYNDROME interleaves in-band and
+ * NAND_ECC_PLACEMENT_INTERLEAVED interleaves in-band and
* out-of-band data).
* @read_page: function to read a page according to the ECC generator
* requirements; returns maximum number of bitflips corrected in
@@ -351,7 +343,8 @@ static const struct nand_ecc_caps __name = { \
* @write_oob: function to write chip OOB data
*/
struct nand_ecc_ctrl {
- nand_ecc_modes_t mode;
+ enum nand_ecc_engine_type engine_type;
+ enum nand_ecc_placement placement;
enum nand_ecc_algo algo;
int steps;
int size;
@@ -361,7 +354,6 @@ struct nand_ecc_ctrl {
int prepad;
int postpad;
unsigned int options;
- void *priv;
u8 *calc_buf;
u8 *code_buf;
void (*hwctl)(struct nand_chip *chip, int mode);
@@ -394,8 +386,8 @@ struct nand_ecc_ctrl {
* This struct defines the timing requirements of a SDR NAND chip.
* These information can be found in every NAND datasheets and the timings
* meaning are described in the ONFI specifications:
- * www.onfi.org/~/media/ONFI/specs/onfi_3_1_spec.pdf (chapter 4.15 Timing
- * Parameters)
+ * https://media-www.micron.com/-/media/client/onfi/specs/onfi_3_1_spec.pdf
+ * (chapter 4.15 Timing Parameters)
*
* All these timings are expressed in picoseconds.
*
@@ -481,40 +473,193 @@ struct nand_sdr_timings {
};
/**
- * enum nand_data_interface_type - NAND interface timing type
+ * struct nand_nvddr_timings - NV-DDR NAND chip timings
+ *
+ * This struct defines the timing requirements of an NV-DDR NAND data interface.
+ * This information can be found in every NAND datasheet, and the meaning of
+ * the timings is described in the ONFI specifications:
+ * https://media-www.micron.com/-/media/client/onfi/specs/onfi_4_1_gold.pdf
+ * (chapter 4.18.2 NV-DDR)
+ *
+ * All these timings are expressed in picoseconds.
+ *
+ * @tBERS_max: Block erase time
+ * @tCCS_min: Change column setup time
+ * @tPROG_max: Page program time
+ * @tR_max: Page read time
+ * @tAC_min: Access window of DQ[7:0] from CLK
+ * @tAC_max: Access window of DQ[7:0] from CLK
+ * @tADL_min: ALE to data loading time
+ * @tCAD_min: Command, Address, Data delay
+ * @tCAH_min: Command/Address DQ hold time
+ * @tCALH_min: W/R_n, CLE and ALE hold time
+ * @tCALS_min: W/R_n, CLE and ALE setup time
+ * @tCAS_min: Command/address DQ setup time
+ * @tCEH_min: CE# high hold time
+ * @tCH_min: CE# hold time
+ * @tCK_min: Average clock cycle time
+ * @tCS_min: CE# setup time
+ * @tDH_min: Data hold time
+ * @tDQSCK_min: Start of the access window of DQS from CLK
+ * @tDQSCK_max: End of the access window of DQS from CLK
+ * @tDQSD_min: Min W/R_n low to DQS/DQ driven by device
+ * @tDQSD_max: Max W/R_n low to DQS/DQ driven by device
+ * @tDQSHZ_max: W/R_n high to DQS/DQ tri-state by device
+ * @tDQSQ_max: DQS-DQ skew, DQS to last DQ valid, per access
+ * @tDS_min: Data setup time
+ * @tDSC_min: DQS cycle time
+ * @tFEAT_max: Busy time for Set Features and Get Features
+ * @tITC_max: Interface and Timing Mode Change time
+ * @tQHS_max: Data hold skew factor
+ * @tRHW_min: Data output cycle to command, address, or data input cycle
+ * @tRR_min: Ready to RE# low (data only)
+ * @tRST_max: Device reset time, measured from the falling edge of R/B# to the
+ * rising edge of R/B#.
+ * @tWB_max: WE# high to SR[6] low
+ * @tWHR_min: WE# high to RE# low
+ * @tWRCK_min: W/R_n low to data output cycle
+ * @tWW_min: WP# transition to WE# low
+ */
+struct nand_nvddr_timings {
+ u64 tBERS_max;
+ u32 tCCS_min;
+ u64 tPROG_max;
+ u64 tR_max;
+ u32 tAC_min;
+ u32 tAC_max;
+ u32 tADL_min;
+ u32 tCAD_min;
+ u32 tCAH_min;
+ u32 tCALH_min;
+ u32 tCALS_min;
+ u32 tCAS_min;
+ u32 tCEH_min;
+ u32 tCH_min;
+ u32 tCK_min;
+ u32 tCS_min;
+ u32 tDH_min;
+ u32 tDQSCK_min;
+ u32 tDQSCK_max;
+ u32 tDQSD_min;
+ u32 tDQSD_max;
+ u32 tDQSHZ_max;
+ u32 tDQSQ_max;
+ u32 tDS_min;
+ u32 tDSC_min;
+ u32 tFEAT_max;
+ u32 tITC_max;
+ u32 tQHS_max;
+ u32 tRHW_min;
+ u32 tRR_min;
+ u32 tRST_max;
+ u32 tWB_max;
+ u32 tWHR_min;
+ u32 tWRCK_min;
+ u32 tWW_min;
+};
+
+/*
+ * While timings related to the data interface itself are mostly different
+ * between SDR and NV-DDR, timings related to the internal chip behavior are
+ * common. IOW, the following entries which describe the internal delays have
+ * the same definition and are shared in both SDR and NV-DDR timing structures:
+ * - tADL_min
+ * - tBERS_max
+ * - tCCS_min
+ * - tFEAT_max
+ * - tPROG_max
+ * - tR_max
+ * - tRR_min
+ * - tRST_max
+ * - tWB_max
+ *
+ * The below macros return the value of a given timing, no matter the interface.
+ */
+#define NAND_COMMON_TIMING_PS(conf, timing_name) \
+ nand_interface_is_sdr(conf) ? \
+ nand_get_sdr_timings(conf)->timing_name : \
+ nand_get_nvddr_timings(conf)->timing_name
+
+#define NAND_COMMON_TIMING_MS(conf, timing_name) \
+ PSEC_TO_MSEC(NAND_COMMON_TIMING_PS((conf), timing_name))
+
+#define NAND_COMMON_TIMING_NS(conf, timing_name) \
+ PSEC_TO_NSEC(NAND_COMMON_TIMING_PS((conf), timing_name))
+
+/**
+ * enum nand_interface_type - NAND interface type
* @NAND_SDR_IFACE: Single Data Rate interface
+ * @NAND_NVDDR_IFACE: Double Data Rate interface
*/
-enum nand_data_interface_type {
+enum nand_interface_type {
NAND_SDR_IFACE,
+ NAND_NVDDR_IFACE,
};
/**
- * struct nand_data_interface - NAND interface timing
+ * struct nand_interface_config - NAND interface timing
* @type: type of the timing
- * @timings: The timing, type according to @type
+ * @timings: The timing information
+ * @timings.mode: Timing mode as defined in the specification
* @timings.sdr: Use it when @type is %NAND_SDR_IFACE.
+ * @timings.nvddr: Use it when @type is %NAND_NVDDR_IFACE.
*/
-struct nand_data_interface {
- enum nand_data_interface_type type;
- union {
- struct nand_sdr_timings sdr;
+struct nand_interface_config {
+ enum nand_interface_type type;
+ struct nand_timings {
+ unsigned int mode;
+ union {
+ struct nand_sdr_timings sdr;
+ struct nand_nvddr_timings nvddr;
+ };
} timings;
};
/**
+ * nand_interface_is_sdr - check whether the interface type is SDR
+ * @conf: The data interface
+ */
+static inline bool nand_interface_is_sdr(const struct nand_interface_config *conf)
+{
+ return conf->type == NAND_SDR_IFACE;
+}
+
+/**
+ * nand_interface_is_nvddr - check whether the interface type is NV-DDR
+ * @conf: The data interface
+ */
+static inline bool nand_interface_is_nvddr(const struct nand_interface_config *conf)
+{
+ return conf->type == NAND_NVDDR_IFACE;
+}
+
+/**
* nand_get_sdr_timings - get SDR timing from data interface
* @conf: The data interface
*/
static inline const struct nand_sdr_timings *
-nand_get_sdr_timings(const struct nand_data_interface *conf)
+nand_get_sdr_timings(const struct nand_interface_config *conf)
{
- if (conf->type != NAND_SDR_IFACE)
+ if (!nand_interface_is_sdr(conf))
return ERR_PTR(-EINVAL);
return &conf->timings.sdr;
}
/**
+ * nand_get_nvddr_timings - get NV-DDR timing from data interface
+ * @conf: The data interface
+ */
+static inline const struct nand_nvddr_timings *
+nand_get_nvddr_timings(const struct nand_interface_config *conf)
+{
+ if (!nand_interface_is_nvddr(conf))
+ return ERR_PTR(-EINVAL);
+
+ return &conf->timings.nvddr;
+}
+
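As a hypothetical illustration of the interface-agnostic macros above (example_twb_ns() is not part of the patch):

static unsigned int example_twb_ns(const struct nand_interface_config *conf)
{
        /* tWB_max is one of the timings shared by SDR and NV-DDR. */
        return NAND_COMMON_TIMING_NS(conf, tWB_max);
}
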
+/**
* struct nand_op_cmd_instr - Definition of a command instruction
* @opcode: the command to issue in one cycle
*/
@@ -694,6 +839,7 @@ struct nand_op_instr {
/**
* struct nand_subop - a sub operation
+ * @cs: the CS line to select for this NAND sub-operation
* @instrs: array of instructions
* @ninstrs: length of the @instrs array
* @first_instr_start_off: offset to start from for the first instruction
@@ -709,6 +855,7 @@ struct nand_op_instr {
* controller driver.
*/
struct nand_subop {
+ unsigned int cs;
const struct nand_op_instr *instrs;
unsigned int ninstrs;
unsigned int first_instr_start_off;
@@ -927,11 +1074,10 @@ static inline void nand_op_trace(const char *prefix,
* This method replaces chip->legacy.cmdfunc(),
* chip->legacy.{read,write}_{buf,byte,word}(),
* chip->legacy.dev_ready() and chip->legacy.waifunc().
- * @setup_data_interface: setup the data interface and timing. If
- * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this
- * means the configuration should not be applied but
- * only checked.
- * This hook is optional.
+ * @setup_interface: setup the data interface and timing. If chipnr is set to
+ * %NAND_DATA_IFACE_CHECK_ONLY this means the configuration
+ * should not be applied but only checked.
+ * This hook is optional.
*/
struct nand_controller_ops {
int (*attach_chip)(struct nand_chip *chip);
@@ -939,8 +1085,8 @@ struct nand_controller_ops {
int (*exec_op)(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only);
- int (*setup_data_interface)(struct nand_chip *chip, int chipnr,
- const struct nand_data_interface *conf);
+ int (*setup_interface)(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf);
};
/**
@@ -1011,136 +1157,156 @@ struct nand_legacy {
};
/**
- * struct nand_chip - NAND Private Flash Chip Data
- * @base: Inherit from the generic NAND device
- * @legacy: All legacy fields/hooks. If you develop a new driver,
- * don't even try to use any of these fields/hooks, and if
- * you're modifying an existing driver that is using those
- * fields/hooks, you should consider reworking the driver
- * avoid using them.
- * @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for
- * setting the read-retry mode. Mostly needed for MLC NAND.
- * @ecc: [BOARDSPECIFIC] ECC control structure
- * @buf_align: minimum buffer alignment required by a platform
- * @oob_poi: "poison value buffer," used for laying out OOB data
- * before writing
- * @page_shift: [INTERN] number of address bits in a page (column
- * address bits).
- * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock
- * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry
- * @chip_shift: [INTERN] number of address bits in one chip
- * @options: [BOARDSPECIFIC] various chip options. They can partly
- * be set to inform nand_scan about special functionality.
- * See the defines for further explanation.
- * @bbt_options: [INTERN] bad block specific options. All options used
- * here must come from bbm.h. By default, these options
- * will be copied to the appropriate nand_bbt_descr's.
- * @badblockpos: [INTERN] position of the bad block marker in the oob
- * area.
- * @badblockbits: [INTERN] minimum number of set bits in a good block's
- * bad block marker position; i.e., BBM == 11110111b is
- * not bad when badblockbits == 7
- * @onfi_timing_mode_default: [INTERN] default ONFI timing mode. This field is
- * set to the actually used ONFI mode if the chip is
- * ONFI compliant or deduced from the datasheet if
- * the NAND chip is not ONFI compliant.
- * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1
- * @data_buf: [INTERN] buffer for data, size is (page size + oobsize).
- * @pagecache: Structure containing page cache related fields
- * @pagecache.bitflips: Number of bitflips of the cached page
- * @pagecache.page: Page number currently in the cache. -1 means no page is
- * currently cached
- * @subpagesize: [INTERN] holds the subpagesize
- * @id: [INTERN] holds NAND ID
- * @parameters: [INTERN] holds generic parameters under an easily
- * readable form.
- * @data_interface: [INTERN] NAND interface timing information
- * @cur_cs: currently selected target. -1 means no target selected,
- * otherwise we should always have cur_cs >= 0 &&
- * cur_cs < nanddev_ntargets(). NAND Controller drivers
- * should not modify this value, but they're allowed to
- * read it.
- * @read_retries: [INTERN] the number of read retry modes supported
- * @lock: lock protecting the suspended field. Also used to
- * serialize accesses to the NAND device.
- * @suspended: set to 1 when the device is suspended, 0 when it's not.
- * @bbt: [INTERN] bad block table pointer
- * @bbt_td: [REPLACEABLE] bad block table descriptor for flash
- * lookup.
- * @bbt_md: [REPLACEABLE] bad block table mirror descriptor
- * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for initial
- * bad block scan.
- * @controller: [REPLACEABLE] a pointer to a hardware controller
- * structure which is shared among multiple independent
- * devices.
- * @priv: [OPTIONAL] pointer to private chip data
- * @manufacturer: [INTERN] Contains manufacturer information
- * @manufacturer.desc: [INTERN] Contains manufacturer's description
- * @manufacturer.priv: [INTERN] Contains manufacturer private information
+ * struct nand_chip_ops - NAND chip operations
+ * @suspend: Suspend operation
+ * @resume: Resume operation
+ * @lock_area: Lock operation
+ * @unlock_area: Unlock operation
+ * @setup_read_retry: Set the read-retry mode (mostly needed for MLC NANDs)
+ * @choose_interface_config: Choose the best interface configuration
*/
+struct nand_chip_ops {
+ int (*suspend)(struct nand_chip *chip);
+ void (*resume)(struct nand_chip *chip);
+ int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
+ int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
+ int (*setup_read_retry)(struct nand_chip *chip, int retry_mode);
+ int (*choose_interface_config)(struct nand_chip *chip,
+ struct nand_interface_config *iface);
+};
+
+/**
+ * struct nand_manufacturer - NAND manufacturer structure
+ * @desc: The manufacturer description
+ * @priv: Private information for the manufacturer driver
+ */
+struct nand_manufacturer {
+ const struct nand_manufacturer_desc *desc;
+ void *priv;
+};
+
+/**
+ * struct nand_secure_region - NAND secure region structure
+ * @offset: Offset of the start of the secure region
+ * @size: Size of the secure region
+ */
+struct nand_secure_region {
+ u64 offset;
+ u64 size;
+};
+/**
+ * struct nand_chip - NAND Private Flash Chip Data
+ * @base: Inherit from the generic NAND device
+ * @id: Holds NAND ID
+ * @parameters: Holds generic parameters under an easily readable form
+ * @manufacturer: Manufacturer information
+ * @ops: NAND chip operations
+ * @legacy: All legacy fields/hooks. If you develop a new driver, don't even try
+ * to use any of these fields/hooks, and if you're modifying an
+ * existing driver that is using those fields/hooks, you should
+ * consider reworking the driver and avoid using them.
+ * @options: Various chip options. They can partly be set to inform nand_scan
+ * about special functionality. See the defines for further
+ * explanation.
+ * @current_interface_config: The currently used NAND interface configuration
+ * @best_interface_config: The best NAND interface configuration which fits both
+ * the NAND chip and NAND controller constraints. If
+ * unset, the default reset interface configuration must
+ * be used.
+ * @bbt_erase_shift: Number of address bits in a bbt entry
+ * @bbt_options: Bad block table specific options. All options used here must
+ * come from bbm.h. By default, these options will be copied to
+ * the appropriate nand_bbt_descr's.
+ * @badblockpos: Bad block marker position in the oob area
+ * @badblockbits: Minimum number of set bits in a good block's bad block marker
+ * position; i.e., BBM = 11110111b is good when badblockbits = 7
+ * @bbt_td: Bad block table descriptor for flash lookup
+ * @bbt_md: Bad block table mirror descriptor
+ * @badblock_pattern: Bad block scan pattern used for initial bad block scan
+ * @bbt: Bad block table pointer
+ * @page_shift: Number of address bits in a page (column address bits)
+ * @phys_erase_shift: Number of address bits in a physical eraseblock
+ * @chip_shift: Number of address bits in one chip
+ * @pagemask: Page number mask = number of (pages / chip) - 1
+ * @subpagesize: Holds the subpagesize
+ * @data_buf: Buffer for data, size is (page size + oobsize)
+ * @oob_poi: pointer to the OOB area covered by data_buf
+ * @pagecache: Structure containing page cache related fields
+ * @pagecache.bitflips: Number of bitflips of the cached page
+ * @pagecache.page: Page number currently in the cache. -1 means no page is
+ * currently cached
+ * @buf_align: Minimum buffer alignment required by a platform
+ * @lock: Lock protecting the suspended field. Also used to serialize accesses
+ * to the NAND device
+ * @suspended: Set to 1 when the device is suspended, 0 when it's not
+ * @resume_wq: wait queue to sleep on while rawnand is in suspended state.
+ * @cur_cs: Currently selected target. -1 means no target selected, otherwise we
+ * should always have cur_cs >= 0 && cur_cs < nanddev_ntargets().
+ * NAND Controller drivers should not modify this value, but they're
+ * allowed to read it.
+ * @read_retries: The number of read retry modes supported
+ * @secure_regions: Structure containing the secure regions info
+ * @nr_secure_regions: Number of secure regions
+ * @controller: The hardware controller structure which is shared among multiple
+ * independent devices
+ * @ecc: The ECC controller structure
+ * @priv: Chip private data
+ */
struct nand_chip {
struct nand_device base;
-
+ struct nand_id id;
+ struct nand_parameters parameters;
+ struct nand_manufacturer manufacturer;
+ struct nand_chip_ops ops;
struct nand_legacy legacy;
+ unsigned int options;
- int (*setup_read_retry)(struct nand_chip *chip, int retry_mode);
+ /* Data interface */
+ const struct nand_interface_config *current_interface_config;
+ struct nand_interface_config *best_interface_config;
- unsigned int options;
+ /* Bad block information */
+ unsigned int bbt_erase_shift;
unsigned int bbt_options;
+ unsigned int badblockpos;
+ unsigned int badblockbits;
+ struct nand_bbt_descr *bbt_td;
+ struct nand_bbt_descr *bbt_md;
+ struct nand_bbt_descr *badblock_pattern;
+ u8 *bbt;
- int page_shift;
- int phys_erase_shift;
- int bbt_erase_shift;
- int chip_shift;
- int pagemask;
- u8 *data_buf;
+ /* Device internal layout */
+ unsigned int page_shift;
+ unsigned int phys_erase_shift;
+ unsigned int chip_shift;
+ unsigned int pagemask;
+ unsigned int subpagesize;
+ /* Buffers */
+ u8 *data_buf;
+ u8 *oob_poi;
struct {
unsigned int bitflips;
int page;
} pagecache;
+ unsigned long buf_align;
- int subpagesize;
- int onfi_timing_mode_default;
- unsigned int badblockpos;
- int badblockbits;
-
- struct nand_id id;
- struct nand_parameters parameters;
-
- struct nand_data_interface data_interface;
-
- int cur_cs;
-
- int read_retries;
-
+ /* Internals */
struct mutex lock;
unsigned int suspended : 1;
+ wait_queue_head_t resume_wq;
+ int cur_cs;
+ int read_retries;
+ struct nand_secure_region *secure_regions;
+ u8 nr_secure_regions;
- uint8_t *oob_poi;
+ /* Externals */
struct nand_controller *controller;
-
struct nand_ecc_ctrl ecc;
- unsigned long buf_align;
-
- uint8_t *bbt;
- struct nand_bbt_descr *bbt_td;
- struct nand_bbt_descr *bbt_md;
-
- struct nand_bbt_descr *badblock_pattern;
-
void *priv;
-
- struct {
- const struct nand_manufacturer *desc;
- void *priv;
- } manufacturer;
};
-extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
-extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
-
static inline struct nand_chip *mtd_to_nand(struct mtd_info *mtd)
{
return container_of(mtd, struct nand_chip, base.mtd);
@@ -1183,6 +1349,17 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
return mtd_get_of_node(nand_to_mtd(chip));
}
+/**
+ * nand_get_interface_config - Retrieve the current interface configuration
+ * of a NAND chip
+ * @chip: The NAND chip
+ */
+static inline const struct nand_interface_config *
+nand_get_interface_config(struct nand_chip *chip)
+{
+ return chip->current_interface_config;
+}
+
/*
* A helper for defining older NAND chips where the second ID byte fully
* defined the chip, including the geometry (chip size, eraseblock size, page
@@ -1215,7 +1392,7 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
* struct nand_flash_dev - NAND Flash Device ID Structure
* @name: a human-readable name of the NAND chip
* @dev_id: the device ID (the second byte of the full chip ID array)
- * @mfr_id: manufecturer ID part of the full chip ID array (refers the same
+ * @mfr_id: manufacturer ID part of the full chip ID array (refers to the same
 * memory address as ``id[0]``)
 * @dev_id: device ID part of the full chip ID array (refers to the same memory
* address as ``id[1]``)
@@ -1235,10 +1412,6 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
* @ecc_step_ds in nand_chip{}, also from the datasheet.
* For example, the "4bit ECC for each 512Byte" can be set with
* NAND_ECC_INFO(4, 512).
- * @onfi_timing_mode_default: the default ONFI timing mode entered after a NAND
- * reset. Should be deduced from timings described
- * in the datasheet.
- *
*/
struct nand_flash_dev {
char *name;
@@ -1259,7 +1432,6 @@ struct nand_flash_dev {
uint16_t strength_ds;
uint16_t step_ds;
} ecc;
- int onfi_timing_mode_default;
};
int nand_create_bbt(struct nand_chip *chip);
@@ -1277,7 +1449,8 @@ static inline bool nand_is_slc(struct nand_chip *chip)
}
/**
- * Check if the opcode's address should be sent only on the lower 8 bits
+ * nand_opcode_8bits - Check if the opcode's address should be sent only on the
+ * lower 8 bits
* @command: opcode to check
*/
static inline int nand_opcode_8bits(unsigned int command)
@@ -1294,6 +1467,20 @@ static inline int nand_opcode_8bits(unsigned int command)
return 0;
}
+int rawnand_sw_hamming_init(struct nand_chip *chip);
+int rawnand_sw_hamming_calculate(struct nand_chip *chip,
+ const unsigned char *buf,
+ unsigned char *code);
+int rawnand_sw_hamming_correct(struct nand_chip *chip,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc);
+void rawnand_sw_hamming_cleanup(struct nand_chip *chip);
+int rawnand_sw_bch_init(struct nand_chip *chip);
+int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc);
+void rawnand_sw_bch_cleanup(struct nand_chip *chip);
+
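
A minimal sketch, assuming a driver that wants to fall back to these exported software ECC helpers from its generic hooks (example_* is hypothetical):

static int example_attach_sw_hamming(struct nand_chip *chip)
{
	int ret;

	ret = rawnand_sw_hamming_init(chip);
	if (ret)
		return ret;

	/* Route the generic ECC hooks to the software helpers. */
	chip->ecc.calculate = rawnand_sw_hamming_calculate;
	chip->ecc.correct = rawnand_sw_hamming_correct;
	return 0;
}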
int nand_check_erased_ecc_chunk(void *data, int datalen,
void *ecc, int ecclen,
void *extraoob, int extraooblen,
@@ -1312,13 +1499,17 @@ int nand_read_oob_std(struct nand_chip *chip, int page);
int nand_get_set_features_notsupp(struct nand_chip *chip, int addr,
u8 *subfeature_param);
-/* Default read_page_raw implementation */
+/* read_page_raw implementations */
int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
int page);
+int nand_monolithic_read_page_raw(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page);
-/* Default write_page_raw implementation */
+/* write_page_raw implementations */
int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page);
+int nand_monolithic_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page);
/* Reset and initialize a NAND device */
int nand_reset(struct nand_chip *chip, int chipnr);
@@ -1347,9 +1538,11 @@ int nand_change_write_column_op(struct nand_chip *chip,
unsigned int offset_in_page, const void *buf,
unsigned int len, bool force_8bit);
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
- bool force_8bit);
+ bool force_8bit, bool check_only);
int nand_write_data_op(struct nand_chip *chip, const void *buf,
unsigned int len, bool force_8bit);
+int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page);
/* Scan and identify a NAND device */
int nand_scan_with_ids(struct nand_chip *chip, unsigned int max_chips,
@@ -1368,15 +1561,12 @@ void nand_wait_ready(struct nand_chip *chip);
 * successful nand_scan().
*/
void nand_cleanup(struct nand_chip *chip);
-/* Unregister the MTD device and calls nand_cleanup() */
-void nand_release(struct nand_chip *chip);
/*
* External helper for controller drivers that have to implement the WAITRDY
* instruction and have no physical pin to check it.
*/
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms);
-struct gpio_desc;
int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
unsigned long timeout_ms);
@@ -1384,6 +1574,10 @@ int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
void nand_select_target(struct nand_chip *chip, unsigned int cs);
void nand_deselect_target(struct nand_chip *chip);
+/* Bitops */
+void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
+ unsigned int src_off, unsigned int nbits);
+
/**
* nand_get_data_buf() - Get the internal page buffer
* @chip: NAND chip object
@@ -1405,4 +1599,8 @@ static inline void *nand_get_data_buf(struct nand_chip *chip)
return chip->data_buf;
}
+/* Parse the gpio-cs property */
+int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
+ unsigned int *ncs_array);
+
#endif /* __LINUX_MTD_RAWNAND_H */
diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h
index d2c3cf29e0d1..231bd1c3f408 100644
--- a/include/linux/mtd/sharpsl.h
+++ b/include/linux/mtd/sharpsl.h
@@ -9,7 +9,6 @@
#define _MTD_SHARPSL_H
#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
struct sharpsl_nand_platform_data {
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 5abd91cc6dfa..42218a1164f6 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -12,23 +12,6 @@
#include <linux/spi/spi-mem.h>
/*
- * Manufacturer IDs
- *
- * The first byte returned from the flash after sending opcode SPINOR_OP_RDID.
- * Sometimes these are the same as CFI IDs, but sometimes they aren't.
- */
-#define SNOR_MFR_ATMEL CFI_MFR_ATMEL
-#define SNOR_MFR_GIGADEVICE 0xc8
-#define SNOR_MFR_INTEL CFI_MFR_INTEL
-#define SNOR_MFR_ST CFI_MFR_ST /* ST Micro */
-#define SNOR_MFR_MICRON CFI_MFR_MICRON /* Micron */
-#define SNOR_MFR_ISSI CFI_MFR_PMC
-#define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX
-#define SNOR_MFR_SPANSION CFI_MFR_AMD
-#define SNOR_MFR_SST CFI_MFR_SST
-#define SNOR_MFR_WINBOND 0xef /* Also used by some Spansion */
-
-/*
* Note on opcode nomenclature: some opcodes have a format like
* SPINOR_OP_FUNCTION{4,}_x_y_z. The numbers x, y, and z stand for the number
* of I/O lines used for the opcode, address, and data (respectively). The
@@ -37,6 +20,7 @@
*/
/* Flash opcodes. */
+#define SPINOR_OP_WRDI 0x04 /* Write disable */
#define SPINOR_OP_WREN 0x06 /* Write enable */
#define SPINOR_OP_RDSR 0x05 /* Read status register */
#define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */
@@ -63,10 +47,9 @@
#define SPINOR_OP_RDID 0x9f /* Read JEDEC ID */
#define SPINOR_OP_RDSFDP 0x5a /* Read SFDP */
#define SPINOR_OP_RDCR 0x35 /* Read configuration register */
-#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
-#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */
-#define SPINOR_OP_RDEAR 0xc8 /* Read Extended Address Register */
-#define SPINOR_OP_WREAR 0xc5 /* Write Extended Address Register */
+#define SPINOR_OP_SRSTEN 0x66 /* Software Reset Enable */
+#define SPINOR_OP_SRST 0x99 /* Software Reset */
+#define SPINOR_OP_GBULK 0x98 /* Global Block Unlock */
/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
#define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */
@@ -97,30 +80,24 @@
/* Used for SST flashes only. */
#define SPINOR_OP_BP 0x02 /* Byte program */
-#define SPINOR_OP_WRDI 0x04 /* Write disable */
#define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */
-/* Used for S3AN flashes only */
-#define SPINOR_OP_XSE 0x50 /* Sector erase */
-#define SPINOR_OP_XPP 0x82 /* Page program */
-#define SPINOR_OP_XRDSR 0xd7 /* Read status register */
-
-#define XSR_PAGESIZE BIT(0) /* Page size in Po2 or Linear */
-#define XSR_RDY BIT(7) /* Ready */
-
-
/* Used for Macronix and Winbond flashes. */
#define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */
#define SPINOR_OP_EX4B 0xe9 /* Exit 4-byte mode */
/* Used for Spansion flashes only. */
#define SPINOR_OP_BRWR 0x17 /* Bank register write */
-#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */
/* Used for Micron flashes only. */
#define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */
#define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */
+/* Used for GigaDevices and Winbond flashes. */
+#define SPINOR_OP_ESECR 0x44 /* Erase Security registers */
+#define SPINOR_OP_PSECR 0x42 /* Program Security registers */
+#define SPINOR_OP_RSECR 0x48 /* Read Security registers */
+
/* Status Register bits. */
#define SR_WIP BIT(0) /* Write in progress */
#define SR_WEL BIT(1) /* Write enable latch */
@@ -128,7 +105,9 @@
#define SR_BP0 BIT(2) /* Block protect 0 */
#define SR_BP1 BIT(3) /* Block protect 1 */
#define SR_BP2 BIT(4) /* Block protect 2 */
+#define SR_BP3 BIT(5) /* Block protect 3 */
#define SR_TB_BIT5 BIT(5) /* Top/Bottom protect */
+#define SR_BP3_BIT6 BIT(6) /* Block protect 3 */
#define SR_TB_BIT6 BIT(6) /* Top/Bottom protect */
#define SR_SRWD BIT(7) /* SR write protect */
/* Spansion/Cypress specific status bits */
@@ -137,17 +116,16 @@
#define SR1_QUAD_EN_BIT6 BIT(6)
+#define SR_BP_SHIFT 2
+
/* Enhanced Volatile Configuration Register bits */
#define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */
-/* Flag Status Register bits */
-#define FSR_READY BIT(7) /* Device status, 0 = Busy, 1 = Ready */
-#define FSR_E_ERR BIT(5) /* Erase operation status */
-#define FSR_P_ERR BIT(4) /* Program operation status */
-#define FSR_PT_ERR BIT(1) /* Protection error bit */
-
/* Status Register 2 bits. */
#define SR2_QUAD_EN_BIT1 BIT(1)
+#define SR2_LB1 BIT(3) /* Security Register Lock Bit 1 */
+#define SR2_LB2 BIT(4) /* Security Register Lock Bit 2 */
+#define SR2_LB3 BIT(5) /* Security Register Lock Bit 3 */
#define SR2_QUAD_EN_BIT7 BIT(7)
/* Supported SPI protocols */
@@ -195,6 +173,7 @@ enum spi_nor_protocol {
SNOR_PROTO_1_2_2_DTR = SNOR_PROTO_DTR(1, 2, 2),
SNOR_PROTO_1_4_4_DTR = SNOR_PROTO_DTR(1, 4, 4),
SNOR_PROTO_1_8_8_DTR = SNOR_PROTO_DTR(1, 8, 8),
+ SNOR_PROTO_8_8_8_DTR = SNOR_PROTO_DTR(8, 8, 8),
};
static inline bool spi_nor_protocol_is_dtr(enum spi_nor_protocol proto)
@@ -225,110 +204,6 @@ static inline u8 spi_nor_get_protocol_width(enum spi_nor_protocol proto)
return spi_nor_get_protocol_data_nbits(proto);
}
-enum spi_nor_option_flags {
- SNOR_F_USE_FSR = BIT(0),
- SNOR_F_HAS_SR_TB = BIT(1),
- SNOR_F_NO_OP_CHIP_ERASE = BIT(2),
- SNOR_F_READY_XSR_RDY = BIT(3),
- SNOR_F_USE_CLSR = BIT(4),
- SNOR_F_BROKEN_RESET = BIT(5),
- SNOR_F_4B_OPCODES = BIT(6),
- SNOR_F_HAS_4BAIT = BIT(7),
- SNOR_F_HAS_LOCK = BIT(8),
- SNOR_F_HAS_16BIT_SR = BIT(9),
- SNOR_F_NO_READ_CR = BIT(10),
- SNOR_F_HAS_SR_TB_BIT6 = BIT(11),
-
-};
-
-/**
- * struct spi_nor_erase_type - Structure to describe a SPI NOR erase type
- * @size: the size of the sector/block erased by the erase type.
- * JEDEC JESD216B imposes erase sizes to be a power of 2.
- * @size_shift: @size is a power of 2, the shift is stored in
- * @size_shift.
- * @size_mask: the size mask based on @size_shift.
- * @opcode: the SPI command op code to erase the sector/block.
- * @idx: Erase Type index as sorted in the Basic Flash Parameter
- * Table. It will be used to synchronize the supported
- * Erase Types with the ones identified in the SFDP
- * optional tables.
- */
-struct spi_nor_erase_type {
- u32 size;
- u32 size_shift;
- u32 size_mask;
- u8 opcode;
- u8 idx;
-};
-
-/**
- * struct spi_nor_erase_command - Used for non-uniform erases
- * The structure is used to describe a list of erase commands to be executed
- * once we validate that the erase can be performed. The elements in the list
- * are run-length encoded.
- * @list: for inclusion into the list of erase commands.
- * @count: how many times the same erase command should be
- * consecutively used.
- * @size: the size of the sector/block erased by the command.
- * @opcode: the SPI command op code to erase the sector/block.
- */
-struct spi_nor_erase_command {
- struct list_head list;
- u32 count;
- u32 size;
- u8 opcode;
-};
-
-/**
- * struct spi_nor_erase_region - Structure to describe a SPI NOR erase region
- * @offset: the offset in the data array of erase region start.
- * LSB bits are used as a bitmask encoding flags to
- * determine if this region is overlaid, if this region is
- * the last in the SPI NOR flash memory and to indicate
- * all the supported erase commands inside this region.
- * The erase types are sorted in ascending order with the
- * smallest Erase Type size being at BIT(0).
- * @size: the size of the region in bytes.
- */
-struct spi_nor_erase_region {
- u64 offset;
- u64 size;
-};
-
-#define SNOR_ERASE_TYPE_MAX 4
-#define SNOR_ERASE_TYPE_MASK GENMASK_ULL(SNOR_ERASE_TYPE_MAX - 1, 0)
-
-#define SNOR_LAST_REGION BIT(4)
-#define SNOR_OVERLAID_REGION BIT(5)
-
-#define SNOR_ERASE_FLAGS_MAX 6
-#define SNOR_ERASE_FLAGS_MASK GENMASK_ULL(SNOR_ERASE_FLAGS_MAX - 1, 0)
-
-/**
- * struct spi_nor_erase_map - Structure to describe the SPI NOR erase map
- * @regions: array of erase regions. The regions are consecutive in
- * address space. Walking through the regions is done
- * incrementally.
- * @uniform_region: a pre-allocated erase region for SPI NOR with a uniform
- * sector size (legacy implementation).
- * @erase_type: an array of erase types shared by all the regions.
- * The erase types are sorted in ascending order, with the
- * smallest Erase Type size being the first member in the
- * erase_type array.
- * @uniform_erase_type: bitmask encoding erase types that can erase the
- * entire memory. This member is completed at init by
- * uniform and non-uniform SPI NOR flash memories if they
- * support at least one erase type that can erase the
- * entire memory.
- */
-struct spi_nor_erase_map {
- struct spi_nor_erase_region *regions;
- struct spi_nor_erase_region uniform_region;
- struct spi_nor_erase_type erase_type[SNOR_ERASE_TYPE_MAX];
- u8 uniform_erase_type;
-};
-
/**
 * struct spi_nor_hwcaps - Structure for describing the hardware capabilities
* supported by the SPI controller (bus master).
@@ -345,7 +220,7 @@ struct spi_nor_hwcaps {
* then Quad SPI protocols before Dual SPI protocols, Fast Read and lastly
* (Slow) Read.
*/
-#define SNOR_HWCAPS_READ_MASK GENMASK(14, 0)
+#define SNOR_HWCAPS_READ_MASK GENMASK(15, 0)
#define SNOR_HWCAPS_READ BIT(0)
#define SNOR_HWCAPS_READ_FAST BIT(1)
#define SNOR_HWCAPS_READ_1_1_1_DTR BIT(2)
@@ -362,11 +237,12 @@ struct spi_nor_hwcaps {
#define SNOR_HWCAPS_READ_4_4_4 BIT(9)
#define SNOR_HWCAPS_READ_1_4_4_DTR BIT(10)
-#define SNOR_HWCAPS_READ_OCTAL GENMASK(14, 11)
+#define SNOR_HWCAPS_READ_OCTAL GENMASK(15, 11)
#define SNOR_HWCAPS_READ_1_1_8 BIT(11)
#define SNOR_HWCAPS_READ_1_8_8 BIT(12)
#define SNOR_HWCAPS_READ_8_8_8 BIT(13)
#define SNOR_HWCAPS_READ_1_8_8_DTR BIT(14)
+#define SNOR_HWCAPS_READ_8_8_8_DTR BIT(15)
/*
* Page Program capabilities.
@@ -377,18 +253,19 @@ struct spi_nor_hwcaps {
* JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory
* implements such commands.
*/
-#define SNOR_HWCAPS_PP_MASK GENMASK(22, 16)
-#define SNOR_HWCAPS_PP BIT(16)
+#define SNOR_HWCAPS_PP_MASK GENMASK(23, 16)
+#define SNOR_HWCAPS_PP BIT(16)
-#define SNOR_HWCAPS_PP_QUAD GENMASK(19, 17)
-#define SNOR_HWCAPS_PP_1_1_4 BIT(17)
-#define SNOR_HWCAPS_PP_1_4_4 BIT(18)
-#define SNOR_HWCAPS_PP_4_4_4 BIT(19)
+#define SNOR_HWCAPS_PP_QUAD GENMASK(19, 17)
+#define SNOR_HWCAPS_PP_1_1_4 BIT(17)
+#define SNOR_HWCAPS_PP_1_4_4 BIT(18)
+#define SNOR_HWCAPS_PP_4_4_4 BIT(19)
-#define SNOR_HWCAPS_PP_OCTAL GENMASK(22, 20)
-#define SNOR_HWCAPS_PP_1_1_8 BIT(20)
-#define SNOR_HWCAPS_PP_1_8_8 BIT(21)
-#define SNOR_HWCAPS_PP_8_8_8 BIT(22)
+#define SNOR_HWCAPS_PP_OCTAL GENMASK(23, 20)
+#define SNOR_HWCAPS_PP_1_1_8 BIT(20)
+#define SNOR_HWCAPS_PP_1_8_8 BIT(21)
+#define SNOR_HWCAPS_PP_8_8_8 BIT(22)
+#define SNOR_HWCAPS_PP_8_8_8_DTR BIT(23)
#define SNOR_HWCAPS_X_X_X (SNOR_HWCAPS_READ_2_2_2 | \
SNOR_HWCAPS_READ_4_4_4 | \
@@ -396,69 +273,19 @@ struct spi_nor_hwcaps {
SNOR_HWCAPS_PP_4_4_4 | \
SNOR_HWCAPS_PP_8_8_8)
+#define SNOR_HWCAPS_X_X_X_DTR (SNOR_HWCAPS_READ_8_8_8_DTR | \
+ SNOR_HWCAPS_PP_8_8_8_DTR)
+
#define SNOR_HWCAPS_DTR (SNOR_HWCAPS_READ_1_1_1_DTR | \
SNOR_HWCAPS_READ_1_2_2_DTR | \
SNOR_HWCAPS_READ_1_4_4_DTR | \
- SNOR_HWCAPS_READ_1_8_8_DTR)
+ SNOR_HWCAPS_READ_1_8_8_DTR | \
+ SNOR_HWCAPS_READ_8_8_8_DTR)
#define SNOR_HWCAPS_ALL (SNOR_HWCAPS_READ_MASK | \
SNOR_HWCAPS_PP_MASK)
-struct spi_nor_read_command {
- u8 num_mode_clocks;
- u8 num_wait_states;
- u8 opcode;
- enum spi_nor_protocol proto;
-};
-
-struct spi_nor_pp_command {
- u8 opcode;
- enum spi_nor_protocol proto;
-};
-
-enum spi_nor_read_command_index {
- SNOR_CMD_READ,
- SNOR_CMD_READ_FAST,
- SNOR_CMD_READ_1_1_1_DTR,
-
- /* Dual SPI */
- SNOR_CMD_READ_1_1_2,
- SNOR_CMD_READ_1_2_2,
- SNOR_CMD_READ_2_2_2,
- SNOR_CMD_READ_1_2_2_DTR,
-
- /* Quad SPI */
- SNOR_CMD_READ_1_1_4,
- SNOR_CMD_READ_1_4_4,
- SNOR_CMD_READ_4_4_4,
- SNOR_CMD_READ_1_4_4_DTR,
-
- /* Octal SPI */
- SNOR_CMD_READ_1_1_8,
- SNOR_CMD_READ_1_8_8,
- SNOR_CMD_READ_8_8_8,
- SNOR_CMD_READ_1_8_8_DTR,
-
- SNOR_CMD_READ_MAX
-};
-
-enum spi_nor_pp_command_index {
- SNOR_CMD_PP,
-
- /* Quad SPI */
- SNOR_CMD_PP_1_1_4,
- SNOR_CMD_PP_1_4_4,
- SNOR_CMD_PP_4_4_4,
-
- /* Octal SPI */
- SNOR_CMD_PP_1_1_8,
- SNOR_CMD_PP_1_8_8,
- SNOR_CMD_PP_8_8_8,
-
- SNOR_CMD_PP_MAX
-};
-
-/* Forward declaration that will be used in 'struct spi_nor_flash_parameter' */
+/* Forward declaration that is used in 'struct spi_nor_controller_ops' */
struct spi_nor;
/**
@@ -473,7 +300,7 @@ struct spi_nor;
* @read: read data from the SPI NOR.
* @write: write data to the SPI NOR.
* @erase: erase a sector of the SPI NOR at the offset @offs; if
- * not provided by the driver, spi-nor will send the erase
+ * not provided by the driver, SPI NOR will send the erase
* opcode via write_reg().
*/
struct spi_nor_controller_ops {
@@ -490,95 +317,60 @@ struct spi_nor_controller_ops {
};
/**
- * struct spi_nor_locking_ops - SPI NOR locking methods
- * @lock: lock a region of the SPI NOR.
- * @unlock: unlock a region of the SPI NOR.
- * @is_locked: check if a region of the SPI NOR is completely locked
+ * enum spi_nor_cmd_ext - describes the command opcode extension in DTR mode
+ * @SPI_NOR_EXT_NONE: no extension. This is the default, and is used in Legacy
+ * SPI mode
+ * @SPI_NOR_EXT_REPEAT: the extension is same as the opcode
+ * @SPI_NOR_EXT_INVERT: the extension is the bitwise inverse of the opcode
+ * @SPI_NOR_EXT_HEX: the extension is any hex value. The command and opcode
+ * combine to form a 16-bit opcode.
*/
-struct spi_nor_locking_ops {
- int (*lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
- int (*unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
- int (*is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len);
-};
-
-/**
- * struct spi_nor_flash_parameter - SPI NOR flash parameters and settings.
- * Includes legacy flash parameters and settings that can be overwritten
- * by the spi_nor_fixups hooks, or dynamically when parsing the JESD216
- * Serial Flash Discoverable Parameters (SFDP) tables.
- *
- * @size: the flash memory density in bytes.
- * @page_size: the page size of the SPI NOR flash memory.
- * @hwcaps: describes the read and page program hardware
- * capabilities.
- * @reads: read capabilities ordered by priority: the higher index
- * in the array, the higher priority.
- * @page_programs: page program capabilities ordered by priority: the
- * higher index in the array, the higher priority.
- * @erase_map: the erase map parsed from the SFDP Sector Map Parameter
- * Table.
- * @quad_enable: enables SPI NOR quad mode.
- * @set_4byte: puts the SPI NOR in 4 byte addressing mode.
- * @convert_addr: converts an absolute address into something the flash
- * will understand. Particularly useful when pagesize is
- * not a power-of-2.
- * @setup: configures the SPI NOR memory. Useful for SPI NOR
- * flashes that have peculiarities to the SPI NOR standard
- * e.g. different opcodes, specific address calculation,
- * page size, etc.
- * @locking_ops: SPI NOR locking methods.
- */
-struct spi_nor_flash_parameter {
- u64 size;
- u32 page_size;
-
- struct spi_nor_hwcaps hwcaps;
- struct spi_nor_read_command reads[SNOR_CMD_READ_MAX];
- struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX];
-
- struct spi_nor_erase_map erase_map;
-
- int (*quad_enable)(struct spi_nor *nor);
- int (*set_4byte)(struct spi_nor *nor, bool enable);
- u32 (*convert_addr)(struct spi_nor *nor, u32 addr);
- int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps);
-
- const struct spi_nor_locking_ops *locking_ops;
+enum spi_nor_cmd_ext {
+ SPI_NOR_EXT_NONE = 0,
+ SPI_NOR_EXT_REPEAT,
+ SPI_NOR_EXT_INVERT,
+ SPI_NOR_EXT_HEX,
};
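
In DTR mode the opcode is sent together with an extension byte; a sketch of how the extension could be derived for the REPEAT and INVERT variants (function name hypothetical):

static u8 example_get_cmd_ext(const struct spi_nor *nor, u8 opcode)
{
	switch (nor->cmd_ext_type) {
	case SPI_NOR_EXT_REPEAT:
		return opcode;		/* extension repeats the opcode */
	case SPI_NOR_EXT_INVERT:
		return ~opcode;		/* bitwise inverse of the opcode */
	default:
		return 0;		/* NONE/HEX handled elsewhere */
	}
}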
-/**
- * struct flash_info - Forward declaration of a structure used internally by
- * spi_nor_scan()
+/*
+ * Forward declarations that are used internally by the core and manufacturer
+ * drivers.
*/
struct flash_info;
+struct spi_nor_manufacturer;
+struct spi_nor_flash_parameter;
/**
- * struct spi_nor - Structure for defining a the SPI NOR layer
- * @mtd: point to a mtd_info structure
+ * struct spi_nor - Structure for defining the SPI NOR layer
+ * @mtd: an mtd_info structure
* @lock: the lock for the read/write/erase/lock/unlock operations
- * @dev: point to a spi device, or a spi nor controller device.
- * @spimem: point to the spi mem device
+ * @dev: pointer to an SPI device or an SPI NOR controller device
+ * @spimem: pointer to the SPI memory device
* @bouncebuf: bounce buffer used when the buffer passed by the MTD
* layer is not DMA-able
* @bouncebuf_size: size of the bounce buffer
- * @info: spi-nor part JDEC MFR id and other info
- * @page_size: the page size of the SPI NOR
- * @addr_width: number of address bytes
+ * @info: SPI NOR part JEDEC MFR ID and other info
+ * @manufacturer: SPI NOR manufacturer
+ * @addr_nbytes: number of address bytes
* @erase_opcode: the opcode for erasing a sector
* @read_opcode: the read opcode
* @read_dummy: the dummy needed by the read operation
* @program_opcode: the program opcode
* @sst_write_second: used by the SST write operation
- * @flags: flag options for the current SPI-NOR (SNOR_F_*)
+ * @flags: flag options for the current SPI NOR (SNOR_F_*)
+ * @cmd_ext_type: the command opcode extension type for DTR mode.
* @read_proto: the SPI protocol for read operations
* @write_proto: the SPI protocol for write operations
- * @reg_proto the SPI protocol for read_reg/write_reg/erase operations
+ * @reg_proto: the SPI protocol for read_reg/write_reg/erase operations
+ * @sfdp: the SFDP data of the flash
+ * @debugfs_root: pointer to the debugfs directory
* @controller_ops: SPI NOR controller driver specific operations.
- * @params: [FLASH-SPECIFIC] SPI-NOR flash parameters and settings.
+ * @params: [FLASH-SPECIFIC] SPI NOR flash parameters and settings.
* The structure includes legacy flash parameters and
* settings that can be overwritten by the spi_nor_fixups
* hooks, or dynamically when parsing the SFDP tables.
- * @priv: the private data
+ * @dirmap: pointers to struct spi_mem_dirmap_desc for reads/writes.
+ * @priv: pointer to the private data
*/
struct spi_nor {
struct mtd_info mtd;
@@ -588,8 +380,8 @@ struct spi_nor {
u8 *bouncebuf;
size_t bouncebuf_size;
const struct flash_info *info;
- u32 page_size;
- u8 addr_width;
+ const struct spi_nor_manufacturer *manufacturer;
+ u8 addr_nbytes;
u8 erase_opcode;
u8 read_opcode;
u8 read_dummy;
@@ -599,43 +391,22 @@ struct spi_nor {
enum spi_nor_protocol reg_proto;
bool sst_write_second;
u32 flags;
+ enum spi_nor_cmd_ext cmd_ext_type;
+ struct sfdp *sfdp;
+ struct dentry *debugfs_root;
const struct spi_nor_controller_ops *controller_ops;
- struct spi_nor_flash_parameter params;
+ struct spi_nor_flash_parameter *params;
+
+ struct {
+ struct spi_mem_dirmap_desc *rdesc;
+ struct spi_mem_dirmap_desc *wdesc;
+ } dirmap;
void *priv;
};
-static u64 __maybe_unused
-spi_nor_region_is_last(const struct spi_nor_erase_region *region)
-{
- return region->offset & SNOR_LAST_REGION;
-}
-
-static u64 __maybe_unused
-spi_nor_region_end(const struct spi_nor_erase_region *region)
-{
- return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
-}
-
-static void __maybe_unused
-spi_nor_region_mark_end(struct spi_nor_erase_region *region)
-{
- region->offset |= SNOR_LAST_REGION;
-}
-
-static void __maybe_unused
-spi_nor_region_mark_overlay(struct spi_nor_erase_region *region)
-{
- region->offset |= SNOR_OVERLAID_REGION;
-}
-
-static bool __maybe_unused spi_nor_has_uniform_erase(const struct spi_nor *nor)
-{
- return !!nor->params.erase_map.uniform_erase_type;
-}
-
static inline void spi_nor_set_flash_node(struct spi_nor *nor,
struct device_node *np)
{
@@ -653,7 +424,7 @@ static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor)
* @name: the chip type name
* @hwcaps: the hardware capabilities supported by the controller driver
*
- * The drivers can use this fuction to scan the SPI NOR.
+ * The drivers can use this function to scan the SPI NOR.
* In the scanning, it will try to get all the necessary information to
* fill the mtd_info{} and the spi_nor{}.
*
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 4ea558bd3c46..6d3392a7edc6 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -32,9 +32,9 @@
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_READID_OP(ndummy, buf, len) \
+#define SPINAND_READID_OP(naddr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \
- SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_ADDR(naddr, 0, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 1))
@@ -170,43 +170,74 @@ struct spinand_op;
struct spinand_device;
#define SPINAND_MAX_ID_LEN 4
+/*
+ * For the erase, write and read operations, we have the following timings:
+ * tBERS (erase) 1ms to 4ms
+ * tPROG 300us to 400us
+ * tREAD 25us to 100us
+ * In order to minimize latency, the min value is divided by 4 for the
+ * initial delay and by 20 for the poll delay.
+ * For reset, 5us/10us/500us if the device is respectively
+ * reading/programming/erasing when the RESET occurs. Since we always
+ * issue a RESET when the device is IDLE, 5us is selected for both initial
+ * and poll delay.
+ */
+#define SPINAND_READ_INITIAL_DELAY_US 6
+#define SPINAND_READ_POLL_DELAY_US 5
+#define SPINAND_RESET_INITIAL_DELAY_US 5
+#define SPINAND_RESET_POLL_DELAY_US 5
+#define SPINAND_WRITE_INITIAL_DELAY_US 75
+#define SPINAND_WRITE_POLL_DELAY_US 15
+#define SPINAND_ERASE_INITIAL_DELAY_US 250
+#define SPINAND_ERASE_POLL_DELAY_US 50
+
+#define SPINAND_WAITRDY_TIMEOUT_MS 400
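
As a check on the arithmetic: tPROG min 300us gives 300/4 = 75us initial and 300/20 = 15us poll delay, and tBERS min 1ms gives 250us and 50us, matching the write/erase defines above. The 5us read poll delay appears to be derived from the 100us upper bound (100/20) rather than from the 25us minimum.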
/**
* struct spinand_id - SPI NAND id structure
* @data: buffer containing the id bytes. Currently 4 bytes large, but can
* be extended if required
* @len: ID length
- *
- * struct_spinand_id->data contains all bytes returned after a READ_ID command,
- * including dummy bytes if the chip does not emit ID bytes right after the
- * READ_ID command. The responsibility to extract real ID bytes is left to
- * struct_manufacurer_ops->detect().
*/
struct spinand_id {
u8 data[SPINAND_MAX_ID_LEN];
int len;
};
+enum spinand_readid_method {
+ SPINAND_READID_METHOD_OPCODE,
+ SPINAND_READID_METHOD_OPCODE_ADDR,
+ SPINAND_READID_METHOD_OPCODE_DUMMY,
+};
+
+/**
+ * struct spinand_devid - SPI NAND device id structure
+ * @id: device id of current chip
+ * @len: number of bytes in device id
+ * @method: method to read chip id
+ * There are 3 possible variants:
+ * SPINAND_READID_METHOD_OPCODE: chip id is returned immediately
+ * after read_id opcode.
+ * SPINAND_READID_METHOD_OPCODE_ADDR: chip id is returned after
+ * read_id opcode + 1-byte address.
+ * SPINAND_READID_METHOD_OPCODE_DUMMY: chip id is returned after
+ * read_id opcode + 1 dummy byte.
+ */
+struct spinand_devid {
+ const u8 *id;
+ const u8 len;
+ const enum spinand_readid_method method;
+};
+
/**
 * struct spinand_manufacturer_ops - SPI NAND manufacturer specific operations
- * @detect: detect a SPI NAND device. Every time a SPI NAND device is probed
- * the core calls the struct_manufacurer_ops->detect() hook of each
- * registered manufacturer until one of them return 1. Note that
- * the first thing to check in this hook is that the manufacturer ID
- * in struct_spinand_device->id matches the manufacturer whose
- * ->detect() hook has been called. Should return 1 if there's a
- * match, 0 if the manufacturer ID does not match and a negative
- * error code otherwise. When true is returned, the core assumes
- * that properties of the NAND chip (spinand->base.memorg and
- * spinand->base.eccreq) have been filled
* @init: initialize a SPI NAND device
* @cleanup: cleanup a SPI NAND device
*
* Each SPI NAND manufacturer driver should implement this interface so that
- * NAND chips coming from this vendor can be detected and initialized properly.
+ * NAND chips coming from this vendor can be initialized properly.
*/
struct spinand_manufacturer_ops {
- int (*detect)(struct spinand_device *spinand);
int (*init)(struct spinand_device *spinand);
void (*cleanup)(struct spinand_device *spinand);
};
@@ -215,21 +246,28 @@ struct spinand_manufacturer_ops {
* struct spinand_manufacturer - SPI NAND manufacturer instance
* @id: manufacturer ID
* @name: manufacturer name
+ * @devid_len: number of bytes in device ID
+ * @chips: supported SPI NANDs under current manufacturer
+ * @nchips: number of SPI NANDs available in chips array
* @ops: manufacturer operations
*/
struct spinand_manufacturer {
u8 id;
char *name;
+ const struct spinand_info *chips;
+ const size_t nchips;
const struct spinand_manufacturer_ops *ops;
};
/* SPI NAND manufacturers */
+extern const struct spinand_manufacturer ato_spinand_manufacturer;
extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
extern const struct spinand_manufacturer macronix_spinand_manufacturer;
extern const struct spinand_manufacturer micron_spinand_manufacturer;
extern const struct spinand_manufacturer paragon_spinand_manufacturer;
extern const struct spinand_manufacturer toshiba_spinand_manufacturer;
extern const struct spinand_manufacturer winbond_spinand_manufacturer;
+extern const struct spinand_manufacturer xtx_spinand_manufacturer;
/**
* struct spinand_op_variants - SPI NAND operation variants
@@ -270,6 +308,16 @@ struct spinand_ecc_info {
};
#define SPINAND_HAS_QE_BIT BIT(0)
+#define SPINAND_HAS_CR_FEAT_BIT BIT(1)
+
+/**
+ * struct spinand_ondie_ecc_conf - private SPI-NAND on-die ECC engine structure
+ * @status: status of the last wait operation that will be used in case
+ * ->get_status() is not populated by the spinand device.
+ */
+struct spinand_ondie_ecc_conf {
+ u8 status;
+};
/**
* struct spinand_info - Structure used to describe SPI NAND chips
@@ -291,10 +339,10 @@ struct spinand_ecc_info {
*/
struct spinand_info {
const char *model;
- u16 devid;
+ struct spinand_devid devid;
u32 flags;
struct nand_memory_organization memorg;
- struct nand_ecc_req eccreq;
+ struct nand_ecc_props eccreq;
struct spinand_ecc_info eccinfo;
struct {
const struct spinand_op_variants *read_cache;
@@ -305,6 +353,13 @@ struct spinand_info {
unsigned int target);
};
+#define SPINAND_ID(__method, ...) \
+ { \
+ .id = (const u8[]){ __VA_ARGS__ }, \
+ .len = sizeof((u8[]){ __VA_ARGS__ }), \
+ .method = __method, \
+ }
+
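
A sketch of describing a chip whose ID bytes follow one dummy byte; the ID values here are hypothetical:

static const struct spinand_devid example_devid =
	SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x21);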
#define SPINAND_INFO_OP_VARIANTS(__read, __write, __update) \
{ \
.read_cache = __read, \
@@ -336,6 +391,8 @@ struct spinand_info {
struct spinand_dirmap {
struct spi_mem_dirmap_desc *wdesc;
struct spi_mem_dirmap_desc *rdesc;
+ struct spi_mem_dirmap_desc *wdesc_ecc;
+ struct spi_mem_dirmap_desc *rdesc_ecc;
};
/**
@@ -451,9 +508,10 @@ static inline void spinand_set_of_node(struct spinand_device *spinand,
nanddev_set_of_node(&spinand->base, np);
}
-int spinand_match_and_init(struct spinand_device *dev,
+int spinand_match_and_init(struct spinand_device *spinand,
const struct spinand_info *table,
- unsigned int table_size, u16 devid);
+ unsigned int table_size,
+ enum spinand_readid_method rdid_method);
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
int spinand_select_target(struct spinand_device *spinand, unsigned int target);
diff --git a/include/linux/mtd/xip.h b/include/linux/mtd/xip.h
index a4e352b1dfe6..3cac9360588f 100644
--- a/include/linux/mtd/xip.h
+++ b/include/linux/mtd/xip.h
@@ -28,7 +28,7 @@
* those functions so they get relocated to ram.
*/
#ifdef CONFIG_XIP_KERNEL
-#define __xipram noinline __attribute__ ((__section__ (".xiptext")))
+#define __xipram noinline __section(".xiptext")
#endif
/*
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index aca8f36dfac9..8f226d460f51 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -20,7 +20,17 @@
#include <linux/osq_lock.h>
#include <linux/debug_locks.h>
-struct ww_acquire_ctx;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+ , .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_SLEEP, \
+ }
+#else
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+#endif
+
+#ifndef CONFIG_PREEMPT_RT
/*
* Simple, straightforward mutexes with strict semantics:
@@ -52,7 +62,7 @@ struct ww_acquire_ctx;
*/
struct mutex {
atomic_long_t owner;
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
struct optimistic_spin_queue osq; /* Spinner MCS lock */
#endif
@@ -65,19 +75,6 @@ struct mutex {
#endif
};
-/*
- * This is the control structure for tasks blocked on mutex,
- * which resides on the blocked task's kernel stack:
- */
-struct mutex_waiter {
- struct list_head list;
- struct task_struct *task;
- struct ww_acquire_ctx *ww_ctx;
-#ifdef CONFIG_DEBUG_MUTEXES
- void *magic;
-#endif
-};
-
#ifdef CONFIG_DEBUG_MUTEXES
#define __DEBUG_MUTEX_INITIALIZER(lockname) \
@@ -108,16 +105,9 @@ do { \
__mutex_init((mutex), #mutex, &__key); \
} while (0)
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
- , .dep_map = { .name = #lockname }
-#else
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
-#endif
-
#define __MUTEX_INITIALIZER(lockname) \
{ .owner = ATOMIC_LONG_INIT(0) \
- , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
+ , .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
__DEBUG_MUTEX_INITIALIZER(lockname) \
__DEP_MAP_MUTEX_INITIALIZER(lockname) }
@@ -136,6 +126,50 @@ extern void __mutex_init(struct mutex *lock, const char *name,
*/
extern bool mutex_is_locked(struct mutex *lock);
+#else /* !CONFIG_PREEMPT_RT */
+/*
+ * Preempt-RT variant based on rtmutexes.
+ */
+#include <linux/rtmutex.h>
+
+struct mutex {
+ struct rt_mutex_base rtmutex;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#define __MUTEX_INITIALIZER(mutexname) \
+{ \
+ .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex) \
+ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \
+}
+
+#define DEFINE_MUTEX(mutexname) \
+ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+extern void __mutex_rt_init(struct mutex *lock, const char *name,
+ struct lock_class_key *key);
+extern int mutex_trylock(struct mutex *lock);
+
+static inline void mutex_destroy(struct mutex *lock) { }
+
+#define mutex_is_locked(l) rt_mutex_base_is_locked(&(l)->rtmutex)
+
+#define __mutex_init(mutex, name, key) \
+do { \
+ rt_mutex_base_init(&(mutex)->rtmutex); \
+ __mutex_rt_init((mutex), name, key); \
+} while (0)
+
+#define mutex_init(mutex) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __mutex_init((mutex), #mutex, &__key); \
+} while (0)
+#endif /* CONFIG_PREEMPT_RT */
+
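
Consumers are unaffected by the split: the same API compiles against either implementation. A minimal usage sketch (names hypothetical):

static DEFINE_MUTEX(example_lock);

static void example_critical_section(void)
{
	mutex_lock(&example_lock);
	/* ... data protected by example_lock ... */
	mutex_unlock(&example_lock);
}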
/*
* See kernel/locking/mutex.c for detailed documentation of these APIs.
* Also see Documentation/locking/mutex-design.rst.
@@ -171,7 +205,7 @@ extern void mutex_lock_io(struct mutex *lock);
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
-# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock)
+# define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock)
#endif
/*
@@ -185,29 +219,4 @@ extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
-/*
- * These values are chosen such that FAIL and SUCCESS match the
- * values of the regular mutex_trylock().
- */
-enum mutex_trylock_recursive_enum {
- MUTEX_TRYLOCK_FAILED = 0,
- MUTEX_TRYLOCK_SUCCESS = 1,
- MUTEX_TRYLOCK_RECURSIVE,
-};
-
-/**
- * mutex_trylock_recursive - trylock variant that allows recursive locking
- * @lock: mutex to be locked
- *
- * This function should not be used, _ever_. It is purely for hysterical GEM
- * raisins, and once those are gone this will be removed.
- *
- * Returns:
- * - MUTEX_TRYLOCK_FAILED - trylock failed,
- * - MUTEX_TRYLOCK_SUCCESS - lock acquired,
- * - MUTEX_TRYLOCK_RECURSIVE - we already owned the lock.
- */
-extern /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
-mutex_trylock_recursive(struct mutex *lock);
-
#endif /* __LINUX_MUTEX_H */
diff --git a/include/linux/mutex_api.h b/include/linux/mutex_api.h
new file mode 100644
index 000000000000..85ab9491e13e
--- /dev/null
+++ b/include/linux/mutex_api.h
@@ -0,0 +1 @@
+#include <linux/mutex.h>
diff --git a/include/linux/mux/consumer.h b/include/linux/mux/consumer.h
index 5fc6bb2fefad..2e25c838f831 100644
--- a/include/linux/mux/consumer.h
+++ b/include/linux/mux/consumer.h
@@ -14,18 +14,51 @@
struct device;
struct mux_control;
+struct mux_state;
unsigned int mux_control_states(struct mux_control *mux);
-int __must_check mux_control_select(struct mux_control *mux,
- unsigned int state);
-int __must_check mux_control_try_select(struct mux_control *mux,
- unsigned int state);
+int __must_check mux_control_select_delay(struct mux_control *mux,
+ unsigned int state,
+ unsigned int delay_us);
+int __must_check mux_state_select_delay(struct mux_state *mstate,
+ unsigned int delay_us);
+int __must_check mux_control_try_select_delay(struct mux_control *mux,
+ unsigned int state,
+ unsigned int delay_us);
+int __must_check mux_state_try_select_delay(struct mux_state *mstate,
+ unsigned int delay_us);
+
+static inline int __must_check mux_control_select(struct mux_control *mux,
+ unsigned int state)
+{
+ return mux_control_select_delay(mux, state, 0);
+}
+
+static inline int __must_check mux_state_select(struct mux_state *mstate)
+{
+ return mux_state_select_delay(mstate, 0);
+}
+
+static inline int __must_check mux_control_try_select(struct mux_control *mux,
+ unsigned int state)
+{
+ return mux_control_try_select_delay(mux, state, 0);
+}
+
+static inline int __must_check mux_state_try_select(struct mux_state *mstate)
+{
+ return mux_state_try_select_delay(mstate, 0);
+}
+
int mux_control_deselect(struct mux_control *mux);
+int mux_state_deselect(struct mux_state *mstate);
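
A sketch of the consumer flow with the new *_delay variants, asking the core to enforce a 10us settle time after switching (state and delay values are hypothetical):

static int example_use_mux(struct mux_control *mux)
{
	int ret = mux_control_select_delay(mux, 1, 10);

	if (ret < 0)
		return ret;

	/* ... access the line routed by state 1 ... */

	return mux_control_deselect(mux);
}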
struct mux_control *mux_control_get(struct device *dev, const char *mux_name);
void mux_control_put(struct mux_control *mux);
struct mux_control *devm_mux_control_get(struct device *dev,
const char *mux_name);
+struct mux_state *devm_mux_state_get(struct device *dev,
+ const char *mux_name);
#endif /* _LINUX_MUX_CONSUMER_H */
diff --git a/include/linux/mux/driver.h b/include/linux/mux/driver.h
index 627a2c6bc02d..18824064f8c0 100644
--- a/include/linux/mux/driver.h
+++ b/include/linux/mux/driver.h
@@ -12,6 +12,7 @@
#include <dt-bindings/mux/mux.h>
#include <linux/device.h>
+#include <linux/ktime.h>
#include <linux/semaphore.h>
struct mux_chip;
@@ -33,6 +34,7 @@ struct mux_control_ops {
* @states: The number of mux controller states.
* @idle_state: The mux controller state to use when inactive, or one
* of MUX_IDLE_AS_IS and MUX_IDLE_DISCONNECT.
+ * @last_change: Timestamp of last change
*
* Mux drivers may only change @states and @idle_state, and may only do so
* between allocation and registration of the mux controller. Specifically,
@@ -47,6 +49,8 @@ struct mux_control {
unsigned int states;
int idle_state;
+
+ ktime_t last_change;
};
/**
diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h
index 47e5679b48e1..000b126acfb6 100644
--- a/include/linux/mv643xx.h
+++ b/include/linux/mv643xx.h
@@ -918,12 +918,4 @@
extern void mv64340_irq_init(unsigned int base);
-/* Watchdog Platform Device, Driver Data */
-#define MV64x60_WDT_NAME "mv64x60_wdt"
-
-struct mv64x60_wdt_pdata {
- int timeout; /* watchdog expiry in seconds, default 10 */
- int bus_clk; /* bus clock in MHz, default 133 */
-};
-
#endif /* __ASM_MV643XX_H */
diff --git a/include/linux/n_r3964.h b/include/linux/n_r3964.h
deleted file mode 100644
index 90a803aa42e8..000000000000
--- a/include/linux/n_r3964.h
+++ /dev/null
@@ -1,175 +0,0 @@
-/* r3964 linediscipline for linux
- *
- * -----------------------------------------------------------
- * Copyright by
- * Philips Automation Projects
- * Kassel (Germany)
- * -----------------------------------------------------------
- * This software may be used and distributed according to the terms of
- * the GNU General Public License, incorporated herein by reference.
- *
- * Author:
- * L. Haag
- *
- * $Log: r3964.h,v $
- * Revision 1.4 2005/12/21 19:54:24 Kurt Huwig <kurt huwig de>
- * Fixed HZ usage on 2.6 kernels
- * Removed unnecessary include
- *
- * Revision 1.3 2001/03/18 13:02:24 dwmw2
- * Fix timer usage, use spinlocks properly.
- *
- * Revision 1.2 2001/03/18 12:53:15 dwmw2
- * Merge changes in 2.4.2
- *
- * Revision 1.1.1.1 1998/10/13 16:43:14 dwmw2
- * This'll screw the version control
- *
- * Revision 1.6 1998/09/30 00:40:38 dwmw2
- * Updated to use kernel's N_R3964 if available
- *
- * Revision 1.4 1998/04/02 20:29:44 lhaag
- * select, blocking, ...
- *
- * Revision 1.3 1998/02/12 18:58:43 root
- * fixed some memory leaks
- * calculation of checksum characters
- *
- * Revision 1.2 1998/02/07 13:03:17 root
- * ioctl read_telegram
- *
- * Revision 1.1 1998/02/06 19:19:43 root
- * Initial revision
- *
- *
- */
-#ifndef __LINUX_N_R3964_H__
-#define __LINUX_N_R3964_H__
-
-
-#include <linux/param.h>
-#include <uapi/linux/n_r3964.h>
-
-/*
- * Common ascii handshake characters:
- */
-
-#define STX 0x02
-#define ETX 0x03
-#define DLE 0x10
-#define NAK 0x15
-
-/*
- * Timeouts (from milliseconds to jiffies)
- */
-
-#define R3964_TO_QVZ ((550)*HZ/1000)
-#define R3964_TO_ZVZ ((220)*HZ/1000)
-#define R3964_TO_NO_BUF ((400)*HZ/1000)
-#define R3964_NO_TX_ROOM ((100)*HZ/1000)
-#define R3964_TO_RX_PANIC ((4000)*HZ/1000)
-#define R3964_MAX_RETRIES 5
-
-
-enum { R3964_IDLE,
- R3964_TX_REQUEST, R3964_TRANSMITTING,
- R3964_WAIT_ZVZ_BEFORE_TX_RETRY, R3964_WAIT_FOR_TX_ACK,
- R3964_WAIT_FOR_RX_BUF,
- R3964_RECEIVING, R3964_WAIT_FOR_BCC, R3964_WAIT_FOR_RX_REPEAT
- };
-
-/*
- * All open file-handles are 'clients' and are stored in a linked list:
- */
-
-struct r3964_message;
-
-struct r3964_client_info {
- spinlock_t lock;
- struct pid *pid;
- unsigned int sig_flags;
-
- struct r3964_client_info *next;
-
- struct r3964_message *first_msg;
- struct r3964_message *last_msg;
- struct r3964_block_header *next_block_to_read;
- int msg_count;
-};
-
-
-
-struct r3964_block_header;
-
-/* internal version of client_message: */
-struct r3964_message {
- int msg_id;
- int arg;
- int error_code;
- struct r3964_block_header *block;
- struct r3964_message *next;
-};
-
-/*
- * Header of received block in rx_buf/tx_buf:
- */
-
-struct r3964_block_header
-{
- unsigned int length; /* length in chars without header */
- unsigned char *data; /* usually data is located
- immediately behind this struct */
- unsigned int locks; /* only used in rx_buffer */
-
- struct r3964_block_header *next;
- struct r3964_client_info *owner; /* =NULL in rx_buffer */
-};
-
-/*
- * If rx_buf hasn't enough space to store R3964_MTU chars,
- * we will reject all incoming STX-requests by sending NAK.
- */
-
-#define RX_BUF_SIZE 4000
-#define TX_BUF_SIZE 4000
-#define R3964_MAX_BLOCKS_IN_RX_QUEUE 100
-
-#define R3964_PARITY 0x0001
-#define R3964_FRAME 0x0002
-#define R3964_OVERRUN 0x0004
-#define R3964_UNKNOWN 0x0008
-#define R3964_BREAK 0x0010
-#define R3964_CHECKSUM 0x0020
-#define R3964_ERROR 0x003f
-#define R3964_BCC 0x4000
-#define R3964_DEBUG 0x8000
-
-
-struct r3964_info {
- spinlock_t lock;
- struct tty_struct *tty;
- unsigned char priority;
- unsigned char *rx_buf; /* ring buffer */
- unsigned char *tx_buf;
-
- struct r3964_block_header *rx_first;
- struct r3964_block_header *rx_last;
- struct r3964_block_header *tx_first;
- struct r3964_block_header *tx_last;
- unsigned int tx_position;
- unsigned int rx_position;
- unsigned char last_rx;
- unsigned char bcc;
- unsigned int blocks_in_rx_queue;
-
- struct mutex read_lock; /* serialize r3964_read */
-
- struct r3964_client_info *firstClient;
- unsigned int state;
- unsigned int flags;
-
- struct timer_list tmr;
- int nRetry;
-};
-
-#endif
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 0dd980d7318f..00fee52df842 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -15,7 +15,7 @@ enum { MAX_NESTED_LINKS = 8 };
/*
* Type of the last component on LOOKUP_PARENT
*/
-enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
+enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT};
/* pathwalk mode */
#define LOOKUP_FOLLOW 0x0001 /* follow links at the end */
@@ -23,6 +23,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
#define LOOKUP_AUTOMOUNT 0x0004 /* force terminal automount */
#define LOOKUP_EMPTY 0x4000 /* accept empty path [user_... only] */
#define LOOKUP_DOWN 0x8000 /* follow mounts in the starting point */
+#define LOOKUP_MOUNTPOINT 0x0080 /* follow mounts in the end */
#define LOOKUP_REVAL 0x0020 /* tell ->d_revalidate() to trust no cache */
#define LOOKUP_RCU 0x0040 /* RCU pathwalk mode; semi-internal */
@@ -35,9 +36,6 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
/* internal use only */
#define LOOKUP_PARENT 0x0010
-#define LOOKUP_JUMPED 0x1000
-#define LOOKUP_ROOT 0x2000
-#define LOOKUP_ROOT_GRABBED 0x0008
/* Scoping flags for lookup. */
#define LOOKUP_NO_SYMLINKS 0x010000 /* No symlink crossing. */
@@ -45,6 +43,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
#define LOOKUP_NO_XDEV 0x040000 /* No mountpoint crossing. */
#define LOOKUP_BENEATH 0x080000 /* No escaping from starting point. */
#define LOOKUP_IN_ROOT 0x100000 /* Treat dirfd as fs root. */
+#define LOOKUP_CACHED 0x200000 /* Only do cached lookup */
/* LOOKUP_* flags which do scope-related checks based on the dirfd. */
#define LOOKUP_IS_SCOPED (LOOKUP_BENEATH | LOOKUP_IN_ROOT)
@@ -64,12 +63,18 @@ extern struct dentry *kern_path_create(int, const char *, struct path *, unsigne
extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int);
extern void done_path_create(struct path *, struct dentry *);
extern struct dentry *kern_path_locked(const char *, struct path *);
-extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int);
extern struct dentry *try_lookup_one_len(const char *, struct dentry *, int);
extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int);
extern struct dentry *lookup_positive_unlocked(const char *, struct dentry *, int);
+struct dentry *lookup_one(struct user_namespace *, const char *, struct dentry *, int);
+struct dentry *lookup_one_unlocked(struct user_namespace *mnt_userns,
+ const char *name, struct dentry *base,
+ int len);
+struct dentry *lookup_one_positive_unlocked(struct user_namespace *mnt_userns,
+ const char *name,
+ struct dentry *base, int len);
extern int follow_down_one(struct path *);
extern int follow_down(struct path *);
@@ -78,7 +83,7 @@ extern int follow_up(struct path *);
extern struct dentry *lock_rename(struct dentry *, struct dentry *);
extern void unlock_rename(struct dentry *, struct dentry *);
-extern int __must_check nd_jump_link(struct path *path);
+extern int __must_check nd_jump_link(const struct path *path);
static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
{
diff --git a/include/linux/nd.h b/include/linux/nd.h
index 55c735997805..b9771ba1ef87 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -8,9 +8,11 @@
#include <linux/ndctl.h>
#include <linux/device.h>
#include <linux/badblocks.h>
+#include <linux/perf_event.h>
enum nvdimm_event {
NVDIMM_REVALIDATE_POISON,
+ NVDIMM_REVALIDATE_REGION,
};
enum nvdimm_claim_class {
@@ -22,11 +24,62 @@ enum nvdimm_claim_class {
NVDIMM_CCLASS_UNKNOWN,
};
+#define NVDIMM_EVENT_VAR(_id) event_attr_##_id
+#define NVDIMM_EVENT_PTR(_id) (&event_attr_##_id.attr.attr)
+
+#define NVDIMM_EVENT_ATTR(_name, _id) \
+ PMU_EVENT_ATTR(_name, NVDIMM_EVENT_VAR(_id), _id, \
+ nvdimm_events_sysfs_show)
+
+/* Event attribute array index */
+#define NVDIMM_PMU_FORMAT_ATTR 0
+#define NVDIMM_PMU_EVENT_ATTR 1
+#define NVDIMM_PMU_CPUMASK_ATTR 2
+#define NVDIMM_PMU_NULL_ATTR 3
+
+/**
+ * struct nvdimm_pmu - data structure for nvdimm perf driver
+ * @pmu: pmu data structure for nvdimm performance stats.
+ * @dev: nvdimm device pointer.
+ * @cpu: designated cpu for counter access.
+ * @node: node for cpu hotplug notifier link.
+ * @cpuhp_state: state for cpu hotplug notification.
+ * @arch_cpumask: cpumask to get designated cpu for counter access.
+ */
+struct nvdimm_pmu {
+ struct pmu pmu;
+ struct device *dev;
+ int cpu;
+ struct hlist_node node;
+ enum cpuhp_state cpuhp_state;
+ /* cpumask provided by arch/platform specific code */
+ struct cpumask arch_cpumask;
+};
+
+struct platform_device;
+
+#ifdef CONFIG_PERF_EVENTS
+extern ssize_t nvdimm_events_sysfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page);
+
+int register_nvdimm_pmu(struct nvdimm_pmu *nvdimm, struct platform_device *pdev);
+void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu);
+
+#else
+static inline int register_nvdimm_pmu(struct nvdimm_pmu *nvdimm, struct platform_device *pdev)
+{
+ return -ENXIO;
+}
+
+static inline void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu) { }
+#endif
+
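
A sketch, assuming a platform driver that registers its counters through the new interface (names hypothetical, error handling trimmed):

static int example_probe(struct platform_device *pdev)
{
	struct nvdimm_pmu *nd_pmu;

	nd_pmu = devm_kzalloc(&pdev->dev, sizeof(*nd_pmu), GFP_KERNEL);
	if (!nd_pmu)
		return -ENOMEM;

	/* Fill in nd_pmu->pmu callbacks and attribute groups here. */

	return register_nvdimm_pmu(nd_pmu, pdev);
}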
struct nd_device_driver {
struct device_driver drv;
unsigned long type;
int (*probe)(struct device *dev);
- int (*remove)(struct device *dev);
+ void (*remove)(struct device *dev);
void (*shutdown)(struct device *dev);
void (*notify)(struct device *dev, enum nvdimm_event event);
};
@@ -87,31 +140,10 @@ struct nd_namespace_pmem {
struct nd_namespace_io nsio;
unsigned long lbasize;
char *alt_name;
- u8 *uuid;
+ uuid_t *uuid;
int id;
};
-/**
- * struct nd_namespace_blk - namespace for dimm-bounded persistent memory
- * @alt_name: namespace name supplied in the dimm label
- * @uuid: namespace name supplied in the dimm label
- * @id: ida allocated id
- * @lbasize: blk namespaces have a native sector size when btt not present
- * @size: sum of all the resource ranges allocated to this namespace
- * @num_resources: number of dpa extents to claim
- * @res: discontiguous dpa extents for given dimm
- */
-struct nd_namespace_blk {
- struct nd_namespace_common common;
- char *alt_name;
- u8 *uuid;
- int id;
- unsigned long lbasize;
- resource_size_t size;
- int num_resources;
- struct resource **res;
-};
-
static inline struct nd_namespace_io *to_nd_namespace_io(const struct device *dev)
{
return container_of(dev, struct nd_namespace_io, common.dev);
@@ -124,11 +156,6 @@ static inline struct nd_namespace_pmem *to_nd_namespace_pmem(const struct device
return container_of(nsio, struct nd_namespace_pmem, nsio);
}
-static inline struct nd_namespace_blk *to_nd_namespace_blk(const struct device *dev)
-{
- return container_of(dev, struct nd_namespace_blk, common.dev);
-}
-
/**
* nvdimm_read_bytes() - synchronously read bytes from an nvdimm namespace
* @ndns: device to read
diff --git a/include/linux/net.h b/include/linux/net.h
index 6451425e828f..18d942bbdf6e 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -21,6 +21,8 @@
#include <linux/rcupdate.h>
#include <linux/once.h>
#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/sockptr.h>
#include <uapi/linux/net.h>
@@ -39,6 +41,7 @@ struct net;
#define SOCK_NOSPACE 2
#define SOCK_PASSCRED 3
#define SOCK_PASSSEC 4
+#define SOCK_SUPPORT_ZC 5
#ifndef ARCH_HAS_SOCKET_TYPES
/**
@@ -123,6 +126,25 @@ struct socket {
struct socket_wq wq;
};
+/*
+ * "descriptor" for what we're up to with a read.
+ * This allows us to use the same read code yet
+ * have multiple different users of the data that
+ * we read from a file.
+ *
+ * The simplest case just copies the data to user
+ * mode.
+ */
+typedef struct {
+ size_t written;
+ size_t count;
+ union {
+ char __user *buf;
+ void *data;
+ } arg;
+ int error;
+} read_descriptor_t;
+
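read_descriptor_t is the cursor that ->read_sock() hands to each sk_read_actor_t call: count is what the caller still wants, written accumulates progress, and error aborts the walk. A hedged sketch of an actor that merely tallies bytes (foo_* names hypothetical, and the assumption that desc->arg.data points at a caller-supplied u64 is ours):

/* Hypothetical sk_read_actor_t: counts consumed bytes. */
static int foo_count_actor(read_descriptor_t *desc, struct sk_buff *skb,
			   unsigned int offset, size_t len)
{
	size_t take = min_t(size_t, len, desc->count);
	u64 *total = desc->arg.data;

	*total += take;
	desc->written += take;
	desc->count -= take;

	/* returning less than len stops the ->read_sock() loop early */
	return take;
}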
struct vm_area_struct;
struct page;
struct sockaddr;
@@ -131,6 +153,8 @@ struct module;
struct sk_buff;
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
unsigned int, size_t);
+typedef int (*skb_read_actor_t)(struct sock *, struct sk_buff *);
+
struct proto_ops {
int family;
@@ -162,15 +186,10 @@ struct proto_ops {
int (*listen) (struct socket *sock, int len);
int (*shutdown) (struct socket *sock, int flags);
int (*setsockopt)(struct socket *sock, int level,
- int optname, char __user *optval, unsigned int optlen);
+ int optname, sockptr_t optval,
+ unsigned int optlen);
int (*getsockopt)(struct socket *sock, int level,
int optname, char __user *optval, int __user *optlen);
-#ifdef CONFIG_COMPAT
- int (*compat_setsockopt)(struct socket *sock, int level,
- int optname, char __user *optval, unsigned int optlen);
- int (*compat_getsockopt)(struct socket *sock, int level,
- int optname, char __user *optval, int __user *optlen);
-#endif
void (*show_fdinfo)(struct seq_file *m, struct socket *sock);
int (*sendmsg) (struct socket *sock, struct msghdr *m,
size_t total_len);
@@ -198,6 +217,8 @@ struct proto_ops {
*/
int (*read_sock)(struct sock *sk, read_descriptor_t *desc,
sk_read_actor_t recv_actor);
+ /* This is different from read_sock(); it reads an entire skb at a time. */
+ int (*read_skb)(struct sock *sk, skb_read_actor_t recv_actor);
int (*sendpage_locked)(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
int (*sendmsg_locked)(struct sock *sk, struct msghdr *msg,
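Where read_sock() walks byte ranges through a descriptor, the new read_skb() hook hands the actor one complete skb per call, so there is no descriptor bookkeeping at all. A sketch of the matching skb_read_actor_t (hypothetical names; ownership conventions depend on the protocol's ->read_skb() implementation):

/* Hypothetical skb_read_actor_t: consumes one whole skb per invocation. */
static int foo_skb_actor(struct sock *sk, struct sk_buff *skb)
{
	/* parse skb->len bytes of payload here */
	return skb->len;
}

/* later, on a connected socket:
 *	err = sock->ops->read_skb(sk, foo_skb_actor);
 */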
@@ -240,7 +261,7 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg);
int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags);
struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
struct socket *sockfd_lookup(int fd, int *err);
-struct socket *sock_from_file(struct file *file, int *err);
+struct socket *sock_from_file(struct file *file);
#define sockfd_put(sock) fput(sock->file)
int net_ratelimit(void);
@@ -264,7 +285,8 @@ do { \
net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
#define net_info_ratelimited(fmt, ...) \
net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define net_dbg_ratelimited(fmt, ...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
@@ -286,8 +308,21 @@ do { \
#define net_get_random_once(buf, nbytes) \
get_random_once((buf), (nbytes))
-#define net_get_random_once_wait(buf, nbytes) \
- get_random_once_wait((buf), (nbytes))
+
+/*
+ * E.g. XFS meta- & log-data is in slab pages, or bcache meta
+ * data pages, or other high order pages allocated by
+ * __get_free_pages() without __GFP_COMP, which have a page_count
+ * of 0 and/or have PageSlab() set. We cannot use sendpage for
+ * those, as that does get_page(); put_page(); and would cause
+ * either a VM_BUG directly, or __page_cache_release a page that
+ * would actually still be referenced by someone, leading to some
+ * obscure delayed Oops somewhere else.
+ */
+static inline bool sendpage_ok(struct page *page)
+{
+ return !PageSlab(page) && page_count(page) >= 1;
+}
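sendpage_ok() exists precisely so callers can take a copying path for the slab and zero-refcount pages described above. A hedged example of the pattern, using only the kernel_sendpage()/kernel_sendmsg() declarations visible in this header (the foo_ framing is illustrative):

/* Send one page, falling back to a copying kvec when the page is not
 * safe for the zero-copy sendpage path.
 */
static int foo_send_page(struct socket *sock, struct page *page,
			 int offset, size_t len)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov;

	if (sendpage_ok(page))
		return kernel_sendpage(sock, page, offset, len, MSG_DONTWAIT);

	/* copying fallback for slab/zero-refcount pages */
	iov.iov_base = page_address(page) + offset;
	iov.iov_len = len;
	return kernel_sendmsg(sock, &msg, &iov, 1, len);
}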
int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
size_t num, size_t len);
@@ -303,10 +338,6 @@ int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
int flags);
int kernel_getsockname(struct socket *sock, struct sockaddr *addr);
int kernel_getpeername(struct socket *sock, struct sockaddr *addr);
-int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval,
- int *optlen);
-int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval,
- unsigned int optlen);
int kernel_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags);
int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/include/linux/net/intel/i40e_client.h
index 72994baf4941..ed42bd5f639f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
+++ b/include/linux/net/intel/i40e_client.h
@@ -4,6 +4,8 @@
#ifndef _I40E_CLIENT_H_
#define _I40E_CLIENT_H_
+#include <linux/auxiliary_bus.h>
+
#define I40E_CLIENT_STR_LENGTH 10
/* Client interface version should be updated anytime there is a change in the
@@ -24,11 +26,6 @@ struct i40e_client_version {
u8 rsvd;
};
-enum i40e_client_state {
- __I40E_CLIENT_NULL,
- __I40E_CLIENT_REGISTERED
-};
-
enum i40e_client_instance_state {
__I40E_CLIENT_INSTANCE_NONE,
__I40E_CLIENT_INSTANCE_OPENED,
@@ -37,11 +34,6 @@ enum i40e_client_instance_state {
struct i40e_ops;
struct i40e_client;
-/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
- * In order for us to keep the interface simple, SW will define a
- * unique type value for AEQ.
- */
-#define I40E_QUEUE_TYPE_PE_AEQ 0x80
#define I40E_QUEUE_INVALID_IDX 0xFFFF
struct i40e_qv_info {
@@ -53,10 +45,9 @@ struct i40e_qv_info {
struct i40e_qvlist_info {
u32 num_vectors;
- struct i40e_qv_info qv_info[1];
+ struct i40e_qv_info qv_info[];
};
-#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF
/* set of LAN parameters useful for clients managed by LAN */
@@ -84,10 +75,10 @@ struct i40e_info {
u8 lanmac[6];
struct net_device *netdev;
struct pci_dev *pcidev;
+ struct auxiliary_device *aux_dev;
u8 __iomem *hw_addr;
u8 fid; /* function id, PF id or VF id */
#define I40E_CLIENT_FTYPE_PF 0
-#define I40E_CLIENT_FTYPE_VF 1
u8 ftype; /* function type, PF or VF */
void *pf;
@@ -107,6 +98,11 @@ struct i40e_info {
u32 fw_build; /* firmware build number */
};
+struct i40e_auxiliary_device {
+ struct auxiliary_device aux_dev;
+ struct i40e_info *ldev;
+};
+
#define I40E_CLIENT_RESET_LEVEL_PF 1
#define I40E_CLIENT_RESET_LEVEL_CORE 2
#define I40E_CLIENT_VSI_FLAG_TCP_ENABLE BIT(1)
@@ -184,20 +180,12 @@ struct i40e_client {
unsigned long state; /* client state */
atomic_t ref_cnt; /* Count of all the client devices of this kind */
u32 flags;
-#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
-#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
u8 type;
#define I40E_CLIENT_IWARP 0
const struct i40e_client_ops *ops; /* client ops provided by the client */
};
-static inline bool i40e_client_is_registered(struct i40e_client *client)
-{
- return test_bit(__I40E_CLIENT_REGISTERED, &client->state);
-}
-
-/* used by clients */
-int i40e_register_client(struct i40e_client *client);
-int i40e_unregister_client(struct i40e_client *client);
+void i40e_client_device_register(struct i40e_info *ldev, struct i40e_client *client);
+void i40e_client_device_unregister(struct i40e_info *ldev);
#endif /* _I40E_CLIENT_H_ */
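The register/unregister pair above inverts the old i40e_register_client() flow: instead of the client registering itself with i40e, the consumer binds to an auxiliary device and registers against the i40e_info it finds there. A hedged sketch of that consumer side (auxiliary-bus boilerplate elided; foo_* names and the client members not shown in this hunk are assumptions):

/* Hypothetical i40e iwarp auxiliary driver. */
static struct i40e_client foo_client = {
	.type = I40E_CLIENT_IWARP,
	/* .name and .ops, not shown in this hunk, elided */
};

static int foo_aux_probe(struct auxiliary_device *aux_dev,
			 const struct auxiliary_device_id *id)
{
	struct i40e_auxiliary_device *i40e_adev =
		container_of(aux_dev, struct i40e_auxiliary_device, aux_dev);

	i40e_client_device_register(i40e_adev->ldev, &foo_client);
	return 0;
}

static void foo_aux_remove(struct auxiliary_device *aux_dev)
{
	struct i40e_auxiliary_device *i40e_adev =
		container_of(aux_dev, struct i40e_auxiliary_device, aux_dev);

	i40e_client_device_unregister(i40e_adev->ldev);
}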
diff --git a/include/linux/net/intel/iidc.h b/include/linux/net/intel/iidc.h
new file mode 100644
index 000000000000..1c1332e4df26
--- /dev/null
+++ b/include/linux/net/intel/iidc.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021, Intel Corporation. */
+
+#ifndef _IIDC_H_
+#define _IIDC_H_
+
+#include <linux/auxiliary_bus.h>
+#include <linux/dcbnl.h>
+#include <linux/device.h>
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+
+enum iidc_event_type {
+ IIDC_EVENT_BEFORE_MTU_CHANGE,
+ IIDC_EVENT_AFTER_MTU_CHANGE,
+ IIDC_EVENT_BEFORE_TC_CHANGE,
+ IIDC_EVENT_AFTER_TC_CHANGE,
+ IIDC_EVENT_CRIT_ERR,
+ IIDC_EVENT_NBITS /* must be last */
+};
+
+enum iidc_reset_type {
+ IIDC_PFR,
+ IIDC_CORER,
+ IIDC_GLOBR,
+};
+
+enum iidc_rdma_protocol {
+ IIDC_RDMA_PROTOCOL_IWARP = BIT(0),
+ IIDC_RDMA_PROTOCOL_ROCEV2 = BIT(1),
+};
+
+#define IIDC_MAX_USER_PRIORITY 8
+#define IIDC_MAX_DSCP_MAPPING 64
+#define IIDC_DSCP_PFC_MODE 0x1
+
+/* Struct to hold per RDMA Qset info */
+struct iidc_rdma_qset_params {
+ /* Qset TEID returned to the RDMA driver in
+ * ice_add_rdma_qset() and used by the RDMA driver
+ * for calls to ice_del_rdma_qset()
+ */
+ u32 teid; /* Qset TEID */
+ u16 qs_handle; /* RDMA driver provides this */
+ u16 vport_id; /* VSI index */
+ u8 tc; /* TC branch the Qset should belong to */
+};
+
+struct iidc_qos_info {
+ u64 tc_ctx;
+ u8 rel_bw;
+ u8 prio_type;
+ u8 egress_virt_up;
+ u8 ingress_virt_up;
+};
+
+/* Struct to pass QoS info */
+struct iidc_qos_params {
+ struct iidc_qos_info tc_info[IEEE_8021QAZ_MAX_TCS];
+ u8 up2tc[IIDC_MAX_USER_PRIORITY];
+ u8 vport_relative_bw;
+ u8 vport_priority_type;
+ u8 num_tc;
+ u8 pfc_mode;
+ u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
+};
+
+struct iidc_event {
+ DECLARE_BITMAP(type, IIDC_EVENT_NBITS);
+ u32 reg;
+};
+
+struct ice_pf;
+
+int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset);
+int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset);
+int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type);
+int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable);
+void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos);
+
+/* Structure representing auxiliary-driver-tailored information about the core
+ * PCI dev. Each auxiliary driver using the IIDC interface will have an
+ * instance of this struct dedicated to it.
+ */
+
+struct iidc_auxiliary_dev {
+ struct auxiliary_device adev;
+ struct ice_pf *pf;
+};
+
+/* Structure representing the auxiliary driver. This struct is to be
+ * allocated and populated by the auxiliary driver's owner. The core PCI
+ * driver will access these ops by performing a container_of on the
+ * auxiliary_device->dev.driver.
+ */
+struct iidc_auxiliary_drv {
+ struct auxiliary_driver adrv;
+ /* This event_handler is meant to be a blocking call. For instance,
+ * when a BEFORE_MTU_CHANGE event comes in, the event_handler will not
+ * return until the auxiliary driver is ready for the MTU change to
+ * happen.
+ */
+ void (*event_handler)(struct ice_pf *pf, struct iidc_event *event);
+};
+
+#endif /* _IIDC_H_ */
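The IIDC contract mirrors the i40e one: the RDMA driver embeds struct iidc_auxiliary_drv so the ice core can container_of() its way to the event handler, and it calls back into ice for Qset setup. A minimal hypothetical sketch, using only the declarations in this new header:

/* Hypothetical IIDC consumer: react to events and add one RDMA Qset. */
static void foo_event_handler(struct ice_pf *pf, struct iidc_event *event)
{
	if (test_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type)) {
		/* quiesce QPs; must not return until ready for the change */
	}
}

static struct iidc_auxiliary_drv foo_adrv = {
	/* .adrv.probe/.remove filled in as for any auxiliary driver */
	.event_handler = foo_event_handler,
};

static int foo_setup_qset(struct ice_pf *pf, u16 qs_handle, u16 vsi, u8 tc)
{
	struct iidc_rdma_qset_params qset = {
		.qs_handle = qs_handle,
		.vport_id = vsi,
		.tc = tc,
	};
	int err;

	err = ice_add_rdma_qset(pf, &qset);
	if (err)
		return err;

	/* qset.teid is now valid and must be kept for ice_del_rdma_qset() */
	return 0;
}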
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 34d050bb1ae6..7c2d77d75a88 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -83,11 +83,19 @@ enum {
NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */
NETIF_F_GRO_FRAGLIST_BIT, /* Fraglist GRO */
+ NETIF_F_HW_MACSEC_BIT, /* Offload MACsec operations */
+ NETIF_F_GRO_UDP_FWD_BIT, /* Allow UDP GRO for forwarding */
+
+ NETIF_F_HW_HSR_TAG_INS_BIT, /* Offload HSR tag insertion */
+ NETIF_F_HW_HSR_TAG_RM_BIT, /* Offload HSR tag removal */
+ NETIF_F_HW_HSR_FWD_BIT, /* Offload HSR forwarding */
+ NETIF_F_HW_HSR_DUP_BIT, /* Offload HSR duplication */
+
/*
* Add your fresh new feature above and remember to update
- * netdev_features_strings[] in net/core/ethtool.c and maybe
+ * netdev_features_strings[] in net/ethtool/common.c and maybe
* some feature mask #defines below. Please also describe it
- * in Documentation/networking/netdev-features.txt.
+ * in Documentation/networking/netdev-features.rst.
*/
/**/NETDEV_FEATURE_COUNT
@@ -154,8 +162,14 @@ enum {
#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX)
#define NETIF_F_GRO_FRAGLIST __NETIF_F(GRO_FRAGLIST)
#define NETIF_F_GSO_FRAGLIST __NETIF_F(GSO_FRAGLIST)
+#define NETIF_F_HW_MACSEC __NETIF_F(HW_MACSEC)
+#define NETIF_F_GRO_UDP_FWD __NETIF_F(GRO_UDP_FWD)
+#define NETIF_F_HW_HSR_TAG_INS __NETIF_F(HW_HSR_TAG_INS)
+#define NETIF_F_HW_HSR_TAG_RM __NETIF_F(HW_HSR_TAG_RM)
+#define NETIF_F_HW_HSR_FWD __NETIF_F(HW_HSR_FWD)
+#define NETIF_F_HW_HSR_DUP __NETIF_F(HW_HSR_DUP)
-/* Finds the next feature with the highest number of the range of start till 0.
+/* Finds the next feature bit with the highest number in the range start-1 down to 0.
*/
static inline int find_next_netdev_feature(u64 feature, unsigned long start)
{
@@ -174,7 +188,7 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
for ((bit) = find_next_netdev_feature((mask_addr), \
NETDEV_FEATURE_COUNT); \
(bit) >= 0; \
- (bit) = find_next_netdev_feature((mask_addr), (bit) - 1))
+ (bit) = find_next_netdev_feature((mask_addr), (bit)))
/* Features valid for ethtool to change */
/* = all defined minus driver/device-class-related */
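With the off-by-one moved into the helper, the iteration macro above simply feeds back the bit it just visited and find_next_netdev_feature() scans from start-1 down to 0 itself. A hedged open-coded equivalent of that walk (foo_ name illustrative):

/* Open-coded walk over the set bits of a netdev_features_t mask,
 * highest bit first; equivalent to the iteration macro above.
 */
static void foo_dump_features(netdev_features_t features)
{
	int bit;

	for (bit = find_next_netdev_feature(features, NETDEV_FEATURE_COUNT);
	     bit >= 0;
	     bit = find_next_netdev_feature(features, bit))
		pr_info("feature bit %d set\n", bit);
}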
@@ -190,7 +204,7 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
#define NETIF_F_GSO_MASK (__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \
__NETIF_F_BIT(NETIF_F_GSO_SHIFT))
-/* List of IP checksum features. Note that NETIF_F_ HW_CSUM should not be
+/* List of IP checksum features. Note that NETIF_F_HW_CSUM should not be
* set in features when NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM are set--
* this would be contradictory
*/
@@ -204,8 +218,8 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
NETIF_F_FSO)
/* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | \
- NETIF_F_GSO_SCTP)
+#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_GSO_SCTP | \
+ NETIF_F_GSO_UDP_L4 | NETIF_F_GSO_FRAGLIST)
/*
* If one device supports one of these features, then enable them
@@ -231,7 +245,7 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
/* Changeable features with no special hardware requirements that defaults to off. */
-#define NETIF_F_SOFT_FEATURES_OFF NETIF_F_GRO_FRAGLIST
+#define NETIF_F_SOFT_FEATURES_OFF (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD)
#define NETIF_F_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
NETIF_F_HW_VLAN_CTAG_RX | \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 6c3f7032e8d9..eddf8ee270e7 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -28,13 +28,13 @@
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
+#include <asm/local.h>
#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>
-#include <linux/ethtool.h>
#include <net/net_namespace.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
@@ -48,12 +48,20 @@
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
#include <linux/hashtable.h>
+#include <linux/rbtree.h>
+#include <net/net_trackers.h>
+#include <net/net_debug.h>
struct netpoll_info;
struct device;
+struct ethtool_ops;
struct phy_device;
struct dsa_port;
-
+struct ip_tunnel_parm;
+struct macsec_context;
+struct macsec_ops;
+struct netdev_name_node;
+struct sd_flow_limit;
struct sfp_bus;
/* 802.11 specific */
struct wireless_dev;
@@ -62,9 +70,12 @@ struct wpan_dev;
struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
+struct udp_tunnel_nic_info;
+struct udp_tunnel_nic;
struct bpf_prog;
struct xdp_buff;
+void synchronize_net(void);
void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops);
@@ -186,6 +197,15 @@ struct net_device_stats {
unsigned long tx_compressed;
};
+/* per-cpu stats, allocated on demand.
+ * Try to fit them in a single cache line, for dev_get_stats() sake.
+ */
+struct net_device_core_stats {
+ unsigned long rx_dropped;
+ unsigned long tx_dropped;
+ unsigned long rx_nohandler;
+ unsigned long rx_otherhost_dropped;
+} __aligned(4 * sizeof(unsigned long));
#include <linux/cache.h>
#include <linux/skbuff.h>
@@ -202,13 +222,13 @@ struct sk_buff;
struct netdev_hw_addr {
struct list_head list;
+ struct rb_node node;
unsigned char addr[MAX_ADDR_LEN];
unsigned char type;
#define NETDEV_HW_ADDR_T_LAN 1
#define NETDEV_HW_ADDR_T_SAN 2
-#define NETDEV_HW_ADDR_T_SLAVE 3
-#define NETDEV_HW_ADDR_T_UNICAST 4
-#define NETDEV_HW_ADDR_T_MULTICAST 5
+#define NETDEV_HW_ADDR_T_UNICAST 3
+#define NETDEV_HW_ADDR_T_MULTICAST 4
bool global_use;
int sync_cnt;
int refcount;
@@ -219,6 +239,9 @@ struct netdev_hw_addr {
struct netdev_hw_addr_list {
struct list_head list;
int count;
+
+ /* Auxiliary tree for faster lookup on addition and deletion */
+ struct rb_root tree;
};
#define netdev_hw_addr_list_count(l) ((l)->count)
@@ -230,11 +253,17 @@ struct netdev_hw_addr_list {
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
netdev_hw_addr_list_for_each(ha, &(dev)->uc)
+#define netdev_for_each_synced_uc_addr(_ha, _dev) \
+ netdev_for_each_uc_addr((_ha), (_dev)) \
+ if ((_ha)->sync_cnt)
#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
netdev_hw_addr_list_for_each(ha, &(dev)->mc)
+#define netdev_for_each_synced_mc_addr(_ha, _dev) \
+ netdev_for_each_mc_addr((_ha), (_dev)) \
+ if ((_ha)->sync_cnt)
struct hh_cache {
unsigned int hh_len;
@@ -286,21 +315,9 @@ enum netdev_state_t {
__LINK_STATE_NOCARRIER,
__LINK_STATE_LINKWATCH_PENDING,
__LINK_STATE_DORMANT,
+ __LINK_STATE_TESTING,
};
-
-/*
- * This structure holds boot-time configured netdevice settings. They
- * are then used in the device probing.
- */
-struct netdev_boot_setup {
- char name[IFNAMSIZ];
- struct ifmap map;
-};
-#define NETDEV_BOOT_SETUP_MAX 8
-
-int __init netdev_boot_setup(char *str);
-
struct gro_list {
struct list_head list;
int count;
@@ -326,6 +343,7 @@ struct napi_struct {
unsigned long state;
int weight;
+ int defer_hard_irqs_count;
unsigned long gro_bitmask;
int (*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
@@ -340,26 +358,33 @@ struct napi_struct {
struct list_head dev_list;
struct hlist_node napi_hash_node;
unsigned int napi_id;
+ struct task_struct *thread;
};
enum {
- NAPI_STATE_SCHED, /* Poll is scheduled */
- NAPI_STATE_MISSED, /* reschedule a napi */
- NAPI_STATE_DISABLE, /* Disable pending */
- NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
- NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */
- NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
- NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
+ NAPI_STATE_SCHED, /* Poll is scheduled */
+ NAPI_STATE_MISSED, /* reschedule a napi */
+ NAPI_STATE_DISABLE, /* Disable pending */
+ NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
+ NAPI_STATE_LISTED, /* NAPI added to system lists */
+ NAPI_STATE_NO_BUSY_POLL, /* Do not add in napi_hash, no busy polling */
+ NAPI_STATE_IN_BUSY_POLL, /* sk_busy_loop() owns this NAPI */
+ NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing */
+ NAPI_STATE_THREADED, /* The poll is performed inside its own thread */
+ NAPI_STATE_SCHED_THREADED, /* Napi is currently scheduled in threaded mode */
};
enum {
- NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED),
- NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED),
- NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE),
- NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC),
- NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED),
- NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
- NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
+ NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED),
+ NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED),
+ NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE),
+ NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC),
+ NAPIF_STATE_LISTED = BIT(NAPI_STATE_LISTED),
+ NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
+ NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
+ NAPIF_STATE_PREFER_BUSY_POLL = BIT(NAPI_STATE_PREFER_BUSY_POLL),
+ NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED),
+ NAPIF_STATE_SCHED_THREADED = BIT(NAPI_STATE_SCHED_THREADED),
};
enum gro_result {
@@ -367,7 +392,6 @@ enum gro_result {
GRO_MERGED_FREE,
GRO_HELD,
GRO_NORMAL,
- GRO_DROP,
GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;
@@ -430,6 +454,11 @@ static inline bool napi_disable_pending(struct napi_struct *n)
return test_bit(NAPI_STATE_DISABLE, &n->state);
}
+static inline bool napi_prefer_busy_poll(struct napi_struct *n)
+{
+ return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
+}
+
bool napi_schedule_prep(struct napi_struct *n);
/**
@@ -481,19 +510,7 @@ static inline bool napi_complete(struct napi_struct *n)
return napi_complete_done(n, 0);
}
-/**
- * napi_hash_del - remove a NAPI from global table
- * @napi: NAPI context
- *
- * Warning: caller must observe RCU grace period
- * before freeing memory containing @napi, if
- * this function returns true.
- * Note: core networking stack automatically calls it
- * from netif_napi_del().
- * Drivers might want to call this helper to combine all
- * the needed RCU grace periods into a single one.
- */
-bool napi_hash_del(struct napi_struct *napi);
+int dev_set_threaded(struct net_device *dev, bool threaded);
/**
* napi_disable - prevent NAPI from scheduling
@@ -504,20 +521,7 @@ bool napi_hash_del(struct napi_struct *napi);
*/
void napi_disable(struct napi_struct *n);
-/**
- * napi_enable - enable NAPI scheduling
- * @n: NAPI context
- *
- * Resume NAPI from being scheduled on this context.
- * Must be paired with napi_disable.
- */
-static inline void napi_enable(struct napi_struct *n)
-{
- BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
- smp_mb__before_atomic();
- clear_bit(NAPI_STATE_SCHED, &n->state);
- clear_bit(NAPI_STATE_NPSVC, &n->state);
-}
+void napi_enable(struct napi_struct *n);
/**
* napi_synchronize - wait until NAPI is not running
@@ -548,8 +552,8 @@ static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
{
unsigned long val, new;
+ val = READ_ONCE(n->state);
do {
- val = READ_ONCE(n->state);
if (val & NAPIF_STATE_DISABLE)
return true;
@@ -557,7 +561,7 @@ static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
return false;
new = val | NAPIF_STATE_MISSED;
- } while (cmpxchg(&n->state, val, new) != val);
+ } while (!try_cmpxchg(&n->state, &val, new));
return true;
}
@@ -593,6 +597,8 @@ struct netdev_queue {
* read-mostly part
*/
struct net_device *dev;
+ netdevice_tracker dev_tracker;
+
struct Qdisc __rcu *qdisc;
struct Qdisc *qdisc_sleeping;
#ifdef CONFIG_SYSFS
@@ -606,12 +612,12 @@ struct netdev_queue {
* Number of TX timeouts for this queue
* (/sys/class/net/DEV/Q/trans_timeout)
*/
- unsigned long trans_timeout;
+ atomic_long_t trans_timeout;
/* Subordinate device that the queue has been assigned to */
struct net_device *sb_dev;
#ifdef CONFIG_XDP_SOCKETS
- struct xdp_umem *umem;
+ struct xsk_buff_pool *pool;
#endif
/*
* write-mostly part
@@ -633,11 +639,30 @@ struct netdev_queue {
extern int sysctl_fb_tunnels_only_for_init_net;
extern int sysctl_devconf_inherit_init_net;
+/*
+ * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns
+ * == 1 : For initns only
+ * == 2 : For none.
+ */
static inline bool net_has_fallback_tunnels(const struct net *net)
{
- return net == &init_net ||
- !IS_ENABLED(CONFIG_SYSCTL) ||
- !sysctl_fb_tunnels_only_for_init_net;
+#if IS_ENABLED(CONFIG_SYSCTL)
+ int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);
+
+ return !fb_tunnels_only_for_init_net ||
+ (net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
+#else
+ return true;
+#endif
+}
+
+static inline int net_inherit_devconf(void)
+{
+#if IS_ENABLED(CONFIG_SYSCTL)
+ return READ_ONCE(sysctl_devconf_inherit_init_net);
+#else
+ return 0;
+#endif
}
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -664,7 +689,7 @@ static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node
struct rps_map {
unsigned int len;
struct rcu_head rcu;
- u16 cpus[0];
+ u16 cpus[];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
@@ -686,7 +711,7 @@ struct rps_dev_flow {
struct rps_dev_flow_table {
unsigned int mask;
struct rcu_head rcu;
- struct rps_dev_flow flows[0];
+ struct rps_dev_flow flows[];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
((_num) * sizeof(struct rps_dev_flow)))
@@ -704,7 +729,7 @@ struct rps_dev_flow_table {
struct rps_sock_flow_table {
u32 mask;
- u32 ents[0] ____cacheline_aligned_in_smp;
+ u32 ents[] ____cacheline_aligned_in_smp;
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
@@ -736,15 +761,17 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
+ struct xdp_rxq_info xdp_rxq;
#ifdef CONFIG_RPS
struct rps_map __rcu *rps_map;
struct rps_dev_flow_table __rcu *rps_flow_table;
#endif
struct kobject kobj;
struct net_device *dev;
- struct xdp_rxq_info xdp_rxq;
+ netdevice_tracker dev_tracker;
+
#ifdef CONFIG_XDP_SOCKETS
- struct xdp_umem *umem;
+ struct xsk_buff_pool *pool;
#endif
} ____cacheline_aligned_in_smp;
@@ -758,6 +785,13 @@ struct rx_queue_attribute {
const char *buf, size_t len);
};
+/* XPS map type and offset of the xps map within net_device->xps_maps[]. */
+enum xps_map_type {
+ XPS_CPUS = 0,
+ XPS_RXQS,
+ XPS_MAPS_MAX,
+};
+
#ifdef CONFIG_XPS
/*
* This structure holds an XPS map which can be of variable length. The
@@ -767,7 +801,7 @@ struct xps_map {
unsigned int len;
unsigned int alloc_len;
struct rcu_head rcu;
- u16 queues[0];
+ u16 queues[];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
@@ -775,10 +809,20 @@ struct xps_map {
/*
* This structure holds all XPS maps for device. Maps are indexed by CPU.
+ *
+ * We keep track of the number of cpus/rxqs used when the struct is allocated,
+ * in nr_ids. This helps avoid out-of-bounds memory accesses.
+ *
+ * We keep track of the number of traffic classes used when the struct is
+ * allocated, in num_tc. This is used when navigating the maps, to ensure we
+ * do not cross their upper bound, since the original dev->num_tc can be
+ * updated in the meantime.
*/
struct xps_dev_maps {
struct rcu_head rcu;
- struct xps_map __rcu *attr_map[0]; /* Either CPUs map or RXQs map */
+ unsigned int nr_ids;
+ s16 num_tc;
+ struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */
};
#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
@@ -835,7 +879,68 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
struct sk_buff *skb,
struct net_device *sb_dev);
+enum net_device_path_type {
+ DEV_PATH_ETHERNET = 0,
+ DEV_PATH_VLAN,
+ DEV_PATH_BRIDGE,
+ DEV_PATH_PPPOE,
+ DEV_PATH_DSA,
+ DEV_PATH_MTK_WDMA,
+};
+
+struct net_device_path {
+ enum net_device_path_type type;
+ const struct net_device *dev;
+ union {
+ struct {
+ u16 id;
+ __be16 proto;
+ u8 h_dest[ETH_ALEN];
+ } encap;
+ struct {
+ enum {
+ DEV_PATH_BR_VLAN_KEEP,
+ DEV_PATH_BR_VLAN_TAG,
+ DEV_PATH_BR_VLAN_UNTAG,
+ DEV_PATH_BR_VLAN_UNTAG_HW,
+ } vlan_mode;
+ u16 vlan_id;
+ __be16 vlan_proto;
+ } bridge;
+ struct {
+ int port;
+ u16 proto;
+ } dsa;
+ struct {
+ u8 wdma_idx;
+ u8 queue;
+ u16 wcid;
+ u8 bss;
+ } mtk_wdma;
+ };
+};
+
+#define NET_DEVICE_PATH_STACK_MAX 5
+#define NET_DEVICE_PATH_VLAN_MAX 2
+
+struct net_device_path_stack {
+ int num_paths;
+ struct net_device_path path[NET_DEVICE_PATH_STACK_MAX];
+};
+
+struct net_device_path_ctx {
+ const struct net_device *dev;
+ u8 daddr[ETH_ALEN];
+
+ int num_vlans;
+ struct {
+ u16 id;
+ __be16 proto;
+ } vlan[NET_DEVICE_PATH_VLAN_MAX];
+};
+
enum tc_setup_type {
+ TC_QUERY_CAPS,
TC_SETUP_QDISC_MQPRIO,
TC_SETUP_CLSU32,
TC_SETUP_CLSFLOWER,
@@ -853,6 +958,9 @@ enum tc_setup_type {
TC_SETUP_FT,
TC_SETUP_QDISC_ETS,
TC_SETUP_QDISC_TBF,
+ TC_SETUP_QDISC_FIFO,
+ TC_SETUP_QDISC_HTB,
+ TC_SETUP_ACT,
};
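The net_device_path structures above let each virtual layer record one hop as ndo_fill_forward_path() resolves toward the real transmitting device. A hedged sketch of what a simple single-lower-device implementation might look like, assuming the usual convention that the callback fills path->dev with the current device and advances ctx->dev to the lower one (foo_* names hypothetical, not any in-tree driver's code):

/* Hypothetical single-lower-dev virtual device. */
struct foo_priv {
	struct net_device *lowerdev;
};

static int foo_fill_forward_path(struct net_device_path_ctx *ctx,
				 struct net_device_path *path)
{
	struct foo_priv *priv = netdev_priv(ctx->dev);

	path->type = DEV_PATH_ETHERNET;
	path->dev = ctx->dev;

	/* hand the walk on to the lower device */
	ctx->dev = priv->lowerdev;
	return 0;
}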
/* These structures hold the attributes of bpf state that are being passed
@@ -868,18 +976,29 @@ enum bpf_netdev_command {
*/
XDP_SETUP_PROG,
XDP_SETUP_PROG_HW,
- XDP_QUERY_PROG,
- XDP_QUERY_PROG_HW,
/* BPF program for offload callbacks, invoked at program load time. */
BPF_OFFLOAD_MAP_ALLOC,
BPF_OFFLOAD_MAP_FREE,
- XDP_SETUP_XSK_UMEM,
+ XDP_SETUP_XSK_POOL,
};
struct bpf_prog_offload_ops;
struct netlink_ext_ack;
struct xdp_umem;
struct xdp_dev_bulk_queue;
+struct bpf_xdp_link;
+
+enum bpf_xdp_mode {
+ XDP_MODE_SKB = 0,
+ XDP_MODE_DRV = 1,
+ XDP_MODE_HW = 2,
+ __MAX_XDP_MODE
+};
+
+struct bpf_xdp_entity {
+ struct bpf_prog *prog;
+ struct bpf_xdp_link *link;
+};
struct netdev_bpf {
enum bpf_netdev_command command;
@@ -890,19 +1009,13 @@ struct netdev_bpf {
struct bpf_prog *prog;
struct netlink_ext_ack *extack;
};
- /* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */
- struct {
- u32 prog_id;
- /* flags with which program was installed */
- u32 prog_flags;
- };
/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
struct {
struct bpf_offloaded_map *offmap;
};
- /* XDP_SETUP_XSK_UMEM */
+ /* XDP_SETUP_XSK_POOL */
struct {
- struct xdp_umem *umem;
+ struct xsk_buff_pool *pool;
u16 queue_id;
} xsk;
};
@@ -931,16 +1044,6 @@ struct dev_ifalias {
struct devlink;
struct tlsdev_ops;
-struct netdev_name_node {
- struct hlist_node hlist;
- struct list_head list;
- struct net_device *dev;
- const char *name;
-};
-
-int netdev_name_node_alt_create(struct net_device *dev, const char *name);
-int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
-
struct netdev_net_notifier {
struct list_head list;
struct notifier_block *nb;
@@ -1011,9 +1114,18 @@ struct netdev_net_notifier {
* Test if Media Access Control address is valid for the device.
*
* int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
- * Called when a user requests an ioctl which can't be handled by
- * the generic interface code. If not defined ioctls return
- * not supported error code.
+ * Old-style ioctl entry point. This is used internally by the
+ * appletalk and ieee802154 subsystems but is no longer called by
+ * the device ioctl handler.
+ *
+ * int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd);
+ * Used by the bonding driver for its device specific ioctls:
+ * SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE,
+ * SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY
+ *
+ * int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
+ * Called for ethernet specific ioctls: SIOCGMIIPHY, SIOCGMIIREG,
+ * SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP.
*
* int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
* Used to set network devices bus interface parameters. This interface
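Splitting ndo_do_ioctl into ndo_eth_ioctl/ndo_siocbond/ndo_siocdevprivate means a MAC driver only ever sees the MII and timestamping commands listed above. A hedged sketch of a typical ndo_eth_ioctl that defers to the attached PHY; phy_mii_ioctl() is the usual helper, while the foo_* names (including the timestamping handler) are illustrative placeholders:

static int foo_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!dev->phydev)
			return -ENODEV;
		return phy_mii_ioctl(dev->phydev, ifr, cmd);
	case SIOCSHWTSTAMP:
	case SIOCGHWTSTAMP:
		/* hand off to the driver's timestamping code (hypothetical) */
		return foo_hwtstamp_ioctl(dev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}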
@@ -1143,6 +1255,12 @@ struct netdev_net_notifier {
* int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
* Called to release previously enslaved netdev.
*
+ * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev,
+ * struct sk_buff *skb,
+ * bool all_slaves);
+ * Get the xmit slave of the master device. If all_slaves is true, the
+ * function assumes all the slaves can transmit.
+ *
* Feature/offload setting functions.
* netdev_features_t (*ndo_fix_features)(struct net_device *dev,
* netdev_features_t features);
@@ -1164,6 +1282,10 @@ struct netdev_net_notifier {
* struct net_device *dev,
* const unsigned char *addr, u16 vid)
* Deletes the FDB entry from dev corresponding to addr.
+ * int (*ndo_fdb_del_bulk)(struct ndmsg *ndm, struct nlattr *tb[],
+ * struct net_device *dev,
+ * u16 vid,
+ * struct netlink_ext_ack *extack);
* int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
* struct net_device *dev, struct net_device *filter_dev,
* int *idx)
@@ -1196,19 +1318,6 @@ struct netdev_net_notifier {
* struct netdev_phys_item_id *ppid)
* Called to get the parent ID of the physical port of this device.
*
- * void (*ndo_udp_tunnel_add)(struct net_device *dev,
- * struct udp_tunnel_info *ti);
- * Called by UDP tunnel to notify a driver about the UDP port and socket
- * address family that a UDP tunnel is listnening to. It is called only
- * when a new port starts listening. The operation is protected by the
- * RTNL.
- *
- * void (*ndo_udp_tunnel_del)(struct net_device *dev,
- * struct udp_tunnel_info *ti);
- * Called by UDP tunnel to notify the driver about a UDP port and socket
- * address family that the UDP tunnel is not listening to anymore. The
- * operation is protected by the RTNL.
- *
* void* (*ndo_dfwd_add_station)(struct net_device *pdev,
* struct net_device *dev)
* Called by upper layer devices to accelerate switching or other
@@ -1227,11 +1336,6 @@ struct netdev_net_notifier {
* TX queue.
* int (*ndo_get_iflink)(const struct net_device *dev);
* Called to get the iflink value of this device.
- * void (*ndo_change_proto_down)(struct net_device *dev,
- * bool proto_down);
- * This function is used to pass protocol port error state information
- * to the switch driver. The switch driver can react to the proto_down
- * by doing a phys down on the associated switch port.
* int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
* This function is used to get egress tunnel information for given skb.
* This is useful for retrieving outer tunnel header parameters while
@@ -1253,6 +1357,9 @@ struct netdev_net_notifier {
* that got dropped are freed/returned via xdp_return_frame().
* Returns negative number, means general error invoking ndo, meaning
* no frames were xmit'ed and core-caller will free all frames.
+ * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
+ * struct xdp_buff *xdp);
+ * Get the xmit slave of the master device based on the xdp_buff.
* int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
* This function is used to wake up the softirq, ksoftirqd or kthread
* responsible for sending and/or receiving packets on a specific
@@ -1263,6 +1370,20 @@ struct netdev_net_notifier {
* Get devlink port instance associated with a given netdev.
* Called with a reference on the netdevice and devlink locks only,
* rtnl_lock is not held.
+ * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p,
+ * int cmd);
+ * Add, change, delete or get information on an IPv4 tunnel.
+ * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
+ * If a device is paired with a peer device, return the peer instance.
+ * The caller must be inside an RCU read-side critical section.
+ * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path);
+ * Get the forwarding path to reach the real device from the HW destination address.
+ * ktime_t (*ndo_get_tstamp)(struct net_device *dev,
+ * const struct skb_shared_hwtstamps *hwtstamps,
+ * bool cycles);
+ * Get hardware timestamp based on normal/adjustable time or free running
+ * cycle counter. This function is required if the physical clock supports a
+ * free running cycle counter.
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -1285,6 +1406,15 @@ struct net_device_ops {
int (*ndo_validate_addr)(struct net_device *dev);
int (*ndo_do_ioctl)(struct net_device *dev,
struct ifreq *ifr, int cmd);
+ int (*ndo_eth_ioctl)(struct net_device *dev,
+ struct ifreq *ifr, int cmd);
+ int (*ndo_siocbond)(struct net_device *dev,
+ struct ifreq *ifr, int cmd);
+ int (*ndo_siocwandev)(struct net_device *dev,
+ struct if_settings *ifs);
+ int (*ndo_siocdevprivate)(struct net_device *dev,
+ struct ifreq *ifr,
+ void __user *data, int cmd);
int (*ndo_set_config)(struct net_device *dev,
struct ifmap *map);
int (*ndo_change_mtu)(struct net_device *dev,
@@ -1386,6 +1516,11 @@ struct net_device_ops {
struct netlink_ext_ack *extack);
int (*ndo_del_slave)(struct net_device *dev,
struct net_device *slave_dev);
+ struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev,
+ struct sk_buff *skb,
+ bool all_slaves);
+ struct net_device* (*ndo_sk_get_lower_dev)(struct net_device *dev,
+ struct sock *sk);
netdev_features_t (*ndo_fix_features)(struct net_device *dev,
netdev_features_t features);
int (*ndo_set_features)(struct net_device *dev,
@@ -1406,7 +1541,12 @@ struct net_device_ops {
struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
- u16 vid);
+ u16 vid, struct netlink_ext_ack *extack);
+ int (*ndo_fdb_del_bulk)(struct ndmsg *ndm,
+ struct nlattr *tb[],
+ struct net_device *dev,
+ u16 vid,
+ struct netlink_ext_ack *extack);
int (*ndo_fdb_dump)(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
@@ -1438,10 +1578,6 @@ struct net_device_ops {
struct netdev_phys_item_id *ppid);
int (*ndo_get_phys_port_name)(struct net_device *dev,
char *name, size_t len);
- void (*ndo_udp_tunnel_add)(struct net_device *dev,
- struct udp_tunnel_info *ti);
- void (*ndo_udp_tunnel_del)(struct net_device *dev,
- struct udp_tunnel_info *ti);
void* (*ndo_dfwd_add_station)(struct net_device *pdev,
struct net_device *dev);
void (*ndo_dfwd_del_station)(struct net_device *pdev,
@@ -1451,8 +1587,6 @@ struct net_device_ops {
int queue_index,
u32 maxrate);
int (*ndo_get_iflink)(const struct net_device *dev);
- int (*ndo_change_proto_down)(struct net_device *dev,
- bool proto_down);
int (*ndo_fill_metadata_dst)(struct net_device *dev,
struct sk_buff *skb);
void (*ndo_set_rx_headroom)(struct net_device *dev,
@@ -1462,13 +1596,23 @@ struct net_device_ops {
int (*ndo_xdp_xmit)(struct net_device *dev, int n,
struct xdp_frame **xdp,
u32 flags);
+ struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *dev,
+ struct xdp_buff *xdp);
int (*ndo_xsk_wakeup)(struct net_device *dev,
u32 queue_id, u32 flags);
struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev);
+ int (*ndo_tunnel_ctl)(struct net_device *dev,
+ struct ip_tunnel_parm *p, int cmd);
+ struct net_device * (*ndo_get_peer_dev)(struct net_device *dev);
+ int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
+ struct net_device_path *path);
+ ktime_t (*ndo_get_tstamp)(struct net_device *dev,
+ const struct skb_shared_hwtstamps *hwtstamps,
+ bool cycles);
};
/**
- * enum net_device_priv_flags - &struct net_device priv_flags
+ * enum netdev_priv_flags - &struct net_device priv_flags
*
* These are the &struct net_device priv_flags; they are only set internally
* by drivers and used in the kernel. These flags are invisible to
@@ -1512,6 +1656,9 @@ struct net_device_ops {
* @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
* @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
* @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running
+ * @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with
+ * skb_headlen(skb) == 0 (data starts from frag0)
+ * @IFF_CHANGE_PROTO_DOWN: device supports setting carrier via IFLA_PROTO_DOWN
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
@@ -1545,6 +1692,8 @@ enum netdev_priv_flags {
IFF_FAILOVER_SLAVE = 1<<28,
IFF_L3MDEV_RX_HANDLER = 1<<29,
IFF_LIVE_RENAME_OK = 1<<30,
+ IFF_TX_SKB_NO_LINEAR = BIT_ULL(31),
+ IFF_CHANGE_PROTO_DOWN = BIT_ULL(32),
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1571,12 +1720,20 @@ enum netdev_priv_flags {
#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
#define IFF_TEAM IFF_TEAM
#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
+#define IFF_PHONY_HEADROOM IFF_PHONY_HEADROOM
#define IFF_MACSEC IFF_MACSEC
#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
#define IFF_FAILOVER IFF_FAILOVER
#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
#define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK
+#define IFF_TX_SKB_NO_LINEAR IFF_TX_SKB_NO_LINEAR
+
+/* Specifies the type of the struct net_device::ml_priv pointer */
+enum netdev_ml_priv_type {
+ ML_PRIV_NONE,
+ ML_PRIV_CAN,
+};
/**
* struct net_device - The DEVICE structure.
@@ -1626,12 +1783,8 @@ enum netdev_priv_flags {
* @stats: Statistics struct, which was left as a legacy, use
* rtnl_link_stats64 instead
*
- * @rx_dropped: Dropped packets by core network,
- * do not use this in drivers
- * @tx_dropped: Dropped packets by core network,
+ * @core_stats: core networking counters,
* do not use this in drivers
- * @rx_nohandler: nohandler dropped packets by core network on
- * inactive devices, do not use this in drivers
* @carrier_up_count: Number of times the carrier has been up
* @carrier_down_count: Number of times the carrier has been down
*
@@ -1705,13 +1858,13 @@ enum netdev_priv_flags {
* @tipc_ptr: TIPC specific data
* @atalk_ptr: AppleTalk link
* @ip_ptr: IPv4 specific data
- * @dn_ptr: DECnet specific data
* @ip6_ptr: IPv6 specific data
* @ax25_ptr: AX.25 specific data
* @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
* @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
* device struct
* @mpls_ptr: mpls_dev struct pointer
+ * @mctp_ptr: MCTP specific data
*
* @dev_addr: Hw address (before bcast,
* because most packets are unicast)
@@ -1722,6 +1875,8 @@ enum netdev_priv_flags {
* @real_num_rx_queues: Number of RX queues currently active in device
* @xdp_prog: XDP sockets filter program pointer
* @gro_flush_timeout: timeout for GRO layer in NAPI
+ * @napi_defer_hard_irqs: If not zero, provides a counter that allows
+ * avoiding NIC hard IRQs on busy queues.
*
* @rx_handler: handler for received packets
* @rx_handler_data: XXX: need comments on this one
@@ -1744,18 +1899,21 @@ enum netdev_priv_flags {
* @tx_queue_len: Max frames per queue allowed
* @tx_global_lock: XXX: need comments on this one
* @xdp_bulkq: XDP device bulk queue
- * @xps_cpus_map: all CPUs map for XPS device
- * @xps_rxqs_map: all RXQs map for XPS device
+ * @xps_maps: all CPUs/RXQs maps for XPS device
*
* @xps_maps: XXX: need comments on this one
* @miniq_egress: clsact qdisc specific data for
* egress processing
+ * @nf_hooks_egress: netfilter hooks executed for egress packets
* @qdisc_hash: qdisc hash table
* @watchdog_timeo: Represents the timeout that is used by
* the watchdog (see dev_watchdog())
* @watchdog_timer: List of timers
*
+ * @proto_down_reason: reason a netdev interface is held down
* @pcpu_refcnt: Number of references to this device
+ * @dev_refcnt: Number of references to this device
+ * @refcnt_tracker: Tracker directory for tracked references to this device
* @todo_list: Delayed register/unregister
* @link_watch_list: XXX: need comments on this one
*
@@ -1770,6 +1928,7 @@ enum netdev_priv_flags {
* @nd_net: Network namespace this network device is inside
*
* @ml_priv: Mid-layer private
+ * @ml_priv_type: Mid-layer private type
* @lstats: Loopback statistics
* @tstats: Tunnel statistics
* @dstats: Dummy statistics
@@ -1778,6 +1937,8 @@ enum netdev_priv_flags {
* @garp_port: GARP
* @mrp_port: MRP
*
+ * @dm_private: Drop monitor private
+ *
* @dev: Class/net/name entry
* @sysfs_groups: Space for optional device, statistics and wireless
* sysfs groups
@@ -1786,8 +1947,10 @@ enum netdev_priv_flags {
* @rtnl_link_ops: Rtnl_link_ops
*
* @gso_max_size: Maximum size of generic segmentation offload
+ * @tso_max_size: Device (as in HW) limit on the max TSO request size
* @gso_max_segs: Maximum number of segments that can be passed to the
* NIC for GSO
+ * @tso_max_segs: Device (as in HW) limit on the max TSO segment count
*
* @dcbnl_ops: Data Center Bridging netlink ops
* @num_tc: Number of traffic classes in the net device
@@ -1800,13 +1963,8 @@ enum netdev_priv_flags {
* @phydev: Physical device may attach itself
* for hardware timestamping
* @sfp_bus: attached &struct sfp_bus structure.
- * @qdisc_tx_busylock_key: lockdep class annotating Qdisc->busylock
- * spinlock
- * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
- * @qdisc_xmit_lock_key: lockdep class annotating
- * netdev_queue->_xmit_lock spinlock
- * @addr_list_lock_key: lockdep class annotating
- * net_device->addr_list_lock spinlock
+ *
+ * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
*
* @proto_down: protocol port state information can be sent to the
* switch driver and used to set the phys state of the
@@ -1814,10 +1972,33 @@ enum netdev_priv_flags {
*
* @wol_enabled: Wake-on-LAN is enabled
*
+ * @threaded: napi threaded mode is enabled
+ *
* @net_notifier_list: List of per-net netdev notifier block
* that follow this device when it is moved
* to another network namespace.
*
+ * @macsec_ops: MACsec offloading ops
+ *
+ * @udp_tunnel_nic_info: static structure describing the UDP tunnel
+ * offload capabilities of the device
+ * @udp_tunnel_nic: UDP tunnel offload state
+ * @xdp_state: stores info on attached XDP BPF programs
+ *
+ * @nested_level: Used as a parameter of spin_lock_nested() of
+ * dev->addr_list_lock.
+ * @unlink_list: As netif_addr_lock() can be called recursively,
+ * keep a list of interfaces to be deleted.
+ * @gro_max_size: Maximum size of aggregated packet in generic
+ * receive offload (GRO)
+ *
+ * @dev_addr_shadow: Copy of @dev_addr to catch direct writes.
+ * @linkwatch_dev_tracker: refcount tracker used by linkwatch.
+ * @watchdog_dev_tracker: refcount tracker used by watchdog.
+ * @dev_registered_tracker: tracker for reference held while
+ * registered
+ * @offload_xstats_l3: L3 HW stats for this netdevice.
+ *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
@@ -1833,7 +2014,6 @@ struct net_device {
unsigned long mem_end;
unsigned long mem_start;
unsigned long base_addr;
- int irq;
/*
* Some hardware also needs these fields (state,dev_list,
@@ -1855,6 +2035,23 @@ struct net_device {
struct list_head lower;
} adj_list;
+ /* Read-mostly cache-line for fast-path access */
+ unsigned int flags;
+ unsigned long long priv_flags;
+ const struct net_device_ops *netdev_ops;
+ int ifindex;
+ unsigned short gflags;
+ unsigned short hard_header_len;
+
+ /* Note : dev->mtu is often read without holding a lock.
+ * Writers usually hold RTNL.
+ * It is recommended to use READ_ONCE() to annotate the reads,
+ * and to use WRITE_ONCE() to annotate the writes.
+ */
+ unsigned int mtu;
+ unsigned short needed_headroom;
+ unsigned short needed_tailroom;
+
netdev_features_t features;
netdev_features_t hw_features;
netdev_features_t wanted_features;
@@ -1863,14 +2060,17 @@ struct net_device {
netdev_features_t mpls_features;
netdev_features_t gso_partial_features;
- int ifindex;
+ unsigned int min_mtu;
+ unsigned int max_mtu;
+ unsigned short type;
+ unsigned char min_header_len;
+ unsigned char name_assign_type;
+
int group;
- struct net_device_stats stats;
+ struct net_device_stats stats; /* not used by modern drivers */
- atomic_long_t rx_dropped;
- atomic_long_t tx_dropped;
- atomic_long_t rx_nohandler;
+ struct net_device_core_stats __percpu *core_stats;
/* Stats to monitor link on/off, flapping */
atomic_t carrier_up_count;
@@ -1880,7 +2080,6 @@ struct net_device {
const struct iw_handler_def *wireless_handlers;
struct iw_public_data *wireless_data;
#endif
- const struct net_device_ops *netdev_ops;
const struct ethtool_ops *ethtool_ops;
#ifdef CONFIG_NET_L3_MASTER_DEV
const struct l3mdev_ops *l3mdev_ops;
@@ -1899,45 +2098,27 @@ struct net_device {
const struct header_ops *header_ops;
- unsigned int flags;
- unsigned int priv_flags;
-
- unsigned short gflags;
- unsigned short padded;
-
unsigned char operstate;
unsigned char link_mode;
unsigned char if_port;
unsigned char dma;
- /* Note : dev->mtu is often read without holding a lock.
- * Writers usually hold RTNL.
- * It is recommended to use READ_ONCE() to annotate the reads,
- * and to use WRITE_ONCE() to annotate the writes.
- */
- unsigned int mtu;
- unsigned int min_mtu;
- unsigned int max_mtu;
- unsigned short type;
- unsigned short hard_header_len;
- unsigned char min_header_len;
-
- unsigned short needed_headroom;
- unsigned short needed_tailroom;
-
/* Interface address info. */
unsigned char perm_addr[MAX_ADDR_LEN];
unsigned char addr_assign_type;
unsigned char addr_len;
unsigned char upper_level;
unsigned char lower_level;
+
unsigned short neigh_priv_len;
unsigned short dev_id;
unsigned short dev_port;
+ unsigned short padded;
+
spinlock_t addr_list_lock;
- unsigned char name_assign_type;
- bool uc_promisc;
+ int irq;
+
struct netdev_hw_addr_list uc;
struct netdev_hw_addr_list mc;
struct netdev_hw_addr_list dev_addrs;
@@ -1945,12 +2126,21 @@ struct net_device {
#ifdef CONFIG_SYSFS
struct kset *queues_kset;
#endif
+#ifdef CONFIG_LOCKDEP
+ struct list_head unlink_list;
+#endif
unsigned int promiscuity;
unsigned int allmulti;
+ bool uc_promisc;
+#ifdef CONFIG_LOCKDEP
+ unsigned char nested_level;
+#endif
/* Protocol-specific pointers */
+ struct in_device __rcu *ip_ptr;
+ struct inet6_dev __rcu *ip6_ptr;
#if IS_ENABLED(CONFIG_VLAN_8021Q)
struct vlan_info __rcu *vlan_info;
#endif
@@ -1960,28 +2150,30 @@ struct net_device {
#if IS_ENABLED(CONFIG_TIPC)
struct tipc_bearer __rcu *tipc_ptr;
#endif
-#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK)
+#if IS_ENABLED(CONFIG_ATALK)
void *atalk_ptr;
#endif
- struct in_device __rcu *ip_ptr;
-#if IS_ENABLED(CONFIG_DECNET)
- struct dn_dev __rcu *dn_ptr;
-#endif
- struct inet6_dev __rcu *ip6_ptr;
#if IS_ENABLED(CONFIG_AX25)
void *ax25_ptr;
#endif
+#if IS_ENABLED(CONFIG_CFG80211)
struct wireless_dev *ieee80211_ptr;
+#endif
+#if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN)
struct wpan_dev *ieee802154_ptr;
+#endif
#if IS_ENABLED(CONFIG_MPLS_ROUTING)
struct mpls_dev __rcu *mpls_ptr;
#endif
+#if IS_ENABLED(CONFIG_MCTP)
+ struct mctp_dev __rcu *mctp_ptr;
+#endif
/*
* Cache lines mostly used on receive path (including eth_type_trans())
*/
/* Interface address info used in eth_type_trans() */
- unsigned char *dev_addr;
+ const unsigned char *dev_addr;
struct netdev_rx_queue *_rx;
unsigned int num_rx_queues;
@@ -1989,6 +2181,13 @@ struct net_device {
struct bpf_prog __rcu *xdp_prog;
unsigned long gro_flush_timeout;
+ int napi_defer_hard_irqs;
+#define GRO_LEGACY_MAX_SIZE 65536u
+/* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
+ * and shinfo->gso_segs is a 16bit field.
+ */
+#define GRO_MAX_SIZE (8 * 65535u)
+ unsigned int gro_max_size;
rx_handler_func_t __rcu *rx_handler;
void __rcu *rx_handler_data;
@@ -2012,19 +2211,21 @@ struct net_device {
struct netdev_queue *_tx ____cacheline_aligned_in_smp;
unsigned int num_tx_queues;
unsigned int real_num_tx_queues;
- struct Qdisc *qdisc;
+ struct Qdisc __rcu *qdisc;
unsigned int tx_queue_len;
spinlock_t tx_global_lock;
struct xdp_dev_bulk_queue __percpu *xdp_bulkq;
#ifdef CONFIG_XPS
- struct xps_dev_maps __rcu *xps_cpus_map;
- struct xps_dev_maps __rcu *xps_rxqs_map;
+ struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX];
#endif
#ifdef CONFIG_NET_CLS_ACT
struct mini_Qdisc __rcu *miniq_egress;
#endif
+#ifdef CONFIG_NETFILTER_EGRESS
+ struct nf_hook_entries __rcu *nf_hooks_egress;
+#endif
#ifdef CONFIG_NET_SCHED
DECLARE_HASHTABLE (qdisc_hash, 4);
@@ -2033,8 +2234,16 @@ struct net_device {
struct timer_list watchdog_timer;
int watchdog_timeo;
+ u32 proto_down_reason;
+
struct list_head todo_list;
+
+#ifdef CONFIG_PCPU_DEV_REFCNT
int __percpu *pcpu_refcnt;
+#else
+ refcount_t dev_refcnt;
+#endif
+ struct ref_tracker_dir refcnt_tracker;
struct list_head link_watch_list;
@@ -2063,8 +2272,10 @@ struct net_device {
possible_net_t nd_net;
/* mid-layer private */
+ void *ml_priv;
+ enum netdev_ml_priv_type ml_priv_type;
+
union {
- void *ml_priv;
struct pcpu_lstats __percpu *lstats;
struct pcpu_sw_netstats __percpu *tstats;
struct pcpu_dstats __percpu *dstats;
@@ -2076,7 +2287,9 @@ struct net_device {
#if IS_ENABLED(CONFIG_MRP)
struct mrp_port __rcu *mrp_port;
#endif
-
+#if IS_ENABLED(CONFIG_NET_DROP_MONITOR)
+ struct dm_hw_stat_delta __rcu *dm_private;
+#endif
struct device dev;
const struct attribute_group *sysfs_groups[4];
const struct attribute_group *sysfs_rx_queue_group;
@@ -2084,10 +2297,20 @@ struct net_device {
const struct rtnl_link_ops *rtnl_link_ops;
/* for setting kernel sock attribute on TCP connection setup */
-#define GSO_MAX_SIZE 65536
+#define GSO_MAX_SEGS 65535u
+#define GSO_LEGACY_MAX_SIZE 65536u
+/* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
+ * and shinfo->gso_segs is a 16bit field.
+ */
+#define GSO_MAX_SIZE (8 * GSO_MAX_SEGS)
+
unsigned int gso_max_size;
-#define GSO_MAX_SEGS 65535
+#define TSO_LEGACY_MAX_SIZE 65536
+#define TSO_MAX_SIZE UINT_MAX
+ unsigned int tso_max_size;
u16 gso_max_segs;
+#define TSO_MAX_SEGS U16_MAX
+ u16 tso_max_segs;
#ifdef CONFIG_DCB
const struct dcbnl_rtnl_ops *dcbnl_ops;
@@ -2104,14 +2327,28 @@ struct net_device {
#endif
struct phy_device *phydev;
struct sfp_bus *sfp_bus;
- struct lock_class_key qdisc_tx_busylock_key;
- struct lock_class_key qdisc_running_key;
- struct lock_class_key qdisc_xmit_lock_key;
- struct lock_class_key addr_list_lock_key;
+ struct lock_class_key *qdisc_tx_busylock;
bool proto_down;
unsigned wol_enabled:1;
+ unsigned threaded:1;
struct list_head net_notifier_list;
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ /* MACsec management functions */
+ const struct macsec_ops *macsec_ops;
+#endif
+ const struct udp_tunnel_nic_info *udp_tunnel_nic_info;
+ struct udp_tunnel_nic *udp_tunnel_nic;
+
+ /* protected by rtnl_lock */
+ struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE];
+
+ u8 dev_addr_shadow[MAX_ADDR_LEN];
+ netdevice_tracker linkwatch_dev_tracker;
+ netdevice_tracker watchdog_dev_tracker;
+ netdevice_tracker dev_registered_tracker;
+ struct rtnl_hw_stats64 *offload_xstats_l3;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -2151,6 +2388,22 @@ int netdev_get_num_tc(struct net_device *dev)
return dev->num_tc;
}
+static inline void net_prefetch(void *p)
+{
+ prefetch(p);
+#if L1_CACHE_BYTES < 128
+ prefetch((u8 *)p + L1_CACHE_BYTES);
+#endif
+}
+
+static inline void net_prefetchw(void *p)
+{
+ prefetchw(p);
+#if L1_CACHE_BYTES < 128
+ prefetchw((u8 *)p + L1_CACHE_BYTES);
+#endif
+}
+
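net_prefetch()/net_prefetchw() paper over the L1 line-size difference: on CPUs with 64-byte lines they pull in two lines, so a 128-byte header area is covered either way. Typical Rx hot-path use (a sketch; the foo_ helper is illustrative):

/* Hypothetical Rx hot-path helper: warm the lines we are about to use. */
static void foo_rx_prefetch(struct sk_buff *skb)
{
	net_prefetch(skb->data);	/* headers about to be parsed */
	net_prefetchw(skb_shinfo(skb));	/* shinfo about to be written */
}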
void netdev_unbind_sb_channel(struct net_device *dev,
struct net_device *sb_dev);
int netdev_bind_sb_channel_queue(struct net_device *dev,
@@ -2187,6 +2440,21 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
f(dev, &dev->_tx[i], arg);
}
+#define netdev_lockdep_set_classes(dev) \
+{ \
+ static struct lock_class_key qdisc_tx_busylock_key; \
+ static struct lock_class_key qdisc_xmit_lock_key; \
+ static struct lock_class_key dev_addr_list_lock_key; \
+ unsigned int i; \
+ \
+ (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
+ lockdep_set_class(&(dev)->addr_list_lock, \
+ &dev_addr_list_lock_key); \
+ for (i = 0; i < (dev)->num_tx_queues; i++) \
+ lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
+ &qdisc_xmit_lock_key); \
+}
+
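Because the per-device lock_class_key members are gone from struct net_device, drivers that need distinct lockdep classes now install static keys via this macro, typically once per device type. A hedged usage sketch (the foo_ name is illustrative; in-tree users call it from their init path):

/* Hypothetical .ndo_init for a stacked device type: install its own
 * static lockdep classes for the addr-list and xmit locks.
 */
static int foo_init(struct net_device *dev)
{
	netdev_lockdep_set_classes(dev);
	return 0;
}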
u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
@@ -2213,6 +2481,29 @@ static inline void netdev_reset_rx_headroom(struct net_device *dev)
netdev_set_rx_headroom(dev, -1);
}
+static inline void *netdev_get_ml_priv(struct net_device *dev,
+ enum netdev_ml_priv_type type)
+{
+ if (dev->ml_priv_type != type)
+ return NULL;
+
+ return dev->ml_priv;
+}
+
+static inline void netdev_set_ml_priv(struct net_device *dev,
+ void *ml_priv,
+ enum netdev_ml_priv_type type)
+{
+ WARN(dev->ml_priv_type && dev->ml_priv_type != type,
+ "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
+ dev->ml_priv_type, type);
+ WARN(!dev->ml_priv_type && dev->ml_priv,
+ "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");
+
+ dev->ml_priv = ml_priv;
+ dev->ml_priv_type = type;
+}
+
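The typed accessors exist so CAN, the current ML_PRIV_CAN user, cannot silently collide with anything else stashing a pointer in ml_priv. A hedged usage sketch (foo_* names and the data type are hypothetical):

/* Hypothetical mid-layer attach/lookup using the typed accessors. */
struct foo_ml_data;	/* stand-in for whatever the CAN layer stores */

static void foo_attach_ml(struct net_device *dev, struct foo_ml_data *data)
{
	netdev_set_ml_priv(dev, data, ML_PRIV_CAN);
}

static struct foo_ml_data *foo_get_ml(struct net_device *dev)
{
	/* NULL when ml_priv holds a different type (or nothing) */
	return netdev_get_ml_priv(dev, ML_PRIV_CAN);
}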
/*
* Net namespace inlines
*/
@@ -2255,154 +2546,79 @@ static inline void *netdev_priv(const struct net_device *dev)
*/
#define NAPI_POLL_WEIGHT 64
+void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int weight);
+
/**
- * netif_napi_add - initialize a NAPI context
- * @dev: network device
- * @napi: NAPI context
- * @poll: polling function
- * @weight: default weight
+ * netif_napi_add() - initialize a NAPI context
+ * @dev: network device
+ * @napi: NAPI context
+ * @poll: polling function
*
* netif_napi_add() must be used to initialize a NAPI context prior to calling
* *any* of the other NAPI-related functions.
*/
-void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
- int (*poll)(struct napi_struct *, int), int weight);
+static inline void
+netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int))
+{
+ netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
+}
+
+static inline void
+netif_napi_add_tx_weight(struct net_device *dev,
+ struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int),
+ int weight)
+{
+ set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
+ netif_napi_add_weight(dev, napi, poll, weight);
+}
/**
- * netif_tx_napi_add - initialize a NAPI context
- * @dev: network device
- * @napi: NAPI context
- * @poll: polling function
- * @weight: default weight
+ * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only
+ * @dev: network device
+ * @napi: NAPI context
+ * @poll: polling function
*
* This variant of netif_napi_add() should be used from drivers using NAPI
* to exclusively poll a TX queue.
 * This avoids adding it to napi_hash[] and thus polluting that hash table.
*/
-static inline void netif_tx_napi_add(struct net_device *dev,
+static inline void netif_napi_add_tx(struct net_device *dev,
struct napi_struct *napi,
- int (*poll)(struct napi_struct *, int),
- int weight)
+ int (*poll)(struct napi_struct *, int))
{
- set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
- netif_napi_add(dev, napi, poll, weight);
+ netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
}
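
With the weight argument gone from the common case, a driver's NAPI registration shrinks to the form below (mydrv_* names are hypothetical; the poll callback contract is unchanged):

#include <linux/netdevice.h>

static int mydrv_poll(struct napi_struct *napi, int budget)
{
	int done = 0;	/* process up to @budget packets here */

	if (done < budget)
		napi_complete_done(napi, done);
	return done;
}

static void mydrv_napi_init(struct net_device *dev, struct napi_struct *napi)
{
	netif_napi_add(dev, napi, mydrv_poll);	/* weight = NAPI_POLL_WEIGHT */
}
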
/**
+ * __netif_napi_del - remove a NAPI context
+ * @napi: NAPI context
+ *
+ * Warning: caller must observe RCU grace period before freeing memory
+ * containing @napi. Drivers might want to call this helper to combine
+ * all the needed RCU grace periods into a single one.
+ */
+void __netif_napi_del(struct napi_struct *napi);
+
+/**
* netif_napi_del - remove a NAPI context
* @napi: NAPI context
*
* netif_napi_del() removes a NAPI context from the network device NAPI list
*/
-void netif_napi_del(struct napi_struct *napi);
-
-struct napi_gro_cb {
- /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
- void *frag0;
-
- /* Length of frag0. */
- unsigned int frag0_len;
-
- /* This indicates where we are processing relative to skb->data. */
- int data_offset;
-
- /* This is non-zero if the packet cannot be merged with the new skb. */
- u16 flush;
-
- /* Save the IP ID here and check when we get to the transport layer */
- u16 flush_id;
-
- /* Number of segments aggregated. */
- u16 count;
-
- /* Start offset for remote checksum offload */
- u16 gro_remcsum_start;
-
- /* jiffies when first packet was created/queued */
- unsigned long age;
-
- /* Used in ipv6_gro_receive() and foo-over-udp */
- u16 proto;
-
- /* This is non-zero if the packet may be of the same flow. */
- u8 same_flow:1;
-
- /* Used in tunnel GRO receive */
- u8 encap_mark:1;
-
- /* GRO checksum is valid */
- u8 csum_valid:1;
-
- /* Number of checksums via CHECKSUM_UNNECESSARY */
- u8 csum_cnt:3;
-
- /* Free the skb? */
- u8 free:2;
-#define NAPI_GRO_FREE 1
-#define NAPI_GRO_FREE_STOLEN_HEAD 2
-
- /* Used in foo-over-udp, set in udp[46]_gro_receive */
- u8 is_ipv6:1;
-
- /* Used in GRE, set in fou/gue_gro_receive */
- u8 is_fou:1;
-
- /* Used to determine if flush_id can be ignored */
- u8 is_atomic:1;
-
- /* Number of gro_receive callbacks this packet already went through */
- u8 recursion_counter:4;
-
- /* GRO is done by frag_list pointer chaining. */
- u8 is_flist:1;
-
- /* used to support CHECKSUM_COMPLETE for tunneling protocols */
- __wsum csum;
-
- /* used in skb_gro_receive() slow path */
- struct sk_buff *last;
-};
-
-#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
-
-#define GRO_RECURSION_LIMIT 15
-static inline int gro_recursion_inc_test(struct sk_buff *skb)
+static inline void netif_napi_del(struct napi_struct *napi)
{
- return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
-}
-
-typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
-static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
- struct list_head *head,
- struct sk_buff *skb)
-{
- if (unlikely(gro_recursion_inc_test(skb))) {
- NAPI_GRO_CB(skb)->flush |= 1;
- return NULL;
- }
-
- return cb(head, skb);
-}
-
-typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
- struct sk_buff *);
-static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
- struct sock *sk,
- struct list_head *head,
- struct sk_buff *skb)
-{
- if (unlikely(gro_recursion_inc_test(skb))) {
- NAPI_GRO_CB(skb)->flush |= 1;
- return NULL;
- }
-
- return cb(sk, head, skb);
+ __netif_napi_del(napi);
+ synchronize_net();
}
struct packet_type {
__be16 type; /* This is really htons(ether_type). */
bool ignore_outgoing;
struct net_device *dev; /* NULL is wildcarded here */
+ netdevice_tracker dev_tracker;
int (*func) (struct sk_buff *,
struct net_device *,
struct packet_type *,
@@ -2412,6 +2628,7 @@ struct packet_type {
struct net_device *);
bool (*id_match)(struct packet_type *ptype,
struct sock *sk);
+ struct net *af_packet_net;
void *af_packet_priv;
struct list_head list;
};
@@ -2433,10 +2650,10 @@ struct packet_offload {
/* often modified stats are per-CPU, other are shared (netdev->stats) */
struct pcpu_sw_netstats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
} __aligned(4 * sizeof(u64));
@@ -2448,6 +2665,28 @@ struct pcpu_lstats {
void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
+static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
+{
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ u64_stats_add(&tstats->rx_bytes, len);
+ u64_stats_inc(&tstats->rx_packets);
+ u64_stats_update_end(&tstats->syncp);
+}
+
+static inline void dev_sw_netstats_tx_add(struct net_device *dev,
+ unsigned int packets,
+ unsigned int len)
+{
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ u64_stats_add(&tstats->tx_bytes, len);
+ u64_stats_add(&tstats->tx_packets, packets);
+ u64_stats_update_end(&tstats->syncp);
+}
+
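The two helpers fold the open-coded u64_stats sequences that tunnel drivers used to carry. An rx-path sketch, assuming dev->tstats points at per-CPU struct pcpu_sw_netstats:

#include <linux/netdevice.h>

static int mytun_deliver(struct net_device *dev, struct sk_buff *skb)
{
	dev_sw_netstats_rx_add(dev, skb->len);
	return netif_rx(skb);
}
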
static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
{
struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
@@ -2475,6 +2714,20 @@ static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
#define netdev_alloc_pcpu_stats(type) \
__netdev_alloc_pcpu_stats(type, GFP_KERNEL)
+#define devm_netdev_alloc_pcpu_stats(dev, type) \
+({ \
+ typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\
+ if (pcpu_stats) { \
+ int __cpu; \
+ for_each_possible_cpu(__cpu) { \
+ typeof(type) *stat; \
+ stat = per_cpu_ptr(pcpu_stats, __cpu); \
+ u64_stats_init(&stat->syncp); \
+ } \
+ } \
+ pcpu_stats; \
+})
+
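devm_netdev_alloc_pcpu_stats() ties the per-CPU stats lifetime to a struct device, removing the free_percpu() unwind path. A probe-time sketch with a hypothetical platform device:

#include <linux/netdevice.h>
#include <linux/platform_device.h>

static int mydrv_init_stats(struct platform_device *pdev,
			    struct net_device *ndev)
{
	ndev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev,
						    struct pcpu_sw_netstats);
	return ndev->tstats ? 0 : -ENOMEM;
}
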
enum netdev_lag_tx_type {
NETDEV_LAG_TX_TYPE_UNKNOWN,
NETDEV_LAG_TX_TYPE_RANDOM,
@@ -2491,6 +2744,7 @@ enum netdev_lag_hash {
NETDEV_LAG_HASH_L23,
NETDEV_LAG_HASH_E23,
NETDEV_LAG_HASH_E34,
+ NETDEV_LAG_HASH_VLAN_SRCMAC,
NETDEV_LAG_HASH_UNKNOWN,
};
@@ -2548,6 +2802,10 @@ enum netdev_cmd {
NETDEV_CVLAN_FILTER_DROP_INFO,
NETDEV_SVLAN_FILTER_PUSH_INFO,
NETDEV_SVLAN_FILTER_DROP_INFO,
+ NETDEV_OFFLOAD_XSTATS_ENABLE,
+ NETDEV_OFFLOAD_XSTATS_DISABLE,
+ NETDEV_OFFLOAD_XSTATS_REPORT_USED,
+ NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
};
const char *netdev_cmd_to_name(enum netdev_cmd cmd);
@@ -2598,6 +2856,42 @@ struct netdev_notifier_pre_changeaddr_info {
const unsigned char *dev_addr;
};
+enum netdev_offload_xstats_type {
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1,
+};
+
+struct netdev_notifier_offload_xstats_info {
+ struct netdev_notifier_info info; /* must be first */
+ enum netdev_offload_xstats_type type;
+
+ union {
+ /* NETDEV_OFFLOAD_XSTATS_REPORT_DELTA */
+ struct netdev_notifier_offload_xstats_rd *report_delta;
+ /* NETDEV_OFFLOAD_XSTATS_REPORT_USED */
+ struct netdev_notifier_offload_xstats_ru *report_used;
+ };
+};
+
+int netdev_offload_xstats_enable(struct net_device *dev,
+ enum netdev_offload_xstats_type type,
+ struct netlink_ext_ack *extack);
+int netdev_offload_xstats_disable(struct net_device *dev,
+ enum netdev_offload_xstats_type type);
+bool netdev_offload_xstats_enabled(const struct net_device *dev,
+ enum netdev_offload_xstats_type type);
+int netdev_offload_xstats_get(struct net_device *dev,
+ enum netdev_offload_xstats_type type,
+ struct rtnl_hw_stats64 *stats, bool *used,
+ struct netlink_ext_ack *extack);
+void
+netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd,
+ const struct rtnl_hw_stats64 *stats);
+void
+netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru);
+void netdev_offload_xstats_push_delta(struct net_device *dev,
+ enum netdev_offload_xstats_type type,
+ const struct rtnl_hw_stats64 *stats);
+
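Typical consumer flow for the new HW-stats API: enable collection for a type, then read totals back (error paths trimmed; names other than the netdev_offload_xstats_* calls are illustrative):

#include <linux/netdevice.h>

static int my_l3_stats_read(struct net_device *dev,
			    struct rtnl_hw_stats64 *stats,
			    struct netlink_ext_ack *extack)
{
	bool used;
	int err;

	err = netdev_offload_xstats_enable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3,
					   extack);
	if (err)
		return err;

	return netdev_offload_xstats_get(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3,
					 stats, &used, extack);
}
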
static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
struct net_device *dev)
{
@@ -2676,11 +2970,9 @@ static inline struct net_device *first_net_device_rcu(struct net *net)
}
int netdev_boot_setup_check(struct net_device *dev);
-unsigned long netdev_boot_base(const char *prefix, int unit);
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
const char *hwaddr);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
-struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
void dev_add_pack(struct packet_type *pt);
void dev_remove_pack(struct packet_type *pt);
void __dev_remove_pack(struct packet_type *pt);
@@ -2689,11 +2981,14 @@ void dev_remove_offload(struct packet_offload *po);
int dev_get_iflink(const struct net_device *dev);
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
+int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
+ struct net_device_path_stack *stack);
struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
+bool netdev_name_in_use(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
void dev_close(struct net_device *dev);
@@ -2704,9 +2999,31 @@ u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
-int dev_queue_xmit(struct sk_buff *skb);
-int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
-int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
+
+int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev);
+int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
+
+static inline int dev_queue_xmit(struct sk_buff *skb)
+{
+ return __dev_queue_xmit(skb, NULL);
+}
+
+static inline int dev_queue_xmit_accel(struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ return __dev_queue_xmit(skb, sb_dev);
+}
+
+static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
+{
+ int ret;
+
+ ret = __dev_direct_xmit(skb, queue_id);
+ if (!dev_xmit_complete(ret))
+ kfree_skb(skb);
+ return ret;
+}
+
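Callers keep the old contract: dev_direct_xmit() still consumes the skb when the device does not complete it, as in this AF_XDP-style sketch:

#include <linux/netdevice.h>

static int my_send(struct sk_buff *skb, u16 qid)
{
	int ret = dev_direct_xmit(skb, qid);	/* frees skb on failure */

	return dev_xmit_complete(ret) ? 0 : -ENOBUFS;
}
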
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
void unregister_netdevice_many(struct list_head *head);
@@ -2718,263 +3035,19 @@ static inline void unregister_netdevice(struct net_device *dev)
int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);
void netdev_freemem(struct net_device *dev);
-void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
+struct net_device *netdev_get_xmit_slave(struct net_device *dev,
+ struct sk_buff *skb,
+ bool all_slaves);
+struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
+ struct sock *sk);
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
struct net_device *dev_get_by_napi_id(unsigned int napi_id);
-int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_restart(struct net_device *dev);
-int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
-int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
-static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
-{
- return NAPI_GRO_CB(skb)->data_offset;
-}
-
-static inline unsigned int skb_gro_len(const struct sk_buff *skb)
-{
- return skb->len - NAPI_GRO_CB(skb)->data_offset;
-}
-
-static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
-{
- NAPI_GRO_CB(skb)->data_offset += len;
-}
-
-static inline void *skb_gro_header_fast(struct sk_buff *skb,
- unsigned int offset)
-{
- return NAPI_GRO_CB(skb)->frag0 + offset;
-}
-
-static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
-{
- return NAPI_GRO_CB(skb)->frag0_len < hlen;
-}
-
-static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
-{
- NAPI_GRO_CB(skb)->frag0 = NULL;
- NAPI_GRO_CB(skb)->frag0_len = 0;
-}
-
-static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
- unsigned int offset)
-{
- if (!pskb_may_pull(skb, hlen))
- return NULL;
-
- skb_gro_frag0_invalidate(skb);
- return skb->data + offset;
-}
-
-static inline void *skb_gro_network_header(struct sk_buff *skb)
-{
- return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
- skb_network_offset(skb);
-}
-
-static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
- const void *start, unsigned int len)
-{
- if (NAPI_GRO_CB(skb)->csum_valid)
- NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
- csum_partial(start, len, 0));
-}
-
-/* GRO checksum functions. These are logical equivalents of the normal
- * checksum functions (in skbuff.h) except that they operate on the GRO
- * offsets and fields in sk_buff.
- */
-
-__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
-
-static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
-{
- return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
-}
-
-static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
- bool zero_okay,
- __sum16 check)
-{
- return ((skb->ip_summed != CHECKSUM_PARTIAL ||
- skb_checksum_start_offset(skb) <
- skb_gro_offset(skb)) &&
- !skb_at_gro_remcsum_start(skb) &&
- NAPI_GRO_CB(skb)->csum_cnt == 0 &&
- (!zero_okay || check));
-}
-
-static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
- __wsum psum)
-{
- if (NAPI_GRO_CB(skb)->csum_valid &&
- !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
- return 0;
-
- NAPI_GRO_CB(skb)->csum = psum;
-
- return __skb_gro_checksum_complete(skb);
-}
-
-static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
-{
- if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
- /* Consume a checksum from CHECKSUM_UNNECESSARY */
- NAPI_GRO_CB(skb)->csum_cnt--;
- } else {
- /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
- * verified a new top level checksum or an encapsulated one
- * during GRO. This saves work if we fallback to normal path.
- */
- __skb_incr_checksum_unnecessary(skb);
- }
-}
-
-#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
- compute_pseudo) \
-({ \
- __sum16 __ret = 0; \
- if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
- __ret = __skb_gro_checksum_validate_complete(skb, \
- compute_pseudo(skb, proto)); \
- if (!__ret) \
- skb_gro_incr_csum_unnecessary(skb); \
- __ret; \
-})
-
-#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
- __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
-
-#define skb_gro_checksum_validate_zero_check(skb, proto, check, \
- compute_pseudo) \
- __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
-
-#define skb_gro_checksum_simple_validate(skb) \
- __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
-
-static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
-{
- return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
- !NAPI_GRO_CB(skb)->csum_valid);
-}
-
-static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
- __wsum pseudo)
-{
- NAPI_GRO_CB(skb)->csum = ~pseudo;
- NAPI_GRO_CB(skb)->csum_valid = 1;
-}
-
-#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo) \
-do { \
- if (__skb_gro_checksum_convert_check(skb)) \
- __skb_gro_checksum_convert(skb, \
- compute_pseudo(skb, proto)); \
-} while (0)
-
-struct gro_remcsum {
- int offset;
- __wsum delta;
-};
-
-static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
-{
- grc->offset = 0;
- grc->delta = 0;
-}
-
-static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
- unsigned int off, size_t hdrlen,
- int start, int offset,
- struct gro_remcsum *grc,
- bool nopartial)
-{
- __wsum delta;
- size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
-
- BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
-
- if (!nopartial) {
- NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
- return ptr;
- }
-
- ptr = skb_gro_header_fast(skb, off);
- if (skb_gro_header_hard(skb, off + plen)) {
- ptr = skb_gro_header_slow(skb, off + plen, off);
- if (!ptr)
- return NULL;
- }
-
- delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
- start, offset);
-
- /* Adjust skb->csum since we changed the packet */
- NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
-
- grc->offset = off + hdrlen + offset;
- grc->delta = delta;
-
- return ptr;
-}
-
-static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
- struct gro_remcsum *grc)
-{
- void *ptr;
- size_t plen = grc->offset + sizeof(u16);
-
- if (!grc->delta)
- return;
-
- ptr = skb_gro_header_fast(skb, grc->offset);
- if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
- ptr = skb_gro_header_slow(skb, plen, grc->offset);
- if (!ptr)
- return;
- }
-
- remcsum_unadjust((__sum16 *)ptr, grc->delta);
-}
-
-#ifdef CONFIG_XFRM_OFFLOAD
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
-{
- if (PTR_ERR(pp) != -EINPROGRESS)
- NAPI_GRO_CB(skb)->flush |= flush;
-}
-static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
- struct sk_buff *pp,
- int flush,
- struct gro_remcsum *grc)
-{
- if (PTR_ERR(pp) != -EINPROGRESS) {
- NAPI_GRO_CB(skb)->flush |= flush;
- skb_gro_remcsum_cleanup(skb, grc);
- skb->remcsum_offload = 0;
- }
-}
-#else
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
-{
- NAPI_GRO_CB(skb)->flush |= flush;
-}
-static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
- struct sk_buff *pp,
- int flush,
- struct gro_remcsum *grc)
-{
- NAPI_GRO_CB(skb)->flush |= flush;
- skb_gro_remcsum_cleanup(skb, grc);
- skb->remcsum_offload = 0;
-}
-#endif
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
@@ -3026,27 +3099,11 @@ static inline bool dev_validate_header(const struct net_device *dev,
return false;
}
-typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr,
- int len, int size);
-int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
-static inline int unregister_gifconf(unsigned int family)
+static inline bool dev_has_header(const struct net_device *dev)
{
- return register_gifconf(family, NULL);
+ return dev->header_ops && dev->header_ops->create;
}
-#ifdef CONFIG_NET_FLOW_LIMIT
-#define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */
-struct sd_flow_limit {
- u64 count;
- unsigned int num_buckets;
- unsigned int history_head;
- u16 history[FLOW_LIMIT_HISTORY];
- u8 buckets[];
-};
-
-extern int netdev_flow_limit_table_len;
-#endif /* CONFIG_NET_FLOW_LIMIT */
-
/*
* Incoming packets are placed on per-CPU queues
*/
@@ -3074,6 +3131,9 @@ struct softnet_data {
struct {
u16 recursion;
u8 more;
+#ifdef CONFIG_NET_EGRESS
+ u8 skip_txqueue;
+#endif
} xmit;
#ifdef CONFIG_RPS
/* input_queue_head should be written by cpu owning this struct,
@@ -3091,6 +3151,12 @@ struct softnet_data {
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
+ /* Another possibly contended cache line */
+ spinlock_t defer_lock ____cacheline_aligned_in_smp;
+ int defer_count;
+ int defer_ipi_scheduled;
+ struct sk_buff *defer_list;
+ call_single_data_t defer_csd;
};
static inline void input_queue_head_incr(struct softnet_data *sd)
@@ -3115,7 +3181,7 @@ static inline int dev_recursion_level(void)
return this_cpu_read(softnet_data.xmit.recursion);
}
-#define XMIT_RECURSION_LIMIT 10
+#define XMIT_RECURSION_LIMIT 8
static inline bool dev_xmit_recursion(void)
{
return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
@@ -3211,7 +3277,6 @@ static inline void netif_stop_queue(struct net_device *dev)
}
void netif_tx_stop_all_queues(struct net_device *dev);
-void netdev_update_lockdep_key(struct net_device *dev);
static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
@@ -3247,6 +3312,24 @@ netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
}
/**
+ * netdev_queue_set_dql_min_limit - set dql minimum limit
+ * @dev_queue: pointer to transmit queue
+ * @min_limit: dql minimum limit
+ *
+ * Forces xmit_more() to return true until the minimum threshold
+ * defined by @min_limit is reached (or until the tx queue is
+ * empty). Warning: to be used with care, misuse will impact
+ * latency.
+ */
+static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue,
+ unsigned int min_limit)
+{
+#ifdef CONFIG_BQL
+ dev_queue->dql.min_limit = min_limit;
+#endif
+}
+
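Sketch of a batching driver raising the DQL floor so xmit_more() stays asserted until roughly 1 KB is queued; per the warning above, this deliberately trades latency for throughput:

#include <linux/netdevice.h>
#include <linux/sizes.h>

static void mydrv_tune_txq(struct net_device *dev, unsigned int qid)
{
	netdev_queue_set_dql_min_limit(netdev_get_tx_queue(dev, qid), SZ_1K);
}
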
+/**
* netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
* @dev_queue: pointer to transmit queue
*
@@ -3274,6 +3357,16 @@ static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_qu
#endif
}
+/**
+ * netdev_tx_sent_queue - report the number of bytes queued to a given tx queue
+ * @dev_queue: network device queue
+ * @bytes: number of bytes queued to the device queue
+ *
+ * Report the number of bytes queued for sending/completion to the network
+ * device hardware queue. @bytes should be a good approximation and should
+ * exactly match netdev_tx_completed_queue() @bytes.
+ * This is typically called once per packet, from ndo_start_xmit().
+ */
static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
unsigned int bytes)
{
@@ -3319,13 +3412,14 @@ static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
}
/**
- * netdev_sent_queue - report the number of bytes queued to hardware
- * @dev: network device
- * @bytes: number of bytes queued to the hardware device queue
+ * netdev_sent_queue - report the number of bytes queued to hardware
+ * @dev: network device
+ * @bytes: number of bytes queued to the hardware device queue
*
- * Report the number of bytes queued for sending/completion to the network
- * device hardware queue. @bytes should be a good approximation and should
- * exactly match netdev_completed_queue() @bytes
+ * Report the number of bytes queued for sending/completion to the network
+ * device hardware queue #0. @bytes should be a good approximation and should
+ * exactly match netdev_completed_queue() @bytes.
+ * This is typically called once per packet, from ndo_start_xmit().
*/
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
@@ -3340,6 +3434,15 @@ static inline bool __netdev_sent_queue(struct net_device *dev,
xmit_more);
}
+/**
+ * netdev_tx_completed_queue - report number of packets/bytes at TX completion.
+ * @dev_queue: network device queue
+ * @pkts: number of packets (currently ignored)
+ * @bytes: number of bytes dequeued from the device queue
+ *
+ * Must be called at most once per TX completion round (and not per
+ * individual packet), so that BQL can adjust its limits appropriately.
+ */
static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
unsigned int pkts, unsigned int bytes)
{
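
The new kerneldoc spells out the BQL contract; sketched as a pair, with bytes reported at xmit time and returned once per completion round (hardware handoff elided):

#include <linux/netdevice.h>

static netdev_tx_t mydrv_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	netdev_tx_sent_queue(txq, skb->len);
	/* ... post skb to hardware ... */
	return NETDEV_TX_OK;
}

static void mydrv_tx_complete(struct net_device *dev, unsigned int pkts,
			      unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}
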
@@ -3466,7 +3569,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
}
/**
- * netif_subqueue_stopped - test status of subqueue
+ * __netif_subqueue_stopped - test status of subqueue
* @dev: network device
* @queue_index: sub queue index
*
@@ -3480,6 +3583,13 @@ static inline bool __netif_subqueue_stopped(const struct net_device *dev,
return netif_tx_queue_stopped(txq);
}
+/**
+ * netif_subqueue_stopped - test status of subqueue
+ * @dev: network device
+ * @skb: sub queue buffer pointer
+ *
+ * Check individual transmit queue of a device with multiple transmit queues.
+ */
static inline bool netif_subqueue_stopped(const struct net_device *dev,
struct sk_buff *skb)
{
@@ -3504,7 +3614,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
u16 index);
int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
- u16 index, bool is_rxqs_map);
+ u16 index, enum xps_map_type type);
/**
* netif_attr_test_mask - Test a CPU or Rx queue set in a mask
@@ -3599,7 +3709,7 @@ static inline int netif_set_xps_queue(struct net_device *dev,
static inline int __netif_set_xps_queue(struct net_device *dev,
const unsigned long *mask,
- u16 index, bool is_rxqs_map)
+ u16 index, enum xps_map_type type)
{
return 0;
}
@@ -3628,6 +3738,8 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev,
return 0;
}
#endif
+int netif_set_real_num_queues(struct net_device *dev,
+ unsigned int txq, unsigned int rxq);
static inline struct netdev_rx_queue *
__netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
@@ -3647,7 +3759,6 @@ static inline unsigned int get_netdev_rx_queue_index(
}
#endif
-#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
int netif_get_num_default_rss_queues(void);
enum skb_free_reason {
@@ -3661,7 +3772,7 @@ void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
/*
* It is not allowed to call kfree_skb() or consume_skb() from hardware
* interrupt context or with hardware interrupts being disabled.
- * (in_irq() || irqs_disabled())
+ * (in_hardirq() || irqs_disabled())
*
* We provide four helpers that can be used in following contexts :
*
@@ -3697,16 +3808,21 @@ static inline void dev_consume_skb_any(struct sk_buff *skb)
__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
}
+u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog);
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
int netif_rx(struct sk_buff *skb);
-int netif_rx_ni(struct sk_buff *skb);
+int __netif_rx(struct sk_buff *skb);
+
int netif_receive_skb(struct sk_buff *skb);
int netif_receive_skb_core(struct sk_buff *skb);
+void netif_receive_skb_list_internal(struct list_head *head);
void netif_receive_skb_list(struct list_head *head);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
+void napi_get_frags_check(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);
struct packet_offload *gro_find_receive_by_type(__be16 type);
struct packet_offload *gro_find_complete_by_type(__be16 type);
@@ -3724,10 +3840,16 @@ int netdev_rx_handler_register(struct net_device *dev,
void netdev_rx_handler_unregister(struct net_device *dev);
bool dev_valid_name(const char *name);
+static inline bool is_socket_ioctl_cmd(unsigned int cmd)
+{
+ return _IOC_TYPE(cmd) == SOCK_IOC_TYPE;
+}
+int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg);
+int put_user_ifreq(struct ifreq *ifr, void __user *arg);
int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
- bool *need_copyout);
-int dev_ifconf(struct net *net, struct ifconf *, int);
-int dev_ethtool(struct net *net, struct ifreq *);
+ void __user *data, bool *need_copyout);
+int dev_ifconf(struct net *net, struct ifconf __user *ifc);
+int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata);
unsigned int dev_get_flags(const struct net_device *);
int __dev_change_flags(struct net_device *dev, unsigned int flags,
struct netlink_ext_ack *extack);
@@ -3735,59 +3857,107 @@ int dev_change_flags(struct net_device *dev, unsigned int flags,
struct netlink_ext_ack *extack);
void __dev_notify_flags(struct net_device *, unsigned int old_flags,
unsigned int gchanges);
-int dev_change_name(struct net_device *, const char *);
int dev_set_alias(struct net_device *, const char *, size_t);
int dev_get_alias(const struct net_device *, char *, size_t);
-int dev_change_net_namespace(struct net_device *, struct net *, const char *);
+int __dev_change_net_namespace(struct net_device *dev, struct net *net,
+ const char *pat, int new_ifindex);
+static inline
+int dev_change_net_namespace(struct net_device *dev, struct net *net,
+ const char *pat)
+{
+ return __dev_change_net_namespace(dev, net, pat, 0);
+}
int __dev_set_mtu(struct net_device *, int);
-int dev_validate_mtu(struct net_device *dev, int mtu,
- struct netlink_ext_ack *extack);
-int dev_set_mtu_ext(struct net_device *dev, int mtu,
- struct netlink_ext_ack *extack);
int dev_set_mtu(struct net_device *, int);
-int dev_change_tx_queue_len(struct net_device *, unsigned long);
-void dev_set_group(struct net_device *, int);
int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
struct netlink_ext_ack *extack);
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
struct netlink_ext_ack *extack);
-int dev_change_carrier(struct net_device *, bool new_carrier);
-int dev_get_phys_port_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid);
-int dev_get_phys_port_name(struct net_device *dev,
- char *name, size_t len);
+int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
+ struct netlink_ext_ack *extack);
+int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
int dev_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid, bool recurse);
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
-int dev_change_proto_down(struct net_device *dev, bool proto_down);
-int dev_change_proto_down_generic(struct net_device *dev, bool proto_down);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
-typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
-int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
- int fd, u32 flags);
-u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
- enum bpf_netdev_command cmd);
-int xdp_umem_query(struct net_device *dev, u16 queue_id);
+int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+u8 dev_xdp_prog_count(struct net_device *dev);
+u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
+int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(const struct net_device *dev,
const struct sk_buff *skb);
+static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
+ const struct sk_buff *skb,
+ const bool check_mtu)
+{
+ const u32 vlan_hdr_len = 4; /* VLAN_HLEN */
+ unsigned int len;
+
+ if (!(dev->flags & IFF_UP))
+ return false;
+
+ if (!check_mtu)
+ return true;
+
+ len = dev->mtu + dev->hard_header_len + vlan_hdr_len;
+ if (skb->len <= len)
+ return true;
+
+ /* if TSO is enabled, we don't care about the length as the packet
+	 * could be forwarded without being segmented beforehand
+ */
+ if (skb_is_gso(skb))
+ return true;
+
+ return false;
+}
+
+struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev);
+
+static inline struct net_device_core_stats __percpu *dev_core_stats(struct net_device *dev)
+{
+ /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
+ struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats);
+
+ if (likely(p))
+ return p;
+
+ return netdev_core_stats_alloc(dev);
+}
+
+#define DEV_CORE_STATS_INC(FIELD) \
+static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \
+{ \
+ struct net_device_core_stats __percpu *p; \
+ \
+ p = dev_core_stats(dev); \
+ if (p) \
+ this_cpu_inc(p->FIELD); \
+}
+DEV_CORE_STATS_INC(rx_dropped)
+DEV_CORE_STATS_INC(tx_dropped)
+DEV_CORE_STATS_INC(rx_nohandler)
+DEV_CORE_STATS_INC(rx_otherhost_dropped)
+
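The generated helpers replace the old atomic_long_inc(&dev->rx_dropped) pattern; a drop path now reads:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void mydrv_drop(struct net_device *dev, struct sk_buff *skb)
{
	dev_core_stats_rx_dropped_inc(dev);	/* lazily allocates core_stats */
	kfree_skb(skb);
}
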
static __always_inline int ____dev_forward_skb(struct net_device *dev,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ const bool check_mtu)
{
if (skb_orphan_frags(skb, GFP_ATOMIC) ||
- unlikely(!is_skb_forwardable(dev, skb))) {
- atomic_long_inc(&dev->rx_dropped);
+ unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) {
+ dev_core_stats_rx_dropped_inc(dev);
kfree_skb(skb);
return NET_RX_DROP;
}
- skb_scrub_packet(skb, true);
+ skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
skb->priority = 0;
return 0;
}
@@ -3795,21 +3965,73 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev,
bool dev_nit_active(struct net_device *dev);
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
-extern int netdev_budget;
-extern unsigned int netdev_budget_usecs;
+static inline void __dev_put(struct net_device *dev)
+{
+ if (dev) {
+#ifdef CONFIG_PCPU_DEV_REFCNT
+ this_cpu_dec(*dev->pcpu_refcnt);
+#else
+ refcount_dec(&dev->dev_refcnt);
+#endif
+ }
+}
+
+static inline void __dev_hold(struct net_device *dev)
+{
+ if (dev) {
+#ifdef CONFIG_PCPU_DEV_REFCNT
+ this_cpu_inc(*dev->pcpu_refcnt);
+#else
+ refcount_inc(&dev->dev_refcnt);
+#endif
+ }
+}
-/* Called by rtnetlink.c:rtnl_unlock() */
-void netdev_run_todo(void);
+static inline void __netdev_tracker_alloc(struct net_device *dev,
+ netdevice_tracker *tracker,
+ gfp_t gfp)
+{
+#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
+ ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp);
+#endif
+}
-/**
- * dev_put - release reference to device
- * @dev: network device
- *
- * Release reference to device to allow it to be freed.
+/* netdev_tracker_alloc() can upgrade a prior untracked reference
+ * taken by dev_get_by_name()/dev_get_by_index() to a tracked one.
*/
-static inline void dev_put(struct net_device *dev)
+static inline void netdev_tracker_alloc(struct net_device *dev,
+ netdevice_tracker *tracker, gfp_t gfp)
+{
+#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
+ refcount_dec(&dev->refcnt_tracker.no_tracker);
+ __netdev_tracker_alloc(dev, tracker, gfp);
+#endif
+}
+
+static inline void netdev_tracker_free(struct net_device *dev,
+ netdevice_tracker *tracker)
{
- this_cpu_dec(*dev->pcpu_refcnt);
+#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
+ ref_tracker_free(&dev->refcnt_tracker, tracker);
+#endif
+}
+
+static inline void netdev_hold(struct net_device *dev,
+ netdevice_tracker *tracker, gfp_t gfp)
+{
+ if (dev) {
+ __dev_hold(dev);
+ __netdev_tracker_alloc(dev, tracker, gfp);
+ }
+}
+
+static inline void netdev_put(struct net_device *dev,
+ netdevice_tracker *tracker)
+{
+ if (dev) {
+ netdev_tracker_free(dev, tracker);
+ __dev_put(dev);
+ }
}
/**
@@ -3817,10 +4039,38 @@ static inline void dev_put(struct net_device *dev)
* @dev: network device
*
* Hold reference to device to keep it from being freed.
+ * Try using netdev_hold() instead.
*/
static inline void dev_hold(struct net_device *dev)
{
- this_cpu_inc(*dev->pcpu_refcnt);
+ netdev_hold(dev, NULL, GFP_ATOMIC);
+}
+
+/**
+ * dev_put - release reference to device
+ * @dev: network device
+ *
+ * Release reference to device to allow it to be freed.
+ * Try using netdev_put() instead.
+ */
+static inline void dev_put(struct net_device *dev)
+{
+ netdev_put(dev, NULL);
+}
+
+static inline void netdev_ref_replace(struct net_device *odev,
+ struct net_device *ndev,
+ netdevice_tracker *tracker,
+ gfp_t gfp)
+{
+ if (odev)
+ netdev_tracker_free(odev, tracker);
+
+ __dev_hold(ndev);
+ __dev_put(odev);
+
+ if (ndev)
+ __netdev_tracker_alloc(ndev, tracker, gfp);
}
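
The tracked style pairs netdev_hold() and netdev_put() through a netdevice_tracker embedded in the holder, so reference-leak reports can name the owner. A hedged sketch:

#include <linux/netdevice.h>

struct my_binding {
	struct net_device *dev;
	netdevice_tracker tracker;
};

static void my_bind(struct my_binding *b, struct net_device *dev)
{
	b->dev = dev;
	netdev_hold(dev, &b->tracker, GFP_KERNEL);
}

static void my_unbind(struct my_binding *b)
{
	netdev_put(b->dev, &b->tracker);
	b->dev = NULL;
}
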
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
@@ -3831,10 +4081,7 @@ static inline void dev_hold(struct net_device *dev)
* called netif_lowerlayer_*() because they represent the state of any
* kind of lower layer not just hardware media.
*/
-
-void linkwatch_init_dev(struct net_device *dev);
void linkwatch_fire_event(struct net_device *dev);
-void linkwatch_forget_dev(struct net_device *dev);
/**
* netif_carrier_ok - test if carrier present
@@ -3852,8 +4099,8 @@ unsigned long dev_trans_start(struct net_device *dev);
void __netdev_watchdog_up(struct net_device *dev);
void netif_carrier_on(struct net_device *dev);
-
void netif_carrier_off(struct net_device *dev);
+void netif_carrier_event(struct net_device *dev);
/**
* netif_dormant_on - mark device as dormant.
@@ -3898,6 +4145,46 @@ static inline bool netif_dormant(const struct net_device *dev)
/**
+ * netif_testing_on - mark device as under test.
+ * @dev: network device
+ *
+ * Mark device as under test (as per RFC2863).
+ *
+ * The testing state indicates that some test(s) must be performed on
+ * the interface. After completion of the test, the interface state
+ * will change to up, dormant, or down, as appropriate.
+ */
+static inline void netif_testing_on(struct net_device *dev)
+{
+ if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state))
+ linkwatch_fire_event(dev);
+}
+
+/**
+ * netif_testing_off - set device as not under test.
+ * @dev: network device
+ *
+ * Device is not in testing state.
+ */
+static inline void netif_testing_off(struct net_device *dev)
+{
+ if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state))
+ linkwatch_fire_event(dev);
+}
+
+/**
+ * netif_testing - test if device is under test
+ * @dev: network device
+ *
+ * Check if device is under test
+ */
+static inline bool netif_testing(const struct net_device *dev)
+{
+ return test_bit(__LINK_STATE_TESTING, &dev->state);
+}
+
+
+/**
* netif_oper_up - test if device is operational
* @dev: network device
*
@@ -3915,7 +4202,7 @@ static inline bool netif_oper_up(const struct net_device *dev)
*
* Check if device has not been removed from system.
*/
-static inline bool netif_device_present(struct net_device *dev)
+static inline bool netif_device_present(const struct net_device *dev)
{
return test_bit(__LINK_STATE_PRESENT, &dev->state);
}
@@ -4002,7 +4289,8 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
spin_lock(&txq->_xmit_lock);
- txq->xmit_lock_owner = cpu;
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, cpu);
}
static inline bool __netif_tx_acquire(struct netdev_queue *txq)
@@ -4019,33 +4307,50 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
spin_lock_bh(&txq->_xmit_lock);
- txq->xmit_lock_owner = smp_processor_id();
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
}
static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
bool ok = spin_trylock(&txq->_xmit_lock);
- if (likely(ok))
- txq->xmit_lock_owner = smp_processor_id();
+
+ if (likely(ok)) {
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+ }
return ok;
}
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
- txq->xmit_lock_owner = -1;
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, -1);
spin_unlock(&txq->_xmit_lock);
}
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
- txq->xmit_lock_owner = -1;
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, -1);
spin_unlock_bh(&txq->_xmit_lock);
}
+/*
+ * txq->trans_start can be read locklessly from dev_watchdog()
+ */
static inline void txq_trans_update(struct netdev_queue *txq)
{
if (txq->xmit_lock_owner != -1)
- txq->trans_start = jiffies;
+ WRITE_ONCE(txq->trans_start, jiffies);
+}
+
+static inline void txq_trans_cond_update(struct netdev_queue *txq)
+{
+ unsigned long now = jiffies;
+
+ if (READ_ONCE(txq->trans_start) != now)
+ WRITE_ONCE(txq->trans_start, now);
}
/* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
@@ -4053,8 +4358,7 @@ static inline void netif_trans_update(struct net_device *dev)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
- if (txq->trans_start != jiffies)
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
}
/**
@@ -4063,27 +4367,7 @@ static inline void netif_trans_update(struct net_device *dev)
*
* Get network device transmit lock
*/
-static inline void netif_tx_lock(struct net_device *dev)
-{
- unsigned int i;
- int cpu;
-
- spin_lock(&dev->tx_global_lock);
- cpu = smp_processor_id();
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-
- /* We are the only thread of execution doing a
- * freeze, but we have to grab the _xmit_lock in
- * order to synchronize with threads which are in
- * the ->hard_start_xmit() handler and already
- * checked the frozen bit.
- */
- __netif_tx_lock(txq, cpu);
- set_bit(__QUEUE_STATE_FROZEN, &txq->state);
- __netif_tx_unlock(txq);
- }
-}
+void netif_tx_lock(struct net_device *dev);
static inline void netif_tx_lock_bh(struct net_device *dev)
{
@@ -4091,22 +4375,7 @@ static inline void netif_tx_lock_bh(struct net_device *dev)
netif_tx_lock(dev);
}
-static inline void netif_tx_unlock(struct net_device *dev)
-{
- unsigned int i;
-
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-
- /* No need to grab the _xmit_lock here. If the
- * queue is not stopped for another reason, we
- * force a schedule.
- */
- clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
- netif_schedule_queue(txq);
- }
- spin_unlock(&dev->tx_global_lock);
-}
+void netif_tx_unlock(struct net_device *dev);
static inline void netif_tx_unlock_bh(struct net_device *dev)
{
@@ -4142,6 +4411,7 @@ static inline void netif_tx_disable(struct net_device *dev)
local_bh_disable();
cpu = smp_processor_id();
+ spin_lock(&dev->tx_global_lock);
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
@@ -4149,17 +4419,29 @@ static inline void netif_tx_disable(struct net_device *dev)
netif_tx_stop_queue(txq);
__netif_tx_unlock(txq);
}
+ spin_unlock(&dev->tx_global_lock);
local_bh_enable();
}
static inline void netif_addr_lock(struct net_device *dev)
{
- spin_lock(&dev->addr_list_lock);
+ unsigned char nest_level = 0;
+
+#ifdef CONFIG_LOCKDEP
+ nest_level = dev->nested_level;
+#endif
+ spin_lock_nested(&dev->addr_list_lock, nest_level);
}
static inline void netif_addr_lock_bh(struct net_device *dev)
{
- spin_lock_bh(&dev->addr_list_lock);
+ unsigned char nest_level = 0;
+
+#ifdef CONFIG_LOCKDEP
+ nest_level = dev->nested_level;
+#endif
+ local_bh_disable();
+ spin_lock_nested(&dev->addr_list_lock, nest_level);
}
static inline void netif_addr_unlock(struct net_device *dev)
@@ -4198,6 +4480,8 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
int register_netdev(struct net_device *dev);
void unregister_netdev(struct net_device *dev);
+int devm_register_netdev(struct device *dev, struct net_device *ndev);
+
/* General hardware address lists handling functions */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list, int addr_len);
@@ -4225,12 +4509,24 @@ void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
void __hw_addr_init(struct netdev_hw_addr_list *list);
/* Functions used for device addresses handling */
+void dev_addr_mod(struct net_device *dev, unsigned int offset,
+ const void *addr, size_t len);
+
+static inline void
+__dev_addr_set(struct net_device *dev, const void *addr, size_t len)
+{
+ dev_addr_mod(dev, 0, addr, len);
+}
+
+static inline void dev_addr_set(struct net_device *dev, const u8 *addr)
+{
+ __dev_addr_set(dev, addr, dev->addr_len);
+}
+
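With dev->dev_addr now written only through helpers, a driver installing a MAC read from EEPROM uses dev_addr_set() instead of memcpy():

#include <linux/netdevice.h>

static void mydrv_install_mac(struct net_device *dev, const u8 *mac)
{
	dev_addr_set(dev, mac);	/* keeps the core's shadow copy in sync */
}
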
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
unsigned char addr_type);
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
unsigned char addr_type);
-void dev_addr_flush(struct net_device *dev);
-int dev_addr_init(struct net_device *dev);
/* Functions used for unicast addresses handling */
int dev_uc_add(struct net_device *dev, const unsigned char *addr);
@@ -4320,10 +4616,10 @@ static inline void __dev_mc_unsync(struct net_device *dev,
/* Functions used for secondary unicast and multicast support */
void dev_set_rx_mode(struct net_device *dev);
-void __dev_set_rx_mode(struct net_device *dev);
int dev_set_promiscuity(struct net_device *dev, int inc);
int dev_set_allmulti(struct net_device *dev, int inc);
void netdev_state_change(struct net_device *dev);
+void __netdev_notify_peers(struct net_device *dev);
void netdev_notify_peers(struct net_device *dev);
void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
@@ -4332,21 +4628,34 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *storage);
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
const struct net_device_stats *netdev_stats);
+void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
+ const struct pcpu_sw_netstats __percpu *netstats);
+void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);
extern int netdev_max_backlog;
-extern int netdev_tstamp_prequeue;
-extern int weight_p;
-extern int dev_weight_rx_bias;
-extern int dev_weight_tx_bias;
extern int dev_rx_weight;
extern int dev_tx_weight;
extern int gro_normal_batch;
+enum {
+ NESTED_SYNC_IMM_BIT,
+ NESTED_SYNC_TODO_BIT,
+};
+
+#define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit))
+#define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT)
+
+#define NESTED_SYNC_IMM __NESTED_SYNC(IMM)
+#define NESTED_SYNC_TODO __NESTED_SYNC(TODO)
+
+struct netdev_nested_priv {
+ unsigned char flags;
+ void *data;
+};
+
bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
struct list_head **iter);
-struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
- struct list_head **iter);
/* iterate through upper list, must be called under RCU read lock */
#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
@@ -4357,8 +4666,8 @@ struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
int (*fn)(struct net_device *upper_dev,
- void *data),
- void *data);
+ struct netdev_nested_priv *priv),
+ struct netdev_nested_priv *priv);
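
Under the new walker signature, per-call state travels in struct netdev_nested_priv rather than a bare void pointer. A counting sketch (the _rcu variant must run under rcu_read_lock()):

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

static int my_count_cb(struct net_device *upper,
		       struct netdev_nested_priv *priv)
{
	(*(int *)priv->data)++;
	return 0;	/* non-zero stops the walk */
}

static int my_upper_count(struct net_device *dev)
{
	int count = 0;
	struct netdev_nested_priv priv = { .data = &count };

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, my_count_cb, &priv);
	rcu_read_unlock();
	return count;
}
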
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
struct net_device *upper_dev);
@@ -4395,12 +4704,12 @@ struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
struct list_head **iter);
int netdev_walk_all_lower_dev(struct net_device *dev,
int (*fn)(struct net_device *lower_dev,
- void *data),
- void *data);
+ struct netdev_nested_priv *priv),
+ struct netdev_nested_priv *priv);
int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
int (*fn)(struct net_device *lower_dev,
- void *data),
- void *data);
+ struct netdev_nested_priv *priv),
+ struct netdev_nested_priv *priv);
void *netdev_adjacent_get_private(struct list_head *adj_list);
void *netdev_lower_get_first_private_rcu(struct net_device *dev);
@@ -4442,6 +4751,8 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
netdev_features_t features, bool tx_path);
+struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
+ netdev_features_t features, __be16 type);
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
netdev_features_t features);
@@ -4509,11 +4820,17 @@ static inline void netdev_rx_csum_fault(struct net_device *dev,
void net_enable_timestamp(void);
void net_disable_timestamp(void);
-#ifdef CONFIG_PROC_FS
-int __init dev_proc_init(void);
-#else
-#define dev_proc_init() 0
-#endif
+static inline ktime_t netdev_get_tstamp(struct net_device *dev,
+ const struct skb_shared_hwtstamps *hwtstamps,
+ bool cycles)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ if (ops->ndo_get_tstamp)
+ return ops->ndo_get_tstamp(dev, hwtstamps, cycles);
+
+ return hwtstamps->hwtstamp;
+}
static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
struct sk_buff *skb, struct net_device *dev,
@@ -4546,22 +4863,10 @@ int netdev_class_create_file_ns(const struct class_attribute *class_attr,
void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
const void *ns);
-static inline int netdev_class_create_file(const struct class_attribute *class_attr)
-{
- return netdev_class_create_file_ns(class_attr, NULL);
-}
-
-static inline void netdev_class_remove_file(const struct class_attribute *class_attr)
-{
- netdev_class_remove_file_ns(class_attr, NULL);
-}
-
extern const struct kobj_ns_type_operations net_ns_type_operations;
const char *netdev_drivername(const struct net_device *dev);
-void linkwatch_run_queue(void);
-
static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
netdev_features_t f2)
{
@@ -4647,11 +4952,10 @@ static inline bool netif_needs_gso(struct sk_buff *skb,
(skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
-static inline void netif_set_gso_max_size(struct net_device *dev,
- unsigned int size)
-{
- dev->gso_max_size = size;
-}
+void netif_set_tso_max_size(struct net_device *dev, unsigned int size);
+void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs);
+void netif_inherit_tso_max(struct net_device *to,
+ const struct net_device *from);
static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
int pulled_hlen, u16 mac_offset,
@@ -4731,6 +5035,11 @@ static inline bool netif_is_ovs_port(const struct net_device *dev)
return dev->priv_flags & IFF_OVS_DATAPATH;
}
+static inline bool netif_is_any_bridge_port(const struct net_device *dev)
+{
+ return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
+}
+
static inline bool netif_is_team_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_TEAM;
@@ -4776,7 +5085,7 @@ static inline void netif_keep_dst(struct net_device *dev)
static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
{
/* TODO: reserve and use an additional IFF bit, if we get more users */
- return dev->priv_flags & IFF_MACSEC;
+ return netif_is_macsec(dev);
}
extern struct pernet_operations __net_initdata loopback_net_ops;
@@ -4812,80 +5121,9 @@ static inline const char *netdev_reg_state(const struct net_device *dev)
return " (unknown)";
}
-__printf(3, 4) __cold
-void netdev_printk(const char *level, const struct net_device *dev,
- const char *format, ...);
-__printf(2, 3) __cold
-void netdev_emerg(const struct net_device *dev, const char *format, ...);
-__printf(2, 3) __cold
-void netdev_alert(const struct net_device *dev, const char *format, ...);
-__printf(2, 3) __cold
-void netdev_crit(const struct net_device *dev, const char *format, ...);
-__printf(2, 3) __cold
-void netdev_err(const struct net_device *dev, const char *format, ...);
-__printf(2, 3) __cold
-void netdev_warn(const struct net_device *dev, const char *format, ...);
-__printf(2, 3) __cold
-void netdev_notice(const struct net_device *dev, const char *format, ...);
-__printf(2, 3) __cold
-void netdev_info(const struct net_device *dev, const char *format, ...);
-
-#define netdev_level_once(level, dev, fmt, ...) \
-do { \
- static bool __print_once __read_mostly; \
- \
- if (!__print_once) { \
- __print_once = true; \
- netdev_printk(level, dev, fmt, ##__VA_ARGS__); \
- } \
-} while (0)
-
-#define netdev_emerg_once(dev, fmt, ...) \
- netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
-#define netdev_alert_once(dev, fmt, ...) \
- netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
-#define netdev_crit_once(dev, fmt, ...) \
- netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
-#define netdev_err_once(dev, fmt, ...) \
- netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__)
-#define netdev_warn_once(dev, fmt, ...) \
- netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
-#define netdev_notice_once(dev, fmt, ...) \
- netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
-#define netdev_info_once(dev, fmt, ...) \
- netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__)
-
#define MODULE_ALIAS_NETDEV(device) \
MODULE_ALIAS("netdev-" device)
-#if defined(CONFIG_DYNAMIC_DEBUG)
-#define netdev_dbg(__dev, format, args...) \
-do { \
- dynamic_netdev_dbg(__dev, format, ##args); \
-} while (0)
-#elif defined(DEBUG)
-#define netdev_dbg(__dev, format, args...) \
- netdev_printk(KERN_DEBUG, __dev, format, ##args)
-#else
-#define netdev_dbg(__dev, format, args...) \
-({ \
- if (0) \
- netdev_printk(KERN_DEBUG, __dev, format, ##args); \
-})
-#endif
-
-#if defined(VERBOSE_DEBUG)
-#define netdev_vdbg netdev_dbg
-#else
-
-#define netdev_vdbg(dev, format, args...) \
-({ \
- if (0) \
- netdev_printk(KERN_DEBUG, dev, format, ##args); \
- 0; \
-})
-#endif
-
/*
* netdev_WARN() acts like dev_printk(), but with the key difference
* of using a WARN/WARN_ON to get the message out, including the
@@ -4899,73 +5137,6 @@ do { \
WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \
netdev_reg_state(dev), ##args)
-/* netif printk helpers, similar to netdev_printk */
-
-#define netif_printk(priv, type, level, dev, fmt, args...) \
-do { \
- if (netif_msg_##type(priv)) \
- netdev_printk(level, (dev), fmt, ##args); \
-} while (0)
-
-#define netif_level(level, priv, type, dev, fmt, args...) \
-do { \
- if (netif_msg_##type(priv)) \
- netdev_##level(dev, fmt, ##args); \
-} while (0)
-
-#define netif_emerg(priv, type, dev, fmt, args...) \
- netif_level(emerg, priv, type, dev, fmt, ##args)
-#define netif_alert(priv, type, dev, fmt, args...) \
- netif_level(alert, priv, type, dev, fmt, ##args)
-#define netif_crit(priv, type, dev, fmt, args...) \
- netif_level(crit, priv, type, dev, fmt, ##args)
-#define netif_err(priv, type, dev, fmt, args...) \
- netif_level(err, priv, type, dev, fmt, ##args)
-#define netif_warn(priv, type, dev, fmt, args...) \
- netif_level(warn, priv, type, dev, fmt, ##args)
-#define netif_notice(priv, type, dev, fmt, args...) \
- netif_level(notice, priv, type, dev, fmt, ##args)
-#define netif_info(priv, type, dev, fmt, args...) \
- netif_level(info, priv, type, dev, fmt, ##args)
-
-#if defined(CONFIG_DYNAMIC_DEBUG)
-#define netif_dbg(priv, type, netdev, format, args...) \
-do { \
- if (netif_msg_##type(priv)) \
- dynamic_netdev_dbg(netdev, format, ##args); \
-} while (0)
-#elif defined(DEBUG)
-#define netif_dbg(priv, type, dev, format, args...) \
- netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
-#else
-#define netif_dbg(priv, type, dev, format, args...) \
-({ \
- if (0) \
- netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
- 0; \
-})
-#endif
-
-/* if @cond then downgrade to debug, else print at @level */
-#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \
- do { \
- if (cond) \
- netif_dbg(priv, type, netdev, fmt, ##args); \
- else \
- netif_ ## level(priv, type, netdev, fmt, ##args); \
- } while (0)
-
-#if defined(VERBOSE_DEBUG)
-#define netif_vdbg netif_dbg
-#else
-#define netif_vdbg(priv, type, dev, format, args...) \
-({ \
- if (0) \
- netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
- 0; \
-})
-#endif
-
/*
* The list of packet types we will receive (as opposed to discard)
* and the routines to invoke.
@@ -4988,6 +5159,9 @@ do { \
#define PTYPE_HASH_SIZE (16)
#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
+extern struct list_head ptype_all __read_mostly;
+extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
+
extern struct net_device *blackhole_netdev;
#endif /* _LINUX_NETDEVICE_H */
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index eb312e7ca36e..d8817d381c14 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -13,6 +13,7 @@
#include <linux/static_key.h>
#include <linux/netfilter_defs.h>
#include <linux/netdevice.h>
+#include <linux/sockptr.h>
#include <net/net_namespace.h>
static inline int NF_DROP_GETERR(int verdict)
@@ -64,8 +65,8 @@ struct nf_hook_ops;
struct sock;
struct nf_hook_state {
- unsigned int hook;
- u_int8_t pf;
+ u8 hook;
+ u8 pf;
struct net_device *in;
struct net_device *out;
struct sock *sk;
@@ -76,12 +77,18 @@ struct nf_hook_state {
typedef unsigned int nf_hookfn(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state);
+enum nf_hook_ops_type {
+ NF_HOOK_OP_UNDEFINED,
+ NF_HOOK_OP_NF_TABLES,
+};
+
struct nf_hook_ops {
/* User fills in from here down. */
nf_hookfn *hook;
struct net_device *dev;
void *priv;
- u_int8_t pf;
+ u8 pf;
+ enum nf_hook_ops_type hook_ops_type:8;
unsigned int hooknum;
/* Hooks are ordered in ascending priority. */
int priority;
@@ -163,18 +170,11 @@ struct nf_sockopt_ops {
/* Non-inclusive ranges: use 0/0/NULL to never get called. */
int set_optmin;
int set_optmax;
- int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len);
-#ifdef CONFIG_COMPAT
- int (*compat_set)(struct sock *sk, int optval,
- void __user *user, unsigned int len);
-#endif
+ int (*set)(struct sock *sk, int optval, sockptr_t arg,
+ unsigned int len);
int get_optmin;
int get_optmax;
int (*get)(struct sock *sk, int optval, void __user *user, int *len);
-#ifdef CONFIG_COMPAT
- int (*compat_get)(struct sock *sk, int optval,
- void __user *user, int *len);
-#endif
/* Use the module struct to lock set/get code in place */
struct module *owner;
};
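
With set() now taking sockptr_t, one handler serves setsockopt() callers with user pointers and in-kernel callers alike; copy_from_sockptr() from <linux/sockptr.h> performs the right copy for either. A minimal sketch of a handler under the new signature — the option struct and handler name are hypothetical:

#include <linux/sockptr.h>
#include <net/sock.h>

struct my_opts {			/* hypothetical option payload */
	u32 timeout;
};

static int my_set(struct sock *sk, int optval, sockptr_t arg,
		  unsigned int len)
{
	struct my_opts opts;

	if (len < sizeof(opts))
		return -EINVAL;

	/* one helper for both user and kernel source buffers */
	if (copy_from_sockptr(&opts, arg, sizeof(opts)))
		return -EFAULT;

	/* ... apply opts ... */
	return 0;
}
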
@@ -243,11 +243,6 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
hook_head = rcu_dereference(net->nf.hooks_bridge[hook]);
#endif
break;
-#if IS_ENABLED(CONFIG_DECNET)
- case NFPROTO_DECNET:
- hook_head = rcu_dereference(net->nf.hooks_decnet[hook]);
- break;
-#endif
default:
WARN_ON_ONCE(1);
break;
@@ -346,16 +341,10 @@ NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
}
/* Call setsockopt() */
-int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
+int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, sockptr_t opt,
unsigned int len);
int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
int *len);
-#ifdef CONFIG_COMPAT
-int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval,
- char __user *opt, unsigned int len);
-int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
- char __user *opt, int *len);
-#endif
struct flowi;
struct nf_queue_entry;
@@ -385,15 +374,16 @@ struct nf_nat_hook {
unsigned int (*manip_pkt)(struct sk_buff *skb, struct nf_conn *ct,
enum nf_nat_manip_type mtype,
enum ip_conntrack_dir dir);
+ void (*remove_nat_bysrc)(struct nf_conn *ct);
};
-extern struct nf_nat_hook __rcu *nf_nat_hook;
+extern const struct nf_nat_hook __rcu *nf_nat_hook;
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
#if IS_ENABLED(CONFIG_NF_NAT)
- struct nf_nat_hook *nat_hook;
+ const struct nf_nat_hook *nat_hook;
rcu_read_lock();
nat_hook = rcu_dereference(nf_nat_hook);
@@ -446,7 +436,6 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_zones_common.h>
-extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
struct nf_conntrack_tuple;
bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
@@ -469,14 +458,13 @@ struct nf_ct_hook {
void (*destroy)(struct nf_conntrack *);
bool (*get_tuple_skb)(struct nf_conntrack_tuple *,
const struct sk_buff *);
+ void (*attach)(struct sk_buff *nskb, const struct sk_buff *skb);
};
-extern struct nf_ct_hook __rcu *nf_ct_hook;
+extern const struct nf_ct_hook __rcu *nf_ct_hook;
struct nlattr;
struct nfnl_ct_hook {
- struct nf_conn *(*get_ct)(const struct sk_buff *skb,
- enum ip_conntrack_info *ctinfo);
size_t (*build_size)(const struct nf_conn *ct);
int (*build)(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
@@ -487,7 +475,7 @@ struct nfnl_ct_hook {
void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo, s32 off);
};
-extern struct nfnl_ct_hook __rcu *nfnl_ct_hook;
+extern const struct nfnl_ct_hook __rcu *nfnl_ct_hook;
/**
* nf_skb_duplicated - TEE target has sent a packet
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 5448c8b443db..ada1296c87d5 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -98,7 +98,7 @@ struct ip_set_counter {
struct ip_set_comment_rcu {
struct rcu_head rcu;
- char str[0];
+ char str[];
};
struct ip_set_comment {
@@ -124,8 +124,6 @@ struct ip_set_ext {
bool target;
};
-struct ip_set;
-
#define ext_timeout(e, s) \
((unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT]))
#define ext_counter(e, s) \
@@ -198,6 +196,12 @@ struct ip_set_region {
u32 elements; /* Number of elements vs timeout */
};
+/* Max range where every element is added/deleted in one step */
+#define IPSET_MAX_RANGE (1<<20)
+
+/* The max revision number supported by any set type + 1 */
+#define IPSET_REVISION_MAX 9
+
/* The core set type structure */
struct ip_set_type {
struct list_head list;
@@ -215,6 +219,8 @@ struct ip_set_type {
u8 family;
/* Type revisions */
u8 revision_min, revision_max;
+ /* Revision-specific supported (create) flags */
+ u8 create_flags[IPSET_REVISION_MAX+1];
/* Set features to control swapping */
u16 features;
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 1db83c931d9c..2770db2fa080 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -2,15 +2,15 @@
#ifndef _NF_CONNTRACK_COMMON_H
#define _NF_CONNTRACK_COMMON_H
-#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <uapi/linux/netfilter/nf_conntrack_common.h>
struct ip_conntrack_stat {
unsigned int found;
unsigned int invalid;
- unsigned int ignore;
unsigned int insert;
unsigned int insert_failed;
+ unsigned int clash_resolve;
unsigned int drop;
unsigned int early_drop;
unsigned int error;
@@ -18,25 +18,28 @@ struct ip_conntrack_stat {
unsigned int expect_create;
unsigned int expect_delete;
unsigned int search_restart;
+ unsigned int chaintoolong;
};
#define NFCT_INFOMASK 7UL
#define NFCT_PTRMASK ~(NFCT_INFOMASK)
struct nf_conntrack {
- atomic_t use;
+ refcount_t use;
};
void nf_conntrack_destroy(struct nf_conntrack *nfct);
+
+/* like nf_ct_put, but without module dependency on nf_conntrack */
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
- if (nfct && atomic_dec_and_test(&nfct->use))
+ if (nfct && refcount_dec_and_test(&nfct->use))
nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
if (nfct)
- atomic_inc(&nfct->use);
+ refcount_inc(&nfct->use);
}
#endif /* _NF_CONNTRACK_COMMON_H */
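
The atomic_t to refcount_t switch keeps the fast path but makes the counter saturate and WARN on over/underflow instead of silently wrapping, so a refcounting bug degrades into a leak plus a splat rather than a use-after-free. The generic lifecycle, sketched on a hypothetical object:

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {				/* hypothetical refcounted object */
	refcount_t use;
};

static struct obj *obj_alloc(void)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (o)
		refcount_set(&o->use, 1);	/* initial reference */
	return o;
}

static void obj_get(struct obj *o)
{
	refcount_inc(&o->use);		/* saturates and WARNs on overflow */
}

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->use))
		kfree(o);
}
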
diff --git a/include/linux/netfilter/nf_conntrack_h323.h b/include/linux/netfilter/nf_conntrack_h323.h
index 4561ec0fcea4..9e937f64a1ad 100644
--- a/include/linux/netfilter/nf_conntrack_h323.h
+++ b/include/linux/netfilter/nf_conntrack_h323.h
@@ -38,60 +38,63 @@ void nf_conntrack_h245_expect(struct nf_conn *new,
struct nf_conntrack_expect *this);
void nf_conntrack_q931_expect(struct nf_conn *new,
struct nf_conntrack_expect *this);
-extern int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff,
- unsigned char **data, int dataoff,
- H245_TransportAddress *taddr,
- union nf_inet_addr *addr,
- __be16 port);
-extern int (*set_h225_addr_hook) (struct sk_buff *skb, unsigned int protoff,
- unsigned char **data, int dataoff,
- TransportAddress *taddr,
- union nf_inet_addr *addr,
- __be16 port);
-extern int (*set_sig_addr_hook) (struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff, unsigned char **data,
- TransportAddress *taddr, int count);
-extern int (*set_ras_addr_hook) (struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff, unsigned char **data,
- TransportAddress *taddr, int count);
-extern int (*nat_rtp_rtcp_hook) (struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff, unsigned char **data,
- int dataoff,
- H245_TransportAddress *taddr,
- __be16 port, __be16 rtp_port,
- struct nf_conntrack_expect *rtp_exp,
- struct nf_conntrack_expect *rtcp_exp);
-extern int (*nat_t120_hook) (struct sk_buff *skb, struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
+
+struct nfct_h323_nat_hooks {
+ int (*set_h245_addr)(struct sk_buff *skb, unsigned int protoff,
unsigned char **data, int dataoff,
- H245_TransportAddress *taddr, __be16 port,
- struct nf_conntrack_expect *exp);
-extern int (*nat_h245_hook) (struct sk_buff *skb, struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
+ H245_TransportAddress *taddr,
+ union nf_inet_addr *addr, __be16 port);
+ int (*set_h225_addr)(struct sk_buff *skb, unsigned int protoff,
unsigned char **data, int dataoff,
- TransportAddress *taddr, __be16 port,
- struct nf_conntrack_expect *exp);
-extern int (*nat_callforwarding_hook) (struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- unsigned char **data, int dataoff,
- TransportAddress *taddr,
- __be16 port,
- struct nf_conntrack_expect *exp);
-extern int (*nat_q931_hook) (struct sk_buff *skb, struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- unsigned char **data, TransportAddress *taddr,
- int idx, __be16 port,
- struct nf_conntrack_expect *exp);
+ TransportAddress *taddr,
+ union nf_inet_addr *addr, __be16 port);
+ int (*set_sig_addr)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff, unsigned char **data,
+ TransportAddress *taddr, int count);
+ int (*set_ras_addr)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff, unsigned char **data,
+ TransportAddress *taddr, int count);
+ int (*nat_rtp_rtcp)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, int dataoff,
+ H245_TransportAddress *taddr,
+ __be16 port, __be16 rtp_port,
+ struct nf_conntrack_expect *rtp_exp,
+ struct nf_conntrack_expect *rtcp_exp);
+ int (*nat_t120)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, int dataoff,
+ H245_TransportAddress *taddr, __be16 port,
+ struct nf_conntrack_expect *exp);
+ int (*nat_h245)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, int dataoff,
+ TransportAddress *taddr, __be16 port,
+ struct nf_conntrack_expect *exp);
+ int (*nat_callforwarding)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, int dataoff,
+ TransportAddress *taddr, __be16 port,
+ struct nf_conntrack_expect *exp);
+ int (*nat_q931)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, TransportAddress *taddr, int idx,
+ __be16 port, struct nf_conntrack_expect *exp);
+};
+extern const struct nfct_h323_nat_hooks __rcu *nfct_h323_nat_hook;
#endif
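
Folding the eight exported function pointers into one struct lets callers take a single RCU snapshot instead of racing on each pointer separately. A sketch of the call side, assuming the arguments come from the surrounding conntrack helper:

static int set_sig_addr_sketch(struct sk_buff *skb, struct nf_conn *ct,
			       enum ip_conntrack_info ctinfo,
			       unsigned int protoff, unsigned char **data,
			       TransportAddress *taddr, int count)
{
	const struct nfct_h323_nat_hooks *nathook;
	int ret = 0;

	rcu_read_lock();
	nathook = rcu_dereference(nfct_h323_nat_hook);
	if (nathook)
		ret = nathook->set_sig_addr(skb, ct, ctinfo, protoff,
					    data, taddr, count);
	rcu_read_unlock();
	return ret;
}
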
diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h
index fcc409de31a4..c3bdb4370938 100644
--- a/include/linux/netfilter/nf_conntrack_pptp.h
+++ b/include/linux/netfilter/nf_conntrack_pptp.h
@@ -10,7 +10,7 @@
#include <net/netfilter/nf_conntrack_expect.h>
#include <uapi/linux/netfilter/nf_conntrack_tuple_common.h>
-extern const char *const pptp_msg_name[];
+const char *pptp_msg_name(u_int16_t msg);
/* state of the control session */
enum pptp_ctrlsess_state {
@@ -300,26 +300,22 @@ union pptp_ctrl_union {
struct PptpSetLinkInfo setlink;
};
-extern int
-(*nf_nat_pptp_hook_outbound)(struct sk_buff *skb,
- struct nf_conn *ct, enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- struct PptpControlHeader *ctlh,
- union pptp_ctrl_union *pptpReq);
-
-extern int
-(*nf_nat_pptp_hook_inbound)(struct sk_buff *skb,
- struct nf_conn *ct, enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- struct PptpControlHeader *ctlh,
- union pptp_ctrl_union *pptpReq);
-
-extern void
-(*nf_nat_pptp_hook_exp_gre)(struct nf_conntrack_expect *exp_orig,
- struct nf_conntrack_expect *exp_reply);
-
-extern void
-(*nf_nat_pptp_hook_expectfn)(struct nf_conn *ct,
- struct nf_conntrack_expect *exp);
+struct nf_nat_pptp_hook {
+ int (*outbound)(struct sk_buff *skb,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ struct PptpControlHeader *ctlh,
+ union pptp_ctrl_union *pptpReq);
+ int (*inbound)(struct sk_buff *skb,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ struct PptpControlHeader *ctlh,
+ union pptp_ctrl_union *pptpReq);
+ void (*exp_gre)(struct nf_conntrack_expect *exp_orig,
+ struct nf_conntrack_expect *exp_reply);
+ void (*expectfn)(struct nf_conn *ct,
+ struct nf_conntrack_expect *exp);
+};
+extern const struct nf_nat_pptp_hook __rcu *nf_nat_pptp_hook;
#endif /* _NF_CONNTRACK_PPTP_H */
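
The NAT module publishes its implementation by pointing the single __rcu pointer at a filled-in struct, and tears it down by clearing the pointer and waiting out readers. A sketch with hypothetical handler names:

static const struct nf_nat_pptp_hook pptp_nat_hooks = {
	.outbound = my_pptp_outbound,	/* all four names hypothetical */
	.inbound  = my_pptp_inbound,
	.exp_gre  = my_pptp_exp_gre,
	.expectfn = my_pptp_expectfn,
};

static int __init pptp_nat_sketch_init(void)
{
	rcu_assign_pointer(nf_nat_pptp_hook, &pptp_nat_hooks);
	return 0;
}

static void __exit pptp_nat_sketch_exit(void)
{
	RCU_INIT_POINTER(nf_nat_pptp_hook, NULL);
	synchronize_rcu();		/* wait out in-flight readers */
}
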
diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h
index 9a33f171aa82..625f491b95de 100644
--- a/include/linux/netfilter/nf_conntrack_sctp.h
+++ b/include/linux/netfilter/nf_conntrack_sctp.h
@@ -9,6 +9,8 @@ struct ip_ct_sctp {
enum sctp_conntrack state;
__be32 vtag[IP_CT_DIR_MAX];
+ u8 last_dir;
+ u8 flags;
};
#endif /* _NF_CONNTRACK_SCTP_H */
diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h
index c620521c42bc..dbc614dfe0d5 100644
--- a/include/linux/netfilter/nf_conntrack_sip.h
+++ b/include/linux/netfilter/nf_conntrack_sip.h
@@ -164,7 +164,7 @@ struct nf_nat_sip_hooks {
unsigned int medialen,
union nf_inet_addr *rtp_addr);
};
-extern const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
+extern const struct nf_nat_sip_hooks __rcu *nf_nat_sip_hooks;
int ct_sip_parse_request(const struct nf_conn *ct, const char *dptr,
unsigned int datalen, unsigned int *matchoff,
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 851425c3178f..241e005f290a 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -7,21 +7,33 @@
#include <net/netlink.h>
#include <uapi/linux/netfilter/nfnetlink.h>
+struct nfnl_info {
+ struct net *net;
+ struct sock *sk;
+ const struct nlmsghdr *nlh;
+ const struct nfgenmsg *nfmsg;
+ struct netlink_ext_ack *extack;
+};
+
+enum nfnl_callback_type {
+ NFNL_CB_UNSPEC = 0,
+ NFNL_CB_MUTEX,
+ NFNL_CB_RCU,
+ NFNL_CB_BATCH,
+};
+
struct nfnl_callback {
- int (*call)(struct net *net, struct sock *nl, struct sk_buff *skb,
- const struct nlmsghdr *nlh,
- const struct nlattr * const cda[],
- struct netlink_ext_ack *extack);
- int (*call_rcu)(struct net *net, struct sock *nl, struct sk_buff *skb,
- const struct nlmsghdr *nlh,
- const struct nlattr * const cda[],
- struct netlink_ext_ack *extack);
- int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb,
- const struct nlmsghdr *nlh,
- const struct nlattr * const cda[],
- struct netlink_ext_ack *extack);
- const struct nla_policy *policy; /* netlink attribute policy */
- const u_int16_t attr_count; /* number of nlattr's */
+ int (*call)(struct sk_buff *skb, const struct nfnl_info *info,
+ const struct nlattr * const cda[]);
+ const struct nla_policy *policy;
+ enum nfnl_callback_type type;
+ __u16 attr_count;
+};
+
+enum nfnl_abort_action {
+ NFNL_ABORT_NONE = 0,
+ NFNL_ABORT_AUTOLOAD,
+ NFNL_ABORT_VALIDATE,
};
struct nfnetlink_subsystem {
@@ -31,7 +43,8 @@ struct nfnetlink_subsystem {
const struct nfnl_callback *cb; /* callback for individual types */
struct module *owner;
int (*commit)(struct net *net, struct sk_buff *skb);
- int (*abort)(struct net *net, struct sk_buff *skb, bool autoload);
+ int (*abort)(struct net *net, struct sk_buff *skb,
+ enum nfnl_abort_action action);
void (*cleanup)(struct net *net);
bool (*valid_genid)(struct net *net, u32 genid);
};
@@ -43,14 +56,42 @@ int nfnetlink_has_listeners(struct net *net, unsigned int group);
int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
unsigned int group, int echo, gfp_t flags);
int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
-int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
- int flags);
+int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid);
+void nfnetlink_broadcast(struct net *net, struct sk_buff *skb, __u32 portid,
+ __u32 group, gfp_t allocation);
static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type)
{
return subsys << 8 | msg_type;
}
+static inline void nfnl_fill_hdr(struct nlmsghdr *nlh, u8 family, u8 version,
+ __be16 res_id)
+{
+ struct nfgenmsg *nfmsg;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = family;
+ nfmsg->version = version;
+ nfmsg->res_id = res_id;
+}
+
+static inline struct nlmsghdr *nfnl_msg_put(struct sk_buff *skb, u32 portid,
+ u32 seq, int type, int flags,
+ u8 family, u8 version,
+ __be16 res_id)
+{
+ struct nlmsghdr *nlh;
+
+ nlh = nlmsg_put(skb, portid, seq, type, sizeof(struct nfgenmsg), flags);
+ if (!nlh)
+ return NULL;
+
+ nfnl_fill_hdr(nlh, family, version, res_id);
+
+ return nlh;
+}
+
void nfnl_lock(__u8 subsys_id);
void nfnl_unlock(__u8 subsys_id);
#ifdef CONFIG_PROVE_LOCKING
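
nfnl_msg_put() folds away the nlmsg_put()-plus-nfgenmsg boilerplate every nfnetlink dumper used to open-code. A sketch of a fill function using it; the ctnetlink constants stand in for whichever subsystem the caller actually serves:

/* needs <linux/netfilter/nfnetlink.h> plus the ctnetlink uapi header */
static int ct_fill_info_sketch(struct sk_buff *skb, u32 portid, u32 seq)
{
	u16 type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_NEW);
	struct nlmsghdr *nlh;

	nlh = nfnl_msg_put(skb, portid, seq, type, NLM_F_MULTI,
			   AF_INET, NFNETLINK_V0, 0);
	if (!nlh)
		return -EMSGSIZE;

	/* ... nla_put() the payload attributes here ... */

	nlmsg_end(skb, nlh);
	return skb->len;
}
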
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 1b261c51b3a3..5897f3dbaf7c 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -36,8 +36,8 @@ struct xt_action_param {
const void *matchinfo, *targinfo;
};
const struct nf_hook_state *state;
- int fragoff;
unsigned int thoff;
+ u16 fragoff;
bool hotdrop;
};
@@ -158,7 +158,7 @@ struct xt_match {
/* Called when entry of this type deleted. */
void (*destroy)(const struct xt_mtdtor_param *);
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
/* Called when the userspace alignment differs from the kernel one */
void (*compat_from_user)(void *dst, const void *src);
int (*compat_to_user)(void __user *dst, const void *src);
@@ -169,7 +169,7 @@ struct xt_match {
const char *table;
unsigned int matchsize;
unsigned int usersize;
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
unsigned int compatsize;
#endif
unsigned int hooks;
@@ -199,7 +199,7 @@ struct xt_target {
/* Called when entry of this type deleted. */
void (*destroy)(const struct xt_tgdtor_param *);
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
/* Called when the userspace alignment differs from the kernel one */
void (*compat_from_user)(void *dst, const void *src);
int (*compat_to_user)(void __user *dst, const void *src);
@@ -210,7 +210,7 @@ struct xt_target {
const char *table;
unsigned int targetsize;
unsigned int usersize;
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
unsigned int compatsize;
#endif
unsigned int hooks;
@@ -229,15 +229,15 @@ struct xt_table {
/* Man behind the curtain... */
struct xt_table_info *private;
+ /* hook ops that register the table with the netfilter core */
+ struct nf_hook_ops *ops;
+
/* Set this to THIS_MODULE if you are a module, otherwise NULL */
struct module *me;
u_int8_t af; /* address/protocol family */
int priority; /* hook order */
- /* called when table is needed in the given netns */
- int (*table_init)(struct net *net);
-
/* A unique name... */
const char name[XT_TABLE_MAXNAMELEN];
};
@@ -264,7 +264,7 @@ struct xt_table_info {
unsigned int stacksize;
void ***jumpstack;
- unsigned char entries[0] __aligned(8);
+ unsigned char entries[] __aligned(8);
};
int xt_register_target(struct xt_target *target);
@@ -301,8 +301,8 @@ int xt_target_to_user(const struct xt_entry_target *t,
int xt_data_to_user(void __user *dst, const void *src,
int usersize, int size, int aligned_size);
-void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
- struct xt_counters_info *info, bool compat);
+void *xt_copy_counters(sockptr_t arg, unsigned int len,
+ struct xt_counters_info *info);
struct xt_counters *xt_counters_alloc(unsigned int counters);
struct xt_table *xt_register_table(struct net *net,
@@ -322,6 +322,7 @@ struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
int *err);
+struct xt_table *xt_find_table(struct net *net, u8 af, const char *name);
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
const char *name);
struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
@@ -376,7 +377,7 @@ static inline unsigned int xt_write_recseq_begin(void)
* since addend is most likely 1
*/
__this_cpu_add(xt_recseq.sequence, addend);
- smp_wmb();
+ smp_mb();
return addend;
}
@@ -448,7 +449,10 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);
-#ifdef CONFIG_COMPAT
+int xt_register_template(const struct xt_table *t, int(*table_init)(struct net *net));
+void xt_unregister_template(const struct xt_table *t);
+
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>
struct compat_xt_entry_match {
@@ -464,7 +468,7 @@ struct compat_xt_entry_match {
} kernel;
u_int16_t match_size;
} u;
- unsigned char data[0];
+ unsigned char data[];
};
struct compat_xt_entry_target {
@@ -480,7 +484,7 @@ struct compat_xt_entry_target {
} kernel;
u_int16_t target_size;
} u;
- unsigned char data[0];
+ unsigned char data[];
};
/* FIXME: this works only on 32 bit tasks
@@ -494,7 +498,7 @@ struct compat_xt_counters {
struct compat_xt_counters_info {
char name[XT_TABLE_MAXNAMELEN];
compat_uint_t num_counters;
- struct compat_xt_counters counters[0];
+ struct compat_xt_counters counters[];
};
struct _compat_xt_align {
@@ -529,5 +533,5 @@ int xt_compat_check_entry_offsets(const void *base, const char *elems,
unsigned int target_offset,
unsigned int next_offset);
-#endif /* CONFIG_COMPAT */
+#endif /* CONFIG_NETFILTER_XTABLES_COMPAT */
#endif /* _X_TABLES_H */
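
xt_write_recseq_begin()/xt_write_recseq_end() bracket per-CPU counter updates so readers can tell a writer is in flight; the smp_wmb() to smp_mb() change strengthens the ordering between the sequence bump and the accesses that follow it. A sketch of the usage pattern from a do_table()-style traversal:

/* counters points at the matched rule's struct xt_counters */
static void xt_charge_counter_sketch(struct xt_counters *counters,
				     const struct sk_buff *skb)
{
	unsigned int addend;

	local_bh_disable();
	addend = xt_write_recseq_begin();

	ADD_COUNTER(*counters, skb->len, 1);	/* bytes, packets */

	xt_write_recseq_end(addend);
	local_bh_enable();
}
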
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
index e98028f00e47..a40aaf645fa4 100644
--- a/include/linux/netfilter_arp/arp_tables.h
+++ b/include/linux/netfilter_arp/arp_tables.h
@@ -51,14 +51,13 @@ struct arpt_error {
extern void *arpt_alloc_initial_table(const struct xt_table *);
int arpt_register_table(struct net *net, const struct xt_table *table,
const struct arpt_replace *repl,
- const struct nf_hook_ops *ops, struct xt_table **res);
-void arpt_unregister_table(struct net *net, struct xt_table *table,
- const struct nf_hook_ops *ops);
-extern unsigned int arpt_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct xt_table *table);
+ const struct nf_hook_ops *ops);
+void arpt_unregister_table(struct net *net, const char *name);
+void arpt_unregister_table_pre_exit(struct net *net, const char *name);
+extern unsigned int arpt_do_table(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>
struct compat_arpt_entry {
@@ -67,7 +66,7 @@ struct compat_arpt_entry {
__u16 next_offset;
compat_uint_t comefrom;
struct compat_xt_counters counters;
- unsigned char elems[0];
+ unsigned char elems[];
};
static inline struct xt_entry_target *
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 162f59d0d17a..fd533552a062 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -85,7 +85,7 @@ struct ebt_table_info {
/* room to maintain the stack used for jumping from and into udc */
struct ebt_chainstack **chainstack;
char *entries;
- struct ebt_counter counters[0] ____cacheline_aligned;
+ struct ebt_counter counters[] ____cacheline_aligned;
};
struct ebt_table {
@@ -94,12 +94,9 @@ struct ebt_table {
struct ebt_replace_kernel *table;
unsigned int valid_hooks;
rwlock_t lock;
- /* e.g. could be the table explicitly only allows certain
- * matches, targets, ... 0 == let it in */
- int (*check)(const struct ebt_table_info *info,
- unsigned int valid_hooks);
/* the data used by the kernel */
struct ebt_table_info *private;
+ struct nf_hook_ops *ops;
struct module *me;
};
@@ -108,13 +105,11 @@ struct ebt_table {
extern int ebt_register_table(struct net *net,
const struct ebt_table *table,
- const struct nf_hook_ops *ops,
- struct ebt_table **res);
-extern void ebt_unregister_table(struct net *net, struct ebt_table *table,
- const struct nf_hook_ops *);
-extern unsigned int ebt_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct ebt_table *table);
+ const struct nf_hook_ops *ops);
+extern void ebt_unregister_table(struct net *net, const char *tablename);
+void ebt_unregister_table_pre_exit(struct net *net, const char *tablename);
+extern unsigned int ebt_do_table(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
/* True if the hook mask denotes that the rule is in a base chain,
* used in the check() functions */
@@ -127,4 +122,6 @@ static inline bool ebt_invalid_target(int target)
return (target < -NUM_STANDARD_TARGETS || target >= 0);
}
+int ebt_register_template(const struct ebt_table *t, int(*table_init)(struct net *net));
+void ebt_unregister_template(const struct ebt_table *t);
#endif
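
ebt_register_template() separates declaring a table type from instantiating it: the template carries a per-netns init callback, and the netfilter core invokes it when a namespace actually wants the table. A sketch with hypothetical names (table fields trimmed for brevity):

static const struct ebt_table frame_filter_sketch = {
	.name		= "filter",
	.valid_hooks	= (1 << NF_BR_LOCAL_IN) | (1 << NF_BR_FORWARD) |
			  (1 << NF_BR_LOCAL_OUT),
	.me		= THIS_MODULE,
	/* .table, .private etc. trimmed for brevity */
};

static int frame_filter_net_init(struct net *net)
{
	/* frame_filter_ops: the table's nf_hook_ops, hypothetical here */
	return ebt_register_table(net, &frame_filter_sketch,
				  frame_filter_ops);
}

static int __init ebtable_filter_sketch_init(void)
{
	return ebt_register_template(&frame_filter_sketch,
				     frame_filter_net_init);
}
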
diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h
index 8dddfb151f00..a5f7bef1b3a4 100644
--- a/include/linux/netfilter_defs.h
+++ b/include/linux/netfilter_defs.h
@@ -7,14 +7,6 @@
/* in/out/forward only */
#define NF_ARP_NUMHOOKS 3
-/* max hook is NF_DN_ROUTE (6), also see uapi/linux/netfilter_decnet.h */
-#define NF_DN_NUMHOOKS 7
-
-#if IS_ENABLED(CONFIG_DECNET)
-/* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */
-#define NF_MAX_HOOKS NF_DN_NUMHOOKS
-#else
#define NF_MAX_HOOKS NF_INET_NUMHOOKS
-#endif
#endif
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
deleted file mode 100644
index a13774be2eb5..000000000000
--- a/include/linux/netfilter_ingress.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NETFILTER_INGRESS_H_
-#define _NETFILTER_INGRESS_H_
-
-#include <linux/netfilter.h>
-#include <linux/netdevice.h>
-
-#ifdef CONFIG_NETFILTER_INGRESS
-static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
-{
-#ifdef CONFIG_JUMP_LABEL
- if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
- return false;
-#endif
- return rcu_access_pointer(skb->dev->nf_hooks_ingress);
-}
-
-/* caller must hold rcu_read_lock */
-static inline int nf_hook_ingress(struct sk_buff *skb)
-{
- struct nf_hook_entries *e = rcu_dereference(skb->dev->nf_hooks_ingress);
- struct nf_hook_state state;
- int ret;
-
- /* Must recheck the ingress hook head, in the event it became NULL
- * after the check in nf_hook_ingress_active evaluated to true.
- */
- if (unlikely(!e))
- return 0;
-
- nf_hook_state_init(&state, NF_NETDEV_INGRESS,
- NFPROTO_NETDEV, skb->dev, NULL, NULL,
- dev_net(skb->dev), NULL);
- ret = nf_hook_slow(skb, &state, e, 0);
- if (ret == 0)
- return -1;
-
- return ret;
-}
-
-static inline void nf_hook_ingress_init(struct net_device *dev)
-{
- RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL);
-}
-#else /* CONFIG_NETFILTER_INGRESS */
-static inline int nf_hook_ingress_active(struct sk_buff *skb)
-{
- return 0;
-}
-
-static inline int nf_hook_ingress(struct sk_buff *skb)
-{
- return 0;
-}
-
-static inline void nf_hook_ingress_init(struct net_device *dev) {}
-#endif /* CONFIG_NETFILTER_INGRESS */
-#endif /* _NETFILTER_INGRESS_H_ */
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index 082e2c41b7ff..5b70ca868bb1 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -16,7 +16,7 @@ struct ip_rt_info {
u_int32_t mark;
};
-int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type);
+int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned addr_type);
struct nf_queue_entry;
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index e9e1ed74cdf1..132b0e4a6d4d 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -24,9 +24,10 @@
int ipt_register_table(struct net *net, const struct xt_table *table,
const struct ipt_replace *repl,
- const struct nf_hook_ops *ops, struct xt_table **res);
-void ipt_unregister_table(struct net *net, struct xt_table *table,
- const struct nf_hook_ops *ops);
+ const struct nf_hook_ops *ops);
+
+void ipt_unregister_table_pre_exit(struct net *net, const char *name);
+void ipt_unregister_table_exit(struct net *net, const char *name);
/* Standard entry. */
struct ipt_standard {
@@ -62,11 +63,11 @@ struct ipt_error {
}
extern void *ipt_alloc_initial_table(const struct xt_table *);
-extern unsigned int ipt_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct xt_table *table);
+extern unsigned int ipt_do_table(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state);
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>
struct compat_ipt_entry {
@@ -76,7 +77,7 @@ struct compat_ipt_entry {
__u16 next_offset;
compat_uint_t comefrom;
struct compat_xt_counters counters;
- unsigned char elems[0];
+ unsigned char elems[];
};
/* Helper functions */
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index aac42c28fe62..48314ade1506 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -42,7 +42,7 @@ struct nf_ipv6_ops {
#if IS_MODULE(CONFIG_IPV6)
int (*chk_addr)(struct net *net, const struct in6_addr *addr,
const struct net_device *dev, int strict);
- int (*route_me_harder)(struct net *net, struct sk_buff *skb);
+ int (*route_me_harder)(struct net *net, struct sock *sk, struct sk_buff *skb);
int (*dev_get_saddr)(struct net *net, const struct net_device *dev,
const struct in6_addr *daddr, unsigned int srcprefs,
struct in6_addr *saddr);
@@ -58,7 +58,6 @@ struct nf_ipv6_ops {
int (*output)(struct net *, struct sock *, struct sk_buff *));
int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
#if IS_MODULE(CONFIG_IPV6)
- int (*br_defrag)(struct net *net, struct sk_buff *skb, u32 user);
int (*br_fragment)(struct net *net, struct sock *sk,
struct sk_buff *skb,
struct nf_bridge_frag_data *data,
@@ -117,23 +116,6 @@ static inline int nf_ip6_route(struct net *net, struct dst_entry **dst,
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
-static inline int nf_ipv6_br_defrag(struct net *net, struct sk_buff *skb,
- u32 user)
-{
-#if IS_MODULE(CONFIG_IPV6)
- const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
-
- if (!v6_ops)
- return 1;
-
- return v6_ops->br_defrag(net, skb, user);
-#elif IS_BUILTIN(CONFIG_IPV6)
- return nf_ct_frag6_gather(net, skb, user);
-#else
- return 1;
-#endif
-}
-
int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
struct nf_bridge_frag_data *data,
int (*output)(struct net *, struct sock *sk,
@@ -161,9 +143,9 @@ static inline int nf_br_ip6_fragment(struct net *net, struct sock *sk,
#endif
}
-int ip6_route_me_harder(struct net *net, struct sk_buff *skb);
+int ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb);
-static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb)
+static inline int nf_ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb)
{
#if IS_MODULE(CONFIG_IPV6)
const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
@@ -171,9 +153,9 @@ static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb)
if (!v6_ops)
return -EHOSTUNREACH;
- return v6_ops->route_me_harder(net, skb);
+ return v6_ops->route_me_harder(net, sk, skb);
#elif IS_BUILTIN(CONFIG_IPV6)
- return ip6_route_me_harder(net, skb);
+ return ip6_route_me_harder(net, sk, skb);
#else
return -EHOSTUNREACH;
#endif
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index 78ab959c4575..8b8885a73c76 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -26,14 +26,13 @@ extern void *ip6t_alloc_initial_table(const struct xt_table *);
int ip6t_register_table(struct net *net, const struct xt_table *table,
const struct ip6t_replace *repl,
- const struct nf_hook_ops *ops, struct xt_table **res);
-void ip6t_unregister_table(struct net *net, struct xt_table *table,
- const struct nf_hook_ops *ops);
-extern unsigned int ip6t_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct xt_table *table);
-
-#ifdef CONFIG_COMPAT
+ const struct nf_hook_ops *ops);
+void ip6t_unregister_table_pre_exit(struct net *net, const char *name);
+void ip6t_unregister_table_exit(struct net *net, const char *name);
+extern unsigned int ip6t_do_table(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
+
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>
struct compat_ip6t_entry {
@@ -43,7 +42,7 @@ struct compat_ip6t_entry {
__u16 next_offset;
compat_uint_t comefrom;
struct compat_xt_counters counters;
- unsigned char elems[0];
+ unsigned char elems[];
};
static inline struct xt_entry_target *
diff --git a/include/linux/netfilter_netdev.h b/include/linux/netfilter_netdev.h
new file mode 100644
index 000000000000..8676316547cc
--- /dev/null
+++ b/include/linux/netfilter_netdev.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NETFILTER_NETDEV_H_
+#define _NETFILTER_NETDEV_H_
+
+#include <linux/netfilter.h>
+#include <linux/netdevice.h>
+
+#ifdef CONFIG_NETFILTER_INGRESS
+static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
+{
+#ifdef CONFIG_JUMP_LABEL
+ if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
+ return false;
+#endif
+ return rcu_access_pointer(skb->dev->nf_hooks_ingress);
+}
+
+/* caller must hold rcu_read_lock */
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+ struct nf_hook_entries *e = rcu_dereference(skb->dev->nf_hooks_ingress);
+ struct nf_hook_state state;
+ int ret;
+
+ /* Must recheck the ingress hook head, in the event it became NULL
+ * after the check in nf_hook_ingress_active evaluated to true.
+ */
+ if (unlikely(!e))
+ return 0;
+
+ nf_hook_state_init(&state, NF_NETDEV_INGRESS,
+ NFPROTO_NETDEV, skb->dev, NULL, NULL,
+ dev_net(skb->dev), NULL);
+ ret = nf_hook_slow(skb, &state, e, 0);
+ if (ret == 0)
+ return -1;
+
+ return ret;
+}
+
+#else /* CONFIG_NETFILTER_INGRESS */
+static inline int nf_hook_ingress_active(struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+ return 0;
+}
+#endif /* CONFIG_NETFILTER_INGRESS */
+
+#ifdef CONFIG_NETFILTER_EGRESS
+static inline bool nf_hook_egress_active(void)
+{
+#ifdef CONFIG_JUMP_LABEL
+ if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_EGRESS]))
+ return false;
+#endif
+ return true;
+}
+
+/**
+ * nf_hook_egress - classify packets before transmission
+ * @skb: packet to be classified
+ * @rc: result code which shall be returned by __dev_queue_xmit() on failure
+ * @dev: netdev whose egress hooks shall be applied to @skb
+ *
+ * Returns @skb on success or %NULL if the packet was consumed or filtered.
+ * Caller must hold rcu_read_lock.
+ *
+ * On ingress, packets are classified first by tc, then by netfilter.
+ * On egress, the order is reversed for symmetry. Conceptually, tc and
+ * netfilter can be thought of as layers, with netfilter layered above tc:
+ * When tc redirects a packet to another interface, netfilter is not applied
+ * because the packet is on the tc layer.
+ *
+ * The nf_skip_egress flag controls whether netfilter is applied on egress.
+ * It is updated by __netif_receive_skb_core() and __dev_queue_xmit() when the
+ * packet passes through tc and netfilter. Because __dev_queue_xmit() may be
+ * called recursively by tunnel drivers such as vxlan, the flag is reverted to
+ * false after sch_handle_egress(). This ensures that netfilter is applied
+ * both on the overlay and underlying network.
+ */
+static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc,
+ struct net_device *dev)
+{
+ struct nf_hook_entries *e;
+ struct nf_hook_state state;
+ int ret;
+
+#ifdef CONFIG_NETFILTER_SKIP_EGRESS
+ if (skb->nf_skip_egress)
+ return skb;
+#endif
+
+ e = rcu_dereference_check(dev->nf_hooks_egress, rcu_read_lock_bh_held());
+ if (!e)
+ return skb;
+
+ nf_hook_state_init(&state, NF_NETDEV_EGRESS,
+ NFPROTO_NETDEV, NULL, dev, NULL,
+ dev_net(dev), NULL);
+
+ /* nf assumes rcu_read_lock, not just read_lock_bh */
+ rcu_read_lock();
+ ret = nf_hook_slow(skb, &state, e, 0);
+ rcu_read_unlock();
+
+ if (ret == 1) {
+ return skb;
+ } else if (ret < 0) {
+ *rc = NET_XMIT_DROP;
+ return NULL;
+ } else { /* ret == 0 */
+ *rc = NET_XMIT_SUCCESS;
+ return NULL;
+ }
+}
+#else /* CONFIG_NETFILTER_EGRESS */
+static inline bool nf_hook_egress_active(void)
+{
+ return false;
+}
+
+static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc,
+ struct net_device *dev)
+{
+ return skb;
+}
+#endif /* CONFIG_NETFILTER_EGRESS */
+
+static inline void nf_skip_egress(struct sk_buff *skb, bool skip)
+{
+#ifdef CONFIG_NETFILTER_SKIP_EGRESS
+ skb->nf_skip_egress = skip;
+#endif
+}
+
+static inline void nf_hook_netdev_init(struct net_device *dev)
+{
+#ifdef CONFIG_NETFILTER_INGRESS
+ RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL);
+#endif
+#ifdef CONFIG_NETFILTER_EGRESS
+ RCU_INIT_POINTER(dev->nf_hooks_egress, NULL);
+#endif
+}
+
+#endif /* _NETFILTER_NETDEV_H_ */
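
Per the nf_hook_egress() kerneldoc above, the transmit path treats the return value as skb-or-NULL. A sketch of the call-site shape in a __dev_queue_xmit()-style caller (which runs under rcu_read_lock_bh()):

	/* inside a __dev_queue_xmit()-style function; rc is its return code */
	if (nf_hook_egress_active()) {
		skb = nf_hook_egress(skb, &rc, dev);
		if (!skb)
			return rc;	/* consumed or dropped by a hook */
	}
	/* ... hand skb on to the qdisc / driver ... */
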
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
new file mode 100644
index 000000000000..f2402ddeafbf
--- /dev/null
+++ b/include/linux/netfs.h
@@ -0,0 +1,349 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Network filesystem support services.
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * See:
+ *
+ * Documentation/filesystems/netfs_library.rst
+ *
+ * for a description of the network filesystem interface declared here.
+ */
+
+#ifndef _LINUX_NETFS_H
+#define _LINUX_NETFS_H
+
+#include <linux/workqueue.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+
+enum netfs_sreq_ref_trace;
+
+/*
+ * Overload PG_private_2 to give us PG_fscache - this is used to indicate that
+ * a page is currently backed by a local disk cache
+ */
+#define folio_test_fscache(folio) folio_test_private_2(folio)
+#define PageFsCache(page) PagePrivate2((page))
+#define SetPageFsCache(page) SetPagePrivate2((page))
+#define ClearPageFsCache(page) ClearPagePrivate2((page))
+#define TestSetPageFsCache(page) TestSetPagePrivate2((page))
+#define TestClearPageFsCache(page) TestClearPagePrivate2((page))
+
+/**
+ * folio_start_fscache - Start an fscache write on a folio.
+ * @folio: The folio.
+ *
+ * Call this function before writing a folio to a local cache. Starting a
+ * second write before the first one finishes is not allowed.
+ */
+static inline void folio_start_fscache(struct folio *folio)
+{
+ VM_BUG_ON_FOLIO(folio_test_private_2(folio), folio);
+ folio_get(folio);
+ folio_set_private_2(folio);
+}
+
+/**
+ * folio_end_fscache - End an fscache write on a folio.
+ * @folio: The folio.
+ *
+ * Call this function after the folio has been written to the local cache.
+ * This will wake any sleepers waiting on this folio.
+ */
+static inline void folio_end_fscache(struct folio *folio)
+{
+ folio_end_private_2(folio);
+}
+
+/**
+ * folio_wait_fscache - Wait for an fscache write on this folio to end.
+ * @folio: The folio.
+ *
+ * If this folio is currently being written to a local cache, wait for
+ * the write to finish. Another write may start after this one finishes,
+ * unless the caller holds the folio lock.
+ */
+static inline void folio_wait_fscache(struct folio *folio)
+{
+ folio_wait_private_2(folio);
+}
+
+/**
+ * folio_wait_fscache_killable - Wait for an fscache write on this folio to end.
+ * @folio: The folio.
+ *
+ * If this folio is currently being written to a local cache, wait
+ * for the write to finish or for a fatal signal to be received.
+ * Another write may start after this one finishes, unless the caller
+ * holds the folio lock.
+ *
+ * Return:
+ * - 0 if successful.
+ * - -EINTR if a fatal signal was encountered.
+ */
+static inline int folio_wait_fscache_killable(struct folio *folio)
+{
+ return folio_wait_private_2_killable(folio);
+}
+
+static inline void set_page_fscache(struct page *page)
+{
+ folio_start_fscache(page_folio(page));
+}
+
+static inline void end_page_fscache(struct page *page)
+{
+ folio_end_private_2(page_folio(page));
+}
+
+static inline void wait_on_page_fscache(struct page *page)
+{
+ folio_wait_private_2(page_folio(page));
+}
+
+static inline int wait_on_page_fscache_killable(struct page *page)
+{
+ return folio_wait_private_2_killable(page_folio(page));
+}
+
+enum netfs_io_source {
+ NETFS_FILL_WITH_ZEROES,
+ NETFS_DOWNLOAD_FROM_SERVER,
+ NETFS_READ_FROM_CACHE,
+ NETFS_INVALID_READ,
+} __mode(byte);
+
+typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error,
+ bool was_async);
+
+/*
+ * Per-inode context. This wraps the VFS inode.
+ */
+struct netfs_inode {
+ struct inode inode; /* The VFS inode */
+ const struct netfs_request_ops *ops;
+#if IS_ENABLED(CONFIG_FSCACHE)
+ struct fscache_cookie *cache;
+#endif
+ loff_t remote_i_size; /* Size of the remote file */
+};
+
+/*
+ * Resources required to do operations on a cache.
+ */
+struct netfs_cache_resources {
+ const struct netfs_cache_ops *ops;
+ void *cache_priv;
+ void *cache_priv2;
+ unsigned int debug_id; /* Cookie debug ID */
+ unsigned int inval_counter; /* object->inval_counter at begin_op */
+};
+
+/*
+ * Descriptor for a single component subrequest.
+ */
+struct netfs_io_subrequest {
+ struct netfs_io_request *rreq; /* Supervising I/O request */
+ struct list_head rreq_link; /* Link in rreq->subrequests */
+ loff_t start; /* Where to start the I/O */
+ size_t len; /* Size of the I/O */
+ size_t transferred; /* Amount of data transferred */
+ refcount_t ref;
+ short error; /* 0 or error that occurred */
+ unsigned short debug_index; /* Index in list (for debugging output) */
+ enum netfs_io_source source; /* Where to read from/write to */
+ unsigned long flags;
+#define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */
+#define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */
+#define NETFS_SREQ_SHORT_IO 2 /* Set if the I/O was short */
+#define NETFS_SREQ_SEEK_DATA_READ 3 /* Set if ->read() should SEEK_DATA first */
+#define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */
+#define NETFS_SREQ_ONDEMAND 5 /* Set if it's from on-demand read mode */
+};
+
+enum netfs_io_origin {
+ NETFS_READAHEAD, /* This read was triggered by readahead */
+ NETFS_READPAGE, /* This read is a synchronous read */
+ NETFS_READ_FOR_WRITE, /* This read is to prepare a write */
+} __mode(byte);
+
+/*
+ * Descriptor for an I/O helper request. This is used to make multiple I/O
+ * operations to a variety of data stores and then stitch the result together.
+ */
+struct netfs_io_request {
+ struct work_struct work;
+ struct inode *inode; /* The file being accessed */
+ struct address_space *mapping; /* The mapping being accessed */
+ struct netfs_cache_resources cache_resources;
+ struct list_head subrequests; /* Contributory I/O operations */
+ void *netfs_priv; /* Private data for the netfs */
+ unsigned int debug_id;
+ atomic_t nr_outstanding; /* Number of ops in progress */
+ atomic_t nr_copy_ops; /* Number of copy-to-cache ops in progress */
+ size_t submitted; /* Amount submitted for I/O so far */
+ size_t len; /* Length of the request */
+ short error; /* 0 or error that occurred */
+ enum netfs_io_origin origin; /* Origin of the request */
+ loff_t i_size; /* Size of the file */
+ loff_t start; /* Start position */
+ pgoff_t no_unlock_folio; /* Don't unlock this folio after read */
+ refcount_t ref;
+ unsigned long flags;
+#define NETFS_RREQ_INCOMPLETE_IO 0 /* Some ioreqs terminated short or with error */
+#define NETFS_RREQ_COPY_TO_CACHE 1 /* Need to write to the cache */
+#define NETFS_RREQ_NO_UNLOCK_FOLIO 2 /* Don't unlock no_unlock_folio on completion */
+#define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */
+#define NETFS_RREQ_FAILED 4 /* The request failed */
+#define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes */
+ const struct netfs_request_ops *netfs_ops;
+};
+
+/*
+ * Operations the network filesystem can/must provide to the helpers.
+ */
+struct netfs_request_ops {
+ int (*init_request)(struct netfs_io_request *rreq, struct file *file);
+ void (*free_request)(struct netfs_io_request *rreq);
+ int (*begin_cache_operation)(struct netfs_io_request *rreq);
+
+ void (*expand_readahead)(struct netfs_io_request *rreq);
+ bool (*clamp_length)(struct netfs_io_subrequest *subreq);
+ void (*issue_read)(struct netfs_io_subrequest *subreq);
+ bool (*is_still_valid)(struct netfs_io_request *rreq);
+ int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
+ struct folio **foliop, void **_fsdata);
+ void (*done)(struct netfs_io_request *rreq);
+};
+
+/*
+ * How to handle reading from a hole.
+ */
+enum netfs_read_from_hole {
+ NETFS_READ_HOLE_IGNORE,
+ NETFS_READ_HOLE_CLEAR,
+ NETFS_READ_HOLE_FAIL,
+};
+
+/*
+ * Table of operations for access to a cache. This is obtained by
+ * rreq->ops->begin_cache_operation().
+ */
+struct netfs_cache_ops {
+ /* End an operation */
+ void (*end_operation)(struct netfs_cache_resources *cres);
+
+ /* Read data from the cache */
+ int (*read)(struct netfs_cache_resources *cres,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ enum netfs_read_from_hole read_hole,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv);
+
+ /* Write data to the cache */
+ int (*write)(struct netfs_cache_resources *cres,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv);
+
+ /* Expand readahead request */
+ void (*expand_readahead)(struct netfs_cache_resources *cres,
+ loff_t *_start, size_t *_len, loff_t i_size);
+
+ /* Prepare a read operation, shortening it to a cached/uncached
+ * boundary as appropriate.
+ */
+ enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq,
+ loff_t i_size);
+
+ /* Prepare a write operation, working out what part of the write we can
+ * actually do.
+ */
+ int (*prepare_write)(struct netfs_cache_resources *cres,
+ loff_t *_start, size_t *_len, loff_t i_size,
+ bool no_space_allocated_yet);
+
+ /* Query the occupancy of the cache in a region, returning where the
+ * next chunk of data starts and how long it is.
+ */
+ int (*query_occupancy)(struct netfs_cache_resources *cres,
+ loff_t start, size_t len, size_t granularity,
+ loff_t *_data_start, size_t *_data_len);
+};
+
+struct readahead_control;
+void netfs_readahead(struct readahead_control *);
+int netfs_read_folio(struct file *, struct folio *);
+int netfs_write_begin(struct netfs_inode *, struct file *,
+ struct address_space *, loff_t pos, unsigned int len,
+ struct folio **, void **fsdata);
+
+void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
+void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
+ enum netfs_sreq_ref_trace what);
+void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
+ bool was_async, enum netfs_sreq_ref_trace what);
+void netfs_stats_show(struct seq_file *);
+
+/**
+ * netfs_inode - Get the netfs inode context from the inode
+ * @inode: The inode to query
+ *
+ * Get the netfs lib inode context from the network filesystem's inode. The
+ * context struct is expected to directly follow on from the VFS inode struct.
+ */
+static inline struct netfs_inode *netfs_inode(struct inode *inode)
+{
+ return container_of(inode, struct netfs_inode, inode);
+}
+
+/**
+ * netfs_inode_init - Initialise a netfslib inode context
+ * @ctx: The netfs inode to initialise
+ * @ops: The netfs's operations list
+ *
+ * Initialise the netfs library context struct. This is expected to follow on
+ * directly from the VFS inode struct.
+ */
+static inline void netfs_inode_init(struct netfs_inode *ctx,
+ const struct netfs_request_ops *ops)
+{
+ ctx->ops = ops;
+ ctx->remote_i_size = i_size_read(&ctx->inode);
+#if IS_ENABLED(CONFIG_FSCACHE)
+ ctx->cache = NULL;
+#endif
+}
+
+/**
+ * netfs_resize_file - Note that a file got resized
+ * @ctx: The netfs inode being resized
+ * @new_i_size: The new file size
+ *
+ * Inform the netfs lib that a file got resized so that it can adjust its state.
+ */
+static inline void netfs_resize_file(struct netfs_inode *ctx, loff_t new_i_size)
+{
+ ctx->remote_i_size = new_i_size;
+}
+
+/**
+ * netfs_i_cookie - Get the cache cookie from the inode
+ * @ctx: The netfs inode to query
+ *
+ * Get the caching cookie (if enabled) from the network filesystem's inode.
+ */
+static inline struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx)
+{
+#if IS_ENABLED(CONFIG_FSCACHE)
+ return ctx->cache;
+#else
+ return NULL;
+#endif
+}
+
+#endif /* _LINUX_NETFS_H */
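
Because netfs_inode() is a container_of() over the embedded VFS inode, a filesystem opts in by placing struct netfs_inode at the top of its own inode structure and initialising it wherever it sets inodes up. A sketch with hypothetical myfs names:

struct myfs_inode {
	struct netfs_inode netfs;	/* must be first: wraps the VFS inode */
	unsigned long my_flags;		/* fs-private state follows */
};

static inline struct myfs_inode *MYFS_I(struct inode *inode)
{
	return container_of(inode, struct myfs_inode, netfs.inode);
}

/* in the filesystem's inode set-up path */
static void myfs_init_netfs(struct myfs_inode *mi)
{
	netfs_inode_init(&mi->netfs, &myfs_request_ops); /* hypothetical ops */
}
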
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 205fa7b1f07a..d51e041d2242 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -11,6 +11,8 @@
struct net;
+void do_trace_netlink_extack(const char *msg);
+
static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
{
return (struct nlmsghdr *)skb->data;
@@ -68,12 +70,18 @@ netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg)
* @_msg: message string to report - don't access directly, use
* %NL_SET_ERR_MSG
* @bad_attr: attribute with error
+ * @policy: policy for a bad attribute
+ * @miss_type: attribute type which was missing
+ * @miss_nest: nest missing an attribute (%NULL if missing top level attr)
* @cookie: cookie data to return to userspace (for success)
* @cookie_len: actual cookie data length
*/
struct netlink_ext_ack {
const char *_msg;
const struct nlattr *bad_attr;
+ const struct nla_policy *policy;
+ const struct nlattr *miss_nest;
+ u16 miss_type;
u8 cookie[NETLINK_MAX_COOKIE_LEN];
u8 cookie_len;
};
@@ -88,6 +96,8 @@ struct netlink_ext_ack {
static const char __msg[] = msg; \
struct netlink_ext_ack *__extack = (extack); \
\
+ do_trace_netlink_extack(__msg); \
+ \
if (__extack) \
__extack->_msg = __msg; \
} while (0)
@@ -95,28 +105,58 @@ struct netlink_ext_ack {
#define NL_SET_ERR_MSG_MOD(extack, msg) \
NL_SET_ERR_MSG((extack), KBUILD_MODNAME ": " msg)
-#define NL_SET_BAD_ATTR(extack, attr) do { \
- if ((extack)) \
+#define NL_SET_BAD_ATTR_POLICY(extack, attr, pol) do { \
+ if ((extack)) { \
(extack)->bad_attr = (attr); \
+ (extack)->policy = (pol); \
+ } \
} while (0)
-#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) do { \
- static const char __msg[] = msg; \
+#define NL_SET_BAD_ATTR(extack, attr) NL_SET_BAD_ATTR_POLICY(extack, attr, NULL)
+
+#define NL_SET_ERR_MSG_ATTR_POL(extack, attr, pol, msg) do { \
+ static const char __msg[] = msg; \
+ struct netlink_ext_ack *__extack = (extack); \
+ \
+ do_trace_netlink_extack(__msg); \
+ \
+ if (__extack) { \
+ __extack->_msg = __msg; \
+ __extack->bad_attr = (attr); \
+ __extack->policy = (pol); \
+ } \
+} while (0)
+
+#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) \
+ NL_SET_ERR_MSG_ATTR_POL(extack, attr, NULL, msg)
+
+#define NL_SET_ERR_ATTR_MISS(extack, nest, type) do { \
struct netlink_ext_ack *__extack = (extack); \
\
if (__extack) { \
- __extack->_msg = __msg; \
- __extack->bad_attr = (attr); \
+ __extack->miss_nest = (nest); \
+ __extack->miss_type = (type); \
} \
} while (0)
+#define NL_REQ_ATTR_CHECK(extack, nest, tb, type) ({ \
+ struct nlattr **__tb = (tb); \
+ u32 __attr = (type); \
+ int __retval; \
+ \
+ __retval = !__tb[__attr]; \
+ if (__retval) \
+ NL_SET_ERR_ATTR_MISS((extack), (nest), __attr); \
+ __retval; \
+})
+
static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack,
u64 cookie)
{
- u64 __cookie = cookie;
-
- memcpy(extack->cookie, &__cookie, sizeof(__cookie));
- extack->cookie_len = sizeof(__cookie);
+ if (!extack)
+ return;
+ memcpy(extack->cookie, &cookie, sizeof(cookie));
+ extack->cookie_len = sizeof(cookie);
}
void netlink_kernel_release(struct sock *sk);
@@ -131,10 +171,6 @@ bool netlink_strict_get_check(struct sk_buff *skb);
int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
__u32 group, gfp_t allocation);
-int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
- __u32 portid, __u32 group, gfp_t allocation,
- int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
- void *filter_data);
int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
int netlink_register_notifier(struct notifier_block *nb);
int netlink_unregister_notifier(struct notifier_block *nb);
@@ -188,10 +224,10 @@ struct netlink_callback {
struct module *module;
struct netlink_ext_ack *extack;
u16 family;
- u16 min_dump_alloc;
- bool strict_check;
u16 answer_flags;
+ u32 min_dump_alloc;
unsigned int prev_seq, seq;
+ bool strict_check;
union {
u8 ctx[48];
@@ -217,7 +253,7 @@ struct netlink_dump_control {
int (*done)(struct netlink_callback *);
void *data;
struct module *module;
- u16 min_dump_alloc;
+ u32 min_dump_alloc;
};
int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
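
NL_REQ_ATTR_CHECK() pairs the presence test with the structured miss report, so userspace learns which attribute was absent instead of receiving a bare -EINVAL. A sketch of a request-handler fragment; the attribute ID is hypothetical:

	/* tb[] filled by nlmsg_parse(); MYDRV_ATTR_PORT is hypothetical */
	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MYDRV_ATTR_PORT))
		return -EINVAL;	/* extack now names the missing attribute */
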
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 676f1ff161a9..bd19c4b91e31 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -24,6 +24,7 @@ union inet_addr {
struct netpoll {
struct net_device *dev;
+ netdevice_tracker dev_tracker;
char dev_name[IFNAMSIZ];
const char *name;
@@ -63,15 +64,7 @@ int netpoll_setup(struct netpoll *np);
void __netpoll_cleanup(struct netpoll *np);
void __netpoll_free(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
-void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
- struct net_device *dev);
-static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
-{
- unsigned long flags;
- local_irq_save(flags);
- netpoll_send_skb_on_dev(np, skb, np->dev);
- local_irq_restore(flags);
-}
+netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
#ifdef CONFIG_NETPOLL
static inline void *netpoll_poll_lock(struct napi_struct *napi)
@@ -110,9 +103,6 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi)
static inline void netpoll_poll_unlock(void *have)
{
}
-static inline void netpoll_netdev_init(struct net_device *dev)
-{
-}
static inline bool netpoll_tx_running(struct net_device *dev)
{
return false;
diff --git a/include/linux/nfs.h b/include/linux/nfs.h
index 0dc7ad38a0da..b06375e88e58 100644
--- a/include/linux/nfs.h
+++ b/include/linux/nfs.h
@@ -36,14 +36,6 @@ static inline void nfs_copy_fh(struct nfs_fh *target, const struct nfs_fh *sourc
memcpy(target->data, source->data, source->size);
}
-
-/*
- * This is really a general kernel constant, but since nothing like
- * this is defined in the kernel headers, I have to do it here.
- */
-#define NFS_OFFSET_MAX ((__s64)((~(__u64)0) >> 1))
-
-
enum nfs3_stable_how {
NFS_UNSTABLE = 0,
NFS_DATA_SYNC = 1,
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 82d8fb422092..8d04b6a5964c 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -38,7 +38,7 @@ struct nfs4_ace {
struct nfs4_acl {
uint32_t naces;
- struct nfs4_ace aces[0];
+ struct nfs4_ace aces[];
};
#define NFS4_MAXLABELLEN 2048
@@ -150,6 +150,12 @@ enum nfs_opnum4 {
OP_WRITE_SAME = 70,
OP_CLONE = 71,
+ /* xattr support (RFC8726) */
+ OP_GETXATTR = 72,
+ OP_SETXATTR = 73,
+ OP_LISTXATTRS = 74,
+ OP_REMOVEXATTR = 75,
+
OP_ILLEGAL = 10044,
};
@@ -159,7 +165,7 @@ Needs to be updated if more operations are defined in future.*/
#define FIRST_NFS4_OP OP_ACCESS
#define LAST_NFS40_OP OP_RELEASE_LOCKOWNER
#define LAST_NFS41_OP OP_RECLAIM_COMPLETE
-#define LAST_NFS42_OP OP_CLONE
+#define LAST_NFS42_OP OP_REMOVEXATTR
#define LAST_NFS4_OP LAST_NFS42_OP
enum nfsstat4 {
@@ -280,8 +286,16 @@ enum nfsstat4 {
NFS4ERR_WRONG_LFS = 10092,
NFS4ERR_BADLABEL = 10093,
NFS4ERR_OFFLOAD_NO_REQS = 10094,
+
+ /* xattr (RFC8276) */
+ NFS4ERR_NOXATTR = 10095,
+ NFS4ERR_XATTR2BIG = 10096,
};
+/* error codes for internal client use */
+#define NFS4ERR_RESET_TO_MDS 12001
+#define NFS4ERR_RESET_TO_PNFS 12002
+
static inline bool seqid_mutating_err(u32 err)
{
/* See RFC 7530, section 9.1.7 */
@@ -295,7 +309,7 @@ static inline bool seqid_mutating_err(u32 err)
case NFS4ERR_NOFILEHANDLE:
case NFS4ERR_MOVED:
return false;
- };
+ }
return true;
}
@@ -375,13 +389,6 @@ enum lock_type4 {
NFS4_WRITEW_LT = 4
};
-enum change_attr_type4 {
- NFS4_CHANGE_TYPE_IS_MONOTONIC_INCR = 0,
- NFS4_CHANGE_TYPE_IS_VERSION_COUNTER = 1,
- NFS4_CHANGE_TYPE_IS_VERSION_COUNTER_NOPNFS = 2,
- NFS4_CHANGE_TYPE_IS_TIME_METADATA = 3,
- NFS4_CHANGE_TYPE_IS_UNDEFINED = 4
-};
/* Mandatory Attributes */
#define FATTR4_WORD0_SUPPORTED_ATTRS (1UL << 0)
@@ -444,6 +451,8 @@ enum change_attr_type4 {
#define FATTR4_WORD1_TIME_MODIFY (1UL << 21)
#define FATTR4_WORD1_TIME_MODIFY_SET (1UL << 22)
#define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23)
+#define FATTR4_WORD1_DACL (1UL << 26)
+#define FATTR4_WORD1_SACL (1UL << 27)
#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30)
#define FATTR4_WORD2_LAYOUT_TYPES (1UL << 0)
#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
@@ -452,6 +461,7 @@ enum change_attr_type4 {
#define FATTR4_WORD2_CHANGE_ATTR_TYPE (1UL << 15)
#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16)
#define FATTR4_WORD2_MODE_UMASK (1UL << 17)
+#define FATTR4_WORD2_XATTR_SUPPORT (1UL << 18)
/* MDS threshold bitmap bits */
#define THRESHOLD_RD (1UL << 0)
@@ -540,8 +550,13 @@ enum {
NFSPROC4_CLNT_LOOKUPP,
NFSPROC4_CLNT_LAYOUTERROR,
-
NFSPROC4_CLNT_COPY_NOTIFY,
+
+ NFSPROC4_CLNT_GETXATTR,
+ NFSPROC4_CLNT_SETXATTR,
+ NFSPROC4_CLNT_LISTXATTRS,
+ NFSPROC4_CLNT_REMOVEXATTR,
+ NFSPROC4_CLNT_READ_PLUS,
};
/* nfs41 types */
@@ -700,4 +715,21 @@ struct nl4_server {
struct nfs42_netaddr nl4_addr; /* NL4_NETADDR */
} u;
};
+
+enum nfs4_change_attr_type {
+ NFS4_CHANGE_TYPE_IS_MONOTONIC_INCR = 0,
+ NFS4_CHANGE_TYPE_IS_VERSION_COUNTER = 1,
+ NFS4_CHANGE_TYPE_IS_VERSION_COUNTER_NOPNFS = 2,
+ NFS4_CHANGE_TYPE_IS_TIME_METADATA = 3,
+ NFS4_CHANGE_TYPE_IS_UNDEFINED = 4,
+};
+
+/*
+ * Options for setxattr. These match the flags for setxattr(2).
+ */
+enum nfs4_setxattr_options {
+ SETXATTR4_EITHER = 0,
+ SETXATTR4_CREATE = 1,
+ SETXATTR4_REPLACE = 2,
+};
#endif
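Since the enum above deliberately mirrors the setxattr(2) flag values, translating VFS flags into the on-the-wire option is a direct mapping. A sketch with a hypothetical helper; XATTR_CREATE and XATTR_REPLACE come from <uapi/linux/xattr.h>:

#include <linux/xattr.h>

static enum nfs4_setxattr_options nfs4_xattr_flags_to_option(int flags)
{
	switch (flags) {
	case XATTR_CREATE:
		return SETXATTR4_CREATE;	/* fail if the xattr exists */
	case XATTR_REPLACE:
		return SETXATTR4_REPLACE;	/* fail if it does not exist */
	default:
		return SETXATTR4_EITHER;	/* create or replace */
	}
}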
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 5d5b91e54f73..7931fa472561 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -41,9 +41,14 @@
#include <linux/mempool.h>
/*
- * These are the default flags for swap requests
+ * This is the default limit on the number of transports to different server IPs
*/
-#define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS)
+#define NFS_MAX_TRANSPORTS 16
+
+/*
+ * Size of the NFS directory verifier
+ */
+#define NFS_DIR_VERIFIER_SIZE 2
/*
* NFSv3/v4 Access mode cache entry
@@ -51,7 +56,9 @@
struct nfs_access_entry {
struct rb_node rb_node;
struct list_head lru;
- const struct cred * cred;
+ kuid_t fsuid;
+ kgid_t fsgid;
+ struct group_info *group_info;
__u32 mask;
struct rcu_head rcu_head;
};
@@ -71,14 +78,14 @@ struct nfs_open_context {
fl_owner_t flock_owner;
struct dentry *dentry;
const struct cred *cred;
- struct rpc_cred *ll_cred; /* low-level cred - use to check for expiry */
+ struct rpc_cred __rcu *ll_cred; /* low-level cred - use to check for expiry */
struct nfs4_state *state;
fmode_t mode;
unsigned long flags;
-#define NFS_CONTEXT_RESEND_WRITES (1)
#define NFS_CONTEXT_BAD (2)
#define NFS_CONTEXT_UNLOCK (3)
+#define NFS_CONTEXT_FILE_OPEN (4)
int error;
struct list_head list;
@@ -88,11 +95,17 @@ struct nfs_open_context {
struct nfs_open_dir_context {
struct list_head list;
- const struct cred *cred;
+ atomic_t cache_hits;
+ atomic_t cache_misses;
unsigned long attr_gencount;
+ __be32 verf[NFS_DIR_VERIFIER_SIZE];
__u64 dir_cookie;
- __u64 dup_cookie;
- signed char duped;
+ __u64 last_cookie;
+ pgoff_t page_index;
+ unsigned int dtsize;
+ bool force_clear;
+ bool eof;
+ struct rcu_head rcu_head;
};
/*
@@ -102,6 +115,8 @@ struct nfs_delegation;
struct posix_acl;
+struct nfs4_xattr_cache;
+
/*
* nfs fs inode data in memory
*/
@@ -141,36 +156,40 @@ struct nfs_inode {
unsigned long attrtimeo_timestamp;
unsigned long attr_gencount;
- /* "Generation counter" for the attribute cache. This is
- * bumped whenever we update the metadata on the
- * server.
- */
- unsigned long cache_change_attribute;
struct rb_root access_cache;
struct list_head access_cache_entry_lru;
struct list_head access_cache_inode_lru;
- /*
- * This is the cookie verifier used for NFSv3 readdir
- * operations
- */
- __be32 cookieverf[2];
-
- atomic_long_t nrequests;
- struct nfs_mds_commit_info commit_info;
+ union {
+ /* Directory */
+ struct {
+ /* "Generation counter" for the attribute cache.
+ * This is bumped whenever we update the metadata
+ * on the server.
+ */
+ unsigned long cache_change_attribute;
+ /*
+ * This is the cookie verifier used for NFSv3 readdir
+ * operations
+ */
+ __be32 cookieverf[NFS_DIR_VERIFIER_SIZE];
+ /* Readers: in-flight sillydelete RPC calls */
+ /* Writers: rmdir */
+ struct rw_semaphore rmdir_sem;
+ };
+ /* Regular file */
+ struct {
+ atomic_long_t nrequests;
+ atomic_long_t redirtied_pages;
+ struct nfs_mds_commit_info commit_info;
+ struct mutex commit_mutex;
+ };
+ };
/* Open contexts for shared mmap writes */
struct list_head open_files;
- /* Readers: in-flight sillydelete RPC calls */
- /* Writers: rmdir */
- struct rw_semaphore rmdir_sem;
- struct mutex commit_mutex;
-
- /* track last access to cached pages */
- unsigned long page_index;
-
#if IS_ENABLED(CONFIG_NFS_V4)
struct nfs4_cached_acl *nfs4_acl;
/* NFSv4 state */
@@ -188,6 +207,10 @@ struct nfs_inode {
struct fscache_cookie *fscache;
#endif
struct inode vfs_inode;
+
+#ifdef CONFIG_NFS_V4_2
+ struct nfs4_xattr_cache *xattr_cache;
+#endif
};
struct nfs4_copy_state {
@@ -212,6 +235,9 @@ struct nfs4_copy_state {
#define NFS_ACCESS_EXTEND 0x0008
#define NFS_ACCESS_DELETE 0x0010
#define NFS_ACCESS_EXECUTE 0x0020
+#define NFS_ACCESS_XAREAD 0x0040
+#define NFS_ACCESS_XAWRITE 0x0080
+#define NFS_ACCESS_XALIST 0x0100
/*
* Cache validity bit flags
@@ -220,7 +246,6 @@ struct nfs4_copy_state {
#define NFS_INO_INVALID_ATIME BIT(2) /* cached atime is invalid */
#define NFS_INO_INVALID_ACCESS BIT(3) /* cached access cred invalid */
#define NFS_INO_INVALID_ACL BIT(4) /* cached acls are invalid */
-#define NFS_INO_REVAL_PAGECACHE BIT(5) /* must revalidate pagecache */
#define NFS_INO_REVAL_FORCED BIT(6) /* force revalidation ignoring a delegation */
#define NFS_INO_INVALID_LABEL BIT(7) /* cached label is invalid */
#define NFS_INO_INVALID_CHANGE BIT(8) /* cached change is invalid */
@@ -230,22 +255,27 @@ struct nfs4_copy_state {
#define NFS_INO_INVALID_OTHER BIT(12) /* other attrs are invalid */
#define NFS_INO_DATA_INVAL_DEFER \
BIT(13) /* Deferred cache invalidation */
+#define NFS_INO_INVALID_BLOCKS BIT(14) /* cached blocks are invalid */
+#define NFS_INO_INVALID_XATTR BIT(15) /* xattrs are invalid */
+#define NFS_INO_INVALID_NLINK BIT(16) /* cached nlinks is invalid */
+#define NFS_INO_INVALID_MODE BIT(17) /* cached mode is invalid */
#define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \
| NFS_INO_INVALID_CTIME \
| NFS_INO_INVALID_MTIME \
| NFS_INO_INVALID_SIZE \
+ | NFS_INO_INVALID_NLINK \
+ | NFS_INO_INVALID_MODE \
| NFS_INO_INVALID_OTHER) /* inode metadata is invalid */
/*
* Bit offsets in flags field
*/
-#define NFS_INO_ADVISE_RDPLUS (0) /* advise readdirplus */
#define NFS_INO_STALE (1) /* possible stale inode */
#define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */
#define NFS_INO_INVALIDATING (3) /* inode is being invalidated */
+#define NFS_INO_PRESERVE_UNLINKED (4) /* preserve file if removed while open */
#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */
-#define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */
#define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */
#define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */
#define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */
@@ -322,17 +352,15 @@ static inline void nfs_mark_for_revalidate(struct inode *inode)
struct nfs_inode *nfsi = NFS_I(inode);
spin_lock(&inode->i_lock);
- nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE
- | NFS_INO_INVALID_ACCESS
- | NFS_INO_INVALID_ACL
- | NFS_INO_INVALID_CHANGE
- | NFS_INO_INVALID_CTIME;
+ nfsi->cache_validity |= NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
+ NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME |
+ NFS_INO_INVALID_SIZE;
if (S_ISDIR(inode->i_mode))
nfsi->cache_validity |= NFS_INO_INVALID_DATA;
spin_unlock(&inode->i_lock);
}
-static inline int nfs_server_capable(struct inode *inode, int cap)
+static inline int nfs_server_capable(const struct inode *inode, int cap)
{
return NFS_SERVER(inode)->caps & cap;
}
@@ -354,29 +382,31 @@ static inline unsigned long nfs_save_change_attribute(struct inode *dir)
extern int nfs_sync_mapping(struct address_space *mapping);
extern void nfs_zap_mapping(struct inode *inode, struct address_space *mapping);
extern void nfs_zap_caches(struct inode *);
+extern void nfs_set_inode_stale(struct inode *inode);
extern void nfs_invalidate_atime(struct inode *);
extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *,
- struct nfs_fattr *, struct nfs4_label *);
+ struct nfs_fattr *);
struct inode *nfs_ilookup(struct super_block *sb, struct nfs_fattr *, struct nfs_fh *);
extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr);
extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr);
-extern int nfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
-extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *);
+extern int nfs_getattr(struct user_namespace *, const struct path *,
+ struct kstat *, u32, unsigned int);
+extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *, const struct cred *);
extern void nfs_access_set_mask(struct nfs_access_entry *, u32);
-extern int nfs_permission(struct inode *, int);
+extern int nfs_permission(struct user_namespace *, struct inode *, int);
extern int nfs_open(struct inode *, struct file *);
extern int nfs_attribute_cache_expired(struct inode *inode);
-extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
+extern int nfs_revalidate_inode(struct inode *inode, unsigned long flags);
extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
+extern int nfs_clear_invalid_mapping(struct address_space *mapping);
extern bool nfs_mapping_need_revalidate_inode(struct inode *inode);
extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
extern int nfs_revalidate_mapping_rcu(struct inode *inode);
-extern int nfs_setattr(struct dentry *, struct iattr *);
+extern int nfs_setattr(struct user_namespace *, struct dentry *, struct iattr *);
extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *);
-extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
- struct nfs4_label *label);
+extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr);
extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
extern void put_nfs_open_context(struct nfs_open_context *ctx);
extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, const struct cred *cred, fmode_t mode);
@@ -392,9 +422,22 @@ extern void nfs_fattr_set_barrier(struct nfs_fattr *fattr);
extern unsigned long nfs_inc_attr_generation_counter(void);
extern struct nfs_fattr *nfs_alloc_fattr(void);
+extern struct nfs_fattr *nfs_alloc_fattr_with_label(struct nfs_server *server);
+
+static inline void nfs4_label_free(struct nfs4_label *label)
+{
+#ifdef CONFIG_NFS_V4_SECURITY_LABEL
+ if (label) {
+ kfree(label->label);
+ kfree(label);
+ }
+#endif
+}
static inline void nfs_free_fattr(const struct nfs_fattr *fattr)
{
+ if (fattr)
+ nfs4_label_free(fattr->label);
kfree(fattr);
}
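With the label now hanging off struct nfs_fattr, allocation and release are symmetric again. A minimal usage sketch; example_getattr() is hypothetical:

static int example_getattr(struct nfs_server *server)
{
	struct nfs_fattr *fattr = nfs_alloc_fattr_with_label(server);

	if (!fattr)
		return -ENOMEM;
	/* ... decode a GETATTR reply into fattr (and fattr->label) ... */
	nfs_free_fattr(fattr);	/* frees the embedded label as well */
	return 0;
}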
@@ -464,11 +507,11 @@ static inline const struct cred *nfs_file_cred(struct file *file)
/*
* linux/fs/nfs/direct.c
*/
-extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *);
-extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
- struct iov_iter *iter);
-extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
- struct iov_iter *iter);
+int nfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter);
+ssize_t nfs_file_direct_read(struct kiocb *iocb,
+ struct iov_iter *iter, bool swap);
+ssize_t nfs_file_direct_write(struct kiocb *iocb,
+ struct iov_iter *iter, bool swap);
/*
* linux/fs/nfs/dir.c
@@ -482,12 +525,13 @@ extern void nfs_set_verifier(struct dentry * dentry, unsigned long verf);
extern void nfs_clear_verifier_delegated(struct inode *inode);
#endif /* IS_ENABLED(CONFIG_NFS_V4) */
extern struct dentry *nfs_add_or_obtain(struct dentry *dentry,
- struct nfs_fh *fh, struct nfs_fattr *fattr,
- struct nfs4_label *label);
+ struct nfs_fh *fh, struct nfs_fattr *fattr);
extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh,
- struct nfs_fattr *fattr, struct nfs4_label *label);
+ struct nfs_fattr *fattr);
extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags);
extern void nfs_access_zap_cache(struct inode *inode);
+extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred,
+ u32 *mask, bool may_block);
/*
* linux/fs/nfs/symlink.c
@@ -534,25 +578,24 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned
extern int nfs_sync_inode(struct inode *inode);
extern int nfs_wb_all(struct inode *inode);
extern int nfs_wb_page(struct inode *inode, struct page *page);
-extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
+int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio);
extern int nfs_commit_inode(struct inode *, int);
-extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail);
+extern struct nfs_commit_data *nfs_commitdata_alloc(void);
extern void nfs_commit_free(struct nfs_commit_data *data);
+bool nfs_commit_end(struct nfs_mds_commit_info *cinfo);
-static inline int
-nfs_have_writebacks(struct inode *inode)
+static inline bool nfs_have_writebacks(const struct inode *inode)
{
- return atomic_long_read(&NFS_I(inode)->nrequests) != 0;
+ if (S_ISREG(inode->i_mode))
+ return atomic_long_read(&NFS_I(inode)->nrequests) != 0;
+ return false;
}
/*
* linux/fs/nfs/read.c
*/
-extern int nfs_readpage(struct file *, struct page *);
-extern int nfs_readpages(struct file *, struct address_space *,
- struct list_head *, unsigned);
-extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
- struct page *);
+int nfs_read_folio(struct file *, struct folio *);
+void nfs_readahead(struct readahead_control *);
/*
* inline functions
@@ -574,6 +617,15 @@ nfs_fileid_to_ino_t(u64 fileid)
#define NFS_JUKEBOX_RETRY_TIME (5 * HZ)
+/* We need to block new opens while a file is being unlinked.
+ * If it is opened *before* we decide to unlink, we will silly-rename
+ * instead. If it is opened *after*, then the open must create the file or it will fail.
+ * If we allow the two to race, we could end up with a file that is open
+ * but deleted on the server resulting in ESTALE.
+ * So use ->d_fsdata to record when the unlink is happening
+ * and block dentry revalidation while it is set.
+ */
+#define NFS_FSDATA_BLOCKED ((void*)1)
# undef ifdebug
# ifdef NFS_DEBUG
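Because the directory and regular-file state now overlay each other in a union inside struct nfs_inode, every accessor has to gate on the inode type first, just as the reworked nfs_have_writebacks() above does. A sketch of the same rule for the directory side; example_dir_verifier() is hypothetical:

static __be32 *example_dir_verifier(struct inode *inode)
{
	if (WARN_ON_ONCE(!S_ISDIR(inode->i_mode)))
		return NULL;	/* cookieverf aliases regular-file state */
	return NFS_I(inode)->cookieverf;
}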
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 465fa98258a3..ea2f7e6b1b0b 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -62,6 +62,7 @@ struct nfs_client {
u32 cl_minorversion;/* NFSv4 minorversion */
unsigned int cl_nconnect; /* Number of connections */
+ unsigned int cl_max_connect; /* max number of xprts allowed */
const char * cl_principal; /* used for machine cred */
#if IS_ENABLED(CONFIG_NFS_V4)
@@ -119,11 +120,6 @@ struct nfs_client {
* This is used to generate the mv0 callback address.
*/
char cl_ipaddr[48];
-
-#ifdef CONFIG_NFS_FSCACHE
- struct fscache_cookie *fscache; /* client index cache cookie */
-#endif
-
struct net *cl_net;
struct list_head pending_cb_stateids;
};
@@ -142,7 +138,8 @@ struct nfs_server {
struct nlm_host *nlm_host; /* NLM client handle */
struct nfs_iostats __percpu *io_stats; /* I/O statistics */
atomic_long_t writeback; /* number of writeback pages */
- int flags; /* various flags */
+ unsigned int write_congested;/* flag set when writeback gets too high */
+ unsigned int flags; /* various flags */
/* The following are for internal use only. Also see uapi/linux/nfs_mount.h */
#define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000
@@ -153,7 +150,11 @@ struct nfs_server {
#define NFS_MOUNT_LOCAL_FCNTL 0x200000
#define NFS_MOUNT_SOFTERR 0x400000
#define NFS_MOUNT_SOFTREVAL 0x800000
+#define NFS_MOUNT_WRITE_EAGER 0x01000000
+#define NFS_MOUNT_WRITE_WAIT 0x02000000
+#define NFS_MOUNT_TRUNK_DISCOVERY 0x04000000
+ unsigned int fattr_valid; /* Valid attributes */
unsigned int caps; /* server capabilities */
unsigned int rsize; /* read size */
unsigned int rpages; /* read size (in pages) */
@@ -163,6 +164,11 @@ struct nfs_server {
unsigned int dtsize; /* readdir size */
unsigned short port; /* "port=" setting */
unsigned int bsize; /* server block size */
+#ifdef CONFIG_NFS_V4_2
+ unsigned int gxasize; /* getxattr size */
+ unsigned int sxasize; /* setxattr size */
+ unsigned int lxasize; /* listxattr size */
+#endif
unsigned int acregmin; /* attr cache timeouts */
unsigned int acregmax;
unsigned int acdirmin;
@@ -173,6 +179,9 @@ struct nfs_server {
#define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */
#define NFS_OPTION_MIGRATION 0x00000002 /* - NFSv4 migration enabled */
+ enum nfs4_change_attr_type
+ change_attr_type;/* Description of change attribute */
+
struct nfs_fsid fsid;
__u64 maxfilesize; /* maximum file size */
struct timespec64 time_delta; /* smallest time granularity */
@@ -182,8 +191,8 @@ struct nfs_server {
struct nfs_auth_info auth_info; /* parsed auth flavors */
#ifdef CONFIG_NFS_FSCACHE
- struct nfs_fscache_key *fscache_key; /* unique key for superblock */
- struct fscache_cookie *fscache; /* superblock cookie */
+ struct fscache_volume *fscache; /* superblock cookie */
+ char *fscache_uniq; /* Uniquifier (or NULL) */
#endif
u32 pnfs_blksize; /* layout_blksize attr */
@@ -249,6 +258,7 @@ struct nfs_server {
/* User namespace info */
const struct cred *cred;
+ bool has_sec_mnt_opts;
};
/* Server capabilities */
@@ -257,16 +267,9 @@ struct nfs_server {
#define NFS_CAP_SYMLINKS (1U << 2)
#define NFS_CAP_ACLS (1U << 3)
#define NFS_CAP_ATOMIC_OPEN (1U << 4)
-/* #define NFS_CAP_CHANGE_ATTR (1U << 5) */
#define NFS_CAP_LGOPEN (1U << 5)
-#define NFS_CAP_FILEID (1U << 6)
-#define NFS_CAP_MODE (1U << 7)
-#define NFS_CAP_NLINK (1U << 8)
-#define NFS_CAP_OWNER (1U << 9)
-#define NFS_CAP_OWNER_GROUP (1U << 10)
-#define NFS_CAP_ATIME (1U << 11)
-#define NFS_CAP_CTIME (1U << 12)
-#define NFS_CAP_MTIME (1U << 13)
+#define NFS_CAP_CASE_INSENSITIVE (1U << 6)
+#define NFS_CAP_CASE_PRESERVING (1U << 7)
#define NFS_CAP_POSIX_LOCK (1U << 14)
#define NFS_CAP_UIDGID_NOMAP (1U << 15)
#define NFS_CAP_STATEID_NFSV41 (1U << 16)
@@ -281,5 +284,8 @@ struct nfs_server {
#define NFS_CAP_OFFLOAD_CANCEL (1U << 25)
#define NFS_CAP_LAYOUTERROR (1U << 26)
#define NFS_CAP_COPY_NOTIFY (1U << 27)
-
+#define NFS_CAP_XATTR (1U << 28)
+#define NFS_CAP_READ_PLUS (1U << 29)
+#define NFS_CAP_FS_LOCATIONS (1U << 30)
+#define NFS_CAP_MOVEABLE (1U << 31)
#endif
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 0bbd587fac6a..ba7e2e4b0926 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -55,6 +55,7 @@ struct nfs_page {
unsigned short wb_nio; /* Number of I/O attempts */
};
+struct nfs_pgio_mirror;
struct nfs_pageio_descriptor;
struct nfs_pageio_ops {
void (*pg_init)(struct nfs_pageio_descriptor *, struct nfs_page *);
@@ -64,6 +65,9 @@ struct nfs_pageio_ops {
unsigned int (*pg_get_mirror_count)(struct nfs_pageio_descriptor *,
struct nfs_page *);
void (*pg_cleanup)(struct nfs_pageio_descriptor *);
+ struct nfs_pgio_mirror *
+ (*pg_get_mirror)(struct nfs_pageio_descriptor *, u32);
+ u32 (*pg_set_mirror)(struct nfs_pageio_descriptor *, u32);
};
struct nfs_rw_ops {
@@ -139,9 +143,14 @@ extern size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
extern int nfs_wait_on_request(struct nfs_page *);
extern void nfs_unlock_request(struct nfs_page *req);
extern void nfs_unlock_and_release_request(struct nfs_page *);
+extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req);
+extern int nfs_page_group_lock_subrequests(struct nfs_page *head);
+extern void nfs_join_page_group(struct nfs_page *head, struct inode *inode);
extern int nfs_page_group_lock(struct nfs_page *);
extern void nfs_page_group_unlock(struct nfs_page *);
extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
+extern int nfs_page_set_headlock(struct nfs_page *req);
+extern void nfs_page_clear_headlock(struct nfs_page *req);
extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);
/*
@@ -193,8 +202,7 @@ nfs_list_entry(struct list_head *head)
return list_entry(head, struct nfs_page, wb_list);
}
-static inline
-loff_t req_offset(struct nfs_page *req)
+static inline loff_t req_offset(const struct nfs_page *req)
{
return (((loff_t)req->wb_index) << PAGE_SHIFT) + req->wb_offset;
}
diff --git a/include/linux/nfs_ssc.h b/include/linux/nfs_ssc.h
new file mode 100644
index 000000000000..75843c00f326
--- /dev/null
+++ b/include/linux/nfs_ssc.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * include/linux/nfs_ssc.h
+ *
+ * Author: Dai Ngo <dai.ngo@oracle.com>
+ *
+ * Copyright (c) 2020, Oracle and/or its affiliates.
+ */
+
+#include <linux/nfs_fs.h>
+#include <linux/sunrpc/svc.h>
+
+extern struct nfs_ssc_client_ops_tbl nfs_ssc_client_tbl;
+
+/*
+ * NFS_V4
+ */
+struct nfs4_ssc_client_ops {
+ struct file *(*sco_open)(struct vfsmount *ss_mnt,
+ struct nfs_fh *src_fh, nfs4_stateid *stateid);
+ void (*sco_close)(struct file *filep);
+};
+
+/*
+ * NFS_FS
+ */
+struct nfs_ssc_client_ops {
+ void (*sco_sb_deactive)(struct super_block *sb);
+};
+
+struct nfs_ssc_client_ops_tbl {
+ const struct nfs4_ssc_client_ops *ssc_nfs4_ops;
+ const struct nfs_ssc_client_ops *ssc_nfs_ops;
+};
+
+extern void nfs42_ssc_register_ops(void);
+extern void nfs42_ssc_unregister_ops(void);
+
+extern void nfs42_ssc_register(const struct nfs4_ssc_client_ops *ops);
+extern void nfs42_ssc_unregister(const struct nfs4_ssc_client_ops *ops);
+
+#ifdef CONFIG_NFSD_V4_2_INTER_SSC
+static inline struct file *nfs42_ssc_open(struct vfsmount *ss_mnt,
+ struct nfs_fh *src_fh, nfs4_stateid *stateid)
+{
+ if (nfs_ssc_client_tbl.ssc_nfs4_ops)
+ return (*nfs_ssc_client_tbl.ssc_nfs4_ops->sco_open)(ss_mnt, src_fh, stateid);
+ return ERR_PTR(-EIO);
+}
+
+static inline void nfs42_ssc_close(struct file *filep)
+{
+ if (nfs_ssc_client_tbl.ssc_nfs4_ops)
+ (*nfs_ssc_client_tbl.ssc_nfs4_ops->sco_close)(filep);
+}
+
+struct nfsd4_ssc_umount_item {
+ struct list_head nsui_list;
+ bool nsui_busy;
+ /*
+ * nsui_refcnt is initialized to 2: 1 for the list and 1 for the consumer.
+ * The entry is removed when the refcount drops to 1 and nsui_expire expires.
+ */
+ refcount_t nsui_refcnt;
+ unsigned long nsui_expire;
+ struct vfsmount *nsui_vfsmount;
+ char nsui_ipaddr[RPC_MAX_ADDRBUFLEN + 1];
+};
+#endif
+
+/*
+ * NFS_FS
+ */
+extern void nfs_ssc_register(const struct nfs_ssc_client_ops *ops);
+extern void nfs_ssc_unregister(const struct nfs_ssc_client_ops *ops);
+
+static inline void nfs_do_sb_deactive(struct super_block *sb)
+{
+ if (nfs_ssc_client_tbl.ssc_nfs_ops)
+ (*nfs_ssc_client_tbl.ssc_nfs_ops->sco_sb_deactive)(sb);
+}
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 94c77ed55ce1..e86cf6642d21 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -15,6 +15,8 @@
#define NFS_DEF_FILE_IO_SIZE (4096U)
#define NFS_MIN_FILE_IO_SIZE (1024U)
+#define NFS_BITMASK_SZ 3
+
struct nfs4_string {
unsigned int len;
char *data;
@@ -75,6 +77,7 @@ struct nfs_fattr {
struct nfs4_string *owner_name;
struct nfs4_string *group_name;
struct nfs4_threshold *mdsthreshold; /* pNFS threshold hints */
+ struct nfs4_label *label;
};
#define NFS_ATTR_FATTR_TYPE (1U << 0)
@@ -149,6 +152,9 @@ struct nfs_fsinfo {
__u32 layouttype[NFS_MAX_LAYOUT_TYPES]; /* supported pnfs layout driver */
__u32 blksize; /* preferred pnfs io block size */
__u32 clone_blksize; /* granularity of a CLONE operation */
+ enum nfs4_change_attr_type
+ change_attr_type; /* Info about change attr */
+ __u32 xattr_support; /* User xattrs supported */
};
struct nfs_fsstat {
@@ -271,6 +277,7 @@ struct nfs4_layoutget {
struct nfs4_layoutget_args args;
struct nfs4_layoutget_res res;
const struct cred *cred;
+ struct pnfs_layout_hdr *lo;
gfp_t gfp_flags;
};
@@ -481,7 +488,6 @@ struct nfs_openres {
struct nfs4_change_info cinfo;
__u32 rflags;
struct nfs_fattr * f_attr;
- struct nfs4_label *f_label;
struct nfs_seqid * seqid;
const struct nfs_server *server;
fmode_t delegation_type;
@@ -524,6 +530,7 @@ struct nfs_closeargs {
fmode_t fmode;
u32 share_access;
const u32 * bitmask;
+ u32 bitmask_store[NFS_BITMASK_SZ];
struct nfs4_layoutreturn_args *lr_args;
};
@@ -606,7 +613,8 @@ struct nfs4_delegreturnargs {
struct nfs4_sequence_args seq_args;
const struct nfs_fh *fhandle;
const nfs4_stateid *stateid;
- const u32 * bitmask;
+ const u32 *bitmask;
+ u32 bitmask_store[NFS_BITMASK_SZ];
struct nfs4_layoutreturn_args *lr_args;
};
@@ -647,6 +655,7 @@ struct nfs_pgio_args {
unsigned int replen; /* used by read */
struct {
const u32 * bitmask; /* used by write */
+ u32 bitmask_store[NFS_BITMASK_SZ]; /* used by write */
enum nfs3_stable_how stable; /* used by write */
};
};
@@ -655,7 +664,7 @@ struct nfs_pgio_args {
struct nfs_pgio_res {
struct nfs4_sequence_res seq_res;
struct nfs_fattr * fattr;
- __u32 count;
+ __u64 count;
__u32 op_status;
union {
struct {
@@ -736,18 +745,30 @@ struct nfs_auth_info {
*/
struct nfs_entry {
__u64 ino;
- __u64 cookie,
- prev_cookie;
+ __u64 cookie;
const char * name;
unsigned int len;
int eof;
struct nfs_fh * fh;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
unsigned char d_type;
struct nfs_server * server;
};
+struct nfs_readdir_arg {
+ struct dentry *dentry;
+ const struct cred *cred;
+ __be32 *verf;
+ u64 cookie;
+ struct page **pages;
+ unsigned int page_len;
+ bool plus;
+};
+
+struct nfs_readdir_res {
+ __be32 *verf;
+};
+
/*
* The following types are for NFSv2 only.
*/
@@ -779,9 +800,17 @@ struct nfs_setattrargs {
const struct nfs4_label *label;
};
+enum nfs4_acl_type {
+ NFS4ACL_NONE = 0,
+ NFS4ACL_ACL,
+ NFS4ACL_DACL,
+ NFS4ACL_SACL,
+};
+
struct nfs_setaclargs {
struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
+ enum nfs4_acl_type acl_type;
size_t acl_len;
struct page ** acl_pages;
};
@@ -793,6 +822,7 @@ struct nfs_setaclres {
struct nfs_getaclargs {
struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
+ enum nfs4_acl_type acl_type;
size_t acl_len;
struct page ** acl_pages;
};
@@ -801,6 +831,7 @@ struct nfs_getaclargs {
#define NFS4_ACL_TRUNC 0x0001 /* ACL was truncated */
struct nfs_getaclres {
struct nfs4_sequence_res seq_res;
+ enum nfs4_acl_type acl_type;
size_t acl_len;
size_t acl_data_offset;
int acl_flags;
@@ -810,7 +841,6 @@ struct nfs_getaclres {
struct nfs_setattrres {
struct nfs4_sequence_res seq_res;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
const struct nfs_server * server;
};
@@ -1017,7 +1047,6 @@ struct nfs4_create_res {
const struct nfs_server * server;
struct nfs_fh * fh;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
struct nfs4_change_info dir_cinfo;
};
@@ -1042,7 +1071,6 @@ struct nfs4_getattr_res {
struct nfs4_sequence_res seq_res;
const struct nfs_server * server;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
};
struct nfs4_link_arg {
@@ -1057,7 +1085,6 @@ struct nfs4_link_res {
struct nfs4_sequence_res seq_res;
const struct nfs_server * server;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
struct nfs4_change_info cinfo;
struct nfs_fattr * dir_attr;
};
@@ -1074,7 +1101,6 @@ struct nfs4_lookup_res {
const struct nfs_server * server;
struct nfs_fattr * fattr;
struct nfs_fh * fh;
- struct nfs4_label *label;
};
struct nfs4_lookupp_arg {
@@ -1088,7 +1114,6 @@ struct nfs4_lookupp_res {
const struct nfs_server *server;
struct nfs_fattr *fattr;
struct nfs_fh *fh;
- struct nfs4_label *label;
};
struct nfs4_lookup_root_arg {
@@ -1178,6 +1203,8 @@ struct nfs4_server_caps_res {
u32 has_links;
u32 has_symlinks;
u32 fh_expire_type;
+ u32 case_insensitive;
+ u32 case_preserving;
};
#define NFS4_PATHNAME_MAXCOMPONENTS 512
@@ -1195,7 +1222,7 @@ struct nfs4_fs_location {
#define NFS4_FS_LOCATIONS_MAXENTRIES 10
struct nfs4_fs_locations {
- struct nfs_fattr fattr;
+ struct nfs_fattr *fattr;
const struct nfs_server *server;
struct nfs4_pathname fs_path;
int nlocations;
@@ -1226,7 +1253,7 @@ struct nfs4_secinfo4 {
struct nfs4_secinfo_flavors {
unsigned int num_flavors;
- struct nfs4_secinfo4 flavors[0];
+ struct nfs4_secinfo4 flavors[];
};
struct nfs4_secinfo_arg {
@@ -1265,16 +1292,25 @@ struct nfstime4 {
struct pnfs_commit_bucket {
struct list_head written;
struct list_head committing;
- struct pnfs_layout_segment *wlseg;
- struct pnfs_layout_segment *clseg;
+ struct pnfs_layout_segment *lseg;
struct nfs_writeverf direct_verf;
};
+struct pnfs_commit_array {
+ struct list_head cinfo_list;
+ struct list_head lseg_list;
+ struct pnfs_layout_segment *lseg;
+ struct rcu_head rcu;
+ refcount_t refcount;
+ unsigned int nbuckets;
+ struct pnfs_commit_bucket buckets[];
+};
+
struct pnfs_ds_commit_info {
- int nwritten;
- int ncommitting;
- int nbuckets;
- struct pnfs_commit_bucket *buckets;
+ struct list_head commits;
+ unsigned int nwritten;
+ unsigned int ncommitting;
+ const struct pnfs_commit_ops *ops;
};
struct nfs41_state_protection {
@@ -1307,11 +1343,13 @@ struct nfs41_impl_id {
struct nfstime4 date;
};
+#define MAX_BIND_CONN_TO_SESSION_RETRIES 3
struct nfs41_bind_conn_to_session_args {
struct nfs_client *client;
struct nfs4_sessionid sessionid;
u32 dir;
bool use_conn_in_rdma_mode;
+ int retries;
};
struct nfs41_bind_conn_to_session_res {
@@ -1385,22 +1423,11 @@ struct nfs41_free_stateid_res {
unsigned int status;
};
-static inline void
-nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
-{
- kfree(cinfo->buckets);
-}
-
#else
struct pnfs_ds_commit_info {
};
-static inline void
-nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
-{
-}
-
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
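The pnfs_commit_array above ends in a flexible array of buckets, so it is sized at allocation time. A sketch of the usual kernel idiom with struct_size(); example_alloc_commit_array() is hypothetical:

#include <linux/overflow.h>
#include <linux/slab.h>

static struct pnfs_commit_array *
example_alloc_commit_array(unsigned int nbuckets, gfp_t gfp)
{
	struct pnfs_commit_array *array;

	array = kzalloc(struct_size(array, buckets, nbuckets), gfp);
	if (!array)
		return NULL;
	array->nbuckets = nbuckets;
	refcount_set(&array->refcount, 1);	/* consumer's reference */
	return array;
}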
@@ -1496,7 +1523,64 @@ struct nfs42_seek_res {
u32 sr_eof;
u64 sr_offset;
};
-#endif
+
+struct nfs42_setxattrargs {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh *fh;
+ const char *xattr_name;
+ u32 xattr_flags;
+ size_t xattr_len;
+ struct page **xattr_pages;
+};
+
+struct nfs42_setxattrres {
+ struct nfs4_sequence_res seq_res;
+ struct nfs4_change_info cinfo;
+};
+
+struct nfs42_getxattrargs {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh *fh;
+ const char *xattr_name;
+ size_t xattr_len;
+ struct page **xattr_pages;
+};
+
+struct nfs42_getxattrres {
+ struct nfs4_sequence_res seq_res;
+ size_t xattr_len;
+};
+
+struct nfs42_listxattrsargs {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh *fh;
+ u32 count;
+ u64 cookie;
+ struct page **xattr_pages;
+};
+
+struct nfs42_listxattrsres {
+ struct nfs4_sequence_res seq_res;
+ struct page *scratch;
+ void *xattr_buf;
+ size_t xattr_len;
+ u64 cookie;
+ bool eof;
+ size_t copied;
+};
+
+struct nfs42_removexattrargs {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh *fh;
+ const char *xattr_name;
+};
+
+struct nfs42_removexattrres {
+ struct nfs4_sequence_res seq_res;
+ struct nfs4_change_info cinfo;
+};
+
+#endif /* CONFIG_NFS_V4_2 */
struct nfs_page;
@@ -1516,6 +1600,7 @@ enum {
NFS_IOHDR_STAT,
NFS_IOHDR_RESEND_PNFS,
NFS_IOHDR_RESEND_MDS,
+ NFS_IOHDR_UNSTABLE_WRITES,
};
struct nfs_io_completion;
@@ -1552,8 +1637,8 @@ struct nfs_pgio_header {
__u64 mds_offset; /* Filelayout dense stripe */
struct nfs_page_array page_array;
struct nfs_client *ds_clp; /* pNFS data server */
- int ds_commit_idx; /* ds index if ds_clp is set */
- int pgio_mirror_idx;/* mirror index in pgio layer */
+ u32 ds_commit_idx; /* ds index if ds_clp is set */
+ u32 pgio_mirror_idx;/* mirror index in pgio layer */
};
struct nfs_mds_commit_info {
@@ -1620,6 +1705,7 @@ struct nfs_unlinkdata {
struct nfs_renamedata {
struct nfs_renameargs args;
struct nfs_renameres res;
+ struct rpc_task task;
const struct cred *cred;
struct inode *old_dir;
struct dentry *old_dentry;
@@ -1657,16 +1743,14 @@ struct nfs_rpc_ops {
int (*submount) (struct fs_context *, struct nfs_server *);
int (*try_get_tree) (struct fs_context *);
int (*getattr) (struct nfs_server *, struct nfs_fh *,
- struct nfs_fattr *, struct nfs4_label *,
- struct inode *);
+ struct nfs_fattr *, struct inode *);
int (*setattr) (struct dentry *, struct nfs_fattr *,
struct iattr *);
int (*lookup) (struct inode *, struct dentry *,
- struct nfs_fh *, struct nfs_fattr *,
- struct nfs4_label *);
+ struct nfs_fh *, struct nfs_fattr *);
int (*lookupp) (struct inode *, struct nfs_fh *,
- struct nfs_fattr *, struct nfs4_label *);
- int (*access) (struct inode *, struct nfs_access_entry *);
+ struct nfs_fattr *);
+ int (*access) (struct inode *, struct nfs_access_entry *, const struct cred *);
int (*readlink)(struct inode *, struct page *, unsigned int,
unsigned int);
int (*create) (struct inode *, struct dentry *,
@@ -1685,8 +1769,7 @@ struct nfs_rpc_ops {
unsigned int, struct iattr *);
int (*mkdir) (struct inode *, struct dentry *, struct iattr *);
int (*rmdir) (struct inode *, const struct qstr *);
- int (*readdir) (struct dentry *, const struct cred *,
- u64, struct page **, unsigned int, bool);
+ int (*readdir) (struct nfs_readdir_arg *, struct nfs_readdir_res *);
int (*mknod) (struct inode *, struct dentry *, struct iattr *,
dev_t);
int (*statfs) (struct nfs_server *, struct nfs_fh *,
@@ -1725,6 +1808,9 @@ struct nfs_rpc_ops {
struct nfs_server *(*create_server)(struct fs_context *);
struct nfs_server *(*clone_server)(struct nfs_server *, struct nfs_fh *,
struct nfs_fattr *, rpc_authflavor_t);
+ int (*discover_trunking)(struct nfs_server *, struct nfs_fh *);
+ void (*enable_swap)(struct inode *inode);
+ void (*disable_swap)(struct inode *inode);
};
/*
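The readdir entry point in nfs_rpc_ops now takes the argument and result bundles defined earlier (struct nfs_readdir_arg / struct nfs_readdir_res) instead of six positional parameters. A hypothetical caller, as a sketch:

static int example_readdir(struct dentry *dentry, const struct cred *cred,
			   __be32 *verf, u64 cookie, struct page **pages,
			   unsigned int page_len, bool plus,
			   const struct nfs_rpc_ops *ops)
{
	struct nfs_readdir_arg arg = {
		.dentry		= dentry,
		.cred		= cred,
		.verf		= verf,
		.cookie		= cookie,
		.pages		= pages,
		.page_len	= page_len,
		.plus		= plus,
	};
	struct nfs_readdir_res res = {
		.verf		= verf,	/* server's verifier written back here */
	};

	return ops->readdir(&arg, &res);
}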
diff --git a/include/linux/nfsacl.h b/include/linux/nfsacl.h
index 103d44695323..8e76a79cdc6a 100644
--- a/include/linux/nfsacl.h
+++ b/include/linux/nfsacl.h
@@ -38,5 +38,11 @@ nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
extern int
nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt,
struct posix_acl **pacl);
+extern bool
+nfs_stream_decode_acl(struct xdr_stream *xdr, unsigned int *aclcnt,
+ struct posix_acl **pacl);
+extern bool
+nfs_stream_encode_acl(struct xdr_stream *xdr, struct inode *inode,
+ struct posix_acl *acl, int encode_entries, int typeflag);
#endif /* __LINUX_NFSACL_H */
diff --git a/include/linux/nitro_enclaves.h b/include/linux/nitro_enclaves.h
new file mode 100644
index 000000000000..d91ef2bfdf47
--- /dev/null
+++ b/include/linux/nitro_enclaves.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ */
+
+#ifndef _LINUX_NITRO_ENCLAVES_H_
+#define _LINUX_NITRO_ENCLAVES_H_
+
+#include <uapi/linux/nitro_enclaves.h>
+
+#endif /* _LINUX_NITRO_ENCLAVES_H_ */
diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h
index b22782225f27..cbe5fd1dd2e7 100644
--- a/include/linux/nl802154.h
+++ b/include/linux/nl802154.h
@@ -8,6 +8,8 @@
#ifndef NL802154_H
#define NL802154_H
+#include <net/netlink.h>
+
#define IEEE802154_NL_NAME "802.15.4 MAC"
#define IEEE802154_MCAST_COORD_NAME "coordinator"
#define IEEE802154_MCAST_BEACON_NAME "beacon"
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 9003e29cde46..f700ff2df074 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -122,6 +122,8 @@ int watchdog_nmi_probe(void);
int watchdog_nmi_enable(unsigned int cpu);
void watchdog_nmi_disable(unsigned int cpu);
+void lockup_detector_reconfigure(void);
+
/**
* touch_nmi_watchdog - restart NMI watchdog timeout.
*
@@ -202,16 +204,11 @@ static inline void watchdog_update_hrtimer_threshold(u64 period) { }
#endif
struct ctl_table;
-extern int proc_watchdog(struct ctl_table *, int ,
- void __user *, size_t *, loff_t *);
-extern int proc_nmi_watchdog(struct ctl_table *, int ,
- void __user *, size_t *, loff_t *);
-extern int proc_soft_watchdog(struct ctl_table *, int ,
- void __user *, size_t *, loff_t *);
-extern int proc_watchdog_thresh(struct ctl_table *, int ,
- void __user *, size_t *, loff_t *);
-extern int proc_watchdog_cpumask(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
+int proc_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_nmi_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_soft_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_watchdog_thresh(struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_watchdog_cpumask(struct ctl_table *, int, void *, size_t *, loff_t *);
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
diff --git a/include/linux/node.h b/include/linux/node.h
index 4866f32a02d8..427a5975cf40 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -2,15 +2,15 @@
/*
* include/linux/node.h - generic node definition
*
- * This is mainly for topological representation. We define the
- * basic 'struct node' here, which can be embedded in per-arch
+ * This is mainly for topological representation. We define the
+ * basic 'struct node' here, which can be embedded in per-arch
* definitions of processors.
*
* Basic handling of the devices is done in drivers/base/node.c
- * and system devices are handled in drivers/base/sys.c.
+ * and system devices are handled in drivers/base/sys.c.
*
* Nodes are exported via driverfs in the class/node/devices/
- * directory.
+ * directory.
*/
#ifndef _LINUX_NODE_H_
#define _LINUX_NODE_H_
@@ -18,7 +18,6 @@
#include <linux/device.h>
#include <linux/cpumask.h>
#include <linux/list.h>
-#include <linux/workqueue.h>
/**
* struct node_hmem_attrs - heterogeneous memory performance attributes
@@ -84,10 +83,6 @@ static inline void node_set_perf_attrs(unsigned int nid,
struct node {
struct device dev;
struct list_head access_list;
-
-#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
- struct work_struct node_work;
-#endif
#ifdef CONFIG_HMEM_REPORTING
struct list_head cache_attrs;
struct device *cache_dev;
@@ -96,21 +91,22 @@ struct node {
struct memory_block;
extern struct node *node_devices[];
-typedef void (*node_registration_func_t)(struct node *);
-#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA)
-extern int link_mem_sections(int nid, unsigned long start_pfn,
- unsigned long end_pfn);
+#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_NUMA)
+void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
+ unsigned long end_pfn,
+ enum meminit_context context);
#else
-static inline int link_mem_sections(int nid, unsigned long start_pfn,
- unsigned long end_pfn)
+static inline void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
+ unsigned long end_pfn,
+ enum meminit_context context)
{
- return 0;
}
#endif
extern void unregister_node(struct node *node);
#ifdef CONFIG_NUMA
+extern void node_dev_init(void);
/* Core of the node registration - only memory hotplug should use this */
extern int __register_one_node(int nid);
@@ -127,8 +123,8 @@ static inline int register_one_node(int nid)
error = __register_one_node(nid);
if (error)
return error;
- /* link memory sections under this node */
- error = link_mem_sections(nid, start_pfn, end_pfn);
+ register_memory_blocks_under_node(nid, start_pfn, end_pfn,
+ MEMINIT_EARLY);
}
return error;
@@ -142,12 +138,10 @@ extern void unregister_memory_block_under_nodes(struct memory_block *mem_blk);
extern int register_memory_node_under_compute_node(unsigned int mem_nid,
unsigned int cpu_nid,
unsigned access);
-
-#ifdef CONFIG_HUGETLBFS
-extern void register_hugetlbfs_with_node(node_registration_func_t doregister,
- node_registration_func_t unregister);
-#endif
#else
+static inline void node_dev_init(void)
+{
+}
static inline int __register_one_node(int nid)
{
return 0;
@@ -171,11 +165,6 @@ static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
static inline void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
}
-
-static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
- node_registration_func_t unreg)
-{
-}
#endif
#define to_node(device) container_of(device, struct node, dev)
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 27e7fa36f707..efef68c9352a 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -42,11 +42,11 @@
* void nodes_shift_right(dst, src, n) Shift right
* void nodes_shift_left(dst, src, n) Shift left
*
- * int first_node(mask) Number lowest set bit, or MAX_NUMNODES
- * int next_node(node, mask) Next node past 'node', or MAX_NUMNODES
- * int next_node_in(node, mask) Next node past 'node', or wrap to first,
+ * unsigned int first_node(mask) Number lowest set bit, or MAX_NUMNODES
+ * unsigned int next_node(node, mask) Next node past 'node', or MAX_NUMNODES
+ * unsigned int next_node_in(node, mask) Next node past 'node', or wrap to first,
* or MAX_NUMNODES
- * int first_unset_node(mask) First node not set in mask, or
+ * unsigned int first_unset_node(mask) First node not set in mask, or
* MAX_NUMNODES
*
* nodemask_t nodemask_of_node(node) Return nodemask with bit 'node' set
@@ -90,10 +90,11 @@
* for such situations. See below and CPUMASK_ALLOC also.
*/
-#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>
+#include <linux/minmax.h>
#include <linux/numa.h>
+#include <linux/random.h>
typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
extern nodemask_t _unused_nodemask_arg_;
@@ -119,7 +120,7 @@ static inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
* The inline keyword gives the compiler room to decide to inline, or
* not inline a function as it sees best. However, as these functions
* are called in both __init and non-__init functions, if they are not
- * inlined we will end up with a section mis-match error (of the type of
+ * inlined we will end up with a section mismatch error (of the type of
* freeable items not being freed). So we must use __always_inline here
* to fix the problem. If other functions in the future also end up in
* this situation they will also need to be annotated as __always_inline
@@ -153,7 +154,7 @@ static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
#define node_test_and_set(node, nodemask) \
__node_test_and_set((node), &(nodemask))
-static inline int __node_test_and_set(int node, nodemask_t *addr)
+static inline bool __node_test_and_set(int node, nodemask_t *addr)
{
return test_and_set_bit(node, addr->bits);
}
@@ -200,7 +201,7 @@ static inline void __nodes_complement(nodemask_t *dstp,
#define nodes_equal(src1, src2) \
__nodes_equal(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_equal(const nodemask_t *src1p,
+static inline bool __nodes_equal(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
@@ -208,7 +209,7 @@ static inline int __nodes_equal(const nodemask_t *src1p,
#define nodes_intersects(src1, src2) \
__nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_intersects(const nodemask_t *src1p,
+static inline bool __nodes_intersects(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
@@ -216,20 +217,20 @@ static inline int __nodes_intersects(const nodemask_t *src1p,
#define nodes_subset(src1, src2) \
__nodes_subset(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_subset(const nodemask_t *src1p,
+static inline bool __nodes_subset(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
-static inline int __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
+static inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
-static inline int __nodes_full(const nodemask_t *srcp, unsigned int nbits)
+static inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_full(srcp->bits, nbits);
}
@@ -260,15 +261,15 @@ static inline void __nodes_shift_left(nodemask_t *dstp,
> MAX_NUMNODES, then the silly min_ts could be dropped. */
#define first_node(src) __first_node(&(src))
-static inline int __first_node(const nodemask_t *srcp)
+static inline unsigned int __first_node(const nodemask_t *srcp)
{
- return min_t(int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
+ return min_t(unsigned int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
}
#define next_node(n, src) __next_node((n), &(src))
-static inline int __next_node(int n, const nodemask_t *srcp)
+static inline unsigned int __next_node(int n, const nodemask_t *srcp)
{
- return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
+ return min_t(unsigned int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
}
/*
@@ -276,7 +277,14 @@ static inline int __next_node(int n, const nodemask_t *srcp)
* the first node in src if needed. Returns MAX_NUMNODES if src is empty.
*/
#define next_node_in(n, src) __next_node_in((n), &(src))
-int __next_node_in(int node, const nodemask_t *srcp);
+static inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
+{
+ unsigned int ret = __next_node(node, srcp);
+
+ if (ret == MAX_NUMNODES)
+ ret = __first_node(srcp);
+ return ret;
+}
static inline void init_nodemask_of_node(nodemask_t *mask, int node)
{
@@ -296,9 +304,9 @@ static inline void init_nodemask_of_node(nodemask_t *mask, int node)
})
#define first_unset_node(mask) __first_unset_node(&(mask))
-static inline int __first_unset_node(const nodemask_t *maskp)
+static inline unsigned int __first_unset_node(const nodemask_t *maskp)
{
- return min_t(int,MAX_NUMNODES,
+ return min_t(unsigned int, MAX_NUMNODES,
find_first_zero_bit(maskp->bits, MAX_NUMNODES));
}
@@ -375,14 +383,13 @@ static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
}
#if MAX_NUMNODES > 1
-#define for_each_node_mask(node, mask) \
- for ((node) = first_node(mask); \
- (node) < MAX_NUMNODES; \
- (node) = next_node((node), (mask)))
+#define for_each_node_mask(node, mask) \
+ for ((node) = first_node(mask); \
+ (node >= 0) && (node) < MAX_NUMNODES; \
+ (node) = next_node((node), (mask)))
#else /* MAX_NUMNODES == 1 */
-#define for_each_node_mask(node, mask) \
- if (!nodes_empty(mask)) \
- for ((node) = 0; (node) < 1; (node)++)
+#define for_each_node_mask(node, mask) \
+ for ((node) = 0; (node) < 1 && !nodes_empty(mask); (node)++)
#endif /* MAX_NUMNODES */
/*
@@ -399,6 +406,7 @@ enum node_states {
#endif
N_MEMORY, /* The node has memory(regular, high, movable) */
N_CPU, /* The node has one or more cpus */
+ N_GENERIC_INITIATOR, /* The node has one or more Generic Initiators */
NR_NODE_STATES
};
@@ -435,11 +443,11 @@ static inline int num_node_state(enum node_states state)
#define first_online_node first_node(node_states[N_ONLINE])
#define first_memory_node first_node(node_states[N_MEMORY])
-static inline int next_online_node(int nid)
+static inline unsigned int next_online_node(int nid)
{
return next_node(nid, node_states[N_ONLINE]);
}
-static inline int next_memory_node(int nid)
+static inline unsigned int next_memory_node(int nid)
{
return next_node(nid, node_states[N_MEMORY]);
}
@@ -485,6 +493,7 @@ static inline int num_node_state(enum node_states state)
#define first_online_node 0
#define first_memory_node 0
#define next_online_node(nid) (MAX_NUMNODES)
+#define next_memory_node(nid) (MAX_NUMNODES)
#define nr_node_ids 1U
#define nr_online_nodes 1U
@@ -493,14 +502,28 @@ static inline int num_node_state(enum node_states state)
#endif
+static inline int node_random(const nodemask_t *maskp)
+{
#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
-extern int node_random(const nodemask_t *maskp);
+ int w, bit;
+
+ w = nodes_weight(*maskp);
+ switch (w) {
+ case 0:
+ bit = NUMA_NO_NODE;
+ break;
+ case 1:
+ bit = first_node(*maskp);
+ break;
+ default:
+ bit = find_nth_bit(maskp->bits, MAX_NUMNODES, prandom_u32_max(w));
+ break;
+ }
+ return bit;
#else
-static inline int node_random(const nodemask_t *mask)
-{
return 0;
-}
#endif
+}
#define node_online_map node_states[N_ONLINE]
#define node_possible_map node_states[N_POSSIBLE]
@@ -514,7 +537,7 @@ static inline int node_random(const nodemask_t *mask)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
/*
- * For nodemask scrach area.
+ * For nodemask scratch area.
* NODEMASK_ALLOC(type, name) allocates an object with a specified type and
* name.
*/
@@ -527,7 +550,7 @@ static inline int node_random(const nodemask_t *mask)
#define NODEMASK_FREE(m) do {} while (0)
#endif
-/* A example struture for using NODEMASK_ALLOC, used in mempolicy. */
+/* Example structure for using NODEMASK_ALLOC, used in mempolicy. */
struct nodemask_scratch {
nodemask_t mask1;
nodemask_t mask2;
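next_node_in() now wraps to the first set node inline, which makes round-robin walks over a mask cheap. A usage sketch; example_next_alloc_node() is hypothetical:

static int example_next_alloc_node(int *cursor, const nodemask_t *allowed)
{
	int nid = next_node_in(*cursor, *allowed);

	if (nid == MAX_NUMNODES)	/* the mask is empty */
		return NUMA_NO_NODE;
	*cursor = nid;			/* remember where to resume */
	return nid;
}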
diff --git a/include/linux/nospec.h b/include/linux/nospec.h
index 0c5ef54fd416..c1e79f72cd89 100644
--- a/include/linux/nospec.h
+++ b/include/linux/nospec.h
@@ -5,6 +5,8 @@
#ifndef _LINUX_NOSPEC_H
#define _LINUX_NOSPEC_H
+
+#include <linux/compiler.h>
#include <asm/barrier.h>
struct task_struct;
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 018947611483..aef88c2d1173 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -150,6 +150,11 @@ extern int raw_notifier_chain_register(struct raw_notifier_head *nh,
extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
struct notifier_block *nb);
+extern int atomic_notifier_chain_register_unique_prio(
+ struct atomic_notifier_head *nh, struct notifier_block *nb);
+extern int blocking_notifier_chain_register_unique_prio(
+ struct blocking_notifier_head *nh, struct notifier_block *nb);
+
extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
struct notifier_block *nb);
extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
@@ -161,20 +166,19 @@ extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
unsigned long val, void *v);
-extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
- unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
unsigned long val, void *v);
-extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
- unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
unsigned long val, void *v);
-extern int __raw_notifier_call_chain(struct raw_notifier_head *nh,
- unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
unsigned long val, void *v);
-extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
- unsigned long val, void *v, int nr_to_call, int *nr_calls);
+
+extern int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh,
+ unsigned long val_up, unsigned long val_down, void *v);
+extern int raw_notifier_call_chain_robust(struct raw_notifier_head *nh,
+ unsigned long val_up, unsigned long val_down, void *v);
+
+extern bool atomic_notifier_call_chain_is_empty(struct atomic_notifier_head *nh);
#define NOTIFY_DONE 0x0000 /* Don't care */
#define NOTIFY_OK 0x0001 /* Suits me */
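The new _robust variants implement a prepare/abort pattern: callbacks are invoked with val_up, and if one fails, the callbacks that had already succeeded are re-invoked with val_down to unwind. A sketch with hypothetical event codes:

#define MY_EVENT_PREPARE	0	/* hypothetical */
#define MY_EVENT_ABORT		1	/* hypothetical */

static BLOCKING_NOTIFIER_HEAD(my_chain);

static int example_prepare(void *data)
{
	/* On failure, subscribers that saw MY_EVENT_PREPARE are replayed
	 * with MY_EVENT_ABORT so they can undo their preparation. */
	return blocking_notifier_call_chain_robust(&my_chain,
						   MY_EVENT_PREPARE,
						   MY_EVENT_ABORT, data);
}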
diff --git a/include/linux/ns_common.h b/include/linux/ns_common.h
index 5fbc4000358f..0f1d024bd958 100644
--- a/include/linux/ns_common.h
+++ b/include/linux/ns_common.h
@@ -2,12 +2,15 @@
#ifndef _LINUX_NS_COMMON_H
#define _LINUX_NS_COMMON_H
+#include <linux/refcount.h>
+
struct proc_ns_operations;
struct ns_common {
atomic_long_t stashed;
const struct proc_ns_operations *ops;
unsigned int inum;
+ refcount_t count;
};
#endif
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index 074f395b9ad2..cdb171efc7cb 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -42,6 +42,30 @@ struct nsproxy {
extern struct nsproxy init_nsproxy;
/*
+ * A structure to encompass all bits needed to install
+ * a partial or complete new set of namespaces.
+ *
+ * If a new user namespace is requested, cred will
+ * point to a modifiable set of credentials. If a pointer
+ * to a modifiable set is needed, nsset_cred() must be
+ * used and its result tested for NULL.
+ */
+struct nsset {
+ unsigned flags;
+ struct nsproxy *nsproxy;
+ struct fs_struct *fs;
+ const struct cred *cred;
+};
+
+static inline struct cred *nsset_cred(struct nsset *set)
+{
+ if (set->flags & CLONE_NEWUSER)
+ return (struct cred *)set->cred;
+
+ return NULL;
+}
+
+/*
* the namespaces access rules are:
*
* 1. only current task is allowed to change tsk->nsproxy pointer or
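struct nsset bundles everything setns() needs to install namespaces, and nsset_cred() is the only sanctioned way to get at modifiable credentials. An install-callback sketch; example_install() is hypothetical:

static int example_install(struct nsset *nsset, struct ns_common *ns)
{
	struct cred *cred = nsset_cred(nsset);

	if (!cred)
		return -EPERM;	/* no user namespace change requested */
	/* ... adjust cred and nsset->nsproxy for the namespace 'ns' ... */
	return 0;
}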
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index 8c13538aeffe..191b524e5c0d 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -478,7 +478,7 @@ void ntb_unregister_client(struct ntb_client *client);
int ntb_register_device(struct ntb_dev *ntb);
/**
- * ntb_register_device() - unregister a ntb device
+ * ntb_unregister_device() - unregister a ntb device
* @ntb: NTB device context.
*
* The device will be removed from the list of ntb devices. If the ntb device
@@ -1351,7 +1351,7 @@ static inline int ntb_spad_write(struct ntb_dev *ntb, int sidx, u32 val)
* @sidx: Scratchpad index.
* @spad_addr: OUT - The address of the peer scratchpad register.
*
- * Return the address of the peer doorbell register. This may be used, for
+ * Return the address of the peer scratchpad register. This may be used, for
* example, by drivers that offload memory copy operations to a dma engine.
*
* Return: Zero on success, otherwise an error number.
@@ -1373,7 +1373,7 @@ static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx,
*
* Read the peer scratchpad register, and return the value.
*
- * Return: The value of the local scratchpad register.
+ * Return: The value of the peer scratchpad register.
*/
static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
{
diff --git a/include/linux/nubus.h b/include/linux/nubus.h
index eba50b057f6f..392fc6c53e96 100644
--- a/include/linux/nubus.h
+++ b/include/linux/nubus.h
@@ -86,7 +86,7 @@ extern struct list_head nubus_func_rsrcs;
struct nubus_driver {
struct device_driver driver;
int (*probe)(struct nubus_board *board);
- int (*remove)(struct nubus_board *board);
+ void (*remove)(struct nubus_board *board);
};
extern struct bus_type nubus_bus_type;
diff --git a/include/linux/numa.h b/include/linux/numa.h
index 110b0e5d0fb0..59df211d051f 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NUMA_H
#define _LINUX_NUMA_H
-
+#include <linux/types.h>
#ifdef CONFIG_NODES_SHIFT
#define NODES_SHIFT CONFIG_NODES_SHIFT
@@ -13,4 +13,53 @@
#define NUMA_NO_NODE (-1)
+/* optionally keep NUMA memory info available post init */
+#ifdef CONFIG_NUMA_KEEP_MEMINFO
+#define __initdata_or_meminfo
+#else
+#define __initdata_or_meminfo __initdata
+#endif
+
+#ifdef CONFIG_NUMA
+#include <linux/printk.h>
+#include <asm/sparsemem.h>
+
+/* Generic implementation available */
+int numa_map_to_online_node(int node);
+
+#ifndef memory_add_physaddr_to_nid
+static inline int memory_add_physaddr_to_nid(u64 start)
+{
+ pr_info_once("Unknown online node for memory at 0x%llx, assuming node 0\n",
+ start);
+ return 0;
+}
+#endif
+#ifndef phys_to_target_node
+static inline int phys_to_target_node(u64 start)
+{
+ pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n",
+ start);
+ return 0;
+}
+#endif
+#else /* !CONFIG_NUMA */
+static inline int numa_map_to_online_node(int node)
+{
+ return NUMA_NO_NODE;
+}
+static inline int memory_add_physaddr_to_nid(u64 start)
+{
+ return 0;
+}
+static inline int phys_to_target_node(u64 start)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
+extern const struct attribute_group arch_node_dev_group;
+#endif
+
#endif /* _LINUX_NUMA_H */
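
A minimal usage sketch for numa_map_to_online_node(), resolving a device's possibly offline or memoryless node before allocating (the function name is illustrative; assumes the usual device/gfp headers):

static struct page *example_alloc_near_dev(struct device *dev)
{
	/* returns an online node, or NUMA_NO_NODE without CONFIG_NUMA */
	int nid = numa_map_to_online_node(dev_to_node(dev));

	return alloc_pages_node(nid, GFP_KERNEL, 0);
}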
diff --git a/include/linux/nvme-auth.h b/include/linux/nvme-auth.h
new file mode 100644
index 000000000000..dcb8030062dd
--- /dev/null
+++ b/include/linux/nvme-auth.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 Hannes Reinecke, SUSE Software Solutions
+ */
+
+#ifndef _NVME_AUTH_H
+#define _NVME_AUTH_H
+
+#include <crypto/kpp.h>
+
+struct nvme_dhchap_key {
+ u8 *key;
+ size_t len;
+ u8 hash;
+};
+
+u32 nvme_auth_get_seqnum(void);
+const char *nvme_auth_dhgroup_name(u8 dhgroup_id);
+const char *nvme_auth_dhgroup_kpp(u8 dhgroup_id);
+u8 nvme_auth_dhgroup_id(const char *dhgroup_name);
+
+const char *nvme_auth_hmac_name(u8 hmac_id);
+const char *nvme_auth_digest_name(u8 hmac_id);
+size_t nvme_auth_hmac_hash_len(u8 hmac_id);
+u8 nvme_auth_hmac_id(const char *hmac_name);
+
+struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
+ u8 key_hash);
+void nvme_auth_free_key(struct nvme_dhchap_key *key);
+u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn);
+int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key);
+int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
+ u8 *challenge, u8 *aug, size_t hlen);
+int nvme_auth_gen_privkey(struct crypto_kpp *dh_tfm, u8 dh_gid);
+int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm,
+ u8 *host_key, size_t host_key_len);
+int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm,
+ u8 *ctrl_key, size_t ctrl_key_len,
+ u8 *sess_key, size_t sess_key_len);
+
+#endif /* _NVME_AUTH_H */
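
A hedged sketch of the key-handling half of this API; the error-pointer return convention is assumed from the implementation, and the caller is hypothetical:

static int example_setup_dhchap_key(unsigned char *secret, u8 key_hash,
				    struct nvme_dhchap_key **out)
{
	struct nvme_dhchap_key *key;

	key = nvme_auth_extract_key(secret, key_hash);
	if (IS_ERR(key))
		return PTR_ERR(key);

	/* key->key holds key->len bytes; key->hash names the digest */
	*out = key;
	return 0;
}

/* when done with the key: nvme_auth_free_key(key); */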
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 6d0d70f3219c..fa092b9be2fd 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -7,50 +7,30 @@
#define _NVME_FC_DRIVER_H 1
#include <linux/scatterlist.h>
+#include <linux/blk-mq.h>
/*
- * ********************** LLDD FC-NVME Host API ********************
+ * ********************** FC-NVME LS API ********************
*
- * For FC LLDD's that are the NVME Host role.
+ * Data structures used by both FC-NVME hosts and FC-NVME
+ * targets to perform FC-NVME LS requests or transmit
+ * responses.
*
- * ******************************************************************
+ * ***********************************************************
*/
-
-
/**
- * struct nvme_fc_port_info - port-specific ids and FC connection-specific
- * data element used during NVME Host role
- * registrations
+ * struct nvmefc_ls_req - Request structure passed from the transport
+ * to the LLDD to perform a NVME-FC LS request and obtain
+ * a response.
+ * Used by nvme-fc transport (host) to send LS's such as
+ * Create Association, Create Connection and Disconnect
+ * Association.
+ * Used by the nvmet-fc transport (controller) to send
+ * LS's such as Disconnect Association.
*
- * Static fields describing the port being registered:
- * @node_name: FC WWNN for the port
- * @port_name: FC WWPN for the port
- * @port_role: What NVME roles are supported (see FC_PORT_ROLE_xxx)
- * @dev_loss_tmo: maximum delay for reconnects to an association on
- * this device. Used only on a remoteport.
- *
- * Initialization values for dynamic port fields:
- * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must
- * be set to 0.
- */
-struct nvme_fc_port_info {
- u64 node_name;
- u64 port_name;
- u32 port_role;
- u32 port_id;
- u32 dev_loss_tmo;
-};
-
-
-/**
- * struct nvmefc_ls_req - Request structure passed from NVME-FC transport
- * to LLDD in order to perform a NVME FC-4 LS
- * request and obtain a response.
- *
- * Values set by the NVME-FC layer prior to calling the LLDD ls_req
- * entrypoint.
+ * Values set by the requestor prior to calling the LLDD ls_req entrypoint:
* @rqstaddr: pointer to request buffer
* @rqstdma: PCI DMA address of request buffer
* @rqstlen: Length, in bytes, of request buffer
@@ -63,8 +43,8 @@ struct nvme_fc_port_info {
* @private: pointer to memory allocated alongside the ls request structure
* that is specifically for the LLDD to use while processing the
* request. The length of the buffer corresponds to the
- * lsrqst_priv_sz value specified in the nvme_fc_port_template
- * supplied by the LLDD.
+ * lsrqst_priv_sz value specified in the xxx_template supplied
+ * by the LLDD.
* @done: The callback routine the LLDD is to invoke upon completion of
* the LS request. req argument is the pointer to the original LS
* request structure. Status argument must be 0 upon success, a
@@ -86,6 +66,101 @@ struct nvmefc_ls_req {
} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
+/**
+ * struct nvmefc_ls_rsp - Structure passed from the transport to the LLDD
+ * to request the transmit the NVME-FC LS response to a
+ * NVME-FC LS request. The structure originates in the LLDD
+ * and is given to the transport via the xxx_rcv_ls_req()
+ * transport routine. As such, the structure represents the
+ * FC exchange context for the NVME-FC LS request that was
+ * received and for which the response is to be sent.
+ * Used by the LLDD to pass the nvmet-fc transport (controller)
+ * received LS's such as Create Association, Create Connection
+ * and Disconnect Association.
+ * Used by the LLDD to pass the nvme-fc transport (host)
+ * received LS's such as Disconnect Association or Disconnect
+ * Connection.
+ *
+ * The structure is allocated by the LLDD whenever a LS Request is received
+ * from the FC link. The address of the structure is passed to the nvmet-fc
+ * or nvme-fc layer via the xxx_rcv_ls_req() transport routines.
+ *
+ * The address of the structure is to be passed back to the LLDD
+ * when the response is to be transmitted. The LLDD will use the address
+ * to map back to the LLDD exchange structure which maintains information
+ * such as the remote N_Port that sent the LS and any FC exchange context.
+ * Upon completion of the LS response transmit, the LLDD will pass the
+ * address of the structure back to the transport LS rsp done() routine,
+ * allowing the transport to release dma resources. Upon completion of
+ * the done() routine, no further access to the structure will be made by
+ * the transport and the LLDD can de-allocate the structure.
+ *
+ * Field initialization:
+ * At the time of the xxx_rcv_ls_req() call, there is no content that
+ * is valid in the structure.
+ *
+ * When the structure is used for the LLDD->xmt_ls_rsp() call, the
+ * transport layer will fully set the fields in order to specify the
+ * response payload buffer and its length as well as the done routine
+ * to be called upon completion of the transmit. The transport layer
+ * will also set a private pointer for its own use in the done routine.
+ *
+ * Values set by the transport layer prior to calling the LLDD xmt_ls_rsp
+ * entrypoint:
+ * @rspbuf: pointer to the LS response buffer
+ * @rspdma: PCI DMA address of the LS response buffer
+ * @rsplen: Length, in bytes, of the LS response buffer
+ * @done: The callback routine the LLDD is to invoke upon completion of
+ * transmitting the LS response. req argument is the pointer to
+ * the original ls request.
+ * @nvme_fc_private: pointer to an internal transport-specific structure
+ * used as part of the transport done() processing. The LLDD is
+ * not to access this pointer.
+ */
+struct nvmefc_ls_rsp {
+ void *rspbuf;
+ dma_addr_t rspdma;
+ u16 rsplen;
+
+ void (*done)(struct nvmefc_ls_rsp *rsp);
+ void *nvme_fc_private; /* LLDD is not to access !! */
+};
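
To make the ownership hand-off concrete, a sketch of the LLDD's transmit-completion path; struct example_exch and example_free_exch() are assumptions, not part of the API:

/* invoked by (hypothetical) hardware once the LS response is on the wire */
static void example_ls_rsp_tx_done(struct example_exch *ex)
{
	struct nvmefc_ls_rsp *ls_rsp = &ex->ls_rsp;

	ls_rsp->done(ls_rsp);	/* transport releases its dma resources */

	/* only after done() returns may the LLDD drop the exchange */
	example_free_exch(ex);
}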
+
+
+
+/*
+ * ********************** LLDD FC-NVME Host API ********************
+ *
+ * For FC LLDD's that are the NVME Host role.
+ *
+ * ******************************************************************
+ */
+
+
+/**
+ * struct nvme_fc_port_info - port-specific ids and FC connection-specific
+ * data element used during NVME Host role
+ * registrations
+ *
+ * Static fields describing the port being registered:
+ * @node_name: FC WWNN for the port
+ * @port_name: FC WWPN for the port
+ * @port_role: What NVME roles are supported (see FC_PORT_ROLE_xxx)
+ * @dev_loss_tmo: maximum delay for reconnects to an association on
+ * this device. Used only on a remoteport.
+ *
+ * Initialization values for dynamic port fields:
+ * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must
+ * be set to 0.
+ */
+struct nvme_fc_port_info {
+ u64 node_name;
+ u64 port_name;
+ u32 port_role;
+ u32 port_id;
+ u32 dev_loss_tmo;
+};
+
enum nvmefc_fcp_datadir {
NVMEFC_FCP_NODATA, /* payload_length and sg_cnt will be zero */
NVMEFC_FCP_WRITE,
@@ -270,8 +345,6 @@ struct nvme_fc_remote_port {
*
* Host/Initiator Transport Entrypoints/Parameters:
*
- * @module: The LLDD module using the interface
- *
* @localport_delete: The LLDD initiates deletion of a localport via
* nvme_fc_deregister_localport(). However, the teardown is
* asynchronous. This routine is called upon the completion of the
@@ -339,6 +412,21 @@ struct nvme_fc_remote_port {
* indicating an FC transport Aborted status.
* Entrypoint is Mandatory.
*
+ * @xmt_ls_rsp: Called to transmit the response to a FC-NVME FC-4 LS service.
+ * The nvmefc_ls_rsp structure is the same LLDD-supplied exchange
+ * structure specified in the nvme_fc_rcv_ls_req() call made when
+ * the LS request was received. The structure will fully describe
+ * the buffers for the response payload and the dma address of the
+ * payload. The LLDD is to transmit the response (or return a
+ * non-zero errno status), and upon completion of the transmit, call
+ * the "done" routine specified in the nvmefc_ls_rsp structure
+ * (argument to done is the address of the nvmefc_ls_rsp structure
+ * itself). Upon the completion of the done routine, the LLDD shall
+ * consider the LS handling complete and the nvmefc_ls_rsp structure
+ * may be freed/released.
+ * Entrypoint is mandatory if the LLDD calls the nvme_fc_rcv_ls_req()
+ * entrypoint.
+ *
* @max_hw_queues: indicates the maximum number of hw queues the LLDD
* supports for cpu affinitization.
* Value is Mandatory. Must be at least 1.
@@ -373,7 +461,7 @@ struct nvme_fc_remote_port {
* @lsrqst_priv_sz: The LLDD sets this field to the amount of additional
* memory that it would like fc nvme layer to allocate on the LLDD's
* behalf whenever a ls request structure is allocated. The additional
- * memory area solely for the of the LLDD and its location is
+ * memory area is solely for use by the LLDD and its location is
* specified by the ls_request->private pointer.
* Value is Mandatory. Allowed to be zero.
*
@@ -385,8 +473,6 @@ struct nvme_fc_remote_port {
* Value is Mandatory. Allowed to be zero.
*/
struct nvme_fc_port_template {
- struct module *module;
-
/* initiator-based functions */
void (*localport_delete)(struct nvme_fc_local_port *);
void (*remoteport_delete)(struct nvme_fc_remote_port *);
@@ -409,6 +495,11 @@ struct nvme_fc_port_template {
struct nvme_fc_remote_port *,
void *hw_queue_handle,
struct nvmefc_fcp_req *);
+ int (*xmt_ls_rsp)(struct nvme_fc_local_port *localport,
+ struct nvme_fc_remote_port *rport,
+ struct nvmefc_ls_rsp *ls_rsp);
+ void (*map_queues)(struct nvme_fc_local_port *localport,
+ struct blk_mq_queue_map *map);
u32 max_hw_queues;
u16 max_sgl_segments;
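
A minimal map_queues implementation would simply defer to the block layer's default spreading; this sketch assumes the void-returning blk_mq_map_queues() from <linux/blk-mq.h>:

static void example_map_queues(struct nvme_fc_local_port *lport,
			       struct blk_mq_queue_map *map)
{
	/* no IRQ affinity information to expose: use the generic mapping */
	blk_mq_map_queues(map);
}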
@@ -445,6 +536,43 @@ void nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport);
int nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *remoteport,
u32 dev_loss_tmo);
+/*
+ * Routine called to pass a NVME-FC LS request, received by the lldd,
+ * to the nvme-fc transport.
+ *
+ * If the return value is zero: the LS was successfully accepted by the
+ * transport.
+ * If the return value is non-zero: the transport has not accepted the
+ * LS. The lldd should ABTS-LS the LS.
+ *
+ * Note: if the LLDD receives an ABTS for the LS prior to the transport
+ * calling the ops->xmt_ls_rsp() routine to transmit a response, the LLDD
+ * shall mark the LS as aborted, and when xmt_ls_rsp() is called: the
+ * response shall not be transmitted and the struct nvmefc_ls_rsp done
+ * routine shall be called. The LLDD may transmit the ABTS response as
+ * soon as the LS was marked or can delay until the xmt_ls_rsp() call is
+ * made.
+ * Note: if an RCV LS was successfully posted to the transport and the
+ * remoteport is then unregistered before xmt_ls_rsp() was called for
+ * the lsrsp structure, the transport will still call xmt_ls_rsp()
+ * afterward to clean up the outstanding lsrsp structure. The LLDD should
+ * noop the transmission of the rsp and call the lsrsp->done() routine
+ * to allow the lsrsp structure to be released.
+ */
+int nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *remoteport,
+ struct nvmefc_ls_rsp *lsrsp,
+ void *lsreqbuf, u32 lsreqbuf_len);
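
Putting the acceptance rule above into code, a hypothetical LLDD receive path (struct example_exch and example_hw_abts_ls() are assumptions):

static void example_lldd_rcv_ls(struct example_exch *ex,
				void *lsreqbuf, u32 lsreqbuf_len)
{
	if (nvme_fc_rcv_ls_req(ex->rport, &ex->ls_rsp,
			       lsreqbuf, lsreqbuf_len))
		example_hw_abts_ls(ex);	/* transport refused: ABTS-LS it */

	/* on success, wait for ops->xmt_ls_rsp() against ex->ls_rsp */
}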
+
+
+/*
+ * Routine called to get the appid field associated with a request by the lldd
+ *
+ * If the return value is NULL: the user/libvirt has not set an appid for the VM
+ * If the return value is non-NULL: the appid associated with the VM
+ *
+ * @req: IO request from nvme fc to driver
+ */
+char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req);
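
And a short sketch of nvme_fc_io_getuuid() in an LLDD's FCP send path (the exchange type and tagging helper are hypothetical):

static void example_tag_fcp_io(struct example_exch *ex,
			       struct nvmefc_fcp_req *fcpreq)
{
	char *appid = nvme_fc_io_getuuid(fcpreq);

	if (appid)	/* NULL when no appid was set for the VM */
		example_hw_set_appid(ex, appid);
}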
/*
* *************** LLDD FC-NVME Target/Subsystem API ***************
@@ -474,55 +602,6 @@ struct nvmet_fc_port_info {
};
-/**
- * struct nvmefc_tgt_ls_req - Structure used between LLDD and NVMET-FC
- * layer to represent the exchange context for
- * a FC-NVME Link Service (LS).
- *
- * The structure is allocated by the LLDD whenever a LS Request is received
- * from the FC link. The address of the structure is passed to the nvmet-fc
- * layer via the nvmet_fc_rcv_ls_req() call. The address of the structure
- * will be passed back to the LLDD when the response is to be transmit.
- * The LLDD is to use the address to map back to the LLDD exchange structure
- * which maintains information such as the targetport the LS was received
- * on, the remote FC NVME initiator that sent the LS, and any FC exchange
- * context. Upon completion of the LS response transmit, the address of the
- * structure will be passed back to the LS rsp done() routine, allowing the
- * nvmet-fc layer to release dma resources. Upon completion of the done()
- * routine, no further access will be made by the nvmet-fc layer and the
- * LLDD can de-allocate the structure.
- *
- * Field initialization:
- * At the time of the nvmet_fc_rcv_ls_req() call, there is no content that
- * is valid in the structure.
- *
- * When the structure is used for the LLDD->xmt_ls_rsp() call, the nvmet-fc
- * layer will fully set the fields in order to specify the response
- * payload buffer and its length as well as the done routine to be called
- * upon compeletion of the transmit. The nvmet-fc layer will also set a
- * private pointer for its own use in the done routine.
- *
- * Values set by the NVMET-FC layer prior to calling the LLDD xmt_ls_rsp
- * entrypoint.
- * @rspbuf: pointer to the LS response buffer
- * @rspdma: PCI DMA address of the LS response buffer
- * @rsplen: Length, in bytes, of the LS response buffer
- * @done: The callback routine the LLDD is to invoke upon completion of
- * transmitting the LS response. req argument is the pointer to
- * the original ls request.
- * @nvmet_fc_private: pointer to an internal NVMET-FC layer structure used
- * as part of the NVMET-FC processing. The LLDD is not to access
- * this pointer.
- */
-struct nvmefc_tgt_ls_req {
- void *rspbuf;
- dma_addr_t rspdma;
- u16 rsplen;
-
- void (*done)(struct nvmefc_tgt_ls_req *req);
- void *nvmet_fc_private; /* LLDD is not to access !! */
-};
-
/* Operations that NVME-FC layer may request the LLDD to perform for FCP */
enum {
NVMET_FCOP_READDATA = 1, /* xmt data to initiator */
@@ -605,7 +684,7 @@ enum {
* Values set by the LLDD indicating completion status of the FCP operation.
* Must be set prior to calling the done() callback.
* @transferred_length: amount of DATA_OUT payload data received by a
- * a WRITEDATA operation. If not a WRITEDATA operation, value must
+ * WRITEDATA operation. If not a WRITEDATA operation, value must
* be set to 0. Should equal transfer_length on success.
* @fcp_error: status of the FCP operation. Must be 0 on success; on failure
* must be a NVME_SC_FC_xxxx value.
@@ -651,7 +730,7 @@ enum {
*
* Fields with static values for the port. Initialized by the
* port_info struct supplied to the registration call.
- * @port_num: NVME-FC transport subsytem port number
+ * @port_num: NVME-FC transport subsystem port number
* @node_name: FC WWNN for the port
* @port_name: FC WWPN for the port
* @private: pointer to memory allocated alongside the local port
@@ -697,19 +776,25 @@ struct nvmet_fc_target_port {
* Entrypoint is Mandatory.
*
* @xmt_ls_rsp: Called to transmit the response to a FC-NVME FC-4 LS service.
- * The nvmefc_tgt_ls_req structure is the same LLDD-supplied exchange
+ * The nvmefc_ls_rsp structure is the same LLDD-supplied exchange
* structure specified in the nvmet_fc_rcv_ls_req() call made when
- * the LS request was received. The structure will fully describe
+ * the LS request was received. The structure will fully describe
* the buffers for the response payload and the dma address of the
- * payload. The LLDD is to transmit the response (or return a non-zero
- * errno status), and upon completion of the transmit, call the
- * "done" routine specified in the nvmefc_tgt_ls_req structure
- * (argument to done is the ls reqwuest structure itself).
- * After calling the done routine, the LLDD shall consider the
- * LS handling complete and the nvmefc_tgt_ls_req structure may
- * be freed/released.
+ * payload. The LLDD is to transmit the response (or return a
+ * non-zero errno status), and upon completion of the transmit, call
+ * the "done" routine specified in the nvmefc_ls_rsp structure
+ * (argument to done is the address of the nvmefc_ls_rsp structure
+ * itself). Upon the completion of the done() routine, the LLDD shall
+ * consider the LS handling complete and the nvmefc_ls_rsp structure
+ * may be freed/released.
+ * The transport will always call the xmt_ls_rsp() routine for any
+ * LS received.
* Entrypoint is Mandatory.
*
+ * @map_queues: This function lets the driver expose the queue mapping
+ * to the block layer.
+ * Entrypoint is Optional.
+ *
* @fcp_op: Called to perform a data transfer or transmit a response.
* The nvmefc_tgt_fcp_req structure is the same LLDD-supplied
* exchange structure specified in the nvmet_fc_rcv_fcp_req() call
@@ -802,6 +887,39 @@ struct nvmet_fc_target_port {
* should cause the initiator to rescan the discovery controller
* on the targetport.
*
+ * @ls_req: Called to issue a FC-NVME FC-4 LS service request.
+ * The nvme_fc_ls_req structure will fully describe the buffers for
+ * the request payload and where to place the response payload.
+ * The targetport that is to issue the LS request is identified by
+ * the targetport argument. The remote port that is to receive the
+ * LS request is identified by the hosthandle argument. The nvmet-fc
+ * transport is only allowed to issue FC-NVME LS's on behalf of an
+ * association that was created prior by a Create Association LS.
+ * The hosthandle will originate from the LLDD in the struct
+ * nvmefc_ls_rsp structure for the Create Association LS that
+ * was delivered to the transport. The transport will save the
+ * hosthandle as an attribute of the association. If the LLDD
+ * loses connectivity with the remote port, it must call the
+ * nvmet_fc_invalidate_host() routine to remove any references to
+ * the remote port in the transport.
+ * The LLDD is to allocate an exchange, issue the LS request, obtain
+ * the LS response, and call the "done" routine specified in the
+ * request structure (argument to done is the ls request structure
+ * itself).
+ * Entrypoint is Optional - but highly recommended.
+ *
+ * @ls_abort: called to request the LLDD to abort the indicated ls request.
+ * The call may return before the abort has completed. After aborting
+ * the request, the LLDD must still call the ls request done routine
+ * indicating an FC transport Aborted status.
+ * Entrypoint is Mandatory if the ls_req entry point is specified.
+ *
+ * @host_release: called to inform the LLDD that the request to invalidate
+ * the host port indicated by the hosthandle has been fully completed.
+ * No associations exist with the host port and there will be no
+ * further references to hosthandle.
+ * Entrypoint is Mandatory if the lldd calls nvmet_fc_invalidate_host().
+ *
* @max_hw_queues: indicates the maximum number of hw queues the LLDD
* supports for cpu affinitization.
* Value is Mandatory. Must be at least 1.
@@ -830,11 +948,19 @@ struct nvmet_fc_target_port {
 * area solely for the use of the LLDD and its location is specified by
* the targetport->private pointer.
* Value is Mandatory. Allowed to be zero.
+ *
+ * @lsrqst_priv_sz: The LLDD sets this field to the amount of additional
+ * memory that it would like nvmet-fc layer to allocate on the LLDD's
+ * behalf whenever a ls request structure is allocated. The additional
+ * memory area is solely for use by the LLDD and its location is
+ * specified by the ls_request->private pointer.
+ * Value is Mandatory. Allowed to be zero.
+ *
*/
struct nvmet_fc_target_template {
void (*targetport_delete)(struct nvmet_fc_target_port *tgtport);
int (*xmt_ls_rsp)(struct nvmet_fc_target_port *tgtport,
- struct nvmefc_tgt_ls_req *tls_req);
+ struct nvmefc_ls_rsp *ls_rsp);
int (*fcp_op)(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq);
void (*fcp_abort)(struct nvmet_fc_target_port *tgtport,
@@ -844,6 +970,11 @@ struct nvmet_fc_target_template {
void (*defer_rcv)(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq);
void (*discovery_event)(struct nvmet_fc_target_port *tgtport);
+ int (*ls_req)(struct nvmet_fc_target_port *targetport,
+ void *hosthandle, struct nvmefc_ls_req *lsreq);
+ void (*ls_abort)(struct nvmet_fc_target_port *targetport,
+ void *hosthandle, struct nvmefc_ls_req *lsreq);
+ void (*host_release)(void *hosthandle);
u32 max_hw_queues;
u16 max_sgl_segments;
@@ -852,7 +983,9 @@ struct nvmet_fc_target_template {
u32 target_features;
+ /* sizes of additional private data for data structures */
u32 target_priv_sz;
+ u32 lsrqst_priv_sz;
};
@@ -863,15 +996,71 @@ int nvmet_fc_register_targetport(struct nvmet_fc_port_info *portinfo,
int nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *tgtport);
+/*
+ * Routine called to pass a NVME-FC LS request, received by the lldd,
+ * to the nvmet-fc transport.
+ *
+ * If the return value is zero: the LS was successfully accepted by the
+ * transport.
+ * If the return value is non-zero: the transport has not accepted the
+ * LS. The lldd should ABTS-LS the LS.
+ *
+ * Note: if the LLDD receives an ABTS for the LS prior to the transport
+ * calling the ops->xmt_ls_rsp() routine to transmit a response, the LLDD
+ * shall mark the LS as aborted, and when xmt_ls_rsp() is called: the
+ * response shall not be transmitted and the struct nvmefc_ls_rsp done
+ * routine shall be called. The LLDD may transmit the ABTS response as
+ * soon as the LS was marked or can delay until the xmt_ls_rsp() call is
+ * made.
+ * Note: if an RCV LS was successfully posted to the transport and the
+ * targetport is then unregistered before xmt_ls_rsp() was called for
+ * the lsrsp structure, the transport will still call xmt_ls_rsp()
+ * afterward to clean up the outstanding lsrsp structure. The LLDD should
+ * noop the transmission of the rsp and call the lsrsp->done() routine
+ * to allow the lsrsp structure to be released.
+ */
int nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *tgtport,
- struct nvmefc_tgt_ls_req *lsreq,
+ void *hosthandle,
+ struct nvmefc_ls_rsp *rsp,
void *lsreqbuf, u32 lsreqbuf_len);
+/*
+ * Routine called by the LLDD whenever it has a logout or loss of
+ * connectivity to a NVME-FC host port for which there had been active
+ * NVMe controllers. The host port is indicated by the
+ * hosthandle. The hosthandle is given to the nvmet-fc transport
+ * when a NVME LS was received, typically to create a new association.
+ * The nvmet-fc transport will cache the hostport value with the
+ * association for use in LS requests for the association.
+ * When the LLDD calls this routine, the nvmet-fc transport will
+ * immediately terminate all associations that were created with
+ * the hosthandle host port.
+ * The LLDD, after calling this routine and having control returned,
+ * must assume the transport may subsequently utilize hosthandle as
+ * part of sending LS's to terminate the association. The LLDD
+ * should reject the LS's if they are attempted.
+ * Once the last association has terminated for the hosthandle host
+ * port, the nvmet-fc transport will call the ops->host_release()
+ * callback. As of the callback, the nvmet-fc transport will no
+ * longer reference hosthandle.
+ */
+void nvmet_fc_invalidate_host(struct nvmet_fc_target_port *tgtport,
+ void *hosthandle);
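
A sketch of the connectivity-loss flow described above; the rport bookkeeping structure is hypothetical:

static void example_host_connectivity_lost(struct example_rport *rp)
{
	nvmet_fc_invalidate_host(rp->tgtport, rp->hosthandle);

	/*
	 * Associations now tear down. Reject any LS the transport
	 * still sends against rp->hosthandle; ops->host_release()
	 * fires once the last association is gone.
	 */
}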
+
+/*
+ * If nvmet_fc_rcv_fcp_req returns non-zero, the transport has not accepted
+ * the FCP cmd. The lldd should ABTS-LS the cmd.
+ */
int nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq,
void *cmdiubuf, u32 cmdiubuf_len);
void nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq);
+/*
+ * add a define, visible to the compiler, that indicates support
+ * for the feature. Allows for conditional compilation in LLDDs.
+ */
+#define NVME_FC_FEAT_UUID 0x0001
#endif /* _NVME_FC_DRIVER_H */
diff --git a/include/linux/nvme-fc.h b/include/linux/nvme-fc.h
index e8c30b39bb27..51fe44e0328b 100644
--- a/include/linux/nvme-fc.h
+++ b/include/linux/nvme-fc.h
@@ -4,8 +4,8 @@
*/
/*
- * This file contains definitions relative to FC-NVME-2 r1.06
- * (T11-2019-00210-v001).
+ * This file contains definitions relative to FC-NVME-2 r1.08
+ * (T11-2019-00210-v004).
*/
#ifndef _NVME_FC_H
@@ -81,7 +81,8 @@ struct nvme_fc_ersp_iu {
};
-#define FCNVME_NVME_SR_OPCODE 0x01
+#define FCNVME_NVME_SR_OPCODE 0x01
+#define FCNVME_NVME_SR_RSP_OPCODE 0x02
struct nvme_fc_nvme_sr_iu {
__u8 fc_id;
@@ -94,7 +95,7 @@ struct nvme_fc_nvme_sr_iu {
enum {
FCNVME_SRSTAT_ACC = 0x0,
- FCNVME_SRSTAT_INV_FCID = 0x1,
+ /* reserved 0x1 */
/* reserved 0x2 */
FCNVME_SRSTAT_LOGICAL_ERR = 0x3,
FCNVME_SRSTAT_INV_QUALIF = 0x4,
@@ -397,7 +398,7 @@ struct fcnvme_ls_disconnect_conn_rqst {
struct fcnvme_ls_rqst_w0 w0;
__be32 desc_list_len;
struct fcnvme_lsdesc_assoc_id associd;
- struct fcnvme_lsdesc_disconn_cmd connectid;
+ struct fcnvme_lsdesc_conn_id connectid;
};
struct fcnvme_ls_disconnect_conn_acc {
diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h
index 3ec8e50efa16..4dd7e6fe92fb 100644
--- a/include/linux/nvme-rdma.h
+++ b/include/linux/nvme-rdma.h
@@ -6,6 +6,8 @@
#ifndef _LINUX_NVME_RDMA_H
#define _LINUX_NVME_RDMA_H
+#define NVME_RDMA_MAX_QUEUE_SIZE 128
+
enum nvme_rdma_cm_fmt {
NVME_RDMA_CM_FMT_1_0 = 0x0,
};
diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h
index 959e0bd9a913..75470159a194 100644
--- a/include/linux/nvme-tcp.h
+++ b/include/linux/nvme-tcp.h
@@ -12,6 +12,7 @@
#define NVME_TCP_DISC_PORT 8009
#define NVME_TCP_ADMIN_CCSZ SZ_8K
#define NVME_TCP_DIGEST_LENGTH 4
+#define NVME_TCP_MIN_MAXH2CDATA 4096
enum nvme_tcp_pfv {
NVME_TCP_PFV_1_0 = 0x0,
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 3d5189f46cb1..050d7d0cd81b 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -19,6 +19,7 @@
#define NVMF_TRSVCID_SIZE 32
#define NVMF_TRADDR_SIZE 256
#define NVMF_TSAS_SIZE 256
+#define NVMF_AUTH_HASH_LEN 64
#define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery"
@@ -27,8 +28,26 @@
#define NVME_NSID_ALL 0xffffffff
enum nvme_subsys_type {
- NVME_NQN_DISC = 1, /* Discovery type target subsystem */
- NVME_NQN_NVME = 2, /* NVME type target subsystem */
+ /* Referral to another discovery type target subsystem */
+ NVME_NQN_DISC = 1,
+
+ /* NVME type target subsystem */
+ NVME_NQN_NVME = 2,
+
+ /* Current discovery type target subsystem */
+ NVME_NQN_CURR = 3,
+};
+
+enum nvme_ctrl_type {
+ NVME_CTRL_IO = 1, /* I/O controller */
+ NVME_CTRL_DISC = 2, /* Discovery controller */
+ NVME_CTRL_ADMIN = 3, /* Administrative controller */
+};
+
+enum nvme_dctype {
+ NVME_DCTYPE_NOT_REPORTED = 0,
+ NVME_DCTYPE_DDC = 1, /* Direct Discovery Controller */
+ NVME_DCTYPE_CDC = 2, /* Central Discovery Controller */
};
/* Address Family codes for Discovery Log Page entry ADRFAM field */
@@ -38,6 +57,8 @@ enum {
NVMF_ADDR_FAMILY_IP6 = 2, /* IP6 */
NVMF_ADDR_FAMILY_IB = 3, /* InfiniBand */
NVMF_ADDR_FAMILY_FC = 4, /* Fibre Channel */
+ NVMF_ADDR_FAMILY_LOOP = 254, /* Reserved for host usage */
+ NVMF_ADDR_FAMILY_MAX,
};
/* Transport Type codes for Discovery Log Page entry TRTYPE field */
@@ -114,6 +135,10 @@ enum {
NVME_REG_BPMBL = 0x0048, /* Boot Partition Memory Buffer
* Location
*/
+ NVME_REG_CMBMSC = 0x0050, /* Controller Memory Buffer Memory
+ * Space Control
+ */
+ NVME_REG_CRTO = 0x0068, /* Controller Ready Timeouts */
NVME_REG_PMRCAP = 0x0e00, /* Persistent Memory Capabilities */
NVME_REG_PMRCTL = 0x0e04, /* Persistent Memory Region Control */
NVME_REG_PMRSTS = 0x0e08, /* Persistent Memory Region Status */
@@ -130,12 +155,17 @@ enum {
#define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
#define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
#define NVME_CAP_NSSRC(cap) (((cap) >> 36) & 0x1)
+#define NVME_CAP_CSS(cap) (((cap) >> 37) & 0xff)
#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
#define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
+#define NVME_CAP_CMBS(cap) (((cap) >> 57) & 0x1)
#define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7)
#define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff)
+#define NVME_CRTO_CRIMT(crto) ((crto) >> 16)
+#define NVME_CRTO_CRWMT(crto) ((crto) & 0xffff)
+
enum {
NVME_CMBSZ_SQS = 1 << 0,
NVME_CMBSZ_CQS = 1 << 1,
@@ -160,7 +190,6 @@ enum {
enum {
NVME_CC_ENABLE = 1 << 0,
- NVME_CC_CSS_NVM = 0 << 4,
NVME_CC_EN_SHIFT = 0,
NVME_CC_CSS_SHIFT = 4,
NVME_CC_MPS_SHIFT = 7,
@@ -168,6 +197,9 @@ enum {
NVME_CC_SHN_SHIFT = 14,
NVME_CC_IOSQES_SHIFT = 16,
NVME_CC_IOCQES_SHIFT = 20,
+ NVME_CC_CSS_NVM = 0 << NVME_CC_CSS_SHIFT,
+ NVME_CC_CSS_CSI = 6 << NVME_CC_CSS_SHIFT,
+ NVME_CC_CSS_MASK = 7 << NVME_CC_CSS_SHIFT,
NVME_CC_AMS_RR = 0 << NVME_CC_AMS_SHIFT,
NVME_CC_AMS_WRRU = 1 << NVME_CC_AMS_SHIFT,
NVME_CC_AMS_VS = 7 << NVME_CC_AMS_SHIFT,
@@ -177,6 +209,10 @@ enum {
NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT,
NVME_CC_IOSQES = NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT,
NVME_CC_IOCQES = NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT,
+ NVME_CC_CRIME = 1 << 24,
+};
+
+enum {
NVME_CSTS_RDY = 1 << 0,
NVME_CSTS_CFS = 1 << 1,
NVME_CSTS_NSSRO = 1 << 4,
@@ -187,6 +223,21 @@ enum {
NVME_CSTS_SHST_MASK = 3 << 2,
};
+enum {
+ NVME_CMBMSC_CRE = 1 << 0,
+ NVME_CMBMSC_CMSE = 1 << 1,
+};
+
+enum {
+ NVME_CAP_CSS_NVM = 1 << 0,
+ NVME_CAP_CSS_CSI = 1 << 6,
+};
+
+enum {
+ NVME_CAP_CRMS_CRWMS = 1ULL << 59,
+ NVME_CAP_CRMS_CRIMS = 1ULL << 60,
+};
+
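A hedged sketch of how the CRTO and CAP.CRMS fields combine, loosely mirroring controller bring-up logic (CRTO values are in 500 ms units, like CAP.TO):

static unsigned int example_ready_timeout_ms(u64 cap, u32 crto)
{
	u32 to = (cap & NVME_CAP_CRMS_CRIMS) ?
			NVME_CRTO_CRIMT(crto) :	/* CC.CRIME will be set */
			NVME_CRTO_CRWMT(crto);

	return to * 500;
}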
struct nvme_id_power_state {
__le16 max_power; /* centiwatts */
__u8 rsvd2;
@@ -213,6 +264,7 @@ enum {
enum nvme_ctrl_attr {
NVME_CTRL_ATTR_HID_128_BIT = (1 << 0),
NVME_CTRL_ATTR_TBKAS = (1 << 6),
+ NVME_CTRL_ATTR_ELBAS = (1 << 15),
};
struct nvme_id_ctrl {
@@ -231,7 +283,9 @@ struct nvme_id_ctrl {
__le32 rtd3e;
__le32 oaes;
__le32 ctratt;
- __u8 rsvd100[28];
+ __u8 rsvd100[11];
+ __u8 cntrltype;
+ __u8 fguid[16];
__le16 crdt1;
__le16 crdt2;
__le16 crdt3;
@@ -293,19 +347,26 @@ struct nvme_id_ctrl {
__le16 icdoff;
__u8 ctrattr;
__u8 msdbd;
- __u8 rsvd1804[244];
+ __u8 rsvd1804[2];
+ __u8 dctype;
+ __u8 rsvd1807[241];
struct nvme_id_power_state psd[32];
__u8 vs[1024];
};
enum {
+ NVME_CTRL_CMIC_MULTI_PORT = 1 << 0,
+ NVME_CTRL_CMIC_MULTI_CTRL = 1 << 1,
+ NVME_CTRL_CMIC_ANA = 1 << 3,
NVME_CTRL_ONCS_COMPARE = 1 << 0,
NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1,
NVME_CTRL_ONCS_DSM = 1 << 2,
NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3,
+ NVME_CTRL_ONCS_RESERVATIONS = 1 << 5,
NVME_CTRL_ONCS_TIMESTAMP = 1 << 6,
NVME_CTRL_VWC_PRESENT = 1 << 0,
NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
+ NVME_CTRL_OACS_NS_MNGT_SUPP = 1 << 3,
NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8,
NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1,
@@ -360,16 +421,90 @@ struct nvme_id_ns {
__le16 endgid;
__u8 nguid[16];
__u8 eui64[8];
- struct nvme_lbaf lbaf[16];
- __u8 rsvd192[192];
+ struct nvme_lbaf lbaf[64];
__u8 vs[3712];
};
+/* I/O Command Set Independent Identify Namespace Data Structure */
+struct nvme_id_ns_cs_indep {
+ __u8 nsfeat;
+ __u8 nmic;
+ __u8 rescap;
+ __u8 fpi;
+ __le32 anagrpid;
+ __u8 nsattr;
+ __u8 rsvd9;
+ __le16 nvmsetid;
+ __le16 endgid;
+ __u8 nstat;
+ __u8 rsvd15[4081];
+};
+
+struct nvme_zns_lbafe {
+ __le64 zsze;
+ __u8 zdes;
+ __u8 rsvd9[7];
+};
+
+struct nvme_id_ns_zns {
+ __le16 zoc;
+ __le16 ozcs;
+ __le32 mar;
+ __le32 mor;
+ __le32 rrl;
+ __le32 frl;
+ __u8 rsvd20[2796];
+ struct nvme_zns_lbafe lbafe[64];
+ __u8 vs[256];
+};
+
+struct nvme_id_ctrl_zns {
+ __u8 zasl;
+ __u8 rsvd1[4095];
+};
+
+struct nvme_id_ns_nvm {
+ __le64 lbstm;
+ __u8 pic;
+ __u8 rsvd9[3];
+ __le32 elbaf[64];
+ __u8 rsvd268[3828];
+};
+
+enum {
+ NVME_ID_NS_NVM_STS_MASK = 0x3f,
+ NVME_ID_NS_NVM_GUARD_SHIFT = 7,
+ NVME_ID_NS_NVM_GUARD_MASK = 0x3,
+};
+
+static inline __u8 nvme_elbaf_sts(__u32 elbaf)
+{
+ return elbaf & NVME_ID_NS_NVM_STS_MASK;
+}
+
+static inline __u8 nvme_elbaf_guard_type(__u32 elbaf)
+{
+ return (elbaf >> NVME_ID_NS_NVM_GUARD_SHIFT) & NVME_ID_NS_NVM_GUARD_MASK;
+}
+
+struct nvme_id_ctrl_nvm {
+ __u8 vsl;
+ __u8 wzsl;
+ __u8 wusl;
+ __u8 dmrl;
+ __le32 dmrsl;
+ __le64 dmsl;
+ __u8 rsvd16[4080];
+};
+
enum {
NVME_ID_CNS_NS = 0x00,
NVME_ID_CNS_CTRL = 0x01,
NVME_ID_CNS_NS_ACTIVE_LIST = 0x02,
NVME_ID_CNS_NS_DESC_LIST = 0x03,
+ NVME_ID_CNS_CS_NS = 0x05,
+ NVME_ID_CNS_CS_CTRL = 0x06,
+ NVME_ID_CNS_NS_CS_INDEP = 0x08,
NVME_ID_CNS_NS_PRESENT_LIST = 0x10,
NVME_ID_CNS_NS_PRESENT = 0x11,
NVME_ID_CNS_CTRL_NS_LIST = 0x12,
@@ -380,6 +515,11 @@ enum {
};
enum {
+ NVME_CSI_NVM = 0,
+ NVME_CSI_ZNS = 2,
+};
+
+enum {
NVME_DIR_IDENTIFY = 0x00,
NVME_DIR_STREAMS = 0x01,
NVME_DIR_SND_ID_OP_ENABLE = 0x01,
@@ -394,8 +534,14 @@ enum {
enum {
NVME_NS_FEAT_THIN = 1 << 0,
+ NVME_NS_FEAT_ATOMICS = 1 << 1,
+ NVME_NS_FEAT_IO_OPT = 1 << 4,
+ NVME_NS_ATTR_RO = 1 << 0,
NVME_NS_FLBAS_LBA_MASK = 0xf,
+ NVME_NS_FLBAS_LBA_UMASK = 0x60,
+ NVME_NS_FLBAS_LBA_SHIFT = 1,
NVME_NS_FLBAS_META_EXT = 0x10,
+ NVME_NS_NMIC_SHARED = 1 << 0,
NVME_LBAF_RP_BEST = 0,
NVME_LBAF_RP_BETTER = 1,
NVME_LBAF_RP_GOOD = 2,
@@ -412,6 +558,28 @@ enum {
NVME_NS_DPS_PI_TYPE3 = 3,
};
+enum {
+ NVME_NSTAT_NRDY = 1 << 0,
+};
+
+enum {
+ NVME_NVM_NS_16B_GUARD = 0,
+ NVME_NVM_NS_32B_GUARD = 1,
+ NVME_NVM_NS_64B_GUARD = 2,
+};
+
+static inline __u8 nvme_lbaf_index(__u8 flbas)
+{
+ return (flbas & NVME_NS_FLBAS_LBA_MASK) |
+ ((flbas & NVME_NS_FLBAS_LBA_UMASK) >> NVME_NS_FLBAS_LBA_SHIFT);
+}
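
A sketch tying the two helpers together: pick the active LBA format index from FLBAS, then decode that entry's extended LBA format word ("id" and "id_nvm" are previously fetched Identify structures):

static u8 example_guard_type(struct nvme_id_ns *id,
			     struct nvme_id_ns_nvm *id_nvm)
{
	u8 idx = nvme_lbaf_index(id->flbas);
	u32 elbaf = le32_to_cpu(id_nvm->elbaf[idx]);

	/* e.g. NVME_NVM_NS_64B_GUARD */
	return nvme_elbaf_guard_type(elbaf);
}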
+
+/* Identify Namespace Metadata Capabilities (MC): */
+enum {
+ NVME_MC_EXTENDED_LBA = (1 << 0),
+ NVME_MC_METADATA_PTR = (1 << 1),
+};
+
struct nvme_ns_id_desc {
__u8 nidt;
__u8 nidl;
@@ -421,11 +589,13 @@ struct nvme_ns_id_desc {
#define NVME_NIDT_EUI64_LEN 8
#define NVME_NIDT_NGUID_LEN 16
#define NVME_NIDT_UUID_LEN 16
+#define NVME_NIDT_CSI_LEN 1
enum {
NVME_NIDT_EUI64 = 0x01,
NVME_NIDT_NGUID = 0x02,
NVME_NIDT_UUID = 0x03,
+ NVME_NIDT_CSI = 0x04,
};
struct nvme_smart_log {
@@ -505,6 +675,27 @@ struct nvme_ana_rsp_hdr {
__le16 rsvd10[3];
};
+struct nvme_zone_descriptor {
+ __u8 zt;
+ __u8 zs;
+ __u8 za;
+ __u8 rsvd3[5];
+ __le64 zcap;
+ __le64 zslba;
+ __le64 wp;
+ __u8 rsvd32[32];
+};
+
+enum {
+ NVME_ZONE_TYPE_SEQWRITE_REQ = 0x2,
+};
+
+struct nvme_zone_report {
+ __le64 nr_zones;
+ __u8 resv8[56];
+ struct nvme_zone_descriptor entries[];
+};
+
enum {
NVME_SMART_CRIT_SPARE = 1 << 0,
NVME_SMART_CRIT_TEMPERATURE = 1 << 1,
@@ -522,6 +713,10 @@ enum {
};
enum {
+ NVME_AER_ERROR_PERSIST_INT_ERR = 0x03,
+};
+
+enum {
NVME_AER_NOTICE_NS_CHANGED = 0x00,
NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
NVME_AER_NOTICE_ANA = 0x03,
@@ -546,8 +741,8 @@ struct nvme_lba_range_type {
__u8 type;
__u8 attributes;
__u8 rsvd2[14];
- __u64 slba;
- __u64 nlb;
+ __le64 slba;
+ __le64 nlb;
__u8 guid[16];
__u8 rsvd48[16];
};
@@ -599,6 +794,9 @@ enum nvme_opcode {
nvme_cmd_resv_report = 0x0e,
nvme_cmd_resv_acquire = 0x11,
nvme_cmd_resv_release = 0x15,
+ nvme_cmd_zone_mgmt_send = 0x79,
+ nvme_cmd_zone_mgmt_recv = 0x7a,
+ nvme_cmd_zone_append = 0x7d,
};
#define nvme_opcode_name(opcode) { opcode, #opcode }
@@ -614,7 +812,11 @@ enum nvme_opcode {
nvme_opcode_name(nvme_cmd_resv_register), \
nvme_opcode_name(nvme_cmd_resv_report), \
nvme_opcode_name(nvme_cmd_resv_acquire), \
- nvme_opcode_name(nvme_cmd_resv_release))
+ nvme_opcode_name(nvme_cmd_resv_release), \
+ nvme_opcode_name(nvme_cmd_zone_mgmt_send), \
+ nvme_opcode_name(nvme_cmd_zone_mgmt_recv), \
+ nvme_opcode_name(nvme_cmd_zone_append))
+
/*
@@ -709,12 +911,14 @@ struct nvme_common_command {
__le32 cdw2[2];
__le64 metadata;
union nvme_data_ptr dptr;
+ struct_group(cdws,
__le32 cdw10;
__le32 cdw11;
__le32 cdw12;
__le32 cdw13;
__le32 cdw14;
__le32 cdw15;
+ );
};
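
The struct_group() wrapper exists so cdw10..cdw15 can be addressed as one 24-byte member, e.g. for a bounds-safe copy (sketch):

static void example_copy_cdws(struct nvme_command *cmd, const void *cdws)
{
	/* one memcpy over the whole cdw10..cdw15 range */
	memcpy(&cmd->common.cdws, cdws, sizeof(cmd->common.cdws));
}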
struct nvme_rw_command {
@@ -722,7 +926,8 @@ struct nvme_rw_command {
__u8 flags;
__u16 command_id;
__le32 nsid;
- __u64 rsvd2;
+ __le32 cdw2;
+ __le32 cdw3;
__le64 metadata;
union nvme_data_ptr dptr;
__le64 slba;
@@ -737,6 +942,7 @@ struct nvme_rw_command {
enum {
NVME_RW_LR = 1 << 15,
NVME_RW_FUA = 1 << 14,
+ NVME_RW_APPEND_PIREMAP = 1 << 9,
NVME_RW_DSM_FREQ_UNSPEC = 0,
NVME_RW_DSM_FREQ_TYPICAL = 1,
NVME_RW_DSM_FREQ_RARE = 2,
@@ -802,6 +1008,60 @@ struct nvme_write_zeroes_cmd {
__le16 appmask;
};
+enum nvme_zone_mgmt_action {
+ NVME_ZONE_CLOSE = 0x1,
+ NVME_ZONE_FINISH = 0x2,
+ NVME_ZONE_OPEN = 0x3,
+ NVME_ZONE_RESET = 0x4,
+ NVME_ZONE_OFFLINE = 0x5,
+ NVME_ZONE_SET_DESC_EXT = 0x10,
+};
+
+struct nvme_zone_mgmt_send_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 nsid;
+ __le32 cdw2[2];
+ __le64 metadata;
+ union nvme_data_ptr dptr;
+ __le64 slba;
+ __le32 cdw12;
+ __u8 zsa;
+ __u8 select_all;
+ __u8 rsvd13[2];
+ __le32 cdw14[2];
+};
+
+struct nvme_zone_mgmt_recv_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 nsid;
+ __le64 rsvd2[2];
+ union nvme_data_ptr dptr;
+ __le64 slba;
+ __le32 numd;
+ __u8 zra;
+ __u8 zrasf;
+ __u8 pr;
+ __u8 rsvd13;
+ __le32 cdw14[2];
+};
+
+enum {
+ NVME_ZRA_ZONE_REPORT = 0,
+ NVME_ZRASF_ZONE_REPORT_ALL = 0,
+ NVME_ZRASF_ZONE_STATE_EMPTY = 0x01,
+ NVME_ZRASF_ZONE_STATE_IMP_OPEN = 0x02,
+ NVME_ZRASF_ZONE_STATE_EXP_OPEN = 0x03,
+ NVME_ZRASF_ZONE_STATE_CLOSED = 0x04,
+ NVME_ZRASF_ZONE_STATE_READONLY = 0x05,
+ NVME_ZRASF_ZONE_STATE_FULL = 0x06,
+ NVME_ZRASF_ZONE_STATE_OFFLINE = 0x07,
+ NVME_REPORT_ZONE_PARTIAL = 1,
+};
+
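A hedged sketch assembling a full "report zones" command from these definitions, roughly as a zoned-namespace driver would (numd is the 0's-based dword count of the receive buffer; the zmr union member is added later in this patch):

static void example_init_report_zones(struct nvme_command *c, u32 nsid,
				      u64 slba, size_t buflen)
{
	memset(c, 0, sizeof(*c));
	c->zmr.opcode = nvme_cmd_zone_mgmt_recv;
	c->zmr.nsid = cpu_to_le32(nsid);
	c->zmr.slba = cpu_to_le64(slba);
	c->zmr.numd = cpu_to_le32((buflen >> 2) - 1);
	c->zmr.zra = NVME_ZRA_ZONE_REPORT;
	c->zmr.zrasf = NVME_ZRASF_ZONE_REPORT_ALL;
	c->zmr.pr = NVME_REPORT_ZONE_PARTIAL;
}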
/* Features */
enum {
@@ -821,11 +1081,14 @@ enum {
struct nvme_feat_host_behavior {
__u8 acre;
- __u8 resv1[511];
+ __u8 etdas;
+ __u8 lbafee;
+ __u8 resv1[509];
};
enum {
NVME_ENABLE_ACRE = 1,
+ NVME_ENABLE_LBAFEE = 1,
};
/* Admin commands */
@@ -858,6 +1121,7 @@ enum nvme_admin_opcode {
nvme_admin_security_recv = 0x82,
nvme_admin_sanitize_nvm = 0x84,
nvme_admin_get_lba_status = 0x86,
+ nvme_admin_vendor_start = 0xC0,
};
#define nvme_admin_opcode_name(opcode) { opcode, #opcode }
@@ -921,6 +1185,8 @@ enum {
NVME_FEAT_RESV_MASK = 0x82,
NVME_FEAT_RESV_PERSIST = 0x83,
NVME_FEAT_WRITE_PROTECT = 0x84,
+ NVME_FEAT_VENDOR_START = 0xC0,
+ NVME_FEAT_VENDOR_END = 0xFF,
NVME_LOG_ERROR = 0x01,
NVME_LOG_SMART = 0x02,
NVME_LOG_FW_SLOT = 0x03,
@@ -958,7 +1224,9 @@ struct nvme_identify {
__u8 cns;
__u8 rsvd3;
__le16 ctrlid;
- __u32 rsvd11[5];
+ __u8 rsvd11[3];
+ __u8 csi;
+ __u32 rsvd12[4];
};
#define NVME_IDENTIFY_DATA_SIZE 4096
@@ -1072,7 +1340,9 @@ struct nvme_get_log_page_command {
};
__le64 lpo;
};
- __u32 rsvd14[2];
+ __u8 rsvd14[3];
+ __u8 csi;
+ __u32 rsvd15;
};
struct nvme_directive_cmd {
@@ -1104,6 +1374,8 @@ enum nvmf_capsule_command {
nvme_fabrics_type_property_set = 0x00,
nvme_fabrics_type_connect = 0x01,
nvme_fabrics_type_property_get = 0x04,
+ nvme_fabrics_type_auth_send = 0x05,
+ nvme_fabrics_type_auth_receive = 0x06,
};
#define nvme_fabrics_type_name(type) { type, #type }
@@ -1111,7 +1383,9 @@ enum nvmf_capsule_command {
__print_symbolic(type, \
nvme_fabrics_type_name(nvme_fabrics_type_property_set), \
nvme_fabrics_type_name(nvme_fabrics_type_connect), \
- nvme_fabrics_type_name(nvme_fabrics_type_property_get))
+ nvme_fabrics_type_name(nvme_fabrics_type_property_get), \
+ nvme_fabrics_type_name(nvme_fabrics_type_auth_send), \
+ nvme_fabrics_type_name(nvme_fabrics_type_auth_receive))
/*
* If not fabrics command, fctype will be ignored.
@@ -1144,6 +1418,12 @@ struct nvmf_common_command {
#define MAX_DISC_LOGS 255
+/* Discovery log page entry flags (EFLAGS): */
+enum {
+ NVME_DISC_EFLAGS_EPCSD = (1 << 1),
+ NVME_DISC_EFLAGS_DUPRETINFO = (1 << 0),
+};
+
/* Discovery log page entry */
struct nvmf_disc_rsp_page_entry {
__u8 trtype;
@@ -1153,7 +1433,8 @@ struct nvmf_disc_rsp_page_entry {
__le16 portid;
__le16 cntlid;
__le16 asqsz;
- __u8 resv8[22];
+ __le16 eflags;
+ __u8 resv10[20];
char trsvcid[NVMF_TRSVCID_SIZE];
__u8 resv64[192];
char subnqn[NVMF_NQN_FIELD_LEN];
@@ -1177,7 +1458,7 @@ struct nvmf_disc_rsp_page_hdr {
__le64 numrec;
__le16 recfmt;
__u8 resv14[1006];
- struct nvmf_disc_rsp_page_entry entries[0];
+ struct nvmf_disc_rsp_page_entry entries[];
};
enum {
@@ -1200,6 +1481,11 @@ struct nvmf_connect_command {
__u8 resv4[12];
};
+enum {
+ NVME_CONNECT_AUTHREQ_ASCR = (1U << 18),
+ NVME_CONNECT_AUTHREQ_ATR = (1U << 17),
+};
+
struct nvmf_connect_data {
uuid_t hostid;
__le16 cntlid;
@@ -1234,6 +1520,200 @@ struct nvmf_property_get_command {
__u8 resv4[16];
};
+struct nvmf_auth_common_command {
+ __u8 opcode;
+ __u8 resv1;
+ __u16 command_id;
+ __u8 fctype;
+ __u8 resv2[19];
+ union nvme_data_ptr dptr;
+ __u8 resv3;
+ __u8 spsp0;
+ __u8 spsp1;
+ __u8 secp;
+ __le32 al_tl;
+ __u8 resv4[16];
+};
+
+struct nvmf_auth_send_command {
+ __u8 opcode;
+ __u8 resv1;
+ __u16 command_id;
+ __u8 fctype;
+ __u8 resv2[19];
+ union nvme_data_ptr dptr;
+ __u8 resv3;
+ __u8 spsp0;
+ __u8 spsp1;
+ __u8 secp;
+ __le32 tl;
+ __u8 resv4[16];
+};
+
+struct nvmf_auth_receive_command {
+ __u8 opcode;
+ __u8 resv1;
+ __u16 command_id;
+ __u8 fctype;
+ __u8 resv2[19];
+ union nvme_data_ptr dptr;
+ __u8 resv3;
+ __u8 spsp0;
+ __u8 spsp1;
+ __u8 secp;
+ __le32 al;
+ __u8 resv4[16];
+};
+
+/* Value for secp */
+enum {
+ NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER = 0xe9,
+};
+
+/* Defined value for auth_type */
+enum {
+ NVME_AUTH_COMMON_MESSAGES = 0x00,
+ NVME_AUTH_DHCHAP_MESSAGES = 0x01,
+};
+
+/* Defined messages for auth_id */
+enum {
+ NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE = 0x00,
+ NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE = 0x01,
+ NVME_AUTH_DHCHAP_MESSAGE_REPLY = 0x02,
+ NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1 = 0x03,
+ NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 = 0x04,
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE2 = 0xf0,
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1 = 0xf1,
+};
+
+struct nvmf_auth_dhchap_protocol_descriptor {
+ __u8 authid;
+ __u8 rsvd;
+ __u8 halen;
+ __u8 dhlen;
+ __u8 idlist[60];
+};
+
+enum {
+ NVME_AUTH_DHCHAP_AUTH_ID = 0x01,
+};
+
+/* Defined hash functions for DH-HMAC-CHAP authentication */
+enum {
+ NVME_AUTH_HASH_SHA256 = 0x01,
+ NVME_AUTH_HASH_SHA384 = 0x02,
+ NVME_AUTH_HASH_SHA512 = 0x03,
+ NVME_AUTH_HASH_INVALID = 0xff,
+};
+
+/* Defined Diffie-Hellman group identifiers for DH-HMAC-CHAP authentication */
+enum {
+ NVME_AUTH_DHGROUP_NULL = 0x00,
+ NVME_AUTH_DHGROUP_2048 = 0x01,
+ NVME_AUTH_DHGROUP_3072 = 0x02,
+ NVME_AUTH_DHGROUP_4096 = 0x03,
+ NVME_AUTH_DHGROUP_6144 = 0x04,
+ NVME_AUTH_DHGROUP_8192 = 0x05,
+ NVME_AUTH_DHGROUP_INVALID = 0xff,
+};
+
+union nvmf_auth_protocol {
+ struct nvmf_auth_dhchap_protocol_descriptor dhchap;
+};
+
+struct nvmf_auth_dhchap_negotiate_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd;
+ __le16 t_id;
+ __u8 sc_c;
+ __u8 napd;
+ union nvmf_auth_protocol auth_protocol[];
+};
+
+struct nvmf_auth_dhchap_challenge_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __u16 rsvd1;
+ __le16 t_id;
+ __u8 hl;
+ __u8 rsvd2;
+ __u8 hashid;
+ __u8 dhgid;
+ __le16 dhvlen;
+ __le32 seqnum;
+ /* 'hl' bytes of challenge value */
+ __u8 cval[];
+ /* followed by 'dhvlen' bytes of DH value */
+};
+
+struct nvmf_auth_dhchap_reply_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd1;
+ __le16 t_id;
+ __u8 hl;
+ __u8 rsvd2;
+ __u8 cvalid;
+ __u8 rsvd3;
+ __le16 dhvlen;
+ __le32 seqnum;
+ /* 'hl' bytes of response data */
+ __u8 rval[];
+ /* followed by 'hl' bytes of Challenge value */
+ /* followed by 'dhvlen' bytes of DH value */
+};
+
+enum {
+ NVME_AUTH_DHCHAP_RESPONSE_VALID = (1 << 0),
+};
+
+struct nvmf_auth_dhchap_success1_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd1;
+ __le16 t_id;
+ __u8 hl;
+ __u8 rsvd2;
+ __u8 rvalid;
+ __u8 rsvd3[7];
+ /* 'hl' bytes of response value if 'rvalid' is set */
+ __u8 rval[];
+};
+
+struct nvmf_auth_dhchap_success2_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd1;
+ __le16 t_id;
+ __u8 rsvd2[10];
+};
+
+struct nvmf_auth_dhchap_failure_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd1;
+ __le16 t_id;
+ __u8 rescode;
+ __u8 rescode_exp;
+};
+
+enum {
+ NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED = 0x01,
+};
+
+enum {
+ NVME_AUTH_DHCHAP_FAILURE_FAILED = 0x01,
+ NVME_AUTH_DHCHAP_FAILURE_NOT_USABLE = 0x02,
+ NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH = 0x03,
+ NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE = 0x04,
+ NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE = 0x05,
+ NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD = 0x06,
+ NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE = 0x07,
+};
+
+
struct nvme_dbbuf {
__u8 opcode;
__u8 flags;
@@ -1269,12 +1749,17 @@ struct nvme_command {
struct nvme_format_cmd format;
struct nvme_dsm_cmd dsm;
struct nvme_write_zeroes_cmd write_zeroes;
+ struct nvme_zone_mgmt_send_cmd zms;
+ struct nvme_zone_mgmt_recv_cmd zmr;
struct nvme_abort_cmd abort;
struct nvme_get_log_page_command get_log_page;
struct nvmf_common_command fabrics;
struct nvmf_connect_command connect;
struct nvmf_property_set_command prop_set;
struct nvmf_property_get_command prop_get;
+ struct nvmf_auth_common_command auth_common;
+ struct nvmf_auth_send_command auth_send;
+ struct nvmf_auth_receive_command auth_receive;
struct nvme_dbbuf dbbuf;
struct nvme_directive_cmd directive;
};
@@ -1333,20 +1818,31 @@ enum {
NVME_SC_SGL_INVALID_DATA = 0xf,
NVME_SC_SGL_INVALID_METADATA = 0x10,
NVME_SC_SGL_INVALID_TYPE = 0x11,
-
+ NVME_SC_CMB_INVALID_USE = 0x12,
+ NVME_SC_PRP_INVALID_OFFSET = 0x13,
+ NVME_SC_ATOMIC_WU_EXCEEDED = 0x14,
+ NVME_SC_OP_DENIED = 0x15,
NVME_SC_SGL_INVALID_OFFSET = 0x16,
- NVME_SC_SGL_INVALID_SUBTYPE = 0x17,
-
+ NVME_SC_RESERVED = 0x17,
+ NVME_SC_HOST_ID_INCONSIST = 0x18,
+ NVME_SC_KA_TIMEOUT_EXPIRED = 0x19,
+ NVME_SC_KA_TIMEOUT_INVALID = 0x1A,
+ NVME_SC_ABORTED_PREEMPT_ABORT = 0x1B,
NVME_SC_SANITIZE_FAILED = 0x1C,
NVME_SC_SANITIZE_IN_PROGRESS = 0x1D,
-
+ NVME_SC_SGL_INVALID_GRANULARITY = 0x1E,
+ NVME_SC_CMD_NOT_SUP_CMB_QUEUE = 0x1F,
NVME_SC_NS_WRITE_PROTECTED = 0x20,
NVME_SC_CMD_INTERRUPTED = 0x21,
+ NVME_SC_TRANSIENT_TR_ERR = 0x22,
+ NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY = 0x24,
+ NVME_SC_INVALID_IO_CMD_SET = 0x2C,
NVME_SC_LBA_RANGE = 0x80,
NVME_SC_CAP_EXCEEDED = 0x81,
NVME_SC_NS_NOT_READY = 0x82,
NVME_SC_RESERVATION_CONFLICT = 0x83,
+ NVME_SC_FORMAT_IN_PROGRESS = 0x84,
/*
* Command Specific Status:
@@ -1379,8 +1875,15 @@ enum {
NVME_SC_NS_NOT_ATTACHED = 0x11a,
NVME_SC_THIN_PROV_NOT_SUPP = 0x11b,
NVME_SC_CTRL_LIST_INVALID = 0x11c,
+ NVME_SC_SELT_TEST_IN_PROGRESS = 0x11d,
NVME_SC_BP_WRITE_PROHIBITED = 0x11e,
+ NVME_SC_CTRL_ID_INVALID = 0x11f,
+ NVME_SC_SEC_CTRL_STATE_INVALID = 0x120,
+ NVME_SC_CTRL_RES_NUM_INVALID = 0x121,
+ NVME_SC_RES_ID_INVALID = 0x122,
NVME_SC_PMR_SAN_PROHIBITED = 0x123,
+ NVME_SC_ANA_GROUP_ID_INVALID = 0x124,
+ NVME_SC_ANA_ATTACH_FAILED = 0x125,
/*
* I/O Command Set Specific - NVM commands:
@@ -1403,6 +1906,18 @@ enum {
NVME_SC_AUTH_REQUIRED = 0x191,
/*
+ * I/O Command Set Specific - Zoned commands:
+ */
+ NVME_SC_ZONE_BOUNDARY_ERROR = 0x1b8,
+ NVME_SC_ZONE_FULL = 0x1b9,
+ NVME_SC_ZONE_READ_ONLY = 0x1ba,
+ NVME_SC_ZONE_OFFLINE = 0x1bb,
+ NVME_SC_ZONE_INVALID_WRITE = 0x1bc,
+ NVME_SC_ZONE_TOO_MANY_ACTIVE = 0x1bd,
+ NVME_SC_ZONE_TOO_MANY_OPEN = 0x1be,
+ NVME_SC_ZONE_INVALID_TRANSITION = 0x1bf,
+
+ /*
* Media and Data Integrity Errors:
*/
NVME_SC_WRITE_FAULT = 0x280,
@@ -1417,13 +1932,16 @@ enum {
/*
* Path-related Errors:
*/
+ NVME_SC_INTERNAL_PATH_ERROR = 0x300,
NVME_SC_ANA_PERSISTENT_LOSS = 0x301,
NVME_SC_ANA_INACCESSIBLE = 0x302,
NVME_SC_ANA_TRANSITION = 0x303,
+ NVME_SC_CTRL_PATH_ERROR = 0x360,
NVME_SC_HOST_PATH_ERROR = 0x370,
NVME_SC_HOST_ABORTED_CMD = 0x371,
NVME_SC_CRD = 0x1800,
+ NVME_SC_MORE = 0x2000,
NVME_SC_DNR = 0x4000,
};
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index d3776be48c53..980f9c9ac0bc 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -25,6 +25,7 @@ struct nvmem_cell_info {
unsigned int bytes;
unsigned int bit_offset;
unsigned int nbits;
+ struct device_node *np;
};
/**
@@ -61,8 +62,14 @@ void nvmem_cell_put(struct nvmem_cell *cell);
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len);
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len);
+int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val);
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val);
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val);
+int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val);
+int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
+ u32 *val);
+int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
+ u64 *val);
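
A usage sketch for the widened typed-read API; the cell names are hypothetical:

static int example_read_board_info(struct device *dev)
{
	u64 serial;
	u32 rev;
	int ret;

	ret = nvmem_cell_read_u64(dev, "serial", &serial);
	if (ret)
		return ret;

	/* cell may be 1..4 little-endian bytes; zero-extended into rev */
	return nvmem_cell_read_variable_le_u32(dev, "revision", &rev);
}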
/* direct nvmem device read/write interface */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *name);
@@ -138,6 +145,26 @@ static inline int nvmem_cell_read_u32(struct device *dev,
return -EOPNOTSUPP;
}
+static inline int nvmem_cell_read_u64(struct device *dev,
+ const char *cell_id, u64 *val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int nvmem_cell_read_variable_le_u32(struct device *dev,
+ const char *cell_id,
+ u32 *val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int nvmem_cell_read_variable_le_u64(struct device *dev,
+ const char *cell_id,
+ u64 *val)
+{
+ return -EOPNOTSUPP;
+}
+
static inline struct nvmem_device *nvmem_device_get(struct device *dev,
const char *name)
{
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index 6d6f8e5d24c9..50caa117cb62 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -19,12 +19,32 @@ typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset,
void *val, size_t bytes);
typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset,
void *val, size_t bytes);
+/* used for vendor specific post processing of cell data */
+typedef int (*nvmem_cell_post_process_t)(void *priv, const char *id, unsigned int offset,
+ void *buf, size_t bytes);
enum nvmem_type {
NVMEM_TYPE_UNKNOWN = 0,
NVMEM_TYPE_EEPROM,
NVMEM_TYPE_OTP,
NVMEM_TYPE_BATTERY_BACKED,
+ NVMEM_TYPE_FRAM,
+};
+
+#define NVMEM_DEVID_NONE (-1)
+#define NVMEM_DEVID_AUTO (-2)
+
+/**
+ * struct nvmem_keepout - NVMEM register keepout range.
+ *
+ * @start: The first byte offset to avoid.
+ * @end: One beyond the last byte offset to avoid.
+ * @value: The byte to fill reads with for this region.
+ */
+struct nvmem_keepout {
+ unsigned int start;
+ unsigned int end;
+ unsigned char value;
};
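
For example, a provider might shield a factory-locked region this way (offsets are hypothetical; ranges must be sorted, non-overlapping, and aligned to the device stride):

static const struct nvmem_keepout example_keepout[] = {
	{ .start = 0x10, .end = 0x20, .value = 0xff },	/* reads as 0xff */
};

/* in the provider's nvmem_config:
 *	.keepout  = example_keepout,
 *	.nkeepout = ARRAY_SIZE(example_keepout),
 */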
/**
@@ -36,17 +56,22 @@ enum nvmem_type {
* @owner: Pointer to exporter module. Used for refcounting.
* @cells: Optional array of pre-defined NVMEM cells.
* @ncells: Number of elements in cells.
+ * @keepout: Optional array of keepout ranges (sorted ascending by start).
+ * @nkeepout: Number of elements in the keepout array.
* @type: Type of the nvmem storage
* @read_only: Device is read-only.
 * @root_only: Device is accessible to root only.
+ * @of_node: If given, this will be used instead of the parent's of_node.
* @no_of_node: Device should not use the parent's of_node even if it's !NULL.
* @reg_read: Callback to read data.
* @reg_write: Callback to write data.
+ * @cell_post_process: Callback for vendor specific post processing of cell data
* @size: Device size.
* @word_size: Minimum read/write access granularity.
* @stride: Minimum read/write access stride.
* @priv: User context passed to read/write callbacks.
- * @wp-gpio: Write protect pin
+ * @wp-gpio: Write protect pin
+ * @ignore_wp: Write Protect pin is managed by the provider.
*
* Note: A default "nvmem<id>" name will be assigned to the device if
* no name is specified in its configuration. In such case "<id>" is
@@ -63,12 +88,17 @@ struct nvmem_config {
struct gpio_desc *wp_gpio;
const struct nvmem_cell_info *cells;
int ncells;
+ const struct nvmem_keepout *keepout;
+ unsigned int nkeepout;
enum nvmem_type type;
bool read_only;
bool root_only;
+ bool ignore_wp;
+ struct device_node *of_node;
bool no_of_node;
nvmem_reg_read_t reg_read;
nvmem_reg_write_t reg_write;
+ nvmem_cell_post_process_t cell_post_process;
int size;
int word_size;
int stride;
@@ -105,8 +135,6 @@ void nvmem_unregister(struct nvmem_device *nvmem);
struct nvmem_device *devm_nvmem_register(struct device *dev,
const struct nvmem_config *cfg);
-int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem);
-
void nvmem_add_cell_table(struct nvmem_cell_table *table);
void nvmem_del_cell_table(struct nvmem_cell_table *table);
@@ -125,12 +153,6 @@ devm_nvmem_register(struct device *dev, const struct nvmem_config *c)
return nvmem_register(c);
}
-static inline int
-devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
-{
- return -EOPNOTSUPP;
-}
-
static inline void nvmem_add_cell_table(struct nvmem_cell_table *table) {}
static inline void nvmem_del_cell_table(struct nvmem_cell_table *table) {}
diff --git a/include/linux/objtool.h b/include/linux/objtool.h
new file mode 100644
index 000000000000..62c54ffbeeaa
--- /dev/null
+++ b/include/linux/objtool.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_OBJTOOL_H
+#define _LINUX_OBJTOOL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+/*
+ * This struct is used by asm and inline asm code to manually annotate the
+ * location of registers on the stack.
+ */
+struct unwind_hint {
+ u32 ip;
+ s16 sp_offset;
+ u8 sp_reg;
+ u8 type;
+ u8 end;
+};
+#endif
+
+/*
+ * UNWIND_HINT_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP
+ * (the caller's SP right before it made the call). Used for all callable
+ * functions, i.e. all C code and all callable asm functions.
+ *
+ * UNWIND_HINT_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset
+ * points to a fully populated pt_regs from a syscall, interrupt, or exception.
+ *
+ * UNWIND_HINT_TYPE_REGS_PARTIAL: Used in entry code to indicate that
+ * sp_reg+sp_offset points to the iret return frame.
+ *
+ * UNWIND_HINT_FUNC: Generate the unwind metadata of a callable function.
+ * Useful for code which doesn't have an ELF function annotation.
+ *
+ * UNWIND_HINT_ENTRY: machine entry without stack, SYSCALL/SYSENTER etc.
+ */
+#define UNWIND_HINT_TYPE_CALL 0
+#define UNWIND_HINT_TYPE_REGS 1
+#define UNWIND_HINT_TYPE_REGS_PARTIAL 2
+#define UNWIND_HINT_TYPE_FUNC 3
+#define UNWIND_HINT_TYPE_ENTRY 4
+#define UNWIND_HINT_TYPE_SAVE 5
+#define UNWIND_HINT_TYPE_RESTORE 6
+
+#ifdef CONFIG_OBJTOOL
+
+#include <asm/asm.h>
+
+#ifndef __ASSEMBLY__
+
+#define UNWIND_HINT(sp_reg, sp_offset, type, end) \
+ "987: \n\t" \
+ ".pushsection .discard.unwind_hints\n\t" \
+ /* struct unwind_hint */ \
+ ".long 987b - .\n\t" \
+ ".short " __stringify(sp_offset) "\n\t" \
+ ".byte " __stringify(sp_reg) "\n\t" \
+ ".byte " __stringify(type) "\n\t" \
+ ".byte " __stringify(end) "\n\t" \
+ ".balign 4 \n\t" \
+ ".popsection\n\t"
+
+/*
+ * This macro marks the given function's stack frame as "non-standard", which
+ * tells objtool to ignore the function when doing stack metadata validation.
+ * It should only be used in special cases where you're 100% sure it won't
+ * affect the reliability of frame pointers and kernel stack traces.
+ *
+ * For more information, see tools/objtool/Documentation/objtool.txt.
+ */
+#define STACK_FRAME_NON_STANDARD(func) \
+ static void __used __section(".discard.func_stack_frame_non_standard") \
+ *__func_stack_frame_non_standard_##func = func
+
+/*
+ * STACK_FRAME_NON_STANDARD_FP() is a frame-pointer-specific function ignore
+ * for the case where a function is intentionally missing frame pointer setup,
+ * but otherwise needs objtool/ORC coverage when frame pointers are disabled.
+ */
+#ifdef CONFIG_FRAME_POINTER
+#define STACK_FRAME_NON_STANDARD_FP(func) STACK_FRAME_NON_STANDARD(func)
+#else
+#define STACK_FRAME_NON_STANDARD_FP(func)
+#endif
+
+#define ANNOTATE_NOENDBR \
+ "986: \n\t" \
+ ".pushsection .discard.noendbr\n\t" \
+ _ASM_PTR " 986b\n\t" \
+ ".popsection\n\t"
+
+#define ASM_REACHABLE \
+ "998:\n\t" \
+ ".pushsection .discard.reachable\n\t" \
+ ".long 998b - .\n\t" \
+ ".popsection\n\t"
+
+#else /* __ASSEMBLY__ */
+
+/*
+ * This macro indicates that the following intra-function call is valid.
+ * Any non-annotated intra-function call will cause objtool to issue a warning.
+ */
+#define ANNOTATE_INTRA_FUNCTION_CALL \
+ 999: \
+ .pushsection .discard.intra_function_calls; \
+ .long 999b; \
+ .popsection;
+
+/*
+ * In asm, there are two kinds of code: normal C-type callable functions and
+ * the rest. The normal callable functions can be called by other code, and
+ * don't do anything unusual with the stack. Such normal callable functions
+ * are annotated with the ENTRY/ENDPROC macros. Most asm code falls in this
+ * category. In this case, no special debugging annotations are needed because
+ * objtool can automatically generate the ORC data for the ORC unwinder to read
+ * at runtime.
+ *
+ * Anything which doesn't fall into the above category, such as syscall and
+ * interrupt handlers, tends to not be called directly by other functions, and
+ * often does unusual non-C-function-type things with the stack pointer. Such
+ * code needs to be annotated such that objtool can understand it. The
+ * following CFI hint macros are for this type of code.
+ *
+ * These macros provide hints to objtool about the state of the stack at each
+ * instruction. Objtool starts from the hints and follows the code flow,
+ * making automatic CFI adjustments when it sees pushes and pops, filling out
+ * the debuginfo as necessary. It will also warn if it sees any
+ * inconsistencies.
+ */
+.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0
+.Lunwind_hint_ip_\@:
+ .pushsection .discard.unwind_hints
+ /* struct unwind_hint */
+ .long .Lunwind_hint_ip_\@ - .
+ .short \sp_offset
+ .byte \sp_reg
+ .byte \type
+ .byte \end
+ .balign 4
+ .popsection
+.endm
+
+.macro STACK_FRAME_NON_STANDARD func:req
+ .pushsection .discard.func_stack_frame_non_standard, "aw"
+ _ASM_PTR \func
+ .popsection
+.endm
+
+.macro STACK_FRAME_NON_STANDARD_FP func:req
+#ifdef CONFIG_FRAME_POINTER
+ STACK_FRAME_NON_STANDARD \func
+#endif
+.endm
+
+.macro ANNOTATE_NOENDBR
+.Lhere_\@:
+ .pushsection .discard.noendbr
+ .quad .Lhere_\@
+ .popsection
+.endm
+
+.macro REACHABLE
+.Lhere_\@:
+ .pushsection .discard.reachable
+ .long .Lhere_\@ - .
+ .popsection
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#else /* !CONFIG_OBJTOOL */
+
+#ifndef __ASSEMBLY__
+
+#define UNWIND_HINT(sp_reg, sp_offset, type, end) \
+ "\n\t"
+#define STACK_FRAME_NON_STANDARD(func)
+#define STACK_FRAME_NON_STANDARD_FP(func)
+#define ANNOTATE_NOENDBR
+#define ASM_REACHABLE
+#else
+#define ANNOTATE_INTRA_FUNCTION_CALL
+.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0
+.endm
+.macro STACK_FRAME_NON_STANDARD func:req
+.endm
+.macro ANNOTATE_NOENDBR
+.endm
+.macro REACHABLE
+.endm
+#endif
+
+#endif /* CONFIG_OBJTOOL */
+
+#endif /* _LINUX_OBJTOOL_H */
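A minimal sketch of using the new header from C, with a hypothetical helper name; STACK_FRAME_NON_STANDARD() takes the bare function symbol, as the macro definition above shows:

#include <linux/objtool.h>

/*
 * Hypothetical helper whose inline asm confuses stack metadata
 * validation; exclude it rather than risk a false objtool warning.
 */
static noinline void demo_stack_trampoline(void)
{
	/* ... unusual stack manipulation ... */
}
STACK_FRAME_NON_STANDARD(demo_stack_trampoline);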
diff --git a/include/linux/of.h b/include/linux/of.h
index c669c0a4732f..6b79ef9a6541 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -108,7 +108,7 @@ static inline void of_node_init(struct device_node *node)
#if defined(CONFIG_OF_KOBJ)
kobject_init(&node->kobj, &of_node_ktype);
#endif
- node->fwnode.ops = &of_fwnode_ops;
+ fwnode_init(&node->fwnode, &of_fwnode_ops);
}
#if defined(CONFIG_OF_KOBJ)
@@ -185,7 +185,7 @@ static inline bool of_node_is_root(const struct device_node *node)
return node && (node->parent == NULL);
}
-static inline int of_node_check_flag(struct device_node *n, unsigned long flag)
+static inline int of_node_check_flag(const struct device_node *n, unsigned long flag)
{
return test_bit(flag, &n->_flags);
}
@@ -207,7 +207,7 @@ static inline void of_node_clear_flag(struct device_node *n, unsigned long flag)
}
#if defined(CONFIG_OF_DYNAMIC) || defined(CONFIG_SPARC)
-static inline int of_property_check_flag(struct property *p, unsigned long flag)
+static inline int of_property_check_flag(const struct property *p, unsigned long flag)
{
return test_bit(flag, &p->_flags);
}
@@ -342,7 +342,7 @@ extern int of_property_read_string_helper(const struct device_node *np,
const char **out_strs, size_t sz, int index);
extern int of_device_is_compatible(const struct device_node *device,
const char *);
-extern int of_device_compatible_match(struct device_node *device,
+extern int of_device_compatible_match(const struct device_node *device,
const char *const *compat);
extern bool of_device_is_available(const struct device_node *device);
extern bool of_device_is_big_endian(const struct device_node *device);
@@ -353,6 +353,7 @@ extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
extern struct device_node *of_get_next_cpu_node(struct device_node *prev);
extern struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
int index);
+extern u64 of_get_cpu_hwid(struct device_node *cpun, unsigned int thread);
#define for_each_property_of_node(dn, pp) \
for (pp = dn->properties; pp != NULL; pp = pp->next)
@@ -363,18 +364,12 @@ extern const struct of_device_id *of_match_node(
const struct of_device_id *matches, const struct device_node *node);
extern int of_modalias_node(struct device_node *node, char *modalias, int len);
extern void of_print_phandle_args(const char *msg, const struct of_phandle_args *args);
-extern struct device_node *of_parse_phandle(const struct device_node *np,
- const char *phandle_name,
- int index);
-extern int of_parse_phandle_with_args(const struct device_node *np,
- const char *list_name, const char *cells_name, int index,
- struct of_phandle_args *out_args);
+extern int __of_parse_phandle_with_args(const struct device_node *np,
+ const char *list_name, const char *cells_name, int cell_count,
+ int index, struct of_phandle_args *out_args);
extern int of_parse_phandle_with_args_map(const struct device_node *np,
const char *list_name, const char *stem_name, int index,
struct of_phandle_args *out_args);
-extern int of_parse_phandle_with_fixed_args(const struct device_node *np,
- const char *list_name, int cells_count, int index,
- struct of_phandle_args *out_args);
extern int of_count_phandle_with_args(const struct device_node *np,
const char *list_name, const char *cells_name);
@@ -393,9 +388,6 @@ extern int of_phandle_iterator_args(struct of_phandle_iterator *it,
extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
extern int of_alias_get_id(struct device_node *np, const char *stem);
extern int of_alias_get_highest_id(const char *stem);
-extern int of_alias_get_alias_list(const struct of_device_id *matches,
- const char *stem, unsigned long *bitmap,
- unsigned int nbits);
extern int of_machine_is_compatible(const char *compat);
@@ -415,122 +407,6 @@ extern int of_detach_node(struct device_node *);
#define of_match_ptr(_ptr) (_ptr)
-/**
- * of_property_read_u8_array - Find and read an array of u8 from a property.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
- *
- * Search for a property in a device node and read 8-bit value(s) from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
- *
- * dts entry of array should be like:
- * property = /bits/ 8 <0x50 0x60 0x70>;
- *
- * The out_values is modified only if a valid u8 value can be decoded.
- */
-static inline int of_property_read_u8_array(const struct device_node *np,
- const char *propname,
- u8 *out_values, size_t sz)
-{
- int ret = of_property_read_variable_u8_array(np, propname, out_values,
- sz, 0);
- if (ret >= 0)
- return 0;
- else
- return ret;
-}
-
-/**
- * of_property_read_u16_array - Find and read an array of u16 from a property.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
- *
- * Search for a property in a device node and read 16-bit value(s) from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
- *
- * dts entry of array should be like:
- * property = /bits/ 16 <0x5000 0x6000 0x7000>;
- *
- * The out_values is modified only if a valid u16 value can be decoded.
- */
-static inline int of_property_read_u16_array(const struct device_node *np,
- const char *propname,
- u16 *out_values, size_t sz)
-{
- int ret = of_property_read_variable_u16_array(np, propname, out_values,
- sz, 0);
- if (ret >= 0)
- return 0;
- else
- return ret;
-}
-
-/**
- * of_property_read_u32_array - Find and read an array of 32 bit integers
- * from a property.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
- *
- * Search for a property in a device node and read 32-bit value(s) from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
- *
- * The out_values is modified only if a valid u32 value can be decoded.
- */
-static inline int of_property_read_u32_array(const struct device_node *np,
- const char *propname,
- u32 *out_values, size_t sz)
-{
- int ret = of_property_read_variable_u32_array(np, propname, out_values,
- sz, 0);
- if (ret >= 0)
- return 0;
- else
- return ret;
-}
-
-/**
- * of_property_read_u64_array - Find and read an array of 64 bit integers
- * from a property.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
- *
- * Search for a property in a device node and read 64-bit value(s) from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
- *
- * The out_values is modified only if a valid u64 value can be decoded.
- */
-static inline int of_property_read_u64_array(const struct device_node *np,
- const char *propname,
- u64 *out_values, size_t sz)
-{
- int ret = of_property_read_variable_u64_array(np, propname, out_values,
- sz, 0);
- if (ret >= 0)
- return 0;
- else
- return ret;
-}
-
/*
* struct property *prop;
* const __be32 *p;
@@ -554,10 +430,17 @@ bool of_console_check(struct device_node *dn, char *name, int index);
extern int of_cpu_node_to_id(struct device_node *np);
-int of_map_rid(struct device_node *np, u32 rid,
+int of_map_id(struct device_node *np, u32 id,
const char *map_name, const char *map_mask_name,
struct device_node **target, u32 *id_out);
+phys_addr_t of_dma_get_max_cpu_address(struct device_node *np);
+
+struct kimage;
+void *of_kexec_alloc_and_setup_fdt(const struct kimage *image,
+ unsigned long initrd_load_addr,
+ unsigned long initrd_len,
+ const char *cmdline, size_t extra_fdt_size);
#else /* CONFIG_OF */
static inline void of_core_init(void)
@@ -630,6 +513,11 @@ static inline struct device_node *of_get_parent(const struct device_node *node)
return NULL;
}
+static inline struct device_node *of_get_next_parent(struct device_node *node)
+{
+ return NULL;
+}
+
static inline struct device_node *of_get_next_child(
const struct device_node *node, struct device_node *prev)
{
@@ -674,7 +562,7 @@ static inline int of_device_is_compatible(const struct device_node *device,
return 0;
}
-static inline int of_device_compatible_match(struct device_node *device,
+static inline int of_device_compatible_match(const struct device_node *device,
const char *const *compat)
{
return 0;
@@ -711,32 +599,6 @@ static inline int of_property_count_elems_of_size(const struct device_node *np,
return -ENOSYS;
}
-static inline int of_property_read_u8_array(const struct device_node *np,
- const char *propname, u8 *out_values, size_t sz)
-{
- return -ENOSYS;
-}
-
-static inline int of_property_read_u16_array(const struct device_node *np,
- const char *propname, u16 *out_values, size_t sz)
-{
- return -ENOSYS;
-}
-
-static inline int of_property_read_u32_array(const struct device_node *np,
- const char *propname,
- u32 *out_values, size_t sz)
-{
- return -ENOSYS;
-}
-
-static inline int of_property_read_u64_array(const struct device_node *np,
- const char *propname,
- u64 *out_values, size_t sz)
-{
- return -ENOSYS;
-}
-
static inline int of_property_read_u32_index(const struct device_node *np,
const char *propname, u32 index, u32 *out_value)
{
@@ -842,18 +704,12 @@ static inline int of_property_read_string_helper(const struct device_node *np,
return -ENOSYS;
}
-static inline struct device_node *of_parse_phandle(const struct device_node *np,
- const char *phandle_name,
- int index)
-{
- return NULL;
-}
-
-static inline int of_parse_phandle_with_args(const struct device_node *np,
- const char *list_name,
- const char *cells_name,
- int index,
- struct of_phandle_args *out_args)
+static inline int __of_parse_phandle_with_args(const struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int cell_count,
+ int index,
+ struct of_phandle_args *out_args)
{
return -ENOSYS;
}
@@ -867,14 +723,7 @@ static inline int of_parse_phandle_with_args_map(const struct device_node *np,
return -ENOSYS;
}
-static inline int of_parse_phandle_with_fixed_args(const struct device_node *np,
- const char *list_name, int cells_count, int index,
- struct of_phandle_args *out_args)
-{
- return -ENOSYS;
-}
-
-static inline int of_count_phandle_with_args(struct device_node *np,
+static inline int of_count_phandle_with_args(const struct device_node *np,
const char *list_name,
const char *cells_name)
{
@@ -912,14 +761,17 @@ static inline int of_alias_get_highest_id(const char *stem)
return -ENOSYS;
}
-static inline int of_alias_get_alias_list(const struct of_device_id *matches,
- const char *stem, unsigned long *bitmap,
- unsigned int nbits)
+static inline int of_machine_is_compatible(const char *compat)
{
- return -ENOSYS;
+ return 0;
}
-static inline int of_machine_is_compatible(const char *compat)
+static inline int of_add_property(struct device_node *np, struct property *prop)
+{
+ return 0;
+}
+
+static inline int of_remove_property(struct device_node *np, struct property *prop)
{
return 0;
}
@@ -960,7 +812,8 @@ static inline void of_node_clear_flag(struct device_node *n, unsigned long flag)
{
}
-static inline int of_property_check_flag(struct property *p, unsigned long flag)
+static inline int of_property_check_flag(const struct property *p,
+ unsigned long flag)
{
return 0;
}
@@ -978,13 +831,18 @@ static inline int of_cpu_node_to_id(struct device_node *np)
return -ENODEV;
}
-static inline int of_map_rid(struct device_node *np, u32 rid,
+static inline int of_map_id(struct device_node *np, u32 id,
const char *map_name, const char *map_mask_name,
struct device_node **target, u32 *id_out)
{
return -EINVAL;
}
+static inline phys_addr_t of_dma_get_max_cpu_address(struct device_node *np)
+{
+ return PHYS_ADDR_MAX;
+}
+
#define of_match_ptr(_ptr) NULL
#define of_match_node(_matches, _node) NULL
#endif /* CONFIG_OF */
@@ -1040,13 +898,126 @@ static inline bool of_node_is_type(const struct device_node *np, const char *typ
}
/**
+ * of_parse_phandle - Resolve a phandle property to a device_node pointer
+ * @np: Pointer to device node holding phandle property
+ * @phandle_name: Name of property holding a phandle value
+ * @index: For properties holding a table of phandles, this is the index into
+ * the table
+ *
+ * Return: The device_node pointer with refcount incremented. Use
+ * of_node_put() on it when done.
+ */
+static inline struct device_node *of_parse_phandle(const struct device_node *np,
+ const char *phandle_name,
+ int index)
+{
+ struct of_phandle_args args;
+
+ if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0,
+ index, &args))
+ return NULL;
+
+ return args.np;
+}
+
+/**
+ * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
+ * @np: pointer to a device tree node containing a list
+ * @list_name: property name that contains a list
+ * @cells_name: property name that specifies phandles' arguments count
+ * @index: index of a phandle to parse out
+ * @out_args: optional pointer to output arguments structure (will be filled)
+ *
+ * This function is useful for parsing lists of phandles and their arguments.
+ * Returns 0 on success and fills out_args; on error it returns an
+ * appropriate errno value.
+ *
+ * The caller is responsible for calling of_node_put() on the returned
+ * out_args->np pointer.
+ *
+ * Example::
+ *
+ * phandle1: node1 {
+ * #list-cells = <2>;
+ * };
+ *
+ * phandle2: node2 {
+ * #list-cells = <1>;
+ * };
+ *
+ * node3 {
+ * list = <&phandle1 1 2 &phandle2 3>;
+ * };
+ *
+ * To get a device_node of the ``node2`` node, you may call this:
+ * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
+ */
+static inline int of_parse_phandle_with_args(const struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int index,
+ struct of_phandle_args *out_args)
+{
+ int cell_count = -1;
+
+ /* If cells_name is NULL we assume a cell count of 0 */
+ if (!cells_name)
+ cell_count = 0;
+
+ return __of_parse_phandle_with_args(np, list_name, cells_name,
+ cell_count, index, out_args);
+}
+
+/**
+ * of_parse_phandle_with_fixed_args() - Find a node pointed by phandle in a list
+ * @np: pointer to a device tree node containing a list
+ * @list_name: property name that contains a list
+ * @cell_count: number of argument cells following the phandle
+ * @index: index of a phandle to parse out
+ * @out_args: optional pointer to output arguments structure (will be filled)
+ *
+ * This function is useful for parsing lists of phandles and their arguments.
+ * Returns 0 on success and fills out_args; on error it returns an
+ * appropriate errno value.
+ *
+ * The caller is responsible for calling of_node_put() on the returned
+ * out_args->np pointer.
+ *
+ * Example::
+ *
+ * phandle1: node1 {
+ * };
+ *
+ * phandle2: node2 {
+ * };
+ *
+ * node3 {
+ * list = <&phandle1 0 2 &phandle2 2 3>;
+ * };
+ *
+ * To get a device_node of the ``node2`` node, you may call this:
+ * of_parse_phandle_with_fixed_args(node3, "list", 2, 1, &args);
+ */
+static inline int of_parse_phandle_with_fixed_args(const struct device_node *np,
+ const char *list_name,
+ int cell_count,
+ int index,
+ struct of_phandle_args *out_args)
+{
+ return __of_parse_phandle_with_args(np, list_name, NULL, cell_count,
+ index, out_args);
+}
+
+/**
* of_property_count_u8_elems - Count the number of u8 elements in a property
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
*
* Search for a property in a device node and count the number of u8 elements
- * in it. Returns number of elements on sucess, -EINVAL if the property does
+ * in it.
+ *
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u8 and -ENODATA if the
* property does not have a value.
*/
@@ -1063,7 +1034,9 @@ static inline int of_property_count_u8_elems(const struct device_node *np,
* @propname: name of the property to be searched.
*
* Search for a property in a device node and count the number of u16 elements
- * in it. Returns number of elements on sucess, -EINVAL if the property does
+ * in it.
+ *
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u16 and -ENODATA if the
* property does not have a value.
*/
@@ -1080,7 +1053,9 @@ static inline int of_property_count_u16_elems(const struct device_node *np,
* @propname: name of the property to be searched.
*
* Search for a property in a device node and count the number of u32 elements
- * in it. Returns number of elements on sucess, -EINVAL if the property does
+ * in it.
+ *
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u32 and -ENODATA if the
* property does not have a value.
*/
@@ -1097,7 +1072,9 @@ static inline int of_property_count_u32_elems(const struct device_node *np,
* @propname: name of the property to be searched.
*
* Search for a property in a device node and count the number of u64 elements
- * in it. Returns number of elements on sucess, -EINVAL if the property does
+ * in it.
+ *
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u64 and -ENODATA if the
* property does not have a value.
*/
@@ -1118,7 +1095,7 @@ static inline int of_property_count_u64_elems(const struct device_node *np,
* Search for a property in a device tree node and retrieve a list of
* terminated string values (pointer to data, not a copy) in that property.
*
- * If @out_strs is NULL, the number of strings in the property is returned.
+ * Return: If @out_strs is NULL, the number of strings in the property is returned.
*/
static inline int of_property_read_string_array(const struct device_node *np,
const char *propname, const char **out_strs,
@@ -1134,10 +1111,11 @@ static inline int of_property_read_string_array(const struct device_node *np,
* @propname: name of the property to be searched.
*
* Search for a property in a device tree node and retrieve the number of null
- * terminated string contain in it. Returns the number of strings on
- * success, -EINVAL if the property does not exist, -ENODATA if property
- * does not have a value, and -EILSEQ if the string is not null-terminated
- * within the length of the property data.
+ * terminated strings contained in it.
+ *
+ * Return: The number of strings on success, -EINVAL if the property does not
+ * exist, -ENODATA if property does not have a value, and -EILSEQ if the string
+ * is not null-terminated within the length of the property data.
*/
static inline int of_property_count_strings(const struct device_node *np,
const char *propname)
@@ -1151,13 +1129,14 @@ static inline int of_property_count_strings(const struct device_node *np,
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
* @index: index of the string in the list of strings
- * @out_string: pointer to null terminated return string, modified only if
+ * @output: pointer to null terminated return string, modified only if
* return value is 0.
*
* Search for a property in a device tree node and retrieve a null
* terminated string value (pointer to data, not a copy) in the list of strings
* contained in that property.
- * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist, -ENODATA if
* property does not have a value, and -EILSEQ if the string is not
* null-terminated within the length of the property data.
*
@@ -1177,7 +1156,8 @@ static inline int of_property_read_string_index(const struct device_node *np,
* @propname: name of the property to be searched.
*
* Search for a property in a device node.
- * Returns true if the property exists false otherwise.
+ *
+ * Return: true if the property exists, false otherwise.
*/
static inline bool of_property_read_bool(const struct device_node *np,
const char *propname)
@@ -1187,6 +1167,130 @@ static inline bool of_property_read_bool(const struct device_node *np,
return prop ? true : false;
}
+/**
+ * of_property_read_u8_array - Find and read an array of u8 from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 8-bit value(s) from
+ * it.
+ *
+ * The dts entry for the array should look like:
+ * ``property = /bits/ 8 <0x50 0x60 0x70>;``
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * out_values is modified only if a valid u8 value can be decoded.
+ */
+static inline int of_property_read_u8_array(const struct device_node *np,
+ const char *propname,
+ u8 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u8_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
+/**
+ * of_property_read_u16_array - Find and read an array of u16 from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 16-bit value(s) from
+ * it.
+ *
+ * The dts entry for the array should look like:
+ * ``property = /bits/ 16 <0x5000 0x6000 0x7000>;``
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * out_values is modified only if a valid u16 value can be decoded.
+ */
+static inline int of_property_read_u16_array(const struct device_node *np,
+ const char *propname,
+ u16 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u16_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
+/**
+ * of_property_read_u32_array - Find and read an array of 32 bit integers
+ * from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 32-bit value(s) from
+ * it.
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * out_values is modified only if a valid u32 value can be decoded.
+ */
+static inline int of_property_read_u32_array(const struct device_node *np,
+ const char *propname,
+ u32 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u32_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
+/**
+ * of_property_read_u64_array - Find and read an array of 64 bit integers
+ * from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 64-bit value(s) from
+ * it.
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * out_values is modified only if a valid u64 value can be decoded.
+ */
+static inline int of_property_read_u64_array(const struct device_node *np,
+ const char *propname,
+ u64 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u64_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
static inline int of_property_read_u8(const struct device_node *np,
const char *propname,
u8 *out_value)
@@ -1286,18 +1390,22 @@ static inline int of_get_available_child_count(const struct device_node *np)
return num;
}
+#define _OF_DECLARE_STUB(table, name, compat, fn, fn_type) \
+ static const struct of_device_id __of_table_##name \
+ __attribute__((unused)) \
+ = { .compatible = compat, \
+ .data = (fn == (fn_type)NULL) ? fn : fn }
+
#if defined(CONFIG_OF) && !defined(MODULE)
#define _OF_DECLARE(table, name, compat, fn, fn_type) \
static const struct of_device_id __of_table_##name \
- __used __section(__##table##_of_table) \
+ __used __section("__" #table "_of_table") \
+ __aligned(__alignof__(struct of_device_id)) \
= { .compatible = compat, \
.data = (fn == (fn_type)NULL) ? fn : fn }
#else
#define _OF_DECLARE(table, name, compat, fn, fn_type) \
- static const struct of_device_id __of_table_##name \
- __attribute__((unused)) \
- = { .compatible = compat, \
- .data = (fn == (fn_type)NULL) ? fn : fn }
+ _OF_DECLARE_STUB(table, name, compat, fn, fn_type)
#endif
typedef int (*of_init_fn_2)(struct device_node *, struct device_node *);
@@ -1422,24 +1530,38 @@ static inline int of_reconfig_get_state_change(unsigned long action,
* of_device_is_system_power_controller - Tells if system-power-controller is found for device_node
* @np: Pointer to the given device_node
*
- * return true if present false otherwise
+ * Return: true if present, false otherwise
*/
static inline bool of_device_is_system_power_controller(const struct device_node *np)
{
return of_property_read_bool(np, "system-power-controller");
}
-/**
+/*
* Overlay support
*/
enum of_overlay_notify_action {
- OF_OVERLAY_PRE_APPLY = 0,
+ OF_OVERLAY_INIT = 0, /* kzalloc() of ovcs sets this value */
+ OF_OVERLAY_PRE_APPLY,
OF_OVERLAY_POST_APPLY,
OF_OVERLAY_PRE_REMOVE,
OF_OVERLAY_POST_REMOVE,
};
+static inline char *of_overlay_action_name(enum of_overlay_notify_action action)
+{
+ static char *of_overlay_action_name[] = {
+ "init",
+ "pre-apply",
+ "post-apply",
+ "pre-remove",
+ "post-remove",
+ };
+
+ return of_overlay_action_name[action];
+}
+
struct of_overlay_notify_data {
struct device_node *overlay;
struct device_node *target;
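A caller sketch for the now-static-inline phandle parsing above, reusing the node3/"list" layout from the kerneldoc example (the node and property names come from that example, not a real binding):

#include <linux/of.h>

static int demo_parse_list(struct device_node *node3)
{
	struct of_phandle_args args;
	int ret;

	/* Parse entry 1 of node3's "list" property (resolves to node2) */
	ret = of_parse_phandle_with_args(node3, "list", "#list-cells", 1,
					 &args);
	if (ret)
		return ret;

	pr_info("target %pOF, %d argument cells\n", args.np, args.args_count);
	of_node_put(args.np);	/* drop the reference taken for args.np */
	return 0;
}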
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index eac7ab109df4..45598dbec269 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -6,25 +6,34 @@
#include <linux/of.h>
#include <linux/io.h>
+struct of_bus;
+
struct of_pci_range_parser {
struct device_node *node;
+ struct of_bus *bus;
const __be32 *range;
const __be32 *end;
- int np;
+ int na;
+ int ns;
int pna;
bool dma;
};
+#define of_range_parser of_pci_range_parser
struct of_pci_range {
- u32 pci_space;
- u64 pci_addr;
+ union {
+ u64 pci_addr;
+ u64 bus_addr;
+ };
u64 cpu_addr;
u64 size;
u32 flags;
};
+#define of_range of_pci_range
#define for_each_of_pci_range(parser, range) \
for (; of_pci_range_parser_one(parser, range);)
+#define for_each_of_range for_each_of_pci_range
/* Translate a DMA address from device space to CPU space */
extern u64 of_translate_dma_address(struct device_node *dev,
@@ -42,8 +51,8 @@ void __iomem *of_io_request_and_map(struct device_node *device,
* the address space flags too. The PCI version uses a BAR number
* instead of an absolute index
*/
-extern const __be32 *of_get_address(struct device_node *dev, int index,
- u64 *size, unsigned int *flags);
+extern const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no,
+ u64 *size, unsigned int *flags);
extern int of_pci_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node);
@@ -52,6 +61,11 @@ extern int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
extern struct of_pci_range *of_pci_range_parser_one(
struct of_pci_range_parser *parser,
struct of_pci_range *range);
+extern int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r);
+extern int of_pci_range_to_resource(struct of_pci_range *range,
+ struct device_node *np,
+ struct resource *res);
extern bool of_dma_is_coherent(struct device_node *np);
#else /* CONFIG_OF_ADDRESS */
static inline void __iomem *of_io_request_and_map(struct device_node *device,
@@ -66,8 +80,8 @@ static inline u64 of_translate_address(struct device_node *np,
return OF_BAD_ADDR;
}
-static inline const __be32 *of_get_address(struct device_node *dev, int index,
- u64 *size, unsigned int *flags)
+static inline const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no,
+ u64 *size, unsigned int *flags)
{
return NULL;
}
@@ -91,6 +105,19 @@ static inline struct of_pci_range *of_pci_range_parser_one(
return NULL;
}
+static inline int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r)
+{
+ return -ENOSYS;
+}
+
+static inline int of_pci_range_to_resource(struct of_pci_range *range,
+ struct device_node *np,
+ struct resource *res)
+{
+ return -ENOSYS;
+}
+
static inline bool of_dma_is_coherent(struct device_node *np)
{
return false;
@@ -113,34 +140,18 @@ static inline void __iomem *of_iomap(struct device_node *device, int index)
return NULL;
}
#endif
+#define of_range_parser_init of_pci_range_parser_init
-#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI)
-extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no,
- u64 *size, unsigned int *flags);
-extern int of_pci_address_to_resource(struct device_node *dev, int bar,
- struct resource *r);
-extern int of_pci_range_to_resource(struct of_pci_range *range,
- struct device_node *np,
- struct resource *res);
-#else /* CONFIG_OF_ADDRESS && CONFIG_PCI */
-static inline int of_pci_address_to_resource(struct device_node *dev, int bar,
- struct resource *r)
+static inline const __be32 *of_get_address(struct device_node *dev, int index,
+ u64 *size, unsigned int *flags)
{
- return -ENOSYS;
+ return __of_get_address(dev, index, -1, size, flags);
}
-static inline const __be32 *of_get_pci_address(struct device_node *dev,
- int bar_no, u64 *size, unsigned int *flags)
+static inline const __be32 *of_get_pci_address(struct device_node *dev, int bar_no,
+ u64 *size, unsigned int *flags)
{
- return NULL;
+ return __of_get_address(dev, -1, bar_no, size, flags);
}
-static inline int of_pci_range_to_resource(struct of_pci_range *range,
- struct device_node *np,
- struct resource *res)
-{
- return -ENOSYS;
-}
-#endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */
#endif /* __OF_ADDRESS_H */
-
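A sketch of the bus-agnostic iteration the new of_range aliases enable, assuming np is a bus node with a ranges property:

#include <linux/of_address.h>

static int demo_dump_ranges(struct device_node *np)
{
	struct of_range_parser parser;
	struct of_range range;

	if (of_range_parser_init(&parser, np))
		return -EINVAL;

	for_each_of_range(&parser, &range)
		pr_info("bus 0x%llx -> cpu 0x%llx, size 0x%llx\n",
			range.bus_addr, range.cpu_addr, range.size);
	return 0;
}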
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 8d31e39dd564..1a803e4335d3 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -26,9 +26,6 @@ static inline int of_driver_match_device(struct device *dev,
return of_match_device(drv->of_match_table, dev) != NULL;
}
-extern struct platform_device *of_dev_get(struct platform_device *dev);
-extern void of_dev_put(struct platform_device *dev);
-
extern int of_device_add(struct platform_device *pdev);
extern int of_device_register(struct platform_device *ofdev);
extern void of_device_unregister(struct platform_device *ofdev);
@@ -41,11 +38,6 @@ extern int of_device_request_module(struct device *dev);
extern void of_device_uevent(struct device *dev, struct kobj_uevent_env *env);
extern int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env);
-static inline void of_device_node_put(struct device *dev)
-{
- of_node_put(dev->of_node);
-}
-
static inline struct device_node *of_cpu_device_node_get(int cpu)
{
struct device *cpu_dev;
@@ -55,9 +47,15 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
return of_node_get(cpu_dev->of_node);
}
-int of_dma_configure(struct device *dev,
+int of_dma_configure_id(struct device *dev,
struct device_node *np,
- bool force_dma);
+ bool force_dma, const u32 *id);
+static inline int of_dma_configure(struct device *dev,
+ struct device_node *np,
+ bool force_dma)
+{
+ return of_dma_configure_id(dev, np, force_dma, NULL);
+}
#else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev,
@@ -91,21 +89,24 @@ static inline int of_device_uevent_modalias(struct device *dev,
return -ENODEV;
}
-static inline void of_device_node_put(struct device *dev) { }
-
-static inline const struct of_device_id *__of_match_device(
+static inline const struct of_device_id *of_match_device(
const struct of_device_id *matches, const struct device *dev)
{
return NULL;
}
-#define of_match_device(matches, dev) \
- __of_match_device(of_match_ptr(matches), (dev))
static inline struct device_node *of_cpu_device_node_get(int cpu)
{
return NULL;
}
+static inline int of_dma_configure_id(struct device *dev,
+ struct device_node *np,
+ bool force_dma,
+ const u32 *id)
+{
+ return 0;
+}
static inline int of_dma_configure(struct device *dev,
struct device_node *np,
bool force_dma)
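A sketch of the new id-aware DMA configuration path; the requester ID value here is purely illustrative:

static int demo_dma_setup(struct device *dev)
{
	u32 rid = 0x42;	/* illustrative bus-specific requester ID */

	/* of_dma_configure(dev, np, force_dma) is now shorthand for
	 * of_dma_configure_id(dev, np, force_dma, NULL) */
	return of_dma_configure_id(dev, dev->of_node, true, &rid);
}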
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index acf820e88952..d69ad5bb1eb1 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -58,23 +58,17 @@ extern int of_flat_dt_is_compatible(unsigned long node, const char *name);
extern unsigned long of_get_flat_dt_root(void);
extern uint32_t of_get_flat_dt_phandle(unsigned long node);
-extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
- int depth, void *data);
-extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
- int depth, void *data);
+extern int early_init_dt_scan_chosen(char *cmdline);
+extern int early_init_dt_scan_memory(void);
+extern void early_init_dt_check_for_usable_mem_range(void);
extern int early_init_dt_scan_chosen_stdout(void);
extern void early_init_fdt_scan_reserved_mem(void);
extern void early_init_fdt_reserve_self(void);
-extern void __init early_init_dt_scan_chosen_arch(unsigned long node);
extern void early_init_dt_add_memory_arch(u64 base, u64 size);
-extern int early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size);
-extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
- bool no_map);
extern u64 dt_mem_next_cell(int s, const __be32 **cellp);
/* Early flat tree scan hooks */
-extern int early_init_dt_scan_root(unsigned long node, const char *uname,
- int depth, void *data);
+extern int early_init_dt_scan_root(void);
extern bool early_init_dt_scan(void *params);
extern bool early_init_dt_verify(void *params);
@@ -90,6 +84,7 @@ extern void unflatten_and_copy_device_tree(void);
extern void early_init_devtree(void *);
extern void early_get_first_memblock_info(void *, phys_addr_t *);
#else /* CONFIG_OF_EARLY_FLATTREE */
+static inline void early_init_dt_check_for_usable_mem_range(void) {}
static inline int early_init_dt_scan_chosen_stdout(void) { return -ENODEV; }
static inline void early_init_fdt_scan_reserved_mem(void) {}
static inline void early_init_fdt_reserve_self(void) {}
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 16967390a3fe..a5166eb93437 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -11,9 +11,8 @@
#define __LINUX_OF_GPIO_H
#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio.h> /* FIXME: Shouldn't be here */
#include <linux/of.h>
struct device_node;
@@ -30,10 +29,13 @@ enum of_gpio_flags {
OF_GPIO_TRANSITORY = 0x8,
OF_GPIO_PULL_UP = 0x10,
OF_GPIO_PULL_DOWN = 0x20,
+ OF_GPIO_PULL_DISABLE = 0x40,
};
#ifdef CONFIG_OF_GPIO
+#include <linux/kernel.h>
+
/*
* OF GPIO chip for memory mapped banks
*/
@@ -48,7 +50,7 @@ static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc)
return container_of(gc, struct of_mm_gpio_chip, gc);
}
-extern int of_get_named_gpio_flags(struct device_node *np,
+extern int of_get_named_gpio_flags(const struct device_node *np,
const char *list_name, int index, enum of_gpio_flags *flags);
extern int of_mm_gpiochip_add_data(struct device_node *np,
@@ -63,8 +65,10 @@ extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
#else /* CONFIG_OF_GPIO */
+#include <linux/errno.h>
+
/* Drivers may not strictly depend on the GPIO support, so let them link. */
-static inline int of_get_named_gpio_flags(struct device_node *np,
+static inline int of_get_named_gpio_flags(const struct device_node *np,
const char *list_name, int index, enum of_gpio_flags *flags)
{
if (flags)
@@ -95,7 +99,8 @@ static inline int of_get_named_gpio_flags(struct device_node *np,
* The above example defines four GPIOs, two of which are not specified.
* This function will return '4'
*/
-static inline int of_gpio_named_count(struct device_node *np, const char* propname)
+static inline int of_gpio_named_count(const struct device_node *np,
+ const char *propname)
{
return of_count_phandle_with_args(np, propname, "#gpio-cells");
}
@@ -106,12 +111,12 @@ static inline int of_gpio_named_count(struct device_node *np, const char* propna
*
* Same as of_gpio_named_count, but hard coded to use the 'gpios' property
*/
-static inline int of_gpio_count(struct device_node *np)
+static inline int of_gpio_count(const struct device_node *np)
{
return of_gpio_named_count(np, "gpios");
}
-static inline int of_get_gpio_flags(struct device_node *np, int index,
+static inline int of_get_gpio_flags(const struct device_node *np, int index,
enum of_gpio_flags *flags)
{
return of_get_named_gpio_flags(np, "gpios", index, flags);
@@ -126,7 +131,7 @@ static inline int of_get_gpio_flags(struct device_node *np, int index,
* Returns the GPIO number to use with the Linux generic GPIO API, or an
* errno value on error.
*/
-static inline int of_get_named_gpio(struct device_node *np,
+static inline int of_get_named_gpio(const struct device_node *np,
const char *propname, int index)
{
return of_get_named_gpio_flags(np, propname, index, NULL);
@@ -140,7 +145,7 @@ static inline int of_get_named_gpio(struct device_node *np,
* Returns the GPIO number to use with the Linux generic GPIO API, or an
* errno value on error.
*/
-static inline int of_get_gpio(struct device_node *np, int index)
+static inline int of_get_gpio(const struct device_node *np, int index)
{
return of_get_gpio_flags(np, index, NULL);
}
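A usage sketch for the now-const-correct accessors; the "reset-gpios" property name is illustrative, and OF_GPIO_ACTIVE_LOW is assumed from the flags enum earlier in the header:

static int demo_get_reset_gpio(const struct device_node *np)
{
	enum of_gpio_flags flags;
	int gpio;

	gpio = of_get_named_gpio_flags(np, "reset-gpios", 0, &flags);
	if (gpio < 0)
		return gpio;	/* errno, e.g. -EPROBE_DEFER or -ENOENT */

	if (flags & OF_GPIO_ACTIVE_LOW)
		pr_debug("reset GPIO %d is active low\n", gpio);
	return gpio;
}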
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
index 01038a6aade0..4d7756087b6b 100644
--- a/include/linux/of_graph.h
+++ b/include/linux/of_graph.h
@@ -38,6 +38,7 @@ struct of_endpoint {
child = of_graph_get_next_endpoint(parent, child))
#ifdef CONFIG_OF
+bool of_graph_is_present(const struct device_node *node);
int of_graph_parse_endpoint(const struct device_node *node,
struct of_endpoint *endpoint);
int of_graph_get_endpoint_count(const struct device_node *np);
@@ -56,6 +57,11 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node,
u32 port, u32 endpoint);
#else
+static inline bool of_graph_is_present(const struct device_node *node)
+{
+ return false;
+}
+
static inline int of_graph_parse_endpoint(const struct device_node *node,
struct of_endpoint *endpoint)
{
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index f3d40dd7bb66..55c1eb300a86 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -2,30 +2,21 @@
#ifndef __OF_IOMMU_H
#define __OF_IOMMU_H
-#include <linux/device.h>
-#include <linux/iommu.h>
-#include <linux/of.h>
+struct device;
+struct device_node;
+struct iommu_ops;
#ifdef CONFIG_OF_IOMMU
-extern int of_get_dma_window(struct device_node *dn, const char *prefix,
- int index, unsigned long *busno, dma_addr_t *addr,
- size_t *size);
-
extern const struct iommu_ops *of_iommu_configure(struct device *dev,
- struct device_node *master_np);
+ struct device_node *master_np,
+ const u32 *id);
#else
-static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
- int index, unsigned long *busno, dma_addr_t *addr,
- size_t *size)
-{
- return -EINVAL;
-}
-
static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
- struct device_node *master_np)
+ struct device_node *master_np,
+ const u32 *id)
{
return NULL;
}
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 1214cabb2247..d6d3eae2f145 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -20,12 +20,12 @@ typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
extern unsigned int of_irq_workarounds;
extern struct device_node *of_irq_dflt_pic;
-extern int of_irq_parse_oldworld(struct device_node *device, int index,
- struct of_phandle_args *out_irq);
+int of_irq_parse_oldworld(const struct device_node *device, int index,
+ struct of_phandle_args *out_irq);
#else /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
#define of_irq_workarounds (0)
#define of_irq_dflt_pic (NULL)
-static inline int of_irq_parse_oldworld(struct device_node *device, int index,
+static inline int of_irq_parse_oldworld(const struct device_node *device, int index,
struct of_phandle_args *out_irq)
{
return -EINVAL;
@@ -33,15 +33,14 @@ static inline int of_irq_parse_oldworld(struct device_node *device, int index,
#endif /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
extern int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq);
-extern int of_irq_parse_one(struct device_node *device, int index,
- struct of_phandle_args *out_irq);
extern unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data);
extern int of_irq_to_resource(struct device_node *dev, int index,
struct resource *r);
-extern void of_irq_init(const struct of_device_id *matches);
-
#ifdef CONFIG_OF_IRQ
+extern void of_irq_init(const struct of_device_id *matches);
+extern int of_irq_parse_one(struct device_node *device, int index,
+ struct of_phandle_args *out_irq);
extern int of_irq_count(struct device_node *dev);
extern int of_irq_get(struct device_node *dev, int index);
extern int of_irq_get_byname(struct device_node *dev, const char *name);
@@ -52,10 +51,19 @@ extern struct irq_domain *of_msi_get_domain(struct device *dev,
struct device_node *np,
enum irq_domain_bus_token token);
extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
- u32 rid);
+ u32 id,
+ u32 bus_token);
extern void of_msi_configure(struct device *dev, struct device_node *np);
-u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in);
+u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in);
#else
+static inline void of_irq_init(const struct of_device_id *matches)
+{
+}
+static inline int of_irq_parse_one(struct device_node *device, int index,
+ struct of_phandle_args *out_irq)
+{
+ return -EINVAL;
+}
static inline int of_irq_count(struct device_node *dev)
{
return 0;
@@ -85,17 +93,17 @@ static inline struct irq_domain *of_msi_get_domain(struct device *dev,
return NULL;
}
static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
- u32 rid)
+ u32 id, u32 bus_token)
{
return NULL;
}
static inline void of_msi_configure(struct device *dev, struct device_node *np)
{
}
-static inline u32 of_msi_map_rid(struct device *dev,
- struct device_node *msi_np, u32 rid_in)
+static inline u32 of_msi_map_id(struct device *dev,
+ struct device_node *msi_np, u32 id_in)
{
- return rid_in;
+ return id_in;
}
#endif
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 491a2b7e77c1..da633d34ab86 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -8,29 +8,31 @@
#ifndef __LINUX_OF_MDIO_H
#define __LINUX_OF_MDIO_H
+#include <linux/device.h>
#include <linux/phy.h>
#include <linux/of.h>
#if IS_ENABLED(CONFIG_OF_MDIO)
-extern bool of_mdiobus_child_is_phy(struct device_node *child);
-extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
-extern struct phy_device *of_phy_find_device(struct device_node *phy_np);
-extern struct phy_device *of_phy_connect(struct net_device *dev,
- struct device_node *phy_np,
- void (*hndlr)(struct net_device *),
- u32 flags, phy_interface_t iface);
-extern struct phy_device *
+bool of_mdiobus_child_is_phy(struct device_node *child);
+int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
+int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
+ struct device_node *np);
+struct mdio_device *of_mdio_find_device(struct device_node *np);
+struct phy_device *of_phy_find_device(struct device_node *phy_np);
+struct phy_device *
+of_phy_connect(struct net_device *dev, struct device_node *phy_np,
+ void (*hndlr)(struct net_device *), u32 flags,
+ phy_interface_t iface);
+struct phy_device *
of_phy_get_and_connect(struct net_device *dev, struct device_node *np,
void (*hndlr)(struct net_device *));
-struct phy_device *of_phy_attach(struct net_device *dev,
- struct device_node *phy_np, u32 flags,
- phy_interface_t iface);
-
-extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
-extern int of_phy_register_fixed_link(struct device_node *np);
-extern void of_phy_deregister_fixed_link(struct device_node *np);
-extern bool of_phy_is_fixed_link(struct device_node *np);
+struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
+int of_phy_register_fixed_link(struct device_node *np);
+void of_phy_deregister_fixed_link(struct device_node *np);
+bool of_phy_is_fixed_link(struct device_node *np);
+int of_mdiobus_phy_device_register(struct mii_bus *mdio, struct phy_device *phy,
+ struct device_node *child, u32 addr);
static inline int of_mdio_parse_addr(struct device *dev,
const struct device_node *np)
@@ -70,6 +72,18 @@ static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *
return mdiobus_register(mdio);
}
+static inline int devm_of_mdiobus_register(struct device *dev,
+ struct mii_bus *mdio,
+ struct device_node *np)
+{
+ return devm_mdiobus_register(dev, mdio);
+}
+
+static inline struct mdio_device *of_mdio_find_device(struct device_node *np)
+{
+ return NULL;
+}
+
static inline struct phy_device *of_phy_find_device(struct device_node *phy_np)
{
return NULL;
@@ -90,13 +104,6 @@ of_phy_get_and_connect(struct net_device *dev, struct device_node *np,
return NULL;
}
-static inline struct phy_device *of_phy_attach(struct net_device *dev,
- struct device_node *phy_np,
- u32 flags, phy_interface_t iface)
-{
- return NULL;
-}
-
static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
{
return NULL;
@@ -118,6 +125,13 @@ static inline bool of_phy_is_fixed_link(struct device_node *np)
{
return false;
}
+
+static inline int of_mdiobus_phy_device_register(struct mii_bus *mdio,
+ struct phy_device *phy,
+ struct device_node *child, u32 addr)
+{
+ return -ENOSYS;
+}
#endif
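A probe-path sketch for the new devm_of_mdiobus_register(); the driver name is hypothetical and bus setup details are elided:

#include <linux/of_mdio.h>
#include <linux/platform_device.h>

static int demo_mdio_probe(struct platform_device *pdev)
{
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(&pdev->dev);
	if (!bus)
		return -ENOMEM;

	/* bus->name, bus->read, bus->write setup elided */

	/* The bus is now unregistered automatically on driver detach */
	return devm_of_mdiobus_register(&pdev->dev, bus, pdev->dev.of_node);
}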
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
index 71bbfcf3adcd..0484b613ca64 100644
--- a/include/linux/of_net.h
+++ b/include/linux/of_net.h
@@ -8,12 +8,13 @@
#include <linux/phy.h>
-#ifdef CONFIG_OF_NET
+#if defined(CONFIG_OF) && defined(CONFIG_NET)
#include <linux/of.h>
struct net_device;
extern int of_get_phy_mode(struct device_node *np, phy_interface_t *interface);
-extern const void *of_get_mac_address(struct device_node *np);
+extern int of_get_mac_address(struct device_node *np, u8 *mac);
+int of_get_ethdev_address(struct device_node *np, struct net_device *dev);
extern struct net_device *of_find_net_device_by_node(struct device_node *np);
#else
static inline int of_get_phy_mode(struct device_node *np,
@@ -22,9 +23,14 @@ static inline int of_get_phy_mode(struct device_node *np,
return -ENODEV;
}
-static inline const void *of_get_mac_address(struct device_node *np)
+static inline int of_get_mac_address(struct device_node *np, u8 *mac)
{
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
+}
+
+static inline int of_get_ethdev_address(struct device_node *np, struct net_device *dev)
+{
+ return -ENODEV;
}
static inline struct net_device *of_find_net_device_by_node(struct device_node *np)
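A sketch of the new int-returning MAC helpers; eth_hw_addr_set(), eth_hw_addr_random() and ETH_ALEN are assumed from <linux/etherdevice.h> and <linux/if_ether.h>:

static void demo_set_mac(struct device_node *np, struct net_device *ndev)
{
	u8 mac[ETH_ALEN];

	if (!of_get_mac_address(np, mac))
		eth_hw_addr_set(ndev, mac);	/* DT supplied an address */
	else
		eth_hw_addr_random(ndev);	/* fall back to a random MAC */

	/* of_get_ethdev_address(np, ndev) combines the lookup and the
	 * address assignment in one call */
}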
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index 84a966623e78..d15b6cd5e1c3 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -61,16 +61,18 @@ static inline struct platform_device *of_find_device_by_node(struct device_node
}
#endif
+extern int of_platform_bus_probe(struct device_node *root,
+ const struct of_device_id *matches,
+ struct device *parent);
+
+#ifdef CONFIG_OF_ADDRESS
/* Platform devices and busses creation */
extern struct platform_device *of_platform_device_create(struct device_node *np,
const char *bus_id,
struct device *parent);
extern int of_platform_device_destroy(struct device *dev, void *data);
-extern int of_platform_bus_probe(struct device_node *root,
- const struct of_device_id *matches,
- struct device *parent);
-#ifdef CONFIG_OF_ADDRESS
+
extern int of_platform_populate(struct device_node *root,
const struct of_device_id *matches,
const struct of_dev_auxdata *lookup,
@@ -84,6 +86,18 @@ extern int devm_of_platform_populate(struct device *dev);
extern void devm_of_platform_depopulate(struct device *dev);
#else
+/* Platform devices and busses creation */
+static inline struct platform_device *of_platform_device_create(struct device_node *np,
+ const char *bus_id,
+ struct device *parent)
+{
+ return NULL;
+}
+static inline int of_platform_device_destroy(struct device *dev, void *data)
+{
+ return -ENODEV;
+}
+
static inline int of_platform_populate(struct device_node *root,
const struct of_device_id *matches,
const struct of_dev_auxdata *lookup,
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index 60f541912ccf..4de2a24cadc9 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -3,6 +3,7 @@
#define __OF_RESERVED_MEM_H
#include <linux/device.h>
+#include <linux/of.h>
struct of_phandle_args;
struct reserved_mem_ops;
@@ -26,30 +27,39 @@ struct reserved_mem_ops {
typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
+#ifdef CONFIG_OF_RESERVED_MEM
+
#define RESERVEDMEM_OF_DECLARE(name, compat, init) \
_OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn)
-#ifdef CONFIG_OF_RESERVED_MEM
-
int of_reserved_mem_device_init_by_idx(struct device *dev,
struct device_node *np, int idx);
+int of_reserved_mem_device_init_by_name(struct device *dev,
+ struct device_node *np,
+ const char *name);
void of_reserved_mem_device_release(struct device *dev);
-void fdt_init_reserved_mem(void);
-void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
- phys_addr_t base, phys_addr_t size);
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np);
#else
+
+#define RESERVEDMEM_OF_DECLARE(name, compat, init) \
+ _OF_DECLARE_STUB(reservedmem, name, compat, init, reservedmem_of_init_fn)
+
static inline int of_reserved_mem_device_init_by_idx(struct device *dev,
struct device_node *np, int idx)
{
return -ENOSYS;
}
+
+static inline int of_reserved_mem_device_init_by_name(struct device *dev,
+ struct device_node *np,
+ const char *name)
+{
+ return -ENOSYS;
+}
+
static inline void of_reserved_mem_device_release(struct device *pdev) { }
-static inline void fdt_init_reserved_mem(void) { }
-static inline void fdt_reserved_mem_save_node(unsigned long node,
- const char *uname, phys_addr_t base, phys_addr_t size) { }
static inline struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
{
return NULL;
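A sketch of the new by-name attach; "dma-pool" is an illustrative entry, assumed to match the device's memory-region-names property:

static int demo_attach_pool(struct device *dev)
{
	int ret;

	ret = of_reserved_mem_device_init_by_name(dev, dev->of_node,
						  "dma-pool");
	if (ret)
		return ret;

	/* ... allocate from the attached region ... */

	of_reserved_mem_device_release(dev);
	return 0;
}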
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index 657d6bf2c064..0f4a8903922a 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -19,8 +19,14 @@
enum OID {
OID_id_dsa_with_sha1, /* 1.2.840.10030.4.3 */
OID_id_dsa, /* 1.2.840.10040.4.1 */
- OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */
OID_id_ecPublicKey, /* 1.2.840.10045.2.1 */
+ OID_id_prime192v1, /* 1.2.840.10045.3.1.1 */
+ OID_id_prime256v1, /* 1.2.840.10045.3.1.7 */
+ OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */
+ OID_id_ecdsa_with_sha224, /* 1.2.840.10045.4.3.1 */
+ OID_id_ecdsa_with_sha256, /* 1.2.840.10045.4.3.2 */
+ OID_id_ecdsa_with_sha384, /* 1.2.840.10045.4.3.3 */
+ OID_id_ecdsa_with_sha512, /* 1.2.840.10045.4.3.4 */
/* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */
OID_rsaEncryption, /* 1.2.840.113549.1.1.1 */
@@ -48,6 +54,10 @@ enum OID {
OID_md4, /* 1.2.840.113549.2.4 */
OID_md5, /* 1.2.840.113549.2.5 */
+ OID_mskrb5, /* 1.2.840.48018.1.2.2 */
+ OID_krb5, /* 1.2.840.113554.1.2.2 */
+ OID_krb5u2u, /* 1.2.840.113554.1.2.2.3 */
+
/* Microsoft Authenticode & Software Publishing */
OID_msIndirectData, /* 1.3.6.1.4.1.311.2.1.4 */
OID_msStatementType, /* 1.3.6.1.4.1.311.2.1.11 */
@@ -56,8 +66,16 @@ enum OID {
OID_msIndividualSPKeyPurpose, /* 1.3.6.1.4.1.311.2.1.21 */
OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */
+ OID_ntlmssp, /* 1.3.6.1.4.1.311.2.2.10 */
+
+ OID_spnego, /* 1.3.6.1.5.5.2 */
+
+ OID_IAKerb, /* 1.3.6.1.5.2.5 */
+ OID_PKU2U, /* 1.3.5.1.5.2.7 */
+ OID_Scram, /* 1.3.6.1.5.5.14 */
OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */
OID_sha1, /* 1.3.14.3.2.26 */
+ OID_id_ansip384r1, /* 1.3.132.0.34 */
OID_sha256, /* 2.16.840.1.101.3.4.2.1 */
OID_sha384, /* 2.16.840.1.101.3.4.2.2 */
OID_sha512, /* 2.16.840.1.101.3.4.2.3 */
@@ -89,6 +107,10 @@ enum OID {
OID_authorityKeyIdentifier, /* 2.5.29.35 */
OID_extKeyUsage, /* 2.5.29.37 */
+ /* Heimdal mechanisms */
+ OID_NetlogonMechanism, /* 1.2.752.43.14.2 */
+ OID_appleLocalKdcSupported, /* 1.2.752.43.14.3 */
+
/* EC-RDSA */
OID_gostCPSignA, /* 1.2.643.2.2.35.1 */
OID_gostCPSignB, /* 1.2.643.2.2.35.2 */
@@ -107,10 +129,22 @@ enum OID {
OID_gostTC26Sign512B, /* 1.2.643.7.1.2.1.2.2 */
OID_gostTC26Sign512C, /* 1.2.643.7.1.2.1.2.3 */
+ /* OSCCA */
+ OID_sm2, /* 1.2.156.10197.1.301 */
+ OID_sm3, /* 1.2.156.10197.1.401 */
+ OID_SM2_with_SM3, /* 1.2.156.10197.1.501 */
+ OID_sm3WithRSAEncryption, /* 1.2.156.10197.1.504 */
+
+ /* TCG defined OIDS for TPM based keys */
+ OID_TPMLoadableKey, /* 2.23.133.10.1.3 */
+ OID_TPMImportableKey, /* 2.23.133.10.1.4 */
+ OID_TPMSealedData, /* 2.23.133.10.1.5 */
+
OID__NR
};
extern enum OID look_up_OID(const void *data, size_t datasize);
+extern int parse_OID(const void *data, size_t datasize, enum OID *oid);
extern int sprint_oid(const void *, size_t, char *, size_t);
extern int sprint_OID(enum OID, char *, size_t);
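
As context for the oid_registry.h additions above, a short sketch of the lookup helpers; the wrapper names are illustrative assumptions. look_up_OID() takes the raw OID payload, while the newly exported parse_OID() also validates the ASN.1 tag/length header in front of it:

#include <linux/types.h>
#include <linux/oid_registry.h>

/* 'der' points at a full ASN.1 OID element: 0x06, length, payload. */
static enum OID example_parse_mech(const u8 *der, size_t len)
{
	enum OID oid;

	if (parse_OID(der, len, &oid) < 0)
		return OID__NR;	/* not a well-formed OID element */
	return oid;
}

static bool example_is_spnego(const u8 *payload, size_t len)
{
	/* Payload only; header already stripped by the ASN.1 decoder. */
	return look_up_OID(payload, len) == OID_spnego;
}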
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
index 5c5c93ad6b50..6f6c31e3fb93 100644
--- a/include/linux/omap-dma.h
+++ b/include/linux/omap-dma.h
@@ -292,15 +292,22 @@ struct omap_system_dma_plat_info {
#define dma_omap15xx() __dma_omap15xx(d)
#define dma_omap16xx() __dma_omap16xx(d)
-#if defined(CONFIG_ARCH_OMAP)
extern struct omap_system_dma_plat_info *omap_get_plat_info(void);
+#if defined(CONFIG_ARCH_OMAP1)
extern void omap_set_dma_priority(int lch, int dst_port, int priority);
+#else
+static inline void omap_set_dma_priority(int lch, int dst_port, int priority)
+{
+}
+#endif
+
extern int omap_request_dma(int dev_id, const char *dev_name,
void (*callback)(int lch, u16 ch_status, void *data),
void *data, int *dma_ch);
-extern void omap_disable_dma_irq(int ch, u16 irq_bits);
extern void omap_free_dma(int ch);
+#if IS_ENABLED(CONFIG_USB_OMAP)
+extern void omap_disable_dma_irq(int ch, u16 irq_bits);
extern void omap_start_dma(int lch);
extern void omap_stop_dma(int lch);
extern void omap_set_dma_transfer_params(int lch, int data_type,
@@ -326,10 +333,12 @@ extern void omap_set_dma_dest_burst_mode(int lch,
extern dma_addr_t omap_get_dma_src_pos(int lch);
extern dma_addr_t omap_get_dma_dst_pos(int lch);
extern int omap_get_dma_active_status(int lch);
+#endif
+
extern int omap_dma_running(void);
-#if defined(CONFIG_ARCH_OMAP1) && IS_ENABLED(CONFIG_FB_OMAP)
-#include <mach/lcd_dma.h>
+#if IS_ENABLED(CONFIG_FB_OMAP)
+extern int omap_lcd_dma_running(void);
#else
static inline int omap_lcd_dma_running(void)
{
@@ -337,22 +346,4 @@ static inline int omap_lcd_dma_running(void)
}
#endif
-#else /* CONFIG_ARCH_OMAP */
-
-static inline struct omap_system_dma_plat_info *omap_get_plat_info(void)
-{
- return NULL;
-}
-
-static inline int omap_request_dma(int dev_id, const char *dev_name,
- void (*callback)(int lch, u16 ch_status, void *data),
- void *data, int *dma_ch)
-{
- return -ENODEV;
-}
-
-static inline void omap_free_dma(int ch) { }
-
-#endif /* CONFIG_ARCH_OMAP */
-
#endif /* __LINUX_OMAP_DMA_H */
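
As context for the omap-dma.h reshuffle above, which keeps the channel request API visible to all builds and hides only the OMAP1/USB-specific parts behind config checks, a minimal request/free sketch; the device id 0 and the name string are illustrative assumptions:

#include <linux/omap-dma.h>

static void example_dma_cb(int lch, u16 ch_status, void *data)
{
	/* Runs from the DMA interrupt; ch_status carries the CSR bits. */
}

static int example_grab_channel(void)
{
	int lch, ret;

	ret = omap_request_dma(0, "example", example_dma_cb, NULL, &lch);
	if (ret)
		return ret;

	/* ... program and start the transfer on 'lch' ... */

	omap_free_dma(lch);
	return 0;
}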
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index b7bf735960c2..082841908fe7 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -81,9 +81,6 @@ extern int gpmc_configure(int cmd, int wval);
extern void gpmc_read_settings_dt(struct device_node *np,
struct gpmc_settings *p);
-extern void omap3_gpmc_save_context(void);
-extern void omap3_gpmc_restore_context(void);
-
struct gpmc_timings;
struct omap_nand_platform_data;
struct omap_onenand_platform_data;
diff --git a/include/linux/once.h b/include/linux/once.h
index 9225ee6d96c7..bc714d414448 100644
--- a/include/linux/once.h
+++ b/include/linux/once.h
@@ -5,9 +5,17 @@
#include <linux/types.h>
#include <linux/jump_label.h>
+/* Helpers used from arbitrary contexts.
+ * Hard irqs are blocked, be cautious.
+ */
bool __do_once_start(bool *done, unsigned long *flags);
void __do_once_done(bool *done, struct static_key_true *once_key,
- unsigned long *flags);
+ unsigned long *flags, struct module *mod);
+
+/* Variant for process contexts only. */
+bool __do_once_sleepable_start(bool *done);
+void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
+ struct module *mod);
/* Call a function exactly once. The idea of DO_ONCE() is to perform
* a function call such as initialization of random seeds, etc, only
@@ -16,7 +24,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
* out the condition into a nop. DO_ONCE() guarantees type safety of
* arguments!
*
- * Not that the following is not equivalent ...
+ * Note that the following is not equivalent ...
*
* DO_ONCE(func, arg);
* DO_ONCE(func, arg);
@@ -38,7 +46,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
#define DO_ONCE(func, ...) \
({ \
bool ___ret = false; \
- static bool ___done = false; \
+ static bool __section(".data.once") ___done = false; \
static DEFINE_STATIC_KEY_TRUE(___once_key); \
if (static_branch_unlikely(&___once_key)) { \
unsigned long ___flags; \
@@ -46,15 +54,33 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
if (unlikely(___ret)) { \
func(__VA_ARGS__); \
__do_once_done(&___done, &___once_key, \
- &___flags); \
+ &___flags, THIS_MODULE); \
} \
} \
___ret; \
})
+/* Variant of DO_ONCE() for process/sleepable contexts. */
+#define DO_ONCE_SLEEPABLE(func, ...) \
+ ({ \
+ bool ___ret = false; \
+ static bool __section(".data.once") ___done = false; \
+ static DEFINE_STATIC_KEY_TRUE(___once_key); \
+ if (static_branch_unlikely(&___once_key)) { \
+ ___ret = __do_once_sleepable_start(&___done); \
+ if (unlikely(___ret)) { \
+ func(__VA_ARGS__); \
+ __do_once_sleepable_done(&___done, &___once_key,\
+ THIS_MODULE); \
+ } \
+ } \
+ ___ret; \
+ })
+
#define get_random_once(buf, nbytes) \
DO_ONCE(get_random_bytes, (buf), (nbytes))
-#define get_random_once_wait(buf, nbytes) \
- DO_ONCE(get_random_bytes_wait, (buf), (nbytes)) \
+
+#define get_random_sleepable_once(buf, nbytes) \
+ DO_ONCE_SLEEPABLE(get_random_bytes, (buf), (nbytes))
#endif /* _LINUX_ONCE_H */
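
As context for the once.h changes above, a minimal sketch of the intended usage; the hashing function is an illustrative assumption. After the first call, the static key patches the check out, so the fast path costs nothing:

#include <linux/once.h>
#include <linux/random.h>

static u32 example_hash(u32 val)
{
	static u32 secret;

	/* Seeds 'secret' exactly once; later calls become a nop. */
	get_random_once(&secret, sizeof(secret));
	return val ^ secret;
}

DO_ONCE_SLEEPABLE() has the same shape but may only be used from process context, since its slow path does not disable hard irqs.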
diff --git a/include/linux/once_lite.h b/include/linux/once_lite.h
new file mode 100644
index 000000000000..b7bce4983638
--- /dev/null
+++ b/include/linux/once_lite.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ONCE_LITE_H
+#define _LINUX_ONCE_LITE_H
+
+#include <linux/types.h>
+
+/* Call a function once. Similar to DO_ONCE(), but does not use jump label
+ * patching via static keys.
+ */
+#define DO_ONCE_LITE(func, ...) \
+ DO_ONCE_LITE_IF(true, func, ##__VA_ARGS__)
+
+#define __ONCE_LITE_IF(condition) \
+ ({ \
+ static bool __section(".data.once") __already_done; \
+ bool __ret_cond = !!(condition); \
+ bool __ret_once = false; \
+ \
+ if (unlikely(__ret_cond && !__already_done)) { \
+ __already_done = true; \
+ __ret_once = true; \
+ } \
+ unlikely(__ret_once); \
+ })
+
+#define DO_ONCE_LITE_IF(condition, func, ...) \
+ ({ \
+ bool __ret_do_once = !!(condition); \
+ \
+ if (__ONCE_LITE_IF(__ret_do_once)) \
+ func(__VA_ARGS__); \
+ \
+ unlikely(__ret_do_once); \
+ })
+
+#endif /* _LINUX_ONCE_LITE_H */
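
As context for the new once_lite.h, a sketch of DO_ONCE_LITE_IF(); the error check is an illustrative assumption. Unlike DO_ONCE(), this variant uses a plain bool in .data.once instead of a static key, so it is cheap enough for warning-style call sites:

#include <linux/once_lite.h>
#include <linux/printk.h>

static void example_report(int err)
{
	/* Warns on the first negative value only; later hits are skipped. */
	DO_ONCE_LITE_IF(err < 0, pr_warn, "example: first failure: %d\n", err);
}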
diff --git a/include/linux/oom.h b/include/linux/oom.h
index c696c265f019..7d0c9c48a0c5 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -48,13 +48,14 @@ struct oom_control {
/* Used by oom implementation, do not set */
unsigned long totalpages;
struct task_struct *chosen;
- unsigned long chosen_points;
+ long chosen_points;
/* Used to print the constraint info. */
enum oom_constraint constraint;
};
extern struct mutex oom_lock;
+extern struct mutex oom_adj_mutex;
static inline void set_current_oom_origin(void)
{
@@ -77,15 +78,6 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
}
/*
- * Use this helper if tsk->mm != mm and the victim mm needs a special
- * handling. This is guaranteed to stay true after once set.
- */
-static inline bool mm_is_oom_victim(struct mm_struct *mm)
-{
- return test_bit(MMF_OOM_VICTIM, &mm->flags);
-}
-
-/*
* Checks whether a page fault on the given mm is still reliable.
* This is no longer true if the oom reaper started to reap the
* address space which is reflected by MMF_UNSTABLE flag set in
@@ -105,9 +97,7 @@ static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
return 0;
}
-bool __oom_reap_task_mm(struct mm_struct *mm);
-
-extern unsigned long oom_badness(struct task_struct *p,
+long oom_badness(struct task_struct *p,
unsigned long totalpages);
extern bool out_of_memory(struct oom_control *oc);
@@ -122,8 +112,4 @@ extern void oom_killer_enable(void);
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
-/* sysctls */
-extern int sysctl_oom_dump_tasks;
-extern int sysctl_oom_kill_allocating_task;
-extern int sysctl_panic_on_oom;
#endif /* _INCLUDE_LINUX_OOM_H */
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
deleted file mode 100644
index b2a0f15f11fe..000000000000
--- a/include/linux/oprofile.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/**
- * @file oprofile.h
- *
- * API for machine-specific interrupts to interface
- * to oprofile.
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#ifndef OPROFILE_H
-#define OPROFILE_H
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/printk.h>
-#include <linux/atomic.h>
-
-/* Each escaped entry is prefixed by ESCAPE_CODE
- * then one of the following codes, then the
- * relevant data.
- * These #defines live in this file so that arch-specific
- * buffer sync'ing code can access them.
- */
-#define ESCAPE_CODE ~0UL
-#define CTX_SWITCH_CODE 1
-#define CPU_SWITCH_CODE 2
-#define COOKIE_SWITCH_CODE 3
-#define KERNEL_ENTER_SWITCH_CODE 4
-#define KERNEL_EXIT_SWITCH_CODE 5
-#define MODULE_LOADED_CODE 6
-#define CTX_TGID_CODE 7
-#define TRACE_BEGIN_CODE 8
-#define TRACE_END_CODE 9
-#define XEN_ENTER_SWITCH_CODE 10
-#define SPU_PROFILING_CODE 11
-#define SPU_CTX_SWITCH_CODE 12
-#define IBS_FETCH_CODE 13
-#define IBS_OP_CODE 14
-
-struct dentry;
-struct file_operations;
-struct pt_regs;
-
-/* Operations structure to be filled in */
-struct oprofile_operations {
- /* create any necessary configuration files in the oprofile fs.
- * Optional. */
- int (*create_files)(struct dentry * root);
- /* Do any necessary interrupt setup. Optional. */
- int (*setup)(void);
- /* Do any necessary interrupt shutdown. Optional. */
- void (*shutdown)(void);
- /* Start delivering interrupts. */
- int (*start)(void);
- /* Stop delivering interrupts. */
- void (*stop)(void);
- /* Arch-specific buffer sync functions.
- * Return value = 0: Success
- * Return value = -1: Failure
- * Return value = 1: Run generic sync function
- */
- int (*sync_start)(void);
- int (*sync_stop)(void);
-
- /* Initiate a stack backtrace. Optional. */
- void (*backtrace)(struct pt_regs * const regs, unsigned int depth);
-
- /* Multiplex between different events. Optional. */
- int (*switch_events)(void);
- /* CPU identification string. */
- char * cpu_type;
-};
-
-/**
- * One-time initialisation. *ops must be set to a filled-in
- * operations structure. This is called even in timer interrupt
- * mode so an arch can set a backtrace callback.
- *
- * If an error occurs, the fields should be left untouched.
- */
-int oprofile_arch_init(struct oprofile_operations * ops);
-
-/**
- * One-time exit/cleanup for the arch.
- */
-void oprofile_arch_exit(void);
-
-/**
- * Add a sample. This may be called from any context.
- */
-void oprofile_add_sample(struct pt_regs * const regs, unsigned long event);
-
-/**
- * Add an extended sample. Use this when the PC is not from the regs, and
- * we cannot determine if we're in kernel mode from the regs.
- *
- * This function does perform a backtrace.
- *
- */
-void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
- unsigned long event, int is_kernel);
-
-/**
- * Add an hardware sample.
- */
-void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs,
- unsigned long event, int is_kernel,
- struct task_struct *task);
-
-/* Use this instead when the PC value is not from the regs. Doesn't
- * backtrace. */
-void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event);
-
-/* add a backtrace entry, to be called from the ->backtrace callback */
-void oprofile_add_trace(unsigned long eip);
-
-
-/**
- * Create a file of the given name as a child of the given root, with
- * the specified file operations.
- */
-int oprofilefs_create_file(struct dentry * root,
- char const * name, const struct file_operations * fops);
-
-int oprofilefs_create_file_perm(struct dentry * root,
- char const * name, const struct file_operations * fops, int perm);
-
-/** Create a file for read/write access to an unsigned long. */
-int oprofilefs_create_ulong(struct dentry * root,
- char const * name, ulong * val);
-
-/** Create a file for read-only access to an unsigned long. */
-int oprofilefs_create_ro_ulong(struct dentry * root,
- char const * name, ulong * val);
-
-/** Create a file for read-only access to an atomic_t. */
-int oprofilefs_create_ro_atomic(struct dentry * root,
- char const * name, atomic_t * val);
-
-/** create a directory */
-struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
-
-/**
- * Write the given asciz string to the given user buffer @buf, updating *offset
- * appropriately. Returns bytes written or -EFAULT.
- */
-ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count, loff_t * offset);
-
-/**
- * Convert an unsigned long value into ASCII and copy it to the user buffer @buf,
- * updating *offset appropriately. Returns bytes written or -EFAULT.
- */
-ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t count, loff_t * offset);
-
-/**
- * Read an ASCII string for a number from a userspace buffer and fill *val on success.
- * Returns 0 on success, < 0 on error.
- */
-int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
-
-/** lock for read/write safety */
-extern raw_spinlock_t oprofilefs_lock;
-
-/**
- * Add the contents of a circular buffer to the event buffer.
- */
-void oprofile_put_buff(unsigned long *buf, unsigned int start,
- unsigned int stop, unsigned int max);
-
-unsigned long oprofile_get_cpu_buffer_size(void);
-void oprofile_cpu_buffer_inc_smpl_lost(void);
-
-/* cpu buffer functions */
-
-struct op_sample;
-
-struct op_entry {
- struct ring_buffer_event *event;
- struct op_sample *sample;
- unsigned long size;
- unsigned long *data;
-};
-
-void oprofile_write_reserve(struct op_entry *entry,
- struct pt_regs * const regs,
- unsigned long pc, int code, int size);
-int oprofile_add_data(struct op_entry *entry, unsigned long val);
-int oprofile_add_data64(struct op_entry *entry, u64 val);
-int oprofile_write_commit(struct op_entry *entry);
-
-#ifdef CONFIG_HW_PERF_EVENTS
-int __init oprofile_perf_init(struct oprofile_operations *ops);
-void oprofile_perf_exit(void);
-char *op_name_from_perf_id(void);
-#else
-static inline int __init oprofile_perf_init(struct oprofile_operations *ops)
-{
- pr_info("oprofile: hardware counters not available\n");
- return -ENODEV;
-}
-static inline void oprofile_perf_exit(void) { }
-#endif /* CONFIG_HW_PERF_EVENTS */
-
-#endif /* OPROFILE_H */
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index 659045046468..1d3be1a2204c 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -3,14 +3,13 @@
#define __LINUX_OVERFLOW_H
#include <linux/compiler.h>
+#include <linux/limits.h>
+#include <linux/const.h>
/*
- * In the fallback code below, we need to compute the minimum and
- * maximum values representable in a given type. These macros may also
- * be useful elsewhere, so we provide them outside the
- * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
- *
- * It would seem more obvious to do something like
+ * We need to compute the minimum and maximum values representable in a given
+ * type. These macros may also be useful elsewhere. It would seem more obvious
+ * to do something like:
*
* #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
* #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
@@ -31,7 +30,6 @@
* https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
* credit to Christian Biere.
*/
-#define is_signed_type(type) (((type)(-1)) < (type)1)
#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
#define type_min(T) ((T)((T)-type_max(T)-(T)1))
@@ -43,191 +41,82 @@
#define is_non_negative(a) ((a) > 0 || (a) == 0)
#define is_negative(a) (!(is_non_negative(a)))
-#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
-/*
- * For simplicity and code hygiene, the fallback code below insists on
- * a, b and *d having the same type (similar to the min() and max()
- * macros), whereas gcc's type-generic overflow checkers accept
- * different types. Hence we don't just make check_add_overflow an
- * alias for __builtin_add_overflow, but add type checks similar to
- * below.
- */
-#define check_add_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- __builtin_add_overflow(__a, __b, __d); \
-})
-
-#define check_sub_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- __builtin_sub_overflow(__a, __b, __d); \
-})
-
-#define check_mul_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- __builtin_mul_overflow(__a, __b, __d); \
-})
-
-#else
-
-
-/* Checking for unsigned overflow is relatively easy without causing UB. */
-#define __unsigned_add_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = __a + __b; \
- *__d < __a; \
-})
-#define __unsigned_sub_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = __a - __b; \
- __a < __b; \
-})
-/*
- * If one of a or b is a compile-time constant, this avoids a division.
- */
-#define __unsigned_mul_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = __a * __b; \
- __builtin_constant_p(__b) ? \
- __b > 0 && __a > type_max(typeof(__a)) / __b : \
- __a > 0 && __b > type_max(typeof(__b)) / __a; \
-})
-
/*
- * For signed types, detecting overflow is much harder, especially if
- * we want to avoid UB. But the interface of these macros is such that
- * we must provide a result in *d, and in fact we must produce the
- * result promised by gcc's builtins, which is simply the possibly
- * wrapped-around value. Fortunately, we can just formally do the
- * operations in the widest relevant unsigned type (u64) and then
- * truncate the result - gcc is smart enough to generate the same code
- * with and without the (u64) casts.
+ * Allows for effectively applying __must_check to a macro, so we keep
+ * the type-agnostic benefits of the macros while also being able to
+ * enforce that the return value is, in fact, checked.
*/
+static inline bool __must_check __must_check_overflow(bool overflow)
+{
+ return unlikely(overflow);
+}
-/*
- * Adding two signed integers can overflow only if they have the same
- * sign, and overflow has happened iff the result has the opposite
- * sign.
+/**
+ * check_add_overflow() - Calculate addition with overflow checking
+ * @a: first addend
+ * @b: second addend
+ * @d: pointer to store sum
+ *
+ * Returns 0 on success.
+ *
+ * *@d holds the results of the attempted addition, but is not considered
+ * "safe for use" on a non-zero return value, which indicates that the
+ * sum has overflowed or been truncated.
*/
-#define __signed_add_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = (u64)__a + (u64)__b; \
- (((~(__a ^ __b)) & (*__d ^ __a)) \
- & type_min(typeof(__a))) != 0; \
-})
+#define check_add_overflow(a, b, d) \
+ __must_check_overflow(__builtin_add_overflow(a, b, d))
-/*
- * Subtraction is similar, except that overflow can now happen only
- * when the signs are opposite. In this case, overflow has happened if
- * the result has the opposite sign of a.
+/**
+ * check_sub_overflow() - Calculate subtraction with overflow checking
+ * @a: minuend; value to subtract from
+ * @b: subtrahend; value to subtract from @a
+ * @d: pointer to store difference
+ *
+ * Returns 0 on success.
+ *
+ * *@d holds the results of the attempted subtraction, but is not considered
+ * "safe for use" on a non-zero return value, which indicates that the
+ * difference has underflowed or been truncated.
*/
-#define __signed_sub_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = (u64)__a - (u64)__b; \
- ((((__a ^ __b)) & (*__d ^ __a)) \
- & type_min(typeof(__a))) != 0; \
-})
+#define check_sub_overflow(a, b, d) \
+ __must_check_overflow(__builtin_sub_overflow(a, b, d))
-/*
- * Signed multiplication is rather hard. gcc always follows C99, so
- * division is truncated towards 0. This means that we can write the
- * overflow check like this:
+/**
+ * check_mul_overflow() - Calculate multiplication with overflow checking
+ * @a: first factor
+ * @b: second factor
+ * @d: pointer to store product
*
- * (a > 0 && (b > MAX/a || b < MIN/a)) ||
- * (a < -1 && (b > MIN/a || b < MAX/a) ||
- * (a == -1 && b == MIN)
+ * Returns 0 on success.
*
- * The redundant casts of -1 are to silence an annoying -Wtype-limits
- * (included in -Wextra) warning: When the type is u8 or u16, the
- * __b_c_e in check_mul_overflow obviously selects
- * __unsigned_mul_overflow, but unfortunately gcc still parses this
- * code and warns about the limited range of __b.
+ * *@d holds the results of the attempted multiplication, but is not
+ * considered "safe for use" on a non-zero return value, which indicates
+ * that the product has overflowed or been truncated.
*/
+#define check_mul_overflow(a, b, d) \
+ __must_check_overflow(__builtin_mul_overflow(a, b, d))
-#define __signed_mul_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- typeof(a) __tmax = type_max(typeof(a)); \
- typeof(a) __tmin = type_min(typeof(a)); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = (u64)__a * (u64)__b; \
- (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \
- (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \
- (__b == (typeof(__b))-1 && __a == __tmin); \
-})
-
-
-#define check_add_overflow(a, b, d) \
- __builtin_choose_expr(is_signed_type(typeof(a)), \
- __signed_add_overflow(a, b, d), \
- __unsigned_add_overflow(a, b, d))
-
-#define check_sub_overflow(a, b, d) \
- __builtin_choose_expr(is_signed_type(typeof(a)), \
- __signed_sub_overflow(a, b, d), \
- __unsigned_sub_overflow(a, b, d))
-
-#define check_mul_overflow(a, b, d) \
- __builtin_choose_expr(is_signed_type(typeof(a)), \
- __signed_mul_overflow(a, b, d), \
- __unsigned_mul_overflow(a, b, d))
-
-
-#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
-
-/** check_shl_overflow() - Calculate a left-shifted value and check overflow
- *
+/**
+ * check_shl_overflow() - Calculate a left-shifted value and check overflow
* @a: Value to be shifted
* @s: How many bits left to shift
* @d: Pointer to where to store the result
*
* Computes *@d = (@a << @s)
*
- * Returns true if '*d' cannot hold the result or when 'a << s' doesn't
+ * Returns true if '*@d' cannot hold the result or when '@a << @s' doesn't
* make sense. Example conditions:
- * - 'a << s' causes bits to be lost when stored in *d.
- * - 's' is garbage (e.g. negative) or so large that the result of
- * 'a << s' is guaranteed to be 0.
- * - 'a' is negative.
- * - 'a << s' sets the sign bit, if any, in '*d'.
*
- * '*d' will hold the results of the attempted shift, but is not
- * considered "safe for use" if false is returned.
+ * - '@a << @s' causes bits to be lost when stored in *@d.
+ * - '@s' is garbage (e.g. negative) or so large that the result of
+ * '@a << @s' is guaranteed to be 0.
+ * - '@a' is negative.
+ * - '@a << @s' sets the sign bit, if any, in '*@d'.
+ *
+ * '*@d' will hold the results of the attempted shift, but is not
+ * considered "safe for use" if true is returned.
*/
-#define check_shl_overflow(a, s, d) ({ \
+#define check_shl_overflow(a, s, d) __must_check_overflow(({ \
typeof(a) _a = a; \
typeof(s) _s = s; \
typeof(d) _d = d; \
@@ -237,83 +126,124 @@
*_d = (_a_full << _to_shift); \
(_to_shift != _s || is_negative(*_d) || is_negative(_a) || \
(*_d >> _to_shift) != _a); \
-})
+}))
/**
- * array_size() - Calculate size of 2-dimensional array.
- *
- * @a: dimension one
- * @b: dimension two
+ * size_mul() - Calculate size_t multiplication with saturation at SIZE_MAX
+ * @factor1: first factor
+ * @factor2: second factor
*
- * Calculates size of 2-dimensional array: @a * @b.
- *
- * Returns: number of bytes needed to represent the array or SIZE_MAX on
- * overflow.
+ * Returns: calculate @factor1 * @factor2, both promoted to size_t,
+ * with any overflow causing the return value to be SIZE_MAX. The
+ * lvalue must be size_t to avoid implicit type conversion.
*/
-static inline __must_check size_t array_size(size_t a, size_t b)
+static inline size_t __must_check size_mul(size_t factor1, size_t factor2)
{
size_t bytes;
- if (check_mul_overflow(a, b, &bytes))
+ if (check_mul_overflow(factor1, factor2, &bytes))
return SIZE_MAX;
return bytes;
}
/**
- * array3_size() - Calculate size of 3-dimensional array.
+ * size_add() - Calculate size_t addition with saturation at SIZE_MAX
+ * @addend1: first addend
+ * @addend2: second addend
*
- * @a: dimension one
- * @b: dimension two
- * @c: dimension three
- *
- * Calculates size of 3-dimensional array: @a * @b * @c.
- *
- * Returns: number of bytes needed to represent the array or SIZE_MAX on
- * overflow.
+ * Returns: calculate @addend1 + @addend2, both promoted to size_t,
+ * with any overflow causing the return value to be SIZE_MAX. The
+ * lvalue must be size_t to avoid implicit type conversion.
*/
-static inline __must_check size_t array3_size(size_t a, size_t b, size_t c)
+static inline size_t __must_check size_add(size_t addend1, size_t addend2)
{
size_t bytes;
- if (check_mul_overflow(a, b, &bytes))
- return SIZE_MAX;
- if (check_mul_overflow(bytes, c, &bytes))
+ if (check_add_overflow(addend1, addend2, &bytes))
return SIZE_MAX;
return bytes;
}
-/*
- * Compute a*b+c, returning SIZE_MAX on overflow. Internal helper for
- * struct_size() below.
+/**
+ * size_sub() - Calculate size_t subtraction with saturation at SIZE_MAX
+ * @minuend: value to subtract from
+ * @subtrahend: value to subtract from @minuend
+ *
+ * Returns: calculate @minuend - @subtrahend, both promoted to size_t,
+ * with any overflow causing the return value to be SIZE_MAX. For
+ * composition with the size_add() and size_mul() helpers, neither
+ * argument may be SIZE_MAX (or the result will be forced to SIZE_MAX).
+ * The lvalue must be size_t to avoid implicit type conversion.
*/
-static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c)
+static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
{
size_t bytes;
- if (check_mul_overflow(a, b, &bytes))
- return SIZE_MAX;
- if (check_add_overflow(bytes, c, &bytes))
+ if (minuend == SIZE_MAX || subtrahend == SIZE_MAX ||
+ check_sub_overflow(minuend, subtrahend, &bytes))
return SIZE_MAX;
return bytes;
}
/**
- * struct_size() - Calculate size of structure with trailing array.
+ * array_size() - Calculate size of 2-dimensional array.
+ * @a: dimension one
+ * @b: dimension two
+ *
+ * Calculates size of 2-dimensional array: @a * @b.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+#define array_size(a, b) size_mul(a, b)
+
+/**
+ * array3_size() - Calculate size of 3-dimensional array.
+ * @a: dimension one
+ * @b: dimension two
+ * @c: dimension three
+ *
+ * Calculates size of 3-dimensional array: @a * @b * @c.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+#define array3_size(a, b, c) size_mul(size_mul(a, b), c)
+
+/**
+ * flex_array_size() - Calculate size of a flexible array member
+ * within an enclosing structure.
+ * @p: Pointer to the structure.
+ * @member: Name of the flexible array member.
+ * @count: Number of elements in the array.
+ *
+ * Calculates size of a flexible array of @count number of @member
+ * elements, at the end of structure @p.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define flex_array_size(p, member, count) \
+ __builtin_choose_expr(__is_constexpr(count), \
+ (count) * sizeof(*(p)->member) + __must_be_array((p)->member), \
+ size_mul(count, sizeof(*(p)->member) + __must_be_array((p)->member)))
+
+/**
+ * struct_size() - Calculate size of structure with trailing flexible array.
* @p: Pointer to the structure.
* @member: Name of the array member.
- * @n: Number of elements in the array.
+ * @count: Number of elements in the array.
*
* Calculates size of memory needed for structure @p followed by an
- * array of @n @member elements.
+ * array of @count number of @member elements.
*
* Return: number of bytes needed or SIZE_MAX on overflow.
*/
-#define struct_size(p, member, n) \
- __ab_c_size(n, \
- sizeof(*(p)->member) + __must_be_array((p)->member),\
- sizeof(*(p)))
+#define struct_size(p, member, count) \
+ __builtin_choose_expr(__is_constexpr(count), \
+ sizeof(*(p)) + flex_array_size(p, member, count), \
+ size_add(sizeof(*(p)), flex_array_size(p, member, count)))
#endif /* __LINUX_OVERFLOW_H */
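
As context for the overflow.h rework above, a sketch of the saturating and __must_check-enforced helpers; the structure and function names are illustrative assumptions:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct example_buf {
	size_t count;
	u32 items[];			/* flexible array member */
};

static struct example_buf *example_alloc(size_t count)
{
	struct example_buf *buf;

	/*
	 * struct_size() saturates at SIZE_MAX on overflow and kmalloc()
	 * rejects SIZE_MAX, so a hostile 'count' cannot wrap the size.
	 */
	buf = kmalloc(struct_size(buf, items, count), GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->count = count;
	return buf;
}

static int example_sum(u32 a, u32 b, u32 *out)
{
	/* Ignoring the result now triggers a __must_check warning. */
	if (check_add_overflow(a, b, out))
		return -EOVERFLOW;
	return 0;
}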
diff --git a/include/linux/packing.h b/include/linux/packing.h
index 54667735cc67..8d6571feb95d 100644
--- a/include/linux/packing.h
+++ b/include/linux/packing.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2016-2018, NXP Semiconductors
+ * Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
*/
#ifndef _LINUX_PACKING_H
diff --git a/include/linux/padata.h b/include/linux/padata.h
index a0d8b41850b2..495b16b6b4d7 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -4,11 +4,15 @@
*
* Copyright (C) 2008, 2009 secunet Security Networks AG
* Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * Copyright (c) 2020 Oracle and/or its affiliates.
+ * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
*/
#ifndef PADATA_H
#define PADATA_H
+#include <linux/refcount.h>
#include <linux/compiler_types.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
@@ -24,7 +28,6 @@
* @list: List entry, to attach to the padata lists.
* @pd: Pointer to the internal control structure.
* @cb_cpu: Callback cpu for serialization.
- * @cpu: Cpu for parallelization.
* @seq_nr: Sequence number of the parallelized data object.
* @info: Used to pass information from the parallel to the serial function.
* @parallel: Parallel execution function.
@@ -34,7 +37,6 @@ struct padata_priv {
struct list_head list;
struct parallel_data *pd;
int cb_cpu;
- int cpu;
unsigned int seq_nr;
int info;
void (*parallel)(struct padata_priv *padata);
@@ -66,21 +68,6 @@ struct padata_serial_queue {
};
/**
- * struct padata_parallel_queue - The percpu padata parallel queue
- *
- * @parallel: List to wait for parallelization.
- * @reorder: List to wait for reordering after parallel processing.
- * @work: work struct for parallelization.
- * @num_obj: Number of objects that are processed by this cpu.
- */
-struct padata_parallel_queue {
- struct padata_list parallel;
- struct padata_list reorder;
- struct work_struct work;
- atomic_t num_obj;
-};
-
-/**
* struct padata_cpumask - The cpumasks for the parallel/serial workers
*
* @pcpu: cpumask for the parallel workers.
@@ -96,7 +83,7 @@ struct padata_cpumask {
* that depends on the cpumask in use.
*
* @ps: padata_shell object.
- * @pqueue: percpu padata queues used for parallelization.
+ * @reorder_list: percpu reorder lists
* @squeue: percpu padata queues used for serialization.
* @refcnt: Number of objects holding a reference on this parallel_data.
* @seq_nr: Sequence number of the parallelized data object.
@@ -108,10 +95,10 @@ struct padata_cpumask {
*/
struct parallel_data {
struct padata_shell *ps;
- struct padata_parallel_queue __percpu *pqueue;
+ struct padata_list __percpu *reorder_list;
struct padata_serial_queue __percpu *squeue;
- atomic_t refcnt;
- atomic_t seq_nr;
+ refcount_t refcnt;
+ unsigned int seq_nr;
unsigned int processed;
int cpu;
struct padata_cpumask cpumask;
@@ -137,25 +124,50 @@ struct padata_shell {
};
/**
+ * struct padata_mt_job - represents one multithreaded job
+ *
+ * @thread_fn: Called for each chunk of work that a padata thread does.
+ * @fn_arg: The thread function argument.
+ * @start: The start of the job (units are job-specific).
+ * @size: size of this node's work (units are job-specific).
+ * @align: Ranges passed to the thread function fall on this boundary, with the
+ * possible exceptions of the beginning and end of the job.
+ * @min_chunk: The minimum chunk size in job-specific units. This allows
+ * the client to communicate the minimum amount of work that's
+ * appropriate for one worker thread to do at once.
+ * @max_threads: Max threads to use for the job; the actual number may be
+ * less, depending on task size and minimum chunk size.
+ */
+struct padata_mt_job {
+ void (*thread_fn)(unsigned long start, unsigned long end, void *arg);
+ void *fn_arg;
+ unsigned long start;
+ unsigned long size;
+ unsigned long align;
+ unsigned long min_chunk;
+ int max_threads;
+};
+
+/**
* struct padata_instance - The overall control structure.
*
- * @node: Used by CPU hotplug.
+ * @cpu_online_node: Linkage for CPU online callback.
+ * @cpu_dead_node: Linkage for CPU offline callback.
* @parallel_wq: The workqueue used for parallel work.
* @serial_wq: The workqueue used for serial work.
* @pslist: List of padata_shell objects attached to this instance.
* @cpumask: User supplied cpumasks for parallel and serial works.
- * @rcpumask: Actual cpumasks based on user cpumask and cpu_online_mask.
* @kobj: padata instance kernel object.
* @lock: padata instance lock.
* @flags: padata flags.
*/
struct padata_instance {
- struct hlist_node node;
+ struct hlist_node cpu_online_node;
+ struct hlist_node cpu_dead_node;
struct workqueue_struct *parallel_wq;
struct workqueue_struct *serial_wq;
struct list_head pslist;
struct padata_cpumask cpumask;
- struct padata_cpumask rcpumask;
struct kobject kobj;
struct mutex lock;
u8 flags;
@@ -164,15 +176,20 @@ struct padata_instance {
#define PADATA_INVALID 4
};
-extern struct padata_instance *padata_alloc_possible(const char *name);
+#ifdef CONFIG_PADATA
+extern void __init padata_init(void);
+#else
+static inline void __init padata_init(void) {}
+#endif
+
+extern struct padata_instance *padata_alloc(const char *name);
extern void padata_free(struct padata_instance *pinst);
extern struct padata_shell *padata_alloc_shell(struct padata_instance *pinst);
extern void padata_free_shell(struct padata_shell *ps);
extern int padata_do_parallel(struct padata_shell *ps,
struct padata_priv *padata, int *cb_cpu);
extern void padata_do_serial(struct padata_priv *padata);
+extern void __init padata_do_multithreaded(struct padata_mt_job *job);
extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
cpumask_var_t cpumask);
-extern int padata_start(struct padata_instance *pinst);
-extern void padata_stop(struct padata_instance *pinst);
#endif
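
As context for the padata.h additions above, a sketch of a multithreaded init job; the chunk size and thread cap are illustrative assumptions. padata_do_multithreaded() is __init, so the caller must be too:

#include <linux/padata.h>

static void __init example_init_range(unsigned long start, unsigned long end,
				      void *arg)
{
	/* Initialise items [start, end); each range runs on one thread. */
}

static void __init example_init_all(unsigned long nr_items)
{
	struct padata_mt_job job = {
		.thread_fn	= example_init_range,
		.fn_arg		= NULL,
		.start		= 0,
		.size		= nr_items,
		.align		= 1,	/* no boundary requirement */
		.min_chunk	= 1024,	/* illustrative per-thread minimum */
		.max_threads	= 4,	/* illustrative cap */
	};

	padata_do_multithreaded(&job);
}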
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
index 71283739ffd2..7d79818dc065 100644
--- a/include/linux/page-flags-layout.h
+++ b/include/linux/page-flags-layout.h
@@ -21,16 +21,17 @@
#elif MAX_NR_ZONES <= 8
#define ZONES_SHIFT 3
#else
-#error ZONES_SHIFT -- too many zones configured adjust calculation
+#error ZONES_SHIFT "Too many zones configured"
#endif
+#define ZONES_WIDTH ZONES_SHIFT
+
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
-
-/* SECTION_SHIFT #bits space required to store a section # */
#define SECTIONS_SHIFT (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)
-
-#endif /* CONFIG_SPARSEMEM */
+#else
+#define SECTIONS_SHIFT 0
+#endif
#ifndef BUILD_VDSO32_64
/*
@@ -54,17 +55,29 @@
#define SECTIONS_WIDTH 0
#endif
-#define ZONES_WIDTH ZONES_SHIFT
-
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_SHIFT \
+ <= BITS_PER_LONG - NR_PAGEFLAGS
#define NODES_WIDTH NODES_SHIFT
-#else
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#elif defined(CONFIG_SPARSEMEM_VMEMMAP)
#error "Vmemmap: No space for nodes field in page flags"
-#endif
+#else
#define NODES_WIDTH 0
#endif
+/*
+ * Note that this #define MUST have a value so that it can be tested with
+ * the IS_ENABLED() macro.
+ */
+#if NODES_SHIFT != 0 && NODES_WIDTH == 0
+#define NODE_NOT_IN_PAGE_FLAGS 1
+#endif
+
+#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
+#define KASAN_TAG_WIDTH 8
+#else
+#define KASAN_TAG_WIDTH 0
+#endif
+
#ifdef CONFIG_NUMA_BALANCING
#define LAST__PID_SHIFT 8
#define LAST__PID_MASK ((1 << LAST__PID_SHIFT)-1)
@@ -77,35 +90,26 @@
#define LAST_CPUPID_SHIFT 0
#endif
-#ifdef CONFIG_KASAN_SW_TAGS
-#define KASAN_TAG_WIDTH 8
-#else
-#define KASAN_TAG_WIDTH 0
-#endif
-
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \
- <= BITS_PER_LONG - NR_PAGEFLAGS
+#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + \
+ KASAN_TAG_WIDTH + LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
#else
#define LAST_CPUPID_WIDTH 0
#endif
-#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
- > BITS_PER_LONG - NR_PAGEFLAGS
-#error "Not enough bits in page flags"
+#if LAST_CPUPID_SHIFT != 0 && LAST_CPUPID_WIDTH == 0
+#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
#endif
-/*
- * We are going to use the flags for the page to node mapping if its in
- * there. This includes the case where there is no node, so it is implicit.
- */
-#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
-#define NODE_NOT_IN_PAGE_FLAGS
+#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + \
+ KASAN_TAG_WIDTH + LAST_CPUPID_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
+#error "Not enough bits in page flags"
#endif
-#if defined(CONFIG_NUMA_BALANCING) && LAST_CPUPID_WIDTH == 0
-#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
-#endif
+/* see the comment on MAX_NR_TIERS */
+#define LRU_REFS_WIDTH min(__LRU_REFS_WIDTH, BITS_PER_LONG - NR_PAGEFLAGS - \
+ ZONES_WIDTH - LRU_GEN_WIDTH - SECTIONS_WIDTH - \
+ NODES_WIDTH - KASAN_TAG_WIDTH - LAST_CPUPID_WIDTH)
#endif
#endif /* _LINUX_PAGE_FLAGS_LAYOUT */
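
To make the bit-budget check above concrete, a worked example with illustrative values for a 64-bit sparsemem-vmemmap build (every number below is config-dependent, so treat this as an assumption, not a fixed layout):

/*
 *   BITS_PER_LONG     = 64
 *   NR_PAGEFLAGS      = 24   (assumed)
 *   ZONES_WIDTH       = 3
 *   LRU_GEN_WIDTH     = 3    (assumed; 0 without the multi-gen LRU)
 *   SECTIONS_WIDTH    = 0    (vmemmap keeps the section out of page->flags)
 *   NODES_WIDTH       = 10   (assumed)
 *   KASAN_TAG_WIDTH   = 0
 *
 * The cpupid field then fits iff
 *   3 + 3 + 0 + 10 + 0 + LAST_CPUPID_SHIFT <= 64 - 24,
 * i.e. LAST_CPUPID_SHIFT <= 24. Otherwise LAST_CPUPID_WIDTH falls back
 * to 0, LAST_CPUPID_NOT_IN_PAGE_FLAGS is defined, and the cpupid moves
 * out of page->flags (mirroring NODE_NOT_IN_PAGE_FLAGS for nodes).
 */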
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 77de28bfefb0..0b0ae5084e60 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -63,8 +63,10 @@
* page_waitqueue(page) is a wait queue of all tasks waiting for the page
* to become unlocked.
*
- * PG_uptodate tells whether the page's contents is valid. When a read
- * completes, the page becomes uptodate, unless a disk I/O error happened.
+ * PG_swapbacked is set when a page uses swap as backing storage. These are
+ * usually PageAnon or shmem pages, but note that even anonymous pages
+ * might lose their PG_swapbacked flag when they can simply be dropped
+ * (e.g. as a result of MADV_FREE).
*
* PG_referenced, PG_reclaim are used for page reclaim for anonymous and
* file-backed pagecache (see mm/vmscan.c).
@@ -81,8 +83,7 @@
*/
/*
- * Don't use the *_dontuse flags. Use the macros. Otherwise you'll break
- * locked- and dirty-page accounting.
+ * Don't use the pageflags directly. Use the PageFoo macros.
*
* The page flags field is split into two parts, the main flags area
* which extends from the low bits upwards, and the fields area which
@@ -127,12 +128,29 @@ enum pageflags {
#ifdef CONFIG_MEMORY_FAILURE
PG_hwpoison, /* hardware poisoned page. Don't touch */
#endif
-#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
+#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
PG_young,
PG_idle,
#endif
+#ifdef CONFIG_64BIT
+ PG_arch_2,
+#endif
+#ifdef CONFIG_KASAN_HW_TAGS
+ PG_skip_kasan_poison,
+#endif
__NR_PAGEFLAGS,
+ PG_readahead = PG_reclaim,
+
+ /*
+ * Depending on the way an anonymous folio can be mapped into a page
+ * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
+ * THP), PG_anon_exclusive may be set only for the head page or for
+ * tail pages of an anonymous folio. For now, we only expect it to be
+ * set on tail pages for PTE-mapped THP.
+ */
+ PG_anon_exclusive = PG_mappedtodisk,
+
/* Filesystems */
PG_checked = PG_owner_priv_1,
@@ -159,39 +177,131 @@ enum pageflags {
PG_slob_free = PG_private,
/* Compound pages. Stored in first tail page's flags */
- PG_double_map = PG_private_2,
+ PG_double_map = PG_workingset,
+
+#ifdef CONFIG_MEMORY_FAILURE
+ /*
+ * Compound pages. Stored in first tail page's flags.
+ * Indicates that at least one subpage is hwpoisoned in the
+ * THP.
+ */
+ PG_has_hwpoisoned = PG_error,
+#endif
/* non-lru isolated movable page */
PG_isolated = PG_reclaim,
+
+ /* Only valid for buddy pages. Used to track pages that are reported */
+ PG_reported = PG_uptodate,
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+ /* For self-hosted memmap pages */
+ PG_vmemmap_self_hosted = PG_owner_priv_1,
+#endif
};
+#define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1)
+
#ifndef __GENERATING_BOUNDS_H
-struct page; /* forward declaration */
+#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
+DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
+
+/*
+ * Return the real head page struct iff the @page is a fake head page, otherwise
+ * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
+ */
+static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
+{
+ if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
+ return page;
+
+ /*
+ * Only addresses aligned with PAGE_SIZE of struct page may be fake head
+ * struct pages. The alignment check aims to avoid accessing the fields
+ * (e.g. compound_head) of @page[1], which can avoid touching a (possibly)
+ * cold cacheline in some cases.
+ */
+ if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
+ test_bit(PG_head, &page->flags)) {
+ /*
+ * We can safely access the fields of @page[1] when PG_head is set,
+ * because the @page is a compound page composed of at least
+ * two contiguous pages.
+ */
+ unsigned long head = READ_ONCE(page[1].compound_head);
+
+ if (likely(head & 1))
+ return (const struct page *)(head - 1);
+ }
+ return page;
+}
+#else
+static inline const struct page *page_fixed_fake_head(const struct page *page)
+{
+ return page;
+}
+#endif
+
+static __always_inline int page_is_fake_head(struct page *page)
+{
+ return page_fixed_fake_head(page) != page;
+}
-static inline struct page *compound_head(struct page *page)
+static inline unsigned long _compound_head(const struct page *page)
{
unsigned long head = READ_ONCE(page->compound_head);
if (unlikely(head & 1))
- return (struct page *) (head - 1);
- return page;
+ return head - 1;
+ return (unsigned long)page_fixed_fake_head(page);
}
+#define compound_head(page) ((typeof(page))_compound_head(page))
+
+/**
+ * page_folio - Converts from page to folio.
+ * @p: The page.
+ *
+ * Every page is part of a folio. This function cannot be called on a
+ * NULL pointer.
+ *
+ * Context: Neither a reference nor a lock is required on @p. If the caller
+ * does not hold a reference, this call may race with a folio split, so
+ * it should re-check the folio still contains this page after gaining
+ * a reference on the folio.
+ * Return: The folio which contains this page.
+ */
+#define page_folio(p) (_Generic((p), \
+ const struct page *: (const struct folio *)_compound_head(p), \
+ struct page *: (struct folio *)_compound_head(p)))
+
+/**
+ * folio_page - Return a page from a folio.
+ * @folio: The folio.
+ * @n: The page number to return.
+ *
+ * @n is relative to the start of the folio. This function does not
+ * check that the page number lies within @folio; the caller is presumed
+ * to have a reference to the page.
+ */
+#define folio_page(folio, n) nth_page(&(folio)->page, n)
+
static __always_inline int PageTail(struct page *page)
{
- return READ_ONCE(page->compound_head) & 1;
+ return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}
static __always_inline int PageCompound(struct page *page)
{
- return test_bit(PG_head, &page->flags) || PageTail(page);
+ return test_bit(PG_head, &page->flags) ||
+ READ_ONCE(page->compound_head) & 1;
}
#define PAGE_POISON_PATTERN -1l
static inline int PagePoisoned(const struct page *page)
{
- return page->flags == PAGE_POISON_PATTERN;
+ return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}
#ifdef CONFIG_DEBUG_VM
@@ -202,6 +312,15 @@ static inline void page_init_poison(struct page *page, size_t size)
}
#endif
+static unsigned long *folio_flags(struct folio *folio, unsigned n)
+{
+ struct page *page = &folio->page;
+
+ VM_BUG_ON_PGFLAGS(PageTail(page), page);
+ VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
+ return &page[n].flags;
+}
+
/*
* Page flags policies wrt compound pages
*
@@ -224,6 +343,9 @@ static inline void page_init_poison(struct page *page, size_t size)
*
* PF_NO_COMPOUND:
* the page flag is not relevant for compound pages.
+ *
+ * PF_SECOND:
+ * the page flag is stored in the first tail page.
*/
#define PF_POISONED_CHECK(page) ({ \
VM_BUG_ON_PGFLAGS(PagePoisoned(page), page); \
@@ -239,37 +361,68 @@ static inline void page_init_poison(struct page *page, size_t size)
#define PF_NO_COMPOUND(page, enforce) ({ \
VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \
PF_POISONED_CHECK(page); })
+#define PF_SECOND(page, enforce) ({ \
+ VM_BUG_ON_PGFLAGS(!PageHead(page), page); \
+ PF_POISONED_CHECK(&page[1]); })
+
+/* Which page is the flag stored in */
+#define FOLIO_PF_ANY 0
+#define FOLIO_PF_HEAD 0
+#define FOLIO_PF_ONLY_HEAD 0
+#define FOLIO_PF_NO_TAIL 0
+#define FOLIO_PF_NO_COMPOUND 0
+#define FOLIO_PF_SECOND 1
/*
* Macros to create function definitions for page flags
*/
#define TESTPAGEFLAG(uname, lname, policy) \
+static __always_inline bool folio_test_##lname(struct folio *folio) \
+{ return test_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int Page##uname(struct page *page) \
- { return test_bit(PG_##lname, &policy(page, 0)->flags); }
+{ return test_bit(PG_##lname, &policy(page, 0)->flags); }
#define SETPAGEFLAG(uname, lname, policy) \
+static __always_inline \
+void folio_set_##lname(struct folio *folio) \
+{ set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline void SetPage##uname(struct page *page) \
- { set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ set_bit(PG_##lname, &policy(page, 1)->flags); }
#define CLEARPAGEFLAG(uname, lname, policy) \
+static __always_inline \
+void folio_clear_##lname(struct folio *folio) \
+{ clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline void ClearPage##uname(struct page *page) \
- { clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ clear_bit(PG_##lname, &policy(page, 1)->flags); }
#define __SETPAGEFLAG(uname, lname, policy) \
+static __always_inline \
+void __folio_set_##lname(struct folio *folio) \
+{ __set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline void __SetPage##uname(struct page *page) \
- { __set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ __set_bit(PG_##lname, &policy(page, 1)->flags); }
#define __CLEARPAGEFLAG(uname, lname, policy) \
+static __always_inline \
+void __folio_clear_##lname(struct folio *folio) \
+{ __clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline void __ClearPage##uname(struct page *page) \
- { __clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }
#define TESTSETFLAG(uname, lname, policy) \
+static __always_inline \
+bool folio_test_set_##lname(struct folio *folio) \
+{ return test_and_set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestSetPage##uname(struct page *page) \
- { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
#define TESTCLEARFLAG(uname, lname, policy) \
+static __always_inline \
+bool folio_test_clear_##lname(struct folio *folio) \
+{ return test_and_clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestClearPage##uname(struct page *page) \
- { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
#define PAGEFLAG(uname, lname, policy) \
TESTPAGEFLAG(uname, lname, policy) \
@@ -285,32 +438,40 @@ static __always_inline int TestClearPage##uname(struct page *page) \
TESTSETFLAG(uname, lname, policy) \
TESTCLEARFLAG(uname, lname, policy)
-#define TESTPAGEFLAG_FALSE(uname) \
+#define TESTPAGEFLAG_FALSE(uname, lname) \
+static inline bool folio_test_##lname(const struct folio *folio) { return false; } \
static inline int Page##uname(const struct page *page) { return 0; }
-#define SETPAGEFLAG_NOOP(uname) \
+#define SETPAGEFLAG_NOOP(uname, lname) \
+static inline void folio_set_##lname(struct folio *folio) { } \
static inline void SetPage##uname(struct page *page) { }
-#define CLEARPAGEFLAG_NOOP(uname) \
+#define CLEARPAGEFLAG_NOOP(uname, lname) \
+static inline void folio_clear_##lname(struct folio *folio) { } \
static inline void ClearPage##uname(struct page *page) { }
-#define __CLEARPAGEFLAG_NOOP(uname) \
+#define __CLEARPAGEFLAG_NOOP(uname, lname) \
+static inline void __folio_clear_##lname(struct folio *folio) { } \
static inline void __ClearPage##uname(struct page *page) { }
-#define TESTSETFLAG_FALSE(uname) \
+#define TESTSETFLAG_FALSE(uname, lname) \
+static inline bool folio_test_set_##lname(struct folio *folio) \
+{ return 0; } \
static inline int TestSetPage##uname(struct page *page) { return 0; }
-#define TESTCLEARFLAG_FALSE(uname) \
+#define TESTCLEARFLAG_FALSE(uname, lname) \
+static inline bool folio_test_clear_##lname(struct folio *folio) \
+{ return 0; } \
static inline int TestClearPage##uname(struct page *page) { return 0; }
-#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname) \
- SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)
+#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname) \
+ SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)
-#define TESTSCFLAG_FALSE(uname) \
- TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
+#define TESTSCFLAG_FALSE(uname, lname) \
+ TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)
__PAGEFLAG(Locked, locked, PF_NO_TAIL)
-PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
+PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
@@ -318,6 +479,7 @@ PAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
+ TESTCLEARFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
@@ -344,10 +506,9 @@ PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
/*
* Private page markings that may be used by the filesystem that owns the page
* for its own purposes.
- * - PG_private and PG_private_2 cause releasepage() and co to be invoked
+ * - PG_private and PG_private_2 cause release_folio() and co to be invoked
*/
-PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
- __CLEARPAGEFLAG(Private, private, PF_ANY)
+PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
@@ -363,8 +524,8 @@ PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
-PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
- TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)
+PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
+ TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)
#ifdef CONFIG_HIGHMEM
/*
@@ -373,22 +534,25 @@ PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
*/
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
-PAGEFLAG_FALSE(HighMem)
+PAGEFLAG_FALSE(HighMem, highmem)
#endif
#ifdef CONFIG_SWAP
-static __always_inline int PageSwapCache(struct page *page)
+static __always_inline bool folio_test_swapcache(struct folio *folio)
{
-#ifdef CONFIG_THP_SWAP
- page = compound_head(page);
-#endif
- return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
+ return folio_test_swapbacked(folio) &&
+ test_bit(PG_swapcache, folio_flags(folio, 0));
+}
+static __always_inline bool PageSwapCache(struct page *page)
+{
+ return folio_test_swapcache(page_folio(page));
}
+
SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
-PAGEFLAG_FALSE(SwapCache)
+PAGEFLAG_FALSE(SwapCache, swapcache)
#endif
PAGEFLAG(Unevictable, unevictable, PF_HEAD)
@@ -400,37 +564,57 @@ PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
-PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
- TESTSCFLAG_FALSE(Mlocked)
+PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
+ TESTSCFLAG_FALSE(Mlocked, mlocked)
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
-PAGEFLAG_FALSE(Uncached)
+PAGEFLAG_FALSE(Uncached, uncached)
#endif
#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
-extern bool set_hwpoison_free_buddy_page(struct page *page);
+#define MAGIC_HWPOISON 0x48575053U /* HWPS */
+extern void SetPageHWPoisonTakenOff(struct page *page);
+extern void ClearPageHWPoisonTakenOff(struct page *page);
+extern bool take_page_off_buddy(struct page *page);
+extern bool put_page_back_buddy(struct page *page);
#else
-PAGEFLAG_FALSE(HWPoison)
-static inline bool set_hwpoison_free_buddy_page(struct page *page)
-{
- return 0;
-}
+PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif
-#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
+#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif
+#ifdef CONFIG_KASAN_HW_TAGS
+PAGEFLAG(SkipKASanPoison, skip_kasan_poison, PF_HEAD)
+#else
+PAGEFLAG_FALSE(SkipKASanPoison, skip_kasan_poison)
+#endif
+
+/*
+ * PageReported() is used to track reported free pages within the Buddy
+ * allocator. We can use the non-atomic version of the test and set
+ * operations as both should be shielded with the zone lock to prevent
+ * any possible races on the setting or clearing of the bit.
+ */
+__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY)
+#else
+PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
+#endif
+
/*
* On an anonymous page mapped into a user virtual memory area,
* page->mapping points to its anon_vma, not to a struct address_space;
@@ -442,7 +626,7 @@ PAGEFLAG(Idle, idle, PF_ANY)
* structure which KSM associates with that merged page. See ksm.h.
*
* PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
- * page and then page->mapping points a struct address_space.
+ * page and then page->mapping points to a struct movable_operations.
*
* Please note that, confusingly, "page_mapping" refers to the inode
* address_space which maps the page from disk; whereas "page_mapped"
@@ -453,15 +637,36 @@ PAGEFLAG(Idle, idle, PF_ANY)
#define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
+/*
+ * Unlike the flags above, this flag is used only for fsdax mode. It
+ * indicates that this page->mapping is now handling the reflink (CoW)
+ * case.
+ */
+#define PAGE_MAPPING_DAX_COW 0x1
+
+static __always_inline bool folio_mapping_flags(struct folio *folio)
+{
+ return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
+}
+
static __always_inline int PageMappingFlags(struct page *page)
{
return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}
-static __always_inline int PageAnon(struct page *page)
+static __always_inline bool folio_test_anon(struct folio *folio)
{
- page = compound_head(page);
- return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
+ return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
+}
+
+static __always_inline bool PageAnon(struct page *page)
+{
+ return folio_test_anon(page_folio(page));
+}
+
+static __always_inline bool __folio_test_movable(const struct folio *folio)
+{
+ return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
+ PAGE_MAPPING_MOVABLE;
}
static __always_inline int __PageMovable(struct page *page)
@@ -477,30 +682,42 @@ static __always_inline int __PageMovable(struct page *page)
* is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
* anon_vma, but to that page's node of the stable tree.
*/
-static __always_inline int PageKsm(struct page *page)
+static __always_inline bool folio_test_ksm(struct folio *folio)
{
- page = compound_head(page);
- return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
+ return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
PAGE_MAPPING_KSM;
}
+
+static __always_inline bool PageKsm(struct page *page)
+{
+ return folio_test_ksm(page_folio(page));
+}
#else
-TESTPAGEFLAG_FALSE(Ksm)
+TESTPAGEFLAG_FALSE(Ksm, ksm)
#endif
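For reference, the three predicates above decode the same two low bits of folio->mapping; a summary sketch (the 0x1/0x2 values of PAGE_MAPPING_ANON and PAGE_MAPPING_MOVABLE are assumed from the unchanged part of this header):

/*
 * mapping & 0x3 == 0x0 -> file-backed; mapping is a struct address_space
 * mapping & 0x3 == 0x1 -> PageAnon(); mapping is an anon_vma
 * mapping & 0x3 == 0x2 -> __PageMovable(); mapping is movable_operations
 * mapping & 0x3 == 0x3 -> PageKsm(); mapping is a KSM stable-tree node
 */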
u64 stable_page_flags(struct page *page);
-static inline int PageUptodate(struct page *page)
+/**
+ * folio_test_uptodate - Is this folio up to date?
+ * @folio: The folio.
+ *
+ * The uptodate flag is set on a folio when every byte in the folio is
+ * at least as new as the corresponding bytes on storage. Anonymous
+ * and CoW folios are always uptodate. If the folio is not uptodate,
+ * some of the bytes in it may be; see the is_partially_uptodate()
+ * address_space operation.
+ */
+static inline bool folio_test_uptodate(struct folio *folio)
{
- int ret;
- page = compound_head(page);
- ret = test_bit(PG_uptodate, &(page)->flags);
+ bool ret = test_bit(PG_uptodate, folio_flags(folio, 0));
/*
- * Must ensure that the data we read out of the page is loaded
- * _after_ we've loaded page->flags to check for PageUptodate.
- * We can skip the barrier if the page is not uptodate, because
+ * Must ensure that the data we read out of the folio is loaded
+ * _after_ we've loaded folio->flags to check the uptodate bit.
+ * We can skip the barrier if the folio is not uptodate, because
* we wouldn't be reading anything from it.
*
- * See SetPageUptodate() for the other side of the story.
+ * See folio_mark_uptodate() for the other side of the story.
*/
if (ret)
smp_rmb();
@@ -508,46 +725,83 @@ static inline int PageUptodate(struct page *page)
return ret;
}
-static __always_inline void __SetPageUptodate(struct page *page)
+static inline int PageUptodate(struct page *page)
+{
+ return folio_test_uptodate(page_folio(page));
+}
+
+static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
- VM_BUG_ON_PAGE(PageTail(page), page);
smp_wmb();
- __set_bit(PG_uptodate, &page->flags);
+ __set_bit(PG_uptodate, folio_flags(folio, 0));
}
-static __always_inline void SetPageUptodate(struct page *page)
+static __always_inline void folio_mark_uptodate(struct folio *folio)
{
- VM_BUG_ON_PAGE(PageTail(page), page);
/*
* Memory barrier must be issued before setting the PG_uptodate bit,
- * so that all previous stores issued in order to bring the page
- * uptodate are actually visible before PageUptodate becomes true.
+ * so that all previous stores issued in order to bring the folio
+ * uptodate are actually visible before folio_test_uptodate becomes true.
*/
smp_wmb();
- set_bit(PG_uptodate, &page->flags);
+ set_bit(PG_uptodate, folio_flags(folio, 0));
+}
+
+static __always_inline void __SetPageUptodate(struct page *page)
+{
+ __folio_mark_uptodate((struct folio *)page);
+}
+
+static __always_inline void SetPageUptodate(struct page *page)
+{
+ folio_mark_uptodate((struct folio *)page);
}
CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
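The smp_wmb()/smp_rmb() pair above implements the usual publish/consume ordering; a sketch of the two sides (fill_and_publish()/consume_if_uptodate() are hypothetical names, and folio_address() is assumed from <linux/highmem.h>):

/* Writer: data must be globally visible before the uptodate bit is. */
static void fill_and_publish(struct folio *folio, const void *src, size_t len)
{
	memcpy(folio_address(folio), src, len);
	folio_mark_uptodate(folio);		/* smp_wmb(), then set_bit() */
}

/* Reader: only consume the contents after observing the bit. */
static bool consume_if_uptodate(struct folio *folio, void *dst, size_t len)
{
	if (!folio_test_uptodate(folio))	/* test_bit(), then smp_rmb() */
		return false;
	memcpy(dst, folio_address(folio), len);	/* ordered after the flag load */
	return true;
}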
-int test_clear_page_writeback(struct page *page);
-int __test_set_page_writeback(struct page *page, bool keep_write);
+bool __folio_start_writeback(struct folio *folio, bool keep_write);
+bool set_page_writeback(struct page *page);
-#define test_set_page_writeback(page) \
- __test_set_page_writeback(page, false)
-#define test_set_page_writeback_keepwrite(page) \
- __test_set_page_writeback(page, true)
+#define folio_start_writeback(folio) \
+ __folio_start_writeback(folio, false)
+#define folio_start_writeback_keepwrite(folio) \
+ __folio_start_writeback(folio, true)
-static inline void set_page_writeback(struct page *page)
+static inline void set_page_writeback_keepwrite(struct page *page)
{
- test_set_page_writeback(page);
+ folio_start_writeback_keepwrite(page_folio(page));
}
-static inline void set_page_writeback_keepwrite(struct page *page)
+static inline bool test_set_page_writeback(struct page *page)
+{
+ return set_page_writeback(page);
+}
+
+static __always_inline bool folio_test_head(struct folio *folio)
{
- test_set_page_writeback_keepwrite(page);
+ return test_bit(PG_head, folio_flags(folio, FOLIO_PF_ANY));
}
-__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)
+static __always_inline int PageHead(struct page *page)
+{
+ PF_POISONED_CHECK(page);
+ return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
+}
+
+__SETPAGEFLAG(Head, head, PF_ANY)
+__CLEARPAGEFLAG(Head, head, PF_ANY)
+CLEARPAGEFLAG(Head, head, PF_ANY)
+
+/**
+ * folio_test_large() - Does this folio contain more than one page?
+ * @folio: The folio to test.
+ *
+ * Return: True if the folio is larger than one page.
+ */
+static inline bool folio_test_large(struct folio *folio)
+{
+ return folio_test_head(folio);
+}
static __always_inline void set_compound_head(struct page *page, struct page *head)
{
@@ -572,18 +826,15 @@ static inline void ClearPageCompound(struct page *page)
#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
-bool page_huge_active(struct page *page);
-#else
-TESTPAGEFLAG_FALSE(Huge)
-TESTPAGEFLAG_FALSE(HeadHuge)
-
-static inline bool page_huge_active(struct page *page)
+static inline bool folio_test_hugetlb(struct folio *folio)
{
- return 0;
+ return PageHeadHuge(&folio->page);
}
+#else
+TESTPAGEFLAG_FALSE(Huge, hugetlb)
+TESTPAGEFLAG_FALSE(HeadHuge, headhuge)
#endif
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* PageHuge() only returns true for hugetlbfs pages, but not for
@@ -599,6 +850,11 @@ static inline int PageTransHuge(struct page *page)
return PageHead(page);
}
+static inline bool folio_test_transhuge(struct folio *folio)
+{
+ return folio_test_head(folio);
+}
+
/*
* PageTransCompound returns true for both transparent huge pages
* and hugetlbfs pages, so it should only be called when it's known
@@ -610,43 +866,6 @@ static inline int PageTransCompound(struct page *page)
}
/*
- * PageTransCompoundMap is the same as PageTransCompound, but it also
- * guarantees the primary MMU has the entire compound page mapped
- * through pmd_trans_huge, which in turn guarantees the secondary MMUs
- * can also map the entire compound page. This allows the secondary
- * MMUs to call get_user_pages() only once for each compound page and
- * to immediately map the entire compound page with a single secondary
- * MMU fault. If there will be a pmd split later, the secondary MMUs
- * will get an update through the MMU notifier invalidation through
- * split_huge_pmd().
- *
- * Unlike PageTransCompound, this is safe to be called only while
- * split_huge_pmd() cannot run from under us, like if protected by the
- * MMU notifier, otherwise it may result in page->_mapcount check false
- * positives.
- *
- * We have to treat page cache THP differently since every subpage of it
- * would get _mapcount inc'ed once it is PMD mapped. But, it may be PTE
- * mapped in the current process so comparing subpage's _mapcount to
- * compound_mapcount to filter out PTE mapped case.
- */
-static inline int PageTransCompoundMap(struct page *page)
-{
- struct page *head;
-
- if (!PageTransCompound(page))
- return 0;
-
- if (PageAnon(page))
- return atomic_read(&page->_mapcount) < 0;
-
- head = compound_head(page);
- /* File THP is PMD mapped and not PTE mapped */
- return atomic_read(&page->_mapcount) ==
- atomic_read(compound_mapcount_ptr(head));
-}
-
-/*
* PageTransTail returns true for both transparent huge pages
* and hugetlbfs pages, so it should only be called when it's known
* that hugetlbfs pages aren't involved.
@@ -669,44 +888,43 @@ static inline int PageTransTail(struct page *page)
*
* See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
*/
-static inline int PageDoubleMap(struct page *page)
-{
- return PageHead(page) && test_bit(PG_double_map, &page[1].flags);
-}
-
-static inline void SetPageDoubleMap(struct page *page)
-{
- VM_BUG_ON_PAGE(!PageHead(page), page);
- set_bit(PG_double_map, &page[1].flags);
-}
+PAGEFLAG(DoubleMap, double_map, PF_SECOND)
+ TESTSCFLAG(DoubleMap, double_map, PF_SECOND)
+#else
+TESTPAGEFLAG_FALSE(TransHuge, transhuge)
+TESTPAGEFLAG_FALSE(TransCompound, transcompound)
+TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
+TESTPAGEFLAG_FALSE(TransTail, transtail)
+PAGEFLAG_FALSE(DoubleMap, double_map)
+ TESTSCFLAG_FALSE(DoubleMap, double_map)
+#endif
-static inline void ClearPageDoubleMap(struct page *page)
-{
- VM_BUG_ON_PAGE(!PageHead(page), page);
- clear_bit(PG_double_map, &page[1].flags);
-}
-static inline int TestSetPageDoubleMap(struct page *page)
-{
- VM_BUG_ON_PAGE(!PageHead(page), page);
- return test_and_set_bit(PG_double_map, &page[1].flags);
-}
+#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
+/*
+ * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
+ * compound page.
+ *
+ * This flag is set by the hwpoison handler. It is cleared by THP split or
+ * page free.
+ */
+PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
+ TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
+#else
+PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
+ TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
+#endif
-static inline int TestClearPageDoubleMap(struct page *page)
+/*
+ * Check if a page is currently marked HWPoisoned. Note that this check is
+ * best effort only and inherently racy: there is no way to synchronize with
+ * failing hardware.
+ */
+static inline bool is_page_hwpoison(struct page *page)
{
- VM_BUG_ON_PAGE(!PageHead(page), page);
- return test_and_clear_bit(PG_double_map, &page[1].flags);
+ if (PageHWPoison(page))
+ return true;
+ return PageHuge(page) && PageHWPoison(compound_head(page));
}
-#else
-TESTPAGEFLAG_FALSE(TransHuge)
-TESTPAGEFLAG_FALSE(TransCompound)
-TESTPAGEFLAG_FALSE(TransCompoundMap)
-TESTPAGEFLAG_FALSE(TransTail)
-PAGEFLAG_FALSE(DoubleMap)
- TESTSETFLAG_FALSE(DoubleMap)
- TESTCLEARFLAG_FALSE(DoubleMap)
-#endif
-
/*
* For pages that are never mapped to userspace (and aren't PageSlab),
* page_type may be used. Because it is initialised to -1, we invert the
@@ -721,9 +939,8 @@ PAGEFLAG_FALSE(DoubleMap)
#define PAGE_MAPCOUNT_RESERVE -128
#define PG_buddy 0x00000080
#define PG_offline 0x00000100
-#define PG_kmemcg 0x00000200
-#define PG_table 0x00000400
-#define PG_guard 0x00000800
+#define PG_table 0x00000200
+#define PG_guard 0x00000400
#define PageType(page, flag) \
((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
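A stand-alone illustration of the inverted encoding (the PAGE_TYPE_BASE value of 0xf0000000 is assumed from the unchanged part of this header; page_type starts at -1, and giving a page a type clears that type's bit):

#include <assert.h>

#define DEMO_PAGE_TYPE_BASE	0xf0000000u	/* assumed from page-flags.h */
#define DEMO_PG_buddy		0x00000080u
#define demo_PageType(word, flag) \
	(((word) & (DEMO_PAGE_TYPE_BASE | (flag))) == DEMO_PAGE_TYPE_BASE)

int main(void)
{
	unsigned int page_type = -1;		/* freshly initialised */

	assert(!demo_PageType(page_type, DEMO_PG_buddy));
	page_type &= ~DEMO_PG_buddy;		/* "SetPageBuddy": clear the bit */
	assert(demo_PageType(page_type, DEMO_PG_buddy));
	page_type |= DEMO_PG_buddy;		/* "ClearPageBuddy": set it back */
	assert(!demo_PageType(page_type, DEMO_PG_buddy));
	return 0;
}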
@@ -761,14 +978,28 @@ PAGE_TYPE_OPS(Buddy, buddy)
* not onlined when onlining the section).
* The content of these pages is effectively stale. Such pages should not
* be touched (read/write/dump/save) except by their owner.
+ *
+ * If a driver wants to allow to offline unmovable PageOffline() pages without
+ * putting them back to the buddy, it can do so via the memory notifier by
+ * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the
+ * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline()
+ * pages (now with a reference count of zero) are treated like free pages,
+ * allowing the containing memory block to get offlined. A driver that
+ * relies on this feature is aware that re-onlining the memory block will
+ * require the pages to be set PageOffline() again instead of handing them
+ * to the buddy via online_page_callback_t.
+ *
+ * There are drivers that mark a page PageOffline() and expect there won't be
+ * any further access to page content. PFN walkers that read content of random
+ * pages should check PageOffline() and synchronize with such drivers using
+ * page_offline_freeze()/page_offline_thaw().
*/
PAGE_TYPE_OPS(Offline, offline)
-/*
- * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
- * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
- */
-PAGE_TYPE_OPS(Kmemcg, kmemcg)
+extern void page_offline_freeze(void);
+extern void page_offline_thaw(void);
+extern void page_offline_begin(void);
+extern void page_offline_end(void);
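A hedged sketch of the driver protocol the comment describes (mydrv_* names are hypothetical; struct memory_notify, MEM_GOING_OFFLINE and MEM_CANCEL_OFFLINE are the existing memory-hotplug notifier interface):

static int mydrv_memory_cb(struct notifier_block *nb,
			   unsigned long action, void *arg)
{
	struct memory_notify *mhp = arg;
	unsigned long pfn;

	for (pfn = mhp->start_pfn; pfn < mhp->start_pfn + mhp->nr_pages; pfn++) {
		struct page *page = pfn_to_online_page(pfn);

		if (!page || !PageOffline(page) || !mydrv_owns(page))
			continue;
		if (action == MEM_GOING_OFFLINE)
			page_ref_dec(page);	/* count 0: treated like free */
		else if (action == MEM_CANCEL_OFFLINE)
			page_ref_inc(page);	/* offlining aborted: reclaim it */
	}
	return NOTIFY_OK;
}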
/*
* Marks pages in use as page tables.
@@ -782,34 +1013,34 @@ PAGE_TYPE_OPS(Guard, guard)
extern bool is_free_buddy_page(struct page *page);
-__PAGEFLAG(Isolated, isolated, PF_ANY);
+PAGEFLAG(Isolated, isolated, PF_ANY);
-/*
- * If network-based swap is enabled, sl*b must keep track of whether pages
- * were allocated from pfmemalloc reserves.
- */
-static inline int PageSlabPfmemalloc(struct page *page)
+static __always_inline int PageAnonExclusive(struct page *page)
{
- VM_BUG_ON_PAGE(!PageSlab(page), page);
- return PageActive(page);
+ VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
+ VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
+ return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}
-static inline void SetPageSlabPfmemalloc(struct page *page)
+static __always_inline void SetPageAnonExclusive(struct page *page)
{
- VM_BUG_ON_PAGE(!PageSlab(page), page);
- SetPageActive(page);
+ VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
+ VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
+ set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}
-static inline void __ClearPageSlabPfmemalloc(struct page *page)
+static __always_inline void ClearPageAnonExclusive(struct page *page)
{
- VM_BUG_ON_PAGE(!PageSlab(page), page);
- __ClearPageActive(page);
+ VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
+ VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
+ clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}
-static inline void ClearPageSlabPfmemalloc(struct page *page)
+static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
- VM_BUG_ON_PAGE(!PageSlab(page), page);
- ClearPageActive(page);
+ VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
+ VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
+ __clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}
#ifdef CONFIG_MMU
@@ -820,25 +1051,25 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
/*
* Flags checked when a page is freed. Pages being freed should not have
- * these flags set. It they are, there is a problem.
+ * these flags set. If they are, there is a problem.
*/
#define PAGE_FLAGS_CHECK_AT_FREE \
(1UL << PG_lru | 1UL << PG_locked | \
1UL << PG_private | 1UL << PG_private_2 | \
1UL << PG_writeback | 1UL << PG_reserved | \
1UL << PG_slab | 1UL << PG_active | \
- 1UL << PG_unevictable | __PG_MLOCKED)
+ 1UL << PG_unevictable | __PG_MLOCKED | LRU_GEN_MASK)
/*
* Flags checked when a page is prepped for return by the page allocator.
- * Pages being prepped should not have these flags set. It they are set,
+ * Pages being prepped should not have these flags set. If they are set,
* there has been a kernel bug or struct page corruption.
*
* __PG_HWPOISON is exceptional because it needs to be kept beyond page's
* alloc-free cycle to prevent from reusing the page.
*/
#define PAGE_FLAGS_CHECK_AT_PREP \
- (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
+ ((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)
#define PAGE_FLAGS_PRIVATE \
(1UL << PG_private | 1UL << PG_private_2)
@@ -854,11 +1085,17 @@ static inline int page_has_private(struct page *page)
return !!(page->flags & PAGE_FLAGS_PRIVATE);
}
+static inline bool folio_has_private(struct folio *folio)
+{
+ return page_has_private(&folio->page);
+}
+
#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
+#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */
#endif /* PAGE_FLAGS_H */
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 572458016331..5456b7be38ae 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -33,8 +33,6 @@ static inline bool is_migrate_isolate(int migratetype)
#define MEMORY_OFFLINE 0x1
#define REPORT_FAILURE 0x2
-struct page *has_unmovable_pages(struct zone *zone, struct page *page,
- int migratetype, int flags);
void set_pageblock_migratetype(struct page *page, int migratetype);
int move_freepages_block(struct zone *zone, struct page *page,
int migratetype, int *num_movable);
@@ -44,7 +42,7 @@ int move_freepages_block(struct zone *zone, struct page *page,
*/
int
start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- unsigned migratetype, int flags);
+ int migratetype, int flags, gfp_t gfp_flags);
/*
* Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
@@ -52,7 +50,7 @@ start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
*/
void
undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- unsigned migratetype);
+ int migratetype);
/*
* Test all pages in [start_pfn, end_pfn) are isolated or not.
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
index bab7e57f659b..c141ea9a95ef 100644
--- a/include/linux/page_counter.h
+++ b/include/linux/page_counter.h
@@ -3,15 +3,17 @@
#define _LINUX_PAGE_COUNTER_H
#include <linux/atomic.h>
+#include <linux/cache.h>
#include <linux/kernel.h>
#include <asm/page.h>
struct page_counter {
+ /*
+ * Make sure 'usage' does not share a cacheline with any other field;
+ * memcg->memory.usage is a hot member of struct mem_cgroup.
+ */
atomic_long_t usage;
- unsigned long min;
- unsigned long low;
- unsigned long max;
- struct page_counter *parent;
+ CACHELINE_PADDING(_pad1_);
/* effective memory.min and memory.min usage tracking */
unsigned long emin;
@@ -23,10 +25,18 @@ struct page_counter {
atomic_long_t low_usage;
atomic_long_t children_low_usage;
- /* legacy */
unsigned long watermark;
unsigned long failcnt;
-};
+
+ /* Keep all the read-mostly fields in a separate cacheline. */
+ CACHELINE_PADDING(_pad2_);
+
+ unsigned long min;
+ unsigned long low;
+ unsigned long high;
+ unsigned long max;
+ struct page_counter *parent;
+} ____cacheline_internodealigned_in_smp;
#if BITS_PER_LONG == 32
#define PAGE_COUNTER_MAX LONG_MAX
@@ -55,6 +65,13 @@ bool page_counter_try_charge(struct page_counter *counter,
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages);
void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages);
+
+static inline void page_counter_set_high(struct page_counter *counter,
+ unsigned long nr_pages)
+{
+ WRITE_ONCE(counter->high, nr_pages);
+}
+
int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages);
int page_counter_memparse(const char *buf, const char *max,
unsigned long *nr_pages);
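A minimal usage sketch of the API above (error handling simplified; on failure, page_counter_try_charge() reports the ancestor counter that hit its limit through the last parameter):

static int charge_demo(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *fail;

	if (!page_counter_try_charge(counter, nr_pages, &fail))
		return -ENOMEM;		/* 'fail' exceeded its max */
	/* ... use the charged pages ... */
	page_counter_uncharge(counter, nr_pages);
	return 0;
}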
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index cfce186f0c4e..22be4582faae 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -19,7 +19,7 @@ struct page_ext_operations {
enum page_ext_flags {
PAGE_EXT_OWNER,
PAGE_EXT_OWNER_ALLOCATED,
-#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
+#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
PAGE_EXT_YOUNG,
PAGE_EXT_IDLE,
#endif
@@ -36,22 +36,33 @@ struct page_ext {
unsigned long flags;
};
+extern bool early_page_ext;
extern unsigned long page_ext_size;
extern void pgdat_page_ext_init(struct pglist_data *pgdat);
+static inline bool early_page_ext_enabled(void)
+{
+ return early_page_ext;
+}
+
#ifdef CONFIG_SPARSEMEM
static inline void page_ext_init_flatmem(void)
{
}
extern void page_ext_init(void);
+static inline void page_ext_init_flatmem_late(void)
+{
+}
#else
extern void page_ext_init_flatmem(void);
+extern void page_ext_init_flatmem_late(void);
static inline void page_ext_init(void)
{
}
#endif
-struct page_ext *lookup_page_ext(const struct page *page);
+extern struct page_ext *page_ext_get(struct page *page);
+extern void page_ext_put(struct page_ext *page_ext);
static inline struct page_ext *page_ext_next(struct page_ext *curr)
{
@@ -63,21 +74,34 @@ static inline struct page_ext *page_ext_next(struct page_ext *curr)
#else /* !CONFIG_PAGE_EXTENSION */
struct page_ext;
-static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
+static inline bool early_page_ext_enabled(void)
{
+ return false;
}
-static inline struct page_ext *lookup_page_ext(const struct page *page)
+static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
{
- return NULL;
}
static inline void page_ext_init(void)
{
}
+static inline void page_ext_init_flatmem_late(void)
+{
+}
+
static inline void page_ext_init_flatmem(void)
{
}
+
+static inline struct page_ext *page_ext_get(struct page *page)
+{
+ return NULL;
+}
+
+static inline void page_ext_put(struct page_ext *page_ext)
+{
+}
#endif /* CONFIG_PAGE_EXTENSION */
#endif /* __LINUX_PAGE_EXT_H */
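The lookup_page_ext() -> page_ext_get()/page_ext_put() conversion turns a bare lookup into a reference-counted pair; every reader now follows the pattern below (sketch, mirroring the page_idle.h changes later in this diff):

static bool page_ext_flag_demo(struct page *page, int flag_nr)
{
	struct page_ext *page_ext = page_ext_get(page);
	bool ret;

	if (unlikely(!page_ext))
		return false;
	ret = test_bit(flag_nr, &page_ext->flags);
	page_ext_put(page_ext);	/* pairs with page_ext_get() */
	return ret;
}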
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
index 1e894d34bdce..5cb7bd2078ec 100644
--- a/include/linux/page_idle.h
+++ b/include/linux/page_idle.h
@@ -6,135 +6,147 @@
#include <linux/page-flags.h>
#include <linux/page_ext.h>
-#ifdef CONFIG_IDLE_PAGE_TRACKING
+#ifdef CONFIG_PAGE_IDLE_FLAG
-#ifdef CONFIG_64BIT
-static inline bool page_is_young(struct page *page)
-{
- return PageYoung(page);
-}
-
-static inline void set_page_young(struct page *page)
-{
- SetPageYoung(page);
-}
-
-static inline bool test_and_clear_page_young(struct page *page)
-{
- return TestClearPageYoung(page);
-}
-
-static inline bool page_is_idle(struct page *page)
-{
- return PageIdle(page);
-}
-
-static inline void set_page_idle(struct page *page)
-{
- SetPageIdle(page);
-}
-
-static inline void clear_page_idle(struct page *page)
-{
- ClearPageIdle(page);
-}
-#else /* !CONFIG_64BIT */
+#ifndef CONFIG_64BIT
/*
* If there is not enough space to store Idle and Young bits in page flags, use
* page ext flags instead.
*/
-extern struct page_ext_operations page_idle_ops;
-
-static inline bool page_is_young(struct page *page)
+static inline bool folio_test_young(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
+ bool page_young;
if (unlikely(!page_ext))
return false;
- return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_young = test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_ext_put(page_ext);
+
+ return page_young;
}
-static inline void set_page_young(struct page *page)
+static inline void folio_set_young(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
if (unlikely(!page_ext))
return;
set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_ext_put(page_ext);
}
-static inline bool test_and_clear_page_young(struct page *page)
+static inline bool folio_test_clear_young(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
+ bool page_young;
if (unlikely(!page_ext))
return false;
- return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_young = test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_ext_put(page_ext);
+
+ return page_young;
}
-static inline bool page_is_idle(struct page *page)
+static inline bool folio_test_idle(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
+ bool page_idle;
if (unlikely(!page_ext))
return false;
- return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_idle = test_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_ext_put(page_ext);
+
+ return page_idle;
}
-static inline void set_page_idle(struct page *page)
+static inline void folio_set_idle(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
if (unlikely(!page_ext))
return;
set_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_ext_put(page_ext);
}
-static inline void clear_page_idle(struct page *page)
+static inline void folio_clear_idle(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
if (unlikely(!page_ext))
return;
clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_ext_put(page_ext);
}
-#endif /* CONFIG_64BIT */
+#endif /* !CONFIG_64BIT */
-#else /* !CONFIG_IDLE_PAGE_TRACKING */
+#else /* !CONFIG_PAGE_IDLE_FLAG */
-static inline bool page_is_young(struct page *page)
+static inline bool folio_test_young(struct folio *folio)
+{
+ return false;
+}
+
+static inline void folio_set_young(struct folio *folio)
+{
+}
+
+static inline bool folio_test_clear_young(struct folio *folio)
+{
+ return false;
+}
+
+static inline bool folio_test_idle(struct folio *folio)
{
return false;
}
+static inline void folio_set_idle(struct folio *folio)
+{
+}
+
+static inline void folio_clear_idle(struct folio *folio)
+{
+}
+
+#endif /* CONFIG_PAGE_IDLE_FLAG */
+
+static inline bool page_is_young(struct page *page)
+{
+ return folio_test_young(page_folio(page));
+}
+
static inline void set_page_young(struct page *page)
{
+ folio_set_young(page_folio(page));
}
static inline bool test_and_clear_page_young(struct page *page)
{
- return false;
+ return folio_test_clear_young(page_folio(page));
}
static inline bool page_is_idle(struct page *page)
{
- return false;
+ return folio_test_idle(page_folio(page));
}
static inline void set_page_idle(struct page *page)
{
+ folio_set_idle(page_folio(page));
}
static inline void clear_page_idle(struct page *page)
{
+ folio_clear_idle(page_folio(page));
}
-
-#endif /* CONFIG_IDLE_PAGE_TRACKING */
-
#endif /* _LINUX_MM_PAGE_IDLE_H */
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index 8679ccd722e8..119a0c9d2a8b 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -8,51 +8,51 @@
extern struct static_key_false page_owner_inited;
extern struct page_ext_operations page_owner_ops;
-extern void __reset_page_owner(struct page *page, unsigned int order);
+extern void __reset_page_owner(struct page *page, unsigned short order);
extern void __set_page_owner(struct page *page,
- unsigned int order, gfp_t gfp_mask);
-extern void __split_page_owner(struct page *page, unsigned int order);
-extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
+ unsigned short order, gfp_t gfp_mask);
+extern void __split_page_owner(struct page *page, unsigned int nr);
+extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
extern void __set_page_owner_migrate_reason(struct page *page, int reason);
-extern void __dump_page_owner(struct page *page);
+extern void __dump_page_owner(const struct page *page);
extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
pg_data_t *pgdat, struct zone *zone);
-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void reset_page_owner(struct page *page, unsigned short order)
{
if (static_branch_unlikely(&page_owner_inited))
__reset_page_owner(page, order);
}
static inline void set_page_owner(struct page *page,
- unsigned int order, gfp_t gfp_mask)
+ unsigned short order, gfp_t gfp_mask)
{
if (static_branch_unlikely(&page_owner_inited))
__set_page_owner(page, order, gfp_mask);
}
-static inline void split_page_owner(struct page *page, unsigned int order)
+static inline void split_page_owner(struct page *page, unsigned int nr)
{
if (static_branch_unlikely(&page_owner_inited))
- __split_page_owner(page, order);
+ __split_page_owner(page, nr);
}
-static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+static inline void folio_copy_owner(struct folio *newfolio, struct folio *old)
{
if (static_branch_unlikely(&page_owner_inited))
- __copy_page_owner(oldpage, newpage);
+ __folio_copy_owner(newfolio, old);
}
static inline void set_page_owner_migrate_reason(struct page *page, int reason)
{
if (static_branch_unlikely(&page_owner_inited))
__set_page_owner_migrate_reason(page, reason);
}
-static inline void dump_page_owner(struct page *page)
+static inline void dump_page_owner(const struct page *page)
{
if (static_branch_unlikely(&page_owner_inited))
__dump_page_owner(page);
}
#else
-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void reset_page_owner(struct page *page, unsigned short order)
{
}
static inline void set_page_owner(struct page *page,
@@ -60,16 +60,16 @@ static inline void set_page_owner(struct page *page,
{
}
static inline void split_page_owner(struct page *page,
- unsigned int order)
+ unsigned short order)
{
}
-static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)
{
}
static inline void set_page_owner_migrate_reason(struct page *page, int reason)
{
}
-static inline void dump_page_owner(struct page *page)
+static inline void dump_page_owner(const struct page *page)
{
}
#endif /* CONFIG_PAGE_OWNER */
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index 14d14beb1f7f..2e677e6ad09f 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -7,13 +7,13 @@
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>
-extern struct tracepoint __tracepoint_page_ref_set;
-extern struct tracepoint __tracepoint_page_ref_mod;
-extern struct tracepoint __tracepoint_page_ref_mod_and_test;
-extern struct tracepoint __tracepoint_page_ref_mod_and_return;
-extern struct tracepoint __tracepoint_page_ref_mod_unless;
-extern struct tracepoint __tracepoint_page_ref_freeze;
-extern struct tracepoint __tracepoint_page_ref_unfreeze;
+DECLARE_TRACEPOINT(page_ref_set);
+DECLARE_TRACEPOINT(page_ref_mod);
+DECLARE_TRACEPOINT(page_ref_mod_and_test);
+DECLARE_TRACEPOINT(page_ref_mod_and_return);
+DECLARE_TRACEPOINT(page_ref_mod_unless);
+DECLARE_TRACEPOINT(page_ref_freeze);
+DECLARE_TRACEPOINT(page_ref_unfreeze);
#ifdef CONFIG_DEBUG_PAGE_REF
@@ -24,7 +24,7 @@ extern struct tracepoint __tracepoint_page_ref_unfreeze;
*
* See trace_##name##_enabled(void) in include/linux/tracepoint.h
*/
-#define page_ref_tracepoint_active(t) static_key_false(&(t).key)
+#define page_ref_tracepoint_active(t) tracepoint_enabled(t)
extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
@@ -62,23 +62,50 @@ static inline void __page_ref_unfreeze(struct page *page, int v)
#endif
-static inline int page_ref_count(struct page *page)
+static inline int page_ref_count(const struct page *page)
{
return atomic_read(&page->_refcount);
}
-static inline int page_count(struct page *page)
+/**
+ * folio_ref_count - The reference count on this folio.
+ * @folio: The folio.
+ *
+ * The refcount is usually incremented by calls to folio_get() and
+ * decremented by calls to folio_put(). Some typical users of the
+ * folio refcount:
+ *
+ * - Each reference from a page table
+ * - The page cache
+ * - Filesystem private data
+ * - The LRU list
+ * - Pipes
+ * - Direct IO which references this page in the process address space
+ *
+ * Return: The number of references to this folio.
+ */
+static inline int folio_ref_count(const struct folio *folio)
{
- return atomic_read(&compound_head(page)->_refcount);
+ return page_ref_count(&folio->page);
+}
+
+static inline int page_count(const struct page *page)
+{
+ return folio_ref_count(page_folio(page));
}
static inline void set_page_count(struct page *page, int v)
{
atomic_set(&page->_refcount, v);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_set))
+ if (page_ref_tracepoint_active(page_ref_set))
__page_ref_set(page, v);
}
+static inline void folio_set_count(struct folio *folio, int v)
+{
+ set_page_count(&folio->page, v);
+}
+
/*
* Setup the page count before being freed into the page allocator for
* the first time (boot or memory hotplug)
@@ -91,93 +118,227 @@ static inline void init_page_count(struct page *page)
static inline void page_ref_add(struct page *page, int nr)
{
atomic_add(nr, &page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+ if (page_ref_tracepoint_active(page_ref_mod))
__page_ref_mod(page, nr);
}
+static inline void folio_ref_add(struct folio *folio, int nr)
+{
+ page_ref_add(&folio->page, nr);
+}
+
static inline void page_ref_sub(struct page *page, int nr)
{
atomic_sub(nr, &page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+ if (page_ref_tracepoint_active(page_ref_mod))
__page_ref_mod(page, -nr);
}
+static inline void folio_ref_sub(struct folio *folio, int nr)
+{
+ page_ref_sub(&folio->page, nr);
+}
+
+static inline int page_ref_sub_return(struct page *page, int nr)
+{
+ int ret = atomic_sub_return(nr, &page->_refcount);
+
+ if (page_ref_tracepoint_active(page_ref_mod_and_return))
+ __page_ref_mod_and_return(page, -nr, ret);
+ return ret;
+}
+
+static inline int folio_ref_sub_return(struct folio *folio, int nr)
+{
+ return page_ref_sub_return(&folio->page, nr);
+}
+
static inline void page_ref_inc(struct page *page)
{
atomic_inc(&page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+ if (page_ref_tracepoint_active(page_ref_mod))
__page_ref_mod(page, 1);
}
+static inline void folio_ref_inc(struct folio *folio)
+{
+ page_ref_inc(&folio->page);
+}
+
static inline void page_ref_dec(struct page *page)
{
atomic_dec(&page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+ if (page_ref_tracepoint_active(page_ref_mod))
__page_ref_mod(page, -1);
}
+static inline void folio_ref_dec(struct folio *folio)
+{
+ page_ref_dec(&folio->page);
+}
+
static inline int page_ref_sub_and_test(struct page *page, int nr)
{
int ret = atomic_sub_and_test(nr, &page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
+ if (page_ref_tracepoint_active(page_ref_mod_and_test))
__page_ref_mod_and_test(page, -nr, ret);
return ret;
}
+static inline int folio_ref_sub_and_test(struct folio *folio, int nr)
+{
+ return page_ref_sub_and_test(&folio->page, nr);
+}
+
static inline int page_ref_inc_return(struct page *page)
{
int ret = atomic_inc_return(&page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
+ if (page_ref_tracepoint_active(page_ref_mod_and_return))
__page_ref_mod_and_return(page, 1, ret);
return ret;
}
+static inline int folio_ref_inc_return(struct folio *folio)
+{
+ return page_ref_inc_return(&folio->page);
+}
+
static inline int page_ref_dec_and_test(struct page *page)
{
int ret = atomic_dec_and_test(&page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
+ if (page_ref_tracepoint_active(page_ref_mod_and_test))
__page_ref_mod_and_test(page, -1, ret);
return ret;
}
+static inline int folio_ref_dec_and_test(struct folio *folio)
+{
+ return page_ref_dec_and_test(&folio->page);
+}
+
static inline int page_ref_dec_return(struct page *page)
{
int ret = atomic_dec_return(&page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
+ if (page_ref_tracepoint_active(page_ref_mod_and_return))
__page_ref_mod_and_return(page, -1, ret);
return ret;
}
-static inline int page_ref_add_unless(struct page *page, int nr, int u)
+static inline int folio_ref_dec_return(struct folio *folio)
{
- int ret = atomic_add_unless(&page->_refcount, nr, u);
+ return page_ref_dec_return(&folio->page);
+}
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless))
+static inline bool page_ref_add_unless(struct page *page, int nr, int u)
+{
+ bool ret = atomic_add_unless(&page->_refcount, nr, u);
+
+ if (page_ref_tracepoint_active(page_ref_mod_unless))
__page_ref_mod_unless(page, nr, ret);
return ret;
}
+static inline bool folio_ref_add_unless(struct folio *folio, int nr, int u)
+{
+ return page_ref_add_unless(&folio->page, nr, u);
+}
+
+/**
+ * folio_try_get - Attempt to increase the refcount on a folio.
+ * @folio: The folio.
+ *
+ * If you do not already have a reference to a folio, you can attempt to
+ * get one using this function. It may fail if, for example, the folio
+ * has been freed since you found a pointer to it, or it is frozen for
+ * the purposes of splitting or migration.
+ *
+ * Return: True if the reference count was successfully incremented.
+ */
+static inline bool folio_try_get(struct folio *folio)
+{
+ return folio_ref_add_unless(folio, 1, 0);
+}
+
+static inline bool folio_ref_try_add_rcu(struct folio *folio, int count)
+{
+#ifdef CONFIG_TINY_RCU
+ /*
+ * The caller guarantees the folio will not be freed from interrupt
+ * context, so (on !SMP) we only need preemption to be disabled
+ * and TINY_RCU does that for us.
+ */
+# ifdef CONFIG_PREEMPT_COUNT
+ VM_BUG_ON(!in_atomic() && !irqs_disabled());
+# endif
+ VM_BUG_ON_FOLIO(folio_ref_count(folio) == 0, folio);
+ folio_ref_add(folio, count);
+#else
+ if (unlikely(!folio_ref_add_unless(folio, count, 0))) {
+ /* Either the folio has been freed, or will be freed. */
+ return false;
+ }
+#endif
+ return true;
+}
+
+/**
+ * folio_try_get_rcu - Attempt to increase the refcount on a folio.
+ * @folio: The folio.
+ *
+ * This is a version of folio_try_get() optimised for non-SMP kernels.
+ * If you are still holding the rcu_read_lock() after looking up the
+ * page and know that the page cannot have its refcount decreased to
+ * zero in interrupt context, you can use this instead of folio_try_get().
+ *
+ * Example users include get_user_pages_fast() (as pages are not unmapped
+ * from interrupt context) and the page cache lookups (as pages are not
+ * truncated from interrupt context). We also know that pages are not
+ * frozen in interrupt context for the purposes of splitting or migration.
+ *
+ * You can also use this function if you're holding a lock that prevents
+ * pages being frozen & removed; eg the i_pages lock for the page cache
+ * or the mmap_sem or page table lock for page tables. In this case,
+ * it will always succeed, and you could have used a plain folio_get(),
+ * but it's sometimes more convenient to have a common function called
+ * from both locked and RCU-protected contexts.
+ *
+ * Return: True if the reference count was successfully incremented.
+ */
+static inline bool folio_try_get_rcu(struct folio *folio)
+{
+ return folio_ref_try_add_rcu(folio, 1);
+}
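A simplified sketch of the lookup-side protocol folio_try_get_rcu() exists for (modelled on the page cache's own lookup; shadow/value entries are glossed over):

static struct folio *lookup_demo(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio;

	rcu_read_lock();
repeat:
	folio = xa_load(&mapping->i_pages, index);
	if (folio && !xa_is_value(folio)) {
		if (!folio_try_get_rcu(folio))
			goto repeat;	/* freed or frozen under us */
		if (unlikely(folio != xa_load(&mapping->i_pages, index))) {
			folio_put(folio);	/* raced with removal */
			goto repeat;
		}
	}
	rcu_read_unlock();
	return folio;
}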
+
static inline int page_ref_freeze(struct page *page, int count)
{
int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze))
+ if (page_ref_tracepoint_active(page_ref_freeze))
__page_ref_freeze(page, count, ret);
return ret;
}
+static inline int folio_ref_freeze(struct folio *folio, int count)
+{
+ return page_ref_freeze(&folio->page, count);
+}
+
static inline void page_ref_unfreeze(struct page *page, int count)
{
VM_BUG_ON_PAGE(page_count(page) != 0, page);
VM_BUG_ON(count == 0);
atomic_set_release(&page->_refcount, count);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
+ if (page_ref_tracepoint_active(page_ref_unfreeze))
__page_ref_unfreeze(page, count);
}
+static inline void folio_ref_unfreeze(struct folio *folio, int count)
+{
+ page_ref_unfreeze(&folio->page, count);
+}
#endif
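folio_ref_freeze() atomically trades an exact expected refcount for zero, locking out folio_try_get() and friends; the typical bracket looks like this (sketch, migration-style):

static bool exclusive_op_demo(struct folio *folio, int expected_refs)
{
	if (!folio_ref_freeze(folio, expected_refs))
		return false;	/* someone holds an extra reference */
	/* refcount is 0: no new references can be taken */
	/* ... rewrite the folio's state, e.g. replace cache entries ... */
	folio_ref_unfreeze(folio, expected_refs);	/* release-ordered */
	return true;
}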
diff --git a/include/linux/page_reporting.h b/include/linux/page_reporting.h
new file mode 100644
index 000000000000..fe648dfa3a7c
--- /dev/null
+++ b/include/linux/page_reporting.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PAGE_REPORTING_H
+#define _LINUX_PAGE_REPORTING_H
+
+#include <linux/mmzone.h>
+#include <linux/scatterlist.h>
+
+/* This value should always be a power of 2, see page_reporting_cycle() */
+#define PAGE_REPORTING_CAPACITY 32
+
+struct page_reporting_dev_info {
+ /* function that alters pages to make them "reported" */
+ int (*report)(struct page_reporting_dev_info *prdev,
+ struct scatterlist *sg, unsigned int nents);
+
+ /* work struct for processing reports */
+ struct delayed_work work;
+
+ /* Current state of page reporting */
+ atomic_t state;
+
+ /* Minimal order of page reporting */
+ unsigned int order;
+};
+
+/* Tear-down and bring-up for page reporting devices */
+void page_reporting_unregister(struct page_reporting_dev_info *prdev);
+int page_reporting_register(struct page_reporting_dev_info *prdev);
+#endif /*_LINUX_PAGE_REPORTING_H */
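A hedged sketch of a consumer of this new interface (demo_* names are hypothetical; the in-tree user is virtio-balloon's free page reporting):

static int demo_report(struct page_reporting_dev_info *prdev,
		       struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i) {
		/* notify the hypervisor that sg_page(sg) (sg->length bytes)
		 * is free; returning nonzero leaves the pages unreported */
	}
	return 0;
}

static struct page_reporting_dev_info demo_prdev = {
	.report = demo_report,
};

/* at probe time: err = page_reporting_register(&demo_prdev); */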
diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h
new file mode 100644
index 000000000000..01e16c7696ec
--- /dev/null
+++ b/include/linux/page_table_check.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2021, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+#ifndef __LINUX_PAGE_TABLE_CHECK_H
+#define __LINUX_PAGE_TABLE_CHECK_H
+
+#ifdef CONFIG_PAGE_TABLE_CHECK
+#include <linux/jump_label.h>
+
+extern struct static_key_true page_table_check_disabled;
+extern struct page_ext_operations page_table_check_ops;
+
+void __page_table_check_zero(struct page *page, unsigned int order);
+void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t pte);
+void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
+ pmd_t pmd);
+void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
+ pud_t pud);
+void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte);
+void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd);
+void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
+ pud_t *pudp, pud_t pud);
+void __page_table_check_pte_clear_range(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t pmd);
+
+static inline void page_table_check_alloc(struct page *page, unsigned int order)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_zero(page, order);
+}
+
+static inline void page_table_check_free(struct page *page, unsigned int order)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_zero(page, order);
+}
+
+static inline void page_table_check_pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t pte)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pte_clear(mm, addr, pte);
+}
+
+static inline void page_table_check_pmd_clear(struct mm_struct *mm,
+ unsigned long addr, pmd_t pmd)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pmd_clear(mm, addr, pmd);
+}
+
+static inline void page_table_check_pud_clear(struct mm_struct *mm,
+ unsigned long addr, pud_t pud)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pud_clear(mm, addr, pud);
+}
+
+static inline void page_table_check_pte_set(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pte_set(mm, addr, ptep, pte);
+}
+
+static inline void page_table_check_pmd_set(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp,
+ pmd_t pmd)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pmd_set(mm, addr, pmdp, pmd);
+}
+
+static inline void page_table_check_pud_set(struct mm_struct *mm,
+ unsigned long addr, pud_t *pudp,
+ pud_t pud)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pud_set(mm, addr, pudp, pud);
+}
+
+static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t pmd)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pte_clear_range(mm, addr, pmd);
+}
+
+#else
+
+static inline void page_table_check_alloc(struct page *page, unsigned int order)
+{
+}
+
+static inline void page_table_check_free(struct page *page, unsigned int order)
+{
+}
+
+static inline void page_table_check_pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t pte)
+{
+}
+
+static inline void page_table_check_pmd_clear(struct mm_struct *mm,
+ unsigned long addr, pmd_t pmd)
+{
+}
+
+static inline void page_table_check_pud_clear(struct mm_struct *mm,
+ unsigned long addr, pud_t pud)
+{
+}
+
+static inline void page_table_check_pte_set(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte)
+{
+}
+
+static inline void page_table_check_pmd_set(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp,
+ pmd_t pmd)
+{
+}
+
+static inline void page_table_check_pud_set(struct mm_struct *mm,
+ unsigned long addr, pud_t *pudp,
+ pud_t pud)
+{
+}
+
+static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t pmd)
+{
+}
+
+#endif /* CONFIG_PAGE_TABLE_CHECK */
+#endif /* __LINUX_PAGE_TABLE_CHECK_H */
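All wrappers above share one idiom: a static key compiles each hook down to a single patched branch while the feature is off. A generic sketch of the pattern (feature_* names are hypothetical; page_owner's page_owner_inited key works the same way, inverted):

DEFINE_STATIC_KEY_TRUE(feature_disabled);	/* default: feature off */

static inline void feature_hook(struct page *page)
{
	if (static_branch_likely(&feature_disabled))
		return;			/* patched-out fast path */
	__feature_hook_slow(page);	/* out-of-line slow path */
}

/* boot-time enable, e.g. from an early_param() handler: */
/* static_branch_disable(&feature_disabled); */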
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index c066fec5b74b..5f1ae07d724b 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -37,8 +37,11 @@ extern unsigned int pageblock_order;
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
-/* Huge pages are a constant size */
-#define pageblock_order HUGETLB_PAGE_ORDER
+/*
+ * Huge pages are a constant size, but don't exceed the maximum allocation
+ * granularity.
+ */
+#define pageblock_order min_t(unsigned int, HUGETLB_PAGE_ORDER, MAX_ORDER - 1)
#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
@@ -50,41 +53,35 @@ extern unsigned int pageblock_order;
#endif /* CONFIG_HUGETLB_PAGE */
#define pageblock_nr_pages (1UL << pageblock_order)
+#define pageblock_align(pfn) ALIGN((pfn), pageblock_nr_pages)
+#define pageblock_aligned(pfn) IS_ALIGNED((pfn), pageblock_nr_pages)
+#define pageblock_start_pfn(pfn) ALIGN_DOWN((pfn), pageblock_nr_pages)
+#define pageblock_end_pfn(pfn) ALIGN((pfn) + 1, pageblock_nr_pages)
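A worked example of the new helpers, assuming pageblock_order == 9 and therefore pageblock_nr_pages == 512:

/* pageblock_start_pfn(1000) == 512   (round down to block start)      */
/* pageblock_end_pfn(1000)   == 1024  (first pfn past the block)       */
/* pageblock_align(1000)     == 1024  (round up)                       */
/* pageblock_aligned(1000)   == false, pageblock_aligned(1024) == true */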
/* Forward declaration */
struct page;
-unsigned long get_pfnblock_flags_mask(struct page *page,
+unsigned long get_pfnblock_flags_mask(const struct page *page,
unsigned long pfn,
- unsigned long end_bitidx,
unsigned long mask);
void set_pfnblock_flags_mask(struct page *page,
unsigned long flags,
unsigned long pfn,
- unsigned long end_bitidx,
unsigned long mask);
/* Declarations for getting and setting flags. See mm/page_alloc.c */
-#define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \
- get_pfnblock_flags_mask(page, page_to_pfn(page), \
- end_bitidx, \
- (1 << (end_bitidx - start_bitidx + 1)) - 1)
-#define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \
- set_pfnblock_flags_mask(page, flags, page_to_pfn(page), \
- end_bitidx, \
- (1 << (end_bitidx - start_bitidx + 1)) - 1)
-
#ifdef CONFIG_COMPACTION
#define get_pageblock_skip(page) \
- get_pageblock_flags_group(page, PB_migrate_skip, \
- PB_migrate_skip)
+ get_pfnblock_flags_mask(page, page_to_pfn(page), \
+ (1 << (PB_migrate_skip)))
#define clear_pageblock_skip(page) \
- set_pageblock_flags_group(page, 0, PB_migrate_skip, \
- PB_migrate_skip)
+ set_pfnblock_flags_mask(page, 0, page_to_pfn(page), \
+ (1 << PB_migrate_skip))
#define set_pageblock_skip(page) \
- set_pageblock_flags_group(page, 1, PB_migrate_skip, \
- PB_migrate_skip)
+ set_pfnblock_flags_mask(page, (1 << PB_migrate_skip), \
+ page_to_pfn(page), \
+ (1 << PB_migrate_skip))
#else
static inline bool get_pageblock_skip(struct page *page)
{
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index ccb14b6a16b5..bbccb4044222 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -16,7 +16,176 @@
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>
-struct pagevec;
+struct folio_batch;
+
+unsigned long invalidate_mapping_pages(struct address_space *mapping,
+ pgoff_t start, pgoff_t end);
+
+static inline void invalidate_remote_inode(struct inode *inode)
+{
+ if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode))
+ invalidate_mapping_pages(inode->i_mapping, 0, -1);
+}
+int invalidate_inode_pages2(struct address_space *mapping);
+int invalidate_inode_pages2_range(struct address_space *mapping,
+ pgoff_t start, pgoff_t end);
+int write_inode_now(struct inode *, int sync);
+int filemap_fdatawrite(struct address_space *);
+int filemap_flush(struct address_space *);
+int filemap_fdatawait_keep_errors(struct address_space *mapping);
+int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
+int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
+ loff_t start_byte, loff_t end_byte);
+
+static inline int filemap_fdatawait(struct address_space *mapping)
+{
+ return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
+}
+
+bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend);
+int filemap_write_and_wait_range(struct address_space *mapping,
+ loff_t lstart, loff_t lend);
+int __filemap_fdatawrite_range(struct address_space *mapping,
+ loff_t start, loff_t end, int sync_mode);
+int filemap_fdatawrite_range(struct address_space *mapping,
+ loff_t start, loff_t end);
+int filemap_check_errors(struct address_space *mapping);
+void __filemap_set_wb_err(struct address_space *mapping, int err);
+int filemap_fdatawrite_wbc(struct address_space *mapping,
+ struct writeback_control *wbc);
+
+static inline int filemap_write_and_wait(struct address_space *mapping)
+{
+ return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
+}
+
+/**
+ * filemap_set_wb_err - set a writeback error on an address_space
+ * @mapping: mapping in which to set writeback error
+ * @err: error to be set in mapping
+ *
+ * When writeback fails in some way, we must record that error so that
+ * userspace can be informed when fsync and the like are called. We endeavor
+ * to report errors on any file that was open at the time of the error. Some
+ * internal callers also need to know when writeback errors have occurred.
+ *
+ * When a writeback error occurs, most filesystems will want to call
+ * filemap_set_wb_err to record the error in the mapping so that it will be
+ * automatically reported whenever fsync is called on the file.
+ */
+static inline void filemap_set_wb_err(struct address_space *mapping, int err)
+{
+ /* Fastpath for common case of no error */
+ if (unlikely(err))
+ __filemap_set_wb_err(mapping, err);
+}
+
+/**
+ * filemap_check_wb_err - has an error occurred since the mark was sampled?
+ * @mapping: mapping to check for writeback errors
+ * @since: previously-sampled errseq_t
+ *
+ * Grab the errseq_t value from the mapping, and see if it has changed "since"
+ * the given value was sampled.
+ *
+ * If it has then report the latest error set, otherwise return 0.
+ */
+static inline int filemap_check_wb_err(struct address_space *mapping,
+ errseq_t since)
+{
+ return errseq_check(&mapping->wb_err, since);
+}
+
+/**
+ * filemap_sample_wb_err - sample the current errseq_t to test for later errors
+ * @mapping: mapping to be sampled
+ *
+ * Writeback errors are always reported relative to a particular sample point
+ * in the past. This function provides those sample points.
+ */
+static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
+{
+ return errseq_sample(&mapping->wb_err);
+}
+
+/**
+ * file_sample_sb_err - sample the current errseq_t to test for later errors
+ * @file: file pointer to be sampled
+ *
+ * Grab the most current superblock-level errseq_t value for the given
+ * struct file.
+ */
+static inline errseq_t file_sample_sb_err(struct file *file)
+{
+ return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
+}
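The sample/check pair above brackets a writeback pass; a sketch using only functions declared in this hunk:

static int wait_demo(struct address_space *mapping, loff_t start, loff_t end)
{
	errseq_t since = filemap_sample_wb_err(mapping);
	int err;

	err = filemap_fdatawrite_range(mapping, start, end);
	if (err)
		return err;
	filemap_fdatawait_range(mapping, start, end);
	return filemap_check_wb_err(mapping, since);	/* errors since sample */
}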
+
+/*
+ * Flush file data before changing attributes. Caller must hold any locks
+ * required to prevent further writes to this file until we're done setting
+ * flags.
+ */
+static inline int inode_drain_writes(struct inode *inode)
+{
+ inode_dio_wait(inode);
+ return filemap_write_and_wait(inode->i_mapping);
+}
+
+static inline bool mapping_empty(struct address_space *mapping)
+{
+ return xa_empty(&mapping->i_pages);
+}
+
+/*
+ * mapping_shrinkable - test if page cache state allows inode reclaim
+ * @mapping: the page cache mapping
+ *
+ * This checks the mapping's cache state for the purpose of inode
+ * reclaim and LRU management.
+ *
+ * The caller is expected to hold the i_lock, but is not required to
+ * hold the i_pages lock, which usually protects cache state. That's
+ * because the i_lock and the list_lru lock that protect the inode and
+ * its LRU state don't nest inside the irq-safe i_pages lock.
+ *
+ * Cache deletions are performed under the i_lock, which ensures that
+ * when an inode goes empty, it will reliably get queued on the LRU.
+ *
+ * Cache additions do not acquire the i_lock and may race with this
+ * check, in which case we'll report the inode as shrinkable when it
+ * has cache pages. This is okay: the shrinker also checks the
+ * refcount and the referenced bit, which will be elevated or set in
+ * the process of adding new cache pages to an inode.
+ */
+static inline bool mapping_shrinkable(struct address_space *mapping)
+{
+ void *head;
+
+ /*
+ * On highmem systems, there could be lowmem pressure from the
+ * inodes before there is highmem pressure from the page
+ * cache. Make inodes shrinkable regardless of cache state.
+ */
+ if (IS_ENABLED(CONFIG_HIGHMEM))
+ return true;
+
+ /* Cache completely empty? Shrink away. */
+ head = rcu_access_pointer(mapping->i_pages.xa_head);
+ if (!head)
+ return true;
+
+ /*
+ * The xarray stores single offset-0 entries directly in the
+ * head pointer, which allows non-resident page cache entries
+ * to escape the shadow shrinker's list of xarray nodes. The
+ * inode shrinker needs to pick them up under memory pressure.
+ */
+ if (!xa_is_node(head) && xa_is_value(head))
+ return true;
+
+ return false;
+}
/*
* Bits in mapping->flags.
@@ -29,12 +198,13 @@ enum mapping_flags {
AS_EXITING = 4, /* final truncate in progress */
/* writeback related tags are not used */
AS_NO_WRITEBACK_TAGS = 5,
+ AS_LARGE_FOLIO_SUPPORT = 6,
};
/**
* mapping_set_error - record a writeback error in the address_space
- * @mapping - the mapping in which an error should be set
- * @error - the error to set in the mapping
+ * @mapping: the mapping in which an error should be set
+ * @error: the error to set in the mapping
*
* When writeback fails in some way, we must record that error so that
* userspace can be informed when fsync and the like are called. We endeavor
@@ -51,7 +221,11 @@ static inline void mapping_set_error(struct address_space *mapping, int error)
return;
/* Record in wb_err for checkers using errseq_t based tracking */
- filemap_set_wb_err(mapping, error);
+ __filemap_set_wb_err(mapping, error);
+
+ /* Record it in superblock */
+ if (mapping->host)
+ errseq_set(&mapping->host->i_sb->s_wb_err, error);
/* Record it in flags for now, for legacy callers */
if (error == -ENOSPC)
@@ -70,11 +244,9 @@ static inline void mapping_clear_unevictable(struct address_space *mapping)
clear_bit(AS_UNEVICTABLE, &mapping->flags);
}
-static inline int mapping_unevictable(struct address_space *mapping)
+static inline bool mapping_unevictable(struct address_space *mapping)
{
- if (mapping)
- return test_bit(AS_UNEVICTABLE, &mapping->flags);
- return !!mapping;
+ return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}
static inline void mapping_set_exiting(struct address_space *mapping)
@@ -118,104 +290,196 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
m->gfp_mask = mask;
}
-void release_pages(struct page **pages, int nr);
+/**
+ * mapping_set_large_folios() - Indicate the file supports large folios.
+ * @mapping: The file.
+ *
+ * The filesystem should call this function in its inode constructor to
+ * indicate that the VFS can use large folios to cache the contents of
+ * the file.
+ *
+ * Context: This should not be called while the inode is active as it
+ * is non-atomic.
+ */
+static inline void mapping_set_large_folios(struct address_space *mapping)
+{
+ __set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+}
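+
+/*
+ * Illustrative sketch, not part of this patch: a filesystem would
+ * typically opt in from its inode initialisation path, before the
+ * inode is visible to lookups. foo_init_inode() is hypothetical.
+ *
+ *   static void foo_init_inode(struct inode *inode)
+ *   {
+ *           mapping_set_large_folios(inode->i_mapping);
+ *   }
+ */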
/*
- * speculatively take a reference to a page.
- * If the page is free (_refcount == 0), then _refcount is untouched, and 0
- * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
- *
- * This function must be called inside the same rcu_read_lock() section as has
- * been used to lookup the page in the pagecache radix-tree (or page table):
- * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
- *
- * Unless an RCU grace period has passed, the count of all pages coming out
- * of the allocator must be considered unstable. page_count may return higher
- * than expected, and put_page must be able to do the right thing when the
- * page has been finished with, no matter what it is subsequently allocated
- * for (because put_page is what is used here to drop an invalid speculative
- * reference).
- *
- * This is the interesting part of the lockless pagecache (and lockless
- * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
- * has the following pattern:
- * 1. find page in radix tree
- * 2. conditionally increment refcount
- * 3. check the page is still in pagecache (if no, goto 1)
- *
- * Remove-side that cares about stability of _refcount (eg. reclaim) has the
- * following (with the i_pages lock held):
- * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
- * B. remove page from pagecache
- * C. free the page
- *
- * There are 2 critical interleavings that matter:
- * - 2 runs before A: in this case, A sees elevated refcount and bails out
- * - A runs before 2: in this case, 2 sees zero refcount and retries;
- * subsequently, B will complete and 1 will find no page, causing the
- * lookup to return NULL.
- *
- * It is possible that between 1 and 2, the page is removed then the exact same
- * page is inserted into the same position in pagecache. That's OK: the
- * old find_get_page using a lock could equally have run before or after
- * such a re-insertion, depending on order that locks are granted.
- *
- * Lookups racing against pagecache insertion isn't a big problem: either 1
- * will find the page or it will not. Likewise, the old find_get_page could run
- * either before the insertion or afterwards, depending on timing.
- */
-static inline int __page_cache_add_speculative(struct page *page, int count)
-{
-#ifdef CONFIG_TINY_RCU
-# ifdef CONFIG_PREEMPT_COUNT
- VM_BUG_ON(!in_atomic() && !irqs_disabled());
-# endif
- /*
- * Preempt must be disabled here - we rely on rcu_read_lock doing
- * this for us.
- *
- * Pagecache won't be truncated from interrupt context, so if we have
- * found a page in the radix tree here, we have pinned its refcount by
- * disabling preempt, and hence no need for the "speculative get" that
- * SMP requires.
- */
- VM_BUG_ON_PAGE(page_count(page) == 0, page);
- page_ref_add(page, count);
+ * Large folio support currently depends on THP. These dependencies are
+ * being worked on but are not yet fixed.
+ */
+static inline bool mapping_large_folio_support(struct address_space *mapping)
+{
+ return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
+ test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+}
+static inline int filemap_nr_thps(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ return atomic_read(&mapping->nr_thps);
#else
- if (unlikely(!page_ref_add_unless(page, count, 0))) {
- /*
- * Either the page has been freed, or will be freed.
- * In either case, retry here and the caller should
- * do the right thing (see comments above).
- */
- return 0;
- }
+ return 0;
+#endif
+}
+
+static inline void filemap_nr_thps_inc(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ if (!mapping_large_folio_support(mapping))
+ atomic_inc(&mapping->nr_thps);
+#else
+ WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
- VM_BUG_ON_PAGE(PageTail(page), page);
+}
- return 1;
+static inline void filemap_nr_thps_dec(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ if (!mapping_large_folio_support(mapping))
+ atomic_dec(&mapping->nr_thps);
+#else
+ WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
+#endif
}
-static inline int page_cache_get_speculative(struct page *page)
+struct address_space *page_mapping(struct page *);
+struct address_space *folio_mapping(struct folio *);
+struct address_space *swapcache_mapping(struct folio *);
+
+/**
+ * folio_file_mapping - Find the mapping this folio belongs to.
+ * @folio: The folio.
+ *
+ * For folios which are in the page cache, return the mapping that this
+ * page belongs to. Folios in the swap cache return the mapping of the
+ * swap file or swap device where the data is stored. This is different
+ * from the mapping returned by folio_mapping(). The only reason to
+ * use it is if, like NFS, you return 0 from ->activate_swapfile.
+ *
+ * Do not call this for folios which aren't in the page cache or swap cache.
+ */
+static inline struct address_space *folio_file_mapping(struct folio *folio)
{
- return __page_cache_add_speculative(page, 1);
+ if (unlikely(folio_test_swapcache(folio)))
+ return swapcache_mapping(folio);
+
+ return folio->mapping;
}
-static inline int page_cache_add_speculative(struct page *page, int count)
+static inline struct address_space *page_file_mapping(struct page *page)
{
- return __page_cache_add_speculative(page, count);
+ return folio_file_mapping(page_folio(page));
+}
+
+/*
+ * For file cache pages, return the address_space, otherwise return NULL
+ */
+static inline struct address_space *page_mapping_file(struct page *page)
+{
+ struct folio *folio = page_folio(page);
+
+ if (unlikely(folio_test_swapcache(folio)))
+ return NULL;
+ return folio_mapping(folio);
+}
+
+/**
+ * folio_inode - Get the host inode for this folio.
+ * @folio: The folio.
+ *
+ * For folios which are in the page cache, return the inode that this folio
+ * belongs to.
+ *
+ * Do not call this for folios which aren't in the page cache.
+ */
+static inline struct inode *folio_inode(struct folio *folio)
+{
+ return folio->mapping->host;
+}
+
+/**
+ * folio_attach_private - Attach private data to a folio.
+ * @folio: Folio to attach data to.
+ * @data: Data to attach to folio.
+ *
+ * Attaching private data to a folio increments the folio's reference count.
+ * The data must be detached before the folio will be freed.
+ */
+static inline void folio_attach_private(struct folio *folio, void *data)
+{
+ folio_get(folio);
+ folio->private = data;
+ folio_set_private(folio);
+}
+
+/**
+ * folio_change_private - Change private data on a folio.
+ * @folio: Folio to change the data on.
+ * @data: Data to set on the folio.
+ *
+ * Change the private data attached to a folio and return the old
+ * data. The folio must previously have had data attached and the data
+ * must be detached before the folio will be freed.
+ *
+ * Return: Data that was previously attached to the folio.
+ */
+static inline void *folio_change_private(struct folio *folio, void *data)
+{
+ void *old = folio_get_private(folio);
+
+ folio->private = data;
+ return old;
+}
+
+/**
+ * folio_detach_private - Detach private data from a folio.
+ * @folio: Folio to detach data from.
+ *
+ * Removes the data that was previously attached to the folio and decrements
+ * the refcount on the folio.
+ *
+ * Return: Data that was attached to the folio.
+ */
+static inline void *folio_detach_private(struct folio *folio)
+{
+ void *data = folio_get_private(folio);
+
+ if (!folio_test_private(folio))
+ return NULL;
+ folio_clear_private(folio);
+ folio->private = NULL;
+ folio_put(folio);
+
+ return data;
+}
+
+static inline void attach_page_private(struct page *page, void *data)
+{
+ folio_attach_private(page_folio(page), data);
+}
+
+static inline void *detach_page_private(struct page *page)
+{
+ return folio_detach_private(page_folio(page));
}
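+
+/*
+ * Illustrative sketch, assuming a hypothetical foo_state: attach and
+ * detach must pair up, since folio_attach_private() takes a folio
+ * reference that only folio_detach_private() drops.
+ *
+ *   folio_attach_private(folio, state);
+ *   ...
+ *   state = folio_detach_private(folio);
+ *   kfree(state);
+ */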
#ifdef CONFIG_NUMA
-extern struct page *__page_cache_alloc(gfp_t gfp);
+struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
#else
-static inline struct page *__page_cache_alloc(gfp_t gfp)
+static inline struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
{
- return alloc_pages(gfp, 0);
+ return folio_alloc(gfp, order);
}
#endif
+static inline struct page *__page_cache_alloc(gfp_t gfp)
+{
+ return &filemap_alloc_folio(gfp, 0)->page;
+}
+
static inline struct page *page_cache_alloc(struct address_space *x)
{
return __page_cache_alloc(mapping_gfp_mask(x));
@@ -226,7 +490,7 @@ static inline gfp_t readahead_gfp_mask(struct address_space *x)
return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}
-typedef int filler_t(void *, struct page *);
+typedef int filler_t(struct file *, struct folio *);
pgoff_t page_cache_next_miss(struct address_space *mapping,
pgoff_t index, unsigned long max_scan);
@@ -240,9 +504,48 @@ pgoff_t page_cache_prev_miss(struct address_space *mapping,
#define FGP_NOFS 0x00000010
#define FGP_NOWAIT 0x00000020
#define FGP_FOR_MMAP 0x00000040
+#define FGP_HEAD 0x00000080
+#define FGP_ENTRY 0x00000100
+#define FGP_STABLE 0x00000200
-struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
- int fgp_flags, gfp_t cache_gfp_mask);
+struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
+ int fgp_flags, gfp_t gfp);
+struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
+ int fgp_flags, gfp_t gfp);
+
+/**
+ * filemap_get_folio - Find and get a folio.
+ * @mapping: The address_space to search.
+ * @index: The page index.
+ *
+ * Looks up the page cache entry at @mapping & @index. If a folio is
+ * present, it is returned with an increased refcount.
+ *
+ * Otherwise, %NULL is returned.
+ */
+static inline struct folio *filemap_get_folio(struct address_space *mapping,
+ pgoff_t index)
+{
+ return __filemap_get_folio(mapping, index, 0, 0);
+}
+
+/**
+ * filemap_lock_folio - Find and lock a folio.
+ * @mapping: The address_space to search.
+ * @index: The page index.
+ *
+ * Looks up the page cache entry at @mapping & @index. If a folio is
+ * present, it is returned locked with an increased refcount.
+ *
+ * Context: May sleep.
+ * Return: A folio or %NULL if there is no folio in the cache for this
+ * index. Will not return a shadow, swap or DAX entry.
+ */
+static inline struct folio *filemap_lock_folio(struct address_space *mapping,
+ pgoff_t index)
+{
+ return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
+}
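+
+/*
+ * Illustrative sketch: a typical lookup checks for %NULL and drops
+ * both the lock and the reference when done (foo_use_folio() is
+ * hypothetical):
+ *
+ *   folio = filemap_lock_folio(mapping, index);
+ *   if (folio) {
+ *           foo_use_folio(folio);
+ *           folio_unlock(folio);
+ *           folio_put(folio);
+ *   }
+ */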
/**
* find_get_page - find and get a page reference
@@ -269,20 +572,20 @@ static inline struct page *find_get_page_flags(struct address_space *mapping,
/**
* find_lock_page - locate, pin and lock a pagecache page
* @mapping: the address_space to search
- * @offset: the page index
+ * @index: the page index
*
- * Looks up the page cache slot at @mapping & @offset. If there is a
+ * Looks up the page cache entry at @mapping & @index. If there is a
* page cache page, it is returned locked and with an increased
* refcount.
*
- * Otherwise, %NULL is returned.
- *
- * find_lock_page() may sleep.
+ * Context: May sleep.
+ * Return: A struct page or %NULL if there is no page in the cache for this
+ * index.
*/
static inline struct page *find_lock_page(struct address_space *mapping,
- pgoff_t offset)
+ pgoff_t index)
{
- return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
+ return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}
/**
@@ -305,9 +608,9 @@ static inline struct page *find_lock_page(struct address_space *mapping,
* atomic allocation!
*/
static inline struct page *find_or_create_page(struct address_space *mapping,
- pgoff_t offset, gfp_t gfp_mask)
+ pgoff_t index, gfp_t gfp_mask)
{
- return pagecache_get_page(mapping, offset,
+ return pagecache_get_page(mapping, index,
FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
gfp_mask);
}
@@ -333,33 +636,90 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
mapping_gfp_mask(mapping));
}
-static inline struct page *find_subpage(struct page *page, pgoff_t offset)
+#define swapcache_index(folio) __page_file_index(&(folio)->page)
+
+/**
+ * folio_index - File index of a folio.
+ * @folio: The folio.
+ *
+ * For a folio which is either in the page cache or the swap cache,
+ * return its index within the address_space it belongs to. If you know
+ * the page is definitely in the page cache, you can look at the folio's
+ * index directly.
+ *
+ * Return: The index (offset in units of pages) of a folio in its file.
+ */
+static inline pgoff_t folio_index(struct folio *folio)
{
- if (PageHuge(page))
- return page;
+ if (unlikely(folio_test_swapcache(folio)))
+ return swapcache_index(folio);
+ return folio->index;
+}
- VM_BUG_ON_PAGE(PageTail(page), page);
+/**
+ * folio_next_index - Get the index of the next folio.
+ * @folio: The current folio.
+ *
+ * Return: The index of the folio which follows this folio in the file.
+ */
+static inline pgoff_t folio_next_index(struct folio *folio)
+{
+ return folio->index + folio_nr_pages(folio);
+}
- return page + (offset & (compound_nr(page) - 1));
+/**
+ * folio_file_page - The page for a particular index.
+ * @folio: The folio which contains this index.
+ * @index: The index we want to look up.
+ *
+ * Sometimes after looking up a folio in the page cache, we need to
+ * obtain the specific page for an index (eg a page fault).
+ *
+ * Return: The page containing the file data for this index.
+ */
+static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
+{
+ /* HugeTLBfs indexes the page cache in units of hpage_size */
+ if (folio_test_hugetlb(folio))
+ return &folio->page;
+ return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}
-struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
-struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
-unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
- unsigned int nr_entries, struct page **entries,
- pgoff_t *indices);
-unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
- pgoff_t end, unsigned int nr_pages,
- struct page **pages);
-static inline unsigned find_get_pages(struct address_space *mapping,
- pgoff_t *start, unsigned int nr_pages,
- struct page **pages)
+/**
+ * folio_contains - Does this folio contain this index?
+ * @folio: The folio.
+ * @index: The page index within the file.
+ *
+ * Context: The caller should have the page locked in order to prevent
+ * (eg) shmem from moving the page between the page cache and swap cache
+ * and changing its index in the middle of the operation.
+ * Return: true or false.
+ */
+static inline bool folio_contains(struct folio *folio, pgoff_t index)
+{
+ /* HugeTLBfs indexes the page cache in units of hpage_size */
+ if (folio_test_hugetlb(folio))
+ return folio->index == index;
+ return index - folio_index(folio) < folio_nr_pages(folio);
+}
+
+/*
+ * Given the page we found in the page cache, return the page corresponding
+ * to this index in the file
+ */
+static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
- return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
- pages);
+ /* HugeTLBfs wants the head page regardless */
+ if (PageHuge(head))
+ return head;
+
+ return head + (index & (thp_nr_pages(head) - 1));
}
-unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
- unsigned int nr_pages, struct page **pages);
+
+unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
+ pgoff_t end, struct folio_batch *fbatch);
+unsigned filemap_get_folios_contig(struct address_space *mapping,
+ pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
struct page **pages);
@@ -372,7 +732,7 @@ static inline unsigned find_get_pages_tag(struct address_space *mapping,
}
struct page *grab_cache_page_write_begin(struct address_space *mapping,
- pgoff_t index, unsigned flags);
+ pgoff_t index);
/*
* Returns locked page at given index in given cache, creating it if needed.
@@ -383,48 +743,54 @@ static inline struct page *grab_cache_page(struct address_space *mapping,
return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
-extern struct page * read_cache_page(struct address_space *mapping,
- pgoff_t index, filler_t *filler, void *data);
+struct folio *read_cache_folio(struct address_space *, pgoff_t index,
+ filler_t *filler, struct file *file);
+struct page *read_cache_page(struct address_space *, pgoff_t index,
+ filler_t *filler, struct file *file);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
-extern int read_cache_pages(struct address_space *mapping,
- struct list_head *pages, filler_t *filler, void *data);
static inline struct page *read_mapping_page(struct address_space *mapping,
- pgoff_t index, void *data)
+ pgoff_t index, struct file *file)
+{
+ return read_cache_page(mapping, index, NULL, file);
+}
+
+static inline struct folio *read_mapping_folio(struct address_space *mapping,
+ pgoff_t index, struct file *file)
{
- return read_cache_page(mapping, index, NULL, data);
+ return read_cache_folio(mapping, index, NULL, file);
}
/*
- * Get index of the page with in radix-tree
+ * Get index of the page within radix-tree (but not for hugetlb pages).
* (TODO: remove once hugetlb pages have ->index in PAGE_SIZE)
*/
static inline pgoff_t page_to_index(struct page *page)
{
- pgoff_t pgoff;
+ struct page *head;
if (likely(!PageTransTail(page)))
return page->index;
+ head = compound_head(page);
/*
* We don't initialize ->index for tail pages: calculate based on
* head page
*/
- pgoff = compound_head(page)->index;
- pgoff += page - compound_head(page);
- return pgoff;
+ return head->index + page - head;
}
+extern pgoff_t hugetlb_basepage_index(struct page *page);
+
/*
- * Get the offset in PAGE_SIZE.
- * (TODO: hugepage should have ->index in PAGE_SIZE)
+ * Get the offset in PAGE_SIZE (even for hugetlb pages).
+ * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
*/
static inline pgoff_t page_to_pgoff(struct page *page)
{
- if (unlikely(PageHeadHuge(page)))
- return page->index << compound_order(page);
-
+ if (unlikely(PageHuge(page)))
+ return hugetlb_basepage_index(page);
return page_to_index(page);
}
@@ -441,6 +807,38 @@ static inline loff_t page_file_offset(struct page *page)
return ((loff_t)page_index(page)) << PAGE_SHIFT;
}
+/**
+ * folio_pos - Returns the byte position of this folio in its file.
+ * @folio: The folio.
+ */
+static inline loff_t folio_pos(struct folio *folio)
+{
+ return page_offset(&folio->page);
+}
+
+/**
+ * folio_file_pos - Returns the byte position of this folio in its file.
+ * @folio: The folio.
+ *
+ * This differs from folio_pos() for folios which belong to a swap file.
+ * NFS is the only filesystem today which needs to use folio_file_pos().
+ */
+static inline loff_t folio_file_pos(struct folio *folio)
+{
+ return page_file_offset(&folio->page);
+}
+
+/*
+ * Get the offset in PAGE_SIZE (even for hugetlb folios).
+ * (TODO: hugetlb folios should have ->index in PAGE_SIZE)
+ */
+static inline pgoff_t folio_pgoff(struct folio *folio)
+{
+ if (unlikely(folio_test_hugetlb(folio)))
+ return hugetlb_basepage_index(&folio->page);
+ return folio->index;
+}
+
extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
unsigned long address);
@@ -455,29 +853,129 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
return pgoff;
}
-extern void __lock_page(struct page *page);
-extern int __lock_page_killable(struct page *page);
-extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
+struct wait_page_key {
+ struct folio *folio;
+ int bit_nr;
+ int page_match;
+};
+
+struct wait_page_queue {
+ struct folio *folio;
+ int bit_nr;
+ wait_queue_entry_t wait;
+};
+
+static inline bool wake_page_match(struct wait_page_queue *wait_page,
+ struct wait_page_key *key)
+{
+ if (wait_page->folio != key->folio)
+ return false;
+ key->page_match = 1;
+
+ if (wait_page->bit_nr != key->bit_nr)
+ return false;
+
+ return true;
+}
+
+void __folio_lock(struct folio *folio);
+int __folio_lock_killable(struct folio *folio);
+bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
unsigned int flags);
-extern void unlock_page(struct page *page);
+void unlock_page(struct page *page);
+void folio_unlock(struct folio *folio);
+
+/**
+ * folio_trylock() - Attempt to lock a folio.
+ * @folio: The folio to attempt to lock.
+ *
+ * Sometimes it is undesirable to wait for a folio to be unlocked (eg
+ * when the locks are being taken in the wrong order, or if making
+ * progress through a batch of folios is more important than processing
+ * them in order). Usually folio_lock() is the correct function to call.
+ *
+ * Context: Any context.
+ * Return: Whether the lock was successfully acquired.
+ */
+static inline bool folio_trylock(struct folio *folio)
+{
+ return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
+}
/*
* Return true if the page was successfully locked
*/
static inline int trylock_page(struct page *page)
{
- page = compound_head(page);
- return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
+ return folio_trylock(page_folio(page));
}
-/*
- * lock_page may only be called if we have the page's inode pinned.
+/**
+ * folio_lock() - Lock this folio.
+ * @folio: The folio to lock.
+ *
+ * The folio lock protects against many things, probably more than it
+ * should. It is primarily held while a folio is being brought uptodate,
+ * either from its backing file or from swap. It is also held while a
+ * folio is being truncated from its address_space, so holding the lock
+ * is sufficient to keep folio->mapping stable.
+ *
+ * The folio lock is also held while write() is modifying the page to
+ * provide POSIX atomicity guarantees (as long as the write does not
+ * cross a page boundary). Other modifications to the data in the folio
+ * do not hold the folio lock and can race with writes, eg DMA and stores
+ * to mapped pages.
+ *
+ * Context: May sleep. If you need to acquire the locks of two or
+ * more folios, they must be in order of ascending index, if they are
+ * in the same address_space. If they are in different address_spaces,
+ * acquire the lock of the folio which belongs to the address_space which
+ * has the lowest address in memory first.
+ */
+static inline void folio_lock(struct folio *folio)
+{
+ might_sleep();
+ if (!folio_trylock(folio))
+ __folio_lock(folio);
+}
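+
+/*
+ * Illustrative sketch of the ordering rule above for two folios in
+ * the same address_space, assuming index1 < index2:
+ *
+ *   folio_lock(folio1);
+ *   folio_lock(folio2);
+ *   ...
+ *   folio_unlock(folio2);
+ *   folio_unlock(folio1);
+ */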
+
+/**
+ * lock_page() - Lock the folio containing this page.
+ * @page: The page to lock.
+ *
+ * See folio_lock() for a description of what the lock protects.
+ * This is a legacy function and new code should probably use folio_lock()
+ * instead.
+ *
+ * Context: May sleep. Pages in the same folio share a lock, so do not
+ * attempt to lock two pages which share a folio.
*/
static inline void lock_page(struct page *page)
{
+ struct folio *folio;
+ might_sleep();
+
+ folio = page_folio(page);
+ if (!folio_trylock(folio))
+ __folio_lock(folio);
+}
+
+/**
+ * folio_lock_killable() - Lock this folio, interruptible by a fatal signal.
+ * @folio: The folio to lock.
+ *
+ * Attempts to lock the folio, like folio_lock(), except that the sleep
+ * to acquire the lock is interruptible by a fatal signal.
+ *
+ * Context: May sleep; see folio_lock().
+ * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received.
+ */
+static inline int folio_lock_killable(struct folio *folio)
+{
might_sleep();
- if (!trylock_page(page))
- __lock_page(page);
+ if (!folio_trylock(folio))
+ return __folio_lock_killable(folio);
+ return 0;
}
/*
@@ -487,147 +985,396 @@ static inline void lock_page(struct page *page)
*/
static inline int lock_page_killable(struct page *page)
{
- might_sleep();
- if (!trylock_page(page))
- return __lock_page_killable(page);
- return 0;
+ return folio_lock_killable(page_folio(page));
}
/*
- * lock_page_or_retry - Lock the page, unless this would block and the
+ * folio_lock_or_retry - Lock the folio, unless this would block and the
* caller indicated that it can handle a retry.
*
- * Return value and mmap_sem implications depend on flags; see
- * __lock_page_or_retry().
+ * Return value and mmap_lock implications depend on flags; see
+ * __folio_lock_or_retry().
*/
-static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
- unsigned int flags)
+static inline bool folio_lock_or_retry(struct folio *folio,
+ struct mm_struct *mm, unsigned int flags)
{
might_sleep();
- return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
+ return folio_trylock(folio) || __folio_lock_or_retry(folio, mm, flags);
}
/*
- * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
+ * This is exported only for folio_wait_locked/folio_wait_writeback, etc.,
* and should not be used directly.
*/
-extern void wait_on_page_bit(struct page *page, int bit_nr);
-extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
+void folio_wait_bit(struct folio *folio, int bit_nr);
+int folio_wait_bit_killable(struct folio *folio, int bit_nr);
/*
- * Wait for a page to be unlocked.
+ * Wait for a folio to be unlocked.
*
- * This must be called with the caller "holding" the page,
- * ie with increased "page->count" so that the page won't
- * go away during the wait..
+ * This must be called with the caller "holding" the folio,
+ * ie with increased folio reference count so that the folio won't
+ * go away during the wait.
*/
-static inline void wait_on_page_locked(struct page *page)
+static inline void folio_wait_locked(struct folio *folio)
{
- if (PageLocked(page))
- wait_on_page_bit(compound_head(page), PG_locked);
+ if (folio_test_locked(folio))
+ folio_wait_bit(folio, PG_locked);
}
-static inline int wait_on_page_locked_killable(struct page *page)
+static inline int folio_wait_locked_killable(struct folio *folio)
{
- if (!PageLocked(page))
+ if (!folio_test_locked(folio))
return 0;
- return wait_on_page_bit_killable(compound_head(page), PG_locked);
+ return folio_wait_bit_killable(folio, PG_locked);
+}
+
+static inline void wait_on_page_locked(struct page *page)
+{
+ folio_wait_locked(page_folio(page));
}
-extern void put_and_wait_on_page_locked(struct page *page);
+static inline int wait_on_page_locked_killable(struct page *page)
+{
+ return folio_wait_locked_killable(page_folio(page));
+}
void wait_on_page_writeback(struct page *page);
-extern void end_page_writeback(struct page *page);
+void folio_wait_writeback(struct folio *folio);
+int folio_wait_writeback_killable(struct folio *folio);
+void end_page_writeback(struct page *page);
+void folio_end_writeback(struct folio *folio);
void wait_for_stable_page(struct page *page);
+void folio_wait_stable(struct folio *folio);
+void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
+static inline void __set_page_dirty(struct page *page,
+ struct address_space *mapping, int warn)
+{
+ __folio_mark_dirty(page_folio(page), mapping, warn);
+}
+void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
+void __folio_cancel_dirty(struct folio *folio);
+static inline void folio_cancel_dirty(struct folio *folio)
+{
+ /* Avoid atomic ops, locking, etc. when not actually needed. */
+ if (folio_test_dirty(folio))
+ __folio_cancel_dirty(folio);
+}
+bool folio_clear_dirty_for_io(struct folio *folio);
+bool clear_page_dirty_for_io(struct page *page);
+void folio_invalidate(struct folio *folio, size_t offset, size_t length);
+int __must_check folio_write_one(struct folio *folio);
+static inline int __must_check write_one_page(struct page *page)
+{
+ return folio_write_one(page_folio(page));
+}
+int __set_page_dirty_nobuffers(struct page *page);
+bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);
+
+#ifdef CONFIG_MIGRATION
+int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
+ struct folio *src, enum migrate_mode mode);
+#else
+#define filemap_migrate_folio NULL
+#endif
void page_endio(struct page *page, bool is_write, int err);
+void folio_end_private_2(struct folio *folio);
+void folio_wait_private_2(struct folio *folio);
+int folio_wait_private_2_killable(struct folio *folio);
+
/*
* Add an arbitrary waiter to a page's wait queue
*/
-extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
+void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);
/*
- * Fault everything in given userspace address range in.
+ * Fault in userspace address range.
+ */
+size_t fault_in_writeable(char __user *uaddr, size_t size);
+size_t fault_in_subpage_writeable(char __user *uaddr, size_t size);
+size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
+size_t fault_in_readable(const char __user *uaddr, size_t size);
+
+int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
+ pgoff_t index, gfp_t gfp);
+int filemap_add_folio(struct address_space *mapping, struct folio *folio,
+ pgoff_t index, gfp_t gfp);
+void filemap_remove_folio(struct folio *folio);
+void delete_from_page_cache(struct page *page);
+void __filemap_remove_folio(struct folio *folio, void *shadow);
+void replace_page_cache_page(struct page *old, struct page *new);
+void delete_from_page_cache_batch(struct address_space *mapping,
+ struct folio_batch *fbatch);
+int try_to_release_page(struct page *page, gfp_t gfp);
+bool filemap_release_folio(struct folio *folio, gfp_t gfp);
+loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
+ int whence);
+
+/* Must be non-static for BPF error injection */
+int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
+ pgoff_t index, gfp_t gfp, void **shadowp);
+
+bool filemap_range_has_writeback(struct address_space *mapping,
+ loff_t start_byte, loff_t end_byte);
+
+/**
+ * filemap_range_needs_writeback - check if range potentially needs writeback
+ * @mapping: address space within which to check
+ * @start_byte: offset in bytes where the range starts
+ * @end_byte: offset in bytes where the range ends (inclusive)
+ *
+ * Find at least one page in the range supplied, usually used to check if
+ * direct writing in this range will trigger a writeback. Used by O_DIRECT
+ * read/write with IOCB_NOWAIT, to see if the caller needs to do
+ * filemap_write_and_wait_range() before proceeding.
+ *
+ * Return: %true if the caller should do filemap_write_and_wait_range() before
+ * doing O_DIRECT to a page in this range, %false otherwise.
*/
-static inline int fault_in_pages_writeable(char __user *uaddr, int size)
+static inline bool filemap_range_needs_writeback(struct address_space *mapping,
+ loff_t start_byte,
+ loff_t end_byte)
{
- char __user *end = uaddr + size - 1;
+ if (!mapping->nrpages)
+ return false;
+ if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
+ !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
+ return false;
+ return filemap_range_has_writeback(mapping, start_byte, end_byte);
+}
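+
+/*
+ * Illustrative sketch: an IOCB_NOWAIT direct I/O path might bail out
+ * rather than block on writeback (error handling is hypothetical):
+ *
+ *   if (iocb->ki_flags & IOCB_NOWAIT) {
+ *           if (filemap_range_needs_writeback(mapping, pos, end))
+ *                   return -EAGAIN;
+ *   }
+ */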
- if (unlikely(size == 0))
- return 0;
+/**
+ * struct readahead_control - Describes a readahead request.
+ *
+ * A readahead request is for consecutive pages. Filesystems which
+ * implement the ->readahead method should call readahead_page() or
+ * readahead_page_batch() in a loop and attempt to start I/O against
+ * each page in the request.
+ *
+ * Most of the fields in this struct are private and should be accessed
+ * by the functions below.
+ *
+ * @file: The file, used primarily by network filesystems for authentication.
+ * May be NULL if invoked internally by the filesystem.
+ * @mapping: Readahead this filesystem object.
+ * @ra: File readahead state. May be NULL.
+ */
+struct readahead_control {
+ struct file *file;
+ struct address_space *mapping;
+ struct file_ra_state *ra;
+/* private: use the readahead_* accessors instead */
+ pgoff_t _index;
+ unsigned int _nr_pages;
+ unsigned int _batch_count;
+ bool _workingset;
+ unsigned long _pflags;
+};
- if (unlikely(uaddr > end))
- return -EFAULT;
- /*
- * Writing zeroes into userspace here is OK, because we know that if
- * the zero gets there, we'll be overwriting it.
- */
- do {
- if (unlikely(__put_user(0, uaddr) != 0))
- return -EFAULT;
- uaddr += PAGE_SIZE;
- } while (uaddr <= end);
+#define DEFINE_READAHEAD(ractl, f, r, m, i) \
+ struct readahead_control ractl = { \
+ .file = f, \
+ .mapping = m, \
+ .ra = r, \
+ ._index = i, \
+ }
- /* Check whether the range spilled into the next page. */
- if (((unsigned long)uaddr & PAGE_MASK) ==
- ((unsigned long)end & PAGE_MASK))
- return __put_user(0, end);
+#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE)
- return 0;
+void page_cache_ra_unbounded(struct readahead_control *,
+ unsigned long nr_to_read, unsigned long lookahead_count);
+void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
+void page_cache_async_ra(struct readahead_control *, struct folio *,
+ unsigned long req_count);
+void readahead_expand(struct readahead_control *ractl,
+ loff_t new_start, size_t new_len);
+
+/**
+ * page_cache_sync_readahead - generic file readahead
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @file: Used by the filesystem for authentication.
+ * @index: Index of first page to be read.
+ * @req_count: Total number of pages being read by the caller.
+ *
+ * page_cache_sync_readahead() should be called when a cache miss happens:
+ * it will submit the read. The readahead logic may decide to piggyback more
+ * pages onto the read request if access patterns suggest it will improve
+ * performance.
+ */
+static inline
+void page_cache_sync_readahead(struct address_space *mapping,
+ struct file_ra_state *ra, struct file *file, pgoff_t index,
+ unsigned long req_count)
+{
+ DEFINE_READAHEAD(ractl, file, ra, mapping, index);
+ page_cache_sync_ra(&ractl, req_count);
}
-static inline int fault_in_pages_readable(const char __user *uaddr, int size)
+/**
+ * page_cache_async_readahead - file readahead for marked pages
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @file: Used by the filesystem for authentication.
+ * @folio: The folio at @index which triggered the readahead call.
+ * @index: Index of first page to be read.
+ * @req_count: Total number of pages being read by the caller.
+ *
+ * page_cache_async_readahead() should be called when a page is used which
+ * is marked as PageReadahead; this is a marker to suggest that the application
+ * has used up enough of the readahead window that we should start pulling in
+ * more pages.
+ */
+static inline
+void page_cache_async_readahead(struct address_space *mapping,
+ struct file_ra_state *ra, struct file *file,
+ struct folio *folio, pgoff_t index, unsigned long req_count)
{
- volatile char c;
- const char __user *end = uaddr + size - 1;
+ DEFINE_READAHEAD(ractl, file, ra, mapping, index);
+ page_cache_async_ra(&ractl, folio, req_count);
+}
- if (unlikely(size == 0))
- return 0;
+static inline struct folio *__readahead_folio(struct readahead_control *ractl)
+{
+ struct folio *folio;
- if (unlikely(uaddr > end))
- return -EFAULT;
+ BUG_ON(ractl->_batch_count > ractl->_nr_pages);
+ ractl->_nr_pages -= ractl->_batch_count;
+ ractl->_index += ractl->_batch_count;
- do {
- if (unlikely(__get_user(c, uaddr) != 0))
- return -EFAULT;
- uaddr += PAGE_SIZE;
- } while (uaddr <= end);
+ if (!ractl->_nr_pages) {
+ ractl->_batch_count = 0;
+ return NULL;
+ }
+
+ folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+ ractl->_batch_count = folio_nr_pages(folio);
+
+ return folio;
+}
+
+/**
+ * readahead_page - Get the next page to read.
+ * @ractl: The current readahead request.
+ *
+ * Context: The page is locked and has an elevated refcount. The caller
+ * should decrease the refcount once the page has been submitted for I/O,
+ * and unlock the page once all I/O to that page has completed.
+ * Return: A pointer to the next page, or %NULL if we are done.
+ */
+static inline struct page *readahead_page(struct readahead_control *ractl)
+{
+ struct folio *folio = __readahead_folio(ractl);
+
+ return &folio->page;
+}
+
+/**
+ * readahead_folio - Get the next folio to read.
+ * @ractl: The current readahead request.
+ *
+ * Context: The folio is locked. The caller should unlock the folio once
+ * all I/O to that folio has completed.
+ * Return: A pointer to the next folio, or %NULL if we are done.
+ */
+static inline struct folio *readahead_folio(struct readahead_control *ractl)
+{
+ struct folio *folio = __readahead_folio(ractl);
+
+ if (folio)
+ folio_put(folio);
+ return folio;
+}
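+
+/*
+ * Illustrative sketch of a ->readahead implementation built on
+ * readahead_folio(); foo_read_folio_async() is hypothetical and would
+ * be expected to unlock the folio when its I/O completes:
+ *
+ *   static void foo_readahead(struct readahead_control *ractl)
+ *   {
+ *           struct folio *folio;
+ *
+ *           while ((folio = readahead_folio(ractl)))
+ *                   foo_read_folio_async(folio);
+ *   }
+ */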
- /* Check whether the range spilled into the next page. */
- if (((unsigned long)uaddr & PAGE_MASK) ==
- ((unsigned long)end & PAGE_MASK)) {
- return __get_user(c, end);
+static inline unsigned int __readahead_batch(struct readahead_control *rac,
+ struct page **array, unsigned int array_sz)
+{
+ unsigned int i = 0;
+ XA_STATE(xas, &rac->mapping->i_pages, 0);
+ struct page *page;
+
+ BUG_ON(rac->_batch_count > rac->_nr_pages);
+ rac->_nr_pages -= rac->_batch_count;
+ rac->_index += rac->_batch_count;
+ rac->_batch_count = 0;
+
+ xas_set(&xas, rac->_index);
+ rcu_read_lock();
+ xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
+ if (xas_retry(&xas, page))
+ continue;
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_PAGE(PageTail(page), page);
+ array[i++] = page;
+ rac->_batch_count += thp_nr_pages(page);
+ if (i == array_sz)
+ break;
}
+ rcu_read_unlock();
- (void)c;
- return 0;
+ return i;
}
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
- pgoff_t index, gfp_t gfp_mask);
-int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
- pgoff_t index, gfp_t gfp_mask);
-extern void delete_from_page_cache(struct page *page);
-extern void __delete_from_page_cache(struct page *page, void *shadow);
-int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
-void delete_from_page_cache_batch(struct address_space *mapping,
- struct pagevec *pvec);
+/**
+ * readahead_page_batch - Get a batch of pages to read.
+ * @rac: The current readahead request.
+ * @array: An array of pointers to struct page.
+ *
+ * Context: The pages are locked and have an elevated refcount. The caller
+ * should decrease the refcount on each page once it has been submitted
+ * for I/O, and unlock each page once all I/O to it has completed.
+ * Return: The number of pages placed in the array. 0 indicates the request
+ * is complete.
+ */
+#define readahead_page_batch(rac, array) \
+ __readahead_batch(rac, array, ARRAY_SIZE(array))
-/*
- * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run __SetPageLocked() against it.
+/**
+ * readahead_pos - The byte offset into the file of this readahead request.
+ * @rac: The readahead request.
+ */
+static inline loff_t readahead_pos(struct readahead_control *rac)
+{
+ return (loff_t)rac->_index * PAGE_SIZE;
+}
+
+/**
+ * readahead_length - The number of bytes in this readahead request.
+ * @rac: The readahead request.
+ */
+static inline size_t readahead_length(struct readahead_control *rac)
+{
+ return rac->_nr_pages * PAGE_SIZE;
+}
+
+/**
+ * readahead_index - The index of the first page in this readahead request.
+ * @rac: The readahead request.
+ */
+static inline pgoff_t readahead_index(struct readahead_control *rac)
+{
+ return rac->_index;
+}
+
+/**
+ * readahead_count - The number of pages in this readahead request.
+ * @rac: The readahead request.
*/
-static inline int add_to_page_cache(struct page *page,
- struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
+static inline unsigned int readahead_count(struct readahead_control *rac)
{
- int error;
+ return rac->_nr_pages;
+}
- __SetPageLocked(page);
- error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
- if (unlikely(error))
- __ClearPageLocked(page);
- return error;
+/**
+ * readahead_batch_length - The number of bytes in the current batch.
+ * @rac: The readahead request.
+ */
+static inline size_t readahead_batch_length(struct readahead_control *rac)
+{
+ return rac->_batch_count * PAGE_SIZE;
}
static inline unsigned long dir_pages(struct inode *inode)
@@ -637,6 +1384,34 @@ static inline unsigned long dir_pages(struct inode *inode)
}
/**
+ * folio_mkwrite_check_truncate - check if folio was truncated
+ * @folio: the folio to check
+ * @inode: the inode to check the folio against
+ *
+ * Return: the number of bytes in the folio up to EOF,
+ * or -EFAULT if the folio was truncated.
+ */
+static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
+ struct inode *inode)
+{
+ loff_t size = i_size_read(inode);
+ pgoff_t index = size >> PAGE_SHIFT;
+ size_t offset = offset_in_folio(folio, size);
+
+ if (!folio->mapping)
+ return -EFAULT;
+
+ /* folio is wholly inside EOF */
+ if (folio_next_index(folio) - 1 < index)
+ return folio_size(folio);
+ /* folio is wholly past EOF */
+ if (folio->index > index || !offset)
+ return -EFAULT;
+ /* folio is partially inside EOF */
+ return offset;
+}
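+
+/*
+ * Illustrative sketch: a ->page_mkwrite handler might use this after
+ * locking the folio (the surrounding code is hypothetical):
+ *
+ *   folio_lock(folio);
+ *   ret = folio_mkwrite_check_truncate(folio, inode);
+ *   if (ret < 0) {
+ *           folio_unlock(folio);
+ *           return VM_FAULT_NOPAGE;
+ *   }
+ */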
+
+/**
* page_mkwrite_check_truncate - check if page was truncated
* @page: the page to check
* @inode: the inode to check the page against
@@ -664,4 +1439,26 @@ static inline int page_mkwrite_check_truncate(struct page *page,
return offset;
}
+/**
+ * i_blocks_per_folio - How many blocks fit in this folio.
+ * @inode: The inode which contains the blocks.
+ * @folio: The folio.
+ *
+ * If the block size is larger than the size of this folio, return zero.
+ *
+ * Context: The caller should hold a refcount on the folio to prevent it
+ * from being split.
+ * Return: The number of filesystem blocks covered by this folio.
+ */
+static inline
+unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
+{
+ return folio_size(folio) >> inode->i_blkbits;
+}
+
+static inline
+unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
+{
+ return i_blocks_per_folio(inode, page_folio(page));
+}
#endif /* _LINUX_PAGEMAP_H */
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 081d934eda64..215eb6c3bdc9 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -15,8 +15,10 @@
#define PAGEVEC_SIZE 15
struct page;
+struct folio;
struct address_space;
+/* Layout must match folio_batch */
struct pagevec {
unsigned char nr;
bool percpu_pvec_drained;
@@ -24,28 +26,9 @@ struct pagevec {
};
void __pagevec_release(struct pagevec *pvec);
-void __pagevec_lru_add(struct pagevec *pvec);
-unsigned pagevec_lookup_entries(struct pagevec *pvec,
- struct address_space *mapping,
- pgoff_t start, unsigned nr_entries,
- pgoff_t *indices);
-void pagevec_remove_exceptionals(struct pagevec *pvec);
-unsigned pagevec_lookup_range(struct pagevec *pvec,
- struct address_space *mapping,
- pgoff_t *start, pgoff_t end);
-static inline unsigned pagevec_lookup(struct pagevec *pvec,
- struct address_space *mapping,
- pgoff_t *start)
-{
- return pagevec_lookup_range(pvec, mapping, start, (pgoff_t)-1);
-}
-
unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
struct address_space *mapping, pgoff_t *index, pgoff_t end,
xa_mark_t tag);
-unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
- struct address_space *mapping, pgoff_t *index, pgoff_t end,
- xa_mark_t tag, unsigned max_pages);
static inline unsigned pagevec_lookup_tag(struct pagevec *pvec,
struct address_space *mapping, pgoff_t *index, xa_mark_t tag)
{
@@ -88,4 +71,69 @@ static inline void pagevec_release(struct pagevec *pvec)
__pagevec_release(pvec);
}
+/**
+ * struct folio_batch - A collection of folios.
+ *
+ * The folio_batch is used to amortise the cost of retrieving and
+ * operating on a set of folios. The order of folios in the batch may be
+ * significant (eg delete_from_page_cache_batch()). Some users of the
+ * folio_batch store "exceptional" entries in it which can be removed
+ * by calling folio_batch_remove_exceptionals().
+ */
+struct folio_batch {
+ unsigned char nr;
+ bool percpu_pvec_drained;
+ struct folio *folios[PAGEVEC_SIZE];
+};
+
+/* Layout must match pagevec */
+static_assert(sizeof(struct pagevec) == sizeof(struct folio_batch));
+static_assert(offsetof(struct pagevec, pages) ==
+ offsetof(struct folio_batch, folios));
+
+/**
+ * folio_batch_init() - Initialise a batch of folios
+ * @fbatch: The folio batch.
+ *
+ * A freshly initialised folio_batch contains zero folios.
+ */
+static inline void folio_batch_init(struct folio_batch *fbatch)
+{
+ fbatch->nr = 0;
+ fbatch->percpu_pvec_drained = false;
+}
+
+static inline unsigned int folio_batch_count(struct folio_batch *fbatch)
+{
+ return fbatch->nr;
+}
+
+static inline unsigned int fbatch_space(struct folio_batch *fbatch)
+{
+ return PAGEVEC_SIZE - fbatch->nr;
+}
+
+/**
+ * folio_batch_add() - Add a folio to a batch.
+ * @fbatch: The folio batch.
+ * @folio: The folio to add.
+ *
+ * The folio is added to the end of the batch.
+ * The batch must have previously been initialised using folio_batch_init().
+ *
+ * Return: The number of slots still available.
+ */
+static inline unsigned folio_batch_add(struct folio_batch *fbatch,
+ struct folio *folio)
+{
+ fbatch->folios[fbatch->nr++] = folio;
+ return fbatch_space(fbatch);
+}
+
+static inline void folio_batch_release(struct folio_batch *fbatch)
+{
+ pagevec_release((struct pagevec *)fbatch);
+}
+
+void folio_batch_remove_exceptionals(struct folio_batch *fbatch);
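+
+/*
+ * Illustrative sketch: fill a batch and release it in one go;
+ * foo_next_folio() is hypothetical. folio_batch_add() returns the
+ * space remaining, so a zero return means the batch is full.
+ *
+ *   struct folio_batch fbatch;
+ *   struct folio *folio;
+ *
+ *   folio_batch_init(&fbatch);
+ *   while ((folio = foo_next_folio())) {
+ *           if (!folio_batch_add(&fbatch, folio))
+ *                   break;
+ *   }
+ *   folio_batch_release(&fbatch);
+ */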
#endif /* _LINUX_PAGEVEC_H */
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index b1cb6b753abb..f3fafb731ffd 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -7,7 +7,7 @@
struct mm_walk;
/**
- * mm_walk_ops - callbacks for walk_page_range
+ * struct mm_walk_ops - callbacks for walk_page_range
* @pgd_entry: if set, called for each non-empty PGD (top-level) entry
* @p4d_entry: if set, called for each non-empty P4D entry
* @pud_entry: if set, called for each non-empty PUD entry
@@ -15,12 +15,12 @@ struct mm_walk;
* this handler is required to be able to handle
* pmd_trans_huge() pmds. They may simply choose to
* split_huge_page() instead of handling it explicitly.
- * @pte_entry: if set, called for each non-empty PTE (lowest-level)
- * entry
+ * @pte_entry: if set, called for each PTE (lowest-level) entry,
+ * including empty ones
* @pte_hole: if set, called for each hole at all levels,
- * depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD
- * 4:PTE. Any folded depths (where PTRS_PER_P?D is equal
- * to 1) are skipped.
+ * depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD.
+ * Any folded depths (where PTRS_PER_P?D is equal to 1)
+ * are skipped.
* @hugetlb_entry: if set, called for each hugetlb entry
* @test_walk: caller specific callback function to determine whether
* we walk over the current vma or not. Returning 0 means
@@ -71,7 +71,7 @@ enum page_walk_action {
};
/**
- * mm_walk - walk_page_range data
+ * struct mm_walk - walk_page_range data
* @ops: operation to call during the walk
* @mm: mm_struct representing the target process of page table walk
* @pgd: pointer to PGD; only valid with no_vma (otherwise set to NULL)
diff --git a/include/linux/panic.h b/include/linux/panic.h
new file mode 100644
index 000000000000..c7759b3f2045
--- /dev/null
+++ b/include/linux/panic.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PANIC_H
+#define _LINUX_PANIC_H
+
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
+
+struct pt_regs;
+
+extern long (*panic_blink)(int state);
+__printf(1, 2)
+void panic(const char *fmt, ...) __noreturn __cold;
+void nmi_panic(struct pt_regs *regs, const char *msg);
+extern void oops_enter(void);
+extern void oops_exit(void);
+extern bool oops_may_print(void);
+
+extern int panic_timeout;
+extern unsigned long panic_print;
+extern int panic_on_oops;
+extern int panic_on_unrecovered_nmi;
+extern int panic_on_io_nmi;
+extern int panic_on_warn;
+
+extern unsigned long panic_on_taint;
+extern bool panic_on_taint_nousertaint;
+
+extern int sysctl_panic_on_rcu_stall;
+extern int sysctl_max_rcu_stall_to_panic;
+extern int sysctl_panic_on_stackoverflow;
+
+extern bool crash_kexec_post_notifiers;
+
+/*
+ * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It
+ * holds a CPU number which is executing panic() currently. A value of
+ * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec().
+ */
+extern atomic_t panic_cpu;
+#define PANIC_CPU_INVALID -1
+
+/*
+ * Only to be used by arch init code. If the user overrode the default
+ * CONFIG_PANIC_TIMEOUT, honor it.
+ */
+static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
+{
+ if (panic_timeout == arch_default_timeout)
+ panic_timeout = timeout;
+}
+
+/* This cannot be an enum because some may be used in assembly source. */
+#define TAINT_PROPRIETARY_MODULE 0
+#define TAINT_FORCED_MODULE 1
+#define TAINT_CPU_OUT_OF_SPEC 2
+#define TAINT_FORCED_RMMOD 3
+#define TAINT_MACHINE_CHECK 4
+#define TAINT_BAD_PAGE 5
+#define TAINT_USER 6
+#define TAINT_DIE 7
+#define TAINT_OVERRIDDEN_ACPI_TABLE 8
+#define TAINT_WARN 9
+#define TAINT_CRAP 10
+#define TAINT_FIRMWARE_WORKAROUND 11
+#define TAINT_OOT_MODULE 12
+#define TAINT_UNSIGNED_MODULE 13
+#define TAINT_SOFTLOCKUP 14
+#define TAINT_LIVEPATCH 15
+#define TAINT_AUX 16
+#define TAINT_RANDSTRUCT 17
+#define TAINT_TEST 18
+#define TAINT_FLAGS_COUNT 19
+#define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1)
+
+struct taint_flag {
+ char c_true; /* character printed when tainted */
+ char c_false; /* character printed when not tainted */
+ bool module; /* also show as a per-module taint flag */
+};
+
+extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT];
+
+enum lockdep_ok {
+ LOCKDEP_STILL_OK,
+ LOCKDEP_NOW_UNRELIABLE,
+};
+
+extern const char *print_tainted(void);
+extern void add_taint(unsigned flag, enum lockdep_ok);
+extern int test_taint(unsigned flag);
+extern unsigned long get_taint(void);
+
+#endif /* _LINUX_PANIC_H */
diff --git a/include/linux/panic_notifier.h b/include/linux/panic_notifier.h
new file mode 100644
index 000000000000..41e32483d7a7
--- /dev/null
+++ b/include/linux/panic_notifier.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PANIC_NOTIFIERS_H
+#define _LINUX_PANIC_NOTIFIERS_H
+
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+extern struct atomic_notifier_head panic_notifier_list;
+
+extern bool crash_kexec_post_notifiers;
+
+#endif /* _LINUX_PANIC_NOTIFIERS_H */
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 13932ce8b37b..1c16ffb8b908 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -297,13 +297,54 @@ int __must_check __parport_register_driver(struct parport_driver *,
* parport_register_driver must be a macro so that KBUILD_MODNAME can
* be expanded
*/
+
+/**
+ * parport_register_driver - register a parallel port device driver
+ * @driver: structure describing the driver
+ *
+ * This can be called by a parallel port device driver in order
+ * to receive notifications about ports being found in the
+ * system, as well as ports no longer available.
+ *
+ * If devmodel is true then the new device model is used
+ * for registration.
+ *
+ * The @driver structure is allocated by the caller and must not be
+ * deallocated until after calling parport_unregister_driver().
+ *
+ * If using the non device model:
+ * The driver's attach() function may block. The port that
+ * attach() is given will be valid for the duration of the
+ * callback, but if the driver wants to take a copy of the
+ * pointer it must call parport_get_port() to do so. Calling
+ * parport_register_device() on that port will do this for you.
+ *
+ * The driver's detach() function may block. The port that
+ * detach() is given will be valid for the duration of the
+ * callback, but if the driver wants to take a copy of the
+ * pointer it must call parport_get_port() to do so.
+ *
+ *
+ * Returns 0 on success. The non device model always succeeds, but the
+ * new device model can fail and will return the error code.
+ **/
#define parport_register_driver(driver) \
__parport_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
/* Unregister a high-level driver. */
-extern void parport_unregister_driver (struct parport_driver *);
void parport_unregister_driver(struct parport_driver *);
+/**
+ * module_parport_driver() - Helper macro for registering a modular parport driver
+ * @__parport_driver: struct parport_driver to be used
+ *
+ * Helper macro for parport drivers which do not do anything special in module
+ * init and exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit().
+ */
+#define module_parport_driver(__parport_driver) \
+ module_driver(__parport_driver, parport_register_driver, parport_unregister_driver)
+
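+/*
+ * Illustrative sketch with a hypothetical foo_driver; the macro then
+ * replaces module_init()/module_exit() entirely:
+ *
+ *   static struct parport_driver foo_driver = {
+ *           .name = "foo",
+ *           .match_port = foo_match_port,
+ *           .detach = foo_detach,
+ *           .devmodel = true,
+ *   };
+ *   module_parport_driver(foo_driver);
+ */
+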
/* If parport_register_driver doesn't fit your needs, perhaps
* parport_find_xxx does. */
extern struct parport *parport_find_number (int);
@@ -325,18 +366,10 @@ struct pardev_cb {
unsigned int flags;
};
-/* parport_register_device declares that a device is connected to a
- port, and tells the kernel all it needs to know.
- - pf is the preemption function (may be NULL for no callback)
- - kf is the wake-up function (may be NULL for no callback)
- - irq_func is the interrupt handler (may be NULL for no interrupts)
- - handle is a user pointer that gets handed to callback functions. */
-struct pardevice *parport_register_device(struct parport *port,
- const char *name,
- int (*pf)(void *), void (*kf)(void *),
- void (*irq_func)(void *),
- int flags, void *handle);
-
+/*
+ * parport_register_dev_model declares that a device is connected to a
+ * port, and tells the kernel all it needs to know.
+ */
struct pardevice *
parport_register_dev_model(struct parport *port, const char *name,
const struct pardev_cb *par_dev_cb, int cnt);
diff --git a/include/linux/parser.h b/include/linux/parser.h
index 12fc3482f5fc..dd79f45a37b8 100644
--- a/include/linux/parser.h
+++ b/include/linux/parser.h
@@ -7,7 +7,8 @@
* but could potentially be used anywhere else that simple option=arg
* parsing is required.
*/
-
+#ifndef _LINUX_PARSER_H
+#define _LINUX_PARSER_H
/* associates an integer enumerator with a pattern string. */
struct match_token {
@@ -28,9 +29,12 @@ typedef struct {
int match_token(char *, const match_table_t table, substring_t args[]);
int match_int(substring_t *, int *result);
+int match_uint(substring_t *s, unsigned int *result);
int match_u64(substring_t *, u64 *result);
int match_octal(substring_t *, int *result);
int match_hex(substring_t *, int *result);
bool match_wildcard(const char *pattern, const char *str);
size_t match_strlcpy(char *, const substring_t *, size_t);
char *match_strdup(const substring_t *);
+
+#endif /* _LINUX_PARSER_H */
diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h
new file mode 100644
index 000000000000..abeba356bc3f
--- /dev/null
+++ b/include/linux/part_stat.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PART_STAT_H
+#define _LINUX_PART_STAT_H
+
+#include <linux/blkdev.h>
+#include <asm/local.h>
+
+struct disk_stats {
+ u64 nsecs[NR_STAT_GROUPS];
+ unsigned long sectors[NR_STAT_GROUPS];
+ unsigned long ios[NR_STAT_GROUPS];
+ unsigned long merges[NR_STAT_GROUPS];
+ unsigned long io_ticks;
+ local_t in_flight[2];
+};
+
+/*
+ * Macros to operate on percpu disk statistics:
+ *
+ * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters and should
+ * be called between disk_stat_lock() and disk_stat_unlock().
+ *
+ * part_stat_read() can be called at any time.
+ */
+#define part_stat_lock() preempt_disable()
+#define part_stat_unlock() preempt_enable()
+
+#define part_stat_get_cpu(part, field, cpu) \
+ (per_cpu_ptr((part)->bd_stats, (cpu))->field)
+
+#define part_stat_get(part, field) \
+ part_stat_get_cpu(part, field, smp_processor_id())
+
+#define part_stat_read(part, field) \
+({ \
+ typeof((part)->bd_stats->field) res = 0; \
+ unsigned int _cpu; \
+ for_each_possible_cpu(_cpu) \
+ res += per_cpu_ptr((part)->bd_stats, _cpu)->field; \
+ res; \
+})
+
+static inline void part_stat_set_all(struct block_device *part, int value)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ memset(per_cpu_ptr(part->bd_stats, i), value,
+ sizeof(struct disk_stats));
+}
+
+#define part_stat_read_accum(part, field) \
+ (part_stat_read(part, field[STAT_READ]) + \
+ part_stat_read(part, field[STAT_WRITE]) + \
+ part_stat_read(part, field[STAT_DISCARD]))
+
+#define __part_stat_add(part, field, addnd) \
+ __this_cpu_add((part)->bd_stats->field, addnd)
+
+#define part_stat_add(part, field, addnd) do { \
+ __part_stat_add((part), field, addnd); \
+ if ((part)->bd_partno) \
+ __part_stat_add(bdev_whole(part), field, addnd); \
+} while (0)
+
+#define part_stat_dec(part, field) \
+ part_stat_add(part, field, -1)
+#define part_stat_inc(part, field) \
+ part_stat_add(part, field, 1)
+#define part_stat_sub(part, field, subnd) \
+ part_stat_add(part, field, -subnd)
+
+#define part_stat_local_dec(part, field) \
+ local_dec(&(part_stat_get(part, field)))
+#define part_stat_local_inc(part, field) \
+ local_inc(&(part_stat_get(part, field)))
+#define part_stat_local_read(part, field) \
+ local_read(&(part_stat_get(part, field)))
+#define part_stat_local_read_cpu(part, field, cpu) \
+ local_read(&(part_stat_get_cpu(part, field, cpu)))
+
+#endif /* _LINUX_PART_STAT_H */
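A sketch of the locking discipline documented above, assuming "bdev" is a struct block_device * and "nr_sectors" is a sector count (neither is from this patch):

	part_stat_lock();
	part_stat_inc(bdev, ios[STAT_READ]);
	part_stat_add(bdev, sectors[STAT_READ], nr_sectors);
	part_stat_unlock();

	/* part_stat_read() needs no lock; it sums the per-CPU counters */
	pr_info("reads: %lu\n", part_stat_read(bdev, ios[STAT_READ]));
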
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 62b7fdcc661c..078225b514d4 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -27,7 +27,7 @@ extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle);
struct pci_ecam_ops;
extern int pci_mcfg_lookup(struct acpi_pci_root *root, struct resource *cfgres,
- struct pci_ecam_ops **ecam_ops);
+ const struct pci_ecam_ops **ecam_ops);
static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
{
@@ -84,6 +84,14 @@ extern struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
void acpi_pci_add_bus(struct pci_bus *bus);
void acpi_pci_remove_bus(struct pci_bus *bus);
+#ifdef CONFIG_PCI
+void pci_acpi_setup(struct device *dev, struct acpi_device *adev);
+void pci_acpi_cleanup(struct device *dev, struct acpi_device *adev);
+#else
+static inline void pci_acpi_setup(struct device *dev, struct acpi_device *adev) {}
+static inline void pci_acpi_cleanup(struct device *dev, struct acpi_device *adev) {}
+#endif
+
#ifdef CONFIG_ACPI_PCI_SLOT
void acpi_pci_slot_init(void);
void acpi_pci_slot_enumerate(struct pci_bus *bus);
@@ -107,20 +115,27 @@ static inline void acpiphp_check_host_bridge(struct acpi_device *adev) { }
#endif
extern const guid_t pci_acpi_dsm_guid;
-#define IGNORE_PCI_BOOT_CONFIG_DSM 0x05
-#define DEVICE_LABEL_DSM 0x07
-#define RESET_DELAY_DSM 0x08
-#define FUNCTION_DELAY_DSM 0x09
+
+/* _DSM Definitions for PCI */
+#define DSM_PCI_PRESERVE_BOOT_CONFIG 0x05
+#define DSM_PCI_DEVICE_NAME 0x07
+#define DSM_PCI_POWER_ON_RESET_DELAY 0x08
+#define DSM_PCI_DEVICE_READINESS_DURATIONS 0x09
+
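For illustration only, a caller might evaluate the device-name _DSM with the renamed constant; the revision 0x2 used here is an assumption modelled on existing PCI _DSM users, not something this header defines:

	union acpi_object *obj;

	obj = acpi_evaluate_dsm(ACPI_HANDLE(&pdev->dev), &pci_acpi_dsm_guid,
				0x2, DSM_PCI_DEVICE_NAME, NULL);
	if (obj) {
		/* obj carries the firmware-provided device label */
		ACPI_FREE(obj);
	}
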
+#ifdef CONFIG_PCIE_EDR
+void pci_acpi_add_edr_notifier(struct pci_dev *pdev);
+void pci_acpi_remove_edr_notifier(struct pci_dev *pdev);
+#else
+static inline void pci_acpi_add_edr_notifier(struct pci_dev *pdev) { }
+static inline void pci_acpi_remove_edr_notifier(struct pci_dev *pdev) { }
+#endif /* CONFIG_PCIE_EDR */
+
+int pci_acpi_set_companion_lookup_hook(struct acpi_device *(*func)(struct pci_dev *));
+void pci_acpi_clear_companion_lookup_hook(void);
#else /* CONFIG_ACPI */
static inline void acpi_pci_add_bus(struct pci_bus *bus) { }
static inline void acpi_pci_remove_bus(struct pci_bus *bus) { }
#endif /* CONFIG_ACPI */
-#ifdef CONFIG_ACPI_APEI
-extern bool aer_acpi_firmware_first(void);
-#else
-static inline bool aer_acpi_firmware_first(void) { return false; }
-#endif
-
#endif /* _PCI_ACPI_H_ */
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index d08f0869f121..df54cd5b15db 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -6,11 +6,14 @@
#ifdef CONFIG_PCI_ATS
/* Address Translation Service */
+bool pci_ats_supported(struct pci_dev *dev);
int pci_enable_ats(struct pci_dev *dev, int ps);
void pci_disable_ats(struct pci_dev *dev);
int pci_ats_queue_depth(struct pci_dev *dev);
int pci_ats_page_aligned(struct pci_dev *dev);
#else /* CONFIG_PCI_ATS */
+static inline bool pci_ats_supported(struct pci_dev *d)
+{ return false; }
static inline int pci_enable_ats(struct pci_dev *d, int ps)
{ return -ENODEV; }
static inline void pci_disable_ats(struct pci_dev *d) { }
@@ -25,6 +28,10 @@ int pci_enable_pri(struct pci_dev *pdev, u32 reqs);
void pci_disable_pri(struct pci_dev *pdev);
int pci_reset_pri(struct pci_dev *pdev);
int pci_prg_resp_pasid_required(struct pci_dev *pdev);
+bool pci_pri_supported(struct pci_dev *pdev);
+#else
+static inline bool pci_pri_supported(struct pci_dev *pdev)
+{ return false; }
#endif /* CONFIG_PCI_PRI */
#ifdef CONFIG_PCI_PASID
diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h
deleted file mode 100644
index 249d4d7fbf18..000000000000
--- a/include/linux/pci-dma-compat.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* include this file if the platform implements the dma_ DMA Mapping API
- * and wants to provide the pci_ DMA Mapping API in terms of it */
-
-#ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H
-#define _ASM_GENERIC_PCI_DMA_COMPAT_H
-
-#include <linux/dma-mapping.h>
-
-/* This defines the direction arg to the DMA mapping routines. */
-#define PCI_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL
-#define PCI_DMA_TODEVICE DMA_TO_DEVICE
-#define PCI_DMA_FROMDEVICE DMA_FROM_DEVICE
-#define PCI_DMA_NONE DMA_NONE
-
-static inline void *
-pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
- dma_addr_t *dma_handle)
-{
- return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
-}
-
-static inline void *
-pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
- dma_addr_t *dma_handle)
-{
- return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
-}
-
-static inline void
-pci_free_consistent(struct pci_dev *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
-{
- dma_free_coherent(&hwdev->dev, size, vaddr, dma_handle);
-}
-
-static inline dma_addr_t
-pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
-{
- return dma_map_single(&hwdev->dev, ptr, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
- size_t size, int direction)
-{
- dma_unmap_single(&hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
-}
-
-static inline dma_addr_t
-pci_map_page(struct pci_dev *hwdev, struct page *page,
- unsigned long offset, size_t size, int direction)
-{
- return dma_map_page(&hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
- size_t size, int direction)
-{
- dma_unmap_page(&hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
-}
-
-static inline int
-pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
- int nents, int direction)
-{
- return dma_map_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
- int nents, int direction)
-{
- dma_unmap_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
- size_t size, int direction)
-{
- dma_sync_single_for_cpu(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
- size_t size, int direction)
-{
- dma_sync_single_for_device(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
- int nelems, int direction)
-{
- dma_sync_sg_for_cpu(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
- int nelems, int direction)
-{
- dma_sync_sg_for_device(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
-}
-
-static inline int
-pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
-{
- return dma_mapping_error(&pdev->dev, dma_addr);
-}
-
-#ifdef CONFIG_PCI
-static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
-{
- return dma_set_mask(&dev->dev, mask);
-}
-
-static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
-{
- return dma_set_coherent_mask(&dev->dev, mask);
-}
-#else
-static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
-{ return -EIO; }
-static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
-{ return -EIO; }
-#endif
-
-#endif
diff --git a/include/linux/pci-doe.h b/include/linux/pci-doe.h
new file mode 100644
index 000000000000..ed9b4df792b8
--- /dev/null
+++ b/include/linux/pci-doe.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Data Object Exchange
+ * PCIe r6.0, sec 6.30 DOE
+ *
+ * Copyright (C) 2021 Huawei
+ * Jonathan Cameron <Jonathan.Cameron@huawei.com>
+ *
+ * Copyright (C) 2022 Intel Corporation
+ * Ira Weiny <ira.weiny@intel.com>
+ */
+
+#ifndef LINUX_PCI_DOE_H
+#define LINUX_PCI_DOE_H
+
+struct pci_doe_protocol {
+ u16 vid;
+ u8 type;
+};
+
+struct pci_doe_mb;
+
+/**
+ * struct pci_doe_task - represents a single query/response
+ *
+ * @prot: DOE Protocol
+ * @request_pl: The request payload
+ * @request_pl_sz: Size of the request payload (bytes)
+ * @response_pl: The response payload
+ * @response_pl_sz: Size of the response payload (bytes)
+ * @rv: Return value. Length of received response or error (bytes)
+ * @complete: Called when task is complete
+ * @private: Private data for the consumer
+ * @work: Used internally by the mailbox
+ * @doe_mb: Used internally by the mailbox
+ *
+ * The payload sizes and rv are specified in bytes with the following
+ * restrictions concerning the protocol.
+ *
+ * 1) The request_pl_sz must be a multiple of double words (4 bytes)
+ * 2) The response_pl_sz must be >= a single double word (4 bytes)
+ * 3) rv is returned as bytes but it will be a multiple of double words
+ *
+ * NOTE there is no need for the caller to initialize work or doe_mb.
+ */
+struct pci_doe_task {
+ struct pci_doe_protocol prot;
+ u32 *request_pl;
+ size_t request_pl_sz;
+ u32 *response_pl;
+ size_t response_pl_sz;
+ int rv;
+ void (*complete)(struct pci_doe_task *task);
+ void *private;
+
+ /* No need for the user to initialize these fields */
+ struct work_struct work;
+ struct pci_doe_mb *doe_mb;
+};
+
+/**
+ * pci_doe_for_each_off - Iterate each DOE capability
+ * @pdev: struct pci_dev to iterate
+ * @off: u16 of config space offset of each mailbox capability found
+ */
+#define pci_doe_for_each_off(pdev, off) \
+ for (off = pci_find_next_ext_capability(pdev, off, \
+ PCI_EXT_CAP_ID_DOE); \
+ off > 0; \
+ off = pci_find_next_ext_capability(pdev, off, \
+ PCI_EXT_CAP_ID_DOE))
+
+struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset);
+bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type);
+int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task);
+
+#endif
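A hedged sketch of the intended flow, using only the helpers declared above; my_doe_query(), the PCI-SIG vendor ID, and protocol type 1 are illustrative assumptions:

static void my_doe_done(struct pci_doe_task *task)
{
	complete(task->private);	/* assumes ->private is a struct completion * */
}

static int my_doe_query(struct pci_dev *pdev, u32 *req, size_t req_sz,
			u32 *rsp, size_t rsp_sz)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct pci_doe_task task = {
		.prot.vid = PCI_VENDOR_ID_PCI_SIG,	/* example protocol */
		.prot.type = 1,
		.request_pl = req,
		.request_pl_sz = req_sz,
		.response_pl = rsp,
		.response_pl_sz = rsp_sz,
		.complete = my_doe_done,
		.private = &done,
	};
	struct pci_doe_mb *mb = NULL;
	u16 off = 0;
	int rc;

	pci_doe_for_each_off(pdev, off) {
		mb = pcim_doe_create_mb(pdev, off);
		if (!IS_ERR(mb) &&
		    pci_doe_supports_prot(mb, task.prot.vid, task.prot.type))
			break;
		mb = NULL;
	}
	if (!mb)
		return -ENODEV;

	rc = pci_doe_submit_task(mb, &task);
	if (rc)
		return rc;
	wait_for_completion(&done);
	return task.rv;	/* response length in bytes, or negative error */
}
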
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index a73164c85e78..6b1301e2498e 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -10,6 +10,33 @@
#include <linux/platform_device.h>
/*
+ * Memory address shift values for the byte-level address that
+ * can be used when accessing the PCI Express Configuration Space.
+ */
+
+/*
+ * Enhanced Configuration Access Mechanism (ECAM)
+ *
+ * See PCI Express Base Specification, Revision 5.0, Version 1.0,
+ * Section 7.2.2, Table 7-1, p. 677.
+ */
+#define PCIE_ECAM_BUS_SHIFT 20 /* Bus number */
+#define PCIE_ECAM_DEVFN_SHIFT 12 /* Device and Function number */
+
+#define PCIE_ECAM_BUS_MASK 0xff
+#define PCIE_ECAM_DEVFN_MASK 0xff
+#define PCIE_ECAM_REG_MASK 0xfff /* Limit offset to a maximum of 4K */
+
+#define PCIE_ECAM_BUS(x) (((x) & PCIE_ECAM_BUS_MASK) << PCIE_ECAM_BUS_SHIFT)
+#define PCIE_ECAM_DEVFN(x) (((x) & PCIE_ECAM_DEVFN_MASK) << PCIE_ECAM_DEVFN_SHIFT)
+#define PCIE_ECAM_REG(x) ((x) & PCIE_ECAM_REG_MASK)
+
+#define PCIE_ECAM_OFFSET(bus, devfn, where) \
+ (PCIE_ECAM_BUS(bus) | \
+ PCIE_ECAM_DEVFN(devfn) | \
+ PCIE_ECAM_REG(where))
+
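As a sketch, a flat single-mapping map_bus() could reduce to the following (assuming cfg->win maps the whole bus range, and ignoring the bus-number offset handling the real pci_ecam_map_bus() performs):

static void __iomem *my_ecam_map_bus(struct pci_bus *bus,
				     unsigned int devfn, int where)
{
	struct pci_config_window *cfg = bus->sysdata;

	return cfg->win + PCIE_ECAM_OFFSET(bus->number, devfn, where);
}
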
+/*
* struct to hold pci ops and bus shift of the config window
* for a PCI controller.
*/
@@ -28,8 +55,9 @@ struct pci_ecam_ops {
struct pci_config_window {
struct resource res;
struct resource busr;
+ unsigned int bus_shift;
void *priv;
- struct pci_ecam_ops *ops;
+ const struct pci_ecam_ops *ops;
union {
void __iomem *win; /* 64-bit single mapping */
void __iomem **winp; /* 32-bit per-bus mapping */
@@ -40,29 +68,31 @@ struct pci_config_window {
/* create and free pci_config_window */
struct pci_config_window *pci_ecam_create(struct device *dev,
struct resource *cfgres, struct resource *busr,
- struct pci_ecam_ops *ops);
+ const struct pci_ecam_ops *ops);
void pci_ecam_free(struct pci_config_window *cfg);
/* map_bus when ->sysdata is an instance of pci_config_window */
void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
/* default ECAM ops */
-extern struct pci_ecam_ops pci_generic_ecam_ops;
+extern const struct pci_ecam_ops pci_generic_ecam_ops;
#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
-extern struct pci_ecam_ops pci_32b_ops; /* 32-bit accesses only */
-extern struct pci_ecam_ops hisi_pcie_ops; /* HiSilicon */
-extern struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */
-extern struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */
-extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */
-extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
-extern struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */
+extern const struct pci_ecam_ops pci_32b_ops; /* 32-bit accesses only */
+extern const struct pci_ecam_ops pci_32b_read_ops; /* 32-bit read only */
+extern const struct pci_ecam_ops hisi_pcie_ops; /* HiSilicon */
+extern const struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */
+extern const struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */
+extern const struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */
+extern const struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
+extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */
+extern const struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */
+extern const struct pci_ecam_ops loongson_pci_ecam_ops; /* Loongson PCIe */
#endif
-#ifdef CONFIG_PCI_HOST_COMMON
+#if IS_ENABLED(CONFIG_PCI_HOST_COMMON)
/* for DT-based PCI controllers that support ECAM */
-int pci_host_common_probe(struct platform_device *pdev,
- struct pci_ecam_ops *ops);
+int pci_host_common_probe(struct platform_device *pdev);
int pci_host_common_remove(struct platform_device *pdev);
#endif
#endif
diff --git a/include/linux/pci-ep-cfs.h b/include/linux/pci-ep-cfs.h
index f42b0fd4b4bc..3e2140d7e31d 100644
--- a/include/linux/pci-ep-cfs.h
+++ b/include/linux/pci-ep-cfs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0+ */
-/**
+/*
* PCI Endpoint ConfigFS header file
*
* Copyright (C) 2017 Texas Instruments
@@ -19,7 +19,7 @@ void pci_ep_cfs_remove_epf_group(struct config_group *group);
#else
static inline struct config_group *pci_ep_cfs_add_epc_group(const char *name)
{
- return 0;
+ return NULL;
}
static inline void pci_ep_cfs_remove_epc_group(struct config_group *group)
@@ -28,7 +28,7 @@ static inline void pci_ep_cfs_remove_epc_group(struct config_group *group)
static inline struct config_group *pci_ep_cfs_add_epf_group(const char *name)
{
- return 0;
+ return NULL;
}
static inline void pci_ep_cfs_remove_epf_group(struct config_group *group)
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index 56f1846b9d39..a48778e1a4ee 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/**
+/*
* PCI Endpoint *Controller* (EPC) header file
*
* Copyright (C) 2017 Texas Instruments
@@ -13,6 +13,12 @@
struct pci_epc;
+enum pci_epc_interface_type {
+ UNKNOWN_INTERFACE = -1,
+ PRIMARY_INTERFACE,
+ SECONDARY_INTERFACE,
+};
+
enum pci_epc_irq_type {
PCI_EPC_IRQ_UNKNOWN,
PCI_EPC_IRQ_LEGACY,
@@ -20,6 +26,19 @@ enum pci_epc_irq_type {
PCI_EPC_IRQ_MSIX,
};
+static inline const char *
+pci_epc_interface_string(enum pci_epc_interface_type type)
+{
+ switch (type) {
+ case PRIMARY_INTERFACE:
+ return "primary";
+ case SECONDARY_INTERFACE:
+ return "secondary";
+ default:
+ return "UNKNOWN interface";
+ }
+}
+
/**
* struct pci_epc_ops - set of function pointers for performing EPC operations
* @write_header: ops to populate configuration space header
@@ -36,48 +55,67 @@ enum pci_epc_irq_type {
* @get_msix: ops to get the number of MSI-X interrupts allocated by the RC
* from the MSI-X capability register
* @raise_irq: ops to raise a legacy, MSI or MSI-X interrupt
+ * @map_msi_irq: ops to map physical address to MSI address and return MSI data
* @start: ops to start the PCI link
* @stop: ops to stop the PCI link
+ * @get_features: ops to get the features supported by the EPC
* @owner: the module owner containing the ops
*/
struct pci_epc_ops {
- int (*write_header)(struct pci_epc *epc, u8 func_no,
+ int (*write_header)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr);
- int (*set_bar)(struct pci_epc *epc, u8 func_no,
+ int (*set_bar)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar);
- void (*clear_bar)(struct pci_epc *epc, u8 func_no,
+ void (*clear_bar)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar);
- int (*map_addr)(struct pci_epc *epc, u8 func_no,
+ int (*map_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t addr, u64 pci_addr, size_t size);
- void (*unmap_addr)(struct pci_epc *epc, u8 func_no,
+ void (*unmap_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t addr);
- int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts);
- int (*get_msi)(struct pci_epc *epc, u8 func_no);
- int (*set_msix)(struct pci_epc *epc, u8 func_no, u16 interrupts);
- int (*get_msix)(struct pci_epc *epc, u8 func_no);
- int (*raise_irq)(struct pci_epc *epc, u8 func_no,
+ int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u8 interrupts);
+ int (*get_msi)(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+ int (*set_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno, u32 offset);
+ int (*get_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+ int (*raise_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
enum pci_epc_irq_type type, u16 interrupt_num);
+ int (*map_msi_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t phys_addr, u8 interrupt_num,
+ u32 entry_size, u32 *msi_data,
+ u32 *msi_addr_offset);
int (*start)(struct pci_epc *epc);
void (*stop)(struct pci_epc *epc);
const struct pci_epc_features* (*get_features)(struct pci_epc *epc,
- u8 func_no);
+ u8 func_no, u8 vfunc_no);
struct module *owner;
};
/**
+ * struct pci_epc_mem_window - address window of the endpoint controller
+ * @phys_base: physical base address of the PCI address window
+ * @size: the size of the PCI address window
+ * @page_size: size of each page
+ */
+struct pci_epc_mem_window {
+ phys_addr_t phys_base;
+ size_t size;
+ size_t page_size;
+};
+
+/**
* struct pci_epc_mem - address space of the endpoint controller
- * @phys_base: physical base address of the PCI address space
- * @size: the size of the PCI address space
+ * @window: address window of the endpoint controller
* @bitmap: bitmap to manage the PCI address space
* @pages: number of bits representing the address region
- * @page_size: size of each page
+ * @lock: mutex to protect bitmap
*/
struct pci_epc_mem {
- phys_addr_t phys_base;
- size_t size;
+ struct pci_epc_mem_window window;
unsigned long *bitmap;
- size_t page_size;
int pages;
+	/* mutex to protect against concurrent access for memory allocation */
+ struct mutex lock;
};
/**
@@ -85,25 +123,40 @@ struct pci_epc_mem {
* @dev: PCI EPC device
* @pci_epf: list of endpoint functions present in this EPC device
* @ops: function pointers for performing endpoint operations
- * @mem: address space of the endpoint controller
+ * @windows: array of address windows of the endpoint controller
+ * @mem: first window of the endpoint controller, which corresponds to
+ * the default address space of controllers that support a single
+ * window
+ * @num_windows: number of windows supported by the device
* @max_functions: max number of functions that can be configured in this EPC
+ * @max_vfs: Array indicating the maximum number of virtual functions that can
+ * be associated with each physical function
* @group: configfs group representing the PCI EPC device
- * @lock: spinlock to protect pci_epc ops
+ * @lock: mutex to protect pci_epc ops
+ * @function_num_map: bitmap to manage physical function number
+ * @notifier: used to notify EPF of any EPC events (like linkup)
*/
struct pci_epc {
struct device dev;
struct list_head pci_epf;
const struct pci_epc_ops *ops;
+ struct pci_epc_mem **windows;
struct pci_epc_mem *mem;
+ unsigned int num_windows;
u8 max_functions;
+ u8 *max_vfs;
struct config_group *group;
- /* spinlock to protect against concurrent access of EP controller */
- spinlock_t lock;
+ /* mutex to protect against concurrent access of EP controller */
+ struct mutex lock;
+ unsigned long function_num_map;
+ struct atomic_notifier_head notifier;
};
/**
 * struct pci_epc_features - features supported by an EPC device per function
* @linkup_notifier: indicate if the EPC device can notify EPF driver on link up
+ * @core_init_notifier: indicate if the EPC device can notify EPF drivers of
+ * core initialization (CORE_INIT) events
* @msi_capable: indicate if the endpoint function has MSI capability
* @msix_capable: indicate if the endpoint function has MSI-X capability
* @reserved_bar: bitmap to indicate reserved BAR unavailable to function driver
@@ -113,6 +166,7 @@ struct pci_epc {
*/
struct pci_epc_features {
unsigned int linkup_notifier : 1;
+ unsigned int core_init_notifier : 1;
unsigned int msi_capable : 1;
unsigned int msix_capable : 1;
u8 reserved_bar;
@@ -128,9 +182,6 @@ struct pci_epc_features {
#define devm_pci_epc_create(dev, ops) \
__devm_pci_epc_create((dev), (ops), THIS_MODULE)
-#define pci_epc_mem_init(epc, phys_addr, size) \
- __pci_epc_mem_init((epc), (phys_addr), (size), PAGE_SIZE)
-
static inline void epc_set_drvdata(struct pci_epc *epc, void *data)
{
dev_set_drvdata(&epc->dev, data);
@@ -141,6 +192,12 @@ static inline void *epc_get_drvdata(struct pci_epc *epc)
return dev_get_drvdata(&epc->dev);
}
+static inline int
+pci_epc_register_notifier(struct pci_epc *epc, struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&epc->notifier, nb);
+}
+
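A sketch of how an EPF driver might consume the notifier; my_epf_notifier() is hypothetical, and CORE_INIT/LINK_UP come from enum pci_notify_event in pci-epf.h below:

static int my_epf_notifier(struct notifier_block *nb, unsigned long val,
			   void *data)
{
	struct pci_epf *epf = container_of(nb, struct pci_epf, nb);

	switch (val) {
	case CORE_INIT:
		dev_info(&epf->dev, "core ready, configure header/BARs\n");
		break;
	case LINK_UP:
		dev_info(&epf->dev, "link up, start servicing the host\n");
		break;
	}
	return NOTIFY_OK;
}

	/* in the EPF driver's bind() path: */
	epf->nb.notifier_call = my_epf_notifier;
	pci_epc_register_notifier(epc, &epf->nb);
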
struct pci_epc *
__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
struct module *owner);
@@ -149,37 +206,50 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
struct module *owner);
void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc);
void pci_epc_destroy(struct pci_epc *epc);
-int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf);
+int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
+ enum pci_epc_interface_type type);
void pci_epc_linkup(struct pci_epc *epc);
-void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf);
-int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
+void pci_epc_init_notify(struct pci_epc *epc);
+void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
+ enum pci_epc_interface_type type);
+int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr);
-int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
+int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar);
-void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
+void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar);
-int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
+int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr,
u64 pci_addr, size_t size);
-void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
+void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr);
-int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts);
-int pci_epc_get_msi(struct pci_epc *epc, u8 func_no);
-int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts);
-int pci_epc_get_msix(struct pci_epc *epc, u8 func_no);
-int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
+int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u8 interrupts);
+int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno, u32 offset);
+int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t phys_addr, u8 interrupt_num,
+ u32 entry_size, u32 *msi_data, u32 *msi_addr_offset);
+int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
enum pci_epc_irq_type type, u16 interrupt_num);
int pci_epc_start(struct pci_epc *epc);
void pci_epc_stop(struct pci_epc *epc);
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
- u8 func_no);
-unsigned int pci_epc_get_first_free_bar(const struct pci_epc_features
- *epc_features);
+ u8 func_no, u8 vfunc_no);
+enum pci_barno
+pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features);
+enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
+ *epc_features, enum pci_barno bar);
struct pci_epc *pci_epc_get(const char *epc_name);
void pci_epc_put(struct pci_epc *epc);
-int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_addr, size_t size,
- size_t page_size);
+int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t base,
+ size_t size, size_t page_size);
+int pci_epc_multi_mem_init(struct pci_epc *epc,
+ struct pci_epc_mem_window *window,
+ unsigned int num_windows);
void pci_epc_mem_exit(struct pci_epc *epc);
void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
phys_addr_t *phys_addr, size_t size);
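A sketch of registering two outbound windows with the new API; the addresses and sizes are made-up values (SZ_* constants from linux/sizes.h):

	static struct pci_epc_mem_window windows[] = {
		{ .phys_base = 0x80000000, .size = SZ_64M, .page_size = SZ_4K },
		{ .phys_base = 0x90000000, .size = SZ_1M,  .page_size = SZ_4K },
	};

	ret = pci_epc_multi_mem_init(epc, windows, ARRAY_SIZE(windows));
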
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 2d6f07556682..009a07147c61 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/**
+/*
* PCI Endpoint *Function* (EPF) header file
*
* Copyright (C) 2017 Texas Instruments
@@ -9,13 +9,21 @@
#ifndef __LINUX_PCI_EPF_H
#define __LINUX_PCI_EPF_H
+#include <linux/configfs.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
struct pci_epf;
+enum pci_epc_interface_type;
+
+enum pci_notify_event {
+ CORE_INIT,
+ LINK_UP,
+};
enum pci_barno {
+ NO_BAR = -1,
BAR_0,
BAR_1,
BAR_2,
@@ -55,13 +63,13 @@ struct pci_epf_header {
* @bind: ops to perform when a EPC device has been bound to EPF device
* @unbind: ops to perform when a binding has been lost between a EPC device
* and EPF device
- * @linkup: ops to perform when the EPC device has established a connection with
- * a host system
+ * @add_cfs: ops to initialize function specific configfs attributes
*/
struct pci_epf_ops {
int (*bind)(struct pci_epf *epf);
void (*unbind)(struct pci_epf *epf);
- void (*linkup)(struct pci_epf *epf);
+ struct config_group *(*add_cfs)(struct pci_epf *epf,
+ struct config_group *group);
};
/**
@@ -77,7 +85,7 @@ struct pci_epf_ops {
*/
struct pci_epf_driver {
int (*probe)(struct pci_epf *epf);
- int (*remove)(struct pci_epf *epf);
+ void (*remove)(struct pci_epf *epf);
struct device_driver driver;
struct pci_epf_ops *ops;
@@ -92,10 +100,14 @@ struct pci_epf_driver {
/**
* struct pci_epf_bar - represents the BAR of EPF device
* @phys_addr: physical address that should be mapped to the BAR
+ * @addr: virtual address corresponding to the @phys_addr
* @size: the size of the address space present in BAR
+ * @barno: BAR number
+ * @flags: flags that are set for the BAR
*/
struct pci_epf_bar {
dma_addr_t phys_addr;
+ void *addr;
size_t size;
enum pci_barno barno;
int flags;
@@ -108,10 +120,25 @@ struct pci_epf_bar {
* @header: represents standard configuration header
* @bar: represents the BAR of EPF device
* @msi_interrupts: number of MSI interrupts required by this function
- * @func_no: unique function number within this endpoint device
+ * @msix_interrupts: number of MSI-X interrupts required by this function
+ * @func_no: unique (physical) function number within this endpoint device
+ * @vfunc_no: unique virtual function number within a physical function
* @epc: the EPC device to which this EPF device is bound
+ * @epf_pf: the physical EPF device to which this virtual EPF device is bound
* @driver: the EPF driver to which this EPF device is bound
* @list: to add pci_epf as a list of PCI endpoint functions to pci_epc
+ * @nb: notifier block to notify EPF of any EPC events (like linkup)
+ * @lock: mutex to protect pci_epf_ops
+ * @sec_epc: the secondary EPC device to which this EPF device is bound
+ * @sec_epc_list: to add pci_epf as list of PCI endpoint functions to secondary
+ * EPC device
+ * @sec_epc_bar: represents the BAR of EPF device associated with secondary EPC
+ * @sec_epc_func_no: unique (physical) function number within the secondary EPC
+ * @group: configfs group associated with the EPF device
+ * @is_bound: indicates if bind notification to function driver has been invoked
+ * @is_vf: true - virtual function, false - physical function
+ * @vfunction_num_map: bitmap to manage virtual function number
+ * @pci_vepf: list of virtual endpoint functions associated with this function
*/
struct pci_epf {
struct device dev;
@@ -121,10 +148,39 @@ struct pci_epf {
u8 msi_interrupts;
u16 msix_interrupts;
u8 func_no;
+ u8 vfunc_no;
struct pci_epc *epc;
+ struct pci_epf *epf_pf;
struct pci_epf_driver *driver;
struct list_head list;
+ struct notifier_block nb;
+ /* mutex to protect against concurrent access of pci_epf_ops */
+ struct mutex lock;
+
+ /* Below members are to attach secondary EPC to an endpoint function */
+ struct pci_epc *sec_epc;
+ struct list_head sec_epc_list;
+ struct pci_epf_bar sec_epc_bar[6];
+ u8 sec_epc_func_no;
+ struct config_group *group;
+ unsigned int is_bound;
+ unsigned int is_vf;
+ unsigned long vfunction_num_map;
+ struct list_head pci_vepf;
+};
+
+/**
+ * struct pci_epf_msix_tbl - represents an MSI-X table entry
+ * @msg_addr: Writes to this address trigger an MSI-X interrupt in the host
+ * @msg_data: Data that should be written to @msg_addr to trigger the interrupt
+ * @vector_ctrl: Identifies if the function is prohibited from sending a
+ * message using this MSI-X table entry
+ */
+struct pci_epf_msix_tbl {
+ u64 msg_addr;
+ u32 msg_data;
+ u32 vector_ctrl;
};
#define to_pci_epf(epf_dev) container_of((epf_dev), struct pci_epf, dev)
@@ -142,17 +198,19 @@ static inline void *epf_get_drvdata(struct pci_epf *epf)
return dev_get_drvdata(&epf->dev);
}
-const struct pci_epf_device_id *
-pci_epf_match_device(const struct pci_epf_device_id *id, struct pci_epf *epf);
struct pci_epf *pci_epf_create(const char *name);
void pci_epf_destroy(struct pci_epf *epf);
int __pci_epf_register_driver(struct pci_epf_driver *driver,
struct module *owner);
void pci_epf_unregister_driver(struct pci_epf_driver *driver);
void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
- size_t align);
-void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar);
+ size_t align, enum pci_epc_interface_type type);
+void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar,
+ enum pci_epc_interface_type type);
int pci_epf_bind(struct pci_epf *epf);
void pci_epf_unbind(struct pci_epf *epf);
-void pci_epf_linkup(struct pci_epf *epf);
+struct config_group *pci_epf_type_add_cfs(struct pci_epf *epf,
+ struct config_group *group);
+int pci_epf_add_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf);
+void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf);
#endif /* __LINUX_PCI_EPF_H */
diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h
index 8318a97c9c61..2c07aa6b7665 100644
--- a/include/linux/pci-p2pdma.h
+++ b/include/linux/pci-p2pdma.h
@@ -30,10 +30,6 @@ struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
unsigned int *nents, u32 length);
void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl);
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish);
-int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs);
-void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs);
int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
bool *use_p2pdma);
ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
@@ -83,17 +79,6 @@ static inline void pci_p2pmem_free_sgl(struct pci_dev *pdev,
static inline void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
{
}
-static inline int pci_p2pdma_map_sg_attrs(struct device *dev,
- struct scatterlist *sg, int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- return 0;
-}
-static inline void pci_p2pdma_unmap_sg_attrs(struct device *dev,
- struct scatterlist *sg, int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
-}
static inline int pci_p2pdma_enable_store(const char *page,
struct pci_dev **p2p_dev, bool *use_p2pdma)
{
@@ -119,16 +104,4 @@ static inline struct pci_dev *pci_p2pmem_find(struct device *client)
return pci_p2pmem_find_many(&client, 1);
}
-static inline int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir)
-{
- return pci_p2pdma_map_sg_attrs(dev, sg, nents, dir, 0);
-}
-
-static inline void pci_p2pdma_unmap_sg(struct device *dev,
- struct scatterlist *sg, int nents, enum dma_data_direction dir)
-{
- pci_p2pdma_unmap_sg_attrs(dev, sg, nents, dir, 0);
-}
-
#endif /* _LINUX_PCI_P2P_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 3840a541a9de..2bda4a4e47e8 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -42,6 +42,19 @@
#include <linux/pci_ids.h>
+#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
+ PCI_STATUS_SIG_SYSTEM_ERROR | \
+ PCI_STATUS_REC_MASTER_ABORT | \
+ PCI_STATUS_REC_TARGET_ABORT | \
+ PCI_STATUS_SIG_TARGET_ABORT | \
+ PCI_STATUS_PARITY)
+
+/* Number of reset methods used in pci_reset_fn_methods array in pci.c */
+#define PCI_NUM_RESET_METHODS 7
+
+#define PCI_RESET_PROBE true
+#define PCI_RESET_DO_RESET false
+
/*
* The PCI interface treats multi-function devices as independent
* devices. The slot/function address of each device is encoded
@@ -93,9 +106,21 @@ enum {
PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
#endif
- /* Resources assigned to buses behind the bridge */
+/* PCI-to-PCI (P2P) bridge windows */
+#define PCI_BRIDGE_IO_WINDOW (PCI_BRIDGE_RESOURCES + 0)
+#define PCI_BRIDGE_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 1)
+#define PCI_BRIDGE_PREF_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 2)
+
+/* CardBus bridge windows */
+#define PCI_CB_BRIDGE_IO_0_WINDOW (PCI_BRIDGE_RESOURCES + 0)
+#define PCI_CB_BRIDGE_IO_1_WINDOW (PCI_BRIDGE_RESOURCES + 1)
+#define PCI_CB_BRIDGE_MEM_0_WINDOW (PCI_BRIDGE_RESOURCES + 2)
+#define PCI_CB_BRIDGE_MEM_1_WINDOW (PCI_BRIDGE_RESOURCES + 3)
+
+/* Total number of bridge resources for P2P and CardBus */
#define PCI_BRIDGE_RESOURCE_NUM 4
+ /* Resources assigned to buses behind the bridge */
PCI_BRIDGE_RESOURCES,
PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
PCI_BRIDGE_RESOURCE_NUM - 1,
@@ -130,6 +155,15 @@ enum pci_interrupt_pin {
#define PCI_NUM_INTX 4
/*
+ * Reading from a device that doesn't respond typically returns ~0. A
+ * successful read from a device may also return ~0, so you need additional
+ * information to reliably identify errors.
+ */
+#define PCI_ERROR_RESPONSE (~0ULL)
+#define PCI_SET_ERROR_RESPONSE(val) (*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
+#define PCI_POSSIBLE_ERROR(val) ((val) == ((typeof(val)) PCI_ERROR_RESPONSE))
+
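A minimal sketch of the intended check; what counts as confirmation is device-specific:

	u32 val;

	pci_read_config_dword(pdev, PCI_VENDOR_ID, &val);
	if (PCI_POSSIBLE_ERROR(val)) {
		/* may be a dead device or a legitimate all-ones read;
		 * confirm via device-specific knowledge before recovering
		 */
	}
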
+/*
* pci_power_t values must match the bits in the Capabilities PME_Support
* and Control/Status PowerState fields in the Power Management capability.
*/
@@ -160,7 +194,7 @@ static inline const char *pci_power_name(pci_power_t state)
*/
typedef unsigned int __bitwise pci_channel_state_t;
-enum pci_channel_state {
+enum {
/* I/O channel is in normal state */
pci_channel_io_normal = (__force pci_channel_state_t) 1,
@@ -208,6 +242,8 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
/* Don't use Relaxed Ordering for TLPs directed at this device */
PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
+ /* Device does honor MSI masking despite saying otherwise */
+ PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
};
enum pci_irq_reroute_variant {
@@ -236,7 +272,7 @@ enum pcie_link_width {
PCIE_LNK_WIDTH_UNKNOWN = 0xff,
};
-/* Based on the PCI Hotplug Spec, but some values are made up by us */
+/* See matching string table in pci_speed_string() */
enum pci_bus_speed {
PCI_SPEED_33MHz = 0x00,
PCI_SPEED_66MHz = 0x01,
@@ -262,29 +298,24 @@ enum pci_bus_speed {
PCIE_SPEED_8_0GT = 0x16,
PCIE_SPEED_16_0GT = 0x17,
PCIE_SPEED_32_0GT = 0x18,
+ PCIE_SPEED_64_0GT = 0x19,
PCI_SPEED_UNKNOWN = 0xff,
};
enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
-struct pci_cap_saved_data {
- u16 cap_nr;
- bool cap_extended;
- unsigned int size;
- u32 data[0];
-};
-
-struct pci_cap_saved_state {
- struct hlist_node next;
- struct pci_cap_saved_data cap;
+struct pci_vpd {
+ struct mutex lock;
+ unsigned int len;
+ u8 cap;
};
struct irq_affinity;
struct pcie_link_state;
-struct pci_vpd;
struct pci_sriov;
struct pci_p2pdma;
+struct rcec_ea;
/* The pci_dev structure describes PCI devices */
struct pci_dev {
@@ -308,6 +339,11 @@ struct pci_dev {
u16 aer_cap; /* AER capability offset */
struct aer_stats *aer_stats; /* AER stats for this device */
#endif
+#ifdef CONFIG_PCIEPORTBUS
+ struct rcec_ea *rcec_ea; /* RCEC cached endpoint association */
+ struct pci_dev *rcec; /* Associated RCEC device */
+#endif
+ u32 devcap; /* PCIe Device Capabilities */
u8 pcie_cap; /* PCIe capability offset */
u8 msi_cap; /* MSI capability offset */
u8 msix_cap; /* MSI-X capability offset */
@@ -343,10 +379,6 @@ struct pci_dev {
unsigned int mmio_always_on:1; /* Disallow turning off io/mem
decoding during BAR sizing */
unsigned int wakeup_prepared:1;
- unsigned int runtime_d3cold:1; /* Whether go through runtime
- D3cold, not set for devices
- powered on/off by the
- corresponding bridge */
unsigned int skip_bus_pm:1; /* Internal: Skip bus-level PM */
unsigned int ignore_hotplug:1; /* Ignore hotplug events */
unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
@@ -354,14 +386,16 @@ struct pci_dev {
user sysfs */
unsigned int clear_retrain_link:1; /* Need to clear Retrain Link
bit manually */
- unsigned int d3_delay; /* D3->D0 transition time in ms */
+ unsigned int d3hot_delay; /* D3hot->D0 transition time in ms */
unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
#ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state */
unsigned int ltr_path:1; /* Latency Tolerance Reporting
supported from root to here */
+ u16 l1ss; /* L1SS Capability pointer */
#endif
+ unsigned int pasid_no_tlp:1; /* PASID works without TLP Prefix */
unsigned int eetlp_prefix_path:1; /* End-to-End TLP Prefix */
pci_channel_state_t error_state; /* Current connectivity state */
@@ -396,12 +430,12 @@ struct pci_dev {
unsigned int ats_enabled:1; /* Address Translation Svc */
unsigned int pasid_enabled:1; /* Process Address Space ID */
unsigned int pri_enabled:1; /* Page Request Interface */
- unsigned int is_managed:1;
+ unsigned int is_managed:1; /* Managed via devres */
+ unsigned int is_msi_managed:1; /* MSI release via devres installed */
unsigned int needs_freset:1; /* Requires fundamental reset */
unsigned int state_saved:1;
unsigned int is_physfn:1;
unsigned int is_virtfn:1;
- unsigned int reset_fn:1;
unsigned int is_hotplug_bridge:1;
unsigned int shpc_managed:1; /* SHPC owned by shpchp */
unsigned int is_thunderbolt:1; /* Thunderbolt controller */
@@ -413,8 +447,12 @@ struct pci_dev {
* mappings to make sure they cannot access arbitrary memory.
*/
unsigned int untrusted:1;
- unsigned int __aer_firmware_first_valid:1;
- unsigned int __aer_firmware_first:1;
+ /*
+ * Info from the platform, e.g., ACPI or device tree, may mark a
+ * device as "external-facing". An external-facing device is
+ * itself internal but devices downstream from it are external.
+ */
+ unsigned int external_facing:1;
unsigned int broken_intx_masking:1; /* INTx masking can't be used */
unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */
unsigned int irq_managed:1;
@@ -422,12 +460,13 @@ struct pci_dev {
unsigned int is_probed:1; /* Device probing in progress */
unsigned int link_active_reporting:1;/* Device capable of reporting link active */
unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */
+ unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */
+ unsigned int rom_bar_overlap:1; /* ROM BAR disable broken */
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
u32 saved_config_space[16]; /* Config space saved at suspend time */
struct hlist_head saved_cap_space;
- struct bin_attribute *rom_attr; /* Attribute descriptor for sysfs ROM entry */
int rom_attr_enabled; /* Display of ROM attribute enabled? */
struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
@@ -436,14 +475,21 @@ struct pci_dev {
unsigned int broken_cmd_compl:1; /* No compl for some cmds */
#endif
#ifdef CONFIG_PCIE_PTM
+ u16 ptm_cap; /* PTM Capability */
unsigned int ptm_root:1;
unsigned int ptm_enabled:1;
u8 ptm_granularity;
#endif
#ifdef CONFIG_PCI_MSI
- const struct attribute_group **msi_irq_groups;
+ void __iomem *msix_base;
+ raw_spinlock_t msi_lock;
+#endif
+ struct pci_vpd vpd;
+#ifdef CONFIG_PCIE_DPC
+ u16 dpc_cap;
+ unsigned int dpc_rp_extensions:1;
+ u8 dpc_rp_log_size;
#endif
- struct pci_vpd *vpd;
#ifdef CONFIG_PCI_ATS
union {
struct pci_sriov *sriov; /* PF: SR-IOV info */
@@ -462,13 +508,21 @@ struct pci_dev {
u16 pasid_features;
#endif
#ifdef CONFIG_PCI_P2PDMA
- struct pci_p2pdma *p2pdma;
+ struct pci_p2pdma __rcu *p2pdma;
#endif
+ u16 acs_cap; /* ACS Capability offset */
phys_addr_t rom; /* Physical address if not from BAR */
size_t romlen; /* Length if not from BAR */
- char *driver_override; /* Driver name to force a match */
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
unsigned long priv_flags; /* Private flags for the PCI driver */
+
+ /* These methods index pci_reset_fn_methods[] */
+ u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */
};
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
@@ -490,19 +544,30 @@ static inline int pci_channel_offline(struct pci_dev *pdev)
return (pdev->error_state != pci_channel_io_normal);
}
+/*
+ * In the current ACPI spec, the PCI Segment Group number for each PCI
+ * host bridge is limited to a 16-bit value, so (int)-1 is not a valid
+ * PCI domain number and can be used as a sentinel value indicating
+ * that ->domain_nr is not set by the driver (and
+ * CONFIG_PCI_DOMAINS_GENERIC=y archs will set it with
+ * pci_bus_find_domain_nr()).
+ */
+#define PCI_DOMAIN_NR_NOT_SET (-1)
+
struct pci_host_bridge {
struct device dev;
struct pci_bus *bus; /* Root bus */
struct pci_ops *ops;
+ struct pci_ops *child_ops;
void *sysdata;
int busnr;
+ int domain_nr;
struct list_head windows; /* resource_entry */
struct list_head dma_ranges; /* dma ranges resource list */
u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
int (*map_irq)(const struct pci_dev *, u8, u8);
void (*release_fn)(struct pci_host_bridge *);
void *release_data;
- struct msi_controller *msi;
unsigned int ignore_reset_delay:1; /* For entire hierarchy */
unsigned int no_ext_tags:1; /* No Extended Tags */
unsigned int native_aer:1; /* OS may use PCIe AER */
@@ -510,7 +575,10 @@ struct pci_host_bridge {
unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */
unsigned int native_pme:1; /* OS may use PCIe PME */
unsigned int native_ltr:1; /* OS may use PCIe LTR */
+ unsigned int native_dpc:1; /* OS may use PCIe DPC */
unsigned int preserve_config:1; /* Preserve FW resource setup */
+ unsigned int size_windows:1; /* Enable root bus sizing */
+ unsigned int msi_domain:1; /* Bridge wants MSI domain */
/* Resource alignment requirements */
resource_size_t (*align_resource)(struct pci_dev *dev,
@@ -518,7 +586,7 @@ struct pci_host_bridge {
resource_size_t start,
resource_size_t size,
resource_size_t align);
- unsigned long private[0] ____cacheline_aligned;
+ unsigned long private[] ____cacheline_aligned;
};
#define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
@@ -581,7 +649,6 @@ struct pci_bus {
struct resource busn_res; /* Bus numbers routed to this bus */
struct pci_ops *ops; /* Configuration access functions */
- struct msi_controller *msi; /* MSI controller */
void *sysdata; /* Hook for sys-specific extension */
struct proc_dir_entry *procdir; /* Directory entry in /proc/bus/pci */
@@ -602,6 +669,7 @@ struct pci_bus {
struct bin_attribute *legacy_io; /* Legacy I/O for this bus */
struct bin_attribute *legacy_mem; /* Legacy mem */
unsigned int is_added:1;
+ unsigned int unsafe_warn:1; /* warned about RW1C config write */
};
#define to_pci_bus(n) container_of(n, struct pci_bus, dev)
@@ -761,7 +829,7 @@ enum pci_ers_result {
struct pci_error_handlers {
/* PCI bus error detected on this device */
pci_ers_result_t (*error_detected)(struct pci_dev *dev,
- enum pci_channel_state error);
+ pci_channel_state_t error);
/* MMIO has been re-enabled, but not DMA */
pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
@@ -816,10 +884,25 @@ struct module;
* e.g. drivers/net/e100.c.
* @sriov_configure: Optional driver callback to allow configuration of
* number of VFs to enable via sysfs "sriov_numvfs" file.
+ * @sriov_set_msix_vec_count: PF driver callback to change the number of MSI-X
+ * vectors on a VF. Triggered via sysfs "sriov_vf_msix_count".
+ * This will change MSI-X Table Size in the VF Message Control
+ * registers.
+ * @sriov_get_vf_total_msix: PF driver callback to get the total number of
+ * MSI-X vectors available for distribution to the VFs.
* @err_handler: See Documentation/PCI/pci-error-recovery.rst
* @groups: Sysfs attribute groups.
+ * @dev_groups: Attributes attached to the device that will be
+ * created once it is bound to the driver.
* @driver: Driver model structure.
* @dynids: List of dynamically added device IDs.
+ * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
+ * Most device drivers need not care about this flag as long as
+ * all DMA is handled through the kernel DMA API. Some special
+ * drivers, for example VFIO drivers, manage DMA themselves and
+ * set this flag so that the IOMMU layer will allow them to set
+ * up and manage their own I/O address space.
*/
struct pci_driver {
struct list_head node;
@@ -831,13 +914,20 @@ struct pci_driver {
int (*resume)(struct pci_dev *dev); /* Device woken up */
void (*shutdown)(struct pci_dev *dev);
int (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
+ int (*sriov_set_msix_vec_count)(struct pci_dev *vf, int msix_vec_count); /* On PF */
+ u32 (*sriov_get_vf_total_msix)(struct pci_dev *pf);
const struct pci_error_handlers *err_handler;
const struct attribute_group **groups;
+ const struct attribute_group **dev_groups;
struct device_driver driver;
struct pci_dynids dynids;
+ bool driver_managed_dma;
};
-#define to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
+static inline struct pci_driver *to_pci_driver(struct device_driver *drv)
+{
+ return drv ? container_of(drv, struct pci_driver, driver) : NULL;
+}
/**
* PCI_DEVICE - macro used to describe a specific PCI device
@@ -853,6 +943,35 @@ struct pci_driver {
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
/**
+ * PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with
+ * override_only flags.
+ * @vend: the 16 bit PCI Vendor ID
+ * @dev: the 16 bit PCI Device ID
+ * @driver_override: the 32 bit PCI Device override_only
+ *
+ * This macro is used to create a struct pci_device_id that matches only a
+ * driver_override device. The subvendor and subdevice fields will be set to
+ * PCI_ANY_ID.
+ */
+#define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
+ .vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, .override_only = (driver_override)
+
+/**
+ * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO
+ * "driver_override" PCI device.
+ * @vend: the 16 bit PCI Vendor ID
+ * @dev: the 16 bit PCI Device ID
+ *
+ * This macro is used to create a struct pci_device_id that matches a
+ * specific device. The subvendor and subdevice fields will be set to
+ * PCI_ANY_ID and the driver_override will be set to
+ * PCI_ID_F_VFIO_DRIVER_OVERRIDE.
+ */
+#define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \
+ PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE)
+
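A sketch of a driver_override ID table built with the new macro; the vendor and device IDs are placeholders:

static const struct pci_device_id my_vfio_ids[] = {
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(0x1234, 0x5678) },
	{}
};
MODULE_DEVICE_TABLE(pci, my_vfio_ids);
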
+/**
* PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
* @vend: the 16 bit PCI Vendor ID
* @dev: the 16 bit PCI Device ID
@@ -1011,7 +1130,6 @@ void pci_bus_add_device(struct pci_dev *dev);
void pci_read_bridge_bases(struct pci_bus *child);
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
struct resource *res);
-struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev);
u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
@@ -1030,20 +1148,18 @@ void pci_sort_breadthfirst(void);
/* Generic PCI functions exported to card drivers */
-enum pci_lost_interrupt_reason {
- PCI_LOST_IRQ_NO_INFORMATION = 0,
- PCI_LOST_IRQ_DISABLE_MSI,
- PCI_LOST_IRQ_DISABLE_MSIX,
- PCI_LOST_IRQ_DISABLE_ACPI,
-};
-enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev);
-int pci_find_capability(struct pci_dev *dev, int cap);
-int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
-int pci_find_ext_capability(struct pci_dev *dev, int cap);
-int pci_find_next_ext_capability(struct pci_dev *dev, int pos, int cap);
-int pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
-int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap);
+u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
+u8 pci_find_capability(struct pci_dev *dev, int cap);
+u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
+u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
+u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap);
+u16 pci_find_ext_capability(struct pci_dev *dev, int cap);
+u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap);
struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
+u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap);
+u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec);
+
+u64 pci_get_dsn(struct pci_dev *dev);
struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
struct pci_dev *from);
@@ -1162,11 +1278,11 @@ void pci_clear_master(struct pci_dev *dev);
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
int pci_set_cacheline_size(struct pci_dev *dev);
-#define HAVE_PCI_SET_MWI
int __must_check pci_set_mwi(struct pci_dev *dev);
int __must_check pcim_set_mwi(struct pci_dev *dev);
int pci_try_set_mwi(struct pci_dev *dev);
void pci_clear_mwi(struct pci_dev *dev);
+void pci_disable_parity(struct pci_dev *dev);
void pci_intx(struct pci_dev *dev, int enable);
bool pci_check_and_mask_intx(struct pci_dev *dev);
bool pci_check_and_unmask_intx(struct pci_dev *dev);
@@ -1183,7 +1299,7 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
enum pci_bus_speed *speed,
enum pcie_link_width *width);
void pcie_print_link_status(struct pci_dev *dev);
-bool pcie_has_flr(struct pci_dev *dev);
+int pcie_reset_flr(struct pci_dev *dev, bool probe);
int pcie_flr(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
@@ -1198,11 +1314,21 @@ void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
void pci_release_resource(struct pci_dev *dev, int resno);
+static inline int pci_rebar_bytes_to_size(u64 bytes)
+{
+ bytes = roundup_pow_of_two(bytes);
+
+ /* Return BAR size as defined in the resizable BAR specification */
+ return max(ilog2(bytes), 20) - 20;
+}
+
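A worked example of the helper above, assuming SZ_256M from linux/sizes.h:

	int size = pci_rebar_bytes_to_size(SZ_256M);	/* 8: 2^(8 + 20) bytes */
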
+u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
void pci_ignore_hotplug(struct pci_dev *dev);
struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
+int pci_status_get_and_clear_errors(struct pci_dev *pdev);
int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
@@ -1214,7 +1340,6 @@ int pci_enable_rom(struct pci_dev *pdev);
void pci_disable_rom(struct pci_dev *pdev);
void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
-void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
/* Power management related routines */
int pci_save_state(struct pci_dev *dev);
@@ -1224,12 +1349,6 @@ int pci_load_saved_state(struct pci_dev *dev,
struct pci_saved_state *state);
int pci_load_and_free_saved_state(struct pci_dev *dev,
struct pci_saved_state **state);
-struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
-struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
- u16 cap);
-int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size);
-int pci_add_ext_cap_save_buffer(struct pci_dev *dev,
- u16 cap, unsigned int size);
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
@@ -1243,7 +1362,7 @@ bool pci_dev_run_wake(struct pci_dev *dev);
void pci_d3cold_enable(struct pci_dev *dev);
void pci_d3cold_disable(struct pci_dev *dev);
bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
-void pci_wakeup_bus(struct pci_bus *bus);
+void pci_resume_bus(struct pci_bus *bus);
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
/* For use by arch with custom probe code */
@@ -1251,7 +1370,6 @@ void set_pcie_port_type(struct pci_dev *pdev);
void set_pcie_hotplug_bridge(struct pci_dev *pdev);
/* Functions for PCI Hotplug drivers to use */
-int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
unsigned int pci_rescan_bus(struct pci_bus *bus);
void pci_lock_rescan_remove(void);
@@ -1260,7 +1378,8 @@ void pci_unlock_rescan_remove(void);
/* Vital Product Data routines */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
-int pci_set_vpd_size(struct pci_dev *dev, size_t len);
+ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
+ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
@@ -1409,19 +1528,8 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode,
#define PCI_IRQ_ALL_TYPES \
(PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
-/* kmem_cache style wrapper around pci_alloc_consistent() */
-
#include <linux/dmapool.h>
-#define pci_pool dma_pool
-#define pci_pool_create(name, pdev, size, align, allocation) \
- dma_pool_create(name, &pdev->dev, size, align, allocation)
-#define pci_pool_destroy(pool) dma_pool_destroy(pool)
-#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
-#define pci_pool_zalloc(pool, flags, handle) \
- dma_pool_zalloc(pool, flags, handle)
-#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
-
struct msix_entry {
u32 vector; /* Kernel uses to write allocated vector */
u16 entry; /* Driver uses to specify entry, OS writes */
@@ -1568,10 +1676,26 @@ static inline bool pci_aer_available(void) { return false; }
bool pci_ats_disabled(void);
+#ifdef CONFIG_PCIE_PTM
+int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
+void pci_disable_ptm(struct pci_dev *dev);
+bool pcie_ptm_enabled(struct pci_dev *dev);
+#else
+static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
+{ return -EINVAL; }
+static inline void pci_disable_ptm(struct pci_dev *dev) { }
+static inline bool pcie_ptm_enabled(struct pci_dev *dev)
+{ return false; }
+#endif
+
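(A hedged usage sketch for the PTM interface above, not part of the patch; error handling is elided, and the granularity reported by the capability is in nanoseconds:)

	u8 granularity;

	if (!pci_enable_ptm(pdev, &granularity))
		pci_info(pdev, "PTM enabled, granularity %u ns\n", granularity);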
void pci_cfg_access_lock(struct pci_dev *dev);
bool pci_cfg_access_trylock(struct pci_dev *dev);
void pci_cfg_access_unlock(struct pci_dev *dev);
+void pci_dev_lock(struct pci_dev *dev);
+int pci_dev_trylock(struct pci_dev *dev);
+void pci_dev_unlock(struct pci_dev *dev);
+
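(Sketch of the usual reset-style usage of the newly exported helpers; pci_dev_lock() is expected to block config space accesses in addition to taking the device lock:)

	pci_dev_lock(pdev);
	/* ... reset or other state-sensitive work on pdev ... */
	pci_dev_unlock(pdev);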
/*
* PCI domain support. Sometimes called PCI segment (e.g. by ACPI),
* a PCI domain is defined to be a set of PCI buses which share
@@ -1675,7 +1799,10 @@ static inline struct pci_dev *pci_get_class(unsigned int class,
struct pci_dev *from)
{ return NULL; }
-#define pci_dev_present(ids) (0)
+
+static inline int pci_dev_present(const struct pci_device_id *ids)
+{ return 0; }
+
#define no_pci_devices() (1)
#define pci_dev_put(dev) do { } while (0)
@@ -1685,13 +1812,14 @@ static inline void pci_disable_device(struct pci_dev *dev) { }
static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
static inline int pci_assign_resource(struct pci_dev *dev, int i)
{ return -EBUSY; }
-static inline int __pci_register_driver(struct pci_driver *drv,
- struct module *owner)
+static inline int __must_check __pci_register_driver(struct pci_driver *drv,
+ struct module *owner,
+ const char *mod_name)
{ return 0; }
static inline int pci_register_driver(struct pci_driver *drv)
{ return 0; }
static inline void pci_unregister_driver(struct pci_driver *drv) { }
-static inline int pci_find_capability(struct pci_dev *dev, int cap)
+static inline u8 pci_find_capability(struct pci_dev *dev, int cap)
{ return 0; }
static inline int pci_find_next_capability(struct pci_dev *dev, u8 post,
int cap)
@@ -1699,6 +1827,9 @@ static inline int pci_find_next_capability(struct pci_dev *dev, u8 post,
static inline int pci_find_ext_capability(struct pci_dev *dev, int cap)
{ return 0; }
+static inline u64 pci_get_dsn(struct pci_dev *dev)
+{ return 0; }
+
/* Power management related routines */
static inline int pci_save_state(struct pci_dev *dev) { return 0; }
static inline void pci_restore_state(struct pci_dev *dev) { }
@@ -1720,6 +1851,10 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
{ return -EIO; }
static inline void pci_release_regions(struct pci_dev *dev) { }
+static inline int pci_register_io_range(struct fwnode_handle *fwnode,
+ phys_addr_t addr, resource_size_t size)
+{ return -EINVAL; }
+
static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
@@ -1777,24 +1912,14 @@ pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
#include <asm/pci.h>
-/* These two functions provide almost identical functionality. Depending
- * on the architecture, one will be implemented as a wrapper around the
- * other (in drivers/pci/mmap.c).
- *
+/*
* pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
* is expected to be an offset within that region.
*
- * pci_mmap_page_range() is the legacy architecture-specific interface,
- * which accepts a "user visible" resource address converted by
- * pci_resource_to_user(), as used in the legacy mmap() interface in
- * /proc/bus/pci/.
*/
int pci_mmap_resource_range(struct pci_dev *dev, int bar,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
-int pci_mmap_page_range(struct pci_dev *pdev, int bar,
- struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
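(Illustration only: a driver-side mmap handler built on the surviving interface. to_toy() and the use of BAR 0 are hypothetical; as the comment says, vma->vm_pgoff must be an offset within that BAR:)

static int toy_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct pci_dev *pdev = to_toy(file)->pdev;

	return pci_mmap_resource_range(pdev, 0, vma, pci_mmap_mem, 0);
}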
#ifndef arch_can_pci_mmap_wc
#define arch_can_pci_mmap_wc() 0
@@ -1819,9 +1944,7 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
#define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end)
#define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags)
#define pci_resource_len(dev,bar) \
- ((pci_resource_start((dev), (bar)) == 0 && \
- pci_resource_end((dev), (bar)) == \
- pci_resource_start((dev), (bar))) ? 0 : \
+ ((pci_resource_end((dev), (bar)) == 0) ? 0 : \
\
(pci_resource_end((dev), (bar)) - \
pci_resource_start((dev), (bar)) + 1))
@@ -1881,7 +2004,7 @@ enum pci_fixup_pass {
};
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
-#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+#define ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
class_shift, hook) \
__ADDRESSABLE(hook) \
asm(".section " #sec ", \"a\" \n" \
@@ -1890,10 +2013,33 @@ enum pci_fixup_pass {
".long " #class ", " #class_shift " \n" \
".long " #hook " - . \n" \
".previous \n");
+
+/*
+ * Clang's LTO may rename static functions in C, but it has no way to
+ * handle such renamings when the functions are referenced from inline
+ * asm. To work around this, create global C stubs for these cases.
+ */
+#ifdef CONFIG_LTO_CLANG
+#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, hook, stub) \
+ void stub(struct pci_dev *dev); \
+ void stub(struct pci_dev *dev) \
+ { \
+ hook(dev); \
+ } \
+ ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, stub)
+#else
+#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, hook, stub) \
+ ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, hook)
+#endif
+
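(For orientation: a typical consumer of this machinery is a quirk declared via one of the DECLARE_PCI_FIXUP_* wrappers; the device ID below is a placeholder. With CONFIG_LTO_CLANG, the generated stub is what lands in the fixup section:)

static void quirk_example(struct pci_dev *dev)
{
	pci_info(dev, "applying example quirk\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1234, quirk_example);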
#define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
class_shift, hook) \
__DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
- class_shift, hook)
+ class_shift, hook, __UNIQUE_ID(hook))
#else
/* Anonymous variables would be nice... */
#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
@@ -1995,7 +2141,7 @@ void pcibios_disable_device(struct pci_dev *dev);
void pcibios_set_master(struct pci_dev *dev);
int pcibios_set_pcie_reset_state(struct pci_dev *dev,
enum pcie_reset_state state);
-int pcibios_add_device(struct pci_dev *dev);
+int pcibios_device_add(struct pci_dev *dev);
void pcibios_release_device(struct pci_dev *dev);
#ifdef CONFIG_PCI
void pcibios_penalize_isa_irq(int irq, int active);
@@ -2006,10 +2152,6 @@ int pcibios_alloc_irq(struct pci_dev *dev);
void pcibios_free_irq(struct pci_dev *dev);
resource_size_t pcibios_default_alignment(void);
-#ifdef CONFIG_HIBERNATE_CALLBACKS
-extern struct dev_pm_ops pcibios_pm_ops;
-#endif
-
#if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
void __init pci_mmcfg_early_init(void);
void __init pci_mmcfg_late_init(void);
@@ -2026,9 +2168,12 @@ void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
#ifdef CONFIG_PCI_IOV
int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
-
+int pci_iov_vf_id(struct pci_dev *dev);
+void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver);
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
void pci_disable_sriov(struct pci_dev *dev);
+
+int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id);
int pci_iov_add_virtfn(struct pci_dev *dev, int id);
void pci_iov_remove_virtfn(struct pci_dev *dev, int id);
int pci_num_vf(struct pci_dev *dev);
@@ -2052,8 +2197,26 @@ static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
{
return -ENOSYS;
}
+
+static inline int pci_iov_vf_id(struct pci_dev *dev)
+{
+ return -ENOSYS;
+}
+
+static inline void *pci_iov_get_pf_drvdata(struct pci_dev *dev,
+ struct pci_driver *pf_driver)
+{
+ return ERR_PTR(-EINVAL);
+}
+
static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{ return -ENODEV; }
+
+static inline int pci_iov_sysfs_link(struct pci_dev *dev,
+ struct pci_dev *virtfn, int id)
+{
+ return -ENODEV;
+}
static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id)
{
return -ENOSYS;
@@ -2124,17 +2287,22 @@ static inline int pci_pcie_type(const struct pci_dev *dev)
return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
}
+/**
+ * pcie_find_root_port - Get the PCIe root port device
+ * @dev: PCI device
+ *
+ * Traverse up the parent chain and return the PCIe Root Port PCI Device
+ * for a given PCI/PCIe Device.
+ */
static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
{
- while (1) {
- if (!pci_is_pcie(dev))
- break;
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+ while (dev) {
+ if (pci_is_pcie(dev) &&
+ pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
return dev;
- if (!dev->bus->self)
- break;
- dev = dev->bus->self;
+ dev = pci_upstream_bridge(dev);
}
+
return NULL;
}
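(Usage sketch for the rewritten helper, not part of the patch:)

	struct pci_dev *rp = pcie_find_root_port(pdev);

	if (rp)
		pci_info(pdev, "behind root port %s\n", pci_name(rp));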
@@ -2156,114 +2324,59 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
#define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
#define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
-/* Small Resource Data Type Tag Item Names */
-#define PCI_VPD_STIN_END 0x0f /* End */
-
-#define PCI_VPD_SRDT_END (PCI_VPD_STIN_END << 3)
-
-#define PCI_VPD_SRDT_TIN_MASK 0x78
-#define PCI_VPD_SRDT_LEN_MASK 0x07
-#define PCI_VPD_LRDT_TIN_MASK 0x7f
-
-#define PCI_VPD_LRDT_TAG_SIZE 3
-#define PCI_VPD_SRDT_TAG_SIZE 1
-
-#define PCI_VPD_INFO_FLD_HDR_SIZE 3
-
#define PCI_VPD_RO_KEYWORD_PARTNO "PN"
+#define PCI_VPD_RO_KEYWORD_SERIALNO "SN"
#define PCI_VPD_RO_KEYWORD_MFR_ID "MN"
#define PCI_VPD_RO_KEYWORD_VENDOR0 "V0"
#define PCI_VPD_RO_KEYWORD_CHKSUM "RV"
/**
- * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length
- * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
- *
- * Returns the extracted Large Resource Data Type length.
- */
-static inline u16 pci_vpd_lrdt_size(const u8 *lrdt)
-{
- return (u16)lrdt[1] + ((u16)lrdt[2] << 8);
-}
-
-/**
- * pci_vpd_lrdt_tag - Extracts the Large Resource Data Type Tag Item
- * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
- *
- * Returns the extracted Large Resource Data Type Tag item.
- */
-static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt)
-{
- return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK);
-}
-
-/**
- * pci_vpd_srdt_size - Extracts the Small Resource Data Type length
- * @srdt: Pointer to the beginning of the Small Resource Data Type tag
- *
- * Returns the extracted Small Resource Data Type length.
- */
-static inline u8 pci_vpd_srdt_size(const u8 *srdt)
-{
- return (*srdt) & PCI_VPD_SRDT_LEN_MASK;
-}
-
-/**
- * pci_vpd_srdt_tag - Extracts the Small Resource Data Type Tag Item
- * @srdt: Pointer to the beginning of the Small Resource Data Type tag
+ * pci_vpd_alloc - Allocate buffer and read VPD into it
+ * @dev: PCI device
+ * @size: pointer to field where VPD length is returned
*
- * Returns the extracted Small Resource Data Type Tag Item.
+ * Returns a pointer to the allocated buffer, or an ERR_PTR() on failure.
*/
-static inline u8 pci_vpd_srdt_tag(const u8 *srdt)
-{
- return ((*srdt) & PCI_VPD_SRDT_TIN_MASK) >> 3;
-}
+void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size);
/**
- * pci_vpd_info_field_size - Extracts the information field length
- * @info_field: Pointer to the beginning of an information field header
+ * pci_vpd_find_id_string - Locate id string in VPD
+ * @buf: Pointer to buffered VPD data
+ * @len: The length of the buffer area in which to search
+ * @size: Pointer to field where length of id string is returned
*
- * Returns the extracted information field length.
+ * Returns the index of the id string or -ENOENT if not found.
*/
-static inline u8 pci_vpd_info_field_size(const u8 *info_field)
-{
- return info_field[2];
-}
+int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size);
/**
- * pci_vpd_find_tag - Locates the Resource Data Type tag provided
- * @buf: Pointer to buffered vpd data
- * @off: The offset into the buffer at which to begin the search
- * @len: The length of the vpd buffer
- * @rdt: The Resource Data Type to search for
+ * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section
+ * @buf: Pointer to buffered VPD data
+ * @len: The length of the buffer area in which to search
+ * @kw: The keyword to search for
+ * @size: Pointer to field where length of found keyword data is returned
*
- * Returns the index where the Resource Data Type was found or
- * -ENOENT otherwise.
+ * Returns the index of the information field keyword data or -ENOENT if
+ * not found.
*/
-int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt);
+int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len,
+ const char *kw, unsigned int *size);
/**
- * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD
- * @buf: Pointer to buffered vpd data
- * @off: The offset into the buffer at which to begin the search
- * @len: The length of the buffer area, relative to off, in which to search
- * @kw: The keyword to search for
+ * pci_vpd_check_csum - Check VPD checksum
+ * @buf: Pointer to buffered VPD data
+ * @len: VPD size
*
- * Returns the index where the information field keyword was found or
- * -ENOENT otherwise.
+ * Returns 1 if the VPD has no checksum, 0 if the checksum is valid,
+ * or a negative errno otherwise.
*/
-int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
- unsigned int len, const char *kw);
+int pci_vpd_check_csum(const void *buf, unsigned int len);
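(Taken together, the new interface replaces the manual tag walking removed above. A sketch of a driver reading its part number; error handling is minimal and the printable-string assumption is the caller's:)

	unsigned int vpd_len, kw_len;
	void *vpd = pci_vpd_alloc(dev, &vpd_len);
	int off;

	if (IS_ERR(vpd))
		return PTR_ERR(vpd);
	off = pci_vpd_find_ro_info_keyword(vpd, vpd_len,
					   PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
	if (off >= 0)
		pci_info(dev, "part number: %.*s\n", (int)kw_len, (char *)vpd + off);
	kfree(vpd);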
/* PCI <-> OF binding helpers */
#ifdef CONFIG_OF
struct device_node;
struct irq_domain;
struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
-int pci_parse_request_of_pci_ranges(struct device *dev,
- struct list_head *resources,
- struct list_head *ib_resources,
- struct resource **bus_range);
+bool pci_host_of_has_msi_map(struct device *dev);
/* Arch may override this (weak) */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
@@ -2271,14 +2384,7 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
#else /* CONFIG_OF */
static inline struct irq_domain *
pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
-static inline int
-pci_parse_request_of_pci_ranges(struct device *dev,
- struct list_head *resources,
- struct list_head *ib_resources,
- struct resource **bus_range)
-{
- return -EINVAL;
-}
+static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; }
#endif /* CONFIG_OF */
static inline struct device_node *
@@ -2368,8 +2474,7 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
#endif
-/* Provide the legacy pci_dma_* API */
-#include <linux/pci-dma-compat.h>
+#include <linux/dma-mapping.h>
#define pci_printk(level, pdev, fmt, arg...) \
dev_printk(level, &(pdev)->dev, fmt, ##arg)
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index b482e42d7153..3a10d6ec3ee7 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -44,12 +44,14 @@ struct hotplug_slot_ops {
int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
- int (*reset_slot) (struct hotplug_slot *slot, int probe);
+ int (*reset_slot) (struct hotplug_slot *slot, bool probe);
};
/**
* struct hotplug_slot - used to register a physical slot with the hotplug pci core
* @ops: pointer to the &struct hotplug_slot_ops to be used for this slot
+ * @slot_list: internal list used to track hotplug PCI slots
+ * @pci_slot: represents a physical slot
* @owner: The module owner of this structure
* @mod_name: The module name (KBUILD_MODNAME) of this structure
*/
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 352c0d708720..b362d90eb9b0 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -51,6 +51,7 @@
#define PCI_BASE_CLASS_MEMORY 0x05
#define PCI_CLASS_MEMORY_RAM 0x0500
#define PCI_CLASS_MEMORY_FLASH 0x0501
+#define PCI_CLASS_MEMORY_CXL 0x0502
#define PCI_CLASS_MEMORY_OTHER 0x0580
#define PCI_BASE_CLASS_BRIDGE 0x06
@@ -59,6 +60,8 @@
#define PCI_CLASS_BRIDGE_EISA 0x0602
#define PCI_CLASS_BRIDGE_MC 0x0603
#define PCI_CLASS_BRIDGE_PCI 0x0604
+#define PCI_CLASS_BRIDGE_PCI_NORMAL 0x060400
+#define PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE 0x060401
#define PCI_CLASS_BRIDGE_PCMCIA 0x0605
#define PCI_CLASS_BRIDGE_NUBUS 0x0606
#define PCI_CLASS_BRIDGE_CARDBUS 0x0607
@@ -72,6 +75,9 @@
#define PCI_CLASS_COMMUNICATION_MODEM 0x0703
#define PCI_CLASS_COMMUNICATION_OTHER 0x0780
+/* Interface for SERIAL/MODEM */
+#define PCI_SERIAL_16550_COMPATIBLE 0x02
+
#define PCI_BASE_CLASS_SYSTEM 0x08
#define PCI_CLASS_SYSTEM_PIC 0x0800
#define PCI_CLASS_SYSTEM_PIC_IOAPIC 0x080010
@@ -81,6 +87,7 @@
#define PCI_CLASS_SYSTEM_RTC 0x0803
#define PCI_CLASS_SYSTEM_PCI_HOTPLUG 0x0804
#define PCI_CLASS_SYSTEM_SDHCI 0x0805
+#define PCI_CLASS_SYSTEM_RCEC 0x0807
#define PCI_CLASS_SYSTEM_OTHER 0x0880
#define PCI_BASE_CLASS_INPUT 0x09
@@ -147,6 +154,9 @@
#define PCI_CLASS_OTHERS 0xff
/* Vendors and devices. Sort key: vendor first, device next. */
+#define PCI_VENDOR_ID_PCI_SIG 0x0001
+
+#define PCI_VENDOR_ID_LOONGSON 0x0014
#define PCI_VENDOR_ID_TTTECH 0x0357
#define PCI_DEVICE_ID_TTTECH_MC322 0x000a
@@ -548,8 +558,15 @@
#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
+#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
+#define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3 0x1727
#define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653
+#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F3 0x14b0
+#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F3 0x167c
+#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F3 0x166d
+#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F3 0x14e3
+#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F3 0x14f3
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
@@ -626,6 +643,8 @@
#define PCI_DEVICE_ID_DELL_RAC4 0x0012
#define PCI_DEVICE_ID_DELL_PERC5 0x0015
+#define PCI_SUBVENDOR_ID_DELL 0x1028
+
#define PCI_VENDOR_ID_MATROX 0x102B
#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518
#define PCI_DEVICE_ID_MATROX_MIL 0x0519
@@ -877,6 +896,7 @@
#define PCI_DEVICE_ID_TI_X620 0xac8d
#define PCI_DEVICE_ID_TI_X420 0xac8e
#define PCI_DEVICE_ID_TI_XX20_FM 0xac8f
+#define PCI_DEVICE_ID_TI_J721E 0xb00d
#define PCI_DEVICE_ID_TI_DRA74x 0xb500
#define PCI_DEVICE_ID_TI_DRA72x 0xb501
@@ -1112,6 +1132,7 @@
#define PCI_DEVICE_ID_3COM_3CR990SVR 0x990a
#define PCI_VENDOR_ID_AL 0x10b9
+#define PCI_DEVICE_ID_AL_M1489 0x1489
#define PCI_DEVICE_ID_AL_M1533 0x1533
#define PCI_DEVICE_ID_AL_M1535 0x1535
#define PCI_DEVICE_ID_AL_M1541 0x1541
@@ -1682,37 +1703,8 @@
#define PCI_VENDOR_ID_MICROSEMI 0x11f8
#define PCI_VENDOR_ID_RP 0x11fe
-#define PCI_DEVICE_ID_RP32INTF 0x0001
-#define PCI_DEVICE_ID_RP8INTF 0x0002
-#define PCI_DEVICE_ID_RP16INTF 0x0003
-#define PCI_DEVICE_ID_RP4QUAD 0x0004
-#define PCI_DEVICE_ID_RP8OCTA 0x0005
-#define PCI_DEVICE_ID_RP8J 0x0006
-#define PCI_DEVICE_ID_RP4J 0x0007
-#define PCI_DEVICE_ID_RP8SNI 0x0008
-#define PCI_DEVICE_ID_RP16SNI 0x0009
-#define PCI_DEVICE_ID_RPP4 0x000A
-#define PCI_DEVICE_ID_RPP8 0x000B
-#define PCI_DEVICE_ID_RP4M 0x000D
-#define PCI_DEVICE_ID_RP2_232 0x000E
-#define PCI_DEVICE_ID_RP2_422 0x000F
-#define PCI_DEVICE_ID_URP32INTF 0x0801
-#define PCI_DEVICE_ID_URP8INTF 0x0802
-#define PCI_DEVICE_ID_URP16INTF 0x0803
-#define PCI_DEVICE_ID_URP8OCTA 0x0805
-#define PCI_DEVICE_ID_UPCI_RM3_8PORT 0x080C
-#define PCI_DEVICE_ID_UPCI_RM3_4PORT 0x080D
-#define PCI_DEVICE_ID_CRP16INTF 0x0903
#define PCI_VENDOR_ID_CYCLADES 0x120e
-#define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100
-#define PCI_DEVICE_ID_CYCLOM_Y_Hi 0x0101
-#define PCI_DEVICE_ID_CYCLOM_4Y_Lo 0x0102
-#define PCI_DEVICE_ID_CYCLOM_4Y_Hi 0x0103
-#define PCI_DEVICE_ID_CYCLOM_8Y_Lo 0x0104
-#define PCI_DEVICE_ID_CYCLOM_8Y_Hi 0x0105
-#define PCI_DEVICE_ID_CYCLOM_Z_Lo 0x0200
-#define PCI_DEVICE_ID_CYCLOM_Z_Hi 0x0201
#define PCI_DEVICE_ID_PC300_RX_2 0x0300
#define PCI_DEVICE_ID_PC300_RX_1 0x0301
#define PCI_DEVICE_ID_PC300_TE_2 0x0310
@@ -1830,6 +1822,12 @@
#define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2
#define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018
+#define PCI_VENDOR_ID_PERICOM 0x12D8
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958
+
#define PCI_SUBVENDOR_ID_CHASE_PCIFAST 0x12E0
#define PCI_SUBDEVICE_ID_CHASE_PCIFAST4 0x0031
#define PCI_SUBDEVICE_ID_CHASE_PCIFAST8 0x0021
@@ -1976,24 +1974,6 @@
#define PCI_DEVICE_ID_APPLICOM_PCI2000PFB 0x0003
#define PCI_VENDOR_ID_MOXA 0x1393
-#define PCI_DEVICE_ID_MOXA_RC7000 0x0001
-#define PCI_DEVICE_ID_MOXA_CP102 0x1020
-#define PCI_DEVICE_ID_MOXA_CP102UL 0x1021
-#define PCI_DEVICE_ID_MOXA_CP102U 0x1022
-#define PCI_DEVICE_ID_MOXA_C104 0x1040
-#define PCI_DEVICE_ID_MOXA_CP104U 0x1041
-#define PCI_DEVICE_ID_MOXA_CP104JU 0x1042
-#define PCI_DEVICE_ID_MOXA_CP104EL 0x1043
-#define PCI_DEVICE_ID_MOXA_CT114 0x1140
-#define PCI_DEVICE_ID_MOXA_CP114 0x1141
-#define PCI_DEVICE_ID_MOXA_CP118U 0x1180
-#define PCI_DEVICE_ID_MOXA_CP118EL 0x1181
-#define PCI_DEVICE_ID_MOXA_CP132 0x1320
-#define PCI_DEVICE_ID_MOXA_CP132U 0x1321
-#define PCI_DEVICE_ID_MOXA_CP134U 0x1340
-#define PCI_DEVICE_ID_MOXA_C168 0x1680
-#define PCI_DEVICE_ID_MOXA_CP168U 0x1681
-#define PCI_DEVICE_ID_MOXA_CP168EL 0x1682
#define PCI_DEVICE_ID_MOXA_CP204J 0x2040
#define PCI_DEVICE_ID_MOXA_C218 0x2180
#define PCI_DEVICE_ID_MOXA_C320 0x3200
@@ -2053,8 +2033,6 @@
#define PCI_DEVICE_ID_EXAR_XR17V358 0x0358
#define PCI_VENDOR_ID_MICROGATE 0x13c0
-#define PCI_DEVICE_ID_MICROGATE_USC 0x0010
-#define PCI_DEVICE_ID_MICROGATE_SCA 0x0030
#define PCI_VENDOR_ID_3WARE 0x13C1
#define PCI_DEVICE_ID_3WARE_1000 0x1000
@@ -2104,6 +2082,9 @@
#define PCI_DEVICE_ID_ICE_1712 0x1712
#define PCI_DEVICE_ID_VT1724 0x1724
+#define PCI_VENDOR_ID_MICROSOFT 0x1414
+#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
+
#define PCI_VENDOR_ID_OXSEMI 0x1415
#define PCI_DEVICE_ID_OXSEMI_12PCI840 0x8403
#define PCI_DEVICE_ID_OXSEMI_PCIe840 0xC000
@@ -2467,7 +2448,8 @@
#define PCI_VENDOR_ID_TDI 0x192E
#define PCI_DEVICE_ID_TDI_EHCI 0x0101
-#define PCI_VENDOR_ID_FREESCALE 0x1957
+#define PCI_VENDOR_ID_FREESCALE 0x1957 /* duplicate: NXP */
+#define PCI_VENDOR_ID_NXP 0x1957 /* duplicate: FREESCALE */
#define PCI_DEVICE_ID_MPC8308 0xc006
#define PCI_DEVICE_ID_MPC8315E 0x00b4
#define PCI_DEVICE_ID_MPC8315 0x00b5
@@ -2559,11 +2541,16 @@
#define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff
#define PCI_VENDOR_ID_HUAWEI 0x19e5
+#define PCI_DEVICE_ID_HUAWEI_ZIP_VF 0xa251
+#define PCI_DEVICE_ID_HUAWEI_SEC_VF 0xa256
+#define PCI_DEVICE_ID_HUAWEI_HPRE_VF 0xa259
#define PCI_VENDOR_ID_NETRONOME 0x19ee
+#define PCI_DEVICE_ID_NETRONOME_NFP3800 0x3800
#define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000
#define PCI_DEVICE_ID_NETRONOME_NFP5000 0x5000
#define PCI_DEVICE_ID_NETRONOME_NFP6000 0x6000
+#define PCI_DEVICE_ID_NETRONOME_NFP3800_VF 0x3803
#define PCI_DEVICE_ID_NETRONOME_NFP6000_VF 0x6003
#define PCI_VENDOR_ID_QMI 0x1a32
@@ -2576,6 +2563,10 @@
#define PCI_VENDOR_ID_ASMEDIA 0x1b21
+#define PCI_VENDOR_ID_REDHAT 0x1b36
+
+#define PCI_VENDOR_ID_SILICOM_DENMARK 0x1c2c
+
#define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36
#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8
@@ -2583,8 +2574,12 @@
#define PCI_VENDOR_ID_AMAZON 0x1d0f
+#define PCI_VENDOR_ID_ZHAOXIN 0x1d17
+
#define PCI_VENDOR_ID_HYGON 0x1d94
+#define PCI_VENDOR_ID_FUNGIBLE 0x1dad
+
#define PCI_VENDOR_ID_HXT 0x1dbf
#define PCI_VENDOR_ID_TEKRAM 0x1de1
@@ -2642,15 +2637,18 @@
#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320
#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321
#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329
-#define PCI_DEVICE_ID_INTEL_PXH_1 0x032A
-#define PCI_DEVICE_ID_INTEL_PXHV 0x032C
+#define PCI_DEVICE_ID_INTEL_PXH_1 0x032a
+#define PCI_DEVICE_ID_INTEL_PXHV 0x032c
#define PCI_DEVICE_ID_INTEL_80332_0 0x0330
#define PCI_DEVICE_ID_INTEL_80332_1 0x0332
#define PCI_DEVICE_ID_INTEL_80333_0 0x0370
#define PCI_DEVICE_ID_INTEL_80333_1 0x0372
+#define PCI_DEVICE_ID_INTEL_QAT_DH895XCC 0x0435
+#define PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF 0x0443
#define PCI_DEVICE_ID_INTEL_82375 0x0482
#define PCI_DEVICE_ID_INTEL_82424 0x0483
#define PCI_DEVICE_ID_INTEL_82378 0x0484
+#define PCI_DEVICE_ID_INTEL_82425 0x0486
#define PCI_DEVICE_ID_INTEL_MRST_SD0 0x0807
#define PCI_DEVICE_ID_INTEL_MRST_SD1 0x0808
#define PCI_DEVICE_ID_INTEL_MFD_SD 0x0820
@@ -2658,14 +2656,14 @@
#define PCI_DEVICE_ID_INTEL_MFD_SDIO2 0x0822
#define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823
#define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824
-#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F
-#define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095E
+#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084f
+#define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095e
#define PCI_DEVICE_ID_INTEL_I960 0x0960
#define PCI_DEVICE_ID_INTEL_I960RM 0x0962
#define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60
#define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062
#define PCI_DEVICE_ID_INTEL_82573E_SOL 0x1085
-#define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108F
+#define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108f
#define PCI_DEVICE_ID_INTEL_82815_MC 0x1130
#define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132
#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
@@ -2697,6 +2695,8 @@
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI 0x1577
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE 0x1578
#define PCI_DEVICE_ID_INTEL_80960_RP 0x1960
+#define PCI_DEVICE_ID_INTEL_QAT_C3XXX 0x19e2
+#define PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF 0x19e3
#define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21
#define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30
#define PCI_DEVICE_ID_INTEL_IOAT 0x1a38
@@ -2757,12 +2757,6 @@
#define PCI_DEVICE_ID_INTEL_82801EB_11 0x24db
#define PCI_DEVICE_ID_INTEL_82801EB_12 0x24dc
#define PCI_DEVICE_ID_INTEL_82801EB_13 0x24dd
-#define PCI_DEVICE_ID_INTEL_ESB_1 0x25a1
-#define PCI_DEVICE_ID_INTEL_ESB_2 0x25a2
-#define PCI_DEVICE_ID_INTEL_ESB_4 0x25a4
-#define PCI_DEVICE_ID_INTEL_ESB_5 0x25a6
-#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab
-#define PCI_DEVICE_ID_INTEL_ESB_10 0x25ac
#define PCI_DEVICE_ID_INTEL_82820_HB 0x2500
#define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501
#define PCI_DEVICE_ID_INTEL_82850_HB 0x2530
@@ -2777,14 +2771,15 @@
#define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582
#define PCI_DEVICE_ID_INTEL_82915GM_HB 0x2590
#define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592
-#define PCI_DEVICE_ID_INTEL_5000_ERR 0x25F0
-#define PCI_DEVICE_ID_INTEL_5000_FBD0 0x25F5
-#define PCI_DEVICE_ID_INTEL_5000_FBD1 0x25F6
-#define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770
-#define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772
-#define PCI_DEVICE_ID_INTEL_3000_HB 0x2778
-#define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27A0
-#define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2
+#define PCI_DEVICE_ID_INTEL_ESB_1 0x25a1
+#define PCI_DEVICE_ID_INTEL_ESB_2 0x25a2
+#define PCI_DEVICE_ID_INTEL_ESB_4 0x25a4
+#define PCI_DEVICE_ID_INTEL_ESB_5 0x25a6
+#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab
+#define PCI_DEVICE_ID_INTEL_ESB_10 0x25ac
+#define PCI_DEVICE_ID_INTEL_5000_ERR 0x25f0
+#define PCI_DEVICE_ID_INTEL_5000_FBD0 0x25f5
+#define PCI_DEVICE_ID_INTEL_5000_FBD1 0x25f6
#define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640
#define PCI_DEVICE_ID_INTEL_ICH6_1 0x2641
#define PCI_DEVICE_ID_INTEL_ICH6_2 0x2642
@@ -2796,6 +2791,11 @@
#define PCI_DEVICE_ID_INTEL_ESB2_14 0x2698
#define PCI_DEVICE_ID_INTEL_ESB2_17 0x269b
#define PCI_DEVICE_ID_INTEL_ESB2_18 0x269e
+#define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770
+#define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772
+#define PCI_DEVICE_ID_INTEL_3000_HB 0x2778
+#define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27a0
+#define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27a2
#define PCI_DEVICE_ID_INTEL_ICH7_0 0x27b8
#define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9
#define PCI_DEVICE_ID_INTEL_ICH7_30 0x27b0
@@ -2848,7 +2848,7 @@
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_PHY0 0x2c91
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR 0x2c98
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD 0x2c99
-#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST 0x2c9C
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST 0x2c9c
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL 0x2ca0
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR 0x2ca1
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK 0x2ca2
@@ -2913,6 +2913,8 @@
#define PCI_DEVICE_ID_INTEL_IOAT_JSF7 0x3717
#define PCI_DEVICE_ID_INTEL_IOAT_JSF8 0x3718
#define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719
+#define PCI_DEVICE_ID_INTEL_QAT_C62X 0x37c8
+#define PCI_DEVICE_ID_INTEL_QAT_C62X_VF 0x37c9
#define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14
#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16
#define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18
@@ -2958,16 +2960,16 @@
#define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */
#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f
-#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
-#define PCI_DEVICE_ID_INTEL_5100_19 0x65f3
-#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
-#define PCI_DEVICE_ID_INTEL_5100_22 0x65f6
#define PCI_DEVICE_ID_INTEL_5400_ERR 0x4030
#define PCI_DEVICE_ID_INTEL_5400_FBD0 0x4035
#define PCI_DEVICE_ID_INTEL_5400_FBD1 0x4036
-#define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff
#define PCI_DEVICE_ID_INTEL_EP80579_0 0x5031
#define PCI_DEVICE_ID_INTEL_EP80579_1 0x5032
+#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
+#define PCI_DEVICE_ID_INTEL_5100_19 0x65f3
+#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
+#define PCI_DEVICE_ID_INTEL_5100_22 0x65f6
+#define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff
#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
#define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010
#define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020
diff --git a/include/linux/pcs-altera-tse.h b/include/linux/pcs-altera-tse.h
new file mode 100644
index 000000000000..92ab9f08e835
--- /dev/null
+++ b/include/linux/pcs-altera-tse.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 Bootlin
+ *
+ * Maxime Chevallier <maxime.chevallier@bootlin.com>
+ */
+
+#ifndef __LINUX_PCS_ALTERA_TSE_H
+#define __LINUX_PCS_ALTERA_TSE_H
+
+struct phylink_pcs;
+struct net_device;
+
+struct phylink_pcs *alt_tse_pcs_create(struct net_device *ndev,
+ void __iomem *pcs_base, int reg_width);
+
+#endif /* __LINUX_PCS_ALTERA_TSE_H */
diff --git a/include/linux/pcs-lynx.h b/include/linux/pcs-lynx.h
new file mode 100644
index 000000000000..5712cc2ce775
--- /dev/null
+++ b/include/linux/pcs-lynx.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2020 NXP
+ * Lynx PCS helpers
+ */
+
+#ifndef __LINUX_PCS_LYNX_H
+#define __LINUX_PCS_LYNX_H
+
+#include <linux/mdio.h>
+#include <linux/phylink.h>
+
+struct mdio_device *lynx_get_mdio_device(struct phylink_pcs *pcs);
+
+struct phylink_pcs *lynx_pcs_create(struct mdio_device *mdio);
+
+void lynx_pcs_destroy(struct phylink_pcs *pcs);
+
+#endif /* __LINUX_PCS_LYNX_H */
diff --git a/include/linux/pcs-rzn1-miic.h b/include/linux/pcs-rzn1-miic.h
new file mode 100644
index 000000000000..56d12b21365d
--- /dev/null
+++ b/include/linux/pcs-rzn1-miic.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 Schneider Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#ifndef __LINUX_PCS_MIIC_H
+#define __LINUX_PCS_MIIC_H
+
+struct phylink;
+struct device_node;
+
+struct phylink_pcs *miic_create(struct device *dev, struct device_node *np);
+
+void miic_destroy(struct phylink_pcs *pcs);
+
+#endif /* __LINUX_PCS_MIIC_H */
diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h
new file mode 100644
index 000000000000..d2da1e0b4a92
--- /dev/null
+++ b/include/linux/pcs/pcs-xpcs.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare XPCS helpers
+ */
+
+#ifndef __LINUX_PCS_XPCS_H
+#define __LINUX_PCS_XPCS_H
+
+#include <linux/phy.h>
+#include <linux/phylink.h>
+
+#define NXP_SJA1105_XPCS_ID 0x00000010
+#define NXP_SJA1110_XPCS_ID 0x00000020
+
+/* AN mode */
+#define DW_AN_C73 1
+#define DW_AN_C37_SGMII 2
+#define DW_2500BASEX 3
+#define DW_AN_C37_1000BASEX 4
+
+struct xpcs_id;
+
+struct dw_xpcs {
+ struct mdio_device *mdiodev;
+ const struct xpcs_id *id;
+ struct phylink_pcs pcs;
+};
+
+int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface);
+void xpcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex);
+int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
+ unsigned int mode, const unsigned long *advertising);
+void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces);
+int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns,
+ int enable);
+struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev,
+ phy_interface_t interface);
+void xpcs_destroy(struct dw_xpcs *xpcs);
+
+#endif /* __LINUX_PCS_XPCS_H */
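(A minimal sketch of how a MAC driver might consume this header, assuming xpcs_create() follows the usual ERR_PTR convention; the mdiodev and the phylink wiring are assumed to exist elsewhere:)

	struct dw_xpcs *xpcs = xpcs_create(mdiodev, PHY_INTERFACE_MODE_SGMII);

	if (IS_ERR(xpcs))
		return PTR_ERR(xpcs);
	/* hand &xpcs->pcs to phylink; on teardown: */
	xpcs_destroy(xpcs);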
diff --git a/include/linux/pe.h b/include/linux/pe.h
index c86bd3a2f70f..1d3836ef9d92 100644
--- a/include/linux/pe.h
+++ b/include/linux/pe.h
@@ -10,6 +10,27 @@
#include <linux/types.h>
+/*
+ * Linux EFI stub v1.0 adds the following functionality:
+ * - Loading initrd from the LINUX_EFI_INITRD_MEDIA_GUID device path,
+ * - Loading/starting the kernel from firmware that targets a different
+ * machine type, via the entrypoint exposed in the .compat PE/COFF section.
+ *
+ * The recommended way of loading and starting v1.0 or later kernels is to use
+ * the LoadImage() and StartImage() EFI boot services, and expose the initrd
+ * via the LINUX_EFI_INITRD_MEDIA_GUID device path.
+ *
+ * Versions older than v1.0 support initrd loading via the image load options
+ * (using initrd=, limited to the volume from which the kernel itself was
+ * loaded), or via arch-specific means (bootparams, DT, etc.).
+ *
+ * On x86, LoadImage() and StartImage() can be omitted if the EFI handover
+ * protocol is implemented, which can be inferred from the version,
+ * handover_offset and xloadflags fields in the bootparams structure.
+ */
+#define LINUX_EFISTUB_MAJOR_VERSION 0x1
+#define LINUX_EFISTUB_MINOR_VERSION 0x0
+
#define MZ_MAGIC 0x5a4d /* "MZ" */
#define PE_MAGIC 0x00004550 /* "PE\0\0" */
@@ -34,6 +55,9 @@
#define IMAGE_FILE_MACHINE_POWERPC 0x01f0
#define IMAGE_FILE_MACHINE_POWERPCFP 0x01f1
#define IMAGE_FILE_MACHINE_R4000 0x0166
+#define IMAGE_FILE_MACHINE_RISCV32 0x5032
+#define IMAGE_FILE_MACHINE_RISCV64 0x5064
+#define IMAGE_FILE_MACHINE_RISCV128 0x5128
#define IMAGE_FILE_MACHINE_SH3 0x01a2
#define IMAGE_FILE_MACHINE_SH3DSP 0x01a3
#define IMAGE_FILE_MACHINE_SH3E 0x01a4
@@ -41,6 +65,8 @@
#define IMAGE_FILE_MACHINE_SH5 0x01a8
#define IMAGE_FILE_MACHINE_THUMB 0x01c2
#define IMAGE_FILE_MACHINE_WCEMIPSV2 0x0169
+#define IMAGE_FILE_MACHINE_LOONGARCH32 0x6232
+#define IMAGE_FILE_MACHINE_LOONGARCH64 0x6264
/* flags */
#define IMAGE_FILE_RELOCS_STRIPPED 0x0001
diff --git a/include/linux/peci-cpu.h b/include/linux/peci-cpu.h
new file mode 100644
index 000000000000..ff8ae9c26c80
--- /dev/null
+++ b/include/linux/peci-cpu.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2021 Intel Corporation */
+
+#ifndef __LINUX_PECI_CPU_H
+#define __LINUX_PECI_CPU_H
+
+#include <linux/types.h>
+
+#include "../../arch/x86/include/asm/intel-family.h"
+
+#define PECI_PCS_PKG_ID 0 /* Package Identifier Read */
+#define PECI_PKG_ID_CPU_ID 0x0000 /* CPUID Info */
+#define PECI_PKG_ID_PLATFORM_ID 0x0001 /* Platform ID */
+#define PECI_PKG_ID_DEVICE_ID 0x0002 /* Uncore Device ID */
+#define PECI_PKG_ID_MAX_THREAD_ID 0x0003 /* Max Thread ID */
+#define PECI_PKG_ID_MICROCODE_REV 0x0004 /* CPU Microcode Update Revision */
+#define PECI_PKG_ID_MCA_ERROR_LOG 0x0005 /* Machine Check Status */
+#define PECI_PCS_MODULE_TEMP 9 /* Per Core DTS Temperature Read */
+#define PECI_PCS_THERMAL_MARGIN 10 /* DTS thermal margin */
+#define PECI_PCS_DDR_DIMM_TEMP 14 /* DDR DIMM Temperature */
+#define PECI_PCS_TEMP_TARGET 16 /* Temperature Target Read */
+#define PECI_PCS_TDP_UNITS 30 /* Units for power/energy registers */
+
+struct peci_device;
+
+int peci_temp_read(struct peci_device *device, s16 *temp_raw);
+
+int peci_pcs_read(struct peci_device *device, u8 index,
+ u16 param, u32 *data);
+
+int peci_pci_local_read(struct peci_device *device, u8 bus, u8 dev,
+ u8 func, u16 reg, u32 *data);
+
+int peci_ep_pci_local_read(struct peci_device *device, u8 seg,
+ u8 bus, u8 dev, u8 func, u16 reg, u32 *data);
+
+int peci_mmio_read(struct peci_device *device, u8 bar, u8 seg,
+ u8 bus, u8 dev, u8 func, u64 address, u32 *data);
+
+#endif /* __LINUX_PECI_CPU_H */
diff --git a/include/linux/peci.h b/include/linux/peci.h
new file mode 100644
index 000000000000..06e6ef935297
--- /dev/null
+++ b/include/linux/peci.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2018-2021 Intel Corporation */
+
+#ifndef __LINUX_PECI_H
+#define __LINUX_PECI_H
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+/*
+ * Currently we don't support any PECI command over 32 bytes.
+ */
+#define PECI_REQUEST_MAX_BUF_SIZE 32
+
+struct peci_controller;
+struct peci_request;
+
+/**
+ * struct peci_controller_ops - PECI controller specific methods
+ * @xfer: PECI transfer function
+ *
+ * PECI controllers may have different hardware interfaces - the drivers
+ * implementing PECI controllers can use this structure to abstract away those
+ * differences by exposing a common interface for PECI core.
+ */
+struct peci_controller_ops {
+ int (*xfer)(struct peci_controller *controller, u8 addr, struct peci_request *req);
+};
+
+/**
+ * struct peci_controller - PECI controller
+ * @dev: device object to register PECI controller to the device model
+ * @ops: pointer to device specific controller operations
+ * @bus_lock: lock used to protect multiple callers
+ * @id: PECI controller ID
+ *
+ * PECI controllers usually connect to their drivers using a non-PECI bus,
+ * such as the platform bus.
+ * Each PECI controller can communicate with one or more PECI devices.
+ */
+struct peci_controller {
+ struct device dev;
+ struct peci_controller_ops *ops;
+ struct mutex bus_lock; /* held for the duration of xfer */
+ u8 id;
+};
+
+struct peci_controller *devm_peci_controller_add(struct device *parent,
+ struct peci_controller_ops *ops);
+
+static inline struct peci_controller *to_peci_controller(void *d)
+{
+ return container_of(d, struct peci_controller, dev);
+}
+
+/**
+ * struct peci_device - PECI device
+ * @dev: device object to register PECI device to the device model
+ * @controller: manages the bus segment hosting this PECI device
+ * @info: PECI device characteristics
+ * @info.family: device family
+ * @info.model: device model
+ * @info.peci_revision: PECI revision supported by the PECI device
+ * @info.socket_id: the socket ID represented by the PECI device
+ * @addr: address used on the PECI bus connected to the parent controller
+ * @deleted: indicates that PECI device was already deleted
+ *
+ * A peci_device identifies a single device (i.e. CPU) connected to a PECI bus.
+ * The behaviour exposed to the rest of the system is defined by the PECI driver
+ * managing the device.
+ */
+struct peci_device {
+ struct device dev;
+ struct {
+ u16 family;
+ u8 model;
+ u8 peci_revision;
+ u8 socket_id;
+ } info;
+ u8 addr;
+ bool deleted;
+};
+
+static inline struct peci_device *to_peci_device(struct device *d)
+{
+ return container_of(d, struct peci_device, dev);
+}
+
+/**
+ * struct peci_request - PECI request
+ * @device: PECI device to which the request is sent
+ * @tx: TX buffer specific data
+ * @tx.buf: TX buffer
+ * @tx.len: transfer data length in bytes
+ * @rx: RX buffer specific data
+ * @rx.buf: RX buffer
+ * @rx.len: received data length in bytes
+ *
+ * A peci_request represents a request issued by a PECI originator (TX) and
+ * a response received from a PECI responder (RX).
+ */
+struct peci_request {
+ struct peci_device *device;
+ struct {
+ u8 buf[PECI_REQUEST_MAX_BUF_SIZE];
+ u8 len;
+ } rx, tx;
+};
+
+#endif /* __LINUX_PECI_H */
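(To illustrate the controller abstraction described above: a hypothetical bus driver supplies an xfer callback and registers it. All my_* names are invented, and devm_peci_controller_add() is assumed to follow the ERR_PTR convention:)

static int my_peci_xfer(struct peci_controller *ctrl, u8 addr,
			struct peci_request *req)
{
	/* drive the hardware: send req->tx.buf (req->tx.len bytes),
	 * then fill req->rx.buf and req->rx.len with the response
	 */
	return 0;
}

static struct peci_controller_ops my_peci_ops = {
	.xfer = my_peci_xfer,
};

	/* in the parent device's probe(): */
	ctrl = devm_peci_controller_add(dev, &my_peci_ops);
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);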
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 176bfbd52d97..af1071535de8 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -51,7 +51,7 @@
PER_CPU_ATTRIBUTES
#define __PCPU_DUMMY_ATTRS \
- __attribute__((section(".discard"), unused))
+ __section(".discard") __attribute__((unused))
/*
* s390 and alpha modules require percpu variables to be defined as
@@ -412,7 +412,7 @@ do { \
* instead.
*
* If there is no other protection through preempt disable and/or disabling
- * interupts then one of these RMW operations can show unexpected behavior
+ * interrupts then one of these RMW operations can show unexpected behavior
* because the execution thread was rescheduled on another processor or an
* interrupt occurred and the same percpu variable was modified from the
* interrupt context.
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 22d9d183950d..d73a1c08c3e3 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -51,9 +51,9 @@
#define _LINUX_PERCPU_REFCOUNT_H
#include <linux/atomic.h>
-#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
+#include <linux/types.h>
#include <linux/gfp.h>
struct percpu_ref;
@@ -92,18 +92,30 @@ enum {
PERCPU_REF_ALLOW_REINIT = 1 << 2,
};
-struct percpu_ref {
+struct percpu_ref_data {
atomic_long_t count;
- /*
- * The low bit of the pointer indicates whether the ref is in percpu
- * mode; if set, then get/put will manipulate the atomic_t.
- */
- unsigned long percpu_count_ptr;
percpu_ref_func_t *release;
percpu_ref_func_t *confirm_switch;
bool force_atomic:1;
bool allow_reinit:1;
struct rcu_head rcu;
+ struct percpu_ref *ref;
+};
+
+struct percpu_ref {
+ /*
+ * The low bit of the pointer indicates whether the ref is in percpu
+ * mode; if set, then get/put will manipulate the atomic_t.
+ */
+ unsigned long percpu_count_ptr;
+
+ /*
+ * 'percpu_ref' is often embedded into a user structure, and only
+ * 'percpu_count_ptr' is needed in the fast path. Move the other
+ * fields into 'percpu_ref_data' to reduce the memory footprint of
+ * the fast path.
+ */
+ struct percpu_ref_data *data;
};
int __must_check percpu_ref_init(struct percpu_ref *ref,
@@ -118,6 +130,7 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill);
void percpu_ref_resurrect(struct percpu_ref *ref);
void percpu_ref_reinit(struct percpu_ref *ref);
+bool percpu_ref_is_zero(struct percpu_ref *ref);
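(Sketch of typical usage, which is unchanged by the split: only percpu_count_ptr is touched on the hot path, the rest now lives behind ref->data. The release callback and containing object are invented:)

static void obj_release(struct percpu_ref *ref)
{
	/* last reference gone; free the object embedding the ref */
}

	percpu_ref_init(&obj->ref, obj_release, 0, GFP_KERNEL);
	percpu_ref_get(&obj->ref);	/* fast path: per-cpu increment */
	percpu_ref_put(&obj->ref);	/* may invoke obj_release() */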
/**
* percpu_ref_kill - drop the initial ref
@@ -155,7 +168,7 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
* between contaminating the pointer value, meaning that
* READ_ONCE() is required when fetching it.
*
- * The smp_read_barrier_depends() implied by READ_ONCE() pairs
+ * The dependency ordering from the READ_ONCE() pairs
* with smp_store_release() in __percpu_ref_switch_to_percpu().
*/
percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
@@ -191,7 +204,7 @@ static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
if (__ref_is_percpu(ref, &percpu_count))
this_cpu_add(*percpu_count, nr);
else
- atomic_long_add(nr, &ref->count);
+ atomic_long_add(nr, &ref->data->count);
rcu_read_unlock();
}
@@ -200,7 +213,7 @@ static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
* percpu_ref_get - increment a percpu refcount
* @ref: percpu_ref to get
*
- * Analagous to atomic_long_inc().
+ * Analogous to atomic_long_inc().
*
* This function is safe to call as long as @ref is between init and exit.
*/
@@ -231,7 +244,7 @@ static inline bool percpu_ref_tryget_many(struct percpu_ref *ref,
this_cpu_add(*percpu_count, nr);
ret = true;
} else {
- ret = atomic_long_add_unless(&ref->count, nr, 0);
+ ret = atomic_long_add_unless(&ref->data->count, nr, 0);
}
rcu_read_unlock();
@@ -254,6 +267,28 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
}
/**
+ * percpu_ref_tryget_live_rcu - same as percpu_ref_tryget_live() but the
+ * caller is responsible for taking RCU.
+ * @ref: percpu_ref to try-get
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_tryget_live_rcu(struct percpu_ref *ref)
+{
+ unsigned long __percpu *percpu_count;
+ bool ret = false;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (likely(__ref_is_percpu(ref, &percpu_count))) {
+ this_cpu_inc(*percpu_count);
+ ret = true;
+ } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
+ ret = atomic_long_inc_not_zero(&ref->data->count);
+ }
+ return ret;
+}
+
+/**
* percpu_ref_tryget_live - try to increment a live percpu refcount
* @ref: percpu_ref to try-get
*
@@ -270,20 +305,11 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
*/
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
- unsigned long __percpu *percpu_count;
bool ret = false;
rcu_read_lock();
-
- if (__ref_is_percpu(ref, &percpu_count)) {
- this_cpu_inc(*percpu_count);
- ret = true;
- } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
- ret = atomic_long_inc_not_zero(&ref->count);
- }
-
+ ret = percpu_ref_tryget_live_rcu(ref);
rcu_read_unlock();
-
return ret;
}
@@ -305,8 +331,8 @@ static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
if (__ref_is_percpu(ref, &percpu_count))
this_cpu_sub(*percpu_count, nr);
- else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
- ref->release(ref);
+ else if (unlikely(atomic_long_sub_and_test(nr, &ref->data->count)))
+ ref->data->release(ref);
rcu_read_unlock();
}
@@ -339,21 +365,4 @@ static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
}
-/**
- * percpu_ref_is_zero - test whether a percpu refcount reached zero
- * @ref: percpu_ref to test
- *
- * Returns %true if @ref reached zero.
- *
- * This function is safe to call as long as @ref is between init and exit.
- */
-static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
-{
- unsigned long __percpu *percpu_count;
-
- if (__ref_is_percpu(ref, &percpu_count))
- return false;
- return !atomic_long_read(&ref->count);
-}
-
#endif
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index ad2ca2a89d5b..36b942b67b7d 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -3,41 +3,52 @@
#define _LINUX_PERCPU_RWSEM_H
#include <linux/atomic.h>
-#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/rcuwait.h>
+#include <linux/wait.h>
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>
struct percpu_rw_semaphore {
struct rcu_sync rss;
unsigned int __percpu *read_count;
- struct rw_semaphore rw_sem; /* slowpath */
- struct rcuwait writer; /* blocked writer */
- int readers_block;
+ struct rcuwait writer;
+ wait_queue_head_t waiters;
+ atomic_t block;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
};
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname },
+#else
+#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
#define __DEFINE_PERCPU_RWSEM(name, is_static) \
static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name); \
is_static struct percpu_rw_semaphore name = { \
.rss = __RCU_SYNC_INITIALIZER(name.rss), \
.read_count = &__percpu_rwsem_rc_##name, \
- .rw_sem = __RWSEM_INITIALIZER(name.rw_sem), \
.writer = __RCUWAIT_INITIALIZER(name.writer), \
+ .waiters = __WAIT_QUEUE_HEAD_INITIALIZER(name.waiters), \
+ .block = ATOMIC_INIT(0), \
+ __PERCPU_RWSEM_DEP_MAP_INIT(name) \
}
+
#define DEFINE_PERCPU_RWSEM(name) \
__DEFINE_PERCPU_RWSEM(name, /* not static */)
#define DEFINE_STATIC_PERCPU_RWSEM(name) \
__DEFINE_PERCPU_RWSEM(name, static)
-extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
-extern void __percpu_up_read(struct percpu_rw_semaphore *);
+extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool);
static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
might_sleep();
- rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_);
+ rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
preempt_disable();
/*
@@ -48,8 +59,9 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
* and that once the synchronize_rcu() is done, the writer will see
* anything we did within this RCU-sched read-size critical section.
*/
- __this_cpu_inc(*sem->read_count);
- if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+ if (likely(rcu_sync_is_idle(&sem->rss)))
+ this_cpu_inc(*sem->read_count);
+ else
__percpu_down_read(sem, false); /* Unconditional memory barrier */
/*
* The preempt_enable() prevents the compiler from
@@ -58,16 +70,17 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
preempt_enable();
}
-static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
+static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
- int ret = 1;
+ bool ret = true;
preempt_disable();
/*
* Same as in percpu_down_read().
*/
- __this_cpu_inc(*sem->read_count);
- if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+ if (likely(rcu_sync_is_idle(&sem->rss)))
+ this_cpu_inc(*sem->read_count);
+ else
ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
preempt_enable();
/*
@@ -76,29 +89,47 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
*/
if (ret)
- rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_);
+ rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
return ret;
}
static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
+ rwsem_release(&sem->dep_map, _RET_IP_);
+
preempt_disable();
/*
* Same as in percpu_down_read().
*/
- if (likely(rcu_sync_is_idle(&sem->rss)))
- __this_cpu_dec(*sem->read_count);
- else
- __percpu_up_read(sem); /* Unconditional memory barrier */
+ if (likely(rcu_sync_is_idle(&sem->rss))) {
+ this_cpu_dec(*sem->read_count);
+ } else {
+ /*
+ * slowpath; reader will only ever wake a single blocked
+ * writer.
+ */
+ smp_mb(); /* B matches C */
+ /*
+ * In other words, if they see our decrement (presumably to
+ * aggregate zero, as that is the only time it matters) they
+ * will also see our critical section.
+ */
+ this_cpu_dec(*sem->read_count);
+ rcuwait_wake_up(&sem->writer);
+ }
preempt_enable();
-
- rwsem_release(&sem->rw_sem.dep_map, _RET_IP_);
}
+extern bool percpu_is_read_locked(struct percpu_rw_semaphore *);
extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);
+static inline bool percpu_is_write_locked(struct percpu_rw_semaphore *sem)
+{
+ return atomic_read(&sem->block);
+}
+
extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
const char *, struct lock_class_key *);
@@ -110,29 +141,19 @@ extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
__percpu_init_rwsem(sem, #sem, &rwsem_key); \
})
-#define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem)
-
-#define percpu_rwsem_assert_held(sem) \
- lockdep_assert_held(&(sem)->rw_sem)
+#define percpu_rwsem_is_held(sem) lockdep_is_held(sem)
+#define percpu_rwsem_assert_held(sem) lockdep_assert_held(sem)
static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
bool read, unsigned long ip)
{
- lock_release(&sem->rw_sem.dep_map, ip);
-#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
- if (!read)
- atomic_long_set(&sem->rw_sem.owner, RWSEM_OWNER_UNKNOWN);
-#endif
+ lock_release(&sem->dep_map, ip);
}
static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
bool read, unsigned long ip)
{
- lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip);
-#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
- if (!read)
- atomic_long_set(&sem->rw_sem.owner, (long)current);
-#endif
+ lock_acquire(&sem->dep_map, 0, 1, read, 1, NULL, ip);
}
#endif
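(For orientation, the reworked semaphore keeps its old calling convention; a short sketch with an invented lock name:)

DEFINE_STATIC_PERCPU_RWSEM(my_rwsem);

	percpu_down_read(&my_rwsem);	/* cheap per-cpu increment while no writer */
	/* ... read-side critical section ... */
	percpu_up_read(&my_rwsem);

	percpu_down_write(&my_rwsem);	/* flips readers onto the atomic slow path */
	/* ... exclusive section ... */
	percpu_up_write(&my_rwsem);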
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 5e76af742c80..f1ec5ad1351c 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -6,7 +6,6 @@
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
-#include <linux/printk.h>
#include <linux/pfn.h>
#include <linux/init.h>
@@ -95,10 +94,7 @@ extern const char * const pcpu_fc_names[PCPU_FC_NR];
extern enum pcpu_fc pcpu_chosen_fc;
-typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
- size_t align);
-typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
-typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
+typedef int (pcpu_fc_cpu_to_node_fn_t)(int cpu);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);
extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
@@ -112,18 +108,16 @@ extern void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
size_t atom_size,
pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
- pcpu_fc_alloc_fn_t alloc_fn,
- pcpu_fc_free_fn_t free_fn);
+ pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn);
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+void __init pcpu_populate_pte(unsigned long addr);
extern int __init pcpu_page_first_chunk(size_t reserved_size,
- pcpu_fc_alloc_fn_t alloc_fn,
- pcpu_fc_free_fn_t free_fn,
- pcpu_fc_populate_pte_fn_t populate_pte_fn);
+ pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn);
#endif
-extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
+extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align) __alloc_size(1);
extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
extern bool is_kernel_percpu_address(unsigned long addr);
@@ -131,8 +125,8 @@ extern bool is_kernel_percpu_address(unsigned long addr);
extern void __init setup_per_cpu_areas(void);
#endif
-extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
-extern void __percpu *__alloc_percpu(size_t size, size_t align);
+extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1);
+extern void __percpu *__alloc_percpu(size_t size, size_t align) __alloc_size(1);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 4f052496cdfd..8ed5fba6d156 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -15,6 +15,9 @@
#include <linux/types.h>
#include <linux/gfp.h>
+/* percpu_counter batch for local add or sub */
+#define PERCPU_COUNTER_LOCAL_BATCH INT_MAX
+
#ifdef CONFIG_SMP
struct percpu_counter {
@@ -44,6 +47,7 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
+void percpu_counter_sync(struct percpu_counter *fbc);
static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
@@ -55,6 +59,22 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}
+/*
+ * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
+ * are accumulated in the local per-CPU counter and not in fbc->count until
+ * the local count overflows PERCPU_COUNTER_LOCAL_BATCH. This makes counter
+ * writes efficient.
+ * But percpu_counter_sum(), instead of percpu_counter_read(), must then be
+ * used to add up the counts from each CPU so that all the local counts are
+ * accounted for. Hence percpu_counter_add_local() and
+ * percpu_counter_sub_local() should be used when a counter is updated
+ * frequently and read rarely.
+ */
+static inline void
+percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
+{
+ percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH);
+}
+
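+/*
+ * A minimal usage sketch (illustrative only; 'nr_writes' is a made-up
+ * counter name). Writes stay in the local per-CPU counter; a rare,
+ * exact read must go through percpu_counter_sum():
+ *
+ *	percpu_counter_add_local(&nr_writes, 1);
+ *	...
+ *	total = percpu_counter_sum(&nr_writes);
+ */
+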
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
s64 ret = __percpu_counter_sum(fbc);
@@ -78,9 +98,9 @@ static inline s64 percpu_counter_read(struct percpu_counter *fbc)
*/
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
- s64 ret = fbc->count;
+ /* Prevent reloads of fbc->count */
+ s64 ret = READ_ONCE(fbc->count);
- barrier(); /* Prevent reloads of fbc->count */
if (ret >= 0)
return ret;
return 0;
@@ -137,6 +157,13 @@ percpu_counter_add(struct percpu_counter *fbc, s64 amount)
preempt_enable();
}
+/* non-SMP percpu_counter_add_local is the same as percpu_counter_add */
+static inline void
+percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
+{
+ percpu_counter_add(fbc, amount);
+}
+
static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
@@ -172,6 +199,9 @@ static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
return true;
}
+static inline void percpu_counter_sync(struct percpu_counter *fbc)
+{
+}
#endif /* CONFIG_SMP */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
@@ -189,4 +219,10 @@ static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
percpu_counter_add(fbc, -amount);
}
+static inline void
+percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount)
+{
+ percpu_counter_add_local(fbc, -amount);
+}
+
#endif /* _LINUX_PERCPU_COUNTER_H */
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 71f525a35ac2..0356cb6a215d 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -24,8 +24,11 @@
/*
* ARM PMU hw_event flags
*/
-/* Event uses a 64bit counter */
-#define ARMPMU_EVT_64BIT 1
+#define ARMPMU_EVT_64BIT 0x00001 /* Event uses a 64bit counter */
+#define ARMPMU_EVT_47BIT 0x00002 /* Event uses a 47bit counter */
+
+static_assert((PERF_EVENT_FLAG_ARCH & ARMPMU_EVT_64BIT) == ARMPMU_EVT_64BIT);
+static_assert((PERF_EVENT_FLAG_ARCH & ARMPMU_EVT_47BIT) == ARMPMU_EVT_47BIT);
#define HW_OP_UNSUPPORTED 0xFFFF
#define C(_x) PERF_COUNT_HW_CACHE_##_x
@@ -73,6 +76,7 @@ enum armpmu_attr_groups {
ARMPMU_ATTR_GROUP_COMMON,
ARMPMU_ATTR_GROUP_EVENTS,
ARMPMU_ATTR_GROUP_FORMATS,
+ ARMPMU_ATTR_GROUP_CAPS,
ARMPMU_NR_ATTR_GROUPS
};
@@ -80,6 +84,7 @@ struct arm_pmu {
struct pmu pmu;
cpumask_t supported_cpus;
char *name;
+ int pmuver;
irqreturn_t (*handle_irq)(struct arm_pmu *pmu);
void (*enable)(struct perf_event *event);
void (*disable)(struct perf_event *event);
@@ -108,6 +113,8 @@ struct arm_pmu {
struct notifier_block cpu_pm_nb;
/* the attr_groups array must be NULL-terminated */
const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1];
+ /* store the PMMIR_EL1 to expose slots */
+ u64 reg_pmmir;
/* Only to be used by ACPI probing code */
unsigned long acpi_cpuid;
@@ -159,6 +166,12 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
#endif
+#ifdef CONFIG_KVM
+void kvm_host_pmu_init(struct arm_pmu *pmu);
+#else
+#define kvm_host_pmu_init(x)	do { } while (0)
+#endif
+
/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
struct arm_pmu *armpmu_alloc_atomic(void);
diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h
new file mode 100644
index 000000000000..e17e86ad6f3a
--- /dev/null
+++ b/include/linux/perf/riscv_pmu.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 SiFive
+ * Copyright (C) 2018 Andes Technology Corporation
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ *
+ */
+
+#ifndef _ASM_RISCV_PERF_EVENT_H
+#define _ASM_RISCV_PERF_EVENT_H
+
+#include <linux/perf_event.h>
+#include <linux/ptrace.h>
+#include <linux/interrupt.h>
+
+#ifdef CONFIG_RISCV_PMU
+
+/*
+ * RISCV_MAX_COUNTERS bounds the number of hardware and firmware counters
+ * that can be tracked per CPU.
+ */
+
+#define RISCV_MAX_COUNTERS 64
+#define RISCV_OP_UNSUPP (-EOPNOTSUPP)
+#define RISCV_PMU_PDEV_NAME "riscv-pmu"
+#define RISCV_PMU_LEGACY_PDEV_NAME "riscv-pmu-legacy"
+
+#define RISCV_PMU_STOP_FLAG_RESET 1
+
+struct cpu_hw_events {
+	/* number of currently enabled events */
+ int n_events;
+ /* Counter overflow interrupt */
+ int irq;
+ /* currently enabled events */
+ struct perf_event *events[RISCV_MAX_COUNTERS];
+ /* currently enabled hardware counters */
+ DECLARE_BITMAP(used_hw_ctrs, RISCV_MAX_COUNTERS);
+ /* currently enabled firmware counters */
+ DECLARE_BITMAP(used_fw_ctrs, RISCV_MAX_COUNTERS);
+};
+
+struct riscv_pmu {
+ struct pmu pmu;
+ char *name;
+
+ irqreturn_t (*handle_irq)(int irq_num, void *dev);
+
+ unsigned long cmask;
+ u64 (*ctr_read)(struct perf_event *event);
+ int (*ctr_get_idx)(struct perf_event *event);
+ int (*ctr_get_width)(int idx);
+ void (*ctr_clear_idx)(struct perf_event *event);
+ void (*ctr_start)(struct perf_event *event, u64 init_val);
+ void (*ctr_stop)(struct perf_event *event, unsigned long flag);
+ int (*event_map)(struct perf_event *event, u64 *config);
+
+ struct cpu_hw_events __percpu *hw_events;
+ struct hlist_node node;
+ struct notifier_block riscv_pm_nb;
+};
+
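+/*
+ * Hedged sketch of how a backend might wire up these callbacks; the
+ * 'my_*' names are made up for illustration:
+ *
+ *	struct riscv_pmu *pmu = riscv_pmu_alloc();
+ *
+ *	pmu->ctr_read = my_ctr_read;
+ *	pmu->ctr_get_idx = my_ctr_get_idx;
+ *	pmu->ctr_start = my_ctr_start;
+ *	pmu->ctr_stop = my_ctr_stop;
+ *	pmu->event_map = my_event_map;
+ */
+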
+#define to_riscv_pmu(p) (container_of(p, struct riscv_pmu, pmu))
+
+void riscv_pmu_start(struct perf_event *event, int flags);
+void riscv_pmu_stop(struct perf_event *event, int flags);
+unsigned long riscv_pmu_ctr_read_csr(unsigned long csr);
+int riscv_pmu_event_set_period(struct perf_event *event);
+uint64_t riscv_pmu_ctr_get_width_mask(struct perf_event *event);
+u64 riscv_pmu_event_update(struct perf_event *event);
+#ifdef CONFIG_RISCV_PMU_LEGACY
+void riscv_pmu_legacy_skip_init(void);
+#else
+static inline void riscv_pmu_legacy_skip_init(void) {}
+#endif
+struct riscv_pmu *riscv_pmu_alloc(void);
+
+#endif /* CONFIG_RISCV_PMU */
+
+#endif /* _ASM_RISCV_PERF_EVENT_H */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 547773f5894e..0031f7b4d9ab 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -26,14 +26,17 @@
# include <asm/local64.h>
#endif
+#define PERF_GUEST_ACTIVE 0x01
+#define PERF_GUEST_USER 0x02
+
struct perf_guest_info_callbacks {
- int (*is_in_guest)(void);
- int (*is_user_mode)(void);
- unsigned long (*get_guest_ip)(void);
- void (*handle_intel_pt_intr)(void);
+ unsigned int (*state)(void);
+ unsigned long (*get_ip)(void);
+ unsigned int (*handle_intel_pt_intr)(void);
};
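+
+/*
+ * Hedged sketch of a state() callback built from the flags above; the
+ * 'my_vcpu_*' helpers are made-up names:
+ *
+ *	static unsigned int my_state(void)
+ *	{
+ *		if (!my_vcpu_running())
+ *			return 0;
+ *		return PERF_GUEST_ACTIVE |
+ *		       (my_vcpu_in_user_mode() ? PERF_GUEST_USER : 0);
+ *	}
+ */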
#ifdef CONFIG_HAVE_HW_BREAKPOINT
+#include <linux/rhashtable-types.h>
#include <asm/hw_breakpoint.h>
#endif
@@ -57,11 +60,13 @@ struct perf_guest_info_callbacks {
#include <linux/cgroup.h>
#include <linux/refcount.h>
#include <linux/security.h>
+#include <linux/static_call.h>
+#include <linux/lockdep.h>
#include <asm/local.h>
struct perf_callchain_entry {
__u64 nr;
- __u64 ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
+ __u64 ip[]; /* /proc/sys/kernel/perf_event_max_stack */
};
struct perf_callchain_entry_ctx {
@@ -93,15 +98,27 @@ struct perf_raw_record {
/*
* branch stack layout:
* nr: number of taken branches stored in entries[]
+ * hw_idx: The low level index of raw branch records
+ * for the most recent branch.
+ * -1ULL means invalid/unknown.
*
 * Note that nr can vary from sample to sample;
* branches (to, from) are stored from most recent
* to least recent, i.e., entries[0] contains the most
* recent branch.
+ * The entries[] is an abstraction of raw branch records,
+ * which may not be stored in age order in HW, e.g. Intel LBR.
+ * The hw_idx is to expose the low level index of raw
+ * branch record for the most recent branch aka entries[0].
+ * The hw_idx index is between -1 (unknown) and max depth,
+ * which can be retrieved in /sys/devices/cpu/caps/branches.
+ * For the architectures whose raw branch records are
+ * already stored in age order, the hw_idx should be 0.
*/
struct perf_branch_stack {
__u64 nr;
- struct perf_branch_entry entries[0];
+ __u64 hw_idx;
+ struct perf_branch_entry entries[];
};
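+
+/*
+ * Illustrative only: consumers walk entries[] newest-first, e.g.
+ *
+ *	for (i = 0; i < bs->nr; i++)
+ *		use(bs->entries[i].from, bs->entries[i].to);
+ */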
struct task_struct;
@@ -117,6 +134,17 @@ struct hw_perf_event_extra {
};
/**
+ * hw_perf_event::flag values
+ *
+ * PERF_EVENT_FLAG_ARCH bits are reserved for architecture-specific
+ * usage.
+ */
+#define PERF_EVENT_FLAG_ARCH 0x000fffff
+#define PERF_EVENT_FLAG_USER_READ_CNT 0x80000000
+
+static_assert((PERF_EVENT_FLAG_USER_READ_CNT & PERF_EVENT_FLAG_ARCH) == 0);
+
+/**
* struct hw_perf_event - performance event hardware details:
*/
struct hw_perf_event {
@@ -154,7 +182,7 @@ struct hw_perf_event {
	 * creation and event initialization.
*/
struct arch_hw_breakpoint info;
- struct list_head bp_list;
+ struct rhlist_head bp_list;
};
#endif
struct { /* amd_iommu */
@@ -200,17 +228,26 @@ struct hw_perf_event {
*/
u64 sample_period;
- /*
- * The period we started this sample with.
- */
- u64 last_period;
+ union {
+ struct { /* Sampling */
+ /*
+ * The period we started this sample with.
+ */
+ u64 last_period;
- /*
- * However much is left of the current period; note that this is
- * a full 64bit value and allows for generation of periods longer
- * than hardware might allow.
- */
- local64_t period_left;
+ /*
+ * However much is left of the current period;
+ * note that this is a full 64bit value and
+ * allows for generation of periods longer
+ * than hardware might allow.
+ */
+ local64_t period_left;
+ };
+ struct { /* Topdown events counting for context switch */
+ u64 saved_metric;
+ u64 saved_slots;
+ };
+ };
/*
* State for throttling the event, see __perf_event_overflow() and
@@ -239,15 +276,16 @@ struct perf_event;
/**
* pmu::capabilities flags
*/
-#define PERF_PMU_CAP_NO_INTERRUPT 0x01
-#define PERF_PMU_CAP_NO_NMI 0x02
-#define PERF_PMU_CAP_AUX_NO_SG 0x04
-#define PERF_PMU_CAP_EXTENDED_REGS 0x08
-#define PERF_PMU_CAP_EXCLUSIVE 0x10
-#define PERF_PMU_CAP_ITRACE 0x20
-#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40
-#define PERF_PMU_CAP_NO_EXCLUDE 0x80
-#define PERF_PMU_CAP_AUX_OUTPUT 0x100
+#define PERF_PMU_CAP_NO_INTERRUPT 0x0001
+#define PERF_PMU_CAP_NO_NMI 0x0002
+#define PERF_PMU_CAP_AUX_NO_SG 0x0004
+#define PERF_PMU_CAP_EXTENDED_REGS 0x0008
+#define PERF_PMU_CAP_EXCLUSIVE 0x0010
+#define PERF_PMU_CAP_ITRACE 0x0020
+#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x0040
+#define PERF_PMU_CAP_NO_EXCLUDE 0x0080
+#define PERF_PMU_CAP_AUX_OUTPUT 0x0100
+#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0200
struct perf_output_handle;
@@ -354,7 +392,7 @@ struct pmu {
* ->stop() with PERF_EF_UPDATE will read the counter and update
* period/count values like ->read() would.
*
- * ->start() with PERF_EF_RELOAD will reprogram the the counter
+ * ->start() with PERF_EF_RELOAD will reprogram the counter
* value, must be preceded by a ->stop() with PERF_EF_UPDATE.
*/
void (*start) (struct perf_event *event, int flags);
@@ -407,10 +445,11 @@ struct pmu {
*/
void (*sched_task) (struct perf_event_context *ctx,
bool sched_in);
+
/*
- * PMU specific data size
+ * Kmem cache of PMU specific data
*/
- size_t task_ctx_size;
+ struct kmem_cache *task_ctx_cache;
/*
* PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
@@ -563,9 +602,13 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *,
* PERF_EV_CAP_SOFTWARE: Is a software event.
* PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
* from any CPU in the package where it is active.
+ * PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and
+ * cannot be a group leader. If an event with this flag is detached from the
+ * group it is scheduled out and moved into an unrecoverable ERROR state.
*/
#define PERF_EV_CAP_SOFTWARE BIT(0)
#define PERF_EV_CAP_READ_ACTIVE_PKG BIT(1)
+#define PERF_EV_CAP_SIBLING BIT(2)
#define SWEVENT_HLIST_BITS 8
#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
@@ -580,7 +623,10 @@ struct swevent_hlist {
#define PERF_ATTACH_TASK 0x04
#define PERF_ATTACH_TASK_DATA 0x08
#define PERF_ATTACH_ITRACE 0x10
+#define PERF_ATTACH_SCHED_CB 0x20
+#define PERF_ATTACH_CHILD 0x40
+struct bpf_prog;
struct perf_cgroup;
struct perf_buffer;
@@ -589,7 +635,23 @@ struct pmu_event_list {
struct list_head list;
};
+/*
+ * event->sibling_list is modified while holding both ctx->lock and ctx->mutex;
+ * as such, iteration must hold either lock. However, since ctx->lock is an IRQ
+ * safe lock, and is only held by the CPU doing the modification, having IRQs
+ * disabled is sufficient since it will hold off the IPIs.
+ */
+#ifdef CONFIG_PROVE_LOCKING
+#define lockdep_assert_event_ctx(event) \
+ WARN_ON_ONCE(__lockdep_enabled && \
+ (this_cpu_read(hardirqs_enabled) && \
+ lockdep_is_held(&(event)->ctx->mutex) != LOCK_STATE_HELD))
+#else
+#define lockdep_assert_event_ctx(event)
+#endif
+
#define for_each_sibling_event(sibling, event) \
+ lockdep_assert_event_ctx(event); \
if ((event)->group_leader == (event)) \
list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
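+
+/*
+ * Hedged usage sketch ('leader' is any group-leader event; 'visit' is a
+ * made-up helper):
+ *
+ *	struct perf_event *sibling;
+ *
+ *	for_each_sibling_event(sibling, leader)
+ *		visit(sibling);
+ */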
@@ -651,16 +713,6 @@ struct perf_event {
u64 total_time_running;
u64 tstamp;
- /*
- * timestamp shadows the actual context timing but it can
- * be safely used in NMI interrupt context. It reflects the
- * context time as it was when the event was last scheduled in.
- *
- * ctx_time already accounts for ctx->timestamp. Therefore to
- * compute ctx_time for a sample, simply add perf_clock().
- */
- u64 shadow_ctx_time;
-
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
@@ -704,10 +756,14 @@ struct perf_event {
struct fasync_struct *fasync;
/* delayed work for NMIs and such */
- int pending_wakeup;
- int pending_kill;
- int pending_disable;
- struct irq_work pending;
+ unsigned int pending_wakeup;
+ unsigned int pending_kill;
+ unsigned int pending_disable;
+ unsigned int pending_sigtrap;
+ unsigned long pending_addr; /* SIGTRAP */
+ struct irq_work pending_irq;
+ struct callback_head pending_task;
+ unsigned int pending_work;
atomic_t event_limit;
@@ -726,12 +782,15 @@ struct perf_event {
struct pid_namespace *ns;
u64 id;
+ atomic64_t lost_samples;
+
u64 (*clock)(void);
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
#ifdef CONFIG_BPF_SYSCALL
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
+ u64 bpf_cookie;
#endif
#ifdef CONFIG_EVENT_TRACING
@@ -788,6 +847,7 @@ struct perf_event_context {
int nr_events;
int nr_active;
+ int nr_user;
int is_active;
int nr_stat;
int nr_freq;
@@ -805,6 +865,7 @@ struct perf_event_context {
*/
u64 time;
u64 timestamp;
+ u64 timeoffset;
/*
* These fields let us detect when two contexts have both
@@ -819,6 +880,14 @@ struct perf_event_context {
#endif
void *task_ctx_data; /* pmu specific data */
struct rcu_head rcu_head;
+
+ /*
+ * Sum (event->pending_sigtrap + event->pending_work)
+ *
+	 * The SIGTRAP is targeted at ctx->task; as such, ctx->task must not
+	 * be changed until the signal is delivered.
+ */
+ local_t nr_pending;
};
/*
@@ -828,7 +897,7 @@ struct perf_event_context {
#define PERF_NR_CONTEXTS 4
/**
- * struct perf_event_cpu_context - per cpu event context structure
+ * struct perf_cpu_context - per cpu event context structure
*/
struct perf_cpu_context {
struct perf_event_context ctx;
@@ -850,6 +919,13 @@ struct perf_cpu_context {
int sched_cb_usage;
int online;
+ /*
+ * Per-CPU storage for iterators used in visit_groups_merge. The default
+ * storage is of size 2 to hold the CPU and any CPU event iterators.
+ */
+ int heap_size;
+ struct perf_event **heap;
+ struct perf_event *heap_default[2];
};
struct perf_output_handle {
@@ -880,6 +956,8 @@ struct bpf_perf_event_data_kern {
struct perf_cgroup_info {
u64 time;
u64 timestamp;
+ u64 timeoffset;
+ int active;
};
struct perf_cgroup {
@@ -917,13 +995,11 @@ extern void perf_event_itrace_started(struct perf_event *event);
extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);
-extern int perf_num_counters(void);
-extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
struct task_struct *next);
-extern int perf_event_init_task(struct task_struct *child);
+extern int perf_event_init_task(struct task_struct *child, u64 clone_flags);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
@@ -962,18 +1038,20 @@ struct perf_sample_data {
* Fields set by perf_sample_data_init(), group so as to
* minimize the cachelines touched.
*/
- u64 addr;
- struct perf_raw_record *raw;
- struct perf_branch_stack *br_stack;
+ u64 sample_flags;
u64 period;
- u64 weight;
- u64 txn;
- union perf_mem_data_src data_src;
/*
* The other fields, optionally {set,used} by
* perf_{prepare,output}_sample().
*/
+ struct perf_branch_stack *br_stack;
+ union perf_sample_weight weight;
+ union perf_mem_data_src data_src;
+ u64 txn;
+ u64 addr;
+ struct perf_raw_record *raw;
+
u64 type;
u64 ip;
struct {
@@ -990,17 +1068,14 @@ struct perf_sample_data {
struct perf_callchain_entry *callchain;
u64 aux_size;
- /*
- * regs_user may point to task_pt_regs or to regs_user_copy, depending
- * on arch details.
- */
struct perf_regs regs_user;
- struct pt_regs regs_user_copy;
-
struct perf_regs regs_intr;
u64 stack_user_size;
u64 phys_addr;
+ u64 cgroup;
+ u64 data_page_size;
+ u64 code_page_size;
} ____cacheline_aligned;
/* default value for data source */
@@ -1014,13 +1089,30 @@ static inline void perf_sample_data_init(struct perf_sample_data *data,
u64 addr, u64 period)
{
/* remaining struct members initialized in perf_prepare_sample() */
- data->addr = addr;
- data->raw = NULL;
- data->br_stack = NULL;
+ data->sample_flags = PERF_SAMPLE_PERIOD;
data->period = period;
- data->weight = 0;
- data->data_src.val = PERF_MEM_NA;
- data->txn = 0;
+
+ if (addr) {
+ data->addr = addr;
+ data->sample_flags |= PERF_SAMPLE_ADDR;
+ }
+}
+
+/*
+ * Clear all bitfields in the perf_branch_entry.
+ * The to and from fields are not cleared because they are
+ * systematically modified by caller.
+ */
+static inline void perf_clear_branch_entry_bitfields(struct perf_branch_entry *br)
+{
+ br->mispred = 0;
+ br->predicted = 0;
+ br->in_tx = 0;
+ br->abort = 0;
+ br->cycles = 0;
+ br->type = 0;
+ br->spec = PERF_BR_SPEC_NA;
+ br->reserved = 0;
}
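+
+/*
+ * Hedged usage sketch: a PMU driver would typically clear the bitfields
+ * first and then fill in from/to, e.g.
+ *
+ *	perf_clear_branch_entry_bitfields(&e);
+ *	e.from = from;
+ *	e.to = to;
+ */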
extern void perf_output_sample(struct perf_output_handle *handle,
@@ -1145,30 +1237,24 @@ DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
* which is guaranteed by us not actually scheduling inside other swevents
* because those disable preemption.
*/
-static __always_inline void
-perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
+static __always_inline void __perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
- if (static_key_false(&perf_swevent_enabled[event_id])) {
- struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
+ struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
- perf_fetch_caller_regs(regs);
- ___perf_sw_event(event_id, nr, regs, addr);
- }
+ perf_fetch_caller_regs(regs);
+ ___perf_sw_event(event_id, nr, regs, addr);
}
extern struct static_key_false perf_sched_events;
-static __always_inline bool
-perf_sw_migrate_enabled(void)
+static __always_inline bool __perf_sw_enabled(int swevt)
{
- if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
- return true;
- return false;
+ return static_key_false(&perf_swevent_enabled[swevt]);
}
static inline void perf_event_task_migrate(struct task_struct *task)
{
- if (perf_sw_migrate_enabled())
+ if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS))
task->sched_migrated = 1;
}
@@ -1178,11 +1264,9 @@ static inline void perf_event_task_sched_in(struct task_struct *prev,
if (static_branch_unlikely(&perf_sched_events))
__perf_event_task_sched_in(prev, task);
- if (perf_sw_migrate_enabled() && task->sched_migrated) {
- struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
-
- perf_fetch_caller_regs(regs);
- ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
+ if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS) &&
+ task->sched_migrated) {
+ __perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
task->sched_migrated = 0;
}
}
@@ -1190,7 +1274,15 @@ static inline void perf_event_task_sched_in(struct task_struct *prev,
static inline void perf_event_task_sched_out(struct task_struct *prev,
struct task_struct *next)
{
- perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
+ if (__perf_sw_enabled(PERF_COUNT_SW_CONTEXT_SWITCHES))
+ __perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
+
+#ifdef CONFIG_CGROUP_PERF
+ if (__perf_sw_enabled(PERF_COUNT_SW_CGROUP_SWITCHES) &&
+ perf_cgroup_from_task(prev, NULL) !=
+ perf_cgroup_from_task(next, NULL))
+ __perf_sw_event_sched(PERF_COUNT_SW_CGROUP_SWITCHES, 1, 0);
+#endif
if (static_branch_unlikely(&perf_sched_events))
__perf_event_task_sched_out(prev, next);
@@ -1204,14 +1296,40 @@ extern void perf_event_bpf_event(struct bpf_prog *prog,
enum perf_bpf_event_type type,
u16 flags);
-extern struct perf_guest_info_callbacks *perf_guest_cbs;
-extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
-extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+#ifdef CONFIG_GUEST_PERF_EVENTS
+extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
+
+DECLARE_STATIC_CALL(__perf_guest_state, *perf_guest_cbs->state);
+DECLARE_STATIC_CALL(__perf_guest_get_ip, *perf_guest_cbs->get_ip);
+DECLARE_STATIC_CALL(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr);
+
+static inline unsigned int perf_guest_state(void)
+{
+ return static_call(__perf_guest_state)();
+}
+static inline unsigned long perf_guest_get_ip(void)
+{
+ return static_call(__perf_guest_get_ip)();
+}
+static inline unsigned int perf_guest_handle_intel_pt_intr(void)
+{
+ return static_call(__perf_guest_handle_intel_pt_intr)();
+}
+extern void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
+extern void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
+#else
+static inline unsigned int perf_guest_state(void) { return 0; }
+static inline unsigned long perf_guest_get_ip(void) { return 0; }
+static inline unsigned int perf_guest_handle_intel_pt_intr(void) { return 0; }
+#endif /* CONFIG_GUEST_PERF_EVENTS */
extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_namespaces(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);
+extern void perf_event_text_poke(const void *addr,
+ const void *old_bytes, size_t old_len,
+ const void *new_bytes, size_t new_len);
/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
@@ -1224,6 +1342,8 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);
+extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
+extern void put_callchain_entry(int rctx);
extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;
@@ -1260,15 +1380,12 @@ extern int sysctl_perf_cpu_time_max_percent;
extern void perf_sample_event_took(u64 sample_len_ns);
-extern int perf_proc_update_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
-extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
-
+int perf_proc_update_handler(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos);
+int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos);
int perf_event_max_stack_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+ void *buffer, size_t *lenp, loff_t *ppos);
/* Access to perf_event_open(2) syscall. */
#define PERF_SECURITY_OPEN 0
@@ -1285,7 +1402,7 @@ static inline int perf_is_paranoid(void)
static inline int perf_allow_kernel(struct perf_event_attr *attr)
{
- if (sysctl_perf_event_paranoid > 1 && !capable(CAP_SYS_ADMIN))
+ if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
return -EACCES;
return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
@@ -1293,7 +1410,7 @@ static inline int perf_allow_kernel(struct perf_event_attr *attr)
static inline int perf_allow_cpu(struct perf_event_attr *attr)
{
- if (sysctl_perf_event_paranoid > 0 && !capable(CAP_SYS_ADMIN))
+ if (sysctl_perf_event_paranoid > 0 && !perfmon_capable())
return -EACCES;
return security_perf_event_open(attr, PERF_SECURITY_CPU);
@@ -1301,7 +1418,7 @@ static inline int perf_allow_cpu(struct perf_event_attr *attr)
static inline int perf_allow_tracepoint(struct perf_event_attr *attr)
{
- if (sysctl_perf_event_paranoid > -1 && !capable(CAP_SYS_ADMIN))
+ if (sysctl_perf_event_paranoid > -1 && !perfmon_capable())
return -EPERM;
return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT);
@@ -1363,13 +1480,17 @@ perf_event_addr_filters(struct perf_event *event)
}
extern void perf_event_addr_filters_sync(struct perf_event *event);
+extern void perf_report_aux_output_id(struct perf_event *event, u64 hw_id);
extern int perf_output_begin(struct perf_output_handle *handle,
+ struct perf_sample_data *data,
struct perf_event *event, unsigned int size);
extern int perf_output_begin_forward(struct perf_output_handle *handle,
- struct perf_event *event,
- unsigned int size);
+ struct perf_sample_data *data,
+ struct perf_event *event,
+ unsigned int size);
extern int perf_output_begin_backward(struct perf_output_handle *handle,
+ struct perf_sample_data *data,
struct perf_event *event,
unsigned int size);
@@ -1412,7 +1533,8 @@ perf_event_task_sched_in(struct task_struct *prev,
static inline void
perf_event_task_sched_out(struct task_struct *prev,
struct task_struct *next) { }
-static inline int perf_event_init_task(struct task_struct *child) { return 0; }
+static inline int perf_event_init_task(struct task_struct *child,
+ u64 clone_flags) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
@@ -1441,15 +1563,8 @@ static inline int perf_event_refresh(struct perf_event *event, int refresh)
static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
static inline void
-perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { }
-static inline void
perf_bp_event(struct perf_event *event, void *data) { }
-static inline int perf_register_guest_info_callbacks
-(struct perf_guest_info_callbacks *callbacks) { return 0; }
-static inline int perf_unregister_guest_info_callbacks
-(struct perf_guest_info_callbacks *callbacks) { return 0; }
-
static inline void perf_event_mmap(struct vm_area_struct *vma) { }
typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data);
@@ -1462,6 +1577,11 @@ static inline void perf_event_exec(void) { }
static inline void perf_event_comm(struct task_struct *tsk, bool exec) { }
static inline void perf_event_namespaces(struct task_struct *tsk) { }
static inline void perf_event_fork(struct task_struct *tsk) { }
+static inline void perf_event_text_poke(const void *addr,
+ const void *old_bytes,
+ size_t old_len,
+ const void *new_bytes,
+ size_t new_len) { }
static inline void perf_event_init(void) { }
static inline int perf_swevent_get_recursion_context(void) { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx) { }
@@ -1507,6 +1627,18 @@ struct perf_pmu_events_ht_attr {
const char *event_str_noht;
};
+struct perf_pmu_events_hybrid_attr {
+ struct device_attribute attr;
+ u64 id;
+ const char *event_str;
+ u64 pmu_type;
+};
+
+struct perf_pmu_format_hybrid_attr {
+ struct device_attribute attr;
+ u64 pmu_type;
+};
+
ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
char *page);
@@ -1523,6 +1655,12 @@ static struct perf_pmu_events_attr _var = { \
.event_str = _str, \
};
+#define PMU_EVENT_ATTR_ID(_name, _show, _id) \
+ (&((struct perf_pmu_events_attr[]) { \
+ { .attr = __ATTR(_name, 0444, _show, NULL), \
+ .id = _id, } \
+ })[0].attr.attr)
+
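+/*
+ * Hedged usage sketch ('my_events_sysfs_show' is a made-up show routine):
+ *
+ *	static struct attribute *my_event_attrs[] = {
+ *		PMU_EVENT_ATTR_ID(cycles, my_events_sysfs_show, 0x11),
+ *		NULL,
+ *	};
+ */
+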
#define PMU_FORMAT_ATTR(_name, _format) \
static ssize_t \
_name##_show(struct device *dev, \
@@ -1548,4 +1686,62 @@ extern void __weak arch_perf_update_userpage(struct perf_event *event,
struct perf_event_mmap_page *userpg,
u64 now);
+#ifdef CONFIG_MMU
+extern __weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr);
+#endif
+
+/*
+ * Snapshot branch stack on software events.
+ *
+ * A branch stack can be very useful in understanding software events. For
+ * example, when a long function, e.g. sys_perf_event_open, returns an
+ * errno, it is not obvious why the function failed. The branch stack can
+ * provide very helpful information in this type of scenario.
+ *
+ * On a software event, it is necessary to stop the hardware branch recorder
+ * quickly. Otherwise, the hardware register/buffer will be flooded with
+ * entries from the triggering event. Therefore, a static call is used to
+ * stop the hardware recorder.
+ */
+
+/*
+ * cnt is the number of entries allocated for entries.
+ * Return the number of entries copied into entries.
+ */
+typedef int (perf_snapshot_branch_stack_t)(struct perf_branch_entry *entries,
+ unsigned int cnt);
+DECLARE_STATIC_CALL(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t);
+
+#ifndef PERF_NEEDS_LOPWR_CB
+static inline void perf_lopwr_cb(bool mode)
+{
+}
+#endif
+
+#ifdef CONFIG_PERF_EVENTS
+static inline bool branch_sample_no_flags(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_FLAGS;
+}
+
+static inline bool branch_sample_no_cycles(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_CYCLES;
+}
+
+static inline bool branch_sample_type(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_TYPE_SAVE;
+}
+
+static inline bool branch_sample_hw_index(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX;
+}
+
+static inline bool branch_sample_priv(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE;
+}
+#endif /* CONFIG_PERF_EVENTS */
#endif /* _LINUX_PERF_EVENT_H */
diff --git a/include/linux/perf_event_api.h b/include/linux/perf_event_api.h
new file mode 100644
index 000000000000..c2fd6048b790
--- /dev/null
+++ b/include/linux/perf_event_api.h
@@ -0,0 +1 @@
+#include <linux/perf_event.h>
diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h
index 2d12e97d5e7b..f632c5725f16 100644
--- a/include/linux/perf_regs.h
+++ b/include/linux/perf_regs.h
@@ -20,8 +20,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx);
int perf_reg_validate(u64 mask);
u64 perf_reg_abi(struct task_struct *task);
void perf_get_regs_user(struct perf_regs *regs_user,
- struct pt_regs *regs,
- struct pt_regs *regs_user_copy);
+ struct pt_regs *regs);
#else
#define PERF_REG_EXTENDED_MASK 0
@@ -42,8 +41,7 @@ static inline u64 perf_reg_abi(struct task_struct *task)
}
static inline void perf_get_regs_user(struct perf_regs *regs_user,
- struct pt_regs *regs,
- struct pt_regs *regs_user_copy)
+ struct pt_regs *regs)
{
regs_user->regs = task_pt_regs(current);
regs_user->abi = perf_reg_abi(current);
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
new file mode 100644
index 000000000000..a108b60a6962
--- /dev/null
+++ b/include/linux/pgtable.h
@@ -0,0 +1,1728 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PGTABLE_H
+#define _LINUX_PGTABLE_H
+
+#include <linux/pfn.h>
+#include <asm/pgtable.h>
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_MMU
+
+#include <linux/mm_types.h>
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <asm-generic/pgtable_uffd.h>
+#include <linux/page_table_check.h>
+
+#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
+ defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
+#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
+#endif
+
+/*
+ * On almost all architectures and configurations, 0 can be used as the
+ * upper ceiling to free_pgtables(): on many architectures it has the same
+ * effect as using TASK_SIZE. However, there is one configuration which
+ * must impose a more careful limit, to avoid freeing kernel pgtables.
+ */
+#ifndef USER_PGTABLES_CEILING
+#define USER_PGTABLES_CEILING 0UL
+#endif
+
+/*
+ * This defines the first usable user address. Platforms
+ * can override its value with custom FIRST_USER_ADDRESS
+ * defined in their respective <asm/pgtable.h>.
+ */
+#ifndef FIRST_USER_ADDRESS
+#define FIRST_USER_ADDRESS 0UL
+#endif
+
+/*
+ * This defines the generic helper for accessing the PMD page
+ * table page, although platforms can still override it
+ * via their respective <asm/pgtable.h>.
+ */
+#ifndef pmd_pgtable
+#define pmd_pgtable(pmd) pmd_page(pmd)
+#endif
+
+/*
+ * A page table page can be thought of as an array like this: pXd_t[PTRS_PER_PxD]
+ *
+ * The pXx_index() functions return the index of the entry in the page
+ * table page which would control the given virtual address
+ *
+ * As these functions may be used by the same code for different levels of
+ * page table folding, they are always available, regardless of the
+ * CONFIG_PGTABLE_LEVELS value. For the folded levels they simply return 0
+ * because in such cases PTRS_PER_PxD equals 1.
+ */
+
+static inline unsigned long pte_index(unsigned long address)
+{
+ return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+}
+#define pte_index pte_index
+
+#ifndef pmd_index
+static inline unsigned long pmd_index(unsigned long address)
+{
+ return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
+}
+#define pmd_index pmd_index
+#endif
+
+#ifndef pud_index
+static inline unsigned long pud_index(unsigned long address)
+{
+ return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
+}
+#define pud_index pud_index
+#endif
+
+#ifndef pgd_index
+/* Must be a compile-time constant, so implement it as a macro */
+#define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+#endif
+
+#ifndef pte_offset_kernel
+static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
+{
+ return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
+}
+#define pte_offset_kernel pte_offset_kernel
+#endif
+
+#if defined(CONFIG_HIGHPTE)
+#define pte_offset_map(dir, address) \
+ ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
+ pte_index((address)))
+#define pte_unmap(pte) kunmap_atomic((pte))
+#else
+#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
+#define pte_unmap(pte) ((void)(pte)) /* NOP */
+#endif
+
+/* Find an entry in the second-level page table. */
+#ifndef pmd_offset
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+{
+ return pud_pgtable(*pud) + pmd_index(address);
+}
+#define pmd_offset pmd_offset
+#endif
+
+#ifndef pud_offset
+static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+ return p4d_pgtable(*p4d) + pud_index(address);
+}
+#define pud_offset pud_offset
+#endif
+
+static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address)
+{
+ return (pgd + pgd_index(address));
+}
+
+/*
+ * a shortcut to get a pgd_t in a given mm
+ */
+#ifndef pgd_offset
+#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
+#endif
+
+/*
+ * a shortcut which implies the use of the kernel's pgd, instead
+ * of a process's
+ */
+#ifndef pgd_offset_k
+#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
+#endif
+
+/*
+ * In many cases it is known that a virtual address is mapped at PMD or PTE
+ * level, so instead of traversing all the page table levels, we can get a
+ * pointer to the PMD entry in user or kernel page table or translate a virtual
+ * address to the pointer in the PTE in the kernel page tables with simple
+ * helpers.
+ */
+static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
+{
+ return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
+}
+
+static inline pmd_t *pmd_off_k(unsigned long va)
+{
+ return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
+}
+
+static inline pte_t *virt_to_kpte(unsigned long vaddr)
+{
+ pmd_t *pmd = pmd_off_k(vaddr);
+
+ return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
+}
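+
+/*
+ * Illustrative only: translating a kernel virtual address to its PTE,
+ * provided the address is mapped at PTE level:
+ *
+ *	pte_t *pte = virt_to_kpte(vaddr);
+ *
+ *	if (pte && pte_present(*pte))
+ *		...
+ */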
+
+#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+extern int ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep,
+ pte_t entry, int dirty);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty);
+extern int pudp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pud_t *pudp,
+ pud_t entry, int dirty);
+#else
+static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty)
+{
+ BUILD_BUG();
+ return 0;
+}
+static inline int pudp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pud_t *pudp,
+ pud_t entry, int dirty)
+{
+ BUILD_BUG();
+ return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ int r = 1;
+ if (!pte_young(pte))
+ r = 0;
+ else
+ set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
+ return r;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ pmd_t pmd = *pmdp;
+ int r = 1;
+ if (!pmd_young(pmd))
+ r = 0;
+ else
+ set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
+ return r;
+}
+#else
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ BUILD_BUG();
+ return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+int ptep_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+#else
+/*
+ * Despite being relevant only to THP, this API is called from generic rmap
+ * code under PageTransHuge(), and hence needs a dummy implementation for !THP.
+ */
+static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ BUILD_BUG();
+ return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef arch_has_hw_pte_young
+/*
+ * Return whether the accessed bit is supported on the local CPU.
+ *
+ * This stub assumes accessing through an old PTE triggers a page fault.
+ * Architectures that automatically set the access bit should override it.
+ */
+static inline bool arch_has_hw_pte_young(void)
+{
+ return false;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ pte_clear(mm, address, ptep);
+ page_table_check_pte_clear(mm, address, pte);
+ return pte;
+}
+#endif
+
+static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ ptep_get_and_clear(mm, addr, ptep);
+}
+
+#ifndef __HAVE_ARCH_PTEP_GET
+static inline pte_t ptep_get(pte_t *ptep)
+{
+ return READ_ONCE(*ptep);
+}
+#endif
+
+#ifdef CONFIG_GUP_GET_PTE_LOW_HIGH
+/*
+ * WARNING: only to be used in the get_user_pages_fast() implementation.
+ *
+ * With get_user_pages_fast(), we walk down the pagetables without taking any
+ * locks. For this we would like to load the pointers atomically, but sometimes
+ * that is not possible (e.g. without expensive cmpxchg8b on x86_32 PAE). What
+ * we do have is the guarantee that a PTE will only either go from not present
+ * to present, or present to not present or both -- it will not switch to a
+ * completely different present page without a TLB flush in between; something
+ * that we are blocking by holding interrupts off.
+ *
+ * Setting ptes from not present to present goes:
+ *
+ * ptep->pte_high = h;
+ * smp_wmb();
+ * ptep->pte_low = l;
+ *
+ * And present to not present goes:
+ *
+ * ptep->pte_low = 0;
+ * smp_wmb();
+ * ptep->pte_high = 0;
+ *
+ * We must ensure here that the load of pte_low sees 'l' IFF pte_high sees 'h'.
+ * We load pte_high *after* loading pte_low, which ensures we don't see an older
+ * value of pte_high. *Then* we recheck pte_low, which ensures that we haven't
+ * picked up a changed pte high. We might have gotten rubbish values from
+ * pte_low and pte_high, but we are guaranteed that pte_low will not have the
+ * present bit set *unless* it is 'l'. Because get_user_pages_fast() only
+ * operates on present ptes we're safe.
+ */
+static inline pte_t ptep_get_lockless(pte_t *ptep)
+{
+ pte_t pte;
+
+ do {
+ pte.pte_low = ptep->pte_low;
+ smp_rmb();
+ pte.pte_high = ptep->pte_high;
+ smp_rmb();
+ } while (unlikely(pte.pte_low != ptep->pte_low));
+
+ return pte;
+}
+#else /* CONFIG_GUP_GET_PTE_LOW_HIGH */
+/*
+ * We require that the PTE can be read atomically.
+ */
+static inline pte_t ptep_get_lockless(pte_t *ptep)
+{
+ return ptep_get(ptep);
+}
+#endif /* CONFIG_GUP_GET_PTE_LOW_HIGH */
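+
+/*
+ * Illustrative only: lockless walkers such as get_user_pages_fast() must
+ * go through ptep_get_lockless() rather than dereferencing ptep directly:
+ *
+ *	pte_t pte = ptep_get_lockless(ptep);
+ *
+ *	if (!pte_present(pte))
+ *		return 0;
+ */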
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ pmd_t pmd = *pmdp;
+
+ pmd_clear(pmdp);
+ page_table_check_pmd_clear(mm, address, pmd);
+
+ return pmd;
+}
+#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
+#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
+static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pud_t *pudp)
+{
+ pud_t pud = *pudp;
+
+ pud_clear(pudp);
+ page_table_check_pud_clear(mm, address, pud);
+
+ return pud;
+}
+#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
+static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ int full)
+{
+ return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
+static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
+ unsigned long address, pud_t *pudp,
+ int full)
+{
+ return pudp_huge_get_and_clear(mm, address, pudp);
+}
+#endif
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep,
+ int full)
+{
+ pte_t pte;
+ pte = ptep_get_and_clear(mm, address, ptep);
+ return pte;
+}
+#endif
+
+
+/*
+ * If two threads concurrently fault at the same page, the thread that
+ * won the race updates the PTE and its local TLB/Cache. The other thread
+ * gives up, simply does nothing, and continues; on architectures where
+ * software can update the TLB, the local TLB can be updated here to avoid
+ * the next page fault. This function updates the TLB only; it does nothing
+ * with the cache or anything else, which is the difference from
+ * update_mmu_cache().
+ */
+#ifndef __HAVE_ARCH_UPDATE_MMU_TLB
+static inline void update_mmu_tlb(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+}
+#define __HAVE_ARCH_UPDATE_MMU_TLB
+#endif
+
+/*
+ * Some architectures may be able to avoid expensive synchronization
+ * primitives when modifications are made to PTE's which are already
+ * not present, or in the process of an address space destruction.
+ */
+#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
+static inline void pte_clear_not_present_full(struct mm_struct *mm,
+ unsigned long address,
+ pte_t *ptep,
+ int full)
+{
+ pte_clear(mm, address, ptep);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
+extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
+extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp);
+extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pud_t *pudp);
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
+struct mm_struct;
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
+{
+ pte_t old_pte = *ptep;
+ set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
+}
+#endif
+
+/*
+ * On some architectures the hardware does not set the page access bit when
+ * accessing a memory page; it is the responsibility of software to set it.
+ * Tracking the access bit this way incurs an extra page-fault penalty, so as
+ * an optimization the access bit can be set during the whole page-fault flow
+ * on these arches.
+ * To differentiate it from the pte_mkyoung macro, this macro is used on
+ * platforms where software maintains the page access bit.
+ */
+#ifndef pte_sw_mkyoung
+static inline pte_t pte_sw_mkyoung(pte_t pte)
+{
+ return pte;
+}
+#define pte_sw_mkyoung pte_sw_mkyoung
+#endif
+
+#ifndef pte_savedwrite
+#define pte_savedwrite pte_write
+#endif
+
+#ifndef pte_mk_savedwrite
+#define pte_mk_savedwrite pte_mkwrite
+#endif
+
+#ifndef pte_clear_savedwrite
+#define pte_clear_savedwrite pte_wrprotect
+#endif
+
+#ifndef pmd_savedwrite
+#define pmd_savedwrite pmd_write
+#endif
+
+#ifndef pmd_mk_savedwrite
+#define pmd_mk_savedwrite pmd_mkwrite
+#endif
+
+#ifndef pmd_clear_savedwrite
+#define pmd_clear_savedwrite pmd_wrprotect
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ pmd_t old_pmd = *pmdp;
+ set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
+}
+#else
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ BUILD_BUG();
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static inline void pudp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pud_t *pudp)
+{
+ pud_t old_pud = *pudp;
+
+ set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
+}
+#else
+static inline void pudp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pud_t *pudp)
+{
+ BUILD_BUG();
+}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+#endif
+
+#ifndef pmdp_collapse_flush
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+#else
+static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ BUILD_BUG();
+ return *pmdp;
+}
+#define pmdp_collapse_flush pmdp_collapse_flush
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pgtable);
+#endif
+
+#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * This is an implementation of pmdp_establish() that is only suitable for an
+ * architecture that doesn't have hardware dirty/accessed bits. In this case we
+ * can't race with a CPU which sets these bits, and a non-atomic approach is fine.
+ */
+static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+ pmd_t old_pmd = *pmdp;
+ set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+ return old_pmd;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_INVALIDATE
+extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
+
+/*
+ * pmdp_invalidate_ad() invalidates the PMD while changing a transparent
+ * hugepage mapping in the page tables. This function is similar to
+ * pmdp_invalidate(), but should only be used if the access and dirty bits would
+ * not be cleared by the software in the new PMD value. The function ensures
+ * that hardware changes to the access and dirty bits are not lost.
+ *
+ * Doing so allows certain architectures to avoid a TLB flush in most cases.
+ * Another TLB flush might still be necessary later if the PMD update itself
+ * requires such a flush (e.g., if protection was set to be stricter). Even
+ * when a TLB flush is needed because of the update, the caller may be able
+ * to batch these TLB flushing operations, so fewer TLB flush operations are
+ * needed overall.
+ */
+extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+#endif
+
+#ifndef __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+ return pte_val(pte_a) == pte_val(pte_b);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTE_UNUSED
+/*
+ * Some architectures provide facilities to virtualization guests
+ * so that they can flag allocated pages as unused. This allows the
+ * host to transparently reclaim unused pages. This function returns
+ * whether the pte's page is unused.
+ */
+static inline int pte_unused(pte_t pte)
+{
+ return 0;
+}
+#endif
+
+#ifndef pte_access_permitted
+#define pte_access_permitted(pte, write) \
+ (pte_present(pte) && (!(write) || pte_write(pte)))
+#endif
+
+#ifndef pmd_access_permitted
+#define pmd_access_permitted(pmd, write) \
+ (pmd_present(pmd) && (!(write) || pmd_write(pmd)))
+#endif
+
+#ifndef pud_access_permitted
+#define pud_access_permitted(pud, write) \
+ (pud_present(pud) && (!(write) || pud_write(pud)))
+#endif
+
+#ifndef p4d_access_permitted
+#define p4d_access_permitted(p4d, write) \
+ (p4d_present(p4d) && (!(write) || p4d_write(p4d)))
+#endif
+
+#ifndef pgd_access_permitted
+#define pgd_access_permitted(pgd, write) \
+ (pgd_present(pgd) && (!(write) || pgd_write(pgd)))
+#endif
+
+#ifndef __HAVE_ARCH_PMD_SAME
+static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+ return pmd_val(pmd_a) == pmd_val(pmd_b);
+}
+
+static inline int pud_same(pud_t pud_a, pud_t pud_b)
+{
+ return pud_val(pud_a) == pud_val(pud_b);
+}
+#endif
+
+#ifndef __HAVE_ARCH_P4D_SAME
+static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
+{
+ return p4d_val(p4d_a) == p4d_val(p4d_b);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PGD_SAME
+static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
+{
+ return pgd_val(pgd_a) == pgd_val(pgd_b);
+}
+#endif
+
+/*
+ * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
+ * TLB flush will be required as a result of the "set". For example, use
+ * in scenarios where it is known ahead of time that the routine is
+ * setting non-present entries, or re-setting an existing entry to the
+ * same value. Otherwise, use the typical "set" helpers and flush the
+ * TLB.
+ */
+#define set_pte_safe(ptep, pte) \
+({ \
+ WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
+ set_pte(ptep, pte); \
+})
+
+#define set_pmd_safe(pmdp, pmd) \
+({ \
+ WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
+ set_pmd(pmdp, pmd); \
+})
+
+#define set_pud_safe(pudp, pud) \
+({ \
+ WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
+ set_pud(pudp, pud); \
+})
+
+#define set_p4d_safe(p4dp, p4d) \
+({ \
+ WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
+ set_p4d(p4dp, p4d); \
+})
+
+#define set_pgd_safe(pgdp, pgd) \
+({ \
+ WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
+ set_pgd(pgdp, pgd); \
+})
+
+#ifndef __HAVE_ARCH_DO_SWAP_PAGE
+/*
+ * Some architectures support metadata associated with a page. When a
+ * page is being swapped out, this metadata must be saved so it can be
+ * restored when the page is swapped back in. SPARC M7 and newer
+ * processors support an ADI (Application Data Integrity) tag for the
+ * page as metadata for the page. arch_do_swap_page() can restore this
+ * metadata when a page is swapped back in.
+ */
+static inline void arch_do_swap_page(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t pte, pte_t oldpte)
+{
+}
+#endif
+
+#ifndef __HAVE_ARCH_UNMAP_ONE
+/*
+ * Some architectures support metadata associated with a page. When a
+ * page is being swapped out, this metadata must be saved so it can be
+ * restored when the page is swapped back in. SPARC M7 and newer
+ * processors support an ADI (Application Data Integrity) tag for the
+ * page as metadata for the page. arch_unmap_one() can save this
+ * metadata on a swap-out of a page.
+ */
+static inline int arch_unmap_one(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t orig_pte)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Allow architectures to preserve additional metadata associated with
+ * swapped-out pages. The corresponding __HAVE_ARCH_SWAP_* macros and function
+ * prototypes must be defined in the arch-specific asm/pgtable.h file.
+ */
+#ifndef __HAVE_ARCH_PREPARE_TO_SWAP
+static inline int arch_prepare_to_swap(struct page *page)
+{
+ return 0;
+}
+#endif
+
+#ifndef __HAVE_ARCH_SWAP_INVALIDATE
+static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
+{
+}
+
+static inline void arch_swap_invalidate_area(int type)
+{
+}
+#endif
+
+#ifndef __HAVE_ARCH_SWAP_RESTORE
+static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
+{
+}
+#endif
+
+#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
+#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
+#endif
+
+#ifndef __HAVE_ARCH_MOVE_PTE
+#define move_pte(pte, prot, old_addr, new_addr) (pte)
+#endif
+
+#ifndef pte_accessible
+# define pte_accessible(mm, pte) ((void)(pte), 1)
+#endif
+
+#ifndef flush_tlb_fix_spurious_fault
+#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
+#endif
+
+/*
+ * When walking page tables, get the address of the next boundary,
+ * or the end address of the range if that comes earlier. Although no
+ * vma end wraps to 0, the rounded-up __boundary may wrap to 0 throughout.
+ */
+
+#define pgd_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+
+#ifndef p4d_addr_end
+#define p4d_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+#endif
+
+#ifndef pud_addr_end
+#define pud_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+#endif
+
+#ifndef pmd_addr_end
+#define pmd_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+#endif
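+
+/*
+ * Hedged sketch of the canonical walk pattern these helpers support
+ * (PMD level shown; the other levels are analogous):
+ *
+ *	do {
+ *		next = pmd_addr_end(addr, end);
+ *		... operate on [addr, next) ...
+ *	} while (addr = next, addr != end);
+ */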
+
+/*
+ * When walking page tables, we usually want to skip any p?d_none entries;
+ * and any p?d_bad entries - reporting the error before resetting to none.
+ * Do the tests inline, but report and clear the bad entry in mm/memory.c.
+ */
+void pgd_clear_bad(pgd_t *);
+
+#ifndef __PAGETABLE_P4D_FOLDED
+void p4d_clear_bad(p4d_t *);
+#else
+#define p4d_clear_bad(p4d) do { } while (0)
+#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
+void pud_clear_bad(pud_t *);
+#else
+#define pud_clear_bad(pud)        do { } while (0)
+#endif
+
+void pmd_clear_bad(pmd_t *);
+
+static inline int pgd_none_or_clear_bad(pgd_t *pgd)
+{
+ if (pgd_none(*pgd))
+ return 1;
+ if (unlikely(pgd_bad(*pgd))) {
+ pgd_clear_bad(pgd);
+ return 1;
+ }
+ return 0;
+}
+
+static inline int p4d_none_or_clear_bad(p4d_t *p4d)
+{
+ if (p4d_none(*p4d))
+ return 1;
+ if (unlikely(p4d_bad(*p4d))) {
+ p4d_clear_bad(p4d);
+ return 1;
+ }
+ return 0;
+}
+
+static inline int pud_none_or_clear_bad(pud_t *pud)
+{
+ if (pud_none(*pud))
+ return 1;
+ if (unlikely(pud_bad(*pud))) {
+ pud_clear_bad(pud);
+ return 1;
+ }
+ return 0;
+}
+
+static inline int pmd_none_or_clear_bad(pmd_t *pmd)
+{
+ if (pmd_none(*pmd))
+ return 1;
+ if (unlikely(pmd_bad(*pmd))) {
+ pmd_clear_bad(pmd);
+ return 1;
+ }
+ return 0;
+}
+
+static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t *ptep)
+{
+ /*
+ * Get the current pte state, but zero it out to make it
+ * non-present, preventing the hardware from asynchronously
+ * updating it.
+ */
+ return ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+
+static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ /*
+ * The pte is non-present, so there's no hardware state to
+ * preserve.
+ */
+ set_pte_at(vma->vm_mm, addr, ptep, pte);
+}
+
+#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+/*
+ * Start a pte protection read-modify-write transaction, which
+ * protects against asynchronous hardware modifications to the pte.
+ * The intention is not to prevent the hardware from making pte
+ * updates, but to prevent any updates it may make from being lost.
+ *
+ * This does not protect against other software modifications of the
+ * pte; the appropriate pte lock must be held over the transaction.
+ *
+ * Note that this interface is intended to be batchable, meaning that
+ * ptep_modify_prot_commit may not actually update the pte, but merely
+ * queue the update to be done at some later time. The update must be
+ * actually committed before the pte lock is released, however.
+ */
+static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t *ptep)
+{
+ return __ptep_modify_prot_start(vma, addr, ptep);
+}
+
+/*
+ * Commit an update to a pte, leaving any hardware-controlled bits in
+ * the PTE unmodified.
+ */
+static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t *ptep, pte_t old_pte, pte_t pte)
+{
+ __ptep_modify_prot_commit(vma, addr, ptep, pte);
+}
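+
+/*
+ * A minimal usage sketch, assuming the pte lock is held and newprot is the
+ * desired protection; hardware A/D bit updates that race with the
+ * modification are preserved:
+ *
+ *	pte_t old_pte = ptep_modify_prot_start(vma, addr, ptep);
+ *	pte_t new_pte = pte_modify(old_pte, newprot);
+ *	ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
+ */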
+#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
+#endif /* CONFIG_MMU */
+
+/*
+ * No-op macros that just return the current protection value. Defined here
+ * because these macros can be used even if CONFIG_MMU is not defined.
+ */
+
+#ifndef pgprot_nx
+#define pgprot_nx(prot) (prot)
+#endif
+
+#ifndef pgprot_noncached
+#define pgprot_noncached(prot) (prot)
+#endif
+
+#ifndef pgprot_writecombine
+#define pgprot_writecombine pgprot_noncached
+#endif
+
+#ifndef pgprot_writethrough
+#define pgprot_writethrough pgprot_noncached
+#endif
+
+#ifndef pgprot_device
+#define pgprot_device pgprot_noncached
+#endif
+
+#ifndef pgprot_mhp
+#define pgprot_mhp(prot) (prot)
+#endif
+
+#ifdef CONFIG_MMU
+#ifndef pgprot_modify
+#define pgprot_modify pgprot_modify
+static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+{
+ if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
+ newprot = pgprot_noncached(newprot);
+ if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
+ newprot = pgprot_writecombine(newprot);
+ if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
+ newprot = pgprot_device(newprot);
+ return newprot;
+}
+#endif
+#endif /* CONFIG_MMU */
+
+#ifndef pgprot_encrypted
+#define pgprot_encrypted(prot) (prot)
+#endif
+
+#ifndef pgprot_decrypted
+#define pgprot_decrypted(prot) (prot)
+#endif
+
+/*
+ * A facility to provide lazy MMU batching. This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued. Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window. Note that using this
+ * interface requires that read hazards be removed from the code. A read
+ * hazard could arise in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads through
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date. This mode can only be entered and left under the protection of
+ * the page table locks for all page tables which may be modified. In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode() do {} while (0)
+#define arch_leave_lazy_mmu_mode() do {} while (0)
+#define arch_flush_lazy_mmu_mode() do {} while (0)
+#endif
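+
+/*
+ * A minimal usage sketch: batch a run of PTE updates under the page table
+ * lock so a paravirt backend may coalesce them (a no-op with the default
+ * definitions above):
+ *
+ *	arch_enter_lazy_mmu_mode();
+ *	for (; addr != end; ptep++, addr += PAGE_SIZE)
+ *		set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
+ *	arch_leave_lazy_mmu_mode();
+ */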
+
+/*
+ * A facility to provide batching of the reload of page tables and
+ * other process state with the actual context switch code for
+ * paravirtualized guests. By convention, only one of the batched
+ * update (lazy) modes (CPU, MMU) should be active at any given time,
+ * entry should never be nested, and entry and exits should always be
+ * paired. This is for sanity of maintaining and reasoning about the
+ * kernel code. In this case, the exit (end of the context switch) is
+ * in architecture-specific code, and so doesn't need a generic
+ * definition.
+ */
+#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
+#define arch_start_context_switch(prev) do {} while (0)
+#endif
+
+/*
+ * When replacing an anonymous page by a real swap entry (as opposed to a
+ * non-swap entry), we clear PG_anon_exclusive from the page and instead
+ * remember whether the flag was
+ * set in the swp pte. During fork(), we have to mark the entry as !exclusive
+ * (possibly shared). On swapin, we use that information to restore
+ * PG_anon_exclusive, which is very helpful in cases where we might have
+ * additional (e.g., FOLL_GET) references on a page and wouldn't be able to
+ * detect exclusivity.
+ *
+ * These functions don't apply to non-swap entries (e.g., migration, hwpoison,
+ * ...).
+ */
+#ifndef __HAVE_ARCH_PTE_SWP_EXCLUSIVE
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+ return pte;
+}
+
+static inline int pte_swp_exclusive(pte_t pte)
+{
+ return false;
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+ return pte;
+}
+#endif
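+
+/*
+ * A minimal sketch of the intended flow (the locals are hypothetical): when
+ * unmapping an exclusive anonymous page, mark the swap pte; on swapin, test
+ * the bit to restore PG_anon_exclusive:
+ *
+ *	swp_pte = swp_entry_to_pte(entry);
+ *	if (anon_exclusive)
+ *		swp_pte = pte_swp_mkexclusive(swp_pte);
+ *	...
+ *	if (pte_swp_exclusive(orig_pte))
+ *		... restore PG_anon_exclusive on the swapped-in page ...
+ */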
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+#endif
+#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
+static inline int pte_soft_dirty(pte_t pte)
+{
+ return 0;
+}
+
+static inline int pmd_soft_dirty(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline pte_t pte_mksoft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline pte_t pte_clear_soft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+ return 0;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+#endif
+
+#ifndef __HAVE_PFNMAP_TRACKING
+/*
+ * Interfaces that can be used by architecture code to keep track of the
+ * memory type of pfn mappings specified by remap_pfn_range() and
+ * vmf_insert_pfn().
+ */
+
+/*
+ * track_pfn_remap is called when a _new_ pfn mapping is being established
+ * by remap_pfn_range() for the physical range indicated by pfn and size.
+ */
+static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
+ unsigned long pfn, unsigned long addr,
+ unsigned long size)
+{
+ return 0;
+}
+
+/*
+ * track_pfn_insert is called when a _new_ single pfn is established
+ * by vmf_insert_pfn().
+ */
+static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
+ pfn_t pfn)
+{
+}
+
+/*
+ * track_pfn_copy is called when a vma that covers a pfnmap gets
+ * copied through copy_page_range().
+ */
+static inline int track_pfn_copy(struct vm_area_struct *vma)
+{
+ return 0;
+}
+
+/*
+ * untrack_pfn is called while unmapping a pfnmap for a region.
+ * untrack can be called for a specific region indicated by pfn and size,
+ * or for the entire vma (in which case pfn and size are zero).
+ */
+static inline void untrack_pfn(struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size)
+{
+}
+
+/*
+ * untrack_pfn_moved is called while mremapping a pfnmap for a new region.
+ */
+static inline void untrack_pfn_moved(struct vm_area_struct *vma)
+{
+}
+#else
+extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
+ unsigned long pfn, unsigned long addr,
+ unsigned long size);
+extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
+ pfn_t pfn);
+extern int track_pfn_copy(struct vm_area_struct *vma);
+extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
+ unsigned long size);
+extern void untrack_pfn_moved(struct vm_area_struct *vma);
+#endif
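+
+/*
+ * A minimal sketch of the call path that exercises this tracking: a driver
+ * mmap handler (mydrv_mmap and phys are hypothetical) calls
+ * remap_pfn_range(), which invokes track_pfn_remap() internally:
+ *
+ *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
+ *	{
+ *		return remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
+ *				       vma->vm_end - vma->vm_start,
+ *				       vma->vm_page_prot);
+ *	}
+ */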
+
+#ifdef CONFIG_MMU
+#ifdef __HAVE_COLOR_ZERO_PAGE
+static inline int is_zero_pfn(unsigned long pfn)
+{
+ extern unsigned long zero_pfn;
+ unsigned long offset_from_zero_pfn = pfn - zero_pfn;
+ return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
+}
+
+#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
+
+#else
+static inline int is_zero_pfn(unsigned long pfn)
+{
+ extern unsigned long zero_pfn;
+ return pfn == zero_pfn;
+}
+
+static inline unsigned long my_zero_pfn(unsigned long addr)
+{
+ extern unsigned long zero_pfn;
+ return zero_pfn;
+}
+#endif
+#else
+static inline int is_zero_pfn(unsigned long pfn)
+{
+ return 0;
+}
+
+static inline unsigned long my_zero_pfn(unsigned long addr)
+{
+ return 0;
+}
+#endif /* CONFIG_MMU */
+
+#ifdef CONFIG_MMU
+
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+ return 0;
+}
+#ifndef pmd_write
+static inline int pmd_write(pmd_t pmd)
+{
+ BUG();
+ return 0;
+}
+#endif /* pmd_write */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifndef pud_write
+static inline int pud_write(pud_t pud)
+{
+ BUG();
+ return 0;
+}
+#endif /* pud_write */
+
+#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static inline int pmd_devmap(pmd_t pmd)
+{
+ return 0;
+}
+static inline int pud_devmap(pud_t pud)
+{
+ return 0;
+}
+static inline int pgd_devmap(pgd_t pgd)
+{
+ return 0;
+}
+#endif
+
+#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
+ !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+static inline int pud_trans_huge(pud_t pud)
+{
+ return 0;
+}
+#endif
+
+/* See pmd_none_or_trans_huge_or_clear_bad for discussion. */
+static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud)
+{
+ pud_t pudval = READ_ONCE(*pud);
+
+ if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
+ return 1;
+ if (unlikely(pud_bad(pudval))) {
+ pud_clear_bad(pud);
+ return 1;
+ }
+ return 0;
+}
+
+/* See pmd_trans_unstable for discussion. */
+static inline int pud_trans_unstable(pud_t *pud)
+{
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+ return pud_none_or_trans_huge_or_dev_or_clear_bad(pud);
+#else
+ return 0;
+#endif
+}
+
+#ifndef pmd_read_atomic
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+ /*
+	 * Depend on the compiler for an atomic pmd read. NOTE: this is
+	 * only going to work if pmdval_t isn't larger than an
+	 * unsigned long.
+ */
+ return *pmdp;
+}
+#endif
+
+#ifndef arch_needs_pgtable_deposit
+#define arch_needs_pgtable_deposit() (false)
+#endif
+/*
+ * This function is meant to be used by sites walking pagetables with
+ * the mmap_lock held in read mode to protect against MADV_DONTNEED and
+ * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
+ * into a null pmd and the transhuge page fault can convert a null pmd
+ * into a huge pmd or into a regular pmd (if the hugepage allocation
+ * fails). While holding the mmap_lock in read mode the pmd becomes
+ * stable and stops changing under us only if it's not null and not a
+ * transhuge pmd. When those races occur and this function makes a
+ * difference vs the standard pmd_none_or_clear_bad, the result is
+ * undefined, so behaving as if the pmd were none is safe (because it
+ * can return none anyway). The compiler-level barrier() is critically
+ * important to compute the two checks atomically on the same pmdval.
+ *
+ * For 32bit kernels with a 64bit large pmd_t this automatically takes
+ * care of reading the pmd atomically to avoid SMP race conditions
+ * against pmd_populate() when the mmap_lock is held for reading by the
+ * caller (a special atomic read, not relying on the compiler as the
+ * generic version above does, is also needed when THP is disabled
+ * because the page fault can populate the pmd from under us).
+ */
+static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
+{
+ pmd_t pmdval = pmd_read_atomic(pmd);
+ /*
+ * The barrier will stabilize the pmdval in a register or on
+ * the stack so that it will stop changing under the code.
+ *
+ * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
+	 * pmd_read_atomic is allowed to return a non-atomic pmdval
+	 * (for example pointing to a hugepage that has never been
+	 * mapped in the pmd). The checks below only care about
+	 * the low part of the pmd with 32bit PAE x86 anyway, with the
+	 * exception of pmd_none(). So the important thing is that if
+	 * the low part of the pmd is found null, the high part will
+	 * also be null, or the pmd_none() check below would be
+	 * confused.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ barrier();
+#endif
+ /*
+ * !pmd_present() checks for pmd migration entries
+ *
+	 * The complete check uses is_pmd_migration_entry() in linux/swapops.h,
+	 * but using that would require moving this function and
+	 * pmd_trans_unstable() to linux/swapops.h to resolve the dependency,
+	 * which is too much code movement.
+	 *
+	 * !pmd_present() is equivalent to is_pmd_migration_entry() currently,
+	 * because !pmd_present() pages can only be under migration, not
+	 * swapped out.
+	 *
+	 * The pmd_none() check is preserved for future condition checks on
+	 * pmd migration entries, and to avoid confusion with this function's
+	 * name, although it is redundant with !pmd_present().
+ */
+ if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
+ (IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
+ return 1;
+ if (unlikely(pmd_bad(pmdval))) {
+ pmd_clear_bad(pmd);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * This is a noop if Transparent Hugepage Support is not built into
+ * the kernel. Otherwise it is equivalent to
+ * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
+ * places that have already verified the pmd is not none and want to
+ * walk ptes while holding the mmap sem in read mode (write mode doesn't
+ * need this). If THP is not enabled, the pmd can't go away under the
+ * code even if MADV_DONTNEED runs, but if THP is enabled we need to
+ * run pmd_trans_unstable() before walking the ptes after
+ * split_huge_pmd returns (because it may have run when the pmd became
+ * null, but then a page fault can map in a THP rather than a regular page).
+ */
+static inline int pmd_trans_unstable(pmd_t *pmd)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return pmd_none_or_trans_huge_or_clear_bad(pmd);
+#else
+ return 0;
+#endif
+}
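+
+/*
+ * A minimal sketch of the guard a pte walker would use before mapping the
+ * pte page, per the discussion above (the retry label is hypothetical):
+ *
+ *	if (pmd_trans_unstable(pmd))
+ *		goto retry;
+ *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ *	... walk the ptes ...
+ *	pte_unmap_unlock(pte, ptl);
+ */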
+
+/*
+ * The ordering of these checks is important for pmds with _PAGE_DEVMAP set.
+ * If we check pmd_trans_unstable() first we will trip the pmd_bad() check
+ * inside pmd_none_or_trans_huge_or_clear_bad(). This will still end up
+ * correctly returning 1, but not before it spams dmesg with the
+ * pmd_clear_bad() output.
+ */
+static inline int pmd_devmap_trans_unstable(pmd_t *pmd)
+{
+ return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
+}
+
+#ifndef CONFIG_NUMA_BALANCING
+/*
+ * Technically a PTE can be PROTNONE even when not doing NUMA balancing, but
+ * the only case the kernel cares about is NUMA balancing, and the flag is
+ * only ever set when the VMA is accessible. For PROT_NONE VMAs, the PTEs
+ * are not marked _PAGE_PROTNONE, so by default implement the helper as
+ * "always no". It is the responsibility of the caller to distinguish
+ * between PROT_NONE protections and NUMA hinting fault protections.
+ */
+static inline int pte_protnone(pte_t pte)
+{
+ return 0;
+}
+
+static inline int pmd_protnone(pmd_t pmd)
+{
+ return 0;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+#endif /* CONFIG_MMU */
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+
+#ifndef __PAGETABLE_P4D_FOLDED
+int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
+void p4d_clear_huge(p4d_t *p4d);
+#else
+static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline void p4d_clear_huge(p4d_t *p4d) { }
+#endif /* !__PAGETABLE_P4D_FOLDED */
+
+int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
+int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
+int pud_clear_huge(pud_t *pud);
+int pmd_clear_huge(pmd_t *pmd);
+int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
+int pud_free_pmd_page(pud_t *pud, unsigned long addr);
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
+#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline void p4d_clear_huge(p4d_t *p4d) { }
+static inline int pud_clear_huge(pud_t *pud)
+{
+ return 0;
+}
+static inline int pmd_clear_huge(pmd_t *pmd)
+{
+ return 0;
+}
+static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
+{
+ return 0;
+}
+static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+{
+ return 0;
+}
+static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+{
+ return 0;
+}
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+
+#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * ARCHes with special requirements for evicting THP backing TLB entries can
+ * implement this. Even otherwise, it can help optimize the normal TLB flush
+ * in the THP regime: stock flush_tlb_range() typically has an optimization
+ * to nuke the entire TLB if the flush span is greater than a threshold,
+ * which will likely be true for a single huge page. Thus a single THP flush
+ * would invalidate the entire TLB, which is not desirable.
+ * e.g. see arch/arc: flush_pmd_tlb_range
+ */
+#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#else
+#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG()
+#define flush_pud_tlb_range(vma, addr, end) BUILD_BUG()
+#endif
+#endif
+
+struct file;
+int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t *vma_prot);
+
+#ifndef CONFIG_X86_ESPFIX64
+static inline void init_espfix_bsp(void) { }
+#endif
+
+extern void __init pgtable_cache_init(void);
+
+#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
+static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
+{
+ return true;
+}
+
+static inline bool arch_has_pfn_modify_check(void)
+{
+ return false;
+}
+#endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */
+
+/*
+ * Architecture PAGE_KERNEL_* fallbacks
+ *
+ * Some architectures don't define certain PAGE_KERNEL_* flags. This is either
+ * because they really don't support them, or the port needs to be updated to
+ * reflect the required functionality. Below is a set of relatively safe,
+ * best-effort fallbacks that can be counted on until the architectures
+ * define their own values.
+ */
+
+#ifndef PAGE_KERNEL_RO
+# define PAGE_KERNEL_RO PAGE_KERNEL
+#endif
+
+#ifndef PAGE_KERNEL_EXEC
+# define PAGE_KERNEL_EXEC PAGE_KERNEL
+#endif
+
+/*
+ * Page Table Modification bits for pgtbl_mod_mask.
+ *
+ * These are used by the p?d_alloc_track*() set of functions and in the generic
+ * vmalloc/ioremap code to track at which page-table levels entries have been
+ * modified. Based on that, the code can better decide when vmalloc and ioremap
+ * mapping changes need to be synchronized to other page-tables in the system.
+ */
+#define __PGTBL_PGD_MODIFIED 0
+#define __PGTBL_P4D_MODIFIED 1
+#define __PGTBL_PUD_MODIFIED 2
+#define __PGTBL_PMD_MODIFIED 3
+#define __PGTBL_PTE_MODIFIED 4
+
+#define PGTBL_PGD_MODIFIED BIT(__PGTBL_PGD_MODIFIED)
+#define PGTBL_P4D_MODIFIED BIT(__PGTBL_P4D_MODIFIED)
+#define PGTBL_PUD_MODIFIED BIT(__PGTBL_PUD_MODIFIED)
+#define PGTBL_PMD_MODIFIED BIT(__PGTBL_PMD_MODIFIED)
+#define PGTBL_PTE_MODIFIED BIT(__PGTBL_PTE_MODIFIED)
+
+/* Page-Table Modification Mask */
+typedef unsigned int pgtbl_mod_mask;
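+
+/*
+ * A minimal sketch of the intended use, assuming a kernel mapping path that
+ * allocates with the *_alloc_track() helpers and a sync threshold such as
+ * ARCH_PAGE_TABLE_SYNC_MASK (see linux/vmalloc.h):
+ *
+ *	pgtbl_mod_mask mask = 0;
+ *
+ *	if (!pmd_alloc_track(&init_mm, pud, addr, &mask))
+ *		return -ENOMEM;
+ *	... populate the ptes ...
+ *	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
+ *		arch_sync_kernel_mappings(start, end);
+ */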
+
+#endif /* !__ASSEMBLY__ */
+
+#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+/*
+ * ZSMALLOC needs to know the highest PFN on 32-bit architectures
+ * with physical address space extension, but falls back to
+ * BITS_PER_LONG otherwise.
+ */
+#error Missing MAX_POSSIBLE_PHYSMEM_BITS definition
+#else
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+#endif
+#endif
+
+#ifndef has_transparent_hugepage
+#define has_transparent_hugepage() IS_BUILTIN(CONFIG_TRANSPARENT_HUGEPAGE)
+#endif
+
+/*
+ * On some architectures, whether the p4d/pud or pmd layer of the page
+ * table hierarchy is folded depends on the mm.
+ */
+#ifndef mm_p4d_folded
+#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED)
+#endif
+
+#ifndef mm_pud_folded
+#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED)
+#endif
+
+#ifndef mm_pmd_folded
+#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
+#endif
+
+#ifndef p4d_offset_lockless
+#define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address)
+#endif
+#ifndef pud_offset_lockless
+#define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address)
+#endif
+#ifndef pmd_offset_lockless
+#define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address)
+#endif
+
+/*
+ * p?d_leaf() - true if this entry is a final mapping to a physical address.
+ * This differs from p?d_huge() in that these are always available (if
+ * the architecture supports large pages at the appropriate level), even
+ * if CONFIG_HUGETLB_PAGE is not defined.
+ * Only meaningful when called on a valid entry.
+ */
+#ifndef pgd_leaf
+#define pgd_leaf(x) 0
+#endif
+#ifndef p4d_leaf
+#define p4d_leaf(x) 0
+#endif
+#ifndef pud_leaf
+#define pud_leaf(x) 0
+#endif
+#ifndef pmd_leaf
+#define pmd_leaf(x) 0
+#endif
+
+#ifndef pgd_leaf_size
+#define pgd_leaf_size(x) (1ULL << PGDIR_SHIFT)
+#endif
+#ifndef p4d_leaf_size
+#define p4d_leaf_size(x) P4D_SIZE
+#endif
+#ifndef pud_leaf_size
+#define pud_leaf_size(x) PUD_SIZE
+#endif
+#ifndef pmd_leaf_size
+#define pmd_leaf_size(x) PMD_SIZE
+#endif
+#ifndef pte_leaf_size
+#define pte_leaf_size(x) PAGE_SIZE
+#endif
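+
+/*
+ * A minimal sketch: a generic walker can account a huge mapping without
+ * arch-specific knowledge:
+ *
+ *	if (pud_leaf(*pud)) {
+ *		u64 sz = pud_leaf_size(*pud);
+ *
+ *		... [addr, addr + sz) maps physically contiguous memory ...
+ *	}
+ */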
+
+/*
+ * Some architectures have MMUs that are configurable or selectable at boot
+ * time. These lead to variable PTRS_PER_x. For statically allocated arrays it
+ * helps to have a static maximum value.
+ */
+
+#ifndef MAX_PTRS_PER_PTE
+#define MAX_PTRS_PER_PTE PTRS_PER_PTE
+#endif
+
+#ifndef MAX_PTRS_PER_PMD
+#define MAX_PTRS_PER_PMD PTRS_PER_PMD
+#endif
+
+#ifndef MAX_PTRS_PER_PUD
+#define MAX_PTRS_PER_PUD PTRS_PER_PUD
+#endif
+
+#ifndef MAX_PTRS_PER_P4D
+#define MAX_PTRS_PER_P4D PTRS_PER_P4D
+#endif
+
+/* Description of the effects of mapping type and prot in the current
+ * implementation. This is due to the limited x86 page protection hardware.
+ * The expected behavior is in parens:
+ *
+ * map_type prot
+ * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC
+ * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes
+ * w: (no) no w: (no) no w: (yes) yes w: (no) no
+ * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
+ *
+ * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
+ * w: (no) no w: (no) no w: (copy) copy w: (no) no
+ * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
+ *
+ * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
+ * MAP_PRIVATE (with Enhanced PAN supported):
+ * r: (no) no
+ * w: (no) no
+ * x: (yes) yes
+ */
+#define DECLARE_VM_GET_PAGE_PROT \
+pgprot_t vm_get_page_prot(unsigned long vm_flags) \
+{ \
+ return protection_map[vm_flags & \
+ (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]; \
+} \
+EXPORT_SYMBOL(vm_get_page_prot);
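+
+/*
+ * A minimal sketch of how an architecture instantiates the generic helper,
+ * assuming it supplies a protection_map[16] indexed by the VM_* flag
+ * combinations (the PAGE_* values shown are arch-specific):
+ *
+ *	static pgprot_t protection_map[16] __ro_after_init = {
+ *		[VM_NONE]	= PAGE_NONE,
+ *		[VM_READ]	= PAGE_READONLY,
+ *		... remaining combinations ...
+ *	};
+ *	DECLARE_VM_GET_PAGE_PROT
+ */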
+
+#endif /* _LINUX_PGTABLE_H */
diff --git a/include/linux/pgtable_api.h b/include/linux/pgtable_api.h
new file mode 100644
index 000000000000..ff367a4ba8c4
--- /dev/null
+++ b/include/linux/pgtable_api.h
@@ -0,0 +1 @@
+#include <linux/pgtable.h>
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 452e8ba8665f..ddf66198f751 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -15,6 +15,7 @@
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/linkmode.h>
+#include <linux/netlink.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/mii_timestamper.h>
@@ -23,6 +24,9 @@
#include <linux/workqueue.h>
#include <linux/mod_devicetable.h>
#include <linux/u64_stats_sync.h>
+#include <linux/irqreturn.h>
+#include <linux/iopoll.h>
+#include <linux/refcount.h>
#include <linux/atomic.h>
@@ -61,23 +65,62 @@ extern const int phy_basic_ports_array[3];
extern const int phy_fibre_port_array[1];
extern const int phy_all_ports_features_array[7];
extern const int phy_10_100_features_array[4];
-extern const int phy_basic_t1_features_array[2];
+extern const int phy_basic_t1_features_array[3];
extern const int phy_gbit_features_array[2];
extern const int phy_10gbit_features_array[1];
/*
* Set phydev->irq to PHY_POLL if interrupts are not supported,
- * or not desired for this PHY. Set to PHY_IGNORE_INTERRUPT if
- * the attached driver handles the interrupt
+ * or not desired for this PHY. Set to PHY_MAC_INTERRUPT if
+ * the attached MAC driver handles the interrupt
*/
#define PHY_POLL -1
-#define PHY_IGNORE_INTERRUPT -2
+#define PHY_MAC_INTERRUPT -2
#define PHY_IS_INTERNAL 0x00000001
#define PHY_RST_AFTER_CLK_EN 0x00000002
+#define PHY_POLL_CABLE_TEST 0x00000004
#define MDIO_DEVICE_IS_PHY 0x80000000
-/* Interface Mode definitions */
+/**
+ * enum phy_interface_t - Interface Mode definitions
+ *
+ * @PHY_INTERFACE_MODE_NA: Not Applicable - don't touch
+ * @PHY_INTERFACE_MODE_INTERNAL: No interface, MAC and PHY combined
+ * @PHY_INTERFACE_MODE_MII: Media-independent interface
+ * @PHY_INTERFACE_MODE_GMII: Gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_SGMII: Serial gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_TBI: Ten Bit Interface
+ * @PHY_INTERFACE_MODE_REVMII: Reverse Media Independent Interface
+ * @PHY_INTERFACE_MODE_RMII: Reduced Media Independent Interface
+ * @PHY_INTERFACE_MODE_REVRMII: Reduced Media Independent Interface in PHY role
+ * @PHY_INTERFACE_MODE_RGMII: Reduced gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_RGMII_ID: RGMII with Internal RX+TX delay
+ * @PHY_INTERFACE_MODE_RGMII_RXID: RGMII with Internal RX delay
+ * @PHY_INTERFACE_MODE_RGMII_TXID: RGMII with Internal TX delay
+ * @PHY_INTERFACE_MODE_RTBI: Reduced TBI
+ * @PHY_INTERFACE_MODE_SMII: Serial MII
+ * @PHY_INTERFACE_MODE_XGMII: 10 gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_XLGMII: 40 gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_MOCA: Multimedia over Coax
+ * @PHY_INTERFACE_MODE_QSGMII: Quad SGMII
+ * @PHY_INTERFACE_MODE_TRGMII: Turbo RGMII
+ * @PHY_INTERFACE_MODE_100BASEX: 100 BaseX
+ * @PHY_INTERFACE_MODE_1000BASEX: 1000 BaseX
+ * @PHY_INTERFACE_MODE_2500BASEX: 2500 BaseX
+ * @PHY_INTERFACE_MODE_5GBASER: 5G BaseR
+ * @PHY_INTERFACE_MODE_RXAUI: Reduced XAUI
+ * @PHY_INTERFACE_MODE_XAUI: 10 Gigabit Attachment Unit Interface
+ * @PHY_INTERFACE_MODE_10GBASER: 10G BaseR
+ * @PHY_INTERFACE_MODE_25GBASER: 25G BaseR
+ * @PHY_INTERFACE_MODE_USXGMII: Universal Serial 10GE MII
+ * @PHY_INTERFACE_MODE_10GKR: 10GBASE-KR - with Clause 73 AN
+ * @PHY_INTERFACE_MODE_QUSGMII: Quad Universal SGMII
+ * @PHY_INTERFACE_MODE_1000BASEKX: 1000Base-KX - with Clause 73 AN
+ * @PHY_INTERFACE_MODE_MAX: Bookkeeping
+ *
+ * Describes the interface between the MAC and PHY.
+ */
typedef enum {
PHY_INTERFACE_MODE_NA,
PHY_INTERFACE_MODE_INTERNAL,
@@ -87,6 +130,7 @@ typedef enum {
PHY_INTERFACE_MODE_TBI,
PHY_INTERFACE_MODE_REVMII,
PHY_INTERFACE_MODE_RMII,
+ PHY_INTERFACE_MODE_REVRMII,
PHY_INTERFACE_MODE_RGMII,
PHY_INTERFACE_MODE_RGMII_ID,
PHY_INTERFACE_MODE_RGMII_RXID,
@@ -94,31 +138,63 @@ typedef enum {
PHY_INTERFACE_MODE_RTBI,
PHY_INTERFACE_MODE_SMII,
PHY_INTERFACE_MODE_XGMII,
+ PHY_INTERFACE_MODE_XLGMII,
PHY_INTERFACE_MODE_MOCA,
PHY_INTERFACE_MODE_QSGMII,
PHY_INTERFACE_MODE_TRGMII,
+ PHY_INTERFACE_MODE_100BASEX,
PHY_INTERFACE_MODE_1000BASEX,
PHY_INTERFACE_MODE_2500BASEX,
+ PHY_INTERFACE_MODE_5GBASER,
PHY_INTERFACE_MODE_RXAUI,
PHY_INTERFACE_MODE_XAUI,
/* 10GBASE-R, XFI, SFI - single lane 10G Serdes */
PHY_INTERFACE_MODE_10GBASER,
+ PHY_INTERFACE_MODE_25GBASER,
PHY_INTERFACE_MODE_USXGMII,
/* 10GBASE-KR - with Clause 73 AN */
PHY_INTERFACE_MODE_10GKR,
+ PHY_INTERFACE_MODE_QUSGMII,
+ PHY_INTERFACE_MODE_1000BASEKX,
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
-/**
- * phy_supported_speeds - return all speeds currently supported by a phy device
- * @phy: The phy device to return supported speeds of.
- * @speeds: buffer to store supported speeds in.
- * @size: size of speeds buffer.
- *
- * Description: Returns the number of supported speeds, and fills
- * the speeds buffer with the supported speeds. If speeds buffer is
- * too small to contain all currently supported speeds, will return as
- * many speeds as can fit.
+/* PHY interface mode bitmap handling */
+#define DECLARE_PHY_INTERFACE_MASK(name) \
+ DECLARE_BITMAP(name, PHY_INTERFACE_MODE_MAX)
+
+static inline void phy_interface_zero(unsigned long *intf)
+{
+ bitmap_zero(intf, PHY_INTERFACE_MODE_MAX);
+}
+
+static inline bool phy_interface_empty(const unsigned long *intf)
+{
+ return bitmap_empty(intf, PHY_INTERFACE_MODE_MAX);
+}
+
+static inline void phy_interface_and(unsigned long *dst, const unsigned long *a,
+ const unsigned long *b)
+{
+ bitmap_and(dst, a, b, PHY_INTERFACE_MODE_MAX);
+}
+
+static inline void phy_interface_or(unsigned long *dst, const unsigned long *a,
+ const unsigned long *b)
+{
+ bitmap_or(dst, a, b, PHY_INTERFACE_MODE_MAX);
+}
+
+static inline void phy_interface_set_rgmii(unsigned long *intf)
+{
+ __set_bit(PHY_INTERFACE_MODE_RGMII, intf);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_ID, intf);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_RXID, intf);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_TXID, intf);
+}
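+
+/*
+ * A minimal sketch: a MAC driver can describe the interface modes it
+ * supports before attaching a PHY (the "supported" bitmap name is
+ * illustrative):
+ *
+ *	DECLARE_PHY_INTERFACE_MASK(supported);
+ *
+ *	phy_interface_zero(supported);
+ *	phy_interface_set_rgmii(supported);
+ *	__set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ */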
+
+/*
+ * phy_supported_speeds - return all speeds currently supported by a PHY device
*/
unsigned int phy_supported_speeds(struct phy_device *phy,
unsigned int *speeds,
@@ -128,9 +204,9 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
* phy_modes - map phy_interface_t enum to device tree binding of phy-mode
* @interface: enum phy_interface_t value
*
- * Description: maps 'enum phy_interface_t' defined in this file
+ * Description: maps &enum phy_interface_t defined in this file
* into the device tree binding of 'phy-mode', so that Ethernet
- * device driver can get phy interface from device tree.
+ * device driver can get PHY interface from device tree.
*/
static inline const char *phy_modes(phy_interface_t interface)
{
@@ -151,6 +227,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "rev-mii";
case PHY_INTERFACE_MODE_RMII:
return "rmii";
+ case PHY_INTERFACE_MODE_REVRMII:
+ return "rev-rmii";
case PHY_INTERFACE_MODE_RGMII:
return "rgmii";
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -165,6 +243,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "smii";
case PHY_INTERFACE_MODE_XGMII:
return "xgmii";
+ case PHY_INTERFACE_MODE_XLGMII:
+ return "xlgmii";
case PHY_INTERFACE_MODE_MOCA:
return "moca";
case PHY_INTERFACE_MODE_QSGMII:
@@ -173,24 +253,33 @@ static inline const char *phy_modes(phy_interface_t interface)
return "trgmii";
case PHY_INTERFACE_MODE_1000BASEX:
return "1000base-x";
+ case PHY_INTERFACE_MODE_1000BASEKX:
+ return "1000base-kx";
case PHY_INTERFACE_MODE_2500BASEX:
return "2500base-x";
+ case PHY_INTERFACE_MODE_5GBASER:
+ return "5gbase-r";
case PHY_INTERFACE_MODE_RXAUI:
return "rxaui";
case PHY_INTERFACE_MODE_XAUI:
return "xaui";
case PHY_INTERFACE_MODE_10GBASER:
return "10gbase-r";
+ case PHY_INTERFACE_MODE_25GBASER:
+ return "25gbase-r";
case PHY_INTERFACE_MODE_USXGMII:
return "usxgmii";
case PHY_INTERFACE_MODE_10GKR:
return "10gbase-kr";
+ case PHY_INTERFACE_MODE_100BASEX:
+ return "100base-x";
+ case PHY_INTERFACE_MODE_QUSGMII:
+ return "qusgmii";
default:
return "unknown";
}
}
-
#define PHY_INIT_TIMEOUT 100000
#define PHY_FORCE_TIMEOUT 10
@@ -201,18 +290,20 @@ static inline const char *phy_modes(phy_interface_t interface)
#define MII_BUS_ID_SIZE 61
-/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
- IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */
-#define MII_ADDR_C45 (1<<30)
-#define MII_DEVADDR_C45_SHIFT 16
-#define MII_REGADDR_C45_MASK GENMASK(15, 0)
-
struct device;
struct phylink;
struct sfp_bus;
struct sfp_upstream_ops;
struct sk_buff;
+/**
+ * struct mdio_bus_stats - Statistics counters for MDIO busses
+ * @transfers: Total number of transfers, i.e. @writes + @reads
+ * @errors: Number of MDIO transfers that returned an error
+ * @writes: Number of write transfers
+ * @reads: Number of read transfers
+ * @syncp: Synchronisation for incrementing statistics
+ */
struct mdio_bus_stats {
u64_stats_t transfers;
u64_stats_t errors;
@@ -222,7 +313,45 @@ struct mdio_bus_stats {
struct u64_stats_sync syncp;
};
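+
+/*
+ * A minimal sketch of how the MDIO core accounts a transfer (see
+ * mdiobus_stats_acct() in drivers/net/phy/mdio_bus.c for the real
+ * accounting):
+ *
+ *	u64_stats_update_begin(&bus->stats[addr].syncp);
+ *	u64_stats_inc(&bus->stats[addr].reads);
+ *	u64_stats_update_end(&bus->stats[addr].syncp);
+ */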
-/*
+/**
+ * struct phy_package_shared - Shared information in PHY packages
+ * @addr: Common PHY address used to combine PHYs in one package
+ * @refcnt: Number of PHYs connected to this shared data
+ * @flags: Initialization of PHY package
+ * @priv_size: Size of the shared private data @priv
+ * @priv: Driver private data shared across a PHY package
+ *
+ * Represents a shared structure between different phydevs in the same
+ * package, for example a quad PHY. See phy_package_join() and
+ * phy_package_leave().
+ */
+struct phy_package_shared {
+ int addr;
+ refcount_t refcnt;
+ unsigned long flags;
+ size_t priv_size;
+
+ /* private data pointer */
+ /* note that this pointer is shared between different phydevs and
+ * the user has to take care of appropriate locking. It is allocated
+ * and freed automatically by phy_package_join() and
+ * phy_package_leave().
+ */
+ void *priv;
+};
+
+/* used as bit number in atomic bitops */
+#define PHY_SHARED_F_INIT_DONE 0
+#define PHY_SHARED_F_PROBE_DONE 1
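+
+/*
+ * A minimal sketch, assuming the phy_package_join()/phy_package_leave()
+ * API referenced above (base_addr and struct my_shared are hypothetical):
+ *
+ *	err = phy_package_join(phydev, base_addr, sizeof(struct my_shared));
+ *	if (err)
+ *		return err;
+ *	shared = phydev->shared->priv;
+ *	...
+ *	phy_package_leave(phydev);
+ */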
+
+/**
+ * struct mii_bus - Represents an MDIO bus
+ *
+ * @owner: Who owns this device
+ * @name: User friendly name for this MDIO device, or driver name
+ * @id: Unique identifier for this bus, typically from the bus hierarchy
+ * @priv: Driver private data
+ *
* The Bus class for PHYs. Devices which provide access to
* PHYs should register using this structure
*/
@@ -231,56 +360,93 @@ struct mii_bus {
const char *name;
char id[MII_BUS_ID_SIZE];
void *priv;
+ /** @read: Perform a read transfer on the bus */
int (*read)(struct mii_bus *bus, int addr, int regnum);
+ /** @write: Perform a write transfer on the bus */
int (*write)(struct mii_bus *bus, int addr, int regnum, u16 val);
+ /** @reset: Perform a reset of the bus */
int (*reset)(struct mii_bus *bus);
+
+ /** @stats: Statistic counters per device on the bus */
struct mdio_bus_stats stats[PHY_MAX_ADDR];
- /*
- * A lock to ensure that only one thing can read/write
+ /**
+ * @mdio_lock: A lock to ensure that only one thing can read/write
* the MDIO bus at a time
*/
struct mutex mdio_lock;
+ /** @parent: Parent device of this bus */
struct device *parent;
+ /** @state: State of bus structure */
enum {
MDIOBUS_ALLOCATED = 1,
MDIOBUS_REGISTERED,
MDIOBUS_UNREGISTERED,
MDIOBUS_RELEASED,
} state;
+
+ /** @dev: Kernel device representation */
struct device dev;
- /* list of all PHYs on bus */
+ /** @mdio_map: list of all MDIO devices on bus */
struct mdio_device *mdio_map[PHY_MAX_ADDR];
- /* PHY addresses to be ignored when probing */
+ /** @phy_mask: PHY addresses to be ignored when probing */
u32 phy_mask;
- /* PHY addresses to ignore the TA/read failure */
+ /** @phy_ignore_ta_mask: PHY addresses to ignore the TA/read failure */
u32 phy_ignore_ta_mask;
- /*
- * An array of interrupts, each PHY's interrupt at the index
+ /**
+ * @irq: An array of interrupts, each PHY's interrupt at the index
* matching its address
*/
int irq[PHY_MAX_ADDR];
- /* GPIO reset pulse width in microseconds */
+ /** @reset_delay_us: GPIO reset pulse width in microseconds */
int reset_delay_us;
- /* RESET GPIO descriptor pointer */
+ /** @reset_post_delay_us: GPIO reset deassert delay in microseconds */
+ int reset_post_delay_us;
+ /** @reset_gpiod: Reset GPIO descriptor pointer */
struct gpio_desc *reset_gpiod;
+
+ /** @probe_capabilities: bus capabilities, used for probing */
+ enum {
+ MDIOBUS_NO_CAP = 0,
+ MDIOBUS_C22,
+ MDIOBUS_C45,
+ MDIOBUS_C22_C45,
+ } probe_capabilities;
+
+ /** @shared_lock: protect access to the shared element */
+ struct mutex shared_lock;
+
+ /** @shared: shared state across different PHYs */
+ struct phy_package_shared *shared[PHY_MAX_ADDR];
};
#define to_mii_bus(d) container_of(d, struct mii_bus, dev)
-struct mii_bus *mdiobus_alloc_size(size_t);
+struct mii_bus *mdiobus_alloc_size(size_t size);
+
+/**
+ * mdiobus_alloc - Allocate an MDIO bus structure
+ *
+ * The internal state of the MDIO bus will be set to MDIOBUS_ALLOCATED, ready
+ * for the driver to register the bus.
+ */
static inline struct mii_bus *mdiobus_alloc(void)
{
return mdiobus_alloc_size(0);
}
int __mdiobus_register(struct mii_bus *bus, struct module *owner);
+int __devm_mdiobus_register(struct device *dev, struct mii_bus *bus,
+ struct module *owner);
#define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE)
+#define devm_mdiobus_register(dev, bus) \
+ __devm_mdiobus_register(dev, bus, THIS_MODULE)
+
void mdiobus_unregister(struct mii_bus *bus);
void mdiobus_free(struct mii_bus *bus);
struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv);
@@ -289,40 +455,47 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev)
return devm_mdiobus_alloc_size(dev, 0);
}
-void devm_mdiobus_free(struct device *dev, struct mii_bus *bus);
+struct mii_bus *mdio_find_bus(const char *mdio_name);
struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
#define PHY_INTERRUPT_DISABLED false
#define PHY_INTERRUPT_ENABLED true
-/* PHY state machine states:
+/**
+ * enum phy_state - PHY state machine states:
*
- * DOWN: PHY device and driver are not ready for anything. probe
+ * @PHY_DOWN: PHY device and driver are not ready for anything. probe
* should be called if and only if the PHY is in this state,
* given that the PHY device exists.
- * - PHY driver probe function will set the state to READY
+ * - PHY driver probe function will set the state to @PHY_READY
*
- * READY: PHY is ready to send and receive packets, but the
+ * @PHY_READY: PHY is ready to send and receive packets, but the
* controller is not. By default, PHYs which do not implement
* probe will be set to this state by phy_probe().
* - start will set the state to UP
*
- * UP: The PHY and attached device are ready to do work.
+ * @PHY_UP: The PHY and attached device are ready to do work.
* Interrupts should be started here.
- * - timer moves to NOLINK or RUNNING
+ * - timer moves to @PHY_NOLINK or @PHY_RUNNING
*
- * NOLINK: PHY is up, but not currently plugged in.
- * - irq or timer will set RUNNING if link comes back
- * - phy_stop moves to HALTED
+ * @PHY_NOLINK: PHY is up, but not currently plugged in.
+ * - irq or timer will set @PHY_RUNNING if link comes back
+ * - phy_stop moves to @PHY_HALTED
*
- * RUNNING: PHY is currently up, running, and possibly sending
+ * @PHY_RUNNING: PHY is currently up, running, and possibly sending
* and/or receiving packets
- * - irq or timer will set NOLINK if link goes down
- * - phy_stop moves to HALTED
+ * - irq or timer will set @PHY_NOLINK if link goes down
+ * - phy_stop moves to @PHY_HALTED
*
- * HALTED: PHY is up, but no polling or interrupts are done. Or
+ * @PHY_CABLETEST: PHY is performing a cable test. Packet reception/sending
+ * is not expected to work, carrier will be indicated as down. The PHY will
+ * be polled once per second, or on interrupt, for its current state.
+ * Once complete, move to @PHY_UP to restart the PHY.
+ * - phy_stop aborts the running test and moves to @PHY_HALTED
+ *
+ * @PHY_HALTED: PHY is up, but no polling or interrupts are done. Or
* PHY is in an error state.
- * - phy_start moves to UP
+ * - phy_start moves to @PHY_UP
*/
enum phy_state {
PHY_DOWN = 0,
@@ -331,48 +504,104 @@ enum phy_state {
PHY_UP,
PHY_RUNNING,
PHY_NOLINK,
+ PHY_CABLETEST,
};
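+
+/*
+ * A minimal sketch of the MAC driver calls that move a PHY through these
+ * states (error handling omitted; adjust_link is the MAC's handler):
+ *
+ *	phydev = phy_connect(ndev, bus_id, adjust_link,
+ *			     PHY_INTERFACE_MODE_RGMII);
+ *	phy_start(phydev);
+ *	...
+ *	phy_stop(phydev);
+ *	phy_disconnect(phydev);
+ */
+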
+#define MDIO_MMD_NUM 32
+
/**
* struct phy_c45_device_ids - 802.3-c45 Device Identifiers
- * @devices_in_package: Bit vector of devices present.
+ * @devices_in_package: IEEE 802.3 devices in package register value.
+ * @mmds_present: bit vector of MMDs present.
 * @device_ids: The device identifier for each present device.
*/
struct phy_c45_device_ids {
u32 devices_in_package;
- u32 device_ids[8];
+ u32 mmds_present;
+ u32 device_ids[MDIO_MMD_NUM];
};
struct macsec_context;
struct macsec_ops;
-/* phy_device: An instance of a PHY
+/**
+ * struct phy_device - An instance of a PHY
+ *
+ * @mdio: MDIO bus this PHY is on
+ * @drv: Pointer to the driver for this PHY instance
+ * @phy_id: UID for this device found during discovery
+ * @c45_ids: 802.3-c45 Device Identifiers if is_c45.
+ * @is_c45: Set to true if this PHY uses clause 45 addressing.
+ * @is_internal: Set to true if this PHY is internal to a MAC.
+ * @is_pseudo_fixed_link: Set to true if this PHY is an Ethernet switch, etc.
+ * @is_gigabit_capable: Set to true if PHY supports 1000Mbps
+ * @has_fixups: Set to true if this PHY has fixups/quirks.
+ * @suspended: Set to true if this PHY has been suspended successfully.
+ * @suspended_by_mdio_bus: Set to true if this PHY was suspended by MDIO bus.
+ * @sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
+ * @loopback_enabled: Set true if this PHY has been loopbacked successfully.
+ * @downshifted_rate: Set true if link speed has been downshifted.
+ * @is_on_sfp_module: Set true if PHY is located on an SFP module.
+ * @mac_managed_pm: Set true if MAC driver takes care of suspending/resuming PHY
+ * @state: State of the PHY for management purposes
+ * @dev_flags: Device-specific flags used by the PHY driver.
*
- * drv: Pointer to the driver for this PHY instance
- * phy_id: UID for this device found during discovery
- * c45_ids: 802.3-c45 Device Identifers if is_c45.
- * is_c45: Set to true if this phy uses clause 45 addressing.
- * is_internal: Set to true if this phy is internal to a MAC.
- * is_pseudo_fixed_link: Set to true if this phy is an Ethernet switch, etc.
- * is_gigabit_capable: Set to true if PHY supports 1000Mbps
- * has_fixups: Set to true if this phy has fixups/quirks.
- * suspended: Set to true if this phy has been suspended successfully.
- * suspended_by_mdio_bus: Set to true if this phy was suspended by MDIO bus.
- * sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
- * loopback_enabled: Set true if this phy has been loopbacked successfully.
- * state: state of the PHY for management purposes
- * dev_flags: Device-specific flags used by the PHY driver.
- * irq: IRQ number of the PHY's interrupt (-1 if none)
- * phy_timer: The timer for handling the state machine
- * sfp_bus_attached: flag indicating whether the SFP bus has been attached
- * sfp_bus: SFP bus attached to this PHY's fiber port
- * attached_dev: The attached enet driver's device instance ptr
- * adjust_link: Callback for the enet controller to respond to
- * changes in the link state.
- * macsec_ops: MACsec offloading ops.
+ * - Bits [15:0] are free to use by the PHY driver to communicate
+ * driver specific behavior.
+ * - Bits [23:16] are currently reserved for future use.
+ * - Bits [31:24] are reserved for defining generic
+ * PHY driver behavior.
+ * @irq: IRQ number of the PHY's interrupt (-1 if none)
+ * @phy_timer: The timer for handling the state machine
+ * @phylink: Pointer to phylink instance for this PHY
+ * @sfp_bus_attached: Flag indicating whether the SFP bus has been attached
+ * @sfp_bus: SFP bus attached to this PHY's fiber port
+ * @attached_dev: The attached enet driver's device instance ptr
+ * @adjust_link: Callback for the enet controller to respond to changes in the
+ * link state.
+ * @phy_link_change: Callback for phylink for notification of link change
+ * @macsec_ops: MACsec offloading ops.
*
- * speed, duplex, pause, supported, advertising, lp_advertising,
- * and autoneg are used like in mii_if_info
+ * @speed: Current link speed
+ * @duplex: Current duplex
+ * @port: Current port
+ * @pause: Current pause
+ * @asym_pause: Current asymmetric pause
+ * @supported: Combined MAC/PHY supported linkmodes
+ * @advertising: Currently advertised linkmodes
+ * @adv_old: Saved advertised while power saving for WoL
+ * @lp_advertising: Current link partner advertised linkmodes
+ * @host_interfaces: PHY interface modes supported by host
+ * @eee_broken_modes: Energy efficient ethernet modes which should be prohibited
+ * @autoneg: Flag autoneg being used
+ * @rate_matching: Current rate matching mode
+ * @link: Current link state
+ * @autoneg_complete: Flag auto negotiation of the link has completed
+ * @mdix: Current crossover
+ * @mdix_ctrl: User setting of crossover
+ * @pma_extable: Cached value of PMA/PMD Extended Abilities Register
+ * @interrupts: Flag interrupts have been enabled
+ * @irq_suspended: Flag indicating PHY is suspended and therefore interrupt
+ * handling shall be postponed until PHY has resumed
+ * @irq_rerun: Flag indicating interrupts occurred while PHY was suspended,
+ * requiring a rerun of the interrupt handler after resume
+ * @interface: enum phy_interface_t value
+ * @skb: Netlink message for cable diagnostics
+ * @nest: Netlink nest used for cable diagnostics
+ * @ehdr: Netlink header for cable diagnostics
+ * @phy_led_triggers: Array of LED triggers
+ * @phy_num_led_triggers: Number of triggers in @phy_led_triggers
+ * @led_link_trigger: LED trigger for link up/down
+ * @last_triggered: last LED trigger for link speed
+ * @master_slave_set: User requested master/slave configuration
+ * @master_slave_get: Current master/slave advertisement
+ * @master_slave_state: Current master/slave configuration
+ * @mii_ts: Pointer to time stamper callbacks
+ * @psec: Pointer to Power Sourcing Equipment control struct
+ * @lock: Mutex for serializing access to the PHY
+ * @state_queue: Work queue for state machine
+ * @shared: Pointer to private data shared by phys in one package
+ * @priv: Pointer to driver private data
*
* interrupts currently only supports enabled or disabled,
* but could be changed in the future to support enabling
@@ -400,6 +629,9 @@ struct phy_device {
unsigned suspended_by_mdio_bus:1;
unsigned sysfs_links:1;
unsigned loopback_enabled:1;
+ unsigned downshifted_rate:1;
+ unsigned is_on_sfp_module:1;
+ unsigned mac_managed_pm:1;
unsigned autoneg:1;
/* The most recently read link state */
@@ -408,6 +640,10 @@ struct phy_device {
/* Interrupts are enabled */
unsigned interrupts:1;
+ unsigned irq_suspended:1;
+ unsigned irq_rerun:1;
+
+ int rate_matching;
enum phy_state state;
@@ -421,8 +657,12 @@ struct phy_device {
*/
int speed;
int duplex;
+ int port;
int pause;
int asym_pause;
+ u8 master_slave_get;
+ u8 master_slave_set;
+ u8 master_slave_state;
/* Union of PHY and Attached devices' supported link modes */
/* See ethtool.h for more info */
@@ -432,6 +672,9 @@ struct phy_device {
/* used with phy_speed_down */
__ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old);
+ /* Host supported PHY interface types. Should be ignored if empty. */
+ DECLARE_PHY_INTERFACE_MASK(host_interfaces);
+
/* Energy efficient ethernet modes which should be prohibited */
u32 eee_broken_modes;
@@ -453,6 +696,15 @@ struct phy_device {
/* For use by PHYs to maintain extra state */
void *priv;
+ /* shared data pointer */
+ /* For use by PHYs inside the same package that need a shared state. */
+ struct phy_package_shared *shared;
+
+ /* Reporting cable test results */
+ struct sk_buff *skb;
+ void *ehdr;
+ struct nlattr *nest;
+
/* Interrupt and Polling infrastructure */
struct delayed_work state_queue;
@@ -464,11 +716,14 @@ struct phy_device {
struct phylink *phylink;
struct net_device *attached_dev;
struct mii_timestamper *mii_ts;
+ struct pse_control *psec;
u8 mdix;
u8 mdix_ctrl;
- void (*phy_link_change)(struct phy_device *, bool up, bool do_carrier);
+ int pma_extable;
+
+ void (*phy_link_change)(struct phy_device *phydev, bool up);
void (*adjust_link)(struct net_device *dev);
#if IS_ENABLED(CONFIG_MACSEC)
@@ -476,21 +731,47 @@ struct phy_device {
const struct macsec_ops *macsec_ops;
#endif
};
-#define to_phy_device(d) container_of(to_mdio_device(d), \
- struct phy_device, mdio)
-/* struct phy_driver: Driver structure for a particular PHY type
+static inline struct phy_device *to_phy_device(const struct device *dev)
+{
+ return container_of(to_mdio_device(dev), struct phy_device, mdio);
+}
+
+/**
+ * struct phy_tdr_config - Configuration of a TDR raw test
*
- * driver_data: static driver data
- * phy_id: The result of reading the UID registers of this PHY
+ * @first: Distance for first data collection point
+ * @last: Distance for last data collection point
+ * @step: Step between data collection points
+ * @pair: Bitmap of cable pairs to collect data for
+ *
+ * A structure containing possible configuration parameters
+ * for a TDR cable test. The driver does not need to implement
+ * all the parameters, but should report what is actually used.
+ * All distances are in centimeters.
+ */
+struct phy_tdr_config {
+ u32 first;
+ u32 last;
+ u32 step;
+ s8 pair;
+};
+#define PHY_PAIR_ALL -1
+
+/**
+ * struct phy_driver - Driver structure for a particular PHY type
+ *
+ * @mdiodrv: Data common to all MDIO devices
+ * @phy_id: The result of reading the UID registers of this PHY
* type, and ANDing them with the phy_id_mask. This driver
* only works for PHYs with IDs which match this field
- * name: The friendly name of this PHY type
- * phy_id_mask: Defines the important bits of the phy_id
- * features: A mandatory list of features (speed, duplex, etc)
+ * @name: The friendly name of this PHY type
+ * @phy_id_mask: Defines the important bits of the phy_id
+ * @features: A mandatory list of features (speed, duplex, etc)
* supported by this PHY
- * flags: A bitfield defining certain other features this PHY
+ * @flags: A bitfield defining certain other features this PHY
* supports (like interrupts)
+ * @driver_data: Static driver data
*
* All functions are optional. If config_aneg or read_status
* are not implemented, the phy core uses the genphy versions.
@@ -509,138 +790,188 @@ struct phy_driver {
u32 flags;
const void *driver_data;
- /*
- * Called to issue a PHY software reset
+ /**
+ * @soft_reset: Called to issue a PHY software reset
*/
int (*soft_reset)(struct phy_device *phydev);
- /*
- * Called to initialize the PHY,
+ /**
+ * @config_init: Called to initialize the PHY,
* including after a reset
*/
int (*config_init)(struct phy_device *phydev);
- /*
- * Called during discovery. Used to set
+ /**
+ * @probe: Called during discovery. Used to set
* up device-specific structures, if any
*/
int (*probe)(struct phy_device *phydev);
- /*
- * Probe the hardware to determine what abilities it has.
- * Should only set phydev->supported.
+ /**
+ * @get_features: Probe the hardware to determine what
+ * abilities it has. Should only set phydev->supported.
*/
int (*get_features)(struct phy_device *phydev);
+ /**
+ * @get_rate_matching: Get the supported type of rate matching for a
+ * particular phy interface. This is used by phy consumers to determine
+ * whether to advertise lower-speed modes for that interface. It is
+ * assumed that if a rate matching mode is supported on an interface,
+ * then that interface's rate can be adapted to all slower link speeds
+ * supported by the phy. If iface is %PHY_INTERFACE_MODE_NA, and the phy
+ * supports any kind of rate matching for any interface, then it must
+ * return that rate matching mode (preferring %RATE_MATCH_PAUSE to
+ * %RATE_MATCH_CRS). If the interface is not supported, this should
+ * return %RATE_MATCH_NONE.
+ */
+ int (*get_rate_matching)(struct phy_device *phydev,
+ phy_interface_t iface);
+
/* PHY Power Management */
+ /** @suspend: Suspend the hardware, saving state if needed */
int (*suspend)(struct phy_device *phydev);
+ /** @resume: Resume the hardware, restoring state if needed */
int (*resume)(struct phy_device *phydev);
- /*
- * Configures the advertisement and resets
+ /**
+ * @config_aneg: Configures the advertisement and resets
* autonegotiation if phydev->autoneg is on,
* forces the speed to the current settings in phydev
* if phydev->autoneg is off
*/
int (*config_aneg)(struct phy_device *phydev);
- /* Determines the auto negotiation result */
+ /** @aneg_done: Determines the auto negotiation result */
int (*aneg_done)(struct phy_device *phydev);
- /* Determines the negotiated speed and duplex */
+ /** @read_status: Determines the negotiated speed and duplex */
int (*read_status)(struct phy_device *phydev);
- /* Clears any pending interrupts */
- int (*ack_interrupt)(struct phy_device *phydev);
-
- /* Enables or disables interrupts */
- int (*config_intr)(struct phy_device *phydev);
-
- /*
- * Checks if the PHY generated an interrupt.
- * For multi-PHY devices with shared PHY interrupt pin
- * Set interrupt bits have to be cleared.
+ /**
+ * @config_intr: Enables or disables interrupts.
+ * It should also clear any pending interrupts prior to enabling the
+ * IRQs and after disabling them.
*/
- int (*did_interrupt)(struct phy_device *phydev);
+ int (*config_intr)(struct phy_device *phydev);
- /* Override default interrupt handling */
- int (*handle_interrupt)(struct phy_device *phydev);
+ /** @handle_interrupt: Override default interrupt handling */
+ irqreturn_t (*handle_interrupt)(struct phy_device *phydev);
- /* Clears up any memory if needed */
+ /** @remove: Clears up any memory if needed */
void (*remove)(struct phy_device *phydev);
- /* Returns true if this is a suitable driver for the given
- * phydev. If NULL, matching is based on phy_id and
- * phy_id_mask.
+ /**
+ * @match_phy_device: Returns true if this is a suitable
+ * driver for the given phydev. If NULL, matching is based on
+ * phy_id and phy_id_mask.
*/
int (*match_phy_device)(struct phy_device *phydev);
- /* Some devices (e.g. qnap TS-119P II) require PHY register changes to
- * enable Wake on LAN, so set_wol is provided to be called in the
- * ethernet driver's set_wol function. */
+ /**
+ * @set_wol: Some devices (e.g. qnap TS-119P II) require PHY
+ * register changes to enable Wake on LAN, so set_wol is
+ * provided to be called in the ethernet driver's set_wol
+ * function.
+ */
int (*set_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol);
- /* See set_wol, but for checking whether Wake on LAN is enabled. */
+ /**
+ * @get_wol: See set_wol, but for checking whether Wake on LAN
+ * is enabled.
+ */
void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol);
- /*
- * Called to inform a PHY device driver when the core is about to
- * change the link state. This callback is supposed to be used as
- * fixup hook for drivers that need to take action when the link
- * state changes. Drivers are by no means allowed to mess with the
+ /**
+ * @link_change_notify: Called to inform a PHY device driver
+ * when the core is about to change the link state. This
+ * callback is supposed to be used as fixup hook for drivers
+ * that need to take action when the link state
+ * changes. Drivers are by no means allowed to mess with the
* PHY device structure in their implementations.
*/
void (*link_change_notify)(struct phy_device *dev);
- /*
- * Phy specific driver override for reading a MMD register.
- * This function is optional for PHY specific drivers. When
- * not provided, the default MMD read function will be used
- * by phy_read_mmd(), which will use either a direct read for
- * Clause 45 PHYs or an indirect read for Clause 22 PHYs.
- * devnum is the MMD device number within the PHY device,
- * regnum is the register within the selected MMD device.
+ /**
+ * @read_mmd: PHY specific driver override for reading a MMD
+ * register. This function is optional for PHY specific
+ * drivers. When not provided, the default MMD read function
+ * will be used by phy_read_mmd(), which will use either a
+ * direct read for Clause 45 PHYs or an indirect read for
+ * Clause 22 PHYs. devnum is the MMD device number within the
+ * PHY device, regnum is the register within the selected MMD
+ * device.
*/
int (*read_mmd)(struct phy_device *dev, int devnum, u16 regnum);
- /*
- * Phy specific driver override for writing a MMD register.
- * This function is optional for PHY specific drivers. When
- * not provided, the default MMD write function will be used
- * by phy_write_mmd(), which will use either a direct write for
- * Clause 45 PHYs, or an indirect write for Clause 22 PHYs.
- * devnum is the MMD device number within the PHY device,
- * regnum is the register within the selected MMD device.
- * val is the value to be written.
+ /**
+ * @write_mmd: PHY specific driver override for writing a MMD
+ * register. This function is optional for PHY specific
+ * drivers. When not provided, the default MMD write function
+ * will be used by phy_write_mmd(), which will use either a
+ * direct write for Clause 45 PHYs, or an indirect write for
+ * Clause 22 PHYs. devnum is the MMD device number within the
+ * PHY device, regnum is the register within the selected MMD
+ * device. val is the value to be written.
*/
int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum,
u16 val);
+ /** @read_page: Return the current PHY register page number */
int (*read_page)(struct phy_device *dev);
+ /** @write_page: Set the current PHY register page number */
int (*write_page)(struct phy_device *dev, int page);
- /* Get the size and type of the eeprom contained within a plug-in
- * module */
+ /**
+ * @module_info: Get the size and type of the eeprom contained
+ * within a plug-in module
+ */
int (*module_info)(struct phy_device *dev,
struct ethtool_modinfo *modinfo);
- /* Get the eeprom information from the plug-in module */
+ /**
+ * @module_eeprom: Get the eeprom information from the plug-in
+ * module
+ */
int (*module_eeprom)(struct phy_device *dev,
struct ethtool_eeprom *ee, u8 *data);
- /* Get statistics from the phy using ethtool */
+ /** @cable_test_start: Start a cable test */
+ int (*cable_test_start)(struct phy_device *dev);
+
+ /** @cable_test_tdr_start: Start a raw TDR cable test */
+ int (*cable_test_tdr_start)(struct phy_device *dev,
+ const struct phy_tdr_config *config);
+
+ /**
+ * @cable_test_get_status: Once per second, or on interrupt,
+ * request the status of the test.
+ */
+ int (*cable_test_get_status)(struct phy_device *dev, bool *finished);
+
+ /* Get statistics from the PHY using ethtool */
+ /** @get_sset_count: Number of statistic counters */
int (*get_sset_count)(struct phy_device *dev);
+ /** @get_strings: Names of the statistic counters */
void (*get_strings)(struct phy_device *dev, u8 *data);
+ /** @get_stats: Return the statistic counter values */
void (*get_stats)(struct phy_device *dev,
struct ethtool_stats *stats, u64 *data);
/* Get and Set PHY tunables */
+ /** @get_tunable: Return the value of a tunable */
int (*get_tunable)(struct phy_device *dev,
struct ethtool_tunable *tuna, void *data);
+ /** @set_tunable: Set the value of a tunable */
int (*set_tunable)(struct phy_device *dev,
struct ethtool_tunable *tuna,
const void *data);
+	/** @set_loopback: Set the loopback mode of the PHY */
int (*set_loopback)(struct phy_device *dev, bool enable);
+ /** @get_sqi: Get the signal quality indication */
+ int (*get_sqi)(struct phy_device *dev);
+ /** @get_sqi_max: Get the maximum signal quality indication */
+ int (*get_sqi_max)(struct phy_device *dev);
};
#define to_phy_driver(d) container_of(to_mdio_common_driver(d), \
struct phy_driver, mdiodrv)
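To make the reworked interrupt contract above concrete (ack_interrupt and did_interrupt folded into config_intr and handle_interrupt), here is a minimal driver sketch. It assumes a hypothetical PHY with a latched, clear-on-read status register; the EXAMPLE_* names are made up for illustration:

#define EXAMPLE_INTR_STATUS	0x13	/* latched, cleared on read */
#define EXAMPLE_INTR_MASK	0x14
#define EXAMPLE_INTR_LINK	BIT(1)

static int example_config_intr(struct phy_device *phydev)
{
	int ret;

	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
		/* clear pending interrupts before enabling them */
		ret = phy_read(phydev, EXAMPLE_INTR_STATUS);
		if (ret < 0)
			return ret;

		return phy_write(phydev, EXAMPLE_INTR_MASK,
				 EXAMPLE_INTR_LINK);
	}

	/* disable first, then clear anything still pending */
	ret = phy_write(phydev, EXAMPLE_INTR_MASK, 0);
	if (ret < 0)
		return ret;

	ret = phy_read(phydev, EXAMPLE_INTR_STATUS);
	return ret < 0 ? ret : 0;
}

static irqreturn_t example_handle_interrupt(struct phy_device *phydev)
{
	int irq_status = phy_read(phydev, EXAMPLE_INTR_STATUS);

	if (irq_status < 0) {
		phy_error(phydev);
		return IRQ_NONE;
	}

	if (!(irq_status & EXAMPLE_INTR_LINK))
		return IRQ_NONE;

	phy_trigger_machine(phydev);
	return IRQ_HANDLED;
}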
@@ -663,6 +994,9 @@ struct phy_fixup {
const char *phy_speed_to_str(int speed);
const char *phy_duplex_to_str(unsigned int duplex);
+const char *phy_rate_matching_to_str(int rate_matching);
+
+int phy_interface_num_ports(phy_interface_t interface);
/* A structure for mapping a particular speed and duplex
* combination to a particular SUPPORTED and ADVERTISED value
@@ -693,6 +1027,7 @@ static inline bool phy_is_started(struct phy_device *phydev)
void phy_resolve_aneg_pause(struct phy_device *phydev);
void phy_resolve_aneg_linkmode(struct phy_device *phydev);
+void phy_check_downshift(struct phy_device *phydev);
/**
* phy_read - Convenience function for reading a given PHY register
@@ -708,6 +1043,19 @@ static inline int phy_read(struct phy_device *phydev, u32 regnum)
return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum);
}
+#define phy_read_poll_timeout(phydev, regnum, val, cond, sleep_us, \
+ timeout_us, sleep_before_read) \
+({ \
+ int __ret = read_poll_timeout(phy_read, val, (cond) || val < 0, \
+ sleep_us, timeout_us, sleep_before_read, phydev, regnum); \
+ if (val < 0) \
+ __ret = val; \
+ if (__ret) \
+ phydev_err(phydev, "%s failed: %d\n", __func__, __ret); \
+ __ret; \
+})
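A minimal usage sketch of phy_read_poll_timeout(), grounded in the macro just above: wait for a BMCR soft reset to self-clear (example_wait_reset is a hypothetical helper; MII_BMCR and BMCR_RESET come from linux/mii.h):

static int example_wait_reset(struct phy_device *phydev)
{
	int val;

	/* poll every 1ms, time out after 100ms, no sleep before 1st read */
	return phy_read_poll_timeout(phydev, MII_BMCR, val,
				     !(val & BMCR_RESET),
				     1000, 100000, false);
}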
+
/**
* __phy_read - convenience function for reading a given PHY register
* @phydev: the phy_device struct
@@ -750,48 +1098,76 @@ static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val)
}
/**
+ * __phy_modify_changed() - Convenience function for modifying a PHY register
+ * @phydev: a pointer to a &struct phy_device
+ * @regnum: register number
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Unlocked helper function which allows a PHY register to be modified as
+ * new register value = (old register value & ~mask) | set
+ *
+ * Returns negative errno, 0 if there was no change, and 1 in case of change
+ */
+static inline int __phy_modify_changed(struct phy_device *phydev, u32 regnum,
+ u16 mask, u16 set)
+{
+ return __mdiobus_modify_changed(phydev->mdio.bus, phydev->mdio.addr,
+ regnum, mask, set);
+}
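A short sketch of the three-way return convention, assuming the caller already holds the MDIO bus lock as required for the __-prefixed accessors (example_restart_aneg_if_needed is hypothetical; __phy_modify() is the sibling helper declared elsewhere in this header):

static int example_restart_aneg_if_needed(struct phy_device *phydev)
{
	int ret;

	ret = __phy_modify_changed(phydev, MII_BMCR,
				   BMCR_ANENABLE, BMCR_ANENABLE);
	if (ret < 0)
		return ret;	/* MDIO error */
	if (ret == 0)
		return 0;	/* already enabled, nothing changed */

	/* ret == 1: we just enabled autoneg, so kick off a restart */
	return __phy_modify(phydev, MII_BMCR, 0, BMCR_ANRESTART);
}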
+
+/*
* phy_read_mmd - Convenience function for reading a register
* from an MMD on a given PHY.
- * @phydev: The phy_device struct
- * @devad: The MMD to read from
- * @regnum: The register on the MMD to read
- *
- * Same rules as for phy_read();
*/
int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
/**
- * __phy_read_mmd - Convenience function for reading a register
- * from an MMD on a given PHY.
+ * phy_read_mmd_poll_timeout - Periodically poll a PHY register until a
+ * condition is met or a timeout occurs
+ *
* @phydev: The phy_device struct
- * @devad: The MMD to read from
+ * @devaddr: The MMD to read from
* @regnum: The register on the MMD to read
- *
- * Same rules as for __phy_read();
+ * @val: Variable to read the register into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0
+ * tight-loops). Should be less than ~20ms since usleep_range
+ * is used (see Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ * @sleep_before_read: if true, sleep @sleep_us before the first read.
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last value read is stored in @val. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
+ */
+#define phy_read_mmd_poll_timeout(phydev, devaddr, regnum, val, cond, \
+ sleep_us, timeout_us, sleep_before_read) \
+({ \
+ int __ret = read_poll_timeout(phy_read_mmd, val, (cond) || val < 0, \
+ sleep_us, timeout_us, sleep_before_read, \
+ phydev, devaddr, regnum); \
+ if (val < 0) \
+ __ret = val; \
+ if (__ret) \
+ phydev_err(phydev, "%s failed: %d\n", __func__, __ret); \
+ __ret; \
+})
+
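The MMD variant follows the same pattern; a hedged sketch waiting for a PMA/PMD reset to complete (example_wait_pma_reset is hypothetical; the MDIO_* constants are from linux/mdio.h):

static int example_wait_pma_reset(struct phy_device *phydev)
{
	int val;

	/* sleep 5ms before the first read, then poll up to 100ms */
	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1,
					 val, !(val & MDIO_CTRL1_RESET),
					 5000, 100000, true);
}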
+/*
+ * __phy_read_mmd - Convenience function for reading a register
+ * from an MMD on a given PHY.
*/
int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
-/**
+/*
* phy_write_mmd - Convenience function for writing a register
* on an MMD on a given PHY.
- * @phydev: The phy_device struct
- * @devad: The MMD to write to
- * @regnum: The register on the MMD to read
- * @val: value to write to @regnum
- *
- * Same rules as for phy_write();
*/
int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
-/**
+/*
* __phy_write_mmd - Convenience function for writing a register
* on an MMD on a given PHY.
- * @phydev: The phy_device struct
- * @devad: The MMD to write to
- * @regnum: The register on the MMD to read
- * @val: value to write to @regnum
- *
- * Same rules as for __phy_write();
*/
int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
@@ -925,11 +1301,11 @@ static inline int phy_clear_bits_mmd(struct phy_device *phydev, int devad,
* @phydev: the phy_device struct
*
* NOTE: must be kept in sync with addition/removal of PHY_POLL and
- * PHY_IGNORE_INTERRUPT
+ * PHY_MAC_INTERRUPT
*/
static inline bool phy_interrupt_is_valid(struct phy_device *phydev)
{
- return phydev->irq != PHY_POLL && phydev->irq != PHY_IGNORE_INTERRUPT;
+ return phydev->irq != PHY_POLL && phydev->irq != PHY_MAC_INTERRUPT;
}
/**
@@ -939,6 +1315,10 @@ static inline bool phy_interrupt_is_valid(struct phy_device *phydev)
*/
static inline bool phy_polling_mode(struct phy_device *phydev)
{
+ if (phydev->state == PHY_CABLETEST)
+ if (phydev->drv->flags & PHY_POLL_CABLE_TEST)
+ return true;
+
return phydev->irq == PHY_POLL;
}
@@ -1012,9 +1392,18 @@ static inline bool phy_is_internal(struct phy_device *phydev)
}
/**
+ * phy_on_sfp - Convenience function for testing if a PHY is on an SFP module
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_on_sfp(struct phy_device *phydev)
+{
+ return phydev->is_on_sfp_module;
+}
+
+/**
* phy_interface_mode_is_rgmii - Convenience function for testing if a
* PHY interface mode is RGMII (all variants)
- * @mode: the phy_interface_t enum
+ * @mode: the &phy_interface_t enum
*/
static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode)
{
@@ -1023,11 +1412,11 @@ static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode)
};
/**
- * phy_interface_mode_is_8023z() - does the phy interface mode use 802.3z
+ * phy_interface_mode_is_8023z() - does the PHY interface mode use 802.3z
* negotiation
* @mode: one of &enum phy_interface_t
*
- * Returns true if the phy interface mode uses the 16-bit negotiation
+ * Returns true if the PHY interface mode uses the 16-bit negotiation
* word as defined in 802.3z. (See 802.3-2015 37.2.1 Config_Reg encoding)
*/
static inline bool phy_interface_mode_is_8023z(phy_interface_t mode)
@@ -1046,7 +1435,7 @@ static inline bool phy_interface_is_rgmii(struct phy_device *phydev)
return phy_interface_mode_is_rgmii(phydev->interface);
};
-/*
+/**
* phy_is_pseudo_fixed_link - Convenience function for testing if this
* PHY is the CPU port facing side of an Ethernet switch, or similar.
* @phydev: the phy_device struct
@@ -1070,10 +1459,42 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
bool is_c45,
struct phy_c45_device_ids *c45_ids);
#if IS_ENABLED(CONFIG_PHYLIB)
+int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id);
+struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode);
+struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode);
+struct phy_device *device_phy_find_device(struct device *dev);
+struct fwnode_handle *fwnode_get_phy_node(struct fwnode_handle *fwnode);
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
int phy_device_register(struct phy_device *phy);
void phy_device_free(struct phy_device *phydev);
#else
+static inline int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id)
+{
+ return 0;
+}
+static inline
+struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode)
+{
+	return NULL;
+}
+
+static inline
+struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode)
+{
+ return NULL;
+}
+
+static inline struct phy_device *device_phy_find_device(struct device *dev)
+{
+ return NULL;
+}
+
+static inline
+struct fwnode_handle *fwnode_get_phy_node(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
static inline
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
{
@@ -1088,6 +1509,7 @@ static inline int phy_device_register(struct phy_device *phy)
static inline void phy_device_free(struct phy_device *phydev) { }
#endif /* CONFIG_PHYLIB */
void phy_device_remove(struct phy_device *phydev);
+int phy_get_c45_ids(struct phy_device *phydev);
int phy_init_hw(struct phy_device *phydev);
int phy_suspend(struct phy_device *phydev);
int phy_resume(struct phy_device *phydev);
@@ -1112,6 +1534,7 @@ void phy_disconnect(struct phy_device *phydev);
void phy_detach(struct phy_device *phydev);
void phy_start(struct phy_device *phydev);
void phy_stop(struct phy_device *phydev);
+int phy_config_aneg(struct phy_device *phydev);
int phy_start_aneg(struct phy_device *phydev);
int phy_aneg_done(struct phy_device *phydev);
int phy_speed_down(struct phy_device *phydev, bool sync);
@@ -1120,6 +1543,34 @@ int phy_speed_up(struct phy_device *phydev);
int phy_restart_aneg(struct phy_device *phydev);
int phy_reset_after_clk_enable(struct phy_device *phydev);
+#if IS_ENABLED(CONFIG_PHYLIB)
+int phy_start_cable_test(struct phy_device *phydev,
+ struct netlink_ext_ack *extack);
+int phy_start_cable_test_tdr(struct phy_device *phydev,
+ struct netlink_ext_ack *extack,
+ const struct phy_tdr_config *config);
+#else
+static inline
+int phy_start_cable_test(struct phy_device *phydev,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG(extack, "Kernel not compiled with PHYLIB support");
+ return -EOPNOTSUPP;
+}
+static inline
+int phy_start_cable_test_tdr(struct phy_device *phydev,
+ struct netlink_ext_ack *extack,
+ const struct phy_tdr_config *config)
+{
+ NL_SET_ERR_MSG(extack, "Kernel not compiled with PHYLIB support");
+ return -EOPNOTSUPP;
+}
+#endif
+
+int phy_cable_test_result(struct phy_device *phydev, u8 pair, u16 result);
+int phy_cable_test_fault_length(struct phy_device *phydev, u8 pair,
+ u16 cm);
+
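To show how the cable-test driver hooks tie into these reporting helpers, a hedged sketch of a cable_test_get_status() implementation that reports an open pair A at 150cm (the hardware access is elided; the ETHTOOL_A_CABLE_* constants are from the ethtool netlink uAPI):

static int example_cable_test_get_status(struct phy_device *phydev,
					 bool *finished)
{
	int ret;

	/* a real driver would first poll hardware for completion */
	*finished = true;

	ret = phy_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
				    ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
	if (ret)
		return ret;

	return phy_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A,
					   150);
}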
static inline void phy_device_reset(struct phy_device *phydev, int value)
{
mdio_device_reset(&phydev->mdio, value);
@@ -1128,6 +1579,9 @@ static inline void phy_device_reset(struct phy_device *phydev, int value)
#define phydev_err(_phydev, format, args...) \
dev_err(&_phydev->mdio.dev, format, ##args)
+#define phydev_err_probe(_phydev, err, format, args...) \
+ dev_err_probe(&_phydev->mdio.dev, err, format, ##args)
+
#define phydev_info(_phydev, format, args...) \
dev_info(&_phydev->mdio.dev, format, ##args)
@@ -1170,24 +1624,18 @@ int genphy_update_link(struct phy_device *phydev);
int genphy_read_lpa(struct phy_device *phydev);
int genphy_read_status_fixed(struct phy_device *phydev);
int genphy_read_status(struct phy_device *phydev);
+int genphy_read_master_slave(struct phy_device *phydev);
int genphy_suspend(struct phy_device *phydev);
int genphy_resume(struct phy_device *phydev);
int genphy_loopback(struct phy_device *phydev, bool enable);
int genphy_soft_reset(struct phy_device *phydev);
+irqreturn_t genphy_handle_interrupt_no_ack(struct phy_device *phydev);
static inline int genphy_config_aneg(struct phy_device *phydev)
{
return __genphy_config_aneg(phydev, false);
}
-static inline int genphy_no_soft_reset(struct phy_device *phydev)
-{
- return 0;
-}
-static inline int genphy_no_ack_interrupt(struct phy_device *phydev)
-{
- return 0;
-}
static inline int genphy_no_config_intr(struct phy_device *phydev)
{
return 0;
@@ -1209,12 +1657,22 @@ int genphy_c45_read_link(struct phy_device *phydev);
int genphy_c45_read_lpa(struct phy_device *phydev);
int genphy_c45_read_pma(struct phy_device *phydev);
int genphy_c45_pma_setup_forced(struct phy_device *phydev);
+int genphy_c45_pma_baset1_setup_master_slave(struct phy_device *phydev);
int genphy_c45_an_config_aneg(struct phy_device *phydev);
int genphy_c45_an_disable_aneg(struct phy_device *phydev);
int genphy_c45_read_mdix(struct phy_device *phydev);
int genphy_c45_pma_read_abilities(struct phy_device *phydev);
+int genphy_c45_pma_baset1_read_master_slave(struct phy_device *phydev);
int genphy_c45_read_status(struct phy_device *phydev);
+int genphy_c45_baset1_read_status(struct phy_device *phydev);
int genphy_c45_config_aneg(struct phy_device *phydev);
+int genphy_c45_loopback(struct phy_device *phydev, bool enable);
+int genphy_c45_pma_resume(struct phy_device *phydev);
+int genphy_c45_pma_suspend(struct phy_device *phydev);
+int genphy_c45_fast_retrain(struct phy_device *phydev, bool enable);
+
+/* Generic C45 PHY driver */
+extern struct phy_driver genphy_c45_driver;
/* The gen10g_* functions are the old Clause 45 stub */
int gen10g_config_aneg(struct phy_device *phydev);
@@ -1235,8 +1693,10 @@ void phy_drivers_unregister(struct phy_driver *drv, int n);
int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
int phy_drivers_register(struct phy_driver *new_driver, int n,
struct module *owner);
+void phy_error(struct phy_device *phydev);
void phy_state_machine(struct work_struct *work);
void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies);
+void phy_trigger_machine(struct phy_device *phydev);
void phy_mac_interrupt(struct phy_device *phydev);
void phy_start_machine(struct phy_device *phydev);
void phy_stop_machine(struct phy_device *phydev);
@@ -1247,10 +1707,13 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd);
+int phy_disable_interrupts(struct phy_device *phydev);
void phy_request_interrupt(struct phy_device *phydev);
void phy_free_interrupt(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
-int phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
+int phy_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface);
+void phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode);
void phy_advertise_supported(struct phy_device *phydev);
void phy_support_sym_pause(struct phy_device *phydev);
@@ -1260,6 +1723,13 @@ void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx,
void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx);
bool phy_validate_pause(struct phy_device *phydev,
struct ethtool_pauseparam *pp);
+void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause);
+
+s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
+ const int *delay_values, int size, bool is_rx);
+
+void phy_resolve_pause(unsigned long *local_adv, unsigned long *partner_adv,
+ bool *tx_pause, bool *rx_pause);
int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
int (*run)(struct phy_device *));
@@ -1284,56 +1754,82 @@ int phy_ethtool_get_link_ksettings(struct net_device *ndev,
int phy_ethtool_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *cmd);
int phy_ethtool_nway_reset(struct net_device *ndev);
+int phy_package_join(struct phy_device *phydev, int addr, size_t priv_size);
+void phy_package_leave(struct phy_device *phydev);
+int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
+ int addr, size_t priv_size);
#if IS_ENABLED(CONFIG_PHYLIB)
int __init mdio_bus_init(void);
void mdio_bus_exit(void);
#endif
-/* Inline function for use within net/core/ethtool.c (built-in) */
-static inline int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data)
+int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data);
+int phy_ethtool_get_sset_count(struct phy_device *phydev);
+int phy_ethtool_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data);
+
+static inline int phy_package_read(struct phy_device *phydev, u32 regnum)
{
- if (!phydev->drv)
- return -EIO;
+ struct phy_package_shared *shared = phydev->shared;
- mutex_lock(&phydev->lock);
- phydev->drv->get_strings(phydev, data);
- mutex_unlock(&phydev->lock);
+ if (!shared)
+ return -EIO;
- return 0;
+ return mdiobus_read(phydev->mdio.bus, shared->addr, regnum);
}
-static inline int phy_ethtool_get_sset_count(struct phy_device *phydev)
+static inline int __phy_package_read(struct phy_device *phydev, u32 regnum)
{
- int ret;
+ struct phy_package_shared *shared = phydev->shared;
- if (!phydev->drv)
+ if (!shared)
return -EIO;
- if (phydev->drv->get_sset_count &&
- phydev->drv->get_strings &&
- phydev->drv->get_stats) {
- mutex_lock(&phydev->lock);
- ret = phydev->drv->get_sset_count(phydev);
- mutex_unlock(&phydev->lock);
+ return __mdiobus_read(phydev->mdio.bus, shared->addr, regnum);
+}
- return ret;
- }
+static inline int phy_package_write(struct phy_device *phydev,
+ u32 regnum, u16 val)
+{
+ struct phy_package_shared *shared = phydev->shared;
- return -EOPNOTSUPP;
+ if (!shared)
+ return -EIO;
+
+ return mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val);
}
-static inline int phy_ethtool_get_stats(struct phy_device *phydev,
- struct ethtool_stats *stats, u64 *data)
+static inline int __phy_package_write(struct phy_device *phydev,
+ u32 regnum, u16 val)
{
- if (!phydev->drv)
+ struct phy_package_shared *shared = phydev->shared;
+
+ if (!shared)
return -EIO;
- mutex_lock(&phydev->lock);
- phydev->drv->get_stats(phydev, stats, data);
- mutex_unlock(&phydev->lock);
+ return __mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val);
+}
- return 0;
+static inline bool __phy_package_set_once(struct phy_device *phydev,
+ unsigned int b)
+{
+ struct phy_package_shared *shared = phydev->shared;
+
+ if (!shared)
+ return false;
+
+ return !test_and_set_bit(b, &shared->flags);
+}
+
+static inline bool phy_package_init_once(struct phy_device *phydev)
+{
+ return __phy_package_set_once(phydev, PHY_SHARED_F_INIT_DONE);
+}
+
+static inline bool phy_package_probe_once(struct phy_device *phydev)
+{
+ return __phy_package_set_once(phydev, PHY_SHARED_F_PROBE_DONE);
}
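A probe-time sketch of the shared-package helpers above, assuming a hypothetical quad-PHY package whose global registers are reachable through the PHY at bus address 0 (the 0x1f/0x0001 write is purely illustrative):

static int example_quad_phy_probe(struct phy_device *phydev)
{
	int ret;

	/* join the package; no shared private data is needed here */
	ret = devm_phy_package_join(&phydev->mdio.dev, phydev, 0, 0);
	if (ret)
		return ret;

	/* only the first of the four PHYs runs the global setup */
	if (phy_package_init_once(phydev))
		return phy_package_write(phydev, 0x1f, 0x0001);

	return 0;
}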
extern struct bus_type mdio_bus_type;
@@ -1358,8 +1854,9 @@ static inline int mdiobus_register_board_info(const struct mdio_board_info *i,
/**
- * module_phy_driver() - Helper macro for registering PHY drivers
+ * phy_module_driver() - Helper macro for registering PHY drivers
* @__phy_drivers: array of PHY drivers to register
+ * @__count: Number of members in the array
*
* Helper macro for PHY drivers which do not do anything special in module
* init/exit. Each module may only use this macro once, and calling it
diff --git a/include/linux/phy/omap_usb.h b/include/linux/phy/omap_usb.h
index 5973a6313529..e23b52df93ec 100644
--- a/include/linux/phy/omap_usb.h
+++ b/include/linux/phy/omap_usb.h
@@ -2,68 +2,14 @@
/*
* omap_usb.h -- omap usb2 phy header file
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2012-2020 Texas Instruments Incorporated - http://www.ti.com
* Author: Kishon Vijay Abraham I <kishon@ti.com>
*/
#ifndef __DRIVERS_OMAP_USB2_H
#define __DRIVERS_OMAP_USB2_H
-#include <linux/io.h>
-#include <linux/usb/otg.h>
-
-struct usb_dpll_params {
- u16 m;
- u8 n;
- u8 freq:3;
- u8 sd;
- u32 mf;
-};
-
-enum omap_usb_phy_type {
- TYPE_USB2, /* USB2_PHY, power down in CONTROL_DEV_CONF */
- TYPE_DRA7USB2, /* USB2 PHY, power and power_aux e.g. DRA7 */
- TYPE_AM437USB2, /* USB2 PHY, power e.g. AM437x */
-};
-
-struct omap_usb {
- struct usb_phy phy;
- struct phy_companion *comparator;
- void __iomem *pll_ctrl_base;
- void __iomem *phy_base;
- struct device *dev;
- struct device *control_dev;
- struct clk *wkupclk;
- struct clk *optclk;
- u8 flags;
- enum omap_usb_phy_type type;
- struct regmap *syscon_phy_power; /* ctrl. reg. acces */
- unsigned int power_reg; /* power reg. index within syscon */
- u32 mask;
- u32 power_on;
- u32 power_off;
-};
-
-struct usb_phy_data {
- const char *label;
- u8 flags;
- u32 mask;
- u32 power_on;
- u32 power_off;
-};
-
-/* Driver Flags */
-#define OMAP_USB2_HAS_START_SRP (1 << 0)
-#define OMAP_USB2_HAS_SET_VBUS (1 << 1)
-#define OMAP_USB2_CALIBRATE_FALSE_DISCONNECT (1 << 2)
-
-#define OMAP_DEV_PHY_PD BIT(0)
-#define OMAP_USB2_PHY_PD BIT(28)
-
-#define AM437X_USB2_PHY_PD BIT(0)
-#define AM437X_USB2_OTG_PD BIT(1)
-#define AM437X_USB2_OTGVDET_EN BIT(19)
-#define AM437X_USB2_OTGSESSEND_EN BIT(20)
+#include <linux/usb/phy_companion.h>
#define phy_to_omapusb(x) container_of((x), struct omap_usb, phy)
@@ -76,15 +22,4 @@ static inline int omap_usb2_set_comparator(struct phy_companion *comparator)
}
#endif
-static inline u32 omap_usb_readl(void __iomem *addr, unsigned offset)
-{
- return __raw_readl(addr + offset);
-}
-
-static inline void omap_usb_writel(void __iomem *addr, unsigned offset,
- u32 data)
-{
- __raw_writel(data, addr + offset);
-}
-
#endif /* __DRIVERS_OMAP_USB_H */
diff --git a/include/linux/phy/pcie.h b/include/linux/phy/pcie.h
new file mode 100644
index 000000000000..e7ac81764576
--- /dev/null
+++ b/include/linux/phy/pcie.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 Rockchip Electronics Co., Ltd.
+ */
+#ifndef __PHY_PCIE_H
+#define __PHY_PCIE_H
+
+#define PHY_MODE_PCIE_RC 20
+#define PHY_MODE_PCIE_EP 21
+#define PHY_MODE_PCIE_BIFURCATION 22
+
+#endif
diff --git a/include/linux/phy/phy-lvds.h b/include/linux/phy/phy-lvds.h
new file mode 100644
index 000000000000..09931d080a6d
--- /dev/null
+++ b/include/linux/phy/phy-lvds.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020,2022 NXP
+ */
+
+#ifndef __PHY_LVDS_H_
+#define __PHY_LVDS_H_
+
+/**
+ * struct phy_configure_opts_lvds - LVDS configuration set
+ * @bits_per_lane_and_dclk_cycle: Number of bits per lane per differential
+ * clock cycle.
+ * @differential_clk_rate: Clock rate, in Hertz, of the LVDS
+ * differential clock.
+ * @lanes: Number of active, consecutive,
+ * data lanes, starting from lane 0,
+ * used for the transmissions.
+ * @is_slave: Boolean, true if the phy is a slave
+ * which works together with a master
+ * phy to support dual link transmission,
+ *					false for a regular phy or a master phy.
+ *
+ * This structure is used to represent the configuration state of an LVDS phy.
+ */
+struct phy_configure_opts_lvds {
+ unsigned int bits_per_lane_and_dclk_cycle;
+ unsigned long differential_clk_rate;
+ unsigned int lanes;
+ bool is_slave;
+};
+
+#endif /* __PHY_LVDS_H_ */
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index bcee8eba62b3..b1413757fcc3 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -17,6 +17,7 @@
#include <linux/regulator/consumer.h>
#include <linux/phy/phy-dp.h>
+#include <linux/phy/phy-lvds.h>
#include <linux/phy/phy-mipi-dphy.h>
struct phy;
@@ -44,6 +45,12 @@ enum phy_mode {
PHY_MODE_DP
};
+enum phy_media {
+ PHY_MEDIA_DEFAULT,
+ PHY_MEDIA_SR,
+ PHY_MEDIA_DAC,
+};
+
/**
* union phy_configure_opts - Opaque generic phy configuration
*
@@ -51,10 +58,13 @@ enum phy_mode {
* the MIPI_DPHY phy mode.
* @dp: Configuration set applicable for phys supporting
* the DisplayPort protocol.
+ * @lvds: Configuration set applicable for phys supporting
+ * the LVDS phy mode.
*/
union phy_configure_opts {
struct phy_configure_opts_mipi_dphy mipi_dphy;
struct phy_configure_opts_dp dp;
+ struct phy_configure_opts_lvds lvds;
};
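A hedged sketch of handing an LVDS configuration to a generic PHY consumer via phy_configure(); the 4-lane, 7 bits per lane and clock cycle, 74.25MHz single-link values are purely illustrative:

static int example_configure_lvds(struct phy *phy)
{
	union phy_configure_opts opts = {
		.lvds = {
			.bits_per_lane_and_dclk_cycle = 7,
			.differential_clk_rate = 74250000,
			.lanes = 4,
			.is_slave = false,
		},
	};

	return phy_configure(phy, &opts);
}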
/**
@@ -64,6 +74,8 @@ union phy_configure_opts {
* @power_on: powering on the phy
* @power_off: powering off the phy
* @set_mode: set the mode of the phy
+ * @set_media: set the media type of the phy (optional)
+ * @set_speed: set the speed of the phy (optional)
* @reset: resetting the phy
* @calibrate: calibrate the phy
* @release: ops to be performed while the consumer relinquishes the PHY
@@ -75,6 +87,8 @@ struct phy_ops {
int (*power_on)(struct phy *phy);
int (*power_off)(struct phy *phy);
int (*set_mode)(struct phy *phy, enum phy_mode mode, int submode);
+ int (*set_media)(struct phy *phy, enum phy_media media);
+ int (*set_speed)(struct phy *phy, int speed);
/**
* @configure:
@@ -115,10 +129,12 @@ struct phy_ops {
/**
* struct phy_attrs - represents phy attributes
* @bus_width: Data path width implemented by PHY
+ * @max_link_rate: Maximum link rate supported by PHY (units to be decided by producer and consumer)
* @mode: PHY mode
*/
struct phy_attrs {
u32 bus_width;
+ u32 max_link_rate;
enum phy_mode mode;
};
@@ -213,6 +229,8 @@ int phy_power_off(struct phy *phy);
int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode);
#define phy_set_mode(phy, mode) \
phy_set_mode_ext(phy, mode, 0)
+int phy_set_media(struct phy *phy, enum phy_media media);
+int phy_set_speed(struct phy *phy, int speed);
int phy_configure(struct phy *phy, union phy_configure_opts *opts);
int phy_validate(struct phy *phy, enum phy_mode mode, int submode,
union phy_configure_opts *opts);
@@ -342,6 +360,20 @@ static inline int phy_set_mode_ext(struct phy *phy, enum phy_mode mode,
#define phy_set_mode(phy, mode) \
phy_set_mode_ext(phy, mode, 0)
+static inline int phy_set_media(struct phy *phy, enum phy_media media)
+{
+ if (!phy)
+ return 0;
+ return -ENODEV;
+}
+
+static inline int phy_set_speed(struct phy *phy, int speed)
+{
+ if (!phy)
+ return 0;
+ return -ENODEV;
+}
+
static inline enum phy_mode phy_get_mode(struct phy *phy)
{
return PHY_MODE_INVALID;
diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h
index 1235865e7e2c..70998e6dd6fd 100644
--- a/include/linux/phy/tegra/xusb.h
+++ b/include/linux/phy/tegra/xusb.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef PHY_TEGRA_XUSB_H
@@ -8,6 +8,7 @@
struct tegra_xusb_padctl;
struct device;
+enum usb_device_speed;
struct tegra_xusb_padctl *tegra_xusb_padctl_get(struct device *dev);
void tegra_xusb_padctl_put(struct tegra_xusb_padctl *padctl);
@@ -20,5 +21,16 @@ int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
unsigned int port, bool enable);
int tegra_xusb_padctl_set_vbus_override(struct tegra_xusb_padctl *padctl,
bool val);
+void tegra_phy_xusb_utmi_pad_power_on(struct phy *phy);
+void tegra_phy_xusb_utmi_pad_power_down(struct phy *phy);
int tegra_phy_xusb_utmi_port_reset(struct phy *phy);
+int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl,
+ unsigned int port);
+int tegra_xusb_padctl_enable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy,
+ enum usb_device_speed speed);
+int tegra_xusb_padctl_disable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy);
+int tegra_xusb_padctl_enable_phy_wake(struct tegra_xusb_padctl *padctl, struct phy *phy);
+int tegra_xusb_padctl_disable_phy_wake(struct tegra_xusb_padctl *padctl, struct phy *phy);
+bool tegra_xusb_padctl_remote_wake_detected(struct tegra_xusb_padctl *padctl, struct phy *phy);
+
#endif /* PHY_TEGRA_XUSB_H */
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index 52bc8e487ef7..1acafd86ab13 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -2,6 +2,8 @@
#ifndef __PHY_FIXED_H
#define __PHY_FIXED_H
+#include <linux/types.h>
+
struct fixed_phy_status {
int link;
int speed;
@@ -12,6 +14,7 @@ struct fixed_phy_status {
struct device_node;
struct gpio_desc;
+struct net_device;
#if IS_ENABLED(CONFIG_FIXED_PHY)
extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier);
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 523209e70947..3f01ac8017e0 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -12,16 +12,66 @@ struct net_device;
enum {
MLO_PAUSE_NONE,
- MLO_PAUSE_ASYM = BIT(0),
- MLO_PAUSE_SYM = BIT(1),
- MLO_PAUSE_RX = BIT(2),
- MLO_PAUSE_TX = BIT(3),
+ MLO_PAUSE_RX = BIT(0),
+ MLO_PAUSE_TX = BIT(1),
MLO_PAUSE_TXRX_MASK = MLO_PAUSE_TX | MLO_PAUSE_RX,
- MLO_PAUSE_AN = BIT(4),
+ MLO_PAUSE_AN = BIT(2),
MLO_AN_PHY = 0, /* Conventional PHY */
MLO_AN_FIXED, /* Fixed-link mode */
MLO_AN_INBAND, /* In-band protocol */
+
+ /* MAC_SYM_PAUSE and MAC_ASYM_PAUSE are used when configuring our
+ * autonegotiation advertisement. They correspond to the PAUSE and
+ * ASM_DIR bits defined by 802.3, respectively.
+ *
+ * The following table lists the values of tx_pause and rx_pause which
+ * might be requested in mac_link_up. The exact values depend on either
+	 * the results of autonegotiation (if MLO_PAUSE_AN is set) or user
+ * configuration (if MLO_PAUSE_AN is not set).
+ *
+ * MAC_SYM_PAUSE MAC_ASYM_PAUSE MLO_PAUSE_AN tx_pause/rx_pause
+ * ============= ============== ============ ==================
+ * 0 0 0 0/0
+ * 0 0 1 0/0
+ * 0 1 0 0/0, 0/1, 1/0, 1/1
+ * 0 1 1 0/0, 1/0
+ * 1 0 0 0/0, 1/1
+ * 1 0 1 0/0, 1/1
+ * 1 1 0 0/0, 0/1, 1/0, 1/1
+ * 1 1 1 0/0, 0/1, 1/1
+ *
+ * If you set MAC_ASYM_PAUSE, the user may request any combination of
+ * tx_pause and rx_pause. You do not have to support these
+ * combinations.
+ *
+ * However, you should support combinations of tx_pause and rx_pause
+	 * which might be the result of autonegotiation. For example, don't set
+ * MAC_SYM_PAUSE unless your device can support tx_pause and rx_pause
+ * at the same time.
+ */
+ MAC_SYM_PAUSE = BIT(0),
+ MAC_ASYM_PAUSE = BIT(1),
+ MAC_10HD = BIT(2),
+ MAC_10FD = BIT(3),
+ MAC_10 = MAC_10HD | MAC_10FD,
+ MAC_100HD = BIT(4),
+ MAC_100FD = BIT(5),
+ MAC_100 = MAC_100HD | MAC_100FD,
+ MAC_1000HD = BIT(6),
+ MAC_1000FD = BIT(7),
+ MAC_1000 = MAC_1000HD | MAC_1000FD,
+ MAC_2500FD = BIT(8),
+ MAC_5000FD = BIT(9),
+ MAC_10000FD = BIT(10),
+ MAC_20000FD = BIT(11),
+ MAC_25000FD = BIT(12),
+ MAC_40000FD = BIT(13),
+ MAC_50000FD = BIT(14),
+ MAC_56000FD = BIT(15),
+ MAC_100000FD = BIT(16),
+ MAC_200000FD = BIT(17),
+ MAC_400000FD = BIT(18),
};
static inline bool phylink_autoneg_inband(unsigned int mode)
@@ -38,6 +88,10 @@ static inline bool phylink_autoneg_inband(unsigned int mode)
* @speed: link speed, one of the SPEED_* constants.
* @duplex: link duplex mode, one of DUPLEX_* constants.
* @pause: link pause state, described by MLO_PAUSE_* constants.
+ * @rate_matching: rate matching being performed, one of the RATE_MATCH_*
+ * constants. If rate matching is taking place, then the speed/duplex of
+ * the medium link mode (@speed and @duplex) and the speed/duplex of the phy
+ * interface mode (@interface) are different.
* @link: true if the link is up.
* @an_enabled: true if autonegotiation is enabled/desired.
* @an_complete: true if autonegotiation has completed.
@@ -49,6 +103,7 @@ struct phylink_link_state {
int speed;
int duplex;
int pause;
+ int rate_matching;
unsigned int link:1;
unsigned int an_enabled:1;
unsigned int an_complete:1;
@@ -63,19 +118,39 @@ enum phylink_op_type {
* struct phylink_config - PHYLINK configuration structure
* @dev: a pointer to a struct device associated with the MAC
* @type: operation type of PHYLINK instance
- * @pcs_poll: MAC PCS cannot provide link change interrupt
+ * @legacy_pre_march2020: driver has not been updated for March 2020 updates
+ *	(See commit 7cceb599d15d ("net: phylink: avoid mac_config calls"))
+ * @poll_fixed_state: if true, start link_poll when the MAC link is
+ *	in %MLO_AN_FIXED mode.
+ * @mac_managed_pm: if true, indicate the MAC driver is responsible for PHY PM.
+ * @ovr_an_inband: if true, override PCS to MLO_AN_INBAND
+ * @get_fixed_state: callback to execute to determine the fixed link state,
+ * if MAC link is at %MLO_AN_FIXED mode.
+ * @supported_interfaces: bitmap describing which PHY_INTERFACE_MODE_xxx
+ * are supported by the MAC/PCS.
+ * @mac_capabilities: MAC pause/speed/duplex capabilities.
*/
struct phylink_config {
struct device *dev;
enum phylink_op_type type;
- bool pcs_poll;
+ bool legacy_pre_march2020;
+ bool poll_fixed_state;
+ bool mac_managed_pm;
+ bool ovr_an_inband;
+ void (*get_fixed_state)(struct phylink_config *config,
+ struct phylink_link_state *state);
+ DECLARE_PHY_INTERFACE_MASK(supported_interfaces);
+ unsigned long mac_capabilities;
};
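As a sketch of how a MAC driver might fill this structure in under the new capabilities model (the RGMII/1G feature set chosen here is illustrative, not prescriptive):

static void example_fill_phylink_config(struct phylink_config *config,
					struct device *dev)
{
	config->dev = dev;
	config->type = PHYLINK_NETDEV;
	config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
				   MAC_10 | MAC_100 | MAC_1000FD;

	__set_bit(PHY_INTERFACE_MODE_RGMII, config->supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_RGMII_ID, config->supported_interfaces);
}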
/**
* struct phylink_mac_ops - MAC operations structure.
* @validate: Validate and update the link configuration.
+ * @mac_select_pcs: Select a PCS for the interface mode.
* @mac_pcs_get_state: Read the current link state from the hardware.
+ * @mac_prepare: prepare for a major reconfiguration of the interface.
* @mac_config: configure the MAC for the selected mode and state.
+ * @mac_finish: finish a major reconfiguration of the interface.
* @mac_an_restart: restart 802.3z BaseX autonegotiation.
* @mac_link_down: take the link down.
* @mac_link_up: allow the link to come up.
@@ -86,16 +161,23 @@ struct phylink_mac_ops {
void (*validate)(struct phylink_config *config,
unsigned long *supported,
struct phylink_link_state *state);
+ struct phylink_pcs *(*mac_select_pcs)(struct phylink_config *config,
+ phy_interface_t interface);
void (*mac_pcs_get_state)(struct phylink_config *config,
struct phylink_link_state *state);
+ int (*mac_prepare)(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface);
void (*mac_config)(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state);
+ int (*mac_finish)(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface);
void (*mac_an_restart)(struct phylink_config *config);
void (*mac_link_down)(struct phylink_config *config, unsigned int mode,
phy_interface_t interface);
- void (*mac_link_up)(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phy);
+ void (*mac_link_up)(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
+ bool tx_pause, bool rx_pause);
};
#if 0 /* For kernel-doc purposes only. */
@@ -114,19 +196,35 @@ struct phylink_mac_ops {
* clearing unsupported speeds and duplex settings. The port modes
* should not be cleared; phylink_set_port_modes() will help with this.
*
- * If the @state->interface mode is %PHY_INTERFACE_MODE_1000BASEX
- * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode
- * based on @state->advertising and/or @state->speed and update
- * @state->interface accordingly. See phylink_helper_basex_speed().
+ * When @config->supported_interfaces has been set, phylink will iterate
+ * over the supported interfaces to determine the full capability of the
+ * MAC. The validation function must not print errors if @state->interface
+ * is set to an unexpected value.
*
- * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink expects the
- * MAC driver to return all supported link modes.
+ * When @config->supported_interfaces is empty, phylink will call this
+ * function with @state->interface set to %PHY_INTERFACE_MODE_NA, and
+ * expects the MAC driver to return all supported link modes.
*
* If the @state->interface mode is not supported, then the @supported
* mask must be cleared.
*/
void validate(struct phylink_config *config, unsigned long *supported,
struct phylink_link_state *state);
+/**
+ * mac_select_pcs: Select a PCS for the interface mode.
+ * @config: a pointer to a &struct phylink_config.
+ * @interface: PHY interface mode for PCS
+ *
+ * Return the &struct phylink_pcs for the specified interface mode, or
+ * NULL if none is required, or an error pointer on error.
+ *
+ * This must not modify any state. It is used to query which PCS should
+ * be used. Phylink will use this during validation to ensure that the
+ * configuration is valid, and when setting a configuration to internally
+ * set the PCS that will be used.
+ */
+struct phylink_pcs *mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface);
/**
* mac_pcs_get_state() - Read the current inband link state from the hardware
@@ -139,11 +237,40 @@ void validate(struct phylink_config *config, unsigned long *supported,
* negotiation completion state in @state->an_complete, and link up state
* in @state->link. If possible, @state->lp_advertising should also be
* populated.
+ *
+ * Note: This is a legacy method. This function will not be called unless
+ * legacy_pre_march2020 is set in &struct phylink_config and there is no
+ * PCS attached.
*/
void mac_pcs_get_state(struct phylink_config *config,
struct phylink_link_state *state);
/**
+ * mac_prepare() - prepare to change the PHY interface mode
+ * @config: a pointer to a &struct phylink_config.
+ * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @iface: interface mode to switch to
+ *
+ * phylink will call this method at the beginning of a full initialisation
+ * of the link, which includes changing the interface mode or at initial
+ * startup time. It may be called for the current mode. The MAC driver
+ * should perform whatever actions are required, e.g. disabling the
+ * Serdes PHY.
+ *
+ * This will be the first call in the sequence:
+ * - mac_prepare()
+ * - mac_config()
+ * - pcs_config()
+ * - possible pcs_an_restart()
+ * - mac_finish()
+ *
+ * Returns zero on success, or negative errno on failure which will be
+ * reported to the kernel log.
+ */
+int mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface);
+
+/**
* mac_config() - configure the MAC for the selected mode and state
* @config: a pointer to a &struct phylink_config.
* @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
@@ -154,15 +281,42 @@ void mac_pcs_get_state(struct phylink_config *config,
* guaranteed to be correct, and so any mac_config() implementation must
* never reference these fields.
*
+ * Note: For legacy March 2020 drivers (drivers with legacy_pre_march2020 set
+ * in their &phylink_config and which don't have a PCS), this function will be
+ * called on each link up event, and to also change the in-band advert. For
+ * non-legacy drivers, it will only be called to reconfigure the MAC for a
+ * "major" change in e.g. interface mode. It will not be called for changes
+ * in speed, duplex or pause modes or to change the in-band advertisement.
+ * In any case, it is strongly preferred that speed, duplex and pause settings
+ * are handled in the mac_link_up() method and not in this method.
+ *
+ * (this requires a rewrite - please refer to mac_link_up() for situations
+ * where the PCS and MAC are not tightly integrated.)
+ *
+ * In all negotiation modes, as defined by @mode, @state->pause indicates the
+ * pause settings which should be applied as follows. If %MLO_PAUSE_AN is not
+ * set, %MLO_PAUSE_TX and %MLO_PAUSE_RX indicate whether the MAC should send
+ * pause frames and/or act on received pause frames respectively. Otherwise,
+ * the results of in-band negotiation/status from the MAC PCS should be used
+ * to control the MAC pause mode settings.
+ *
* The action performed depends on the currently selected mode:
*
* %MLO_AN_FIXED, %MLO_AN_PHY:
- * Configure the specified @state->speed, @state->duplex and
- * @state->pause (%MLO_PAUSE_TX / %MLO_PAUSE_RX) modes over a link
- * specified by @state->interface. @state->advertising may be used,
- * but is not required. Other members of @state must be ignored.
+ * Configure for non-inband negotiation mode, where the link settings
+ * are completely communicated via mac_link_up(). The physical link
+ * protocol from the MAC is specified by @state->interface.
*
- * Valid state members: interface, speed, duplex, pause, advertising.
+ * @state->advertising may be used, but is not required.
+ *
+ * Older drivers (prior to the mac_link_up() change) may use @state->speed,
+ * @state->duplex and @state->pause to configure the MAC, but this is
+ * deprecated; such drivers should be converted to use mac_link_up().
+ *
+ * Other members of @state must be ignored.
+ *
+ * Valid state members: interface, advertising.
+ * Deprecated state members: speed, duplex, pause.
*
* %MLO_AN_INBAND:
* place the link in an inband negotiation mode (such as 802.3z
@@ -172,11 +326,14 @@ void mac_pcs_get_state(struct phylink_config *config,
* mac_pcs_get_state() callback. Changes in link state must be made
* by calling phylink_mac_change().
*
+ * Interface mode specific details are mentioned below.
+ *
* If in 802.3z mode, the link speed is fixed, dependent on the
- * @state->interface. Duplex is negotiated, and pause is advertised
- * according to @state->an_enabled, @state->pause and
- * @state->advertising flags. Beware of MACs which only support full
- * duplex at gigabit and higher speeds.
+ * @state->interface. Duplex and pause modes are negotiated via
+ * the in-band configuration word. Advertised pause modes are set
+ * according to the @state->an_enabled and @state->advertising
+ * flags. Beware of MACs which only support full duplex at gigabit
+ * and higher speeds.
*
* If in Cisco SGMII mode, the link speed and duplex mode are passed
* in the serial bitstream 16-bit configuration word, and the MAC
@@ -198,8 +355,29 @@ void mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state);
/**
+ * mac_finish() - finish a change of the PHY interface mode
+ * @config: a pointer to a &struct phylink_config.
+ * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @iface: interface mode to switch to
+ *
+ * phylink will call this if it called mac_prepare() to allow the MAC to
+ * complete any necessary steps after the MAC and PCS have been configured
+ * for the @mode and @iface. E.g. a MAC driver may wish to re-enable the
+ * Serdes PHY here if it was previously disabled by mac_prepare().
+ *
+ * Returns zero on success, or negative errno on failure which will be
+ * reported to the kernel log.
+ */
+int mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface);
+
+/**
* mac_an_restart() - restart 802.3z BaseX autonegotiation
* @config: a pointer to a &struct phylink_config.
+ *
+ * Note: This is a legacy method. This function will not be called unless
+ * legacy_pre_march2020 is set in &struct phylink_config and there is no
+ * PCS attached.
*/
void mac_an_restart(struct phylink_config *config);
@@ -220,38 +398,190 @@ void mac_link_down(struct phylink_config *config, unsigned int mode,
/**
* mac_link_up() - allow the link to come up
* @config: a pointer to a &struct phylink_config.
+ * @phy: any attached phy
* @mode: link autonegotiation mode
* @interface: link &typedef phy_interface_t mode
- * @phy: any attached phy
+ * @speed: link speed
+ * @duplex: link duplex
+ * @tx_pause: link transmit pause enablement status
+ * @rx_pause: link receive pause enablement status
*
- * If @mode is not an in-band negotiation mode (as defined by
- * phylink_autoneg_inband()), allow the link to come up. If @phy
- * is non-%NULL, configure Energy Efficient Ethernet by calling
+ * Configure the MAC for an established link.
+ *
+ * @speed, @duplex, @tx_pause and @rx_pause indicate the finalised link
+ * settings, and should be used to configure the MAC block appropriately
+ * where these settings are not automatically conveyed from the PCS block,
+ * or if in-band negotiation (as defined by phylink_autoneg_inband(@mode))
+ * is disabled.
+ *
+ * Note that when 802.3z in-band negotiation is in use, it is possible
+ * that the user wishes to override the pause settings, and this should
+ * be allowed when considering the implementation of this method.
+ *
+ * If in-band negotiation mode is disabled, allow the link to come up. If
+ * @phy is non-%NULL, configure Energy Efficient Ethernet by calling
* phy_init_eee() and perform appropriate MAC configuration for EEE.
* Interface type selection must be done in mac_config().
*/
-void mac_link_up(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phy);
+void mac_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause);
#endif
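Pulling the above together, a hedged sketch of a modern (non-legacy) MAC implementation and its ops table; struct example_mac, the EXAMPLE_* register layout and example_writel() are all hypothetical, and mac_config()/mac_link_down() are left as stubs:

#define EXAMPLE_MAC_CTRL	0x00
#define EXAMPLE_SPEED_1000	BIT(0)
#define EXAMPLE_FULL_DUPLEX	BIT(1)
#define EXAMPLE_TX_PAUSE	BIT(2)
#define EXAMPLE_RX_PAUSE	BIT(3)

struct example_mac {
	struct phylink_config phylink_config;
	void __iomem *regs;
};

static void example_writel(struct example_mac *mac, u32 reg, u32 val)
{
	writel(val, mac->regs + reg);
}

static void example_mac_config(struct phylink_config *config,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	/* nothing needed for a "major" reconfiguration in this sketch */
}

static void example_mac_link_down(struct phylink_config *config,
				  unsigned int mode, phy_interface_t interface)
{
	/* stop the MAC here */
}

static void example_mac_link_up(struct phylink_config *config,
				struct phy_device *phy, unsigned int mode,
				phy_interface_t interface, int speed,
				int duplex, bool tx_pause, bool rx_pause)
{
	struct example_mac *mac =
		container_of(config, struct example_mac, phylink_config);
	u32 ctrl = 0;

	/* program the finalised link settings into the MAC */
	if (speed == SPEED_1000)
		ctrl |= EXAMPLE_SPEED_1000;
	if (duplex == DUPLEX_FULL)
		ctrl |= EXAMPLE_FULL_DUPLEX;
	if (tx_pause)
		ctrl |= EXAMPLE_TX_PAUSE;
	if (rx_pause)
		ctrl |= EXAMPLE_RX_PAUSE;

	example_writel(mac, EXAMPLE_MAC_CTRL, ctrl);
}

static const struct phylink_mac_ops example_mac_ops = {
	.validate	= phylink_generic_validate,
	.mac_config	= example_mac_config,
	.mac_link_down	= example_mac_link_down,
	.mac_link_up	= example_mac_link_up,
};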
+struct phylink_pcs_ops;
+
+/**
+ * struct phylink_pcs - PHYLINK PCS instance
+ * @ops: a pointer to the &struct phylink_pcs_ops structure
+ * @poll: poll the PCS for link changes
+ *
+ * This structure is designed to be embedded within the PCS private data,
+ * and will be passed between phylink and the PCS.
+ */
+struct phylink_pcs {
+ const struct phylink_pcs_ops *ops;
+ bool poll;
+};
+
+/**
+ * struct phylink_pcs_ops - MAC PCS operations structure.
+ * @pcs_validate: validate the link configuration.
+ * @pcs_get_state: read the current MAC PCS link state from the hardware.
+ * @pcs_config: configure the MAC PCS for the selected mode and state.
+ * @pcs_an_restart: restart 802.3z BaseX autonegotiation.
+ * @pcs_link_up: program the PCS for the resolved link configuration
+ * (where necessary).
+ */
+struct phylink_pcs_ops {
+ int (*pcs_validate)(struct phylink_pcs *pcs, unsigned long *supported,
+ const struct phylink_link_state *state);
+ void (*pcs_get_state)(struct phylink_pcs *pcs,
+ struct phylink_link_state *state);
+ int (*pcs_config)(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac);
+ void (*pcs_an_restart)(struct phylink_pcs *pcs);
+ void (*pcs_link_up)(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex);
+};
+
+#if 0 /* For kernel-doc purposes only. */
+/**
+ * pcs_validate() - validate the link configuration.
+ * @pcs: a pointer to a &struct phylink_pcs.
+ * @supported: ethtool bitmask for supported link modes.
+ * @state: a const pointer to a &struct phylink_link_state.
+ *
+ * Validate the interface mode, and advertising's autoneg bit, removing any
+ * media ethtool link modes that would not be supportable from the supported
+ * mask. Phylink will propagate the changes to the advertising mask. See the
+ * &struct phylink_mac_ops validate() method.
+ *
+ * Returns -EINVAL if the interface mode/autoneg mode is not supported.
+ * Returns a positive non-zero value if the link state can be supported.
+ */
+int pcs_validate(struct phylink_pcs *pcs, unsigned long *supported,
+ const struct phylink_link_state *state);
+
+/**
+ * pcs_get_state() - Read the current inband link state from the hardware
+ * @pcs: a pointer to a &struct phylink_pcs.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * Read the current inband link state from the MAC PCS, reporting the
+ * current speed in @state->speed, duplex mode in @state->duplex, pause
+ * mode in @state->pause using the %MLO_PAUSE_RX and %MLO_PAUSE_TX bits,
+ * negotiation completion state in @state->an_complete, and link up state
+ * in @state->link. If possible, @state->lp_advertising should also be
+ * populated.
+ *
+ * When present, this overrides mac_pcs_get_state() in &struct
+ * phylink_mac_ops.
+ */
+void pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state);
+
+/**
+ * pcs_config() - Configure the PCS mode and advertisement
+ * @pcs: a pointer to a &struct phylink_pcs.
+ * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @interface: interface mode to be used
+ * @advertising: advertisement ethtool link mode mask
+ * @permit_pause_to_mac: permit forwarding pause resolution to MAC
+ *
+ * Configure the PCS for the operating mode, the interface mode, and set
+ * the advertisement mask. @permit_pause_to_mac indicates whether the
+ * hardware may forward the pause mode resolution to the MAC.
+ *
+ * When operating in %MLO_AN_INBAND, inband should always be enabled,
+ * otherwise inband should be disabled.
+ *
+ * For SGMII, there is no advertisement from the MAC side, the PCS should
+ * be programmed to acknowledge the inband word from the PHY.
+ *
+ * For 1000BASE-X, the advertisement should be programmed into the PCS.
+ *
+ * For most 10GBASE-R, there is no advertisement.
+ */
+int pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, const unsigned long *advertising,
+ bool permit_pause_to_mac);
+
+/**
+ * pcs_an_restart() - restart 802.3z BaseX autonegotiation
+ * @pcs: a pointer to a &struct phylink_pcs.
+ *
+ * When PCS ops are present, this overrides mac_an_restart() in &struct
+ * phylink_mac_ops.
+ */
+void pcs_an_restart(struct phylink_pcs *pcs);
+
+/**
+ * pcs_link_up() - program the PCS for the resolved link configuration
+ * @pcs: a pointer to a &struct phylink_pcs.
+ * @mode: link autonegotiation mode
+ * @interface: link &typedef phy_interface_t mode
+ * @speed: link speed
+ * @duplex: link duplex
+ *
+ * This call will be made just before mac_link_up() to inform the PCS of
+ * the resolved link parameters. For example, a PCS operating in SGMII
+ * mode without in-band AN needs to be manually configured with the link
+ * speed and duplex settings. Otherwise, this should be a no-op.
+ */
+void pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex);
+#endif
+
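Finally, a hedged sketch of embedding a struct phylink_pcs in driver private data and wiring up the PCS ops; the example_pcs type is hypothetical and all hardware access is elided:

struct example_pcs {
	struct phylink_pcs pcs;
};

static void example_pcs_get_state(struct phylink_pcs *pcs,
				  struct phylink_link_state *state)
{
	/* a real driver reads link/speed/duplex/pause from hardware */
	state->link = 0;
}

static int example_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
			      phy_interface_t interface,
			      const unsigned long *advertising,
			      bool permit_pause_to_mac)
{
	/* program inband mode and the advertisement word here */
	return 0;
}

static void example_pcs_an_restart(struct phylink_pcs *pcs)
{
	/* kick 802.3z autonegotiation here */
}

static const struct phylink_pcs_ops example_pcs_ops = {
	.pcs_get_state	= example_pcs_get_state,
	.pcs_config	= example_pcs_config,
	.pcs_an_restart	= example_pcs_an_restart,
};

static void example_pcs_init(struct example_pcs *epcs)
{
	epcs->pcs.ops = &example_pcs_ops;
	epcs->pcs.poll = true;	/* no interrupt, poll for link changes */
}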
+void phylink_caps_to_linkmodes(unsigned long *linkmodes, unsigned long caps);
+unsigned long phylink_get_capabilities(phy_interface_t interface,
+ unsigned long mac_capabilities,
+ int rate_matching);
+void phylink_generic_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state);
+
struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *,
phy_interface_t iface,
- const struct phylink_mac_ops *ops);
+ const struct phylink_mac_ops *mac_ops);
void phylink_destroy(struct phylink *);
int phylink_connect_phy(struct phylink *, struct phy_device *);
int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags);
+int phylink_fwnode_phy_connect(struct phylink *pl,
+ struct fwnode_handle *fwnode,
+ u32 flags);
void phylink_disconnect_phy(struct phylink *);
-int phylink_fixed_state_cb(struct phylink *,
- void (*cb)(struct net_device *dev,
- struct phylink_link_state *));
void phylink_mac_change(struct phylink *, bool up);
void phylink_start(struct phylink *);
void phylink_stop(struct phylink *);
+void phylink_suspend(struct phylink *pl, bool mac_wol);
+void phylink_resume(struct phylink *pl);
+
void phylink_ethtool_get_wol(struct phylink *, struct ethtool_wolinfo *);
int phylink_ethtool_set_wol(struct phylink *, struct ethtool_wolinfo *);
@@ -269,6 +599,8 @@ int phylink_init_eee(struct phylink *, bool);
int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *);
int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *);
int phylink_mii_ioctl(struct phylink *, struct ifreq *, int);
+int phylink_speed_down(struct phylink *pl, bool sync);
+int phylink_speed_up(struct phylink *pl);
#define phylink_zero(bm) \
bitmap_zero(bm, __ETHTOOL_LINK_MODE_MASK_NBITS)
@@ -280,6 +612,21 @@ int phylink_mii_ioctl(struct phylink *, struct ifreq *, int);
#define phylink_test(bm, mode) __phylink_do_bit(test_bit, bm, mode)
void phylink_set_port_modes(unsigned long *bits);
-void phylink_helper_basex_speed(struct phylink_link_state *state);
+void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state,
+ u16 bmsr, u16 lpa);
+void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
+ struct phylink_link_state *state);
+int phylink_mii_c22_pcs_encode_advertisement(phy_interface_t interface,
+ const unsigned long *advertising);
+int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising);
+void phylink_mii_c22_pcs_an_restart(struct mdio_device *pcs);
+
+void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs,
+ struct phylink_link_state *state);
+
+void phylink_decode_usxgmii_word(struct phylink_link_state *state,
+ uint16_t lpa);
#endif
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 998ae7d24450..343abf22092e 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -60,8 +60,10 @@ struct pid
{
refcount_t count;
unsigned int level;
+ spinlock_t lock;
/* lists of tasks that use this pid */
struct hlist_head tasks[PIDTYPE_MAX];
+ struct hlist_head inodes;
/* wait queue for pidfd notifications */
wait_queue_head_t wait_pidfd;
struct rcu_head rcu;
@@ -75,6 +77,9 @@ extern const struct file_operations pidfd_fops;
struct file;
extern struct pid *pidfd_pid(const struct file *file);
+struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags);
+struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags);
+int pidfd_create(struct pid *pid, unsigned int flags);
static inline struct pid *get_pid(struct pid *pid)
{
@@ -100,12 +105,16 @@ extern void attach_pid(struct task_struct *task, enum pid_type);
extern void detach_pid(struct task_struct *task, enum pid_type);
extern void change_pid(struct task_struct *task, enum pid_type,
struct pid *pid);
+extern void exchange_tids(struct task_struct *task, struct task_struct *old);
extern void transfer_pid(struct task_struct *old, struct task_struct *new,
enum pid_type);
struct pid_namespace;
extern struct pid_namespace init_pid_ns;
+extern int pid_max;
+extern int pid_max_min, pid_max_max;
+
/*
* look up a PID in the hash table. Must be called with the tasklist_lock
* or rcu_read_lock() held.
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 2ed6af88794b..07481bb87d4e 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -8,7 +8,6 @@
#include <linux/workqueue.h>
#include <linux/threads.h>
#include <linux/nsproxy.h>
-#include <linux/kref.h>
#include <linux/ns_common.h>
#include <linux/idr.h>
@@ -17,14 +16,7 @@
struct fs_pin;
-enum { /* definitions for pid_namespace's hide_pid field */
- HIDEPID_OFF = 0,
- HIDEPID_NO_ACCESS = 1,
- HIDEPID_INVISIBLE = 2,
-};
-
struct pid_namespace {
- struct kref kref;
struct idr idr;
struct rcu_head rcu;
unsigned int pid_allocated;
@@ -32,19 +24,11 @@ struct pid_namespace {
struct kmem_cache *pid_cachep;
unsigned int level;
struct pid_namespace *parent;
-#ifdef CONFIG_PROC_FS
- struct vfsmount *proc_mnt;
- struct dentry *proc_self;
- struct dentry *proc_thread_self;
-#endif
#ifdef CONFIG_BSD_PROCESS_ACCT
struct fs_pin *bacct;
#endif
struct user_namespace *user_ns;
struct ucounts *ucounts;
- struct work_struct proc_work;
- kgid_t pid_gid;
- int hide_pid;
int reboot; /* group exit code if this pidns was rebooted */
struct ns_common ns;
} __randomize_layout;
@@ -57,7 +41,7 @@ extern struct pid_namespace init_pid_ns;
static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
{
if (ns != &init_pid_ns)
- kref_get(&ns->kref);
+ refcount_inc(&ns->ns.count);
return ns;
}
@@ -102,4 +86,9 @@ extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk);
void pidhash_init(void);
void pid_idr_init(void);
+static inline bool task_is_in_init_pid_ns(struct task_struct *tsk)
+{
+ return task_active_pid_ns(tsk) == &init_pid_ns;
+}
+
#endif /* _LINUX_PID_NS_H */
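
task_is_in_init_pid_ns() replaces open-coded comparisons against &init_pid_ns. A minimal guard, sketched for a hypothetical privileged operation:

	if (!task_is_in_init_pid_ns(current))
		return -EPERM;	/* only tasks in the initial PID namespace */
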
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index 6aeb711f7cd1..2422211d6a5a 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -76,32 +76,35 @@ struct pinctrl_map;
* @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin.
* If the argument != 0, schmitt-trigger mode is enabled. If it's 0,
* schmitt-trigger mode is disabled.
- * @PIN_CONFIG_LOW_POWER_MODE: this will configure the pin for low power
+ * @PIN_CONFIG_MODE_LOW_POWER: this will configure the pin for low power
 * operation. If several modes of operation are supported, these can be
 * passed in the argument in a custom form; otherwise just use argument 1
 * to indicate low power mode and argument 0 to turn low power mode off.
+ * @PIN_CONFIG_MODE_PWM: this will configure the pin for PWM
+ * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a
+ * value on the line. Use argument 1 to indicate high level, argument 0 to
+ * indicate low level. (Please see Documentation/driver-api/pin-control.rst,
+ * section "GPIO mode pitfalls" for a discussion around this parameter.)
* @PIN_CONFIG_OUTPUT_ENABLE: this will enable the pin's output mode
 * without driving a value there. For most platforms this reduces to
 * enabling the output buffers and then letting the pin controller's
 * current configuration (e.g. the currently selected mux function)
 * drive values on the line. Use argument 1 to enable output mode,
 * argument 0 to disable it.
- * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a
- * value on the line. Use argument 1 to indicate high level, argument 0 to
- * indicate low level. (Please see Documentation/driver-api/pinctl.rst,
- * section "GPIO mode pitfalls" for a discussion around this parameter.)
+ * @PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS: this will configure the output impedance
+ * of the pin with the value passed as argument. The argument is in ohms.
+ * @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset
* @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
 * supplies, the argument to this parameter (in a custom format) tells
* the driver which alternative power source to use.
- * @PIN_CONFIG_SLEEP_HARDWARE_STATE: indicate this is sleep related state.
- * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to
- * this parameter (on a custom format) tells the driver which alternative
- * slew rate to use.
* @PIN_CONFIG_SKEW_DELAY: if the pin has programmable skew rate (on inputs)
* or latch delay (on outputs) this parameter (in a custom format)
* specifies the clock skew or latch delay. It typically controls how
* many double inverters are put in front of the line.
- * @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset
+ * @PIN_CONFIG_SLEEP_HARDWARE_STATE: indicate this is sleep related state.
+ * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to
+ * this parameter (in a custom format) tells the driver which alternative
+ * slew rate to use.
* @PIN_CONFIG_END: this is the last enumerator for pin configurations, if
* you need to pass in custom configurations to the pin controller, use
* PIN_CONFIG_END+1 as the base offset.
@@ -124,14 +127,16 @@ enum pin_config_param {
PIN_CONFIG_INPUT_ENABLE,
PIN_CONFIG_INPUT_SCHMITT,
PIN_CONFIG_INPUT_SCHMITT_ENABLE,
- PIN_CONFIG_LOW_POWER_MODE,
- PIN_CONFIG_OUTPUT_ENABLE,
+ PIN_CONFIG_MODE_LOW_POWER,
+ PIN_CONFIG_MODE_PWM,
PIN_CONFIG_OUTPUT,
+ PIN_CONFIG_OUTPUT_ENABLE,
+ PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS,
+ PIN_CONFIG_PERSIST_STATE,
PIN_CONFIG_POWER_SOURCE,
+ PIN_CONFIG_SKEW_DELAY,
PIN_CONFIG_SLEEP_HARDWARE_STATE,
PIN_CONFIG_SLEW_RATE,
- PIN_CONFIG_SKEW_DELAY,
- PIN_CONFIG_PERSIST_STATE,
PIN_CONFIG_END = 0x7F,
PIN_CONFIG_MAX = 0xFF,
};
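
The reordered enum keeps the entries alphabetical; only the names changed, so board tables need a rename rather than a rework. A sketch using the renamed parameters, assuming the PIN_CONF_PACKED() helper from this header:

static const unsigned long foo_sleep_configs[] = {
	PIN_CONF_PACKED(PIN_CONFIG_MODE_LOW_POWER, 1),
	PIN_CONF_PACKED(PIN_CONFIG_PERSIST_STATE, 1),
};
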
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 7ce23450a1cb..487117ccb1bc 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -27,6 +27,26 @@ struct gpio_chip;
struct device_node;
/**
+ * struct pingroup - provides information on pingroup
+ * @name: a name for pingroup
+ * @pins: an array of pins in the pingroup
+ * @npins: number of pins in the pingroup
+ */
+struct pingroup {
+ const char *name;
+ const unsigned int *pins;
+ size_t npins;
+};
+
+/* Convenience macro to define a single named or anonymous pingroup */
+#define PINCTRL_PINGROUP(_name, _pins, _npins) \
+(struct pingroup){ \
+ .name = _name, \
+ .pins = _pins, \
+ .npins = _npins, \
+}
+
+/**
* struct pinctrl_pin_desc - boards/machines provide information on their
* pins, pads or other muxable units in this struct
* @number: unique pin number from the global pin number space
@@ -51,8 +71,8 @@ struct pinctrl_pin_desc {
* @id: an ID number for the chip in this range
* @base: base offset of the GPIO range
* @pin_base: base pin number of the GPIO range if pins == NULL
- * @pins: enumeration of pins in GPIO range or NULL
* @npins: number of pins in the GPIO range, including the base number
+ * @pins: enumeration of pins in GPIO range or NULL
* @gc: an optional pointer to a gpio_chip
*/
struct pinctrl_gpio_range {
@@ -61,8 +81,8 @@ struct pinctrl_gpio_range {
unsigned int id;
unsigned int base;
unsigned int pin_base;
- unsigned const *pins;
unsigned int npins;
+ unsigned const *pins;
struct gpio_chip *gc;
};
@@ -186,7 +206,7 @@ extern int pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
const char *pin_group, const unsigned **pins,
unsigned *num_pins);
-#ifdef CONFIG_OF
+#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PINCTRL)
extern struct pinctrl_dev *of_pinctrl_get(struct device_node *np);
#else
static inline
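
struct pingroup and its PINCTRL_PINGROUP() macro give drivers a common shape for group tables. A sketch with made-up pin numbers:

#include <linux/kernel.h>		/* ARRAY_SIZE() */
#include <linux/pinctrl/pinctrl.h>

static const unsigned int foo_i2c_pins[] = { 4, 5 };
static const unsigned int foo_spi_pins[] = { 8, 9, 10, 11 };

static const struct pingroup foo_groups[] = {
	PINCTRL_PINGROUP("i2c0", foo_i2c_pins, ARRAY_SIZE(foo_i2c_pins)),
	PINCTRL_PINGROUP("spi0", foo_spi_pins, ARRAY_SIZE(foo_spi_pins)),
};
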
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index ae58fad7f1e0..6cb65df3e3ba 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -8,6 +8,11 @@
#define PIPE_BUF_FLAG_ATOMIC 0x02 /* was atomically mapped */
#define PIPE_BUF_FLAG_GIFT 0x04 /* page is a gift */
#define PIPE_BUF_FLAG_PACKET 0x08 /* read() as a packet */
+#define PIPE_BUF_FLAG_CAN_MERGE 0x10 /* can merge buffers */
+#define PIPE_BUF_FLAG_WHOLE 0x20 /* read() must return entire buffer or error */
+#ifdef CONFIG_WATCH_QUEUE
+#define PIPE_BUF_FLAG_LOSS 0x40 /* Message loss happened after this buffer */
+#endif
/**
* struct pipe_buffer - a linux kernel pipe buffer
@@ -33,18 +38,22 @@ struct pipe_buffer {
* @wr_wait: writer wait point in case of full pipe
* @head: The point of buffer production
* @tail: The point of buffer consumption
+ * @note_loss: The next read() should insert a data-lost message
* @max_usage: The maximum number of slots that may be used in the ring
* @ring_size: total number of buffers (should be a power of 2)
+ * @nr_accounted: The amount this pipe accounts for in user->pipe_bufs
* @tmp_page: cached released page
* @readers: number of current readers of this pipe
* @writers: number of current writers of this pipe
* @files: number of struct file referring this pipe (protected by ->i_lock)
* @r_counter: reader counter
* @w_counter: writer counter
+ * @poll_usage: is this pipe used for epoll, which has crazy wakeups?
* @fasync_readers: reader side fasync
* @fasync_writers: writer side fasync
* @bufs: the circular array of pipe buffers
* @user: the user who created this pipe
+ * @watch_queue: If this pipe is a watch_queue, this is the stuff for that
**/
struct pipe_inode_info {
struct mutex mutex;
@@ -53,27 +62,35 @@ struct pipe_inode_info {
unsigned int tail;
unsigned int max_usage;
unsigned int ring_size;
+#ifdef CONFIG_WATCH_QUEUE
+ bool note_loss;
+#endif
+ unsigned int nr_accounted;
unsigned int readers;
unsigned int writers;
unsigned int files;
unsigned int r_counter;
unsigned int w_counter;
+ bool poll_usage;
struct page *tmp_page;
struct fasync_struct *fasync_readers;
struct fasync_struct *fasync_writers;
struct pipe_buffer *bufs;
struct user_struct *user;
+#ifdef CONFIG_WATCH_QUEUE
+ struct watch_queue *watch_queue;
+#endif
};
/*
* Note on the nesting of these functions:
*
* ->confirm()
- * ->steal()
+ * ->try_steal()
*
- * That is, ->steal() must be called on a confirmed buffer.
- * See below for the meaning of each operation. Also see kerneldoc
- * in fs/pipe.c for the pipe and generic variants of these hooks.
+ * That is, ->try_steal() must be called on a confirmed buffer. See below for
+ * the meaning of each operation. Also see the kerneldoc in fs/pipe.c for the
+ * pipe and generic variants of these hooks.
*/
struct pipe_buf_operations {
/*
@@ -81,7 +98,7 @@ struct pipe_buf_operations {
* and that the contents are good. If the pages in the pipe belong
* to a file system, we may need to wait for IO completion in this
* hook. Returns 0 for good, or a negative error value in case of
- * error.
+	 * error. If not present, all pages are considered good.
*/
int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *);
@@ -93,13 +110,13 @@ struct pipe_buf_operations {
/*
* Attempt to take ownership of the pipe buffer and its contents.
- * ->steal() returns 0 for success, in which case the contents
- * of the pipe (the buf->page) is locked and now completely owned
- * by the caller. The page may then be transferred to a different
- * mapping, the most often used case is insertion into different
- * file address space cache.
+ * ->try_steal() returns %true for success, in which case the contents
+ * of the pipe (the buf->page) is locked and now completely owned by the
+ * caller. The page may then be transferred to a different mapping, the
+ * most often used case is insertion into different file address space
+ * cache.
*/
- int (*steal)(struct pipe_inode_info *, struct pipe_buffer *);
+ bool (*try_steal)(struct pipe_inode_info *, struct pipe_buffer *);
/*
* Get a reference to the pipe buffer.
@@ -140,26 +157,6 @@ static inline bool pipe_full(unsigned int head, unsigned int tail,
}
/**
- * pipe_space_for_user - Return number of slots available to userspace
- * @head: The pipe ring head pointer
- * @tail: The pipe ring tail pointer
- * @pipe: The pipe info structure
- */
-static inline unsigned int pipe_space_for_user(unsigned int head, unsigned int tail,
- struct pipe_inode_info *pipe)
-{
- unsigned int p_occupancy, p_space;
-
- p_occupancy = pipe_occupancy(head, tail);
- if (p_occupancy >= pipe->max_usage)
- return 0;
- p_space = pipe->ring_size - p_occupancy;
- if (p_space > pipe->max_usage)
- p_space = pipe->max_usage;
- return p_space;
-}
-
-/**
* pipe_buf_get - get a reference to a pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to get a reference to
@@ -194,18 +191,31 @@ static inline void pipe_buf_release(struct pipe_inode_info *pipe,
static inline int pipe_buf_confirm(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
+ if (!buf->ops->confirm)
+ return 0;
return buf->ops->confirm(pipe, buf);
}
/**
- * pipe_buf_steal - attempt to take ownership of a pipe_buffer
+ * pipe_buf_try_steal - attempt to take ownership of a pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to attempt to steal
*/
-static inline int pipe_buf_steal(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf)
+static inline bool pipe_buf_try_steal(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+{
+ if (!buf->ops->try_steal)
+ return false;
+ return buf->ops->try_steal(pipe, buf);
+}
+
+static inline void pipe_discard_from(struct pipe_inode_info *pipe,
+ unsigned int old_head)
{
- return buf->ops->steal(pipe, buf);
+ unsigned int mask = pipe->ring_size - 1;
+
+ while (pipe->head > old_head)
+ pipe_buf_release(pipe, &pipe->bufs[--pipe->head & mask]);
}
/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
@@ -217,29 +227,34 @@ void pipe_lock(struct pipe_inode_info *);
void pipe_unlock(struct pipe_inode_info *);
void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
-extern unsigned int pipe_max_size;
-extern unsigned long pipe_user_pages_hard;
-extern unsigned long pipe_user_pages_soft;
-
-/* Drop the inode semaphore and wait for a pipe event, atomically */
-void pipe_wait(struct pipe_inode_info *pipe);
+/* Wait for a pipe to be readable/writable while dropping the pipe lock */
+void pipe_wait_readable(struct pipe_inode_info *);
+void pipe_wait_writable(struct pipe_inode_info *);
struct pipe_inode_info *alloc_pipe_info(void);
void free_pipe_info(struct pipe_inode_info *);
/* Generic pipe buffer ops functions */
bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
-int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
-int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
-int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *);
+bool generic_pipe_buf_try_steal(struct pipe_inode_info *, struct pipe_buffer *);
void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
-void pipe_buf_mark_unmergeable(struct pipe_buffer *buf);
extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
+#ifdef CONFIG_WATCH_QUEUE
+unsigned long account_pipe_buffers(struct user_struct *user,
+ unsigned long old, unsigned long new);
+bool too_many_pipe_buffers_soft(unsigned long user_bufs);
+bool too_many_pipe_buffers_hard(unsigned long user_bufs);
+bool pipe_is_unprivileged_user(void);
+#endif
+
/* for F_SETPIPE_SZ and F_GETPIPE_SZ */
+#ifdef CONFIG_WATCH_QUEUE
+int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots);
+#endif
long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
-struct pipe_inode_info *get_pipe_info(struct file *file);
+struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice);
int create_pipe_files(struct file **, int);
unsigned int round_pipe_size(unsigned long size);
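
The ->steal to ->try_steal conversion turns an int error code into a plain boolean, and ->confirm becomes optional (a NULL hook now means all pages are good). A hedged sketch of what a converted pipe_buf_operations might look like, with foo_ names standing in for a real implementation:

static bool foo_pipe_buf_try_steal(struct pipe_inode_info *pipe,
				   struct pipe_buffer *buf)
{
	/* Only steal pages this buffer exclusively owns. */
	if (page_count(buf->page) != 1)
		return false;
	return generic_pipe_buf_try_steal(pipe, buf);
}

static const struct pipe_buf_operations foo_pipe_buf_ops = {
	/* No .confirm hook: pages are always considered good. */
	.release	= generic_pipe_buf_release,
	.try_steal	= foo_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};
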
diff --git a/include/linux/pkeys.h b/include/linux/pkeys.h
index 2955ba976048..86be8bf27b41 100644
--- a/include/linux/pkeys.h
+++ b/include/linux/pkeys.h
@@ -4,6 +4,8 @@
#include <linux/mm.h>
+#define ARCH_DEFAULT_PKEY 0
+
#ifdef CONFIG_ARCH_HAS_PKEYS
#include <asm/pkeys.h>
#else /* ! CONFIG_ARCH_HAS_PKEYS */
@@ -44,10 +46,6 @@ static inline bool arch_pkeys_enabled(void)
return false;
}
-static inline void copy_init_pkru_to_fpregs(void)
-{
-}
-
#endif /* ! CONFIG_ARCH_HAS_PKEYS */
#endif /* _LINUX_PKEYS_H */
diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h
index 174601554b06..f9c5ac80d59b 100644
--- a/include/linux/pktcdvd.h
+++ b/include/linux/pktcdvd.h
@@ -152,14 +152,6 @@ struct packet_stacked_data
};
#define PSD_POOL_SIZE 64
-struct pktcdvd_kobj
-{
- struct kobject kobj;
- struct pktcdvd_device *pd;
-};
-#define to_pktcdvdkobj(_k) \
- ((struct pktcdvd_kobj*)container_of(_k,struct pktcdvd_kobj,kobj))
-
struct pktcdvd_device
{
struct block_device *bdev; /* dev attached */
@@ -183,6 +175,8 @@ struct pktcdvd_device
spinlock_t lock; /* Serialize access to bio_queue */
struct rb_root bio_queue; /* Work queue of bios we need to handle */
int bio_queue_size; /* Number of nodes in bio_queue */
+ bool congested; /* Someone is waiting for bio_queue_size
+ * to drop. */
sector_t current_sector; /* Keep track of where the elevator is */
atomic_t scan_queue; /* Set to non-zero when pkt_handle_queue */
/* needs to be run. */
@@ -195,8 +189,6 @@ struct pktcdvd_device
int write_congestion_on;
struct device *dev; /* sysfs pktcdvd[0-7] dev */
- struct pktcdvd_kobj *kobj_stat; /* sysfs pktcdvd[0-7]/stat/ */
- struct pktcdvd_kobj *kobj_wqueue; /* sysfs pktcdvd[0-7]/write_queue/ */
struct dentry *dfs_d_root; /* debugfs: devname directory */
struct dentry *dfs_f_info; /* debugfs: info file */
diff --git a/include/linux/pl353-smc.h b/include/linux/pl353-smc.h
deleted file mode 100644
index 0e0d3df9bf72..000000000000
--- a/include/linux/pl353-smc.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * ARM PL353 SMC Driver Header
- *
- * Copyright (C) 2012 - 2018 Xilinx, Inc
- */
-
-#ifndef __LINUX_PL353_SMC_H
-#define __LINUX_PL353_SMC_H
-
-enum pl353_smc_ecc_mode {
- PL353_SMC_ECCMODE_BYPASS = 0,
- PL353_SMC_ECCMODE_APB = 1,
- PL353_SMC_ECCMODE_MEM = 2
-};
-
-enum pl353_smc_mem_width {
- PL353_SMC_MEM_WIDTH_8 = 0,
- PL353_SMC_MEM_WIDTH_16 = 1
-};
-
-u32 pl353_smc_get_ecc_val(int ecc_reg);
-bool pl353_smc_ecc_is_busy(void);
-int pl353_smc_get_nand_int_status_raw(void);
-void pl353_smc_clr_nand_int(void);
-int pl353_smc_set_ecc_mode(enum pl353_smc_ecc_mode mode);
-int pl353_smc_set_ecc_pg_size(unsigned int pg_sz);
-int pl353_smc_set_buswidth(unsigned int bw);
-void pl353_smc_set_cycles(u32 timings[]);
-#endif
diff --git a/include/linux/platform_data/ad5755.h b/include/linux/platform_data/ad5755.h
deleted file mode 100644
index e371e08f04bc..000000000000
--- a/include/linux/platform_data/ad5755.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2012 Analog Devices Inc.
- */
-#ifndef __LINUX_PLATFORM_DATA_AD5755_H__
-#define __LINUX_PLATFORM_DATA_AD5755_H__
-
-enum ad5755_mode {
- AD5755_MODE_VOLTAGE_0V_5V = 0,
- AD5755_MODE_VOLTAGE_0V_10V = 1,
- AD5755_MODE_VOLTAGE_PLUSMINUS_5V = 2,
- AD5755_MODE_VOLTAGE_PLUSMINUS_10V = 3,
- AD5755_MODE_CURRENT_4mA_20mA = 4,
- AD5755_MODE_CURRENT_0mA_20mA = 5,
- AD5755_MODE_CURRENT_0mA_24mA = 6,
-};
-
-enum ad5755_dc_dc_phase {
- AD5755_DC_DC_PHASE_ALL_SAME_EDGE = 0,
- AD5755_DC_DC_PHASE_A_B_SAME_EDGE_C_D_OPP_EDGE = 1,
- AD5755_DC_DC_PHASE_A_C_SAME_EDGE_B_D_OPP_EDGE = 2,
- AD5755_DC_DC_PHASE_90_DEGREE = 3,
-};
-
-enum ad5755_dc_dc_freq {
- AD5755_DC_DC_FREQ_250kHZ = 0,
- AD5755_DC_DC_FREQ_410kHZ = 1,
- AD5755_DC_DC_FREQ_650kHZ = 2,
-};
-
-enum ad5755_dc_dc_maxv {
- AD5755_DC_DC_MAXV_23V = 0,
- AD5755_DC_DC_MAXV_24V5 = 1,
- AD5755_DC_DC_MAXV_27V = 2,
- AD5755_DC_DC_MAXV_29V5 = 3,
-};
-
-enum ad5755_slew_rate {
- AD5755_SLEW_RATE_64k = 0,
- AD5755_SLEW_RATE_32k = 1,
- AD5755_SLEW_RATE_16k = 2,
- AD5755_SLEW_RATE_8k = 3,
- AD5755_SLEW_RATE_4k = 4,
- AD5755_SLEW_RATE_2k = 5,
- AD5755_SLEW_RATE_1k = 6,
- AD5755_SLEW_RATE_500 = 7,
- AD5755_SLEW_RATE_250 = 8,
- AD5755_SLEW_RATE_125 = 9,
- AD5755_SLEW_RATE_64 = 10,
- AD5755_SLEW_RATE_32 = 11,
- AD5755_SLEW_RATE_16 = 12,
- AD5755_SLEW_RATE_8 = 13,
- AD5755_SLEW_RATE_4 = 14,
- AD5755_SLEW_RATE_0_5 = 15,
-};
-
-enum ad5755_slew_step_size {
- AD5755_SLEW_STEP_SIZE_1 = 0,
- AD5755_SLEW_STEP_SIZE_2 = 1,
- AD5755_SLEW_STEP_SIZE_4 = 2,
- AD5755_SLEW_STEP_SIZE_8 = 3,
- AD5755_SLEW_STEP_SIZE_16 = 4,
- AD5755_SLEW_STEP_SIZE_32 = 5,
- AD5755_SLEW_STEP_SIZE_64 = 6,
- AD5755_SLEW_STEP_SIZE_128 = 7,
- AD5755_SLEW_STEP_SIZE_256 = 8,
-};
-
-/**
- * struct ad5755_platform_data - AD5755 DAC driver platform data
- * @ext_dc_dc_compenstation_resistor: Whether an external DC-DC converter
- * compensation register is used.
- * @dc_dc_phase: DC-DC converter phase.
- * @dc_dc_freq: DC-DC converter frequency.
- * @dc_dc_maxv: DC-DC maximum allowed boost voltage.
- * @dac.mode: The mode to be used for the DAC output.
- * @dac.ext_current_sense_resistor: Whether an external current sense resistor
- * is used.
- * @dac.enable_voltage_overrange: Whether to enable 20% voltage output overrange.
- * @dac.slew.enable: Whether to enable digital slew.
- * @dac.slew.rate: Slew rate of the digital slew.
- * @dac.slew.step_size: Slew step size of the digital slew.
- **/
-struct ad5755_platform_data {
- bool ext_dc_dc_compenstation_resistor;
- enum ad5755_dc_dc_phase dc_dc_phase;
- enum ad5755_dc_dc_freq dc_dc_freq;
- enum ad5755_dc_dc_maxv dc_dc_maxv;
-
- struct {
- enum ad5755_mode mode;
- bool ext_current_sense_resistor;
- bool enable_voltage_overrange;
- struct {
- bool enable;
- enum ad5755_slew_rate rate;
- enum ad5755_slew_step_size step_size;
- } slew;
- } dac[4];
-};
-
-#endif
diff --git a/include/linux/platform_data/ad5761.h b/include/linux/platform_data/ad5761.h
index 02bef5177ff5..69e261e2ca14 100644
--- a/include/linux/platform_data/ad5761.h
+++ b/include/linux/platform_data/ad5761.h
@@ -3,7 +3,7 @@
* AD5721, AD5721R, AD5761, AD5761R, Voltage Output Digital to Analog Converter
*
* Copyright 2016 Qtechnology A/S
- * 2016 Ricardo Ribalda <ricardo.ribalda@gmail.com>
+ * 2016 Ricardo Ribalda <ribalda@kernel.org>
*/
#ifndef __LINUX_PLATFORM_DATA_AD5761_H__
#define __LINUX_PLATFORM_DATA_AD5761_H__
diff --git a/include/linux/platform_data/ad7291.h b/include/linux/platform_data/ad7291.h
deleted file mode 100644
index b1fd1530c9a5..000000000000
--- a/include/linux/platform_data/ad7291.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IIO_AD7291_H__
-#define __IIO_AD7291_H__
-
-/**
- * struct ad7291_platform_data - AD7291 platform data
- * @use_external_ref: Whether to use an external or internal reference voltage
- */
-struct ad7291_platform_data {
- bool use_external_ref;
-};
-
-#endif
diff --git a/include/linux/platform_data/ad7298.h b/include/linux/platform_data/ad7298.h
deleted file mode 100644
index 3e0ffe2d5d3d..000000000000
--- a/include/linux/platform_data/ad7298.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * AD7298 SPI ADC driver
- *
- * Copyright 2011 Analog Devices Inc.
- */
-
-#ifndef __LINUX_PLATFORM_DATA_AD7298_H__
-#define __LINUX_PLATFORM_DATA_AD7298_H__
-
-/**
- * struct ad7298_platform_data - Platform data for the ad7298 ADC driver
- * @ext_ref: Whether to use an external reference voltage.
- **/
-struct ad7298_platform_data {
- bool ext_ref;
-};
-
-#endif /* IIO_ADC_AD7298_H_ */
diff --git a/include/linux/platform_data/ad7303.h b/include/linux/platform_data/ad7303.h
deleted file mode 100644
index c2bd0a13bea1..000000000000
--- a/include/linux/platform_data/ad7303.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Analog Devices AD7303 DAC driver
- *
- * Copyright 2013 Analog Devices Inc.
- */
-
-#ifndef __IIO_ADC_AD7303_H__
-#define __IIO_ADC_AD7303_H__
-
-/**
- * struct ad7303_platform_data - AD7303 platform data
- * @use_external_ref: If set to true use an external voltage reference connected
- * to the REF pin, otherwise use the internal reference derived from Vdd.
- */
-struct ad7303_platform_data {
- bool use_external_ref;
-};
-
-#endif
diff --git a/include/linux/platform_data/ad7793.h b/include/linux/platform_data/ad7793.h
index 576c7f962c4e..7c697e58f02a 100644
--- a/include/linux/platform_data/ad7793.h
+++ b/include/linux/platform_data/ad7793.h
@@ -40,7 +40,7 @@ enum ad7793_bias_voltage {
* enum ad7793_refsel - AD7793 reference voltage selection
* @AD7793_REFSEL_REFIN1: External reference applied between REFIN1(+)
* and REFIN1(-).
- * @AD7793_REFSEL_REFIN2: External reference applied between REFIN2(+) and
+ * @AD7793_REFSEL_REFIN2: External reference applied between REFIN2(+)
* and REFIN1(-). Only valid for AD7795/AD7796.
* @AD7793_REFSEL_INTERNAL: Internal 1.17 V reference.
*/
diff --git a/include/linux/platform_data/ad7887.h b/include/linux/platform_data/ad7887.h
index 732af46b2d16..9b4dca6ae70b 100644
--- a/include/linux/platform_data/ad7887.h
+++ b/include/linux/platform_data/ad7887.h
@@ -13,13 +13,9 @@
* second input channel, and Vref is internally connected to Vdd. If set to
* false the device is used in single channel mode and AIN1/Vref is used as
* VREF input.
- * @use_onchip_ref: Whether to use the onchip reference. If set to true the
- * internal 2.5V reference is used. If set to false a external reference is
- * used.
*/
struct ad7887_platform_data {
bool en_dual;
- bool use_onchip_ref;
};
#endif /* IIO_ADC_AD7887_H_ */
diff --git a/include/linux/platform_data/adau1977.h b/include/linux/platform_data/adau1977.h
deleted file mode 100644
index 86667235077a..000000000000
--- a/include/linux/platform_data/adau1977.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * ADAU1977/ADAU1978/ADAU1979 driver
- *
- * Copyright 2014 Analog Devices Inc.
- * Author: Lars-Peter Clausen <lars@metafoo.de>
- */
-
-#ifndef __LINUX_PLATFORM_DATA_ADAU1977_H__
-#define __LINUX_PLATFORM_DATA_ADAU1977_H__
-
-/**
- * enum adau1977_micbias - ADAU1977 MICBIAS pin voltage setting
- * @ADAU1977_MICBIAS_5V0: MICBIAS is set to 5.0 V
- * @ADAU1977_MICBIAS_5V5: MICBIAS is set to 5.5 V
- * @ADAU1977_MICBIAS_6V0: MICBIAS is set to 6.0 V
- * @ADAU1977_MICBIAS_6V5: MICBIAS is set to 6.5 V
- * @ADAU1977_MICBIAS_7V0: MICBIAS is set to 7.0 V
- * @ADAU1977_MICBIAS_7V5: MICBIAS is set to 7.5 V
- * @ADAU1977_MICBIAS_8V0: MICBIAS is set to 8.0 V
- * @ADAU1977_MICBIAS_8V5: MICBIAS is set to 8.5 V
- * @ADAU1977_MICBIAS_9V0: MICBIAS is set to 9.0 V
- */
-enum adau1977_micbias {
- ADAU1977_MICBIAS_5V0 = 0x0,
- ADAU1977_MICBIAS_5V5 = 0x1,
- ADAU1977_MICBIAS_6V0 = 0x2,
- ADAU1977_MICBIAS_6V5 = 0x3,
- ADAU1977_MICBIAS_7V0 = 0x4,
- ADAU1977_MICBIAS_7V5 = 0x5,
- ADAU1977_MICBIAS_8V0 = 0x6,
- ADAU1977_MICBIAS_8V5 = 0x7,
- ADAU1977_MICBIAS_9V0 = 0x8,
-};
-
-/**
- * struct adau1977_platform_data - Platform configuration data for the ADAU1977
- * @micbias: Specifies the voltage for the MICBIAS pin
- */
-struct adau1977_platform_data {
- enum adau1977_micbias micbias;
-};
-
-#endif
diff --git a/include/linux/platform_data/adp5588.h b/include/linux/platform_data/adp5588.h
deleted file mode 100644
index 6d3f7d911a92..000000000000
--- a/include/linux/platform_data/adp5588.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Analog Devices ADP5588 I/O Expander and QWERTY Keypad Controller
- *
- * Copyright 2009-2010 Analog Devices Inc.
- */
-
-#ifndef _ADP5588_H
-#define _ADP5588_H
-
-#define DEV_ID 0x00 /* Device ID */
-#define CFG 0x01 /* Configuration Register1 */
-#define INT_STAT 0x02 /* Interrupt Status Register */
-#define KEY_LCK_EC_STAT 0x03 /* Key Lock and Event Counter Register */
-#define Key_EVENTA 0x04 /* Key Event Register A */
-#define Key_EVENTB 0x05 /* Key Event Register B */
-#define Key_EVENTC 0x06 /* Key Event Register C */
-#define Key_EVENTD 0x07 /* Key Event Register D */
-#define Key_EVENTE 0x08 /* Key Event Register E */
-#define Key_EVENTF 0x09 /* Key Event Register F */
-#define Key_EVENTG 0x0A /* Key Event Register G */
-#define Key_EVENTH 0x0B /* Key Event Register H */
-#define Key_EVENTI 0x0C /* Key Event Register I */
-#define Key_EVENTJ 0x0D /* Key Event Register J */
-#define KP_LCK_TMR 0x0E /* Keypad Lock1 to Lock2 Timer */
-#define UNLOCK1 0x0F /* Unlock Key1 */
-#define UNLOCK2 0x10 /* Unlock Key2 */
-#define GPIO_INT_STAT1 0x11 /* GPIO Interrupt Status */
-#define GPIO_INT_STAT2 0x12 /* GPIO Interrupt Status */
-#define GPIO_INT_STAT3 0x13 /* GPIO Interrupt Status */
-#define GPIO_DAT_STAT1 0x14 /* GPIO Data Status, Read twice to clear */
-#define GPIO_DAT_STAT2 0x15 /* GPIO Data Status, Read twice to clear */
-#define GPIO_DAT_STAT3 0x16 /* GPIO Data Status, Read twice to clear */
-#define GPIO_DAT_OUT1 0x17 /* GPIO DATA OUT */
-#define GPIO_DAT_OUT2 0x18 /* GPIO DATA OUT */
-#define GPIO_DAT_OUT3 0x19 /* GPIO DATA OUT */
-#define GPIO_INT_EN1 0x1A /* GPIO Interrupt Enable */
-#define GPIO_INT_EN2 0x1B /* GPIO Interrupt Enable */
-#define GPIO_INT_EN3 0x1C /* GPIO Interrupt Enable */
-#define KP_GPIO1 0x1D /* Keypad or GPIO Selection */
-#define KP_GPIO2 0x1E /* Keypad or GPIO Selection */
-#define KP_GPIO3 0x1F /* Keypad or GPIO Selection */
-#define GPI_EM1 0x20 /* GPI Event Mode 1 */
-#define GPI_EM2 0x21 /* GPI Event Mode 2 */
-#define GPI_EM3 0x22 /* GPI Event Mode 3 */
-#define GPIO_DIR1 0x23 /* GPIO Data Direction */
-#define GPIO_DIR2 0x24 /* GPIO Data Direction */
-#define GPIO_DIR3 0x25 /* GPIO Data Direction */
-#define GPIO_INT_LVL1 0x26 /* GPIO Edge/Level Detect */
-#define GPIO_INT_LVL2 0x27 /* GPIO Edge/Level Detect */
-#define GPIO_INT_LVL3 0x28 /* GPIO Edge/Level Detect */
-#define Debounce_DIS1 0x29 /* Debounce Disable */
-#define Debounce_DIS2 0x2A /* Debounce Disable */
-#define Debounce_DIS3 0x2B /* Debounce Disable */
-#define GPIO_PULL1 0x2C /* GPIO Pull Disable */
-#define GPIO_PULL2 0x2D /* GPIO Pull Disable */
-#define GPIO_PULL3 0x2E /* GPIO Pull Disable */
-#define CMP_CFG_STAT 0x30 /* Comparator Configuration and Status Register */
-#define CMP_CONFG_SENS1 0x31 /* Sensor1 Comparator Configuration Register */
-#define CMP_CONFG_SENS2 0x32 /* L2 Light Sensor Reference Level, Output Falling for Sensor 1 */
-#define CMP1_LVL2_TRIP 0x33 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 1 */
-#define CMP1_LVL2_HYS 0x34 /* L3 Light Sensor Reference Level, Output Falling For Sensor 1 */
-#define CMP1_LVL3_TRIP 0x35 /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 1 */
-#define CMP1_LVL3_HYS 0x36 /* Sensor 2 Comparator Configuration Register */
-#define CMP2_LVL2_TRIP 0x37 /* L2 Light Sensor Reference Level, Output Falling for Sensor 2 */
-#define CMP2_LVL2_HYS 0x38 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 2 */
-#define CMP2_LVL3_TRIP 0x39 /* L3 Light Sensor Reference Level, Output Falling For Sensor 2 */
-#define CMP2_LVL3_HYS 0x3A /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 2 */
-#define CMP1_ADC_DAT_R1 0x3B /* Comparator 1 ADC data Register1 */
-#define CMP1_ADC_DAT_R2 0x3C /* Comparator 1 ADC data Register2 */
-#define CMP2_ADC_DAT_R1 0x3D /* Comparator 2 ADC data Register1 */
-#define CMP2_ADC_DAT_R2 0x3E /* Comparator 2 ADC data Register2 */
-
-#define ADP5588_DEVICE_ID_MASK 0xF
-
- /* Configuration Register1 */
-#define ADP5588_AUTO_INC (1 << 7)
-#define ADP5588_GPIEM_CFG (1 << 6)
-#define ADP5588_OVR_FLOW_M (1 << 5)
-#define ADP5588_INT_CFG (1 << 4)
-#define ADP5588_OVR_FLOW_IEN (1 << 3)
-#define ADP5588_K_LCK_IM (1 << 2)
-#define ADP5588_GPI_IEN (1 << 1)
-#define ADP5588_KE_IEN (1 << 0)
-
-/* Interrupt Status Register */
-#define ADP5588_CMP2_INT (1 << 5)
-#define ADP5588_CMP1_INT (1 << 4)
-#define ADP5588_OVR_FLOW_INT (1 << 3)
-#define ADP5588_K_LCK_INT (1 << 2)
-#define ADP5588_GPI_INT (1 << 1)
-#define ADP5588_KE_INT (1 << 0)
-
-/* Key Lock and Event Counter Register */
-#define ADP5588_K_LCK_EN (1 << 6)
-#define ADP5588_LCK21 0x30
-#define ADP5588_KEC 0xF
-
-#define ADP5588_MAXGPIO 18
-#define ADP5588_BANK(offs) ((offs) >> 3)
-#define ADP5588_BIT(offs) (1u << ((offs) & 0x7))
-
-/* Put one of these structures in i2c_board_info platform_data */
-
-#define ADP5588_KEYMAPSIZE 80
-
-#define GPI_PIN_ROW0 97
-#define GPI_PIN_ROW1 98
-#define GPI_PIN_ROW2 99
-#define GPI_PIN_ROW3 100
-#define GPI_PIN_ROW4 101
-#define GPI_PIN_ROW5 102
-#define GPI_PIN_ROW6 103
-#define GPI_PIN_ROW7 104
-#define GPI_PIN_COL0 105
-#define GPI_PIN_COL1 106
-#define GPI_PIN_COL2 107
-#define GPI_PIN_COL3 108
-#define GPI_PIN_COL4 109
-#define GPI_PIN_COL5 110
-#define GPI_PIN_COL6 111
-#define GPI_PIN_COL7 112
-#define GPI_PIN_COL8 113
-#define GPI_PIN_COL9 114
-
-#define GPI_PIN_ROW_BASE GPI_PIN_ROW0
-#define GPI_PIN_ROW_END GPI_PIN_ROW7
-#define GPI_PIN_COL_BASE GPI_PIN_COL0
-#define GPI_PIN_COL_END GPI_PIN_COL9
-
-#define GPI_PIN_BASE GPI_PIN_ROW_BASE
-#define GPI_PIN_END GPI_PIN_COL_END
-
-#define ADP5588_GPIMAPSIZE_MAX (GPI_PIN_END - GPI_PIN_BASE + 1)
-
-struct adp5588_gpi_map {
- unsigned short pin;
- unsigned short sw_evt;
-};
-
-struct adp5588_kpad_platform_data {
- int rows; /* Number of rows */
- int cols; /* Number of columns */
- const unsigned short *keymap; /* Pointer to keymap */
- unsigned short keymapsize; /* Keymap size */
- unsigned repeat:1; /* Enable key repeat */
- unsigned en_keylock:1; /* Enable Key Lock feature */
- unsigned short unlock_key1; /* Unlock Key 1 */
- unsigned short unlock_key2; /* Unlock Key 2 */
- const struct adp5588_gpi_map *gpimap;
- unsigned short gpimapsize;
- const struct adp5588_gpio_platform_data *gpio_data;
-};
-
-struct i2c_client; /* forward declaration */
-
-struct adp5588_gpio_platform_data {
- int gpio_start; /* GPIO Chip base # */
- const char *const *names;
- unsigned irq_base; /* interrupt base # */
- unsigned pullup_dis_mask; /* Pull-Up Disable Mask */
- int (*setup)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
- int (*teardown)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
- void *context;
-};
-
-#endif
diff --git a/include/linux/platform_data/asoc-mx27vis.h b/include/linux/platform_data/asoc-mx27vis.h
deleted file mode 100644
index 2107d0d992dd..000000000000
--- a/include/linux/platform_data/asoc-mx27vis.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __PLATFORM_DATA_ASOC_MX27VIS_H
-#define __PLATFORM_DATA_ASOC_MX27VIS_H
-
-struct snd_mx27vis_platform_data {
- int amp_gain0_gpio;
- int amp_gain1_gpio;
- int amp_mutel_gpio;
- int amp_muter_gpio;
-};
-
-#endif /* __PLATFORM_DATA_ASOC_MX27VIS_H */
diff --git a/include/linux/platform_data/asoc-poodle.h b/include/linux/platform_data/asoc-poodle.h
new file mode 100644
index 000000000000..2052fad55c5c
--- /dev/null
+++ b/include/linux/platform_data/asoc-poodle.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_PLATFORM_DATA_POODLE_AUDIO
+#define __LINUX_PLATFORM_DATA_POODLE_AUDIO
+
+/* locomo is not a proper gpio driver, and uses its own api */
+struct poodle_audio_platform_data {
+ struct device *locomo_dev;
+
+ int gpio_amp_on;
+ int gpio_mute_l;
+ int gpio_mute_r;
+ int gpio_232vcc_on;
+ int gpio_jk_b;
+};
+
+#endif
diff --git a/include/linux/platform_data/asoc-pxa.h b/include/linux/platform_data/asoc-pxa.h
new file mode 100644
index 000000000000..327454cd8246
--- /dev/null
+++ b/include/linux/platform_data/asoc-pxa.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SOC_PXA_AUDIO_H__
+#define __SOC_PXA_AUDIO_H__
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/ac97_codec.h>
+
+/*
+ * @reset_gpio: AC97 reset gpio (normally gpio113 or gpio95)
+ * a -1 value means no gpio will be used for reset
+ * @codec_pdata: AC97 codec platform_data
+ *
+ * reset_gpio should only be specified for pxa27x CPUs where a silicon
+ * bug prevents correct operation of the reset line. If not specified,
+ * the default behaviour on these CPUs is to consider gpio 113 as the
+ * AC97 reset line, which is the default on most boards.
+ */
+typedef struct {
+ int (*startup)(struct snd_pcm_substream *, void *);
+ void (*shutdown)(struct snd_pcm_substream *, void *);
+ void (*suspend)(void *);
+ void (*resume)(void *);
+ void *priv;
+ int reset_gpio;
+ void *codec_pdata[AC97_BUS_MAX_DEVICES];
+} pxa2xx_audio_ops_t;
+
+extern void pxa_set_ac97_info(pxa2xx_audio_ops_t *ops);
+
+#endif
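
Board files feed this structure to pxa_set_ac97_info(). A sketch with illustrative values; the gpio number follows the comment above:

static pxa2xx_audio_ops_t foo_ac97_ops = {
	.reset_gpio	= 113,	/* pxa27x silicon-bug workaround line */
};

static void __init foo_board_init(void)
{
	pxa_set_ac97_info(&foo_ac97_ops);
}
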
diff --git a/include/linux/platform_data/at91_adc.h b/include/linux/platform_data/at91_adc.h
deleted file mode 100644
index f20eaeb827ce..000000000000
--- a/include/linux/platform_data/at91_adc.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2011 Free Electrons
- */
-
-#ifndef _AT91_ADC_H_
-#define _AT91_ADC_H_
-
-enum atmel_adc_ts_type {
- ATMEL_ADC_TOUCHSCREEN_NONE = 0,
- ATMEL_ADC_TOUCHSCREEN_4WIRE = 4,
- ATMEL_ADC_TOUCHSCREEN_5WIRE = 5,
-};
-
-/**
- * struct at91_adc_trigger - description of triggers
- * @name: name of the trigger advertised to the user
- * @value: value to set in the ADC's trigger setup register
- to enable the trigger
- * @is_external: Does the trigger rely on an external pin?
- */
-struct at91_adc_trigger {
- const char *name;
- u8 value;
- bool is_external;
-};
-
-/**
- * struct at91_adc_data - platform data for ADC driver
- * @channels_used: channels in use on the board as a bitmask
- * @startup_time: startup time of the ADC in microseconds
- * @trigger_list: Triggers available in the ADC
- * @trigger_number: Number of triggers available in the ADC
- * @use_external_triggers: does the board has external triggers availables
- * @vref: Reference voltage for the ADC in millivolts
- * @touchscreen_type: If a touchscreen is connected, its type (4 or 5 wires)
- */
-struct at91_adc_data {
- unsigned long channels_used;
- u8 startup_time;
- struct at91_adc_trigger *trigger_list;
- u8 trigger_number;
- bool use_external_triggers;
- u16 vref;
- enum atmel_adc_ts_type touchscreen_type;
-};
-
-extern void __init at91_add_device_adc(struct at91_adc_data *data);
-#endif
diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h
index 99e6069c5fd8..73f63be509c4 100644
--- a/include/linux/platform_data/atmel.h
+++ b/include/linux/platform_data/atmel.h
@@ -6,18 +6,6 @@
#ifndef __ATMEL_H__
#define __ATMEL_H__
- /* Compact Flash */
-struct at91_cf_data {
- int irq_pin; /* I/O IRQ */
- int det_pin; /* Card detect */
- int vcc_pin; /* power switching */
- int rst_pin; /* card reset */
- u8 chipselect; /* EBI Chip Select number */
- u8 flags;
-#define AT91_CF_TRUE_IDE 0x01
-#define AT91_IDE_SWAP_A0_A2 0x02
-};
-
/* FIXME: this needs a better location, but gets stuff building again */
#ifdef CONFIG_ATMEL_PM
extern int at91_suspend_entering_slow_clock(void);
diff --git a/include/linux/platform_data/bcm7038_wdt.h b/include/linux/platform_data/bcm7038_wdt.h
new file mode 100644
index 000000000000..e18cfd9ec8f9
--- /dev/null
+++ b/include/linux/platform_data/bcm7038_wdt.h
@@ -0,0 +1,8 @@
+#ifndef __BCM7038_WDT_PDATA_H
+#define __BCM7038_WDT_PDATA_H
+
+struct bcm7038_wdt_platform_data {
+ const char *clk_name;
+};
+
+#endif /* __BCM7038_WDT_PDATA_H */
diff --git a/include/linux/platform_data/brcmfmac.h b/include/linux/platform_data/brcmfmac.h
index 1d30bf278231..f922a192fe58 100644
--- a/include/linux/platform_data/brcmfmac.h
+++ b/include/linux/platform_data/brcmfmac.h
@@ -125,7 +125,7 @@ struct brcmfmac_pd_cc_entry {
*/
struct brcmfmac_pd_cc {
int table_size;
- struct brcmfmac_pd_cc_entry table[0];
+ struct brcmfmac_pd_cc_entry table[];
};
/**
@@ -178,7 +178,7 @@ struct brcmfmac_platform_data {
void (*power_off)(void);
char *fw_alternative_path;
int device_count;
- struct brcmfmac_pd_device devices[0];
+ struct brcmfmac_pd_device devices[];
};
diff --git a/include/linux/platform_data/brcmnand.h b/include/linux/platform_data/brcmnand.h
new file mode 100644
index 000000000000..8b8777985dce
--- /dev/null
+++ b/include/linux/platform_data/brcmnand.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef BRCMNAND_PLAT_DATA_H
+#define BRCMNAND_PLAT_DATA_H
+
+struct brcmnand_platform_data {
+ int chip_select;
+ const char * const *part_probe_types;
+ unsigned int ecc_stepsize;
+ unsigned int ecc_strength;
+};
+
+#endif /* BRCMNAND_PLAT_DATA_H */
diff --git a/include/linux/platform_data/clk-fch.h b/include/linux/platform_data/clk-fch.h
new file mode 100644
index 000000000000..11a2a23fd9b2
--- /dev/null
+++ b/include/linux/platform_data/clk-fch.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * clock framework for AMD misc clocks
+ *
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ */
+
+#ifndef __CLK_FCH_H
+#define __CLK_FCH_H
+
+#include <linux/compiler.h>
+
+struct fch_clk_data {
+ void __iomem *base;
+ char *name;
+};
+
+#endif /* __CLK_FCH_H */
diff --git a/include/linux/platform_data/clk-integrator.h b/include/linux/platform_data/clk-integrator.h
deleted file mode 100644
index addd48cac625..000000000000
--- a/include/linux/platform_data/clk-integrator.h
+++ /dev/null
@@ -1,2 +0,0 @@
-void integrator_impd1_clk_init(void __iomem *base, unsigned int id);
-void integrator_impd1_clk_exit(unsigned int id);
diff --git a/include/linux/platform_data/clk-s3c2410.h b/include/linux/platform_data/clk-s3c2410.h
new file mode 100644
index 000000000000..7eb1cfa5409b
--- /dev/null
+++ b/include/linux/platform_data/clk-s3c2410.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Krzysztof Kozlowski <krzk@kernel.org>
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_CLK_S3C2410_H_
+#define __LINUX_PLATFORM_DATA_CLK_S3C2410_H_
+
+/**
+ * struct s3c2410_clk_platform_data - platform data for S3C2410 clock driver
+ *
+ * @modify_misccr: Function to modify the MISCCR and return the new value
+ */
+struct s3c2410_clk_platform_data {
+ unsigned int (*modify_misccr)(unsigned int clr, unsigned int chg);
+};
+
+#endif /* __LINUX_PLATFORM_DATA_CLK_S3C2410_H_ */
+
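
Machine code supplies the MISCCR accessor through this platform data, so the clock driver never touches the register directly. A sketch, with foo_modify_misccr() standing in for the SoC-specific helper:

static unsigned int foo_modify_misccr(unsigned int clr, unsigned int chg)
{
	/* Read-modify-write of the SoC MISCCR register, stubbed here. */
	return 0;
}

static struct s3c2410_clk_platform_data foo_clk_pdata = {
	.modify_misccr	= foo_modify_misccr,
};
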
diff --git a/include/linux/platform_data/clk-st.h b/include/linux/platform_data/clk-st.h
deleted file mode 100644
index 7cdb6a402b35..000000000000
--- a/include/linux/platform_data/clk-st.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * clock framework for AMD Stoney based clock
- *
- * Copyright 2018 Advanced Micro Devices, Inc.
- */
-
-#ifndef __CLK_ST_H
-#define __CLK_ST_H
-
-#include <linux/compiler.h>
-
-struct st_clk_data {
- void __iomem *base;
-};
-
-#endif /* __CLK_ST_H */
diff --git a/include/linux/platform_data/clk-u300.h b/include/linux/platform_data/clk-u300.h
deleted file mode 100644
index 8429e73911a1..000000000000
--- a/include/linux/platform_data/clk-u300.h
+++ /dev/null
@@ -1 +0,0 @@
-void __init u300_clk_init(void __iomem *base);
diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h
index 69210881ebac..5744a2d746aa 100644
--- a/include/linux/platform_data/cros_ec_commands.h
+++ b/include/linux/platform_data/cros_ec_commands.h
@@ -13,8 +13,8 @@
#ifndef __CROS_EC_COMMANDS_H
#define __CROS_EC_COMMANDS_H
-
-
+#include <linux/bits.h>
+#include <linux/types.h>
#define BUILD_ASSERT(_cond)
@@ -51,10 +51,14 @@
/*
* The actual block is 0x800-0x8ff, but some BIOSes think it's 0x880-0x8ff
* and they tell the kernel that so we have to think of it as two parts.
+ *
+ * Other BIOSes report only the I/O port region spanned by the Microchip
+ * MEC series EC; an attempt to address a larger region may fail.
*/
-#define EC_HOST_CMD_REGION0 0x800
-#define EC_HOST_CMD_REGION1 0x880
-#define EC_HOST_CMD_REGION_SIZE 0x80
+#define EC_HOST_CMD_REGION0 0x800
+#define EC_HOST_CMD_REGION1 0x880
+#define EC_HOST_CMD_REGION_SIZE 0x80
+#define EC_HOST_CMD_MEC_REGION_SIZE 0x8
/* EC command register bit functions */
#define EC_LPC_CMDR_DATA BIT(0) /* Data ready for host to read */
@@ -783,7 +787,7 @@ struct ec_host_response {
*
* Packets always start with a request or response header. They are followed
* by data_len bytes of data. If the data_crc_present flag is set, the data
- * bytes are followed by a CRC-8 of that data, using using x^8 + x^2 + x + 1
+ * bytes are followed by a CRC-8 of that data, using x^8 + x^2 + x + 1
* polynomial.
*
* Host algorithm when sending a request q:
@@ -1284,6 +1288,20 @@ enum ec_feature_code {
EC_FEATURE_SCP = 39,
/* The MCU is an Integrated Sensor Hub */
EC_FEATURE_ISH = 40,
+ /* New TCPMv2 TYPEC_ prefaced commands supported */
+ EC_FEATURE_TYPEC_CMD = 41,
+ /*
+ * The EC will wait for direction from the AP to enter Type-C alternate
+ * modes or USB4.
+ */
+ EC_FEATURE_TYPEC_REQUIRE_AP_MODE_ENTRY = 42,
+ /*
+ * The EC will wait for an acknowledge from the AP after setting the
+ * mux.
+ */
+ EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK = 43,
+ /* The MCU is a System Companion Processor (SCP) 2nd Core. */
+ EC_FEATURE_SCP_C1 = 45,
};
#define EC_FEATURE_MASK_0(event_code) BIT(event_code % 32)
@@ -1419,7 +1437,7 @@ struct ec_response_flash_info_2 {
uint16_t num_banks_total;
/* Number of banks described in banks array. */
uint16_t num_banks_desc;
- struct ec_flash_bank banks[0];
+ struct ec_flash_bank banks[];
} __ec_align4;
/*
@@ -2420,12 +2438,12 @@ struct ec_response_motion_sense_fifo_info {
/* Total amount of vector lost */
uint16_t total_lost;
/* Lost events since the last fifo_info, per sensors */
- uint16_t lost[0];
+ uint16_t lost[];
} __ec_todo_packed;
struct ec_response_motion_sense_fifo_data {
uint32_t number_data;
- struct ec_response_motion_sensor_data data[0];
+ struct ec_response_motion_sensor_data data[];
} __ec_todo_packed;
/* List supported activity recognition */
@@ -3093,7 +3111,7 @@ struct ec_response_tmp006_get_calibration_v1 {
uint8_t algorithm;
uint8_t num_params;
uint8_t reserved[2];
- float val[0];
+ float val[];
} __ec_align4;
struct ec_params_tmp006_set_calibration_v1 {
@@ -3101,7 +3119,7 @@ struct ec_params_tmp006_set_calibration_v1 {
uint8_t algorithm;
uint8_t num_params;
uint8_t reserved;
- float val[0];
+ float val[];
} __ec_align4;
@@ -3374,6 +3392,9 @@ enum ec_mkbp_event {
/* Send an incoming CEC message to the AP */
EC_MKBP_EVENT_CEC_MESSAGE = 9,
+ /* Peripheral device charger event */
+ EC_MKBP_EVENT_PCHG = 12,
+
/* Number of MKBP events */
EC_MKBP_EVENT_COUNT,
};
@@ -3455,6 +3476,7 @@ struct ec_response_get_next_event_v1 {
#define EC_MKBP_LID_OPEN 0
#define EC_MKBP_TABLET_MODE 1
#define EC_MKBP_BASE_ATTACHED 2
+#define EC_MKBP_FRONT_PROXIMITY 3
/* Run keyboard factory test scanning */
#define EC_CMD_KEYBOARD_FACTORY_TEST 0x0068
@@ -4215,6 +4237,7 @@ enum ec_device_event {
EC_DEVICE_EVENT_TRACKPAD,
EC_DEVICE_EVENT_DSP,
EC_DEVICE_EVENT_WIFI,
+ EC_DEVICE_EVENT_WLC,
};
enum ec_device_event_param {
@@ -4598,6 +4621,7 @@ enum ec_codec_i2s_rx_subcmd {
EC_CODEC_I2S_RX_SET_SAMPLE_DEPTH = 0x2,
EC_CODEC_I2S_RX_SET_DAIFMT = 0x3,
EC_CODEC_I2S_RX_SET_BCLK = 0x4,
+ EC_CODEC_I2S_RX_RESET = 0x5,
EC_CODEC_I2S_RX_SUBCMD_COUNT,
};
@@ -4729,6 +4753,7 @@ enum ec_reboot_cmd {
EC_REBOOT_DISABLE_JUMP = 5, /* Disable jump until next reboot */
EC_REBOOT_HIBERNATE = 6, /* Hibernate EC */
EC_REBOOT_HIBERNATE_CLEAR_AP_OFF = 7, /* and clears AP_OFF flag */
+ EC_REBOOT_COLD_AP_OFF = 8, /* Cold-reboot and don't boot AP */
};
/* Flags for ec_params_reboot_ec.reboot_flags */
@@ -4917,15 +4942,26 @@ struct ec_response_usb_pd_control_v1 {
#define USBC_PD_CC_UFP_ATTACHED 4 /* UFP attached to usbc */
 #define USBC_PD_CC_DFP_ATTACHED 5 /* DFP attached to usbc */
+/* Active/Passive Cable */
+#define USB_PD_CTRL_ACTIVE_CABLE BIT(0)
+/* Optical/Non-optical cable */
+#define USB_PD_CTRL_OPTICAL_CABLE BIT(1)
+/* 3rd gen TBT device (or AMA) / 2nd gen TBT adapter */
+#define USB_PD_CTRL_TBT_LEGACY_ADAPTER BIT(2)
+/* Active Link Uni-Direction */
+#define USB_PD_CTRL_ACTIVE_LINK_UNIDIR BIT(3)
+
struct ec_response_usb_pd_control_v2 {
uint8_t enabled;
uint8_t role;
uint8_t polarity;
char state[32];
- uint8_t cc_state; /* USBC_PD_CC_*Encoded cc state */
- uint8_t dp_mode; /* Current DP pin mode (MODE_DP_PIN_[A-E]) */
- /* CL:1500994 Current cable type */
- uint8_t reserved_cable_type;
+ uint8_t cc_state; /* enum pd_cc_states representing cc state */
+ uint8_t dp_mode; /* Current DP pin mode (MODE_DP_PIN_[A-E]) */
+ uint8_t reserved; /* Reserved for future use */
+	uint8_t control_flags; /* USB_PD_CTRL_* flags */
+ uint8_t cable_speed; /* TBT_SS_* cable speed */
+ uint8_t cable_gen; /* TBT_GEN3_* cable rounded support */
} __ec_align1;
#define EC_CMD_USB_PD_PORTS 0x0102
@@ -5065,7 +5101,7 @@ struct ec_response_pd_log {
uint8_t type; /* event type : see PD_EVENT_xx below */
uint8_t size_port; /* [7:5] port number [4:0] payload size in bytes */
uint16_t data; /* type-defined data payload */
- uint8_t payload[0]; /* optional additional data payload: 0..16 bytes */
+ uint8_t payload[]; /* optional additional data payload: 0..16 bytes */
} __ec_align4;
/* The timestamp is the microsecond counter shifted to get about a ms. */
@@ -5207,11 +5243,15 @@ struct ec_params_usb_pd_mux_info {
} __ec_align1;
/* Flags representing mux state */
-#define USB_PD_MUX_USB_ENABLED BIT(0) /* USB connected */
-#define USB_PD_MUX_DP_ENABLED BIT(1) /* DP connected */
-#define USB_PD_MUX_POLARITY_INVERTED BIT(2) /* CC line Polarity inverted */
-#define USB_PD_MUX_HPD_IRQ BIT(3) /* HPD IRQ is asserted */
-#define USB_PD_MUX_HPD_LVL BIT(4) /* HPD level is asserted */
+#define USB_PD_MUX_NONE 0 /* Open switch */
+#define USB_PD_MUX_USB_ENABLED BIT(0) /* USB connected */
+#define USB_PD_MUX_DP_ENABLED BIT(1) /* DP connected */
+#define USB_PD_MUX_POLARITY_INVERTED BIT(2) /* CC line Polarity inverted */
+#define USB_PD_MUX_HPD_IRQ BIT(3) /* HPD IRQ is asserted */
+#define USB_PD_MUX_HPD_LVL BIT(4) /* HPD level is asserted */
+#define USB_PD_MUX_SAFE_MODE BIT(5) /* DP is in safe mode */
+#define USB_PD_MUX_TBT_COMPAT_ENABLED BIT(6) /* TBT compat enabled */
+#define USB_PD_MUX_USB4_ENABLED BIT(7) /* USB4 enabled */
struct ec_response_usb_pd_mux_info {
uint8_t flags; /* USB_PD_MUX_*-encoded USB mux state */
@@ -5430,6 +5470,414 @@ struct ec_response_rollback_info {
/* Issue AP reset */
#define EC_CMD_AP_RESET 0x0125
+/**
+ * Get the number of peripheral charge ports
+ */
+#define EC_CMD_PCHG_COUNT 0x0134
+
+#define EC_PCHG_MAX_PORTS 8
+
+struct ec_response_pchg_count {
+ uint8_t port_count;
+} __ec_align1;
+
+/**
+ * Get the status of a peripheral charge port
+ */
+#define EC_CMD_PCHG 0x0135
+
+struct ec_params_pchg {
+ uint8_t port;
+} __ec_align1;
+
+struct ec_response_pchg {
+ uint32_t error; /* enum pchg_error */
+ uint8_t state; /* enum pchg_state state */
+ uint8_t battery_percentage;
+ uint8_t unused0;
+ uint8_t unused1;
+ /* Fields added in version 1 */
+ uint32_t fw_version;
+ uint32_t dropped_event_count;
+} __ec_align2;
+
+enum pchg_state {
+ /* Charger is reset and not initialized. */
+ PCHG_STATE_RESET = 0,
+ /* Charger is initialized or disabled. */
+ PCHG_STATE_INITIALIZED,
+ /* Charger is enabled and ready to detect a device. */
+ PCHG_STATE_ENABLED,
+ /* Device is in proximity. */
+ PCHG_STATE_DETECTED,
+ /* Device is being charged. */
+ PCHG_STATE_CHARGING,
+ /* Device is fully charged. It implies DETECTED (& not charging). */
+ PCHG_STATE_FULL,
+ /* In download (a.k.a. firmware update) mode */
+ PCHG_STATE_DOWNLOAD,
+ /* In download mode. Ready for receiving data. */
+ PCHG_STATE_DOWNLOADING,
+ /* Device is ready for data communication. */
+ PCHG_STATE_CONNECTED,
+ /* Put no more entry below */
+ PCHG_STATE_COUNT,
+};
+
+#define EC_PCHG_STATE_TEXT { \
+ [PCHG_STATE_RESET] = "RESET", \
+ [PCHG_STATE_INITIALIZED] = "INITIALIZED", \
+ [PCHG_STATE_ENABLED] = "ENABLED", \
+ [PCHG_STATE_DETECTED] = "DETECTED", \
+ [PCHG_STATE_CHARGING] = "CHARGING", \
+ [PCHG_STATE_FULL] = "FULL", \
+ [PCHG_STATE_DOWNLOAD] = "DOWNLOAD", \
+ [PCHG_STATE_DOWNLOADING] = "DOWNLOADING", \
+ [PCHG_STATE_CONNECTED] = "CONNECTED", \
+ }
+
+/*
+ * Update firmware of peripheral chip
+ */
+#define EC_CMD_PCHG_UPDATE 0x0136
+
+/* Port number is encoded in bit[28:31]. */
+#define EC_MKBP_PCHG_PORT_SHIFT 28
+/* Utility macro for converting MKBP event to port number. */
+#define EC_MKBP_PCHG_EVENT_TO_PORT(e) (((e) >> EC_MKBP_PCHG_PORT_SHIFT) & 0xf)
+/* Utility macro for extracting event bits. */
+#define EC_MKBP_PCHG_EVENT_MASK(e) ((e) \
+ & GENMASK(EC_MKBP_PCHG_PORT_SHIFT-1, 0))
+
+#define EC_MKBP_PCHG_UPDATE_OPENED BIT(0)
+#define EC_MKBP_PCHG_WRITE_COMPLETE BIT(1)
+#define EC_MKBP_PCHG_UPDATE_CLOSED BIT(2)
+#define EC_MKBP_PCHG_UPDATE_ERROR BIT(3)
+#define EC_MKBP_PCHG_DEVICE_EVENT BIT(4)
+
+enum ec_pchg_update_cmd {
+ /* Reset chip to normal mode. */
+ EC_PCHG_UPDATE_CMD_RESET_TO_NORMAL = 0,
+ /* Reset and put a chip in update (a.k.a. download) mode. */
+ EC_PCHG_UPDATE_CMD_OPEN,
+ /* Write a block of data containing FW image. */
+ EC_PCHG_UPDATE_CMD_WRITE,
+ /* Close update session. */
+ EC_PCHG_UPDATE_CMD_CLOSE,
+ /* End of commands */
+ EC_PCHG_UPDATE_CMD_COUNT,
+};
+
+struct ec_params_pchg_update {
+ /* PCHG port number */
+ uint8_t port;
+ /* enum ec_pchg_update_cmd */
+ uint8_t cmd;
+ /* Padding */
+ uint8_t reserved0;
+ uint8_t reserved1;
+ /* Version of new firmware */
+ uint32_t version;
+ /* CRC32 of new firmware */
+ uint32_t crc32;
+ /* Address in chip memory where <data> is written to */
+ uint32_t addr;
+ /* Size of <data> */
+ uint32_t size;
+ /* Partial data of new firmware */
+ uint8_t data[];
+} __ec_align4;
+
+BUILD_ASSERT(EC_PCHG_UPDATE_CMD_COUNT
+ < BIT(sizeof(((struct ec_params_pchg_update *)0)->cmd)*8));
+
+struct ec_response_pchg_update {
+ /* Block size */
+ uint32_t block_size;
+} __ec_align4;
+
+
+/*****************************************************************************/
+/* Voltage regulator controls */
+
+/*
+ * Get basic info of voltage regulator for given index.
+ *
+ * Returns the regulator name and supported voltage list in mV.
+ */
+#define EC_CMD_REGULATOR_GET_INFO 0x012C
+
+/* Maximum length of regulator name */
+#define EC_REGULATOR_NAME_MAX_LEN 16
+
+/* Maximum length of the supported voltage list. */
+#define EC_REGULATOR_VOLTAGE_MAX_COUNT 16
+
+struct ec_params_regulator_get_info {
+ uint32_t index;
+} __ec_align4;
+
+struct ec_response_regulator_get_info {
+ char name[EC_REGULATOR_NAME_MAX_LEN];
+ uint16_t num_voltages;
+ uint16_t voltages_mv[EC_REGULATOR_VOLTAGE_MAX_COUNT];
+} __ec_align2;
+
+/*
+ * Configure the regulator as enabled / disabled.
+ */
+#define EC_CMD_REGULATOR_ENABLE 0x012D
+
+struct ec_params_regulator_enable {
+ uint32_t index;
+ uint8_t enable;
+} __ec_align4;
+
+/*
+ * Query if the regulator is enabled.
+ *
+ * Returns 1 if the regulator is enabled, 0 if not.
+ */
+#define EC_CMD_REGULATOR_IS_ENABLED 0x012E
+
+struct ec_params_regulator_is_enabled {
+ uint32_t index;
+} __ec_align4;
+
+struct ec_response_regulator_is_enabled {
+ uint8_t enabled;
+} __ec_align1;
+
+/*
+ * Set voltage for the voltage regulator within the range specified.
+ *
+ * The driver should select the voltage in range closest to min_mv.
+ *
+ * Also note that this might be called before the regulator is enabled, and the
+ * setting should be in effect after the regulator is enabled.
+ */
+#define EC_CMD_REGULATOR_SET_VOLTAGE 0x012F
+
+struct ec_params_regulator_set_voltage {
+ uint32_t index;
+ uint32_t min_mv;
+ uint32_t max_mv;
+} __ec_align4;
+
+/*
+ * Get the currently configured voltage for the voltage regulator.
+ *
+ * Note that this might be called before the regulator is enabled, and this
+ * should return the configured output voltage if the regulator is enabled.
+ */
+#define EC_CMD_REGULATOR_GET_VOLTAGE 0x0130
+
+struct ec_params_regulator_get_voltage {
+ uint32_t index;
+} __ec_align4;
+
+struct ec_response_regulator_get_voltage {
+ uint32_t voltage_mv;
+} __ec_align4;
+
+/*
+ * Gather all discovery information for the given port and partner type.
+ *
+ * Note that if discovery has not yet completed, only the currently completed
+ * responses will be filled in. If the discovery data structures are changed
+ * in the process of the command running, BUSY will be returned.
+ *
+ * VDO field sizes are set to the maximum possible number of VDOs a VDM may
+ * contain, while the number of SVIDs here is selected to fit within the PROTO2
+ * maximum parameter size.
+ */
+#define EC_CMD_TYPEC_DISCOVERY 0x0131
+
+enum typec_partner_type {
+ TYPEC_PARTNER_SOP = 0,
+ TYPEC_PARTNER_SOP_PRIME = 1,
+};
+
+struct ec_params_typec_discovery {
+ uint8_t port;
+ uint8_t partner_type; /* enum typec_partner_type */
+} __ec_align1;
+
+struct svid_mode_info {
+ uint16_t svid;
+ uint16_t mode_count; /* Number of modes partner sent */
+ uint32_t mode_vdo[6]; /* Max VDOs allowed after VDM header is 6 */
+};
+
+struct ec_response_typec_discovery {
+ uint8_t identity_count; /* Number of identity VDOs partner sent */
+ uint8_t svid_count; /* Number of SVIDs partner sent */
+ uint16_t reserved;
+ uint32_t discovery_vdo[6]; /* Max VDOs allowed after VDM header is 6 */
+ struct svid_mode_info svids[];
+} __ec_align1;
+
+/* USB Type-C commands for AP-controlled device policy. */
+#define EC_CMD_TYPEC_CONTROL 0x0132
+
+enum typec_control_command {
+ TYPEC_CONTROL_COMMAND_EXIT_MODES,
+ TYPEC_CONTROL_COMMAND_CLEAR_EVENTS,
+ TYPEC_CONTROL_COMMAND_ENTER_MODE,
+ TYPEC_CONTROL_COMMAND_TBT_UFP_REPLY,
+ TYPEC_CONTROL_COMMAND_USB_MUX_SET,
+};
+
+/* Replies the AP may specify to the TBT EnterMode command as a UFP */
+enum typec_tbt_ufp_reply {
+ TYPEC_TBT_UFP_REPLY_NAK,
+ TYPEC_TBT_UFP_REPLY_ACK,
+};
+
+struct typec_usb_mux_set {
+ uint8_t mux_index; /* Index of the mux to set in the chain */
+ uint8_t mux_flags; /* USB_PD_MUX_*-encoded USB mux state to set */
+} __ec_align1;
+
+struct ec_params_typec_control {
+ uint8_t port;
+ uint8_t command; /* enum typec_control_command */
+ uint16_t reserved;
+
+ /*
+ * This section will be interpreted based on |command|. Define a
+ * placeholder structure to avoid having to increase the size and bump
+ * the command version when adding new sub-commands.
+ */
+ union {
+ uint32_t clear_events_mask;
+ uint8_t mode_to_enter; /* enum typec_mode */
+ uint8_t tbt_ufp_reply; /* enum typec_tbt_ufp_reply */
+ struct typec_usb_mux_set mux_params;
+ uint8_t placeholder[128];
+ };
+} __ec_align1;
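
A sketch of filling the parameter union for one of these sub-commands — here the USB mux set; port and mux index are illustrative, and USB_PD_MUX_USB_ENABLED is one of the USB_PD_MUX_* flags defined elsewhere in this header:

    struct ec_params_typec_control ctrl = {
        .port = 0,
        .command = TYPEC_CONTROL_COMMAND_USB_MUX_SET,
        .mux_params = {
            .mux_index = 0,
            .mux_flags = USB_PD_MUX_USB_ENABLED,
        },
    };
    /* send &ctrl, sizeof(ctrl) with EC_CMD_TYPEC_CONTROL; no response body */

Because the union is padded by the 128-byte placeholder, new sub-commands can grow the parameters without changing the struct size or bumping the command version.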
+
+/*
+ * Gather all status information for a port.
+ *
+ * Note: this covers many of the return fields from the deprecated
+ * EC_CMD_USB_PD_CONTROL command, except those that are redundant with the
+ * discovery data. The "enum pd_cc_states" is defined with the deprecated
+ * EC_CMD_USB_PD_CONTROL command.
+ *
+ * This also combines in the EC_CMD_USB_PD_MUX_INFO flags.
+ */
+#define EC_CMD_TYPEC_STATUS 0x0133
+
+/*
+ * Power role.
+ *
+ * Note this is also used for PD header creation, and values align to those in
+ * the Power Delivery Specification Revision 3.0 (See
+ * 6.2.1.1.4 Port Power Role).
+ */
+enum pd_power_role {
+ PD_ROLE_SINK = 0,
+ PD_ROLE_SOURCE = 1
+};
+
+/*
+ * Data role.
+ *
+ * Note this is also used for PD header creation, and the first two values
+ * align to those in the Power Delivery Specification Revision 3.0 (See
+ * 6.2.1.1.6 Port Data Role).
+ */
+enum pd_data_role {
+ PD_ROLE_UFP = 0,
+ PD_ROLE_DFP = 1,
+ PD_ROLE_DISCONNECTED = 2,
+};
+
+enum pd_vconn_role {
+ PD_ROLE_VCONN_OFF = 0,
+ PD_ROLE_VCONN_SRC = 1,
+};
+
+/*
+ * Note: BIT(0) may be used to determine whether the polarity is CC1 or CC2,
+ * regardless of whether a debug accessory is connected.
+ */
+enum tcpc_cc_polarity {
+ /*

+	 * _CCx is used to indicate the polarity while not connected to
+	 * a Debug Accessory. Only one CC line will assert a resistor and
+	 * the other will be open.
+ */
+ POLARITY_CC1 = 0,
+ POLARITY_CC2 = 1,
+
+ /*
+ * _CCx_DTS is used to indicate the polarity while connected to a
+ * SRC Debug Accessory. Assert resistors on both lines.
+ */
+ POLARITY_CC1_DTS = 2,
+ POLARITY_CC2_DTS = 3,
+
+	/*
+	 * The current TCPC code relies on these specific POLARITY values.
+	 * POLARITY_COUNT exists so that, if the list grows for any reason,
+	 * a check against it gives a hint that other places need to be
+	 * adjusted.
+	 */
+ POLARITY_COUNT
+};
+
+#define PD_STATUS_EVENT_SOP_DISC_DONE BIT(0)
+#define PD_STATUS_EVENT_SOP_PRIME_DISC_DONE BIT(1)
+#define PD_STATUS_EVENT_HARD_RESET BIT(2)
+#define PD_STATUS_EVENT_DISCONNECTED BIT(3)
+#define PD_STATUS_EVENT_MUX_0_SET_DONE BIT(4)
+#define PD_STATUS_EVENT_MUX_1_SET_DONE BIT(5)
+
+struct ec_params_typec_status {
+ uint8_t port;
+} __ec_align1;
+
+struct ec_response_typec_status {
+ uint8_t pd_enabled; /* PD communication enabled - bool */
+ uint8_t dev_connected; /* Device connected - bool */
+ uint8_t sop_connected; /* Device is SOP PD capable - bool */
+ uint8_t source_cap_count; /* Number of Source Cap PDOs */
+
+ uint8_t power_role; /* enum pd_power_role */
+ uint8_t data_role; /* enum pd_data_role */
+ uint8_t vconn_role; /* enum pd_vconn_role */
+ uint8_t sink_cap_count; /* Number of Sink Cap PDOs */
+
+ uint8_t polarity; /* enum tcpc_cc_polarity */
+ uint8_t cc_state; /* enum pd_cc_states */
+ uint8_t dp_pin; /* DP pin mode (MODE_DP_IN_[A-E]) */
+ uint8_t mux_state; /* USB_PD_MUX* - encoded mux state */
+
+ char tc_state[32]; /* TC state name */
+
+ uint32_t events; /* PD_STATUS_EVENT bitmask */
+
+ /*
+ * BCD PD revisions for partners
+ *
+	 * The format has the PD major revision in the upper nibble, and the
+	 * PD minor version in the next nibble. The following two nibbles are
+	 * currently 0.
+	 * e.g. PD 3.2 would map to 0x3200
+ *
+ * PD major/minor will be 0 if no PD device is connected.
+ */
+ uint16_t sop_revision;
+ uint16_t sop_prime_revision;
+
+ uint32_t source_cap_pdos[7]; /* Max 7 PDOs can be present */
+
+ uint32_t sink_cap_pdos[7]; /* Max 7 PDOs can be present */
+} __ec_align1;
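
Following the BCD layout described above, decoding a partner revision is a pair of shifts; a minimal sketch, where status is an assumed pointer to a filled-in struct ec_response_typec_status:

    /* e.g. sop_revision == 0x3200 -> major 3, minor 2 */
    uint8_t pd_major = (status->sop_revision >> 12) & 0xf;
    uint8_t pd_minor = (status->sop_revision >> 8) & 0xf;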
+
/*****************************************************************************/
/* The command range 0x200-0x2FF is reserved for Rotor. */
@@ -5691,7 +6139,7 @@ struct ec_response_fp_encryption_status {
struct ec_response_tp_frame_info {
uint32_t n_frames;
- uint32_t frame_sizes[0];
+ uint32_t frame_sizes[];
} __ec_align4;
/* Create a snapshot of current frame readings */
@@ -5801,6 +6249,13 @@ struct ec_params_charger_control {
uint8_t allow_charging;
} __ec_align_size1;
+/* Get ACK from the USB-C SS muxes */
+#define EC_CMD_USB_PD_MUX_ACK 0x0603
+
+struct ec_params_usb_pd_mux_ack {
+ uint8_t port; /* USB-C port number */
+} __ec_align1;
+
/*****************************************************************************/
/*
* Reserve a range of host commands for board-specific, experimental, or
diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h
index ba5914770191..e43107e0bee1 100644
--- a/include/linux/platform_data/cros_ec_proto.h
+++ b/include/linux/platform_data/cros_ec_proto.h
@@ -19,8 +19,12 @@
#define CROS_EC_DEV_ISH_NAME "cros_ish"
#define CROS_EC_DEV_PD_NAME "cros_pd"
#define CROS_EC_DEV_SCP_NAME "cros_scp"
+#define CROS_EC_DEV_SCP_C1_NAME "cros_scp_c1"
#define CROS_EC_DEV_TP_NAME "cros_tp"
+#define CROS_EC_DEV_EC_INDEX 0
+#define CROS_EC_DEV_PD_INDEX 1
+
/*
* The EC is unresponsive for a time after a reboot command. Add a
* simple delay to make sure that the bus stays locked.
@@ -69,15 +73,13 @@ struct cros_ec_command {
uint32_t outsize;
uint32_t insize;
uint32_t result;
- uint8_t data[0];
+ uint8_t data[];
};
/**
* struct cros_ec_device - Information about a ChromeOS EC device.
* @phys_name: Name of physical comms layer (e.g. 'i2c-4').
* @dev: Device pointer for physical comms device
- * @was_wake_device: True if this device was set to wake the system from
- * sleep at the last suspend.
* @cros_class: The class structure for this device.
* @cmd_readmem: Direct read of the EC memory-mapped region, if supported.
* @offset: Is within EC_LPC_ADDR_MEMMAP region.
@@ -125,6 +127,9 @@ struct cros_ec_command {
* @host_event_wake_mask: Mask of host events that cause wake from suspend.
* @last_event_time: exact time from the hard irq when we got notified of
* a new event.
+ * @notifier_ready: The notifier_block to let the kernel re-query EC
+ * communication protocol when the EC sends
+ * EC_HOST_EVENT_INTERFACE_READY.
* @ec: The platform_device used by the mfd driver to interface with the
* main EC.
* @pd: The platform_device used by the mfd driver to interface with the
@@ -134,7 +139,6 @@ struct cros_ec_device {
/* These are used by other drivers that want to talk to the EC */
const char *phys_name;
struct device *dev;
- bool was_wake_device;
struct class *cros_class;
int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset,
unsigned int bytes, void *dest);
@@ -165,7 +169,9 @@ struct cros_ec_device {
int event_size;
u32 host_event_wake_mask;
u32 last_resume_result;
+ u16 suspend_timeout_ms;
ktime_t last_event_time;
+ struct notifier_block notifier_ready;
/* The platform devices used by the mfd driver */
struct platform_device *ec;
@@ -201,7 +207,7 @@ struct cros_ec_dev {
struct cros_ec_debugfs *debug_info;
bool has_kb_wake_angle;
u16 cmd_offset;
- u32 features[2];
+ struct ec_response_get_features features;
};
#define to_cros_ec_dev(dev) container_of(dev, struct cros_ec_dev, class_dev)
@@ -226,10 +232,13 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev);
-int cros_ec_check_features(struct cros_ec_dev *ec, int feature);
+bool cros_ec_check_features(struct cros_ec_dev *ec, int feature);
int cros_ec_get_sensor_count(struct cros_ec_dev *ec);
+int cros_ec_cmd(struct cros_ec_device *ec_dev, unsigned int version, int command, void *outdata,
+ size_t outsize, void *indata, size_t insize);
+
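
The new cros_ec_cmd() helper collapses the usual allocate-and-fill struct cros_ec_command boilerplate into a single call. A hedged sketch, reusing the EC_CMD_USB_PD_MUX_ACK command added earlier in this patch (port is an assumed variable):

    struct ec_params_usb_pd_mux_ack params = { .port = port };
    int ret;

    ret = cros_ec_cmd(ec_dev, 0, EC_CMD_USB_PD_MUX_ACK,
                      &params, sizeof(params), NULL, 0);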
/**
* cros_ec_get_time_ns() - Return time in ns.
*
diff --git a/include/linux/platform_data/cros_ec_sensorhub.h b/include/linux/platform_data/cros_ec_sensorhub.h
index bef7ffc7fce1..0ecce6aa69d5 100644
--- a/include/linux/platform_data/cros_ec_sensorhub.h
+++ b/include/linux/platform_data/cros_ec_sensorhub.h
@@ -8,8 +8,13 @@
#ifndef __LINUX_PLATFORM_DATA_CROS_EC_SENSORHUB_H
#define __LINUX_PLATFORM_DATA_CROS_EC_SENSORHUB_H
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
#include <linux/platform_data/cros_ec_commands.h>
+struct iio_dev;
+
/**
* struct cros_ec_sensor_platform - ChromeOS EC sensor platform information.
* @sensor_num: Id of the sensor, as reported by the EC.
@@ -19,12 +24,171 @@ struct cros_ec_sensor_platform {
};
/**
+ * typedef cros_ec_sensorhub_push_data_cb_t - Callback function to send datum
+ * to specific sensors.
+ *
+ * @indio_dev: The IIO device that will process the sample.
+ * @data: Vector array of the ring sample.
+ * @timestamp: Timestamp in host timespace when the sample was acquired by
+ * the EC.
+ */
+typedef int (*cros_ec_sensorhub_push_data_cb_t)(struct iio_dev *indio_dev,
+ s16 *data,
+ s64 timestamp);
+
+struct cros_ec_sensorhub_sensor_push_data {
+ struct iio_dev *indio_dev;
+ cros_ec_sensorhub_push_data_cb_t push_data_cb;
+};
+
+enum {
+ CROS_EC_SENSOR_LAST_TS,
+ CROS_EC_SENSOR_NEW_TS,
+ CROS_EC_SENSOR_ALL_TS
+};
+
+struct cros_ec_sensors_ring_sample {
+ u8 sensor_id;
+ u8 flag;
+ s16 vector[3];
+ s64 timestamp;
+} __packed;
+
+/* State used for cros_ec_ring_fix_overflow */
+struct cros_ec_sensors_ec_overflow_state {
+ s64 offset;
+ s64 last;
+};
+
+/* Length of the filter, how long to remember entries for */
+#define CROS_EC_SENSORHUB_TS_HISTORY_SIZE 64
+
+/**
+ * struct cros_ec_sensors_ts_filter_state - Timestamp filter state.
+ *
+ * @x_offset: x is the EC interrupt time; x_offset is its last value.
+ * @y_offset: y is the difference between AP and EC time; y_offset is its
+ *            last value.
+ * @x_history: The past history of x, relative to x_offset.
+ * @y_history: The past history of y, relative to y_offset.
+ * @m_history: Rate between y and x.
+ * @history_len: Amount of valid historic data in the arrays.
+ * @temp_buf: Temporary buffer used when updating the filter.
+ * @median_m: Median value of m_history.
+ * @median_error: Final error to apply to the AP interrupt timestamp to get
+ *                the "true timestamp" at which the event occurred.
+ */
+struct cros_ec_sensors_ts_filter_state {
+ s64 x_offset, y_offset;
+ s64 x_history[CROS_EC_SENSORHUB_TS_HISTORY_SIZE];
+ s64 y_history[CROS_EC_SENSORHUB_TS_HISTORY_SIZE];
+ s64 m_history[CROS_EC_SENSORHUB_TS_HISTORY_SIZE];
+ int history_len;
+
+ s64 temp_buf[CROS_EC_SENSORHUB_TS_HISTORY_SIZE];
+
+ s64 median_m;
+ s64 median_error;
+};
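
As a sketch of how median_m could fall out of this state — copy the valid history into temp_buf, sort it, take the middle element. cmp_s64() is an assumed s64 comparator, sort() is the kernel's from <linux/sort.h>, and the real logic lives in the sensorhub ring code:

    static s64 ts_filter_median_m(struct cros_ec_sensors_ts_filter_state *st)
    {
        memcpy(st->temp_buf, st->m_history,
               st->history_len * sizeof(st->temp_buf[0]));
        sort(st->temp_buf, st->history_len, sizeof(st->temp_buf[0]),
             cmp_s64, NULL);
        return st->temp_buf[st->history_len / 2];
    }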
+
+/**
+ * struct cros_ec_sensors_ts_batch_state - State of the batch of a single sensor.
+ *
+ * Used to store information for batching data using the median filter
+ * information.
+ *
+ * @penul_ts: Last but one batch timestamp (penultimate timestamp).
+ *            Used for timestamp spreading calculations
+ *            when a batch shows up.
+ * @penul_len: Last but one batch length.
+ * @last_ts: Last batch timestamp.
+ * @last_len: Last batch length.
+ * @newest_sensor_event: Last sensor timestamp.
+ */
+struct cros_ec_sensors_ts_batch_state {
+ s64 penul_ts;
+ int penul_len;
+ s64 last_ts;
+ int last_len;
+ s64 newest_sensor_event;
+};
+
+/**
* struct cros_ec_sensorhub - Sensor Hub device data.
*
+ * @dev: Device object, mostly used for logging.
* @ec: Embedded Controller where the hub is located.
+ * @sensor_num: Number of MEMS sensors present in the EC.
+ * @msg: Structure to send FIFO requests.
+ * @params: Pointer to parameters in msg.
+ * @resp: Pointer to responses in msg.
+ * @cmd_lock: Lock for sending msg.
+ * @notifier: Notifier to kick the FIFO interrupt.
+ * @ring: Preprocessed ring to store events.
+ * @fifo_timestamp: Array for event timestamp and spreading.
+ * @fifo_info: Copy of FIFO information coming from the EC.
+ * @fifo_size: Size of the ring.
+ * @batch_state: Per sensor information of the last batches received.
+ * @overflow_a: For handling timestamp overflow for a time (sensor events)
+ * @overflow_b: For handling timestamp overflow for b time (ec interrupts)
+ * @filter: Median filter structure.
+ * @tight_timestamps: Set to true when the EC supports tight timestamping:
+ *                    the timestamps reported from the EC have low jitter,
+ *                    and a timestamp comes before every sample. Set either
+ *                    by feature bits coming from the EC or by userspace.
+ * @future_timestamp_count: Statistics used to compute shaved time.
+ * This occurs when timestamp interpolation from EC
+ * time to AP time accidentally puts timestamps in
+ * the future. These timestamps are clamped to
+ * `now` and these count/total_ns maintain the
+ * statistics for how much time was removed in a
+ * given period.
+ * @future_timestamp_total_ns: Total amount of time shaved.
+ * @push_data: Array of callbacks to send datums to the IIO sensor objects.
*/
struct cros_ec_sensorhub {
+ struct device *dev;
struct cros_ec_dev *ec;
+ int sensor_num;
+
+ struct cros_ec_command *msg;
+ struct ec_params_motion_sense *params;
+ struct ec_response_motion_sense *resp;
+ struct mutex cmd_lock; /* Lock for protecting msg structure. */
+
+ struct notifier_block notifier;
+
+ struct cros_ec_sensors_ring_sample *ring;
+
+ ktime_t fifo_timestamp[CROS_EC_SENSOR_ALL_TS];
+ struct ec_response_motion_sense_fifo_info *fifo_info;
+ int fifo_size;
+
+ struct cros_ec_sensors_ts_batch_state *batch_state;
+
+ struct cros_ec_sensors_ec_overflow_state overflow_a;
+ struct cros_ec_sensors_ec_overflow_state overflow_b;
+
+ struct cros_ec_sensors_ts_filter_state filter;
+
+ int tight_timestamps;
+
+ s32 future_timestamp_count;
+ s64 future_timestamp_total_ns;
+
+ struct cros_ec_sensorhub_sensor_push_data *push_data;
};
+int cros_ec_sensorhub_register_push_data(struct cros_ec_sensorhub *sensorhub,
+ u8 sensor_num,
+ struct iio_dev *indio_dev,
+ cros_ec_sensorhub_push_data_cb_t cb);
+
+void cros_ec_sensorhub_unregister_push_data(struct cros_ec_sensorhub *sensorhub,
+ u8 sensor_num);
+
+int cros_ec_sensorhub_ring_allocate(struct cros_ec_sensorhub *sensorhub);
+int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub);
+void cros_ec_sensorhub_ring_remove(void *arg);
+int cros_ec_sensorhub_ring_fifo_enable(struct cros_ec_sensorhub *sensorhub,
+ bool on);
+
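
Usage sketch for the push-data path — an IIO sensor driver registers a per-sensor callback; all names below are hypothetical:

    static int my_push_sample(struct iio_dev *indio_dev, s16 *data,
                              s64 timestamp)
    {
        /* push the vector and timestamp into the IIO buffer here */
        return 0;
    }

    /* in the sensor driver's probe, once sensorhub is available: */
    ret = cros_ec_sensorhub_register_push_data(sensorhub, sensor_num,
                                               indio_dev, my_push_sample);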
#endif /* __LINUX_PLATFORM_DATA_CROS_EC_SENSORHUB_H */
diff --git a/include/linux/platform_data/cros_usbpd_notify.h b/include/linux/platform_data/cros_usbpd_notify.h
new file mode 100644
index 000000000000..4f2791722b6d
--- /dev/null
+++ b/include/linux/platform_data/cros_usbpd_notify.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ChromeOS EC Power Delivery Notifier Driver
+ *
+ * Copyright 2020 Google LLC
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_CROS_USBPD_NOTIFY_H
+#define __LINUX_PLATFORM_DATA_CROS_USBPD_NOTIFY_H
+
+#include <linux/notifier.h>
+
+int cros_usbpd_register_notify(struct notifier_block *nb);
+
+void cros_usbpd_unregister_notify(struct notifier_block *nb);
+
+#endif /* __LINUX_PLATFORM_DATA_CROS_USBPD_NOTIFY_H */
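
A minimal consumer sketch for this notifier interface, assuming a driver that wants USB PD events (names hypothetical):

    static int my_pd_event(struct notifier_block *nb,
                           unsigned long host_event, void *data)
    {
        /* handle the USB PD event here */
        return NOTIFY_OK;
    }

    static struct notifier_block my_pd_nb = {
        .notifier_call = my_pd_event,
    };

    /* in probe: */   cros_usbpd_register_notify(&my_pd_nb);
    /* in remove: */  cros_usbpd_unregister_notify(&my_pd_nb);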
diff --git a/include/linux/platform_data/davinci-cpufreq.h b/include/linux/platform_data/davinci-cpufreq.h
index 3fbf9f2793b5..bc208c64e3d7 100644
--- a/include/linux/platform_data/davinci-cpufreq.h
+++ b/include/linux/platform_data/davinci-cpufreq.h
@@ -2,7 +2,7 @@
/*
* TI DaVinci CPUFreq platform support.
*
- * Copyright (C) 2009 Texas Instruments, Inc. http://www.ti.com/
+ * Copyright (C) 2009 Texas Instruments, Inc. https://www.ti.com/
*/
#ifndef _MACH_DAVINCI_CPUFREQ_H
diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h
index 7fe80f1c7e08..c8645b2ed3c0 100644
--- a/include/linux/platform_data/davinci_asp.h
+++ b/include/linux/platform_data/davinci_asp.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI DaVinci Audio Serial Port support
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __DAVINCI_ASP_H
@@ -96,6 +88,7 @@ enum {
MCASP_VERSION_2, /* DA8xx/OMAPL1x */
MCASP_VERSION_3, /* TI81xx/AM33xx */
MCASP_VERSION_4, /* DRA7xxx */
+ MCASP_VERSION_OMAP, /* OMAP4/5 */
};
enum mcbsp_clk_input_pin {
diff --git a/include/linux/platform_data/dma-atmel.h b/include/linux/platform_data/dma-atmel.h
deleted file mode 100644
index 069637e6004f..000000000000
--- a/include/linux/platform_data/dma-atmel.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Header file for the Atmel AHB DMA Controller driver
- *
- * Copyright (C) 2008 Atmel Corporation
- */
-#ifndef AT_HDMAC_H
-#define AT_HDMAC_H
-
-#include <linux/dmaengine.h>
-
-/**
- * struct at_dma_platform_data - Controller configuration parameters
- * @nr_channels: Number of channels supported by hardware (max 8)
- * @cap_mask: dma_capability flags supported by the platform
- */
-struct at_dma_platform_data {
- unsigned int nr_channels;
- dma_cap_mask_t cap_mask;
-};
-
-/**
- * struct at_dma_slave - Controller-specific information about a slave
- * @dma_dev: required DMA master device
- * @cfg: Platform-specific initializer for the CFG register
- */
-struct at_dma_slave {
- struct device *dma_dev;
- u32 cfg;
-};
-
-
-/* Platform-configurable bits in CFG */
-#define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */
-
-#define ATC_SRC_PER(h) (0xFU & (h)) /* Channel src rq associated with periph handshaking ifc h */
-#define ATC_DST_PER(h) ((0xFU & (h)) << 4) /* Channel dst rq associated with periph handshaking ifc h */
-#define ATC_SRC_REP (0x1 << 8) /* Source Replay Mod */
-#define ATC_SRC_H2SEL (0x1 << 9) /* Source Handshaking Mod */
-#define ATC_SRC_H2SEL_SW (0x0 << 9)
-#define ATC_SRC_H2SEL_HW (0x1 << 9)
-#define ATC_SRC_PER_MSB(h) (ATC_PER_MSB(h) << 10) /* Channel src rq (most significant bits) */
-#define ATC_DST_REP (0x1 << 12) /* Destination Replay Mod */
-#define ATC_DST_H2SEL (0x1 << 13) /* Destination Handshaking Mod */
-#define ATC_DST_H2SEL_SW (0x0 << 13)
-#define ATC_DST_H2SEL_HW (0x1 << 13)
-#define ATC_DST_PER_MSB(h) (ATC_PER_MSB(h) << 14) /* Channel dst rq (most significant bits) */
-#define ATC_SOD (0x1 << 16) /* Stop On Done */
-#define ATC_LOCK_IF (0x1 << 20) /* Interface Lock */
-#define ATC_LOCK_B (0x1 << 21) /* AHB Bus Lock */
-#define ATC_LOCK_IF_L (0x1 << 22) /* Master Interface Arbiter Lock */
-#define ATC_LOCK_IF_L_CHUNK (0x0 << 22)
-#define ATC_LOCK_IF_L_BUFFER (0x1 << 22)
-#define ATC_AHB_PROT_MASK (0x7 << 24) /* AHB Protection */
-#define ATC_FIFOCFG_MASK (0x3 << 28) /* FIFO Request Configuration */
-#define ATC_FIFOCFG_LARGESTBURST (0x0 << 28)
-#define ATC_FIFOCFG_HALFFIFO (0x1 << 28)
-#define ATC_FIFOCFG_ENOUGHSPACE (0x2 << 28)
-
-
-#endif /* AT_HDMAC_H */
diff --git a/include/linux/platform_data/dma-coh901318.h b/include/linux/platform_data/dma-coh901318.h
deleted file mode 100644
index 4cca529f8d56..000000000000
--- a/include/linux/platform_data/dma-coh901318.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Platform data for the COH901318 DMA controller
- * Copyright (C) 2007-2013 ST-Ericsson
- */
-
-#ifndef PLAT_COH901318_H
-#define PLAT_COH901318_H
-
-#ifdef CONFIG_COH901318
-
-/* We only support the U300 DMA channels */
-#define U300_DMA_MSL_TX_0 0
-#define U300_DMA_MSL_TX_1 1
-#define U300_DMA_MSL_TX_2 2
-#define U300_DMA_MSL_TX_3 3
-#define U300_DMA_MSL_TX_4 4
-#define U300_DMA_MSL_TX_5 5
-#define U300_DMA_MSL_TX_6 6
-#define U300_DMA_MSL_RX_0 7
-#define U300_DMA_MSL_RX_1 8
-#define U300_DMA_MSL_RX_2 9
-#define U300_DMA_MSL_RX_3 10
-#define U300_DMA_MSL_RX_4 11
-#define U300_DMA_MSL_RX_5 12
-#define U300_DMA_MSL_RX_6 13
-#define U300_DMA_MMCSD_RX_TX 14
-#define U300_DMA_MSPRO_TX 15
-#define U300_DMA_MSPRO_RX 16
-#define U300_DMA_UART0_TX 17
-#define U300_DMA_UART0_RX 18
-#define U300_DMA_APEX_TX 19
-#define U300_DMA_APEX_RX 20
-#define U300_DMA_PCM_I2S0_TX 21
-#define U300_DMA_PCM_I2S0_RX 22
-#define U300_DMA_PCM_I2S1_TX 23
-#define U300_DMA_PCM_I2S1_RX 24
-#define U300_DMA_XGAM_CDI 25
-#define U300_DMA_XGAM_PDI 26
-#define U300_DMA_SPI_TX 27
-#define U300_DMA_SPI_RX 28
-#define U300_DMA_GENERAL_PURPOSE_0 29
-#define U300_DMA_GENERAL_PURPOSE_1 30
-#define U300_DMA_GENERAL_PURPOSE_2 31
-#define U300_DMA_GENERAL_PURPOSE_3 32
-#define U300_DMA_GENERAL_PURPOSE_4 33
-#define U300_DMA_GENERAL_PURPOSE_5 34
-#define U300_DMA_GENERAL_PURPOSE_6 35
-#define U300_DMA_GENERAL_PURPOSE_7 36
-#define U300_DMA_GENERAL_PURPOSE_8 37
-#define U300_DMA_UART1_TX 38
-#define U300_DMA_UART1_RX 39
-
-#define U300_DMA_DEVICE_CHANNELS 32
-#define U300_DMA_CHANNELS 40
-
-/**
- * coh901318_filter_id() - DMA channel filter function
- * @chan: dma channel handle
- * @chan_id: id of dma channel to be filter out
- *
- * In dma_request_channel() it specifies what channel id to be requested
- */
-bool coh901318_filter_id(struct dma_chan *chan, void *chan_id);
-#else
-static inline bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
-{
- return false;
-}
-#endif
-
-#endif /* PLAT_COH901318_H */
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
index f3eaf9ec00a1..860ba4bc5ead 100644
--- a/include/linux/platform_data/dma-dw.h
+++ b/include/linux/platform_data/dma-dw.h
@@ -8,10 +8,15 @@
#ifndef _PLATFORM_DATA_DMA_DW_H
#define _PLATFORM_DATA_DMA_DW_H
-#include <linux/device.h>
+#include <linux/bits.h>
+#include <linux/types.h>
#define DW_DMA_MAX_NR_MASTERS 4
#define DW_DMA_MAX_NR_CHANNELS 8
+#define DW_DMA_MIN_BURST 1
+#define DW_DMA_MAX_BURST 256
+
+struct device;
/**
* struct dw_dma_slave - Controller-specific information about a slave
@@ -21,6 +26,7 @@
* @dst_id: dst request line
* @m_master: memory master for transfers on allocated channel
* @p_master: peripheral master for transfers on allocated channel
+ * @channels: mask of the channels permitted for allocation (zero value means any)
* @hs_polarity:set active low polarity of handshake interface
*/
struct dw_dma_slave {
@@ -29,38 +35,45 @@ struct dw_dma_slave {
u8 dst_id;
u8 m_master;
u8 p_master;
+ u8 channels;
bool hs_polarity;
};
/**
* struct dw_dma_platform_data - Controller configuration parameters
+ * @nr_masters: Number of AHB masters supported by the controller
* @nr_channels: Number of channels supported by hardware (max 8)
* @chan_allocation_order: Allocate channels starting from 0 or 7
* @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
* @block_size: Maximum block size supported by the controller
- * @nr_masters: Number of AHB masters supported by the controller
* @data_width: Maximum data width supported by hardware per AHB master
* (in bytes, power of 2)
* @multi_block: Multi block transfers supported by hardware per channel.
+ * @max_burst: Maximum value of burst transaction size supported by hardware
+ * per channel (in units of CTL.SRC_TR_WIDTH/CTL.DST_TR_WIDTH).
* @protctl: Protection control signals setting per channel.
+ * @quirks: Optional platform quirks.
*/
struct dw_dma_platform_data {
- unsigned int nr_channels;
+ u32 nr_masters;
+ u32 nr_channels;
#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
- unsigned char chan_allocation_order;
+ u32 chan_allocation_order;
#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */
#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */
- unsigned char chan_priority;
- unsigned int block_size;
- unsigned char nr_masters;
- unsigned char data_width[DW_DMA_MAX_NR_MASTERS];
- unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS];
+ u32 chan_priority;
+ u32 block_size;
+ u32 data_width[DW_DMA_MAX_NR_MASTERS];
+ u32 multi_block[DW_DMA_MAX_NR_CHANNELS];
+ u32 max_burst[DW_DMA_MAX_NR_CHANNELS];
#define CHAN_PROTCTL_PRIVILEGED BIT(0)
#define CHAN_PROTCTL_BUFFERABLE BIT(1)
#define CHAN_PROTCTL_CACHEABLE BIT(2)
#define CHAN_PROTCTL_MASK GENMASK(2, 0)
- unsigned char protctl;
+ u32 protctl;
+#define DW_DMA_QUIRK_XBAR_PRESENT BIT(0)
+ u32 quirks;
};
#endif /* _PLATFORM_DATA_DMA_DW_H */
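
With the fields now u32 and the per-channel max_burst/quirks additions, a board file might fill the structure as in this purely illustrative sketch:

    static struct dw_dma_platform_data dw_pdata = {
        .nr_masters = 2,
        .nr_channels = 8,
        .chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
        .chan_priority = CHAN_PRIORITY_ASCENDING,
        .block_size = 4095,
        .data_width = { 4, 4 },              /* bytes per AHB master */
        .multi_block = { [0 ... 7] = 1 },
        .max_burst = { [0 ... 7] = DW_DMA_MAX_BURST },
        .protctl = CHAN_PROTCTL_PRIVILEGED,
    };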
diff --git a/include/linux/platform_data/dma-hsu.h b/include/linux/platform_data/dma-hsu.h
index c65b412b2b33..611bae193c1c 100644
--- a/include/linux/platform_data/dma-hsu.h
+++ b/include/linux/platform_data/dma-hsu.h
@@ -8,7 +8,7 @@
#ifndef _PLATFORM_DATA_DMA_HSU_H
#define _PLATFORM_DATA_DMA_HSU_H
-#include <linux/device.h>
+struct device;
struct hsu_dma_slave {
struct device *dma_dev;
diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h
deleted file mode 100644
index 30e676b36b24..000000000000
--- a/include/linux/platform_data/dma-imx-sdma.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __MACH_MXC_SDMA_H__
-#define __MACH_MXC_SDMA_H__
-
-/**
- * struct sdma_script_start_addrs - SDMA script start pointers
- *
- * start addresses of the different functions in the physical
- * address space of the SDMA engine.
- */
-struct sdma_script_start_addrs {
- s32 ap_2_ap_addr;
- s32 ap_2_bp_addr;
- s32 ap_2_ap_fixed_addr;
- s32 bp_2_ap_addr;
- s32 loopback_on_dsp_side_addr;
- s32 mcu_interrupt_only_addr;
- s32 firi_2_per_addr;
- s32 firi_2_mcu_addr;
- s32 per_2_firi_addr;
- s32 mcu_2_firi_addr;
- s32 uart_2_per_addr;
- s32 uart_2_mcu_addr;
- s32 per_2_app_addr;
- s32 mcu_2_app_addr;
- s32 per_2_per_addr;
- s32 uartsh_2_per_addr;
- s32 uartsh_2_mcu_addr;
- s32 per_2_shp_addr;
- s32 mcu_2_shp_addr;
- s32 ata_2_mcu_addr;
- s32 mcu_2_ata_addr;
- s32 app_2_per_addr;
- s32 app_2_mcu_addr;
- s32 shp_2_per_addr;
- s32 shp_2_mcu_addr;
- s32 mshc_2_mcu_addr;
- s32 mcu_2_mshc_addr;
- s32 spdif_2_mcu_addr;
- s32 mcu_2_spdif_addr;
- s32 asrc_2_mcu_addr;
- s32 ext_mem_2_ipu_addr;
- s32 descrambler_addr;
- s32 dptc_dvfs_addr;
- s32 utra_addr;
- s32 ram_code_start_addr;
- /* End of v1 array */
- s32 mcu_2_ssish_addr;
- s32 ssish_2_mcu_addr;
- s32 hdmi_dma_addr;
- /* End of v2 array */
- s32 zcanfd_2_mcu_addr;
- s32 zqspi_2_mcu_addr;
- s32 mcu_2_ecspi_addr;
- /* End of v3 array */
- s32 mcu_2_zqspi_addr;
- /* End of v4 array */
-};
-
-/**
- * struct sdma_platform_data - platform specific data for SDMA engine
- *
- * @fw_name The firmware name
- * @script_addrs SDMA scripts addresses in SDMA ROM
- */
-struct sdma_platform_data {
- char *fw_name;
- struct sdma_script_start_addrs *script_addrs;
-};
-
-#endif /* __MACH_MXC_SDMA_H__ */
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h
deleted file mode 100644
index 281adbb26e6b..000000000000
--- a/include/linux/platform_data/dma-imx.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
- */
-
-#ifndef __ASM_ARCH_MXC_DMA_H__
-#define __ASM_ARCH_MXC_DMA_H__
-
-#include <linux/scatterlist.h>
-#include <linux/device.h>
-#include <linux/dmaengine.h>
-
-/*
- * This enumerates peripheral types. Used for SDMA.
- */
-enum sdma_peripheral_type {
- IMX_DMATYPE_SSI, /* MCU domain SSI */
- IMX_DMATYPE_SSI_SP, /* Shared SSI */
- IMX_DMATYPE_MMC, /* MMC */
- IMX_DMATYPE_SDHC, /* SDHC */
- IMX_DMATYPE_UART, /* MCU domain UART */
- IMX_DMATYPE_UART_SP, /* Shared UART */
- IMX_DMATYPE_FIRI, /* FIRI */
- IMX_DMATYPE_CSPI, /* MCU domain CSPI */
- IMX_DMATYPE_CSPI_SP, /* Shared CSPI */
- IMX_DMATYPE_SIM, /* SIM */
- IMX_DMATYPE_ATA, /* ATA */
- IMX_DMATYPE_CCM, /* CCM */
- IMX_DMATYPE_EXT, /* External peripheral */
- IMX_DMATYPE_MSHC, /* Memory Stick Host Controller */
- IMX_DMATYPE_MSHC_SP, /* Shared Memory Stick Host Controller */
- IMX_DMATYPE_DSP, /* DSP */
- IMX_DMATYPE_MEMORY, /* Memory */
- IMX_DMATYPE_FIFO_MEMORY,/* FIFO type Memory */
- IMX_DMATYPE_SPDIF, /* SPDIF */
- IMX_DMATYPE_IPU_MEMORY, /* IPU Memory */
- IMX_DMATYPE_ASRC, /* ASRC */
- IMX_DMATYPE_ESAI, /* ESAI */
- IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */
- IMX_DMATYPE_ASRC_SP, /* Shared ASRC */
- IMX_DMATYPE_SAI, /* SAI */
-};
-
-enum imx_dma_prio {
- DMA_PRIO_HIGH = 0,
- DMA_PRIO_MEDIUM = 1,
- DMA_PRIO_LOW = 2
-};
-
-struct imx_dma_data {
- int dma_request; /* DMA request line */
- int dma_request2; /* secondary DMA request line */
- enum sdma_peripheral_type peripheral_type;
- int priority;
-};
-
-static inline int imx_dma_is_ipu(struct dma_chan *chan)
-{
- return !strcmp(dev_name(chan->device->dev), "ipu-core");
-}
-
-static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
-{
- return !strcmp(chan->device->dev->driver->name, "imx-sdma") ||
- !strcmp(chan->device->dev->driver->name, "imx-dma");
-}
-
-#endif
diff --git a/include/linux/platform_data/dmtimer-omap.h b/include/linux/platform_data/dmtimer-omap.h
index bdaaf537604a..95d852aef130 100644
--- a/include/linux/platform_data/dmtimer-omap.h
+++ b/include/linux/platform_data/dmtimer-omap.h
@@ -30,12 +30,12 @@ struct omap_dm_timer_ops {
int (*stop)(struct omap_dm_timer *timer);
int (*set_source)(struct omap_dm_timer *timer, int source);
- int (*set_load)(struct omap_dm_timer *timer, int autoreload,
- unsigned int value);
+ int (*set_load)(struct omap_dm_timer *timer, unsigned int value);
int (*set_match)(struct omap_dm_timer *timer, int enable,
unsigned int match);
int (*set_pwm)(struct omap_dm_timer *timer, int def_on,
- int toggle, int trigger);
+ int toggle, int trigger, int autoreload);
+ int (*get_pwm_status)(struct omap_dm_timer *timer);
int (*set_prescaler)(struct omap_dm_timer *timer, int prescaler);
unsigned int (*read_counter)(struct omap_dm_timer *timer);
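
Against the updated ops, a PWM user now passes autoreload to set_pwm() and can poll get_pwm_status(); a hedged sketch, where timer/ops are assumed obtained from the dmtimer platform data and the trigger constant comes from the timer-ti-dm header:

    ops->set_load(timer, load_value);
    ops->set_pwm(timer, 1 /* def_on */, 1 /* toggle */,
                 OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE,
                 1 /* autoreload */);
    status = ops->get_pwm_status(timer);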
diff --git a/include/linux/platform_data/efm32-spi.h b/include/linux/platform_data/efm32-spi.h
deleted file mode 100644
index a2c56fcd0534..000000000000
--- a/include/linux/platform_data/efm32-spi.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_PLATFORM_DATA_EFM32_SPI_H__
-#define __LINUX_PLATFORM_DATA_EFM32_SPI_H__
-
-#include <linux/types.h>
-
-/**
- * struct efm32_spi_pdata
- * @location: pinmux location for the I/O pins (to be written to the ROUTE
- * register)
- */
-struct efm32_spi_pdata {
- u8 location;
-};
-#endif /* ifndef __LINUX_PLATFORM_DATA_EFM32_SPI_H__ */
diff --git a/include/linux/platform_data/efm32-uart.h b/include/linux/platform_data/efm32-uart.h
deleted file mode 100644
index ccbb8f11db75..000000000000
--- a/include/linux/platform_data/efm32-uart.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- *
- *
- */
-#ifndef __LINUX_PLATFORM_DATA_EFM32_UART_H__
-#define __LINUX_PLATFORM_DATA_EFM32_UART_H__
-
-#include <linux/types.h>
-
-/**
- * struct efm32_uart_pdata
- * @location: pinmux location for the I/O pins (to be written to the ROUTE
- * register)
- */
-struct efm32_uart_pdata {
- u8 location;
-};
-#endif /* ifndef __LINUX_PLATFORM_DATA_EFM32_UART_H__ */
diff --git a/include/linux/platform_data/elm.h b/include/linux/platform_data/elm.h
index 0f491d8abfdd..3cc78f0447b1 100644
--- a/include/linux/platform_data/elm.h
+++ b/include/linux/platform_data/elm.h
@@ -2,7 +2,7 @@
/*
* BCH Error Location Module
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __ELM_H
diff --git a/include/linux/platform_data/emc2305.h b/include/linux/platform_data/emc2305.h
new file mode 100644
index 000000000000..54d672dd6f7d
--- /dev/null
+++ b/include/linux/platform_data/emc2305.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_PLATFORM_DATA_EMC2305__
+#define __LINUX_PLATFORM_DATA_EMC2305__
+
+#define EMC2305_PWM_MAX 5
+
+/**
+ * struct emc2305_platform_data - EMC2305 driver platform data
+ * @max_state: maximum cooling state of the cooling device;
+ * @pwm_num: number of active channels;
+ * @pwm_separate: separate PWM settings for every channel;
+ * @pwm_min: array of minimum PWM per channel;
+ */
+struct emc2305_platform_data {
+ u8 max_state;
+ u8 pwm_num;
+ bool pwm_separate;
+ u8 pwm_min[EMC2305_PWM_MAX];
+};
+
+#endif
diff --git a/include/linux/platform_data/eth_ixp4xx.h b/include/linux/platform_data/eth_ixp4xx.h
deleted file mode 100644
index 6f652ea0c6ae..000000000000
--- a/include/linux/platform_data/eth_ixp4xx.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __PLATFORM_DATA_ETH_IXP4XX
-#define __PLATFORM_DATA_ETH_IXP4XX
-
-#include <linux/types.h>
-
-#define IXP4XX_ETH_NPEA 0x00
-#define IXP4XX_ETH_NPEB 0x10
-#define IXP4XX_ETH_NPEC 0x20
-
-/* Information about built-in Ethernet MAC interfaces */
-struct eth_plat_info {
- u8 phy; /* MII PHY ID, 0 - 31 */
- u8 rxq; /* configurable, currently 0 - 31 only */
- u8 txreadyq;
- u8 hwaddr[6];
-};
-
-#endif
diff --git a/include/linux/platform_data/fb-s3c2410.h b/include/linux/platform_data/fb-s3c2410.h
new file mode 100644
index 000000000000..10c11e6316d6
--- /dev/null
+++ b/include/linux/platform_data/fb-s3c2410.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2004 Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ * Inspired by pxafb.h
+*/
+
+#ifndef __ASM_PLAT_FB_S3C2410_H
+#define __ASM_PLAT_FB_S3C2410_H __FILE__
+
+#include <linux/compiler_types.h>
+
+struct s3c2410fb_hw {
+ unsigned long lcdcon1;
+ unsigned long lcdcon2;
+ unsigned long lcdcon3;
+ unsigned long lcdcon4;
+ unsigned long lcdcon5;
+};
+
+/* LCD description */
+struct s3c2410fb_display {
+ /* LCD type */
+ unsigned type;
+#define S3C2410_LCDCON1_DSCAN4 (0<<5)
+#define S3C2410_LCDCON1_STN4 (1<<5)
+#define S3C2410_LCDCON1_STN8 (2<<5)
+#define S3C2410_LCDCON1_TFT (3<<5)
+
+#define S3C2410_LCDCON1_TFT1BPP (8<<1)
+#define S3C2410_LCDCON1_TFT2BPP (9<<1)
+#define S3C2410_LCDCON1_TFT4BPP (10<<1)
+#define S3C2410_LCDCON1_TFT8BPP (11<<1)
+#define S3C2410_LCDCON1_TFT16BPP (12<<1)
+#define S3C2410_LCDCON1_TFT24BPP (13<<1)
+
+ /* Screen size */
+ unsigned short width;
+ unsigned short height;
+
+ /* Screen info */
+ unsigned short xres;
+ unsigned short yres;
+ unsigned short bpp;
+
+ unsigned pixclock; /* pixclock in picoseconds */
+ unsigned short left_margin; /* value in pixels (TFT) or HCLKs (STN) */
+ unsigned short right_margin; /* value in pixels (TFT) or HCLKs (STN) */
+ unsigned short hsync_len; /* value in pixels (TFT) or HCLKs (STN) */
+ unsigned short upper_margin; /* value in lines (TFT) or 0 (STN) */
+ unsigned short lower_margin; /* value in lines (TFT) or 0 (STN) */
+ unsigned short vsync_len; /* value in lines (TFT) or 0 (STN) */
+
+ /* lcd configuration registers */
+ unsigned long lcdcon5;
+#define S3C2410_LCDCON5_BPP24BL (1<<12)
+#define S3C2410_LCDCON5_FRM565 (1<<11)
+#define S3C2410_LCDCON5_INVVCLK (1<<10)
+#define S3C2410_LCDCON5_INVVLINE (1<<9)
+#define S3C2410_LCDCON5_INVVFRAME (1<<8)
+#define S3C2410_LCDCON5_INVVD (1<<7)
+#define S3C2410_LCDCON5_INVVDEN (1<<6)
+#define S3C2410_LCDCON5_INVPWREN (1<<5)
+#define S3C2410_LCDCON5_INVLEND (1<<4)
+#define S3C2410_LCDCON5_PWREN (1<<3)
+#define S3C2410_LCDCON5_ENLEND (1<<2)
+#define S3C2410_LCDCON5_BSWP (1<<1)
+#define S3C2410_LCDCON5_HWSWP (1<<0)
+};
+
+struct s3c2410fb_mach_info {
+
+ struct s3c2410fb_display *displays; /* attached displays info */
+ unsigned num_displays; /* number of defined displays */
+ unsigned default_display;
+
+ /* GPIOs */
+
+ unsigned long gpcup;
+ unsigned long gpcup_mask;
+ unsigned long gpccon;
+ unsigned long gpccon_mask;
+ unsigned long gpdup;
+ unsigned long gpdup_mask;
+ unsigned long gpdcon;
+ unsigned long gpdcon_mask;
+
+	void __iomem *gpccon_reg;
+	void __iomem *gpcup_reg;
+	void __iomem *gpdcon_reg;
+	void __iomem *gpdup_reg;
+
+ /* lpc3600 control register */
+ unsigned long lpcsel;
+};
+
+extern void s3c24xx_fb_set_platdata(struct s3c2410fb_mach_info *);
+
+#endif /* __ASM_PLAT_FB_S3C2410_H */
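
A board file would typically describe one display and hand it over via s3c24xx_fb_set_platdata(); all timing numbers below are illustrative only:

    static struct s3c2410fb_display my_lcd __initdata = {
        .type         = S3C2410_LCDCON1_TFT,
        .width        = 240,
        .height       = 320,
        .xres         = 240,
        .yres         = 320,
        .bpp          = 16,
        .pixclock     = 166667,
        .left_margin  = 20,
        .right_margin = 8,
        .hsync_len    = 4,
        .upper_margin = 8,
        .lower_margin = 7,
        .vsync_len    = 4,
        .lcdcon5      = S3C2410_LCDCON5_FRM565 | S3C2410_LCDCON5_PWREN,
    };

    static struct s3c2410fb_mach_info my_fb_info __initdata = {
        .displays        = &my_lcd,
        .num_displays    = 1,
        .default_display = 0,
    };

    /* in board init: */
    s3c24xx_fb_set_platdata(&my_fb_info);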
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
index a93841bfb9f7..b82e44662efe 100644
--- a/include/linux/platform_data/gpio-davinci.h
+++ b/include/linux/platform_data/gpio-davinci.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* DaVinci GPIO Platform Related Defines
*
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __DAVINCI_GPIO_PLATFORM_H
diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h
deleted file mode 100644
index 3c606c450d05..000000000000
--- a/include/linux/platform_data/gpio-dwapb.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright(c) 2014 Intel Corporation.
- */
-
-#ifndef GPIO_DW_APB_H
-#define GPIO_DW_APB_H
-
-struct dwapb_port_property {
- struct fwnode_handle *fwnode;
- unsigned int idx;
- unsigned int ngpio;
- unsigned int gpio_base;
- int irq[32];
- bool has_irq;
- bool irq_shared;
-};
-
-struct dwapb_platform_data {
- struct dwapb_port_property *properties;
- unsigned int nports;
-};
-
-#endif
diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h
index 8b30b14b47d3..f377817ce75c 100644
--- a/include/linux/platform_data/gpio-omap.h
+++ b/include/linux/platform_data/gpio-omap.h
@@ -85,6 +85,7 @@
* omap2+ specific GPIO registers
*/
#define OMAP24XX_GPIO_REVISION 0x0000
+#define OMAP24XX_GPIO_SYSCONFIG 0x0010
#define OMAP24XX_GPIO_IRQSTATUS1 0x0018
#define OMAP24XX_GPIO_IRQSTATUS2 0x0028
#define OMAP24XX_GPIO_IRQENABLE2 0x002c
@@ -108,6 +109,7 @@
#define OMAP24XX_GPIO_SETDATAOUT 0x0094
#define OMAP4_GPIO_REVISION 0x0000
+#define OMAP4_GPIO_SYSCONFIG 0x0010
#define OMAP4_GPIO_EOI 0x0020
#define OMAP4_GPIO_IRQSTATUSRAW0 0x0024
#define OMAP4_GPIO_IRQSTATUSRAW1 0x0028
@@ -148,6 +150,7 @@
#ifndef __ASSEMBLER__
struct omap_gpio_reg_offs {
u16 revision;
+ u16 sysconfig;
u16 direction;
u16 datain;
u16 dataout;
diff --git a/include/linux/platform_data/gpio/gpio-amd-fch.h b/include/linux/platform_data/gpio/gpio-amd-fch.h
index 9e46678edb2a..255d51c9d36d 100644
--- a/include/linux/platform_data/gpio/gpio-amd-fch.h
+++ b/include/linux/platform_data/gpio/gpio-amd-fch.h
@@ -19,7 +19,7 @@
#define AMD_FCH_GPIO_REG_GPIO49 0x40
#define AMD_FCH_GPIO_REG_GPIO50 0x41
#define AMD_FCH_GPIO_REG_GPIO51 0x42
-#define AMD_FCH_GPIO_REG_GPIO59_DEVSLP0 0x43
+#define AMD_FCH_GPIO_REG_GPIO55_DEVSLP0 0x43
#define AMD_FCH_GPIO_REG_GPIO57 0x44
#define AMD_FCH_GPIO_REG_GPIO58 0x45
#define AMD_FCH_GPIO_REG_GPIO59_DEVSLP1 0x46
diff --git a/include/linux/platform_data/gpmc-omap.h b/include/linux/platform_data/gpmc-omap.h
index ef663e570552..c9cc4e32435d 100644
--- a/include/linux/platform_data/gpmc-omap.h
+++ b/include/linux/platform_data/gpmc-omap.h
@@ -2,7 +2,7 @@
/*
* OMAP GPMC Platform data
*
- * Copyright (C) 2014 Texas Instruments, Inc. - http://www.ti.com
+ * Copyright (C) 2014 Texas Instruments, Inc. - https://www.ti.com
* Roger Quadros <rogerq@ti.com>
*/
diff --git a/include/linux/platform_data/gsc_hwmon.h b/include/linux/platform_data/gsc_hwmon.h
new file mode 100644
index 000000000000..281f499eda97
--- /dev/null
+++ b/include/linux/platform_data/gsc_hwmon.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _GSC_HWMON_H
+#define _GSC_HWMON_H
+
+enum gsc_hwmon_mode {
+ mode_temperature,
+ mode_voltage_24bit,
+ mode_voltage_raw,
+ mode_voltage_16bit,
+ mode_fan,
+ mode_max,
+};
+
+/**
+ * struct gsc_hwmon_channel - configuration parameters
+ * @reg: I2C register offset
+ * @mode: channel mode
+ * @name: channel name
+ * @mvoffset: voltage offset
+ * @vdiv: voltage divider array (2 resistor values in milli-ohms)
+ */
+struct gsc_hwmon_channel {
+ unsigned int reg;
+ unsigned int mode;
+ const char *name;
+ unsigned int mvoffset;
+ unsigned int vdiv[2];
+};
+
+/**
+ * struct gsc_hwmon_platform_data - platform data for gsc_hwmon driver
+ * @channels: pointer to array of gsc_hwmon_channel structures
+ * describing channels
+ * @nchannels: number of elements in @channels array
+ * @vreference: voltage reference (mV)
+ * @resolution: ADC bit resolution
+ * @fan_base: register base for FAN controller
+ */
+struct gsc_hwmon_platform_data {
+ const struct gsc_hwmon_channel *channels;
+ int nchannels;
+ unsigned int resolution;
+ unsigned int vreference;
+ unsigned int fan_base;
+};
+#endif
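
An illustrative instantiation, with register offsets and scaling values invented for the example:

    static const struct gsc_hwmon_channel my_channels[] = {
        { .reg = 0x00, .mode = mode_temperature, .name = "board_temp" },
        { .reg = 0x02, .mode = mode_voltage_raw, .name = "vdd_5p0",
          .vdiv = { 10000, 10000 } },
    };

    static struct gsc_hwmon_platform_data my_gsc_pdata = {
        .channels   = my_channels,
        .nchannels  = ARRAY_SIZE(my_channels),
        .resolution = 12,
        .vreference = 2500,  /* mV */
    };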
diff --git a/include/linux/platform_data/hirschmann-hellcreek.h b/include/linux/platform_data/hirschmann-hellcreek.h
new file mode 100644
index 000000000000..6a000df5541f
--- /dev/null
+++ b/include/linux/platform_data/hirschmann-hellcreek.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: (GPL-2.0 or MIT) */
+/*
+ * Hirschmann Hellcreek TSN switch platform data.
+ *
+ * Copyright (C) 2020 Linutronix GmbH
+ * Author Kurt Kanzenbach <kurt@linutronix.de>
+ */
+
+#ifndef _HIRSCHMANN_HELLCREEK_H_
+#define _HIRSCHMANN_HELLCREEK_H_
+
+#include <linux/types.h>
+
+struct hellcreek_platform_data {
+ const char *name; /* Switch name */
+	int num_ports;		/* Number of switch ports */
+	int is_100_mbits;	/* Is it configured for 100 or 1000 Mbit/s */
+ int qbv_support; /* Qbv support on front TSN ports */
+ int qbv_on_cpu_port; /* Qbv support on the CPU port */
+ int qbu_support; /* Qbu support on front TSN ports */
+	u16 module_id;		/* Module identification */
+};
+
+#endif /* _HIRSCHMANN_HELLCREEK_H_ */
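
A hypothetical board-level instantiation (all values illustrative):

    static struct hellcreek_platform_data my_hellcreek_pdata = {
        .name            = "hellcreek",
        .num_ports       = 4,
        .is_100_mbits    = 1,
        .qbv_support     = 1,
        .qbv_on_cpu_port = 1,
        .qbu_support     = 0,
        .module_id       = 0x4c30,  /* invented ID */
    };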
diff --git a/include/linux/platform_data/i2c-designware.h b/include/linux/platform_data/i2c-designware.h
deleted file mode 100644
index 014c4a5a7e13..000000000000
--- a/include/linux/platform_data/i2c-designware.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright(c) 2014 Intel Corporation.
- */
-
-#ifndef I2C_DESIGNWARE_H
-#define I2C_DESIGNWARE_H
-
-struct dw_i2c_platform_data {
- unsigned int i2c_scl_freq;
-};
-
-#endif
diff --git a/include/linux/platform_data/i2c-hid.h b/include/linux/platform_data/i2c-hid.h
deleted file mode 100644
index c628bb5e1061..000000000000
--- a/include/linux/platform_data/i2c-hid.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * HID over I2C protocol implementation
- *
- * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
- * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- */
-
-#ifndef __LINUX_I2C_HID_H
-#define __LINUX_I2C_HID_H
-
-#include <linux/regulator/consumer.h>
-#include <linux/types.h>
-
-/**
- * struct i2chid_platform_data - used by hid over i2c implementation.
- * @hid_descriptor_address: i2c register where the HID descriptor is stored.
- * @supplies: regulators for powering on the device.
- * @post_power_delay_ms: delay after powering on before device is usable.
- *
- * Note that it is the responsibility of the platform driver (or the acpi 5.0
- * driver, or the flattened device tree) to setup the irq related to the gpio in
- * the struct i2c_board_info.
- * The platform driver should also setup the gpio according to the device:
- *
- * A typical example is the following:
- * irq = gpio_to_irq(intr_gpio);
- * hkdk4412_i2c_devs5[0].irq = irq; // store the irq in i2c_board_info
- * gpio_request(intr_gpio, "elan-irq");
- * s3c_gpio_setpull(intr_gpio, S3C_GPIO_PULL_UP);
- */
-struct i2c_hid_platform_data {
- u16 hid_descriptor_address;
- struct regulator_bulk_data supplies[2];
- int post_power_delay_ms;
-};
-
-#endif /* __LINUX_I2C_HID_H */
diff --git a/include/linux/platform_data/i2c-pxa.h b/include/linux/platform_data/i2c-pxa.h
index 6a9b28399b39..24953981bd9f 100644
--- a/include/linux/platform_data/i2c-pxa.h
+++ b/include/linux/platform_data/i2c-pxa.h
@@ -7,54 +7,6 @@
#ifndef _I2C_PXA_H_
#define _I2C_PXA_H_
-#if 0
-#define DEF_TIMEOUT 3
-#else
-/* need a longer timeout if we're dealing with the fact we may well be
- * looking at a multi-master environment
-*/
-#define DEF_TIMEOUT 32
-#endif
-
-#define BUS_ERROR (-EREMOTEIO)
-#define XFER_NAKED (-ECONNREFUSED)
-#define I2C_RETRY (-2000) /* an error has occurred retry transmit */
-
-/* ICR initialize bit values
-*
-* 15. FM 0 (100 Khz operation)
-* 14. UR 0 (No unit reset)
-* 13. SADIE 0 (Disables the unit from interrupting on slave addresses
-* matching its slave address)
-* 12. ALDIE 0 (Disables the unit from interrupt when it loses arbitration
-* in master mode)
-* 11. SSDIE 0 (Disables interrupts from a slave stop detected, in slave mode)
-* 10. BEIE 1 (Enable interrupts from detected bus errors, no ACK sent)
-* 9. IRFIE 1 (Enable interrupts from full buffer received)
-* 8. ITEIE 1 (Enables the I2C unit to interrupt when transmit buffer empty)
-* 7. GCD 1 (Disables i2c unit response to general call messages as a slave)
-* 6. IUE 0 (Disable unit until we change settings)
-* 5. SCLE 1 (Enables the i2c clock output for master mode (drives SCL)
-* 4. MA 0 (Only send stop with the ICR stop bit)
-* 3. TB 0 (We are not transmitting a byte initially)
-* 2. ACKNAK 0 (Send an ACK after the unit receives a byte)
-* 1. STOP 0 (Do not send a STOP)
-* 0. START 0 (Do not send a START)
-*
-*/
-#define I2C_ICR_INIT (ICR_BEIE | ICR_IRFIE | ICR_ITEIE | ICR_GCD | ICR_SCLE)
-
-/* I2C status register init values
- *
- * 10. BED 1 (Clear bus error detected)
- * 9. SAD 1 (Clear slave address detected)
- * 7. IRF 1 (Clear IDBR Receive Full)
- * 6. ITE 1 (Clear IDBR Transmit Empty)
- * 5. ALD 1 (Clear Arbitration Loss Detected)
- * 4. SSD 1 (Clear Slave Stop Detected)
- */
-#define I2C_ISR_INIT 0x7FF /* status register init */
-
struct i2c_pxa_platform_data {
unsigned int class;
unsigned int use_pio :1;
diff --git a/include/linux/platform_data/intel-spi.h b/include/linux/platform_data/intel-spi.h
deleted file mode 100644
index 7f53a5c6f35e..000000000000
--- a/include/linux/platform_data/intel-spi.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel PCH/PCU SPI flash driver.
- *
- * Copyright (C) 2016, Intel Corporation
- * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- */
-
-#ifndef INTEL_SPI_PDATA_H
-#define INTEL_SPI_PDATA_H
-
-enum intel_spi_type {
- INTEL_SPI_BYT = 1,
- INTEL_SPI_LPT,
- INTEL_SPI_BXT,
- INTEL_SPI_CNL,
-};
-
-/**
- * struct intel_spi_boardinfo - Board specific data for Intel SPI driver
- * @type: Type which this controller is compatible with
- * @writeable: The chip is writeable
- */
-struct intel_spi_boardinfo {
- enum intel_spi_type type;
- bool writeable;
-};
-
-#endif /* INTEL_SPI_PDATA_H */
diff --git a/include/linux/platform_data/invensense_mpu6050.h b/include/linux/platform_data/invensense_mpu6050.h
index 93974f4cfba1..f05b37521f67 100644
--- a/include/linux/platform_data/invensense_mpu6050.h
+++ b/include/linux/platform_data/invensense_mpu6050.h
@@ -12,7 +12,7 @@
* mounting matrix retrieved from device-tree)
*
* Contains platform specific information on how to configure the MPU6050 to
- * work on this platform. The orientation matricies are 3x3 rotation matricies
+ * work on this platform. The orientation matrices are 3x3 rotation matrices
* that are applied to the data to rotate from the mounting orientation to the
* platform orientation. The values must be one of 0, 1, or -1 and each row and
* column should have exactly 1 non-zero value.
diff --git a/include/linux/platform_data/itco_wdt.h b/include/linux/platform_data/itco_wdt.h
index 2ccdce6a4e27..45d860cac2b0 100644
--- a/include/linux/platform_data/itco_wdt.h
+++ b/include/linux/platform_data/itco_wdt.h
@@ -12,13 +12,16 @@
#define ICH_RES_MEM_OFF 2
#define ICH_RES_MEM_GCS_PMC 0
+/**
+ * struct itco_wdt_platform_data - iTCO_wdt platform data
+ * @name: Name of the platform
+ * @version: iTCO version
+ * @no_reboot_use_pmc: Use PMC BXT API to set and clear NO_REBOOT bit
+ */
struct itco_wdt_platform_data {
char name[32];
unsigned int version;
- /* private data to be passed to update_no_reboot_bit API */
- void *no_reboot_priv;
- /* pointer for platform specific no reboot update function */
- int (*update_no_reboot_bit)(void *priv, bool set);
+ bool no_reboot_use_pmc;
};
#endif /* _ITCO_WDT_H_ */
diff --git a/include/linux/platform_data/jz4740/jz4740_nand.h b/include/linux/platform_data/jz4740/jz4740_nand.h
deleted file mode 100644
index b3f066d63059..000000000000
--- a/include/linux/platform_data/jz4740/jz4740_nand.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
- * JZ4740 SoC NAND controller driver
- */
-
-#ifndef __JZ4740_NAND_H__
-#define __JZ4740_NAND_H__
-
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-
-#define JZ_NAND_NUM_BANKS 4
-
-struct jz_nand_platform_data {
- int num_partitions;
- struct mtd_partition *partitions;
-
- unsigned char banks[JZ_NAND_NUM_BANKS];
-
- void (*ident_callback)(struct platform_device *, struct mtd_info *,
- struct mtd_partition **, int *num_partitions);
-};
-
-#endif
diff --git a/include/linux/platform_data/leds-kirkwood-ns2.h b/include/linux/platform_data/leds-kirkwood-ns2.h
deleted file mode 100644
index eb8a6860e816..000000000000
--- a/include/linux/platform_data/leds-kirkwood-ns2.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Platform data structure for Network Space v2 LED driver
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __LEDS_KIRKWOOD_NS2_H
-#define __LEDS_KIRKWOOD_NS2_H
-
-enum ns2_led_modes {
- NS_V2_LED_OFF,
- NS_V2_LED_ON,
- NS_V2_LED_SATA,
-};
-
-struct ns2_led_modval {
- enum ns2_led_modes mode;
- int cmd_level;
- int slow_level;
-};
-
-struct ns2_led {
- const char *name;
- const char *default_trigger;
- unsigned cmd;
- unsigned slow;
- int num_modes;
- struct ns2_led_modval *modval;
-};
-
-struct ns2_led_platform_data {
- int num_leds;
- struct ns2_led *leds;
-};
-
-#endif /* __LEDS_KIRKWOOD_NS2_H */
diff --git a/include/linux/platform_data/leds-lp55xx.h b/include/linux/platform_data/leds-lp55xx.h
index 96a787100fda..3441064713a3 100644
--- a/include/linux/platform_data/leds-lp55xx.h
+++ b/include/linux/platform_data/leds-lp55xx.h
@@ -12,17 +12,26 @@
#ifndef _LEDS_LP55XX_H
#define _LEDS_LP55XX_H
+#include <linux/gpio/consumer.h>
+#include <linux/led-class-multicolor.h>
+
/* Clock configuration */
#define LP55XX_CLOCK_AUTO 0
#define LP55XX_CLOCK_INT 1
#define LP55XX_CLOCK_EXT 2
+#define LP55XX_MAX_GROUPED_CHAN 4
+
struct lp55xx_led_config {
const char *name;
const char *default_trigger;
u8 chan_nr;
u8 led_current; /* mA x10, 0 if led is not connected */
u8 max_current;
+ int num_colors;
+ unsigned int max_channel;
+ int color_id[LED_COLOR_ID_MAX];
+ int output_num[LED_COLOR_ID_MAX];
};
struct lp55xx_predef_pattern {
@@ -49,7 +58,7 @@ enum lp8501_pwr_sel {
* @clock_mode : Input clock mode. LP55XX_CLOCK_AUTO or _INT or _EXT
* @setup_resources : Platform specific function before enabling the chip
* @release_resources : Platform specific function after disabling the chip
- * @enable : EN pin control by platform side
+ * @enable_gpiod : enable GPIO descriptor
* @patterns : Predefined pattern data for RGB channels
* @num_patterns : Number of patterns
* @update_config : Value of CONFIG register
@@ -65,7 +74,7 @@ struct lp55xx_platform_data {
u8 clock_mode;
/* optional enable GPIO */
- int enable_gpio;
+ struct gpio_desc *enable_gpiod;
/* Predefined pattern data */
struct lp55xx_predef_pattern *patterns;
diff --git a/include/linux/platform_data/leds-pca963x.h b/include/linux/platform_data/leds-pca963x.h
deleted file mode 100644
index 6091337ce4bf..000000000000
--- a/include/linux/platform_data/leds-pca963x.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * PCA963X LED chip driver.
- *
- * Copyright 2012 bct electronic GmbH
- * Copyright 2013 Qtechnology A/S
- */
-
-#ifndef __LINUX_PCA963X_H
-#define __LINUX_PCA963X_H
-#include <linux/leds.h>
-
-enum pca963x_outdrv {
- PCA963X_OPEN_DRAIN,
- PCA963X_TOTEM_POLE, /* aka push-pull */
-};
-
-enum pca963x_blink_type {
- PCA963X_SW_BLINK,
- PCA963X_HW_BLINK,
-};
-
-enum pca963x_direction {
- PCA963X_NORMAL,
- PCA963X_INVERTED,
-};
-
-struct pca963x_platform_data {
- struct led_platform_data leds;
- enum pca963x_outdrv outdrv;
- enum pca963x_blink_type blink_type;
- enum pca963x_direction dir;
-};
-
-#endif /* __LINUX_PCA963X_H*/
diff --git a/include/linux/platform_data/leds-s3c24xx.h b/include/linux/platform_data/leds-s3c24xx.h
index 5bbae85811e2..64f8d14876e0 100644
--- a/include/linux/platform_data/leds-s3c24xx.h
+++ b/include/linux/platform_data/leds-s3c24xx.h
@@ -10,13 +10,7 @@
#ifndef __LEDS_S3C24XX_H
#define __LEDS_S3C24XX_H
-#define S3C24XX_LEDF_ACTLOW (1<<0) /* LED is on when GPIO low */
-#define S3C24XX_LEDF_TRISTATE (1<<1) /* tristate to turn off */
-
struct s3c24xx_led_platdata {
- unsigned int gpio;
- unsigned int flags;
-
char *name;
char *def_trigger;
};
diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h
deleted file mode 100644
index aa5b5562d6f7..000000000000
--- a/include/linux/platform_data/macb.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2004-2006 Atmel Corporation
- */
-#ifndef __MACB_PDATA_H__
-#define __MACB_PDATA_H__
-
-#include <linux/clk.h>
-
-/**
- * struct macb_platform_data - platform data for MACB Ethernet
- * @pclk: platform clock
- * @hclk: AHB clock
- */
-struct macb_platform_data {
- struct clk *pclk;
- struct clk *hclk;
-};
-
-#endif /* __MACB_PDATA_H__ */
diff --git a/include/linux/platform_data/max732x.h b/include/linux/platform_data/max732x.h
index f231c635faec..423999207cd5 100644
--- a/include/linux/platform_data/max732x.h
+++ b/include/linux/platform_data/max732x.h
@@ -7,17 +7,5 @@
struct max732x_platform_data {
/* number of the first GPIO */
unsigned gpio_base;
-
- /* interrupt base */
- int irq_base;
-
- void *context; /* param to setup/teardown */
-
- int (*setup)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
- int (*teardown)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
};
#endif /* __LINUX_I2C_MAX732X_H */
diff --git a/include/linux/platform_data/media/camera-mx2.h b/include/linux/platform_data/media/camera-mx2.h
deleted file mode 100644
index 8cfa76b6e1e1..000000000000
--- a/include/linux/platform_data/media/camera-mx2.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * mx2-cam.h - i.MX27/i.MX25 camera driver header file
- *
- * Copyright (C) 2003, Intel Corporation
- * Copyright (C) 2008, Sascha Hauer <s.hauer@pengutronix.de>
- * Copyright (C) 2010, Baruch Siach <baruch@tkos.co.il>
- */
-
-#ifndef __MACH_MX2_CAM_H_
-#define __MACH_MX2_CAM_H_
-
-#define MX2_CAMERA_EXT_VSYNC (1 << 1)
-#define MX2_CAMERA_CCIR (1 << 2)
-#define MX2_CAMERA_CCIR_INTERLACE (1 << 3)
-#define MX2_CAMERA_HSYNC_HIGH (1 << 4)
-#define MX2_CAMERA_GATED_CLOCK (1 << 5)
-#define MX2_CAMERA_INV_DATA (1 << 6)
-#define MX2_CAMERA_PCLK_SAMPLE_RISING (1 << 7)
-
-/**
- * struct mx2_camera_platform_data - optional platform data for mx2_camera
- * @flags: any combination of MX2_CAMERA_*
- * @clk: clock rate of the csi block / 2
- */
-struct mx2_camera_platform_data {
- unsigned long flags;
- unsigned long clk;
-};
-
-#endif /* __MACH_MX2_CAM_H_ */
diff --git a/include/linux/platform_data/media/camera-mx3.h b/include/linux/platform_data/media/camera-mx3.h
deleted file mode 100644
index 781c004e5596..000000000000
--- a/include/linux/platform_data/media/camera-mx3.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * mx3_camera.h - i.MX3x camera driver header file
- *
- * Copyright (C) 2008, Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- */
-
-#ifndef _MX3_CAMERA_H_
-#define _MX3_CAMERA_H_
-
-#include <linux/device.h>
-
-#define MX3_CAMERA_CLK_SRC 1
-#define MX3_CAMERA_EXT_VSYNC 2
-#define MX3_CAMERA_DP 4
-#define MX3_CAMERA_PCP 8
-#define MX3_CAMERA_HSP 0x10
-#define MX3_CAMERA_VSP 0x20
-#define MX3_CAMERA_DATAWIDTH_4 0x40
-#define MX3_CAMERA_DATAWIDTH_8 0x80
-#define MX3_CAMERA_DATAWIDTH_10 0x100
-#define MX3_CAMERA_DATAWIDTH_15 0x200
-
-#define MX3_CAMERA_DATAWIDTH_MASK (MX3_CAMERA_DATAWIDTH_4 | MX3_CAMERA_DATAWIDTH_8 | \
- MX3_CAMERA_DATAWIDTH_10 | MX3_CAMERA_DATAWIDTH_15)
-
-struct v4l2_async_subdev;
-
-/**
- * struct mx3_camera_pdata - i.MX3x camera platform data
- * @flags: MX3_CAMERA_* flags
- * @mclk_10khz: master clock frequency in 10kHz units
- * @dma_dev: IPU DMA device to match against in channel allocation
- */
-struct mx3_camera_pdata {
- unsigned long flags;
- unsigned long mclk_10khz;
- struct device *dma_dev;
- struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */
- int *asd_sizes; /* 0-terminated array of asd group sizes */
-};
-
-#endif
diff --git a/include/linux/platform_data/media/coda.h b/include/linux/platform_data/media/coda.h
deleted file mode 100644
index 293b61b60c9d..000000000000
--- a/include/linux/platform_data/media/coda.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2013 Philipp Zabel, Pengutronix
- */
-#ifndef PLATFORM_CODA_H
-#define PLATFORM_CODA_H
-
-struct device;
-
-struct coda_platform_data {
- struct device *iram_dev;
-};
-
-#endif
diff --git a/include/linux/platform_data/media/omap1_camera.h b/include/linux/platform_data/media/omap1_camera.h
deleted file mode 100644
index 386439db68de..000000000000
--- a/include/linux/platform_data/media/omap1_camera.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Header for V4L2 SoC Camera driver for OMAP1 Camera Interface
- *
- * Copyright (C) 2010, Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
- */
-
-#ifndef __MEDIA_OMAP1_CAMERA_H_
-#define __MEDIA_OMAP1_CAMERA_H_
-
-#include <linux/bitops.h>
-
-#define OMAP1_CAMERA_IOSIZE 0x1c
-
-enum omap1_cam_vb_mode {
- OMAP1_CAM_DMA_CONTIG = 0,
- OMAP1_CAM_DMA_SG,
-};
-
-#define OMAP1_CAMERA_MIN_BUF_COUNT(x) ((x) == OMAP1_CAM_DMA_CONTIG ? 3 : 2)
-
-struct omap1_cam_platform_data {
- unsigned long camexclk_khz;
- unsigned long lclk_khz_max;
- unsigned long flags;
-};
-
-#define OMAP1_CAMERA_LCLK_RISING BIT(0)
-#define OMAP1_CAMERA_RST_LOW BIT(1)
-#define OMAP1_CAMERA_RST_HIGH BIT(2)
-
-#endif /* __MEDIA_OMAP1_CAMERA_H_ */
diff --git a/include/linux/platform_data/mlxcpld.h b/include/linux/platform_data/mlxcpld.h
new file mode 100644
index 000000000000..d7610b528856
--- /dev/null
+++ b/include/linux/platform_data/mlxcpld.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/*
+ * Mellanox I2C multiplexer support in CPLD
+ *
+ * Copyright (C) 2016-2020 Mellanox Technologies
+ */
+
+#ifndef _LINUX_I2C_MLXCPLD_H
+#define _LINUX_I2C_MLXCPLD_H
+
+/* Platform data for the CPLD I2C multiplexers */
+
+/* mlxcpld_mux_plat_data - per mux data, used with i2c_register_board_info
+ * @chan_ids: channels array
+ * @num_adaps: number of adapters
+ * @sel_reg_addr: mux select register offset in CPLD space
+ * @reg_size: register size in bytes
+ * @handle: handle to be passed by callback
+ * @completion_notify: callback to notify when all the adapters are created
+ */
+struct mlxcpld_mux_plat_data {
+ int *chan_ids;
+ int num_adaps;
+ int sel_reg_addr;
+ u8 reg_size;
+ void *handle;
+ int (*completion_notify)(void *handle, struct i2c_adapter *parent,
+ struct i2c_adapter *adapters[]);
+};
+
+#endif /* _LINUX_I2C_MLXCPLD_H */
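
For illustration, a minimal sketch of board code filling the new structure; the channel IDs, register offset and register size below are hypothetical, not taken from a real Mellanox system:

/* Hypothetical board data for a CPLD mux with four downstream channels. */
static int board_mux_chan_ids[] = { 2, 3, 4, 5 };

static struct mlxcpld_mux_plat_data board_mux_data = {
	.chan_ids = board_mux_chan_ids,
	.num_adaps = ARRAY_SIZE(board_mux_chan_ids),
	.sel_reg_addr = 0xdb,	/* mux select register offset in CPLD space */
	.reg_size = 1,		/* one byte per select register */
};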
diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h
index b8da8aef2446..a6bd74e29b6b 100644
--- a/include/linux/platform_data/mlxreg.h
+++ b/include/linux/platform_data/mlxreg.h
@@ -1,34 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/*
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Vadim Pasternak <vadimp@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (C) 2017-2020 Mellanox Technologies Ltd.
*/
#ifndef __LINUX_PLATFORM_DATA_MLXREG_H
@@ -43,10 +15,71 @@
*
* TYPE1 HW watchdog implementation exists in old systems.
* All new systems have TYPE2 HW watchdog.
+ * TYPE3 HW watchdog can exist on all systems with new CPLD.
+ * TYPE3 is selected by WD capability bit.
*/
enum mlxreg_wdt_type {
MLX_WDT_TYPE1,
MLX_WDT_TYPE2,
+ MLX_WDT_TYPE3,
+};
+
+/**
+ * enum mlxreg_hotplug_kind - kind of hotplug entry
+ *
+ * @MLXREG_HOTPLUG_DEVICE_NA: do not care;
+ * @MLXREG_HOTPLUG_LC_PRESENT: entry for line card presence in/out events;
+ * @MLXREG_HOTPLUG_LC_VERIFIED: entry for line card verification status events
+ * coming after line card security signature validation;
+ * @MLXREG_HOTPLUG_LC_POWERED: entry for line card power on/off events;
+ * @MLXREG_HOTPLUG_LC_SYNCED: entry for line card synchronization events, coming
+ * after hardware-firmware synchronization handshake;
+ * @MLXREG_HOTPLUG_LC_READY: entry for line card ready events, indicating line card
+ * PHYs ready / unready state;
+ * @MLXREG_HOTPLUG_LC_ACTIVE: entry for line card active events, indicating firmware
+ * availability / unavailability for the ports on line card;
+ * @MLXREG_HOTPLUG_LC_THERMAL: entry for line card thermal shutdown events, positive
+ * event indicates that system should power off the line
+ * card for which this event has been received;
+ */
+enum mlxreg_hotplug_kind {
+ MLXREG_HOTPLUG_DEVICE_NA = 0,
+ MLXREG_HOTPLUG_LC_PRESENT = 1,
+ MLXREG_HOTPLUG_LC_VERIFIED = 2,
+ MLXREG_HOTPLUG_LC_POWERED = 3,
+ MLXREG_HOTPLUG_LC_SYNCED = 4,
+ MLXREG_HOTPLUG_LC_READY = 5,
+ MLXREG_HOTPLUG_LC_ACTIVE = 6,
+ MLXREG_HOTPLUG_LC_THERMAL = 7,
+};
+
+/**
+ * enum mlxreg_hotplug_device_action - hotplug device action required for
+ * driver's connectivity
+ *
+ * @MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION: probe device for 'on' event, remove
+ * for 'off' event;
+ * @MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION: probe platform device for 'on'
+ * event, remove for 'off' event;
+ * @MLXREG_HOTPLUG_DEVICE_NO_ACTION: no connectivity action is required;
+ */
+enum mlxreg_hotplug_device_action {
+ MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION = 0,
+ MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION = 1,
+ MLXREG_HOTPLUG_DEVICE_NO_ACTION = 2,
+};
+
+/**
+ * struct mlxreg_core_hotplug_notifier - hotplug notifier block:
+ *
+ * @identity: notifier identity name;
+ * @handle: user handle to be passed by user handler function;
+ * @user_handler: user handler function associated with the event;
+ */
+struct mlxreg_core_hotplug_notifier {
+ char identity[MLXREG_CORE_LABEL_MAX_SIZE];
+ void *handle;
+ int (*user_handler)(void *handle, enum mlxreg_hotplug_kind kind, u8 action);
};
/**
@@ -56,6 +89,11 @@ enum mlxreg_wdt_type {
* @client: I2C device client;
* @brdinfo: device board information;
* @nr: I2C device adapter number, to which device is to be attached;
+ * @pdev: platform device, if device is instantiated as a platform device;
+ * @action: action to be performed upon event receiving;
+ * @handle: user handle to be passed by user handler function;
+ * @user_handler: user handler function associated with the event;
+ * @notifier: pointer to event notifier block;
*
* Structure represents I2C hotplug device static data (board topology) and
* dynamic data (related kernel objects handles).
@@ -65,6 +103,11 @@ struct mlxreg_hotplug_device {
struct i2c_client *client;
struct i2c_board_info *brdinfo;
int nr;
+ struct platform_device *pdev;
+ enum mlxreg_hotplug_device_action action;
+ void *handle;
+ int (*user_handler)(void *handle, enum mlxreg_hotplug_kind kind, u8 action);
+ struct mlxreg_core_hotplug_notifier *notifier;
};
/**
@@ -75,11 +118,19 @@ struct mlxreg_hotplug_device {
* @mask: attribute access mask;
* @bit: attribute effective bit;
* @capability: attribute capability register;
+ * @reg_prsnt: attribute presence register;
+ * @reg_sync: attribute synchronization register;
+ * @reg_pwr: attribute power register;
+ * @reg_ena: attribute enable register;
* @mode: access mode;
* @np: pointer to platform node associated with attribute;
* @hpdev: hotplug device data;
+ * @notifier: pointer to event notifier block;
* @health_cntr: dynamic device health indication counter;
* @attached: true if device has been attached after good health indication;
+ * @regnum: number of registers occupied by multi-register attribute;
+ * @slot: slot number at which the device is located;
+ * @secured: if set, indicates that entry access is secured;
*/
struct mlxreg_core_data {
char label[MLXREG_CORE_LABEL_MAX_SIZE];
@@ -87,17 +138,26 @@ struct mlxreg_core_data {
u32 mask;
u32 bit;
u32 capability;
+ u32 reg_prsnt;
+ u32 reg_sync;
+ u32 reg_pwr;
+ u32 reg_ena;
umode_t mode;
struct device_node *np;
struct mlxreg_hotplug_device hpdev;
- u8 health_cntr;
+ struct mlxreg_core_hotplug_notifier *notifier;
+ u32 health_cntr;
bool attached;
+ u8 regnum;
+ u8 slot;
+ u8 secured;
};
/**
* struct mlxreg_core_item - same type components controlled by the driver:
*
* @data: component data;
+ * @kind: kind of hotplug attribute;
* @aggr_mask: group aggregation mask;
* @reg: group interrupt status register;
* @mask: group interrupt mask;
@@ -110,6 +170,7 @@ struct mlxreg_core_data {
*/
struct mlxreg_core_item {
struct mlxreg_core_data *data;
+ enum mlxreg_hotplug_kind kind;
u32 aggr_mask;
u32 reg;
u32 mask;
@@ -130,6 +191,7 @@ struct mlxreg_core_item {
* @features: supported features of device;
* @version: implementation version;
* @identity: device identity name;
+ * @capability: device capability register;
*/
struct mlxreg_core_platform_data {
struct mlxreg_core_data *data;
@@ -138,6 +200,7 @@ struct mlxreg_core_platform_data {
u32 features;
u32 version;
char identity[MLXREG_CORE_LABEL_MAX_SIZE];
+ u32 capability;
};
/**
@@ -153,6 +216,8 @@ struct mlxreg_core_platform_data {
* @mask_low: low aggregation interrupt common mask;
* @deferred_nr: I2C adapter number that must exist prior to probing execution;
* @shift_nr: I2C adapter numbers must be incremented by this value;
+ * @handle: handle to be passed by callback;
+ * @completion_notify: callback to notify when platform driver probing is done;
*/
struct mlxreg_core_hotplug_platform_data {
struct mlxreg_core_item *items;
@@ -165,6 +230,8 @@ struct mlxreg_core_hotplug_platform_data {
u32 mask_low;
int deferred_nr;
int shift_nr;
+ void *handle;
+ int (*completion_notify)(void *handle, int id);
};
#endif /* __LINUX_PLATFORM_DATA_MLXREG_H */
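
To show how the new notifier block is meant to be wired up, a minimal sketch assuming a hypothetical line-card driver; the identity string and the reaction to the event are illustrative only:

static int lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind,
			    u8 action)
{
	/* React only to line card power on/off events. */
	if (kind == MLXREG_HOTPLUG_LC_POWERED)
		pr_info("line card %s\n", action ? "powered on" : "powered off");
	return 0;
}

static struct mlxreg_core_hotplug_notifier lc_notifier = {
	.identity = "lc1",
	.user_handler = lc_event_handler,
};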
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
deleted file mode 100644
index 6c006078c8a1..000000000000
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2010 Wolfram Sang <w.sang@pengutronix.de>
- */
-
-#ifndef __ASM_ARCH_IMX_ESDHC_H
-#define __ASM_ARCH_IMX_ESDHC_H
-
-#include <linux/types.h>
-
-enum wp_types {
- ESDHC_WP_NONE, /* no WP, neither controller nor gpio */
- ESDHC_WP_CONTROLLER, /* mmc controller internal WP */
- ESDHC_WP_GPIO, /* external gpio pin for WP */
-};
-
-enum cd_types {
- ESDHC_CD_NONE, /* no CD, neither controller nor gpio */
- ESDHC_CD_CONTROLLER, /* mmc controller internal CD */
- ESDHC_CD_GPIO, /* external gpio pin for CD */
- ESDHC_CD_PERMANENT, /* no CD, card permanently wired to host */
-};
-
-/**
- * struct esdhc_platform_data - platform data for esdhc on i.MX
- *
- * ESDHC_WP(CD)_CONTROLLER type is not available on i.MX25/35.
- *
- * @wp_type: type of write_protect method (see wp_types enum above)
- * @cd_type: type of card_detect method (see cd_types enum above)
- */
-
-struct esdhc_platform_data {
- enum wp_types wp_type;
- enum cd_types cd_type;
- int max_bus_width;
- unsigned int delay_line;
- unsigned int tuning_step; /* The delay cell steps in tuning procedure */
- unsigned int tuning_start_tap; /* The start delay cell point in tuning procedure */
-};
-#endif /* __ASM_ARCH_IMX_ESDHC_H */
diff --git a/include/linux/platform_data/mmc-esdhc-mcf.h b/include/linux/platform_data/mmc-esdhc-mcf.h
new file mode 100644
index 000000000000..85cb786a62fe
--- /dev/null
+++ b/include/linux/platform_data/mmc-esdhc-mcf.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_PLATFORM_DATA_MCF_ESDHC_H__
+#define __LINUX_PLATFORM_DATA_MCF_ESDHC_H__
+
+enum cd_types {
+ ESDHC_CD_NONE, /* no CD, neither controller nor gpio */
+ ESDHC_CD_CONTROLLER, /* mmc controller internal CD */
+ ESDHC_CD_PERMANENT, /* no CD, card permanently wired to host */
+};
+
+struct mcf_esdhc_platform_data {
+ int max_bus_width;
+ int cd_type;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_MCF_ESDHC_H__ */
diff --git a/include/linux/platform_data/mmc-omap.h b/include/linux/platform_data/mmc-omap.h
index 9acf0e87aa9b..91051e9907f3 100644
--- a/include/linux/platform_data/mmc-omap.h
+++ b/include/linux/platform_data/mmc-omap.h
@@ -108,11 +108,13 @@ struct omap_mmc_platform_data {
const char *name;
u32 ocr_mask;
- /* Card detection IRQs */
- int card_detect_irq;
+ /* Card detection */
int (*card_detect)(struct device *dev, int slot);
unsigned int ban_openended:1;
} slots[OMAP_MMC_MAX_SLOTS];
};
+
+extern void omap_mmc_notify_cover_event(struct device *dev, int slot,
+ int is_closed);
diff --git a/include/linux/platform_data/mmc-s3cmci.h b/include/linux/platform_data/mmc-s3cmci.h
index 33310b11cbdd..bacb86db3112 100644
--- a/include/linux/platform_data/mmc-s3cmci.h
+++ b/include/linux/platform_data/mmc-s3cmci.h
@@ -35,6 +35,7 @@ struct s3c24xx_mci_pdata {
unsigned long ocr_avail;
void (*set_power)(unsigned char power_mode,
unsigned short vdd);
+ struct gpio_desc *bus[6];
};
/**
@@ -44,6 +45,7 @@ struct s3c24xx_mci_pdata {
* Copy the platform data supplied by @pdata so that this can be marked
* __initdata.
*/
+extern void s3c24xx_mci_def_set_power(unsigned char power_mode, unsigned short vdd);
extern void s3c24xx_mci_set_platdata(struct s3c24xx_mci_pdata *pdata);
#endif /* _ARCH_NCI_H */
diff --git a/include/linux/platform_data/mtd-davinci-aemif.h b/include/linux/platform_data/mtd-davinci-aemif.h
index a403dd51dacc..a49826214a39 100644
--- a/include/linux/platform_data/mtd-davinci-aemif.h
+++ b/include/linux/platform_data/mtd-davinci-aemif.h
@@ -1,7 +1,7 @@
/*
* TI DaVinci AEMIF support
*
- * Copyright 2010 (C) Texas Instruments, Inc. http://www.ti.com/
+ * Copyright 2010 (C) Texas Instruments, Inc. https://www.ti.com/
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
diff --git a/include/linux/platform_data/mtd-davinci.h b/include/linux/platform_data/mtd-davinci.h
index 08e639e047e5..dd474dd44848 100644
--- a/include/linux/platform_data/mtd-davinci.h
+++ b/include/linux/platform_data/mtd-davinci.h
@@ -60,15 +60,16 @@ struct davinci_nand_pdata { /* platform_data */
struct mtd_partition *parts;
unsigned nr_parts;
- /* none == NAND_ECC_NONE (strongly *not* advised!!)
- * soft == NAND_ECC_SOFT
- * else == NAND_ECC_HW, according to ecc_bits
+ /* none == NAND_ECC_ENGINE_TYPE_NONE (strongly *not* advised!!)
+ * soft == NAND_ECC_ENGINE_TYPE_SOFT
+ * else == NAND_ECC_ENGINE_TYPE_ON_HOST, according to ecc_bits
*
* All DaVinci-family chips support 1-bit hardware ECC.
* Newer ones also support 4-bit ECC, but are awkward
* using it with large page chips.
*/
- nand_ecc_modes_t ecc_mode;
+ enum nand_ecc_engine_type engine_type;
+ enum nand_ecc_placement ecc_placement;
u8 ecc_bits;
/* e.g. NAND_BUSWIDTH_16 */
diff --git a/include/linux/platform_data/mtd-mxc_nand.h b/include/linux/platform_data/mtd-mxc_nand.h
deleted file mode 100644
index d1230030c6db..000000000000
--- a/include/linux/platform_data/mtd-mxc_nand.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Sascha Hauer, kernel@pengutronix.de
- */
-
-#ifndef __ASM_ARCH_NAND_H
-#define __ASM_ARCH_NAND_H
-
-#include <linux/mtd/partitions.h>
-
-struct mxc_nand_platform_data {
- unsigned int width; /* data bus width in bytes */
- unsigned int hw_ecc:1; /* 0 if suppress hardware ECC */
- unsigned int flash_bbt:1; /* set to 1 to use a flash based bbt */
- struct mtd_partition *parts; /* partition table */
- int nr_parts; /* size of parts */
-};
-#endif /* __ASM_ARCH_NAND_H */
diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h
index de6ada739121..8c2f1f185353 100644
--- a/include/linux/platform_data/mtd-nand-omap2.h
+++ b/include/linux/platform_data/mtd-nand-omap2.h
@@ -7,6 +7,7 @@
#define _MTD_NAND_OMAP2_H
#include <linux/mtd/partitions.h>
+#include <linux/mod_devicetable.h>
#define GPMC_BCH_NUM_REMAINDER 8
@@ -61,4 +62,11 @@ struct gpmc_nand_regs {
void __iomem *gpmc_bch_result5[GPMC_BCH_NUM_REMAINDER];
void __iomem *gpmc_bch_result6[GPMC_BCH_NUM_REMAINDER];
};
-#endif
+
+static const struct of_device_id omap_nand_ids[] = {
+ { .compatible = "ti,omap2-nand", },
+ { .compatible = "ti,am64-nand", },
+ {},
+};
+
+#endif /* _MTD_NAND_OMAP2_H */
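
Since the match table now lives in the header, callers can reuse it directly; a small sketch (assuming <linux/of.h> is available) of matching a GPMC child node against it:

/* Sketch: check whether a GPMC child node is one of the supported NANDs. */
static bool omap_gpmc_node_is_nand(struct device_node *np)
{
	return of_match_node(omap_nand_ids, np) != NULL;
}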
diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h
index deb849bcf0ec..25390fc3e795 100644
--- a/include/linux/platform_data/mtd-nand-s3c2410.h
+++ b/include/linux/platform_data/mtd-nand-s3c2410.h
@@ -49,7 +49,7 @@ struct s3c2410_platform_nand {
unsigned int ignore_unset_ecc:1;
- nand_ecc_modes_t ecc_mode;
+ enum nand_ecc_engine_type engine_type;
int nr_sets;
struct s3c2410_nand_set *sets;
diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h
deleted file mode 100644
index b324d03e580c..000000000000
--- a/include/linux/platform_data/ntc_thermistor.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * ntc_thermistor.h - NTC Thermistors
- *
- * Copyright (C) 2010 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- */
-#ifndef _LINUX_NTC_H
-#define _LINUX_NTC_H
-
-struct iio_channel;
-
-enum ntc_thermistor_type {
- TYPE_B57330V2103,
- TYPE_B57891S0103,
- TYPE_NCPXXWB473,
- TYPE_NCPXXWF104,
- TYPE_NCPXXWL333,
- TYPE_NCPXXXH103,
-};
-
-struct ntc_thermistor_platform_data {
- /*
- * One (not both) of read_uV and read_ohm should be provided and only
- * one of the two should be provided.
- * Both functions should return negative value for an error case.
- *
- * pullup_uV, pullup_ohm, pulldown_ohm, and connect are required to use
- * read_uV()
- *
- * How to setup pullup_ohm, pulldown_ohm, and connect is
- * described at Documentation/hwmon/ntc_thermistor.rst
- *
- * pullup/down_ohm: 0 for infinite / not-connected
- *
- * chan: iio_channel pointer to communicate with the ADC which the
- * thermistor is using for conversion of the analog values.
- */
- int (*read_uv)(struct ntc_thermistor_platform_data *);
- unsigned int pullup_uv;
-
- unsigned int pullup_ohm;
- unsigned int pulldown_ohm;
- enum { NTC_CONNECTED_POSITIVE, NTC_CONNECTED_GROUND } connect;
- struct iio_channel *chan;
-
- int (*read_ohm)(void);
-};
-
-#endif /* _LINUX_NTC_H */
diff --git a/include/linux/platform_data/omap-twl4030.h b/include/linux/platform_data/omap-twl4030.h
index 8419c8caf54e..0dd851ea1c72 100644
--- a/include/linux/platform_data/omap-twl4030.h
+++ b/include/linux/platform_data/omap-twl4030.h
@@ -3,7 +3,7 @@
* omap-twl4030.h - ASoC machine driver for TI SoC based boards with twl4030
* codec, header.
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com
* All rights reserved.
*
* Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
diff --git a/include/linux/platform_data/pca953x.h b/include/linux/platform_data/pca953x.h
index 4eb53e023997..96c1a14ab365 100644
--- a/include/linux/platform_data/pca953x.h
+++ b/include/linux/platform_data/pca953x.h
@@ -22,7 +22,7 @@ struct pca953x_platform_data {
int (*setup)(struct i2c_client *client,
unsigned gpio, unsigned ngpio,
void *context);
- int (*teardown)(struct i2c_client *client,
+ void (*teardown)(struct i2c_client *client,
unsigned gpio, unsigned ngpio,
void *context);
const char *const *names;
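
With the return value gone from teardown, board code providing the callbacks would look like the following sketch; the GPIO base and the empty hook bodies are placeholders:

static int board_pca953x_setup(struct i2c_client *client, unsigned gpio,
			       unsigned ngpio, void *context)
{
	/* claim or configure expander lines here */
	return 0;
}

static void board_pca953x_teardown(struct i2c_client *client, unsigned gpio,
				   unsigned ngpio, void *context)
{
	/* undo setup; errors can no longer be reported from here */
}

static struct pca953x_platform_data board_pca953x_pdata = {
	.gpio_base = 200,	/* hypothetical base */
	.setup = board_pca953x_setup,
	.teardown = board_pca953x_teardown,
};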
diff --git a/include/linux/platform_data/pcf857x.h b/include/linux/platform_data/pcf857x.h
index 11d4ed78c7f4..01d0a3ea3aef 100644
--- a/include/linux/platform_data/pcf857x.h
+++ b/include/linux/platform_data/pcf857x.h
@@ -36,7 +36,7 @@ struct pcf857x_platform_data {
int (*setup)(struct i2c_client *client,
int gpio, unsigned ngpio,
void *context);
- int (*teardown)(struct i2c_client *client,
+ void (*teardown)(struct i2c_client *client,
int gpio, unsigned ngpio,
void *context);
void *context;
diff --git a/include/linux/platform_data/pm33xx.h b/include/linux/platform_data/pm33xx.h
index dd5971937a64..7037ba7a53ca 100644
--- a/include/linux/platform_data/pm33xx.h
+++ b/include/linux/platform_data/pm33xx.h
@@ -46,15 +46,16 @@ struct am33xx_pm_sram_addr {
};
struct am33xx_pm_platform_data {
- int (*init)(void);
+ int (*init)(int (*idle)(u32 wfi_flags));
+ int (*deinit)(void);
int (*soc_suspend)(unsigned int state, int (*fn)(unsigned long),
unsigned long args);
+ int (*cpu_suspend)(int (*fn)(unsigned long), unsigned long args);
+ void (*begin_suspend)(void);
+ void (*finish_suspend)(void);
struct am33xx_pm_sram_addr *(*get_sram_addrs)(void);
- void __iomem *(*get_rtc_base_addr)(void);
void (*save_context)(void);
void (*restore_context)(void);
- void (*prepare_rtc_suspend)(void);
- void (*prepare_rtc_resume)(void);
int (*check_off_mode_enable)(void);
};
diff --git a/include/linux/platform_data/pwm_omap_dmtimer.h b/include/linux/platform_data/pwm_omap_dmtimer.h
deleted file mode 100644
index e7d521e48855..000000000000
--- a/include/linux/platform_data/pwm_omap_dmtimer.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * include/linux/platform_data/pwm_omap_dmtimer.h
- *
- * OMAP Dual-Mode Timer PWM platform data
- *
- * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
- * Tarun Kanti DebBarma <tarun.kanti@ti.com>
- * Thara Gopinath <thara@ti.com>
- *
- * Platform device conversion and hwmod support.
- *
- * Copyright (C) 2005 Nokia Corporation
- * Author: Lauri Leukkunen <lauri.leukkunen@nokia.com>
- * PWM and clock framework support by Timo Teras.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __PWM_OMAP_DMTIMER_PDATA_H
-#define __PWM_OMAP_DMTIMER_PDATA_H
-
-/* clock sources */
-#define PWM_OMAP_DMTIMER_SRC_SYS_CLK 0x00
-#define PWM_OMAP_DMTIMER_SRC_32_KHZ 0x01
-#define PWM_OMAP_DMTIMER_SRC_EXT_CLK 0x02
-
-/* timer interrupt enable bits */
-#define PWM_OMAP_DMTIMER_INT_CAPTURE (1 << 2)
-#define PWM_OMAP_DMTIMER_INT_OVERFLOW (1 << 1)
-#define PWM_OMAP_DMTIMER_INT_MATCH (1 << 0)
-
-/* trigger types */
-#define PWM_OMAP_DMTIMER_TRIGGER_NONE 0x00
-#define PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW 0x01
-#define PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW_AND_COMPARE 0x02
-
-struct omap_dm_timer;
-typedef struct omap_dm_timer pwm_omap_dmtimer;
-
-struct pwm_omap_dmtimer_pdata {
- pwm_omap_dmtimer *(*request_by_node)(struct device_node *np);
- pwm_omap_dmtimer *(*request_specific)(int timer_id);
- pwm_omap_dmtimer *(*request)(void);
-
- int (*free)(pwm_omap_dmtimer *timer);
-
- void (*enable)(pwm_omap_dmtimer *timer);
- void (*disable)(pwm_omap_dmtimer *timer);
-
- int (*get_irq)(pwm_omap_dmtimer *timer);
- int (*set_int_enable)(pwm_omap_dmtimer *timer, unsigned int value);
- int (*set_int_disable)(pwm_omap_dmtimer *timer, u32 mask);
-
- struct clk *(*get_fclk)(pwm_omap_dmtimer *timer);
-
- int (*start)(pwm_omap_dmtimer *timer);
- int (*stop)(pwm_omap_dmtimer *timer);
- int (*set_source)(pwm_omap_dmtimer *timer, int source);
-
- int (*set_load)(pwm_omap_dmtimer *timer, int autoreload,
- unsigned int value);
- int (*set_match)(pwm_omap_dmtimer *timer, int enable,
- unsigned int match);
- int (*set_pwm)(pwm_omap_dmtimer *timer, int def_on,
- int toggle, int trigger);
- int (*set_prescaler)(pwm_omap_dmtimer *timer, int prescaler);
-
- unsigned int (*read_counter)(pwm_omap_dmtimer *timer);
- int (*write_counter)(pwm_omap_dmtimer *timer, unsigned int value);
- unsigned int (*read_status)(pwm_omap_dmtimer *timer);
- int (*write_status)(pwm_omap_dmtimer *timer, unsigned int value);
-};
-
-#endif /* __PWM_OMAP_DMTIMER_PDATA_H */
diff --git a/include/linux/platform_data/remoteproc-omap.h b/include/linux/platform_data/remoteproc-omap.h
deleted file mode 100644
index 7e3a16097672..000000000000
--- a/include/linux/platform_data/remoteproc-omap.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Remote Processor - omap-specific bits
- *
- * Copyright (C) 2011 Texas Instruments, Inc.
- * Copyright (C) 2011 Google, Inc.
- */
-
-#ifndef _PLAT_REMOTEPROC_H
-#define _PLAT_REMOTEPROC_H
-
-struct rproc_ops;
-struct platform_device;
-
-/*
- * struct omap_rproc_pdata - omap remoteproc's platform data
- * @name: the remoteproc's name
- * @oh_name: omap hwmod device
- * @oh_name_opt: optional, secondary omap hwmod device
- * @firmware: name of firmware file to load
- * @mbox_name: name of omap mailbox device to use with this rproc
- * @ops: start/stop rproc handlers
- * @device_enable: omap-specific handler for enabling a device
- * @device_shutdown: omap-specific handler for shutting down a device
- * @set_bootaddr: omap-specific handler for setting the rproc boot address
- */
-struct omap_rproc_pdata {
- const char *name;
- const char *oh_name;
- const char *oh_name_opt;
- const char *firmware;
- const char *mbox_name;
- const struct rproc_ops *ops;
- int (*device_enable)(struct platform_device *pdev);
- int (*device_shutdown)(struct platform_device *pdev);
- void (*set_bootaddr)(u32);
-};
-
-#if defined(CONFIG_OMAP_REMOTEPROC) || defined(CONFIG_OMAP_REMOTEPROC_MODULE)
-
-void __init omap_rproc_reserve_cma(void);
-
-#else
-
-static inline void __init omap_rproc_reserve_cma(void)
-{
-}
-
-#endif
-
-#endif /* _PLAT_REMOTEPROC_H */
diff --git a/include/linux/platform_data/s3c-hsudc.h b/include/linux/platform_data/s3c-hsudc.h
index 4dc9b8760166..a170939832d5 100644
--- a/include/linux/platform_data/s3c-hsudc.h
+++ b/include/linux/platform_data/s3c-hsudc.h
@@ -26,6 +26,8 @@ struct s3c24xx_hsudc_platdata {
unsigned int epnum;
void (*gpio_init)(void);
void (*gpio_uninit)(void);
+ void (*phy_init)(void);
+ void (*phy_uninit)(void);
};
#endif /* __LINUX_USB_S3C_HSUDC_H */
diff --git a/include/linux/platform_data/serial-imx.h b/include/linux/platform_data/serial-imx.h
deleted file mode 100644
index 0844b21372c7..000000000000
--- a/include/linux/platform_data/serial-imx.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2008 by Sascha Hauer <kernel@pengutronix.de>
- */
-
-#ifndef ASMARM_ARCH_UART_H
-#define ASMARM_ARCH_UART_H
-
-#define IMXUART_HAVE_RTSCTS (1<<0)
-
-struct imxuart_platform_data {
- unsigned int flags;
-};
-
-#endif
diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/platform_data/sh_mmcif.h
index e25533b95d9f..6eb914f958f9 100644
--- a/include/linux/mmc/sh_mmcif.h
+++ b/include/linux/platform_data/sh_mmcif.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * include/linux/mmc/sh_mmcif.h
- *
* platform data for eMMC driver
*
* Copyright (C) 2010 Renesas Solutions Corp.
diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h
index fe815d7d9f58..d661399b217d 100644
--- a/include/linux/platform_data/shmob_drm.h
+++ b/include/linux/platform_data/shmob_drm.h
@@ -10,8 +10,6 @@
#ifndef __SHMOB_DRM_H__
#define __SHMOB_DRM_H__
-#include <linux/kernel.h>
-
#include <drm/drm_mode.h>
enum shmob_drm_clk_source {
diff --git a/include/linux/platform_data/simplefb.h b/include/linux/platform_data/simplefb.h
index 4f733a411d18..27ea99af6e1d 100644
--- a/include/linux/platform_data/simplefb.h
+++ b/include/linux/platform_data/simplefb.h
@@ -10,12 +10,13 @@
#include <drm/drm_fourcc.h>
#include <linux/fb.h>
-#include <linux/kernel.h>
+#include <linux/types.h>
/* format array, use it to initialize a "struct simplefb_format" array */
#define SIMPLEFB_FORMATS \
{ \
{ "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0}, DRM_FORMAT_RGB565 }, \
+ { "r5g5b5a1", 16, {11, 5}, {6, 5}, {1, 5}, {0, 1}, DRM_FORMAT_RGBA5551 }, \
{ "x1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {0, 0}, DRM_FORMAT_XRGB1555 }, \
{ "a1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {15, 1}, DRM_FORMAT_ARGB1555 }, \
{ "r8g8b8", 24, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_RGB888 }, \
diff --git a/include/linux/platform_data/sky81452-backlight.h b/include/linux/platform_data/sky81452-backlight.h
deleted file mode 100644
index 02653d92d84f..000000000000
--- a/include/linux/platform_data/sky81452-backlight.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * sky81452.h SKY81452 backlight driver
- *
- * Copyright 2014 Skyworks Solutions Inc.
- * Author : Gyungoh Yoo <jack.yoo@skyworksinc.com>
- */
-
-#ifndef _SKY81452_BACKLIGHT_H
-#define _SKY81452_BACKLIGHT_H
-
-/**
- * struct sky81452_platform_data
- * @name: backlight driver name.
- If it is not defined, default name is lcd-backlight.
- * @gpio_enable:GPIO number which control EN pin
- * @enable: Enable mask for current sink channel 1, 2, 3, 4, 5 and 6.
- * @ignore_pwm: true if DPWMI should be ignored.
- * @dpwm_mode: true is DPWM dimming mode, otherwise Analog dimming mode.
- * @phase_shift:true is phase shift mode.
- * @short_detecion_threshold: It should be one of 4, 5, 6 and 7V.
- * @boost_current_limit: It should be one of 2300, 2750mA.
- */
-struct sky81452_bl_platform_data {
- const char *name;
- int gpio_enable;
- unsigned int enable;
- bool ignore_pwm;
- bool dpwm_mode;
- bool phase_shift;
- unsigned int short_detection_threshold;
- unsigned int boost_current_limit;
-};
-
-#endif
diff --git a/include/linux/platform_data/spi-ath79.h b/include/linux/platform_data/spi-ath79.h
deleted file mode 100644
index 81a388ff58cc..000000000000
--- a/include/linux/platform_data/spi-ath79.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Platform data definition for Atheros AR71XX/AR724X/AR913X SPI controller
- *
- * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
- */
-
-#ifndef _ATH79_SPI_PLATFORM_H
-#define _ATH79_SPI_PLATFORM_H
-
-struct ath79_spi_platform_data {
- unsigned bus_num;
- unsigned num_chipselect;
-};
-
-#endif /* _ATH79_SPI_PLATFORM_H */
diff --git a/include/linux/platform_data/spi-clps711x.h b/include/linux/platform_data/spi-clps711x.h
deleted file mode 100644
index efaa596848c9..000000000000
--- a/include/linux/platform_data/spi-clps711x.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CLPS711X SPI bus driver definitions
- *
- * Copyright (C) 2012 Alexander Shiyan <shc_work@mail.ru>
- */
-
-#ifndef ____LINUX_PLATFORM_DATA_SPI_CLPS711X_H
-#define ____LINUX_PLATFORM_DATA_SPI_CLPS711X_H
-
-/* Board specific platform_data */
-struct spi_clps711x_pdata {
- int *chipselect; /* Array of GPIO-numbers */
- int num_chipselect; /* Total count of GPIOs */
-};
-
-#endif
diff --git a/include/linux/platform_data/spi-imx.h b/include/linux/platform_data/spi-imx.h
deleted file mode 100644
index 328f670d10bd..000000000000
--- a/include/linux/platform_data/spi-imx.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef __MACH_SPI_H_
-#define __MACH_SPI_H_
-
-/*
- * struct spi_imx_master - device.platform_data for SPI controller devices.
- * @chipselect: Array of chipselects for this master or NULL. Numbers >= 0
- * mean GPIO pins, -ENOENT means internal CSPI chipselect
- * matching the position in the array. E.g., if chipselect[1] =
- * -ENOENT then a SPI slave using chip select 1 will use the
- * native SS1 line of the CSPI. Omitting the array will use
- * all native chip selects.
-
- * Normally you want to use gpio based chip selects as the CSPI
- * module tries to be intelligent about when to assert the
- * chipselect: The CSPI module deasserts the chipselect once it
- * runs out of input data. The other problem is that it is not
- * possible to mix between high active and low active chipselects
- * on one single bus using the internal chipselects.
- * Unfortunately, on some SoCs, Freescale decided to put some
- * chipselects on dedicated pins which are not usable as gpios,
- * so we have to support the internal chipselects.
- *
- * @num_chipselect: If @chipselect is specified, ARRAY_SIZE(chipselect),
- * otherwise the number of native chip selects.
- */
-struct spi_imx_master {
- int *chipselect;
- int num_chipselect;
-};
-
-#endif /* __MACH_SPI_H_*/
diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h
index 65fd5ffd257c..f0db674f07b8 100644
--- a/include/linux/platform_data/spi-mt65xx.h
+++ b/include/linux/platform_data/spi-mt65xx.h
@@ -12,5 +12,6 @@
/* Board specific platform_data */
struct mtk_chip_config {
u32 sample_sel;
+ u32 tick_delay;
};
#endif
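
A board would pass this config through the SPI device's controller_data; the delay value below is purely illustrative:

/* Hypothetical per-chip config making use of the new tick_delay field. */
static const struct mtk_chip_config board_spi_chip_config = {
	.sample_sel = 0,
	.tick_delay = 2,	/* illustrative; unit is controller clock ticks */
};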
diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h
index 773daf7915a3..5df1ace6d2c9 100644
--- a/include/linux/platform_data/spi-s3c64xx.h
+++ b/include/linux/platform_data/spi-s3c64xx.h
@@ -16,7 +16,6 @@ struct platform_device;
* struct s3c64xx_spi_csinfo - ChipSelect description
* @fb_delay: Slave specific feedback delay.
* Refer to FB_CLK_SEL register definition in SPI chapter.
- * @line: Custom 'identity' of the CS line.
*
* This is per SPI-Slave Chipselect information.
* Allocate and initialize one in machine init code and make the
@@ -24,7 +23,6 @@ struct platform_device;
*/
struct s3c64xx_spi_csinfo {
u8 fb_delay;
- unsigned line;
};
/**
@@ -43,26 +41,16 @@ struct s3c64xx_spi_info {
/**
* s3c64xx_spi_set_platdata - SPI Controller configure callback by the board
* initialization code.
- * @cfg_gpio: Pointer to gpio setup function.
* @src_clk_nr: Clock the SPI controller is to use to generate SPI clocks.
* @num_cs: Number of elements in the 'cs' array.
*
* Call this from machine init code for each SPI Controller that
* has some chips attached to it.
*/
-extern void s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
- int num_cs);
-extern void s3c64xx_spi1_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
- int num_cs);
-extern void s3c64xx_spi2_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
- int num_cs);
+extern void s3c64xx_spi0_set_platdata(int src_clk_nr, int num_cs);
/* defined by architecture to configure gpio */
extern int s3c64xx_spi0_cfg_gpio(void);
-extern int s3c64xx_spi1_cfg_gpio(void);
-extern int s3c64xx_spi2_cfg_gpio(void);
extern struct s3c64xx_spi_info s3c64xx_spi0_pdata;
-extern struct s3c64xx_spi_info s3c64xx_spi1_pdata;
-extern struct s3c64xx_spi_info s3c64xx_spi2_pdata;
#endif /*__SPI_S3C64XX_H */
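
Machine init code shrinks accordingly; a sketch of the remaining single-controller call, with a hypothetical clock source and chip-select count:

static void __init board_init_spi0(void)
{
	/* src_clk_nr = 0 and one chip select are hypothetical board values */
	s3c64xx_spi0_set_platdata(0, 1);
}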
diff --git a/include/linux/platform_data/ssm2518.h b/include/linux/platform_data/ssm2518.h
deleted file mode 100644
index 3f9e632d6f63..000000000000
--- a/include/linux/platform_data/ssm2518.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * SSM2518 amplifier audio driver
- *
- * Copyright 2013 Analog Devices Inc.
- * Author: Lars-Peter Clausen <lars@metafoo.de>
- */
-
-#ifndef __LINUX_PLATFORM_DATA_SSM2518_H__
-#define __LINUX_PLATFORM_DATA_SSM2518_H__
-
-/**
- * struct ssm2518_platform_data - Platform data for the ssm2518 driver
- * @enable_gpio: GPIO connected to the nSD pin. Set to -1 if the nSD pin is
- * hardwired.
- */
-struct ssm2518_platform_data {
- int enable_gpio;
-};
-
-#endif
diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h
index e40b28ca892e..897051e51b78 100644
--- a/include/linux/platform_data/st_sensors_pdata.h
+++ b/include/linux/platform_data/st_sensors_pdata.h
@@ -13,8 +13,9 @@
/**
* struct st_sensors_platform_data - Platform data for the ST sensors
* @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2).
- * Available only for accelerometer and pressure sensors.
+ * Available only for accelerometer, magnetometer and pressure sensors.
* Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).
+ * Magnetometer DRDY is supported only on LSM9DS0.
* @open_drain: set the interrupt line to be open drain if possible.
* @spi_3wire: enable spi-3wire mode.
* @pullups: enable/disable i2c controller pullup resistors.
diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
index 2cbde6542849..eb556f988d57 100644
--- a/include/linux/platform_data/ti-sysc.h
+++ b/include/linux/platform_data/ti-sysc.h
@@ -17,6 +17,7 @@ enum ti_sysc_module_type {
TI_SYSC_OMAP4_MCASP,
TI_SYSC_OMAP4_USB_HOST_FS,
TI_SYSC_DRA7_MCAN,
+ TI_SYSC_PRUSS,
};
struct ti_sysc_cookie {
@@ -49,6 +50,15 @@ struct sysc_regbits {
s8 emufree_shift;
};
+#define SYSC_MODULE_QUIRK_OTG BIT(30)
+#define SYSC_QUIRK_RESET_ON_CTX_LOST BIT(29)
+#define SYSC_QUIRK_REINIT_ON_CTX_LOST BIT(28)
+#define SYSC_QUIRK_REINIT_ON_RESUME BIT(27)
+#define SYSC_QUIRK_GPMC_DEBUG BIT(26)
+#define SYSC_MODULE_QUIRK_ENA_RESETDONE BIT(25)
+#define SYSC_MODULE_QUIRK_PRUSS BIT(24)
+#define SYSC_MODULE_QUIRK_DSS_RESET BIT(23)
+#define SYSC_MODULE_QUIRK_RTC_UNLOCK BIT(22)
#define SYSC_QUIRK_CLKDM_NOAUTO BIT(21)
#define SYSC_QUIRK_FORCE_MSTANDBY BIT(20)
#define SYSC_MODULE_QUIRK_AESS BIT(19)
@@ -141,6 +151,7 @@ struct clk;
struct ti_sysc_platform_data {
struct of_dev_auxdata *auxdata;
+ bool (*soc_type_gp)(void);
int (*init_clockdomain)(struct device *dev, struct clk *fck,
struct clk *ick, struct ti_sysc_cookie *cookie);
void (*clkdm_deny_idle)(struct device *dev,
diff --git a/include/linux/platform_data/timer-ixp4xx.h b/include/linux/platform_data/timer-ixp4xx.h
deleted file mode 100644
index ee92ae7edaed..000000000000
--- a/include/linux/platform_data/timer-ixp4xx.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __TIMER_IXP4XX_H
-#define __TIMER_IXP4XX_H
-
-#include <linux/ioport.h>
-
-void __init ixp4xx_timer_setup(resource_size_t timerbase,
- int timer_irq,
- unsigned int timer_freq);
-
-#endif
diff --git a/include/linux/platform_data/tps68470.h b/include/linux/platform_data/tps68470.h
new file mode 100644
index 000000000000..e605a2cab07f
--- /dev/null
+++ b/include/linux/platform_data/tps68470.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * TI TPS68470 PMIC platform data definition.
+ *
+ * Copyright (c) 2021 Red Hat Inc.
+ *
+ * Red Hat authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+#ifndef __PDATA_TPS68470_H
+#define __PDATA_TPS68470_H
+
+enum tps68470_regulators {
+ TPS68470_CORE,
+ TPS68470_ANA,
+ TPS68470_VCM,
+ TPS68470_VIO,
+ TPS68470_VSIO,
+ TPS68470_AUX1,
+ TPS68470_AUX2,
+ TPS68470_NUM_REGULATORS
+};
+
+struct regulator_init_data;
+
+struct tps68470_regulator_platform_data {
+ const struct regulator_init_data *reg_init_data[TPS68470_NUM_REGULATORS];
+};
+
+struct tps68470_clk_consumer {
+ const char *consumer_dev_name;
+ const char *consumer_con_id;
+};
+
+struct tps68470_clk_platform_data {
+ unsigned int n_consumers;
+ struct tps68470_clk_consumer consumers[];
+};
+
+#endif
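
Because consumers[] is a flexible array, callers size the allocation with struct_size(); a sketch assuming one hypothetical consumer device (the names are not from a real platform):

static struct tps68470_clk_platform_data *
board_tps68470_clk_pdata(struct device *dev)
{
	struct tps68470_clk_platform_data *pdata;

	/* struct_size() accounts for the flexible consumers[] member */
	pdata = devm_kzalloc(dev, struct_size(pdata, consumers, 1), GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->n_consumers = 1;
	pdata->consumers[0].consumer_dev_name = "i2c-INT347A:00"; /* hypothetical */
	pdata->consumers[0].consumer_con_id = "xvclk";		   /* hypothetical */
	return pdata;
}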
diff --git a/include/linux/platform_data/uio_dmem_genirq.h b/include/linux/platform_data/uio_dmem_genirq.h
index 973c1bb32168..c8f6de685306 100644
--- a/include/linux/platform_data/uio_dmem_genirq.h
+++ b/include/linux/platform_data/uio_dmem_genirq.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/platform_data/uio_dmem_genirq.h
*
* Copyright (C) 2012 Damian Hobson-Garcia
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _UIO_DMEM_GENIRQ_H
diff --git a/include/linux/platform_data/uio_pruss.h b/include/linux/platform_data/uio_pruss.h
index 3d47d219827f..f76fa393b802 100644
--- a/include/linux/platform_data/uio_pruss.h
+++ b/include/linux/platform_data/uio_pruss.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/platform_data/uio_pruss.h
*
* Platform data for uio_pruss driver
*
- * Copyright (C) 2010-11 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2010-11 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef _UIO_PRUSS_H_
diff --git a/include/linux/platform_data/usb-ehci-mxc.h b/include/linux/platform_data/usb-ehci-mxc.h
deleted file mode 100644
index ad9794d09bc8..000000000000
--- a/include/linux/platform_data/usb-ehci-mxc.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __INCLUDE_ASM_ARCH_MXC_EHCI_H
-#define __INCLUDE_ASM_ARCH_MXC_EHCI_H
-
-struct mxc_usbh_platform_data {
- int (*init)(struct platform_device *pdev);
- int (*exit)(struct platform_device *pdev);
-
- unsigned int portsc;
- struct usb_phy *otg;
-};
-
-#endif /* __INCLUDE_ASM_ARCH_MXC_EHCI_H */
-
diff --git a/include/linux/platform_data/usb-mx2.h b/include/linux/platform_data/usb-mx2.h
deleted file mode 100644
index 97a670f3d8fb..000000000000
--- a/include/linux/platform_data/usb-mx2.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2009 Martin Fuzzey <mfuzzey@gmail.com>
- */
-
-#ifndef __ASM_ARCH_MX21_USBH
-#define __ASM_ARCH_MX21_USBH
-
-enum mx21_usbh_xcvr {
- /* Values below as used by hardware (HWMODE register) */
- MX21_USBXCVR_TXDIF_RXDIF = 0,
- MX21_USBXCVR_TXDIF_RXSE = 1,
- MX21_USBXCVR_TXSE_RXDIF = 2,
- MX21_USBXCVR_TXSE_RXSE = 3,
-};
-
-struct mx21_usbh_platform_data {
- enum mx21_usbh_xcvr host_xcvr; /* tranceiver mode host 1,2 ports */
- enum mx21_usbh_xcvr otg_xcvr; /* tranceiver mode otg (as host) port */
- u16 enable_host1:1,
- enable_host2:1,
- enable_otg_host:1, /* enable "OTG" port (as host) */
- host1_xcverless:1, /* traceiverless host1 port */
- host1_txenoe:1, /* output enable host1 transmit enable */
- otg_ext_xcvr:1, /* external tranceiver for OTG port */
- unused:10;
-};
-
-#endif /* __ASM_ARCH_MX21_USBH */
diff --git a/include/linux/platform_data/usb-omap.h b/include/linux/platform_data/usb-omap.h
index fa579b4c666b..580978e468f8 100644
--- a/include/linux/platform_data/usb-omap.h
+++ b/include/linux/platform_data/usb-omap.h
@@ -1,22 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* usb-omap.h - Platform data for the various OMAP USB IPs
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
- *
- * This software is distributed under the terms of the GNU General Public
- * License ("GPL") version 2, as published by the Free Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com
*/
#define OMAP3_HS_USB_PORTS 3
diff --git a/include/linux/platform_data/usb-omap1.h b/include/linux/platform_data/usb-omap1.h
index 43b5ce139c37..e7b8dc92a269 100644
--- a/include/linux/platform_data/usb-omap1.h
+++ b/include/linux/platform_data/usb-omap1.h
@@ -48,6 +48,10 @@ struct omap_usb_config {
u32 (*usb2_init)(unsigned nwires, unsigned alt_pingroup);
int (*ocpi_enable)(void);
+
+ void (*lb_reset)(void);
+
+ int (*transceiver_power)(int on);
};
#endif /* __LINUX_USB_OMAP1_H */
diff --git a/include/linux/platform_data/usb-s3c2410_udc.h b/include/linux/platform_data/usb-s3c2410_udc.h
index 07394819d03b..c0fbe1fe3426 100644
--- a/include/linux/platform_data/usb-s3c2410_udc.h
+++ b/include/linux/platform_data/usb-s3c2410_udc.h
@@ -22,12 +22,6 @@ enum s3c2410_udc_cmd_e {
struct s3c2410_udc_mach_info {
void (*udc_command)(enum s3c2410_udc_cmd_e);
void (*vbus_draw)(unsigned int ma);
-
- unsigned int pullup_pin;
- unsigned int pullup_pin_inverted;
-
- unsigned int vbus_pin;
- unsigned char vbus_pin_inverted;
};
extern void __init s3c24xx_udc_set_platdata(struct s3c2410_udc_mach_info *);
diff --git a/include/linux/platform_data/ux500_wdt.h b/include/linux/platform_data/ux500_wdt.h
deleted file mode 100644
index de6a4ad41e76..000000000000
--- a/include/linux/platform_data/ux500_wdt.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST Ericsson SA 2011
- *
- * STE Ux500 Watchdog platform data
- */
-#ifndef __UX500_WDT_H
-#define __UX500_WDT_H
-
-/**
- * struct ux500_wdt_data
- */
-struct ux500_wdt_data {
- unsigned int timeout;
- bool has_28_bits_resolution;
-};
-
-#endif /* __UX500_WDT_H */
diff --git a/include/linux/platform_data/video-imxfb.h b/include/linux/platform_data/video-imxfb.h
deleted file mode 100644
index 02812651af7d..000000000000
--- a/include/linux/platform_data/video-imxfb.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This structure describes the machine which we are running on.
- */
-#ifndef __MACH_IMXFB_H__
-#define __MACH_IMXFB_H__
-
-#include <linux/fb.h>
-
-#define PCR_TFT (1 << 31)
-#define PCR_COLOR (1 << 30)
-#define PCR_PBSIZ_1 (0 << 28)
-#define PCR_PBSIZ_2 (1 << 28)
-#define PCR_PBSIZ_4 (2 << 28)
-#define PCR_PBSIZ_8 (3 << 28)
-#define PCR_BPIX_1 (0 << 25)
-#define PCR_BPIX_2 (1 << 25)
-#define PCR_BPIX_4 (2 << 25)
-#define PCR_BPIX_8 (3 << 25)
-#define PCR_BPIX_12 (4 << 25)
-#define PCR_BPIX_16 (5 << 25)
-#define PCR_BPIX_18 (6 << 25)
-#define PCR_PIXPOL (1 << 24)
-#define PCR_FLMPOL (1 << 23)
-#define PCR_LPPOL (1 << 22)
-#define PCR_CLKPOL (1 << 21)
-#define PCR_OEPOL (1 << 20)
-#define PCR_SCLKIDLE (1 << 19)
-#define PCR_END_SEL (1 << 18)
-#define PCR_END_BYTE_SWAP (1 << 17)
-#define PCR_REV_VS (1 << 16)
-#define PCR_ACD_SEL (1 << 15)
-#define PCR_ACD(x) (((x) & 0x7f) << 8)
-#define PCR_SCLK_SEL (1 << 7)
-#define PCR_SHARP (1 << 6)
-#define PCR_PCD(x) ((x) & 0x3f)
-
-#define PWMR_CLS(x) (((x) & 0x1ff) << 16)
-#define PWMR_LDMSK (1 << 15)
-#define PWMR_SCR1 (1 << 10)
-#define PWMR_SCR0 (1 << 9)
-#define PWMR_CC_EN (1 << 8)
-#define PWMR_PW(x) ((x) & 0xff)
-
-#define LSCR1_PS_RISE_DELAY(x) (((x) & 0x7f) << 26)
-#define LSCR1_CLS_RISE_DELAY(x) (((x) & 0x3f) << 16)
-#define LSCR1_REV_TOGGLE_DELAY(x) (((x) & 0xf) << 8)
-#define LSCR1_GRAY2(x) (((x) & 0xf) << 4)
-#define LSCR1_GRAY1(x) (((x) & 0xf))
-
-struct imx_fb_videomode {
- struct fb_videomode mode;
- u32 pcr;
- bool aus_mode;
- unsigned char bpp;
-};
-
-struct imx_fb_platform_data {
- struct imx_fb_videomode *mode;
- int num_modes;
-
- u_int pwmr;
- u_int lscr1;
- u_int dmacr;
-
- int (*init)(struct platform_device *);
- void (*exit)(struct platform_device *);
-};
-
-#endif /* ifndef __MACH_IMXFB_H__ */
diff --git a/include/linux/platform_data/video-pxafb.h b/include/linux/platform_data/video-pxafb.h
index b3d574778326..6333bac166a5 100644
--- a/include/linux/platform_data/video-pxafb.h
+++ b/include/linux/platform_data/video-pxafb.h
@@ -8,7 +8,6 @@
*/
#include <linux/fb.h>
-#include <mach/regs-lcd.h>
/*
* Supported LCD connections
@@ -153,6 +152,27 @@ struct pxafb_mach_info {
void pxa_set_fb_info(struct device *, struct pxafb_mach_info *);
unsigned long pxafb_get_hsync_time(struct device *dev);
+/* smartpanel related */
+#define SMART_CMD_A0 (0x1 << 8)
+#define SMART_CMD_READ_STATUS_REG (0x0 << 9)
+#define SMART_CMD_READ_FRAME_BUFFER ((0x0 << 9) | SMART_CMD_A0)
+#define SMART_CMD_WRITE_COMMAND (0x1 << 9)
+#define SMART_CMD_WRITE_DATA ((0x1 << 9) | SMART_CMD_A0)
+#define SMART_CMD_WRITE_FRAME ((0x2 << 9) | SMART_CMD_A0)
+#define SMART_CMD_WAIT_FOR_VSYNC (0x3 << 9)
+#define SMART_CMD_NOOP (0x4 << 9)
+#define SMART_CMD_INTERRUPT (0x5 << 9)
+
+#define SMART_CMD(x) (SMART_CMD_WRITE_COMMAND | ((x) & 0xff))
+#define SMART_DAT(x) (SMART_CMD_WRITE_DATA | ((x) & 0xff))
+
+/* SMART_DELAY() is a software-controlled delay primitive that can be
+ * inserted between command sequences; the otherwise unused command 0x6 is
+ * used for it, and the delay ranges from 0 ms to 255 ms.
+ */
+#define SMART_CMD_DELAY (0x6 << 9)
+#define SMART_DELAY(ms) (SMART_CMD_DELAY | ((ms) & 0xff))
+
#ifdef CONFIG_FB_PXA_SMARTPANEL
extern int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int);
extern int pxafb_smart_flush(struct fb_info *info);
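
The macros compose into command sequences fed to pxafb_smart_queue(); a sketch of a hypothetical panel init sequence (the command opcodes are placeholders, not from a real panel datasheet):

static uint16_t panel_init_cmds[] = {
	SMART_CMD(0x01),		/* hypothetical "reset" opcode */
	SMART_DELAY(120),		/* let the panel settle for 120 ms */
	SMART_CMD(0x29),		/* hypothetical "display on" opcode */
	SMART_CMD_WAIT_FOR_VSYNC,
};

/* queued via pxafb_smart_queue(info, panel_init_cmds, ARRAY_SIZE(panel_init_cmds)) */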
diff --git a/include/linux/platform_data/wan_ixp4xx_hss.h b/include/linux/platform_data/wan_ixp4xx_hss.h
deleted file mode 100644
index d525a0feb9e1..000000000000
--- a/include/linux/platform_data/wan_ixp4xx_hss.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __PLATFORM_DATA_WAN_IXP4XX_HSS_H
-#define __PLATFORM_DATA_WAN_IXP4XX_HSS_H
-
-#include <linux/types.h>
-
-/* Information about built-in HSS (synchronous serial) interfaces */
-struct hss_plat_info {
- int (*set_clock)(int port, unsigned int clock_type);
- int (*open)(int port, void *pdev,
- void (*set_carrier_cb)(void *pdev, int carrier));
- void (*close)(int port, void *pdev);
- u8 txreadyq;
- u32 timer_freq;
-};
-
-#endif
diff --git a/include/linux/platform_data/wilco-ec.h b/include/linux/platform_data/wilco-ec.h
index afede15a95bf..3e268e636b5b 100644
--- a/include/linux/platform_data/wilco-ec.h
+++ b/include/linux/platform_data/wilco-ec.h
@@ -8,8 +8,8 @@
#ifndef WILCO_EC_H
#define WILCO_EC_H
-#include <linux/device.h>
-#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
/* Message flags for using the mailbox() interface */
#define WILCO_EC_FLAG_NO_RESPONSE BIT(0) /* EC does not respond */
@@ -17,6 +17,10 @@
/* Normal commands have a maximum 32 bytes of data */
#define EC_MAILBOX_DATA_SIZE 32
+struct device;
+struct resource;
+struct platform_device;
+
/**
* struct wilco_ec_device - Wilco Embedded Controller handle.
* @dev: Device handle.
@@ -79,7 +83,7 @@ struct wilco_ec_response {
u16 result;
u16 data_size;
u8 reserved[2];
- u8 data[0];
+ u8 data[];
} __packed;
/**
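
With data[] now a proper flexible array, allocations use struct_size(); a minimal sketch (assuming <linux/slab.h> and <linux/overflow.h>):

static struct wilco_ec_response *alloc_ec_response(size_t data_size)
{
	struct wilco_ec_response *resp;

	resp = kzalloc(struct_size(resp, data, data_size), GFP_KERNEL);
	return resp;
}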
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
index d39fc658c320..28234dc9fa6a 100644
--- a/include/linux/platform_data/x86/asus-wmi.h
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -49,6 +49,7 @@
#define ASUS_WMI_DEVID_LED4 0x00020014
#define ASUS_WMI_DEVID_LED5 0x00020015
#define ASUS_WMI_DEVID_LED6 0x00020016
+#define ASUS_WMI_DEVID_MICMUTE_LED 0x00040017
/* Backlight and Brightness */
#define ASUS_WMI_DEVID_ALS_ENABLE 0x00050001 /* Ambient Light Sensor */
@@ -61,7 +62,10 @@
#define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY 0x00120075
/* Misc */
+#define ASUS_WMI_DEVID_PANEL_OD 0x00050019
#define ASUS_WMI_DEVID_CAMERA 0x00060013
+#define ASUS_WMI_DEVID_LID_FLIP 0x00060062
+#define ASUS_WMI_DEVID_LID_FLIP_ROG 0x00060077
/* Storage */
#define ASUS_WMI_DEVID_CARDREADER 0x00080013
@@ -75,6 +79,9 @@
#define ASUS_WMI_DEVID_THERMAL_CTRL 0x00110011
#define ASUS_WMI_DEVID_FAN_CTRL 0x00110012 /* deprecated */
#define ASUS_WMI_DEVID_CPU_FAN_CTRL 0x00110013
+#define ASUS_WMI_DEVID_GPU_FAN_CTRL 0x00110014
+#define ASUS_WMI_DEVID_CPU_FAN_CURVE 0x00110024
+#define ASUS_WMI_DEVID_GPU_FAN_CURVE 0x00110025
/* Power */
#define ASUS_WMI_DEVID_PROCESSOR_STATE 0x00120012
@@ -85,6 +92,24 @@
/* Maximum charging percentage */
#define ASUS_WMI_DEVID_RSOC 0x00120057
+/* Keyboard dock */
+#define ASUS_WMI_DEVID_KBD_DOCK 0x00120063
+
+/* egpu on/off */
+#define ASUS_WMI_DEVID_EGPU 0x00090019
+
+/* dgpu on/off */
+#define ASUS_WMI_DEVID_DGPU 0x00090020
+
+/* gpu mux switch, 0 = dGPU, 1 = Optimus */
+#define ASUS_WMI_DEVID_GPU_MUX 0x00090016
+
+/* TUF laptop RGB modes/colours */
+#define ASUS_WMI_DEVID_TUF_RGB_MODE 0x00100056
+
+/* TUF laptop RGB power/state */
+#define ASUS_WMI_DEVID_TUF_RGB_STATE 0x00100057
+
/* DSTS masks */
#define ASUS_WMI_DSTS_STATUS_BIT 0x00000001
#define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002
diff --git a/include/linux/platform_data/x86/clk-lpss.h b/include/linux/platform_data/x86/clk-lpss.h
index 207e1a317800..41df326583f9 100644
--- a/include/linux/platform_data/x86/clk-lpss.h
+++ b/include/linux/platform_data/x86/clk-lpss.h
@@ -15,6 +15,6 @@ struct lpss_clk_data {
struct clk *clk;
};
-extern int lpt_clk_init(void);
+extern int lpss_atom_clk_init(void);
#endif /* __CLK_LPSS_H */
diff --git a/include/linux/platform_data/x86/mlxcpld.h b/include/linux/platform_data/x86/mlxcpld.h
deleted file mode 100644
index b08dcb183fca..000000000000
--- a/include/linux/platform_data/x86/mlxcpld.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * mlxcpld.h - Mellanox I2C multiplexer support in CPLD
- *
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Michael Shych <michaels@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _LINUX_I2C_MLXCPLD_H
-#define _LINUX_I2C_MLXCPLD_H
-
-/* Platform data for the CPLD I2C multiplexers */
-
-/* mlxcpld_mux_plat_data - per mux data, used with i2c_register_board_info
- * @adap_ids - adapter array
- * @num_adaps - number of adapters
- * @sel_reg_addr - mux select register offset in CPLD space
- */
-struct mlxcpld_mux_plat_data {
- int *adap_ids;
- int num_adaps;
- int sel_reg_addr;
-};
-
-#endif /* _LINUX_I2C_MLXCPLD_H */
diff --git a/include/linux/platform_data/x86/nvidia-wmi-ec-backlight.h b/include/linux/platform_data/x86/nvidia-wmi-ec-backlight.h
new file mode 100644
index 000000000000..23d60130272c
--- /dev/null
+++ b/include/linux/platform_data/x86/nvidia-wmi-ec-backlight.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef __PLATFORM_DATA_X86_NVIDIA_WMI_EC_BACKLIGHT_H
+#define __PLATFORM_DATA_X86_NVIDIA_WMI_EC_BACKLIGHT_H
+
+#define WMI_BRIGHTNESS_GUID "603E9613-EF25-4338-A3D0-C46177516DB7"
+
+/**
+ * enum wmi_brightness_method - WMI method IDs
+ * @WMI_BRIGHTNESS_METHOD_LEVEL: Get/Set EC brightness level status
+ * @WMI_BRIGHTNESS_METHOD_SOURCE: Get/Set EC Brightness Source
+ */
+enum wmi_brightness_method {
+ WMI_BRIGHTNESS_METHOD_LEVEL = 1,
+ WMI_BRIGHTNESS_METHOD_SOURCE = 2,
+ WMI_BRIGHTNESS_METHOD_MAX
+};
+
+/**
+ * enum wmi_brightness_mode - Operation mode for WMI-wrapped method
+ * @WMI_BRIGHTNESS_MODE_GET: Get the current brightness level/source.
+ * @WMI_BRIGHTNESS_MODE_SET: Set the brightness level.
+ * @WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL: Get the maximum brightness level. This
+ * is only valid when the WMI method is
+ * %WMI_BRIGHTNESS_METHOD_LEVEL.
+ */
+enum wmi_brightness_mode {
+ WMI_BRIGHTNESS_MODE_GET = 0,
+ WMI_BRIGHTNESS_MODE_SET = 1,
+ WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL = 2,
+ WMI_BRIGHTNESS_MODE_MAX
+};
+
+/**
+ * enum wmi_brightness_source - Backlight brightness control source selection
+ * @WMI_BRIGHTNESS_SOURCE_GPU: Backlight brightness is controlled by the GPU.
+ * @WMI_BRIGHTNESS_SOURCE_EC: Backlight brightness is controlled by the
+ * system's Embedded Controller (EC).
+ * @WMI_BRIGHTNESS_SOURCE_AUX: Backlight brightness is controlled over the
+ * DisplayPort AUX channel.
+ */
+enum wmi_brightness_source {
+ WMI_BRIGHTNESS_SOURCE_GPU = 1,
+ WMI_BRIGHTNESS_SOURCE_EC = 2,
+ WMI_BRIGHTNESS_SOURCE_AUX = 3,
+ WMI_BRIGHTNESS_SOURCE_MAX
+};
+
+/**
+ * struct wmi_brightness_args - arguments for the WMI-wrapped ACPI method
+ * @mode: Pass in an &enum wmi_brightness_mode value to select between
+ * getting or setting a value.
+ * @val: In parameter for value to set when using %WMI_BRIGHTNESS_MODE_SET
+ * mode. Not used in conjunction with %WMI_BRIGHTNESS_MODE_GET or
+ * %WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL mode.
+ * @ret: Out parameter returning retrieved value when operating in
+ * %WMI_BRIGHTNESS_MODE_GET or %WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL
+ * mode. Not used in %WMI_BRIGHTNESS_MODE_SET mode.
+ * @ignored: Padding; not used. The ACPI method expects a 24-byte params struct.
+ *
+ * This is the parameters structure for the WmiBrightnessNotify ACPI method as
+ * wrapped by WMI. The value passed in to @val or returned by @ret will be a
+ * brightness value when the WMI method ID is %WMI_BRIGHTNESS_METHOD_LEVEL, or
+ * an &enum wmi_brightness_source value with %WMI_BRIGHTNESS_METHOD_SOURCE.
+ */
+struct wmi_brightness_args {
+ u32 mode;
+ u32 val;
+ u32 ret;
+ u32 ignored[3];
+};
+
+#endif
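
A usage sketch, assuming the usual ACPI WMI calling convention of wmi_evaluate_method() with the same buffer passed as input and output, as the backlight driver consuming this header does; example_get_source() is hypothetical:

#include <linux/acpi.h>
#include <linux/platform_data/x86/nvidia-wmi-ec-backlight.h>
#include <linux/wmi.h>

static int example_get_source(u32 *source)
{
	struct wmi_brightness_args args = {
		.mode = WMI_BRIGHTNESS_MODE_GET,
	};
	struct acpi_buffer buf = { (acpi_size)sizeof(args), &args };
	acpi_status status;

	/* The method reads and writes the same 24-byte params struct. */
	status = wmi_evaluate_method(WMI_BRIGHTNESS_GUID, 0,
				     WMI_BRIGHTNESS_METHOD_SOURCE,
				     &buf, &buf);
	if (ACPI_FAILURE(status))
		return -EIO;

	*source = args.ret;	/* an enum wmi_brightness_source value */
	return 0;
}
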
diff --git a/include/linux/platform_data/x86/p2sb.h b/include/linux/platform_data/x86/p2sb.h
new file mode 100644
index 000000000000..a1d5fddc8f13
--- /dev/null
+++ b/include/linux/platform_data/x86/p2sb.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Primary to Sideband (P2SB) bridge access support
+ */
+
+#ifndef _PLATFORM_DATA_X86_P2SB_H
+#define _PLATFORM_DATA_X86_P2SB_H
+
+#include <linux/errno.h>
+#include <linux/kconfig.h>
+
+struct pci_bus;
+struct resource;
+
+#if IS_BUILTIN(CONFIG_P2SB)
+
+int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem);
+
+#else /* CONFIG_P2SB */
+
+static inline int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_P2SB is not set */
+
+#endif /* _PLATFORM_DATA_X86_P2SB_H */
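
A minimal sketch of the intended call pattern. A NULL bus selects the default root bus in the current implementation, and the devfn value below is purely illustrative:

#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/platform_data/x86/p2sb.h>

static int example_get_hidden_bar(struct resource *mem)
{
	/* Ask the P2SB code for the BAR of a device hidden behind P2SB. */
	return p2sb_bar(NULL, PCI_DEVFN(31, 1), mem);
}
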
diff --git a/include/linux/platform_data/x86/pmc_atom.h b/include/linux/platform_data/x86/pmc_atom.h
index 022bcea9edec..b8a701c77fd0 100644
--- a/include/linux/platform_data/x86/pmc_atom.h
+++ b/include/linux/platform_data/x86/pmc_atom.h
@@ -1,12 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Intel Atom SOC Power Management Controller Header File
- * Copyright (c) 2014, Intel Corporation.
+ * Intel Atom SoC Power Management Controller Header File
+ * Copyright (c) 2014-2015,2022 Intel Corporation.
*/
#ifndef PMC_ATOM_H
#define PMC_ATOM_H
+#include <linux/bits.h>
+
/* ValleyView Power Control Unit PCI Device ID */
#define PCI_DEVICE_ID_VLV_PMC 0x0F1C
/* CherryTrail Power Control Unit PCI Device ID */
@@ -47,7 +49,7 @@
#define PMC_S0I2_TMR 0x88
#define PMC_S0I3_TMR 0x8C
#define PMC_S0_TMR 0x90
-/* Sleep state counter is in units of of 32us */
+/* Sleep state counter is in units of 32us */
#define PMC_TMR_SHIFT 5
/* Power status of power islands */
@@ -139,11 +141,10 @@
#define ACPI_MMIO_REG_LEN 0x100
#define PM1_CNT 0x4
-#define SLEEP_TYPE_MASK 0xFFFFECFF
+#define SLEEP_TYPE_MASK GENMASK(12, 10)
#define SLEEP_TYPE_S5 0x1C00
-#define SLEEP_ENABLE 0x2000
+#define SLEEP_ENABLE BIT(13)
extern int pmc_atom_read(int offset, u32 *value);
-extern int pmc_atom_write(int offset, u32 value);
#endif /* PMC_ATOM_H */
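
The GENMASK()/BIT() conversion above only changes notation: GENMASK(12, 10) is bits 10..12 (0x1c00) and BIT(13) is 0x2000. A sketch of how the definitions compose when arming an S5 sleep (register access elided; the helper is hypothetical):

#include <linux/bits.h>
#include <linux/types.h>

static u32 example_pm1_cnt_for_s5(u32 pm1_cnt)
{
	pm1_cnt &= ~SLEEP_TYPE_MASK;	/* clear the sleep-type field */
	pm1_cnt |= SLEEP_TYPE_S5;	/* 0x1c00 fills GENMASK(12, 10) */
	pm1_cnt |= SLEEP_ENABLE;	/* BIT(13) */
	return pm1_cnt;
}
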
diff --git a/include/linux/platform_data/x86/simatic-ipc-base.h b/include/linux/platform_data/x86/simatic-ipc-base.h
new file mode 100644
index 000000000000..57d6a10dfc9e
--- /dev/null
+++ b/include/linux/platform_data/x86/simatic-ipc-base.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Siemens SIMATIC IPC drivers
+ *
+ * Copyright (c) Siemens AG, 2018-2021
+ *
+ * Authors:
+ * Henning Schild <henning.schild@siemens.com>
+ * Gerd Haeussler <gerd.haeussler.ext@siemens.com>
+ */
+
+#ifndef __PLATFORM_DATA_X86_SIMATIC_IPC_BASE_H
+#define __PLATFORM_DATA_X86_SIMATIC_IPC_BASE_H
+
+#include <linux/types.h>
+
+#define SIMATIC_IPC_DEVICE_NONE 0
+#define SIMATIC_IPC_DEVICE_227D 1
+#define SIMATIC_IPC_DEVICE_427E 2
+#define SIMATIC_IPC_DEVICE_127E 3
+#define SIMATIC_IPC_DEVICE_227E 4
+#define SIMATIC_IPC_DEVICE_227G 5
+
+struct simatic_ipc_platform {
+ u8 devmode;
+};
+
+#endif /* __PLATFORM_DATA_X86_SIMATIC_IPC_BASE_H */
diff --git a/include/linux/platform_data/x86/simatic-ipc.h b/include/linux/platform_data/x86/simatic-ipc.h
new file mode 100644
index 000000000000..632320ec8f08
--- /dev/null
+++ b/include/linux/platform_data/x86/simatic-ipc.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Siemens SIMATIC IPC drivers
+ *
+ * Copyright (c) Siemens AG, 2018-2021
+ *
+ * Authors:
+ * Henning Schild <henning.schild@siemens.com>
+ * Gerd Haeussler <gerd.haeussler.ext@siemens.com>
+ */
+
+#ifndef __PLATFORM_DATA_X86_SIMATIC_IPC_H
+#define __PLATFORM_DATA_X86_SIMATIC_IPC_H
+
+#include <linux/dmi.h>
+#include <linux/platform_data/x86/simatic-ipc-base.h>
+
+#define SIMATIC_IPC_DMI_ENTRY_OEM 129
+/* binary type */
+#define SIMATIC_IPC_DMI_TYPE 0xff
+#define SIMATIC_IPC_DMI_GROUP 0x05
+#define SIMATIC_IPC_DMI_ENTRY 0x02
+#define SIMATIC_IPC_DMI_TID 0x02
+
+enum simatic_ipc_station_ids {
+ SIMATIC_IPC_INVALID_STATION_ID = 0,
+ SIMATIC_IPC_IPC227D = 0x00000501,
+ SIMATIC_IPC_IPC427D = 0x00000701,
+ SIMATIC_IPC_IPC227E = 0x00000901,
+ SIMATIC_IPC_IPC277E = 0x00000902,
+ SIMATIC_IPC_IPC427E = 0x00000A01,
+ SIMATIC_IPC_IPC477E = 0x00000A02,
+ SIMATIC_IPC_IPC127E = 0x00000D01,
+ SIMATIC_IPC_IPC227G = 0x00000F01,
+ SIMATIC_IPC_IPC427G = 0x00001001,
+};
+
+static inline u32 simatic_ipc_get_station_id(u8 *data, int max_len)
+{
+ struct {
+ u8 type; /* type (0xff = binary) */
+ u8 len; /* len of data entry */
+ u8 group;
+ u8 entry;
+ u8 tid;
+ __le32 station_id; /* station id (LE) */
+ } __packed * data_entry = (void *)data + sizeof(struct dmi_header);
+
+ while ((u8 *)data_entry < data + max_len) {
+ if (data_entry->type == SIMATIC_IPC_DMI_TYPE &&
+ data_entry->len == sizeof(*data_entry) &&
+ data_entry->group == SIMATIC_IPC_DMI_GROUP &&
+ data_entry->entry == SIMATIC_IPC_DMI_ENTRY &&
+ data_entry->tid == SIMATIC_IPC_DMI_TID) {
+ return le32_to_cpu(data_entry->station_id);
+ }
+ data_entry = (void *)((u8 *)(data_entry) + data_entry->len);
+ }
+
+ return SIMATIC_IPC_INVALID_STATION_ID;
+}
+
+static inline void
+simatic_ipc_find_dmi_entry_helper(const struct dmi_header *dh, void *_data)
+{
+ u32 *id = _data;
+
+ if (dh->type != SIMATIC_IPC_DMI_ENTRY_OEM)
+ return;
+
+ *id = simatic_ipc_get_station_id((u8 *)dh, dh->length);
+}
+
+#endif /* __PLATFORM_DATA_X86_SIMATIC_IPC_H */
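
The helper above is shaped for use as a dmi_walk() callback; a minimal lookup sketch (example_station_id() is hypothetical):

#include <linux/dmi.h>
#include <linux/platform_data/x86/simatic-ipc.h>

static u32 example_station_id(void)
{
	u32 station_id = SIMATIC_IPC_INVALID_STATION_ID;

	if (dmi_walk(simatic_ipc_find_dmi_entry_helper, &station_id))
		return SIMATIC_IPC_INVALID_STATION_ID;

	return station_id;
}
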
diff --git a/include/linux/platform_data/x86/soc.h b/include/linux/platform_data/x86/soc.h
new file mode 100644
index 000000000000..da05f425587a
--- /dev/null
+++ b/include/linux/platform_data/x86/soc.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Helpers for Intel SoC model detection
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ */
+
+#ifndef __PLATFORM_DATA_X86_SOC_H
+#define __PLATFORM_DATA_X86_SOC_H
+
+#if IS_ENABLED(CONFIG_X86)
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#define SOC_INTEL_IS_CPU(soc, type) \
+static inline bool soc_intel_is_##soc(void) \
+{ \
+ static const struct x86_cpu_id soc##_cpu_ids[] = { \
+ X86_MATCH_INTEL_FAM6_MODEL(type, NULL), \
+ {} \
+ }; \
+ const struct x86_cpu_id *id; \
+ \
+ id = x86_match_cpu(soc##_cpu_ids); \
+ if (id) \
+ return true; \
+ return false; \
+}
+
+SOC_INTEL_IS_CPU(byt, ATOM_SILVERMONT);
+SOC_INTEL_IS_CPU(cht, ATOM_AIRMONT);
+SOC_INTEL_IS_CPU(apl, ATOM_GOLDMONT);
+SOC_INTEL_IS_CPU(glk, ATOM_GOLDMONT_PLUS);
+SOC_INTEL_IS_CPU(cml, KABYLAKE_L);
+
+#else /* IS_ENABLED(CONFIG_X86) */
+
+static inline bool soc_intel_is_byt(void)
+{
+ return false;
+}
+
+static inline bool soc_intel_is_cht(void)
+{
+ return false;
+}
+
+static inline bool soc_intel_is_apl(void)
+{
+ return false;
+}
+
+static inline bool soc_intel_is_glk(void)
+{
+ return false;
+}
+
+static inline bool soc_intel_is_cml(void)
+{
+ return false;
+}
+#endif /* IS_ENABLED(CONFIG_X86) */
+
+#endif /* __PLATFORM_DATA_X86_SOC_H */
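
Because the !CONFIG_X86 stubs return a constant false, callers can use the helpers unconditionally and let dead-code elimination remove the quirk path. A hypothetical sketch:

#include <linux/platform_data/x86/soc.h>

static bool example_needs_silvermont_quirk(void)
{
	/* Compiles away entirely on non-x86 configurations. */
	return soc_intel_is_byt() || soc_intel_is_cht();
}
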
diff --git a/include/linux/platform_data/x86/spi-intel.h b/include/linux/platform_data/x86/spi-intel.h
new file mode 100644
index 000000000000..a512ec37abbb
--- /dev/null
+++ b/include/linux/platform_data/x86/spi-intel.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel PCH/PCU SPI flash driver.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#ifndef SPI_INTEL_PDATA_H
+#define SPI_INTEL_PDATA_H
+
+enum intel_spi_type {
+ INTEL_SPI_BYT = 1,
+ INTEL_SPI_LPT,
+ INTEL_SPI_BXT,
+ INTEL_SPI_CNL,
+};
+
+/**
+ * struct intel_spi_boardinfo - Board specific data for Intel SPI driver
+ * @type: Type which this controller is compatible with
+ * @set_writeable: Try to make the chip writeable (optional)
+ * @data: Data to be passed to @set_writeable; can be %NULL
+ */
+struct intel_spi_boardinfo {
+ enum intel_spi_type type;
+ bool (*set_writeable)(void __iomem *base, void *data);
+ void *data;
+};
+
+#endif /* SPI_INTEL_PDATA_H */
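
A sketch of how board code might fill this in. The callback body and structure contents are illustrative; a real implementation flips the controller's write-protect bit at @base:

#include <linux/io.h>
#include <linux/platform_data/x86/spi-intel.h>

static bool example_set_writeable(void __iomem *base, void *data)
{
	/* e.g. set a BIOS-write-enable bit in a controller register */
	return true;
}

static const struct intel_spi_boardinfo example_info = {
	.type		= INTEL_SPI_CNL,
	.set_writeable	= example_set_writeable,
};
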
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 041bfa412aa0..b0d5a253156e 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -15,6 +15,7 @@
#define PLATFORM_DEVID_NONE (-1)
#define PLATFORM_DEVID_AUTO (-2)
+struct irq_affinity;
struct mfd_cell;
struct property_entry;
struct platform_device_id;
@@ -25,11 +26,16 @@ struct platform_device {
bool id_auto;
struct device dev;
u64 platform_dma_mask;
+ struct device_dma_parameters dma_parms;
u32 num_resources;
struct resource *resource;
const struct platform_device_id *id_entry;
- char *driver_override; /* Driver name to force a match */
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
/* MFD cell pointer */
struct mfd_cell *mfd_cell;
@@ -51,21 +57,29 @@ extern struct device platform_bus;
extern struct resource *platform_get_resource(struct platform_device *,
unsigned int, unsigned int);
+extern struct resource *platform_get_mem_or_io(struct platform_device *,
+ unsigned int);
+
extern struct device *
platform_find_device_by_driver(struct device *start,
const struct device_driver *drv);
extern void __iomem *
+devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
+ unsigned int index, struct resource **res);
+extern void __iomem *
devm_platform_ioremap_resource(struct platform_device *pdev,
unsigned int index);
extern void __iomem *
-devm_platform_ioremap_resource_wc(struct platform_device *pdev,
- unsigned int index);
-extern void __iomem *
devm_platform_ioremap_resource_byname(struct platform_device *pdev,
const char *name);
extern int platform_get_irq(struct platform_device *, unsigned int);
extern int platform_get_irq_optional(struct platform_device *, unsigned int);
extern int platform_irq_count(struct platform_device *);
+extern int devm_platform_get_irqs_affinity(struct platform_device *dev,
+ struct irq_affinity *affd,
+ unsigned int minvec,
+ unsigned int maxvec,
+ int **irqs);
extern struct resource *platform_get_resource_byname(struct platform_device *,
unsigned int,
const char *);
@@ -89,7 +103,7 @@ struct platform_device_info {
size_t size_data;
u64 dma_mask;
- struct property_entry *properties;
+ const struct property_entry *properties;
};
extern struct platform_device *platform_device_register_full(
const struct platform_device_info *pdevinfo);
@@ -187,8 +201,6 @@ extern int platform_device_add_resources(struct platform_device *pdev,
unsigned int num);
extern int platform_device_add_data(struct platform_device *pdev,
const void *data, size_t size);
-extern int platform_device_add_properties(struct platform_device *pdev,
- const struct property_entry *properties);
extern int platform_device_add(struct platform_device *pdev);
extern void platform_device_del(struct platform_device *pdev);
extern void platform_device_put(struct platform_device *pdev);
@@ -202,6 +214,14 @@ struct platform_driver {
struct device_driver driver;
const struct platform_device_id *id_table;
bool prevent_deferred_probe;
+ /*
+ * Most device drivers need not care about this flag as long as all
+ * DMA is handled through the kernel DMA API. Some special drivers,
+ * for example VFIO drivers, know how to manage the DMA themselves
+ * and set this flag so that the IOMMU layer will allow them to set
+ * up and manage their own I/O address space.
+ */
+ bool driver_managed_dma;
};
#define to_platform_driver(drv) (container_of((drv), struct platform_driver, \
@@ -320,8 +340,6 @@ extern int platform_pm_restore(struct device *dev);
#define platform_pm_restore NULL
#endif
-extern int platform_dma_configure(struct device *dev);
-
#ifdef CONFIG_PM_SLEEP
#define USE_PLATFORM_PM_SLEEP_OPS \
.suspend = platform_pm_suspend, \
@@ -346,4 +364,7 @@ static inline int is_sh_early_platform_device(struct platform_device *pdev)
}
#endif /* CONFIG_SUPERH */
+/* For now only SuperH uses it */
+void early_platform_cleanup(void);
+
#endif /* _PLATFORM_DEVICE_H_ */
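
A sketch of the new combined helper in a probe routine: it looks up resource @index, ioremaps it, and also hands back the struct resource for callers that need its size or flags (example_probe() is hypothetical):

#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	dev_dbg(&pdev->dev, "mapped %pR\n", res);
	return 0;
}
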
diff --git a/include/linux/platform_profile.h b/include/linux/platform_profile.h
new file mode 100644
index 000000000000..e5cbb6841f3a
--- /dev/null
+++ b/include/linux/platform_profile.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Platform profile sysfs interface
+ *
+ * See Documentation/userspace-api/sysfs-platform_profile.rst for more
+ * information.
+ */
+
+#ifndef _PLATFORM_PROFILE_H_
+#define _PLATFORM_PROFILE_H_
+
+#include <linux/bitops.h>
+
+/*
+ * If more options are added please update profile_names array in
+ * platform_profile.c and sysfs-platform_profile documentation.
+ */
+
+enum platform_profile_option {
+ PLATFORM_PROFILE_LOW_POWER,
+ PLATFORM_PROFILE_COOL,
+ PLATFORM_PROFILE_QUIET,
+ PLATFORM_PROFILE_BALANCED,
+ PLATFORM_PROFILE_BALANCED_PERFORMANCE,
+ PLATFORM_PROFILE_PERFORMANCE,
+ PLATFORM_PROFILE_LAST, /* must always be last */
+};
+
+struct platform_profile_handler {
+ unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ int (*profile_get)(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile);
+ int (*profile_set)(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile);
+};
+
+int platform_profile_register(struct platform_profile_handler *pprof);
+int platform_profile_remove(void);
+void platform_profile_notify(void);
+
+#endif /* _PLATFORM_PROFILE_H_ */
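
A registration sketch, assuming a driver that exposes two profiles and merely caches the selection; real get/set callbacks would talk to EC firmware, and all example_* names are hypothetical:

#include <linux/bitops.h>
#include <linux/platform_profile.h>

static enum platform_profile_option example_current = PLATFORM_PROFILE_BALANCED;

static int example_profile_get(struct platform_profile_handler *pprof,
			       enum platform_profile_option *profile)
{
	*profile = example_current;
	return 0;
}

static int example_profile_set(struct platform_profile_handler *pprof,
			       enum platform_profile_option profile)
{
	example_current = profile;	/* a real driver writes to firmware */
	return 0;
}

static struct platform_profile_handler example_handler = {
	.profile_get = example_profile_get,
	.profile_set = example_profile_set,
};

static int example_register(void)
{
	set_bit(PLATFORM_PROFILE_BALANCED, example_handler.choices);
	set_bit(PLATFORM_PROFILE_PERFORMANCE, example_handler.choices);

	return platform_profile_register(&example_handler);
}
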
diff --git a/include/linux/pldmfw.h b/include/linux/pldmfw.h
new file mode 100644
index 000000000000..0fc831338226
--- /dev/null
+++ b/include/linux/pldmfw.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2019, Intel Corporation. */
+
+#ifndef _PLDMFW_H_
+#define _PLDMFW_H_
+
+#include <linux/list.h>
+#include <linux/firmware.h>
+
+#define PLDM_DEVICE_UPDATE_CONTINUE_AFTER_FAIL BIT(0)
+
+#define PLDM_STRING_TYPE_UNKNOWN 0
+#define PLDM_STRING_TYPE_ASCII 1
+#define PLDM_STRING_TYPE_UTF8 2
+#define PLDM_STRING_TYPE_UTF16 3
+#define PLDM_STRING_TYPE_UTF16LE 4
+#define PLDM_STRING_TYPE_UTF16BE 5
+
+struct pldmfw_record {
+ struct list_head entry;
+
+ /* List of descriptor TLVs */
+ struct list_head descs;
+
+ /* Component Set version string */
+ const u8 *version_string;
+ u8 version_type;
+ u8 version_len;
+
+ /* Package Data length */
+ u16 package_data_len;
+
+ /* Bitfield of Device Update Flags */
+ u32 device_update_flags;
+
+ /* Package Data block */
+ const u8 *package_data;
+
+ /* Bitmap of components applicable to this record */
+ unsigned long *component_bitmap;
+ u16 component_bitmap_len;
+};
+
+/* Standard descriptor TLV identifiers */
+#define PLDM_DESC_ID_PCI_VENDOR_ID 0x0000
+#define PLDM_DESC_ID_IANA_ENTERPRISE_ID 0x0001
+#define PLDM_DESC_ID_UUID 0x0002
+#define PLDM_DESC_ID_PNP_VENDOR_ID 0x0003
+#define PLDM_DESC_ID_ACPI_VENDOR_ID 0x0004
+#define PLDM_DESC_ID_PCI_DEVICE_ID 0x0100
+#define PLDM_DESC_ID_PCI_SUBVENDOR_ID 0x0101
+#define PLDM_DESC_ID_PCI_SUBDEV_ID 0x0102
+#define PLDM_DESC_ID_PCI_REVISION_ID 0x0103
+#define PLDM_DESC_ID_PNP_PRODUCT_ID 0x0104
+#define PLDM_DESC_ID_ACPI_PRODUCT_ID 0x0105
+#define PLDM_DESC_ID_VENDOR_DEFINED 0xFFFF
+
+struct pldmfw_desc_tlv {
+ struct list_head entry;
+
+ const u8 *data;
+ u16 type;
+ u16 size;
+};
+
+#define PLDM_CLASSIFICATION_UNKNOWN 0x0000
+#define PLDM_CLASSIFICATION_OTHER 0x0001
+#define PLDM_CLASSIFICATION_DRIVER 0x0002
+#define PLDM_CLASSIFICATION_CONFIG_SW 0x0003
+#define PLDM_CLASSIFICATION_APP_SW 0x0004
+#define PLDM_CLASSIFICATION_INSTRUMENTATION 0x0005
+#define PLDM_CLASSIFICATION_BIOS 0x0006
+#define PLDM_CLASSIFICATION_DIAGNOSTIC_SW 0x0007
+#define PLDM_CLASSIFICATION_OS 0x0008
+#define PLDM_CLASSIFICATION_MIDDLEWARE 0x0009
+#define PLDM_CLASSIFICATION_FIRMWARE 0x000A
+#define PLDM_CLASSIFICATION_CODE 0x000B
+#define PLDM_CLASSIFICATION_SERVICE_PACK 0x000C
+#define PLDM_CLASSIFICATION_SOFTWARE_BUNDLE 0x000D
+
+#define PLDM_ACTIVATION_METHOD_AUTO BIT(0)
+#define PLDM_ACTIVATION_METHOD_SELF_CONTAINED BIT(1)
+#define PLDM_ACTIVATION_METHOD_MEDIUM_SPECIFIC BIT(2)
+#define PLDM_ACTIVATION_METHOD_REBOOT BIT(3)
+#define PLDM_ACTIVATION_METHOD_DC_CYCLE BIT(4)
+#define PLDM_ACTIVATION_METHOD_AC_CYCLE BIT(5)
+
+#define PLDMFW_COMPONENT_OPTION_FORCE_UPDATE BIT(0)
+#define PLDMFW_COMPONENT_OPTION_USE_COMPARISON_STAMP BIT(1)
+
+struct pldmfw_component {
+ struct list_head entry;
+
+ /* component identifier */
+ u16 classification;
+ u16 identifier;
+
+ u16 options;
+ u16 activation_method;
+
+ u32 comparison_stamp;
+
+ u32 component_size;
+ const u8 *component_data;
+
+ /* Component version string */
+ const u8 *version_string;
+ u8 version_type;
+ u8 version_len;
+
+ /* component index */
+ u8 index;
+
+};
+
+/* Transfer flag used for sending components to the firmware */
+#define PLDM_TRANSFER_FLAG_START BIT(0)
+#define PLDM_TRANSFER_FLAG_MIDDLE BIT(1)
+#define PLDM_TRANSFER_FLAG_END BIT(2)
+
+struct pldmfw_ops;
+
+/* Main entry point to the PLDM firmware update engine. Device drivers
+ * should embed this in a private structure and use container_of to obtain
+ * a pointer to their own data, used to implement the device specific
+ * operations.
+ */
+struct pldmfw {
+ const struct pldmfw_ops *ops;
+ struct device *dev;
+};
+
+bool pldmfw_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record);
+
+/* Operations invoked by the generic PLDM firmware update engine. Used to
+ * implement device specific logic.
+ *
+ * @match_record: check if the device matches the given record. For
+ * convenience, a standard implementation is provided for PCI devices.
+ *
+ * @send_package_data: send the package data associated with the matching
+ * record to firmware.
+ *
+ * @send_component_table: send the component data associated with a given
+ * component to firmware. Called once for each applicable component.
+ *
+ * @flash_component: Flash the data for a given component to the device.
+ * Called once for each applicable component, after all component tables have
+ * been sent.
+ *
+ * @finalize_update: (optional) Finish the update. Called after all components
+ * have been flashed.
+ */
+struct pldmfw_ops {
+ bool (*match_record)(struct pldmfw *context, struct pldmfw_record *record);
+ int (*send_package_data)(struct pldmfw *context, const u8 *data, u16 length);
+ int (*send_component_table)(struct pldmfw *context, struct pldmfw_component *component,
+ u8 transfer_flag);
+ int (*flash_component)(struct pldmfw *context, struct pldmfw_component *component);
+ int (*finalize_update)(struct pldmfw *context);
+};
+
+int pldmfw_flash_image(struct pldmfw *context, const struct firmware *fw);
+
+#endif
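
A wiring sketch for a PCI device driver, using the provided pldmfw_op_pci_match_record() together with hypothetical stubs for the remaining callbacks the engine invokes:

#include <linux/device.h>
#include <linux/pldmfw.h>

static int example_send_package_data(struct pldmfw *ctx, const u8 *data,
				     u16 length)
{
	return 0;	/* forward the record's package data to firmware */
}

static int example_send_component_table(struct pldmfw *ctx,
					struct pldmfw_component *component,
					u8 transfer_flag)
{
	return 0;	/* announce the component to firmware */
}

static int example_flash_component(struct pldmfw *ctx,
				   struct pldmfw_component *component)
{
	/* write component->component_data, component->component_size bytes */
	return 0;
}

static const struct pldmfw_ops example_ops = {
	.match_record		= pldmfw_op_pci_match_record,
	.send_package_data	= example_send_package_data,
	.send_component_table	= example_send_component_table,
	.flash_component	= example_flash_component,
};

static int example_flash(struct device *dev, const struct firmware *fw)
{
	struct pldmfw context = {
		.ops = &example_ops,
		.dev = dev,
	};

	return pldmfw_flash_image(&context, fw);
}
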
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 66bab1bca35c..0f352c1d3c80 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -73,8 +73,11 @@
#ifndef _LINUX_PLIST_H_
#define _LINUX_PLIST_H_
-#include <linux/kernel.h>
+#include <linux/container_of.h>
#include <linux/list.h>
+#include <linux/types.h>
+
+#include <asm/bug.h>
struct plist_head {
struct list_head node_list;
diff --git a/include/linux/pm.h b/include/linux/pm.h
index e057d1fa2469..93cd34f00822 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -8,6 +8,7 @@
#ifndef _LINUX_PM_H
#define _LINUX_PM_H
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
@@ -20,7 +21,6 @@
* Callbacks for platform drivers to implement.
*/
extern void (*pm_power_off)(void);
-extern void (*pm_power_off_prepare)(void);
struct device; /* we have a circular dep with device.h */
#ifdef CONFIG_VT_CONSOLE_SLEEP
@@ -35,11 +35,19 @@ static inline void pm_vt_switch_unregister(struct device *dev)
}
#endif /* CONFIG_VT_CONSOLE_SLEEP */
+#ifdef CONFIG_CXL_SUSPEND
+bool cxl_mem_active(void);
+#else
+static inline bool cxl_mem_active(void)
+{
+ return false;
+}
+#endif
+
/*
* Device power management
*/
-struct device;
#ifdef CONFIG_PM
extern const char power_group_name[]; /* = "power" */
@@ -301,57 +309,116 @@ struct dev_pm_ops {
int (*runtime_idle)(struct device *dev);
};
+#define SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ .suspend = pm_sleep_ptr(suspend_fn), \
+ .resume = pm_sleep_ptr(resume_fn), \
+ .freeze = pm_sleep_ptr(suspend_fn), \
+ .thaw = pm_sleep_ptr(resume_fn), \
+ .poweroff = pm_sleep_ptr(suspend_fn), \
+ .restore = pm_sleep_ptr(resume_fn),
+
+#define LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ .suspend_late = pm_sleep_ptr(suspend_fn), \
+ .resume_early = pm_sleep_ptr(resume_fn), \
+ .freeze_late = pm_sleep_ptr(suspend_fn), \
+ .thaw_early = pm_sleep_ptr(resume_fn), \
+ .poweroff_late = pm_sleep_ptr(suspend_fn), \
+ .restore_early = pm_sleep_ptr(resume_fn),
+
+#define NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ .suspend_noirq = pm_sleep_ptr(suspend_fn), \
+ .resume_noirq = pm_sleep_ptr(resume_fn), \
+ .freeze_noirq = pm_sleep_ptr(suspend_fn), \
+ .thaw_noirq = pm_sleep_ptr(resume_fn), \
+ .poweroff_noirq = pm_sleep_ptr(suspend_fn), \
+ .restore_noirq = pm_sleep_ptr(resume_fn),
+
+#define RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ .runtime_suspend = suspend_fn, \
+ .runtime_resume = resume_fn, \
+ .runtime_idle = idle_fn,
+
#ifdef CONFIG_PM_SLEEP
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
- .suspend = suspend_fn, \
- .resume = resume_fn, \
- .freeze = suspend_fn, \
- .thaw = resume_fn, \
- .poweroff = suspend_fn, \
- .restore = resume_fn,
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#else
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
#ifdef CONFIG_PM_SLEEP
#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
- .suspend_late = suspend_fn, \
- .resume_early = resume_fn, \
- .freeze_late = suspend_fn, \
- .thaw_early = resume_fn, \
- .poweroff_late = suspend_fn, \
- .restore_early = resume_fn,
+ LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#else
#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
#ifdef CONFIG_PM_SLEEP
#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
- .suspend_noirq = suspend_fn, \
- .resume_noirq = resume_fn, \
- .freeze_noirq = suspend_fn, \
- .thaw_noirq = resume_fn, \
- .poweroff_noirq = suspend_fn, \
- .restore_noirq = resume_fn,
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#else
#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
#ifdef CONFIG_PM
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
- .runtime_suspend = suspend_fn, \
- .runtime_resume = resume_fn, \
- .runtime_idle = idle_fn,
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
#else
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
#endif
+#define _DEFINE_DEV_PM_OPS(name, \
+ suspend_fn, resume_fn, \
+ runtime_suspend_fn, runtime_resume_fn, idle_fn) \
+const struct dev_pm_ops name = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ RUNTIME_PM_OPS(runtime_suspend_fn, runtime_resume_fn, idle_fn) \
+}
+
+#ifdef CONFIG_PM
+#define _EXPORT_DEV_PM_OPS(name, sec, ns) \
+ const struct dev_pm_ops name; \
+ __EXPORT_SYMBOL(name, sec, ns); \
+ const struct dev_pm_ops name
+#else
+#define _EXPORT_DEV_PM_OPS(name, sec, ns) \
+ static __maybe_unused const struct dev_pm_ops __static_##name
+#endif
+
+#define EXPORT_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "", "")
+#define EXPORT_GPL_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "_gpl", "")
+#define EXPORT_NS_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "", #ns)
+#define EXPORT_NS_GPL_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "_gpl", #ns)
+
/*
* Use this if you want to use the same suspend and resume callbacks for suspend
* to RAM and hibernation.
+ *
+ * If the underlying dev_pm_ops struct symbol has to be exported, use
+ * EXPORT_SIMPLE_DEV_PM_OPS() or EXPORT_GPL_SIMPLE_DEV_PM_OPS() instead.
*/
+#define DEFINE_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+ _DEFINE_DEV_PM_OPS(name, suspend_fn, resume_fn, NULL, NULL, NULL)
+
+#define EXPORT_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+ EXPORT_DEV_PM_OPS(name) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+#define EXPORT_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+ EXPORT_GPL_DEV_PM_OPS(name) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+#define EXPORT_NS_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \
+ EXPORT_NS_DEV_PM_OPS(name, ns) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+#define EXPORT_NS_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \
+ EXPORT_NS_GPL_DEV_PM_OPS(name, ns) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+
+/* Deprecated. Use DEFINE_SIMPLE_DEV_PM_OPS() instead. */
#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
-const struct dev_pm_ops name = { \
+const struct dev_pm_ops __maybe_unused name = { \
SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
}
@@ -367,13 +434,19 @@ const struct dev_pm_ops name = { \
* suspend and "early" resume callback pointers, .suspend_late() and
* .resume_early(), to the same routines as .runtime_suspend() and
* .runtime_resume(), respectively (and analogously for hibernation).
+ *
+ * Deprecated. You most likely don't want this macro. Use
+ * DEFINE_RUNTIME_DEV_PM_OPS() instead.
*/
#define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
-const struct dev_pm_ops name = { \
+const struct dev_pm_ops __maybe_unused name = { \
SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
}
+#define pm_ptr(_ptr) PTR_IF(IS_ENABLED(CONFIG_PM), (_ptr))
+#define pm_sleep_ptr(_ptr) PTR_IF(IS_ENABLED(CONFIG_PM_SLEEP), (_ptr))
+
/*
* PM_EVENT_ messages
*
@@ -494,6 +567,7 @@ const struct dev_pm_ops name = { \
*/
enum rpm_status {
+ RPM_INVALID = -1,
RPM_ACTIVE = 0,
RPM_RESUMING,
RPM_SUSPENDED,
@@ -531,6 +605,8 @@ struct pm_subsys_data {
spinlock_t lock;
unsigned int refcount;
#ifdef CONFIG_PM_CLK
+ unsigned int clock_op_might_sleep;
+ struct mutex clock_mutex;
struct list_head clock_list;
#endif
#ifdef CONFIG_PM_GENERIC_DOMAINS
@@ -544,31 +620,17 @@ struct pm_subsys_data {
* These flags can be set by device drivers at the probe time. They need not be
* cleared by the drivers as the driver core will take care of that.
*
- * NEVER_SKIP: Do not skip all system suspend/resume callbacks for the device.
- * SMART_PREPARE: Check the return value of the driver's ->prepare callback.
- * SMART_SUSPEND: No need to resume the device from runtime suspend.
- * LEAVE_SUSPENDED: Avoid resuming the device during system resume if possible.
- *
- * Setting SMART_PREPARE instructs bus types and PM domains which may want
- * system suspend/resume callbacks to be skipped for the device to return 0 from
- * their ->prepare callbacks if the driver's ->prepare callback returns 0 (in
- * other words, the system suspend/resume callbacks can only be skipped for the
- * device if its driver doesn't object against that). This flag has no effect
- * if NEVER_SKIP is set.
- *
- * Setting SMART_SUSPEND instructs bus types and PM domains which may want to
- * runtime resume the device upfront during system suspend that doing so is not
- * necessary from the driver's perspective. It also may cause them to skip
- * invocations of the ->suspend_late and ->suspend_noirq callbacks provided by
- * the driver if they decide to leave the device in runtime suspend.
- *
- * Setting LEAVE_SUSPENDED informs the PM core and middle-layer code that the
- * driver prefers the device to be left in suspend after system resume.
+ * NO_DIRECT_COMPLETE: Do not apply direct-complete optimization to the device.
+ * SMART_PREPARE: Take the driver ->prepare callback return value into account.
+ * SMART_SUSPEND: Avoid resuming the device from runtime suspend.
+ * MAY_SKIP_RESUME: Allow driver "noirq" and "early" callbacks to be skipped.
+ *
+ * See Documentation/driver-api/pm/devices.rst for details.
*/
-#define DPM_FLAG_NEVER_SKIP BIT(0)
+#define DPM_FLAG_NO_DIRECT_COMPLETE BIT(0)
#define DPM_FLAG_SMART_PREPARE BIT(1)
#define DPM_FLAG_SMART_SUSPEND BIT(2)
-#define DPM_FLAG_LEAVE_SUSPENDED BIT(3)
+#define DPM_FLAG_MAY_SKIP_RESUME BIT(3)
struct dev_pm_info {
pm_message_t power_state;
@@ -598,7 +660,7 @@ struct dev_pm_info {
#endif
#ifdef CONFIG_PM
struct hrtimer suspend_timer;
- unsigned long timer_expires;
+ u64 timer_expires;
struct work_struct work;
wait_queue_head_t wait_queue;
struct wake_irq *wakeirq;
@@ -608,6 +670,7 @@ struct dev_pm_info {
unsigned int idle_notification:1;
unsigned int request_pending:1;
unsigned int deferred_resume:1;
+ unsigned int needs_force_resume:1;
unsigned int runtime_auto:1;
bool ignore_children:1;
unsigned int no_callbacks:1;
@@ -618,6 +681,7 @@ struct dev_pm_info {
unsigned int links_count;
enum rpm_request request;
enum rpm_status runtime_status;
+ enum rpm_status last_status;
int runtime_error;
int autosuspend_delay;
u64 last_busy;
@@ -727,11 +791,11 @@ extern int dpm_suspend_late(pm_message_t state);
extern int dpm_suspend(pm_message_t state);
extern int dpm_prepare(pm_message_t state);
-extern void __suspend_report_result(const char *function, void *fn, int ret);
+extern void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret);
-#define suspend_report_result(fn, ret) \
+#define suspend_report_result(dev, fn, ret) \
do { \
- __suspend_report_result(__func__, fn, ret); \
+ __suspend_report_result(__func__, dev, fn, ret); \
} while (0)
extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);
@@ -758,8 +822,8 @@ extern int pm_generic_poweroff_late(struct device *dev);
extern int pm_generic_poweroff(struct device *dev);
extern void pm_generic_complete(struct device *dev);
-extern bool dev_pm_may_skip_resume(struct device *dev);
-extern bool dev_pm_smart_suspend_and_suspended(struct device *dev);
+extern bool dev_pm_skip_resume(struct device *dev);
+extern bool dev_pm_skip_suspend(struct device *dev);
#else /* !CONFIG_PM_SLEEP */
@@ -771,7 +835,7 @@ static inline int dpm_suspend_start(pm_message_t state)
return 0;
}
-#define suspend_report_result(fn, ret) do {} while (0)
+#define suspend_report_result(dev, fn, ret) do {} while (0)
static inline int device_pm_wait_for_dev(struct device *a, struct device *b)
{
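
The new always-defined macros pair with pm_sleep_ptr()/pm_ptr() so the callbacks, and the ops structure itself, can be discarded by the linker when CONFIG_PM_SLEEP is off, with no #ifdef in the driver. A sketch (example_* names hypothetical):

#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	return 0;	/* quiesce the device */
}

static int example_resume(struct device *dev)
{
	return 0;	/* bring the device back up */
}

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend,
				example_resume);

/* In the driver definition: .driver.pm = pm_sleep_ptr(&example_pm_ops) */
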
diff --git a/include/linux/pm2301_charger.h b/include/linux/pm2301_charger.h
deleted file mode 100644
index b8fac96f05aa..000000000000
--- a/include/linux/pm2301_charger.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * PM2301 charger driver.
- *
- * Copyright (C) 2012 ST Ericsson Corporation
- *
- * Contact: Olivier LAUNAY (olivier.launay@stericsson.com
- */
-
-#ifndef __LINUX_PM2301_H
-#define __LINUX_PM2301_H
-
-/**
- * struct pm2xxx_bm_charger_parameters - Charger specific parameters
- * @ac_volt_max: maximum allowed AC charger voltage in mV
- * @ac_curr_max: maximum allowed AC charger current in mA
- */
-struct pm2xxx_bm_charger_parameters {
- int ac_volt_max;
- int ac_curr_max;
-};
-
-/**
- * struct pm2xxx_bm_data - pm2xxx battery management data
- * @enable_overshoot flag to enable VBAT overshoot control
- * @chg_params charger parameters
- */
-struct pm2xxx_bm_data {
- bool enable_overshoot;
- const struct pm2xxx_bm_charger_parameters *chg_params;
-};
-
-struct pm2xxx_charger_platform_data {
- char **supplied_to;
- size_t num_supplicants;
- int i2c_bus;
- const char *label;
- int gpio_irq_number;
- unsigned int lpn_gpio;
- int irq_type;
-};
-
-struct pm2xxx_platform_data {
- struct pm2xxx_charger_platform_data *wall_charger;
- struct pm2xxx_bm_data *battery;
-};
-
-#endif /* __LINUX_PM2301_H */
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
index 8ddc7860e131..ada3a0ab10bf 100644
--- a/include/linux/pm_clock.h
+++ b/include/linux/pm_clock.h
@@ -47,6 +47,7 @@ extern void pm_clk_remove(struct device *dev, const char *con_id);
extern void pm_clk_remove_clk(struct device *dev, struct clk *clk);
extern int pm_clk_suspend(struct device *dev);
extern int pm_clk_resume(struct device *dev);
+extern int devm_pm_clk_create(struct device *dev);
#else
static inline bool pm_clk_no_clocks(struct device *dev)
{
@@ -83,6 +84,10 @@ static inline void pm_clk_remove(struct device *dev, const char *con_id)
static inline void pm_clk_remove_clk(struct device *dev, struct clk *clk)
{
}
+static inline int devm_pm_clk_create(struct device *dev)
+{
+ return -EINVAL;
+}
#endif
#ifdef CONFIG_HAVE_CLK
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 9ec78ee53652..ebc351698090 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -9,6 +9,7 @@
#define _LINUX_PM_DOMAIN_H
#include <linux/device.h>
+#include <linux/ktime.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/err.h>
@@ -55,6 +56,10 @@
*
* GENPD_FLAG_RPM_ALWAYS_ON: Instructs genpd to always keep the PM domain
* powered on except for system suspend.
+ *
+ * GENPD_FLAG_MIN_RESIDENCY: Enable the genpd governor to consider its
+ * components' next wakeup when determining the
+ * optimal idle state.
*/
#define GENPD_FLAG_PM_CLK (1U << 0)
#define GENPD_FLAG_IRQ_SAFE (1U << 1)
@@ -62,10 +67,18 @@
#define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3)
#define GENPD_FLAG_CPU_DOMAIN (1U << 4)
#define GENPD_FLAG_RPM_ALWAYS_ON (1U << 5)
+#define GENPD_FLAG_MIN_RESIDENCY (1U << 6)
enum gpd_status {
- GPD_STATE_ACTIVE = 0, /* PM domain is active */
- GPD_STATE_POWER_OFF, /* PM domain is off */
+ GENPD_STATE_ON = 0, /* PM domain is on */
+ GENPD_STATE_OFF, /* PM domain is off */
+};
+
+enum genpd_notification {
+ GENPD_NOTIFY_PRE_OFF = 0,
+ GENPD_NOTIFY_OFF,
+ GENPD_NOTIFY_PRE_ON,
+ GENPD_NOTIFY_ON,
};
struct dev_power_governor {
@@ -78,12 +91,22 @@ struct gpd_dev_ops {
int (*stop)(struct device *dev);
};
+struct genpd_governor_data {
+ s64 max_off_time_ns;
+ bool max_off_time_changed;
+ ktime_t next_wakeup;
+ bool cached_power_down_ok;
+ bool cached_power_down_state_idx;
+};
+
struct genpd_power_state {
s64 power_off_latency_ns;
s64 power_on_latency_ns;
s64 residency_ns;
+ u64 usage;
+ u64 rejected;
struct fwnode_handle *fwnode;
- ktime_t idle_time;
+ u64 idle_time;
void *data;
};
@@ -95,10 +118,11 @@ struct generic_pm_domain {
struct device dev;
struct dev_pm_domain domain; /* PM domain operations */
struct list_head gpd_list_node; /* Node in the global PM domains list */
- struct list_head master_links; /* Links with PM domain as a master */
- struct list_head slave_links; /* Links with PM domain as a slave */
+ struct list_head parent_links; /* Links with PM domain as a parent */
+ struct list_head child_links; /* Links with PM domain as a child */
struct list_head dev_list; /* List of devices */
struct dev_power_governor *gov;
+ struct genpd_governor_data *gd; /* Data used by a genpd governor. */
struct work_struct power_off_work;
struct fwnode_handle *provider; /* Identity of the domain provider */
bool has_provider;
@@ -112,16 +136,13 @@ struct generic_pm_domain {
cpumask_var_t cpus; /* A cpumask of the attached CPUs */
int (*power_off)(struct generic_pm_domain *domain);
int (*power_on)(struct generic_pm_domain *domain);
+ struct raw_notifier_head power_notifiers; /* Power on/off notifiers */
struct opp_table *opp_table; /* OPP table of the genpd */
unsigned int (*opp_to_performance_state)(struct generic_pm_domain *genpd,
struct dev_pm_opp *opp);
int (*set_performance_state)(struct generic_pm_domain *genpd,
unsigned int state);
struct gpd_dev_ops dev_ops;
- s64 max_off_time_ns; /* Maximum allowed "suspended" time. */
- bool max_off_time_changed;
- bool cached_power_down_ok;
- bool cached_power_down_state_idx;
int (*attach_dev)(struct generic_pm_domain *domain,
struct device *dev);
void (*detach_dev)(struct generic_pm_domain *domain,
@@ -132,8 +153,8 @@ struct generic_pm_domain {
unsigned int state_count);
unsigned int state_count; /* number of states */
unsigned int state_idx; /* state that genpd will go to when off */
- ktime_t on_time;
- ktime_t accounting_time;
+ u64 on_time;
+ u64 accounting_time;
const struct genpd_lock_ops *lock_ops;
union {
struct mutex mlock;
@@ -151,10 +172,10 @@ static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
}
struct gpd_link {
- struct generic_pm_domain *master;
- struct list_head master_node;
- struct generic_pm_domain *slave;
- struct list_head slave_node;
+ struct generic_pm_domain *parent;
+ struct list_head parent_node;
+ struct generic_pm_domain *child;
+ struct list_head child_node;
/* Sub-domain's per-parent-domain performance state */
unsigned int performance_state;
@@ -165,6 +186,7 @@ struct gpd_timing_data {
s64 suspend_latency_ns;
s64 resume_latency_ns;
s64 effective_constraint_ns;
+ ktime_t next_wakeup;
bool constraint_changed;
bool cached_suspend_ok;
};
@@ -176,10 +198,13 @@ struct pm_domain_data {
struct generic_pm_domain_data {
struct pm_domain_data base;
- struct gpd_timing_data td;
+ struct gpd_timing_data *td;
struct notifier_block nb;
+ struct notifier_block *power_nb;
int cpu;
unsigned int performance_state;
+ unsigned int default_pstate;
+ unsigned int rpm_pstate;
void *data;
};
@@ -204,6 +229,9 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
struct dev_power_governor *gov, bool is_off);
int pm_genpd_remove(struct generic_pm_domain *genpd);
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state);
+int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb);
+int dev_pm_genpd_remove_notifier(struct device *dev);
+void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next);
extern struct dev_power_governor simple_qos_governor;
extern struct dev_power_governor pm_domain_always_on_gov;
@@ -242,25 +270,39 @@ static inline int pm_genpd_init(struct generic_pm_domain *genpd,
}
static inline int pm_genpd_remove(struct generic_pm_domain *genpd)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int dev_pm_genpd_set_performance_state(struct device *dev,
unsigned int state)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+
+static inline int dev_pm_genpd_add_notifier(struct device *dev,
+ struct notifier_block *nb)
+{
+ return -EOPNOTSUPP;
}
+static inline int dev_pm_genpd_remove_notifier(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
+{ }
+
#define simple_qos_governor (*(struct dev_power_governor *)(NULL))
#define pm_domain_always_on_gov (*(struct dev_power_governor *)(NULL))
#endif
#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP
-void pm_genpd_syscore_poweroff(struct device *dev);
-void pm_genpd_syscore_poweron(struct device *dev);
+void dev_pm_genpd_suspend(struct device *dev);
+void dev_pm_genpd_resume(struct device *dev);
#else
-static inline void pm_genpd_syscore_poweroff(struct device *dev) {}
-static inline void pm_genpd_syscore_poweron(struct device *dev) {}
+static inline void dev_pm_genpd_suspend(struct device *dev) {}
+static inline void dev_pm_genpd_resume(struct device *dev) {}
#endif
/* OF PM domain providers */
@@ -301,13 +343,13 @@ struct device *genpd_dev_pm_attach_by_name(struct device *dev,
static inline int of_genpd_add_provider_simple(struct device_node *np,
struct generic_pm_domain *genpd)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int of_genpd_add_provider_onecell(struct device_node *np,
struct genpd_onecell_data *data)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline void of_genpd_del_provider(struct device_node *np) {}
@@ -363,7 +405,7 @@ static inline struct device *genpd_dev_pm_attach_by_name(struct device *dev,
static inline
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
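
A sketch of a consumer driver using the new genpd power notifiers to save and restore context around domain power transitions (callback and names hypothetical):

#include <linux/notifier.h>
#include <linux/pm_domain.h>

static int example_genpd_notify(struct notifier_block *nb,
				unsigned long action, void *data)
{
	switch (action) {
	case GENPD_NOTIFY_PRE_OFF:
		/* save hardware context before the domain powers down */
		break;
	case GENPD_NOTIFY_ON:
		/* restore hardware context after the domain powered up */
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_genpd_notify,
};

static int example_attach(struct device *dev)
{
	return dev_pm_genpd_add_notifier(dev, &example_nb);
}
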
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 747861816f4f..dc1fb5890792 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -11,6 +11,7 @@
#ifndef __LINUX_OPP_H__
#define __LINUX_OPP_H__
+#include <linux/energy_model.h>
#include <linux/err.h>
#include <linux/notifier.h>
@@ -31,61 +32,84 @@ enum dev_pm_opp_event {
* @u_volt_min: Minimum voltage in microvolts corresponding to this OPP
* @u_volt_max: Maximum voltage in microvolts corresponding to this OPP
* @u_amp: Maximum current drawn by the device in microamperes
+ * @u_watt: Power used by the device in microwatts
*
- * This structure stores the voltage/current values for a single power supply.
+ * This structure stores the voltage/current/power values for a single power
+ * supply.
*/
struct dev_pm_opp_supply {
unsigned long u_volt;
unsigned long u_volt_min;
unsigned long u_volt_max;
unsigned long u_amp;
+ unsigned long u_watt;
};
/**
- * struct dev_pm_opp_info - OPP freq/voltage/current values
- * @rate: Target clk rate in hz
- * @supplies: Array of voltage/current values for all power supplies
+ * struct dev_pm_opp_icc_bw - Interconnect bandwidth values
+ * @avg: Average bandwidth corresponding to this OPP (in icc units)
+ * @peak: Peak bandwidth corresponding to this OPP (in icc units)
*
- * This structure stores the freq/voltage/current values for a single OPP.
+ * This structure stores the bandwidth values for a single interconnect path.
*/
-struct dev_pm_opp_info {
- unsigned long rate;
- struct dev_pm_opp_supply *supplies;
+struct dev_pm_opp_icc_bw {
+ u32 avg;
+ u32 peak;
};
+typedef int (*config_regulators_t)(struct device *dev,
+ struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
+ struct regulator **regulators, unsigned int count);
+
+typedef int (*config_clks_t)(struct device *dev, struct opp_table *opp_table,
+ struct dev_pm_opp *opp, void *data, bool scaling_down);
+
/**
- * struct dev_pm_set_opp_data - Set OPP data
- * @old_opp: Old OPP info
- * @new_opp: New OPP info
- * @regulators: Array of regulator pointers
- * @regulator_count: Number of regulators
- * @clk: Pointer to clk
- * @dev: Pointer to the struct device
+ * struct dev_pm_opp_config - Device OPP configuration values
+ * @clk_names: Clk names, NULL terminated array.
+ * @config_clks: Custom set clk helper.
+ * @prop_name: Name to postfix to properties.
+ * @config_regulators: Custom set regulator helper.
+ * @supported_hw: Array of hierarchy of versions to match.
+ * @supported_hw_count: Number of elements in the array.
+ * @regulator_names: Array of pointers to the names of the regulator, NULL terminated.
+ * @genpd_names: Null terminated array of pointers containing names of genpd to
+ * attach.
+ * @virt_devs: Pointer to return the array of virtual devices.
*
- * This structure contains all information required for setting an OPP.
+ * This structure contains platform specific OPP configurations for the device.
*/
-struct dev_pm_set_opp_data {
- struct dev_pm_opp_info old_opp;
- struct dev_pm_opp_info new_opp;
-
- struct regulator **regulators;
- unsigned int regulator_count;
- struct clk *clk;
- struct device *dev;
+struct dev_pm_opp_config {
+ /* NULL terminated */
+ const char * const *clk_names;
+ config_clks_t config_clks;
+ const char *prop_name;
+ config_regulators_t config_regulators;
+ const unsigned int *supported_hw;
+ unsigned int supported_hw_count;
+ const char * const *regulator_names;
+ const char * const *genpd_names;
+ struct device ***virt_devs;
};
#if defined(CONFIG_PM_OPP)
struct opp_table *dev_pm_opp_get_opp_table(struct device *dev);
-struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index);
void dev_pm_opp_put_opp_table(struct opp_table *opp_table);
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
+int dev_pm_opp_get_supplies(struct dev_pm_opp *opp, struct dev_pm_opp_supply *supplies);
+
+unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp);
+
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp);
+unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
+ unsigned int index);
+
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
int dev_pm_opp_get_opp_count(struct device *dev);
@@ -97,16 +121,23 @@ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev);
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq,
bool available);
-struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
- unsigned int level);
-
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq);
-struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
- unsigned long u_volt);
+
+struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
+ unsigned int level);
+struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
+ unsigned int *level);
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq);
+
+struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
+ unsigned int *bw, int index);
+
+struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
+ unsigned int *bw, int index);
+
void dev_pm_opp_put(struct dev_pm_opp *opp);
int dev_pm_opp_add(struct device *dev, unsigned long freq,
@@ -125,33 +156,31 @@ int dev_pm_opp_disable(struct device *dev, unsigned long freq);
int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb);
int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb);
-struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int count);
-void dev_pm_opp_put_supported_hw(struct opp_table *opp_table);
-struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name);
-void dev_pm_opp_put_prop_name(struct opp_table *opp_table);
-struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count);
-void dev_pm_opp_put_regulators(struct opp_table *opp_table);
-struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name);
-void dev_pm_opp_put_clkname(struct opp_table *opp_table);
-struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
-void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table);
-struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs);
-void dev_pm_opp_detach_genpd(struct opp_table *opp_table);
+int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config);
+int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config);
+void dev_pm_opp_clear_config(int token);
+int dev_pm_opp_config_clks_simple(struct device *dev,
+ struct opp_table *opp_table, struct dev_pm_opp *opp, void *data,
+ bool scaling_down);
+
+struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, struct opp_table *dst_table, struct dev_pm_opp *src_opp);
int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate);
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
+int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp);
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
void dev_pm_opp_remove_table(struct device *dev);
void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask);
+int dev_pm_opp_sync_regulators(struct device *dev);
#else
static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {}
@@ -161,6 +190,16 @@ static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
return 0;
}
+static inline int dev_pm_opp_get_supplies(struct dev_pm_opp *opp, struct dev_pm_opp_supply *supplies)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp)
+{
+ return 0;
+}
+
static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
{
return 0;
@@ -171,6 +210,13 @@ static inline unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
return 0;
}
+static inline
+unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
+ unsigned int index)
+{
+ return 0;
+}
+
static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
return false;
@@ -201,34 +247,46 @@ static inline unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
return 0;
}
+static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
+ unsigned int level)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
+ unsigned int *level)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq, bool available)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
- unsigned int level)
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
+ unsigned long *freq)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
- unsigned long u_volt)
+static inline struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
+ unsigned int *bw, int index)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
- unsigned long *freq)
+static inline struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
+ unsigned int *bw, int index)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {}
@@ -236,7 +294,7 @@ static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {}
static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
unsigned long u_volt)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
@@ -267,72 +325,57 @@ static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq)
static inline int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb)
{
- return -ENOTSUPP;
-}
-
-static inline struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
- const u32 *versions,
- unsigned int count)
-{
- return ERR_PTR(-ENOTSUPP);
+ return -EOPNOTSUPP;
}
-static inline void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) {}
-
-static inline struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
- int (*set_opp)(struct dev_pm_set_opp_data *data))
+static inline int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
{
- return ERR_PTR(-ENOTSUPP);
+ return -EOPNOTSUPP;
}
-static inline void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) {}
-
-static inline struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
+static inline int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
{
- return ERR_PTR(-ENOTSUPP);
+ return -EOPNOTSUPP;
}
-static inline void dev_pm_opp_put_prop_name(struct opp_table *opp_table) {}
+static inline void dev_pm_opp_clear_config(int token) {}
-static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count)
+static inline int dev_pm_opp_config_clks_simple(struct device *dev,
+ struct opp_table *opp_table, struct dev_pm_opp *opp, void *data,
+ bool scaling_down)
{
- return ERR_PTR(-ENOTSUPP);
+ return -EOPNOTSUPP;
}
-static inline void dev_pm_opp_put_regulators(struct opp_table *opp_table) {}
-
-static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name)
+static inline struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table,
+ struct opp_table *dst_table, struct dev_pm_opp *src_opp)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
-static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {}
-
-static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs)
+static inline int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate)
{
- return ERR_PTR(-ENOTSUPP);
+ return -EOPNOTSUPP;
}
-static inline void dev_pm_opp_detach_genpd(struct opp_table *opp_table) {}
-
-static inline int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate)
+static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
-static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
+static inline int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
@@ -348,37 +391,59 @@ static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask
{
}
+static inline int dev_pm_opp_sync_regulators(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_PM_OPP */
#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
int dev_pm_opp_of_add_table(struct device *dev);
int dev_pm_opp_of_add_table_indexed(struct device *dev, int index);
+int devm_pm_opp_of_add_table_indexed(struct device *dev, int index);
void dev_pm_opp_of_remove_table(struct device *dev);
+int devm_pm_opp_of_add_table(struct device *dev);
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask);
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
int of_get_required_opp_performance_state(struct device_node *np, int index);
-void dev_pm_opp_of_register_em(struct cpumask *cpus);
+int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table);
+int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus);
+static inline void dev_pm_opp_of_unregister_em(struct device *dev)
+{
+ em_dev_unregister_perf_domain(dev);
+}
#else
static inline int dev_pm_opp_of_add_table(struct device *dev)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+
+static inline int devm_pm_opp_of_add_table_indexed(struct device *dev, int index)
+{
+ return -EOPNOTSUPP;
}
static inline void dev_pm_opp_of_remove_table(struct device *dev)
{
}
+static inline int devm_pm_opp_of_add_table(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
@@ -387,7 +452,7 @@ static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpum
static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
@@ -400,14 +465,170 @@ static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
return NULL;
}
-static inline void dev_pm_opp_of_register_em(struct cpumask *cpus)
+static inline int dev_pm_opp_of_register_em(struct device *dev,
+ struct cpumask *cpus)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void dev_pm_opp_of_unregister_em(struct device *dev)
{
}
static inline int of_get_required_opp_performance_state(struct device_node *np, int index)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+
+static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table)
+{
+ return -EOPNOTSUPP;
}
#endif
+/* OPP Configuration helpers */
+
+/* Regulators helpers */
+static inline int dev_pm_opp_set_regulators(struct device *dev,
+ const char * const names[])
+{
+ struct dev_pm_opp_config config = {
+ .regulator_names = names,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_regulators(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+static inline int devm_pm_opp_set_regulators(struct device *dev,
+ const char * const names[])
+{
+ struct dev_pm_opp_config config = {
+ .regulator_names = names,
+ };
+
+ return devm_pm_opp_set_config(dev, &config);
+}
+
+/* Supported-hw helpers */
+static inline int dev_pm_opp_set_supported_hw(struct device *dev,
+ const u32 *versions,
+ unsigned int count)
+{
+ struct dev_pm_opp_config config = {
+ .supported_hw = versions,
+ .supported_hw_count = count,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_supported_hw(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+static inline int devm_pm_opp_set_supported_hw(struct device *dev,
+ const u32 *versions,
+ unsigned int count)
+{
+ struct dev_pm_opp_config config = {
+ .supported_hw = versions,
+ .supported_hw_count = count,
+ };
+
+ return devm_pm_opp_set_config(dev, &config);
+}
+
+/* clkname helpers */
+static inline int dev_pm_opp_set_clkname(struct device *dev, const char *name)
+{
+ const char *names[] = { name, NULL };
+ struct dev_pm_opp_config config = {
+ .clk_names = names,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_clkname(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+static inline int devm_pm_opp_set_clkname(struct device *dev, const char *name)
+{
+ const char *names[] = { name, NULL };
+ struct dev_pm_opp_config config = {
+ .clk_names = names,
+ };
+
+ return devm_pm_opp_set_config(dev, &config);
+}
+
+/* config-regulators helpers */
+static inline int dev_pm_opp_set_config_regulators(struct device *dev,
+ config_regulators_t helper)
+{
+ struct dev_pm_opp_config config = {
+ .config_regulators = helper,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_config_regulators(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+/* genpd helpers */
+static inline int dev_pm_opp_attach_genpd(struct device *dev,
+ const char * const *names,
+ struct device ***virt_devs)
+{
+ struct dev_pm_opp_config config = {
+ .genpd_names = names,
+ .virt_devs = virt_devs,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_detach_genpd(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+static inline int devm_pm_opp_attach_genpd(struct device *dev,
+ const char * const *names,
+ struct device ***virt_devs)
+{
+ struct dev_pm_opp_config config = {
+ .genpd_names = names,
+ .virt_devs = virt_devs,
+ };
+
+ return devm_pm_opp_set_config(dev, &config);
+}
+
+/* prop-name helpers */
+static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
+{
+ struct dev_pm_opp_config config = {
+ .prop_name = name,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_prop_name(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
#endif /* __LINUX_OPP_H__ */
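
A quick usage sketch of the token-based API introduced above (driver,
regulator and clock names are hypothetical; the regulator list is assumed
to be NULL-terminated like the clock list in dev_pm_opp_set_clkname()):

#include <linux/pm_opp.h>

static const char * const foo_regulators[] = { "vdd", NULL };
static const char * const foo_clks[] = { "core", NULL };

static int foo_opp_init(struct device *dev)
{
	struct dev_pm_opp_config config = {
		.regulator_names = foo_regulators,
		.clk_names = foo_clks,
	};

	/*
	 * On success this is assumed to return a non-negative token, which
	 * is what dev_pm_opp_clear_config() and the put helpers above take;
	 * devm_pm_opp_set_config() avoids the manual teardown entirely.
	 */
	return dev_pm_opp_set_config(dev, &config);
}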
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 19eafca5680e..4a69d4af3ff8 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -1,22 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_PM_QOS_H
-#define _LINUX_PM_QOS_H
-/* interface for the pm_qos_power infrastructure of the linux kernel.
+/*
+ * Definitions related to Power Management Quality of Service (PM QoS).
*
- * Mark Gross <mgross@linux.intel.com>
+ * Copyright (C) 2020 Intel Corporation
+ *
+ * Authors:
+ * Mark Gross <mgross@linux.intel.com>
+ * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*/
+
+#ifndef _LINUX_PM_QOS_H
+#define _LINUX_PM_QOS_H
+
#include <linux/plist.h>
#include <linux/notifier.h>
#include <linux/device.h>
-#include <linux/workqueue.h>
-
-enum {
- PM_QOS_RESERVED = 0,
- PM_QOS_CPU_DMA_LATENCY,
-
- /* insert new class ID */
- PM_QOS_NUM_CLASSES,
-};
enum pm_qos_flags_status {
PM_QOS_FLAGS_UNDEFINED = -1,
@@ -29,7 +27,7 @@ enum pm_qos_flags_status {
#define PM_QOS_LATENCY_ANY S32_MAX
#define PM_QOS_LATENCY_ANY_NS ((s64)PM_QOS_LATENCY_ANY * NSEC_PER_USEC)
-#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
+#define PM_QOS_CPU_LATENCY_DEFAULT_VALUE (2000 * USEC_PER_SEC)
#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE PM_QOS_LATENCY_ANY
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS
@@ -40,22 +38,10 @@ enum pm_qos_flags_status {
#define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
-struct pm_qos_request {
- struct plist_node node;
- int pm_qos_class;
- struct delayed_work work; /* for pm_qos_update_request_timeout */
-};
-
-struct pm_qos_flags_request {
- struct list_head node;
- s32 flags; /* Do not change to 64 bit */
-};
-
enum pm_qos_type {
PM_QOS_UNITIALIZED,
PM_QOS_MAX, /* return the largest value */
PM_QOS_MIN, /* return the smallest value */
- PM_QOS_SUM /* return the sum */
};
/*
@@ -72,6 +58,16 @@ struct pm_qos_constraints {
struct blocking_notifier_head *notifiers;
};
+struct pm_qos_request {
+ struct plist_node node;
+ struct pm_qos_constraints *qos;
+};
+
+struct pm_qos_flags_request {
+ struct list_head node;
+ s32 flags; /* Do not change to 64 bit */
+};
+
struct pm_qos_flags {
struct list_head list;
s32 effective_flags; /* Do not change to 64 bit */
@@ -140,24 +136,31 @@ static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
return req->dev != NULL;
}
+s32 pm_qos_read_value(struct pm_qos_constraints *c);
int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
enum pm_qos_req_action action, int value);
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
struct pm_qos_flags_request *req,
enum pm_qos_req_action action, s32 val);
-void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
- s32 value);
-void pm_qos_update_request(struct pm_qos_request *req,
- s32 new_value);
-void pm_qos_update_request_timeout(struct pm_qos_request *req,
- s32 new_value, unsigned long timeout_us);
-void pm_qos_remove_request(struct pm_qos_request *req);
-
-int pm_qos_request(int pm_qos_class);
-int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
-int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
-int pm_qos_request_active(struct pm_qos_request *req);
-s32 pm_qos_read_value(struct pm_qos_constraints *c);
+
+#ifdef CONFIG_CPU_IDLE
+s32 cpu_latency_qos_limit(void);
+bool cpu_latency_qos_request_active(struct pm_qos_request *req);
+void cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value);
+void cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value);
+void cpu_latency_qos_remove_request(struct pm_qos_request *req);
+#else
+static inline s32 cpu_latency_qos_limit(void) { return INT_MAX; }
+static inline bool cpu_latency_qos_request_active(struct pm_qos_request *req)
+{
+ return false;
+}
+static inline void cpu_latency_qos_add_request(struct pm_qos_request *req,
+ s32 value) {}
+static inline void cpu_latency_qos_update_request(struct pm_qos_request *req,
+ s32 new_value) {}
+static inline void cpu_latency_qos_remove_request(struct pm_qos_request *req) {}
+#endif
#ifdef CONFIG_PM
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
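
The cpu_latency_qos_* helpers above replace the old class-based calls for
CPU wakeup latency constraints. A minimal sketch (the 20 us bound and the
request lifetime are made up for illustration):

#include <linux/pm_qos.h>

static struct pm_qos_request foo_latency_req;

static void foo_start_low_latency(void)
{
	/* Keep CPU wakeup latency within 20 us while the burst runs. */
	cpu_latency_qos_add_request(&foo_latency_req, 20);
}

static void foo_stop_low_latency(void)
{
	cpu_latency_qos_remove_request(&foo_latency_req);
}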
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 22af69d237a6..9a8151a2bdea 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -22,6 +22,40 @@
usage_count */
#define RPM_AUTO 0x08 /* Use autosuspend_delay */
+/*
+ * Use this for defining a set of PM operations to be used in all situations
+ * (system suspend, hibernation or runtime PM).
+ *
+ * Note that the behaviour differs from the deprecated UNIVERSAL_DEV_PM_OPS()
+ * macro, which uses the provided callbacks for both runtime PM and system
+ * sleep, while DEFINE_RUNTIME_DEV_PM_OPS() uses pm_runtime_force_suspend()
+ * and pm_runtime_force_resume() for its system sleep callbacks.
+ *
+ * If the underlying dev_pm_ops struct symbol has to be exported, use
+ * EXPORT_RUNTIME_DEV_PM_OPS() or EXPORT_GPL_RUNTIME_DEV_PM_OPS() instead.
+ */
+#define DEFINE_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
+ _DEFINE_DEV_PM_OPS(name, pm_runtime_force_suspend, \
+ pm_runtime_force_resume, suspend_fn, \
+ resume_fn, idle_fn)
+
+#define EXPORT_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
+ EXPORT_DEV_PM_OPS(name) = { \
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ }
+#define EXPORT_GPL_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
+ EXPORT_GPL_DEV_PM_OPS(name) = { \
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ }
+#define EXPORT_NS_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn, ns) \
+ EXPORT_NS_DEV_PM_OPS(name, ns) = { \
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ }
+#define EXPORT_NS_GPL_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn, ns) \
+ EXPORT_NS_GPL_DEV_PM_OPS(name, ns) = { \
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ }
+
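/*
 * Rough usage sketch, not part of the patch: the driver and callback names
 * are hypothetical, and pm_ptr() is assumed available from <linux/pm.h>.
 */
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* Gate clocks / cut power here. */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	return 0;
}

static DEFINE_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_runtime_suspend,
				 foo_runtime_resume, NULL);

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm = pm_ptr(&foo_pm_ops),
	},
};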
#ifdef CONFIG_PM
extern struct workqueue_struct *pm_wq;
@@ -38,7 +72,7 @@ extern int pm_runtime_force_resume(struct device *dev);
extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
extern int __pm_runtime_resume(struct device *dev, int rpmflags);
-extern int pm_runtime_get_if_in_use(struct device *dev);
+extern int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count);
extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
extern int pm_runtime_barrier(struct device *dev);
@@ -54,59 +88,159 @@ extern u64 pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_update_max_time_suspended(struct device *dev,
s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
-extern void pm_runtime_clean_up_links(struct device *dev);
extern void pm_runtime_get_suppliers(struct device *dev);
extern void pm_runtime_put_suppliers(struct device *dev);
extern void pm_runtime_new_link(struct device *dev);
-extern void pm_runtime_drop_link(struct device *dev);
+extern void pm_runtime_drop_link(struct device_link *link);
+extern void pm_runtime_release_supplier(struct device_link *link);
+
+extern int devm_pm_runtime_enable(struct device *dev);
+
+/**
+ * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
+ * @dev: Target device.
+ *
+ * Increment the runtime PM usage counter of @dev if its runtime PM status is
+ * %RPM_ACTIVE and its runtime PM usage counter is greater than 0.
+ */
+static inline int pm_runtime_get_if_in_use(struct device *dev)
+{
+ return pm_runtime_get_if_active(dev, false);
+}
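/*
 * Illustrative caller (not part of the patch) of the conditional-get
 * pattern this wrapper preserves: a positive return means the usage
 * counter was bumped and a matching put is owed; 0 or -EINVAL means
 * hands off the hardware.
 */
static void foo_read_stats(struct device *dev)
{
	if (pm_runtime_get_if_in_use(dev) <= 0)
		return;

	/* ... sample hardware counters while the device is active ... */

	pm_runtime_put(dev);
}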
+/**
+ * pm_suspend_ignore_children - Set runtime PM behavior regarding children.
+ * @dev: Target device.
+ * @enable: Whether or not to ignore possible dependencies on children.
+ *
+ * The dependencies of @dev on its children will not be taken into account by
+ * the runtime PM framework going forward if @enable is %true, or they will
+ * be taken into account otherwise.
+ */
static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
{
dev->power.ignore_children = enable;
}
+/**
+ * pm_runtime_get_noresume - Bump up runtime PM usage counter of a device.
+ * @dev: Target device.
+ */
static inline void pm_runtime_get_noresume(struct device *dev)
{
atomic_inc(&dev->power.usage_count);
}
+/**
+ * pm_runtime_put_noidle - Drop runtime PM usage counter of a device.
+ * @dev: Target device.
+ *
+ * Decrement the runtime PM usage counter of @dev unless it is 0 already.
+ */
static inline void pm_runtime_put_noidle(struct device *dev)
{
atomic_add_unless(&dev->power.usage_count, -1, 0);
}
+/**
+ * pm_runtime_suspended - Check whether or not a device is runtime-suspended.
+ * @dev: Target device.
+ *
+ * Return %true if runtime PM is enabled for @dev and its runtime PM status is
+ * %RPM_SUSPENDED, or %false otherwise.
+ *
+ * Note that the return value of this function can only be trusted if it is
+ * called under the runtime PM lock of @dev or under conditions in which
+ * runtime PM cannot be either disabled or enabled for @dev and its runtime PM
+ * status cannot change.
+ */
static inline bool pm_runtime_suspended(struct device *dev)
{
return dev->power.runtime_status == RPM_SUSPENDED
&& !dev->power.disable_depth;
}
+/**
+ * pm_runtime_active - Check whether or not a device is runtime-active.
+ * @dev: Target device.
+ *
+ * Return %true if runtime PM is disabled for @dev or its runtime PM status is
+ * %RPM_ACTIVE, or %false otherwise.
+ *
+ * Note that the return value of this function can only be trusted if it is
+ * called under the runtime PM lock of @dev or under conditions in which
+ * runtime PM cannot be either disabled or enabled for @dev and its runtime PM
+ * status cannot change.
+ */
static inline bool pm_runtime_active(struct device *dev)
{
return dev->power.runtime_status == RPM_ACTIVE
|| dev->power.disable_depth;
}
+/**
+ * pm_runtime_status_suspended - Check if runtime PM status is "suspended".
+ * @dev: Target device.
+ *
+ * Return %true if the runtime PM status of @dev is %RPM_SUSPENDED, or %false
+ * otherwise, regardless of whether or not runtime PM has been enabled for @dev.
+ *
+ * Note that the return value of this function can only be trusted if it is
+ * called under the runtime PM lock of @dev or under conditions in which the
+ * runtime PM status of @dev cannot change.
+ */
static inline bool pm_runtime_status_suspended(struct device *dev)
{
return dev->power.runtime_status == RPM_SUSPENDED;
}
+/**
+ * pm_runtime_enabled - Check if runtime PM is enabled.
+ * @dev: Target device.
+ *
+ * Return %true if runtime PM is enabled for @dev or %false otherwise.
+ *
+ * Note that the return value of this function can only be trusted if it is
+ * called under the runtime PM lock of @dev or under conditions in which
+ * runtime PM cannot be either disabled or enabled for @dev.
+ */
static inline bool pm_runtime_enabled(struct device *dev)
{
return !dev->power.disable_depth;
}
-static inline bool pm_runtime_callbacks_present(struct device *dev)
+/**
+ * pm_runtime_has_no_callbacks - Check if a device has no runtime PM callbacks.
+ * @dev: Target device.
+ *
+ * Return %true if @dev is a special device without runtime PM callbacks or
+ * %false otherwise.
+ */
+static inline bool pm_runtime_has_no_callbacks(struct device *dev)
{
- return !dev->power.no_callbacks;
+ return dev->power.no_callbacks;
}
+/**
+ * pm_runtime_mark_last_busy - Update the last access time of a device.
+ * @dev: Target device.
+ *
+ * Update the last access time of @dev used by the runtime PM autosuspend
+ * mechanism to the current time as returned by ktime_get_mono_fast_ns().
+ */
static inline void pm_runtime_mark_last_busy(struct device *dev)
{
WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns());
}
+/**
+ * pm_runtime_is_irq_safe - Check if runtime PM can work in interrupt context.
+ * @dev: Target device.
+ *
+ * Return %true if @dev has been marked as an "IRQ-safe" device (with respect
+ * to runtime PM), in which case its runtime PM callbacks can be expected to
+ * work correctly when invoked from interrupt handlers.
+ */
static inline bool pm_runtime_is_irq_safe(struct device *dev)
{
return dev->power.irq_safe;
@@ -143,6 +277,11 @@ static inline int pm_runtime_get_if_in_use(struct device *dev)
{
return -EINVAL;
}
+static inline int pm_runtime_get_if_active(struct device *dev,
+ bool ign_usage_count)
+{
+ return -EINVAL;
+}
static inline int __pm_runtime_set_status(struct device *dev,
unsigned int status) { return 0; }
static inline int pm_runtime_barrier(struct device *dev) { return 0; }
@@ -151,6 +290,8 @@ static inline void __pm_runtime_disable(struct device *dev, bool c) {}
static inline void pm_runtime_allow(struct device *dev) {}
static inline void pm_runtime_forbid(struct device *dev) {}
+static inline int devm_pm_runtime_enable(struct device *dev) { return 0; }
+
static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
static inline void pm_runtime_get_noresume(struct device *dev) {}
static inline void pm_runtime_put_noidle(struct device *dev) {}
@@ -163,7 +304,7 @@ static inline void pm_runtime_no_callbacks(struct device *dev) {}
static inline void pm_runtime_irq_safe(struct device *dev) {}
static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }
-static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; }
+static inline bool pm_runtime_has_no_callbacks(struct device *dev) { return false; }
static inline void pm_runtime_mark_last_busy(struct device *dev) {}
static inline void __pm_runtime_use_autosuspend(struct device *dev,
bool use) {}
@@ -173,105 +314,286 @@ static inline u64 pm_runtime_autosuspend_expiration(
struct device *dev) { return 0; }
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
bool enable){}
-static inline void pm_runtime_clean_up_links(struct device *dev) {}
static inline void pm_runtime_get_suppliers(struct device *dev) {}
static inline void pm_runtime_put_suppliers(struct device *dev) {}
static inline void pm_runtime_new_link(struct device *dev) {}
-static inline void pm_runtime_drop_link(struct device *dev) {}
+static inline void pm_runtime_drop_link(struct device_link *link) {}
+static inline void pm_runtime_release_supplier(struct device_link *link) {}
#endif /* !CONFIG_PM */
+/**
+ * pm_runtime_idle - Conditionally set up autosuspend of a device or suspend it.
+ * @dev: Target device.
+ *
+ * Invoke the "idle check" callback of @dev and, depending on its return value,
+ * set up autosuspend of @dev or suspend it (depending on whether or not
+ * autosuspend has been enabled for it).
+ */
static inline int pm_runtime_idle(struct device *dev)
{
return __pm_runtime_idle(dev, 0);
}
+/**
+ * pm_runtime_suspend - Suspend a device synchronously.
+ * @dev: Target device.
+ */
static inline int pm_runtime_suspend(struct device *dev)
{
return __pm_runtime_suspend(dev, 0);
}
+/**
+ * pm_runtime_autosuspend - Set up autosuspend of a device or suspend it.
+ * @dev: Target device.
+ *
+ * Set up autosuspend of @dev or suspend it (depending on whether or not
+ * autosuspend is enabled for it) without engaging its "idle check" callback.
+ */
static inline int pm_runtime_autosuspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_AUTO);
}
+/**
+ * pm_runtime_resume - Resume a device synchronously.
+ * @dev: Target device.
+ */
static inline int pm_runtime_resume(struct device *dev)
{
return __pm_runtime_resume(dev, 0);
}
+/**
+ * pm_request_idle - Queue up "idle check" execution for a device.
+ * @dev: Target device.
+ *
+ * Queue up a work item to run an equivalent of pm_runtime_idle() for @dev
+ * asynchronously.
+ */
static inline int pm_request_idle(struct device *dev)
{
return __pm_runtime_idle(dev, RPM_ASYNC);
}
+/**
+ * pm_request_resume - Queue up runtime-resume of a device.
+ * @dev: Target device.
+ */
static inline int pm_request_resume(struct device *dev)
{
return __pm_runtime_resume(dev, RPM_ASYNC);
}
+/**
+ * pm_request_autosuspend - Queue up autosuspend of a device.
+ * @dev: Target device.
+ *
+ * Queue up a work item to run an equivalent pm_runtime_autosuspend() for @dev
+ * asynchronously.
+ */
static inline int pm_request_autosuspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO);
}
+/**
+ * pm_runtime_get - Bump up usage counter and queue up resume of a device.
+ * @dev: Target device.
+ *
+ * Bump up the runtime PM usage counter of @dev and queue up a work item to
+ * carry out runtime-resume of it.
+ */
static inline int pm_runtime_get(struct device *dev)
{
return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC);
}
+/**
+ * pm_runtime_get_sync - Bump up usage counter of a device and resume it.
+ * @dev: Target device.
+ *
+ * Bump up the runtime PM usage counter of @dev and carry out runtime-resume of
+ * it synchronously.
+ *
+ * The possible return values of this function are the same as for
+ * pm_runtime_resume() and the runtime PM usage counter of @dev remains
+ * incremented in all cases, even if it returns an error code.
+ * Consider using pm_runtime_resume_and_get() instead of it, especially
+ * if its return value is checked by the caller, as this is likely to result
+ * in cleaner code.
+ */
static inline int pm_runtime_get_sync(struct device *dev)
{
return __pm_runtime_resume(dev, RPM_GET_PUT);
}
+/**
+ * pm_runtime_resume_and_get - Bump up usage counter of a device and resume it.
+ * @dev: Target device.
+ *
+ * Resume @dev synchronously and if that is successful, increment its runtime
+ * PM usage counter. Return 0 if the runtime PM usage counter of @dev has been
+ * incremented or a negative error code otherwise.
+ */
+static inline int pm_runtime_resume_and_get(struct device *dev)
+{
+ int ret;
+
+ ret = __pm_runtime_resume(dev, RPM_GET_PUT);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
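/*
 * Illustrative caller (not part of the patch) showing why the helper
 * exists: unlike pm_runtime_get_sync(), a failure here needs no
 * explicit put by the caller.
 */
static int foo_transfer(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* usage counter was already dropped */

	/* ... talk to the hardware ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}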
+/**
+ * pm_runtime_put - Drop device usage counter and queue up "idle check" if 0.
+ * @dev: Target device.
+ *
+ * Decrement the runtime PM usage counter of @dev and if it turns out to be
+ * equal to 0, queue up a work item for @dev like in pm_request_idle().
+ */
static inline int pm_runtime_put(struct device *dev)
{
return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
}
+/**
+ * pm_runtime_put_autosuspend - Drop device usage counter and queue autosuspend if 0.
+ * @dev: Target device.
+ *
+ * Decrement the runtime PM usage counter of @dev and if it turns out to be
+ * equal to 0, queue up a work item for @dev like in pm_request_autosuspend().
+ */
static inline int pm_runtime_put_autosuspend(struct device *dev)
{
return __pm_runtime_suspend(dev,
RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
}
+/**
+ * pm_runtime_put_sync - Drop device usage counter and run "idle check" if 0.
+ * @dev: Target device.
+ *
+ * Decrement the runtime PM usage counter of @dev and if it turns out to be
+ * equal to 0, invoke the "idle check" callback of @dev and, depending on its
+ * return value, set up autosuspend of @dev or suspend it (depending on whether
+ * or not autosuspend has been enabled for it).
+ *
+ * The possible return values of this function are the same as for
+ * pm_runtime_idle() and the runtime PM usage counter of @dev remains
+ * decremented in all cases, even if it returns an error code.
+ */
static inline int pm_runtime_put_sync(struct device *dev)
{
return __pm_runtime_idle(dev, RPM_GET_PUT);
}
+/**
+ * pm_runtime_put_sync_suspend - Drop device usage counter and suspend if 0.
+ * @dev: Target device.
+ *
+ * Decrement the runtime PM usage counter of @dev and if it turns out to be
+ * equal to 0, carry out runtime-suspend of @dev synchronously.
+ *
+ * The possible return values of this function are the same as for
+ * pm_runtime_suspend() and the runtime PM usage counter of @dev remains
+ * decremented in all cases, even if it returns an error code.
+ */
static inline int pm_runtime_put_sync_suspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_GET_PUT);
}
+/**
+ * pm_runtime_put_sync_autosuspend - Drop device usage counter and autosuspend if 0.
+ * @dev: Target device.
+ *
+ * Decrement the runtime PM usage counter of @dev and if it turns out to be
+ * equal to 0, set up autosuspend of @dev or suspend it synchronously (depending
+ * on whether or not autosuspend has been enabled for it).
+ *
+ * The possible return values of this function are the same as for
+ * pm_runtime_autosuspend() and the runtime PM usage counter of @dev remains
+ * decremented in all cases, even if it returns an error code.
+ */
static inline int pm_runtime_put_sync_autosuspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO);
}
+/**
+ * pm_runtime_set_active - Set runtime PM status to "active".
+ * @dev: Target device.
+ *
+ * Set the runtime PM status of @dev to %RPM_ACTIVE and ensure that dependencies
+ * of it will be taken into account.
+ *
+ * It is not valid to call this function for devices with runtime PM enabled.
+ */
static inline int pm_runtime_set_active(struct device *dev)
{
return __pm_runtime_set_status(dev, RPM_ACTIVE);
}
+/**
+ * pm_runtime_set_suspended - Set runtime PM status to "suspended".
+ * @dev: Target device.
+ *
+ * Set the runtime PM status of @dev to %RPM_SUSPENDED and ensure that
+ * dependencies of it will be taken into account.
+ *
+ * It is not valid to call this function for devices with runtime PM enabled.
+ */
static inline int pm_runtime_set_suspended(struct device *dev)
{
return __pm_runtime_set_status(dev, RPM_SUSPENDED);
}
+/**
+ * pm_runtime_disable - Disable runtime PM for a device.
+ * @dev: Target device.
+ *
+ * Prevent the runtime PM framework from working with @dev (by incrementing its
+ * "blocking" counter).
+ *
+ * For each invocation of this function for @dev there must be a matching
+ * pm_runtime_enable() call in order for runtime PM to be enabled for it.
+ */
static inline void pm_runtime_disable(struct device *dev)
{
__pm_runtime_disable(dev, true);
}
+/**
+ * pm_runtime_use_autosuspend - Allow autosuspend to be used for a device.
+ * @dev: Target device.
+ *
+ * Allow the runtime PM autosuspend mechanism to be used for @dev whenever
+ * requested (or "autosuspend" will be handled as direct runtime-suspend for
+ * it).
+ *
+ * NOTE: It's important to undo this with pm_runtime_dont_use_autosuspend()
+ * at driver exit time unless your driver initially enabled pm_runtime
+ * with devm_pm_runtime_enable() (which handles it for you).
+ */
static inline void pm_runtime_use_autosuspend(struct device *dev)
{
__pm_runtime_use_autosuspend(dev, true);
}
+/**
+ * pm_runtime_dont_use_autosuspend - Prevent autosuspend from being used.
+ * @dev: Target device.
+ *
+ * Prevent the runtime PM autosuspend mechanism from being used for @dev which
+ * means that "autosuspend" will be handled as direct runtime-suspend for it
+ * going forward.
+ */
static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
{
__pm_runtime_use_autosuspend(dev, false);
diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h
index cd5b62db9084..dd42d16945d0 100644
--- a/include/linux/pm_wakeirq.h
+++ b/include/linux/pm_wakeirq.h
@@ -1,15 +1,5 @@
-/*
- * pm_wakeirq.h - Device wakeirq helper functions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* pm_wakeirq.h - Device wakeirq helper functions */
#ifndef _LINUX_PM_WAKEIRQ_H
#define _LINUX_PM_WAKEIRQ_H
@@ -17,8 +7,8 @@
#ifdef CONFIG_PM
extern int dev_pm_set_wake_irq(struct device *dev, int irq);
-extern int dev_pm_set_dedicated_wake_irq(struct device *dev,
- int irq);
+extern int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq);
+extern int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq);
extern void dev_pm_clear_wake_irq(struct device *dev);
extern void dev_pm_enable_wake_irq(struct device *dev);
extern void dev_pm_disable_wake_irq(struct device *dev);
@@ -35,6 +25,11 @@ static inline int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
return 0;
}
+static inline int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
+{
+ return 0;
+}
+
static inline void dev_pm_clear_wake_irq(struct device *dev)
{
}
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index aa3da6611533..77f4849e3418 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -84,6 +84,11 @@ static inline bool device_may_wakeup(struct device *dev)
return dev->power.can_wakeup && !!dev->power.wakeup;
}
+static inline bool device_wakeup_path(struct device *dev)
+{
+ return dev->power.wakeup_path;
+}
+
static inline void device_set_wakeup_path(struct device *dev)
{
dev->power.wakeup_path = true;
@@ -104,7 +109,6 @@ extern struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws);
extern int device_wakeup_enable(struct device *dev);
extern int device_wakeup_disable(struct device *dev);
extern void device_set_wakeup_capable(struct device *dev, bool capable);
-extern int device_init_wakeup(struct device *dev, bool val);
extern int device_set_wakeup_enable(struct device *dev, bool enable);
extern void __pm_stay_awake(struct wakeup_source *ws);
extern void pm_stay_awake(struct device *dev);
@@ -162,16 +166,14 @@ static inline int device_set_wakeup_enable(struct device *dev, bool enable)
return 0;
}
-static inline int device_init_wakeup(struct device *dev, bool val)
+static inline bool device_may_wakeup(struct device *dev)
{
- device_set_wakeup_capable(dev, val);
- device_set_wakeup_enable(dev, val);
- return 0;
+ return dev->power.can_wakeup && dev->power.should_wakeup;
}
-static inline bool device_may_wakeup(struct device *dev)
+static inline bool device_wakeup_path(struct device *dev)
{
- return dev->power.can_wakeup && dev->power.should_wakeup;
+ return false;
}
static inline void device_set_wakeup_path(struct device *dev) {}
@@ -207,4 +209,27 @@ static inline void pm_wakeup_hard_event(struct device *dev)
return pm_wakeup_dev_event(dev, 0, true);
}
+/**
+ * device_init_wakeup - Device wakeup initialization.
+ * @dev: Device to handle.
+ * @enable: Whether or not to enable @dev as a wakeup device.
+ *
+ * By default, most devices should leave wakeup disabled. The exceptions are
+ * devices that everyone expects to be wakeup sources: keyboards, power buttons,
+ * possibly network interfaces, etc. Also, devices that don't generate their
+ * own wakeup requests but merely forward requests from one bus to another
+ * (like PCI bridges) should have wakeup enabled by default.
+ */
+static inline int device_init_wakeup(struct device *dev, bool enable)
+{
+ if (enable) {
+ device_set_wakeup_capable(dev, true);
+ return device_wakeup_enable(dev);
+ } else {
+ device_wakeup_disable(dev);
+ device_set_wakeup_capable(dev, false);
+ return 0;
+ }
+}
+
#endif /* _LINUX_PM_WAKEUP_H */
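
Taken together with the wakeirq helpers above, probe-time wakeup setup might
look like the sketch below (function name and error handling are hypothetical):

#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static int foo_setup_wakeup(struct device *dev, int wake_irq)
{
	int ret;

	/* Mark the device wakeup-capable and enable it as a wakeup source. */
	ret = device_init_wakeup(dev, true);
	if (ret)
		return ret;

	/* Route a dedicated interrupt that can wake the system. */
	return dev_pm_set_dedicated_wake_irq(dev, wake_irq);
}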
diff --git a/include/linux/pmbus.h b/include/linux/pmbus.h
index 1ea5bae708a1..fa9f08164c36 100644
--- a/include/linux/pmbus.h
+++ b/include/linux/pmbus.h
@@ -34,6 +34,45 @@
*/
#define PMBUS_WRITE_PROTECTED BIT(1)
+/*
+ * PMBUS_NO_CAPABILITY
+ *
+ * Some PMBus chips don't respond with valid data when reading the CAPABILITY
+ * register. For such chips, this flag should be set so that the PMBus core
+ * driver doesn't use CAPABILITY to determine its behavior.
+ */
+#define PMBUS_NO_CAPABILITY BIT(2)
+
+/*
+ * PMBUS_READ_STATUS_AFTER_FAILED_CHECK
+ *
+ * Some PMBus chips end up in an undefined state when trying to read an
+ * unsupported register. For such chips, it is necessary to reset the
+ * chip pmbus controller to a known state after a failed register check.
+ * This can be done by reading a known register. By setting this flag the
+ * driver will try to read the STATUS register after each failed
+ * register check. This read may fail, but it will put the chip in a
+ * known state.
+ */
+#define PMBUS_READ_STATUS_AFTER_FAILED_CHECK BIT(3)
+
+/*
+ * PMBUS_NO_WRITE_PROTECT
+ *
+ * Some PMBus chips respond with invalid data when reading the WRITE_PROTECT
+ * register. For such chips, this flag should be set so that the PMBus core
+ * driver doesn't use the WRITE_PROTECT command to determine its behavior.
+ */
+#define PMBUS_NO_WRITE_PROTECT BIT(4)
+
+/*
+ * PMBUS_USE_COEFFICIENTS_CMD
+ *
+ * When this flag is set the PMBus core driver will use the COEFFICIENTS
+ * register to initialize the coefficients for the direct mode format.
+ */
+#define PMBUS_USE_COEFFICIENTS_CMD BIT(5)
+
struct pmbus_platform_data {
u32 flags; /* Device specific flags */
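
The new quirk flags are consumed through this same flags word; a hypothetical
board file might combine them like so:

#include <linux/pmbus.h>

static struct pmbus_platform_data foo_pmbus_pdata = {
	.flags = PMBUS_NO_CAPABILITY | PMBUS_READ_STATUS_AFTER_FAILED_CHECK,
};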
diff --git a/include/linux/pmu.h b/include/linux/pmu.h
index 52453a24a24f..c677442d007c 100644
--- a/include/linux/pmu.h
+++ b/include/linux/pmu.h
@@ -13,7 +13,7 @@
#include <uapi/linux/pmu.h>
-extern int find_via_pmu(void);
+extern int __init find_via_pmu(void);
extern int pmu_request(struct adb_request *req,
void (*done)(struct adb_request *), int nbytes, ...);
diff --git a/include/linux/pnfs_osd_xdr.h b/include/linux/pnfs_osd_xdr.h
deleted file mode 100644
index 17d7d0d20eca..000000000000
--- a/include/linux/pnfs_osd_xdr.h
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * pNFS-osd on-the-wire data structures
- *
- * Copyright (C) 2007 Panasas Inc. [year of first publication]
- * All rights reserved.
- *
- * Benny Halevy <bhalevy@panasas.com>
- * Boaz Harrosh <ooo@electrozaur.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * See the file COPYING included with this distribution for more details.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the Panasas company nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __PNFS_OSD_XDR_H__
-#define __PNFS_OSD_XDR_H__
-
-#include <linux/nfs_fs.h>
-
-/*
- * draft-ietf-nfsv4-minorversion-22
- * draft-ietf-nfsv4-pnfs-obj-12
- */
-
-/* Layout Structure */
-
-enum pnfs_osd_raid_algorithm4 {
- PNFS_OSD_RAID_0 = 1,
- PNFS_OSD_RAID_4 = 2,
- PNFS_OSD_RAID_5 = 3,
- PNFS_OSD_RAID_PQ = 4 /* Reed-Solomon P+Q */
-};
-
-/* struct pnfs_osd_data_map4 {
- * uint32_t odm_num_comps;
- * length4 odm_stripe_unit;
- * uint32_t odm_group_width;
- * uint32_t odm_group_depth;
- * uint32_t odm_mirror_cnt;
- * pnfs_osd_raid_algorithm4 odm_raid_algorithm;
- * };
- */
-struct pnfs_osd_data_map {
- u32 odm_num_comps;
- u64 odm_stripe_unit;
- u32 odm_group_width;
- u32 odm_group_depth;
- u32 odm_mirror_cnt;
- u32 odm_raid_algorithm;
-};
-
-/* struct pnfs_osd_objid4 {
- * deviceid4 oid_device_id;
- * uint64_t oid_partition_id;
- * uint64_t oid_object_id;
- * };
- */
-struct pnfs_osd_objid {
- struct nfs4_deviceid oid_device_id;
- u64 oid_partition_id;
- u64 oid_object_id;
-};
-
-/* For printout. I use:
- * kprint("dev(%llx:%llx)", _DEVID_LO(pointer), _DEVID_HI(pointer));
- * BE style
- */
-#define _DEVID_LO(oid_device_id) \
- (unsigned long long)be64_to_cpup((__be64 *)(oid_device_id)->data)
-
-#define _DEVID_HI(oid_device_id) \
- (unsigned long long)be64_to_cpup(((__be64 *)(oid_device_id)->data) + 1)
-
-enum pnfs_osd_version {
- PNFS_OSD_MISSING = 0,
- PNFS_OSD_VERSION_1 = 1,
- PNFS_OSD_VERSION_2 = 2
-};
-
-struct pnfs_osd_opaque_cred {
- u32 cred_len;
- void *cred;
-};
-
-enum pnfs_osd_cap_key_sec {
- PNFS_OSD_CAP_KEY_SEC_NONE = 0,
- PNFS_OSD_CAP_KEY_SEC_SSV = 1,
-};
-
-/* struct pnfs_osd_object_cred4 {
- * pnfs_osd_objid4 oc_object_id;
- * pnfs_osd_version4 oc_osd_version;
- * pnfs_osd_cap_key_sec4 oc_cap_key_sec;
- * opaque oc_capability_key<>;
- * opaque oc_capability<>;
- * };
- */
-struct pnfs_osd_object_cred {
- struct pnfs_osd_objid oc_object_id;
- u32 oc_osd_version;
- u32 oc_cap_key_sec;
- struct pnfs_osd_opaque_cred oc_cap_key;
- struct pnfs_osd_opaque_cred oc_cap;
-};
-
-/* struct pnfs_osd_layout4 {
- * pnfs_osd_data_map4 olo_map;
- * uint32_t olo_comps_index;
- * pnfs_osd_object_cred4 olo_components<>;
- * };
- */
-struct pnfs_osd_layout {
- struct pnfs_osd_data_map olo_map;
- u32 olo_comps_index;
- u32 olo_num_comps;
- struct pnfs_osd_object_cred *olo_comps;
-};
-
-/* Device Address */
-enum pnfs_osd_targetid_type {
- OBJ_TARGET_ANON = 1,
- OBJ_TARGET_SCSI_NAME = 2,
- OBJ_TARGET_SCSI_DEVICE_ID = 3,
-};
-
-/* union pnfs_osd_targetid4 switch (pnfs_osd_targetid_type4 oti_type) {
- * case OBJ_TARGET_SCSI_NAME:
- * string oti_scsi_name<>;
- *
- * case OBJ_TARGET_SCSI_DEVICE_ID:
- * opaque oti_scsi_device_id<>;
- *
- * default:
- * void;
- * };
- *
- * union pnfs_osd_targetaddr4 switch (bool ota_available) {
- * case TRUE:
- * netaddr4 ota_netaddr;
- * case FALSE:
- * void;
- * };
- *
- * struct pnfs_osd_deviceaddr4 {
- * pnfs_osd_targetid4 oda_targetid;
- * pnfs_osd_targetaddr4 oda_targetaddr;
- * uint64_t oda_lun;
- * opaque oda_systemid<>;
- * pnfs_osd_object_cred4 oda_root_obj_cred;
- * opaque oda_osdname<>;
- * };
- */
-struct pnfs_osd_targetid {
- u32 oti_type;
- struct nfs4_string oti_scsi_device_id;
-};
-
-/* struct netaddr4 {
- * // see struct rpcb in RFC1833
- * string r_netid<>; // network id
- * string r_addr<>; // universal address
- * };
- */
-struct pnfs_osd_net_addr {
- struct nfs4_string r_netid;
- struct nfs4_string r_addr;
-};
-
-struct pnfs_osd_targetaddr {
- u32 ota_available;
- struct pnfs_osd_net_addr ota_netaddr;
-};
-
-struct pnfs_osd_deviceaddr {
- struct pnfs_osd_targetid oda_targetid;
- struct pnfs_osd_targetaddr oda_targetaddr;
- u8 oda_lun[8];
- struct nfs4_string oda_systemid;
- struct pnfs_osd_object_cred oda_root_obj_cred;
- struct nfs4_string oda_osdname;
-};
-
-/* LAYOUTCOMMIT: layoutupdate */
-
-/* union pnfs_osd_deltaspaceused4 switch (bool dsu_valid) {
- * case TRUE:
- * int64_t dsu_delta;
- * case FALSE:
- * void;
- * };
- *
- * struct pnfs_osd_layoutupdate4 {
- * pnfs_osd_deltaspaceused4 olu_delta_space_used;
- * bool olu_ioerr_flag;
- * };
- */
-struct pnfs_osd_layoutupdate {
- u32 dsu_valid;
- s64 dsu_delta;
- u32 olu_ioerr_flag;
-};
-
-/* LAYOUTRETURN: I/O Error Report */
-
-enum pnfs_osd_errno {
- PNFS_OSD_ERR_EIO = 1,
- PNFS_OSD_ERR_NOT_FOUND = 2,
- PNFS_OSD_ERR_NO_SPACE = 3,
- PNFS_OSD_ERR_BAD_CRED = 4,
- PNFS_OSD_ERR_NO_ACCESS = 5,
- PNFS_OSD_ERR_UNREACHABLE = 6,
- PNFS_OSD_ERR_RESOURCE = 7
-};
-
-/* struct pnfs_osd_ioerr4 {
- * pnfs_osd_objid4 oer_component;
- * length4 oer_comp_offset;
- * length4 oer_comp_length;
- * bool oer_iswrite;
- * pnfs_osd_errno4 oer_errno;
- * };
- */
-struct pnfs_osd_ioerr {
- struct pnfs_osd_objid oer_component;
- u64 oer_comp_offset;
- u64 oer_comp_length;
- u32 oer_iswrite;
- u32 oer_errno;
-};
-
-/* OSD XDR Client API */
-/* Layout helpers */
-/* Layout decoding is done in two parts:
- * 1. First Call pnfs_osd_xdr_decode_layout_map to read in only the header part
- * of the layout. @iter members need not be initialized.
- * Returned:
- * @layout members are set. (@layout->olo_comps set to NULL).
- *
- * Zero on success, or negative error if passed xdr is broken.
- *
- * 2. 2nd Call pnfs_osd_xdr_decode_layout_comp() in a loop until it returns
- * false, to decode the next component.
- * Returned:
- * true if there is more to decode or false if we are done or error.
- *
- * Example:
- * struct pnfs_osd_xdr_decode_layout_iter iter;
- * struct pnfs_osd_layout layout;
- * struct pnfs_osd_object_cred comp;
- * int status;
- *
- * status = pnfs_osd_xdr_decode_layout_map(&layout, &iter, xdr);
- * if (unlikely(status))
- * goto err;
- * while(pnfs_osd_xdr_decode_layout_comp(&comp, &iter, xdr, &status)) {
- * // All of @comp strings point to inside the xdr_buffer
- * // or scrach buffer. Copy them out to user memory eg.
- *	// or scratch buffer. Copy them out to user memory, e.g.
- * }
- * if (unlikely(status))
- * goto err;
- */
-
-struct pnfs_osd_xdr_decode_layout_iter {
- unsigned total_comps;
- unsigned decoded_comps;
-};
-
-extern int pnfs_osd_xdr_decode_layout_map(struct pnfs_osd_layout *layout,
- struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr);
-
-extern bool pnfs_osd_xdr_decode_layout_comp(struct pnfs_osd_object_cred *comp,
- struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr,
- int *err);
-
-/* Device Info helpers */
-
-/* Note: All strings inside @deviceaddr point to space inside @p.
- * @p should stay valid while @deviceaddr is in use.
- */
-extern void pnfs_osd_xdr_decode_deviceaddr(
- struct pnfs_osd_deviceaddr *deviceaddr, __be32 *p);
-
-/* layoutupdate (layout_commit) xdr helpers */
-extern int
-pnfs_osd_xdr_encode_layoutupdate(struct xdr_stream *xdr,
- struct pnfs_osd_layoutupdate *lou);
-
-/* osd_ioerror encoding (layout_return) */
-extern __be32 *pnfs_osd_xdr_ioerr_reserve_space(struct xdr_stream *xdr);
-extern void pnfs_osd_xdr_encode_ioerr(__be32 *p, struct pnfs_osd_ioerr *ioerr);
-
-#endif /* __PNFS_OSD_XDR_H__ */
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 3b12fd28af78..c2a7cfbca713 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -220,10 +220,8 @@ struct pnp_card {
#define global_to_pnp_card(n) list_entry(n, struct pnp_card, global_list)
#define protocol_to_pnp_card(n) list_entry(n, struct pnp_card, protocol_list)
#define to_pnp_card(n) container_of(n, struct pnp_card, dev)
-#define pnp_for_each_card(card) \
- for((card) = global_to_pnp_card(pnp_cards.next); \
- (card) != global_to_pnp_card(&pnp_cards); \
- (card) = global_to_pnp_card((card)->global_list.next))
+#define pnp_for_each_card(card) \
+ list_for_each_entry(card, &pnp_cards, global_list)
struct pnp_card_link {
struct pnp_card *card;
@@ -276,14 +274,9 @@ struct pnp_dev {
#define card_to_pnp_dev(n) list_entry(n, struct pnp_dev, card_list)
#define protocol_to_pnp_dev(n) list_entry(n, struct pnp_dev, protocol_list)
#define to_pnp_dev(n) container_of(n, struct pnp_dev, dev)
-#define pnp_for_each_dev(dev) \
- for((dev) = global_to_pnp_dev(pnp_global.next); \
- (dev) != global_to_pnp_dev(&pnp_global); \
- (dev) = global_to_pnp_dev((dev)->global_list.next))
-#define card_for_each_dev(card,dev) \
- for((dev) = card_to_pnp_dev((card)->devices.next); \
- (dev) != card_to_pnp_dev(&(card)->devices); \
- (dev) = card_to_pnp_dev((dev)->card_list.next))
+#define pnp_for_each_dev(dev) list_for_each_entry(dev, &pnp_global, global_list)
+#define card_for_each_dev(card, dev) \
+ list_for_each_entry(dev, &(card)->devices, card_list)
#define pnp_dev_name(dev) (dev)->name
static inline void *pnp_get_drvdata(struct pnp_dev *pdev)
@@ -379,7 +372,7 @@ struct pnp_id {
};
struct pnp_driver {
- char *name;
+ const char *name;
const struct pnp_device_id *id_table;
unsigned int flags;
int (*probe) (struct pnp_dev *dev, const struct pnp_device_id *dev_id);
@@ -437,14 +430,10 @@ struct pnp_protocol {
};
#define to_pnp_protocol(n) list_entry(n, struct pnp_protocol, protocol_list)
-#define protocol_for_each_card(protocol,card) \
- for((card) = protocol_to_pnp_card((protocol)->cards.next); \
- (card) != protocol_to_pnp_card(&(protocol)->cards); \
- (card) = protocol_to_pnp_card((card)->protocol_list.next))
-#define protocol_for_each_dev(protocol,dev) \
- for((dev) = protocol_to_pnp_dev((protocol)->devices.next); \
- (dev) != protocol_to_pnp_dev(&(protocol)->devices); \
- (dev) = protocol_to_pnp_dev((dev)->protocol_list.next))
+#define protocol_for_each_card(protocol, card) \
+ list_for_each_entry(card, &(protocol)->cards, protocol_list)
+#define protocol_for_each_dev(protocol, dev) \
+ list_for_each_entry(dev, &(protocol)->devices, protocol_list)
extern struct bus_type pnp_bus_type;
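
After the conversion the iterators are plain list_for_each_entry() wrappers,
so callers are unchanged; an illustrative (hypothetical) walker:

static void foo_dump_pnp_devices(void)
{
	struct pnp_dev *dev;

	pnp_for_each_dev(dev)
		pr_info("pnp: %s\n", pnp_dev_name(dev));
}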
diff --git a/include/linux/poison.h b/include/linux/poison.h
index df34330b4e34..2d3249eb0e62 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -24,18 +24,10 @@
#define LIST_POISON2 ((void *) 0x122 + POISON_POINTER_DELTA)
/********** include/linux/timer.h **********/
-/*
- * Magic number "tsta" to indicate a static timer initializer
- * for the object debugging code.
- */
#define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA)
/********** mm/page_poison.c **********/
-#ifdef CONFIG_PAGE_POISONING_ZERO
-#define PAGE_POISON 0x00
-#else
#define PAGE_POISON 0xaa
-#endif
/********** mm/page_alloc.c ************/
@@ -86,4 +78,10 @@
/********** security/ **********/
#define KEY_DESTROY 0xbd
+/********** net/core/page_pool.c **********/
+#define PP_SIGNATURE (0x40 + POISON_POINTER_DELTA)
+
+/********** kernel/bpf/ **********/
+#define BPF_PTR_POISON ((void *)(0xeB9FUL + POISON_POINTER_DELTA))
+
#endif
diff --git a/include/linux/poll.h b/include/linux/poll.h
index 1cdc32b1f1b0..a9e0e1c2d1f2 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -8,12 +8,10 @@
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/fs.h>
-#include <linux/sysctl.h>
#include <linux/uaccess.h>
#include <uapi/linux/poll.h>
#include <uapi/linux/eventpoll.h>
-extern struct ctl_table epoll_table[]; /* for sysctl */
/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
additional memory. */
#ifdef __clang__
diff --git a/include/linux/polynomial.h b/include/linux/polynomial.h
new file mode 100644
index 000000000000..9e074a0bb6fa
--- /dev/null
+++ b/include/linux/polynomial.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ */
+
+#ifndef _POLYNOMIAL_H
+#define _POLYNOMIAL_H
+
+/*
+ * struct polynomial_term - one term descriptor of a polynomial
+ * @deg: degree of the term.
+ * @coef: multiplication factor of the term.
+ * @divider: distributed divider per each degree.
+ * @divider_leftover: divider leftover, which couldn't be redistributed.
+ */
+struct polynomial_term {
+ unsigned int deg;
+ long coef;
+ long divider;
+ long divider_leftover;
+};
+
+/*
+ * struct polynomial - a polynomial descriptor
+ * @total_divider: total data divider.
+ * @terms: polynomial terms, last term must have degree of 0
+ */
+struct polynomial {
+ long total_divider;
+ struct polynomial_term terms[];
+};
+
+long polynomial_calc(const struct polynomial *poly, long data);
+
+#endif
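
A sketch of a prospective user (all coefficients below are invented; real
callers derive things like a temperature from a raw sensor code):

#include <linux/polynomial.h>

/* Invented curve: T(x) = -7 * x^2 + 1100 * x - 63000, in millidegrees. */
static const struct polynomial foo_temp_poly = {
	.total_divider = 1,
	.terms = {
		{ .deg = 2, .coef = -7, .divider = 1, .divider_leftover = 1 },
		{ .deg = 1, .coef = 1100, .divider = 1, .divider_leftover = 1 },
		/* The last term must have degree 0. */
		{ .deg = 0, .coef = -63000, .divider = 1, .divider_leftover = 1 },
	},
};

static long foo_raw_to_mcelsius(long raw)
{
	return polynomial_calc(&foo_temp_poly, raw);
}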
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 3d10c84a97a9..2c6e99ca48af 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -69,7 +69,7 @@ static inline int clockid_to_fd(const clockid_t clk)
struct cpu_timer {
struct timerqueue_node node;
struct timerqueue_head *head;
- struct task_struct *task;
+ struct pid *pid;
struct list_head elist;
int firing;
};
@@ -81,12 +81,19 @@ static inline bool cpu_timer_enqueue(struct timerqueue_head *head,
return timerqueue_add(head, &ctmr->node);
}
-static inline void cpu_timer_dequeue(struct cpu_timer *ctmr)
+static inline bool cpu_timer_queued(struct cpu_timer *ctmr)
{
- if (ctmr->head) {
+ return !!ctmr->head;
+}
+
+static inline bool cpu_timer_dequeue(struct cpu_timer *ctmr)
+{
+ if (cpu_timer_queued(ctmr)) {
timerqueue_del(ctmr->head, &ctmr->node);
ctmr->head = NULL;
+ return true;
}
+ return false;
}
static inline u64 cpu_timer_getexpires(struct cpu_timer *ctmr)
@@ -125,6 +132,16 @@ struct posix_cputimers {
unsigned int expiry_active;
};
+/**
+ * posix_cputimers_work - Container for task work based posix CPU timer expiry
+ * @work: The task work to be scheduled
+ * @scheduled: @work has been scheduled already, no further processing
+ */
+struct posix_cputimers_work {
+ struct callback_head work;
+ unsigned int scheduled;
+};
+
static inline void posix_cputimers_init(struct posix_cputimers *pct)
{
memset(pct, 0, sizeof(*pct));
@@ -165,6 +182,14 @@ static inline void posix_cputimers_group_init(struct posix_cputimers *pct,
u64 cpu_limit) { }
#endif
+#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
+void clear_posix_cputimers_work(struct task_struct *p);
+void posix_cputimers_init_work(void);
+#else
+static inline void clear_posix_cputimers_work(struct task_struct *p) { }
+static inline void posix_cputimers_init_work(void) { }
+#endif
+
#define REQUEUE_PENDING 1
/**
@@ -227,7 +252,7 @@ void posix_cpu_timers_exit_group(struct task_struct *task);
void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
u64 *newval, u64 *oldval);
-void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new);
+int update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new);
void posixtimer_rearm(struct kernel_siginfo *info);
#endif
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index 540595a321a7..7d1e604c1325 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -15,6 +15,8 @@
#include <linux/refcount.h>
#include <uapi/linux/posix_acl.h>
+struct user_namespace;
+
struct posix_acl_entry {
short e_tag;
unsigned short e_perm;
@@ -28,7 +30,7 @@ struct posix_acl {
refcount_t a_refcount;
struct rcu_head a_rcu;
unsigned int a_count;
- struct posix_acl_entry a_entries[0];
+ struct posix_acl_entry a_entries[];
};
#define FOREACH_ACL_ENTRY(pa, acl, pe) \
@@ -61,30 +63,36 @@ posix_acl_release(struct posix_acl *acl)
extern void posix_acl_init(struct posix_acl *, int);
extern struct posix_acl *posix_acl_alloc(int, gfp_t);
-extern int posix_acl_valid(struct user_namespace *, const struct posix_acl *);
-extern int posix_acl_permission(struct inode *, const struct posix_acl *, int);
extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t);
extern int posix_acl_equiv_mode(const struct posix_acl *, umode_t *);
extern int __posix_acl_create(struct posix_acl **, gfp_t, umode_t *);
extern int __posix_acl_chmod(struct posix_acl **, gfp_t, umode_t);
extern struct posix_acl *get_posix_acl(struct inode *, int);
-extern int set_posix_acl(struct inode *, int, struct posix_acl *);
+extern int set_posix_acl(struct user_namespace *, struct inode *, int,
+ struct posix_acl *);
+
+struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type);
+struct posix_acl *posix_acl_clone(const struct posix_acl *acl, gfp_t flags);
#ifdef CONFIG_FS_POSIX_ACL
-extern int posix_acl_chmod(struct inode *, umode_t);
+int posix_acl_chmod(struct user_namespace *, struct inode *, umode_t);
extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **,
struct posix_acl **);
-extern int posix_acl_update_mode(struct inode *, umode_t *, struct posix_acl **);
+int posix_acl_update_mode(struct user_namespace *, struct inode *, umode_t *,
+ struct posix_acl **);
-extern int simple_set_acl(struct inode *, struct posix_acl *, int);
+extern int simple_set_acl(struct user_namespace *, struct inode *,
+ struct posix_acl *, int);
extern int simple_acl_create(struct inode *, struct inode *);
struct posix_acl *get_cached_acl(struct inode *inode, int type);
-struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type);
void set_cached_acl(struct inode *inode, int type, struct posix_acl *acl);
void forget_cached_acl(struct inode *inode, int type);
void forget_all_cached_acls(struct inode *inode);
+int posix_acl_valid(struct user_namespace *, const struct posix_acl *);
+int posix_acl_permission(struct user_namespace *, struct inode *,
+ const struct posix_acl *, int);
static inline void cache_no_acl(struct inode *inode)
{
@@ -92,7 +100,8 @@ static inline void cache_no_acl(struct inode *inode)
inode->i_default_acl = NULL;
}
#else
-static inline int posix_acl_chmod(struct inode *inode, umode_t mode)
+static inline int posix_acl_chmod(struct user_namespace *mnt_userns,
+ struct inode *inode, umode_t mode)
{
return 0;
}
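
Callers now thread the mount's user namespace through these entry points; a
minimal sketch of the updated convention (helper name is hypothetical):

static int foo_acl_post_chmod(struct user_namespace *mnt_userns,
			      struct inode *inode)
{
	/* Non-idmapped mounts simply pass &init_user_ns here. */
	return posix_acl_chmod(mnt_userns, inode, inode->i_mode);
}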
diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h
index 2387709991b5..8163dd48c430 100644
--- a/include/linux/posix_acl_xattr.h
+++ b/include/linux/posix_acl_xattr.h
@@ -35,6 +35,9 @@ posix_acl_xattr_count(size_t size)
#ifdef CONFIG_FS_POSIX_ACL
void posix_acl_fix_xattr_from_user(void *value, size_t size);
void posix_acl_fix_xattr_to_user(void *value, size_t size);
+void posix_acl_getxattr_idmapped_mnt(struct user_namespace *mnt_userns,
+ const struct inode *inode,
+ void *value, size_t size);
#else
static inline void posix_acl_fix_xattr_from_user(void *value, size_t size)
{
@@ -42,12 +45,21 @@ static inline void posix_acl_fix_xattr_from_user(void *value, size_t size)
static inline void posix_acl_fix_xattr_to_user(void *value, size_t size)
{
}
+static inline void
+posix_acl_getxattr_idmapped_mnt(struct user_namespace *mnt_userns,
+ const struct inode *inode, void *value,
+ size_t size)
+{
+}
#endif
struct posix_acl *posix_acl_from_xattr(struct user_namespace *user_ns,
const void *value, size_t size);
int posix_acl_to_xattr(struct user_namespace *user_ns,
const struct posix_acl *acl, void *buffer, size_t size);
+struct posix_acl *vfs_set_acl_prepare(struct user_namespace *mnt_userns,
+ struct user_namespace *fs_userns,
+ const void *value, size_t size);
extern const struct xattr_handler posix_acl_access_xattr_handler;
extern const struct xattr_handler posix_acl_default_xattr_handler;
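
vfs_set_acl_prepare() converts a raw xattr blob into a struct posix_acl while translating between the mount's and the filesystem's user namespace. A minimal sketch, assuming the helper returns an ERR_PTR on failure (the surrounding function is hypothetical):

static int example_set_acl_xattr(struct user_namespace *mnt_userns,
				 struct super_block *sb,
				 const void *value, size_t size)
{
	struct posix_acl *acl;

	acl = vfs_set_acl_prepare(mnt_userns, sb->s_user_ns, value, size);
	if (IS_ERR(acl))
		return PTR_ERR(acl);

	/* hand the ACL to the filesystem here, then drop the reference */
	posix_acl_release(acl);
	return 0;
}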
diff --git a/include/linux/power/ab8500.h b/include/linux/power/ab8500.h
deleted file mode 100644
index 51976b52f373..000000000000
--- a/include/linux/power/ab8500.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson 2013
- * Author: Hongbo Zhang <hongbo.zhang@linaro.com>
- */
-
-#ifndef PWR_AB8500_H
-#define PWR_AB8500_H
-
-extern const struct abx500_res_to_temp ab8500_temp_tbl_a_thermistor[];
-extern const int ab8500_temp_tbl_a_size;
-
-extern const struct abx500_res_to_temp ab8500_temp_tbl_b_thermistor[];
-extern const int ab8500_temp_tbl_b_size;
-
-#endif /* PWR_AB8500_H */
diff --git a/include/linux/power/bq2415x_charger.h b/include/linux/power/bq2415x_charger.h
index 7a91b357e3ac..f3c267f2a467 100644
--- a/include/linux/power/bq2415x_charger.h
+++ b/include/linux/power/bq2415x_charger.h
@@ -2,7 +2,7 @@
/*
* bq2415x charger driver
*
- * Copyright (C) 2011-2013 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (C) 2011-2013 Pali Rohár <pali@kernel.org>
*/
#ifndef BQ2415X_CHARGER_H
@@ -14,8 +14,8 @@
* value is -1 then default chip value (specified in datasheet) will be
* used.
*
- * Value resistor_sense is needed for for configuring charge and
- * termination current. It it is less or equal to zero, configuring charge
+ * Value resistor_sense is needed for configuring charge and
+ * termination current. If it is less or equal to zero, configuring charge
* and termination current will not be possible.
*
* For automode support is needed to provide name of power supply device
diff --git a/include/linux/power/bq25890_charger.h b/include/linux/power/bq25890_charger.h
new file mode 100644
index 000000000000..c706ddb77a08
--- /dev/null
+++ b/include/linux/power/bq25890_charger.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Platform data for the TI bq25890 battery charger driver.
+ */
+
+#ifndef _BQ25890_CHARGER_H_
+#define _BQ25890_CHARGER_H_
+
+struct regulator_init_data;
+
+struct bq25890_platform_data {
+ const struct regulator_init_data *regulator_init_data;
+};
+
+#endif
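
The new header only carries a pointer to regulator init data, presumably for the chip's boost (OTG/VBUS) regulator. A sketch of board code wiring it up, assuming <linux/regulator/machine.h>; the constraint values are made-up placeholders:

static const struct regulator_init_data bq25890_vbus_init_data = {
	.constraints = {
		/* allow consumers to enable/disable the boost output */
		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
	},
};

static struct bq25890_platform_data bq25890_pdata = {
	.regulator_init_data = &bq25890_vbus_init_data,
};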
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index 507c5e214c42..a1aa68141d0b 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -30,6 +30,10 @@ enum bq27xxx_chip {
BQ27426,
BQ27441,
BQ27621,
+ BQ27Z561,
+ BQ28Z610,
+ BQ34Z100,
+ BQ78Z100,
};
struct bq27xxx_device_info;
@@ -50,7 +54,6 @@ struct bq27xxx_reg_cache {
int capacity;
int energy;
int flags;
- int power_avg;
int health;
};
diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h
index ad19e68e1fc3..45e228b353ea 100644
--- a/include/linux/power/charger-manager.h
+++ b/include/linux/power/charger-manager.h
@@ -31,22 +31,16 @@ enum polling_modes {
CM_POLL_CHARGING_ONLY,
};
-enum cm_event_types {
- CM_EVENT_UNKNOWN = 0,
- CM_EVENT_BATT_FULL,
- CM_EVENT_BATT_IN,
- CM_EVENT_BATT_OUT,
- CM_EVENT_BATT_OVERHEAT,
- CM_EVENT_BATT_COLD,
- CM_EVENT_EXT_PWR_IN_OUT,
- CM_EVENT_CHG_START_STOP,
- CM_EVENT_OTHERS,
+enum cm_batt_temp {
+ CM_BATT_OK = 0,
+ CM_BATT_OVERHEAT,
+ CM_BATT_COLD,
};
/**
* struct charger_cable
* @extcon_name: the name of extcon device.
- * @name: the name of charger cable(external connector).
+ * @name: the name of the cable connector
* @extcon_dev: the extcon device.
* @wq: the workqueue to control charger according to the state of
* charger cable. If charger cable is attached, enable charger.
@@ -62,9 +56,10 @@ enum cm_event_types {
struct charger_cable {
const char *extcon_name;
const char *name;
+ struct extcon_dev *extcon_dev;
+ u64 extcon_type;
 /* The charger-manager uses the Extcon framework */
- struct extcon_specific_cable_nb extcon_dev;
struct work_struct wq;
struct notifier_block nb;
@@ -131,11 +126,10 @@ struct charger_regulator {
* @psy_name: the name of power-supply-class for charger manager
* @polling_mode:
* Determine which polling mode will be used
- * @fullbatt_vchkdrop_ms:
* @fullbatt_vchkdrop_uV:
* Check voltage drop after the battery is fully charged.
- * If it has dropped more than fullbatt_vchkdrop_uV after
- * fullbatt_vchkdrop_ms, CM will restart charging.
+ * If it has dropped more than fullbatt_vchkdrop_uV
+ * CM will restart charging.
* @fullbatt_uV: voltage in microvolt
* If VBATT >= fullbatt_uV, it is assumed to be full.
* @fullbatt_soc: state of Charge in %
@@ -172,7 +166,6 @@ struct charger_desc {
enum polling_modes polling_mode;
unsigned int polling_interval_ms;
- unsigned int fullbatt_vchkdrop_ms;
unsigned int fullbatt_vchkdrop_uV;
unsigned int fullbatt_uV;
unsigned int fullbatt_soc;
@@ -211,9 +204,6 @@ struct charger_desc {
* @charger_stat: array of power_supply for chargers
* @tzd_batt : thermal zone device for battery
* @charger_enabled: the state of charger
- * @fullbatt_vchk_jiffies_at:
- * jiffies at the time full battery check will occur.
- * @fullbatt_vchk_work: work queue for full battery check
* @emergency_stop:
* When setting true, stop charging
* @psy_name_buf: the name of power-supply-class for charger manager
@@ -224,6 +214,7 @@ struct charger_desc {
* saved status of battery before entering suspend-to-RAM
* @charging_start_time: saved start time of enabling charging
* @charging_end_time: saved end time of disabling charging
+ * @battery_status: Current battery status
*/
struct charger_manager {
struct list_head entry;
@@ -235,9 +226,6 @@ struct charger_manager {
#endif
bool charger_enabled;
- unsigned long fullbatt_vchk_jiffies_at;
- struct delayed_work fullbatt_vchk_work;
-
int emergency_stop;
char psy_name_buf[PSY_NAME_MAX + 1];
@@ -246,13 +234,8 @@ struct charger_manager {
u64 charging_start_time;
u64 charging_end_time;
+
+ int battery_status;
};
-#ifdef CONFIG_CHARGER_MANAGER
-extern void cm_notify_event(struct power_supply *psy,
- enum cm_event_types type, char *msg);
-#else
-static inline void cm_notify_event(struct power_supply *psy,
- enum cm_event_types type, char *msg) { }
-#endif
#endif /* _CHARGER_MANAGER_H */
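
The cm_event_types notifier interface is gone; battery temperature is now tracked directly as one of the three cm_batt_temp states. A sketch of how a driver might classify a reading (the thresholds are hypothetical):

static int example_classify_temp(int temp_c)
{
	if (temp_c >= 50)	/* hypothetical overheat threshold */
		return CM_BATT_OVERHEAT;
	if (temp_c <= 0)	/* hypothetical cold threshold */
		return CM_BATT_COLD;
	return CM_BATT_OK;
}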
diff --git a/include/linux/power/generic-adc-battery.h b/include/linux/power/generic-adc-battery.h
index 40f9c7628f7b..c68cbf34cd34 100644
--- a/include/linux/power/generic-adc-battery.h
+++ b/include/linux/power/generic-adc-battery.h
@@ -11,16 +11,12 @@
* @battery_info: recommended structure to specify static power supply
* parameters
* @cal_charge: calculate charge level.
- * @gpio_charge_finished: gpio for the charger.
- * @gpio_inverted: Should be 1 if the GPIO is active low otherwise 0
* @jitter_delay: delay required after the interrupt to check battery
 * status. Default is 10ms.
*/
struct gab_platform_data {
struct power_supply_info battery_info;
int (*cal_charge)(long value);
- int gpio_charge_finished;
- bool gpio_inverted;
int jitter_delay;
};
diff --git a/include/linux/power/gpio-charger.h b/include/linux/power/gpio-charger.h
index 5a5a8de98181..c0b7657ac1df 100644
--- a/include/linux/power/gpio-charger.h
+++ b/include/linux/power/gpio-charger.h
@@ -13,18 +13,12 @@
* struct gpio_charger_platform_data - platform_data for gpio_charger devices
* @name: Name for the chargers power_supply device
* @type: Type of the charger
- * @gpio: GPIO which is used to indicate the chargers status
- * @gpio_active_low: Should be set to 1 if the GPIO is active low otherwise 0
* @supplied_to: Array of battery names to which this chargers supplies power
* @num_supplicants: Number of entries in the supplied_to array
*/
struct gpio_charger_platform_data {
const char *name;
enum power_supply_type type;
-
- int gpio;
- int gpio_active_low;
-
char **supplied_to;
size_t num_supplicants;
};
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
index d55c746ac56e..c417abd2ab70 100644
--- a/include/linux/power/max17042_battery.h
+++ b/include/linux/power/max17042_battery.h
@@ -69,7 +69,7 @@ enum max17042_register {
MAX17042_RelaxCFG = 0x2A,
MAX17042_MiscCFG = 0x2B,
MAX17042_TGAIN = 0x2C,
- MAx17042_TOFF = 0x2D,
+ MAX17042_TOFF = 0x2D,
MAX17042_CGAIN = 0x2E,
MAX17042_COFF = 0x2F,
@@ -78,7 +78,7 @@ enum max17042_register {
MAX17042_T_empty = 0x34,
MAX17042_FullCAP0 = 0x35,
- MAX17042_LAvg_empty = 0x36,
+ MAX17042_IAvg_empty = 0x36,
MAX17042_FCTC = 0x37,
MAX17042_RCOMP0 = 0x38,
MAX17042_TempCo = 0x39,
@@ -110,13 +110,14 @@ enum max17042_register {
MAX17042_VFSOC = 0xFF,
};
+/* Registers specific to max17055 only */
enum max17055_register {
MAX17055_QRes = 0x0C,
+ MAX17055_RCell = 0x14,
MAX17055_TTF = 0x20,
- MAX17055_V_empty = 0x3A,
- MAX17055_TIMER = 0x3E,
+ MAX17055_DieTemp = 0x34,
MAX17055_USER_MEM = 0x40,
- MAX17055_RGAIN = 0x42,
+ MAX17055_RGAIN = 0x43,
MAX17055_ConvgCfg = 0x49,
MAX17055_VFRemCap = 0x4A,
@@ -155,13 +156,14 @@ enum max17055_register {
MAX17055_AtAvCap = 0xDF,
};
-/* Registers specific to max17047/50 */
+/* Registers specific to max17047/50/55 */
enum max17047_register {
MAX17047_QRTbl00 = 0x12,
MAX17047_FullSOCThr = 0x13,
MAX17047_QRTbl10 = 0x22,
MAX17047_QRTbl20 = 0x32,
MAX17047_V_empty = 0x3A,
+ MAX17047_TIMER = 0x3E,
MAX17047_QRTbl30 = 0x42,
};
@@ -219,7 +221,7 @@ struct max17042_config_data {
u16 fullcap; /* 0x10 */
u16 fullcapnom; /* 0x23 */
u16 socempty; /* 0x33 */
- u16 lavg_empty; /* 0x36 */
+ u16 iavg_empty; /* 0x36 */
u16 dqacc; /* 0x45 */
u16 dpacc; /* 0x46 */
u16 qrtbl00; /* 0x12 */
diff --git a/include/linux/power/max8903_charger.h b/include/linux/power/max8903_charger.h
deleted file mode 100644
index 02f94a1b323b..000000000000
--- a/include/linux/power/max8903_charger.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * max8903_charger.h - Maxim 8903 USB/Adapter Charger Driver
- *
- * Copyright (C) 2011 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- */
-
-#ifndef __MAX8903_CHARGER_H__
-#define __MAX8903_CHARGER_H__
-
-struct max8903_pdata {
- /*
- * GPIOs
- * cen, chg, flt, dcm and usus are optional.
- * dok and uok are not optional depending on the status of
- * dc_valid and usb_valid.
- */
- int cen; /* Charger Enable input */
- int dok; /* DC(Adapter) Power OK output */
- int uok; /* USB Power OK output */
- int chg; /* Charger status output */
- int flt; /* Fault output */
- int dcm; /* Current-Limit Mode input (1: DC, 2: USB) */
- int usus; /* USB Suspend Input (1: suspended) */
-
- /*
- * DC(Adapter/TA) is wired
- * When dc_valid is true,
- * dok should be valid.
- *
- * At least one of dc_valid or usb_valid should be true.
- */
- bool dc_valid;
- /*
- * USB is wired
- * When usb_valid is true,
- * uok should be valid.
- */
- bool usb_valid;
-};
-
-#endif /* __MAX8903_CHARGER_H__ */
diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
index 971c9264179e..167b9b040091 100644
--- a/include/linux/power/smartreflex.h
+++ b/include/linux/power/smartreflex.h
@@ -155,6 +155,7 @@ struct omap_sr {
struct voltagedomain *voltdm;
struct dentry *dbg_dir;
unsigned int irq;
+ struct clk *fck;
int srid;
int ip_type;
int nvalue_count;
@@ -169,6 +170,7 @@ struct omap_sr {
u32 senp_mod;
u32 senn_mod;
void __iomem *base;
+ unsigned long enabled:1;
};
/**
diff --git a/include/linux/power/smb347-charger.h b/include/linux/power/smb347-charger.h
deleted file mode 100644
index e0b687a4d20c..000000000000
--- a/include/linux/power/smb347-charger.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Summit Microelectronics SMB347 Battery Charger Driver
- *
- * Copyright (C) 2011, Intel Corporation
- *
- * Authors: Bruce E. Robertson <bruce.e.robertson@intel.com>
- * Mika Westerberg <mika.westerberg@linux.intel.com>
- */
-
-#ifndef SMB347_CHARGER_H
-#define SMB347_CHARGER_H
-
-#include <linux/types.h>
-#include <linux/power_supply.h>
-
-enum {
- /* use the default compensation method */
- SMB347_SOFT_TEMP_COMPENSATE_DEFAULT = -1,
-
- SMB347_SOFT_TEMP_COMPENSATE_NONE,
- SMB347_SOFT_TEMP_COMPENSATE_CURRENT,
- SMB347_SOFT_TEMP_COMPENSATE_VOLTAGE,
-};
-
-/* Use default factory programmed value for hard/soft temperature limit */
-#define SMB347_TEMP_USE_DEFAULT -273
-
-/*
- * Charging enable can be controlled by software (via i2c) by
- * smb347-charger driver or by EN pin (active low/high).
- */
-enum smb347_chg_enable {
- SMB347_CHG_ENABLE_SW,
- SMB347_CHG_ENABLE_PIN_ACTIVE_LOW,
- SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH,
-};
-
-/**
- * struct smb347_charger_platform_data - platform data for SMB347 charger
- * @battery_info: Information about the battery
- * @max_charge_current: maximum current (in uA) the battery can be charged
- * @max_charge_voltage: maximum voltage (in uV) the battery can be charged
- * @pre_charge_current: current (in uA) to use in pre-charging phase
- * @termination_current: current (in uA) used to determine when the
- * charging cycle terminates
- * @pre_to_fast_voltage: voltage (in uV) treshold used for transitioning to
- * pre-charge to fast charge mode
- * @mains_current_limit: maximum input current drawn from AC/DC input (in uA)
- * @usb_hc_current_limit: maximum input high current (in uA) drawn from USB
- * input
- * @chip_temp_threshold: die temperature where device starts limiting charge
- * current [%100 - %130] (in degree C)
- * @soft_cold_temp_limit: soft cold temperature limit [%0 - %15] (in degree C),
- * granularity is 5 deg C.
- * @soft_hot_temp_limit: soft hot temperature limit [%40 - %55] (in degree C),
- * granularity is 5 deg C.
- * @hard_cold_temp_limit: hard cold temperature limit [%-5 - %10] (in degree C),
- * granularity is 5 deg C.
- * @hard_hot_temp_limit: hard hot temperature limit [%50 - %65] (in degree C),
- * granularity is 5 deg C.
- * @suspend_on_hard_temp_limit: suspend charging when hard limit is hit
- * @soft_temp_limit_compensation: compensation method when soft temperature
- * limit is hit
- * @charge_current_compensation: current (in uA) for charging compensation
- * current when temperature hits soft limits
- * @use_mains: AC/DC input can be used
- * @use_usb: USB input can be used
- * @use_usb_otg: USB OTG output can be used (not implemented yet)
- * @irq_gpio: GPIO number used for interrupts (%-1 if not used)
- * @enable_control: how charging enable/disable is controlled
- * (driver/pin controls)
- *
- * @use_main, @use_usb, and @use_usb_otg are means to enable/disable
- * hardware support for these. This is useful when we want to have for
- * example OTG charging controlled via OTG transceiver driver and not by
- * the SMB347 hardware.
- *
- * Hard and soft temperature limit values are given as described in the
- * device data sheet and assuming NTC beta value is %3750. Even if this is
- * not the case, these values should be used. They can be mapped to the
- * corresponding NTC beta values with the help of table %2 in the data
- * sheet. So for example if NTC beta is %3375 and we want to program hard
- * hot limit to be %53 deg C, @hard_hot_temp_limit should be set to %50.
- *
- * If zero value is given in any of the current and voltage values, the
- * factory programmed default will be used. For soft/hard temperature
- * values, pass in %SMB347_TEMP_USE_DEFAULT instead.
- */
-struct smb347_charger_platform_data {
- struct power_supply_info battery_info;
- unsigned int max_charge_current;
- unsigned int max_charge_voltage;
- unsigned int pre_charge_current;
- unsigned int termination_current;
- unsigned int pre_to_fast_voltage;
- unsigned int mains_current_limit;
- unsigned int usb_hc_current_limit;
- unsigned int chip_temp_threshold;
- int soft_cold_temp_limit;
- int soft_hot_temp_limit;
- int hard_cold_temp_limit;
- int hard_hot_temp_limit;
- bool suspend_on_hard_temp_limit;
- unsigned int soft_temp_limit_compensation;
- unsigned int charge_current_compensation;
- bool use_mains;
- bool use_usb;
- bool use_usb_otg;
- int irq_gpio;
- enum smb347_chg_enable enable_control;
-};
-
-#endif /* SMB347_CHARGER_H */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index dcd5a71e6c67..aa2c4a7c4826 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -48,6 +48,8 @@ enum {
POWER_SUPPLY_CHARGE_TYPE_STANDARD, /* normal speed */
POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE, /* dynamically adjusted speed */
POWER_SUPPLY_CHARGE_TYPE_CUSTOM, /* use CHARGE_CONTROL_* props */
+ POWER_SUPPLY_CHARGE_TYPE_LONGLIFE, /* slow speed, longer life */
+ POWER_SUPPLY_CHARGE_TYPE_BYPASS, /* bypassing the charger */
};
enum {
@@ -61,6 +63,11 @@ enum {
POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE,
POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE,
POWER_SUPPLY_HEALTH_OVERCURRENT,
+ POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED,
+ POWER_SUPPLY_HEALTH_WARM,
+ POWER_SUPPLY_HEALTH_COOL,
+ POWER_SUPPLY_HEALTH_HOT,
+ POWER_SUPPLY_HEALTH_NO_BATTERY,
};
enum {
@@ -127,6 +134,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD, /* in percents! */
POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD, /* in percents! */
+ POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR,
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT,
POWER_SUPPLY_PROP_INPUT_POWER_LIMIT,
@@ -139,6 +147,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_CAPACITY, /* in percents! */
POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN, /* in percents! */
POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX, /* in percents! */
+ POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN, /* in percents! */
POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TEMP_MAX,
@@ -158,6 +167,9 @@ enum power_supply_property {
POWER_SUPPLY_PROP_PRECHARGE_CURRENT,
POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
POWER_SUPPLY_PROP_CALIBRATE,
+ POWER_SUPPLY_PROP_MANUFACTURE_YEAR,
+ POWER_SUPPLY_PROP_MANUFACTURE_MONTH,
+ POWER_SUPPLY_PROP_MANUFACTURE_DAY,
/* Properties of type `const char *' */
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
@@ -177,6 +189,7 @@ enum power_supply_type {
POWER_SUPPLY_TYPE_USB_PD, /* Power Delivery Port */
POWER_SUPPLY_TYPE_USB_PD_DRP, /* PD Dual Role Port */
POWER_SUPPLY_TYPE_APPLE_BRICK_ID, /* Apple Charging Method */
+ POWER_SUPPLY_TYPE_WIRELESS, /* Wireless */
};
enum power_supply_usb_type {
@@ -192,6 +205,12 @@ enum power_supply_usb_type {
POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID, /* Apple Charging Method */
};
+enum power_supply_charge_behaviour {
+ POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO = 0,
+ POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE,
+ POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE,
+};
+
enum power_supply_notifier_events {
PSY_EVENT_PROP_CHANGED,
};
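
The charge-behaviour enum above pairs with POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR, and the next hunk constifies the desc's property tables so they can live in rodata. A minimal sketch of a driver-side table under those assumptions (all names hypothetical; a real desc also needs get_property and friends):

static const enum power_supply_property example_props[] = {
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR,
};

static const struct power_supply_desc example_desc = {
	.name		= "example-battery",
	.type		= POWER_SUPPLY_TYPE_BATTERY,
	.properties	= example_props,
	.num_properties	= ARRAY_SIZE(example_props),
};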
@@ -223,9 +242,9 @@ struct power_supply_config {
struct power_supply_desc {
const char *name;
enum power_supply_type type;
- enum power_supply_usb_type *usb_types;
+ const enum power_supply_usb_type *usb_types;
size_t num_usb_types;
- enum power_supply_property *properties;
+ const enum power_supply_property *properties;
size_t num_properties;
/*
@@ -330,39 +349,434 @@ struct power_supply_resistance_temp_table {
int resistance; /* internal resistance percent */
};
+struct power_supply_vbat_ri_table {
+ int vbat_uv; /* Battery voltage in microvolt */
+ int ri_uohm; /* Internal resistance in microohm */
+};
+
+/**
+ * struct power_supply_maintenance_charge_table - setting for maintenance charging
+ * @charge_current_max_ua: maintenance charging current that is used to keep
+ * the charge of the battery full as current is consumed after full charging.
+ * The corresponding charge_voltage_max_uv is used as a safeguard: when we
+ * reach this voltage the maintenance charging current is turned off. It is
+ * turned back on if we fall below this voltage.
+ * @charge_voltage_max_uv: maintenance charging voltage that is usually a bit
+ * lower than the constant_charge_voltage_max_uv. We can apply this setting's
+ * charge_current_max_ua until we get back up to this voltage.
+ * @charge_safety_timer_minutes: maintenance charging safety timer, with an expiry
+ * time in minutes. We will only use maintenance charging in this setting
+ * for a certain amount of time, then we will first move to the next
+ * maintenance charge current and voltage pair in the respective array and wait
+ * for the next safety timer timeout, or, if we reached the last maintenance
+ * charging setting, disable charging until we reach
+ * charge_restart_voltage_uv and restart ordinary CC/CV charging from there.
+ * These timers should be chosen to align with the typical discharge curve
+ * for the battery.
+ *
+ * Ordinary CC/CV charging will stop charging when the charge current goes
+ * below charge_term_current_ua, and then restart it (if the device is still
+ * plugged into the charger) at charge_restart_voltage_uv. This happens in most
+ * consumer products because the power usage while connected to a charger is
+ * not zero, and devices are not manufactured to draw power directly from the
+ * charger: instead they will at all times dissipate the battery a little, like
+ * the power used in standby mode. This will over time give a charge graph
+ * such as this:
+ *
+ * Energy
+ * ^ ... ... ... ... ... ... ...
+ * | . . . . . . . . . . . . .
+ * | .. . .. . .. . .. . .. . .. . ..
+ * |. .. .. .. .. .. ..
+ * +-------------------------------------------------------------------> t
+ *
+ * Practically this means that the Li-ions are wandering back and forth in the
+ * battery and this causes degeneration of the battery anode and cathode.
+ * To prolong the life of the battery, maintenance charging is applied after
+ * reaching charge_term_current_ua to hold up the charge in the battery while
+ * consuming power, thus lowering the wear on the battery:
+ *
+ * Energy
+ * ^ .......................................
+ * | . ......................
+ * | ..
+ * |.
+ * +-------------------------------------------------------------------> t
+ *
+ * Maintenance charging uses the voltages from this table: a table of settings
+ * is traversed using a slightly lower current and voltage than what is used for
+ * CC/CV charging. The maintenance charging will for safety reasons not go on
+ * indefinitely: we lower the current and voltage with successive maintenance
+ * settings, then disable charging completely after we reach the last one,
+ * and after that we do not restart charging until we reach
+ * charge_restart_voltage_uv (see struct power_supply_battery_info) and restart
+ * ordinary CC/CV charging from there.
+ *
+ * As an example, a Samsung EB425161LA Lithium-Ion battery is CC/CV charged
+ * at 900mA to 4340mV, then maintenance charged at 600mA and 4150mV for up to
+ * 60 hours, then maintenance charged at 600mA and 4100mV for up to 200 hours.
+ * After this the charge cycle is restarted waiting for
+ * charge_restart_voltage_uv.
+ *
+ * For most mobile electronics this type of maintenance charging is enough for
+ * the user to disconnect the device and make use of it before both maintenance
+ * charging cycles are complete, if the current and voltage has been chosen
+ * appropriately. These need to be determined from battery discharge curves
+ * and expected standby current.
+ *
+ * If the voltage anyway drops to charge_restart_voltage_uv during maintenance
+ * charging, ordinary CC/CV charging is restarted. This can happen if the
+ * device is e.g. actively used during charging, so more current is drawn than
+ * the expected stand-by current. Also overvoltage protection will be applied
+ * as usual.
+ */
+struct power_supply_maintenance_charge_table {
+ int charge_current_max_ua;
+ int charge_voltage_max_uv;
+ int charge_safety_timer_minutes;
+};
+
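
As a concrete instance, the Samsung EB425161LA example from the comment above could be encoded like this (60 and 200 hours expressed in minutes):

static struct power_supply_maintenance_charge_table example_maint_charge[] = {
	{
		.charge_current_max_ua		= 600000,
		.charge_voltage_max_uv		= 4150000,
		.charge_safety_timer_minutes	= 60 * 60,	/* 60 hours */
	},
	{
		.charge_current_max_ua		= 600000,
		.charge_voltage_max_uv		= 4100000,
		.charge_safety_timer_minutes	= 200 * 60,	/* 200 hours */
	},
};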
#define POWER_SUPPLY_OCV_TEMP_MAX 20
-/*
+/**
+ * struct power_supply_battery_info - information about batteries
+ * @technology: from the POWER_SUPPLY_TECHNOLOGY_* enum
+ * @energy_full_design_uwh: energy content when fully charged in microwatt
+ * hours
+ * @charge_full_design_uah: charge content when fully charged in microampere
+ * hours
+ * @voltage_min_design_uv: minimum voltage across the poles when the battery
+ * is at minimum voltage level in microvolts. If the voltage drops below this
+ * level the battery will need precharging when using CC/CV charging.
+ * @voltage_max_design_uv: voltage across the poles when the battery is fully
+ * charged in microvolts. This is the "nominal voltage" i.e. the voltage
+ * printed on the label of the battery.
+ * @tricklecharge_current_ua: the tricklecharge current used when trickle
+ * charging the battery in microamperes. This is the charging phase when the
+ * battery is completely empty and we need to carefully trickle in some
+ * charge until we reach the precharging voltage.
+ * @precharge_current_ua: current to use in the precharge phase in microamperes,
+ * the precharge rate is limited by limiting the current to this value.
+ * @precharge_voltage_max_uv: the maximum voltage allowed when precharging in
+ * microvolts. When we pass this voltage we will nominally switch over to the
+ * CC (constant current) charging phase defined by constant_charge_current_ua
+ * and constant_charge_voltage_max_uv.
+ * @charge_term_current_ua: when the current in the CV (constant voltage)
+ * charging phase drops below this value in microamperes the charging will
+ * terminate completely and not restart until the voltage over the battery
+ * poles reach charge_restart_voltage_uv unless we use maintenance charging.
+ * @charge_restart_voltage_uv: when the battery has been fully charged by
+ * CC/CV charging and charging has been disabled, and the voltage subsequently
+ * drops below this value in microvolts, the charging will be restarted
+ * (typically using CV charging).
+ * @overvoltage_limit_uv: If the voltage exceeds the nominal voltage
+ * voltage_max_design_uv and we reach this voltage level, all charging must
+ * stop and emergency procedures take place, such as shutting down the system
+ * in some cases.
+ * @constant_charge_current_max_ua: current in microamperes to use in the CC
+ * (constant current) charging phase. The charging rate is limited
+ * by this current. This is the main charging phase and as the current is
+ * constant into the battery the voltage slowly ascends to
+ * constant_charge_voltage_max_uv.
+ * @constant_charge_voltage_max_uv: voltage in microvolts signifying the end of
+ * the CC (constant current) charging phase and the beginning of the CV
+ * (constant voltage) charging phase.
+ * @maintenance_charge: an array of maintenance charging settings to be used
+ * after the main CC/CV charging phase is complete.
+ * @maintenance_charge_size: the number of maintenance charging settings in
+ * maintenance_charge.
+ * @alert_low_temp_charge_current_ua: The charging current to use if the battery
+ * enters low alert temperature, i.e. if the internal temperature is between
+ * temp_alert_min and temp_min. No matter the charging phase, this
+ * and alert_low_temp_charge_voltage_uv will be applied.
+ * @alert_low_temp_charge_voltage_uv: Same as alert_low_temp_charge_current_ua,
+ * but for the charging voltage.
+ * @alert_high_temp_charge_current_ua: The charging current to use if the
+ * battery enters high alert temperature, i.e. if the internal temperature is
+ * between temp_alert_max and temp_max. No matter the charging phase, this
+ * and alert_high_temp_charge_voltage_uv will be applied, usually lowering
+ * the charging current as an evasive maneuver.
+ * @alert_high_temp_charge_voltage_uv: Same as
+ * alert_high_temp_charge_current_ua, but for the charging voltage.
+ * @factory_internal_resistance_uohm: the internal resistance of the battery
+ * at fabrication time, expressed in microohms. This resistance will vary
+ * depending on the lifetime and charge of the battery, so this is just a
+ * nominal ballpark figure. This internal resistance is given for the state
+ * when the battery is discharging.
+ * @factory_internal_resistance_charging_uohm: the internal resistance of the
+ * battery at fabrication time while charging, expressed in microohms.
+ * The charging process will affect the internal resistance of the battery
+ * so this value provides a better resistance under these circumstances.
+ * This resistance will vary depending on the lifetime and charge of the
+ * battery, so this is just a nominal ballpark figure.
+ * @ocv_temp: array indicating the open circuit voltage (OCV) capacity
+ * temperature indices. This is an array of temperatures in degrees Celsius
+ * indicating which capacity table to use for a certain temperature, since
+ * the capacity for reasons of chemistry will be different at different
+ * temperatures. Determining capacity is a multivariate problem and the
+ * temperature is the first variable we determine.
+ * @temp_ambient_alert_min: the battery will go outside of operating conditions
+ * when the ambient temperature goes below this temperature in degrees
+ * Celsius.
+ * @temp_ambient_alert_max: the battery will go outside of operating conditions
+ * when the ambient temperature goes above this temperature in degrees
+ * Celsius.
+ * @temp_alert_min: the battery should issue an alert if the internal
+ * temperature goes below this temperature in degrees Celsius.
+ * @temp_alert_max: the battery should issue an alert if the internal
+ * temperature goes above this temperature in degrees Celsius.
+ * @temp_min: the battery will go outside of operating conditions when
+ * the internal temperature goes below this temperature in degrees Celsius.
+ * Normally this means the system should shut down.
+ * @temp_max: the battery will go outside of operating conditions when
+ * the internal temperature goes above this temperature in degrees Celsius.
+ * Normally this means the system should shut down.
+ * @ocv_table: for each entry in ocv_temp there is a corresponding entry in
+ * ocv_table and a size for each entry in ocv_table_size. These arrays
+ * determine the capacity in percent in relation to the voltage in microvolts
+ * at the indexed temperature.
+ * @ocv_table_size: for each entry in ocv_temp this array is giving the size of
+ * each entry in the array of capacity arrays in ocv_table.
+ * @resist_table: this is a table that correlates a battery temperature to the
+ * expected internal resistance at this temperature. The resistance is given
+ * as a percentage of factory_internal_resistance_uohm. Knowing the
+ * resistance of the battery is usually necessary for calculating the open
+ * circuit voltage (OCV) that is then used with the ocv_table to calculate
+ * the capacity of the battery. The resist_table must be ordered descending
+ * by temperature: highest temperature with lowest resistance first, lowest
+ * temperature with highest resistance last.
+ * @resist_table_size: the number of items in the resist_table.
+ * @vbat2ri_discharging: this is a table that correlates Battery voltage (VBAT)
+ * to internal resistance (Ri). The resistance is given in microohm for the
+ * corresponding voltage in microvolts. The internal resistance is used to
+ * determine the open circuit voltage so that we can determine the capacity
+ * of the battery. These voltages to resistance tables apply when the battery
+ * is discharging. The table must be ordered descending by voltage: highest
+ * voltage first.
+ * @vbat2ri_discharging_size: the number of items in the vbat2ri_discharging
+ * table.
+ * @vbat2ri_charging: same function as vbat2ri_discharging but for the state
+ * when the battery is charging. Being under charge changes the battery's
+ * internal resistance characteristics, so a separate table is needed.
+ * The table must be ordered descending by voltage: highest voltage first.
+ * @vbat2ri_charging_size: the number of items in the vbat2ri_charging
+ * table.
+ * @bti_resistance_ohm: The Battery Type Indicator (BTI) nominal resistance
+ * in ohms for this battery, if an identification resistor is mounted
+ * between a third battery terminal and ground. This scheme is used by a lot
+ * of mobile device batteries.
+ * @bti_resistance_tolerance: The tolerance in percent of the BTI resistance,
+ * for example 10 for +/- 10%, if the bti_resistance is set to 7000 and the
+ * tolerance is 10% we will detect a proper battery if the BTI resistance
+ * is between 6300 and 7700 Ohm.
+ *
* This is the recommended struct to manage static battery parameters,
* populated by power_supply_get_battery_info(). Most platform drivers should
* use these for consistency.
+ *
* Its field names must correspond to elements in enum power_supply_property.
- * The default field value is -EINVAL.
- * Power supply class itself doesn't use this.
+ * The default field value is -EINVAL or NULL for pointers.
+ *
+ * CC/CV CHARGING:
+ *
+ * The charging parameters here assume a CC/CV charging scheme. This method
+ * is most common with Lithium Ion batteries (other methods are possible) and
+ * looks as follows:
+ *
+ * ^ Battery voltage
+ * | --- overvoltage_limit_uv
+ * |
+ * | ...................................................
+ * | .. constant_charge_voltage_max_uv
+ * | ..
+ * | .
+ * | .
+ * | .
+ * | .
+ * | .
+ * | .. precharge_voltage_max_uv
+ * | ..
+ * |. (trickle charging)
+ * +------------------------------------------------------------------> time
+ *
+ * ^ Current into the battery
+ * |
+ * | ............. constant_charge_current_max_ua
+ * | . .
+ * | . .
+ * | . .
+ * | . .
+ * | . ..
+ * | . ....
+ * | . .....
+ * | ... precharge_current_ua ....... charge_term_current_ua
+ * | . .
+ * | . .
+ * |.... tricklecharge_current_ua .
+ * | .
+ * +-----------------------------------------------------------------> time
+ *
+ * These diagrams are synchronized on time and the voltage and current
+ * follow each other.
+ *
+ * CC/CV charging commences over time like this for an empty battery:
+ *
+ * 1. When the battery is completely empty it may need to be charged with
+ * an especially small current so that electrons just "trickle in",
+ * this is the tricklecharge_current_ua.
+ *
+ * 2. Next a small initial pre-charge current (precharge_current_ua)
+ * is applied if the voltage is below precharge_voltage_max_uv until we
+ * reach precharge_voltage_max_uv. CAUTION: in some texts this is referred
+ * to as "trickle charging" but the use in the Linux kernel is different
+ * see below!
+ *
+ * 3. Then the main charging current is applied, which is called the constant
+ * current (CC) phase. A current regulator is set up to allow
+ * constant_charge_current_max_ua of current to flow into the battery.
+ * The chemical reaction in the battery will make the voltage go up as
+ * charge goes into the battery. This current is applied until we reach
+ * the constant_charge_voltage_max_uv voltage.
+ *
+ * 4. At this voltage we switch over to the constant voltage (CV) phase. This
+ * means we allow current to go into the battery, but we keep the voltage
+ * fixed. This current will continue to charge the battery while keeping
+ * the voltage the same. A chemical reaction in the battery goes on
+ * storing energy without affecting the voltage. Over time the current
+ * will slowly drop and when we reach charge_term_current_ua we will
+ * end the constant voltage phase.
+ *
+ * After this the battery is fully charged, and if we do not support maintenance
+ * charging, the charging will not restart until power dissipation makes the
+ * voltage fall so that we reach charge_restart_voltage_uv and at this point
+ * we restart charging at the appropriate phase, usually this will be inside
+ * the CV phase.
+ *
+ * If we support maintenance charging the voltage is however kept high after
+ * the CV phase with a very low current. This is meant to let the same charge
+ * go in for usage while the charger is still connected, mainly to cover
+ * the dissipation of the power consuming entity while it is connected to
+ * the charger.
+ *
+ * All charging MUST terminate if the overvoltage_limit_uv is ever reached.
+ * Overcharging Lithium Ion cells can be DANGEROUS and lead to fire or
+ * explosions.
+ *
+ * DETERMINING BATTERY CAPACITY:
+ *
+ * Several members of the struct deal with trying to determine the remaining
+ * capacity in the battery, usually as a percentage of charge. In practice
+ * many chargers use a so-called fuel gauge or coulomb counter that measures
+ * how much charge goes into the battery and how much goes out (+/- leak
+ * consumption). This does not help if we do not know how much capacity the
+ * battery has to begin with, such as when it is first used or was taken out
+ * and charged in a separate charger. Therefore many capacity algorithms use
+ * the open circuit voltage with a look-up table to determine the rough
+ * capacity of the battery. The open circuit voltage can be conceptualized
+ * with an ideal voltage source (V) in series with an internal resistance (Ri)
+ * like this:
+ *
+ * +-------> IBAT >----------------+
+ * | ^ |
+ * [ ] Ri | |
+ * | | VBAT |
+ * o <---------- | |
+ * +| ^ | [ ] Rload
+ * .---. | | |
+ * | V | | OCV | |
+ * '---' | | |
+ * | | | |
+ * GND +-------------------------------+
+ *
+ * If we disconnect the load (here simplified as a fixed resistance Rload)
+ * and measure VBAT with an infinite impedance voltage meter we will get
+ * VBAT = OCV and this assumption is sometimes made even under load, assuming
+ * Rload is insignificant. However this will be of dubious quality because the
+ * load is rarely that small and Ri is strongly nonlinear depending on
+ * temperature and how much capacity is left in the battery due to the
+ * chemistry involved.
+ *
+ * In many practical applications we cannot just disconnect the battery from
+ * the load, so instead we often try to measure the instantaneous IBAT (the
+ * current out from the battery), estimate the Ri and thus calculate the
+ * voltage drop over Ri and compensate like this:
+ *
+ * OCV = VBAT - (IBAT * Ri)
+ *
+ * The tables vbat2ri_discharging and vbat2ri_charging are used to determine
+ * (by interpolation) the Ri from the VBAT under load. These curves are highly
+ * nonlinear and may need many datapoints but can be found in datasheets for
+ * some batteries. This gives the compensated open circuit voltage (OCV) for
+ * the battery even under load. Using this method will also compensate for
+ * temperature changes in the environment: this will also make the internal
+ * resistance change, and it will affect the VBAT under load, so correlating
+ * VBAT to Ri takes both remaining capacity and temperature into consideration.
+ *
+ * Alternatively a manufacturer can specify how the capacity of the battery
+ * is dependent on the battery temperature which is the main factor affecting
+ * Ri. As we know, all chemical reactions are faster when it is warm and slower
+ * when it is cold. You can put in 1500mAh and only get 800mAh out before the
+ * voltage drops too low, for example. This effect is also highly nonlinear,
+ * and this is what the resist_table captures: it takes a temperature and
+ * tells us how big a percentage of Ri the specified temperature corresponds to.
+ * Usually we have 100% of the factory_internal_resistance_uohm at 25 degrees
+ * Celsius.
+ *
+ * The power supply class itself doesn't use this struct as of now.
*/
struct power_supply_battery_info {
- int energy_full_design_uwh; /* microWatt-hours */
- int charge_full_design_uah; /* microAmp-hours */
- int voltage_min_design_uv; /* microVolts */
- int voltage_max_design_uv; /* microVolts */
- int precharge_current_ua; /* microAmps */
- int charge_term_current_ua; /* microAmps */
- int constant_charge_current_max_ua; /* microAmps */
- int constant_charge_voltage_max_uv; /* microVolts */
- int factory_internal_resistance_uohm; /* microOhms */
- int ocv_temp[POWER_SUPPLY_OCV_TEMP_MAX];/* celsius */
+ unsigned int technology;
+ int energy_full_design_uwh;
+ int charge_full_design_uah;
+ int voltage_min_design_uv;
+ int voltage_max_design_uv;
+ int tricklecharge_current_ua;
+ int precharge_current_ua;
+ int precharge_voltage_max_uv;
+ int charge_term_current_ua;
+ int charge_restart_voltage_uv;
+ int overvoltage_limit_uv;
+ int constant_charge_current_max_ua;
+ int constant_charge_voltage_max_uv;
+ struct power_supply_maintenance_charge_table *maintenance_charge;
+ int maintenance_charge_size;
+ int alert_low_temp_charge_current_ua;
+ int alert_low_temp_charge_voltage_uv;
+ int alert_high_temp_charge_current_ua;
+ int alert_high_temp_charge_voltage_uv;
+ int factory_internal_resistance_uohm;
+ int factory_internal_resistance_charging_uohm;
+ int ocv_temp[POWER_SUPPLY_OCV_TEMP_MAX];
+ int temp_ambient_alert_min;
+ int temp_ambient_alert_max;
+ int temp_alert_min;
+ int temp_alert_max;
+ int temp_min;
+ int temp_max;
struct power_supply_battery_ocv_table *ocv_table[POWER_SUPPLY_OCV_TEMP_MAX];
int ocv_table_size[POWER_SUPPLY_OCV_TEMP_MAX];
struct power_supply_resistance_temp_table *resist_table;
int resist_table_size;
+ struct power_supply_vbat_ri_table *vbat2ri_discharging;
+ int vbat2ri_discharging_size;
+ struct power_supply_vbat_ri_table *vbat2ri_charging;
+ int vbat2ri_charging_size;
+ int bti_resistance_ohm;
+ int bti_resistance_tolerance;
};
extern struct atomic_notifier_head power_supply_notifier;
extern int power_supply_reg_notifier(struct notifier_block *nb);
extern void power_supply_unreg_notifier(struct notifier_block *nb);
+#if IS_ENABLED(CONFIG_POWER_SUPPLY)
extern struct power_supply *power_supply_get_by_name(const char *name);
extern void power_supply_put(struct power_supply *psy);
+#else
+static inline void power_supply_put(struct power_supply *psy) {}
+static inline struct power_supply *power_supply_get_by_name(const char *name)
+{ return NULL; }
+#endif
#ifdef CONFIG_OF
extern struct power_supply *power_supply_get_by_phandle(struct device_node *np,
const char *property);
@@ -378,7 +792,7 @@ devm_power_supply_get_by_phandle(struct device *dev, const char *property)
#endif /* CONFIG_OF */
extern int power_supply_get_battery_info(struct power_supply *psy,
- struct power_supply_battery_info *info);
+ struct power_supply_battery_info **info_out);
extern void power_supply_put_battery_info(struct power_supply *psy,
struct power_supply_battery_info *info);
extern int power_supply_ocv2cap_simple(struct power_supply_battery_ocv_table *table,
@@ -391,12 +805,43 @@ extern int power_supply_batinfo_ocv2cap(struct power_supply_battery_info *info,
extern int
power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table,
int table_len, int temp);
+extern int power_supply_vbat2ri(struct power_supply_battery_info *info,
+ int vbat_uv, bool charging);
+extern struct power_supply_maintenance_charge_table *
+power_supply_get_maintenance_charging_setting(struct power_supply_battery_info *info, int index);
+extern bool power_supply_battery_bti_in_range(struct power_supply_battery_info *info,
+ int resistance);
extern void power_supply_changed(struct power_supply *psy);
extern int power_supply_am_i_supplied(struct power_supply *psy);
-extern int power_supply_set_input_current_limit_from_supplier(
- struct power_supply *psy);
+int power_supply_get_property_from_supplier(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val);
extern int power_supply_set_battery_charged(struct power_supply *psy);
+static inline bool
+power_supply_supports_maintenance_charging(struct power_supply_battery_info *info)
+{
+ struct power_supply_maintenance_charge_table *mt;
+
+ mt = power_supply_get_maintenance_charging_setting(info, 0);
+
+ return (mt != NULL);
+}
+
+static inline bool
+power_supply_supports_vbat2ri(struct power_supply_battery_info *info)
+{
+ return ((info->vbat2ri_discharging != NULL) &&
+ info->vbat2ri_discharging_size > 0);
+}
+
+static inline bool
+power_supply_supports_temp2ri(struct power_supply_battery_info *info)
+{
+ return ((info->resist_table != NULL) &&
+ info->resist_table_size > 0);
+}
+
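
power_supply_get_battery_info() now hands back a pointer instead of filling a caller-provided struct, and the supports_* helpers above let a driver probe for optional data. A sketch of the resulting probe-time pattern (the surrounding function is hypothetical):

static int example_parse_battery(struct power_supply *psy)
{
	struct power_supply_battery_info *info;
	int ret;

	ret = power_supply_get_battery_info(psy, &info);
	if (ret)
		return ret;

	if (power_supply_supports_maintenance_charging(info)) {
		/* program the maintenance charge current/voltage pairs */
	}
	if (power_supply_supports_vbat2ri(info)) {
		/* enable Ri-compensated OCV capacity estimation */
	}

	power_supply_put_battery_info(psy, info);
	return 0;
}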
#ifdef CONFIG_POWER_SUPPLY
extern int power_supply_is_system_supplied(void);
#else
@@ -406,9 +851,16 @@ static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
extern int power_supply_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val);
+#if IS_ENABLED(CONFIG_POWER_SUPPLY)
extern int power_supply_set_property(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val);
+#else
+static inline int power_supply_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{ return 0; }
+#endif
extern int power_supply_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp);
extern void power_supply_external_power_changed(struct power_supply *psy);
@@ -456,12 +908,12 @@ static inline bool power_supply_is_amp_property(enum power_supply_property psp)
case POWER_SUPPLY_PROP_CURRENT_NOW:
case POWER_SUPPLY_PROP_CURRENT_AVG:
case POWER_SUPPLY_PROP_CURRENT_BOOT:
- return 1;
+ return true;
default:
break;
}
- return 0;
+ return false;
}
static inline bool power_supply_is_watt_property(enum power_supply_property psp)
@@ -484,12 +936,12 @@ static inline bool power_supply_is_watt_property(enum power_supply_property psp)
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
case POWER_SUPPLY_PROP_POWER_NOW:
- return 1;
+ return true;
default:
break;
}
- return 0;
+ return false;
}
#ifdef CONFIG_POWER_SUPPLY_HWMON
@@ -505,4 +957,28 @@ static inline
void power_supply_remove_hwmon_sysfs(struct power_supply *psy) {}
#endif
+#ifdef CONFIG_SYSFS
+ssize_t power_supply_charge_behaviour_show(struct device *dev,
+ unsigned int available_behaviours,
+ enum power_supply_charge_behaviour behaviour,
+ char *buf);
+
+int power_supply_charge_behaviour_parse(unsigned int available_behaviours, const char *buf);
+#else
+static inline
+ssize_t power_supply_charge_behaviour_show(struct device *dev,
+ unsigned int available_behaviours,
+ enum power_supply_charge_behaviour behaviour,
+ char *buf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int power_supply_charge_behaviour_parse(unsigned int available_behaviours,
+ const char *buf)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
#endif /* __LINUX_POWER_SUPPLY_H__ */
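
A driver exposing the charge behaviour via its own sysfs attribute can delegate to the helper above; a sketch, with the driver structure and its fields hypothetical:

struct example_battery {
	unsigned int available_behaviours;	/* bitmask of supported modes */
	enum power_supply_charge_behaviour behaviour;
};

static ssize_t charge_behaviour_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct example_battery *bat = dev_get_drvdata(dev);

	return power_supply_charge_behaviour_show(dev,
						  bat->available_behaviours,
						  bat->behaviour, buf);
}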
diff --git a/include/linux/powercap.h b/include/linux/powercap.h
index 4537f57f9e42..3d557bbcd2c7 100644
--- a/include/linux/powercap.h
+++ b/include/linux/powercap.h
@@ -44,19 +44,18 @@ struct powercap_control_type_ops {
};
/**
- * struct powercap_control_type- Defines a powercap control_type
- * @name: name of control_type
+ * struct powercap_control_type - Defines a powercap control_type
* @dev: device for this control_type
* @idr: idr to have unique id for its child
- * @root_node: Root holding power zones for this control_type
+ * @nr_zones: counter for number of zones of this type
* @ops: Pointer to callback struct
- * @node_lock: mutex for control type
+ * @lock: mutex for control type
* @allocated: This is possible that client owns the memory
* used by this structure. In this case
* this flag is set to false by framework to
* prevent deallocation during release process.
* Otherwise this flag is set to true.
- * @ctrl_inst: link to the control_type list
+ * @node: linked-list node
*
* Defines powercap control_type. This acts as a container for power
* zones, which use same method to control power. E.g. RAPL, RAPL-PCI etc.
@@ -129,7 +128,7 @@ struct powercap_zone_ops {
* this flag is set to false by framework to
* prevent deallocation during release process.
* Otherwise this flag is set to true.
- * @constraint_ptr: List of constraints for this zone.
+ * @constraints: List of constraints for this zone.
*
* This defines a power zone instance. The fields of this structure are
* private, and should not be used by client drivers.
diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
index 9d3ffc8f5ea6..fb847e47f148 100644
--- a/include/linux/ppp-comp.h
+++ b/include/linux/ppp-comp.h
@@ -9,7 +9,7 @@
#include <uapi/linux/ppp-comp.h>
-
+struct compstat;
struct module;
/*
diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h
index 98966064ee68..45e6e427ceb8 100644
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -20,6 +20,8 @@
#include <linux/poll.h>
#include <net/net_namespace.h>
+struct net_device_path;
+struct net_device_path_ctx;
struct ppp_channel;
struct ppp_channel_ops {
@@ -28,6 +30,9 @@ struct ppp_channel_ops {
int (*start_xmit)(struct ppp_channel *, struct sk_buff *);
/* Handle an ioctl call that has come in via /dev/ppp. */
int (*ioctl)(struct ppp_channel *, unsigned int, unsigned long);
+ int (*fill_forward_path)(struct net_device_path_ctx *,
+ struct net_device_path *,
+ const struct ppp_channel *);
};
struct ppp_channel {
diff --git a/include/linux/ppp_defs.h b/include/linux/ppp_defs.h
index 9d2b388fae1a..b7e57fdbd413 100644
--- a/include/linux/ppp_defs.h
+++ b/include/linux/ppp_defs.h
@@ -11,4 +11,18 @@
#include <uapi/linux/ppp_defs.h>
#define PPP_FCS(fcs, c) crc_ccitt_byte(fcs, c)
+
+/**
+ * ppp_proto_is_valid - checks if PPP protocol is valid
+ * @proto: PPP protocol
+ *
+ * Assumes proto is not compressed.
+ * Protocol is valid if the value is odd and the least significant bit of the
+ * most significant octet is 0 (see RFC 1661, section 2).
+ */
+static inline bool ppp_proto_is_valid(u16 proto)
+{
+ return !!((proto & 0x0101) == 0x0001);
+}
+
#endif /* _PPP_DEFS_H_ */
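
A few sanity checks that follow directly from the rule in the kerneldoc above (0x0021 is PPP's IP protocol number):

static inline bool example_ppp_proto_checks(void)
{
	return ppp_proto_is_valid(0x0021) &&	/* odd, high octet LSB 0 */
	       !ppp_proto_is_valid(0x0022) &&	/* even: invalid */
	       !ppp_proto_is_valid(0x0121);	/* high octet LSB 1: invalid */
}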
diff --git a/include/linux/pps-gpio.h b/include/linux/pps-gpio.h
deleted file mode 100644
index 7bf49908be06..000000000000
--- a/include/linux/pps-gpio.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * pps-gpio.h -- PPS client for GPIOs
- *
- * Copyright (C) 2011 James Nuss <jamesnuss@nanometrics.ca>
- */
-
-#ifndef _PPS_GPIO_H
-#define _PPS_GPIO_H
-
-struct pps_gpio_platform_data {
- struct gpio_desc *gpio_pin;
- struct gpio_desc *echo_pin;
- bool assert_falling_edge;
- bool capture_clear;
- unsigned int echo_active_ms;
-};
-
-#endif /* _PPS_GPIO_H */
diff --git a/include/linux/prandom.h b/include/linux/prandom.h
new file mode 100644
index 000000000000..e0a0759dd09c
--- /dev/null
+++ b/include/linux/prandom.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * include/linux/prandom.h
+ *
+ * Include file for the fast pseudo-random 32-bit
+ * generation.
+ */
+#ifndef _LINUX_PRANDOM_H
+#define _LINUX_PRANDOM_H
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+#include <linux/random.h>
+
+struct rnd_state {
+ __u32 s1, s2, s3, s4;
+};
+
+u32 prandom_u32_state(struct rnd_state *state);
+void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
+void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
+
+#define prandom_init_once(pcpu_state) \
+ DO_ONCE(prandom_seed_full_state, (pcpu_state))
+
+/**
+ * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
+ * @ep_ro: right open interval endpoint
+ *
+ * Returns a pseudo-random number that is in interval [0, ep_ro). This is
+ * useful when requesting a random index of an array containing ep_ro elements,
+ * for example. The result is somewhat biased when ep_ro is not a power of 2,
+ * so do not use this for cryptographic purposes.
+ *
+ * Returns: pseudo-random number in interval [0, ep_ro)
+ */
+static inline u32 prandom_u32_max(u32 ep_ro)
+{
+ if (__builtin_constant_p(ep_ro <= 1U << 8) && ep_ro <= 1U << 8)
+ return (get_random_u8() * ep_ro) >> 8;
+ if (__builtin_constant_p(ep_ro <= 1U << 16) && ep_ro <= 1U << 16)
+ return (get_random_u16() * ep_ro) >> 16;
+ return ((u64)get_random_u32() * ep_ro) >> 32;
+}
+
+/*
+ * Handle minimum values for seeds
+ */
+static inline u32 __seed(u32 x, u32 m)
+{
+ return (x < m) ? x + m : x;
+}
+
+/**
+ * prandom_seed_state - set seed for prandom_u32_state().
+ * @state: pointer to state structure to receive the seed.
+ * @seed: arbitrary 64-bit value to use as a seed.
+ */
+static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
+{
+ u32 i = ((seed >> 32) ^ (seed << 10) ^ seed) & 0xffffffffUL;
+
+ state->s1 = __seed(i, 2U);
+ state->s2 = __seed(i, 8U);
+ state->s3 = __seed(i, 16U);
+ state->s4 = __seed(i, 128U);
+}
+
+/* Pseudo random number generator from numerical recipes. */
+static inline u32 next_pseudo_random32(u32 seed)
+{
+ return seed * 1664525 + 1013904223;
+}
+
+#endif
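
For reproducible tests, the seeded stream can be combined with the same multiply-shift bounding trick prandom_u32_max() uses; a sketch (the helper name is hypothetical):

static u32 example_seeded_index(struct rnd_state *state, u32 len)
{
	/* maps a raw 32-bit sample into [0, len), slightly biased when
	 * len is not a power of 2, exactly as in prandom_u32_max() */
	return ((u64)prandom_u32_state(state) * len) >> 32;
}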
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index bbb68dba37cc..0df425bf9bd7 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -26,13 +26,13 @@
* PREEMPT_MASK: 0x000000ff
* SOFTIRQ_MASK: 0x0000ff00
* HARDIRQ_MASK: 0x000f0000
- * NMI_MASK: 0x00100000
+ * NMI_MASK: 0x00f00000
* PREEMPT_NEED_RESCHED: 0x80000000
*/
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define HARDIRQ_BITS 4
-#define NMI_BITS 1
+#define NMI_BITS 4
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
@@ -77,31 +77,58 @@
/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>
+/**
+ * interrupt_context_level - return interrupt context level
+ *
+ * Returns the current interrupt context level.
+ * 0 - normal context
+ * 1 - softirq context
+ * 2 - hardirq context
+ * 3 - NMI context
+ */
+static __always_inline unsigned char interrupt_context_level(void)
+{
+ unsigned long pc = preempt_count();
+ unsigned char level = 0;
+
+ level += !!(pc & (NMI_MASK));
+ level += !!(pc & (NMI_MASK | HARDIRQ_MASK));
+ level += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+
+ return level;
+}
+
+#define nmi_count() (preempt_count() & NMI_MASK)
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
-#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
-#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
- | NMI_MASK))
+#ifdef CONFIG_PREEMPT_RT
+# define softirq_count() (current->softirq_disable_cnt & SOFTIRQ_MASK)
+#else
+# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+#endif
+#define irq_count() (nmi_count() | hardirq_count() | softirq_count())
/*
- * Are we doing bottom half or hardware interrupt processing?
+ * Macros to retrieve the current execution context:
*
- * in_irq() - We're in (hard) IRQ context
+ * in_nmi() - We're in NMI context
+ * in_hardirq() - We're in hard IRQ context
+ * in_serving_softirq() - We're in softirq context
+ * in_task() - We're in task context
+ */
+#define in_nmi() (nmi_count())
+#define in_hardirq() (hardirq_count())
+#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+#define in_task() (!(in_nmi() | in_hardirq() | in_serving_softirq()))
+
+/*
+ * The following macros are deprecated and should not be used in new code:
+ * in_irq() - Obsolete version of in_hardirq()
* in_softirq() - We have BH disabled, or are processing softirqs
* in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled
- * in_serving_softirq() - We're in softirq context
- * in_nmi() - We're in NMI context
- * in_task() - We're in task context
- *
- * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really
- * should not be used in new code.
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-#define in_nmi() (preempt_count() & NMI_MASK)
-#define in_task() (!(preempt_count() & \
- (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
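
A common use of these context macros is picking an allocation mode; a sketch assuming <linux/gfp.h> (note that task context alone does not guarantee sleeping is allowed, e.g. under a spinlock):

static inline gfp_t example_gfp_flags(void)
{
	/* any interrupt context: must not sleep */
	if (!in_task())
		return GFP_ATOMIC;
	return GFP_KERNEL;
}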
/*
* The preempt_count offset after preempt_disable();
@@ -115,7 +142,12 @@
/*
* The preempt_count offset after spin_lock()
*/
-#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
+#if !defined(CONFIG_PREEMPT_RT)
+#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
+#else
+/* Locks on RT do not disable preemption */
+#define PREEMPT_LOCK_OFFSET 0
+#endif
/*
* The preempt_count offset needed for things like:
@@ -322,4 +354,113 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
#endif
+#ifdef CONFIG_SMP
+
+/*
+ * Migrate-Disable and why it is undesired.
+ *
+ * When a preempted task becomes eligible to run under the ideal model (IOW it
+ * becomes one of the M highest priority tasks), it might still have to wait
+ * for the preemptee's migrate_disable() section to complete. Thereby suffering
+ * a reduction in bandwidth in the exact duration of the migrate_disable()
+ * section.
+ *
+ * Per this argument, the change from preempt_disable() to migrate_disable()
+ * gets us:
+ *
+ * - a higher priority task gains reduced wake-up latency; with preempt_disable()
+ * it would have had to wait for the lower priority task.
+ *
+ * - a lower priority task, which under preempt_disable() could've instantly
+ * migrated away when another CPU becomes available, is now constrained
+ * by the ability to push the higher priority task away, which might itself be
+ * in a migrate_disable() section, reducing its available bandwidth.
+ *
+ * IOW it trades latency / moves the interference term, but it stays in the
+ * system, and as long as it remains unbounded, the system is not fully
+ * deterministic.
+ *
+ *
+ * The reason we have it anyway.
+ *
+ * PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a
+ * number of primitives into becoming preemptible, they would also allow
+ * migration. This turns out to break a bunch of per-cpu usage. To this end,
+ * all these primitives employ migrate_disable() to restore this implicit
+ * assumption.
+ *
+ * This is a 'temporary' work-around at best. The correct solution is getting
+ * rid of the above assumptions and reworking the code to employ explicit
+ * per-cpu locking or short preempt-disable regions.
+ *
+ * The end goal must be to get rid of migrate_disable(), alternatively we need
+ * a schedulability theory that does not depend on arbitrary migration.
+ *
+ *
+ * Notes on the implementation.
+ *
+ * The implementation is particularly tricky since existing code patterns
+ * dictate neither migrate_disable() nor migrate_enable() is allowed to block.
+ * This means that it cannot use cpus_read_lock() to serialize against hotplug,
+ * nor can it easily migrate itself into a pending affinity mask change on
+ * migrate_enable().
+ *
+ *
+ * Note: even non-work-conserving schedulers like semi-partitioned depend on
+ * migration, so migrate_disable() is not only a problem for
+ * work-conserving schedulers.
+ *
+ */
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+
+#else
+
+static inline void migrate_disable(void) { }
+static inline void migrate_enable(void) { }
+
+#endif /* CONFIG_SMP */
+
+/**
+ * preempt_disable_nested - Disable preemption inside a normally preempt disabled section
+ *
+ * Use for code which requires preemption protection inside a critical
+ * section which has preemption disabled implicitly on non-PREEMPT_RT
+ * enabled kernels, by e.g.:
+ * - holding a spinlock/rwlock
+ * - soft interrupt context
+ * - regular interrupt handlers
+ *
+ * On PREEMPT_RT enabled kernels spinlock/rwlock held sections, soft
+ * interrupt context and regular interrupt handlers are preemptible and
+ * only prevent migration. preempt_disable_nested() ensures that preemption
+ * is disabled for cases which require CPU local serialization even on
+ * PREEMPT_RT. For non-PREEMPT_RT kernels this is a NOP.
+ *
+ * The use cases are code sequences which are not serialized by a
+ * particular lock instance, e.g.:
+ * - seqcount write side critical sections where the seqcount is not
+ * associated to a particular lock and therefore the automatic
+ * protection mechanism does not work. This prevents a live lock
+ * against a preempting high priority reader.
+ * - RMW per CPU variable updates like vmstat.
+ */
+/* Macro to avoid header recursion hell vs. lockdep */
+#define preempt_disable_nested() \
+do { \
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) \
+ preempt_disable(); \
+ else \
+ lockdep_assert_preemption_disabled(); \
+} while (0)
+
+/**
+ * preempt_enable_nested - Undo the effect of preempt_disable_nested()
+ */
+static __always_inline void preempt_enable_nested(void)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
+}
+
#endif /* __LINUX_PREEMPT_H */
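
The three cumulative mask tests in interrupt_context_level() deserve a note: each mask is a superset of the previous one, so the sum of the three boolean tests counts context depth directly. A minimal, runnable userspace model (mask values copied from the layout above; main() and the sample preempt_count values are purely illustrative):

#include <stdio.h>

#define SOFTIRQ_OFFSET	0x00000100UL
#define HARDIRQ_MASK	0x000f0000UL
#define NMI_MASK	0x00f00000UL

static unsigned char context_level(unsigned long pc)
{
	unsigned char level = 0;

	/* NMI sets all three tests, hardirq the last two, softirq
	 * only the last one: the sum is the context depth 0..3. */
	level += !!(pc & NMI_MASK);
	level += !!(pc & (NMI_MASK | HARDIRQ_MASK));
	level += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));

	return level;
}

int main(void)
{
	printf("%u %u %u %u\n",
	       context_level(0),		/* task:    0 */
	       context_level(SOFTIRQ_OFFSET),	/* softirq: 1 */
	       context_level(0x00010000UL),	/* hardirq: 2 */
	       context_level(0x00100000UL));	/* NMI:     3 */
	return 0;
}
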
diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h
index 13eafebf3549..b83a3f944f28 100644
--- a/include/linux/prefetch.h
+++ b/include/linux/prefetch.h
@@ -15,6 +15,7 @@
#include <asm/processor.h>
#include <asm/cache.h>
+struct page;
/*
prefetch(x) attempts to pre-emptively get the memory pointed to
by address "x" into the CPU L1 cache.
@@ -62,4 +63,11 @@ static inline void prefetch_range(void *addr, size_t len)
#endif
}
+static inline void prefetch_page_address(struct page *page)
+{
+#if defined(WANT_PAGE_VIRTUAL) || defined(HASHED_PAGE_VIRTUAL)
+ prefetch(page);
+#endif
+}
+
#endif
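
A classic use of prefetch() is hiding the dependent-load latency of a pointer chase; a hedged sketch (struct item and sum_items() are hypothetical):

#include <linux/prefetch.h>

struct item {
	struct item *next;
	int payload;
};

/* Start fetching the next node while the current one is processed;
 * prefetch() is only a hint, so a NULL next pointer is harmless. */
static int sum_items(const struct item *head)
{
	const struct item *p;
	int sum = 0;

	for (p = head; p; p = p->next) {
		prefetch(p->next);
		sum += p->payload;
	}
	return sum;
}
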
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 1e6108b8d15f..8c81806c2e99 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -2,15 +2,18 @@
#ifndef __KERNEL_PRINTK__
#define __KERNEL_PRINTK__
-#include <stdarg.h>
+#include <linux/stdarg.h>
#include <linux/init.h>
#include <linux/kern_levels.h>
#include <linux/linkage.h>
-#include <linux/cache.h>
+#include <linux/ratelimit_types.h>
+#include <linux/once_lite.h>
extern const char linux_banner[];
extern const char linux_proc_banner[];
+extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */
+
#define PRINTK_MAX_SINGLE_HEADER_LEN 2
static inline int printk_get_level(const char *buffer)
@@ -66,16 +69,7 @@ extern int console_printk[];
#define minimum_console_loglevel (console_printk[2])
#define default_console_loglevel (console_printk[3])
-static inline void console_silent(void)
-{
- console_loglevel = CONSOLE_LOGLEVEL_SILENT;
-}
-
-static inline void console_verbose(void)
-{
- if (console_loglevel)
- console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
-}
+extern void console_verbose(void);
/* strlen("ratelimit") + 1 */
#define DEVKMSG_STR_MAX_SIZE 10
@@ -146,34 +140,34 @@ static inline __printf(1, 2) __cold
void early_printk(const char *s, ...) { }
#endif
-#ifdef CONFIG_PRINTK_NMI
-extern void printk_nmi_enter(void);
-extern void printk_nmi_exit(void);
-extern void printk_nmi_direct_enter(void);
-extern void printk_nmi_direct_exit(void);
-#else
-static inline void printk_nmi_enter(void) { }
-static inline void printk_nmi_exit(void) { }
-static inline void printk_nmi_direct_enter(void) { }
-static inline void printk_nmi_direct_exit(void) { }
-#endif /* PRINTK_NMI */
+struct dev_printk_info;
#ifdef CONFIG_PRINTK
-asmlinkage __printf(5, 0)
+asmlinkage __printf(4, 0)
int vprintk_emit(int facility, int level,
- const char *dict, size_t dictlen,
+ const struct dev_printk_info *dev_info,
const char *fmt, va_list args);
asmlinkage __printf(1, 0)
int vprintk(const char *fmt, va_list args);
asmlinkage __printf(1, 2) __cold
-int printk(const char *fmt, ...);
+int _printk(const char *fmt, ...);
/*
* Special printk facility for scheduler/timekeeping use only, _DO_NOT_USE_ !
*/
-__printf(1, 2) __cold int printk_deferred(const char *fmt, ...);
+__printf(1, 2) __cold int _printk_deferred(const char *fmt, ...);
+
+extern void __printk_safe_enter(void);
+extern void __printk_safe_exit(void);
+/*
+ * The printk_deferred_enter/exit macros are available only as a hack for
+ * some code paths that need to defer all printk console printing. Interrupts
+ * must be disabled for the deferred duration.
+ */
+#define printk_deferred_enter __printk_safe_enter
+#define printk_deferred_exit __printk_safe_exit
/*
* Please don't use printk_ratelimit(), because it shares ratelimiting state
@@ -188,10 +182,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
extern int printk_delay_msec;
extern int dmesg_restrict;
-extern int
-devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void __user *buf,
- size_t *lenp, loff_t *ppos);
-
extern void wake_up_klogd(void);
char *log_buf_addr_get(void);
@@ -201,10 +191,9 @@ void __init setup_log_buf(int early);
__printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
void dump_stack_print_info(const char *log_lvl);
void show_regs_print_info(const char *log_lvl);
+extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
extern asmlinkage void dump_stack(void) __cold;
-extern void printk_safe_init(void);
-extern void printk_safe_flush(void);
-extern void printk_safe_flush_on_panic(void);
+void printk_trigger_flush(void);
#else
static inline __printf(1, 0)
int vprintk(const char *s, va_list args)
@@ -212,15 +201,24 @@ int vprintk(const char *s, va_list args)
return 0;
}
static inline __printf(1, 2) __cold
-int printk(const char *s, ...)
+int _printk(const char *s, ...)
{
return 0;
}
static inline __printf(1, 2) __cold
-int printk_deferred(const char *s, ...)
+int _printk_deferred(const char *s, ...)
{
return 0;
}
+
+static inline void printk_deferred_enter(void)
+{
+}
+
+static inline void printk_deferred_exit(void)
+{
+}
+
static inline int printk_ratelimit(void)
{
return 0;
@@ -265,58 +263,294 @@ static inline void show_regs_print_info(const char *log_lvl)
{
}
-static inline void dump_stack(void)
-{
-}
-
-static inline void printk_safe_init(void)
+static inline void dump_stack_lvl(const char *log_lvl)
{
}
-static inline void printk_safe_flush(void)
+static inline void dump_stack(void)
{
}
-
-static inline void printk_safe_flush_on_panic(void)
+static inline void printk_trigger_flush(void)
{
}
#endif
+#ifdef CONFIG_SMP
+extern int __printk_cpu_sync_try_get(void);
+extern void __printk_cpu_sync_wait(void);
+extern void __printk_cpu_sync_put(void);
+
+#else
+
+#define __printk_cpu_sync_try_get() true
+#define __printk_cpu_sync_wait()
+#define __printk_cpu_sync_put()
+#endif /* CONFIG_SMP */
+
+/**
+ * printk_cpu_sync_get_irqsave() - Disable interrupts and acquire the printk
+ * cpu-reentrant spinning lock.
+ * @flags: Stack-allocated storage for saving local interrupt state,
+ * to be passed to printk_cpu_sync_put_irqrestore().
+ *
+ * If the lock is owned by another CPU, spin until it becomes available.
+ * Interrupts are restored while spinning.
+ *
+ * CAUTION: This function must be used carefully. It does not behave like a
+ * typical lock. Here are important things to watch out for...
+ *
+ * * This function is reentrant on the same CPU. Therefore the calling
+ * code must not assume exclusive access to data if code accessing the
+ * data can run reentrant or within NMI context on the same CPU.
+ *
+ * * If there exists usage of this function from NMI context, it becomes
+ * unsafe to perform any type of locking or spinning to wait for other
+ * CPUs after calling this function from any context. This includes
+ * using spinlocks or any other busy-waiting synchronization methods.
+ */
+#define printk_cpu_sync_get_irqsave(flags) \
+ for (;;) { \
+ local_irq_save(flags); \
+ if (__printk_cpu_sync_try_get()) \
+ break; \
+ local_irq_restore(flags); \
+ __printk_cpu_sync_wait(); \
+ }
+
+/**
+ * printk_cpu_sync_put_irqrestore() - Release the printk cpu-reentrant spinning
+ * lock and restore interrupts.
+ * @flags: Caller's saved interrupt state, from printk_cpu_sync_get_irqsave().
+ */
+#define printk_cpu_sync_put_irqrestore(flags) \
+ do { \
+ __printk_cpu_sync_put(); \
+ local_irq_restore(flags); \
+ } while (0)
+
extern int kptr_restrict;
+/**
+ * pr_fmt - used by the pr_*() macros to generate the printk format string
+ * @fmt: format string passed from a pr_*() macro
+ *
+ * This macro can be used to generate a unified format string for pr_*()
+ * macros. A common use is to prefix all pr_*() messages in a file with a common
+ * string. For example, defining this at the top of a source file:
+ *
+ * #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ *
+ * would prefix all pr_info, pr_emerg... messages in the file with the module
+ * name.
+ */
#ifndef pr_fmt
#define pr_fmt(fmt) fmt
#endif
+struct module;
+
+#ifdef CONFIG_PRINTK_INDEX
+struct pi_entry {
+ const char *fmt;
+ const char *func;
+ const char *file;
+ unsigned int line;
+
+ /*
+ * While printk and pr_* have the level stored in the string at compile
+ * time, some subsystems dynamically add it at runtime through the
+ * format string. For these dynamic cases, we allow the subsystem to
+ * tell us the level at compile time.
+ *
+ * NULL indicates that the level, if any, is stored in fmt.
+ */
+ const char *level;
+
+ /*
+ * The format string used by various subsystem specific printk()
+ * wrappers to prefix the message.
+ *
+ * Note that the static prefix defined by the pr_fmt() macro is stored
+ * directly in the message format (@fmt), not here.
+ */
+ const char *subsys_fmt_prefix;
+} __packed;
+
+#define __printk_index_emit(_fmt, _level, _subsys_fmt_prefix) \
+ do { \
+ if (__builtin_constant_p(_fmt) && __builtin_constant_p(_level)) { \
+ /*
+ * We check __builtin_constant_p multiple times here
+ * for the same input because GCC will produce an error
+ * if we try to assign a static variable to fmt if it
+ * is not a constant, even with the outer if statement.
+ */ \
+ static const struct pi_entry _entry \
+ __used = { \
+ .fmt = __builtin_constant_p(_fmt) ? (_fmt) : NULL, \
+ .func = __func__, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ .level = __builtin_constant_p(_level) ? (_level) : NULL, \
+ .subsys_fmt_prefix = _subsys_fmt_prefix,\
+ }; \
+ static const struct pi_entry *_entry_ptr \
+ __used __section(".printk_index") = &_entry; \
+ } \
+ } while (0)
+
+#else /* !CONFIG_PRINTK_INDEX */
+#define __printk_index_emit(...) do {} while (0)
+#endif /* CONFIG_PRINTK_INDEX */
+
/*
- * These can be used to print at the various log levels.
- * All of these will print unconditionally, although note that pr_debug()
- * and other debug macros are compiled out unless either DEBUG is defined
- * or CONFIG_DYNAMIC_DEBUG is set.
+ * Some subsystems have their own custom printk that applies a va_format to a
+ * generic format, for example, to include a device number or other metadata
+ * alongside the format supplied by the caller.
+ *
+ * In order to store these in the way they would be emitted by the printk
+ * infrastructure, the subsystem provides us with the start, fixed string, and
+ * any subsequent text in the format string.
+ *
+ * We take a variable argument list as pr_fmt/dev_fmt/etc are sometimes passed
+ * as multiple arguments (eg: `"%s: ", "blah"`), and we must only take the
+ * first one.
+ *
+ * subsys_fmt_prefix must be known at compile time, or compilation will fail
+ * (since this is a mistake). If fmt or level is not known at compile time, no
+ * index entry will be made (since this can legitimately happen).
+ */
+#define printk_index_subsys_emit(subsys_fmt_prefix, level, fmt, ...) \
+ __printk_index_emit(fmt, level, subsys_fmt_prefix)
+
+#define printk_index_wrap(_p_func, _fmt, ...) \
+ ({ \
+ __printk_index_emit(_fmt, NULL, NULL); \
+ _p_func(_fmt, ##__VA_ARGS__); \
+ })
+
+
+/**
+ * printk - print a kernel message
+ * @fmt: format string
+ *
+ * This is printk(). It can be called from any context. We want it to work.
+ *
+ * If printk indexing is enabled, _printk() is called from printk_index_wrap.
+ * Otherwise, printk is simply #defined to _printk.
+ *
+ * We try to grab the console_lock. If we succeed, it's easy - we log the
+ * output and call the console drivers. If we fail to get the semaphore, we
+ * place the output into the log buffer and return. The current holder of
+ * the console_sem will notice the new output in console_unlock(); and will
+ * send it to the consoles before releasing the lock.
+ *
+ * One effect of this deferred printing is that code which calls printk() and
+ * then changes console_loglevel may break. This is because console_loglevel
+ * is inspected when the actual printing occurs.
+ *
+ * See also:
+ * printf(3)
+ *
+ * See the vsnprintf() documentation for format string extensions over C99.
+ */
+#define printk(fmt, ...) printk_index_wrap(_printk, fmt, ##__VA_ARGS__)
+#define printk_deferred(fmt, ...) \
+ printk_index_wrap(_printk_deferred, fmt, ##__VA_ARGS__)
+
+/**
+ * pr_emerg - Print an emergency-level message
+ * @fmt: format string
+ * @...: arguments for the format string
+ *
+ * This macro expands to a printk with KERN_EMERG loglevel. It uses pr_fmt() to
+ * generate the format string.
*/
#define pr_emerg(fmt, ...) \
printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+/**
+ * pr_alert - Print an alert-level message
+ * @fmt: format string
+ * @...: arguments for the format string
+ *
+ * This macro expands to a printk with KERN_ALERT loglevel. It uses pr_fmt() to
+ * generate the format string.
+ */
#define pr_alert(fmt, ...) \
printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+/**
+ * pr_crit - Print a critical-level message
+ * @fmt: format string
+ * @...: arguments for the format string
+ *
+ * This macro expands to a printk with KERN_CRIT loglevel. It uses pr_fmt() to
+ * generate the format string.
+ */
#define pr_crit(fmt, ...) \
printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+/**
+ * pr_err - Print an error-level message
+ * @fmt: format string
+ * @...: arguments for the format string
+ *
+ * This macro expands to a printk with KERN_ERR loglevel. It uses pr_fmt() to
+ * generate the format string.
+ */
#define pr_err(fmt, ...) \
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+/**
+ * pr_warn - Print a warning-level message
+ * @fmt: format string
+ * @...: arguments for the format string
+ *
+ * This macro expands to a printk with KERN_WARNING loglevel. It uses pr_fmt()
+ * to generate the format string.
+ */
#define pr_warn(fmt, ...) \
printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+/**
+ * pr_notice - Print a notice-level message
+ * @fmt: format string
+ * @...: arguments for the format string
+ *
+ * This macro expands to a printk with KERN_NOTICE loglevel. It uses pr_fmt() to
+ * generate the format string.
+ */
#define pr_notice(fmt, ...) \
printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+/**
+ * pr_info - Print an info-level message
+ * @fmt: format string
+ * @...: arguments for the format string
+ *
+ * This macro expands to a printk with KERN_INFO loglevel. It uses pr_fmt() to
+ * generate the format string.
+ */
#define pr_info(fmt, ...) \
printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
-/*
- * Like KERN_CONT, pr_cont() should only be used when continuing
- * a line with no newline ('\n') enclosed. Otherwise it defaults
- * back to KERN_DEFAULT.
+
+/**
+ * pr_cont - Continues a previous log message in the same line.
+ * @fmt: format string
+ * @...: arguments for the format string
+ *
+ * This macro expands to a printk with KERN_CONT loglevel. It should only be
+ * used when continuing a log message with no newline ('\n') enclosed. Otherwise
+ * it defaults back to KERN_DEFAULT loglevel.
*/
#define pr_cont(fmt, ...) \
printk(KERN_CONT fmt, ##__VA_ARGS__)
-/* pr_devel() should produce zero code unless DEBUG is defined */
+/**
+ * pr_devel - Print a debug-level message conditionally
+ * @fmt: format string
+ * @...: arguments for the format string
+ *
+ * This macro expands to a printk with KERN_DEBUG loglevel if DEBUG is
+ * defined. Otherwise it does nothing.
+ *
+ * It uses pr_fmt() to generate the format string.
+ */
#ifdef DEBUG
#define pr_devel(fmt, ...) \
printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
@@ -327,11 +561,23 @@ extern int kptr_restrict;
/* If you are writing a driver, please use dev_dbg instead */
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#include <linux/dynamic_debug.h>
-/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */
-#define pr_debug(fmt, ...) \
+/**
+ * pr_debug - Print a debug-level message conditionally
+ * @fmt: format string
+ * @...: arguments for the format string
+ *
+ * This macro expands to dynamic_pr_debug() if CONFIG_DYNAMIC_DEBUG is
+ * set. Otherwise, if DEBUG is defined, it's equivalent to a printk with
+ * KERN_DEBUG loglevel. If DEBUG is not defined it does nothing.
+ *
+ * It uses pr_fmt() to generate the format string (dynamic_pr_debug() uses
+ * pr_fmt() internally).
+ */
+#define pr_debug(fmt, ...) \
dynamic_pr_debug(fmt, ##__VA_ARGS__)
#elif defined(DEBUG)
#define pr_debug(fmt, ...) \
@@ -347,27 +593,9 @@ extern int kptr_restrict;
#ifdef CONFIG_PRINTK
#define printk_once(fmt, ...) \
-({ \
- static bool __section(.data.once) __print_once; \
- bool __ret_print_once = !__print_once; \
- \
- if (!__print_once) { \
- __print_once = true; \
- printk(fmt, ##__VA_ARGS__); \
- } \
- unlikely(__ret_print_once); \
-})
+ DO_ONCE_LITE(printk, fmt, ##__VA_ARGS__)
#define printk_deferred_once(fmt, ...) \
-({ \
- static bool __section(.data.once) __print_once; \
- bool __ret_print_once = !__print_once; \
- \
- if (!__print_once) { \
- __print_once = true; \
- printk_deferred(fmt, ##__VA_ARGS__); \
- } \
- unlikely(__ret_print_once); \
-})
+ DO_ONCE_LITE(printk_deferred, fmt, ##__VA_ARGS__)
#else
#define printk_once(fmt, ...) \
no_printk(fmt, ##__VA_ARGS__)
@@ -389,8 +617,7 @@ extern int kptr_restrict;
printk_once(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info_once(fmt, ...) \
printk_once(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_cont_once(fmt, ...) \
- printk_once(KERN_CONT pr_fmt(fmt), ##__VA_ARGS__)
+/* no pr_cont_once, don't do that... */
#if defined(DEBUG)
#define pr_devel_once(fmt, ...) \
@@ -453,7 +680,8 @@ extern int kptr_restrict;
#endif
/* If you are writing a driver, please use dev_dbg instead */
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define pr_debug_ratelimited(fmt, ...) \
do { \
@@ -500,7 +728,8 @@ static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
#endif
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii) \
dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
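
Taken together, the pr_fmt() kerneldoc and the DO_ONCE_LITE() conversion above suggest the following hypothetical driver snippet (module name and messages are illustrative):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/printk.h>

static int __init demo_init(void)
{
	pr_info("loaded\n");			/* prints "<modname>: loaded" */
	pr_warn_once("legacy option ignored\n");	/* at most once, via DO_ONCE_LITE() */
	return 0;
}
module_init(demo_init);

MODULE_LICENSE("GPL");
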
diff --git a/include/linux/prmt.h b/include/linux/prmt.h
new file mode 100644
index 000000000000..24da8364b919
--- /dev/null
+++ b/include/linux/prmt.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifdef CONFIG_ACPI_PRMT
+void init_prmt(void);
+#else
+static inline void init_prmt(void) { }
+#endif
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 3dfa92633af3..81d6e4ec2294 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -5,6 +5,7 @@
#ifndef _LINUX_PROC_FS_H
#define _LINUX_PROC_FS_H
+#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/fs.h>
@@ -12,10 +13,26 @@ struct proc_dir_entry;
struct seq_file;
struct seq_operations;
+enum {
+ /*
+ * All /proc entries using this ->proc_ops instance are never removed.
+ *
+ * If in doubt, ignore this flag.
+ */
+#ifdef MODULE
+ PROC_ENTRY_PERMANENT = 0U,
+#else
+ PROC_ENTRY_PERMANENT = 1U << 0,
+#endif
+};
+
struct proc_ops {
+ unsigned int proc_flags;
int (*proc_open)(struct inode *, struct file *);
ssize_t (*proc_read)(struct file *, char __user *, size_t, loff_t *);
+ ssize_t (*proc_read_iter)(struct kiocb *, struct iov_iter *);
ssize_t (*proc_write)(struct file *, const char __user *, size_t, loff_t *);
+ /* mandatory unless nonseekable_open() or equivalent is used */
loff_t (*proc_lseek)(struct file *, loff_t, int);
int (*proc_release)(struct inode *, struct file *);
__poll_t (*proc_poll)(struct file *, struct poll_table_struct *);
@@ -25,17 +42,46 @@ struct proc_ops {
#endif
int (*proc_mmap)(struct file *, struct vm_area_struct *);
unsigned long (*proc_get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+} __randomize_layout;
+
+/* definitions for hide_pid field */
+enum proc_hidepid {
+ HIDEPID_OFF = 0,
+ HIDEPID_NO_ACCESS = 1,
+ HIDEPID_INVISIBLE = 2,
+ HIDEPID_NOT_PTRACEABLE = 4, /* Limit pids to only ptraceable pids */
};
+/* definitions for proc mount option pidonly */
+enum proc_pidonly {
+ PROC_PIDONLY_OFF = 0,
+ PROC_PIDONLY_ON = 1,
+};
+
+struct proc_fs_info {
+ struct pid_namespace *pid_ns;
+ struct dentry *proc_self; /* For /proc/self */
+ struct dentry *proc_thread_self; /* For /proc/thread-self */
+ kgid_t pid_gid;
+ enum proc_hidepid hide_pid;
+ enum proc_pidonly pidonly;
+};
+
+static inline struct proc_fs_info *proc_sb_info(struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
+
#ifdef CONFIG_PROC_FS
typedef int (*proc_write_t)(struct file *, char *, size_t);
extern void proc_root_init(void);
-extern void proc_flush_task(struct task_struct *);
+extern void proc_flush_pid(struct pid *);
extern struct proc_dir_entry *proc_symlink(const char *,
struct proc_dir_entry *, const char *);
+struct proc_dir_entry *_proc_mkdir(const char *, umode_t, struct proc_dir_entry *, void *, bool);
extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
struct proc_dir_entry *, void *);
@@ -64,7 +110,16 @@ extern struct proc_dir_entry *proc_create_data(const char *, umode_t,
struct proc_dir_entry *proc_create(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct proc_ops *proc_ops);
extern void proc_set_size(struct proc_dir_entry *, loff_t);
extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
-extern void *PDE_DATA(const struct inode *);
+
+/*
+ * Obtain the private data passed by user through proc_create_data() or
+ * related.
+ */
+static inline void *pde_data(const struct inode *inode)
+{
+ return inode->i_private;
+}
+
extern void *proc_get_parent_data(const struct inode *);
extern void proc_remove(struct proc_dir_entry *);
extern void remove_proc_entry(const char *, struct proc_dir_entry *);
@@ -90,6 +145,10 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo
void *data);
extern struct pid *tgid_pidfd_to_pid(const struct file *file);
+struct bpf_iter_aux_info;
+extern int bpf_iter_init_seq_net(void *priv_data, struct bpf_iter_aux_info *aux);
+extern void bpf_iter_fini_seq_net(void *priv_data);
+
#ifdef CONFIG_PROC_PID_ARCH_STATUS
/*
* The architecture which selects CONFIG_PROC_PID_ARCH_STATUS must
@@ -105,7 +164,7 @@ static inline void proc_root_init(void)
{
}
-static inline void proc_flush_task(struct task_struct *task)
+static inline void proc_flush_pid(struct pid *pid)
{
}
@@ -114,6 +173,11 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
static inline struct proc_dir_entry *proc_mkdir(const char *name,
struct proc_dir_entry *parent) {return NULL;}
static inline struct proc_dir_entry *proc_create_mount_point(const char *name) { return NULL; }
+static inline struct proc_dir_entry *_proc_mkdir(const char *name, umode_t mode,
+ struct proc_dir_entry *parent, void *data, bool force_lookup)
+{
+ return NULL;
+}
static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
@@ -123,12 +187,20 @@ static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
#define proc_create_seq(name, mode, parent, ops) ({NULL;})
#define proc_create_single(name, mode, parent, show) ({NULL;})
#define proc_create_single_data(name, mode, parent, show, data) ({NULL;})
-#define proc_create(name, mode, parent, proc_ops) ({NULL;})
-#define proc_create_data(name, mode, parent, proc_ops, data) ({NULL;})
+
+static inline struct proc_dir_entry *
+proc_create(const char *name, umode_t mode, struct proc_dir_entry *parent,
+ const struct proc_ops *proc_ops)
+{ return NULL; }
+
+static inline struct proc_dir_entry *
+proc_create_data(const char *name, umode_t mode, struct proc_dir_entry *parent,
+ const struct proc_ops *proc_ops, void *data)
+{ return NULL; }
static inline void proc_set_size(struct proc_dir_entry *de, loff_t size) {}
static inline void proc_set_user(struct proc_dir_entry *de, kuid_t uid, kgid_t gid) {}
-static inline void *PDE_DATA(const struct inode *inode) {BUG(); return NULL;}
+static inline void *pde_data(const struct inode *inode) {BUG(); return NULL;}
static inline void *proc_get_parent_data(const struct inode *inode) { BUG(); return NULL; }
static inline void proc_remove(struct proc_dir_entry *de) {}
@@ -151,7 +223,7 @@ struct net;
static inline struct proc_dir_entry *proc_net_mkdir(
struct net *net, const char *name, struct proc_dir_entry *parent)
{
- return proc_mkdir_data(name, 0, parent, net);
+ return _proc_mkdir(name, 0, parent, net, true);
}
struct ns_common;
@@ -159,9 +231,11 @@ int open_related_ns(struct ns_common *ns,
struct ns_common *(*get_ns)(struct ns_common *ns));
/* get the associated pid namespace for a file in procfs */
-static inline struct pid_namespace *proc_pid_ns(const struct inode *inode)
+static inline struct pid_namespace *proc_pid_ns(struct super_block *sb)
{
- return inode->i_sb->s_fs_info;
+ return proc_sb_info(sb)->pid_ns;
}
+bool proc_ns_file(const struct file *file);
+
#endif /* _LINUX_PROC_FS_H */
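
The pde_data() rename above pairs with proc_create_data(); a hedged sketch of a read-only /proc entry recovering its private pointer (entry name and payload are hypothetical):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", (const char *)m->private);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	/* pde_data() returns the pointer given to proc_create_data() */
	return single_open(file, demo_show, pde_data(inode));
}

static const struct proc_ops demo_proc_ops = {
	.proc_open	= demo_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

static int __init demo_init(void)
{
	if (!proc_create_data("demo", 0444, NULL, &demo_proc_ops, "hello"))
		return -ENOMEM;
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");
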
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 4626b1ac3b6c..75807ecef880 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -8,7 +8,7 @@
#include <linux/ns_common.h>
struct pid_namespace;
-struct nsproxy;
+struct nsset;
struct path;
struct task_struct;
struct inode;
@@ -19,7 +19,7 @@ struct proc_ns_operations {
int type;
struct ns_common *(*get)(struct task_struct *task);
void (*put)(struct ns_common *ns);
- int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
+ int (*install)(struct nsset *nsset, struct ns_common *ns);
struct user_namespace *(*owner)(struct ns_common *ns);
struct ns_common *(*get_parent)(struct ns_common *ns);
} __randomize_layout;
@@ -50,16 +50,11 @@ enum {
#ifdef CONFIG_PROC_FS
-extern int pid_ns_prepare_proc(struct pid_namespace *ns);
-extern void pid_ns_release_proc(struct pid_namespace *ns);
extern int proc_alloc_inum(unsigned int *pino);
extern void proc_free_inum(unsigned int inum);
#else /* CONFIG_PROC_FS */
-static inline int pid_ns_prepare_proc(struct pid_namespace *ns) { return 0; }
-static inline void pid_ns_release_proc(struct pid_namespace *ns) {}
-
static inline int proc_alloc_inum(unsigned int *inum)
{
*inum = 1;
@@ -85,6 +80,8 @@ typedef struct ns_common *ns_get_path_helper_t(void *);
extern int ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb,
void *private_data);
+extern bool ns_match(const struct ns_common *ns, dev_t dev, ino_t ino);
+
extern int ns_get_name(char *buf, size_t size, struct task_struct *task,
const struct proc_ns_operations *ns_ops);
extern void nsfs_init(void);
diff --git a/include/linux/profile.h b/include/linux/profile.h
index bad18ca43150..11db1ec516e2 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -15,7 +15,6 @@
#define KVM_PROFILING 4
struct proc_dir_entry;
-struct pt_regs;
struct notifier_block;
#if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS)
@@ -32,11 +31,6 @@ static inline int create_proc_profile(void)
}
#endif
-enum profile_type {
- PROFILE_TASK_EXIT,
- PROFILE_MUNMAP
-};
-
#ifdef CONFIG_PROFILING
extern int prof_on __read_mostly;
@@ -67,25 +61,6 @@ static inline void profile_hit(int type, void *ip)
struct task_struct;
struct mm_struct;
-/* task is in do_exit() */
-void profile_task_exit(struct task_struct * task);
-
-/* task is dead, free task struct ? Returns 1 if
- * the task was taken, 0 if the task should be freed.
- */
-int profile_handoff_task(struct task_struct * task);
-
-/* sys_munmap */
-void profile_munmap(unsigned long addr);
-
-int task_handoff_register(struct notifier_block * n);
-int task_handoff_unregister(struct notifier_block * n);
-
-int profile_event_register(enum profile_type, struct notifier_block * n);
-int profile_event_unregister(enum profile_type, struct notifier_block * n);
-
-struct pt_regs;
-
#else
#define prof_on 0
@@ -110,29 +85,6 @@ static inline void profile_hit(int type, void *ip)
return;
}
-static inline int task_handoff_register(struct notifier_block * n)
-{
- return -ENOSYS;
-}
-
-static inline int task_handoff_unregister(struct notifier_block * n)
-{
- return -ENOSYS;
-}
-
-static inline int profile_event_register(enum profile_type t, struct notifier_block * n)
-{
- return -ENOSYS;
-}
-
-static inline int profile_event_unregister(enum profile_type t, struct notifier_block * n)
-{
- return -ENOSYS;
-}
-
-#define profile_task_exit(a) do { } while (0)
-#define profile_handoff_task(a) (0)
-#define profile_munmap(a) do { } while (0)
#endif /* CONFIG_PROFILING */
diff --git a/include/linux/property.h b/include/linux/property.h
index d86de017c689..117cc200c656 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -15,6 +15,7 @@
#include <linux/types.h>
struct device;
+struct net_device;
enum dev_prop_type {
DEV_PROP_U8,
@@ -31,7 +32,7 @@ enum dev_dma_attr {
DEV_DMA_COHERENT,
};
-struct fwnode_handle *dev_fwnode(struct device *dev);
+struct fwnode_handle *dev_fwnode(const struct device *dev);
bool device_property_present(struct device *dev, const char *propname);
int device_property_read_u8_array(struct device *dev, const char *propname,
@@ -82,12 +83,19 @@ struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode,
const char *fwnode_get_name(const struct fwnode_handle *fwnode);
const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode);
+
struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode);
-struct fwnode_handle *fwnode_get_next_parent(
- struct fwnode_handle *fwnode);
+struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode);
+
+#define fwnode_for_each_parent_node(fwnode, parent) \
+ for (parent = fwnode_get_parent(fwnode); parent; \
+ parent = fwnode_get_next_parent(parent))
+
+struct device *fwnode_get_next_parent_dev(struct fwnode_handle *fwnode);
unsigned int fwnode_count_parents(const struct fwnode_handle *fwn);
struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwn,
unsigned int depth);
+bool fwnode_is_ancestor_of(struct fwnode_handle *ancestor, struct fwnode_handle *child);
struct fwnode_handle *fwnode_get_next_child_node(
const struct fwnode_handle *fwnode, struct fwnode_handle *child);
struct fwnode_handle *fwnode_get_next_available_child_node(
@@ -116,7 +124,8 @@ struct fwnode_handle *device_get_named_child_node(struct device *dev,
struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode);
void fwnode_handle_put(struct fwnode_handle *fwnode);
-int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index);
+int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index);
+int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name);
unsigned int device_get_child_node_count(struct device *dev);
@@ -170,6 +179,12 @@ static inline int device_property_count_u64(struct device *dev, const char *prop
return device_property_read_u64_array(dev, propname, NULL, 0);
}
+static inline int device_property_string_array_count(struct device *dev,
+ const char *propname)
+{
+ return device_property_read_string_array(dev, propname, NULL, 0);
+}
+
static inline bool fwnode_property_read_bool(const struct fwnode_handle *fwnode,
const char *propname)
{
@@ -224,6 +239,13 @@ static inline int fwnode_property_count_u64(const struct fwnode_handle *fwnode,
return fwnode_property_read_u64_array(fwnode, propname, NULL, 0);
}
+static inline int
+fwnode_property_string_array_count(const struct fwnode_handle *fwnode,
+ const char *propname)
+{
+ return fwnode_property_read_string_array(fwnode, propname, NULL, 0);
+}
+
struct software_node;
/**
@@ -238,6 +260,13 @@ struct software_node_ref_args {
u64 args[NR_FWNODE_REFERENCE_ARGS];
};
+#define SOFTWARE_NODE_REFERENCE(_ref_, ...) \
+(const struct software_node_ref_args) { \
+ .node = _ref_, \
+ .nargs = ARRAY_SIZE(((u64[]){ 0, ##__VA_ARGS__ })) - 1, \
+ .args = { __VA_ARGS__ }, \
+}
+
/**
* struct property_entry - "Built-in" device property representation.
* @name: Name of the property.
@@ -346,11 +375,7 @@ struct property_entry {
.name = _name_, \
.length = sizeof(struct software_node_ref_args), \
.type = DEV_PROP_REF, \
- { .pointer = &(const struct software_node_ref_args) { \
- .node = _ref_, \
- .nargs = ARRAY_SIZE(((u64[]){ 0, ##__VA_ARGS__ })) - 1, \
- .args = { __VA_ARGS__ }, \
- } }, \
+ { .pointer = &SOFTWARE_NODE_REFERENCE(_ref_, ##__VA_ARGS__), }, \
}
struct property_entry *
@@ -358,23 +383,17 @@ property_entries_dup(const struct property_entry *properties);
void property_entries_free(const struct property_entry *properties);
-int device_add_properties(struct device *dev,
- const struct property_entry *properties);
-void device_remove_properties(struct device *dev);
-
bool device_dma_supported(struct device *dev);
enum dev_dma_attr device_get_dma_attr(struct device *dev);
-const void *device_get_match_data(struct device *dev);
+const void *device_get_match_data(const struct device *dev);
int device_get_phy_mode(struct device *dev);
+int fwnode_get_phy_mode(struct fwnode_handle *fwnode);
-void *device_get_mac_address(struct device *dev, char *addr, int alen);
+void __iomem *fwnode_iomap(struct fwnode_handle *fwnode, int index);
-int fwnode_get_phy_mode(struct fwnode_handle *fwnode);
-void *fwnode_get_mac_address(struct fwnode_handle *fwnode,
- char *addr, int alen);
struct fwnode_handle *fwnode_graph_get_next_endpoint(
const struct fwnode_handle *fwnode, struct fwnode_handle *prev);
struct fwnode_handle *
@@ -385,9 +404,11 @@ struct fwnode_handle *fwnode_graph_get_remote_port(
const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_graph_get_remote_endpoint(
const struct fwnode_handle *fwnode);
-struct fwnode_handle *
-fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port,
- u32 endpoint);
+
+static inline bool fwnode_graph_is_endpoint(struct fwnode_handle *fwnode)
+{
+ return fwnode_property_present(fwnode, "remote-endpoint");
+}
/*
* Fwnode lookup flags
@@ -397,7 +418,8 @@ fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port,
* one.
* @FWNODE_GRAPH_DEVICE_DISABLED: That the device to which the remote
* endpoint of the given endpoint belongs to,
- * may be disabled.
+ * may be disabled, or that the endpoint is not
+ * connected.
*/
#define FWNODE_GRAPH_ENDPOINT_NEXT BIT(0)
#define FWNODE_GRAPH_DEVICE_DISABLED BIT(1)
@@ -405,6 +427,8 @@ fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port,
struct fwnode_handle *
fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
u32 port, u32 endpoint, unsigned long flags);
+unsigned int fwnode_graph_get_endpoint_count(struct fwnode_handle *fwnode,
+ unsigned long flags);
#define fwnode_graph_for_each_endpoint(fwnode, child) \
for (child = NULL; \
@@ -413,6 +437,25 @@ fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint);
+typedef void *(*devcon_match_fn_t)(struct fwnode_handle *fwnode, const char *id,
+ void *data);
+
+void *fwnode_connection_find_match(struct fwnode_handle *fwnode,
+ const char *con_id, void *data,
+ devcon_match_fn_t match);
+
+static inline void *device_connection_find_match(struct device *dev,
+ const char *con_id, void *data,
+ devcon_match_fn_t match)
+{
+ return fwnode_connection_find_match(dev_fwnode(dev), con_id, data, match);
+}
+
+int fwnode_connection_find_matches(struct fwnode_handle *fwnode,
+ const char *con_id, void *data,
+ devcon_match_fn_t match,
+ void **matches, unsigned int matches_len);
+
/* -------------------------------------------------------------------------- */
/* Software fwnode support - when HW description is incomplete or missing */
@@ -440,13 +483,22 @@ software_node_find_by_name(const struct software_node *parent,
int software_node_register_nodes(const struct software_node *nodes);
void software_node_unregister_nodes(const struct software_node *nodes);
-int software_node_register(const struct software_node *node);
+int software_node_register_node_group(const struct software_node **node_group);
+void software_node_unregister_node_group(const struct software_node **node_group);
-int software_node_notify(struct device *dev, unsigned long action);
+int software_node_register(const struct software_node *node);
+void software_node_unregister(const struct software_node *node);
struct fwnode_handle *
fwnode_create_software_node(const struct property_entry *properties,
const struct fwnode_handle *parent);
void fwnode_remove_software_node(struct fwnode_handle *fwnode);
+int device_add_software_node(struct device *dev, const struct software_node *node);
+void device_remove_software_node(struct device *dev);
+
+int device_create_managed_software_node(struct device *dev,
+ const struct property_entry *properties,
+ const struct software_node *parent);
+
#endif /* _LINUX_PROPERTY_H_ */
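
The new fwnode_for_each_parent_node() iterator takes over the reference juggling that fwnode_get_next_parent() implies; a hedged sketch (find_named_ancestor() is hypothetical):

#include <linux/property.h>
#include <linux/string.h>

/* Walk up the hierarchy; each iteration releases the previous parent's
 * reference, so only a match escapes with a reference still held. */
static struct fwnode_handle *find_named_ancestor(struct fwnode_handle *fwnode,
						 const char *name)
{
	struct fwnode_handle *parent;
	const char *n;

	fwnode_for_each_parent_node(fwnode, parent) {
		n = fwnode_get_name(parent);
		if (n && !strcmp(n, name))
			return parent;	/* caller must fwnode_handle_put() */
	}
	return NULL;
}
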
diff --git a/include/linux/pruss_driver.h b/include/linux/pruss_driver.h
new file mode 100644
index 000000000000..ecfded30ed05
--- /dev/null
+++ b/include/linux/pruss_driver.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * PRU-ICSS sub-system specific definitions
+ *
+ * Copyright (C) 2014-2020 Texas Instruments Incorporated - http://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#ifndef _PRUSS_DRIVER_H_
+#define _PRUSS_DRIVER_H_
+
+#include <linux/types.h>
+
+/*
+ * enum pruss_mem - PRUSS memory range identifiers
+ */
+enum pruss_mem {
+ PRUSS_MEM_DRAM0 = 0,
+ PRUSS_MEM_DRAM1,
+ PRUSS_MEM_SHRD_RAM2,
+ PRUSS_MEM_MAX,
+};
+
+/**
+ * struct pruss_mem_region - PRUSS memory region structure
+ * @va: kernel virtual address of the PRUSS memory region
+ * @pa: physical (bus) address of the PRUSS memory region
+ * @size: size of the PRUSS memory region
+ */
+struct pruss_mem_region {
+ void __iomem *va;
+ phys_addr_t pa;
+ size_t size;
+};
+
+/**
+ * struct pruss - PRUSS parent structure
+ * @dev: pruss device pointer
+ * @cfg_base: base iomap for CFG region
+ * @cfg_regmap: regmap for config region
+ * @mem_regions: data for each of the PRUSS memory regions
+ * @core_clk_mux: clk handle for PRUSS CORE_CLK_MUX
+ * @iep_clk_mux: clk handle for PRUSS IEP_CLK_MUX
+ */
+struct pruss {
+ struct device *dev;
+ void __iomem *cfg_base;
+ struct regmap *cfg_regmap;
+ struct pruss_mem_region mem_regions[PRUSS_MEM_MAX];
+ struct clk *core_clk_mux;
+ struct clk *iep_clk_mux;
+};
+
+#endif /* _PRUSS_DRIVER_H_ */
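
The pruss structure above is consumed by client drivers; a hedged sketch of walking the memory regions (pruss_dump_regions() and the name table are illustrative):

#include <linux/device.h>
#include <linux/pruss_driver.h>

static void pruss_dump_regions(struct pruss *pruss)
{
	static const char *const names[PRUSS_MEM_MAX] = {
		"DRAM0", "DRAM1", "SHRD_RAM2",
	};
	int i;

	/* mem_regions[] is indexed by enum pruss_mem */
	for (i = 0; i < PRUSS_MEM_MAX; i++)
		dev_info(pruss->dev, "%s: pa %pa size %zu\n", names[i],
			 &pruss->mem_regions[i].pa,
			 pruss->mem_regions[i].size);
}
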
diff --git a/include/linux/psci.h b/include/linux/psci.h
index a67712b73b6c..4ca0060a3fc4 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -18,14 +18,9 @@ bool psci_tos_resident_on(int cpu);
int psci_cpu_suspend_enter(u32 state);
bool psci_power_state_is_valid(u32 state);
-int psci_set_osi_mode(void);
+int psci_set_osi_mode(bool enable);
bool psci_has_osi_support(void);
-enum smccc_version {
- SMCCC_VERSION_1_0,
- SMCCC_VERSION_1_1,
-};
-
struct psci_operations {
u32 (*get_version)(void);
int (*cpu_suspend)(u32 state, unsigned long entry_point);
@@ -35,12 +30,19 @@ struct psci_operations {
int (*affinity_info)(unsigned long target_affinity,
unsigned long lowest_affinity_level);
int (*migrate_info_type)(void);
- enum arm_smccc_conduit conduit;
- enum smccc_version smccc_version;
};
extern struct psci_operations psci_ops;
+struct psci_0_1_function_ids {
+ u32 cpu_suspend;
+ u32 cpu_on;
+ u32 cpu_off;
+ u32 migrate;
+};
+
+struct psci_0_1_function_ids get_psci_0_1_function_ids(void);
+
#if defined(CONFIG_ARM_PSCI_FW)
int __init psci_dt_init(void);
#else
diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h
new file mode 100644
index 000000000000..fb724c65c77b
--- /dev/null
+++ b/include/linux/pse-pd/pse.h
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+// Copyright (c) 2022 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+ */
+#ifndef _LINUX_PSE_CONTROLLER_H
+#define _LINUX_PSE_CONTROLLER_H
+
+#include <linux/ethtool.h>
+#include <linux/list.h>
+#include <uapi/linux/ethtool.h>
+
+struct phy_device;
+struct pse_controller_dev;
+
+/**
+ * struct pse_control_config - PSE control/channel configuration.
+ *
+ * @admin_cotrol: set PoDL PSE admin control as described in
+ * IEEE 802.3-2018 30.15.1.2.1 acPoDLPSEAdminControl
+ */
+struct pse_control_config {
+ enum ethtool_podl_pse_admin_state admin_cotrol;
+};
+
+/**
+ * struct pse_control_status - PSE control/channel status.
+ *
+ * @podl_admin_state: operational state of the PoDL PSE
+ * functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState
+ * @podl_pw_status: power detection status of the PoDL PSE.
+ * IEEE 802.3-2018 30.15.1.1.3 aPoDLPSEPowerDetectionStatus:
+ */
+struct pse_control_status {
+ enum ethtool_podl_pse_admin_state podl_admin_state;
+ enum ethtool_podl_pse_pw_d_status podl_pw_status;
+};
+
+/**
+ * struct pse_controller_ops - PSE controller driver callbacks
+ *
+ * @ethtool_get_status: get PSE control status for ethtool interface
+ * @ethtool_set_config: set PSE control configuration over ethtool interface
+ */
+struct pse_controller_ops {
+ int (*ethtool_get_status)(struct pse_controller_dev *pcdev,
+ unsigned long id, struct netlink_ext_ack *extack,
+ struct pse_control_status *status);
+ int (*ethtool_set_config)(struct pse_controller_dev *pcdev,
+ unsigned long id, struct netlink_ext_ack *extack,
+ const struct pse_control_config *config);
+};
+
+struct module;
+struct device_node;
+struct of_phandle_args;
+struct pse_control;
+
+/**
+ * struct pse_controller_dev - PSE controller entity that might
+ * provide multiple PSE controls
+ * @ops: a pointer to device specific struct pse_controller_ops
+ * @owner: kernel module of the PSE controller driver
+ * @list: internal list of PSE controller devices
+ * @pse_control_head: head of internal list of requested PSE controls
+ * @dev: corresponding driver model device struct
+ * @of_pse_n_cells: number of cells in PSE line specifiers
+ * @of_xlate: translation function to translate from specifier as found in the
+ * device tree to id as given to the PSE control ops
+ * @nr_lines: number of PSE controls in this controller device
+ * @lock: Mutex for serialization access to the PSE controller
+ */
+struct pse_controller_dev {
+ const struct pse_controller_ops *ops;
+ struct module *owner;
+ struct list_head list;
+ struct list_head pse_control_head;
+ struct device *dev;
+ int of_pse_n_cells;
+ int (*of_xlate)(struct pse_controller_dev *pcdev,
+ const struct of_phandle_args *pse_spec);
+ unsigned int nr_lines;
+ struct mutex lock;
+};
+
+#if IS_ENABLED(CONFIG_PSE_CONTROLLER)
+int pse_controller_register(struct pse_controller_dev *pcdev);
+void pse_controller_unregister(struct pse_controller_dev *pcdev);
+struct device;
+int devm_pse_controller_register(struct device *dev,
+ struct pse_controller_dev *pcdev);
+
+struct pse_control *of_pse_control_get(struct device_node *node);
+void pse_control_put(struct pse_control *psec);
+
+int pse_ethtool_get_status(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ struct pse_control_status *status);
+int pse_ethtool_set_config(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ const struct pse_control_config *config);
+
+#else
+
+static inline struct pse_control *of_pse_control_get(struct device_node *node)
+{
+ return ERR_PTR(-ENOENT);
+}
+
+static inline void pse_control_put(struct pse_control *psec)
+{
+}
+
+static inline int pse_ethtool_get_status(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ struct pse_control_status *status)
+{
+ return -ENOTSUPP;
+}
+
+static inline int pse_ethtool_set_config(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ const struct pse_control_config *config)
+{
+ return -ENOTSUPP;
+}
+
+#endif
+
+#endif
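
A hedged skeleton of a PoDL PSE controller driver built on the ops and registration API declared above (demo_* names are hypothetical; only the status callback is filled in):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pse-pd/pse.h>
#include <linux/slab.h>

static int demo_get_status(struct pse_controller_dev *pcdev,
			   unsigned long id, struct netlink_ext_ack *extack,
			   struct pse_control_status *status)
{
	/* A real driver would read hardware state for line @id */
	status->podl_admin_state = ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED;
	status->podl_pw_status = ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING;
	return 0;
}

static const struct pse_controller_ops demo_ops = {
	.ethtool_get_status = demo_get_status,
};

static int demo_probe(struct platform_device *pdev)
{
	struct pse_controller_dev *pcdev;

	pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL);
	if (!pcdev)
		return -ENOMEM;

	pcdev->owner = THIS_MODULE;
	pcdev->ops = &demo_ops;
	pcdev->dev = &pdev->dev;
	pcdev->nr_lines = 1;
	return devm_pse_controller_register(&pdev->dev, pcdev);
}
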
diff --git a/include/linux/psi.h b/include/linux/psi.h
index 7b3de7321219..b029a847def1 100644
--- a/include/linux/psi.h
+++ b/include/linux/psi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PSI_H
#define _LINUX_PSI_H
@@ -5,6 +6,8 @@
#include <linux/psi_types.h>
#include <linux/sched.h>
#include <linux/poll.h>
+#include <linux/cgroup-defs.h>
+#include <linux/cgroup.h>
struct seq_file;
struct css_set;
@@ -16,25 +19,27 @@ extern struct psi_group psi_system;
void psi_init(void);
-void psi_task_change(struct task_struct *task, int clear, int set);
-
-void psi_memstall_tick(struct task_struct *task, int cpu);
void psi_memstall_enter(unsigned long *flags);
void psi_memstall_leave(unsigned long *flags);
int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);
+struct psi_trigger *psi_trigger_create(struct psi_group *group,
+ char *buf, enum psi_res res);
+void psi_trigger_destroy(struct psi_trigger *t);
+
+__poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
+ poll_table *wait);
#ifdef CONFIG_CGROUPS
+static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
+{
+ return cgroup_ino(cgrp) == 1 ? &psi_system : cgrp->psi;
+}
+
int psi_cgroup_alloc(struct cgroup *cgrp);
void psi_cgroup_free(struct cgroup *cgrp);
void cgroup_move_task(struct task_struct *p, struct css_set *to);
-
-struct psi_trigger *psi_trigger_create(struct psi_group *group,
- char *buf, size_t nbytes, enum psi_res res);
-void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *t);
-
-__poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
- poll_table *wait);
+void psi_cgroup_restart(struct psi_group *group);
#endif
#else /* CONFIG_PSI */
@@ -56,6 +61,7 @@ static inline void cgroup_move_task(struct task_struct *p, struct css_set *to)
{
rcu_assign_pointer(p->cgroups, to);
}
+static inline void psi_cgroup_restart(struct psi_group *group) {}
#endif
#endif /* CONFIG_PSI */
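
psi_trigger_create() parses trigger strings written by userspace to /proc/pressure/<resource>; a runnable userspace sketch (thresholds are illustrative, format as documented in Documentation/accounting/psi.rst):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* 150ms of "some" memory stall within any 1s window */
	const char trig[] = "some 150000 1000000";
	struct pollfd pfd;

	pfd.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
	if (pfd.fd < 0 || write(pfd.fd, trig, strlen(trig) + 1) < 0)
		return 1;

	pfd.events = POLLPRI;	/* POLLPRI signals a trigger event */
	while (poll(&pfd, 1, -1) > 0)
		if (pfd.revents & POLLPRI)
			printf("memory pressure event\n");
	return 0;
}
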
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
index 07aaf9b82241..6e4372735068 100644
--- a/include/linux/psi_types.h
+++ b/include/linux/psi_types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PSI_TYPES_H
#define _LINUX_PSI_TYPES_H
@@ -14,20 +15,37 @@ enum psi_task_count {
NR_IOWAIT,
NR_MEMSTALL,
NR_RUNNING,
- NR_PSI_TASK_COUNTS = 3,
+ /*
+ * For IO and CPU stalls the presence of running/oncpu tasks
+ * in the domain means a partial rather than a full stall.
+ * For memory it's not so simple because of page reclaimers:
+ * they are running/oncpu while representing a stall. To tell
+ * whether a domain has productivity left or not, we need to
+ * distinguish between regular running (i.e. productive)
+ * threads and memstall ones.
+ */
+ NR_MEMSTALL_RUNNING,
+ NR_PSI_TASK_COUNTS = 4,
};
/* Task state bitmasks */
#define TSK_IOWAIT (1 << NR_IOWAIT)
#define TSK_MEMSTALL (1 << NR_MEMSTALL)
#define TSK_RUNNING (1 << NR_RUNNING)
+#define TSK_MEMSTALL_RUNNING (1 << NR_MEMSTALL_RUNNING)
+
+/* Only one task can be scheduled, no corresponding task count */
+#define TSK_ONCPU (1 << NR_PSI_TASK_COUNTS)
/* Resources that workloads could be stalled on */
enum psi_res {
PSI_IO,
PSI_MEM,
PSI_CPU,
- NR_PSI_RESOURCES = 3,
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ PSI_IRQ,
+#endif
+ NR_PSI_RESOURCES,
};
/*
@@ -42,11 +60,18 @@ enum psi_states {
PSI_MEM_SOME,
PSI_MEM_FULL,
PSI_CPU_SOME,
+ PSI_CPU_FULL,
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ PSI_IRQ_FULL,
+#endif
/* Only per-CPU, to weigh the CPU in the global average: */
PSI_NONIDLE,
- NR_PSI_STATES = 6,
+ NR_PSI_STATES,
};
+/* Use one bit in the state mask to track TSK_ONCPU */
+#define PSI_ONCPU (1 << NR_PSI_STATES)
+
enum psi_aggregators {
PSI_AVGS = 0,
PSI_POLL,
@@ -121,11 +146,14 @@ struct psi_trigger {
*/
u64 last_event_time;
- /* Refcounting to prevent premature destruction */
- struct kref refcount;
+ /* Deferred event(s) from previous ratelimit window */
+ bool pending_event;
};
struct psi_group {
+ struct psi_group *parent;
+ bool enabled;
+
/* Protects data used by the aggregator */
struct mutex avgs_lock;
@@ -145,9 +173,10 @@ struct psi_group {
unsigned long avg[NR_PSI_STATES - 1][3];
/* Monitor work control */
- atomic_t poll_scheduled;
- struct kthread_worker __rcu *poll_kworker;
- struct kthread_delayed_work poll_work;
+ struct task_struct __rcu *poll_task;
+ struct timer_list poll_timer;
+ wait_queue_head_t poll_wait;
+ atomic_t poll_wakeup;
/* Protects data used by the monitor */
struct mutex trigger_lock;
@@ -166,6 +195,8 @@ struct psi_group {
#else /* CONFIG_PSI */
+#define NR_PSI_RESOURCES 0
+
struct psi_group { };
#endif /* CONFIG_PSI */
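
A small userspace model of the task-state bit layout above: the counted states occupy bits 0..NR_PSI_TASK_COUNTS-1, and TSK_ONCPU takes the next free bit because no per-state count is kept for it (values illustrative of the enum as defined above):

#include <stdio.h>

enum { NR_IOWAIT, NR_MEMSTALL, NR_RUNNING, NR_MEMSTALL_RUNNING,
       NR_PSI_TASK_COUNTS };

#define TSK_ONCPU (1 << NR_PSI_TASK_COUNTS)

int main(void)
{
	printf("counted bits: 0..%d, TSK_ONCPU = 0x%x\n",
	       NR_PSI_TASK_COUNTS - 1, TSK_ONCPU);	/* 0..3, 0x10 */
	return 0;
}
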
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index 5167bf2bfc75..1595088c428b 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -52,6 +52,7 @@ enum sev_cmd {
SEV_CMD_DF_FLUSH = 0x00A,
SEV_CMD_DOWNLOAD_FIRMWARE = 0x00B,
SEV_CMD_GET_ID = 0x00C,
+ SEV_CMD_INIT_EX = 0x00D,
/* Guest commands */
SEV_CMD_DECOMMISSION = 0x020,
@@ -66,12 +67,14 @@ enum sev_cmd {
SEV_CMD_LAUNCH_MEASURE = 0x033,
SEV_CMD_LAUNCH_UPDATE_SECRET = 0x034,
SEV_CMD_LAUNCH_FINISH = 0x035,
+ SEV_CMD_ATTESTATION_REPORT = 0x036,
/* Guest migration commands (outgoing) */
SEV_CMD_SEND_START = 0x040,
SEV_CMD_SEND_UPDATE_DATA = 0x041,
SEV_CMD_SEND_UPDATE_VMSA = 0x042,
SEV_CMD_SEND_FINISH = 0x043,
+ SEV_CMD_SEND_CANCEL = 0x044,
/* Guest migration commands (incoming) */
SEV_CMD_RECEIVE_START = 0x050,
@@ -101,6 +104,28 @@ struct sev_data_init {
} __packed;
/**
+ * struct sev_data_init_ex - INIT_EX command parameters
+ *
+ * @length: len of the command buffer read by the PSP
+ * @flags: processing flags
+ * @tmr_address: system physical address used for SEV-ES
+ * @tmr_len: len of tmr_address
+ * @nv_address: system physical address used for PSP NV storage
+ * @nv_len: len of nv_address
+ */
+struct sev_data_init_ex {
+ u32 length; /* In */
+ u32 flags; /* In */
+ u64 tmr_address; /* In */
+ u32 tmr_len; /* In */
+ u32 reserved; /* In */
+ u64 nv_address; /* In/Out */
+ u32 nv_len; /* In */
+} __packed;
+
+#define SEV_INIT_FLAGS_SEV_ES 0x01
+
+/**
* struct sev_data_pek_csr - PEK_CSR command parameters
*
* @address: PEK certificate chain
@@ -323,11 +348,11 @@ struct sev_data_send_start {
u64 pdh_cert_address; /* In */
u32 pdh_cert_len; /* In */
u32 reserved1;
- u64 plat_cert_address; /* In */
- u32 plat_cert_len; /* In */
+ u64 plat_certs_address; /* In */
+ u32 plat_certs_len; /* In */
u32 reserved2;
- u64 amd_cert_address; /* In */
- u32 amd_cert_len; /* In */
+ u64 amd_certs_address; /* In */
+ u32 amd_certs_len; /* In */
u32 reserved3;
u64 session_address; /* In */
u32 session_len; /* In/Out */
@@ -390,6 +415,15 @@ struct sev_data_send_finish {
} __packed;
/**
+ * struct sev_data_send_cancel - SEND_CANCEL command parameters
+ *
+ * @handle: handle of the VM to process
+ */
+struct sev_data_send_cancel {
+ u32 handle; /* In */
+} __packed;
+
+/**
* struct sev_data_receive_start - RECEIVE_START command parameters
*
* @handle: handle of the VM to perform receive operation
@@ -481,6 +515,22 @@ struct sev_data_dbg {
u32 len; /* In */
} __packed;
+/**
+ * struct sev_data_attestation_report - SEV_ATTESTATION_REPORT command parameters
+ *
+ * @handle: handle of the VM
+ * @mnonce: a random nonce that will be included in the report.
+ * @address: physical address where the report will be copied.
+ * @len: length of the physical buffer.
+ */
+struct sev_data_attestation_report {
+ u32 handle; /* In */
+ u32 reserved;
+ u64 address; /* In */
+ u8 mnonce[16]; /* In */
+ u32 len; /* In/Out */
+} __packed;
+
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
/**
@@ -595,7 +645,7 @@ int sev_guest_df_flush(int *error);
*/
int sev_guest_decommission(struct sev_data_decommission *data, int *error);
-void *psp_copy_user_blob(u64 __user uaddr, u32 len);
+void *psp_copy_user_blob(u64 uaddr, u32 len);
#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index e779441e6d26..638507a3c8ff 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -14,7 +14,7 @@
#include <linux/errno.h>
#include <linux/kmsg_dump.h>
#include <linux/mutex.h>
-#include <linux/semaphore.h>
+#include <linux/spinlock.h>
#include <linux/time.h>
#include <linux/types.h>
@@ -57,6 +57,9 @@ struct pstore_info;
* @size: size of @buf
* @ecc_notice_size:
* ECC information for @buf
+ * @priv: pointer for backend specific use, will be
+ * kfree()d by the pstore core if non-NULL
+ * when the record is freed.
*
* Valid for PSTORE_TYPE_DMESG @type:
*
@@ -74,6 +77,7 @@ struct pstore_record {
char *buf;
ssize_t size;
ssize_t ecc_notice_size;
+ void *priv;
int count;
enum kmsg_dump_reason reason;
@@ -87,7 +91,7 @@ struct pstore_record {
* @owner: module which is responsible for this backend driver
* @name: name of the backend driver
*
- * @buf_lock: semaphore to serialize access to @buf
+ * @buf_lock: spinlock to serialize access to @buf
* @buf: preallocated crash dump buffer
* @bufsize: size of @buf available for crash dump bytes (must match
* smallest number of bytes available for writing to a
@@ -96,6 +100,12 @@ struct pstore_record {
*
* @read_mutex: serializes @open, @read, @close, and @erase callbacks
* @flags: bitfield of frontends the backend can accept writes for
+ * @max_reason: Used when PSTORE_FLAGS_DMESG is set. Contains the
+ * kmsg_dump_reason enum value. KMSG_DUMP_UNDEF means
+ * "use existing kmsg_dump() filtering, based on the
+ * printk.always_kmsg_dump boot param" (which is either
+ * KMSG_DUMP_OOPS when false, or KMSG_DUMP_MAX when
+ * true); see printk.always_kmsg_dump for more details.
* @data: backend-private pointer passed back during callbacks
*
* Callbacks:
@@ -170,15 +180,16 @@ struct pstore_record {
*/
struct pstore_info {
struct module *owner;
- char *name;
+ const char *name;
- struct semaphore buf_lock;
+ spinlock_t buf_lock;
char *buf;
size_t bufsize;
struct mutex read_mutex;
int flags;
+ int max_reason;
void *data;
int (*open)(struct pstore_info *psi);
diff --git a/include/linux/pstore_blk.h b/include/linux/pstore_blk.h
new file mode 100644
index 000000000000..924ca07aafbd
--- /dev/null
+++ b/include/linux/pstore_blk.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __PSTORE_BLK_H_
+#define __PSTORE_BLK_H_
+
+#include <linux/types.h>
+#include <linux/pstore.h>
+#include <linux/pstore_zone.h>
+
+/**
+ * struct pstore_device_info - back-end pstore/blk driver structure.
+ *
+ * @flags: Refer to the macros starting with PSTORE_FLAGS defined in
+ * linux/pstore.h. They indicate which frontends this device supports.
+ * Zero means all frontends, for compatibility.
+ * @zone: The struct pstore_zone_info details.
+ *
+ */
+struct pstore_device_info {
+ unsigned int flags;
+ struct pstore_zone_info zone;
+};
+
+int register_pstore_device(struct pstore_device_info *dev);
+void unregister_pstore_device(struct pstore_device_info *dev);
+
+/**
+ * struct pstore_blk_config - the pstore_blk backend configuration
+ *
+ * @device: Name of the desired block device
+ * @max_reason: Maximum kmsg dump reason to store to block device
+ * @kmsg_size: Total size for kmsg dumps
+ * @pmsg_size: Total size of the pmsg storage area
+ * @console_size: Total size of the console storage area
+ * @ftrace_size: Total size for ftrace logging data (for all CPUs)
+ */
+struct pstore_blk_config {
+ char device[80];
+ enum kmsg_dump_reason max_reason;
+ unsigned long kmsg_size;
+ unsigned long pmsg_size;
+ unsigned long console_size;
+ unsigned long ftrace_size;
+};
+
+/**
+ * pstore_blk_get_config - get a copy of the pstore_blk backend configuration
+ *
+ * @info: The struct pstore_blk_config to be filled in
+ *
+ * Returns 0 on success and a negative error code on failure.
+ */
+int pstore_blk_get_config(struct pstore_blk_config *info);
+
+#endif
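A hedged registration sketch in the spirit of block/MTD-backed users: the driver supplies only the raw read/write primitives and the total size, and leaves @flags at zero to accept all frontends (the device name and I/O helpers are hypothetical):

/* Sketch: register a block-backed pstore device. */
static ssize_t my_disk_read(char *buf, size_t bytes, loff_t pos);        /* hypothetical */
static ssize_t my_disk_write(const char *buf, size_t bytes, loff_t pos); /* hypothetical */

static int my_disk_register_pstore(void)
{
        static struct pstore_device_info dev = {
                .flags = 0,                     /* zero: all supported frontends */
        };

        dev.zone.name = "my_disk";              /* hypothetical backend name */
        dev.zone.total_size = 1024 * 4096;      /* > 4096, multiple of 4096 */
        dev.zone.read = my_disk_read;
        dev.zone.write = my_disk_write;

        return register_pstore_device(&dev);
}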
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 9cb9b9067298..9f16afec7290 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -133,7 +133,7 @@ struct ramoops_platform_data {
unsigned long console_size;
unsigned long ftrace_size;
unsigned long pmsg_size;
- int dump_oops;
+ int max_reason;
u32 flags;
struct persistent_ram_ecc_info ecc_info;
};
diff --git a/include/linux/pstore_zone.h b/include/linux/pstore_zone.h
new file mode 100644
index 000000000000..1e35eaa33e5e
--- /dev/null
+++ b/include/linux/pstore_zone.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __PSTORE_ZONE_H_
+#define __PSTORE_ZONE_H_
+
+#include <linux/types.h>
+
+typedef ssize_t (*pstore_zone_read_op)(char *, size_t, loff_t);
+typedef ssize_t (*pstore_zone_write_op)(const char *, size_t, loff_t);
+typedef ssize_t (*pstore_zone_erase_op)(size_t, loff_t);
+/**
+ * struct pstore_zone_info - pstore/zone back-end driver structure
+ *
+ * @owner: Module which is responsible for this back-end driver.
+ * @name: Name of the back-end driver.
+ * @total_size: The total size in bytes pstore/zone can use. It must be greater
+ * than 4096 and a multiple of 4096.
+ * @kmsg_size: The size of the oops/panic zone. Zero means disabled; otherwise,
+ * it must be a multiple of SECTOR_SIZE (512 bytes).
+ * @max_reason: Maximum kmsg dump reason to store.
+ * @pmsg_size: The size of the pmsg zone, with the same constraints as
+ * @kmsg_size.
+ * @console_size: The size of the console zone, with the same constraints as
+ * @kmsg_size.
+ * @ftrace_size: The size of the ftrace zone, with the same constraints as
+ * @kmsg_size.
+ * @read: The general read operation. Both function parameters, @size and
+ * @offset, are offsets relative to the storage.
+ * On success, the number of bytes read should be returned; any other
+ * value means error.
+ * @write: The same as @read, but with the following special error codes:
+ * -EBUSY means try to write again later.
+ * -ENOMSG means to try the next zone.
+ * @erase: The general erase operation, for devices that need a dedicated
+ * erase step. Both function parameters, @size and @offset, are
+ * offsets relative to the storage.
+ * Return 0 on success and nonzero on failure.
+ * @panic_write: The write operation used only in the panic case. It is
+ * optional if you do not care about the panic log. The parameters
+ * are offsets relative to the storage.
+ * On success, the number of bytes written should be returned; any
+ * other value excluding -ENOMSG means error. -ENOMSG means to try
+ * the next zone.
+ */
+struct pstore_zone_info {
+ struct module *owner;
+ const char *name;
+
+ unsigned long total_size;
+ unsigned long kmsg_size;
+ int max_reason;
+ unsigned long pmsg_size;
+ unsigned long console_size;
+ unsigned long ftrace_size;
+ pstore_zone_read_op read;
+ pstore_zone_write_op write;
+ pstore_zone_erase_op erase;
+ pstore_zone_write_op panic_write;
+};
+
+extern int register_pstore_zone(struct pstore_zone_info *info);
+extern void unregister_pstore_zone(struct pstore_zone_info *info);
+
+#endif
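The special @write return codes are easiest to see in a sketch; here the flash helpers are hypothetical, and only the error-code protocol follows the documentation above:

/* Sketch: a pstore/zone write op honoring the documented return codes. */
static ssize_t my_flash_write(const char *buf, size_t bytes, loff_t pos)
{
        if (my_flash_busy())                    /* hypothetical busy check */
                return -EBUSY;                  /* pstore/zone retries later */

        if (my_flash_region_bad(pos, bytes))    /* hypothetical bad-block test */
                return -ENOMSG;                 /* pstore/zone tries the next zone */

        return my_flash_copy_out(buf, bytes, pos); /* bytes written on success */
}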
diff --git a/include/linux/ptdump.h b/include/linux/ptdump.h
index a67065c403c3..2a3a95586425 100644
--- a/include/linux/ptdump.h
+++ b/include/linux/ptdump.h
@@ -13,7 +13,8 @@ struct ptdump_range {
struct ptdump_state {
/* level is 0:PGD to 4:PTE, or -1 if unknown */
void (*note_page)(struct ptdump_state *st, unsigned long addr,
- int level, unsigned long val);
+ int level, u64 val);
+ void (*effective_prot)(struct ptdump_state *st, int level, u64 val);
const struct ptdump_range *range;
};
diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
index dd00fa41f7e7..2b6ea36ad162 100644
--- a/include/linux/ptp_classify.h
+++ b/include/linux/ptp_classify.h
@@ -31,12 +31,20 @@
#define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN)
#define PTP_CLASS_L4 (PTP_CLASS_IPV4 | PTP_CLASS_IPV6)
+#define PTP_MSGTYPE_SYNC 0x0
+#define PTP_MSGTYPE_DELAY_REQ 0x1
+#define PTP_MSGTYPE_PDELAY_REQ 0x2
+#define PTP_MSGTYPE_PDELAY_RESP 0x3
+
#define PTP_EV_PORT 319
+#define PTP_GEN_PORT 320
#define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */
#define OFF_PTP_SOURCE_UUID 22 /* PTPv1 only */
#define OFF_PTP_SEQUENCE_ID 30
-#define OFF_PTP_CONTROL 32 /* PTPv1 only */
+
+/* PTP header flag fields */
+#define PTP_FLAG_TWOSTEP BIT(1)
/* Below defines should actually be removed at some point in time. */
#define IP6_HLEN 40
@@ -44,6 +52,30 @@
#define OFF_IHL 14
#define IPV4_HLEN(data) (((struct iphdr *)(data + OFF_IHL))->ihl << 2)
+struct clock_identity {
+ u8 id[8];
+} __packed;
+
+struct port_identity {
+ struct clock_identity clock_identity;
+ __be16 port_number;
+} __packed;
+
+struct ptp_header {
+ u8 tsmt; /* transportSpecific | messageType */
+ u8 ver; /* reserved | versionPTP */
+ __be16 message_length;
+ u8 domain_number;
+ u8 reserved1;
+ u8 flag_field[2];
+ __be64 correction;
+ __be32 reserved2;
+ struct port_identity source_port_identity;
+ __be16 sequence_id;
+ u8 control;
+ u8 log_message_interval;
+} __packed;
+
#if defined(CONFIG_NET_PTP_CLASSIFY)
/**
* ptp_classify_raw - classify a PTP packet
@@ -57,6 +89,57 @@
*/
unsigned int ptp_classify_raw(const struct sk_buff *skb);
+/**
+ * ptp_parse_header - Get pointer to the PTP v2 header
+ * @skb: packet buffer
+ * @type: type of the packet (see ptp_classify_raw())
+ *
+ * This function takes care of the VLAN, UDP, IPv4 and IPv6 headers. The length
+ * is checked.
+ *
+ * Note, internally skb_mac_header() is used. Make sure that the @skb is
+ * initialized accordingly.
+ *
+ * Return: Pointer to the ptp v2 header or NULL if not found
+ */
+struct ptp_header *ptp_parse_header(struct sk_buff *skb, unsigned int type);
+
+/**
+ * ptp_get_msgtype - Extract ptp message type from given header
+ * @hdr: ptp header
+ * @type: type of the packet (see ptp_classify_raw())
+ *
+ * This function returns the message type for a given ptp header. It takes care
+ * of the different ptp header versions (v1 or v2).
+ *
+ * Return: The message type
+ */
+static inline u8 ptp_get_msgtype(const struct ptp_header *hdr,
+ unsigned int type)
+{
+ u8 msgtype;
+
+ if (unlikely(type & PTP_CLASS_V1)) {
+ /* msg type is located at the control field for ptp v1 */
+ msgtype = hdr->control;
+ } else {
+ msgtype = hdr->tsmt & 0x0f;
+ }
+
+ return msgtype;
+}
+
+/**
+ * ptp_msg_is_sync - Evaluates whether the given skb is a PTP Sync message
+ * @skb: packet buffer
+ * @type: type of the packet (see ptp_classify_raw())
+ *
+ * This function evaluates whether the given skb is a PTP Sync message.
+ *
+ * Return: true if sync message, false otherwise
+ */
+bool ptp_msg_is_sync(struct sk_buff *skb, unsigned int type);
+
void __init ptp_classifier_init(void);
#else
static inline void ptp_classifier_init(void)
@@ -66,5 +149,22 @@ static inline unsigned int ptp_classify_raw(struct sk_buff *skb)
{
return PTP_CLASS_NONE;
}
+static inline struct ptp_header *ptp_parse_header(struct sk_buff *skb,
+ unsigned int type)
+{
+ return NULL;
+}
+static inline u8 ptp_get_msgtype(const struct ptp_header *hdr,
+ unsigned int type)
+{
+ /* The return value is meaningless; this stub is never reached,
+ * since ptp_parse_header() cannot return a valid header here.
+ */
+ return PTP_MSGTYPE_SYNC;
+}
+static inline bool ptp_msg_is_sync(struct sk_buff *skb, unsigned int type)
+{
+ return false;
+}
#endif
#endif /* _PTP_CLASSIFY_H_ */
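Taken together, the classify/parse/msgtype helpers give receive paths a cheap Sync test; the following hedged sketch is essentially what ptp_msg_is_sync() packages up:

/* Sketch: check whether a received frame is a PTP Sync message. The skb
 * must have its MAC header set (see the ptp_parse_header() note).
 */
static bool my_rx_is_ptp_sync(struct sk_buff *skb)
{
        struct ptp_header *hdr;
        unsigned int type;

        type = ptp_classify_raw(skb);
        if (type == PTP_CLASS_NONE)
                return false;

        hdr = ptp_parse_header(skb, type);
        if (!hdr)
                return false;

        return ptp_get_msgtype(hdr, type) == PTP_MSGTYPE_SYNC;
}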
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index c64a1ef87240..92b44161408e 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -11,7 +11,23 @@
#include <linux/device.h>
#include <linux/pps_kernel.h>
#include <linux/ptp_clock.h>
+#include <linux/timecounter.h>
+#include <linux/skbuff.h>
+#define PTP_CLOCK_NAME_LEN 32
+/**
+ * struct ptp_clock_request - request PTP clock event
+ *
+ * @type: The type of the request.
+ * EXTTS: Configure external trigger timestamping
+ * PEROUT: Configure periodic output signal (e.g. PPS)
+ * PPS: trigger internal PPS event for input
+ * into kernel PPS subsystem
+ * @extts: describes configuration for external trigger timestamping.
+ * This is only valid when @type == PTP_CLK_REQ_EXTTS.
+ * @perout: describes configuration for periodic output.
+ * This is only valid when @type == PTP_CLK_REQ_PEROUT.
+ */
struct ptp_clock_request {
enum {
@@ -36,7 +52,7 @@ struct ptp_system_timestamp {
};
/**
- * struct ptp_clock_info - decribes a PTP hardware clock
+ * struct ptp_clock_info - describes a PTP hardware clock
*
* @owner: The clock driver should set to THIS_MODULE.
* @name: A short "friendly name" to identify the clock and to
@@ -65,6 +81,9 @@ struct ptp_system_timestamp {
* parameter delta: Desired frequency offset from nominal frequency
* in parts per billion
*
+ * @adjphase: Adjusts the phase offset of the hardware clock.
+ * parameter delta: Desired change in nanoseconds.
+ *
* @adjtime: Shifts the time of the hardware clock.
* parameter delta: Desired change in nanoseconds.
*
@@ -89,6 +108,32 @@ struct ptp_system_timestamp {
* @settime64: Set the current time on the hardware clock.
* parameter ts: Time value to set.
*
+ * @getcycles64: Reads the current free running cycle counter from the hardware
+ * clock.
+ * If @getcycles64 and @getcyclesx64 are not supported, then
+ * @gettime64 or @gettimex64 will be used as the default
+ * implementation.
+ * parameter ts: Holds the result.
+ *
+ * @getcyclesx64: Reads the current free running cycle counter from the
+ * hardware clock and optionally also the system clock.
+ * If @getcycles64 and @getcyclesx64 are not supported, then
+ * @gettimex64 will be used as the default implementation if
+ * available.
+ * parameter ts: Holds the PHC timestamp.
+ * parameter sts: If not NULL, it holds a pair of timestamps
+ * from the system clock. The first reading is made right before
+ * reading the lowest bits of the PHC timestamp and the second
+ * reading immediately follows that.
+ *
+ * @getcrosscycles: Reads the current free running cycle counter from the
+ * hardware clock and system clock simultaneously.
+ * If @getcycles64 and @getcyclesx64 are not supported, then
+ * @getcrosststamp will be used as the default implementation if
+ * available.
+ * parameter cts: Contains timestamp (device,system) pair,
+ * where system time is realtime and monotonic.
+ *
* @enable: Request driver to enable or disable an ancillary feature.
* parameter request: Desired resource to enable or disable.
* parameter on: Caller passes one to enable or zero to disable.
@@ -105,10 +150,10 @@ struct ptp_system_timestamp {
* parameter func: the desired function to use.
* parameter chan: the function channel index to use.
*
- * @do_work: Request driver to perform auxiliary (periodic) operations
- * Driver should return delay of the next auxiliary work scheduling
- * time (>=0) or negative value in case further scheduling
- * is not required.
+ * @do_aux_work: Request driver to perform auxiliary (periodic) operations
+ * Driver should return delay of the next auxiliary work
+ * scheduling time (>=0) or negative value in case further
+ * scheduling is not required.
*
* Drivers should embed their ptp_clock_info within a private
* structure, obtaining a reference to it using container_of().
@@ -118,7 +163,7 @@ struct ptp_system_timestamp {
struct ptp_clock_info {
struct module *owner;
- char name[16];
+ char name[PTP_CLOCK_NAME_LEN];
s32 max_adj;
int n_alarm;
int n_ext_ts;
@@ -128,6 +173,7 @@ struct ptp_clock_info {
struct ptp_pin_desc *pin_config;
int (*adjfine)(struct ptp_clock_info *ptp, long scaled_ppm);
int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
+ int (*adjphase)(struct ptp_clock_info *ptp, s32 phase);
int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
int (*gettimex64)(struct ptp_clock_info *ptp, struct timespec64 *ts,
@@ -135,6 +181,11 @@ struct ptp_clock_info {
int (*getcrosststamp)(struct ptp_clock_info *ptp,
struct system_device_crosststamp *cts);
int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts);
+ int (*getcycles64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
+ int (*getcyclesx64)(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts);
+ int (*getcrosscycles)(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *cts);
int (*enable)(struct ptp_clock_info *ptp,
struct ptp_clock_request *request, int on);
int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
@@ -169,7 +220,33 @@ struct ptp_clock_event {
};
};
-#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+/**
+ * scaled_ppm_to_ppb() - convert scaled ppm to ppb
+ *
+ * @ppm: Parts per million, but with a 16 bit binary fractional field
+ */
+static inline long scaled_ppm_to_ppb(long ppm)
+{
+ /*
+ * The 'freq' field in the 'struct timex' is in parts per
+ * million, but with a 16 bit binary fractional field.
+ *
+ * We want to calculate
+ *
+ * ppb = scaled_ppm * 1000 / 2^16
+ *
+ * which simplifies to
+ *
+ * ppb = scaled_ppm * 125 / 2^13
+ */
+ s64 ppb = 1 + ppm;
+
+ ppb *= 125;
+ ppb >>= 13;
+ return (long)ppb;
+}
+
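Moving this helper out of the #if block and making it a static inline lets drivers use it regardless of how the PTP core is built. A typical consumer is an ->adjfine() implementation (the register-write helper is hypothetical); note that 65536 scaled ppm converts to exactly 1000 ppb, i.e. 1 ppm:

/* Sketch: convert the ->adjfine() argument before programming hardware. */
static int my_phc_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
        long ppb = scaled_ppm_to_ppb(scaled_ppm);

        return my_phc_write_freq_offset(ppb);   /* hypothetical register write */
}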
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
/**
* ptp_clock_register() - register a PTP hardware clock driver
@@ -213,16 +290,14 @@ extern void ptp_clock_event(struct ptp_clock *ptp,
extern int ptp_clock_index(struct ptp_clock *ptp);
/**
- * scaled_ppm_to_ppb() - convert scaled ppm to ppb
- *
- * @ppm: Parts per million, but with a 16 bit binary fractional field
- */
-
-extern s32 scaled_ppm_to_ppb(long ppm);
-
-/**
* ptp_find_pin() - obtain the pin index of a given auxiliary function
*
+ * The caller must hold ptp_clock::pincfg_mux. Drivers do not have
+ * access to that mutex as ptp_clock is an opaque type. However, the
+ * core code acquires the mutex before invoking the driver's
+ * ptp_clock_info::enable() callback, and so drivers may call this
+ * function from that context.
+ *
* @ptp: The clock obtained from ptp_clock_register().
* @func: One of the ptp_pin_function enumerated values.
* @chan: The particular functional channel to find.
@@ -234,6 +309,19 @@ int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan);
/**
+ * ptp_find_pin_unlocked() - wrapper for ptp_find_pin()
+ *
+ * This function acquires the ptp_clock::pincfg_mux mutex before
+ * invoking ptp_find_pin(). Instead of using this function, drivers
+ * should most likely call ptp_find_pin() directly from their
+ * ptp_clock_info::enable() method.
+ *
+ */
+
+int ptp_find_pin_unlocked(struct ptp_clock *ptp,
+ enum ptp_pin_function func, unsigned int chan);
+
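A hedged sketch of the locking contract spelled out above: inside ->enable() the core already holds pincfg_mux, so ptp_find_pin() is called directly, while other driver contexts would use the _unlocked() wrapper (the driver structure is hypothetical):

/* Sketch: pin lookup from within the ->enable() callback. */
static int my_phc_enable(struct ptp_clock_info *info,
                         struct ptp_clock_request *rq, int on)
{
        struct my_phc *phc = container_of(info, struct my_phc, caps); /* hypothetical */
        int pin;

        if (rq->type == PTP_CLK_REQ_EXTTS) {
                pin = ptp_find_pin(phc->ptp, PTP_PF_EXTTS, rq->extts.index);
                if (pin < 0)
                        return -EINVAL;
                /* program the pin for external timestamping ... */
        }

        return 0;
}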
+/**
* ptp_schedule_worker() - schedule ptp auxiliary work
*
* @ptp: The clock obtained from ptp_clock_register().
@@ -264,11 +352,50 @@ static inline int ptp_clock_index(struct ptp_clock *ptp)
static inline int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan)
{ return -1; }
+static inline int ptp_find_pin_unlocked(struct ptp_clock *ptp,
+ enum ptp_pin_function func,
+ unsigned int chan)
+{ return -1; }
static inline int ptp_schedule_worker(struct ptp_clock *ptp,
unsigned long delay)
{ return -EOPNOTSUPP; }
static inline void ptp_cancel_worker_sync(struct ptp_clock *ptp)
{ }
+#endif
+
+#if IS_BUILTIN(CONFIG_PTP_1588_CLOCK)
+/*
+ * These are called by the network core, and don't work if PTP is in
+ * a loadable module.
+ */
+
+/**
+ * ptp_get_vclocks_index() - get the indices of all vclocks on a pclock;
+ * the caller is responsible for freeing the
+ * memory behind @vclock_index
+ *
+ * @pclock_index: PHC index of the ptp pclock.
+ * @vclock_index: pointer to a pointer that receives the vclock index array.
+ *
+ * Returns the number of vclocks.
+ */
+int ptp_get_vclocks_index(int pclock_index, int **vclock_index);
+
+/**
+ * ptp_convert_timestamp() - convert timestamp to a ptp vclock time
+ *
+ * @hwtstamp: timestamp
+ * @vclock_index: phc index of ptp vclock.
+ *
+ * Returns converted timestamp, or 0 on error.
+ */
+ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, int vclock_index);
+#else
+static inline int ptp_get_vclocks_index(int pclock_index, int **vclock_index)
+{ return 0; }
+static inline ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp,
+ int vclock_index)
+{ return 0; }
#endif
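A hedged sketch of the intended consumer: a timestamping path remapping a raw hardware timestamp into a vclock's time domain before delivering it (the surrounding driver context is hypothetical):

/* Sketch: rewrite a hardware timestamp for a vclock user. */
static void my_fixup_hwtstamp(struct skb_shared_hwtstamps *hwts, int vclock_index)
{
        ktime_t vclock_time;

        vclock_time = ptp_convert_timestamp(&hwts->hwtstamp, vclock_index);
        if (vclock_time)                /* zero means the conversion failed */
                hwts->hwtstamp = vclock_time;
}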
diff --git a/include/linux/ptp_kvm.h b/include/linux/ptp_kvm.h
new file mode 100644
index 000000000000..c2e28deef33a
--- /dev/null
+++ b/include/linux/ptp_kvm.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Virtual PTP 1588 clock for use with KVM guests
+ *
+ * Copyright (C) 2017 Red Hat Inc.
+ */
+
+#ifndef _PTP_KVM_H_
+#define _PTP_KVM_H_
+
+#include <linux/types.h>
+
+struct timespec64;
+struct clocksource;
+
+int kvm_arch_ptp_init(void);
+int kvm_arch_ptp_get_clock(struct timespec64 *ts);
+int kvm_arch_ptp_get_crosststamp(u64 *cycle,
+ struct timespec64 *tspec, struct clocksource **cs);
+
+#endif /* _PTP_KVM_H_ */
diff --git a/include/linux/ptp_pch.h b/include/linux/ptp_pch.h
new file mode 100644
index 000000000000..7ba643b62c15
--- /dev/null
+++ b/include/linux/ptp_pch.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * PTP PCH
+ *
+ * Copyright 2019 Linaro Ltd.
+ *
+ * Author Lee Jones <lee.jones@linaro.org>
+ */
+
+#ifndef _PTP_PCH_H_
+#define _PTP_PCH_H_
+
+#include <linux/types.h>
+
+struct pci_dev;
+
+void pch_ch_control_write(struct pci_dev *pdev, u32 val);
+u32 pch_ch_event_read(struct pci_dev *pdev);
+void pch_ch_event_write(struct pci_dev *pdev, u32 val);
+u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
+u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
+u64 pch_rx_snap_read(struct pci_dev *pdev);
+u64 pch_tx_snap_read(struct pci_dev *pdev);
+int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
+
+#endif /* _PTP_PCH_H_ */
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 417db0a79a62..808f9d3ee546 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -107,7 +107,7 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
return -ENOSPC;
/* Make sure the pointer we are storing points to a valid data. */
- /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */
+ /* Pairs with the dependency ordering in __ptr_ring_consume. */
smp_wmb();
WRITE_ONCE(r->queue[r->producer++], ptr);
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 2a9df80ea887..c952c5ba8fab 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -30,7 +30,6 @@ extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
#define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */
#define PT_PTRACED 0x00000001
-#define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */
#define PT_OPT_FLAG_SHIFT 3
/* PT_TRACE_* event enable flags */
@@ -47,12 +46,6 @@ extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
#define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
#define PT_SUSPEND_SECCOMP (PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT)
-/* single stepping state bits (used on ARM and PA-RISC) */
-#define PT_SINGLESTEP_BIT 31
-#define PT_SINGLESTEP (1<<PT_SINGLESTEP_BIT)
-#define PT_BLOCKSTEP_BIT 30
-#define PT_BLOCKSTEP (1<<PT_BLOCKSTEP_BIT)
-
extern long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
@@ -60,7 +53,7 @@ extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned
extern void ptrace_disable(struct task_struct *);
extern int ptrace_request(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
-extern void ptrace_notify(int exit_code);
+extern int ptrace_notify(int exit_code, unsigned long message);
extern void __ptrace_link(struct task_struct *child,
struct task_struct *new_parent,
const struct cred *ptracer_cred);
@@ -155,8 +148,7 @@ static inline bool ptrace_event_enabled(struct task_struct *task, int event)
static inline void ptrace_event(int event, unsigned long message)
{
if (unlikely(ptrace_event_enabled(current, event))) {
- current->ptrace_message = message;
- ptrace_notify((event << 8) | SIGTRAP);
+ ptrace_notify((event << 8) | SIGTRAP, message);
} else if (event == PTRACE_EVENT_EXEC) {
/* legacy EXEC report via SIGTRAP */
if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
@@ -171,7 +163,7 @@ static inline void ptrace_event(int event, unsigned long message)
*
* Check whether @event is enabled and, if so, report @event and @pid
* to the ptrace parent. @pid is reported as the pid_t seen from the
- * the ptrace parent's pid namespace.
+ * ptrace parent's pid namespace.
*
* Called without locks.
*/
@@ -362,29 +354,25 @@ static inline void user_single_step_report(struct pt_regs *regs)
#ifndef arch_ptrace_stop_needed
/**
* arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
- * @code: current->exit_code value ptrace will stop with
- * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
*
* This is called with the siglock held, to decide whether or not it's
- * necessary to release the siglock and call arch_ptrace_stop() with the
- * same @code and @info arguments. It can be defined to a constant if
- * arch_ptrace_stop() is never required, or always is. On machines where
- * this makes sense, it should be defined to a quick test to optimize out
- * calling arch_ptrace_stop() when it would be superfluous. For example,
- * if the thread has not been back to user mode since the last stop, the
- * thread state might indicate that nothing needs to be done.
+ * necessary to release the siglock and call arch_ptrace_stop(). It can be
+ * defined to a constant if arch_ptrace_stop() is never required, or always
+ * is. On machines where this makes sense, it should be defined to a quick
+ * test to optimize out calling arch_ptrace_stop() when it would be
+ * superfluous. For example, if the thread has not been back to user mode
+ * since the last stop, the thread state might indicate that nothing needs
+ * to be done.
*
* This is guaranteed to be invoked once before a task stops for ptrace and
* may include arch-specific operations necessary prior to a ptrace stop.
*/
-#define arch_ptrace_stop_needed(code, info) (0)
+#define arch_ptrace_stop_needed() (0)
#endif
#ifndef arch_ptrace_stop
/**
* arch_ptrace_stop - Do machine-specific work before stopping for ptrace
- * @code: current->exit_code value ptrace will stop with
- * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
*
* This is called with no locks held when arch_ptrace_stop_needed() has
* just returned nonzero. It is allowed to block, e.g. for user memory
@@ -394,7 +382,7 @@ static inline void user_single_step_report(struct pt_regs *regs)
* we only do it when the arch requires it for this particular stop, as
* indicated by arch_ptrace_stop_needed().
*/
-#define arch_ptrace_stop(code, info) do { } while (0)
+#define arch_ptrace_stop() do { } while (0)
#endif
#ifndef current_pt_regs
@@ -417,4 +405,80 @@ static inline void user_single_step_report(struct pt_regs *regs)
extern int task_current_syscall(struct task_struct *target, struct syscall_info *info);
extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact);
+
+/*
+ * ptrace report for syscall entry and exit looks identical.
+ */
+static inline int ptrace_report_syscall(unsigned long message)
+{
+ int ptrace = current->ptrace;
+ int signr;
+
+ if (!(ptrace & PT_PTRACED))
+ return 0;
+
+ signr = ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0),
+ message);
+
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (signr)
+ send_sig(signr, current, 1);
+
+ return fatal_signal_pending(current);
+}
+
+/**
+ * ptrace_report_syscall_entry - task is about to attempt a system call
+ * @regs: user register state of current task
+ *
+ * This will be called if %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_EMU have been set, when the current task has just
+ * entered the kernel for a system call. Full user register state is
+ * available here. Changing the values in @regs can affect the system
+ * call number and arguments to be tried. It is safe to block here,
+ * preventing the system call from beginning.
+ *
+ * Returns zero normally, or nonzero if the calling arch code should abort
+ * the system call. That must prevent normal entry so no system call is
+ * made. If @task ever returns to user mode after this, its register state
+ * is unspecified, but should be something harmless like an %ENOSYS error
+ * return. It should preserve enough information so that syscall_rollback()
+ * can work (see asm-generic/syscall.h).
+ *
+ * Called without locks, just after entering kernel mode.
+ */
+static inline __must_check int ptrace_report_syscall_entry(
+ struct pt_regs *regs)
+{
+ return ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_ENTRY);
+}
+
+/**
+ * ptrace_report_syscall_exit - task has just finished a system call
+ * @regs: user register state of current task
+ * @step: nonzero if simulating single-step or block-step
+ *
+ * This will be called if %SYSCALL_WORK_SYSCALL_TRACE has been set, when
+ * the current task has just finished an attempted system call. Full
+ * user register state is available here. It is safe to block here,
+ * preventing signals from being processed.
+ *
+ * If @step is nonzero, this report is also in lieu of the normal
+ * trap that would follow the system call instruction because
+ * user_enable_block_step() or user_enable_single_step() was used.
+ * In this case, %SYSCALL_WORK_SYSCALL_TRACE might not be set.
+ *
+ * Called without locks, just before checking for pending signals.
+ */
+static inline void ptrace_report_syscall_exit(struct pt_regs *regs, int step)
+{
+ if (step)
+ user_single_step_report(regs);
+ else
+ ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_EXIT);
+}
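For orientation, a hedged sketch of the call sites these helpers expect, loosely modeled on generic syscall entry code; the dispatcher is hypothetical and real arch code differs in detail:

/* Sketch: reporting around syscall dispatch. */
static long my_arch_do_syscall(struct pt_regs *regs, long nr)
{
        long ret;

        if (test_syscall_work(SYSCALL_TRACE) &&
            ptrace_report_syscall_entry(regs))
                return -1;      /* tracer asked to abort the syscall */

        ret = invoke_syscall(regs, nr); /* hypothetical dispatcher */

        if (test_syscall_work(SYSCALL_TRACE))
                ptrace_report_syscall_exit(regs, 0);

        return ret;
}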
#endif
diff --git a/include/linux/ptrace_api.h b/include/linux/ptrace_api.h
new file mode 100644
index 000000000000..26e7d275ad8d
--- /dev/null
+++ b/include/linux/ptrace_api.h
@@ -0,0 +1 @@
+#include <linux/ptrace.h>
diff --git a/include/linux/purgatory.h b/include/linux/purgatory.h
index b950e961cfa8..d7dc1559427f 100644
--- a/include/linux/purgatory.h
+++ b/include/linux/purgatory.h
@@ -3,7 +3,7 @@
#define _LINUX_PURGATORY_H
#include <linux/types.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <uapi/linux/kexec.h>
struct kexec_sha_region {
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 0ef808d925bb..d70c6e5a839d 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -6,9 +6,6 @@
#include <linux/mutex.h>
#include <linux/of.h>
-struct pwm_capture;
-struct seq_file;
-
struct pwm_chip;
/**
@@ -39,7 +36,7 @@ enum pwm_polarity {
* current PWM hardware state.
*/
struct pwm_args {
- unsigned int period;
+ u64 period;
enum pwm_polarity polarity;
};
@@ -54,12 +51,17 @@ enum {
* @duty_cycle: PWM duty cycle (in nanoseconds)
* @polarity: PWM polarity
* @enabled: PWM enabled status
+ * @usage_power: If set, the PWM driver is only required to maintain the power
+ * output but has more freedom regarding signal form.
+ * If supported, the signal can be optimized, for example to
+ * improve EMI by phase shifting individual channels.
*/
struct pwm_state {
- unsigned int period;
- unsigned int duty_cycle;
+ u64 period;
+ u64 duty_cycle;
enum pwm_polarity polarity;
bool enabled;
+ bool usage_power;
};
/**
@@ -71,7 +73,8 @@ struct pwm_state {
* @chip: PWM chip providing this PWM device
* @chip_data: chip-private data associated with the PWM device
* @args: PWM arguments
- * @state: curent PWM channel state
+ * @state: last applied state
+ * @last: last implemented state (for PWM_DEBUG)
*/
struct pwm_device {
const char *label;
@@ -83,12 +86,18 @@ struct pwm_device {
struct pwm_args args;
struct pwm_state state;
+ struct pwm_state last;
};
/**
* pwm_get_state() - retrieve the current PWM state
* @pwm: PWM device
* @state: state to fill with the current PWM state
+ *
+ * The returned PWM state represents the state that was applied by a previous call to
+ * pwm_apply_state(). Drivers may have to slightly tweak that state before programming it to
+ * hardware. If pwm_apply_state() was never called, this returns either the current hardware
+ * state (if supported) or the default settings.
*/
static inline void pwm_get_state(const struct pwm_device *pwm,
struct pwm_state *state)
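With the widened u64 fields and the new @usage_power flag, the usual consumer pattern remains get/modify/apply; a hedged sketch (the 50% duty choice is illustrative):

/* Sketch: derive a new state from the last applied one. */
static int my_set_half_duty(struct pwm_device *pwm)
{
        struct pwm_state state;

        pwm_get_state(pwm, &state);     /* last applied (or initial) state */
        state.duty_cycle = state.period / 2;
        state.usage_power = false;      /* exact waveform required */
        state.enabled = true;

        return pwm_apply_state(pwm, &state);
}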
@@ -105,13 +114,13 @@ static inline bool pwm_is_enabled(const struct pwm_device *pwm)
return state.enabled;
}
-static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period)
+static inline void pwm_set_period(struct pwm_device *pwm, u64 period)
{
if (pwm)
pwm->state.period = period;
}
-static inline unsigned int pwm_get_period(const struct pwm_device *pwm)
+static inline u64 pwm_get_period(const struct pwm_device *pwm)
{
struct pwm_state state;
@@ -126,7 +135,7 @@ static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty)
pwm->state.duty_cycle = duty;
}
-static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm)
+static inline u64 pwm_get_duty_cycle(const struct pwm_device *pwm)
{
struct pwm_state state;
@@ -181,6 +190,7 @@ static inline void pwm_init_state(const struct pwm_device *pwm,
state->period = args.period;
state->polarity = args.polarity;
state->duty_cycle = 0;
+ state->usage_power = false;
}
/**
@@ -239,6 +249,16 @@ pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle,
}
/**
+ * struct pwm_capture - PWM capture data
+ * @period: period of the PWM signal (in nanoseconds)
+ * @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
+ */
+struct pwm_capture {
+ unsigned int period;
+ unsigned int duty_cycle;
+};
+
+/**
* struct pwm_ops - PWM controller operations
* @request: optional hook for requesting a PWM
* @free: optional hook for freeing a PWM
@@ -248,10 +268,6 @@ pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle,
* called once per PWM device when the PWM chip is
* registered.
* @owner: helps prevent removal of modules exporting active PWMs
- * @config: configure duty cycles and period length for this PWM
- * @set_polarity: configure the polarity of this PWM
- * @enable: enable PWM output toggling
- * @disable: disable PWM output toggling
*/
struct pwm_ops {
int (*request)(struct pwm_chip *chip, struct pwm_device *pwm);
@@ -263,14 +279,6 @@ struct pwm_ops {
void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state);
struct module *owner;
-
- /* Only used by legacy drivers */
- int (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns);
- int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm,
- enum pwm_polarity polarity);
- int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm);
- void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm);
};
/**
@@ -299,16 +307,6 @@ struct pwm_chip {
struct pwm_device *pwms;
};
-/**
- * struct pwm_capture - PWM capture data
- * @period: period of the PWM signal (in nanoseconds)
- * @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
- */
-struct pwm_capture {
- unsigned int period;
- unsigned int duty_cycle;
-};
-
#if IS_ENABLED(CONFIG_PWM)
/* PWM user APIs */
struct pwm_device *pwm_request(int pwm_id, const char *label);
@@ -390,42 +388,43 @@ int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result,
int pwm_set_chip_data(struct pwm_device *pwm, void *data);
void *pwm_get_chip_data(struct pwm_device *pwm);
-int pwmchip_add_with_polarity(struct pwm_chip *chip,
- enum pwm_polarity polarity);
int pwmchip_add(struct pwm_chip *chip);
-int pwmchip_remove(struct pwm_chip *chip);
+void pwmchip_remove(struct pwm_chip *chip);
+
+int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip);
+
struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
unsigned int index,
const char *label);
struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *pc,
const struct of_phandle_args *args);
+struct pwm_device *of_pwm_single_xlate(struct pwm_chip *pc,
+ const struct of_phandle_args *args);
struct pwm_device *pwm_get(struct device *dev, const char *con_id);
-struct pwm_device *of_pwm_get(struct device *dev, struct device_node *np,
- const char *con_id);
void pwm_put(struct pwm_device *pwm);
struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id);
-struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np,
- const char *con_id);
struct pwm_device *devm_fwnode_pwm_get(struct device *dev,
struct fwnode_handle *fwnode,
const char *con_id);
-void devm_pwm_put(struct device *dev, struct pwm_device *pwm);
#else
static inline struct pwm_device *pwm_request(int pwm_id, const char *label)
{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
static inline void pwm_free(struct pwm_device *pwm)
{
+ might_sleep();
}
static inline int pwm_apply_state(struct pwm_device *pwm,
const struct pwm_state *state)
{
+ might_sleep();
return -ENOTSUPP;
}
@@ -437,6 +436,7 @@ static inline int pwm_adjust_config(struct pwm_device *pwm)
static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
int period_ns)
{
+ might_sleep();
return -EINVAL;
}
@@ -449,11 +449,13 @@ static inline int pwm_capture(struct pwm_device *pwm,
static inline int pwm_enable(struct pwm_device *pwm)
{
+ might_sleep();
return -EINVAL;
}
static inline void pwm_disable(struct pwm_device *pwm)
{
+ might_sleep();
}
static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data)
@@ -471,11 +473,6 @@ static inline int pwmchip_add(struct pwm_chip *chip)
return -EINVAL;
}
-static inline int pwmchip_add_inversed(struct pwm_chip *chip)
-{
- return -EINVAL;
-}
-
static inline int pwmchip_remove(struct pwm_chip *chip)
{
return -EINVAL;
@@ -485,36 +482,26 @@ static inline struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
unsigned int index,
const char *label)
{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
static inline struct pwm_device *pwm_get(struct device *dev,
const char *consumer)
{
- return ERR_PTR(-ENODEV);
-}
-
-static inline struct pwm_device *of_pwm_get(struct device *dev,
- struct device_node *np,
- const char *con_id)
-{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
static inline void pwm_put(struct pwm_device *pwm)
{
+ might_sleep();
}
static inline struct pwm_device *devm_pwm_get(struct device *dev,
const char *consumer)
{
- return ERR_PTR(-ENODEV);
-}
-
-static inline struct pwm_device *devm_of_pwm_get(struct device *dev,
- struct device_node *np,
- const char *con_id)
-{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
@@ -522,12 +509,9 @@ static inline struct pwm_device *
devm_fwnode_pwm_get(struct device *dev, struct fwnode_handle *fwnode,
const char *con_id)
{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
-
-static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm)
-{
-}
#endif
static inline void pwm_apply_args(struct pwm_device *pwm)
@@ -558,6 +542,7 @@ static inline void pwm_apply_args(struct pwm_device *pwm)
state.enabled = false;
state.polarity = pwm->args.polarity;
state.period = pwm->args.period;
+ state.usage_power = false;
pwm_apply_state(pwm, &state);
}
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
index 8ea265a022fd..06086cb93b6f 100644
--- a/include/linux/pwm_backlight.h
+++ b/include/linux/pwm_backlight.h
@@ -16,8 +16,6 @@ struct platform_pwm_backlight_data {
unsigned int *levels;
unsigned int post_pwm_on_delay;
unsigned int pwm_off_delay;
- /* TODO remove once all users are switched to gpiod_* API */
- int enable_gpio;
int (*init)(struct device *dev);
int (*notify)(struct device *dev, int brightness);
void (*notify_after)(struct device *dev, int brightness);
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 6facf27865f9..a3fec2de512f 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -1,8 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * pxa2xx_ssp.h
- *
- * Copyright (C) 2003 Russell King, All Rights Reserved.
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
*
* This driver supports the following PXA CPU/SSP ports:-
*
@@ -13,13 +11,19 @@
* PXA3xx SSP1, SSP2, SSP3, SSP4
*/
-#ifndef __LINUX_SSP_H
-#define __LINUX_SSP_H
+#ifndef __LINUX_PXA2XX_SSP_H
+#define __LINUX_PXA2XX_SSP_H
-#include <linux/list.h>
+#include <linux/bits.h>
+#include <linux/compiler_types.h>
#include <linux/io.h>
-#include <linux/of.h>
+#include <linux/kconfig.h>
+#include <linux/list.h>
+#include <linux/types.h>
+struct clk;
+struct device;
+struct device_node;
/*
* SSP Serial Port Registers
@@ -34,7 +38,6 @@
#define SSDR (0x10) /* SSP Data Write/Data Read Register */
#define SSTO (0x28) /* SSP Time Out Register */
-#define DDS_RATE (0x28) /* SSP DDS Clock Rate Register (Intel Quark) */
#define SSPSP (0x2C) /* SSP Programmable Serial Protocol */
#define SSTSA (0x30) /* SSP Tx Timeslot Active */
#define SSRSA (0x34) /* SSP Rx Timeslot Active */
@@ -43,130 +46,130 @@
#define SSACDD (0x40) /* SSP Audio Clock Dither Divider */
/* Common PXA2xx bits first */
-#define SSCR0_DSS (0x0000000f) /* Data Size Select (mask) */
+#define SSCR0_DSS GENMASK(3, 0) /* Data Size Select (mask) */
#define SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..16] */
-#define SSCR0_FRF (0x00000030) /* FRame Format (mask) */
+#define SSCR0_FRF GENMASK(5, 4) /* FRame Format (mask) */
#define SSCR0_Motorola (0x0 << 4) /* Motorola's Serial Peripheral Interface (SPI) */
#define SSCR0_TI (0x1 << 4) /* Texas Instruments' Synchronous Serial Protocol (SSP) */
#define SSCR0_National (0x2 << 4) /* National Microwire */
-#define SSCR0_ECS (1 << 6) /* External clock select */
-#define SSCR0_SSE (1 << 7) /* Synchronous Serial Port Enable */
+#define SSCR0_ECS BIT(6) /* External clock select */
+#define SSCR0_SSE BIT(7) /* Synchronous Serial Port Enable */
#define SSCR0_SCR(x) ((x) << 8) /* Serial Clock Rate (mask) */
/* PXA27x, PXA3xx */
-#define SSCR0_EDSS (1 << 20) /* Extended data size select */
-#define SSCR0_NCS (1 << 21) /* Network clock select */
-#define SSCR0_RIM (1 << 22) /* Receive FIFO overrrun interrupt mask */
-#define SSCR0_TUM (1 << 23) /* Transmit FIFO underrun interrupt mask */
-#define SSCR0_FRDC (0x07000000) /* Frame rate divider control (mask) */
+#define SSCR0_EDSS BIT(20) /* Extended data size select */
+#define SSCR0_NCS BIT(21) /* Network clock select */
+#define SSCR0_RIM BIT(22) /* Receive FIFO overrun interrupt mask */
+#define SSCR0_TUM BIT(23) /* Transmit FIFO underrun interrupt mask */
+#define SSCR0_FRDC GENMASK(26, 24) /* Frame rate divider control (mask) */
#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame [1..8] */
-#define SSCR0_FPCKE (1 << 29) /* FIFO packing enable */
-#define SSCR0_ACS (1 << 30) /* Audio clock select */
-#define SSCR0_MOD (1 << 31) /* Mode (normal or network) */
-
-
-#define SSCR1_RIE (1 << 0) /* Receive FIFO Interrupt Enable */
-#define SSCR1_TIE (1 << 1) /* Transmit FIFO Interrupt Enable */
-#define SSCR1_LBM (1 << 2) /* Loop-Back Mode */
-#define SSCR1_SPO (1 << 3) /* Motorola SPI SSPSCLK polarity setting */
-#define SSCR1_SPH (1 << 4) /* Motorola SPI SSPSCLK phase setting */
-#define SSCR1_MWDS (1 << 5) /* Microwire Transmit Data Size */
-
-#define SSSR_ALT_FRM_MASK 3 /* Masks the SFRM signal number */
-#define SSSR_TNF (1 << 2) /* Transmit FIFO Not Full */
-#define SSSR_RNE (1 << 3) /* Receive FIFO Not Empty */
-#define SSSR_BSY (1 << 4) /* SSP Busy */
-#define SSSR_TFS (1 << 5) /* Transmit FIFO Service Request */
-#define SSSR_RFS (1 << 6) /* Receive FIFO Service Request */
-#define SSSR_ROR (1 << 7) /* Receive FIFO Overrun */
+#define SSCR0_FPCKE BIT(29) /* FIFO packing enable */
+#define SSCR0_ACS BIT(30) /* Audio clock select */
+#define SSCR0_MOD BIT(31) /* Mode (normal or network) */
+
+#define SSCR1_RIE BIT(0) /* Receive FIFO Interrupt Enable */
+#define SSCR1_TIE BIT(1) /* Transmit FIFO Interrupt Enable */
+#define SSCR1_LBM BIT(2) /* Loop-Back Mode */
+#define SSCR1_SPO BIT(3) /* Motorola SPI SSPSCLK polarity setting */
+#define SSCR1_SPH BIT(4) /* Motorola SPI SSPSCLK phase setting */
+#define SSCR1_MWDS BIT(5) /* Microwire Transmit Data Size */
+
+#define SSSR_ALT_FRM_MASK GENMASK(1, 0) /* Masks the SFRM signal number */
+#define SSSR_TNF BIT(2) /* Transmit FIFO Not Full */
+#define SSSR_RNE BIT(3) /* Receive FIFO Not Empty */
+#define SSSR_BSY BIT(4) /* SSP Busy */
+#define SSSR_TFS BIT(5) /* Transmit FIFO Service Request */
+#define SSSR_RFS BIT(6) /* Receive FIFO Service Request */
+#define SSSR_ROR BIT(7) /* Receive FIFO Overrun */
#define RX_THRESH_DFLT 8
#define TX_THRESH_DFLT 8
-#define SSSR_TFL_MASK (0xf << 8) /* Transmit FIFO Level mask */
-#define SSSR_RFL_MASK (0xf << 12) /* Receive FIFO Level mask */
+#define SSSR_TFL_MASK GENMASK(11, 8) /* Transmit FIFO Level mask */
+#define SSSR_RFL_MASK GENMASK(15, 12) /* Receive FIFO Level mask */
-#define SSCR1_TFT (0x000003c0) /* Transmit FIFO Threshold (mask) */
+#define SSCR1_TFT GENMASK(9, 6) /* Transmit FIFO Threshold (mask) */
#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
-#define SSCR1_RFT (0x00003c00) /* Receive FIFO Threshold (mask) */
+#define SSCR1_RFT GENMASK(13, 10) /* Receive FIFO Threshold (mask) */
#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
#define RX_THRESH_CE4100_DFLT 2
#define TX_THRESH_CE4100_DFLT 2
-#define CE4100_SSSR_TFL_MASK (0x3 << 8) /* Transmit FIFO Level mask */
-#define CE4100_SSSR_RFL_MASK (0x3 << 12) /* Receive FIFO Level mask */
+#define CE4100_SSSR_TFL_MASK GENMASK(9, 8) /* Transmit FIFO Level mask */
+#define CE4100_SSSR_RFL_MASK GENMASK(13, 12) /* Receive FIFO Level mask */
-#define CE4100_SSCR1_TFT (0x000000c0) /* Transmit FIFO Threshold (mask) */
+#define CE4100_SSCR1_TFT GENMASK(7, 6) /* Transmit FIFO Threshold (mask) */
#define CE4100_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..4] */
-#define CE4100_SSCR1_RFT (0x00000c00) /* Receive FIFO Threshold (mask) */
+#define CE4100_SSCR1_RFT GENMASK(11, 10) /* Receive FIFO Threshold (mask) */
#define CE4100_SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */
+/* Intel Quark X1000 */
+#define DDS_RATE 0x28 /* SSP DDS Clock Rate Register */
+
/* QUARK_X1000 SSCR0 bit definition */
-#define QUARK_X1000_SSCR0_DSS (0x1F << 0) /* Data Size Select (mask) */
+#define QUARK_X1000_SSCR0_DSS GENMASK(4, 0) /* Data Size Select (mask) */
#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */
-#define QUARK_X1000_SSCR0_FRF (0x3 << 5) /* FRame Format (mask) */
+#define QUARK_X1000_SSCR0_FRF GENMASK(6, 5) /* FRame Format (mask) */
#define QUARK_X1000_SSCR0_Motorola (0x0 << 5) /* Motorola's Serial Peripheral Interface (SPI) */
#define RX_THRESH_QUARK_X1000_DFLT 1
#define TX_THRESH_QUARK_X1000_DFLT 16
-#define QUARK_X1000_SSSR_TFL_MASK (0x1F << 8) /* Transmit FIFO Level mask */
-#define QUARK_X1000_SSSR_RFL_MASK (0x1F << 13) /* Receive FIFO Level mask */
+#define QUARK_X1000_SSSR_TFL_MASK GENMASK(12, 8) /* Transmit FIFO Level mask */
+#define QUARK_X1000_SSSR_RFL_MASK GENMASK(17, 13) /* Receive FIFO Level mask */
-#define QUARK_X1000_SSCR1_TFT (0x1F << 6) /* Transmit FIFO Threshold (mask) */
+#define QUARK_X1000_SSCR1_TFT GENMASK(10, 6) /* Transmit FIFO Threshold (mask) */
#define QUARK_X1000_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..32] */
-#define QUARK_X1000_SSCR1_RFT (0x1F << 11) /* Receive FIFO Threshold (mask) */
+#define QUARK_X1000_SSCR1_RFT GENMASK(15, 11) /* Receive FIFO Threshold (mask) */
#define QUARK_X1000_SSCR1_RxTresh(x) (((x) - 1) << 11) /* level [1..32] */
-#define QUARK_X1000_SSCR1_STRF (1 << 17) /* Select FIFO or EFWR */
-#define QUARK_X1000_SSCR1_EFWR (1 << 16) /* Enable FIFO Write/Read */
+#define QUARK_X1000_SSCR1_EFWR BIT(16) /* Enable FIFO Write/Read */
+#define QUARK_X1000_SSCR1_STRF BIT(17) /* Select FIFO or EFWR */
-/* extra bits in PXA255, PXA26x and PXA27x SSP ports */
+/* Extra bits in PXA255, PXA26x and PXA27x SSP ports */
#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */
#define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */
-#define SSCR1_TTELP (1 << 31) /* TXD Tristate Enable Last Phase */
-#define SSCR1_TTE (1 << 30) /* TXD Tristate Enable */
-#define SSCR1_EBCEI (1 << 29) /* Enable Bit Count Error interrupt */
-#define SSCR1_SCFR (1 << 28) /* Slave Clock free Running */
-#define SSCR1_ECRA (1 << 27) /* Enable Clock Request A */
-#define SSCR1_ECRB (1 << 26) /* Enable Clock request B */
-#define SSCR1_SCLKDIR (1 << 25) /* Serial Bit Rate Clock Direction */
-#define SSCR1_SFRMDIR (1 << 24) /* Frame Direction */
-#define SSCR1_RWOT (1 << 23) /* Receive Without Transmit */
-#define SSCR1_TRAIL (1 << 22) /* Trailing Byte */
-#define SSCR1_TSRE (1 << 21) /* Transmit Service Request Enable */
-#define SSCR1_RSRE (1 << 20) /* Receive Service Request Enable */
-#define SSCR1_TINTE (1 << 19) /* Receiver Time-out Interrupt enable */
-#define SSCR1_PINTE (1 << 18) /* Peripheral Trailing Byte Interrupt Enable */
-#define SSCR1_IFS (1 << 16) /* Invert Frame Signal */
-#define SSCR1_STRF (1 << 15) /* Select FIFO or EFWR */
-#define SSCR1_EFWR (1 << 14) /* Enable FIFO Write/Read */
-
-#define SSSR_BCE (1 << 23) /* Bit Count Error */
-#define SSSR_CSS (1 << 22) /* Clock Synchronisation Status */
-#define SSSR_TUR (1 << 21) /* Transmit FIFO Under Run */
-#define SSSR_EOC (1 << 20) /* End Of Chain */
-#define SSSR_TINT (1 << 19) /* Receiver Time-out Interrupt */
-#define SSSR_PINT (1 << 18) /* Peripheral Trailing Byte Interrupt */
+#define SSCR1_EFWR BIT(14) /* Enable FIFO Write/Read */
+#define SSCR1_STRF BIT(15) /* Select FIFO or EFWR */
+#define SSCR1_IFS BIT(16) /* Invert Frame Signal */
+#define SSCR1_PINTE BIT(18) /* Peripheral Trailing Byte Interrupt Enable */
+#define SSCR1_TINTE BIT(19) /* Receiver Time-out Interrupt enable */
+#define SSCR1_RSRE BIT(20) /* Receive Service Request Enable */
+#define SSCR1_TSRE BIT(21) /* Transmit Service Request Enable */
+#define SSCR1_TRAIL BIT(22) /* Trailing Byte */
+#define SSCR1_RWOT BIT(23) /* Receive Without Transmit */
+#define SSCR1_SFRMDIR BIT(24) /* Frame Direction */
+#define SSCR1_SCLKDIR BIT(25) /* Serial Bit Rate Clock Direction */
+#define SSCR1_ECRB BIT(26) /* Enable Clock request B */
+#define SSCR1_ECRA BIT(27) /* Enable Clock Request A */
+#define SSCR1_SCFR BIT(28) /* Slave Clock free Running */
+#define SSCR1_EBCEI BIT(29) /* Enable Bit Count Error interrupt */
+#define SSCR1_TTE BIT(30) /* TXD Tristate Enable */
+#define SSCR1_TTELP BIT(31) /* TXD Tristate Enable Last Phase */
+
+#define SSSR_PINT BIT(18) /* Peripheral Trailing Byte Interrupt */
+#define SSSR_TINT BIT(19) /* Receiver Time-out Interrupt */
+#define SSSR_EOC BIT(20) /* End Of Chain */
+#define SSSR_TUR BIT(21) /* Transmit FIFO Under Run */
+#define SSSR_CSS BIT(22) /* Clock Synchronisation Status */
+#define SSSR_BCE BIT(23) /* Bit Count Error */
#define SSPSP_SCMODE(x) ((x) << 0) /* Serial Bit Rate Clock Mode */
-#define SSPSP_SFRMP (1 << 2) /* Serial Frame Polarity */
-#define SSPSP_ETDS (1 << 3) /* End of Transfer data State */
+#define SSPSP_SFRMP BIT(2) /* Serial Frame Polarity */
+#define SSPSP_ETDS BIT(3) /* End of Transfer data State */
#define SSPSP_STRTDLY(x) ((x) << 4) /* Start Delay */
#define SSPSP_DMYSTRT(x) ((x) << 7) /* Dummy Start */
#define SSPSP_SFRMDLY(x) ((x) << 9) /* Serial Frame Delay */
#define SSPSP_SFRMWDTH(x) ((x) << 16) /* Serial Frame Width */
#define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */
-#define SSPSP_FSRT (1 << 25) /* Frame Sync Relative Timing */
+#define SSPSP_FSRT BIT(25) /* Frame Sync Relative Timing */
/* PXA3xx */
#define SSPSP_EDMYSTRT(x) ((x) << 26) /* Extended Dummy Start */
#define SSPSP_EDMYSTOP(x) ((x) << 28) /* Extended Dummy Stop */
#define SSPSP_TIMING_MASK (0x7f8001f0)
-#define SSACD_SCDB (1 << 3) /* SSPSYSCLK Divider Bypass */
-#define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */
#define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */
#define SSACD_ACDS_1 (0)
#define SSACD_ACDS_2 (1)
@@ -174,18 +177,39 @@
#define SSACD_ACDS_8 (3)
#define SSACD_ACDS_16 (4)
#define SSACD_ACDS_32 (5)
+#define SSACD_SCDB BIT(3) /* SSPSYSCLK Divider Bypass */
#define SSACD_SCDB_4X (0)
#define SSACD_SCDB_1X (1)
-#define SSACD_SCDX8 (1 << 7) /* SYSCLK division ratio select */
+#define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */
+#define SSACD_SCDX8 BIT(7) /* SYSCLK division ratio select */
+
+/* Intel Merrifield SSP */
+#define SFIFOL 0x68 /* FIFO level */
+#define SFIFOTT 0x6c /* FIFO trigger threshold */
+
+#define RX_THRESH_MRFLD_DFLT 16
+#define TX_THRESH_MRFLD_DFLT 16
+
+#define SFIFOL_TFL_MASK GENMASK(15, 0) /* Transmit FIFO Level mask */
+#define SFIFOL_RFL_MASK GENMASK(31, 16) /* Receive FIFO Level mask */
+
+#define SFIFOTT_TFT GENMASK(15, 0) /* Transmit FIFO Threshold (mask) */
+#define SFIFOTT_TxThresh(x) (((x) - 1) << 0) /* TX FIFO trigger threshold / level */
+#define SFIFOTT_RFT GENMASK(31, 16) /* Receive FIFO Threshold (mask) */
+#define SFIFOTT_RxThresh(x) (((x) - 1) << 16) /* RX FIFO trigger threshold / level */
/* LPSS SSP */
#define SSITF 0x44 /* TX FIFO trigger level */
+#define SSITF_TxHiThresh(x) (((x) - 1) << 0)
#define SSITF_TxLoThresh(x) (((x) - 1) << 8)
-#define SSITF_TxHiThresh(x) ((x) - 1)
#define SSIRF 0x48 /* RX FIFO trigger level */
#define SSIRF_RxThresh(x) ((x) - 1)
+/* LPT/WPT SSP */
+#define SSCR2 (0x40) /* SSP Command / Status 2 */
+#define SSPSP2 (0x44) /* SSP Programmable Serial Protocol 2 */
+
enum pxa_ssp_type {
SSP_UNDEFINED = 0,
PXA25x_SSP, /* pxa 210, 250, 255, 26x */
@@ -196,8 +220,10 @@ enum pxa_ssp_type {
MMP2_SSP,
PXA910_SSP,
CE4100_SSP,
+ MRFLD_SSP,
QUARK_X1000_SSP,
- LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
+ /* Keep LPSS types sorted with lpss_platforms[] */
+ LPSS_LPT_SSP,
LPSS_BYT_SSP,
LPSS_BSW_SSP,
LPSS_SPT_SSP,
@@ -245,6 +271,22 @@ static inline u32 pxa_ssp_read_reg(struct ssp_device *dev, u32 reg)
return __raw_readl(dev->mmio_base + reg);
}
+static inline void pxa_ssp_enable(struct ssp_device *ssp)
+{
+ u32 sscr0;
+
+ sscr0 = pxa_ssp_read_reg(ssp, SSCR0) | SSCR0_SSE;
+ pxa_ssp_write_reg(ssp, SSCR0, sscr0);
+}
+
+static inline void pxa_ssp_disable(struct ssp_device *ssp)
+{
+ u32 sscr0;
+
+ sscr0 = pxa_ssp_read_reg(ssp, SSCR0) & ~SSCR0_SSE;
+ pxa_ssp_write_reg(ssp, SSCR0, sscr0);
+}
+
#if IS_ENABLED(CONFIG_PXA_SSP)
struct ssp_device *pxa_ssp_request(int port, const char *label);
void pxa_ssp_free(struct ssp_device *);
@@ -263,4 +305,4 @@ static inline struct ssp_device *pxa_ssp_request_of(const struct device_node *n,
static inline void pxa_ssp_free(struct ssp_device *ssp) {}
#endif
-#endif
+#endif /* __LINUX_PXA2XX_SSP_H */
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index dd464943f717..f5672785c0c4 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -6,11 +6,26 @@
#ifndef _LINUX_QCOM_GENI_SE
#define _LINUX_QCOM_GENI_SE
-/* Transfer mode supported by GENI Serial Engines */
+#include <linux/interconnect.h>
+
+/**
+ * enum geni_se_xfer_mode - Transfer modes supported by Serial Engines
+ *
+ * @GENI_SE_INVALID: Invalid mode
+ * @GENI_SE_FIFO: FIFO mode. Data is transferred through the SE FIFO
+ * by programmed I/O
+ * @GENI_SE_DMA: Serial Engine DMA mode. Data is transferred
+ * by the DMA engine internal to the SE
+ * @GENI_GPI_DMA: GPI DMA mode. Data is transferred using a DMA engine
+ * configured by firmware residing on a GSI engine. The names GSI
+ * and GPI are used interchangeably; both refer to the same DMA engine
+ */
+
enum geni_se_xfer_mode {
GENI_SE_INVALID,
GENI_SE_FIFO,
GENI_SE_DMA,
+ GENI_GPI_DMA,
};
/* Protocols supported by GENI Serial Engines */
@@ -25,6 +40,17 @@ enum geni_se_protocol_type {
struct geni_wrapper;
struct clk;
+enum geni_icc_path_index {
+ GENI_TO_CORE,
+ CPU_TO_GENI,
+ GENI_TO_DDR
+};
+
+struct geni_icc_path {
+ struct icc_path *path;
+ unsigned int avg_bw;
+};
+
/**
* struct geni_se - GENI Serial Engine
* @base: Base Address of the Serial Engine's register block
@@ -33,6 +59,7 @@ struct clk;
* @clk: Handle to the core serial engine clock
* @num_clk_levels: Number of valid clock levels in clk_perf_tbl
* @clk_perf_tbl: Table of clock frequency input to serial engine clock
+ * @icc_paths: Array of ICC paths for SE
*/
struct geni_se {
void __iomem *base;
@@ -41,6 +68,7 @@ struct geni_se {
struct clk *clk;
unsigned int num_clk_levels;
unsigned long *clk_perf_tbl;
+ struct geni_icc_path icc_paths[3];
};
/* Common SE registers */
@@ -48,6 +76,7 @@ struct geni_se {
#define SE_GENI_STATUS 0x40
#define GENI_SER_M_CLK_CFG 0x48
#define GENI_SER_S_CLK_CFG 0x4c
+#define GENI_IF_DISABLE_RO 0x64
#define GENI_FW_REVISION_RO 0x68
#define SE_GENI_CLK_SEL 0x7c
#define SE_GENI_DMA_MODE_EN 0x258
@@ -90,6 +119,9 @@ struct geni_se {
#define CLK_DIV_MSK GENMASK(15, 4)
#define CLK_DIV_SHFT 4
+/* GENI_IF_DISABLE_RO fields */
+#define FIFO_IF_DISABLE (BIT(0))
+
/* GENI_FW_REVISION_RO fields */
#define FW_REV_PROTOCOL_MSK GENMASK(15, 8)
#define FW_REV_PROTOCOL_SHFT 8
@@ -229,6 +261,24 @@ struct geni_se {
#define GENI_SE_VERSION_MINOR(ver) ((ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT)
#define GENI_SE_VERSION_STEP(ver) (ver & HW_VER_STEP_MASK)
+/* QUP SE VERSION value for major number 2 and minor number 5 */
+#define QUP_SE_VERSION_2_5 0x20050000
+
+/*
+ * Define bandwidth thresholds that cause the underlying Core 2X interconnect
+ * clock to run at the named frequency. These baseline values are recommended
+ * by the hardware team, and are not dynamically scaled with GENI bandwidth
+ * beyond basic on/off.
+ */
+#define CORE_2X_19_2_MHZ 960
+#define CORE_2X_50_MHZ 2500
+#define CORE_2X_100_MHZ 5000
+#define CORE_2X_150_MHZ 7500
+#define CORE_2X_200_MHZ 10000
+#define CORE_2X_236_MHZ 16383
+
+#define GENI_DEFAULT_BW Bps_to_icc(1000)
+
#if IS_ENABLED(CONFIG_QCOM_GENI_SE)
u32 geni_se_get_qup_hw_version(struct geni_se *se);
@@ -262,7 +312,7 @@ static inline void geni_se_setup_m_cmd(struct geni_se *se, u32 cmd, u32 params)
u32 m_cmd;
m_cmd = (cmd << M_OPCODE_SHFT) | (params & M_PARAMS_MSK);
- writel_relaxed(m_cmd, se->base + SE_GENI_M_CMD0);
+ writel(m_cmd, se->base + SE_GENI_M_CMD0);
}
/**
@@ -282,7 +332,7 @@ static inline void geni_se_setup_s_cmd(struct geni_se *se, u32 cmd, u32 params)
s_cmd &= ~(S_OPCODE_MSK | S_PARAMS_MSK);
s_cmd |= (cmd << S_OPCODE_SHFT);
s_cmd |= (params & S_PARAMS_MSK);
- writel_relaxed(s_cmd, se->base + SE_GENI_S_CMD0);
+ writel(s_cmd, se->base + SE_GENI_S_CMD0);
}
/**
@@ -416,5 +466,14 @@ int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len,
void geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len);
void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len);
+
+int geni_icc_get(struct geni_se *se, const char *icc_ddr);
+
+int geni_icc_set_bw(struct geni_se *se);
+void geni_icc_set_tag(struct geni_se *se, u32 tag);
+
+int geni_icc_enable(struct geni_se *se);
+
+int geni_icc_disable(struct geni_se *se);
#endif
#endif
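For illustration, a minimal sketch of how a GENI protocol driver might wire up the new interconnect support at probe time. The "qup-memory" path name and the bandwidth choices are illustrative assumptions, not mandated by this header:

static int example_geni_icc_setup(struct geni_se *se)
{
	int ret;

	/* Look up the GENI_TO_CORE, CPU_TO_GENI and GENI_TO_DDR paths;
	 * the DDR path name is assumed to come from the device tree. */
	ret = geni_icc_get(se, "qup-memory");
	if (ret)
		return ret;

	/* Baseline votes: minimal bandwidth on the core and CPU paths,
	 * one of the Core 2X thresholds on the DDR path. */
	se->icc_paths[GENI_TO_CORE].avg_bw = GENI_DEFAULT_BW;
	se->icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
	se->icc_paths[GENI_TO_DDR].avg_bw = CORE_2X_50_MHZ;

	ret = geni_icc_set_bw(se);
	if (ret)
		return ret;

	/* Vote the paths on before touching SE registers; the matching
	 * geni_icc_disable() belongs in the driver's idle path. */
	return geni_icc_enable(se);
}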
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index 3d6a24697761..f8335644a01a 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -44,6 +44,13 @@ enum qcom_scm_sec_dev_id {
QCOM_SCM_ICE_DEV_ID = 20,
};
+enum qcom_scm_ice_cipher {
+ QCOM_SCM_ICE_CIPHER_AES_128_XTS = 0,
+ QCOM_SCM_ICE_CIPHER_AES_128_CBC = 1,
+ QCOM_SCM_ICE_CIPHER_AES_256_XTS = 3,
+ QCOM_SCM_ICE_CIPHER_AES_256_CBC = 4,
+};
+
#define QCOM_SCM_VMID_HLOS 0x3
#define QCOM_SCM_VMID_MSS_MSA 0xF
#define QCOM_SCM_VMID_WLAN 0x18
@@ -54,16 +61,23 @@ enum qcom_scm_sec_dev_id {
#define QCOM_SCM_PERM_RW (QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE)
#define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC)
-#if IS_ENABLED(CONFIG_QCOM_SCM)
extern bool qcom_scm_is_available(void);
-extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
-extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus);
+extern int qcom_scm_set_cold_boot_addr(void *entry);
+extern int qcom_scm_set_warm_boot_addr(void *entry);
extern void qcom_scm_cpu_power_down(u32 flags);
extern int qcom_scm_set_remote_state(u32 state, u32 id);
+struct qcom_scm_pas_metadata {
+ void *ptr;
+ dma_addr_t phys;
+ ssize_t size;
+};
+
extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
- size_t size);
+ size_t size,
+ struct qcom_scm_pas_metadata *ctx);
+void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx);
extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
phys_addr_t size);
extern int qcom_scm_pas_auth_and_reset(u32 peripheral);
@@ -77,6 +91,10 @@ extern bool qcom_scm_restore_sec_cfg_available(void);
extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
+extern int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size);
+extern int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
+ u32 cp_nonpixel_start,
+ u32 cp_nonpixel_size);
extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
unsigned int *src,
const struct qcom_scm_vmperm *newvm,
@@ -88,61 +106,22 @@ extern int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
extern int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset,
u32 size);
+extern bool qcom_scm_ice_available(void);
+extern int qcom_scm_ice_invalidate_key(u32 index);
+extern int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
+ enum qcom_scm_ice_cipher cipher,
+ u32 data_unit_size);
+
extern bool qcom_scm_hdcp_available(void);
extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
u32 *resp);
+extern int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt);
extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en);
-#else
-
-#include <linux/errno.h>
-
-static inline bool qcom_scm_is_available(void) { return false; }
-
-static inline int qcom_scm_set_cold_boot_addr(void *entry,
- const cpumask_t *cpus) { return -ENODEV; }
-static inline int qcom_scm_set_warm_boot_addr(void *entry,
- const cpumask_t *cpus) { return -ENODEV; }
-static inline void qcom_scm_cpu_power_down(u32 flags) {}
-static inline u32 qcom_scm_set_remote_state(u32 state,u32 id)
- { return -ENODEV; }
-
-static inline int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
- size_t size) { return -ENODEV; }
-static inline int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
- phys_addr_t size) { return -ENODEV; }
-static inline int qcom_scm_pas_auth_and_reset(u32 peripheral)
- { return -ENODEV; }
-static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; }
-static inline bool qcom_scm_pas_supported(u32 peripheral) { return false; }
-
-static inline int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
- { return -ENODEV; }
-static inline int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
- { return -ENODEV; }
-
-static inline bool qcom_scm_restore_sec_cfg_available(void) { return false; }
-static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
- { return -ENODEV; }
-static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
- { return -ENODEV; }
-static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
- { return -ENODEV; }
-static inline int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
- unsigned int *src, const struct qcom_scm_vmperm *newvm,
- unsigned int dest_cnt) { return -ENODEV; }
-
-static inline bool qcom_scm_ocmem_lock_available(void) { return false; }
-static inline int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
- u32 size, u32 mode) { return -ENODEV; }
-static inline int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id,
- u32 offset, u32 size) { return -ENODEV; }
-
-static inline bool qcom_scm_hdcp_available(void) { return false; }
-static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
- u32 *resp) { return -ENODEV; }
-
-static inline int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
- { return -ENODEV; }
-#endif
+
+extern int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
+ u64 limit_node, u32 node_id, u64 version);
+extern int qcom_scm_lmh_profile_change(u32 profile_id);
+extern bool qcom_scm_lmh_dcvsh_available(void);
+
#endif
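For illustration, a sketch of how a remoteproc-style caller might use the reworked PAS interface, where the new context object keeps the metadata buffer alive until authentication finishes. The function and parameter names are placeholders:

static int example_pas_boot(u32 pas_id, const void *metadata, size_t size,
			    phys_addr_t mem_phys, phys_addr_t mem_size)
{
	struct qcom_scm_pas_metadata ctx = {};
	int ret;

	/* Hand the firmware metadata to the secure world; ctx tracks
	 * the backing buffer so it can be released later. */
	ret = qcom_scm_pas_init_image(pas_id, metadata, size, &ctx);
	if (ret)
		return ret;

	ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, mem_size);
	if (!ret)
		ret = qcom_scm_pas_auth_and_reset(pas_id);

	/* Release the metadata once authentication has run (or failed). */
	qcom_scm_pas_metadata_release(&ctx);
	return ret;
}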
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 2c4737e6694a..827624840ee2 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2016 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2021 Marvell International Ltd.
*/
#ifndef _COMMON_HSI_H
@@ -73,10 +47,10 @@
#define ISCSI_CDU_TASK_SEG_TYPE 0
#define FCOE_CDU_TASK_SEG_TYPE 0
#define RDMA_CDU_TASK_SEG_TYPE 1
+#define ETH_CDU_TASK_SEG_TYPE 2
#define FW_ASSERT_GENERAL_ATTN_IDX 32
-
/* Queue Zone sizes in bytes */
#define TSTORM_QZONE_SIZE 8
#define MSTORM_QZONE_SIZE 16
@@ -86,9 +60,12 @@
#define PSTORM_QZONE_SIZE 0
#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7
-#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16
-#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48
-#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112
+#define ETH_MAX_RXQ_VF_DEFAULT 16
+#define ETH_MAX_RXQ_VF_DOUBLE 48
+#define ETH_MAX_RXQ_VF_QUAD 112
+
+#define ETH_RGSRC_CTX_SIZE 6
+#define ETH_TGSRC_CTX_SIZE 6
/********************************/
/* CORE (LIGHT L2) FW CONSTANTS */
@@ -115,8 +92,8 @@
#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 42
-#define FW_REVISION_VERSION 2
+#define FW_MINOR_VERSION 59
+#define FW_REVISION_VERSION 1
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -138,6 +115,7 @@
#define MAX_NUM_VFS (MAX_NUM_VFS_K2)
#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
+#define MAX_NUM_FUNCTIONS_K2 (MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2)
#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
@@ -159,7 +137,7 @@
#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
/* CIDs */
-#define NUM_OF_CONNECTION_TYPES_E4 (8)
+#define NUM_OF_CONNECTION_TYPES (8)
#define NUM_OF_LCIDS (320)
#define NUM_OF_LTIDS (320)
@@ -170,7 +148,7 @@
#define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS)
/* Tools Version */
-#define TOOLS_VERSION 10
+#define TOOLS_VERSION 11
/*****************/
/* CDU CONSTANTS */
@@ -188,6 +166,7 @@
#define CDU_CONTEXT_VALIDATION_CFG_USE_REGION (3)
#define CDU_CONTEXT_VALIDATION_CFG_USE_CID (4)
#define CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE (5)
+#define CDU_CONTEXT_VALIDATION_DEFAULT_CFG (0x3d)
/*****************/
/* DQ CONSTANTS */
@@ -328,6 +307,9 @@
/* PWM address mapping */
#define DQ_PWM_OFFSET_DPM_BASE 0x0
#define DQ_PWM_OFFSET_DPM_END 0x27
+#define DQ_PWM_OFFSET_XCM32_24ICID_BASE 0x28
+#define DQ_PWM_OFFSET_UCM32_24ICID_BASE 0x30
+#define DQ_PWM_OFFSET_TCM32_24ICID_BASE 0x38
#define DQ_PWM_OFFSET_XCM16_BASE 0x40
#define DQ_PWM_OFFSET_XCM32_BASE 0x44
#define DQ_PWM_OFFSET_UCM16_BASE 0x48
@@ -351,6 +333,13 @@
#define DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE \
(DQ_PWM_OFFSET_TCM32_BASE + DQ_TCM_AGG_VAL_SEL_REG9 - 4)
+#define DQ_PWM_OFFSET_XCM_RDMA_24B_ICID_SQ_PROD \
+ (DQ_PWM_OFFSET_XCM32_24ICID_BASE + 2)
+#define DQ_PWM_OFFSET_UCM_RDMA_24B_ICID_CQ_CONS_32BIT \
+ (DQ_PWM_OFFSET_UCM32_24ICID_BASE + 4)
+#define DQ_PWM_OFFSET_TCM_ROCE_24B_ICID_RQ_PROD \
+ (DQ_PWM_OFFSET_TCM32_24ICID_BASE + 1)
+
#define DQ_REGION_SHIFT (12)
/* DPM */
@@ -386,6 +375,7 @@
/* Number of global Vport/QCN rate limiters */
#define MAX_QM_GLOBAL_RLS 256
+#define COMMON_MAX_QM_GLOBAL_RLS MAX_QM_GLOBAL_RLS
/* QM registers data */
#define QM_LINE_CRD_REG_WIDTH 16
@@ -405,7 +395,7 @@
#define CAU_FSM_ETH_TX 1
/* Number of Protocol Indices per Status Block */
-#define PIS_PER_SB_E4 12
+#define PIS_PER_SB 12
#define MAX_PIS_PER_SB PIS_PER_SB
#define CAU_HC_STOPPED_STATE 3
@@ -726,9 +716,16 @@ enum mf_mode {
MAX_MF_MODE
};
 +/* Per-protocol packet duplication enable bit vector. If set, duplicate
 + * offloaded traffic to the LL2 debug queue.
+ */
+struct offload_pkt_dup_enable {
+ __le16 enable_vector;
+};
+
/* Per-protocol connection types */
enum protocol_type {
- PROTOCOLID_ISCSI,
+ PROTOCOLID_TCP_ULP,
PROTOCOLID_FCOE,
PROTOCOLID_ROCE,
PROTOCOLID_CORE,
@@ -743,6 +740,12 @@ enum protocol_type {
MAX_PROTOCOL_TYPE
};
+/* Pstorm packet duplication config */
+struct pstorm_pkt_dup_cfg {
+ struct offload_pkt_dup_enable enable;
+ __le16 reserved[3];
+};
+
struct regpair {
__le32 lo;
__le32 hi;
@@ -754,10 +757,24 @@ struct rdma_eqe_destroy_qp {
u8 reserved[4];
};
+/* RoCE Suspend Event Data */
+struct rdma_eqe_suspend_qp {
+ __le32 cid;
+ u8 reserved[4];
+};
+
/* RDMA Event Data Union */
union rdma_eqe_data {
struct regpair async_handle;
struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
+ struct rdma_eqe_suspend_qp rdma_suspend_qp_data;
+};
+
+/* Tstorm packet duplication config */
+struct tstorm_pkt_dup_cfg {
+ struct offload_pkt_dup_enable enable;
+ __le16 reserved;
+ __le32 cid;
};
struct tstorm_queue_zone {
@@ -917,6 +934,15 @@ struct db_legacy_addr {
#define DB_LEGACY_ADDR_ICID_SHIFT 5
};
+/* Structure for doorbell address, in legacy mode, without DEMS */
+struct db_legacy_wo_dems_addr {
+ __le32 addr;
+#define DB_LEGACY_WO_DEMS_ADDR_RESERVED0_MASK 0x3
+#define DB_LEGACY_WO_DEMS_ADDR_RESERVED0_SHIFT 0
+#define DB_LEGACY_WO_DEMS_ADDR_ICID_MASK 0x3FFFFFFF
+#define DB_LEGACY_WO_DEMS_ADDR_ICID_SHIFT 2
+};
+
/* Structure for doorbell address, in PWM mode */
struct db_pwm_addr {
__le32 addr;
@@ -933,6 +959,31 @@ struct db_pwm_addr {
};
/* Parameters to RDMA firmware, passed in EDPM doorbell */
+struct db_rdma_24b_icid_dpm_params {
+ __le32 params;
+#define DB_RDMA_24B_ICID_DPM_PARAMS_SIZE_MASK 0x3F
+#define DB_RDMA_24B_ICID_DPM_PARAMS_SIZE_SHIFT 0
+#define DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_MASK 0x3
+#define DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_SHIFT 6
+#define DB_RDMA_24B_ICID_DPM_PARAMS_OPCODE_MASK 0xFF
+#define DB_RDMA_24B_ICID_DPM_PARAMS_OPCODE_SHIFT 8
+#define DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_MASK 0xFF
+#define DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_SHIFT 16
+#define DB_RDMA_24B_ICID_DPM_PARAMS_INV_BYTE_CNT_MASK 0x7
+#define DB_RDMA_24B_ICID_DPM_PARAMS_INV_BYTE_CNT_SHIFT 24
+#define DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN_SHIFT 27
+#define DB_RDMA_24B_ICID_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_RDMA_24B_ICID_DPM_PARAMS_S_FLG_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_S_FLG_SHIFT 29
+#define DB_RDMA_24B_ICID_DPM_PARAMS_RESERVED1_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_RESERVED1_SHIFT 30
+#define DB_RDMA_24B_ICID_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
+};
+
+/* Parameters to RDMA firmware, passed in EDPM doorbell */
struct db_rdma_dpm_params {
__le32 params;
#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
@@ -1246,21 +1297,41 @@ struct rdif_task_context {
__le32 reserved2;
};
+/* Searcher Table struct */
+struct src_entry_header {
+ __le32 flags;
+#define SRC_ENTRY_HEADER_NEXT_PTR_TYPE_MASK 0x1
+#define SRC_ENTRY_HEADER_NEXT_PTR_TYPE_SHIFT 0
+#define SRC_ENTRY_HEADER_EMPTY_MASK 0x1
+#define SRC_ENTRY_HEADER_EMPTY_SHIFT 1
+#define SRC_ENTRY_HEADER_RESERVED_MASK 0x3FFFFFFF
+#define SRC_ENTRY_HEADER_RESERVED_SHIFT 2
+ __le32 magic_number;
+ struct regpair next_ptr;
+};
+
+/* Enumeration for address type */
+enum src_header_next_ptr_type_enum {
+ e_physical_addr,
+ e_logical_addr,
+ MAX_SRC_HEADER_NEXT_PTR_TYPE_ENUM
+};
+
/* Status block structure */
-struct status_block_e4 {
- __le16 pi_array[PIS_PER_SB_E4];
+struct status_block {
+ __le16 pi_array[PIS_PER_SB];
__le32 sb_num;
-#define STATUS_BLOCK_E4_SB_NUM_MASK 0x1FF
-#define STATUS_BLOCK_E4_SB_NUM_SHIFT 0
-#define STATUS_BLOCK_E4_ZERO_PAD_MASK 0x7F
-#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT 9
-#define STATUS_BLOCK_E4_ZERO_PAD2_MASK 0xFFFF
-#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT 16
+#define STATUS_BLOCK_SB_NUM_MASK 0x1FF
+#define STATUS_BLOCK_SB_NUM_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F
+#define STATUS_BLOCK_ZERO_PAD_SHIFT 9
+#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF
+#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16
__le32 prod_index;
-#define STATUS_BLOCK_E4_PROD_INDEX_MASK 0xFFFFFF
-#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0
-#define STATUS_BLOCK_E4_ZERO_PAD3_MASK 0xFF
-#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT 24
+#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF
+#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF
+#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
};
/* Tdif context */
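The E4_ prefix removal above is mechanical; the bit layouts are unchanged, so field access still goes through the qed GET_FIELD()/SET_FIELD() helpers. A minimal, illustrative sketch of reading the renamed producer index:

static u32 example_sb_prod(const struct status_block *sb)
{
	/* prod_index is little-endian; GET_FIELD applies the
	 * STATUS_BLOCK_PROD_INDEX mask and shift. */
	return GET_FIELD(le32_to_cpu(sb->prod_index),
			 STATUS_BLOCK_PROD_INDEX);
}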
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index 95f5fd615852..c84e08bc6802 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __ETH_COMMON__
@@ -93,6 +67,7 @@
/* Ethernet vport update constants */
#define ETH_FILTER_RULES_COUNT 10
#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
+#define ETH_RSS_IND_TABLE_MASK_SIZE_REGS (ETH_RSS_IND_TABLE_ENTRIES_NUM / 32)
#define ETH_RSS_KEY_SIZE_REGS 10
#define ETH_RSS_ENGINE_NUM_K2 207
#define ETH_RSS_ENGINE_NUM_BB 127
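The new constant's arithmetic for reference: one mask bit per indirection-table entry, packed 32 to a register. A hypothetical compile-time check, not part of the header:

/* 128 entries / 32 bits per register = 4 mask registers */
static_assert(ETH_RSS_IND_TABLE_MASK_SIZE_REGS == 4);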
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h
index 98cfc195abe2..7ba0abc867f1 100644
--- a/include/linux/qed/fcoe_common.h
+++ b/include/linux/qed/fcoe_common.h
@@ -1,6 +1,7 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __FCOE_COMMON__
@@ -149,49 +150,49 @@ struct ystorm_fcoe_task_st_ctx {
u8 reserved2[8];
};
-struct e4_ystorm_fcoe_task_ag_ctx {
+struct ystorm_fcoe_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 word0;
u8 flags0;
-#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
+#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
+#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 byte2;
__le32 reg0;
u8 byte3;
@@ -205,73 +206,73 @@ struct e4_ystorm_fcoe_task_ag_ctx {
__le32 reg2;
};
-struct e4_tstorm_fcoe_task_ag_ctx {
+struct tstorm_fcoe_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 icid;
u8 flags0;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6
-#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7
+#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7
u8 flags1;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2
-#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6
u8 flags3;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2
-#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3
+#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 cleanup_state;
__le16 last_sent_tid;
__le32 rec_rr_tov_exp_timeout;
@@ -351,49 +352,49 @@ struct tstorm_fcoe_task_st_ctx {
struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only;
};
-struct e4_mstorm_fcoe_task_ag_ctx {
+struct mstorm_fcoe_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 icid;
u8 flags0;
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5
-#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
+#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5
+#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 cleanup_state;
__le32 received_bytes;
u8 byte3;
@@ -439,56 +440,56 @@ struct mstorm_fcoe_task_st_ctx {
struct scsi_cached_sges data_desc;
};
-struct e4_ustorm_fcoe_task_ag_ctx {
+struct ustorm_fcoe_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 icid;
u8 flags0;
-#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6
+#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7
+#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5
+#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
__le32 global_cq_num;
@@ -498,18 +499,18 @@ struct e4_ustorm_fcoe_task_ag_ctx {
};
/* FCoE task context */
-struct e4_fcoe_task_context {
+struct fcoe_task_context {
struct ystorm_fcoe_task_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct tdif_task_context tdif_context;
- struct e4_ystorm_fcoe_task_ag_ctx ystorm_ag_context;
- struct e4_tstorm_fcoe_task_ag_ctx tstorm_ag_context;
+ struct ystorm_fcoe_task_ag_ctx ystorm_ag_context;
+ struct tstorm_fcoe_task_ag_ctx tstorm_ag_context;
struct timers_context timer_context;
struct tstorm_fcoe_task_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
- struct e4_mstorm_fcoe_task_ag_ctx mstorm_ag_context;
+ struct mstorm_fcoe_task_ag_ctx mstorm_ag_context;
struct mstorm_fcoe_task_st_ctx mstorm_st_context;
- struct e4_ustorm_fcoe_task_ag_ctx ustorm_ag_context;
+ struct ustorm_fcoe_task_ag_ctx ustorm_ag_context;
struct rdif_task_context rdif_context;
};
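As in the other qed headers, only the macro prefixes change here; callers set the same bits through SET_FIELD(). A hypothetical example using the renamed macros:

static void example_mark_task_valid(struct fcoe_task_context *task)
{
	/* VALID lives in tstorm flags0 at bit 7, exactly as before. */
	SET_FIELD(task->tstorm_ag_context.flags0,
		  TSTORM_FCOE_TASK_AG_CTX_VALID, 1);
}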
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 2f0a771a9176..1a60285a01e3 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __ISCSI_COMMON__
@@ -740,49 +714,49 @@ struct ystorm_iscsi_task_st_ctx {
union iscsi_task_hdr pdu_hdr;
};
-struct e4_ystorm_iscsi_task_ag_ctx {
+struct ystorm_iscsi_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 word0;
u8 flags0;
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_MASK 0x1 /* bit3 */
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_SHIFT 7
+#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_MASK 0x1 /* bit3 */
+#define YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_SHIFT 7
u8 flags1;
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 byte2;
__le32 TTT;
u8 byte3;
@@ -790,49 +764,49 @@ struct e4_ystorm_iscsi_task_ag_ctx {
__le16 word1;
};
-struct e4_mstorm_iscsi_task_ag_ctx {
+struct mstorm_iscsi_task_ag_ctx {
u8 cdu_validation;
u8 byte1;
__le16 task_cid;
u8 flags0;
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
+#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
+#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
u8 flags1;
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 byte2;
__le32 reg0;
u8 byte3;
@@ -840,56 +814,56 @@ struct e4_mstorm_iscsi_task_ag_ctx {
__le16 word1;
};
-struct e4_ustorm_iscsi_task_ag_ctx {
+struct ustorm_iscsi_task_ag_ctx {
u8 reserved;
u8 state;
__le16 icid;
u8 flags0;
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
-#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
+#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
u8 flags1;
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0
-#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
-#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
+#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
__le32 rcv_cont_len;
@@ -978,14 +952,14 @@ struct ustorm_iscsi_task_st_ctx {
};
/* iscsi task context */
-struct e4_iscsi_task_context {
+struct iscsi_task_context {
struct ystorm_iscsi_task_st_ctx ystorm_st_context;
- struct e4_ystorm_iscsi_task_ag_ctx ystorm_ag_context;
+ struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
struct regpair ystorm_ag_padding[2];
struct tdif_task_context tdif_context;
- struct e4_mstorm_iscsi_task_ag_ctx mstorm_ag_context;
+ struct mstorm_iscsi_task_ag_ctx mstorm_ag_context;
struct regpair mstorm_ag_padding[2];
- struct e4_ustorm_iscsi_task_ag_ctx ustorm_ag_context;
+ struct ustorm_iscsi_task_ag_ctx ustorm_ag_context;
struct mstorm_iscsi_task_st_ctx mstorm_st_context;
struct ustorm_iscsi_task_st_ctx ustorm_st_context;
struct rdif_task_context rdif_context;
@@ -1457,73 +1431,73 @@ struct ystorm_iscsi_stats_drv {
struct regpair iscsi_tx_tcp_pkt_cnt;
};
-struct e4_tstorm_iscsi_task_ag_ctx {
+struct tstorm_iscsi_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 word0;
u8 flags0;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
+#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 byte2;
__le16 word1;
__le32 reg0;
diff --git a/include/linux/qed/iwarp_common.h b/include/linux/qed/iwarp_common.h
index c6cfd39cd910..14f9e4c0ddd9 100644
--- a/include/linux/qed/iwarp_common.h
+++ b/include/linux/qed/iwarp_common.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __IWARP_COMMON__
diff --git a/include/linux/qed/nvmetcp_common.h b/include/linux/qed/nvmetcp_common.h
new file mode 100644
index 000000000000..cc7c7481a0e0
--- /dev/null
+++ b/include/linux/qed/nvmetcp_common.h
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#ifndef __NVMETCP_COMMON__
+#define __NVMETCP_COMMON__
+
+#include "tcp_common.h"
+#include <linux/nvme-tcp.h>
+
+#define NVMETCP_SLOW_PATH_LAYER_CODE (6)
+#define NVMETCP_WQE_NUM_SGES_SLOWIO (0xf)
+
+/* NVMeTCP firmware function init parameters */
+struct nvmetcp_spe_func_init {
+ __le16 half_way_close_timeout;
+ u8 num_sq_pages_in_ring;
+ u8 num_r2tq_pages_in_ring;
+ u8 num_uhq_pages_in_ring;
+ u8 ll2_rx_queue_id;
+ u8 flags;
+#define NVMETCP_SPE_FUNC_INIT_COUNTERS_EN_MASK 0x1
+#define NVMETCP_SPE_FUNC_INIT_COUNTERS_EN_SHIFT 0
+#define NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE_MASK 0x1
+#define NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE_SHIFT 1
+#define NVMETCP_SPE_FUNC_INIT_RESERVED0_MASK 0x3F
+#define NVMETCP_SPE_FUNC_INIT_RESERVED0_SHIFT 2
+ u8 debug_flags;
+ __le16 reserved1;
+ u8 params;
+#define NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT_MASK 0xF
+#define NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT_SHIFT 0
+#define NVMETCP_SPE_FUNC_INIT_RESERVED1_MASK 0xF
+#define NVMETCP_SPE_FUNC_INIT_RESERVED1_SHIFT 4
+ u8 reserved2[5];
+ struct scsi_init_func_params func_params;
+ struct scsi_init_func_queues q_params;
+};
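
The flags and params bytes in nvmetcp_spe_func_init are packed bitfields: each field is described by an unshifted MASK plus a SHIFT into the byte. A minimal sketch of composing the flags byte from the macros above (the helper name is hypothetical; the qed driver itself normally packs such fields with its generic SET_FIELD() macro):

static inline u8 nvmetcp_func_init_flags(u8 counters_en, u8 nvmetcp_mode)
{
	u8 flags = 0;

	/* Mask each value down to its field width, then shift into place */
	flags |= (counters_en & NVMETCP_SPE_FUNC_INIT_COUNTERS_EN_MASK) <<
		 NVMETCP_SPE_FUNC_INIT_COUNTERS_EN_SHIFT;
	flags |= (nvmetcp_mode & NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE_MASK) <<
		 NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE_SHIFT;

	return flags;
}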
+
+/* NVMeTCP init params passed by driver to FW in NVMeTCP init ramrod. */
+struct nvmetcp_init_ramrod_params {
+ struct nvmetcp_spe_func_init nvmetcp_init_spe;
+ struct tcp_init_params tcp_init;
+};
+
+/* NVMeTCP Ramrod Command IDs */
+enum nvmetcp_ramrod_cmd_id {
+ NVMETCP_RAMROD_CMD_ID_UNUSED = 0,
+ NVMETCP_RAMROD_CMD_ID_INIT_FUNC = 1,
+ NVMETCP_RAMROD_CMD_ID_DESTROY_FUNC = 2,
+ NVMETCP_RAMROD_CMD_ID_OFFLOAD_CONN = 3,
+ NVMETCP_RAMROD_CMD_ID_UPDATE_CONN = 4,
+ NVMETCP_RAMROD_CMD_ID_TERMINATION_CONN = 5,
+ NVMETCP_RAMROD_CMD_ID_CLEAR_SQ = 6,
+ MAX_NVMETCP_RAMROD_CMD_ID
+};
+
+struct nvmetcp_glbl_queue_entry {
+ struct regpair cq_pbl_addr;
+ struct regpair reserved;
+};
+
+/* NVMeTCP conn level EQEs */
+enum nvmetcp_eqe_opcode {
+ NVMETCP_EVENT_TYPE_INIT_FUNC = 0, /* Response after init Ramrod */
+ NVMETCP_EVENT_TYPE_DESTROY_FUNC, /* Response after destroy Ramrod */
+	NVMETCP_EVENT_TYPE_OFFLOAD_CONN, /* Response after option 2 offload Ramrod */
+ NVMETCP_EVENT_TYPE_UPDATE_CONN, /* Response after update Ramrod */
+ NVMETCP_EVENT_TYPE_CLEAR_SQ, /* Response after clear sq Ramrod */
+ NVMETCP_EVENT_TYPE_TERMINATE_CONN, /* Response after termination Ramrod */
+ NVMETCP_EVENT_TYPE_RESERVED0,
+ NVMETCP_EVENT_TYPE_RESERVED1,
+ NVMETCP_EVENT_TYPE_ASYN_CONNECT_COMPLETE, /* Connect completed (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_TERMINATE_DONE, /* Termination completed (A-syn EQE) */
+	NVMETCP_EVENT_TYPE_START_OF_ERROR_TYPES = 10, /* Separates normal EQEs from error EQEs */
+ NVMETCP_EVENT_TYPE_ASYN_ABORT_RCVD, /* TCP RST packet receive (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_CLOSE_RCVD, /* TCP FIN packet receive (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_SYN_RCVD, /* TCP SYN+ACK packet receive (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_MAX_RT_TIME, /* TCP max retransmit time (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_MAX_RT_CNT, /* TCP max retransmit count (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT, /* TCP ka probes count (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_FIN_WAIT2, /* TCP fin wait 2 (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_NVMETCP_CONN_ERROR, /* NVMeTCP error response (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_TCP_CONN_ERROR, /* NVMeTCP error - tcp error (A-syn EQE) */
+ MAX_NVMETCP_EQE_OPCODE
+};
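
Per the comment on NVMETCP_EVENT_TYPE_START_OF_ERROR_TYPES, the opcodes are ordered so that every value at or above 10 is an error notification. A hedged sketch of a classifier that relies on that ordering (the helper is illustrative, not part of the HSI):

static inline bool nvmetcp_eqe_opcode_is_error(enum nvmetcp_eqe_opcode op)
{
	/* Error opcodes are grouped after the START_OF_ERROR_TYPES marker */
	return op >= NVMETCP_EVENT_TYPE_START_OF_ERROR_TYPES &&
	       op < MAX_NVMETCP_EQE_OPCODE;
}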
+
+struct nvmetcp_conn_offload_section {
+ struct regpair cccid_itid_table_addr; /* CCCID to iTID table address */
+ __le16 cccid_max_range; /* CCCID max value - used for validation */
+ __le16 reserved[3];
+};
+
+/* NVMe TCP connection offload params passed by driver to FW in NVMeTCP offload ramrod */
+struct nvmetcp_conn_offload_params {
+ struct regpair sq_pbl_addr;
+ struct regpair r2tq_pbl_addr;
+ struct regpair xhq_pbl_addr;
+ struct regpair uhq_pbl_addr;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ u8 flags;
+#define NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
+#define NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
+#define NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE_MASK 0x1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE_SHIFT 3
+#define NVMETCP_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0xF
+#define NVMETCP_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 4
+ u8 default_cq;
+ __le16 reserved0;
+ __le32 reserved1;
+ __le32 initial_ack;
+
+ struct nvmetcp_conn_offload_section nvmetcp; /* NVMe/TCP section */
+};
+
+/* NVMe TCP and TCP connection offload params passed by driver to FW in NVMeTCP offload ramrod. */
+struct nvmetcp_spe_conn_offload {
+ __le16 reserved;
+ __le16 conn_id;
+ __le32 fw_cid;
+ struct nvmetcp_conn_offload_params nvmetcp;
+ struct tcp_offload_params_opt2 tcp;
+};
+
+/* NVMeTCP connection update params passed by driver to FW in NVMeTCP update ramrod. */
+struct nvmetcp_conn_update_ramrod_params {
+ __le16 reserved0;
+ __le16 conn_id;
+ __le32 reserved1;
+ u8 flags;
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED0_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED0_SHIFT 2
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 3
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED2_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED2_SHIFT 4
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED3_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED3_SHIFT 5
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED4_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED4_SHIFT 6
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED5_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED5_SHIFT 7
+ u8 reserved3[3];
+ __le32 max_seq_size;
+ __le32 max_send_pdu_length;
+ __le32 max_recv_pdu_length;
+ __le32 first_seq_length;
+ __le32 reserved4[5];
+};
+
+/* NVMeTCP connection termination request */
+struct nvmetcp_spe_conn_termination {
+ __le16 reserved0;
+ __le16 conn_id;
+ __le32 reserved1;
+ u8 abortive;
+ u8 reserved2[7];
+ struct regpair reserved3;
+ struct regpair reserved4;
+};
+
+struct nvmetcp_dif_flags {
+ u8 flags;
+};
+
+enum nvmetcp_wqe_type {
+ NVMETCP_WQE_TYPE_NORMAL,
+ NVMETCP_WQE_TYPE_TASK_CLEANUP,
+ NVMETCP_WQE_TYPE_MIDDLE_PATH,
+ NVMETCP_WQE_TYPE_IC,
+ MAX_NVMETCP_WQE_TYPE
+};
+
+struct nvmetcp_wqe {
+ __le16 task_id;
+ u8 flags;
+#define NVMETCP_WQE_WQE_TYPE_MASK 0x7 /* [use nvmetcp_wqe_type] */
+#define NVMETCP_WQE_WQE_TYPE_SHIFT 0
+#define NVMETCP_WQE_NUM_SGES_MASK 0xF
+#define NVMETCP_WQE_NUM_SGES_SHIFT 3
+#define NVMETCP_WQE_RESPONSE_MASK 0x1
+#define NVMETCP_WQE_RESPONSE_SHIFT 7
+ struct nvmetcp_dif_flags prot_flags;
+ __le32 contlen_cdbsize;
+#define NVMETCP_WQE_CONT_LEN_MASK 0xFFFFFF
+#define NVMETCP_WQE_CONT_LEN_SHIFT 0
+#define NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD_MASK 0xFF
+#define NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD_SHIFT 24
+};
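
contlen_cdbsize multiplexes the 24-bit continuation length and the 8-bit CDB-size/NVMe-command field in one little-endian dword, so updates are read-modify-write through CPU byte order. A sketch, assuming the helper name (it is not part of the HSI):

static inline void nvmetcp_wqe_set_cont_len(struct nvmetcp_wqe *wqe, u32 len)
{
	u32 val = le32_to_cpu(wqe->contlen_cdbsize);

	/* Clear the 24-bit length field, then insert the new value */
	val &= ~(NVMETCP_WQE_CONT_LEN_MASK << NVMETCP_WQE_CONT_LEN_SHIFT);
	val |= (len & NVMETCP_WQE_CONT_LEN_MASK) << NVMETCP_WQE_CONT_LEN_SHIFT;

	wqe->contlen_cdbsize = cpu_to_le32(val);
}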
+
+struct nvmetcp_host_cccid_itid_entry {
+ __le16 itid;
+};
+
+struct nvmetcp_connect_done_results {
+ __le16 icid;
+ __le16 conn_id;
+ struct tcp_ulp_connect_done_params params;
+};
+
+struct nvmetcp_eqe_data {
+ __le16 icid;
+ __le16 conn_id;
+ __le16 reserved;
+ u8 error_code;
+ u8 error_pdu_opcode_reserved;
+#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F
+#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_SHIFT 0
+#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_VALID_MASK 0x1
+#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_VALID_SHIFT 6
+#define NVMETCP_EQE_DATA_RESERVED0_MASK 0x1
+#define NVMETCP_EQE_DATA_RESERVED0_SHIFT 7
+};
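
error_pdu_opcode_reserved carries a 6-bit PDU opcode plus a validity bit, and the opcode is only meaningful when the validity bit is set. A hedged decoding sketch (the helper name and the -1 convention are illustrative):

/* Returns the offending PDU opcode, or -1 if the field is not valid */
static inline int nvmetcp_eqe_error_pdu_opcode(const struct nvmetcp_eqe_data *data)
{
	u8 v = data->error_pdu_opcode_reserved;

	if (!((v >> NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_VALID_SHIFT) &
	      NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_VALID_MASK))
		return -1;

	return (v >> NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_SHIFT) &
	       NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_MASK;
}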
+
+enum nvmetcp_task_type {
+ NVMETCP_TASK_TYPE_HOST_WRITE,
+ NVMETCP_TASK_TYPE_HOST_READ,
+ NVMETCP_TASK_TYPE_INIT_CONN_REQUEST,
+ NVMETCP_TASK_TYPE_RESERVED0,
+ NVMETCP_TASK_TYPE_CLEANUP,
+ NVMETCP_TASK_TYPE_HOST_READ_NO_CQE,
+ MAX_NVMETCP_TASK_TYPE
+};
+
+struct nvmetcp_db_data {
+ u8 params;
+#define NVMETCP_DB_DATA_DEST_MASK 0x3 /* destination of doorbell (use enum db_dest) */
+#define NVMETCP_DB_DATA_DEST_SHIFT 0
+#define NVMETCP_DB_DATA_AGG_CMD_MASK 0x3 /* aggregative command to CM (use enum db_agg_cmd_sel) */
+#define NVMETCP_DB_DATA_AGG_CMD_SHIFT 2
+#define NVMETCP_DB_DATA_BYPASS_EN_MASK 0x1 /* enable QM bypass */
+#define NVMETCP_DB_DATA_BYPASS_EN_SHIFT 4
+#define NVMETCP_DB_DATA_RESERVED_MASK 0x1
+#define NVMETCP_DB_DATA_RESERVED_SHIFT 5
+#define NVMETCP_DB_DATA_AGG_VAL_SEL_MASK 0x3 /* aggregative value selection */
+#define NVMETCP_DB_DATA_AGG_VAL_SEL_SHIFT 6
+	u8 agg_flags; /* bit for each DQ counter flag in the CM context that DQ can increment */
+ __le16 sq_prod;
+};
+
+struct nvmetcp_fw_nvmf_cqe {
+ __le32 reserved[4];
+};
+
+struct nvmetcp_icresp_mdata {
+ u8 digest;
+ u8 cpda;
+ __le16 pfv;
+ __le32 maxdata;
+ __le16 rsvd[4];
+};
+
+union nvmetcp_fw_cqe_data {
+ struct nvmetcp_fw_nvmf_cqe nvme_cqe;
+ struct nvmetcp_icresp_mdata icresp_mdata;
+};
+
+struct nvmetcp_fw_cqe {
+ __le16 conn_id;
+ u8 cqe_type;
+ u8 cqe_error_status_bits;
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4
+ __le16 itid;
+ u8 task_type;
+ u8 fw_dbg_field;
+ u8 caused_conn_err;
+ u8 reserved0[3];
+ __le32 reserved1;
+ union nvmetcp_fw_cqe_data cqe_data;
+ struct regpair task_opaque;
+ __le32 reserved[6];
+};
+
+enum nvmetcp_fw_cqes_type {
+ NVMETCP_FW_CQE_TYPE_NORMAL = 1,
+ NVMETCP_FW_CQE_TYPE_RESERVED0,
+ NVMETCP_FW_CQE_TYPE_RESERVED1,
+ NVMETCP_FW_CQE_TYPE_CLEANUP,
+ NVMETCP_FW_CQE_TYPE_DUMMY,
+ MAX_NVMETCP_FW_CQES_TYPE
+};
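
CQE handling typically dispatches on cqe_type and, for normal completions, checks cqe_error_status_bits against the CQE_ERROR_BITMAP_* fields before handing cqe_data.nvme_cqe up the stack. A hedged sketch of that pattern (the handler is hypothetical):

static void nvmetcp_handle_cqe(struct nvmetcp_fw_cqe *cqe)
{
	switch (cqe->cqe_type) {
	case NVMETCP_FW_CQE_TYPE_NORMAL:
		if (cqe->cqe_error_status_bits &
		    (CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK <<
		     CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT))
			pr_warn("data digest error on itid %u\n",
				le16_to_cpu(cqe->itid));
		/* otherwise hand cqe_data.nvme_cqe up to the NVMe layer */
		break;
	case NVMETCP_FW_CQE_TYPE_CLEANUP:
		/* confirmation of a task-cleanup request */
		break;
	default:
		break;
	}
}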
+
+struct ystorm_nvmetcp_task_state {
+ struct scsi_cached_sges data_desc;
+ struct scsi_sgl_params sgl_params;
+	__le32 reserved0;
+ __le32 buffer_offset;
+ __le16 cccid;
+ struct nvmetcp_dif_flags dif_flags;
+ u8 flags;
+#define YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP_MASK 0x1
+#define YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP_SHIFT 0
+#define YSTORM_NVMETCP_TASK_STATE_SLOW_IO_MASK 0x1
+#define YSTORM_NVMETCP_TASK_STATE_SLOW_IO_SHIFT 1
+#define YSTORM_NVMETCP_TASK_STATE_SET_DIF_OFFSET_MASK 0x1
+#define YSTORM_NVMETCP_TASK_STATE_SET_DIF_OFFSET_SHIFT 2
+#define YSTORM_NVMETCP_TASK_STATE_SEND_W_RSP_MASK 0x1
+#define YSTORM_NVMETCP_TASK_STATE_SEND_W_RSP_SHIFT 3
+};
+
+struct ystorm_nvmetcp_task_rxmit_opt {
+ __le32 reserved[4];
+};
+
+struct nvmetcp_task_hdr {
+ __le32 reg[18];
+};
+
+struct nvmetcp_task_hdr_aligned {
+ struct nvmetcp_task_hdr task_hdr;
+ __le32 reserved[2]; /* HSI_COMMENT: Align to QREG */
+};
+
+struct e5_tdif_task_context {
+ __le32 reserved[16];
+};
+
+struct e5_rdif_task_context {
+ __le32 reserved[12];
+};
+
+struct ystorm_nvmetcp_task_st_ctx {
+ struct ystorm_nvmetcp_task_state state;
+ struct ystorm_nvmetcp_task_rxmit_opt rxmit_opt;
+ struct nvmetcp_task_hdr_aligned pdu_hdr;
+};
+
+struct mstorm_nvmetcp_task_st_ctx {
+ struct scsi_cached_sges data_desc;
+ struct scsi_sgl_params sgl_params;
+ __le32 rem_task_size;
+ __le32 data_buffer_offset;
+ u8 task_type;
+ struct nvmetcp_dif_flags dif_flags;
+ __le16 dif_task_icid;
+ struct regpair reserved0;
+ __le32 expected_itt;
+ __le32 reserved1;
+};
+
+struct ustorm_nvmetcp_task_st_ctx {
+ __le32 rem_rcv_len;
+ __le32 exp_data_transfer_len;
+ __le32 exp_data_sn;
+ struct regpair reserved0;
+ __le32 reg1_map;
+#define REG1_NUM_SGES_MASK 0xF
+#define REG1_NUM_SGES_SHIFT 0
+#define REG1_RESERVED1_MASK 0xFFFFFFF
+#define REG1_RESERVED1_SHIFT 4
+ u8 flags2;
+#define USTORM_NVMETCP_TASK_ST_CTX_AHS_EXIST_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_AHS_EXIST_SHIFT 0
+#define USTORM_NVMETCP_TASK_ST_CTX_RESERVED1_MASK 0x7F
+#define USTORM_NVMETCP_TASK_ST_CTX_RESERVED1_SHIFT 1
+ struct nvmetcp_dif_flags dif_flags;
+ __le16 reserved3;
+ __le16 tqe_opaque[2];
+ __le32 reserved5;
+ __le32 nvme_tcp_opaque_lo;
+ __le32 nvme_tcp_opaque_hi;
+ u8 task_type;
+ u8 error_flags;
+#define USTORM_NVMETCP_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0
+#define USTORM_NVMETCP_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1
+#define USTORM_NVMETCP_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2
+#define USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP_SHIFT 3
+ u8 flags;
+#define USTORM_NVMETCP_TASK_ST_CTX_CQE_WRITE_MASK 0x3
+#define USTORM_NVMETCP_TASK_ST_CTX_CQE_WRITE_SHIFT 0
+#define USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
+#define USTORM_NVMETCP_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
+#define USTORM_NVMETCP_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
+#define USTORM_NVMETCP_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5
+#define USTORM_NVMETCP_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
+ u8 cq_rss_number;
+};
+
+struct e5_ystorm_nvmetcp_task_ag_ctx {
+ u8 reserved /* cdu_validation */;
+ u8 byte1 /* state_and_core_id */;
+ __le16 word0 /* icid */;
+ u8 flags0;
+ u8 flags1;
+ u8 flags2;
+ u8 flags3;
+ __le32 TTT;
+ u8 byte2;
+ u8 byte3;
+ u8 byte4;
+ u8 reserved7;
+};
+
+struct e5_mstorm_nvmetcp_task_ag_ctx {
+ u8 cdu_validation;
+ u8 byte1;
+ __le16 task_cid;
+ u8 flags0;
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_VALID_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_VALID_SHIFT 6
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
+ u8 flags1;
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1_MASK 0x3
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1_SHIFT 2
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF2_MASK 0x3
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF2_SHIFT 4
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+ u8 flags3;
+ __le32 reg0;
+ u8 byte2;
+ u8 byte3;
+ u8 byte4;
+ u8 reserved7;
+};
+
+struct e5_ustorm_nvmetcp_task_ag_ctx {
+ u8 reserved;
+ u8 state_and_core_id;
+ __le16 icid;
+ u8 flags0;
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
+ u8 flags1;
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED1_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED1_SHIFT 0
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_SHIFT 2
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3_SHIFT 4
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+ u8 flags2;
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3EN_SHIFT 3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
+ u8 flags3;
+ u8 flags4;
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED5_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED5_SHIFT 0
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED6_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED6_SHIFT 2
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED7_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED7_SHIFT 3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+ u8 byte2;
+ u8 byte3;
+ u8 reserved8;
+ __le32 dif_err_intervals;
+ __le32 dif_error_1st_interval;
+ __le32 rcv_cont_len;
+ __le32 exp_cont_len;
+ __le32 total_data_acked;
+ __le32 exp_data_acked;
+ __le16 word1;
+ __le16 next_tid;
+ __le32 hdr_residual_count;
+ __le32 exp_r2t_sn;
+};
+
+struct e5_nvmetcp_task_context {
+ struct ystorm_nvmetcp_task_st_ctx ystorm_st_context;
+ struct e5_ystorm_nvmetcp_task_ag_ctx ystorm_ag_context;
+ struct regpair ystorm_ag_padding[2];
+ struct e5_tdif_task_context tdif_context;
+ struct e5_mstorm_nvmetcp_task_ag_ctx mstorm_ag_context;
+ struct regpair mstorm_ag_padding[2];
+ struct e5_ustorm_nvmetcp_task_ag_ctx ustorm_ag_context;
+ struct regpair ustorm_ag_padding[2];
+ struct mstorm_nvmetcp_task_st_ctx mstorm_st_context;
+ struct regpair mstorm_st_padding[2];
+ struct ustorm_nvmetcp_task_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+ struct e5_rdif_task_context rdif_context;
+};
+
+#endif /* __NVMETCP_COMMON__*/
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 2dd0a9ed5b36..a84063492c71 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef _QED_CHAIN_H
@@ -37,6 +11,7 @@
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
@@ -44,7 +19,7 @@ enum qed_chain_mode {
/* Each Page contains a next pointer at its end */
QED_CHAIN_MODE_NEXT_PTR,
- /* Chain is a single page (next ptr) is unrequired */
+	/* Chain is a single page; a next pointer is not required */
QED_CHAIN_MODE_SINGLE,
/* Page pointers are located in a side list */
@@ -52,9 +27,9 @@ enum qed_chain_mode {
};
enum qed_chain_use_mode {
- QED_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */
- QED_CHAIN_USE_TO_CONSUME, /* Chain starts full */
- QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */
+ QED_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */
+ QED_CHAIN_USE_TO_CONSUME, /* Chain starts full */
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */
};
enum qed_chain_cnt_type {
@@ -66,196 +41,242 @@ enum qed_chain_cnt_type {
};
struct qed_chain_next {
- struct regpair next_phys;
- void *next_virt;
+ struct regpair next_phys;
+ void *next_virt;
};
struct qed_chain_pbl_u16 {
- u16 prod_page_idx;
- u16 cons_page_idx;
+ u16 prod_page_idx;
+ u16 cons_page_idx;
};
struct qed_chain_pbl_u32 {
- u32 prod_page_idx;
- u32 cons_page_idx;
-};
-
-struct qed_chain_ext_pbl {
- dma_addr_t p_pbl_phys;
- void *p_pbl_virt;
+ u32 prod_page_idx;
+ u32 cons_page_idx;
};
struct qed_chain_u16 {
- /* Cyclic index of next element to produce/consme */
- u16 prod_idx;
- u16 cons_idx;
+ /* Cyclic index of next element to produce/consume */
+ u16 prod_idx;
+ u16 cons_idx;
};
struct qed_chain_u32 {
- /* Cyclic index of next element to produce/consme */
- u32 prod_idx;
- u32 cons_idx;
+ /* Cyclic index of next element to produce/consume */
+ u32 prod_idx;
+ u32 cons_idx;
+};
+
+struct addr_tbl_entry {
+ void *virt_addr;
+ dma_addr_t dma_map;
};
struct qed_chain {
- /* fastpath portion of the chain - required for commands such
+ /* Fastpath portion of the chain - required for commands such
* as produce / consume.
*/
+
/* Point to next element to produce/consume */
- void *p_prod_elem;
- void *p_cons_elem;
+ void *p_prod_elem;
+ void *p_cons_elem;
/* Fastpath portions of the PBL [if exists] */
+
struct {
- /* Table for keeping the virtual addresses of the chain pages,
- * respectively to the physical addresses in the pbl table.
+ /* Table for keeping the virtual and physical addresses of the
+	 * chain pages, ordered to match the physical addresses
+ * in the pbl table.
*/
- void **pp_virt_addr_tbl;
+ struct addr_tbl_entry *pp_addr_tbl;
union {
- struct qed_chain_pbl_u16 u16;
- struct qed_chain_pbl_u32 u32;
- } c;
- } pbl;
+ struct qed_chain_pbl_u16 u16;
+ struct qed_chain_pbl_u32 u32;
+ } c;
+ } pbl;
union {
- struct qed_chain_u16 chain16;
- struct qed_chain_u32 chain32;
- } u;
+ struct qed_chain_u16 chain16;
+ struct qed_chain_u32 chain32;
+ } u;
/* Capacity counts only usable elements */
- u32 capacity;
- u32 page_cnt;
+ u32 capacity;
+ u32 page_cnt;
- enum qed_chain_mode mode;
+ enum qed_chain_mode mode;
/* Elements information for fast calculations */
- u16 elem_per_page;
- u16 elem_per_page_mask;
- u16 elem_size;
- u16 next_page_mask;
- u16 usable_per_page;
- u8 elem_unusable;
+ u16 elem_per_page;
+ u16 elem_per_page_mask;
+ u16 elem_size;
+ u16 next_page_mask;
+ u16 usable_per_page;
+ u8 elem_unusable;
- u8 cnt_type;
+ enum qed_chain_cnt_type cnt_type;
/* Slowpath of the chain - required for initialization and destruction,
* but isn't involved in regular functionality.
*/
+ u32 page_size;
+
/* Base address of a pre-allocated buffer for pbl */
struct {
- dma_addr_t p_phys_table;
- void *p_virt_table;
- } pbl_sp;
+ __le64 *table_virt;
+ dma_addr_t table_phys;
+ size_t table_size;
+ } pbl_sp;
/* Address of first page of the chain - the address is required
- * for fastpath operation [consume/produce] but only for the the SINGLE
+ * for fastpath operation [consume/produce] but only for the SINGLE
* flavour which isn't considered fastpath [== SPQ].
*/
- void *p_virt_addr;
- dma_addr_t p_phys_addr;
+ void *p_virt_addr;
+ dma_addr_t p_phys_addr;
/* Total number of elements [for entire chain] */
- u32 size;
+ u32 size;
- u8 intended_use;
+ enum qed_chain_use_mode intended_use;
- bool b_external_pbl;
+ bool b_external_pbl;
};
-#define QED_CHAIN_PBL_ENTRY_SIZE (8)
-#define QED_CHAIN_PAGE_SIZE (0x1000)
-#define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size))
+struct qed_chain_init_params {
+ enum qed_chain_mode mode;
+ enum qed_chain_use_mode intended_use;
+ enum qed_chain_cnt_type cnt_type;
-#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
- (((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \
- (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \
- (elem_size))) : 0)
+ u32 page_size;
+ u32 num_elems;
+ size_t elem_size;
+
+ void *ext_pbl_virt;
+ dma_addr_t ext_pbl_phys;
+};
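
qed_chain_init_params collapses what used to be a long argument list (plus the now-removed qed_chain_ext_pbl) into one structure. A hedged usage sketch; qed_chain_alloc() stands in for whatever allocator consumes the params and is not declared in this header:

struct qed_chain_init_params params = {
	.mode		= QED_CHAIN_MODE_PBL,
	.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
	.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
	.page_size	= QED_CHAIN_PAGE_SIZE,
	.num_elems	= 1024,
	.elem_size	= 64,
};

/* rc = qed_chain_alloc(cdev, &chain, &params); */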
-#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
- ((u32)(ELEMS_PER_PAGE(elem_size) - \
- UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
+#define QED_CHAIN_PAGE_SIZE SZ_4K
-#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
- DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
+#define ELEMS_PER_PAGE(elem_size, page_size) \
+ ((page_size) / (elem_size))
-#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
-#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
+#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
+ (((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \
+ (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / (elem_size))) : \
+ 0)
+
+#define USABLE_ELEMS_PER_PAGE(elem_size, page_size, mode) \
+ ((u32)(ELEMS_PER_PAGE((elem_size), (page_size)) - \
+ UNUSABLE_ELEMS_PER_PAGE((elem_size), (mode))))
+
+#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, page_size, mode) \
+ DIV_ROUND_UP((elem_cnt), \
+ USABLE_ELEMS_PER_PAGE((elem_size), (page_size), (mode)))
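
A worked example of the sizing macros, assuming 8-byte elements, SZ_4K pages and QED_CHAIN_MODE_NEXT_PTR, where each page ends with a struct qed_chain_next (a regpair plus a pointer: 16 bytes on 64-bit):

/*
 * ELEMS_PER_PAGE(8, SZ_4K)                  = 4096 / 8       = 512
 * UNUSABLE_ELEMS_PER_PAGE(8, NEXT_PTR)      = 1 + (16 - 1)/8 = 2
 * USABLE_ELEMS_PER_PAGE(8, SZ_4K, NEXT_PTR) = 512 - 2        = 510
 * QED_CHAIN_PAGE_CNT(1024, 8, SZ_4K, NEXT_PTR)
 *                                 = DIV_ROUND_UP(1024, 510)  = 3 pages
 */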
+
+#define is_chain_u16(p) \
+ ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
+#define is_chain_u32(p) \
+ ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
/* Accessors */
-static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
+
+static inline u16 qed_chain_get_prod_idx(const struct qed_chain *chain)
{
- return p_chain->u.chain16.prod_idx;
+ return chain->u.chain16.prod_idx;
}
-static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
+static inline u16 qed_chain_get_cons_idx(const struct qed_chain *chain)
{
- return p_chain->u.chain16.cons_idx;
+ return chain->u.chain16.cons_idx;
}
-static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_prod_idx_u32(const struct qed_chain *chain)
{
- return p_chain->u.chain32.cons_idx;
+ return chain->u.chain32.prod_idx;
}
-static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_cons_idx_u32(const struct qed_chain *chain)
{
+ return chain->u.chain32.cons_idx;
+}
+
+static inline u16 qed_chain_get_elem_used(const struct qed_chain *chain)
+{
+ u32 prod = qed_chain_get_prod_idx(chain);
+ u32 cons = qed_chain_get_cons_idx(chain);
+ u16 elem_per_page = chain->elem_per_page;
u16 used;
- used = (u16) (((u32)0x10000 +
- (u32)p_chain->u.chain16.prod_idx) -
- (u32)p_chain->u.chain16.cons_idx);
- if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
- used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
- p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
+ if (prod < cons)
+ prod += (u32)U16_MAX + 1;
- return (u16)(p_chain->capacity - used);
+ used = (u16)(prod - cons);
+ if (chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+ used -= (u16)(prod / elem_per_page - cons / elem_per_page);
+
+ return used;
}
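
The widen-then-wrap arithmetic above replaces the old open-coded 0x10000 bias and gives the same answer across producer wraparound. A worked example with hypothetical indices:

/*
 * prod_idx = 0x0002 (already wrapped), cons_idx = 0xFFFE:
 *   prod < cons, so prod += U16_MAX + 1  ->  prod = 0x10002
 *   used = (u16)(0x10002 - 0xFFFE)       ->  4 elements in flight
 * In NEXT_PTR mode the next-pointer elements of any pages crossed
 * between cons and prod are then subtracted from that count.
 */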
-static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
+static inline u16 qed_chain_get_elem_left(const struct qed_chain *chain)
{
+ return (u16)(chain->capacity - qed_chain_get_elem_used(chain));
+}
+
+static inline u32 qed_chain_get_elem_used_u32(const struct qed_chain *chain)
+{
+ u64 prod = qed_chain_get_prod_idx_u32(chain);
+ u64 cons = qed_chain_get_cons_idx_u32(chain);
+ u16 elem_per_page = chain->elem_per_page;
u32 used;
- used = (u32) (((u64)0x100000000ULL +
- (u64)p_chain->u.chain32.prod_idx) -
- (u64)p_chain->u.chain32.cons_idx);
- if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
- used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
- p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
+ if (prod < cons)
+ prod += (u64)U32_MAX + 1;
- return p_chain->capacity - used;
+ used = (u32)(prod - cons);
+ if (chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+ used -= (u32)(prod / elem_per_page - cons / elem_per_page);
+
+ return used;
}
-static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_elem_left_u32(const struct qed_chain *chain)
{
- return p_chain->usable_per_page;
+ return chain->capacity - qed_chain_get_elem_used_u32(chain);
}
-static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
+static inline u16 qed_chain_get_usable_per_page(const struct qed_chain *chain)
{
- return p_chain->elem_unusable;
+ return chain->usable_per_page;
}
-static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
+static inline u8 qed_chain_get_unusable_per_page(const struct qed_chain *chain)
{
- return p_chain->page_cnt;
+ return chain->elem_unusable;
}
-static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_page_cnt(const struct qed_chain *chain)
{
- return p_chain->pbl_sp.p_phys_table;
+ return chain->page_cnt;
+}
+
+static inline dma_addr_t qed_chain_get_pbl_phys(const struct qed_chain *chain)
+{
+ return chain->pbl_sp.table_phys;
}
/**
- * @brief qed_chain_advance_page -
+ * qed_chain_advance_page(): Advance the next element across pages for a
+ * linked chain.
*
- * Advance the next element accros pages for a linked chain
+ * @p_chain: Chain.
+ * @p_next_elem: Pointer to the next-element pointer being advanced.
+ * @idx_to_inc: Element index to increment.
+ * @page_to_inc: Page index to increment.
*
- * @param p_chain
- * @param p_next_elem
- * @param idx_to_inc
- * @param page_to_inc
+ * Return: Void.
*/
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
@@ -287,7 +308,7 @@ qed_chain_advance_page(struct qed_chain *p_chain,
*(u32 *)page_to_inc = 0;
page_index = *(u32 *)page_to_inc;
}
- *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
+ *p_next_elem = p_chain->pbl.pp_addr_tbl[page_index].virt_addr;
}
}
@@ -316,12 +337,14 @@ qed_chain_advance_page(struct qed_chain *p_chain,
} while (0)
/**
- * @brief qed_chain_return_produced -
+ * qed_chain_return_produced(): A chain in which the driver "Produces"
+ * elements should use this API
+ * to indicate previously produced elements
+ * are now consumed.
*
- * A chain in which the driver "Produces" elements should use this API
- * to indicate previous produced elements are now consumed.
+ * @p_chain: Chain.
*
- * @param p_chain
+ * Return: Void.
*/
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
{
@@ -333,15 +356,15 @@ static inline void qed_chain_return_produced(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_produce -
+ * qed_chain_produce(): A chain in which the driver "Produces"
+ * elements should use this to get a pointer to
+ * the next element which can be "Produced". It's the driver's
+ * responsibility to validate that the chain has room for a
+ * new element.
*
- * A chain in which the driver "Produces" elements should use this to get
- * a pointer to the next element which can be "Produced". It's driver
- * responsibility to validate that the chain has room for new element.
+ * @p_chain: Chain.
*
- * @param p_chain
- *
- * @return void*, a pointer to next element
+ * Return: void*, a pointer to the next element.
*/
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
@@ -375,14 +398,11 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain)
}
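
A hedged sketch of the produce-side contract described above - check for room, produce, fill - where "chain" is a previously allocated struct qed_chain and the doorbell step is a placeholder:

if (qed_chain_get_elem_left(&chain)) {
	void *elem = qed_chain_produce(&chain);

	/* fill the element, then publish the new producer index
	 * (qed_chain_get_prod_idx()) to the device doorbell
	 */
}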
/**
- * @brief qed_chain_get_capacity -
- *
- * Get the maximum number of BDs in chain
+ * qed_chain_get_capacity(): Get the maximum number of BDs in the chain.
*
- * @param p_chain
- * @param num
+ * @p_chain: Chain.
*
- * @return number of unusable BDs
+ * Return: The chain capacity, i.e. the maximum number of usable BDs.
*/
static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
{
@@ -390,12 +410,14 @@ static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_recycle_consumed -
+ * qed_chain_recycle_consumed(): Returns an element which was
+ * previously consumed;
+ * increments producers so they can
+ * be written to FW.
*
- * Returns an element which was previously consumed;
- * Increments producers so they could be written to FW.
+ * @p_chain: Chain.
*
- * @param p_chain
+ * Return: Void.
*/
static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
{
@@ -407,14 +429,13 @@ static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_consume -
+ * qed_chain_consume(): A chain in which the driver utilizes data written
+ * by a different source (i.e., FW) should use this to
+ * access passed buffers.
*
- * A Chain in which the driver utilizes data written by a different source
- * (i.e., FW) should use this to access passed buffers.
+ * @p_chain: Chain.
*
- * @param p_chain
- *
- * @return void*, a pointer to the next buffer written
+ * Return: void*, a pointer to the next buffer written.
*/
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
@@ -448,9 +469,11 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_reset - Resets the chain to its start state
+ * qed_chain_reset(): Resets the chain to its start state.
+ *
+ * @p_chain: Pointer to a previously allocated chain.
*
- * @param p_chain pointer to a previously allocted chain
+ * Return: Void.
*/
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
@@ -499,125 +522,12 @@ static inline void qed_chain_reset(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_init - Initalizes a basic chain struct
- *
- * @param p_chain
- * @param p_virt_addr
- * @param p_phys_addr physical address of allocated buffer's beginning
- * @param page_cnt number of pages in the allocated buffer
- * @param elem_size size of each element in the chain
- * @param intended_use
- * @param mode
- */
-static inline void qed_chain_init_params(struct qed_chain *p_chain,
- u32 page_cnt,
- u8 elem_size,
- enum qed_chain_use_mode intended_use,
- enum qed_chain_mode mode,
- enum qed_chain_cnt_type cnt_type)
-{
- /* chain fixed parameters */
- p_chain->p_virt_addr = NULL;
- p_chain->p_phys_addr = 0;
- p_chain->elem_size = elem_size;
- p_chain->intended_use = (u8)intended_use;
- p_chain->mode = mode;
- p_chain->cnt_type = (u8)cnt_type;
-
- p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
- p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
- p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
- p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
- p_chain->next_page_mask = (p_chain->usable_per_page &
- p_chain->elem_per_page_mask);
-
- p_chain->page_cnt = page_cnt;
- p_chain->capacity = p_chain->usable_per_page * page_cnt;
- p_chain->size = p_chain->elem_per_page * page_cnt;
-
- p_chain->pbl_sp.p_phys_table = 0;
- p_chain->pbl_sp.p_virt_table = NULL;
- p_chain->pbl.pp_virt_addr_tbl = NULL;
-}
-
-/**
- * @brief qed_chain_init_mem -
- *
- * Initalizes a basic chain struct with its chain buffers
- *
- * @param p_chain
- * @param p_virt_addr virtual address of allocated buffer's beginning
- * @param p_phys_addr physical address of allocated buffer's beginning
- *
- */
-static inline void qed_chain_init_mem(struct qed_chain *p_chain,
- void *p_virt_addr, dma_addr_t p_phys_addr)
-{
- p_chain->p_virt_addr = p_virt_addr;
- p_chain->p_phys_addr = p_phys_addr;
-}
-
-/**
- * @brief qed_chain_init_pbl_mem -
+ * qed_chain_get_last_elem(): Returns a pointer to the last element of the
+ * chain.
*
- * Initalizes a basic chain struct with its pbl buffers
- *
- * @param p_chain
- * @param p_virt_pbl pointer to a pre allocated side table which will hold
- * virtual page addresses.
- * @param p_phys_pbl pointer to a pre-allocated side table which will hold
- * physical page addresses.
- * @param pp_virt_addr_tbl
- * pointer to a pre-allocated side table which will hold
- * the virtual addresses of the chain pages.
+ * @p_chain: Chain.
*
- */
-static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
- void *p_virt_pbl,
- dma_addr_t p_phys_pbl,
- void **pp_virt_addr_tbl)
-{
- p_chain->pbl_sp.p_phys_table = p_phys_pbl;
- p_chain->pbl_sp.p_virt_table = p_virt_pbl;
- p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
-}
-
-/**
- * @brief qed_chain_init_next_ptr_elem -
- *
- * Initalizes a next pointer element
- *
- * @param p_chain
- * @param p_virt_curr virtual address of a chain page of which the next
- * pointer element is initialized
- * @param p_virt_next virtual address of the next chain page
- * @param p_phys_next physical address of the next chain page
- *
- */
-static inline void
-qed_chain_init_next_ptr_elem(struct qed_chain *p_chain,
- void *p_virt_curr,
- void *p_virt_next, dma_addr_t p_phys_next)
-{
- struct qed_chain_next *p_next;
- u32 size;
-
- size = p_chain->elem_size * p_chain->usable_per_page;
- p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size);
-
- DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);
-
- p_next->next_virt = p_virt_next;
-}
-
-/**
- * @brief qed_chain_get_last_elem -
- *
- * Returns a pointer to the last element of the chain
- *
- * @param p_chain
- *
- * @return void*
+ * Return: void*, a pointer to the last element of the chain.
*/
static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
{
@@ -644,7 +554,7 @@ static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
break;
case QED_CHAIN_MODE_PBL:
last_page_idx = p_chain->page_cnt - 1;
- p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
+ p_virt_addr = p_chain->pbl.pp_addr_tbl[last_page_idx].virt_addr;
break;
}
/* p_virt_addr points at this stage to the last page of the chain */
@@ -655,10 +565,13 @@ out:
}
/**
- * @brief qed_chain_set_prod - sets the prod to the given value
+ * qed_chain_set_prod(): Sets the prod to the given value.
*
- * @param prod_idx
- * @param p_prod_elem
+ * @p_chain: Chain.
+ * @prod_idx: Producer index to set.
+ * @p_prod_elem: Pointer to the element at that index.
+ *
+ * Return: Void.
*/
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
u32 prod_idx, void *p_prod_elem)
@@ -702,9 +615,11 @@ static inline void qed_chain_set_prod(struct qed_chain *p_chain,
}
/**
- * @brief qed_chain_pbl_zero_mem - set chain memory to 0
+ * qed_chain_pbl_zero_mem(): Set chain memory to 0.
+ *
+ * @p_chain: Chain.
*
- * @param p_chain
+ * Return: Void.
*/
static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
{
@@ -716,8 +631,8 @@ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
page_cnt = qed_chain_get_page_cnt(p_chain);
for (i = 0; i < page_cnt; i++)
- memset(p_chain->pbl.pp_virt_addr_tbl[i], 0,
- QED_CHAIN_PAGE_SIZE);
+ memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0,
+ p_chain->page_size);
}
#endif
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index a1310482c4ed..e1bf3219b4e6 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef _QED_ETH_IF_H
@@ -171,12 +145,6 @@ struct qed_filter_mcast_params {
unsigned char mac[64][ETH_ALEN];
};
-union qed_filter_type_params {
- enum qed_filter_rx_mode_type accept_flags;
- struct qed_filter_ucast_params ucast;
- struct qed_filter_mcast_params mcast;
-};
-
enum qed_filter_type {
QED_FILTER_TYPE_UCAST,
QED_FILTER_TYPE_MCAST,
@@ -184,11 +152,6 @@ enum qed_filter_type {
QED_MAX_FILTER_TYPES,
};
-struct qed_filter_params {
- enum qed_filter_type type;
- union qed_filter_type_params filter;
-};
-
struct qed_tunn_params {
u16 vxlan_port;
u8 update_vxlan_port;
@@ -340,8 +303,14 @@ struct qed_eth_ops {
int (*q_tx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle);
- int (*filter_config)(struct qed_dev *cdev,
- struct qed_filter_params *params);
+ int (*filter_config_rx_mode)(struct qed_dev *cdev,
+ enum qed_filter_rx_mode_type type);
+
+ int (*filter_config_ucast)(struct qed_dev *cdev,
+ struct qed_filter_ucast_params *params);
+
+ int (*filter_config_mcast)(struct qed_dev *cdev,
+ struct qed_filter_mcast_params *params);
int (*fastpath_stop)(struct qed_dev *cdev);
@@ -362,7 +331,7 @@ struct qed_eth_ops {
int (*configure_arfs_searcher)(struct qed_dev *cdev,
enum qed_filter_config_mode mode);
int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle);
- int (*req_bulletin_update_mac)(struct qed_dev *cdev, u8 *mac);
+ int (*req_bulletin_update_mac)(struct qed_dev *cdev, const u8 *mac);
};
const struct qed_eth_ops *qed_get_eth_ops(void);
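
The monolithic filter_config() op (with its qed_filter_params/union wrapper) is split into three type-specific callbacks. A hedged sketch of the rx-mode path as a caller such as qede might use it; "edev", "cdev" and the enum value come from the surrounding API, not from this hunk:

int rc;

rc = edev->ops->filter_config_rx_mode(cdev, QED_FILTER_RX_MODE_TYPE_PROMISC);
if (rc)
	pr_err("failed to configure rx mode\n");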
diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h
index 46082480a2c3..90e3045b2dcb 100644
--- a/include/linux/qed/qed_fcoe_if.h
+++ b/include/linux/qed/qed_fcoe_if.h
@@ -1,4 +1,6 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright (c) 2019-2020 Marvell International Ltd. */
+
#ifndef _QED_FCOE_IF_H
#define _QED_FCOE_IF_H
#include <linux/types.h>
@@ -74,7 +76,7 @@ void qed_fcoe_set_pf_params(struct qed_dev *cdev,
* @fill_dev_info: fills FCoE specific information
* @param cdev
* @param info
- * @return 0 on sucesss, otherwise error value.
+ * @return 0 on success, otherwise error value.
* @register_ops: register FCoE operations
* @param cdev
* @param ops - specified using qed_iscsi_cb_ops
@@ -94,7 +96,7 @@ void qed_fcoe_set_pf_params(struct qed_dev *cdev,
* connection.
* @param p_doorbell - qed will fill the address of the
* doorbell.
- * return 0 on sucesss, otherwise error value.
+ * return 0 on success, otherwise error value.
* @release_conn: release a previously acquired fcoe connection
* @param cdev
* @param handle - the connection handle.
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 8f29e0d8a7b3..6dc4943d8aec 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -1,38 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef _QED_IF_H
#define _QED_IF_H
+#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
@@ -47,6 +22,10 @@
#include <linux/qed/common_hsi.h>
#include <linux/qed/qed_chain.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <net/devlink.h>
+
+#define QED_TX_SWS_TIMER_DFLT 500
+#define QED_TWO_MSL_TIMER_DFLT 4000
enum dcbx_protocol_type {
DCBX_PROTOCOL_ISCSI,
@@ -524,7 +503,7 @@ struct qed_fcoe_pf_params {
u8 bdq_pbl_num_entries[2];
};
-/* Most of the the parameters below are described in the FW iSCSI / TCP HSI */
+/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
struct qed_iscsi_pf_params {
u64 glbl_q_params_addr;
u64 bdq_pbl_base_addr[3];
@@ -566,6 +545,22 @@ struct qed_iscsi_pf_params {
u8 bdq_pbl_num_entries[3];
};
+struct qed_nvmetcp_pf_params {
+ u64 glbl_q_params_addr;
+ u16 cq_num_entries;
+ u16 num_cons;
+ u16 num_tasks;
+ u8 num_sq_pages_in_ring;
+ u8 num_r2tq_pages_in_ring;
+ u8 num_uhq_pages_in_ring;
+ u8 num_queues;
+ u8 gl_rq_pi;
+ u8 gl_cmd_pi;
+ u8 debug_mode;
+ u8 ll2_ooo_queue_id;
+ u16 min_rto;
+};
+
struct qed_rdma_pf_params {
/* Supplied to QED during resource allocation (may affect the ILT and
* the doorbell BAR).
@@ -584,6 +579,7 @@ struct qed_pf_params {
struct qed_eth_pf_params eth_pf_params;
struct qed_fcoe_pf_params fcoe_pf_params;
struct qed_iscsi_pf_params iscsi_pf_params;
+ struct qed_nvmetcp_pf_params nvmetcp_pf_params;
struct qed_rdma_pf_params rdma_pf_params;
};
@@ -595,7 +591,7 @@ enum qed_int_mode {
};
struct qed_sb_info {
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
u32 sb_ack; /* Last given ack */
u16 igu_sb_id;
@@ -607,6 +603,16 @@ struct qed_sb_info {
struct qed_dev *cdev;
};
+enum qed_hw_err_type {
+ QED_HW_ERR_FAN_FAIL,
+ QED_HW_ERR_MFW_RESP_FAIL,
+ QED_HW_ERR_HW_ATTN,
+ QED_HW_ERR_DMAE_FAIL,
+ QED_HW_ERR_RAMROD_FAIL,
+ QED_HW_ERR_FW_ASSERT,
+ QED_HW_ERR_LAST,
+};
+
enum qed_dev_type {
QED_DEV_TYPE_BB,
QED_DEV_TYPE_AH,
@@ -638,6 +644,7 @@ struct qed_dev_info {
#define QED_MFW_VERSION_3_OFFSET 24
u32 flash_size;
+ bool b_arfs_capable;
bool b_inter_pf_switch;
bool tx_switching;
bool rdma_supported;
@@ -645,6 +652,7 @@ struct qed_dev_info {
bool wol_support;
bool smart_an;
+ bool esl;
/* MBI version */
u32 mbi_version;
@@ -674,90 +682,76 @@ enum qed_sb_type {
enum qed_protocol {
QED_PROTOCOL_ETH,
QED_PROTOCOL_ISCSI,
+ QED_PROTOCOL_NVMETCP = QED_PROTOCOL_ISCSI,
QED_PROTOCOL_FCOE,
};
-enum qed_link_mode_bits {
- QED_LM_FIBRE_BIT = BIT(0),
- QED_LM_Autoneg_BIT = BIT(1),
- QED_LM_Asym_Pause_BIT = BIT(2),
- QED_LM_Pause_BIT = BIT(3),
- QED_LM_1000baseT_Full_BIT = BIT(4),
- QED_LM_10000baseT_Full_BIT = BIT(5),
- QED_LM_10000baseKR_Full_BIT = BIT(6),
- QED_LM_20000baseKR2_Full_BIT = BIT(7),
- QED_LM_25000baseKR_Full_BIT = BIT(8),
- QED_LM_40000baseLR4_Full_BIT = BIT(9),
- QED_LM_50000baseKR2_Full_BIT = BIT(10),
- QED_LM_100000baseKR4_Full_BIT = BIT(11),
- QED_LM_TP_BIT = BIT(12),
- QED_LM_Backplane_BIT = BIT(13),
- QED_LM_1000baseKX_Full_BIT = BIT(14),
- QED_LM_10000baseKX4_Full_BIT = BIT(15),
- QED_LM_10000baseR_FEC_BIT = BIT(16),
- QED_LM_40000baseKR4_Full_BIT = BIT(17),
- QED_LM_40000baseCR4_Full_BIT = BIT(18),
- QED_LM_40000baseSR4_Full_BIT = BIT(19),
- QED_LM_25000baseCR_Full_BIT = BIT(20),
- QED_LM_25000baseSR_Full_BIT = BIT(21),
- QED_LM_50000baseCR2_Full_BIT = BIT(22),
- QED_LM_100000baseSR4_Full_BIT = BIT(23),
- QED_LM_100000baseCR4_Full_BIT = BIT(24),
- QED_LM_100000baseLR4_ER4_Full_BIT = BIT(25),
- QED_LM_50000baseSR2_Full_BIT = BIT(26),
- QED_LM_1000baseX_Full_BIT = BIT(27),
- QED_LM_10000baseCR_Full_BIT = BIT(28),
- QED_LM_10000baseSR_Full_BIT = BIT(29),
- QED_LM_10000baseLR_Full_BIT = BIT(30),
- QED_LM_10000baseLRM_Full_BIT = BIT(31),
- QED_LM_COUNT = 32
+enum qed_fec_mode {
+ QED_FEC_MODE_NONE = BIT(0),
+ QED_FEC_MODE_FIRECODE = BIT(1),
+ QED_FEC_MODE_RS = BIT(2),
+ QED_FEC_MODE_AUTO = BIT(3),
+ QED_FEC_MODE_UNSUPPORTED = BIT(4),
};
struct qed_link_params {
- bool link_up;
-
-#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0)
-#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1)
-#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2)
-#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3)
-#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4)
-#define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5)
- u32 override_flags;
- bool autoneg;
- u32 adv_speeds;
- u32 forced_speed;
-#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0)
-#define QED_LINK_PAUSE_RX_ENABLE BIT(1)
-#define QED_LINK_PAUSE_TX_ENABLE BIT(2)
- u32 pause_config;
-#define QED_LINK_LOOPBACK_NONE BIT(0)
-#define QED_LINK_LOOPBACK_INT_PHY BIT(1)
-#define QED_LINK_LOOPBACK_EXT_PHY BIT(2)
-#define QED_LINK_LOOPBACK_EXT BIT(3)
-#define QED_LINK_LOOPBACK_MAC BIT(4)
- u32 loopback_mode;
- struct qed_link_eee_params eee;
+ bool link_up;
+
+ u32 override_flags;
+#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0)
+#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1)
+#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2)
+#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3)
+#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4)
+#define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5)
+#define QED_LINK_OVERRIDE_FEC_CONFIG BIT(6)
+
+ bool autoneg;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_speeds);
+ u32 forced_speed;
+
+ u32 pause_config;
+#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0)
+#define QED_LINK_PAUSE_RX_ENABLE BIT(1)
+#define QED_LINK_PAUSE_TX_ENABLE BIT(2)
+
+ u32 loopback_mode;
+#define QED_LINK_LOOPBACK_NONE BIT(0)
+#define QED_LINK_LOOPBACK_INT_PHY BIT(1)
+#define QED_LINK_LOOPBACK_EXT_PHY BIT(2)
+#define QED_LINK_LOOPBACK_EXT BIT(3)
+#define QED_LINK_LOOPBACK_MAC BIT(4)
+#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123 BIT(5)
+#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301 BIT(6)
+#define QED_LINK_LOOPBACK_PCS_AH_ONLY BIT(7)
+#define QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY BIT(8)
+#define QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY BIT(9)
+
+ struct qed_link_eee_params eee;
+ u32 fec;
};
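
For illustration only, a minimal sketch of how a protocol driver might drive these fields to force a fixed speed with RS FEC; set_link() is the qed_common_ops callback documented later in this header, and the helper name is hypothetical.

/* Hypothetical: force 25G with RS FEC. Only the flagged overrides are
 * applied; everything else keeps its current configuration.
 */
static int example_force_25g_rs(struct qed_dev *cdev,
                                const struct qed_common_ops *ops)
{
        struct qed_link_params params = {};

        params.override_flags = QED_LINK_OVERRIDE_SPEED_AUTONEG |
                                QED_LINK_OVERRIDE_SPEED_FORCED_SPEED |
                                QED_LINK_OVERRIDE_FEC_CONFIG;
        params.autoneg = false;
        params.forced_speed = 25000;    /* Mb/s */
        params.fec = QED_FEC_MODE_RS;   /* from enum qed_fec_mode above */
        params.link_up = true;

        return ops->set_link(cdev, &params);
}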
struct qed_link_output {
- bool link_up;
+ bool link_up;
- /* In QED_LM_* defs */
- u32 supported_caps;
- u32 advertised_caps;
- u32 lp_caps;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_caps);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised_caps);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_caps);
- u32 speed; /* In Mb/s */
- u8 duplex; /* In DUPLEX defs */
- u8 port; /* In PORT defs */
- bool autoneg;
- u32 pause_config;
+ u32 speed; /* In Mb/s */
+ u8 duplex; /* In DUPLEX defs */
+ u8 port; /* In PORT defs */
+ bool autoneg;
+ u32 pause_config;
/* EEE - capability & param */
- bool eee_supported;
- bool eee_active;
- u8 sup_caps;
- struct qed_link_eee_params eee;
+ bool eee_supported;
+ bool eee_active;
+ u8 sup_caps;
+ struct qed_link_eee_params eee;
+
+ u32 sup_fec;
+ u32 active_fec;
};
struct qed_probe_params {
@@ -809,59 +803,72 @@ enum qed_nvm_flash_cmd {
QED_NVM_FLASH_CMD_NVM_MAX,
};
+struct qed_devlink {
+ struct qed_dev *cdev;
+ struct devlink_health_reporter *fw_reporter;
+};
+
+struct qed_sb_info_dbg {
+ u32 igu_prod;
+ u32 igu_cons;
+ u16 pi[PIS_PER_SB];
+};
+
struct qed_common_cb_ops {
void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc);
- void (*link_update)(void *dev,
- struct qed_link_output *link);
+ void (*link_update)(void *dev, struct qed_link_output *link);
void (*schedule_recovery_handler)(void *dev);
- void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
+ void (*schedule_hw_err_handler)(void *dev,
+ enum qed_hw_err_type err_type);
+ void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data);
void (*get_protocol_tlv_data)(void *dev, void *data);
+ void (*bw_update)(void *dev);
};
struct qed_selftest_ops {
/**
- * @brief selftest_interrupt - Perform interrupt test
+ * selftest_interrupt(): Perform interrupt test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_interrupt)(struct qed_dev *cdev);
/**
- * @brief selftest_memory - Perform memory test
+ * selftest_memory(): Perform memory test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_memory)(struct qed_dev *cdev);
/**
- * @brief selftest_register - Perform register test
+ * selftest_register(): Perform register test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_register)(struct qed_dev *cdev);
/**
- * @brief selftest_clock - Perform clock test
+ * selftest_clock(): Perform clock test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_clock)(struct qed_dev *cdev);
/**
- * @brief selftest_nvram - Perform nvram test
+ * selftest_nvram(): Perform nvram test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_nvram) (struct qed_dev *cdev);
};
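
As a usage sketch only (not part of the source): the five callbacks are naturally chained by a driver-side self-test handler, stopping at the first failure.

/* Hypothetical wrapper exercising every qed selftest in turn. */
static int example_run_selftests(struct qed_dev *cdev,
                                 const struct qed_selftest_ops *ops)
{
        int rc;

        rc = ops->selftest_interrupt(cdev);
        if (rc)
                return rc;
        rc = ops->selftest_memory(cdev);
        if (rc)
                return rc;
        rc = ops->selftest_register(cdev);
        if (rc)
                return rc;
        rc = ops->selftest_clock(cdev);
        if (rc)
                return rc;
        return ops->selftest_nvram(cdev);
}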
@@ -872,10 +879,9 @@ struct qed_common_ops {
struct qed_dev* (*probe)(struct pci_dev *dev,
struct qed_probe_params *params);
- void (*remove)(struct qed_dev *cdev);
+ void (*remove)(struct qed_dev *cdev);
- int (*set_power_state)(struct qed_dev *cdev,
- pci_power_t state);
+ int (*set_power_state)(struct qed_dev *cdev, pci_power_t state);
void (*set_name) (struct qed_dev *cdev, char name[]);
@@ -883,166 +889,180 @@ struct qed_common_ops {
* PF params required for the call before slowpath_start is
* documented within the qed_pf_params structure definition.
*/
- void (*update_pf_params)(struct qed_dev *cdev,
- struct qed_pf_params *params);
- int (*slowpath_start)(struct qed_dev *cdev,
- struct qed_slowpath_params *params);
+ void (*update_pf_params)(struct qed_dev *cdev,
+ struct qed_pf_params *params);
+
+ int (*slowpath_start)(struct qed_dev *cdev,
+ struct qed_slowpath_params *params);
- int (*slowpath_stop)(struct qed_dev *cdev);
+ int (*slowpath_stop)(struct qed_dev *cdev);
/* Requests to use `cnt' interrupts for fastpath.
 * Upon success, returns the number of interrupts allocated for fastpath.
*/
- int (*set_fp_int)(struct qed_dev *cdev,
- u16 cnt);
+ int (*set_fp_int)(struct qed_dev *cdev, u16 cnt);
/* Fills `info' with pointers required for utilizing interrupts */
- int (*get_fp_int)(struct qed_dev *cdev,
- struct qed_int_info *info);
-
- u32 (*sb_init)(struct qed_dev *cdev,
- struct qed_sb_info *sb_info,
- void *sb_virt_addr,
- dma_addr_t sb_phy_addr,
- u16 sb_id,
- enum qed_sb_type type);
-
- u32 (*sb_release)(struct qed_dev *cdev,
- struct qed_sb_info *sb_info,
- u16 sb_id,
- enum qed_sb_type type);
-
- void (*simd_handler_config)(struct qed_dev *cdev,
- void *token,
- int index,
- void (*handler)(void *));
-
- void (*simd_handler_clean)(struct qed_dev *cdev,
- int index);
- int (*dbg_grc)(struct qed_dev *cdev,
- void *buffer, u32 *num_dumped_bytes);
+ int (*get_fp_int)(struct qed_dev *cdev, struct qed_int_info *info);
+
+ u32 (*sb_init)(struct qed_dev *cdev,
+ struct qed_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr,
+ u16 sb_id,
+ enum qed_sb_type type);
+
+ u32 (*sb_release)(struct qed_dev *cdev,
+ struct qed_sb_info *sb_info,
+ u16 sb_id,
+ enum qed_sb_type type);
+
+ void (*simd_handler_config)(struct qed_dev *cdev,
+ void *token,
+ int index,
+ void (*handler)(void *));
+
+ void (*simd_handler_clean)(struct qed_dev *cdev, int index);
+
+ int (*dbg_grc)(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
int (*dbg_grc_size)(struct qed_dev *cdev);
- int (*dbg_all_data) (struct qed_dev *cdev, void *buffer);
+ int (*dbg_all_data)(struct qed_dev *cdev, void *buffer);
+
+ int (*dbg_all_data_size)(struct qed_dev *cdev);
- int (*dbg_all_data_size) (struct qed_dev *cdev);
+ int (*report_fatal_error)(struct devlink *devlink,
+ enum qed_hw_err_type err_type);
/**
- * @brief can_link_change - can the instance change the link or not
+ * can_link_change(): Can the instance change the link or not.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return true if link-change is allowed, false otherwise.
+ * Return: true if link-change is allowed, false otherwise.
*/
bool (*can_link_change)(struct qed_dev *cdev);
/**
- * @brief set_link - set links according to params
+ * set_link(): Set links according to params.
*
- * @param cdev
- * @param params - values used to override the default link configuration
+ * @cdev: Qed dev pointer.
+ * @params: values used to override the default link configuration.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*set_link)(struct qed_dev *cdev,
struct qed_link_params *params);
/**
- * @brief get_link - returns the current link state.
+ * get_link(): Returns the current link state.
*
- * @param cdev
- * @param if_link - structure to be filled with current link configuration.
+ * @cdev: Qed dev pointer.
+ * @if_link: structure to be filled with current link configuration.
+ *
+ * Return: Void.
*/
void (*get_link)(struct qed_dev *cdev,
struct qed_link_output *if_link);
/**
- * @brief - drains chip in case Tx completions fail to arrive due to pause.
+ * drain(): Drains the chip in case Tx completions fail to arrive due to pause.
+ *
+ * @cdev: Qed dev pointer.
*
- * @param cdev
+ * Return: Int.
*/
int (*drain)(struct qed_dev *cdev);
/**
- * @brief update_msglvl - update module debug level
+ * update_msglvl(): Update the module debug level.
*
- * @param cdev
- * @param dp_module
- * @param dp_level
+ * @cdev: Qed dev pointer.
+ * @dp_module: Debug module.
+ * @dp_level: Debug level.
+ *
+ * Return: Void.
*/
void (*update_msglvl)(struct qed_dev *cdev,
u32 dp_module,
u8 dp_level);
int (*chain_alloc)(struct qed_dev *cdev,
- enum qed_chain_use_mode intended_use,
- enum qed_chain_mode mode,
- enum qed_chain_cnt_type cnt_type,
- u32 num_elems,
- size_t elem_size,
- struct qed_chain *p_chain,
- struct qed_chain_ext_pbl *ext_pbl);
+ struct qed_chain *chain,
+ struct qed_chain_init_params *params);
void (*chain_free)(struct qed_dev *cdev,
struct qed_chain *p_chain);
/**
- * @brief nvm_flash - Flash nvm data.
+ * nvm_flash(): Flash nvm data.
*
- * @param cdev
- * @param name - file containing the data
+ * @cdev: Qed dev pointer.
+ * @name: file containing the data.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*nvm_flash)(struct qed_dev *cdev, const char *name);
/**
- * @brief nvm_get_image - reads an entire image from nvram
+ * nvm_get_image(): reads an entire image from nvram.
*
- * @param cdev
- * @param type - type of the request nvram image
- * @param buf - preallocated buffer to fill with the image
- * @param len - length of the allocated buffer
+ * @cdev: Qed dev pointer.
+ * @type: type of the request nvram image.
+ * @buf: preallocated buffer to fill with the image.
+ * @len: length of the allocated buffer.
*
- * @return 0 on success, error otherwise
+ * Return: 0 on success, error otherwise.
*/
int (*nvm_get_image)(struct qed_dev *cdev,
enum qed_nvm_images type, u8 *buf, u16 len);
/**
- * @brief set_coalesce - Configure Rx coalesce value in usec
+ * set_coalesce(): Configure Rx coalesce value in usec.
*
- * @param cdev
- * @param rx_coal - Rx coalesce value in usec
- * @param tx_coal - Tx coalesce value in usec
- * @param qid - Queue index
- * @param sb_id - Status Block Id
+ * @cdev: Qed dev pointer.
+ * @rx_coal: Rx coalesce value in usec.
+ * @tx_coal: Tx coalesce value in usec.
+ * @handle: Queue handle.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*set_coalesce)(struct qed_dev *cdev,
u16 rx_coal, u16 tx_coal, void *handle);
/**
- * @brief set_led - Configure LED mode
+ * set_led() - Configure LED mode.
*
- * @param cdev
- * @param mode - LED mode
+ * @cdev: Qed dev pointer.
+ * @mode: LED mode.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*set_led)(struct qed_dev *cdev,
enum qed_led_mode mode);
+
/**
- * @brief db_recovery_add - add doorbell information to the doorbell
- * recovery mechanism.
+ * attn_clr_enable(): Prevent attentions from being reasserted.
*
- * @param cdev
- * @param db_addr - doorbell address
- * @param db_data - address of where db_data is stored
- * @param db_is_32b - doorbell is 32b pr 64b
- * @param db_is_user - doorbell recovery addresses are user or kernel space
+ * @cdev: Qed dev pointer.
+ * @clr_enable: Clear enable.
+ *
+ * Return: Void.
+ */
+ void (*attn_clr_enable)(struct qed_dev *cdev, bool clr_enable);
+
+/**
+ * db_recovery_add(): add doorbell information to the doorbell
+ * recovery mechanism.
+ *
+ * @cdev: Qed dev pointer.
+ * @db_addr: Doorbell address.
+ * @db_data: Address where db_data is stored.
+ * @db_width: Doorbell is 32b or 64b.
+ * @db_space: Doorbell recovery addresses are user or kernel space.
+ *
+ * Return: Int.
*/
int (*db_recovery_add)(struct qed_dev *cdev,
void __iomem *db_addr,
@@ -1051,116 +1071,143 @@ struct qed_common_ops {
enum qed_db_rec_space db_space);
/**
- * @brief db_recovery_del - remove doorbell information from the doorbell
+ * db_recovery_del(): remove doorbell information from the doorbell
* recovery mechanism. db_data serves as key (db_addr is not unique).
*
- * @param cdev
- * @param db_addr - doorbell address
- * @param db_data - address where db_data is stored. Serves as key for the
- * entry to delete.
+ * @cdev: Qed dev pointer.
+ * @db_addr: Doorbell address.
+ * @db_data: Address where db_data is stored. Serves as key for the
+ * entry to delete.
+ *
+ * Return: Int.
*/
int (*db_recovery_del)(struct qed_dev *cdev,
void __iomem *db_addr, void *db_data);
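
A hedged sketch of the intended add/del pairing (the width and space enumerators are assumed from elsewhere in qed_if.h): register a doorbell when its queue is created, and remove it at teardown with the same db_data pointer, which serves as the key.

/* Hypothetical: track a kernel-space 64-bit doorbell across recovery. */
static int example_db_track(struct qed_dev *cdev,
                            const struct qed_common_ops *ops,
                            void __iomem *db_addr, u64 *db_data)
{
        return ops->db_recovery_add(cdev, db_addr, db_data,
                                    DB_REC_WIDTH_64B, DB_REC_KERNEL);
}

static void example_db_untrack(struct qed_dev *cdev,
                               const struct qed_common_ops *ops,
                               void __iomem *db_addr, u64 *db_data)
{
        ops->db_recovery_del(cdev, db_addr, db_data);
}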
/**
- * @brief recovery_process - Trigger a recovery process
+ * recovery_process(): Trigger a recovery process.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*recovery_process)(struct qed_dev *cdev);
/**
- * @brief recovery_prolog - Execute the prolog operations of a recovery process
+ * recovery_prolog(): Execute the prolog operations of a recovery process.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*recovery_prolog)(struct qed_dev *cdev);
/**
- * @brief update_drv_state - API to inform the change in the driver state.
+ * update_drv_state(): API to inform the change in the driver state.
*
- * @param cdev
- * @param active
+ * @cdev: Qed dev pointer.
+ * @active: Whether the driver is active.
*
+ * Return: Int.
*/
int (*update_drv_state)(struct qed_dev *cdev, bool active);
/**
- * @brief update_mac - API to inform the change in the mac address
+ * update_mac(): API to inform the change in the mac address.
*
- * @param cdev
- * @param mac
+ * @cdev: Qed dev pointer.
+ * @mac: MAC address.
*
+ * Return: Int.
*/
- int (*update_mac)(struct qed_dev *cdev, u8 *mac);
+ int (*update_mac)(struct qed_dev *cdev, const u8 *mac);
/**
- * @brief update_mtu - API to inform the change in the mtu
+ * update_mtu(): API to inform the change in the mtu.
*
- * @param cdev
- * @param mtu
+ * @cdev: Qed dev pointer.
+ * @mtu: MTU.
*
+ * Return: Int.
*/
int (*update_mtu)(struct qed_dev *cdev, u16 mtu);
/**
- * @brief update_wol - update of changes in the WoL configuration
+ * update_wol(): Inform of a change in the WoL configuration.
*
- * @param cdev
- * @param enabled - true iff WoL should be enabled.
+ * @cdev: Qed dev pointer.
+ * @enabled: true iff WoL should be enabled.
+ *
+ * Return: Int.
*/
int (*update_wol) (struct qed_dev *cdev, bool enabled);
/**
- * @brief read_module_eeprom
+ * read_module_eeprom(): Read EEPROM.
+ *
+ * @cdev: Qed dev pointer.
+ * @buf: Buffer.
+ * @dev_addr: PHY device memory region.
+ * @offset: offset into eeprom contents to be read.
+ * @len: buffer length, i.e., max bytes to be read.
*
- * @param cdev
- * @param buf - buffer
- * @param dev_addr - PHY device memory region
- * @param offset - offset into eeprom contents to be read
- * @param len - buffer length, i.e., max bytes to be read
+ * Return: Int.
*/
int (*read_module_eeprom)(struct qed_dev *cdev,
char *buf, u8 dev_addr, u32 offset, u32 len);
/**
- * @brief get_affin_hwfn_idx
+ * get_affin_hwfn_idx(): Get affine HW function.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
+ *
+ * Return: u8.
*/
u8 (*get_affin_hwfn_idx)(struct qed_dev *cdev);
/**
- * @brief read_nvm_cfg - Read NVM config attribute value.
- * @param cdev
- * @param buf - buffer
- * @param cmd - NVM CFG command id
- * @param entity_id - Entity id
+ * read_nvm_cfg(): Read NVM config attribute value.
+ *
+ * @cdev: Qed dev pointer.
+ * @buf: Buffer.
+ * @cmd: NVM CFG command id.
+ * @entity_id: Entity id.
*
+ * Return: Int.
*/
int (*read_nvm_cfg)(struct qed_dev *cdev, u8 **buf, u32 cmd,
u32 entity_id);
/**
- * @brief read_nvm_cfg - Read NVM config attribute value.
- * @param cdev
- * @param cmd - NVM CFG command id
+ * read_nvm_cfg_len(): Read NVM config attribute value.
+ *
+ * @cdev: Qed dev pointer.
+ * @cmd: NVM CFG command id.
*
- * @return config id length, 0 on error.
+ * Return: config id length, 0 on error.
*/
int (*read_nvm_cfg_len)(struct qed_dev *cdev, u32 cmd);
/**
- * @brief set_grc_config - Configure value for grc config id.
- * @param cdev
- * @param cfg_id - grc config id
- * @param val - grc config value
+ * set_grc_config(): Configure value for grc config id.
*
+ * @cdev: Qed dev pointer.
+ * @cfg_id: grc config id.
+ * @val: grc config value.
+ *
+ * Return: Int.
*/
int (*set_grc_config)(struct qed_dev *cdev, u32 cfg_id, u32 val);
+
+ struct devlink* (*devlink_register)(struct qed_dev *cdev);
+
+ void (*devlink_unregister)(struct devlink *devlink);
+
+ __printf(2, 3) void (*mfw_report)(struct qed_dev *cdev, char *fmt, ...);
+
+ int (*get_sb_info)(struct qed_dev *cdev, struct qed_sb_info *sb,
+ u16 qid, struct qed_sb_info_dbg *sb_dbg);
+
+ int (*get_esl_status)(struct qed_dev *cdev, bool *esl_active);
};
#define MASK_FIELD(_name, _value) \
@@ -1380,7 +1427,7 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
u16 rc = 0;
prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
- STATUS_BLOCK_E4_PROD_INDEX_MASK;
+ STATUS_BLOCK_PROD_INDEX_MASK;
if (sb_info->sb_ack != prod) {
sb_info->sb_ack = prod;
rc |= QED_SB_IDX;
@@ -1391,33 +1438,30 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
}
/**
+ * qed_sb_ack(): This function creates an update command for interrupts
+ * that is written to the IGU.
*
- * @brief This function creates an update command for interrupts that is
- * written to the IGU.
- *
- * @param sb_info - This is the structure allocated and
- * initialized per status block. Assumption is
- * that it was initialized using qed_sb_init
- * @param int_cmd - Enable/Disable/Nop
- * @param upd_flg - whether igu consumer should be
- * updated.
+ * @sb_info: This is the structure allocated and
+ * initialized per status block. Assumption is
+ * that it was initialized using qed_sb_init
+ * @int_cmd: Enable/Disable/Nop
+ * @upd_flg: Whether igu consumer should be updated.
*
- * @return inline void
+ * Return: inline void.
*/
static inline void qed_sb_ack(struct qed_sb_info *sb_info,
enum igu_int_cmd int_cmd,
u8 upd_flg)
{
- struct igu_prod_cons_update igu_ack = { 0 };
+ u32 igu_ack;
- igu_ack.sb_id_and_flags =
- ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
- (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
- (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
- (IGU_SEG_ACCESS_REG <<
- IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+ igu_ack = ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+ (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+ (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+ (IGU_SEG_ACCESS_REG <<
+ IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
- DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags);
+ DIRECT_REG_WR(sb_info->igu_addr, igu_ack);
/* Both segments (interrupts & acks) are written to same place address;
* Need to guarantee all commands will be received (in-order) by HW.
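
For illustration, the two inline helpers are meant to be used together on the fastpath; a minimal sketch, assuming IGU_INT_ENABLE from the qed HSI headers:

/* Hypothetical handler: refresh the consumer from the status block, do
 * work if the index moved, then ack the IGU and re-enable the line.
 */
static void example_sb_handler(struct qed_sb_info *sb_info)
{
        if (qed_sb_update_sb_idx(sb_info) & QED_SB_IDX) {
                /* ... consume completions up to sb_info->sb_ack ... */
        }
        qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
}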
diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h
index ac2e6a3199a3..8e31a28e51b9 100644
--- a/include/linux/qed/qed_iov_if.h
+++ b/include/linux/qed/qed_iov_if.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef _QED_IOV_IF_H
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
index d0df1bec5357..fbf7973ae9ba 100644
--- a/include/linux/qed/qed_iscsi_if.h
+++ b/include/linux/qed/qed_iscsi_if.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef _QED_ISCSI_IF_H
@@ -159,7 +133,7 @@ struct qed_iscsi_cb_ops {
* @fill_dev_info: fills iSCSI specific information
* @param cdev
* @param info
- * @return 0 on sucesss, otherwise error value.
+ * @return 0 on success, otherwise error value.
* @register_ops: register iscsi operations
* @param cdev
* @param ops - specified using qed_iscsi_cb_ops
@@ -178,7 +152,7 @@ struct qed_iscsi_cb_ops {
* connection.
* @param p_doorbell - qed will fill the address of the
* doorbell.
- * @return 0 on sucesss, otherwise error value.
+ * @return 0 on success, otherwise error value.
* @release_conn: release a previously acquired iscsi connection
* @param cdev
* @param handle - the connection handle.
@@ -208,7 +182,7 @@ struct qed_iscsi_cb_ops {
* @param stats - pointer to struck that would be filled
* we stats
* @return 0 on success, error otherwise.
- * @change_mac Change MAC of interface
+ * @change_mac: Change MAC of interface
* @param cdev
* @param handle - the connection handle.
* @param mac - new MAC to configure.
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
index 1313c34d9a68..5b67cd03276e 100644
--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef _QED_LL2_IF_H
@@ -38,14 +12,13 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/qed/qed_if.h>
enum qed_ll2_conn_type {
QED_LL2_TYPE_FCOE,
- QED_LL2_TYPE_ISCSI,
+ QED_LL2_TYPE_TCP_ULP,
QED_LL2_TYPE_TEST,
QED_LL2_TYPE_OOO,
QED_LL2_TYPE_RESERVED2,
@@ -235,57 +208,57 @@ enum qed_ll2_xmit_flags {
struct qed_ll2_ops {
/**
- * @brief start - initializes ll2
+ * start(): Initializes ll2.
*
- * @param cdev
- * @param params - protocol driver configuration for the ll2.
+ * @cdev: Qed dev pointer.
+ * @params: Protocol driver configuration for the ll2.
*
- * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value.
*/
int (*start)(struct qed_dev *cdev, struct qed_ll2_params *params);
/**
- * @brief stop - stops the ll2
+ * stop(): Stops the ll2.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value.
*/
int (*stop)(struct qed_dev *cdev);
/**
- * @brief start_xmit - transmits an skb over the ll2 interface
+ * start_xmit(): Transmits an skb over the ll2 interface.
*
- * @param cdev
- * @param skb
- * @param xmit_flags - Transmit options defined by the enum qed_ll2_xmit_flags.
+ * @cdev: Qed dev pointer.
+ * @skb: SKB.
+ * @xmit_flags: Transmit options defined by the enum qed_ll2_xmit_flags.
*
- * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value.
*/
int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb,
unsigned long xmit_flags);
/**
- * @brief register_cb_ops - protocol driver register the callback for Rx/Tx
+ * register_cb_ops(): Protocol driver registers the callbacks for Rx/Tx
* packets. Should be called before `start'.
*
- * @param cdev
- * @param cookie - to be passed to the callback functions.
- * @param ops - the callback functions to register for Rx / Tx.
+ * @cdev: Qed dev pointer.
+ * @cookie: to be passed to the callback functions.
+ * @ops: the callback functions to register for Rx / Tx.
*
- * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value.
*/
void (*register_cb_ops)(struct qed_dev *cdev,
const struct qed_ll2_cb_ops *ops,
void *cookie);
/**
- * @brief get LL2 related statistics
+ * get_stats(): Get LL2 related statistics.
*
- * @param cdev
- * @param stats - pointer to struct that would be filled with stats
+ * @cdev: Qed dev pointer.
+ * @stats: Pointer to struct that would be filled with stats.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*get_stats)(struct qed_dev *cdev, struct qed_ll2_stats *stats);
};
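
As a hedged usage sketch (not from the source): callbacks must be registered before start(), so a protocol driver's bring-up typically looks like this.

/* Hypothetical LL2 bring-up order per the register_cb_ops() comment. */
static int example_ll2_up(struct qed_dev *cdev,
                          const struct qed_ll2_ops *ops,
                          const struct qed_ll2_cb_ops *cbs, void *cookie,
                          struct qed_ll2_params *params)
{
        ops->register_cb_ops(cdev, cbs, cookie);
        return ops->start(cdev, params);
}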
diff --git a/include/linux/qed/qed_nvmetcp_if.h b/include/linux/qed/qed_nvmetcp_if.h
new file mode 100644
index 000000000000..bbfbfba51f37
--- /dev/null
+++ b/include/linux/qed/qed_nvmetcp_if.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#ifndef _QED_NVMETCP_IF_H
+#define _QED_NVMETCP_IF_H
+#include <linux/types.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/nvmetcp_common.h>
+
+#define QED_NVMETCP_MAX_IO_SIZE 0x800000
+#define QED_NVMETCP_CMN_HDR_SIZE (sizeof(struct nvme_tcp_hdr))
+#define QED_NVMETCP_CMD_HDR_SIZE (sizeof(struct nvme_tcp_cmd_pdu))
+#define QED_NVMETCP_NON_IO_HDR_SIZE ((QED_NVMETCP_CMN_HDR_SIZE + 16))
+
+typedef int (*nvmetcp_event_cb_t) (void *context,
+ u8 fw_event_code, void *fw_handle);
+
+struct qed_dev_nvmetcp_info {
+ struct qed_dev_info common;
+ u8 port_id; /* Physical port */
+ u8 num_cqs;
+};
+
+#define MAX_TID_BLOCKS_NVMETCP (512)
+struct qed_nvmetcp_tid {
+ u32 size; /* In bytes per task */
+ u32 num_tids_per_block;
+ u8 *blocks[MAX_TID_BLOCKS_NVMETCP];
+};
+
+struct qed_nvmetcp_id_params {
+ u8 mac[ETH_ALEN];
+ u32 ip[4];
+ u16 port;
+};
+
+struct qed_nvmetcp_params_offload {
+ /* FW initializations */
+ dma_addr_t sq_pbl_addr;
+ dma_addr_t nvmetcp_cccid_itid_table_addr;
+ u16 nvmetcp_cccid_max_range;
+ u8 default_cq;
+
+ /* Networking and TCP stack initializations */
+ struct qed_nvmetcp_id_params src;
+ struct qed_nvmetcp_id_params dst;
+ u32 ka_timeout;
+ u32 ka_interval;
+ u32 max_rt_time;
+ u32 cwnd;
+ u16 mss;
+ u16 vlan_id;
+ bool timestamp_en;
+ bool delayed_ack_en;
+ bool tcp_keep_alive_en;
+ bool ecn_en;
+ u8 ip_version;
+ u8 ka_max_probe_cnt;
+ u8 ttl;
+ u8 tos_or_tc;
+ u8 rcv_wnd_scale;
+};
+
+struct qed_nvmetcp_params_update {
+ u32 max_io_size;
+ u32 max_recv_pdu_length;
+ u32 max_send_pdu_length;
+
+ /* Placeholder: pfv, cpda, hpda */
+
+ bool hdr_digest_en;
+ bool data_digest_en;
+};
+
+struct qed_nvmetcp_cb_ops {
+ struct qed_common_cb_ops common;
+};
+
+struct nvmetcp_sge {
+ struct regpair sge_addr; /* SGE address */
+ __le32 sge_len; /* SGE length */
+ __le32 reserved;
+};
+
+/* IO path HSI function SGL params */
+struct storage_sgl_task_params {
+ struct nvmetcp_sge *sgl;
+ struct regpair sgl_phys_addr;
+ u32 total_buffer_size;
+ u16 num_sges;
+ bool small_mid_sge;
+};
+
+/* IO path HSI function FW task context params */
+struct nvmetcp_task_params {
+ void *context; /* Output parameter - set/filled by the HSI function */
+ struct nvmetcp_wqe *sqe;
+ u32 tx_io_size; /* in bytes (excluding DIF, if present) */
+ u32 rx_io_size; /* in bytes (excluding DIF, if present) */
+ u16 conn_icid;
+ u16 itid;
+ struct regpair opq; /* qedn_task_ctx address */
+ u16 host_cccid;
+ u8 cq_rss_number;
+ bool send_write_incapsule;
+};
+
+/**
+ * struct qed_nvmetcp_ops - qed NVMeTCP operations.
+ * @common: common operations pointer
+ * @ll2: light L2 operations pointer
+ * @fill_dev_info: fills NVMeTCP specific information
+ * @param cdev
+ * @param info
+ * @return 0 on success, otherwise error value.
+ * @register_ops: register nvmetcp operations
+ * @param cdev
+ * @param ops - specified using qed_nvmetcp_cb_ops
+ * @param cookie - driver private
+ * @start: start nvmetcp in FW
+ * @param cdev
+ * @param tasks - qed will fill information about tasks
+ * @return 0 on success, otherwise error value.
+ * @stop: stop nvmetcp in FW
+ * @param cdev
+ * @return 0 on success, otherwise error value.
+ * @acquire_conn: acquire a new nvmetcp connection
+ * @param cdev
+ * @param handle - qed will fill handle that should be
+ * used henceforth as identifier of the
+ * connection.
+ * @param p_doorbell - qed will fill the address of the
+ * doorbell.
+ * @return 0 on success, otherwise error value.
+ * @release_conn: release a previously acquired nvmetcp connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @offload_conn: configures an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @param conn_info - the configuration to use for the
+ * offload.
+ * @return 0 on success, otherwise error value.
+ * @update_conn: updates an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @param conn_info - the configuration to use for the
+ * offload.
+ * @return 0 on success, otherwise error value.
+ * @destroy_conn: stops an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @clear_sq: clear all task in sq
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @add_src_tcp_port_filter: Add source tcp port filter
+ * @param cdev
+ * @param src_port
+ * @remove_src_tcp_port_filter: Remove source tcp port filter
+ * @param cdev
+ * @param src_port
+ * @add_dst_tcp_port_filter: Add destination tcp port filter
+ * @param cdev
+ * @param dest_port
+ * @remove_dst_tcp_port_filter: Remove destination tcp port filter
+ * @param cdev
+ * @param dest_port
+ * @clear_all_filters: Clear all filters.
+ * @param cdev
+ * @init_read_io: Init read IO.
+ * @task_params
+ * @cmd_pdu_header
+ * @nvme_cmd
+ * @sgl_task_params
+ * @init_write_io: Init write IO.
+ * @task_params
+ * @cmd_pdu_header
+ * @nvme_cmd
+ * @sgl_task_params
+ * @init_icreq_exchange: Exchange ICReq.
+ * @task_params
+ * @init_conn_req_pdu_hdr
+ * @tx_sgl_task_params
+ * @rx_sgl_task_params
+ * @init_task_cleanup: Init task cleanup.
+ * @task_params
+ */
+struct qed_nvmetcp_ops {
+ const struct qed_common_ops *common;
+
+ const struct qed_ll2_ops *ll2;
+
+ int (*fill_dev_info)(struct qed_dev *cdev,
+ struct qed_dev_nvmetcp_info *info);
+
+ void (*register_ops)(struct qed_dev *cdev,
+ struct qed_nvmetcp_cb_ops *ops, void *cookie);
+
+ int (*start)(struct qed_dev *cdev,
+ struct qed_nvmetcp_tid *tasks,
+ void *event_context, nvmetcp_event_cb_t async_event_cb);
+
+ int (*stop)(struct qed_dev *cdev);
+
+ int (*acquire_conn)(struct qed_dev *cdev,
+ u32 *handle,
+ u32 *fw_cid, void __iomem **p_doorbell);
+
+ int (*release_conn)(struct qed_dev *cdev, u32 handle);
+
+ int (*offload_conn)(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_nvmetcp_params_offload *conn_info);
+
+ int (*update_conn)(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_nvmetcp_params_update *conn_info);
+
+ int (*destroy_conn)(struct qed_dev *cdev, u32 handle, u8 abrt_conn);
+
+ int (*clear_sq)(struct qed_dev *cdev, u32 handle);
+
+ int (*add_src_tcp_port_filter)(struct qed_dev *cdev, u16 src_port);
+
+ void (*remove_src_tcp_port_filter)(struct qed_dev *cdev, u16 src_port);
+
+ int (*add_dst_tcp_port_filter)(struct qed_dev *cdev, u16 dest_port);
+
+ void (*remove_dst_tcp_port_filter)(struct qed_dev *cdev, u16 dest_port);
+
+ void (*clear_all_filters)(struct qed_dev *cdev);
+
+ void (*init_read_io)(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params);
+
+ void (*init_write_io)(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params);
+
+ void (*init_icreq_exchange)(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_icreq_pdu *init_conn_req_pdu_hdr,
+ struct storage_sgl_task_params *tx_sgl_task_params,
+ struct storage_sgl_task_params *rx_sgl_task_params);
+
+ void (*init_task_cleanup)(struct nvmetcp_task_params *task_params);
+};
+
+const struct qed_nvmetcp_ops *qed_get_nvmetcp_ops(void);
+void qed_put_nvmetcp_ops(void);
+#endif
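
For illustration, a minimal sketch of how a consumer binds to these ops; it assumes the getter returns NULL when the qed module is unavailable.

/* Hypothetical: resolve the ops, query device info, keep qed pinned on
 * success and release it on failure.
 */
static const struct qed_nvmetcp_ops *
example_nvmetcp_bind(struct qed_dev *cdev, struct qed_dev_nvmetcp_info *info)
{
        const struct qed_nvmetcp_ops *ops = qed_get_nvmetcp_ops();

        if (!ops)
                return NULL;
        if (ops->fill_dev_info(cdev, info)) {
                qed_put_nvmetcp_ops();
                return NULL;
        }
        return ops;
}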
diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
index 74efca15fde7..3b76c07fbcf8 100644
--- a/include/linux/qed/qed_rdma_if.h
+++ b/include/linux/qed/qed_rdma_if.h
@@ -1,34 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
+
#ifndef _QED_RDMA_IF_H
#define _QED_RDMA_IF_H
#include <linux/types.h>
@@ -53,6 +28,13 @@ enum qed_roce_qp_state {
QED_ROCE_QP_STATE_SQE
};
+enum qed_rdma_qp_type {
+ QED_RDMA_QP_TYPE_RC,
+ QED_RDMA_QP_TYPE_XRC_INI,
+ QED_RDMA_QP_TYPE_XRC_TGT,
+ QED_RDMA_QP_TYPE_INVAL = 0xffff,
+};
+
enum qed_rdma_tid_type {
QED_RDMA_TID_REGISTERED_MR,
QED_RDMA_TID_FMR,
@@ -91,7 +73,6 @@ struct qed_rdma_device {
u64 max_mr_size;
u32 max_cqe;
u32 max_mw;
- u32 max_fmr;
u32 max_mr_mw_fmr_pbl;
u64 max_mr_mw_fmr_size;
u32 max_pd;
@@ -261,10 +242,8 @@ struct qed_rdma_register_tid_in_params {
bool pbl_two_level;
u8 pbl_page_size_log;
u8 page_size_log;
- u32 fbo;
u64 length;
u64 vaddr;
- bool zbva;
bool phy_mr;
bool dma_mr;
@@ -291,6 +270,12 @@ struct qed_rdma_create_srq_in_params {
u16 num_pages;
u16 pd_id;
u16 page_size;
+
+ /* XRC related only */
+ bool reserved_key_en;
+ bool is_xrc;
+ u32 cq_cid;
+ u16 xrcd_id;
};
struct qed_rdma_destroy_cq_in_params {
@@ -319,7 +304,12 @@ struct qed_rdma_create_qp_in_params {
u16 rq_num_pages;
u64 rq_pbl_ptr;
u16 srq_id;
+ u16 xrcd_id;
u8 stats_queue;
+ enum qed_rdma_qp_type qp_type;
+ u8 flags;
+#define QED_ROCE_EDPM_MODE_MASK 0x1
+#define QED_ROCE_EDPM_MODE_SHIFT 0
};
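
A one-line illustration of the new flags field, assuming the SET_FIELD() mask/shift helper defined in qed_if.h:

/* Hypothetical: request EDPM mode (mask 0x1 at shift 0) on QP creation. */
static void example_request_edpm(struct qed_rdma_create_qp_in_params *p)
{
        SET_FIELD(p->flags, QED_ROCE_EDPM_MODE, 1);
}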
struct qed_rdma_create_qp_out_params {
@@ -429,11 +419,13 @@ struct qed_rdma_create_srq_out_params {
struct qed_rdma_destroy_srq_in_params {
u16 srq_id;
+ bool is_xrc;
};
struct qed_rdma_modify_srq_in_params {
u32 wqe_limit;
u16 srq_id;
+ bool is_xrc;
};
struct qed_rdma_stats_out_params {
@@ -611,6 +603,8 @@ struct qed_rdma_ops {
int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt);
int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd);
void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd);
+ int (*rdma_alloc_xrcd)(void *rdma_cxt, u16 *xrcd);
+ void (*rdma_dealloc_xrcd)(void *rdma_cxt, u16 xrcd);
int (*rdma_create_cq)(void *rdma_cxt,
struct qed_rdma_create_cq_in_params *params,
u16 *icid);
@@ -668,7 +662,8 @@ struct qed_rdma_ops {
u8 connection_handle,
struct qed_ll2_stats *p_stats);
int (*ll2_set_mac_filter)(struct qed_dev *cdev,
- u8 *old_mac_address, u8 *new_mac_address);
+ u8 *old_mac_address,
+ const u8 *new_mac_address);
int (*iwarp_set_engine_affin)(struct qed_dev *cdev, bool b_reset);
diff --git a/include/linux/qed/qede_rdma.h b/include/linux/qed/qede_rdma.h
index 5a00c7a473bf..0d5564a59a59 100644
--- a/include/linux/qed/qede_rdma.h
+++ b/include/linux/qed/qede_rdma.h
@@ -1,34 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qedr NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
+
#ifndef QEDE_ROCE_H
#define QEDE_ROCE_H
@@ -45,7 +20,8 @@ enum qede_rdma_event {
QEDE_UP,
QEDE_DOWN,
QEDE_CHANGE_ADDR,
- QEDE_CLOSE
+ QEDE_CLOSE,
+ QEDE_CHANGE_MTU,
};
struct qede_rdma_event_work {
@@ -79,6 +55,7 @@ void qede_rdma_dev_event_open(struct qede_dev *dev);
void qede_rdma_dev_event_close(struct qede_dev *dev);
void qede_rdma_dev_remove(struct qede_dev *dev, bool recovery);
void qede_rdma_event_changeaddr(struct qede_dev *edr);
+void qede_rdma_event_change_mtu(struct qede_dev *edev);
#else
static inline int qede_rdma_dev_add(struct qede_dev *dev,
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index 480a57eb36cc..6dfed163ab6c 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __RDMA_COMMON__
@@ -53,6 +27,7 @@
#define RDMA_MAX_PDS (64 * 1024)
#define RDMA_MAX_XRC_SRQS (1024)
#define RDMA_MAX_SRQS (32 * 1024)
+#define RDMA_MAX_IRQ_ELEMS_IN_PAGE (128)
#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
index 473fba76aa77..ccddd7a96b67 100644
--- a/include/linux/qed/roce_common.h
+++ b/include/linux/qed/roce_common.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __ROCE_COMMON__
diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h
index 9a973ffbbff5..91896e8793bf 100644
--- a/include/linux/qed/storage_common.h
+++ b/include/linux/qed/storage_common.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __STORAGE_COMMON__
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
index 4a4845193539..2b2c87d10e0a 100644
--- a/include/linux/qed/tcp_common.h
+++ b/include/linux/qed/tcp_common.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __TCP_COMMON__
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 27aab84fcbaa..fd692b4a41d5 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -91,7 +91,7 @@ extern bool qid_valid(struct kqid qid);
*
* When there is no mapping defined for the user-namespace, type,
* qid tuple an invalid kqid is returned. Callers are expected to
- * test for and handle handle invalid kqids being returned.
+ * test for and handle invalid kqids being returned.
* Invalid kqids may be tested for using qid_valid().
*/
static inline struct kqid make_kqid(struct user_namespace *from,
@@ -448,17 +448,18 @@ struct quota_format_type {
};
/**
- * Quota state flags - they actually come in two flavors - for users and groups.
+ * Quota state flags - they come in three flavors - for users, groups and projects.
*
* Actual typed flags layout:
- * USRQUOTA GRPQUOTA
- * DQUOT_USAGE_ENABLED 0x0001 0x0002
- * DQUOT_LIMITS_ENABLED 0x0004 0x0008
- * DQUOT_SUSPENDED 0x0010 0x0020
+ * USRQUOTA GRPQUOTA PRJQUOTA
+ * DQUOT_USAGE_ENABLED 0x0001 0x0002 0x0004
+ * DQUOT_LIMITS_ENABLED 0x0008 0x0010 0x0020
+ * DQUOT_SUSPENDED 0x0040 0x0080 0x0100
*
* Following bits are used for non-typed flags:
- * DQUOT_QUOTA_SYS_FILE 0x0040
- * DQUOT_NEGATIVE_USAGE 0x0080
+ * DQUOT_QUOTA_SYS_FILE 0x0200
+ * DQUOT_NEGATIVE_USAGE 0x0400
+ * DQUOT_NOLIST_DIRTY 0x0800
*/
enum {
_DQUOT_USAGE_ENABLED = 0, /* Track disk usage for users */
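
The typed values in the table above follow from shifting a base flag left by the quota type (USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2); the dquot_state_flag() helper elsewhere in this header implements this (assumed, not shown in this hunk). A worked example:

/* DQUOT_SUSPENDED (0x0040) << PRJQUOTA (2) == 0x0100, per the table. */
static inline unsigned int example_prj_suspended(void)
{
        return dquot_state_flag(DQUOT_SUSPENDED, PRJQUOTA);
}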
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 9cf0cd3dc88c..0d8625d71733 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -20,16 +20,14 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb)
}
/* i_mutex must be held */
-static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
+static inline bool is_quota_modification(struct user_namespace *mnt_userns,
+ struct inode *inode, struct iattr *ia)
{
- return (ia->ia_valid & ATTR_SIZE) ||
- (ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) ||
- (ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid));
+ return ((ia->ia_valid & ATTR_SIZE) ||
+ i_uid_needs_update(mnt_userns, ia, inode) ||
+ i_gid_needs_update(mnt_userns, ia, inode));
}
-int kernel_quotactl(unsigned int cmd, const char __user *special,
- qid_t id, void __user *addr);
-
#if defined(CONFIG_QUOTA)
#define quota_error(sb, fmt, args...) \
@@ -118,7 +116,8 @@ int dquot_set_dqblk(struct super_block *sb, struct kqid id,
struct qc_dqblk *di);
int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
-int dquot_transfer(struct inode *inode, struct iattr *iattr);
+int dquot_transfer(struct user_namespace *mnt_userns, struct inode *inode,
+ struct iattr *iattr);
static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type)
{
@@ -237,7 +236,8 @@ static inline void dquot_free_inode(struct inode *inode)
{
}
-static inline int dquot_transfer(struct inode *inode, struct iattr *iattr)
+static inline int dquot_transfer(struct user_namespace *mnt_userns,
+ struct inode *inode, struct iattr *iattr)
{
return 0;
}
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 63e62372443a..eae67015ce51 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -9,18 +9,30 @@
#define _LINUX_RADIX_TREE_H
#include <linux/bitops.h>
-#include <linux/kernel.h>
+#include <linux/gfp_types.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/math.h>
+#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/xarray.h>
+#include <linux/local_lock.h>
/* Keep unconverted code working */
#define radix_tree_root xarray
#define radix_tree_node xa_node
+struct radix_tree_preload {
+ local_lock_t lock;
+ unsigned nr;
+ /* nodes->parent points to next preallocated node */
+ struct radix_tree_node *nodes;
+};
+DECLARE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
+
/*
* The bottom two bits of the slot determine how the remaining bits in the
* slot are interpreted:
@@ -245,7 +257,7 @@ int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);
static inline void radix_tree_preload_end(void)
{
- preempt_enable();
+ local_unlock(&radix_tree_preloads.lock);
}
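
The per-CPU pool and local lock above serve the classic preload pattern: preallocate nodes outside the tree lock, insert under it, then release; a minimal sketch:

/* radix_tree_preload() takes the local lock on success;
 * radix_tree_preload_end() releases it.
 */
static int example_insert(struct radix_tree_root *root, unsigned long index,
                          void *item, spinlock_t *lock)
{
        int err = radix_tree_preload(GFP_KERNEL);

        if (err)
                return err;
        spin_lock(lock);
        err = radix_tree_insert(root, index, item);
        spin_unlock(lock);
        radix_tree_preload_end();
        return err;
}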
void __rcu **idr_get_free(struct radix_tree_root *root,
@@ -367,7 +379,7 @@ radix_tree_chunk_size(struct radix_tree_iter *iter)
* radix_tree_next_slot - find next slot in chunk
*
* @slot: pointer to current slot
- * @iter: pointer to interator state
+ * @iter: pointer to iterator state
* @flags: RADIX_TREE_ITER_*, should be constant
 * Returns: pointer to next slot, or NULL if there are none left
*
diff --git a/include/linux/raid/detect.h b/include/linux/raid/detect.h
new file mode 100644
index 000000000000..1f029a71c3ef
--- /dev/null
+++ b/include/linux/raid/detect.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+void md_autodetect_dev(dev_t dev);
+
+#ifdef CONFIG_BLK_DEV_MD
+void md_run_setup(void);
+#else
+static inline void md_run_setup(void)
+{
+}
+#endif
diff --git a/include/linux/raid/md_u.h b/include/linux/raid/md_u.h
deleted file mode 100644
index 8dfec085a20e..000000000000
--- a/include/linux/raid/md_u.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- md_u.h : user <=> kernel API between Linux raidtools and RAID drivers
- Copyright (C) 1998 Ingo Molnar
-
-*/
-#ifndef _MD_U_H
-#define _MD_U_H
-
-#include <uapi/linux/raid/md_u.h>
-
-extern int mdp_major;
-#endif
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 154e954b711d..d6e5a1feb947 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -81,7 +81,7 @@ struct raid6_calls {
void (*xor_syndrome)(int, int, int, size_t, void **);
int (*valid)(void); /* Returns 1 if this routine set is usable */
const char *name; /* Name of this routine set */
- int prefer; /* Has special performance attribute */
+ int priority; /* Relative priority ranking if non-zero */
};
/* Selected algorithm */
diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h
index 2a9fee8ddae3..51b811b62322 100644
--- a/include/linux/raid/xor.h
+++ b/include/linux/raid/xor.h
@@ -11,13 +11,20 @@ struct xor_block_template {
struct xor_block_template *next;
const char *name;
int speed;
- void (*do_2)(unsigned long, unsigned long *, unsigned long *);
- void (*do_3)(unsigned long, unsigned long *, unsigned long *,
- unsigned long *);
- void (*do_4)(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *);
- void (*do_5)(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *, unsigned long *);
+ void (*do_2)(unsigned long, unsigned long * __restrict,
+ const unsigned long * __restrict);
+ void (*do_3)(unsigned long, unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict);
+ void (*do_4)(unsigned long, unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict);
+ void (*do_5)(unsigned long, unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict);
};
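
A hedged sketch of what one routine looks like under the new prototypes (simplified; not the kernel's actual implementation): const plus __restrict asserts that source and destination never alias, so the compiler can vectorize the loop freely.

static void example_xor_2(unsigned long bytes,
                          unsigned long * __restrict p1,
                          const unsigned long * __restrict p2)
{
        long lines = bytes / sizeof(long);

        while (lines--)
                *p1++ ^= *p2++;
}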
#endif
diff --git a/include/linux/random.h b/include/linux/random.h
index d319f9a1e429..147a5e0d0b8e 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -1,58 +1,47 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * include/linux/random.h
- *
- * Include file for the random number generator.
- */
+
#ifndef _LINUX_RANDOM_H
#define _LINUX_RANDOM_H
+#include <linux/bug.h>
+#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/once.h>
#include <uapi/linux/random.h>
-struct random_ready_callback {
- struct list_head list;
- void (*func)(struct random_ready_callback *rdy);
- struct module *owner;
-};
+struct notifier_block;
-extern void add_device_randomness(const void *, unsigned int);
-extern void add_bootloader_randomness(const void *, unsigned int);
+void add_device_randomness(const void *buf, size_t len);
+void __init add_bootloader_randomness(const void *buf, size_t len);
+void add_input_randomness(unsigned int type, unsigned int code,
+ unsigned int value) __latent_entropy;
+void add_interrupt_randomness(int irq) __latent_entropy;
+void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
static inline void add_latent_entropy(void)
{
- add_device_randomness((const void *)&latent_entropy,
- sizeof(latent_entropy));
+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
}
#else
-static inline void add_latent_entropy(void) {}
+static inline void add_latent_entropy(void) { }
#endif
-extern void add_input_randomness(unsigned int type, unsigned int code,
- unsigned int value) __latent_entropy;
-extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
-
-extern void get_random_bytes(void *buf, int nbytes);
-extern int wait_for_random_bytes(void);
-extern int __init rand_initialize(void);
-extern bool rng_is_initialized(void);
-extern int add_random_ready_callback(struct random_ready_callback *rdy);
-extern void del_random_ready_callback(struct random_ready_callback *rdy);
-extern int __must_check get_random_bytes_arch(void *buf, int nbytes);
-
-#ifndef MODULE
-extern const struct file_operations random_fops, urandom_fops;
+#if IS_ENABLED(CONFIG_VMGENID)
+void add_vmfork_randomness(const void *unique_vm_id, size_t len);
+int register_random_vmfork_notifier(struct notifier_block *nb);
+int unregister_random_vmfork_notifier(struct notifier_block *nb);
+#else
+static inline int register_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
+static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
#endif
+void get_random_bytes(void *buf, size_t len);
+u8 get_random_u8(void);
+u16 get_random_u16(void);
u32 get_random_u32(void);
u64 get_random_u64(void);
-static inline unsigned int get_random_int(void)
-{
- return get_random_u32();
-}
static inline unsigned long get_random_long(void)
{
#if BITS_PER_LONG == 64
@@ -78,117 +67,74 @@ static inline unsigned long get_random_long(void)
static inline unsigned long get_random_canary(void)
{
- unsigned long val = get_random_long();
-
- return val & CANARY_MASK;
+ return get_random_long() & CANARY_MASK;
}
+void __init random_init_early(const char *command_line);
+void __init random_init(void);
+bool rng_is_initialized(void);
+int wait_for_random_bytes(void);
+
/* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
 * Returns the result of the call to wait_for_random_bytes(). */
-static inline int get_random_bytes_wait(void *buf, int nbytes)
+static inline int get_random_bytes_wait(void *buf, size_t nbytes)
{
int ret = wait_for_random_bytes();
get_random_bytes(buf, nbytes);
return ret;
}
-#define declare_get_random_var_wait(var) \
- static inline int get_random_ ## var ## _wait(var *out) { \
+#define declare_get_random_var_wait(name, ret_type) \
+ static inline int get_random_ ## name ## _wait(ret_type *out) { \
int ret = wait_for_random_bytes(); \
if (unlikely(ret)) \
return ret; \
- *out = get_random_ ## var(); \
+ *out = get_random_ ## name(); \
return 0; \
}
-declare_get_random_var_wait(u32)
-declare_get_random_var_wait(u64)
-declare_get_random_var_wait(int)
-declare_get_random_var_wait(long)
+declare_get_random_var_wait(u8, u8)
+declare_get_random_var_wait(u16, u16)
+declare_get_random_var_wait(u32, u32)
+declare_get_random_var_wait(u64, u64)
+declare_get_random_var_wait(long, unsigned long)
#undef declare_get_random_var_wait
-unsigned long randomize_page(unsigned long start, unsigned long range);
-
-u32 prandom_u32(void);
-void prandom_bytes(void *buf, size_t nbytes);
-void prandom_seed(u32 seed);
-void prandom_reseed_late(void);
-
-struct rnd_state {
- __u32 s1, s2, s3, s4;
-};
-
-u32 prandom_u32_state(struct rnd_state *state);
-void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
-void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
-
-#define prandom_init_once(pcpu_state) \
- DO_ONCE(prandom_seed_full_state, (pcpu_state))
-
-/**
- * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
- * @ep_ro: right open interval endpoint
- *
- * Returns a pseudo-random number that is in interval [0, ep_ro). Note
- * that the result depends on PRNG being well distributed in [0, ~0U]
- * u32 space. Here we use maximally equidistributed combined Tausworthe
- * generator, that is, prandom_u32(). This is useful when requesting a
- * random index of an array containing ep_ro elements, for example.
- *
- * Returns: pseudo-random number in interval [0, ep_ro)
+/*
+ * This is designed to be standalone for just prandom
+ * users, but for now we include it from <linux/random.h>
+ * for legacy reasons.
*/
-static inline u32 prandom_u32_max(u32 ep_ro)
-{
- return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
-}
+#include <linux/prandom.h>
+
+#include <asm/archrandom.h>
/*
- * Handle minimum values for seeds
+ * Called from the boot CPU during startup; not valid to call once
+ * secondary CPUs are up and preemption is possible.
*/
-static inline u32 __seed(u32 x, u32 m)
+#ifndef arch_get_random_seed_longs_early
+static inline size_t __init arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs)
{
- return (x < m) ? x + m : x;
+ WARN_ON(system_state != SYSTEM_BOOTING);
+ return arch_get_random_seed_longs(v, max_longs);
}
+#endif
-/**
- * prandom_seed_state - set seed for prandom_u32_state().
- * @state: pointer to state structure to receive the seed.
- * @seed: arbitrary 64-bit value to use as a seed.
- */
-static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
+#ifndef arch_get_random_longs_early
+static inline size_t __init arch_get_random_longs_early(unsigned long *v, size_t max_longs)
{
- u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
-
- state->s1 = __seed(i, 2U);
- state->s2 = __seed(i, 8U);
- state->s3 = __seed(i, 16U);
- state->s4 = __seed(i, 128U);
+ WARN_ON(system_state != SYSTEM_BOOTING);
+ return arch_get_random_longs(v, max_longs);
}
+#endif
-#ifdef CONFIG_ARCH_RANDOM
-# include <asm/archrandom.h>
-#else
-static inline bool __must_check arch_get_random_long(unsigned long *v)
-{
- return false;
-}
-static inline bool __must_check arch_get_random_int(unsigned int *v)
-{
- return false;
-}
-static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
-{
- return false;
-}
-static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
-{
- return false;
-}
+#ifdef CONFIG_SMP
+int random_prepare_cpu(unsigned int cpu);
+int random_online_cpu(unsigned int cpu);
#endif
-/* Pseudo random number generator from numerical recipes. */
-static inline u32 next_pseudo_random32(u32 seed)
-{
- return seed * 1664525 + 1013904223;
-}
+#ifndef MODULE
+extern const struct file_operations random_fops, urandom_fops;
+#endif
#endif /* _LINUX_RANDOM_H */
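
A quick usage sketch of the interfaces that survive this rewrite; the wrapper function is illustrative, but the calls shown are the header's own API:

static int example_get_keys(void)
{
	u32 token;
	u8 nonce[16];
	int ret;

	/* The _wait variant sleeps until the RNG is initialized. */
	ret = get_random_u32_wait(&token);
	if (ret)
		return ret;	/* interrupted by a signal */

	/* Never blocks; output is best-effort before RNG init. */
	get_random_bytes(nonce, sizeof(nonce));
	return 0;
}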
diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h
new file mode 100644
index 000000000000..5d868505a94e
--- /dev/null
+++ b/include/linux/randomize_kstack.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LINUX_RANDOMIZE_KSTACK_H
+#define _LINUX_RANDOMIZE_KSTACK_H
+
+#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
+#include <linux/kernel.h>
+#include <linux/jump_label.h>
+#include <linux/percpu-defs.h>
+
+DECLARE_STATIC_KEY_MAYBE(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
+ randomize_kstack_offset);
+DECLARE_PER_CPU(u32, kstack_offset);
+
+/*
+ * Do not use this anywhere else in the kernel. This is used here because
+ * it provides an arch-agnostic way to grow the stack with correct
+ * alignment. Also, since this use is being explicitly masked to a max of
+ * 10 bits, stack-clash style attacks are unlikely. For more details see
+ * "VLAs" in Documentation/process/deprecated.rst
+ *
+ * The normal __builtin_alloca() is initialized with INIT_STACK_ALL (currently
+ * only with Clang and not GCC). Initializing the unused area on each syscall
+ * entry is expensive, and generating an implicit call to memset() may also be
+ * problematic (such as in noinstr functions). Therefore, if the compiler
+ * supports it (which it should if it initializes allocas), always use the
+ * "uninitialized" variant of the builtin.
+ */
+#if __has_builtin(__builtin_alloca_uninitialized)
+#define __kstack_alloca __builtin_alloca_uninitialized
+#else
+#define __kstack_alloca __builtin_alloca
+#endif
+
+/*
+ * Use, at most, 10 bits of entropy. We explicitly cap this to keep the
+ * "VLA" from being unbounded (see above). 10 bits leaves enough room for
+ * per-arch offset masks to reduce entropy (by removing higher bits, since
+ * high entropy may overly constrain usable stack space), and for
+ * compiler/arch-specific stack alignment to remove the lower bits.
+ */
+#define KSTACK_OFFSET_MAX(x) ((x) & 0x3FF)
+
+/**
+ * add_random_kstack_offset - Increase stack utilization by previously
+ * chosen random offset
+ *
+ * This should be used in the syscall entry path when interrupts and
+ * preempt are disabled, and after user registers have been stored to
+ * the stack. For testing the resulting entropy, please see:
+ * tools/testing/selftests/lkdtm/stack-entropy.sh
+ */
+#define add_random_kstack_offset() do { \
+ if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
+ &randomize_kstack_offset)) { \
+ u32 offset = raw_cpu_read(kstack_offset); \
+ u8 *ptr = __kstack_alloca(KSTACK_OFFSET_MAX(offset)); \
+ /* Keep allocation even after "ptr" loses scope. */ \
+ asm volatile("" :: "r"(ptr) : "memory"); \
+ } \
+} while (0)
+
+/**
+ * choose_random_kstack_offset - Choose the random offset for the next
+ * add_random_kstack_offset()
+ *
+ * This should only be used during syscall exit when interrupts and
+ * preempt are disabled. This position in the syscall flow is done to
+ * frustrate attacks from userspace attempting to learn the next offset:
+ * - Maximize the timing uncertainty visible from userspace: if the
+ * offset is chosen at syscall entry, userspace has much more control
+ * over the timing between choosing offsets. "How long will we be in
+ * kernel mode?" tends to be more difficult to predict than "how long
+ * will we be in user mode?"
+ * - Reduce the lifetime of the new offset sitting in memory during
+ * kernel mode execution. Exposure of "thread-local" memory content
+ * (e.g. current, percpu, etc) tends to be easier than arbitrary
+ * location memory exposure.
+ */
+#define choose_random_kstack_offset(rand) do { \
+ if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
+ &randomize_kstack_offset)) { \
+ u32 offset = raw_cpu_read(kstack_offset); \
+ offset ^= (rand); \
+ raw_cpu_write(kstack_offset, offset); \
+ } \
+} while (0)
+#else /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
+#define add_random_kstack_offset() do { } while (0)
+#define choose_random_kstack_offset(rand) do { } while (0)
+#endif /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
+
+#endif
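
For context, the two macros are meant to be paired across the syscall boundary. A sketch of the intended arch-side usage; the function names and the entropy source are illustrative (real architectures typically feed in a cycle counter rather than calling the RNG):

static void example_syscall_entry(void)
{
	/* Consume the offset chosen at the end of the previous syscall. */
	add_random_kstack_offset();
	/* ... dispatch the system call ... */
}

static void example_syscall_exit(void)
{
	/* Choose the offset for the *next* entry; cheap entropy suffices. */
	choose_random_kstack_offset(get_random_u16());
}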
diff --git a/include/linux/range.h b/include/linux/range.h
index d1fbeb664012..274681cc3154 100644
--- a/include/linux/range.h
+++ b/include/linux/range.h
@@ -1,12 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RANGE_H
#define _LINUX_RANGE_H
+#include <linux/types.h>
struct range {
u64 start;
u64 end;
};
+static inline u64 range_len(const struct range *range)
+{
+ return range->end - range->start + 1;
+}
+
int add_range(struct range *range, int az, int nr_range,
u64 start, u64 end);
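
Note that struct range uses inclusive end bounds, hence the +1 in range_len(). For example:

	struct range r = { .start = 0x1000, .end = 0x1fff };

	/* range_len(&r) == 0x1000: one page, end is the last valid byte. */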
diff --git a/include/linux/ras.h b/include/linux/ras.h
index 7c3debb47c87..1f4048bf2674 100644
--- a/include/linux/ras.h
+++ b/include/linux/ras.h
@@ -17,12 +17,7 @@ static inline int ras_add_daemon_trace(void) { return 0; }
#endif
#ifdef CONFIG_RAS_CEC
-void __init cec_init(void);
int __init parse_cec_param(char *str);
-int cec_add_elem(u64 pfn);
-#else
-static inline void __init cec_init(void) { }
-static inline int cec_add_elem(u64 pfn) { return -ENODEV; }
#endif
#ifdef CONFIG_RAS
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 8ddf79e9207a..b17e0cd0a30c 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -2,41 +2,10 @@
#ifndef _LINUX_RATELIMIT_H
#define _LINUX_RATELIMIT_H
-#include <linux/param.h>
+#include <linux/ratelimit_types.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
-#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ)
-#define DEFAULT_RATELIMIT_BURST 10
-
-/* issue num suppressed message on exit */
-#define RATELIMIT_MSG_ON_RELEASE BIT(0)
-
-struct ratelimit_state {
- raw_spinlock_t lock; /* protect the state */
-
- int interval;
- int burst;
- int printed;
- int missed;
- unsigned long begin;
- unsigned long flags;
-};
-
-#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \
- .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
- .interval = interval_init, \
- .burst = burst_init, \
- }
-
-#define RATELIMIT_STATE_INIT_DISABLED \
- RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST)
-
-#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \
- \
- struct ratelimit_state name = \
- RATELIMIT_STATE_INIT(name, interval_init, burst_init) \
-
static inline void ratelimit_state_init(struct ratelimit_state *rs,
int interval, int burst)
{
@@ -73,9 +42,6 @@ ratelimit_set_flags(struct ratelimit_state *rs, unsigned long flags)
extern struct ratelimit_state printk_ratelimit_state;
-extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
-#define __ratelimit(state) ___ratelimit(state, __func__)
-
#ifdef CONFIG_PRINTK
#define WARN_ON_RATELIMIT(condition, state) ({ \
diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h
new file mode 100644
index 000000000000..002266693e50
--- /dev/null
+++ b/include/linux/ratelimit_types.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_RATELIMIT_TYPES_H
+#define _LINUX_RATELIMIT_TYPES_H
+
+#include <linux/bits.h>
+#include <linux/param.h>
+#include <linux/spinlock_types_raw.h>
+
+#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ)
+#define DEFAULT_RATELIMIT_BURST 10
+
+/* issue num suppressed message on exit */
+#define RATELIMIT_MSG_ON_RELEASE BIT(0)
+
+struct ratelimit_state {
+ raw_spinlock_t lock; /* protect the state */
+
+ int interval;
+ int burst;
+ int printed;
+ int missed;
+ unsigned long begin;
+ unsigned long flags;
+};
+
+#define RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, flags_init) { \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+ .interval = interval_init, \
+ .burst = burst_init, \
+ .flags = flags_init, \
+ }
+
+#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) \
+ RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, 0)
+
+#define RATELIMIT_STATE_INIT_DISABLED \
+ RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST)
+
+#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \
+ \
+ struct ratelimit_state name = \
+ RATELIMIT_STATE_INIT(name, interval_init, burst_init) \
+
+extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
+#define __ratelimit(state) ___ratelimit(state, __func__)
+
+#endif /* _LINUX_RATELIMIT_TYPES_H */
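
The split lets low-level headers embed a struct ratelimit_state without dragging in scheduler and spinlock headers; callers are unaffected. Typical use, for illustration:

/* Allow a burst of 10 messages per 5-second interval, then suppress. */
static DEFINE_RATELIMIT_STATE(example_rs, DEFAULT_RATELIMIT_INTERVAL,
			      DEFAULT_RATELIMIT_BURST);

static void example_event(void)
{
	if (__ratelimit(&example_rs))
		pr_warn("example: event fired\n");
}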
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index 1fd61a9af45c..f7edca369eda 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -11,30 +11,20 @@
 I know it's not the cleanest way, but in C (not in C++) to get
 performance and genericity...
- See Documentation/rbtree.txt for documentation and samples.
+ See Documentation/core-api/rbtree.rst for documentation and samples.
*/
#ifndef _LINUX_RBTREE_H
#define _LINUX_RBTREE_H
-#include <linux/kernel.h>
+#include <linux/container_of.h>
+#include <linux/rbtree_types.h>
+
#include <linux/stddef.h>
#include <linux/rcupdate.h>
-struct rb_node {
- unsigned long __rb_parent_color;
- struct rb_node *rb_right;
- struct rb_node *rb_left;
-} __attribute__((aligned(sizeof(long))));
- /* The alignment might seem pointless, but allegedly CRIS needs it */
-
-struct rb_root {
- struct rb_node *rb_node;
-};
-
#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
-#define RB_ROOT (struct rb_root) { NULL, }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
#define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL)
@@ -112,23 +102,6 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent
typeof(*pos), field); 1; }); \
pos = n)
-/*
- * Leftmost-cached rbtrees.
- *
- * We do not cache the rightmost node based on footprint
- * size vs number of potential users that could benefit
- * from O(1) rb_last(). Just not worth it, users that want
- * this feature can always implement the logic explicitly.
- * Furthermore, users that want to cache both pointers may
- * find it a bit asymmetric, but that's ok.
- */
-struct rb_root_cached {
- struct rb_root rb_root;
- struct rb_node *rb_leftmost;
-};
-
-#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
-
/* Same as rb_first(), but O(1) */
#define rb_first_cached(root) (root)->rb_leftmost
@@ -141,12 +114,18 @@ static inline void rb_insert_color_cached(struct rb_node *node,
rb_insert_color(node, &root->rb_root);
}
-static inline void rb_erase_cached(struct rb_node *node,
- struct rb_root_cached *root)
+
+static inline struct rb_node *
+rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
{
+ struct rb_node *leftmost = NULL;
+
if (root->rb_leftmost == node)
- root->rb_leftmost = rb_next(node);
+ leftmost = root->rb_leftmost = rb_next(node);
+
rb_erase(node, &root->rb_root);
+
+ return leftmost;
}
static inline void rb_replace_node_cached(struct rb_node *victim,
@@ -158,4 +137,198 @@ static inline void rb_replace_node_cached(struct rb_node *victim,
rb_replace_node(victim, new, &root->rb_root);
}
+/*
+ * The below helper functions use 2 operators with 3 different
+ * calling conventions. The operators are related like:
+ *
+ * comp(a->key,b) < 0 := less(a,b)
+ * comp(a->key,b) > 0 := less(b,a)
+ * comp(a->key,b) == 0 := !less(a,b) && !less(b,a)
+ *
+ * If these operators define a partial order on the elements we make no
+ * guarantee on which of the elements matching the key is found. See
+ * rb_find().
+ *
+ * The reason for this is to allow the find() interface without requiring an
+ * on-stack dummy object, which might not be feasible due to object size.
+ */
+
+/**
+ * rb_add_cached() - insert @node into the leftmost cached tree @tree
+ * @node: node to insert
+ * @tree: leftmost cached tree to insert @node into
+ * @less: operator defining the (partial) node order
+ *
+ * Returns @node when it is the new leftmost, or NULL.
+ */
+static __always_inline struct rb_node *
+rb_add_cached(struct rb_node *node, struct rb_root_cached *tree,
+ bool (*less)(struct rb_node *, const struct rb_node *))
+{
+ struct rb_node **link = &tree->rb_root.rb_node;
+ struct rb_node *parent = NULL;
+ bool leftmost = true;
+
+ while (*link) {
+ parent = *link;
+ if (less(node, parent)) {
+ link = &parent->rb_left;
+ } else {
+ link = &parent->rb_right;
+ leftmost = false;
+ }
+ }
+
+ rb_link_node(node, parent, link);
+ rb_insert_color_cached(node, tree, leftmost);
+
+ return leftmost ? node : NULL;
+}
+
+/**
+ * rb_add() - insert @node into @tree
+ * @node: node to insert
+ * @tree: tree to insert @node into
+ * @less: operator defining the (partial) node order
+ */
+static __always_inline void
+rb_add(struct rb_node *node, struct rb_root *tree,
+ bool (*less)(struct rb_node *, const struct rb_node *))
+{
+ struct rb_node **link = &tree->rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*link) {
+ parent = *link;
+ if (less(node, parent))
+ link = &parent->rb_left;
+ else
+ link = &parent->rb_right;
+ }
+
+ rb_link_node(node, parent, link);
+ rb_insert_color(node, tree);
+}
+
+/**
+ * rb_find_add() - find equivalent @node in @tree, or add @node
+ * @node: node to look-for / insert
+ * @tree: tree to search / modify
+ * @cmp: operator defining the node order
+ *
+ * Returns the rb_node matching @node, or NULL when no match is found and @node
+ * is inserted.
+ */
+static __always_inline struct rb_node *
+rb_find_add(struct rb_node *node, struct rb_root *tree,
+ int (*cmp)(struct rb_node *, const struct rb_node *))
+{
+ struct rb_node **link = &tree->rb_node;
+ struct rb_node *parent = NULL;
+ int c;
+
+ while (*link) {
+ parent = *link;
+ c = cmp(node, parent);
+
+ if (c < 0)
+ link = &parent->rb_left;
+ else if (c > 0)
+ link = &parent->rb_right;
+ else
+ return parent;
+ }
+
+ rb_link_node(node, parent, link);
+ rb_insert_color(node, tree);
+ return NULL;
+}
+
+/**
+ * rb_find() - find @key in tree @tree
+ * @key: key to match
+ * @tree: tree to search
+ * @cmp: operator defining the node order
+ *
+ * Returns the rb_node matching @key or NULL.
+ */
+static __always_inline struct rb_node *
+rb_find(const void *key, const struct rb_root *tree,
+ int (*cmp)(const void *key, const struct rb_node *))
+{
+ struct rb_node *node = tree->rb_node;
+
+ while (node) {
+ int c = cmp(key, node);
+
+ if (c < 0)
+ node = node->rb_left;
+ else if (c > 0)
+ node = node->rb_right;
+ else
+ return node;
+ }
+
+ return NULL;
+}
+
+/**
+ * rb_find_first() - find the first @key in @tree
+ * @key: key to match
+ * @tree: tree to search
+ * @cmp: operator defining node order
+ *
+ * Returns the leftmost node matching @key, or NULL.
+ */
+static __always_inline struct rb_node *
+rb_find_first(const void *key, const struct rb_root *tree,
+ int (*cmp)(const void *key, const struct rb_node *))
+{
+ struct rb_node *node = tree->rb_node;
+ struct rb_node *match = NULL;
+
+ while (node) {
+ int c = cmp(key, node);
+
+ if (c <= 0) {
+ if (!c)
+ match = node;
+ node = node->rb_left;
+ } else if (c > 0) {
+ node = node->rb_right;
+ }
+ }
+
+ return match;
+}
+
+/**
+ * rb_next_match() - find the next @key after @node
+ * @key: key to match
+ * @node: current node matching @key
+ * @cmp: operator defining node order
+ *
+ * Returns the next node matching @key, or NULL.
+ */
+static __always_inline struct rb_node *
+rb_next_match(const void *key, struct rb_node *node,
+ int (*cmp)(const void *key, const struct rb_node *))
+{
+ node = rb_next(node);
+ if (node && cmp(key, node))
+ node = NULL;
+ return node;
+}
+
+/**
+ * rb_for_each() - iterates a subtree matching @key
+ * @node: iterator
+ * @key: key to match
+ * @tree: tree to search
+ * @cmp: operator defining node order
+ */
+#define rb_for_each(node, key, tree, cmp) \
+ for ((node) = rb_find_first((key), (tree), (cmp)); \
+ (node); (node) = rb_next_match((key), (node), (cmp)))
+
#endif /* _LINUX_RBTREE_H */
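
A sketch of how a caller uses the new helpers; struct item and its two comparators are assumptions for illustration, not part of this patch:

struct item {
	struct rb_node node;
	u64 key;
};

static bool item_less(struct rb_node *a, const struct rb_node *b)
{
	return rb_entry(a, struct item, node)->key <
	       rb_entry(b, struct item, node)->key;
}

static int item_cmp(const void *key, const struct rb_node *n)
{
	u64 k = *(const u64 *)key;
	u64 nk = rb_entry(n, struct item, node)->key;

	return (k < nk) ? -1 : (k > nk);
}

/*
 * Insert:	rb_add(&it->node, &root, item_less);
 * Lookup:	struct rb_node *n = rb_find(&key, &root, item_cmp);
 */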
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index 724b0d036b57..d1c53e9d8c75 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -21,7 +21,7 @@
* rb_insert_augmented() and rb_erase_augmented() are intended to be public.
* The rest are implementation details you are not expected to depend on.
*
- * See Documentation/rbtree.txt for documentation and samples.
+ * See Documentation/core-api/rbtree.rst for documentation and samples.
*/
struct rb_augment_callbacks {
diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h
index 7d012faa509a..3d1a9e716b80 100644
--- a/include/linux/rbtree_latch.h
+++ b/include/linux/rbtree_latch.h
@@ -42,8 +42,8 @@ struct latch_tree_node {
};
struct latch_tree_root {
- seqcount_t seq;
- struct rb_root tree[2];
+ seqcount_latch_t seq;
+ struct rb_root tree[2];
};
/**
@@ -206,7 +206,7 @@ latch_tree_find(void *key, struct latch_tree_root *root,
do {
seq = raw_read_seqcount_latch(&root->seq);
node = __lt_find(key, root, seq & 1, ops->comp);
- } while (read_seqcount_retry(&root->seq, seq));
+ } while (read_seqcount_latch_retry(&root->seq, seq));
return node;
}
diff --git a/include/linux/rbtree_types.h b/include/linux/rbtree_types.h
new file mode 100644
index 000000000000..45b6ecde3665
--- /dev/null
+++ b/include/linux/rbtree_types.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_RBTREE_TYPES_H
+#define _LINUX_RBTREE_TYPES_H
+
+struct rb_node {
+ unsigned long __rb_parent_color;
+ struct rb_node *rb_right;
+ struct rb_node *rb_left;
+} __attribute__((aligned(sizeof(long))));
+/* The alignment might seem pointless, but allegedly CRIS needs it */
+
+struct rb_root {
+ struct rb_node *rb_node;
+};
+
+/*
+ * Leftmost-cached rbtrees.
+ *
+ * We do not cache the rightmost node based on footprint
+ * size vs number of potential users that could benefit
+ * from O(1) rb_last(). Just not worth it, users that want
+ * this feature can always implement the logic explicitly.
+ * Furthermore, users that want to cache both pointers may
+ * find it a bit asymmetric, but that's ok.
+ */
+struct rb_root_cached {
+ struct rb_root rb_root;
+ struct rb_node *rb_leftmost;
+};
+
+#define RB_ROOT (struct rb_root) { NULL, }
+#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
+
+#endif
diff --git a/include/linux/rcu_node_tree.h b/include/linux/rcu_node_tree.h
index b8e094b125ee..78feb8ba7358 100644
--- a/include/linux/rcu_node_tree.h
+++ b/include/linux/rcu_node_tree.h
@@ -20,6 +20,8 @@
#ifndef __LINUX_RCU_NODE_TREE_H
#define __LINUX_RCU_NODE_TREE_H
+#include <linux/math.h>
+
/*
* Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
* CONFIG_RCU_FANOUT_LEAF.
diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h
index b36afe7b22c9..659d13a7ddaa 100644
--- a/include/linux/rcu_segcblist.h
+++ b/include/linux/rcu_segcblist.h
@@ -63,6 +63,146 @@ struct rcu_cblist {
#define RCU_NEXT_TAIL 3
#define RCU_CBLIST_NSEGS 4
+
+/*
+ * ==NOCB Offloading state machine==
+ *
+ *
+ * ----------------------------------------------------------------------------
+ * | SEGCBLIST_RCU_CORE |
+ * | |
+ * | Callbacks processed by rcu_core() from softirqs or local |
+ * | rcuc kthread, without holding nocb_lock. |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * ----------------------------------------------------------------------------
+ * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED |
+ * | |
+ * | Callbacks processed by rcu_core() from softirqs or local |
+ * | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads, |
+ * | allowing nocb_timer to be armed. |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * -----------------------------------
+ * | |
+ * v v
+ * --------------------------------------- ----------------------------------|
+ * | SEGCBLIST_RCU_CORE | | | SEGCBLIST_RCU_CORE | |
+ * | SEGCBLIST_LOCKING | | | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_OFFLOADED | | | SEGCBLIST_OFFLOADED | |
+ * | SEGCBLIST_KTHREAD_CB | | SEGCBLIST_KTHREAD_GP |
+ * | | | |
+ * | | | |
+ * | CB kthread woke up and | | GP kthread woke up and |
+ * | acknowledged SEGCBLIST_OFFLOADED. | | acknowledged SEGCBLIST_OFFLOADED|
+ * | Processes callbacks concurrently | | |
+ * | with rcu_core(), holding | | |
+ * | nocb_lock. | | |
+ * --------------------------------------- -----------------------------------
+ * | |
+ * -----------------------------------
+ * |
+ * v
+ * |--------------------------------------------------------------------------|
+ * | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_OFFLOADED | |
+ * | SEGCBLIST_KTHREAD_GP | |
+ * | SEGCBLIST_KTHREAD_CB |
+ * | |
+ * | Kthreads handle callbacks holding nocb_lock, local rcu_core() stops |
+ * | handling callbacks. Enable bypass queueing. |
+ * ----------------------------------------------------------------------------
+ */
+
+
+
+/*
+ * ==NOCB De-Offloading state machine==
+ *
+ *
+ * |--------------------------------------------------------------------------|
+ * | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_OFFLOADED | |
+ * | SEGCBLIST_KTHREAD_CB | |
+ * | SEGCBLIST_KTHREAD_GP |
+ * | |
+ * | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() |
+ * | ignores callbacks. Bypass enqueue is enabled. |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * |--------------------------------------------------------------------------|
+ * | SEGCBLIST_RCU_CORE | |
+ * | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_OFFLOADED | |
+ * | SEGCBLIST_KTHREAD_CB | |
+ * | SEGCBLIST_KTHREAD_GP |
+ * | |
+ * | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() |
+ * | handles callbacks concurrently. Bypass enqueue is enabled. |
+ * | Invoke rcu_core() so we make sure not to preempt it in the middle, |
+ * | which would leave some urgent work unattended within a jiffy. |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * |--------------------------------------------------------------------------|
+ * | SEGCBLIST_RCU_CORE | |
+ * | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_KTHREAD_CB | |
+ * | SEGCBLIST_KTHREAD_GP |
+ * | |
+ * | CB/GP kthreads and local rcu_core() handle callbacks concurrently |
+ * | holding nocb_lock. Wake up CB and GP kthreads if necessary. Disable |
+ * | bypass enqueue. |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * -----------------------------------
+ * | |
+ * v v
+ * ---------------------------------------------------------------------------|
+ * | | |
+ * | SEGCBLIST_RCU_CORE | | SEGCBLIST_RCU_CORE | |
+ * | SEGCBLIST_LOCKING | | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP |
+ * | | |
+ * | GP kthread woke up and | CB kthread woke up and |
+ * | acknowledged the fact that | acknowledged the fact that |
+ * | SEGCBLIST_OFFLOADED got cleared. | SEGCBLIST_OFFLOADED got cleared. |
+ * | | The CB kthread goes to sleep |
+ * | The callbacks from the target CPU | until it is re-offloaded. |
+ * | will be ignored by the GP kthread | |
+ * | loop. | |
+ * ----------------------------------------------------------------------------
+ * | |
+ * -----------------------------------
+ * |
+ * v
+ * ----------------------------------------------------------------------------
+ * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING |
+ * | |
+ * | Callbacks processed by rcu_core() from softirqs or local |
+ * | rcuc kthread, while holding nocb_lock. Forbid nocb_timer to be armed. |
+ * | Flush pending nocb_timer. Flush nocb bypass callbacks. |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * ----------------------------------------------------------------------------
+ * | SEGCBLIST_RCU_CORE |
+ * | |
+ * | Callbacks processed by rcu_core() from softirqs or local |
+ * | rcuc kthread, without holding nocb_lock. |
+ * ----------------------------------------------------------------------------
+ */
+#define SEGCBLIST_ENABLED BIT(0)
+#define SEGCBLIST_RCU_CORE BIT(1)
+#define SEGCBLIST_LOCKING BIT(2)
+#define SEGCBLIST_KTHREAD_CB BIT(3)
+#define SEGCBLIST_KTHREAD_GP BIT(4)
+#define SEGCBLIST_OFFLOADED BIT(5)
+
struct rcu_segcblist {
struct rcu_head *head;
struct rcu_head **tails[RCU_CBLIST_NSEGS];
@@ -72,8 +212,8 @@ struct rcu_segcblist {
#else
long len;
#endif
- u8 enabled;
- u8 offloaded;
+ long seglen[RCU_CBLIST_NSEGS];
+ u8 flags;
};
#define RCU_SEGCBLIST_INITIALIZER(n) \
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 9f313e4999fe..d29740be4833 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -11,15 +11,6 @@
#include <linux/rcupdate.h>
/*
- * Why is there no list_empty_rcu()? Because list_empty() serves this
- * purpose. The list_empty() function fetches the RCU-protected pointer
- * and compares it to the address of the list head, but neither dereferences
- * this pointer itself nor provides this pointer to the caller. Therefore,
- * it is not necessary to use rcu_dereference(), so that list_empty() can
- * be used anywhere you would want to use a list_empty_rcu().
- */
-
-/*
* INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
* @list: list to be initialized
*
@@ -60,12 +51,20 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
#define __list_check_rcu(dummy, cond, extra...) \
({ \
check_arg_count_one(extra); \
- RCU_LOCKDEP_WARN(!cond && !rcu_read_lock_any_held(), \
+ RCU_LOCKDEP_WARN(!(cond) && !rcu_read_lock_any_held(), \
"RCU-list traversed in non-reader section!"); \
- })
+ })
+
+#define __list_check_srcu(cond) \
+ ({ \
+ RCU_LOCKDEP_WARN(!(cond), \
+ "RCU-list traversed without holding the required lock!");\
+ })
#else
#define __list_check_rcu(dummy, cond, extra...) \
({ check_arg_count_one(extra); })
+
+#define __list_check_srcu(cond) ({ })
#endif
/*
@@ -248,6 +247,8 @@ static inline void __list_splice_init_rcu(struct list_head *list,
*/
sync();
+ ASSERT_EXCLUSIVE_ACCESS(*first);
+ ASSERT_EXCLUSIVE_ACCESS(*last);
/*
* Readers are finished with the source list, so perform splice.
@@ -308,21 +309,29 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
/*
* Where are list_empty_rcu() and list_first_entry_rcu()?
*
- * Implementing those functions following their counterparts list_empty() and
- * list_first_entry() is not advisable because they lead to subtle race
- * conditions as the following snippet shows:
+ * They do not exist because they would lead to subtle race conditions:
*
* if (!list_empty_rcu(mylist)) {
* struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
* do_something(bar);
* }
*
- * The list may not be empty when list_empty_rcu checks it, but it may be when
- * list_first_entry_rcu rereads the ->next pointer.
+ * The list might be non-empty when list_empty_rcu() checks it, but it
+ * might have become empty by the time that list_first_entry_rcu() rereads
+ * the ->next pointer, which would result in a SEGV.
*
- * Rereading the ->next pointer is not a problem for list_empty() and
- * list_first_entry() because they would be protected by a lock that blocks
- * writers.
+ * When not using RCU, it is OK for list_first_entry() to re-read that
+ * pointer because both functions should be protected by some lock that
+ * blocks writers.
+ *
+ * When using RCU, list_empty() uses READ_ONCE() to fetch the
+ * RCU-protected ->next pointer and then compares it to the address of the
+ * list head. However, it neither dereferences this pointer nor provides
+ * this pointer to its caller. Thus, READ_ONCE() suffices (that is,
+ * rcu_dereference() is not needed), which means that list_empty() can be
+ * used anywhere you would want to use list_empty_rcu(). Just don't
+ * expect anything useful to happen if you do a subsequent lockless
+ * call to list_first_entry_rcu()!!!
*
* See list_first_or_null_rcu for an alternative.
*/
@@ -371,7 +380,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_head within the struct.
- * @cond...: optional lockdep expression if called from non-RCU protection.
+ * @cond: optional lockdep expression if called from non-RCU protection.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as list_add_rcu()
@@ -384,6 +393,25 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
/**
+ * list_for_each_entry_srcu - iterate over rcu list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ * @cond: lockdep expression for the lock required to traverse the list.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as list_add_rcu()
+ * as long as the traversal is guarded by srcu_read_lock().
+ * The lockdep expression srcu_read_lock_held() can be passed as the
+ * cond argument from read side.
+ */
+#define list_for_each_entry_srcu(pos, head, member, cond) \
+ for (__list_check_srcu(cond), \
+ pos = list_entry_rcu((head)->next, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
+
+/**
* list_entry_lockless - get the struct for this entry
* @ptr: the &struct list_head pointer.
* @type: the type of the struct this is embedded in.
@@ -506,6 +534,27 @@ static inline void hlist_replace_rcu(struct hlist_node *old,
WRITE_ONCE(old->pprev, LIST_POISON2);
}
+/**
+ * hlists_swap_heads_rcu - swap the lists the hlist heads point to
+ * @left: The hlist head on the left
+ * @right: The hlist head on the right
+ *
+ * The lists start out as [@left ][node1 ... ] and
+ * [@right ][node2 ... ]
+ * The lists end up as [@left ][node2 ... ]
+ * [@right ][node1 ... ]
+ */
+static inline void hlists_swap_heads_rcu(struct hlist_head *left, struct hlist_head *right)
+{
+ struct hlist_node *node1 = left->first;
+ struct hlist_node *node2 = right->first;
+
+ rcu_assign_pointer(left->first, node2);
+ rcu_assign_pointer(right->first, node1);
+ WRITE_ONCE(node2->pprev, &left->first);
+ WRITE_ONCE(node1->pprev, &right->first);
+}
+
/*
* return the first or the next element in an RCU protected hlist
*/
@@ -646,7 +695,7 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
- * @cond...: optional lockdep expression if called from non-RCU protection.
+ * @cond: optional lockdep expression if called from non-RCU protection.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as hlist_add_head_rcu()
@@ -661,6 +710,27 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
&(pos)->member)), typeof(*(pos)), member))
/**
+ * hlist_for_each_entry_srcu - iterate over rcu list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ * @cond: lockdep expression for the lock required to traverse the list.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_head_rcu()
+ * as long as the traversal is guarded by srcu_read_lock().
+ * The lockdep expression srcu_read_lock_held() can be passed as the
+ * cond argument from read side.
+ */
+#define hlist_for_each_entry_srcu(pos, head, member, cond) \
+ for (__list_check_srcu(cond), \
+ pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
+ typeof(*(pos)), member); \
+ pos; \
+ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
+ &(pos)->member)), typeof(*(pos)), member))
+
+/**
* hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
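
A sketch of the new SRCU iterator in use; the SRCU domain, the list, and the element type are illustrative:

struct foo {
	struct list_head list;
	int data;
};

DEFINE_STATIC_SRCU(example_srcu);
static LIST_HEAD(example_list);

static void example_walk(void)
{
	struct foo *f;
	int idx = srcu_read_lock(&example_srcu);

	list_for_each_entry_srcu(f, &example_list, list,
				 srcu_read_lock_held(&example_srcu))
		pr_info("data=%d\n", f->data);

	srcu_read_unlock(&example_srcu, idx);
}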
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index 9670b54b484a..d8afdb8784c1 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -161,8 +161,8 @@ static inline void hlist_nulls_add_fake(struct hlist_nulls_node *n)
*
* The barrier() is needed to make sure compiler doesn't cache first element [1],
* as this loop can be restarted [2]
- * [1] Documentation/core-api/atomic_ops.rst around line 114
- * [2] Documentation/RCU/rculist_nulls.txt around line 146
+ * [1] Documentation/memory-barriers.txt around line 1533
+ * [2] Documentation/RCU/rculist_nulls.rst around line 146
*/
#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
for (({barrier();}), \
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 2678a37c3169..08605ce7379d 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -29,16 +29,45 @@
#include <linux/lockdep.h>
#include <asm/processor.h>
#include <linux/cpumask.h>
+#include <linux/context_tracking_irq.h>
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
#define ulong2long(a) (*(long *)(&(a)))
+#define USHORT_CMP_GE(a, b) (USHRT_MAX / 2 >= (unsigned short)((a) - (b)))
+#define USHORT_CMP_LT(a, b) (USHRT_MAX / 2 < (unsigned short)((a) - (b)))
/* Exported common interfaces */
void call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier_tasks(void);
+void rcu_barrier_tasks_rude(void);
void synchronize_rcu(void);
+struct rcu_gp_oldstate;
+unsigned long get_completed_synchronize_rcu(void);
+void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
+
+// Maximum number of unsigned long values corresponding to
+// not-yet-completed RCU grace periods.
+#define NUM_ACTIVE_RCU_POLL_OLDSTATE 2
+
+/**
+ * same_state_synchronize_rcu - Are two old-state values identical?
+ * @oldstate1: First old-state value.
+ * @oldstate2: Second old-state value.
+ *
+ * The two old-state values must have been obtained from either
+ * get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or
+ * get_completed_synchronize_rcu(). Returns @true if the two values are
+ * identical and @false otherwise. This allows structures whose lifetimes
+ * are tracked by old-state values to push these values to a list header,
+ * allowing those structures to be slightly smaller.
+ */
+static inline bool same_state_synchronize_rcu(unsigned long oldstate1, unsigned long oldstate2)
+{
+ return oldstate1 == oldstate2;
+}
+
#ifdef CONFIG_PREEMPT_RCU
void __rcu_read_lock(void);
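
A sketch of how the polled grace-period state fits together; get_state_synchronize_rcu() and poll_state_synchronize_rcu() are existing APIs not shown in this hunk, and struct obj is illustrative:

struct obj {
	unsigned long gp_state;
	/* ... payload ... */
};

static void obj_retire(struct obj *o)
{
	o->gp_state = get_state_synchronize_rcu();
}

static bool obj_can_free(struct obj *o)
{
	/* Objects carrying the completed cookie never need to wait. */
	if (same_state_synchronize_rcu(o->gp_state,
				       get_completed_synchronize_rcu()))
		return true;
	return poll_state_synchronize_rcu(o->gp_state);
}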
@@ -50,10 +79,16 @@ void __rcu_read_unlock(void);
* nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
* types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
*/
-#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
+#define rcu_preempt_depth() READ_ONCE(current->rcu_read_lock_nesting)
#else /* #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_TINY_RCU
+#define rcu_read_unlock_strict() do { } while (0)
+#else
+void rcu_read_unlock_strict(void);
+#endif
+
static inline void __rcu_read_lock(void)
{
preempt_disable();
@@ -62,6 +97,8 @@ static inline void __rcu_read_lock(void)
static inline void __rcu_read_unlock(void)
{
preempt_enable();
+ if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
+ rcu_read_unlock_strict();
}
static inline int rcu_preempt_depth(void)
@@ -73,11 +110,17 @@ static inline int rcu_preempt_depth(void)
/* Internal to kernel */
void rcu_init(void);
-extern int rcu_scheduler_active __read_mostly;
+extern int rcu_scheduler_active;
void rcu_sched_clock_irq(int user);
void rcu_report_dead(unsigned int cpu);
void rcutree_migrate_callbacks(int cpu);
+#ifdef CONFIG_TASKS_RCU_GENERIC
+void rcu_init_tasks_generic(void);
+#else
+static inline void rcu_init_tasks_generic(void) { }
+#endif
+
#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
void rcu_sysrq_end(void);
@@ -86,18 +129,22 @@ static inline void rcu_sysrq_start(void) { }
static inline void rcu_sysrq_end(void) { }
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
-#ifdef CONFIG_NO_HZ_FULL
-void rcu_user_enter(void);
-void rcu_user_exit(void);
+#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
+void rcu_irq_work_resched(void);
#else
-static inline void rcu_user_enter(void) { }
-static inline void rcu_user_exit(void) { }
-#endif /* CONFIG_NO_HZ_FULL */
+static inline void rcu_irq_work_resched(void) { }
+#endif
#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
+int rcu_nocb_cpu_offload(int cpu);
+int rcu_nocb_cpu_deoffload(int cpu);
+void rcu_nocb_flush_deferred_wakeup(void);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
static inline void rcu_init_nohz(void) { }
+static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; }
+static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; }
+static inline void rcu_nocb_flush_deferred_wakeup(void) { }
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
/**
@@ -105,7 +152,7 @@ static inline void rcu_init_nohz(void) { }
* @a: Code that RCU needs to pay attention to.
*
* RCU read-side critical sections are forbidden in the inner idle loop,
- * that is, between the rcu_idle_enter() and the rcu_idle_exit() -- RCU
+ * that is, between the ct_idle_enter() and the ct_idle_exit() -- RCU
* will happily ignore any such read-side critical sections. However,
* things like powertop need tracepoints in the inner idle loop.
*
@@ -120,34 +167,78 @@ static inline void rcu_init_nohz(void) { }
*/
#define RCU_NONIDLE(a) \
do { \
- rcu_irq_enter_irqson(); \
+ ct_irq_enter_irqson(); \
do { a; } while (0); \
- rcu_irq_exit_irqson(); \
+ ct_irq_exit_irqson(); \
} while (0)
/*
* Note a quasi-voluntary context switch for RCU-tasks's benefit.
* This is a macro rather than an inline function to avoid #include hell.
*/
-#ifdef CONFIG_TASKS_RCU
-#define rcu_tasks_qs(t) \
- do { \
- if (READ_ONCE((t)->rcu_tasks_holdout)) \
- WRITE_ONCE((t)->rcu_tasks_holdout, false); \
+#ifdef CONFIG_TASKS_RCU_GENERIC
+
+# ifdef CONFIG_TASKS_RCU
+# define rcu_tasks_classic_qs(t, preempt) \
+ do { \
+ if (!(preempt) && READ_ONCE((t)->rcu_tasks_holdout)) \
+ WRITE_ONCE((t)->rcu_tasks_holdout, false); \
} while (0)
-#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t)
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
+# else
+# define rcu_tasks_classic_qs(t, preempt) do { } while (0)
+# define call_rcu_tasks call_rcu
+# define synchronize_rcu_tasks synchronize_rcu
+# endif
+
+# ifdef CONFIG_TASKS_TRACE_RCU
+// Bits for ->trc_reader_special.b.need_qs field.
+#define TRC_NEED_QS 0x1 // Task needs a quiescent state.
+#define TRC_NEED_QS_CHECKED 0x2 // Task has been checked for needing quiescent state.
+
+u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new);
+void rcu_tasks_trace_qs_blkd(struct task_struct *t);
+
+# define rcu_tasks_trace_qs(t) \
+ do { \
+ int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting); \
+ \
+ if (likely(!READ_ONCE((t)->trc_reader_special.b.need_qs)) && \
+ likely(!___rttq_nesting)) { \
+ rcu_trc_cmpxchg_need_qs((t), 0, TRC_NEED_QS_CHECKED); \
+ } else if (___rttq_nesting && ___rttq_nesting != INT_MIN && \
+ !READ_ONCE((t)->trc_reader_special.b.blocked)) { \
+ rcu_tasks_trace_qs_blkd(t); \
+ } \
+ } while (0)
+# else
+# define rcu_tasks_trace_qs(t) do { } while (0)
+# endif
+
+#define rcu_tasks_qs(t, preempt) \
+do { \
+ rcu_tasks_classic_qs((t), (preempt)); \
+ rcu_tasks_trace_qs(t); \
+} while (0)
+
+# ifdef CONFIG_TASKS_RUDE_RCU
+void call_rcu_tasks_rude(struct rcu_head *head, rcu_callback_t func);
+void synchronize_rcu_tasks_rude(void);
+# endif
+
+#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
void exit_tasks_rcu_start(void);
void exit_tasks_rcu_finish(void);
-#else /* #ifdef CONFIG_TASKS_RCU */
-#define rcu_tasks_qs(t) do { } while (0)
+#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
+#define rcu_tasks_classic_qs(t, preempt) do { } while (0)
+#define rcu_tasks_qs(t, preempt) do { } while (0)
#define rcu_note_voluntary_context_switch(t) do { } while (0)
#define call_rcu_tasks call_rcu
#define synchronize_rcu_tasks synchronize_rcu
static inline void exit_tasks_rcu_start(void) { }
static inline void exit_tasks_rcu_finish(void) { }
-#endif /* #else #ifdef CONFIG_TASKS_RCU */
+#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
/**
* cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU
@@ -158,7 +249,7 @@ static inline void exit_tasks_rcu_finish(void) { }
*/
#define cond_resched_tasks_rcu_qs() \
do { \
- rcu_tasks_qs(current); \
+ rcu_tasks_qs(current, false); \
cond_resched(); \
} while (0)
@@ -201,6 +292,11 @@ bool rcu_lockdep_current_cpu_online(void);
static inline bool rcu_lockdep_current_cpu_online(void) { return true; }
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
+extern struct lockdep_map rcu_lock_map;
+extern struct lockdep_map rcu_bh_lock_map;
+extern struct lockdep_map rcu_sched_lock_map;
+extern struct lockdep_map rcu_callback_map;
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void rcu_lock_acquire(struct lockdep_map *map)
@@ -213,10 +309,6 @@ static inline void rcu_lock_release(struct lockdep_map *map)
lock_release(map, _THIS_IP_);
}
-extern struct lockdep_map rcu_lock_map;
-extern struct lockdep_map rcu_bh_lock_map;
-extern struct lockdep_map rcu_sched_lock_map;
-extern struct lockdep_map rcu_callback_map;
int debug_lockdep_rcu_enabled(void);
int rcu_read_lock_held(void);
int rcu_read_lock_bh_held(void);
@@ -259,8 +351,8 @@ static inline int rcu_read_lock_any_held(void)
*/
#define RCU_LOCKDEP_WARN(c, s) \
do { \
- static bool __section(.data.unlikely) __warned; \
- if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \
+ static bool __section(".data.unlikely") __warned; \
+ if ((c) && debug_lockdep_rcu_enabled() && !__warned) { \
__warned = true; \
lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
} \
@@ -279,7 +371,8 @@ static inline void rcu_preempt_sleep_check(void) { }
#define rcu_sleep_check() \
do { \
rcu_preempt_sleep_check(); \
- RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
"Illegal context switch in RCU-bh read-side critical section"); \
RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \
"Illegal context switch in RCU-sched read-side critical section"); \
@@ -287,7 +380,7 @@ static inline void rcu_preempt_sleep_check(void) { }
#else /* #ifdef CONFIG_PROVE_RCU */
-#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
+#define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c))
#define rcu_sleep_check() do { } while (0)
#endif /* #else #ifdef CONFIG_PROVE_RCU */
@@ -307,32 +400,48 @@ static inline void rcu_preempt_sleep_check(void) { }
#define rcu_check_sparse(p, space)
#endif /* #else #ifdef __CHECKER__ */
-#define __rcu_access_pointer(p, space) \
+#define __unrcu_pointer(p, local) \
+({ \
+ typeof(*p) *local = (typeof(*p) *__force)(p); \
+ rcu_check_sparse(p, __rcu); \
+ ((typeof(*p) __force __kernel *)(local)); \
+})
+/**
+ * unrcu_pointer - mark a pointer as not being RCU protected
+ * @p: pointer needing to lose its __rcu property
+ *
+ * Converts @p from an __rcu pointer to a __kernel pointer.
+ * This allows an __rcu pointer to be used with xchg() and friends.
+ */
+#define unrcu_pointer(p) __unrcu_pointer(p, __UNIQUE_ID(rcu))
+
+#define __rcu_access_pointer(p, local, space) \
({ \
- typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
+ typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \
rcu_check_sparse(p, space); \
- ((typeof(*p) __force __kernel *)(_________p1)); \
+ ((typeof(*p) __force __kernel *)(local)); \
})
-#define __rcu_dereference_check(p, c, space) \
+#define __rcu_dereference_check(p, local, c, space) \
({ \
/* Dependency order vs. p above. */ \
- typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \
+ typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \
RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
rcu_check_sparse(p, space); \
- ((typeof(*p) __force __kernel *)(________p1)); \
+ ((typeof(*p) __force __kernel *)(local)); \
})
-#define __rcu_dereference_protected(p, c, space) \
+#define __rcu_dereference_protected(p, local, c, space) \
({ \
RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
rcu_check_sparse(p, space); \
((typeof(*p) __force __kernel *)(p)); \
})
-#define rcu_dereference_raw(p) \
+#define __rcu_dereference_raw(p, local) \
({ \
/* Dependency order vs. p above. */ \
- typeof(p) ________p1 = READ_ONCE(p); \
- ((typeof(*p) __force __kernel *)(________p1)); \
+ typeof(p) local = READ_ONCE(p); \
+ ((typeof(*p) __force __kernel *)(local)); \
})
+#define rcu_dereference_raw(p) __rcu_dereference_raw(p, __UNIQUE_ID(rcu))
/**
* RCU_INITIALIZER() - statically initialize an RCU-protected global variable
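
A sketch of the documented unrcu_pointer() use case with xchg(); the global and its type are illustrative:

struct foo __rcu *example_gp;

static struct foo *example_take(void)
{
	/* Atomically claim the pointer and drop its __rcu annotation. */
	return unrcu_pointer(xchg(&example_gp, NULL));
}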
@@ -411,15 +520,23 @@ do { \
* against NULL. Although rcu_access_pointer() may also be used in cases
* where update-side locks prevent the value of the pointer from changing,
* you should instead use rcu_dereference_protected() for this use case.
+ * Within an RCU read-side critical section, there is little reason to
+ * use rcu_access_pointer().
+ *
+ * It is usually best to test the rcu_access_pointer() return value
+ * directly in order to avoid accidental dereferences being introduced
+ * by later inattentive changes. In other words, assigning the
+ * rcu_access_pointer() return value to a local variable results in an
+ * accident waiting to happen.
*
* It is also permissible to use rcu_access_pointer() when read-side
- * access to the pointer was removed at least one grace period ago, as
- * is the case in the context of the RCU callback that is freeing up
- * the data, or after a synchronize_rcu() returns. This can be useful
- * when tearing down multi-linked structures after a grace period
- * has elapsed.
+ * access to the pointer was removed at least one grace period ago, as is
+ * the case in the context of the RCU callback that is freeing up the data,
+ * or after a synchronize_rcu() returns. This can be useful when tearing
+ * down multi-linked structures after a grace period has elapsed. However,
+ * rcu_dereference_protected() is normally preferred for this use case.
*/
-#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
+#define rcu_access_pointer(p) __rcu_access_pointer((p), __UNIQUE_ID(rcu), __rcu)
/**
* rcu_dereference_check() - rcu_dereference with debug checking
@@ -455,17 +572,24 @@ do { \
* annotated as __rcu.
*/
#define rcu_dereference_check(p, c) \
- __rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu)
+ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
+ (c) || rcu_read_lock_held(), __rcu)
/**
* rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
- * This is the RCU-bh counterpart to rcu_dereference_check().
+ * This is the RCU-bh counterpart to rcu_dereference_check(). However,
+ * please note that starting in v5.0 kernels, vanilla RCU grace periods
+ * wait for local_bh_disable() regions of code in addition to regions of
+ * code demarcated by rcu_read_lock() and rcu_read_unlock(). This means
+ * that synchronize_rcu(), call_rcu(), and friends all take not only
+ * rcu_read_lock() but also rcu_read_lock_bh() into account.
*/
#define rcu_dereference_bh_check(p, c) \
- __rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)
+ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
+ (c) || rcu_read_lock_bh_held(), __rcu)
/**
* rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
@@ -473,9 +597,15 @@ do { \
* @c: The conditions under which the dereference will take place
*
* This is the RCU-sched counterpart to rcu_dereference_check().
+ * However, please note that starting in v5.0 kernels, vanilla RCU grace
+ * periods wait for preempt_disable() regions of code in addition to
+ * regions of code demarcated by rcu_read_lock() and rcu_read_unlock().
+ * This means that synchronize_rcu(), call_rcu(), and friends all take not
+ * only rcu_read_lock() but also rcu_read_lock_sched() into account.
*/
#define rcu_dereference_sched_check(p, c) \
- __rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
+ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
+ (c) || rcu_read_lock_sched_held(), \
__rcu)
/*
@@ -485,7 +615,8 @@ do { \
* The no-tracing version of rcu_dereference_raw() must not call
* rcu_read_lock_held().
*/
-#define rcu_dereference_raw_check(p) __rcu_dereference_check((p), 1, __rcu)
+#define rcu_dereference_raw_check(p) \
+ __rcu_dereference_check((p), __UNIQUE_ID(rcu), 1, __rcu)
/**
* rcu_dereference_protected() - fetch RCU pointer when updates prevented
@@ -504,7 +635,7 @@ do { \
* but very ugly failures.
*/
#define rcu_dereference_protected(p, c) \
- __rcu_dereference_protected((p), (c), __rcu)
+ __rcu_dereference_protected((p), __UNIQUE_ID(rcu), (c), __rcu)
/**
@@ -564,6 +695,12 @@ do { \
* sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
*
+ * In v5.0 and later kernels, synchronize_rcu() and call_rcu() also
+ * wait for regions of code with preemption disabled, including regions of
+ * code with interrupts or softirqs disabled. In pre-v5.0 kernels, which
+ * define synchronize_sched(), only code enclosed within rcu_read_lock()
+ * and rcu_read_unlock() are guaranteed to be waited for.
+ *
* Note, however, that RCU callbacks are permitted to run concurrently
* with new RCU read-side critical sections. One way that this can happen
* is via the following sequence of events: (1) CPU 0 enters an RCU
@@ -616,33 +753,12 @@ static __always_inline void rcu_read_lock(void)
/**
* rcu_read_unlock() - marks the end of an RCU read-side critical section.
*
- * In most situations, rcu_read_unlock() is immune from deadlock.
- * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
- * is responsible for deboosting, which it does via rt_mutex_unlock().
- * Unfortunately, this function acquires the scheduler's runqueue and
- * priority-inheritance spinlocks. This means that deadlock could result
- * if the caller of rcu_read_unlock() already holds one of these locks or
- * any lock that is ever acquired while holding them.
- *
- * That said, RCU readers are never priority boosted unless they were
- * preempted. Therefore, one way to avoid deadlock is to make sure
- * that preemption never happens within any RCU read-side critical
- * section whose outermost rcu_read_unlock() is called with one of
- * rt_mutex_unlock()'s locks held. Such preemption can be avoided in
- * a number of ways, for example, by invoking preempt_disable() before
- * critical section's outermost rcu_read_lock().
- *
- * Given that the set of locks acquired by rt_mutex_unlock() might change
- * at any time, a somewhat more future-proofed approach is to make sure
- * that that preemption never happens within any RCU read-side critical
- * section whose outermost rcu_read_unlock() is called with irqs disabled.
- * This approach relies on the fact that rt_mutex_unlock() currently only
- * acquires irq-disabled locks.
- *
- * The second of these two approaches is best in most situations,
- * however, the first approach can also be useful, at least to those
- * developers willing to keep abreast of the set of locks acquired by
- * rt_mutex_unlock().
+ * In almost all situations, rcu_read_unlock() is immune from deadlock.
+ * In recent kernels that have consolidated synchronize_sched() and
+ * synchronize_rcu_bh() into synchronize_rcu(), this deadlock immunity
+ * also extends to the scheduler's runqueue and priority-inheritance
+ * spinlocks, courtesy of the quiescent-state deferral that is carried
+ * out when rcu_read_unlock() is invoked with interrupts disabled.
*
* See rcu_read_lock() for more information.
*/
@@ -658,9 +774,11 @@ static inline void rcu_read_unlock(void)
/**
* rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
*
- * This is equivalent of rcu_read_lock(), but also disables softirqs.
- * Note that anything else that disables softirqs can also serve as
- * an RCU read-side critical section.
+ * This is equivalent to rcu_read_lock(), but also disables softirqs.
+ * Note that anything else that disables softirqs can also serve as an RCU
+ * read-side critical section. However, please note that this equivalence
+ * applies only to v5.0 and later. Before v5.0, rcu_read_lock() and
+ * rcu_read_lock_bh() were unrelated.
*
* Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
* must occur in the same context, for example, it is illegal to invoke
@@ -676,8 +794,8 @@ static inline void rcu_read_lock_bh(void)
"rcu_read_lock_bh() used illegally while idle");
}
-/*
- * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
+/**
+ * rcu_read_unlock_bh() - marks the end of a softirq-only RCU critical section
*
* See rcu_read_lock_bh() for more information.
*/
@@ -693,9 +811,12 @@ static inline void rcu_read_unlock_bh(void)
/**
* rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
*
- * This is equivalent of rcu_read_lock(), but disables preemption.
- * Read-side critical sections can also be introduced by anything else
- * that disables preemption, including local_irq_disable() and friends.
+ * This is equivalent to rcu_read_lock(), but also disables preemption.
+ * Read-side critical sections can also be introduced by anything else that
+ * disables preemption, including local_irq_disable() and friends. However,
+ * please note that the equivalence to rcu_read_lock() applies only to
+ * v5.0 and later. Before v5.0, rcu_read_lock() and rcu_read_lock_sched()
+ * were unrelated.
*
* Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
* must occur in the same context, for example, it is illegal to invoke
@@ -718,10 +839,10 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
__acquire(RCU_SCHED);
}
-/*
- * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
+/**
+ * rcu_read_unlock_sched() - marks the end of a RCU-classic critical section
*
- * See rcu_read_lock_sched for more information.
+ * See rcu_read_lock_sched() for more information.
*/
static inline void rcu_read_unlock_sched(void)
{
@@ -795,23 +916,15 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
/*
* Does the specified offset indicate that the corresponding rcu_head
- * structure can be handled by kfree_rcu()?
+ * structure can be handled by kvfree_rcu()?
*/
-#define __is_kfree_rcu_offset(offset) ((offset) < 4096)
-
-/*
- * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
- */
-#define __kfree_rcu(head, offset) \
- do { \
- BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
- kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
- } while (0)
+#define __is_kvfree_rcu_offset(offset) ((offset) < 4096)
/**
* kfree_rcu() - kfree an object after a grace period.
- * @ptr: pointer to kfree
- * @rhf: the name of the struct rcu_head within the type of @ptr.
+ * @ptr: pointer to kfree for both single- and double-argument invocations.
+ * @rhf: the name of the struct rcu_head within the type of @ptr,
+ * but only for double-argument invocations.
*
* Many rcu callbacks functions just call kfree() on the base structure.
* These functions are trivial, but their size adds up, and furthermore
@@ -824,7 +937,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
* Because the functions are not allowed in the low-order 4096 bytes of
* kernel virtual memory, offsets up to 4095 bytes can be accommodated.
* If the offset is larger than 4095 bytes, a compile-time error will
- * be generated in __kfree_rcu(). If this error is triggered, you can
+ * be generated in kvfree_rcu_arg_2(). If this error is triggered, you can
* either fall back to use of call_rcu() or rearrange the structure to
* position the rcu_head structure into the first 4096 bytes.
*
@@ -834,12 +947,55 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
* The BUILD_BUG_ON check must not involve any function calls, hence the
* checks are done in macros here.
*/
-#define kfree_rcu(ptr, rhf) \
+#define kfree_rcu(ptr, rhf...) kvfree_rcu(ptr, ## rhf)
+
+/**
+ * kvfree_rcu() - kvfree an object after a grace period.
+ *
+ * This macro takes one or two arguments, depending on whether the
+ * object being freed is head-less. If the object embeds an rcu_head,
+ * the semantics are the same as before:
+ *
+ * kvfree_rcu(ptr, rhf);
+ *
+ * where @ptr is the pointer to be freed by kvfree() and @rhf is the
+ * name of the rcu_head structure within the type of @ptr.
+ *
+ * In the head-less variant, only one argument is passed, namely the
+ * pointer that is to be freed after a grace period:
+ *
+ * kvfree_rcu(ptr);
+ *
+ * where @ptr is the pointer to be freed by kvfree().
+ *
+ * Please note that the head-less way of freeing may be used only from
+ * contexts that are allowed to sleep, that is, contexts that can pass
+ * a might_sleep() check. Otherwise, please embed an rcu_head structure
+ * within the type of @ptr and use the two-argument form.
+ */
+#define kvfree_rcu(...) KVFREE_GET_MACRO(__VA_ARGS__, \
+ kvfree_rcu_arg_2, kvfree_rcu_arg_1)(__VA_ARGS__)
+
+#define KVFREE_GET_MACRO(_1, _2, NAME, ...) NAME
+#define kvfree_rcu_arg_2(ptr, rhf) \
do { \
typeof (ptr) ___p = (ptr); \
\
- if (___p) \
- __kfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \
+ if (___p) { \
+ BUILD_BUG_ON(!__is_kvfree_rcu_offset(offsetof(typeof(*(ptr)), rhf))); \
+ kvfree_call_rcu(&((___p)->rhf), (rcu_callback_t)(unsigned long) \
+ (offsetof(typeof(*(ptr)), rhf))); \
+ } \
+} while (0)
+
+#define kvfree_rcu_arg_1(ptr) \
+do { \
+ typeof(ptr) ___p = (ptr); \
+ \
+ if (___p) \
+ kvfree_call_rcu(NULL, (rcu_callback_t) (___p)); \
} while (0)
/*
@@ -873,7 +1029,7 @@ static inline void rcu_head_init(struct rcu_head *rhp)
}
/**
- * rcu_head_after_call_rcu - Has this rcu_head been passed to call_rcu()?
+ * rcu_head_after_call_rcu() - Has this rcu_head been passed to call_rcu()?
* @rhp: The rcu_head structure to test.
* @f: The function passed to call_rcu() along with @rhp.
*
diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h
new file mode 100644
index 000000000000..9bc8cbb33340
--- /dev/null
+++ b/include/linux/rcupdate_trace.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Read-Copy Update mechanism for mutual exclusion, adapted for tracing.
+ *
+ * Copyright (C) 2020 Paul E. McKenney.
+ */
+
+#ifndef __LINUX_RCUPDATE_TRACE_H
+#define __LINUX_RCUPDATE_TRACE_H
+
+#include <linux/sched.h>
+#include <linux/rcupdate.h>
+
+extern struct lockdep_map rcu_trace_lock_map;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+static inline int rcu_read_lock_trace_held(void)
+{
+ return lock_is_held(&rcu_trace_lock_map);
+}
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline int rcu_read_lock_trace_held(void)
+{
+ return 1;
+}
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+#ifdef CONFIG_TASKS_TRACE_RCU
+
+void rcu_read_unlock_trace_special(struct task_struct *t);
+
+/**
+ * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
+ *
+ * When synchronize_rcu_tasks_trace() is invoked by one task, then that
+ * task is guaranteed to block until all other tasks exit their read-side
+ * critical sections. Similarly, if call_rcu_tasks_trace() is invoked
+ * on one task while other tasks are within RCU read-side critical
+ * sections, invocation of the corresponding RCU callback is deferred
+ * until after all the other tasks exit their critical sections.
+ *
+ * For more details, please see the documentation for rcu_read_lock().
+ */
+static inline void rcu_read_lock_trace(void)
+{
+ struct task_struct *t = current;
+
+ WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
+ barrier();
+ if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
+ t->trc_reader_special.b.need_mb)
+ smp_mb(); // Pairs with update-side barriers
+ rcu_lock_acquire(&rcu_trace_lock_map);
+}
+
+/**
+ * rcu_read_unlock_trace - mark end of RCU-trace read-side critical section
+ *
+ * Pairs with a preceding call to rcu_read_lock_trace(), and nesting is
+ * allowed. Invoking rcu_read_unlock_trace() when there is no matching
+ * rcu_read_lock_trace() is verboten, and will result in lockdep complaints.
+ *
+ * For more details, please see the documentation for rcu_read_unlock().
+ */
+static inline void rcu_read_unlock_trace(void)
+{
+ int nesting;
+ struct task_struct *t = current;
+
+ rcu_lock_release(&rcu_trace_lock_map);
+ nesting = READ_ONCE(t->trc_reader_nesting) - 1;
+ barrier(); // Critical section before disabling.
+ // Disable IPI-based setting of .need_qs.
+ WRITE_ONCE(t->trc_reader_nesting, INT_MIN + nesting);
+ if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
+ WRITE_ONCE(t->trc_reader_nesting, nesting);
+ return; // We assume shallow reader nesting.
+ }
+ WARN_ON_ONCE(nesting != 0);
+ rcu_read_unlock_trace_special(t);
+}
+
+void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
+void synchronize_rcu_tasks_trace(void);
+void rcu_barrier_tasks_trace(void);
+#else
+/*
+ * The BPF JIT forms these addresses even when it doesn't call these
+ * functions, so provide definitions that result in runtime errors.
+ */
+static inline void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) { BUG(); }
+static inline void rcu_read_lock_trace(void) { BUG(); }
+static inline void rcu_read_unlock_trace(void) { BUG(); }
+#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
+
+#endif /* __LINUX_RCUPDATE_TRACE_H */
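
A minimal reader/updater sketch for the new RCU Tasks Trace API; the cfg structure and active_cfg pointer are hypothetical:

#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>

struct cfg {
	int val;
};
static struct cfg __rcu *active_cfg;	/* hypothetical protected pointer */

static int read_cfg(void)
{
	struct cfg *c;
	int v = -1;

	rcu_read_lock_trace();
	/* The trace-held condition keeps lockdep happy in readers. */
	c = rcu_dereference_check(active_cfg, rcu_read_lock_trace_held());
	if (c)
		v = c->val;
	rcu_read_unlock_trace();
	return v;
}

static void wait_for_trace_readers(void)
{
	/* Blocks until all pre-existing rcu_read_lock_trace() readers finish. */
	synchronize_rcu_tasks_trace();
}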
diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h
index c0578ba23c1a..699b938358bf 100644
--- a/include/linux/rcupdate_wait.h
+++ b/include/linux/rcupdate_wait.h
@@ -31,4 +31,23 @@ do { \
#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
+/**
+ * synchronize_rcu_mult - Wait concurrently for multiple grace periods
+ * @...: List of call_rcu() functions for different grace periods to wait on
+ *
+ * This macro waits concurrently for multiple types of RCU grace periods.
+ * For example, synchronize_rcu_mult(call_rcu, call_rcu_tasks) would wait
+ * on concurrent RCU and RCU-tasks grace periods. Waiting on a given SRCU
+ * domain requires you to write a wrapper function for that SRCU domain's
+ * call_srcu() function, with this wrapper supplying the pointer to the
+ * corresponding srcu_struct.
+ *
+ * The first argument tells Tiny RCU's _wait_rcu_gp() not to bother
+ * waiting for RCU. The reason is that any point from which
+ * synchronize_rcu_mult() can be called is automatically already a
+ * full grace period.
+ */
+#define synchronize_rcu_mult(...) \
+ _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
+
#endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */
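
The kernel-doc above mentions wrapping call_srcu() for SRCU domains; a sketch of that pattern, where my_srcu is a hypothetical domain:

#include <linux/rcupdate_wait.h>
#include <linux/srcu.h>

DEFINE_STATIC_SRCU(my_srcu);		/* hypothetical SRCU domain */

/* Wrapper supplying the srcu_struct pointer, as described above. */
static void call_my_srcu(struct rcu_head *head, rcu_callback_t func)
{
	call_srcu(&my_srcu, head, func);
}

static void wait_for_everything(void)
{
	/* Concurrently waits for RCU, RCU-tasks, and my_srcu grace periods. */
	synchronize_rcu_mult(call_rcu, call_rcu_tasks, call_my_srcu);
}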
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index b2b2dc990da9..768196a5f39d 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -14,12 +14,43 @@
#include <asm/param.h> /* for HZ */
-/* Never flag non-existent other CPUs! */
-static inline bool rcu_eqs_special_set(int cpu) { return false; }
+struct rcu_gp_oldstate {
+ unsigned long rgos_norm;
+};
-static inline unsigned long get_state_synchronize_rcu(void)
+// Maximum number of rcu_gp_oldstate values corresponding to
+// not-yet-completed RCU grace periods.
+#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 2
+
+/*
+ * Are the two oldstate values the same? See the Tree RCU version for
+ * docbook header.
+ */
+static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
+ struct rcu_gp_oldstate *rgosp2)
{
- return 0;
+ return rgosp1->rgos_norm == rgosp2->rgos_norm;
+}
+
+unsigned long get_state_synchronize_rcu(void);
+
+static inline void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ rgosp->rgos_norm = get_state_synchronize_rcu();
+}
+
+unsigned long start_poll_synchronize_rcu(void);
+
+static inline void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ rgosp->rgos_norm = start_poll_synchronize_rcu();
+}
+
+bool poll_state_synchronize_rcu(unsigned long oldstate);
+
+static inline bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ return poll_state_synchronize_rcu(rgosp->rgos_norm);
}
static inline void cond_synchronize_rcu(unsigned long oldstate)
@@ -27,6 +58,31 @@ static inline void cond_synchronize_rcu(unsigned long oldstate)
might_sleep();
}
+static inline void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ cond_synchronize_rcu(rgosp->rgos_norm);
+}
+
+static inline unsigned long start_poll_synchronize_rcu_expedited(void)
+{
+ return start_poll_synchronize_rcu();
+}
+
+static inline void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
+{
+ rgosp->rgos_norm = start_poll_synchronize_rcu_expedited();
+}
+
+static inline void cond_synchronize_rcu_expedited(unsigned long oldstate)
+{
+ cond_synchronize_rcu(oldstate);
+}
+
+static inline void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
+{
+ cond_synchronize_rcu_expedited(rgosp->rgos_norm);
+}
+
extern void rcu_barrier(void);
static inline void synchronize_rcu_expedited(void)
@@ -34,10 +90,35 @@ static inline void synchronize_rcu_expedited(void)
synchronize_rcu();
}
-static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+/*
+ * Add one more declaration of kvfree() here. It is not
+ * straightforward to simply include <linux/mm.h>, where it is
+ * defined, because that include causes many compile errors.
+ */
+extern void kvfree(const void *addr);
+
+static inline void __kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+{
+ if (head) {
+ call_rcu(head, func);
+ return;
+ }
+
+ // kvfree_rcu(one_arg) call.
+ might_sleep();
+ synchronize_rcu();
+ kvfree((void *) func);
+}
+
+#ifdef CONFIG_KASAN_GENERIC
+void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
+#else
+static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
- call_rcu(head, func);
+ __kvfree_call_rcu(head, func);
}
+#endif
void rcu_qs(void);
@@ -49,12 +130,11 @@ static inline void rcu_softirq_qs(void)
#define rcu_note_context_switch(preempt) \
do { \
rcu_qs(); \
- rcu_tasks_qs(current); \
+ rcu_tasks_qs(current, (preempt)); \
} while (0)
-static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
+static inline int rcu_needs_cpu(void)
{
- *nextevt = KTIME_MAX;
return 0;
}
@@ -65,12 +145,9 @@ static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
static inline void rcu_virt_note_context_switch(int cpu) { }
static inline void rcu_cpu_stall_reset(void) { }
static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
-static inline void rcu_idle_enter(void) { }
-static inline void rcu_idle_exit(void) { }
-static inline void rcu_irq_enter(void) { }
-static inline void rcu_irq_exit_irqson(void) { }
-static inline void rcu_irq_enter_irqson(void) { }
-static inline void rcu_irq_exit(void) { }
+static inline void rcu_irq_exit_check_preempt(void) { }
+#define rcu_is_idle_cpu(cpu) \
+ (is_idle_task(current) && !in_nmi() && !in_hardirq() && !in_serving_softirq())
static inline void exit_rcu(void) { }
static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
@@ -83,9 +160,11 @@ void rcu_scheduler_starting(void);
static inline void rcu_scheduler_starting(void) { }
#endif /* #else #ifndef CONFIG_SRCU */
static inline void rcu_end_inkernel_boot(void) { }
+static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
static inline void rcu_momentary_dyntick_idle(void) { }
static inline void kfree_rcu_scheduler_running(void) { }
+static inline bool rcu_gp_might_be_stalled(void) { return false; }
/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }
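
A sketch of the polled grace-period interface that this Tiny RCU code now implements (Tree RCU provides the same calls):

#include <linux/rcupdate.h>

static void polled_gp_example(void)
{
	unsigned long cookie;

	cookie = start_poll_synchronize_rcu();	/* start a grace period */

	/* ... do other work while the grace period progresses ... */

	if (poll_state_synchronize_rcu(cookie))
		return;				/* already elapsed, no waiting */
	cond_synchronize_rcu(cookie);		/* otherwise block until done */
}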
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 2f787b9029d1..5efb51486e8a 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -19,7 +19,7 @@
void rcu_softirq_qs(void);
void rcu_note_context_switch(bool preempt);
-int rcu_needs_cpu(u64 basem, u64 *nextevt);
+int rcu_needs_cpu(void);
void rcu_cpu_stall_reset(void);
/*
@@ -33,27 +33,77 @@ static inline void rcu_virt_note_context_switch(int cpu)
}
void synchronize_rcu_expedited(void);
-void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
+void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier(void);
bool rcu_eqs_special_set(int cpu);
void rcu_momentary_dyntick_idle(void);
void kfree_rcu_scheduler_running(void);
+bool rcu_gp_might_be_stalled(void);
+
+struct rcu_gp_oldstate {
+ unsigned long rgos_norm;
+ unsigned long rgos_exp;
+};
+
+// Maximum number of rcu_gp_oldstate values corresponding to
+// not-yet-completed RCU grace periods.
+#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 4
+
+/**
+ * same_state_synchronize_rcu_full - Are two old-state values identical?
+ * @rgosp1: First old-state value.
+ * @rgosp2: Second old-state value.
+ *
+ * The two old-state values must have been obtained from either
+ * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
+ * or get_completed_synchronize_rcu_full(). Returns @true if the two
+ * values are identical and @false otherwise. This allows structures
+ * whose lifetimes are tracked by old-state values to push these values
+ * to a list header, allowing those structures to be slightly smaller.
+ *
+ * Note that equality is judged on a bitwise basis, so that an
+ * @rcu_gp_oldstate structure with an already-completed state in one field
+ * will compare not-equal to a structure with an already-completed state
+ * in the other field. After all, the @rcu_gp_oldstate structure is opaque
+ * so how did such a situation come to pass in the first place?
+ */
+static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
+ struct rcu_gp_oldstate *rgosp2)
+{
+ return rgosp1->rgos_norm == rgosp2->rgos_norm && rgosp1->rgos_exp == rgosp2->rgos_exp;
+}
+
+unsigned long start_poll_synchronize_rcu_expedited(void);
+void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp);
+void cond_synchronize_rcu_expedited(unsigned long oldstate);
+void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp);
unsigned long get_state_synchronize_rcu(void);
+void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
+unsigned long start_poll_synchronize_rcu(void);
+void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
+bool poll_state_synchronize_rcu(unsigned long oldstate);
+bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
void cond_synchronize_rcu(unsigned long oldstate);
+void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
+
+bool rcu_is_idle_cpu(int cpu);
+
+#ifdef CONFIG_PROVE_RCU
+void rcu_irq_exit_check_preempt(void);
+#else
+static inline void rcu_irq_exit_check_preempt(void) { }
+#endif
-void rcu_idle_enter(void);
-void rcu_idle_exit(void);
-void rcu_irq_enter(void);
-void rcu_irq_exit(void);
-void rcu_irq_enter_irqson(void);
-void rcu_irq_exit_irqson(void);
+struct task_struct;
+void rcu_preempt_deferred_qs(struct task_struct *t);
void exit_rcu(void);
void rcu_scheduler_starting(void);
-extern int rcu_scheduler_active __read_mostly;
+extern int rcu_scheduler_active;
void rcu_end_inkernel_boot(void);
+bool rcu_inkernel_boot_has_ended(void);
bool rcu_is_watching(void);
#ifndef CONFIG_PREEMPTION
void rcu_all_qs(void);
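
A sketch of the corresponding full-state variants declared above, which track normal and expedited grace periods via struct rcu_gp_oldstate:

#include <linux/rcupdate.h>

static void full_oldstate_example(void)
{
	struct rcu_gp_oldstate rgos;

	get_state_synchronize_rcu_full(&rgos);	/* snapshot current GP state */

	/* ... later ... */

	if (!poll_state_synchronize_rcu_full(&rgos))
		synchronize_rcu();		/* still pending: wait explicitly */
}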
diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h
index 75c97e4bbc57..8052d34da782 100644
--- a/include/linux/rcuwait.h
+++ b/include/linux/rcuwait.h
@@ -3,6 +3,7 @@
#define _LINUX_RCUWAIT_H_
#include <linux/rcupdate.h>
+#include <linux/sched/signal.h>
/*
* rcuwait provides a way of blocking and waking up a single
@@ -24,29 +25,52 @@ static inline void rcuwait_init(struct rcuwait *w)
w->task = NULL;
}
-extern void rcuwait_wake_up(struct rcuwait *w);
+/*
+ * Note: this provides no serialization and, just as with waitqueues,
+ * requires care when estimating whether or not the wait is active.
+ */
+static inline int rcuwait_active(struct rcuwait *w)
+{
+ return !!rcu_access_pointer(w->task);
+}
+
+extern int rcuwait_wake_up(struct rcuwait *w);
/*
* The caller is responsible for locking around rcuwait_wait_event(),
- * such that writes to @task are properly serialized.
+ * and [prepare_to/finish]_rcuwait() such that writes to @task are
+ * properly serialized.
*/
-#define rcuwait_wait_event(w, condition) \
+
+static inline void prepare_to_rcuwait(struct rcuwait *w)
+{
+ rcu_assign_pointer(w->task, current);
+}
+
+extern void finish_rcuwait(struct rcuwait *w);
+
+#define rcuwait_wait_event(w, condition, state) \
({ \
- rcu_assign_pointer((w)->task, current); \
+ int __ret = 0; \
+ prepare_to_rcuwait(w); \
for (;;) { \
/* \
* Implicit barrier (A) pairs with (B) in \
* rcuwait_wake_up(). \
*/ \
- set_current_state(TASK_UNINTERRUPTIBLE); \
+ set_current_state(state); \
if (condition) \
break; \
\
+ if (signal_pending_state(state, current)) { \
+ __ret = -EINTR; \
+ break; \
+ } \
+ \
schedule(); \
} \
- \
- WRITE_ONCE((w)->task, NULL); \
- __set_current_state(TASK_RUNNING); \
+ finish_rcuwait(w); \
+ __ret; \
})
#endif /* _LINUX_RCUWAIT_H_ */
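
A usage sketch for the extended rcuwait API, showing the new state argument and the int return of rcuwait_wake_up(); the done flag is hypothetical, and real users serialize writes to it under a lock as the comment above requires:

#include <linux/rcuwait.h>
#include <linux/sched.h>

static struct rcuwait my_wait;		/* rcuwait_init(&my_wait) at setup */
static bool done;			/* written under the caller's lock */

static int wait_side(void)
{
	/* Returns -EINTR if a signal arrives while TASK_INTERRUPTIBLE. */
	return rcuwait_wait_event(&my_wait, READ_ONCE(done),
				  TASK_INTERRUPTIBLE);
}

static void wake_side(void)
{
	WRITE_ONCE(done, true);
	if (rcuwait_wake_up(&my_wait))
		pr_debug("a waiter was woken\n");
}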
diff --git a/include/linux/rcuwait_api.h b/include/linux/rcuwait_api.h
new file mode 100644
index 000000000000..f962e28544dd
--- /dev/null
+++ b/include/linux/rcuwait_api.h
@@ -0,0 +1 @@
+#include <linux/rcuwait.h>
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 3734cd8f38a8..2b6bb593be5b 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -7,6 +7,7 @@
#include <uapi/linux/reboot.h>
struct device;
+struct sys_off_handler;
#define SYS_DOWN 0x0001 /* Notify of system down */
#define SYS_RESTART SYS_DOWN
@@ -62,6 +63,103 @@ extern void machine_shutdown(void);
struct pt_regs;
extern void machine_crash_shutdown(struct pt_regs *);
+void do_kernel_power_off(void);
+
+/*
+ * sys-off handler API.
+ */
+
+/*
+ * Standard sys-off priority levels. Users are expected to set priorities
+ * relative to the standard levels.
+ *
+ * SYS_OFF_PRIO_PLATFORM: Use this for platform-level handlers.
+ *
+ * SYS_OFF_PRIO_LOW: Use this for a handler of last resort.
+ *
+ * SYS_OFF_PRIO_DEFAULT: Use this for normal handlers.
+ *
+ * SYS_OFF_PRIO_HIGH: Use this for higher-priority handlers.
+ *
+ * SYS_OFF_PRIO_FIRMWARE: Use this if the handler uses a firmware call.
+ */
+#define SYS_OFF_PRIO_PLATFORM -256
+#define SYS_OFF_PRIO_LOW -128
+#define SYS_OFF_PRIO_DEFAULT 0
+#define SYS_OFF_PRIO_HIGH 192
+#define SYS_OFF_PRIO_FIRMWARE 224
+
+enum sys_off_mode {
+ /**
+ * @SYS_OFF_MODE_POWER_OFF_PREPARE:
+ *
+ * Handlers prepare the system to be powered off. Handlers are
+ * allowed to sleep.
+ */
+ SYS_OFF_MODE_POWER_OFF_PREPARE,
+
+ /**
+ * @SYS_OFF_MODE_POWER_OFF:
+ *
+ * Handlers power off the system. Handlers are not allowed to sleep.
+ */
+ SYS_OFF_MODE_POWER_OFF,
+
+ /**
+ * @SYS_OFF_MODE_RESTART_PREPARE:
+ *
+ * Handlers prepare the system to be restarted. Handlers are
+ * allowed to sleep.
+ */
+ SYS_OFF_MODE_RESTART_PREPARE,
+
+ /**
+ * @SYS_OFF_MODE_RESTART:
+ *
+ * Handlers restart the system. Handlers are not allowed to sleep.
+ */
+ SYS_OFF_MODE_RESTART,
+};
+
+/**
+ * struct sys_off_data - sys-off callback argument
+ *
+ * @mode: Mode ID. Currently used only by the sys-off restart mode,
+ * see enum reboot_mode for the available modes.
+ * @cb_data: User's callback data.
+ * @cmd: Command string. Currently used only by the sys-off restart mode,
+ * NULL otherwise.
+ */
+struct sys_off_data {
+ int mode;
+ void *cb_data;
+ const char *cmd;
+};
+
+struct sys_off_handler *
+register_sys_off_handler(enum sys_off_mode mode,
+ int priority,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data);
+void unregister_sys_off_handler(struct sys_off_handler *handler);
+
+int devm_register_sys_off_handler(struct device *dev,
+ enum sys_off_mode mode,
+ int priority,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data);
+
+int devm_register_power_off_handler(struct device *dev,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data);
+
+int devm_register_restart_handler(struct device *dev,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data);
+
+int register_platform_power_off(void (*power_off)(void));
+void unregister_platform_power_off(void (*power_off)(void));
+
/*
 * Architecture-independent implementations of sys_reboot commands.
*/
@@ -70,15 +168,13 @@ extern void kernel_restart_prepare(char *cmd);
extern void kernel_restart(char *cmd);
extern void kernel_halt(void);
extern void kernel_power_off(void);
+extern bool kernel_can_power_off(void);
-extern int C_A_D; /* for sysctl */
void ctrl_alt_del(void);
-#define POWEROFF_CMD_PATH_LEN 256
-extern char poweroff_cmd[POWEROFF_CMD_PATH_LEN];
-
extern void orderly_poweroff(bool force);
extern void orderly_reboot(void);
+void hw_protection_shutdown(const char *reason, int ms_until_forced);
/*
* Emergency restart, callable from an interrupt handler.
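
A sketch of how a driver might use the new sys-off handler API; struct my_pmic and my_pmic_shutdown() are hypothetical placeholders:

#include <linux/notifier.h>
#include <linux/reboot.h>

static int my_power_off(struct sys_off_data *data)
{
	struct my_pmic *pmic = data->cb_data;	/* registered below */

	my_pmic_shutdown(pmic);			/* hypothetical helper */
	return NOTIFY_DONE;
}

static int my_pmic_probe(struct device *dev, struct my_pmic *pmic)
{
	/* Unregistered automatically when the device goes away. */
	return devm_register_sys_off_handler(dev, SYS_OFF_MODE_POWER_OFF,
					     SYS_OFF_PRIO_DEFAULT,
					     my_power_off, pmic);
}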
diff --git a/include/linux/ref_tracker.h b/include/linux/ref_tracker.h
new file mode 100644
index 000000000000..9ca353ab712b
--- /dev/null
+++ b/include/linux/ref_tracker.h
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#ifndef _LINUX_REF_TRACKER_H
+#define _LINUX_REF_TRACKER_H
+#include <linux/refcount.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/stackdepot.h>
+
+struct ref_tracker;
+
+struct ref_tracker_dir {
+#ifdef CONFIG_REF_TRACKER
+ spinlock_t lock;
+ unsigned int quarantine_avail;
+ refcount_t untracked;
+ refcount_t no_tracker;
+ bool dead;
+ struct list_head list; /* List of active trackers */
+ struct list_head quarantine; /* List of dead trackers */
+#endif
+};
+
+#ifdef CONFIG_REF_TRACKER
+static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
+ unsigned int quarantine_count)
+{
+ INIT_LIST_HEAD(&dir->list);
+ INIT_LIST_HEAD(&dir->quarantine);
+ spin_lock_init(&dir->lock);
+ dir->quarantine_avail = quarantine_count;
+ dir->dead = false;
+ refcount_set(&dir->untracked, 1);
+ refcount_set(&dir->no_tracker, 1);
+ stack_depot_init();
+}
+
+void ref_tracker_dir_exit(struct ref_tracker_dir *dir);
+
+void ref_tracker_dir_print(struct ref_tracker_dir *dir,
+ unsigned int display_limit);
+
+int ref_tracker_alloc(struct ref_tracker_dir *dir,
+ struct ref_tracker **trackerp, gfp_t gfp);
+
+int ref_tracker_free(struct ref_tracker_dir *dir,
+ struct ref_tracker **trackerp);
+
+#else /* CONFIG_REF_TRACKER */
+
+static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
+ unsigned int quarantine_count)
+{
+}
+
+static inline void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
+{
+}
+
+static inline void ref_tracker_dir_print(struct ref_tracker_dir *dir,
+ unsigned int display_limit)
+{
+}
+
+static inline int ref_tracker_alloc(struct ref_tracker_dir *dir,
+ struct ref_tracker **trackerp,
+ gfp_t gfp)
+{
+ return 0;
+}
+
+static inline int ref_tracker_free(struct ref_tracker_dir *dir,
+ struct ref_tracker **trackerp)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* _LINUX_REF_TRACKER_H */
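
A minimal lifecycle sketch for the new ref_tracker API; in practice the directory is embedded in the refcounted object being debugged:

#include <linux/ref_tracker.h>

static struct ref_tracker_dir dir;

static void ref_tracker_example(void)
{
	struct ref_tracker *tracker = NULL;

	ref_tracker_dir_init(&dir, 16);	/* quarantine up to 16 freed trackers */

	/* Pair each reference acquire/release with alloc/free. */
	ref_tracker_alloc(&dir, &tracker, GFP_KERNEL);
	ref_tracker_free(&dir, &tracker);

	ref_tracker_dir_print(&dir, 16);	/* report still-active references */
	ref_tracker_dir_exit(&dir);
}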
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 0ac50cf62d06..a62fcca97486 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -38,11 +38,24 @@
* atomic operations, then the count will continue to edge closer to 0. If it
* reaches a value of 1 before /any/ of the threads reset it to the saturated
* value, then a concurrent refcount_dec_and_test() may erroneously free the
- * underlying object. Given the precise timing details involved with the
- * round-robin scheduling of each thread manipulating the refcount and the need
- * to hit the race multiple times in succession, there doesn't appear to be a
- * practical avenue of attack even if using refcount_add() operations with
- * larger increments.
+ * underlying object.
+ * Linux limits the maximum number of tasks to PID_MAX_LIMIT, which is currently
+ * 0x400000 (and can't easily be raised in the future beyond FUTEX_TID_MASK).
+ * With the current PID limit, if no batched refcounting operations are used and
+ * the attacker can't repeatedly trigger kernel oopses in the middle of refcount
+ * operations, this makes it impossible for a saturated refcount to leave the
+ * saturation range, even if it is possible for multiple uses of the same
+ * refcount to nest in the context of a single task:
+ *
+ * (UINT_MAX+1-REFCOUNT_SATURATED) / PID_MAX_LIMIT =
+ * 0x40000000 / 0x400000 = 0x100 = 256
+ *
+ * If hundreds of references are added/removed with a single refcounting
+ * operation, it may potentially be possible to leave the saturation range; but
+ * given the precise timing details involved with the round-robin scheduling of
+ * each thread manipulating the refcount and the need to hit the race multiple
+ * times in succession, there doesn't appear to be a practical avenue of attack
+ * even if using refcount_add() operations with larger increments.
*
* Memory ordering
* ===============
@@ -88,7 +101,7 @@
struct mutex;
/**
- * struct refcount_t - variant of atomic_t specialized for reference counts
+ * typedef refcount_t - variant of atomic_t specialized for reference counts
* @refs: atomic_t counter field
*
* The counter saturates at REFCOUNT_SATURATED and will not move once
@@ -134,6 +147,24 @@ static inline unsigned int refcount_read(const refcount_t *r)
return atomic_read(&r->refs);
}
+static inline __must_check bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
+{
+ int old = refcount_read(r);
+
+ do {
+ if (!old)
+ break;
+ } while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));
+
+ if (oldp)
+ *oldp = old;
+
+ if (unlikely(old < 0 || old + i < 0))
+ refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);
+
+ return old;
+}
+
/**
* refcount_add_not_zero - add a value to a refcount unless it is 0
* @i: the value to add to the refcount
@@ -154,17 +185,20 @@ static inline unsigned int refcount_read(const refcount_t *r)
*/
static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
{
- int old = refcount_read(r);
+ return __refcount_add_not_zero(i, r, NULL);
+}
- do {
- if (!old)
- break;
- } while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));
+static inline void __refcount_add(int i, refcount_t *r, int *oldp)
+{
+ int old = atomic_fetch_add_relaxed(i, &r->refs);
- if (unlikely(old < 0 || old + i < 0))
- refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);
+ if (oldp)
+ *oldp = old;
- return old;
+ if (unlikely(!old))
+ refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
+ else if (unlikely(old < 0 || old + i < 0))
+ refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
}
/**
@@ -185,12 +219,12 @@ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
*/
static inline void refcount_add(int i, refcount_t *r)
{
- int old = atomic_fetch_add_relaxed(i, &r->refs);
+ __refcount_add(i, r, NULL);
+}
- if (unlikely(!old))
- refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
- else if (unlikely(old < 0 || old + i < 0))
- refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
+static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp)
+{
+ return __refcount_add_not_zero(1, r, oldp);
}
/**
@@ -208,7 +242,12 @@ static inline void refcount_add(int i, refcount_t *r)
*/
static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
- return refcount_add_not_zero(1, r);
+ return __refcount_inc_not_zero(r, NULL);
+}
+
+static inline void __refcount_inc(refcount_t *r, int *oldp)
+{
+ __refcount_add(1, r, oldp);
}
/**
@@ -225,7 +264,25 @@ static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
*/
static inline void refcount_inc(refcount_t *r)
{
- refcount_add(1, r);
+ __refcount_inc(r, NULL);
+}
+
+static inline __must_check bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
+{
+ int old = atomic_fetch_sub_release(i, &r->refs);
+
+ if (oldp)
+ *oldp = old;
+
+ if (old == i) {
+ smp_acquire__after_ctrl_dep();
+ return true;
+ }
+
+ if (unlikely(old < 0 || old - i < 0))
+ refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
+
+ return false;
}
/**
@@ -250,17 +307,12 @@ static inline void refcount_inc(refcount_t *r)
*/
static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
- int old = atomic_fetch_sub_release(i, &r->refs);
-
- if (old == i) {
- smp_acquire__after_ctrl_dep();
- return true;
- }
-
- if (unlikely(old < 0 || old - i < 0))
- refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
+ return __refcount_sub_and_test(i, r, NULL);
+}
- return false;
+static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp)
+{
+ return __refcount_sub_and_test(1, r, oldp);
}
/**
@@ -278,7 +330,18 @@ static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
*/
static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
- return refcount_sub_and_test(1, r);
+ return __refcount_dec_and_test(r, NULL);
+}
+
+static inline void __refcount_dec(refcount_t *r, int *oldp)
+{
+ int old = atomic_fetch_sub_release(1, &r->refs);
+
+ if (oldp)
+ *oldp = old;
+
+ if (unlikely(old <= 1))
+ refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
}
/**
@@ -293,15 +356,14 @@ static inline __must_check bool refcount_dec_and_test(refcount_t *r)
*/
static inline void refcount_dec(refcount_t *r)
{
- if (unlikely(atomic_fetch_sub_release(1, &r->refs) <= 1))
- refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
+ __refcount_dec(r, NULL);
}
extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
-extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
-extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
+extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(lock);
+extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(lock);
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
spinlock_t *lock,
- unsigned long *flags);
+ unsigned long *flags) __cond_acquires(lock);
#endif /* _LINUX_REFCOUNT_H */
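
The new double-underscore variants report the pre-operation value through *oldp, which is useful for tracing; a sketch under that assumption:

#include <linux/refcount.h>

static bool get_object_traced(refcount_t *r)
{
	int old;

	if (!__refcount_inc_not_zero(r, &old))
		return false;			/* object already dead */

	pr_debug("refcount was %d before increment\n", old);
	return true;
}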
diff --git a/include/linux/refcount_api.h b/include/linux/refcount_api.h
new file mode 100644
index 000000000000..5f032589f568
--- /dev/null
+++ b/include/linux/refcount_api.h
@@ -0,0 +1 @@
+#include <linux/refcount.h>
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index f0a092a1a96d..ca3434dca3a0 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -17,13 +17,17 @@
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/lockdep.h>
+#include <linux/iopoll.h>
+#include <linux/fwnode.h>
struct module;
struct clk;
struct device;
+struct device_node;
struct i2c_client;
struct i3c_device;
struct irq_domain;
+struct mdio_device;
struct slim_device;
struct spi_device;
struct spmi_device;
@@ -71,35 +75,12 @@ struct reg_sequence {
unsigned int delay_us;
};
-#define regmap_update_bits(map, reg, mask, val) \
- regmap_update_bits_base(map, reg, mask, val, NULL, false, false)
-#define regmap_update_bits_async(map, reg, mask, val)\
- regmap_update_bits_base(map, reg, mask, val, NULL, true, false)
-#define regmap_update_bits_check(map, reg, mask, val, change)\
- regmap_update_bits_base(map, reg, mask, val, change, false, false)
-#define regmap_update_bits_check_async(map, reg, mask, val, change)\
- regmap_update_bits_base(map, reg, mask, val, change, true, false)
-
-#define regmap_write_bits(map, reg, mask, val) \
- regmap_update_bits_base(map, reg, mask, val, NULL, false, true)
-
-#define regmap_field_write(field, val) \
- regmap_field_update_bits_base(field, ~0, val, NULL, false, false)
-#define regmap_field_force_write(field, val) \
- regmap_field_update_bits_base(field, ~0, val, NULL, false, true)
-#define regmap_field_update_bits(field, mask, val)\
- regmap_field_update_bits_base(field, mask, val, NULL, false, false)
-#define regmap_field_force_update_bits(field, mask, val) \
- regmap_field_update_bits_base(field, mask, val, NULL, false, true)
-
-#define regmap_fields_write(field, id, val) \
- regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, false)
-#define regmap_fields_force_write(field, id, val) \
- regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, true)
-#define regmap_fields_update_bits(field, id, mask, val)\
- regmap_fields_update_bits_base(field, id, mask, val, NULL, false, false)
-#define regmap_fields_force_update_bits(field, id, mask, val) \
- regmap_fields_update_bits_base(field, id, mask, val, NULL, false, true)
+#define REG_SEQ(_reg, _def, _delay_us) { \
+ .reg = _reg, \
+ .def = _def, \
+ .delay_us = _delay_us, \
+ }
+#define REG_SEQ0(_reg, _def) REG_SEQ(_reg, _def, 0)
/**
* regmap_read_poll_timeout - Poll until a condition is met or a timeout occurs
@@ -122,26 +103,10 @@ struct reg_sequence {
*/
#define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \
({ \
- u64 __timeout_us = (timeout_us); \
- unsigned long __sleep_us = (sleep_us); \
- ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
- int __ret; \
- might_sleep_if(__sleep_us); \
- for (;;) { \
- __ret = regmap_read((map), (addr), &(val)); \
- if (__ret) \
- break; \
- if (cond) \
- break; \
- if ((__timeout_us) && \
- ktime_compare(ktime_get(), __timeout) > 0) { \
- __ret = regmap_read((map), (addr), &(val)); \
- break; \
- } \
- if (__sleep_us) \
- usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
- } \
- __ret ?: ((cond) ? 0 : -ETIMEDOUT); \
+ int __ret, __tmp; \
+ __tmp = read_poll_timeout(regmap_read, __ret, __ret || (cond), \
+ sleep_us, timeout_us, false, (map), (addr), &(val)); \
+ __ret ?: __tmp; \
})
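
Usage of regmap_read_poll_timeout() is unchanged by this rework; a sketch, where STATUS_REG and STATUS_READY are hypothetical device-specific values:

#define STATUS_REG	0x10		/* hypothetical */
#define STATUS_READY	BIT(0)		/* hypothetical */

static int wait_device_ready(struct regmap *map)
{
	unsigned int val;

	/* Sleep ~10us between reads, give up after 1ms (-ETIMEDOUT). */
	return regmap_read_poll_timeout(map, STATUS_REG, val,
					val & STATUS_READY, 10, 1000);
}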
/**
@@ -209,25 +174,10 @@ struct reg_sequence {
*/
#define regmap_field_read_poll_timeout(field, val, cond, sleep_us, timeout_us) \
({ \
- u64 __timeout_us = (timeout_us); \
- unsigned long __sleep_us = (sleep_us); \
- ktime_t timeout = ktime_add_us(ktime_get(), __timeout_us); \
- int pollret; \
- might_sleep_if(__sleep_us); \
- for (;;) { \
- pollret = regmap_field_read((field), &(val)); \
- if (pollret) \
- break; \
- if (cond) \
- break; \
- if (__timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
- pollret = regmap_field_read((field), &(val)); \
- break; \
- } \
- if (__sleep_us) \
- usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
- } \
- pollret ?: ((cond) ? 0 : -ETIMEDOUT); \
+ int __ret, __tmp; \
+ __tmp = read_poll_timeout(regmap_field_read, __ret, __ret || (cond), \
+ sleep_us, timeout_us, false, (field), &(val)); \
+ __ret ?: __tmp; \
})
#ifdef CONFIG_REGMAP
@@ -287,6 +237,10 @@ typedef void (*regmap_unlock)(void *);
* @reg_stride: The register address stride. Valid register addresses are a
* multiple of this value. If set to 0, a value of 1 will be
* used.
+ * @reg_downshift: The number of bits to downshift the register before
+ * performing any operations.
+ * @reg_base: Value to be added to every register address before performing any
+ * operation.
* @pad_bits: Number of bits of padding between register and value.
* @val_bits: Number of bits in a register value, mandatory.
*
@@ -326,7 +280,7 @@ typedef void (*regmap_unlock)(void *);
* readable if it belongs to one of the ranges specified
* by rd_noinc_table).
* @disable_locking: This regmap is either protected by external means or
- * is guaranteed not be be accessed from multiple threads.
+ * is guaranteed not to be accessed from multiple threads.
* Don't use any locking mechanisms.
* @lock: Optional lock callback (overrides regmap's default lock
* function, based on spinlock or mutex).
@@ -340,12 +294,25 @@ typedef void (*regmap_unlock)(void *);
* read operation on a bus such as SPI, I2C, etc. Most of the
* devices do not need this.
* @reg_write: Same as above for writing.
+ * @reg_update_bits: Optional callback that if filled will be used to perform
+ * all the update_bits (read-modify-write) operations. Should
+ * only be provided if the device requires special lock and
+ * register handling and the operation cannot be represented
+ * as a simple update_bits operation on a bus such as SPI,
+ * I2C, etc.
+ * @read: Optional callback that if filled will be used to perform all the
+ * bulk reads from the registers. Data is returned in the buffer used
+ * to transmit data.
+ * @write: Same as above for writing.
+ * @max_raw_read: Max raw read size that can be used on the device.
+ * @max_raw_write: Max raw write size that can be used on the device.
* @fast_io: Register IO is fast. Use a spinlock instead of a mutex
* to perform locking. This field is ignored if custom lock/unlock
* functions are used (see fields lock/unlock of struct regmap_config).
 * This field is a duplicate of a similar field in
* 'struct regmap_bus' and serves exact same purpose.
* Use it only for "no-bus" cases.
+ * @io_port: Support IO port accessors. Makes sense only when MMIO vs. IO port
+ * access can be distinguished.
* @max_register: Optional, specifies the maximum valid register address.
* @wr_table: Optional, points to a struct regmap_access_table specifying
* valid ranges for write access.
@@ -366,6 +333,10 @@ typedef void (*regmap_unlock)(void *);
* masks are used.
* @zero_flag_mask: If set, read_flag_mask and write_flag_mask are used even
* if they are both empty.
+ * @use_relaxed_mmio: If set, MMIO R/W operations will not use memory barriers.
+ * This can avoid load on devices which don't require strict
+ * orderings, but drivers should carefully add any explicit
+ * memory barriers when they may require them.
* @use_single_read: If set, converts the bulk read operation into a series of
* single read operations. This is useful for a device that
* does not support bulk read.
@@ -390,15 +361,19 @@ typedef void (*regmap_unlock)(void *);
* @ranges: Array of configuration entries for virtual address ranges.
* @num_ranges: Number of range configuration entries.
* @use_hwlock: Indicate if a hardware spinlock should be used.
+ * @use_raw_spinlock: Indicate if a raw spinlock should be used.
* @hwlock_id: Specify the hardware spinlock id.
* @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE,
* HWLOCK_IRQ or 0.
+ * @can_sleep: Optional, specifies whether regmap operations can sleep.
*/
struct regmap_config {
const char *name;
int reg_bits;
int reg_stride;
+ int reg_downshift;
+ unsigned int reg_base;
int pad_bits;
int val_bits;
@@ -416,8 +391,17 @@ struct regmap_config {
int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
int (*reg_write)(void *context, unsigned int reg, unsigned int val);
+ int (*reg_update_bits)(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val);
+ /* Bulk read/write */
+ int (*read)(void *context, const void *reg_buf, size_t reg_size,
+ void *val_buf, size_t val_size);
+ int (*write)(void *context, const void *data, size_t count);
+ size_t max_raw_read;
+ size_t max_raw_write;
bool fast_io;
+ bool io_port;
unsigned int max_register;
const struct regmap_access_table *wr_table;
@@ -438,6 +422,7 @@ struct regmap_config {
bool use_single_read;
bool use_single_write;
+ bool use_relaxed_mmio;
bool can_multi_write;
enum regmap_endian reg_format_endian;
@@ -447,8 +432,11 @@ struct regmap_config {
unsigned int num_ranges;
bool use_hwlock;
+ bool use_raw_spinlock;
unsigned int hwlock_id;
unsigned int hwlock_mode;
+
+ bool can_sleep;
};
/**
@@ -461,8 +449,8 @@ struct regmap_config {
* @range_max: Address of the highest register in virtual range.
*
* @selector_reg: Register with selector field.
- * @selector_mask: Bit shift for selector value.
- * @selector_shift: Bit mask for selector value.
+ * @selector_mask: Bit mask for selector value.
+ * @selector_shift: Bit shift for selector value.
*
* @window_start: Address of first (lowest) register in data window.
* @window_len: Number of registers in data window.
@@ -504,8 +492,12 @@ typedef int (*regmap_hw_read)(void *context,
void *val_buf, size_t val_size);
typedef int (*regmap_hw_reg_read)(void *context, unsigned int reg,
unsigned int *val);
+typedef int (*regmap_hw_reg_noinc_read)(void *context, unsigned int reg,
+ void *val, size_t val_count);
typedef int (*regmap_hw_reg_write)(void *context, unsigned int reg,
unsigned int val);
+typedef int (*regmap_hw_reg_noinc_write)(void *context, unsigned int reg,
+ const void *val, size_t val_count);
typedef int (*regmap_hw_reg_update_bits)(void *context, unsigned int reg,
unsigned int mask, unsigned int val);
typedef struct regmap_async *(*regmap_hw_async_alloc)(void);
@@ -526,6 +518,8 @@ typedef void (*regmap_hw_free_context)(void *context);
* must serialise with respect to non-async I/O.
* @reg_write: Write a single register value to the given register address. This
* write operation has to complete when returning from the function.
+ * @reg_write_noinc: Write multiple register values to the same register. This
+ * write operation has to complete when returning from the function.
* @reg_update_bits: Update bits operation to be used against volatile
* registers, intended for devices supporting some mechanism
* for setting clearing bits without having to
@@ -545,6 +539,7 @@ typedef void (*regmap_hw_free_context)(void *context);
* DEFAULT, BIG is assumed.
* @max_raw_read: Max raw read size that can be used on the bus.
* @max_raw_write: Max raw write size that can be used on the bus.
+ * @free_on_exit: kfree() this structure when the regmap is freed
*/
struct regmap_bus {
bool fast_io;
@@ -552,9 +547,11 @@ struct regmap_bus {
regmap_hw_gather_write gather_write;
regmap_hw_async_write async_write;
regmap_hw_reg_write reg_write;
+ regmap_hw_reg_noinc_write reg_noinc_write;
regmap_hw_reg_update_bits reg_update_bits;
regmap_hw_read read;
regmap_hw_reg_read reg_read;
+ regmap_hw_reg_noinc_read reg_noinc_read;
regmap_hw_free_context free_context;
regmap_hw_async_alloc async_alloc;
u8 read_flag_mask;
@@ -562,6 +559,7 @@ struct regmap_bus {
enum regmap_endian val_format_endian_default;
size_t max_raw_read;
size_t max_raw_write;
+ bool free_on_exit;
};
/*
@@ -581,6 +579,10 @@ struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__regmap_init_mdio(struct mdio_device *mdio_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__regmap_init_sccb(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -618,6 +620,14 @@ struct regmap *__regmap_init_sdw(struct sdw_slave *sdw,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_spi_avmm(struct spi_device *spi,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__devm_regmap_init(struct device *dev,
const struct regmap_bus *bus,
@@ -629,6 +639,10 @@ struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__devm_regmap_init_mdio(struct mdio_device *mdio_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -663,6 +677,10 @@ struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -671,6 +689,10 @@ struct regmap *__devm_regmap_init_i3c(struct i3c_device *i3c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
/*
* Wrapper for regmap_init macros to include a unique lockdep key and name
* for each call. No-op if CONFIG_LOCKDEP is not set.
@@ -725,6 +747,19 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
i2c, config)
/**
+ * regmap_init_mdio() - Initialise register map
+ *
+ * @mdio_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_mdio(mdio_dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_mdio, #config, \
+ mdio_dev, config)
+
+/**
* regmap_init_sccb() - Initialise register map
*
* @i2c: Device that will be interacted with
@@ -857,6 +892,32 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
__regmap_lockdep_wrapper(__regmap_init_sdw, #config, \
sdw, config)
+/**
+ * regmap_init_sdw_mbq() - Initialise register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_sdw_mbq(sdw, config) \
+ __regmap_lockdep_wrapper(__regmap_init_sdw_mbq, #config, \
+ sdw, config)
+
+/**
+ * regmap_init_spi_avmm() - Initialize register map for Intel SPI Slave
+ * to AVMM Bus Bridge
+ *
+ * @spi: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap.
+ */
+#define regmap_init_spi_avmm(spi, config) \
+ __regmap_lockdep_wrapper(__regmap_init_spi_avmm, #config, \
+ spi, config)
/**
* devm_regmap_init() - Initialise managed register map
@@ -890,6 +951,20 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
i2c, config)
/**
+ * devm_regmap_init_mdio() - Initialise managed register map
+ *
+ * @mdio_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_mdio(mdio_dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_mdio, #config, \
+ mdio_dev, config)
+
+/**
* devm_regmap_init_sccb() - Initialise managed register map
*
* @i2c: Device that will be interacted with
@@ -1017,6 +1092,20 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
sdw, config)
/**
+ * devm_regmap_init_sdw_mbq() - Initialise managed register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_sdw_mbq(sdw, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_sdw_mbq, #config, \
+ sdw, config)
+
+/**
* devm_regmap_init_slimbus() - Initialise managed register map
*
* @slimbus: Device that will be interacted with
@@ -1044,6 +1133,21 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
__regmap_lockdep_wrapper(__devm_regmap_init_i3c, #config, \
i3c, config)
+/**
+ * devm_regmap_init_spi_avmm() - Initialize register map for Intel SPI Slave
+ * to AVMM Bus Bridge
+ *
+ * @spi: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The map will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_spi_avmm(spi, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_spi_avmm, #config, \
+ spi, config)
+
int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk);
void regmap_mmio_detach_clk(struct regmap *map);
void regmap_exit(struct regmap *map);
@@ -1076,6 +1180,42 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change, bool async, bool force);
+
+static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_update_bits_base(map, reg, mask, val, NULL, false, false);
+}
+
+static inline int regmap_update_bits_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_update_bits_base(map, reg, mask, val, NULL, true, false);
+}
+
+static inline int regmap_update_bits_check(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ return regmap_update_bits_base(map, reg, mask, val,
+ change, false, false);
+}
+
+static inline int
+regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ return regmap_update_bits_base(map, reg, mask, val,
+ change, true, false);
+}
+
+static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_update_bits_base(map, reg, mask, val, NULL, false, true);
+}
+
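These static inline wrappers behave exactly as the former macros did; for example, with hypothetical CTRL_REG and CTRL_EN definitions:

#define CTRL_REG	0x04		/* hypothetical */
#define CTRL_EN		BIT(0)		/* hypothetical */

static int enable_block(struct regmap *map)
{
	/* Read-modify-write of one bit; the write is skipped if unchanged. */
	return regmap_update_bits(map, CTRL_REG, CTRL_EN, CTRL_EN);
}
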
int regmap_get_val_bytes(struct regmap *map);
int regmap_get_max_register(struct regmap *map);
int regmap_get_reg_stride(struct regmap *map);
@@ -1111,6 +1251,21 @@ bool regmap_reg_in_ranges(unsigned int reg,
const struct regmap_range *ranges,
unsigned int nranges);
+static inline int regmap_set_bits(struct regmap *map,
+ unsigned int reg, unsigned int bits)
+{
+ return regmap_update_bits_base(map, reg, bits, bits,
+ NULL, false, false);
+}
+
+static inline int regmap_clear_bits(struct regmap *map,
+ unsigned int reg, unsigned int bits)
+{
+ return regmap_update_bits_base(map, reg, bits, 0, NULL, false, false);
+}
+
+int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits);
+
/**
* struct reg_field - Description of an register field
*
@@ -1134,6 +1289,14 @@ struct reg_field {
.msb = _msb, \
}
+#define REG_FIELD_ID(_reg, _lsb, _msb, _size, _offset) { \
+ .reg = _reg, \
+ .lsb = _lsb, \
+ .msb = _msb, \
+ .id_size = _size, \
+ .id_offset = _offset, \
+ }
+
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
struct reg_field reg_field);
void regmap_field_free(struct regmap_field *field);
@@ -1142,6 +1305,18 @@ struct regmap_field *devm_regmap_field_alloc(struct device *dev,
struct regmap *regmap, struct reg_field reg_field);
void devm_regmap_field_free(struct device *dev, struct regmap_field *field);
+int regmap_field_bulk_alloc(struct regmap *regmap,
+ struct regmap_field **rm_field,
+ const struct reg_field *reg_field,
+ int num_fields);
+void regmap_field_bulk_free(struct regmap_field *field);
+int devm_regmap_field_bulk_alloc(struct device *dev, struct regmap *regmap,
+ struct regmap_field **field,
+ const struct reg_field *reg_field,
+ int num_fields);
+void devm_regmap_field_bulk_free(struct device *dev,
+ struct regmap_field *field);
+
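
A sketch of the bulk allocator, which turns an array of field descriptions into an array of live regmap_field pointers in one managed call; the field layout is hypothetical:

	enum { F_MODE, F_EN, F_NUM };	/* hypothetical field indices */

	static const struct reg_field example_fields[F_NUM] = {
		[F_MODE] = REG_FIELD(0x30, 0, 3),
		[F_EN]   = REG_FIELD(0x30, 7, 7),
	};

	static int example_alloc_fields(struct device *dev, struct regmap *map,
					struct regmap_field *fields[F_NUM])
	{
		/* Allocates all fields at once; freed on driver detach. */
		return devm_regmap_field_bulk_alloc(dev, map, fields,
						    example_fields, F_NUM);
	}
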
int regmap_field_read(struct regmap_field *field, unsigned int *val);
int regmap_field_update_bits_base(struct regmap_field *field,
unsigned int mask, unsigned int val,
@@ -1151,6 +1326,81 @@ int regmap_fields_read(struct regmap_field *field, unsigned int id,
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
unsigned int mask, unsigned int val,
bool *change, bool async, bool force);
+
+static inline int regmap_field_write(struct regmap_field *field,
+ unsigned int val)
+{
+ return regmap_field_update_bits_base(field, ~0, val,
+ NULL, false, false);
+}
+
+static inline int regmap_field_force_write(struct regmap_field *field,
+ unsigned int val)
+{
+ return regmap_field_update_bits_base(field, ~0, val, NULL, false, true);
+}
+
+static inline int regmap_field_update_bits(struct regmap_field *field,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_field_update_bits_base(field, mask, val,
+ NULL, false, false);
+}
+
+static inline int regmap_field_set_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ return regmap_field_update_bits_base(field, bits, bits, NULL, false,
+ false);
+}
+
+static inline int regmap_field_clear_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ return regmap_field_update_bits_base(field, bits, 0, NULL, false,
+ false);
+}
+
+int regmap_field_test_bits(struct regmap_field *field, unsigned int bits);
+
+static inline int
+regmap_field_force_update_bits(struct regmap_field *field,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_field_update_bits_base(field, mask, val,
+ NULL, false, true);
+}
+
+static inline int regmap_fields_write(struct regmap_field *field,
+ unsigned int id, unsigned int val)
+{
+ return regmap_fields_update_bits_base(field, id, ~0, val,
+ NULL, false, false);
+}
+
+static inline int regmap_fields_force_write(struct regmap_field *field,
+ unsigned int id, unsigned int val)
+{
+ return regmap_fields_update_bits_base(field, id, ~0, val,
+ NULL, false, true);
+}
+
+static inline int
+regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_fields_update_bits_base(field, id, mask, val,
+ NULL, false, false);
+}
+
+static inline int
+regmap_fields_force_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_fields_update_bits_base(field, id, mask, val,
+ NULL, false, true);
+}
+
/**
* struct regmap_irq_type - IRQ type definitions.
*
@@ -1201,6 +1451,8 @@ struct regmap_irq_sub_irq_map {
unsigned int *offset;
};
+struct regmap_irq_chip_data;
+
/**
* struct regmap_irq_chip - Description of a generic regmap irq_chip.
*
@@ -1221,44 +1473,86 @@ struct regmap_irq_sub_irq_map {
* status_base. Should contain num_regs arrays.
* Can be provided for chips with more complex mapping than
 * 1st bit to 1st sub-reg, 2nd bit to 2nd sub-reg, ...
+ * When used with @not_fixed_stride, each one-element array
+ * member contains the offset of the corresponding peripheral's
+ * registers relative to the first peripheral.
* @num_main_regs: Number of 'main status' irq registers for chips which have
* main_status set.
*
* @status_base: Base status register address.
- * @mask_base: Base mask register address.
- * @mask_writeonly: Base mask register is write only.
- * @unmask_base: Base unmask register address. for chips who have
- * separate mask and unmask registers
+ * @mask_base: Base mask register address. Mask bits are set to 1 when an
+ * interrupt is masked, 0 when unmasked.
+ * @unmask_base: Base unmask register address. Unmask bits are set to 1 when
+ * an interrupt is unmasked and 0 when masked.
* @ack_base: Base ack address. If zero then the chip is clear on read.
 * A zero value can still be used if the @use_ack bit is set.
* @wake_base: Base address for wake enables. If zero unsupported.
- * @type_base: Base address for irq type. If zero unsupported.
+ * @type_base: Base address for irq type. If zero unsupported. Deprecated,
+ * use @config_base instead.
+ * @virt_reg_base: Base addresses for extra config regs. Deprecated, use
+ * @config_base instead.
+ * @config_base: Base address for IRQ type config regs. If null unsupported.
* @irq_reg_stride: Stride to use for chips where registers are not contiguous.
 * @init_ack_masked: Ack all masked interrupts once during initialization.
* @mask_invert: Inverted mask register: cleared bits are masked out.
+ * Deprecated; prefer describing an inverted mask register as
+ * an unmask register.
+ * @mask_unmask_non_inverted: Controls mask bit inversion for chips that set
+ * both @mask_base and @unmask_base. If false, mask and unmask bits are
+ * inverted (which is deprecated behavior); if true, bits will not be
+ * inverted and the registers keep their normal behavior. Note that if
+ * you use only one of @mask_base or @unmask_base, this flag has no
+ * effect and is unnecessary. Any new drivers that set both @mask_base
+ * and @unmask_base should set this to true to avoid relying on the
+ * deprecated behavior.
* @use_ack: Use @ack register even if it is zero.
* @ack_invert: Inverted ack register: cleared bits for ack.
+ * @clear_ack: Use this to write 1 and then 0 (or, with @ack_invert, 0 and
+ * then 1) to clear interrupts.
* @wake_invert: Inverted wake register: cleared bits are wake enabled.
- * @type_invert: Invert the type flags.
- * @type_in_mask: Use the mask registers for controlling irq type. For
- * interrupts defining type_rising/falling_mask use mask_base
- * for edge configuration and never update bits in type_base.
+ * @type_invert: Invert the type flags. Deprecated, use config registers
+ * instead.
+ * @type_in_mask: Use the mask registers for controlling irq type. Use this if
+ * the hardware provides separate bits for rising/falling edge
+ * or low/high level interrupts and they should be combined into
+ * a single logical interrupt. Use &struct regmap_irq_type data
+ * to define the mask bit for each irq type.
* @clear_on_unmask: For chips with interrupts cleared on read: read the status
* registers before unmasking interrupts to clear any bits
* set when they were masked.
+ * @not_fixed_stride: Used when chip peripherals are not laid out with fixed
+ * stride. Must be used with sub_reg_offsets containing the
+ * offsets to each peripheral. Deprecated; the same thing
+ * can be accomplished with a @get_irq_reg callback, without
+ * the need for a @sub_reg_offsets table.
+ * @status_invert: Inverted status register: cleared bits are active interrupts.
* @runtime_pm: Hold a runtime PM lock on the device when accessing it.
*
* @num_regs: Number of registers in each control bank.
* @irqs: Descriptors for individual IRQs. Interrupt numbers are
* assigned based on the index in the array of the interrupt.
* @num_irqs: Number of descriptors.
- * @num_type_reg: Number of type registers.
- * @type_reg_stride: Stride to use for chips where type registers are not
- * contiguous.
+ * @num_type_reg: Number of type registers. Deprecated, use config registers
+ * instead.
+ * @num_virt_regs: Number of non-standard irq configuration registers.
+ * If zero unsupported. Deprecated, use config registers
+ * instead.
+ * @num_config_bases: Number of config base registers.
+ * @num_config_regs: Number of config registers for each config base register.
* @handle_pre_irq: Driver specific callback to handle interrupt from device
 * before regmap_irq_handler() processes the interrupts.
* @handle_post_irq: Driver specific callback to handle interrupt from device
* after handling the interrupts in regmap_irq_handler().
+ * @set_type_virt: Driver specific callback to extend regmap_irq_set_type()
+ * and configure virt regs. Deprecated, use @set_type_config
+ * callback and config registers instead.
+ * @set_type_config: Callback used for configuring irq types.
+ * @get_irq_reg: Callback for mapping (base register, index) pairs to register
+ * addresses. The base register will be one of @status_base,
+ * @mask_base, etc., @main_status, or any of @config_base.
+ * The index will be in the range [0, num_main_regs) for the
+ * main status base, [0, num_type_settings) for any config
+ * register base, and [0, num_regs) for any other base.
+ * If unspecified then regmap_irq_get_irq_reg_linear() is used.
* @irq_drv_data: Driver specific IRQ data which is passed as parameter when
* driver specific pre/post interrupt handler is called.
*
@@ -1280,17 +1574,22 @@ struct regmap_irq_chip {
unsigned int ack_base;
unsigned int wake_base;
unsigned int type_base;
+ unsigned int *virt_reg_base;
+ const unsigned int *config_base;
unsigned int irq_reg_stride;
- bool mask_writeonly:1;
- bool init_ack_masked:1;
- bool mask_invert:1;
- bool use_ack:1;
- bool ack_invert:1;
- bool wake_invert:1;
- bool runtime_pm:1;
- bool type_invert:1;
- bool type_in_mask:1;
- bool clear_on_unmask:1;
+ unsigned int init_ack_masked:1;
+ unsigned int mask_invert:1;
+ unsigned int mask_unmask_non_inverted:1;
+ unsigned int use_ack:1;
+ unsigned int ack_invert:1;
+ unsigned int clear_ack:1;
+ unsigned int wake_invert:1;
+ unsigned int runtime_pm:1;
+ unsigned int type_invert:1;
+ unsigned int type_in_mask:1;
+ unsigned int clear_on_unmask:1;
+ unsigned int not_fixed_stride:1;
+ unsigned int status_invert:1;
int num_regs;
@@ -1298,24 +1597,46 @@ struct regmap_irq_chip {
int num_irqs;
int num_type_reg;
- unsigned int type_reg_stride;
+ int num_virt_regs;
+ int num_config_bases;
+ int num_config_regs;
int (*handle_pre_irq)(void *irq_drv_data);
int (*handle_post_irq)(void *irq_drv_data);
+ int (*set_type_virt)(unsigned int **buf, unsigned int type,
+ unsigned long hwirq, int reg);
+ int (*set_type_config)(unsigned int **buf, unsigned int type,
+ const struct regmap_irq *irq_data, int idx);
+ unsigned int (*get_irq_reg)(struct regmap_irq_chip_data *data,
+ unsigned int base, int index);
void *irq_drv_data;
};
-struct regmap_irq_chip_data;
+unsigned int regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data *data,
+ unsigned int base, int index);
+int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type,
+ const struct regmap_irq *irq_data, int idx);
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
int irq_base, const struct regmap_irq_chip *chip,
struct regmap_irq_chip_data **data);
+int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
+ struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data);
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data);
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
int irq_flags, int irq_base,
const struct regmap_irq_chip *chip,
struct regmap_irq_chip_data **data);
+int devm_regmap_add_irq_chip_fwnode(struct device *dev,
+ struct fwnode_handle *fwnode,
+ struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data);
void devm_regmap_del_irq_chip(struct device *dev, int irq,
struct regmap_irq_chip_data *data);
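
Putting the pieces together, a hypothetical chip with one status/mask/ack register bank might describe and register its irq_chip as below; the register layout and names are illustrative only, not from any real device:

	static const struct regmap_irq example_irqs[] = {
		REGMAP_IRQ_REG(0, 0, BIT(0)),	/* hypothetical "alert" */
		REGMAP_IRQ_REG(1, 0, BIT(1)),	/* hypothetical "done" */
	};

	static const struct regmap_irq_chip example_irq_chip = {
		.name = "example",
		.status_base = 0x50,	/* hypothetical register layout */
		.mask_base = 0x51,
		.ack_base = 0x52,
		.num_regs = 1,
		.irqs = example_irqs,
		.num_irqs = ARRAY_SIZE(example_irqs),
	};

	static int example_add_irq_chip(struct device *dev, struct regmap *map,
					int irq)
	{
		struct regmap_irq_chip_data *irq_data;

		return devm_regmap_add_irq_chip(dev, map, irq, IRQF_ONESHOT, 0,
						&example_irq_chip, &irq_data);
	}
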
@@ -1410,6 +1731,27 @@ static inline int regmap_update_bits_base(struct regmap *map, unsigned int reg,
return -EINVAL;
}
+static inline int regmap_set_bits(struct regmap *map,
+ unsigned int reg, unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_clear_bits(struct regmap *map,
+ unsigned int reg, unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_test_bits(struct regmap *map,
+ unsigned int reg, unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_field_update_bits_base(struct regmap_field *field,
unsigned int mask, unsigned int val,
bool *change, bool async, bool force)
@@ -1427,6 +1769,124 @@ static inline int regmap_fields_update_bits_base(struct regmap_field *field,
return -EINVAL;
}
+static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_update_bits_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_update_bits_check(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int
+regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_write(struct regmap_field *field,
+ unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_force_write(struct regmap_field *field,
+ unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_update_bits(struct regmap_field *field,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int
+regmap_field_force_update_bits(struct regmap_field *field,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_set_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_clear_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_test_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_fields_write(struct regmap_field *field,
+ unsigned int id, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_fields_force_write(struct regmap_field *field,
+ unsigned int id, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int
+regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int
+regmap_fields_force_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_get_val_bytes(struct regmap *map)
{
WARN_ONCE(1, "regmap API is disabled");
diff --git a/include/linux/regset.h b/include/linux/regset.h
index bf0243779738..a00765f0e8cf 100644
--- a/include/linux/regset.h
+++ b/include/linux/regset.h
@@ -17,6 +17,64 @@
struct task_struct;
struct user_regset;
+struct membuf {
+ void *p;
+ size_t left;
+};
+
+static inline int membuf_zero(struct membuf *s, size_t size)
+{
+ if (s->left) {
+ if (size > s->left)
+ size = s->left;
+ memset(s->p, 0, size);
+ s->p += size;
+ s->left -= size;
+ }
+ return s->left;
+}
+
+static inline int membuf_write(struct membuf *s, const void *v, size_t size)
+{
+ if (s->left) {
+ if (size > s->left)
+ size = s->left;
+ memcpy(s->p, v, size);
+ s->p += size;
+ s->left -= size;
+ }
+ return s->left;
+}
+
+static inline struct membuf membuf_at(const struct membuf *s, size_t offs)
+{
+ struct membuf n = *s;
+
+ if (offs > n.left)
+ offs = n.left;
+ n.p += offs;
+ n.left -= offs;
+
+ return n;
+}
+
+/* current s->p must be aligned for v; v must be a scalar */
+#define membuf_store(s, v) \
+({ \
+ struct membuf *__s = (s); \
+ if (__s->left) { \
+ typeof(v) __v = (v); \
+ size_t __size = sizeof(__v); \
+ if (unlikely(__size > __s->left)) { \
+ __size = __s->left; \
+ memcpy(__s->p, &__v, __size); \
+ } else { \
+ *(typeof(__v + 0) *)__s->p = __v; \
+ } \
+ __s->p += __size; \
+ __s->left -= __size; \
+ } \
+ __s->left;})
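
A sketch of how a ->regset_get() implementation consumes the struct membuf it is handed: each helper copies as much as fits, advances the buffer, and returns the space left. The register block and its size here are hypothetical:

	static int example_regset_get(struct task_struct *target,
				      const struct user_regset *regset,
				      struct membuf to)
	{
		u32 regs[16] = { 0 };	/* hypothetical register block */

		/* A real implementation fills regs[] from target's state. */
		membuf_write(&to, regs, sizeof(regs));	/* copy what fits */
		membuf_zero(&to, to.left);		/* zero-pad the rest */
		return 0;
	}
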
/**
* user_regset_active_fn - type of @active function in &struct user_regset
@@ -36,26 +94,9 @@ struct user_regset;
typedef int user_regset_active_fn(struct task_struct *target,
const struct user_regset *regset);
-/**
- * user_regset_get_fn - type of @get function in &struct user_regset
- * @target: thread being examined
- * @regset: regset being examined
- * @pos: offset into the regset data to access, in bytes
- * @count: amount of data to copy, in bytes
- * @kbuf: if not %NULL, a kernel-space pointer to copy into
- * @ubuf: if @kbuf is %NULL, a user-space pointer to copy into
- *
- * Fetch register values. Return %0 on success; -%EIO or -%ENODEV
- * are usual failure returns. The @pos and @count values are in
- * bytes, but must be properly aligned. If @kbuf is non-null, that
- * buffer is used and @ubuf is ignored. If @kbuf is %NULL, then
- * ubuf gives a userland pointer to access directly, and an -%EFAULT
- * return value is possible.
- */
-typedef int user_regset_get_fn(struct task_struct *target,
+typedef int user_regset_get2_fn(struct task_struct *target,
const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf);
+ struct membuf to);
/**
* user_regset_set_fn - type of @set function in &struct user_regset
@@ -104,28 +145,6 @@ typedef int user_regset_writeback_fn(struct task_struct *target,
int immediate);
/**
- * user_regset_get_size_fn - type of @get_size function in &struct user_regset
- * @target: thread being examined
- * @regset: regset being examined
- *
- * This call is optional; usually the pointer is %NULL.
- *
- * When provided, this function must return the current size of regset
- * data, as observed by the @get function in &struct user_regset. The
- * value returned must be a multiple of @size. The returned size is
- * required to be valid only until the next time (if any) @regset is
- * modified for @target.
- *
- * This function is intended for dynamically sized regsets. A regset
- * that is statically sized does not need to implement it.
- *
- * This function should not be called directly: instead, callers should
- * call regset_size() to determine the current size of a regset.
- */
-typedef unsigned int user_regset_get_size_fn(struct task_struct *target,
- const struct user_regset *regset);
-
-/**
* struct user_regset - accessible thread CPU state
* @n: Number of slots (registers).
* @size: Size in bytes of a slot (register).
@@ -136,7 +155,6 @@ typedef unsigned int user_regset_get_size_fn(struct task_struct *target,
* @set: Function to store values.
* @active: Function to report if regset is active, or %NULL.
* @writeback: Function to write data back to user memory, or %NULL.
- * @get_size: Function to return the regset's size, or %NULL.
*
* This data structure describes a machine resource we call a register set.
* This is part of the state of an individual thread, not necessarily
@@ -144,12 +162,7 @@ typedef unsigned int user_regset_get_size_fn(struct task_struct *target,
* similar slots, given by @n. Each slot is @size bytes, and aligned to
* @align bytes (which is at least @size). For dynamically-sized
* regsets, @n must contain the maximum possible number of slots for the
- * regset, and @get_size must point to a function that returns the
- * current regset size.
- *
- * Callers that need to know only the current size of the regset and do
- * not care about its internal structure should call regset_size()
- * instead of inspecting @n or calling @get_size.
+ * regset.
*
* For backward compatibility, the @get and @set methods must pad to, or
* accept, @n * @size bytes, even if the current regset size is smaller.
@@ -185,11 +198,10 @@ typedef unsigned int user_regset_get_size_fn(struct task_struct *target,
* omitted when there is an @active function and it returns zero.
*/
struct user_regset {
- user_regset_get_fn *get;
+ user_regset_get2_fn *regset_get;
user_regset_set_fn *set;
user_regset_active_fn *active;
user_regset_writeback_fn *writeback;
- user_regset_get_size_fn *get_size;
unsigned int n;
unsigned int size;
unsigned int align;
@@ -238,44 +250,6 @@ struct user_regset_view {
*/
const struct user_regset_view *task_user_regset_view(struct task_struct *tsk);
-
-/*
- * These are helpers for writing regset get/set functions in arch code.
- * Because @start_pos and @end_pos are always compile-time constants,
- * these are inlined into very little code though they look large.
- *
- * Use one or more calls sequentially for each chunk of regset data stored
- * contiguously in memory. Call with constants for @start_pos and @end_pos,
- * giving the range of byte positions in the regset that data corresponds
- * to; @end_pos can be -1 if this chunk is at the end of the regset layout.
- * Each call updates the arguments to point past its chunk.
- */
-
-static inline int user_regset_copyout(unsigned int *pos, unsigned int *count,
- void **kbuf,
- void __user **ubuf, const void *data,
- const int start_pos, const int end_pos)
-{
- if (*count == 0)
- return 0;
- BUG_ON(*pos < start_pos);
- if (end_pos < 0 || *pos < end_pos) {
- unsigned int copy = (end_pos < 0 ? *count
- : min(*count, end_pos - *pos));
- data += *pos - start_pos;
- if (*kbuf) {
- memcpy(*kbuf, data, copy);
- *kbuf += copy;
- } else if (__copy_to_user(*ubuf, data, copy))
- return -EFAULT;
- else
- *ubuf += copy;
- *pos += copy;
- *count -= copy;
- }
- return 0;
-}
-
static inline int user_regset_copyin(unsigned int *pos, unsigned int *count,
const void **kbuf,
const void __user **ubuf, void *data,
@@ -301,35 +275,6 @@ static inline int user_regset_copyin(unsigned int *pos, unsigned int *count,
return 0;
}
-/*
- * These two parallel the two above, but for portions of a regset layout
- * that always read as all-zero or for which writes are ignored.
- */
-static inline int user_regset_copyout_zero(unsigned int *pos,
- unsigned int *count,
- void **kbuf, void __user **ubuf,
- const int start_pos,
- const int end_pos)
-{
- if (*count == 0)
- return 0;
- BUG_ON(*pos < start_pos);
- if (end_pos < 0 || *pos < end_pos) {
- unsigned int copy = (end_pos < 0 ? *count
- : min(*count, end_pos - *pos));
- if (*kbuf) {
- memset(*kbuf, 0, copy);
- *kbuf += copy;
- } else if (__clear_user(*ubuf, copy))
- return -EFAULT;
- else
- *ubuf += copy;
- *pos += copy;
- *count -= copy;
- }
- return 0;
-}
-
static inline int user_regset_copyin_ignore(unsigned int *pos,
unsigned int *count,
const void **kbuf,
@@ -353,31 +298,19 @@ static inline int user_regset_copyin_ignore(unsigned int *pos,
return 0;
}
-/**
- * copy_regset_to_user - fetch a thread's user_regset data into user memory
- * @target: thread to be examined
- * @view: &struct user_regset_view describing user thread machine state
- * @setno: index in @view->regsets
- * @offset: offset into the regset data, in bytes
- * @size: amount of data to copy, in bytes
- * @data: user-mode pointer to copy into
- */
-static inline int copy_regset_to_user(struct task_struct *target,
- const struct user_regset_view *view,
- unsigned int setno,
- unsigned int offset, unsigned int size,
- void __user *data)
-{
- const struct user_regset *regset = &view->regsets[setno];
+extern int regset_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int size, void *data);
- if (!regset->get)
- return -EOPNOTSUPP;
-
- if (!access_ok(data, size))
- return -EFAULT;
+extern int regset_get_alloc(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int size,
+ void **data);
- return regset->get(target, regset, offset, size, NULL, data);
-}
+extern int copy_regset_to_user(struct task_struct *target,
+ const struct user_regset_view *view,
+ unsigned int setno, unsigned int offset,
+ unsigned int size, void __user *data);
/**
* copy_regset_from_user - store into thread's user_regset data from user memory
@@ -405,21 +338,4 @@ static inline int copy_regset_from_user(struct task_struct *target,
return regset->set(target, regset, offset, size, NULL, data);
}
-/**
- * regset_size - determine the current size of a regset
- * @target: thread to be examined
- * @regset: regset to be examined
- *
- * Note that the returned size is valid only until the next time
- * (if any) @regset is modified for @target.
- */
-static inline unsigned int regset_size(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!regset->get_size)
- return regset->n * regset->size;
- else
- return regset->get_size(target, regset);
-}
-
#endif /* <linux/regset.h> */
diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h
deleted file mode 100644
index 3ab1ddf151a2..000000000000
--- a/include/linux/regulator/ab8500.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
- * Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
- * Daniel Willerud <daniel.willerud@stericsson.com> for ST-Ericsson
- */
-
-#ifndef __LINUX_MFD_AB8500_REGULATOR_H
-#define __LINUX_MFD_AB8500_REGULATOR_H
-
-#include <linux/platform_device.h>
-
-/* AB8500 regulators */
-enum ab8500_regulator_id {
- AB8500_LDO_AUX1,
- AB8500_LDO_AUX2,
- AB8500_LDO_AUX3,
- AB8500_LDO_INTCORE,
- AB8500_LDO_TVOUT,
- AB8500_LDO_AUDIO,
- AB8500_LDO_ANAMIC1,
- AB8500_LDO_ANAMIC2,
- AB8500_LDO_DMIC,
- AB8500_LDO_ANA,
- AB8500_NUM_REGULATORS,
-};
-
-/* AB8505 regulators */
-enum ab8505_regulator_id {
- AB8505_LDO_AUX1,
- AB8505_LDO_AUX2,
- AB8505_LDO_AUX3,
- AB8505_LDO_AUX4,
- AB8505_LDO_AUX5,
- AB8505_LDO_AUX6,
- AB8505_LDO_INTCORE,
- AB8505_LDO_ADC,
- AB8505_LDO_AUDIO,
- AB8505_LDO_ANAMIC1,
- AB8505_LDO_ANAMIC2,
- AB8505_LDO_AUX8,
- AB8505_LDO_ANA,
- AB8505_NUM_REGULATORS,
-};
-
-/* AB8500 and AB8505 register initialization */
-struct ab8500_regulator_reg_init {
- int id;
- u8 mask;
- u8 value;
-};
-
-#define INIT_REGULATOR_REGISTER(_id, _mask, _value) \
- { \
- .id = _id, \
- .mask = _mask, \
- .value = _value, \
- }
-
-/* AB8500 registers */
-enum ab8500_regulator_reg {
- AB8500_REGUREQUESTCTRL2,
- AB8500_REGUREQUESTCTRL3,
- AB8500_REGUREQUESTCTRL4,
- AB8500_REGUSYSCLKREQ1HPVALID1,
- AB8500_REGUSYSCLKREQ1HPVALID2,
- AB8500_REGUHWHPREQ1VALID1,
- AB8500_REGUHWHPREQ1VALID2,
- AB8500_REGUHWHPREQ2VALID1,
- AB8500_REGUHWHPREQ2VALID2,
- AB8500_REGUSWHPREQVALID1,
- AB8500_REGUSWHPREQVALID2,
- AB8500_REGUSYSCLKREQVALID1,
- AB8500_REGUSYSCLKREQVALID2,
- AB8500_REGUMISC1,
- AB8500_VAUDIOSUPPLY,
- AB8500_REGUCTRL1VAMIC,
- AB8500_VPLLVANAREGU,
- AB8500_VREFDDR,
- AB8500_EXTSUPPLYREGU,
- AB8500_VAUX12REGU,
- AB8500_VRF1VAUX3REGU,
- AB8500_VAUX1SEL,
- AB8500_VAUX2SEL,
- AB8500_VRF1VAUX3SEL,
- AB8500_REGUCTRL2SPARE,
- AB8500_REGUCTRLDISCH,
- AB8500_REGUCTRLDISCH2,
- AB8500_NUM_REGULATOR_REGISTERS,
-};
-
-/* AB8505 registers */
-enum ab8505_regulator_reg {
- AB8505_REGUREQUESTCTRL1,
- AB8505_REGUREQUESTCTRL2,
- AB8505_REGUREQUESTCTRL3,
- AB8505_REGUREQUESTCTRL4,
- AB8505_REGUSYSCLKREQ1HPVALID1,
- AB8505_REGUSYSCLKREQ1HPVALID2,
- AB8505_REGUHWHPREQ1VALID1,
- AB8505_REGUHWHPREQ1VALID2,
- AB8505_REGUHWHPREQ2VALID1,
- AB8505_REGUHWHPREQ2VALID2,
- AB8505_REGUSWHPREQVALID1,
- AB8505_REGUSWHPREQVALID2,
- AB8505_REGUSYSCLKREQVALID1,
- AB8505_REGUSYSCLKREQVALID2,
- AB8505_REGUVAUX4REQVALID,
- AB8505_REGUMISC1,
- AB8505_VAUDIOSUPPLY,
- AB8505_REGUCTRL1VAMIC,
- AB8505_VSMPSAREGU,
- AB8505_VSMPSBREGU,
- AB8505_VSAFEREGU, /* NOTE! PRCMU register */
- AB8505_VPLLVANAREGU,
- AB8505_EXTSUPPLYREGU,
- AB8505_VAUX12REGU,
- AB8505_VRF1VAUX3REGU,
- AB8505_VSMPSASEL1,
- AB8505_VSMPSASEL2,
- AB8505_VSMPSASEL3,
- AB8505_VSMPSBSEL1,
- AB8505_VSMPSBSEL2,
- AB8505_VSMPSBSEL3,
- AB8505_VSAFESEL1, /* NOTE! PRCMU register */
- AB8505_VSAFESEL2, /* NOTE! PRCMU register */
- AB8505_VSAFESEL3, /* NOTE! PRCMU register */
- AB8505_VAUX1SEL,
- AB8505_VAUX2SEL,
- AB8505_VRF1VAUX3SEL,
- AB8505_VAUX4REQCTRL,
- AB8505_VAUX4REGU,
- AB8505_VAUX4SEL,
- AB8505_REGUCTRLDISCH,
- AB8505_REGUCTRLDISCH2,
- AB8505_REGUCTRLDISCH3,
- AB8505_CTRLVAUX5,
- AB8505_CTRLVAUX6,
- AB8505_NUM_REGULATOR_REGISTERS,
-};
-
-/* AB8500 external regulators */
-struct ab8500_ext_regulator_cfg {
- bool hwreq; /* requires hw mode or high power mode */
-};
-
-enum ab8500_ext_regulator_id {
- AB8500_EXT_SUPPLY1,
- AB8500_EXT_SUPPLY2,
- AB8500_EXT_SUPPLY3,
- AB8500_NUM_EXT_REGULATORS,
-};
-
-/* AB8500 regulator platform data */
-struct ab8500_regulator_platform_data {
- int num_reg_init;
- struct ab8500_regulator_reg_init *reg_init;
- int num_regulator;
- struct regulator_init_data *regulator;
- int num_ext_regulator;
- struct regulator_init_data *ext_regulator;
-};
-
-#endif
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 6a92fd3105a3..ee3b4a014611 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -32,10 +32,12 @@
#define __LINUX_REGULATOR_CONSUMER_H_
#include <linux/err.h>
+#include <linux/suspend.h>
struct device;
struct notifier_block;
struct regmap;
+struct regulator_dev;
/*
* Regulator operating modes.
@@ -117,6 +119,16 @@ struct regmap;
#define REGULATOR_EVENT_PRE_DISABLE 0x400
#define REGULATOR_EVENT_ABORT_DISABLE 0x800
#define REGULATOR_EVENT_ENABLE 0x1000
+/*
+ * The following notifications should be emitted only if the detected
+ * condition is such that the HW is likely to still be working, but consumers
+ * should take a recovery action to prevent problems escalating into errors.
+ */
+#define REGULATOR_EVENT_UNDER_VOLTAGE_WARN 0x2000
+#define REGULATOR_EVENT_OVER_CURRENT_WARN 0x4000
+#define REGULATOR_EVENT_OVER_VOLTAGE_WARN 0x8000
+#define REGULATOR_EVENT_OVER_TEMP_WARN 0x10000
+#define REGULATOR_EVENT_WARN_MASK 0x1E000
/*
* Regulator errors that can be queried using regulator_get_error_flags
@@ -136,6 +148,10 @@ struct regmap;
#define REGULATOR_ERROR_FAIL BIT(4)
#define REGULATOR_ERROR_OVER_TEMP BIT(5)
+#define REGULATOR_ERROR_UNDER_VOLTAGE_WARN BIT(6)
+#define REGULATOR_ERROR_OVER_CURRENT_WARN BIT(7)
+#define REGULATOR_ERROR_OVER_VOLTAGE_WARN BIT(8)
+#define REGULATOR_ERROR_OVER_TEMP_WARN BIT(9)
/**
* struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event
@@ -155,10 +171,13 @@ struct regulator;
/**
* struct regulator_bulk_data - Data used for bulk regulator operations.
*
- * @supply: The name of the supply. Initialised by the user before
- * using the bulk regulator APIs.
- * @consumer: The regulator consumer for the supply. This will be managed
- * by the bulk API.
+ * @supply: The name of the supply. Initialised by the user before
+ * using the bulk regulator APIs.
+ * @init_load_uA: After getting the regulator, regulator_set_load() will be
+ * called with this load. Initialised by the user before
+ * using the bulk regulator APIs.
+ * @consumer: The regulator consumer for the supply. This will be managed
+ * by the bulk API.
*
* The regulator APIs provide a series of regulator_bulk_() API calls as
* a convenience to consumers which require multiple supplies. This
@@ -166,6 +185,7 @@ struct regulator;
*/
struct regulator_bulk_data {
const char *supply;
+ int init_load_uA;
struct regulator *consumer;
/* private: Internal use */
@@ -187,6 +207,8 @@ struct regulator *__must_check regulator_get_optional(struct device *dev,
const char *id);
struct regulator *__must_check devm_regulator_get_optional(struct device *dev,
const char *id);
+int devm_regulator_get_enable(struct device *dev, const char *id);
+int devm_regulator_get_enable_optional(struct device *dev, const char *id);
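
For a consumer that only needs its supply permanently on, a minimal sketch (the supply name is hypothetical); the regulator is disabled and put automatically on driver detach:

	static int example_consumer_probe(struct device *dev)
	{
		int ret;

		/* Get and enable the (hypothetical) "vdd" supply. */
		ret = devm_regulator_get_enable(dev, "vdd");
		if (ret)
			return dev_err_probe(dev, ret, "failed to enable vdd\n");

		return 0;
	}
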
void regulator_put(struct regulator *regulator);
void devm_regulator_put(struct regulator *regulator);
@@ -206,17 +228,12 @@ void regulator_bulk_unregister_supply_alias(struct device *dev,
int devm_regulator_register_supply_alias(struct device *dev, const char *id,
struct device *alias_dev,
const char *alias_id);
-void devm_regulator_unregister_supply_alias(struct device *dev,
- const char *id);
int devm_regulator_bulk_register_supply_alias(struct device *dev,
const char *const *id,
struct device *alias_dev,
const char *const *alias_id,
int num_id);
-void devm_regulator_bulk_unregister_supply_alias(struct device *dev,
- const char *const *id,
- int num_id);
/* regulator output control and status */
int __must_check regulator_enable(struct regulator *regulator);
@@ -229,8 +246,15 @@ int __must_check regulator_bulk_get(struct device *dev, int num_consumers,
struct regulator_bulk_data *consumers);
int __must_check devm_regulator_bulk_get(struct device *dev, int num_consumers,
struct regulator_bulk_data *consumers);
+void devm_regulator_bulk_put(struct regulator_bulk_data *consumers);
+int __must_check devm_regulator_bulk_get_const(
+ struct device *dev, int num_consumers,
+ const struct regulator_bulk_data *in_consumers,
+ struct regulator_bulk_data **out_consumers);
int __must_check regulator_bulk_enable(int num_consumers,
struct regulator_bulk_data *consumers);
+int devm_regulator_bulk_get_enable(struct device *dev, int num_consumers,
+ const char * const *id);
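
Similarly, several always-on supplies can be fetched and enabled in one managed call; the supply names here are hypothetical:

	/* Hypothetical supply names. */
	static const char * const example_supplies[] = { "vdda", "vddio" };

	static int example_enable_supplies(struct device *dev)
	{
		return devm_regulator_bulk_get_enable(dev,
						      ARRAY_SIZE(example_supplies),
						      example_supplies);
	}
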
int regulator_bulk_disable(int num_consumers,
struct regulator_bulk_data *consumers);
int regulator_bulk_force_disable(int num_consumers,
@@ -277,6 +301,14 @@ int regulator_unregister_notifier(struct regulator *regulator,
void devm_regulator_unregister_notifier(struct regulator *regulator,
struct notifier_block *nb);
+/* regulator suspend */
+int regulator_suspend_enable(struct regulator_dev *rdev,
+ suspend_state_t state);
+int regulator_suspend_disable(struct regulator_dev *rdev,
+ suspend_state_t state);
+int regulator_set_suspend_voltage(struct regulator *regulator, int min_uV,
+ int max_uV, suspend_state_t state);
+
/* driver data - core doesn't touch */
void *regulator_get_drvdata(struct regulator *regulator);
void regulator_set_drvdata(struct regulator *regulator, void *data);
@@ -322,6 +354,23 @@ regulator_get_exclusive(struct device *dev, const char *id)
}
static inline struct regulator *__must_check
+devm_regulator_get_exclusive(struct device *dev, const char *id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int devm_regulator_get_enable(struct device *dev, const char *id)
+{
+ return -ENODEV;
+}
+
+static inline int devm_regulator_get_enable_optional(struct device *dev,
+ const char *id)
+{
+ return -ENODEV;
+}
+
+static inline struct regulator *__must_check
regulator_get_optional(struct device *dev, const char *id)
{
return ERR_PTR(-ENODEV);
@@ -342,6 +391,10 @@ static inline void devm_regulator_put(struct regulator *regulator)
{
}
+static inline void devm_regulator_bulk_put(struct regulator_bulk_data *consumers)
+{
+}
+
static inline int regulator_register_supply_alias(struct device *dev,
const char *id,
struct device *alias_dev,
@@ -378,11 +431,6 @@ static inline int devm_regulator_register_supply_alias(struct device *dev,
return 0;
}
-static inline void devm_regulator_unregister_supply_alias(struct device *dev,
- const char *id)
-{
-}
-
static inline int devm_regulator_bulk_register_supply_alias(struct device *dev,
const char *const *id,
struct device *alias_dev,
@@ -392,11 +440,6 @@ static inline int devm_regulator_bulk_register_supply_alias(struct device *dev,
return 0;
}
-static inline void devm_regulator_bulk_unregister_supply_alias(
- struct device *dev, const char *const *id, int num_id)
-{
-}
-
static inline int regulator_enable(struct regulator *regulator)
{
return 0;
@@ -442,6 +485,13 @@ static inline int regulator_bulk_enable(int num_consumers,
return 0;
}
+static inline int devm_regulator_bulk_get_enable(struct device *dev,
+ int num_consumers,
+ const char * const *id)
+{
+ return 0;
+}
+
static inline int regulator_bulk_disable(int num_consumers,
struct regulator_bulk_data *consumers)
{
@@ -476,6 +526,11 @@ static inline int regulator_get_voltage(struct regulator *regulator)
return -EINVAL;
}
+static inline int regulator_sync_voltage(struct regulator *regulator)
+{
+ return -EINVAL;
+}
+
static inline int regulator_is_supported_voltage(struct regulator *regulator,
int min_uV, int max_uV)
{
@@ -568,6 +623,25 @@ static inline int devm_regulator_unregister_notifier(struct regulator *regulator
return 0;
}
+static inline int regulator_suspend_enable(struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ return -EINVAL;
+}
+
+static inline int regulator_suspend_disable(struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ return -EINVAL;
+}
+
+static inline int regulator_set_suspend_voltage(struct regulator *regulator,
+ int min_uV, int max_uV,
+ suspend_state_t state)
+{
+ return -EINVAL;
+}
+
static inline void *regulator_get_drvdata(struct regulator *regulator)
{
return NULL;
diff --git a/include/linux/regulator/coupler.h b/include/linux/regulator/coupler.h
index 0212d6255e4e..73291f280a23 100644
--- a/include/linux/regulator/coupler.h
+++ b/include/linux/regulator/coupler.h
@@ -52,7 +52,6 @@ struct regulator_coupler {
#ifdef CONFIG_REGULATOR
int regulator_coupler_register(struct regulator_coupler *coupler);
-const char *rdev_get_name(struct regulator_dev *rdev);
int regulator_check_consumers(struct regulator_dev *rdev,
int *min_uV, int *max_uV,
suspend_state_t state);
@@ -62,15 +61,13 @@ int regulator_get_voltage_rdev(struct regulator_dev *rdev);
int regulator_set_voltage_rdev(struct regulator_dev *rdev,
int min_uV, int max_uV,
suspend_state_t state);
+int regulator_do_balance_voltage(struct regulator_dev *rdev,
+ suspend_state_t state, bool skip_coupled);
#else
static inline int regulator_coupler_register(struct regulator_coupler *coupler)
{
return 0;
}
-static inline const char *rdev_get_name(struct regulator_dev *rdev)
-{
- return NULL;
-}
static inline int regulator_check_consumers(struct regulator_dev *rdev,
int *min_uV, int *max_uV,
suspend_state_t state)
@@ -92,6 +89,12 @@ static inline int regulator_set_voltage_rdev(struct regulator_dev *rdev,
{
return -EINVAL;
}
+static inline int regulator_do_balance_voltage(struct regulator_dev *rdev,
+ suspend_state_t state,
+ bool skip_coupled)
+{
+ return -EINVAL;
+}
#endif
#endif
diff --git a/include/linux/regulator/da9121.h b/include/linux/regulator/da9121.h
new file mode 100644
index 000000000000..62d9d257dc25
--- /dev/null
+++ b/include/linux/regulator/da9121.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * DA9121 Single-channel dual-phase 10A buck converter
+ * DA9130 Single-channel dual-phase 10A buck converter (Automotive)
+ * DA9217 Single-channel dual-phase 6A buck converter
+ * DA9122 Dual-channel single-phase 5A buck converter
+ * DA9131 Dual-channel single-phase 5A buck converter (Automotive)
+ * DA9220 Dual-channel single-phase 3A buck converter
+ * DA9132 Dual-channel single-phase 3A buck converter (Automotive)
+ *
+ * Copyright (C) 2020 Dialog Semiconductor
+ *
+ * Authors: Adam Ward, Dialog Semiconductor
+ */
+
+#ifndef __LINUX_REGULATOR_DA9121_H
+#define __LINUX_REGULATOR_DA9121_H
+
+#include <linux/regulator/machine.h>
+
+struct gpio_desc;
+
+enum {
+ DA9121_IDX_BUCK1,
+ DA9121_IDX_BUCK2,
+ DA9121_IDX_MAX
+};
+
+struct da9121_pdata {
+ int num_buck;
+ struct gpio_desc *gpiod_ren[DA9121_IDX_MAX];
+ struct device_node *reg_node[DA9121_IDX_MAX];
+ struct regulator_init_data *init_data[DA9121_IDX_MAX];
+};
+
+#endif
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 9a911bb5fb61..f9a7461e72b8 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -13,6 +13,7 @@
#define __LINUX_REGULATOR_DRIVER_H_
#include <linux/device.h>
+#include <linux/linear_range.h>
#include <linux/notifier.h>
#include <linux/regulator/consumer.h>
#include <linux/ww_mutex.h>
@@ -39,31 +40,22 @@ enum regulator_status {
REGULATOR_STATUS_UNDEFINED,
};
-/**
- * struct regulator_linear_range - specify linear voltage ranges
- *
- * Specify a range of voltages for regulator_map_linear_range() and
- * regulator_list_linear_range().
- *
- * @min_uV: Lowest voltage in range
- * @min_sel: Lowest selector for range
- * @max_sel: Highest selector for range
- * @uV_step: Step size
- */
-struct regulator_linear_range {
- unsigned int min_uV;
- unsigned int min_sel;
- unsigned int max_sel;
- unsigned int uV_step;
+enum regulator_detection_severity {
+ /* Hardware shut down voltage outputs if condition is detected */
+ REGULATOR_SEVERITY_PROT,
+ /* Hardware is probably damaged/inoperable */
+ REGULATOR_SEVERITY_ERR,
+ /* Hardware is still recoverable but recovery action must be taken */
+ REGULATOR_SEVERITY_WARN,
};
-/* Initialize struct regulator_linear_range */
+/* Initialize struct linear_range for regulators */
#define REGULATOR_LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) \
{ \
- .min_uV = _min_uV, \
+ .min = _min_uV, \
.min_sel = _min_sel, \
.max_sel = _max_sel, \
- .uV_step = _step_uV, \
+ .step = _step_uV, \
}
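
The macro keeps its name but now initialises the generic struct linear_range. A hypothetical two-range voltage table might look like:

	/* Hypothetical table: 600000..1293750 uV in 6250 uV steps,
	 * then coarser 50 mV steps up the top of the range. */
	static const struct linear_range example_buck_ranges[] = {
		REGULATOR_LINEAR_RANGE(600000, 0x00, 0x6f, 6250),
		REGULATOR_LINEAR_RANGE(1300000, 0x70, 0x7f, 50000),
	};
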
/**
@@ -95,8 +87,31 @@ struct regulator_linear_range {
* @get_current_limit: Get the configured limit for a current-limited regulator.
* @set_input_current_limit: Configure an input limit.
*
- * @set_over_current_protection: Support capability of automatically shutting
- * down when detecting an over current event.
+ * @set_over_current_protection: Support enabling of and setting limits for over
+ * current situation detection. Detection can be configured for three
+ * levels of severity.
+ *
+ * - REGULATOR_SEVERITY_PROT should automatically shut down the regulator(s).
+ *
+ * - REGULATOR_SEVERITY_ERR should indicate that over-current situation is
+ * caused by an unrecoverable error but HW does not perform
+ * automatic shut down.
+ *
+ * - REGULATOR_SEVERITY_WARN should indicate a situation where the hardware
+ * is not believed to be damaged yet, but a board-specific
+ * recovery action is needed. If lim_uA is 0 the limit should not
+ * be changed but the detection should just be enabled/disabled as
+ * is requested.
+ *
+ * @set_over_voltage_protection: Support enabling of and setting limits for over
+ * voltage situation detection. Detection can be configured for the same
+ * severities as over current protection. Units of uV.
+ * @set_under_voltage_protection: Support enabling of and setting limits for
+ * under voltage situation detection. Detection can be configured for the same
+ * severities as over current protection. Units of uV.
+ * @set_thermal_protection: Support enabling of and setting limits for over
+ * temperature situation detection. Detection can be configured for the same
+ * severities as over current protection. Units of degrees Kelvin.
*
* @set_active_discharge: Set active discharge enable/disable of regulators.
*
@@ -134,7 +149,7 @@ struct regulator_linear_range {
* suspended.
* @set_suspend_mode: Set the operating mode for the regulator when the
* system is suspended.
- *
+ * @resume: Resume operation of suspended regulator.
* @set_pull_down: Configure the regulator to pull down when the regulator
* is disabled.
*
@@ -160,8 +175,15 @@ struct regulator_ops {
int (*get_current_limit) (struct regulator_dev *);
int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
- int (*set_over_current_protection) (struct regulator_dev *);
- int (*set_active_discharge) (struct regulator_dev *, bool enable);
+ int (*set_over_current_protection)(struct regulator_dev *, int lim_uA,
+ int severity, bool enable);
+ int (*set_over_voltage_protection)(struct regulator_dev *, int lim_uV,
+ int severity, bool enable);
+ int (*set_under_voltage_protection)(struct regulator_dev *, int lim_uV,
+ int severity, bool enable);
+ int (*set_thermal_protection)(struct regulator_dev *, int lim,
+ int severity, bool enable);
+ int (*set_active_discharge)(struct regulator_dev *, bool enable);
/* enable/disable regulator */
int (*enable) (struct regulator_dev *);
@@ -240,6 +262,8 @@ enum regulator_type {
* @name: Identifying name for the regulator.
* @supply_name: Identifying the regulator supply
* @of_match: Name used to identify regulator in DT.
+ * @of_match_full_name: A flag to indicate that the of_match string, if
+ * present, should be matched against the node full_name.
* @regulators_node: Name of node containing regulator definitions in DT.
* @of_parse_cb: Optional callback called only if of_match is present.
* Will be called for each regulator parsed from DT, during
@@ -277,9 +301,9 @@ enum regulator_type {
* @curr_table: Current limit mapping table (if table based mapping)
*
* @vsel_range_reg: Register for range selector when using pickable ranges
- * and regulator_regmap_X_voltage_X_pickable functions.
+ * and ``regulator_map_*_voltage_*_pickable`` functions.
* @vsel_range_mask: Mask for register bitfield used for range selector
- * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_
+ * @vsel_reg: Register for selector when using ``regulator_map_*_voltage_*``
* @vsel_mask: Mask for register bitfield used for selector
* @vsel_step: Specify the resolution of selector stepping when setting
* voltage. If 0, then no stepping is done (requested selector is
@@ -319,15 +343,26 @@ enum regulator_type {
* @pull_down_val_on: Enabling value for control when using regmap
* set_pull_down
*
+ * @ramp_reg: Register for controlling the regulator ramp-rate.
+ * @ramp_mask: Bitmask for the ramp-rate control register.
+ * @ramp_delay_table: Table for mapping the regulator ramp-rate values. Values
+ * should be given in units of V/S (uV/uS). See
+ * regulator_set_ramp_delay_regmap().
+ * @n_ramp_values: number of elements in @ramp_delay_table.
+ *
* @enable_time: Time taken for initial enable of regulator (in uS).
* @off_on_delay: guard time (in uS), before re-enabling a regulator
*
+ * @poll_enabled_time: The polling interval (in uS) to use while checking that
+ * the regulator was actually enabled. At most @enable_time.
+ *
* @of_map_mode: Maps a hardware mode defined in a DeviceTree to a standard mode
*/
struct regulator_desc {
const char *name;
const char *supply_name;
const char *of_match;
+ bool of_match_full_name;
const char *regulators_node;
int (*of_parse_cb)(struct device_node *,
const struct regulator_desc *,
@@ -348,7 +383,7 @@ struct regulator_desc {
unsigned int ramp_delay;
int min_dropout_uV;
- const struct regulator_linear_range *linear_ranges;
+ const struct linear_range *linear_ranges;
const unsigned int *linear_range_selectors;
int n_linear_ranges;
@@ -384,11 +419,17 @@ struct regulator_desc {
unsigned int pull_down_reg;
unsigned int pull_down_mask;
unsigned int pull_down_val_on;
+ unsigned int ramp_reg;
+ unsigned int ramp_mask;
+ const unsigned int *ramp_delay_table;
+ unsigned int n_ramp_values;
unsigned int enable_time;
unsigned int off_on_delay;
+ unsigned int poll_enabled_time;
+
unsigned int (*of_map_mode)(unsigned int mode);
};
@@ -418,6 +459,130 @@ struct regulator_config {
struct gpio_desc *ena_gpiod;
};
+/**
+ * struct regulator_err_state - regulator error/notification status
+ *
+ * @rdev: Regulator which status the struct indicates.
+ * @notifs: Events which have occurred on the regulator.
+ * @errors: Errors which are active on the regulator.
+ * @possible_errs: Errors which can be signaled (by given IRQ).
+ */
+struct regulator_err_state {
+ struct regulator_dev *rdev;
+ unsigned long notifs;
+ unsigned long errors;
+ int possible_errs;
+};
+
+/**
+ * struct regulator_irq_data - regulator error/notification status data
+ *
+ * @states: Status structs for each of the associated regulators.
+ * @num_states: Amount of associated regulators.
+ * @data: Driver data pointer given in &struct regulator_irq_desc.
+ * @opaque: Value storage for the IC driver. The core does not update this.
+ * ICs may want to store a status register value here at map_event
+ * and compare the contents in the 'renable' callback to see if new
+ * problems have been added to the status. If that is the case it
+ * may be desirable to return REGULATOR_ERROR_CLEARED and not
+ * REGULATOR_ERROR_ON, to allow the IRQ to fire again and to
+ * generate notifications also for the new issues.
+ *
+ * This structure is passed to 'map_event' and 'renable' callbacks for
+ * reporting regulator status to core.
+ */
+struct regulator_irq_data {
+ struct regulator_err_state *states;
+ int num_states;
+ void *data;
+ long opaque;
+};
+
+/**
+ * struct regulator_irq_desc - notification sender for IRQ based events.
+ *
+ * @name: The visible name for the IRQ
+ * @fatal_cnt: If this IRQ is used to signal a HW damaging condition it may be
+ * best to shut down the regulator(s) or reboot the SoC if error
+ * handling repeatedly fails. If fatal_cnt is given, IRQ handling
+ * is aborted after it has failed fatal_cnt times and the die()
+ * callback (if populated) is called. If die() is not populated,
+ * a system poweroff is attempted in order to prevent any
+ * further damage.
+ * @reread_ms: The time to wait before attempting to re-read the status
+ * in the worker if reading the IC fails. An immediate re-read is
+ * done if the time is not specified.
+ * @irq_off_ms: The time for which the IRQ is kept disabled before re-evaluating
+ * the status, for devices which keep the IRQ disabled for the
+ * duration of the error. If this is not given the IRQ is left
+ * enabled and renable is not called.
+ * @skip_off: If set to true the IRQ handler will attempt to check if any of
+ * the associated regulators are enabled prior to taking other
+ * actions. If no regulators are enabled and this is set to true
+ * a spurious IRQ is assumed and IRQ_NONE is returned.
+ * @high_prio: Boolean to indicate that high priority WQ should be used.
+ * @data: Driver private data pointer which will be passed as such to
+ * the renable, map_event and die callbacks in regulator_irq_data.
+ * @die: Protection callback. If IC status reading or recovery actions
+ * fail fatal_cnt times this callback is called or system is
+ * powered off. This callback should implement a final protection
+ * attempt like disabling the regulator. If protection succeeded
+ * die() may return 0. If anything else is returned the core
+ * assumes final protection failed and attempts to perform a
+ * poweroff as a last resort.
+ * @map_event: Driver callback to map IRQ status into regulator devices with
+ * events / errors. NOTE: callback MUST initialize both the
+ * errors and notifs for all rdevs which it signals having
+ * active events as core does not clean the map data.
+ * REGULATOR_FAILED_RETRY can be returned to indicate that the
+ * status reading from IC failed. If this is repeated for
+ * fatal_cnt times the core will call die() callback or power-off
+ * the system as a last resort to protect the HW.
+ * @renable: Optional callback to check status (if HW supports that) before
+ * re-enabling IRQ. If implemented this should clear the error
+ * flags so that errors fetched by regulator_get_error_flags()
+ * are updated. If callback is not implemented then errors are
+ * assumed to be cleared and IRQ is re-enabled.
+ * REGULATOR_FAILED_RETRY can be returned to
+ * indicate that the status reading from IC failed. If this is
+ * repeated for 'fatal_cnt' times the core will call die()
+ * callback or if die() is not populated then attempt to power-off
+ * the system as a last resort to protect the HW.
+ * Returning zero indicates that the problem in HW has been solved
+ * and IRQ will be re-enabled. Returning REGULATOR_ERROR_ON
+ * indicates the error condition is still active and keeps IRQ
+ * disabled. Please note that returning REGULATOR_ERROR_ON does
+ * not retrigger evaluating what events are active or resending
+ * notifications. If this is needed you probably want to return
+ * zero and allow IRQ to retrigger causing events to be
+ * re-evaluated and re-sent.
+ *
+ * This structure is used for registering regulator IRQ notification helper.
+ */
+struct regulator_irq_desc {
+ const char *name;
+ int fatal_cnt;
+ int reread_ms;
+ int irq_off_ms;
+ bool skip_off;
+ bool high_prio;
+ void *data;
+
+ int (*die)(struct regulator_irq_data *rid);
+ int (*map_event)(int irq, struct regulator_irq_data *rid,
+ unsigned long *dev_mask);
+ int (*renable)(struct regulator_irq_data *rid);
+};
+
+/*
+ * Return values for regulator IRQ helpers.
+ */
+enum {
+ REGULATOR_ERROR_CLEARED,
+ REGULATOR_FAILED_RETRY,
+ REGULATOR_ERROR_ON,
+};
+
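
A sketch of wiring the helper up for a single rdev; the status decoding in map_event below is hypothetical, a real driver would read and decode its status register there:

	static int example_map_event(int irq, struct regulator_irq_data *rid,
				     unsigned long *dev_mask)
	{
		/* Hypothetical decoding; a real driver reads HW status. */
		rid->states[0].errors = REGULATOR_ERROR_OVER_CURRENT_WARN;
		rid->states[0].notifs = REGULATOR_EVENT_OVER_CURRENT_WARN;
		*dev_mask = BIT(0);
		return 0;
	}

	static const struct regulator_irq_desc example_irq_desc = {
		.name = "example-ocp",
		.map_event = example_map_event,
	};

	static int example_register_irq(struct device *dev, int irq,
					struct regulator_dev **rdev)
	{
		void *helper;

		helper = devm_regulator_irq_helper(dev, &example_irq_desc, irq,
						   0, REGULATOR_ERROR_OVER_CURRENT_WARN,
						   NULL, rdev, 1);
		return PTR_ERR_OR_ZERO(helper);
	}
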
/*
* struct coupling_desc
*
@@ -481,9 +646,46 @@ struct regulator_dev {
unsigned int is_switch:1;
/* time when this regulator was disabled last time */
- unsigned long last_off_jiffy;
+ ktime_t last_off;
+ int cached_err;
+ bool use_cached_err;
+ spinlock_t err_lock;
};
+/*
+ * Convert error flags to corresponding notifications.
+ *
+ * Can be used by drivers which use the notification helpers to
+ * find out the correct notification flags based on the error flags. Drivers
+ * can then avoid storing both the supported notification and error flags,
+ * which may save a few bytes.
+ */
+static inline int regulator_err2notif(int err)
+{
+ switch (err) {
+ case REGULATOR_ERROR_UNDER_VOLTAGE:
+ return REGULATOR_EVENT_UNDER_VOLTAGE;
+ case REGULATOR_ERROR_OVER_CURRENT:
+ return REGULATOR_EVENT_OVER_CURRENT;
+ case REGULATOR_ERROR_REGULATION_OUT:
+ return REGULATOR_EVENT_REGULATION_OUT;
+ case REGULATOR_ERROR_FAIL:
+ return REGULATOR_EVENT_FAIL;
+ case REGULATOR_ERROR_OVER_TEMP:
+ return REGULATOR_EVENT_OVER_TEMP;
+ case REGULATOR_ERROR_UNDER_VOLTAGE_WARN:
+ return REGULATOR_EVENT_UNDER_VOLTAGE_WARN;
+ case REGULATOR_ERROR_OVER_CURRENT_WARN:
+ return REGULATOR_EVENT_OVER_CURRENT_WARN;
+ case REGULATOR_ERROR_OVER_VOLTAGE_WARN:
+ return REGULATOR_EVENT_OVER_VOLTAGE_WARN;
+ case REGULATOR_ERROR_OVER_TEMP_WARN:
+ return REGULATOR_EVENT_OVER_TEMP_WARN;
+ }
+ return 0;
+}
+
+
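
For example, a driver that stores only its supported error flags can expand them to the matching notification flags on demand; the helper below is a sketch, not part of the patch:

	#include <linux/bitops.h>

	static int example_errs_to_notifs(unsigned long errs)
	{
		int notifs = 0;
		unsigned int bit;

		/* Walk the set error bits and collect their notifications. */
		for_each_set_bit(bit, &errs, BITS_PER_LONG)
			notifs |= regulator_err2notif(BIT(bit));

		return notifs;
	}
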
struct regulator_dev *
regulator_register(const struct regulator_desc *regulator_desc,
const struct regulator_config *config);
@@ -492,10 +694,21 @@ devm_regulator_register(struct device *dev,
const struct regulator_desc *regulator_desc,
const struct regulator_config *config);
void regulator_unregister(struct regulator_dev *rdev);
-void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev);
int regulator_notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data);
+void *devm_regulator_irq_helper(struct device *dev,
+ const struct regulator_irq_desc *d, int irq,
+ int irq_flags, int common_errs,
+ int *per_rdev_errs, struct regulator_dev **rdev,
+ int rdev_amount);
+void *regulator_irq_helper(struct device *dev,
+ const struct regulator_irq_desc *d, int irq,
+ int irq_flags, int common_errs, int *per_rdev_errs,
+ struct regulator_dev **rdev, int rdev_amount);
+void regulator_irq_helper_cancel(void **handle);
+int regulator_irq_map_event_simple(int irq, struct regulator_irq_data *rid,
+ unsigned long *dev_mask);
void *rdev_get_drvdata(struct regulator_dev *rdev);
struct device *rdev_get_dev(struct regulator_dev *rdev);
@@ -544,9 +757,8 @@ int regulator_set_current_limit_regmap(struct regulator_dev *rdev,
int min_uA, int max_uA);
int regulator_get_current_limit_regmap(struct regulator_dev *rdev);
void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data);
-
-void regulator_lock(struct regulator_dev *rdev);
-void regulator_unlock(struct regulator_dev *rdev);
+int regulator_set_ramp_delay_regmap(struct regulator_dev *rdev, int ramp_delay);
+int regulator_sync_voltage_rdev(struct regulator_dev *rdev);
/*
* Helper functions intended to be used by regulator drivers prior registering
@@ -555,4 +767,16 @@ void regulator_unlock(struct regulator_dev *rdev);
int regulator_desc_list_voltage_linear_range(const struct regulator_desc *desc,
unsigned int selector);
+int regulator_desc_list_voltage_linear(const struct regulator_desc *desc,
+ unsigned int selector);
+
+#ifdef CONFIG_REGULATOR
+const char *rdev_get_name(struct regulator_dev *rdev);
+#else
+static inline const char *rdev_get_name(struct regulator_dev *rdev)
+{
+ return NULL;
+}
+#endif
+
#endif
diff --git a/include/linux/regulator/gpio-regulator.h b/include/linux/regulator/gpio-regulator.h
index fdeb312cdabd..c223e50ff9f7 100644
--- a/include/linux/regulator/gpio-regulator.h
+++ b/include/linux/regulator/gpio-regulator.h
@@ -42,6 +42,7 @@ struct gpio_regulator_state {
/**
* struct gpio_regulator_config - config structure
* @supply_name: Name of the regulator supply
+ * @input_supply: Name of the input regulator supply
* @enabled_at_boot: Whether regulator has been enabled at
* boot or not. 1 = Yes, 0 = No
* This is used to keep the regulator at
@@ -62,6 +63,7 @@ struct gpio_regulator_state {
*/
struct gpio_regulator_config {
const char *supply_name;
+ const char *input_supply;
unsigned enabled_at_boot:1;
unsigned startup_delay;
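A short, hypothetical board-file sketch of the new field (the supply names
are invented):

#include <linux/regulator/gpio-regulator.h>

static struct gpio_regulator_config my_vmmc_cfg = {
	.supply_name	 = "vmmc",	/* name seen by consumers */
	.input_supply	 = "vin-5v",	/* parent supply looked up at probe */
	.enabled_at_boot = 1,
};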
diff --git a/include/linux/regulator/lp872x.h b/include/linux/regulator/lp872x.h
index d780dbb8b423..b62e45aa1dd3 100644
--- a/include/linux/regulator/lp872x.h
+++ b/include/linux/regulator/lp872x.h
@@ -10,7 +10,7 @@
#include <linux/regulator/machine.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#define LP872X_MAX_REGULATORS 9
@@ -40,11 +40,6 @@ enum lp872x_regulator_id {
LP872X_ID_MAX,
};
-enum lp872x_dvs_state {
- DVS_LOW = GPIOF_OUT_INIT_LOW,
- DVS_HIGH = GPIOF_OUT_INIT_HIGH,
-};
-
enum lp872x_dvs_sel {
SEL_V1,
SEL_V2,
@@ -52,14 +47,14 @@ enum lp872x_dvs_sel {
/**
* lp872x_dvs
- * @gpio : gpio pin number for dvs control
+ * @gpio : gpio descriptor for dvs control
* @vsel : dvs selector for buck v1 or buck v2 register
* @init_state : initial dvs pin state
*/
struct lp872x_dvs {
- int gpio;
+ struct gpio_desc *gpio;
enum lp872x_dvs_sel vsel;
- enum lp872x_dvs_state init_state;
+ enum gpiod_flags init_state;
};
/**
@@ -78,14 +73,14 @@ struct lp872x_regulator_data {
* @update_config : if LP872X_GENERAL_CFG register is updated, set true
* @regulator_data : platform regulator id and init data
* @dvs : dvs data for buck voltage control
- * @enable_gpio : gpio pin number for enable control
+ * @enable_gpio : gpio descriptor for enable control
*/
struct lp872x_platform_data {
u8 general_config;
bool update_config;
struct lp872x_regulator_data regulator_data[LP872X_MAX_REGULATORS];
struct lp872x_dvs *dvs;
- int enable_gpio;
+ struct gpio_desc *enable_gpio;
};
#endif
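A minimal sketch of what this conversion implies for platform code (the
"dvs" lookup name is an assumption; gpiod_get() is the standard gpiolib
consumer call): descriptors replace raw GPIO numbers, and the initial pin
level is now expressed with enum gpiod_flags.

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/lp872x.h>

static int my_board_fill_dvs(struct device *dev, struct lp872x_dvs *dvs)
{
	dvs->gpio = gpiod_get(dev, "dvs", GPIOD_OUT_LOW);
	if (IS_ERR(dvs->gpio))
		return PTR_ERR(dvs->gpio);

	dvs->vsel = SEL_V1;
	dvs->init_state = GPIOD_OUT_LOW;	/* replaces the old DVS_LOW */
	return 0;
}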
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index a84cc8879c3e..621b7f4a3639 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -83,6 +83,14 @@ struct regulator_state {
bool changeable;
};
+#define REGULATOR_NOTIF_LIMIT_DISABLE -1
+#define REGULATOR_NOTIF_LIMIT_ENABLE -2
+struct notification_limit {
+ int prot;
+ int err;
+ int warn;
+};
+
/**
* struct regulation_constraints - regulator operating constraints.
*
@@ -100,7 +108,13 @@ struct regulator_state {
* @ilim_uA: Maximum input current.
* @system_load: Load that isn't captured by any consumer requests.
*
+ * @over_curr_limits: Limits for acting on over current.
+ * @over_voltage_limits: Limits for acting on over voltage.
+ * @under_voltage_limits: Limits for acting on under voltage.
+ * @temp_limits: Limits for acting on over temperature.
+ *
* @max_spread: Max possible spread between coupled regulators
+ * @max_uV_step: Max possible step change in voltage
* @valid_modes_mask: Mask of modes which may be configured by consumers.
* @valid_ops_mask: Operations which may be performed by consumers.
*
@@ -115,6 +129,11 @@ struct regulator_state {
* @pull_down: Enable pull down when regulator is disabled.
* @over_current_protection: Auto disable on over current event.
*
+ * @over_current_detection: Configure over current limits.
+ * @over_voltage_detection: Configure over voltage limits.
+ * @under_voltage_detection: Configure under voltage limits.
+ * @over_temp_detection: Configure over temperature limits.
+ *
* @input_uV: Input voltage for regulator when supplied by another regulator.
*
* @state_disk: State for regulator when system is suspended in disk mode.
@@ -171,6 +190,10 @@ struct regulation_constraints {
struct regulator_state state_disk;
struct regulator_state state_mem;
struct regulator_state state_standby;
+ struct notification_limit over_curr_limits;
+ struct notification_limit over_voltage_limits;
+ struct notification_limit under_voltage_limits;
+ struct notification_limit temp_limits;
suspend_state_t initial_state; /* suspend state to set at init */
/* mode to set on startup */
@@ -192,6 +215,10 @@ struct regulation_constraints {
unsigned soft_start:1; /* ramp voltage slowly */
unsigned pull_down:1; /* pull down resistor when regulator off */
unsigned over_current_protection:1; /* auto disable on over current */
+ unsigned over_current_detection:1; /* notify on over current */
+ unsigned over_voltage_detection:1; /* notify on over voltage */
+ unsigned under_voltage_detection:1; /* notify on under voltage */
+ unsigned over_temp_detection:1; /* notify on over temperature */
};
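An illustrative machine-constraints sketch (the voltage figures are made up,
and the assumption that the limit values are in uV follows the surrounding
voltage fields rather than anything stated by this patch):

#include <linux/regulator/machine.h>

static struct regulation_constraints my_buck_constraints = {
	.over_voltage_detection = 1,	/* enable the new detection path */
	.over_voltage_limits = {
		.warn = 3600000,			/* notify consumers */
		.err  = 3800000,			/* flag an error */
		.prot = REGULATOR_NOTIF_LIMIT_DISABLE,	/* no forced shutdown */
	},
};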
/**
diff --git a/include/linux/regulator/mt6315-regulator.h b/include/linux/regulator/mt6315-regulator.h
new file mode 100644
index 000000000000..3b80d3f3910c
--- /dev/null
+++ b/include/linux/regulator/mt6315-regulator.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6315_H
+#define __LINUX_REGULATOR_MT6315_H
+
+#define MT6315_RP 3
+#define MT6315_PP 6
+#define MT6315_SP 7
+
+enum {
+ MT6315_VBUCK1 = 0,
+ MT6315_VBUCK2,
+ MT6315_VBUCK3,
+ MT6315_VBUCK4,
+ MT6315_VBUCK_MAX,
+};
+
+/* Register */
+#define MT6315_TOP2_ELR7 0x139
+#define MT6315_TOP_TMA_KEY 0x39F
+#define MT6315_TOP_TMA_KEY_H 0x3A0
+#define MT6315_BUCK_TOP_CON0 0x1440
+#define MT6315_BUCK_TOP_CON1 0x1443
+#define MT6315_BUCK_TOP_ELR0 0x1449
+#define MT6315_BUCK_TOP_ELR2 0x144B
+#define MT6315_BUCK_TOP_ELR4 0x144D
+#define MT6315_BUCK_TOP_ELR6 0x144F
+#define MT6315_VBUCK1_DBG0 0x1499
+#define MT6315_VBUCK1_DBG4 0x149D
+#define MT6315_VBUCK2_DBG0 0x1519
+#define MT6315_VBUCK2_DBG4 0x151D
+#define MT6315_VBUCK3_DBG0 0x1599
+#define MT6315_VBUCK3_DBG4 0x159D
+#define MT6315_VBUCK4_DBG0 0x1619
+#define MT6315_VBUCK4_DBG4 0x161D
+#define MT6315_BUCK_TOP_4PHASE_ANA_CON42 0x16B1
+
+#define PROTECTION_KEY_H 0x9C
+#define PROTECTION_KEY 0xEA
+
+#endif /* __LINUX_REGULATOR_MT6315_H */
diff --git a/include/linux/regulator/mt6331-regulator.h b/include/linux/regulator/mt6331-regulator.h
new file mode 100644
index 000000000000..2801a9879c14
--- /dev/null
+++ b/include/linux/regulator/mt6331-regulator.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __LINUX_REGULATOR_MT6331_H
+#define __LINUX_REGULATOR_MT6331_H
+
+enum {
+ /* BUCK */
+ MT6331_ID_VDVFS11 = 0,
+ MT6331_ID_VDVFS12,
+ MT6331_ID_VDVFS13,
+ MT6331_ID_VDVFS14,
+ MT6331_ID_VCORE2,
+ MT6331_ID_VIO18,
+ /* LDO */
+ MT6331_ID_VTCXO1,
+ MT6331_ID_VTCXO2,
+ MT6331_ID_AVDD32_AUD,
+ MT6331_ID_VAUXA32,
+ MT6331_ID_VCAMA,
+ MT6331_ID_VIO28,
+ MT6331_ID_VCAM_AF,
+ MT6331_ID_VMC,
+ MT6331_ID_VMCH,
+ MT6331_ID_VEMC33,
+ MT6331_ID_VGP1,
+ MT6331_ID_VSIM1,
+ MT6331_ID_VSIM2,
+ MT6331_ID_VMIPI,
+ MT6331_ID_VIBR,
+ MT6331_ID_VGP4,
+ MT6331_ID_VCAMD,
+ MT6331_ID_VUSB10,
+ MT6331_ID_VCAM_IO,
+ MT6331_ID_VSRAM_DVFS1,
+ MT6331_ID_VGP2,
+ MT6331_ID_VGP3,
+ MT6331_ID_VRTC,
+ MT6331_ID_VDIG18,
+ MT6331_ID_VREG_MAX
+};
+
+#endif /* __LINUX_REGULATOR_MT6331_H */
diff --git a/include/linux/regulator/mt6332-regulator.h b/include/linux/regulator/mt6332-regulator.h
new file mode 100644
index 000000000000..af5e3ed31029
--- /dev/null
+++ b/include/linux/regulator/mt6332-regulator.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __LINUX_REGULATOR_MT6332_H
+#define __LINUX_REGULATOR_MT6332_H
+
+enum {
+ /* BUCK */
+ MT6332_ID_VDRAM = 0,
+ MT6332_ID_VDVFS2,
+ MT6332_ID_VPA,
+ MT6332_ID_VRF1,
+ MT6332_ID_VRF2,
+ MT6332_ID_VSBST,
+ /* LDO */
+ MT6332_ID_VAUXB32,
+ MT6332_ID_VBIF28,
+ MT6332_ID_VDIG18,
+ MT6332_ID_VSRAM_DVFS2,
+ MT6332_ID_VUSB33,
+ MT6332_ID_VREG_MAX
+};
+
+#endif /* __LINUX_REGULATOR_MT6332_H */
diff --git a/include/linux/regulator/mt6358-regulator.h b/include/linux/regulator/mt6358-regulator.h
index 1cc304946d09..bdcf83cd719e 100644
--- a/include/linux/regulator/mt6358-regulator.h
+++ b/include/linux/regulator/mt6358-regulator.h
@@ -48,9 +48,54 @@ enum {
MT6358_ID_VLDO28,
MT6358_ID_VAUD28,
MT6358_ID_VSIM2,
+ MT6358_ID_VCORE_SSHUB,
+ MT6358_ID_VSRAM_OTHERS_SSHUB,
MT6358_ID_RG_MAX,
};
+enum {
+ MT6366_ID_VDRAM1 = 0,
+ MT6366_ID_VCORE,
+ MT6366_ID_VPA,
+ MT6366_ID_VPROC11,
+ MT6366_ID_VPROC12,
+ MT6366_ID_VGPU,
+ MT6366_ID_VS2,
+ MT6366_ID_VMODEM,
+ MT6366_ID_VS1,
+ MT6366_ID_VDRAM2,
+ MT6366_ID_VSIM1,
+ MT6366_ID_VIBR,
+ MT6366_ID_VRF12,
+ MT6366_ID_VIO18,
+ MT6366_ID_VUSB,
+ MT6366_ID_VCN18,
+ MT6366_ID_VFE28,
+ MT6366_ID_VSRAM_PROC11,
+ MT6366_ID_VCN28,
+ MT6366_ID_VSRAM_OTHERS,
+ MT6366_ID_VSRAM_GPU,
+ MT6366_ID_VXO22,
+ MT6366_ID_VEFUSE,
+ MT6366_ID_VAUX18,
+ MT6366_ID_VMCH,
+ MT6366_ID_VBIF28,
+ MT6366_ID_VSRAM_PROC12,
+ MT6366_ID_VEMC,
+ MT6366_ID_VIO28,
+ MT6366_ID_VA12,
+ MT6366_ID_VRF18,
+ MT6366_ID_VCN33_BT,
+ MT6366_ID_VCN33_WIFI,
+ MT6366_ID_VMC,
+ MT6366_ID_VAUD28,
+ MT6366_ID_VSIM2,
+ MT6366_ID_VCORE_SSHUB,
+ MT6366_ID_VSRAM_OTHERS_SSHUB,
+ MT6366_ID_RG_MAX,
+};
+
#define MT6358_MAX_REGULATOR MT6358_ID_RG_MAX
+#define MT6366_MAX_REGULATOR MT6366_ID_RG_MAX
#endif /* __LINUX_REGULATOR_MT6358_H */
diff --git a/include/linux/regulator/mt6359-regulator.h b/include/linux/regulator/mt6359-regulator.h
new file mode 100644
index 000000000000..6d6e5a58f482
--- /dev/null
+++ b/include/linux/regulator/mt6359-regulator.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6359_H
+#define __LINUX_REGULATOR_MT6359_H
+
+enum {
+ MT6359_ID_VS1 = 0,
+ MT6359_ID_VGPU11,
+ MT6359_ID_VMODEM,
+ MT6359_ID_VPU,
+ MT6359_ID_VCORE,
+ MT6359_ID_VS2,
+ MT6359_ID_VPA,
+ MT6359_ID_VPROC2,
+ MT6359_ID_VPROC1,
+ MT6359_ID_VCORE_SSHUB,
+ MT6359_ID_VGPU11_SSHUB = MT6359_ID_VCORE_SSHUB,
+ MT6359_ID_VAUD18 = 10,
+ MT6359_ID_VSIM1,
+ MT6359_ID_VIBR,
+ MT6359_ID_VRF12,
+ MT6359_ID_VUSB,
+ MT6359_ID_VSRAM_PROC2,
+ MT6359_ID_VIO18,
+ MT6359_ID_VCAMIO,
+ MT6359_ID_VCN18,
+ MT6359_ID_VFE28,
+ MT6359_ID_VCN13,
+ MT6359_ID_VCN33_1_BT,
+ MT6359_ID_VCN33_1_WIFI,
+ MT6359_ID_VAUX18,
+ MT6359_ID_VSRAM_OTHERS,
+ MT6359_ID_VEFUSE,
+ MT6359_ID_VXO22,
+ MT6359_ID_VRFCK,
+ MT6359_ID_VBIF28,
+ MT6359_ID_VIO28,
+ MT6359_ID_VEMC,
+ MT6359_ID_VCN33_2_BT,
+ MT6359_ID_VCN33_2_WIFI,
+ MT6359_ID_VA12,
+ MT6359_ID_VA09,
+ MT6359_ID_VRF18,
+ MT6359_ID_VSRAM_MD,
+ MT6359_ID_VUFS,
+ MT6359_ID_VM18,
+ MT6359_ID_VBBCK,
+ MT6359_ID_VSRAM_PROC1,
+ MT6359_ID_VSIM2,
+ MT6359_ID_VSRAM_OTHERS_SSHUB,
+ MT6359_ID_RG_MAX,
+};
+
+#define MT6359_MAX_REGULATOR MT6359_ID_RG_MAX
+
+#endif /* __LINUX_REGULATOR_MT6359_H */
diff --git a/include/linux/regulator/pca9450.h b/include/linux/regulator/pca9450.h
new file mode 100644
index 000000000000..3c01c2bf84f5
--- /dev/null
+++ b/include/linux/regulator/pca9450.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright 2020 NXP. */
+
+#ifndef __LINUX_REG_PCA9450_H__
+#define __LINUX_REG_PCA9450_H__
+
+#include <linux/regmap.h>
+
+enum pca9450_chip_type {
+ PCA9450_TYPE_PCA9450A = 0,
+ PCA9450_TYPE_PCA9450BC,
+ PCA9450_TYPE_AMOUNT,
+};
+
+enum {
+ PCA9450_BUCK1 = 0,
+ PCA9450_BUCK2,
+ PCA9450_BUCK3,
+ PCA9450_BUCK4,
+ PCA9450_BUCK5,
+ PCA9450_BUCK6,
+ PCA9450_LDO1,
+ PCA9450_LDO2,
+ PCA9450_LDO3,
+ PCA9450_LDO4,
+ PCA9450_LDO5,
+ PCA9450_REGULATOR_CNT,
+};
+
+enum {
+ PCA9450_DVS_LEVEL_RUN = 0,
+ PCA9450_DVS_LEVEL_STANDBY,
+ PCA9450_DVS_LEVEL_MAX,
+};
+
+#define PCA9450_BUCK1_VOLTAGE_NUM 0x80
+#define PCA9450_BUCK2_VOLTAGE_NUM 0x80
+#define PCA9450_BUCK3_VOLTAGE_NUM 0x80
+#define PCA9450_BUCK4_VOLTAGE_NUM 0x80
+
+#define PCA9450_BUCK5_VOLTAGE_NUM 0x80
+#define PCA9450_BUCK6_VOLTAGE_NUM 0x80
+
+#define PCA9450_LDO1_VOLTAGE_NUM 0x08
+#define PCA9450_LDO2_VOLTAGE_NUM 0x08
+#define PCA9450_LDO3_VOLTAGE_NUM 0x20
+#define PCA9450_LDO4_VOLTAGE_NUM 0x20
+#define PCA9450_LDO5_VOLTAGE_NUM 0x10
+
+enum {
+ PCA9450_REG_DEV_ID = 0x00,
+ PCA9450_REG_INT1 = 0x01,
+ PCA9450_REG_INT1_MSK = 0x02,
+ PCA9450_REG_STATUS1 = 0x03,
+ PCA9450_REG_STATUS2 = 0x04,
+ PCA9450_REG_PWRON_STAT = 0x05,
+ PCA9450_REG_SWRST = 0x06,
+ PCA9450_REG_PWRCTRL = 0x07,
+ PCA9450_REG_RESET_CTRL = 0x08,
+ PCA9450_REG_CONFIG1 = 0x09,
+ PCA9450_REG_CONFIG2 = 0x0A,
+ PCA9450_REG_BUCK123_DVS = 0x0C,
+ PCA9450_REG_BUCK1OUT_LIMIT = 0x0D,
+ PCA9450_REG_BUCK2OUT_LIMIT = 0x0E,
+ PCA9450_REG_BUCK3OUT_LIMIT = 0x0F,
+ PCA9450_REG_BUCK1CTRL = 0x10,
+ PCA9450_REG_BUCK1OUT_DVS0 = 0x11,
+ PCA9450_REG_BUCK1OUT_DVS1 = 0x12,
+ PCA9450_REG_BUCK2CTRL = 0x13,
+ PCA9450_REG_BUCK2OUT_DVS0 = 0x14,
+ PCA9450_REG_BUCK2OUT_DVS1 = 0x15,
+ PCA9450_REG_BUCK3CTRL = 0x16,
+ PCA9450_REG_BUCK3OUT_DVS0 = 0x17,
+ PCA9450_REG_BUCK3OUT_DVS1 = 0x18,
+ PCA9450_REG_BUCK4CTRL = 0x19,
+ PCA9450_REG_BUCK4OUT = 0x1A,
+ PCA9450_REG_BUCK5CTRL = 0x1B,
+ PCA9450_REG_BUCK5OUT = 0x1C,
+ PCA9450_REG_BUCK6CTRL = 0x1D,
+ PCA9450_REG_BUCK6OUT = 0x1E,
+ PCA9450_REG_LDO_AD_CTRL = 0x20,
+ PCA9450_REG_LDO1CTRL = 0x21,
+ PCA9450_REG_LDO2CTRL = 0x22,
+ PCA9450_REG_LDO3CTRL = 0x23,
+ PCA9450_REG_LDO4CTRL = 0x24,
+ PCA9450_REG_LDO5CTRL_L = 0x25,
+ PCA9450_REG_LDO5CTRL_H = 0x26,
+ PCA9450_REG_LOADSW_CTRL = 0x2A,
+ PCA9450_REG_VRFLT1_STS = 0x2B,
+ PCA9450_REG_VRFLT2_STS = 0x2C,
+ PCA9450_REG_VRFLT1_MASK = 0x2D,
+ PCA9450_REG_VRFLT2_MASK = 0x2E,
+ PCA9450_MAX_REGISTER = 0x2F,
+};
+
+/* PCA9450 BUCK ENMODE bits */
+#define BUCK_ENMODE_OFF 0x00
+#define BUCK_ENMODE_ONREQ 0x01
+#define BUCK_ENMODE_ONREQ_STBYREQ 0x02
+#define BUCK_ENMODE_ON 0x03
+
+/* PCA9450_REG_BUCK1_CTRL bits */
+#define BUCK1_RAMP_MASK 0xC0
+#define BUCK1_RAMP_25MV 0x0
+#define BUCK1_RAMP_12P5MV 0x1
+#define BUCK1_RAMP_6P25MV 0x2
+#define BUCK1_RAMP_3P125MV 0x3
+#define BUCK1_DVS_CTRL 0x10
+#define BUCK1_AD 0x08
+#define BUCK1_FPWM 0x04
+#define BUCK1_ENMODE_MASK 0x03
+
+/* PCA9450_REG_BUCK2_CTRL bits */
+#define BUCK2_RAMP_MASK 0xC0
+#define BUCK2_RAMP_25MV 0x0
+#define BUCK2_RAMP_12P5MV 0x1
+#define BUCK2_RAMP_6P25MV 0x2
+#define BUCK2_RAMP_3P125MV 0x3
+#define BUCK2_DVS_CTRL 0x10
+#define BUCK2_AD 0x08
+#define BUCK2_FPWM 0x04
+#define BUCK2_ENMODE_MASK 0x03
+
+/* PCA9450_REG_BUCK3_CTRL bits */
+#define BUCK3_RAMP_MASK 0xC0
+#define BUCK3_RAMP_25MV 0x0
+#define BUCK3_RAMP_12P5MV 0x1
+#define BUCK3_RAMP_6P25MV 0x2
+#define BUCK3_RAMP_3P125MV 0x3
+#define BUCK3_DVS_CTRL 0x10
+#define BUCK3_AD 0x08
+#define BUCK3_FPWM 0x04
+#define BUCK3_ENMODE_MASK 0x03
+
+/* PCA9450_REG_BUCK4_CTRL bits */
+#define BUCK4_AD 0x08
+#define BUCK4_FPWM 0x04
+#define BUCK4_ENMODE_MASK 0x03
+
+/* PCA9450_REG_BUCK5_CTRL bits */
+#define BUCK5_AD 0x08
+#define BUCK5_FPWM 0x04
+#define BUCK5_ENMODE_MASK 0x03
+
+/* PCA9450_REG_BUCK6_CTRL bits */
+#define BUCK6_AD 0x08
+#define BUCK6_FPWM 0x04
+#define BUCK6_ENMODE_MASK 0x03
+
+/* PCA9450_REG_BUCK123_PRESET_EN bit */
+#define BUCK123_PRESET_EN 0x80
+
+/* PCA9450_BUCK1OUT_DVS0 bits */
+#define BUCK1OUT_DVS0_MASK 0x7F
+#define BUCK1OUT_DVS0_DEFAULT 0x14
+
+/* PCA9450_BUCK1OUT_DVS1 bits */
+#define BUCK1OUT_DVS1_MASK 0x7F
+#define BUCK1OUT_DVS1_DEFAULT 0x14
+
+/* PCA9450_BUCK2OUT_DVS0 bits */
+#define BUCK2OUT_DVS0_MASK 0x7F
+#define BUCK2OUT_DVS0_DEFAULT 0x14
+
+/* PCA9450_BUCK2OUT_DVS1 bits */
+#define BUCK2OUT_DVS1_MASK 0x7F
+#define BUCK2OUT_DVS1_DEFAULT 0x14
+
+/* PCA9450_BUCK3OUT_DVS0 bits */
+#define BUCK3OUT_DVS0_MASK 0x7F
+#define BUCK3OUT_DVS0_DEFAULT 0x14
+
+/* PCA9450_BUCK3OUT_DVS1 bits */
+#define BUCK3OUT_DVS1_MASK 0x7F
+#define BUCK3OUT_DVS1_DEFAULT 0x14
+
+/* PCA9450_REG_BUCK4OUT bits */
+#define BUCK4OUT_MASK 0x7F
+#define BUCK4OUT_DEFAULT 0x6C
+
+/* PCA9450_REG_BUCK5OUT bits */
+#define BUCK5OUT_MASK 0x7F
+#define BUCK5OUT_DEFAULT 0x30
+
+/* PCA9450_REG_BUCK6OUT bits */
+#define BUCK6OUT_MASK 0x7F
+#define BUCK6OUT_DEFAULT 0x14
+
+/* PCA9450_REG_LDO1_VOLT bits */
+#define LDO1_EN_MASK 0xC0
+#define LDO1OUT_MASK 0x07
+
+/* PCA9450_REG_LDO2_VOLT bits */
+#define LDO2_EN_MASK 0xC0
+#define LDO2OUT_MASK 0x07
+
+/* PCA9450_REG_LDO3_VOLT bits */
+#define LDO3_EN_MASK 0xC0
+#define LDO3OUT_MASK 0x0F
+
+/* PCA9450_REG_LDO4_VOLT bits */
+#define LDO4_EN_MASK 0xC0
+#define LDO4OUT_MASK 0x0F
+
+/* PCA9450_REG_LDO5_VOLT bits */
+#define LDO5L_EN_MASK 0xC0
+#define LDO5LOUT_MASK 0x0F
+
+#define LDO5H_EN_MASK 0xC0
+#define LDO5HOUT_MASK 0x0F
+
+/* PCA9450_REG_IRQ bits */
+#define IRQ_PWRON 0x80
+#define IRQ_WDOGB 0x40
+#define IRQ_RSVD 0x20
+#define IRQ_VR_FLT1 0x10
+#define IRQ_VR_FLT2 0x08
+#define IRQ_LOWVSYS 0x04
+#define IRQ_THERM_105 0x02
+#define IRQ_THERM_125 0x01
+
+/* PCA9450_REG_RESET_CTRL bits */
+#define WDOG_B_CFG_MASK 0xC0
+#define WDOG_B_CFG_NONE 0x00
+#define WDOG_B_CFG_WARM 0x40
+#define WDOG_B_CFG_COLD_LDO12 0x80
+#define WDOG_B_CFG_COLD 0xC0
+
+/* PCA9450_REG_CONFIG2 bits */
+#define I2C_LT_MASK 0x03
+#define I2C_LT_FORCE_DISABLE 0x00
+#define I2C_LT_ON_STANDBY_RUN 0x01
+#define I2C_LT_ON_RUN 0x02
+#define I2C_LT_FORCE_ENABLE 0x03
+
+#endif /* __LINUX_REG_PCA9450_H__ */
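A hedged sketch of how these masks compose with regmap (the 6-bit shift for
the ramp field is inferred from BUCK1_RAMP_MASK being 0xC0; the header does
not state it):

#include <linux/regmap.h>
#include <linux/regulator/pca9450.h>

static int pca9450_set_buck1_ramp_12p5mv(struct regmap *map)
{
	/* Ramp rate is assumed to live in bits 7:6 of BUCK1CTRL. */
	return regmap_update_bits(map, PCA9450_REG_BUCK1CTRL,
				  BUCK1_RAMP_MASK,
				  BUCK1_RAMP_12P5MV << 6);
}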
diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h
index d47e668d9ca8..c964fe8ab698 100644
--- a/include/linux/regulator/pfuze100.h
+++ b/include/linux/regulator/pfuze100.h
@@ -63,10 +63,4 @@
#define PFUZE3001_VLDO3 8
#define PFUZE3001_VLDO4 9
-struct regulator_init_data;
-
-struct pfuze_regulator_platform_data {
- struct regulator_init_data *init_data[PFUZE100_MAX_REGULATOR];
-};
-
#endif /* __LINUX_REG_PFUZE100_H */
diff --git a/include/linux/regulator/tps62360.h b/include/linux/regulator/tps62360.h
index 94a90c06f1e5..398e74a1d941 100644
--- a/include/linux/regulator/tps62360.h
+++ b/include/linux/regulator/tps62360.h
@@ -19,10 +19,6 @@
* @en_discharge: Enable discharging of the output capacitor via the
* internal register.
* @en_internal_pulldn: Enable the internal pull down.
- * @vsel0_gpio: Gpio number for vsel0. It should be -1 if this is tied with
- * fixed logic.
- * @vsel1_gpio: Gpio number for vsel1. It should be -1 if this is tied with
- * fixed logic.
* @vsel0_def_state: Default state of vsel0. 1 if it is high else 0.
* @vsel1_def_state: Default state of vsel1. 1 if it is high else 0.
*/
@@ -30,8 +26,6 @@ struct tps62360_regulator_platform_data {
struct regulator_init_data *reg_init_data;
bool en_discharge;
bool en_internal_pulldn;
- int vsel0_gpio;
- int vsel1_gpio;
int vsel0_def_state;
int vsel1_def_state;
};
diff --git a/include/linux/relay.h b/include/linux/relay.h
index c759f96e39c1..72b876dd5cb8 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -62,7 +62,7 @@ struct rchan
size_t subbuf_size; /* sub-buffer size */
size_t n_subbufs; /* number of sub-buffers per buffer */
size_t alloc_size; /* total buffer size allocated */
- struct rchan_callbacks *cb; /* client callbacks */
+ const struct rchan_callbacks *cb; /* client callbacks */
struct kref kref; /* channel refcount */
void *private_data; /* for user-defined data */
size_t last_toobig; /* tried to log event > subbuf size */
@@ -89,6 +89,8 @@ struct rchan_callbacks
* The client should return 1 to continue logging, 0 to stop
* logging.
*
+ * This callback is optional.
+ *
* NOTE: subbuf_start will also be invoked when the buffer is
* created, so that the first sub-buffer can be initialized
* if necessary. In this case, prev_subbuf will be NULL.
@@ -102,25 +104,6 @@ struct rchan_callbacks
size_t prev_padding);
/*
- * buf_mapped - relay buffer mmap notification
- * @buf: the channel buffer
- * @filp: relay file pointer
- *
- * Called when a relay file is successfully mmapped
- */
- void (*buf_mapped)(struct rchan_buf *buf,
- struct file *filp);
-
- /*
- * buf_unmapped - relay buffer unmap notification
- * @buf: the channel buffer
- * @filp: relay file pointer
- *
- * Called when a relay file is successfully unmapped
- */
- void (*buf_unmapped)(struct rchan_buf *buf,
- struct file *filp);
- /*
* create_buf_file - create file to represent a relay channel buffer
* @filename: the name of the file to create
* @parent: the parent of the file to create
@@ -141,7 +124,9 @@ struct rchan_callbacks
* cause relay_open() to create a single global buffer rather
* than the default set of per-cpu buffers.
*
- * See Documentation/filesystems/relay.txt for more info.
+ * This callback is mandatory.
+ *
+ * See Documentation/filesystems/relay.rst for more info.
*/
struct dentry *(*create_buf_file)(const char *filename,
struct dentry *parent,
@@ -158,6 +143,8 @@ struct rchan_callbacks
* channel buffer.
*
* The callback should return 0 if successful, negative if not.
+ *
+ * This callback is mandatory.
*/
int (*remove_buf_file)(struct dentry *dentry);
};
@@ -170,7 +157,7 @@ struct rchan *relay_open(const char *base_filename,
struct dentry *parent,
size_t subbuf_size,
size_t n_subbufs,
- struct rchan_callbacks *cb,
+ const struct rchan_callbacks *cb,
void *private_data);
extern int relay_late_setup_files(struct rchan *chan,
const char *base_filename,
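With create_buf_file() and remove_buf_file() now the only mandatory hooks,
a minimal client reduces to the sketch below (a debugfs-backed channel;
relay_file_operations is the stock file_operations relay exports, and the
my_* names are placeholders):

#include <linux/relay.h>
#include <linux/debugfs.h>

static struct dentry *my_create_buf_file(const char *filename,
					 struct dentry *parent, umode_t mode,
					 struct rchan_buf *buf, int *is_global)
{
	/* One debugfs file per per-cpu buffer; leave *is_global at 0. */
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static int my_remove_buf_file(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

/* No buf_mapped/buf_unmapped: those hooks are gone after this patch. */
static const struct rchan_callbacks my_relay_cb = {
	.create_buf_file = my_create_buf_file,
	.remove_buf_file = my_remove_buf_file,
};

/* e.g. relay_open("my-log", NULL, 4096, 8, &my_relay_cb, NULL); */

Note that the constified callback pointer in struct rchan is what allows
the callbacks table itself to be static const.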
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 16ad66683ad0..fe8978eb69f1 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -38,6 +38,7 @@
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/virtio.h>
+#include <linux/cdev.h>
#include <linux/completion.h>
#include <linux/idr.h>
#include <linux/of.h>
@@ -73,7 +74,7 @@ struct resource_table {
u32 ver;
u32 num;
u32 reserved[2];
- u32 offset[0];
+ u32 offset[];
} __packed;
/**
@@ -87,7 +88,7 @@ struct resource_table {
*/
struct fw_rsc_hdr {
u32 type;
- u8 data[0];
+ u8 data[];
} __packed;
/**
@@ -242,7 +243,7 @@ struct fw_rsc_trace {
* @da: device address
* @align: the alignment between the consumer and producer parts of the vring
* @num: num of buffers supported by this vring (must be power of two)
- * @notifyid is a unique rproc-wide notify index for this vring. This notify
+ * @notifyid: a unique rproc-wide notify index for this vring. This notify
* index is used when kicking a remote processor, to let it know that this
* vring is triggered.
* @pa: physical address
@@ -265,18 +266,18 @@ struct fw_rsc_vdev_vring {
/**
* struct fw_rsc_vdev - virtio device header
* @id: virtio device id (as in virtio_ids.h)
- * @notifyid is a unique rproc-wide notify index for this vdev. This notify
+ * @notifyid: a unique rproc-wide notify index for this vdev. This notify
* index is used when kicking a remote processor, to let it know that the
* status/features of this vdev have changed.
- * @dfeatures specifies the virtio device features supported by the firmware
- * @gfeatures is a place holder used by the host to write back the
+ * @dfeatures: specifies the virtio device features supported by the firmware
+ * @gfeatures: a place holder used by the host to write back the
* negotiated features that are supported by both sides.
- * @config_len is the size of the virtio config space of this vdev. The config
+ * @config_len: the size of the virtio config space of this vdev. The config
* space lies in the resource table immediate after this vdev header.
- * @status is a place holder where the host will indicate its virtio progress.
- * @num_of_vrings indicates how many vrings are described in this vdev header
+ * @status: a place holder where the host will indicate its virtio progress.
+ * @num_of_vrings: indicates how many vrings are described in this vdev header
* @reserved: reserved (must be zero)
- * @vring is an array of @num_of_vrings entries of 'struct fw_rsc_vdev_vring'.
+ * @vring: an array of @num_of_vrings entries of 'struct fw_rsc_vdev_vring'.
*
* This resource is a virtio device header: it provides information about
* the vdev, and is then used by the host and its peer remote processors
@@ -286,16 +287,17 @@ struct fw_rsc_vdev_vring {
* to statically allocate a vdev upon registration of the rproc (dynamic vdev
* allocation is not yet supported).
*
- * Note: unlike virtualization systems, the term 'host' here means
- * the Linux side which is running remoteproc to control the remote
- * processors. We use the name 'gfeatures' to comply with virtio's terms,
- * though there isn't really any virtualized guest OS here: it's the host
- * which is responsible for negotiating the final features.
- * Yeah, it's a bit confusing.
- *
- * Note: immediately following this structure is the virtio config space for
- * this vdev (which is specific to the vdev; for more info, read the virtio
- * spec). the size of the config space is specified by @config_len.
+ * Note:
+ * 1. unlike virtualization systems, the term 'host' here means
+ * the Linux side which is running remoteproc to control the remote
+ * processors. We use the name 'gfeatures' to comply with virtio's terms,
+ * though there isn't really any virtualized guest OS here: it's the host
+ * which is responsible for negotiating the final features.
+ * Yeah, it's a bit confusing.
+ *
+ * 2. immediately following this structure is the virtio config space for
+ * this vdev (which is specific to the vdev; for more info, read the virtio
+ * spec). The size of the config space is specified by @config_len.
*/
struct fw_rsc_vdev {
u32 id;
@@ -306,7 +308,7 @@ struct fw_rsc_vdev {
u8 status;
u8 num_of_vrings;
u8 reserved[2];
- struct fw_rsc_vdev_vring vring[0];
+ struct fw_rsc_vdev_vring vring[];
} __packed;
struct rproc;
@@ -314,6 +316,7 @@ struct rproc;
/**
* struct rproc_mem_entry - memory entry descriptor
* @va: virtual address
+ * @is_iomem: io memory
* @dma: dma address
* @len: length, in bytes
* @da: device address
@@ -328,8 +331,9 @@ struct rproc;
*/
struct rproc_mem_entry {
void *va;
+ bool is_iomem;
dma_addr_t dma;
- int len;
+ size_t len;
u32 da;
void *priv;
char name[32];
@@ -355,34 +359,50 @@ enum rsc_handling_status {
/**
* struct rproc_ops - platform-specific device handlers
+ * @prepare: prepare device for code loading
+ * @unprepare: unprepare device after stop
* @start: power on the device and boot it
* @stop: power off the device
+ * @attach: attach to a device that is already powered up
+ * @detach: detach from a device, leaving it powered up
* @kick: kick a virtqueue (virtqueue id given as a parameter)
* @da_to_va: optional platform hook to perform address translations
* @parse_fw: parse firmware to extract information (e.g. resource table)
* @handle_rsc: optional platform hook to handle vendor resources. Should return
- * RSC_HANDLED if resource was handled, RSC_IGNORED if not handled and a
- * negative value on error
- * @load_rsc_table: load resource table from firmware image
- * @find_loaded_rsc_table: find the loaded resouce table
+ * RSC_HANDLED if resource was handled, RSC_IGNORED if not handled
+ * and a negative value on error
+ * @find_loaded_rsc_table: find the loaded resource table from firmware image
+ * @get_loaded_rsc_table: get resource table installed in memory
+ * by an external entity
* @load: load firmware to memory, where the remote processor
* expects to find it
* @sanity_check: sanity check the fw image
* @get_boot_addr: get boot address to entry point specified in firmware
+ * @panic: optional callback to react to system panic, core will delay
+ * the panic by at least the returned number of milliseconds
+ * @coredump: collect firmware dump after the subsystem is shutdown
*/
struct rproc_ops {
+ int (*prepare)(struct rproc *rproc);
+ int (*unprepare)(struct rproc *rproc);
int (*start)(struct rproc *rproc);
int (*stop)(struct rproc *rproc);
+ int (*attach)(struct rproc *rproc);
+ int (*detach)(struct rproc *rproc);
void (*kick)(struct rproc *rproc, int vqid);
- void * (*da_to_va)(struct rproc *rproc, u64 da, int len);
+ void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len, bool *is_iomem);
int (*parse_fw)(struct rproc *rproc, const struct firmware *fw);
int (*handle_rsc)(struct rproc *rproc, u32 rsc_type, void *rsc,
int offset, int avail);
struct resource_table *(*find_loaded_rsc_table)(
struct rproc *rproc, const struct firmware *fw);
+ struct resource_table *(*get_loaded_rsc_table)(
+ struct rproc *rproc, size_t *size);
int (*load)(struct rproc *rproc, const struct firmware *fw);
int (*sanity_check)(struct rproc *rproc, const struct firmware *fw);
- u32 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw);
+ u64 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw);
+ unsigned long (*panic)(struct rproc *rproc);
+ void (*coredump)(struct rproc *rproc);
};
/**
@@ -393,6 +413,10 @@ struct rproc_ops {
* @RPROC_RUNNING: device is up and running
* @RPROC_CRASHED: device has crashed; need to start recovery
* @RPROC_DELETED: device is deleted
+ * @RPROC_ATTACHED: device has been booted by another entity and the core
+ * has attached to it
+ * @RPROC_DETACHED: device has been booted by another entity and waiting
+ * for the core to attach to it
* @RPROC_LAST: just keep this one at the end
*
* Please note that the values of these states are used as indices
@@ -407,14 +431,16 @@ enum rproc_state {
RPROC_RUNNING = 2,
RPROC_CRASHED = 3,
RPROC_DELETED = 4,
- RPROC_LAST = 5,
+ RPROC_ATTACHED = 5,
+ RPROC_DETACHED = 6,
+ RPROC_LAST = 7,
};
/**
* enum rproc_crash_type - remote processor crash types
* @RPROC_MMUFAULT: iommu fault
* @RPROC_WATCHDOG: watchdog bite
- * @RPROC_FATAL_ERROR fatal error
+ * @RPROC_FATAL_ERROR: fatal error
*
* Each element of the enum is used as an array index. So that, the value of
* the elements should be always something sane.
@@ -428,6 +454,20 @@ enum rproc_crash_type {
};
/**
+ * enum rproc_dump_mechanism - Coredump options for core
+ * @RPROC_COREDUMP_DISABLED: Don't perform any dump
+ * @RPROC_COREDUMP_ENABLED: Copy dump to separate buffer and carry on with
+ * recovery
+ * @RPROC_COREDUMP_INLINE: Read segments directly from device memory. Stall
+ * recovery until all segments are read
+ */
+enum rproc_dump_mechanism {
+ RPROC_COREDUMP_DISABLED,
+ RPROC_COREDUMP_ENABLED,
+ RPROC_COREDUMP_INLINE,
+};
+
+/**
* struct rproc_dump_segment - segment info from ELF header
* @node: list node related to the rproc segment list
* @da: device address of the segment
@@ -435,6 +475,7 @@ enum rproc_crash_type {
* @priv: private data associated with the dump_segment
* @dump: custom dump function to fill device memory segment associated
* with coredump
+ * @offset: offset of the segment
*/
struct rproc_dump_segment {
struct list_head node;
@@ -444,11 +485,25 @@ struct rproc_dump_segment {
void *priv;
void (*dump)(struct rproc *rproc, struct rproc_dump_segment *segment,
- void *dest);
+ void *dest, size_t offset, size_t size);
loff_t offset;
};
/**
+ * enum rproc_features - features supported
+ *
+ * @RPROC_FEAT_ATTACH_ON_RECOVERY: The remote processor does not need help
+ * from Linux to recover, such as firmware
+ * loading. Linux just needs to attach after
+ * recovery.
+ */
+
+enum rproc_features {
+ RPROC_FEAT_ATTACH_ON_RECOVERY,
+ RPROC_MAX_FEATURES,
+};
+
+/**
* struct rproc - represents a physical remote processor device
* @node: list node of this rproc object
* @domain: iommu domain
@@ -459,6 +514,7 @@ struct rproc_dump_segment {
* @dev: virtual device for refcounting and common remoteproc behavior
* @power: refcount of users who need this rproc powered up
* @state: state of the device
+ * @dump_conf: Currently selected coredump configuration
* @lock: lock which protects concurrent manipulations of the rproc
* @dbg_dir: debugfs directory of this rproc device
* @traces: list of trace buffers
@@ -475,30 +531,39 @@ struct rproc_dump_segment {
* @recovery_disabled: flag that state if recovery was disabled
* @max_notifyid: largest allocated notify id.
* @table_ptr: pointer to the resource table in effect
+ * @clean_table: copy of the resource table without modifications. Used
+ * when a remote processor is attached or detached from the core
* @cached_table: copy of the resource table
* @table_sz: size of @cached_table
* @has_iommu: flag to indicate if remote processor is behind an MMU
* @auto_boot: flag to indicate if remote processor should be auto-started
+ * @sysfs_read_only: flag to make remoteproc sysfs files read only
* @dump_segments: list of segments in the firmware
* @nb_vdev: number of vdev currently handled by rproc
+ * @elf_class: firmware ELF class
+ * @elf_machine: firmware ELF machine
+ * @cdev: character device of the rproc
+ * @cdev_put_on_release: flag to indicate if remoteproc should be shut down on @cdev release
+ * @features: indicate remoteproc features
*/
struct rproc {
struct list_head node;
struct iommu_domain *domain;
const char *name;
- char *firmware;
+ const char *firmware;
void *priv;
struct rproc_ops *ops;
struct device dev;
atomic_t power;
unsigned int state;
+ enum rproc_dump_mechanism dump_conf;
struct mutex lock;
struct dentry *dbg_dir;
struct list_head traces;
int num_traces;
struct list_head carveouts;
struct list_head mappings;
- u32 bootaddr;
+ u64 bootaddr;
struct list_head rvdevs;
struct list_head subdevs;
struct idr notifyids;
@@ -508,12 +573,19 @@ struct rproc {
bool recovery_disabled;
int max_notifyid;
struct resource_table *table_ptr;
+ struct resource_table *clean_table;
struct resource_table *cached_table;
size_t table_sz;
bool has_iommu;
bool auto_boot;
+ bool sysfs_read_only;
struct list_head dump_segments;
int nb_vdev;
+ u8 elf_class;
+ u16 elf_machine;
+ struct cdev cdev;
+ bool cdev_put_on_release;
+ DECLARE_BITMAP(features, RPROC_MAX_FEATURES);
};
/**
@@ -541,7 +613,7 @@ struct rproc_subdev {
/**
* struct rproc_vring - remoteproc vring state
* @va: virtual address
- * @len: length, in bytes
+ * @num: vring size
* @da: device address
* @align: vring alignment
* @notifyid: rproc-specific unique vring index
@@ -550,7 +622,7 @@ struct rproc_subdev {
*/
struct rproc_vring {
void *va;
- int len;
+ int num;
u32 da;
u32 align;
int notifyid;
@@ -560,21 +632,19 @@ struct rproc_vring {
/**
* struct rproc_vdev - remoteproc state for a supported virtio device
- * @refcount: reference counter for the vdev and vring allocations
* @subdev: handle for registering the vdev as a rproc subdevice
+ * @pdev: remoteproc virtio platform device
* @id: virtio device id (as in virtio_ids.h)
* @node: list node
* @rproc: the rproc handle
- * @vdev: the virio device
* @vring: the vrings for this vdev
* @rsc_offset: offset of the vdev's resource entry
* @index: vdev position versus other vdev declared in resource table
*/
struct rproc_vdev {
- struct kref refcount;
struct rproc_subdev subdev;
- struct device dev;
+ struct platform_device *pdev;
unsigned int id;
struct list_head node;
@@ -594,42 +664,42 @@ void rproc_put(struct rproc *rproc);
int rproc_add(struct rproc *rproc);
int rproc_del(struct rproc *rproc);
void rproc_free(struct rproc *rproc);
+void rproc_resource_cleanup(struct rproc *rproc);
+
+struct rproc *devm_rproc_alloc(struct device *dev, const char *name,
+ const struct rproc_ops *ops,
+ const char *firmware, int len);
+int devm_rproc_add(struct device *dev, struct rproc *rproc);
void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem);
struct rproc_mem_entry *
rproc_mem_entry_init(struct device *dev,
- void *va, dma_addr_t dma, int len, u32 da,
+ void *va, dma_addr_t dma, size_t len, u32 da,
int (*alloc)(struct rproc *, struct rproc_mem_entry *),
int (*release)(struct rproc *, struct rproc_mem_entry *),
const char *name, ...);
struct rproc_mem_entry *
-rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, int len,
+rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len,
u32 da, const char *name, ...);
int rproc_boot(struct rproc *rproc);
-void rproc_shutdown(struct rproc *rproc);
+int rproc_shutdown(struct rproc *rproc);
+int rproc_detach(struct rproc *rproc);
+int rproc_set_firmware(struct rproc *rproc, const char *fw_name);
void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type);
+void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem);
+void rproc_coredump_using_sections(struct rproc *rproc);
int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size);
int rproc_coredump_add_custom_segment(struct rproc *rproc,
dma_addr_t da, size_t size,
void (*dumpfn)(struct rproc *rproc,
struct rproc_dump_segment *segment,
- void *dest),
+ void *dest, size_t offset,
+ size_t size),
void *priv);
-
-static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
-{
- return container_of(vdev->dev.parent, struct rproc_vdev, dev);
-}
-
-static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev)
-{
- struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
-
- return rvdev->rproc;
-}
+int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine);
void rproc_add_subdev(struct rproc *rproc, struct rproc_subdev *subdev);
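A skeletal probe built on the managed helpers added above (the ops bodies
and all my_* names are placeholders, not part of this patch):

#include <linux/platform_device.h>
#include <linux/remoteproc.h>

static int my_rproc_start(struct rproc *rproc)
{
	/* power the core on and release it from reset */
	return 0;
}

static int my_rproc_stop(struct rproc *rproc)
{
	/* halt the core */
	return 0;
}

static const struct rproc_ops my_rproc_ops = {
	.start = my_rproc_start,
	.stop  = my_rproc_stop,
};

static int my_rproc_probe(struct platform_device *pdev)
{
	struct rproc *rproc;

	rproc = devm_rproc_alloc(&pdev->dev, "my-dsp", &my_rproc_ops,
				 "my-dsp-fw.elf", 0 /* no private data */);
	if (!rproc)
		return -ENOMEM;

	/* Unrolled automatically on driver detach. */
	return devm_rproc_add(&pdev->dev, rproc);
}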
diff --git a/include/linux/remoteproc/mtk_scp.h b/include/linux/remoteproc/mtk_scp.h
index b47416f7aeb8..7c2b7cc9fe6c 100644
--- a/include/linux/remoteproc/mtk_scp.h
+++ b/include/linux/remoteproc/mtk_scp.h
@@ -41,6 +41,8 @@ enum scp_ipi_id {
SCP_IPI_ISP_FRAME,
SCP_IPI_FD_CMD,
SCP_IPI_CROS_HOST_CMD,
+ SCP_IPI_VDEC_LAT,
+ SCP_IPI_VDEC_CORE,
SCP_IPI_NS_SERVICE = 0xFF,
SCP_IPI_MAX = 0x100,
};
diff --git a/include/linux/remoteproc/qcom_rproc.h b/include/linux/remoteproc/qcom_rproc.h
index fa8e38681b4b..82b211518136 100644
--- a/include/linux/remoteproc/qcom_rproc.h
+++ b/include/linux/remoteproc/qcom_rproc.h
@@ -3,19 +3,45 @@
struct notifier_block;
+/**
+ * enum qcom_ssr_notify_type - Startup/Shutdown events related to a remoteproc
+ * processor.
+ *
+ * @QCOM_SSR_BEFORE_POWERUP: Remoteproc about to start (prepare stage)
+ * @QCOM_SSR_AFTER_POWERUP: Remoteproc is running (start stage)
+ * @QCOM_SSR_BEFORE_SHUTDOWN: Remoteproc crashed or shutting down (stop stage)
+ * @QCOM_SSR_AFTER_SHUTDOWN: Remoteproc is down (unprepare stage)
+ */
+enum qcom_ssr_notify_type {
+ QCOM_SSR_BEFORE_POWERUP,
+ QCOM_SSR_AFTER_POWERUP,
+ QCOM_SSR_BEFORE_SHUTDOWN,
+ QCOM_SSR_AFTER_SHUTDOWN,
+};
+
+struct qcom_ssr_notify_data {
+ const char *name;
+ bool crashed;
+};
+
#if IS_ENABLED(CONFIG_QCOM_RPROC_COMMON)
-int qcom_register_ssr_notifier(struct notifier_block *nb);
-void qcom_unregister_ssr_notifier(struct notifier_block *nb);
+void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb);
+int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb);
#else
-static inline int qcom_register_ssr_notifier(struct notifier_block *nb)
+static inline void *qcom_register_ssr_notifier(const char *name,
+ struct notifier_block *nb)
{
- return 0;
+ return NULL;
}
-static inline void qcom_unregister_ssr_notifier(struct notifier_block *nb) {}
+static inline int qcom_unregister_ssr_notifier(void *notify,
+ struct notifier_block *nb)
+{
+ return 0;
+}
#endif
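A client-side sketch of the reworked, per-remote notifier (the "mpss" name
and the callback body are assumptions; action carries an enum
qcom_ssr_notify_type value and data points at a qcom_ssr_notify_data):

#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/remoteproc/qcom_rproc.h>

static int my_ssr_cb(struct notifier_block *nb, unsigned long action,
		     void *data)
{
	struct qcom_ssr_notify_data *ssr = data;

	if (action == QCOM_SSR_AFTER_SHUTDOWN && ssr->crashed)
		pr_info("%s crashed, awaiting restart\n", ssr->name);
	return NOTIFY_OK;
}

static struct notifier_block my_ssr_nb = { .notifier_call = my_ssr_cb };
static void *my_ssr_handle;	/* later passed to qcom_unregister_ssr_notifier() */

/* e.g. my_ssr_handle = qcom_register_ssr_notifier("mpss", &my_ssr_nb); */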
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index daf5cf64c6a6..0cf5b20c6ddf 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -2,6 +2,10 @@
#ifndef _RESCTRL_H
#define _RESCTRL_H
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pid.h>
+
#ifdef CONFIG_PROC_CPU_RESCTRL
int proc_resctrl_show(struct seq_file *m,
@@ -11,4 +15,242 @@ int proc_resctrl_show(struct seq_file *m,
#endif
+/* max value for struct rdt_domain's mbps_val */
+#define MBA_MAX_MBPS U32_MAX
+
+/**
+ * enum resctrl_conf_type - The type of configuration.
+ * @CDP_NONE: No prioritisation, both code and data are controlled or monitored.
+ * @CDP_CODE: Configuration applies to instruction fetches.
+ * @CDP_DATA: Configuration applies to reads and writes.
+ */
+enum resctrl_conf_type {
+ CDP_NONE,
+ CDP_CODE,
+ CDP_DATA,
+};
+
+#define CDP_NUM_TYPES (CDP_DATA + 1)
+
+/*
+ * Event IDs, the values match those used to program IA32_QM_EVTSEL before
+ * reading IA32_QM_CTR on RDT systems.
+ */
+enum resctrl_event_id {
+ QOS_L3_OCCUP_EVENT_ID = 0x01,
+ QOS_L3_MBM_TOTAL_EVENT_ID = 0x02,
+ QOS_L3_MBM_LOCAL_EVENT_ID = 0x03,
+};
+
+/**
+ * struct resctrl_staged_config - parsed configuration to be applied
+ * @new_ctrl: new ctrl value to be loaded
+ * @have_new_ctrl: whether the user provided new_ctrl is valid
+ */
+struct resctrl_staged_config {
+ u32 new_ctrl;
+ bool have_new_ctrl;
+};
+
+/**
+ * struct rdt_domain - group of CPUs sharing a resctrl resource
+ * @list: all instances of this resource
+ * @id: unique id for this instance
+ * @cpu_mask: which CPUs share this resource
+ * @rmid_busy_llc: bitmap of which limbo RMIDs are above threshold
+ * @mbm_total: saved state for MBM total bandwidth
+ * @mbm_local: saved state for MBM local bandwidth
+ * @mbm_over: worker to periodically read MBM h/w counters
+ * @cqm_limbo: worker to periodically read CQM h/w counters
+ * @mbm_work_cpu: worker CPU for MBM h/w counters
+ * @cqm_work_cpu: worker CPU for CQM h/w counters
+ * @plr: pseudo-locked region (if any) associated with domain
+ * @staged_config: parsed configuration to be applied
+ * @mbps_val: When mba_sc is enabled, this holds the array of user
+ * specified control values for mba_sc in MBps, indexed
+ * by closid
+ */
+struct rdt_domain {
+ struct list_head list;
+ int id;
+ struct cpumask cpu_mask;
+ unsigned long *rmid_busy_llc;
+ struct mbm_state *mbm_total;
+ struct mbm_state *mbm_local;
+ struct delayed_work mbm_over;
+ struct delayed_work cqm_limbo;
+ int mbm_work_cpu;
+ int cqm_work_cpu;
+ struct pseudo_lock_region *plr;
+ struct resctrl_staged_config staged_config[CDP_NUM_TYPES];
+ u32 *mbps_val;
+};
+
+/**
+ * struct resctrl_cache - Cache allocation related data
+ * @cbm_len: Length of the cache bit mask
+ * @min_cbm_bits: Minimum number of consecutive bits to be set
+ * @shareable_bits: Bitmask of shareable resource with other
+ * executing entities
+ * @arch_has_sparse_bitmaps: True if a bitmap like f00f is valid.
+ * @arch_has_empty_bitmaps: True if the '0' bitmap is valid.
+ * @arch_has_per_cpu_cfg: True if QOS_CFG register for this cache
+ * level has CPU scope.
+ */
+struct resctrl_cache {
+ unsigned int cbm_len;
+ unsigned int min_cbm_bits;
+ unsigned int shareable_bits;
+ bool arch_has_sparse_bitmaps;
+ bool arch_has_empty_bitmaps;
+ bool arch_has_per_cpu_cfg;
+};
+
+/**
+ * enum membw_throttle_mode - System's memory bandwidth throttling mode
+ * @THREAD_THROTTLE_UNDEFINED: Not relevant to the system
+ * @THREAD_THROTTLE_MAX: Memory bandwidth is throttled at the core
+ * always using smallest bandwidth percentage
+ * assigned to threads, aka "max throttling"
+ * @THREAD_THROTTLE_PER_THREAD: Memory bandwidth is throttled at the thread
+ */
+enum membw_throttle_mode {
+ THREAD_THROTTLE_UNDEFINED = 0,
+ THREAD_THROTTLE_MAX,
+ THREAD_THROTTLE_PER_THREAD,
+};
+
+/**
+ * struct resctrl_membw - Memory bandwidth allocation related data
+ * @min_bw: Minimum memory bandwidth percentage user can request
+ * @bw_gran: Granularity at which the memory bandwidth is allocated
+ * @delay_linear: True if memory B/W delay is in linear scale
+ * @arch_needs_linear: True if we can't configure non-linear resources
+ * @throttle_mode: Bandwidth throttling mode when threads request
+ * different memory bandwidths
+ * @mba_sc: True if the MBA software controller (mba_sc) is enabled
+ * @mb_map: Mapping of memory B/W percentage to memory B/W delay
+ */
+struct resctrl_membw {
+ u32 min_bw;
+ u32 bw_gran;
+ u32 delay_linear;
+ bool arch_needs_linear;
+ enum membw_throttle_mode throttle_mode;
+ bool mba_sc;
+ u32 *mb_map;
+};
+
+struct rdt_parse_data;
+struct resctrl_schema;
+
+/**
+ * struct rdt_resource - attributes of a resctrl resource
+ * @rid: The index of the resource
+ * @alloc_capable: Is allocation available on this machine
+ * @mon_capable: Is monitor feature available on this machine
+ * @num_rmid: Number of RMIDs available
+ * @cache_level: Which cache level defines scope of this resource
+ * @cache: Cache allocation related data
+ * @membw: If the component has bandwidth controls, their properties.
+ * @domains: All domains for this resource
+ * @name: Name to use in "schemata" file.
+ * @data_width: Character width of data when displaying
+ * @default_ctrl: Specifies default cache cbm or memory B/W percent.
+ * @format_str: Per resource format string to show domain value
+ * @parse_ctrlval: Per resource function pointer to parse control values
+ * @evt_list: List of monitoring events
+ * @fflags: flags to choose base and info files
+ * @cdp_capable: Is the CDP feature available on this resource
+ */
+struct rdt_resource {
+ int rid;
+ bool alloc_capable;
+ bool mon_capable;
+ int num_rmid;
+ int cache_level;
+ struct resctrl_cache cache;
+ struct resctrl_membw membw;
+ struct list_head domains;
+ char *name;
+ int data_width;
+ u32 default_ctrl;
+ const char *format_str;
+ int (*parse_ctrlval)(struct rdt_parse_data *data,
+ struct resctrl_schema *s,
+ struct rdt_domain *d);
+ struct list_head evt_list;
+ unsigned long fflags;
+ bool cdp_capable;
+};
+
+/**
+ * struct resctrl_schema - configuration abilities of a resource presented to
+ * user-space
+ * @list: Member of resctrl_schema_all.
+ * @name: The name to use in the "schemata" file.
+ * @conf_type: Whether this schema is specific to code/data.
+ * @res: The resource structure exported by the architecture to describe
+ * the hardware that is configured by this schema.
+ * @num_closid: The number of closid that can be used with this schema. When
+ * features like CDP are enabled, this will be lower than the
+ * hardware supports for the resource.
+ */
+struct resctrl_schema {
+ struct list_head list;
+ char name[8];
+ enum resctrl_conf_type conf_type;
+ struct rdt_resource *res;
+ u32 num_closid;
+};
+
+/* The number of closid supported by this resource regardless of CDP */
+u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
+int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
+
+/*
+ * Update the ctrl_val and apply this config right now.
+ * Must be called on one of the domain's CPUs.
+ */
+int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
+ u32 closid, enum resctrl_conf_type t, u32 cfg_val);
+
+u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
+ u32 closid, enum resctrl_conf_type type);
+int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d);
+void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d);
+
+/**
+ * resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid
+ * for this resource and domain.
+ * @r: resource that the counter should be read from.
+ * @d: domain that the counter should be read from.
+ * @rmid: rmid of the counter to read.
+ * @eventid: eventid to read, e.g. L3 occupancy.
+ * @val: result of the counter read in bytes.
+ *
+ * Call from process context on a CPU that belongs to domain @d.
+ *
+ * Return:
+ * 0 on success, or -EIO, -EINVAL etc on error.
+ */
+int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
+ u32 rmid, enum resctrl_event_id eventid, u64 *val);
+
+/**
+ * resctrl_arch_reset_rmid() - Reset any private state associated with rmid
+ * and eventid.
+ * @r: The domain's resource.
+ * @d: The rmid's domain.
+ * @rmid: The rmid whose counter values should be reset.
+ * @eventid: The eventid whose counter values should be reset.
+ *
+ * This can be called from any CPU.
+ */
+void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
+ u32 rmid, enum resctrl_event_id eventid);
+
+extern unsigned int resctrl_rmid_realloc_threshold;
+extern unsigned int resctrl_rmid_realloc_limit;
+
#endif /* _RESCTRL_H */
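A hedged caller-side sketch of the counter-read contract (the wrapper name
is invented; the CPU requirement comes from the kernel-doc above):

#include <linux/resctrl.h>

/* Must already be running on a CPU from d->cpu_mask. */
static int my_read_llc_occupancy(struct rdt_resource *r, struct rdt_domain *d,
				 u32 rmid, u64 *bytes)
{
	return resctrl_arch_rmid_read(r, d, rmid, QOS_L3_OCCUP_EVENT_ID,
				      bytes);
}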
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index ec35814e0bbb..0fa4f60e1186 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -79,6 +79,7 @@ struct reset_controller_dev {
unsigned int nr_resets;
};
+#if IS_ENABLED(CONFIG_RESET_CONTROLLER)
int reset_controller_register(struct reset_controller_dev *rcdev);
void reset_controller_unregister(struct reset_controller_dev *rcdev);
@@ -88,5 +89,26 @@ int devm_reset_controller_register(struct device *dev,
void reset_controller_add_lookup(struct reset_control_lookup *lookup,
unsigned int num_entries);
+#else
+static inline int reset_controller_register(struct reset_controller_dev *rcdev)
+{
+ return 0;
+}
+
+static inline void reset_controller_unregister(struct reset_controller_dev *rcdev)
+{
+}
+
+static inline int devm_reset_controller_register(struct device *dev,
+ struct reset_controller_dev *rcdev)
+{
+ return 0;
+}
+
+static inline void reset_controller_add_lookup(struct reset_control_lookup *lookup,
+ unsigned int num_entries)
+{
+}
+#endif
#endif
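On the provider side, a minimal registration sketch (the my_* driver bodies
are hypothetical; struct reset_control_ops comes from this same header).
Thanks to the new stubs, such a driver now also builds with
CONFIG_RESET_CONTROLLER disabled:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>

static int my_rc_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
	/* set the bit for line @id in an assert register */
	return 0;
}

static int my_rc_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
	/* clear the bit for line @id */
	return 0;
}

static const struct reset_control_ops my_rc_ops = {
	.assert   = my_rc_assert,
	.deassert = my_rc_deassert,
};

static int my_rc_probe(struct platform_device *pdev)
{
	struct reset_controller_dev *rcdev;

	rcdev = devm_kzalloc(&pdev->dev, sizeof(*rcdev), GFP_KERNEL);
	if (!rcdev)
		return -ENOMEM;

	rcdev->ops = &my_rc_ops;
	rcdev->owner = THIS_MODULE;
	rcdev->of_node = pdev->dev.of_node;
	rcdev->nr_resets = 8;	/* hypothetical line count */

	return devm_reset_controller_register(&pdev->dev, rcdev);
}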
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 05aa9f440f48..514ddf003efc 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -10,15 +10,37 @@ struct device;
struct device_node;
struct reset_control;
+/**
+ * struct reset_control_bulk_data - Data used for bulk reset control operations.
+ *
+ * @id: reset control consumer ID
+ * @rstc: struct reset_control * to store the associated reset control
+ *
+ * The reset APIs provide a series of reset_control_bulk_*() API calls as
+ * a convenience to consumers which require multiple reset controls.
+ * This structure is used to manage data for these calls.
+ */
+struct reset_control_bulk_data {
+ const char *id;
+ struct reset_control *rstc;
+};
+
#ifdef CONFIG_RESET_CONTROLLER
int reset_control_reset(struct reset_control *rstc);
+int reset_control_rearm(struct reset_control *rstc);
int reset_control_assert(struct reset_control *rstc);
int reset_control_deassert(struct reset_control *rstc);
int reset_control_status(struct reset_control *rstc);
int reset_control_acquire(struct reset_control *rstc);
void reset_control_release(struct reset_control *rstc);
+int reset_control_bulk_reset(int num_rstcs, struct reset_control_bulk_data *rstcs);
+int reset_control_bulk_assert(int num_rstcs, struct reset_control_bulk_data *rstcs);
+int reset_control_bulk_deassert(int num_rstcs, struct reset_control_bulk_data *rstcs);
+int reset_control_bulk_acquire(int num_rstcs, struct reset_control_bulk_data *rstcs);
+void reset_control_bulk_release(int num_rstcs, struct reset_control_bulk_data *rstcs);
+
struct reset_control *__of_reset_control_get(struct device_node *node,
const char *id, int index, bool shared,
bool optional, bool acquired);
@@ -26,10 +48,18 @@ struct reset_control *__reset_control_get(struct device *dev, const char *id,
int index, bool shared,
bool optional, bool acquired);
void reset_control_put(struct reset_control *rstc);
+int __reset_control_bulk_get(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs,
+ bool shared, bool optional, bool acquired);
+void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs);
+
int __device_reset(struct device *dev, bool optional);
struct reset_control *__devm_reset_control_get(struct device *dev,
const char *id, int index, bool shared,
bool optional, bool acquired);
+int __devm_reset_control_bulk_get(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs,
+ bool shared, bool optional, bool acquired);
struct reset_control *devm_reset_control_array_get(struct device *dev,
bool shared, bool optional);
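On the consumer side, the bulk API collapses the usual per-line boilerplate.
A sketch assuming two lines named "ctrl" and "phy" (names invented;
devm_reset_control_bulk_get_exclusive() is defined further down in this
header):

#include <linux/kernel.h>
#include <linux/reset.h>

static struct reset_control_bulk_data my_resets[] = {
	{ .id = "ctrl" },
	{ .id = "phy"  },
};

static int my_deassert_all(struct device *dev)
{
	int ret;

	ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(my_resets),
						    my_resets);
	if (ret)
		return ret;

	/* Deasserts each line in order; expected to unwind on failure. */
	return reset_control_bulk_deassert(ARRAY_SIZE(my_resets), my_resets);
}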
@@ -46,6 +76,11 @@ static inline int reset_control_reset(struct reset_control *rstc)
return 0;
}
+static inline int reset_control_rearm(struct reset_control *rstc)
+{
+ return 0;
+}
+
static inline int reset_control_assert(struct reset_control *rstc)
{
return 0;
@@ -95,6 +130,48 @@ static inline struct reset_control *__reset_control_get(
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
+static inline int
+reset_control_bulk_reset(int num_rstcs, struct reset_control_bulk_data *rstcs)
+{
+ return 0;
+}
+
+static inline int
+reset_control_bulk_assert(int num_rstcs, struct reset_control_bulk_data *rstcs)
+{
+ return 0;
+}
+
+static inline int
+reset_control_bulk_deassert(int num_rstcs, struct reset_control_bulk_data *rstcs)
+{
+ return 0;
+}
+
+static inline int
+reset_control_bulk_acquire(int num_rstcs, struct reset_control_bulk_data *rstcs)
+{
+ return 0;
+}
+
+static inline void
+reset_control_bulk_release(int num_rstcs, struct reset_control_bulk_data *rstcs)
+{
+}
+
+static inline int
+__reset_control_bulk_get(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs,
+ bool shared, bool optional, bool acquired)
+{
+ return optional ? 0 : -EOPNOTSUPP;
+}
+
+static inline void
+reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs)
+{
+}
+
static inline struct reset_control *__devm_reset_control_get(
struct device *dev, const char *id,
int index, bool shared, bool optional,
@@ -103,6 +180,14 @@ static inline struct reset_control *__devm_reset_control_get(
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
+static inline int
+__devm_reset_control_bulk_get(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs,
+ bool shared, bool optional, bool acquired)
+{
+ return optional ? 0 : -EOPNOTSUPP;
+}
+
static inline struct reset_control *
devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
{
@@ -155,6 +240,23 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
}
/**
+ * reset_control_bulk_get_exclusive - Lookup and obtain exclusive references to
+ * multiple reset controllers.
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Fills the rstcs array with pointers to exclusive reset controls and
+ * returns 0, or a negative errno on failure.
+ */
+static inline int __must_check
+reset_control_bulk_get_exclusive(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, true);
+}
+
+/**
* reset_control_get_exclusive_released - Lookup and obtain a temporarily
* exclusive reference to a reset
* controller.
@@ -176,6 +278,48 @@ __must_check reset_control_get_exclusive_released(struct device *dev,
}
/**
+ * reset_control_bulk_get_exclusive_released - Lookup and obtain temporarily
+ * exclusive references to multiple reset
+ * controllers.
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Fills the rstcs array with pointers to exclusive reset controls and
+ * returns 0, or a negative errno on failure.
+ * reset-controls returned by this function must be acquired via
+ * reset_control_bulk_acquire() before they can be used and should be released
+ * via reset_control_bulk_release() afterwards.
+ */
+static inline int __must_check
+reset_control_bulk_get_exclusive_released(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, false);
+}
+
+/**
+ * reset_control_bulk_get_optional_exclusive_released - Lookup and obtain optional
+ * temporarily exclusive references to multiple
+ * reset controllers.
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Optional variant of reset_control_bulk_get_exclusive_released(). If the
+ * requested reset is not specified in the device tree, this function returns 0
+ * instead of an error and the missing rstc is set to NULL.
+ *
+ * See reset_control_bulk_get_exclusive_released() for more information.
+ */
+static inline int __must_check
+reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, false);
+}
+
+/**
* reset_control_get_shared - Lookup and obtain a shared reference to a
* reset controller.
* @dev: device to be reset by the controller
@@ -204,6 +348,23 @@ static inline struct reset_control *reset_control_get_shared(
}
/**
+ * reset_control_bulk_get_shared - Lookup and obtain shared references to
+ * multiple reset controllers.
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Fills the rstcs array with pointers to shared reset controls and
+ * returns 0, or a negative errno on failure.
+ */
+static inline int __must_check
+reset_control_bulk_get_shared(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, false);
+}
+
+/**
* reset_control_get_optional_exclusive - optional reset_control_get_exclusive()
* @dev: device to be reset by the controller
* @id: reset line name
@@ -221,6 +382,26 @@ static inline struct reset_control *reset_control_get_optional_exclusive(
}
/**
+ * reset_control_bulk_get_optional_exclusive - optional
+ * reset_control_bulk_get_exclusive()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Optional variant of reset_control_bulk_get_exclusive(). If any of the
+ * requested resets are not specified in the device tree, this function sets
+ * them to NULL instead of returning an error.
+ *
+ * See reset_control_bulk_get_exclusive() for more information.
+ */
+static inline int __must_check
+reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, true);
+}
+
+/**
* reset_control_get_optional_shared - optional reset_control_get_shared()
* @dev: device to be reset by the controller
* @id: reset line name
@@ -238,6 +419,26 @@ static inline struct reset_control *reset_control_get_optional_shared(
}
/**
+ * reset_control_bulk_get_optional_shared - optional
+ * reset_control_bulk_get_shared()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Optional variant of reset_control_bulk_get_shared(). If the requested resets
+ * are not specified in the device tree, this function sets them to NULL
+ * instead of returning an error.
+ *
+ * See reset_control_bulk_get_shared() for more information.
+ */
+static inline int __must_check
+reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, true, true, false);
+}
+
+/**
* of_reset_control_get_exclusive - Lookup and obtain an exclusive reference
* to a reset controller.
* @node: device to be reset by the controller
@@ -254,6 +455,26 @@ static inline struct reset_control *of_reset_control_get_exclusive(
}
/**
+ * of_reset_control_get_optional_exclusive - Lookup and obtain an optional exclusive
+ * reference to a reset controller.
+ * @node: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Optional variant of of_reset_control_get_exclusive(). If the requested reset
+ * is not specified in the device tree, this function returns NULL instead of
+ * an error.
+ *
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ *
+ * Use of id names is optional.
+ */
+static inline struct reset_control *of_reset_control_get_optional_exclusive(
+ struct device_node *node, const char *id)
+{
+ return __of_reset_control_get(node, id, 0, false, true, true);
+}
+
+/**
* of_reset_control_get_shared - Lookup and obtain a shared reference
* to a reset controller.
* @node: device to be reset by the controller
@@ -343,6 +564,26 @@ __must_check devm_reset_control_get_exclusive(struct device *dev,
}
/**
+ * devm_reset_control_bulk_get_exclusive - resource managed
+ * reset_control_bulk_get_exclusive()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Managed reset_control_bulk_get_exclusive(). For reset controllers returned
+ * from this function, reset_control_put() is called automatically on driver
+ * detach.
+ *
+ * See reset_control_bulk_get_exclusive() for more information.
+ */
+static inline int __must_check
+devm_reset_control_bulk_get_exclusive(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, true);
+}
+
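For the managed variant, the bulk array is typically kept in driver state so the controls stay usable for the device's lifetime; no explicit put is needed on any path. A sketch, with all my_* names hypothetical:

	struct my_priv {
		struct reset_control_bulk_data resets[2];
	};

	static int my_probe(struct device *dev, struct my_priv *priv)
	{
		int ret;

		priv->resets[0].id = "core";
		priv->resets[1].id = "bus";

		ret = devm_reset_control_bulk_get_exclusive(dev,
							    ARRAY_SIZE(priv->resets),
							    priv->resets);
		if (ret)
			return ret;

		/* All controls are valid here and are put automatically on detach. */
		return reset_control_bulk_deassert(ARRAY_SIZE(priv->resets),
						   priv->resets);
	}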
+/**
* devm_reset_control_get_exclusive_released - resource managed
* reset_control_get_exclusive_released()
* @dev: device to be reset by the controller
@@ -362,6 +603,65 @@ __must_check devm_reset_control_get_exclusive_released(struct device *dev,
}
/**
+ * devm_reset_control_bulk_get_exclusive_released - resource managed
+ * reset_control_bulk_get_exclusive_released()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Managed reset_control_bulk_get_exclusive_released(). For reset controllers
+ * returned from this function, reset_control_put() is called automatically on
+ * driver detach.
+ *
+ * See reset_control_bulk_get_exclusive_released() for more information.
+ */
+static inline int __must_check
+devm_reset_control_bulk_get_exclusive_released(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, false);
+}
+
+/**
+ * devm_reset_control_get_optional_exclusive_released - resource managed
+ * reset_control_get_optional_exclusive_released()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed-and-optional variant of reset_control_get_exclusive_released(). For
+ * reset controllers returned from this function, reset_control_put() is called
+ * automatically on driver detach.
+ *
+ * See reset_control_get_exclusive_released() for more information.
+ */
+static inline struct reset_control *
+__must_check devm_reset_control_get_optional_exclusive_released(struct device *dev,
+ const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, false, true, false);
+}
+
+/**
+ * devm_reset_control_bulk_get_optional_exclusive_released - resource managed
+ * reset_control_bulk_get_optional_exclusive_released()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Managed reset_control_bulk_get_optional_exclusive_released(). For reset
+ * controllers returned from this function, reset_control_put() is called
+ * automatically on driver detach.
+ *
+ * See reset_control_bulk_get_optional_exclusive_released() for more information.
+ */
+static inline int __must_check
+devm_reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, false);
+}
+
+/**
* devm_reset_control_get_shared - resource managed reset_control_get_shared()
* @dev: device to be reset by the controller
* @id: reset line name
@@ -377,6 +677,26 @@ static inline struct reset_control *devm_reset_control_get_shared(
}
/**
+ * devm_reset_control_bulk_get_shared - resource managed
+ * reset_control_bulk_get_shared()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Managed reset_control_bulk_get_shared(). For reset controllers returned
+ * from this function, reset_control_put() is called automatically on driver
+ * detach.
+ *
+ * See reset_control_bulk_get_shared() for more information.
+ */
+static inline int __must_check
+devm_reset_control_bulk_get_shared(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, false);
+}
+
+/**
* devm_reset_control_get_optional_exclusive - resource managed
* reset_control_get_optional_exclusive()
* @dev: device to be reset by the controller
@@ -395,6 +715,26 @@ static inline struct reset_control *devm_reset_control_get_optional_exclusive(
}
/**
+ * devm_reset_control_bulk_get_optional_exclusive - resource managed
+ * reset_control_bulk_get_optional_exclusive()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Managed reset_control_bulk_get_optional_exclusive(). For reset controllers
+ * returned from this function, reset_control_put() is called automatically on
+ * driver detach.
+ *
+ * See reset_control_bulk_get_optional_exclusive() for more information.
+ */
+static inline int __must_check
+devm_reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, true);
+}
+
+/**
* devm_reset_control_get_optional_shared - resource managed
* reset_control_get_optional_shared()
* @dev: device to be reset by the controller
@@ -413,6 +753,26 @@ static inline struct reset_control *devm_reset_control_get_optional_shared(
}
/**
+ * devm_reset_control_bulk_get_optional_shared - resource managed
+ * reset_control_bulk_get_optional_shared()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Managed reset_control_bulk_get_optional_shared(). For reset controllers
+ * returned from this function, reset_control_put() is called automatically on
+ * driver detach.
+ *
+ * See reset_control_bulk_get_optional_shared() for more information.
+ */
+static inline int __must_check
+devm_reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, true, false);
+}
+
+/**
* devm_reset_control_get_exclusive_by_index - resource managed
* reset_control_get_exclusive()
* @dev: device to be reset by the controller
diff --git a/include/linux/reset/bcm63xx_pmb.h b/include/linux/reset/bcm63xx_pmb.h
index bb4af7b5eb36..c77b6999518a 100644
--- a/include/linux/reset/bcm63xx_pmb.h
+++ b/include/linux/reset/bcm63xx_pmb.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Broadcom BCM63xx Processor Monitor Bus shared routines (SMP and reset)
*
* Copyright (C) 2015, Broadcom Corporation
* Author: Florian Fainelli <f.fainelli@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __BCM63XX_PMB_H
#define __BCM63XX_PMB_H
diff --git a/include/linux/reset/reset-simple.h b/include/linux/reset/reset-simple.h
new file mode 100644
index 000000000000..c3e44f45b0f7
--- /dev/null
+++ b/include/linux/reset/reset-simple.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Simple Reset Controller ops
+ *
+ * Based on Allwinner SoCs Reset Controller driver
+ *
+ * Copyright 2013 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ */
+
+#ifndef __RESET_SIMPLE_H__
+#define __RESET_SIMPLE_H__
+
+#include <linux/io.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+
+/**
+ * struct reset_simple_data - driver data for simple reset controllers
+ * @lock: spinlock to protect registers during read-modify-write cycles
+ * @membase: memory mapped I/O register range
+ * @rcdev: reset controller device base structure
+ * @active_low: if true, bits are cleared to assert the reset. Otherwise, bits
+ * are set to assert the reset. Note that this says nothing about
+ * the voltage level of the actual reset line.
+ * @status_active_low: if true, bits read back as cleared while the reset is
+ * asserted. Otherwise, bits read back as set while the
+ * reset is asserted.
+ * @reset_us: Minimum delay in microseconds that needs to be
+ * waited for between an assert and a deassert to reset the
+ * device. If multiple consumers with different delay
+ * requirements are connected to this controller, it must
+ * be the largest minimum delay. 0 means that such a delay is
+ * unknown and the reset operation is unsupported.
+ */
+struct reset_simple_data {
+ spinlock_t lock;
+ void __iomem *membase;
+ struct reset_controller_dev rcdev;
+ bool active_low;
+ bool status_active_low;
+ unsigned int reset_us;
+};
+
+extern const struct reset_control_ops reset_simple_ops;
+
+#endif /* __RESET_SIMPLE_H__ */
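A hedged sketch of how a platform driver might build on reset_simple_ops; the register width and all my_* names are made up for illustration, while the rcdev fields and devm_reset_controller_register() are the standard reset-controller API:

	static int my_reset_probe(struct platform_device *pdev)
	{
		struct reset_simple_data *data;

		data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		data->membase = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(data->membase))
			return PTR_ERR(data->membase);

		spin_lock_init(&data->lock);
		data->rcdev.owner = THIS_MODULE;
		data->rcdev.nr_resets = 32;	/* assumed: one bit per reset line */
		data->rcdev.ops = &reset_simple_ops;
		data->rcdev.of_node = pdev->dev.of_node;

		return devm_reset_controller_register(&pdev->dev, &data->rcdev);
	}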
diff --git a/include/linux/resource.h b/include/linux/resource.h
index bdf491cbcab7..4fdbc0c3f315 100644
--- a/include/linux/resource.h
+++ b/include/linux/resource.h
@@ -8,7 +8,5 @@
struct task_struct;
void getrusage(struct task_struct *p, int who, struct rusage *ru);
-int do_prlimit(struct task_struct *tsk, unsigned int resource,
- struct rlimit *new_rlim, struct rlimit *old_rlim);
#endif
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
index bba2920e9c05..980a65594412 100644
--- a/include/linux/restart_block.h
+++ b/include/linux/restart_block.h
@@ -23,6 +23,7 @@ enum timespec_type {
* System call restart block.
*/
struct restart_block {
+ unsigned long arch_data;
long (*fn)(struct restart_block *);
union {
/* For futex_wait and futex_wait_requeue_pi */
diff --git a/include/linux/resume_user_mode.h b/include/linux/resume_user_mode.h
new file mode 100644
index 000000000000..285189454449
--- /dev/null
+++ b/include/linux/resume_user_mode.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef LINUX_RESUME_USER_MODE_H
+#define LINUX_RESUME_USER_MODE_H
+
+#include <linux/sched.h>
+#include <linux/task_work.h>
+#include <linux/memcontrol.h>
+#include <linux/blk-cgroup.h>
+
+/**
+ * set_notify_resume - cause resume_user_mode_work() to be called
+ * @task: task that will call resume_user_mode_work()
+ *
+ * Calling this arranges that @task will call resume_user_mode_work()
+ * before returning to user mode. If it's already running in user mode,
+ * it will enter the kernel and call resume_user_mode_work() soon.
+ * If it's blocked, it will not be woken.
+ */
+static inline void set_notify_resume(struct task_struct *task)
+{
+ if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME))
+ kick_process(task);
+}
+
+
+/**
+ * resume_user_mode_work - Perform work before returning to user mode
+ * @regs: user-mode registers of @current task
+ *
+ * This is called when %TIF_NOTIFY_RESUME has been set. Now we are
+ * about to return to user mode, and the user state in @regs can be
+ * inspected or adjusted. The caller in arch code has cleared
+ * %TIF_NOTIFY_RESUME before the call. If the flag gets set again
+ * asynchronously, this will be called again before we return to
+ * user mode.
+ *
+ * Called without locks.
+ */
+static inline void resume_user_mode_work(struct pt_regs *regs)
+{
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ /*
+ * This barrier pairs with task_work_add()->set_notify_resume() after
+ * hlist_add_head(task->task_works);
+ */
+ smp_mb__after_atomic();
+ if (unlikely(task_work_pending(current)))
+ task_work_run();
+
+#ifdef CONFIG_KEYS_REQUEST_CACHE
+ if (unlikely(current->cached_requested_key)) {
+ key_put(current->cached_requested_key);
+ current->cached_requested_key = NULL;
+ }
+#endif
+
+ mem_cgroup_handle_over_high();
+ blkcg_maybe_throttle_current();
+
+ rseq_handle_notify_resume(NULL, regs);
+}
+
+#endif /* LINUX_RESUME_USER_MODE_H */
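A sketch of the intended call site: on its exit-to-user path, arch (or generic entry) code keeps calling the helper until the flag stays clear. The loop below is illustrative, not lifted from any architecture:

	static void my_exit_to_user_mode(struct pt_regs *regs)
	{
		while (test_thread_flag(TIF_NOTIFY_RESUME)) {
			/*
			 * resume_user_mode_work() clears TIF_NOTIFY_RESUME, so
			 * re-checking the flag catches work queued meanwhile.
			 */
			resume_user_mode_work(regs);
		}
	}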
diff --git a/include/linux/rethook.h b/include/linux/rethook.h
new file mode 100644
index 000000000000..c8ac1e5afcd1
--- /dev/null
+++ b/include/linux/rethook.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Return hooking with list-based shadow stack.
+ */
+#ifndef _LINUX_RETHOOK_H
+#define _LINUX_RETHOOK_H
+
+#include <linux/compiler.h>
+#include <linux/freelist.h>
+#include <linux/kallsyms.h>
+#include <linux/llist.h>
+#include <linux/rcupdate.h>
+#include <linux/refcount.h>
+
+struct rethook_node;
+
+typedef void (*rethook_handler_t) (struct rethook_node *, void *, struct pt_regs *);
+
+/**
+ * struct rethook - The rethook management data structure.
+ * @data: The user-defined data storage.
+ * @handler: The user-defined return hook handler.
+ * @pool: The pool of struct rethook_node.
+ * @ref: The reference counter.
+ * @rcu: The rcu_head for deferred freeing.
+ *
+ * Don't embed this in another data structure, because it is self-destructive:
+ * it frees itself once all of its rethook_node entries are freed.
+ */
+struct rethook {
+ void *data;
+ rethook_handler_t handler;
+ struct freelist_head pool;
+ refcount_t ref;
+ struct rcu_head rcu;
+};
+
+/**
+ * struct rethook_node - The rethook shadow-stack entry node.
+ * @freelist: The freelist, linked to struct rethook::pool.
+ * @rcu: The rcu_head for deferred freeing.
+ * @llist: The llist, linked to a struct task_struct::rethooks.
+ * @rethook: The pointer to the struct rethook.
+ * @ret_addr: The storage for the real return address.
+ * @frame: The storage for the frame pointer.
+ *
+ * You can embed this in your extended data structure to store any data
+ * on each entry of the shadow stack.
+ */
+struct rethook_node {
+ union {
+ struct freelist_node freelist;
+ struct rcu_head rcu;
+ };
+ struct llist_node llist;
+ struct rethook *rethook;
+ unsigned long ret_addr;
+ unsigned long frame;
+};
+
+struct rethook *rethook_alloc(void *data, rethook_handler_t handler);
+void rethook_free(struct rethook *rh);
+void rethook_add_node(struct rethook *rh, struct rethook_node *node);
+struct rethook_node *rethook_try_get(struct rethook *rh);
+void rethook_recycle(struct rethook_node *node);
+void rethook_hook(struct rethook_node *node, struct pt_regs *regs, bool mcount);
+unsigned long rethook_find_ret_addr(struct task_struct *tsk, unsigned long frame,
+ struct llist_node **cur);
+
+/* Arch dependent code must implement arch_* and trampoline code */
+void arch_rethook_prepare(struct rethook_node *node, struct pt_regs *regs, bool mcount);
+void arch_rethook_trampoline(void);
+
+/**
+ * is_rethook_trampoline() - Check whether the address is the rethook trampoline
+ * @addr: The address to be checked
+ *
+ * Return true if @addr is the rethook trampoline address.
+ */
+static inline bool is_rethook_trampoline(unsigned long addr)
+{
+ return addr == (unsigned long)dereference_symbol_descriptor(arch_rethook_trampoline);
+}
+
+/* If the architecture needs to fixup the return address, implement it. */
+void arch_rethook_fixup_return(struct pt_regs *regs,
+ unsigned long correct_ret_addr);
+
+/* Generic trampoline handler, arch code must prepare asm stub */
+unsigned long rethook_trampoline_handler(struct pt_regs *regs,
+ unsigned long frame);
+
+#ifdef CONFIG_RETHOOK
+void rethook_flush_task(struct task_struct *tk);
+#else
+#define rethook_flush_task(tsk) do { } while (0)
+#endif
+
+#endif
+
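An illustrative consumer (all my_* names hypothetical; error handling trimmed): allocate a rethook with a return handler, seed its pool with nodes, and from an entry probe on the target function pull a node and hook the return address:

	static void my_ret_handler(struct rethook_node *rhn, void *data,
				   struct pt_regs *regs)
	{
		/* Runs when the probed function returns; rhn is recycled after. */
	}

	static struct rethook *my_rh;

	static int my_init(void)
	{
		int i;

		my_rh = rethook_alloc(NULL, my_ret_handler);
		if (!my_rh)
			return -ENOMEM;

		for (i = 0; i < 16; i++) {
			struct rethook_node *node;

			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (node)
				rethook_add_node(my_rh, node);
		}
		return 0;
	}

	/* Called from an entry probe on the target function. */
	static void my_entry_probe(struct pt_regs *regs)
	{
		struct rethook_node *rhn = rethook_try_get(my_rh);

		if (rhn)
			rethook_hook(rhn, regs, true);	/* true: mcount/ftrace entry (assumption) */
	}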
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index 8ad2487a86d5..373003ace639 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -138,6 +138,17 @@ void rfkill_unregister(struct rfkill *rfkill);
void rfkill_destroy(struct rfkill *rfkill);
/**
+ * rfkill_set_hw_state_reason - Set the internal rfkill hardware block state
+ * with a reason
+ * @rfkill: pointer to the rfkill class to modify.
+ * @blocked: the current hardware block state to set
+ * @reason: one of &enum rfkill_hard_block_reasons
+ *
+ * Prefer to use rfkill_set_hw_state() if you don't need any special reason.
+ */
+bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
+ bool blocked, unsigned long reason);
+/**
* rfkill_set_hw_state - Set the internal rfkill hardware block state
* @rfkill: pointer to the rfkill class to modify.
* @blocked: the current hardware block state to set
@@ -156,7 +167,11 @@ void rfkill_destroy(struct rfkill *rfkill);
* should be blocked) so that drivers need not keep track of the soft
* block state -- which they might not be able to.
*/
-bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked);
+static inline bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
+{
+ return rfkill_set_hw_state_reason(rfkill, blocked,
+ RFKILL_HARD_BLOCK_SIGNAL);
+}
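For example, a driver that learns the radio was blocked because another entity owns it might report that explicitly (reason flag from the uapi rfkill_hard_block_reasons enum; the scenario and function name are illustrative):

	static void my_report_hw_block(struct rfkill *rfkill, bool taken)
	{
		rfkill_set_hw_state_reason(rfkill, taken,
					   RFKILL_HARD_BLOCK_NOT_OWNER);
	}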
/**
* rfkill_set_sw_state - Set the internal rfkill software block state
@@ -215,6 +230,13 @@ void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw);
bool rfkill_blocked(struct rfkill *rfkill);
/**
+ * rfkill_soft_blocked - Query soft rfkill block state
+ *
+ * @rfkill: rfkill struct to query
+ */
+bool rfkill_soft_blocked(struct rfkill *rfkill);
+
+/**
* rfkill_find_type - Helper for finding rfkill type by name
* @name: the name of the type
*
@@ -256,6 +278,13 @@ static inline void rfkill_destroy(struct rfkill *rfkill)
{
}
+static inline bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
+ bool blocked,
+ unsigned long reason)
+{
+ return blocked;
+}
+
static inline bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
return blocked;
@@ -279,6 +308,11 @@ static inline bool rfkill_blocked(struct rfkill *rfkill)
return false;
}
+static inline bool rfkill_soft_blocked(struct rfkill *rfkill)
+{
+ return false;
+}
+
static inline enum rfkill_type rfkill_find_type(const char *name)
{
return RFKILL_TYPE_ALL;
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 70ebef866cc8..68dab3e08aad 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -33,7 +33,7 @@
* of two or more hash tables when the rhashtable is being resized.
* The end of the chain is marked with a special nulls marks which has
* the least significant bit set but otherwise stores the address of
- * the hash bucket. This allows us to be be sure we've found the end
+ * the hash bucket. This allows us to be sure we've found the end
* of the right list.
* The value stored in the hash bucket has BIT(0) used as a lock bit.
* This bit must be atomically set before any changes are made to
@@ -84,7 +84,7 @@ struct bucket_table {
struct lockdep_map dep_map;
- struct rhash_lock_head *buckets[] ____cacheline_aligned_in_smp;
+ struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
/*
@@ -261,13 +261,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
void *arg);
void rhashtable_destroy(struct rhashtable *ht);
-struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
- unsigned int hash);
-struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
- unsigned int hash);
-struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
- struct bucket_table *tbl,
- unsigned int hash);
+struct rhash_lock_head __rcu **rht_bucket_nested(
+ const struct bucket_table *tbl, unsigned int hash);
+struct rhash_lock_head __rcu **__rht_bucket_nested(
+ const struct bucket_table *tbl, unsigned int hash);
+struct rhash_lock_head __rcu **rht_bucket_nested_insert(
+ struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash);
#define rht_dereference(p, ht) \
rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -284,21 +283,21 @@ struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
#define rht_entry(tpos, pos, member) \
({ tpos = container_of(pos, typeof(*tpos), member); 1; })
-static inline struct rhash_lock_head *const *rht_bucket(
+static inline struct rhash_lock_head __rcu *const *rht_bucket(
const struct bucket_table *tbl, unsigned int hash)
{
return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
&tbl->buckets[hash];
}
-static inline struct rhash_lock_head **rht_bucket_var(
+static inline struct rhash_lock_head __rcu **rht_bucket_var(
struct bucket_table *tbl, unsigned int hash)
{
return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
&tbl->buckets[hash];
}
-static inline struct rhash_lock_head **rht_bucket_insert(
+static inline struct rhash_lock_head __rcu **rht_bucket_insert(
struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
@@ -325,7 +324,7 @@ static inline struct rhash_lock_head **rht_bucket_insert(
*/
static inline void rht_lock(struct bucket_table *tbl,
- struct rhash_lock_head **bkt)
+ struct rhash_lock_head __rcu **bkt)
{
local_bh_disable();
bit_spin_lock(0, (unsigned long *)bkt);
@@ -333,7 +332,7 @@ static inline void rht_lock(struct bucket_table *tbl,
}
static inline void rht_lock_nested(struct bucket_table *tbl,
- struct rhash_lock_head **bucket,
+ struct rhash_lock_head __rcu **bucket,
unsigned int subclass)
{
local_bh_disable();
@@ -342,18 +341,18 @@ static inline void rht_lock_nested(struct bucket_table *tbl,
}
static inline void rht_unlock(struct bucket_table *tbl,
- struct rhash_lock_head **bkt)
+ struct rhash_lock_head __rcu **bkt)
{
lock_map_release(&tbl->dep_map);
bit_spin_unlock(0, (unsigned long *)bkt);
local_bh_enable();
}
-static inline struct rhash_head __rcu *__rht_ptr(
- struct rhash_lock_head *const *bkt)
+static inline struct rhash_head *__rht_ptr(
+ struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt)
{
- return (struct rhash_head __rcu *)
- ((unsigned long)*bkt & ~BIT(0) ?:
+ return (struct rhash_head *)
+ ((unsigned long)p & ~BIT(0) ?:
(unsigned long)RHT_NULLS_MARKER(bkt));
}
@@ -365,47 +364,41 @@ static inline struct rhash_head __rcu *__rht_ptr(
* access is guaranteed, such as when destroying the table.
*/
static inline struct rhash_head *rht_ptr_rcu(
- struct rhash_lock_head *const *bkt)
+ struct rhash_lock_head __rcu *const *bkt)
{
- struct rhash_head __rcu *p = __rht_ptr(bkt);
-
- return rcu_dereference(p);
+ return __rht_ptr(rcu_dereference(*bkt), bkt);
}
static inline struct rhash_head *rht_ptr(
- struct rhash_lock_head *const *bkt,
+ struct rhash_lock_head __rcu *const *bkt,
struct bucket_table *tbl,
unsigned int hash)
{
- return rht_dereference_bucket(__rht_ptr(bkt), tbl, hash);
+ return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);
}
static inline struct rhash_head *rht_ptr_exclusive(
- struct rhash_lock_head *const *bkt)
+ struct rhash_lock_head __rcu *const *bkt)
{
- return rcu_dereference_protected(__rht_ptr(bkt), 1);
+ return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt);
}
-static inline void rht_assign_locked(struct rhash_lock_head **bkt,
+static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
struct rhash_head *obj)
{
- struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
-
if (rht_is_a_nulls(obj))
obj = NULL;
- rcu_assign_pointer(*p, (void *)((unsigned long)obj | BIT(0)));
+ rcu_assign_pointer(*bkt, (void *)((unsigned long)obj | BIT(0)));
}
static inline void rht_assign_unlock(struct bucket_table *tbl,
- struct rhash_lock_head **bkt,
+ struct rhash_lock_head __rcu **bkt,
struct rhash_head *obj)
{
- struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
-
if (rht_is_a_nulls(obj))
obj = NULL;
lock_map_release(&tbl->dep_map);
- rcu_assign_pointer(*p, obj);
+ rcu_assign_pointer(*bkt, (void *)obj);
preempt_enable();
__release(bitlock);
local_bh_enable();
@@ -593,7 +586,7 @@ static inline struct rhash_head *__rhashtable_lookup(
.ht = ht,
.key = key,
};
- struct rhash_lock_head *const *bkt;
+ struct rhash_lock_head __rcu *const *bkt;
struct bucket_table *tbl;
struct rhash_head *he;
unsigned int hash;
@@ -709,7 +702,7 @@ static inline void *__rhashtable_insert_fast(
.ht = ht,
.key = key,
};
- struct rhash_lock_head **bkt;
+ struct rhash_lock_head __rcu **bkt;
struct rhash_head __rcu **pprev;
struct bucket_table *tbl;
struct rhash_head *head;
@@ -995,7 +988,7 @@ static inline int __rhashtable_remove_fast_one(
struct rhash_head *obj, const struct rhashtable_params params,
bool rhlist)
{
- struct rhash_lock_head **bkt;
+ struct rhash_lock_head __rcu **bkt;
struct rhash_head __rcu **pprev;
struct rhash_head *he;
unsigned int hash;
@@ -1147,7 +1140,7 @@ static inline int __rhashtable_replace_fast(
struct rhash_head *obj_old, struct rhash_head *obj_new,
const struct rhashtable_params params)
{
- struct rhash_lock_head **bkt;
+ struct rhash_lock_head __rcu **bkt;
struct rhash_head __rcu **pprev;
struct rhash_head *he;
unsigned int hash;
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index df0124eabece..2504df9a0453 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -61,7 +61,8 @@ enum ring_buffer_type {
unsigned ring_buffer_event_length(struct ring_buffer_event *event);
void *ring_buffer_event_data(struct ring_buffer_event *event);
-u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event);
+u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
+ struct ring_buffer_event *event);
/*
* ring_buffer_discard_commit will remove an event that has not
@@ -100,7 +101,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table);
-
+void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);
#define RING_BUFFER_ALL_CPUS -1
@@ -135,14 +136,15 @@ void ring_buffer_read_finish(struct ring_buffer_iter *iter);
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
-struct ring_buffer_event *
-ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts);
+void ring_buffer_iter_advance(struct ring_buffer_iter *iter);
void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
+bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);
unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
+void ring_buffer_reset_online_cpus(struct trace_buffer *buffer);
void ring_buffer_reset(struct trace_buffer *buffer);
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
@@ -179,7 +181,7 @@ unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cp
unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu);
-u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu);
+u64 ring_buffer_time_stamp(struct trace_buffer *buffer);
void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
int cpu, u64 *ts);
void ring_buffer_set_clock(struct trace_buffer *buffer,
diff --git a/include/linux/rio.h b/include/linux/rio.h
index 317bace5ac64..2cd637268b4f 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -100,7 +100,7 @@ struct rio_switch {
u32 port_ok;
struct rio_switch_ops *ops;
spinlock_t lock;
- struct rio_dev *nextdev[0];
+ struct rio_dev *nextdev[];
};
/**
@@ -201,7 +201,7 @@ struct rio_dev {
u8 hopcount;
struct rio_dev *prev;
atomic_t state;
- struct rio_switch rswitch[0]; /* RIO switch info */
+ struct rio_switch rswitch[]; /* RIO switch info */
};
#define rio_dev_g(n) list_entry(n, struct rio_dev, global_list)
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index d637742e5039..e49c32b0f394 100644
--- a/include/linux/rio_drv.h
+++ b/include/linux/rio_drv.h
@@ -444,9 +444,6 @@ static inline void rio_set_drvdata(struct rio_dev *rdev, void *data)
/* Misc driver helpers */
extern u16 rio_local_get_device_id(struct rio_mport *port);
extern void rio_local_set_device_id(struct rio_mport *port, u16 did);
-extern struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from);
-extern struct rio_dev *rio_get_asm(u16 vid, u16 did, u16 asm_vid, u16 asm_did,
- struct rio_dev *from);
extern int rio_init_mports(void);
#endif /* LINUX_RIO_DRV_H */
diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h
index 4846f72759b2..c7e2f21dd5c1 100644
--- a/include/linux/rio_ids.h
+++ b/include/linux/rio_ids.h
@@ -9,18 +9,6 @@
#ifndef LINUX_RIO_IDS_H
#define LINUX_RIO_IDS_H
-#define RIO_VID_FREESCALE 0x0002
-#define RIO_DID_MPC8560 0x0003
-
-#define RIO_VID_TUNDRA 0x000d
-#define RIO_DID_TSI500 0x0500
-#define RIO_DID_TSI568 0x0568
-#define RIO_DID_TSI572 0x0572
-#define RIO_DID_TSI574 0x0574
-#define RIO_DID_TSI576 0x0578 /* Same ID as Tsi578 */
-#define RIO_DID_TSI577 0x0577
-#define RIO_DID_TSI578 0x0578
-
#define RIO_VID_IDT 0x0038
#define RIO_DID_IDT70K200 0x0310
#define RIO_DID_IDTCPS8 0x035c
@@ -33,7 +21,6 @@
#define RIO_DID_IDTCPS1616 0x0379
#define RIO_DID_IDTVPS1616 0x0377
#define RIO_DID_IDTSPS1616 0x0378
-#define RIO_DID_TSI721 0x80ab
#define RIO_DID_IDTRXS1632 0x80e5
#define RIO_DID_IDTRXS2448 0x80e6
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 988d176472df..bd3504d11b15 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -11,6 +11,8 @@
#include <linux/rwsem.h>
#include <linux/memcontrol.h>
#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/memremap.h>
/*
* The anon_vma heads a list of private "related" vmas, to scan if
@@ -39,12 +41,15 @@ struct anon_vma {
atomic_t refcount;
/*
- * Count of child anon_vmas and VMAs which points to this anon_vma.
+ * Count of child anon_vmas. Equals the count of all anon_vmas that
+ * have ->parent pointing to this one, including itself.
*
* This counter is used for making decision about reusing anon_vma
* instead of forking new one. See comments in function anon_vma_clone.
*/
- unsigned degree;
+ unsigned long num_children;
+ /* Count of VMAs whose ->anon_vma pointer points to this object. */
+ unsigned long num_active_vmas;
struct anon_vma *parent; /* Parent of this anon_vma */
@@ -77,7 +82,7 @@ struct anon_vma {
struct anon_vma_chain {
struct vm_area_struct *vma;
struct anon_vma *anon_vma;
- struct list_head same_vma; /* locked by mmap_sem & page_table_lock */
+ struct list_head same_vma; /* locked by mmap_lock & page_table_lock */
struct rb_node rb; /* locked by anon_vma->rwsem */
unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
@@ -86,19 +91,15 @@ struct anon_vma_chain {
};
enum ttu_flags {
- TTU_MIGRATION = 0x1, /* migration mode */
- TTU_MUNLOCK = 0x2, /* munlock mode */
-
TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */
TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */
- TTU_IGNORE_ACCESS = 0x10, /* don't age */
+ TTU_SYNC = 0x10, /* avoid racy checks with PVMW_SYNC */
TTU_IGNORE_HWPOISON = 0x20, /* corrupted page is recoverable */
TTU_BATCH_FLUSH = 0x40, /* Batch TLB flushes where possible
* and caller guarantees they will
* do a final flush if necessary */
TTU_RMAP_LOCKED = 0x80, /* do not grab rmap lock:
* caller holds it */
- TTU_SPLIT_FREEZE = 0x100, /* freeze pte under splitting thp */
};
#ifdef CONFIG_MMU
@@ -130,6 +131,11 @@ static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
down_read(&anon_vma->root->rwsem);
}
+static inline int anon_vma_trylock_read(struct anon_vma *anon_vma)
+{
+ return down_read_trylock(&anon_vma->root->rwsem);
+}
+
static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
up_read(&anon_vma->root->rwsem);
@@ -160,50 +166,213 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,
unlink_anon_vmas(next);
}
-struct anon_vma *page_get_anon_vma(struct page *page);
+struct anon_vma *folio_get_anon_vma(struct folio *folio);
+
+/* RMAP flags, currently only relevant for some anon rmap operations. */
+typedef int __bitwise rmap_t;
+
+/*
+ * No special request: if the page is a subpage of a compound page, it is
+ * mapped via a PTE. The mapped (sub)page is possibly shared between processes.
+ */
+#define RMAP_NONE ((__force rmap_t)0)
+
+/* The (sub)page is exclusive to a single process. */
+#define RMAP_EXCLUSIVE ((__force rmap_t)BIT(0))
-/* bitflags for do_page_add_anon_rmap() */
-#define RMAP_EXCLUSIVE 0x01
-#define RMAP_COMPOUND 0x02
+/*
+ * The compound page is not mapped via PTEs, but instead via a single PMD and
+ * should be accounted accordingly.
+ */
+#define RMAP_COMPOUND ((__force rmap_t)BIT(1))
/*
* rmap interfaces called when adding or removing pte of page
*/
void page_move_anon_rmap(struct page *, struct vm_area_struct *);
void page_add_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long, bool);
-void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long, int);
+ unsigned long address, rmap_t flags);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long, bool);
-void page_add_file_rmap(struct page *, bool);
-void page_remove_rmap(struct page *, bool);
+ unsigned long address);
+void page_add_file_rmap(struct page *, struct vm_area_struct *,
+ bool compound);
+void page_remove_rmap(struct page *, struct vm_area_struct *,
+ bool compound);
void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long);
+ unsigned long address, rmap_t flags);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long);
+ unsigned long address);
-static inline void page_dup_rmap(struct page *page, bool compound)
+static inline void __page_dup_rmap(struct page *page, bool compound)
{
atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
}
+static inline void page_dup_file_rmap(struct page *page, bool compound)
+{
+ __page_dup_rmap(page, compound);
+}
+
+/**
+ * page_try_dup_anon_rmap - try duplicating a mapping of an already mapped
+ * anonymous page
+ * @page: the page to duplicate the mapping for
+ * @compound: the page is mapped as compound or as a small page
+ * @vma: the source vma
+ *
+ * The caller needs to hold the PT lock and the vma->vm_mm->write_protect_seq.
+ *
+ * Duplicating the mapping can only fail if the page may be pinned; device
+ * private pages cannot get pinned and consequently this function cannot fail.
+ *
+ * If duplicating the mapping succeeds, the page has to be mapped R/O into
+ * the parent and the child. It must *not* get mapped writable after this call.
+ *
+ * Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise.
+ */
+static inline int page_try_dup_anon_rmap(struct page *page, bool compound,
+ struct vm_area_struct *vma)
+{
+ VM_BUG_ON_PAGE(!PageAnon(page), page);
+
+ /*
+ * No need to check+clear for already shared pages, including KSM
+ * pages.
+ */
+ if (!PageAnonExclusive(page))
+ goto dup;
+
+ /*
+ * If this page may have been pinned by the parent process,
+ * don't allow to duplicate the mapping but instead require to e.g.,
+ * copy the page immediately for the child so that we'll always
+ * guarantee the pinned page won't be randomly replaced in the
+ * future on write faults.
+ */
+ if (likely(!is_device_private_page(page) &&
+ unlikely(page_needs_cow_for_dma(vma, page))))
+ return -EBUSY;
+
+ ClearPageAnonExclusive(page);
+ /*
+ * It's okay to share the anon page between both processes, mapping
+ * the page R/O into both processes.
+ */
+dup:
+ __page_dup_rmap(page, compound);
+ return 0;
+}
+
+/**
+ * page_try_share_anon_rmap - try marking an exclusive anonymous page possibly
+ * shared to prepare for KSM or temporary unmapping
+ * @page: the exclusive anonymous page to try marking possibly shared
+ *
+ * The caller needs to hold the PT lock and has to have the page table entry
+ * cleared/invalidated.
+ *
+ * This is similar to page_try_dup_anon_rmap(), however, not used during fork()
+ * to duplicate a mapping, but instead to prepare for KSM or temporarily
+ * unmapping a page (swap, migration) via page_remove_rmap().
+ *
+ * Marking the page shared can only fail if the page may be pinned; device
+ * private pages cannot get pinned and consequently this function cannot fail.
+ *
+ * Returns 0 if marking the page possibly shared succeeded. Returns -EBUSY
+ * otherwise.
+ */
+static inline int page_try_share_anon_rmap(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageAnon(page) || !PageAnonExclusive(page), page);
+
+ /* device private pages cannot get pinned via GUP. */
+ if (unlikely(is_device_private_page(page))) {
+ ClearPageAnonExclusive(page);
+ return 0;
+ }
+
+ /*
+ * We have to make sure that when we clear PageAnonExclusive, that
+ * the page is not pinned and that concurrent GUP-fast won't succeed in
+ * concurrently pinning the page.
+ *
+ * Conceptually, PageAnonExclusive clearing consists of:
+ * (A1) Clear PTE
+ * (A2) Check if the page is pinned; back off if so.
+ * (A3) Clear PageAnonExclusive
+ * (A4) Restore PTE (optional, but certainly not writable)
+ *
+ * When clearing PageAnonExclusive, we cannot possibly map the page
+ * writable again, because anon pages that may be shared must never
+ * be writable. So in any case, if the PTE was writable it cannot
+ * be writable anymore afterwards and there would be a PTE change. Only
+ * if the PTE wasn't writable, there might not be a PTE change.
+ *
+ * Conceptually, GUP-fast pinning of an anon page consists of:
+ * (B1) Read the PTE
+ * (B2) FOLL_WRITE: check if the PTE is not writable; back off if so.
+ * (B3) Pin the mapped page
+ * (B4) Check if the PTE changed by re-reading it; back off if so.
+ * (B5) If the original PTE is not writable, check if
+ * PageAnonExclusive is not set; back off if so.
+ *
+ * If the PTE was writable, we only have to make sure that GUP-fast
+ * observes a PTE change and properly backs off.
+ *
+ * If the PTE was not writable, we have to make sure that GUP-fast either
+ * detects a (temporary) PTE change or that PageAnonExclusive is cleared
+ * and properly backs off.
+ *
+ * Consequently, when clearing PageAnonExclusive(), we have to make
+ * sure that (A1), (A2)/(A3) and (A4) happen in the right memory
+ * order. In GUP-fast pinning code, we have to make sure that (B3),(B4)
+ * and (B5) happen in the right memory order.
+ *
+ * We assume that there might not be a memory barrier after
+ * clearing/invalidating the PTE (A1) and before restoring the PTE (A4),
+ * so we use explicit ones here.
+ */
+
+ /* Paired with the memory barrier in try_grab_folio(). */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ smp_mb();
+
+ if (unlikely(page_maybe_dma_pinned(page)))
+ return -EBUSY;
+ ClearPageAnonExclusive(page);
+
+ /*
+ * This is conceptually a smp_wmb() paired with the smp_rmb() in
+ * gup_must_unshare().
+ */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ smp_mb__after_atomic();
+ return 0;
+}
+
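A hedged sketch of how a fork-style copy path might consume page_try_dup_anon_rmap(); the wrapper name and return convention are hypothetical:

	static int my_copy_anon_pte(struct vm_area_struct *src_vma,
				    struct page *page, bool compound)
	{
		/*
		 * Failure means the page may be pinned: the caller must break
		 * COW by copying the page for the child instead of sharing it.
		 */
		if (page_try_dup_anon_rmap(page, compound, src_vma))
			return -EBUSY;

		/* Shared now: parent and child must map the page read-only. */
		return 0;
	}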
/*
* Called from mm/vmscan.c to handle paging out
*/
-int page_referenced(struct page *, int is_locked,
+int folio_referenced(struct folio *, int is_locked,
struct mem_cgroup *memcg, unsigned long *vm_flags);
-bool try_to_unmap(struct page *, enum ttu_flags flags);
+void try_to_migrate(struct folio *folio, enum ttu_flags flags);
+void try_to_unmap(struct folio *, enum ttu_flags flags);
+
+int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end, struct page **pages,
+ void *arg);
/* Avoid racy checks */
#define PVMW_SYNC (1 << 0)
-/* Look for migarion entries rather than present PTEs */
+/* Look for migration entries rather than present PTEs */
#define PVMW_MIGRATION (1 << 1)
struct page_vma_mapped_walk {
- struct page *page;
+ unsigned long pfn;
+ unsigned long nr_pages;
+ pgoff_t pgoff;
struct vm_area_struct *vma;
unsigned long address;
pmd_t *pmd;
@@ -212,9 +381,30 @@ struct page_vma_mapped_walk {
unsigned int flags;
};
+#define DEFINE_PAGE_VMA_WALK(name, _page, _vma, _address, _flags) \
+ struct page_vma_mapped_walk name = { \
+ .pfn = page_to_pfn(_page), \
+ .nr_pages = compound_nr(_page), \
+ .pgoff = page_to_pgoff(_page), \
+ .vma = _vma, \
+ .address = _address, \
+ .flags = _flags, \
+ }
+
+#define DEFINE_FOLIO_VMA_WALK(name, _folio, _vma, _address, _flags) \
+ struct page_vma_mapped_walk name = { \
+ .pfn = folio_pfn(_folio), \
+ .nr_pages = folio_nr_pages(_folio), \
+ .pgoff = folio_pgoff(_folio), \
+ .vma = _vma, \
+ .address = _address, \
+ .flags = _flags, \
+ }
+
static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
- if (pvmw->pte)
+	/* HugeTLB ptes are set directly, not via pte_offset_map(), so skip pte_unmap(). */
+ if (pvmw->pte && !is_vm_hugetlb_page(pvmw->vma))
pte_unmap(pvmw->pte);
if (pvmw->ptl)
spin_unlock(pvmw->ptl);
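An illustrative rmap_one-style callback using the walk state above; page_vma_mapped_walk() is the iterator declared for this structure, and the callback body is a placeholder:

	static bool my_rmap_one(struct folio *folio, struct vm_area_struct *vma,
				unsigned long address, void *arg)
	{
		DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);

		while (page_vma_mapped_walk(&pvmw)) {
			/*
			 * One mapping of the folio in this vma: pvmw.pte (or
			 * pvmw.pmd for a PMD mapping) is valid and pvmw.ptl
			 * is held.
			 */
		}
		return true;	/* keep scanning the remaining vmas */
	}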
@@ -233,27 +423,21 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
*
* returns the number of cleaned PTEs.
*/
-int page_mkclean(struct page *);
+int folio_mkclean(struct folio *);
-/*
- * called in munlock()/munmap() path to check for other vmas holding
- * the page mlocked.
- */
-void try_to_munlock(struct page *);
+int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
+ struct vm_area_struct *vma);
-void remove_migration_ptes(struct page *old, struct page *new, bool locked);
+void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
-/*
- * Called by memory-failure.c to kill processes.
- */
-struct anon_vma *page_lock_anon_vma_read(struct page *page);
-void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
/*
* rmap_walk_control: To control rmap traversing for specific needs
*
* arg: passed to rmap_one() and invalid_vma()
+ * try_lock: bail out if the rmap lock is contended
+ * contended: indicate the rmap traversal bailed out due to lock contention
* rmap_one: executed on each vma where page is mapped
* done: for checking traversing termination condition
* anon_lock: for getting anon_lock by optimized way rather than default
@@ -261,19 +445,24 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
*/
struct rmap_walk_control {
void *arg;
+ bool try_lock;
+ bool contended;
/*
* Return false if page table scanning in rmap_walk should be stopped.
* Otherwise, return true.
*/
- bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
+ bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, void *arg);
- int (*done)(struct page *page);
- struct anon_vma *(*anon_lock)(struct page *page);
+ int (*done)(struct folio *folio);
+ struct anon_vma *(*anon_lock)(struct folio *folio,
+ struct rmap_walk_control *rwc);
bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};
-void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
-void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
+void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
+void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
+struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
+ struct rmap_walk_control *rwc);
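Putting the pieces together, a sketch of a traversal driven through the control structure, reusing the my_rmap_one() callback sketched earlier:

	static void my_walk(struct folio *folio, void *arg)
	{
		struct rmap_walk_control rwc = {
			.arg = arg,
			.rmap_one = my_rmap_one,
			.try_lock = true,
		};

		rmap_walk(folio, &rwc);

		/* With try_lock set, contended reports whether we bailed out. */
		if (rwc.contended)
			pr_debug("rmap walk bailed out under lock contention\n");
	}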
#else /* !CONFIG_MMU */
@@ -281,7 +470,7 @@ void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
#define anon_vma_prepare(vma) (0)
#define anon_vma_link(vma) do {} while (0)
-static inline int page_referenced(struct page *page, int is_locked,
+static inline int folio_referenced(struct folio *folio, int is_locked,
struct mem_cgroup *memcg,
unsigned long *vm_flags)
{
@@ -289,14 +478,18 @@ static inline int page_referenced(struct page *page, int is_locked,
return 0;
}
-#define try_to_unmap(page, refs) false
+static inline void try_to_unmap(struct folio *folio, enum ttu_flags flags)
+{
+}
-static inline int page_mkclean(struct page *page)
+static inline int folio_mkclean(struct folio *folio)
{
return 0;
}
-
-
#endif /* CONFIG_MMU */
+static inline int page_mkclean(struct page *page)
+{
+ return folio_mkclean(page_folio(page));
+}
#endif /* _LINUX_RMAP_H */
diff --git a/include/linux/rmi.h b/include/linux/rmi.h
index 7b22366d0065..ab7eea01ab42 100644
--- a/include/linux/rmi.h
+++ b/include/linux/rmi.h
@@ -102,15 +102,16 @@ struct rmi_2d_sensor_platform_data {
};
/**
- * struct rmi_f30_data - overrides defaults for a single F30 GPIOs/LED chip.
+ * struct rmi_gpio_data - overrides defaults for a single F30/F3A GPIOs/LED
+ * chip.
* @buttonpad - the touchpad is a buttonpad, so enable only the first actual
* button that is found.
- * @trackstick_buttons - Set when the function 30 is handling the physical
+ * @trackstick_buttons - Set when the function 30 or 3a is handling the physical
* buttons of the trackstick (as a PS/2 passthrough device).
- * @disable - the touchpad incorrectly reports F30 and it should be ignored.
+ * @disable - the touchpad incorrectly reports F30/F3A and it should be ignored.
* This is a special case which is due to misconfigured firmware.
*/
-struct rmi_f30_data {
+struct rmi_gpio_data {
bool buttonpad;
bool trackstick_buttons;
bool disable;
@@ -206,7 +207,7 @@ struct rmi_device_platform_data_spi {
*
* @reset_delay_ms - after issuing a reset command to the touch sensor, the
* driver waits a few milliseconds to give the firmware a chance to
- * to re-initialize. You can override the default wait period here.
+ * re-initialize. You can override the default wait period here.
* @irq: irq associated with the attn gpio line, or negative
*/
struct rmi_device_platform_data {
@@ -218,7 +219,7 @@ struct rmi_device_platform_data {
/* function handler pdata */
struct rmi_2d_sensor_platform_data sensor_pdata;
struct rmi_f01_power_management power_management;
- struct rmi_f30_data f30_data;
+ struct rmi_gpio_data gpio_data;
};
/**
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index 9fe156d1c018..523c98b96cb4 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -17,8 +17,8 @@
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/poll.h>
-
-#define RPMSG_ADDR_ANY 0xFFFFFFFF
+#include <linux/rpmsg/byteorder.h>
+#include <uapi/linux/rpmsg.h>
struct rpmsg_device;
struct rpmsg_endpoint;
@@ -41,20 +41,24 @@ struct rpmsg_channel_info {
* rpmsg_device - device that belong to the rpmsg bus
* @dev: the device struct
* @id: device id (used to match between rpmsg drivers and devices)
- * @driver_override: driver name to force a match
+ * @driver_override: driver name to force a match; do not set directly,
+ * because the core frees it; use driver_set_override() to
+ * set or clear it.
* @src: local address
* @dst: destination address
* @ept: the rpmsg endpoint of this channel
* @announce: if set, rpmsg will announce the creation/removal of this channel
+ * @little_endian: True if transport is using little endian byte representation
*/
struct rpmsg_device {
struct device dev;
struct rpmsg_device_id id;
- char *driver_override;
+ const char *driver_override;
u32 src;
u32 dst;
struct rpmsg_endpoint *ept;
bool announce;
+ bool little_endian;
const struct rpmsg_device_ops *ops;
};
@@ -111,10 +115,61 @@ struct rpmsg_driver {
int (*callback)(struct rpmsg_device *, void *, int, void *, u32);
};
+static inline u16 rpmsg16_to_cpu(struct rpmsg_device *rpdev, __rpmsg16 val)
+{
+ if (!rpdev)
+ return __rpmsg16_to_cpu(rpmsg_is_little_endian(), val);
+ else
+ return __rpmsg16_to_cpu(rpdev->little_endian, val);
+}
+
+static inline __rpmsg16 cpu_to_rpmsg16(struct rpmsg_device *rpdev, u16 val)
+{
+ if (!rpdev)
+ return __cpu_to_rpmsg16(rpmsg_is_little_endian(), val);
+ else
+ return __cpu_to_rpmsg16(rpdev->little_endian, val);
+}
+
+static inline u32 rpmsg32_to_cpu(struct rpmsg_device *rpdev, __rpmsg32 val)
+{
+ if (!rpdev)
+ return __rpmsg32_to_cpu(rpmsg_is_little_endian(), val);
+ else
+ return __rpmsg32_to_cpu(rpdev->little_endian, val);
+}
+
+static inline __rpmsg32 cpu_to_rpmsg32(struct rpmsg_device *rpdev, u32 val)
+{
+ if (!rpdev)
+ return __cpu_to_rpmsg32(rpmsg_is_little_endian(), val);
+ else
+ return __cpu_to_rpmsg32(rpdev->little_endian, val);
+}
+
+static inline u64 rpmsg64_to_cpu(struct rpmsg_device *rpdev, __rpmsg64 val)
+{
+ if (!rpdev)
+ return __rpmsg64_to_cpu(rpmsg_is_little_endian(), val);
+ else
+ return __rpmsg64_to_cpu(rpdev->little_endian, val);
+}
+
+static inline __rpmsg64 cpu_to_rpmsg64(struct rpmsg_device *rpdev, u64 val)
+{
+ if (!rpdev)
+ return __cpu_to_rpmsg64(rpmsg_is_little_endian(), val);
+ else
+ return __cpu_to_rpmsg64(rpdev->little_endian, val);
+}
+
#if IS_ENABLED(CONFIG_RPMSG)
-int register_rpmsg_device(struct rpmsg_device *dev);
-void unregister_rpmsg_device(struct rpmsg_device *dev);
+int rpmsg_register_device_override(struct rpmsg_device *rpdev,
+ const char *driver_override);
+int rpmsg_register_device(struct rpmsg_device *rpdev);
+int rpmsg_unregister_device(struct device *parent,
+ struct rpmsg_channel_info *chinfo);
int __register_rpmsg_driver(struct rpmsg_driver *drv, struct module *owner);
void unregister_rpmsg_driver(struct rpmsg_driver *drv);
void rpmsg_destroy_ept(struct rpmsg_endpoint *);
@@ -135,17 +190,28 @@ int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
__poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait);
+ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept);
+
#else
-static inline int register_rpmsg_device(struct rpmsg_device *dev)
+static inline int rpmsg_register_device_override(struct rpmsg_device *rpdev,
+ const char *driver_override)
+{
+ return -ENXIO;
+}
+
+static inline int rpmsg_register_device(struct rpmsg_device *rpdev)
{
return -ENXIO;
}
-static inline void unregister_rpmsg_device(struct rpmsg_device *dev)
+static inline int rpmsg_unregister_device(struct device *parent,
+ struct rpmsg_channel_info *chinfo)
{
/* This shouldn't be possible */
WARN_ON(1);
+
+ return -ENXIO;
}
static inline int __register_rpmsg_driver(struct rpmsg_driver *drv,
@@ -177,7 +243,7 @@ static inline struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev
/* This shouldn't be possible */
WARN_ON(1);
- return ERR_PTR(-ENXIO);
+ return NULL;
}
static inline int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len)
@@ -242,6 +308,14 @@ static inline __poll_t rpmsg_poll(struct rpmsg_endpoint *ept,
return 0;
}
+static inline ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return -ENXIO;
+}
+
#endif /* IS_ENABLED(CONFIG_RPMSG) */
/* use a macro to avoid include chaining to get THIS_MODULE */
diff --git a/include/linux/rpmsg/byteorder.h b/include/linux/rpmsg/byteorder.h
new file mode 100644
index 000000000000..c0f565dbad6d
--- /dev/null
+++ b/include/linux/rpmsg/byteorder.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Follows implementation found in linux/virtio_byteorder.h
+ */
+#ifndef _LINUX_RPMSG_BYTEORDER_H
+#define _LINUX_RPMSG_BYTEORDER_H
+#include <linux/types.h>
+#include <uapi/linux/rpmsg_types.h>
+
+static inline bool rpmsg_is_little_endian(void)
+{
+#ifdef __LITTLE_ENDIAN
+ return true;
+#else
+ return false;
+#endif
+}
+
+static inline u16 __rpmsg16_to_cpu(bool little_endian, __rpmsg16 val)
+{
+ if (little_endian)
+ return le16_to_cpu((__force __le16)val);
+ else
+ return be16_to_cpu((__force __be16)val);
+}
+
+static inline __rpmsg16 __cpu_to_rpmsg16(bool little_endian, u16 val)
+{
+ if (little_endian)
+ return (__force __rpmsg16)cpu_to_le16(val);
+ else
+ return (__force __rpmsg16)cpu_to_be16(val);
+}
+
+static inline u32 __rpmsg32_to_cpu(bool little_endian, __rpmsg32 val)
+{
+ if (little_endian)
+ return le32_to_cpu((__force __le32)val);
+ else
+ return be32_to_cpu((__force __be32)val);
+}
+
+static inline __rpmsg32 __cpu_to_rpmsg32(bool little_endian, u32 val)
+{
+ if (little_endian)
+ return (__force __rpmsg32)cpu_to_le32(val);
+ else
+ return (__force __rpmsg32)cpu_to_be32(val);
+}
+
+static inline u64 __rpmsg64_to_cpu(bool little_endian, __rpmsg64 val)
+{
+ if (little_endian)
+ return le64_to_cpu((__force __le64)val);
+ else
+ return be64_to_cpu((__force __be64)val);
+}
+
+static inline __rpmsg64 __cpu_to_rpmsg64(bool little_endian, u64 val)
+{
+ if (little_endian)
+ return (__force __rpmsg64)cpu_to_le64(val);
+ else
+ return (__force __rpmsg64)cpu_to_be64(val);
+}
+
+#endif /* _LINUX_RPMSG_BYTEORDER_H */
diff --git a/include/linux/rpmsg/ns.h b/include/linux/rpmsg/ns.h
new file mode 100644
index 000000000000..a7804edd6d58
--- /dev/null
+++ b/include/linux/rpmsg/ns.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_RPMSG_NS_H
+#define _LINUX_RPMSG_NS_H
+
+#include <linux/mod_devicetable.h>
+#include <linux/rpmsg.h>
+#include <linux/rpmsg/byteorder.h>
+#include <linux/types.h>
+
+/**
+ * struct rpmsg_ns_msg - dynamic name service announcement message
+ * @name: name of remote service that is published
+ * @addr: address of remote service that is published
+ * @flags: indicates whether service is created or destroyed
+ *
+ * This message is sent across to publish a new service, or to announce the
+ * removal of an existing one. When we receive these messages, an appropriate
+ * rpmsg channel (i.e. device) is created/destroyed. In turn, the ->probe()
+ * or ->remove() handler of the appropriate rpmsg driver will be invoked
+ * (if/as soon as one is registered).
+ */
+struct rpmsg_ns_msg {
+ char name[RPMSG_NAME_SIZE];
+ __rpmsg32 addr;
+ __rpmsg32 flags;
+} __packed;
+
+/**
+ * enum rpmsg_ns_flags - dynamic name service announcement flags
+ *
+ * @RPMSG_NS_CREATE: a new remote service was just created
+ * @RPMSG_NS_DESTROY: a known remote service was just destroyed
+ */
+enum rpmsg_ns_flags {
+ RPMSG_NS_CREATE = 0,
+ RPMSG_NS_DESTROY = 1,
+};
+
+/* Address 53 is reserved for advertising remote services */
+#define RPMSG_NS_ADDR (53)
+
+int rpmsg_ns_register_device(struct rpmsg_device *rpdev);
+
+#endif
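An illustrative decoder for this announcement, using the rpmsg byte-order helpers so it works for either endianness; the callback signature matches struct rpmsg_driver's callback, and my_ns_cb is a hypothetical name:

	static int my_ns_cb(struct rpmsg_device *rpdev, void *data, int len,
			    void *priv, u32 src)
	{
		struct rpmsg_ns_msg *msg = data;
		u32 addr, flags;

		if (len != sizeof(*msg))
			return -EINVAL;

		addr = rpmsg32_to_cpu(rpdev, msg->addr);
		flags = rpmsg32_to_cpu(rpdev, msg->flags);

		if (flags == RPMSG_NS_DESTROY)
			pr_info("service %s at 0x%x went away\n", msg->name, addr);

		return 0;
	}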
diff --git a/include/linux/rpmsg/qcom_glink.h b/include/linux/rpmsg/qcom_glink.h
index 96e26d94719f..22fc3a69b683 100644
--- a/include/linux/rpmsg/qcom_glink.h
+++ b/include/linux/rpmsg/qcom_glink.h
@@ -7,6 +7,12 @@
struct qcom_glink;
+#if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK)
+void qcom_glink_ssr_notify(const char *ssr_name);
+#else
+static inline void qcom_glink_ssr_notify(const char *ssr_name) {}
+#endif
+
#if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK_SMEM)
struct qcom_glink *qcom_glink_smem_register(struct device *parent,
@@ -23,7 +29,6 @@ qcom_glink_smem_register(struct device *parent,
}
static inline void qcom_glink_smem_unregister(struct qcom_glink *glink) {}
-
#endif
#endif
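A brief caller sketch with hypothetical names throughout; thanks to the stub above it compiles unchanged when CONFIG_RPMSG_QCOM_GLINK is disabled.

#include <linux/notifier.h>
#include <linux/rpmsg/qcom_glink.h>

static int example_ssr_cb(struct notifier_block *nb, unsigned long event,
			  void *data)
{
	/* Tell GLINK the remote subsystem went down ("mpss" is a
	 * placeholder subsystem name). */
	qcom_glink_ssr_notify("mpss");
	return NOTIFY_DONE;
}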
diff --git a/include/linux/rslib.h b/include/linux/rslib.h
index 5974cedd008c..238bb85243d3 100644
--- a/include/linux/rslib.h
+++ b/include/linux/rslib.h
@@ -54,7 +54,7 @@ struct rs_codec {
*/
struct rs_control {
struct rs_codec *codec;
- uint16_t buffers[0];
+ uint16_t buffers[];
};
/* General purpose RS codec, 8-bit data width, symbol width 1-15 bit */
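The [0] to [] change converts a GNU zero-length array into a C99 flexible array member, which struct_size() can size safely. A hypothetical allocation sketch follows; the real rslib core derives the element count from the codec parameters.

#include <linux/rslib.h>
#include <linux/slab.h>

static struct rs_control *example_rs_alloc(unsigned int nbuf)
{
	struct rs_control *rs;

	/* sizeof(*rs) plus nbuf trailing uint16_t buffer elements,
	 * with overflow checking. */
	rs = kzalloc(struct_size(rs, buffers, nbuf), GFP_KERNEL);
	return rs;
}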
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 23990bd29040..1fd9c6a21ebe 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -34,18 +34,6 @@ static inline time64_t rtc_tm_sub(struct rtc_time *lhs, struct rtc_time *rhs)
return rtc_tm_to_time64(lhs) - rtc_tm_to_time64(rhs);
}
-static inline void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
-{
- rtc_time64_to_tm(time, tm);
-}
-
-static inline int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time)
-{
- *time = rtc_tm_to_time64(tm);
-
- return 0;
-}
-
#include <linux/device.h>
#include <linux/seq_file.h>
#include <linux/cdev.h>
@@ -67,10 +55,6 @@ extern struct class *rtc_class;
*
* The (current) exceptions are mostly filesystem hooks:
* - the proc() hook for procfs
- * - non-ioctl() chardev hooks: open(), release()
- *
- * REVISIT those periodic irq calls *do* have ops_lock when they're
- * issued through ioctl() ...
*/
struct rtc_class_ops {
int (*ioctl)(struct device *, unsigned int, unsigned long);
@@ -82,6 +66,8 @@ struct rtc_class_ops {
int (*alarm_irq_enable)(struct device *, unsigned int enabled);
int (*read_offset)(struct device *, long *offset);
int (*set_offset)(struct device *, long offset);
+ int (*param_get)(struct device *, struct rtc_param *param);
+ int (*param_set)(struct device *, struct rtc_param *param);
};
struct rtc_device;
@@ -96,6 +82,7 @@ struct rtc_timer {
/* flags */
#define RTC_DEV_BUSY 0
+#define RTC_NO_CDEV 1
struct rtc_device {
struct device dev;
@@ -123,22 +110,39 @@ struct rtc_device {
struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */
int pie_enabled;
struct work_struct irqwork;
- /* Some hardware can't support UIE mode */
- int uie_unsupported;
-
- /* Number of nsec it takes to set the RTC clock. This influences when
- * the set ops are called. An offset:
- * - of 0.5 s will call RTC set for wall clock time 10.0 s at 9.5 s
- * - of 1.5 s will call RTC set for wall clock time 10.0 s at 8.5 s
- * - of -0.5 s will call RTC set for wall clock time 10.0 s at 10.5 s
- */
- long set_offset_nsec;
- bool registered;
+ /*
+ * This offset specifies the update timing of the RTC.
+ *
+ * tsched     t1 write(t2.tv_sec - 1sec)     t2 RTC increments seconds
+ *
+ * The offset defines how tsched is computed so that the write to
+ * the RTC (t2.tv_sec - 1sec) is correct versus the time required
+ * for the transport of the write and the time which the RTC needs
+ * to increment seconds the first time after the write (t2).
+ *
+ * For directly accessible RTCs tsched ~= t1 because the write time
+ * is negligible. For RTCs behind slow busses the transport time is
+ * significant and has to be taken into account.
+ *
+ * The time between the write (t1) and the first increment after
+ * the write (t2) is RTC specific. For a MC146818 RTC it's 500ms,
+ * for many others it's exactly 1 second. Consult the datasheet.
+ *
+ * The value of this offset is also used to calculate the value to be
+ * written (t2.tv_sec - 1sec) at tsched.
+ *
+ * The default value for this is NSEC_PER_SEC plus a 10 msec default
+ * transport time. Drivers can adjust the offset so that the value
+ * written at tsched becomes correct:
+ *
+ * newval = tsched + set_offset_nsec - NSEC_PER_SEC
+ * and (tsched + set_offset_nsec) % NSEC_PER_SEC == 0
+ */
+ unsigned long set_offset_nsec;
- /* Old ABI support */
- bool nvram_old_abi;
- struct bin_attribute *nvram;
+ unsigned long features[BITS_TO_LONGS(RTC_FEATURE_CNT)];
time64_t range_min;
timeu64_t range_max;
@@ -177,11 +181,10 @@ extern struct rtc_device *devm_rtc_device_register(struct device *dev,
const struct rtc_class_ops *ops,
struct module *owner);
struct rtc_device *devm_rtc_allocate_device(struct device *dev);
-int __rtc_register_device(struct module *owner, struct rtc_device *rtc);
+int __devm_rtc_register_device(struct module *owner, struct rtc_device *rtc);
extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm);
extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm);
-extern int rtc_set_ntp_time(struct timespec64 now, unsigned long *target_nsec);
int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm);
extern int rtc_read_alarm(struct rtc_device *rtc,
struct rtc_wkalrm *alrm);
@@ -221,41 +224,8 @@ static inline bool is_leap_year(unsigned int year)
return (!(year % 4) && (year % 100)) || !(year % 400);
}
-/* Determine if we can call to driver to set the time. Drivers can only be
- * called to set a second aligned time value, and the field set_offset_nsec
- * specifies how far away from the second aligned time to call the driver.
- *
- * This also computes 'to_set' which is the time we are trying to set, and has
- * a zero in tv_nsecs, such that:
- * to_set - set_delay_nsec == now +/- FUZZ
- *
- */
-static inline bool rtc_tv_nsec_ok(s64 set_offset_nsec,
- struct timespec64 *to_set,
- const struct timespec64 *now)
-{
- /* Allowed error in tv_nsec, arbitarily set to 5 jiffies in ns. */
- const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5;
- struct timespec64 delay = {.tv_sec = 0,
- .tv_nsec = set_offset_nsec};
-
- *to_set = timespec64_add(*now, delay);
-
- if (to_set->tv_nsec < TIME_SET_NSEC_FUZZ) {
- to_set->tv_nsec = 0;
- return true;
- }
-
- if (to_set->tv_nsec > NSEC_PER_SEC - TIME_SET_NSEC_FUZZ) {
- to_set->tv_sec++;
- to_set->tv_nsec = 0;
- return true;
- }
- return false;
-}
-
-#define rtc_register_device(device) \
- __rtc_register_device(THIS_MODULE, device)
+#define devm_rtc_register_device(device) \
+ __devm_rtc_register_device(THIS_MODULE, device)
#ifdef CONFIG_RTC_HCTOSYS_DEVICE
extern int rtc_hctosys_ret;
@@ -264,16 +234,14 @@ extern int rtc_hctosys_ret;
#endif
#ifdef CONFIG_RTC_NVMEM
-int rtc_nvmem_register(struct rtc_device *rtc,
- struct nvmem_config *nvmem_config);
-void rtc_nvmem_unregister(struct rtc_device *rtc);
+int devm_rtc_nvmem_register(struct rtc_device *rtc,
+ struct nvmem_config *nvmem_config);
#else
-static inline int rtc_nvmem_register(struct rtc_device *rtc,
- struct nvmem_config *nvmem_config)
+static inline int devm_rtc_nvmem_register(struct rtc_device *rtc,
+ struct nvmem_config *nvmem_config)
{
return 0;
}
-static inline void rtc_nvmem_unregister(struct rtc_device *rtc) {}
#endif
#ifdef CONFIG_RTC_INTF_SYSFS
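A hypothetical probe sketch against the renamed devm API: everything is device-managed, so no remove-side unregister is needed. example_rtc_ops and the range are placeholders.

#include <linux/platform_device.h>
#include <linux/rtc.h>

static int example_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;

	rtc = devm_rtc_allocate_device(&pdev->dev);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	rtc->ops = &example_rtc_ops;	/* placeholder ops table */
	rtc->range_max = U32_MAX;	/* e.g. a 32-bit seconds counter */

	/* Advertise capabilities via the new features bitmap. */
	clear_bit(RTC_FEATURE_ALARM, rtc->features);

	return devm_rtc_register_device(rtc);
}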
diff --git a/include/linux/rtc/ds1685.h b/include/linux/rtc/ds1685.h
index 67ee9d20cc5a..5a41c3bbcbe3 100644
--- a/include/linux/rtc/ds1685.h
+++ b/include/linux/rtc/ds1685.h
@@ -46,7 +46,6 @@ struct ds1685_priv {
u32 regstep;
int irq_num;
bool bcd_mode;
- bool no_irq;
u8 (*read)(struct ds1685_priv *, int);
void (*write)(struct ds1685_priv *, int, u8);
void (*prepare_poweroff)(void);
diff --git a/include/linux/rtc/sirfsoc_rtciobrg.h b/include/linux/rtc/sirfsoc_rtciobrg.h
deleted file mode 100644
index b31f2856733d..000000000000
--- a/include/linux/rtc/sirfsoc_rtciobrg.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * RTC I/O Bridge interfaces for CSR SiRFprimaII
- * ARM access the registers of SYSRTC, GPSRTC and PWRC through this module
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-#ifndef _SIRFSOC_RTC_IOBRG_H_
-#define _SIRFSOC_RTC_IOBRG_H_
-
-struct regmap_config;
-
-extern void sirfsoc_rtc_iobrg_besyncing(void);
-
-extern u32 sirfsoc_rtc_iobrg_readl(u32 addr);
-
-extern void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr);
-struct regmap *devm_regmap_init_iobg(struct device *dev,
- const struct regmap_config *config);
-
-#endif
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 6fd615a0eea9..7d049883a08a 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -13,12 +13,39 @@
#ifndef __LINUX_RT_MUTEX_H
#define __LINUX_RT_MUTEX_H
+#include <linux/compiler.h>
#include <linux/linkage.h>
-#include <linux/rbtree.h>
-#include <linux/spinlock_types.h>
+#include <linux/rbtree_types.h>
+#include <linux/spinlock_types_raw.h>
extern int max_lock_depth; /* for sysctl */
+struct rt_mutex_base {
+ raw_spinlock_t wait_lock;
+ struct rb_root_cached waiters;
+ struct task_struct *owner;
+};
+
+#define __RT_MUTEX_BASE_INITIALIZER(rtbasename) \
+{ \
+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(rtbasename.wait_lock), \
+ .waiters = RB_ROOT_CACHED, \
+ .owner = NULL \
+}
+
+/**
+ * rt_mutex_base_is_locked - is the rtmutex locked
+ * @lock: the mutex to be queried
+ *
+ * Returns true if the mutex is locked, false if unlocked.
+ */
+static inline bool rt_mutex_base_is_locked(struct rt_mutex_base *lock)
+{
+ return READ_ONCE(lock->owner) != NULL;
+}
+
+extern void rt_mutex_base_init(struct rt_mutex_base *rtb);
+
/**
* The rt_mutex structure
*
@@ -28,15 +55,7 @@ extern int max_lock_depth; /* for sysctl */
* @owner: the mutex owner
*/
struct rt_mutex {
- raw_spinlock_t wait_lock;
- struct rb_root_cached waiters;
- struct task_struct *owner;
-#ifdef CONFIG_DEBUG_RT_MUTEXES
- int save_state;
- const char *name, *file;
- int line;
- void *magic;
-#endif
+ struct rt_mutex_base rtmutex;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
@@ -46,78 +65,56 @@ struct rt_mutex_waiter;
struct hrtimer_sleeper;
#ifdef CONFIG_DEBUG_RT_MUTEXES
- extern int rt_mutex_debug_check_no_locks_freed(const void *from,
- unsigned long len);
- extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task);
+extern void rt_mutex_debug_task_free(struct task_struct *tsk);
#else
- static inline int rt_mutex_debug_check_no_locks_freed(const void *from,
- unsigned long len)
- {
- return 0;
- }
-# define rt_mutex_debug_check_no_locks_held(task) do { } while (0)
+static inline void rt_mutex_debug_task_free(struct task_struct *tsk) { }
#endif
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
- , .name = #mutexname, .file = __FILE__, .line = __LINE__
-
-# define rt_mutex_init(mutex) \
+#define rt_mutex_init(mutex) \
do { \
static struct lock_class_key __key; \
__rt_mutex_init(mutex, __func__, &__key); \
} while (0)
- extern void rt_mutex_debug_task_free(struct task_struct *tsk);
-#else
-# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL, NULL)
-# define rt_mutex_debug_task_free(t) do { } while (0)
-#endif
-
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
- , .dep_map = { .name = #mutexname }
+#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
+ .dep_map = { \
+ .name = #mutexname, \
+ .wait_type_inner = LD_WAIT_SLEEP, \
+ }
#else
#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
#endif
-#define __RT_MUTEX_INITIALIZER(mutexname) \
- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
- , .waiters = RB_ROOT_CACHED \
- , .owner = NULL \
- __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
- __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
+#define __RT_MUTEX_INITIALIZER(mutexname) \
+{ \
+ .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex), \
+ __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
+}
#define DEFINE_RT_MUTEX(mutexname) \
struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
-/**
- * rt_mutex_is_locked - is the mutex locked
- * @lock: the mutex to be queried
- *
- * Returns 1 if the mutex is locked, 0 if unlocked.
- */
-static inline int rt_mutex_is_locked(struct rt_mutex *lock)
-{
- return lock->owner != NULL;
-}
-
extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
-extern void rt_mutex_destroy(struct rt_mutex *lock);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
+extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock);
#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
+#define rt_mutex_lock_nest_lock(lock, nest_lock) \
+ do { \
+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
+ _rt_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
+ } while (0)
+
#else
extern void rt_mutex_lock(struct rt_mutex *lock);
#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
+#define rt_mutex_lock_nest_lock(lock, nest_lock) rt_mutex_lock(lock)
#endif
extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
-extern int rt_mutex_timed_lock(struct rt_mutex *lock,
- struct hrtimer_sleeper *timeout);
-
+extern int rt_mutex_lock_killable(struct rt_mutex *lock);
extern int rt_mutex_trylock(struct rt_mutex *lock);
extern void rt_mutex_unlock(struct rt_mutex *lock);
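A minimal sketch of the surviving rt_mutex API, including the killable variant added above (names are hypothetical).

#include <linux/rtmutex.h>

static DEFINE_RT_MUTEX(example_lock);

static int example_update(void)
{
	int ret;

	ret = rt_mutex_lock_killable(&example_lock);
	if (ret)
		return ret;	/* fatal signal while blocked */

	/* ... critical section ... */

	rt_mutex_unlock(&example_lock);
	return 0;
}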
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index bb9cb84114c1..ae2c6a3cec5d 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -100,6 +100,7 @@ void net_dec_ingress_queue(void);
#ifdef CONFIG_NET_EGRESS
void net_inc_egress_queue(void);
void net_dec_egress_queue(void);
+void netdev_xmit_skip_txqueue(bool skip);
#endif
void rtnetlink_init(void);
@@ -134,4 +135,7 @@ extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
int (*vlan_fill)(struct sk_buff *skb,
struct net_device *dev,
u32 filter_mask));
+
+extern void rtnl_offload_xstats_notify(struct net_device *dev);
+
#endif /* __LINUX_RTNETLINK_H */
diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h
index 65b8142a7fed..534038d962e4 100644
--- a/include/linux/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -82,6 +82,7 @@
#define MS_OC_INT_EN (1 << 23)
#define SD_OC_INT_EN (1 << 22)
+#define RTSX_DUM_REG 0x1C
/*
* macros for easy use
@@ -99,18 +100,6 @@
#define rtsx_pci_readb(pcr, reg) \
ioread8((pcr)->remap_addr + reg)
-#define rtsx_pci_read_config_byte(pcr, where, val) \
- pci_read_config_byte((pcr)->pci, where, val)
-
-#define rtsx_pci_write_config_byte(pcr, where, val) \
- pci_write_config_byte((pcr)->pci, where, val)
-
-#define rtsx_pci_read_config_dword(pcr, where, val) \
- pci_read_config_dword((pcr)->pci, where, val)
-
-#define rtsx_pci_write_config_dword(pcr, where, val) \
- pci_write_config_dword((pcr)->pci, where, val)
-
#define STATE_TRANS_NONE 0
#define STATE_TRANS_CMD 1
#define STATE_TRANS_BUF 2
@@ -305,6 +294,8 @@
#define SD30_CLK_STOP_CFG0 0x01
#define REG_PRE_RW_MODE 0xFD70
#define EN_INFINITE_MODE 0x01
+#define REG_CRC_DUMMY_0 0xFD71
+#define CFG_SD_POW_AUTO_PD (1<<0)
#define SRCTL 0xFC13
@@ -599,6 +590,7 @@
#define ASPM_FORCE_CTL 0xFE57
#define FORCE_ASPM_CTL0 0x10
+#define FORCE_ASPM_CTL1 0x20
#define FORCE_ASPM_VAL_MASK 0x03
#define FORCE_ASPM_L1_EN 0x02
#define FORCE_ASPM_L0_EN 0x01
@@ -667,6 +659,28 @@
#define PM_WAKE_EN 0x01
#define PM_CTRL4 0xFF47
+/* FW config info register */
+#define RTS5261_FW_CFG_INFO0 0xFF50
+#define RTS5261_FW_EXPRESS_TEST_MASK (0x01 << 0)
+#define RTS5261_FW_EA_MODE_MASK (0x01 << 5)
+#define RTS5261_FW_CFG0 0xFF54
+#define RTS5261_FW_ENTER_EXPRESS (0x01 << 0)
+
+#define RTS5261_FW_CFG1 0xFF55
+#define RTS5261_SYS_CLK_SEL_MCU_CLK (0x01 << 7)
+#define RTS5261_CRC_CLK_SEL_MCU_CLK (0x01 << 6)
+#define RTS5261_FAKE_MCU_CLOCK_GATING (0x01 << 5)
+#define RTS5261_MCU_BUS_SEL_MASK (0x01 << 4)
+#define RTS5261_MCU_CLOCK_SEL_MASK (0x03 << 2)
+#define RTS5261_MCU_CLOCK_SEL_16M (0x01 << 2)
+#define RTS5261_MCU_CLOCK_GATING (0x01 << 1)
+#define RTS5261_DRIVER_ENABLE_FW (0x01 << 0)
+
+#define REG_CFG_OOBS_OFF_TIMER 0xFEA6
+#define REG_CFG_OOBS_ON_TIMER 0xFEA7
+#define REG_CFG_VCM_ON_TIMER 0xFEA8
+#define REG_CFG_OOBS_POLLING 0xFEA9
+
/* Memory mapping */
#define SRAM_BASE 0xE600
#define RBUF_BASE 0xF400
@@ -705,6 +719,13 @@
#define RTS5260_DVCC_TUNE_MASK 0x70
#define RTS5260_DVCC_33 0x70
+/* RTS5261 */
+#define RTS5261_LDO1_CFG0 0xFF72
+#define RTS5261_LDO1_OCP_THD_MASK (0x07 << 5)
+#define RTS5261_LDO1_OCP_EN (0x01 << 4)
+#define RTS5261_LDO1_OCP_LMT_THD_MASK (0x03 << 2)
+#define RTS5261_LDO1_OCP_LMT_EN (0x01 << 1)
+
#define LDO_VCC_CFG1 0xFF73
#define LDO_VCC_REF_TUNE_MASK 0x30
#define LDO_VCC_REF_1V2 0x20
@@ -745,6 +766,8 @@
#define RTS5260_AUTOLOAD_CFG4 0xFF7F
#define RTS5260_MIMO_DISABLE 0x8A
+/* RTS5261 */
+#define RTS5261_AUX_CLK_16M_EN (1 << 5)
#define RTS5260_REG_GPIO_CTL0 0xFC1A
#define RTS5260_REG_GPIO_MASK 0x01
@@ -1041,13 +1064,12 @@
#define PHY_DIG1E_RX_EN_KEEP 0x0001
#define PHY_DUM_REG 0x1F
-#define PCR_ASPM_SETTING_REG1 0x160
-#define PCR_ASPM_SETTING_REG2 0x168
-#define PCR_ASPM_SETTING_5260 0x178
-
#define PCR_SETTING_REG1 0x724
#define PCR_SETTING_REG2 0x814
#define PCR_SETTING_REG3 0x747
+#define PCR_SETTING_REG4 0x818
+#define PCR_SETTING_REG5 0x81C
+
#define rtsx_pci_init_cmd(pcr) ((pcr)->ci = 0)
@@ -1076,15 +1098,11 @@ struct pcr_ops {
unsigned int (*cd_deglitch)(struct rtsx_pcr *pcr);
int (*conv_clk_and_div_n)(int clk, int dir);
void (*fetch_vendor_settings)(struct rtsx_pcr *pcr);
- void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state);
+ void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state, bool runtime);
void (*stop_cmd)(struct rtsx_pcr *pcr);
void (*set_aspm)(struct rtsx_pcr *pcr, bool enable);
- int (*set_ltr_latency)(struct rtsx_pcr *pcr, u32 latency);
- int (*set_l1off_sub)(struct rtsx_pcr *pcr, u8 val);
void (*set_l1off_cfg_sub_d0)(struct rtsx_pcr *pcr, int active);
- void (*full_on)(struct rtsx_pcr *pcr);
- void (*power_saving)(struct rtsx_pcr *pcr);
void (*enable_ocp)(struct rtsx_pcr *pcr);
void (*disable_ocp)(struct rtsx_pcr *pcr);
void (*init_ocp)(struct rtsx_pcr *pcr);
@@ -1094,11 +1112,7 @@ struct pcr_ops {
};
enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
-
-#define ASPM_L1_1_EN_MASK BIT(3)
-#define ASPM_L1_2_EN_MASK BIT(2)
-#define PM_L1_1_EN_MASK BIT(1)
-#define PM_L1_2_EN_MASK BIT(0)
+enum ASPM_MODE {ASPM_MODE_CFG, ASPM_MODE_REG};
#define ASPM_L1_1_EN BIT(0)
#define ASPM_L1_2_EN BIT(1)
@@ -1108,13 +1122,6 @@ enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
#define L1_SNOOZE_TEST_EN BIT(5)
#define LTR_L1SS_PWR_GATE_CHECK_CARD_EN BIT(6)
-enum dev_aspm_mode {
- DEV_ASPM_DYNAMIC,
- DEV_ASPM_BACKDOOR,
- DEV_ASPM_STATIC,
- DEV_ASPM_DISABLE,
-};
-
/*
* struct rtsx_cr_option - card reader option
* @dev_flags: device flags
@@ -1125,7 +1132,6 @@ enum dev_aspm_mode {
* @ltr_active_latency: ltr mode active latency
* @ltr_idle_latency: ltr mode idle latency
* @ltr_l1off_latency: ltr mode l1off latency
- * @dev_aspm_mode: device aspm mode
* @l1_snooze_delay: l1 snooze delay
* @ltr_l1off_sspwrgate: ltr l1off sspwrgate
* @ltr_l1off_snooze_sspwrgate: ltr l1off snooze sspwrgate
@@ -1142,7 +1148,6 @@ struct rtsx_cr_option {
u32 ltr_active_latency;
u32 ltr_idle_latency;
u32 ltr_l1off_latency;
- enum dev_aspm_mode dev_aspm_mode;
u32 l1_snooze_delay;
u8 ltr_l1off_sspwrgate;
u8 ltr_l1off_snooze_sspwrgate;
@@ -1171,7 +1176,6 @@ struct rtsx_hw_param {
struct rtsx_pcr {
struct pci_dev *pci;
unsigned int id;
- int pcie_cap;
struct rtsx_cr_option option;
struct rtsx_hw_param hw_param;
@@ -1200,7 +1204,6 @@ struct rtsx_pcr {
unsigned int card_exist;
struct delayed_work carddet_work;
- struct delayed_work idle_work;
spinlock_t lock;
struct mutex pcr_mutex;
@@ -1217,6 +1220,8 @@ struct rtsx_pcr {
#define EXTRA_CAPS_MMC_HSDDR (1 << 3)
#define EXTRA_CAPS_MMC_HS200 (1 << 4)
#define EXTRA_CAPS_MMC_8BIT (1 << 5)
+#define EXTRA_CAPS_NO_MMC (1 << 7)
+#define EXTRA_CAPS_SD_EXPRESS (1 << 8)
u32 extra_caps;
#define IC_VER_A 0
@@ -1230,6 +1235,7 @@ struct rtsx_pcr {
u8 card_drive_sel;
#define ASPM_L1_EN 0x02
u8 aspm_en;
+ enum ASPM_MODE aspm_mode;
bool aspm_enabled;
#define PCR_MS_PMOS (1 << 0)
@@ -1255,6 +1261,7 @@ struct rtsx_pcr {
u8 dma_error_count;
u8 ocp_stat;
u8 ocp_stat2;
+ u8 rtd3_en;
};
#define PID_524A 0x524A
@@ -1263,12 +1270,15 @@ struct rtsx_pcr {
#define PID_525A 0x525A
#define PID_5260 0x5260
#define PID_5261 0x5261
+#define PID_5228 0x5228
#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid))
#define PCI_VID(pcr) ((pcr)->pci->vendor)
#define PCI_PID(pcr) ((pcr)->pci->device)
#define is_version(pcr, pid, ver) \
(CHK_PCI_PID(pcr, pid) && (pcr)->ic_version == (ver))
+#define is_version_higher_than(pcr, pid, ver) \
+ (CHK_PCI_PID(pcr, pid) && (pcr)->ic_version > (ver))
#define pcr_dbg(pcr, fmt, arg...) \
dev_dbg(&(pcr)->pci->dev, fmt, ##arg)
@@ -1320,18 +1330,6 @@ static inline u8 *rtsx_pci_get_cmd_data(struct rtsx_pcr *pcr)
return (u8 *)(pcr->host_cmds_ptr);
}
-static inline int rtsx_pci_update_cfg_byte(struct rtsx_pcr *pcr, int addr,
- u8 mask, u8 append)
-{
- int err;
- u8 val;
-
- err = pci_read_config_byte(pcr->pci, addr, &val);
- if (err < 0)
- return err;
- return pci_write_config_byte(pcr->pci, addr, (val & mask) | append);
-}
-
static inline void rtsx_pci_write_be32(struct rtsx_pcr *pcr, u16 reg, u32 val)
{
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg, 0xFF, val >> 24);
diff --git a/include/linux/rtsx_usb.h b/include/linux/rtsx_usb.h
index 159729cffd8e..3247ed8e9ff0 100644
--- a/include/linux/rtsx_usb.h
+++ b/include/linux/rtsx_usb.h
@@ -54,8 +54,6 @@ struct rtsx_ucr {
struct usb_device *pusb_dev;
struct usb_interface *pusb_intf;
struct usb_sg_request current_sg;
- unsigned char *iobuf;
- dma_addr_t iobuf_dma;
struct timer_list sg_timer;
struct mutex dev_mutex;
diff --git a/include/linux/rv.h b/include/linux/rv.h
new file mode 100644
index 000000000000..8883b41d88ec
--- /dev/null
+++ b/include/linux/rv.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Runtime Verification.
+ *
+ * For further information, see: kernel/trace/rv/rv.c.
+ */
+#ifndef _LINUX_RV_H
+#define _LINUX_RV_H
+
+#define MAX_DA_NAME_LEN 24
+
+#ifdef CONFIG_RV
+/*
+ * Deterministic automaton per-object variables.
+ */
+struct da_monitor {
+ bool monitoring;
+ unsigned int curr_state;
+};
+
+/*
+ * Number of per-task RV monitors, currently fixed at RV_PER_TASK_MONITORS.
+ * If we find justification for more monitors, we can think about adding
+ * more or developing a dynamic method. So far, none of these are
+ * justified.
+ */
+#define RV_PER_TASK_MONITORS 1
+#define RV_PER_TASK_MONITOR_INIT (RV_PER_TASK_MONITORS)
+
+/*
+ * Further monitor types are expected, so make this a union.
+ */
+union rv_task_monitor {
+ struct da_monitor da_mon;
+};
+
+#ifdef CONFIG_RV_REACTORS
+struct rv_reactor {
+ const char *name;
+ const char *description;
+ void (*react)(char *msg);
+};
+#endif
+
+struct rv_monitor {
+ const char *name;
+ const char *description;
+ bool enabled;
+ int (*enable)(void);
+ void (*disable)(void);
+ void (*reset)(void);
+#ifdef CONFIG_RV_REACTORS
+ void (*react)(char *msg);
+#endif
+};
+
+bool rv_monitoring_on(void);
+int rv_unregister_monitor(struct rv_monitor *monitor);
+int rv_register_monitor(struct rv_monitor *monitor);
+int rv_get_task_monitor_slot(void);
+void rv_put_task_monitor_slot(int slot);
+
+#ifdef CONFIG_RV_REACTORS
+bool rv_reacting_on(void);
+int rv_unregister_reactor(struct rv_reactor *reactor);
+int rv_register_reactor(struct rv_reactor *reactor);
+#endif /* CONFIG_RV_REACTORS */
+
+#endif /* CONFIG_RV */
+#endif /* _LINUX_RV_H */
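A do-nothing monitor sketch showing how the structures above fit together; all names and callbacks here are hypothetical placeholders.

#include <linux/init.h>
#include <linux/rv.h>

static int example_enable(void)   { return 0; }
static void example_disable(void) { }
static void example_reset(void)   { }

static struct rv_monitor example_monitor = {
	.name		= "example",
	.description	= "illustrative do-nothing monitor",
	.enable		= example_enable,
	.disable	= example_disable,
	.reset		= example_reset,
};

static int __init example_rv_init(void)
{
	return rv_register_monitor(&example_monitor);
}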
diff --git a/include/linux/rwbase_rt.h b/include/linux/rwbase_rt.h
new file mode 100644
index 000000000000..1d264dd08625
--- /dev/null
+++ b/include/linux/rwbase_rt.h
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef _LINUX_RWBASE_RT_H
+#define _LINUX_RWBASE_RT_H
+
+#include <linux/rtmutex.h>
+#include <linux/atomic.h>
+
+#define READER_BIAS (1U << 31)
+#define WRITER_BIAS (1U << 30)
+
+struct rwbase_rt {
+ atomic_t readers;
+ struct rt_mutex_base rtmutex;
+};
+
+#define __RWBASE_INITIALIZER(name) \
+{ \
+ .readers = ATOMIC_INIT(READER_BIAS), \
+ .rtmutex = __RT_MUTEX_BASE_INITIALIZER(name.rtmutex), \
+}
+
+#define init_rwbase_rt(rwbase) \
+ do { \
+ rt_mutex_base_init(&(rwbase)->rtmutex); \
+ atomic_set(&(rwbase)->readers, READER_BIAS); \
+ } while (0)
+
+
+static __always_inline bool rw_base_is_locked(struct rwbase_rt *rwb)
+{
+ return atomic_read(&rwb->readers) != READER_BIAS;
+}
+
+static __always_inline bool rw_base_is_contended(struct rwbase_rt *rwb)
+{
+ return atomic_read(&rwb->readers) > 0;
+}
+
+#endif /* _LINUX_RWBASE_RT_H */
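Both predicates above are lockless reads of the counter; a tiny sketch of consuming them (the bias arithmetic itself lives in kernel/locking/rwbase_rt.c and is not restated here).

#include <linux/printk.h>
#include <linux/rwbase_rt.h>

static void example_rwbase_report(struct rwbase_rt *rwb)
{
	pr_debug("locked=%d contended=%d\n",
		 rw_base_is_locked(rwb), rw_base_is_contended(rwb));
}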
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 3dcd617e65ae..c0ef596f340b 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -1,7 +1,7 @@
#ifndef __LINUX_RWLOCK_H
#define __LINUX_RWLOCK_H
-#ifndef __LINUX_SPINLOCK_H
+#ifndef __LINUX_INSIDE_SPINLOCK_H
# error "please don't include this file directly"
#endif
@@ -30,31 +30,16 @@ do { \
#ifdef CONFIG_DEBUG_SPINLOCK
extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
-#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
extern int do_raw_read_trylock(rwlock_t *lock);
extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
-#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
extern int do_raw_write_trylock(rwlock_t *lock);
extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
#else
-
-#ifndef arch_read_lock_flags
-# define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#endif
-
-#ifndef arch_write_lock_flags
-# define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#endif
-
# define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
-# define do_raw_read_lock_flags(lock, flags) \
- do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
# define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
# define do_raw_write_lock(rwlock) do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
-# define do_raw_write_lock_flags(lock, flags) \
- do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
# define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
#endif
@@ -70,6 +55,12 @@ do { \
#define write_lock(lock) _raw_write_lock(lock)
#define read_lock(lock) _raw_read_lock(lock)
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define write_lock_nested(lock, subclass) _raw_write_lock_nested(lock, subclass)
+#else
+#define write_lock_nested(lock, subclass) _raw_write_lock(lock)
+#endif
+
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define read_lock_irqsave(lock, flags) \
@@ -128,4 +119,11 @@ do { \
1 : ({ local_irq_restore(flags); 0; }); \
})
+#ifdef arch_rwlock_is_contended
+#define rwlock_is_contended(lock) \
+ arch_rwlock_is_contended(&(lock)->raw_lock)
+#else
+#define rwlock_is_contended(lock) ((void)(lock), 0)
+#endif /* arch_rwlock_is_contended */
+
#endif /* __LINUX_RWLOCK_H */
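A sketch of the new write_lock_nested() annotation: two rwlocks initialized at the same rwlock_init() site share a lockdep class, so a deliberate parent/child ordering must be annotated explicitly (struct and names are hypothetical).

#include <linux/lockdep.h>	/* SINGLE_DEPTH_NESTING */
#include <linux/spinlock.h>	/* rwlock_t and the lock API */

struct example_node {
	rwlock_t lock;
	struct example_node *parent;
};

static void example_reparent(struct example_node *child)
{
	/* Parent and child locks share a lockdep class; mark the inner
	 * acquisition as a deliberate second nesting level. */
	write_lock(&child->parent->lock);
	write_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);

	/* ... move state between parent and child ... */

	write_unlock(&child->lock);
	write_unlock(&child->parent->lock);
}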
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index abfb53ab11be..dceb0a59b692 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -17,6 +17,7 @@
void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) __acquires(lock);
void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
@@ -157,8 +158,7 @@ static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
local_irq_save(flags);
preempt_disable();
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
- do_raw_read_lock_flags, &flags);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
return flags;
}
@@ -184,8 +184,7 @@ static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
local_irq_save(flags);
preempt_disable();
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
- do_raw_write_lock_flags, &flags);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
return flags;
}
@@ -211,6 +210,13 @@ static inline void __raw_write_lock(rwlock_t *lock)
LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}
+static inline void __raw_write_lock_nested(rwlock_t *lock, int subclass)
+{
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
static inline void __raw_write_unlock(rwlock_t *lock)
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
new file mode 100644
index 000000000000..8544ff05e594
--- /dev/null
+++ b/include/linux/rwlock_rt.h
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_RWLOCK_RT_H
+#define __LINUX_RWLOCK_RT_H
+
+#ifndef __LINUX_SPINLOCK_RT_H
+#error Do not #include directly. Use <linux/spinlock.h>.
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
+ struct lock_class_key *key);
+#else
+static inline void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
+ struct lock_class_key *key)
+{
+}
+#endif
+
+#define rwlock_init(rwl) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ init_rwbase_rt(&(rwl)->rwbase); \
+ __rt_rwlock_init(rwl, #rwl, &__key); \
+} while (0)
+
+extern void rt_read_lock(rwlock_t *rwlock);
+extern int rt_read_trylock(rwlock_t *rwlock);
+extern void rt_read_unlock(rwlock_t *rwlock);
+extern void rt_write_lock(rwlock_t *rwlock);
+extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass);
+extern int rt_write_trylock(rwlock_t *rwlock);
+extern void rt_write_unlock(rwlock_t *rwlock);
+
+static __always_inline void read_lock(rwlock_t *rwlock)
+{
+ rt_read_lock(rwlock);
+}
+
+static __always_inline void read_lock_bh(rwlock_t *rwlock)
+{
+ local_bh_disable();
+ rt_read_lock(rwlock);
+}
+
+static __always_inline void read_lock_irq(rwlock_t *rwlock)
+{
+ rt_read_lock(rwlock);
+}
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ rt_read_lock(lock); \
+ flags = 0; \
+ } while (0)
+
+#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
+
+static __always_inline void read_unlock(rwlock_t *rwlock)
+{
+ rt_read_unlock(rwlock);
+}
+
+static __always_inline void read_unlock_bh(rwlock_t *rwlock)
+{
+ rt_read_unlock(rwlock);
+ local_bh_enable();
+}
+
+static __always_inline void read_unlock_irq(rwlock_t *rwlock)
+{
+ rt_read_unlock(rwlock);
+}
+
+static __always_inline void read_unlock_irqrestore(rwlock_t *rwlock,
+ unsigned long flags)
+{
+ rt_read_unlock(rwlock);
+}
+
+static __always_inline void write_lock(rwlock_t *rwlock)
+{
+ rt_write_lock(rwlock);
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static __always_inline void write_lock_nested(rwlock_t *rwlock, int subclass)
+{
+ rt_write_lock_nested(rwlock, subclass);
+}
+#else
+#define write_lock_nested(lock, subclass) rt_write_lock(((void)(subclass), (lock)))
+#endif
+
+static __always_inline void write_lock_bh(rwlock_t *rwlock)
+{
+ local_bh_disable();
+ rt_write_lock(rwlock);
+}
+
+static __always_inline void write_lock_irq(rwlock_t *rwlock)
+{
+ rt_write_lock(rwlock);
+}
+
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ rt_write_lock(lock); \
+ flags = 0; \
+ } while (0)
+
+#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
+
+#define write_trylock_irqsave(lock, flags) \
+({ \
+ int __locked; \
+ \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ __locked = write_trylock(lock); \
+ __locked; \
+})
+
+static __always_inline void write_unlock(rwlock_t *rwlock)
+{
+ rt_write_unlock(rwlock);
+}
+
+static __always_inline void write_unlock_bh(rwlock_t *rwlock)
+{
+ rt_write_unlock(rwlock);
+ local_bh_enable();
+}
+
+static __always_inline void write_unlock_irq(rwlock_t *rwlock)
+{
+ rt_write_unlock(rwlock);
+}
+
+static __always_inline void write_unlock_irqrestore(rwlock_t *rwlock,
+ unsigned long flags)
+{
+ rt_write_unlock(rwlock);
+}
+
+#define rwlock_is_contended(lock) (((void)(lock), 0))
+
+#endif /* __LINUX_RWLOCK_RT_H */
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index 857a72ceb794..1948442e7750 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -1,9 +1,23 @@
#ifndef __LINUX_RWLOCK_TYPES_H
#define __LINUX_RWLOCK_TYPES_H
+#if !defined(__LINUX_SPINLOCK_TYPES_H)
+# error "Do not include directly, include spinlock_types.h"
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_CONFIG, \
+ }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
+#ifndef CONFIG_PREEMPT_RT
/*
- * include/linux/rwlock_types.h - generic rwlock type definitions
- * and initializers
+ * generic rwlock type definitions and initializers
*
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar
* Released under the General Public License (GPL).
@@ -21,12 +35,6 @@ typedef struct {
#define RWLOCK_MAGIC 0xdeaf1eed
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
-#else
-# define RW_DEP_MAP_INIT(lockname)
-#endif
-
#ifdef CONFIG_DEBUG_SPINLOCK
#define __RW_LOCK_UNLOCKED(lockname) \
(rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
@@ -42,4 +50,29 @@ typedef struct {
#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
+#else /* !CONFIG_PREEMPT_RT */
+
+#include <linux/rwbase_rt.h>
+
+typedef struct {
+ struct rwbase_rt rwbase;
+ atomic_t readers;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} rwlock_t;
+
+#define __RWLOCK_RT_INITIALIZER(name) \
+{ \
+ .rwbase = __RWBASE_INITIALIZER(name), \
+ RW_DEP_MAP_INIT(name) \
+}
+
+#define __RW_LOCK_UNLOCKED(name) __RWLOCK_RT_INITIALIZER(name)
+
+#define DEFINE_RWLOCK(name) \
+ rwlock_t name = __RW_LOCK_UNLOCKED(name)
+
+#endif /* CONFIG_PREEMPT_RT */
+
#endif /* __LINUX_RWLOCK_TYPES_H */
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 00d6054687dd..efa5c324369a 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -11,11 +11,23 @@
#include <linux/linkage.h>
#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/err.h>
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_SLEEP, \
+ },
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
+#ifndef CONFIG_PREEMPT_RT
+
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
#endif
@@ -53,12 +65,6 @@ struct rw_semaphore {
#endif
};
-/*
- * Setting all bits of the owner field except bit 0 will indicate
- * that the rwsem is writer-owned with an unknown owner.
- */
-#define RWSEM_OWNER_UNKNOWN (-2L)
-
/* In all implementations count != 0 means locked */
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
@@ -66,35 +72,29 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
}
#define RWSEM_UNLOCKED_VALUE 0L
-#define __RWSEM_INIT_COUNT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
+#define __RWSEM_COUNT_INIT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
/* Common initializer macros and functions */
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
#ifdef CONFIG_DEBUG_RWSEMS
-# define __DEBUG_RWSEM_INITIALIZER(lockname) , .magic = &lockname
+# define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname,
#else
-# define __DEBUG_RWSEM_INITIALIZER(lockname)
+# define __RWSEM_DEBUG_INIT(lockname)
#endif
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
-#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED
+#define __RWSEM_OPT_INIT(lockname) .osq = OSQ_LOCK_UNLOCKED,
#else
#define __RWSEM_OPT_INIT(lockname)
#endif
#define __RWSEM_INITIALIZER(name) \
- { __RWSEM_INIT_COUNT(name), \
+ { __RWSEM_COUNT_INIT(name), \
.owner = ATOMIC_LONG_INIT(0), \
- .wait_list = LIST_HEAD_INIT((name).wait_list), \
- .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
__RWSEM_OPT_INIT(name) \
- __DEBUG_RWSEM_INITIALIZER(name) \
+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),\
+ .wait_list = LIST_HEAD_INIT((name).wait_list), \
+ __RWSEM_DEBUG_INIT(name) \
__RWSEM_DEP_MAP_INIT(name) }
#define DECLARE_RWSEM(name) \
@@ -112,7 +112,7 @@ do { \
/*
* This is the same regardless of which rwsem implementation that is being used.
- * It is just a heuristic meant to be called by somebody alreadying holding the
+ * It is just a heuristic meant to be called by somebody already holding the
* rwsem to see if somebody from an incompatible type is wanting access to the
* lock.
*/
@@ -121,10 +121,58 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
return !list_empty(&sem->wait_list);
}
+#else /* !CONFIG_PREEMPT_RT */
+
+#include <linux/rwbase_rt.h>
+
+struct rw_semaphore {
+ struct rwbase_rt rwbase;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#define __RWSEM_INITIALIZER(name) \
+ { \
+ .rwbase = __RWBASE_INITIALIZER(name), \
+ __RWSEM_DEP_MAP_INIT(name) \
+ }
+
+#define DECLARE_RWSEM(lockname) \
+ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
+
+extern void __init_rwsem(struct rw_semaphore *rwsem, const char *name,
+ struct lock_class_key *key);
+
+#define init_rwsem(sem) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __init_rwsem((sem), #sem, &__key); \
+} while (0)
+
+static __always_inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return rw_base_is_locked(&sem->rwbase);
+}
+
+static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
+{
+ return rw_base_is_contended(&sem->rwbase);
+}
+
+#endif /* CONFIG_PREEMPT_RT */
+
+/*
+ * The functions below are the same for all rwsem implementations including
+ * the RT specific variant.
+ */
+
/*
* lock for reading
*/
extern void down_read(struct rw_semaphore *sem);
+extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
extern int __must_check down_read_killable(struct rw_semaphore *sem);
/*
@@ -173,6 +221,7 @@ extern void downgrade_write(struct rw_semaphore *sem);
* See Documentation/locking/lockdep-design.rst for more details.)
*/
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
+extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
@@ -181,7 +230,7 @@ extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *
do { \
typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
_down_write_nest_lock(sem, &(nest_lock)->dep_map); \
-} while (0);
+} while (0)
/*
* Take/release a lock when not the owner will release it.
@@ -193,6 +242,7 @@ extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
#else
# define down_read_nested(sem, subclass) down_read(sem)
+# define down_read_killable_nested(sem, subclass) down_read_killable(sem)
# define down_write_nest_lock(sem, nest_lock) down_write(sem)
# define down_write_nested(sem, subclass) down_write(sem)
# define down_write_killable_nested(sem, subclass) down_write_killable(sem)
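A minimal reader-side sketch using the down_read_interruptible() variant added above (names are hypothetical).

#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);

static int example_read_side(void)
{
	int ret = down_read_interruptible(&example_sem);

	if (ret)
		return ret;	/* -EINTR: interrupted while waiting */

	/* ... read the shared state ... */

	up_read(&example_sem);
	return 0;
}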
diff --git a/include/linux/s3c_adc_battery.h b/include/linux/s3c_adc_battery.h
index 833871dcf6fd..57f982c375f8 100644
--- a/include/linux/s3c_adc_battery.h
+++ b/include/linux/s3c_adc_battery.h
@@ -14,9 +14,6 @@ struct s3c_adc_bat_pdata {
void (*enable_charger)(void);
void (*disable_charger)(void);
- int gpio_charge_finished;
- int gpio_inverted;
-
const struct s3c_adc_bat_thresh *lut_noac;
unsigned int lut_noac_cnt;
const struct s3c_adc_bat_thresh *lut_acin;
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index e40d019c3d9d..4d2d5205ab58 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -9,8 +9,17 @@
#ifndef __LINUX_SCALE_BITMAP_H
#define __LINUX_SCALE_BITMAP_H
-#include <linux/kernel.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/cache.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/minmax.h>
+#include <linux/percpu.h>
#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/wait.h>
struct seq_file;
@@ -19,24 +28,14 @@ struct seq_file;
*/
struct sbitmap_word {
/**
- * @depth: Number of bits being used in @word/@cleared
- */
- unsigned long depth;
-
- /**
* @word: word holding free bits
*/
- unsigned long word ____cacheline_aligned_in_smp;
+ unsigned long word;
/**
* @cleared: word holding cleared bits
*/
unsigned long cleared ____cacheline_aligned_in_smp;
-
- /**
- * @swap_lock: Held while swapping word <-> cleared
- */
- spinlock_t swap_lock;
} ____cacheline_aligned_in_smp;
/**
@@ -62,9 +61,22 @@ struct sbitmap {
unsigned int map_nr;
/**
+ * @round_robin: Allocate bits in strict round-robin order.
+ */
+ bool round_robin;
+
+ /**
* @map: Allocated bitmap.
*/
struct sbitmap_word *map;
+
+ /*
+ * @alloc_hint: Cache of last successfully allocated or freed bit.
+ *
+ * This is per-cpu, which allows multiple users to stick to different
+ * cachelines until the map is exhausted.
+ */
+ unsigned int __percpu *alloc_hint;
};
#define SBQ_WAIT_QUEUES 8
@@ -100,14 +112,6 @@ struct sbitmap_queue {
*/
struct sbitmap sb;
- /*
- * @alloc_hint: Cache of last successfully allocated or freed bit.
- *
- * This is per-cpu, which allows multiple users to stick to different
- * cachelines until the map is exhausted.
- */
- unsigned int __percpu *alloc_hint;
-
/**
* @wake_batch: Number of bits which must be freed before we wake up any
* waiters.
@@ -130,13 +134,8 @@ struct sbitmap_queue {
atomic_t ws_active;
/**
- * @round_robin: Allocate bits in strict round-robin order.
- */
- bool round_robin;
-
- /**
* @min_shallow_depth: The minimum shallow depth which may be passed to
- * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
+ * sbitmap_queue_get_shallow()
*/
unsigned int min_shallow_depth;
};
@@ -149,11 +148,24 @@ struct sbitmap_queue {
* given, a good default is chosen.
* @flags: Allocation flags.
* @node: Memory node to allocate on.
+ * @round_robin: If true, be stricter about allocation order; always allocate
+ * starting from the last allocated bit. This is less efficient
+ * than the default behavior (false).
+ * @alloc_hint: If true, apply percpu hint for where to start searching for
+ * a free bit.
*
* Return: Zero on success or negative errno on failure.
*/
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
- gfp_t flags, int node);
+ gfp_t flags, int node, bool round_robin, bool alloc_hint);
+
+/* sbitmap internal helper */
+static inline unsigned int __map_depth(const struct sbitmap *sb, int index)
+{
+ if (index == sb->map_nr - 1)
+ return sb->depth - (index << sb->shift);
+ return 1U << sb->shift;
+}
/**
* sbitmap_free() - Free memory used by a &struct sbitmap.
@@ -161,7 +173,8 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
*/
static inline void sbitmap_free(struct sbitmap *sb)
{
- kfree(sb->map);
+ free_percpu(sb->alloc_hint);
+ kvfree(sb->map);
sb->map = NULL;
}
@@ -178,22 +191,17 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth);
/**
* sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
* @sb: Bitmap to allocate from.
- * @alloc_hint: Hint for where to start searching for a free bit.
- * @round_robin: If true, be stricter about allocation order; always allocate
- * starting from the last allocated bit. This is less efficient
- * than the default behavior (false).
*
* This operation provides acquire barrier semantics if it succeeds.
*
* Return: Non-negative allocated bit number if successful, -1 otherwise.
*/
-int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);
+int sbitmap_get(struct sbitmap *sb);
/**
* sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
* limiting the depth used from each word.
* @sb: Bitmap to allocate from.
- * @alloc_hint: Hint for where to start searching for a free bit.
* @shallow_depth: The maximum number of bits to allocate from a single word.
*
* This rather specific operation allows for having multiple users with
@@ -205,8 +213,7 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);
*
* Return: Non-negative allocated bit number if successful, -1 otherwise.
*/
-int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
- unsigned long shallow_depth);
+int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth);
/**
* sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
@@ -247,7 +254,7 @@ static inline void __sbitmap_for_each_set(struct sbitmap *sb,
while (scanned < sb->depth) {
unsigned long word;
unsigned int depth = min_t(unsigned int,
- sb->map[index].depth - nr,
+ __map_depth(sb, index) - nr,
sb->depth - scanned);
scanned += depth;
@@ -320,10 +327,16 @@ static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int b
set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
}
-static inline void sbitmap_clear_bit_unlock(struct sbitmap *sb,
- unsigned int bitnr)
+/*
+ * Counterpart of sbitmap_get(): clears the bit (deferred) and updates
+ * the percpu allocation hint.
+ */
+static inline void sbitmap_put(struct sbitmap *sb, unsigned int bitnr)
{
- clear_bit_unlock(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
+ sbitmap_deferred_clear_bit(sb, bitnr);
+
+ if (likely(sb->alloc_hint && !sb->round_robin && bitnr < sb->depth))
+ *raw_cpu_ptr(sb->alloc_hint) = bitnr;
}
static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
@@ -331,6 +344,24 @@ static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}
+static inline int sbitmap_calculate_shift(unsigned int depth)
+{
+ int shift = ilog2(BITS_PER_LONG);
+
+ /*
+ * If the bitmap is small, shrink the number of bits per word so
+ * we spread over a few cachelines, at least. If less than 4
+ * bits, just forget about it, it's not going to work optimally
+ * anyway.
+ */
+ if (depth >= 4) {
+ while ((4U << shift) > depth)
+ shift--;
+ }
+
+ return shift;
+}
+
/**
* sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
* @sb: Bitmap to show.
@@ -340,6 +371,16 @@ static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
*/
void sbitmap_show(struct sbitmap *sb, struct seq_file *m);
+
+/**
+ * sbitmap_weight() - Return how many bits are set and not cleared in a
+ * &struct sbitmap.
+ * @sb: Bitmap to check.
+ *
+ * Return: Number of bits that are set and not cleared.
+ */
+unsigned int sbitmap_weight(const struct sbitmap *sb);
+
/**
* sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
* seq_file.
@@ -374,11 +415,21 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
{
kfree(sbq->ws);
- free_percpu(sbq->alloc_hint);
sbitmap_free(&sbq->sb);
}
/**
+ * sbitmap_queue_recalculate_wake_batch() - Recalculate wake batch
+ * @sbq: Bitmap queue to recalculate wake batch.
+ * @users: Number of shares.
+ *
+ * Like sbitmap_queue_update_wake_batch(), this will calculate wake batch
+ * by depth. This interface is for HCTX shared tags or queue shared tags.
+ */
+void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
+ unsigned int users);
+
+/**
* sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
* @sbq: Bitmap queue to resize.
* @depth: New number of bits to resize to.
@@ -399,7 +450,20 @@ void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);
int __sbitmap_queue_get(struct sbitmap_queue *sbq);
/**
- * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
+ * __sbitmap_queue_get_batch() - Try to allocate a batch of free bits
+ * @sbq: Bitmap queue to allocate from.
+ * @nr_tags: number of tags requested
+ * @offset: offset to add to returned bits
+ *
+ * Return: Mask of allocated tags, 0 if none are found. Each tag allocated is
+ * a bit in the mask returned, and the caller must add @offset to the value to
+ * get the absolute tag value.
+ */
+unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
+ unsigned int *offset);
+
+/**
+ * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
* sbitmap_queue, limiting the depth used from each word, with preemption
* already disabled.
* @sbq: Bitmap queue to allocate from.
@@ -411,8 +475,8 @@ int __sbitmap_queue_get(struct sbitmap_queue *sbq);
*
* Return: Non-negative allocated bit number if successful, -1 otherwise.
*/
-int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
- unsigned int shallow_depth);
+int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
+ unsigned int shallow_depth);
/**
* sbitmap_queue_get() - Try to allocate a free bit from a &struct
@@ -435,32 +499,6 @@ static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
}
/**
- * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
- * sbitmap_queue, limiting the depth used from each word.
- * @sbq: Bitmap queue to allocate from.
- * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
- * sbitmap_queue_clear()).
- * @shallow_depth: The maximum number of bits to allocate from a single word.
- * See sbitmap_get_shallow().
- *
- * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
- * initializing @sbq.
- *
- * Return: Non-negative allocated bit number if successful, -1 otherwise.
- */
-static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
- unsigned int *cpu,
- unsigned int shallow_depth)
-{
- int nr;
-
- *cpu = get_cpu();
- nr = __sbitmap_queue_get_shallow(sbq, shallow_depth);
- put_cpu();
- return nr;
-}
-
-/**
* sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
* minimum shallow depth that will be used.
* @sbq: Bitmap queue in question.
@@ -487,6 +525,17 @@ void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
unsigned int cpu);
+/**
+ * sbitmap_queue_clear_batch() - Free a batch of allocated bits from a
+ * &struct sbitmap_queue.
+ * @sbq: Bitmap to free from.
+ * @offset: offset for each tag in array
+ * @tags: array of tags
+ * @nr_tags: number of tags in array
+ */
+void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
+ int *tags, int nr_tags);
+
static inline int sbq_index_inc(int index)
{
return (index + 1) & (SBQ_WAIT_QUEUES - 1);
@@ -526,8 +575,9 @@ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);
* sbitmap_queue_wake_up() - Wake up some of waiters in one waitqueue
* on a &struct sbitmap_queue.
* @sbq: Bitmap queue to wake up.
+ * @nr: Number of bits cleared.
*/
-void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr);
/**
* sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
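A sketch of the reshuffled allocation API: round_robin and alloc_hint now configure the map at init time rather than being passed to sbitmap_get(), and sbitmap_put() both clears the bit and refreshes the percpu hint (names are hypothetical).

#include <linux/numa.h>
#include <linux/sbitmap.h>

static int example_tags(void)
{
	struct sbitmap sb;
	int tag, ret;

	/* depth 128, default shift (-1), percpu alloc hint enabled. */
	ret = sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
				false, true);
	if (ret)
		return ret;

	tag = sbitmap_get(&sb);		/* -1 if the map is exhausted */
	if (tag >= 0)
		sbitmap_put(&sb, tag);

	sbitmap_free(&sb);
	return 0;
}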
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 6eec50fb36c8..375a5e90d86a 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -16,15 +16,12 @@ struct scatterlist {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
unsigned int dma_length;
#endif
+#ifdef CONFIG_PCI_P2PDMA
+ unsigned int dma_flags;
+#endif
};
/*
- * Since the above length field is an unsigned int, below we define the maximum
- * length in bytes that can be stored in one scatterlist entry.
- */
-#define SCATTERLIST_MAX_SEGMENT (UINT_MAX & PAGE_MASK)
-
-/*
* These macros should be used after a dma_map_sg call has been done
* to get bus addresses of each of the SG entries and their lengths.
* You should only work with the number of sg entries dma_map_sg
@@ -45,6 +42,12 @@ struct sg_table {
unsigned int orig_nents; /* original size of list */
};
+struct sg_append_table {
+ struct sg_table sgt; /* The scatter list table */
+ struct scatterlist *prv; /* last populated sge in the table */
+ unsigned int total_nents; /* Total entries in the table */
+};
+
/*
* Notes on SG table design.
*
@@ -69,10 +72,27 @@ struct sg_table {
* a valid sg entry, or whether it points to the start of a new scatterlist.
* Those low bits are there for everyone! (thanks mason :-)
*/
-#define sg_is_chain(sg) ((sg)->page_link & SG_CHAIN)
-#define sg_is_last(sg) ((sg)->page_link & SG_END)
-#define sg_chain_ptr(sg) \
- ((struct scatterlist *) ((sg)->page_link & ~(SG_CHAIN | SG_END)))
+#define SG_PAGE_LINK_MASK (SG_CHAIN | SG_END)
+
+static inline unsigned int __sg_flags(struct scatterlist *sg)
+{
+ return sg->page_link & SG_PAGE_LINK_MASK;
+}
+
+static inline struct scatterlist *sg_chain_ptr(struct scatterlist *sg)
+{
+ return (struct scatterlist *)(sg->page_link & ~SG_PAGE_LINK_MASK);
+}
+
+static inline bool sg_is_chain(struct scatterlist *sg)
+{
+ return __sg_flags(sg) & SG_CHAIN;
+}
+
+static inline bool sg_is_last(struct scatterlist *sg)
+{
+ return __sg_flags(sg) & SG_END;
+}
/**
* sg_assign_page - Assign a given page to an SG entry
@@ -92,7 +112,7 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
* In order for the low bit stealing approach to work, pages
* must be aligned at a 32-bit boundary as a minimum.
*/
- BUG_ON((unsigned long) page & (SG_CHAIN | SG_END));
+ BUG_ON((unsigned long)page & SG_PAGE_LINK_MASK);
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg_is_chain(sg));
#endif
@@ -126,7 +146,7 @@ static inline struct page *sg_page(struct scatterlist *sg)
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg_is_chain(sg));
#endif
- return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END));
+ return (struct page *)((sg)->page_link & ~SG_PAGE_LINK_MASK);
}
/**
@@ -151,6 +171,36 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
#define for_each_sg(sglist, sg, nr, __i) \
for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
+/*
+ * Loop over each sg element in the given sg_table object.
+ */
+#define for_each_sgtable_sg(sgt, sg, i) \
+ for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i)
+
+/*
+ * Loop over each sg element in the given *DMA mapped* sg_table object.
+ * Please use sg_dma_address(sg) and sg_dma_len(sg) to extract DMA addresses
+ * of the each element.
+ */
+#define for_each_sgtable_dma_sg(sgt, sg, i) \
+ for_each_sg((sgt)->sgl, sg, (sgt)->nents, i)
+
+static inline void __sg_chain(struct scatterlist *chain_sg,
+ struct scatterlist *sgl)
+{
+ /*
+ * offset and length are unused for chain entry. Clear them.
+ */
+ chain_sg->offset = 0;
+ chain_sg->length = 0;
+
+ /*
+ * Set lowest bit to indicate a link pointer, and make sure to clear
+ * the termination bit if it happens to be set.
+ */
+ chain_sg->page_link = ((unsigned long) sgl | SG_CHAIN) & ~SG_END;
+}
+
/**
* sg_chain - Chain two sglists together
* @prv: First scatterlist
@@ -164,18 +214,7 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
struct scatterlist *sgl)
{
- /*
- * offset and length are unused for chain entry. Clear them.
- */
- prv[prv_nents - 1].offset = 0;
- prv[prv_nents - 1].length = 0;
-
- /*
- * Set lowest bit to indicate a link pointer, and make sure to clear
- * the termination bit if it happens to be set.
- */
- prv[prv_nents - 1].page_link = ((unsigned long) sgl | SG_CHAIN)
- & ~SG_END;
+ __sg_chain(&prv[prv_nents - 1], sgl);
}
/**
@@ -209,6 +248,72 @@ static inline void sg_unmark_end(struct scatterlist *sg)
sg->page_link &= ~SG_END;
}
+/*
+ * CONFIG_PCI_P2PDMA depends on CONFIG_64BIT, which means there are 4 bytes
+ * of padding in struct scatterlist (assuming CONFIG_NEED_SG_DMA_LENGTH is
+ * also set). Use this padding for DMA flags bits to indicate when a
+ * specific dma address is a bus address.
+ */
+#ifdef CONFIG_PCI_P2PDMA
+
+#define SG_DMA_BUS_ADDRESS (1 << 0)
+
+/**
+ * sg_is_dma_bus_address - Return whether a given segment was marked
+ * as a bus address
+ * @sg: SG entry
+ *
+ * Description:
+ * Returns true if sg_dma_mark_bus_address() has been called on
+ * this segment.
+ **/
+static inline bool sg_is_dma_bus_address(struct scatterlist *sg)
+{
+ return sg->dma_flags & SG_DMA_BUS_ADDRESS;
+}
+
+/**
+ * sg_dma_mark_bus_address - Mark the scatterlist entry as a bus address
+ * @sg: SG entry
+ *
+ * Description:
+ * Marks the passed in sg entry to indicate that the dma_address is
+ * a bus address and doesn't need to be unmapped. This should only be
+ * used by dma_map_sg() implementations to mark bus addresses
+ * so they can be properly cleaned up in dma_unmap_sg().
+ **/
+static inline void sg_dma_mark_bus_address(struct scatterlist *sg)
+{
+ sg->dma_flags |= SG_DMA_BUS_ADDRESS;
+}
+
+/**
+ * sg_dma_unmark_bus_address - Unmark the scatterlist entry as a bus address
+ * @sg: SG entry
+ *
+ * Description:
+ * Clears the bus address mark.
+ **/
+static inline void sg_dma_unmark_bus_address(struct scatterlist *sg)
+{
+ sg->dma_flags &= ~SG_DMA_BUS_ADDRESS;
+}
+
+#else
+
+static inline bool sg_is_dma_bus_address(struct scatterlist *sg)
+{
+ return false;
+}
+static inline void sg_dma_mark_bus_address(struct scatterlist *sg)
+{
+}
+static inline void sg_dma_unmark_bus_address(struct scatterlist *sg)
+{
+}
+
+#endif
+
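A rough sketch of the intended use inside a dma_map_sg() implementation; the P2P test and the bus address below are placeholders, not real API:

	/* Map side: a peer-to-peer segment maps straight to a bus address. */
	if (segment_is_p2p(sg)) {			/* hypothetical test */
		sg->dma_address = p2p_bus_addr;		/* hypothetical address */
		sg_dma_len(sg) = sg->length;
		sg_dma_mark_bus_address(sg);
	}

	/* Unmap side: bus addresses were never IOMMU-mapped, so skip them. */
	for_each_sg(sgl, sg, nents, i) {
		if (sg_is_dma_bus_address(sg)) {
			sg_dma_unmark_bus_address(sg);
			continue;
		}
		/* ... normal unmap of sg->dma_address ... */
	}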
/**
* sg_phys - Return physical address of an sg entry
* @sg: SG entry
@@ -267,18 +372,51 @@ typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
void __sg_free_table(struct sg_table *, unsigned int, unsigned int,
- sg_free_fn *);
+ sg_free_fn *, unsigned int);
void sg_free_table(struct sg_table *);
+void sg_free_append_table(struct sg_append_table *sgt);
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int,
struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *);
int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
-int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
- unsigned int n_pages, unsigned int offset,
- unsigned long size, unsigned int max_segment,
- gfp_t gfp_mask);
-int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
- unsigned int n_pages, unsigned int offset,
- unsigned long size, gfp_t gfp_mask);
+int sg_alloc_append_table_from_pages(struct sg_append_table *sgt,
+ struct page **pages, unsigned int n_pages,
+ unsigned int offset, unsigned long size,
+ unsigned int max_segment,
+ unsigned int left_pages, gfp_t gfp_mask);
+int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
+ unsigned int n_pages, unsigned int offset,
+ unsigned long size,
+ unsigned int max_segment, gfp_t gfp_mask);
+
+/**
+ * sg_alloc_table_from_pages - Allocate and initialize an sg table from
+ * an array of pages
+ * @sgt: The sg table header to use
+ * @pages: Pointer to an array of page pointers
+ * @n_pages: Number of pages in the pages array
+ * @offset: Offset from start of the first page to the start of a buffer
+ * @size: Number of valid bytes in the buffer (after offset)
+ * @gfp_mask: GFP allocation mask
+ *
+ * Description:
+ * Allocate and initialize an sg table from a list of pages. Contiguous
+ * ranges of the pages are squashed into a single scatterlist node. A user
+ * may provide an offset at the start and a size of valid data in a buffer
+ * specified by the page array. The returned sg table is released with
+ * sg_free_table().
+ *
+ * Returns:
+ * 0 on success, negative error on failure
+ */
+static inline int sg_alloc_table_from_pages(struct sg_table *sgt,
+ struct page **pages,
+ unsigned int n_pages,
+ unsigned int offset,
+ unsigned long size, gfp_t gfp_mask)
+{
+ return sg_alloc_table_from_pages_segment(sgt, pages, n_pages, offset,
+ size, UINT_MAX, gfp_mask);
+}
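For instance (a sketch; pages/n_pages are assumed to be a pinned page array):

	struct sg_table sgt;
	int ret;

	ret = sg_alloc_table_from_pages(&sgt, pages, n_pages, 0,
					(unsigned long)n_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		return ret;
	/* ... dma_map_sgtable() and use the table ... */
	sg_free_table(&sgt);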
#ifdef CONFIG_SGL_ALLOC
struct scatterlist *sgl_alloc_order(unsigned long long length,
@@ -401,9 +539,10 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
* @sglist: sglist to iterate over
* @piter: page iterator to hold current page, sg, sg_pgoffset
* @nents: maximum number of sg entries to iterate over
- * @pgoffset: starting page offset
+ * @pgoffset: starting page offset (in pages)
*
* Callers may use sg_page_iter_page() to get each page pointer.
+ * Each iteration operates on a PAGE_SIZE unit.
*/
#define for_each_sg_page(sglist, piter, nents, pgoffset) \
for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \
@@ -412,25 +551,54 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
/**
* for_each_sg_dma_page - iterate over the pages of the given sg list
* @sglist: sglist to iterate over
- * @dma_iter: page iterator to hold current page
+ * @dma_iter: DMA page iterator to hold current page
* @dma_nents: maximum number of sg entries to iterate over, this is the value
* returned from dma_map_sg
- * @pgoffset: starting page offset
+ * @pgoffset: starting page offset (in pages)
*
* Callers may use sg_page_iter_dma_address() to get each page's DMA address.
+ * Each iteration operates on a PAGE_SIZE unit.
*/
#define for_each_sg_dma_page(sglist, dma_iter, dma_nents, pgoffset) \
for (__sg_page_iter_start(&(dma_iter)->base, sglist, dma_nents, \
pgoffset); \
__sg_page_iter_dma_next(dma_iter);)
+/**
+ * for_each_sgtable_page - iterate over all pages in the sg_table object
+ * @sgt: sg_table object to iterate over
+ * @piter: page iterator to hold current page
+ * @pgoffset: starting page offset (in pages)
+ *
+ * Iterates over all memory pages in the buffer described by
+ * a scatterlist stored in the given sg_table object.
+ * See also for_each_sg_page(). Each iteration operates on a PAGE_SIZE unit.
+ */
+#define for_each_sgtable_page(sgt, piter, pgoffset) \
+ for_each_sg_page((sgt)->sgl, piter, (sgt)->orig_nents, pgoffset)
+
+/**
+ * for_each_sgtable_dma_page - iterate over the DMA mapped sg_table object
+ * @sgt: sg_table object to iterate over
+ * @dma_iter: DMA page iterator to hold current page
+ * @pgoffset: starting page offset (in pages)
+ *
+ * Iterates over all DMA mapped pages in the buffer described by
+ * a scatterlist stored in the given sg_table object.
+ * See also for_each_sg_dma_page(). Each iteration operates on a PAGE_SIZE
+ * unit.
+ */
+#define for_each_sgtable_dma_page(sgt, dma_iter, pgoffset) \
+ for_each_sg_dma_page((sgt)->sgl, dma_iter, (sgt)->nents, pgoffset)
+
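A short sketch of both page iterators over an already-mapped table (sgt is assumed):

	struct sg_page_iter piter;
	struct sg_dma_page_iter diter;
	dma_addr_t addr;

	for_each_sgtable_page(sgt, &piter, 0)
		clear_highpage(sg_page_iter_page(&piter));

	for_each_sgtable_dma_page(sgt, &diter, 0) {
		addr = sg_page_iter_dma_address(&diter);
		pr_debug("dma page %pad\n", &addr);
	}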
/*
* Mapping sg iterator
*
* Iterates over sg entries mapping page-by-page. On each successful
* iteration, @miter->page points to the mapped page and
* @miter->length bytes of data can be accessed at @miter->addr. As
- * long as an interation is enclosed between start and stop, the user
+ * long as an iteration is enclosed between start and stop, the user
* is free to choose control structure and when to stop.
*
* @miter->consumed is set to @miter->length on each iteration. It
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 04278493bf15..ffb6eb55cd13 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -14,10 +14,11 @@
#include <linux/pid.h>
#include <linux/sem.h>
#include <linux/shm.h>
-#include <linux/kcov.h>
+#include <linux/kmsan_types.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
+#include <linux/irqflags.h>
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
@@ -27,21 +28,29 @@
#include <linux/sched/prio.h>
#include <linux/sched/types.h>
#include <linux/signal_types.h>
+#include <linux/syscall_user_dispatch.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
#include <linux/posix-timers.h>
#include <linux/rseq.h>
+#include <linux/seqlock.h>
+#include <linux/kcsan.h>
+#include <linux/rv.h>
+#include <asm/kmap_size.h>
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
+struct bpf_local_storage;
+struct bpf_run_ctx;
struct capture_control;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
+struct io_uring_task;
struct mempolicy;
struct nameidata;
struct nsproxy;
@@ -73,28 +82,39 @@ struct task_group;
*/
/* Used in tsk->state: */
-#define TASK_RUNNING 0x0000
-#define TASK_INTERRUPTIBLE 0x0001
-#define TASK_UNINTERRUPTIBLE 0x0002
-#define __TASK_STOPPED 0x0004
-#define __TASK_TRACED 0x0008
+#define TASK_RUNNING 0x00000000
+#define TASK_INTERRUPTIBLE 0x00000001
+#define TASK_UNINTERRUPTIBLE 0x00000002
+#define __TASK_STOPPED 0x00000004
+#define __TASK_TRACED 0x00000008
/* Used in tsk->exit_state: */
-#define EXIT_DEAD 0x0010
-#define EXIT_ZOMBIE 0x0020
+#define EXIT_DEAD 0x00000010
+#define EXIT_ZOMBIE 0x00000020
#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
/* Used in tsk->state again: */
-#define TASK_PARKED 0x0040
-#define TASK_DEAD 0x0080
-#define TASK_WAKEKILL 0x0100
-#define TASK_WAKING 0x0200
-#define TASK_NOLOAD 0x0400
-#define TASK_NEW 0x0800
-#define TASK_STATE_MAX 0x1000
+#define TASK_PARKED 0x00000040
+#define TASK_DEAD 0x00000080
+#define TASK_WAKEKILL 0x00000100
+#define TASK_WAKING 0x00000200
+#define TASK_NOLOAD 0x00000400
+#define TASK_NEW 0x00000800
+#define TASK_RTLOCK_WAIT 0x00001000
+#define TASK_FREEZABLE 0x00002000
+#define __TASK_FREEZABLE_UNSAFE (0x00004000 * IS_ENABLED(CONFIG_LOCKDEP))
+#define TASK_FROZEN 0x00008000
+#define TASK_STATE_MAX 0x00010000
+
+#define TASK_ANY (TASK_STATE_MAX-1)
+
+/*
+ * DO NOT ADD ANY NEW USERS!
+ */
+#define TASK_FREEZABLE_UNSAFE (TASK_FREEZABLE | __TASK_FREEZABLE_UNSAFE)
/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
-#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
+#define TASK_TRACED __TASK_TRACED
#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
@@ -107,17 +127,11 @@ struct task_group;
__TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
TASK_PARKED)
-#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
-
-#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
+#define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)
-#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
-
-#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
- (task->flags & PF_FROZEN) == 0 && \
- (task->state & TASK_NOLOAD) == 0)
-
-#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+#define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
+#define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
+#define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)
/*
* Special states are those that do not use the normal wait-loop pattern. See
@@ -126,30 +140,37 @@ struct task_group;
#define is_special_task_state(state) \
((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
-#define __set_current_state(state_value) \
- do { \
- WARN_ON_ONCE(is_special_task_state(state_value));\
- current->task_state_change = _THIS_IP_; \
- current->state = (state_value); \
- } while (0)
-
-#define set_current_state(state_value) \
- do { \
- WARN_ON_ONCE(is_special_task_state(state_value));\
- current->task_state_change = _THIS_IP_; \
- smp_store_mb(current->state, (state_value)); \
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+# define debug_normal_state_change(state_value) \
+ do { \
+ WARN_ON_ONCE(is_special_task_state(state_value)); \
+ current->task_state_change = _THIS_IP_; \
} while (0)
-#define set_special_state(state_value) \
+# define debug_special_state_change(state_value) \
do { \
- unsigned long flags; /* may shadow */ \
WARN_ON_ONCE(!is_special_task_state(state_value)); \
- raw_spin_lock_irqsave(&current->pi_lock, flags); \
current->task_state_change = _THIS_IP_; \
- current->state = (state_value); \
- raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
} while (0)
+
+# define debug_rtlock_wait_set_state() \
+ do { \
+ current->saved_state_change = current->task_state_change;\
+ current->task_state_change = _THIS_IP_; \
+ } while (0)
+
+# define debug_rtlock_wait_restore_state() \
+ do { \
+ current->task_state_change = current->saved_state_change;\
+ } while (0)
+
#else
+# define debug_normal_state_change(cond) do { } while (0)
+# define debug_special_state_change(cond) do { } while (0)
+# define debug_rtlock_wait_set_state() do { } while (0)
+# define debug_rtlock_wait_restore_state() do { } while (0)
+#endif
+
/*
* set_current_state() includes a barrier so that the write of current->state
* is correctly serialised wrt the caller's subsequent test of whether to
@@ -157,24 +178,24 @@ struct task_group;
*
* for (;;) {
* set_current_state(TASK_UNINTERRUPTIBLE);
- * if (!need_sleep)
- * break;
+ * if (CONDITION)
+ * break;
*
* schedule();
* }
* __set_current_state(TASK_RUNNING);
*
* If the caller does not need such serialisation (because, for instance, the
- * condition test and condition change and wakeup are under the same lock) then
+ * CONDITION test and condition change and wakeup are under the same lock) then
* use __set_current_state().
*
* The above is typically ordered against the wakeup, which does:
*
- * need_sleep = false;
+ * CONDITION = 1;
* wake_up_state(p, TASK_UNINTERRUPTIBLE);
*
- * where wake_up_state() executes a full memory barrier before accessing the
- * task state.
+ * where wake_up_state()/try_to_wake_up() executes a full memory barrier before
+ * accessing p->state.
*
* Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
* once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
@@ -188,29 +209,87 @@ struct task_group;
* Also see the comments of try_to_wake_up().
*/
#define __set_current_state(state_value) \
- current->state = (state_value)
+ do { \
+ debug_normal_state_change((state_value)); \
+ WRITE_ONCE(current->__state, (state_value)); \
+ } while (0)
#define set_current_state(state_value) \
- smp_store_mb(current->state, (state_value))
+ do { \
+ debug_normal_state_change((state_value)); \
+ smp_store_mb(current->__state, (state_value)); \
+ } while (0)
/*
* set_special_state() should be used for those states when the blocking task
* can not use the regular condition based wait-loop. In that case we must
- * serialize against wakeups such that any possible in-flight TASK_RUNNING stores
- * will not collide with our state change.
+ * serialize against wakeups such that any possible in-flight TASK_RUNNING
+ * stores will not collide with our state change.
*/
#define set_special_state(state_value) \
do { \
unsigned long flags; /* may shadow */ \
+ \
raw_spin_lock_irqsave(&current->pi_lock, flags); \
- current->state = (state_value); \
+ debug_special_state_change((state_value)); \
+ WRITE_ONCE(current->__state, (state_value)); \
raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
} while (0)
-#endif
+/*
+ * PREEMPT_RT specific variants for "sleeping" spin/rwlocks
+ *
+ * RT's spin/rwlock substitutions are state preserving. The state of the
+ * task when blocking on the lock is saved in task_struct::saved_state and
+ * restored after the lock has been acquired. These operations are
+ * serialized by task_struct::pi_lock against try_to_wake_up(). Any non RT
+ * lock related wakeups while the task is blocked on the lock are
+ * redirected to operate on task_struct::saved_state to ensure that these
+ * are not dropped. On restore task_struct::saved_state is set to
+ * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail.
+ *
+ * The lock operation looks like this:
+ *
+ * current_save_and_set_rtlock_wait_state();
+ * for (;;) {
+ * if (try_lock())
+ * break;
+ * raw_spin_unlock_irq(&lock->wait_lock);
+ * schedule_rtlock();
+ * raw_spin_lock_irq(&lock->wait_lock);
+ * set_current_state(TASK_RTLOCK_WAIT);
+ * }
+ * current_restore_rtlock_saved_state();
+ */
+#define current_save_and_set_rtlock_wait_state() \
+ do { \
+ lockdep_assert_irqs_disabled(); \
+ raw_spin_lock(&current->pi_lock); \
+ current->saved_state = current->__state; \
+ debug_rtlock_wait_set_state(); \
+ WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \
+ raw_spin_unlock(&current->pi_lock); \
+ } while (0)
+
+#define current_restore_rtlock_saved_state() \
+ do { \
+ lockdep_assert_irqs_disabled(); \
+ raw_spin_lock(&current->pi_lock); \
+ debug_rtlock_wait_restore_state(); \
+ WRITE_ONCE(current->__state, current->saved_state); \
+ current->saved_state = TASK_RUNNING; \
+ raw_spin_unlock(&current->pi_lock); \
+ } while (0)
-/* Task command name length: */
-#define TASK_COMM_LEN 16
+#define get_current_state() READ_ONCE(current->__state)
+
+/*
+ * Define the task command name length as an enum so that it can be visible
+ * to BPF programs.
+ */
+enum {
+ TASK_COMM_LEN = 16,
+};
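A small sketch tying the two together (purely illustrative):

	char comm[TASK_COMM_LEN];
	unsigned int state = get_current_state();

	get_task_comm(comm, current);
	pr_debug("%s state=%#x\n", comm, state);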
extern void scheduler_tick(void);
@@ -224,6 +303,9 @@ extern long schedule_timeout_idle(long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
asmlinkage void preempt_schedule_irq(void);
+#ifdef CONFIG_PREEMPT_RT
+ extern void schedule_rtlock(void);
+#endif
extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
@@ -348,36 +430,46 @@ struct load_weight {
* Only for tasks we track a moving average of the past instantaneous
* estimated utilization. This allows to absorb sporadic drops in utilization
* of an otherwise almost periodic task.
+ *
+ * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
+ * updates. When a task is dequeued, its util_est should not be updated if its
+ * util_avg has not been updated in the meantime.
+ * This information is mapped into the MSB of util_est.enqueued at dequeue
+ * time. Since the max value of util_est.enqueued for a task is 1024 (the
+ * PELT util_avg for a task), it is safe to use the MSB.
*/
struct util_est {
unsigned int enqueued;
unsigned int ewma;
#define UTIL_EST_WEIGHT_SHIFT 2
+#define UTIL_AVG_UNCHANGED 0x80000000
} __attribute__((__aligned__(sizeof(u64))));
/*
- * The load_avg/util_avg accumulates an infinite geometric series
- * (see __update_load_avg() in kernel/sched/fair.c).
+ * The load/runnable/util_avg accumulates an infinite geometric series
+ * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
*
* [load_avg definition]
*
* load_avg = runnable% * scale_load_down(load)
*
- * where runnable% is the time ratio that a sched_entity is runnable.
- * For cfs_rq, it is the aggregated load_avg of all runnable and
- * blocked sched_entities.
+ * [runnable_avg definition]
+ *
+ * runnable_avg = runnable% * SCHED_CAPACITY_SCALE
*
* [util_avg definition]
*
* util_avg = running% * SCHED_CAPACITY_SCALE
*
- * where running% is the time ratio that a sched_entity is running on
- * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
- * and blocked sched_entities.
+ * where runnable% is the time ratio that a sched_entity is runnable and
+ * running% the time ratio that a sched_entity is running.
*
- * load_avg and util_avg don't direcly factor frequency scaling and CPU
- * capacity scaling. The scaling is done through the rq_clock_pelt that
- * is used for computing those signals (see update_rq_clock_pelt())
+ * For cfs_rq, they are the aggregated values of all runnable and blocked
+ * sched_entities.
+ *
+ * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
+ * capacity scaling. The scaling is done through the rq_clock_pelt that is used
+ * for computing those signals (see update_rq_clock_pelt())
*
* N.B., the above ratios (runnable% and running%) themselves are in the
* range of [0, 1]. To do fixed point arithmetics, we therefore scale them
@@ -401,11 +493,11 @@ struct util_est {
struct sched_avg {
u64 last_update_time;
u64 load_sum;
- u64 runnable_load_sum;
+ u64 runnable_sum;
u32 util_sum;
u32 period_contrib;
unsigned long load_avg;
- unsigned long runnable_load_avg;
+ unsigned long runnable_avg;
unsigned long util_avg;
struct util_est util_est;
} ____cacheline_aligned;
@@ -425,6 +517,8 @@ struct sched_statistics {
u64 block_start;
u64 block_max;
+ s64 sum_block_runtime;
+
u64 exec_max;
u64 slice_max;
@@ -443,13 +537,16 @@ struct sched_statistics {
u64 nr_wakeups_affine_attempts;
u64 nr_wakeups_passive;
u64 nr_wakeups_idle;
+
+#ifdef CONFIG_SCHED_CORE
+ u64 core_forceidle_sum;
#endif
-};
+#endif /* CONFIG_SCHEDSTATS */
+} ____cacheline_aligned;
struct sched_entity {
/* For load-balancing: */
struct load_weight load;
- unsigned long runnable_weight;
struct rb_node run_node;
struct list_head group_node;
unsigned int on_rq;
@@ -461,8 +558,6 @@ struct sched_entity {
u64 nr_migrations;
- struct sched_statistics statistics;
-
#ifdef CONFIG_FAIR_GROUP_SCHED
int depth;
struct sched_entity *parent;
@@ -470,6 +565,8 @@ struct sched_entity {
struct cfs_rq *cfs_rq;
/* rq "owned" by this entity/group: */
struct cfs_rq *my_q;
+ /* cached value of my_q->h_nr_running */
+ unsigned long runnable_weight;
#endif
#ifdef CONFIG_SMP
@@ -531,10 +628,6 @@ struct sched_dl_entity {
* task has to wait for a replenishment to be performed at the
* next firing of dl_timer.
*
- * @dl_boosted tells if we are boosted due to DI. If so we are
- * outside bandwidth enforcement mechanism (but only until we
- * exit the critical section);
- *
* @dl_yielded tells if task gave up the CPU before consuming
* all its available runtime during the last job.
*
@@ -549,7 +642,6 @@ struct sched_dl_entity {
* overruns.
*/
unsigned int dl_throttled : 1;
- unsigned int dl_boosted : 1;
unsigned int dl_yielded : 1;
unsigned int dl_non_contending : 1;
unsigned int dl_overrun : 1;
@@ -568,6 +660,15 @@ struct sched_dl_entity {
* time.
*/
struct hrtimer inactive_timer;
+
+#ifdef CONFIG_RT_MUTEXES
+ /*
+ * Priority Inheritance. When a DEADLINE scheduling entity is boosted,
+ * pi_se points to the donor; otherwise it points to the dl_se it belongs
+ * to (the original one/itself).
+ */
+ struct sched_dl_entity *pi_se;
+#endif
};
#ifdef CONFIG_UCLAMP_TASK
@@ -610,7 +711,7 @@ union rcu_special {
u8 blocked;
u8 need_qs;
u8 exp_hint; /* Hint for performance. */
- u8 deferred_qs;
+ u8 need_mb; /* Readers need smp_mb(). */
} b; /* Bits. */
u32 s; /* Set of bits. */
};
@@ -626,6 +727,13 @@ struct wake_q_node {
struct wake_q_node *next;
};
+struct kmap_ctrl {
+#ifdef CONFIG_KMAP_LOCAL
+ int idx;
+ pte_t pteval[KM_MAX_IDX];
+#endif
+};
+
struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
@@ -634,8 +742,12 @@ struct task_struct {
*/
struct thread_info thread_info;
#endif
- /* -1 unrunnable, 0 runnable, >0 stopped: */
- volatile long state;
+ unsigned int __state;
+
+#ifdef CONFIG_PREEMPT_RT
+ /* saved state for "spinlock sleepers" */
+ unsigned int saved_state;
+#endif
/*
* This begins the randomizable portion of task_struct. Only
@@ -650,12 +762,8 @@ struct task_struct {
unsigned int ptrace;
#ifdef CONFIG_SMP
- struct llist_node wake_entry;
int on_cpu;
-#ifdef CONFIG_THREAD_INFO_IN_TASK
- /* Current CPU: */
- unsigned int cpu;
-#endif
+ struct __call_single_node wake_entry;
unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts;
struct task_struct *last_wakee;
@@ -677,21 +785,36 @@ struct task_struct {
int normal_prio;
unsigned int rt_priority;
- const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
+ struct sched_dl_entity dl;
+ const struct sched_class *sched_class;
+
+#ifdef CONFIG_SCHED_CORE
+ struct rb_node core_node;
+ unsigned long core_cookie;
+ unsigned int core_occupation;
+#endif
+
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
#endif
- struct sched_dl_entity dl;
#ifdef CONFIG_UCLAMP_TASK
- /* Clamp values requested for a scheduling entity */
+ /*
+ * Clamp values requested for a scheduling entity.
+ * Must be updated with task_rq_lock() held.
+ */
struct uclamp_se uclamp_req[UCLAMP_CNT];
- /* Effective clamp values used for a scheduling entity */
+ /*
+ * Effective clamp values used for a scheduling entity.
+ * Must be updated with task_rq_lock() held.
+ */
struct uclamp_se uclamp[UCLAMP_CNT];
#endif
+ struct sched_statistics stats;
+
#ifdef CONFIG_PREEMPT_NOTIFIERS
/* List of struct preempt_notifier: */
struct hlist_head preempt_notifiers;
@@ -704,7 +827,13 @@ struct task_struct {
unsigned int policy;
int nr_cpus_allowed;
const cpumask_t *cpus_ptr;
+ cpumask_t *user_cpus_ptr;
cpumask_t cpus_mask;
+ void *migration_pending;
+#ifdef CONFIG_SMP
+ unsigned short migration_disabled;
+#endif
+ unsigned short migration_flags;
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
@@ -721,6 +850,15 @@ struct task_struct {
struct list_head rcu_tasks_holdout_list;
#endif /* #ifdef CONFIG_TASKS_RCU */
+#ifdef CONFIG_TASKS_TRACE_RCU
+ int trc_reader_nesting;
+ int trc_ipi_to_cpu;
+ union rcu_special trc_reader_special;
+ struct list_head trc_holdout_list;
+ struct list_head trc_blkd_node;
+ int trc_blkd_cpu;
+#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
+
struct sched_info sched_info;
struct list_head tasks;
@@ -732,9 +870,6 @@ struct task_struct {
struct mm_struct *mm;
struct mm_struct *active_mm;
- /* Per-thread vma caching: */
- struct vmacache vmacache;
-
#ifdef SPLIT_RSS_COUNTING
struct task_rss_stat rss_stat;
#endif
@@ -753,7 +888,6 @@ struct task_struct {
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
unsigned sched_migrated:1;
- unsigned sched_remote_wakeup:1;
#ifdef CONFIG_PSI
unsigned sched_psi_wake_requeue:1;
#endif
@@ -763,6 +897,21 @@ struct task_struct {
/* Unserialized, strictly 'current' */
+ /*
+ * This field must not be in the scheduler word above due to wakelist
+ * queueing no longer being serialized by p->on_cpu. However:
+ *
+ * p->XXX = X; ttwu()
+ * schedule() if (p->on_rq && ..) // false
+ * smp_mb__after_spinlock(); if (smp_load_acquire(&p->on_cpu) && //true
+ * deactivate_task() ttwu_queue_wakelist())
+ * p->on_rq = 0; p->sched_remote_wakeup = Y;
+ *
+ * guarantees all stores of 'current' are visible before
+ * ->sched_remote_wakeup gets used, so it can be in this word.
+ */
+ unsigned sched_remote_wakeup:1;
+
/* Bit to tell LSMs we're in execve(): */
unsigned in_execve:1;
unsigned in_iowait:1;
@@ -772,6 +921,10 @@ struct task_struct {
#ifdef CONFIG_MEMCG
unsigned in_user_fault:1;
#endif
+#ifdef CONFIG_LRU_GEN
+ /* whether the LRU algorithm may apply to this access */
+ unsigned in_lru_fault:1;
+#endif
#ifdef CONFIG_COMPAT_BRK
unsigned brk_randomized:1;
#endif
@@ -782,9 +935,30 @@ struct task_struct {
unsigned frozen:1;
#endif
#ifdef CONFIG_BLK_CGROUP
- /* to be used once the psi infrastructure lands upstream. */
unsigned use_memdelay:1;
#endif
+#ifdef CONFIG_PSI
+ /* Stalled due to lack of memory */
+ unsigned in_memstall:1;
+#endif
+#ifdef CONFIG_PAGE_OWNER
+ /* Used by page_owner=on to detect recursion in page tracking. */
+ unsigned in_page_owner:1;
+#endif
+#ifdef CONFIG_EVENTFD
+ /* Recursion prevention for eventfd_signal() */
+ unsigned in_eventfd:1;
+#endif
+#ifdef CONFIG_IOMMU_SVA
+ unsigned pasid_activated:1;
+#endif
+#ifdef CONFIG_CPU_SUP_INTEL
+ unsigned reported_split_lock:1;
+#endif
+#ifdef CONFIG_TASK_DELAY_ACCT
+ /* delay due to memory thrashing */
+ unsigned in_thrashing:1;
+#endif
unsigned long atomic_flags; /* Flags requiring atomic access. */
@@ -839,6 +1013,9 @@ struct task_struct {
/* CLONE_CHILD_CLEARTID: */
int __user *clear_child_tid;
+ /* PF_KTHREAD | PF_IO_WORKER */
+ void *worker_private;
+
u64 utime;
u64 stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
@@ -871,6 +1048,10 @@ struct task_struct {
/* Empty if CONFIG_POSIX_CPUTIMERS=n */
struct posix_cputimers posix_cputimers;
+#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
+ struct posix_cputimers_work posix_cputimers_work;
+#endif
+
/* Process credentials: */
/* Tracer's credentials at attach: */
@@ -912,6 +1093,10 @@ struct task_struct {
/* Open file information: */
struct files_struct *files;
+#ifdef CONFIG_IO_URING
+ struct io_uring_task *io_uring;
+#endif
+
/* Namespaces: */
struct nsproxy *nsproxy;
@@ -937,10 +1122,11 @@ struct task_struct {
unsigned int sessionid;
#endif
struct seccomp seccomp;
+ struct syscall_user_dispatch syscall_dispatch;
/* Thread group tracking: */
- u32 parent_exec_id;
- u32 self_exec_id;
+ u64 parent_exec_id;
+ u64 self_exec_id;
/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
spinlock_t alloc_lock;
@@ -969,19 +1155,15 @@ struct task_struct {
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
- unsigned int irq_events;
- unsigned long hardirq_enable_ip;
- unsigned long hardirq_disable_ip;
- unsigned int hardirq_enable_event;
- unsigned int hardirq_disable_event;
- int hardirqs_enabled;
- int hardirq_context;
- unsigned long softirq_disable_ip;
- unsigned long softirq_enable_ip;
- unsigned int softirq_disable_event;
- unsigned int softirq_enable_event;
+ struct irqtrace_events irqtrace;
+ unsigned int hardirq_threaded;
+ u64 hardirq_chain_key;
int softirqs_enabled;
int softirq_context;
+ int irq_config;
+#endif
+#ifdef CONFIG_PREEMPT_RT
+ int softirq_disable_cnt;
#endif
#ifdef CONFIG_LOCKDEP
@@ -992,7 +1174,7 @@ struct task_struct {
struct held_lock held_locks[MAX_LOCK_DEPTH];
#endif
-#ifdef CONFIG_UBSAN
+#if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
unsigned int in_ubsan;
#endif
@@ -1002,10 +1184,8 @@ struct task_struct {
/* Stacked block device info: */
struct bio_list *bio_list;
-#ifdef CONFIG_BLOCK
/* Stack plugging: */
struct blk_plug *plug;
-#endif
/* VM state: */
struct reclaim_state *reclaim_state;
@@ -1037,8 +1217,8 @@ struct task_struct {
#ifdef CONFIG_CPUSETS
/* Protected by ->alloc_lock: */
nodemask_t mems_allowed;
- /* Seqence number to catch updates: */
- seqcount_t mems_allowed_seq;
+ /* Sequence number to catch updates: */
+ seqcount_spinlock_t mems_allowed_seq;
int cpuset_mem_spread_rotor;
int cpuset_slab_spread_rotor;
#endif
@@ -1176,10 +1356,28 @@ struct task_struct {
u64 timer_slack_ns;
u64 default_timer_slack_ns;
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
unsigned int kasan_depth;
#endif
+#ifdef CONFIG_KCSAN
+ struct kcsan_ctx kcsan_ctx;
+#ifdef CONFIG_TRACE_IRQFLAGS
+ struct irqtrace_events kcsan_save_irqtrace;
+#endif
+#ifdef CONFIG_KCSAN_WEAK_MEMORY
+ int kcsan_stack_depth;
+#endif
+#endif
+
+#ifdef CONFIG_KMSAN
+ struct kmsan_ctx kmsan_ctx;
+#endif
+
+#if IS_ENABLED(CONFIG_KUNIT)
+ struct kunit *kunit_test;
+#endif
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack: */
int curr_ret_stack;
@@ -1202,9 +1400,6 @@ struct task_struct {
#endif
#ifdef CONFIG_TRACING
- /* State flags for use by tracers: */
- unsigned long trace;
-
/* Bitmask and counter of trace recursion: */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
@@ -1229,6 +1424,9 @@ struct task_struct {
/* KCOV sequence number: */
int kcov_sequence;
+
+ /* Collect coverage from softirq context: */
+ unsigned int kcov_softirq;
#endif
#ifdef CONFIG_MEMCG
@@ -1254,12 +1452,17 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
+ struct kmap_ctrl kmap_ctrl;
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
+# ifdef CONFIG_PREEMPT_RT
+ unsigned long saved_state_change;
+# endif
#endif
int pagefault_disabled;
#ifdef CONFIG_MMU
struct task_struct *oom_reaper_list;
+ struct timer_list oom_reaper_timer;
#endif
#ifdef CONFIG_VMAP_STACK
struct vm_struct *stack_vm_area;
@@ -1275,12 +1478,56 @@ struct task_struct {
/* Used by LSM modules for access restriction: */
void *security;
#endif
+#ifdef CONFIG_BPF_SYSCALL
+ /* Used by BPF task local storage */
+ struct bpf_local_storage __rcu *bpf_storage;
+ /* Used for BPF run context */
+ struct bpf_run_ctx *bpf_ctx;
+#endif
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
unsigned long lowest_stack;
unsigned long prev_lowest_stack;
#endif
+#ifdef CONFIG_X86_MCE
+ void __user *mce_vaddr;
+ __u64 mce_kflags;
+ u64 mce_addr;
+ __u64 mce_ripv : 1,
+ mce_whole_page : 1,
+ __mce_reserved : 62;
+ struct callback_head mce_kill_me;
+ int mce_count;
+#endif
+
+#ifdef CONFIG_KRETPROBES
+ struct llist_head kretprobe_instances;
+#endif
+#ifdef CONFIG_RETHOOK
+ struct llist_head rethooks;
+#endif
+
+#ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
+ /*
+ * If L1D flush is supported on mm context switch,
+ * then this callback head is used to queue kill work
+ * for tasks that are not running on SMT-disabled
+ * cores.
+ */
+ struct callback_head l1d_flush_kill;
+#endif
+
+#ifdef CONFIG_RV
+ /*
+ * Per-task RV monitor. Currently fixed at RV_PER_TASK_MONITORS.
+ * If we find justification for more monitors, we can think
+ * about adding more or developing a dynamic method. So far,
+ * none of these are justified.
+ */
+ union rv_task_monitor rv[RV_PER_TASK_MONITORS];
+#endif
+
/*
* New fields for task_struct should be added above here, so that
* they are included in the randomized portion of task_struct.
@@ -1409,19 +1656,32 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
#define TASK_REPORT_IDLE (TASK_REPORT + 1)
#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
-static inline unsigned int task_state_index(struct task_struct *tsk)
+static inline unsigned int __task_state_index(unsigned int tsk_state,
+ unsigned int tsk_exit_state)
{
- unsigned int tsk_state = READ_ONCE(tsk->state);
- unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
+ unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT;
BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
if (tsk_state == TASK_IDLE)
state = TASK_REPORT_IDLE;
+ /*
+ * We're lying here, but rather than expose a completely new task state
+ * to userspace, we can make this appear as if the task has gone through
+ * a regular rt_mutex_lock() call.
+ */
+ if (tsk_state == TASK_RTLOCK_WAIT)
+ state = TASK_UNINTERRUPTIBLE;
+
return fls(state);
}
+static inline unsigned int task_state_index(struct task_struct *tsk)
+{
+ return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state);
+}
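For example, the single state letter that ps(1) shows can be derived as (a sketch):

	static char example_state_char(struct task_struct *tsk)
	{
		return task_index_to_char(task_state_index(tsk));
	}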
+
static inline char task_index_to_char(unsigned int state)
{
static const char state_char[] = "RSDTtXZPI";
@@ -1455,9 +1715,11 @@ extern struct pid *cad_pid;
/*
* Per process flags
*/
+#define PF_VCPU 0x00000001 /* I'm a virtual CPU */
#define PF_IDLE 0x00000002 /* I am an IDLE thread */
#define PF_EXITING 0x00000004 /* Getting shut down */
-#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
+#define PF_POSTCOREDUMP 0x00000008 /* Coredumps should ignore this task */
+#define PF_IO_WORKER 0x00000010 /* Task is an IO worker */
#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
#define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */
@@ -1467,23 +1729,24 @@ extern struct pid *cad_pid;
#define PF_MEMALLOC 0x00000800 /* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */
-#define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */
+#define PF__HOLE__00004000 0x00004000
#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
-#define PF_FROZEN 0x00010000 /* Frozen for system suspend */
+#define PF__HOLE__00010000 0x00010000
#define PF_KSWAPD 0x00020000 /* I am kswapd */
#define PF_MEMALLOC_NOFS 0x00040000 /* All allocation requests will inherit GFP_NOFS */
#define PF_MEMALLOC_NOIO 0x00080000 /* All allocation requests will inherit GFP_NOIO */
-#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
+#define PF_LOCAL_THROTTLE 0x00100000 /* Throttle writes only against the bdi I write to,
+ * I am cleaning dirty pages from some other bdi. */
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
-#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
-#define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */
-#define PF_UMH 0x02000000 /* I'm an Usermodehelper process */
+#define PF__HOLE__00800000 0x00800000
+#define PF__HOLE__01000000 0x01000000
+#define PF__HOLE__02000000 0x02000000
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
-#define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation request will have _GFP_MOVABLE cleared */
-#define PF_IO_WORKER 0x20000000 /* Task is an IO worker */
-#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
+#define PF_MEMALLOC_PIN 0x10000000 /* Allocation context constrained to zones which allow long term pinning. */
+#define PF__HOLE__20000000 0x20000000
+#define PF__HOLE__40000000 0x40000000
#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */
/*
@@ -1514,7 +1777,7 @@ extern struct pid *cad_pid;
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
-static inline bool is_percpu_thread(void)
+static __always_inline bool is_percpu_thread(void)
{
#ifdef CONFIG_SMP
return (current->flags & PF_NO_SETAFFINITY) &&
@@ -1583,10 +1846,15 @@ current_restore_flags(unsigned long orig_flags, unsigned long flags)
}
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
-extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
+extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
+extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
+extern void release_user_cpus_ptr(struct task_struct *p);
+extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
+extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
+extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
#else
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
@@ -1597,6 +1865,21 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpuma
return -EINVAL;
return 0;
}
+static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node)
+{
+ if (src->user_cpus_ptr)
+ return -EINVAL;
+ return 0;
+}
+static inline void release_user_cpus_ptr(struct task_struct *p)
+{
+ WARN_ON(p->user_cpus_ptr);
+}
+
+static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
+{
+ return 0;
+}
#endif
extern int yield_to(struct task_struct *p, bool preempt);
@@ -1620,6 +1903,9 @@ extern int idle_cpu(int cpu);
extern int available_idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
+extern void sched_set_fifo(struct task_struct *p);
+extern void sched_set_fifo_low(struct task_struct *p);
+extern void sched_set_normal(struct task_struct *p, int nice);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
@@ -1630,7 +1916,7 @@ extern struct task_struct *idle_task(int cpu);
*
* Return: 1 if @p is an idle task. 0 otherwise.
*/
-static inline bool is_idle_task(const struct task_struct *p)
+static __always_inline bool is_idle_task(const struct task_struct *p)
{
return !!(p->flags & PF_IDLE);
}
@@ -1657,10 +1943,7 @@ extern struct thread_info init_thread_info;
extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
#ifdef CONFIG_THREAD_INFO_IN_TASK
-static inline struct thread_info *task_thread_info(struct task_struct *task)
-{
- return &task->thread_info;
-}
+# define task_thread_info(task) (&(task)->thread_info)
#elif !defined(__HAVE_THREAD_FUNCTIONS)
# define task_thread_info(task) ((struct thread_info *)(task)->stack)
#endif
@@ -1708,11 +1991,19 @@ extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
})
#ifdef CONFIG_SMP
-void scheduler_ipi(void);
-extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
+static __always_inline void scheduler_ipi(void)
+{
+ /*
+ * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
+ * TIF_NEED_RESCHED remotely (for the first time) will also send
+ * this IPI.
+ */
+ preempt_fold_need_resched();
+}
+extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
#else
static inline void scheduler_ipi(void) { }
-static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+static inline unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
{
return 1;
}
@@ -1774,22 +2065,82 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
* value indicates whether a reschedule was done in fact.
* cond_resched_lock() will drop the spinlock before scheduling,
*/
-#ifndef CONFIG_PREEMPTION
-extern int _cond_resched(void);
+#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
+extern int __cond_resched(void);
+
+#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+
+DECLARE_STATIC_CALL(cond_resched, __cond_resched);
+
+static __always_inline int _cond_resched(void)
+{
+ return static_call_mod(cond_resched)();
+}
+
+#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+extern int dynamic_cond_resched(void);
+
+static __always_inline int _cond_resched(void)
+{
+ return dynamic_cond_resched();
+}
+
#else
+
+static inline int _cond_resched(void)
+{
+ return __cond_resched();
+}
+
+#endif /* CONFIG_PREEMPT_DYNAMIC */
+
+#else
+
static inline int _cond_resched(void) { return 0; }
-#endif
+
+#endif /* !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) */
#define cond_resched() ({ \
- ___might_sleep(__FILE__, __LINE__, 0); \
+ __might_resched(__FILE__, __LINE__, 0); \
_cond_resched(); \
})
extern int __cond_resched_lock(spinlock_t *lock);
+extern int __cond_resched_rwlock_read(rwlock_t *lock);
+extern int __cond_resched_rwlock_write(rwlock_t *lock);
+
+#define MIGHT_RESCHED_RCU_SHIFT 8
+#define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
+
+#ifndef CONFIG_PREEMPT_RT
+/*
+ * Non RT kernels have an elevated preempt count due to the held lock,
+ * but are not allowed to be inside a RCU read side critical section
+ */
+# define PREEMPT_LOCK_RESCHED_OFFSETS PREEMPT_LOCK_OFFSET
+#else
+/*
+ * spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in
+ * cond_resched*lock() has to take that into account because it checks for
+ * preempt_count() and rcu_preempt_depth().
+ */
+# define PREEMPT_LOCK_RESCHED_OFFSETS \
+ (PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
+#endif
+
+#define cond_resched_lock(lock) ({ \
+ __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
+ __cond_resched_lock(lock); \
+})
+
+#define cond_resched_rwlock_read(lock) ({ \
+ __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
+ __cond_resched_rwlock_read(lock); \
+})
-#define cond_resched_lock(lock) ({ \
- ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
- __cond_resched_lock(lock); \
+#define cond_resched_rwlock_write(lock) ({ \
+ __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
+ __cond_resched_rwlock_write(lock); \
})
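The lock-holding variants are meant for long scans under a spinlock; a sketch (lock, items, and process() are assumptions):

	spin_lock(&lock);
	for (i = 0; i < nr_items; i++) {
		process(&items[i]);		/* hypothetical per-item work */
		cond_resched_lock(&lock);	/* may unlock, schedule, relock */
	}
	spin_unlock(&lock);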
static inline void cond_resched_rcu(void)
@@ -1801,6 +2152,47 @@ static inline void cond_resched_rcu(void)
#endif
}
+#ifdef CONFIG_PREEMPT_DYNAMIC
+
+extern bool preempt_model_none(void);
+extern bool preempt_model_voluntary(void);
+extern bool preempt_model_full(void);
+
+#else
+
+static inline bool preempt_model_none(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT_NONE);
+}
+static inline bool preempt_model_voluntary(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
+}
+static inline bool preempt_model_full(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT);
+}
+
+#endif
+
+static inline bool preempt_model_rt(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT_RT);
+}
+
+/*
+ * Does the preemption model allow non-cooperative preemption?
+ *
+ * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
+ * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the
+ * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
+ * PREEMPT_NONE model.
+ */
+static inline bool preempt_model_preemptible(void)
+{
+ return preempt_model_full() || preempt_model_rt();
+}
+
/*
* Does a critical section need to be broken due to another
* task waiting?: (technically does not depend on CONFIG_PREEMPTION,
@@ -1815,6 +2207,23 @@ static inline int spin_needbreak(spinlock_t *lock)
#endif
}
+/*
+ * Check if a rwlock is contended.
+ * Returns non-zero if there is another task waiting on the rwlock.
+ * Returns zero if the lock is not contended or the system / underlying
+ * rwlock implementation does not support contention detection.
+ * Technically this does not depend on CONFIG_PREEMPTION, but it reflects a
+ * general need for low latency.
+ */
+static inline int rwlock_needbreak(rwlock_t *lock)
+{
+#ifdef CONFIG_PREEMPTION
+ return rwlock_is_contended(lock);
+#else
+ return 0;
+#endif
+}
+
static __always_inline bool need_resched(void)
{
return unlikely(tif_need_resched());
@@ -1827,11 +2236,7 @@ static __always_inline bool need_resched(void)
static inline unsigned int task_cpu(const struct task_struct *p)
{
-#ifdef CONFIG_THREAD_INFO_IN_TASK
- return READ_ONCE(p->cpu);
-#else
return READ_ONCE(task_thread_info(p)->cpu);
-#endif
}
extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
@@ -1849,6 +2254,10 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
+extern bool sched_task_on_rq(struct task_struct *p);
+extern unsigned long get_wchan(struct task_struct *p);
+extern struct task_struct *cpu_curr_snapshot(int cpu);
+
/*
* In order to reduce various lock holder preemption latencies provide an
* interface to see if a vCPU is currently running or not.
@@ -1871,6 +2280,20 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif
+#ifdef CONFIG_SMP
+static inline bool owner_on_cpu(struct task_struct *owner)
+{
+ /*
+ * Due to the lock holder preemption issue, we skip spinning if the
+ * task is not on a CPU or its CPU is preempted.
+ */
+ return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
+}
+
+/* Returns effective CPU energy utilization, as seen by the scheduler */
+unsigned long sched_cpu_util(int cpu);
+#endif /* CONFIG_SMP */
+
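A hedged sketch of the usual consumer, an optimistic spin loop (real lock code also revalidates the owner pointer under RCU):

	static bool example_spin_on_owner(struct task_struct *owner)
	{
		while (owner_on_cpu(owner)) {
			if (need_resched())
				return false;	/* yield instead of spinning */
			cpu_relax();
		}
		return true;			/* owner blocked or released */
	}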
#ifdef CONFIG_RSEQ
/*
@@ -1979,14 +2402,6 @@ static inline void rseq_execve(struct task_struct *t)
#endif
-void __exit_umh(struct task_struct *tsk);
-
-static inline void exit_umh(struct task_struct *tsk)
-{
- if (unlikely(tsk->flags & PF_UMH))
- __exit_umh(tsk);
-}
-
#ifdef CONFIG_DEBUG_RSEQ
void rseq_syscall(struct pt_regs *regs);
@@ -1999,16 +2414,16 @@ static inline void rseq_syscall(struct pt_regs *regs)
#endif
-const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
-char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
-int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);
-
-const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
-const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
-const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);
-
-int sched_trace_rq_cpu(struct rq *rq);
+#ifdef CONFIG_SCHED_CORE
+extern void sched_core_free(struct task_struct *tsk);
+extern void sched_core_fork(struct task_struct *p);
+extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
+ unsigned long uaddr);
+#else
+static inline void sched_core_free(struct task_struct *tsk) { }
+static inline void sched_core_fork(struct task_struct *p) { }
+#endif
-const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
+extern void sched_set_stop_task(int cpu, struct task_struct *stop);
#endif
diff --git a/include/linux/sched/affinity.h b/include/linux/sched/affinity.h
new file mode 100644
index 000000000000..227f5be81bcd
--- /dev/null
+++ b/include/linux/sched/affinity.h
@@ -0,0 +1 @@
+#include <linux/sched.h>
diff --git a/include/linux/sched/cond_resched.h b/include/linux/sched/cond_resched.h
new file mode 100644
index 000000000000..227f5be81bcd
--- /dev/null
+++ b/include/linux/sched/cond_resched.h
@@ -0,0 +1 @@
+#include <linux/sched.h>
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index ecdc6542070f..8270ad7ae14c 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -57,7 +57,8 @@ static inline int get_dumpable(struct mm_struct *mm)
#endif
/* leave room for more dump flags */
#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */
-#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */
+#define MMF_VM_HUGEPAGE 17 /* set when mm is available for
+ khugepaged */
/*
* This one-shot flag is dropped due to necessity of changing exe once again
* on NFS restore
@@ -70,8 +71,16 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */
#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
-#define MMF_OOM_VICTIM 25 /* mm is the oom victim */
-#define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
+#define MMF_OOM_REAP_QUEUED 25 /* mm was queued for oom_reaper */
+#define MMF_MULTIPROCESS 26 /* mm is shared between processes */
+/*
+ * MMF_HAS_PINNED: Whether this mm has pinned any pages. This can either be
+ * replaced in the future by mm.pinned_vm when it becomes stable, or grow into
+ * a counter of its own. We're aggressive on this bit for now: even if the
+ * pinned pages are unpinned later on, we still keep this bit set for the
+ * lifetime of this mm, just for simplicity.
+ */
+#define MMF_HAS_PINNED 27 /* FOLL_PIN has run, never cleared */
#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
index 3ed5aa18593f..bdd31ab93bc5 100644
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -26,7 +26,12 @@ bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy);
static inline unsigned long map_util_freq(unsigned long util,
unsigned long freq, unsigned long cap)
{
- return (freq + (freq >> 2)) * util / cap;
+ return freq * util / cap;
+}
+
+static inline unsigned long map_util_perf(unsigned long util)
+{
+ return util + (util >> 2);
}
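A sketch of how a governor combines the two (max_freq comes from a hypothetical cpufreq policy):

	unsigned long util = sched_cpu_util(cpu);
	unsigned long cap = arch_scale_cpu_capacity(cpu);
	unsigned long freq;

	util = map_util_perf(util);		/* add ~25% headroom */
	freq = map_util_freq(util, max_freq, cap);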
#endif /* CONFIG_CPU_FREQ */
diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h
index 6c9f19a33865..ce3c58286062 100644
--- a/include/linux/sched/cputime.h
+++ b/include/linux/sched/cputime.h
@@ -18,15 +18,16 @@
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-extern void task_cputime(struct task_struct *t,
+extern bool task_cputime(struct task_struct *t,
u64 *utime, u64 *stime);
extern u64 task_gtime(struct task_struct *t);
#else
-static inline void task_cputime(struct task_struct *t,
+static inline bool task_cputime(struct task_struct *t,
u64 *utime, u64 *stime)
{
*utime = t->utime;
*stime = t->stime;
+ return false;
}
static inline u64 task_gtime(struct task_struct *t)
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
index 1aff00b65f3c..7c83d4d5a971 100644
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -6,6 +6,8 @@
* NORMAL/BATCH tasks.
*/
+#include <linux/sched.h>
+
#define MAX_DL_PRIO 0
static inline int dl_prio(int prio)
diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h
index 95fb9e025247..b5035afa2396 100644
--- a/include/linux/sched/debug.h
+++ b/include/linux/sched/debug.h
@@ -14,7 +14,7 @@ extern void dump_cpu_task(int cpu);
/*
* Only dump TASK_* tasks. (0 for all tasks)
*/
-extern void show_state_filter(unsigned long state_filter);
+extern void show_state_filter(unsigned int state_filter);
static inline void show_state(void)
{
@@ -30,7 +30,8 @@ extern void show_regs(struct pt_regs *);
* task), SP is the stack pointer of the first frame that should be shown in the back
* trace (or NULL if the entire call-chain of the task should be shown).
*/
-extern void show_stack(struct task_struct *task, unsigned long *sp);
+extern void show_stack(struct task_struct *task, unsigned long *sp,
+ const char *loglvl);
extern void sched_show_task(struct task_struct *p);
@@ -42,7 +43,7 @@ extern void proc_sched_set_task(struct task_struct *p);
#endif
/* Attach to any functions which should be ignored in wchan output. */
-#define __sched __attribute__((__section__(".sched.text")))
+#define __sched __section(".sched.text")
/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];
diff --git a/include/linux/sched/hotplug.h b/include/linux/sched/hotplug.h
index 9a62ffdd296f..412cdaba33eb 100644
--- a/include/linux/sched/hotplug.h
+++ b/include/linux/sched/hotplug.h
@@ -11,8 +11,10 @@ extern int sched_cpu_activate(unsigned int cpu);
extern int sched_cpu_deactivate(unsigned int cpu);
#ifdef CONFIG_HOTPLUG_CPU
+extern int sched_cpu_wait_empty(unsigned int cpu);
extern int sched_cpu_dying(unsigned int cpu);
#else
+# define sched_cpu_wait_empty NULL
# define sched_cpu_dying NULL
#endif
diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h
index 22873d276be6..d73d314d59c6 100644
--- a/include/linux/sched/idle.h
+++ b/include/linux/sched/idle.h
@@ -11,7 +11,11 @@ enum cpu_idle_type {
CPU_MAX_IDLE_TYPES
};
+#ifdef CONFIG_SMP
extern void wake_up_if_idle(int cpu);
+#else
+static inline void wake_up_if_idle(int cpu) { }
+#endif
/*
* Idle thread specific functions to determine the need_resched
diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
index 0fbcbacd1b29..8c15abd67aed 100644
--- a/include/linux/sched/isolation.h
+++ b/include/linux/sched/isolation.h
@@ -5,53 +5,55 @@
#include <linux/init.h>
#include <linux/tick.h>
-enum hk_flags {
- HK_FLAG_TIMER = 1,
- HK_FLAG_RCU = (1 << 1),
- HK_FLAG_MISC = (1 << 2),
- HK_FLAG_SCHED = (1 << 3),
- HK_FLAG_TICK = (1 << 4),
- HK_FLAG_DOMAIN = (1 << 5),
- HK_FLAG_WQ = (1 << 6),
- HK_FLAG_MANAGED_IRQ = (1 << 7),
+enum hk_type {
+ HK_TYPE_TIMER,
+ HK_TYPE_RCU,
+ HK_TYPE_MISC,
+ HK_TYPE_SCHED,
+ HK_TYPE_TICK,
+ HK_TYPE_DOMAIN,
+ HK_TYPE_WQ,
+ HK_TYPE_MANAGED_IRQ,
+ HK_TYPE_KTHREAD,
+ HK_TYPE_MAX
};
#ifdef CONFIG_CPU_ISOLATION
DECLARE_STATIC_KEY_FALSE(housekeeping_overridden);
-extern int housekeeping_any_cpu(enum hk_flags flags);
-extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags);
-extern bool housekeeping_enabled(enum hk_flags flags);
-extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags);
-extern bool housekeeping_test_cpu(int cpu, enum hk_flags flags);
+extern int housekeeping_any_cpu(enum hk_type type);
+extern const struct cpumask *housekeeping_cpumask(enum hk_type type);
+extern bool housekeeping_enabled(enum hk_type type);
+extern void housekeeping_affine(struct task_struct *t, enum hk_type type);
+extern bool housekeeping_test_cpu(int cpu, enum hk_type type);
extern void __init housekeeping_init(void);
#else
-static inline int housekeeping_any_cpu(enum hk_flags flags)
+static inline int housekeeping_any_cpu(enum hk_type type)
{
return smp_processor_id();
}
-static inline const struct cpumask *housekeeping_cpumask(enum hk_flags flags)
+static inline const struct cpumask *housekeeping_cpumask(enum hk_type type)
{
return cpu_possible_mask;
}
-static inline bool housekeeping_enabled(enum hk_flags flags)
+static inline bool housekeeping_enabled(enum hk_type type)
{
return false;
}
static inline void housekeeping_affine(struct task_struct *t,
- enum hk_flags flags) { }
+ enum hk_type type) { }
static inline void housekeeping_init(void) { }
#endif /* CONFIG_CPU_ISOLATION */
-static inline bool housekeeping_cpu(int cpu, enum hk_flags flags)
+static inline bool housekeeping_cpu(int cpu, enum hk_type type)
{
#ifdef CONFIG_CPU_ISOLATION
if (static_branch_unlikely(&housekeeping_overridden))
- return housekeeping_test_cpu(cpu, flags);
+ return housekeeping_test_cpu(cpu, type);
#endif
return true;
}
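Typical use, sketched (task, cpu, wq, and work are assumptions):

	/* Keep a maintenance kthread off isolated CPUs. */
	housekeeping_affine(task, HK_TYPE_KTHREAD);

	/* Or check a single CPU before queueing work there. */
	if (housekeeping_cpu(cpu, HK_TYPE_DOMAIN))
		queue_work_on(cpu, wq, &work);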
diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h
index fa067de9f1a9..68876d0a7ef9 100644
--- a/include/linux/sched/jobctl.h
+++ b/include/linux/sched/jobctl.h
@@ -19,6 +19,10 @@ struct task_struct;
#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
#define JOBCTL_TRAP_FREEZE_BIT 23 /* trap for cgroup freezer */
+#define JOBCTL_PTRACE_FROZEN_BIT 24 /* frozen for ptrace */
+
+#define JOBCTL_STOPPED_BIT 26 /* do_signal_stop() */
+#define JOBCTL_TRACED_BIT 27 /* ptrace_stop() */
#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT)
@@ -28,6 +32,10 @@ struct task_struct;
#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT)
#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT)
#define JOBCTL_TRAP_FREEZE (1UL << JOBCTL_TRAP_FREEZE_BIT)
+#define JOBCTL_PTRACE_FROZEN (1UL << JOBCTL_PTRACE_FROZEN_BIT)
+
+#define JOBCTL_STOPPED (1UL << JOBCTL_STOPPED_BIT)
+#define JOBCTL_TRACED (1UL << JOBCTL_TRACED_BIT)
#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
diff --git a/include/linux/sched/loadavg.h b/include/linux/sched/loadavg.h
index 4859bea47a7b..83ec54b65e79 100644
--- a/include/linux/sched/loadavg.h
+++ b/include/linux/sched/loadavg.h
@@ -43,6 +43,6 @@ extern unsigned long calc_load_n(unsigned long load, unsigned long exp,
#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
-extern void calc_global_load(unsigned long ticks);
+extern void calc_global_load(void);
#endif /* _LINUX_SCHED_LOADAVG_H */
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index c49257a3b510..2a243616f222 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -8,6 +8,7 @@
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>
+#include <linux/ioasid.h>
/*
* Routines for handling mm_structs
@@ -23,12 +24,12 @@ extern struct mm_struct *mm_alloc(void);
* will still exist later on and mmget_not_zero() has to be used before
* accessing it.
*
- * This is a preferred way to to pin @mm for a longer/unbounded amount
+ * This is a preferred way to pin @mm for a longer/unbounded amount
* of time.
*
* Use mmdrop() to release the reference acquired by mmgrab().
*
- * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
+ * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
* of &mm_struct.mm_count vs &mm_struct.mm_users.
*/
static inline void mmgrab(struct mm_struct *mm)
@@ -49,31 +50,35 @@ static inline void mmdrop(struct mm_struct *mm)
__mmdrop(mm);
}
+#ifdef CONFIG_PREEMPT_RT
/*
- * This has to be called after a get_task_mm()/mmget_not_zero()
- * followed by taking the mmap_sem for writing before modifying the
- * vmas or anything the coredump pretends not to change from under it.
- *
- * It also has to be called when mmgrab() is used in the context of
- * the process, but then the mm_count refcount is transferred outside
- * the context of the process to run down_write() on that pinned mm.
- *
- * NOTE: find_extend_vma() called from GUP context is the only place
- * that can modify the "mm" (notably the vm_start/end) under mmap_sem
- * for reading and outside the context of the process, so it is also
- * the only case that holds the mmap_sem for reading that must call
- * this function. Generally if the mmap_sem is hold for reading
- * there's no need of this check after get_task_mm()/mmget_not_zero().
- *
- * This function can be obsoleted and the check can be removed, after
- * the coredump code will hold the mmap_sem for writing before
- * invoking the ->core_dump methods.
+ * RCU callback for delayed mm drop. Not strictly RCU, but call_rcu() is
+ * by far the least expensive way to do that.
*/
-static inline bool mmget_still_valid(struct mm_struct *mm)
+static inline void __mmdrop_delayed(struct rcu_head *rhp)
{
- return likely(!mm->core_state);
+ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
+
+ __mmdrop(mm);
}
+/*
+ * Invoked from finish_task_switch(). Delegates the heavy lifting on RT
+ * kernels via RCU.
+ */
+static inline void mmdrop_sched(struct mm_struct *mm)
+{
+ /* Provides a full memory barrier. See mmdrop() */
+ if (atomic_dec_and_test(&mm->mm_count))
+ call_rcu(&mm->delayed_drop, __mmdrop_delayed);
+}
+#else
+static inline void mmdrop_sched(struct mm_struct *mm)
+{
+ mmdrop(mm);
+}
+#endif
+
/**
* mmget() - Pin the address space associated with a &struct mm_struct.
* @mm: The address space to pin.
@@ -87,7 +92,7 @@ static inline bool mmget_still_valid(struct mm_struct *mm)
*
* Use mmput() to release the reference acquired by mmget().
*
- * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
+ * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
* of &mm_struct.mm_count vs &mm_struct.mm_users.
*/
static inline void mmget(struct mm_struct *mm)
@@ -131,6 +136,14 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
#endif /* CONFIG_MEMCG */
#ifdef CONFIG_MMU
+#ifndef arch_get_mmap_end
+#define arch_get_mmap_end(addr, len, flags) (TASK_SIZE)
+#endif
+
+#ifndef arch_get_mmap_base
+#define arch_get_mmap_base(addr, base) (base)
+#endif
+
extern void arch_pick_mmap_layout(struct mm_struct *mm,
struct rlimit *rlim_stack);
extern unsigned long
@@ -140,6 +153,15 @@ extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags);
+
+unsigned long
+generic_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+unsigned long
+generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
struct rlimit *rlim_stack) {}
@@ -165,7 +187,8 @@ static inline bool in_vfork(struct task_struct *tsk)
* another oom-unkillable task does this it should blame itself.
*/
rcu_read_lock();
- ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
+ ret = tsk->vfork_done &&
+ rcu_dereference(tsk->real_parent)->mm == tsk->mm;
rcu_read_unlock();
return ret;
@@ -175,40 +198,82 @@ static inline bool in_vfork(struct task_struct *tsk)
* Applies per-task gfp context to the given allocation flags.
* PF_MEMALLOC_NOIO implies GFP_NOIO
* PF_MEMALLOC_NOFS implies GFP_NOFS
- * PF_MEMALLOC_NOCMA implies no allocation from CMA region.
+ * PF_MEMALLOC_PIN implies !GFP_MOVABLE
*/
static inline gfp_t current_gfp_context(gfp_t flags)
{
- if (unlikely(current->flags &
- (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_NOCMA))) {
+ unsigned int pflags = READ_ONCE(current->flags);
+
+ if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) {
/*
* NOIO implies both NOIO and NOFS and it is a weaker context
* so always make sure it takes precedence
*/
- if (current->flags & PF_MEMALLOC_NOIO)
+ if (pflags & PF_MEMALLOC_NOIO)
flags &= ~(__GFP_IO | __GFP_FS);
- else if (current->flags & PF_MEMALLOC_NOFS)
+ else if (pflags & PF_MEMALLOC_NOFS)
flags &= ~__GFP_FS;
-#ifdef CONFIG_CMA
- if (current->flags & PF_MEMALLOC_NOCMA)
+
+ if (pflags & PF_MEMALLOC_PIN)
flags &= ~__GFP_MOVABLE;
-#endif
}
return flags;
}
#ifdef CONFIG_LOCKDEP
-extern void __fs_reclaim_acquire(void);
-extern void __fs_reclaim_release(void);
+extern void __fs_reclaim_acquire(unsigned long ip);
+extern void __fs_reclaim_release(unsigned long ip);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
-static inline void __fs_reclaim_acquire(void) { }
-static inline void __fs_reclaim_release(void) { }
+static inline void __fs_reclaim_acquire(unsigned long ip) { }
+static inline void __fs_reclaim_release(unsigned long ip) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif
+/* Any memory-allocation retry loop should use
+ * memalloc_retry_wait(), and pass the flags for the most
+ * constrained allocation attempt that might have failed.
+ * This provides useful documentation of where loops are,
+ * and a central place to fine tune the waiting as the MM
+ * implementation changes.
+ */
+static inline void memalloc_retry_wait(gfp_t gfp_flags)
+{
+ /* We use io_schedule_timeout because waiting for memory
+ * typically includes waiting for dirty pages to be
+ * written out, which requires IO.
+ */
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ gfp_flags = current_gfp_context(gfp_flags);
+ if (gfpflags_allow_blocking(gfp_flags) &&
+ !(gfp_flags & __GFP_NORETRY))
+ /* Probably waited already, no need for much more */
+ io_schedule_timeout(1);
+ else
+ /* Probably didn't wait, and has now released a lock,
+ * so now is a good time to wait
+ */
+ io_schedule_timeout(HZ/50);
+}
+
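+/*
+ * A hedged sketch of the retry-loop shape the comment above prescribes,
+ * passing the flags of the most constrained attempt that failed
+ * (illustrative only; any allocation call could stand in here):
+ *
+ *	struct page *page;
+ *
+ *	do {
+ *		page = alloc_pages(GFP_KERNEL | __GFP_NORETRY, 0);
+ *		if (!page)
+ *			memalloc_retry_wait(GFP_KERNEL | __GFP_NORETRY);
+ *	} while (!page);
+ */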
+/**
+ * might_alloc - Mark possible allocation sites
+ * @gfp_mask: gfp_t flags that would be used to allocate
+ *
+ * Similar to might_sleep() and other annotations, this can be used in functions
+ * that might allocate, but often don't. Compiles to nothing without
+ * CONFIG_LOCKDEP. Includes a conditional might_sleep() if @gfp allows blocking.
+ */
+static inline void might_alloc(gfp_t gfp_mask)
+{
+ fs_reclaim_acquire(gfp_mask);
+ fs_reclaim_release(gfp_mask);
+
+ might_sleep_if(gfpflags_allow_blocking(gfp_mask));
+}
+
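+/*
+ * A minimal might_alloc() usage sketch, assuming a hypothetical cache
+ * lookup that only sometimes falls back to allocating:
+ *
+ *	static void *get_buf(struct my_cache *c, gfp_t gfp)
+ *	{
+ *		might_alloc(gfp);	// annotate even the cached fast path
+ *
+ *		if (c->cached)
+ *			return c->cached;
+ *		return kmalloc(c->size, gfp);
+ *	}
+ */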
/**
* memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
*
@@ -232,7 +297,7 @@ static inline unsigned int memalloc_noio_save(void)
* @flags: Flags to restore.
*
* Ends the implicit GFP_NOIO scope started by memalloc_noio_save function.
- * Always make sure that that the given flags is the return value from the
+ * Always make sure that the given flags is the return value from the
* pairing memalloc_noio_save call.
*/
static inline void memalloc_noio_restore(unsigned int flags)
@@ -263,7 +328,7 @@ static inline unsigned int memalloc_nofs_save(void)
* @flags: Flags to restore.
*
* Ends the implicit GFP_NOFS scope started by memalloc_nofs_save function.
- * Always make sure that that the given flags is the return value from the
+ * Always make sure that the given flags is the return value from the
* pairing memalloc_nofs_save call.
*/
static inline void memalloc_nofs_restore(unsigned int flags)
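
The NOIO/NOFS (and, below, PIN) scope helpers share one save/modify/restore discipline: the value passed to the restore call must be the one returned by the pairing save. A sketch, assuming a hypothetical filesystem path that must not recurse into FS reclaim:

    unsigned int nofs_flags;
    void *obj;

    nofs_flags = memalloc_nofs_save();
    /* every allocation in this scope is implicitly GFP_NOFS */
    obj = kzalloc(128, GFP_KERNEL);
    memalloc_nofs_restore(nofs_flags);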
@@ -283,64 +348,52 @@ static inline void memalloc_noreclaim_restore(unsigned int flags)
current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}
-#ifdef CONFIG_CMA
-static inline unsigned int memalloc_nocma_save(void)
+static inline unsigned int memalloc_pin_save(void)
{
- unsigned int flags = current->flags & PF_MEMALLOC_NOCMA;
+ unsigned int flags = current->flags & PF_MEMALLOC_PIN;
- current->flags |= PF_MEMALLOC_NOCMA;
+ current->flags |= PF_MEMALLOC_PIN;
return flags;
}
-static inline void memalloc_nocma_restore(unsigned int flags)
-{
- current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags;
-}
-#else
-static inline unsigned int memalloc_nocma_save(void)
+static inline void memalloc_pin_restore(unsigned int flags)
{
- return 0;
+ current->flags = (current->flags & ~PF_MEMALLOC_PIN) | flags;
}
-static inline void memalloc_nocma_restore(unsigned int flags)
-{
-}
-#endif
-
#ifdef CONFIG_MEMCG
+DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
/**
- * memalloc_use_memcg - Starts the remote memcg charging scope.
+ * set_active_memcg - Starts the remote memcg charging scope.
* @memcg: memcg to charge.
*
* This function marks the beginning of the remote memcg charging scope. All the
* __GFP_ACCOUNT allocations till the end of the scope will be charged to the
* given memcg.
*
- * NOTE: This function is not nesting safe.
+ * NOTE: This function can nest. Users must save the return value and
+ * reset the previous value after their own charging scope is over.
*/
-static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
+static inline struct mem_cgroup *
+set_active_memcg(struct mem_cgroup *memcg)
{
- WARN_ON_ONCE(current->active_memcg);
- current->active_memcg = memcg;
-}
+ struct mem_cgroup *old;
+
+ if (!in_task()) {
+ old = this_cpu_read(int_active_memcg);
+ this_cpu_write(int_active_memcg, memcg);
+ } else {
+ old = current->active_memcg;
+ current->active_memcg = memcg;
+ }
-/**
- * memalloc_unuse_memcg - Ends the remote memcg charging scope.
- *
- * This function marks the end of the remote memcg charging scope started by
- * memalloc_use_memcg().
- */
-static inline void memalloc_unuse_memcg(void)
-{
- current->active_memcg = NULL;
+ return old;
}
#else
-static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
-{
-}
-
-static inline void memalloc_unuse_memcg(void)
+static inline struct mem_cgroup *
+set_active_memcg(struct mem_cgroup *memcg)
{
+ return NULL;
}
#endif
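
Since set_active_memcg() now returns the previous value, nesting works by saving and restoring it, as the updated NOTE requires. A hedged usage sketch (memcg and obj are placeholders):

    struct mem_cgroup *old_memcg;
    void *obj;

    old_memcg = set_active_memcg(memcg);
    /* __GFP_ACCOUNT allocations here are charged to @memcg */
    obj = kmalloc(256, GFP_KERNEL | __GFP_ACCOUNT);
    /* restoring the saved value is what makes this nest-safe */
    set_active_memcg(old_memcg);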
@@ -352,10 +405,13 @@ enum {
MEMBARRIER_STATE_GLOBAL_EXPEDITED = (1U << 3),
MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = (1U << 4),
MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = (1U << 5),
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = (1U << 6),
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = (1U << 7),
};
enum {
MEMBARRIER_FLAG_SYNC_CORE = (1U << 0),
+ MEMBARRIER_FLAG_RSEQ = (1U << 1),
};
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
@@ -374,6 +430,8 @@ static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
extern void membarrier_exec_mmap(struct mm_struct *mm);
+extern void membarrier_update_current_mm(struct mm_struct *next_mm);
+
#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
@@ -388,6 +446,34 @@ static inline void membarrier_exec_mmap(struct mm_struct *mm)
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
+static inline void membarrier_update_current_mm(struct mm_struct *next_mm)
+{
+}
+#endif
+
+#ifdef CONFIG_IOMMU_SVA
+static inline void mm_pasid_init(struct mm_struct *mm)
+{
+ mm->pasid = INVALID_IOASID;
+}
+
+/* Associate a PASID with an mm_struct: */
+static inline void mm_pasid_set(struct mm_struct *mm, u32 pasid)
+{
+ mm->pasid = pasid;
+}
+
+static inline void mm_pasid_drop(struct mm_struct *mm)
+{
+ if (pasid_valid(mm->pasid)) {
+ ioasid_free(mm->pasid);
+ mm->pasid = INVALID_IOASID;
+ }
+}
+#else
+static inline void mm_pasid_init(struct mm_struct *mm) {}
+static inline void mm_pasid_set(struct mm_struct *mm, u32 pasid) {}
+static inline void mm_pasid_drop(struct mm_struct *mm) {}
#endif
#endif /* _LINUX_SCHED_MM_H */
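
The mm_pasid_*() helpers added above form a simple lifecycle; a sketch of the intended call sites (mm and pasid are whatever the caller holds, the exact call sites are an assumption):

    /* at mm creation, before any IOMMU SVA use */
    mm_pasid_init(mm);

    /* once an SVA bind has allocated a PASID for this mm */
    mm_pasid_set(mm, pasid);

    /* at mm teardown; frees the IOASID only if one was assigned */
    mm_pasid_drop(mm);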
diff --git a/include/linux/sched/posix-timers.h b/include/linux/sched/posix-timers.h
new file mode 100644
index 000000000000..523a381d6c88
--- /dev/null
+++ b/include/linux/sched/posix-timers.h
@@ -0,0 +1 @@
+#include <linux/posix-timers.h>
diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
index 7d64feafc408..ab83d85e1183 100644
--- a/include/linux/sched/prio.h
+++ b/include/linux/sched/prio.h
@@ -11,16 +11,9 @@
* priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
* tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
* values are inverted: lower p->prio value means higher priority.
- *
- * The MAX_USER_RT_PRIO value allows the actual maximum
- * RT priority to be separate from the value exported to
- * user-space. This allows kernel threads to set their
- * priority to a value higher than any user task. Note:
- * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
*/
-#define MAX_USER_RT_PRIO 100
-#define MAX_RT_PRIO MAX_USER_RT_PRIO
+#define MAX_RT_PRIO 100
#define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
#define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
@@ -34,15 +27,6 @@
#define PRIO_TO_NICE(prio) ((prio) - DEFAULT_PRIO)
/*
- * 'User priority' is the nice value converted to something we
- * can work with better when scaling various scheduler parameters,
- * it's a [ 0 ... 39 ] range.
- */
-#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
-#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
-#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
-
-/*
* Convert nice value [19,-20] to rlimit style value [1,40].
*/
static inline long nice_to_rlimit(long nice)
diff --git a/include/linux/sched/rseq_api.h b/include/linux/sched/rseq_api.h
new file mode 100644
index 000000000000..cf2af72693e1
--- /dev/null
+++ b/include/linux/sched/rseq_api.h
@@ -0,0 +1 @@
+#include <linux/rseq.h>
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index e5af028c08b4..994c25640e15 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -39,20 +39,12 @@ static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p)
}
extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task);
extern void rt_mutex_adjust_pi(struct task_struct *p);
-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-{
- return tsk->pi_blocked_on != NULL;
-}
#else
static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
return NULL;
}
# define rt_mutex_adjust_pi(p) do { } while (0)
-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-{
- return false;
-}
#endif
extern void normalize_rt_tasks(void);
diff --git a/include/linux/sched/sd_flags.h b/include/linux/sched/sd_flags.h
new file mode 100644
index 000000000000..57bde66d95f7
--- /dev/null
+++ b/include/linux/sched/sd_flags.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sched-domains (multiprocessor balancing) flag declarations.
+ */
+
+#ifndef SD_FLAG
+# error "Incorrect import of SD flags definitions"
+#endif
+
+/*
+ * Hierarchical metaflags
+ *
+ * SHARED_CHILD: These flags are meant to be set from the base domain upwards.
+ * If a domain has this flag set, all of its children should have it set. This
+ * is usually because the flag describes some shared resource (all CPUs in that
+ * domain share the same resource), or because they are tied to a scheduling
+ * behaviour that we want to disable at some point in the hierarchy for
+ * scalability reasons.
+ *
+ * In those cases it doesn't make sense to have the flag set for a domain but
+ * not have it in (some of) its children: sched domains ALWAYS span their child
+ * domains, so operations done with parent domains will cover CPUs in the lower
+ * child domains.
+ *
+ *
+ * SHARED_PARENT: These flags are meant to be set from the highest domain
+ * downwards. If a domain has this flag set, all of its parents should have it
+ * set. This is usually for topology properties that start to appear above a
+ * certain level (e.g. domain starts spanning CPUs outside of the base CPU's
+ * socket).
+ */
+#define SDF_SHARED_CHILD 0x1
+#define SDF_SHARED_PARENT 0x2
+
+/*
+ * Behavioural metaflags
+ *
+ * NEEDS_GROUPS: These flags are only relevant if the domain they are set on has
+ * more than one group. This is usually for balancing flags (load balancing
+ * involves equalizing a metric between groups), or for flags describing some
+ * shared resource (which would be shared between groups).
+ */
+#define SDF_NEEDS_GROUPS 0x4
+
+/*
+ * Balance when about to become idle
+ *
+ * SHARED_CHILD: Set from the base domain up to cpuset.sched_relax_domain_level.
+ * NEEDS_GROUPS: Load balancing flag.
+ */
+SD_FLAG(SD_BALANCE_NEWIDLE, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+
+/*
+ * Balance on exec
+ *
+ * SHARED_CHILD: Set from the base domain up to the NUMA reclaim level.
+ * NEEDS_GROUPS: Load balancing flag.
+ */
+SD_FLAG(SD_BALANCE_EXEC, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+
+/*
+ * Balance on fork, clone
+ *
+ * SHARED_CHILD: Set from the base domain up to the NUMA reclaim level.
+ * NEEDS_GROUPS: Load balancing flag.
+ */
+SD_FLAG(SD_BALANCE_FORK, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+
+/*
+ * Balance on wakeup
+ *
+ * SHARED_CHILD: Set from the base domain up to cpuset.sched_relax_domain_level.
+ * NEEDS_GROUPS: Load balancing flag.
+ */
+SD_FLAG(SD_BALANCE_WAKE, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+
+/*
+ * Consider waking task on waking CPU.
+ *
+ * SHARED_CHILD: Set from the base domain up to the NUMA reclaim level.
+ */
+SD_FLAG(SD_WAKE_AFFINE, SDF_SHARED_CHILD)
+
+/*
+ * Domain members have different CPU capacities
+ *
+ * SHARED_PARENT: Set from the topmost domain down to the first domain where
+ * asymmetry is detected.
+ * NEEDS_GROUPS: Per-CPU capacity is asymmetric between groups.
+ */
+SD_FLAG(SD_ASYM_CPUCAPACITY, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
+
+/*
+ * Domain members have different CPU capacities spanning all unique CPU
+ * capacity values.
+ *
+ * SHARED_PARENT: Set from the topmost domain down to the first domain where
+ * all available CPU capacities are visible.
+ * NEEDS_GROUPS: Per-CPU capacity is asymmetric between groups.
+ */
+SD_FLAG(SD_ASYM_CPUCAPACITY_FULL, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
+
+/*
+ * Domain members share CPU capacity (i.e. SMT)
+ *
+ * SHARED_CHILD: Set from the base domain up until spanned CPUs no longer share
+ * CPU capacity.
+ * NEEDS_GROUPS: Capacity is shared between groups.
+ */
+SD_FLAG(SD_SHARE_CPUCAPACITY, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+
+/*
+ * Domain members share CPU package resources (i.e. caches)
+ *
+ * SHARED_CHILD: Set from the base domain up until spanned CPUs no longer share
+ * the same cache(s).
+ * NEEDS_GROUPS: Caches are shared between groups.
+ */
+SD_FLAG(SD_SHARE_PKG_RESOURCES, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+
+/*
+ * Only a single load balancing instance
+ *
+ * SHARED_PARENT: Set for all NUMA levels above NODE. Could be set from a
+ * different level upwards, but it doesn't change that if a
+ * domain has this flag set, then all of its parents need to have
+ * it too (otherwise the serialization doesn't make sense).
+ * NEEDS_GROUPS: No point in preserving domain if it has a single group.
+ */
+SD_FLAG(SD_SERIALIZE, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
+
+/*
+ * Place busy tasks earlier in the domain
+ *
+ * SHARED_CHILD: Usually set on the SMT level. Technically could be set further
+ * up, but currently assumed to be set from the base domain
+ * upwards (see update_top_cache_domain()).
+ * NEEDS_GROUPS: Load balancing flag.
+ */
+SD_FLAG(SD_ASYM_PACKING, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+
+/*
+ * Prefer to place tasks in a sibling domain
+ *
+ * Set up until domains start spanning NUMA nodes. Close to being a SHARED_CHILD
+ * flag, but cleared below domains with SD_ASYM_CPUCAPACITY.
+ *
+ * NEEDS_GROUPS: Load balancing flag.
+ */
+SD_FLAG(SD_PREFER_SIBLING, SDF_NEEDS_GROUPS)
+
+/*
+ * sched_groups of this level overlap
+ *
+ * SHARED_PARENT: Set for all NUMA levels above NODE.
+ * NEEDS_GROUPS: Overlaps can only exist with more than one group.
+ */
+SD_FLAG(SD_OVERLAP, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
+
+/*
+ * Cross-node balancing
+ *
+ * SHARED_PARENT: Set for all NUMA levels above NODE.
+ * NEEDS_GROUPS: No point in preserving domain if it has a single group.
+ */
+SD_FLAG(SD_NUMA, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
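
This new header is an X-macro table: consumers define SD_FLAG(name, mflags) and include the file, so one list generates enum indexes, bit values, and debug metadata (see the include/linux/sched/topology.h hunk later in this diff). A hedged sketch of another consumer built the same way, mirroring the sd_flag_debug[] shape; the table name here is hypothetical:

    #define SD_FLAG(name, mflags) { .meta_flags = (mflags), .name = #name },
    static const struct sd_flag_debug my_sd_flag_table[] = {
    #include <linux/sched/sd_flags.h>
    };
    #undef SD_FLAG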
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 88050259c466..20099268fa25 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -10,6 +10,8 @@
#include <linux/cred.h>
#include <linux/refcount.h>
#include <linux/posix-timers.h>
+#include <linux/mm_types.h>
+#include <asm/ptrace.h>
/*
* Types defining task->signal and task->sighand and APIs using them:
@@ -70,6 +72,17 @@ struct multiprocess_signals {
struct hlist_node node;
};
+struct core_thread {
+ struct task_struct *task;
+ struct core_thread *next;
+};
+
+struct core_state {
+ atomic_t nr_threads;
+ struct core_thread dumper;
+ struct completion startup;
+};
+
/*
* NOTE! "signal_struct" does not have its own
* locking, because a shared signal_struct always
@@ -81,6 +94,7 @@ struct signal_struct {
refcount_t sigcnt;
atomic_t live;
int nr_threads;
+ int quick_threads;
struct list_head thread_head;
wait_queue_head_t wait_chldexit; /* for wait4() */
@@ -96,18 +110,16 @@ struct signal_struct {
/* thread group exit support */
int group_exit_code;
- /* overloaded:
- * - notify group_exit_task when ->count is equal to notify_count
- * - everyone except group_exit_task is stopped during signal delivery
- * of fatal signals, group_exit_task processes the signal.
- */
+ /* notify group_exec_task when notify_count is less than or equal to 0 */
int notify_count;
- struct task_struct *group_exit_task;
+ struct task_struct *group_exec_task;
/* thread group stop support, overloads group_exit_code too */
int group_stop_count;
unsigned int flags; /* see SIGNAL_* flags below */
+ struct core_state *core_state; /* coredumping support */
+
/*
* PR_SET_CHILD_SUBREAPER marks a process, like a service
* manager, to re-parent orphan (double-forking) child processes
@@ -224,7 +236,15 @@ struct signal_struct {
struct mutex cred_guard_mutex; /* guard against foreign influences on
* credential calculations
- * (notably. ptrace) */
+ * (notably, ptrace)
+ * Deprecated: do not use in new code.
+ * Use exec_update_lock instead.
+ */
+ struct rw_semaphore exec_update_lock; /* Held while task_struct is
+ * being updated during exec,
+ * and may have inconsistent
+ * permissions.
+ */
} __randomize_layout;
/*
@@ -233,7 +253,6 @@ struct signal_struct {
#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */
-#define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */
/*
* Pending notifications to parent.
*/
@@ -249,31 +268,25 @@ struct signal_struct {
static inline void signal_set_stop_flags(struct signal_struct *sig,
unsigned int flags)
{
- WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
+ WARN_ON(sig->flags & SIGNAL_GROUP_EXIT);
sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}
-/* If true, all threads except ->group_exit_task have pending SIGKILL */
-static inline int signal_group_exit(const struct signal_struct *sig)
-{
- return (sig->flags & SIGNAL_GROUP_EXIT) ||
- (sig->group_exit_task != NULL);
-}
-
extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
-extern int dequeue_signal(struct task_struct *task,
- sigset_t *mask, kernel_siginfo_t *info);
+extern int dequeue_signal(struct task_struct *task, sigset_t *mask,
+ kernel_siginfo_t *info, enum pid_type *type);
static inline int kernel_dequeue_signal(void)
{
struct task_struct *task = current;
kernel_siginfo_t __info;
+ enum pid_type __type;
int ret;
spin_lock_irq(&task->sighand->siglock);
- ret = dequeue_signal(task, &task->blocked, &__info);
+ ret = dequeue_signal(task, &task->blocked, &__info, &__type);
spin_unlock_irq(&task->sighand->siglock);
return ret;
@@ -282,17 +295,14 @@ static inline int kernel_dequeue_signal(void)
static inline void kernel_signal_stop(void)
{
spin_lock_irq(&current->sighand->siglock);
- if (current->jobctl & JOBCTL_STOP_DEQUEUED)
+ if (current->jobctl & JOBCTL_STOP_DEQUEUED) {
+ current->jobctl |= JOBCTL_STOPPED;
set_special_state(TASK_STOPPED);
+ }
spin_unlock_irq(&current->sighand->siglock);
schedule();
}
-#ifdef __ARCH_SI_TRAPNO
-# define ___ARCH_SI_TRAPNO(_a1) , _a1
-#else
-# define ___ARCH_SI_TRAPNO(_a1)
-#endif
#ifdef __ia64__
# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3
#else
@@ -300,14 +310,11 @@ static inline void kernel_signal_stop(void)
#endif
int force_sig_fault_to_task(int sig, int code, void __user *addr
- ___ARCH_SI_TRAPNO(int trapno)
___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
, struct task_struct *t);
int force_sig_fault(int sig, int code, void __user *addr
- ___ARCH_SI_TRAPNO(int trapno)
___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr));
int send_sig_fault(int sig, int code, void __user *addr
- ___ARCH_SI_TRAPNO(int trapno)
___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
, struct task_struct *t);
@@ -316,8 +323,13 @@ int send_sig_mceerr(int code, void __user *, short, struct task_struct *);
int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
int force_sig_pkuerr(void __user *addr, u32 pkey);
+int send_sig_perf(void __user *addr, u32 type, u64 sig_data);
int force_sig_ptrace_errno_trap(int errno, void __user *addr);
+int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno);
+int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
+ struct task_struct *t);
+int force_sig_seccomp(int syscall, int reason, bool force_coredump);
extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern void force_sigsegv(int sig);
@@ -331,6 +343,8 @@ extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int);
+extern void force_fatal_sig(int);
+extern void force_exit_sig(int);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
@@ -338,17 +352,55 @@ extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
+static inline void clear_notify_signal(void)
+{
+ clear_thread_flag(TIF_NOTIFY_SIGNAL);
+ smp_mb__after_atomic();
+}
+
+/*
+ * Returns 'true' if kick_process() is needed to force a transition from
+ * user -> kernel to guarantee expedient run of TWA_SIGNAL based task_work.
+ */
+static inline bool __set_notify_signal(struct task_struct *task)
+{
+ return !test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) &&
+ !wake_up_state(task, TASK_INTERRUPTIBLE);
+}
+
+/*
+ * Called to break out of interruptible wait loops, and enter the
+ * exit_to_user_mode_loop().
+ */
+static inline void set_notify_signal(struct task_struct *task)
+{
+ if (__set_notify_signal(task))
+ kick_process(task);
+}
+
static inline int restart_syscall(void)
{
set_tsk_thread_flag(current, TIF_SIGPENDING);
return -ERESTARTNOINTR;
}
-static inline int signal_pending(struct task_struct *p)
+static inline int task_sigpending(struct task_struct *p)
{
return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}
+static inline int signal_pending(struct task_struct *p)
+{
+ /*
+ * TIF_NOTIFY_SIGNAL isn't really a signal, but it requires the same
+ * behavior in terms of ensuring that we break out of wait loops
+ * so that notify signal callbacks can be processed.
+ */
+ if (unlikely(test_tsk_thread_flag(p, TIF_NOTIFY_SIGNAL)))
+ return 1;
+ return task_sigpending(p);
+}
+
static inline int __fatal_signal_pending(struct task_struct *p)
{
return unlikely(sigismember(&p->pending.signal, SIGKILL));
@@ -356,10 +408,10 @@ static inline int __fatal_signal_pending(struct task_struct *p)
static inline int fatal_signal_pending(struct task_struct *p)
{
- return signal_pending(p) && __fatal_signal_pending(p);
+ return task_sigpending(p) && __fatal_signal_pending(p);
}
-static inline int signal_pending_state(long state, struct task_struct *p)
+static inline int signal_pending_state(unsigned int state, struct task_struct *p)
{
if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
return 0;
@@ -370,6 +422,20 @@ static inline int signal_pending_state(long state, struct task_struct *p)
}
/*
+ * This should only be used in fault handlers to decide whether we
+ * should stop the current fault routine to handle the signals
+ * instead, especially with the case where we've got interrupted with
+ * a VM_FAULT_RETRY.
+ */
+static inline bool fault_signal_pending(vm_fault_t fault_flags,
+ struct pt_regs *regs)
+{
+ return unlikely((fault_flags & VM_FAULT_RETRY) &&
+ (fatal_signal_pending(current) ||
+ (user_mode(regs) && signal_pending(current))));
+}
+
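+/*
+ * A hedged caller sketch, assuming the four-argument handle_mm_fault()
+ * used by modern arch fault handlers (vma, address, flags and regs are
+ * whatever the handler already holds):
+ *
+ *	vm_fault_t fault;
+ *
+ *	fault = handle_mm_fault(vma, address, flags, regs);
+ *	if (fault_signal_pending(fault, regs))
+ *		return;	// retry was interrupted; let signal handling run
+ */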
+/*
* Reevaluate whether the task has signals pending delivery.
* Wake the task if so.
* This is required every time the blocked sigset_t changes.
@@ -381,13 +447,23 @@ extern void calculate_sigpending(void);
extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
-static inline void signal_wake_up(struct task_struct *t, bool resume)
+static inline void signal_wake_up(struct task_struct *t, bool fatal)
{
- signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
+ unsigned int state = 0;
+ if (fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN)) {
+ t->jobctl &= ~(JOBCTL_STOPPED | JOBCTL_TRACED);
+ state = TASK_WAKEKILL | __TASK_TRACED;
+ }
+ signal_wake_up_state(t, state);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
- signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
+ unsigned int state = 0;
+ if (resume) {
+ t->jobctl &= ~JOBCTL_TRACED;
+ state = __TASK_TRACED;
+ }
+ signal_wake_up_state(t, state);
}
void task_join_group_stop(struct task_struct *task);
@@ -479,7 +555,7 @@ extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize);
static inline void restore_saved_sigmask_unless(bool interrupted)
{
if (interrupted)
- WARN_ON(!test_thread_flag(TIF_SIGPENDING));
+ WARN_ON(!signal_pending(current));
else
restore_saved_sigmask();
}
@@ -501,6 +577,17 @@ static inline int kill_cad_pid(int sig, int priv)
#define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0)
#define SEND_SIG_PRIV ((struct kernel_siginfo *) 1)
+static inline int __on_sig_stack(unsigned long sp)
+{
+#ifdef CONFIG_STACK_GROWSUP
+ return sp >= current->sas_ss_sp &&
+ sp - current->sas_ss_sp < current->sas_ss_size;
+#else
+ return sp > current->sas_ss_sp &&
+ sp - current->sas_ss_sp <= current->sas_ss_size;
+#endif
+}
+
/*
* True if we are on the alternate signal stack.
*/
@@ -518,13 +605,7 @@ static inline int on_sig_stack(unsigned long sp)
if (current->sas_ss_flags & SS_AUTODISARM)
return 0;
-#ifdef CONFIG_STACK_GROWSUP
- return sp >= current->sas_ss_sp &&
- sp - current->sas_ss_sp < current->sas_ss_size;
-#else
- return sp > current->sas_ss_sp &&
- sp - current->sas_ss_sp <= current->sas_ss_size;
-#endif
+ return __on_sig_stack(sp);
}
static inline int sas_ss_flags(unsigned long sp)
@@ -631,17 +712,6 @@ static inline bool thread_group_leader(struct task_struct *p)
return p->exit_signal >= 0;
}
-/* Do to the insanities of de_thread it is possible for a process
- * to have the pid of the thread group leader without actually being
- * the thread group leader. For iteration through the pids in proc
- * all we care about is that we have a task with the appropriate
- * pid, we don't actually care if we have the right task.
- */
-static inline bool has_group_leader_pid(struct task_struct *p)
-{
- return task_pid(p) == task_tgid(p);
-}
-
static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
@@ -662,6 +732,8 @@ static inline int thread_group_empty(struct task_struct *p)
#define delay_group_leader(p) \
(thread_group_leader(p) && !thread_group_empty(p))
+extern bool thread_group_exited(struct pid *pid);
+
extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
unsigned long *flags);
@@ -681,6 +753,12 @@ static inline void unlock_task_sighand(struct task_struct *task,
spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}
+#ifdef CONFIG_LOCKDEP
+extern void lockdep_assert_task_sighand_held(struct task_struct *task);
+#else
+static inline void lockdep_assert_task_sighand_held(struct task_struct *task) { }
+#endif
+
static inline unsigned long task_rlimit(const struct task_struct *task,
unsigned int limit)
{
diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h
index 568286411b43..0108a38bb64d 100644
--- a/include/linux/sched/stat.h
+++ b/include/linux/sched/stat.h
@@ -3,6 +3,7 @@
#define _LINUX_SCHED_STAT_H
#include <linux/percpu.h>
+#include <linux/kconfig.h>
/*
* Various counters maintained by the scheduler and fork(),
@@ -16,21 +17,14 @@ extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
-extern unsigned long nr_running(void);
+extern unsigned int nr_running(void);
extern bool single_task_running(void);
-extern unsigned long nr_iowait(void);
-extern unsigned long nr_iowait_cpu(int cpu);
+extern unsigned int nr_iowait(void);
+extern unsigned int nr_iowait_cpu(int cpu);
static inline int sched_info_on(void)
{
-#ifdef CONFIG_SCHEDSTATS
- return 1;
-#elif defined(CONFIG_TASK_DELAY_ACCT)
- extern int delayacct_on;
- return delayacct_on;
-#else
- return 0;
-#endif
+ return IS_ENABLED(CONFIG_SCHED_INFO);
}
#ifdef CONFIG_SCHEDSTATS
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index d4f6215ee03f..303ee7dd0c7e 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -7,98 +7,32 @@
struct ctl_table;
#ifdef CONFIG_DETECT_HUNG_TASK
-extern int sysctl_hung_task_check_count;
-extern unsigned int sysctl_hung_task_panic;
+/* used for hung_task and block/ */
extern unsigned long sysctl_hung_task_timeout_secs;
-extern unsigned long sysctl_hung_task_check_interval_secs;
-extern int sysctl_hung_task_warnings;
-extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos);
#else
/* Avoid need for ifdefs elsewhere in the code */
enum { sysctl_hung_task_timeout_secs = 0 };
#endif
-extern unsigned int sysctl_sched_latency;
-extern unsigned int sysctl_sched_min_granularity;
-extern unsigned int sysctl_sched_wakeup_granularity;
-extern unsigned int sysctl_sched_child_runs_first;
-
enum sched_tunable_scaling {
SCHED_TUNABLESCALING_NONE,
SCHED_TUNABLESCALING_LOG,
SCHED_TUNABLESCALING_LINEAR,
SCHED_TUNABLESCALING_END,
};
-extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
-
-extern unsigned int sysctl_numa_balancing_scan_delay;
-extern unsigned int sysctl_numa_balancing_scan_period_min;
-extern unsigned int sysctl_numa_balancing_scan_period_max;
-extern unsigned int sysctl_numa_balancing_scan_size;
-
-#ifdef CONFIG_SCHED_DEBUG
-extern __read_mostly unsigned int sysctl_sched_migration_cost;
-extern __read_mostly unsigned int sysctl_sched_nr_migrate;
-
-int sched_proc_update_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *length,
- loff_t *ppos);
-#endif
-
-/*
- * control realtime throttling:
- *
- * /proc/sys/kernel/sched_rt_period_us
- * /proc/sys/kernel/sched_rt_runtime_us
- */
-extern unsigned int sysctl_sched_rt_period;
-extern int sysctl_sched_rt_runtime;
-
-#ifdef CONFIG_UCLAMP_TASK
-extern unsigned int sysctl_sched_uclamp_util_min;
-extern unsigned int sysctl_sched_uclamp_util_max;
-#endif
-
-#ifdef CONFIG_CFS_BANDWIDTH
-extern unsigned int sysctl_sched_cfs_bandwidth_slice;
-#endif
-
-#ifdef CONFIG_SCHED_AUTOGROUP
-extern unsigned int sysctl_sched_autogroup_enabled;
-#endif
-
-extern int sysctl_sched_rr_timeslice;
-extern int sched_rr_timeslice;
-
-extern int sched_rr_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
-extern int sched_rt_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
+#define NUMA_BALANCING_DISABLED 0x0
+#define NUMA_BALANCING_NORMAL 0x1
+#define NUMA_BALANCING_MEMORY_TIERING 0x2
-#ifdef CONFIG_UCLAMP_TASK
-extern int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
+#ifdef CONFIG_NUMA_BALANCING
+extern int sysctl_numa_balancing_mode;
+extern unsigned int sysctl_numa_balancing_promote_rate_limit;
+#else
+#define sysctl_numa_balancing_mode 0
#endif
-extern int sysctl_numa_balancing(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
-
-extern int sysctl_schedstats(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
-
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-extern unsigned int sysctl_sched_energy_aware;
-extern int sched_energy_aware_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
-#endif
+int sysctl_numa_balancing(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos);
#endif /* _LINUX_SCHED_SYSCTL_H */
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index f1879884238e..d6c48163c6de 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -13,6 +13,7 @@
struct task_struct;
struct rusage;
union thread_union;
+struct css_set;
/* All the bits taken by the old clone syscall. */
#define CLONE_LEGACY_FLAGS 0xffffffffULL
@@ -29,6 +30,14 @@ struct kernel_clone_args {
pid_t *set_tid;
/* Number of elements in *set_tid */
size_t set_tid_size;
+ int cgroup;
+ int io_thread;
+ int kthread;
+ int idle;
+ int (*fn)(void *);
+ void *fn_arg;
+ struct cgroup *cgrp;
+ struct css_set *cset;
};
/*
@@ -43,17 +52,18 @@ extern spinlock_t mmlist_lock;
extern union thread_union init_thread_union;
extern struct task_struct init_task;
-#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
-#endif /* #ifdef CONFIG_PROVE_RCU */
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
+extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
+extern void sched_post_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);
void __noreturn do_task_dead(void);
+void __noreturn make_task_dead(int signr);
extern void proc_caches_init(void);
@@ -61,22 +71,8 @@ extern void fork_init(void);
extern void release_task(struct task_struct * p);
-#ifdef CONFIG_HAVE_COPY_THREAD_TLS
-extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
- struct task_struct *, unsigned long);
-#else
-extern int copy_thread(unsigned long, unsigned long, unsigned long,
- struct task_struct *);
-
-/* Architectures that haven't opted into copy_thread_tls get the tls argument
- * via pt_regs, so ignore the tls argument passed via C. */
-static inline int copy_thread_tls(
- unsigned long clone_flags, unsigned long sp, unsigned long arg,
- struct task_struct *p, unsigned long tls)
-{
- return copy_thread(clone_flags, sp, arg, p);
-}
-#endif
+extern int copy_thread(struct task_struct *, const struct kernel_clone_args *);
+
extern void flush_thread(void);
#ifdef CONFIG_HAVE_EXIT_THREAD
@@ -86,18 +82,19 @@ static inline void exit_thread(struct task_struct *tsk)
{
}
#endif
-extern void do_group_exit(int);
+extern __noreturn void do_group_exit(int);
extern void exit_files(struct task_struct *);
-extern void exit_itimers(struct signal_struct *);
+extern void exit_itimers(struct task_struct *);
-extern long _do_fork(struct kernel_clone_args *kargs);
-extern bool legacy_clone_args_valid(const struct kernel_clone_args *kargs);
-extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
+extern pid_t kernel_clone(struct kernel_clone_args *kargs);
+struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node);
struct task_struct *fork_idle(int);
struct mm_struct *copy_init_mm(void);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
+extern pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
+int kernel_wait(pid_t pid, int *stat);
extern void free_task(struct task_struct *tsk);
@@ -122,8 +119,17 @@ static inline void put_task_struct(struct task_struct *t)
__put_task_struct(t);
}
+static inline void put_task_struct_many(struct task_struct *t, int nr)
+{
+ if (refcount_sub_and_test(nr, &t->usage))
+ __put_task_struct(t);
+}
+
void put_task_struct_rcu_user(struct task_struct *task);
+/* Free all architecture-specific resources held by a thread. */
+void release_thread(struct task_struct *dead_task);
+
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
@@ -160,7 +166,7 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
* Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
* subscriptions and synchronises with wait4(). Also used in procfs. Also
* pins the final release of task.io_context. Also protects ->cpuset and
- * ->cgroup.subsys[]. And ->vfork_done.
+ * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist.
*
* Nests both inside and outside of read_lock(&tasklist_lock).
* It must not be nested with write_lock_irq(&tasklist_lock),
diff --git a/include/linux/sched/task_flags.h b/include/linux/sched/task_flags.h
new file mode 100644
index 000000000000..227f5be81bcd
--- /dev/null
+++ b/include/linux/sched/task_flags.h
@@ -0,0 +1 @@
+#include <linux/sched.h>
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index 2413427e439c..5e799a47431e 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -16,7 +16,7 @@
* try_get_task_stack() instead. task_stack_page will return a pointer
* that could get freed out from under you.
*/
-static inline void *task_stack_page(const struct task_struct *task)
+static __always_inline void *task_stack_page(const struct task_struct *task)
{
return task->stack;
}
@@ -25,7 +25,11 @@ static inline void *task_stack_page(const struct task_struct *task)
static inline unsigned long *end_of_stack(const struct task_struct *task)
{
+#ifdef CONFIG_STACK_GROWSUP
+ return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1;
+#else
return task->stack;
+#endif
}
#elif !defined(__HAVE_THREAD_FUNCTIONS)
@@ -75,6 +79,8 @@ static inline void *try_get_task_stack(struct task_struct *tsk)
static inline void put_task_stack(struct task_struct *tsk) {}
#endif
+void exit_task_stack_account(struct task_struct *tsk);
+
#define task_stack_end_corrupted(task) \
(*(end_of_stack(task)) != STACK_END_MAGIC)
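
With the CONFIG_STACK_GROWSUP variant of end_of_stack() above, task_stack_end_corrupted() checks the magic at whichever end the stack grows toward. A minimal check sketch, similar in spirit to the scheduler's own debug test (prev stands for whatever task is being inspected):

    if (task_stack_end_corrupted(prev))
        panic("corrupted stack end detected inside scheduler\n");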
diff --git a/include/linux/sched/thread_info_api.h b/include/linux/sched/thread_info_api.h
new file mode 100644
index 000000000000..2c60fbc16c08
--- /dev/null
+++ b/include/linux/sched/thread_info_api.h
@@ -0,0 +1 @@
+#include <linux/thread_info.h>
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index f341163fedc9..816df6cc444e 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -11,21 +11,29 @@
*/
#ifdef CONFIG_SMP
-#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. */
-#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */
-#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */
-#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */
-#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */
-#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
-#define SD_ASYM_CPUCAPACITY 0x0040 /* Domain members have different CPU capacities */
-#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share CPU capacity */
-#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */
-#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share CPU pkg resources */
-#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
-#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
-#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
-#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
-#define SD_NUMA 0x4000 /* cross-node balancing */
+/* Generate SD flag indexes */
+#define SD_FLAG(name, mflags) __##name,
+enum {
+ #include <linux/sched/sd_flags.h>
+ __SD_FLAG_CNT,
+};
+#undef SD_FLAG
+/* Generate SD flag bits */
+#define SD_FLAG(name, mflags) name = 1 << __##name,
+enum {
+ #include <linux/sched/sd_flags.h>
+};
+#undef SD_FLAG
+
+#ifdef CONFIG_SCHED_DEBUG
+
+struct sd_flag_debug {
+ unsigned int meta_flags;
+ char *name;
+};
+extern const struct sd_flag_debug sd_flag_debug[];
+
+#endif
#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
@@ -34,6 +42,13 @@ static inline int cpu_smt_flags(void)
}
#endif
+#ifdef CONFIG_SCHED_CLUSTER
+static inline int cpu_cluster_flags(void)
+{
+ return SD_SHARE_PKG_RESOURCES;
+}
+#endif
+
#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
@@ -66,6 +81,7 @@ struct sched_domain_shared {
atomic_t ref;
atomic_t nr_busy_cpus;
int has_idle_cores;
+ int nr_idle_scan;
};
struct sched_domain {
@@ -78,6 +94,7 @@ struct sched_domain {
unsigned int busy_factor; /* less balancing by factor if busy */
unsigned int imbalance_pct; /* No balance until over watermark */
unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */
+ unsigned int imb_numa_nr; /* Nr running tasks that allows a NUMA imbalance */
int nohz_idle; /* NOHZ IDLE status */
int flags; /* See SD_* */
@@ -90,7 +107,7 @@ struct sched_domain {
/* idle_balance() stats */
u64 max_newidle_lb_cost;
- unsigned long next_decay_max_lb_cost;
+ unsigned long last_decay_max_lb_cost;
u64 avg_scan_cost; /* select_idle_sibling */
@@ -142,7 +159,7 @@ struct sched_domain {
* by attaching extra space to the end of the structure,
* depending on how many CPUs the kernel has booted up with)
*/
- unsigned long span[0];
+ unsigned long span[];
};
static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
@@ -217,7 +234,25 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
#endif /* !CONFIG_SMP */
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+extern void rebuild_sched_domains_energy(void);
+#else
+static inline void rebuild_sched_domains_energy(void)
+{
+}
+#endif
+
#ifndef arch_scale_cpu_capacity
+/**
+ * arch_scale_cpu_capacity - get the capacity scale factor of a given CPU.
+ * @cpu: the CPU in question.
+ *
+ * Return: the CPU scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
+ *
+ * max_perf(cpu)
+ * ----------------------------- * SCHED_CAPACITY_SCALE
+ * max(max_perf(c) : c \in CPUs)
+ */
static __always_inline
unsigned long arch_scale_cpu_capacity(int cpu)
{
@@ -225,6 +260,21 @@ unsigned long arch_scale_cpu_capacity(int cpu)
}
#endif
+#ifndef arch_scale_thermal_pressure
+static __always_inline
+unsigned long arch_scale_thermal_pressure(int cpu)
+{
+ return 0;
+}
+#endif
+
+#ifndef arch_update_thermal_pressure
+static __always_inline
+void arch_update_thermal_pressure(const struct cpumask *cpus,
+ unsigned long capped_frequency)
+{ }
+#endif
+
static inline int task_node(const struct task_struct *p)
{
return cpu_to_node(task_cpu(p));
diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h
index 917d88edb7b9..f054d0360a75 100644
--- a/include/linux/sched/user.h
+++ b/include/linux/sched/user.h
@@ -4,6 +4,7 @@
#include <linux/uidgid.h>
#include <linux/atomic.h>
+#include <linux/percpu_counter.h>
#include <linux/refcount.h>
#include <linux/ratelimit.h>
@@ -12,19 +13,9 @@
*/
struct user_struct {
refcount_t __count; /* reference count */
- atomic_t processes; /* How many processes does this user have? */
- atomic_t sigpending; /* How many pending signals does this user have? */
-#ifdef CONFIG_FANOTIFY
- atomic_t fanotify_listeners;
-#endif
#ifdef CONFIG_EPOLL
- atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
-#endif
-#ifdef CONFIG_POSIX_MQUEUE
- /* protected by mq_lock */
- unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
+ struct percpu_counter epoll_watches; /* The number of file descriptors currently watched */
#endif
- unsigned long locked_shm; /* How many pages of mlocked shm ? */
unsigned long unix_inflight; /* How many files in flight in unix sockets */
atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */
@@ -33,9 +24,13 @@ struct user_struct {
kuid_t uid;
#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) || \
- defined(CONFIG_NET) || defined(CONFIG_IO_URING)
+ defined(CONFIG_NET) || defined(CONFIG_IO_URING) || \
+ defined(CONFIG_VFIO_PCI_ZDEV_KVM)
atomic_long_t locked_vm;
#endif
+#ifdef CONFIG_WATCH_QUEUE
+ atomic_t nr_watches; /* The number of watches this user currently has */
+#endif
/* Miscellaneous per-user rate limit */
struct ratelimit_state ratelimit;
diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
index 26a2013ac39c..06cd8fb2f409 100644
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -42,8 +42,11 @@ struct wake_q_head {
#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
-#define DEFINE_WAKE_Q(name) \
- struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
+#define WAKE_Q_HEAD_INITIALIZER(name) \
+ { WAKE_Q_TAIL, &name.first }
+
+#define DEFINE_WAKE_Q(name) \
+ struct wake_q_head name = WAKE_Q_HEAD_INITIALIZER(name)
static inline void wake_q_init(struct wake_q_head *head)
{
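
Splitting out WAKE_Q_HEAD_INITIALIZER() presumably lets a wake_q_head be initialized inside another initializer, while DEFINE_WAKE_Q() keeps the common on-stack form. A minimal usage sketch of the lockless wake queue pattern (task is a placeholder; wake_q_add() and wake_up_q() are the existing queue primitives):

    DEFINE_WAKE_Q(wake_q);

    /* with a spinlock held: defer the wakeup, never sleep here */
    wake_q_add(&wake_q, task);

    /* after the lock is dropped: perform the deferred wakeups */
    wake_up_q(&wake_q);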
diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h
index 0bb04a96a6d4..cb41c5edb4d4 100644
--- a/include/linux/sched_clock.h
+++ b/include/linux/sched_clock.h
@@ -5,7 +5,37 @@
#ifndef LINUX_SCHED_CLOCK
#define LINUX_SCHED_CLOCK
+#include <linux/types.h>
+
#ifdef CONFIG_GENERIC_SCHED_CLOCK
+/**
+ * struct clock_read_data - data required to read from sched_clock()
+ *
+ * @epoch_ns: sched_clock() value at last update
+ * @epoch_cyc: Clock cycle value at last update.
+ * @sched_clock_mask: Bitmask for two's complement subtraction of non 64bit
+ * clocks.
+ * @read_sched_clock: Current clock source (or dummy source when suspended).
+ * @mult: Multiplier for scaled math conversion.
+ * @shift: Shift value for scaled math conversion.
+ *
+ * Care must be taken when updating this structure; it is read by
+ * some very hot code paths. It occupies <=40 bytes and, when combined
+ * with the seqcount used to synchronize access, comfortably fits into
+ * a 64 byte cache line.
+ */
+struct clock_read_data {
+ u64 epoch_ns;
+ u64 epoch_cyc;
+ u64 sched_clock_mask;
+ u64 (*read_sched_clock)(void);
+ u32 mult;
+ u32 shift;
+};
+
+extern struct clock_read_data *sched_clock_read_begin(unsigned int *seq);
+extern int sched_clock_read_retry(unsigned int seq);
+
extern void generic_sched_clock_init(void);
extern void sched_clock_register(u64 (*read)(void), int bits,
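
The new sched_clock_read_begin()/sched_clock_read_retry() accessors expose the seqcount-protected clock_read_data for external readers. A hedged sketch of the read loop they enable, following the documented epoch/mult/shift fields (the conversion mirrors what sched_clock() itself is described as doing, but is an illustration, not the implementation):

    struct clock_read_data *rd;
    unsigned int seq;
    u64 cyc, ns;

    do {
        rd = sched_clock_read_begin(&seq);
        cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
              rd->sched_clock_mask;
        ns = rd->epoch_ns + ((cyc * rd->mult) >> rd->shift);
    } while (sched_clock_read_retry(seq));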
diff --git a/include/linux/scif.h b/include/linux/scif.h
deleted file mode 100644
index eeb250b73c4b..000000000000
--- a/include/linux/scif.h
+++ /dev/null
@@ -1,1339 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Intel SCIF driver.
- *
- */
-#ifndef __SCIF_H__
-#define __SCIF_H__
-
-#include <linux/types.h>
-#include <linux/poll.h>
-#include <linux/device.h>
-#include <linux/scif_ioctl.h>
-
-#define SCIF_ACCEPT_SYNC 1
-#define SCIF_SEND_BLOCK 1
-#define SCIF_RECV_BLOCK 1
-
-enum {
- SCIF_PROT_READ = (1 << 0),
- SCIF_PROT_WRITE = (1 << 1)
-};
-
-enum {
- SCIF_MAP_FIXED = 0x10,
- SCIF_MAP_KERNEL = 0x20,
-};
-
-enum {
- SCIF_FENCE_INIT_SELF = (1 << 0),
- SCIF_FENCE_INIT_PEER = (1 << 1),
- SCIF_SIGNAL_LOCAL = (1 << 4),
- SCIF_SIGNAL_REMOTE = (1 << 5)
-};
-
-enum {
- SCIF_RMA_USECPU = (1 << 0),
- SCIF_RMA_USECACHE = (1 << 1),
- SCIF_RMA_SYNC = (1 << 2),
- SCIF_RMA_ORDERED = (1 << 3)
-};
-
-/* End of SCIF Admin Reserved Ports */
-#define SCIF_ADMIN_PORT_END 1024
-
-/* End of SCIF Reserved Ports */
-#define SCIF_PORT_RSVD 1088
-
-typedef struct scif_endpt *scif_epd_t;
-typedef struct scif_pinned_pages *scif_pinned_pages_t;
-
-/**
- * struct scif_range - SCIF registered range used in kernel mode
- * @cookie: cookie used internally by SCIF
- * @nr_pages: number of pages of PAGE_SIZE
- * @prot_flags: R/W protection
- * @phys_addr: Array of bus addresses
- * @va: Array of kernel virtual addresses backed by the pages in the phys_addr
- * array. The va is populated only when called on the host for a remote
- * SCIF connection on MIC. This is required to support the use case of DMA
 - * between MIC and another device which is not a SCIF node, e.g. an IB or
 - * Ethernet NIC.
- */
-struct scif_range {
- void *cookie;
- int nr_pages;
- int prot_flags;
- dma_addr_t *phys_addr;
- void __iomem **va;
-};
-
-/**
- * struct scif_pollepd - SCIF endpoint to be monitored via scif_poll
- * @epd: SCIF endpoint
- * @events: requested events
- * @revents: returned events
- */
-struct scif_pollepd {
- scif_epd_t epd;
- __poll_t events;
- __poll_t revents;
-};
-
-/**
 - * struct scif_peer_dev - representation of a peer SCIF device
- *
 - * Peer devices show up as PCIe devices on the mgmt node but not on the cards.
- * The mgmt node discovers all the cards on the PCIe bus and informs the other
- * cards about their peers. Upon notification of a peer a node adds a peer
- * device to the peer bus to maintain symmetry in the way devices are
- * discovered across all nodes in the SCIF network.
- *
- * @dev: underlying device
 - * @dnode: the destination node with which this device will communicate
- */
-struct scif_peer_dev {
- struct device dev;
- u8 dnode;
-};
-
-/**
 - * struct scif_client - representation of a SCIF client
- * @name: client name
 - * @probe: client method called when a peer device is registered
 - * @remove: client method called when a peer device is unregistered
 - * @si: subsys_interface used internally for implementing SCIF clients
- */
-struct scif_client {
- const char *name;
- void (*probe)(struct scif_peer_dev *spdev);
- void (*remove)(struct scif_peer_dev *spdev);
- struct subsys_interface si;
-};
-
-#define SCIF_OPEN_FAILED ((scif_epd_t)-1)
-#define SCIF_REGISTER_FAILED ((off_t)-1)
-#define SCIF_MMAP_FAILED ((void *)-1)
-
-/**
- * scif_open() - Create an endpoint
- *
- * Return:
- * Upon successful completion, scif_open() returns an endpoint descriptor to
 - * be used in subsequent SCIF function calls to refer to that endpoint;
- * otherwise in user mode SCIF_OPEN_FAILED (that is ((scif_epd_t)-1)) is
- * returned and errno is set to indicate the error; in kernel mode a NULL
- * scif_epd_t is returned.
- *
- * Errors:
- * ENOMEM - Insufficient kernel memory was available
- */
-scif_epd_t scif_open(void);
-
-/**
- * scif_bind() - Bind an endpoint to a port
- * @epd: endpoint descriptor
- * @pn: port number
- *
- * scif_bind() binds endpoint epd to port pn, where pn is a port number on the
- * local node. If pn is zero, a port number greater than or equal to
- * SCIF_PORT_RSVD is assigned and returned. Each endpoint may be bound to
 - * exactly one local port. A requested port number less than 1024 can be
 - * bound only by system (root) processes or by privileged users.
- *
- * Return:
- * Upon successful completion, scif_bind() returns the port number to which epd
- * is bound; otherwise in user mode -1 is returned and errno is set to
- * indicate the error; in kernel mode the negative of one of the following
- * errors is returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * EINVAL - the endpoint or the port is already bound
- * EISCONN - The endpoint is already connected
- * ENOSPC - No port number available for assignment
- * EACCES - The port requested is protected and the user is not the superuser
- */
-int scif_bind(scif_epd_t epd, u16 pn);
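/*
 * Illustrative usage sketch (kernel mode), not part of this header: open
 * an endpoint and bind it to a well-known local port. The port value 1045
 * is a hypothetical choice above SCIF_ADMIN_PORT_END; assumes
 * <linux/scif.h> and <linux/errno.h>.
 */
static int example_open_and_bind(scif_epd_t *out)
{
	scif_epd_t epd = scif_open();	/* NULL on failure in kernel mode */
	int ret;

	if (!epd)
		return -ENOMEM;
	ret = scif_bind(epd, 1045);	/* returns the bound port or -errno */
	if (ret < 0) {
		scif_close(epd);
		return ret;
	}
	*out = epd;
	return 0;
}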
-
-/**
- * scif_listen() - Listen for connections on an endpoint
- * @epd: endpoint descriptor
- * @backlog: maximum pending connection requests
- *
- * scif_listen() marks the endpoint epd as a listening endpoint - that is, as
- * an endpoint that will be used to accept incoming connection requests. Once
- * so marked, the endpoint is said to be in the listening state and may not be
- * used as the endpoint of a connection.
- *
- * The endpoint, epd, must have been bound to a port.
- *
- * The backlog argument defines the maximum length to which the queue of
- * pending connections for epd may grow. If a connection request arrives when
- * the queue is full, the client may receive an error with an indication that
- * the connection was refused.
- *
- * Return:
- * Upon successful completion, scif_listen() returns 0; otherwise in user mode
- * -1 is returned and errno is set to indicate the error; in kernel mode the
- * negative of one of the following errors is returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * EINVAL - the endpoint is not bound to a port
- * EISCONN - The endpoint is already connected or listening
- */
-int scif_listen(scif_epd_t epd, int backlog);
-
-/**
- * scif_connect() - Initiate a connection on a port
- * @epd: endpoint descriptor
- * @dst: global id of port to which to connect
- *
- * The scif_connect() function requests the connection of endpoint epd to remote
- * port dst. If the connection is successful, a peer endpoint, bound to dst, is
- * created on node dst.node. On successful return, the connection is complete.
- *
- * If the endpoint epd has not already been bound to a port, scif_connect()
- * will bind it to an unused local port.
- *
- * A connection is terminated when an endpoint of the connection is closed,
- * either explicitly by scif_close(), or when a process that owns one of the
- * endpoints of the connection is terminated.
- *
- * In user space, scif_connect() supports an asynchronous connection mode
- * if the application has set the O_NONBLOCK flag on the endpoint via the
 - * fcntl() system call. With this flag set, the calling process will not
 - * wait during scif_connect().
- *
- * Return:
- * Upon successful completion, scif_connect() returns the port ID to which the
- * endpoint, epd, is bound; otherwise in user mode -1 is returned and errno is
- * set to indicate the error; in kernel mode the negative of one of the
- * following errors is returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNREFUSED - The destination was not listening for connections or refused
- * the connection request
- * EINVAL - dst.port is not a valid port ID
- * EISCONN - The endpoint is already connected
- * ENOMEM - No buffer space is available
 - * ENODEV - The destination node does not exist, or the node is lost, or it
 - * existed but is no longer in the network (it may have crashed)
- * ENOSPC - No port number available for assignment
- * EOPNOTSUPP - The endpoint is listening and cannot be connected
- */
-int scif_connect(scif_epd_t epd, struct scif_port_id *dst);
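/*
 * Illustrative sketch, not part of this header: connect an endpoint to a
 * hypothetical peer at node 1, port 1045. If epd was not already bound,
 * scif_connect() binds it to an unused local port first.
 */
static int example_connect(scif_epd_t epd)
{
	struct scif_port_id dst = { .node = 1, .port = 1045 };

	return scif_connect(epd, &dst);	/* bound port ID or -errno */
}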
-
-/**
- * scif_accept() - Accept a connection on an endpoint
- * @epd: endpoint descriptor
- * @peer: global id of port to which connected
- * @newepd: new connected endpoint descriptor
- * @flags: flags
- *
- * The scif_accept() call extracts the first connection request from the queue
- * of pending connections for the port on which epd is listening. scif_accept()
- * creates a new endpoint, bound to the same port as epd, and allocates a new
- * SCIF endpoint descriptor, returned in newepd, for the endpoint. The new
- * endpoint is connected to the endpoint through which the connection was
- * requested. epd is unaffected by this call, and remains in the listening
- * state.
- *
- * On successful return, peer holds the global port identifier (node id and
- * local port number) of the port which requested the connection.
- *
- * A connection is terminated when an endpoint of the connection is closed,
- * either explicitly by scif_close(), or when a process that owns one of the
- * endpoints of the connection is terminated.
- *
- * The number of connections that can (subsequently) be accepted on epd is only
- * limited by system resources (memory).
- *
- * The flags argument is formed by OR'ing together zero or more of the
- * following values.
- * SCIF_ACCEPT_SYNC - block until a connection request is presented. If
- * SCIF_ACCEPT_SYNC is not in flags, and no pending
- * connections are present on the queue, scif_accept()
- * fails with an EAGAIN error
- *
- * In user mode, the select() and poll() functions can be used to determine
- * when there is a connection request. In kernel mode, the scif_poll()
- * function may be used for this purpose. A readable event will be delivered
- * when a connection is requested.
- *
- * Return:
- * Upon successful completion, scif_accept() returns 0; otherwise in user mode
- * -1 is returned and errno is set to indicate the error; in kernel mode the
- * negative of one of the following errors is returned.
- *
- * Errors:
- * EAGAIN - SCIF_ACCEPT_SYNC is not set and no connections are present to be
- * accepted or SCIF_ACCEPT_SYNC is not set and remote node failed to complete
- * its connection request
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * EINTR - Interrupted function
- * EINVAL - epd is not a listening endpoint, or flags is invalid, or peer is
- * NULL, or newepd is NULL
 - * ENODEV - The requesting node is lost, or it existed but is no longer in
 - * the network (it may have crashed)
- * ENOMEM - Not enough space
- * ENOENT - Secondary part of epd registration failed
- */
-int scif_accept(scif_epd_t epd, struct scif_port_id *peer, scif_epd_t
- *newepd, int flags);
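/*
 * Illustrative sketch, not part of this header: the accepting side of the
 * connection above. The backlog of 16 is an arbitrary example value;
 * SCIF_ACCEPT_SYNC makes the call block until a request arrives.
 */
static int example_accept_one(scif_epd_t lepd, scif_epd_t *newepd)
{
	struct scif_port_id peer;
	int ret;

	ret = scif_listen(lepd, 16);
	if (ret < 0)
		return ret;
	return scif_accept(lepd, &peer, newepd, SCIF_ACCEPT_SYNC);
}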
-
-/**
- * scif_close() - Close an endpoint
- * @epd: endpoint descriptor
- *
- * scif_close() closes an endpoint and performs necessary teardown of
- * facilities associated with that endpoint.
- *
- * If epd is a listening endpoint then it will no longer accept connection
- * requests on the port to which it is bound. Any pending connection requests
- * are rejected.
- *
- * If epd is a connected endpoint, then its peer endpoint is also closed. RMAs
- * which are in-process through epd or its peer endpoint will complete before
- * scif_close() returns. Registered windows of the local and peer endpoints are
- * released as if scif_unregister() was called against each window.
- *
- * Closing a SCIF endpoint does not affect local registered memory mapped by
 - * a SCIF endpoint on a remote node. The local memory remains mapped by the
 - * peer SCIF endpoint until explicitly removed by the peer calling munmap(..).
- *
- * If the peer endpoint's receive queue is not empty at the time that epd is
- * closed, then the peer endpoint can be passed as the endpoint parameter to
- * scif_recv() until the receive queue is empty.
- *
- * epd is freed and may no longer be accessed.
- *
- * Return:
- * Upon successful completion, scif_close() returns 0; otherwise in user mode
- * -1 is returned and errno is set to indicate the error; in kernel mode the
- * negative of one of the following errors is returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- */
-int scif_close(scif_epd_t epd);
-
-/**
- * scif_send() - Send a message
- * @epd: endpoint descriptor
- * @msg: message buffer address
- * @len: message length
- * @flags: blocking mode flags
- *
- * scif_send() sends data to the peer of endpoint epd. Up to len bytes of data
- * are copied from memory starting at address msg. On successful execution the
- * return value of scif_send() is the number of bytes that were sent, and is
- * zero if no bytes were sent because len was zero. scif_send() may be called
- * only when the endpoint is in a connected state.
- *
- * If a scif_send() call is non-blocking, then it sends only those bytes which
- * can be sent without waiting, up to a maximum of len bytes.
- *
- * If a scif_send() call is blocking, then it normally returns after sending
- * all len bytes. If a blocking call is interrupted or the connection is
- * reset, the call is considered successful if some bytes were sent or len is
- * zero, otherwise the call is considered unsuccessful.
- *
- * In user mode, the select() and poll() functions can be used to determine
- * when the send queue is not full. In kernel mode, the scif_poll() function
- * may be used for this purpose.
- *
- * It is recommended that scif_send()/scif_recv() only be used for short
- * control-type message communication between SCIF endpoints. The SCIF RMA
- * APIs are expected to provide better performance for transfer sizes of
- * 1024 bytes or longer for the current MIC hardware and software
- * implementation.
- *
- * scif_send() will block until the entire message is sent if SCIF_SEND_BLOCK
- * is passed as the flags argument.
- *
- * Return:
- * Upon successful completion, scif_send() returns the number of bytes sent;
- * otherwise in user mode -1 is returned and errno is set to indicate the
- * error; in kernel mode the negative of one of the following errors is
- * returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - flags is invalid, or len is negative
 - * ENODEV - The remote node is lost, or it existed but is no longer in the
 - * network (it may have crashed)
- * ENOMEM - Not enough space
- * ENOTCONN - The endpoint is not connected
- */
-int scif_send(scif_epd_t epd, void *msg, int len, int flags);
-
-/**
- * scif_recv() - Receive a message
- * @epd: endpoint descriptor
- * @msg: message buffer address
- * @len: message buffer length
- * @flags: blocking mode flags
- *
- * scif_recv() receives data from the peer of endpoint epd. Up to len bytes of
- * data are copied to memory starting at address msg. On successful execution
- * the return value of scif_recv() is the number of bytes that were received,
- * and is zero if no bytes were received because len was zero. scif_recv() may
- * be called only when the endpoint is in a connected state.
- *
- * If a scif_recv() call is non-blocking, then it receives only those bytes
- * which can be received without waiting, up to a maximum of len bytes.
- *
- * If a scif_recv() call is blocking, then it normally returns after receiving
- * all len bytes. If the blocking call was interrupted due to a disconnection,
 - * subsequent calls to scif_recv() will copy all bytes received up to the point
- * of disconnection.
- *
- * In user mode, the select() and poll() functions can be used to determine
- * when data is available to be received. In kernel mode, the scif_poll()
- * function may be used for this purpose.
- *
- * It is recommended that scif_send()/scif_recv() only be used for short
- * control-type message communication between SCIF endpoints. The SCIF RMA
- * APIs are expected to provide better performance for transfer sizes of
- * 1024 bytes or longer for the current MIC hardware and software
- * implementation.
- *
- * scif_recv() will block until the entire message is received if
- * SCIF_RECV_BLOCK is passed as the flags argument.
- *
- * Return:
- * Upon successful completion, scif_recv() returns the number of bytes
- * received; otherwise in user mode -1 is returned and errno is set to
- * indicate the error; in kernel mode the negative of one of the following
- * errors is returned.
- *
- * Errors:
- * EAGAIN - The destination node is returning from a low power state
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - flags is invalid, or len is negative
 - * ENODEV - The remote node is lost, or it existed but is no longer in the
 - * network (it may have crashed)
- * ENOMEM - Not enough space
- * ENOTCONN - The endpoint is not connected
- */
-int scif_recv(scif_epd_t epd, void *msg, int len, int flags);
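/*
 * Illustrative sketch, not part of this header: a blocking control-message
 * exchange on a connected endpoint, keeping scif_send()/scif_recv() to
 * short messages as recommended above. The command value is hypothetical.
 */
static int example_ping(scif_epd_t epd)
{
	u32 cmd = 0x1, ack = 0;
	int ret;

	ret = scif_send(epd, &cmd, sizeof(cmd), SCIF_SEND_BLOCK);
	if (ret < 0)
		return ret;
	ret = scif_recv(epd, &ack, sizeof(ack), SCIF_RECV_BLOCK);
	return ret < 0 ? ret : 0;
}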
-
-/**
- * scif_register() - Mark a memory region for remote access.
- * @epd: endpoint descriptor
- * @addr: starting virtual address
- * @len: length of range
- * @offset: offset of window
- * @prot_flags: read/write protection flags
- * @map_flags: mapping flags
- *
- * The scif_register() function opens a window, a range of whole pages of the
- * registered address space of the endpoint epd, starting at offset po and
- * continuing for len bytes. The value of po, further described below, is a
- * function of the parameters offset and len, and the value of map_flags. Each
- * page of the window represents the physical memory page which backs the
- * corresponding page of the range of virtual address pages starting at addr
- * and continuing for len bytes. addr and len are constrained to be multiples
- * of the page size. A successful scif_register() call returns po.
- *
- * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset
- * exactly, and offset is constrained to be a multiple of the page size. The
- * mapping established by scif_register() will not replace any existing
- * registration; an error is returned if any page within the range [offset,
- * offset + len - 1] intersects an existing window.
- *
- * When SCIF_MAP_FIXED is not set, the implementation uses offset in an
- * implementation-defined manner to arrive at po. The po value so chosen will
- * be an area of the registered address space that the implementation deems
- * suitable for a mapping of len bytes. An offset value of 0 is interpreted as
- * granting the implementation complete freedom in selecting po, subject to
- * constraints described below. A non-zero value of offset is taken to be a
- * suggestion of an offset near which the mapping should be placed. When the
- * implementation selects a value for po, it does not replace any extant
- * window. In all cases, po will be a multiple of the page size.
- *
- * The physical pages which are so represented by a window are available for
- * access in calls to mmap(), scif_readfrom(), scif_writeto(),
- * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the
- * physical pages represented by the window will not be reused by the memory
- * subsystem for any other purpose. Note that the same physical page may be
- * represented by multiple windows.
- *
- * Subsequent operations which change the memory pages to which virtual
- * addresses are mapped (such as mmap(), munmap()) have no effect on
 - * existing windows.
- *
- * If the process will fork(), it is recommended that the registered
- * virtual address range be marked with MADV_DONTFORK. Doing so will prevent
- * problems due to copy-on-write semantics.
- *
- * The prot_flags argument is formed by OR'ing together one or more of the
- * following values.
- * SCIF_PROT_READ - allow read operations from the window
- * SCIF_PROT_WRITE - allow write operations to the window
- *
- * Return:
- * Upon successful completion, scif_register() returns the offset at which the
- * mapping was placed (po); otherwise in user mode SCIF_REGISTER_FAILED (that
 - * is (off_t)-1) is returned and errno is set to indicate the error; in
- * kernel mode the negative of one of the following errors is returned.
- *
- * Errors:
- * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags, and pages in the range
- * [offset, offset + len -1] are already registered
- * EAGAIN - The mapping could not be performed due to lack of resources
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
 - * EINVAL - map_flags is invalid, or prot_flags is invalid, or SCIF_MAP_FIXED
 - * is set in map_flags and offset is not a multiple of the page size, or addr
 - * is not a multiple of the page size, or len is not a multiple of the page
 - * size or is 0, or offset is negative
 - * ENODEV - The remote node is lost, or it existed but is no longer in the
 - * network (it may have crashed)
- * ENOMEM - Not enough space
 - * ENOTCONN - The endpoint is not connected
- */
-off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
- int prot_flags, int map_flags);
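/*
 * Illustrative sketch, not part of this header: expose one page of kernel
 * memory at an implementation-chosen offset. buf is assumed to be page
 * aligned (e.g. from __get_free_page()).
 */
static off_t example_register_page(scif_epd_t epd, void *buf)
{
	/* offset 0 without SCIF_MAP_FIXED lets the implementation pick po */
	return scif_register(epd, buf, PAGE_SIZE, 0,
			     SCIF_PROT_READ | SCIF_PROT_WRITE,
			     SCIF_MAP_KERNEL);	/* window offset or -errno */
}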
-
-/**
 - * scif_unregister() - Remove a memory region from remote access.
- * @epd: endpoint descriptor
- * @offset: start of range to unregister
- * @len: length of range to unregister
- *
- * The scif_unregister() function closes those previously registered windows
- * which are entirely within the range [offset, offset + len - 1]. It is an
- * error to specify a range which intersects only a subrange of a window.
- *
- * On a successful return, pages within the window may no longer be specified
- * in calls to mmap(), scif_readfrom(), scif_writeto(), scif_vreadfrom(),
 - * scif_vwriteto(), scif_get_pages(), and scif_fence_signal(). The window,
- * however, continues to exist until all previous references against it are
- * removed. A window is referenced if there is a mapping to it created by
- * mmap(), or if scif_get_pages() was called against the window
- * (and the pages have not been returned via scif_put_pages()). A window is
- * also referenced while an RMA, in which some range of the window is a source
- * or destination, is in progress. Finally a window is referenced while some
- * offset in that window was specified to scif_fence_signal(), and the RMAs
- * marked by that call to scif_fence_signal() have not completed. While a
- * window is in this state, its registered address space pages are not
- * available for use in a new registered window.
- *
- * When all such references to the window have been removed, its references to
- * all the physical pages which it represents are removed. Similarly, the
- * registered address space pages of the window become available for
- * registration in a new window.
- *
- * Return:
- * Upon successful completion, scif_unregister() returns 0; otherwise in user
- * mode -1 is returned and errno is set to indicate the error; in kernel mode
- * the negative of one of the following errors is returned. In the event of an
- * error, no windows are unregistered.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - the range [offset, offset + len - 1] intersects a subrange of a
- * window, or offset is negative
 - * ENODEV - The remote node is lost, or it existed but is no longer in the
 - * network (it may have crashed)
- * ENOTCONN - The endpoint is not connected
- * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid for the
- * registered address space of epd
- */
-int scif_unregister(scif_epd_t epd, off_t offset, size_t len);
-
-/**
- * scif_readfrom() - Copy from a remote address space
- * @epd: endpoint descriptor
- * @loffset: offset in local registered address space to
- * which to copy
- * @len: length of range to copy
- * @roffset: offset in remote registered address space
- * from which to copy
- * @rma_flags: transfer mode flags
- *
- * scif_readfrom() copies len bytes from the remote registered address space of
- * the peer of endpoint epd, starting at the offset roffset to the local
- * registered address space of epd, starting at the offset loffset.
- *
- * Each of the specified ranges [loffset, loffset + len - 1] and [roffset,
- * roffset + len - 1] must be within some registered window or windows of the
- * local and remote nodes. A range may intersect multiple registered windows,
- * but only if those windows are contiguous in the registered address space.
- *
- * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
 - * programmed read/writes. Otherwise the data is copied using DMA. If
 - * rma_flags includes SCIF_RMA_SYNC, then scif_readfrom() will return after
 - * the transfer is complete. Otherwise, the transfer may be performed
 - * asynchronously. The order in which any two asynchronous RMA operations complete
- * is non-deterministic. The synchronization functions, scif_fence_mark()/
- * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
- * the completion of asynchronous RMA operations on the same endpoint.
- *
- * The DMA transfer of individual bytes is not guaranteed to complete in
- * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
- * cacheline or partial cacheline of the source range will become visible on
- * the destination node after all other transferred data in the source
- * range has become visible on the destination node.
- *
- * The optimal DMA performance will likely be realized if both
- * loffset and roffset are cacheline aligned (are a multiple of 64). Lower
- * performance will likely be realized if loffset and roffset are not
- * cacheline aligned but are separated by some multiple of 64. The lowest level
- * of performance is likely if loffset and roffset are not separated by a
- * multiple of 64.
- *
- * The rma_flags argument is formed by ORing together zero or more of the
- * following values.
- * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
- * engine.
- * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
- * transfer has completed. Passing this flag results in the
- * current implementation busy waiting and consuming CPU cycles
- * while the DMA transfer is in progress for best performance by
- * avoiding the interrupt latency.
- * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
- * the source range becomes visible on the destination node
- * after all other transferred data in the source range has
- * become visible on the destination
- *
- * Return:
- * Upon successful completion, scif_readfrom() returns 0; otherwise in user
- * mode -1 is returned and errno is set to indicate the error; in kernel mode
- * the negative of one of the following errors is returned.
- *
- * Errors:
 - * EACCES - Attempt to write to a read-only range
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - rma_flags is invalid
 - * ENODEV - The remote node is lost, or it existed but is no longer in the
 - * network (it may have crashed)
- * ENOTCONN - The endpoint is not connected
- * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered
 - * address space of epd, or the range [roffset, roffset + len - 1] is invalid
- * for the registered address space of the peer of epd, or loffset or roffset
- * is negative
- */
-int scif_readfrom(scif_epd_t epd, off_t loffset, size_t len, off_t
- roffset, int rma_flags);
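/*
 * Illustrative sketch, not part of this header: a synchronous DMA read
 * from the peer's registered window into the local one. Offsets are
 * assumed cacheline aligned, per the performance note above.
 */
static int example_pull(scif_epd_t epd, off_t loff, off_t roff, size_t len)
{
	/* No SCIF_RMA_USECPU, so the DMA engine is used; SCIF_RMA_SYNC
	 * makes the call return only once the transfer is complete. */
	return scif_readfrom(epd, loff, len, roff, SCIF_RMA_SYNC);
}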
-
-/**
- * scif_writeto() - Copy to a remote address space
- * @epd: endpoint descriptor
- * @loffset: offset in local registered address space
- * from which to copy
- * @len: length of range to copy
- * @roffset: offset in remote registered address space to
- * which to copy
- * @rma_flags: transfer mode flags
- *
- * scif_writeto() copies len bytes from the local registered address space of
- * epd, starting at the offset loffset to the remote registered address space
- * of the peer of endpoint epd, starting at the offset roffset.
- *
- * Each of the specified ranges [loffset, loffset + len - 1] and [roffset,
- * roffset + len - 1] must be within some registered window or windows of the
- * local and remote nodes. A range may intersect multiple registered windows,
- * but only if those windows are contiguous in the registered address space.
- *
- * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
 - * programmed read/writes. Otherwise the data is copied using DMA. If
 - * rma_flags includes SCIF_RMA_SYNC, then scif_writeto() will return after
 - * the transfer is complete. Otherwise, the transfer may be performed
 - * asynchronously. The order in which any two asynchronous RMA operations complete
- * is non-deterministic. The synchronization functions, scif_fence_mark()/
- * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
- * the completion of asynchronous RMA operations on the same endpoint.
- *
- * The DMA transfer of individual bytes is not guaranteed to complete in
- * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
- * cacheline or partial cacheline of the source range will become visible on
- * the destination node after all other transferred data in the source
- * range has become visible on the destination node.
- *
- * The optimal DMA performance will likely be realized if both
- * loffset and roffset are cacheline aligned (are a multiple of 64). Lower
- * performance will likely be realized if loffset and roffset are not cacheline
- * aligned but are separated by some multiple of 64. The lowest level of
- * performance is likely if loffset and roffset are not separated by a multiple
- * of 64.
- *
- * The rma_flags argument is formed by ORing together zero or more of the
- * following values.
- * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
- * engine.
- * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
- * transfer has completed. Passing this flag results in the
- * current implementation busy waiting and consuming CPU cycles
- * while the DMA transfer is in progress for best performance by
- * avoiding the interrupt latency.
- * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
- * the source range becomes visible on the destination node
- * after all other transferred data in the source range has
- * become visible on the destination
- *
- * Return:
 - * Upon successful completion, scif_writeto() returns 0; otherwise in user
- * mode -1 is returned and errno is set to indicate the error; in kernel mode
- * the negative of one of the following errors is returned.
- *
- * Errors:
 - * EACCES - Attempt to write to a read-only range
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - rma_flags is invalid
 - * ENODEV - The remote node is lost, or it existed but is no longer in the
 - * network (it may have crashed)
- * ENOTCONN - The endpoint is not connected
- * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered
 - * address space of epd, or the range [roffset, roffset + len - 1] is invalid
- * for the registered address space of the peer of epd, or loffset or roffset
- * is negative
- */
-int scif_writeto(scif_epd_t epd, off_t loffset, size_t len, off_t
- roffset, int rma_flags);
-
-/**
- * scif_vreadfrom() - Copy from a remote address space
- * @epd: endpoint descriptor
- * @addr: address to which to copy
- * @len: length of range to copy
- * @roffset: offset in remote registered address space
- * from which to copy
- * @rma_flags: transfer mode flags
- *
- * scif_vreadfrom() copies len bytes from the remote registered address
- * space of the peer of endpoint epd, starting at the offset roffset, to local
- * memory, starting at addr.
- *
- * The specified range [roffset, roffset + len - 1] must be within some
- * registered window or windows of the remote nodes. The range may
- * intersect multiple registered windows, but only if those windows are
- * contiguous in the registered address space.
- *
- * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
 - * programmed read/writes. Otherwise the data is copied using DMA. If
 - * rma_flags includes SCIF_RMA_SYNC, then scif_vreadfrom() will return after
 - * the transfer is complete. Otherwise, the transfer may be performed
 - * asynchronously. The order in which any two asynchronous RMA operations complete
- * is non-deterministic. The synchronization functions, scif_fence_mark()/
- * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
- * the completion of asynchronous RMA operations on the same endpoint.
- *
- * The DMA transfer of individual bytes is not guaranteed to complete in
- * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
- * cacheline or partial cacheline of the source range will become visible on
- * the destination node after all other transferred data in the source
- * range has become visible on the destination node.
- *
- * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back
 - * the specified local memory range may remain in a pinned state even after
- * the specified transfer completes. This may reduce overhead if some or all of
- * the same virtual address range is referenced in a subsequent call of
- * scif_vreadfrom() or scif_vwriteto().
- *
- * The optimal DMA performance will likely be realized if both
- * addr and roffset are cacheline aligned (are a multiple of 64). Lower
- * performance will likely be realized if addr and roffset are not
- * cacheline aligned but are separated by some multiple of 64. The lowest level
- * of performance is likely if addr and roffset are not separated by a
- * multiple of 64.
- *
- * The rma_flags argument is formed by ORing together zero or more of the
- * following values.
- * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
- * engine.
- * SCIF_RMA_USECACHE - enable registration caching
- * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
- * transfer has completed. Passing this flag results in the
- * current implementation busy waiting and consuming CPU cycles
- * while the DMA transfer is in progress for best performance by
- * avoiding the interrupt latency.
- * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
- * the source range becomes visible on the destination node
- * after all other transferred data in the source range has
- * become visible on the destination
- *
- * Return:
- * Upon successful completion, scif_vreadfrom() returns 0; otherwise in user
- * mode -1 is returned and errno is set to indicate the error; in kernel mode
- * the negative of one of the following errors is returned.
- *
- * Errors:
 - * EACCES - Attempt to write to a read-only range
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - rma_flags is invalid
 - * ENODEV - The remote node is lost, or it existed but is no longer in the
 - * network (it may have crashed)
- * ENOTCONN - The endpoint is not connected
- * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for the
 - * registered address space of the peer of epd
- */
-int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len, off_t roffset,
- int rma_flags);
-
-/**
- * scif_vwriteto() - Copy to a remote address space
- * @epd: endpoint descriptor
- * @addr: address from which to copy
- * @len: length of range to copy
- * @roffset: offset in remote registered address space to
- * which to copy
- * @rma_flags: transfer mode flags
- *
- * scif_vwriteto() copies len bytes from the local memory, starting at addr, to
- * the remote registered address space of the peer of endpoint epd, starting at
- * the offset roffset.
- *
- * The specified range [roffset, roffset + len - 1] must be within some
- * registered window or windows of the remote nodes. The range may intersect
- * multiple registered windows, but only if those windows are contiguous in the
- * registered address space.
- *
- * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
 - * programmed read/writes. Otherwise the data is copied using DMA. If
 - * rma_flags includes SCIF_RMA_SYNC, then scif_vwriteto() will return after
 - * the transfer is complete. Otherwise, the transfer may be performed
 - * asynchronously. The order in which any two asynchronous RMA operations complete
- * is non-deterministic. The synchronization functions, scif_fence_mark()/
- * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
- * the completion of asynchronous RMA operations on the same endpoint.
- *
- * The DMA transfer of individual bytes is not guaranteed to complete in
- * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
- * cacheline or partial cacheline of the source range will become visible on
- * the destination node after all other transferred data in the source
- * range has become visible on the destination node.
- *
- * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back
 - * the specified local memory range may remain in a pinned state even after
- * the specified transfer completes. This may reduce overhead if some or all of
- * the same virtual address range is referenced in a subsequent call of
- * scif_vreadfrom() or scif_vwriteto().
- *
- * The optimal DMA performance will likely be realized if both
 - * addr and roffset are cacheline aligned (are a multiple of 64). Lower
 - * performance will likely be realized if addr and roffset are not cacheline
 - * aligned but are separated by some multiple of 64. The lowest level of
 - * performance is likely if addr and roffset are not separated by a multiple of
- * 64.
- *
- * The rma_flags argument is formed by ORing together zero or more of the
- * following values.
- * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
- * engine.
- * SCIF_RMA_USECACHE - allow registration caching
- * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
- * transfer has completed. Passing this flag results in the
- * current implementation busy waiting and consuming CPU cycles
- * while the DMA transfer is in progress for best performance by
- * avoiding the interrupt latency.
- * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
- * the source range becomes visible on the destination node
- * after all other transferred data in the source range has
- * become visible on the destination
- *
- * Return:
- * Upon successful completion, scif_vwriteto() returns 0; otherwise in user
- * mode -1 is returned and errno is set to indicate the error; in kernel mode
- * the negative of one of the following errors is returned.
- *
- * Errors:
 - * EACCES - Attempt to write to a read-only range
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - rma_flags is invalid
 - * ENODEV - The remote node is lost, or it existed but is no longer in the
 - * network (it may have crashed)
- * ENOTCONN - The endpoint is not connected
- * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for the
 - * registered address space of the peer of epd
- */
-int scif_vwriteto(scif_epd_t epd, void *addr, size_t len, off_t roffset,
- int rma_flags);
-
-/**
- * scif_fence_mark() - Mark previously issued RMAs
- * @epd: endpoint descriptor
- * @flags: control flags
- * @mark: marked value returned as output.
- *
- * scif_fence_mark() returns after marking the current set of all uncompleted
- * RMAs initiated through the endpoint epd or the current set of all
- * uncompleted RMAs initiated through the peer of endpoint epd. The RMAs are
- * marked with a value returned at mark. The application may subsequently call
- * scif_fence_wait(), passing the value returned at mark, to await completion
- * of all RMAs so marked.
- *
- * The flags argument has exactly one of the following values.
- * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint
- * epd are marked
- * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer
- * of endpoint epd are marked
- *
- * Return:
- * Upon successful completion, scif_fence_mark() returns 0; otherwise in user
- * mode -1 is returned and errno is set to indicate the error; in kernel mode
- * the negative of one of the following errors is returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - flags is invalid
 - * ENODEV - The remote node is lost, or it existed but is no longer in the
 - * network (it may have crashed)
- * ENOTCONN - The endpoint is not connected
- * ENOMEM - Insufficient kernel memory was available
- */
-int scif_fence_mark(scif_epd_t epd, int flags, int *mark);
-
-/**
- * scif_fence_wait() - Wait for completion of marked RMAs
- * @epd: endpoint descriptor
- * @mark: mark request
- *
- * scif_fence_wait() returns after all RMAs marked with mark have completed.
- * The value passed in mark must have been obtained in a previous call to
- * scif_fence_mark().
- *
- * Return:
- * Upon successful completion, scif_fence_wait() returns 0; otherwise in user
- * mode -1 is returned and errno is set to indicate the error; in kernel mode
- * the negative of one of the following errors is returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
 - * ENODEV - The remote node is lost, or it existed but is no longer in the
 - * network (it may have crashed)
- * ENOTCONN - The endpoint is not connected
- * ENOMEM - Insufficient kernel memory was available
- */
-int scif_fence_wait(scif_epd_t epd, int mark);
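/*
 * Illustrative sketch, not part of this header: issue an asynchronous
 * write, then use a fence mark to await completion of everything issued
 * through epd, as an alternative to SCIF_RMA_SYNC's busy wait.
 */
static int example_async_push(scif_epd_t epd, off_t loff, off_t roff,
			      size_t len)
{
	int mark, ret;

	ret = scif_writeto(epd, loff, len, roff, 0);	/* async DMA */
	if (ret < 0)
		return ret;
	ret = scif_fence_mark(epd, SCIF_FENCE_INIT_SELF, &mark);
	if (ret < 0)
		return ret;
	return scif_fence_wait(epd, mark);
}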
-
-/**
- * scif_fence_signal() - Request a memory update on completion of RMAs
- * @epd: endpoint descriptor
- * @loff: local offset
 - * @lval: local value to write to loff
 - * @roff: remote offset
 - * @rval: remote value to write to roff
- * @flags: flags
- *
- * scif_fence_signal() returns after marking the current set of all uncompleted
- * RMAs initiated through the endpoint epd or marking the current set of all
- * uncompleted RMAs initiated through the peer of endpoint epd.
- *
- * If flags includes SCIF_SIGNAL_LOCAL, then on completion of the RMAs in the
- * marked set, lval is written to memory at the address corresponding to offset
- * loff in the local registered address space of epd. loff must be within a
- * registered window. If flags includes SCIF_SIGNAL_REMOTE, then on completion
- * of the RMAs in the marked set, rval is written to memory at the address
- * corresponding to offset roff in the remote registered address space of epd.
- * roff must be within a remote registered window of the peer of epd. Note
- * that any specified offset must be DWORD (4 byte / 32 bit) aligned.
- *
- * The flags argument is formed by OR'ing together the following.
- * Exactly one of the following values.
- * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint
- * epd are marked
- * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer
- * of endpoint epd are marked
- * One or more of the following values.
- * SCIF_SIGNAL_LOCAL - On completion of the marked set of RMAs, write lval to
- * memory at the address corresponding to offset loff in the local
- * registered address space of epd.
- * SCIF_SIGNAL_REMOTE - On completion of the marked set of RMAs, write rval to
- * memory at the address corresponding to offset roff in the remote
- * registered address space of epd.
- *
- * Return:
- * Upon successful completion, scif_fence_signal() returns 0; otherwise in
- * user mode -1 is returned and errno is set to indicate the error; in kernel
- * mode the negative of one of the following errors is returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - flags is invalid, or loff or roff are not DWORD aligned
 - * ENODEV - The remote node is lost, or it existed but is no longer in the
 - * network (it may have crashed)
- * ENOTCONN - The endpoint is not connected
 - * ENXIO - loff is invalid for the registered address space of epd, or roff is
 - * invalid for the registered address space of the peer of epd
- */
-int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval, off_t roff,
- u64 rval, int flags);
-
-/**
- * scif_get_node_ids() - Return information about online nodes
- * @nodes: array in which to return online node IDs
- * @len: number of entries in the nodes array
- * @self: address to place the node ID of the local node
- *
- * scif_get_node_ids() fills in the nodes array with up to len node IDs of the
- * nodes in the SCIF network. If there is not enough space in nodes, as
- * indicated by the len parameter, only len node IDs are returned in nodes. The
- * return value of scif_get_node_ids() is the total number of nodes currently in
- * the SCIF network. By checking the return value against the len parameter,
- * the user may determine if enough space for nodes was allocated.
- *
- * The node ID of the local node is returned at self.
- *
- * Return:
- * Upon successful completion, scif_get_node_ids() returns the actual number of
- * online nodes in the SCIF network including 'self'; otherwise in user mode
- * -1 is returned and errno is set to indicate the error; in kernel mode no
- * errors are returned.
- */
-int scif_get_node_ids(u16 *nodes, int len, u16 *self);
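/*
 * Illustrative sketch, not part of this header: detect a nodes array that
 * was too small by comparing the return value, which is the total node
 * count, against the array length passed in.
 */
static void example_count_nodes(void)
{
	u16 nodes[8], self;
	int total = scif_get_node_ids(nodes, ARRAY_SIZE(nodes), &self);

	if (total > (int)ARRAY_SIZE(nodes))
		pr_info("SCIF: %d nodes online, only %zu reported\n",
			total, ARRAY_SIZE(nodes));
}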
-
-/**
- * scif_pin_pages() - Pin a set of pages
- * @addr: Virtual address of range to pin
- * @len: Length of range to pin
- * @prot_flags: Page protection flags
- * @map_flags: Page classification flags
- * @pinned_pages: Handle to pinned pages
- *
- * scif_pin_pages() pins (locks in physical memory) the physical pages which
- * back the range of virtual address pages starting at addr and continuing for
- * len bytes. addr and len are constrained to be multiples of the page size. A
- * successful scif_pin_pages() call returns a handle to pinned_pages which may
- * be used in subsequent calls to scif_register_pinned_pages().
- *
- * The pages will remain pinned as long as there is a reference against the
- * scif_pinned_pages_t value returned by scif_pin_pages() and until
- * scif_unpin_pages() is called, passing the scif_pinned_pages_t value. A
- * reference is added to a scif_pinned_pages_t value each time a window is
- * created by calling scif_register_pinned_pages() and passing the
- * scif_pinned_pages_t value. A reference is removed from a
- * scif_pinned_pages_t value each time such a window is deleted.
- *
- * Subsequent operations which change the memory pages to which virtual
- * addresses are mapped (such as mmap(), munmap()) have no effect on the
- * scif_pinned_pages_t value or windows created against it.
- *
- * If the process will fork(), it is recommended that the registered
- * virtual address range be marked with MADV_DONTFORK. Doing so will prevent
- * problems due to copy-on-write semantics.
- *
- * The prot_flags argument is formed by OR'ing together one or more of the
- * following values.
- * SCIF_PROT_READ - allow read operations against the pages
- * SCIF_PROT_WRITE - allow write operations against the pages
- * The map_flags argument can be set as SCIF_MAP_KERNEL to interpret addr as a
- * kernel space address. By default, addr is interpreted as a user space
- * address.
- *
- * Return:
- * Upon successful completion, scif_pin_pages() returns 0; otherwise the
- * negative of one of the following errors is returned.
- *
- * Errors:
- * EINVAL - prot_flags is invalid, map_flags is invalid, or offset is negative
- * ENOMEM - Not enough space
- */
-int scif_pin_pages(void *addr, size_t len, int prot_flags, int map_flags,
- scif_pinned_pages_t *pinned_pages);
-
-/**
- * scif_unpin_pages() - Unpin a set of pages
- * @pinned_pages: Handle to pinned pages to be unpinned
- *
- * scif_unpin_pages() prevents scif_register_pinned_pages() from registering new
- * windows against pinned_pages. The physical pages represented by pinned_pages
- * will remain pinned until all windows previously registered against
- * pinned_pages are deleted (the window is scif_unregister()'d and all
 - * references to the window are removed; see scif_unregister()).
- *
 - * pinned_pages must have been obtained from a previous call to scif_pin_pages().
- * After calling scif_unpin_pages(), it is an error to pass pinned_pages to
- * scif_register_pinned_pages().
- *
- * Return:
- * Upon successful completion, scif_unpin_pages() returns 0; otherwise the
- * negative of one of the following errors is returned.
- *
- * Errors:
- * EINVAL - pinned_pages is not valid
- */
-int scif_unpin_pages(scif_pinned_pages_t pinned_pages);
-
-/**
- * scif_register_pinned_pages() - Mark a memory region for remote access.
- * @epd: endpoint descriptor
- * @pinned_pages: Handle to pinned pages
- * @offset: Registered address space offset
- * @map_flags: Flags which control where pages are mapped
- *
- * The scif_register_pinned_pages() function opens a window, a range of whole
- * pages of the registered address space of the endpoint epd, starting at
- * offset po. The value of po, further described below, is a function of the
- * parameters offset and pinned_pages, and the value of map_flags. Each page of
- * the window represents a corresponding physical memory page of the range
- * represented by pinned_pages; the length of the window is the same as the
- * length of range represented by pinned_pages. A successful
- * scif_register_pinned_pages() call returns po as the return value.
- *
- * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset
- * exactly, and offset is constrained to be a multiple of the page size. The
- * mapping established by scif_register_pinned_pages() will not replace any
- * existing registration; an error is returned if any page of the new window
- * would intersect an existing window.
- *
- * When SCIF_MAP_FIXED is not set, the implementation uses offset in an
- * implementation-defined manner to arrive at po. The po so chosen will be an
- * area of the registered address space that the implementation deems suitable
- * for a mapping of the required size. An offset value of 0 is interpreted as
- * granting the implementation complete freedom in selecting po, subject to
- * constraints described below. A non-zero value of offset is taken to be a
- * suggestion of an offset near which the mapping should be placed. When the
- * implementation selects a value for po, it does not replace any extant
- * window. In all cases, po will be a multiple of the page size.
- *
- * The physical pages which are so represented by a window are available for
- * access in calls to scif_get_pages(), scif_readfrom(), scif_writeto(),
- * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the
- * physical pages represented by the window will not be reused by the memory
- * subsystem for any other purpose. Note that the same physical page may be
- * represented by multiple windows.
- *
- * Windows created by scif_register_pinned_pages() are unregistered by
- * scif_unregister().
- *
 - * The map_flags argument can be set to SCIF_MAP_FIXED, in which case offset
 - * is interpreted as a fixed offset.
- *
- * Return:
- * Upon successful completion, scif_register_pinned_pages() returns the offset
- * at which the mapping was placed (po); otherwise the negative of one of the
- * following errors is returned.
- *
- * Errors:
- * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags and pages in the new window
- * would intersect an existing window
- * EAGAIN - The mapping could not be performed due to lack of resources
- * ECONNRESET - Connection reset by peer
- * EINVAL - map_flags is invalid, or SCIF_MAP_FIXED is set in map_flags, and
- * offset is not a multiple of the page size, or offset is negative
 - * ENODEV - The remote node is lost, or it existed but is no longer in the
 - * network (it may have crashed)
- * ENOMEM - Not enough space
- * ENOTCONN - The endpoint is not connected
- */
-off_t scif_register_pinned_pages(scif_epd_t epd,
- scif_pinned_pages_t pinned_pages,
- off_t offset, int map_flags);
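/*
 * Illustrative sketch, not part of this header: pin a page-aligned kernel
 * buffer, expose it through a window, and drop the pin reference if
 * registration fails. kbuf and PAGE_SIZE are example values.
 */
static off_t example_pin_and_register(scif_epd_t epd, void *kbuf)
{
	scif_pinned_pages_t pp;
	off_t po;
	int ret;

	ret = scif_pin_pages(kbuf, PAGE_SIZE,
			     SCIF_PROT_READ | SCIF_PROT_WRITE,
			     SCIF_MAP_KERNEL, &pp);
	if (ret < 0)
		return ret;
	po = scif_register_pinned_pages(epd, pp, 0, 0);
	if (po < 0)
		scif_unpin_pages(pp);	/* no window holds a reference */
	return po;
}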
-
-/**
- * scif_get_pages() - Add references to remote registered pages
- * @epd: endpoint descriptor
- * @offset: remote registered offset
- * @len: length of range of pages
- * @pages: returned scif_range structure
- *
- * scif_get_pages() returns the addresses of the physical pages represented by
- * those pages of the registered address space of the peer of epd, starting at
- * offset and continuing for len bytes. offset and len are constrained to be
- * multiples of the page size.
- *
- * All of the pages in the specified range [offset, offset + len - 1] must be
- * within a single window of the registered address space of the peer of epd.
- *
- * The addresses are returned as a virtually contiguous array pointed to by the
- * phys_addr component of the scif_range structure whose address is returned in
- * pages. The nr_pages component of scif_range is the length of the array. The
- * prot_flags component of scif_range holds the protection flag value passed
- * when the pages were registered.
- *
- * Each physical page whose address is returned by scif_get_pages() remains
- * available and will not be released for reuse until the scif_range structure
- * is returned in a call to scif_put_pages(). The scif_range structure returned
- * by scif_get_pages() must be unmodified.
- *
- * It is an error to call scif_close() on an endpoint on which a scif_range
- * structure of that endpoint has not been returned to scif_put_pages().
- *
- * Return:
- * Upon successful completion, scif_get_pages() returns 0; otherwise the
- * negative of one of the following errors is returned.
- * Errors:
- * ECONNRESET - Connection reset by peer.
- * EINVAL - offset is not a multiple of the page size, or offset is negative, or
- * len is not a multiple of the page size
 - * ENODEV - The remote node is lost, or it existed but is no longer in the
 - * network (it may have crashed)
- * ENOTCONN - The endpoint is not connected
- * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid
 - * for the registered address space of the peer of epd
- */
-int scif_get_pages(scif_epd_t epd, off_t offset, size_t len,
- struct scif_range **pages);
-
-/**
- * scif_put_pages() - Remove references from remote registered pages
- * @pages: pages to be returned
- *
- * scif_put_pages() releases a scif_range structure previously obtained by
- * calling scif_get_pages(). The physical pages represented by pages may
- * be reused when the window which represented those pages is unregistered.
- * Therefore, those pages must not be accessed after calling scif_put_pages().
- *
- * Return:
- * Upon successful completion, scif_put_pages() returns 0; otherwise the
- * negative of one of the following errors is returned.
- * Errors:
- * EINVAL - pages does not point to a valid scif_range structure, or
- * the scif_range structure pointed to by pages was already returned
 - * ENODEV - The remote node is lost, or it existed but is no longer in the
 - * network (it may have crashed)
- * ENOTCONN - The endpoint is not connected
- */
-int scif_put_pages(struct scif_range *pages);
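/*
 * Illustrative sketch, not part of this header: look up the bus addresses
 * of a remote window and return the references before the endpoint is
 * closed, as required above.
 */
static int example_inspect_remote(scif_epd_t epd, off_t roff, size_t len)
{
	struct scif_range *pages;
	int i, ret;

	ret = scif_get_pages(epd, roff, len, &pages);
	if (ret < 0)
		return ret;
	for (i = 0; i < pages->nr_pages; i++)
		pr_debug("page %d at %pad\n", i, &pages->phys_addr[i]);
	return scif_put_pages(pages);
}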
-
-/**
- * scif_poll() - Wait for some event on an endpoint
- * @epds: Array of endpoint descriptors
- * @nepds: Length of epds
- * @timeout: Upper limit on time for which scif_poll() will block
- *
- * scif_poll() waits for one of a set of endpoints to become ready to perform
- * an I/O operation.
- *
- * The epds argument specifies the endpoint descriptors to be examined and the
- * events of interest for each endpoint descriptor. epds is a pointer to an
- * array with one member for each open endpoint descriptor of interest.
- *
- * The number of items in the epds array is specified in nepds. The epd field
- * of scif_pollepd is an endpoint descriptor of an open endpoint. The field
- * events is a bitmask specifying the events which the application is
- * interested in. The field revents is an output parameter, filled by the
- * kernel with the events that actually occurred. The bits returned in revents
- * can include any of those specified in events, or one of the values EPOLLERR,
- * EPOLLHUP, or EPOLLNVAL. (These three bits are meaningless in the events
- * field, and will be set in the revents field whenever the corresponding
- * condition is true.)
- *
- * If none of the events requested (and no error) has occurred for any of the
- * endpoint descriptors, then scif_poll() blocks until one of the events occurs.
- *
- * The timeout argument specifies an upper limit on the time for which
- * scif_poll() will block, in milliseconds. Specifying a negative value in
- * timeout means an infinite timeout.
- *
- * The following bits may be set in events and returned in revents.
- * EPOLLIN - Data may be received without blocking. For a connected
- * endpoint, this means that scif_recv() may be called without blocking. For a
- * listening endpoint, this means that scif_accept() may be called without
- * blocking.
- * EPOLLOUT - Data may be sent without blocking. For a connected endpoint, this
- * means that scif_send() may be called without blocking. EPOLLOUT may also be
- * used to block waiting for a non-blocking connect to complete. This bit value
- * has no meaning for a listening endpoint and is ignored if specified.
- *
- * The following bits are only returned in revents, and are ignored if set in
- * events.
- * EPOLLERR - An error occurred on the endpoint
- * EPOLLHUP - The connection to the peer endpoint was disconnected
- * EPOLLNVAL - The specified endpoint descriptor is invalid.
- *
- * Return:
- * Upon successful completion, scif_poll() returns a non-negative value. A
- * positive value indicates the total number of endpoint descriptors that have
- * been selected (that is, endpoint descriptors for which the revents member is
- * non-zero). A value of 0 indicates that the call timed out and no endpoint
- * descriptors have been selected. Otherwise in user mode -1 is returned and
- * errno is set to indicate the error; in kernel mode the negative of one of
- * the following errors is returned.
- *
- * Errors:
- * EINTR - A signal occurred before any requested event
- * EINVAL - The nepds argument is greater than {OPEN_MAX}
- * ENOMEM - There was no space to allocate file descriptor tables
- */
-int scif_poll(struct scif_pollepd *epds, unsigned int nepds, long timeout);
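
As a worked illustration of the semantics described above, a minimal sketch of a kernel-mode caller waiting for readability on an already-connected endpoint; the helper name and timeout are hypothetical:

/* Minimal sketch, assuming kernel mode: wait up to 2s for epd readability. */
static int scif_wait_readable(scif_epd_t epd)
{
	struct scif_pollepd pollepd = {
		.epd = epd,
		.events = EPOLLIN,
	};
	int rc;

	rc = scif_poll(&pollepd, 1, 2000);
	if (rc < 0)		/* negative errno in kernel mode */
		return rc;
	if (rc == 0)		/* timed out, no endpoint selected */
		return -ETIMEDOUT;
	if (pollepd.revents & (EPOLLERR | EPOLLHUP | EPOLLNVAL))
		return -EIO;
	return 0;		/* scif_recv() may now be called */
}
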
-
-/**
- * scif_client_register() - Register a SCIF client
- * @client: client to be registered
- *
- * scif_client_register() registers a SCIF client. The probe() method
- * of the client is called when SCIF peer devices come online and the
- * remove() method is called when the peer devices disappear.
- *
- * Return:
- * Upon successful completion, scif_client_register() returns a non-negative
- * value. Otherwise the return value is the same as subsys_interface_register()
- * in the kernel.
- */
-int scif_client_register(struct scif_client *client);
-
-/**
- * scif_client_unregister() - Unregister a SCIF client
- * @client: client to be unregistered
- *
- * scif_client_unregister() unregisters a SCIF client.
- *
- * Return:
- * None
- */
-void scif_client_unregister(struct scif_client *client);
-
-#endif /* __SCIF_H__ */
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index 5c873a59b387..4f765bc788ff 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -2,13 +2,20 @@
/*
* SCMI Message Protocol driver header
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2021 ARM Ltd.
*/
+
+#ifndef _LINUX_SCMI_PROTOCOL_H
+#define _LINUX_SCMI_PROTOCOL_H
+
+#include <linux/bitfield.h>
#include <linux/device.h>
+#include <linux/notifier.h>
#include <linux/types.h>
-#define SCMI_MAX_STR_SIZE 16
-#define SCMI_MAX_NUM_RATES 16
+#define SCMI_MAX_STR_SIZE 64
+#define SCMI_SHORT_NAME_MAX_SIZE 16
+#define SCMI_MAX_NUM_RATES 16
/**
* struct scmi_revision_info - version information structure
@@ -30,13 +37,16 @@ struct scmi_revision_info {
u8 num_protocols;
u8 num_agents;
u32 impl_ver;
- char vendor_id[SCMI_MAX_STR_SIZE];
- char sub_vendor_id[SCMI_MAX_STR_SIZE];
+ char vendor_id[SCMI_SHORT_NAME_MAX_SIZE];
+ char sub_vendor_id[SCMI_SHORT_NAME_MAX_SIZE];
};
struct scmi_clock_info {
char name[SCMI_MAX_STR_SIZE];
+ unsigned int enable_latency;
bool rate_discrete;
+ bool rate_changed_notifications;
+ bool rate_change_requested_notifications;
union {
struct {
int num_rates;
@@ -50,10 +60,18 @@ struct scmi_clock_info {
};
};
+enum scmi_power_scale {
+ SCMI_POWER_BOGOWATTS,
+ SCMI_POWER_MILLIWATTS,
+ SCMI_POWER_MICROWATTS
+};
+
struct scmi_handle;
+struct scmi_device;
+struct scmi_protocol_handle;
/**
- * struct scmi_clk_ops - represents the various operations provided
+ * struct scmi_clk_proto_ops - represents the various operations provided
* by SCMI Clock Protocol
*
* @count_get: get the count of clocks provided by SCMI
@@ -63,21 +81,24 @@ struct scmi_handle;
* @enable: enables the specified clock
* @disable: disables the specified clock
*/
-struct scmi_clk_ops {
- int (*count_get)(const struct scmi_handle *handle);
+struct scmi_clk_proto_ops {
+ int (*count_get)(const struct scmi_protocol_handle *ph);
- const struct scmi_clock_info *(*info_get)
- (const struct scmi_handle *handle, u32 clk_id);
- int (*rate_get)(const struct scmi_handle *handle, u32 clk_id,
+ const struct scmi_clock_info __must_check *(*info_get)
+ (const struct scmi_protocol_handle *ph, u32 clk_id);
+ int (*rate_get)(const struct scmi_protocol_handle *ph, u32 clk_id,
u64 *rate);
- int (*rate_set)(const struct scmi_handle *handle, u32 clk_id,
+ int (*rate_set)(const struct scmi_protocol_handle *ph, u32 clk_id,
u64 rate);
- int (*enable)(const struct scmi_handle *handle, u32 clk_id);
- int (*disable)(const struct scmi_handle *handle, u32 clk_id);
+ int (*enable)(const struct scmi_protocol_handle *ph, u32 clk_id);
+ int (*disable)(const struct scmi_protocol_handle *ph, u32 clk_id);
+ int (*enable_atomic)(const struct scmi_protocol_handle *ph, u32 clk_id);
+ int (*disable_atomic)(const struct scmi_protocol_handle *ph,
+ u32 clk_id);
};
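
A minimal sketch of how a consumer might drive these ops through a protocol handle; the helper name and the rate-doubling policy are illustrative only, and ph is assumed to come from devm_protocol_get(), shown later in this patch:

static int clk_rate_double(const struct scmi_clk_proto_ops *ops,
			   const struct scmi_protocol_handle *ph, u32 clk_id)
{
	u64 rate;
	int ret;

	ret = ops->rate_get(ph, clk_id, &rate);
	if (ret)
		return ret;

	/* Request twice the current rate; the platform may clamp it. */
	return ops->rate_set(ph, clk_id, rate * 2);
}
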
/**
- * struct scmi_perf_ops - represents the various operations provided
+ * struct scmi_perf_proto_ops - represents the various operations provided
* by SCMI Performance Protocol
*
* @limits_set: sets limits on the performance level of a domain
@@ -93,31 +114,38 @@ struct scmi_clk_ops {
* to sustained performance level mapping
* @est_power_get: gets the estimated power cost for a given performance domain
* at a given frequency
+ * @fast_switch_possible: indicates if fast DVFS switching is possible or not
+ * for a given device
+ * @power_scale_get: returns the scale (bogo-, milli- or micro-watts) in
+ * which the platform reports its power values
*/
-struct scmi_perf_ops {
- int (*limits_set)(const struct scmi_handle *handle, u32 domain,
+struct scmi_perf_proto_ops {
+ int (*limits_set)(const struct scmi_protocol_handle *ph, u32 domain,
u32 max_perf, u32 min_perf);
- int (*limits_get)(const struct scmi_handle *handle, u32 domain,
+ int (*limits_get)(const struct scmi_protocol_handle *ph, u32 domain,
u32 *max_perf, u32 *min_perf);
- int (*level_set)(const struct scmi_handle *handle, u32 domain,
+ int (*level_set)(const struct scmi_protocol_handle *ph, u32 domain,
u32 level, bool poll);
- int (*level_get)(const struct scmi_handle *handle, u32 domain,
+ int (*level_get)(const struct scmi_protocol_handle *ph, u32 domain,
u32 *level, bool poll);
int (*device_domain_id)(struct device *dev);
- int (*transition_latency_get)(const struct scmi_handle *handle,
+ int (*transition_latency_get)(const struct scmi_protocol_handle *ph,
struct device *dev);
- int (*device_opps_add)(const struct scmi_handle *handle,
+ int (*device_opps_add)(const struct scmi_protocol_handle *ph,
struct device *dev);
- int (*freq_set)(const struct scmi_handle *handle, u32 domain,
+ int (*freq_set)(const struct scmi_protocol_handle *ph, u32 domain,
unsigned long rate, bool poll);
- int (*freq_get)(const struct scmi_handle *handle, u32 domain,
+ int (*freq_get)(const struct scmi_protocol_handle *ph, u32 domain,
unsigned long *rate, bool poll);
- int (*est_power_get)(const struct scmi_handle *handle, u32 domain,
+ int (*est_power_get)(const struct scmi_protocol_handle *ph, u32 domain,
unsigned long *rate, unsigned long *power);
+ bool (*fast_switch_possible)(const struct scmi_protocol_handle *ph,
+ struct device *dev);
+ enum scmi_power_scale (*power_scale_get)(const struct scmi_protocol_handle *ph);
};
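
A hedged sketch combining the new @fast_switch_possible query with @freq_set, assuming polling mode is what a caller would pick for atomic-capable domains; the helper name is hypothetical:

static int perf_set_rate(const struct scmi_perf_proto_ops *ops,
			 const struct scmi_protocol_handle *ph,
			 struct device *dev, u32 domain, unsigned long rate)
{
	/* Poll for completion only where fast (atomic) switching is usable. */
	bool poll = ops->fast_switch_possible(ph, dev);

	return ops->freq_set(ph, domain, rate, poll);
}
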
/**
- * struct scmi_power_ops - represents the various operations provided
+ * struct scmi_power_proto_ops - represents the various operations provided
* by SCMI Power Protocol
*
* @num_domains_get: get the count of power domains provided by SCMI
@@ -125,9 +153,10 @@ struct scmi_perf_ops {
* @state_set: sets the power state of a power domain
* @state_get: gets the power state of a power domain
*/
-struct scmi_power_ops {
- int (*num_domains_get)(const struct scmi_handle *handle);
- char *(*name_get)(const struct scmi_handle *handle, u32 domain);
+struct scmi_power_proto_ops {
+ int (*num_domains_get)(const struct scmi_protocol_handle *ph);
+ const char *(*name_get)(const struct scmi_protocol_handle *ph,
+ u32 domain);
#define SCMI_POWER_STATE_TYPE_SHIFT 30
#define SCMI_POWER_STATE_ID_MASK (BIT(28) - 1)
#define SCMI_POWER_STATE_PARAM(type, id) \
@@ -135,19 +164,186 @@ struct scmi_power_ops {
((id) & SCMI_POWER_STATE_ID_MASK))
#define SCMI_POWER_STATE_GENERIC_ON SCMI_POWER_STATE_PARAM(0, 0)
#define SCMI_POWER_STATE_GENERIC_OFF SCMI_POWER_STATE_PARAM(1, 0)
- int (*state_set)(const struct scmi_handle *handle, u32 domain,
+ int (*state_set)(const struct scmi_protocol_handle *ph, u32 domain,
u32 state);
- int (*state_get)(const struct scmi_handle *handle, u32 domain,
+ int (*state_get)(const struct scmi_protocol_handle *ph, u32 domain,
u32 *state);
};
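
The SCMI_POWER_STATE_PARAM() macro packs the state type into bits 31:30 and the state ID into bits 27:0. A short sketch, with a hypothetical helper, of turning a domain off and reading the state back:

static int power_cycle_domain(const struct scmi_power_proto_ops *ops,
			      const struct scmi_protocol_handle *ph, u32 dom)
{
	u32 state;
	int ret;

	ret = ops->state_set(ph, dom, SCMI_POWER_STATE_GENERIC_OFF);
	if (ret)
		return ret;

	ret = ops->state_get(ph, dom, &state);
	if (ret)
		return ret;

	return state == SCMI_POWER_STATE_GENERIC_OFF ? 0 : -EIO;
}
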
+/**
+ * struct scmi_sensor_reading - represent a timestamped read
+ *
+ * Used by @reading_get_timestamped method.
+ *
+ * @value: The signed value sensor read.
+ * @timestamp: An unsigned timestamp for the sensor read, as provided by
+ * SCMI platform. Set to zero when not available.
+ */
+struct scmi_sensor_reading {
+ long long value;
+ unsigned long long timestamp;
+};
+
+/**
+ * struct scmi_range_attrs - specifies a sensor or axis values' range
+ * @min_range: The minimum value which can be represented by the sensor/axis.
+ * @max_range: The maximum value which can be represented by the sensor/axis.
+ */
+struct scmi_range_attrs {
+ long long min_range;
+ long long max_range;
+};
+
+/**
+ * struct scmi_sensor_axis_info - describes one sensor axis
+ * @id: The axis ID.
+ * @type: Axis type. Chosen from @enum scmi_sensor_class.
+ * @scale: Power-of-10 multiplier applied to the axis unit.
+ * @name: NULL-terminated string representing the axis name as advertised by
+ * the SCMI platform.
+ * @extended_attrs: Flag to indicate the presence of additional extended
+ * attributes for this axis.
+ * @resolution: Extended attribute representing the resolution of the axis.
+ * Set to 0 if not reported by this axis.
+ * @exponent: Extended attribute representing the power-of-10 multiplier that
+ * is applied to the resolution field. Set to 0 if not reported by
+ * this axis.
+ * @attrs: Extended attributes representing minimum and maximum values
+ * measurable by this axis. Set to 0 if not reported by this axis.
+ */
+struct scmi_sensor_axis_info {
+ unsigned int id;
+ unsigned int type;
+ int scale;
+ char name[SCMI_MAX_STR_SIZE];
+ bool extended_attrs;
+ unsigned int resolution;
+ int exponent;
+ struct scmi_range_attrs attrs;
+};
+
+/**
+ * struct scmi_sensor_intervals_info - describes number and type of available
+ * update intervals
+ * @segmented: Flag for segmented intervals' representation. When True there
+ * will be exactly 3 intervals in @desc, with each entry
+ * representing a member of a segment in this order:
+ * {lowest update interval, highest update interval, step size}
+ * @count: Number of intervals described in @desc.
+ * @desc: Array of @count interval descriptors, each a bitmask laid out as
+ * detailed in the SCMI specification; they can be accessed using the
+ * accompanying macros.
+ * @prealloc_pool: A small preallocated pool of @desc entries used to avoid
+ * dynamic allocations smaller than 64 bytes for small @count
+ * values.
+ */
+struct scmi_sensor_intervals_info {
+ bool segmented;
+ unsigned int count;
+#define SCMI_SENS_INTVL_SEGMENT_LOW 0
+#define SCMI_SENS_INTVL_SEGMENT_HIGH 1
+#define SCMI_SENS_INTVL_SEGMENT_STEP 2
+ unsigned int *desc;
+#define SCMI_SENS_INTVL_GET_SECS(x) FIELD_GET(GENMASK(20, 5), (x))
+#define SCMI_SENS_INTVL_GET_EXP(x) \
+ ({ \
+ int __signed_exp = FIELD_GET(GENMASK(4, 0), (x)); \
+ \
+ if (__signed_exp & BIT(4)) \
+ __signed_exp |= GENMASK(31, 5); \
+ __signed_exp; \
+ })
+#define SCMI_MAX_PREALLOC_POOL 16
+ unsigned int prealloc_pool[SCMI_MAX_PREALLOC_POOL];
+};
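
Each interval descriptor encodes an update interval as secs * 10^exp seconds. A short sketch decoding one entry with the macros above; the helper name is hypothetical:

static void interval_decode(const struct scmi_sensor_intervals_info *ii,
			    unsigned int idx)
{
	unsigned int desc = ii->desc[idx];
	unsigned int secs = SCMI_SENS_INTVL_GET_SECS(desc);
	int exp = SCMI_SENS_INTVL_GET_EXP(desc);

	/* e.g. secs=15, exp=-1 describes a 1.5 second update interval */
	pr_debug("interval[%u]: %u x 10^%d seconds\n", idx, secs, exp);
}
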
+
+/**
+ * struct scmi_sensor_info - represents information related to one of the
+ * available sensors.
+ * @id: Sensor ID.
+ * @type: Sensor type. Chosen from @enum scmi_sensor_class.
+ * @scale: Power-of-10 multiplier applied to the sensor unit.
+ * @num_trip_points: Number of maximum configurable trip points.
+ * @async: Flag for asynchronous read support.
+ * @update: Flag for continuous update notification support.
+ * @timestamped: Flag for timestamped read support.
+ * @tstamp_scale: Power-of-10 multiplier applied to the sensor timestamps to
+ * represent it in seconds.
+ * @num_axis: Number of supported axes, if any. Reported as 0 for scalar sensors.
+ * @axis: Pointer to an array of @num_axis descriptors.
+ * @intervals: Descriptor of available update intervals.
+ * @sensor_config: A bitmask reporting the current sensor configuration as
+ * detailed in the SCMI specification: it can be accessed and
+ * modified through the accompanying macros.
+ * @name: NULL-terminated string representing sensor name as advertised by
+ * SCMI platform.
+ * @extended_scalar_attrs: Flag to indicate the presence of additional extended
+ * attributes for this sensor.
+ * @sensor_power: Extended attribute representing the average power
+ * consumed by the sensor in microwatts (uW) when it is active.
+ * Reported here only for scalar sensors.
+ * Set to 0 if not reported by this sensor.
+ * @resolution: Extended attribute representing the resolution of the sensor.
+ * Reported here only for scalar sensors.
+ * Set to 0 if not reported by this sensor.
+ * @exponent: Extended attribute representing the power-of-10 multiplier that is
+ * applied to the resolution field.
+ * Reported here only for scalar sensors.
+ * Set to 0 if not reported by this sensor.
+ * @scalar_attrs: Extended attributes representing the minimum and maximum
+ * values measurable by this sensor.
+ * Reported here only for scalar sensors.
+ * Set to 0 if not reported by this sensor.
+ */
struct scmi_sensor_info {
- u32 id;
- u8 type;
- s8 scale;
- u8 num_trip_points;
+ unsigned int id;
+ unsigned int type;
+ int scale;
+ unsigned int num_trip_points;
bool async;
+ bool update;
+ bool timestamped;
+ int tstamp_scale;
+ unsigned int num_axis;
+ struct scmi_sensor_axis_info *axis;
+ struct scmi_sensor_intervals_info intervals;
+ unsigned int sensor_config;
+#define SCMI_SENS_CFG_UPDATE_SECS_MASK GENMASK(31, 16)
+#define SCMI_SENS_CFG_GET_UPDATE_SECS(x) \
+ FIELD_GET(SCMI_SENS_CFG_UPDATE_SECS_MASK, (x))
+
+#define SCMI_SENS_CFG_UPDATE_EXP_MASK GENMASK(15, 11)
+#define SCMI_SENS_CFG_GET_UPDATE_EXP(x) \
+ ({ \
+ int __signed_exp = \
+ FIELD_GET(SCMI_SENS_CFG_UPDATE_EXP_MASK, (x)); \
+ \
+ if (__signed_exp & BIT(4)) \
+ __signed_exp |= GENMASK(31, 5); \
+ __signed_exp; \
+ })
+
+#define SCMI_SENS_CFG_ROUND_MASK GENMASK(10, 9)
+#define SCMI_SENS_CFG_ROUND_AUTO 2
+#define SCMI_SENS_CFG_ROUND_UP 1
+#define SCMI_SENS_CFG_ROUND_DOWN 0
+
+#define SCMI_SENS_CFG_TSTAMP_ENABLED_MASK BIT(1)
+#define SCMI_SENS_CFG_TSTAMP_ENABLE 1
+#define SCMI_SENS_CFG_TSTAMP_DISABLE 0
+#define SCMI_SENS_CFG_IS_TSTAMP_ENABLED(x) \
+ FIELD_GET(SCMI_SENS_CFG_TSTAMP_ENABLED_MASK, (x))
+
+#define SCMI_SENS_CFG_SENSOR_ENABLED_MASK BIT(0)
+#define SCMI_SENS_CFG_SENSOR_ENABLE 1
+#define SCMI_SENS_CFG_SENSOR_DISABLE 0
char name[SCMI_MAX_STR_SIZE];
+#define SCMI_SENS_CFG_IS_ENABLED(x) FIELD_GET(BIT(0), (x))
+ bool extended_scalar_attrs;
+ unsigned int sensor_power;
+ unsigned int resolution;
+ int exponent;
+ struct scmi_range_attrs scalar_attrs;
};
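
Since @sensor_config is meant to be built and parsed with the macros interleaved above, here is a sketch composing a configuration word that enables a sensor with timestamping, using FIELD_PREP() from the linux/bitfield.h include added by this patch; the helper is hypothetical:

static u32 sensor_cfg_enable_tstamp(void)
{
	u32 cfg = 0;

	cfg |= FIELD_PREP(SCMI_SENS_CFG_SENSOR_ENABLED_MASK,
			  SCMI_SENS_CFG_SENSOR_ENABLE);
	cfg |= FIELD_PREP(SCMI_SENS_CFG_TSTAMP_ENABLED_MASK,
			  SCMI_SENS_CFG_TSTAMP_ENABLE);
	/* Round automatically; update interval fields left at zero. */
	cfg |= FIELD_PREP(SCMI_SENS_CFG_ROUND_MASK, SCMI_SENS_CFG_ROUND_AUTO);

	return cfg;
}
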
/*
@@ -156,39 +352,137 @@ struct scmi_sensor_info {
*/
enum scmi_sensor_class {
NONE = 0x0,
+ UNSPEC = 0x1,
TEMPERATURE_C = 0x2,
+ TEMPERATURE_F = 0x3,
+ TEMPERATURE_K = 0x4,
VOLTAGE = 0x5,
CURRENT = 0x6,
POWER = 0x7,
ENERGY = 0x8,
+ CHARGE = 0x9,
+ VOLTAMPERE = 0xA,
+ NITS = 0xB,
+ LUMENS = 0xC,
+ LUX = 0xD,
+ CANDELAS = 0xE,
+ KPA = 0xF,
+ PSI = 0x10,
+ NEWTON = 0x11,
+ CFM = 0x12,
+ RPM = 0x13,
+ HERTZ = 0x14,
+ SECS = 0x15,
+ MINS = 0x16,
+ HOURS = 0x17,
+ DAYS = 0x18,
+ WEEKS = 0x19,
+ MILS = 0x1A,
+ INCHES = 0x1B,
+ FEET = 0x1C,
+ CUBIC_INCHES = 0x1D,
+ CUBIC_FEET = 0x1E,
+ METERS = 0x1F,
+ CUBIC_CM = 0x20,
+ CUBIC_METERS = 0x21,
+ LITERS = 0x22,
+ FLUID_OUNCES = 0x23,
+ RADIANS = 0x24,
+ STERADIANS = 0x25,
+ REVOLUTIONS = 0x26,
+ CYCLES = 0x27,
+ GRAVITIES = 0x28,
+ OUNCES = 0x29,
+ POUNDS = 0x2A,
+ FOOT_POUNDS = 0x2B,
+ OUNCE_INCHES = 0x2C,
+ GAUSS = 0x2D,
+ GILBERTS = 0x2E,
+ HENRIES = 0x2F,
+ FARADS = 0x30,
+ OHMS = 0x31,
+ SIEMENS = 0x32,
+ MOLES = 0x33,
+ BECQUERELS = 0x34,
+ PPM = 0x35,
+ DECIBELS = 0x36,
+ DBA = 0x37,
+ DBC = 0x38,
+ GRAYS = 0x39,
+ SIEVERTS = 0x3A,
+ COLOR_TEMP_K = 0x3B,
+ BITS = 0x3C,
+ BYTES = 0x3D,
+ WORDS = 0x3E,
+ DWORDS = 0x3F,
+ QWORDS = 0x40,
+ PERCENTAGE = 0x41,
+ PASCALS = 0x42,
+ COUNTS = 0x43,
+ GRAMS = 0x44,
+ NEWTON_METERS = 0x45,
+ HITS = 0x46,
+ MISSES = 0x47,
+ RETRIES = 0x48,
+ OVERRUNS = 0x49,
+ UNDERRUNS = 0x4A,
+ COLLISIONS = 0x4B,
+ PACKETS = 0x4C,
+ MESSAGES = 0x4D,
+ CHARS = 0x4E,
+ ERRORS = 0x4F,
+ CORRECTED_ERRS = 0x50,
+ UNCORRECTABLE_ERRS = 0x51,
+ SQ_MILS = 0x52,
+ SQ_INCHES = 0x53,
+ SQ_FEET = 0x54,
+ SQ_CM = 0x55,
+ SQ_METERS = 0x56,
+ RADIANS_SEC = 0x57,
+ BPM = 0x58,
+ METERS_SEC_SQUARED = 0x59,
+ METERS_SEC = 0x5A,
+ CUBIC_METERS_SEC = 0x5B,
+ MM_MERCURY = 0x5C,
+ RADIANS_SEC_SQUARED = 0x5D,
+ OEM_UNIT = 0xFF
};
/**
- * struct scmi_sensor_ops - represents the various operations provided
+ * struct scmi_sensor_proto_ops - represents the various operations provided
* by SCMI Sensor Protocol
*
* @count_get: get the count of sensors provided by SCMI
* @info_get: get the information of the specified sensor
- * @trip_point_notify: control notifications on cross-over events for
- * the trip-points
* @trip_point_config: selects and configures a trip-point of interest
* @reading_get: gets the current value of the sensor
+ * @reading_get_timestamped: gets the current value and timestamp, when
+ * available, of the sensor (as of the v3.0 spec).
+ * Multi-axis sensors are supported when the sensor
+ * provides multiple axes and the @readings array
+ * holds @count entries equal to the sensor num_axis.
+ * @config_get: Get sensor current configuration
+ * @config_set: Set sensor current configuration
*/
-struct scmi_sensor_ops {
- int (*count_get)(const struct scmi_handle *handle);
-
- const struct scmi_sensor_info *(*info_get)
- (const struct scmi_handle *handle, u32 sensor_id);
- int (*trip_point_notify)(const struct scmi_handle *handle,
- u32 sensor_id, bool enable);
- int (*trip_point_config)(const struct scmi_handle *handle,
+struct scmi_sensor_proto_ops {
+ int (*count_get)(const struct scmi_protocol_handle *ph);
+ const struct scmi_sensor_info __must_check *(*info_get)
+ (const struct scmi_protocol_handle *ph, u32 sensor_id);
+ int (*trip_point_config)(const struct scmi_protocol_handle *ph,
u32 sensor_id, u8 trip_id, u64 trip_value);
- int (*reading_get)(const struct scmi_handle *handle, u32 sensor_id,
+ int (*reading_get)(const struct scmi_protocol_handle *ph, u32 sensor_id,
u64 *value);
+ int (*reading_get_timestamped)(const struct scmi_protocol_handle *ph,
+ u32 sensor_id, u8 count,
+ struct scmi_sensor_reading *readings);
+ int (*config_get)(const struct scmi_protocol_handle *ph,
+ u32 sensor_id, u32 *sensor_config);
+ int (*config_set)(const struct scmi_protocol_handle *ph,
+ u32 sensor_id, u32 sensor_config);
};
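
A sketch of a timestamped multi-axis read through these ops, under the assumption that @info_get returns NULL on failure and that the caller sizes @readings from num_axis; the helper name is hypothetical:

static int sensor_read_all_axes(const struct scmi_sensor_proto_ops *ops,
				const struct scmi_protocol_handle *ph, u32 id)
{
	struct scmi_sensor_reading readings[4];
	const struct scmi_sensor_info *si = ops->info_get(ph, id);
	unsigned int count;

	if (!si)			/* error convention assumed */
		return -EINVAL;

	count = si->num_axis ?: 1;	/* scalar sensors report num_axis 0 */
	if (count > ARRAY_SIZE(readings))
		return -E2BIG;

	return ops->reading_get_timestamped(ph, id, count, readings);
}
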
/**
- * struct scmi_reset_ops - represents the various operations provided
+ * struct scmi_reset_proto_ops - represents the various operations provided
* by SCMI Reset Protocol
*
* @num_domains_get: get the count of reset domains provided by SCMI
@@ -198,13 +492,247 @@ struct scmi_sensor_ops {
* @assert: explicitly assert reset signal of the specified reset domain
* @deassert: explicitly deassert reset signal of the specified reset domain
*/
-struct scmi_reset_ops {
- int (*num_domains_get)(const struct scmi_handle *handle);
- char *(*name_get)(const struct scmi_handle *handle, u32 domain);
- int (*latency_get)(const struct scmi_handle *handle, u32 domain);
- int (*reset)(const struct scmi_handle *handle, u32 domain);
- int (*assert)(const struct scmi_handle *handle, u32 domain);
- int (*deassert)(const struct scmi_handle *handle, u32 domain);
+struct scmi_reset_proto_ops {
+ int (*num_domains_get)(const struct scmi_protocol_handle *ph);
+ const char *(*name_get)(const struct scmi_protocol_handle *ph,
+ u32 domain);
+ int (*latency_get)(const struct scmi_protocol_handle *ph, u32 domain);
+ int (*reset)(const struct scmi_protocol_handle *ph, u32 domain);
+ int (*assert)(const struct scmi_protocol_handle *ph, u32 domain);
+ int (*deassert)(const struct scmi_protocol_handle *ph, u32 domain);
+};
+
+enum scmi_voltage_level_mode {
+ SCMI_VOLTAGE_LEVEL_SET_AUTO,
+ SCMI_VOLTAGE_LEVEL_SET_SYNC,
+};
+
+/**
+ * struct scmi_voltage_info - describe one available SCMI Voltage Domain
+ *
+ * @id: the domain ID as advertised by the platform
+ * @segmented: defines the layout of the entries of array @levels_uv.
+ * - when True the entries are to be interpreted as triplets,
+ * each defining a segment representing a range of equally
+ * spaced voltages: <lowest_volt>, <highest_volt>, <step_uV>
+ * - when False the entries simply represent a single discrete
+ * supported voltage level
+ * @negative_volts_allowed: True if any of the entries of @levels_uv represent
+ * a negative voltage.
+ * @async_level_set: True when the voltage domain supports asynchronous level
+ * set commands.
+ * @name: name assigned to the Voltage Domain by platform
+ * @num_levels: number of total entries in @levels_uv.
+ * @levels_uv: array of entries describing the available voltage levels for
+ * this domain.
+ */
+struct scmi_voltage_info {
+ unsigned int id;
+ bool segmented;
+ bool negative_volts_allowed;
+ bool async_level_set;
+ char name[SCMI_MAX_STR_SIZE];
+ unsigned int num_levels;
+#define SCMI_VOLTAGE_SEGMENT_LOW 0
+#define SCMI_VOLTAGE_SEGMENT_HIGH 1
+#define SCMI_VOLTAGE_SEGMENT_STEP 2
+ int *levels_uv;
+};
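
A sketch of walking a segmented @levels_uv array, interpreting each triplet as described above; the helper is hypothetical and assumes step > 0 with num_levels a multiple of 3 for segmented domains:

static unsigned int voltage_count_levels(const struct scmi_voltage_info *vi)
{
	unsigned int i, n = 0;

	if (!vi->segmented)
		return vi->num_levels;	/* one discrete level per entry */

	for (i = 0; i < vi->num_levels; i += 3) {
		int low = vi->levels_uv[i + SCMI_VOLTAGE_SEGMENT_LOW];
		int high = vi->levels_uv[i + SCMI_VOLTAGE_SEGMENT_HIGH];
		int step = vi->levels_uv[i + SCMI_VOLTAGE_SEGMENT_STEP];

		n += (high - low) / step + 1;
	}

	return n;
}
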
+
+/**
+ * struct scmi_voltage_proto_ops - represents the various operations provided
+ * by SCMI Voltage Protocol
+ *
+ * @num_domains_get: get the count of voltage domains provided by SCMI
+ * @info_get: get the information of the specified domain
+ * @config_set: set the config for the specified domain
+ * @config_get: get the config of the specified domain
+ * @level_set: set the voltage level for the specified domain
+ * @level_get: get the voltage level of the specified domain
+ */
+struct scmi_voltage_proto_ops {
+ int (*num_domains_get)(const struct scmi_protocol_handle *ph);
+ const struct scmi_voltage_info __must_check *(*info_get)
+ (const struct scmi_protocol_handle *ph, u32 domain_id);
+ int (*config_set)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 config);
+#define SCMI_VOLTAGE_ARCH_STATE_OFF 0x0
+#define SCMI_VOLTAGE_ARCH_STATE_ON 0x7
+ int (*config_get)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 *config);
+ int (*level_set)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ enum scmi_voltage_level_mode mode, s32 volt_uV);
+ int (*level_get)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ s32 *volt_uV);
+};
+
+/**
+ * struct scmi_powercap_info - Describe one available Powercap domain
+ *
+ * @id: Domain ID as advertised by the platform.
+ * @notify_powercap_cap_change: CAP change notification support.
+ * @notify_powercap_measurement_change: MEASUREMENTS change notifications
+ * support.
+ * @async_powercap_cap_set: Asynchronous CAP set support.
+ * @powercap_cap_config: CAP configuration support.
+ * @powercap_monitoring: Monitoring (measurements) support.
+ * @powercap_pai_config: PAI configuration support.
+ * @powercap_scale_mw: Domain reports power data in milliwatt units.
+ * @powercap_scale_uw: Domain reports power data in microwatt units.
+ * Note that, when both @powercap_scale_mw and
+ * @powercap_scale_uw are set to false, the domain
+ * reports power data on an abstract linear scale.
+ * @name: name assigned to the Powercap Domain by platform.
+ * @min_pai: Minimum configurable PAI.
+ * @max_pai: Maximum configurable PAI.
+ * @pai_step: Step size between two consecutive PAI values.
+ * @min_power_cap: Minimum configurable CAP.
+ * @max_power_cap: Maximum configurable CAP.
+ * @power_cap_step: Step size between two consecutive CAP values.
+ * @sustainable_power: Maximum sustainable power consumption for this domain
+ * under normal conditions.
+ * @accuracy: The accuracy with which the power is measured and reported in
+ * integral multiples of 0.001 percent.
+ * @parent_id: Identifier of the containing parent power capping domain, or the
+ * value 0xFFFFFFFF if this powercap domain is a root domain not
+ * contained in any other domain.
+ */
+struct scmi_powercap_info {
+ unsigned int id;
+ bool notify_powercap_cap_change;
+ bool notify_powercap_measurement_change;
+ bool async_powercap_cap_set;
+ bool powercap_cap_config;
+ bool powercap_monitoring;
+ bool powercap_pai_config;
+ bool powercap_scale_mw;
+ bool powercap_scale_uw;
+ bool fastchannels;
+ char name[SCMI_MAX_STR_SIZE];
+ unsigned int min_pai;
+ unsigned int max_pai;
+ unsigned int pai_step;
+ unsigned int min_power_cap;
+ unsigned int max_power_cap;
+ unsigned int power_cap_step;
+ unsigned int sustainable_power;
+ unsigned int accuracy;
+#define SCMI_POWERCAP_ROOT_ZONE_ID 0xFFFFFFFFUL
+ unsigned int parent_id;
+ struct scmi_fc_info *fc_info;
+};
+
+/**
+ * struct scmi_powercap_proto_ops - represents the various operations provided
+ * by SCMI Powercap Protocol
+ *
+ * @num_domains_get: get the count of powercap domains provided by SCMI.
+ * @info_get: get the information for the specified domain.
+ * @cap_get: get the current CAP value for the specified domain.
+ * @cap_set: set the CAP value for the specified domain to the provided value;
+ * if the domain supports setting the CAP with an asynchronous command,
+ * this request triggers an asynchronous transfer, but, if @ignore_dresp
+ * is set to true, the call still returns immediately without waiting
+ * for the related delayed response.
+ * @pai_get: get the current PAI value for the specified domain.
+ * @pai_set: set the PAI value for the specified domain to the provided value.
+ * @measurements_get: retrieve the current average power measurements for the
+ * specified domain and the related PAI upon which they
+ * are calculated.
+ * @measurements_threshold_set: set the desired low and high power thresholds
+ * to be used when registering for notification
+ * of type POWERCAP_MEASUREMENTS_NOTIFY with this
+ * powercap domain.
+ * Note that this must be called at least once
+ * before registering any callback with the usual
+ * @scmi_notify_ops; moreover, if this method is
+ * called while measurement notifications are already
+ * enabled, it also transparently updates the power
+ * thresholds configured in the SCMI backend
+ * server.
+ * @measurements_threshold_get: get the currently configured low and high power
+ * thresholds used when registering callbacks for
+ * notification POWERCAP_MEASUREMENTS_NOTIFY.
+ */
+struct scmi_powercap_proto_ops {
+ int (*num_domains_get)(const struct scmi_protocol_handle *ph);
+ const struct scmi_powercap_info __must_check *(*info_get)
+ (const struct scmi_protocol_handle *ph, u32 domain_id);
+ int (*cap_get)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 *power_cap);
+ int (*cap_set)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 power_cap, bool ignore_dresp);
+ int (*pai_get)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 *pai);
+ int (*pai_set)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 pai);
+ int (*measurements_get)(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *average_power, u32 *pai);
+ int (*measurements_threshold_set)(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 power_thresh_low,
+ u32 power_thresh_high);
+ int (*measurements_threshold_get)(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *power_thresh_low,
+ u32 *power_thresh_high);
+};
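
A sketch clamping a requested CAP into the advertised range before applying it, without waiting for the delayed response on async-capable domains; the error convention of @info_get and the helper name are assumptions:

static int powercap_apply(const struct scmi_powercap_proto_ops *ops,
			  const struct scmi_protocol_handle *ph,
			  u32 domain_id, u32 cap)
{
	const struct scmi_powercap_info *pi = ops->info_get(ph, domain_id);

	if (IS_ERR_OR_NULL(pi))		/* error convention assumed */
		return -EINVAL;

	cap = clamp(cap, pi->min_power_cap, pi->max_power_cap);

	/* ignore_dresp == true: return without the delayed response. */
	return ops->cap_set(ph, domain_id, cap, true);
}
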
+
+/**
+ * struct scmi_notify_ops - represents notifications' operations provided by
+ * SCMI core
+ * @devm_event_notifier_register: Managed registration of a notifier_block for
+ * the requested event
+ * @devm_event_notifier_unregister: Managed unregistration of a notifier_block
+ * for the requested event
+ * @event_notifier_register: Register a notifier_block for the requested event
+ * @event_notifier_unregister: Unregister a notifier_block for the requested
+ * event
+ *
+ * A user can register/unregister its own notifier_block for the desired
+ * event on a given platform instance, identified by the
+ * tuple: (proto_id, evt_id, src_id), using the provided register/unregister
+ * interface, where:
+ *
+ * @sdev: The scmi_device to use when calling the devres managed ops devm_
+ * @handle: The handle identifying the platform instance to use, when not
+ * calling the managed ops devm_
+ * @proto_id: The protocol ID as in SCMI Specification
+ * @evt_id: The message ID of the desired event as in SCMI Specification
+ * @src_id: A pointer to the desired source ID if different sources are
+ * possible for the protocol (like domain_id, sensor_id...etc)
+ *
+ * @src_id can be provided as NULL if it simply does NOT make sense for
+ * the protocol at hand, OR if the user is explicitly interested in
+ * receiving notifications from ANY existing source associated with the
+ * specified proto_id / evt_id.
+ *
+ * Received notifications are finally delivered to the registered users,
+ * invoking the callback provided with the notifier_block *nb as follows:
+ *
+ * int user_cb(nb, evt_id, report)
+ *
+ * with:
+ *
+ * @nb: The notifier block provided by the user
+ * @evt_id: The message ID of the delivered event
+ * @report: A custom struct describing the specific event delivered
+ */
+struct scmi_notify_ops {
+ int (*devm_event_notifier_register)(struct scmi_device *sdev,
+ u8 proto_id, u8 evt_id,
+ const u32 *src_id,
+ struct notifier_block *nb);
+ int (*devm_event_notifier_unregister)(struct scmi_device *sdev,
+ u8 proto_id, u8 evt_id,
+ const u32 *src_id,
+ struct notifier_block *nb);
+ int (*event_notifier_register)(const struct scmi_handle *handle,
+ u8 proto_id, u8 evt_id,
+ const u32 *src_id,
+ struct notifier_block *nb);
+ int (*event_notifier_unregister)(const struct scmi_handle *handle,
+ u8 proto_id, u8 evt_id,
+ const u32 *src_id,
+ struct notifier_block *nb);
};
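
A sketch of the registration flow just described, wiring a notifier for sensor trip-point events from any source (src_id == NULL); the callback and block names are illustrative, and the report type is the one defined at the end of this header:

static int trip_cb(struct notifier_block *nb, unsigned long evt_id, void *data)
{
	const struct scmi_sensor_trip_point_report *r = data;

	pr_info("sensor %u crossed trip point %u\n",
		r->sensor_id, r->trip_point_desc);

	return NOTIFY_OK;
}

static struct notifier_block trip_nb = { .notifier_call = trip_cb };

static int trip_register(struct scmi_device *sdev, const struct scmi_handle *h)
{
	return h->notify_ops->devm_event_notifier_register(sdev,
				SCMI_PROTOCOL_SENSOR,
				SCMI_EVENT_SENSOR_TRIP_POINT_EVENT,
				NULL, &trip_nb);
}
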
/**
@@ -212,36 +740,37 @@ struct scmi_reset_ops {
*
* @dev: pointer to the SCMI device
* @version: pointer to the structure containing SCMI version information
- * @power_ops: pointer to set of power protocol operations
- * @perf_ops: pointer to set of performance protocol operations
- * @clk_ops: pointer to set of clock protocol operations
- * @sensor_ops: pointer to set of sensor protocol operations
- * @reset_ops: pointer to set of reset protocol operations
- * @perf_priv: pointer to private data structure specific to performance
- * protocol(for internal use only)
- * @clk_priv: pointer to private data structure specific to clock
- * protocol(for internal use only)
- * @power_priv: pointer to private data structure specific to power
- * protocol(for internal use only)
- * @sensor_priv: pointer to private data structure specific to sensors
- * protocol(for internal use only)
- * @reset_priv: pointer to private data structure specific to reset
- * protocol(for internal use only)
+ * @devm_protocol_acquire: devres managed method to get hold of a protocol,
+ * causing its initialization and related resource
+ * accounting
+ * @devm_protocol_get: devres managed method to acquire a protocol and get specific
+ * operations and a dedicated protocol handler
+ * @devm_protocol_put: devres managed method to release a protocol
+ * @is_transport_atomic: method to check if the underlying transport for this
+ * instance handle is configured to support atomic
+ * transactions for commands.
+ * Some users of the SCMI stack in the upper layers may
+ * want to know whether they can assume that SCMI command
+ * transactions associated with this handle will never
+ * sleep, and act accordingly.
+ * An optional atomic threshold value is returned where
+ * configured.
+ * @notify_ops: pointer to set of notifications related operations
*/
struct scmi_handle {
struct device *dev;
struct scmi_revision_info *version;
- struct scmi_perf_ops *perf_ops;
- struct scmi_clk_ops *clk_ops;
- struct scmi_power_ops *power_ops;
- struct scmi_sensor_ops *sensor_ops;
- struct scmi_reset_ops *reset_ops;
- /* for protocol internal use */
- void *perf_priv;
- void *clk_priv;
- void *power_priv;
- void *sensor_priv;
- void *reset_priv;
+
+ int __must_check (*devm_protocol_acquire)(struct scmi_device *sdev,
+ u8 proto);
+ const void __must_check *
+ (*devm_protocol_get)(struct scmi_device *sdev, u8 proto,
+ struct scmi_protocol_handle **ph);
+ void (*devm_protocol_put)(struct scmi_device *sdev, u8 proto);
+ bool (*is_transport_atomic)(const struct scmi_handle *handle,
+ unsigned int *atomic_threshold);
+
+ const struct scmi_notify_ops *notify_ops;
};
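
A sketch of a driver probe acquiring a protocol through the new devres-managed accessor; it assumes devm_protocol_get() follows the usual ERR_PTR() convention and that struct scmi_device carries a handle pointer:

static int example_probe(struct scmi_device *sdev)
{
	const struct scmi_clk_proto_ops *clk_ops;
	struct scmi_protocol_handle *ph;

	if (!sdev->handle)
		return -ENODEV;

	clk_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK,
						  &ph);
	if (IS_ERR(clk_ops))
		return PTR_ERR(clk_ops);

	/* Protocol resources are released automatically on driver detach. */
	return clk_ops->count_get(ph) > 0 ? 0 : -ENODEV;
}
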
enum scmi_std_protocol {
@@ -252,6 +781,17 @@ enum scmi_std_protocol {
SCMI_PROTOCOL_CLOCK = 0x14,
SCMI_PROTOCOL_SENSOR = 0x15,
SCMI_PROTOCOL_RESET = 0x16,
+ SCMI_PROTOCOL_VOLTAGE = 0x17,
+ SCMI_PROTOCOL_POWERCAP = 0x18,
+};
+
+enum scmi_system_events {
+ SCMI_SYSTEM_SHUTDOWN,
+ SCMI_SYSTEM_COLDRESET,
+ SCMI_SYSTEM_WARMRESET,
+ SCMI_SYSTEM_POWERUP,
+ SCMI_SYSTEM_SUSPEND,
+ SCMI_SYSTEM_MAX
};
struct scmi_device {
@@ -285,7 +825,7 @@ struct scmi_driver {
#define to_scmi_driver(d) container_of(d, struct scmi_driver, driver)
-#ifdef CONFIG_ARM_SCMI_PROTOCOL
+#if IS_REACHABLE(CONFIG_ARM_SCMI_PROTOCOL)
int scmi_driver_register(struct scmi_driver *driver,
struct module *owner, const char *mod_name);
void scmi_driver_unregister(struct scmi_driver *driver);
@@ -316,6 +856,118 @@ static inline void scmi_driver_unregister(struct scmi_driver *driver) {}
#define module_scmi_driver(__scmi_driver) \
module_driver(__scmi_driver, scmi_register, scmi_unregister)
-typedef int (*scmi_prot_init_fn_t)(struct scmi_handle *);
-int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn);
-void scmi_protocol_unregister(int protocol_id);
+/**
+ * module_scmi_protocol() - Helper macro for registering a scmi protocol
+ * @__scmi_protocol: scmi_protocol structure
+ *
+ * Helper macro for scmi protocol modules to set up proper module init / exit
+ * functions. Replaces module_init() and module_exit() and keeps people from
+ * printing pointless things to the kernel log when their driver is loaded.
+ */
+#define module_scmi_protocol(__scmi_protocol) \
+ module_driver(__scmi_protocol, \
+ scmi_protocol_register, scmi_protocol_unregister)
+
+struct scmi_protocol;
+int scmi_protocol_register(const struct scmi_protocol *proto);
+void scmi_protocol_unregister(const struct scmi_protocol *proto);
+
+/* SCMI Notification API - Custom Event Reports */
+enum scmi_notification_events {
+ SCMI_EVENT_POWER_STATE_CHANGED = 0x0,
+ SCMI_EVENT_CLOCK_RATE_CHANGED = 0x0,
+ SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED = 0x1,
+ SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED = 0x0,
+ SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED = 0x1,
+ SCMI_EVENT_SENSOR_TRIP_POINT_EVENT = 0x0,
+ SCMI_EVENT_SENSOR_UPDATE = 0x1,
+ SCMI_EVENT_RESET_ISSUED = 0x0,
+ SCMI_EVENT_BASE_ERROR_EVENT = 0x0,
+ SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER = 0x0,
+ SCMI_EVENT_POWERCAP_CAP_CHANGED = 0x0,
+ SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED = 0x1,
+};
+
+struct scmi_power_state_changed_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int power_state;
+};
+
+struct scmi_clock_rate_notif_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int clock_id;
+ unsigned long long rate;
+};
+
+struct scmi_system_power_state_notifier_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+#define SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(flags) ((flags) & BIT(0))
+ unsigned int flags;
+ unsigned int system_state;
+ unsigned int timeout;
+};
+
+struct scmi_perf_limits_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int range_max;
+ unsigned int range_min;
+};
+
+struct scmi_perf_level_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int performance_level;
+};
+
+struct scmi_sensor_trip_point_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int sensor_id;
+ unsigned int trip_point_desc;
+};
+
+struct scmi_sensor_update_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int sensor_id;
+ unsigned int readings_count;
+ struct scmi_sensor_reading readings[];
+};
+
+struct scmi_reset_issued_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int reset_state;
+};
+
+struct scmi_base_error_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ bool fatal;
+ unsigned int cmd_count;
+ unsigned long long reports[];
+};
+
+struct scmi_powercap_cap_changed_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int power_cap;
+ unsigned int pai;
+};
+
+struct scmi_powercap_meas_changed_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int power;
+};
+#endif /* _LINUX_SCMI_PROTOCOL_H */
diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h
index ecb004711acf..d2176a56828a 100644
--- a/include/linux/scpi_protocol.h
+++ b/include/linux/scpi_protocol.h
@@ -4,6 +4,10 @@
*
* Copyright (C) 2014 ARM Ltd.
*/
+
+#ifndef _LINUX_SCPI_PROTOCOL_H
+#define _LINUX_SCPI_PROTOCOL_H
+
#include <linux/types.h>
struct scpi_opp {
@@ -47,6 +51,14 @@ struct scpi_sensor_info {
* OPP is an index to the list return by @dvfs_get_info
* @dvfs_get_info: returns the DVFS capabilities of the given power
* domain. It includes the OPP list and the latency information
+ * @device_domain_id: gets the scpi domain id for a given device
+ * @get_transition_latency: gets the DVFS transition latency for a given device
+ * @add_opps_to_device: adds all the OPPs for a given device
+ * @sensor_get_capability: get the list of capabilities for the sensors
+ * @sensor_get_info: get the information of the specified sensor
+ * @sensor_get_value: gets the current value of the sensor
+ * @device_get_power_state: gets the power state of a power domain
+ * @device_set_power_state: sets the power state of a power domain
*/
struct scpi_ops {
u32 (*get_version)(void);
@@ -71,3 +83,5 @@ struct scpi_ops *get_scpi_ops(void);
#else
static inline struct scpi_ops *get_scpi_ops(void) { return NULL; }
#endif
+
+#endif /* _LINUX_SCPI_PROTOCOL_H */
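
For reference, a sketch of a consumer fetching the ops table and reading one sensor; sensor_get_value's (u16, u64 *) signature is assumed from the existing header, and the helper name is hypothetical:

static int scpi_read_sensor(u16 sensor, u64 *value)
{
	struct scpi_ops *ops = get_scpi_ops();

	if (!ops)		/* SCPI not ready yet */
		return -EPROBE_DEFER;

	return ops->sensor_get_value(sensor, value);
}
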
diff --git a/include/linux/scs.h b/include/linux/scs.h
new file mode 100644
index 000000000000..18122d9e17ff
--- /dev/null
+++ b/include/linux/scs.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Shadow Call Stack support.
+ *
+ * Copyright (C) 2019 Google LLC
+ */
+
+#ifndef _LINUX_SCS_H
+#define _LINUX_SCS_H
+
+#include <linux/gfp.h>
+#include <linux/poison.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+
+#define SCS_ORDER 0
+#define SCS_SIZE (PAGE_SIZE << SCS_ORDER)
+#define GFP_SCS (GFP_KERNEL | __GFP_ZERO)
+
+/* An illegal pointer value to mark the end of the shadow stack. */
+#define SCS_END_MAGIC (0x5f6UL + POISON_POINTER_DELTA)
+
+#define task_scs(tsk) (task_thread_info(tsk)->scs_base)
+#define task_scs_sp(tsk) (task_thread_info(tsk)->scs_sp)
+
+void *scs_alloc(int node);
+void scs_free(void *s);
+void scs_init(void);
+int scs_prepare(struct task_struct *tsk, int node);
+void scs_release(struct task_struct *tsk);
+
+static inline void scs_task_reset(struct task_struct *tsk)
+{
+ /*
+ * Reset the shadow stack to the base address in case the task
+ * is reused.
+ */
+ task_scs_sp(tsk) = task_scs(tsk);
+}
+
+static inline unsigned long *__scs_magic(void *s)
+{
+ return (unsigned long *)(s + SCS_SIZE) - 1;
+}
+
+static inline bool task_scs_end_corrupted(struct task_struct *tsk)
+{
+ unsigned long *magic = __scs_magic(task_scs(tsk));
+ unsigned long sz = task_scs_sp(tsk) - task_scs(tsk);
+
+ return sz >= SCS_SIZE - 1 || READ_ONCE_NOCHECK(*magic) != SCS_END_MAGIC;
+}
+
+#else /* CONFIG_SHADOW_CALL_STACK */
+
+static inline void *scs_alloc(int node) { return NULL; }
+static inline void scs_free(void *s) {}
+static inline void scs_init(void) {}
+static inline void scs_task_reset(struct task_struct *tsk) {}
+static inline int scs_prepare(struct task_struct *tsk, int node) { return 0; }
+static inline void scs_release(struct task_struct *tsk) {}
+static inline bool task_scs_end_corrupted(struct task_struct *tsk) { return false; }
+
+#endif /* CONFIG_SHADOW_CALL_STACK */
+
+#endif /* _LINUX_SCS_H */
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index 8ccd82105de8..a86e852507b3 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -98,6 +98,7 @@ enum sctp_cid {
SCTP_CID_I_FWD_TSN = 0xC2,
SCTP_CID_ASCONF_ACK = 0x80,
SCTP_CID_RECONF = 0x82,
+ SCTP_CID_PAD = 0x84,
}; /* enum */
@@ -221,7 +222,7 @@ struct sctp_datahdr {
__be16 stream;
__be16 ssn;
__u32 ppid;
- __u8 payload[0];
+ __u8 payload[];
};
struct sctp_data_chunk {
@@ -269,7 +270,7 @@ struct sctp_inithdr {
__be16 num_outbound_streams;
__be16 num_inbound_streams;
__be32 initial_tsn;
- __u8 params[0];
+ __u8 params[];
};
struct sctp_init_chunk {
@@ -299,13 +300,13 @@ struct sctp_cookie_preserve_param {
/* Section 3.3.2.1 Host Name Address (11) */
struct sctp_hostname_param {
struct sctp_paramhdr param_hdr;
- uint8_t hostname[0];
+ uint8_t hostname[];
};
/* Section 3.3.2.1 Supported Address Types (12) */
struct sctp_supported_addrs_param {
struct sctp_paramhdr param_hdr;
- __be16 types[0];
+ __be16 types[];
};
/* ADDIP Section 3.2.6 Adaptation Layer Indication */
@@ -317,25 +318,25 @@ struct sctp_adaptation_ind_param {
/* ADDIP Section 4.2.7 Supported Extensions Parameter */
struct sctp_supported_ext_param {
struct sctp_paramhdr param_hdr;
- __u8 chunks[0];
+ __u8 chunks[];
};
/* AUTH Section 3.1 Random */
struct sctp_random_param {
struct sctp_paramhdr param_hdr;
- __u8 random_val[0];
+ __u8 random_val[];
};
/* AUTH Section 3.2 Chunk List */
struct sctp_chunks_param {
struct sctp_paramhdr param_hdr;
- __u8 chunks[0];
+ __u8 chunks[];
};
/* AUTH Section 3.3 HMAC Algorithm */
struct sctp_hmac_algo_param {
struct sctp_paramhdr param_hdr;
- __be16 hmac_ids[0];
+ __be16 hmac_ids[];
};
/* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2):
@@ -350,7 +351,7 @@ struct sctp_initack_chunk {
/* Section 3.3.3.1 State Cookie (7) */
struct sctp_cookie_param {
struct sctp_paramhdr p;
- __u8 body[0];
+ __u8 body[];
};
/* Section 3.3.3.1 Unrecognized Parameters (8) */
@@ -384,7 +385,7 @@ struct sctp_sackhdr {
__be32 a_rwnd;
__be16 num_gap_ack_blocks;
__be16 num_dup_tsns;
- union sctp_sack_variable variable[0];
+ union sctp_sack_variable variable[];
};
struct sctp_sack_chunk {
@@ -410,6 +411,12 @@ struct sctp_heartbeat_chunk {
};
+/* PAD chunk could be bundled with heartbeat chunk to probe pmtu */
+struct sctp_pad_chunk {
+ struct sctp_chunkhdr uh;
+};
+
+
/* For the abort and shutdown ACK we must carry the init tag in the
* common header. Just the common header is all that is needed with a
* chunk descriptor.
@@ -436,7 +443,7 @@ struct sctp_shutdown_chunk {
struct sctp_errhdr {
__be16 cause;
__be16 length;
- __u8 variable[0];
+ __u8 variable[];
};
struct sctp_operr_chunk {
@@ -482,11 +489,13 @@ enum sctp_error {
* 11 Restart of an association with new addresses
* 12 User Initiated Abort
* 13 Protocol Violation
+ * 14 Restart of an Association with New Encapsulation Port
*/
SCTP_ERROR_RESTART = cpu_to_be16(0x0b),
SCTP_ERROR_USER_ABORT = cpu_to_be16(0x0c),
SCTP_ERROR_PROTO_VIOLATION = cpu_to_be16(0x0d),
+ SCTP_ERROR_NEW_ENCAP_PORT = cpu_to_be16(0x0e),
/* ADDIP Section 3.3 New Error Causes
*
@@ -594,7 +603,7 @@ struct sctp_fwdtsn_skip {
struct sctp_fwdtsn_hdr {
__be32 new_cum_tsn;
- struct sctp_fwdtsn_skip skip[0];
+ struct sctp_fwdtsn_skip skip[];
};
struct sctp_fwdtsn_chunk {
@@ -611,7 +620,7 @@ struct sctp_ifwdtsn_skip {
struct sctp_ifwdtsn_hdr {
__be32 new_cum_tsn;
- struct sctp_ifwdtsn_skip skip[0];
+ struct sctp_ifwdtsn_skip skip[];
};
struct sctp_ifwdtsn_chunk {
@@ -658,7 +667,7 @@ struct sctp_addip_param {
struct sctp_addiphdr {
__be32 serial;
- __u8 params[0];
+ __u8 params[];
};
struct sctp_addip_chunk {
@@ -718,7 +727,7 @@ struct sctp_addip_chunk {
struct sctp_authhdr {
__be16 shkey_id;
__be16 hmac_id;
- __u8 hmac[0];
+ __u8 hmac[];
};
struct sctp_auth_chunk {
@@ -733,7 +742,7 @@ struct sctp_infox {
struct sctp_reconf_chunk {
struct sctp_chunkhdr chunk_hdr;
- __u8 params[0];
+ __u8 params[];
};
struct sctp_strreset_outreq {
@@ -741,13 +750,13 @@ struct sctp_strreset_outreq {
__be32 request_seq;
__be32 response_seq;
__be32 send_reset_at_tsn;
- __be16 list_of_streams[0];
+ __be16 list_of_streams[];
};
struct sctp_strreset_inreq {
struct sctp_paramhdr param_hdr;
__be32 request_seq;
- __be16 list_of_streams[0];
+ __be16 list_of_streams[];
};
struct sctp_strreset_tsnreq {
@@ -793,4 +802,22 @@ enum {
SCTP_FLOWLABEL_VAL_MASK = 0xfffff
};
+/* UDP Encapsulation
+ * draft-tuexen-tsvwg-sctp-udp-encaps-cons-03.html#section-4-4
+ *
+ * The error cause indicating a "Restart of an Association with
+ * New Encapsulation Port"
+ *
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Cause Code = 14 | Cause Length = 8 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Current Encapsulation Port | New Encapsulation Port |
+ * +-------------------------------+-------------------------------+
+ */
+struct sctp_new_encap_port_hdr {
+ __be16 cur_port;
+ __be16 new_port;
+};
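
A sketch of pulling both ports out of the cause body that follows a struct sctp_errhdr carrying cause code 14; the helper name is hypothetical:

static void sctp_log_new_encap_port(const struct sctp_new_encap_port_hdr *hdr)
{
	u16 cur_port = ntohs(hdr->cur_port);
	u16 new_port = ntohs(hdr->new_port);

	pr_debug("encap port restart: current %u, new %u\n",
		 cur_port, new_port);
}
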
+
#endif /* __LINUX_SCTP_H__ */
diff --git a/include/linux/sdb.h b/include/linux/sdb.h
deleted file mode 100644
index a2404a2bbd10..000000000000
--- a/include/linux/sdb.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This is the official version 1.1 of sdb.h
- */
-#ifndef __SDB_H__
-#define __SDB_H__
-#ifdef __KERNEL__
-#include <linux/types.h>
-#else
-#include <stdint.h>
-#endif
-
-/*
- * All structures are 64 bytes long and are expected
- * to live in an array, one for each interconnect.
- * Most fields of the structures are shared among the
- * various types, and most-specific fields are at the
- * beginning (for alignment reasons, and to keep the
- * magic number at the head of the interconnect record
- */
-
-/* Product, 40 bytes at offset 24, 8-byte aligned
- *
- * device_id is vendor-assigned; version is device-specific,
- * date is hex (e.g. 0x20120501), name is UTF-8, blank-filled
- * and not terminated with a 0 byte.
- */
-struct sdb_product {
- uint64_t vendor_id; /* 0x18..0x1f */
- uint32_t device_id; /* 0x20..0x23 */
- uint32_t version; /* 0x24..0x27 */
- uint32_t date; /* 0x28..0x2b */
- uint8_t name[19]; /* 0x2c..0x3e */
- uint8_t record_type; /* 0x3f */
-};
-
-/*
- * Component, 56 bytes at offset 8, 8-byte aligned
- *
- * The address range is first to last, inclusive
- * (for example 0x100000 - 0x10ffff)
- */
-struct sdb_component {
- uint64_t addr_first; /* 0x08..0x0f */
- uint64_t addr_last; /* 0x10..0x17 */
- struct sdb_product product; /* 0x18..0x3f */
-};
-
-/* Type of the SDB record */
-enum sdb_record_type {
- sdb_type_interconnect = 0x00,
- sdb_type_device = 0x01,
- sdb_type_bridge = 0x02,
- sdb_type_integration = 0x80,
- sdb_type_repo_url = 0x81,
- sdb_type_synthesis = 0x82,
- sdb_type_empty = 0xFF,
-};
-
-/* Type 0: interconnect (first of the array)
- *
- * sdb_records is the length of the table including this first
- * record, version is 1. The bus type is enumerated later.
- */
-#define SDB_MAGIC 0x5344422d /* "SDB-" */
-struct sdb_interconnect {
- uint32_t sdb_magic; /* 0x00-0x03 */
- uint16_t sdb_records; /* 0x04-0x05 */
- uint8_t sdb_version; /* 0x06 */
- uint8_t sdb_bus_type; /* 0x07 */
- struct sdb_component sdb_component; /* 0x08-0x3f */
-};
-
-/* Type 1: device
- *
- * class is 0 for "custom device", other values are
- * to be standardized; ABI version is for the driver,
- * bus-specific bits are defined by each bus (see below)
- */
-struct sdb_device {
- uint16_t abi_class; /* 0x00-0x01 */
- uint8_t abi_ver_major; /* 0x02 */
- uint8_t abi_ver_minor; /* 0x03 */
- uint32_t bus_specific; /* 0x04-0x07 */
- struct sdb_component sdb_component; /* 0x08-0x3f */
-};
-
-/* Type 2: bridge
- *
- * child is the address of the nested SDB table
- */
-struct sdb_bridge {
- uint64_t sdb_child; /* 0x00-0x07 */
- struct sdb_component sdb_component; /* 0x08-0x3f */
-};
-
-/* Type 0x80: integration
- *
- * all types with bit 7 set are meta-information, so
- * software can ignore the types it doesn't know. Here we
- * just provide product information for an aggregate device
- */
-struct sdb_integration {
- uint8_t reserved[24]; /* 0x00-0x17 */
- struct sdb_product product; /* 0x08-0x3f */
-};
-
-/* Type 0x81: Top module repository url
- *
- * again, an informative field that software can ignore
- */
-struct sdb_repo_url {
- uint8_t repo_url[63]; /* 0x00-0x3e */
- uint8_t record_type; /* 0x3f */
-};
-
-/* Type 0x82: Synthesis tool information
- *
- * this is an informative record
- */
-struct sdb_synthesis {
- uint8_t syn_name[16]; /* 0x00-0x0f */
- uint8_t commit_id[16]; /* 0x10-0x1f */
- uint8_t tool_name[8]; /* 0x20-0x27 */
- uint32_t tool_version; /* 0x28-0x2b */
- uint32_t date; /* 0x2c-0x2f */
- uint8_t user_name[15]; /* 0x30-0x3e */
- uint8_t record_type; /* 0x3f */
-};
-
-/* Type 0xff: empty
- *
- * this allows keeping empty slots during development,
- * so they can be filled later with minimal efforts and
- * no misleading description is ever shipped -- hopefully.
- * It can also be used to pad a table to a desired length.
- */
-struct sdb_empty {
- uint8_t reserved[63]; /* 0x00-0x3e */
- uint8_t record_type; /* 0x3f */
-};
-
-/* The type of bus, for bus-specific flags */
-enum sdb_bus_type {
- sdb_wishbone = 0x00,
- sdb_data = 0x01,
-};
-
-#define SDB_WB_WIDTH_MASK 0x0f
-#define SDB_WB_ACCESS8 0x01
-#define SDB_WB_ACCESS16 0x02
-#define SDB_WB_ACCESS32 0x04
-#define SDB_WB_ACCESS64 0x08
-#define SDB_WB_LITTLE_ENDIAN 0x80
-
-#define SDB_DATA_READ 0x04
-#define SDB_DATA_WRITE 0x02
-#define SDB_DATA_EXEC 0x01
-
-#endif /* __SDB_H__ */
diff --git a/include/linux/sdla.h b/include/linux/sdla.h
deleted file mode 100644
index 00e8b3b614f0..000000000000
--- a/include/linux/sdla.h
+++ /dev/null
@@ -1,240 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * INET An implementation of the TCP/IP protocol suite for the LINUX
- * operating system. INET is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * Global definitions for the Frame relay interface.
- *
- * Version: @(#)if_ifrad.h 0.20 13 Apr 96
- *
- * Author: Mike McLagan <mike.mclagan@linux.org>
- *
- * Changes:
- * 0.15 Mike McLagan Structure packing
- *
- * 0.20 Mike McLagan New flags for S508 buffer handling
- */
-#ifndef SDLA_H
-#define SDLA_H
-
-#include <uapi/linux/sdla.h>
-
-
-/* important Z80 window addresses */
-#define SDLA_CONTROL_WND 0xE000
-
-#define SDLA_502_CMD_BUF 0xEF60
-#define SDLA_502_RCV_BUF 0xA900
-#define SDLA_502_TXN_AVAIL 0xFFF1
-#define SDLA_502_RCV_AVAIL 0xFFF2
-#define SDLA_502_EVENT_FLAGS 0xFFF3
-#define SDLA_502_MDM_STATUS 0xFFF4
-#define SDLA_502_IRQ_INTERFACE 0xFFFD
-#define SDLA_502_IRQ_PERMISSION 0xFFFE
-#define SDLA_502_DATA_OFS 0x0010
-
-#define SDLA_508_CMD_BUF 0xE000
-#define SDLA_508_TXBUF_INFO 0xF100
-#define SDLA_508_RXBUF_INFO 0xF120
-#define SDLA_508_EVENT_FLAGS 0xF003
-#define SDLA_508_MDM_STATUS 0xF004
-#define SDLA_508_IRQ_INTERFACE 0xF010
-#define SDLA_508_IRQ_PERMISSION 0xF011
-#define SDLA_508_TSE_OFFSET 0xF012
-
-/* Event flags */
-#define SDLA_EVENT_STATUS 0x01
-#define SDLA_EVENT_DLCI_STATUS 0x02
-#define SDLA_EVENT_BAD_DLCI 0x04
-#define SDLA_EVENT_LINK_DOWN 0x40
-
-/* IRQ Trigger flags */
-#define SDLA_INTR_RX 0x01
-#define SDLA_INTR_TX 0x02
-#define SDLA_INTR_MODEM 0x04
-#define SDLA_INTR_COMPLETE 0x08
-#define SDLA_INTR_STATUS 0x10
-#define SDLA_INTR_TIMER 0x20
-
-/* DLCI status bits */
-#define SDLA_DLCI_DELETED 0x01
-#define SDLA_DLCI_ACTIVE 0x02
-#define SDLA_DLCI_WAITING 0x04
-#define SDLA_DLCI_NEW 0x08
-#define SDLA_DLCI_INCLUDED 0x40
-
-/* valid command codes */
-#define SDLA_INFORMATION_WRITE 0x01
-#define SDLA_INFORMATION_READ 0x02
-#define SDLA_ISSUE_IN_CHANNEL_SIGNAL 0x03
-#define SDLA_SET_DLCI_CONFIGURATION 0x10
-#define SDLA_READ_DLCI_CONFIGURATION 0x11
-#define SDLA_DISABLE_COMMUNICATIONS 0x12
-#define SDLA_ENABLE_COMMUNICATIONS 0x13
-#define SDLA_READ_DLC_STATUS 0x14
-#define SDLA_READ_DLC_STATISTICS 0x15
-#define SDLA_FLUSH_DLC_STATISTICS 0x16
-#define SDLA_LIST_ACTIVE_DLCI 0x17
-#define SDLA_FLUSH_INFORMATION_BUFFERS 0x18
-#define SDLA_ADD_DLCI 0x20
-#define SDLA_DELETE_DLCI 0x21
-#define SDLA_ACTIVATE_DLCI 0x22
-#define SDLA_DEACTIVATE_DLCI 0x23
-#define SDLA_READ_MODEM_STATUS 0x30
-#define SDLA_SET_MODEM_STATUS 0x31
-#define SDLA_READ_COMMS_ERR_STATS 0x32
-#define SDLA_FLUSH_COMMS_ERR_STATS 0x33
-#define SDLA_READ_CODE_VERSION 0x40
-#define SDLA_SET_IRQ_TRIGGER 0x50
-#define SDLA_GET_IRQ_TRIGGER 0x51
-
-/* In channel signal types */
-#define SDLA_ICS_LINK_VERIFY 0x02
-#define SDLA_ICS_STATUS_ENQ 0x03
-
-/* modem status flags */
-#define SDLA_MODEM_DTR_HIGH 0x01
-#define SDLA_MODEM_RTS_HIGH 0x02
-#define SDLA_MODEM_DCD_HIGH 0x08
-#define SDLA_MODEM_CTS_HIGH 0x20
-
-/* used for RET_MODEM interpretation */
-#define SDLA_MODEM_DCD_LOW 0x01
-#define SDLA_MODEM_CTS_LOW 0x02
-
-/* return codes */
-#define SDLA_RET_OK 0x00
-#define SDLA_RET_COMMUNICATIONS 0x01
-#define SDLA_RET_CHANNEL_INACTIVE 0x02
-#define SDLA_RET_DLCI_INACTIVE 0x03
-#define SDLA_RET_DLCI_CONFIG 0x04
-#define SDLA_RET_BUF_TOO_BIG 0x05
-#define SDLA_RET_NO_DATA 0x05
-#define SDLA_RET_BUF_OVERSIZE 0x06
-#define SDLA_RET_CIR_OVERFLOW 0x07
-#define SDLA_RET_NO_BUFS 0x08
-#define SDLA_RET_TIMEOUT 0x0A
-#define SDLA_RET_MODEM 0x10
-#define SDLA_RET_CHANNEL_OFF 0x11
-#define SDLA_RET_CHANNEL_ON 0x12
-#define SDLA_RET_DLCI_STATUS 0x13
-#define SDLA_RET_DLCI_UNKNOWN 0x14
-#define SDLA_RET_COMMAND_INVALID 0x1F
-
-/* Configuration flags */
-#define SDLA_DIRECT_RECV 0x0080
-#define SDLA_TX_NO_EXCEPT 0x0020
-#define SDLA_NO_ICF_MSGS 0x1000
-#define SDLA_TX50_RX50 0x0000
-#define SDLA_TX70_RX30 0x2000
-#define SDLA_TX30_RX70 0x4000
-
-/* IRQ selection flags */
-#define SDLA_IRQ_RECEIVE 0x01
-#define SDLA_IRQ_TRANSMIT 0x02
-#define SDLA_IRQ_MODEM_STAT 0x04
-#define SDLA_IRQ_COMMAND 0x08
-#define SDLA_IRQ_CHANNEL 0x10
-#define SDLA_IRQ_TIMER 0x20
-
-/* definitions for PC memory mapping */
-#define SDLA_8K_WINDOW 0x01
-#define SDLA_S502_SEG_A 0x10
-#define SDLA_S502_SEG_C 0x20
-#define SDLA_S502_SEG_D 0x00
-#define SDLA_S502_SEG_E 0x30
-#define SDLA_S507_SEG_A 0x00
-#define SDLA_S507_SEG_B 0x40
-#define SDLA_S507_SEG_C 0x80
-#define SDLA_S507_SEG_E 0xC0
-#define SDLA_S508_SEG_A 0x00
-#define SDLA_S508_SEG_C 0x10
-#define SDLA_S508_SEG_D 0x08
-#define SDLA_S508_SEG_E 0x18
-
-/* SDLA adapter port constants */
-#define SDLA_IO_EXTENTS 0x04
-
-#define SDLA_REG_CONTROL 0x00
-#define SDLA_REG_PC_WINDOW 0x01 /* offset for PC window select latch */
-#define SDLA_REG_Z80_WINDOW 0x02 /* offset for Z80 window select latch */
-#define SDLA_REG_Z80_CONTROL 0x03 /* offset for Z80 control latch */
-
-#define SDLA_S502_STS 0x00 /* status reg for 502, 502E, 507 */
-#define SDLA_S508_GNRL 0x00 /* general purp. reg for 508 */
-#define SDLA_S508_STS 0x01 /* status reg for 508 */
-#define SDLA_S508_IDR 0x02 /* ID reg for 508 */
-
-/* control register flags */
-#define SDLA_S502A_START 0x00 /* start the CPU */
-#define SDLA_S502A_INTREQ 0x02
-#define SDLA_S502A_INTEN 0x04
-#define SDLA_S502A_HALT 0x08 /* halt the CPU */
-#define SDLA_S502A_NMI 0x10 /* issue an NMI to the CPU */
-
-#define SDLA_S502E_CPUEN 0x01
-#define SDLA_S502E_ENABLE 0x02
-#define SDLA_S502E_INTACK 0x04
-
-#define SDLA_S507_ENABLE 0x01
-#define SDLA_S507_IRQ3 0x00
-#define SDLA_S507_IRQ4 0x20
-#define SDLA_S507_IRQ5 0x40
-#define SDLA_S507_IRQ7 0x60
-#define SDLA_S507_IRQ10 0x80
-#define SDLA_S507_IRQ11 0xA0
-#define SDLA_S507_IRQ12 0xC0
-#define SDLA_S507_IRQ15 0xE0
-
-#define SDLA_HALT 0x00
-#define SDLA_CPUEN 0x02
-#define SDLA_MEMEN 0x04
-#define SDLA_S507_EPROMWR 0x08
-#define SDLA_S507_EPROMCLK 0x10
-#define SDLA_S508_INTRQ 0x08
-#define SDLA_S508_INTEN 0x10
-
-struct sdla_cmd {
- char opp_flag;
- char cmd;
- short length;
- char retval;
- short dlci;
- char flags;
- short rxlost_int;
- long rxlost_app;
- char reserve[2];
- char data[SDLA_MAX_DATA]; /* transfer data buffer */
-} __attribute__((packed));
-
-struct intr_info {
- char flags;
- short txlen;
- char irq;
- char flags2;
- short timeout;
-} __attribute__((packed));
-
-/* found in the 508's control window at RXBUF_INFO */
-struct buf_info {
- unsigned short rse_num;
- unsigned long rse_base;
- unsigned long rse_next;
- unsigned long buf_base;
- unsigned short reserved;
- unsigned long buf_top;
-} __attribute__((packed));
-
-/* structure pointed to by rse_base in RXBUF_INFO struct */
-struct buf_entry {
- char opp_flag;
- short length;
- short dlci;
- char flags;
- short timestamp;
- short reserved[2];
- long buf_addr;
-} __attribute__((packed));
-
-#endif
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 03583b6d1416..d31d76be4982 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -7,11 +7,18 @@
#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
SECCOMP_FILTER_FLAG_LOG | \
SECCOMP_FILTER_FLAG_SPEC_ALLOW | \
- SECCOMP_FILTER_FLAG_NEW_LISTENER)
+ SECCOMP_FILTER_FLAG_NEW_LISTENER | \
+ SECCOMP_FILTER_FLAG_TSYNC_ESRCH | \
+ SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV)
+
+/* sizeof() the first published struct seccomp_notif_addfd */
+#define SECCOMP_NOTIFY_ADDFD_SIZE_VER0 24
+#define SECCOMP_NOTIFY_ADDFD_SIZE_LATEST SECCOMP_NOTIFY_ADDFD_SIZE_VER0
#ifdef CONFIG_SECCOMP
#include <linux/thread_info.h>
+#include <linux/atomic.h>
#include <asm/seccomp.h>
struct seccomp_filter;
@@ -28,6 +35,7 @@ struct seccomp_filter;
*/
struct seccomp {
int mode;
+ atomic_t filter_count;
struct seccomp_filter *filter;
};
@@ -35,7 +43,7 @@ struct seccomp {
extern int __secure_computing(const struct seccomp_data *sd);
static inline int secure_computing(void)
{
- if (unlikely(test_thread_flag(TIF_SECCOMP)))
+ if (unlikely(test_syscall_work(SECCOMP)))
return __secure_computing(NULL);
return 0;
}
@@ -57,9 +65,11 @@ static inline int seccomp_mode(struct seccomp *s)
struct seccomp { };
struct seccomp_filter { };
+struct seccomp_data;
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
static inline int secure_computing(void) { return 0; }
+static inline int __secure_computing(const struct seccomp_data *sd) { return 0; }
#else
static inline void secure_computing_strict(int this_syscall) { return; }
#endif
@@ -81,10 +91,10 @@ static inline int seccomp_mode(struct seccomp *s)
#endif /* CONFIG_SECCOMP */
#ifdef CONFIG_SECCOMP_FILTER
-extern void put_seccomp_filter(struct task_struct *tsk);
+extern void seccomp_filter_release(struct task_struct *tsk);
extern void get_seccomp_filter(struct task_struct *tsk);
#else /* CONFIG_SECCOMP_FILTER */
-static inline void put_seccomp_filter(struct task_struct *tsk)
+static inline void seccomp_filter_release(struct task_struct *tsk)
{
return;
}
@@ -112,4 +122,11 @@ static inline long seccomp_get_metadata(struct task_struct *task,
return -EINVAL;
}
#endif /* CONFIG_SECCOMP_FILTER && CONFIG_CHECKPOINT_RESTORE */
+
+#ifdef CONFIG_SECCOMP_CACHE_DEBUG
+struct seq_file;
+
+int proc_pid_seccomp_cache(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task);
+#endif
#endif /* _LINUX_SECCOMP_H */
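
The two flags added to SECCOMP_FILTER_FLAG_MASK above are user-visible: SECCOMP_FILTER_FLAG_TSYNC_ESRCH makes a failed thread-sync return -1/ESRCH instead of a (potentially racy) thread ID, and SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV keeps a notified syscall in a killable wait once its notification has been received by the listener. A minimal userspace sketch of installing an allow-all filter with the ESRCH semantics (error handling trimmed; nothing here is taken verbatim from this header):

#include <linux/filter.h>
#include <linux/seccomp.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int install_filter(void)
{
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW), /* allow all */
	};
	struct sock_fprog prog = { .len = 1, .filter = insns };

	/* Required unless the caller has CAP_SYS_ADMIN in its userns. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return -1;

	/* TSYNC attaches the filter to every thread; with TSYNC_ESRCH a
	 * failure returns -1/ESRCH rather than a thread ID. */
	return syscall(SYS_seccomp, SECCOMP_SET_MODE_FILTER,
		       SECCOMP_FILTER_FLAG_TSYNC |
		       SECCOMP_FILTER_FLAG_TSYNC_ESRCH, &prog);
}
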
diff --git a/include/linux/secretmem.h b/include/linux/secretmem.h
new file mode 100644
index 000000000000..988528b5da43
--- /dev/null
+++ b/include/linux/secretmem.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _LINUX_SECRETMEM_H
+#define _LINUX_SECRETMEM_H
+
+#ifdef CONFIG_SECRETMEM
+
+extern const struct address_space_operations secretmem_aops;
+
+static inline bool page_is_secretmem(struct page *page)
+{
+ struct address_space *mapping;
+
+ /*
+ * Using page_mapping() is quite slow because of the actual call
+ * instruction and repeated compound_head(page) inside the
+ * page_mapping() function.
+ * We know that secretmem pages are not compound and are on the LRU,
+ * so we can save a couple of cycles here.
+ */
+ if (PageCompound(page) || !PageLRU(page))
+ return false;
+
+ mapping = (struct address_space *)
+ ((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
+
+ if (!mapping || mapping != page->mapping)
+ return false;
+
+ return mapping->a_ops == &secretmem_aops;
+}
+
+bool vma_is_secretmem(struct vm_area_struct *vma);
+bool secretmem_active(void);
+
+#else
+
+static inline bool vma_is_secretmem(struct vm_area_struct *vma)
+{
+ return false;
+}
+
+static inline bool page_is_secretmem(struct page *page)
+{
+ return false;
+}
+
+static inline bool secretmem_active(void)
+{
+ return false;
+}
+
+#endif /* CONFIG_SECRETMEM */
+
+#endif /* _LINUX_SECRETMEM_H */
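
page_is_secretmem() gives the gup fast path a cheap test for pages backed by secretmem_aops. Such pages originate in userspace from the memfd_secret(2) syscall; a sketch of the usual allocation sequence, assuming the libc headers expose __NR_memfd_secret and secretmem_active() is true on the running kernel (error paths trimmed):

#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static void *alloc_secret(size_t len)
{
	void *p;
	/* fd-backed memory removed from the kernel's direct map */
	int fd = syscall(__NR_memfd_secret, 0);

	if (fd < 0 || ftruncate(fd, len) < 0)
		return NULL;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	/* the faulted-in pages are the ones page_is_secretmem() matches */
	return p == MAP_FAILED ? NULL : p;
}
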
diff --git a/include/linux/security.h b/include/linux/security.h
index 64b19f050343..ca1b7109c0db 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -23,6 +23,7 @@
#ifndef __LINUX_SECURITY_H
#define __LINUX_SECURITY_H
+#include <linux/kernel_read_file.h>
#include <linux/key.h>
#include <linux/capability.h>
#include <linux/fs.h>
@@ -30,7 +31,6 @@
#include <linux/err.h>
#include <linux/string.h>
#include <linux/mm.h>
-#include <linux/fs.h>
struct linux_binprm;
struct cred;
@@ -57,6 +57,8 @@ struct mm_struct;
struct fs_context;
struct fs_parameter;
enum fs_value_type;
+struct watch;
+struct watch_notification;
/* Default (no) options for the capable function */
#define CAP_OPT_NONE 0x0
@@ -112,19 +114,25 @@ enum lockdown_reason {
LOCKDOWN_IOPORT,
LOCKDOWN_MSR,
LOCKDOWN_ACPI_TABLES,
+ LOCKDOWN_DEVICE_TREE,
LOCKDOWN_PCMCIA_CIS,
LOCKDOWN_TIOCSSERIAL,
LOCKDOWN_MODULE_PARAMETERS,
LOCKDOWN_MMIOTRACE,
LOCKDOWN_DEBUGFS,
LOCKDOWN_XMON_WR,
+ LOCKDOWN_BPF_WRITE_USER,
+ LOCKDOWN_DBG_WRITE_KERNEL,
+ LOCKDOWN_RTAS_ERROR_INJECTION,
LOCKDOWN_INTEGRITY_MAX,
LOCKDOWN_KCORE,
LOCKDOWN_KPROBES,
- LOCKDOWN_BPF_READ,
+ LOCKDOWN_BPF_READ_KERNEL,
+ LOCKDOWN_DBG_READ_KERNEL,
LOCKDOWN_PERF,
LOCKDOWN_TRACEFS,
LOCKDOWN_XMON_RW,
+ LOCKDOWN_XFRM_SECRET,
LOCKDOWN_CONFIDENTIALITY_MAX,
};
@@ -141,14 +149,17 @@ extern int cap_capset(struct cred *new, const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted);
-extern int cap_bprm_set_creds(struct linux_binprm *bprm);
-extern int cap_inode_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags);
-extern int cap_inode_removexattr(struct dentry *dentry, const char *name);
-extern int cap_inode_need_killpriv(struct dentry *dentry);
-extern int cap_inode_killpriv(struct dentry *dentry);
-extern int cap_inode_getsecurity(struct inode *inode, const char *name,
- void **buffer, bool alloc);
+extern int cap_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file);
+int cap_inode_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags);
+int cap_inode_removexattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name);
+int cap_inode_need_killpriv(struct dentry *dentry);
+int cap_inode_killpriv(struct user_namespace *mnt_userns,
+ struct dentry *dentry);
+int cap_inode_getsecurity(struct user_namespace *mnt_userns,
+ struct inode *inode, const char *name, void **buffer,
+ bool alloc);
extern int cap_mmap_addr(unsigned long addr);
extern int cap_mmap_file(struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags);
@@ -165,14 +176,14 @@ struct sk_buff;
struct sock;
struct sockaddr;
struct socket;
-struct flowi;
+struct flowi_common;
struct dst_entry;
struct xfrm_selector;
struct xfrm_policy;
struct xfrm_state;
struct xfrm_user_sec_ctx;
struct seq_file;
-struct sctp_endpoint;
+struct sctp_association;
#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
@@ -212,7 +223,7 @@ struct request_sock;
#ifdef CONFIG_MMU
extern int mmap_min_addr_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+ void *buffer, size_t *lenp, loff_t *ppos);
#endif
/* security_inode_init_security callback function to write xattrs */
@@ -251,13 +262,13 @@ extern int security_init(void);
extern int early_security_init(void);
/* Security operations */
-int security_binder_set_context_mgr(struct task_struct *mgr);
-int security_binder_transaction(struct task_struct *from,
- struct task_struct *to);
-int security_binder_transfer_binder(struct task_struct *from,
- struct task_struct *to);
-int security_binder_transfer_file(struct task_struct *from,
- struct task_struct *to, struct file *file);
+int security_binder_set_context_mgr(const struct cred *mgr);
+int security_binder_transaction(const struct cred *from,
+ const struct cred *to);
+int security_binder_transfer_binder(const struct cred *from,
+ const struct cred *to);
+int security_binder_transfer_file(const struct cred *from,
+ const struct cred *to, struct file *file);
int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
int security_ptrace_traceme(struct task_struct *parent);
int security_capget(struct task_struct *target,
@@ -277,16 +288,19 @@ int security_quota_on(struct dentry *dentry);
int security_syslog(int type);
int security_settime64(const struct timespec64 *ts, const struct timezone *tz);
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
-int security_bprm_set_creds(struct linux_binprm *bprm);
+int security_bprm_creds_for_exec(struct linux_binprm *bprm);
+int security_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file);
int security_bprm_check(struct linux_binprm *bprm);
void security_bprm_committing_creds(struct linux_binprm *bprm);
void security_bprm_committed_creds(struct linux_binprm *bprm);
int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc);
int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param);
int security_sb_alloc(struct super_block *sb);
+void security_sb_delete(struct super_block *sb);
void security_sb_free(struct super_block *sb);
void security_free_mnt_opts(void **mnt_opts);
int security_sb_eat_lsm_opts(char *options, void **mnt_opts);
+int security_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts);
int security_sb_remount(struct super_block *sb, void *mnt_opts);
int security_sb_kern_mount(struct super_block *sb);
int security_sb_show_options(struct seq_file *m, struct super_block *sb);
@@ -303,12 +317,11 @@ int security_sb_clone_mnt_opts(const struct super_block *oldsb,
struct super_block *newsb,
unsigned long kern_flags,
unsigned long *set_kern_flags);
-int security_add_mnt_opt(const char *option, const char *val,
- int len, void **mnt_opts);
int security_move_mount(const struct path *from_path, const struct path *to_path);
int security_dentry_init_security(struct dentry *dentry, int mode,
- const struct qstr *name, void **ctx,
- u32 *ctxlen);
+ const struct qstr *name,
+ const char **xattr_name, void **ctx,
+ u32 *ctxlen);
int security_dentry_create_files_as(struct dentry *dentry, int mode,
struct qstr *name,
const struct cred *old,
@@ -320,6 +333,9 @@ void security_inode_free(struct inode *inode);
int security_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr,
initxattrs initxattrs, void *fs_data);
+int security_inode_init_security_anon(struct inode *inode,
+ const struct qstr *name,
+ const struct inode *context_inode);
int security_old_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr, const char **name,
void **value, size_t *len);
@@ -339,18 +355,24 @@ int security_inode_readlink(struct dentry *dentry);
int security_inode_follow_link(struct dentry *dentry, struct inode *inode,
bool rcu);
int security_inode_permission(struct inode *inode, int mask);
-int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
+int security_inode_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *attr);
int security_inode_getattr(const struct path *path);
-int security_inode_setxattr(struct dentry *dentry, const char *name,
+int security_inode_setxattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
void security_inode_post_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
int security_inode_getxattr(struct dentry *dentry, const char *name);
int security_inode_listxattr(struct dentry *dentry);
-int security_inode_removexattr(struct dentry *dentry, const char *name);
+int security_inode_removexattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name);
int security_inode_need_killpriv(struct dentry *dentry);
-int security_inode_killpriv(struct dentry *dentry);
-int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc);
+int security_inode_killpriv(struct user_namespace *mnt_userns,
+ struct dentry *dentry);
+int security_inode_getsecurity(struct user_namespace *mnt_userns,
+ struct inode *inode, const char *name,
+ void **buffer, bool alloc);
int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags);
int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size);
void security_inode_getsecid(struct inode *inode, u32 *secid);
@@ -384,16 +406,24 @@ void security_cred_getsecid(const struct cred *c, u32 *secid);
int security_kernel_act_as(struct cred *new, u32 secid);
int security_kernel_create_files_as(struct cred *new, struct inode *inode);
int security_kernel_module_request(char *kmod_name);
-int security_kernel_load_data(enum kernel_load_data_id id);
-int security_kernel_read_file(struct file *file, enum kernel_read_file_id id);
+int security_kernel_load_data(enum kernel_load_data_id id, bool contents);
+int security_kernel_post_load_data(char *buf, loff_t size,
+ enum kernel_load_data_id id,
+ char *description);
+int security_kernel_read_file(struct file *file, enum kernel_read_file_id id,
+ bool contents);
int security_kernel_post_read_file(struct file *file, char *buf, loff_t size,
enum kernel_read_file_id id);
int security_task_fix_setuid(struct cred *new, const struct cred *old,
int flags);
+int security_task_fix_setgid(struct cred *new, const struct cred *old,
+ int flags);
+int security_task_fix_setgroups(struct cred *new, const struct cred *old);
int security_task_setpgid(struct task_struct *p, pid_t pgid);
int security_task_getpgid(struct task_struct *p);
int security_task_getsid(struct task_struct *p);
-void security_task_getsecid(struct task_struct *p, u32 *secid);
+void security_current_getsecid_subj(u32 *secid);
+void security_task_getsecid_obj(struct task_struct *p, u32 *secid);
int security_task_setnice(struct task_struct *p, int nice);
int security_task_setioprio(struct task_struct *p, int ioprio);
int security_task_getioprio(struct task_struct *p);
@@ -409,6 +439,7 @@ int security_task_kill(struct task_struct *p, struct kernel_siginfo *info,
int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
void security_task_to_inode(struct task_struct *p, struct inode *inode);
+int security_create_user_ns(const struct cred *cred);
int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag);
void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid);
int security_msg_msg_alloc(struct msg_msg *msg);
@@ -433,7 +464,7 @@ int security_sem_semctl(struct kern_ipc_perm *sma, int cmd);
int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops,
unsigned nsops, int alter);
void security_d_instantiate(struct dentry *dentry, struct inode *inode);
-int security_getprocattr(struct task_struct *p, const char *lsm, char *name,
+int security_getprocattr(struct task_struct *p, const char *lsm, const char *name,
char **value);
int security_setprocattr(const char *lsm, const char *name, void *value,
size_t size);
@@ -483,25 +514,25 @@ static inline int early_security_init(void)
return 0;
}
-static inline int security_binder_set_context_mgr(struct task_struct *mgr)
+static inline int security_binder_set_context_mgr(const struct cred *mgr)
{
return 0;
}
-static inline int security_binder_transaction(struct task_struct *from,
- struct task_struct *to)
+static inline int security_binder_transaction(const struct cred *from,
+ const struct cred *to)
{
return 0;
}
-static inline int security_binder_transfer_binder(struct task_struct *from,
- struct task_struct *to)
+static inline int security_binder_transfer_binder(const struct cred *from,
+ const struct cred *to)
{
return 0;
}
-static inline int security_binder_transfer_file(struct task_struct *from,
- struct task_struct *to,
+static inline int security_binder_transfer_file(const struct cred *from,
+ const struct cred *to,
struct file *file)
{
return 0;
@@ -570,9 +601,15 @@ static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
return __vm_enough_memory(mm, pages, cap_vm_enough_memory(mm, pages));
}
-static inline int security_bprm_set_creds(struct linux_binprm *bprm)
+static inline int security_bprm_creds_for_exec(struct linux_binprm *bprm)
{
- return cap_bprm_set_creds(bprm);
+ return 0;
+}
+
+static inline int security_bprm_creds_from_file(struct linux_binprm *bprm,
+ struct file *file)
+{
+ return cap_bprm_creds_from_file(bprm, file);
}
static inline int security_bprm_check(struct linux_binprm *bprm)
@@ -604,6 +641,9 @@ static inline int security_sb_alloc(struct super_block *sb)
return 0;
}
+static inline void security_sb_delete(struct super_block *sb)
+{ }
+
static inline void security_sb_free(struct super_block *sb)
{ }
@@ -619,6 +659,13 @@ static inline int security_sb_remount(struct super_block *sb,
return 0;
}
+static inline int security_sb_mnt_opts_compat(struct super_block *sb,
+ void *mnt_opts)
+{
+ return 0;
+}
+
+
static inline int security_sb_kern_mount(struct super_block *sb)
{
return 0;
@@ -669,12 +716,6 @@ static inline int security_sb_clone_mnt_opts(const struct super_block *oldsb,
return 0;
}
-static inline int security_add_mnt_opt(const char *option, const char *val,
- int len, void **mnt_opts)
-{
- return 0;
-}
-
static inline int security_move_mount(const struct path *from_path,
const struct path *to_path)
{
@@ -698,6 +739,7 @@ static inline void security_inode_free(struct inode *inode)
static inline int security_dentry_init_security(struct dentry *dentry,
int mode,
const struct qstr *name,
+ const char **xattr_name,
void **ctx,
u32 *ctxlen)
{
@@ -722,6 +764,13 @@ static inline int security_inode_init_security(struct inode *inode,
return 0;
}
+static inline int security_inode_init_security_anon(struct inode *inode,
+ const struct qstr *name,
+ const struct inode *context_inode)
+{
+ return 0;
+}
+
static inline int security_old_inode_init_security(struct inode *inode,
struct inode *dir,
const struct qstr *qstr,
@@ -804,8 +853,9 @@ static inline int security_inode_permission(struct inode *inode, int mask)
return 0;
}
-static inline int security_inode_setattr(struct dentry *dentry,
- struct iattr *attr)
+static inline int security_inode_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry,
+ struct iattr *attr)
{
return 0;
}
@@ -815,8 +865,9 @@ static inline int security_inode_getattr(const struct path *path)
return 0;
}
-static inline int security_inode_setxattr(struct dentry *dentry,
- const char *name, const void *value, size_t size, int flags)
+static inline int security_inode_setxattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags)
{
return cap_inode_setxattr(dentry, name, value, size, flags);
}
@@ -836,10 +887,11 @@ static inline int security_inode_listxattr(struct dentry *dentry)
return 0;
}
-static inline int security_inode_removexattr(struct dentry *dentry,
- const char *name)
+static inline int security_inode_removexattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry,
+ const char *name)
{
- return cap_inode_removexattr(dentry, name);
+ return cap_inode_removexattr(mnt_userns, dentry, name);
}
static inline int security_inode_need_killpriv(struct dentry *dentry)
@@ -847,14 +899,18 @@ static inline int security_inode_need_killpriv(struct dentry *dentry)
return cap_inode_need_killpriv(dentry);
}
-static inline int security_inode_killpriv(struct dentry *dentry)
+static inline int security_inode_killpriv(struct user_namespace *mnt_userns,
+ struct dentry *dentry)
{
- return cap_inode_killpriv(dentry);
+ return cap_inode_killpriv(mnt_userns, dentry);
}
-static inline int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc)
+static inline int security_inode_getsecurity(struct user_namespace *mnt_userns,
+ struct inode *inode,
+ const char *name, void **buffer,
+ bool alloc)
{
- return -EOPNOTSUPP;
+ return cap_inode_getsecurity(mnt_userns, inode, name, buffer, alloc);
}
static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
@@ -987,6 +1043,11 @@ static inline void security_transfer_creds(struct cred *new,
{
}
+static inline void security_cred_getsecid(const struct cred *c, u32 *secid)
+{
+ *secid = 0;
+}
+
static inline int security_kernel_act_as(struct cred *cred, u32 secid)
{
return 0;
@@ -1003,13 +1064,21 @@ static inline int security_kernel_module_request(char *kmod_name)
return 0;
}
-static inline int security_kernel_load_data(enum kernel_load_data_id id)
+static inline int security_kernel_load_data(enum kernel_load_data_id id, bool contents)
+{
+ return 0;
+}
+
+static inline int security_kernel_post_load_data(char *buf, loff_t size,
+ enum kernel_load_data_id id,
+ char *description)
{
return 0;
}
static inline int security_kernel_read_file(struct file *file,
- enum kernel_read_file_id id)
+ enum kernel_read_file_id id,
+ bool contents)
{
return 0;
}
@@ -1028,6 +1097,19 @@ static inline int security_task_fix_setuid(struct cred *new,
return cap_task_fix_setuid(new, old, flags);
}
+static inline int security_task_fix_setgid(struct cred *new,
+ const struct cred *old,
+ int flags)
+{
+ return 0;
+}
+
+static inline int security_task_fix_setgroups(struct cred *new,
+ const struct cred *old)
+{
+ return 0;
+}
+
static inline int security_task_setpgid(struct task_struct *p, pid_t pgid)
{
return 0;
@@ -1043,7 +1125,12 @@ static inline int security_task_getsid(struct task_struct *p)
return 0;
}
-static inline void security_task_getsecid(struct task_struct *p, u32 *secid)
+static inline void security_current_getsecid_subj(u32 *secid)
+{
+ *secid = 0;
+}
+
+static inline void security_task_getsecid_obj(struct task_struct *p, u32 *secid)
{
*secid = 0;
}
@@ -1110,6 +1197,11 @@ static inline int security_task_prctl(int option, unsigned long arg2,
static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
{ }
+static inline int security_create_user_ns(const struct cred *cred)
+{
+ return 0;
+}
+
static inline int security_ipc_permission(struct kern_ipc_perm *ipcp,
short flag)
{
@@ -1217,7 +1309,7 @@ static inline void security_d_instantiate(struct dentry *dentry,
{ }
static inline int security_getprocattr(struct task_struct *p, const char *lsm,
- char *name, char **value)
+ const char *name, char **value)
{
return -EINVAL;
}
@@ -1276,6 +1368,28 @@ static inline int security_locked_down(enum lockdown_reason what)
}
#endif /* CONFIG_SECURITY */
+#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
+int security_post_notification(const struct cred *w_cred,
+ const struct cred *cred,
+ struct watch_notification *n);
+#else
+static inline int security_post_notification(const struct cred *w_cred,
+ const struct cred *cred,
+ struct watch_notification *n)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_SECURITY) && defined(CONFIG_KEY_NOTIFICATIONS)
+int security_watch_key(struct key *key);
+#else
+static inline int security_watch_key(struct key *key)
+{
+ return 0;
+}
+#endif
+
#ifdef CONFIG_SECURITY_NETWORK
int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk);
@@ -1303,10 +1417,11 @@ int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u
int security_sk_alloc(struct sock *sk, int family, gfp_t priority);
void security_sk_free(struct sock *sk);
void security_sk_clone(const struct sock *sk, struct sock *newsk);
-void security_sk_classify_flow(struct sock *sk, struct flowi *fl);
-void security_req_classify_flow(const struct request_sock *req, struct flowi *fl);
+void security_sk_classify_flow(struct sock *sk, struct flowi_common *flic);
+void security_req_classify_flow(const struct request_sock *req,
+ struct flowi_common *flic);
void security_sock_graft(struct sock*sk, struct socket *parent);
-int security_inet_conn_request(struct sock *sk,
+int security_inet_conn_request(const struct sock *sk,
struct sk_buff *skb, struct request_sock *req);
void security_inet_csk_clone(struct sock *newsk,
const struct request_sock *req);
@@ -1321,11 +1436,13 @@ int security_tun_dev_create(void);
int security_tun_dev_attach_queue(void *security);
int security_tun_dev_attach(struct sock *sk, void *security);
int security_tun_dev_open(void *security);
-int security_sctp_assoc_request(struct sctp_endpoint *ep, struct sk_buff *skb);
+int security_sctp_assoc_request(struct sctp_association *asoc, struct sk_buff *skb);
int security_sctp_bind_connect(struct sock *sk, int optname,
struct sockaddr *address, int addrlen);
-void security_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk,
+void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk,
struct sock *newsk);
+int security_sctp_assoc_established(struct sctp_association *asoc,
+ struct sk_buff *skb);
#else /* CONFIG_SECURITY_NETWORK */
static inline int security_unix_stream_connect(struct sock *sock,
@@ -1455,11 +1572,13 @@ static inline void security_sk_clone(const struct sock *sk, struct sock *newsk)
{
}
-static inline void security_sk_classify_flow(struct sock *sk, struct flowi *fl)
+static inline void security_sk_classify_flow(struct sock *sk,
+ struct flowi_common *flic)
{
}
-static inline void security_req_classify_flow(const struct request_sock *req, struct flowi *fl)
+static inline void security_req_classify_flow(const struct request_sock *req,
+ struct flowi_common *flic)
{
}
@@ -1467,7 +1586,7 @@ static inline void security_sock_graft(struct sock *sk, struct socket *parent)
{
}
-static inline int security_inet_conn_request(struct sock *sk,
+static inline int security_inet_conn_request(const struct sock *sk,
struct sk_buff *skb, struct request_sock *req)
{
return 0;
@@ -1525,7 +1644,7 @@ static inline int security_tun_dev_open(void *security)
return 0;
}
-static inline int security_sctp_assoc_request(struct sctp_endpoint *ep,
+static inline int security_sctp_assoc_request(struct sctp_association *asoc,
struct sk_buff *skb)
{
return 0;
@@ -1538,11 +1657,17 @@ static inline int security_sctp_bind_connect(struct sock *sk, int optname,
return 0;
}
-static inline void security_sctp_sk_clone(struct sctp_endpoint *ep,
+static inline void security_sctp_sk_clone(struct sctp_association *asoc,
struct sock *sk,
struct sock *newsk)
{
}
+
+static inline int security_sctp_assoc_established(struct sctp_association *asoc,
+ struct sk_buff *skb)
+{
+ return 0;
+}
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_INFINIBAND
@@ -1583,12 +1708,12 @@ int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
struct xfrm_sec_ctx *polsec, u32 secid);
int security_xfrm_state_delete(struct xfrm_state *x);
void security_xfrm_state_free(struct xfrm_state *x);
-int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
+int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid);
int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
struct xfrm_policy *xp,
- const struct flowi *fl);
+ const struct flowi_common *flic);
int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid);
-void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl);
+void security_skb_classify_flow(struct sk_buff *skb, struct flowi_common *flic);
#else /* CONFIG_SECURITY_NETWORK_XFRM */
@@ -1634,13 +1759,14 @@ static inline int security_xfrm_state_delete(struct xfrm_state *x)
return 0;
}
-static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
+static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid)
{
return 0;
}
static inline int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
- struct xfrm_policy *xp, const struct flowi *fl)
+ struct xfrm_policy *xp,
+ const struct flowi_common *flic)
{
return 1;
}
@@ -1650,7 +1776,8 @@ static inline int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
return 0;
}
-static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl)
+static inline void security_skb_classify_flow(struct sk_buff *skb,
+ struct flowi_common *flic)
{
}
@@ -1744,8 +1871,8 @@ static inline int security_path_chroot(const struct path *path)
int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags);
void security_key_free(struct key *key);
-int security_key_permission(key_ref_t key_ref,
- const struct cred *cred, unsigned perm);
+int security_key_permission(key_ref_t key_ref, const struct cred *cred,
+ enum key_need_perm need_perm);
int security_key_getsecurity(struct key *key, char **_buffer);
#else
@@ -1763,7 +1890,7 @@ static inline void security_key_free(struct key *key)
static inline int security_key_permission(key_ref_t key_ref,
const struct cred *cred,
- unsigned perm)
+ enum key_need_perm need_perm)
{
return 0;
}
@@ -1937,4 +2064,25 @@ static inline int security_perf_event_write(struct perf_event *event)
#endif /* CONFIG_SECURITY */
#endif /* CONFIG_PERF_EVENTS */
+#ifdef CONFIG_IO_URING
+#ifdef CONFIG_SECURITY
+extern int security_uring_override_creds(const struct cred *new);
+extern int security_uring_sqpoll(void);
+extern int security_uring_cmd(struct io_uring_cmd *ioucmd);
+#else
+static inline int security_uring_override_creds(const struct cred *new)
+{
+ return 0;
+}
+static inline int security_uring_sqpoll(void)
+{
+ return 0;
+}
+static inline int security_uring_cmd(struct io_uring_cmd *ioucmd)
+{
+ return 0;
+}
+#endif /* CONFIG_SECURITY */
+#endif /* CONFIG_IO_URING */
+
#endif /* ! __LINUX_SECURITY_H */
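
The new lockdown reasons are inserted before LOCKDOWN_INTEGRITY_MAX or LOCKDOWN_CONFIDENTIALITY_MAX so the lockdown LSM's existing threshold comparison keeps working. A sketch of the caller-side pattern, with a hypothetical dbg_write_kernel_mem() standing in for a real debug facility:

#include <linux/security.h>
#include <linux/string.h>

static int dbg_write_kernel_mem(void *dst, const void *src, size_t len)
{
	int ret;

	/* Refused once lockdown reaches integrity level or stricter. */
	ret = security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL);
	if (ret)
		return ret;

	memcpy(dst, src, len);
	return 0;
}
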
diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h
index 1ac0d712a9c3..6f837bb6c715 100644
--- a/include/linux/sed-opal.h
+++ b/include/linux/sed-opal.h
@@ -43,6 +43,7 @@ static inline bool is_sed_ioctl(unsigned int cmd)
case IOC_OPAL_MBR_DONE:
case IOC_OPAL_WRITE_SHADOW_MBR:
case IOC_OPAL_GENERIC_TABLE_RW:
+ case IOC_OPAL_GET_STATUS:
return true;
}
return false;
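
IOC_OPAL_GET_STATUS only has to be added here because drivers gate SED ioctls with is_sed_ioctl() and forward them wholesale. A sketch of that dispatch pattern, assuming a hypothetical driver structure holding an opal_dev (the block ioctl prototype shown matches this kernel era, not necessarily later ones):

#include <linux/blkdev.h>
#include <linux/sed-opal.h>

struct foo_dev {			/* hypothetical driver state */
	struct opal_dev *opal_dev;
};

static int foo_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct foo_dev *foo = bdev->bd_disk->private_data;

	if (is_sed_ioctl(cmd))
		/* routes all IOC_OPAL_* commands, now including
		 * IOC_OPAL_GET_STATUS, to the OPAL core */
		return sed_ioctl(foo->opal_dev, cmd, (void __user *)arg);

	return -ENOTTY;
}
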
diff --git a/include/linux/selection.h b/include/linux/selection.h
index e2c1f96bf059..170ef28ff26b 100644
--- a/include/linux/selection.h
+++ b/include/linux/selection.h
@@ -11,8 +11,8 @@
#include <linux/tiocl.h>
#include <linux/vt_buffer.h>
-extern struct vc_data *sel_cons;
struct tty_struct;
+struct vc_data;
extern void clear_selection(void);
extern int set_selection_user(const struct tiocl_selection __user *sel,
@@ -24,6 +24,8 @@ extern int sel_loadlut(char __user *p);
extern int mouse_reporting(void);
extern void mouse_report(struct tty_struct * tty, int butt, int mrx, int mry);
+bool vc_is_sel(struct vc_data *vc);
+
extern int console_blanked;
extern const unsigned char color_table[];
@@ -31,21 +33,23 @@ extern unsigned char default_red[];
extern unsigned char default_grn[];
extern unsigned char default_blu[];
-extern unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed);
-extern u16 screen_glyph(struct vc_data *vc, int offset);
-extern u32 screen_glyph_unicode(struct vc_data *vc, int offset);
+extern unsigned short *screen_pos(const struct vc_data *vc, int w_offset,
+ bool viewed);
+extern u16 screen_glyph(const struct vc_data *vc, int offset);
+extern u32 screen_glyph_unicode(const struct vc_data *vc, int offset);
extern void complement_pos(struct vc_data *vc, int offset);
-extern void invert_screen(struct vc_data *vc, int offset, int count, int shift);
+extern void invert_screen(struct vc_data *vc, int offset, int count, bool viewed);
-extern void getconsxy(struct vc_data *vc, unsigned char *p);
-extern void putconsxy(struct vc_data *vc, unsigned char *p);
+extern void getconsxy(const struct vc_data *vc, unsigned char xy[static 2]);
+extern void putconsxy(struct vc_data *vc, unsigned char xy[static const 2]);
-extern u16 vcs_scr_readw(struct vc_data *vc, const u16 *org);
+extern u16 vcs_scr_readw(const struct vc_data *vc, const u16 *org);
extern void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org);
extern void vcs_scr_updated(struct vc_data *vc);
extern int vc_uniscr_check(struct vc_data *vc);
-extern void vc_uniscr_copy_line(struct vc_data *vc, void *dest, int viewed,
+extern void vc_uniscr_copy_line(const struct vc_data *vc, void *dest,
+ bool viewed,
unsigned int row, unsigned int col,
unsigned int nr);
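
The new getconsxy()/putconsxy() prototypes use C99 static array parameters: unsigned char xy[static 2] promises the callee a non-null pointer to at least two elements, which both documents the contract and lets compilers warn at bad call sites. A standalone userspace illustration:

#include <stdio.h>

/* The callee may assume p points at no fewer than 2 readable bytes. */
static unsigned int sum2(const unsigned char p[static 2])
{
	return p[0] + p[1];
}

int main(void)
{
	unsigned char xy[2] = { 3, 4 };

	printf("%u\n", sum2(xy));	/* prints 7 */
	/* sum2(NULL), or passing a 1-byte array, is undefined behaviour,
	 * and compilers can diagnose it at the call site. */
	return 0;
}
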
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
index fb0205d87d3c..5b31c5147969 100644
--- a/include/linux/seq_buf.h
+++ b/include/linux/seq_buf.h
@@ -30,7 +30,7 @@ static inline void seq_buf_clear(struct seq_buf *s)
}
static inline void
-seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size)
+seq_buf_init(struct seq_buf *s, char *buf, unsigned int size)
{
s->buffer = buf;
s->size = size;
@@ -72,6 +72,31 @@ static inline unsigned int seq_buf_used(struct seq_buf *s)
}
/**
+ * seq_buf_terminate - Make sure buffer is nul terminated
+ * @s: the seq_buf descriptor to terminate.
+ *
+ * This makes sure that the buffer in @s is nul terminated and
+ * safe to read as a string.
+ *
+ * Note, if this is called when the buffer has overflowed, then
+ * the last byte of the buffer is zeroed, and the len will still
+ * point past it.
+ *
+ * After this function is called, s->buffer is safe to use
+ * in string operations.
+ */
+static inline void seq_buf_terminate(struct seq_buf *s)
+{
+ if (WARN_ON(s->size == 0))
+ return;
+
+ if (seq_buf_buffer_left(s))
+ s->buffer[s->len] = 0;
+ else
+ s->buffer[s->size - 1] = 0;
+}
+
+/**
* seq_buf_get_buf - get buffer to write arbitrary data to
* @s: the seq_buf handle
* @bufp: the beginning of the buffer is stored here
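
seq_buf_terminate() exists so that a possibly-overflowed seq_buf can still be handed to string functions. A sketch of the intended call sequence, using seq_buf_printf() from the same API (caller names hypothetical):

#include <linux/printk.h>
#include <linux/seq_buf.h>

static void report(char *out, unsigned int len)
{
	struct seq_buf s;

	seq_buf_init(&s, out, len);	/* buf is plain char * now */
	seq_buf_printf(&s, "state=%d", 42);

	/* Even if the printf overflowed, out is now a valid C string. */
	seq_buf_terminate(&s);
	pr_info("%s\n", out);
}
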
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 770c2bf3aa43..bd023dd38ae6 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <linux/string.h>
+#include <linux/string_helpers.h>
#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/cpumask.h>
@@ -21,7 +22,6 @@ struct seq_file {
size_t pad_until;
loff_t index;
loff_t read_pos;
- u64 version;
struct mutex lock;
const struct seq_operations *op;
int poll_event;
@@ -108,6 +108,7 @@ void seq_pad(struct seq_file *m, char c);
char *mangle_path(char *s, const char *p, const char *esc);
int seq_open(struct file *, const struct seq_operations *);
ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
+ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter);
loff_t seq_lseek(struct file *, loff_t, int);
int seq_release(struct inode *, struct file *);
int seq_write(struct seq_file *seq, const void *data, size_t len);
@@ -126,8 +127,30 @@ void seq_put_decimal_ll(struct seq_file *m, const char *delimiter, long long num
void seq_put_hex_ll(struct seq_file *m, const char *delimiter,
unsigned long long v, unsigned int width);
-void seq_escape(struct seq_file *m, const char *s, const char *esc);
-void seq_escape_mem_ascii(struct seq_file *m, const char *src, size_t isz);
+void seq_escape_mem(struct seq_file *m, const char *src, size_t len,
+ unsigned int flags, const char *esc);
+
+static inline void seq_escape_str(struct seq_file *m, const char *src,
+ unsigned int flags, const char *esc)
+{
+ seq_escape_mem(m, src, strlen(src), flags, esc);
+}
+
+/**
+ * seq_escape - print string into buffer, escaping some characters
+ * @m: target buffer
+ * @s: NULL-terminated string
+ * @esc: set of characters that need escaping
+ *
+ * Puts string into buffer, replacing each occurrence of a character from
+ * @esc with the usual octal escape.
+ *
+ * Use seq_has_overflowed() to check for errors.
+ */
+static inline void seq_escape(struct seq_file *m, const char *s, const char *esc)
+{
+ seq_escape_str(m, s, ESCAPE_OCTAL, esc);
+}
void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type,
int rowsize, int groupsize, const void *buf, size_t len,
@@ -139,6 +162,7 @@ int seq_dentry(struct seq_file *, struct dentry *, const char *);
int seq_path_root(struct seq_file *m, const struct path *path,
const struct path *root, const char *esc);
+void *single_start(struct seq_file *, loff_t *);
int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
int single_release(struct inode *, struct file *);
@@ -146,6 +170,29 @@ void *__seq_open_private(struct file *, const struct seq_operations *, int);
int seq_open_private(struct file *, const struct seq_operations *, int);
int seq_release_private(struct inode *, struct file *);
+#ifdef CONFIG_BINARY_PRINTF
+void seq_bprintf(struct seq_file *m, const char *f, const u32 *binary);
+#endif
+
+#define DEFINE_SEQ_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ int ret = seq_open(file, &__name ## _sops); \
+ if (!ret && inode->i_private) { \
+ struct seq_file *seq_f = file->private_data; \
+ seq_f->private = inode->i_private; \
+ } \
+ return ret; \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = seq_release, \
+}
+
#define DEFINE_SHOW_ATTRIBUTE(__name) \
static int __name ## _open(struct inode *inode, struct file *file) \
{ \
@@ -163,7 +210,7 @@ static const struct file_operations __name ## _fops = { \
#define DEFINE_PROC_SHOW_ATTRIBUTE(__name) \
static int __name ## _open(struct inode *inode, struct file *file) \
{ \
- return single_open(file, __name ## _show, inode->i_private); \
+ return single_open(file, __name ## _show, pde_data(inode)); \
} \
\
static const struct proc_ops __name ## _proc_ops = { \
@@ -230,6 +277,10 @@ extern struct list_head *seq_list_start_head(struct list_head *head,
extern struct list_head *seq_list_next(void *v, struct list_head *head,
loff_t *ppos);
+extern struct list_head *seq_list_start_rcu(struct list_head *head, loff_t pos);
+extern struct list_head *seq_list_start_head_rcu(struct list_head *head, loff_t pos);
+extern struct list_head *seq_list_next_rcu(void *v, struct list_head *head, loff_t *ppos);
+
/*
* Helpers for iteration over hlist_head-s in seq_files
*/
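
DEFINE_SEQ_ATTRIBUTE() removes the per-file boilerplate for multi-record seq_files: given a foo_sops seq_operations, it generates foo_open() and foo_fops, wiring inode->i_private into seq_file->private when set. A sketch of the naming contract (the iterator callbacks are hypothetical):

#include <linux/seq_file.h>

static const struct seq_operations foo_sops = {
	.start	= foo_start,	/* hypothetical iterator callbacks */
	.next	= foo_next,
	.stop	= foo_stop,
	.show	= foo_show,
};

/* Generates foo_open() and foo_fops from the foo_sops name above. */
DEFINE_SEQ_ATTRIBUTE(foo);

/* e.g.: debugfs_create_file("foo", 0444, parent, data, &foo_fops); */
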
diff --git a/include/linux/seq_file_net.h b/include/linux/seq_file_net.h
index 0fdbe1ddd8d1..79638395bc32 100644
--- a/include/linux/seq_file_net.h
+++ b/include/linux/seq_file_net.h
@@ -3,13 +3,15 @@
#define __SEQ_FILE_NET_H__
#include <linux/seq_file.h>
+#include <net/net_trackers.h>
struct net;
extern struct net init_net;
struct seq_net_private {
#ifdef CONFIG_NET_NS
- struct net *net;
+ struct net *net;
+ netns_tracker ns_tracker;
#endif
};
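
The new ns_tracker member pairs each held struct net reference with a reference tracker so leaks and double puts can be attributed. A sketch of the take/drop pairing, assuming the get_net_track()/put_net_track() helpers from net/net_namespace.h in this kernel era (function names hypothetical):

#include <linux/gfp.h>
#include <linux/seq_file_net.h>
#include <net/net_namespace.h>

#ifdef CONFIG_NET_NS
static void foo_hold_net(struct seq_net_private *p, struct net *net)
{
	/* tracked reference: the tracker attributes leaks/double puts */
	p->net = get_net_track(net, &p->ns_tracker, GFP_KERNEL);
}

static void foo_release_net(struct seq_net_private *p)
{
	put_net_track(p->net, &p->ns_tracker);
}
#endif
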
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 0491d963d47e..3926e9027947 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -1,49 +1,65 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
+
/*
- * Reader/writer consistent mechanism without starving writers. This type of
- * lock for data where the reader wants a consistent set of information
- * and is willing to retry if the information changes. There are two types
- * of readers:
- * 1. Sequence readers which never block a writer but they may have to retry
- * if a writer is in progress by detecting change in sequence number.
- * Writers do not wait for a sequence reader.
- * 2. Locking readers which will wait if a writer or another locking reader
- * is in progress. A locking reader in progress will also block a writer
- * from going forward. Unlike the regular rwlock, the read lock here is
- * exclusive so that only one locking reader can get it.
- *
- * This is not as cache friendly as brlock. Also, this may not work well
- * for data that contains pointers, because any writer could
- * invalidate a pointer that a reader was following.
- *
- * Expected non-blocking reader usage:
- * do {
- * seq = read_seqbegin(&foo);
- * ...
- * } while (read_seqretry(&foo, seq));
+ * seqcount_t / seqlock_t - a reader-writer consistency mechanism with
+ * lockless readers (read-only retry loops), and no writer starvation.
*
+ * See Documentation/locking/seqlock.rst
*
- * On non-SMP the spin locks disappear but the writer still needs
- * to increment the sequence variables because an interrupt routine could
- * change the state of the data.
- *
- * Based on x86_64 vsyscall gettimeofday
- * by Keith Owens and Andrea Arcangeli
+ * Copyrights:
+ * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
+ * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
*/
-#include <linux/spinlock.h>
-#include <linux/preempt.h>
-#include <linux/lockdep.h>
#include <linux/compiler.h>
+#include <linux/kcsan-checks.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+#include <linux/preempt.h>
+#include <linux/spinlock.h>
+
#include <asm/processor.h>
/*
- * Version using sequence counter only.
- * This can be used when code has its own mutex protecting the
- * updating starting before the write_seqcountbeqin() and ending
- * after the write_seqcount_end().
+ * The seqlock seqcount_t interface does not prescribe a precise sequence of
+ * read begin/retry/end. For readers, typically there is a call to
+ * read_seqcount_begin() and read_seqcount_retry(); however, there are more
+ * esoteric cases which do not follow this pattern.
+ *
+ * As a consequence, we take the following best-effort approach for raw usage
+ * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
+ * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
+ * atomics; if there is a matching read_seqcount_retry() call, no following
+ * memory operations are considered atomic. Usage of the seqlock_t interface
+ * is not affected.
+ */
+#define KCSAN_SEQLOCK_REGION_MAX 1000
+
+/*
+ * Sequence counters (seqcount_t)
+ *
+ * This is the raw counting mechanism, without any writer protection.
+ *
+ * Write side critical sections must be serialized and non-preemptible.
+ *
+ * If readers can be invoked from hardirq or softirq contexts,
+ * interrupts or bottom halves must also be respectively disabled before
+ * entering the write section.
+ *
+ * This mechanism can't be used if the protected data contains pointers,
+ * as the writer can invalidate a pointer that a reader is following.
+ *
+ * If the write serialization mechanism is one of the common kernel
+ * locking primitives, use a sequence counter with associated lock
+ * (seqcount_LOCKNAME_t) instead.
+ *
+ * If it's desired to automatically handle the sequence counter writer
+ * serialization and non-preemptibility requirements, use a sequential
+ * lock (seqlock_t) instead.
+ *
+ * See Documentation/locking/seqlock.rst
*/
typedef struct seqcount {
unsigned sequence;
@@ -63,13 +79,18 @@ static inline void __seqcount_init(seqcount_t *s, const char *name,
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define SEQCOUNT_DEP_MAP_INIT(lockname) \
- .dep_map = { .name = #lockname } \
-# define seqcount_init(s) \
- do { \
- static struct lock_class_key __key; \
- __seqcount_init((s), #s, &__key); \
+# define SEQCOUNT_DEP_MAP_INIT(lockname) \
+ .dep_map = { .name = #lockname }
+
+/**
+ * seqcount_init() - runtime initializer for seqcount_t
+ * @s: Pointer to the seqcount_t instance
+ */
+# define seqcount_init(s) \
+ do { \
+ static struct lock_class_key __key; \
+ __seqcount_init((s), #s, &__key); \
} while (0)
static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
@@ -89,13 +110,207 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
# define seqcount_lockdep_reader_access(x)
#endif
-#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}
+/**
+ * SEQCNT_ZERO() - static initializer for seqcount_t
+ * @name: Name of the seqcount_t instance
+ */
+#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
+/*
+ * Sequence counters with associated locks (seqcount_LOCKNAME_t)
+ *
+ * A sequence counter which associates the lock used for writer
+ * serialization at initialization time. This enables lockdep to validate
+ * that the write side critical section is properly serialized.
+ *
+ * For associated locks which do not implicitly disable preemption,
+ * preemption protection is enforced in the write side function.
+ *
+ * Lockdep is never used in any of the raw write variants.
+ *
+ * See Documentation/locking/seqlock.rst
+ */
+
+/*
+ * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
+ * disable preemption. It can lead to higher latencies, and the write side
+ * sections will not be able to acquire locks which become sleeping locks
+ * (e.g. spinlock_t).
+ *
+ * To remain preemptible while avoiding a possible livelock caused by the
+ * reader preempting the writer, use a different technique: let the reader
+ * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
+ * case, acquire then release the associated LOCKNAME writer serialization
+ * lock. This will allow any possibly-preempted writer to make progress
+ * until the end of its writer serialization lock critical section.
+ *
+ * This lock-unlock technique must be implemented for all of PREEMPT_RT
+ * sleeping locks. See Documentation/locking/locktypes.rst
+ */
+#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
+#define __SEQ_LOCK(expr) expr
+#else
+#define __SEQ_LOCK(expr)
+#endif
+
+/*
+ * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
+ * @seqcount: The real sequence counter
+ * @lock: Pointer to the associated lock
+ *
+ * A plain sequence counter with external writer synchronization by
+ * LOCKNAME @lock. The lock is associated to the sequence counter in the
+ * static initializer or init function. This enables lockdep to validate
+ * that the write side critical section is properly serialized.
+ *
+ * LOCKNAME: raw_spinlock, spinlock, rwlock or mutex
+ */
+
+/*
+ * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
+ * @s: Pointer to the seqcount_LOCKNAME_t instance
+ * @lock: Pointer to the associated lock
+ */
+
+#define seqcount_LOCKNAME_init(s, _lock, lockname) \
+ do { \
+ seqcount_##lockname##_t *____s = (s); \
+ seqcount_init(&____s->seqcount); \
+ __SEQ_LOCK(____s->lock = (_lock)); \
+ } while (0)
+
+#define seqcount_raw_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, raw_spinlock)
+#define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock)
+#define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock)
+#define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex)
+
+/*
+ * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
+ * seqprop_LOCKNAME_*() - Property accessors for seqcount_LOCKNAME_t
+ *
+ * @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t
+ * @locktype: LOCKNAME canonical C data type
+ * @preemptible: preemptibility of above locktype
+ * @lockmember: argument for lockdep_assert_held()
+ * @lockbase: associated lock release function (prefix only)
+ * @lock_acquire: associated lock acquisition function (full call)
+ */
+#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember, lockbase, lock_acquire) \
+typedef struct seqcount_##lockname { \
+ seqcount_t seqcount; \
+ __SEQ_LOCK(locktype *lock); \
+} seqcount_##lockname##_t; \
+ \
+static __always_inline seqcount_t * \
+__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
+{ \
+ return &s->seqcount; \
+} \
+ \
+static __always_inline unsigned \
+__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
+{ \
+ unsigned seq = READ_ONCE(s->seqcount.sequence); \
+ \
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
+ return seq; \
+ \
+ if (preemptible && unlikely(seq & 1)) { \
+ __SEQ_LOCK(lock_acquire); \
+ __SEQ_LOCK(lockbase##_unlock(s->lock)); \
+ \
+ /* \
+ * Re-read the sequence counter since the (possibly \
+ * preempted) writer made progress. \
+ */ \
+ seq = READ_ONCE(s->seqcount.sequence); \
+ } \
+ \
+ return seq; \
+} \
+ \
+static __always_inline bool \
+__seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \
+{ \
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
+ return preemptible; \
+ \
+ /* PREEMPT_RT relies on the above LOCK+UNLOCK */ \
+ return false; \
+} \
+ \
+static __always_inline void \
+__seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \
+{ \
+ __SEQ_LOCK(lockdep_assert_held(lockmember)); \
+}
+
+/*
+ * __seqprop() for seqcount_t
+ */
+
+static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
+{
+ return s;
+}
+
+static inline unsigned __seqprop_sequence(const seqcount_t *s)
+{
+ return READ_ONCE(s->sequence);
+}
+
+static inline bool __seqprop_preemptible(const seqcount_t *s)
+{
+ return false;
+}
+
+static inline void __seqprop_assert(const seqcount_t *s)
+{
+ lockdep_assert_preemption_disabled();
+}
+
+#define __SEQ_RT IS_ENABLED(CONFIG_PREEMPT_RT)
+
+SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock, raw_spin, raw_spin_lock(s->lock))
+SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, s->lock, spin, spin_lock(s->lock))
+SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, s->lock, read, read_lock(s->lock))
+SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex, mutex_lock(s->lock))
+
+/*
+ * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
+ * @name: Name of the seqcount_LOCKNAME_t instance
+ * @lock: Pointer to the associated LOCKNAME
+ */
+
+#define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) { \
+ .seqcount = SEQCNT_ZERO(seq_name.seqcount), \
+ __SEQ_LOCK(.lock = (assoc_lock)) \
+}
+
+#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+#define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+#define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+
+#define __seqprop_case(s, lockname, prop) \
+ seqcount_##lockname##_t: __seqprop_##lockname##_##prop((void *)(s))
+
+#define __seqprop(s, prop) _Generic(*(s), \
+ seqcount_t: __seqprop_##prop((void *)(s)), \
+ __seqprop_case((s), raw_spinlock, prop), \
+ __seqprop_case((s), spinlock, prop), \
+ __seqprop_case((s), rwlock, prop), \
+ __seqprop_case((s), mutex, prop))
+
+#define seqprop_ptr(s) __seqprop(s, ptr)
+#define seqprop_sequence(s) __seqprop(s, sequence)
+#define seqprop_preemptible(s) __seqprop(s, preemptible)
+#define seqprop_assert(s) __seqprop(s, assert)
/**
- * __read_seqcount_begin - begin a seq-read critical section (without barrier)
- * @s: pointer to seqcount_t
- * Returns: count to be passed to read_seqcount_retry
+ * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
* barrier. Callers should ensure that smp_rmb() or equivalent ordering is
@@ -104,93 +319,96 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
*
* Use carefully, only in critical code, and comment how the barrier is
* provided.
+ *
+ * Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned __read_seqcount_begin(const seqcount_t *s)
-{
- unsigned ret;
-
-repeat:
- ret = READ_ONCE(s->sequence);
- if (unlikely(ret & 1)) {
- cpu_relax();
- goto repeat;
- }
- return ret;
-}
+#define __read_seqcount_begin(s) \
+({ \
+ unsigned __seq; \
+ \
+ while ((__seq = seqprop_sequence(s)) & 1) \
+ cpu_relax(); \
+ \
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
+ __seq; \
+})
/**
- * raw_read_seqcount - Read the raw seqcount
- * @s: pointer to seqcount_t
- * Returns: count to be passed to read_seqcount_retry
+ * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
- * raw_read_seqcount opens a read critical section of the given
- * seqcount without any lockdep checking and without checking or
- * masking the LSB. Calling code is responsible for handling that.
+ * Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned raw_read_seqcount(const seqcount_t *s)
-{
- unsigned ret = READ_ONCE(s->sequence);
- smp_rmb();
- return ret;
-}
+#define raw_read_seqcount_begin(s) \
+({ \
+ unsigned _seq = __read_seqcount_begin(s); \
+ \
+ smp_rmb(); \
+ _seq; \
+})
/**
- * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
- * @s: pointer to seqcount_t
- * Returns: count to be passed to read_seqcount_retry
+ * read_seqcount_begin() - begin a seqcount_t read critical section
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
- * raw_read_seqcount_begin opens a read critical section of the given
- * seqcount, but without any lockdep checking. Validity of the critical
- * section is tested by checking read_seqcount_retry function.
+ * Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
-{
- unsigned ret = __read_seqcount_begin(s);
- smp_rmb();
- return ret;
-}
+#define read_seqcount_begin(s) \
+({ \
+ seqcount_lockdep_reader_access(seqprop_ptr(s)); \
+ raw_read_seqcount_begin(s); \
+})
/**
- * read_seqcount_begin - begin a seq-read critical section
- * @s: pointer to seqcount_t
- * Returns: count to be passed to read_seqcount_retry
+ * raw_read_seqcount() - read the raw seqcount_t counter value
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * raw_read_seqcount opens a read critical section of the given
+ * seqcount_t, without any lockdep checking, and without checking or
+ * masking the sequence counter LSB. Calling code is responsible for
+ * handling that.
*
- * read_seqcount_begin opens a read critical section of the given seqcount.
- * Validity of the critical section is tested by checking read_seqcount_retry
- * function.
+ * Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned read_seqcount_begin(const seqcount_t *s)
-{
- seqcount_lockdep_reader_access(s);
- return raw_read_seqcount_begin(s);
-}
+#define raw_read_seqcount(s) \
+({ \
+ unsigned __seq = seqprop_sequence(s); \
+ \
+ smp_rmb(); \
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
+ __seq; \
+})
/**
- * raw_seqcount_begin - begin a seq-read critical section
- * @s: pointer to seqcount_t
- * Returns: count to be passed to read_seqcount_retry
+ * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
+ * lockdep and w/o counter stabilization
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * raw_seqcount_begin opens a read critical section of the given
+ * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
+ * for the count to stabilize. If a writer is active when it begins, it
+ * will fail the read_seqcount_retry() at the end of the read critical
+ * section instead of stabilizing at the beginning of it.
*
- * raw_seqcount_begin opens a read critical section of the given seqcount.
- * Validity of the critical section is tested by checking read_seqcount_retry
- * function.
+ * Use this only in special kernel hot paths where the read section is
+ * small and has a high probability of success through other external
+ * means. It will save a single branching instruction.
*
- * Unlike read_seqcount_begin(), this function will not wait for the count
- * to stabilize. If a writer is active when we begin, we will fail the
- * read_seqcount_retry() instead of stabilizing at the beginning of the
- * critical section.
+ * Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned raw_seqcount_begin(const seqcount_t *s)
-{
- unsigned ret = READ_ONCE(s->sequence);
- smp_rmb();
- return ret & ~1;
-}
+#define raw_seqcount_begin(s) \
+({ \
+ /* \
+ * If the counter is odd, let read_seqcount_retry() fail \
+ * by decrementing the counter. \
+ */ \
+ raw_read_seqcount(s) & ~1; \
+})
/**
- * __read_seqcount_retry - end a seq-read critical section (without barrier)
- * @s: pointer to seqcount_t
- * @start: count, from read_seqcount_begin
- * Returns: 1 if retry is required, else 0
+ * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ * @start: count, from read_seqcount_begin()
*
* __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
* barrier. Callers should ensure that smp_rmb() or equivalent ordering is
@@ -199,93 +417,287 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
*
* Use carefully, only in critical code, and comment how the barrier is
* provided.
+ *
+ * Return: true if a read section retry is required, else false
*/
-static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
+#define __read_seqcount_retry(s, start) \
+ do___read_seqcount_retry(seqprop_ptr(s), start)
+
+static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
{
- return unlikely(s->sequence != start);
+ kcsan_atomic_next(0);
+ return unlikely(READ_ONCE(s->sequence) != start);
}
/**
- * read_seqcount_retry - end a seq-read critical section
- * @s: pointer to seqcount_t
- * @start: count, from read_seqcount_begin
- * Returns: 1 if retry is required, else 0
+ * read_seqcount_retry() - end a seqcount_t read critical section
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ * @start: count, from read_seqcount_begin()
*
- * read_seqcount_retry closes a read critical section of the given seqcount.
- * If the critical section was invalid, it must be ignored (and typically
- * retried).
+ * read_seqcount_retry closes the read critical section of given
+ * seqcount_t. If the critical section was invalid, it must be ignored
+ * (and typically retried).
+ *
+ * Return: true if a read section retry is required, else false
*/
-static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
+#define read_seqcount_retry(s, start) \
+ do_read_seqcount_retry(seqprop_ptr(s), start)
+
+static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
{
smp_rmb();
- return __read_seqcount_retry(s, start);
+ return do___read_seqcount_retry(s, start);
}
+/**
+ * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * Context: check write_seqcount_begin()
+ */
+#define raw_write_seqcount_begin(s) \
+do { \
+ if (seqprop_preemptible(s)) \
+ preempt_disable(); \
+ \
+ do_raw_write_seqcount_begin(seqprop_ptr(s)); \
+} while (0)
-
-static inline void raw_write_seqcount_begin(seqcount_t *s)
+static inline void do_raw_write_seqcount_begin(seqcount_t *s)
{
+ kcsan_nestable_atomic_begin();
s->sequence++;
smp_wmb();
}
-static inline void raw_write_seqcount_end(seqcount_t *s)
+/**
+ * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * Context: check write_seqcount_end()
+ */
+#define raw_write_seqcount_end(s) \
+do { \
+ do_raw_write_seqcount_end(seqprop_ptr(s)); \
+ \
+ if (seqprop_preemptible(s)) \
+ preempt_enable(); \
+} while (0)
+
+static inline void do_raw_write_seqcount_end(seqcount_t *s)
{
smp_wmb();
s->sequence++;
+ kcsan_nestable_atomic_end();
+}
+
+/**
+ * write_seqcount_begin_nested() - start a seqcount_t write section with
+ * custom lockdep nesting level
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ * @subclass: lockdep nesting level
+ *
+ * See Documentation/locking/lockdep-design.rst
+ * Context: check write_seqcount_begin()
+ */
+#define write_seqcount_begin_nested(s, subclass) \
+do { \
+ seqprop_assert(s); \
+ \
+ if (seqprop_preemptible(s)) \
+ preempt_disable(); \
+ \
+ do_write_seqcount_begin_nested(seqprop_ptr(s), subclass); \
+} while (0)
+
+static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
+{
+ do_raw_write_seqcount_begin(s);
+ seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}
/**
- * raw_write_seqcount_barrier - do a seq write barrier
- * @s: pointer to seqcount_t
+ * write_seqcount_begin() - start a seqcount_t write side critical section
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
- * This can be used to provide an ordering guarantee instead of the
- * usual consistency guarantee. It is one wmb cheaper, because we can
- * collapse the two back-to-back wmb()s.
+ * Context: sequence counter write side sections must be serialized and
+ * non-preemptible. Preemption will be automatically disabled if and
+ * only if the seqcount write serialization lock is associated, and
+ * preemptible. If readers can be invoked from hardirq or softirq
+ * context, interrupts or bottom halves must be respectively disabled.
+ */
+#define write_seqcount_begin(s) \
+do { \
+ seqprop_assert(s); \
+ \
+ if (seqprop_preemptible(s)) \
+ preempt_disable(); \
+ \
+ do_write_seqcount_begin(seqprop_ptr(s)); \
+} while (0)
+
+static inline void do_write_seqcount_begin(seqcount_t *s)
+{
+ do_write_seqcount_begin_nested(s, 0);
+}
+
+/**
+ * write_seqcount_end() - end a seqcount_t write side critical section
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
- * seqcount_t seq;
- * bool X = true, Y = false;
+ * Context: Preemption will be automatically re-enabled if and only if
+ * the seqcount write serialization lock is associated, and preemptible.
+ */
+#define write_seqcount_end(s) \
+do { \
+ do_write_seqcount_end(seqprop_ptr(s)); \
+ \
+ if (seqprop_preemptible(s)) \
+ preempt_enable(); \
+} while (0)
+
+static inline void do_write_seqcount_end(seqcount_t *s)
+{
+ seqcount_release(&s->dep_map, _RET_IP_);
+ do_raw_write_seqcount_end(s);
+}
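A minimal write-side sketch (hypothetical foo_* identifiers): with a seqcount_spinlock_t, holding the associated lock satisfies both the serialization requirement and the lockdep assertion performed by seqprop_assert():

	static DEFINE_SPINLOCK(foo_lock);
	static seqcount_spinlock_t foo_seq =
		SEQCNT_SPINLOCK_ZERO(foo_seq, &foo_lock);
	static u64 foo_value;

	static void foo_update(u64 v)
	{
		spin_lock(&foo_lock);			/* writer serialization */
		write_seqcount_begin(&foo_seq);		/* lockdep-checked */
		foo_value = v;
		write_seqcount_end(&foo_seq);
		spin_unlock(&foo_lock);
	}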
+
+/**
+ * raw_write_seqcount_barrier() - do a seqcount_t write barrier
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
- * void read(void)
- * {
- * bool x, y;
+ * This can be used to provide an ordering guarantee instead of the usual
+ * consistency guarantee. It is one wmb cheaper, because it can collapse
+ * the two back-to-back wmb()s.
+ *
+ * Note that writes surrounding the barrier should be declared atomic (e.g.
+ * via WRITE_ONCE): a) to ensure the writes become visible to other threads
+ * atomically, avoiding compiler optimizations; b) to document which writes are
+ * meant to propagate to the reader critical section. This is necessary because
+ * neither the writes before nor the writes after the barrier are
+ * enclosed in a seq-writer critical section that would ensure readers
+ * are aware of ongoing writes::
+ *
+ * seqcount_t seq;
+ * bool X = true, Y = false;
*
- * do {
- * int s = read_seqcount_begin(&seq);
+ * void read(void)
+ * {
+ * bool x, y;
*
- * x = X; y = Y;
+ * do {
+ * int s = read_seqcount_begin(&seq);
*
- * } while (read_seqcount_retry(&seq, s));
+ * x = X; y = Y;
*
- * BUG_ON(!x && !y);
+ * } while (read_seqcount_retry(&seq, s));
+ *
+ * BUG_ON(!x && !y);
* }
*
* void write(void)
* {
- * Y = true;
+ * WRITE_ONCE(Y, true);
*
- * raw_write_seqcount_barrier(seq);
+ * raw_write_seqcount_barrier(&seq);
*
- * X = false;
+ * WRITE_ONCE(X, false);
* }
*/
-static inline void raw_write_seqcount_barrier(seqcount_t *s)
+#define raw_write_seqcount_barrier(s) \
+ do_raw_write_seqcount_barrier(seqprop_ptr(s))
+
+static inline void do_raw_write_seqcount_barrier(seqcount_t *s)
{
+ kcsan_nestable_atomic_begin();
s->sequence++;
smp_wmb();
s->sequence++;
+ kcsan_nestable_atomic_end();
}
-static inline int raw_read_seqcount_latch(seqcount_t *s)
+/**
+ * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
+ * side operations
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * After write_seqcount_invalidate, no seqcount_t read side operations
+ * will complete successfully and see data older than this.
+ */
+#define write_seqcount_invalidate(s) \
+ do_write_seqcount_invalidate(seqprop_ptr(s))
+
+static inline void do_write_seqcount_invalidate(seqcount_t *s)
{
- /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
- int seq = READ_ONCE(s->sequence); /* ^^^ */
- return seq;
+ smp_wmb();
+ kcsan_nestable_atomic_begin();
+ s->sequence += 2;
+ kcsan_nestable_atomic_end();
+}
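A sketch of the intended use (hypothetical structure; the remove path is assumed externally serialized): after unhashing an object, invalidating its counter makes any in-flight lockless reader retry instead of trusting stale data:

	struct foo {
		seqcount_t		seq;
		struct hlist_node	node;
	};

	static void foo_unhash(struct foo *obj)
	{
		hlist_del_init(&obj->node);
		write_seqcount_invalidate(&obj->seq);	/* readers retry */
	}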
+
+/*
+ * Latch sequence counters (seqcount_latch_t)
+ *
+ * A sequence counter variant where the counter even/odd value is used to
+ * switch between two copies of protected data. This allows the read path,
+ * typically NMIs, to safely interrupt the write side critical section.
+ *
+ * As the write sections are fully preemptible, no special handling for
+ * PREEMPT_RT is needed.
+ */
+typedef struct {
+ seqcount_t seqcount;
+} seqcount_latch_t;
+
+/**
+ * SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t
+ * @seq_name: Name of the seqcount_latch_t instance
+ */
+#define SEQCNT_LATCH_ZERO(seq_name) { \
+ .seqcount = SEQCNT_ZERO(seq_name.seqcount), \
}
/**
- * raw_write_seqcount_latch - redirect readers to even/odd copy
- * @s: pointer to seqcount_t
+ * seqcount_latch_init() - runtime initializer for seqcount_latch_t
+ * @s: Pointer to the seqcount_latch_t instance
+ */
+#define seqcount_latch_init(s) seqcount_init(&(s)->seqcount)
+
+/**
+ * raw_read_seqcount_latch() - pick even/odd latch data copy
+ * @s: Pointer to seqcount_latch_t
+ *
+ * See raw_write_seqcount_latch() for details and a full reader/writer
+ * usage example.
+ *
+ * Return: sequence counter raw value. Use the lowest bit as an index for
+ * picking which data copy to read. The full counter must then be checked
+ * with read_seqcount_latch_retry().
+ */
+static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
+{
+ /*
+ * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
+ * Due to the dependent load, a full smp_rmb() is not needed.
+ */
+ return READ_ONCE(s->seqcount.sequence);
+}
+
+/**
+ * read_seqcount_latch_retry() - end a seqcount_latch_t read section
+ * @s: Pointer to seqcount_latch_t
+ * @start: count, from raw_read_seqcount_latch()
+ *
+ * Return: true if a read section retry is required, else false
+ */
+static inline int
+read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
+{
+ return read_seqcount_retry(&s->seqcount, start);
+}
+
+/**
+ * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
+ * @s: Pointer to seqcount_latch_t
*
* The latch technique is a multiversion concurrency control method that allows
* queries during non-atomic modifications. If you can guarantee queries never
@@ -301,181 +713,243 @@ static inline int raw_read_seqcount_latch(seqcount_t *s)
* Very simply put: we first modify one copy and then the other. This ensures
* there is always one copy in a stable state, ready to give us an answer.
*
- * The basic form is a data structure like:
+ * The basic form is a data structure like::
*
- * struct latch_struct {
- * seqcount_t seq;
- * struct data_struct data[2];
- * };
+ * struct latch_struct {
+ * seqcount_latch_t seq;
+ * struct data_struct data[2];
+ * };
*
* Where a modification, which is assumed to be externally serialized, does the
- * following:
+ * following::
*
- * void latch_modify(struct latch_struct *latch, ...)
- * {
- * smp_wmb(); <- Ensure that the last data[1] update is visible
- * latch->seq++;
- * smp_wmb(); <- Ensure that the seqcount update is visible
+ * void latch_modify(struct latch_struct *latch, ...)
+ * {
+ * smp_wmb(); // Ensure that the last data[1] update is visible
+ * latch->seq.seqcount.sequence++;
+ * smp_wmb(); // Ensure that the seqcount update is visible
*
- * modify(latch->data[0], ...);
+ * modify(latch->data[0], ...);
*
- * smp_wmb(); <- Ensure that the data[0] update is visible
- * latch->seq++;
- * smp_wmb(); <- Ensure that the seqcount update is visible
+ * smp_wmb(); // Ensure that the data[0] update is visible
+ * latch->seq.seqcount.sequence++;
+ * smp_wmb(); // Ensure that the seqcount update is visible
*
- * modify(latch->data[1], ...);
- * }
+ * modify(latch->data[1], ...);
+ * }
*
- * The query will have a form like:
+ * The query will have a form like::
*
- * struct entry *latch_query(struct latch_struct *latch, ...)
- * {
- * struct entry *entry;
- * unsigned seq, idx;
+ * struct entry *latch_query(struct latch_struct *latch, ...)
+ * {
+ * struct entry *entry;
+ * unsigned seq, idx;
*
- * do {
- * seq = raw_read_seqcount_latch(&latch->seq);
+ * do {
+ * seq = raw_read_seqcount_latch(&latch->seq);
*
- * idx = seq & 0x01;
- * entry = data_query(latch->data[idx], ...);
+ * idx = seq & 0x01;
+ * entry = data_query(latch->data[idx], ...);
*
- * smp_rmb();
- * } while (seq != latch->seq);
+ * // This includes needed smp_rmb()
+ * } while (read_seqcount_latch_retry(&latch->seq, seq));
*
- * return entry;
- * }
+ * return entry;
+ * }
*
* So during the modification, queries are first redirected to data[1]. Then we
* modify data[0]. When that is complete, we redirect queries back to data[0]
* and we can modify data[1].
*
- * NOTE: The non-requirement for atomic modifications does _NOT_ include
- * the publishing of new entries in the case where data is a dynamic
- * data structure.
+ * NOTE:
+ *
+ * The non-requirement for atomic modifications does _NOT_ include
+ * the publishing of new entries in the case where data is a dynamic
+ * data structure.
+ *
+ * An iteration might start in data[0] and get suspended long enough
+ * to miss an entire modification sequence, once it resumes it might
+ * observe the new entry.
*
- * An iteration might start in data[0] and get suspended long enough
- * to miss an entire modification sequence, once it resumes it might
- * observe the new entry.
+ * NOTE2:
*
- * NOTE: When data is a dynamic data structure; one should use regular RCU
- * patterns to manage the lifetimes of the objects within.
+ * When data is a dynamic data structure; one should use regular RCU
+ * patterns to manage the lifetimes of the objects within.
*/
-static inline void raw_write_seqcount_latch(seqcount_t *s)
+static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
{
- smp_wmb(); /* prior stores before incrementing "sequence" */
- s->sequence++;
- smp_wmb(); /* increment "sequence" before following stores */
+ smp_wmb(); /* prior stores before incrementing "sequence" */
+ s->seqcount.sequence++;
+ smp_wmb(); /* increment "sequence" before following stores */
}
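The example above, restated with the helpers themselves (hypothetical clk_* identifiers; the update side is assumed externally serialized, e.g. NMI-safe clock reads against a timer-driven update):

	static seqcount_latch_t clk_latch = SEQCNT_LATCH_ZERO(clk_latch);
	static u64 clk_data[2];

	static void clk_update(u64 v)
	{
		raw_write_seqcount_latch(&clk_latch);	/* readers -> data[1] */
		clk_data[0] = v;
		raw_write_seqcount_latch(&clk_latch);	/* readers -> data[0] */
		clk_data[1] = v;
	}

	static u64 clk_read(void)
	{
		unsigned int seq;
		u64 v;

		do {
			seq = raw_read_seqcount_latch(&clk_latch);
			v = READ_ONCE(clk_data[seq & 1]);
		} while (read_seqcount_latch_retry(&clk_latch, seq));

		return v;
	}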
/*
- * Sequence counter only version assumes that callers are using their
- * own mutexing.
- */
-static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
-{
- raw_write_seqcount_begin(s);
- seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
-}
-
-static inline void write_seqcount_begin(seqcount_t *s)
-{
- write_seqcount_begin_nested(s, 0);
-}
-
-static inline void write_seqcount_end(seqcount_t *s)
-{
- seqcount_release(&s->dep_map, _RET_IP_);
- raw_write_seqcount_end(s);
-}
-
-/**
- * write_seqcount_invalidate - invalidate in-progress read-side seq operations
- * @s: pointer to seqcount_t
+ * Sequential locks (seqlock_t)
*
- * After write_seqcount_invalidate, no read-side seq operations will complete
- * successfully and see data older than this.
+ * Sequence counters with an embedded spinlock for writer serialization
+ * and non-preemptibility.
+ *
+ * For more info, see:
+ * - Comments on top of seqcount_t
+ * - Documentation/locking/seqlock.rst
*/
-static inline void write_seqcount_invalidate(seqcount_t *s)
-{
- smp_wmb();
- s->sequence+=2;
-}
-
typedef struct {
- struct seqcount seqcount;
+ /*
+ * Make sure that readers don't starve writers on PREEMPT_RT: use
+ * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
+ */
+ seqcount_spinlock_t seqcount;
spinlock_t lock;
} seqlock_t;
-/*
- * These macros triggered gcc-3.x compile-time problems. We think these are
- * OK now. Be cautious.
- */
-#define __SEQLOCK_UNLOCKED(lockname) \
- { \
- .seqcount = SEQCNT_ZERO(lockname), \
- .lock = __SPIN_LOCK_UNLOCKED(lockname) \
+#define __SEQLOCK_UNLOCKED(lockname) \
+ { \
+ .seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
+ .lock = __SPIN_LOCK_UNLOCKED(lockname) \
}
-#define seqlock_init(x) \
- do { \
- seqcount_init(&(x)->seqcount); \
- spin_lock_init(&(x)->lock); \
+/**
+ * seqlock_init() - dynamic initializer for seqlock_t
+ * @sl: Pointer to the seqlock_t instance
+ */
+#define seqlock_init(sl) \
+ do { \
+ spin_lock_init(&(sl)->lock); \
+ seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock); \
} while (0)
-#define DEFINE_SEQLOCK(x) \
- seqlock_t x = __SEQLOCK_UNLOCKED(x)
+/**
+ * DEFINE_SEQLOCK(sl) - Define a statically allocated seqlock_t
+ * @sl: Name of the seqlock_t instance
+ */
+#define DEFINE_SEQLOCK(sl) \
+ seqlock_t sl = __SEQLOCK_UNLOCKED(sl)
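Both initializers in one sketch (hypothetical names; foo_seqlock and foo_value are reused by the read/write sketches further below):

	static DEFINE_SEQLOCK(foo_seqlock);	/* static initialization */
	static u64 foo_value;			/* the protected data */

	struct bar {
		seqlock_t lock;
	};

	static void bar_init(struct bar *b)
	{
		seqlock_init(&b->lock);		/* dynamic initialization */
	}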
-/*
- * Read side functions for starting and finalizing a read side section.
+/**
+ * read_seqbegin() - start a seqlock_t read side critical section
+ * @sl: Pointer to seqlock_t
+ *
+ * Return: count, to be passed to read_seqretry()
*/
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
- return read_seqcount_begin(&sl->seqcount);
+ unsigned ret = read_seqcount_begin(&sl->seqcount);
+
+ kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */
+ kcsan_flat_atomic_begin();
+ return ret;
}
+/**
+ * read_seqretry() - end a seqlock_t read side section
+ * @sl: Pointer to seqlock_t
+ * @start: count, from read_seqbegin()
+ *
+ * read_seqretry closes the read side critical section of given seqlock_t.
+ * If the critical section was invalid, it must be ignored (and typically
+ * retried).
+ *
+ * Return: true if a read section retry is required, else false
+ */
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
+ /*
+ * Assume not nested: read_seqretry() may be called multiple times when
+ * completing a read critical section.
+ */
+ kcsan_flat_atomic_end();
+
return read_seqcount_retry(&sl->seqcount, start);
}
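The canonical lockless reader loop, reusing foo_seqlock/foo_value from the initializer sketch above:

	static u64 foo_get(void)
	{
		unsigned int seq;
		u64 v;

		do {
			seq = read_seqbegin(&foo_seqlock);
			v = foo_value;
		} while (read_seqretry(&foo_seqlock, seq));

		return v;
	}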
/*
- * Lock out other writers and update the count.
- * Acts like a normal spin_lock/unlock.
- * Don't need preempt_disable() because that is in the spin_lock already.
+ * For all seqlock_t write side functions, use the internal
+ * do_write_seqcount_begin() instead of generic write_seqcount_begin().
+ * This way, no redundant lockdep_assert_held() checks are added.
+ */
+
+/**
+ * write_seqlock() - start a seqlock_t write side critical section
+ * @sl: Pointer to seqlock_t
+ *
+ * write_seqlock opens a write side critical section for the given
+ * seqlock_t. It also implicitly acquires the spinlock_t embedded inside
+ * that sequential lock. All seqlock_t write side sections are thus
+ * automatically serialized and non-preemptible.
+ *
+ * Context: if the seqlock_t read section, or other write side critical
+ * sections, can be invoked from hardirq or softirq contexts, use the
+ * _irqsave or _bh variants of this function instead.
*/
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
- write_seqcount_begin(&sl->seqcount);
+ do_write_seqcount_begin(&sl->seqcount.seqcount);
}
+/**
+ * write_sequnlock() - end a seqlock_t write side critical section
+ * @sl: Pointer to seqlock_t
+ *
+ * write_sequnlock closes the (serialized and non-preemptible) write side
+ * critical section of given seqlock_t.
+ */
static inline void write_sequnlock(seqlock_t *sl)
{
- write_seqcount_end(&sl->seqcount);
+ do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock(&sl->lock);
}
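The matching writer for the reader sketched above; no extra lock is needed since the embedded spinlock_t provides the serialization:

	static void foo_set(u64 v)
	{
		write_seqlock(&foo_seqlock);	/* lock + open write section */
		foo_value = v;
		write_sequnlock(&foo_seqlock);	/* close section + unlock */
	}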
+/**
+ * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section
+ * @sl: Pointer to seqlock_t
+ *
+ * _bh variant of write_seqlock(). Use only if the read side section, or
+ * other write side sections, can be invoked from softirq contexts.
+ */
static inline void write_seqlock_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
- write_seqcount_begin(&sl->seqcount);
+ do_write_seqcount_begin(&sl->seqcount.seqcount);
}
+/**
+ * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section
+ * @sl: Pointer to seqlock_t
+ *
+ * write_sequnlock_bh closes the serialized, non-preemptible, and
+ * softirqs-disabled seqlock_t write side critical section opened with
+ * write_seqlock_bh().
+ */
static inline void write_sequnlock_bh(seqlock_t *sl)
{
- write_seqcount_end(&sl->seqcount);
+ do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_bh(&sl->lock);
}
+/**
+ * write_seqlock_irq() - start a non-interruptible seqlock_t write section
+ * @sl: Pointer to seqlock_t
+ *
+ * _irq variant of write_seqlock(). Use only if the read side section, or
+ * other write sections, can be invoked from hardirq contexts.
+ */
static inline void write_seqlock_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
- write_seqcount_begin(&sl->seqcount);
+ do_write_seqcount_begin(&sl->seqcount.seqcount);
}
+/**
+ * write_sequnlock_irq() - end a non-interruptible seqlock_t write section
+ * @sl: Pointer to seqlock_t
+ *
+ * write_sequnlock_irq closes the serialized and non-interruptible
+ * seqlock_t write side section opened with write_seqlock_irq().
+ */
static inline void write_sequnlock_irq(seqlock_t *sl)
{
- write_seqcount_end(&sl->seqcount);
+ do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_irq(&sl->lock);
}
@@ -484,79 +958,112 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
unsigned long flags;
spin_lock_irqsave(&sl->lock, flags);
- write_seqcount_begin(&sl->seqcount);
+ do_write_seqcount_begin(&sl->seqcount.seqcount);
return flags;
}
+/**
+ * write_seqlock_irqsave() - start a non-interruptible seqlock_t write
+ * section
+ * @lock: Pointer to seqlock_t
+ * @flags: Stack-allocated storage for saving caller's local interrupt
+ * state, to be passed to write_sequnlock_irqrestore().
+ *
+ * _irqsave variant of write_seqlock(). Use it only if the read side
+ * section, or other write sections, can be invoked from hardirq context.
+ */
#define write_seqlock_irqsave(lock, flags) \
do { flags = __write_seqlock_irqsave(lock); } while (0)
+/**
+ * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write
+ * section
+ * @sl: Pointer to seqlock_t
+ * @flags: Caller's saved interrupt state, from write_seqlock_irqsave()
+ *
+ * write_sequnlock_irqrestore closes the serialized and non-interruptible
+ * seqlock_t write section previously opened with write_seqlock_irqsave().
+ */
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
- write_seqcount_end(&sl->seqcount);
+ do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_irqrestore(&sl->lock, flags);
}
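The _irqsave pair in the same sketch, for when readers (or other writers) may run from hardirq context:

	static void foo_set_any_context(u64 v)
	{
		unsigned long flags;

		write_seqlock_irqsave(&foo_seqlock, flags);
		foo_value = v;
		write_sequnlock_irqrestore(&foo_seqlock, flags);
	}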
-/*
- * A locking reader exclusively locks out other writers and locking readers,
- * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
- * Don't need preempt_disable() because that is in the spin_lock already.
+/**
+ * read_seqlock_excl() - begin a seqlock_t locking reader section
+ * @sl: Pointer to seqlock_t
+ *
+ * read_seqlock_excl opens a seqlock_t locking reader critical section. A
+ * locking reader exclusively locks out *both* other writers *and* other
+ * locking readers, but it does not update the embedded sequence number.
+ *
+ * Locking readers act like a normal spin_lock()/spin_unlock().
+ *
+ * Context: if the seqlock_t write section, *or other read sections*, can
+ * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
+ * variant of this function instead.
+ *
+ * The opened read section must be closed with read_sequnlock_excl().
*/
static inline void read_seqlock_excl(seqlock_t *sl)
{
spin_lock(&sl->lock);
}
+/**
+ * read_sequnlock_excl() - end a seqlock_t locking reader critical section
+ * @sl: Pointer to seqlock_t
+ */
static inline void read_sequnlock_excl(seqlock_t *sl)
{
spin_unlock(&sl->lock);
}
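A locking-reader sketch on the same data; useful when a retry loop is undesirable, e.g. because the read side has side effects:

	static u64 foo_get_excl(void)
	{
		u64 v;

		read_seqlock_excl(&foo_seqlock);	/* excludes writers */
		v = foo_value;
		read_sequnlock_excl(&foo_seqlock);

		return v;
	}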
/**
- * read_seqbegin_or_lock - begin a sequence number check or locking block
- * @lock: sequence lock
- * @seq : sequence number to be checked
+ * read_seqlock_excl_bh() - start a seqlock_t locking reader section with
+ * softirqs disabled
+ * @sl: Pointer to seqlock_t
*
- * First try it once optimistically without taking the lock. If that fails,
- * take the lock. The sequence number is also used as a marker for deciding
- * whether to be a reader (even) or writer (odd).
- * N.B. seq must be initialized to an even number to begin with.
+ * _bh variant of read_seqlock_excl(). Use this variant only if the
+ * seqlock_t write side section, *or other read sections*, can be invoked
+ * from softirq contexts.
*/
-static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
-{
- if (!(*seq & 1)) /* Even */
- *seq = read_seqbegin(lock);
- else /* Odd */
- read_seqlock_excl(lock);
-}
-
-static inline int need_seqretry(seqlock_t *lock, int seq)
-{
- return !(seq & 1) && read_seqretry(lock, seq);
-}
-
-static inline void done_seqretry(seqlock_t *lock, int seq)
-{
- if (seq & 1)
- read_sequnlock_excl(lock);
-}
-
static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
}
+/**
+ * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking
+ * reader section
+ * @sl: Pointer to seqlock_t
+ */
static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
spin_unlock_bh(&sl->lock);
}
+/**
+ * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking
+ * reader section
+ * @sl: Pointer to seqlock_t
+ *
+ * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t
+ * write side section, *or other read sections*, can be invoked from a
+ * hardirq context.
+ */
static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
}
+/**
+ * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t
+ * locking reader section
+ * @sl: Pointer to seqlock_t
+ */
static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
spin_unlock_irq(&sl->lock);
@@ -570,15 +1077,117 @@ static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
return flags;
}
+/**
+ * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t
+ * locking reader section
+ * @lock: Pointer to seqlock_t
+ * @flags: Stack-allocated storage for saving caller's local interrupt
+ * state, to be passed to read_sequnlock_excl_irqrestore().
+ *
+ * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t
+ * write side section, *or other read sections*, can be invoked from a
+ * hardirq context.
+ */
#define read_seqlock_excl_irqsave(lock, flags) \
do { flags = __read_seqlock_excl_irqsave(lock); } while (0)
+/**
+ * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t
+ * locking reader section
+ * @sl: Pointer to seqlock_t
+ * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave()
+ */
static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
spin_unlock_irqrestore(&sl->lock, flags);
}
+/**
+ * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader
+ * @lock: Pointer to seqlock_t
+ * @seq: Marker and return parameter. If the passed value is even, the
+ * reader will become a *lockless* seqlock_t reader as in read_seqbegin().
+ * If the passed value is odd, the reader will become a *locking* reader
+ * as in read_seqlock_excl(). In the first call to this function, the
+ * caller *must* initialize and pass an even value to @seq; this way, a
+ * lockless read can be optimistically tried first.
+ *
+ * read_seqbegin_or_lock is an API designed to optimistically try a normal
+ * lockless seqlock_t read section first. If an odd counter is found, the
+ * lockless read trial has failed, and the next read iteration transforms
+ * itself into a full seqlock_t locking reader.
+ *
+ * This is typically used to avoid lockless seqlock_t readers starvation
+ * (too many retry loops) in the case of a sharp spike in write side
+ * activity.
+ *
+ * Context: if the seqlock_t write section, *or other read sections*, can
+ * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
+ * variant of this function instead.
+ *
+ * Check Documentation/locking/seqlock.rst for template example code.
+ *
+ * Return: the encountered sequence counter value, through the @seq
+ * parameter, which is overloaded as a return parameter. This returned
+ * value must be checked with need_seqretry(). If the read section needs to
+ * be retried, this returned value must also be passed as the @seq
+ * parameter of the next read_seqbegin_or_lock() iteration.
+ */
+static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
+{
+ if (!(*seq & 1)) /* Even */
+ *seq = read_seqbegin(lock);
+ else /* Odd */
+ read_seqlock_excl(lock);
+}
+
+/**
+ * need_seqretry() - validate seqlock_t "locking or lockless" read section
+ * @lock: Pointer to seqlock_t
+ * @seq: sequence count, from read_seqbegin_or_lock()
+ *
+ * Return: true if a read section retry is required, false otherwise
+ */
+static inline int need_seqretry(seqlock_t *lock, int seq)
+{
+ return !(seq & 1) && read_seqretry(lock, seq);
+}
+
+/**
+ * done_seqretry() - end seqlock_t "locking or lockless" reader section
+ * @lock: Pointer to seqlock_t
+ * @seq: count, from read_seqbegin_or_lock()
+ *
+ * done_seqretry finishes the seqlock_t read side critical section started
+ * with read_seqbegin_or_lock() and validated by need_seqretry().
+ */
+static inline void done_seqretry(seqlock_t *lock, int seq)
+{
+ if (seq & 1)
+ read_sequnlock_excl(lock);
+}
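Putting the three helpers together, the template from Documentation/locking/seqlock.rst, applied to the foo_seqlock sketch:

	static u64 foo_get_or_lock(void)
	{
		int seq = 0;	/* even: try the lockless pass first */
		u64 v;

		do {
			read_seqbegin_or_lock(&foo_seqlock, &seq);
			v = foo_value;
		} while (need_seqretry(&foo_seqlock, seq));
		done_seqretry(&foo_seqlock, seq);

		return v;
	}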
+
+/**
+ * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or
+ * a non-interruptible locking reader
+ * @lock: Pointer to seqlock_t
+ * @seq: Marker and return parameter. Check read_seqbegin_or_lock().
+ *
+ * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if
+ * the seqlock_t write section, *or other read sections*, can be invoked
+ * from hardirq context.
+ *
+ * Note: Interrupts will be disabled only for "locking reader" mode.
+ *
+ * Return:
+ *
+ * 1. The saved local interrupts state in case of a locking reader, to
+ * be passed to done_seqretry_irqrestore().
+ *
+ * 2. The encountered sequence counter value, returned through @seq
+ * overloaded as a return parameter. Check read_seqbegin_or_lock().
+ */
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
@@ -592,6 +1201,18 @@ read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
return flags;
}
+/**
+ * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
+ * non-interruptible locking reader section
+ * @lock: Pointer to seqlock_t
+ * @seq: Count, from read_seqbegin_or_lock_irqsave()
+ * @flags: Caller's saved local interrupt state in case of a locking
+ * reader, also from read_seqbegin_or_lock_irqsave()
+ *
+ * This is the _irqrestore variant of done_seqretry(). The read section
+ * must've been opened with read_seqbegin_or_lock_irqsave(), and validated
+ * by need_seqretry().
+ */
static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
diff --git a/include/linux/seqlock_api.h b/include/linux/seqlock_api.h
new file mode 100644
index 000000000000..be91e7d3b826
--- /dev/null
+++ b/include/linux/seqlock_api.h
@@ -0,0 +1 @@
+#include <linux/seqlock.h>
diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h
deleted file mode 100644
index 3cca2b8fac43..000000000000
--- a/include/linux/seqno-fence.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * seqno-fence, using a dma-buf to synchronize fencing
- *
- * Copyright (C) 2012 Texas Instruments
- * Copyright (C) 2012 Canonical Ltd
- * Authors:
- * Rob Clark <robdclark@gmail.com>
- * Maarten Lankhorst <maarten.lankhorst@canonical.com>
- */
-
-#ifndef __LINUX_SEQNO_FENCE_H
-#define __LINUX_SEQNO_FENCE_H
-
-#include <linux/dma-fence.h>
-#include <linux/dma-buf.h>
-
-enum seqno_fence_condition {
- SEQNO_FENCE_WAIT_GEQUAL,
- SEQNO_FENCE_WAIT_NONZERO
-};
-
-struct seqno_fence {
- struct dma_fence base;
-
- const struct dma_fence_ops *ops;
- struct dma_buf *sync_buf;
- uint32_t seqno_ofs;
- enum seqno_fence_condition condition;
-};
-
-extern const struct dma_fence_ops seqno_fence_ops;
-
-/**
- * to_seqno_fence - cast a fence to a seqno_fence
- * @fence: fence to cast to a seqno_fence
- *
- * Returns NULL if the fence is not a seqno_fence,
- * or the seqno_fence otherwise.
- */
-static inline struct seqno_fence *
-to_seqno_fence(struct dma_fence *fence)
-{
- if (fence->ops != &seqno_fence_ops)
- return NULL;
- return container_of(fence, struct seqno_fence, base);
-}
-
-/**
- * seqno_fence_init - initialize a seqno fence
- * @fence: seqno_fence to initialize
- * @lock: pointer to spinlock to use for fence
- * @sync_buf: buffer containing the memory location to signal on
- * @context: the execution context this fence is a part of
- * @seqno_ofs: the offset within @sync_buf
- * @seqno: the sequence # to signal on
- * @cond: fence wait condition
- * @ops: the fence_ops for operations on this seqno fence
- *
- * This function initializes a struct seqno_fence with passed parameters,
- * and takes a reference on sync_buf which is released on fence destruction.
- *
- * A seqno_fence is a dma_fence which can complete in software when
- * enable_signaling is called, but it also completes when
- * (s32)((sync_buf)[seqno_ofs] - seqno) >= 0 is true
- *
- * The seqno_fence will take a refcount on the sync_buf until it's
- * destroyed, but actual lifetime of sync_buf may be longer if one of the
- * callers take a reference to it.
- *
- * Certain hardware have instructions to insert this type of wait condition
- * in the command stream, so no intervention from software would be needed.
- * This type of fence can be destroyed before completed, however a reference
- * on the sync_buf dma-buf can be taken. It is encouraged to re-use the same
- * dma-buf for sync_buf, since mapping or unmapping the sync_buf to the
- * device's vm can be expensive.
- *
- * It is recommended for creators of seqno_fence to call dma_fence_signal()
- * before destruction. This will prevent possible issues from wraparound at
- * time of issue vs time of check, since users can check dma_fence_is_signaled()
- * before submitting instructions for the hardware to wait on the fence.
- * However, when ops.enable_signaling is not called, it doesn't have to be
- * done as soon as possible, just before there's any real danger of seqno
- * wraparound.
- */
-static inline void
-seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock,
- struct dma_buf *sync_buf, uint32_t context,
- uint32_t seqno_ofs, uint32_t seqno,
- enum seqno_fence_condition cond,
- const struct dma_fence_ops *ops)
-{
- BUG_ON(!fence || !sync_buf || !ops);
- BUG_ON(!ops->wait || !ops->enable_signaling ||
- !ops->get_driver_name || !ops->get_timeline_name);
-
- /*
- * ops is used in dma_fence_init for get_driver_name, so needs to be
- * initialized first
- */
- fence->ops = ops;
- dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
- get_dma_buf(sync_buf);
- fence->sync_buf = sync_buf;
- fence->seqno_ofs = seqno_ofs;
- fence->condition = cond;
-}
-
-#endif /* __LINUX_SEQNO_FENCE_H */
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index 9f14f9c12ec4..66f624fc618c 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -7,6 +7,7 @@
#include <linux/types.h>
#include <linux/device.h>
+#include <linux/uaccess.h>
#include <linux/termios.h>
#include <linux/delay.h>
@@ -327,4 +328,18 @@ static inline int serdev_tty_port_unregister(struct tty_port *port)
}
#endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */
+struct acpi_resource;
+struct acpi_resource_uart_serialbus;
+
+#ifdef CONFIG_ACPI
+bool serdev_acpi_get_uart_resource(struct acpi_resource *ares,
+ struct acpi_resource_uart_serialbus **uart);
+#else
+static inline bool serdev_acpi_get_uart_resource(struct acpi_resource *ares,
+ struct acpi_resource_uart_serialbus **uart)
+{
+ return false;
+}
+#endif /* CONFIG_ACPI */
+
#endif /*_LINUX_SERDEV_H */
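A sketch of a consumer (hypothetical foo_* names; the callback shape and return convention follow the acpi_dev_get_resources() walk and are assumptions here, as is reading default_baud_rate):

	static int foo_find_uart(struct acpi_resource *ares, void *data)
	{
		struct acpi_resource_uart_serialbus *uart;
		u32 *baud = data;

		if (serdev_acpi_get_uart_resource(ares, &uart))
			*baud = uart->default_baud_rate;

		return 1;	/* assumed: continue the resource walk */
	}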
diff --git a/include/linux/serial.h b/include/linux/serial.h
index 0916107c77f9..3d6fe3ef92cf 100644
--- a/include/linux/serial.h
+++ b/include/linux/serial.h
@@ -9,9 +9,19 @@
#ifndef _LINUX_SERIAL_H
#define _LINUX_SERIAL_H
-#include <asm/page.h>
#include <uapi/linux/serial.h>
+#include <uapi/linux/serial_reg.h>
+/* Helper for dealing with UART_LCR_WLEN* defines */
+#define UART_LCR_WLEN(x) ((x) - 5)
+
+/* FIFO and shifting register empty */
+#define UART_LSR_BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+static inline bool uart_lsr_tx_empty(u16 lsr)
+{
+ return (lsr & UART_LSR_BOTH_EMPTY) == UART_LSR_BOTH_EMPTY;
+}
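For instance (a sketch; read_lsr stands in for whatever LSR accessor a driver uses), a console-flush style busy-wait becomes:

	static void foo_wait_for_xmitr(u16 (*read_lsr)(void))
	{
		/* Both THRE and TEMT must be set before returning. */
		while (!uart_lsr_tx_empty(read_lsr()))
			cpu_relax();
	}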
/*
* Counters of the input lines (CTS, DSR, RI, CD) interrupts
@@ -23,11 +33,6 @@ struct async_icount {
__u32 buf_overrun;
};
-/*
- * The size of the serial xmit buffer is 1 page, or 4096 bytes
- */
-#define SERIAL_XMIT_SIZE PAGE_SIZE
-
#include <linux/compiler.h>
#endif /* _LINUX_SERIAL_H */
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 6a8e8c48c882..19376bee9667 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -32,7 +32,7 @@ struct plat_serial8250_port {
void (*serial_out)(struct uart_port *, int, int);
void (*set_termios)(struct uart_port *,
struct ktermios *new,
- struct ktermios *old);
+ const struct ktermios *old);
void (*set_ldisc)(struct uart_port *,
struct ktermios *);
unsigned int (*get_mctrl)(struct uart_port *);
@@ -74,6 +74,7 @@ struct uart_8250_port;
struct uart_8250_ops {
int (*setup_irq)(struct uart_8250_port *);
void (*release_irq)(struct uart_8250_port *);
+ void (*setup_timer)(struct uart_8250_port *);
};
struct uart_8250_em485 {
@@ -81,6 +82,7 @@ struct uart_8250_em485 {
struct hrtimer stop_tx_timer; /* "rs485 stop tx" timer */
struct hrtimer *active_timer; /* pointer to active timer */
struct uart_8250_port *port; /* for hrtimer callbacks */
+ unsigned int tx_stopped:1; /* tx is currently stopped */
};
/*
@@ -103,8 +105,6 @@ struct uart_8250_port {
unsigned char ier;
unsigned char lcr;
unsigned char mcr;
- unsigned char mcr_mask; /* mask of user bits */
- unsigned char mcr_force; /* mask of forced bits */
unsigned char cur_iotype; /* Running I/O type */
unsigned int rpm_tx_active;
unsigned char canary; /* non-zero during system sleep
@@ -120,7 +120,8 @@ struct uart_8250_port {
* be immediately processed.
*/
#define LSR_SAVE_FLAGS UART_LSR_BRK_ERROR_BITS
- unsigned char lsr_saved_flags;
+ u16 lsr_saved_flags;
+ u16 lsr_save_mask;
#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
unsigned char msr_saved_flags;
@@ -132,6 +133,8 @@ struct uart_8250_port {
void (*dl_write)(struct uart_8250_port *, int);
struct uart_8250_em485 *em485;
+ void (*rs485_start_tx)(struct uart_8250_port *);
+ void (*rs485_stop_tx)(struct uart_8250_port *);
/* Serial port overrun backoff */
struct delayed_work overrun_backoff;
@@ -143,7 +146,7 @@ static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up)
return container_of(up, struct uart_8250_port, port);
}
-int serial8250_register_8250_port(struct uart_8250_port *);
+int serial8250_register_8250_port(const struct uart_8250_port *);
void serial8250_unregister_port(int line);
void serial8250_suspend_port(int line);
void serial8250_resume_port(int line);
@@ -152,8 +155,10 @@ extern int early_serial_setup(struct uart_port *port);
extern int early_serial8250_setup(struct earlycon_device *device,
const char *options);
+extern void serial8250_update_uartclk(struct uart_port *port,
+ unsigned int uartclk);
extern void serial8250_do_set_termios(struct uart_port *port,
- struct ktermios *termios, struct ktermios *old);
+ struct ktermios *termios, const struct ktermios *old);
extern void serial8250_do_set_ldisc(struct uart_port *port,
struct ktermios *termios);
extern unsigned int serial8250_do_get_mctrl(struct uart_port *port);
@@ -167,8 +172,8 @@ extern void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud,
unsigned int quot_frac);
extern int fsl8250_handle_irq(struct uart_port *port);
int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
-unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr);
-void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr);
+u16 serial8250_rx_chars(struct uart_8250_port *up, u16 lsr);
+void serial8250_read_char(struct uart_8250_port *up, u16 lsr);
void serial8250_tx_chars(struct uart_8250_port *up);
unsigned int serial8250_modem_status(struct uart_8250_port *up);
void serial8250_init_port(struct uart_8250_port *up);
@@ -176,9 +181,15 @@ void serial8250_set_defaults(struct uart_8250_port *up);
void serial8250_console_write(struct uart_8250_port *up, const char *s,
unsigned int count);
int serial8250_console_setup(struct uart_port *port, char *options, bool probe);
+int serial8250_console_exit(struct uart_port *port);
extern void serial8250_set_isa_configurator(void (*v)
(int port, struct uart_port *up,
u32 *capabilities));
+#ifdef CONFIG_SERIAL_8250_RT288X
+unsigned int au_serial_in(struct uart_port *p, int offset);
+void au_serial_out(struct uart_port *p, int offset, int value);
+#endif
+
#endif
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 52404ef1694e..d657f2a42a7b 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -29,10 +29,346 @@
struct uart_port;
struct serial_struct;
struct device;
+struct gpio_desc;
-/*
+/**
+ * struct uart_ops -- interface between serial_core and the driver
+ *
* This structure describes all the operations that can be done on the
- * physical hardware. See Documentation/driver-api/serial/driver.rst for details.
+ * physical hardware.
+ *
+ * @tx_empty: ``unsigned int ()(struct uart_port *port)``
+ *
+ * This function tests whether the transmitter fifo and shifter for the
+ * @port is empty. If it is empty, this function should return
+ * %TIOCSER_TEMT, otherwise return 0. If the port does not support this
+ * operation, then it should return %TIOCSER_TEMT.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ * This call must not sleep
+ *
+ * @set_mctrl: ``void ()(struct uart_port *port, unsigned int mctrl)``
+ *
+ * This function sets the modem control lines for @port to the state
+ * described by @mctrl. The relevant bits of @mctrl are:
+ *
+ * - %TIOCM_RTS RTS signal.
+ * - %TIOCM_DTR DTR signal.
+ * - %TIOCM_OUT1 OUT1 signal.
+ * - %TIOCM_OUT2 OUT2 signal.
+ * - %TIOCM_LOOP Set the port into loopback mode.
+ *
+ * If the appropriate bit is set, the signal should be driven
+ * active. If the bit is clear, the signal should be driven
+ * inactive.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @get_mctrl: ``unsigned int ()(struct uart_port *port)``
+ *
+ * Returns the current state of modem control inputs of @port. The state
+ * of the outputs should not be returned, since the core keeps track of
+ * their state. The state information should include:
+ *
+ * - %TIOCM_CAR state of DCD signal
+ * - %TIOCM_CTS state of CTS signal
+ * - %TIOCM_DSR state of DSR signal
+ * - %TIOCM_RI state of RI signal
+ *
+ * The bit is set if the signal is currently driven active. If
+ * the port does not support CTS, DCD or DSR, the driver should
+ * indicate that the signal is permanently active. If RI is
+ * not available, the signal should not be indicated as active.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @stop_tx: ``void ()(struct uart_port *port)``
+ *
+ * Stop transmitting characters. This might be due to the CTS line
+ * becoming inactive or the tty layer indicating we want to stop
+ * transmission due to an %XOFF character.
+ *
+ * The driver should stop transmitting characters as soon as possible.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @start_tx: ``void ()(struct uart_port *port)``
+ *
+ * Start transmitting characters.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @throttle: ``void ()(struct uart_port *port)``
+ *
+ * Notify the serial driver that input buffers for the line discipline are
+ * close to full, and it should somehow signal that no more characters
+ * should be sent to the serial port.
+ * This will be called only if hardware assisted flow control is enabled.
+ *
+ * Locking: serialized with @unthrottle() and termios modification by the
+ * tty layer.
+ *
+ * @unthrottle: ``void ()(struct uart_port *port)``
+ *
+ * Notify the serial driver that characters can now be sent to the serial
+ * port without fear of overrunning the input buffers of the line
+ * disciplines.
+ *
+ * This will be called only if hardware assisted flow control is enabled.
+ *
+ * Locking: serialized with @throttle() and termios modification by the
+ * tty layer.
+ *
+ * @send_xchar: ``void ()(struct uart_port *port, char ch)``
+ *
+ * Transmit a high priority character, even if the port is stopped. This
+ * is used to implement XON/XOFF flow control and tcflow(). If the serial
+ * driver does not implement this function, the tty core will append the
+ * character to the circular buffer and then call start_tx() / stop_tx()
+ * to flush the data out.
+ *
+ * Do not transmit if @ch == '\0' (%__DISABLED_CHAR).
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @start_rx: ``void ()(struct uart_port *port)``
+ *
+ * Start receiving characters.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @stop_rx: ``void ()(struct uart_port *port)``
+ *
+ * Stop receiving characters; the @port is in the process of being closed.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @enable_ms: ``void ()(struct uart_port *port)``
+ *
+ * Enable the modem status interrupts.
+ *
+ * This method may be called multiple times. Modem status interrupts
+ * should be disabled when the @shutdown() method is called.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @break_ctl: ``void ()(struct uart_port *port, int ctl)``
+ *
+ * Control the transmission of a break signal. If @ctl is nonzero, the
+ * break signal should be transmitted. The signal should be terminated
+ * when another call is made with a zero @ctl.
+ *
+ * Locking: caller holds tty_port->mutex
+ *
+ * @startup: ``int ()(struct uart_port *port)``
+ *
+ * Grab any interrupt resources and initialise any low level driver state.
+ * Enable the port for reception. It should not activate RTS nor DTR;
+ * this will be done via a separate call to @set_mctrl().
+ *
+ * This method will only be called when the port is initially opened.
+ *
+ * Locking: port_sem taken.
+ * Interrupts: globally disabled.
+ *
+ * @shutdown: ``void ()(struct uart_port *port)``
+ *
+ * Disable the @port, disable any break condition that may be in effect,
+ * and free any interrupt resources. It should not disable RTS nor DTR;
+ * this will have already been done via a separate call to @set_mctrl().
+ *
+ * Drivers must not access @port->state once this call has completed.
+ *
+ * This method will only be called when there are no more users of this
+ * @port.
+ *
+ * Locking: port_sem taken.
+ * Interrupts: caller dependent.
+ *
+ * @flush_buffer: ``void ()(struct uart_port *port)``
+ *
+ * Flush any write buffers, reset any DMA state and stop any ongoing DMA
+ * transfers.
+ *
+ * This will be called whenever the @port->state->xmit circular buffer is
+ * cleared.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @set_termios: ``void ()(struct uart_port *port, struct ktermios *new,
+ * struct ktermios *old)``
+ *
+ * Change the @port parameters, including word length, parity, stop bits.
+ * Update @port->read_status_mask and @port->ignore_status_mask to
+ * indicate the types of events we are interested in receiving. Relevant
+ * ktermios::c_cflag bits are:
+ *
+ * - %CSIZE - word size
+ * - %CSTOPB - 2 stop bits
+ * - %PARENB - parity enable
+ * - %PARODD - odd parity (when %PARENB is in force)
+ * - %ADDRB - address bit (changed through uart_port::rs485_config()).
+ * - %CREAD - enable reception of characters (if not set, still receive
+ * characters from the port, but throw them away).
+ * - %CRTSCTS - if set, enable CTS status change reporting.
+ * - %CLOCAL - if not set, enable modem status change reporting.
+ *
+ * Relevant ktermios::c_iflag bits are:
+ *
+ * - %INPCK - enable frame and parity error events to be passed to the TTY
+ * layer.
+ * - %BRKINT / %PARMRK - both of these enable break events to be passed to
+ * the TTY layer.
+ * - %IGNPAR - ignore parity and framing errors.
+ * - %IGNBRK - ignore break errors. If %IGNPAR is also set, ignore overrun
+ * errors as well.
+ *
+ * The interaction of the ktermios::c_iflag bits is as follows (parity
+ * error given as an example):
+ *
+ * ============ ======= ======= =========================================
+ * Parity error INPCK IGNPAR
+ * ============ ======= ======= =========================================
+ * n/a 0 n/a character received, marked as %TTY_NORMAL
+ * None 1 n/a character received, marked as %TTY_NORMAL
+ * Yes 1 0 character received, marked as %TTY_PARITY
+ * Yes 1 1 character discarded
+ * ============ ======= ======= =========================================
+ *
+ * Other flags may be used (eg, xon/xoff characters) if your hardware
+ * supports hardware "soft" flow control.
+ *
+ * Locking: caller holds tty_port->mutex
+ * Interrupts: caller dependent.
+ * This call must not sleep
+ *
+ * @set_ldisc: ``void ()(struct uart_port *port, struct ktermios *termios)``
+ *
+ * Notifier for discipline change. See
+ * Documentation/driver-api/tty/tty_ldisc.rst.
+ *
+ * Locking: caller holds tty_port->mutex
+ *
+ * @pm: ``void ()(struct uart_port *port, unsigned int state,
+ * unsigned int oldstate)``
+ *
+ * Perform any power management related activities on the specified @port.
+ * @state indicates the new state (defined by enum uart_pm_state),
+ * @oldstate indicates the previous state.
+ *
+ * This function should not be used to grab any resources.
+ *
+ * This will be called when the @port is initially opened and finally
+ * closed, except when the @port is also the system console. This will
+ * occur even if %CONFIG_PM is not set.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @type: ``const char *()(struct uart_port *port)``
+ *
+ * Return a pointer to a string constant describing the specified @port,
+ * or return %NULL, in which case the string 'unknown' is substituted.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @release_port: ``void ()(struct uart_port *port)``
+ *
+ * Release any memory and IO region resources currently in use by the
+ * @port.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @request_port: ``int ()(struct uart_port *port)``
+ *
+ * Request any memory and IO region resources required by the port. If any
+ * fail, no resources should be registered when this function returns, and
+ * it should return -%EBUSY on failure.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @config_port: ``void ()(struct uart_port *port, int type)``
+ *
+ * Perform any autoconfiguration steps required for the @port. @type
+ * contains a bit mask of the required configuration. %UART_CONFIG_TYPE
+ * indicates that the port requires detection and identification.
+ * @port->type should be set to the type found, or %PORT_UNKNOWN if no
+ * port was detected.
+ *
+ * %UART_CONFIG_IRQ indicates autoconfiguration of the interrupt signal,
+ * which should be probed using standard kernel autoprobing techniques.
+ * This is not necessary on platforms where ports have interrupts
+ * internally hard wired (eg, system on a chip implementations).
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @verify_port: ``int ()(struct uart_port *port,
+ * struct serial_struct *serinfo)``
+ *
+ * Verify the new serial port information contained within @serinfo is
+ * suitable for this port type.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @ioctl: ``int ()(struct uart_port *port, unsigned int cmd,
+ * unsigned long arg)``
+ *
+ * Perform any port specific IOCTLs. IOCTL commands must be defined using
+ * the standard numbering system found in <asm/ioctl.h>.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @poll_init: ``int ()(struct uart_port *port)``
+ *
+ * Called by kgdb to perform the minimal hardware initialization needed to
+ * support @poll_put_char() and @poll_get_char(). Unlike @startup(), this
+ * should not request interrupts.
+ *
+ * Locking: %tty_mutex and tty_port->mutex taken.
+ * Interrupts: n/a.
+ *
+ * @poll_put_char: ``void ()(struct uart_port *port, unsigned char ch)``
+ *
+ * Called by kgdb to write a single character @ch directly to the serial
+ * @port. It can and should block until there is space in the TX FIFO.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ * This call must not sleep
+ *
+ * @poll_get_char: ``int ()(struct uart_port *port)``
+ *
+ * Called by kgdb to read a single character directly from the serial
+ * port. If data is available, it should be returned; otherwise the
+ * function should return %NO_POLL_CHAR immediately.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ * This call must not sleep
*/
struct uart_ops {
unsigned int (*tx_empty)(struct uart_port *);
@@ -44,32 +380,19 @@ struct uart_ops {
void (*unthrottle)(struct uart_port *);
void (*send_xchar)(struct uart_port *, char ch);
void (*stop_rx)(struct uart_port *);
+ void (*start_rx)(struct uart_port *);
void (*enable_ms)(struct uart_port *);
void (*break_ctl)(struct uart_port *, int ctl);
int (*startup)(struct uart_port *);
void (*shutdown)(struct uart_port *);
void (*flush_buffer)(struct uart_port *);
void (*set_termios)(struct uart_port *, struct ktermios *new,
- struct ktermios *old);
+ const struct ktermios *old);
void (*set_ldisc)(struct uart_port *, struct ktermios *);
void (*pm)(struct uart_port *, unsigned int state,
unsigned int oldstate);
-
- /*
- * Return a string describing the type of the port
- */
const char *(*type)(struct uart_port *);
-
- /*
- * Release IO and memory resources used by the port.
- * This includes iounmap if necessary.
- */
void (*release_port)(struct uart_port *);
-
- /*
- * Request IO and memory resources used by the port.
- * This includes iomapping the port if necessary.
- */
int (*request_port)(struct uart_port *);
void (*config_port)(struct uart_port *, int);
int (*verify_port)(struct uart_port *, struct serial_struct *);
@@ -99,7 +422,7 @@ struct uart_icount {
__u32 buf_overrun;
};
-typedef unsigned int __bitwise upf_t;
+typedef u64 __bitwise upf_t;
typedef unsigned int __bitwise upstat_t;
struct uart_port {
@@ -110,7 +433,7 @@ struct uart_port {
void (*serial_out)(struct uart_port *, int, int);
void (*set_termios)(struct uart_port *,
struct ktermios *new,
- struct ktermios *old);
+ const struct ktermios *old);
void (*set_ldisc)(struct uart_port *,
struct ktermios *);
unsigned int (*get_mctrl)(struct uart_port *);
@@ -131,6 +454,7 @@ struct uart_port {
unsigned int old);
void (*handle_break)(struct uart_port *);
int (*rs485_config)(struct uart_port *,
+ struct ktermios *termios,
struct serial_rs485 *rs485);
int (*iso7816_config)(struct uart_port *,
struct serial_iso7816 *iso7816);
@@ -170,7 +494,7 @@ struct uart_port {
* assigned from the serial_struct flags in uart_set_info()
* [for bit definitions in the UPF_CHANGE_MASK]
*
- * Bits [0..UPF_LAST_USER] are userspace defined/visible/changeable
+ * Bits [0..ASYNCB_LAST_USER] are userspace defined/visible/changeable
* The remaining bits are serial-core specific and not modifiable by
* userspace.
*/
@@ -189,23 +513,24 @@ struct uart_port {
#define UPF_BUGGY_UART ((__force upf_t) ASYNC_BUGGY_UART /* 14 */ )
#define UPF_MAGIC_MULTIPLIER ((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ )
-#define UPF_NO_THRE_TEST ((__force upf_t) (1 << 19))
+#define UPF_NO_THRE_TEST ((__force upf_t) BIT_ULL(19))
/* Port has hardware-assisted h/w flow control */
-#define UPF_AUTO_CTS ((__force upf_t) (1 << 20))
-#define UPF_AUTO_RTS ((__force upf_t) (1 << 21))
+#define UPF_AUTO_CTS ((__force upf_t) BIT_ULL(20))
+#define UPF_AUTO_RTS ((__force upf_t) BIT_ULL(21))
#define UPF_HARD_FLOW ((__force upf_t) (UPF_AUTO_CTS | UPF_AUTO_RTS))
/* Port has hardware-assisted s/w flow control */
-#define UPF_SOFT_FLOW ((__force upf_t) (1 << 22))
-#define UPF_CONS_FLOW ((__force upf_t) (1 << 23))
-#define UPF_SHARE_IRQ ((__force upf_t) (1 << 24))
-#define UPF_EXAR_EFR ((__force upf_t) (1 << 25))
-#define UPF_BUG_THRE ((__force upf_t) (1 << 26))
+#define UPF_SOFT_FLOW ((__force upf_t) BIT_ULL(22))
+#define UPF_CONS_FLOW ((__force upf_t) BIT_ULL(23))
+#define UPF_SHARE_IRQ ((__force upf_t) BIT_ULL(24))
+#define UPF_EXAR_EFR ((__force upf_t) BIT_ULL(25))
+#define UPF_BUG_THRE ((__force upf_t) BIT_ULL(26))
/* The exact UART type is known and should not be probed. */
-#define UPF_FIXED_TYPE ((__force upf_t) (1 << 27))
-#define UPF_BOOT_AUTOCONF ((__force upf_t) (1 << 28))
-#define UPF_FIXED_PORT ((__force upf_t) (1 << 29))
-#define UPF_DEAD ((__force upf_t) (1 << 30))
-#define UPF_IOREMAP ((__force upf_t) (1 << 31))
+#define UPF_FIXED_TYPE ((__force upf_t) BIT_ULL(27))
+#define UPF_BOOT_AUTOCONF ((__force upf_t) BIT_ULL(28))
+#define UPF_FIXED_PORT ((__force upf_t) BIT_ULL(29))
+#define UPF_DEAD ((__force upf_t) BIT_ULL(30))
+#define UPF_IOREMAP ((__force upf_t) BIT_ULL(31))
+#define UPF_FULL_PROBE ((__force upf_t) BIT_ULL(32))
#define __UPF_CHANGE_MASK 0x17fff
#define UPF_CHANGE_MASK ((__force upf_t) __UPF_CHANGE_MASK)
@@ -230,7 +555,7 @@ struct uart_port {
int hw_stopped; /* sw-assisted CTS flow state */
unsigned int mctrl; /* current modem ctrl settings */
- unsigned int timeout; /* character-based timeout */
+ unsigned int frame_time; /* frame timing in ns */
unsigned int type; /* port type */
const struct uart_ops *ops;
unsigned int custom_divisor;
@@ -243,13 +568,17 @@ struct uart_port {
unsigned long sysrq; /* sysrq timeout */
unsigned int sysrq_ch; /* char for sysrq */
unsigned char has_sysrq;
+ unsigned char sysrq_seq; /* index in sysrq_toggle_seq */
unsigned char hub6; /* this should be in the 8250 driver */
unsigned char suspended;
+ unsigned char console_reinit;
const char *name; /* port name */
struct attribute_group *attr_group; /* port specific attributes */
const struct attribute_group **tty_groups; /* all attributes (serial core use only) */
struct serial_rs485 rs485;
+ struct serial_rs485 rs485_supported; /* Supported mask for serial_rs485 */
+ struct gpio_desc *rs485_term_gpio; /* enable RS485 bus termination */
struct serial_iso7816 iso7816;
void *private_data; /* generic platform data pointer */
};
@@ -296,6 +625,23 @@ struct uart_state {
/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256
+/**
+ * uart_xmit_advance - Advance xmit buffer and account Tx'ed chars
+ * @up: uart_port structure describing the port
+ * @chars: number of characters sent
+ *
+ * This function advances the tail of the circular xmit buffer by the number of
+ * @chars transmitted and handles accounting of transmitted bytes (into
+ * @up's icount.tx).
+ */
+static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars)
+{
+ struct circ_buf *xmit = &up->state->xmit;
+
+ xmit->tail = (xmit->tail + chars) & (UART_XMIT_SIZE - 1);
+ up->icount.tx += chars;
+}
+
struct module;
struct tty_driver;
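A rough sketch of how a driver's TX path might use the new helper (the
mydrv_* names and MYDRV_THR register are hypothetical, not part of this
patch; stop/empty checks trimmed for brevity):

	static void mydrv_tx_chars(struct uart_port *port)
	{
		struct circ_buf *xmit = &port->state->xmit;
		unsigned int pending = uart_circ_chars_pending(xmit);
		unsigned int count = min_t(unsigned int, pending, port->fifosize);
		unsigned int i;

		for (i = 0; i < count; i++)
			writeb(xmit->buf[(xmit->tail + i) & (UART_XMIT_SIZE - 1)],
			       port->membase + MYDRV_THR);

		/* one call advances xmit->tail and bumps icount.tx */
		uart_xmit_advance(port, count);

		if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
			uart_write_wakeup(port);
	}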
@@ -324,14 +670,27 @@ void uart_write_wakeup(struct uart_port *port);
void uart_update_timeout(struct uart_port *port, unsigned int cflag,
unsigned int baud);
unsigned int uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old, unsigned int min,
+ const struct ktermios *old, unsigned int min,
unsigned int max);
unsigned int uart_get_divisor(struct uart_port *port, unsigned int baud);
+/*
+ * Calculates FIFO drain time.
+ */
+static inline unsigned long uart_fifo_timeout(struct uart_port *port)
+{
+ u64 fifo_timeout = (u64)READ_ONCE(port->frame_time) * port->fifosize;
+
+ /* Add .02 seconds of slop */
+ fifo_timeout += 20 * NSEC_PER_MSEC;
+
+ return max(nsecs_to_jiffies(fifo_timeout), 1UL);
+}
+
/* Base timer interval for polling */
static inline int uart_poll_timeout(struct uart_port *port)
{
- int timeout = port->timeout;
+ int timeout = uart_fifo_timeout(port);
return timeout > 6 ? (timeout / 2 - 2) : 1;
}
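A worked example of the drain-time calculation: at 115200 baud with 8N1
framing a character occupies 10 bit times, so frame_time is about
86806 ns. With a 16-byte FIFO, 16 * 86806 ns ~= 1.39 ms; adding the
20 ms of slop gives ~21.4 ms, i.e. roughly 21 jiffies at HZ=1000, and
uart_poll_timeout() would then poll about every (21 / 2 - 2) = 8 jiffies.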
@@ -353,8 +712,8 @@ struct earlycon_id {
int (*setup)(struct earlycon_device *, const char *options);
};
-extern const struct earlycon_id *__earlycon_table[];
-extern const struct earlycon_id *__earlycon_table_end[];
+extern const struct earlycon_id __earlycon_table[];
+extern const struct earlycon_id __earlycon_table_end[];
#if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE)
#define EARLYCON_USED_OR_UNUSED __used
@@ -362,19 +721,13 @@ extern const struct earlycon_id *__earlycon_table_end[];
#define EARLYCON_USED_OR_UNUSED __maybe_unused
#endif
-#define _OF_EARLYCON_DECLARE(_name, compat, fn, unique_id) \
- static const struct earlycon_id unique_id \
- EARLYCON_USED_OR_UNUSED __initconst \
+#define OF_EARLYCON_DECLARE(_name, compat, fn) \
+ static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \
+ EARLYCON_USED_OR_UNUSED __section("__earlycon_table") \
+ __aligned(__alignof__(struct earlycon_id)) \
= { .name = __stringify(_name), \
.compatible = compat, \
- .setup = fn }; \
- static const struct earlycon_id EARLYCON_USED_OR_UNUSED \
- __section(__earlycon_table) \
- * const __PASTE(__p, unique_id) = &unique_id
-
-#define OF_EARLYCON_DECLARE(_name, compat, fn) \
- _OF_EARLYCON_DECLARE(_name, compat, fn, \
- __UNIQUE_ID(__earlycon_##_name))
+ .setup = fn }
#define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn)
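A minimal sketch of declaring an early console against the reworked
table (the foo_* names and the "acme,foo-uart" compatible string are
hypothetical):

	static void foo_early_write(struct console *con, const char *s,
				    unsigned int n)
	{
		/* poll the hypothetical TX FIFO and emit the n bytes of s */
	}

	static int __init foo_early_setup(struct earlycon_device *dev,
					  const char *options)
	{
		if (!dev->port.membase)
			return -ENODEV;
		dev->con->write = foo_early_write;
		return 0;
	}
	OF_EARLYCON_DECLARE(foo_uart, "acme,foo-uart", foo_early_setup);

The entry now lands in __earlycon_table by value rather than through a
separate pointer table, which is what the removal of
_OF_EARLYCON_DECLARE above reflects.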
@@ -390,6 +743,11 @@ static const bool earlycon_acpi_spcr_enable EARLYCON_USED_OR_UNUSED;
static inline int setup_earlycon(char *buf) { return 0; }
#endif
+static inline bool uart_console_enabled(struct uart_port *port)
+{
+ return uart_console(port) && (port->cons->flags & CON_ENABLED);
+}
+
struct uart_port *uart_get_console(struct uart_port *ports, int nr,
struct console *c);
int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
@@ -401,7 +759,7 @@ int uart_set_options(struct uart_port *port, struct console *co, int baud,
struct tty_driver *uart_console_device(struct console *co, int *index);
void uart_console_write(struct uart_port *port, const char *s,
unsigned int count,
- void (*putchar)(struct uart_port *, int));
+ void (*putchar)(struct uart_port *, unsigned char));
/*
* Port/driver registration/removal
@@ -410,7 +768,8 @@ int uart_register_driver(struct uart_driver *uart);
void uart_unregister_driver(struct uart_driver *uart);
int uart_add_one_port(struct uart_driver *reg, struct uart_port *port);
int uart_remove_one_port(struct uart_driver *reg, struct uart_port *port);
-int uart_match_port(struct uart_port *port1, struct uart_port *port2);
+bool uart_match_port(const struct uart_port *port1,
+ const struct uart_port *port2);
/*
* Power Management
@@ -430,7 +789,7 @@ int uart_resume_port(struct uart_driver *reg, struct uart_port *port);
static inline int uart_tx_stopped(struct uart_port *port)
{
struct tty_struct *tty = port->state->port.tty;
- if ((tty && tty->stopped) || port->hw_stopped)
+ if ((tty && tty->flow.stopped) || port->hw_stopped)
return 1;
return 0;
}
@@ -459,11 +818,130 @@ extern void uart_handle_cts_change(struct uart_port *uport,
extern void uart_insert_char(struct uart_port *port, unsigned int status,
unsigned int overrun, unsigned int ch, unsigned int flag);
-extern int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch);
-extern int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch);
-extern void uart_unlock_and_check_sysrq(struct uart_port *port,
- unsigned long irqflags);
-extern int uart_handle_break(struct uart_port *port);
+void uart_xchar_out(struct uart_port *uport, int offset);
+
+#ifdef CONFIG_MAGIC_SYSRQ_SERIAL
+#define SYSRQ_TIMEOUT (HZ * 5)
+
+bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch);
+
+static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+ if (!port->sysrq)
+ return 0;
+
+ if (ch && time_before(jiffies, port->sysrq)) {
+ if (sysrq_mask()) {
+ handle_sysrq(ch);
+ port->sysrq = 0;
+ return 1;
+ }
+ if (uart_try_toggle_sysrq(port, ch))
+ return 1;
+ }
+ port->sysrq = 0;
+
+ return 0;
+}
+
+static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+ if (!port->sysrq)
+ return 0;
+
+ if (ch && time_before(jiffies, port->sysrq)) {
+ if (sysrq_mask()) {
+ port->sysrq_ch = ch;
+ port->sysrq = 0;
+ return 1;
+ }
+ if (uart_try_toggle_sysrq(port, ch))
+ return 1;
+ }
+ port->sysrq = 0;
+
+ return 0;
+}
+
+static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
+{
+ int sysrq_ch;
+
+ if (!port->has_sysrq) {
+ spin_unlock(&port->lock);
+ return;
+ }
+
+ sysrq_ch = port->sysrq_ch;
+ port->sysrq_ch = 0;
+
+ spin_unlock(&port->lock);
+
+ if (sysrq_ch)
+ handle_sysrq(sysrq_ch);
+}
+
+static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
+ unsigned long flags)
+{
+ int sysrq_ch;
+
+ if (!port->has_sysrq) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ return;
+ }
+
+ sysrq_ch = port->sysrq_ch;
+ port->sysrq_ch = 0;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (sysrq_ch)
+ handle_sysrq(sysrq_ch);
+}
+#else /* CONFIG_MAGIC_SYSRQ_SERIAL */
+static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+ return 0;
+}
+static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+ return 0;
+}
+static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
+{
+ spin_unlock(&port->lock);
+}
+static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
+ unsigned long flags)
+{
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+#endif /* CONFIG_MAGIC_SYSRQ_SERIAL */
+
+/*
+ * We do the SysRQ and SAK checking like this...
+ */
+static inline int uart_handle_break(struct uart_port *port)
+{
+ struct uart_state *state = port->state;
+
+ if (port->handle_break)
+ port->handle_break(port);
+
+#ifdef CONFIG_MAGIC_SYSRQ_SERIAL
+ if (port->has_sysrq && uart_console(port)) {
+ if (!port->sysrq) {
+ port->sysrq = jiffies + SYSRQ_TIMEOUT;
+ return 1;
+ }
+ port->sysrq = 0;
+ }
+#endif
+ if (port->flags & UPF_SAK)
+ do_SAK(state->port.tty);
+ return 0;
+}
/*
* UART_ENABLE_MS - determine if port should enable modem status irqs
@@ -472,5 +950,5 @@ extern int uart_handle_break(struct uart_port *port);
(cflag) & CRTSCTS || \
!((cflag) & CLOCAL))
-void uart_get_rs485_mode(struct device *dev, struct serial_rs485 *rs485conf);
+int uart_get_rs485_mode(struct uart_port *port);
#endif /* LINUX_SERIAL_CORE_H */
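Putting the now-inline sysrq helpers together, a rough sketch of an RX
interrupt handler (mydrv_* names and the MYDRV_RBR register are
hypothetical):

	static irqreturn_t mydrv_irq(int irq, void *dev_id)
	{
		struct uart_port *port = dev_id;
		unsigned int ch;

		spin_lock(&port->lock);
		while (mydrv_rx_ready(port)) {	/* hypothetical status poll */
			ch = readb(port->membase + MYDRV_RBR);
			port->icount.rx++;
			if (uart_prepare_sysrq_char(port, ch))
				continue;	/* consumed as a SysRq key */
			uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
		}
		tty_flip_buffer_push(&port->state->port);
		/* drops port->lock and runs handle_sysrq() if one was latched */
		uart_unlock_and_check_sysrq(port);
		return IRQ_HANDLED;
	}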
diff --git a/include/linux/serial_pnx8xxx.h b/include/linux/serial_pnx8xxx.h
deleted file mode 100644
index 619d748dcd44..000000000000
--- a/include/linux/serial_pnx8xxx.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Embedded Alley Solutions, source@embeddedalley.com.
- */
-
-#ifndef _LINUX_SERIAL_PNX8XXX_H
-#define _LINUX_SERIAL_PNX8XXX_H
-
-#include <linux/serial_core.h>
-
-#define PNX8XXX_NR_PORTS 2
-
-struct pnx8xxx_port {
- struct uart_port port;
- struct timer_list timer;
- unsigned int old_status;
-};
-
-/* register offsets */
-#define PNX8XXX_LCR 0
-#define PNX8XXX_MCR 0x004
-#define PNX8XXX_BAUD 0x008
-#define PNX8XXX_CFG 0x00c
-#define PNX8XXX_FIFO 0x028
-#define PNX8XXX_ISTAT 0xfe0
-#define PNX8XXX_IEN 0xfe4
-#define PNX8XXX_ICLR 0xfe8
-#define PNX8XXX_ISET 0xfec
-#define PNX8XXX_PD 0xff4
-#define PNX8XXX_MID 0xffc
-
-#define PNX8XXX_UART_LCR_TXBREAK (1<<30)
-#define PNX8XXX_UART_LCR_PAREVN 0x10000000
-#define PNX8XXX_UART_LCR_PAREN 0x08000000
-#define PNX8XXX_UART_LCR_2STOPB 0x04000000
-#define PNX8XXX_UART_LCR_8BIT 0x01000000
-#define PNX8XXX_UART_LCR_TX_RST 0x00040000
-#define PNX8XXX_UART_LCR_RX_RST 0x00020000
-#define PNX8XXX_UART_LCR_RX_NEXT 0x00010000
-
-#define PNX8XXX_UART_MCR_SCR 0xFF000000
-#define PNX8XXX_UART_MCR_DCD 0x00800000
-#define PNX8XXX_UART_MCR_CTS 0x00100000
-#define PNX8XXX_UART_MCR_LOOP 0x00000010
-#define PNX8XXX_UART_MCR_RTS 0x00000002
-#define PNX8XXX_UART_MCR_DTR 0x00000001
-
-#define PNX8XXX_UART_INT_TX 0x00000080
-#define PNX8XXX_UART_INT_EMPTY 0x00000040
-#define PNX8XXX_UART_INT_RCVTO 0x00000020
-#define PNX8XXX_UART_INT_RX 0x00000010
-#define PNX8XXX_UART_INT_RXOVRN 0x00000008
-#define PNX8XXX_UART_INT_FRERR 0x00000004
-#define PNX8XXX_UART_INT_BREAK 0x00000002
-#define PNX8XXX_UART_INT_PARITY 0x00000001
-#define PNX8XXX_UART_INT_ALLRX 0x0000003F
-#define PNX8XXX_UART_INT_ALLTX 0x000000C0
-
-#define PNX8XXX_UART_FIFO_TXFIFO 0x001F0000
-#define PNX8XXX_UART_FIFO_TXFIFO_STA (0x1f<<16)
-#define PNX8XXX_UART_FIFO_RXBRK 0x00008000
-#define PNX8XXX_UART_FIFO_RXFE 0x00004000
-#define PNX8XXX_UART_FIFO_RXPAR 0x00002000
-#define PNX8XXX_UART_FIFO_RXFIFO 0x00001F00
-#define PNX8XXX_UART_FIFO_RBRTHR 0x000000FF
-
-#endif
diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h
index 463ed28d2b27..1672cf0810ef 100644
--- a/include/linux/serial_s3c.h
+++ b/include/linux/serial_s3c.h
@@ -83,7 +83,7 @@
#define S3C2410_UCON_RXIRQMODE (1<<0)
#define S3C2410_UCON_RXFIFO_TOI (1<<7)
#define S3C2443_UCON_RXERR_IRQEN (1<<6)
-#define S3C2443_UCON_LOOPBACK (1<<5)
+#define S3C2410_UCON_LOOPBACK (1<<5)
#define S3C2410_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
@@ -246,6 +246,25 @@
S5PV210_UFCON_TXTRIG4 | \
S5PV210_UFCON_RXTRIG4)
+#define APPLE_S5L_UCON_RXTO_ENA 9
+#define APPLE_S5L_UCON_RXTHRESH_ENA 12
+#define APPLE_S5L_UCON_TXTHRESH_ENA 13
+#define APPLE_S5L_UCON_RXTO_ENA_MSK (1 << APPLE_S5L_UCON_RXTO_ENA)
+#define APPLE_S5L_UCON_RXTHRESH_ENA_MSK (1 << APPLE_S5L_UCON_RXTHRESH_ENA)
+#define APPLE_S5L_UCON_TXTHRESH_ENA_MSK (1 << APPLE_S5L_UCON_TXTHRESH_ENA)
+
+#define APPLE_S5L_UCON_DEFAULT (S3C2410_UCON_TXIRQMODE | \
+ S3C2410_UCON_RXIRQMODE | \
+ S3C2410_UCON_RXFIFO_TOI)
+#define APPLE_S5L_UCON_MASK (APPLE_S5L_UCON_RXTO_ENA_MSK | \
+ APPLE_S5L_UCON_RXTHRESH_ENA_MSK | \
+ APPLE_S5L_UCON_TXTHRESH_ENA_MSK)
+
+#define APPLE_S5L_UTRSTAT_RXTHRESH (1<<4)
+#define APPLE_S5L_UTRSTAT_TXTHRESH (1<<5)
+#define APPLE_S5L_UTRSTAT_RXTO (1<<9)
+#define APPLE_S5L_UTRSTAT_ALL_FLAGS (0x3f0)
+
#ifndef __ASSEMBLY__
#include <linux/serial_core.h>
@@ -254,7 +273,7 @@
* serial port
*
* the pointer is set up by the machine-specific initialisation from the
- * arch/arm/mach-s3c2410/ directory.
+ * arch/arm/mach-s3c/ directory.
*/
struct s3c2410_uartcfg {
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index 86281ac7c305..369769ce7399 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -23,16 +23,33 @@ static inline int set_direct_map_default_noflush(struct page *page)
{
return 0;
}
+
+static inline bool kernel_page_present(struct page *page)
+{
+ return true;
+}
+#else /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */
+/*
+ * Some architectures, e.g. ARM64, can disable direct map modifications at
+ * boot time. Let them override this query.
+ */
+#ifndef can_set_direct_map
+static inline bool can_set_direct_map(void)
+{
+ return true;
+}
+#define can_set_direct_map can_set_direct_map
#endif
+#endif /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */
-#ifndef set_mce_nospec
+#ifdef CONFIG_X86_64
+int set_mce_nospec(unsigned long pfn);
+int clear_mce_nospec(unsigned long pfn);
+#else
static inline int set_mce_nospec(unsigned long pfn)
{
return 0;
}
-#endif
-
-#ifndef clear_mce_nospec
static inline int clear_mce_nospec(unsigned long pfn)
{
return 0;
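A minimal sketch of the intended consumer-side pattern for the new
guard (the make_page_invisible() wrapper is hypothetical):

	static int make_page_invisible(struct page *page)
	{
		if (!can_set_direct_map())
			return 0;	/* direct map is fixed at boot, nothing to do */
		return set_direct_map_invalid_noflush(page);
	}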
diff --git a/include/linux/sfi.h b/include/linux/sfi.h
deleted file mode 100644
index e0e1597ef9e6..000000000000
--- a/include/linux/sfi.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/* sfi.h Simple Firmware Interface */
-
-/*
-
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
-
- Copyright(c) 2009 Intel Corporation. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- The full GNU General Public License is included in this distribution
- in the file called LICENSE.GPL.
-
- BSD LICENSE
-
- Copyright(c) 2009 Intel Corporation. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-*/
-
-#ifndef _LINUX_SFI_H
-#define _LINUX_SFI_H
-
-#include <linux/init.h>
-#include <linux/types.h>
-
-/* Table signatures reserved by the SFI specification */
-#define SFI_SIG_SYST "SYST"
-#define SFI_SIG_FREQ "FREQ"
-#define SFI_SIG_IDLE "IDLE"
-#define SFI_SIG_CPUS "CPUS"
-#define SFI_SIG_MTMR "MTMR"
-#define SFI_SIG_MRTC "MRTC"
-#define SFI_SIG_MMAP "MMAP"
-#define SFI_SIG_APIC "APIC"
-#define SFI_SIG_XSDT "XSDT"
-#define SFI_SIG_WAKE "WAKE"
-#define SFI_SIG_DEVS "DEVS"
-#define SFI_SIG_GPIO "GPIO"
-
-#define SFI_SIGNATURE_SIZE 4
-#define SFI_OEM_ID_SIZE 6
-#define SFI_OEM_TABLE_ID_SIZE 8
-
-#define SFI_NAME_LEN 16
-
-#define SFI_SYST_SEARCH_BEGIN 0x000E0000
-#define SFI_SYST_SEARCH_END 0x000FFFFF
-
-#define SFI_GET_NUM_ENTRIES(ptable, entry_type) \
- ((ptable->header.len - sizeof(struct sfi_table_header)) / \
- (sizeof(entry_type)))
-/*
- * Table structures must be byte-packed to match the SFI specification,
- * as they are provided by the BIOS.
- */
-struct sfi_table_header {
- char sig[SFI_SIGNATURE_SIZE];
- u32 len;
- u8 rev;
- u8 csum;
- char oem_id[SFI_OEM_ID_SIZE];
- char oem_table_id[SFI_OEM_TABLE_ID_SIZE];
-} __packed;
-
-struct sfi_table_simple {
- struct sfi_table_header header;
- u64 pentry[1];
-} __packed;
-
-/* Comply with UEFI spec 2.1 */
-struct sfi_mem_entry {
- u32 type;
- u64 phys_start;
- u64 virt_start;
- u64 pages;
- u64 attrib;
-} __packed;
-
-struct sfi_cpu_table_entry {
- u32 apic_id;
-} __packed;
-
-struct sfi_cstate_table_entry {
- u32 hint; /* MWAIT hint */
- u32 latency; /* latency in ms */
-} __packed;
-
-struct sfi_apic_table_entry {
- u64 phys_addr; /* phy base addr for APIC reg */
-} __packed;
-
-struct sfi_freq_table_entry {
- u32 freq_mhz; /* in MHZ */
- u32 latency; /* transition latency in ms */
- u32 ctrl_val; /* value to write to PERF_CTL */
-} __packed;
-
-struct sfi_wake_table_entry {
- u64 phys_addr; /* pointer to where the wake vector locates */
-} __packed;
-
-struct sfi_timer_table_entry {
- u64 phys_addr; /* phy base addr for the timer */
- u32 freq_hz; /* in HZ */
- u32 irq;
-} __packed;
-
-struct sfi_rtc_table_entry {
- u64 phys_addr; /* phy base addr for the RTC */
- u32 irq;
-} __packed;
-
-struct sfi_device_table_entry {
- u8 type; /* bus type, I2C, SPI or ...*/
-#define SFI_DEV_TYPE_SPI 0
-#define SFI_DEV_TYPE_I2C 1
-#define SFI_DEV_TYPE_UART 2
-#define SFI_DEV_TYPE_HSI 3
-#define SFI_DEV_TYPE_IPC 4
-#define SFI_DEV_TYPE_SD 5
-
- u8 host_num; /* attached to host 0, 1...*/
- u16 addr;
- u8 irq;
- u32 max_freq;
- char name[SFI_NAME_LEN];
-} __packed;
-
-struct sfi_gpio_table_entry {
- char controller_name[SFI_NAME_LEN];
- u16 pin_no;
- char pin_name[SFI_NAME_LEN];
-} __packed;
-
-typedef int (*sfi_table_handler) (struct sfi_table_header *table);
-
-#ifdef CONFIG_SFI
-extern void __init sfi_init(void);
-extern int __init sfi_platform_init(void);
-extern void __init sfi_init_late(void);
-extern int sfi_table_parse(char *signature, char *oem_id, char *oem_table_id,
- sfi_table_handler handler);
-
-extern int sfi_disabled;
-static inline void disable_sfi(void)
-{
- sfi_disabled = 1;
-}
-
-#else /* !CONFIG_SFI */
-
-static inline void sfi_init(void)
-{
-}
-
-static inline void sfi_init_late(void)
-{
-}
-
-#define sfi_disabled 0
-
-static inline int sfi_table_parse(char *signature, char *oem_id,
- char *oem_table_id,
- sfi_table_handler handler)
-{
- return -1;
-}
-
-#endif /* !CONFIG_SFI */
-
-#endif /*_LINUX_SFI_H*/
diff --git a/include/linux/sfi_acpi.h b/include/linux/sfi_acpi.h
deleted file mode 100644
index a6e555cbe05c..000000000000
--- a/include/linux/sfi_acpi.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/* sfi.h Simple Firmware Interface */
-
-/*
-
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
-
- Copyright(c) 2009 Intel Corporation. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- The full GNU General Public License is included in this distribution
- in the file called LICENSE.GPL.
-
- BSD LICENSE
-
- Copyright(c) 2009 Intel Corporation. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-*/
-
-#ifndef _LINUX_SFI_ACPI_H
-#define _LINUX_SFI_ACPI_H
-
-#include <linux/acpi.h>
-#include <linux/sfi.h>
-
-#ifdef CONFIG_SFI
-extern int sfi_acpi_table_parse(char *signature, char *oem_id,
- char *oem_table_id,
- int (*handler)(struct acpi_table_header *));
-
-static inline int __init acpi_sfi_table_parse(char *signature,
- int (*handler)(struct acpi_table_header *))
-{
- if (!acpi_table_parse(signature, handler))
- return 0;
-
- return sfi_acpi_table_parse(signature, NULL, NULL, handler);
-}
-#else /* !CONFIG_SFI */
-static inline int sfi_acpi_table_parse(char *signature, char *oem_id,
- char *oem_table_id,
- int (*handler)(struct acpi_table_header *))
-{
- return -1;
-}
-
-static inline int __init acpi_sfi_table_parse(char *signature,
- int (*handler)(struct acpi_table_header *))
-{
- return acpi_table_parse(signature, handler);
-}
-#endif /* !CONFIG_SFI */
-
-#endif /*_LINUX_SFI_ACPI_H*/
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index 38893e4dd0f0..d1f343853b6c 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -535,13 +535,16 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
unsigned long *support);
bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id);
void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
- unsigned long *support);
+ unsigned long *support, unsigned long *interfaces);
phy_interface_t sfp_select_interface(struct sfp_bus *bus,
unsigned long *link_modes);
int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo);
int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
u8 *data);
+int sfp_get_module_eeprom_by_page(struct sfp_bus *bus,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack);
void sfp_upstream_start(struct sfp_bus *bus);
void sfp_upstream_stop(struct sfp_bus *bus);
void sfp_bus_put(struct sfp_bus *bus);
@@ -565,7 +568,8 @@ static inline bool sfp_may_have_phy(struct sfp_bus *bus,
static inline void sfp_parse_support(struct sfp_bus *bus,
const struct sfp_eeprom_id *id,
- unsigned long *support)
+ unsigned long *support,
+ unsigned long *interfaces)
{
}
@@ -587,6 +591,13 @@ static inline int sfp_get_module_eeprom(struct sfp_bus *bus,
return -EOPNOTSUPP;
}
+static inline int sfp_get_module_eeprom_by_page(struct sfp_bus *bus,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
static inline void sfp_upstream_start(struct sfp_bus *bus)
{
}
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index d56fefef8905..d500ea967dc7 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -18,24 +18,35 @@ struct shmem_inode_info {
unsigned long flags;
unsigned long alloced; /* data pages alloced to file */
unsigned long swapped; /* subtotal assigned to swap */
+ pgoff_t fallocend; /* highest fallocate end index */
struct list_head shrinklist; /* shrinkable hpage inodes */
struct list_head swaplist; /* chain of maybes on swap */
struct shared_policy policy; /* NUMA memory alloc policy */
struct simple_xattrs xattrs; /* list of xattrs */
atomic_t stop_eviction; /* hold when working on inode */
+ struct timespec64 i_crtime; /* file creation time */
+ unsigned int fsflags; /* flags for FS_IOC_[SG]ETFLAGS */
struct inode vfs_inode;
};
+#define SHMEM_FL_USER_VISIBLE FS_FL_USER_VISIBLE
+#define SHMEM_FL_USER_MODIFIABLE \
+ (FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL)
+#define SHMEM_FL_INHERITED (FS_NODUMP_FL | FS_NOATIME_FL)
+
struct shmem_sb_info {
unsigned long max_blocks; /* How many blocks are allowed */
struct percpu_counter used_blocks; /* How many are allocated */
unsigned long max_inodes; /* How many inodes are allowed */
unsigned long free_inodes; /* How many are left for allocation */
- spinlock_t stat_lock; /* Serialize shmem_sb_info changes */
+ raw_spinlock_t stat_lock; /* Serialize shmem_sb_info changes */
umode_t mode; /* Mount mode for root directory */
unsigned char huge; /* Whether to try for hugepages */
kuid_t uid; /* Mount uid for root directory */
kgid_t gid; /* Mount gid for root directory */
+ bool full_inums; /* If i_ino should be uint or ino_t */
+ ino_t next_ino; /* The next per-sb inode number to use */
+ ino_t __percpu *ino_batch; /* The next per-cpu inode number to use */
struct mempolicy *mpol; /* default memory policy for mappings */
spinlock_t shrinklist_lock; /* Protects shrinklist */
struct list_head shrinklist; /* List of shrinkable inodes */
@@ -51,7 +62,7 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
* Functions in mm/shmem.c called directly from elsewhere:
*/
extern const struct fs_parameter_spec shmem_fs_parameters[];
-extern int shmem_init(void);
+extern void shmem_init(void);
extern int shmem_init_fs_context(struct fs_context *fc);
extern struct file *shmem_file_setup(const char *name,
loff_t size, unsigned long flags);
@@ -62,9 +73,13 @@ extern struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt,
extern int shmem_zero_setup(struct vm_area_struct *);
extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
-extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
+extern int shmem_lock(struct file *file, int lock, struct ucounts *ucounts);
#ifdef CONFIG_SHMEM
-extern bool shmem_mapping(struct address_space *mapping);
+extern const struct address_space_operations shmem_aops;
+static inline bool shmem_mapping(struct address_space *mapping)
+{
+ return mapping->a_ops == &shmem_aops;
+}
#else
static inline bool shmem_mapping(struct address_space *mapping)
{
@@ -75,25 +90,31 @@ extern void shmem_unlock_mapping(struct address_space *mapping);
extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
-extern int shmem_unuse(unsigned int type, bool frontswap,
- unsigned long *fs_pages_to_unuse);
+int shmem_unuse(unsigned int type);
+extern bool shmem_is_huge(struct vm_area_struct *vma, struct inode *inode,
+ pgoff_t index, bool shmem_huge_force);
+static inline bool shmem_huge_enabled(struct vm_area_struct *vma,
+ bool shmem_huge_force)
+{
+ return shmem_is_huge(vma, file_inode(vma->vm_file), vma->vm_pgoff,
+ shmem_huge_force);
+}
extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
pgoff_t start, pgoff_t end);
-/* Flag allocation requirements to shmem_getpage */
+/* Flag allocation requirements to shmem_get_folio */
enum sgp_type {
SGP_READ, /* don't exceed i_size, don't allocate page */
+ SGP_NOALLOC, /* similar, but fail on hole or use fallocated page */
SGP_CACHE, /* don't exceed i_size, may allocate page */
- SGP_NOHUGE, /* like SGP_CACHE, but no huge pages */
- SGP_HUGE, /* like SGP_CACHE, huge pages preferred */
SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */
SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */
};
-extern int shmem_getpage(struct inode *inode, pgoff_t index,
- struct page **pagep, enum sgp_type sgp);
+int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
+ enum sgp_type sgp);
static inline struct page *shmem_read_mapping_page(
struct address_space *mapping, pgoff_t index)
@@ -111,33 +132,33 @@ static inline bool shmem_file(struct file *file)
return shmem_mapping(file->f_mapping);
}
-extern bool shmem_charge(struct inode *inode, long pages);
-extern void shmem_uncharge(struct inode *inode, long pages);
-
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
-extern bool shmem_huge_enabled(struct vm_area_struct *vma);
-#else
-static inline bool shmem_huge_enabled(struct vm_area_struct *vma)
+/*
+ * If fallocate(FALLOC_FL_KEEP_SIZE) has been used, there may be pages
+ * beyond i_size's notion of EOF, which fallocate has committed to reserving,
+ * and which split_huge_page() must therefore not delete. This use of a single
+ * "fallocend" per inode errs on the side of not deleting a reservation when
+ * in doubt: there are plenty of cases when it preserves unreserved pages.
+ */
+static inline pgoff_t shmem_fallocend(struct inode *inode, pgoff_t eof)
{
- return false;
+ return max(eof, SHMEM_I(inode)->fallocend);
}
-#endif
+extern bool shmem_charge(struct inode *inode, long pages);
+extern void shmem_uncharge(struct inode *inode, long pages);
+
+#ifdef CONFIG_USERFAULTFD
#ifdef CONFIG_SHMEM
-extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+extern int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
unsigned long src_addr,
+ bool zeropage, bool wp_copy,
struct page **pagep);
-extern int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
- pmd_t *dst_pmd,
- struct vm_area_struct *dst_vma,
- unsigned long dst_addr);
-#else
-#define shmem_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
- src_addr, pagep) ({ BUG(); 0; })
-#define shmem_mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma, \
- dst_addr) ({ BUG(); 0; })
-#endif
+#else /* !CONFIG_SHMEM */
+#define shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr, \
+ src_addr, zeropage, wp_copy, pagep) ({ BUG(); 0; })
+#endif /* CONFIG_SHMEM */
+#endif /* CONFIG_USERFAULTFD */
#endif
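A minimal sketch of a caller converted from shmem_getpage() to the
folio-based API (error handling trimmed; assumes the usual
locked-folio return convention):

	struct folio *folio;
	int err;

	err = shmem_get_folio(inode, index, &folio, SGP_CACHE);
	if (err)
		return err;
	/* ... read or dirty the folio contents ... */
	folio_unlock(folio);
	folio_put(folio);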
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 0f80123650e2..08e6054e061f 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -4,7 +4,7 @@
/*
* This struct is used to pass information from page reclaim to the shrinkers.
- * We consolidate the values for easier extention later.
+ * We consolidate the values for easier extension later.
*
* The 'gfpmask' refers to the allocation we are currently trying to
* fulfil.
@@ -73,23 +73,52 @@ struct shrinker {
/* ID in shrinker_idr */
int id;
#endif
+#ifdef CONFIG_SHRINKER_DEBUG
+ int debugfs_id;
+ const char *name;
+ struct dentry *debugfs_entry;
+#endif
/* objs pending delete, per node */
atomic_long_t *nr_deferred;
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
/* Flags */
-#define SHRINKER_NUMA_AWARE (1 << 0)
-#define SHRINKER_MEMCG_AWARE (1 << 1)
+#define SHRINKER_REGISTERED (1 << 0)
+#define SHRINKER_NUMA_AWARE (1 << 1)
+#define SHRINKER_MEMCG_AWARE (1 << 2)
/*
* It just makes sense when the shrinker is also MEMCG_AWARE for now;
* a non-MEMCG_AWARE shrinker should not have this flag set.
*/
-#define SHRINKER_NONSLAB (1 << 2)
+#define SHRINKER_NONSLAB (1 << 3)
-extern int prealloc_shrinker(struct shrinker *shrinker);
+extern int __printf(2, 3) prealloc_shrinker(struct shrinker *shrinker,
+ const char *fmt, ...);
extern void register_shrinker_prepared(struct shrinker *shrinker);
-extern int register_shrinker(struct shrinker *shrinker);
+extern int __printf(2, 3) register_shrinker(struct shrinker *shrinker,
+ const char *fmt, ...);
extern void unregister_shrinker(struct shrinker *shrinker);
extern void free_prealloced_shrinker(struct shrinker *shrinker);
-#endif
+extern void synchronize_shrinkers(void);
+
+#ifdef CONFIG_SHRINKER_DEBUG
+extern int shrinker_debugfs_add(struct shrinker *shrinker);
+extern void shrinker_debugfs_remove(struct shrinker *shrinker);
+extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
+ const char *fmt, ...);
+#else /* CONFIG_SHRINKER_DEBUG */
+static inline int shrinker_debugfs_add(struct shrinker *shrinker)
+{
+ return 0;
+}
+static inline void shrinker_debugfs_remove(struct shrinker *shrinker)
+{
+}
+static inline __printf(2, 3)
+int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
+{
+ return 0;
+}
+#endif /* CONFIG_SHRINKER_DEBUG */
+#endif /* _LINUX_SHRINKER_H */
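With the printf-style name argument, registration now also labels the
debugfs entry; a brief sketch (the mydrv_* callbacks are hypothetical):

	static struct shrinker mydrv_shrinker = {
		.count_objects	= mydrv_count,
		.scan_objects	= mydrv_scan,
		.seeks		= DEFAULT_SEEKS,
	};

	err = register_shrinker(&mydrv_shrinker, "mydrv-%s", dev_name(dev));

When CONFIG_SHRINKER_DEBUG is set, the formatted name appears under
/sys/kernel/debug/shrinker/.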
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 1a5f88316b08..3b98e7a28538 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -24,6 +24,14 @@ static inline void clear_siginfo(kernel_siginfo_t *info)
#define SI_EXPANSION_SIZE (sizeof(struct siginfo) - sizeof(struct kernel_siginfo))
+static inline void copy_siginfo_to_external(siginfo_t *to,
+ const kernel_siginfo_t *from)
+{
+ memcpy(to, from, sizeof(*from));
+ memset(((char *)to) + sizeof(struct kernel_siginfo), 0,
+ SI_EXPANSION_SIZE);
+}
+
int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from);
int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from);
@@ -32,9 +40,11 @@ enum siginfo_layout {
SIL_TIMER,
SIL_POLL,
SIL_FAULT,
+ SIL_FAULT_TRAPNO,
SIL_FAULT_MCEERR,
SIL_FAULT_BNDERR,
SIL_FAULT_PKUERR,
+ SIL_FAULT_PERF_EVENT,
SIL_CHLD,
SIL_RT,
SIL_SYS,
@@ -116,7 +126,6 @@ static inline int sigequalsets(const sigset_t *set1, const sigset_t *set2)
#define sigmask(sig) (1UL << ((sig) - 1))
#ifndef __HAVE_ARCH_SIG_SETOPS
-#include <linux/string.h>
#define _SIG_SET_BINOP(name, op) \
static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \
@@ -129,11 +138,11 @@ static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \
b3 = b->sig[3]; b2 = b->sig[2]; \
r->sig[3] = op(a3, b3); \
r->sig[2] = op(a2, b2); \
- /* fall through */ \
+ fallthrough; \
case 2: \
a1 = a->sig[1]; b1 = b->sig[1]; \
r->sig[1] = op(a1, b1); \
- /* fall through */ \
+ fallthrough; \
case 1: \
a0 = a->sig[0]; b0 = b->sig[0]; \
r->sig[0] = op(a0, b0); \
@@ -163,9 +172,9 @@ static inline void name(sigset_t *set) \
switch (_NSIG_WORDS) { \
case 4: set->sig[3] = op(set->sig[3]); \
set->sig[2] = op(set->sig[2]); \
- /* fall through */ \
+ fallthrough; \
case 2: set->sig[1] = op(set->sig[1]); \
- /* fall through */ \
+ fallthrough; \
case 1: set->sig[0] = op(set->sig[0]); \
break; \
default: \
@@ -186,7 +195,7 @@ static inline void sigemptyset(sigset_t *set)
memset(set, 0, sizeof(sigset_t));
break;
case 2: set->sig[1] = 0;
- /* fall through */
+ fallthrough;
case 1: set->sig[0] = 0;
break;
}
@@ -199,7 +208,7 @@ static inline void sigfillset(sigset_t *set)
memset(set, -1, sizeof(sigset_t));
break;
case 2: set->sig[1] = -1;
- /* fall through */
+ fallthrough;
case 1: set->sig[0] = -1;
break;
}
@@ -230,6 +239,7 @@ static inline void siginitset(sigset_t *set, unsigned long mask)
memset(&set->sig[1], 0, sizeof(long)*(_NSIG_WORDS-1));
break;
case 2: set->sig[1] = 0;
+ break;
case 1: ;
}
}
@@ -242,6 +252,7 @@ static inline void siginitsetinv(sigset_t *set, unsigned long mask)
memset(&set->sig[1], -1, sizeof(long)*(_NSIG_WORDS-1));
break;
case 2: set->sig[1] = -1;
+ break;
case 1: ;
}
}
@@ -271,7 +282,8 @@ extern int do_send_sig_info(int sig, struct kernel_siginfo *info,
struct task_struct *p, enum pid_type type);
extern int group_send_sig_info(int sig, struct kernel_siginfo *info,
struct task_struct *p, enum pid_type type);
-extern int __group_send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
+extern int send_signal_locked(int sig, struct kernel_siginfo *info,
+ struct task_struct *p, enum pid_type type);
extern int sigprocmask(int, sigset_t *, sigset_t *);
extern void set_current_blocked(sigset_t *);
extern void __set_current_blocked(const sigset_t *);
@@ -444,19 +456,37 @@ void signals_init(void);
int restore_altstack(const stack_t __user *);
int __save_altstack(stack_t __user *, unsigned long);
-#define save_altstack_ex(uss, sp) do { \
+#define unsafe_save_altstack(uss, sp, label) do { \
stack_t __user *__uss = uss; \
struct task_struct *t = current; \
- put_user_ex((void __user *)t->sas_ss_sp, &__uss->ss_sp); \
- put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \
- put_user_ex(t->sas_ss_size, &__uss->ss_size); \
- if (t->sas_ss_flags & SS_AUTODISARM) \
- sas_ss_reset(t); \
+ unsafe_put_user((void __user *)t->sas_ss_sp, &__uss->ss_sp, label); \
+ unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \
+ unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \
} while (0);
+#ifdef CONFIG_DYNAMIC_SIGFRAME
+bool sigaltstack_size_valid(size_t ss_size);
+#else
+static inline bool sigaltstack_size_valid(size_t size) { return true; }
+#endif /* !CONFIG_DYNAMIC_SIGFRAME */
+
#ifdef CONFIG_PROC_FS
struct seq_file;
extern void render_sigset_t(struct seq_file *, const char *, sigset_t *);
#endif
+#ifndef arch_untagged_si_addr
+/*
+ * Given a fault address and a signal and si_code which correspond to the
+ * _sigfault union member, returns the address that must appear in si_addr if
+ * the signal handler does not have SA_EXPOSE_TAGBITS enabled in sa_flags.
+ */
+static inline void __user *arch_untagged_si_addr(void __user *addr,
+ unsigned long sig,
+ unsigned long si_code)
+{
+ return addr;
+}
+#endif
+
#endif /* _LINUX_SIGNAL_H */
diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h
index f8a90ae9c6ec..a70b2bdbf4d9 100644
--- a/include/linux/signal_types.h
+++ b/include/linux/signal_types.h
@@ -13,6 +13,8 @@ typedef struct kernel_siginfo {
__SIGINFO;
} kernel_siginfo_t;
+struct ucounts;
+
/*
* Real Time signals may be queued.
*/
@@ -21,7 +23,7 @@ struct sigqueue {
struct list_head list;
int flags;
kernel_siginfo_t info;
- struct user_struct *user;
+ struct ucounts *ucounts;
};
/* flags values. */
@@ -68,4 +70,19 @@ struct ksignal {
int sig;
};
+/* Used to kill the race between sigaction and forced signals */
+#define SA_IMMUTABLE 0x00800000
+
+#ifndef __ARCH_UAPI_SA_FLAGS
+#ifdef SA_RESTORER
+#define __ARCH_UAPI_SA_FLAGS SA_RESTORER
+#else
+#define __ARCH_UAPI_SA_FLAGS 0
+#endif
+#endif
+
+#define UAPI_SA_FLAGS \
+ (SA_NOCLDSTOP | SA_NOCLDWAIT | SA_SIGINFO | SA_ONSTACK | SA_RESTART | \
+ SA_NODEFER | SA_RESETHAND | SA_EXPOSE_TAGBITS | __ARCH_UAPI_SA_FLAGS)
+
#endif /* _LINUX_SIGNAL_TYPES_H */
diff --git a/include/linux/siox.h b/include/linux/siox.h
index da7225bf1877..6bfbda3f634c 100644
--- a/include/linux/siox.h
+++ b/include/linux/siox.h
@@ -36,7 +36,7 @@ bool siox_device_connected(struct siox_device *sdevice);
struct siox_driver {
int (*probe)(struct siox_device *sdevice);
- int (*remove)(struct siox_device *sdevice);
+ void (*remove)(struct siox_device *sdevice);
void (*shutdown)(struct siox_device *sdevice);
/*
diff --git a/include/linux/siphash.h b/include/linux/siphash.h
index bf21591a9e5e..9153e77382e1 100644
--- a/include/linux/siphash.h
+++ b/include/linux/siphash.h
@@ -1,6 +1,5 @@
-/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- *
- * This file is provided under a dual BSD/GPLv2 license.
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright (C) 2016-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*
* SipHash: a fast short-input PRF
* https://131002.net/siphash/
@@ -21,15 +20,15 @@ typedef struct {
u64 key[2];
} siphash_key_t;
+#define siphash_aligned_key_t siphash_key_t __aligned(16)
+
static inline bool siphash_key_is_zero(const siphash_key_t *key)
{
return !(key->key[0] | key->key[1]);
}
u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
-#endif
u64 siphash_1u64(const u64 a, const siphash_key_t *key);
u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
@@ -82,10 +81,9 @@ static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
static inline u64 siphash(const void *data, size_t len,
const siphash_key_t *key)
{
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+ !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
return __siphash_unaligned(data, len, key);
-#endif
return ___siphash_aligned(data, len, key);
}
@@ -96,10 +94,8 @@ typedef struct {
u32 __hsiphash_aligned(const void *data, size_t len,
const hsiphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
const hsiphash_key_t *key);
-#endif
u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
@@ -135,11 +131,38 @@ static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
static inline u32 hsiphash(const void *data, size_t len,
const hsiphash_key_t *key)
{
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+ !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
return __hsiphash_unaligned(data, len, key);
-#endif
return ___hsiphash_aligned(data, len, key);
}
+/*
+ * These macros expose the raw SipHash and HalfSipHash permutations.
+ * Do not use them directly! If you think you have a use for them,
+ * be sure to CC the maintainer of this file explaining why.
+ */
+
+#define SIPHASH_PERMUTATION(a, b, c, d) ( \
+ (a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \
+ (c) += (d), (d) = rol64((d), 16), (d) ^= (c), \
+ (a) += (d), (d) = rol64((d), 21), (d) ^= (a), \
+ (c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32))
+
+#define SIPHASH_CONST_0 0x736f6d6570736575ULL
+#define SIPHASH_CONST_1 0x646f72616e646f6dULL
+#define SIPHASH_CONST_2 0x6c7967656e657261ULL
+#define SIPHASH_CONST_3 0x7465646279746573ULL
+
+#define HSIPHASH_PERMUTATION(a, b, c, d) ( \
+ (a) += (b), (b) = rol32((b), 5), (b) ^= (a), (a) = rol32((a), 16), \
+ (c) += (d), (d) = rol32((d), 8), (d) ^= (c), \
+ (a) += (d), (d) = rol32((d), 7), (d) ^= (a), \
+ (c) += (b), (b) = rol32((b), 13), (b) ^= (c), (c) = rol32((c), 16))
+
+#define HSIPHASH_CONST_0 0U
+#define HSIPHASH_CONST_1 0U
+#define HSIPHASH_CONST_2 0x6c796765U
+#define HSIPHASH_CONST_3 0x74656462U
+
#endif /* _LINUX_SIPHASH_H */
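For reference, the usual calling pattern through the single siphash()
entry point, using the new aligned-key helper (a sketch; real users
key once at init time):

	#include <linux/siphash.h>
	#include <linux/random.h>

	static siphash_aligned_key_t mykey;

	static u64 hash_blob(const void *data, size_t len)
	{
		/* assumes mykey was filled earlier, e.g. via
		 * get_random_bytes(&mykey, sizeof(mykey)) at init
		 */
		return siphash(data, len, &mykey);
	}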
diff --git a/include/linux/sirfsoc_dma.h b/include/linux/sirfsoc_dma.h
deleted file mode 100644
index 50161b6afb61..000000000000
--- a/include/linux/sirfsoc_dma.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _SIRFSOC_DMA_H_
-#define _SIRFSOC_DMA_H_
-
-bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id);
-
-#endif
diff --git a/include/linux/sizes.h b/include/linux/sizes.h
index 9874f6f67537..84aa448d8bb3 100644
--- a/include/linux/sizes.h
+++ b/include/linux/sizes.h
@@ -44,6 +44,11 @@
#define SZ_2G 0x80000000
#define SZ_4G _AC(0x100000000, ULL)
+#define SZ_8G _AC(0x200000000, ULL)
+#define SZ_16G _AC(0x400000000, ULL)
+#define SZ_32G _AC(0x800000000, ULL)
+
+#define SZ_1T _AC(0x10000000000, ULL)
#define SZ_64T _AC(0x400000000000, ULL)
#endif /* __LINUX_SIZES_H__ */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5b50278c4bc8..7be5bb4c94b6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -36,103 +36,121 @@
#include <linux/splice.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
+#include <linux/llist.h>
#include <net/flow.h>
+#include <net/page_pool.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_common.h>
#endif
+#include <net/net_debug.h>
+#include <net/dropreason.h>
-/* The interface for checksum offload between the stack and networking drivers
+/**
+ * DOC: skb checksums
+ *
+ * The interface for checksum offload between the stack and networking drivers
* is as follows...
*
- * A. IP checksum related features
+ * IP checksum related features
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* Drivers advertise checksum offload capabilities in the features of a device.
- * From the stack's point of view these are capabilities offered by the driver,
- * a driver typically only advertises features that it is capable of offloading
+ * From the stack's point of view these are capabilities offered by the driver.
+ * A driver typically only advertises features that it is capable of offloading
* to its device.
*
- * The checksum related features are:
- *
- * NETIF_F_HW_CSUM - The driver (or its device) is able to compute one
- * IP (one's complement) checksum for any combination
- * of protocols or protocol layering. The checksum is
- * computed and set in a packet per the CHECKSUM_PARTIAL
- * interface (see below).
- *
- * NETIF_F_IP_CSUM - Driver (device) is only able to checksum plain
- * TCP or UDP packets over IPv4. These are specifically
- * unencapsulated packets of the form IPv4|TCP or
- * IPv4|UDP where the Protocol field in the IPv4 header
- * is TCP or UDP. The IPv4 header may contain IP options
- * This feature cannot be set in features for a device
- * with NETIF_F_HW_CSUM also set. This feature is being
- * DEPRECATED (see below).
- *
- * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
- * TCP or UDP packets over IPv6. These are specifically
- * unencapsulated packets of the form IPv6|TCP or
- * IPv4|UDP where the Next Header field in the IPv6
- * header is either TCP or UDP. IPv6 extension headers
- * are not supported with this feature. This feature
- * cannot be set in features for a device with
- * NETIF_F_HW_CSUM also set. This feature is being
- * DEPRECATED (see below).
- *
- * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload.
- * This flag is used only used to disable the RX checksum
- * feature for a device. The stack will accept receive
- * checksum indication in packets received on a device
- * regardless of whether NETIF_F_RXCSUM is set.
- *
- * B. Checksumming of received packets by device. Indication of checksum
- * verification is in set skb->ip_summed. Possible values are:
- *
- * CHECKSUM_NONE:
+ * .. flat-table:: Checksum related device features
+ * :widths: 1 10
+ *
+ * * - %NETIF_F_HW_CSUM
+ * - The driver (or its device) is able to compute one
+ * IP (one's complement) checksum for any combination
+ * of protocols or protocol layering. The checksum is
+ * computed and set in a packet per the CHECKSUM_PARTIAL
+ * interface (see below).
+ *
+ * * - %NETIF_F_IP_CSUM
+ * - Driver (device) is only able to checksum plain
+ * TCP or UDP packets over IPv4. These are specifically
+ * unencapsulated packets of the form IPv4|TCP or
+ * IPv4|UDP where the Protocol field in the IPv4 header
+ * is TCP or UDP. The IPv4 header may contain IP options.
+ * This feature cannot be set in features for a device
+ * with NETIF_F_HW_CSUM also set. This feature is being
+ * DEPRECATED (see below).
+ *
+ * * - %NETIF_F_IPV6_CSUM
+ * - Driver (device) is only able to checksum plain
+ * TCP or UDP packets over IPv6. These are specifically
+ * unencapsulated packets of the form IPv6|TCP or
+ * IPv6|UDP where the Next Header field in the IPv6
+ * header is either TCP or UDP. IPv6 extension headers
+ * are not supported with this feature. This feature
+ * cannot be set in features for a device with
+ * NETIF_F_HW_CSUM also set. This feature is being
+ * DEPRECATED (see below).
+ *
+ * * - %NETIF_F_RXCSUM
+ * - Driver (device) performs receive checksum offload.
+ * This flag is only used to disable the RX checksum
+ * feature for a device. The stack will accept receive
+ * checksum indication in packets received on a device
+ * regardless of whether NETIF_F_RXCSUM is set.
+ *
+ * Checksumming of received packets by device
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Indication of checksum verification is set in &sk_buff.ip_summed.
+ * Possible values are:
+ *
+ * - %CHECKSUM_NONE
*
* Device did not checksum this packet e.g. due to lack of capabilities.
* The packet contains the full (though not verified) checksum in the packet but
* not in skb->csum. Thus, skb->csum is undefined in this case.
*
- * CHECKSUM_UNNECESSARY:
+ * - %CHECKSUM_UNNECESSARY
*
* The hardware you're dealing with doesn't calculate the full checksum
- * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
- * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
- * if their checksums are okay. skb->csum is still undefined in this case
+ * (as in %CHECKSUM_COMPLETE), but it does parse headers and verify checksums
+ * for specific protocols. For such packets it will set %CHECKSUM_UNNECESSARY
+ * if their checksums are okay. &sk_buff.csum is still undefined in this case
* though. A driver or device must never modify the checksum field in the
* packet even if checksum is verified.
*
- * CHECKSUM_UNNECESSARY is applicable to following protocols:
- * TCP: IPv6 and IPv4.
- * UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
+ * %CHECKSUM_UNNECESSARY is applicable to following protocols:
+ *
+ * - TCP: IPv6 and IPv4.
+ * - UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
* zero UDP checksum for either IPv4 or IPv6, the networking stack
* may perform further validation in this case.
- * GRE: only if the checksum is present in the header.
- * SCTP: indicates the CRC in SCTP header has been validated.
- * FCOE: indicates the CRC in FC frame has been validated.
+ * - GRE: only if the checksum is present in the header.
+ * - SCTP: indicates the CRC in SCTP header has been validated.
+ * - FCOE: indicates the CRC in FC frame has been validated.
*
- * skb->csum_level indicates the number of consecutive checksums found in
- * the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
+ * &sk_buff.csum_level indicates the number of consecutive checksums found in
+ * the packet minus one that have been verified as %CHECKSUM_UNNECESSARY.
* For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
* and a device is able to verify the checksums for UDP (possibly zero),
- * GRE (checksum flag is set), and TCP-- skb->csum_level would be set to
+ * GRE (checksum flag is set) and TCP, &sk_buff.csum_level would be set to
* two. If the device were only able to verify the UDP checksum and not
- * GRE, either because it doesn't support GRE checksum of because GRE
+ * GRE, either because it doesn't support GRE checksum or because GRE
* checksum is bad, skb->csum_level would be set to zero (TCP checksum is
* not considered in this case).
*
- * CHECKSUM_COMPLETE:
+ * - %CHECKSUM_COMPLETE
*
* This is the most generic way. The device supplies the checksum of the _whole_
- * packet as seen by netif_rx() and fills out in skb->csum. Meaning, the
+ * packet as seen by netif_rx() and fills in &sk_buff.csum. This means the
* hardware doesn't need to parse L3/L4 headers to implement this.
*
* Notes:
+ *
* - Even if device supports only some protocols, but is able to produce
* skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
* - CHECKSUM_COMPLETE is not applicable to SCTP and FCoE protocols.
*
- * CHECKSUM_PARTIAL:
+ * - %CHECKSUM_PARTIAL
*
* A checksum is set up to be offloaded to a device as described in the
* output description for CHECKSUM_PARTIAL. This may occur on a packet
@@ -144,17 +162,21 @@
* packet that are after the checksum being offloaded are not considered to
* be verified.
*
- * C. Checksumming on transmit for non-GSO. The stack requests checksum offload
- * in the skb->ip_summed for a packet. Values are:
+ * Checksumming on transmit for non-GSO
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The stack requests checksum offload in the &sk_buff.ip_summed for a packet.
+ * Values are:
*
- * CHECKSUM_PARTIAL:
+ * - %CHECKSUM_PARTIAL
*
* The driver is required to checksum the packet as seen by hard_start_xmit()
- * from skb->csum_start up to the end, and to record/write the checksum at
- * offset skb->csum_start + skb->csum_offset. A driver may verify that the
+ * from &sk_buff.csum_start up to the end, and to record/write the checksum at
+ * offset &sk_buff.csum_start + &sk_buff.csum_offset.
+ * A driver may verify that the
* csum_start and csum_offset values are valid values given the length and
- * offset of the packet, however they should not attempt to validate that the
- * checksum refers to a legitimate transport layer checksum-- it is the
+ * offset of the packet, but it should not attempt to validate that the
+ * checksum refers to a legitimate transport layer checksum -- it is the
* purview of the stack to validate that csum_start and csum_offset are set
* correctly.
*
@@ -163,57 +185,68 @@
* checksum calculation to the device, or call skb_checksum_help (in the case
* that the device does not support offload for a particular checksum).
*
- * NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of
- * NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate
+ * %NETIF_F_IP_CSUM and %NETIF_F_IPV6_CSUM are being deprecated in favor of
+ * %NETIF_F_HW_CSUM. New devices should use %NETIF_F_HW_CSUM to indicate
* checksum offload capability.
- * skb_csum_hwoffload_help() can be called to resolve CHECKSUM_PARTIAL based
+ * skb_csum_hwoffload_help() can be called to resolve %CHECKSUM_PARTIAL based
* on network device checksumming capabilities: if a packet does not match
- * them, skb_checksum_help or skb_crc32c_help (depending on the value of
- * csum_not_inet, see item D.) is called to resolve the checksum.
+ * them, skb_checksum_help() or skb_crc32c_help() (depending on the value of
+ * &sk_buff.csum_not_inet, see :ref:`crc`)
+ * is called to resolve the checksum.
*
- * CHECKSUM_NONE:
+ * - %CHECKSUM_NONE
*
* The skb was already checksummed by the protocol, or a checksum is not
* required.
*
- * CHECKSUM_UNNECESSARY:
+ * - %CHECKSUM_UNNECESSARY
*
- * This has the same meaning on as CHECKSUM_NONE for checksum offload on
+ * This has the same meaning as CHECKSUM_NONE for checksum offload on
* output.
*
- * CHECKSUM_COMPLETE:
+ * - %CHECKSUM_COMPLETE
+ *
* Not used in checksum output. If a driver observes a packet with this value
- * set in skbuff, if should treat as CHECKSUM_NONE being set.
- *
- * D. Non-IP checksum (CRC) offloads
- *
- * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
- * offloading the SCTP CRC in a packet. To perform this offload the stack
- * will set set csum_start and csum_offset accordingly, set ip_summed to
- * CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in
- * the skbuff that the CHECKSUM_PARTIAL refers to CRC32c.
- * A driver that supports both IP checksum offload and SCTP CRC32c offload
- * must verify which offload is configured for a packet by testing the
- * value of skb->csum_not_inet; skb_crc32c_csum_help is provided to resolve
- * CHECKSUM_PARTIAL on skbs where csum_not_inet is set to 1.
- *
- * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
- * offloading the FCOE CRC in a packet. To perform this offload the stack
- * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
- * accordingly. Note the there is no indication in the skbuff that the
- * CHECKSUM_PARTIAL refers to an FCOE checksum, a driver that supports
- * both IP checksum offload and FCOE CRC offload must verify which offload
- * is configured for a packet presumably by inspecting packet headers.
- *
- * E. Checksumming on output with GSO.
- *
- * In the case of a GSO packet (skb_is_gso(skb) is true), checksum offload
+ * set in skbuff, it should treat the packet as if %CHECKSUM_NONE were set.
+ *
+ * .. _crc:
+ *
+ * Non-IP checksum (CRC) offloads
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * .. flat-table::
+ * :widths: 1 10
+ *
+ * * - %NETIF_F_SCTP_CRC
+ * - This feature indicates that a device is capable of
+ * offloading the SCTP CRC in a packet. To perform this offload the stack
+ * will set csum_start and csum_offset accordingly, set ip_summed to
+ * %CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication
+ * in the skbuff that the %CHECKSUM_PARTIAL refers to CRC32c.
+ * A driver that supports both IP checksum offload and SCTP CRC32c offload
+ * must verify which offload is configured for a packet by testing the
+ * value of &sk_buff.csum_not_inet; skb_crc32c_csum_help() is provided to
+ * resolve %CHECKSUM_PARTIAL on skbs where csum_not_inet is set to 1
+ * (see the dispatch sketch after this table).
+ *
+ * * - %NETIF_F_FCOE_CRC
+ * - This feature indicates that a device is capable of offloading the FCOE
+ * CRC in a packet. To perform this offload the stack will set ip_summed
+ * to %CHECKSUM_PARTIAL and set csum_start and csum_offset
+ * accordingly. Note that there is no indication in the skbuff that the
+ * %CHECKSUM_PARTIAL refers to an FCOE checksum, so a driver that supports
+ * both IP checksum offload and FCOE CRC offload must verify which offload
+ * is configured for a packet, presumably by inspecting packet headers.
+ *
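+ * For illustration only (not from this header): a driver advertising both
+ * %NETIF_F_HW_CSUM and %NETIF_F_SCTP_CRC might dispatch on
+ * &sk_buff.csum_not_inet roughly as follows, where hw_resolve_crc32c()
+ * and hw_resolve_csum() are hypothetical driver helpers::
+ *
+ *     if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ *             if (skb->csum_not_inet)
+ *                     err = hw_resolve_crc32c(dev, skb);
+ *             else
+ *                     err = hw_resolve_csum(dev, skb);
+ *     }
+ *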
+ * Checksumming on output with GSO
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * In the case of a GSO packet (skb_is_gso() is true), checksum offload
* is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the
- * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as
+ * gso_type is %SKB_GSO_TCPV4 or %SKB_GSO_TCPV6, TCP checksum offload as
* part of the GSO operation is implied. If a checksum is being offloaded
- * with GSO then ip_summed is CHECKSUM_PARTIAL, csum_start and csum_offset
- * are set to refer to the outermost checksum being offload (two offloaded
- * checksums are possible with UDP encapsulation).
+ * with GSO then ip_summed is %CHECKSUM_PARTIAL, and both csum_start and
+ * csum_offset are set to refer to the outermost checksum being offloaded
+ * (two offloaded checksums are possible with UDP encapsulation).
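+ *
+ * As an illustrative sketch (not from this header), a driver deciding
+ * whether to program TSO for a GSO skb might check, with
+ * setup_tso_descriptor() being a hypothetical helper::
+ *
+ *     if (skb_is_gso(skb) &&
+ *         (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) &&
+ *         skb->ip_summed == CHECKSUM_PARTIAL)
+ *             setup_tso_descriptor(dev, skb);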
*/
/* Don't change this without changing skb_csum_unnecessary! */
@@ -238,6 +271,7 @@
SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+struct ahash_request;
struct net_device;
struct scatterlist;
struct pipe_inode_info;
@@ -283,13 +317,20 @@ struct nf_bridge_info {
*/
struct tc_skb_ext {
__u32 chain;
+ __u16 mru;
+ __u16 zone;
+ u8 post_ct:1;
+ u8 post_ct_snat:1;
+ u8 post_ct_dnat:1;
};
#endif
struct sk_buff_head {
- /* These two members must be first. */
- struct sk_buff *next;
- struct sk_buff *prev;
+ /* These two members must be first to match sk_buff. */
+ struct_group_tagged(sk_buff_list, list,
+ struct sk_buff *next;
+ struct sk_buff *prev;
+ );
__u32 qlen;
spinlock_t lock;
@@ -364,7 +405,7 @@ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
static inline bool skb_frag_must_loop(struct page *p)
{
#if defined(CONFIG_HIGHMEM)
- if (PageHighMem(p))
+ if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) || PageHighMem(p))
return true;
#endif
return false;
@@ -401,8 +442,10 @@ static inline bool skb_frag_must_loop(struct page *p)
/**
* struct skb_shared_hwtstamps - hardware time stamps
- * @hwtstamp: hardware time stamp transformed into duration
- * since arbitrary point in time
+ * @hwtstamp: hardware time stamp transformed into duration
+ * since arbitrary point in time
+ * @netdev_data: address/cookie of the network device driver, used as
+ * a reference to the actual hardware time stamp
*
* Software time stamps generated by ktime_get_real() are stored in
* skb->tstamp.
@@ -414,7 +457,10 @@ static inline bool skb_frag_must_loop(struct page *p)
* &skb_shared_info. Use skb_hwtstamps() to get a pointer.
*/
struct skb_shared_hwtstamps {
- ktime_t hwtstamp;
+ union {
+ ktime_t hwtstamp;
+ void *netdev_data;
+ };
};
/* Definitions for tx_flags in struct skb_shared_info */
@@ -428,27 +474,53 @@ enum {
/* device driver is going to provide hardware time stamp */
SKBTX_IN_PROGRESS = 1 << 2,
- /* device driver supports TX zero-copy buffers */
- SKBTX_DEV_ZEROCOPY = 1 << 3,
+ /* generate hardware time stamp based on cycles if supported */
+ SKBTX_HW_TSTAMP_USE_CYCLES = 1 << 3,
/* generate wifi status information (where possible) */
SKBTX_WIFI_STATUS = 1 << 4,
+ /* determine hardware time stamp based on time or cycles */
+ SKBTX_HW_TSTAMP_NETDEV = 1 << 5,
+
+ /* generate software time stamp when entering packet scheduling */
+ SKBTX_SCHED_TSTAMP = 1 << 6,
+};
+
+#define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
+ SKBTX_SCHED_TSTAMP)
+#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | \
+ SKBTX_HW_TSTAMP_USE_CYCLES | \
+ SKBTX_ANY_SW_TSTAMP)
+
+/* Definitions for flags in struct skb_shared_info */
+enum {
+ /* use zcopy routines */
+ SKBFL_ZEROCOPY_ENABLE = BIT(0),
+
/* This indicates at least one fragment might be overwritten
* (as in vmsplice(), sendfile() ...)
* If we need to compute a TX checksum, we'll need to copy
* all frags to avoid possible bad checksum
*/
- SKBTX_SHARED_FRAG = 1 << 5,
+ SKBFL_SHARED_FRAG = BIT(1),
- /* generate software time stamp when entering packet scheduling */
- SKBTX_SCHED_TSTAMP = 1 << 6,
+ /* segment contains only zerocopy data and should not be
+ * charged to kernel memory.
+ */
+ SKBFL_PURE_ZEROCOPY = BIT(2),
+
+ SKBFL_DONT_ORPHAN = BIT(3),
+
+ /* page references are managed by the ubuf_info, so it's safe to
+ * use frags only up until ubuf_info is released
+ */
+ SKBFL_MANAGED_FRAG_REFS = BIT(4),
};
-#define SKBTX_ZEROCOPY_FRAG (SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG)
-#define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
- SKBTX_SCHED_TSTAMP)
-#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
+#define SKBFL_ZEROCOPY_FRAG (SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG)
+#define SKBFL_ALL_ZEROCOPY (SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY | \
+ SKBFL_DONT_ORPHAN | SKBFL_MANAGED_FRAG_REFS)
/*
* The callback notifies userspace to release buffers when skb DMA is done in
@@ -459,7 +531,15 @@ enum {
* The desc field is used to track userspace buffer index.
*/
struct ubuf_info {
- void (*callback)(struct ubuf_info *, bool zerocopy_success);
+ void (*callback)(struct sk_buff *, struct ubuf_info *,
+ bool zerocopy_success);
+ refcount_t refcnt;
+ u8 flags;
+};
+
+struct ubuf_info_msgzc {
+ struct ubuf_info ubuf;
+
union {
struct {
unsigned long desc;
@@ -472,7 +552,6 @@ struct ubuf_info {
u32 bytelen;
};
};
- refcount_t refcnt;
struct mmpin {
struct user_struct *user;
@@ -481,34 +560,17 @@ struct ubuf_info {
};
#define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
+#define uarg_to_msgzc(ubuf_ptr) container_of((ubuf_ptr), struct ubuf_info_msgzc, \
+ ubuf)
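+
+/* Illustrative only: code that knows a generic uarg originated from
+ * MSG_ZEROCOPY can recover the containing structure with
+ *
+ *	struct ubuf_info_msgzc *zc = uarg_to_msgzc(uarg);
+ */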
int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);
-struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
-struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
- struct ubuf_info *uarg);
-
-static inline void sock_zerocopy_get(struct ubuf_info *uarg)
-{
- refcount_inc(&uarg->refcnt);
-}
-
-void sock_zerocopy_put(struct ubuf_info *uarg);
-void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
-
-void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);
-
-int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
-int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
- struct msghdr *msg, int len,
- struct ubuf_info *uarg);
-
/* This data is invariant across clones and lives at
* the end of the header data, ie. at skb->end.
*/
struct skb_shared_info {
- __u8 __unused;
+ __u8 flags;
__u8 meta_len;
__u8 nr_frags;
__u8 tx_flags;
@@ -524,6 +586,7 @@ struct skb_shared_info {
* Warning : all fields before dataref are cleared in __alloc_skb()
*/
atomic_t dataref;
+ unsigned int xdp_frags_size;
/* Intermediate layers must ensure that destructor_arg
* remains valid until skb destructor */
@@ -533,16 +596,32 @@ struct skb_shared_info {
skb_frag_t frags[MAX_SKB_FRAGS];
};
-/* We divide dataref into two halves. The higher 16 bits hold references
- * to the payload part of skb->data. The lower 16 bits hold references to
- * the entire skb->data. A clone of a headerless skb holds the length of
- * the header in skb->hdr_len.
- *
- * All users must obey the rule that the skb->data reference count must be
- * greater than or equal to the payload reference count.
- *
- * Holding a reference to the payload part means that the user does not
- * care about modifications to the header part of skb->data.
+/**
+ * DOC: dataref and headerless skbs
+ *
+ * Transport layers send out clones of payload skbs they hold for
+ * retransmissions. To allow lower layers of the stack to prepend their headers
+ * we split &skb_shared_info.dataref into two halves.
+ * The lower 16 bits count the overall number of references.
+ * The higher 16 bits indicate how many of the references are payload-only.
+ * skb_header_cloned() checks if skb is allowed to add / write the headers.
+ *
+ * The creator of the skb (e.g. TCP) marks its skb as &sk_buff.nohdr
+ * (via __skb_header_release()). Any clone created from marked skb will get
+ * &sk_buff.hdr_len populated with the available headroom.
+ * If only one clone exists, it can modify the headroom
+ * at will. The sequence of calls inside the transport layer is::
+ *
+ * <alloc skb>
+ * skb_reserve()
+ * __skb_header_release()
+ * skb_clone()
+ * // send the clone down the stack
+ *
+ * This is not a very generic construct and it depends on the transport layers
+ * doing the right thing. In practice there's usually only one payload-only skb.
+ * Having multiple payload-only skbs with different hdr_len values is not
+ * possible. Payload-only skbs should never leave their owner.
*/
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
@@ -607,6 +686,46 @@ typedef unsigned char *sk_buff_data_t;
#endif
/**
+ * DOC: Basic sk_buff geometry
+ *
+ * struct sk_buff itself is a metadata structure and does not hold any packet
+ * data. All the data is held in associated buffers.
+ *
+ * &sk_buff.head points to the main "head" buffer. The head buffer is divided
+ * into two parts:
+ *
+ * - data buffer, containing headers and sometimes payload;
+ * this is the part of the skb operated on by the common helpers
+ * such as skb_put() or skb_pull();
+ * - shared info (struct skb_shared_info) which holds an array of pointers
+ * to read-only data in the (page, offset, length) format.
+ *
+ * Optionally &skb_shared_info.frag_list may point to another skb.
+ *
+ * A basic diagram may look like this::
+ *
+ * ---------------
+ * | sk_buff |
+ * ---------------
+ * ,--------------------------- + head
+ * / ,----------------- + data
+ * / / ,----------- + tail
+ * | | | , + end
+ * | | | |
+ * v v v v
+ * -----------------------------------------------
+ * | headroom | data | tailroom | skb_shared_info |
+ * -----------------------------------------------
+ * + [page frag]
+ * + [page frag]
+ * + [page frag]
+ * + [page frag] ---------
+ * + frag_list --> | sk_buff |
+ * ---------
+ *
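+ * A typical fill sequence for the head buffer (illustrative only,
+ * hdr_len and payload_len are made-up variables)::
+ *
+ *     skb = alloc_skb(size, GFP_KERNEL);     (len == 0, data == tail)
+ *     skb_reserve(skb, NET_IP_ALIGN);        (grow the headroom)
+ *     skb_put(skb, hdr_len + payload_len);   (grow data towards end)
+ *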
+ */
+
+/**
* struct sk_buff - socket buffer
* @next: Next buffer in list
* @prev: Previous buffer in list
@@ -615,6 +734,7 @@ typedef unsigned char *sk_buff_data_t;
* for retransmit timer
* @rbnode: RB tree node, alternative to next/prev for netem/tcp
* @list: queue head
+ * @ll_node: anchor in an llist (e.g. socket defer_list)
* @sk: Socket we are owned by
* @ip_defrag_offset: (aka @sk) alternate use of @sk, used in
* fragmentation management
@@ -645,14 +765,16 @@ typedef unsigned char *sk_buff_data_t;
* @offload_l3_fwd_mark: Packet was L3-forwarded in hardware
* @tc_skip_classify: do not classify packet. set by IFB device
* @tc_at_ingress: used within tc_classify to distinguish in/egress
- * @tc_redirected: packet was redirected by a tc action
- * @tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect
+ * @redirected: packet was redirected by packet classifier
+ * @from_ingress: packet was redirected from the ingress path
+ * @nf_skip_egress: packet shall skip nf egress - see netfilter_netdev.h
* @peeked: this packet has been seen already, so stats have been
* done for it, don't do them again
* @nf_trace: netfilter packet trace flag
* @protocol: Packet protocol from driver
* @destructor: Destruct function
* @tcp_tsorted_anchor: list structure for TCP (tp->tsorted_sent_queue)
+ * @_sk_redir: socket redirection information for skmsg
* @_nfct: Associated connection, if any (with nfctinfo bits)
* @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
* @skb_iif: ifindex of device we arrived on
@@ -662,6 +784,8 @@ typedef unsigned char *sk_buff_data_t;
* @head_frag: skb was allocated from page fragments,
* not allocated by kmalloc() or vmalloc().
* @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
+ * @pp_recycle: mark the packet for recycling instead of freeing (implies
+ * page_pool support in the driver)
* @active_extensions: active extensions (skb_ext_id types)
* @ndisc_nodetype: router type (from link layer)
* @ooo_okay: allow the mapping of a socket to a queue to be changed
@@ -679,10 +803,17 @@ typedef unsigned char *sk_buff_data_t;
* @csum_level: indicates the number of consecutive checksums found in
* the packet minus one that have been verified as
* CHECKSUM_UNNECESSARY (max 3)
+ * @scm_io_uring: SKB holds io_uring registered files
* @dst_pending_confirm: need to confirm neighbour
* @decrypted: Decrypted SKB
+ * @slow_gro: state present at GRO time, slower prepare step required
+ * @mono_delivery_time: When set, skb->tstamp has the
+ * delivery_time in the mono clock base (i.e., EDT). Otherwise,
+ * skb->tstamp has the (rcv) timestamp at ingress and the
+ * delivery_time at egress.
* @napi_id: id of the NAPI struct this skb came from
* @sender_cpu: (aka @napi_id) source CPU in XPS
+ * @alloc_cpu: CPU which did the skb allocation.
* @secmark: security marking
* @mark: Generic packet mark
* @reserved_tailroom: (aka @mark) number of bytes of free space available
@@ -699,6 +830,7 @@ typedef unsigned char *sk_buff_data_t;
* @transport_header: Transport layer header
* @network_header: Network layer header
* @mac_header: Link layer header
+ * @kcov_handle: KCOV remote handle for remote coverage collection
* @tail: Tail pointer
* @end: End pointer
* @head: Head of buffer
@@ -711,7 +843,7 @@ typedef unsigned char *sk_buff_data_t;
struct sk_buff {
union {
struct {
- /* These two members must be first. */
+ /* These two members must be first to match sk_buff_head. */
struct sk_buff *next;
struct sk_buff *prev;
@@ -726,6 +858,7 @@ struct sk_buff {
};
struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */
struct list_head list;
+ struct llist_node ll_node;
};
union {
@@ -751,6 +884,9 @@ struct sk_buff {
void (*destructor)(struct sk_buff *skb);
};
struct list_head tcp_tsorted_anchor;
+#ifdef CONFIG_NET_SOCK_MSG
+ unsigned long _sk_redir;
+#endif
};
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
@@ -772,7 +908,7 @@ struct sk_buff {
#else
#define CLONED_MASK 1
#endif
-#define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset)
+#define CLONED_OFFSET offsetof(struct sk_buff, __cloned_offset)
/* private: */
__u8 __cloned_offset[0];
@@ -782,29 +918,21 @@ struct sk_buff {
fclone:2,
peeked:1,
head_frag:1,
- pfmemalloc:1;
+ pfmemalloc:1,
+ pp_recycle:1; /* page_pool recycle indicator */
#ifdef CONFIG_SKB_EXTENSIONS
__u8 active_extensions;
#endif
- /* fields enclosed in headers_start/headers_end are copied
+
+ /* Fields enclosed in headers group are copied
* using a single memcpy() in __copy_skb_header()
*/
- /* private: */
- __u32 headers_start[0];
- /* public: */
-
-/* if you move pkt_type around you also must adapt those constants */
-#ifdef __BIG_ENDIAN_BITFIELD
-#define PKT_TYPE_MAX (7 << 5)
-#else
-#define PKT_TYPE_MAX 7
-#endif
-#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset)
+ struct_group(headers,
/* private: */
__u8 __pkt_type_offset[0];
/* public: */
- __u8 pkt_type:3;
+ __u8 pkt_type:3; /* see PKT_TYPE_MAX */
__u8 ignore_df:1;
__u8 nf_trace:1;
__u8 ip_summed:2;
@@ -820,20 +948,18 @@ struct sk_buff {
__u8 encap_hdr_csum:1;
__u8 csum_valid:1;
-#ifdef __BIG_ENDIAN_BITFIELD
-#define PKT_VLAN_PRESENT_BIT 7
-#else
-#define PKT_VLAN_PRESENT_BIT 0
-#endif
-#define PKT_VLAN_PRESENT_OFFSET() offsetof(struct sk_buff, __pkt_vlan_present_offset)
/* private: */
__u8 __pkt_vlan_present_offset[0];
/* public: */
- __u8 vlan_present:1;
+ __u8 vlan_present:1; /* See PKT_VLAN_PRESENT_BIT */
__u8 csum_complete_sw:1;
__u8 csum_level:2;
- __u8 csum_not_inet:1;
__u8 dst_pending_confirm:1;
+ __u8 mono_delivery_time:1; /* See SKB_MONO_DELIVERY_TIME_MASK */
+#ifdef CONFIG_NET_CLS_ACT
+ __u8 tc_skip_classify:1;
+ __u8 tc_at_ingress:1; /* See TC_AT_INGRESS_MASK */
+#endif
#ifdef CONFIG_IPV6_NDISC_NODETYPE
__u8 ndisc_nodetype:2;
#endif
@@ -845,15 +971,19 @@ struct sk_buff {
__u8 offload_fwd_mark:1;
__u8 offload_l3_fwd_mark:1;
#endif
-#ifdef CONFIG_NET_CLS_ACT
- __u8 tc_skip_classify:1;
- __u8 tc_at_ingress:1;
- __u8 tc_redirected:1;
- __u8 tc_from_ingress:1;
+ __u8 redirected:1;
+#ifdef CONFIG_NET_REDIRECT
+ __u8 from_ingress:1;
+#endif
+#ifdef CONFIG_NETFILTER_SKIP_EGRESS
+ __u8 nf_skip_egress:1;
#endif
#ifdef CONFIG_TLS_DEVICE
__u8 decrypted:1;
#endif
+ __u8 slow_gro:1;
+ __u8 csum_not_inet:1;
+ __u8 scm_io_uring:1;
#ifdef CONFIG_NET_SCHED
__u16 tc_index; /* traffic control index */
@@ -877,6 +1007,7 @@ struct sk_buff {
unsigned int sender_cpu;
};
#endif
+ u16 alloc_cpu;
#ifdef CONFIG_NETWORK_SECMARK
__u32 secmark;
#endif
@@ -900,9 +1031,11 @@ struct sk_buff {
__u16 network_header;
__u16 mac_header;
- /* private: */
- __u32 headers_end[0];
- /* public: */
+#ifdef CONFIG_KCOV
+ u64 kcov_handle;
+#endif
+
+ ); /* end headers group */
/* These elements must be at the end, see alloc_skb() for details. */
sk_buff_data_t tail;
@@ -918,6 +1051,28 @@ struct sk_buff {
#endif
};
+/* if you move pkt_type around you also must adapt those constants */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_TYPE_MAX (7 << 5)
+#else
+#define PKT_TYPE_MAX 7
+#endif
+#define PKT_TYPE_OFFSET offsetof(struct sk_buff, __pkt_type_offset)
+
+/* if you move pkt_vlan_present, tc_at_ingress, or mono_delivery_time
+ * around, you also must adapt these constants.
+ */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_VLAN_PRESENT_BIT 7
+#define TC_AT_INGRESS_MASK (1 << 0)
+#define SKB_MONO_DELIVERY_TIME_MASK (1 << 2)
+#else
+#define PKT_VLAN_PRESENT_BIT 0
+#define TC_AT_INGRESS_MASK (1 << 7)
+#define SKB_MONO_DELIVERY_TIME_MASK (1 << 5)
+#endif
+#define PKT_VLAN_PRESENT_OFFSET offsetof(struct sk_buff, __pkt_vlan_present_offset)
+
#ifdef __KERNEL__
/*
* Handling routines are only of interest to the kernel
@@ -970,6 +1125,7 @@ static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
*/
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
+ skb->slow_gro |= !!dst;
skb->_skb_refdst = (unsigned long)dst;
}
@@ -986,6 +1142,7 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
+ skb->slow_gro |= !!dst;
skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}
@@ -1047,12 +1204,38 @@ static inline bool skb_unref(struct sk_buff *skb)
return true;
}
+void __fix_address
+kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason);
+
+/**
+ * kfree_skb - free an sk_buff with 'NOT_SPECIFIED' reason
+ * @skb: buffer to free
+ */
+static inline void kfree_skb(struct sk_buff *skb)
+{
+ kfree_skb_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
+}
+
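+/* Illustrative only: call sites that know why they drop a packet should
+ * prefer kfree_skb_reason() with a specific reason so drop-monitoring
+ * tools can attribute the drop, e.g.
+ *
+ *	kfree_skb_reason(skb, SKB_DROP_REASON_NO_SOCKET);
+ */
+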
void skb_release_head_state(struct sk_buff *skb);
-void kfree_skb(struct sk_buff *skb);
-void kfree_skb_list(struct sk_buff *segs);
+void kfree_skb_list_reason(struct sk_buff *segs,
+ enum skb_drop_reason reason);
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
void skb_tx_error(struct sk_buff *skb);
+
+static inline void kfree_skb_list(struct sk_buff *segs)
+{
+ kfree_skb_list_reason(segs, SKB_DROP_REASON_NOT_SPECIFIED);
+}
+
+#ifdef CONFIG_TRACEPOINTS
void consume_skb(struct sk_buff *skb);
+#else
+static inline void consume_skb(struct sk_buff *skb)
+{
+ return kfree_skb(skb);
+}
+#endif
+
void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;
@@ -1067,6 +1250,9 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb_around(struct sk_buff *skb,
void *data, unsigned int frag_size);
+void skb_attempt_defer_free(struct sk_buff *skb);
+
+struct sk_buff *napi_build_skb(void *data, unsigned int frag_size);
/**
* alloc_skb - allocate a network buffer
@@ -1115,7 +1301,7 @@ static inline bool skb_fclone_busy(const struct sock *sk,
return skb->fclone == SKB_FCLONE_ORIG &&
refcount_read(&fclones->fclone_ref) > 1 &&
- fclones->skb2.sk == sk;
+ READ_ONCE(fclones->skb2.sk) == sk;
}
/**
@@ -1148,6 +1334,7 @@ static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
unsigned int headroom);
+struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
int newtailroom, gfp_t priority);
int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
@@ -1185,6 +1372,7 @@ struct skb_seq_state {
struct sk_buff *root_skb;
struct sk_buff *cur_skb;
__u8 *frag_data;
+ __u32 frag_off;
};
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
@@ -1266,10 +1454,10 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
void __skb_get_hash(struct sk_buff *skb);
u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
-u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
const struct flow_keys_basic *keys, int hlen);
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
- void *data, int hlen_proto);
+ const void *data, int hlen_proto);
static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
int thoff, u8 ip_proto)
@@ -1281,42 +1469,15 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
const struct flow_dissector_key *key,
unsigned int key_count);
-#ifdef CONFIG_NET
-int skb_flow_dissector_prog_query(const union bpf_attr *attr,
- union bpf_attr __user *uattr);
-int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
- struct bpf_prog *prog);
-
-int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr);
-#else
-static inline int skb_flow_dissector_prog_query(const union bpf_attr *attr,
- union bpf_attr __user *uattr)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
- struct bpf_prog *prog)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
-{
- return -EOPNOTSUPP;
-}
-#endif
-
struct bpf_flow_dissector;
-bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
- __be16 proto, int nhoff, int hlen, unsigned int flags);
+u32 bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
+ __be16 proto, int nhoff, int hlen, unsigned int flags);
bool __skb_flow_dissect(const struct net *net,
const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
- void *target_container,
- void *data, __be16 proto, int nhoff, int hlen,
- unsigned int flags);
+ void *target_container, const void *data,
+ __be16 proto, int nhoff, int hlen, unsigned int flags);
static inline bool skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
@@ -1338,9 +1499,9 @@ static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
static inline bool
skb_flow_dissect_flow_keys_basic(const struct net *net,
const struct sk_buff *skb,
- struct flow_keys_basic *flow, void *data,
- __be16 proto, int nhoff, int hlen,
- unsigned int flags)
+ struct flow_keys_basic *flow,
+ const void *data, __be16 proto,
+ int nhoff, int hlen, unsigned int flags)
{
memset(flow, 0, sizeof(*flow));
return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
@@ -1352,20 +1513,24 @@ void skb_flow_dissect_meta(const struct sk_buff *skb,
void *target_container);
/* Gets a skb connection tracking info, ctinfo map should be a
- * a map of mapsize to translate enum ip_conntrack_info states
+ * map of mapsize to translate enum ip_conntrack_info states
* to user states.
*/
void
skb_flow_dissect_ct(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container,
- u16 *ctinfo_map,
- size_t mapsize);
+ u16 *ctinfo_map, size_t mapsize,
+ bool post_ct, u16 zone);
void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container);
+void skb_flow_dissect_hash(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container);
+
static inline __u32 skb_get_hash(struct sk_buff *skb)
{
if (!skb->l4_hash && !skb->sw_hash)
@@ -1419,6 +1584,11 @@ static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
return skb->end;
}
+
+static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
+{
+ skb->end = offset;
+}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
@@ -1429,8 +1599,35 @@ static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
return skb->end - skb->head;
}
+
+static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
+{
+ skb->end = skb->head + offset;
+}
#endif
+struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
+ struct ubuf_info *uarg);
+
+void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
+
+void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
+ bool success);
+
+int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb, struct iov_iter *from,
+ size_t length);
+
+static inline int skb_zerocopy_iter_dgram(struct sk_buff *skb,
+ struct msghdr *msg, int len)
+{
+ return __zerocopy_sg_from_iter(msg, skb->sk, skb, &msg->msg_iter, len);
+}
+
+int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
+ struct msghdr *msg, int len,
+ struct ubuf_info *uarg);
+
/* Internal */
#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
@@ -1441,11 +1638,38 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
{
- bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY;
+ bool is_zcopy = skb && skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE;
return is_zcopy ? skb_uarg(skb) : NULL;
}
+static inline bool skb_zcopy_pure(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->flags & SKBFL_PURE_ZEROCOPY;
+}
+
+static inline bool skb_zcopy_managed(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->flags & SKBFL_MANAGED_FRAG_REFS;
+}
+
+static inline bool skb_pure_zcopy_same(const struct sk_buff *skb1,
+ const struct sk_buff *skb2)
+{
+ return skb_zcopy_pure(skb1) == skb_zcopy_pure(skb2);
+}
+
+static inline void net_zcopy_get(struct ubuf_info *uarg)
+{
+ refcount_inc(&uarg->refcnt);
+}
+
+static inline void skb_zcopy_init(struct sk_buff *skb, struct ubuf_info *uarg)
+{
+ skb_shinfo(skb)->destructor_arg = uarg;
+ skb_shinfo(skb)->flags |= uarg->flags;
+}
+
static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
bool *have_ref)
{
@@ -1453,16 +1677,15 @@ static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
if (unlikely(have_ref && *have_ref))
*have_ref = false;
else
- sock_zerocopy_get(uarg);
- skb_shinfo(skb)->destructor_arg = uarg;
- skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
+ net_zcopy_get(uarg);
+ skb_zcopy_init(skb, uarg);
}
}
static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
{
skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
- skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
+ skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG;
}
static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
@@ -1475,36 +1698,43 @@ static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
}
-/* Release a reference on a zerocopy structure */
-static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
+static inline void net_zcopy_put(struct ubuf_info *uarg)
{
- struct ubuf_info *uarg = skb_zcopy(skb);
+ if (uarg)
+ uarg->callback(NULL, uarg, true);
+}
+static inline void net_zcopy_put_abort(struct ubuf_info *uarg, bool have_uref)
+{
if (uarg) {
- if (skb_zcopy_is_nouarg(skb)) {
- /* no notification callback */
- } else if (uarg->callback == sock_zerocopy_callback) {
- uarg->zerocopy = uarg->zerocopy && zerocopy;
- sock_zerocopy_put(uarg);
- } else {
- uarg->callback(uarg, zerocopy);
- }
-
- skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
+ if (uarg->callback == msg_zerocopy_callback)
+ msg_zerocopy_put_abort(uarg, have_uref);
+ else if (have_uref)
+ net_zcopy_put(uarg);
}
}
-/* Abort a zerocopy operation and revert zckey on error in send syscall */
-static inline void skb_zcopy_abort(struct sk_buff *skb)
+/* Release a reference on a zerocopy structure */
+static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success)
{
struct ubuf_info *uarg = skb_zcopy(skb);
if (uarg) {
- sock_zerocopy_put_abort(uarg, false);
- skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
+ if (!skb_zcopy_is_nouarg(skb))
+ uarg->callback(skb, uarg, zerocopy_success);
+
+ skb_shinfo(skb)->flags &= ~SKBFL_ALL_ZEROCOPY;
}
}
+void __skb_zcopy_downgrade_managed(struct sk_buff *skb);
+
+static inline void skb_zcopy_downgrade_managed(struct sk_buff *skb)
+{
+ if (unlikely(skb_zcopy_managed(skb)))
+ __skb_zcopy_downgrade_managed(skb);
+}
+
static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
skb->next = NULL;
@@ -1648,6 +1878,22 @@ static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
return 0;
}
+/* This variant of skb_unclone() makes sure skb->truesize and
+ * skb_end_offset() are not changed whenever a new skb->head is needed.
+ *
+ * Indeed there is no guarantee that ksize(kmalloc(X)) == ksize(kmalloc(X))
+ * when various debugging features are in place.
+ */
+int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri);
+static inline int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
+{
+ might_sleep_if(gfpflags_allow_blocking(pri));
+
+ if (skb_cloned(skb))
+ return __skb_unclone_keeptruesize(skb, pri);
+ return 0;
+}
+
/**
* skb_header_cloned - is the header a clone
* @skb: buffer to check
@@ -1678,8 +1924,10 @@ static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
}
/**
- * __skb_header_release - release reference to header
- * @skb: buffer to operate on
+ * __skb_header_release() - allow clones to use the headroom
+ * @skb: buffer to operate on
+ *
+ * See "DOC: dataref and headerless skbs".
*/
static inline void __skb_header_release(struct sk_buff *skb)
{
@@ -1915,9 +2163,9 @@ static inline void __skb_insert(struct sk_buff *newsk,
*/
WRITE_ONCE(newsk->next, next);
WRITE_ONCE(newsk->prev, prev);
- WRITE_ONCE(next->prev, newsk);
- WRITE_ONCE(prev->next, newsk);
- list->qlen++;
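+	/* next/prev may really point at the list head, whose first two
+	 * members alias struct sk_buff_list; writing through that common
+	 * prefix stays within the bounds of both layouts.
+	 */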
+ WRITE_ONCE(((struct sk_buff_list *)next)->prev, newsk);
+ WRITE_ONCE(((struct sk_buff_list *)prev)->next, newsk);
+ WRITE_ONCE(list->qlen, list->qlen + 1);
}
static inline void __skb_queue_splice(const struct sk_buff_head *list,
@@ -2012,7 +2260,7 @@ static inline void __skb_queue_after(struct sk_buff_head *list,
struct sk_buff *prev,
struct sk_buff *newsk)
{
- __skb_insert(newsk, prev, prev->next, list);
+ __skb_insert(newsk, prev, ((struct sk_buff_list *)prev)->next, list);
}
void skb_append(struct sk_buff *old, struct sk_buff *newsk,
@@ -2022,7 +2270,7 @@ static inline void __skb_queue_before(struct sk_buff_head *list,
struct sk_buff *next,
struct sk_buff *newsk)
{
- __skb_insert(newsk, next->prev, next, list);
+ __skb_insert(newsk, ((struct sk_buff_list *)next)->prev, next, list);
}
/**
@@ -2135,6 +2383,34 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb)
return skb_headlen(skb) + __skb_pagelen(skb);
}
+static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo,
+ int i, struct page *page,
+ int off, int size)
+{
+ skb_frag_t *frag = &shinfo->frags[i];
+
+ /*
+ * Propagate page pfmemalloc to the skb if we can. The problem is
+ * that not all callers have unique ownership of the page but rely
+ * on page_is_pfmemalloc doing the right thing(tm).
+ */
+ frag->bv_page = page;
+ frag->bv_offset = off;
+ skb_frag_size_set(frag, size);
+}
+
+/**
+ * skb_len_add - adds a number to len fields of skb
+ * @skb: buffer to add len to
+ * @delta: number of bytes to add
+ */
+static inline void skb_len_add(struct sk_buff *skb, int delta)
+{
+ skb->len += delta;
+ skb->data_len += delta;
+ skb->truesize += delta;
+}
+
/**
* __skb_fill_page_desc - initialise a paged fragment in an skb
* @skb: buffer containing fragment to be initialised
@@ -2151,17 +2427,7 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb)
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
struct page *page, int off, int size)
{
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
- /*
- * Propagate page pfmemalloc to the skb if we can. The problem is
- * that not all callers have unique ownership of the page but rely
- * on page_is_pfmemalloc doing the right thing(tm).
- */
- frag->bv_page = page;
- frag->bv_offset = off;
- skb_frag_size_set(frag, size);
-
+ __skb_fill_page_desc_noacc(skb_shinfo(skb), i, page, off, size);
page = compound_head(page);
if (page_is_pfmemalloc(page))
skb->pfmemalloc = true;
@@ -2188,6 +2454,27 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
skb_shinfo(skb)->nr_frags = i + 1;
}
+/**
+ * skb_fill_page_desc_noacc - initialise a paged fragment in an skb
+ * @skb: buffer containing fragment to be initialised
+ * @i: paged fragment index to initialise
+ * @page: the page to use for this fragment
+ * @off: the offset to the data within @page
+ * @size: the length of the data
+ *
+ * Variant of skb_fill_page_desc() which does not deal with
+ * pfmemalloc; use it when @page is not owned by us.
+ */
+static inline void skb_fill_page_desc_noacc(struct sk_buff *skb, int i,
+ struct page *page, int off,
+ int size)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+ __skb_fill_page_desc_noacc(shinfo, i, page, off, size);
+ shinfo->nr_frags = i + 1;
+}
+
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
int size, unsigned int truesize);
@@ -2231,6 +2518,14 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+static inline void skb_assert_len(struct sk_buff *skb)
+{
+#ifdef CONFIG_DEBUG_NET
+ if (WARN_ONCE(!skb->len, "%s\n", __func__))
+ DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
+#endif /* CONFIG_DEBUG_NET */
+}
+
/*
* Add data to an sk_buff
*/
@@ -2303,7 +2598,14 @@ void *skb_pull(struct sk_buff *skb, unsigned int len);
static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
skb->len -= len;
- BUG_ON(skb->len < skb->data_len);
+ if (unlikely(skb->len < skb->data_len)) {
+#if defined(CONFIG_DEBUG_NET)
+ skb->len += len;
+ pr_err("__skb_pull(len=%u)\n", len);
+ skb_dump(KERN_ERR, skb, false);
+#endif
+ BUG();
+ }
return skb->data += len;
}
@@ -2312,21 +2614,9 @@ static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}
-void *__pskb_pull_tail(struct sk_buff *skb, int delta);
-
-static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
-{
- if (len > skb_headlen(skb) &&
- !__pskb_pull_tail(skb, len - skb_headlen(skb)))
- return NULL;
- skb->len -= len;
- return skb->data += len;
-}
+void *skb_pull_data(struct sk_buff *skb, size_t len);
-static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
-{
- return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
-}
+void *__pskb_pull_tail(struct sk_buff *skb, int delta);
static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
@@ -2337,6 +2627,15 @@ static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
+static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
+{
+ if (!pskb_may_pull(skb, len))
+ return NULL;
+
+ skb->len -= len;
+ return skb->data += len;
+}
+
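+/* Illustrative parsing pattern (not from this header): linearize a header
+ * before reading it, e.g.
+ *
+ *	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
+ *		goto drop;
+ *	uh = udp_hdr(skb);
+ */
+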
void skb_condense(struct sk_buff *skb);
/**
@@ -2506,6 +2805,7 @@ static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
+ DEBUG_NET_WARN_ON_ONCE(!skb_transport_header_was_set(skb));
return skb->head + skb->transport_header;
}
@@ -2537,8 +2837,14 @@ static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
skb->network_header += offset;
}
+static inline int skb_mac_header_was_set(const struct sk_buff *skb)
+{
+ return skb->mac_header != (typeof(skb->mac_header))~0U;
+}
+
static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
+ DEBUG_NET_WARN_ON_ONCE(!skb_mac_header_was_set(skb));
return skb->head + skb->mac_header;
}
@@ -2549,12 +2855,13 @@ static inline int skb_mac_offset(const struct sk_buff *skb)
static inline u32 skb_mac_header_len(const struct sk_buff *skb)
{
+ DEBUG_NET_WARN_ON_ONCE(!skb_mac_header_was_set(skb));
return skb->network_header - skb->mac_header;
}
-static inline int skb_mac_header_was_set(const struct sk_buff *skb)
+static inline void skb_unset_mac_header(struct sk_buff *skb)
{
- return skb->mac_header != (typeof(skb->mac_header))~0U;
+ skb->mac_header = (typeof(skb->mac_header))~0U;
}
static inline void skb_reset_mac_header(struct sk_buff *skb)
@@ -2676,7 +2983,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
*
* Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
* to reduce average number of cache lines per packet.
- * get_rps_cpus() for example only access one 64 bytes aligned block :
+ * get_rps_cpu(), for example, only accesses one 64-byte aligned block:
* NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
*/
#ifndef NET_SKB_PAD
@@ -2774,8 +3081,7 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
if (likely(!skb_zcopy(skb)))
return 0;
- if (!skb_zcopy_is_nouarg(skb) &&
- skb_uarg(skb)->callback == sock_zerocopy_callback)
+ if (skb_shinfo(skb)->flags & SKBFL_DONT_ORPHAN)
return 0;
return skb_copy_ubufs(skb, gfp_mask);
}
@@ -2806,7 +3112,26 @@ void skb_queue_purge(struct sk_buff_head *list);
unsigned int skb_rbtree_purge(struct rb_root *root);
-void *netdev_alloc_frag(unsigned int fragsz);
+void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
+
+/**
+ * netdev_alloc_frag - allocate a page fragment
+ * @fragsz: fragment size
+ *
+ * Allocates a frag from a page, for use as a receive buffer.
+ * Uses GFP_ATOMIC allocations.
+ */
+static inline void *netdev_alloc_frag(unsigned int fragsz)
+{
+ return __netdev_alloc_frag_align(fragsz, ~0u);
+}
+
+static inline void *netdev_alloc_frag_align(unsigned int fragsz,
+ unsigned int align)
+{
+ WARN_ON_ONCE(!is_power_of_2(align));
+ return __netdev_alloc_frag_align(fragsz, -align);
+}
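+
+/* Illustrative only (rx_buf_len is a made-up driver variable): request a
+ * 64-byte aligned receive fragment with
+ *
+ *	void *buf = netdev_alloc_frag_align(rx_buf_len, 64);
+ *
+ * Passing -align as the mask works because align is a power of two, so
+ * -align == ~(align - 1).
+ */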
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
gfp_t gfp_mask);
@@ -2865,7 +3190,20 @@ static inline void skb_free_frag(void *addr)
page_frag_free(addr);
}
-void *napi_alloc_frag(unsigned int fragsz);
+void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
+
+static inline void *napi_alloc_frag(unsigned int fragsz)
+{
+ return __napi_alloc_frag_align(fragsz, ~0u);
+}
+
+static inline void *napi_alloc_frag_align(unsigned int fragsz,
+ unsigned int align)
+{
+ WARN_ON_ONCE(!is_power_of_2(align));
+ return __napi_alloc_frag_align(fragsz, -align);
+}
+
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
unsigned int length, gfp_t gfp_mask);
static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
@@ -2875,7 +3213,7 @@ static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
}
void napi_consume_skb(struct sk_buff *skb, int budget);
-void __kfree_skb_flush(void);
+void napi_skb_free_stolen_head(struct sk_buff *skb);
void __kfree_skb_defer(struct sk_buff *skb);
/**
@@ -2927,12 +3265,28 @@ static inline struct page *dev_alloc_page(void)
}
/**
+ * dev_page_is_reusable - check whether a page can be reused for network Rx
+ * @page: the page to test
+ *
+ * A page shouldn't be considered for reusing/recycling if it was allocated
+ * under memory pressure or at a distant memory node.
+ *
+ * Returns false if this page should be returned to the page allocator, true
+ * otherwise.
+ */
+static inline bool dev_page_is_reusable(const struct page *page)
+{
+ return likely(page_to_nid(page) == numa_mem_id() &&
+ !page_is_pfmemalloc(page));
+}
+
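+/* An illustrative Rx recycling pattern built on the helper above:
+ *
+ *	if (!dev_page_is_reusable(page)) {
+ *		put_page(page);
+ *		page = dev_alloc_page();
+ *	}
+ */
+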
+/**
* skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
* @page: The page that was allocated from skb_alloc_page
* @skb: The skb that may need pfmemalloc set
*/
-static inline void skb_propagate_pfmemalloc(struct page *page,
- struct sk_buff *skb)
+static inline void skb_propagate_pfmemalloc(const struct page *page,
+ struct sk_buff *skb)
{
if (page_is_pfmemalloc(page))
skb->pfmemalloc = true;
@@ -3015,12 +3369,20 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f)
/**
* __skb_frag_unref - release a reference on a paged fragment.
* @frag: the paged fragment
+ * @recycle: recycle the page if allocated via page_pool
*
- * Releases a reference on the paged fragment @frag.
+ * Releases a reference on the paged fragment @frag
+ * or recycles the page via the page_pool API.
*/
-static inline void __skb_frag_unref(skb_frag_t *frag)
+static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
{
- put_page(skb_frag_page(frag));
+ struct page *page = skb_frag_page(frag);
+
+#ifdef CONFIG_PAGE_POOL
+ if (recycle && page_pool_return_skb_page(page))
+ return;
+#endif
+ put_page(page);
}
/**
@@ -3032,7 +3394,10 @@ static inline void __skb_frag_unref(skb_frag_t *frag)
*/
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
- __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+ if (!skb_zcopy_managed(skb))
+ __skb_frag_unref(&shinfo->frags[f], skb->pp_recycle);
}
/**
@@ -3232,8 +3597,9 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
* is untouched. Otherwise it is extended. Returns zero on
* success. The skb is freed on error if @free_on_error is true.
*/
-static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
- bool free_on_error)
+static inline int __must_check __skb_put_padto(struct sk_buff *skb,
+ unsigned int len,
+ bool free_on_error)
{
unsigned int size = skb->len;
@@ -3256,7 +3622,7 @@ static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
* is untouched. Otherwise it is extended. Returns zero on
* success. The skb is freed on error.
*/
-static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
+static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len)
{
return __skb_put_padto(skb, len, true);
}
@@ -3321,7 +3687,7 @@ static inline int skb_linearize(struct sk_buff *skb)
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
{
return skb_is_nonlinear(skb) &&
- skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
+ skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG;
}
/**
@@ -3362,7 +3728,12 @@ __skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
static inline void skb_postpull_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
{
- __skb_postpull_rcsum(skb, start, len, 0);
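+	/* ~csum_partial(start, len, ~csum) subtracts the sum of the pulled
+	 * bytes from the running CHECKSUM_COMPLETE value using one's
+	 * complement arithmetic, without a separate csum_sub() call.
+	 */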
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = wsum_negate(csum_partial(start, len,
+ wsum_negate(skb->csum)));
+ else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_start_offset(skb) < 0)
+ skb->ip_summed = CHECKSUM_NONE;
}
static __always_inline void
@@ -3514,25 +3885,16 @@ int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
struct sk_buff_head *queue,
unsigned int flags,
- void (*destructor)(struct sock *sk,
- struct sk_buff *skb),
int *off, int *err,
struct sk_buff **last);
struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
struct sk_buff_head *queue,
- unsigned int flags,
- void (*destructor)(struct sock *sk,
- struct sk_buff *skb),
- int *off, int *err,
+ unsigned int flags, int *off, int *err,
struct sk_buff **last);
struct sk_buff *__skb_recv_datagram(struct sock *sk,
struct sk_buff_head *sk_queue,
- unsigned int flags,
- void (*destructor)(struct sock *sk,
- struct sk_buff *skb),
- int *off, int *err);
-struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
- int *err);
+ unsigned int flags, int *off, int *err);
+struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags, int *err);
__poll_t datagram_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
@@ -3561,12 +3923,13 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
- int len, __wsum csum);
+ int len);
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
struct pipe_inode_info *pipe, unsigned int len,
unsigned int flags);
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
int len);
+int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
@@ -3580,10 +3943,13 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
unsigned int offset);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
-int skb_ensure_writable(struct sk_buff *skb, int write_len);
+int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
+int skb_eth_pop(struct sk_buff *skb);
+int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
+ const unsigned char *src);
int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
int mac_len, bool ethernet);
int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
@@ -3616,14 +3982,13 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum csum);
static inline void * __must_check
-__skb_header_pointer(const struct sk_buff *skb, int offset,
- int len, void *data, int hlen, void *buffer)
+__skb_header_pointer(const struct sk_buff *skb, int offset, int len,
+ const void *data, int hlen, void *buffer)
{
- if (hlen - offset >= len)
- return data + offset;
+ if (likely(hlen - offset >= len))
+ return (void *)data + offset;
- if (!skb ||
- skb_copy_bits(skb, offset, buffer, len) < 0)
+ if (!skb || unlikely(skb_copy_bits(skb, offset, buffer, len) < 0))
return NULL;
return buffer;
@@ -3735,6 +4100,7 @@ static inline void skb_get_new_timestampns(const struct sk_buff *skb,
static inline void __net_timestamp(struct sk_buff *skb)
{
skb->tstamp = ktime_get_real();
+ skb->mono_delivery_time = 0;
}
static inline ktime_t net_timedelta(ktime_t t)
@@ -3742,8 +4108,53 @@ static inline ktime_t net_timedelta(ktime_t t)
return ktime_sub(ktime_get_real(), t);
}
-static inline ktime_t net_invalid_timestamp(void)
+static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt,
+ bool mono)
+{
+ skb->tstamp = kt;
+ skb->mono_delivery_time = kt && mono;
+}
+
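+/* Illustrative only (edt is a made-up ktime_t variable): a transport
+ * stamping an earliest-departure time would pass mono == true:
+ *
+ *	skb_set_delivery_time(skb, edt, true);
+ */
+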
+DECLARE_STATIC_KEY_FALSE(netstamp_needed_key);
+
+/* Used in the ingress path to clear the delivery_time.
+ * If needed, skb->tstamp is set to the (rcv) timestamp.
+ */
+static inline void skb_clear_delivery_time(struct sk_buff *skb)
+{
+ if (skb->mono_delivery_time) {
+ skb->mono_delivery_time = 0;
+ if (static_branch_unlikely(&netstamp_needed_key))
+ skb->tstamp = ktime_get_real();
+ else
+ skb->tstamp = 0;
+ }
+}
+
+static inline void skb_clear_tstamp(struct sk_buff *skb)
+{
+ if (skb->mono_delivery_time)
+ return;
+
+ skb->tstamp = 0;
+}
+
+static inline ktime_t skb_tstamp(const struct sk_buff *skb)
{
+ if (skb->mono_delivery_time)
+ return 0;
+
+ return skb->tstamp;
+}
+
+static inline ktime_t skb_tstamp_cond(const struct sk_buff *skb, bool cond)
+{
+ if (!skb->mono_delivery_time && skb->tstamp)
+ return skb->tstamp;
+
+ if (static_branch_unlikely(&netstamp_needed_key) || cond)
+ return ktime_get_real();
+
return 0;
}
@@ -3771,19 +4182,19 @@ static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
#define __it(x, op) (x -= sizeof(u##op))
#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
case 32: diffs |= __it_diff(a, b, 64);
- /* fall through */
+ fallthrough;
case 24: diffs |= __it_diff(a, b, 64);
- /* fall through */
+ fallthrough;
case 16: diffs |= __it_diff(a, b, 64);
- /* fall through */
+ fallthrough;
case 8: diffs |= __it_diff(a, b, 64);
break;
case 28: diffs |= __it_diff(a, b, 64);
- /* fall through */
+ fallthrough;
case 20: diffs |= __it_diff(a, b, 64);
- /* fall through */
+ fallthrough;
case 12: diffs |= __it_diff(a, b, 64);
- /* fall through */
+ fallthrough;
case 4: diffs |= __it_diff(a, b, 32);
break;
}
@@ -3844,14 +4255,14 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
* must call this function to return the skb back to the stack with a
* timestamp.
*
- * @skb: clone of the the original outgoing packet
+ * @skb: clone of the original outgoing packet
* @hwtstamps: hardware time stamps
*
*/
void skb_complete_tx_timestamp(struct sk_buff *skb,
struct skb_shared_hwtstamps *hwtstamps);
-void __skb_tstamp_tx(struct sk_buff *orig_skb,
+void __skb_tstamp_tx(struct sk_buff *orig_skb, const struct sk_buff *ack_skb,
struct skb_shared_hwtstamps *hwtstamps,
struct sock *sk, int tstype);
@@ -3951,6 +4362,14 @@ static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
}
}
+static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb)
+{
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->csum_level = 0;
+ }
+}
+
/* Check if we need to perform checksum complete validation.
*
* Returns true if checksum complete is needed, false otherwise
@@ -4101,7 +4520,7 @@ static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
return;
}
- if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
+ if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
__skb_checksum_complete(skb);
skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
}
@@ -4133,6 +4552,7 @@ static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ skb->slow_gro |= !!nfct;
skb->_nfct = nfct;
#endif
}
@@ -4151,6 +4571,9 @@ enum skb_ext_id {
#if IS_ENABLED(CONFIG_MPTCP)
SKB_EXT_MPTCP,
#endif
+#if IS_ENABLED(CONFIG_MCTP_FLOWS)
+ SKB_EXT_MCTP,
+#endif
SKB_EXT_NUM, /* must be last */
};
@@ -4168,10 +4591,10 @@ struct skb_ext {
refcount_t refcnt;
u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */
u8 chunks; /* same */
- char data[0] __aligned(8);
+ char data[] __aligned(8);
};
-struct skb_ext *__skb_ext_alloc(void);
+struct skb_ext *__skb_ext_alloc(gfp_t flags);
void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
struct skb_ext *ext);
void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
@@ -4292,6 +4715,7 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
nf_conntrack_put(skb_nfct(dst));
#endif
+ dst->slow_gro = src->slow_gro;
__nf_copy(dst, src, true);
}
@@ -4395,8 +4819,8 @@ struct skb_gso_cb {
__wsum csum;
__u16 csum_start;
};
-#define SKB_SGO_CB_OFFSET 32
-#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))
+#define SKB_GSO_CB_OFFSET 32
+#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))
static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
@@ -4528,9 +4952,7 @@ static inline void skb_forward_csum(struct sk_buff *skb)
*/
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
-#ifdef DEBUG
- BUG_ON(skb->ip_summed != CHECKSUM_NONE);
-#endif
+ DEBUG_NET_WARN_ON_ONCE(skb->ip_summed != CHECKSUM_NONE);
}
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
@@ -4579,5 +5001,61 @@ static inline __wsum lco_csum(struct sk_buff *skb)
return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
}
+static inline bool skb_is_redirected(const struct sk_buff *skb)
+{
+ return skb->redirected;
+}
+
+static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
+{
+ skb->redirected = 1;
+#ifdef CONFIG_NET_REDIRECT
+ skb->from_ingress = from_ingress;
+ if (skb->from_ingress)
+ skb_clear_tstamp(skb);
+#endif
+}
+
+static inline void skb_reset_redirect(struct sk_buff *skb)
+{
+ skb->redirected = 0;
+}
+
+static inline bool skb_csum_is_sctp(struct sk_buff *skb)
+{
+ return skb->csum_not_inet;
+}
+
+static inline void skb_set_kcov_handle(struct sk_buff *skb,
+ const u64 kcov_handle)
+{
+#ifdef CONFIG_KCOV
+ skb->kcov_handle = kcov_handle;
+#endif
+}
+
+static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
+{
+#ifdef CONFIG_KCOV
+ return skb->kcov_handle;
+#else
+ return 0;
+#endif
+}
+
+#ifdef CONFIG_PAGE_POOL
+static inline void skb_mark_for_recycle(struct sk_buff *skb)
+{
+ skb->pp_recycle = 1;
+}
+#endif
+
+static inline bool skb_pp_recycle(struct sk_buff *skb, void *data)
+{
+ if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
+ return false;
+ return page_pool_return_skb_page(virt_to_page(data));
+}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 14d61bba0b79..70d6cb94e580 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -29,7 +29,7 @@ struct sk_msg_sg {
u32 end;
u32 size;
u32 copybreak;
- unsigned long copy;
+ DECLARE_BITMAP(copy, MAX_MSG_FRAGS + 2);
/* The extra two elements:
* 1) used for chaining the front and sections when the list becomes
* partitioned (e.g. end < start). The crypto APIs require the
@@ -38,7 +38,6 @@ struct sk_msg_sg {
*/
struct scatterlist data[MAX_MSG_FRAGS + 2];
};
-static_assert(BITS_PER_LONG >= NR_MSG_FRAG_IDS);
/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
@@ -56,7 +55,8 @@ struct sk_msg {
struct sk_psock_progs {
struct bpf_prog *msg_parser;
- struct bpf_prog *skb_parser;
+ struct bpf_prog *stream_parser;
+ struct bpf_prog *stream_verdict;
struct bpf_prog *skb_verdict;
};
@@ -70,12 +70,6 @@ struct sk_psock_link {
void *link_raw;
};
-struct sk_psock_parser {
- struct strparser strp;
- bool enabled;
- void (*saved_data_ready)(struct sock *sk);
-};
-
struct sk_psock_work_state {
struct sk_buff *skb;
u32 len;
@@ -90,23 +84,28 @@ struct sk_psock {
u32 eval;
struct sk_msg *cork;
struct sk_psock_progs progs;
- struct sk_psock_parser parser;
+#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
+ struct strparser strp;
+#endif
struct sk_buff_head ingress_skb;
struct list_head ingress_msg;
+ spinlock_t ingress_lock;
unsigned long state;
struct list_head link;
spinlock_t link_lock;
refcount_t refcnt;
void (*saved_unhash)(struct sock *sk);
+ void (*saved_destroy)(struct sock *sk);
void (*saved_close)(struct sock *sk, long timeout);
void (*saved_write_space)(struct sock *sk);
+ void (*saved_data_ready)(struct sock *sk);
+ int (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock,
+ bool restore);
struct proto *sk_proto;
+ struct mutex work_mutex;
struct sk_psock_work_state work_state;
struct work_struct work;
- union {
- struct rcu_head rcu;
- struct work_struct gc;
- };
+ struct rcu_work rwork;
};
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
@@ -127,6 +126,9 @@ int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
struct sk_msg *msg, u32 bytes);
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
struct sk_msg *msg, u32 bytes);
+int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
+ int len, int flags);
+bool sk_msg_is_readable(struct sock *sk);
static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
{
@@ -169,11 +171,6 @@ static inline u32 sk_msg_iter_dist(u32 start, u32 end)
#define sk_msg_iter_next(msg, which) \
sk_msg_iter_var_next(msg->sg.which)
-static inline void sk_msg_clear_meta(struct sk_msg *msg)
-{
- memset(&msg->sg, 0, offsetofend(struct sk_msg_sg, copy));
-}
-
static inline void sk_msg_init(struct sk_msg *msg)
{
BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
@@ -187,6 +184,7 @@ static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
dst->sg.data[which] = src->sg.data[which];
dst->sg.data[which].length = size;
dst->sg.size += size;
+ src->sg.size -= size;
src->sg.data[which].length -= size;
src->sg.data[which].offset += size;
}
@@ -231,7 +229,7 @@ static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
{
struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);
- if (test_bit(msg->sg.start, &msg->sg.copy)) {
+ if (test_bit(msg->sg.start, msg->sg.copy)) {
msg->data = NULL;
msg->data_end = NULL;
} else {
@@ -250,7 +248,7 @@ static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
sg_set_page(sge, page, len, offset);
sg_unmark_end(sge);
- __set_bit(msg->sg.end, &msg->sg.copy);
+ __set_bit(msg->sg.end, msg->sg.copy);
msg->sg.size += len;
sk_msg_iter_next(msg, end);
}
@@ -259,9 +257,9 @@ static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
do {
if (copy_state)
- __set_bit(i, &msg->sg.copy);
+ __set_bit(i, msg->sg.copy);
else
- __clear_bit(i, &msg->sg.copy);
+ __clear_bit(i, msg->sg.copy);
sk_msg_iter_var_next(i);
if (i == msg->sg.end)
break;
@@ -280,13 +278,81 @@ static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
static inline struct sk_psock *sk_psock(const struct sock *sk)
{
- return rcu_dereference_sk_user_data(sk);
+ return __rcu_dereference_sk_user_data_with_flags(sk,
+ SK_USER_DATA_PSOCK);
+}
+
+static inline void sk_psock_set_state(struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ set_bit(bit, &psock->state);
+}
+
+static inline void sk_psock_clear_state(struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ clear_bit(bit, &psock->state);
+}
+
+static inline bool sk_psock_test_state(const struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ return test_bit(bit, &psock->state);
+}
+
+static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
+{
+ sk_drops_add(sk, skb);
+ kfree_skb(skb);
}
static inline void sk_psock_queue_msg(struct sk_psock *psock,
struct sk_msg *msg)
{
- list_add_tail(&msg->list, &psock->ingress_msg);
+ spin_lock_bh(&psock->ingress_lock);
+ if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
+ list_add_tail(&msg->list, &psock->ingress_msg);
+ else {
+ sk_msg_free(psock->sk, msg);
+ kfree(msg);
+ }
+ spin_unlock_bh(&psock->ingress_lock);
+}
+
+static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
+{
+ struct sk_msg *msg;
+
+ spin_lock_bh(&psock->ingress_lock);
+ msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
+ if (msg)
+ list_del(&msg->list);
+ spin_unlock_bh(&psock->ingress_lock);
+ return msg;
+}
+
+static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock)
+{
+ struct sk_msg *msg;
+
+ spin_lock_bh(&psock->ingress_lock);
+ msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
+ spin_unlock_bh(&psock->ingress_lock);
+ return msg;
+}
+
+static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock,
+ struct sk_msg *msg)
+{
+ struct sk_msg *ret;
+
+ spin_lock_bh(&psock->ingress_lock);
+ if (list_is_last(&msg->list, &psock->ingress_msg))
+ ret = NULL;
+ else
+ ret = list_next_entry(msg, list);
+ spin_unlock_bh(&psock->ingress_lock);
+ return ret;
}
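The helpers above take ingress_lock around every list operation, so the producer (sk_psock_queue_msg()) and consumer paths can run concurrently. A hedged consumer sketch, with the actual delivery logic elided:

static void demo_drain_ingress(struct sk_psock *psock)
{
	struct sk_msg *msg;

	/* Each dequeued message is owned by us once the lock is dropped. */
	while ((msg = sk_psock_dequeue_msg(psock))) {
		/* ... copy msg data out to the receiver ... */
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}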
static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
@@ -294,19 +360,45 @@ static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
return psock ? list_empty(&psock->ingress_msg) : true;
}
+static inline void kfree_sk_msg(struct sk_msg *msg)
+{
+ if (msg->skb)
+ consume_skb(msg->skb);
+ kfree(msg);
+}
+
static inline void sk_psock_report_error(struct sk_psock *psock, int err)
{
struct sock *sk = psock->sk;
sk->sk_err = err;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
}
struct sk_psock *sk_psock_init(struct sock *sk, int node);
+void sk_psock_stop(struct sk_psock *psock);
+#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
+#else
+static inline int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
+{
+}
+
+static inline void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
+{
+}
+#endif
+
+void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock);
+void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);
int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
struct sk_msg *msg);
@@ -323,16 +415,6 @@ static inline void sk_psock_free_link(struct sk_psock_link *link)
}
struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);
-#if defined(CONFIG_BPF_STREAM_PARSER)
-void sk_psock_unlink(struct sock *sk, struct sk_psock_link *link);
-#else
-static inline void sk_psock_unlink(struct sock *sk,
- struct sk_psock_link *link)
-{
-}
-#endif
-
-void __sk_psock_purge_ingress_msg(struct sk_psock *psock);
static inline void sk_psock_cork_free(struct sk_psock *psock)
{
@@ -343,76 +425,11 @@ static inline void sk_psock_cork_free(struct sk_psock *psock)
}
}
-static inline void sk_psock_update_proto(struct sock *sk,
- struct sk_psock *psock,
- struct proto *ops)
-{
- psock->saved_unhash = sk->sk_prot->unhash;
- psock->saved_close = sk->sk_prot->close;
- psock->saved_write_space = sk->sk_write_space;
-
- psock->sk_proto = sk->sk_prot;
- sk->sk_prot = ops;
-}
-
static inline void sk_psock_restore_proto(struct sock *sk,
struct sk_psock *psock)
{
- sk->sk_prot->unhash = psock->saved_unhash;
-
- if (psock->sk_proto) {
- struct inet_connection_sock *icsk = inet_csk(sk);
- bool has_ulp = !!icsk->icsk_ulp_data;
-
- if (has_ulp) {
- tcp_update_ulp(sk, psock->sk_proto,
- psock->saved_write_space);
- } else {
- sk->sk_prot = psock->sk_proto;
- sk->sk_write_space = psock->saved_write_space;
- }
- psock->sk_proto = NULL;
- } else {
- sk->sk_write_space = psock->saved_write_space;
- }
-}
-
-static inline void sk_psock_set_state(struct sk_psock *psock,
- enum sk_psock_state_bits bit)
-{
- set_bit(bit, &psock->state);
-}
-
-static inline void sk_psock_clear_state(struct sk_psock *psock,
- enum sk_psock_state_bits bit)
-{
- clear_bit(bit, &psock->state);
-}
-
-static inline bool sk_psock_test_state(const struct sk_psock *psock,
- enum sk_psock_state_bits bit)
-{
- return test_bit(bit, &psock->state);
-}
-
-static inline struct sk_psock *sk_psock_get_checked(struct sock *sk)
-{
- struct sk_psock *psock;
-
- rcu_read_lock();
- psock = sk_psock(sk);
- if (psock) {
- if (sk->sk_prot->recvmsg != tcp_bpf_recvmsg) {
- psock = ERR_PTR(-EBUSY);
- goto out;
- }
-
- if (!refcount_inc_not_zero(&psock->refcnt))
- psock = ERR_PTR(-EBUSY);
- }
-out:
- rcu_read_unlock();
- return psock;
+ if (psock->psock_update_sk_prot)
+ psock->psock_update_sk_prot(sk, psock, true);
}
static inline struct sk_psock *sk_psock_get(struct sock *sk)
@@ -427,8 +444,6 @@ static inline struct sk_psock *sk_psock_get(struct sock *sk)
return psock;
}
-void sk_psock_stop(struct sock *sk, struct sk_psock *psock);
-void sk_psock_destroy(struct rcu_head *rcu);
void sk_psock_drop(struct sock *sk, struct sk_psock *psock);
static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
@@ -439,8 +454,8 @@ static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
- if (psock->parser.enabled)
- psock->parser.saved_data_ready(sk);
+ if (psock->saved_data_ready)
+ psock->saved_data_ready(sk);
else
sk->sk_data_ready(sk);
}
@@ -453,11 +468,91 @@ static inline void psock_set_prog(struct bpf_prog **pprog,
bpf_prog_put(prog);
}
+static inline int psock_replace_prog(struct bpf_prog **pprog,
+ struct bpf_prog *prog,
+ struct bpf_prog *old)
+{
+ if (cmpxchg(pprog, old, prog) != old)
+ return -ENOENT;
+
+ if (old)
+ bpf_prog_put(old);
+
+ return 0;
+}
+
static inline void psock_progs_drop(struct sk_psock_progs *progs)
{
psock_set_prog(&progs->msg_parser, NULL);
- psock_set_prog(&progs->skb_parser, NULL);
+ psock_set_prog(&progs->stream_parser, NULL);
+ psock_set_prog(&progs->stream_verdict, NULL);
psock_set_prog(&progs->skb_verdict, NULL);
}
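psock_replace_prog() above only succeeds when the caller's notion of the installed program is still current: the cmpxchg() returns -ENOENT if another updater won the race, and the displaced program's reference is dropped on success. A hedged usage sketch:

static int demo_swap_verdict(struct sk_psock_progs *progs,
			     struct bpf_prog *prog, struct bpf_prog *old)
{
	int ret = psock_replace_prog(&progs->stream_verdict, prog, old);

	if (ret == -ENOENT)
		pr_debug("verdict program changed under us; not replaced\n");
	return ret;
}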
+int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb);
+
+static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
+{
+ if (!psock)
+ return false;
+ return !!psock->saved_data_ready;
+}
+
+static inline bool sk_is_udp(const struct sock *sk)
+{
+ return sk->sk_type == SOCK_DGRAM &&
+ sk->sk_protocol == IPPROTO_UDP;
+}
+
+#if IS_ENABLED(CONFIG_NET_SOCK_MSG)
+
+#define BPF_F_STRPARSER (1UL << 1)
+
+/* We only have two bits so far. */
+#define BPF_F_PTR_MASK ~(BPF_F_INGRESS | BPF_F_STRPARSER)
+
+static inline bool skb_bpf_strparser(const struct sk_buff *skb)
+{
+ unsigned long sk_redir = skb->_sk_redir;
+
+ return sk_redir & BPF_F_STRPARSER;
+}
+
+static inline void skb_bpf_set_strparser(struct sk_buff *skb)
+{
+ skb->_sk_redir |= BPF_F_STRPARSER;
+}
+
+static inline bool skb_bpf_ingress(const struct sk_buff *skb)
+{
+ unsigned long sk_redir = skb->_sk_redir;
+
+ return sk_redir & BPF_F_INGRESS;
+}
+
+static inline void skb_bpf_set_ingress(struct sk_buff *skb)
+{
+ skb->_sk_redir |= BPF_F_INGRESS;
+}
+
+static inline void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir,
+ bool ingress)
+{
+ skb->_sk_redir = (unsigned long)sk_redir;
+ if (ingress)
+ skb->_sk_redir |= BPF_F_INGRESS;
+}
+
+static inline struct sock *skb_bpf_redirect_fetch(const struct sk_buff *skb)
+{
+ unsigned long sk_redir = skb->_sk_redir;
+
+ return (struct sock *)(sk_redir & BPF_F_PTR_MASK);
+}
+
+static inline void skb_bpf_redirect_clear(struct sk_buff *skb)
+{
+ skb->_sk_redir = 0;
+}
+#endif /* CONFIG_NET_SOCK_MSG */
#endif /* _LINUX_SKMSG_H */
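The skb->_sk_redir accessors above pack a struct sock pointer together with two low-order flag bits (BPF_F_INGRESS and BPF_F_STRPARSER), which works because socket pointers carry more than two bits of alignment. A small illustrative decode:

static void demo_inspect_redirect(const struct sk_buff *skb)
{
	struct sock *sk = skb_bpf_redirect_fetch(skb);

	if (sk)
		pr_debug("redirect sk=%p ingress=%d strparser=%d\n", sk,
			 skb_bpf_ingress(skb), skb_bpf_strparser(skb));
}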
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 03a389358562..45efc6c553b8 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -29,6 +29,8 @@
#define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON ((slab_flags_t __force)0x00000800U)
+/* Indicate a kmalloc slab */
+#define SLAB_KMALLOC ((slab_flags_t __force)0x00001000U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
@@ -106,20 +108,30 @@
# define SLAB_ACCOUNT 0
#endif
-#ifdef CONFIG_KASAN
+#ifdef CONFIG_KASAN_GENERIC
#define SLAB_KASAN ((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN 0
#endif
+/*
+ * Ignore user specified debugging flags.
+ * Intended for caches created for self-tests so they have only flags
+ * specified in the code and other flags are ignored.
+ */
+#define SLAB_NO_USER_FLAGS ((slab_flags_t __force)0x10000000U)
+
+#ifdef CONFIG_KFENCE
+#define SLAB_SKIP_KFENCE ((slab_flags_t __force)0x20000000U)
+#else
+#define SLAB_SKIP_KFENCE 0
+#endif
+
/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
-/* Slab deactivation flag */
-#define SLAB_DEACTIVATED ((slab_flags_t __force)0x10000000U)
-
/*
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
*
@@ -135,6 +147,7 @@
#include <linux/kasan.h>
+struct list_lru;
struct mem_cgroup;
/*
* struct kmem_cache related prototypes
@@ -142,8 +155,6 @@ struct mem_cgroup;
void __init kmem_cache_init(void);
bool slab_is_available(void);
-extern bool usercopy_fallback;
-
struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
unsigned int align, slab_flags_t flags,
void (*ctor)(void *));
@@ -152,11 +163,8 @@ struct kmem_cache *kmem_cache_create_usercopy(const char *name,
slab_flags_t flags,
unsigned int useroffset, unsigned int usersize,
void (*ctor)(void *));
-void kmem_cache_destroy(struct kmem_cache *);
-int kmem_cache_shrink(struct kmem_cache *);
-
-void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
-void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *);
+void kmem_cache_destroy(struct kmem_cache *s);
+int kmem_cache_shrink(struct kmem_cache *s);
/*
* Please use this macro to create slab caches. Simply specify the
@@ -184,24 +192,34 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *);
/*
* Common kmalloc functions provided by all allocators
*/
-void * __must_check krealloc(const void *, size_t, gfp_t);
-void kfree(const void *);
-void kzfree(const void *);
-size_t __ksize(const void *);
-size_t ksize(const void *);
+void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2);
+void kfree(const void *objp);
+void kfree_sensitive(const void *objp);
+size_t __ksize(const void *objp);
-#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
-void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
- bool to_user);
-#else
-static inline void __check_heap_object(const void *ptr, unsigned long n,
- struct page *page, bool to_user) { }
+/**
+ * ksize - Report actual allocation size of associated object
+ *
+ * @objp: Pointer returned from a prior kmalloc()-family allocation.
+ *
+ * This should not be used for writing beyond the originally requested
+ * allocation size. Either use krealloc() or round up the allocation size
+ * with kmalloc_size_roundup() prior to allocation. If this is used to
+ * access beyond the originally requested allocation size, UBSAN_BOUNDS
+ * and/or FORTIFY_SOURCE may trip, since they only know about the
+ * originally allocated size via the __alloc_size attribute.
+ */
+size_t ksize(const void *objp);
+
+#ifdef CONFIG_PRINTK
+bool kmem_valid_obj(void *object);
+void kmem_dump_obj(void *object);
#endif
/*
* Some archs want to perform DMA into kmalloc caches and need a guaranteed
* alignment larger than the alignment of a 64-bit integer.
- * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
+ * Setting ARCH_DMA_MINALIGN in arch headers allows that.
*/
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
@@ -221,9 +239,21 @@ static inline void __check_heap_object(const void *ptr, unsigned long n,
#endif
/*
- * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
- * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
- * aligned pointers.
+ * Arches can define this function if they want to decide the minimum slab
+ * alignment at runtime. The value returned by the function must be a power
+ * of two and >= ARCH_SLAB_MINALIGN.
+ */
+#ifndef arch_slab_minalign
+static inline unsigned int arch_slab_minalign(void)
+{
+ return ARCH_SLAB_MINALIGN;
+}
+#endif
+
+/*
+ * kmem_cache_alloc and friends return pointers aligned to ARCH_SLAB_MINALIGN.
+ * kmalloc and friends return pointers aligned to both ARCH_KMALLOC_MINALIGN
+ * and ARCH_SLAB_MINALIGN, but here we only assume the former alignment.
*/
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
@@ -235,27 +265,17 @@ static inline void __check_heap_object(const void *ptr, unsigned long n,
#ifdef CONFIG_SLAB
/*
- * The largest kmalloc size supported by the SLAB allocators is
- * 32 megabyte (2^25) or the maximum allocatable page order if that is
- * less than 32 MB.
- *
- * WARNING: Its not easy to increase this value since the allocators have
- * to do various tricks to work around compiler limitations in order to
- * ensure proper constant folding.
+ * SLAB and SLUB directly allocate requests fitting into an order-1 page
+ * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
*/
-#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
- (MAX_ORDER + PAGE_SHIFT - 1) : 25)
-#define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
+#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
+#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 5
#endif
#endif
#ifdef CONFIG_SLUB
-/*
- * SLUB directly allocates requests fitting in to an order-1 page
- * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
- */
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
@@ -280,7 +300,7 @@ static inline void __check_heap_object(const void *ptr, unsigned long n,
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
-/* Maximum order allocatable via the slab allocagtor */
+/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)
/*
@@ -304,9 +324,21 @@ static inline void __check_heap_object(const void *ptr, unsigned long n,
/*
* Whenever changing this, take care of that kmalloc_type() and
* create_kmalloc_caches() still work as intended.
+ *
+ * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
+ * is for accounted but unreclaimable and non-dma objects. All the other
+ * kmem caches can have both accounted and unaccounted objects.
*/
enum kmalloc_cache_type {
KMALLOC_NORMAL = 0,
+#ifndef CONFIG_ZONE_DMA
+ KMALLOC_DMA = KMALLOC_NORMAL,
+#endif
+#ifndef CONFIG_MEMCG_KMEM
+ KMALLOC_CGROUP = KMALLOC_NORMAL,
+#else
+ KMALLOC_CGROUP,
+#endif
KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
KMALLOC_DMA,
@@ -318,24 +350,36 @@ enum kmalloc_cache_type {
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
+/*
+ * Define gfp bits that should not be set for KMALLOC_NORMAL.
+ */
+#define KMALLOC_NOT_NORMAL_BITS \
+ (__GFP_RECLAIMABLE | \
+ (IS_ENABLED(CONFIG_ZONE_DMA) ? __GFP_DMA : 0) | \
+ (IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0))
+
static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
-#ifdef CONFIG_ZONE_DMA
/*
* The most common case is KMALLOC_NORMAL, so test for it
- * with a single branch for both flags.
+ * with a single branch for all the relevant flags.
*/
- if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0))
+ if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
return KMALLOC_NORMAL;
/*
- * At least one of the flags has to be set. If both are, __GFP_DMA
- * is more important.
+ * At least one of the flags has to be set. Their priorities in
+ * decreasing order are:
+ * 1) __GFP_DMA
+ * 2) __GFP_RECLAIMABLE
+ * 3) __GFP_ACCOUNT
*/
- return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM;
-#else
- return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL;
-#endif
+ if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
+ return KMALLOC_DMA;
+ if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE))
+ return KMALLOC_RECLAIM;
+ else
+ return KMALLOC_CGROUP;
}
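For reference, a few worked results of the mapping above, assuming both CONFIG_ZONE_DMA and CONFIG_MEMCG_KMEM are enabled:

/*
 * kmalloc_type(GFP_KERNEL)                      -> KMALLOC_NORMAL
 * kmalloc_type(GFP_KERNEL | __GFP_ACCOUNT)      -> KMALLOC_CGROUP
 * kmalloc_type(GFP_KERNEL | __GFP_RECLAIMABLE)  -> KMALLOC_RECLAIM
 * kmalloc_type(GFP_DMA | __GFP_RECLAIMABLE)     -> KMALLOC_DMA (DMA wins)
 */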
/*
@@ -345,8 +389,14 @@ static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
* 1 = 65 .. 96 bytes
* 2 = 129 .. 192 bytes
* n = 2^(n-1)+1 .. 2^n
+ *
+ * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
+ * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
+ * Callers where !size_is_constant should only be test modules, where runtime
+ * overheads of __kmalloc_index() can be tolerated. Also see kmalloc_slab().
*/
-static __always_inline unsigned int kmalloc_index(size_t size)
+static __always_inline unsigned int __kmalloc_index(size_t size,
+ bool size_is_constant)
{
if (!size)
return 0;
@@ -377,21 +427,24 @@ static __always_inline unsigned int kmalloc_index(size_t size)
if (size <= 512 * 1024) return 19;
if (size <= 1024 * 1024) return 20;
if (size <= 2 * 1024 * 1024) return 21;
- if (size <= 4 * 1024 * 1024) return 22;
- if (size <= 8 * 1024 * 1024) return 23;
- if (size <= 16 * 1024 * 1024) return 24;
- if (size <= 32 * 1024 * 1024) return 25;
- if (size <= 64 * 1024 * 1024) return 26;
- BUG();
+
+ if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
+ BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
+ else
+ BUG();
/* Will never be reached. Needed because the compiler may complain */
return -1;
}
+static_assert(PAGE_SHIFT <= 20);
+#define kmalloc_index(s) __kmalloc_index(s, true)
#endif /* !CONFIG_SLOB */
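A few worked results of the size-to-index mapping, evaluated at compile time when the size is constant (assuming KMALLOC_SHIFT_LOW == 3 and 4K pages):

/*
 * kmalloc_index(8)    == 3   (8-byte bucket)
 * kmalloc_index(96)   == 1   (special 96-byte bucket)
 * kmalloc_index(100)  == 7   (rounded up to 128 bytes)
 * kmalloc_index(4096) == 12  (exactly one page)
 */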
-void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
-void kmem_cache_free(struct kmem_cache *, void *);
+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
+void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
+void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
+ gfp_t gfpflags) __assume_slab_alignment __malloc;
+void kmem_cache_free(struct kmem_cache *s, void *objp);
/*
* Bulk allocation and freeing operations. These are accelerated in an
@@ -400,8 +453,8 @@ void kmem_cache_free(struct kmem_cache *, void *);
*
* Note that interrupts must be enabled when calling these functions.
*/
-void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
/*
* Caller must not use kfree_bulk() on memory not originally allocated
@@ -412,77 +465,22 @@ static __always_inline void kfree_bulk(size_t size, void **p)
kmem_cache_free_bulk(NULL, size, p);
}
-#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
-#else
-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
- return __kmalloc(size, flags);
-}
-
-static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
-{
- return kmem_cache_alloc(s, flags);
-}
-#endif
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
-
-#ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size) __assume_slab_alignment __malloc;
-#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size)
-{
- return kmem_cache_alloc_trace(s, gfpflags, size);
-}
-#endif /* CONFIG_NUMA */
-
-#else /* CONFIG_TRACING */
-static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
- gfp_t flags, size_t size)
-{
- void *ret = kmem_cache_alloc(s, flags);
-
- ret = kasan_kmalloc(s, ret, size, flags);
- return ret;
-}
-
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size)
-{
- void *ret = kmem_cache_alloc_node(s, gfpflags, node);
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
+ __alloc_size(1);
+void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
+ __malloc;
- ret = kasan_kmalloc(s, ret, size, gfpflags);
- return ret;
-}
-#endif /* CONFIG_TRACING */
+void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
+ __assume_kmalloc_alignment __alloc_size(3);
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
+void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+ int node, size_t size) __assume_kmalloc_alignment
+ __alloc_size(4);
+void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
+ __alloc_size(1);
-#ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
-#else
-static __always_inline void *
-kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
- return kmalloc_order(size, flags, order);
-}
-#endif
-
-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
-{
- unsigned int order = get_order(size);
- return kmalloc_order_trace(size, flags, order);
-}
+void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
+ __alloc_size(1);
/**
* kmalloc - allocate memory
@@ -501,7 +499,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
* :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
*
* The recommended usage of the @flags is described at
- * :ref:`Documentation/core-api/memory-allocation.rst <memory-allocation>`
+ * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
*
* Below is a brief outline of the most useful GFP flags
*
@@ -538,7 +536,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
* Try really hard to succeed the allocation but fail
* eventually.
*/
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
+static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
@@ -552,7 +550,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
if (!index)
return ZERO_SIZE_PTR;
- return kmem_cache_alloc_trace(
+ return kmalloc_trace(
kmalloc_caches[kmalloc_type(flags)][index],
flags, size);
#endif
@@ -560,25 +558,35 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
return __kmalloc(size, flags);
}
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
#ifndef CONFIG_SLOB
- if (__builtin_constant_p(size) &&
- size <= KMALLOC_MAX_CACHE_SIZE) {
- unsigned int i = kmalloc_index(size);
+static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ if (__builtin_constant_p(size)) {
+ unsigned int index;
+
+ if (size > KMALLOC_MAX_CACHE_SIZE)
+ return kmalloc_large_node(size, flags, node);
- if (!i)
+ index = kmalloc_index(size);
+
+ if (!index)
return ZERO_SIZE_PTR;
- return kmem_cache_alloc_node_trace(
- kmalloc_caches[kmalloc_type(flags)][i],
- flags, node, size);
+ return kmalloc_node_trace(
+ kmalloc_caches[kmalloc_type(flags)][index],
+ flags, node, size);
}
-#endif
return __kmalloc_node(size, flags, node);
}
+#else
+static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
+ return kmalloc_large_node(size, flags, node);
-int memcg_update_all_caches(int num_memcgs);
+ return __kmalloc_node(size, flags, node);
+}
+#endif
/**
* kmalloc_array - allocate memory for an array.
@@ -586,7 +594,7 @@ int memcg_update_all_caches(int num_memcgs);
* @size: element size.
* @flags: the type of memory to allocate (see kmalloc).
*/
-static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
size_t bytes;
@@ -598,16 +606,42 @@ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
}
/**
+ * krealloc_array - reallocate memory for an array.
+ * @p: pointer to the memory chunk to reallocate
+ * @new_n: new number of elements to alloc
+ * @new_size: new size of a single member of the array
+ * @flags: the type of memory to allocate (see kmalloc)
+ */
+static inline __realloc_size(2, 3) void * __must_check krealloc_array(void *p,
+ size_t new_n,
+ size_t new_size,
+ gfp_t flags)
+{
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
+ return NULL;
+
+ return krealloc(p, bytes, flags);
+}
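krealloc_array() mirrors kmalloc_array(): the element-count multiply is checked with check_mul_overflow() before the underlying krealloc(). A hedged growth helper; struct foo and the doubling policy are illustrative:

static int demo_grow_array(struct foo **arr, size_t *cap)
{
	struct foo *tmp;

	/* On failure NULL comes back and *arr stays valid and untouched. */
	tmp = krealloc_array(*arr, *cap * 2, sizeof(**arr), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	*arr = tmp;
	*cap *= 2;
	return 0;
}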
+
+/**
* kcalloc - allocate memory for an array. The memory is set to zero.
* @n: number of elements.
* @size: element size.
* @flags: the type of memory to allocate (see kmalloc).
*/
-static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
+static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
{
return kmalloc_array(n, size, flags | __GFP_ZERO);
}
+void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
+ unsigned long caller) __alloc_size(1);
+#define kmalloc_node_track_caller(size, flags, node) \
+ __kmalloc_node_track_caller(size, flags, node, \
+ _RET_IP_)
+
/*
* kmalloc_track_caller is a special version of kmalloc that records the
* calling function of the routine calling it for slab leak tracking instead
@@ -616,12 +650,12 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
* allocator where we care about the real place the memory allocation
* request comes from.
*/
-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
- __kmalloc_track_caller(size, flags, _RET_IP_)
+ __kmalloc_node_track_caller(size, flags, \
+ NUMA_NO_NODE, _RET_IP_)
-static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
- int node)
+static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
+ int node)
{
size_t bytes;
@@ -632,25 +666,11 @@ static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
return __kmalloc_node(bytes, flags, node);
}
-static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
+static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}
-
-#ifdef CONFIG_NUMA
-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
-#define kmalloc_node_track_caller(size, flags, node) \
- __kmalloc_node_track_caller(size, flags, node, \
- _RET_IP_)
-
-#else /* CONFIG_NUMA */
-
-#define kmalloc_node_track_caller(size, flags, node) \
- kmalloc_track_caller(size, flags)
-
-#endif /* CONFIG_NUMA */
-
/*
* Shortcuts
*/
@@ -664,7 +684,7 @@ static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
* @size: how many bytes of memory are required.
* @flags: the type of memory to allocate (see kmalloc).
*/
-static inline void *kzalloc(size_t size, gfp_t flags)
+static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
{
return kmalloc(size, flags | __GFP_ZERO);
}
@@ -675,12 +695,63 @@ static inline void *kzalloc(size_t size, gfp_t flags)
* @flags: the type of memory to allocate (see kmalloc).
* @node: memory node from which to allocate
*/
-static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
+static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node)
{
return kmalloc_node(size, flags | __GFP_ZERO, node);
}
+extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
+static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags)
+{
+ return kvmalloc_node(size, flags, NUMA_NO_NODE);
+}
+static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node)
+{
+ return kvmalloc_node(size, flags | __GFP_ZERO, node);
+}
+static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
+{
+ return kvmalloc(size, flags | __GFP_ZERO);
+}
+
+static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
+{
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(n, size, &bytes)))
+ return NULL;
+
+ return kvmalloc(bytes, flags);
+}
+
+static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
+{
+ return kvmalloc_array(n, size, flags | __GFP_ZERO);
+}
+
+extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
+ __realloc_size(3);
+extern void kvfree(const void *addr);
+extern void kvfree_sensitive(const void *addr, size_t len);
+
unsigned int kmem_cache_size(struct kmem_cache *s);
+
+/**
+ * kmalloc_size_roundup - Report allocation bucket size for the given size
+ *
+ * @size: Number of bytes to round up from.
+ *
+ * This returns the number of bytes that would be available in a kmalloc()
+ * allocation of @size bytes. For example, a 126 byte request would be
+ * rounded up to the next sized kmalloc bucket, 128 bytes. (This is strictly
+ * for the general-purpose kmalloc()-based allocations, and is not for the
+ * pre-sized kmem_cache_alloc()-based allocations.)
+ *
+ * Use this to kmalloc() the full bucket size ahead of time instead of using
+ * ksize() to query the size after an allocation.
+ */
+size_t kmalloc_size_roundup(size_t size);
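As the comment suggests, round the request up front rather than probing with ksize() afterwards. A minimal sketch:

static void *demo_alloc_bucket(size_t want, size_t *usable)
{
	*usable = kmalloc_size_roundup(want);	/* e.g. 1500 -> 2048 */
	return kmalloc(*usable, GFP_KERNEL);	/* full bucket is fair game */
}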
+
void __init kmem_cache_init_late(void);
#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index abc7de77b988..f0ffad6a3365 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H
+#include <linux/kfence.h>
#include <linux/reciprocal_div.h>
/*
@@ -32,7 +33,6 @@ struct kmem_cache {
size_t colour; /* cache colouring range */
unsigned int colour_off; /* colour offset */
- struct kmem_cache *freelist_cache;
unsigned int freelist_size;
/* constructor func */
@@ -72,9 +72,6 @@ struct kmem_cache {
int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
-#ifdef CONFIG_MEMCG
- struct memcg_cache_params memcg_params;
-#endif
#ifdef CONFIG_KASAN
struct kasan_cache kasan_info;
#endif
@@ -89,11 +86,11 @@ struct kmem_cache {
struct kmem_cache_node *node[MAX_NUMNODES];
};
-static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
+static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
void *x)
{
- void *object = x - (x - page->s_mem) % cache->size;
- void *last_object = page->s_mem + (cache->num - 1) * cache->size;
+ void *object = x - (x - slab->s_mem) % cache->size;
+ void *last_object = slab->s_mem + (cache->num - 1) * cache->size;
if (unlikely(object > last_object))
return last_object;
@@ -108,10 +105,18 @@ static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
* reciprocal_divide(offset, cache->reciprocal_buffer_size)
*/
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
- const struct page *page, void *obj)
+ const struct slab *slab, void *obj)
{
- u32 offset = (obj - page->s_mem);
+ u32 offset = (obj - slab->s_mem);
return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
+static inline int objs_per_slab(const struct kmem_cache *cache,
+ const struct slab *slab)
+{
+ if (is_kfence_address(slab_address(slab)))
+ return 1;
+ return cache->num;
+}
+
#endif /* _LINUX_SLAB_DEF_H */
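The reciprocal_divide() in obj_to_index() replaces a runtime division by the object size with a precomputed multiply-and-shift. A worked example:

/*
 * Example: cache->size == 192, obj == slab->s_mem + 576
 *   offset = 576
 *   index  = reciprocal_divide(576, rbs) == 576 / 192 == 3
 */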
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index d2153789bd9f..f9c68a9dac04 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -7,7 +7,10 @@
*
* (C) 2007 SGI, Christoph Lameter
*/
+#include <linux/kfence.h>
#include <linux/kobject.h>
+#include <linux/reciprocal_div.h>
+#include <linux/local_lock.h>
enum stat_item {
ALLOC_FASTPATH, /* Allocation from cpu slab */
@@ -38,13 +41,18 @@ enum stat_item {
CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */
NR_SLUB_STAT_ITEMS };
+/*
+ * When changing the layout, make sure freelist and tid are still compatible
+ * with this_cpu_cmpxchg_double() alignment requirements.
+ */
struct kmem_cache_cpu {
void **freelist; /* Pointer to next available object */
unsigned long tid; /* Globally unique transaction id */
- struct page *page; /* The slab from which we are allocating */
+ struct slab *slab; /* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
- struct page *partial; /* Partially allocated frozen slabs */
+ struct slab *partial; /* Partially allocated frozen slabs */
#endif
+ local_lock_t lock; /* Protects the fields above */
#ifdef CONFIG_SLUB_STATS
unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
@@ -86,15 +94,17 @@ struct kmem_cache {
unsigned long min_partial;
unsigned int size; /* The size of an object including metadata */
unsigned int object_size;/* The size of an object without metadata */
+ struct reciprocal_value reciprocal_size;
unsigned int offset; /* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
/* Number of per cpu partial objects to keep around */
unsigned int cpu_partial;
+ /* Number of per cpu partial slabs to keep around */
+ unsigned int cpu_partial_slabs;
#endif
struct kmem_cache_order_objects oo;
/* Allocation and freeing of slabs */
- struct kmem_cache_order_objects max;
struct kmem_cache_order_objects min;
gfp_t allocflags; /* gfp flags to use on each alloc */
int refcount; /* Refcount for slab cache destroy */
@@ -106,17 +116,7 @@ struct kmem_cache {
struct list_head list; /* List of slab caches */
#ifdef CONFIG_SYSFS
struct kobject kobj; /* For sysfs */
- struct work_struct kobj_remove_work;
-#endif
-#ifdef CONFIG_MEMCG
- struct memcg_cache_params memcg_params;
- /* For propagation, maximum size of a stored attr */
- unsigned int max_attr_size;
-#ifdef CONFIG_SYSFS
- struct kset *memcg_kset;
-#endif
#endif
-
#ifdef CONFIG_SLAB_FREELIST_HARDENED
unsigned long random;
#endif
@@ -142,17 +142,6 @@ struct kmem_cache {
struct kmem_cache_node *node[MAX_NUMNODES];
};
-#ifdef CONFIG_SLUB_CPU_PARTIAL
-#define slub_cpu_partial(s) ((s)->cpu_partial)
-#define slub_set_cpu_partial(s, n) \
-({ \
- slub_cpu_partial(s) = (n); \
-})
-#else
-#define slub_cpu_partial(s) (0)
-#define slub_set_cpu_partial(s, n)
-#endif /* CONFIG_SLUB_CPU_PARTIAL */
-
#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
@@ -166,20 +155,38 @@ static inline void sysfs_slab_release(struct kmem_cache *s)
}
#endif
-void object_err(struct kmem_cache *s, struct page *page,
- u8 *object, char *reason);
-
void *fixup_red_left(struct kmem_cache *s, void *p);
-static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
+static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
void *x) {
- void *object = x - (x - page_address(page)) % cache->size;
- void *last_object = page_address(page) +
- (page->objects - 1) * cache->size;
+ void *object = x - (x - slab_address(slab)) % cache->size;
+ void *last_object = slab_address(slab) +
+ (slab->objects - 1) * cache->size;
void *result = (unlikely(object > last_object)) ? last_object : object;
result = fixup_red_left(cache, result);
return result;
}
+/* Determine object index from a given position */
+static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
+ void *addr, void *obj)
+{
+ return reciprocal_divide(kasan_reset_tag(obj) - addr,
+ cache->reciprocal_size);
+}
+
+static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+ const struct slab *slab, void *obj)
+{
+ if (is_kfence_address(obj))
+ return 0;
+ return __obj_to_index(cache, slab_address(slab), obj);
+}
+
+static inline int objs_per_slab(const struct kmem_cache *cache,
+ const struct slab *slab)
+{
+ return slab->objects;
+}
#endif /* _LINUX_SLUB_DEF_H */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index cbc9162689d0..a80ab58ae3f1 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -12,56 +12,102 @@
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
-#include <linux/llist.h>
+#include <linux/smp_types.h>
typedef void (*smp_call_func_t)(void *info);
typedef bool (*smp_cond_func_t)(int cpu, void *info);
+
+/*
+ * structure shares (partial) layout with struct irq_work
+ */
struct __call_single_data {
- struct llist_node llist;
+ struct __call_single_node node;
smp_call_func_t func;
void *info;
- unsigned int flags;
};
+#define CSD_INIT(_func, _info) \
+ (struct __call_single_data){ .func = (_func), .info = (_info), }
+
/* Use __aligned() to avoid to use 2 cache lines for 1 csd */
typedef struct __call_single_data call_single_data_t
__aligned(sizeof(struct __call_single_data));
+#define INIT_CSD(_csd, _func, _info) \
+do { \
+ *(_csd) = CSD_INIT((_func), (_info)); \
+} while (0)
+
+/*
+ * Enqueue a llist_node on the call_single_queue; be very careful, read
+ * flush_smp_call_function_queue() in detail.
+ */
+extern void __smp_call_single_queue(int cpu, struct llist_node *node);
+
/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;
int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
int wait);
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+ void *info, bool wait, const struct cpumask *mask);
+
+int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
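CSD_INIT()/INIT_CSD() above give the async path a canonical initializer; the csd must remain live until the remote function has executed. A hedged fire-and-forget sketch:

static call_single_data_t demo_csd;

static void demo_fn(void *info)
{
	pr_info("ran on cpu %d\n", smp_processor_id());
}

static void demo_kick(int cpu)
{
	INIT_CSD(&demo_csd, demo_fn, NULL);
	smp_call_function_single_async(cpu, &demo_csd);
}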
+
/*
- * Call a function on all processors
+ * Functions used to stop other CPUs in a panic. All have default weak definitions.
+ * Architecture-dependent code may override them.
*/
-void on_each_cpu(smp_call_func_t func, void *info, int wait);
+void panic_smp_self_stop(void);
+void nmi_panic_self_stop(struct pt_regs *regs);
+void crash_smp_send_stop(void);
/*
- * Call a function on processors specified by mask, which might include
- * the local one.
+ * Call a function on all processors
*/
-void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
- void *info, bool wait);
+static inline void on_each_cpu(smp_call_func_t func, void *info, int wait)
+{
+ on_each_cpu_cond_mask(NULL, func, info, wait, cpu_online_mask);
+}
+
+/**
+ * on_each_cpu_mask(): Run a function on processors specified by
+ * cpumask, which may include the local processor.
+ * @mask: The set of cpus to run on (only runs on online subset).
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed
+ * on other CPUs.
+ *
+ * If @wait is true, then returns once @func has returned.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler. The
+ * exception is that it may be used during early boot while
+ * early_boot_irqs_disabled is set.
+ */
+static inline void on_each_cpu_mask(const struct cpumask *mask,
+ smp_call_func_t func, void *info, bool wait)
+{
+ on_each_cpu_cond_mask(NULL, func, info, wait, mask);
+}
/*
* Call a function on each processor for which the supplied function
* cond_func returns a positive value. This may include the local
- * processor.
+ * processor. May be used during early boot while early_boot_irqs_disabled is
+ * set. Use local_irq_save/restore() instead of local_irq_disable/enable().
*/
-void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
- void *info, bool wait);
-
-void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
- void *info, bool wait, const struct cpumask *mask);
-
-int smp_call_function_single_async(int cpu, call_single_data_t *csd);
+static inline void on_each_cpu_cond(smp_cond_func_t cond_func,
+ smp_call_func_t func, void *info, bool wait)
+{
+ on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
+}
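All three wrappers now funnel into on_each_cpu_cond_mask(). A minimal, hedged use; the callback runs in IPI context, so it must stay fast and non-blocking:

static void demo_bump(void *info)
{
	atomic_t *ctr = info;

	atomic_inc(ctr);
}

static void demo_count_cpus(atomic_t *ctr)
{
	on_each_cpu(demo_bump, ctr, 1);	/* wait=1: all CPUs done on return */
}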
#ifdef CONFIG_SMP
#include <linux/preempt.h>
-#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>
@@ -227,8 +273,8 @@ static inline int get_boot_cpu_id(void)
*/
extern void arch_disable_smp_support(void);
-extern void arch_enable_nonboot_cpus_begin(void);
-extern void arch_enable_nonboot_cpus_end(void);
+extern void arch_thaw_secondary_cpus_begin(void);
+extern void arch_thaw_secondary_cpus_end(void);
void smp_setup_processor_id(void);
diff --git a/include/linux/smp_types.h b/include/linux/smp_types.h
new file mode 100644
index 000000000000..2e8461af8df6
--- /dev/null
+++ b/include/linux/smp_types.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_SMP_TYPES_H
+#define __LINUX_SMP_TYPES_H
+
+#include <linux/llist.h>
+
+enum {
+ CSD_FLAG_LOCK = 0x01,
+
+ IRQ_WORK_PENDING = 0x01,
+ IRQ_WORK_BUSY = 0x02,
+ IRQ_WORK_LAZY = 0x04, /* No IPI, wait for tick */
+ IRQ_WORK_HARD_IRQ = 0x08, /* IRQ context on PREEMPT_RT */
+
+ IRQ_WORK_CLAIMED = (IRQ_WORK_PENDING | IRQ_WORK_BUSY),
+
+ CSD_TYPE_ASYNC = 0x00,
+ CSD_TYPE_SYNC = 0x10,
+ CSD_TYPE_IRQ_WORK = 0x20,
+ CSD_TYPE_TTWU = 0x30,
+
+ CSD_FLAG_TYPE_MASK = 0xF0,
+};
+
+/*
+ * struct __call_single_node is the primary type on
+ * smp.c:call_single_queue.
+ *
+ * flush_smp_call_function_queue() only reads the type from
+ * __call_single_node::u_flags as a regular load, the above
+ * (anonymous) enum defines all the bits of this word.
+ *
+ * Other bits are not modified until the type is known.
+ *
+ * CSD_TYPE_SYNC/ASYNC:
+ * struct {
+ * struct llist_node node;
+ * unsigned int flags;
+ * smp_call_func_t func;
+ * void *info;
+ * };
+ *
+ * CSD_TYPE_IRQ_WORK:
+ * struct {
+ * struct llist_node node;
+ * atomic_t flags;
+ * void (*func)(struct irq_work *);
+ * };
+ *
+ * CSD_TYPE_TTWU:
+ * struct {
+ * struct llist_node node;
+ * unsigned int flags;
+ * };
+ *
+ */
+
+struct __call_single_node {
+ struct llist_node llist;
+ union {
+ unsigned int u_flags;
+ atomic_t a_flags;
+ };
+#ifdef CONFIG_64BIT
+ u16 src, dst;
+#endif
+};
+
+#endif /* __LINUX_SMP_TYPES_H */
diff --git a/include/linux/soc/apple/rtkit.h b/include/linux/soc/apple/rtkit.h
new file mode 100644
index 000000000000..c9cabb679cd1
--- /dev/null
+++ b/include/linux/soc/apple/rtkit.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple RTKit IPC Library
+ * Copyright (C) The Asahi Linux Contributors
+ *
+ * Apple's SoCs come with various co-processors running their RTKit operating
+ * system. This protocol library lets client drivers use the
+ * features those co-processors provide.
+ */
+#ifndef _LINUX_APPLE_RTKIT_H_
+#define _LINUX_APPLE_RTKIT_H_
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/mailbox_client.h>
+
+/*
+ * Struct to represent a shared memory buffer used by RTKit.
+ *
+ * @buffer: Shared memory buffer allocated inside normal RAM.
+ * @iomem: Shared memory buffer controlled by the co-processors.
+ * @size: Size of the shared memory buffer.
+ * @iova: Device VA of shared memory buffer.
+ * @is_mapped: Shared memory buffer is managed by the co-processor.
+ */
+
+struct apple_rtkit_shmem {
+ void *buffer;
+ void __iomem *iomem;
+ size_t size;
+ dma_addr_t iova;
+ bool is_mapped;
+};
+
+/*
+ * Struct to represent implementation-specific RTKit operations.
+ *
+ * @crashed: Called when the co-processor has crashed. Runs in process
+ * context.
+ * @recv_message: Function called when a message from RTKit is received
+ * on a non-system endpoint. Called from a worker thread.
+ * @recv_message_early:
+ * Like recv_message, but called from atomic context. It
+ * should return true if it handled the message. If it
+ * returns false, the message will be passed on to the
+ * worker thread.
+ * @shmem_setup: Set up a shared memory buffer. If bfr.is_mapped is true the
+ * buffer is managed by the co-processor and needs to be mapped.
+ * Otherwise the buffer is managed by Linux and needs to be
+ * allocated. If not specified dma_alloc_coherent is used.
+ * Called in process context.
+ * @shmem_destroy: Undo the shared memory buffer setup in shmem_setup. If not
+ * specified dma_free_coherent is used. Called in process
+ * context.
+ */
+struct apple_rtkit_ops {
+ void (*crashed)(void *cookie);
+ void (*recv_message)(void *cookie, u8 endpoint, u64 message);
+ bool (*recv_message_early)(void *cookie, u8 endpoint, u64 message);
+ int (*shmem_setup)(void *cookie, struct apple_rtkit_shmem *bfr);
+ void (*shmem_destroy)(void *cookie, struct apple_rtkit_shmem *bfr);
+};
+
+struct apple_rtkit;
+
+/*
+ * Initializes the internal state required to handle RTKit. This
+ * should usually be called within _probe.
+ *
+ * @dev: Pointer to the device node this co-processor is associated with
+ * @cookie: opaque cookie passed to all functions defined in rtkit_ops
+ * @mbox_name: mailbox name used to communicate with the co-processor
+ * @mbox_idx: mailbox index to be used if mbox_name is NULL
+ * @ops: pointer to rtkit_ops to be used for this co-processor
+ */
+struct apple_rtkit *devm_apple_rtkit_init(struct device *dev, void *cookie,
+ const char *mbox_name, int mbox_idx,
+ const struct apple_rtkit_ops *ops);
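A hedged sketch of a client wiring this up from probe; everything except the RTKit API itself is illustrative:

static void demo_crashed(void *cookie)
{
	struct device *dev = cookie;

	dev_err(dev, "co-processor crashed\n");
}

static void demo_recv(void *cookie, u8 endpoint, u64 message)
{
	struct device *dev = cookie;

	dev_dbg(dev, "ep %u: 0x%016llx\n", endpoint, message);
}

static const struct apple_rtkit_ops demo_ops = {
	.crashed      = demo_crashed,
	.recv_message = demo_recv,
};

static int demo_probe(struct device *dev)
{
	struct apple_rtkit *rtk =
		devm_apple_rtkit_init(dev, dev, NULL, 0, &demo_ops);

	return PTR_ERR_OR_ZERO(rtk);
}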
+
+/*
+ * Reinitialize internal structures. Must only be called while the
+ * co-processor is held in reset.
+ */
+int apple_rtkit_reinit(struct apple_rtkit *rtk);
+
+/*
+ * Handle RTKit's boot process. Should be called after the CPU of the
+ * co-processor has been started.
+ */
+int apple_rtkit_boot(struct apple_rtkit *rtk);
+
+/*
+ * Quiesce the co-processor.
+ */
+int apple_rtkit_quiesce(struct apple_rtkit *rtk);
+
+/*
+ * Wake the co-processor up from hibernation mode.
+ */
+int apple_rtkit_wake(struct apple_rtkit *rtk);
+
+/*
+ * Shut down the co-processor.
+ */
+int apple_rtkit_shutdown(struct apple_rtkit *rtk);
+
+/*
+ * Checks if RTKit is running and ready to handle messages.
+ */
+bool apple_rtkit_is_running(struct apple_rtkit *rtk);
+
+/*
+ * Checks if RTKit has crashed.
+ */
+bool apple_rtkit_is_crashed(struct apple_rtkit *rtk);
+
+/*
+ * Starts an endpoint. Must be called after boot but before any messages can be
+ * sent or received from that endpoint.
+ */
+int apple_rtkit_start_ep(struct apple_rtkit *rtk, u8 endpoint);
+
+/*
+ * Send a message to the given endpoint.
+ *
+ * @rtk: RTKit reference
+ * @ep: target endpoint
+ * @message: message to be sent
+ * @completion: will be completed once the message has been submitted
+ * to the hardware FIFO. Can be NULL.
+ * @atomic: if set to true this function can be called from atomic
+ * context.
+ */
+int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
+ struct completion *completion, bool atomic);
+
+/*
+ * Send a message to the given endpoint and wait until it has been submitted
+ * to the hardware FIFO.
+ * Will return zero on success and a negative error code on failure
+ * (e.g. -ETIME when the message couldn't be written within the given
+ * timeout)
+ *
+ * @rtk: RTKit reference
+ * @ep: target endpoint
+ * @message: message to be sent
+ * @timeout: timeout in milliseconds to allow the message transmission
+ * to be completed
+ * @atomic: if set to true this function can be called from atomic
+ * context.
+ */
+int apple_rtkit_send_message_wait(struct apple_rtkit *rtk, u8 ep, u64 message,
+ unsigned long timeout, bool atomic);
+
+/*
+ * Process incoming messages in atomic context.
+ * This only guarantees that messages arrive as far as the recv_message_early
+ * callback; drivers expecting to handle incoming messages synchronously
+ * by calling this function must do it that way.
+ * Will return 1 if some data was processed, 0 if none was, or a
+ * negative error code on failure.
+ *
+ * @rtk: RTKit reference
+ */
+int apple_rtkit_poll(struct apple_rtkit *rtk);
+
+#endif /* _LINUX_APPLE_RTKIT_H_ */
diff --git a/include/linux/soc/apple/sart.h b/include/linux/soc/apple/sart.h
new file mode 100644
index 000000000000..2249bf6cde09
--- /dev/null
+++ b/include/linux/soc/apple/sart.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple SART device driver
+ * Copyright (C) The Asahi Linux Contributors
+ *
+ * Apple SART is a simple address filter for DMA transactions.
+ * Regions of physical memory must be added to the SART's allow
+ * list before any DMA can target them. Unlike a proper
+ * IOMMU, no remapping can be done.
+ */
+
+#ifndef _LINUX_SOC_APPLE_SART_H_
+#define _LINUX_SOC_APPLE_SART_H_
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/types.h>
+
+struct apple_sart;
+
+/*
+ * Get a reference to the SART attached to dev.
+ *
+ * Looks for the phandle reference in apple,sart and returns a pointer
+ * to the corresponding apple_sart struct to be used with
+ * apple_sart_add_allowed_region and apple_sart_remove_allowed_region.
+ */
+struct apple_sart *devm_apple_sart_get(struct device *dev);
+
+/*
+ * Adds the region [paddr, paddr+size] to the DMA allow list.
+ *
+ * @sart: SART reference
+ * @paddr: Start address of the region to be used for DMA
+ * @size: Size of the region to be used for DMA.
+ */
+int apple_sart_add_allowed_region(struct apple_sart *sart, phys_addr_t paddr,
+ size_t size);
+
+/*
+ * Removes the region [paddr, paddr+size] from the DMA allow list.
+ *
+ * Note that the exact same paddr and size used for
+ * apple_sart_add_allowed_region() have to be passed.
+ *
+ * @sart: SART reference
+ * @paddr: Start address of the region no longer used for DMA
+ * @size: Size of the region no longer used for DMA.
+ */
+int apple_sart_remove_allowed_region(struct apple_sart *sart, phys_addr_t paddr,
+ size_t size);
+
+#endif /* _LINUX_SOC_APPLE_SART_H_ */
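A hedged usage sketch: regions must be allowed before DMA starts and removed with the exact same arguments afterwards (names outside the SART API are illustrative):

static int demo_dma_window(struct device *dev, phys_addr_t paddr, size_t size)
{
	struct apple_sart *sart = devm_apple_sart_get(dev);
	int ret;

	if (IS_ERR(sart))
		return PTR_ERR(sart);

	ret = apple_sart_add_allowed_region(sart, paddr, size);
	if (ret)
		return ret;

	/* ... perform DMA to [paddr, paddr + size] ... */

	return apple_sart_remove_allowed_region(sart, paddr, size);
}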
diff --git a/include/linux/soc/brcmstb/brcmstb.h b/include/linux/soc/brcmstb/brcmstb.h
index 8e884e0dda0a..f2b768852777 100644
--- a/include/linux/soc/brcmstb/brcmstb.h
+++ b/include/linux/soc/brcmstb/brcmstb.h
@@ -2,6 +2,8 @@
#ifndef __BRCMSTB_SOC_H
#define __BRCMSTB_SOC_H
+#include <linux/kconfig.h>
+
static inline u32 BRCM_ID(u32 reg)
{
return reg >> 28 ? reg >> 16 : reg >> 8;
@@ -12,6 +14,8 @@ static inline u32 BRCM_REV(u32 reg)
return reg & 0xff;
}
+#if IS_ENABLED(CONFIG_SOC_BRCMSTB)
+
/*
* Helper functions for getting family or product id from the
* SoC driver.
@@ -19,4 +23,16 @@ static inline u32 BRCM_REV(u32 reg)
u32 brcmstb_get_family_id(void);
u32 brcmstb_get_product_id(void);
+#else
+static inline u32 brcmstb_get_family_id(void)
+{
+ return 0;
+}
+
+static inline u32 brcmstb_get_product_id(void)
+{
+ return 0;
+}
+#endif
+
#endif /* __BRCMSTB_SOC_H */
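A quick worked decode of the two helpers above:

/*
 * Example: reg = 0x74390010
 *   BRCM_ID(reg)  == 0x7439  (reg >> 28 != 0, so the id is reg >> 16)
 *   BRCM_REV(reg) == 0x10
 */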
diff --git a/include/linux/soc/ixp4xx/cpu.h b/include/linux/soc/ixp4xx/cpu.h
new file mode 100644
index 000000000000..f526ac33afea
--- /dev/null
+++ b/include/linux/soc/ixp4xx/cpu.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IXP4XX cpu type detection
+ *
+ * Copyright (C) 2007 MontaVista Software, Inc.
+ */
+
+#ifndef __SOC_IXP4XX_CPU_H__
+#define __SOC_IXP4XX_CPU_H__
+
+#include <linux/io.h>
+#include <linux/regmap.h>
+#ifdef CONFIG_ARM
+#include <asm/cputype.h>
+#endif
+
+/* Processor id value in CP15 Register 0 */
+#define IXP42X_PROCESSOR_ID_VALUE 0x690541c0 /* including unused 0x690541Ex */
+#define IXP42X_PROCESSOR_ID_MASK 0xffffffc0
+
+#define IXP43X_PROCESSOR_ID_VALUE 0x69054040
+#define IXP43X_PROCESSOR_ID_MASK 0xfffffff0
+
+#define IXP46X_PROCESSOR_ID_VALUE 0x69054200 /* including IXP455 */
+#define IXP46X_PROCESSOR_ID_MASK 0xfffffff0
+
+/* Feature register in the expansion bus controller */
+#define IXP4XX_EXP_CNFG2 0x2c
+
+/* "fuse" bits of IXP_EXP_CFG2 */
+/* All IXP4xx CPUs */
+#define IXP4XX_FEATURE_RCOMP (1 << 0)
+#define IXP4XX_FEATURE_USB_DEVICE (1 << 1)
+#define IXP4XX_FEATURE_HASH (1 << 2)
+#define IXP4XX_FEATURE_AES (1 << 3)
+#define IXP4XX_FEATURE_DES (1 << 4)
+#define IXP4XX_FEATURE_HDLC (1 << 5)
+#define IXP4XX_FEATURE_AAL (1 << 6)
+#define IXP4XX_FEATURE_HSS (1 << 7)
+#define IXP4XX_FEATURE_UTOPIA (1 << 8)
+#define IXP4XX_FEATURE_NPEB_ETH0 (1 << 9)
+#define IXP4XX_FEATURE_NPEC_ETH (1 << 10)
+#define IXP4XX_FEATURE_RESET_NPEA (1 << 11)
+#define IXP4XX_FEATURE_RESET_NPEB (1 << 12)
+#define IXP4XX_FEATURE_RESET_NPEC (1 << 13)
+#define IXP4XX_FEATURE_PCI (1 << 14)
+#define IXP4XX_FEATURE_UTOPIA_PHY_LIMIT (3 << 16)
+#define IXP4XX_FEATURE_XSCALE_MAX_FREQ (3 << 22)
+#define IXP42X_FEATURE_MASK (IXP4XX_FEATURE_RCOMP | \
+ IXP4XX_FEATURE_USB_DEVICE | \
+ IXP4XX_FEATURE_HASH | \
+ IXP4XX_FEATURE_AES | \
+ IXP4XX_FEATURE_DES | \
+ IXP4XX_FEATURE_HDLC | \
+ IXP4XX_FEATURE_AAL | \
+ IXP4XX_FEATURE_HSS | \
+ IXP4XX_FEATURE_UTOPIA | \
+ IXP4XX_FEATURE_NPEB_ETH0 | \
+ IXP4XX_FEATURE_NPEC_ETH | \
+ IXP4XX_FEATURE_RESET_NPEA | \
+ IXP4XX_FEATURE_RESET_NPEB | \
+ IXP4XX_FEATURE_RESET_NPEC | \
+ IXP4XX_FEATURE_PCI | \
+ IXP4XX_FEATURE_UTOPIA_PHY_LIMIT | \
+ IXP4XX_FEATURE_XSCALE_MAX_FREQ)
+
+
+/* IXP43x/46x CPUs */
+#define IXP4XX_FEATURE_ECC_TIMESYNC (1 << 15)
+#define IXP4XX_FEATURE_USB_HOST (1 << 18)
+#define IXP4XX_FEATURE_NPEA_ETH (1 << 19)
+#define IXP43X_FEATURE_MASK (IXP42X_FEATURE_MASK | \
+ IXP4XX_FEATURE_ECC_TIMESYNC | \
+ IXP4XX_FEATURE_USB_HOST | \
+ IXP4XX_FEATURE_NPEA_ETH)
+
+/* IXP46x CPU (including IXP455) only */
+#define IXP4XX_FEATURE_NPEB_ETH_1_TO_3 (1 << 20)
+#define IXP4XX_FEATURE_RSA (1 << 21)
+#define IXP46X_FEATURE_MASK (IXP43X_FEATURE_MASK | \
+ IXP4XX_FEATURE_NPEB_ETH_1_TO_3 | \
+ IXP4XX_FEATURE_RSA)
+
+#ifdef CONFIG_ARCH_IXP4XX
+#define cpu_is_ixp42x_rev_a0() ((read_cpuid_id() & (IXP42X_PROCESSOR_ID_MASK | 0xF)) == \
+ IXP42X_PROCESSOR_ID_VALUE)
+#define cpu_is_ixp42x() ((read_cpuid_id() & IXP42X_PROCESSOR_ID_MASK) == \
+ IXP42X_PROCESSOR_ID_VALUE)
+#define cpu_is_ixp43x() ((read_cpuid_id() & IXP43X_PROCESSOR_ID_MASK) == \
+ IXP43X_PROCESSOR_ID_VALUE)
+#define cpu_is_ixp46x() ((read_cpuid_id() & IXP46X_PROCESSOR_ID_MASK) == \
+ IXP46X_PROCESSOR_ID_VALUE)
+static inline u32 cpu_ixp4xx_features(struct regmap *rmap)
+{
+ u32 val;
+
+ regmap_read(rmap, IXP4XX_EXP_CNFG2, &val);
+ /* For some reason this register is inverted */
+ val = ~val;
+ if (cpu_is_ixp42x_rev_a0())
+ return IXP42X_FEATURE_MASK & ~(IXP4XX_FEATURE_RCOMP |
+ IXP4XX_FEATURE_AES);
+ if (cpu_is_ixp42x())
+ return val & IXP42X_FEATURE_MASK;
+ if (cpu_is_ixp43x())
+ return val & IXP43X_FEATURE_MASK;
+ return val & IXP46X_FEATURE_MASK;
+}
+#else
+#define cpu_is_ixp42x_rev_a0() 0
+#define cpu_is_ixp42x() 0
+#define cpu_is_ixp43x() 0
+#define cpu_is_ixp46x() 0
+static inline u32 cpu_ixp4xx_features(struct regmap *rmap)
+{
+ return 0;
+}
+#endif
+
+#endif /* __SOC_IXP4XX_CPU_H__ */
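A hedged sketch of gating driver features on the fuse bits via cpu_ixp4xx_features(); the regmap is assumed to be the expansion-bus controller syscon backing IXP4XX_EXP_CNFG2:

#include <linux/regmap.h>
#include <linux/soc/ixp4xx/cpu.h>

static bool example_has_usb_host(struct regmap *exp_rmap)
{
	/* The USB host fuse only exists in the IXP43x/46x feature masks. */
	return !!(cpu_ixp4xx_features(exp_rmap) & IXP4XX_FEATURE_USB_HOST);
}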
diff --git a/include/linux/soc/ixp4xx/npe.h b/include/linux/soc/ixp4xx/npe.h
index 2a91f465d456..9efeac777da1 100644
--- a/include/linux/soc/ixp4xx/npe.h
+++ b/include/linux/soc/ixp4xx/npe.h
@@ -3,6 +3,7 @@
#define __IXP4XX_NPE_H
#include <linux/kernel.h>
+#include <linux/regmap.h>
extern const char *npe_names[];
@@ -17,6 +18,7 @@ struct npe_regs {
struct npe {
struct npe_regs __iomem *regs;
+ struct regmap *rmap;
int id;
int valid;
};
diff --git a/include/linux/soc/marvell/octeontx2/asm.h b/include/linux/soc/marvell/octeontx2/asm.h
new file mode 100644
index 000000000000..d683251a0b40
--- /dev/null
+++ b/include/linux/soc/marvell/octeontx2/asm.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __SOC_OTX2_ASM_H
+#define __SOC_OTX2_ASM_H
+
+#include <linux/types.h>
+#if defined(CONFIG_ARM64)
+/*
+ * otx2_lmt_flush is used for LMT store operation.
+ * On octeontx2 platform CPT instruction enqueue and
+ * NIX packet send are only possible via LMTST
+ * operations and it uses LDEOR instruction targeting
+ * the coprocessor address.
+ */
+#define otx2_lmt_flush(ioaddr) \
+({ \
+ u64 result = 0; \
+ __asm__ volatile(".cpu generic+lse\n" \
+ "ldeor xzr, %x[rf], [%[rs]]" \
+ : [rf]"=r" (result) \
+ : [rs]"r" (ioaddr)); \
+ (result); \
+})
+/*
+ * STEORL store to memory with release semantics.
+ * This will avoid using DMB barrier after each LMTST
+ * operation.
+ */
+#define cn10k_lmt_flush(val, addr) \
+({ \
+ __asm__ volatile(".cpu generic+lse\n" \
+ "steorl %x[rf],[%[rs]]" \
+ : [rf] "+r"(val) \
+ : [rs] "r"(addr)); \
+})
+
+static inline u64 otx2_atomic64_fetch_add(u64 incr, u64 *ptr)
+{
+ u64 result;
+
+ asm volatile (".cpu generic+lse\n"
+ "ldadda %x[i], %x[r], [%[b]]"
+ : [r] "=r" (result), "+m" (*ptr)
+ : [i] "r" (incr), [b] "r" (ptr)
+ : "memory");
+ return result;
+}
+
+#else
+#define otx2_lmt_flush(ioaddr) ({ 0; })
+#define cn10k_lmt_flush(val, addr) ({ addr = val; })
+#define otx2_atomic64_fetch_add(incr, ptr) ({ incr; })
+#endif
+
+#endif /* __SOC_OTX2_ASM_H */
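A short sketch of the helpers above; the addresses are placeholders, and the zero-increment fetch-add is the usual trick for atomically reading a hardware-maintained status word:

#include <linux/soc/marvell/octeontx2/asm.h>

static u64 example_submit_and_poll(u64 lmt_ioaddr, u64 *hw_status)
{
	/* LDEOR triggers the queued LMT store; 0 conventionally means retry. */
	if (!otx2_lmt_flush(lmt_ioaddr))
		return 0;

	/* LDADDA with a zero increment atomically reads the status word. */
	return otx2_atomic64_fetch_add(0, hw_status);
}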
diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h
index fd25f0148566..50804ac748bd 100644
--- a/include/linux/soc/mediatek/infracfg.h
+++ b/include/linux/soc/mediatek/infracfg.h
@@ -2,6 +2,238 @@
#ifndef __SOC_MEDIATEK_INFRACFG_H
#define __SOC_MEDIATEK_INFRACFG_H
+#define MT8195_TOP_AXI_PROT_EN_STA1 0x228
+#define MT8195_TOP_AXI_PROT_EN_1_STA1 0x258
+#define MT8195_TOP_AXI_PROT_EN_SET 0x2a0
+#define MT8195_TOP_AXI_PROT_EN_CLR 0x2a4
+#define MT8195_TOP_AXI_PROT_EN_1_SET 0x2a8
+#define MT8195_TOP_AXI_PROT_EN_1_CLR 0x2ac
+#define MT8195_TOP_AXI_PROT_EN_MM_SET 0x2d4
+#define MT8195_TOP_AXI_PROT_EN_MM_CLR 0x2d8
+#define MT8195_TOP_AXI_PROT_EN_MM_STA1 0x2ec
+#define MT8195_TOP_AXI_PROT_EN_2_SET 0x714
+#define MT8195_TOP_AXI_PROT_EN_2_CLR 0x718
+#define MT8195_TOP_AXI_PROT_EN_2_STA1 0x724
+#define MT8195_TOP_AXI_PROT_EN_VDNR_SET 0xb84
+#define MT8195_TOP_AXI_PROT_EN_VDNR_CLR 0xb88
+#define MT8195_TOP_AXI_PROT_EN_VDNR_STA1 0xb90
+#define MT8195_TOP_AXI_PROT_EN_VDNR_1_SET 0xba4
+#define MT8195_TOP_AXI_PROT_EN_VDNR_1_CLR 0xba8
+#define MT8195_TOP_AXI_PROT_EN_VDNR_1_STA1 0xbb0
+#define MT8195_TOP_AXI_PROT_EN_VDNR_2_SET 0xbb8
+#define MT8195_TOP_AXI_PROT_EN_VDNR_2_CLR 0xbbc
+#define MT8195_TOP_AXI_PROT_EN_VDNR_2_STA1 0xbc4
+#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET 0xbcc
+#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR 0xbd0
+#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1 0xbd8
+#define MT8195_TOP_AXI_PROT_EN_MM_2_SET 0xdcc
+#define MT8195_TOP_AXI_PROT_EN_MM_2_CLR 0xdd0
+#define MT8195_TOP_AXI_PROT_EN_MM_2_STA1 0xdd8
+
+#define MT8195_TOP_AXI_PROT_EN_VDOSYS0 BIT(6)
+#define MT8195_TOP_AXI_PROT_EN_VPPSYS0 BIT(10)
+#define MT8195_TOP_AXI_PROT_EN_MFG1 BIT(11)
+#define MT8195_TOP_AXI_PROT_EN_MFG1_2ND GENMASK(22, 21)
+#define MT8195_TOP_AXI_PROT_EN_VPPSYS0_2ND BIT(23)
+#define MT8195_TOP_AXI_PROT_EN_1_MFG1 GENMASK(20, 19)
+#define MT8195_TOP_AXI_PROT_EN_1_CAM BIT(22)
+#define MT8195_TOP_AXI_PROT_EN_2_CAM BIT(0)
+#define MT8195_TOP_AXI_PROT_EN_2_MFG1_2ND GENMASK(6, 5)
+#define MT8195_TOP_AXI_PROT_EN_2_MFG1 BIT(7)
+#define MT8195_TOP_AXI_PROT_EN_2_AUDIO (BIT(9) | BIT(11))
+#define MT8195_TOP_AXI_PROT_EN_2_ADSP (BIT(12) | GENMASK(16, 14))
+#define MT8195_TOP_AXI_PROT_EN_MM_CAM (BIT(0) | BIT(2) | BIT(4))
+#define MT8195_TOP_AXI_PROT_EN_MM_IPE BIT(1)
+#define MT8195_TOP_AXI_PROT_EN_MM_IMG BIT(3)
+#define MT8195_TOP_AXI_PROT_EN_MM_VDOSYS0 GENMASK(21, 17)
+#define MT8195_TOP_AXI_PROT_EN_MM_VPPSYS1 GENMASK(8, 5)
+#define MT8195_TOP_AXI_PROT_EN_MM_VENC (BIT(9) | BIT(11))
+#define MT8195_TOP_AXI_PROT_EN_MM_VENC_CORE1 (BIT(10) | BIT(12))
+#define MT8195_TOP_AXI_PROT_EN_MM_VDEC0 BIT(13)
+#define MT8195_TOP_AXI_PROT_EN_MM_VDEC1 BIT(14)
+#define MT8195_TOP_AXI_PROT_EN_MM_VDOSYS1_2ND BIT(22)
+#define MT8195_TOP_AXI_PROT_EN_MM_VPPSYS1_2ND BIT(23)
+#define MT8195_TOP_AXI_PROT_EN_MM_CAM_2ND BIT(24)
+#define MT8195_TOP_AXI_PROT_EN_MM_IMG_2ND BIT(25)
+#define MT8195_TOP_AXI_PROT_EN_MM_VENC_2ND BIT(26)
+#define MT8195_TOP_AXI_PROT_EN_MM_WPESYS BIT(27)
+#define MT8195_TOP_AXI_PROT_EN_MM_VDEC0_2ND BIT(28)
+#define MT8195_TOP_AXI_PROT_EN_MM_VDEC1_2ND BIT(29)
+#define MT8195_TOP_AXI_PROT_EN_MM_VDOSYS1 GENMASK(31, 30)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS0_2ND (GENMASK(1, 0) | BIT(4) | BIT(11))
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VENC BIT(2)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VENC_CORE1 (BIT(3) | BIT(15))
+#define MT8195_TOP_AXI_PROT_EN_MM_2_CAM (BIT(5) | BIT(17))
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS1 (GENMASK(7, 6) | BIT(18))
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS0 GENMASK(9, 8)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VDOSYS1 BIT(10)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VDEC2_2ND BIT(12)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VDEC0_2ND BIT(13)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_WPESYS_2ND BIT(14)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_IPE BIT(16)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VDEC2 BIT(21)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VDEC0 BIT(22)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_WPESYS GENMASK(24, 23)
+#define MT8195_TOP_AXI_PROT_EN_VDNR_1_EPD_TX BIT(1)
+#define MT8195_TOP_AXI_PROT_EN_VDNR_1_DP_TX BIT(2)
+#define MT8195_TOP_AXI_PROT_EN_VDNR_PCIE_MAC_P0 (BIT(11) | BIT(28))
+#define MT8195_TOP_AXI_PROT_EN_VDNR_PCIE_MAC_P1 (BIT(12) | BIT(29))
+#define MT8195_TOP_AXI_PROT_EN_VDNR_1_PCIE_MAC_P0 BIT(13)
+#define MT8195_TOP_AXI_PROT_EN_VDNR_1_PCIE_MAC_P1 BIT(14)
+#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_MFG1 (BIT(17) | BIT(19))
+#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VPPSYS0 BIT(20)
+#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VDOSYS0 BIT(21)
+
+#define MT8192_TOP_AXI_PROT_EN_STA1 0x228
+#define MT8192_TOP_AXI_PROT_EN_1_STA1 0x258
+#define MT8192_TOP_AXI_PROT_EN_SET 0x2a0
+#define MT8192_TOP_AXI_PROT_EN_CLR 0x2a4
+#define MT8192_TOP_AXI_PROT_EN_1_SET 0x2a8
+#define MT8192_TOP_AXI_PROT_EN_1_CLR 0x2ac
+#define MT8192_TOP_AXI_PROT_EN_MM_SET 0x2d4
+#define MT8192_TOP_AXI_PROT_EN_MM_CLR 0x2d8
+#define MT8192_TOP_AXI_PROT_EN_MM_STA1 0x2ec
+#define MT8192_TOP_AXI_PROT_EN_2_SET 0x714
+#define MT8192_TOP_AXI_PROT_EN_2_CLR 0x718
+#define MT8192_TOP_AXI_PROT_EN_2_STA1 0x724
+#define MT8192_TOP_AXI_PROT_EN_VDNR_SET 0xb84
+#define MT8192_TOP_AXI_PROT_EN_VDNR_CLR 0xb88
+#define MT8192_TOP_AXI_PROT_EN_VDNR_STA1 0xb90
+#define MT8192_TOP_AXI_PROT_EN_MM_2_SET 0xdcc
+#define MT8192_TOP_AXI_PROT_EN_MM_2_CLR 0xdd0
+#define MT8192_TOP_AXI_PROT_EN_MM_2_STA1 0xdd8
+
+#define MT8192_TOP_AXI_PROT_EN_DISP (BIT(6) | BIT(23))
+#define MT8192_TOP_AXI_PROT_EN_CONN (BIT(13) | BIT(18))
+#define MT8192_TOP_AXI_PROT_EN_CONN_2ND BIT(14)
+#define MT8192_TOP_AXI_PROT_EN_MFG1 GENMASK(22, 21)
+#define MT8192_TOP_AXI_PROT_EN_1_CONN BIT(10)
+#define MT8192_TOP_AXI_PROT_EN_1_MFG1 BIT(21)
+#define MT8192_TOP_AXI_PROT_EN_1_CAM BIT(22)
+#define MT8192_TOP_AXI_PROT_EN_2_CAM BIT(0)
+#define MT8192_TOP_AXI_PROT_EN_2_ADSP BIT(3)
+#define MT8192_TOP_AXI_PROT_EN_2_AUDIO BIT(4)
+#define MT8192_TOP_AXI_PROT_EN_2_MFG1 GENMASK(6, 5)
+#define MT8192_TOP_AXI_PROT_EN_2_MFG1_2ND BIT(7)
+#define MT8192_TOP_AXI_PROT_EN_MM_CAM (BIT(0) | BIT(2))
+#define MT8192_TOP_AXI_PROT_EN_MM_DISP (BIT(0) | BIT(2) | \
+ BIT(10) | BIT(12) | \
+ BIT(14) | BIT(16) | \
+ BIT(24) | BIT(26))
+#define MT8192_TOP_AXI_PROT_EN_MM_CAM_2ND (BIT(1) | BIT(3))
+#define MT8192_TOP_AXI_PROT_EN_MM_DISP_2ND (BIT(1) | BIT(3) | \
+ BIT(15) | BIT(17) | \
+ BIT(25) | BIT(27))
+#define MT8192_TOP_AXI_PROT_EN_MM_ISP2 BIT(14)
+#define MT8192_TOP_AXI_PROT_EN_MM_ISP2_2ND BIT(15)
+#define MT8192_TOP_AXI_PROT_EN_MM_IPE BIT(16)
+#define MT8192_TOP_AXI_PROT_EN_MM_IPE_2ND BIT(17)
+#define MT8192_TOP_AXI_PROT_EN_MM_VDEC BIT(24)
+#define MT8192_TOP_AXI_PROT_EN_MM_VDEC_2ND BIT(25)
+#define MT8192_TOP_AXI_PROT_EN_MM_VENC BIT(26)
+#define MT8192_TOP_AXI_PROT_EN_MM_VENC_2ND BIT(27)
+#define MT8192_TOP_AXI_PROT_EN_MM_2_ISP BIT(8)
+#define MT8192_TOP_AXI_PROT_EN_MM_2_DISP (BIT(8) | BIT(12))
+#define MT8192_TOP_AXI_PROT_EN_MM_2_ISP_2ND BIT(9)
+#define MT8192_TOP_AXI_PROT_EN_MM_2_DISP_2ND (BIT(9) | BIT(13))
+#define MT8192_TOP_AXI_PROT_EN_MM_2_MDP BIT(12)
+#define MT8192_TOP_AXI_PROT_EN_MM_2_MDP_2ND BIT(13)
+#define MT8192_TOP_AXI_PROT_EN_VDNR_CAM BIT(21)
+
+#define MT8186_TOP_AXI_PROT_EN_SET (0x2A0)
+#define MT8186_TOP_AXI_PROT_EN_CLR (0x2A4)
+#define MT8186_TOP_AXI_PROT_EN_STA (0x228)
+#define MT8186_TOP_AXI_PROT_EN_1_SET (0x2A8)
+#define MT8186_TOP_AXI_PROT_EN_1_CLR (0x2AC)
+#define MT8186_TOP_AXI_PROT_EN_1_STA (0x258)
+#define MT8186_TOP_AXI_PROT_EN_2_SET (0x2B0)
+#define MT8186_TOP_AXI_PROT_EN_2_CLR (0x2B4)
+#define MT8186_TOP_AXI_PROT_EN_2_STA (0x26C)
+#define MT8186_TOP_AXI_PROT_EN_3_SET (0x2B8)
+#define MT8186_TOP_AXI_PROT_EN_3_CLR (0x2BC)
+#define MT8186_TOP_AXI_PROT_EN_3_STA (0x2C8)
+
+/* MFG1 */
+#define MT8186_TOP_AXI_PROT_EN_1_MFG1_STEP1 (GENMASK(28, 27))
+#define MT8186_TOP_AXI_PROT_EN_MFG1_STEP2 (GENMASK(22, 21))
+#define MT8186_TOP_AXI_PROT_EN_MFG1_STEP3 (BIT(25))
+#define MT8186_TOP_AXI_PROT_EN_1_MFG1_STEP4 (BIT(29))
+/* DIS */
+#define MT8186_TOP_AXI_PROT_EN_1_DIS_STEP1 (GENMASK(12, 11))
+#define MT8186_TOP_AXI_PROT_EN_DIS_STEP2 (GENMASK(2, 1) | GENMASK(11, 10))
+/* IMG */
+#define MT8186_TOP_AXI_PROT_EN_1_IMG_STEP1 (BIT(23))
+#define MT8186_TOP_AXI_PROT_EN_1_IMG_STEP2 (BIT(15))
+/* IPE */
+#define MT8186_TOP_AXI_PROT_EN_1_IPE_STEP1 (BIT(24))
+#define MT8186_TOP_AXI_PROT_EN_1_IPE_STEP2 (BIT(16))
+/* CAM */
+#define MT8186_TOP_AXI_PROT_EN_1_CAM_STEP1 (GENMASK(22, 21))
+#define MT8186_TOP_AXI_PROT_EN_1_CAM_STEP2 (GENMASK(14, 13))
+/* VENC */
+#define MT8186_TOP_AXI_PROT_EN_1_VENC_STEP1 (BIT(31))
+#define MT8186_TOP_AXI_PROT_EN_1_VENC_STEP2 (BIT(19))
+/* VDEC */
+#define MT8186_TOP_AXI_PROT_EN_1_VDEC_STEP1 (BIT(30))
+#define MT8186_TOP_AXI_PROT_EN_1_VDEC_STEP2 (BIT(17))
+/* WPE */
+#define MT8186_TOP_AXI_PROT_EN_2_WPE_STEP1 (BIT(17))
+#define MT8186_TOP_AXI_PROT_EN_2_WPE_STEP2 (BIT(16))
+/* CONN_ON */
+#define MT8186_TOP_AXI_PROT_EN_1_CONN_ON_STEP1 (BIT(18))
+#define MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP2 (BIT(14))
+#define MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP3 (BIT(13))
+#define MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP4 (BIT(16))
+/* ADSP_TOP */
+#define MT8186_TOP_AXI_PROT_EN_3_ADSP_TOP_STEP1 (GENMASK(12, 11))
+#define MT8186_TOP_AXI_PROT_EN_3_ADSP_TOP_STEP2 (GENMASK(1, 0))
+
+#define MT8183_TOP_AXI_PROT_EN_STA1 0x228
+#define MT8183_TOP_AXI_PROT_EN_STA1_1 0x258
+#define MT8183_TOP_AXI_PROT_EN_SET 0x2a0
+#define MT8183_TOP_AXI_PROT_EN_CLR 0x2a4
+#define MT8183_TOP_AXI_PROT_EN_1_SET 0x2a8
+#define MT8183_TOP_AXI_PROT_EN_1_CLR 0x2ac
+#define MT8183_TOP_AXI_PROT_EN_MCU_SET 0x2c4
+#define MT8183_TOP_AXI_PROT_EN_MCU_CLR 0x2c8
+#define MT8183_TOP_AXI_PROT_EN_MCU_STA1 0x2e4
+#define MT8183_TOP_AXI_PROT_EN_MM_SET 0x2d4
+#define MT8183_TOP_AXI_PROT_EN_MM_CLR 0x2d8
+#define MT8183_TOP_AXI_PROT_EN_MM_STA1 0x2ec
+
+#define MT8183_TOP_AXI_PROT_EN_DISP (BIT(10) | BIT(11))
+#define MT8183_TOP_AXI_PROT_EN_CONN (BIT(13) | BIT(14))
+#define MT8183_TOP_AXI_PROT_EN_MFG (BIT(21) | BIT(22))
+#define MT8183_TOP_AXI_PROT_EN_CAM BIT(28)
+#define MT8183_TOP_AXI_PROT_EN_VPU_TOP BIT(27)
+#define MT8183_TOP_AXI_PROT_EN_1_DISP (BIT(16) | BIT(17))
+#define MT8183_TOP_AXI_PROT_EN_1_MFG GENMASK(21, 19)
+#define MT8183_TOP_AXI_PROT_EN_MM_ISP (BIT(3) | BIT(8))
+#define MT8183_TOP_AXI_PROT_EN_MM_ISP_2ND BIT(10)
+#define MT8183_TOP_AXI_PROT_EN_MM_CAM (BIT(4) | BIT(5) | \
+ BIT(9) | BIT(13))
+#define MT8183_TOP_AXI_PROT_EN_MM_VPU_TOP (GENMASK(9, 6) | \
+ BIT(12))
+#define MT8183_TOP_AXI_PROT_EN_MM_VPU_TOP_2ND (BIT(10) | BIT(11))
+#define MT8183_TOP_AXI_PROT_EN_MM_CAM_2ND BIT(11)
+#define MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE0_2ND (BIT(0) | BIT(2) | \
+ BIT(4))
+#define MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE1_2ND (BIT(1) | BIT(3) | \
+ BIT(5))
+#define MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE0 BIT(6)
+#define MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE1 BIT(7)
+
+#define MT8183_SMI_COMMON_CLAMP_EN 0x3c0
+#define MT8183_SMI_COMMON_CLAMP_EN_SET 0x3c4
+#define MT8183_SMI_COMMON_CLAMP_EN_CLR 0x3c8
+
+#define MT8183_SMI_COMMON_SMI_CLAMP_DISP GENMASK(7, 0)
+#define MT8183_SMI_COMMON_SMI_CLAMP_VENC BIT(1)
+#define MT8183_SMI_COMMON_SMI_CLAMP_ISP BIT(2)
+#define MT8183_SMI_COMMON_SMI_CLAMP_CAM (BIT(3) | BIT(4))
+#define MT8183_SMI_COMMON_SMI_CLAMP_VPU_TOP (BIT(5) | BIT(6))
+#define MT8183_SMI_COMMON_SMI_CLAMP_VDEC BIT(7)
+
#define MT8173_TOP_AXI_PROT_EN_MCI_M2 BIT(0)
#define MT8173_TOP_AXI_PROT_EN_MM_M0 BIT(1)
#define MT8173_TOP_AXI_PROT_EN_MM_M1 BIT(2)
@@ -21,6 +253,14 @@
#define MT8173_TOP_AXI_PROT_EN_MFG_M1 BIT(22)
#define MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT BIT(23)
+#define MT8167_TOP_AXI_PROT_EN_MM_EMI BIT(1)
+#define MT8167_TOP_AXI_PROT_EN_MCU_MFG BIT(2)
+#define MT8167_TOP_AXI_PROT_EN_CONN_EMI BIT(4)
+#define MT8167_TOP_AXI_PROT_EN_MFG_EMI BIT(5)
+#define MT8167_TOP_AXI_PROT_EN_CONN_MCU BIT(8)
+#define MT8167_TOP_AXI_PROT_EN_MCU_CONN BIT(9)
+#define MT8167_TOP_AXI_PROT_EN_MCU_MM BIT(11)
+
#define MT2701_TOP_AXI_PROT_EN_MM_M0 BIT(1)
#define MT2701_TOP_AXI_PROT_EN_CONN_M BIT(2)
#define MT2701_TOP_AXI_PROT_EN_CONN_S BIT(8)
@@ -32,6 +272,17 @@
#define MT7622_TOP_AXI_PROT_EN_WB (BIT(2) | BIT(6) | \
BIT(7) | BIT(8))
+#define INFRA_TOPAXI_PROTECTEN 0x0220
+#define INFRA_TOPAXI_PROTECTSTA1 0x0228
+#define INFRA_TOPAXI_PROTECTEN_SET 0x0260
+#define INFRA_TOPAXI_PROTECTEN_CLR 0x0264
+
+#define MT8192_INFRA_CTRL 0x290
+#define MT8192_INFRA_CTRL_DISABLE_MFG2ACP BIT(9)
+
+#define REG_INFRA_MISC 0xf00
+#define F_DDR_4GB_SUPPORT_EN BIT(13)
+
int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask,
bool reg_update);
int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask,
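A hedged sketch of the bus-protection handshake these masks feed, using one of the MT8183 masks; the infracfg regmap and the reg_update flag choice are illustrative:

#include <linux/regmap.h>
#include <linux/soc/mediatek/infracfg.h>

static int example_disp_power_cycle(struct regmap *infracfg)
{
	int ret;

	/* Block AXI traffic into the domain before cutting its power. */
	ret = mtk_infracfg_set_bus_protection(infracfg,
					      MT8183_TOP_AXI_PROT_EN_DISP,
					      false);
	if (ret)
		return ret;

	/* ... power the domain down and back up here ... */

	return mtk_infracfg_clear_bus_protection(infracfg,
						 MT8183_TOP_AXI_PROT_EN_DISP,
						 false);
}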
diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
index a74c1d5acdf3..2b498f4f3946 100644
--- a/include/linux/soc/mediatek/mtk-cmdq.h
+++ b/include/linux/soc/mediatek/mtk-cmdq.h
@@ -11,7 +11,8 @@
#include <linux/mailbox/mtk-cmdq-mailbox.h>
#include <linux/timer.h>
-#define CMDQ_NO_TIMEOUT 0xffffffffu
+#define CMDQ_ADDR_HIGH(addr) ((u32)(((addr) >> 16) & GENMASK(31, 0)))
+#define CMDQ_ADDR_LOW(addr) ((u16)(addr) | BIT(1))
struct cmdq_pkt;
@@ -22,12 +23,8 @@ struct cmdq_client_reg {
};
struct cmdq_client {
- spinlock_t lock;
- u32 pkt_cnt;
struct mbox_client client;
struct mbox_chan *chan;
- struct timer_list timer;
- u32 timeout_ms; /* in unit of microsecond */
};
/**
@@ -49,13 +46,10 @@ int cmdq_dev_get_client_reg(struct device *dev,
* cmdq_mbox_create() - create CMDQ mailbox client and channel
* @dev: device of CMDQ mailbox client
* @index: index of CMDQ mailbox channel
- * @timeout: timeout of a pkt execution by GCE, in unit of microsecond, set
- * CMDQ_NO_TIMEOUT if a timer is not used.
*
* Return: CMDQ mailbox client pointer
*/
-struct cmdq_client *cmdq_mbox_create(struct device *dev, int index,
- u32 timeout);
+struct cmdq_client *cmdq_mbox_create(struct device *dev, int index);
/**
* cmdq_mbox_destroy() - destroy CMDQ mailbox client and channel
@@ -102,14 +96,90 @@ int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value);
int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value, u32 mask);
+/**
+ * cmdq_pkt_read_s() - append read_s command to the CMDQ packet
+ * @pkt: the CMDQ packet
+ * @high_addr_reg_idx: internal register ID which contains high address of pa
+ * @addr_low: low address of pa
+ * @reg_idx: the CMDQ internal register ID to cache read data
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx, u16 addr_low,
+ u16 reg_idx);
+
+/**
+ * cmdq_pkt_write_s() - append write_s command to the CMDQ packet
+ * @pkt: the CMDQ packet
+ * @high_addr_reg_idx: internal register ID which contains high address of pa
+ * @addr_low: low address of pa
+ * @src_reg_idx: the CMDQ internal register ID which cache source value
+ *
+ * Return: 0 for success; else the error code is returned
+ *
+ * Supports writing a value to a physical address without a subsys ID. Use
+ * CMDQ_ADDR_HIGH() to get the high address and call cmdq_pkt_assign() to
+ * assign it into an internal register, and use CMDQ_ADDR_LOW() to encode the
+ * low address for the addr_low parameter when calling this function.
+ */
+int cmdq_pkt_write_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
+ u16 addr_low, u16 src_reg_idx);
+
+/**
+ * cmdq_pkt_write_s_mask() - append write_s with mask command to the CMDQ packet
+ * @pkt: the CMDQ packet
+ * @high_addr_reg_idx: internal register ID which contains high address of pa
+ * @addr_low: low address of pa
+ * @src_reg_idx: the CMDQ internal register ID which cache source value
+ * @mask: the specified target address mask, use U32_MAX if no need
+ *
+ * Return: 0 for success; else the error code is returned
+ *
+ * Supports writing a value to a physical address without a subsys ID. Use
+ * CMDQ_ADDR_HIGH() to get the high address and call cmdq_pkt_assign() to
+ * assign it into an internal register, and use CMDQ_ADDR_LOW() to encode the
+ * low address for the addr_low parameter when calling this function.
+ */
+int cmdq_pkt_write_s_mask(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
+ u16 addr_low, u16 src_reg_idx, u32 mask);
+
+/**
+ * cmdq_pkt_write_s_value() - append write_s command to the CMDQ packet which
+ * write value to a physical address
+ * @pkt: the CMDQ packet
+ * @high_addr_reg_idx: internal register ID which contains high address of pa
+ * @addr_low: low address of pa
+ * @value: the specified target value
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_write_s_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
+ u16 addr_low, u32 value);
+
+/**
+ * cmdq_pkt_write_s_mask_value() - append write_s command with mask to the CMDQ
+ * packet which write value to a physical
+ * address
+ * @pkt: the CMDQ packet
+ * @high_addr_reg_idx: internal register ID which contains high address of pa
+ * @addr_low: low address of pa
+ * @value: the specified target value
+ * @mask: the specified target mask
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
+ u16 addr_low, u32 value, u32 mask);
+
/**
* cmdq_pkt_wfe() - append wait for event command to the CMDQ packet
* @pkt: the CMDQ packet
- * @event: the desired event type to "wait and CLEAR"
+ * @event: the desired event type to wait for
+ * @clear: whether to clear the event after it arrives
*
* Return: 0 for success; else the error code is returned
*/
-int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event);
+int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear);
/**
* cmdq_pkt_clear_event() - append clear event command to the CMDQ packet
@@ -121,6 +191,15 @@ int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event);
int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event);
/**
+ * cmdq_pkt_set_event() - append set event command to the CMDQ packet
+ * @pkt: the CMDQ packet
+ * @event: the desired event to be set
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event);
+
+/**
* cmdq_pkt_poll() - Append polling command to the CMDQ packet, ask GCE to
* execute an instruction that wait for a specified
* hardware register to check for the value w/o mask.
@@ -152,32 +231,50 @@ int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
*/
int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value, u32 mask);
+
/**
- * cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ
- * packet and call back at the end of done packet
+ * cmdq_pkt_assign() - Append logic assign command to the CMDQ packet, ask GCE
+ * to execute an instruction that sets a constant value into an
+ * internal register for use as a value, mask or address in a later
+ * read/write instruction.
* @pkt: the CMDQ packet
- * @cb: called at the end of done packet
- * @data: this data will pass back to cb
+ * @reg_idx: the CMDQ internal register ID
+ * @value: the specified value
*
* Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value);
+
+/**
+ * cmdq_pkt_jump() - Append jump command to the CMDQ packet, ask GCE
+ * to execute an instruction that changes the current thread's PC to
+ * a physical address which should contain more instructions.
+ * @pkt: the CMDQ packet
+ * @addr: physical address of target instruction buffer
*
- * Trigger CMDQ to asynchronously execute the CMDQ packet and call back
- * at the end of done packet. Note that this is an ASYNC function. When the
- * function returned, it may or may not be finished.
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr);
+
+/**
+ * cmdq_pkt_finalize() - Append EOC and jump command to pkt.
+ * @pkt: the CMDQ packet
+ *
+ * Return: 0 for success; else the error code is returned
*/
-int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
- void *data);
+int cmdq_pkt_finalize(struct cmdq_pkt *pkt);
/**
- * cmdq_pkt_flush() - trigger CMDQ to execute the CMDQ packet
+ * cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ
+ * packet and invoke the callback when the packet is done
* @pkt: the CMDQ packet
*
* Return: 0 for success; else the error code is returned
*
- * Trigger CMDQ to execute the CMDQ packet. Note that this is a
- * synchronous flush function. When the function returned, the recorded
- * commands have been done.
+ * Trigger CMDQ to asynchronously execute the CMDQ packet and invoke the
+ * callback when the packet is done. Note that this is an ASYNC function:
+ * when it returns, packet execution may or may not have finished.
*/
-int cmdq_pkt_flush(struct cmdq_pkt *pkt);
+int cmdq_pkt_flush_async(struct cmdq_pkt *pkt);
#endif /* __MTK_CMDQ_H__ */
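Pulling the reworked API together, a minimal sketch of one packet: assign the high address half, write through write_s, wait on an event, then finalize and flush. Packet creation through the driver's usual cmdq_pkt_create() path is assumed, and the register index, event ID and address are placeholders:

#include <linux/soc/mediatek/mtk-cmdq.h>

#define EXAMPLE_SPR_IDX	0	/* placeholder GCE internal register ID */

static int example_cmdq_seq(struct cmdq_pkt *pkt, dma_addr_t pa,
			    u32 value, u16 event)
{
	int ret;

	/* Stash the high half of the physical address in an internal reg. */
	ret = cmdq_pkt_assign(pkt, EXAMPLE_SPR_IDX, CMDQ_ADDR_HIGH(pa));
	if (ret)
		return ret;

	/* write_s pairs that register with the encoded low half. */
	ret = cmdq_pkt_write_s_value(pkt, EXAMPLE_SPR_IDX,
				     CMDQ_ADDR_LOW(pa), value);
	if (ret)
		return ret;

	/* Wait for the event and clear it, per the new wfe() signature. */
	ret = cmdq_pkt_wfe(pkt, event, true);
	if (ret)
		return ret;

	ret = cmdq_pkt_finalize(pkt);
	if (ret)
		return ret;

	return cmdq_pkt_flush_async(pkt);
}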
diff --git a/include/linux/soc/mediatek/mtk-mmsys.h b/include/linux/soc/mediatek/mtk-mmsys.h
new file mode 100644
index 000000000000..d2b02bb43768
--- /dev/null
+++ b/include/linux/soc/mediatek/mtk-mmsys.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ */
+
+#ifndef __MTK_MMSYS_H
+#define __MTK_MMSYS_H
+
+enum mtk_ddp_comp_id;
+struct device;
+
+enum mtk_ddp_comp_id {
+ DDP_COMPONENT_AAL0,
+ DDP_COMPONENT_AAL1,
+ DDP_COMPONENT_BLS,
+ DDP_COMPONENT_CCORR,
+ DDP_COMPONENT_COLOR0,
+ DDP_COMPONENT_COLOR1,
+ DDP_COMPONENT_DITHER,
+ DDP_COMPONENT_DITHER0 = DDP_COMPONENT_DITHER,
+ DDP_COMPONENT_DITHER1,
+ DDP_COMPONENT_DP_INTF0,
+ DDP_COMPONENT_DP_INTF1,
+ DDP_COMPONENT_DPI0,
+ DDP_COMPONENT_DPI1,
+ DDP_COMPONENT_DSC0,
+ DDP_COMPONENT_DSC1,
+ DDP_COMPONENT_DSI0,
+ DDP_COMPONENT_DSI1,
+ DDP_COMPONENT_DSI2,
+ DDP_COMPONENT_DSI3,
+ DDP_COMPONENT_GAMMA,
+ DDP_COMPONENT_MERGE0,
+ DDP_COMPONENT_MERGE1,
+ DDP_COMPONENT_MERGE2,
+ DDP_COMPONENT_MERGE3,
+ DDP_COMPONENT_MERGE4,
+ DDP_COMPONENT_MERGE5,
+ DDP_COMPONENT_OD0,
+ DDP_COMPONENT_OD1,
+ DDP_COMPONENT_OVL0,
+ DDP_COMPONENT_OVL_2L0,
+ DDP_COMPONENT_OVL_2L1,
+ DDP_COMPONENT_OVL_2L2,
+ DDP_COMPONENT_OVL1,
+ DDP_COMPONENT_POSTMASK0,
+ DDP_COMPONENT_PWM0,
+ DDP_COMPONENT_PWM1,
+ DDP_COMPONENT_PWM2,
+ DDP_COMPONENT_RDMA0,
+ DDP_COMPONENT_RDMA1,
+ DDP_COMPONENT_RDMA2,
+ DDP_COMPONENT_RDMA4,
+ DDP_COMPONENT_UFOE,
+ DDP_COMPONENT_WDMA0,
+ DDP_COMPONENT_WDMA1,
+ DDP_COMPONENT_ID_MAX,
+};
+
+void mtk_mmsys_ddp_connect(struct device *dev,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next);
+
+void mtk_mmsys_ddp_disconnect(struct device *dev,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next);
+
+void mtk_mmsys_ddp_dpi_fmt_config(struct device *dev, u32 val);
+
+#endif /* __MTK_MMSYS_H */
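A short sketch of routing a display path with the new helpers; the device is assumed to be the mmsys device resolved by the DRM driver:

#include <linux/soc/mediatek/mtk-mmsys.h>

static void example_route_ovl_to_dsi(struct device *mmsys_dev)
{
	/* Chain OVL0 -> RDMA0 -> DSI0 in the display data path. */
	mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_OVL0,
			      DDP_COMPONENT_RDMA0);
	mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_RDMA0,
			      DDP_COMPONENT_DSI0);
}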
diff --git a/include/linux/soc/mediatek/mtk-mutex.h b/include/linux/soc/mediatek/mtk-mutex.h
new file mode 100644
index 000000000000..b335c2837cd8
--- /dev/null
+++ b/include/linux/soc/mediatek/mtk-mutex.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ */
+
+#ifndef MTK_MUTEX_H
+#define MTK_MUTEX_H
+
+struct regmap;
+struct device;
+struct mtk_mutex;
+
+enum mtk_mutex_mod_index {
+ /* MDP table index */
+ MUTEX_MOD_IDX_MDP_RDMA0,
+ MUTEX_MOD_IDX_MDP_RSZ0,
+ MUTEX_MOD_IDX_MDP_RSZ1,
+ MUTEX_MOD_IDX_MDP_TDSHP0,
+ MUTEX_MOD_IDX_MDP_WROT0,
+ MUTEX_MOD_IDX_MDP_WDMA,
+ MUTEX_MOD_IDX_MDP_AAL0,
+ MUTEX_MOD_IDX_MDP_CCORR0,
+ MUTEX_MOD_IDX_MDP_HDR0,
+ MUTEX_MOD_IDX_MDP_COLOR0,
+
+ MUTEX_MOD_IDX_MAX /* ALWAYS keep at the end */
+};
+
+enum mtk_mutex_sof_index {
+ MUTEX_SOF_IDX_SINGLE_MODE,
+
+ MUTEX_SOF_IDX_MAX /* ALWAYS keep at the end */
+};
+
+struct mtk_mutex *mtk_mutex_get(struct device *dev);
+int mtk_mutex_prepare(struct mtk_mutex *mutex);
+void mtk_mutex_add_comp(struct mtk_mutex *mutex,
+ enum mtk_ddp_comp_id id);
+void mtk_mutex_enable(struct mtk_mutex *mutex);
+int mtk_mutex_enable_by_cmdq(struct mtk_mutex *mutex,
+ void *pkt);
+void mtk_mutex_disable(struct mtk_mutex *mutex);
+void mtk_mutex_remove_comp(struct mtk_mutex *mutex,
+ enum mtk_ddp_comp_id id);
+void mtk_mutex_unprepare(struct mtk_mutex *mutex);
+void mtk_mutex_put(struct mtk_mutex *mutex);
+void mtk_mutex_acquire(struct mtk_mutex *mutex);
+void mtk_mutex_release(struct mtk_mutex *mutex);
+int mtk_mutex_write_mod(struct mtk_mutex *mutex,
+ enum mtk_mutex_mod_index idx,
+ bool clear);
+int mtk_mutex_write_sof(struct mtk_mutex *mutex,
+ enum mtk_mutex_sof_index idx);
+
+#endif /* MTK_MUTEX_H */
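A hedged sketch of the mutex lifecycle around one display path; it assumes mtk_mutex_get() reports failure via ERR_PTR, and borrows component IDs from mtk-mmsys.h above:

#include <linux/err.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>

static int example_mutex_path(struct device *dev)
{
	struct mtk_mutex *mutex;
	int ret;

	mutex = mtk_mutex_get(dev);
	if (IS_ERR(mutex))
		return PTR_ERR(mutex);

	ret = mtk_mutex_prepare(mutex);
	if (ret)
		goto put;

	/* Gate the components of one path on the same start-of-frame. */
	mtk_mutex_add_comp(mutex, DDP_COMPONENT_OVL0);
	mtk_mutex_add_comp(mutex, DDP_COMPONENT_RDMA0);
	mtk_mutex_enable(mutex);

	/* ... run the pipeline ... */

	mtk_mutex_disable(mutex);
	mtk_mutex_remove_comp(mutex, DDP_COMPONENT_RDMA0);
	mtk_mutex_remove_comp(mutex, DDP_COMPONENT_OVL0);
	mtk_mutex_unprepare(mutex);
put:
	mtk_mutex_put(mutex);
	return ret;
}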
diff --git a/include/linux/soc/mediatek/mtk_sip_svc.h b/include/linux/soc/mediatek/mtk_sip_svc.h
index 082398e0cfb1..0761128b4354 100644
--- a/include/linux/soc/mediatek/mtk_sip_svc.h
+++ b/include/linux/soc/mediatek/mtk_sip_svc.h
@@ -22,4 +22,7 @@
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, MTK_SIP_SMC_CONVENTION, \
ARM_SMCCC_OWNER_SIP, fn_id)
+/* IOMMU related SMC call */
+#define MTK_SIP_KERNEL_IOMMU_CONTROL MTK_SIP_SMC_CMD(0x514)
+
#endif
diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
new file mode 100644
index 000000000000..4450c8b7a1cb
--- /dev/null
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -0,0 +1,148 @@
+#ifndef __MTK_WED_H
+#define __MTK_WED_H
+
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
+#include <linux/regmap.h>
+#include <linux/pci.h>
+
+#define MTK_WED_TX_QUEUES 2
+
+struct mtk_wed_hw;
+struct mtk_wdma_desc;
+
+enum mtk_wed_bus_tye {
+ MTK_WED_BUS_PCIE,
+ MTK_WED_BUS_AXI,
+};
+
+struct mtk_wed_ring {
+ struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
+ u32 desc_size;
+ int size;
+
+ u32 reg_base;
+ void __iomem *wpdma;
+};
+
+struct mtk_wed_device {
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ const struct mtk_wed_ops *ops;
+ struct device *dev;
+ struct mtk_wed_hw *hw;
+ bool init_done, running;
+ int wdma_idx;
+ int irq;
+
+ struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
+ struct mtk_wed_ring txfree_ring;
+ struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
+
+ struct {
+ int size;
+ void **pages;
+ struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
+ } buf_ring;
+
+ /* filled by driver: */
+ struct {
+ union {
+ struct platform_device *platform_dev;
+ struct pci_dev *pci_dev;
+ };
+ enum mtk_wed_bus_tye bus_type;
+
+ u32 wpdma_phys;
+ u32 wpdma_int;
+ u32 wpdma_mask;
+ u32 wpdma_tx;
+ u32 wpdma_txfree;
+
+ u16 token_start;
+ unsigned int nbuf;
+
+ u8 tx_tbit[MTK_WED_TX_QUEUES];
+ u8 txfree_tbit;
+
+ u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
+ int (*offload_enable)(struct mtk_wed_device *wed);
+ void (*offload_disable)(struct mtk_wed_device *wed);
+ } wlan;
+#endif
+};
+
+struct mtk_wed_ops {
+ int (*attach)(struct mtk_wed_device *dev);
+ int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
+ void __iomem *regs);
+ int (*txfree_ring_setup)(struct mtk_wed_device *dev,
+ void __iomem *regs);
+ void (*detach)(struct mtk_wed_device *dev);
+
+ void (*stop)(struct mtk_wed_device *dev);
+ void (*start)(struct mtk_wed_device *dev, u32 irq_mask);
+ void (*reset_dma)(struct mtk_wed_device *dev);
+
+ u32 (*reg_read)(struct mtk_wed_device *dev, u32 reg);
+ void (*reg_write)(struct mtk_wed_device *dev, u32 reg, u32 val);
+
+ u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
+ void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
+};
+
+extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
+
+static inline int
+mtk_wed_device_attach(struct mtk_wed_device *dev)
+{
+ int ret = -ENODEV;
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ rcu_read_lock();
+ dev->ops = rcu_dereference(mtk_soc_wed_ops);
+ if (dev->ops)
+ ret = dev->ops->attach(dev);
+ else
+ rcu_read_unlock();
+
+ if (ret)
+ dev->ops = NULL;
+#endif
+
+ return ret;
+}
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+#define mtk_wed_device_active(_dev) !!(_dev)->ops
+#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
+#define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask)
+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \
+ (_dev)->ops->tx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
+ (_dev)->ops->txfree_ring_setup(_dev, _regs)
+#define mtk_wed_device_reg_read(_dev, _reg) \
+ (_dev)->ops->reg_read(_dev, _reg)
+#define mtk_wed_device_reg_write(_dev, _reg, _val) \
+ (_dev)->ops->reg_write(_dev, _reg, _val)
+#define mtk_wed_device_irq_get(_dev, _mask) \
+ (_dev)->ops->irq_get(_dev, _mask)
+#define mtk_wed_device_irq_set_mask(_dev, _mask) \
+ (_dev)->ops->irq_set_mask(_dev, _mask)
+#else
+static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
+{
+ return false;
+}
+#define mtk_wed_device_detach(_dev) do {} while (0)
+#define mtk_wed_device_start(_dev, _mask) do {} while (0)
+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV
+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) -ENODEV
+#define mtk_wed_device_reg_read(_dev, _reg) 0
+#define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
+#define mtk_wed_device_irq_get(_dev, _mask) 0
+#define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
+#endif
+
+#endif
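A minimal sketch of a WLAN driver attaching to WED; it assumes dev->wlan has already been filled in, and relies on the stubs above to degrade to -ENODEV when WED support is not built:

#include <linux/soc/mediatek/mtk_wed.h>

static int example_wed_init(struct mtk_wed_device *wed, void __iomem *regs)
{
	int ret;

	/* Resolves the ops via RCU; fails when no WED hardware claimed us. */
	ret = mtk_wed_device_attach(wed);
	if (ret)
		return ret;

	ret = mtk_wed_device_tx_ring_setup(wed, 0, regs);
	if (ret) {
		mtk_wed_device_detach(wed);
		return ret;
	}

	mtk_wed_device_start(wed, ~0);
	return 0;
}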
diff --git a/include/linux/soc/pxa/cpu.h b/include/linux/soc/pxa/cpu.h
new file mode 100644
index 000000000000..5782450ee45c
--- /dev/null
+++ b/include/linux/soc/pxa/cpu.h
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Author: Nicolas Pitre
+ * Created: Jun 15, 2001
+ * Copyright: MontaVista Software Inc.
+ */
+
+#ifndef __SOC_PXA_CPU_H
+#define __SOC_PXA_CPU_H
+
+#ifdef CONFIG_ARM
+#include <asm/cputype.h>
+#endif
+
+/*
+ * CPU Stepping CPU_ID JTAG_ID
+ *
+ * PXA210 B0 0x69052922 0x2926C013
+ * PXA210 B1 0x69052923 0x3926C013
+ * PXA210 B2 0x69052924 0x4926C013
+ * PXA210 C0 0x69052D25 0x5926C013
+ *
+ * PXA250 A0 0x69052100 0x09264013
+ * PXA250 A1 0x69052101 0x19264013
+ * PXA250 B0 0x69052902 0x29264013
+ * PXA250 B1 0x69052903 0x39264013
+ * PXA250 B2 0x69052904 0x49264013
+ * PXA250 C0 0x69052D05 0x59264013
+ *
+ * PXA255 A0 0x69052D06 0x69264013
+ *
+ * PXA26x A0 0x69052903 0x39264013
+ * PXA26x B0 0x69052D05 0x59264013
+ *
+ * PXA27x A0 0x69054110 0x09265013
+ * PXA27x A1 0x69054111 0x19265013
+ * PXA27x B0 0x69054112 0x29265013
+ * PXA27x B1 0x69054113 0x39265013
+ * PXA27x C0 0x69054114 0x49265013
+ * PXA27x C5 0x69054117 0x79265013
+ *
+ * PXA30x A0 0x69056880 0x0E648013
+ * PXA30x A1 0x69056881 0x1E648013
+ * PXA31x A0 0x69056890 0x0E649013
+ * PXA31x A1 0x69056891 0x1E649013
+ * PXA31x A2 0x69056892 0x2E649013
+ * PXA32x B1 0x69056825 0x5E642013
+ * PXA32x B2 0x69056826 0x6E642013
+ *
+ * PXA930 B0 0x69056835 0x5E643013
+ * PXA930 B1 0x69056837 0x7E643013
+ * PXA930 B2 0x69056838 0x8E643013
+ *
+ * PXA935 A0 0x56056931 0x1E653013
+ * PXA935 B0 0x56056936 0x6E653013
+ * PXA935 B1 0x56056938 0x8E653013
+ */
+#ifdef CONFIG_PXA25x
+#define __cpu_is_pxa210(id) \
+ ({ \
+ unsigned int _id = (id) & 0xf3f0; \
+ _id == 0x2120; \
+ })
+
+#define __cpu_is_pxa250(id) \
+ ({ \
+ unsigned int _id = (id) & 0xf3ff; \
+ _id <= 0x2105; \
+ })
+
+#define __cpu_is_pxa255(id) \
+ ({ \
+ unsigned int _id = (id) & 0xffff; \
+ _id == 0x2d06; \
+ })
+
+#define __cpu_is_pxa25x(id) \
+ ({ \
+ unsigned int _id = (id) & 0xf300; \
+ _id == 0x2100; \
+ })
+#else
+#define __cpu_is_pxa210(id) (0)
+#define __cpu_is_pxa250(id) (0)
+#define __cpu_is_pxa255(id) (0)
+#define __cpu_is_pxa25x(id) (0)
+#endif
+
+#ifdef CONFIG_PXA27x
+#define __cpu_is_pxa27x(id) \
+ ({ \
+ unsigned int _id = (id) >> 4 & 0xfff; \
+ _id == 0x411; \
+ })
+#else
+#define __cpu_is_pxa27x(id) (0)
+#endif
+
+#ifdef CONFIG_CPU_PXA300
+#define __cpu_is_pxa300(id) \
+ ({ \
+ unsigned int _id = (id) >> 4 & 0xfff; \
+ _id == 0x688; \
+ })
+#else
+#define __cpu_is_pxa300(id) (0)
+#endif
+
+#ifdef CONFIG_CPU_PXA310
+#define __cpu_is_pxa310(id) \
+ ({ \
+ unsigned int _id = (id) >> 4 & 0xfff; \
+ _id == 0x689; \
+ })
+#else
+#define __cpu_is_pxa310(id) (0)
+#endif
+
+#ifdef CONFIG_CPU_PXA320
+#define __cpu_is_pxa320(id) \
+ ({ \
+ unsigned int _id = (id) >> 4 & 0xfff; \
+ _id == 0x603 || _id == 0x682; \
+ })
+#else
+#define __cpu_is_pxa320(id) (0)
+#endif
+
+#ifdef CONFIG_CPU_PXA930
+#define __cpu_is_pxa930(id) \
+ ({ \
+ unsigned int _id = (id) >> 4 & 0xfff; \
+ _id == 0x683; \
+ })
+#else
+#define __cpu_is_pxa930(id) (0)
+#endif
+
+#ifdef CONFIG_CPU_PXA935
+#define __cpu_is_pxa935(id) \
+ ({ \
+ unsigned int _id = (id) >> 4 & 0xfff; \
+ _id == 0x693; \
+ })
+#else
+#define __cpu_is_pxa935(id) (0)
+#endif
+
+#define cpu_is_pxa210() \
+ ({ \
+ __cpu_is_pxa210(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa250() \
+ ({ \
+ __cpu_is_pxa250(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa255() \
+ ({ \
+ __cpu_is_pxa255(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa25x() \
+ ({ \
+ __cpu_is_pxa25x(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa27x() \
+ ({ \
+ __cpu_is_pxa27x(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa300() \
+ ({ \
+ __cpu_is_pxa300(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa310() \
+ ({ \
+ __cpu_is_pxa310(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa320() \
+ ({ \
+ __cpu_is_pxa320(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa930() \
+ ({ \
+ __cpu_is_pxa930(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa935() \
+ ({ \
+ __cpu_is_pxa935(read_cpuid_id()); \
+ })
+
+
+
+/*
+ * CPUID Core Generation Bit
+ * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x
+ */
+#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
+#define __cpu_is_pxa2xx(id) \
+ ({ \
+ unsigned int _id = (id) >> 13 & 0x7; \
+ _id <= 0x2; \
+ })
+#else
+#define __cpu_is_pxa2xx(id) (0)
+#endif
+
+#ifdef CONFIG_PXA3xx
+#define __cpu_is_pxa3xx(id) \
+ ({ \
+ __cpu_is_pxa300(id) \
+ || __cpu_is_pxa310(id) \
+ || __cpu_is_pxa320(id) \
+ || __cpu_is_pxa93x(id); \
+ })
+#else
+#define __cpu_is_pxa3xx(id) (0)
+#endif
+
+#if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935)
+#define __cpu_is_pxa93x(id) \
+ ({ \
+ __cpu_is_pxa930(id) \
+ || __cpu_is_pxa935(id); \
+ })
+#else
+#define __cpu_is_pxa93x(id) (0)
+#endif
+
+#define cpu_is_pxa2xx() \
+ ({ \
+ __cpu_is_pxa2xx(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa3xx() \
+ ({ \
+ __cpu_is_pxa3xx(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa93x() \
+ ({ \
+ __cpu_is_pxa93x(read_cpuid_id()); \
+ })
+
+#endif
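A tiny sketch of the cpu_is_*() predicates in driver code on ARM builds; the FIFO depths are hypothetical:

#include <linux/soc/pxa/cpu.h>

static unsigned int example_fifo_depth(void)
{
	/* Pretend PXA27x parts have a deeper FIFO than PXA25x ones. */
	return cpu_is_pxa27x() ? 32 : 16;
}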
diff --git a/include/linux/soc/pxa/mfp.h b/include/linux/soc/pxa/mfp.h
new file mode 100644
index 000000000000..39779cbed0c0
--- /dev/null
+++ b/include/linux/soc/pxa/mfp.h
@@ -0,0 +1,470 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common Multi-Function Pin Definitions
+ *
+ * Copyright (C) 2007 Marvell International Ltd.
+ *
+ * 2007-8-21: eric miao <eric.miao@marvell.com>
+ * initial version
+ */
+
+#ifndef __ASM_PLAT_MFP_H
+#define __ASM_PLAT_MFP_H
+
+#define mfp_to_gpio(m) ((m) % 256)
+
+/* list of all the configurable MFP pins */
+enum {
+ MFP_PIN_INVALID = -1,
+
+ MFP_PIN_GPIO0 = 0,
+ MFP_PIN_GPIO1,
+ MFP_PIN_GPIO2,
+ MFP_PIN_GPIO3,
+ MFP_PIN_GPIO4,
+ MFP_PIN_GPIO5,
+ MFP_PIN_GPIO6,
+ MFP_PIN_GPIO7,
+ MFP_PIN_GPIO8,
+ MFP_PIN_GPIO9,
+ MFP_PIN_GPIO10,
+ MFP_PIN_GPIO11,
+ MFP_PIN_GPIO12,
+ MFP_PIN_GPIO13,
+ MFP_PIN_GPIO14,
+ MFP_PIN_GPIO15,
+ MFP_PIN_GPIO16,
+ MFP_PIN_GPIO17,
+ MFP_PIN_GPIO18,
+ MFP_PIN_GPIO19,
+ MFP_PIN_GPIO20,
+ MFP_PIN_GPIO21,
+ MFP_PIN_GPIO22,
+ MFP_PIN_GPIO23,
+ MFP_PIN_GPIO24,
+ MFP_PIN_GPIO25,
+ MFP_PIN_GPIO26,
+ MFP_PIN_GPIO27,
+ MFP_PIN_GPIO28,
+ MFP_PIN_GPIO29,
+ MFP_PIN_GPIO30,
+ MFP_PIN_GPIO31,
+ MFP_PIN_GPIO32,
+ MFP_PIN_GPIO33,
+ MFP_PIN_GPIO34,
+ MFP_PIN_GPIO35,
+ MFP_PIN_GPIO36,
+ MFP_PIN_GPIO37,
+ MFP_PIN_GPIO38,
+ MFP_PIN_GPIO39,
+ MFP_PIN_GPIO40,
+ MFP_PIN_GPIO41,
+ MFP_PIN_GPIO42,
+ MFP_PIN_GPIO43,
+ MFP_PIN_GPIO44,
+ MFP_PIN_GPIO45,
+ MFP_PIN_GPIO46,
+ MFP_PIN_GPIO47,
+ MFP_PIN_GPIO48,
+ MFP_PIN_GPIO49,
+ MFP_PIN_GPIO50,
+ MFP_PIN_GPIO51,
+ MFP_PIN_GPIO52,
+ MFP_PIN_GPIO53,
+ MFP_PIN_GPIO54,
+ MFP_PIN_GPIO55,
+ MFP_PIN_GPIO56,
+ MFP_PIN_GPIO57,
+ MFP_PIN_GPIO58,
+ MFP_PIN_GPIO59,
+ MFP_PIN_GPIO60,
+ MFP_PIN_GPIO61,
+ MFP_PIN_GPIO62,
+ MFP_PIN_GPIO63,
+ MFP_PIN_GPIO64,
+ MFP_PIN_GPIO65,
+ MFP_PIN_GPIO66,
+ MFP_PIN_GPIO67,
+ MFP_PIN_GPIO68,
+ MFP_PIN_GPIO69,
+ MFP_PIN_GPIO70,
+ MFP_PIN_GPIO71,
+ MFP_PIN_GPIO72,
+ MFP_PIN_GPIO73,
+ MFP_PIN_GPIO74,
+ MFP_PIN_GPIO75,
+ MFP_PIN_GPIO76,
+ MFP_PIN_GPIO77,
+ MFP_PIN_GPIO78,
+ MFP_PIN_GPIO79,
+ MFP_PIN_GPIO80,
+ MFP_PIN_GPIO81,
+ MFP_PIN_GPIO82,
+ MFP_PIN_GPIO83,
+ MFP_PIN_GPIO84,
+ MFP_PIN_GPIO85,
+ MFP_PIN_GPIO86,
+ MFP_PIN_GPIO87,
+ MFP_PIN_GPIO88,
+ MFP_PIN_GPIO89,
+ MFP_PIN_GPIO90,
+ MFP_PIN_GPIO91,
+ MFP_PIN_GPIO92,
+ MFP_PIN_GPIO93,
+ MFP_PIN_GPIO94,
+ MFP_PIN_GPIO95,
+ MFP_PIN_GPIO96,
+ MFP_PIN_GPIO97,
+ MFP_PIN_GPIO98,
+ MFP_PIN_GPIO99,
+ MFP_PIN_GPIO100,
+ MFP_PIN_GPIO101,
+ MFP_PIN_GPIO102,
+ MFP_PIN_GPIO103,
+ MFP_PIN_GPIO104,
+ MFP_PIN_GPIO105,
+ MFP_PIN_GPIO106,
+ MFP_PIN_GPIO107,
+ MFP_PIN_GPIO108,
+ MFP_PIN_GPIO109,
+ MFP_PIN_GPIO110,
+ MFP_PIN_GPIO111,
+ MFP_PIN_GPIO112,
+ MFP_PIN_GPIO113,
+ MFP_PIN_GPIO114,
+ MFP_PIN_GPIO115,
+ MFP_PIN_GPIO116,
+ MFP_PIN_GPIO117,
+ MFP_PIN_GPIO118,
+ MFP_PIN_GPIO119,
+ MFP_PIN_GPIO120,
+ MFP_PIN_GPIO121,
+ MFP_PIN_GPIO122,
+ MFP_PIN_GPIO123,
+ MFP_PIN_GPIO124,
+ MFP_PIN_GPIO125,
+ MFP_PIN_GPIO126,
+ MFP_PIN_GPIO127,
+
+ MFP_PIN_GPIO128,
+ MFP_PIN_GPIO129,
+ MFP_PIN_GPIO130,
+ MFP_PIN_GPIO131,
+ MFP_PIN_GPIO132,
+ MFP_PIN_GPIO133,
+ MFP_PIN_GPIO134,
+ MFP_PIN_GPIO135,
+ MFP_PIN_GPIO136,
+ MFP_PIN_GPIO137,
+ MFP_PIN_GPIO138,
+ MFP_PIN_GPIO139,
+ MFP_PIN_GPIO140,
+ MFP_PIN_GPIO141,
+ MFP_PIN_GPIO142,
+ MFP_PIN_GPIO143,
+ MFP_PIN_GPIO144,
+ MFP_PIN_GPIO145,
+ MFP_PIN_GPIO146,
+ MFP_PIN_GPIO147,
+ MFP_PIN_GPIO148,
+ MFP_PIN_GPIO149,
+ MFP_PIN_GPIO150,
+ MFP_PIN_GPIO151,
+ MFP_PIN_GPIO152,
+ MFP_PIN_GPIO153,
+ MFP_PIN_GPIO154,
+ MFP_PIN_GPIO155,
+ MFP_PIN_GPIO156,
+ MFP_PIN_GPIO157,
+ MFP_PIN_GPIO158,
+ MFP_PIN_GPIO159,
+ MFP_PIN_GPIO160,
+ MFP_PIN_GPIO161,
+ MFP_PIN_GPIO162,
+ MFP_PIN_GPIO163,
+ MFP_PIN_GPIO164,
+ MFP_PIN_GPIO165,
+ MFP_PIN_GPIO166,
+ MFP_PIN_GPIO167,
+ MFP_PIN_GPIO168,
+ MFP_PIN_GPIO169,
+ MFP_PIN_GPIO170,
+ MFP_PIN_GPIO171,
+ MFP_PIN_GPIO172,
+ MFP_PIN_GPIO173,
+ MFP_PIN_GPIO174,
+ MFP_PIN_GPIO175,
+ MFP_PIN_GPIO176,
+ MFP_PIN_GPIO177,
+ MFP_PIN_GPIO178,
+ MFP_PIN_GPIO179,
+ MFP_PIN_GPIO180,
+ MFP_PIN_GPIO181,
+ MFP_PIN_GPIO182,
+ MFP_PIN_GPIO183,
+ MFP_PIN_GPIO184,
+ MFP_PIN_GPIO185,
+ MFP_PIN_GPIO186,
+ MFP_PIN_GPIO187,
+ MFP_PIN_GPIO188,
+ MFP_PIN_GPIO189,
+ MFP_PIN_GPIO190,
+ MFP_PIN_GPIO191,
+
+ MFP_PIN_GPIO255 = 255,
+
+ MFP_PIN_GPIO0_2,
+ MFP_PIN_GPIO1_2,
+ MFP_PIN_GPIO2_2,
+ MFP_PIN_GPIO3_2,
+ MFP_PIN_GPIO4_2,
+ MFP_PIN_GPIO5_2,
+ MFP_PIN_GPIO6_2,
+ MFP_PIN_GPIO7_2,
+ MFP_PIN_GPIO8_2,
+ MFP_PIN_GPIO9_2,
+ MFP_PIN_GPIO10_2,
+ MFP_PIN_GPIO11_2,
+ MFP_PIN_GPIO12_2,
+ MFP_PIN_GPIO13_2,
+ MFP_PIN_GPIO14_2,
+ MFP_PIN_GPIO15_2,
+ MFP_PIN_GPIO16_2,
+ MFP_PIN_GPIO17_2,
+
+ MFP_PIN_ULPI_STP,
+ MFP_PIN_ULPI_NXT,
+ MFP_PIN_ULPI_DIR,
+
+ MFP_PIN_nXCVREN,
+ MFP_PIN_DF_CLE_nOE,
+ MFP_PIN_DF_nADV1_ALE,
+ MFP_PIN_DF_SCLK_E,
+ MFP_PIN_DF_SCLK_S,
+ MFP_PIN_nBE0,
+ MFP_PIN_nBE1,
+ MFP_PIN_DF_nADV2_ALE,
+ MFP_PIN_DF_INT_RnB,
+ MFP_PIN_DF_nCS0,
+ MFP_PIN_DF_nCS1,
+ MFP_PIN_nLUA,
+ MFP_PIN_nLLA,
+ MFP_PIN_DF_nWE,
+ MFP_PIN_DF_ALE_nWE,
+ MFP_PIN_DF_nRE_nOE,
+ MFP_PIN_DF_ADDR0,
+ MFP_PIN_DF_ADDR1,
+ MFP_PIN_DF_ADDR2,
+ MFP_PIN_DF_ADDR3,
+ MFP_PIN_DF_IO0,
+ MFP_PIN_DF_IO1,
+ MFP_PIN_DF_IO2,
+ MFP_PIN_DF_IO3,
+ MFP_PIN_DF_IO4,
+ MFP_PIN_DF_IO5,
+ MFP_PIN_DF_IO6,
+ MFP_PIN_DF_IO7,
+ MFP_PIN_DF_IO8,
+ MFP_PIN_DF_IO9,
+ MFP_PIN_DF_IO10,
+ MFP_PIN_DF_IO11,
+ MFP_PIN_DF_IO12,
+ MFP_PIN_DF_IO13,
+ MFP_PIN_DF_IO14,
+ MFP_PIN_DF_IO15,
+ MFP_PIN_DF_nCS0_SM_nCS2,
+ MFP_PIN_DF_nCS1_SM_nCS3,
+ MFP_PIN_SM_nCS0,
+ MFP_PIN_SM_nCS1,
+ MFP_PIN_DF_WEn,
+ MFP_PIN_DF_REn,
+ MFP_PIN_DF_CLE_SM_OEn,
+ MFP_PIN_DF_ALE_SM_WEn,
+ MFP_PIN_DF_RDY0,
+ MFP_PIN_DF_RDY1,
+
+ MFP_PIN_SM_SCLK,
+ MFP_PIN_SM_BE0,
+ MFP_PIN_SM_BE1,
+ MFP_PIN_SM_ADV,
+ MFP_PIN_SM_ADVMUX,
+ MFP_PIN_SM_RDY,
+
+ MFP_PIN_MMC1_DAT7,
+ MFP_PIN_MMC1_DAT6,
+ MFP_PIN_MMC1_DAT5,
+ MFP_PIN_MMC1_DAT4,
+ MFP_PIN_MMC1_DAT3,
+ MFP_PIN_MMC1_DAT2,
+ MFP_PIN_MMC1_DAT1,
+ MFP_PIN_MMC1_DAT0,
+ MFP_PIN_MMC1_CMD,
+ MFP_PIN_MMC1_CLK,
+ MFP_PIN_MMC1_CD,
+ MFP_PIN_MMC1_WP,
+
+ /* additional pins on PXA930 */
+ MFP_PIN_GSIM_UIO,
+ MFP_PIN_GSIM_UCLK,
+ MFP_PIN_GSIM_UDET,
+ MFP_PIN_GSIM_nURST,
+ MFP_PIN_PMIC_INT,
+ MFP_PIN_RDY,
+
+ /* additional pins on MMP2 */
+ MFP_PIN_TWSI1_SCL,
+ MFP_PIN_TWSI1_SDA,
+ MFP_PIN_TWSI4_SCL,
+ MFP_PIN_TWSI4_SDA,
+ MFP_PIN_CLK_REQ,
+
+ MFP_PIN_MAX,
+};
+
+/*
+ * a possible MFP configuration is represented by a 32-bit integer
+ *
+ * bit 0.. 9 - MFP Pin Number (1024 Pins Maximum)
+ * bit 10..12 - Alternate Function Selection
+ * bit 13..15 - Drive Strength
+ * bit 16..18 - Low Power Mode State
+ * bit 19..20 - Low Power Mode Edge Detection
+ * bit 21..22 - Run Mode Pull State
+ *
+ * to facilitate the definition, the following macros are provided
+ *
+ * MFP_CFG_DEFAULT - default MFP configuration value, with
+ * alternate function = 0,
+ * drive strength = fast 3mA (MFP_DS03X)
+ * low power mode = default
+ * edge detection = none
+ *
+ * MFP_CFG - default MFPR value with alternate function
+ * MFP_CFG_DRV - default MFPR value with alternate function and
+ * pin drive strength
+ * MFP_CFG_LPM - default MFPR value with alternate function and
+ * low power mode
+ * MFP_CFG_X - default MFPR value with alternate function,
+ * pin drive strength and low power mode
+ */
+
+typedef unsigned long mfp_cfg_t;
+
+#define MFP_PIN(x) ((x) & 0x3ff)
+
+#define MFP_AF0 (0x0 << 10)
+#define MFP_AF1 (0x1 << 10)
+#define MFP_AF2 (0x2 << 10)
+#define MFP_AF3 (0x3 << 10)
+#define MFP_AF4 (0x4 << 10)
+#define MFP_AF5 (0x5 << 10)
+#define MFP_AF6 (0x6 << 10)
+#define MFP_AF7 (0x7 << 10)
+#define MFP_AF_MASK (0x7 << 10)
+#define MFP_AF(x) (((x) >> 10) & 0x7)
+
+#define MFP_DS01X (0x0 << 13)
+#define MFP_DS02X (0x1 << 13)
+#define MFP_DS03X (0x2 << 13)
+#define MFP_DS04X (0x3 << 13)
+#define MFP_DS06X (0x4 << 13)
+#define MFP_DS08X (0x5 << 13)
+#define MFP_DS10X (0x6 << 13)
+#define MFP_DS13X (0x7 << 13)
+#define MFP_DS_MASK (0x7 << 13)
+#define MFP_DS(x) (((x) >> 13) & 0x7)
+
+#define MFP_LPM_DEFAULT (0x0 << 16)
+#define MFP_LPM_DRIVE_LOW (0x1 << 16)
+#define MFP_LPM_DRIVE_HIGH (0x2 << 16)
+#define MFP_LPM_PULL_LOW (0x3 << 16)
+#define MFP_LPM_PULL_HIGH (0x4 << 16)
+#define MFP_LPM_FLOAT (0x5 << 16)
+#define MFP_LPM_INPUT (0x6 << 16)
+#define MFP_LPM_STATE_MASK (0x7 << 16)
+#define MFP_LPM_STATE(x) (((x) >> 16) & 0x7)
+
+#define MFP_LPM_EDGE_NONE (0x0 << 19)
+#define MFP_LPM_EDGE_RISE (0x1 << 19)
+#define MFP_LPM_EDGE_FALL (0x2 << 19)
+#define MFP_LPM_EDGE_BOTH (0x3 << 19)
+#define MFP_LPM_EDGE_MASK (0x3 << 19)
+#define MFP_LPM_EDGE(x) (((x) >> 19) & 0x3)
+
+#define MFP_PULL_NONE (0x0 << 21)
+#define MFP_PULL_LOW (0x1 << 21)
+#define MFP_PULL_HIGH (0x2 << 21)
+#define MFP_PULL_BOTH (0x3 << 21)
+#define MFP_PULL_FLOAT (0x4 << 21)
+#define MFP_PULL_MASK (0x7 << 21)
+#define MFP_PULL(x) (((x) >> 21) & 0x7)
+
+#define MFP_CFG_DEFAULT (MFP_AF0 | MFP_DS03X | MFP_LPM_DEFAULT |\
+ MFP_LPM_EDGE_NONE | MFP_PULL_NONE)
+
+#define MFP_CFG(pin, af) \
+ ((MFP_CFG_DEFAULT & ~MFP_AF_MASK) |\
+ (MFP_PIN(MFP_PIN_##pin) | MFP_##af))
+
+#define MFP_CFG_DRV(pin, af, drv) \
+ ((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_DS_MASK)) |\
+ (MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_##drv))
+
+#define MFP_CFG_LPM(pin, af, lpm) \
+ ((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_LPM_STATE_MASK)) |\
+ (MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_LPM_##lpm))
+
+#define MFP_CFG_X(pin, af, drv, lpm) \
+ ((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_DS_MASK | MFP_LPM_STATE_MASK)) |\
+ (MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_##drv | MFP_LPM_##lpm))
+
+#if defined(CONFIG_PXA3xx) || defined(CONFIG_ARCH_MMP)
+/*
+ * Each MFP pin has an MFPR register; since the register offset varies
+ * between processors, processor-specific code should initialize the pin
+ * offsets via mfp_init_base() and mfp_init_addr()
+ *
+ * mfp_init_base() - accepts a virtual base for all MFPR registers and
+ * initializes the MFP table to a default state
+ *
+ * mfp_init_addr() - accepts a table of "mfp_addr_map" structures, each of
+ * which represents a range of MFP pins from "start" to "end", with the
+ * offset beginning at "offset"; to define a single pin, set "end" = -1.
+ *
+ * use
+ *
+ * MFP_ADDR_X() to define a range of pins
+ * MFP_ADDR() to define a single pin
+ * MFP_ADDR_END to signal the end of pin offset definitions
+ */
+struct mfp_addr_map {
+ unsigned int start;
+ unsigned int end;
+ unsigned long offset;
+};
+
+#define MFP_ADDR_X(start, end, offset) \
+ { MFP_PIN_##start, MFP_PIN_##end, offset }
+
+#define MFP_ADDR(pin, offset) \
+ { MFP_PIN_##pin, -1, offset }
+
+#define MFP_ADDR_END { MFP_PIN_INVALID, 0 }
+
+void mfp_init_base(void __iomem *mfpr_base);
+void mfp_init_addr(struct mfp_addr_map *map);
+
+/*
+ * mfp_{read, write}() - for direct read/write access to the MFPR register
+ * mfp_config() - for configuring a group of MFPR registers
+ * mfp_config_lpm() - configuring all low power MFPR registers for suspend
+ * mfp_config_run() - configuring all run time MFPR registers after resume
+ */
+unsigned long mfp_read(int mfp);
+void mfp_write(int mfp, unsigned long mfpr_val);
+void mfp_config(unsigned long *mfp_cfgs, int num);
+void mfp_config_run(void);
+void mfp_config_lpm(void);
+#endif /* CONFIG_PXA3xx || CONFIG_ARCH_MMP */
+
+#endif /* __ASM_PLAT_MFP_H */
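A minimal board-file sketch of the MFP_CFG*() macros feeding mfp_config(); the pins and alternate functions are placeholders, and mfp_config() is only available under CONFIG_PXA3xx/CONFIG_ARCH_MMP:

#include <linux/kernel.h>
#include <linux/soc/pxa/mfp.h>

/* Hypothetical board pins: GPIO19 on AF2, GPIO20 pulled high in low power. */
static unsigned long example_board_mfp[] = {
	MFP_CFG(GPIO19, AF2),
	MFP_CFG_LPM(GPIO20, AF1, PULL_HIGH),
};

static void example_board_init_pins(void)
{
	mfp_config(example_board_mfp, ARRAY_SIZE(example_board_mfp));
}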
diff --git a/include/linux/soc/pxa/smemc.h b/include/linux/soc/pxa/smemc.h
new file mode 100644
index 000000000000..f1ffea236c15
--- /dev/null
+++ b/include/linux/soc/pxa/smemc.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __PXA_REGS_H
+#define __PXA_REGS_H
+
+#include <linux/types.h>
+
+void pxa_smemc_set_pcmcia_timing(int sock, u32 mcmem, u32 mcatt, u32 mcio);
+void pxa_smemc_set_pcmcia_socket(int nr);
+int pxa2xx_smemc_get_sdram_rows(void);
+unsigned int pxa3xx_smemc_get_memclkdiv(void);
+void __iomem *pxa_smemc_get_mdrefr(void);
+
+#endif
diff --git a/include/linux/soc/qcom/apr.h b/include/linux/soc/qcom/apr.h
index c5d52e2cb275..23c5b30f3511 100644
--- a/include/linux/soc/qcom/apr.h
+++ b/include/linux/soc/qcom/apr.h
@@ -7,6 +7,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <dt-bindings/soc/qcom,apr.h>
+#include <dt-bindings/soc/qcom,gpr.h>
extern struct bus_type aprbus;
@@ -75,31 +76,92 @@ struct apr_resp_pkt {
int payload_size;
};
+struct gpr_hdr {
+ uint32_t version:4;
+ uint32_t hdr_size:4;
+ uint32_t pkt_size:24;
+ uint32_t dest_domain:8;
+ uint32_t src_domain:8;
+ uint32_t reserved:16;
+ uint32_t src_port;
+ uint32_t dest_port;
+ uint32_t token;
+ uint32_t opcode;
+} __packed;
+
+struct gpr_pkt {
+ struct gpr_hdr hdr;
+ uint32_t payload[];
+};
+
+struct gpr_resp_pkt {
+ struct gpr_hdr hdr;
+ void *payload;
+ int payload_size;
+};
+
+#define GPR_HDR_SIZE sizeof(struct gpr_hdr)
+#define GPR_PKT_VER 0x0
+#define GPR_PKT_HEADER_WORD_SIZE ((sizeof(struct gpr_pkt) + 3) >> 2)
+#define GPR_PKT_HEADER_BYTE_SIZE (GPR_PKT_HEADER_WORD_SIZE << 2)
+
+#define GPR_BASIC_RSP_RESULT 0x02001005
+
+struct gpr_ibasic_rsp_result_t {
+ uint32_t opcode;
+ uint32_t status;
+};
+
+#define GPR_BASIC_EVT_ACCEPTED 0x02001006
+
+struct gpr_ibasic_rsp_accepted_t {
+ uint32_t opcode;
+};
+
/* Bits 0 to 15 -- Minor version, Bits 16 to 31 -- Major version */
#define APR_SVC_MAJOR_VERSION(v) ((v >> 16) & 0xFF)
#define APR_SVC_MINOR_VERSION(v) (v & 0xFF)
+typedef int (*gpr_port_cb) (struct gpr_resp_pkt *d, void *priv, int op);
+struct packet_router;
+struct pkt_router_svc {
+ struct device *dev;
+ gpr_port_cb callback;
+ struct packet_router *pr;
+ spinlock_t lock;
+ int id;
+ void *priv;
+};
+
+typedef struct pkt_router_svc gpr_port_t;
+
struct apr_device {
struct device dev;
uint16_t svc_id;
uint16_t domain_id;
uint32_t version;
char name[APR_NAME_SIZE];
- spinlock_t lock;
+ const char *service_path;
+ struct pkt_router_svc svc;
struct list_head node;
};
+typedef struct apr_device gpr_device_t;
+
#define to_apr_device(d) container_of(d, struct apr_device, dev)
+#define svc_to_apr_device(d) container_of(d, struct apr_device, svc)
struct apr_driver {
int (*probe)(struct apr_device *sl);
int (*remove)(struct apr_device *sl);
int (*callback)(struct apr_device *a,
struct apr_resp_pkt *d);
+ int (*gpr_callback)(struct gpr_resp_pkt *d, void *data, int op);
struct device_driver driver;
const struct apr_device_id *id_table;
};
+typedef struct apr_driver gpr_driver_t;
#define to_apr_driver(d) container_of(d, struct apr_driver, driver)
/*
@@ -112,7 +174,7 @@ void apr_driver_unregister(struct apr_driver *drv);
/**
* module_apr_driver() - Helper macro for registering a aprbus driver
- * @__aprbus_driver: aprbus_driver struct
+ * @__apr_driver: apr_driver struct
*
* Helper macro for aprbus drivers which do not do anything special in
* module init/exit. This eliminates a lot of boilerplate. Each module
@@ -122,7 +184,14 @@ void apr_driver_unregister(struct apr_driver *drv);
#define module_apr_driver(__apr_driver) \
module_driver(__apr_driver, apr_driver_register, \
apr_driver_unregister)
+#define module_gpr_driver(__gpr_driver) module_apr_driver(__gpr_driver)
int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt);
+gpr_port_t *gpr_alloc_port(gpr_device_t *gdev, struct device *dev,
+ gpr_port_cb cb, void *priv);
+void gpr_free_port(gpr_port_t *port);
+int gpr_send_port_pkt(gpr_port_t *port, struct gpr_pkt *pkt);
+int gpr_send_pkt(gpr_device_t *gdev, struct gpr_pkt *pkt);
+
#endif /* __QCOM_APR_H_ */
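A minimal sketch of sending a bare GPR packet with the new definitions; the routing fields are placeholders, and real drivers allocate the packet together with its payload:

#include <linux/soc/qcom/apr.h>

static int example_gpr_send(gpr_device_t *gdev, u32 dest_port, u32 opcode)
{
	struct gpr_pkt pkt = { 0 };

	pkt.hdr.version = GPR_PKT_VER;
	pkt.hdr.hdr_size = GPR_PKT_HEADER_WORD_SIZE;
	pkt.hdr.pkt_size = GPR_PKT_HEADER_BYTE_SIZE;
	pkt.hdr.dest_port = dest_port;
	pkt.hdr.opcode = opcode;

	return gpr_send_pkt(gdev, &pkt);
}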
diff --git a/include/linux/soc/qcom/irq.h b/include/linux/soc/qcom/irq.h
index 9e1ece58e55b..72b9231e9fdd 100644
--- a/include/linux/soc/qcom/irq.h
+++ b/include/linux/soc/qcom/irq.h
@@ -7,7 +7,7 @@
#define GPIO_NO_WAKE_IRQ ~0U
-/**
+/*
* QCOM specific IRQ domain flags that distinguishes the handling of wakeup
* capable interrupts by different interrupt controllers.
*
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
index 90b864655822..bc2fb8343a94 100644
--- a/include/linux/soc/qcom/llcc-qcom.h
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -16,6 +16,7 @@
#define LLCC_AUDIO 6
#define LLCC_MDMHPGRW 7
#define LLCC_MDM 8
+#define LLCC_MODHW 9
#define LLCC_CMPT 10
#define LLCC_GPUHTW 11
#define LLCC_GPU 12
@@ -26,9 +27,25 @@
#define LLCC_MDMHPFX 20
#define LLCC_MDMPNG 21
#define LLCC_AUDHW 22
+#define LLCC_NPU 23
+#define LLCC_WLHW 24
+#define LLCC_PIMEM 25
+#define LLCC_DRE 26
+#define LLCC_CVP 28
+#define LLCC_MODPE 29
+#define LLCC_APTCM 30
+#define LLCC_WRCACHE 31
+#define LLCC_CVPFW 32
+#define LLCC_CPUSS1 33
+#define LLCC_CAMEXP0 34
+#define LLCC_CPUMTE 35
+#define LLCC_CPUHWT 36
+#define LLCC_MDMCLAD2 37
+#define LLCC_CAMEXP1 38
+#define LLCC_AENPU 45
/**
- * llcc_slice_desc - Cache slice descriptor
+ * struct llcc_slice_desc - Cache slice descriptor
* @slice_id: llcc slice id
* @slice_size: Size allocated for the llcc slice
*/
@@ -38,7 +55,7 @@ struct llcc_slice_desc {
};
/**
- * llcc_edac_reg_data - llcc edac registers data for each error type
+ * struct llcc_edac_reg_data - llcc edac registers data for each error type
* @name: Name of the error
* @synd_reg: Syndrome register address
* @count_status_reg: Status register address to read the error count
@@ -61,11 +78,40 @@ struct llcc_edac_reg_data {
u8 ways_shift;
};
+struct llcc_edac_reg_offset {
+ /* LLCC TRP registers */
+ u32 trp_ecc_error_status0;
+ u32 trp_ecc_error_status1;
+ u32 trp_ecc_sb_err_syn0;
+ u32 trp_ecc_db_err_syn0;
+ u32 trp_ecc_error_cntr_clear;
+ u32 trp_interrupt_0_status;
+ u32 trp_interrupt_0_clear;
+ u32 trp_interrupt_0_enable;
+
+ /* LLCC Common registers */
+ u32 cmn_status0;
+ u32 cmn_interrupt_0_enable;
+ u32 cmn_interrupt_2_enable;
+
+ /* LLCC DRP registers */
+ u32 drp_ecc_error_cfg;
+ u32 drp_ecc_error_cntr_clear;
+ u32 drp_interrupt_status;
+ u32 drp_interrupt_clear;
+ u32 drp_interrupt_enable;
+ u32 drp_ecc_error_status0;
+ u32 drp_ecc_error_status1;
+ u32 drp_ecc_sb_err_syn0;
+ u32 drp_ecc_db_err_syn0;
+};
+
/**
- * llcc_drv_data - Data associated with the llcc driver
+ * struct llcc_drv_data - Data associated with the llcc driver
* @regmap: regmap associated with the llcc device
* @bcast_regmap: regmap associated with llcc broadcast offset
* @cfg: pointer to the data structure for slice configuration
+ * @edac_reg_offset: Offset of the LLCC EDAC registers
* @lock: mutex associated with each slice
* @cfg_size: size of the config data table
* @max_slices: max slices as read from device tree
@@ -73,11 +119,13 @@ struct llcc_edac_reg_data {
* @bitmap: Bit map to track the active slice ids
* @offsets: Pointer to the bank offsets array
* @ecc_irq: interrupt for llcc cache error detection and reporting
+ * @version: Indicates the LLCC version
*/
struct llcc_drv_data {
struct regmap *regmap;
struct regmap *bcast_regmap;
const struct llcc_slice_config *cfg;
+ const struct llcc_edac_reg_offset *edac_reg_offset;
struct mutex lock;
u32 cfg_size;
u32 max_slices;
@@ -85,6 +133,7 @@ struct llcc_drv_data {
unsigned long *bitmap;
u32 *offsets;
int ecc_irq;
+ u32 version;
};
#if IS_ENABLED(CONFIG_QCOM_LLCC)
diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h
index e600baec6825..9e8e60421192 100644
--- a/include/linux/soc/qcom/mdt_loader.h
+++ b/include/linux/soc/qcom/mdt_loader.h
@@ -10,8 +10,14 @@
struct device;
struct firmware;
+struct qcom_scm_pas_metadata;
+
+#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
ssize_t qcom_mdt_get_size(const struct firmware *fw);
+int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
+ const char *fw_name, int pas_id, phys_addr_t mem_phys,
+ struct qcom_scm_pas_metadata *pas_metadata_ctx);
int qcom_mdt_load(struct device *dev, const struct firmware *fw,
const char *fw_name, int pas_id, void *mem_region,
phys_addr_t mem_phys, size_t mem_size,
@@ -21,6 +27,48 @@ int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
const char *fw_name, int pas_id, void *mem_region,
phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base);
-void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len);
+void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len,
+ const char *fw_name, struct device *dev);
+
+#else /* !IS_ENABLED(CONFIG_QCOM_MDT_LOADER) */
+
+static inline ssize_t qcom_mdt_get_size(const struct firmware *fw)
+{
+ return -ENODEV;
+}
+
+static inline int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
+ const char *fw_name, int pas_id, phys_addr_t mem_phys,
+ struct qcom_scm_pas_metadata *pas_metadata_ctx)
+{
+ return -ENODEV;
+}
+
+static inline int qcom_mdt_load(struct device *dev, const struct firmware *fw,
+ const char *fw_name, int pas_id,
+ void *mem_region, phys_addr_t mem_phys,
+ size_t mem_size, phys_addr_t *reloc_base)
+{
+ return -ENODEV;
+}
+
+static inline int qcom_mdt_load_no_init(struct device *dev,
+ const struct firmware *fw,
+ const char *fw_name, int pas_id,
+ void *mem_region, phys_addr_t mem_phys,
+ size_t mem_size,
+ phys_addr_t *reloc_base)
+{
+ return -ENODEV;
+}
+
+static inline void *qcom_mdt_read_metadata(const struct firmware *fw,
+ size_t *data_len, const char *fw_name,
+ struct device *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+#endif /* !IS_ENABLED(CONFIG_QCOM_MDT_LOADER) */
#endif
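The CONFIG_QCOM_MDT_LOADER stubs above keep consumers building when the loader is disabled. A minimal usage sketch follows (hypothetical driver code; the firmware name, pas_id and memory parameters are placeholders):

	#include <linux/firmware.h>
	#include <linux/soc/qcom/mdt_loader.h>

	static int my_load_fw(struct device *dev, void *mem_region,
			      phys_addr_t mem_phys, size_t mem_size)
	{
		const struct firmware *fw;
		phys_addr_t reloc_base;
		int ret;

		ret = request_firmware(&fw, "my-fw.mdt", dev);
		if (ret)
			return ret;

		/* pas_id is SoC/peripheral specific; 1 is a placeholder */
		ret = qcom_mdt_load(dev, fw, "my-fw.mdt", 1, mem_region,
				    mem_phys, mem_size, &reloc_base);

		release_firmware(fw);
		return ret;
	}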
diff --git a/include/linux/soc/qcom/pdr.h b/include/linux/soc/qcom/pdr.h
new file mode 100644
index 000000000000..83a8ea612e69
--- /dev/null
+++ b/include/linux/soc/qcom/pdr.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __QCOM_PDR_HELPER__
+#define __QCOM_PDR_HELPER__
+
+#include <linux/soc/qcom/qmi.h>
+
+#define SERVREG_NAME_LENGTH 64
+
+struct pdr_service;
+struct pdr_handle;
+
+enum servreg_service_state {
+ SERVREG_LOCATOR_ERR = 0x1,
+ SERVREG_SERVICE_STATE_DOWN = 0x0FFFFFFF,
+ SERVREG_SERVICE_STATE_UP = 0x1FFFFFFF,
+ SERVREG_SERVICE_STATE_EARLY_DOWN = 0x2FFFFFFF,
+ SERVREG_SERVICE_STATE_UNINIT = 0x7FFFFFFF,
+};
+
+struct pdr_handle *pdr_handle_alloc(void (*status)(int state,
+ char *service_path,
+ void *priv), void *priv);
+struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr,
+ const char *service_name,
+ const char *service_path);
+int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds);
+void pdr_handle_release(struct pdr_handle *pdr);
+
+#endif
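A minimal sketch of the intended call flow, assuming hypothetical service and path names; pdr_handle_alloc() registers the status callback that fires on the servreg_service_state transitions above:

	#include <linux/soc/qcom/pdr.h>

	static void my_pd_status(int state, char *service_path, void *priv)
	{
		if (state == SERVREG_SERVICE_STATE_UP)
			pr_info("%s is up\n", service_path);
	}

	static int my_track_pd(void)
	{
		struct pdr_handle *pdr;
		struct pdr_service *pds;

		pdr = pdr_handle_alloc(my_pd_status, NULL);
		if (IS_ERR(pdr))
			return PTR_ERR(pdr);

		/* Both names are illustrative only */
		pds = pdr_add_lookup(pdr, "avs/audio", "msm/adsp/audio_pd");
		if (IS_ERR(pds)) {
			pdr_handle_release(pdr);
			return PTR_ERR(pds);
		}

		return 0;
	}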
diff --git a/include/linux/soc/qcom/qcom_aoss.h b/include/linux/soc/qcom/qcom_aoss.h
new file mode 100644
index 000000000000..3c2a82e606f8
--- /dev/null
+++ b/include/linux/soc/qcom/qcom_aoss.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __QCOM_AOSS_H__
+#define __QCOM_AOSS_H__
+
+#include <linux/err.h>
+#include <linux/device.h>
+
+struct qmp;
+
+#if IS_ENABLED(CONFIG_QCOM_AOSS_QMP)
+
+int qmp_send(struct qmp *qmp, const void *data, size_t len);
+struct qmp *qmp_get(struct device *dev);
+void qmp_put(struct qmp *qmp);
+
+#else
+
+static inline int qmp_send(struct qmp *qmp, const void *data, size_t len)
+{
+ return -ENODEV;
+}
+
+static inline struct qmp *qmp_get(struct device *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void qmp_put(struct qmp *qmp)
+{
+}
+
+#endif
+
+#endif
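Given the get/put pairing above, a consumer sketch might look as follows (the message payload is illustrative; real payloads are consumer specific):

	#include <linux/soc/qcom/qcom_aoss.h>

	static int my_notify_aoss(struct device *dev)
	{
		static const char msg[] = "{class: bcm, res: example, val: 1}";
		struct qmp *qmp;
		int ret;

		qmp = qmp_get(dev);
		if (IS_ERR(qmp))
			return PTR_ERR(qmp);

		ret = qmp_send(qmp, msg, sizeof(msg));
		qmp_put(qmp);
		return ret;
	}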
diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h
index 5efa2b67fa55..469e02d2aa0d 100644
--- a/include/linux/soc/qcom/qmi.h
+++ b/include/linux/soc/qcom/qmi.h
@@ -16,7 +16,7 @@
struct socket;
/**
- * qmi_header - wireformat header of QMI messages
+ * struct qmi_header - wireformat header of QMI messages
* @type: type of message
* @txn_id: transaction id
* @msg_id: message id
@@ -75,7 +75,7 @@ struct qmi_elem_info {
enum qmi_array_type array_type;
u8 tlv_type;
u32 offset;
- struct qmi_elem_info *ei_array;
+ const struct qmi_elem_info *ei_array;
};
#define QMI_RESULT_SUCCESS_V01 0
@@ -88,11 +88,12 @@ struct qmi_elem_info {
#define QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 5
#define QMI_ERR_INVALID_ID_V01 41
#define QMI_ERR_ENCODING_V01 58
+#define QMI_ERR_DISABLED_V01 69
#define QMI_ERR_INCOMPATIBLE_STATE_V01 90
#define QMI_ERR_NOT_SUPPORTED_V01 94
/**
- * qmi_response_type_v01 - common response header (decoded)
+ * struct qmi_response_type_v01 - common response header (decoded)
* @result: result of the transaction
* @error: error value, when @result is QMI_RESULT_FAILURE_V01
*/
@@ -101,7 +102,7 @@ struct qmi_response_type_v01 {
u16 error;
};
-extern struct qmi_elem_info qmi_response_type_v01_ei[];
+extern const struct qmi_elem_info qmi_response_type_v01_ei[];
/**
* struct qmi_service - context to track lookup-results
@@ -172,7 +173,7 @@ struct qmi_txn {
struct completion completion;
int result;
- struct qmi_elem_info *ei;
+ const struct qmi_elem_info *ei;
void *dest;
};
@@ -188,7 +189,7 @@ struct qmi_msg_handler {
unsigned int type;
unsigned int msg_id;
- struct qmi_elem_info *ei;
+ const struct qmi_elem_info *ei;
size_t decoded_size;
void (*fn)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
@@ -248,23 +249,23 @@ void qmi_handle_release(struct qmi_handle *qmi);
ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
struct qmi_txn *txn, int msg_id, size_t len,
- struct qmi_elem_info *ei, const void *c_struct);
+ const struct qmi_elem_info *ei, const void *c_struct);
ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
struct qmi_txn *txn, int msg_id, size_t len,
- struct qmi_elem_info *ei, const void *c_struct);
+ const struct qmi_elem_info *ei, const void *c_struct);
ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
- int msg_id, size_t len, struct qmi_elem_info *ei,
+ int msg_id, size_t len, const struct qmi_elem_info *ei,
const void *c_struct);
void *qmi_encode_message(int type, unsigned int msg_id, size_t *len,
- unsigned int txn_id, struct qmi_elem_info *ei,
+ unsigned int txn_id, const struct qmi_elem_info *ei,
const void *c_struct);
int qmi_decode_message(const void *buf, size_t len,
- struct qmi_elem_info *ei, void *c_struct);
+ const struct qmi_elem_info *ei, void *c_struct);
int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn,
- struct qmi_elem_info *ei, void *c_struct);
+ const struct qmi_elem_info *ei, void *c_struct);
int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout);
void qmi_txn_cancel(struct qmi_txn *txn);
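With the constification above, message encoding tables can live in rodata. A sketch for a hypothetical request carrying a single u32 TLV (the struct and tlv_type are made up for illustration):

	#include <linux/soc/qcom/qmi.h>

	struct my_req {
		u32 value;
	};

	static const struct qmi_elem_info my_req_ei[] = {
		{
			.data_type = QMI_UNSIGNED_4_BYTE,
			.elem_len  = 1,
			.elem_size = sizeof(u32),
			.tlv_type  = 0x01,
			.offset    = offsetof(struct my_req, value),
		},
		{ .data_type = QMI_EOTI },
	};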
diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h
index 9e4fdd861a51..3ab8c07f71c0 100644
--- a/include/linux/soc/qcom/smd-rpm.h
+++ b/include/linux/soc/qcom/smd-rpm.h
@@ -10,6 +10,7 @@ struct qcom_smd_rpm;
/*
* Constants used for addressing resources in the RPM.
*/
+#define QCOM_SMD_RPM_BBYB 0x62796262
#define QCOM_SMD_RPM_BOBB 0x62626f62
#define QCOM_SMD_RPM_BOOST 0x61747362
#define QCOM_SMD_RPM_BUS_CLK 0x316b6c63
@@ -18,12 +19,17 @@ struct qcom_smd_rpm;
#define QCOM_SMD_RPM_CLK_BUF_A 0x616B6C63
#define QCOM_SMD_RPM_LDOA 0x616f646c
#define QCOM_SMD_RPM_LDOB 0x626F646C
+#define QCOM_SMD_RPM_RWCX 0x78637772
+#define QCOM_SMD_RPM_RWMX 0x786d7772
+#define QCOM_SMD_RPM_RWLC 0x636c7772
+#define QCOM_SMD_RPM_RWLM 0x6d6c7772
#define QCOM_SMD_RPM_MEM_CLK 0x326b6c63
#define QCOM_SMD_RPM_MISC_CLK 0x306b6c63
#define QCOM_SMD_RPM_NCPA 0x6170636E
#define QCOM_SMD_RPM_NCPB 0x6270636E
#define QCOM_SMD_RPM_OCMEM_PWR 0x706d636f
#define QCOM_SMD_RPM_QPIC_CLK 0x63697071
+#define QCOM_SMD_RPM_QUP_CLK 0x707571
#define QCOM_SMD_RPM_SMPA 0x61706d73
#define QCOM_SMD_RPM_SMPB 0x62706d73
#define QCOM_SMD_RPM_SPDM 0x63707362
@@ -32,6 +38,10 @@ struct qcom_smd_rpm;
#define QCOM_SMD_RPM_IPA_CLK 0x617069
#define QCOM_SMD_RPM_CE_CLK 0x6563
#define QCOM_SMD_RPM_AGGR_CLK 0x72676761
+#define QCOM_SMD_RPM_HWKM_CLK 0x6d6b7768
+#define QCOM_SMD_RPM_PKA_CLK 0x616b70
+#define QCOM_SMD_RPM_MCFG_CLK 0x6766636d
+#define QCOM_SMD_RPM_MMXI_CLK 0x69786d6d
int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
int state,
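These resource identifiers are little-endian ASCII four-character codes; QCOM_SMD_RPM_BUS_CLK (0x316b6c63), for instance, spells "clk1". A build-time check makes the encoding explicit (RPM_KEY is a hypothetical helper, not part of this header):

	#include <linux/build_bug.h>

	#define RPM_KEY(a, b, c, d) \
		((u32)(a) | ((u32)(b) << 8) | ((u32)(c) << 16) | ((u32)(d) << 24))

	/* "clk1", stored little-endian, is QCOM_SMD_RPM_BUS_CLK */
	static_assert(RPM_KEY('c', 'l', 'k', '1') == QCOM_SMD_RPM_BUS_CLK);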
diff --git a/include/linux/soc/qcom/smem_state.h b/include/linux/soc/qcom/smem_state.h
index 63ad8cddad14..652c0158baac 100644
--- a/include/linux/soc/qcom/smem_state.h
+++ b/include/linux/soc/qcom/smem_state.h
@@ -14,6 +14,7 @@ struct qcom_smem_state_ops {
#ifdef CONFIG_QCOM_SMEM_STATE
struct qcom_smem_state *qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit);
+struct qcom_smem_state *devm_qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit);
void qcom_smem_state_put(struct qcom_smem_state *);
int qcom_smem_state_update_bits(struct qcom_smem_state *state, u32 mask, u32 value);
@@ -29,6 +30,13 @@ static inline struct qcom_smem_state *qcom_smem_state_get(struct device *dev,
return ERR_PTR(-EINVAL);
}
+static inline struct qcom_smem_state *devm_qcom_smem_state_get(struct device *dev,
+ const char *con_id,
+ unsigned *bit)
+{
+ return ERR_PTR(-EINVAL);
+}
+
static inline void qcom_smem_state_put(struct qcom_smem_state *state)
{
}
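The new devm variant drops the explicit put from the error and teardown paths. A probe-time sketch (the "stop" con_id is illustrative and would come from the consumer's binding):

	static int my_probe(struct device *dev)
	{
		struct qcom_smem_state *state;
		unsigned int bit;

		state = devm_qcom_smem_state_get(dev, "stop", &bit);
		if (IS_ERR(state))
			return PTR_ERR(state);

		/* Assert our signal bit */
		return qcom_smem_state_update_bits(state, BIT(bit), BIT(bit));
	}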
diff --git a/include/linux/soc/renesas/r9a06g032-sysctrl.h b/include/linux/soc/renesas/r9a06g032-sysctrl.h
new file mode 100644
index 000000000000..066dfb15cbdd
--- /dev/null
+++ b/include/linux/soc/renesas/r9a06g032-sysctrl.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__
+#define __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__
+
+#ifdef CONFIG_CLK_R9A06G032
+int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val);
+#else
+static inline int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val) { return -ENODEV; }
+#endif
+
+#endif /* __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__ */
diff --git a/include/linux/soc/renesas/rcar-rst.h b/include/linux/soc/renesas/rcar-rst.h
index 7899a5b8c247..1f1fe8bfaa76 100644
--- a/include/linux/soc/renesas/rcar-rst.h
+++ b/include/linux/soc/renesas/rcar-rst.h
@@ -4,8 +4,10 @@
#ifdef CONFIG_RST_RCAR
int rcar_rst_read_mode_pins(u32 *mode);
+int rcar_rst_set_rproc_boot_addr(u64 boot_addr);
#else
static inline int rcar_rst_read_mode_pins(u32 *mode) { return -ENODEV; }
+static inline int rcar_rst_set_rproc_boot_addr(u64 boot_addr) { return -ENODEV; }
#endif
#endif /* __LINUX_SOC_RENESAS_RCAR_RST_H__ */
diff --git a/include/linux/soc/samsung/exynos-chipid.h b/include/linux/soc/samsung/exynos-chipid.h
index 8bca6763f99c..62f0e2531068 100644
--- a/include/linux/soc/samsung/exynos-chipid.h
+++ b/include/linux/soc/samsung/exynos-chipid.h
@@ -9,10 +9,8 @@
#define __LINUX_SOC_EXYNOS_CHIPID_H
#define EXYNOS_CHIPID_REG_PRO_ID 0x00
-#define EXYNOS_SUBREV_MASK (0xf << 4)
-#define EXYNOS_MAINREV_MASK (0xf << 0)
-#define EXYNOS_REV_MASK (EXYNOS_SUBREV_MASK | \
- EXYNOS_MAINREV_MASK)
+#define EXYNOS_REV_PART_MASK 0xf
+#define EXYNOS_REV_PART_SHIFT 4
#define EXYNOS_MASK 0xfffff000
#define EXYNOS_CHIPID_REG_PKG_ID 0x04
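The replacement macros split the revision into a 4-bit part plus a shift rather than a combined mask, so main and sub revision are extracted separately. A sketch, assuming pro_id was read from EXYNOS_CHIPID_REG_PRO_ID:

	static void my_decode_rev(u32 pro_id)
	{
		u32 main_rev = pro_id & EXYNOS_REV_PART_MASK;
		u32 sub_rev = (pro_id >> EXYNOS_REV_PART_SHIFT) &
			      EXYNOS_REV_PART_MASK;

		pr_info("Exynos rev %u.%u\n", main_rev, sub_rev);
	}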
diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h
index fc9250fb3133..aa840ed043e1 100644
--- a/include/linux/soc/samsung/exynos-regs-pmu.h
+++ b/include/linux/soc/samsung/exynos-regs-pmu.h
@@ -611,12 +611,6 @@
#define EXYNOS5420_FSYS2_OPTION 0x4168
#define EXYNOS5420_PSGEN_OPTION 0x4188
-/* For EXYNOS_CENTRAL_SEQ_OPTION */
-#define EXYNOS5_USE_STANDBYWFI_ARM_CORE0 BIT(16)
-#define EXYNOS5_USE_STANDBYWFI_ARM_CORE1 BIT(17)
-#define EXYNOS5_USE_STANDBYWFE_ARM_CORE0 BIT(24)
-#define EXYNOS5_USE_STANDBYWFE_ARM_CORE1 BIT(25)
-
#define EXYNOS5420_ARM_USE_STANDBY_WFI0 BIT(4)
#define EXYNOS5420_ARM_USE_STANDBY_WFI1 BIT(5)
#define EXYNOS5420_ARM_USE_STANDBY_WFI2 BIT(6)
diff --git a/include/linux/soc/samsung/s3c-adc.h b/include/linux/soc/samsung/s3c-adc.h
new file mode 100644
index 000000000000..591c94ef957d
--- /dev/null
+++ b/include/linux/soc/samsung/s3c-adc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C ADC driver information
+ */
+
+#ifndef __LINUX_SOC_SAMSUNG_S3C_ADC_H
+#define __LINUX_SOC_SAMSUNG_S3C_ADC_H __FILE__
+
+struct s3c_adc_client;
+struct platform_device;
+
+extern int s3c_adc_start(struct s3c_adc_client *client,
+ unsigned int channel, unsigned int nr_samples);
+
+extern int s3c_adc_read(struct s3c_adc_client *client, unsigned int ch);
+
+extern struct s3c_adc_client *
+ s3c_adc_register(struct platform_device *pdev,
+ void (*select)(struct s3c_adc_client *client,
+ unsigned selected),
+ void (*conv)(struct s3c_adc_client *client,
+ unsigned d0, unsigned d1,
+ unsigned *samples_left),
+ unsigned int is_ts);
+
+extern void s3c_adc_release(struct s3c_adc_client *client);
+
+#endif /* __LINUX_SOC_SAMSUNG_S3C_ADC_H */
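A minimal synchronous client sketch; passing NULL select/conv callbacks and reading with s3c_adc_read() follows the pattern of existing in-tree users such as the hwmon driver (the function name is hypothetical):

	static int my_read_channel(struct platform_device *pdev, unsigned int ch)
	{
		struct s3c_adc_client *client;
		int val;

		client = s3c_adc_register(pdev, NULL, NULL, 0);
		if (IS_ERR(client))
			return PTR_ERR(client);

		val = s3c_adc_read(client, ch);	/* negative errno on failure */
		s3c_adc_release(client);
		return val;
	}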
diff --git a/include/linux/soc/samsung/s3c-cpu-freq.h b/include/linux/soc/samsung/s3c-cpu-freq.h
new file mode 100644
index 000000000000..63e88fd5dea2
--- /dev/null
+++ b/include/linux/soc/samsung/s3c-cpu-freq.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2006-2007 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C CPU frequency scaling support - driver and board
+ */
+#ifndef __LINUX_SOC_SAMSUNG_S3C_CPU_FREQ_H
+#define __LINUX_SOC_SAMSUNG_S3C_CPU_FREQ_H
+
+#include <linux/cpufreq.h>
+
+struct s3c_cpufreq_info;
+struct s3c_cpufreq_board;
+struct s3c_iotimings;
+
+/**
+ * struct s3c_freq - frequency information (mainly for core drivers)
+ * @fclk: The FCLK frequency in Hz.
+ * @armclk: The ARMCLK frequency in Hz.
+ * @hclk_tns: HCLK cycle time in 10ths of nano-seconds.
+ * @hclk: The HCLK frequency in Hz.
+ * @pclk: The PCLK frequency in Hz.
+ *
+ * This contains the frequency information about the current configuration
+ * mainly for the core drivers to ensure we do not end up passing about
+ * a large number of parameters.
+ *
+ * The @hclk_tns field is a useful cache for the parts of the drivers that
+ * need to calculate IO timings and suchlike.
+ */
+struct s3c_freq {
+ unsigned long fclk;
+ unsigned long armclk;
+ unsigned long hclk_tns; /* in 10ths of ns */
+ unsigned long hclk;
+ unsigned long pclk;
+};
+
+/**
+ * struct s3c_cpufreq_freqs - s3c cpufreq notification information.
+ * @freqs: The cpufreq setting information.
+ * @old: The old clock settings.
+ * @new: The new clock settings.
+ * @pll_changing: Set if the PLL is changing.
+ *
+ * A wrapper around 'struct cpufreq_freqs' so that any drivers receiving
+ * the notification can use information that is not provided by just
+ * having the core frequency alone.
+ *
+ * The pll_changing flag is used to indicate if the PLL itself is
+ * being set during this change. This is important as the clocks
+ * will temporarily be set to the XTAL clock during this time, so
+ * drivers may want to close down their output during this time.
+ *
+ * Note, this is not being used by any current drivers and therefore
+ * may be removed in the future.
+ */
+struct s3c_cpufreq_freqs {
+ struct cpufreq_freqs freqs;
+ struct s3c_freq old;
+ struct s3c_freq new;
+
+ unsigned int pll_changing:1;
+};
+
+#define to_s3c_cpufreq(_cf) container_of(_cf, struct s3c_cpufreq_freqs, freqs)
+
+/**
+ * struct s3c_clkdivs - clock divisor information
+ * @p_divisor: Divisor from FCLK to PCLK.
+ * @h_divisor: Divisor from FCLK to HCLK.
+ * @arm_divisor: Divisor from FCLK to ARMCLK (not all CPUs).
+ * @dvs: Non-zero if using DVS mode for ARMCLK.
+ *
+ * Divisor settings for the core clocks.
+ */
+struct s3c_clkdivs {
+ int p_divisor;
+ int h_divisor;
+ int arm_divisor;
+ unsigned char dvs;
+};
+
+#define PLLVAL(_m, _p, _s) (((_m) << 12) | ((_p) << 4) | (_s))
+
+/**
+ * struct s3c_pllval - PLL value entry.
+ * @freq: The frequency for this entry in Hz.
+ * @pll_reg: The PLL register setting for this PLL value.
+ */
+struct s3c_pllval {
+ unsigned long freq;
+ unsigned long pll_reg;
+};
+
+/**
+ * struct s3c_cpufreq_board - per-board cpu frequency information
+ * @refresh: The SDRAM refresh period in nanoseconds.
+ * @auto_io: Set if the IO timing settings should be generated from the
+ * initialisation time hardware registers.
+ * @need_io: Set if the board has external IO on any of the chipselect
+ * lines that will require the hardware timing registers to be
+ * updated on a clock change.
+ * @max: The maximum frequency limits for the system. Any field that
+ * is left at zero will use the CPU's settings.
+ *
+ * This contains the board specific settings that affect how the CPU
+ * drivers choose settings. These include the memory refresh and IO
+ * timing information.
+ *
+ * Registration depends on the driver being used: the ARMCLK-only
+ * implementation does not currently need this, but the older style
+ * driver requires it to be available.
+ */
+struct s3c_cpufreq_board {
+ unsigned int refresh;
+ unsigned int auto_io:1; /* automatically init io timings. */
+ unsigned int need_io:1; /* set if needs io timing support. */
+
+ /* any non-zero field in here is taken as an upper limit. */
+ struct s3c_freq max; /* frequency limits */
+};
+
+/* Things depending on frequency scaling. */
+#ifdef CONFIG_ARM_S3C_CPUFREQ
+#define __init_or_cpufreq
+#else
+#define __init_or_cpufreq __init
+#endif
+
+/* Board functions */
+
+#ifdef CONFIG_ARM_S3C_CPUFREQ
+extern int s3c_cpufreq_setboard(struct s3c_cpufreq_board *board);
+#else
+
+static inline int s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
+{
+ return 0;
+}
+#endif /* CONFIG_ARM_S3C_CPUFREQ */
+
+#endif
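A board-registration sketch; the refresh period and flags are placeholders that a real board would take from its SDRAM datasheet and wiring:

	static struct s3c_cpufreq_board my_board_cpufreq = {
		.refresh = 7800,	/* SDRAM refresh period, ns */
		.auto_io = 1,		/* derive IO timings from boot-time registers */
		.need_io = 1,		/* external IO on a chipselect line */
	};

	static void __init my_board_init_cpufreq(void)
	{
		s3c_cpufreq_setboard(&my_board_cpufreq);
	}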
diff --git a/include/linux/soc/samsung/s3c-cpufreq-core.h b/include/linux/soc/samsung/s3c-cpufreq-core.h
new file mode 100644
index 000000000000..3b278afb769b
--- /dev/null
+++ b/include/linux/soc/samsung/s3c-cpufreq-core.h
@@ -0,0 +1,299 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2006-2009 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C CPU frequency scaling support - core support
+ */
+#ifndef __LINUX_SOC_SAMSUNG_S3C_CPUFREQ_CORE_H
+#define __LINUX_SOC_SAMSUNG_S3C_CPUFREQ_CORE_H
+
+#include <linux/soc/samsung/s3c-cpu-freq.h>
+
+struct seq_file;
+
+#define MAX_BANKS (8)
+#define S3C2412_MAX_IO (8)
+
+/**
+ * struct s3c2410_iobank_timing - IO bank timings for S3C2410 style timings
+ * @bankcon: The cached version of settings in this structure.
+ * @tacp: Page mode access cycle time (page mode only).
+ * @tacs: Time from address valid to nCS asserted.
+ * @tcos: Time from nCS asserted to nOE or nWE asserted.
+ * @tacc: Time that nOE or nWE is asserted.
+ * @tcoh: Time nCS is held after nOE or nWE are released.
+ * @tcah: Time the address is held after nCS is released.
+ * @nwait_en: Whether nWAIT is enabled for this bank.
+ *
+ * This structure represents the IO timings for a S3C2410 style IO bank
+ * used by the CPU frequency support if it needs to change the settings
+ * of the IO.
+ */
+struct s3c2410_iobank_timing {
+ unsigned long bankcon;
+ unsigned int tacp;
+ unsigned int tacs;
+ unsigned int tcos;
+ unsigned int tacc;
+ unsigned int tcoh; /* nCS hold after nOE/nWE */
+ unsigned int tcah; /* Address hold after nCS */
+ unsigned char nwait_en; /* nWait enabled for bank. */
+};
+
+/**
+ * struct s3c2412_iobank_timing - IO timings for PL093 (S3C2412) style IO
+ * @idcy: The idle cycle time between transactions.
+ * @wstrd: nCS release to end of read cycle.
+ * @wstwr: nCS release to end of write cycle.
+ * @wstoen: nCS assertion to nOE assertion time.
+ * @wstwen: nCS assertion to nWE assertion time.
+ * @wstbrd: Burst ready delay.
+ * @smbidcyr: Register cache for smbidcyr value.
+ * @smbwstrd: Register cache for smbwstrd value.
+ * @smbwstwr: Register cache for smbwstwr value.
+ * @smbwstoen: Register cache for smbwstoen value.
+ * @smbwstwen: Register cache for smbwstwen value.
+ * @smbwstbrd: Register cache for smbwstbrd value.
+ *
+ * Timing information for a IO bank on an S3C2412 or similar system which
+ * uses a PL093 block.
+ */
+struct s3c2412_iobank_timing {
+ unsigned int idcy;
+ unsigned int wstrd;
+ unsigned int wstwr;
+ unsigned int wstoen;
+ unsigned int wstwen;
+ unsigned int wstbrd;
+
+ /* register cache */
+ unsigned char smbidcyr;
+ unsigned char smbwstrd;
+ unsigned char smbwstwr;
+ unsigned char smbwstoen;
+ unsigned char smbwstwen;
+ unsigned char smbwstbrd;
+};
+
+union s3c_iobank {
+ struct s3c2410_iobank_timing *io_2410;
+ struct s3c2412_iobank_timing *io_2412;
+};
+
+/**
+ * struct s3c_iotimings - Chip IO timings holder
+ * @bank: The timings for each IO bank.
+ */
+struct s3c_iotimings {
+ union s3c_iobank bank[MAX_BANKS];
+};
+
+/**
+ * struct s3c_plltab - PLL table information.
+ * @vals: List of PLL values.
+ * @size: Size of the PLL table @vals.
+ */
+struct s3c_plltab {
+ struct s3c_pllval *vals;
+ int size;
+};
+
+/**
+ * struct s3c_cpufreq_config - current cpu frequency configuration
+ * @freq: The current settings for the core clocks.
+ * @max: Maximum settings, derived from core, board and user settings.
+ * @pll: The PLL table entry for the current PLL settings.
+ * @divs: The divisor settings for the core clocks.
+ * @info: The current core driver information.
+ * @board: The information for the board we are running on.
+ * @lock_pll: Set if the PLL settings cannot be changed.
+ *
+ * This is for the core drivers that need to know information about
+ * the current settings and values. It should not be needed by any
+ * device drivers.
+ */
+struct s3c_cpufreq_config {
+ struct s3c_freq freq;
+ struct s3c_freq max;
+ struct clk *mpll;
+ struct cpufreq_frequency_table pll;
+ struct s3c_clkdivs divs;
+ struct s3c_cpufreq_info *info; /* for core, not drivers */
+ struct s3c_cpufreq_board *board;
+
+ unsigned int lock_pll:1;
+};
+
+/**
+ * struct s3c_cpufreq_info - Information for the CPU frequency driver.
+ * @name: The name of this implementation.
+ * @max: The maximum frequencies for the system.
+ * @latency: Transition latency to give to cpufreq.
+ * @locktime_m: The lock-time in uS for the MPLL.
+ * @locktime_u: The lock-time in uS for the UPLL.
+ * @locktime_bits: The number of bits in each LOCKTIME field.
+ * @need_pll: Set if this driver needs to change the PLL values to achieve
+ * any frequency changes. This is really only needed by devices like the
+ * S3C2410 where there is no or limited divider between the PLL and the
+ * ARMCLK.
+ * @get_iotiming: Get the current IO timing data, mainly for use at start.
+ * @set_iotiming: Update the IO timings from the cached copies calculated
+ * from the @calc_iotiming entry when changing the frequency.
+ * @calc_iotiming: Calculate and update the cached copies of the IO timings
+ * from the newly calculated frequencies.
+ * @calc_freqtable: Calculate (fill in) the given frequency table from the
+ * current frequency configuration. If the table passed in is NULL,
+ * then the return is the number of elements to be filled for allocation
+ * of the table.
+ * @set_refresh: Set the memory refresh configuration.
+ * @set_fvco: Set the PLL frequencies.
+ * @set_divs: Update the clock divisors.
+ * @calc_divs: Calculate the clock divisors.
+ */
+struct s3c_cpufreq_info {
+ const char *name;
+ struct s3c_freq max;
+
+ unsigned int latency;
+
+ unsigned int locktime_m;
+ unsigned int locktime_u;
+ unsigned char locktime_bits;
+
+ unsigned int need_pll:1;
+
+ /* driver routines */
+
+ int (*get_iotiming)(struct s3c_cpufreq_config *cfg,
+ struct s3c_iotimings *timings);
+
+ void (*set_iotiming)(struct s3c_cpufreq_config *cfg,
+ struct s3c_iotimings *timings);
+
+ int (*calc_iotiming)(struct s3c_cpufreq_config *cfg,
+ struct s3c_iotimings *timings);
+
+ int (*calc_freqtable)(struct s3c_cpufreq_config *cfg,
+ struct cpufreq_frequency_table *t,
+ size_t table_size);
+
+ void (*debug_io_show)(struct seq_file *seq,
+ struct s3c_cpufreq_config *cfg,
+ union s3c_iobank *iob);
+
+ void (*set_refresh)(struct s3c_cpufreq_config *cfg);
+ void (*set_fvco)(struct s3c_cpufreq_config *cfg);
+ void (*set_divs)(struct s3c_cpufreq_config *cfg);
+ int (*calc_divs)(struct s3c_cpufreq_config *cfg);
+};
+
+extern int s3c_cpufreq_register(struct s3c_cpufreq_info *info);
+
+extern int s3c_plltab_register(struct cpufreq_frequency_table *plls,
+ unsigned int plls_no);
+
+/* exports and utilities for debugfs */
+extern struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void);
+extern struct s3c_iotimings *s3c_cpufreq_getiotimings(void);
+
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS
+#define s3c_cpufreq_debugfs_call(x) x
+#else
+#define s3c_cpufreq_debugfs_call(x) NULL
+#endif
+
+/* Useful utility functions. */
+
+extern struct clk *s3c_cpufreq_clk_get(struct device *, const char *);
+
+/* S3C2410 and compatible exported functions */
+
+extern void s3c2410_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg);
+extern void s3c2410_set_fvco(struct s3c_cpufreq_config *cfg);
+
+#ifdef CONFIG_S3C2410_IOTIMING
+extern void s3c2410_iotiming_debugfs(struct seq_file *seq,
+ struct s3c_cpufreq_config *cfg,
+ union s3c_iobank *iob);
+
+extern int s3c2410_iotiming_calc(struct s3c_cpufreq_config *cfg,
+ struct s3c_iotimings *iot);
+
+extern int s3c2410_iotiming_get(struct s3c_cpufreq_config *cfg,
+ struct s3c_iotimings *timings);
+
+extern void s3c2410_iotiming_set(struct s3c_cpufreq_config *cfg,
+ struct s3c_iotimings *iot);
+#else
+#define s3c2410_iotiming_debugfs NULL
+#define s3c2410_iotiming_calc NULL
+#define s3c2410_iotiming_get NULL
+#define s3c2410_iotiming_set NULL
+#endif /* CONFIG_S3C2410_IOTIMING */
+
+/* S3C2412 compatible routines */
+
+#ifdef CONFIG_S3C2412_IOTIMING
+extern void s3c2412_iotiming_debugfs(struct seq_file *seq,
+ struct s3c_cpufreq_config *cfg,
+ union s3c_iobank *iob);
+
+extern int s3c2412_iotiming_get(struct s3c_cpufreq_config *cfg,
+ struct s3c_iotimings *timings);
+
+extern int s3c2412_iotiming_calc(struct s3c_cpufreq_config *cfg,
+ struct s3c_iotimings *iot);
+
+extern void s3c2412_iotiming_set(struct s3c_cpufreq_config *cfg,
+ struct s3c_iotimings *iot);
+extern void s3c2412_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg);
+#else
+#define s3c2412_iotiming_debugfs NULL
+#define s3c2412_iotiming_calc NULL
+#define s3c2412_iotiming_get NULL
+#define s3c2412_iotiming_set NULL
+#endif /* CONFIG_S3C2412_IOTIMING */
+
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ_DEBUG
+#define s3c_freq_dbg(x...) printk(KERN_INFO x)
+#else
+#define s3c_freq_dbg(x...) do { if (0) printk(x); } while (0)
+#endif /* CONFIG_ARM_S3C24XX_CPUFREQ_DEBUG */
+
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ_IODEBUG
+#define s3c_freq_iodbg(x...) printk(KERN_INFO x)
+#else
+#define s3c_freq_iodbg(x...) do { if (0) printk(x); } while (0)
+#endif /* CONFIG_ARM_S3C24XX_CPUFREQ_IODEBUG */
+
+static inline int s3c_cpufreq_addfreq(struct cpufreq_frequency_table *table,
+ int index, size_t table_size,
+ unsigned int freq)
+{
+ if (index < 0)
+ return index;
+
+ if (table) {
+ if (index >= table_size)
+ return -ENOMEM;
+
+ s3c_freq_dbg("%s: { %d = %u kHz }\n",
+ __func__, index, freq);
+
+ table[index].driver_data = index;
+ table[index].frequency = freq;
+ }
+
+ return index + 1;
+}
+
+u32 s3c2440_read_camdivn(void);
+void s3c2440_write_camdivn(u32 camdiv);
+u32 s3c24xx_read_clkdivn(void);
+void s3c24xx_write_clkdivn(u32 clkdiv);
+u32 s3c24xx_read_mpllcon(void);
+void s3c24xx_write_locktime(u32 locktime);
+
+#endif
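A sketch of how a core driver's @calc_freqtable hook can be built on s3c_cpufreq_addfreq(), including the table == NULL sizing pass described above (the kHz frequencies are placeholders):

	static int my_calc_freqtable(struct s3c_cpufreq_config *cfg,
				     struct cpufreq_frequency_table *table,
				     size_t table_size)
	{
		int index = 0;

		/* With table == NULL, addfreq only counts the entries */
		index = s3c_cpufreq_addfreq(table, index, table_size, 100000);
		index = s3c_cpufreq_addfreq(table, index, table_size, 200000);

		return index;	/* number of entries, or negative errno */
	}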
diff --git a/include/linux/soc/samsung/s3c-pm.h b/include/linux/soc/samsung/s3c-pm.h
new file mode 100644
index 000000000000..f9164559c99f
--- /dev/null
+++ b/include/linux/soc/samsung/s3c-pm.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Tomasz Figa <t.figa@samsung.com>
+ * Copyright (c) 2004 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Written by Ben Dooks, <ben@simtec.co.uk>
+ */
+
+#ifndef __LINUX_SOC_SAMSUNG_S3C_PM_H
+#define __LINUX_SOC_SAMSUNG_S3C_PM_H __FILE__
+
+#include <linux/types.h>
+
+/* PM debug functions */
+
+/**
+ * struct pm_uart_save - save block for core UART
+ * @ulcon: Save value for S3C2410_ULCON
+ * @ucon: Save value for S3C2410_UCON
+ * @ufcon: Save value for S3C2410_UFCON
+ * @umcon: Save value for S3C2410_UMCON
+ * @ubrdiv: Save value for S3C2410_UBRDIV
+ * @udivslot: Save value for the UDIVSLOT register (where present)
+ *
+ * Save block for UART registers to be held over sleep and restored if they
+ * are needed (say by debug).
+ */
+struct pm_uart_save {
+ u32 ulcon;
+ u32 ucon;
+ u32 ufcon;
+ u32 umcon;
+ u32 ubrdiv;
+ u32 udivslot;
+};
+
+#ifdef CONFIG_SAMSUNG_PM_DEBUG
+/**
+ * s3c_pm_dbg() - low level debug function for use in suspend/resume.
+ * @msg: The message to print.
+ *
+ * This function is used mainly to debug the resume process before the system
+ * can rely on printk/console output. It uses the low-level debugging output
+ * routine printascii() to do its work.
+ */
+extern void s3c_pm_dbg(const char *msg, ...);
+
+#define S3C_PMDBG(fmt...) s3c_pm_dbg(fmt)
+
+extern void s3c_pm_save_uarts(bool is_s3c24xx);
+extern void s3c_pm_restore_uarts(bool is_s3c24xx);
+
+#ifdef CONFIG_ARCH_S3C64XX
+extern void s3c_pm_arch_update_uart(void __iomem *regs,
+ struct pm_uart_save *save);
+#else
+static inline void
+s3c_pm_arch_update_uart(void __iomem *regs, struct pm_uart_save *save)
+{
+}
+#endif
+
+#else
+#define S3C_PMDBG(fmt...) pr_debug(fmt)
+
+static inline void s3c_pm_save_uarts(bool is_s3c24xx) { }
+static inline void s3c_pm_restore_uarts(bool is_s3c24xx) { }
+#endif
+
+/* suspend memory checking */
+
+#ifdef CONFIG_SAMSUNG_PM_CHECK
+extern void s3c_pm_check_prepare(void);
+extern void s3c_pm_check_restore(void);
+extern void s3c_pm_check_cleanup(void);
+extern void s3c_pm_check_store(void);
+#else
+#define s3c_pm_check_prepare() do { } while (0)
+#define s3c_pm_check_restore() do { } while (0)
+#define s3c_pm_check_cleanup() do { } while (0)
+#define s3c_pm_check_store() do { } while (0)
+#endif
+
+/* system device subsystems */
+
+extern struct bus_type s3c2410_subsys;
+extern struct bus_type s3c2410a_subsys;
+extern struct bus_type s3c2412_subsys;
+extern struct bus_type s3c2416_subsys;
+extern struct bus_type s3c2440_subsys;
+extern struct bus_type s3c2442_subsys;
+extern struct bus_type s3c2443_subsys;
+
+#endif
diff --git a/include/linux/soc/sunxi/sunxi_sram.h b/include/linux/soc/sunxi/sunxi_sram.h
index c5f663bba9c2..60e274d1b821 100644
--- a/include/linux/soc/sunxi/sunxi_sram.h
+++ b/include/linux/soc/sunxi/sunxi_sram.h
@@ -14,6 +14,6 @@
#define _SUNXI_SRAM_H_
int sunxi_sram_claim(struct device *dev);
-int sunxi_sram_release(struct device *dev);
+void sunxi_sram_release(struct device *dev);
#endif /* _SUNXI_SRAM_H_ */
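With the release path now returning void, a consumer simply pairs claim and release (driver callbacks hypothetical):

	static int my_probe(struct device *dev)
	{
		int ret;

		ret = sunxi_sram_claim(dev);
		if (ret)
			return ret;

		/* ... set up the IP block that maps the SRAM section ... */
		return 0;
	}

	static void my_remove(struct device *dev)
	{
		sunxi_sram_release(dev);	/* cannot fail anymore */
	}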
diff --git a/include/linux/soc/ti/k3-ringacc.h b/include/linux/soc/ti/k3-ringacc.h
index 26f73df0a524..39b022b92598 100644
--- a/include/linux/soc/ti/k3-ringacc.h
+++ b/include/linux/soc/ti/k3-ringacc.h
@@ -2,7 +2,7 @@
/*
* K3 Ring Accelerator (RA) subsystem interface
*
- * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
*/
#ifndef __SOC_TI_K3_RINGACC_API_H_
@@ -67,6 +67,10 @@ struct k3_ring;
* few times. It's usable when the same ring is used as Free Host PD ring
* for different flows, for example.
* Note: Locking should be done by consumer if required
+ * @dma_dev: Master device which is using and accessing the ring
+ * memory when the mode is K3_RINGACC_RING_MODE_RING. Memory allocations
+ * should be done using this device.
+ * @asel: Address Space Select value for physical addresses
*/
struct k3_ring_cfg {
u32 size;
@@ -74,6 +78,9 @@ struct k3_ring_cfg {
enum k3_ring_mode mode;
#define K3_RINGACC_RING_SHARED BIT(1)
u32 flags;
+
+ struct device *dma_dev;
+ u32 asel;
};
#define K3_RINGACC_RING_ID_ANY (-1)
@@ -107,6 +114,10 @@ struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
int id, u32 flags);
+int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc,
+ int fwd_id, int compl_id,
+ struct k3_ring **fwd_ring,
+ struct k3_ring **compl_ring);
/**
* k3_ringacc_ring_reset - ring reset
* @ring: pointer on Ring
@@ -241,4 +252,19 @@ int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem);
u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring);
+/* DMA ring support */
+struct ti_sci_handle;
+
+/**
+ * struct k3_ringacc_init_data - Initialization data for DMA rings
+ */
+struct k3_ringacc_init_data {
+ const struct ti_sci_handle *tisci;
+ u32 tisci_dev_id;
+ u32 num_rings;
+};
+
+struct k3_ringacc *k3_ringacc_dmarings_init(struct platform_device *pdev,
+ struct k3_ringacc_init_data *data);
+
#endif /* __SOC_TI_K3_RINGACC_API_H_ */
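A sketch of the new pair-request helper together with the dma_dev/asel fields; the elm_size field name and its enum values come from parts of the header not shown in this hunk, so treat them as assumptions:

	static int my_setup_rings(struct k3_ringacc *ringacc, struct device *dma_dev)
	{
		struct k3_ring *fwd_ring, *compl_ring;
		struct k3_ring_cfg cfg = {
			.size = 128,
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.mode = K3_RINGACC_RING_MODE_RING,
			.dma_dev = dma_dev,	/* ring memory allocated against this master */
			.asel = 0,
		};
		int ret;

		ret = k3_ringacc_request_rings_pair(ringacc, K3_RINGACC_RING_ID_ANY,
						    K3_RINGACC_RING_ID_ANY,
						    &fwd_ring, &compl_ring);
		if (ret)
			return ret;

		ret = k3_ringacc_ring_cfg(fwd_ring, &cfg);
		if (ret)
			return ret;

		return k3_ringacc_ring_cfg(compl_ring, &cfg);
	}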
diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h
index 7127ec301537..18d806a8e52c 100644
--- a/include/linux/soc/ti/knav_dma.h
+++ b/include/linux/soc/ti/knav_dma.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2014 Texas Instruments Incorporated
* Authors: Sandeep Nair <sandeep_n@ti.com>
* Cyril Chemparathy <cyril@ti.com>
* Santosh Shilimkar <santosh.shilimkar@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__
diff --git a/include/linux/soc/ti/knav_qmss.h b/include/linux/soc/ti/knav_qmss.h
index 9745df6ed9d3..175f466ebcc3 100644
--- a/include/linux/soc/ti/knav_qmss.h
+++ b/include/linux/soc/ti/knav_qmss.h
@@ -1,19 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Keystone Navigator Queue Management Sub-System header
*
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com
* Author: Sandeep Nair <sandeep_n@ti.com>
* Cyril Chemparathy <cyril@ti.com>
* Santosh Shilimkar <santosh.shilimkar@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __SOC_TI_KNAV_QMSS_H__
diff --git a/include/linux/soc/ti/omap1-io.h b/include/linux/soc/ti/omap1-io.h
new file mode 100644
index 000000000000..f7f12728d4a6
--- /dev/null
+++ b/include/linux/soc/ti/omap1-io.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_ARCH_OMAP_IO_H
+#define __ASM_ARCH_OMAP_IO_H
+
+#ifndef __ASSEMBLER__
+#include <linux/types.h>
+
+#ifdef CONFIG_ARCH_OMAP1_ANY
+/*
+ * NOTE: Please use ioremap + __raw_read/write where possible instead of these
+ */
+extern u8 omap_readb(u32 pa);
+extern u16 omap_readw(u32 pa);
+extern u32 omap_readl(u32 pa);
+extern void omap_writeb(u8 v, u32 pa);
+extern void omap_writew(u16 v, u32 pa);
+extern void omap_writel(u32 v, u32 pa);
+#else
+static inline u8 omap_readb(u32 pa) { return 0; }
+static inline u16 omap_readw(u32 pa) { return 0; }
+static inline u32 omap_readl(u32 pa) { return 0; }
+static inline void omap_writeb(u8 v, u32 pa) { }
+static inline void omap_writew(u16 v, u32 pa) { }
+static inline void omap_writel(u32 v, u32 pa) { }
+#endif
+#endif
+
+/*
+ * ----------------------------------------------------------------------------
+ * System control registers
+ * ----------------------------------------------------------------------------
+ */
+#define MOD_CONF_CTRL_0 0xfffe1080
+#define MOD_CONF_CTRL_1 0xfffe1110
+
+/*
+ * ---------------------------------------------------------------------------
+ * ULPD
+ * ---------------------------------------------------------------------------
+ */
+#define ULPD_REG_BASE (0xfffe0800)
+#define ULPD_IT_STATUS (ULPD_REG_BASE + 0x14)
+#define ULPD_SETUP_ANALOG_CELL_3 (ULPD_REG_BASE + 0x24)
+#define ULPD_CLOCK_CTRL (ULPD_REG_BASE + 0x30)
+# define DIS_USB_PVCI_CLK (1 << 5) /* no USB/FAC synch */
+# define USB_MCLK_EN (1 << 4) /* enable W4_USB_CLKO */
+#define ULPD_SOFT_REQ (ULPD_REG_BASE + 0x34)
+# define SOFT_UDC_REQ (1 << 4)
+# define SOFT_USB_CLK_REQ (1 << 3)
+# define SOFT_DPLL_REQ (1 << 0)
+#define ULPD_DPLL_CTRL (ULPD_REG_BASE + 0x3c)
+#define ULPD_STATUS_REQ (ULPD_REG_BASE + 0x40)
+#define ULPD_APLL_CTRL (ULPD_REG_BASE + 0x4c)
+#define ULPD_POWER_CTRL (ULPD_REG_BASE + 0x50)
+#define ULPD_SOFT_DISABLE_REQ_REG (ULPD_REG_BASE + 0x68)
+# define DIS_MMC2_DPLL_REQ (1 << 11)
+# define DIS_MMC1_DPLL_REQ (1 << 10)
+# define DIS_UART3_DPLL_REQ (1 << 9)
+# define DIS_UART2_DPLL_REQ (1 << 8)
+# define DIS_UART1_DPLL_REQ (1 << 7)
+# define DIS_USB_HOST_DPLL_REQ (1 << 6)
+#define ULPD_SDW_CLK_DIV_CTRL_SEL (ULPD_REG_BASE + 0x74)
+#define ULPD_CAM_CLK_CTRL (ULPD_REG_BASE + 0x7c)
+
+/*
+ * ----------------------------------------------------------------------------
+ * Clocks
+ * ----------------------------------------------------------------------------
+ */
+#define CLKGEN_REG_BASE (0xfffece00)
+#define ARM_CKCTL (CLKGEN_REG_BASE + 0x0)
+#define ARM_IDLECT1 (CLKGEN_REG_BASE + 0x4)
+#define ARM_IDLECT2 (CLKGEN_REG_BASE + 0x8)
+#define ARM_EWUPCT (CLKGEN_REG_BASE + 0xC)
+#define ARM_RSTCT1 (CLKGEN_REG_BASE + 0x10)
+#define ARM_RSTCT2 (CLKGEN_REG_BASE + 0x14)
+#define ARM_SYSST (CLKGEN_REG_BASE + 0x18)
+#define ARM_IDLECT3 (CLKGEN_REG_BASE + 0x24)
+
+#define CK_RATEF 1
+#define CK_IDLEF 2
+#define CK_ENABLEF 4
+#define CK_SELECTF 8
+#define SETARM_IDLE_SHIFT
+
+/* DPLL control registers */
+#define DPLL_CTL (0xfffecf00)
+
+/* DSP clock control. Must use __raw_readw() and __raw_writew() with these */
+#define DSP_CONFIG_REG_BASE IOMEM(0xe1008000)
+#define DSP_CKCTL (DSP_CONFIG_REG_BASE + 0x0)
+#define DSP_IDLECT1 (DSP_CONFIG_REG_BASE + 0x4)
+#define DSP_IDLECT2 (DSP_CONFIG_REG_BASE + 0x8)
+#define DSP_RSTCT2 (DSP_CONFIG_REG_BASE + 0x14)
+
+/*
+ * ----------------------------------------------------------------------------
+ * Pulse-Width Light
+ * ----------------------------------------------------------------------------
+ */
+#define OMAP_PWL_BASE 0xfffb5800
+#define OMAP_PWL_ENABLE (OMAP_PWL_BASE + 0x00)
+#define OMAP_PWL_CLK_ENABLE (OMAP_PWL_BASE + 0x04)
+
+/*
+ * ----------------------------------------------------------------------------
+ * Pin multiplexing registers
+ * ----------------------------------------------------------------------------
+ */
+#define FUNC_MUX_CTRL_0 0xfffe1000
+#define FUNC_MUX_CTRL_1 0xfffe1004
+#define FUNC_MUX_CTRL_2 0xfffe1008
+#define COMP_MODE_CTRL_0 0xfffe100c
+#define FUNC_MUX_CTRL_3 0xfffe1010
+#define FUNC_MUX_CTRL_4 0xfffe1014
+#define FUNC_MUX_CTRL_5 0xfffe1018
+#define FUNC_MUX_CTRL_6 0xfffe101C
+#define FUNC_MUX_CTRL_7 0xfffe1020
+#define FUNC_MUX_CTRL_8 0xfffe1024
+#define FUNC_MUX_CTRL_9 0xfffe1028
+#define FUNC_MUX_CTRL_A 0xfffe102C
+#define FUNC_MUX_CTRL_B 0xfffe1030
+#define FUNC_MUX_CTRL_C 0xfffe1034
+#define FUNC_MUX_CTRL_D 0xfffe1038
+#define PULL_DWN_CTRL_0 0xfffe1040
+#define PULL_DWN_CTRL_1 0xfffe1044
+#define PULL_DWN_CTRL_2 0xfffe1048
+#define PULL_DWN_CTRL_3 0xfffe104c
+#define PULL_DWN_CTRL_4 0xfffe10ac
+
+/* OMAP-1610 specific multiplexing registers */
+#define FUNC_MUX_CTRL_E 0xfffe1090
+#define FUNC_MUX_CTRL_F 0xfffe1094
+#define FUNC_MUX_CTRL_10 0xfffe1098
+#define FUNC_MUX_CTRL_11 0xfffe109c
+#define FUNC_MUX_CTRL_12 0xfffe10a0
+#define PU_PD_SEL_0 0xfffe10b4
+#define PU_PD_SEL_1 0xfffe10b8
+#define PU_PD_SEL_2 0xfffe10bc
+#define PU_PD_SEL_3 0xfffe10c0
+#define PU_PD_SEL_4 0xfffe10c4
+
+#endif
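A short sketch using the accessors and the ULPD definitions above (illustrative only; as the header itself notes, ioremap plus __raw accessors are preferred):

	static void my_request_dpll(void)
	{
		u32 val = omap_readl(ULPD_SOFT_REQ);

		omap_writel(val | SOFT_DPLL_REQ, ULPD_SOFT_REQ);
	}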
diff --git a/include/linux/soc/ti/omap1-mux.h b/include/linux/soc/ti/omap1-mux.h
new file mode 100644
index 000000000000..59c239b5569c
--- /dev/null
+++ b/include/linux/soc/ti/omap1-mux.h
@@ -0,0 +1,311 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __SOC_TI_OMAP1_MUX_H
+#define __SOC_TI_OMAP1_MUX_H
+/*
+ * This should not really be a global header, it reflects the
+ * traditional way that omap1 does pin muxing without the
+ * pinctrl subsystem.
+ */
+
+enum omap7xx_index {
+ /* OMAP 730 keyboard */
+ E2_7XX_KBR0,
+ J7_7XX_KBR1,
+ E1_7XX_KBR2,
+ F3_7XX_KBR3,
+ D2_7XX_KBR4,
+ C2_7XX_KBC0,
+ D3_7XX_KBC1,
+ E4_7XX_KBC2,
+ F4_7XX_KBC3,
+ E3_7XX_KBC4,
+
+ /* USB */
+ AA17_7XX_USB_DM,
+ W16_7XX_USB_PU_EN,
+ W17_7XX_USB_VBUSI,
+ W18_7XX_USB_DMCK_OUT,
+ W19_7XX_USB_DCRST,
+
+ /* MMC */
+ MMC_7XX_CMD,
+ MMC_7XX_CLK,
+ MMC_7XX_DAT0,
+
+ /* I2C */
+ I2C_7XX_SCL,
+ I2C_7XX_SDA,
+
+ /* SPI */
+ SPI_7XX_1,
+ SPI_7XX_2,
+ SPI_7XX_3,
+ SPI_7XX_4,
+ SPI_7XX_5,
+ SPI_7XX_6,
+
+ /* UART */
+ UART_7XX_1,
+ UART_7XX_2,
+};
+
+enum omap1xxx_index {
+ /* UART1 (BT_UART_GATING)*/
+ UART1_TX = 0,
+ UART1_RTS,
+
+ /* UART2 (COM_UART_GATING)*/
+ UART2_TX,
+ UART2_RX,
+ UART2_CTS,
+ UART2_RTS,
+
+ /* UART3 (GIGA_UART_GATING) */
+ UART3_TX,
+ UART3_RX,
+ UART3_CTS,
+ UART3_RTS,
+ UART3_CLKREQ,
+ UART3_BCLK, /* 12MHz clock out */
+ Y15_1610_UART3_RTS,
+
+ /* PWT & PWL */
+ PWT,
+ PWL,
+
+ /* USB master generic */
+ R18_USB_VBUS,
+ R18_1510_USB_GPIO0,
+ W4_USB_PUEN,
+ W4_USB_CLKO,
+ W4_USB_HIGHZ,
+ W4_GPIO58,
+
+ /* USB1 master */
+ USB1_SUSP,
+ USB1_SEO,
+ W13_1610_USB1_SE0,
+ USB1_TXEN,
+ USB1_TXD,
+ USB1_VP,
+ USB1_VM,
+ USB1_RCV,
+ USB1_SPEED,
+ R13_1610_USB1_SPEED,
+ R13_1710_USB1_SE0,
+
+ /* USB2 master */
+ USB2_SUSP,
+ USB2_VP,
+ USB2_TXEN,
+ USB2_VM,
+ USB2_RCV,
+ USB2_SEO,
+ USB2_TXD,
+
+ /* OMAP-1510 GPIO */
+ R18_1510_GPIO0,
+ R19_1510_GPIO1,
+ M14_1510_GPIO2,
+
+ /* OMAP1610 GPIO */
+ P18_1610_GPIO3,
+ Y15_1610_GPIO17,
+
+ /* OMAP-1710 GPIO */
+ R18_1710_GPIO0,
+ V2_1710_GPIO10,
+ N21_1710_GPIO14,
+ W15_1710_GPIO40,
+
+ /* MPUIO */
+ MPUIO2,
+ N15_1610_MPUIO2,
+ MPUIO4,
+ MPUIO5,
+ T20_1610_MPUIO5,
+ W11_1610_MPUIO6,
+ V10_1610_MPUIO7,
+ W11_1610_MPUIO9,
+ V10_1610_MPUIO10,
+ W10_1610_MPUIO11,
+ E20_1610_MPUIO13,
+ U20_1610_MPUIO14,
+ E19_1610_MPUIO15,
+
+ /* MCBSP2 */
+ MCBSP2_CLKR,
+ MCBSP2_CLKX,
+ MCBSP2_DR,
+ MCBSP2_DX,
+ MCBSP2_FSR,
+ MCBSP2_FSX,
+
+ /* MCBSP3 */
+ MCBSP3_CLKX,
+
+ /* Misc ballouts */
+ BALLOUT_V8_ARMIO3,
+ N20_HDQ,
+
+ /* OMAP-1610 MMC2 */
+ W8_1610_MMC2_DAT0,
+ V8_1610_MMC2_DAT1,
+ W15_1610_MMC2_DAT2,
+ R10_1610_MMC2_DAT3,
+ Y10_1610_MMC2_CLK,
+ Y8_1610_MMC2_CMD,
+ V9_1610_MMC2_CMDDIR,
+ V5_1610_MMC2_DATDIR0,
+ W19_1610_MMC2_DATDIR1,
+ R18_1610_MMC2_CLKIN,
+
+ /* OMAP-1610 External Trace Interface */
+ M19_1610_ETM_PSTAT0,
+ L15_1610_ETM_PSTAT1,
+ L18_1610_ETM_PSTAT2,
+ L19_1610_ETM_D0,
+ J19_1610_ETM_D6,
+ J18_1610_ETM_D7,
+
+ /* OMAP16XX GPIO */
+ P20_1610_GPIO4,
+ V9_1610_GPIO7,
+ W8_1610_GPIO9,
+ N20_1610_GPIO11,
+ N19_1610_GPIO13,
+ P10_1610_GPIO22,
+ V5_1610_GPIO24,
+ AA20_1610_GPIO_41,
+ W19_1610_GPIO48,
+ M7_1610_GPIO62,
+ V14_16XX_GPIO37,
+ R9_16XX_GPIO18,
+ L14_16XX_GPIO49,
+
+ /* OMAP-1610 uWire */
+ V19_1610_UWIRE_SCLK,
+ U18_1610_UWIRE_SDI,
+ W21_1610_UWIRE_SDO,
+ N14_1610_UWIRE_CS0,
+ P15_1610_UWIRE_CS3,
+ N15_1610_UWIRE_CS1,
+
+ /* OMAP-1610 SPI */
+ U19_1610_SPIF_SCK,
+ U18_1610_SPIF_DIN,
+ P20_1610_SPIF_DIN,
+ W21_1610_SPIF_DOUT,
+ R18_1610_SPIF_DOUT,
+ N14_1610_SPIF_CS0,
+ N15_1610_SPIF_CS1,
+ T19_1610_SPIF_CS2,
+ P15_1610_SPIF_CS3,
+
+ /* OMAP-1610 Flash */
+ L3_1610_FLASH_CS2B_OE,
+ M8_1610_FLASH_CS2B_WE,
+
+ /* First MMC */
+ MMC_CMD,
+ MMC_DAT1,
+ MMC_DAT2,
+ MMC_DAT0,
+ MMC_CLK,
+ MMC_DAT3,
+
+ /* OMAP-1710 MMC CMDDIR and DATDIR0 */
+ M15_1710_MMC_CLKI,
+ P19_1710_MMC_CMDDIR,
+ P20_1710_MMC_DATDIR0,
+
+ /* OMAP-1610 USB0 alternate pin configuration */
+ W9_USB0_TXEN,
+ AA9_USB0_VP,
+ Y5_USB0_RCV,
+ R9_USB0_VM,
+ V6_USB0_TXD,
+ W5_USB0_SE0,
+ V9_USB0_SPEED,
+ V9_USB0_SUSP,
+
+ /* USB2 */
+ W9_USB2_TXEN,
+ AA9_USB2_VP,
+ Y5_USB2_RCV,
+ R9_USB2_VM,
+ V6_USB2_TXD,
+ W5_USB2_SE0,
+
+ /* 16XX UART */
+ R13_1610_UART1_TX,
+ V14_16XX_UART1_RX,
+ R14_1610_UART1_CTS,
+ AA15_1610_UART1_RTS,
+ R9_16XX_UART2_RX,
+ L14_16XX_UART3_RX,
+
+ /* I2C OMAP-1610 */
+ I2C_SCL,
+ I2C_SDA,
+
+ /* Keypad */
+ F18_1610_KBC0,
+ D20_1610_KBC1,
+ D19_1610_KBC2,
+ E18_1610_KBC3,
+ C21_1610_KBC4,
+ G18_1610_KBR0,
+ F19_1610_KBR1,
+ H14_1610_KBR2,
+ E20_1610_KBR3,
+ E19_1610_KBR4,
+ N19_1610_KBR5,
+
+ /* Power management */
+ T20_1610_LOW_PWR,
+
+ /* MCLK Settings */
+ V5_1710_MCLK_ON,
+ V5_1710_MCLK_OFF,
+ R10_1610_MCLK_ON,
+ R10_1610_MCLK_OFF,
+
+ /* CompactFlash controller */
+ P11_1610_CF_CD2,
+ R11_1610_CF_IOIS16,
+ V10_1610_CF_IREQ,
+ W10_1610_CF_RESET,
+ W11_1610_CF_CD1,
+
+ /* parallel camera */
+ J15_1610_CAM_LCLK,
+ J18_1610_CAM_D7,
+ J19_1610_CAM_D6,
+ J14_1610_CAM_D5,
+ K18_1610_CAM_D4,
+ K19_1610_CAM_D3,
+ K15_1610_CAM_D2,
+ K14_1610_CAM_D1,
+ L19_1610_CAM_D0,
+ L18_1610_CAM_VS,
+ L15_1610_CAM_HS,
+ M19_1610_CAM_RSTZ,
+ Y15_1610_CAM_OUTCLK,
+
+ /* serial camera */
+ H19_1610_CAM_EXCLK,
+ Y12_1610_CCP_CLKP,
+ W13_1610_CCP_CLKM,
+ W14_1610_CCP_DATAP,
+ Y14_1610_CCP_DATAM,
+
+};
+
+#ifdef CONFIG_OMAP_MUX
+extern int omap_cfg_reg(unsigned long reg_cfg);
+#else
+static inline int omap_cfg_reg(unsigned long reg_cfg) { return 0; }
+#endif
+
+#endif
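Board files consume these enum values through omap_cfg_reg(); a minimal sketch:

	static void __init my_board_mux_init(void)
	{
		/* Route the UART1 TX and I2C pads */
		omap_cfg_reg(UART1_TX);
		omap_cfg_reg(I2C_SCL);
		omap_cfg_reg(I2C_SDA);
	}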
diff --git a/include/linux/soc/ti/omap1-soc.h b/include/linux/soc/ti/omap1-soc.h
new file mode 100644
index 000000000000..81008d400bb6
--- /dev/null
+++ b/include/linux/soc/ti/omap1-soc.h
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * OMAP cpu type detection
+ *
+ * Copyright (C) 2004, 2008 Nokia Corporation
+ *
+ * Copyright (C) 2009-11 Texas Instruments.
+ *
+ * Written by Tony Lindgren <tony.lindgren@nokia.com>
+ *
+ * Added OMAP4/5 specific defines - Santosh Shilimkar <santosh.shilimkar@ti.com>
+ */
+
+#ifndef __ASM_ARCH_OMAP_CPU_H
+#define __ASM_ARCH_OMAP_CPU_H
+
+/*
+ * Test if multicore OMAP support is needed
+ */
+#undef MULTI_OMAP1
+#undef OMAP_NAME
+
+#ifdef CONFIG_ARCH_OMAP730
+# ifdef OMAP_NAME
+# undef MULTI_OMAP1
+# define MULTI_OMAP1
+# else
+# define OMAP_NAME omap730
+# endif
+#endif
+#ifdef CONFIG_ARCH_OMAP850
+# ifdef OMAP_NAME
+# undef MULTI_OMAP1
+# define MULTI_OMAP1
+# else
+# define OMAP_NAME omap850
+# endif
+#endif
+#ifdef CONFIG_ARCH_OMAP15XX
+# ifdef OMAP_NAME
+# undef MULTI_OMAP1
+# define MULTI_OMAP1
+# else
+# define OMAP_NAME omap1510
+# endif
+#endif
+#ifdef CONFIG_ARCH_OMAP16XX
+# ifdef OMAP_NAME
+# undef MULTI_OMAP1
+# define MULTI_OMAP1
+# else
+# define OMAP_NAME omap16xx
+# endif
+#endif
+
+/*
+ * omap_rev bits:
+ * CPU id bits (0730, 1510, 1710, 2422...) [31:16]
+ * CPU revision (See _REV_ defined in cpu.h) [15:08]
+ * CPU class bits (15xx, 16xx, 24xx, 34xx...) [07:00]
+ */
+unsigned int omap_rev(void);
+
+/*
+ * Get the CPU revision for OMAP devices
+ */
+#define GET_OMAP_REVISION() ((omap_rev() >> 8) & 0xff)
+
+/*
+ * Macros to group OMAP into cpu classes.
+ * These can be used in most places.
+ * cpu_is_omap7xx(): True for OMAP730, OMAP850
+ * cpu_is_omap15xx(): True for OMAP1510, OMAP5910 and OMAP310
+ * cpu_is_omap16xx(): True for OMAP1610, OMAP5912 and OMAP1710
+ */
+#define GET_OMAP_CLASS (omap_rev() & 0xff)
+
+#define IS_OMAP_CLASS(class, id) \
+static inline int is_omap ##class (void) \
+{ \
+ return (GET_OMAP_CLASS == (id)) ? 1 : 0; \
+}
+
+#define GET_OMAP_SUBCLASS ((omap_rev() >> 20) & 0x0fff)
+
+#define IS_OMAP_SUBCLASS(subclass, id) \
+static inline int is_omap ##subclass (void) \
+{ \
+ return (GET_OMAP_SUBCLASS == (id)) ? 1 : 0; \
+}
+
+IS_OMAP_CLASS(7xx, 0x07)
+IS_OMAP_CLASS(15xx, 0x15)
+IS_OMAP_CLASS(16xx, 0x16)
+
+#define cpu_is_omap7xx() 0
+#define cpu_is_omap15xx() 0
+#define cpu_is_omap16xx() 0
+
+#if defined(MULTI_OMAP1)
+# if defined(CONFIG_ARCH_OMAP730)
+# undef cpu_is_omap7xx
+# define cpu_is_omap7xx() is_omap7xx()
+# endif
+# if defined(CONFIG_ARCH_OMAP850)
+# undef cpu_is_omap7xx
+# define cpu_is_omap7xx() is_omap7xx()
+# endif
+# if defined(CONFIG_ARCH_OMAP15XX)
+# undef cpu_is_omap15xx
+# define cpu_is_omap15xx() is_omap15xx()
+# endif
+# if defined(CONFIG_ARCH_OMAP16XX)
+# undef cpu_is_omap16xx
+# define cpu_is_omap16xx() is_omap16xx()
+# endif
+#else
+# if defined(CONFIG_ARCH_OMAP730)
+# undef cpu_is_omap7xx
+# define cpu_is_omap7xx() 1
+# endif
+# if defined(CONFIG_ARCH_OMAP850)
+# undef cpu_is_omap7xx
+# define cpu_is_omap7xx() 1
+# endif
+# if defined(CONFIG_ARCH_OMAP15XX)
+# undef cpu_is_omap15xx
+# define cpu_is_omap15xx() 1
+# endif
+# if defined(CONFIG_ARCH_OMAP16XX)
+# undef cpu_is_omap16xx
+# define cpu_is_omap16xx() 1
+# endif
+#endif
+
+/*
+ * Macros to detect individual cpu types.
+ * These are only rarely needed.
+ * cpu_is_omap310(): True for OMAP310
+ * cpu_is_omap1510(): True for OMAP1510
+ * cpu_is_omap1610(): True for OMAP1610
+ * cpu_is_omap1611(): True for OMAP1611
+ * cpu_is_omap5912(): True for OMAP5912
+ * cpu_is_omap1621(): True for OMAP1621
+ * cpu_is_omap1710(): True for OMAP1710
+ */
+#define GET_OMAP_TYPE ((omap_rev() >> 16) & 0xffff)
+
+#define IS_OMAP_TYPE(type, id) \
+static inline int is_omap ##type (void) \
+{ \
+ return (GET_OMAP_TYPE == (id)) ? 1 : 0; \
+}
+
+IS_OMAP_TYPE(310, 0x0310)
+IS_OMAP_TYPE(1510, 0x1510)
+IS_OMAP_TYPE(1610, 0x1610)
+IS_OMAP_TYPE(1611, 0x1611)
+IS_OMAP_TYPE(5912, 0x1611)
+IS_OMAP_TYPE(1621, 0x1621)
+IS_OMAP_TYPE(1710, 0x1710)
+
+#define cpu_is_omap310() 0
+#define cpu_is_omap1510() 0
+#define cpu_is_omap1610() 0
+#define cpu_is_omap5912() 0
+#define cpu_is_omap1611() 0
+#define cpu_is_omap1621() 0
+#define cpu_is_omap1710() 0
+
+#define cpu_class_is_omap1() 1
+
+/*
+ * Whether we have MULTI_OMAP1 or not, we still need to distinguish
+ * between 310 vs. 1510 and 1611B/5912 vs. 1710.
+ */
+
+#if defined(CONFIG_ARCH_OMAP15XX)
+# undef cpu_is_omap310
+# undef cpu_is_omap1510
+# define cpu_is_omap310() is_omap310()
+# define cpu_is_omap1510() is_omap1510()
+#endif
+
+#if defined(CONFIG_ARCH_OMAP16XX)
+# undef cpu_is_omap1610
+# undef cpu_is_omap1611
+# undef cpu_is_omap5912
+# undef cpu_is_omap1621
+# undef cpu_is_omap1710
+# define cpu_is_omap1610() is_omap1610()
+# define cpu_is_omap1611() is_omap1611()
+# define cpu_is_omap5912() is_omap5912()
+# define cpu_is_omap1621() is_omap1621()
+# define cpu_is_omap1710() is_omap1710()
+#endif
+
+#endif
diff --git a/include/linux/soc/ti/omap1-usb.h b/include/linux/soc/ti/omap1-usb.h
new file mode 100644
index 000000000000..67488698601a
--- /dev/null
+++ b/include/linux/soc/ti/omap1-usb.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SOC_TI_OMAP1_USB
+#define __SOC_TI_OMAP1_USB
+/*
+ * Constants in this file are used all over the place, in platform
+ * code, as well as the udc, phy and ohci drivers.
+ * This is not a great design, but unlikely to get fixed after
+ * such a long time. Don't do this elsewhere.
+ */
+
+#define OMAP1_OTG_BASE 0xfffb0400
+#define OMAP1_UDC_BASE 0xfffb4000
+
+#define OMAP2_UDC_BASE 0x4805e200
+#define OMAP2_OTG_BASE 0x4805e300
+#define OTG_BASE OMAP1_OTG_BASE
+#define UDC_BASE OMAP1_UDC_BASE
+
+/*
+ * OTG and transceiver registers, for OMAPs starting with ARM926
+ */
+#define OTG_REV (OTG_BASE + 0x00)
+#define OTG_SYSCON_1 (OTG_BASE + 0x04)
+# define USB2_TRX_MODE(w) (((w)>>24)&0x07)
+# define USB1_TRX_MODE(w) (((w)>>20)&0x07)
+# define USB0_TRX_MODE(w) (((w)>>16)&0x07)
+# define OTG_IDLE_EN (1 << 15)
+# define HST_IDLE_EN (1 << 14)
+# define DEV_IDLE_EN (1 << 13)
+# define OTG_RESET_DONE (1 << 2)
+# define OTG_SOFT_RESET (1 << 1)
+#define OTG_SYSCON_2 (OTG_BASE + 0x08)
+# define OTG_EN (1 << 31)
+# define USBX_SYNCHRO (1 << 30)
+# define OTG_MST16 (1 << 29)
+# define SRP_GPDATA (1 << 28)
+# define SRP_GPDVBUS (1 << 27)
+# define SRP_GPUVBUS(w) (((w)>>24)&0x07)
+# define A_WAIT_VRISE(w) (((w)>>20)&0x07)
+# define B_ASE_BRST(w) (((w)>>16)&0x07)
+# define SRP_DPW (1 << 14)
+# define SRP_DATA (1 << 13)
+# define SRP_VBUS (1 << 12)
+# define OTG_PADEN (1 << 10)
+# define HMC_PADEN (1 << 9)
+# define UHOST_EN (1 << 8)
+# define HMC_TLLSPEED (1 << 7)
+# define HMC_TLLATTACH (1 << 6)
+# define OTG_HMC(w) (((w)>>0)&0x3f)
+#define OTG_CTRL (OTG_BASE + 0x0c)
+# define OTG_USB2_EN (1 << 29)
+# define OTG_USB2_DP (1 << 28)
+# define OTG_USB2_DM (1 << 27)
+# define OTG_USB1_EN (1 << 26)
+# define OTG_USB1_DP (1 << 25)
+# define OTG_USB1_DM (1 << 24)
+# define OTG_USB0_EN (1 << 23)
+# define OTG_USB0_DP (1 << 22)
+# define OTG_USB0_DM (1 << 21)
+# define OTG_ASESSVLD (1 << 20)
+# define OTG_BSESSEND (1 << 19)
+# define OTG_BSESSVLD (1 << 18)
+# define OTG_VBUSVLD (1 << 17)
+# define OTG_ID (1 << 16)
+# define OTG_DRIVER_SEL (1 << 15)
+# define OTG_A_SETB_HNPEN (1 << 12)
+# define OTG_A_BUSREQ (1 << 11)
+# define OTG_B_HNPEN (1 << 9)
+# define OTG_B_BUSREQ (1 << 8)
+# define OTG_BUSDROP (1 << 7)
+# define OTG_PULLDOWN (1 << 5)
+# define OTG_PULLUP (1 << 4)
+# define OTG_DRV_VBUS (1 << 3)
+# define OTG_PD_VBUS (1 << 2)
+# define OTG_PU_VBUS (1 << 1)
+# define OTG_PU_ID (1 << 0)
+#define OTG_IRQ_EN (OTG_BASE + 0x10) /* 16-bit */
+# define DRIVER_SWITCH (1 << 15)
+# define A_VBUS_ERR (1 << 13)
+# define A_REQ_TMROUT (1 << 12)
+# define A_SRP_DETECT (1 << 11)
+# define B_HNP_FAIL (1 << 10)
+# define B_SRP_TMROUT (1 << 9)
+# define B_SRP_DONE (1 << 8)
+# define B_SRP_STARTED (1 << 7)
+# define OPRT_CHG (1 << 0)
+#define OTG_IRQ_SRC (OTG_BASE + 0x14) /* 16-bit */
+ // same bits as in IRQ_EN
+#define OTG_OUTCTRL (OTG_BASE + 0x18) /* 16-bit */
+# define OTGVPD (1 << 14)
+# define OTGVPU (1 << 13)
+# define OTGPUID (1 << 12)
+# define USB2VDR (1 << 10)
+# define USB2PDEN (1 << 9)
+# define USB2PUEN (1 << 8)
+# define USB1VDR (1 << 6)
+# define USB1PDEN (1 << 5)
+# define USB1PUEN (1 << 4)
+# define USB0VDR (1 << 2)
+# define USB0PDEN (1 << 1)
+# define USB0PUEN (1 << 0)
+#define OTG_TEST (OTG_BASE + 0x20) /* 16-bit */
+#define OTG_VENDOR_CODE (OTG_BASE + 0xfc) /* 16-bit */
+
+/*-------------------------------------------------------------------------*/
+
+/* OMAP1 */
+#define USB_TRANSCEIVER_CTRL (0xfffe1000 + 0x0064)
+# define CONF_USB2_UNI_R (1 << 8)
+# define CONF_USB1_UNI_R (1 << 7)
+# define CONF_USB_PORT0_R(x) (((x)>>4)&0x7)
+# define CONF_USB0_ISOLATE_R (1 << 3)
+# define CONF_USB_PWRDN_DM_R (1 << 2)
+# define CONF_USB_PWRDN_DP_R (1 << 1)
+
+#endif
diff --git a/include/linux/soc/ti/ti-msgmgr.h b/include/linux/soc/ti/ti-msgmgr.h
index eac8e0c6fe11..543da257a5f2 100644
--- a/include/linux/soc/ti/ti-msgmgr.h
+++ b/include/linux/soc/ti/ti-msgmgr.h
@@ -1,26 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Texas Instruments' Message Manager
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef TI_MSGMGR_H
#define TI_MSGMGR_H
+struct mbox_chan;
+
/**
* struct ti_msgmgr_message - Message Manager structure
* @len: Length of data in the Buffer
* @buf: Buffer pointer
+ * @chan_rx: Expected channel for response, must be provided to use polled rx
+ * @timeout_rx_ms: Timeout value to use if polling for response
*
* This is the structure for data used in mbox_send_message
* the length of data buffer used depends on the SoC integration
@@ -30,6 +26,8 @@
struct ti_msgmgr_message {
size_t len;
u8 *buf;
+ struct mbox_chan *chan_rx;
+ int timeout_rx_ms;
};
#endif /* TI_MSGMGR_H */
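The two new fields opt a transfer into polled receive. A hedged sketch of a client filling them in before mbox_send_message() (channel acquisition omitted; the timeout value is a placeholder):

	#include <linux/mailbox_client.h>
	#include <linux/soc/ti/ti-msgmgr.h>

	static int my_send_and_poll(struct mbox_chan *tx_chan,
				    struct mbox_chan *rx_chan,
				    u8 *buf, size_t len)
	{
		struct ti_msgmgr_message msg = {
			.len = len,
			.buf = buf,
			.chan_rx = rx_chan,	/* response expected here */
			.timeout_rx_ms = 100,	/* poll up to 100 ms */
		};

		return mbox_send_message(tx_chan, &msg);
	}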
diff --git a/include/linux/soc/ti/ti_sci_inta_msi.h b/include/linux/soc/ti/ti_sci_inta_msi.h
index 11fb5048f5f6..4dba2f2aff6f 100644
--- a/include/linux/soc/ti/ti_sci_inta_msi.h
+++ b/include/linux/soc/ti/ti_sci_inta_msi.h
@@ -2,7 +2,7 @@
/*
* Texas Instruments' K3 TI SCI INTA MSI helper
*
- * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2018-2019 Texas Instruments Incorporated - https://www.ti.com/
* Lokesh Vutla <lokeshvutla@ti.com>
*/
@@ -18,6 +18,4 @@ struct irq_domain
struct irq_domain *parent);
int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev,
struct ti_sci_resource *res);
-unsigned int ti_sci_inta_msi_get_virq(struct device *dev, u32 index);
-void ti_sci_inta_msi_domain_free_irqs(struct device *dev);
#endif /* __INCLUDE_LINUX_IRQCHIP_TI_SCI_INTA_H */
diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h
index 9531ec823298..bd0d11af76c5 100644
--- a/include/linux/soc/ti/ti_sci_protocol.h
+++ b/include/linux/soc/ti/ti_sci_protocol.h
@@ -2,7 +2,7 @@
/*
* Texas Instruments System Control Interface Protocol
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
*/
@@ -196,6 +196,22 @@ struct ti_sci_clk_ops {
};
/**
+ * struct ti_sci_resource_desc - Description of TI SCI resource instance range.
+ * @start: Start index of the first resource range.
+ * @num: Number of resources in the first range.
+ * @start_sec: Start index of the second resource range.
+ * @num_sec: Number of resources in the second range.
+ * @res_map: Bitmap to manage the allocation of these resources.
+ */
+struct ti_sci_resource_desc {
+ u16 start;
+ u16 num;
+ u16 start_sec;
+ u16 num_sec;
+ unsigned long *res_map;
+};
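
As a hypothetical illustration of the two-range layout, a walk over every
resource index described by a populated descriptor could look like:

	static void ti_sci_desc_walk(const struct ti_sci_resource_desc *desc)
	{
		u16 i;

		/* first range: [start, start + num) */
		for (i = desc->start; i < desc->start + desc->num; i++)
			pr_info("resource %d\n", i);
		/* second range: [start_sec, start_sec + num_sec) */
		for (i = desc->start_sec; i < desc->start_sec + desc->num_sec; i++)
			pr_info("resource %d\n", i);
	}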
+
+/**
* struct ti_sci_rm_core_ops - Resource management core operations
* @get_range: Get a range of resources belonging to ti sci host.
 * @get_range_from_shost: Get a range of resources belonging to
@@ -209,25 +225,28 @@ struct ti_sci_clk_ops {
* - dev_id: TISCI device ID.
* - subtype: Resource assignment subtype that is being requested
* from the given device.
- * - range_start: Start index of the resource range
- * - range_end: Number of resources in the range
+ * - desc: Pointer to ti_sci_resource_desc to be updated with the resource
+ * range start index and number of resources
*/
struct ti_sci_rm_core_ops {
int (*get_range)(const struct ti_sci_handle *handle, u32 dev_id,
- u8 subtype, u16 *range_start, u16 *range_num);
+ u8 subtype, struct ti_sci_resource_desc *desc);
int (*get_range_from_shost)(const struct ti_sci_handle *handle,
u32 dev_id, u8 subtype, u8 s_host,
- u16 *range_start, u16 *range_num);
+ struct ti_sci_resource_desc *desc);
};
+#define TI_SCI_RESASG_SUBTYPE_IR_OUTPUT 0
+#define TI_SCI_RESASG_SUBTYPE_IA_VINT 0xa
+#define TI_SCI_RESASG_SUBTYPE_GLOBAL_EVENT_SEVT 0xd
/**
* struct ti_sci_rm_irq_ops: IRQ management operations
* @set_irq: Set an IRQ route between the requested source
* and destination
* @set_event_map: Set an Event based peripheral irq to Interrupt
* Aggregator.
- * @free_irq: Free an an IRQ route between the requested source
- * destination.
+ * @free_irq: Free an IRQ route between the requested source
+ * and destination.
* @free_event_map: Free an event based peripheral irq to Interrupt
* Aggregator.
*/
@@ -256,30 +275,46 @@ struct ti_sci_rm_irq_ops {
#define TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID BIT(4)
/* RA config.order_id parameter is valid for RM ring configure TISCI message */
#define TI_SCI_MSG_VALUE_RM_RING_ORDER_ID_VALID BIT(5)
+/* RA config.virtid parameter is valid for RM ring configure TISCI message */
+#define TI_SCI_MSG_VALUE_RM_RING_VIRTID_VALID BIT(6)
+/* RA config.asel parameter is valid for RM ring configure TISCI message */
+#define TI_SCI_MSG_VALUE_RM_RING_ASEL_VALID BIT(7)
#define TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER \
(TI_SCI_MSG_VALUE_RM_RING_ADDR_LO_VALID | \
TI_SCI_MSG_VALUE_RM_RING_ADDR_HI_VALID | \
TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID | \
TI_SCI_MSG_VALUE_RM_RING_MODE_VALID | \
- TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID)
+ TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID | \
+ TI_SCI_MSG_VALUE_RM_RING_ASEL_VALID)
+
+/**
+ * struct ti_sci_msg_rm_ring_cfg - Ring configuration
+ *
+ * Parameters for Navigator Subsystem ring configuration
+ * See @ti_sci_msg_rm_ring_cfg_req
+ */
+struct ti_sci_msg_rm_ring_cfg {
+ u32 valid_params;
+ u16 nav_id;
+ u16 index;
+ u32 addr_lo;
+ u32 addr_hi;
+ u32 count;
+ u8 mode;
+ u8 size;
+ u8 order_id;
+ u16 virtid;
+ u8 asel;
+};
/**
* struct ti_sci_rm_ringacc_ops - Ring Accelerator Management operations
- * @config: configure the SoC Navigator Subsystem Ring Accelerator ring
- * @get_config: get the SoC Navigator Subsystem Ring Accelerator ring
- * configuration
+ * @set_cfg: configure the SoC Navigator Subsystem Ring Accelerator ring
*/
struct ti_sci_rm_ringacc_ops {
- int (*config)(const struct ti_sci_handle *handle,
- u32 valid_params, u16 nav_id, u16 index,
- u32 addr_lo, u32 addr_hi, u32 count, u8 mode,
- u8 size, u8 order_id
- );
- int (*get_config)(const struct ti_sci_handle *handle,
- u32 nav_id, u32 index, u8 *mode,
- u32 *addr_lo, u32 *addr_hi, u32 *count,
- u8 *size, u8 *order_id);
+ int (*set_cfg)(const struct ti_sci_handle *handle,
+ const struct ti_sci_msg_rm_ring_cfg *params);
};
/**
@@ -317,6 +352,9 @@ struct ti_sci_rm_psil_ops {
#define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES 2
#define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES 3
+#define TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_TCHAN 0
+#define TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN 1
+
/* UDMAP TX/RX channel valid_params common declarations */
#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID BIT(0)
#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID BIT(1)
@@ -342,6 +380,8 @@ struct ti_sci_msg_rm_udmap_tx_ch_cfg {
#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID BIT(11)
#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_CREDIT_COUNT_VALID BIT(12)
#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FDEPTH_VALID BIT(13)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID BIT(15)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID BIT(16)
u16 nav_id;
u16 index;
u8 tx_pause_on_err;
@@ -359,6 +399,8 @@ struct ti_sci_msg_rm_udmap_tx_ch_cfg {
u16 fdepth;
u8 tx_sched_priority;
u8 tx_burst_size;
+ u8 tx_tdtype;
+ u8 extended_ch_type;
};
/**
@@ -518,18 +560,6 @@ struct ti_sci_handle {
#define TI_SCI_RESOURCE_NULL 0xffff
/**
- * struct ti_sci_resource_desc - Description of TI SCI resource instance range.
- * @start: Start index of the resource.
- * @num: Number of resources.
- * @res_map: Bitmap to manage the allocation of these resources.
- */
-struct ti_sci_resource_desc {
- u16 start;
- u16 num;
- unsigned long *res_map;
-};
-
-/**
* struct ti_sci_resource - Structure representing a resource assigned
* to a device.
* @sets: Number of sets available from this resource type
@@ -556,6 +586,9 @@ u32 ti_sci_get_num_resources(struct ti_sci_resource *res);
struct ti_sci_resource *
devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
struct device *dev, u32 dev_id, char *of_prop);
+struct ti_sci_resource *
+devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
+ u32 dev_id, u32 sub_type);
#else /* CONFIG_TI_SCI_PROTOCOL */
@@ -609,6 +642,13 @@ devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
{
return ERR_PTR(-EINVAL);
}
+
+static inline struct ti_sci_resource *
+devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
+ u32 dev_id, u32 sub_type)
+{
+ return ERR_PTR(-EINVAL);
+}
#endif /* CONFIG_TI_SCI_PROTOCOL */
#endif /* __TISCI_PROTOCOL_H */
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index 15fe980a27ea..0b9ecd8cf979 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -25,7 +25,19 @@ void sock_diag_unregister(const struct sock_diag_handler *h);
void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
-u64 sock_gen_cookie(struct sock *sk);
+u64 __sock_gen_cookie(struct sock *sk);
+
+static inline u64 sock_gen_cookie(struct sock *sk)
+{
+ u64 cookie;
+
+ preempt_disable();
+ cookie = __sock_gen_cookie(sk);
+ preempt_enable();
+
+ return cookie;
+}
+
int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie);
void sock_diag_save_cookie(struct sock *sk, __u32 *cookie);
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 15f3412d481e..de3701a2a212 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -10,9 +10,12 @@
#include <linux/compiler.h> /* __user */
#include <uapi/linux/socket.h>
+struct file;
struct pid;
struct cred;
struct socket;
+struct sock;
+struct sk_buff;
#define __sockaddr_check_size(size) \
BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage)))
@@ -49,11 +52,28 @@ struct linger {
struct msghdr {
void *msg_name; /* ptr to socket address structure */
int msg_namelen; /* size of socket address structure */
+
+ int msg_inq; /* output, data left in socket */
+
struct iov_iter msg_iter; /* data */
- void *msg_control; /* ancillary data */
- __kernel_size_t msg_controllen; /* ancillary data buffer length */
+
+ /*
+ * Ancillary data. msg_control_user is the user buffer used for the
+	 * recv* side when msg_control_is_user is set; msg_control is the kernel
+ * buffer used for all other cases.
+ */
+ union {
+ void *msg_control;
+ void __user *msg_control_user;
+ };
+ bool msg_control_is_user : 1;
+ bool msg_get_inq : 1;/* return INQ after receive */
unsigned int msg_flags; /* flags on received message */
+ __kernel_size_t msg_controllen; /* ancillary data buffer length */
struct kiocb *msg_iocb; /* ptr to iocb for async requests */
+ struct ubuf_info *msg_ubuf;
+ int (*sg_from_iter)(struct sock *sk, struct sk_buff *skb,
+ struct iov_iter *from, size_t length);
};
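
A hedged sketch of the intended split (buffer names are invented): kernel
callers keep using msg_control, while the recv* path stores the user
pointer and sets the flag so cmsg helpers know which union member is live:

	struct msghdr kmsg = { };
	struct msghdr umsg = { };

	kmsg.msg_control = kbuf;		/* kernel buffer */
	kmsg.msg_control_is_user = false;

	umsg.msg_control_user = ubuf;		/* __user buffer from recv* */
	umsg.msg_control_is_user = true;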
struct user_msghdr {
@@ -74,7 +94,7 @@ struct mmsghdr {
/*
* POSIX 1003.1g - ancillary data object information
- * Ancillary data consits of a sequence of pairs of
+ * Ancillary data consists of a sequence of pairs of
* (cmsghdr, cmsg_data[])
*/
@@ -94,7 +114,10 @@ struct cmsghdr {
#define CMSG_ALIGN(len) ( ((len)+sizeof(long)-1) & ~(sizeof(long)-1) )
-#define CMSG_DATA(cmsg) ((void *)((char *)(cmsg) + sizeof(struct cmsghdr)))
+#define CMSG_DATA(cmsg) \
+ ((void *)(cmsg) + sizeof(struct cmsghdr))
+#define CMSG_USER_DATA(cmsg) \
+ ((void __user *)(cmsg) + sizeof(struct cmsghdr))
#define CMSG_SPACE(len) (sizeof(struct cmsghdr) + CMSG_ALIGN(len))
#define CMSG_LEN(len) (sizeof(struct cmsghdr) + (len))
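
A worked example of the macro arithmetic, assuming LP64 (sizeof(long) == 8,
sizeof(struct cmsghdr) == 16) and a 4-byte payload:

	/*
	 * CMSG_ALIGN(4) == (4 + 8 - 1) & ~7 == 8
	 * CMSG_LEN(4)   == 16 + 4           == 20
	 * CMSG_SPACE(4) == 16 + 8           == 24
	 */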
@@ -209,8 +232,11 @@ struct ucred {
* reuses AF_INET address family
*/
#define AF_XDP 44 /* XDP sockets */
+#define AF_MCTP 45 /* Management component
+ * transport protocol
+ */
-#define AF_MAX 45 /* For now.. */
+#define AF_MAX 46 /* For now.. */
/* Protocol families, same as address families. */
#define PF_UNSPEC AF_UNSPEC
@@ -260,6 +286,7 @@ struct ucred {
#define PF_QIPCRTR AF_QIPCRTR
#define PF_SMC AF_SMC
#define PF_XDP AF_XDP
+#define PF_MCTP AF_MCTP
#define PF_MAX AF_MAX
/* Maximum queue length specifiable by listen. */
@@ -346,6 +373,9 @@ struct ucred {
#define SOL_KCM 281
#define SOL_TLS 282
#define SOL_XDP 283
+#define SOL_MPTCP 284
+#define SOL_MCTP 285
+#define SOL_SMC 286
/* IPX options */
#define IPX_TYPE 1
@@ -391,6 +421,9 @@ extern int recvmsg_copy_msghdr(struct msghdr *msg,
struct user_msghdr __user *umsg, unsigned flags,
struct sockaddr __user **uaddr,
struct iovec **iov);
+extern int __copy_msghdr(struct msghdr *kmsg,
+ struct user_msghdr *umsg,
+ struct sockaddr __user **save_addr);
/* helpers which do the actual work for syscalls */
extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size,
@@ -399,13 +432,13 @@ extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size,
extern int __sys_sendto(int fd, void __user *buff, size_t len,
unsigned int flags, struct sockaddr __user *addr,
int addr_len);
-extern int __sys_accept4_file(struct file *file, unsigned file_flags,
- struct sockaddr __user *upeer_sockaddr,
- int __user *upeer_addrlen, int flags,
- unsigned long nofile);
+extern struct file *do_accept(struct file *file, unsigned file_flags,
+ struct sockaddr __user *upeer_sockaddr,
+ int __user *upeer_addrlen, int flags);
extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags);
extern int __sys_socket(int family, int type, int protocol);
+extern struct file *__sys_socket_file(int family, int type, int protocol);
extern int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen);
extern int __sys_connect_file(struct file *file, struct sockaddr_storage *addr,
int addrlen, int file_flags);
@@ -418,7 +451,6 @@ extern int __sys_getpeername(int fd, struct sockaddr __user *usockaddr,
int __user *usockaddr_len);
extern int __sys_socketpair(int family, int type, int protocol,
int __user *usockvec);
+extern int __sys_shutdown_sock(struct socket *sock, int how);
extern int __sys_shutdown(int fd, int how);
-
-extern struct ns_common *get_net_ns(struct ns_common *ns);
#endif /* _LINUX_SOCKET_H */
diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h
new file mode 100644
index 000000000000..bae5e2369b4f
--- /dev/null
+++ b/include/linux/sockptr.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 Christoph Hellwig.
+ *
+ * Support for "universal" pointers that can point to either kernel or userspace
+ * memory.
+ */
+#ifndef _LINUX_SOCKPTR_H
+#define _LINUX_SOCKPTR_H
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+typedef struct {
+ union {
+ void *kernel;
+ void __user *user;
+ };
+ bool is_kernel : 1;
+} sockptr_t;
+
+static inline bool sockptr_is_kernel(sockptr_t sockptr)
+{
+ return sockptr.is_kernel;
+}
+
+static inline sockptr_t KERNEL_SOCKPTR(void *p)
+{
+ return (sockptr_t) { .kernel = p, .is_kernel = true };
+}
+
+static inline sockptr_t USER_SOCKPTR(void __user *p)
+{
+ return (sockptr_t) { .user = p };
+}
+
+static inline bool sockptr_is_null(sockptr_t sockptr)
+{
+ if (sockptr_is_kernel(sockptr))
+ return !sockptr.kernel;
+ return !sockptr.user;
+}
+
+static inline int copy_from_sockptr_offset(void *dst, sockptr_t src,
+ size_t offset, size_t size)
+{
+ if (!sockptr_is_kernel(src))
+ return copy_from_user(dst, src.user + offset, size);
+ memcpy(dst, src.kernel + offset, size);
+ return 0;
+}
+
+static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size)
+{
+ return copy_from_sockptr_offset(dst, src, 0, size);
+}
+
+static inline int copy_to_sockptr_offset(sockptr_t dst, size_t offset,
+ const void *src, size_t size)
+{
+ if (!sockptr_is_kernel(dst))
+ return copy_to_user(dst.user + offset, src, size);
+ memcpy(dst.kernel + offset, src, size);
+ return 0;
+}
+
+static inline int copy_to_sockptr(sockptr_t dst, const void *src, size_t size)
+{
+ return copy_to_sockptr_offset(dst, 0, src, size);
+}
+
+static inline void *memdup_sockptr(sockptr_t src, size_t len)
+{
+ void *p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
+
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+ if (copy_from_sockptr(p, src, len)) {
+ kfree(p);
+ return ERR_PTR(-EFAULT);
+ }
+ return p;
+}
+
+static inline void *memdup_sockptr_nul(sockptr_t src, size_t len)
+{
+ char *p = kmalloc_track_caller(len + 1, GFP_KERNEL);
+
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+ if (copy_from_sockptr(p, src, len)) {
+ kfree(p);
+ return ERR_PTR(-EFAULT);
+ }
+ p[len] = '\0';
+ return p;
+}
+
+static inline long strncpy_from_sockptr(char *dst, sockptr_t src, size_t count)
+{
+ if (sockptr_is_kernel(src)) {
+ size_t len = min(strnlen(src.kernel, count - 1) + 1, count);
+
+ memcpy(dst, src.kernel, len);
+ return len;
+ }
+ return strncpy_from_user(dst, src.user, count);
+}
+
+static inline int check_zeroed_sockptr(sockptr_t src, size_t offset,
+ size_t size)
+{
+ if (!sockptr_is_kernel(src))
+ return check_zeroed_user(src.user + offset, size);
+ return memchr_inv(src.kernel + offset, 0, size) == NULL;
+}
+
+#endif /* _LINUX_SOCKPTR_H */
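
A minimal sketch of the pattern this header enables -- one option parser
serving both kernel and user callers (helper name and semantics invented):

	static int get_u32_opt(sockptr_t optval, unsigned int optlen, u32 *val)
	{
		if (optlen < sizeof(*val))
			return -EINVAL;
		if (copy_from_sockptr(val, optval, sizeof(*val)))
			return -EFAULT;
		return 0;
	}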
diff --git a/include/linux/softirq.h b/include/linux/softirq.h
new file mode 100644
index 000000000000..c73d7dcb4cb5
--- /dev/null
+++ b/include/linux/softirq.h
@@ -0,0 +1 @@
+#include <linux/interrupt.h>
diff --git a/include/linux/sony-laptop.h b/include/linux/sony-laptop.h
index 374d0fdb0743..1e3c92feea6e 100644
--- a/include/linux/sony-laptop.h
+++ b/include/linux/sony-laptop.h
@@ -31,7 +31,7 @@
#if IS_ENABLED(CONFIG_SONY_LAPTOP)
int sony_pic_camera_command(int command, u8 value);
#else
-static inline int sony_pic_camera_command(int command, u8 value) { return 0; };
+static inline int sony_pic_camera_command(int command, u8 value) { return 0; }
#endif
#endif /* __KERNEL__ */
diff --git a/include/linux/sort.h b/include/linux/sort.h
index b5898725fe9d..e163287ac6c1 100644
--- a/include/linux/sort.h
+++ b/include/linux/sort.h
@@ -6,7 +6,7 @@
void sort_r(void *base, size_t num, size_t size,
cmp_r_func_t cmp_func,
- swap_func_t swap_func,
+ swap_r_func_t swap_func,
const void *priv);
void sort(void *base, size_t num, size_t size,
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index b451bb622335..9e4537f409c2 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -5,6 +5,7 @@
#define __SOUNDWIRE_H
#include <linux/mod_devicetable.h>
+#include <linux/bitfield.h>
struct sdw_bus;
struct sdw_slave;
@@ -38,7 +39,8 @@ struct sdw_slave;
#define SDW_FRAME_CTRL_BITS 48
#define SDW_MAX_DEVICES 11
-#define SDW_VALID_PORT_RANGE(n) ((n) <= 14 && (n) >= 1)
+#define SDW_MAX_PORTS 15
+#define SDW_VALID_PORT_RANGE(n) ((n) < SDW_MAX_PORTS && (n) >= 1)
enum {
SDW_PORT_DIRN_SINK = 0,
@@ -80,6 +82,21 @@ enum sdw_slave_status {
};
/**
+ * enum sdw_clk_stop_type: clock stop operations
+ *
+ * @SDW_CLK_PRE_PREPARE: pre clock stop prepare
+ * @SDW_CLK_POST_PREPARE: post clock stop prepare
+ * @SDW_CLK_PRE_DEPREPARE: pre clock stop de-prepare
+ * @SDW_CLK_POST_DEPREPARE: post clock stop de-prepare
+ */
+enum sdw_clk_stop_type {
+ SDW_CLK_PRE_PREPARE = 0,
+ SDW_CLK_POST_PREPARE,
+ SDW_CLK_PRE_DEPREPARE,
+ SDW_CLK_POST_DEPREPARE,
+};
+
+/**
* enum sdw_command_response - Command response as defined by SDW spec
* @SDW_CMD_OK: cmd was successful
* @SDW_CMD_IGNORED: cmd was ignored
@@ -108,6 +125,12 @@ enum sdw_dpn_grouping {
SDW_BLK_GRP_CNT_4 = 3,
};
+/* block packing mode enum */
+enum sdw_dpn_pkg_mode {
+ SDW_BLK_PKG_PER_PORT = 0,
+ SDW_BLK_PKG_PER_CHANNEL = 1
+};
+
/**
* enum sdw_stream_type: data stream type
*
@@ -137,19 +160,19 @@ enum sdw_data_direction {
*
* @SDW_PORT_DATA_MODE_NORMAL: Normal data mode where audio data is received
* and transmitted.
+ * @SDW_PORT_DATA_MODE_PRBS: Test mode which uses a PRBS generator to produce
+ * a pseudo random data pattern that is transferred
+ * @SDW_PORT_DATA_MODE_STATIC_0: Simple test mode which uses static value of
+ * logic 0. The encoding will result in no signal transitions
* @SDW_PORT_DATA_MODE_STATIC_1: Simple test mode which uses static value of
* logic 1. The encoding will result in signal transitions at every bitslot
* owned by this Port
- * @SDW_PORT_DATA_MODE_STATIC_0: Simple test mode which uses static value of
- * logic 0. The encoding will result in no signal transitions
- * @SDW_PORT_DATA_MODE_PRBS: Test mode which uses a PRBS generator to produce
- * a pseudo random data pattern that is transferred
*/
enum sdw_port_data_mode {
SDW_PORT_DATA_MODE_NORMAL = 0,
- SDW_PORT_DATA_MODE_STATIC_1 = 1,
+ SDW_PORT_DATA_MODE_PRBS = 1,
SDW_PORT_DATA_MODE_STATIC_0 = 2,
- SDW_PORT_DATA_MODE_PRBS = 3,
+ SDW_PORT_DATA_MODE_STATIC_1 = 3,
};
/*
@@ -276,14 +299,15 @@ struct sdw_dpn_audio_mode {
* implementation-defined interrupts
* @max_ch: Maximum channels supported
* @min_ch: Minimum channels supported
- * @num_ch: Number of discrete channels supported
- * @ch: Discrete channels supported
+ * @num_channels: Number of discrete channels supported
+ * @channels: Discrete channels supported
* @num_ch_combinations: Number of channel combinations supported
* @ch_combinations: Channel combinations supported
* @modes: SDW mode supported
* @max_async_buffer: Number of samples that this port can buffer in
* asynchronous modes
* @block_pack_mode: Type of block port mode supported
+ * @read_only_wordlength: Read Only wordlength field in DPN_BlockCtrl1 register
* @port_encoding: Payload Channel Sample encoding schemes supported
* @audio_modes: Audio modes supported
*/
@@ -300,13 +324,14 @@ struct sdw_dpn_prop {
u32 imp_def_interrupts;
u32 max_ch;
u32 min_ch;
- u32 num_ch;
- u32 *ch;
+ u32 num_channels;
+ u32 *channels;
u32 num_ch_combinations;
u32 *ch_combinations;
u32 modes;
u32 max_async_buffer;
bool block_pack_mode;
+ bool read_only_wordlength;
u32 port_encoding;
struct sdw_dpn_audio_mode *audio_modes;
};
@@ -338,6 +363,9 @@ struct sdw_dpn_prop {
* @dp0_prop: Data Port 0 properties
* @src_dpn_prop: Source Data Port N properties
* @sink_dpn_prop: Sink Data Port N properties
+ * @scp_int1_mask: SCP_INT1_MASK desired settings
+ * @quirks: bitmask identifying deltas from the MIPI specification
+ * @is_sdca: the Slave supports the SDCA specification
*/
struct sdw_slave_prop {
u32 mipi_revision;
@@ -359,8 +387,13 @@ struct sdw_slave_prop {
struct sdw_dp0_prop *dp0_prop;
struct sdw_dpn_prop *src_dpn_prop;
struct sdw_dpn_prop *sink_dpn_prop;
+ u8 scp_int1_mask;
+ u32 quirks;
+ bool is_sdca;
};
+#define SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY BIT(0)
+
/**
* struct sdw_master_prop - Master properties
* @revision: MIPI spec version of the implementation
@@ -378,6 +411,7 @@ struct sdw_slave_prop {
* command
* @mclk_freq: clock reference passed to SoundWire Master, in Hz.
* @hw_disabled: if true, the Master is not functional, typically due to pin-mux
+ * @quirks: bitmask identifying optional behavior beyond the scope of the MIPI specification
*/
struct sdw_master_prop {
u32 revision;
@@ -394,8 +428,29 @@ struct sdw_master_prop {
u32 err_threshold;
u32 mclk_freq;
bool hw_disabled;
+ u64 quirks;
};
+/* Definitions for Master quirks */
+
+/*
+ * On a number of platforms, bus clashes are reported after a hardware
+ * reset without any explanation or evidence of a real problem.
+ * The following quirk will discard all initial bus clash interrupts
+ * but will leave the detection on should real bus clashes happen.
+ */
+#define SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH BIT(0)
+
+/*
+ * Some Slave devices have known issues with incorrect parity errors
+ * reported after a hardware reset. However, during integration, unexplained
+ * parity errors can be reported by Slave devices, possibly due to electrical
+ * issues at the Master level.
+ * The following quirk will discard all initial parity errors but will leave
+ * the detection on should real parity errors happen.
+ */
+#define SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY BIT(1)
+
int sdw_master_read_prop(struct sdw_bus *bus);
int sdw_slave_read_prop(struct sdw_slave *slave);
@@ -409,8 +464,7 @@ int sdw_slave_read_prop(struct sdw_slave *slave);
* struct sdw_slave_id - Slave ID
* @mfg_id: MIPI Manufacturer ID
* @part_id: Device Part ID
- * @class_id: MIPI Class ID, unused now.
- * Currently a placeholder in MIPI SoundWire Spec
+ * @class_id: MIPI Class ID (defined starting with SoundWire 1.2 spec)
* @unique_id: Device unique ID
* @sdw_version: SDW version implemented
*
@@ -424,12 +478,43 @@ struct sdw_slave_id {
__u8 sdw_version:4;
};
+/*
+ * Helper macros to extract the MIPI-defined IDs
+ *
+ * Spec definition
+ * Register Bit Contents
+ * DevId_0 [7:4] 47:44 sdw_version
+ * DevId_0 [3:0] 43:40 unique_id
+ * DevId_1 39:32 mfg_id [15:8]
+ * DevId_2 31:24 mfg_id [7:0]
+ * DevId_3 23:16 part_id [15:8]
+ * DevId_4 15:08 part_id [7:0]
+ * DevId_5 07:00 class_id
+ *
+ * The MIPI DisCo for SoundWire defines in addition the link_id as bits 51:48
+ */
+#define SDW_DISCO_LINK_ID_MASK GENMASK_ULL(51, 48)
+#define SDW_VERSION_MASK GENMASK_ULL(47, 44)
+#define SDW_UNIQUE_ID_MASK GENMASK_ULL(43, 40)
+#define SDW_MFG_ID_MASK GENMASK_ULL(39, 24)
+#define SDW_PART_ID_MASK GENMASK_ULL(23, 8)
+#define SDW_CLASS_ID_MASK GENMASK_ULL(7, 0)
+
+#define SDW_DISCO_LINK_ID(addr) FIELD_GET(SDW_DISCO_LINK_ID_MASK, addr)
+#define SDW_VERSION(addr) FIELD_GET(SDW_VERSION_MASK, addr)
+#define SDW_UNIQUE_ID(addr) FIELD_GET(SDW_UNIQUE_ID_MASK, addr)
+#define SDW_MFG_ID(addr) FIELD_GET(SDW_MFG_ID_MASK, addr)
+#define SDW_PART_ID(addr) FIELD_GET(SDW_PART_ID_MASK, addr)
+#define SDW_CLASS_ID(addr) FIELD_GET(SDW_CLASS_ID_MASK, addr)
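+
+/*
+ * Hedged decode of a made-up DevId value using the helpers above:
+ *   addr == 0x31025D071100ULL gives
+ *     SDW_VERSION(addr)   == 0x3
+ *     SDW_UNIQUE_ID(addr) == 0x1
+ *     SDW_MFG_ID(addr)    == 0x025d
+ *     SDW_PART_ID(addr)   == 0x0711
+ *     SDW_CLASS_ID(addr)  == 0x00
+ */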
+
/**
* struct sdw_slave_intr_status - Slave interrupt status
+ * @sdca_cascade: set if the Slave device reports an SDCA interrupt
* @control_port: control port status
* @port: data port status
*/
struct sdw_slave_intr_status {
+ bool sdca_cascade;
u8 control_port;
u8 port[15];
};
@@ -501,6 +586,10 @@ enum sdw_port_prep_ops {
* @bandwidth: Current bandwidth
* @col: Active columns
* @row: Active rows
+ * @s_data_mode: NORMAL, STATIC or PRBS mode for all Slave ports
+ * @m_data_mode: NORMAL, STATIC or PRBS mode for all Master ports. The value
+ * should match @s_data_mode to detect transmission issues, but can be
+ * different to test the interrupt reports
*/
struct sdw_bus_params {
enum sdw_reg_bank curr_bank;
@@ -510,6 +599,8 @@ struct sdw_bus_params {
unsigned int bandwidth;
unsigned int col;
unsigned int row;
+ int s_data_mode;
+ int m_data_mode;
};
/**
@@ -521,6 +612,7 @@ struct sdw_bus_params {
* @update_status: Update Slave status
* @bus_config: Update the bus config for Slave
* @port_prep: Prepare the port with parameters
+ * @clk_stop: handle imp-def sequences before and after prepare and de-prepare
*/
struct sdw_slave_ops {
int (*read_prop)(struct sdw_slave *sdw);
@@ -533,6 +625,10 @@ struct sdw_slave_ops {
int (*port_prep)(struct sdw_slave *slave,
struct sdw_prepare_ch *prepare_ch,
enum sdw_port_prep_ops pre_ops);
+ int (*clk_stop)(struct sdw_slave *slave,
+ enum sdw_clk_stop_mode mode,
+ enum sdw_clk_stop_type type);
+
};
/**
@@ -541,17 +637,14 @@ struct sdw_slave_ops {
* @dev: Linux device
* @status: Status reported by the Slave
* @bus: Bus handle
- * @ops: Slave callback ops
* @prop: Slave properties
* @debugfs: Slave debugfs
* @node: node for bus list
* @port_ready: Port ready completion flag for each Slave port
+ * @m_port_map: static Master port map for each Slave port
* @dev_num: Current Device Number, values can be 0 or dev_num_sticky
* @dev_num_sticky: one-time static Device Number assigned by Bus
* @probed: boolean tracking driver state
- * @probe_complete: completion utility to control potential races
- * on startup between driver probe/initialization and SoundWire
- * Slave state changes/implementation-defined interrupts
* @enumeration_complete: completion utility to control potential races
* on startup between device enumeration and read/write access to the
* Slave device
@@ -562,30 +655,50 @@ struct sdw_slave_ops {
* between the Master suspending and the codec resuming, and make sure that
* when the Master triggered a reset the Slave is properly enumerated and
* initialized
+ * @first_interrupt_done: status flag tracking if the interrupt handling
+ * for a Slave happens for the first time after enumeration
+ * @is_mockup_device: status flag used to squelch errors in the command/control
+ * protocol for SoundWire mockup devices
+ * @sdw_dev_lock: mutex used to protect callbacks/remove races
*/
struct sdw_slave {
struct sdw_slave_id id;
struct device dev;
enum sdw_slave_status status;
struct sdw_bus *bus;
- const struct sdw_slave_ops *ops;
struct sdw_slave_prop prop;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
#endif
struct list_head node;
- struct completion *port_ready;
+ struct completion port_ready[SDW_MAX_PORTS];
+ unsigned int m_port_map[SDW_MAX_PORTS];
u16 dev_num;
u16 dev_num_sticky;
bool probed;
- struct completion probe_complete;
struct completion enumeration_complete;
struct completion initialization_complete;
u32 unattach_request;
+ bool first_interrupt_done;
+ bool is_mockup_device;
+ struct mutex sdw_dev_lock; /* protect callbacks/remove races */
};
#define dev_to_sdw_dev(_dev) container_of(_dev, struct sdw_slave, dev)
+/**
+ * struct sdw_master_device - SoundWire 'Master Device' representation
+ * @dev: Linux device for this Master
+ * @bus: Bus handle shortcut
+ */
+struct sdw_master_device {
+ struct device dev;
+ struct sdw_bus *bus;
+};
+
+#define dev_to_sdw_master_device(d) \
+ container_of(d, struct sdw_master_device, dev)
+
struct sdw_driver {
const char *name;
@@ -600,10 +713,14 @@ struct sdw_driver {
struct device_driver driver;
};
-#define SDW_SLAVE_ENTRY(_mfg_id, _part_id, _drv_data) \
- { .mfg_id = (_mfg_id), .part_id = (_part_id), \
+#define SDW_SLAVE_ENTRY_EXT(_mfg_id, _part_id, _version, _c_id, _drv_data) \
+ { .mfg_id = (_mfg_id), .part_id = (_part_id), \
+ .sdw_version = (_version), .class_id = (_c_id), \
.driver_data = (unsigned long)(_drv_data) }
+#define SDW_SLAVE_ENTRY(_mfg_id, _part_id, _drv_data) \
+ SDW_SLAVE_ENTRY_EXT((_mfg_id), (_part_id), 0, 0, (_drv_data))
+
int sdw_handle_slave_status(struct sdw_bus *bus,
enum sdw_slave_status status[]);
@@ -715,16 +832,20 @@ struct sdw_defer {
/**
* struct sdw_master_ops - Master driver ops
* @read_prop: Read Master properties
+ * @override_adr: Override value read from firmware (quirk for buggy firmware)
* @xfer_msg: Transfer message callback
* @xfer_msg_defer: Defer version of transfer message callback
* @reset_page_addr: Reset the SCP page address registers
* @set_bus_conf: Set the bus configuration
* @pre_bank_switch: Callback for pre bank switch
* @post_bank_switch: Callback for post bank switch
+ * @read_ping_status: Read status from PING frames, reported with two bits per Device.
+ * Bits 31:24 are reserved.
*/
struct sdw_master_ops {
int (*read_prop)(struct sdw_bus *bus);
-
+ u64 (*override_adr)
+ (struct sdw_bus *bus, u64 addr);
enum sdw_command_response (*xfer_msg)
(struct sdw_bus *bus, struct sdw_msg *msg);
enum sdw_command_response (*xfer_msg_defer)
@@ -736,13 +857,16 @@ struct sdw_master_ops {
struct sdw_bus_params *params);
int (*pre_bank_switch)(struct sdw_bus *bus);
int (*post_bank_switch)(struct sdw_bus *bus);
+ u32 (*read_ping_status)(struct sdw_bus *bus);
};
/**
* struct sdw_bus - SoundWire bus
- * @dev: Master linux device
+ * @dev: Shortcut to &bus->md->dev, kept so existing code need not change.
+ * @md: Master device
* @link_id: Link id number, can be 0 to N, unique for each Master
+ * @id: bus system-wide unique id
* @slaves: list of Slaves on this bus
* @assigned: Bitmap for Slave device numbers.
* Bit set implies used number, bit clear implies unused number.
@@ -763,10 +887,20 @@ struct sdw_master_ops {
* @multi_link: Store bus property that indicates if multi links
* are supported. This flag is populated by drivers after reading
* appropriate firmware (ACPI/DT).
+ * @hw_sync_min_links: Number of links used by a stream above which
+ * hardware-based synchronization is required. This value is only
+ * meaningful if multi_link is set. If set to 1, hardware-based
+ * synchronization will be used even if a stream only uses a single
+ * SoundWire segment.
+ * @dev_num_ida_min: if set, defines the minimum value for the IDA
+ * used to allocate system-unique device numbers. This value needs to be
+ * identical across all SoundWire buses in the system.
*/
struct sdw_bus {
struct device *dev;
+ struct sdw_master_device *md;
unsigned int link_id;
+ int id;
struct list_head slaves;
DECLARE_BITMAP(assigned, SDW_MAX_DEVICES);
struct mutex bus_lock;
@@ -784,10 +918,15 @@ struct sdw_bus {
unsigned int clk_stop_timeout;
u32 bank_switch_timeout;
bool multi_link;
+ int hw_sync_min_links;
+ int dev_num_ida_min;
};
-int sdw_add_bus_master(struct sdw_bus *bus);
-void sdw_delete_bus_master(struct sdw_bus *bus);
+int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
+ struct fwnode_handle *fwnode);
+void sdw_bus_master_delete(struct sdw_bus *bus);
+
+void sdw_show_ping_status(struct sdw_bus *bus, bool sync_delay);
/**
* sdw_port_config: Master or Slave Port configuration
@@ -874,6 +1013,9 @@ struct sdw_stream_runtime {
struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name);
void sdw_release_stream(struct sdw_stream_runtime *stream);
+
+int sdw_compute_params(struct sdw_bus *bus);
+
int sdw_stream_add_master(struct sdw_bus *bus,
struct sdw_stream_config *stream_config,
struct sdw_port_config *port_config,
@@ -888,16 +1030,28 @@ int sdw_stream_remove_master(struct sdw_bus *bus,
struct sdw_stream_runtime *stream);
int sdw_stream_remove_slave(struct sdw_slave *slave,
struct sdw_stream_runtime *stream);
+int sdw_startup_stream(void *sdw_substream);
int sdw_prepare_stream(struct sdw_stream_runtime *stream);
int sdw_enable_stream(struct sdw_stream_runtime *stream);
int sdw_disable_stream(struct sdw_stream_runtime *stream);
int sdw_deprepare_stream(struct sdw_stream_runtime *stream);
+void sdw_shutdown_stream(void *sdw_substream);
+int sdw_bus_prep_clk_stop(struct sdw_bus *bus);
+int sdw_bus_clk_stop(struct sdw_bus *bus);
+int sdw_bus_exit_clk_stop(struct sdw_bus *bus);
/* messaging and data APIs */
int sdw_read(struct sdw_slave *slave, u32 addr);
int sdw_write(struct sdw_slave *slave, u32 addr, u8 value);
+int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value);
+int sdw_read_no_pm(struct sdw_slave *slave, u32 addr);
int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
-int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
+int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val);
+int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val);
+int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val);
+
+int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id);
+void sdw_extract_slave_id(struct sdw_bus *bus, u64 addr, struct sdw_slave_id *id);
#endif /* __SOUNDWIRE_H */
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index 979b41b5dcb4..2e9fd91572d4 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -7,13 +7,107 @@
#include <linux/irqreturn.h>
#include <linux/soundwire/sdw.h>
+#define SDW_SHIM_BASE 0x2C000
+#define SDW_ALH_BASE 0x2C800
+#define SDW_SHIM_BASE_ACE 0x38000
+#define SDW_ALH_BASE_ACE 0x24000
+#define SDW_LINK_BASE 0x30000
+#define SDW_LINK_SIZE 0x10000
+
+/* Intel SHIM Registers Definition */
+/* LCAP */
+#define SDW_SHIM_LCAP 0x0
+#define SDW_SHIM_LCAP_LCOUNT_MASK GENMASK(2, 0)
+
+/* LCTL */
+#define SDW_SHIM_LCTL 0x4
+
+#define SDW_SHIM_LCTL_SPA BIT(0)
+#define SDW_SHIM_LCTL_SPA_MASK GENMASK(3, 0)
+#define SDW_SHIM_LCTL_CPA BIT(8)
+#define SDW_SHIM_LCTL_CPA_MASK GENMASK(11, 8)
+
+/* SYNC */
+#define SDW_SHIM_SYNC 0xC
+
+#define SDW_SHIM_SYNC_SYNCPRD_VAL_24 (24000 / SDW_CADENCE_GSYNC_KHZ - 1)
+#define SDW_SHIM_SYNC_SYNCPRD_VAL_38_4 (38400 / SDW_CADENCE_GSYNC_KHZ - 1)
+#define SDW_SHIM_SYNC_SYNCPRD GENMASK(14, 0)
+#define SDW_SHIM_SYNC_SYNCCPU BIT(15)
+#define SDW_SHIM_SYNC_CMDSYNC_MASK GENMASK(19, 16)
+#define SDW_SHIM_SYNC_CMDSYNC BIT(16)
+#define SDW_SHIM_SYNC_SYNCGO BIT(24)
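+
+/*
+ * Worked example, assuming SDW_CADENCE_GSYNC_KHZ is 4 (a 4 kHz gsync
+ * period, defined elsewhere in the Cadence driver headers):
+ *   SDW_SHIM_SYNC_SYNCPRD_VAL_24   == 24000 / 4 - 1 == 5999
+ *   SDW_SHIM_SYNC_SYNCPRD_VAL_38_4 == 38400 / 4 - 1 == 9599
+ */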
+
+/* Control stream capabilities and channel mask */
+#define SDW_SHIM_CTLSCAP(x) (0x010 + 0x60 * (x))
+#define SDW_SHIM_CTLS0CM(x) (0x012 + 0x60 * (x))
+#define SDW_SHIM_CTLS1CM(x) (0x014 + 0x60 * (x))
+#define SDW_SHIM_CTLS2CM(x) (0x016 + 0x60 * (x))
+#define SDW_SHIM_CTLS3CM(x) (0x018 + 0x60 * (x))
+
+/* PCM Stream capabilities */
+#define SDW_SHIM_PCMSCAP(x) (0x020 + 0x60 * (x))
+
+#define SDW_SHIM_PCMSCAP_ISS GENMASK(3, 0)
+#define SDW_SHIM_PCMSCAP_OSS GENMASK(7, 4)
+#define SDW_SHIM_PCMSCAP_BSS GENMASK(12, 8)
+
+/* PCM Stream Channel Map */
+#define SDW_SHIM_PCMSYCHM(x, y) (0x022 + (0x60 * (x)) + (0x2 * (y)))
+
+/* PCM Stream Channel Count */
+#define SDW_SHIM_PCMSYCHC(x, y) (0x042 + (0x60 * (x)) + (0x2 * (y)))
+
+#define SDW_SHIM_PCMSYCM_LCHN GENMASK(3, 0)
+#define SDW_SHIM_PCMSYCM_HCHN GENMASK(7, 4)
+#define SDW_SHIM_PCMSYCM_STREAM GENMASK(13, 8)
+#define SDW_SHIM_PCMSYCM_DIR BIT(15)
+
+/* IO control */
+#define SDW_SHIM_IOCTL(x) (0x06C + 0x60 * (x))
+
+#define SDW_SHIM_IOCTL_MIF BIT(0)
+#define SDW_SHIM_IOCTL_CO BIT(1)
+#define SDW_SHIM_IOCTL_COE BIT(2)
+#define SDW_SHIM_IOCTL_DO BIT(3)
+#define SDW_SHIM_IOCTL_DOE BIT(4)
+#define SDW_SHIM_IOCTL_BKE BIT(5)
+#define SDW_SHIM_IOCTL_WPDD BIT(6)
+#define SDW_SHIM_IOCTL_CIBD BIT(8)
+#define SDW_SHIM_IOCTL_DIBD BIT(9)
+
+/* Wake Enable */
+#define SDW_SHIM_WAKEEN 0x190
+
+#define SDW_SHIM_WAKEEN_ENABLE BIT(0)
+
+/* Wake Status */
+#define SDW_SHIM_WAKESTS 0x192
+
+#define SDW_SHIM_WAKESTS_STATUS BIT(0)
+
+/* AC Timing control */
+#define SDW_SHIM_CTMCTL(x) (0x06E + 0x60 * (x))
+
+#define SDW_SHIM_CTMCTL_DACTQE BIT(0)
+#define SDW_SHIM_CTMCTL_DODS BIT(1)
+#define SDW_SHIM_CTMCTL_DOAIS GENMASK(4, 3)
+
+/* Intel ALH Register definitions */
+#define SDW_ALH_STRMZCFG(x) (0x000 + (0x4 * (x)))
+#define SDW_ALH_NUM_STREAMS 64
+
+#define SDW_ALH_STRMZCFG_DMAT_VAL 0x3
+#define SDW_ALH_STRMZCFG_DMAT GENMASK(7, 0)
+#define SDW_ALH_STRMZCFG_CHN GENMASK(19, 16)
+
/**
* struct sdw_intel_stream_params_data: configuration passed during
* the @params_stream callback, e.g. for interaction with DSP
* firmware.
*/
struct sdw_intel_stream_params_data {
- struct snd_pcm_substream *substream;
+ int stream;
struct snd_soc_dai *dai;
struct snd_pcm_hw_params *hw_params;
int link_id;
@@ -26,7 +120,7 @@ struct sdw_intel_stream_params_data {
* firmware.
*/
struct sdw_intel_stream_free_data {
- struct snd_pcm_substream *substream;
+ int stream;
struct snd_soc_dai *dai;
int link_id;
};
@@ -40,6 +134,7 @@ struct sdw_intel_ops {
struct sdw_intel_stream_params_data *params_data);
int (*free_stream)(struct device *dev,
struct sdw_intel_stream_free_data *free_data);
+ int (*trigger)(struct snd_soc_dai *dai, int cmd, int stream);
};
/**
@@ -58,7 +153,7 @@ struct sdw_intel_acpi_info {
u32 link_mask;
};
-struct sdw_intel_link_res;
+struct sdw_intel_link_dev;
/* Intel clock-stop/pm_runtime quirk definitions */
@@ -109,12 +204,15 @@ struct sdw_intel_slave_id {
* Controller
* @num_slaves: total number of devices exposed across all enabled links
* @handle: ACPI parent handle
- * @links: information for each link (controller-specific and kept
+ * @ldev: information for each link (controller-specific and kept
* opaque here)
* @ids: array of slave_id, representing Slaves exposed across all enabled
* links
* @link_list: list to handle interrupts across all links
* @shim_lock: mutex to handle concurrent rmw access to shared SHIM registers.
+ * @shim_mask: flags to track initialization of SHIM shared registers
+ * @shim_base: SoundWire SHIM base address.
+ * @alh_base: SoundWire ALH base address.
*/
struct sdw_intel_ctx {
int count;
@@ -122,10 +220,13 @@ struct sdw_intel_ctx {
u32 link_mask;
int num_slaves;
acpi_handle handle;
- struct sdw_intel_link_res *links;
+ struct sdw_intel_link_dev **ldev;
struct sdw_intel_slave_id *ids;
struct list_head link_list;
struct mutex shim_lock; /* lock for access to shared SHIM registers */
+ u32 shim_mask;
+ u32 shim_base;
+ u32 alh_base;
};
/**
@@ -144,6 +245,8 @@ struct sdw_intel_ctx {
* machine-specific quirks are handled in the DSP driver.
* @clock_stop_quirks: mask array of possible behaviors requested by the
* DSP driver. The quirks are common for all links for now.
+ * @shim_base: SoundWire SHIM base address.
+ * @alh_base: SoundWire ALH base address.
*/
struct sdw_intel_res {
int count;
@@ -155,6 +258,8 @@ struct sdw_intel_res {
struct device *dev;
u32 link_mask;
u32 clock_stop_quirks;
+ u32 shim_base;
+ u32 alh_base;
};
/*
@@ -185,4 +290,6 @@ void sdw_intel_enable_irq(void __iomem *mmio_base, bool enable);
irqreturn_t sdw_intel_thread(int irq, void *dev_id);
+#define SDW_INTEL_QUIRK_MASK_BUS_DISABLE BIT(1)
+
#endif
diff --git a/include/linux/soundwire/sdw_registers.h b/include/linux/soundwire/sdw_registers.h
index a686f7988156..138bec908c40 100644
--- a/include/linux/soundwire/sdw_registers.h
+++ b/include/linux/soundwire/sdw_registers.h
@@ -5,14 +5,7 @@
#define __SDW_REGISTERS_H
/*
- * typically we define register and shifts but if one observes carefully,
- * the shift can be generated from MASKS using few bit primitaives like ffs
- * etc, so we use that and avoid defining shifts
- */
-#define SDW_REG_SHIFT(n) (ffs(n) - 1)
-
-/*
- * SDW registers as defined by MIPI 1.1 Spec
+ * SDW registers as defined by MIPI 1.2 Spec
*/
#define SDW_REGADDR GENMASK(14, 0)
#define SDW_SCP_ADDRPAGE2_MASK GENMASK(22, 15)
@@ -43,9 +36,17 @@
#define SDW_DP0_INT_TEST_FAIL BIT(0)
#define SDW_DP0_INT_PORT_READY BIT(1)
#define SDW_DP0_INT_BRA_FAILURE BIT(2)
+#define SDW_DP0_SDCA_CASCADE BIT(3)
+/* BIT(4) not allocated in SoundWire specification 1.2 */
#define SDW_DP0_INT_IMPDEF1 BIT(5)
#define SDW_DP0_INT_IMPDEF2 BIT(6)
#define SDW_DP0_INT_IMPDEF3 BIT(7)
+#define SDW_DP0_INTERRUPTS (SDW_DP0_INT_TEST_FAIL | \
+ SDW_DP0_INT_PORT_READY | \
+ SDW_DP0_INT_BRA_FAILURE | \
+ SDW_DP0_INT_IMPDEF1 | \
+ SDW_DP0_INT_IMPDEF2 | \
+ SDW_DP0_INT_IMPDEF3)
#define SDW_DP0_PORTCTRL_DATAMODE GENMASK(3, 2)
#define SDW_DP0_PORTCTRL_NXTINVBANK BIT(4)
@@ -106,6 +107,20 @@
#define SDW_SCP_ADDRPAGE2 0x49
#define SDW_SCP_KEEPEREN 0x4A
#define SDW_SCP_BANKDELAY 0x4B
+#define SDW_SCP_COMMIT 0x4C
+
+#define SDW_SCP_BUS_CLOCK_BASE 0x4D
+#define SDW_SCP_BASE_CLOCK_FREQ GENMASK(2, 0)
+#define SDW_SCP_BASE_CLOCK_UNKNOWN 0x0
+#define SDW_SCP_BASE_CLOCK_19200000_HZ 0x1
+#define SDW_SCP_BASE_CLOCK_24000000_HZ 0x2
+#define SDW_SCP_BASE_CLOCK_24576000_HZ 0x3
+#define SDW_SCP_BASE_CLOCK_22579200_HZ 0x4
+#define SDW_SCP_BASE_CLOCK_32000000_HZ 0x5
+#define SDW_SCP_BASE_CLOCK_RESERVED 0x6
+#define SDW_SCP_BASE_CLOCK_IMP_DEF 0x7
+
+/* 0x4E is not allocated in SoundWire specification 1.2 */
#define SDW_SCP_TESTMODE 0x4F
#define SDW_SCP_DEVID_0 0x50
#define SDW_SCP_DEVID_1 0x51
@@ -114,12 +129,111 @@
#define SDW_SCP_DEVID_4 0x54
#define SDW_SCP_DEVID_5 0x55
+/* Both INT and STATUS registers are the same */
+#define SDW_SCP_SDCA_INT1 0x58
+#define SDW_SCP_SDCA_INT_SDCA_0 BIT(0)
+#define SDW_SCP_SDCA_INT_SDCA_1 BIT(1)
+#define SDW_SCP_SDCA_INT_SDCA_2 BIT(2)
+#define SDW_SCP_SDCA_INT_SDCA_3 BIT(3)
+#define SDW_SCP_SDCA_INT_SDCA_4 BIT(4)
+#define SDW_SCP_SDCA_INT_SDCA_5 BIT(5)
+#define SDW_SCP_SDCA_INT_SDCA_6 BIT(6)
+#define SDW_SCP_SDCA_INT_SDCA_7 BIT(7)
+
+#define SDW_SCP_SDCA_INT2 0x59
+#define SDW_SCP_SDCA_INT_SDCA_8 BIT(0)
+#define SDW_SCP_SDCA_INT_SDCA_9 BIT(1)
+#define SDW_SCP_SDCA_INT_SDCA_10 BIT(2)
+#define SDW_SCP_SDCA_INT_SDCA_11 BIT(3)
+#define SDW_SCP_SDCA_INT_SDCA_12 BIT(4)
+#define SDW_SCP_SDCA_INT_SDCA_13 BIT(5)
+#define SDW_SCP_SDCA_INT_SDCA_14 BIT(6)
+#define SDW_SCP_SDCA_INT_SDCA_15 BIT(7)
+
+#define SDW_SCP_SDCA_INT3 0x5A
+#define SDW_SCP_SDCA_INT_SDCA_16 BIT(0)
+#define SDW_SCP_SDCA_INT_SDCA_17 BIT(1)
+#define SDW_SCP_SDCA_INT_SDCA_18 BIT(2)
+#define SDW_SCP_SDCA_INT_SDCA_19 BIT(3)
+#define SDW_SCP_SDCA_INT_SDCA_20 BIT(4)
+#define SDW_SCP_SDCA_INT_SDCA_21 BIT(5)
+#define SDW_SCP_SDCA_INT_SDCA_22 BIT(6)
+#define SDW_SCP_SDCA_INT_SDCA_23 BIT(7)
+
+#define SDW_SCP_SDCA_INT4 0x5B
+#define SDW_SCP_SDCA_INT_SDCA_24 BIT(0)
+#define SDW_SCP_SDCA_INT_SDCA_25 BIT(1)
+#define SDW_SCP_SDCA_INT_SDCA_26 BIT(2)
+#define SDW_SCP_SDCA_INT_SDCA_27 BIT(3)
+#define SDW_SCP_SDCA_INT_SDCA_28 BIT(4)
+#define SDW_SCP_SDCA_INT_SDCA_29 BIT(5)
+#define SDW_SCP_SDCA_INT_SDCA_30 BIT(6)
+/* BIT(7) not allocated in SoundWire 1.2 specification */
+
+#define SDW_SCP_SDCA_INTMASK1 0x5C
+#define SDW_SCP_SDCA_INTMASK_SDCA_0 BIT(0)
+#define SDW_SCP_SDCA_INTMASK_SDCA_1 BIT(1)
+#define SDW_SCP_SDCA_INTMASK_SDCA_2 BIT(2)
+#define SDW_SCP_SDCA_INTMASK_SDCA_3 BIT(3)
+#define SDW_SCP_SDCA_INTMASK_SDCA_4 BIT(4)
+#define SDW_SCP_SDCA_INTMASK_SDCA_5 BIT(5)
+#define SDW_SCP_SDCA_INTMASK_SDCA_6 BIT(6)
+#define SDW_SCP_SDCA_INTMASK_SDCA_7 BIT(7)
+
+#define SDW_SCP_SDCA_INTMASK2 0x5D
+#define SDW_SCP_SDCA_INTMASK_SDCA_8 BIT(0)
+#define SDW_SCP_SDCA_INTMASK_SDCA_9 BIT(1)
+#define SDW_SCP_SDCA_INTMASK_SDCA_10 BIT(2)
+#define SDW_SCP_SDCA_INTMASK_SDCA_11 BIT(3)
+#define SDW_SCP_SDCA_INTMASK_SDCA_12 BIT(4)
+#define SDW_SCP_SDCA_INTMASK_SDCA_13 BIT(5)
+#define SDW_SCP_SDCA_INTMASK_SDCA_14 BIT(6)
+#define SDW_SCP_SDCA_INTMASK_SDCA_15 BIT(7)
+
+#define SDW_SCP_SDCA_INTMASK3 0x5E
+#define SDW_SCP_SDCA_INTMASK_SDCA_16 BIT(0)
+#define SDW_SCP_SDCA_INTMASK_SDCA_17 BIT(1)
+#define SDW_SCP_SDCA_INTMASK_SDCA_18 BIT(2)
+#define SDW_SCP_SDCA_INTMASK_SDCA_19 BIT(3)
+#define SDW_SCP_SDCA_INTMASK_SDCA_20 BIT(4)
+#define SDW_SCP_SDCA_INTMASK_SDCA_21 BIT(5)
+#define SDW_SCP_SDCA_INTMASK_SDCA_22 BIT(6)
+#define SDW_SCP_SDCA_INTMASK_SDCA_23 BIT(7)
+
+#define SDW_SCP_SDCA_INTMASK4 0x5F
+#define SDW_SCP_SDCA_INTMASK_SDCA_24 BIT(0)
+#define SDW_SCP_SDCA_INTMASK_SDCA_25 BIT(1)
+#define SDW_SCP_SDCA_INTMASK_SDCA_26 BIT(2)
+#define SDW_SCP_SDCA_INTMASK_SDCA_27 BIT(3)
+#define SDW_SCP_SDCA_INTMASK_SDCA_28 BIT(4)
+#define SDW_SCP_SDCA_INTMASK_SDCA_29 BIT(5)
+#define SDW_SCP_SDCA_INTMASK_SDCA_30 BIT(6)
+/* BIT(7) not allocated in SoundWire 1.2 specification */
+
/* Banked Registers */
#define SDW_SCP_FRAMECTRL_B0 0x60
#define SDW_SCP_FRAMECTRL_B1 (0x60 + SDW_BANK1_OFFSET)
#define SDW_SCP_NEXTFRAME_B0 0x61
#define SDW_SCP_NEXTFRAME_B1 (0x61 + SDW_BANK1_OFFSET)
+#define SDW_SCP_BUSCLOCK_SCALE_B0 0x62
+#define SDW_SCP_BUSCLOCK_SCALE_B1 (0x62 + SDW_BANK1_OFFSET)
+#define SDW_SCP_CLOCK_SCALE GENMASK(3, 0)
+
+/* PHY registers - CTRL and STAT are the same address */
+#define SDW_SCP_PHY_OUT_CTRL_0 0x80
+#define SDW_SCP_PHY_OUT_CTRL_1 0x81
+#define SDW_SCP_PHY_OUT_CTRL_2 0x82
+#define SDW_SCP_PHY_OUT_CTRL_3 0x83
+#define SDW_SCP_PHY_OUT_CTRL_4 0x84
+#define SDW_SCP_PHY_OUT_CTRL_5 0x85
+#define SDW_SCP_PHY_OUT_CTRL_6 0x86
+#define SDW_SCP_PHY_OUT_CTRL_7 0x87
+
+#define SDW_SCP_CAP_LOAD_CTRL GENMASK(2, 0)
+#define SDW_SCP_DRIVE_STRENGTH_CTRL GENMASK(5, 3)
+#define SDW_SCP_SLEW_TIME_CTRL GENMASK(7, 6)
+
 /* Both INT and STATUS registers are the same */
#define SDW_DPN_INT(n) (0x0 + SDW_DPN_SIZE * (n))
#define SDW_DPN_INTMASK(n) (0x1 + SDW_DPN_SIZE * (n))
@@ -133,6 +247,11 @@
#define SDW_DPN_INT_IMPDEF1 BIT(5)
#define SDW_DPN_INT_IMPDEF2 BIT(6)
#define SDW_DPN_INT_IMPDEF3 BIT(7)
+#define SDW_DPN_INTERRUPTS (SDW_DPN_INT_TEST_FAIL | \
+ SDW_DPN_INT_PORT_READY | \
+ SDW_DPN_INT_IMPDEF1 | \
+ SDW_DPN_INT_IMPDEF2 | \
+ SDW_DPN_INT_IMPDEF3)
#define SDW_DPN_PORTCTRL_FLOWMODE GENMASK(1, 0)
#define SDW_DPN_PORTCTRL_DATAMODE GENMASK(3, 2)
@@ -190,4 +309,36 @@
#define SDW_CASC_PORT_MASK_INTSTAT3 1
#define SDW_CASC_PORT_REG_OFFSET_INTSTAT3 2
+/*
+ * v1.2 device - SDCA address mapping
+ *
+ * Spec definition
+ * Bits Contents
+ * 31 0 (required by addressing range)
+ * 30:26 0b10000 (Control Prefix)
+ * 25 0 (Reserved)
+ * 24:22 Function Number [2:0]
+ * 21 Entity[6]
+ * 20:19 Control Selector[5:4]
+ * 18 0 (Reserved)
+ * 17:15 Control Number[5:3]
+ * 14 Next
+ * 13 MBQ
+ * 12:7 Entity[5:0]
+ * 6:3 Control Selector[3:0]
+ * 2:0 Control Number[2:0]
+ */
+
+#define SDW_SDCA_CTL(fun, ent, ctl, ch) (BIT(30) | \
+ (((fun) & 0x7) << 22) | \
+ (((ent) & 0x40) << 15) | \
+ (((ent) & 0x3f) << 7) | \
+ (((ctl) & 0x30) << 15) | \
+ (((ctl) & 0x0f) << 3) | \
+ (((ch) & 0x38) << 12) | \
+ ((ch) & 0x07))
+
+#define SDW_SDCA_MBQ_CTL(reg) ((reg) | BIT(13))
+#define SDW_SDCA_NEXT_CTL(reg) ((reg) | BIT(14))
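+
+/*
+ * Worked expansion for Function 1, Entity 2, Control Selector 3,
+ * Control Number/channel 0 (all other terms evaluate to zero):
+ *   BIT(30)           == 0x40000000
+ *   (1 & 0x7)  << 22  == 0x00400000
+ *   (2 & 0x3f) << 7   == 0x00000100
+ *   (3 & 0x0f) << 3   == 0x00000018
+ *
+ *   SDW_SDCA_CTL(1, 2, 3, 0) == 0x40400118
+ */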
+
#endif /* __SDW_REGISTERS_H */
diff --git a/include/linux/soundwire/sdw_type.h b/include/linux/soundwire/sdw_type.h
index aaa7f4267c14..52eb66cd11bc 100644
--- a/include/linux/soundwire/sdw_type.h
+++ b/include/linux/soundwire/sdw_type.h
@@ -5,6 +5,13 @@
#define __SOUNDWIRE_TYPES_H
extern struct bus_type sdw_bus_type;
+extern struct device_type sdw_slave_type;
+extern struct device_type sdw_master_type;
+
+static inline int is_sdw_slave(const struct device *dev)
+{
+ return dev->type == &sdw_slave_type;
+}
#define drv_to_sdw_driver(_drv) container_of(_drv, struct sdw_driver, driver)
@@ -14,7 +21,7 @@ extern struct bus_type sdw_bus_type;
int __sdw_register_driver(struct sdw_driver *drv, struct module *owner);
void sdw_unregister_driver(struct sdw_driver *drv);
-int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size);
+int sdw_slave_uevent(struct device *dev, struct kobj_uevent_env *env);
/**
* module_sdw_driver() - Helper macro for registering a Soundwire driver
diff --git a/include/linux/spi/ads7846.h b/include/linux/spi/ads7846.h
index 1a5eaef3b7f2..d424c1aadf38 100644
--- a/include/linux/spi/ads7846.h
+++ b/include/linux/spi/ads7846.h
@@ -1,17 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* linux/spi/ads7846.h */
-/* Touchscreen characteristics vary between boards and models. The
- * platform_data for the device's "struct device" holds this information.
- *
- * It's OK if the min/max values are zero.
- */
-enum ads7846_filter {
- ADS7846_FILTER_OK,
- ADS7846_FILTER_REPEAT,
- ADS7846_FILTER_IGNORE,
-};
-
struct ads7846_platform_data {
u16 model; /* 7843, 7845, 7846, 7873. */
u16 vref_delay_usecs; /* 0 for external vref; etc */
@@ -51,10 +40,6 @@ struct ads7846_platform_data {
int gpio_pendown_debounce; /* platform specific debounce time for
* the gpio_pendown */
int (*get_pendown_state)(void);
- int (*filter_init) (const struct ads7846_platform_data *pdata,
- void **filter_data);
- int (*filter) (void *filter_data, int data_idx, int *val);
- void (*filter_cleanup)(void *filter_data);
void (*wait_for_sync)(void);
bool wakeup;
unsigned long irq_flags;
diff --git a/include/linux/spi/altera.h b/include/linux/spi/altera.h
new file mode 100644
index 000000000000..2e2a622e56da
--- /dev/null
+++ b/include/linux/spi/altera.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header File for Altera SPI Driver.
+ */
+#ifndef __LINUX_SPI_ALTERA_H
+#define __LINUX_SPI_ALTERA_H
+
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+#define ALTERA_SPI_MAX_CS 32
+
+/**
+ * struct altera_spi_platform_data - Platform data of the Altera SPI driver
+ * @mode_bits: Mode bits of SPI master.
+ * @num_chipselect: Number of chipselects.
+ * @bits_per_word_mask: bitmask of supported bits_per_word for transfers.
+ * @num_devices: Number of devices that shall be added when the driver
+ * is probed.
+ * @devices: The devices to add.
+ */
+struct altera_spi_platform_data {
+ u16 mode_bits;
+ u16 num_chipselect;
+ u32 bits_per_word_mask;
+ u16 num_devices;
+ struct spi_board_info *devices;
+};
+
+struct altera_spi {
+ int irq;
+ int len;
+ int count;
+ int bytes_per_word;
+ u32 imr;
+
+ /* data buffers */
+ const unsigned char *tx;
+ unsigned char *rx;
+
+ struct regmap *regmap;
+ u32 regoff;
+ struct device *dev;
+};
+
+extern irqreturn_t altera_spi_irq(int irq, void *dev);
+extern void altera_spi_init_master(struct spi_master *master);
+#endif /* __LINUX_SPI_ALTERA_H */
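
A hypothetical sketch of board code feeding this platform data (device
name and clock rate invented):

	static struct spi_board_info altera_board_devs[] = {
		{
			.modalias	= "spidev",
			.chip_select	= 0,
			.max_speed_hz	= 1000000,
		},
	};

	static struct altera_spi_platform_data altera_pdata = {
		.num_chipselect		= 1,
		.bits_per_word_mask	= SPI_BPW_MASK(8),
		.num_devices		= ARRAY_SIZE(altera_board_devs),
		.devices		= altera_board_devs,
	};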
diff --git a/include/linux/spi/corgi_lcd.h b/include/linux/spi/corgi_lcd.h
index edf4beccdadb..0b857616919c 100644
--- a/include/linux/spi/corgi_lcd.h
+++ b/include/linux/spi/corgi_lcd.h
@@ -11,9 +11,6 @@ struct corgi_lcd_platform_data {
int default_intensity;
int limit_mask;
- int gpio_backlight_on; /* -1 if n/a */
- int gpio_backlight_cont; /* -1 if n/a */
-
void (*notify)(int intensity);
void (*kick_battery)(void);
};
diff --git a/include/linux/spi/eeprom.h b/include/linux/spi/eeprom.h
index aceccf9c71fb..1cca3dd5a748 100644
--- a/include/linux/spi/eeprom.h
+++ b/include/linux/spi/eeprom.h
@@ -14,7 +14,7 @@
struct spi_eeprom {
u32 byte_len;
char name[10];
- u16 page_size; /* for writes */
+ u32 page_size; /* for writes */
u16 flags;
#define EE_ADDR1 0x0001 /* 8 bit addrs */
#define EE_ADDR2 0x0002 /* 16 bit addrs */
diff --git a/include/linux/spi/ifx_modem.h b/include/linux/spi/ifx_modem.h
deleted file mode 100644
index 694268c78d5d..000000000000
--- a/include/linux/spi/ifx_modem.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef LINUX_IFX_MODEM_H
-#define LINUX_IFX_MODEM_H
-
-struct ifx_modem_platform_data {
- unsigned short rst_out; /* modem reset out */
- unsigned short pwr_on; /* power on */
- unsigned short rst_pmu; /* reset modem */
- unsigned short tx_pwr; /* modem power threshold */
- unsigned short srdy; /* SRDY */
- unsigned short mrdy; /* MRDY */
- unsigned char modem_type; /* Modem type */
- unsigned long max_hz; /* max SPI frequency */
- unsigned short use_dma:1; /* spi protocol driver supplies
- dma-able addrs */
-};
-#define IFX_MODEM_6160 1
-#define IFX_MODEM_6260 2
-
-#endif
diff --git a/include/linux/spi/l4f00242t03.h b/include/linux/spi/l4f00242t03.h
deleted file mode 100644
index 831a5de7a0e2..000000000000
--- a/include/linux/spi/l4f00242t03.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * l4f00242t03.h -- Platform glue for Epson L4F00242T03 LCD
- *
- * Copyright (c) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
- * Based on Marek Vasut work in lms283gf05.h
-*/
-
-#ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_
-#define _INCLUDE_LINUX_SPI_L4F00242T03_H_
-
-struct l4f00242t03_pdata {
- unsigned int reset_gpio;
- unsigned int data_enable_gpio;
-};
-
-#endif /* _INCLUDE_LINUX_SPI_L4F00242T03_H_ */
diff --git a/include/linux/spi/lms283gf05.h b/include/linux/spi/lms283gf05.h
deleted file mode 100644
index f237b2d062e9..000000000000
--- a/include/linux/spi/lms283gf05.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * lms283gf05.h - Platform glue for Samsung LMS283GF05 LCD
- *
- * Copyright (C) 2009 Marek Vasut <marek.vasut@gmail.com>
-*/
-
-#ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_
-#define _INCLUDE_LINUX_SPI_LMS283GF05_H_
-
-struct lms283gf05_pdata {
- unsigned long reset_gpio;
- bool reset_inverted;
-};
-
-#endif /* _INCLUDE_LINUX_SPI_LMS283GF05_H_ */
diff --git a/include/linux/spi/max7301.h b/include/linux/spi/max7301.h
index 433c20e2f46e..e392c53758bc 100644
--- a/include/linux/spi/max7301.h
+++ b/include/linux/spi/max7301.h
@@ -2,7 +2,7 @@
#ifndef LINUX_SPI_MAX7301_H
#define LINUX_SPI_MAX7301_H
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
/*
* Some registers must be read back to modify.
@@ -31,6 +31,6 @@ struct max7301_platform_data {
u32 input_pullup_active;
};
-extern int __max730x_remove(struct device *dev);
+extern void __max730x_remove(struct device *dev);
extern int __max730x_probe(struct max7301 *ts);
#endif
diff --git a/include/linux/spi/mcp23s08.h b/include/linux/spi/mcp23s08.h
deleted file mode 100644
index 738a45b435f2..000000000000
--- a/include/linux/spi/mcp23s08.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-struct mcp23s08_platform_data {
- /* For mcp23s08, up to 4 slaves (numbered 0..3) can share one SPI
- * chipselect, each providing 1 gpio_chip instance with 8 gpios.
- * For mpc23s17, up to 8 slaves (numbered 0..7) can share one SPI
- * chipselect, each providing 1 gpio_chip (port A + port B) with
- * 16 gpios.
- */
- u32 spi_present_mask;
-
- /* "base" is the number of the first GPIO or -1 for dynamic
- * assignment. If there are gaps in chip addressing the GPIO
- * numbers are sequential .. so for example if only slaves 0
- * and 3 are present, their GPIOs range from base to base+15
- * (or base+31 for s17 variant).
- */
- unsigned base;
-};
diff --git a/include/linux/spi/mmc_spi.h b/include/linux/spi/mmc_spi.h
index 778ae8eb1f3e..9ad9a06e488d 100644
--- a/include/linux/spi/mmc_spi.h
+++ b/include/linux/spi/mmc_spi.h
@@ -35,16 +35,7 @@ struct mmc_spi_platform_data {
void (*setpower)(struct device *, unsigned int maskval);
};
-#ifdef CONFIG_OF
extern struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi);
extern void mmc_spi_put_pdata(struct spi_device *spi);
-#else
-static inline struct mmc_spi_platform_data *
-mmc_spi_get_pdata(struct spi_device *spi)
-{
- return spi->dev.platform_data;
-}
-static inline void mmc_spi_put_pdata(struct spi_device *spi) {}
-#endif /* CONFIG_OF */
#endif /* __LINUX_SPI_MMC_SPI_H */
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index 31f00c7f4f59..4658e7801b42 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -2,17 +2,19 @@
/*
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
*/
-#ifndef __linux_pxa2xx_spi_h
-#define __linux_pxa2xx_spi_h
+#ifndef __LINUX_SPI_PXA2XX_SPI_H
+#define __LINUX_SPI_PXA2XX_SPI_H
-#include <linux/pxa2xx_ssp.h>
+#include <linux/types.h>
-#define PXA2XX_CS_ASSERT (0x01)
-#define PXA2XX_CS_DEASSERT (0x02)
+#include <linux/pxa2xx_ssp.h>
struct dma_chan;
-/* device.platform_data for SSP controller devices */
+/*
+ * The platform data for SSP controller devices
+ * (resides in device.platform_data).
+ */
struct pxa2xx_spi_controller {
u16 num_chipselect;
u8 enable_dma;
@@ -28,8 +30,11 @@ struct pxa2xx_spi_controller {
struct ssp_device ssp;
};
-/* spi_board_info.controller_data for SPI slave devices,
- * copied to spi_device.platform_data ... mostly for dma tuning
+/*
+ * The controller specific data for SPI slave devices
+ * (resides in spi_board_info.controller_data),
+ * copied to spi_device.platform_data ... mostly for
+ * DMA tuning.
*/
struct pxa2xx_spi_chip {
u8 tx_threshold;
@@ -37,9 +42,6 @@ struct pxa2xx_spi_chip {
u8 rx_threshold;
u8 dma_burst_size;
u32 timeout;
- u8 enable_loopback;
- int gpio_cs;
- void (*cs_control)(u32 command);
};
#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
@@ -49,4 +51,5 @@ struct pxa2xx_spi_chip {
extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info);
#endif
-#endif
+
+#endif /* __LINUX_SPI_PXA2XX_SPI_H */
diff --git a/include/linux/spi/s3c24xx-fiq.h b/include/linux/spi/s3c24xx-fiq.h
new file mode 100644
index 000000000000..d2842ac1de27
--- /dev/null
+++ b/include/linux/spi/s3c24xx-fiq.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* linux/drivers/spi/spi_s3c24xx_fiq.h
+ *
+ * Copyright 2009 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C24XX SPI - FIQ pseudo-DMA transfer support
+*/
+
+#ifndef __LINUX_SPI_S3C24XX_FIQ_H
+#define __LINUX_SPI_S3C24XX_FIQ_H __FILE__
+
+/* We have R8 through R13 to play with */
+
+#ifdef __ASSEMBLY__
+#define __REG_NR(x) r##x
+#else
+
+extern struct spi_fiq_code s3c24xx_spi_fiq_txrx;
+extern struct spi_fiq_code s3c24xx_spi_fiq_tx;
+extern struct spi_fiq_code s3c24xx_spi_fiq_rx;
+
+#define __REG_NR(x) (x)
+#endif
+
+#define fiq_rspi __REG_NR(8)
+#define fiq_rtmp __REG_NR(9)
+#define fiq_rrx __REG_NR(10)
+#define fiq_rtx __REG_NR(11)
+#define fiq_rcount __REG_NR(12)
+#define fiq_rirq __REG_NR(13)
+
+#endif /* __LINUX_SPI_S3C24XX_FIQ_H */
diff --git a/include/linux/spi/s3c24xx.h b/include/linux/spi/s3c24xx.h
index c91d10b82f08..9b8bb22d5b0c 100644
--- a/include/linux/spi/s3c24xx.h
+++ b/include/linux/spi/s3c24xx.h
@@ -10,16 +10,11 @@
#define __LINUX_SPI_S3C24XX_H __FILE__
struct s3c2410_spi_info {
- int pin_cs; /* simple gpio cs */
unsigned int num_cs; /* total chipselects */
int bus_num; /* bus number to use. */
-
unsigned int use_fiq:1; /* use fiq */
-
- void (*gpio_setup)(struct s3c2410_spi_info *spi, int enable);
- void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol);
};
-extern int s3c24xx_set_fiq(unsigned int irq, bool on);
+extern int s3c24xx_set_fiq(unsigned int irq, u32 *ack_ptr, bool on);
#endif /* __LINUX_SPI_S3C24XX_H */
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index af9ff2f0f1b2..8e984d75f5b6 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -17,6 +17,7 @@
{ \
.buswidth = __buswidth, \
.opcode = __opcode, \
+ .nbytes = 1, \
}
#define SPI_MEM_OP_ADDR(__nbytes, __val, __buswidth) \
@@ -69,11 +70,15 @@ enum spi_mem_data_dir {
/**
* struct spi_mem_op - describes a SPI memory operation
+ * @cmd.nbytes: number of opcode bytes (only 1 or 2 are valid). The opcode is
+ * sent MSB-first.
* @cmd.buswidth: number of IO lines used to transmit the command
* @cmd.opcode: operation opcode
+ * @cmd.dtr: whether the command opcode should be sent in DTR mode or not
* @addr.nbytes: number of address bytes to send. Can be zero if the operation
* does not need to send an address
* @addr.buswidth: number of IO lines used to transmit the address cycles
+ * @addr.dtr: whether the address should be sent in DTR mode or not
* @addr.val: address value. This value is always sent MSB first on the bus.
* Note that only @addr.nbytes are taken into account in this
* address value, so users should make sure the value fits in the
@@ -81,7 +86,10 @@ enum spi_mem_data_dir {
* @dummy.nbytes: number of dummy bytes to send after an opcode or address. Can
* be zero if the operation does not require dummy bytes
* @dummy.buswidth: number of IO lanes used to transmit the dummy bytes
+ * @dummy.dtr: whether the dummy bytes should be sent in DTR mode or not
* @data.buswidth: number of IO lanes used to send/receive the data
+ * @data.dtr: whether the data should be sent in DTR mode or not
+ * @data.ecc: whether error correction is required or not
* @data.dir: direction of the transfer
* @data.nbytes: number of data bytes to send/receive. Can be zero if the
* operation does not involve transferring data
@@ -90,23 +98,29 @@ enum spi_mem_data_dir {
*/
struct spi_mem_op {
struct {
+ u8 nbytes;
u8 buswidth;
- u8 opcode;
+ u8 dtr : 1;
+ u16 opcode;
} cmd;
struct {
u8 nbytes;
u8 buswidth;
+ u8 dtr : 1;
u64 val;
} addr;
struct {
u8 nbytes;
u8 buswidth;
+ u8 dtr : 1;
} dummy;
struct {
u8 buswidth;
+ u8 dtr : 1;
+ u8 ecc : 1;
enum spi_mem_data_dir dir;
unsigned int nbytes;
union {
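
A short usage sketch may help here; the 0x6b opcode, address and length below
are illustrative (buf is a caller-provided buffer), and the fields are
normally filled through the SPI_MEM_OP_* helpers rather than by hand:

	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),		/* opcode on 1 wire */
			   SPI_MEM_OP_ADDR(3, 0x1000, 1),	/* 3 address bytes */
			   SPI_MEM_OP_DUMMY(8, 1),		/* 8 dummy bytes */
			   SPI_MEM_OP_DATA_IN(256, buf, 4));	/* quad-wire read */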
@@ -211,7 +225,7 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
/**
* struct spi_controller_mem_ops - SPI memory operations
* @adjust_op_size: shrink the data xfer of an operation to match controller's
- * limitations (can be alignment of max RX/TX size
+ * limitations (can be alignment or max RX/TX size
* limitations)
* @supports_op: check if an operation is supported by the controller
* @exec_op: execute a SPI memory operation
@@ -238,6 +252,9 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
* the currently mapped area), and the caller of
* spi_mem_dirmap_write() is responsible for calling it again in
* this case.
+ * @poll_status: poll memory device status until (status & mask) == match or
+ * the timeout has expired. It fills the data buffer with
+ * the last status value.
*
 * This interface should be implemented by SPI controllers providing a
 * high-level interface to execute SPI memory operations, which is usually the
@@ -262,9 +279,28 @@ struct spi_controller_mem_ops {
u64 offs, size_t len, void *buf);
ssize_t (*dirmap_write)(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, const void *buf);
+ int (*poll_status)(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ u16 mask, u16 match,
+ unsigned long initial_delay_us,
+ unsigned long polling_rate_us,
+ unsigned long timeout_ms);
};
/**
+ * struct spi_controller_mem_caps - SPI memory controller capabilities
+ * @dtr: Supports DTR operations
+ * @ecc: Supports operations with error correction
+ */
+struct spi_controller_mem_caps {
+ bool dtr;
+ bool ecc;
+};
+
+#define spi_mem_controller_is_capable(ctlr, cap) \
+ ((ctlr)->mem_caps && (ctlr)->mem_caps->cap)
+
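As an illustrative sketch (my_mem_caps and ctlr are placeholder names), a
controller driver fills one static caps structure and the core then tests it
through the macro above:

	static const struct spi_controller_mem_caps my_mem_caps = {
		.dtr = true,	/* controller can run DTR operations */
	};

	/* in the driver's probe(): */
	ctlr->mem_caps = &my_mem_caps;

	/* later, anywhere the controller is known: */
	bool can_dtr = spi_mem_controller_is_capable(ctlr, dtr);
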
+/**
* struct spi_mem_driver - SPI memory driver
* @spidrv: inherit from a SPI driver
* @probe: probe a SPI memory. Usually where detection/initialization takes
@@ -298,7 +334,6 @@ void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
bool spi_mem_default_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op);
-
#else
static inline int
spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
@@ -321,7 +356,6 @@ bool spi_mem_default_supports_op(struct spi_mem *mem,
{
return false;
}
-
#endif /* CONFIG_SPI_MEM */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op);
@@ -348,6 +382,13 @@ devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
void devm_spi_mem_dirmap_destroy(struct device *dev,
struct spi_mem_dirmap_desc *desc);
+int spi_mem_poll_status(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ u16 mask, u16 match,
+ unsigned long initial_delay_us,
+ unsigned long polling_delay_us,
+ u16 timeout_ms);
+
int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
struct module *owner);
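
A hedged example of the new polling helper (the 0x0f "get feature" opcode,
the 0xc0 status address and the busy bit are illustrative; mem is assumed to
be a valid struct spi_mem *):

	u8 status;
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x0f, 1),
			   SPI_MEM_OP_ADDR(1, 0xc0, 1),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_IN(1, &status, 1));

	/* wait until (status & BIT(0)) == 0, polling every 5 us, 400 ms max */
	int ret = spi_mem_poll_status(mem, &op, BIT(0), 0, 0, 5, 400);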
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 6d16ba01ff5a..fbf8c0d95968 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -6,6 +6,7 @@
#ifndef __LINUX_SPI_H
#define __LINUX_SPI_H
+#include <linux/bits.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
@@ -13,13 +14,18 @@
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/gpio/consumer.h>
-#include <linux/ptp_clock_kernel.h>
+
+#include <uapi/linux/spi/spi.h>
+#include <linux/acpi.h>
+#include <linux/u64_stats_sync.h>
struct dma_chan;
-struct property_entry;
+struct software_node;
+struct ptp_system_timestamp;
struct spi_controller;
struct spi_transfer;
struct spi_controller_mem_ops;
+struct spi_controller_mem_caps;
/*
* INTERFACES between SPI master-side drivers and SPI slave protocol handlers,
@@ -29,7 +35,8 @@ extern struct bus_type spi_bus_type;
/**
* struct spi_statistics - statistics for spi transfers
- * @lock: lock protecting this structure
+ * @syncp: seqcount to protect members in this struct for per-cpu update
+ * on 32-bit systems
*
* @messages: number of spi-messages handled
* @transfers: number of spi_transfers handled
@@ -54,41 +61,48 @@ extern struct bus_type spi_bus_type;
* maxsize limit
*/
struct spi_statistics {
- spinlock_t lock; /* lock for the whole structure */
+ struct u64_stats_sync syncp;
- unsigned long messages;
- unsigned long transfers;
- unsigned long errors;
- unsigned long timedout;
+ u64_stats_t messages;
+ u64_stats_t transfers;
+ u64_stats_t errors;
+ u64_stats_t timedout;
- unsigned long spi_sync;
- unsigned long spi_sync_immediate;
- unsigned long spi_async;
+ u64_stats_t spi_sync;
+ u64_stats_t spi_sync_immediate;
+ u64_stats_t spi_async;
- unsigned long long bytes;
- unsigned long long bytes_rx;
- unsigned long long bytes_tx;
+ u64_stats_t bytes;
+ u64_stats_t bytes_rx;
+ u64_stats_t bytes_tx;
#define SPI_STATISTICS_HISTO_SIZE 17
- unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE];
+ u64_stats_t transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE];
- unsigned long transfers_split_maxsize;
+ u64_stats_t transfers_split_maxsize;
};
-void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
- struct spi_transfer *xfer,
- struct spi_controller *ctlr);
-
-#define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count) \
- do { \
- unsigned long flags; \
- spin_lock_irqsave(&(stats)->lock, flags); \
- (stats)->field += count; \
- spin_unlock_irqrestore(&(stats)->lock, flags); \
+#define SPI_STATISTICS_ADD_TO_FIELD(pcpu_stats, field, count) \
+ do { \
+ struct spi_statistics *__lstats; \
+ get_cpu(); \
+ __lstats = this_cpu_ptr(pcpu_stats); \
+ u64_stats_update_begin(&__lstats->syncp); \
+ u64_stats_add(&__lstats->field, count); \
+ u64_stats_update_end(&__lstats->syncp); \
+ put_cpu(); \
} while (0)
-#define SPI_STATISTICS_INCREMENT_FIELD(stats, field) \
- SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1)
+#define SPI_STATISTICS_INCREMENT_FIELD(pcpu_stats, field) \
+ do { \
+ struct spi_statistics *__lstats; \
+ get_cpu(); \
+ __lstats = this_cpu_ptr(pcpu_stats); \
+ u64_stats_update_begin(&__lstats->syncp); \
+ u64_stats_inc(&__lstats->field); \
+ u64_stats_update_end(&__lstats->syncp); \
+ put_cpu(); \
+ } while (0)
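
For illustration (ctlr and xfer are assumed to be in scope), an update site
now looks like this; each macro pins the CPU, picks the per-cpu slot and
brackets the 64-bit update with the seqcount:

	struct spi_statistics __percpu *stats = ctlr->pcpu_statistics;

	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
	SPI_STATISTICS_ADD_TO_FIELD(stats, bytes, xfer->len);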
/**
* struct spi_delay - SPI delay information
@@ -135,15 +149,20 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
* @modalias: Name of the driver to use with this device, or an alias
* for that name. This appears in the sysfs "modalias" attribute
* for driver coldplugging, and in uevents used for hotplugging
- * @cs_gpio: LEGACY: gpio number of the chipselect line (optional, -ENOENT when
- * not using a GPIO line) use cs_gpiod in new drivers by opting in on
- * the spi_master.
+ * @driver_override: If the name of a driver is written to this attribute, then
+ * the device will bind to the named driver and only the named driver.
+ * Do not set directly, because core frees it; use driver_set_override() to
+ * set or clear it.
* @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when
* not using a GPIO line)
* @word_delay: delay to be inserted between consecutive
* words of a transfer
- *
- * @statistics: statistics for the spi_device
+ * @cs_setup: delay to be introduced by the controller after CS is asserted
+ * @cs_hold: delay to be introduced by the controller before CS is deasserted
+ * @cs_inactive: delay to be introduced by the controller after CS is
+ * deasserted. If @cs_change_delay is used from @spi_transfer, then the
+ * two delays will be added up.
+ * @pcpu_statistics: statistics for the spi_device
*
* A @spi_device is used to interchange data between an SPI slave
* (usually a discrete chip) and CPU memory.
@@ -157,43 +176,38 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
struct spi_device {
struct device dev;
struct spi_controller *controller;
- struct spi_controller *master; /* compatibility layer */
+ struct spi_controller *master; /* Compatibility layer */
u32 max_speed_hz;
u8 chip_select;
u8 bits_per_word;
bool rt;
+#define SPI_NO_TX BIT(31) /* No transmit wire */
+#define SPI_NO_RX BIT(30) /* No receive wire */
+ /*
+ * All bits defined above should be covered by SPI_MODE_KERNEL_MASK.
+ * The SPI_MODE_KERNEL_MASK has the SPI_MODE_USER_MASK counterpart,
+ * which is defined in 'include/uapi/linux/spi/spi.h'.
+ * The bits defined here are from bit 31 downwards, while in
+ * SPI_MODE_USER_MASK are from 0 upwards.
+ * These bits must not overlap. A static assert check should make sure of that.
+ * If adding extra bits, make sure to decrease the bit index below as well.
+ */
+#define SPI_MODE_KERNEL_MASK (~(BIT(30) - 1))
u32 mode;
-#define SPI_CPHA 0x01 /* clock phase */
-#define SPI_CPOL 0x02 /* clock polarity */
-#define SPI_MODE_0 (0|0) /* (original MicroWire) */
-#define SPI_MODE_1 (0|SPI_CPHA)
-#define SPI_MODE_2 (SPI_CPOL|0)
-#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA)
-#define SPI_CS_HIGH 0x04 /* chipselect active high? */
-#define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */
-#define SPI_3WIRE 0x10 /* SI/SO signals shared */
-#define SPI_LOOP 0x20 /* loopback mode */
-#define SPI_NO_CS 0x40 /* 1 dev/bus, no chipselect */
-#define SPI_READY 0x80 /* slave pulls low to pause */
-#define SPI_TX_DUAL 0x100 /* transmit with 2 wires */
-#define SPI_TX_QUAD 0x200 /* transmit with 4 wires */
-#define SPI_RX_DUAL 0x400 /* receive with 2 wires */
-#define SPI_RX_QUAD 0x800 /* receive with 4 wires */
-#define SPI_CS_WORD 0x1000 /* toggle cs after each word */
-#define SPI_TX_OCTAL 0x2000 /* transmit with 8 wires */
-#define SPI_RX_OCTAL 0x4000 /* receive with 8 wires */
-#define SPI_3WIRE_HIZ 0x8000 /* high impedance turnaround */
int irq;
void *controller_state;
void *controller_data;
char modalias[SPI_NAME_SIZE];
const char *driver_override;
- int cs_gpio; /* LEGACY: chip select gpio */
- struct gpio_desc *cs_gpiod; /* chip select gpio desc */
- struct spi_delay word_delay; /* inter-word delay */
+ struct gpio_desc *cs_gpiod; /* Chip select gpio desc */
+ struct spi_delay word_delay; /* Inter-word delay */
+ /* CS delays */
+ struct spi_delay cs_setup;
+ struct spi_delay cs_hold;
+ struct spi_delay cs_inactive;
- /* the statistics */
- struct spi_statistics statistics;
+ /* The statistics */
+ struct spi_statistics __percpu *pcpu_statistics;
/*
* likely need more hooks for more protocol options affecting how
@@ -205,12 +219,16 @@ struct spi_device {
*/
};
+/* Make sure that SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK don't overlap */
+static_assert((SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK) == 0,
+ "SPI_MODE_USER_MASK & SPI_MODE_KERNEL_MASK must not overlap");
+
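A hedged example of consulting the new kernel-only mode bits from a protocol
driver (the error handling is illustrative):

	/* e.g. in probe(): refuse boards wired without a MISO line */
	if (spi->mode & SPI_NO_RX)
		return -EINVAL;	/* this driver has to read data back */
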
static inline struct spi_device *to_spi_device(struct device *dev)
{
return dev ? container_of(dev, struct spi_device, dev) : NULL;
}
-/* most drivers won't need to care about device refcounting */
+/* Most drivers won't need to care about device refcounting */
static inline struct spi_device *spi_dev_get(struct spi_device *spi)
{
return (spi && get_device(&spi->dev)) ? spi : NULL;
@@ -233,7 +251,7 @@ static inline void spi_set_ctldata(struct spi_device *spi, void *state)
spi->controller_state = state;
}
-/* device driver data */
+/* Device driver data */
static inline void spi_set_drvdata(struct spi_device *spi, void *data)
{
@@ -246,7 +264,6 @@ static inline void *spi_get_drvdata(struct spi_device *spi)
}
struct spi_message;
-struct spi_transfer;
/**
* struct spi_driver - Host side "protocol" driver
@@ -276,7 +293,7 @@ struct spi_transfer;
struct spi_driver {
const struct spi_device_id *id_table;
int (*probe)(struct spi_device *spi);
- int (*remove)(struct spi_device *spi);
+ void (*remove)(struct spi_device *spi);
void (*shutdown)(struct spi_device *spi);
struct device_driver driver;
};
@@ -299,7 +316,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
driver_unregister(&sdrv->driver);
}
-/* use a define to avoid include chaining to get THIS_MODULE */
+extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 chip_select);
+
+/* Use a define to avoid include chaining to get THIS_MODULE */
#define spi_register_driver(driver) \
__spi_register_driver(THIS_MODULE, driver)
@@ -327,6 +346,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* every chipselect is connected to a slave.
* @dma_alignment: SPI controller constraint on DMA buffers alignment.
* @mode_bits: flags understood by this controller driver
+ * @buswidth_override_bits: flags to override for this controller driver
* @bits_per_word_mask: A mask indicating which values of bits_per_word are
* supported by the driver. Bit n indicates that a bits_per_word n+1 is
* supported. If set, the SPI core will reject any transfer with an
@@ -336,11 +356,13 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @max_speed_hz: Highest supported transfer speed
* @flags: other constraints relevant to this driver
* @slave: indicates that this is an SPI slave controller
+ * @devm_allocated: whether the allocation of this struct is devres-managed
* @max_transfer_size: function that returns the max transfer size for
* a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
* @max_message_size: function that returns the max message size for
* a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
* @io_mutex: mutex for physical bus access
+ * @add_lock: mutex to avoid adding devices to the same chipselect
* @bus_lock_spinlock: spinlock for SPI bus locking
* @bus_lock_mutex: mutex for exclusion of multiple callers
* @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
@@ -355,17 +377,26 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @transfer: adds a message to the controller's transfer queue.
* @cleanup: frees controller-specific state
* @can_dma: determine whether this controller supports DMA
+ * @dma_map_dev: device which can be used for DMA mapping
+ * @cur_rx_dma_dev: device which is currently used for RX DMA mapping
+ * @cur_tx_dma_dev: device which is currently used for TX DMA mapping
* @queued: whether this controller is providing an internal message queue
- * @kworker: thread struct for message pump
- * @kworker_task: pointer to task for message pump kworker thread
+ * @kworker: pointer to thread struct for message pump
* @pump_messages: work struct for scheduling work to the message pump
 * @queue_lock: spinlock to synchronise access to message queue
* @queue: message queue
- * @idling: the device is entering idle state
* @cur_msg: the currently in-flight message
- * @cur_msg_prepared: spi_prepare_message was called for the currently
- * in-flight message
+ * @cur_msg_completion: a completion for the current in-flight message
+ * @cur_msg_incomplete: Flag used internally to opportunistically skip
+ * the @cur_msg_completion. This flag is used to check if the driver has
+ * already called spi_finalize_current_message().
+ * @cur_msg_need_completion: Flag used internally to opportunistically skip
+ * the @cur_msg_completion. This flag is used to signal the context that
+ * is running spi_finalize_current_message() that it needs to complete()
* @cur_msg_mapped: message has been mapped for DMA
+ * @last_cs: the last chip_select recorded by set_cs, or -1 when no chip is
+ * selected
+ * @last_cs_mode_high: was (mode & SPI_CS_HIGH) true on the last call to set_cs.
* @xfer_completion: used by core transfer_one_message()
* @busy: message pump is busy
* @running: message pump is running
@@ -392,6 +423,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* for example doing DMA mapping. Called from threaded
* context.
* @transfer_one: transfer a single spi_transfer.
+ *
* - return 0 if the transfer is finished,
* - return 1 if the transfer is still in progress. When
* the driver is finished with this transfer it must
@@ -405,31 +437,22 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @mem_ops: optimized/dedicated operations for interactions with SPI memory.
* This field is optional and should only be implemented if the
* controller has native support for memory like operations.
+ * @mem_caps: controller capabilities for the handling of memory operations.
* @unprepare_message: undo any work done by prepare_message().
* @slave_abort: abort the ongoing transfer request on an SPI slave controller
- * @cs_setup: delay to be introduced by the controller after CS is asserted
- * @cs_hold: delay to be introduced by the controller before CS is deasserted
- * @cs_inactive: delay to be introduced by the controller after CS is
- * deasserted. If @cs_change_delay is used from @spi_transfer, then the
- * two delays will be added up.
- * @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per
- * CS number. Any individual value may be -ENOENT for CS lines that
- * are not GPIOs (driven by the SPI controller itself). Use the cs_gpiods
- * in new drivers.
* @cs_gpiods: Array of GPIO descs to use as chip select lines; one per CS
* number. Any individual value may be NULL for CS lines that
* are not GPIOs (driven by the SPI controller itself).
* @use_gpio_descriptors: Turns on the code in the SPI core to parse and grab
- * GPIO descriptors rather than using global GPIO numbers grabbed by the
- * driver. This will fill in @cs_gpiods and @cs_gpios should not be used,
- * and SPI devices will have the cs_gpiod assigned rather than cs_gpio.
+ * GPIO descriptors. This will fill in @cs_gpiods and SPI devices will have
+ * the cs_gpiod assigned if a GPIO line is found for the chipselect.
* @unused_native_cs: When cs_gpiods is used, spi_register_controller() will
* fill in this field with the first unused native CS, to be used by SPI
* controller drivers that need to drive a native CS when using GPIO CS.
* @max_native_cs: When cs_gpiods is used, and this field is filled in,
* spi_register_controller() will validate all native CS (including the
* unused native CS) against this value.
- * @statistics: statistics for the spi_controller
+ * @pcpu_statistics: statistics for the spi_controller
* @dma_tx: DMA transmit channel
* @dma_rx: DMA receive channel
* @dummy_rx: dummy receive buffer for full-duplex devices
@@ -443,6 +466,12 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @spi_transfer->ptp_sts_word_post were transmitted.
* If the driver does not set this, the SPI core takes the snapshot as
* close to the driver hand-over as possible.
+ * @irq_flags: Interrupt enable state during PTP system timestamping
+ * @fallback: fallback to PIO if the DMA transfer returns failure with
+ * SPI_TRANS_FAIL_NO_START.
+ * @queue_empty: signals that the message queue is empty, allowing spi_sync
+ * transfers to opportunistically skip the queue.
+ * @must_async: disable all fast paths in the core
*
* Each SPI controller can communicate with one or more @spi_device
* children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -460,7 +489,7 @@ struct spi_controller {
struct list_head list;
- /* other than negative (== assign one dynamically), bus_num is fully
+ /* Other than negative (== assign one dynamically), bus_num is fully
	 * board-specific. Usually that simplifies to being SOC-specific.
	 * Example: one SOC has three SPI controllers, numbered 0..2,
	 * and one board's schematics might show it using SPI-2. Software
@@ -473,7 +502,7 @@ struct spi_controller {
*/
u16 num_chipselect;
- /* some SPI controllers pose alignment requirements on DMAable
+ /* Some SPI controllers pose alignment requirements on DMAable
* buffers; let protocol drivers know about these requirements.
*/
u16 dma_alignment;
@@ -481,26 +510,32 @@ struct spi_controller {
/* spi_device.mode flags understood by this controller driver */
u32 mode_bits;
- /* bitmask of supported bits_per_word for transfers */
+ /* spi_device.mode flags override flags for this controller */
+ u32 buswidth_override_bits;
+
+ /* Bitmask of supported bits_per_word for transfers */
u32 bits_per_word_mask;
#define SPI_BPW_MASK(bits) BIT((bits) - 1)
#define SPI_BPW_RANGE_MASK(min, max) GENMASK((max) - 1, (min) - 1)
- /* limits on transfer speed */
+ /* Limits on transfer speed */
u32 min_speed_hz;
u32 max_speed_hz;
- /* other constraints relevant to this driver */
+ /* Other constraints relevant to this driver */
u16 flags;
-#define SPI_CONTROLLER_HALF_DUPLEX BIT(0) /* can't do full duplex */
-#define SPI_CONTROLLER_NO_RX BIT(1) /* can't do buffer read */
-#define SPI_CONTROLLER_NO_TX BIT(2) /* can't do buffer write */
-#define SPI_CONTROLLER_MUST_RX BIT(3) /* requires rx */
-#define SPI_CONTROLLER_MUST_TX BIT(4) /* requires tx */
+#define SPI_CONTROLLER_HALF_DUPLEX BIT(0) /* Can't do full duplex */
+#define SPI_CONTROLLER_NO_RX BIT(1) /* Can't do buffer read */
+#define SPI_CONTROLLER_NO_TX BIT(2) /* Can't do buffer write */
+#define SPI_CONTROLLER_MUST_RX BIT(3) /* Requires rx */
+#define SPI_CONTROLLER_MUST_TX BIT(4) /* Requires tx */
#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */
- /* flag indicating this is an SPI slave controller */
+ /* Flag indicating if the allocation of this struct is devres-managed */
+ bool devm_allocated;
+
+ /* Flag indicating this is an SPI slave controller */
bool slave;
/*
@@ -513,11 +548,14 @@ struct spi_controller {
/* I/O mutex */
struct mutex io_mutex;
- /* lock and mutex for SPI bus locking */
+ /* Used to avoid adding the same CS twice */
+ struct mutex add_lock;
+
+ /* Lock and mutex for SPI bus locking */
spinlock_t bus_lock_spinlock;
struct mutex bus_lock_mutex;
- /* flag indicating that the SPI bus is locked for exclusive use */
+ /* Flag indicating that the SPI bus is locked for exclusive use */
bool bus_lock_flag;
/* Setup mode and clock, etc (spi driver may call many times).
@@ -536,10 +574,9 @@ struct spi_controller {
* to configure specific CS timing through spi_set_cs_timing() after
* spi_setup().
*/
- int (*set_cs_timing)(struct spi_device *spi, struct spi_delay *setup,
- struct spi_delay *hold, struct spi_delay *inactive);
+ int (*set_cs_timing)(struct spi_device *spi);
- /* bidirectional bulk transfers
+ /* Bidirectional bulk transfers
*
* + The transfer() method may not sleep; its main role is
* just to add the message to the queue.
@@ -561,7 +598,7 @@ struct spi_controller {
int (*transfer)(struct spi_device *spi,
struct spi_message *mesg);
- /* called on release() to free memory provided by spi_controller */
+ /* Called on release() to free memory provided by spi_controller */
void (*cleanup)(struct spi_device *spi);
/*
@@ -574,6 +611,9 @@ struct spi_controller {
bool (*can_dma)(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer);
+ struct device *dma_map_dev;
+ struct device *cur_rx_dma_dev;
+ struct device *cur_tx_dma_dev;
/*
* These hooks are for drivers that want to use the generic
@@ -582,19 +622,22 @@ struct spi_controller {
* Over time we expect SPI drivers to be phased over to this API.
*/
bool queued;
- struct kthread_worker kworker;
- struct task_struct *kworker_task;
+ struct kthread_worker *kworker;
struct kthread_work pump_messages;
spinlock_t queue_lock;
struct list_head queue;
struct spi_message *cur_msg;
- bool idling;
+ struct completion cur_msg_completion;
+ bool cur_msg_incomplete;
+ bool cur_msg_need_completion;
bool busy;
bool running;
bool rt;
bool auto_runtime_pm;
- bool cur_msg_prepared;
bool cur_msg_mapped;
+ char last_cs;
+ bool last_cs_mode_high;
+ bool fallback;
struct completion xfer_completion;
size_t max_dma_len;
@@ -610,7 +653,7 @@ struct spi_controller {
/*
* These hooks are for drivers that use a generic implementation
- * of transfer_one_message() provied by the core.
+ * of transfer_one_message() provided by the core.
*/
void (*set_cs)(struct spi_device *spi, bool enable);
int (*transfer_one)(struct spi_controller *ctlr, struct spi_device *spi,
@@ -620,27 +663,22 @@ struct spi_controller {
/* Optimized handlers for SPI memory-like operations. */
const struct spi_controller_mem_ops *mem_ops;
-
- /* CS delays */
- struct spi_delay cs_setup;
- struct spi_delay cs_hold;
- struct spi_delay cs_inactive;
+ const struct spi_controller_mem_caps *mem_caps;
/* gpio chip select */
- int *cs_gpios;
struct gpio_desc **cs_gpiods;
bool use_gpio_descriptors;
- u8 unused_native_cs;
- u8 max_native_cs;
+ s8 unused_native_cs;
+ s8 max_native_cs;
- /* statistics */
- struct spi_statistics statistics;
+ /* Statistics */
+ struct spi_statistics __percpu *pcpu_statistics;
/* DMA channels for use with core dmaengine helpers */
struct dma_chan *dma_tx;
struct dma_chan *dma_rx;
- /* dummy data for full duplex devices */
+ /* Dummy data for full duplex devices */
void *dummy_rx;
void *dummy_tx;
@@ -654,6 +692,10 @@ struct spi_controller {
/* Interrupt enable state during PTP system timestamping */
unsigned long irq_flags;
+
+ /* Flag for enabling opportunistic skipping of the queue in spi_sync */
+ bool queue_empty;
+ bool must_async;
};
static inline void *spi_controller_get_devdata(struct spi_controller *ctlr)
@@ -702,7 +744,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr,
struct spi_transfer *xfer,
size_t progress, bool irqs_off);
-/* the spi driver core manages memory for the spi_controller classdev */
+/* The spi driver core manages memory for the spi_controller classdev */
extern struct spi_controller *__spi_alloc_controller(struct device *host,
unsigned int size, bool slave);
@@ -721,12 +763,36 @@ static inline struct spi_controller *spi_alloc_slave(struct device *host,
return __spi_alloc_controller(host, size, true);
}
+struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
+ unsigned int size,
+ bool slave);
+
+static inline struct spi_controller *devm_spi_alloc_master(struct device *dev,
+ unsigned int size)
+{
+ return __devm_spi_alloc_controller(dev, size, false);
+}
+
+static inline struct spi_controller *devm_spi_alloc_slave(struct device *dev,
+ unsigned int size)
+{
+ if (!IS_ENABLED(CONFIG_SPI_SLAVE))
+ return NULL;
+
+ return __devm_spi_alloc_controller(dev, size, true);
+}
+
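A minimal probe sketch for the devres-managed variant (my_spi_probe and the
extra-size value are illustrative); note the error paths need no
spi_controller_put(), since devres owns the allocation:

	static int my_spi_probe(struct platform_device *pdev)
	{
		struct spi_controller *ctlr;

		ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(u32));
		if (!ctlr)
			return -ENOMEM;

		ctlr->bus_num = -1;		/* assign dynamically */
		ctlr->num_chipselect = 1;

		return devm_spi_register_controller(&pdev->dev, ctlr);
	}
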
extern int spi_register_controller(struct spi_controller *ctlr);
extern int devm_spi_register_controller(struct device *dev,
struct spi_controller *ctlr);
extern void spi_unregister_controller(struct spi_controller *ctlr);
-extern struct spi_controller *spi_busnum_to_master(u16 busnum);
+#if IS_ENABLED(CONFIG_ACPI)
+extern struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
+ struct acpi_device *adev,
+ int index);
+int acpi_spi_count_resources(struct acpi_device *adev);
+#endif
/*
* SPI resource management while processing a SPI message
@@ -748,18 +814,9 @@ typedef void (*spi_res_release_t)(struct spi_controller *ctlr,
struct spi_res {
struct list_head entry;
spi_res_release_t release;
- unsigned long long data[]; /* guarantee ull alignment */
+ unsigned long long data[]; /* Guarantee ull alignment */
};
-extern void *spi_res_alloc(struct spi_device *spi,
- spi_res_release_t release,
- size_t size, gfp_t gfp);
-extern void spi_res_add(struct spi_message *message, void *res);
-extern void spi_res_free(void *res);
-
-extern void spi_res_release(struct spi_controller *ctlr,
- struct spi_message *message);
-
/*---------------------------------------------------------------------------*/
/*
@@ -794,15 +851,14 @@ extern void spi_res_release(struct spi_controller *ctlr,
* transfer. If 0 the default (from @spi_device) is used.
* @bits_per_word: select a bits_per_word other than the device default
* for this transfer. If 0 the default (from @spi_device) is used.
+ * @dummy_data: indicates that the transfer consists of dummy bytes.
+ * @cs_off: performs the transfer with chipselect off.
* @cs_change: affects chipselect after this transfer completes
* @cs_change_delay: delay between cs deassert and assert when
* @cs_change is set and @spi_transfer is not the last in @spi_message
* @delay: delay to be introduced after this transfer before
* (optionally) changing the chipselect status, then starting
* the next transfer or completing this @spi_message.
- * @delay_usecs: microseconds to delay after this transfer before
- * (optionally) changing the chipselect status, then starting
- * the next transfer or completing this @spi_message.
* @word_delay: inter word delay to be introduced after each word size
* (set by bits_per_word) transmission.
* @effective_speed_hz: the effective SCK-speed that was used to
@@ -834,12 +890,8 @@ extern void spi_res_release(struct spi_controller *ctlr,
* processed the word, i.e. the "pre" timestamp should be taken before
* transmitting the "pre" word, and the "post" timestamp after receiving
* transmit confirmation from the controller for the "post" word.
- * @timestamped_pre: Set by the SPI controller driver to denote it has acted
- * upon the @ptp_sts request. Not set when the SPI core has taken care of
- * the task. SPI device drivers are free to print a warning if this comes
- * back unset and they need the better resolution.
- * @timestamped_post: See above. The reason why both exist is that these
- * booleans are also used to keep state in the core SPI logic.
+ * @timestamped: true if the transfer has been timestamped
+ * @error: Error status logged by the SPI controller driver.
*
* SPI transfers always write the same number of bytes as they read.
* Protocol drivers should always provide @rx_buf and/or @tx_buf.
@@ -896,7 +948,7 @@ extern void spi_res_release(struct spi_controller *ctlr,
* and its transfers, ignore them until its completion callback.
*/
struct spi_transfer {
- /* it's ok if tx_buf == rx_buf (right?)
+ /* It's ok if tx_buf == rx_buf (right?)
* for MicroWire, one buffer must be null
* buffers must work with dma_*map_single() calls, unless
* spi_message.is_dma_mapped reports a pre-existing mapping
@@ -910,6 +962,8 @@ struct spi_transfer {
struct sg_table tx_sg;
struct sg_table rx_sg;
+ unsigned dummy_data:1;
+ unsigned cs_off:1;
unsigned cs_change:1;
unsigned tx_nbits:3;
unsigned rx_nbits:3;
@@ -917,7 +971,6 @@ struct spi_transfer {
#define SPI_NBITS_DUAL 0x02 /* 2bits transfer */
#define SPI_NBITS_QUAD 0x04 /* 4bits transfer */
u8 bits_per_word;
- u16 delay_usecs;
struct spi_delay delay;
struct spi_delay cs_change_delay;
struct spi_delay word_delay;
@@ -930,10 +983,12 @@ struct spi_transfer {
struct ptp_system_timestamp *ptp_sts;
- bool timestamped_pre;
- bool timestamped_post;
+ bool timestamped;
struct list_head transfer_list;
+
+#define SPI_TRANS_FAIL_NO_START BIT(0)
+ u16 error;
};
/**
@@ -951,12 +1006,13 @@ struct spi_transfer {
* @queue: for use by whichever driver currently owns the message
* @state: for use by whichever driver currently owns the message
* @resources: for resource management when the spi message is processed
+ * @prepared: spi_prepare_message was called for this message
*
* A @spi_message is used to execute an atomic sequence of data transfers,
* each represented by a struct spi_transfer. The sequence is "atomic"
* in the sense that no other spi_message may use that SPI bus until that
* sequence completes. On some systems, many such sequences can execute as
- * as single programmed DMA transfer. On all systems, these messages are
+ * a single programmed DMA transfer. On all systems, these messages are
* queued, and might complete after transactions to other devices. Messages
* sent to a given spi_device are always executed in FIFO order.
*
@@ -984,22 +1040,25 @@ struct spi_message {
* tell them about such special cases.
*/
- /* completion is reported through a callback */
+ /* Completion is reported through a callback */
void (*complete)(void *context);
void *context;
unsigned frame_length;
unsigned actual_length;
int status;
- /* for optional use by whatever driver currently owns the
+ /* For optional use by whatever driver currently owns the
* spi_message ... between calls to spi_async and then later
* complete(), that's the spi_controller controller driver.
*/
struct list_head queue;
void *state;
- /* list of spi_res reources when the spi message is processed */
+	/* List of spi_res resources when the spi message is processed */
struct list_head resources;
+
+ /* spi_prepare_message() was called for this message */
+ bool prepared;
};
static inline void spi_message_init_no_memset(struct spi_message *m)
@@ -1029,14 +1088,6 @@ spi_transfer_del(struct spi_transfer *t)
static inline int
spi_transfer_delay_exec(struct spi_transfer *t)
{
- struct spi_delay d;
-
- if (t->delay_usecs) {
- d.value = t->delay_usecs;
- d.unit = SPI_DELAY_UNIT_USECS;
- return spi_delay_exec(&d, NULL);
- }
-
return spi_delay_exec(&t->delay, t);
}
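
With delay_usecs gone, callers express the post-transfer delay through
struct spi_delay instead; a migration sketch (buf is assumed):

	struct spi_transfer xfer = {
		.tx_buf	= buf,
		.len	= 4,
		/* was: .delay_usecs = 10, */
		.delay	= {
			.value	= 10,
			.unit	= SPI_DELAY_UNIT_USECS,
		},
	};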
@@ -1087,15 +1138,8 @@ static inline void spi_message_free(struct spi_message *m)
kfree(m);
}
-extern int spi_set_cs_timing(struct spi_device *spi,
- struct spi_delay *setup,
- struct spi_delay *hold,
- struct spi_delay *inactive);
-
extern int spi_setup(struct spi_device *spi);
extern int spi_async(struct spi_device *spi, struct spi_message *message);
-extern int spi_async_locked(struct spi_device *spi,
- struct spi_message *message);
extern int spi_slave_abort(struct spi_device *spi);
static inline size_t
@@ -1118,7 +1162,7 @@ spi_max_transfer_size(struct spi_device *spi)
if (ctlr->max_transfer_size)
tr_max = ctlr->max_transfer_size(spi);
- /* transfer size limit must not be greater than messsage size limit */
+ /* Transfer size limit must not be greater than message size limit */
return min(tr_max, msg_max);
}
@@ -1178,15 +1222,6 @@ struct spi_replaced_transfers {
struct spi_transfer inserted_transfers[];
};
-extern struct spi_replaced_transfers *spi_replace_transfers(
- struct spi_message *msg,
- struct spi_transfer *xfer_first,
- size_t remove,
- size_t insert,
- spi_replaced_release_t release,
- size_t extradatasize,
- gfp_t gfp);
-
/*---------------------------------------------------------------------------*/
/* SPI transfer transformation methods */
@@ -1219,7 +1254,7 @@ extern int spi_bus_unlock(struct spi_controller *ctlr);
*
* For more specific semantics see spi_sync().
*
- * Return: Return: zero on success, else a negative error code.
+ * Return: zero on success, else a negative error code.
*/
static inline int
spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers,
@@ -1278,7 +1313,7 @@ spi_read(struct spi_device *spi, void *buf, size_t len)
return spi_sync_transfer(spi, &t, 1);
}
-/* this copies txbuf and rxbuf data; for small transfers only! */
+/* This copies txbuf and rxbuf data; for small transfers only! */
extern int spi_write_then_read(struct spi_device *spi,
const void *txbuf, unsigned n_tx,
void *rxbuf, unsigned n_rx);
@@ -1301,7 +1336,7 @@ static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd)
status = spi_write_then_read(spi, &cmd, 1, &result, 1);
- /* return negative errno or unsigned value */
+ /* Return negative errno or unsigned value */
return (status < 0) ? status : result;
}
@@ -1326,7 +1361,7 @@ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
status = spi_write_then_read(spi, &cmd, 1, &result, 2);
- /* return negative errno or unsigned value */
+ /* Return negative errno or unsigned value */
return (status < 0) ? status : result;
}
@@ -1378,7 +1413,7 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
* @modalias: Initializes spi_device.modalias; identifies the driver.
* @platform_data: Initializes spi_device.platform_data; the particular
* data stored there is driver-specific.
- * @properties: Additional device properties for the device.
+ * @swnode: Software node for the device.
* @controller_data: Initializes spi_device.controller_data; some
* controllers need hints about hardware setup, e.g. for DMA.
* @irq: Initializes spi_device.irq; depends on how the board is wired.
@@ -1406,21 +1441,20 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
* are active in some dynamic board configuration models.
*/
struct spi_board_info {
- /* the device name and module name are coupled, like platform_bus;
+ /* The device name and module name are coupled, like platform_bus;
* "modalias" is normally the driver name.
*
* platform_data goes to spi_device.dev.platform_data,
* controller_data goes to spi_device.controller_data,
- * device properties are copied and attached to spi_device,
* irq is copied too
*/
char modalias[SPI_NAME_SIZE];
const void *platform_data;
- const struct property_entry *properties;
+ const struct software_node *swnode;
void *controller_data;
int irq;
- /* slower signaling on noisy or low voltage boards */
+ /* Slower signaling on noisy or low voltage boards */
u32 max_speed_hz;
@@ -1449,7 +1483,7 @@ struct spi_board_info {
extern int
spi_register_board_info(struct spi_board_info const *info, unsigned n);
#else
-/* board init code may ignore whether SPI is configured or not */
+/* Board init code may ignore whether SPI is configured or not */
static inline int
spi_register_board_info(struct spi_board_info const *info, unsigned n)
{ return 0; }
@@ -1461,7 +1495,7 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
* normally that would be handled by spi_unregister_controller().
*
* You can also use spi_alloc_device() and spi_add_device() to use a two
- * stage registration sequence for each spi_device. This gives the caller
+ * stage registration sequence for each spi_device. This gives the caller
* some more control over the spi_device structure before it is registered,
* but requires that caller to initialize fields that would otherwise
* be defined using the board info.
@@ -1486,23 +1520,6 @@ spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer)
return list_is_last(&xfer->transfer_list, &ctlr->cur_msg->transfers);
}
-/* OF support code */
-#if IS_ENABLED(CONFIG_OF)
-
-/* must call put_device() when done with returned spi_device device */
-extern struct spi_device *
-of_find_spi_device_by_node(struct device_node *node);
-
-#else
-
-static inline struct spi_device *
-of_find_spi_device_by_node(struct device_node *node)
-{
- return NULL;
-}
-
-#endif /* IS_ENABLED(CONFIG_OF) */
-
/* Compatibility layer */
#define spi_master spi_controller
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 031ce8617df8..1341f7d62da4 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H
+#define __LINUX_INSIDE_SPINLOCK_H
/*
* include/linux/spinlock.h - generic spinlock/rwlock declarations
@@ -12,6 +13,8 @@
* asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
* initializers
*
+ * linux/spinlock_types_raw:
+ * The raw types and initializers
* linux/spinlock_types.h:
* defines the generic type and initializers
*
@@ -31,6 +34,8 @@
* contains the generic, simplified UP spinlock type.
* (which is an empty structure on non-debug builds)
*
+ * linux/spinlock_types_raw:
+ * The raw RT types and initializers
* linux/spinlock_types.h:
* defines the generic type and initializers
*
@@ -53,9 +58,9 @@
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
-#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
+#include <linux/lockdep.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>
@@ -75,7 +80,7 @@
#define LOCK_SECTION_END \
".previous\n\t"
-#define __lockfunc __attribute__((section(".spinlock.text")))
+#define __lockfunc __section(".spinlock.text")
/*
* Pull the arch_spinlock_t and arch_rwlock_t definitions:
@@ -93,12 +98,13 @@
#ifdef CONFIG_DEBUG_SPINLOCK
extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
- struct lock_class_key *key);
-# define raw_spin_lock_init(lock) \
-do { \
- static struct lock_class_key __key; \
- \
- __raw_spin_lock_init((lock), #lock, &__key); \
+ struct lock_class_key *key, short inner);
+
+# define raw_spin_lock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \
} while (0)
#else
@@ -166,12 +172,11 @@ do { \
* Architectures that can implement ACQUIRE better need to take care.
*/
#ifndef smp_mb__after_spinlock
-#define smp_mb__after_spinlock() do { } while (0)
+#define smp_mb__after_spinlock() kcsan_mb()
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
-#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
extern int do_raw_spin_trylock(raw_spinlock_t *lock);
extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
@@ -182,18 +187,6 @@ static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
mmiowb_spin_lock();
}
-#ifndef arch_spin_lock_flags
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#endif
-
-static inline void
-do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
-{
- __acquire(lock);
- arch_spin_lock_flags(&lock->raw_lock, *flags);
- mmiowb_spin_lock();
-}
-
static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
int ret = arch_spin_trylock(&(lock)->raw_lock);
@@ -306,8 +299,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
1 : ({ local_irq_restore(flags); 0; }); \
})
-/* Include rwlock functions */
+#ifndef CONFIG_PREEMPT_RT
+/* Include rwlock functions for !RT */
#include <linux/rwlock.h>
+#endif
/*
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
@@ -318,6 +313,9 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
# include <linux/spinlock_api_up.h>
#endif
+/* Non PREEMPT_RT kernel, map to raw spinlocks: */
+#ifndef CONFIG_PREEMPT_RT
+
/*
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
@@ -327,12 +325,26 @@ static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
return &lock->rlock;
}
-#define spin_lock_init(_lock) \
-do { \
- spinlock_check(_lock); \
- raw_spin_lock_init(&(_lock)->rlock); \
+#ifdef CONFIG_DEBUG_SPINLOCK
+
+# define spin_lock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __raw_spin_lock_init(spinlock_check(lock), \
+ #lock, &__key, LD_WAIT_CONFIG); \
} while (0)
+#else
+
+# define spin_lock_init(_lock) \
+do { \
+ spinlock_check(_lock); \
+ *(_lock) = __SPIN_LOCK_UNLOCKED(_lock); \
+} while (0)
+
+#endif
+
static __always_inline void spin_lock(spinlock_t *lock)
{
raw_spin_lock(&lock->rlock);
@@ -438,6 +450,10 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
#define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
+#else /* !CONFIG_PREEMPT_RT */
+# include <linux/spinlock_rt.h>
+#endif /* CONFIG_PREEMPT_RT */
+
/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs above definitions)
@@ -477,4 +493,5 @@ int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
void free_bucket_spinlocks(spinlock_t *locks);
+#undef __LINUX_INSIDE_SPINLOCK_H
#endif /* __LINUX_SPINLOCK_H */
diff --git a/include/linux/spinlock_api.h b/include/linux/spinlock_api.h
new file mode 100644
index 000000000000..6338b27f98df
--- /dev/null
+++ b/include/linux/spinlock_api.h
@@ -0,0 +1 @@
+#include <linux/spinlock.h>
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 19a9be9d97ee..89eb6f4c659c 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -1,7 +1,7 @@
#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H
-#ifndef __LINUX_SPINLOCK_H
+#ifndef __LINUX_INSIDE_SPINLOCK_H
# error "please don't include this file directly"
#endif
@@ -108,16 +108,7 @@ static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
local_irq_save(flags);
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- /*
- * On lockdep we dont want the hand-coded irq-enable of
- * do_raw_spin_lock_flags() code, because lockdep assumes
- * that interrupts are not re-enabled during lock-acquire:
- */
-#ifdef CONFIG_LOCKDEP
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
-#else
- do_raw_spin_lock_flags(lock, &flags);
-#endif
return flags;
}
@@ -187,6 +178,9 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
return 0;
}
+/* PREEMPT_RT has its own rwlock implementation */
+#ifndef CONFIG_PREEMPT_RT
#include <linux/rwlock_api_smp.h>
+#endif
#endif /* __LINUX_SPINLOCK_API_SMP_H */
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index d0d188861ad6..819aeba1c87e 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -1,7 +1,7 @@
#ifndef __LINUX_SPINLOCK_API_UP_H
#define __LINUX_SPINLOCK_API_UP_H
-#ifndef __LINUX_SPINLOCK_H
+#ifndef __LINUX_INSIDE_SPINLOCK_H
# error "please don't include this file directly"
#endif
@@ -59,6 +59,7 @@
#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
#define _raw_read_lock(lock) __LOCK(lock)
#define _raw_write_lock(lock) __LOCK(lock)
+#define _raw_write_lock_nested(lock, subclass) __LOCK(lock)
#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
#define _raw_read_lock_bh(lock) __LOCK_BH(lock)
#define _raw_write_lock_bh(lock) __LOCK_BH(lock)
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
new file mode 100644
index 000000000000..61c49b16f69a
--- /dev/null
+++ b/include/linux/spinlock_rt.h
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_SPINLOCK_RT_H
+#define __LINUX_SPINLOCK_RT_H
+
+#ifndef __LINUX_INSIDE_SPINLOCK_H
+#error Do not include directly. Use spinlock.h
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
+ struct lock_class_key *key, bool percpu);
+#else
+static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
+ struct lock_class_key *key, bool percpu)
+{
+}
+#endif
+
+#define spin_lock_init(slock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ rt_mutex_base_init(&(slock)->lock); \
+ __rt_spin_lock_init(slock, #slock, &__key, false); \
+} while (0)
+
+#define local_spin_lock_init(slock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ rt_mutex_base_init(&(slock)->lock); \
+ __rt_spin_lock_init(slock, #slock, &__key, true); \
+} while (0)
+
+extern void rt_spin_lock(spinlock_t *lock);
+extern void rt_spin_lock_nested(spinlock_t *lock, int subclass);
+extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock);
+extern void rt_spin_unlock(spinlock_t *lock);
+extern void rt_spin_lock_unlock(spinlock_t *lock);
+extern int rt_spin_trylock_bh(spinlock_t *lock);
+extern int rt_spin_trylock(spinlock_t *lock);
+
+static __always_inline void spin_lock(spinlock_t *lock)
+{
+ rt_spin_lock(lock);
+}
+
+#ifdef CONFIG_LOCKDEP
+# define __spin_lock_nested(lock, subclass) \
+ rt_spin_lock_nested(lock, subclass)
+
+# define __spin_lock_nest_lock(lock, nest_lock) \
+ do { \
+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
+ rt_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
+ } while (0)
+# define __spin_lock_irqsave_nested(lock, flags, subclass) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ __spin_lock_nested(lock, subclass); \
+ } while (0)
+
+#else
+ /*
+ * Always evaluate the 'subclass' argument to avoid that the compiler
+ * warns about set-but-not-used variables when building with
+ * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
+ */
+# define __spin_lock_nested(lock, subclass) spin_lock(((void)(subclass), (lock)))
+# define __spin_lock_nest_lock(lock, subclass) spin_lock(((void)(subclass), (lock)))
+# define __spin_lock_irqsave_nested(lock, flags, subclass) \
+ spin_lock_irqsave(((void)(subclass), (lock)), flags)
+#endif
+
+#define spin_lock_nested(lock, subclass) \
+ __spin_lock_nested(lock, subclass)
+
+#define spin_lock_nest_lock(lock, nest_lock) \
+ __spin_lock_nest_lock(lock, nest_lock)
+
+#define spin_lock_irqsave_nested(lock, flags, subclass) \
+ __spin_lock_irqsave_nested(lock, flags, subclass)
+
+static __always_inline void spin_lock_bh(spinlock_t *lock)
+{
+ /* Investigate: Drop bh when blocking ? */
+ local_bh_disable();
+ rt_spin_lock(lock);
+}
+
+static __always_inline void spin_lock_irq(spinlock_t *lock)
+{
+ rt_spin_lock(lock);
+}
+
+#define spin_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ spin_lock(lock); \
+ } while (0)
+
+static __always_inline void spin_unlock(spinlock_t *lock)
+{
+ rt_spin_unlock(lock);
+}
+
+static __always_inline void spin_unlock_bh(spinlock_t *lock)
+{
+ rt_spin_unlock(lock);
+ local_bh_enable();
+}
+
+static __always_inline void spin_unlock_irq(spinlock_t *lock)
+{
+ rt_spin_unlock(lock);
+}
+
+static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
+ unsigned long flags)
+{
+ rt_spin_unlock(lock);
+}
+
+#define spin_trylock(lock) \
+ __cond_lock(lock, rt_spin_trylock(lock))
+
+#define spin_trylock_bh(lock) \
+ __cond_lock(lock, rt_spin_trylock_bh(lock))
+
+#define spin_trylock_irq(lock) \
+ __cond_lock(lock, rt_spin_trylock(lock))
+
+#define __spin_trylock_irqsave(lock, flags) \
+({ \
+ int __locked; \
+ \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ __locked = spin_trylock(lock); \
+ __locked; \
+})
+
+#define spin_trylock_irqsave(lock, flags) \
+ __cond_lock(lock, __spin_trylock_irqsave(lock, flags))
+
+#define spin_is_contended(lock) (((void)(lock), 0))
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+ return rt_mutex_base_is_locked(&lock->lock);
+}
+
+#define assert_spin_locked(lock) BUG_ON(!spin_is_locked(lock))
+
+#include <linux/rwlock_rt.h>
+
+#endif
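
The same locking source now builds for both kernel flavours; on PREEMPT_RT,
spin_lock_irqsave() neither disables interrupts nor returns real flags, it
zeroes them and takes the underlying rtmutex. A sketch (demo_lock is
illustrative):

	static DEFINE_SPINLOCK(demo_lock);

	static void demo_critical_section(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&demo_lock, flags);	/* RT: flags = 0, IRQs stay on */
		/* may sleep on RT, stays atomic on !RT */
		spin_unlock_irqrestore(&demo_lock, flags);
	}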
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 24b4e6f2c1a2..2dfa35ffec76 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -9,55 +9,11 @@
* Released under the General Public License (GPL).
*/
-#if defined(CONFIG_SMP)
-# include <asm/spinlock_types.h>
-#else
-# include <linux/spinlock_types_up.h>
-#endif
-
-#include <linux/lockdep.h>
-
-typedef struct raw_spinlock {
- arch_spinlock_t raw_lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
- unsigned int magic, owner_cpu;
- void *owner;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} raw_spinlock_t;
-
-#define SPINLOCK_MAGIC 0xdead4ead
-
-#define SPINLOCK_OWNER_INIT ((void *)-1L)
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
-#else
-# define SPIN_DEP_MAP_INIT(lockname)
-#endif
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-# define SPIN_DEBUG_INIT(lockname) \
- .magic = SPINLOCK_MAGIC, \
- .owner_cpu = -1, \
- .owner = SPINLOCK_OWNER_INIT,
-#else
-# define SPIN_DEBUG_INIT(lockname)
-#endif
-
-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
- { \
- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
- SPIN_DEBUG_INIT(lockname) \
- SPIN_DEP_MAP_INIT(lockname) }
+#include <linux/spinlock_types_raw.h>
-#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
-
-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+#ifndef CONFIG_PREEMPT_RT
+/* Non PREEMPT_RT kernels map spinlock to raw_spinlock */
typedef struct spinlock {
union {
struct raw_spinlock rlock;
@@ -72,14 +28,49 @@ typedef struct spinlock {
};
} spinlock_t;
+#define ___SPIN_LOCK_INITIALIZER(lockname) \
+ { \
+ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+ SPIN_DEBUG_INIT(lockname) \
+ SPIN_DEP_MAP_INIT(lockname) }
+
#define __SPIN_LOCK_INITIALIZER(lockname) \
- { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+ { { .rlock = ___SPIN_LOCK_INITIALIZER(lockname) } }
#define __SPIN_LOCK_UNLOCKED(lockname) \
- (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
+ (spinlock_t) __SPIN_LOCK_INITIALIZER(lockname)
#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+#else /* !CONFIG_PREEMPT_RT */
+
+/* PREEMPT_RT kernels map spinlock to rt_mutex */
+#include <linux/rtmutex.h>
+
+typedef struct spinlock {
+ struct rt_mutex_base lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} spinlock_t;
+
+#define __SPIN_LOCK_UNLOCKED(name) \
+ { \
+ .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \
+ SPIN_DEP_MAP_INIT(name) \
+ }
+
+#define __LOCAL_SPIN_LOCK_UNLOCKED(name) \
+ { \
+ .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \
+ LOCAL_SPIN_DEP_MAP_INIT(name) \
+ }
+
+#define DEFINE_SPINLOCK(name) \
+ spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
+
+#endif /* CONFIG_PREEMPT_RT */
+
#include <linux/rwlock_types.h>
#endif /* __LINUX_SPINLOCK_TYPES_H */
diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
new file mode 100644
index 000000000000..91cb36b65a17
--- /dev/null
+++ b/include/linux/spinlock_types_raw.h
@@ -0,0 +1,73 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
+#define __LINUX_SPINLOCK_TYPES_RAW_H
+
+#include <linux/types.h>
+
+#if defined(CONFIG_SMP)
+# include <asm/spinlock_types.h>
+#else
+# include <linux/spinlock_types_up.h>
+#endif
+
+#include <linux/lockdep_types.h>
+
+typedef struct raw_spinlock {
+ arch_spinlock_t raw_lock;
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned int magic, owner_cpu;
+ void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} raw_spinlock_t;
+
+#define SPINLOCK_MAGIC 0xdead4ead
+
+#define SPINLOCK_OWNER_INIT ((void *)-1L)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RAW_SPIN_DEP_MAP_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_SPIN, \
+ }
+# define SPIN_DEP_MAP_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_CONFIG, \
+ }
+
+# define LOCAL_SPIN_DEP_MAP_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_CONFIG, \
+ .lock_type = LD_LOCK_PERCPU, \
+ }
+#else
+# define RAW_SPIN_DEP_MAP_INIT(lockname)
+# define SPIN_DEP_MAP_INIT(lockname)
+# define LOCAL_SPIN_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_DEBUG_INIT(lockname) \
+ .magic = SPINLOCK_MAGIC, \
+ .owner_cpu = -1, \
+ .owner = SPINLOCK_OWNER_INIT,
+#else
+# define SPIN_DEBUG_INIT(lockname)
+#endif
+
+#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
+{ \
+ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+ SPIN_DEBUG_INIT(lockname) \
+ RAW_SPIN_DEP_MAP_INIT(lockname) }
+
+#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+#endif /* __LINUX_SPINLOCK_TYPES_RAW_H */
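A minimal sketch of how these initializers are typically consumed (lock names are illustrative, not from this patch). DEFINE_RAW_SPINLOCK() yields a lock that keeps spinning even on PREEMPT_RT, while DEFINE_SPINLOCK() maps onto an rt_mutex there:

#include <linux/spinlock.h>

/* Hypothetical example locks. */
static DEFINE_RAW_SPINLOCK(demo_hw_lock);	/* always spins, even on RT */
static DEFINE_SPINLOCK(demo_data_lock);		/* may sleep on PREEMPT_RT */

static void demo_update(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_hw_lock, flags);
	/* touch hardware registers */
	raw_spin_unlock_irqrestore(&demo_hw_lock, flags);

	spin_lock(&demo_data_lock);
	/* update shared data */
	spin_unlock(&demo_data_lock);
}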
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index c09b6407ae1b..7f86a2016ac5 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -1,7 +1,7 @@
#ifndef __LINUX_SPINLOCK_TYPES_UP_H
#define __LINUX_SPINLOCK_TYPES_UP_H
-#ifndef __LINUX_SPINLOCK_TYPES_H
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
# error "please don't include this file directly"
#endif
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 0ac9112c1bbe..c87204247592 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -1,7 +1,7 @@
#ifndef __LINUX_SPINLOCK_UP_H
#define __LINUX_SPINLOCK_UP_H
-#ifndef __LINUX_SPINLOCK_H
+#ifndef __LINUX_INSIDE_SPINLOCK_H
# error "please don't include this file directly"
#endif
@@ -62,7 +62,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
#define arch_spin_is_locked(lock) ((void)(lock), 0)
/* for sched/core.c and kernel_lock.c: */
# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0)
-# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0)
# define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0)
# define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 74b4911ac16d..a55179fd60fc 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -78,6 +78,12 @@ extern ssize_t add_to_pipe(struct pipe_inode_info *,
struct pipe_buffer *);
extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
splice_direct_actor *);
+extern long do_splice(struct file *in, loff_t *off_in,
+ struct file *out, loff_t *off_out,
+ size_t len, unsigned int flags);
+
+extern long do_tee(struct file *in, struct file *out, size_t len,
+ unsigned int flags);
/*
* for dynamic pipe sizing
diff --git a/include/linux/spmi.h b/include/linux/spmi.h
index 394a3f68bad5..eac1956a8727 100644
--- a/include/linux/spmi.h
+++ b/include/linux/spmi.h
@@ -138,6 +138,7 @@ struct spmi_driver {
struct device_driver driver;
int (*probe)(struct spmi_device *sdev);
void (*remove)(struct spmi_device *sdev);
+ void (*shutdown)(struct spmi_device *sdev);
};
static inline struct spmi_driver *to_spmi_driver(struct device_driver *d)
@@ -163,6 +164,9 @@ static inline void spmi_driver_unregister(struct spmi_driver *sdrv)
module_driver(__spmi_driver, spmi_driver_register, \
spmi_driver_unregister)
+struct device_node;
+
+struct spmi_device *spmi_device_from_of(struct device_node *np);
int spmi_register_read(struct spmi_device *sdev, u8 addr, u8 *buf);
int spmi_ext_register_read(struct spmi_device *sdev, u8 addr, u8 *buf,
size_t len);
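A sketch of a driver filling in the new shutdown hook (driver and function names are hypothetical); spmi_device_from_of() additionally lets other code look up the device from its DT node:

#include <linux/module.h>
#include <linux/spmi.h>

static int demo_probe(struct spmi_device *sdev)
{
	return 0;
}

static void demo_remove(struct spmi_device *sdev)
{
}

static void demo_shutdown(struct spmi_device *sdev)
{
	/* quiesce the device on reboot/poweroff */
}

static struct spmi_driver demo_spmi_driver = {
	.probe    = demo_probe,
	.remove   = demo_remove,
	.shutdown = demo_shutdown,
	.driver   = {
		.name = "demo-spmi",
	},
};
module_spmi_driver(demo_spmi_driver);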
diff --git a/include/linux/sram.h b/include/linux/sram.h
index 4fb405fb0480..d7dee19505c6 100644
--- a/include/linux/sram.h
+++ b/include/linux/sram.h
@@ -1,15 +1,5 @@
-/*
- * Generic SRAM Driver Interface
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Generic SRAM Driver Interface */
#ifndef __LINUX_SRAM_H__
#define __LINUX_SRAM_H__
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index e432cc92c73d..01226e4d960a 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -60,6 +60,15 @@ void cleanup_srcu_struct(struct srcu_struct *ssp);
int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
void synchronize_srcu(struct srcu_struct *ssp);
+unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
+unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
+bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);
+
+#ifdef CONFIG_SRCU
+void srcu_init(void);
+#else /* #ifdef CONFIG_SRCU */
+static inline void srcu_init(void) { }
+#endif /* #else #ifdef CONFIG_SRCU */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -108,7 +117,8 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
* lockdep_is_held() calls.
*/
#define srcu_dereference_check(p, ssp, c) \
- __rcu_dereference_check((p), (c) || srcu_read_lock_held(ssp), __rcu)
+ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
+ (c) || srcu_read_lock_held(ssp), __rcu)
/**
* srcu_dereference - fetch SRCU-protected pointer for later dereferencing
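A sketch of the new polled grace-period interface, assuming a caller with its own SRCU domain (names are hypothetical). The cookie returned by start_poll_synchronize_srcu() can later be checked without blocking:

#include <linux/srcu.h>

DEFINE_SRCU(demo_srcu);		/* hypothetical domain */

static unsigned long demo_cookie;

static void demo_begin_cleanup(void)
{
	/* Start a grace period (if needed) and remember where it began. */
	demo_cookie = start_poll_synchronize_srcu(&demo_srcu);
}

static bool demo_try_finish_cleanup(void)
{
	/* Non-blocking: true once all pre-existing readers are done. */
	return poll_state_synchronize_srcu(&demo_srcu, demo_cookie);
}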
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index 5a5a1941ca15..5aa5e0faf6a1 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -15,9 +15,10 @@
struct srcu_struct {
short srcu_lock_nesting[2]; /* srcu_read_lock() nesting depth. */
- short srcu_idx; /* Current reader array element. */
u8 srcu_gp_running; /* GP workqueue running? */
u8 srcu_gp_waiting; /* GP waiting for readers? */
+ unsigned long srcu_idx; /* Current reader array element in bit 0x2. */
+ unsigned long srcu_idx_max; /* Furthest future srcu_idx request. */
struct swait_queue_head srcu_wq;
/* Last srcu_read_unlock() wakes GP. */
struct rcu_head *srcu_cb_head; /* Pending callbacks: Head. */
@@ -59,8 +60,8 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp)
{
int idx;
- idx = READ_ONCE(ssp->srcu_idx);
- WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1);
+ idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
+ WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
return idx;
}
@@ -80,11 +81,13 @@ static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
{
int idx;
- idx = READ_ONCE(ssp->srcu_idx) & 0x1;
- pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
+ idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1;
+ pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd) gp: %lu->%lu\n",
tt, tf, idx,
- READ_ONCE(ssp->srcu_lock_nesting[!idx]),
- READ_ONCE(ssp->srcu_lock_nesting[idx]));
+ data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])),
+ data_race(READ_ONCE(ssp->srcu_lock_nesting[idx])),
+ data_race(READ_ONCE(ssp->srcu_idx)),
+ data_race(READ_ONCE(ssp->srcu_idx_max)));
}
#endif
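The reader index is now carried in bit 0x2 of a free-running counter, so the reader array slot is derived arithmetically rather than stored directly. A standalone sketch mirroring the __srcu_read_lock() arithmetic above:

/* counter -> reader slot, as in __srcu_read_lock() above */
static inline int demo_reader_slot(unsigned long srcu_idx)
{
	return ((srcu_idx + 1) & 0x2) >> 1;
}

/*
 * srcu_idx: 0 -> slot 0, 1 -> slot 1, 2 -> slot 1, 3 -> slot 0,
 * 4 -> slot 0, 5 -> slot 1, ... (two counter values per slot).
 */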
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 9cfcc8a756ae..e3014319d1ad 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -47,11 +47,9 @@ struct srcu_data {
*/
struct srcu_node {
spinlock_t __private lock;
- unsigned long srcu_have_cbs[4]; /* GP seq for children */
- /* having CBs, but only */
- /* is > ->srcu_gq_seq. */
- unsigned long srcu_data_have_cbs[4]; /* Which srcu_data structs */
- /* have CBs for given GP? */
+ unsigned long srcu_have_cbs[4]; /* GP seq for children having CBs, but only */
+ /* if greater than ->srcu_gp_seq. */
+ unsigned long srcu_data_have_cbs[4]; /* Which srcu_data structs have CBs for given GP? */
unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
struct srcu_node *srcu_parent; /* Next up in tree. */
int grplo; /* Least CPU for node. */
@@ -62,18 +60,24 @@ struct srcu_node {
* Per-SRCU-domain structure, similar in function to rcu_state.
*/
struct srcu_struct {
- struct srcu_node node[NUM_RCU_NODES]; /* Combining tree. */
+ struct srcu_node *node; /* Combining tree. */
struct srcu_node *level[RCU_NUM_LVLS + 1];
/* First node at each level. */
+ int srcu_size_state; /* Small-to-big transition state. */
struct mutex srcu_cb_mutex; /* Serialize CB preparation. */
- spinlock_t __private lock; /* Protect counters */
+ spinlock_t __private lock; /* Protect counters and size state. */
struct mutex srcu_gp_mutex; /* Serialize GP work. */
unsigned int srcu_idx; /* Current rdr array element. */
unsigned long srcu_gp_seq; /* Grace-period seq #. */
unsigned long srcu_gp_seq_needed; /* Latest gp_seq needed. */
unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
+ unsigned long srcu_gp_start; /* Last GP start timestamp (jiffies) */
unsigned long srcu_last_gp_end; /* Last GP end timestamp (ns) */
+ unsigned long srcu_size_jiffies; /* Current contention-measurement interval. */
+ unsigned long srcu_n_lock_retries; /* Contention events in current interval. */
+ unsigned long srcu_n_exp_nodelay; /* # expedited no-delays in current GP phase. */
struct srcu_data __percpu *sda; /* Per-CPU srcu_data array. */
+ bool sda_is_static; /* May ->sda be passed to free_percpu()? */
unsigned long srcu_barrier_seq; /* srcu_barrier seq #. */
struct mutex srcu_barrier_mutex; /* Serialize barrier ops. */
struct completion srcu_barrier_completion;
@@ -81,12 +85,23 @@ struct srcu_struct {
atomic_t srcu_barrier_cpu_cnt; /* # CPUs not yet posting a */
/* callback for the barrier */
/* operation. */
+ unsigned long reschedule_jiffies;
+ unsigned long reschedule_count;
struct delayed_work work;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
};
+/* Values for size state variable (->srcu_size_state). */
+#define SRCU_SIZE_SMALL 0
+#define SRCU_SIZE_ALLOC 1
+#define SRCU_SIZE_WAIT_BARRIER 2
+#define SRCU_SIZE_WAIT_CALL 3
+#define SRCU_SIZE_WAIT_CBS1 4
+#define SRCU_SIZE_WAIT_CBS2 5
+#define SRCU_SIZE_WAIT_CBS3 6
+#define SRCU_SIZE_WAIT_CBS4 7
+#define SRCU_SIZE_BIG 8
+
/* Values for state variable (bottom bits of ->srcu_gp_seq). */
#define SRCU_STATE_IDLE 0
#define SRCU_STATE_SCAN1 1
@@ -123,6 +138,7 @@ struct srcu_struct {
#ifdef MODULE
# define __DEFINE_SRCU(name, is_static) \
is_static struct srcu_struct name; \
+ extern struct srcu_struct * const __srcu_struct_##name; \
struct srcu_struct * const __srcu_struct_##name \
__section("___srcu_struct_ptrs") = &name
#else
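For completeness, a reader-side sketch against a tree-SRCU domain (struct and variable names are illustrative). The ->node combining tree is now allocated on demand as the domain transitions from SRCU_SIZE_SMALL toward SRCU_SIZE_BIG, but that is invisible to readers:

#include <linux/srcu.h>

struct demo_cfg {
	int value;
};

static struct demo_cfg __rcu *demo_cfg_ptr;	/* hypothetical */
DEFINE_SRCU(demo_cfg_srcu);

static int demo_read_value(void)
{
	struct demo_cfg *cfg;
	int idx, val = -1;

	idx = srcu_read_lock(&demo_cfg_srcu);
	cfg = srcu_dereference(demo_cfg_ptr, &demo_cfg_srcu);
	if (cfg)
		val = cfg->value;
	srcu_read_unlock(&demo_cfg_srcu, idx);
	return val;
}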
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 0d5a2691e7e9..f9b53acb4e02 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -7,7 +7,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/mod_devicetable.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
diff --git a/include/linux/ssb/ssb_driver_extif.h b/include/linux/ssb/ssb_driver_extif.h
index 3f8bc973d67d..19253bfacd1a 100644
--- a/include/linux/ssb/ssb_driver_extif.h
+++ b/include/linux/ssb/ssb_driver_extif.h
@@ -197,7 +197,7 @@ struct ssb_extif {
static inline bool ssb_extif_available(struct ssb_extif *extif)
{
- return 0;
+ return false;
}
static inline
diff --git a/include/linux/ssb/ssb_driver_gige.h b/include/linux/ssb/ssb_driver_gige.h
index 31593b34608e..28c145a51e57 100644
--- a/include/linux/ssb/ssb_driver_gige.h
+++ b/include/linux/ssb/ssb_driver_gige.h
@@ -76,7 +76,7 @@ static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev)
if (dev)
return !!(dev->dev->bus->sprom.boardflags_lo &
SSB_GIGE_BFL_ROBOSWITCH);
- return 0;
+ return false;
}
/* Returns whether we can only do one DMA at once. */
@@ -86,7 +86,7 @@ static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev)
if (dev)
return ((dev->dev->bus->chip_id == 0x4785) &&
(dev->dev->bus->chip_rev < 2));
- return 0;
+ return false;
}
/* Returns whether we must flush posted writes. */
@@ -95,7 +95,7 @@ static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev)
struct ssb_gige *dev = pdev_to_ssb_gige(pdev);
if (dev)
return (dev->dev->bus->chip_id == 0x4785);
- return 0;
+ return false;
}
/* Get the device MAC address */
@@ -159,7 +159,7 @@ static inline void ssb_gige_exit(void)
static inline bool pdev_is_ssb_gige_core(struct pci_dev *pdev)
{
- return 0;
+ return false;
}
static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev)
{
@@ -167,19 +167,19 @@ static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev)
}
static inline bool ssb_gige_is_rgmii(struct pci_dev *pdev)
{
- return 0;
+ return false;
}
static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev)
{
- return 0;
+ return false;
}
static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev)
{
- return 0;
+ return false;
}
static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev)
{
- return 0;
+ return false;
}
static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
{
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index 3efa97d482cb..9ca7798d7a31 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -11,7 +11,53 @@
#ifndef _LINUX_STACKDEPOT_H
#define _LINUX_STACKDEPOT_H
+#include <linux/gfp.h>
+
typedef u32 depot_stack_handle_t;
+/*
+ * Number of bits in the handle that stack depot doesn't use. Users may store
+ * information in them.
+ */
+#define STACK_DEPOT_EXTRA_BITS 5
+
+depot_stack_handle_t __stack_depot_save(unsigned long *entries,
+ unsigned int nr_entries,
+ unsigned int extra_bits,
+ gfp_t gfp_flags, bool can_alloc);
+
+/*
+ * Every user of stack depot has to call stack_depot_init() during its own init
+ * when it's decided that it will be calling stack_depot_save() later. This is
+ * recommended for e.g. modules initialized later in the boot process, when
+ * slab_is_available() is true.
+ *
+ * The alternative is to select STACKDEPOT_ALWAYS_INIT to have stack depot
+ * enabled as part of mm_init(), for subsystems where it's known at compile time
+ * that stack depot will be used.
+ *
+ * Another alternative is to call stack_depot_want_early_init(), when the
+ * decision to use stack depot is taken e.g. when evaluating kernel boot
+ * parameters, which precedes the enablement point in mm_init().
+ *
+ * stack_depot_init() and stack_depot_want_early_init() can be called regardless
+ * of CONFIG_STACKDEPOT and are no-op when disabled. The actual save/fetch/print
+ * functions should only be called from code that makes sure CONFIG_STACKDEPOT
+ * is enabled.
+ */
+#ifdef CONFIG_STACKDEPOT
+int stack_depot_init(void);
+
+void __init stack_depot_want_early_init(void);
+
+/* This is supposed to be called only from mm_init() */
+int __init stack_depot_early_init(void);
+#else
+static inline int stack_depot_init(void) { return 0; }
+
+static inline void stack_depot_want_early_init(void) { }
+
+static inline int stack_depot_early_init(void) { return 0; }
+#endif
depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries, gfp_t gfp_flags);
@@ -19,4 +65,11 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries);
+unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle);
+
+int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
+ int spaces);
+
+void stack_depot_print(depot_stack_handle_t stack);
+
#endif
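A sketch of the save/fetch round trip, assuming the subsystem has arranged for initialization via one of the paths described in the comment above (helper names are illustrative):

#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t demo_record_stack(gfp_t gfp)
{
	unsigned long entries[16];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr, gfp);
}

static void demo_report_stack(depot_stack_handle_t handle)
{
	if (handle)
		stack_depot_print(handle);	/* new helper above */
}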
diff --git a/include/linux/stackleak.h b/include/linux/stackleak.h
index 3d5c3271a9a8..c36e7a3b45e7 100644
--- a/include/linux/stackleak.h
+++ b/include/linux/stackleak.h
@@ -15,19 +15,67 @@
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
#include <asm/stacktrace.h>
+/*
+ * The lowest address on tsk's stack which we can plausibly erase.
+ */
+static __always_inline unsigned long
+stackleak_task_low_bound(const struct task_struct *tsk)
+{
+ /*
+ * The lowest unsigned long on the task stack contains STACK_END_MAGIC,
+ * which we must not corrupt.
+ */
+ return (unsigned long)end_of_stack(tsk) + sizeof(unsigned long);
+}
+
+/*
+ * The address immediately after the highest address on tsk's stack which we
+ * can plausibly erase.
+ */
+static __always_inline unsigned long
+stackleak_task_high_bound(const struct task_struct *tsk)
+{
+ /*
+ * The task's pt_regs lives at the top of the task stack and will be
+ * overwritten by exception entry, so there's no need to erase them.
+ */
+ return (unsigned long)task_pt_regs(tsk);
+}
+
+/*
+ * Find the address immediately above the poisoned region of the stack, where
+ * that region falls between 'low' (inclusive) and 'high' (exclusive).
+ */
+static __always_inline unsigned long
+stackleak_find_top_of_poison(const unsigned long low, const unsigned long high)
+{
+ const unsigned int depth = STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long);
+ unsigned int poison_count = 0;
+ unsigned long poison_high = high;
+ unsigned long sp = high;
+
+ while (sp > low && poison_count < depth) {
+ sp -= sizeof(unsigned long);
+
+ if (*(unsigned long *)sp == STACKLEAK_POISON) {
+ poison_count++;
+ } else {
+ poison_count = 0;
+ poison_high = sp;
+ }
+ }
+
+ return poison_high;
+}
+
static inline void stackleak_task_init(struct task_struct *t)
{
- t->lowest_stack = (unsigned long)end_of_stack(t) + sizeof(unsigned long);
+ t->lowest_stack = stackleak_task_low_bound(t);
# ifdef CONFIG_STACKLEAK_METRICS
t->prev_lowest_stack = t->lowest_stack;
# endif
}
-#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
-int stack_erasing_sysctl(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
-#endif
-
#else /* !CONFIG_GCC_PLUGIN_STACKLEAK */
static inline void stackleak_task_init(struct task_struct *t) { }
#endif
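A sketch of how an erase pass might combine the new helpers, loosely modeled on kernel/stackleak.c; the details here are illustrative, not the actual implementation:

/*
 * Illustrative only: skip the region still poisoned from a previous
 * pass, then re-poison everything from there up to the usable top.
 */
static void demo_erase(struct task_struct *tsk, unsigned long lowest_used)
{
	unsigned long erase_low, erase_high;

	erase_low  = stackleak_find_top_of_poison(stackleak_task_low_bound(tsk),
						  lowest_used);
	erase_high = stackleak_task_high_bound(tsk);

	while (erase_low < erase_high) {
		*(unsigned long *)erase_low = STACKLEAK_POISON;
		erase_low += sizeof(unsigned long);
	}
}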
diff --git a/include/linux/stackprotector.h b/include/linux/stackprotector.h
index 6b792d080eee..4c678c4fec58 100644
--- a/include/linux/stackprotector.h
+++ b/include/linux/stackprotector.h
@@ -6,7 +6,7 @@
#include <linux/sched.h>
#include <linux/random.h>
-#ifdef CONFIG_STACKPROTECTOR
+#if defined(CONFIG_STACKPROTECTOR) || defined(CONFIG_ARM64_PTR_AUTH)
# include <asm/stackprotector.h>
#else
static inline void boot_init_stack_canary(void)
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 83bd8cb475d7..97455880ac41 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -8,35 +8,17 @@
struct task_struct;
struct pt_regs;
-#ifdef CONFIG_STACKTRACE
-void stack_trace_print(const unsigned long *trace, unsigned int nr_entries,
- int spaces);
-int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries,
- unsigned int nr_entries, int spaces);
-unsigned int stack_trace_save(unsigned long *store, unsigned int size,
- unsigned int skipnr);
-unsigned int stack_trace_save_tsk(struct task_struct *task,
- unsigned long *store, unsigned int size,
- unsigned int skipnr);
-unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
- unsigned int size, unsigned int skipnr);
-unsigned int stack_trace_save_user(unsigned long *store, unsigned int size);
-
-/* Internal interfaces. Do not use in generic code */
#ifdef CONFIG_ARCH_STACKWALK
/**
* stack_trace_consume_fn - Callback for arch_stack_walk()
* @cookie: Caller supplied pointer handed back by arch_stack_walk()
* @addr: The stack entry address to consume
- * @reliable: True when the stack entry is reliable. Required by
- * some printk based consumers.
*
* Return: True, if the entry was consumed or skipped
* False, if there is no space left to store
*/
-typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr,
- bool reliable);
+typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr);
/**
* arch_stack_walk - Architecture specific function to walk the stack
* @consume_entry: Callback which is invoked by the architecture code for
@@ -55,16 +37,52 @@ typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr,
*/
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * arch_stack_walk_reliable - Architecture specific function to walk the
+ * stack reliably
+ *
+ * @consume_entry: Callback which is invoked by the architecture code for
+ * each entry.
+ * @cookie: Caller supplied pointer which is handed back to
+ * @consume_entry
+ * @task: Pointer to a task struct, can be NULL
+ *
+ * This function returns an error if it detects any unreliable
+ * features of the stack. Otherwise it guarantees that the stack
+ * trace is reliable.
+ *
+ * If the task is not 'current', the caller *must* ensure the task is
+ * inactive and its stack is pinned.
+ */
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task);
+
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
const struct pt_regs *regs);
+#endif /* CONFIG_ARCH_STACKWALK */
-#else /* CONFIG_ARCH_STACKWALK */
+#ifdef CONFIG_STACKTRACE
+void stack_trace_print(const unsigned long *trace, unsigned int nr_entries,
+ int spaces);
+int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries,
+ unsigned int nr_entries, int spaces);
+unsigned int stack_trace_save(unsigned long *store, unsigned int size,
+ unsigned int skipnr);
+unsigned int stack_trace_save_tsk(struct task_struct *task,
+ unsigned long *store, unsigned int size,
+ unsigned int skipnr);
+unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
+ unsigned int size, unsigned int skipnr);
+unsigned int stack_trace_save_user(unsigned long *store, unsigned int size);
+unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries);
+
+#ifndef CONFIG_ARCH_STACKWALK
+/* Internal interfaces. Do not use in generic code */
struct stack_trace {
unsigned int nr_entries, max_entries;
unsigned long *entries;
- int skip; /* input argument: How many entries to skip */
+ unsigned int skip; /* input argument: How many entries to skip */
};
extern void save_stack_trace(struct stack_trace *trace);
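With the 'reliable' argument gone, a consume callback now takes only the cookie and the address. A hedged sketch of a collector (struct and names are hypothetical):

#include <linux/stacktrace.h>

struct demo_trace {
	unsigned long *store;		/* hypothetical collector state */
	unsigned int size, nr;
};

static bool demo_consume(void *cookie, unsigned long addr)
{
	struct demo_trace *t = cookie;

	if (t->nr >= t->size)
		return false;		/* no space left: stop walking */
	t->store[t->nr++] = addr;
	return true;
}

/* Usage: arch_stack_walk(demo_consume, &trace, current, NULL); */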
diff --git a/include/linux/stat.h b/include/linux/stat.h
index 528c4baad091..ff277ced50e9 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -19,8 +19,6 @@
#include <linux/time.h>
#include <linux/uidgid.h>
-#define KSTAT_QUERY_FLAGS (AT_STATX_SYNC_TYPE)
-
struct kstat {
u32 result_mask; /* What fields the user got */
umode_t mode;
@@ -36,6 +34,10 @@ struct kstat {
STATX_ATTR_ENCRYPTED | \
STATX_ATTR_VERITY \
)/* Attrs corresponding to FS_*_FL flags */
+#define KSTAT_ATTR_VFS_FLAGS \
+ (STATX_ATTR_IMMUTABLE | \
+ STATX_ATTR_APPEND \
+ ) /* Attrs corresponding to S_* flags that are enforced by the VFS */
u64 ino;
dev_t dev;
dev_t rdev;
@@ -47,6 +49,9 @@ struct kstat {
struct timespec64 ctime;
struct timespec64 btime; /* File creation time */
u64 blocks;
+ u64 mnt_id;
+ u32 dio_mem_align;
+ u32 dio_offset_align;
};
#endif
diff --git a/include/linux/statfs.h b/include/linux/statfs.h
index 9bc69edb8f18..02c862686ea3 100644
--- a/include/linux/statfs.h
+++ b/include/linux/statfs.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <asm/statfs.h>
+#include <asm/byteorder.h>
struct kstatfs {
long f_type;
@@ -40,8 +41,21 @@ struct kstatfs {
#define ST_NOATIME 0x0400 /* do not update access times */
#define ST_NODIRATIME 0x0800 /* do not update directory access times */
#define ST_RELATIME 0x1000 /* update atime relative to mtime/ctime */
+#define ST_NOSYMFOLLOW 0x2000 /* do not follow symlinks */
struct dentry;
extern int vfs_get_fsid(struct dentry *dentry, __kernel_fsid_t *fsid);
+static inline __kernel_fsid_t u64_to_fsid(u64 v)
+{
+ return (__kernel_fsid_t){.val = {(u32)v, (u32)(v>>32)}};
+}
+
+/* Fold 16 bytes uuid to 64 bit fsid */
+static inline __kernel_fsid_t uuid_to_fsid(__u8 *uuid)
+{
+ return u64_to_fsid(le64_to_cpup((void *)uuid) ^
+ le64_to_cpup((void *)(uuid + sizeof(u64))));
+}
+
#endif
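A sketch of a filesystem's ->statfs() using the new helper, assuming the superblock info keeps a 16-byte UUID (field and struct names are hypothetical):

static int demo_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct demo_sb_info *sbi = dentry->d_sb->s_fs_info;	/* hypothetical */

	buf->f_type  = 0x64656d6f;			/* made-up magic */
	buf->f_bsize = PAGE_SIZE;
	buf->f_fsid  = uuid_to_fsid(sbi->uuid);		/* fold UUID to fsid */
	return 0;
}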
diff --git a/include/linux/static_call.h b/include/linux/static_call.h
new file mode 100644
index 000000000000..df53bed9d71f
--- /dev/null
+++ b/include/linux/static_call.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_STATIC_CALL_H
+#define _LINUX_STATIC_CALL_H
+
+/*
+ * Static call support
+ *
+ * Static calls use code patching to hard-code function pointers into direct
+ * branch instructions. They give the flexibility of function pointers, but
+ * with improved performance. This is especially important for cases where
+ * retpolines would otherwise be used, as retpolines can significantly impact
+ * performance.
+ *
+ *
+ * API overview:
+ *
+ * DECLARE_STATIC_CALL(name, func);
+ * DEFINE_STATIC_CALL(name, func);
+ * DEFINE_STATIC_CALL_NULL(name, typename);
+ * DEFINE_STATIC_CALL_RET0(name, typename);
+ *
+ * __static_call_return0;
+ *
+ * static_call(name)(args...);
+ * static_call_cond(name)(args...);
+ * static_call_update(name, func);
+ * static_call_query(name);
+ *
+ * EXPORT_STATIC_CALL{,_TRAMP}{,_GPL}()
+ *
+ * Usage example:
+ *
+ * # Start with the following functions (with identical prototypes):
+ * int func_a(int arg1, int arg2);
+ * int func_b(int arg1, int arg2);
+ *
+ * # Define a 'my_name' reference, associated with func_a() by default
+ * DEFINE_STATIC_CALL(my_name, func_a);
+ *
+ * # Call func_a()
+ * static_call(my_name)(arg1, arg2);
+ *
+ * # Update 'my_name' to point to func_b()
+ * static_call_update(my_name, &func_b);
+ *
+ * # Call func_b()
+ * static_call(my_name)(arg1, arg2);
+ *
+ *
+ * Implementation details:
+ *
+ * This requires some arch-specific code (CONFIG_HAVE_STATIC_CALL).
+ * Otherwise basic indirect calls are used (with function pointers).
+ *
+ * Each static_call() site calls into a trampoline associated with the name.
+ * The trampoline has a direct branch to the default function. Updates to a
+ * name will modify the trampoline's branch destination.
+ *
+ * If the arch has CONFIG_HAVE_STATIC_CALL_INLINE, then the call sites
+ * themselves will be patched at runtime to call the functions directly,
+ * rather than calling through the trampoline. This requires objtool or a
+ * compiler plugin to detect all the static_call() sites and annotate them
+ * in the .static_call_sites section.
+ *
+ *
+ * Notes on NULL function pointers:
+ *
+ * Static_call()s support NULL functions, with many of the caveats that
+ * regular function pointers have.
+ *
+ * Clearly calling a NULL function pointer is 'BAD', so too for
+ * static_call()s (although when HAVE_STATIC_CALL it might not be immediately
+ * fatal). A NULL static_call can be the result of:
+ *
+ * DECLARE_STATIC_CALL_NULL(my_static_call, void (*)(int));
+ *
+ * which is equivalent to declaring a NULL function pointer with just a
+ * typename:
+ *
+ * void (*my_func_ptr)(int arg1) = NULL;
+ *
+ * or using static_call_update() with a NULL function. In both cases the
+ * HAVE_STATIC_CALL implementation will patch the trampoline with a RET
+ * instruction, instead of an immediate tail-call JMP. HAVE_STATIC_CALL_INLINE
+ * architectures can patch the trampoline call to a NOP.
+ *
+ * In all cases, any argument evaluation is unconditional. Unlike a regular
+ * conditional function pointer call:
+ *
+ * if (my_func_ptr)
+ * my_func_ptr(arg1)
+ *
+ * where the argument evaluation also depends on the pointer value.
+ *
+ * When calling a static_call that can be NULL, use:
+ *
+ * static_call_cond(name)(arg1);
+ *
+ * which will include the required value tests to avoid NULL-pointer
+ * dereferences.
+ *
+ * To query which function is currently set to be called, use:
+ *
+ * func = static_call_query(name);
+ *
+ *
+ * DEFINE_STATIC_CALL_RET0 / __static_call_return0:
+ *
+ * Just like how DEFINE_STATIC_CALL_NULL() / static_call_cond() optimize the
+ * conditional void function call, DEFINE_STATIC_CALL_RET0 /
+ * __static_call_return0 optimize the do nothing return 0 function.
+ *
+ * This feature is strictly UB per the C standard (since it casts a function
+ * pointer to a different signature) and relies on the architecture ABI to
+ * make things work. In particular it relies on Caller Stack-cleanup and the
+ * whole return register being clobbered for short return values. All normal
+ * CDECL style ABIs conform.
+ *
+ * In particular the x86_64 implementation replaces the 5 byte CALL
+ * instruction at the callsite with a 5 byte clear of the RAX register,
+ * completely eliding any function call overhead.
+ *
+ * Notably argument setup is unconditional.
+ *
+ *
+ * EXPORT_STATIC_CALL() vs EXPORT_STATIC_CALL_TRAMP():
+ *
+ * The difference is that the _TRAMP variant tries to only export the
+ * trampoline with the result that a module can use static_call{,_cond}() but
+ * not static_call_update().
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/cpu.h>
+#include <linux/static_call_types.h>
+
+#ifdef CONFIG_HAVE_STATIC_CALL
+#include <asm/static_call.h>
+
+/*
+ * Either @site or @tramp can be NULL.
+ */
+extern void arch_static_call_transform(void *site, void *tramp, void *func, bool tail);
+
+#define STATIC_CALL_TRAMP_ADDR(name) &STATIC_CALL_TRAMP(name)
+
+#else
+#define STATIC_CALL_TRAMP_ADDR(name) NULL
+#endif
+
+#define static_call_update(name, func) \
+({ \
+ typeof(&STATIC_CALL_TRAMP(name)) __F = (func); \
+ __static_call_update(&STATIC_CALL_KEY(name), \
+ STATIC_CALL_TRAMP_ADDR(name), __F); \
+})
+
+#define static_call_query(name) (READ_ONCE(STATIC_CALL_KEY(name).func))
+
+#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+
+extern int __init static_call_init(void);
+
+struct static_call_mod {
+ struct static_call_mod *next;
+ struct module *mod; /* for vmlinux, mod == NULL */
+ struct static_call_site *sites;
+};
+
+/* For finding the key associated with a trampoline */
+struct static_call_tramp_key {
+ s32 tramp;
+ s32 key;
+};
+
+extern void __static_call_update(struct static_call_key *key, void *tramp, void *func);
+extern int static_call_mod_init(struct module *mod);
+extern int static_call_text_reserved(void *start, void *end);
+
+extern long __static_call_return0(void);
+
+#define DEFINE_STATIC_CALL(name, _func) \
+ DECLARE_STATIC_CALL(name, _func); \
+ struct static_call_key STATIC_CALL_KEY(name) = { \
+ .func = _func, \
+ .type = 1, \
+ }; \
+ ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
+
+#define DEFINE_STATIC_CALL_NULL(name, _func) \
+ DECLARE_STATIC_CALL(name, _func); \
+ struct static_call_key STATIC_CALL_KEY(name) = { \
+ .func = NULL, \
+ .type = 1, \
+ }; \
+ ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
+
+#define DEFINE_STATIC_CALL_RET0(name, _func) \
+ DECLARE_STATIC_CALL(name, _func); \
+ struct static_call_key STATIC_CALL_KEY(name) = { \
+ .func = __static_call_return0, \
+ .type = 1, \
+ }; \
+ ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)
+
+#define static_call_cond(name) (void)__static_call(name)
+
+#define EXPORT_STATIC_CALL(name) \
+ EXPORT_SYMBOL(STATIC_CALL_KEY(name)); \
+ EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
+#define EXPORT_STATIC_CALL_GPL(name) \
+ EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name)); \
+ EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
+
+/* Leave the key unexported, so modules can't change static call targets: */
+#define EXPORT_STATIC_CALL_TRAMP(name) \
+ EXPORT_SYMBOL(STATIC_CALL_TRAMP(name)); \
+ ARCH_ADD_TRAMP_KEY(name)
+#define EXPORT_STATIC_CALL_TRAMP_GPL(name) \
+ EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name)); \
+ ARCH_ADD_TRAMP_KEY(name)
+
+#elif defined(CONFIG_HAVE_STATIC_CALL)
+
+static inline int static_call_init(void) { return 0; }
+
+#define DEFINE_STATIC_CALL(name, _func) \
+ DECLARE_STATIC_CALL(name, _func); \
+ struct static_call_key STATIC_CALL_KEY(name) = { \
+ .func = _func, \
+ }; \
+ ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
+
+#define DEFINE_STATIC_CALL_NULL(name, _func) \
+ DECLARE_STATIC_CALL(name, _func); \
+ struct static_call_key STATIC_CALL_KEY(name) = { \
+ .func = NULL, \
+ }; \
+ ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
+
+#define DEFINE_STATIC_CALL_RET0(name, _func) \
+ DECLARE_STATIC_CALL(name, _func); \
+ struct static_call_key STATIC_CALL_KEY(name) = { \
+ .func = __static_call_return0, \
+ }; \
+ ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)
+
+#define static_call_cond(name) (void)__static_call(name)
+
+static inline
+void __static_call_update(struct static_call_key *key, void *tramp, void *func)
+{
+ cpus_read_lock();
+ WRITE_ONCE(key->func, func);
+ arch_static_call_transform(NULL, tramp, func, false);
+ cpus_read_unlock();
+}
+
+static inline int static_call_text_reserved(void *start, void *end)
+{
+ return 0;
+}
+
+extern long __static_call_return0(void);
+
+#define EXPORT_STATIC_CALL(name) \
+ EXPORT_SYMBOL(STATIC_CALL_KEY(name)); \
+ EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
+#define EXPORT_STATIC_CALL_GPL(name) \
+ EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name)); \
+ EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
+
+/* Leave the key unexported, so modules can't change static call targets: */
+#define EXPORT_STATIC_CALL_TRAMP(name) \
+ EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
+#define EXPORT_STATIC_CALL_TRAMP_GPL(name) \
+ EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
+
+#else /* Generic implementation */
+
+static inline int static_call_init(void) { return 0; }
+
+static inline long __static_call_return0(void)
+{
+ return 0;
+}
+
+#define __DEFINE_STATIC_CALL(name, _func, _func_init) \
+ DECLARE_STATIC_CALL(name, _func); \
+ struct static_call_key STATIC_CALL_KEY(name) = { \
+ .func = _func_init, \
+ }
+
+#define DEFINE_STATIC_CALL(name, _func) \
+ __DEFINE_STATIC_CALL(name, _func, _func)
+
+#define DEFINE_STATIC_CALL_NULL(name, _func) \
+ __DEFINE_STATIC_CALL(name, _func, NULL)
+
+#define DEFINE_STATIC_CALL_RET0(name, _func) \
+ __DEFINE_STATIC_CALL(name, _func, __static_call_return0)
+
+static inline void __static_call_nop(void) { }
+
+/*
+ * This horrific hack takes care of two things:
+ *
+ * - it ensures the compiler will only load the function pointer ONCE,
+ * which avoids a reload race.
+ *
+ * - it ensures the argument evaluation is unconditional, similar
+ * to the HAVE_STATIC_CALL variant.
+ *
+ * Sadly current GCC/Clang (10 for both) do not optimize this properly
+ * and will emit an indirect call for the NULL case :-(
+ */
+#define __static_call_cond(name) \
+({ \
+ void *func = READ_ONCE(STATIC_CALL_KEY(name).func); \
+ if (!func) \
+ func = &__static_call_nop; \
+ (typeof(STATIC_CALL_TRAMP(name))*)func; \
+})
+
+#define static_call_cond(name) (void)__static_call_cond(name)
+
+static inline
+void __static_call_update(struct static_call_key *key, void *tramp, void *func)
+{
+ WRITE_ONCE(key->func, func);
+}
+
+static inline int static_call_text_reserved(void *start, void *end)
+{
+ return 0;
+}
+
+#define EXPORT_STATIC_CALL(name) EXPORT_SYMBOL(STATIC_CALL_KEY(name))
+#define EXPORT_STATIC_CALL_GPL(name) EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name))
+
+#endif /* CONFIG_HAVE_STATIC_CALL */
+
+#endif /* _LINUX_STATIC_CALL_H */
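Putting the API overview above into a compilable sketch (function and key names are illustrative):

#include <linux/static_call.h>

static int demo_add(int a, int b) { return a + b; }
static int demo_mul(int a, int b) { return a * b; }

DEFINE_STATIC_CALL(demo_op, demo_add);	/* defaults to demo_add() */

static int demo_compute(int a, int b)
{
	return static_call(demo_op)(a, b);
}

static void demo_switch_to_mul(void)
{
	static_call_update(demo_op, &demo_mul);	/* patches the call site(s) */
}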
diff --git a/include/linux/static_call_types.h b/include/linux/static_call_types.h
new file mode 100644
index 000000000000..5a00b8b2cf9f
--- /dev/null
+++ b/include/linux/static_call_types.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _STATIC_CALL_TYPES_H
+#define _STATIC_CALL_TYPES_H
+
+#include <linux/types.h>
+#include <linux/stringify.h>
+#include <linux/compiler.h>
+
+#define STATIC_CALL_KEY_PREFIX __SCK__
+#define STATIC_CALL_KEY_PREFIX_STR __stringify(STATIC_CALL_KEY_PREFIX)
+#define STATIC_CALL_KEY_PREFIX_LEN (sizeof(STATIC_CALL_KEY_PREFIX_STR) - 1)
+#define STATIC_CALL_KEY(name) __PASTE(STATIC_CALL_KEY_PREFIX, name)
+#define STATIC_CALL_KEY_STR(name) __stringify(STATIC_CALL_KEY(name))
+
+#define STATIC_CALL_TRAMP_PREFIX __SCT__
+#define STATIC_CALL_TRAMP_PREFIX_STR __stringify(STATIC_CALL_TRAMP_PREFIX)
+#define STATIC_CALL_TRAMP_PREFIX_LEN (sizeof(STATIC_CALL_TRAMP_PREFIX_STR) - 1)
+#define STATIC_CALL_TRAMP(name) __PASTE(STATIC_CALL_TRAMP_PREFIX, name)
+#define STATIC_CALL_TRAMP_STR(name) __stringify(STATIC_CALL_TRAMP(name))
+
+/*
+ * Flags in the low bits of static_call_site::key.
+ */
+#define STATIC_CALL_SITE_TAIL 1UL /* tail call */
+#define STATIC_CALL_SITE_INIT 2UL /* init section */
+#define STATIC_CALL_SITE_FLAGS 3UL
+
+/*
+ * The static call site table needs to be created by external tooling (objtool
+ * or a compiler plugin).
+ */
+struct static_call_site {
+ s32 addr;
+ s32 key;
+};
+
+#define DECLARE_STATIC_CALL(name, func) \
+ extern struct static_call_key STATIC_CALL_KEY(name); \
+ extern typeof(func) STATIC_CALL_TRAMP(name);
+
+#ifdef CONFIG_HAVE_STATIC_CALL
+
+#define __raw_static_call(name) (&STATIC_CALL_TRAMP(name))
+
+#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+
+/*
+ * __ADDRESSABLE() is used to ensure the key symbol doesn't get stripped from
+ * the symbol table so that objtool can reference it when it generates the
+ * .static_call_sites section.
+ */
+#define __STATIC_CALL_ADDRESSABLE(name) \
+ __ADDRESSABLE(STATIC_CALL_KEY(name))
+
+#define __static_call(name) \
+({ \
+ __STATIC_CALL_ADDRESSABLE(name); \
+ __raw_static_call(name); \
+})
+
+struct static_call_key {
+ void *func;
+ union {
+ /* bit 0: 0 = mods, 1 = sites */
+ unsigned long type;
+ struct static_call_mod *mods;
+ struct static_call_site *sites;
+ };
+};
+
+#else /* !CONFIG_HAVE_STATIC_CALL_INLINE */
+
+#define __STATIC_CALL_ADDRESSABLE(name)
+#define __static_call(name) __raw_static_call(name)
+
+struct static_call_key {
+ void *func;
+};
+
+#endif /* CONFIG_HAVE_STATIC_CALL_INLINE */
+
+#ifdef MODULE
+#define __STATIC_CALL_MOD_ADDRESSABLE(name)
+#define static_call_mod(name) __raw_static_call(name)
+#else
+#define __STATIC_CALL_MOD_ADDRESSABLE(name) __STATIC_CALL_ADDRESSABLE(name)
+#define static_call_mod(name) __static_call(name)
+#endif
+
+#define static_call(name) __static_call(name)
+
+#else
+
+struct static_call_key {
+ void *func;
+};
+
+#define static_call(name) \
+ ((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func))
+
+#endif /* CONFIG_HAVE_STATIC_CALL */
+
+#endif /* _STATIC_CALL_TYPES_H */
diff --git a/include/linux/stdarg.h b/include/linux/stdarg.h
new file mode 100644
index 000000000000..c8dc7f4f390c
--- /dev/null
+++ b/include/linux/stdarg.h
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#ifndef _LINUX_STDARG_H
+#define _LINUX_STDARG_H
+
+typedef __builtin_va_list va_list;
+#define va_start(v, l) __builtin_va_start(v, l)
+#define va_end(v) __builtin_va_end(v)
+#define va_arg(v, T) __builtin_va_arg(v, T)
+#define va_copy(d, s) __builtin_va_copy(d, s)
+
+#endif
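The header simply maps the C varargs machinery onto compiler builtins; a trivial consumer might look like this (illustrative only):

#include <linux/stdarg.h>

/* Illustrative: sum @count trailing ints. */
static int demo_sum(int count, ...)
{
	va_list args;
	int i, sum = 0;

	va_start(args, count);
	for (i = 0; i < count; i++)
		sum += va_arg(args, int);
	va_end(args);
	return sum;
}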
diff --git a/include/linux/stddef.h b/include/linux/stddef.h
index 998a4ba28eba..929d67710cc5 100644
--- a/include/linux/stddef.h
+++ b/include/linux/stddef.h
@@ -13,14 +13,10 @@ enum {
};
#undef offsetof
-#ifdef __compiler_offsetof
-#define offsetof(TYPE, MEMBER) __compiler_offsetof(TYPE, MEMBER)
-#else
-#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER)
-#endif
+#define offsetof(TYPE, MEMBER) __builtin_offsetof(TYPE, MEMBER)
/**
- * sizeof_field(TYPE, MEMBER)
+ * sizeof_field() - Report the size of a struct field in bytes
*
* @TYPE: The structure containing the field of interest
* @MEMBER: The field to return the size of
@@ -28,7 +24,7 @@ enum {
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
/**
- * offsetofend(TYPE, MEMBER)
+ * offsetofend() - Report the offset of a struct field within the struct
*
* @TYPE: The type of the structure
* @MEMBER: The member within the structure to get the end offset of
@@ -36,4 +32,65 @@ enum {
#define offsetofend(TYPE, MEMBER) \
(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
+/**
+ * struct_group() - Wrap a set of declarations in a mirrored struct
+ *
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical
+ * layout and size: one anonymous and one named. The former can be
+ * used normally without sub-struct naming, and the latter can be
+ * used to reason about the start, end, and size of the group of
+ * struct members.
+ */
+#define struct_group(NAME, MEMBERS...) \
+ __struct_group(/* no tag */, NAME, /* no attrs */, MEMBERS)
+
+/**
+ * struct_group_attr() - Create a struct_group() with trailing attributes
+ *
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @ATTRS: Any struct attributes to apply
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical
+ * layout and size: one anonymous and one named. The former can be
+ * used normally without sub-struct naming, and the latter can be
+ * used to reason about the start, end, and size of the group of
+ * struct members. Includes structure attributes argument.
+ */
+#define struct_group_attr(NAME, ATTRS, MEMBERS...) \
+ __struct_group(/* no tag */, NAME, ATTRS, MEMBERS)
+
+/**
+ * struct_group_tagged() - Create a struct_group with a reusable tag
+ *
+ * @TAG: The tag name for the named sub-struct
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical
+ * layout and size: one anonymous and one named. The former can be
+ * used normally without sub-struct naming, and the latter can be
+ * used to reason about the start, end, and size of the group of
+ * struct members. Includes struct tag argument for the named copy,
+ * so the specified layout can be reused later.
+ */
+#define struct_group_tagged(TAG, NAME, MEMBERS...) \
+ __struct_group(TAG, NAME, /* no attrs */, MEMBERS)
+
+/**
+ * DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union
+ *
+ * @TYPE: The type of each flexible array element
+ * @NAME: The name of the flexible array member
+ *
+ * In order to have a flexible array member in a union or alone in a
+ * struct, it needs to be wrapped in an anonymous struct with at least 1
+ * named member, but that member can be empty.
+ */
+#define DECLARE_FLEX_ARRAY(TYPE, NAME) \
+ __DECLARE_FLEX_ARRAY(TYPE, NAME)
+
#endif
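A sketch combining struct_group() with DECLARE_FLEX_ARRAY() in a union, as the kernel-doc above describes (the wire format here is hypothetical):

#include <linux/stddef.h>
#include <linux/types.h>

struct demo_frame {			/* hypothetical wire format */
	u32 seq;
	struct_group(addrs,		/* mirrored sub-struct */
		u8 src[6];
		u8 dst[6];
	);
	union {
		DECLARE_FLEX_ARRAY(u8, bytes);
		DECLARE_FLEX_ARRAY(u32, words);
	};
};

/* Both addresses can then be copied as one bounds-checked unit:
 *	memcpy(&out->addrs, &in->addrs, sizeof(out->addrs));
 */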
diff --git a/include/linux/stm.h b/include/linux/stm.h
index c6f577ab6f21..3b22689512be 100644
--- a/include/linux/stm.h
+++ b/include/linux/stm.h
@@ -57,7 +57,7 @@ struct stm_device;
*
* Normally, an STM device will have a range of masters available to software
* and the rest being statically assigned to various hardware trace sources.
- * The former is defined by the the range [@sw_start..@sw_end] of the device
+ * The former is defined by the range [@sw_start..@sw_end] of the device
* description. That is, the lowest master that can be allocated to software
 * writers is @sw_start and data from this writer will appear as @sw_start
* master in the STP stream.
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 19190c609282..fb2e88614f5d 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -80,6 +80,8 @@
struct stmmac_mdio_bus_data {
unsigned int phy_mask;
+ unsigned int has_xpcs;
+ unsigned int xpcs_an_inband;
int *irqs;
int probed_phy_irq;
bool needs_reset;
@@ -94,6 +96,8 @@ struct stmmac_dma_cfg {
int mixed_burst;
bool aal;
bool eame;
+ bool multi_msi_en;
+ bool dche;
};
#define AXI_BLEN 7
@@ -111,7 +115,9 @@ struct stmmac_axi {
#define EST_GCL 1024
struct stmmac_est {
+ struct mutex lock;
int enable;
+ u32 btr_reserve[2];
u32 btr_offset[2];
u32 btr[2];
u32 ctr[2];
@@ -142,6 +148,44 @@ struct stmmac_txq_cfg {
int tbs_en;
};
+/* FPE link state */
+enum stmmac_fpe_state {
+ FPE_STATE_OFF = 0,
+ FPE_STATE_CAPABLE = 1,
+ FPE_STATE_ENTERING_ON = 2,
+ FPE_STATE_ON = 3,
+};
+
+/* FPE link-partner hand-shaking mPacket type */
+enum stmmac_mpacket_type {
+ MPACKET_VERIFY = 0,
+ MPACKET_RESPONSE = 1,
+};
+
+enum stmmac_fpe_task_state_t {
+ __FPE_REMOVING,
+ __FPE_TASK_SCHED,
+};
+
+struct stmmac_fpe_cfg {
+ bool enable; /* FPE enable */
+ bool hs_enable; /* FPE handshake enable */
+ enum stmmac_fpe_state lp_fpe_state; /* Link Partner FPE state */
+ enum stmmac_fpe_state lo_fpe_state; /* Local station FPE state */
+};
+
+struct stmmac_safety_feature_cfg {
+ u32 tsoee;
+ u32 mrxpee;
+ u32 mestee;
+ u32 mrxee;
+ u32 mtxee;
+ u32 epsi;
+ u32 edpp;
+ u32 prtyen;
+ u32 tmouten;
+};
+
struct plat_stmmacenet_data {
int bus_id;
int phy_addr;
@@ -153,6 +197,8 @@ struct plat_stmmacenet_data {
struct device_node *mdio_node;
struct stmmac_dma_cfg *dma_cfg;
struct stmmac_est *est;
+ struct stmmac_fpe_cfg *fpe_cfg;
+ struct stmmac_safety_feature_cfg *safety_feat_cfg;
int clk_csr;
int has_gmac;
int enh_desc;
@@ -169,6 +215,7 @@ struct plat_stmmacenet_data {
int unicast_filter_entries;
int tx_fifo_size;
int rx_fifo_size;
+ u32 addr64;
u32 rx_queues_to_use;
u32 tx_queues_to_use;
u8 rx_sched_algorithm;
@@ -176,17 +223,28 @@ struct plat_stmmacenet_data {
struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES];
struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES];
void (*fix_mac_speed)(void *priv, unsigned int speed);
+ int (*serdes_powerup)(struct net_device *ndev, void *priv);
+ void (*serdes_powerdown)(struct net_device *ndev, void *priv);
+ void (*speed_mode_2500)(struct net_device *ndev, void *priv);
+ void (*ptp_clk_freq_config)(void *priv);
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
struct mac_device_info *(*setup)(void *priv);
+ int (*clks_config)(void *priv, bool enabled);
+ int (*crosststamp)(ktime_t *device, struct system_counterval_t *system,
+ void *ctx);
+ void (*dump_debug_regs)(void *priv);
void *bsp_priv;
struct clk *stmmac_clk;
struct clk *pclk;
struct clk *clk_ptp_ref;
unsigned int clk_ptp_rate;
unsigned int clk_ref_rate;
+ unsigned int mult_fact_100ns;
s32 ptp_max_adj;
+ u32 cdc_error_adj;
struct reset_control *stmmac_rst;
+ struct reset_control *stmmac_ahb_rst;
struct stmmac_axi *axi;
int has_gmac4;
bool has_sun8i;
@@ -195,5 +253,23 @@ struct plat_stmmacenet_data {
int mac_port_sel_speed;
bool en_tx_lpi_clockgating;
int has_xgmac;
+ bool vlan_fail_q_en;
+ u8 vlan_fail_q;
+ unsigned int eee_usecs_rate;
+ struct pci_dev *pdev;
+ int int_snapshot_num;
+ int ext_snapshot_num;
+ bool int_snapshot_en;
+ bool ext_snapshot_en;
+ bool multi_msi_en;
+ int msi_mac_vec;
+ int msi_wol_vec;
+ int msi_lpi_vec;
+ int msi_sfty_ce_vec;
+ int msi_sfty_ue_vec;
+ int msi_rx_base_vec;
+ int msi_tx_base_vec;
+ bool use_phy_wol;
+ bool sph_disable;
};
#endif
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 76d8b09384a7..ea7a74ea7389 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -24,6 +24,7 @@ typedef int (*cpu_stop_fn_t)(void *arg);
struct cpu_stop_work {
struct list_head list; /* cpu_stopper->works */
cpu_stop_fn_t fn;
+ unsigned long caller;
void *arg;
struct cpu_stop_done *done;
};
@@ -36,6 +37,8 @@ void stop_machine_park(int cpu);
void stop_machine_unpark(int cpu);
void stop_machine_yield(const struct cpumask *cpumask);
+extern void print_stop_info(const char *log_lvl, struct task_struct *task);
+
#else /* CONFIG_SMP */
#include <linux/workqueue.h>
@@ -80,6 +83,8 @@ static inline bool stop_one_cpu_nowait(unsigned int cpu,
return false;
}
+static inline void print_stop_info(const char *log_lvl, struct task_struct *task) { }
+
#endif /* CONFIG_SMP */
/*
@@ -119,11 +124,27 @@ int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
*/
int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
+/**
+ * stop_core_cpuslocked: - stop all threads on just one core
+ * @cpu: any cpu in the targeted core
+ * @fn: the function to run
+ * @data: the data ptr for @fn()
+ *
+ * Same as above, but instead of every CPU, only the logical CPUs of a
+ * single core are affected.
+ *
+ * Context: Must be called from within a cpus_read_lock() protected region.
+ *
+ * Return: 0 if all executions of @fn returned 0, any non zero return
+ * value if any returned non zero.
+ */
+int stop_core_cpuslocked(unsigned int cpu, cpu_stop_fn_t fn, void *data);
+
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus);
#else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
-static inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
+static __always_inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus)
{
unsigned long flags;
@@ -134,14 +155,15 @@ static inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
return ret;
}
-static inline int stop_machine(cpu_stop_fn_t fn, void *data,
- const struct cpumask *cpus)
+static __always_inline int
+stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
return stop_machine_cpuslocked(fn, data, cpus);
}
-static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
- const struct cpumask *cpus)
+static __always_inline int
+stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
+ const struct cpumask *cpus)
{
return stop_machine(fn, data, cpus);
}
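A sketch of the new per-core stop interface, following the kernel-doc above (callback and data are illustrative):

#include <linux/cpu.h>
#include <linux/stop_machine.h>

static int demo_core_update(void *arg)
{
	/* Runs with every sibling of the chosen core stopped. */
	return 0;
}

static int demo_update_core(unsigned int cpu)
{
	int ret;

	cpus_read_lock();	/* required context, per the kernel-doc */
	ret = stop_core_cpuslocked(cpu, demo_core_update, NULL);
	cpus_read_unlock();
	return ret;
}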
diff --git a/include/linux/string.h b/include/linux/string.h
index 6dfbb2efa815..cf7607b32102 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -2,11 +2,11 @@
#ifndef _LINUX_STRING_H_
#define _LINUX_STRING_H_
-
#include <linux/compiler.h> /* for inline */
#include <linux/types.h> /* for size_t */
#include <linux/stddef.h> /* for NULL */
-#include <stdarg.h>
+#include <linux/errno.h> /* for E2BIG */
+#include <linux/stdarg.h>
#include <uapi/linux/string.h>
extern char *strndup_user(const char __user *, long);
@@ -161,20 +161,13 @@ extern int bcmp(const void *,const void *,__kernel_size_t);
#ifndef __HAVE_ARCH_MEMCHR
extern void * memchr(const void *,int,__kernel_size_t);
#endif
-#ifndef __HAVE_ARCH_MEMCPY_MCSAFE
-static inline __must_check unsigned long memcpy_mcsafe(void *dst,
- const void *src, size_t cnt)
-{
- memcpy(dst, src, cnt);
- return 0;
-}
-#endif
#ifndef __HAVE_ARCH_MEMCPY_FLUSHCACHE
static inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
memcpy(dst, src, cnt);
}
#endif
+
void *memchr_inv(const void *s, int c, size_t n);
char *strreplace(char *s, char old, char new);
@@ -190,12 +183,6 @@ extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
extern void argv_free(char **argv);
extern bool sysfs_streq(const char *s1, const char *s2);
-extern int kstrtobool(const char *s, bool *res);
-static inline int strtobool(const char *s, bool *res)
-{
- return kstrtobool(s, res);
-}
-
int match_string(const char * const *array, size_t n, const char *string);
int __sysfs_match_string(const char * const *array, size_t n, const char *s);
@@ -262,228 +249,94 @@ static inline const char *kbasename(const char *path)
return tail ? tail + 1 : path;
}
-#define __FORTIFY_INLINE extern __always_inline __attribute__((gnu_inline))
-#define __RENAME(x) __asm__(#x)
-
-void fortify_panic(const char *name) __noreturn __cold;
-void __read_overflow(void) __compiletime_error("detected read beyond size of object passed as 1st parameter");
-void __read_overflow2(void) __compiletime_error("detected read beyond size of object passed as 2nd parameter");
-void __read_overflow3(void) __compiletime_error("detected read beyond size of object passed as 3rd parameter");
-void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");
-
#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
-__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- if (__builtin_constant_p(size) && p_size < size)
- __write_overflow();
- if (p_size < size)
- fortify_panic(__func__);
- return __builtin_strncpy(p, q, size);
-}
-
-__FORTIFY_INLINE char *strcat(char *p, const char *q)
-{
- size_t p_size = __builtin_object_size(p, 0);
- if (p_size == (size_t)-1)
- return __builtin_strcat(p, q);
- if (strlcat(p, q, p_size) >= p_size)
- fortify_panic(__func__);
- return p;
-}
-
-__FORTIFY_INLINE __kernel_size_t strlen(const char *p)
-{
- __kernel_size_t ret;
- size_t p_size = __builtin_object_size(p, 0);
-
- /* Work around gcc excess stack consumption issue */
- if (p_size == (size_t)-1 ||
- (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0'))
- return __builtin_strlen(p);
- ret = strnlen(p, p_size);
- if (p_size <= ret)
- fortify_panic(__func__);
- return ret;
-}
-
-extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
-__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
-{
- size_t p_size = __builtin_object_size(p, 0);
- __kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
- if (p_size <= ret && maxlen != ret)
- fortify_panic(__func__);
- return ret;
-}
-
-/* defined after fortified strlen to reuse it */
-extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
-__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
-{
- size_t ret;
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
- return __real_strlcpy(p, q, size);
- ret = strlen(q);
- if (size) {
- size_t len = (ret >= size) ? size - 1 : ret;
- if (__builtin_constant_p(len) && len >= p_size)
- __write_overflow();
- if (len >= p_size)
- fortify_panic(__func__);
- __builtin_memcpy(p, q, len);
- p[len] = '\0';
- }
- return ret;
-}
-
-/* defined after fortified strlen and strnlen to reuse them */
-__FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
-{
- size_t p_len, copy_len;
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
- return __builtin_strncat(p, q, count);
- p_len = strlen(p);
- copy_len = strnlen(q, count);
- if (p_size < p_len + copy_len + 1)
- fortify_panic(__func__);
- __builtin_memcpy(p + p_len, q, copy_len);
- p[p_len + copy_len] = '\0';
- return p;
-}
-
-__FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- if (__builtin_constant_p(size) && p_size < size)
- __write_overflow();
- if (p_size < size)
- fortify_panic(__func__);
- return __builtin_memset(p, c, size);
-}
-
-__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
- if (__builtin_constant_p(size)) {
- if (p_size < size)
- __write_overflow();
- if (q_size < size)
- __read_overflow2();
- }
- if (p_size < size || q_size < size)
- fortify_panic(__func__);
- return __builtin_memcpy(p, q, size);
-}
-
-__FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
- if (__builtin_constant_p(size)) {
- if (p_size < size)
- __write_overflow();
- if (q_size < size)
- __read_overflow2();
- }
- if (p_size < size || q_size < size)
- fortify_panic(__func__);
- return __builtin_memmove(p, q, size);
-}
-
-extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
-__FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- if (__builtin_constant_p(size) && p_size < size)
- __read_overflow();
- if (p_size < size)
- fortify_panic(__func__);
- return __real_memscan(p, c, size);
-}
-
-__FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
- if (__builtin_constant_p(size)) {
- if (p_size < size)
- __read_overflow();
- if (q_size < size)
- __read_overflow2();
- }
- if (p_size < size || q_size < size)
- fortify_panic(__func__);
- return __builtin_memcmp(p, q, size);
-}
+#include <linux/fortify-string.h>
+#endif
+#ifndef unsafe_memcpy
+#define unsafe_memcpy(dst, src, bytes, justification) \
+ memcpy(dst, src, bytes)
+#endif
-__FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- if (__builtin_constant_p(size) && p_size < size)
- __read_overflow();
- if (p_size < size)
- fortify_panic(__func__);
- return __builtin_memchr(p, c, size);
-}
+void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
+ int pad);
-void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
-__FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- if (__builtin_constant_p(size) && p_size < size)
- __read_overflow();
- if (p_size < size)
- fortify_panic(__func__);
- return __real_memchr_inv(p, c, size);
-}
-
-extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup);
-__FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp)
-{
- size_t p_size = __builtin_object_size(p, 0);
- if (__builtin_constant_p(size) && p_size < size)
- __read_overflow();
- if (p_size < size)
- fortify_panic(__func__);
- return __real_kmemdup(p, size, gfp);
-}
+/**
+ * strtomem_pad - Copy NUL-terminated string to non-NUL-terminated buffer
+ *
+ * @dest: Pointer of destination character array (marked as __nonstring)
+ * @src: Pointer to NUL-terminated string
+ * @pad: Padding character to fill any remaining bytes of @dest after copy
+ *
+ * This is a replacement for strncpy() uses where the destination is not
+ * a NUL-terminated string, but with bounds checking on the source size, and
+ * an explicit padding character. If padding is not required, use strtomem().
+ *
+ * Note that the size of @dest is not an argument, as the length of @dest
+ * must be discoverable by the compiler.
+ */
+#define strtomem_pad(dest, src, pad) do { \
+ const size_t _dest_len = __builtin_object_size(dest, 1); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
+ _dest_len == (size_t)-1); \
+ memcpy_and_pad(dest, _dest_len, src, strnlen(src, _dest_len), pad); \
+} while (0)
-/* defined after fortified strlen and memcpy to reuse them */
-__FORTIFY_INLINE char *strcpy(char *p, const char *q)
-{
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
- return __builtin_strcpy(p, q);
- memcpy(p, q, strlen(q) + 1);
- return p;
-}
+/**
+ * strtomem - Copy NUL-terminated string to non-NUL-terminated buffer
+ *
+ * @dest: Pointer to destination character array (marked as __nonstring)
+ * @src: Pointer to NUL-terminated string
+ *
+ * This is a replacement for strncpy() uses where the destination is not
+ * a NUL-terminated string, but with bounds checking on the source size, and
+ * without trailing padding. If padding is required, use strtomem_pad().
+ *
+ * Note that the size of @dest is not an argument, as the length of @dest
+ * must be discoverable by the compiler.
+ */
+#define strtomem(dest, src) do { \
+ const size_t _dest_len = __builtin_object_size(dest, 1); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
+ _dest_len == (size_t)-1); \
+ memcpy(dest, src, min(_dest_len, strnlen(src, _dest_len))); \
+} while (0)
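A minimal usage sketch covering both macros, assuming a fixed-size, non-NUL-terminated field (the struct and names are illustrative):

	struct fw_record {
		char label[8] __nonstring;	/* not NUL-terminated */
		u32 value;
	};

	static void set_label(struct fw_record *rec, const char *name)
	{
		/* Copies at most 8 bytes of @name; with strtomem_pad(),
		 * any remaining bytes of 'label' are zeroed instead of
		 * being left uninitialized. strtomem() would leave them
		 * untouched.
		 */
		strtomem_pad(rec->label, name, 0);
	}

Because __builtin_object_size(dest, 1) must resolve to a constant, both macros compile only when @dest is an array whose size the compiler can see, such as a struct member; passing a plain pointer trips the BUILD_BUG_ON().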
-#endif
+/**
+ * memset_after - Set a value after a struct member to the end of a struct
+ *
+ * @obj: Address of target struct instance
+ * @v: Byte value to repeatedly write
+ * @member: struct member after which to start writing bytes
+ *
+ * This is good for clearing padding following the given member.
+ */
+#define memset_after(obj, v, member) \
+({ \
+ u8 *__ptr = (u8 *)(obj); \
+ typeof(v) __val = (v); \
+ memset(__ptr + offsetofend(typeof(*(obj)), member), __val, \
+ sizeof(*(obj)) - offsetofend(typeof(*(obj)), member)); \
+})
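For example, zeroing the implicit padding (and any later members, before they are individually filled in) so that no kernel stack data leaks out. A sketch with an illustrative struct:

	struct reply {
		u32 id;
		u16 flags;
		/* implicit padding here on most ABIs */
		u64 cookie;
	};

	static void init_reply(struct reply *r, u32 id, u16 flags, u64 cookie)
	{
		r->id = id;
		r->flags = flags;
		/* Clears everything past 'flags', padding included */
		memset_after(r, 0, flags);
		r->cookie = cookie;
	}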
/**
- * memcpy_and_pad - Copy one buffer to another with padding
- * @dest: Where to copy to
- * @dest_len: The destination buffer size
- * @src: Where to copy from
- * @count: The number of bytes to copy
- * @pad: Character to use for padding if space is left in destination.
+ * memset_startat - Set a value starting at a member to the end of a struct
+ *
+ * @obj: Address of target struct instance
+ * @v: Byte value to repeatedly write
+ * @member: struct member to start writing at
+ *
+ * Note that if there is padding between the prior member and the target
+ * member, memset_after() should be used to clear the prior padding.
*/
-static inline void memcpy_and_pad(void *dest, size_t dest_len,
- const void *src, size_t count, int pad)
-{
- if (dest_len > count) {
- memcpy(dest, src, count);
- memset(dest + count, pad, dest_len - count);
- } else
- memcpy(dest, src, dest_len);
-}
+#define memset_startat(obj, v, member) \
+({ \
+ u8 *__ptr = (u8 *)(obj); \
+ typeof(v) __val = (v); \
+ memset(__ptr + offsetof(typeof(*(obj)), member), __val, \
+ sizeof(*(obj)) - offsetof(typeof(*(obj)), member)); \
+})
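A sketch of resetting the tail of a structure while preserving its head (illustrative names):

	struct conn_state {
		spinlock_t lock;	/* must survive the reset */
		u32 retries;
		u64 bytes_sent;
	};

	static void conn_reset_counters(struct conn_state *c)
	{
		/* Zeroes 'retries' and everything after it; 'lock' is
		 * untouched. Any padding *before* 'retries' is not
		 * cleared; use memset_after(c, 0, lock) for that.
		 */
		memset_startat(c, 0, retries);
	}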
/**
* str_has_prefix - Test if a string has a given prefix
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index c28955132234..8530c7328269 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -2,8 +2,12 @@
#ifndef _LINUX_STRING_HELPERS_H_
#define _LINUX_STRING_HELPERS_H_
+#include <linux/bits.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/types.h>
+struct device;
struct file;
struct task_struct;
@@ -17,13 +21,17 @@ enum string_size_units {
void string_get_size(u64 size, u64 blk_size, enum string_size_units units,
char *buf, int len);
-#define UNESCAPE_SPACE 0x01
-#define UNESCAPE_OCTAL 0x02
-#define UNESCAPE_HEX 0x04
-#define UNESCAPE_SPECIAL 0x08
+int parse_int_array_user(const char __user *from, size_t count, int **array);
+
+#define UNESCAPE_SPACE BIT(0)
+#define UNESCAPE_OCTAL BIT(1)
+#define UNESCAPE_HEX BIT(2)
+#define UNESCAPE_SPECIAL BIT(3)
#define UNESCAPE_ANY \
(UNESCAPE_SPACE | UNESCAPE_OCTAL | UNESCAPE_HEX | UNESCAPE_SPECIAL)
+#define UNESCAPE_ALL_MASK GENMASK(3, 0)
+
int string_unescape(char *src, char *dst, size_t size, unsigned int flags);
static inline int string_unescape_inplace(char *buf, unsigned int flags)
@@ -41,22 +49,24 @@ static inline int string_unescape_any_inplace(char *buf)
return string_unescape_any(buf, buf, 0);
}
-#define ESCAPE_SPACE 0x01
-#define ESCAPE_SPECIAL 0x02
-#define ESCAPE_NULL 0x04
-#define ESCAPE_OCTAL 0x08
+#define ESCAPE_SPACE BIT(0)
+#define ESCAPE_SPECIAL BIT(1)
+#define ESCAPE_NULL BIT(2)
+#define ESCAPE_OCTAL BIT(3)
#define ESCAPE_ANY \
(ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_SPECIAL | ESCAPE_NULL)
-#define ESCAPE_NP 0x10
+#define ESCAPE_NP BIT(4)
#define ESCAPE_ANY_NP (ESCAPE_ANY | ESCAPE_NP)
-#define ESCAPE_HEX 0x20
+#define ESCAPE_HEX BIT(5)
+#define ESCAPE_NA BIT(6)
+#define ESCAPE_NAP BIT(7)
+#define ESCAPE_APPEND BIT(8)
+
+#define ESCAPE_ALL_MASK GENMASK(8, 0)
int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
unsigned int flags, const char *only);
-int string_escape_mem_ascii(const char *src, size_t isz, char *dst,
- size_t osz);
-
static inline int string_escape_mem_any_np(const char *src, size_t isz,
char *dst, size_t osz, const char *only)
{
@@ -75,8 +85,52 @@ static inline int string_escape_str_any_np(const char *src, char *dst,
return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, only);
}
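A usage sketch of the underlying helper with the new flag constants, assuming 'input' is a NUL-terminated source string (buffer size is arbitrary):

	char out[64];
	int n;

	/* Escape non-printable characters as hex; the return value is
	 * the full length of the escaped form, so n >= sizeof(out)
	 * indicates truncation, as with snprintf().
	 */
	n = string_escape_str(input, out, sizeof(out),
			      ESCAPE_ANY_NP | ESCAPE_HEX, NULL);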
+static inline void string_upper(char *dst, const char *src)
+{
+ do {
+ *dst++ = toupper(*src);
+ } while (*src++);
+}
+
+static inline void string_lower(char *dst, const char *src)
+{
+ do {
+ *dst++ = tolower(*src);
+ } while (*src++);
+}
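Both helpers copy the terminating NUL as well, so @dst must provide at least strlen(@src) + 1 bytes. A minimal sketch:

	static void show_mode(const char *mode)
	{
		char buf[16];

		if (strlen(mode) >= sizeof(buf))
			return;
		string_upper(buf, mode);	/* "fast" -> "FAST" */
		pr_info("mode: %s\n", buf);
	}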
+
char *kstrdup_quotable(const char *src, gfp_t gfp);
char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp);
char *kstrdup_quotable_file(struct file *file, gfp_t gfp);
+char **kasprintf_strarray(gfp_t gfp, const char *prefix, size_t n);
+void kfree_strarray(char **array, size_t n);
+
+char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n);
+
+static inline const char *str_yes_no(bool v)
+{
+ return v ? "yes" : "no";
+}
+
+static inline const char *str_on_off(bool v)
+{
+ return v ? "on" : "off";
+}
+
+static inline const char *str_enable_disable(bool v)
+{
+ return v ? "enable" : "disable";
+}
+
+static inline const char *str_enabled_disabled(bool v)
+{
+ return v ? "enabled" : "disabled";
+}
+
+static inline const char *str_read_write(bool v)
+{
+ return v ? "read" : "write";
+}
+
#endif
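The str_*() helpers above replace open-coded ternaries in log messages; e.g. (variables illustrative):

	pr_info("link is %s, autoneg %s\n",
		str_on_off(link_up), str_enabled_disabled(autoneg));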
diff --git a/include/linux/sungem_phy.h b/include/linux/sungem_phy.h
index 3a11fa41a131..c505f30e8b68 100644
--- a/include/linux/sungem_phy.h
+++ b/include/linux/sungem_phy.h
@@ -2,6 +2,8 @@
#ifndef __SUNGEM_PHY_H__
#define __SUNGEM_PHY_H__
+#include <linux/types.h>
+
struct mii_phy;
/* Operations supported by any kind of PHY */
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 4f6b28487f28..3e6ce288a7fc 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -76,7 +76,7 @@ struct rpc_auth {
unsigned int au_verfsize; /* size of reply verifier */
unsigned int au_ralign; /* words before UL header */
- unsigned int au_flags;
+ unsigned long au_flags;
const struct rpc_authops *au_ops;
rpc_authflavor_t au_flavor; /* pseudoflavor (note may
* differ from the flavor in
@@ -89,7 +89,8 @@ struct rpc_auth {
};
/* rpc_auth au_flags */
-#define RPCAUTH_AUTH_DATATOUCH 0x00000002
+#define RPCAUTH_AUTH_DATATOUCH (1)
+#define RPCAUTH_AUTH_UPDATE_SLACK (2)
struct rpc_auth_create_args {
rpc_authflavor_t pseudoflavor;
@@ -98,6 +99,7 @@ struct rpc_auth_create_args {
/* Flags for rpcauth_lookupcred() */
#define RPCAUTH_LOOKUP_NEW 0x01 /* Accept an uninitialised cred */
+#define RPCAUTH_LOOKUP_ASYNC 0x02 /* Don't block waiting for memory */
/*
* Client authentication ops
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
index d796058cdff2..db30a159f9d5 100644
--- a/include/linux/sunrpc/bc_xprt.h
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -1,22 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/******************************************************************************
(c) 2008 NetApp. All Rights Reserved.
-NetApp provides this source code under the GPL v2 License.
-The GPL v2 license is available at
-http://opensource.org/licenses/gpl-license.php.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
@@ -83,4 +69,3 @@ static inline void xprt_free_bc_request(struct rpc_rqst *req)
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
#endif /* _LINUX_SUNRPC_BC_XPRT_H */
-
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 0f64de7caa39..ec5a555df96f 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -14,6 +14,7 @@
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/atomic.h>
+#include <linux/kstrtox.h>
#include <linux/proc_fs.h>
/*
@@ -45,7 +46,8 @@
*/
struct cache_head {
struct hlist_node cache_list;
- time64_t expiry_time; /* After time time, don't use the data */
+ time64_t expiry_time; /* After time expiry_time, don't use
+ * the data */
time64_t last_refresh; /* If CACHE_PENDING, this is when upcall was
* sent, else this is when update was
 * received, though it is always set to
@@ -119,17 +121,17 @@ struct cache_detail {
struct net *net;
};
-
/* this must be embedded in any request structure that
* identifies an object that will want a callback on
* a cache fill
*/
struct cache_req {
struct cache_deferred_req *(*defer)(struct cache_req *req);
- int thread_wait; /* How long (jiffies) we can block the
- * current thread to wait for updates.
- */
+ unsigned long thread_wait; /* How long (jiffies) we can block the
+ * current thread to wait for updates.
+ */
};
+
/* this must be embedded in a deferred_request that is being
* delayed awaiting cache-fill
*/
@@ -179,6 +181,9 @@ sunrpc_cache_update(struct cache_detail *detail,
extern int
sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h);
+extern int
+sunrpc_cache_pipe_upcall_timeout(struct cache_detail *detail,
+ struct cache_head *h);
extern void cache_clean_deferred(void *owner);
@@ -206,11 +211,11 @@ static inline void cache_put(struct cache_head *h, struct cache_detail *cd)
static inline bool cache_is_expired(struct cache_detail *detail, struct cache_head *h)
{
+ if (h->expiry_time < seconds_since_boot())
+ return true;
if (!test_bit(CACHE_VALID, &h->flags))
return false;
-
- return (h->expiry_time < seconds_since_boot()) ||
- (detail->flush_time >= h->last_refresh);
+ return detail->flush_time >= h->last_refresh;
}
extern int cache_check(struct cache_detail *detail,
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index ca7e108248e2..770ef2cb5775 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -14,6 +14,7 @@
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
+#include <linux/refcount.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/sched.h>
@@ -29,15 +30,17 @@
#include <linux/sunrpc/xprtmultipath.h>
struct rpc_inode;
+struct rpc_sysfs_client;
/*
* The high-level client handle
*/
struct rpc_clnt {
- atomic_t cl_count; /* Number of references */
+ refcount_t cl_count; /* Number of references */
unsigned int cl_clid; /* client id */
struct list_head cl_clients; /* Global list of clients */
struct list_head cl_tasks; /* List of tasks */
+ atomic_t cl_pid; /* task PID counter */
spinlock_t cl_lock; /* spinlock */
struct rpc_xprt __rcu * cl_xprt; /* transport */
const struct rpc_procinfo *cl_procinfo; /* procedure info */
@@ -71,8 +74,16 @@ struct rpc_clnt {
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct dentry *cl_debugfs; /* debugfs directory */
#endif
- struct rpc_xprt_iter cl_xpi;
+ struct rpc_sysfs_client *cl_sysfs; /* sysfs directory */
+ /* cl_work is only needed once cl_xpi is no longer used,
+ * and the two members are of similar size, so they can
+ * share a union.
+ */
+ union {
+ struct rpc_xprt_iter cl_xpi;
+ struct work_struct cl_work;
+ };
const struct cred *cl_cred;
+ unsigned int cl_max_connect; /* max number of transports not to the same IP */
};
/*
@@ -127,6 +138,7 @@ struct rpc_create_args {
char *client_name;
struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
const struct cred *cred;
+ unsigned int max_connect;
};
struct rpc_add_xprt_test {
@@ -148,6 +160,7 @@ struct rpc_add_xprt_test {
#define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
#define RPC_CLNT_CREATE_SOFTERR (1UL << 10)
#define RPC_CLNT_CREATE_REUSEPORT (1UL << 11)
+#define RPC_CLNT_CREATE_CONNECTED (1UL << 12)
struct rpc_clnt *rpc_create(struct rpc_create_args *args);
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
@@ -221,13 +234,19 @@ int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *,
struct rpc_xprt_switch *,
struct rpc_xprt *,
void *);
+void rpc_clnt_manage_trunked_xprts(struct rpc_clnt *);
+void rpc_clnt_probe_trunked_xprts(struct rpc_clnt *,
+ struct rpc_add_xprt_test *);
const char *rpc_proc_name(const struct rpc_task *task);
void rpc_clnt_xprt_switch_put(struct rpc_clnt *);
void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *);
+void rpc_clnt_xprt_switch_remove_xprt(struct rpc_clnt *, struct rpc_xprt *);
bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
const struct sockaddr *sap);
+void rpc_clnt_xprt_set_online(struct rpc_clnt *clnt, struct rpc_xprt *xprt);
+void rpc_clnt_disconnect(struct rpc_clnt *clnt);
void rpc_cleanup_clids(void);
static inline int rpc_reply_expected(struct rpc_task *task)
@@ -236,4 +255,9 @@ static inline int rpc_reply_expected(struct rpc_task *task)
(task->tk_msg.rpc_proc->p_decode != NULL);
}
+static inline void rpc_task_close_connection(struct rpc_task *task)
+{
+ if (task->tk_xprt)
+ xprt_force_disconnect(task->tk_xprt);
+}
#endif /* _LINUX_SUNRPC_CLNT_H */
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index 48c1b1674cbf..bf4ac8a0268c 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -21,6 +21,7 @@
struct gss_ctx {
struct gss_api_mech *mech_type;
void *internal_ctx_id;
+ unsigned int slack, align;
};
#define GSS_C_NO_BUFFER ((struct xdr_netobj) 0)
@@ -66,6 +67,7 @@ u32 gss_wrap(
u32 gss_unwrap(
struct gss_ctx *ctx_id,
int offset,
+ int len,
struct xdr_buf *inbuf);
u32 gss_delete_sec_context(
struct gss_ctx **ctx_id);
@@ -82,6 +84,7 @@ struct pf_desc {
u32 service;
char *name;
char *auth_domain_name;
+ struct auth_domain *domain;
bool datatouch;
};
@@ -126,6 +129,7 @@ struct gss_api_ops {
u32 (*gss_unwrap)(
struct gss_ctx *ctx_id,
int offset,
+ int len,
struct xdr_buf *buf);
void (*gss_delete_sec_context)(
void *internal_ctx_id);
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index c1d77dd8ed41..91f43d86879d 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -83,7 +83,7 @@ struct gss_krb5_enctype {
u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset,
struct xdr_buf *buf,
struct page **pages); /* v2 encryption function */
- u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset,
+ u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, u32 len,
struct xdr_buf *buf, u32 *headskip,
u32 *tailskip); /* v2 decryption function */
};
@@ -141,14 +141,12 @@ enum sgn_alg {
SGN_ALG_MD2_5 = 0x0001,
SGN_ALG_DES_MAC = 0x0002,
SGN_ALG_3 = 0x0003, /* not published */
- SGN_ALG_HMAC_MD5 = 0x0011, /* microsoft w2k; no support */
SGN_ALG_HMAC_SHA1_DES3_KD = 0x0004
};
enum seal_alg {
SEAL_ALG_NONE = 0xffff,
SEAL_ALG_DES = 0x0000,
SEAL_ALG_1 = 0x0001, /* not published */
- SEAL_ALG_MICROSOFT_RC4 = 0x0010,/* microsoft w2k; no support */
SEAL_ALG_DES3KD = 0x0002
};
@@ -255,7 +253,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset,
struct xdr_buf *outbuf, struct page **pages);
u32
-gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
+gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, int len,
struct xdr_buf *buf);
@@ -312,18 +310,9 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
struct page **pages);
u32
-gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset,
+gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
struct xdr_buf *buf, u32 *plainoffset,
u32 *plainlen);
-int
-krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
- struct crypto_sync_skcipher *cipher,
- unsigned char *cksum);
-
-int
-krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
- struct crypto_sync_skcipher *cipher,
- s32 seqnum);
void
gss_krb5_make_confounder(char *p, u32 conflen);
diff --git a/include/linux/sunrpc/gss_krb5_enctypes.h b/include/linux/sunrpc/gss_krb5_enctypes.h
index 981c89cef19d..87eea679d750 100644
--- a/include/linux/sunrpc/gss_krb5_enctypes.h
+++ b/include/linux/sunrpc/gss_krb5_enctypes.h
@@ -13,15 +13,13 @@
#ifdef CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES
/*
- * NB: This list includes encryption types that were deprecated
- * by RFC 8429 (DES3_CBC_SHA1 and ARCFOUR_HMAC).
+ * NB: This list includes DES3_CBC_SHA1, which was deprecated by RFC 8429.
*
* ENCTYPE_AES256_CTS_HMAC_SHA1_96
* ENCTYPE_AES128_CTS_HMAC_SHA1_96
* ENCTYPE_DES3_CBC_SHA1
- * ENCTYPE_ARCFOUR_HMAC
*/
-#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23"
+#define KRB5_SUPPORTED_ENCTYPES "18,17,16"
#else /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */
@@ -32,12 +30,11 @@
* ENCTYPE_AES256_CTS_HMAC_SHA1_96
* ENCTYPE_AES128_CTS_HMAC_SHA1_96
* ENCTYPE_DES3_CBC_SHA1
- * ENCTYPE_ARCFOUR_HMAC
* ENCTYPE_DES_CBC_MD5
* ENCTYPE_DES_CBC_CRC
* ENCTYPE_DES_CBC_MD4
*/
-#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23,3,1,2"
+#define KRB5_SUPPORTED_ENCTYPES "18,17,16,3,1,2"
#endif /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h
index bea40d9f03a1..02117ed0fa2e 100644
--- a/include/linux/sunrpc/msg_prot.h
+++ b/include/linux/sunrpc/msg_prot.h
@@ -10,9 +10,6 @@
#define RPC_VERSION 2
-/* size of an XDR encoding unit in bytes, i.e. 32bit */
-#define XDR_UNIT (4)
-
/* spec defines authentication flavor as an unsigned 32 bit integer */
typedef u32 rpc_authflavor_t;
@@ -23,6 +20,7 @@ enum rpc_auth_flavors {
RPC_AUTH_DES = 3,
RPC_AUTH_KRB = 4,
RPC_AUTH_GSS = 6,
+ RPC_AUTH_TLS = 7,
RPC_AUTH_MAXFLAVOR = 8,
/* pseudoflavors: */
RPC_AUTH_GSS_KRB5 = 390003,
@@ -143,7 +141,7 @@ typedef __be32 rpc_fraghdr;
/*
* Well-known netids. See:
*
- * http://www.iana.org/assignments/rpc-netids/rpc-netids.xhtml
+ * https://www.iana.org/assignments/rpc-netids/rpc-netids.xhtml
*/
#define RPCBIND_NETID_UDP "udp"
#define RPCBIND_NETID_TCP "tcp"
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h
index 92d182fd8e3b..4af31bbc8802 100644
--- a/include/linux/sunrpc/rpc_rdma.h
+++ b/include/linux/sunrpc/rpc_rdma.h
@@ -58,7 +58,8 @@ enum {
enum {
rpcrdma_fixed_maxsz = 4,
rpcrdma_segment_maxsz = 4,
- rpcrdma_readchunk_maxsz = 2 + rpcrdma_segment_maxsz,
+ rpcrdma_readseg_maxsz = 1 + rpcrdma_segment_maxsz,
+ rpcrdma_readchunk_maxsz = 1 + rpcrdma_readseg_maxsz,
};
/*
@@ -123,4 +124,78 @@ rpcrdma_decode_buffer_size(u8 val)
return ((unsigned int)val + 1) << 10;
}
+/**
+ * xdr_encode_rdma_segment - Encode contents of an RDMA segment
+ * @p: Pointer into a send buffer
+ * @handle: The RDMA handle to encode
+ * @length: The RDMA length to encode
+ * @offset: The RDMA offset to encode
+ *
+ * Return value:
+ * Pointer to the XDR position that follows the encoded RDMA segment
+ */
+static inline __be32 *xdr_encode_rdma_segment(__be32 *p, u32 handle,
+ u32 length, u64 offset)
+{
+ *p++ = cpu_to_be32(handle);
+ *p++ = cpu_to_be32(length);
+ return xdr_encode_hyper(p, offset);
+}
+
+/**
+ * xdr_encode_read_segment - Encode contents of a Read segment
+ * @p: Pointer into a send buffer
+ * @position: The position to encode
+ * @handle: The RDMA handle to encode
+ * @length: The RDMA length to encode
+ * @offset: The RDMA offset to encode
+ *
+ * Return value:
+ * Pointer to the XDR position that follows the encoded Read segment
+ */
+static inline __be32 *xdr_encode_read_segment(__be32 *p, u32 position,
+ u32 handle, u32 length,
+ u64 offset)
+{
+ *p++ = cpu_to_be32(position);
+ return xdr_encode_rdma_segment(p, handle, length, offset);
+}
+
+/**
+ * xdr_decode_rdma_segment - Decode contents of an RDMA segment
+ * @p: Pointer to the undecoded RDMA segment
+ * @handle: Upon return, the RDMA handle
+ * @length: Upon return, the RDMA length
+ * @offset: Upon return, the RDMA offset
+ *
+ * Return value:
+ * Pointer to the XDR item that follows the RDMA segment
+ */
+static inline __be32 *xdr_decode_rdma_segment(__be32 *p, u32 *handle,
+ u32 *length, u64 *offset)
+{
+ *handle = be32_to_cpup(p++);
+ *length = be32_to_cpup(p++);
+ return xdr_decode_hyper(p, offset);
+}
+
+/**
+ * xdr_decode_read_segment - Decode contents of a Read segment
+ * @p: Pointer to the undecoded Read segment
+ * @position: Upon return, the segment's position
+ * @handle: Upon return, the RDMA handle
+ * @length: Upon return, the RDMA length
+ * @offset: Upon return, the RDMA offset
+ *
+ * Return value:
+ * Pointer to the XDR item that follows the Read segment
+ */
+static inline __be32 *xdr_decode_read_segment(__be32 *p, u32 *position,
+ u32 *handle, u32 *length,
+ u64 *offset)
+{
+ *position = be32_to_cpup(p++);
+ return xdr_decode_rdma_segment(p, handle, length, offset);
+}
+
#endif /* _LINUX_SUNRPC_RPC_RDMA_H */
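A round-trip sketch for the encode/decode pairs above, using the sizing enum from earlier in the header (the values are arbitrary):

	__be32 buf[rpcrdma_readseg_maxsz];	/* 1 + 4 XDR words */
	u32 position, handle, length;
	u64 offset;

	xdr_encode_read_segment(buf, 0, 0xdeadbeef, 1024, 0x10000);
	xdr_decode_read_segment(buf, &position, &handle, &length, &offset);
	/* position == 0, handle == 0xdeadbeef,
	 * length == 1024, offset == 0x10000
	 */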
diff --git a/include/linux/sunrpc/rpc_rdma_cid.h b/include/linux/sunrpc/rpc_rdma_cid.h
new file mode 100644
index 000000000000..be24ab2baa6a
--- /dev/null
+++ b/include/linux/sunrpc/rpc_rdma_cid.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates.
+ */
+
+#ifndef RPC_RDMA_CID_H
+#define RPC_RDMA_CID_H
+
+/*
+ * The rpc_rdma_cid struct records completion ID information. A
+ * completion ID matches an incoming Send or Receive completion
+ * to a Completion Queue and to a previous ib_post_*(). The ID
+ * can then be displayed in an error message or recorded in a
+ * trace record.
+ *
+ * This struct is shared between the server and client RPC/RDMA
+ * transport implementations.
+ */
+struct rpc_rdma_cid {
+ u32 ci_queue_id;
+ int ci_completion_id;
+};
+
+#endif /* RPC_RDMA_CID_H */
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index a6ef35184ef1..b8ca3ecaf8d7 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -61,8 +61,6 @@ struct rpc_task {
struct rpc_wait tk_wait; /* RPC wait */
} u;
- int tk_rpc_status; /* Result of last RPC operation */
-
/*
* RPC call state
*/
@@ -82,6 +80,8 @@ struct rpc_task {
ktime_t tk_start; /* RPC task init timestamp */
pid_t tk_owner; /* Process id for batching tasks */
+
+ int tk_rpc_status; /* Result of last RPC operation */
unsigned short tk_flags; /* misc flags */
unsigned short tk_timeouts; /* maj timeouts */
@@ -121,9 +121,9 @@ struct rpc_task_setup {
*/
#define RPC_TASK_ASYNC 0x0001 /* is an async task */
#define RPC_TASK_SWAPPER 0x0002 /* is swapping in/out */
+#define RPC_TASK_MOVEABLE 0x0004 /* nfs4.1+ rpc tasks */
#define RPC_TASK_NULLCREDS 0x0010 /* Use AUTH_NULL credential */
#define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */
-#define RPC_TASK_ROOTCREDS 0x0040 /* force root creds */
#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */
#define RPC_TASK_NO_ROUND_ROBIN 0x0100 /* send requests on "main" xprt */
#define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */
@@ -132,12 +132,14 @@ struct rpc_task_setup {
#define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */
#define RPC_TASK_NOCONNECT 0x2000 /* return ENOTCONN if not connected */
#define RPC_TASK_NO_RETRANS_TIMEOUT 0x4000 /* wait forever for a reply */
+#define RPC_TASK_CRED_NOREF 0x8000 /* No refcount on the credential */
#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
#define RPC_IS_SOFT(t) ((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT))
#define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN)
#define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT)
+#define RPC_IS_MOVEABLE(t) ((t)->tk_flags & RPC_TASK_MOVEABLE)
#define RPC_TASK_RUNNING 0
#define RPC_TASK_QUEUED 1
@@ -147,25 +149,13 @@ struct rpc_task_setup {
#define RPC_TASK_MSG_PIN_WAIT 5
#define RPC_TASK_SIGNALLED 6
-#define RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
-#define rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_test_and_set_running(t) \
test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
-#define rpc_clear_running(t) \
- do { \
- smp_mb__before_atomic(); \
- clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \
- smp_mb__after_atomic(); \
- } while (0)
+#define rpc_clear_running(t) clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define RPC_IS_QUEUED(t) test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define rpc_set_queued(t) set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
-#define rpc_clear_queued(t) \
- do { \
- smp_mb__before_atomic(); \
- clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \
- smp_mb__after_atomic(); \
- } while (0)
+#define rpc_clear_queued(t) clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
@@ -219,11 +209,17 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req);
void rpc_put_task(struct rpc_task *);
void rpc_put_task_async(struct rpc_task *);
+bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status);
+void rpc_task_try_cancel(struct rpc_task *task, int error);
void rpc_signal_task(struct rpc_task *);
void rpc_exit_task(struct rpc_task *);
void rpc_exit(struct rpc_task *, int);
void rpc_release_calldata(const struct rpc_call_ops *, void *);
void rpc_killall_tasks(struct rpc_clnt *);
+unsigned long rpc_cancel_tasks(struct rpc_clnt *clnt, int error,
+ bool (*fnmatch)(const struct rpc_task *,
+ const void *),
+ const void *data);
void rpc_execute(struct rpc_task *);
void rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
void rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
@@ -262,7 +258,7 @@ int rpc_malloc(struct rpc_task *);
void rpc_free(struct rpc_task *);
int rpciod_up(void);
void rpciod_down(void);
-int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *);
+int rpc_wait_for_completion_task(struct rpc_task *task);
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct net;
void rpc_show_tasks(struct net *);
@@ -272,11 +268,7 @@ void rpc_destroy_mempool(void);
extern struct workqueue_struct *rpciod_workqueue;
extern struct workqueue_struct *xprtiod_workqueue;
void rpc_prepare_task(struct rpc_task *task);
-
-static inline int rpc_wait_for_completion_task(struct rpc_task *task)
-{
- return __rpc_wait_for_completion_task(task, NULL);
-}
+gfp_t rpc_task_gfp_mask(void);
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static inline const char * rpc_qname(const struct rpc_wait_queue *q)
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 1afe38eb33f7..88de45491376 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -19,6 +19,7 @@
#include <linux/sunrpc/svcauth.h>
#include <linux/wait.h>
#include <linux/mm.h>
+#include <linux/pagevec.h>
/* statistics for svc_pool structures */
struct svc_pool_stats {
@@ -51,25 +52,6 @@ struct svc_pool {
unsigned long sp_flags;
} ____cacheline_aligned_in_smp;
-struct svc_serv;
-
-struct svc_serv_ops {
- /* Callback to use when last thread exits. */
- void (*svo_shutdown)(struct svc_serv *, struct net *);
-
- /* function for service threads to run */
- int (*svo_function)(void *);
-
- /* queue up a transport for servicing */
- void (*svo_enqueue_xprt)(struct svc_xprt *);
-
- /* set up thread (or whatever) execution context */
- int (*svo_setup)(struct svc_serv *, struct svc_pool *, int);
-
- /* optional module to count when adding threads (pooled svcs only) */
- struct module *svo_module;
-};
-
/*
* RPC service.
*
@@ -84,6 +66,7 @@ struct svc_serv {
struct svc_program * sv_program; /* RPC program */
struct svc_stat * sv_stats; /* RPC statistics */
spinlock_t sv_lock;
+ struct kref sv_refcnt;
unsigned int sv_nrthreads; /* # of server threads */
unsigned int sv_maxconn; /* max connections allowed or
* '0' causing max to be based
@@ -101,7 +84,8 @@ struct svc_serv {
unsigned int sv_nrpools; /* number of thread pools */
struct svc_pool * sv_pools; /* array of thread pools */
- const struct svc_serv_ops *sv_ops; /* server operations */
+ int (*sv_threadfn)(void *data);
+
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
struct list_head sv_cb_list; /* queue for callback requests
* that arrive over the same
@@ -113,15 +97,43 @@ struct svc_serv {
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};
-/*
- * We use sv_nrthreads as a reference count. svc_destroy() drops
- * this refcount, so we need to bump it up around operations that
- * change the number of threads. Horrible, but there it is.
- * Should be called with the "service mutex" held.
+/**
+ * svc_get() - increment reference count on a SUNRPC serv
+ * @serv: the svc_serv to have count incremented
+ *
+ * Returns: the svc_serv that was passed in.
+ */
+static inline struct svc_serv *svc_get(struct svc_serv *serv)
+{
+ kref_get(&serv->sv_refcnt);
+ return serv;
+}
+
+void svc_destroy(struct kref *);
+
+/**
+ * svc_put - decrement reference count on a SUNRPC serv
+ * @serv: the svc_serv to have count decremented
+ *
+ * When the reference count reaches zero, svc_destroy()
+ * is called to clean up and free the serv.
+ */
+static inline void svc_put(struct svc_serv *serv)
+{
+ kref_put(&serv->sv_refcnt, svc_destroy);
+}
+
+/**
+ * svc_put_not_last - decrement non-final reference count on SUNRPC serv
+ * @serv: the svc_serv to have count decremented
+ *
+ * Returns: %true if the refcount was decremented.
+ *
+ * If the refcount is 1, it is not decremented; instead, failure is reported.
*/
-static inline void svc_get(struct svc_serv *serv)
+static inline bool svc_put_not_last(struct svc_serv *serv)
{
- serv->sv_nrthreads++;
+ return refcount_dec_not_one(&serv->sv_refcnt.refcount);
}
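With the kref conversion, the usual lifetime pattern looks like this (a sketch; 'nn->nfsd_serv' is an illustrative source, and any holder of a counted reference works the same way):

	struct svc_serv *serv = svc_get(nn->nfsd_serv);	/* +1 ref */

	/* ... hand 'serv' to another thread or context ... */

	svc_put(serv);	/* -1 ref; the final put calls svc_destroy() */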
/*
@@ -245,15 +257,19 @@ struct svc_rqst {
void * rq_xprt_ctxt; /* transport specific context ptr */
struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */
- size_t rq_xprt_hlen; /* xprt header len */
struct xdr_buf rq_arg;
+ struct xdr_stream rq_arg_stream;
+ struct xdr_stream rq_res_stream;
+ struct page *rq_scratch_page;
struct xdr_buf rq_res;
struct page *rq_pages[RPCSVC_MAXPAGES + 1];
struct page * *rq_respages; /* points into rq_pages */
struct page * *rq_next_page; /* next reply page to use */
struct page * *rq_page_end; /* one past the last page */
+ struct pagevec rq_pvec;
struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */
+ struct bio_vec rq_bvec[RPCSVC_MAXPAGES];
__be32 rq_xid; /* transmission id */
u32 rq_prog; /* program number */
@@ -271,13 +287,13 @@ struct svc_rqst {
#define RQ_VICTIM (5) /* about to be shut down */
#define RQ_BUSY (6) /* request is busy */
#define RQ_DATA (7) /* request has data */
-#define RQ_AUTHERR (8) /* Request status is auth error */
unsigned long rq_flags; /* flags field */
ktime_t rq_qtime; /* enqueue time */
void * rq_argp; /* decoded arguments */
void * rq_resp; /* xdr'd results */
void * rq_auth_data; /* flavor-specific data */
+ __be32 rq_auth_stat; /* authentication status */
int rq_auth_slack; /* extra space xdr code
* should leave in head
* for krb5i, krb5p.
@@ -299,6 +315,7 @@ struct svc_rqst {
struct net *rq_bc_net; /* pointer to backchannel's
* net namespace
*/
+ void ** rq_lease_breaker; /* The v4 client breaking a lease */
};
#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
@@ -377,10 +394,10 @@ struct svc_deferred_req {
size_t addrlen;
struct sockaddr_storage daddr; /* where reply must come from */
size_t daddrlen;
+ void *xprt_ctxt;
struct cache_deferred_req handle;
- size_t xprt_hlen;
int argslen;
- __be32 args[0];
+ __be32 args[];
};
struct svc_process_info {
@@ -436,10 +453,7 @@ struct svc_version {
/* Need xprt with congestion control */
bool vs_need_cong_ctrl;
- /* Override dispatch function (e.g. when caching replies).
- * A return value of 0 means drop the request.
- * vs_dispatch == NULL means use default dispatcher.
- */
+ /* Dispatch function */
int (*vs_dispatch)(struct svc_rqst *, __be32 *);
};
@@ -450,63 +464,39 @@ struct svc_procedure {
/* process the request: */
__be32 (*pc_func)(struct svc_rqst *);
/* XDR decode args: */
- int (*pc_decode)(struct svc_rqst *, __be32 *data);
+ bool (*pc_decode)(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr);
/* XDR encode result: */
- int (*pc_encode)(struct svc_rqst *, __be32 *data);
+ bool (*pc_encode)(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr);
/* XDR free result: */
void (*pc_release)(struct svc_rqst *);
unsigned int pc_argsize; /* argument struct size */
+ unsigned int pc_argzero; /* how much of argument to clear */
unsigned int pc_ressize; /* result struct size */
unsigned int pc_cachetype; /* cache info (NFS) */
unsigned int pc_xdrressize; /* maximum size of XDR reply */
+ const char * pc_name; /* for display */
};
/*
- * Mode for mapping cpus to pools.
- */
-enum {
- SVC_POOL_AUTO = -1, /* choose one of the others */
- SVC_POOL_GLOBAL, /* no mapping, just a single global pool
- * (legacy & UP mode) */
- SVC_POOL_PERCPU, /* one pool per cpu */
- SVC_POOL_PERNODE /* one pool per numa node */
-};
-
-struct svc_pool_map {
- int count; /* How many svc_servs use us */
- int mode; /* Note: int not enum to avoid
- * warnings about "enumeration value
- * not handled in switch" */
- unsigned int npools;
- unsigned int *pool_to; /* maps pool id to cpu or node */
- unsigned int *to_pool; /* maps cpu or node to pool id */
-};
-
-extern struct svc_pool_map svc_pool_map;
-
-/*
* Function prototypes.
*/
int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
int svc_bind(struct svc_serv *serv, struct net *net);
struct svc_serv *svc_create(struct svc_program *, unsigned int,
- const struct svc_serv_ops *);
+ int (*threadfn)(void *data));
struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
struct svc_pool *pool, int node);
-struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
- struct svc_pool *pool, int node);
+void svc_rqst_replace_page(struct svc_rqst *rqstp,
+ struct page *page);
void svc_rqst_free(struct svc_rqst *);
void svc_exit_thread(struct svc_rqst *);
-unsigned int svc_pool_map_get(void);
-void svc_pool_map_put(void);
struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
- const struct svc_serv_ops *);
+ int (*threadfn)(void *data));
int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
-int svc_set_num_threads_sync(struct svc_serv *, struct svc_pool *, int);
int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
-void svc_destroy(struct svc_serv *);
-void svc_shutdown_net(struct svc_serv *, struct net *);
int svc_process(struct svc_rqst *);
int bc_svc_process(struct svc_serv *, struct rpc_rqst *,
struct svc_rqst *);
@@ -515,15 +505,17 @@ int svc_register(const struct svc_serv *, struct net *, const int,
void svc_wake_up(struct svc_serv *);
void svc_reserve(struct svc_rqst *rqstp, int space);
-struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu);
+struct svc_pool *svc_pool_for_cpu(struct svc_serv *serv);
char * svc_print_addr(struct svc_rqst *, char *, size_t);
+const char * svc_proc_name(const struct svc_rqst *rqstp);
+int svc_encode_result_payload(struct svc_rqst *rqstp,
+ unsigned int offset,
+ unsigned int length);
unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
- struct page **pages,
- struct kvec *first, size_t total);
+ struct xdr_buf *payload);
char *svc_fill_symlink_pathname(struct svc_rqst *rqstp,
struct kvec *first, void *p,
size_t total);
-__be32 svc_return_autherr(struct svc_rqst *rqstp, __be32 auth_err);
__be32 svc_generic_init_request(struct svc_rqst *rqstp,
const struct svc_program *progp,
struct svc_process_info *procinfo);
@@ -552,4 +544,53 @@ static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space)
svc_reserve(rqstp, space + rqstp->rq_auth_slack);
}
+/**
+ * svcxdr_init_decode - Prepare an xdr_stream for Call decoding
+ * @rqstp: controlling server RPC transaction context
+ *
+ * This function currently assumes the RPC header in rq_arg has
+ * already been decoded. Upon return, xdr->p points to the
+ * location of the upper layer header.
+ */
+static inline void svcxdr_init_decode(struct svc_rqst *rqstp)
+{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
+ struct xdr_buf *buf = &rqstp->rq_arg;
+ struct kvec *argv = buf->head;
+
+ /*
+ * svc_getnl() and friends do not keep the xdr_buf's ::len
+ * field up to date. Refresh that field before initializing
+ * the argument decoding stream.
+ */
+ buf->len = buf->head->iov_len + buf->page_len + buf->tail->iov_len;
+
+ xdr_init_decode(xdr, buf, argv->iov_base, NULL);
+ xdr_set_scratch_page(xdr, rqstp->rq_scratch_page);
+}
+
+/**
+ * svcxdr_init_encode - Prepare an xdr_stream for svc Reply encoding
+ * @rqstp: controlling server RPC transaction context
+ *
+ */
+static inline void svcxdr_init_encode(struct svc_rqst *rqstp)
+{
+ struct xdr_stream *xdr = &rqstp->rq_res_stream;
+ struct xdr_buf *buf = &rqstp->rq_res;
+ struct kvec *resv = buf->head;
+
+ xdr_reset_scratch_buffer(xdr);
+
+ xdr->buf = buf;
+ xdr->iov = resv;
+ xdr->p = resv->iov_base + resv->iov_len;
+ xdr->end = resv->iov_base + PAGE_SIZE - rqstp->rq_auth_slack;
+ buf->len = resv->iov_len;
+ xdr->page_ptr = buf->pages - 1;
+ buf->buflen = PAGE_SIZE * (rqstp->rq_page_end - buf->pages);
+ buf->buflen -= rqstp->rq_auth_slack;
+ xdr->rqst = NULL;
+}
+
#endif /* SUNRPC_SVC_H */
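With the xdr_stream-based pc_decode/pc_encode signatures above, a server procedure decoder reduces to stream operations. A sketch (the argument struct and function are illustrative):

	struct myproc_args {
		u32 flags;
	};

	static bool myproc_decode(struct svc_rqst *rqstp,
				  struct xdr_stream *xdr)
	{
		struct myproc_args *argp = rqstp->rq_argp;
		__be32 *p;

		p = xdr_inline_decode(xdr, XDR_UNIT);
		if (!p)
			return false;	/* short or malformed Call */
		argp->flags = be32_to_cpup(p);
		return true;
	}

svcxdr_init_decode() has already positioned the stream at the upper-layer header, so the decoder never touches rq_arg directly.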
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 40f65888dd38..24aa159d29a7 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -46,12 +46,16 @@
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/rpc_rdma.h>
+#include <linux/sunrpc/rpc_rdma_cid.h>
+#include <linux/sunrpc/svc_rdma_pcl.h>
+
+#include <linux/percpu_counter.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
-#define SVCRDMA_DEBUG
/* Default and maximum inline threshold sizes */
enum {
+ RPCRDMA_PULLUP_THRESH = RPCRDMA_V1_DEF_INLINE_SIZE >> 1,
RPCRDMA_DEF_INLINE_THRESH = 4096,
RPCRDMA_MAX_INLINE_THRESH = 65536
};
@@ -62,15 +66,10 @@ extern unsigned int svcrdma_max_requests;
extern unsigned int svcrdma_max_bc_requests;
extern unsigned int svcrdma_max_req_size;
-extern atomic_t rdma_stat_recv;
-extern atomic_t rdma_stat_read;
-extern atomic_t rdma_stat_write;
-extern atomic_t rdma_stat_sq_starve;
-extern atomic_t rdma_stat_rq_starve;
-extern atomic_t rdma_stat_rq_poll;
-extern atomic_t rdma_stat_rq_prod;
-extern atomic_t rdma_stat_sq_poll;
-extern atomic_t rdma_stat_sq_prod;
+extern struct percpu_counter svcrdma_stat_read;
+extern struct percpu_counter svcrdma_stat_recv;
+extern struct percpu_counter svcrdma_stat_sq_starve;
+extern struct percpu_counter svcrdma_stat_write;
struct svcxprt_rdma {
struct svc_xprt sc_xprt; /* SVC transport structure */
@@ -91,10 +90,12 @@ struct svcxprt_rdma {
struct ib_pd *sc_pd;
spinlock_t sc_send_lock;
- struct list_head sc_send_ctxts;
+ struct llist_head sc_send_ctxts;
spinlock_t sc_rw_ctxt_lock;
- struct list_head sc_rw_ctxts;
+ struct llist_head sc_rw_ctxts;
+ u32 sc_pending_recvs;
+ u32 sc_recv_batch;
struct list_head sc_rq_dto_q;
spinlock_t sc_rq_dto_lock;
struct ib_qp *sc_qp;
@@ -105,10 +106,11 @@ struct svcxprt_rdma {
wait_queue_head_t sc_send_wait; /* SQ exhaustion waitlist */
unsigned long sc_flags;
- struct list_head sc_read_complete_q;
struct work_struct sc_work;
struct llist_head sc_recv_ctxts;
+
+ atomic_t sc_completion_ids;
};
/* sc_flags */
#define RDMAXPRT_CONN_PENDING 3
@@ -129,51 +131,65 @@ struct svc_rdma_recv_ctxt {
struct list_head rc_list;
struct ib_recv_wr rc_recv_wr;
struct ib_cqe rc_cqe;
+ struct rpc_rdma_cid rc_cid;
struct ib_sge rc_recv_sge;
void *rc_recv_buf;
- struct xdr_buf rc_arg;
+ struct xdr_stream rc_stream;
bool rc_temp;
u32 rc_byte_len;
unsigned int rc_page_count;
- unsigned int rc_hdr_count;
u32 rc_inv_rkey;
- struct page *rc_pages[RPCSVC_MAXPAGES];
+ __be32 rc_msgtype;
+
+ struct svc_rdma_pcl rc_call_pcl;
+
+ struct svc_rdma_pcl rc_read_pcl;
+ struct svc_rdma_chunk *rc_cur_result_payload;
+ struct svc_rdma_pcl rc_write_pcl;
+ struct svc_rdma_pcl rc_reply_pcl;
};
struct svc_rdma_send_ctxt {
- struct list_head sc_list;
+ struct llist_node sc_node;
+ struct rpc_rdma_cid sc_cid;
+
struct ib_send_wr sc_send_wr;
struct ib_cqe sc_cqe;
+ struct completion sc_done;
+ struct xdr_buf sc_hdrbuf;
+ struct xdr_stream sc_stream;
void *sc_xprt_buf;
- int sc_page_count;
int sc_cur_sge_no;
- struct page *sc_pages[RPCSVC_MAXPAGES];
+
struct ib_sge sc_sges[];
};
/* svc_rdma_backchannel.c */
-extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
- __be32 *rdma_resp,
- struct xdr_buf *rcvbuf);
+extern void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp,
+ struct svc_rdma_recv_ctxt *rctxt);
/* svc_rdma_recvfrom.c */
extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma);
extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma);
+extern struct svc_rdma_recv_ctxt *
+ svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma);
extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
struct svc_rdma_recv_ctxt *ctxt);
extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma);
+extern void svc_rdma_release_rqst(struct svc_rqst *rqstp);
extern int svc_rdma_recvfrom(struct svc_rqst *);
/* svc_rdma_rw.c */
extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma);
-extern int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma,
- struct svc_rqst *rqstp,
- struct svc_rdma_recv_ctxt *head, __be32 *p);
extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
- __be32 *wr_ch, struct xdr_buf *xdr);
+ const struct svc_rdma_chunk *chunk,
+ const struct xdr_buf *xdr);
extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
- __be32 *rp_ch, bool writelist,
- struct xdr_buf *xdr);
+ const struct svc_rdma_recv_ctxt *rctxt,
+ const struct xdr_buf *xdr);
+extern int svc_rdma_process_read_list(struct svcxprt_rdma *rdma,
+ struct svc_rqst *rqstp,
+ struct svc_rdma_recv_ctxt *head);
/* svc_rdma_sendto.c */
extern void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma);
@@ -181,20 +197,22 @@ extern struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma);
extern void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *ctxt);
-extern int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr);
-extern void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
- struct svc_rdma_send_ctxt *ctxt,
- unsigned int len);
+extern int svc_rdma_send(struct svcxprt_rdma *rdma,
+ struct svc_rdma_send_ctxt *ctxt);
extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
- struct svc_rdma_send_ctxt *ctxt,
- struct xdr_buf *xdr, __be32 *wr_lst);
+ struct svc_rdma_send_ctxt *sctxt,
+ const struct svc_rdma_recv_ctxt *rctxt,
+ const struct xdr_buf *xdr);
+extern void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
+ struct svc_rdma_send_ctxt *sctxt,
+ struct svc_rdma_recv_ctxt *rctxt,
+ int status);
+extern void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail);
extern int svc_rdma_sendto(struct svc_rqst *);
+extern int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset,
+ unsigned int length);
/* svc_rdma_transport.c */
-extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
-extern void svc_sq_reap(struct svcxprt_rdma *);
-extern void svc_rq_reap(struct svcxprt_rdma *);
-
extern struct svc_xprt_class svc_rdma_class;
#ifdef CONFIG_SUNRPC_BACKCHANNEL
extern struct svc_xprt_class svc_rdma_bc_class;
diff --git a/include/linux/sunrpc/svc_rdma_pcl.h b/include/linux/sunrpc/svc_rdma_pcl.h
new file mode 100644
index 000000000000..7516ad0fae80
--- /dev/null
+++ b/include/linux/sunrpc/svc_rdma_pcl.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates
+ */
+
+#ifndef SVC_RDMA_PCL_H
+#define SVC_RDMA_PCL_H
+
+#include <linux/list.h>
+
+struct svc_rdma_segment {
+ u32 rs_handle;
+ u32 rs_length;
+ u64 rs_offset;
+};
+
+struct svc_rdma_chunk {
+ struct list_head ch_list;
+
+ u32 ch_position;
+ u32 ch_length;
+ u32 ch_payload_length;
+
+ u32 ch_segcount;
+ struct svc_rdma_segment ch_segments[];
+};
+
+struct svc_rdma_pcl {
+ unsigned int cl_count;
+ struct list_head cl_chunks;
+};
+
+/**
+ * pcl_init - Initialize a parsed chunk list
+ * @pcl: parsed chunk list to initialize
+ *
+ */
+static inline void pcl_init(struct svc_rdma_pcl *pcl)
+{
+ INIT_LIST_HEAD(&pcl->cl_chunks);
+}
+
+/**
+ * pcl_is_empty - Return true if parsed chunk list is empty
+ * @pcl: parsed chunk list
+ *
+ */
+static inline bool pcl_is_empty(const struct svc_rdma_pcl *pcl)
+{
+ return list_empty(&pcl->cl_chunks);
+}
+
+/**
+ * pcl_first_chunk - Return first chunk in a parsed chunk list
+ * @pcl: parsed chunk list
+ *
+ * Returns the first chunk in the list, or NULL if the list is empty.
+ */
+static inline struct svc_rdma_chunk *
+pcl_first_chunk(const struct svc_rdma_pcl *pcl)
+{
+ if (pcl_is_empty(pcl))
+ return NULL;
+ return list_first_entry(&pcl->cl_chunks, struct svc_rdma_chunk,
+ ch_list);
+}
+
+/**
+ * pcl_next_chunk - Return next chunk in a parsed chunk list
+ * @pcl: a parsed chunk list
+ * @chunk: chunk in @pcl
+ *
+ * Returns the next chunk in the list, or NULL if @chunk is already last.
+ */
+static inline struct svc_rdma_chunk *
+pcl_next_chunk(const struct svc_rdma_pcl *pcl, struct svc_rdma_chunk *chunk)
+{
+ if (list_is_last(&chunk->ch_list, &pcl->cl_chunks))
+ return NULL;
+ return list_next_entry(chunk, ch_list);
+}
+
+/**
+ * pcl_for_each_chunk - Iterate over chunks in a parsed chunk list
+ * @pos: the loop cursor
+ * @pcl: a parsed chunk list
+ */
+#define pcl_for_each_chunk(pos, pcl) \
+ for (pos = list_first_entry(&(pcl)->cl_chunks, struct svc_rdma_chunk, ch_list); \
+ &pos->ch_list != &(pcl)->cl_chunks; \
+ pos = list_next_entry(pos, ch_list))
+
+/**
+ * pcl_for_each_segment - Iterate over segments in a parsed chunk
+ * @pos: the loop cursor
+ * @chunk: a parsed chunk
+ */
+#define pcl_for_each_segment(pos, chunk) \
+ for (pos = &(chunk)->ch_segments[0]; \
+ pos <= &(chunk)->ch_segments[(chunk)->ch_segcount - 1]; \
+ pos++)
+
+/**
+ * pcl_chunk_end_offset - Return offset of byte range following @chunk
+ * @chunk: chunk in a parsed chunk list
+ *
+ * Returns starting offset of the region just after @chunk
+ */
+static inline unsigned int
+pcl_chunk_end_offset(const struct svc_rdma_chunk *chunk)
+{
+ return xdr_align_size(chunk->ch_position + chunk->ch_payload_length);
+}
+
+struct svc_rdma_recv_ctxt;
+
+extern void pcl_free(struct svc_rdma_pcl *pcl);
+extern bool pcl_alloc_call(struct svc_rdma_recv_ctxt *rctxt, __be32 *p);
+extern bool pcl_alloc_read(struct svc_rdma_recv_ctxt *rctxt, __be32 *p);
+extern bool pcl_alloc_write(struct svc_rdma_recv_ctxt *rctxt,
+ struct svc_rdma_pcl *pcl, __be32 *p);
+extern int pcl_process_nonpayloads(const struct svc_rdma_pcl *pcl,
+ const struct xdr_buf *xdr,
+ int (*actor)(const struct xdr_buf *,
+ void *),
+ void *data);
+
+#endif /* SVC_RDMA_PCL_H */
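A sketch of consuming a parsed chunk list with the iteration macros above (the function itself is illustrative):

	static void dump_pcl(struct svc_rdma_pcl *pcl)
	{
		struct svc_rdma_chunk *chunk;
		struct svc_rdma_segment *seg;

		pcl_for_each_chunk(chunk, pcl) {
			pr_info("chunk: position %u length %u\n",
				chunk->ch_position, chunk->ch_length);
			pcl_for_each_segment(seg, chunk)
				pr_info(" segment: handle %u length %u offset %llu\n",
					seg->rs_handle, seg->rs_length,
					(unsigned long long)seg->rs_offset);
		}
	}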
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index ea6f46be9cb7..d42a75b3be10 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -21,11 +21,14 @@ struct svc_xprt_ops {
int (*xpo_has_wspace)(struct svc_xprt *);
int (*xpo_recvfrom)(struct svc_rqst *);
int (*xpo_sendto)(struct svc_rqst *);
+ int (*xpo_result_payload)(struct svc_rqst *, unsigned int,
+ unsigned int);
void (*xpo_release_rqst)(struct svc_rqst *);
void (*xpo_detach)(struct svc_xprt *);
void (*xpo_free)(struct svc_xprt *);
void (*xpo_secure_port)(struct svc_rqst *rqstp);
void (*xpo_kill_temp_xprt)(struct svc_xprt *);
+ void (*xpo_start_tls)(struct svc_xprt *);
};
struct svc_xprt_class {
@@ -86,6 +89,7 @@ struct svc_xprt {
struct list_head xpt_users; /* callbacks on free */
struct net *xpt_net;
+ netns_tracker ns_tracker;
const struct cred *xpt_cred;
struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */
struct rpc_xprt_switch *xpt_bc_xps; /* NFSv4.1 backchannel */
@@ -115,18 +119,26 @@ static inline int register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u
return 0;
}
+static inline bool svc_xprt_is_dead(const struct svc_xprt *xprt)
+{
+ return (test_bit(XPT_DEAD, &xprt->xpt_flags) != 0) ||
+ (test_bit(XPT_CLOSE, &xprt->xpt_flags) != 0);
+}
+
int svc_reg_xprt_class(struct svc_xprt_class *);
void svc_unreg_xprt_class(struct svc_xprt_class *);
void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *,
struct svc_serv *);
-int svc_create_xprt(struct svc_serv *, const char *, struct net *,
- const int, const unsigned short, int,
- const struct cred *);
-void svc_xprt_do_enqueue(struct svc_xprt *xprt);
+int svc_xprt_create(struct svc_serv *serv, const char *xprt_name,
+ struct net *net, const int family,
+ const unsigned short port, int flags,
+ const struct cred *cred);
+void svc_xprt_destroy_all(struct svc_serv *serv, struct net *net);
+void svc_xprt_received(struct svc_xprt *xprt);
void svc_xprt_enqueue(struct svc_xprt *xprt);
void svc_xprt_put(struct svc_xprt *xprt);
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt);
-void svc_close_xprt(struct svc_xprt *xprt);
+void svc_xprt_close(struct svc_xprt *xprt);
int svc_port_is_privileged(struct sockaddr *sin);
int svc_print_xprts(char *buf, int maxlen);
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
@@ -135,6 +147,7 @@ struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen);
void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *xprt);
void svc_age_temp_xprts_now(struct svc_serv *, struct sockaddr *);
+void svc_xprt_deferred_close(struct svc_xprt *xprt);
static inline void svc_xprt_get(struct svc_xprt *xprt)
{
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index b0003866a249..6d9cc9080aca 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -127,7 +127,7 @@ struct auth_ops {
char * name;
struct module *owner;
int flavour;
- int (*accept)(struct svc_rqst *rq, __be32 *authp);
+ int (*accept)(struct svc_rqst *rq);
int (*release)(struct svc_rqst *rq);
void (*domain_release)(struct auth_domain *);
int (*set_client)(struct svc_rqst *rq);
@@ -149,7 +149,7 @@ struct auth_ops {
struct svc_xprt;
-extern int svc_authenticate(struct svc_rqst *rqstp, __be32 *authp);
+extern int svc_authenticate(struct svc_rqst *rqstp);
extern int svc_authorise(struct svc_rqst *rqstp);
extern int svc_set_client(struct svc_rqst *rqstp);
extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops);
diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h
index ca39a388dc22..f09c82b0a7ae 100644
--- a/include/linux/sunrpc/svcauth_gss.h
+++ b/include/linux/sunrpc/svcauth_gss.h
@@ -20,7 +20,8 @@ int gss_svc_init(void);
void gss_svc_shutdown(void);
int gss_svc_init_net(struct net *net);
void gss_svc_shutdown_net(struct net *net);
-int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name);
+struct auth_domain *svcauth_gss_register_pseudoflavor(u32 pseudoflavor,
+ char *name);
u32 svcauth_gss_flavor(struct auth_domain *dom);
#endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 771baadaee9d..bcc555c7ae9c 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -28,25 +28,27 @@ struct svc_sock {
/* private TCP part */
/* On-the-wire fragment header: */
- __be32 sk_reclen;
+ __be32 sk_marker;
/* As we receive a record, this includes the length received so
* far (including the fragment header): */
u32 sk_tcplen;
/* Total length of the data (not including fragment headers)
* received so far in the fragments making up this rpc: */
u32 sk_datalen;
+ /* Number of queued send requests */
+ atomic_t sk_sendqlen;
struct page * sk_pages[RPCSVC_MAXPAGES]; /* received data */
};
static inline u32 svc_sock_reclen(struct svc_sock *svsk)
{
- return ntohl(svsk->sk_reclen) & RPC_FRAGMENT_SIZE_MASK;
+ return be32_to_cpu(svsk->sk_marker) & RPC_FRAGMENT_SIZE_MASK;
}
static inline u32 svc_sock_final_rec(struct svc_sock *svsk)
{
- return ntohl(svsk->sk_reclen) & RPC_LAST_STREAM_FRAGMENT;
+ return be32_to_cpu(svsk->sk_marker) & RPC_LAST_STREAM_FRAGMENT;
}
/*
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index b41f34977995..f84e2a1358e1 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -20,13 +20,19 @@ struct bio_vec;
struct rpc_rqst;
/*
+ * Size of an XDR encoding unit in bytes, i.e. 32 bits,
+ * as defined in Section 3 of RFC 4506. All encoded
+ * XDR data items are aligned on a boundary of 32 bits.
+ */
+#define XDR_UNIT sizeof(__be32)
+
+/*
* Buffer adjustment
*/
#define XDR_QUADLEN(l) (((l) + 3) >> 2)
/*
- * Generic opaque `network object.' At the kernel level, this type
- * is used only by lockd.
+ * Generic opaque `network object.'
*/
#define XDR_MAX_NETOBJ 1024
struct xdr_netobj {
@@ -89,6 +95,7 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
#define rpc_auth_unix cpu_to_be32(RPC_AUTH_UNIX)
#define rpc_auth_short cpu_to_be32(RPC_AUTH_SHORT)
#define rpc_auth_gss cpu_to_be32(RPC_AUTH_GSS)
+#define rpc_auth_tls cpu_to_be32(RPC_AUTH_TLS)
#define rpc_call cpu_to_be32(RPC_CALL)
#define rpc_reply cpu_to_be32(RPC_REPLY)
@@ -128,8 +135,8 @@ __be32 *xdr_decode_netobj(__be32 *p, struct xdr_netobj *);
void xdr_inline_pages(struct xdr_buf *, unsigned int,
struct page **, unsigned int, unsigned int);
-void xdr_terminate_string(struct xdr_buf *, const u32);
-size_t xdr_buf_pagecount(struct xdr_buf *buf);
+void xdr_terminate_string(const struct xdr_buf *, const u32);
+size_t xdr_buf_pagecount(const struct xdr_buf *buf);
int xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp);
void xdr_free_bvec(struct xdr_buf *buf);
@@ -182,28 +189,14 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p)
* XDR buffer helper functions
*/
extern void xdr_shift_buf(struct xdr_buf *, size_t);
-extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
-extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
-extern int xdr_buf_read_mic(struct xdr_buf *, struct xdr_netobj *, unsigned int);
-extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
-extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
-
-/*
- * Helper structure for copying from an sk_buff.
- */
-struct xdr_skb_reader {
- struct sk_buff *skb;
- unsigned int offset;
- size_t count;
- __wsum csum;
-};
-
-typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to, size_t len);
+extern void xdr_buf_from_iov(const struct kvec *, struct xdr_buf *);
+extern int xdr_buf_subsegment(const struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
+extern void xdr_buf_trim(struct xdr_buf *, unsigned int);
+extern int read_bytes_from_xdr_buf(const struct xdr_buf *, unsigned int, void *, unsigned int);
+extern int write_bytes_to_xdr_buf(const struct xdr_buf *, unsigned int, void *, unsigned int);
-extern int csum_partial_copy_to_xdr(struct xdr_buf *, struct sk_buff *);
-
-extern int xdr_encode_word(struct xdr_buf *, unsigned int, u32);
-extern int xdr_decode_word(struct xdr_buf *, unsigned int, u32 *);
+extern int xdr_encode_word(const struct xdr_buf *, unsigned int, u32);
+extern int xdr_decode_word(const struct xdr_buf *, unsigned int, u32 *);
struct xdr_array2_desc;
typedef int (*xdr_xcode_elem_t)(struct xdr_array2_desc *desc, void *elem);
@@ -214,9 +207,9 @@ struct xdr_array2_desc {
xdr_xcode_elem_t xcode;
};
-extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
+extern int xdr_decode_array2(const struct xdr_buf *buf, unsigned int base,
struct xdr_array2_desc *desc);
-extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
+extern int xdr_encode_array2(const struct xdr_buf *buf, unsigned int base,
struct xdr_array2_desc *desc);
extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase,
size_t len);
@@ -247,22 +240,90 @@ typedef int (*kxdrdproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf,
__be32 *p, struct rpc_rqst *rqst);
+extern void xdr_init_encode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
+ struct page **pages, struct rpc_rqst *rqst);
extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
-extern void xdr_commit_encode(struct xdr_stream *xdr);
+extern int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec,
+ size_t nbytes);
+extern void __xdr_commit_encode(struct xdr_stream *xdr);
extern void xdr_truncate_encode(struct xdr_stream *xdr, size_t len);
extern int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen);
extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
unsigned int base, unsigned int len);
extern unsigned int xdr_stream_pos(const struct xdr_stream *xdr);
+extern unsigned int xdr_page_pos(const struct xdr_stream *xdr);
extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf,
__be32 *p, struct rpc_rqst *rqst);
extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
struct page **pages, unsigned int len);
-extern void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen);
extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes);
extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len);
-extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data);
+extern int xdr_process_buf(const struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data);
+extern void xdr_set_pagelen(struct xdr_stream *, unsigned int len);
+extern bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf,
+ unsigned int len);
+extern unsigned int xdr_stream_move_subsegment(struct xdr_stream *xdr, unsigned int offset,
+ unsigned int target, unsigned int length);
+extern unsigned int xdr_stream_zero(struct xdr_stream *xdr, unsigned int offset,
+ unsigned int length);
+
+/**
+ * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
+ * @xdr: pointer to xdr_stream struct
+ * @buf: pointer to an empty buffer
+ * @buflen: size of 'buf'
+ *
+ * The scratch buffer is used when decoding from an array of pages.
+ * If an xdr_inline_decode() call spans across page boundaries, then
+ * we copy the data into the scratch buffer in order to allow linear
+ * access.
+ */
+static inline void
+xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
+{
+ xdr->scratch.iov_base = buf;
+ xdr->scratch.iov_len = buflen;
+}
+
+/**
+ * xdr_set_scratch_page - Attach a scratch buffer for decoding data
+ * @xdr: pointer to xdr_stream struct
+ * @page: an anonymous page
+ *
+ * See xdr_set_scratch_buffer().
+ */
+static inline void
+xdr_set_scratch_page(struct xdr_stream *xdr, struct page *page)
+{
+ xdr_set_scratch_buffer(xdr, page_address(page), PAGE_SIZE);
+}
+
+/**
+ * xdr_reset_scratch_buffer - Clear scratch buffer information
+ * @xdr: pointer to xdr_stream struct
+ *
+ * See xdr_set_scratch_buffer().
+ */
+static inline void
+xdr_reset_scratch_buffer(struct xdr_stream *xdr)
+{
+ xdr_set_scratch_buffer(xdr, NULL, 0);
+}
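
As a usage sketch (hypothetical decode helper; assumes @scratch was obtained via alloc_page() and @xdr was already set up for decoding):

static int example_decode(struct xdr_stream *xdr, struct page *scratch)
{
	__be32 *p;

	/* Let inline decodes that straddle a page boundary linearize here. */
	xdr_set_scratch_page(xdr, scratch);

	p = xdr_inline_decode(xdr, 2 * XDR_UNIT);
	if (unlikely(!p))
		return -EBADMSG;

	/* ... decode the two words at *p ... */

	xdr_reset_scratch_buffer(xdr);
	return 0;
}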
+
+/**
+ * xdr_commit_encode - Ensure all data is written to xdr->buf
+ * @xdr: pointer to xdr_stream
+ *
+ * Handle encoding across page boundaries by giving the caller a
+ * temporary location to write to, then later copying the data into
+ * place. __xdr_commit_encode() does that copying.
+ */
+static inline void xdr_commit_encode(struct xdr_stream *xdr)
+{
+ if (unlikely(xdr->scratch.iov_len))
+ __xdr_commit_encode(xdr);
+}
/**
* xdr_stream_remaining - Return the number of bytes remaining in the stream
@@ -295,12 +356,99 @@ ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
static inline size_t
xdr_align_size(size_t n)
{
- const size_t mask = sizeof(__u32) - 1;
+ const size_t mask = XDR_UNIT - 1;
return (n + mask) & ~mask;
}
/**
+ * xdr_pad_size - Calculate size of an object's pad
+ * @n: Size of an object being XDR encoded (in bytes)
+ *
+ * This implementation avoids the need for conditional
+ * branches or modulo division.
+ *
+ * Return value:
+ * Size (in bytes) of the needed XDR pad
+ */
+static inline size_t xdr_pad_size(size_t n)
+{
+ return xdr_align_size(n) - n;
+}
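
A worked example of the mask arithmetic:

/*
 * For a 5-byte object: xdr_align_size(5) == (5 + 3) & ~3 == 8,
 * so xdr_pad_size(5) == 8 - 5 == 3 bytes of XDR padding.
 */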
+
+/**
+ * xdr_stream_encode_item_present - Encode a "present" list item
+ * @xdr: pointer to xdr_stream
+ *
+ * Return values:
+ * On success, returns length in bytes of XDR buffer consumed
+ * %-EMSGSIZE on XDR buffer overflow
+ */
+static inline ssize_t xdr_stream_encode_item_present(struct xdr_stream *xdr)
+{
+ const size_t len = XDR_UNIT;
+ __be32 *p = xdr_reserve_space(xdr, len);
+
+ if (unlikely(!p))
+ return -EMSGSIZE;
+ *p = xdr_one;
+ return len;
+}
+
+/**
+ * xdr_stream_encode_item_absent - Encode a "not present" list item
+ * @xdr: pointer to xdr_stream
+ *
+ * Return values:
+ * On success, returns length in bytes of XDR buffer consumed
+ * %-EMSGSIZE on XDR buffer overflow
+ */
+static inline int xdr_stream_encode_item_absent(struct xdr_stream *xdr)
+{
+ const size_t len = XDR_UNIT;
+ __be32 *p = xdr_reserve_space(xdr, len);
+
+ if (unlikely(!p))
+ return -EMSGSIZE;
+ *p = xdr_zero;
+ return len;
+}
+
+/**
+ * xdr_encode_bool - Encode a boolean item
+ * @p: address in a buffer into which to encode
+ * @n: boolean value to encode
+ *
+ * Return value:
+ * Address of item following the encoded boolean
+ */
+static inline __be32 *xdr_encode_bool(__be32 *p, u32 n)
+{
+ *p++ = n ? xdr_one : xdr_zero;
+ return p;
+}
+
+/**
+ * xdr_stream_encode_bool - Encode a boolean item
+ * @xdr: pointer to xdr_stream
+ * @n: boolean value to encode
+ *
+ * Return values:
+ * On success, returns length in bytes of XDR buffer consumed
+ * %-EMSGSIZE on XDR buffer overflow
+ */
+static inline int xdr_stream_encode_bool(struct xdr_stream *xdr, __u32 n)
+{
+ const size_t len = XDR_UNIT;
+ __be32 *p = xdr_reserve_space(xdr, len);
+
+ if (unlikely(!p))
+ return -EMSGSIZE;
+ xdr_encode_bool(p, n);
+ return len;
+}
+
+/**
* xdr_stream_encode_u32 - Encode a 32-bit integer
* @xdr: pointer to xdr_stream
* @n: integer to encode
@@ -436,6 +584,53 @@ xdr_stream_encode_uint32_array(struct xdr_stream *xdr,
}
/**
+ * xdr_item_is_absent - symbolically handle XDR discriminators
+ * @p: pointer to undecoded discriminator
+ *
+ * Return values:
+ * %true if the following XDR item is absent
+ * %false if the following XDR item is present
+ */
+static inline bool xdr_item_is_absent(const __be32 *p)
+{
+ return *p == xdr_zero;
+}
+
+/**
+ * xdr_item_is_present - symbolically handle XDR discriminators
+ * @p: pointer to undecoded discriminator
+ *
+ * Return values:
+ * %true if the following XDR item is present
+ * %false if the following XDR item is absent
+ */
+static inline bool xdr_item_is_present(const __be32 *p)
+{
+ return *p != xdr_zero;
+}
+
+/**
+ * xdr_stream_decode_bool - Decode a boolean
+ * @xdr: pointer to xdr_stream
+ * @ptr: pointer to a u32 in which to store the result
+ *
+ * Return values:
+ * %0 on success
+ * %-EBADMSG on XDR buffer overflow
+ */
+static inline ssize_t
+xdr_stream_decode_bool(struct xdr_stream *xdr, __u32 *ptr)
+{
+ const size_t count = sizeof(*ptr);
+ __be32 *p = xdr_inline_decode(xdr, count);
+
+ if (unlikely(!p))
+ return -EBADMSG;
+ *ptr = (*p != xdr_zero);
+ return 0;
+}
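
A minimal sketch of how the item-present/item-absent encoders pair with the xdr_item_is_present()/xdr_item_is_absent() discriminator helpers below, for XDR optional data ("*" in the XDR language); hypothetical helper, error handling abbreviated:

static int example_encode_optional_u32(struct xdr_stream *xdr, const u32 *val)
{
	ssize_t ret;

	if (!val)
		return xdr_stream_encode_item_absent(xdr);

	ret = xdr_stream_encode_item_present(xdr);
	if (ret < 0)
		return ret;
	return xdr_stream_encode_u32(xdr, *val);
}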
+
+/**
* xdr_stream_decode_u32 - Decode a 32-bit integer
* @xdr: pointer to xdr_stream
* @ptr: location to store integer
@@ -457,6 +652,27 @@ xdr_stream_decode_u32(struct xdr_stream *xdr, __u32 *ptr)
}
/**
+ * xdr_stream_decode_u64 - Decode a 64-bit integer
+ * @xdr: pointer to xdr_stream
+ * @ptr: location to store 64-bit integer
+ *
+ * Return values:
+ * %0 on success
+ * %-EBADMSG on XDR buffer overflow
+ */
+static inline ssize_t
+xdr_stream_decode_u64(struct xdr_stream *xdr, __u64 *ptr)
+{
+ const size_t count = sizeof(*ptr);
+ __be32 *p = xdr_inline_decode(xdr, count);
+
+ if (unlikely(!p))
+ return -EBADMSG;
+ xdr_decode_hyper(p, ptr);
+ return 0;
+}
+
+/**
* xdr_stream_decode_opaque_fixed - Decode fixed length opaque xdr data
* @xdr: pointer to xdr_stream
* @ptr: location to store data
@@ -534,6 +750,8 @@ xdr_stream_decode_uint32_array(struct xdr_stream *xdr,
if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0))
return -EBADMSG;
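+ /* Reject element counts whose byte size would overflow size_t. */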
+ if (len > SIZE_MAX / sizeof(*p))
+ return -EBADMSG;
p = xdr_inline_decode(xdr, len * sizeof(*p));
if (unlikely(!p))
return -EBADMSG;
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index e64bd8222f55..b9f59aabee53 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -53,6 +53,7 @@ enum rpc_display_format_t {
struct rpc_task;
struct rpc_xprt;
+struct xprt_class;
struct seq_file;
struct svc_serv;
struct net;
@@ -101,6 +102,7 @@ struct rpc_rqst {
* used in the softirq.
*/
unsigned long rq_majortimeo; /* major timeout alarm */
+ unsigned long rq_minortimeo; /* minor timeout alarm */
unsigned long rq_timeout; /* Current timeout value */
ktime_t rq_rtt; /* round-trip time */
unsigned int rq_retries; /* # of retries */
@@ -137,9 +139,13 @@ struct rpc_xprt_ops {
void (*rpcbind)(struct rpc_task *task);
void (*set_port)(struct rpc_xprt *xprt, unsigned short port);
void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task);
+ int (*get_srcaddr)(struct rpc_xprt *xprt, char *buf,
+ size_t buflen);
+ unsigned short (*get_srcport)(struct rpc_xprt *xprt);
int (*buf_alloc)(struct rpc_task *task);
void (*buf_free)(struct rpc_task *task);
- void (*prepare_request)(struct rpc_rqst *req);
+ int (*prepare_request)(struct rpc_rqst *req,
+ struct xdr_buf *buf);
int (*send_request)(struct rpc_rqst *req);
void (*wait_for_reply_request)(struct rpc_task *task);
void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
@@ -181,9 +187,11 @@ enum xprt_transports {
XPRT_TRANSPORT_LOCAL = 257,
};
+struct rpc_sysfs_xprt;
struct rpc_xprt {
struct kref kref; /* Reference count */
const struct rpc_xprt_ops *ops; /* transport methods */
+ unsigned int id; /* transport id */
const struct rpc_timeout *timeout; /* timeout parms */
struct sockaddr_storage addr; /* server address */
@@ -246,6 +254,7 @@ struct rpc_xprt {
struct rpc_task * snd_task; /* Task blocked in send */
struct list_head xmit_queue; /* Send queue */
+ atomic_long_t xmit_queuelen;
struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
@@ -279,13 +288,16 @@ struct rpc_xprt {
} stat;
struct net *xprt_net;
+ netns_tracker ns_tracker;
const char *servername;
const char *address_strings[RPC_DISPLAY_MAX];
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct dentry *debugfs; /* debugfs directory */
- atomic_t inject_disconnect;
#endif
struct rcu_head rcu;
+ const struct xprt_class *xprt_class;
+ struct rpc_sysfs_xprt *xprt_sysfs;
+ bool main; /* mark if this is the first transport */
};
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
@@ -329,6 +341,7 @@ struct xprt_class {
struct rpc_xprt * (*setup)(struct xprt_create *);
struct module *owner;
char name[32];
+ const char * netid[];
};
/*
@@ -346,10 +359,9 @@ int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_free_slot(struct rpc_xprt *xprt,
struct rpc_rqst *req);
-void xprt_request_prepare(struct rpc_rqst *req);
bool xprt_prepare_transmit(struct rpc_task *task);
void xprt_request_enqueue_transmit(struct rpc_task *task);
-void xprt_request_enqueue_receive(struct rpc_task *task);
+int xprt_request_enqueue_receive(struct rpc_task *task);
void xprt_request_wait_receive(struct rpc_task *task);
void xprt_request_dequeue_xprt(struct rpc_task *task);
bool xprt_request_need_retransmit(struct rpc_task *task);
@@ -365,6 +377,9 @@ struct rpc_xprt * xprt_alloc(struct net *net, size_t size,
unsigned int num_prealloc,
unsigned int max_req);
void xprt_free(struct rpc_xprt *);
+void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task);
+bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req);
+void xprt_cleanup_ids(void);
static inline int
xprt_enable_swap(struct rpc_xprt *xprt)
@@ -383,7 +398,7 @@ xprt_disable_swap(struct rpc_xprt *xprt)
*/
int xprt_register_transport(struct xprt_class *type);
int xprt_unregister_transport(struct xprt_class *type);
-int xprt_load_transport(const char *);
+int xprt_find_transport_ident(const char *);
void xprt_wait_for_reply_request_def(struct rpc_task *task);
void xprt_wait_for_reply_request_rtt(struct rpc_task *task);
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
@@ -403,6 +418,7 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
bool xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *);
void xprt_unlock_connect(struct rpc_xprt *, void *);
+void xprt_release_write(struct rpc_xprt *, struct rpc_task *);
/*
* Reserved bit positions in xprt->state
@@ -414,9 +430,12 @@ void xprt_unlock_connect(struct rpc_xprt *, void *);
#define XPRT_BOUND (4)
#define XPRT_BINDING (5)
#define XPRT_CLOSING (6)
+#define XPRT_OFFLINE (7)
+#define XPRT_REMOVE (8)
#define XPRT_CONGESTED (9)
#define XPRT_CWND_WAIT (10)
#define XPRT_WRITE_SPACE (11)
+#define XPRT_SND_IS_COOKIE (12)
static inline void xprt_set_connected(struct rpc_xprt *xprt)
{
@@ -487,21 +506,7 @@ static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt)
return test_and_set_bit(XPRT_BINDING, &xprt->state);
}
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-extern unsigned int rpc_inject_disconnect;
-static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
-{
- if (!rpc_inject_disconnect)
- return;
- if (atomic_dec_return(&xprt->inject_disconnect))
- return;
- atomic_set(&xprt->inject_disconnect, rpc_inject_disconnect);
- xprt->ops->inject_disconnect(xprt);
-}
-#else
-static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
-{
-}
-#endif
-
+void xprt_set_offline_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps);
+void xprt_set_online_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps);
+void xprt_delete_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps);
#endif /* _LINUX_SUNRPC_XPRT_H */
diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h
index c6cce3fbf29d..c0514c684b2c 100644
--- a/include/linux/sunrpc/xprtmultipath.h
+++ b/include/linux/sunrpc/xprtmultipath.h
@@ -10,12 +10,15 @@
#define _NET_SUNRPC_XPRTMULTIPATH_H
struct rpc_xprt_iter_ops;
+struct rpc_sysfs_xprt_switch;
struct rpc_xprt_switch {
spinlock_t xps_lock;
struct kref xps_kref;
+ unsigned int xps_id;
unsigned int xps_nxprts;
unsigned int xps_nactive;
+ unsigned int xps_nunique_destaddr_xprts;
atomic_long_t xps_queuelen;
struct list_head xps_xprt_list;
@@ -23,6 +26,7 @@ struct rpc_xprt_switch {
const struct rpc_xprt_iter_ops *xps_iter_ops;
+ struct rpc_sysfs_xprt_switch *xps_sysfs;
struct rcu_head xps_rcu;
};
@@ -51,7 +55,7 @@ extern void rpc_xprt_switch_set_roundrobin(struct rpc_xprt_switch *xps);
extern void rpc_xprt_switch_add_xprt(struct rpc_xprt_switch *xps,
struct rpc_xprt *xprt);
extern void rpc_xprt_switch_remove_xprt(struct rpc_xprt_switch *xps,
- struct rpc_xprt *xprt);
+ struct rpc_xprt *xprt, bool offline);
extern void xprt_iter_init(struct rpc_xprt_iter *xpi,
struct rpc_xprt_switch *xps);
@@ -59,8 +63,13 @@ extern void xprt_iter_init(struct rpc_xprt_iter *xpi,
extern void xprt_iter_init_listall(struct rpc_xprt_iter *xpi,
struct rpc_xprt_switch *xps);
+extern void xprt_iter_init_listoffline(struct rpc_xprt_iter *xpi,
+ struct rpc_xprt_switch *xps);
+
extern void xprt_iter_destroy(struct rpc_xprt_iter *xpi);
+extern void xprt_iter_rewind(struct rpc_xprt_iter *xpi);
+
extern struct rpc_xprt_switch *xprt_iter_xchg_switch(
struct rpc_xprt_iter *xpi,
struct rpc_xprt_switch *newswitch);
@@ -71,4 +80,7 @@ extern struct rpc_xprt *xprt_iter_get_next(struct rpc_xprt_iter *xpi);
extern bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
const struct sockaddr *sap);
+
+extern void xprt_multipath_cleanup_ids(void);
+
#endif
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index 3c1423ee74b4..38284f25eddf 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -88,5 +88,7 @@ struct sock_xprt {
#define XPRT_SOCK_WAKE_WRITE (5)
#define XPRT_SOCK_WAKE_PENDING (6)
#define XPRT_SOCK_WAKE_DISCONNECT (7)
+#define XPRT_SOCK_CONNECT_SENT (8)
+#define XPRT_SOCK_NOSPACE (9)
#endif /* _LINUX_SUNRPC_XPRTSOCK_H */
diff --git a/include/linux/sunxi-rsb.h b/include/linux/sunxi-rsb.h
index 7e75bb0346d0..bf0d365f471c 100644
--- a/include/linux/sunxi-rsb.h
+++ b/include/linux/sunxi-rsb.h
@@ -59,7 +59,7 @@ static inline void sunxi_rsb_device_set_drvdata(struct sunxi_rsb_device *rdev,
struct sunxi_rsb_driver {
struct device_driver driver;
int (*probe)(struct sunxi_rsb_device *rdev);
- int (*remove)(struct sunxi_rsb_device *rdev);
+ void (*remove)(struct sunxi_rsb_device *rdev);
};
static inline struct sunxi_rsb_driver *to_sunxi_rsb_driver(struct device_driver *d)
diff --git a/include/linux/surface_acpi_notify.h b/include/linux/surface_acpi_notify.h
new file mode 100644
index 000000000000..8e3e86c7d78c
--- /dev/null
+++ b/include/linux/surface_acpi_notify.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Interface for Surface ACPI Notify (SAN) driver.
+ *
+ * Provides access to discrete GPU notifications sent from ACPI via the SAN
+ * driver, which are not handled by this driver directly.
+ *
+ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _LINUX_SURFACE_ACPI_NOTIFY_H
+#define _LINUX_SURFACE_ACPI_NOTIFY_H
+
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+/**
+ * struct san_dgpu_event - Discrete GPU ACPI event.
+ * @category: Category of the event.
+ * @target: Target ID of the event source.
+ * @command: Command ID of the event.
+ * @instance: Instance ID of the event source.
+ * @length: Length of the event's payload data (in bytes).
+ * @payload: Pointer to the event's payload data.
+ */
+struct san_dgpu_event {
+ u8 category;
+ u8 target;
+ u8 command;
+ u8 instance;
+ u16 length;
+ u8 *payload;
+};
+
+int san_client_link(struct device *client);
+int san_dgpu_notifier_register(struct notifier_block *nb);
+int san_dgpu_notifier_unregister(struct notifier_block *nb);
+
+#endif /* _LINUX_SURFACE_ACPI_NOTIFY_H */
diff --git a/include/linux/surface_aggregator/controller.h b/include/linux/surface_aggregator/controller.h
new file mode 100644
index 000000000000..d11a1c6e3186
--- /dev/null
+++ b/include/linux/surface_aggregator/controller.h
@@ -0,0 +1,994 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Surface System Aggregator Module (SSAM) controller interface.
+ *
+ * Main communication interface for the SSAM EC. Provides a controller
+ * managing access and communication to and from the SSAM EC, as well as main
+ * communication structures and definitions.
+ *
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H
+#define _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/serial_hub.h>
+
+
+/* -- Main data types and definitions --------------------------------------- */
+
+/**
+ * enum ssam_event_flags - Flags for enabling/disabling SAM events
+ * @SSAM_EVENT_SEQUENCED: The event will be sent via a sequenced data frame.
+ */
+enum ssam_event_flags {
+ SSAM_EVENT_SEQUENCED = BIT(0),
+};
+
+/**
+ * struct ssam_event - SAM event sent from the EC to the host.
+ * @target_category: Target category of the event source. See &enum ssam_ssh_tc.
+ * @target_id: Target ID of the event source.
+ * @command_id: Command ID of the event.
+ * @instance_id: Instance ID of the event source.
+ * @length: Length of the event payload in bytes.
+ * @data: Event payload data.
+ */
+struct ssam_event {
+ u8 target_category;
+ u8 target_id;
+ u8 command_id;
+ u8 instance_id;
+ u16 length;
+ u8 data[];
+};
+
+/**
+ * enum ssam_request_flags - Flags for SAM requests.
+ *
+ * @SSAM_REQUEST_HAS_RESPONSE:
+ * Specifies that the request expects a response. If not set, the request
+ * will be directly completed after its underlying packet has been
+ * transmitted. If set, the request transport system waits for a response
+ * of the request.
+ *
+ * @SSAM_REQUEST_UNSEQUENCED:
+ * Specifies that the request should be transmitted via an unsequenced
+ * packet. If set, the request must not have a response, meaning that this
+ * flag and the %SSAM_REQUEST_HAS_RESPONSE flag are mutually exclusive.
+ */
+enum ssam_request_flags {
+ SSAM_REQUEST_HAS_RESPONSE = BIT(0),
+ SSAM_REQUEST_UNSEQUENCED = BIT(1),
+};
+
+/**
+ * struct ssam_request - SAM request description.
+ * @target_category: Category of the request's target. See &enum ssam_ssh_tc.
+ * @target_id: ID of the request's target.
+ * @command_id: Command ID of the request.
+ * @instance_id: Instance ID of the request's target.
+ * @flags: Flags for the request. See &enum ssam_request_flags.
+ * @length: Length of the request payload in bytes.
+ * @payload: Request payload data.
+ *
+ * This struct fully describes a SAM request with payload. It is intended to
+ * help set up the actual transport struct, e.g. &struct ssam_request_sync,
+ * and specifically its raw message data via ssam_request_write_data().
+ */
+struct ssam_request {
+ u8 target_category;
+ u8 target_id;
+ u8 command_id;
+ u8 instance_id;
+ u16 flags;
+ u16 length;
+ const u8 *payload;
+};
+
+/**
+ * struct ssam_response - Response buffer for SAM request.
+ * @capacity: Capacity of the buffer, in bytes.
+ * @length: Length of the actual data stored in the memory pointed to by
+ * @pointer, in bytes. Set by the transport system.
+ * @pointer: Pointer to the buffer's memory, storing the response payload data.
+ */
+struct ssam_response {
+ size_t capacity;
+ size_t length;
+ u8 *pointer;
+};
+
+struct ssam_controller;
+
+struct ssam_controller *ssam_get_controller(void);
+struct ssam_controller *ssam_client_bind(struct device *client);
+int ssam_client_link(struct ssam_controller *ctrl, struct device *client);
+
+struct device *ssam_controller_device(struct ssam_controller *c);
+
+struct ssam_controller *ssam_controller_get(struct ssam_controller *c);
+void ssam_controller_put(struct ssam_controller *c);
+
+void ssam_controller_statelock(struct ssam_controller *c);
+void ssam_controller_stateunlock(struct ssam_controller *c);
+
+ssize_t ssam_request_write_data(struct ssam_span *buf,
+ struct ssam_controller *ctrl,
+ const struct ssam_request *spec);
+
+
+/* -- Synchronous request interface. ---------------------------------------- */
+
+/**
+ * struct ssam_request_sync - Synchronous SAM request struct.
+ * @base: Underlying SSH request.
+ * @comp: Completion used to signal full completion of the request. After the
+ * request has been submitted, this struct may only be modified or
+ * deallocated after the completion has been signaled.
+ * @resp: Buffer to store the response.
+ * @status: Status of the request, set after the base request has been
+ * completed or has failed.
+ */
+struct ssam_request_sync {
+ struct ssh_request base;
+ struct completion comp;
+ struct ssam_response *resp;
+ int status;
+};
+
+int ssam_request_sync_alloc(size_t payload_len, gfp_t flags,
+ struct ssam_request_sync **rqst,
+ struct ssam_span *buffer);
+
+void ssam_request_sync_free(struct ssam_request_sync *rqst);
+
+int ssam_request_sync_init(struct ssam_request_sync *rqst,
+ enum ssam_request_flags flags);
+
+/**
+ * ssam_request_sync_set_data - Set message data of a synchronous request.
+ * @rqst: The request.
+ * @ptr: Pointer to the request message data.
+ * @len: Length of the request message data.
+ *
+ * Set the request message data of a synchronous request. The provided buffer
+ * needs to live until the request has been completed.
+ */
+static inline void ssam_request_sync_set_data(struct ssam_request_sync *rqst,
+ u8 *ptr, size_t len)
+{
+ ssh_request_set_data(&rqst->base, ptr, len);
+}
+
+/**
+ * ssam_request_sync_set_resp - Set response buffer of a synchronous request.
+ * @rqst: The request.
+ * @resp: The response buffer.
+ *
+ * Sets the response buffer of a synchronous request. This buffer will store
+ * the response of the request after it has been completed. May be %NULL if no
+ * response is expected.
+ */
+static inline void ssam_request_sync_set_resp(struct ssam_request_sync *rqst,
+ struct ssam_response *resp)
+{
+ rqst->resp = resp;
+}
+
+int ssam_request_sync_submit(struct ssam_controller *ctrl,
+ struct ssam_request_sync *rqst);
+
+/**
+ * ssam_request_sync_wait - Wait for completion of a synchronous request.
+ * @rqst: The request to wait for.
+ *
+ * Wait for completion and release of a synchronous request. After this
+ * function terminates, the request is guaranteed to have left the transport
+ * system. After successful submission of a request, this function must be
+ * called before accessing the response of the request, freeing the request,
+ * or freeing any of the buffers associated with the request.
+ *
+ * This function must not be called if the request has not been submitted yet
+ * and may lead to a deadlock/infinite wait if a subsequent request submission
+ * fails in that case, due to the completion never triggering.
+ *
+ * Return: Returns the status of the given request, which is set on completion
+ * of the packet. This value is zero on success and negative on failure.
+ */
+static inline int ssam_request_sync_wait(struct ssam_request_sync *rqst)
+{
+ wait_for_completion(&rqst->comp);
+ return rqst->status;
+}
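
Taken together, the alloc/init/submit/wait helpers compose as in this minimal sketch (hypothetical function, roughly what the ssam_request_sync() convenience wrapper below is expected to do; error handling abbreviated):

static int example_submit(struct ssam_controller *ctrl,
			  const struct ssam_request *spec)
{
	struct ssam_request_sync *rqst;
	struct ssam_span buf;
	ssize_t len;
	int status;

	status = ssam_request_sync_alloc(spec->length, GFP_KERNEL, &rqst, &buf);
	if (status)
		return status;

	status = ssam_request_sync_init(rqst, spec->flags);
	if (status) {
		ssam_request_sync_free(rqst);
		return status;
	}
	ssam_request_sync_set_resp(rqst, NULL);

	/* Serialize the request message into the allocated span. */
	len = ssam_request_write_data(&buf, ctrl, spec);
	if (len < 0) {
		ssam_request_sync_free(rqst);
		return len;
	}
	ssam_request_sync_set_data(rqst, buf.ptr, len);

	status = ssam_request_sync_submit(ctrl, rqst);
	if (!status)
		status = ssam_request_sync_wait(rqst);

	ssam_request_sync_free(rqst);
	return status;
}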
+
+int ssam_request_sync(struct ssam_controller *ctrl,
+ const struct ssam_request *spec,
+ struct ssam_response *rsp);
+
+int ssam_request_sync_with_buffer(struct ssam_controller *ctrl,
+ const struct ssam_request *spec,
+ struct ssam_response *rsp,
+ struct ssam_span *buf);
+
+/**
+ * ssam_request_sync_onstack - Execute a synchronous request on the stack.
+ * @ctrl: The controller via which the request is submitted.
+ * @rqst: The request specification.
+ * @rsp: The response buffer.
+ * @payload_len: The (maximum) request payload length.
+ *
+ * Allocates a synchronous request with specified payload length on the stack,
+ * fully initializes it via the provided request specification, submits it,
+ * and finally waits for its completion before returning its status. This
+ * helper macro essentially allocates the request message buffer on the stack
+ * and then calls ssam_request_sync_with_buffer().
+ *
+ * Note: The @payload_len parameter specifies the maximum payload length, used
+ * for buffer allocation. The actual payload length may be smaller.
+ *
+ * Return: Returns the status of the request or any failure during setup, i.e.
+ * zero on success and a negative value on failure.
+ */
+#define ssam_request_sync_onstack(ctrl, rqst, rsp, payload_len) \
+ ({ \
+ u8 __data[SSH_COMMAND_MESSAGE_LENGTH(payload_len)]; \
+ struct ssam_span __buf = { &__data[0], ARRAY_SIZE(__data) }; \
+ \
+ ssam_request_sync_with_buffer(ctrl, rqst, rsp, &__buf); \
+ })
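
Illustrative use of the on-stack helper (hypothetical function; the target/command/instance IDs are placeholders, not real SAM IDs):

static int example_set_u8(struct ssam_controller *ctrl, u8 value)
{
	struct ssam_request rqst = {
		.target_category = SSAM_SSH_TC_SAM,	/* placeholder */
		.target_id       = 0x01,		/* placeholder */
		.command_id      = 0x02,		/* placeholder */
		.instance_id     = 0x00,
		.flags           = 0,
		.length          = sizeof(value),
		.payload         = &value,
	};

	return ssam_request_sync_onstack(ctrl, &rqst, NULL, sizeof(value));
}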
+
+/**
+ * __ssam_retry - Retry request in case of I/O errors or timeouts.
+ * @request: The request function to execute. Must return an integer.
+ * @n: Number of tries.
+ * @args: Arguments for the request function.
+ *
+ * Executes the given request function, i.e. calls @request. In case the
+ * request returns %-EREMOTEIO (indicates I/O error) or %-ETIMEDOUT (request
+ * or underlying packet timed out), @request will be re-executed again, up to
+ * @n times in total.
+ *
+ * Return: Returns the return value of the last execution of @request.
+ */
+#define __ssam_retry(request, n, args...) \
+ ({ \
+ int __i, __s = 0; \
+ \
+ for (__i = (n); __i > 0; __i--) { \
+ __s = request(args); \
+ if (__s != -ETIMEDOUT && __s != -EREMOTEIO) \
+ break; \
+ } \
+ __s; \
+ })
+
+/**
+ * ssam_retry - Retry request in case of I/O errors or timeouts up to three
+ * times in total.
+ * @request: The request function to execute. Must return an integer.
+ * @args: Arguments for the request function.
+ *
+ * Executes the given request function, i.e. calls @request. In case the
+ * request returns %-EREMOTEIO (indicates I/O error) or %-ETIMEDOUT (request
+ * or underlying packet timed out), @request will be re-executed again, up to
+ * three times in total.
+ *
+ * See __ssam_retry() for a more generic macro for this purpose.
+ *
+ * Return: Returns the return value of the last execution of @request.
+ */
+#define ssam_retry(request, args...) \
+ __ssam_retry(request, 3, args)
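
For example, in driver code (with rqst and rsp set up as usual):

	status = ssam_retry(ssam_request_sync, ctrl, &rqst, &rsp);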
+
+/**
+ * struct ssam_request_spec - Blue-print specification of SAM request.
+ * @target_category: Category of the request's target. See &enum ssam_ssh_tc.
+ * @target_id: ID of the request's target.
+ * @command_id: Command ID of the request.
+ * @instance_id: Instance ID of the request's target.
+ * @flags: Flags for the request. See &enum ssam_request_flags.
+ *
+ * Blue-print specification for a SAM request. This struct describes the
+ * unique static parameters of a request (i.e. type) without specifying any of
+ * its instance-specific data (e.g. payload). It is intended to be used as base
+ * for defining simple request functions via the
+ * ``SSAM_DEFINE_SYNC_REQUEST_x()`` family of macros.
+ */
+struct ssam_request_spec {
+ u8 target_category;
+ u8 target_id;
+ u8 command_id;
+ u8 instance_id;
+ u8 flags;
+};
+
+/**
+ * struct ssam_request_spec_md - Blue-print specification for multi-device SAM
+ * request.
+ * @target_category: Category of the request's target. See &enum ssam_ssh_tc.
+ * @command_id: Command ID of the request.
+ * @flags: Flags for the request. See &enum ssam_request_flags.
+ *
+ * Blue-print specification for a multi-device SAM request, i.e. a request
+ * that is applicable to multiple device instances, described by their
+ * individual target and instance IDs. This struct describes the unique static
+ * parameters of a request (i.e. type) without specifying any of its
+ * instance-specific data (e.g. payload) and without specifying any of its
+ * device specific IDs (i.e. target and instance ID). It is intended to be
+ * used as base for defining simple multi-device request functions via the
+ * ``SSAM_DEFINE_SYNC_REQUEST_MD_x()`` and ``SSAM_DEFINE_SYNC_REQUEST_CL_x()``
+ * families of macros.
+ */
+struct ssam_request_spec_md {
+ u8 target_category;
+ u8 command_id;
+ u8 flags;
+};
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_N() - Define synchronous SAM request function
+ * with neither argument nor return value.
+ * @name: Name of the generated function.
+ * @spec: Specification (&struct ssam_request_spec) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request having neither argument nor return value. The
+ * generated function takes care of setting up the request struct and buffer
+ * allocation, as well as execution of the request itself, returning once the
+ * request has been fully completed. The required transport buffer will be
+ * allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl)``, returning the status of the request, which is
+ * zero on success and negative on failure. The ``ctrl`` parameter is the
+ * controller via which the request is being sent.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_N(name, spec...) \
+ static int name(struct ssam_controller *ctrl) \
+ { \
+ struct ssam_request_spec s = (struct ssam_request_spec)spec; \
+ struct ssam_request rqst; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = s.target_id; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = s.instance_id; \
+ rqst.flags = s.flags; \
+ rqst.length = 0; \
+ rqst.payload = NULL; \
+ \
+ return ssam_request_sync_onstack(ctrl, &rqst, NULL, 0); \
+ }
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_W() - Define synchronous SAM request function with
+ * argument.
+ * @name: Name of the generated function.
+ * @atype: Type of the request's argument.
+ * @spec: Specification (&struct ssam_request_spec) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request taking an argument of type @atype and having no
+ * return value. The generated function takes care of setting up the request
+ * struct, buffer allocation, as well as execution of the request itself,
+ * returning once the request has been fully completed. The required transport
+ * buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl, const atype *arg)``, returning the status of the
+ * request, which is zero on success and negative on failure. The ``ctrl``
+ * parameter is the controller via which the request is sent. The request
+ * argument is specified via the ``arg`` pointer.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_W(name, atype, spec...) \
+ static int name(struct ssam_controller *ctrl, const atype *arg) \
+ { \
+ struct ssam_request_spec s = (struct ssam_request_spec)spec; \
+ struct ssam_request rqst; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = s.target_id; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = s.instance_id; \
+ rqst.flags = s.flags; \
+ rqst.length = sizeof(atype); \
+ rqst.payload = (u8 *)arg; \
+ \
+ return ssam_request_sync_onstack(ctrl, &rqst, NULL, \
+ sizeof(atype)); \
+ }
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_R() - Define synchronous SAM request function with
+ * return value.
+ * @name: Name of the generated function.
+ * @rtype: Type of the request's return value.
+ * @spec: Specification (&struct ssam_request_spec) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request taking no argument but having a return value of
+ * type @rtype. The generated function takes care of setting up the request
+ * and response structs, buffer allocation, as well as execution of the
+ * request itself, returning once the request has been fully completed. The
+ * required transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl, rtype *ret)``, returning the status of the request,
+ * which is zero on success and negative on failure. The ``ctrl`` parameter is
+ * the controller via which the request is sent. The request's return value is
+ * written to the memory pointed to by the ``ret`` parameter.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_R(name, rtype, spec...) \
+ static int name(struct ssam_controller *ctrl, rtype *ret) \
+ { \
+ struct ssam_request_spec s = (struct ssam_request_spec)spec; \
+ struct ssam_request rqst; \
+ struct ssam_response rsp; \
+ int status; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = s.target_id; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = s.instance_id; \
+ rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \
+ rqst.length = 0; \
+ rqst.payload = NULL; \
+ \
+ rsp.capacity = sizeof(rtype); \
+ rsp.length = 0; \
+ rsp.pointer = (u8 *)ret; \
+ \
+ status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, 0); \
+ if (status) \
+ return status; \
+ \
+ if (rsp.length != sizeof(rtype)) { \
+ struct device *dev = ssam_controller_device(ctrl); \
+ dev_err(dev, \
+ "rqst: invalid response length, expected %zu, got %zu (tc: %#04x, cid: %#04x)", \
+ sizeof(rtype), rsp.length, rqst.target_category,\
+ rqst.command_id); \
+ return -EIO; \
+ } \
+ \
+ return 0; \
+ }
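
Usage sketch: the invocation below generates a function ssam_example_get_mode(ctrl, &mode) returning one little-endian 32-bit value; the name and IDs are illustrative placeholders, not a real SAM request:

SSAM_DEFINE_SYNC_REQUEST_R(ssam_example_get_mode, __le32, {
	.target_category = SSAM_SSH_TC_SAM,	/* placeholder */
	.target_id       = 0x01,		/* placeholder */
	.command_id      = 0x0a,		/* placeholder */
	.instance_id     = 0x00,
});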
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_WR() - Define synchronous SAM request function with
+ * both argument and return value.
+ * @name: Name of the generated function.
+ * @atype: Type of the request's argument.
+ * @rtype: Type of the request's return value.
+ * @spec: Specification (&struct ssam_request_spec) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by @spec,
+ * with the request taking an argument of type @atype and having a return value
+ * of type @rtype. The generated function takes care of setting up the request
+ * and response structs, buffer allocation, as well as execution of the request
+ * itself, returning once the request has been fully completed. The required
+ * transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl, const atype *arg, rtype *ret)``, returning the status
+ * of the request, which is zero on success and negative on failure. The
+ * ``ctrl`` parameter is the controller via which the request is sent. The
+ * request argument is specified via the ``arg`` pointer. The request's return
+ * value is written to the memory pointed to by the ``ret`` parameter.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_WR(name, atype, rtype, spec...) \
+ static int name(struct ssam_controller *ctrl, const atype *arg, rtype *ret) \
+ { \
+ struct ssam_request_spec s = (struct ssam_request_spec)spec; \
+ struct ssam_request rqst; \
+ struct ssam_response rsp; \
+ int status; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = s.target_id; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = s.instance_id; \
+ rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \
+ rqst.length = sizeof(atype); \
+ rqst.payload = (u8 *)arg; \
+ \
+ rsp.capacity = sizeof(rtype); \
+ rsp.length = 0; \
+ rsp.pointer = (u8 *)ret; \
+ \
+ status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, sizeof(atype)); \
+ if (status) \
+ return status; \
+ \
+ if (rsp.length != sizeof(rtype)) { \
+ struct device *dev = ssam_controller_device(ctrl); \
+ dev_err(dev, \
+ "rqst: invalid response length, expected %zu, got %zu (tc: %#04x, cid: %#04x)", \
+ sizeof(rtype), rsp.length, rqst.target_category,\
+ rqst.command_id); \
+ return -EIO; \
+ } \
+ \
+ return 0; \
+ }
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_MD_N() - Define synchronous multi-device SAM
+ * request function with neither argument nor return value.
+ * @name: Name of the generated function.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request having neither argument nor return value. Device
+ * specifying parameters are not hard-coded, but instead must be provided to
+ * the function. The generated function takes care of setting up the request
+ * struct, buffer allocation, as well as execution of the request itself,
+ * returning once the request has been fully completed. The required transport
+ * buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl, u8 tid, u8 iid)``, returning the status of the
+ * request, which is zero on success and negative on failure. The ``ctrl``
+ * parameter is the controller via which the request is sent, ``tid`` the
+ * target ID for the request, and ``iid`` the instance ID.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_MD_N(name, spec...) \
+ static int name(struct ssam_controller *ctrl, u8 tid, u8 iid) \
+ { \
+ struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \
+ struct ssam_request rqst; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = tid; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = iid; \
+ rqst.flags = s.flags; \
+ rqst.length = 0; \
+ rqst.payload = NULL; \
+ \
+ return ssam_request_sync_onstack(ctrl, &rqst, NULL, 0); \
+ }
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_MD_W() - Define synchronous multi-device SAM
+ * request function with argument.
+ * @name: Name of the generated function.
+ * @atype: Type of the request's argument.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request taking an argument of type @atype and having no
+ * return value. Device specifying parameters are not hard-coded, but instead
+ * must be provided to the function. The generated function takes care of
+ * setting up the request struct, buffer allocation, as well as execution of
+ * the request itself, returning once the request has been fully completed.
+ * The required transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl, u8 tid, u8 iid, const atype *arg)``, returning the
+ * status of the request, which is zero on success and negative on failure.
+ * The ``ctrl`` parameter is the controller via which the request is sent,
+ * ``tid`` the target ID for the request, and ``iid`` the instance ID. The
+ * request argument is specified via the ``arg`` pointer.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_MD_W(name, atype, spec...) \
+ static int name(struct ssam_controller *ctrl, u8 tid, u8 iid, const atype *arg) \
+ { \
+ struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \
+ struct ssam_request rqst; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = tid; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = iid; \
+ rqst.flags = s.flags; \
+ rqst.length = sizeof(atype); \
+ rqst.payload = (u8 *)arg; \
+ \
+ return ssam_request_sync_onstack(ctrl, &rqst, NULL, \
+ sizeof(atype)); \
+ }
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_MD_R() - Define synchronous multi-device SAM
+ * request function with return value.
+ * @name: Name of the generated function.
+ * @rtype: Type of the request's return value.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request taking no argument but having a return value of
+ * type @rtype. Device specifying parameters are not hard-coded, but instead
+ * must be provided to the function. The generated function takes care of
+ * setting up the request and response structs, buffer allocation, as well as
+ * execution of the request itself, returning once the request has been fully
+ * completed. The required transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl, u8 tid, u8 iid, rtype *ret)``, returning the status
+ * of the request, which is zero on success and negative on failure. The
+ * ``ctrl`` parameter is the controller via which the request is sent, ``tid``
+ * the target ID for the request, and ``iid`` the instance ID. The request's
+ * return value is written to the memory pointed to by the ``ret`` parameter.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_MD_R(name, rtype, spec...) \
+ static int name(struct ssam_controller *ctrl, u8 tid, u8 iid, rtype *ret) \
+ { \
+ struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \
+ struct ssam_request rqst; \
+ struct ssam_response rsp; \
+ int status; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = tid; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = iid; \
+ rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \
+ rqst.length = 0; \
+ rqst.payload = NULL; \
+ \
+ rsp.capacity = sizeof(rtype); \
+ rsp.length = 0; \
+ rsp.pointer = (u8 *)ret; \
+ \
+ status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, 0); \
+ if (status) \
+ return status; \
+ \
+ if (rsp.length != sizeof(rtype)) { \
+ struct device *dev = ssam_controller_device(ctrl); \
+ dev_err(dev, \
+ "rqst: invalid response length, expected %zu, got %zu (tc: %#04x, cid: %#04x)", \
+ sizeof(rtype), rsp.length, rqst.target_category,\
+ rqst.command_id); \
+ return -EIO; \
+ } \
+ \
+ return 0; \
+ }
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_MD_WR() - Define synchronous multi-device SAM
+ * request function with both argument and return value.
+ * @name: Name of the generated function.
+ * @atype: Type of the request's argument.
+ * @rtype: Type of the request's return value.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by @spec,
+ * with the request taking an argument of type @atype and having a return value
+ * of type @rtype. Device specifying parameters are not hard-coded, but instead
+ * must be provided to the function. The generated function takes care of
+ * setting up the request and response structs, buffer allocation, as well as
+ * execution of the request itself, returning once the request has been fully
+ * completed. The required transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl, u8 tid, u8 iid, const atype *arg, rtype *ret)``,
+ * returning the status of the request, which is zero on success and negative
+ * on failure. The ``ctrl`` parameter is the controller via which the request
+ * is sent, ``tid`` the target ID for the request, and ``iid`` the instance ID.
+ * The request argument is specified via the ``arg`` pointer. The request's
+ * return value is written to the memory pointed to by the ``ret`` parameter.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_MD_WR(name, atype, rtype, spec...) \
+ static int name(struct ssam_controller *ctrl, u8 tid, u8 iid, \
+ const atype *arg, rtype *ret) \
+ { \
+ struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \
+ struct ssam_request rqst; \
+ struct ssam_response rsp; \
+ int status; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = tid; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = iid; \
+ rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \
+ rqst.length = sizeof(atype); \
+ rqst.payload = (u8 *)arg; \
+ \
+ rsp.capacity = sizeof(rtype); \
+ rsp.length = 0; \
+ rsp.pointer = (u8 *)ret; \
+ \
+ status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, sizeof(atype)); \
+ if (status) \
+ return status; \
+ \
+ if (rsp.length != sizeof(rtype)) { \
+ struct device *dev = ssam_controller_device(ctrl); \
+ dev_err(dev, \
+ "rqst: invalid response length, expected %zu, got %zu (tc: %#04x, cid: %#04x)", \
+ sizeof(rtype), rsp.length, rqst.target_category,\
+ rqst.command_id); \
+ return -EIO; \
+ } \
+ \
+ return 0; \
+ }
+
+
+/* -- Event notifier/callbacks. --------------------------------------------- */
+
+#define SSAM_NOTIF_STATE_SHIFT 2
+#define SSAM_NOTIF_STATE_MASK ((1 << SSAM_NOTIF_STATE_SHIFT) - 1)
+
+/**
+ * enum ssam_notif_flags - Flags used in return values from SSAM notifier
+ * callback functions.
+ *
+ * @SSAM_NOTIF_HANDLED:
+ * Indicates that the notification has been handled. This flag should be
+ * set by the handler if the handler can act/has acted upon the event
+ * provided to it. This flag should not be set if the handler is not a
+ * primary handler intended for the provided event.
+ *
+ * If this flag has not been set by any handler after the notifier chain
+ * has been traversed, a warning will be emitted, stating that the event
+ * has not been handled.
+ *
+ * @SSAM_NOTIF_STOP:
+ * Indicates that the notifier traversal should stop. If this flag is
+ * returned from a notifier callback, notifier chain traversal will
+ * immediately stop and any remaining notifiers will not be called. This
+ * flag is automatically set when ssam_notifier_from_errno() is called
+ * with a negative error value.
+ */
+enum ssam_notif_flags {
+ SSAM_NOTIF_HANDLED = BIT(0),
+ SSAM_NOTIF_STOP = BIT(1),
+};
+
+struct ssam_event_notifier;
+
+typedef u32 (*ssam_notifier_fn_t)(struct ssam_event_notifier *nf,
+ const struct ssam_event *event);
+
+/**
+ * struct ssam_notifier_block - Base notifier block for SSAM event
+ * notifications.
+ * @node: The node for the list of notifiers.
+ * @fn: The callback function of this notifier. This function takes the
+ * respective notifier block and event as input and should return
+ * a notifier value, which can either be obtained from the flags
+ * provided in &enum ssam_notif_flags, converted from a standard
+ * error value via ssam_notifier_from_errno(), or a combination of
+ * both (e.g. ``ssam_notifier_from_errno(e) | SSAM_NOTIF_HANDLED``).
+ * @priority: Priority value determining the order in which notifier callbacks
+ * will be called. A higher value means higher priority, i.e. the
+ * associated callback will be executed earlier than other (lower
+ * priority) callbacks.
+ */
+struct ssam_notifier_block {
+ struct list_head node;
+ ssam_notifier_fn_t fn;
+ int priority;
+};
+
+/**
+ * ssam_notifier_from_errno() - Convert standard error value to notifier
+ * return code.
+ * @err: The error code to convert, must be negative (in case of failure) or
+ * zero (in case of success).
+ *
+ * Return: Returns the notifier return value obtained by converting the
+ * specified @err value. In case @err is negative, the %SSAM_NOTIF_STOP flag
+ * will be set, causing notifier call chain traversal to abort.
+ */
+static inline u32 ssam_notifier_from_errno(int err)
+{
+ if (WARN_ON(err > 0) || err == 0)
+ return 0;
+ else
+ return ((-err) << SSAM_NOTIF_STATE_SHIFT) | SSAM_NOTIF_STOP;
+}
+
+/**
+ * ssam_notifier_to_errno() - Convert notifier return code to standard error
+ * value.
+ * @ret: The notifier return value to convert.
+ *
+ * Return: Returns the negative error value encoded in @ret or zero if @ret
+ * indicates success.
+ */
+static inline int ssam_notifier_to_errno(u32 ret)
+{
+ return -(ret >> SSAM_NOTIF_STATE_SHIFT);
+}
+
+
+/* -- Event/notification registry. ------------------------------------------ */
+
+/**
+ * struct ssam_event_registry - Registry specification used for enabling events.
+ * @target_category: Target category for the event registry requests.
+ * @target_id: Target ID for the event registry requests.
+ * @cid_enable: Command ID for the event-enable request.
+ * @cid_disable: Command ID for the event-disable request.
+ *
+ * This struct describes a SAM event registry via the minimal collection of
+ * SAM IDs specifying the requests to use for enabling and disabling an event.
+ * The individual event to be enabled/disabled itself is specified via &struct
+ * ssam_event_id.
+ */
+struct ssam_event_registry {
+ u8 target_category;
+ u8 target_id;
+ u8 cid_enable;
+ u8 cid_disable;
+};
+
+/**
+ * struct ssam_event_id - Unique event ID used for enabling events.
+ * @target_category: Target category of the event source.
+ * @instance: Instance ID of the event source.
+ *
+ * This struct specifies the event to be enabled/disabled via an externally
+ * provided registry. It does not specify the registry to be used itself, this
+ * is done via &struct ssam_event_registry.
+ */
+struct ssam_event_id {
+ u8 target_category;
+ u8 instance;
+};
+
+/**
+ * enum ssam_event_mask - Flags specifying how events are matched to notifiers.
+ *
+ * @SSAM_EVENT_MASK_NONE:
+ * Run the callback for any event with matching target category. Do not
+ * do any additional filtering.
+ *
+ * @SSAM_EVENT_MASK_TARGET:
+ * In addition to filtering by target category, only execute the notifier
+ * callback for events with a target ID matching to the one of the
+ * registry used for enabling/disabling the event.
+ *
+ * @SSAM_EVENT_MASK_INSTANCE:
+ * In addition to filtering by target category, only execute the notifier
+ * callback for events with an instance ID matching to the instance ID
+ * used when enabling the event.
+ *
+ * @SSAM_EVENT_MASK_STRICT:
+ * Do all the filtering above.
+ */
+enum ssam_event_mask {
+ SSAM_EVENT_MASK_TARGET = BIT(0),
+ SSAM_EVENT_MASK_INSTANCE = BIT(1),
+
+ SSAM_EVENT_MASK_NONE = 0,
+ SSAM_EVENT_MASK_STRICT =
+ SSAM_EVENT_MASK_TARGET
+ | SSAM_EVENT_MASK_INSTANCE,
+};
+
+/**
+ * SSAM_EVENT_REGISTRY() - Define a new event registry.
+ * @tc: Target category for the event registry requests.
+ * @tid: Target ID for the event registry requests.
+ * @cid_en: Command ID for the event-enable request.
+ * @cid_dis: Command ID for the event-disable request.
+ *
+ * Return: Returns the &struct ssam_event_registry specified by the given
+ * parameters.
+ */
+#define SSAM_EVENT_REGISTRY(tc, tid, cid_en, cid_dis) \
+ ((struct ssam_event_registry) { \
+ .target_category = (tc), \
+ .target_id = (tid), \
+ .cid_enable = (cid_en), \
+ .cid_disable = (cid_dis), \
+ })
+
+#define SSAM_EVENT_REGISTRY_SAM \
+ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_SAM, 0x01, 0x0b, 0x0c)
+
+#define SSAM_EVENT_REGISTRY_KIP \
+ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_KIP, 0x02, 0x27, 0x28)
+
+#define SSAM_EVENT_REGISTRY_REG(tid) \
+ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_REG, (tid), 0x01, 0x02)
+
+/**
+ * enum ssam_event_notifier_flags - Flags for event notifiers.
+ * @SSAM_EVENT_NOTIFIER_OBSERVER:
+ * The corresponding notifier acts as observer. Registering a notifier
+ * with this flag set will not attempt to enable any event. Equally,
+ * unregistering will not attempt to disable any event. Note that a
+ * notifier with this flag may not even correspond to a certain event at
+ * all, only to a specific event target category. Event matching will not
+ * be influenced by this flag.
+ */
+enum ssam_event_notifier_flags {
+ SSAM_EVENT_NOTIFIER_OBSERVER = BIT(0),
+};
+
+/**
+ * struct ssam_event_notifier - Notifier block for SSAM events.
+ * @base: The base notifier block with callback function and priority.
+ * @event: The event for which this block will receive notifications.
+ * @event.reg: Registry via which the event will be enabled/disabled.
+ * @event.id: ID specifying the event.
+ * @event.mask: Flags determining how events are matched to the notifier.
+ * @event.flags: Flags used for enabling the event.
+ * @flags: Notifier flags (see &enum ssam_event_notifier_flags).
+ */
+struct ssam_event_notifier {
+ struct ssam_notifier_block base;
+
+ struct {
+ struct ssam_event_registry reg;
+ struct ssam_event_id id;
+ enum ssam_event_mask mask;
+ u8 flags;
+ } event;
+
+ unsigned long flags;
+};
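
A minimal registration sketch (hypothetical client code; the target category and instance ID are placeholders), tying the callback, registry, and matching flags together:

static u32 example_notify(struct ssam_event_notifier *nf,
			  const struct ssam_event *event)
{
	pr_info("SSAM event: tc=%#04x cid=%#04x\n",
		event->target_category, event->command_id);
	return SSAM_NOTIF_HANDLED;
}

static int example_register(struct ssam_controller *ctrl,
			    struct ssam_event_notifier *nf)
{
	nf->base.fn       = example_notify;
	nf->base.priority = 0;
	nf->event.reg     = SSAM_EVENT_REGISTRY_KIP;
	nf->event.id.target_category = SSAM_SSH_TC_KIP;	/* placeholder */
	nf->event.id.instance        = 0x00;		/* placeholder */
	nf->event.mask    = SSAM_EVENT_MASK_STRICT;
	nf->event.flags   = SSAM_EVENT_SEQUENCED;

	return ssam_notifier_register(ctrl, nf);
}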
+
+int ssam_notifier_register(struct ssam_controller *ctrl,
+ struct ssam_event_notifier *n);
+
+int __ssam_notifier_unregister(struct ssam_controller *ctrl,
+ struct ssam_event_notifier *n, bool disable);
+
+/**
+ * ssam_notifier_unregister() - Unregister an event notifier.
+ * @ctrl: The controller the notifier has been registered on.
+ * @n: The event notifier to unregister.
+ *
+ * Unregister an event notifier. Decrement the usage counter of the associated
+ * SAM event if the notifier is not marked as an observer. If the usage counter
+ * reaches zero, the event will be disabled.
+ *
+ * Return: Returns zero on success, %-ENOENT if the given notifier block has
+ * not been registered on the controller. If the given notifier block was the
+ * last one associated with its specific event, returns the status of the
+ * event-disable EC-command.
+ */
+static inline int ssam_notifier_unregister(struct ssam_controller *ctrl,
+ struct ssam_event_notifier *n)
+{
+ return __ssam_notifier_unregister(ctrl, n, true);
+}
+
+int ssam_controller_event_enable(struct ssam_controller *ctrl,
+ struct ssam_event_registry reg,
+ struct ssam_event_id id, u8 flags);
+
+int ssam_controller_event_disable(struct ssam_controller *ctrl,
+ struct ssam_event_registry reg,
+ struct ssam_event_id id, u8 flags);
+
+#endif /* _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H */
diff --git a/include/linux/surface_aggregator/device.h b/include/linux/surface_aggregator/device.h
new file mode 100644
index 000000000000..46c45d1b6368
--- /dev/null
+++ b/include/linux/surface_aggregator/device.h
@@ -0,0 +1,640 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Surface System Aggregator Module (SSAM) bus and client-device subsystem.
+ *
+ * Main interface for the surface-aggregator bus, surface-aggregator client
+ * devices, and respective drivers building on top of the SSAM controller.
+ * Provides support for non-platform/non-ACPI SSAM clients via a dedicated
+ * subsystem.
+ *
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _LINUX_SURFACE_AGGREGATOR_DEVICE_H
+#define _LINUX_SURFACE_AGGREGATOR_DEVICE_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/controller.h>
+
+
+/* -- Surface System Aggregator Module bus. --------------------------------- */
+
+/**
+ * enum ssam_device_domain - SAM device domain.
+ * @SSAM_DOMAIN_VIRTUAL: Virtual device.
+ * @SSAM_DOMAIN_SERIALHUB: Physical device connected via Surface Serial Hub.
+ */
+enum ssam_device_domain {
+ SSAM_DOMAIN_VIRTUAL = 0x00,
+ SSAM_DOMAIN_SERIALHUB = 0x01,
+};
+
+/**
+ * enum ssam_virtual_tc - Target categories for the virtual SAM domain.
+ * @SSAM_VIRTUAL_TC_HUB: Device hub category.
+ */
+enum ssam_virtual_tc {
+ SSAM_VIRTUAL_TC_HUB = 0x00,
+};
+
+/**
+ * struct ssam_device_uid - Unique identifier for SSAM device.
+ * @domain: Domain of the device.
+ * @category: Target category of the device.
+ * @target: Target ID of the device.
+ * @instance: Instance ID of the device.
+ * @function: Sub-function of the device. This field can be used to split a
+ * single SAM device into multiple virtual subdevices to separate
+ * different functionality of that device and allow one driver per
+ * such functionality.
+ */
+struct ssam_device_uid {
+ u8 domain;
+ u8 category;
+ u8 target;
+ u8 instance;
+ u8 function;
+};
+
+/*
+ * Special values for device matching.
+ *
+ * These values are intended to be used with SSAM_DEVICE(), SSAM_VDEV(), and
+ * SSAM_SDEV() exclusively. Specifically, they are used to initialize the
+ * match_flags member of the device ID structure. Do not use them directly
+ * with struct ssam_device_id or struct ssam_device_uid.
+ */
+#define SSAM_ANY_TID 0xffff
+#define SSAM_ANY_IID 0xffff
+#define SSAM_ANY_FUN 0xffff
+
+/**
+ * SSAM_DEVICE() - Initialize a &struct ssam_device_id with the given
+ * parameters.
+ * @d: Domain of the device.
+ * @cat: Target category of the device.
+ * @tid: Target ID of the device.
+ * @iid: Instance ID of the device.
+ * @fun: Sub-function of the device.
+ *
+ * Initializes a &struct ssam_device_id with the given parameters. See &struct
+ * ssam_device_uid for details regarding the parameters. The special values
+ * %SSAM_ANY_TID, %SSAM_ANY_IID, and %SSAM_ANY_FUN can be used to specify that
+ * matching should ignore target ID, instance ID, and/or sub-function,
+ * respectively. This macro initializes the ``match_flags`` field based on the
+ * given parameters.
+ *
+ * Note: The parameters @d and @cat must be valid &u8 values, the parameters
+ * @tid, @iid, and @fun must be either valid &u8 values or %SSAM_ANY_TID,
+ * %SSAM_ANY_IID, or %SSAM_ANY_FUN, respectively. Other non-&u8 values are not
+ * allowed.
+ */
+#define SSAM_DEVICE(d, cat, tid, iid, fun) \
+ .match_flags = (((tid) != SSAM_ANY_TID) ? SSAM_MATCH_TARGET : 0) \
+ | (((iid) != SSAM_ANY_IID) ? SSAM_MATCH_INSTANCE : 0) \
+ | (((fun) != SSAM_ANY_FUN) ? SSAM_MATCH_FUNCTION : 0), \
+ .domain = d, \
+ .category = cat, \
+ .target = __builtin_choose_expr((tid) != SSAM_ANY_TID, (tid), 0), \
+ .instance = __builtin_choose_expr((iid) != SSAM_ANY_IID, (iid), 0), \
+ .function = __builtin_choose_expr((fun) != SSAM_ANY_FUN, (fun), 0)
+
+/**
+ * SSAM_VDEV() - Initialize a &struct ssam_device_id as virtual device with
+ * the given parameters.
+ * @cat: Target category of the device.
+ * @tid: Target ID of the device.
+ * @iid: Instance ID of the device.
+ * @fun: Sub-function of the device.
+ *
+ * Initializes a &struct ssam_device_id with the given parameters in the
+ * virtual domain. See &struct ssam_device_uid for details regarding the
+ * parameters. The special values %SSAM_ANY_TID, %SSAM_ANY_IID, and
+ * %SSAM_ANY_FUN can be used to specify that matching should ignore target ID,
+ * instance ID, and/or sub-function, respectively. This macro initializes the
+ * ``match_flags`` field based on the given parameters.
+ *
+ * Note: The parameter @cat must be a valid &u8 value, the parameters @tid,
+ * @iid, and @fun must be either valid &u8 values or %SSAM_ANY_TID,
+ * %SSAM_ANY_IID, or %SSAM_ANY_FUN, respectively. Other non-&u8 values are not
+ * allowed.
+ */
+#define SSAM_VDEV(cat, tid, iid, fun) \
+ SSAM_DEVICE(SSAM_DOMAIN_VIRTUAL, SSAM_VIRTUAL_TC_##cat, tid, iid, fun)
+
+/**
+ * SSAM_SDEV() - Initialize a &struct ssam_device_id as physical SSH device
+ * with the given parameters.
+ * @cat: Target category of the device.
+ * @tid: Target ID of the device.
+ * @iid: Instance ID of the device.
+ * @fun: Sub-function of the device.
+ *
+ * Initializes a &struct ssam_device_id with the given parameters in the SSH
+ * domain. See &struct ssam_device_uid for details regarding the parameters.
+ * The special values %SSAM_ANY_TID, %SSAM_ANY_IID, and %SSAM_ANY_FUN can be
+ * used to specify that matching should ignore target ID, instance ID, and/or
+ * sub-function, respectively. This macro initializes the ``match_flags``
+ * field based on the given parameters.
+ *
+ * Note: The parameter @cat must be a valid &u8 value, the parameters @tid,
+ * @iid, and @fun must be either valid &u8 values or %SSAM_ANY_TID,
+ * %SSAM_ANY_IID, or %SSAM_ANY_FUN, respectively. Other non-&u8 values are not
+ * allowed.
+ */
+#define SSAM_SDEV(cat, tid, iid, fun) \
+ SSAM_DEVICE(SSAM_DOMAIN_SERIALHUB, SSAM_SSH_TC_##cat, tid, iid, fun)
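+
+/*
+ * Example: a device-ID table built with these macros (a sketch; the concrete
+ * target/instance/function values below are hypothetical):
+ *
+ *	static const struct ssam_device_id ssam_example_match[] = {
+ *		{ SSAM_SDEV(HID, 0x02, SSAM_ANY_IID, 0x00) },
+ *		{ SSAM_VDEV(HUB, 0x01, SSAM_ANY_IID, SSAM_ANY_FUN) },
+ *		{ },
+ *	};
+ *	MODULE_DEVICE_TABLE(ssam, ssam_example_match);
+ *
+ * The first entry matches any instance of an SSH HID device on target 0x02
+ * with sub-function 0x00; the empty entry terminates the table.
+ */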
+
+/**
+ * enum ssam_device_flags - Flags for SSAM client devices.
+ * @SSAM_DEVICE_HOT_REMOVED_BIT:
+ * The device has been hot-removed. Further communication with it may time
+ * out and should be avoided.
+ */
+enum ssam_device_flags {
+ SSAM_DEVICE_HOT_REMOVED_BIT = 0,
+};
+
+/**
+ * struct ssam_device - SSAM client device.
+ * @dev: Driver model representation of the device.
+ * @ctrl: SSAM controller managing this device.
+ * @uid: UID identifying the device.
+ * @flags: Device state flags, see &enum ssam_device_flags.
+ */
+struct ssam_device {
+ struct device dev;
+ struct ssam_controller *ctrl;
+
+ struct ssam_device_uid uid;
+
+ unsigned long flags;
+};
+
+/**
+ * struct ssam_device_driver - SSAM client device driver.
+ * @driver: Base driver model structure.
+ * @match_table: Match table specifying which devices the driver should bind to.
+ * @probe: Called when the driver is being bound to a device.
+ * @remove: Called when the driver is being unbound from the device.
+ */
+struct ssam_device_driver {
+ struct device_driver driver;
+
+ const struct ssam_device_id *match_table;
+
+ int (*probe)(struct ssam_device *sdev);
+ void (*remove)(struct ssam_device *sdev);
+};
+
+#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
+
+extern struct bus_type ssam_bus_type;
+extern const struct device_type ssam_device_type;
+
+/**
+ * is_ssam_device() - Check if the given device is a SSAM client device.
+ * @d: The device to test the type of.
+ *
+ * Return: Returns %true if the specified device is of type &struct
+ * ssam_device, i.e. the device type points to %ssam_device_type, and %false
+ * otherwise.
+ */
+static inline bool is_ssam_device(struct device *d)
+{
+ return d->type == &ssam_device_type;
+}
+
+#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+static inline bool is_ssam_device(struct device *d)
+{
+ return false;
+}
+
+#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+/**
+ * to_ssam_device() - Casts the given device to a SSAM client device.
+ * @d: The device to cast.
+ *
+ * Casts the given &struct device to a &struct ssam_device. The caller has to
+ * ensure that the given device is actually enclosed in a &struct ssam_device,
+ * e.g. by calling is_ssam_device().
+ *
+ * Return: Returns a pointer to the &struct ssam_device wrapping the given
+ * device @d.
+ */
+static inline struct ssam_device *to_ssam_device(struct device *d)
+{
+ return container_of(d, struct ssam_device, dev);
+}
+
+/**
+ * to_ssam_device_driver() - Casts the given device driver to a SSAM client
+ * device driver.
+ * @d: The driver to cast.
+ *
+ * Casts the given &struct device_driver to a &struct ssam_device_driver. The
+ * caller has to ensure that the given driver is actually enclosed in a
+ * &struct ssam_device_driver.
+ *
+ * Return: Returns the pointer to the &struct ssam_device_driver wrapping the
+ * given device driver @d.
+ */
+static inline
+struct ssam_device_driver *to_ssam_device_driver(struct device_driver *d)
+{
+ return container_of(d, struct ssam_device_driver, driver);
+}
+
+const struct ssam_device_id *ssam_device_id_match(const struct ssam_device_id *table,
+ const struct ssam_device_uid uid);
+
+const struct ssam_device_id *ssam_device_get_match(const struct ssam_device *dev);
+
+const void *ssam_device_get_match_data(const struct ssam_device *dev);
+
+struct ssam_device *ssam_device_alloc(struct ssam_controller *ctrl,
+ struct ssam_device_uid uid);
+
+int ssam_device_add(struct ssam_device *sdev);
+void ssam_device_remove(struct ssam_device *sdev);
+
+/**
+ * ssam_device_mark_hot_removed() - Mark the given device as hot-removed.
+ * @sdev: The device to mark as hot-removed.
+ *
+ * Mark the device as having been hot-removed. This signals drivers using the
+ * device that communication with the device should be avoided and may lead to
+ * timeouts.
+ */
+static inline void ssam_device_mark_hot_removed(struct ssam_device *sdev)
+{
+ dev_dbg(&sdev->dev, "marking device as hot-removed\n");
+ set_bit(SSAM_DEVICE_HOT_REMOVED_BIT, &sdev->flags);
+}
+
+/**
+ * ssam_device_is_hot_removed() - Check if the given device has been
+ * hot-removed.
+ * @sdev: The device to check.
+ *
+ * Checks if the given device has been marked as hot-removed. See
+ * ssam_device_mark_hot_removed() for more details.
+ *
+ * Return: Returns %true if the device has been marked as hot-removed.
+ */
+static inline bool ssam_device_is_hot_removed(struct ssam_device *sdev)
+{
+ return test_bit(SSAM_DEVICE_HOT_REMOVED_BIT, &sdev->flags);
+}
+
+/**
+ * ssam_device_get() - Increment reference count of SSAM client device.
+ * @sdev: The device to increment the reference count of.
+ *
+ * Increments the reference count of the given SSAM client device by
+ * incrementing the reference count of the enclosed &struct device via
+ * get_device().
+ *
+ * See ssam_device_put() for the counter-part of this function.
+ *
+ * Return: Returns the device provided as input.
+ */
+static inline struct ssam_device *ssam_device_get(struct ssam_device *sdev)
+{
+ return sdev ? to_ssam_device(get_device(&sdev->dev)) : NULL;
+}
+
+/**
+ * ssam_device_put() - Decrement reference count of SSAM client device.
+ * @sdev: The device to decrement the reference count of.
+ *
+ * Decrements the reference count of the given SSAM client device by
+ * decrementing the reference count of the enclosed &struct device via
+ * put_device().
+ *
+ * See ssam_device_get() for the counter-part of this function.
+ */
+static inline void ssam_device_put(struct ssam_device *sdev)
+{
+ if (sdev)
+ put_device(&sdev->dev);
+}
+
+/**
+ * ssam_device_get_drvdata() - Get driver-data of SSAM client device.
+ * @sdev: The device to get the driver-data from.
+ *
+ * Return: Returns the driver-data of the given device, previously set via
+ * ssam_device_set_drvdata().
+ */
+static inline void *ssam_device_get_drvdata(struct ssam_device *sdev)
+{
+ return dev_get_drvdata(&sdev->dev);
+}
+
+/**
+ * ssam_device_set_drvdata() - Set driver-data of SSAM client device.
+ * @sdev: The device to set the driver-data of.
+ * @data: The data to set the device's driver-data pointer to.
+ */
+static inline void ssam_device_set_drvdata(struct ssam_device *sdev, void *data)
+{
+ dev_set_drvdata(&sdev->dev, data);
+}
+
+int __ssam_device_driver_register(struct ssam_device_driver *d, struct module *o);
+void ssam_device_driver_unregister(struct ssam_device_driver *d);
+
+/**
+ * ssam_device_driver_register() - Register a SSAM client device driver.
+ * @drv: The driver to register.
+ */
+#define ssam_device_driver_register(drv) \
+ __ssam_device_driver_register(drv, THIS_MODULE)
+
+/**
+ * module_ssam_device_driver() - Helper macro for SSAM device driver
+ * registration.
+ * @drv: The driver managed by this module.
+ *
+ * Helper macro to register a SSAM device driver via module_init() and
+ * module_exit(). This macro may only be used once per module and replaces the
+ * aforementioned definitions.
+ */
+#define module_ssam_device_driver(drv) \
+ module_driver(drv, ssam_device_driver_register, \
+ ssam_device_driver_unregister)
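+
+/*
+ * Example: a minimal client-driver skeleton using the registration helpers
+ * above (a sketch; all names are hypothetical, and ssam_example_match is
+ * assumed to be a device-ID table as shown with SSAM_SDEV()/SSAM_VDEV()):
+ *
+ *	static int ssam_example_probe(struct ssam_device *sdev)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static void ssam_example_remove(struct ssam_device *sdev)
+ *	{
+ *	}
+ *
+ *	static struct ssam_device_driver ssam_example_driver = {
+ *		.probe = ssam_example_probe,
+ *		.remove = ssam_example_remove,
+ *		.match_table = ssam_example_match,
+ *		.driver = {
+ *			.name = "ssam_example",
+ *		},
+ *	};
+ *	module_ssam_device_driver(ssam_example_driver);
+ */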
+
+
+/* -- Helpers for controller and hub devices. ------------------------------- */
+
+#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
+
+int __ssam_register_clients(struct device *parent, struct ssam_controller *ctrl,
+ struct fwnode_handle *node);
+void ssam_remove_clients(struct device *dev);
+
+#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+static inline int __ssam_register_clients(struct device *parent, struct ssam_controller *ctrl,
+ struct fwnode_handle *node)
+{
+ return 0;
+}
+
+static inline void ssam_remove_clients(struct device *dev) {}
+
+#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+/**
+ * ssam_register_clients() - Register all client devices defined under the
+ * given parent device.
+ * @dev: The parent device under which clients should be registered.
+ * @ctrl: The controller with which client should be registered.
+ *
+ * Register all clients that have been defined as children of the given
+ * (parent) device via firmware nodes. The respective child firmware nodes
+ * will be associated with the correspondingly created child devices.
+ *
+ * The given controller will be used to instantiate the new devices. See
+ * ssam_device_add() for details.
+ *
+ * Return: Returns zero on success, nonzero on failure.
+ */
+static inline int ssam_register_clients(struct device *dev, struct ssam_controller *ctrl)
+{
+ return __ssam_register_clients(dev, ctrl, dev_fwnode(dev));
+}
+
+/**
+ * ssam_device_register_clients() - Register all client devices defined under
+ * the given SSAM parent device.
+ * @sdev: The parent device under which clients should be registered.
+ *
+ * Register all clients that have been defined as children of the given
+ * (parent) device via firmware nodes. The respective child firmware nodes
+ * will be associated with the correspondingly created child devices.
+ *
+ * The controller used by the parent device will be used to instantiate the new
+ * devices. See ssam_device_add() for details.
+ *
+ * Return: Returns zero on success, nonzero on failure.
+ */
+static inline int ssam_device_register_clients(struct ssam_device *sdev)
+{
+ return ssam_register_clients(&sdev->dev, sdev->ctrl);
+}
+
+
+/* -- Helpers for client-device requests. ----------------------------------- */
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_CL_N() - Define synchronous client-device SAM
+ * request function with neither argument nor return value.
+ * @name: Name of the generated function.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request having neither argument nor return value. Device
+ * specifying parameters are not hard-coded, but instead are provided via the
+ * client device, specifically its UID, supplied when calling this function.
+ * The generated function takes care of setting up the request struct, buffer
+ * allocation, as well as execution of the request itself, returning once the
+ * request has been fully completed. The required transport buffer will be
+ * allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct ssam_device
+ * *sdev)``, returning the status of the request, which is zero on success and
+ * negative on failure. The ``sdev`` parameter specifies both the target
+ * device of the request and by association the controller via which the
+ * request is sent.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_CL_N(name, spec...) \
+ SSAM_DEFINE_SYNC_REQUEST_MD_N(__raw_##name, spec) \
+ static int name(struct ssam_device *sdev) \
+ { \
+ return __raw_##name(sdev->ctrl, sdev->uid.target, \
+ sdev->uid.instance); \
+ }
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_CL_W() - Define synchronous client-device SAM
+ * request function with argument.
+ * @name: Name of the generated function.
+ * @atype: Type of the request's argument.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request taking an argument of type @atype and having no
+ * return value. Device specifying parameters are not hard-coded, but instead
+ * are provided via the client device, specifically its UID, supplied when
+ * calling this function. The generated function takes care of setting up the
+ * request struct, buffer allocation, as well as execution of the request
+ * itself, returning once the request has been fully completed. The required
+ * transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct ssam_device
+ * *sdev, const atype *arg)``, returning the status of the request, which is
+ * zero on success and negative on failure. The ``sdev`` parameter specifies
+ * both the target device of the request and by association the controller via
+ * which the request is sent. The request's argument is specified via the
+ * ``arg`` pointer.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_CL_W(name, atype, spec...) \
+ SSAM_DEFINE_SYNC_REQUEST_MD_W(__raw_##name, atype, spec) \
+ static int name(struct ssam_device *sdev, const atype *arg) \
+ { \
+ return __raw_##name(sdev->ctrl, sdev->uid.target, \
+ sdev->uid.instance, arg); \
+ }
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_CL_R() - Define synchronous client-device SAM
+ * request function with return value.
+ * @name: Name of the generated function.
+ * @rtype: Type of the request's return value.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request taking no argument but having a return value of
+ * type @rtype. Device specifying parameters are not hard-coded, but instead
+ * are provided via the client device, specifically its UID, supplied when
+ * calling this function. The generated function takes care of setting up the
+ * request struct, buffer allocation, as well as execution of the request
+ * itself, returning once the request has been fully completed. The required
+ * transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct ssam_device
+ * *sdev, rtype *ret)``, returning the status of the request, which is zero on
+ * success and negative on failure. The ``sdev`` parameter specifies both the
+ * target device of the request and by association the controller via which
+ * the request is sent. The request's return value is written to the memory
+ * pointed to by the ``ret`` parameter.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_CL_R(name, rtype, spec...) \
+ SSAM_DEFINE_SYNC_REQUEST_MD_R(__raw_##name, rtype, spec) \
+ static int name(struct ssam_device *sdev, rtype *ret) \
+ { \
+ return __raw_##name(sdev->ctrl, sdev->uid.target, \
+ sdev->uid.instance, ret); \
+ }
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_CL_WR() - Define synchronous client-device SAM
+ * request function with argument and return value.
+ * @name: Name of the generated function.
+ * @atype: Type of the request's argument.
+ * @rtype: Type of the request's return value.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by @spec,
+ * with the request taking an argument of type @atype and having a return value
+ * of type @rtype. Device specifying parameters are not hard-coded, but instead
+ * are provided via the client device, specifically its UID, supplied when
+ * calling this function. The generated function takes care of setting up the
+ * request struct, buffer allocation, as well as execution of the request
+ * itself, returning once the request has been fully completed. The required
+ * transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct ssam_device
+ * *sdev, const atype *arg, rtype *ret)``, returning the status of the request,
+ * which is zero on success and negative on failure. The ``sdev`` parameter
+ * specifies both the target device of the request and by association the
+ * controller via which the request is sent. The request's argument is
+ * specified via the ``arg`` pointer. The request's return value is written to
+ * the memory pointed to by the ``ret`` parameter.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_CL_WR(name, atype, rtype, spec...) \
+ SSAM_DEFINE_SYNC_REQUEST_MD_WR(__raw_##name, atype, rtype, spec) \
+ static int name(struct ssam_device *sdev, const atype *arg, rtype *ret) \
+ { \
+ return __raw_##name(sdev->ctrl, sdev->uid.target, \
+ sdev->uid.instance, arg, ret); \
+ }
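+
+/*
+ * Example: defining and calling a simple getter via
+ * SSAM_DEFINE_SYNC_REQUEST_CL_R() (a sketch; the function name, target
+ * category, and command ID below are hypothetical):
+ *
+ *	SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_example_get_mode, u8, {
+ *		.target_category = SSAM_SSH_TC_HID,
+ *		.command_id      = 0x01,
+ *	});
+ *
+ * which, given some struct ssam_device *sdev, can then be called as
+ *
+ *	u8 mode;
+ *	int status = ssam_example_get_mode(sdev, &mode);
+ */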
+
+
+/* -- Helpers for client-device notifiers. ---------------------------------- */
+
+/**
+ * ssam_device_notifier_register() - Register an event notifier for the
+ * specified client device.
+ * @sdev: The device the notifier should be registered on.
+ * @n: The event notifier to register.
+ *
+ * Register an event notifier. Increment the usage counter of the associated
+ * SAM event if the notifier is not marked as an observer. If the event is not
+ * marked as an observer and is currently not enabled, it will be enabled
+ * during this call. If the notifier is marked as an observer, no attempt will
+ * be made at enabling any event and no reference count will be modified.
+ *
+ * Notifiers marked as observers do not need to be associated with one specific
+ * event, i.e. as long as no event matching is performed, only the event target
+ * category needs to be set.
+ *
+ * Return: Returns zero on success, %-ENOSPC if %INT_MAX notifiers have
+ * already been registered for the event ID/type associated with the notifier
+ * block, %-ENOMEM if the corresponding event entry could not be allocated,
+ * and %-ENODEV if the device is marked as hot-removed. If this is the
+ * first time that a notifier block is registered for the specific associated
+ * event, returns the status of the event-enable EC-command.
+ */
+static inline int ssam_device_notifier_register(struct ssam_device *sdev,
+ struct ssam_event_notifier *n)
+{
+ /*
+ * Note that this check does not provide any guarantees whatsoever as
+ * hot-removal could happen at any point and we can't protect against
+ * it. Nevertheless, if we can detect hot-removal, bail early to avoid
+ * communication timeouts.
+ */
+ if (ssam_device_is_hot_removed(sdev))
+ return -ENODEV;
+
+ return ssam_notifier_register(sdev->ctrl, n);
+}
+
+/**
+ * ssam_device_notifier_unregister() - Unregister an event notifier for the
+ * specified client device.
+ * @sdev: The device the notifier has been registered on.
+ * @n: The event notifier to unregister.
+ *
+ * Unregister an event notifier. Decrement the usage counter of the associated
+ * SAM event if the notifier is not marked as an observer. If the usage counter
+ * reaches zero, the event will be disabled.
+ *
+ * In case the device has been marked as hot-removed, the event will not be
+ * disabled on the EC, as in those cases any attempt at doing so may time out.
+ *
+ * Return: Returns zero on success, %-ENOENT if the given notifier block has
+ * not been registered on the controller. If the given notifier block was the
+ * last one associated with its specific event, returns the status of the
+ * event-disable EC-command.
+ */
+static inline int ssam_device_notifier_unregister(struct ssam_device *sdev,
+ struct ssam_event_notifier *n)
+{
+ return __ssam_notifier_unregister(sdev->ctrl, n,
+ !ssam_device_is_hot_removed(sdev));
+}
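+
+/*
+ * Example: the typical lifecycle pairing for the helpers above (a sketch;
+ * the function names are hypothetical and notif is a hypothetical
+ * &struct ssam_event_notifier owned by the driver):
+ *
+ *	static int example_client_probe(struct ssam_device *sdev)
+ *	{
+ *		return ssam_device_notifier_register(sdev, &notif);
+ *	}
+ *
+ *	static void example_client_remove(struct ssam_device *sdev)
+ *	{
+ *		ssam_device_notifier_unregister(sdev, &notif);
+ *	}
+ *
+ * The hot-removal checks in both helpers then avoid EC communication
+ * timeouts when the underlying device has been detached.
+ */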
+
+#endif /* _LINUX_SURFACE_AGGREGATOR_DEVICE_H */
diff --git a/include/linux/surface_aggregator/serial_hub.h b/include/linux/surface_aggregator/serial_hub.h
new file mode 100644
index 000000000000..45501b6e54e8
--- /dev/null
+++ b/include/linux/surface_aggregator/serial_hub.h
@@ -0,0 +1,677 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Surface Serial Hub (SSH) protocol and communication interface.
+ *
+ * Lower-level communication layers and SSH protocol definitions for the
+ * Surface System Aggregator Module (SSAM). Provides the interface for basic
+ * packet- and request-based communication with the SSAM EC via SSH.
+ *
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H
+#define _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H
+
+#include <linux/crc-ccitt.h>
+#include <linux/kref.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+
+/* -- Data structures for SAM-over-SSH communication. ----------------------- */
+
+/**
+ * enum ssh_frame_type - Frame types for SSH frames.
+ *
+ * @SSH_FRAME_TYPE_DATA_SEQ:
+ * Indicates a data frame, followed by a payload with the length specified
+ * in the ``struct ssh_frame.len`` field. This frame is sequenced, meaning
+ * that an ACK is required.
+ *
+ * @SSH_FRAME_TYPE_DATA_NSQ:
+ * Same as %SSH_FRAME_TYPE_DATA_SEQ, but unsequenced, meaning that the
+ * message does not have to be ACKed.
+ *
+ * @SSH_FRAME_TYPE_ACK:
+ * Indicates an ACK message.
+ *
+ * @SSH_FRAME_TYPE_NAK:
+ * Indicates an error response for previously sent frame. In general, this
+ * means that the frame and/or payload is malformed, e.g. a CRC is wrong.
+ * For command-type payloads, this can also mean that the command is
+ * invalid.
+ */
+enum ssh_frame_type {
+ SSH_FRAME_TYPE_DATA_SEQ = 0x80,
+ SSH_FRAME_TYPE_DATA_NSQ = 0x00,
+ SSH_FRAME_TYPE_ACK = 0x40,
+ SSH_FRAME_TYPE_NAK = 0x04,
+};
+
+/**
+ * struct ssh_frame - SSH communication frame.
+ * @type: The type of the frame. See &enum ssh_frame_type.
+ * @len: The length of the frame payload directly following the CRC for this
+ * frame. Does not include the final CRC for that payload.
+ * @seq: The sequence number for this message/exchange.
+ */
+struct ssh_frame {
+ u8 type;
+ __le16 len;
+ u8 seq;
+} __packed;
+
+static_assert(sizeof(struct ssh_frame) == 4);
+
+/*
+ * SSH_FRAME_MAX_PAYLOAD_SIZE - Maximum SSH frame payload length in bytes.
+ *
+ * This is the physical maximum length of the protocol. Implementations may
+ * set a more constrained limit.
+ */
+#define SSH_FRAME_MAX_PAYLOAD_SIZE U16_MAX
+
+/**
+ * enum ssh_payload_type - Type indicator for the SSH payload.
+ * @SSH_PLD_TYPE_CMD: The payload is a command structure with optional command
+ * payload.
+ */
+enum ssh_payload_type {
+ SSH_PLD_TYPE_CMD = 0x80,
+};
+
+/**
+ * struct ssh_command - Payload of a command-type frame.
+ * @type: The type of the payload. See &enum ssh_payload_type. Should be
+ * SSH_PLD_TYPE_CMD for this struct.
+ * @tc: Command target category.
+ * @tid_out: Output target ID. Should be zero if this is an incoming (EC to
+ *	     host) message.
+ * @tid_in: Input target ID. Should be zero if this is an outgoing (host to
+ * EC) message.
+ * @iid: Instance ID.
+ * @rqid: Request ID. Used to match requests with responses and differentiate
+ * between responses and events.
+ * @cid: Command ID.
+ */
+struct ssh_command {
+ u8 type;
+ u8 tc;
+ u8 tid_out;
+ u8 tid_in;
+ u8 iid;
+ __le16 rqid;
+ u8 cid;
+} __packed;
+
+static_assert(sizeof(struct ssh_command) == 8);
+
+/*
+ * SSH_COMMAND_MAX_PAYLOAD_SIZE - Maximum SSH command payload length in bytes.
+ *
+ * This is the physical maximum length of the protocol. Implementations may
+ * set a more constrained limit.
+ */
+#define SSH_COMMAND_MAX_PAYLOAD_SIZE \
+ (SSH_FRAME_MAX_PAYLOAD_SIZE - sizeof(struct ssh_command))
+
+/*
+ * SSH_MSG_LEN_BASE - Base-length of a SSH message.
+ *
+ * This is the minimum number of bytes required to form a message. The actual
+ * message length is SSH_MSG_LEN_BASE plus the length of the frame payload.
+ */
+#define SSH_MSG_LEN_BASE (sizeof(struct ssh_frame) + 3ull * sizeof(u16))
+
+/*
+ * SSH_MSG_LEN_CTRL - Length of a SSH control message.
+ *
+ * This is the length of a SSH control message, which is equal to a SSH
+ * message without any payload.
+ */
+#define SSH_MSG_LEN_CTRL SSH_MSG_LEN_BASE
+
+/**
+ * SSH_MESSAGE_LENGTH() - Compute length of SSH message.
+ * @payload_size: Length of the payload inside the SSH frame.
+ *
+ * Return: Returns the length of a SSH message with payload of specified size.
+ */
+#define SSH_MESSAGE_LENGTH(payload_size) (SSH_MSG_LEN_BASE + (payload_size))
+
+/**
+ * SSH_COMMAND_MESSAGE_LENGTH() - Compute length of SSH command message.
+ * @payload_size: Length of the command payload.
+ *
+ * Return: Returns the length of a SSH command message with command payload of
+ * specified size.
+ */
+#define SSH_COMMAND_MESSAGE_LENGTH(payload_size) \
+ SSH_MESSAGE_LENGTH(sizeof(struct ssh_command) + (payload_size))
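+
+/*
+ * For example: sizeof(struct ssh_frame) == 4 and the three u16 values are
+ * the SYN bytes, the frame CRC, and the payload CRC, so SSH_MSG_LEN_BASE
+ * evaluates to 10. A control message is thus 10 bytes long, and a command
+ * message carrying an n-byte command payload is 10 + 8 + n bytes, e.g.
+ * SSH_COMMAND_MESSAGE_LENGTH(0) == 18.
+ */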
+
+/**
+ * SSH_MSGOFFSET_FRAME() - Compute offset in SSH message to specified field in
+ * frame.
+ * @field: The field for which the offset should be computed.
+ *
+ * Return: Returns the offset of the specified &struct ssh_frame field in the
+ * raw SSH message data. Takes SYN bytes (u16) preceding the frame into
+ * account.
+ */
+#define SSH_MSGOFFSET_FRAME(field) \
+ (sizeof(u16) + offsetof(struct ssh_frame, field))
+
+/**
+ * SSH_MSGOFFSET_COMMAND() - Compute offset in SSH message to specified field
+ * in command.
+ * @field: The field for which the offset should be computed.
+ *
+ * Return: Returns the offset of the specified &struct ssh_command field in
+ * the raw SSH message data. Takes SYN bytes (u16) preceding the frame and the
+ * frame CRC (u16) between frame and command into account.
+ */
+#define SSH_MSGOFFSET_COMMAND(field) \
+ (2ull * sizeof(u16) + sizeof(struct ssh_frame) \
+ + offsetof(struct ssh_command, field))
+
+/*
+ * SSH_MSG_SYN - SSH message synchronization (SYN) bytes as u16.
+ */
+#define SSH_MSG_SYN ((u16)0x55aa)
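+
+/*
+ * Taken together, the macros above describe the following on-wire message
+ * layout (sizes in bytes):
+ *
+ *	+---------+-----------+-----------+-------------------+-------------+
+ *	| SYN (2) | frame (4) | frame CRC | payload           | payload CRC |
+ *	|         |           | (2)       | (frame.len)       | (2)         |
+ *	+---------+-----------+-----------+-------------------+-------------+
+ *
+ * SSH_MSGOFFSET_FRAME() and SSH_MSGOFFSET_COMMAND() compute field offsets
+ * relative to the start of this layout, with the command structure forming
+ * the beginning of the payload for command-type frames.
+ */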
+
+/**
+ * ssh_crc() - Compute CRC for SSH messages.
+ * @buf: The pointer pointing to the data for which the CRC should be computed.
+ * @len: The length of the data for which the CRC should be computed.
+ *
+ * Return: Returns the CRC computed on the provided data, as used for SSH
+ * messages.
+ */
+static inline u16 ssh_crc(const u8 *buf, size_t len)
+{
+ return crc_ccitt_false(0xffff, buf, len);
+}
+
+/*
+ * SSH_NUM_EVENTS - The number of reserved event IDs.
+ *
+ * The number of reserved event IDs, used for registering an SSH event
+ * handler. Valid event IDs are numbers below or equal to this value, with
+ * exception of zero, which is not an event ID. Thus, this is also the
+ * absolute maximum number of event handlers that can be registered.
+ */
+#define SSH_NUM_EVENTS 38
+
+/*
+ * SSH_NUM_TARGETS - The number of communication targets used in the protocol.
+ */
+#define SSH_NUM_TARGETS 2
+
+/**
+ * ssh_rqid_next_valid() - Return the next valid request ID.
+ * @rqid: The current request ID.
+ *
+ * Return: Returns the next valid request ID, following the current request ID
+ * provided to this function. This function skips any request IDs reserved for
+ * events.
+ */
+static inline u16 ssh_rqid_next_valid(u16 rqid)
+{
+ return rqid > 0 ? rqid + 1u : rqid + SSH_NUM_EVENTS + 1u;
+}
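+
+/*
+ * For example, starting from a request ID of zero this yields the sequence
+ * 39, 40, 41, ..., since IDs 1 to SSH_NUM_EVENTS (38) are reserved for
+ * events; the reserved range is skipped again after the u16 value wraps
+ * around.
+ */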
+
+/**
+ * ssh_rqid_to_event() - Convert request ID to its corresponding event ID.
+ * @rqid: The request ID to convert.
+ */
+static inline u16 ssh_rqid_to_event(u16 rqid)
+{
+ return rqid - 1u;
+}
+
+/**
+ * ssh_rqid_is_event() - Check if given request ID is a valid event ID.
+ * @rqid: The request ID to check.
+ */
+static inline bool ssh_rqid_is_event(u16 rqid)
+{
+ return ssh_rqid_to_event(rqid) < SSH_NUM_EVENTS;
+}
+
+/**
+ * ssh_tc_to_rqid() - Convert target category to its corresponding request ID.
+ * @tc: The target category to convert.
+ */
+static inline u16 ssh_tc_to_rqid(u8 tc)
+{
+ return tc;
+}
+
+/**
+ * ssh_tid_to_index() - Convert target ID to its corresponding target index.
+ * @tid: The target ID to convert.
+ */
+static inline u8 ssh_tid_to_index(u8 tid)
+{
+ return tid - 1u;
+}
+
+/**
+ * ssh_tid_is_valid() - Check if target ID is valid/supported.
+ * @tid: The target ID to check.
+ */
+static inline bool ssh_tid_is_valid(u8 tid)
+{
+ return ssh_tid_to_index(tid) < SSH_NUM_TARGETS;
+}
+
+/**
+ * struct ssam_span - Reference to a buffer region.
+ * @ptr: Pointer to the buffer region.
+ * @len: Length of the buffer region.
+ *
+ * A reference to a (non-owned) buffer segment, consisting of pointer and
+ * length. Use of this struct indicates non-owned data, i.e. data whose
+ * life-time is managed (i.e. allocated and freed) through another pointer.
+ */
+struct ssam_span {
+ u8 *ptr;
+ size_t len;
+};
+
+/*
+ * Known SSH/EC target categories.
+ *
+ * List of currently known target category values; "Known" as in we know they
+ * exist and are valid on at least some device/model. Detailed functionality
+ * or the full category name is only known for some of these categories and
+ * is detailed in the respective comment below.
+ *
+ * These values and abbreviations have been extracted from strings inside the
+ * Windows driver.
+ */
+enum ssam_ssh_tc {
+ /* Category 0x00 is invalid for EC use. */
+ SSAM_SSH_TC_SAM = 0x01, /* Generic system functionality, real-time clock. */
+ SSAM_SSH_TC_BAT = 0x02, /* Battery/power subsystem. */
+ SSAM_SSH_TC_TMP = 0x03, /* Thermal subsystem. */
+ SSAM_SSH_TC_PMC = 0x04,
+ SSAM_SSH_TC_FAN = 0x05,
+ SSAM_SSH_TC_PoM = 0x06,
+ SSAM_SSH_TC_DBG = 0x07,
+ SSAM_SSH_TC_KBD = 0x08, /* Legacy keyboard (Laptop 1/2). */
+ SSAM_SSH_TC_FWU = 0x09,
+ SSAM_SSH_TC_UNI = 0x0a,
+ SSAM_SSH_TC_LPC = 0x0b,
+ SSAM_SSH_TC_TCL = 0x0c,
+ SSAM_SSH_TC_SFL = 0x0d,
+	SSAM_SSH_TC_KIP  = 0x0e,	/* Manages detachable peripherals (Pro X/8 keyboard cover). */
+ SSAM_SSH_TC_EXT = 0x0f,
+ SSAM_SSH_TC_BLD = 0x10,
+ SSAM_SSH_TC_BAS = 0x11, /* Detachment system (Surface Book 2/3). */
+ SSAM_SSH_TC_SEN = 0x12,
+ SSAM_SSH_TC_SRQ = 0x13,
+ SSAM_SSH_TC_MCU = 0x14,
+ SSAM_SSH_TC_HID = 0x15, /* Generic HID input subsystem. */
+ SSAM_SSH_TC_TCH = 0x16,
+ SSAM_SSH_TC_BKL = 0x17,
+ SSAM_SSH_TC_TAM = 0x18,
+ SSAM_SSH_TC_ACC0 = 0x19,
+ SSAM_SSH_TC_UFI = 0x1a,
+ SSAM_SSH_TC_USC = 0x1b,
+ SSAM_SSH_TC_PEN = 0x1c,
+ SSAM_SSH_TC_VID = 0x1d,
+ SSAM_SSH_TC_AUD = 0x1e,
+ SSAM_SSH_TC_SMC = 0x1f,
+ SSAM_SSH_TC_KPD = 0x20,
+ SSAM_SSH_TC_REG = 0x21, /* Extended event registry. */
+ SSAM_SSH_TC_SPT = 0x22,
+ SSAM_SSH_TC_SYS = 0x23,
+ SSAM_SSH_TC_ACC1 = 0x24,
+ SSAM_SSH_TC_SHB = 0x25,
+ SSAM_SSH_TC_POS = 0x26, /* For obtaining Laptop Studio screen position. */
+};
+
+
+/* -- Packet transport layer (ptl). ----------------------------------------- */
+
+/**
+ * enum ssh_packet_base_priority - Base priorities for &struct ssh_packet.
+ * @SSH_PACKET_PRIORITY_FLUSH: Base priority for flush packets.
+ * @SSH_PACKET_PRIORITY_DATA: Base priority for normal data packets.
+ * @SSH_PACKET_PRIORITY_NAK: Base priority for NAK packets.
+ * @SSH_PACKET_PRIORITY_ACK: Base priority for ACK packets.
+ */
+enum ssh_packet_base_priority {
+ SSH_PACKET_PRIORITY_FLUSH = 0, /* same as DATA to sequence flush */
+ SSH_PACKET_PRIORITY_DATA = 0,
+ SSH_PACKET_PRIORITY_NAK = 1,
+ SSH_PACKET_PRIORITY_ACK = 2,
+};
+
+/*
+ * Same as SSH_PACKET_PRIORITY() below, only with actual values.
+ */
+#define __SSH_PACKET_PRIORITY(base, try) \
+ (((base) << 4) | ((try) & 0x0f))
+
+/**
+ * SSH_PACKET_PRIORITY() - Compute packet priority from base priority and
+ * number of tries.
+ * @base: The base priority as suffix of &enum ssh_packet_base_priority, e.g.
+ * ``FLUSH``, ``DATA``, ``ACK``, or ``NAK``.
+ * @try: The number of tries (must be less than 16).
+ *
+ * Compute the combined packet priority. The combined priority is dominated by
+ * the base priority, whereas the number of (re-)tries decides the precedence
+ * of packets with the same base priority, giving higher priority to packets
+ * that already have more tries.
+ *
+ * Return: Returns the computed priority as value fitting inside a &u8. A
+ * higher number means a higher priority.
+ */
+#define SSH_PACKET_PRIORITY(base, try) \
+ __SSH_PACKET_PRIORITY(SSH_PACKET_PRIORITY_##base, (try))
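+
+/*
+ * For example, SSH_PACKET_PRIORITY(DATA, 0) == 0x00 and
+ * SSH_PACKET_PRIORITY(ACK, 0) == 0x20, so ACK packets always precede data
+ * packets, while SSH_PACKET_PRIORITY(DATA, 2) == 0x02 lets a twice-tried
+ * data packet precede a fresh one.
+ */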
+
+/**
+ * ssh_packet_priority_get_try() - Get number of tries from packet priority.
+ * @priority: The packet priority.
+ *
+ * Return: Returns the number of tries encoded in the specified packet
+ * priority.
+ */
+static inline u8 ssh_packet_priority_get_try(u8 priority)
+{
+ return priority & 0x0f;
+}
+
+/**
+ * ssh_packet_priority_get_base - Get base priority from packet priority.
+ * @priority: The packet priority.
+ *
+ * Return: Returns the base priority encoded in the given packet priority.
+ */
+static inline u8 ssh_packet_priority_get_base(u8 priority)
+{
+ return (priority & 0xf0) >> 4;
+}
+
+enum ssh_packet_flags {
+ /* state flags */
+ SSH_PACKET_SF_LOCKED_BIT,
+ SSH_PACKET_SF_QUEUED_BIT,
+ SSH_PACKET_SF_PENDING_BIT,
+ SSH_PACKET_SF_TRANSMITTING_BIT,
+ SSH_PACKET_SF_TRANSMITTED_BIT,
+ SSH_PACKET_SF_ACKED_BIT,
+ SSH_PACKET_SF_CANCELED_BIT,
+ SSH_PACKET_SF_COMPLETED_BIT,
+
+ /* type flags */
+ SSH_PACKET_TY_FLUSH_BIT,
+ SSH_PACKET_TY_SEQUENCED_BIT,
+ SSH_PACKET_TY_BLOCKING_BIT,
+
+ /* mask for state flags */
+ SSH_PACKET_FLAGS_SF_MASK =
+ BIT(SSH_PACKET_SF_LOCKED_BIT)
+ | BIT(SSH_PACKET_SF_QUEUED_BIT)
+ | BIT(SSH_PACKET_SF_PENDING_BIT)
+ | BIT(SSH_PACKET_SF_TRANSMITTING_BIT)
+ | BIT(SSH_PACKET_SF_TRANSMITTED_BIT)
+ | BIT(SSH_PACKET_SF_ACKED_BIT)
+ | BIT(SSH_PACKET_SF_CANCELED_BIT)
+ | BIT(SSH_PACKET_SF_COMPLETED_BIT),
+
+ /* mask for type flags */
+ SSH_PACKET_FLAGS_TY_MASK =
+ BIT(SSH_PACKET_TY_FLUSH_BIT)
+ | BIT(SSH_PACKET_TY_SEQUENCED_BIT)
+ | BIT(SSH_PACKET_TY_BLOCKING_BIT),
+};
+
+struct ssh_ptl;
+struct ssh_packet;
+
+/**
+ * struct ssh_packet_ops - Callback operations for a SSH packet.
+ * @release: Function called when the packet reference count reaches zero.
+ * This callback must be relied upon to ensure that the packet has
+ * left the transport system(s).
+ * @complete: Function called when the packet is completed, either with
+ * success or failure. In case of failure, the reason for the
+ * failure is indicated by the value of the provided status code
+ * argument. This value will be zero in case of success. Note that
+ * a call to this callback does not guarantee that the packet is
+ * not in use by the transport system any more.
+ */
+struct ssh_packet_ops {
+ void (*release)(struct ssh_packet *p);
+ void (*complete)(struct ssh_packet *p, int status);
+};
+
+/**
+ * struct ssh_packet - SSH transport packet.
+ * @ptl: Pointer to the packet transport layer. May be %NULL if the packet
+ * (or enclosing request) has not been submitted yet.
+ * @refcnt: Reference count of the packet.
+ * @priority: Priority of the packet. Must be computed via
+ * SSH_PACKET_PRIORITY(). Must only be accessed while holding the
+ * queue lock after first submission.
+ * @data: Raw message data.
+ * @data.len: Length of the raw message data.
+ * @data.ptr: Pointer to the raw message data buffer.
+ * @state: State and type flags describing current packet state (dynamic)
+ * and type (static). See &enum ssh_packet_flags for possible
+ * options.
+ * @timestamp: Timestamp specifying when the latest transmission of a
+ * currently pending packet has been started. May be %KTIME_MAX
+ * before or in-between transmission attempts. Used for the packet
+ * timeout implementation. Must only be accessed while holding the
+ * pending lock after first submission.
+ * @queue_node: The list node for the packet queue.
+ * @pending_node: The list node for the set of pending packets.
+ * @ops: Packet operations.
+ */
+struct ssh_packet {
+ struct ssh_ptl *ptl;
+ struct kref refcnt;
+
+ u8 priority;
+
+ struct {
+ size_t len;
+ u8 *ptr;
+ } data;
+
+ unsigned long state;
+ ktime_t timestamp;
+
+ struct list_head queue_node;
+ struct list_head pending_node;
+
+ const struct ssh_packet_ops *ops;
+};
+
+struct ssh_packet *ssh_packet_get(struct ssh_packet *p);
+void ssh_packet_put(struct ssh_packet *p);
+
+/**
+ * ssh_packet_set_data() - Set raw message data of packet.
+ * @p: The packet for which the message data should be set.
+ * @ptr: Pointer to the memory holding the message data.
+ * @len: Length of the message data.
+ *
+ * Sets the raw message data buffer of the packet to the provided memory. The
+ * memory is not copied. Instead, the caller is responsible for management
+ * (i.e. allocation and deallocation) of the memory. The caller must ensure
+ * that the provided memory is valid and contains a valid SSH message,
+ * starting from the time of submission of the packet until the ``release``
+ * callback has been called. During this time, the memory may not be altered
+ * in any way.
+ */
+static inline void ssh_packet_set_data(struct ssh_packet *p, u8 *ptr, size_t len)
+{
+ p->data.ptr = ptr;
+ p->data.len = len;
+}
+
+
+/* -- Request transport layer (rtl). ---------------------------------------- */
+
+enum ssh_request_flags {
+ /* state flags */
+ SSH_REQUEST_SF_LOCKED_BIT,
+ SSH_REQUEST_SF_QUEUED_BIT,
+ SSH_REQUEST_SF_PENDING_BIT,
+ SSH_REQUEST_SF_TRANSMITTING_BIT,
+ SSH_REQUEST_SF_TRANSMITTED_BIT,
+ SSH_REQUEST_SF_RSPRCVD_BIT,
+ SSH_REQUEST_SF_CANCELED_BIT,
+ SSH_REQUEST_SF_COMPLETED_BIT,
+
+ /* type flags */
+ SSH_REQUEST_TY_FLUSH_BIT,
+ SSH_REQUEST_TY_HAS_RESPONSE_BIT,
+
+ /* mask for state flags */
+ SSH_REQUEST_FLAGS_SF_MASK =
+ BIT(SSH_REQUEST_SF_LOCKED_BIT)
+ | BIT(SSH_REQUEST_SF_QUEUED_BIT)
+ | BIT(SSH_REQUEST_SF_PENDING_BIT)
+ | BIT(SSH_REQUEST_SF_TRANSMITTING_BIT)
+ | BIT(SSH_REQUEST_SF_TRANSMITTED_BIT)
+ | BIT(SSH_REQUEST_SF_RSPRCVD_BIT)
+ | BIT(SSH_REQUEST_SF_CANCELED_BIT)
+ | BIT(SSH_REQUEST_SF_COMPLETED_BIT),
+
+ /* mask for type flags */
+ SSH_REQUEST_FLAGS_TY_MASK =
+ BIT(SSH_REQUEST_TY_FLUSH_BIT)
+ | BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT),
+};
+
+struct ssh_rtl;
+struct ssh_request;
+
+/**
+ * struct ssh_request_ops - Callback operations for a SSH request.
+ * @release: Function called when the request's reference count reaches zero.
+ * This callback must be relied upon to ensure that the request has
+ *	left the transport systems (both the packet and request systems).
+ * @complete: Function called when the request is completed, either with
+ * success or failure. The command data for the request response
+ * is provided via the &struct ssh_command parameter (``cmd``),
+ * the command payload of the request response via the &struct
+ *	ssam_span parameter (``data``).
+ *
+ * If the request does not have any response or has not been
+ * completed with success, both ``cmd`` and ``data`` parameters will
+ * be NULL. If the request response does not have any command
+ * payload, the ``data`` span will be an empty (zero-length) span.
+ *
+ * In case of failure, the reason for the failure is indicated by
+ * the value of the provided status code argument (``status``). This
+ * value will be zero in case of success and a regular errno
+ * otherwise.
+ *
+ * Note that a call to this callback does not guarantee that the
+ * request is not in use by the transport systems any more.
+ */
+struct ssh_request_ops {
+ void (*release)(struct ssh_request *rqst);
+ void (*complete)(struct ssh_request *rqst,
+ const struct ssh_command *cmd,
+ const struct ssam_span *data, int status);
+};
+
+/**
+ * struct ssh_request - SSH transport request.
+ * @packet: The underlying SSH transport packet.
+ * @node: List node for the request queue and pending set.
+ * @state: State and type flags describing current request state (dynamic)
+ * and type (static). See &enum ssh_request_flags for possible
+ * options.
+ * @timestamp: Timestamp specifying when we start waiting on the response of
+ * the request. This is set once the underlying packet has been
+ * completed and may be %KTIME_MAX before that, or when the request
+ * does not expect a response. Used for the request timeout
+ * implementation.
+ * @ops:       Request operations.
+ */
+struct ssh_request {
+ struct ssh_packet packet;
+ struct list_head node;
+
+ unsigned long state;
+ ktime_t timestamp;
+
+ const struct ssh_request_ops *ops;
+};
+
+/**
+ * to_ssh_request() - Cast a SSH packet to its enclosing SSH request.
+ * @p: The packet to cast.
+ *
+ * Casts the given &struct ssh_packet to its enclosing &struct ssh_request.
+ * The caller is responsible for making sure that the packet is actually
+ * wrapped in a &struct ssh_request.
+ *
+ * Return: Returns the &struct ssh_request wrapping the provided packet.
+ */
+static inline struct ssh_request *to_ssh_request(struct ssh_packet *p)
+{
+ return container_of(p, struct ssh_request, packet);
+}
+
+/**
+ * ssh_request_get() - Increment reference count of request.
+ * @r: The request to increment the reference count of.
+ *
+ * Increments the reference count of the given request by incrementing the
+ * reference count of the underlying &struct ssh_packet, enclosed in it.
+ *
+ * See also ssh_request_put(), ssh_packet_get().
+ *
+ * Return: Returns the request provided as input.
+ */
+static inline struct ssh_request *ssh_request_get(struct ssh_request *r)
+{
+ return r ? to_ssh_request(ssh_packet_get(&r->packet)) : NULL;
+}
+
+/**
+ * ssh_request_put() - Decrement reference count of request.
+ * @r: The request to decrement the reference count of.
+ *
+ * Decrements the reference count of the given request by decrementing the
+ * reference count of the underlying &struct ssh_packet, enclosed in it. If
+ * the reference count reaches zero, the ``release`` callback specified in the
+ * request's &struct ssh_request_ops, i.e. ``r->ops->release``, will be
+ * called.
+ *
+ * See also ssh_request_get(), ssh_packet_put().
+ */
+static inline void ssh_request_put(struct ssh_request *r)
+{
+ if (r)
+ ssh_packet_put(&r->packet);
+}
+
+/**
+ * ssh_request_set_data() - Set raw message data of request.
+ * @r: The request for which the message data should be set.
+ * @ptr: Pointer to the memory holding the message data.
+ * @len: Length of the message data.
+ *
+ * Sets the raw message data buffer of the underlying packet to the specified
+ * buffer. Does not copy the actual message data, just sets the buffer pointer
+ * and length. Refer to ssh_packet_set_data() for more details.
+ */
+static inline void ssh_request_set_data(struct ssh_request *r, u8 *ptr, size_t len)
+{
+ ssh_packet_set_data(&r->packet, ptr, len);
+}
+
+#endif /* _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 2b2055b035ee..cfe19a028918 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -75,7 +75,7 @@ extern struct suspend_stats suspend_stats;
static inline void dpm_save_failed_dev(const char *name)
{
- strlcpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev],
+ strscpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev],
name,
sizeof(suspend_stats.failed_devs[0]));
suspend_stats.last_failed_dev++;
@@ -191,6 +191,7 @@ struct platform_s2idle_ops {
int (*begin)(void);
int (*prepare)(void);
int (*prepare_late)(void);
+ void (*check)(void);
bool (*wake)(void);
void (*restore_early)(void);
void (*restore)(void);
@@ -430,15 +431,7 @@ struct platform_hibernation_ops {
#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
-extern void __register_nosave_region(unsigned long b, unsigned long e, int km);
-static inline void __init register_nosave_region(unsigned long b, unsigned long e)
-{
- __register_nosave_region(b, e, 0);
-}
-static inline void __init register_nosave_region_late(unsigned long b, unsigned long e)
-{
- __register_nosave_region(b, e, 1);
-}
+extern void register_nosave_region(unsigned long b, unsigned long e);
extern int swsusp_page_is_forbidden(struct page *);
extern void swsusp_set_page_free(struct page *);
extern void swsusp_unset_page_free(struct page *);
@@ -446,6 +439,7 @@ extern unsigned long get_safe_page(gfp_t gfp_mask);
extern asmlinkage int swsusp_arch_suspend(void);
extern asmlinkage int swsusp_arch_resume(void);
+extern u32 swsusp_hardware_signature;
extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
extern int hibernate(void);
extern bool system_entering_hibernation(void);
@@ -453,9 +447,10 @@ extern bool hibernation_available(void);
asmlinkage int swsusp_save(void);
extern struct pbe *restore_pblist;
int pfn_is_nosave(unsigned long pfn);
+
+int hibernate_quiet_exec(int (*func)(void *data), void *data);
#else /* CONFIG_HIBERNATION */
static inline void register_nosave_region(unsigned long b, unsigned long e) {}
-static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
static inline void swsusp_set_page_free(struct page *p) {}
static inline void swsusp_unset_page_free(struct page *p) {}
@@ -464,8 +459,18 @@ static inline void hibernation_set_ops(const struct platform_hibernation_ops *op
static inline int hibernate(void) { return -ENOSYS; }
static inline bool system_entering_hibernation(void) { return false; }
static inline bool hibernation_available(void) { return false; }
+
+static inline int hibernate_quiet_exec(int (*func)(void *data), void *data)
+{
+ return -ENOTSUPP;
+}
#endif /* CONFIG_HIBERNATION */
+#ifdef CONFIG_HIBERNATION_SNAPSHOT_DEV
+int is_hibernate_resume_dev(dev_t dev);
+#else
+static inline int is_hibernate_resume_dev(dev_t dev) { return 0; }
+#endif
+
/* Hibernation and suspend events */
#define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */
#define PM_POST_HIBERNATION 0x0002 /* Hibernation finished */
@@ -493,21 +498,21 @@ extern void ksys_sync_helper(void);
/* drivers/base/power/wakeup.c */
extern bool events_check_enabled;
-extern unsigned int pm_wakeup_irq;
extern suspend_state_t pm_suspend_target_state;
extern bool pm_wakeup_pending(void);
extern void pm_system_wakeup(void);
extern void pm_system_cancel_wakeup(void);
-extern void pm_wakeup_clear(bool reset);
+extern void pm_wakeup_clear(unsigned int irq_number);
extern void pm_system_irq_wakeup(unsigned int irq_number);
+extern unsigned int pm_wakeup_irq(void);
extern bool pm_get_wakeup_count(unsigned int *count, bool block);
extern bool pm_save_wakeup_count(unsigned int count);
extern void pm_wakep_autosleep_enabled(bool set);
extern void pm_print_active_wakeup_sources(void);
-extern void lock_system_sleep(void);
-extern void unlock_system_sleep(void);
+extern unsigned int lock_system_sleep(void);
+extern void unlock_system_sleep(unsigned int);
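+
+/*
+ * lock_system_sleep() returns a cookie describing the previous state of the
+ * current task, which must be handed back to unlock_system_sleep() to
+ * restore it, e.g. (a sketch):
+ *
+ *	unsigned int flags = lock_system_sleep();
+ *	...
+ *	unlock_system_sleep(flags);
+ */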
#else /* !CONFIG_PM_SLEEP */
@@ -530,30 +535,64 @@ static inline void pm_system_wakeup(void) {}
static inline void pm_wakeup_clear(bool reset) {}
static inline void pm_system_irq_wakeup(unsigned int irq_number) {}
-static inline void lock_system_sleep(void) {}
-static inline void unlock_system_sleep(void) {}
+static inline unsigned int lock_system_sleep(void) { return 0; }
+static inline void unlock_system_sleep(unsigned int flags) {}
#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_SLEEP_DEBUG
extern bool pm_print_times_enabled;
extern bool pm_debug_messages_on;
-extern __printf(2, 3) void __pm_pr_dbg(bool defer, const char *fmt, ...);
+static inline int pm_dyn_debug_messages_on(void)
+{
+#ifdef CONFIG_DYNAMIC_DEBUG
+ return 1;
+#else
+ return 0;
+#endif
+}
+#ifndef pr_fmt
+#define pr_fmt(fmt) "PM: " fmt
+#endif
+#define __pm_pr_dbg(fmt, ...) \
+ do { \
+ if (pm_debug_messages_on) \
+ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
+ else if (pm_dyn_debug_messages_on()) \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+#define __pm_deferred_pr_dbg(fmt, ...) \
+ do { \
+ if (pm_debug_messages_on) \
+ printk_deferred(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
+ } while (0)
#else
#define pm_print_times_enabled (false)
#define pm_debug_messages_on (false)
#include <linux/printk.h>
-#define __pm_pr_dbg(defer, fmt, ...) \
- no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
+#define __pm_pr_dbg(fmt, ...) \
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#define __pm_deferred_pr_dbg(fmt, ...) \
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif
+/**
+ * pm_pr_dbg - print pm sleep debug messages
+ *
+ * If pm_debug_messages_on is enabled, print the message.
+ * If pm_debug_messages_on is disabled and CONFIG_DYNAMIC_DEBUG is enabled,
+ * print the message only from call sites explicitly enabled via dynamic
+ * debug's control interface.
+ * If pm_debug_messages_on is disabled and CONFIG_DYNAMIC_DEBUG is disabled,
+ * don't print the message.
+ */
#define pm_pr_dbg(fmt, ...) \
- __pm_pr_dbg(false, fmt, ##__VA_ARGS__)
+ __pm_pr_dbg(fmt, ##__VA_ARGS__)
#define pm_deferred_pr_dbg(fmt, ...) \
- __pm_pr_dbg(true, fmt, ##__VA_ARGS__)
+ __pm_deferred_pr_dbg(fmt, ##__VA_ARGS__)
#ifdef CONFIG_PM_AUTOSLEEP
@@ -566,38 +605,4 @@ static inline void queue_up_suspend_work(void) {}
#endif /* !CONFIG_PM_AUTOSLEEP */
-#ifdef CONFIG_ARCH_SAVE_PAGE_KEYS
-/*
- * The ARCH_SAVE_PAGE_KEYS functions can be used by an architecture
- * to save/restore additional information to/from the array of page
- * frame numbers in the hibernation image. For s390 this is used to
- * save and restore the storage key for each page that is included
- * in the hibernation image.
- */
-unsigned long page_key_additional_pages(unsigned long pages);
-int page_key_alloc(unsigned long pages);
-void page_key_free(void);
-void page_key_read(unsigned long *pfn);
-void page_key_memorize(unsigned long *pfn);
-void page_key_write(void *address);
-
-#else /* !CONFIG_ARCH_SAVE_PAGE_KEYS */
-
-static inline unsigned long page_key_additional_pages(unsigned long pages)
-{
- return 0;
-}
-
-static inline int page_key_alloc(unsigned long pages)
-{
- return 0;
-}
-
-static inline void page_key_free(void) {}
-static inline void page_key_read(unsigned long *pfn) {}
-static inline void page_key_memorize(unsigned long *pfn) {}
-static inline void page_key_write(void *address) {}
-
-#endif /* !CONFIG_ARCH_SAVE_PAGE_KEYS */
-
#endif /* _LINUX_SUSPEND_H */
diff --git a/include/linux/swab.h b/include/linux/swab.h
index bcff5149861a..9b804dbb0d79 100644
--- a/include/linux/swab.h
+++ b/include/linux/swab.h
@@ -20,4 +20,29 @@
# define swab64s __swab64s
# define swahw32s __swahw32s
# define swahb32s __swahb32s
+
+static inline void swab16_array(u16 *buf, unsigned int words)
+{
+ while (words--) {
+ swab16s(buf);
+ buf++;
+ }
+}
+
+static inline void swab32_array(u32 *buf, unsigned int words)
+{
+ while (words--) {
+ swab32s(buf);
+ buf++;
+ }
+}
+
+static inline void swab64_array(u64 *buf, unsigned int words)
+{
+ while (words--) {
+ swab64s(buf);
+ buf++;
+ }
+}
+
#endif /* _LINUX_SWAB_H */
diff --git a/include/linux/swait.h b/include/linux/swait.h
index 73e06e9986d4..6a8c22b8c2a5 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -9,23 +9,10 @@
#include <asm/current.h>
/*
- * BROKEN wait-queues.
- *
- * These "simple" wait-queues are broken garbage, and should never be
- * used. The comments below claim that they are "similar" to regular
- * wait-queues, but the semantics are actually completely different, and
- * every single user we have ever had has been buggy (or pointless).
- *
- * A "swake_up_one()" only wakes up _one_ waiter, which is not at all what
- * "wake_up()" does, and has led to problems. In other cases, it has
- * been fine, because there's only ever one waiter (kvm), but in that
- * case gthe whole "simple" wait-queue is just pointless to begin with,
- * since there is no "queue". Use "wake_up_process()" with a direct
- * pointer instead.
- *
- * While these are very similar to regular wait queues (wait.h) the most
- * important difference is that the simple waitqueue allows for deterministic
- * behaviour -- IOW it has strictly bounded IRQ and lock hold times.
+ * Simple waitqueues are semantically very different to regular wait queues
+ * (wait.h). The most important difference is that the simple waitqueue allows
+ * for deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold
+ * times.
*
* Mainly, this is accomplished by two things. Firstly not allowing swake_up_all
* from IRQ disabled, and dropping the lock upon every wakeup, giving a higher
@@ -39,7 +26,7 @@
* sleeper state.
*
* - the !exclusive mode; because that leads to O(n) wakeups, everything is
- * exclusive.
+ * exclusive. As such swake_up_one() will only ever wake _one_ waiter.
*
* - custom wake callback functions; because you cannot give any guarantees
* about random code. This also allows swait to be used in RT, such that
diff --git a/include/linux/swait_api.h b/include/linux/swait_api.h
new file mode 100644
index 000000000000..1eeaaaaa5ea7
--- /dev/null
+++ b/include/linux/swait_api.h
@@ -0,0 +1 @@
+#include <linux/swait.h>
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 1e99f7ac1d7e..a18cf4b7c724 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -10,8 +10,10 @@
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
+#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
+#include <uapi/linux/mempolicy.h>
#include <asm/page.h>
struct notifier_block;
@@ -53,30 +55,59 @@ static inline int current_is_kswapd(void)
* actions on faults.
*/
+#define SWP_SWAPIN_ERROR_NUM 1
+#define SWP_SWAPIN_ERROR (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
+ SWP_MIGRATION_NUM + SWP_DEVICE_NUM + \
+ SWP_PTE_MARKER_NUM)
+/*
+ * PTE markers are used to persist information onto PTEs that are mapped with
+ * file-backed memories. As the name "PTE" hints, they should only be
+ * applied to the leaves of pgtables.
+ */
+#ifdef CONFIG_PTE_MARKER
+#define SWP_PTE_MARKER_NUM 1
+#define SWP_PTE_MARKER (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
+ SWP_MIGRATION_NUM + SWP_DEVICE_NUM)
+#else
+#define SWP_PTE_MARKER_NUM 0
+#endif
+
/*
* Unaddressable device memory support. See include/linux/hmm.h and
- * Documentation/vm/hmm.rst. Short description is we need struct pages for
+ * Documentation/mm/hmm.rst. Short description is we need struct pages for
* device memory that is unaddressable (inaccessible) by CPU, so that we can
* migrate part of a process memory to device memory.
*
* When a page is migrated from CPU to device, we set the CPU page table entry
- * to a special SWP_DEVICE_* entry.
+ * to a special SWP_DEVICE_{READ|WRITE} entry.
+ *
+ * When a page is mapped by the device for exclusive access we set the CPU page
+ * table entries to special SWP_DEVICE_EXCLUSIVE_* entries.
*/
#ifdef CONFIG_DEVICE_PRIVATE
-#define SWP_DEVICE_NUM 2
+#define SWP_DEVICE_NUM 4
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
+#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
+#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3)
#else
#define SWP_DEVICE_NUM 0
#endif
/*
- * NUMA node memory migration support
+ * Page migration support.
+ *
+ * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and
+ * indicates that the referenced (part of an) anonymous page is exclusive to
+ * a single process. For SWP_MIGRATION_WRITE, that information is implicit:
+ * (part of) an anonymous page that is mapped writable is exclusive to a
+ * single process.
*/
#ifdef CONFIG_MIGRATION
-#define SWP_MIGRATION_NUM 2
-#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM)
-#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
+#define SWP_MIGRATION_NUM 3
+#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM)
+#define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
+#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 2)
#else
#define SWP_MIGRATION_NUM 0
#endif
@@ -93,7 +124,8 @@ static inline int current_is_kswapd(void)
#define MAX_SWAPFILES \
((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
- SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
+ SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
+ SWP_PTE_MARKER_NUM - SWP_SWAPIN_ERROR_NUM)
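/*
 * Worked example, not part of the patch: assuming MAX_SWAPFILES_SHIFT == 5
 * and CONFIG_MEMORY_FAILURE, CONFIG_MIGRATION, CONFIG_DEVICE_PRIVATE and
 * CONFIG_PTE_MARKER all enabled (and SWP_HWPOISON == MAX_SWAPFILES, as
 * defined elsewhere in this header), 22 of the 32 raw type values remain
 * for real swapfiles and the special types stack on top:
 *
 *   22      SWP_HWPOISON
 *   23..25  SWP_MIGRATION_{READ,READ_EXCLUSIVE,WRITE}
 *   26..29  SWP_DEVICE_{WRITE,READ,EXCLUSIVE_WRITE,EXCLUSIVE_READ}
 *   30      SWP_PTE_MARKER
 *   31      SWP_SWAPIN_ERROR
 */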
/*
* Magic header for a swap area. The first part of the union is
@@ -130,6 +162,10 @@ union swap_header {
*/
struct reclaim_state {
unsigned long reclaimed_slab;
+#ifdef CONFIG_LRU_GEN
+ /* per-thread mm walk data */
+ struct lru_gen_mm_walk *mm_walk;
+#endif
};
#ifdef __KERNEL__
@@ -141,8 +177,8 @@ struct zone;
/*
* A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
- * disk blocks. A list of swap extents maps the entire swapfile. (Where the
- * term `swapfile' refers to either a blockdevice or an IS_REG file. Apart
+ * disk blocks. An rbtree of swap extents maps the entire swapfile (where the
+ * term `swapfile' refers to either a blockdevice or an IS_REG file). Apart
* from setup, they're handled identically.
*
* We always assume that blocks are of size PAGE_SIZE.
@@ -170,12 +206,11 @@ enum {
SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
 SWP_BLKDEV = (1 << 6), /* it's a block device */
SWP_ACTIVATED = (1 << 7), /* set after swap_activate success */
- SWP_FS = (1 << 8), /* swap file goes through fs */
+ SWP_FS_OPS = (1 << 8), /* swapfile operations go through fs */
SWP_AREA_DISCARD = (1 << 9), /* single-time swap area discards */
SWP_PAGE_DISCARD = (1 << 10), /* freed swap page-cluster discards */
SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */
SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */
- SWP_VALID = (1 << 13), /* swap is valid to be operated on? */
/* add others here before... */
SWP_SCANNING = (1 << 14), /* refcount in scan_swap_map */
};
@@ -183,12 +218,17 @@ enum {
#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
-#define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */
-#define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */
+/* Bit flag in swap_map */
#define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */
-#define SWAP_CONT_MAX 0x7f /* Max count, in each swap_map continuation */
-#define COUNT_CONTINUED 0x80 /* See swap_map continuation for full count */
-#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs, in first swap_map */
+#define COUNT_CONTINUED 0x80 /* Flag swap_map continuation for full count */
+
+/* Special value in first swap_map */
+#define SWAP_MAP_MAX 0x3e /* Max count */
+#define SWAP_MAP_BAD 0x3f /* Note page is bad */
+#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs */
+
+/* Special value in each swap_map continuation */
+#define SWAP_CONT_MAX 0x7f /* Max count */
/*
* We use this to track usage of a cluster. A cluster is a block of swap disk
@@ -233,6 +273,7 @@ struct swap_cluster_list {
* The in-memory structure used to track swap areas.
*/
struct swap_info_struct {
+ struct percpu_ref users; /* indicate and keep swap device valid. */
unsigned long flags; /* SWP_USED etc: see above */
signed short prio; /* swap priority of this type */
struct plist_node list; /* entry in swap_active_head */
@@ -247,11 +288,13 @@ struct swap_info_struct {
unsigned int inuse_pages; /* number of those currently in use */
unsigned int cluster_next; /* likely index for next allocation */
unsigned int cluster_nr; /* countdown to next cluster search */
+ unsigned int __percpu *cluster_next_cpu; /* percpu index for next allocation */
struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
struct rb_root swap_extent_root;/* root of the swap extent rbtree */
struct block_device *bdev; /* swap device or bdev of swap file */
struct file *swap_file; /* seldom referenced */
unsigned int old_block_size; /* seldom referenced */
+ struct completion comp; /* seldom referenced */
#ifdef CONFIG_FRONTSWAP
unsigned long *frontswap_map; /* frontswap in-use, one bit per page */
atomic_t frontswap_pages; /* frontswap pages in-use counter */
@@ -275,7 +318,7 @@ struct swap_info_struct {
*/
struct work_struct discard_work; /* discard worker */
struct swap_cluster_list discard_clusters; /* discard clusters list */
- struct plist_node avail_lists[0]; /*
+ struct plist_node avail_lists[]; /*
* entries in swap_avail_heads, one
* entry per node.
* Must be last as the number of the
@@ -306,64 +349,91 @@ struct vma_swap_readahead {
#endif
};
+static inline swp_entry_t folio_swap_entry(struct folio *folio)
+{
+ swp_entry_t entry = { .val = page_private(&folio->page) };
+ return entry;
+}
+
+static inline void folio_set_swap_entry(struct folio *folio, swp_entry_t entry)
+{
+ folio->private = (void *)entry.val;
+}
+
/* linux/mm/workingset.c */
-void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
-void workingset_refault(struct page *page, void *shadow);
-void workingset_activation(struct page *page);
+void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
+void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
+void workingset_refault(struct folio *folio, void *shadow);
+void workingset_activation(struct folio *folio);
/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
+extern struct list_lru shadow_nodes;
#define mapping_set_update(xas, mapping) do { \
- if (!dax_mapping(mapping) && !shmem_mapping(mapping)) \
+ if (!dax_mapping(mapping) && !shmem_mapping(mapping)) { \
xas_set_update(xas, workingset_update_node); \
+ xas_set_lru(xas, &shadow_nodes); \
+ } \
} while (0)
/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;
-extern unsigned long nr_free_buffer_pages(void);
-extern unsigned long nr_free_pagecache_pages(void);
/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)
/* linux/mm/swap.c */
-extern void lru_cache_add(struct page *);
-extern void lru_cache_add_anon(struct page *page);
-extern void lru_cache_add_file(struct page *page);
-extern void lru_add_page_tail(struct page *page, struct page *page_tail,
- struct lruvec *lruvec, struct list_head *head);
-extern void activate_page(struct page *);
-extern void mark_page_accessed(struct page *);
+void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages);
+void lru_note_cost_folio(struct folio *);
+void folio_add_lru(struct folio *);
+void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
+void lru_cache_add(struct page *);
+void mark_page_accessed(struct page *);
+void folio_mark_accessed(struct folio *);
+
+extern atomic_t lru_disable_count;
+
+static inline bool lru_cache_disabled(void)
+{
+ return atomic_read(&lru_disable_count);
+}
+
+static inline void lru_cache_enable(void)
+{
+ atomic_dec(&lru_disable_count);
+}
+
+extern void lru_cache_disable(void);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
+extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
-extern void rotate_reclaimable_page(struct page *page);
-extern void deactivate_file_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);
-extern void lru_cache_add_active_or_unevictable(struct page *page,
+extern void lru_cache_add_inactive_or_unevictable(struct page *page,
struct vm_area_struct *vma);
/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *mask);
-extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
+
+#define MEMCG_RECLAIM_MAY_SWAP (1 << 1)
+#define MEMCG_RECLAIM_PROACTIVE (1 << 2)
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
unsigned long nr_pages,
gfp_t gfp_mask,
- bool may_swap);
+ unsigned int reclaim_options);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
gfp_t gfp_mask, bool noswap,
pg_data_t *pgdat,
unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
-extern int remove_mapping(struct address_space *mapping, struct page *page);
-extern unsigned long vm_total_pages;
+long remove_mapping(struct address_space *mapping, struct folio *folio);
extern unsigned long reclaim_pages(struct list_head *page_list);
#ifdef CONFIG_NUMA
@@ -374,60 +444,33 @@ extern int sysctl_min_slab_ratio;
#define node_reclaim_mode 0
#endif
-extern int page_evictable(struct page *page);
-extern void check_move_unevictable_pages(struct pagevec *pvec);
+static inline bool node_reclaim_enabled(void)
+{
+ /* Is any node_reclaim_mode bit set? */
+ return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
+}
-extern int kswapd_run(int nid);
+void check_move_unevictable_folios(struct folio_batch *fbatch);
+void check_move_unevictable_pages(struct pagevec *pvec);
+
+extern void kswapd_run(int nid);
extern void kswapd_stop(int nid);
#ifdef CONFIG_SWAP
-#include <linux/blk_types.h> /* for bio_end_io_t */
-
-/* linux/mm/page_io.c */
-extern int swap_readpage(struct page *page, bool do_poll);
-extern int swap_writepage(struct page *page, struct writeback_control *wbc);
-extern void end_swap_bio_write(struct bio *bio);
-extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
- bio_end_io_t end_write_func);
-extern int swap_set_page_dirty(struct page *page);
-
int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
sector_t *);
-/* linux/mm/swap_state.c */
-/* One swap address space for each 64M swap space */
-#define SWAP_ADDRESS_SPACE_SHIFT 14
-#define SWAP_ADDRESS_SPACE_PAGES (1 << SWAP_ADDRESS_SPACE_SHIFT)
-extern struct address_space *swapper_spaces[];
-#define swap_address_space(entry) \
- (&swapper_spaces[swp_type(entry)][swp_offset(entry) \
- >> SWAP_ADDRESS_SPACE_SHIFT])
-extern unsigned long total_swapcache_pages(void);
-extern void show_swap_cache_info(void);
-extern int add_to_swap(struct page *page);
-extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
-extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
-extern void __delete_from_swap_cache(struct page *, swp_entry_t entry);
-extern void delete_from_swap_cache(struct page *);
+static inline unsigned long total_swapcache_pages(void)
+{
+ return global_node_page_state(NR_SWAPCACHE);
+}
+
+extern void free_swap_cache(struct page *page);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
-extern struct page *lookup_swap_cache(swp_entry_t entry,
- struct vm_area_struct *vma,
- unsigned long addr);
-extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
- struct vm_area_struct *vma, unsigned long addr,
- bool do_poll);
-extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
- struct vm_area_struct *vma, unsigned long addr,
- bool *new_page_allocated);
-extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
- struct vm_fault *vmf);
-extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
- struct vm_fault *vmf);
-
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
@@ -446,8 +489,9 @@ static inline long get_nr_swap_pages(void)
}
extern void si_swapinfo(struct sysinfo *);
-extern swp_entry_t get_swap_page(struct page *page);
-extern void put_swap_page(struct page *page, swp_entry_t entry);
+swp_entry_t folio_alloc_swap(struct folio *folio);
+bool folio_free_swap(struct folio *folio);
+void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
@@ -457,41 +501,41 @@ extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
-extern int swap_type_of(dev_t, sector_t, struct block_device **);
+int swap_type_of(dev_t device, sector_t offset);
+int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
-extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
-extern int page_swapcount(struct page *);
extern int __swap_count(swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
-extern bool reuse_swap_page(struct page *, int *);
-extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
+sector_t swap_page_sector(struct page *page);
static inline void put_swap_device(struct swap_info_struct *si)
{
- rcu_read_unlock();
+ percpu_ref_put(&si->users);
}
#else /* CONFIG_SWAP */
-
-static inline int swap_readpage(struct page *page, bool do_poll)
+static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
- return 0;
+ return NULL;
}
-static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
+static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
return NULL;
}
-#define swap_address_space(entry) (NULL)
+static inline void put_swap_device(struct swap_info_struct *si)
+{
+}
+
#define get_nr_swap_pages() 0L
#define total_swap_pages 0L
#define total_swapcache_pages() 0UL
@@ -506,13 +550,13 @@ static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
#define free_pages_and_swap_cache(pages, nr) \
release_pages((pages), (nr));
-static inline void show_swap_cache_info(void)
+/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
+#define free_swap_and_cache(e) is_pfn_swap_entry(e)
+
+static inline void free_swap_cache(struct page *page)
{
}
-#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
-#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
-
static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
return 0;
@@ -531,57 +575,8 @@ static inline void swap_free(swp_entry_t swp)
{
}
-static inline void put_swap_page(struct page *page, swp_entry_t swp)
-{
-}
-
-static inline struct page *swap_cluster_readahead(swp_entry_t entry,
- gfp_t gfp_mask, struct vm_fault *vmf)
+static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
{
- return NULL;
-}
-
-static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
- struct vm_fault *vmf)
-{
- return NULL;
-}
-
-static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
-{
- return 0;
-}
-
-static inline struct page *lookup_swap_cache(swp_entry_t swp,
- struct vm_area_struct *vma,
- unsigned long addr)
-{
- return NULL;
-}
-
-static inline int add_to_swap(struct page *page)
-{
- return 0;
-}
-
-static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
- gfp_t gfp_mask)
-{
- return -1;
-}
-
-static inline void __delete_from_swap_cache(struct page *page,
- swp_entry_t entry)
-{
-}
-
-static inline void delete_from_swap_cache(struct page *page)
-{
-}
-
-static inline int page_swapcount(struct page *page)
-{
- return 0;
}
static inline int __swap_count(swp_entry_t entry)
@@ -599,21 +594,24 @@ static inline int swp_swapcount(swp_entry_t entry)
return 0;
}
-#define reuse_swap_page(page, total_map_swapcount) \
- (page_trans_huge_mapcount(page, total_map_swapcount) == 1)
-
-static inline int try_to_free_swap(struct page *page)
-{
- return 0;
-}
-
-static inline swp_entry_t get_swap_page(struct page *page)
+static inline swp_entry_t folio_alloc_swap(struct folio *folio)
{
swp_entry_t entry;
entry.val = 0;
return entry;
}
+static inline bool folio_free_swap(struct folio *folio)
+{
+ return false;
+}
+
+static inline int add_swap_extent(struct swap_info_struct *sis,
+ unsigned long start_page,
+ unsigned long nr_pages, sector_t start_block)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_SWAP */
#ifdef CONFIG_THP_SWAP
@@ -645,28 +643,56 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
}
#endif
+#ifdef CONFIG_ZSWAP
+extern u64 zswap_pool_total_size;
+extern atomic_t zswap_stored_pages;
+#endif
+
#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
-extern void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
- gfp_t gfp_mask);
+extern void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
+static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
+{
+ if (mem_cgroup_disabled())
+ return;
+ __cgroup_throttle_swaprate(page, gfp_mask);
+}
#else
-static inline void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg,
- int node, gfp_t gfp_mask)
+static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
}
#endif
+static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
+{
+ cgroup_throttle_swaprate(&folio->page, gfp);
+}
+
+#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
+void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
+int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
+static inline int mem_cgroup_try_charge_swap(struct folio *folio,
+ swp_entry_t entry)
+{
+ if (mem_cgroup_disabled())
+ return 0;
+ return __mem_cgroup_try_charge_swap(folio, entry);
+}
+
+extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
+static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
+{
+ if (mem_cgroup_disabled())
+ return;
+ __mem_cgroup_uncharge_swap(entry, nr_pages);
+}
-#ifdef CONFIG_MEMCG_SWAP
-extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
-extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
-extern void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
-extern bool mem_cgroup_swap_full(struct page *page);
+extern bool mem_cgroup_swap_full(struct folio *folio);
#else
-static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
{
}
-static inline int mem_cgroup_try_charge_swap(struct page *page,
+static inline int mem_cgroup_try_charge_swap(struct folio *folio,
swp_entry_t entry)
{
return 0;
@@ -682,7 +708,7 @@ static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
return get_nr_swap_pages();
}
-static inline bool mem_cgroup_swap_full(struct page *page)
+static inline bool mem_cgroup_swap_full(struct folio *folio)
{
return vm_swap_full();
}
diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h
index a12dd1c3966c..ae73a87775b3 100644
--- a/include/linux/swap_cgroup.h
+++ b/include/linux/swap_cgroup.h
@@ -4,7 +4,7 @@
#include <linux/swap.h>
-#ifdef CONFIG_MEMCG_SWAP
+#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
unsigned short old, unsigned short new);
@@ -40,6 +40,6 @@ static inline void swap_cgroup_swapoff(int type)
return;
}
-#endif /* CONFIG_MEMCG_SWAP */
+#endif
#endif /* __LINUX_SWAP_CGROUP_H */
diff --git a/include/linux/swap_slots.h b/include/linux/swap_slots.h
index e36b200c2a77..15adfb8c813a 100644
--- a/include/linux/swap_slots.h
+++ b/include/linux/swap_slots.h
@@ -23,8 +23,8 @@ struct swap_slots_cache {
void disable_swap_slots_cache_lock(void);
void reenable_swap_slots_cache_unlock(void);
-int enable_swap_slots_cache(void);
-int free_swap_slot(swp_entry_t entry);
+void enable_swap_slots_cache(void);
+void free_swap_slot(swp_entry_t entry);
extern bool swap_slot_cache_enabled;
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
index e06febf62978..7ed529a77c5b 100644
--- a/include/linux/swapfile.h
+++ b/include/linux/swapfile.h
@@ -6,11 +6,13 @@
* these were static in swapfile.c but frontswap.c needs them and we don't
* want to expose them to the dozens of source files that include swap.h
*/
-extern spinlock_t swap_lock;
-extern struct plist_head swap_active_head;
extern struct swap_info_struct *swap_info[];
-extern int try_to_unuse(unsigned int, bool, unsigned long);
extern unsigned long generic_max_swapfile_size(void);
-extern unsigned long max_swapfile_size(void);
+unsigned long arch_max_swapfile_size(void);
+
+/* Maximum swapfile size supported for the arch (not inclusive). */
+extern unsigned long swapfile_maximum_size;
+/* Whether swap migration entry supports storing A/D bits for the arch */
+extern bool swap_migration_ad_supported;
#endif /* _LINUX_SWAPFILE_H */
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 877fd239b6ff..86b95ccb81bb 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -8,15 +8,19 @@
#ifdef CONFIG_MMU
+#ifdef CONFIG_SWAP
+#include <linux/swapfile.h>
+#endif /* CONFIG_SWAP */
+
/*
* swapcache pages are stored in the swapper_space radix tree. We want to
* get good packing density in that tree, so the index should be dense in
* the low-order bits.
*
- * We arrange the `type' and `offset' fields so that `type' is at the seven
+ * We arrange the `type' and `offset' fields so that `type' is at the six
* high-order bits of the swp_entry_t and `offset' is right-aligned in the
* remaining bits. Although `type' itself needs only five bits, we allow for
- * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
+ * shmem/tmpfs to shift it all up a further one bit: see swp_to_radix_entry().
*
* swp_entry_t's are *never* stored anywhere in their arch-dependent format.
*/
@@ -24,6 +28,57 @@
#define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1)
/*
+ * Definitions only for PFN swap entries (see is_pfn_swap_entry()). To
+ * store the PFN, we only need SWP_PFN_BITS bits. Each pfn swap entry
+ * can use the extra bits to store other information besides the PFN.
+ */
+#ifdef MAX_PHYSMEM_BITS
+#define SWP_PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
+#else /* MAX_PHYSMEM_BITS */
+#define SWP_PFN_BITS (BITS_PER_LONG - PAGE_SHIFT)
+#endif /* MAX_PHYSMEM_BITS */
+#define SWP_PFN_MASK (BIT(SWP_PFN_BITS) - 1)
+
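/*
 * Worked example, not part of the patch: with MAX_PHYSMEM_BITS == 46 and
 * PAGE_SHIFT == 12 (x86-64 with 4-level page tables), SWP_PFN_BITS is 34,
 * so a pfn swap entry keeps its PFN in the low 34 bits of the swp offset
 * and the bits above stay free for extra information such as the
 * migration A/D bits defined below.
 */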
+/**
+ * Migration swap entry specific bitfield definitions. Layout:
+ *
+ * |----------+--------------------|
+ * | swp_type | swp_offset |
+ * |----------+--------+-+-+-------|
+ * | | resv |D|A| PFN |
+ * |----------+--------+-+-+-------|
+ *
+ * @SWP_MIG_YOUNG_BIT: Whether the page used to have young bit set (bit A)
+ * @SWP_MIG_DIRTY_BIT: Whether the page used to have dirty bit set (bit D)
+ *
+ * Note: A/D bits will be stored in migration entries iff there are enough
+ * free bits in the arch-specific swp offset. By default we'll ignore A/D bits
+ * when migrating a page. Please refer to migration_entry_supports_ad()
+ * for more information. If there are more bits besides PFN and A/D bits,
+ * they should be reserved and always be zero.
+ */
+#define SWP_MIG_YOUNG_BIT (SWP_PFN_BITS)
+#define SWP_MIG_DIRTY_BIT (SWP_PFN_BITS + 1)
+#define SWP_MIG_TOTAL_BITS (SWP_PFN_BITS + 2)
+
+#define SWP_MIG_YOUNG BIT(SWP_MIG_YOUNG_BIT)
+#define SWP_MIG_DIRTY BIT(SWP_MIG_DIRTY_BIT)
+
+static inline bool is_pfn_swap_entry(swp_entry_t entry);
+
+/* Clear all flags, keeping only the swp_entry_t related information */
+static inline pte_t pte_swp_clear_flags(pte_t pte)
+{
+ if (pte_swp_exclusive(pte))
+ pte = pte_swp_clear_exclusive(pte);
+ if (pte_swp_soft_dirty(pte))
+ pte = pte_swp_clear_soft_dirty(pte);
+ if (pte_swp_uffd_wp(pte))
+ pte = pte_swp_clear_uffd_wp(pte);
+ return pte;
+}
+
+/*
* Store a type+offset into a swp_entry_t in an arch-independent format
*/
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
@@ -52,6 +107,17 @@ static inline pgoff_t swp_offset(swp_entry_t entry)
return entry.val & SWP_OFFSET_MASK;
}
+/*
+ * This should only be called upon a pfn swap entry to get the PFN stored
+ * in the swap entry. Please refer to is_pfn_swap_entry() for the
+ * definition of a pfn swap entry.
+ */
+static inline unsigned long swp_offset_pfn(swp_entry_t entry)
+{
+ VM_BUG_ON(!is_pfn_swap_entry(entry));
+ return swp_offset(entry) & SWP_PFN_MASK;
+}
+
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
@@ -66,8 +132,7 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
swp_entry_t arch_entry;
- if (pte_swp_soft_dirty(pte))
- pte = pte_swp_clear_soft_dirty(pte);
+ pte = pte_swp_clear_flags(pte);
arch_entry = __pte_to_swp_entry(pte);
return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
@@ -97,11 +162,25 @@ static inline void *swp_to_radix_entry(swp_entry_t entry)
return xa_mk_value(entry.val);
}
+static inline swp_entry_t make_swapin_error_entry(struct page *page)
+{
+ return swp_entry(SWP_SWAPIN_ERROR, page_to_pfn(page));
+}
+
+static inline int is_swapin_error_entry(swp_entry_t entry)
+{
+ return swp_type(entry) == SWP_SWAPIN_ERROR;
+}
+
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
-static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
+static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
- return swp_entry(write ? SWP_DEVICE_WRITE : SWP_DEVICE_READ,
- page_to_pfn(page));
+ return swp_entry(SWP_DEVICE_READ, offset);
+}
+
+static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
+{
+ return swp_entry(SWP_DEVICE_WRITE, offset);
}
static inline bool is_device_private_entry(swp_entry_t entry)
@@ -110,33 +189,40 @@ static inline bool is_device_private_entry(swp_entry_t entry)
return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}
-static inline void make_device_private_entry_read(swp_entry_t *entry)
+static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
- *entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry));
+ return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}
-static inline bool is_write_device_private_entry(swp_entry_t entry)
+static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
- return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
+ return swp_entry(SWP_DEVICE_EXCLUSIVE_READ, offset);
}
-static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
+static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
- return swp_offset(entry);
+ return swp_entry(SWP_DEVICE_EXCLUSIVE_WRITE, offset);
}
-static inline struct page *device_private_entry_to_page(swp_entry_t entry)
+static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
- return pfn_to_page(swp_offset(entry));
+ return swp_type(entry) == SWP_DEVICE_EXCLUSIVE_READ ||
+ swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE;
+}
+
+static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
+{
+ return unlikely(swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE);
}
#else /* CONFIG_DEVICE_PRIVATE */
-static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
+static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
return swp_entry(0, 0);
}
-static inline void make_device_private_entry_read(swp_entry_t *entry)
+static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
+ return swp_entry(0, 0);
}
static inline bool is_device_private_entry(swp_entry_t entry)
@@ -144,105 +230,293 @@ static inline bool is_device_private_entry(swp_entry_t entry)
return false;
}
-static inline bool is_write_device_private_entry(swp_entry_t entry)
+static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
return false;
}
-static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
+static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
- return 0;
+ return swp_entry(0, 0);
}
-static inline struct page *device_private_entry_to_page(swp_entry_t entry)
+static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
- return NULL;
+ return swp_entry(0, 0);
}
-#endif /* CONFIG_DEVICE_PRIVATE */
-#ifdef CONFIG_MIGRATION
-static inline swp_entry_t make_migration_entry(struct page *page, int write)
+static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
- BUG_ON(!PageLocked(compound_head(page)));
+ return false;
+}
- return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
- page_to_pfn(page));
+static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
+{
+ return false;
}
+#endif /* CONFIG_DEVICE_PRIVATE */
+#ifdef CONFIG_MIGRATION
static inline int is_migration_entry(swp_entry_t entry)
{
return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
+ swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE ||
swp_type(entry) == SWP_MIGRATION_WRITE);
}
-static inline int is_write_migration_entry(swp_entry_t entry)
+static inline int is_writable_migration_entry(swp_entry_t entry)
{
return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}
-static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
+static inline int is_readable_migration_entry(swp_entry_t entry)
{
- return swp_offset(entry);
+ return unlikely(swp_type(entry) == SWP_MIGRATION_READ);
}
-static inline struct page *migration_entry_to_page(swp_entry_t entry)
+static inline int is_readable_exclusive_migration_entry(swp_entry_t entry)
{
- struct page *p = pfn_to_page(swp_offset(entry));
- /*
- * Any use of migration entries may only occur while the
- * corresponding page is locked
- */
- BUG_ON(!PageLocked(compound_head(p)));
- return p;
+ return unlikely(swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE);
+}
+
+static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
+{
+ return swp_entry(SWP_MIGRATION_READ, offset);
+}
+
+static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
+{
+ return swp_entry(SWP_MIGRATION_READ_EXCLUSIVE, offset);
}
-static inline void make_migration_entry_read(swp_entry_t *entry)
+static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
- *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
+ return swp_entry(SWP_MIGRATION_WRITE, offset);
+}
+
+/*
+ * Returns whether the host has a large enough swap offset field to support
+ * carrying over pgtable A/D bits for page migrations. The result is
+ * arch-specific.
+ */
+static inline bool migration_entry_supports_ad(void)
+{
+#ifdef CONFIG_SWAP
+ return swap_migration_ad_supported;
+#else /* CONFIG_SWAP */
+ return false;
+#endif /* CONFIG_SWAP */
+}
+
+static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
+{
+ if (migration_entry_supports_ad())
+ return swp_entry(swp_type(entry),
+ swp_offset(entry) | SWP_MIG_YOUNG);
+ return entry;
+}
+
+static inline bool is_migration_entry_young(swp_entry_t entry)
+{
+ if (migration_entry_supports_ad())
+ return swp_offset(entry) & SWP_MIG_YOUNG;
+ /* Keep the old behavior of aging page after migration */
+ return false;
+}
+
+static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
+{
+ if (migration_entry_supports_ad())
+ return swp_entry(swp_type(entry),
+ swp_offset(entry) | SWP_MIG_DIRTY);
+ return entry;
+}
+
+static inline bool is_migration_entry_dirty(swp_entry_t entry)
+{
+ if (migration_entry_supports_ad())
+ return swp_offset(entry) & SWP_MIG_DIRTY;
+ /* Keep the old behavior of clean page after migration */
+ return false;
}
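/*
 * A hedged usage sketch, not part of the patch: packing pgtable A/D state
 * into a migration entry so it can be recovered with
 * is_migration_entry_young()/is_migration_entry_dirty() after migration.
 */
static inline swp_entry_t sketch_migration_entry(pgoff_t pfn, bool young,
						 bool dirty)
{
	swp_entry_t entry = make_writable_migration_entry(pfn);

	if (young)
		entry = make_migration_entry_young(entry);
	if (dirty)
		entry = make_migration_entry_dirty(entry);
	/* if the arch offset is too narrow, the helpers leave entry untouched */
	return entry;
}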
extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address);
-extern void migration_entry_wait_huge(struct vm_area_struct *vma,
- struct mm_struct *mm, pte_t *pte);
-#else
+#ifdef CONFIG_HUGETLB_PAGE
+extern void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl);
+extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte);
+#endif /* CONFIG_HUGETLB_PAGE */
+#else /* CONFIG_MIGRATION */
+static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
+{
+ return swp_entry(0, 0);
+}
-#define make_migration_entry(page, write) swp_entry(0, 0)
-static inline int is_migration_entry(swp_entry_t swp)
+static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
- return 0;
+ return swp_entry(0, 0);
}
-static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
+static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
- return 0;
+ return swp_entry(0, 0);
}
-static inline struct page *migration_entry_to_page(swp_entry_t entry)
+static inline int is_migration_entry(swp_entry_t swp)
{
- return NULL;
+ return 0;
}
-static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address) { }
-static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
- struct mm_struct *mm, pte_t *pte) { }
-static inline int is_write_migration_entry(swp_entry_t entry)
+#ifdef CONFIG_HUGETLB_PAGE
+static inline void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl) { }
+static inline void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) { }
+#endif /* CONFIG_HUGETLB_PAGE */
+static inline int is_writable_migration_entry(swp_entry_t entry)
+{
+ return 0;
+}
+static inline int is_readable_migration_entry(swp_entry_t entry)
+{
+ return 0;
+}
+
+static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
+{
+ return entry;
+}
+
+static inline bool is_migration_entry_young(swp_entry_t entry)
+{
+ return false;
+}
+
+static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
+{
+ return entry;
+}
+
+static inline bool is_migration_entry_dirty(swp_entry_t entry)
+{
+ return false;
+}
+#endif /* CONFIG_MIGRATION */
+
+typedef unsigned long pte_marker;
+
+#define PTE_MARKER_UFFD_WP BIT(0)
+#define PTE_MARKER_MASK (PTE_MARKER_UFFD_WP)
+
+#ifdef CONFIG_PTE_MARKER
+
+static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
+{
+ return swp_entry(SWP_PTE_MARKER, marker);
+}
+
+static inline bool is_pte_marker_entry(swp_entry_t entry)
+{
+ return swp_type(entry) == SWP_PTE_MARKER;
+}
+
+static inline pte_marker pte_marker_get(swp_entry_t entry)
+{
+ return swp_offset(entry) & PTE_MARKER_MASK;
+}
+
+static inline bool is_pte_marker(pte_t pte)
+{
+ return is_swap_pte(pte) && is_pte_marker_entry(pte_to_swp_entry(pte));
+}
+
+#else /* CONFIG_PTE_MARKER */
+
+static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
+{
+ /* This should never be called if !CONFIG_PTE_MARKER */
+ WARN_ON_ONCE(1);
+ return swp_entry(0, 0);
+}
+
+static inline bool is_pte_marker_entry(swp_entry_t entry)
+{
+ return false;
+}
+
+static inline pte_marker pte_marker_get(swp_entry_t entry)
{
return 0;
}
-#endif
+static inline bool is_pte_marker(pte_t pte)
+{
+ return false;
+}
+
+#endif /* CONFIG_PTE_MARKER */
+
+static inline pte_t make_pte_marker(pte_marker marker)
+{
+ return swp_entry_to_pte(make_pte_marker_entry(marker));
+}
+
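/*
 * Illustrative sketch, not part of the patch and assuming CONFIG_PTE_MARKER:
 * building a userfaultfd write-protect marker pte and detecting it later.
 */
static inline pte_t sketch_uffd_wp_marker(void)
{
	return make_pte_marker(PTE_MARKER_UFFD_WP);
}

static inline bool sketch_is_uffd_wp_marker(pte_t pte)
{
	return is_pte_marker(pte) &&
	       (pte_marker_get(pte_to_swp_entry(pte)) & PTE_MARKER_UFFD_WP);
}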
+/*
+ * This is a special version of pte_none() that also covers the case when
+ * the pte is a pte marker. It exists because in many cases a pte marker
+ * should be seen as a none pte; it's just that we have stored some
+ * information onto the none pte so it is no longer none.
+ *
+ * It should be used when the pte is file-backed, ram-based and backing
+ * userspace pages, like shmem. It is not needed upon pgtables that do not
+ * support pte markers at all. For example, it's not needed on anonymous
+ * memory, kernel-only memory (including during boot), or non-ram-based
+ * generic filesystems. It's fine to be used even there, but the extra
+ * pte marker check will be pure overhead.
+ *
+ * For systems configured with !CONFIG_PTE_MARKER this will automatically
+ * optimize to pte_none().
+ */
+static inline int pte_none_mostly(pte_t pte)
+{
+ return pte_none(pte) || is_pte_marker(pte);
+}
+
+static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
+{
+ struct page *p = pfn_to_page(swp_offset_pfn(entry));
+
+ /*
+ * Any use of migration entries may only occur while the
+ * corresponding page is locked
+ */
+ BUG_ON(is_migration_entry(entry) && !PageLocked(p));
+
+ return p;
+}
+
+/*
+ * A pfn swap entry is a special type of swap entry that always has a pfn
+ * stored in the swap offset. Such entries are used to represent
+ * unaddressable device memory and to restrict access to a page undergoing
+ * migration.
+ */
+static inline bool is_pfn_swap_entry(swp_entry_t entry)
+{
+ /* Make sure the swp offset can always store the needed fields */
+ BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
+
+ return is_migration_entry(entry) || is_device_private_entry(entry) ||
+ is_device_exclusive_entry(entry);
+}
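/*
 * Illustrative sketch, not part of the patch: resolving a pfn swap entry
 * found in a pte back to its struct page, guarding the swp_offset_pfn()
 * call as required above.
 */
static inline struct page *sketch_entry_to_page(pte_t pte)
{
	swp_entry_t entry = pte_to_swp_entry(pte);

	if (!is_pfn_swap_entry(entry))
		return NULL;
	return pfn_to_page(swp_offset_pfn(entry));
}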
struct page_vma_mapped_walk;
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
-extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
+extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
struct page *page);
extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
@@ -256,6 +530,8 @@ static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
if (pmd_swp_soft_dirty(pmd))
pmd = pmd_swp_clear_soft_dirty(pmd);
+ if (pmd_swp_uffd_wp(pmd))
+ pmd = pmd_swp_clear_uffd_wp(pmd);
arch_entry = __pmd_to_swp_entry(pmd);
return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
@@ -270,10 +546,10 @@ static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
static inline int is_pmd_migration_entry(pmd_t pmd)
{
- return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
+ return is_swap_pmd(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
-#else
-static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
+#else /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
+static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
struct page *page)
{
BUILD_BUG();
@@ -301,7 +577,7 @@ static inline int is_pmd_migration_entry(pmd_t pmd)
{
return 0;
}
-#endif
+#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
#ifdef CONFIG_MEMORY_FAILURE
@@ -326,12 +602,12 @@ static inline void num_poisoned_pages_inc(void)
atomic_long_inc(&num_poisoned_pages);
}
-static inline void num_poisoned_pages_dec(void)
+static inline void num_poisoned_pages_sub(long i)
{
- atomic_long_dec(&num_poisoned_pages);
+ atomic_long_sub(i, &num_poisoned_pages);
}
-#else
+#else /* CONFIG_MEMORY_FAILURE */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
@@ -346,19 +622,16 @@ static inline int is_hwpoison_entry(swp_entry_t swp)
static inline void num_poisoned_pages_inc(void)
{
}
-#endif
-#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
-static inline int non_swap_entry(swp_entry_t entry)
+static inline void num_poisoned_pages_sub(long i)
{
- return swp_type(entry) >= MAX_SWAPFILES;
}
-#else
+#endif /* CONFIG_MEMORY_FAILURE */
+
static inline int non_swap_entry(swp_entry_t entry)
{
- return 0;
+ return swp_type(entry) >= MAX_SWAPFILES;
}
-#endif
#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 046bb94bd4d6..35bc4e281c21 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -2,19 +2,20 @@
#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H
+#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/limits.h>
+#include <linux/spinlock.h>
struct device;
struct page;
struct scatterlist;
-enum swiotlb_force {
- SWIOTLB_NORMAL, /* Default - depending on HW DMA mask etc. */
- SWIOTLB_FORCE, /* swiotlb=force */
- SWIOTLB_NO_FORCE, /* swiotlb=noforce */
-};
+#define SWIOTLB_VERBOSE (1 << 0) /* verbose initialization */
+#define SWIOTLB_FORCE (1 << 1) /* force bounce buffering */
+#define SWIOTLB_ANY (1 << 2) /* allow any memory for the buffer */
/*
* Maximum allowable number of contiguous slabs to map,
@@ -28,61 +29,111 @@ enum swiotlb_force {
* controllable.
*/
#define IO_TLB_SHIFT 11
+#define IO_TLB_SIZE (1 << IO_TLB_SHIFT)
+
+/* default to 64MB */
+#define IO_TLB_DEFAULT_SIZE (64UL<<20)
-extern void swiotlb_init(int verbose);
-int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
-extern unsigned long swiotlb_nr_tbl(void);
unsigned long swiotlb_size_or_default(void);
-extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
+void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+ int (*remap)(void *tlb, unsigned long nslabs));
+int swiotlb_init_late(size_t size, gfp_t gfp_mask,
+ int (*remap)(void *tlb, unsigned long nslabs));
extern void __init swiotlb_update_mem_attributes(void);
-/*
- * Enumeration for sync targets
- */
-enum dma_sync_target {
- SYNC_FOR_CPU = 0,
- SYNC_FOR_DEVICE = 1,
-};
-
-extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
- dma_addr_t tbl_dma_addr,
- phys_addr_t phys,
- size_t mapping_size,
- size_t alloc_size,
- enum dma_data_direction dir,
- unsigned long attrs);
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
+ size_t mapping_size, size_t alloc_size,
+ unsigned int alloc_aligned_mask, enum dma_data_direction dir,
+ unsigned long attrs);
extern void swiotlb_tbl_unmap_single(struct device *hwdev,
phys_addr_t tlb_addr,
size_t mapping_size,
- size_t alloc_size,
enum dma_data_direction dir,
unsigned long attrs);
-extern void swiotlb_tbl_sync_single(struct device *hwdev,
- phys_addr_t tlb_addr,
- size_t size, enum dma_data_direction dir,
- enum dma_sync_target target);
-
+void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
+ size_t size, enum dma_data_direction dir);
+void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
+ size_t size, enum dma_data_direction dir);
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs);
#ifdef CONFIG_SWIOTLB
-extern enum swiotlb_force swiotlb_force;
-extern phys_addr_t io_tlb_start, io_tlb_end;
-static inline bool is_swiotlb_buffer(phys_addr_t paddr)
+/**
+ * struct io_tlb_mem - IO TLB Memory Pool Descriptor
+ *
+ * @start: The start address of the swiotlb memory pool. Used to do a quick
+ * range check to see if the memory was in fact allocated by this
+ * API.
+ * @end: The end address of the swiotlb memory pool. Used to do a quick
+ * range check to see if the memory was in fact allocated by this
+ * API.
+ * @vaddr: The vaddr of the swiotlb memory pool. The pool may be remapped
+ * in the memory-encrypted case, and this then stores the virtual
+ * address used for bounce buffer operations.
+ * @nslabs: The number of IO TLB blocks (in groups of 64) between @start and
+ * @end. For default swiotlb, this is command-line adjustable via
+ * setup_io_tlb_npages.
+ * @used: The number of used IO TLB blocks.
+ * @list: The free list describing the number of free entries available
+ * from each index.
+ * @orig_addr: The original address corresponding to a mapped entry.
+ * @alloc_size: Size of the allocated buffer.
+ * @debugfs: The dentry to debugfs.
+ * @late_alloc: %true if allocated using the page allocator
+ * @force_bounce: %true if swiotlb bouncing is forced
+ * @for_alloc: %true if the pool is used for memory allocation
+ * @nareas: The number of areas in the pool.
+ * @area_nslabs: The number of slots in each area.
+ */
+struct io_tlb_mem {
+ phys_addr_t start;
+ phys_addr_t end;
+ void *vaddr;
+ unsigned long nslabs;
+ unsigned long used;
+ struct dentry *debugfs;
+ bool late_alloc;
+ bool force_bounce;
+ bool for_alloc;
+ unsigned int nareas;
+ unsigned int area_nslabs;
+ struct io_tlb_area *areas;
+ struct io_tlb_slot *slots;
+};
+extern struct io_tlb_mem io_tlb_default_mem;
+
+static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
- return paddr >= io_tlb_start && paddr < io_tlb_end;
+ struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
+
+ return mem && paddr >= mem->start && paddr < mem->end;
+}
+
+static inline bool is_swiotlb_force_bounce(struct device *dev)
+{
+ struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
+
+ return mem && mem->force_bounce;
}
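/*
 * A hedged sketch, not part of the patch: how an unmap path might consult
 * the per-device pool. The paddr is bounced back only if it falls inside
 * the device's io_tlb_mem; dev, paddr, size and dir are assumed to come
 * from surrounding dma-direct code.
 */
static inline void sketch_unmap_check(struct device *dev, phys_addr_t paddr,
				      size_t size, enum dma_data_direction dir)
{
	if (is_swiotlb_buffer(dev, paddr))
		swiotlb_tbl_unmap_single(dev, paddr, size, dir, 0);
}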
+void swiotlb_init(bool addressing_limited, unsigned int flags);
void __init swiotlb_exit(void);
unsigned int swiotlb_max_segment(void);
size_t swiotlb_max_mapping_size(struct device *dev);
-bool is_swiotlb_active(void);
+bool is_swiotlb_active(struct device *dev);
+void __init swiotlb_adjust_size(unsigned long size);
#else
-#define swiotlb_force SWIOTLB_NO_FORCE
-static inline bool is_swiotlb_buffer(phys_addr_t paddr)
+static inline void swiotlb_init(bool addressing_limited, unsigned int flags)
+{
+}
+static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
+{
+ return false;
+}
+static inline bool is_swiotlb_force_bounce(struct device *dev)
{
return false;
}
@@ -98,13 +149,42 @@ static inline size_t swiotlb_max_mapping_size(struct device *dev)
return SIZE_MAX;
}
-static inline bool is_swiotlb_active(void)
+static inline bool is_swiotlb_active(struct device *dev)
{
return false;
}
+
+static inline void swiotlb_adjust_size(unsigned long size)
+{
+}
#endif /* CONFIG_SWIOTLB */
extern void swiotlb_print_info(void);
-extern void swiotlb_set_max_segment(unsigned int);
+
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+struct page *swiotlb_alloc(struct device *dev, size_t size);
+bool swiotlb_free(struct device *dev, struct page *page, size_t size);
+
+static inline bool is_swiotlb_for_alloc(struct device *dev)
+{
+ return dev->dma_io_tlb_mem->for_alloc;
+}
+#else
+static inline struct page *swiotlb_alloc(struct device *dev, size_t size)
+{
+ return NULL;
+}
+static inline bool swiotlb_free(struct device *dev, struct page *page,
+ size_t size)
+{
+ return false;
+}
+static inline bool is_swiotlb_for_alloc(struct device *dev)
+{
+ return false;
+}
+#endif /* CONFIG_DMA_RESTRICTED_POOL */
+
+extern phys_addr_t swiotlb_unencrypted_base;
#endif /* __LINUX_SWIOTLB_H */
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h
index 082f1d51957a..48fabe36509e 100644
--- a/include/linux/switchtec.h
+++ b/include/linux/switchtec.h
@@ -19,6 +19,7 @@
#define SWITCHTEC_EVENT_EN_CLI BIT(2)
#define SWITCHTEC_EVENT_EN_IRQ BIT(3)
#define SWITCHTEC_EVENT_FATAL BIT(4)
+#define SWITCHTEC_EVENT_NOT_SUPP BIT(31)
#define SWITCHTEC_DMA_MRPC_EN BIT(0)
@@ -336,8 +337,6 @@ enum {
NTB_CTRL_REQ_ID_EN = 1 << 0,
NTB_CTRL_LUT_EN = 1 << 0,
-
- NTB_PART_CTRL_ID_PROT_DIS = 1 << 0,
};
struct ntb_ctrl_regs {
diff --git a/include/linux/syscall_user_dispatch.h b/include/linux/syscall_user_dispatch.h
new file mode 100644
index 000000000000..a0ae443fb7df
--- /dev/null
+++ b/include/linux/syscall_user_dispatch.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Collabora Ltd.
+ */
+#ifndef _SYSCALL_USER_DISPATCH_H
+#define _SYSCALL_USER_DISPATCH_H
+
+#include <linux/thread_info.h>
+
+#ifdef CONFIG_GENERIC_ENTRY
+
+struct syscall_user_dispatch {
+ char __user *selector;
+ unsigned long offset;
+ unsigned long len;
+ bool on_dispatch;
+};
+
+int set_syscall_user_dispatch(unsigned long mode, unsigned long offset,
+ unsigned long len, char __user *selector);
+
+#define clear_syscall_work_syscall_user_dispatch(tsk) \
+ clear_task_syscall_work(tsk, SYSCALL_USER_DISPATCH)
+
+#else
+struct syscall_user_dispatch {};
+
+static inline int set_syscall_user_dispatch(unsigned long mode, unsigned long offset,
+ unsigned long len, char __user *selector)
+{
+ return -EINVAL;
+}
+
+static inline void clear_syscall_work_syscall_user_dispatch(struct task_struct *tsk)
+{
+}
+
+#endif /* CONFIG_GENERIC_ENTRY */
+
+#endif /* _SYSCALL_USER_DISPATCH_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 1815065d52f3..a34b0f9a9972 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -47,7 +47,6 @@ struct stat64;
struct statfs;
struct statfs64;
struct statx;
-struct __sysctl_args;
struct sysinfo;
struct timespec;
struct __kernel_old_timeval;
@@ -59,6 +58,7 @@ struct mq_attr;
struct compat_stat;
struct old_timeval32;
struct robust_list_head;
+struct futex_waitv;
struct getcpu_cache;
struct old_linux_dirent;
struct perf_event_attr;
@@ -69,6 +69,9 @@ union bpf_attr;
struct io_uring_params;
struct clone_args;
struct open_how;
+struct mount_attr;
+struct landlock_ruleset_attr;
+enum landlock_rule_type;
#include <linux/types.h>
#include <linux/aio_abi.h>
@@ -145,7 +148,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.flags = TRACE_EVENT_FL_CAP_ANY, \
}; \
static struct trace_event_call __used \
- __attribute__((section("_ftrace_events"))) \
+ __section("_ftrace_events") \
*__event_enter_##sname = &event_enter_##sname;
#define SYSCALL_TRACE_EXIT_EVENT(sname) \
@@ -161,7 +164,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.flags = TRACE_EVENT_FL_CAP_ANY, \
}; \
static struct trace_event_call __used \
- __attribute__((section("_ftrace_events"))) \
+ __section("_ftrace_events") \
*__event_exit_##sname = &event_exit_##sname;
#define SYSCALL_METADATA(sname, nb, ...) \
@@ -185,7 +188,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.enter_fields = LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \
}; \
static struct syscall_metadata __used \
- __attribute__((section("__syscalls_metadata"))) \
+ __section("__syscalls_metadata") \
*__p_syscall_meta_##sname = &__syscall_meta_##sname;
static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
@@ -252,6 +255,30 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
#endif /* __SYSCALL_DEFINEx */
+/* For split 64-bit arguments on 32-bit architectures */
+#ifdef __LITTLE_ENDIAN
+#define SC_ARG64(name) u32, name##_lo, u32, name##_hi
+#else
+#define SC_ARG64(name) u32, name##_hi, u32, name##_lo
+#endif
+#define SC_VAL64(type, name) ((type) name##_hi << 32 | name##_lo)
+
+#ifdef CONFIG_COMPAT
+#define SYSCALL32_DEFINE1 COMPAT_SYSCALL_DEFINE1
+#define SYSCALL32_DEFINE2 COMPAT_SYSCALL_DEFINE2
+#define SYSCALL32_DEFINE3 COMPAT_SYSCALL_DEFINE3
+#define SYSCALL32_DEFINE4 COMPAT_SYSCALL_DEFINE4
+#define SYSCALL32_DEFINE5 COMPAT_SYSCALL_DEFINE5
+#define SYSCALL32_DEFINE6 COMPAT_SYSCALL_DEFINE6
+#else
+#define SYSCALL32_DEFINE1 SYSCALL_DEFINE1
+#define SYSCALL32_DEFINE2 SYSCALL_DEFINE2
+#define SYSCALL32_DEFINE3 SYSCALL_DEFINE3
+#define SYSCALL32_DEFINE4 SYSCALL_DEFINE4
+#define SYSCALL32_DEFINE5 SYSCALL_DEFINE5
+#define SYSCALL32_DEFINE6 SYSCALL_DEFINE6
+#endif
+
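/*
 * Illustrative sketch, not part of the patch: a 32-bit entry point whose
 * 64-bit `pos' argument arrives split across two registers. `sketch_pread'
 * is a made-up syscall; SC_ARG64() contributes two parameters, hence the
 * DEFINE5 for four named arguments. ksys_pread64() is the in-kernel pread
 * helper.
 */
SYSCALL32_DEFINE5(sketch_pread, unsigned int, fd, char __user *, buf,
		  size_t, count, SC_ARG64(pos))
{
	return ksys_pread64(fd, buf, count, SC_VAL64(loff_t, pos));
}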
/*
* Called before coming back to user-mode. Returning to user-mode with an
* address limit different than USER_DS can allow to overwrite kernel memory.
@@ -263,10 +290,6 @@ static inline void addr_limit_user_check(void)
return;
#endif
- if (CHECK_DATA_CORRUPTION(!segment_eq(get_fs(), USER_DS),
- "Invalid address limit on user-mode return"))
- force_sig(SIGKILL);
-
#ifdef TIF_FSCHECK
clear_thread_flag(TIF_FSCHECK);
#endif
@@ -318,7 +341,7 @@ asmlinkage long sys_io_uring_setup(u32 entries,
struct io_uring_params __user *p);
asmlinkage long sys_io_uring_enter(unsigned int fd, u32 to_submit,
u32 min_complete, u32 flags,
- const sigset_t __user *sig, size_t sigsz);
+ const void __user *argp, size_t argsz);
asmlinkage long sys_io_uring_register(unsigned int fd, unsigned int op,
void __user *arg, unsigned int nr_args);
@@ -363,6 +386,11 @@ asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
int maxevents, int timeout,
const sigset_t __user *sigmask,
size_t sigsetsize);
+asmlinkage long sys_epoll_pwait2(int epfd, struct epoll_event __user *events,
+ int maxevents,
+ const struct __kernel_timespec __user *timeout,
+ const sigset_t __user *sigmask,
+ size_t sigsetsize);
/* fs/fcntl.c */
asmlinkage long sys_dup(unsigned int fildes);
@@ -428,6 +456,8 @@ asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length);
#endif
asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len);
asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode);
+asmlinkage long sys_faccessat2(int dfd, const char __user *filename, int mode,
+ int flags);
asmlinkage long sys_chdir(const char __user *filename);
asmlinkage long sys_fchdir(unsigned int fd);
asmlinkage long sys_chroot(const char __user *filename);
@@ -442,6 +472,8 @@ asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
asmlinkage long sys_openat2(int dfd, const char __user *filename,
struct open_how *how, size_t size);
asmlinkage long sys_close(unsigned int fd);
+asmlinkage long sys_close_range(unsigned int fd, unsigned int max_fd,
+ unsigned int flags);
asmlinkage long sys_vhangup(void);
/* fs/pipe.c */
@@ -450,6 +482,8 @@ asmlinkage long sys_pipe2(int __user *fildes, int flags);
/* fs/quota.c */
asmlinkage long sys_quotactl(unsigned int cmd, const char __user *special,
qid_t id, void __user *addr);
+asmlinkage long sys_quotactl_fd(unsigned int fd, unsigned int cmd, qid_t id,
+ void __user *addr);
/* fs/readdir.c */
asmlinkage long sys_getdents64(unsigned int fd,
@@ -573,19 +607,23 @@ asmlinkage long sys_waitid(int which, pid_t pid,
asmlinkage long sys_set_tid_address(int __user *tidptr);
asmlinkage long sys_unshare(unsigned long unshare_flags);
-/* kernel/futex.c */
+/* kernel/futex/syscalls.c */
asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
- struct __kernel_timespec __user *utime, u32 __user *uaddr2,
- u32 val3);
+ const struct __kernel_timespec __user *utime,
+ u32 __user *uaddr2, u32 val3);
asmlinkage long sys_futex_time32(u32 __user *uaddr, int op, u32 val,
- struct old_timespec32 __user *utime, u32 __user *uaddr2,
- u32 val3);
+ const struct old_timespec32 __user *utime,
+ u32 __user *uaddr2, u32 val3);
asmlinkage long sys_get_robust_list(int pid,
struct robust_list_head __user * __user *head_ptr,
size_t __user *len_ptr);
asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
size_t len);
+asmlinkage long sys_futex_waitv(struct futex_waitv *waiters,
+ unsigned int nr_futexes, unsigned int flags,
+ struct __kernel_timespec __user *timeout, clockid_t clockid);
+
/* kernel/hrtimer.c */
asmlinkage long sys_nanosleep(struct __kernel_timespec __user *rqtp,
struct __kernel_timespec __user *rmtp);
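sys_futex_waitv() above takes an array of waiters rather than a single futex. A user-space flavoured sketch of the calling convention, assuming the uapi struct futex_waitv layout (val, uaddr, flags) and the FUTEX_32 size flag; the local names are made up:

    /* Wait until either futex_a or futex_b leaves the expected value 0. */
    struct futex_waitv waiters[2] = {
            { .val = 0, .uaddr = (uintptr_t)&futex_a, .flags = FUTEX_32 },
            { .val = 0, .uaddr = (uintptr_t)&futex_b, .flags = FUTEX_32 },
    };

    /* Returns the index of the woken waiter, or a negative error. */
    long ret = syscall(SYS_futex_waitv, waiters, 2, 0, NULL, CLOCK_MONOTONIC);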
@@ -741,7 +779,7 @@ asmlinkage long sys_settimeofday(struct __kernel_old_timeval __user *tv,
asmlinkage long sys_adjtimex(struct __kernel_timex __user *txc_p);
asmlinkage long sys_adjtimex_time32(struct old_timex32 __user *txc_p);
-/* kernel/timer.c */
+/* kernel/sys.c */
asmlinkage long sys_getpid(void);
asmlinkage long sys_getppid(void);
asmlinkage long sys_getuid(void);
@@ -876,6 +914,9 @@ asmlinkage long sys_munlockall(void);
asmlinkage long sys_mincore(unsigned long start, size_t len,
unsigned char __user * vec);
asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior);
+asmlinkage long sys_process_madvise(int pidfd, const struct iovec __user *vec,
+ size_t vlen, int behavior, unsigned int flags);
+asmlinkage long sys_process_mrelease(int pidfd, unsigned int flags);
asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
unsigned long prot, unsigned long pgoff,
unsigned long flags);
@@ -971,7 +1012,7 @@ asmlinkage long sys_execveat(int dfd, const char __user *filename,
const char __user *const __user *argv,
const char __user *const __user *envp, int flags);
asmlinkage long sys_userfaultfd(int flags);
-asmlinkage long sys_membarrier(int cmd, int flags);
+asmlinkage long sys_membarrier(int cmd, unsigned int flags, int cpu_id);
asmlinkage long sys_mlock2(unsigned long start, size_t len, int flags);
asmlinkage long sys_copy_file_range(int fd_in, loff_t __user *off_in,
int fd_out, loff_t __user *off_out,
@@ -994,6 +1035,9 @@ asmlinkage long sys_open_tree(int dfd, const char __user *path, unsigned flags);
asmlinkage long sys_move_mount(int from_dfd, const char __user *from_path,
int to_dfd, const char __user *to_path,
unsigned int ms_flags);
+asmlinkage long sys_mount_setattr(int dfd, const char __user *path,
+ unsigned int flags,
+ struct mount_attr __user *uattr, size_t usize);
asmlinkage long sys_fsopen(const char __user *fs_name, unsigned int flags);
asmlinkage long sys_fsconfig(int fs_fd, unsigned int cmd, const char __user *key,
const void __user *value, int aux);
@@ -1003,6 +1047,15 @@ asmlinkage long sys_pidfd_send_signal(int pidfd, int sig,
siginfo_t __user *info,
unsigned int flags);
asmlinkage long sys_pidfd_getfd(int pidfd, int fd, unsigned int flags);
+asmlinkage long sys_landlock_create_ruleset(const struct landlock_ruleset_attr __user *attr,
+ size_t size, __u32 flags);
+asmlinkage long sys_landlock_add_rule(int ruleset_fd, enum landlock_rule_type rule_type,
+ const void __user *rule_attr, __u32 flags);
+asmlinkage long sys_landlock_restrict_self(int ruleset_fd, __u32 flags);
+asmlinkage long sys_memfd_secret(unsigned int flags);
+asmlinkage long sys_set_mempolicy_home_node(unsigned long start, unsigned long len,
+ unsigned long home_node,
+ unsigned long flags);
/*
* Architecture-specific system calls
@@ -1110,10 +1163,8 @@ asmlinkage long sys_ustat(unsigned dev, struct ustat __user *ubuf);
asmlinkage long sys_vfork(void);
asmlinkage long sys_recv(int, void __user *, size_t, unsigned);
asmlinkage long sys_send(int, void __user *, size_t, unsigned);
-asmlinkage long sys_bdflush(int func, long data);
asmlinkage long sys_oldumount(char __user *name);
asmlinkage long sys_uselib(const char __user *library);
-asmlinkage long sys_sysctl(struct __sysctl_args __user *args);
asmlinkage long sys_sysfs(int option,
unsigned long arg1, unsigned long arg2);
asmlinkage long sys_fork(void);
@@ -1233,18 +1284,8 @@ asmlinkage long sys_ni_syscall(void);
* Instead, use one of the functions which work equivalently, such as
* the ksys_xyzyyz() functions prototyped below.
*/
-
-int ksys_umount(char __user *name, int flags);
-int ksys_dup(unsigned int fildes);
-int ksys_chroot(const char __user *filename);
ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count);
-int ksys_chdir(const char __user *filename);
-int ksys_fchmod(unsigned int fd, umode_t mode);
int ksys_fchown(unsigned int fd, uid_t user, gid_t group);
-int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent,
- unsigned int count);
-int ksys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
-off_t ksys_lseek(unsigned int fd, off_t offset, unsigned int whence);
ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count);
void ksys_sync(void);
int ksys_unshare(unsigned long unshare_flags);
@@ -1278,68 +1319,6 @@ int compat_ksys_ipc(u32 call, int first, int second,
* The following kernel syscall equivalents are just wrappers to fs-internal
* functions. Therefore, provide stubs to be inlined at the callsites.
*/
-extern long do_unlinkat(int dfd, struct filename *name);
-
-static inline long ksys_unlink(const char __user *pathname)
-{
- return do_unlinkat(AT_FDCWD, getname(pathname));
-}
-
-extern long do_rmdir(int dfd, const char __user *pathname);
-
-static inline long ksys_rmdir(const char __user *pathname)
-{
- return do_rmdir(AT_FDCWD, pathname);
-}
-
-extern long do_mkdirat(int dfd, const char __user *pathname, umode_t mode);
-
-static inline long ksys_mkdir(const char __user *pathname, umode_t mode)
-{
- return do_mkdirat(AT_FDCWD, pathname, mode);
-}
-
-extern long do_symlinkat(const char __user *oldname, int newdfd,
- const char __user *newname);
-
-static inline long ksys_symlink(const char __user *oldname,
- const char __user *newname)
-{
- return do_symlinkat(oldname, AT_FDCWD, newname);
-}
-
-extern long do_mknodat(int dfd, const char __user *filename, umode_t mode,
- unsigned int dev);
-
-static inline long ksys_mknod(const char __user *filename, umode_t mode,
- unsigned int dev)
-{
- return do_mknodat(AT_FDCWD, filename, mode, dev);
-}
-
-extern int do_linkat(int olddfd, const char __user *oldname, int newdfd,
- const char __user *newname, int flags);
-
-static inline long ksys_link(const char __user *oldname,
- const char __user *newname)
-{
- return do_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
-}
-
-extern int do_fchmodat(int dfd, const char __user *filename, umode_t mode);
-
-static inline int ksys_chmod(const char __user *filename, umode_t mode)
-{
- return do_fchmodat(AT_FDCWD, filename, mode);
-}
-
-extern long do_faccessat(int dfd, const char __user *filename, int mode);
-
-static inline long ksys_access(const char __user *filename, int mode)
-{
- return do_faccessat(AT_FDCWD, filename, mode);
-}
-
extern int do_fchownat(int dfd, const char __user *filename, uid_t user,
gid_t group, int flag);
@@ -1358,34 +1337,11 @@ static inline long ksys_lchown(const char __user *filename, uid_t user,
extern long do_sys_ftruncate(unsigned int fd, loff_t length, int small);
-static inline long ksys_ftruncate(unsigned int fd, unsigned long length)
+static inline long ksys_ftruncate(unsigned int fd, loff_t length)
{
return do_sys_ftruncate(fd, length, 1);
}
-extern int __close_fd(struct files_struct *files, unsigned int fd);
-
-/*
- * In contrast to sys_close(), this stub does not check whether the syscall
- * should or should not be restarted, but returns the raw error codes from
- * __close_fd().
- */
-static inline int ksys_close(unsigned int fd)
-{
- return __close_fd(current->files, fd);
-}
-
-extern long do_sys_open(int dfd, const char __user *filename, int flags,
- umode_t mode);
-
-static inline long ksys_open(const char __user *filename, int flags,
- umode_t mode)
-{
- if (force_o_largefile())
- flags |= O_LARGEFILE;
- return do_sys_open(AT_FDCWD, filename, flags, mode);
-}
-
extern long do_sys_truncate(const char __user *pathname, loff_t length);
static inline long ksys_truncate(const char __user *pathname, loff_t length)
@@ -1421,5 +1377,12 @@ long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
unsigned int nsops,
const struct old_timespec32 __user *timeout);
-
+long __do_semtimedop(int semid, struct sembuf *tsems, unsigned int nsops,
+ const struct timespec64 *timeout,
+ struct ipc_namespace *ns);
+
+int __sys_getsockopt(int fd, int level, int optname, char __user *optval,
+ int __user *optlen);
+int __sys_setsockopt(int fd, int level, int optname, char __user *optval,
+ int optlen);
#endif
diff --git a/include/linux/syscalls_api.h b/include/linux/syscalls_api.h
new file mode 100644
index 000000000000..23e012b04db4
--- /dev/null
+++ b/include/linux/syscalls_api.h
@@ -0,0 +1 @@
+#include <linux/syscalls.h>
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 02fa84493f23..780690dc08cd 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -38,41 +38,55 @@ struct ctl_table_header;
struct ctl_dir;
/* Keep the same order as in fs/proc/proc_sysctl.c */
-#define SYSCTL_ZERO ((void *)&sysctl_vals[0])
-#define SYSCTL_ONE ((void *)&sysctl_vals[1])
-#define SYSCTL_INT_MAX ((void *)&sysctl_vals[2])
+#define SYSCTL_ZERO ((void *)&sysctl_vals[0])
+#define SYSCTL_ONE ((void *)&sysctl_vals[1])
+#define SYSCTL_TWO ((void *)&sysctl_vals[2])
+#define SYSCTL_THREE ((void *)&sysctl_vals[3])
+#define SYSCTL_FOUR ((void *)&sysctl_vals[4])
+#define SYSCTL_ONE_HUNDRED ((void *)&sysctl_vals[5])
+#define SYSCTL_TWO_HUNDRED ((void *)&sysctl_vals[6])
+#define SYSCTL_ONE_THOUSAND ((void *)&sysctl_vals[7])
+#define SYSCTL_THREE_THOUSAND ((void *)&sysctl_vals[8])
+#define SYSCTL_INT_MAX ((void *)&sysctl_vals[9])
+
+/* needed by proc_dointvec_minmax for the [fs_]overflow UID and GID */
+#define SYSCTL_MAXOLDUID ((void *)&sysctl_vals[10])
+#define SYSCTL_NEG_ONE ((void *)&sysctl_vals[11])
extern const int sysctl_vals[];
-typedef int proc_handler (struct ctl_table *ctl, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
-
-extern int proc_dostring(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-extern int proc_dointvec(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-extern int proc_douintvec(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-extern int proc_dointvec_minmax(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-extern int proc_douintvec_minmax(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
-extern int proc_dointvec_jiffies(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-extern int proc_dointvec_ms_jiffies(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-extern int proc_doulongvec_minmax(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int,
- void __user *, size_t *, loff_t *);
-extern int proc_do_large_bitmap(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-extern int proc_do_static_key(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
+#define SYSCTL_LONG_ZERO ((void *)&sysctl_long_vals[0])
+#define SYSCTL_LONG_ONE ((void *)&sysctl_long_vals[1])
+#define SYSCTL_LONG_MAX ((void *)&sysctl_long_vals[2])
+
+extern const unsigned long sysctl_long_vals[];
+
+typedef int proc_handler(struct ctl_table *ctl, int write, void *buffer,
+ size_t *lenp, loff_t *ppos);
+
+int proc_dostring(struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_dobool(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos);
+int proc_dointvec(struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_douintvec(struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_dointvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_douintvec_minmax(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos);
+int proc_dou8vec_minmax(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos);
+int proc_dointvec_jiffies(struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_dointvec_ms_jiffies_minmax(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos);
+int proc_dointvec_userhz_jiffies(struct ctl_table *, int, void *, size_t *,
+ loff_t *);
+int proc_dointvec_ms_jiffies(struct ctl_table *, int, void *, size_t *,
+ loff_t *);
+int proc_doulongvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, void *,
+ size_t *, loff_t *);
+int proc_do_large_bitmap(struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_do_static_key(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos);
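The rewritten prototypes drop __user from the buffer: handlers now receive a kernel-space copy. A minimal sketch of a ctl_table entry wired to proc_dointvec_minmax() with the extended SYSCTL_* bounds above; the tunable and table names are hypothetical:

    static int example_ratio;

    static struct ctl_table example_table[] = {
            {
                    .procname     = "example_ratio",
                    .data         = &example_ratio,
                    .maxlen       = sizeof(int),
                    .mode         = 0644,
                    .proc_handler = proc_dointvec_minmax,
                    .extra1       = SYSCTL_ZERO,        /* clamp to 0..100 */
                    .extra2       = SYSCTL_ONE_HUNDRED,
            },
            { }
    };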
/*
* Register a set of sysctl names by calling register_sysctl_table
@@ -83,15 +97,13 @@ extern int proc_do_static_key(struct ctl_table *table, int write,
* sysctl names can be mirrored automatically under /proc/sys. The
* procname supplied controls /proc naming.
*
- * The table's mode will be honoured both for sys_sysctl(2) and
- * proc-fs access.
+ * The table's mode will be honoured for proc-fs access.
*
* Leaf nodes in the sysctl tree will be represented by a single file
* under /proc; non-leaf nodes will be represented by directories. A
* null procname disables /proc mirroring at this node.
*
- * sysctl(2) can automatically manage read and write requests through
- * the sysctl table. The data and maxlen fields of the ctl_table
+ * The data and maxlen fields of the ctl_table
* struct enable minimal validation of the values being written to be
* performed, and the mode field allows minimal authentication.
*
@@ -185,6 +197,20 @@ struct ctl_path {
#ifdef CONFIG_SYSCTL
+#define DECLARE_SYSCTL_BASE(_name, _table) \
+static struct ctl_table _name##_base_table[] = { \
+ { \
+ .procname = #_name, \
+ .mode = 0555, \
+ .child = _table, \
+ }, \
+ { }, \
+}
+
+extern int __register_sysctl_base(struct ctl_table *base_table);
+
+#define register_sysctl_base(_name) __register_sysctl_base(_name##_base_table)
+
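DECLARE_SYSCTL_BASE() expands to a one-entry directory table named _name##_base_table, which register_sysctl_base() then registers. Inferred usage, with hypothetical names:

    DECLARE_SYSCTL_BASE(example, example_table);

    static int __init example_sysctl_init(void)
    {
            /* registers the "example" directory declared above */
            return register_sysctl_base(example);
    }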
void proc_sys_poll_notify(struct ctl_table_poll *poll);
extern void setup_sysctl_set(struct ctl_table_set *p,
@@ -205,16 +231,52 @@ struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
void unregister_sysctl_table(struct ctl_table_header * table);
-extern int sysctl_init(void);
+extern int sysctl_init_bases(void);
+extern void __register_sysctl_init(const char *path, struct ctl_table *table,
+ const char *table_name);
+#define register_sysctl_init(path, table) __register_sysctl_init(path, table, #table)
+extern struct ctl_table_header *register_sysctl_mount_point(const char *path);
+
+void do_sysctl_args(void);
+int do_proc_douintvec(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos,
+ int (*conv)(unsigned long *lvalp,
+ unsigned int *valp,
+ int write, void *data),
+ void *data);
+
+extern int pwrsw_enabled;
+extern int unaligned_enabled;
+extern int unaligned_dump_stack;
+extern int no_unaligned_warning;
extern struct ctl_table sysctl_mount_point[];
#else /* CONFIG_SYSCTL */
+
+#define DECLARE_SYSCTL_BASE(_name, _table)
+
+static inline int __register_sysctl_base(struct ctl_table *base_table)
+{
+ return 0;
+}
+
+#define register_sysctl_base(table) __register_sysctl_base(table)
+
static inline struct ctl_table_header *register_sysctl_table(struct ctl_table * table)
{
return NULL;
}
+static inline void register_sysctl_init(const char *path, struct ctl_table *table)
+{
+}
+
+static inline struct ctl_table_header *register_sysctl_mount_point(const char *path)
+{
+ return NULL;
+}
+
static inline struct ctl_table_header *register_sysctl_paths(
const struct ctl_path *path, struct ctl_table *table)
{
@@ -236,9 +298,12 @@ static inline void setup_sysctl_set(struct ctl_table_set *p,
{
}
+static inline void do_sysctl_args(void)
+{
+}
#endif /* CONFIG_SYSCTL */
-int sysctl_max_threads(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+int sysctl_max_threads(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos);
#endif /* _LINUX_SYSCTL_H */
diff --git a/include/linux/sysfb.h b/include/linux/sysfb.h
new file mode 100644
index 000000000000..8ba8b5be5567
--- /dev/null
+++ b/include/linux/sysfb.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_SYSFB_H
+#define _LINUX_SYSFB_H
+
+/*
+ * Generic System Framebuffers on x86
+ * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_data/simplefb.h>
+#include <linux/screen_info.h>
+
+enum {
+ M_I17, /* 17-Inch iMac */
+ M_I20, /* 20-Inch iMac */
+ M_I20_SR, /* 20-Inch iMac (Santa Rosa) */
+ M_I24, /* 24-Inch iMac */
+ M_I24_8_1, /* 24-Inch iMac, 8,1th gen */
+ M_I24_10_1, /* 24-Inch iMac, 10,1th gen */
+ M_I27_11_1, /* 27-Inch iMac, 11,1th gen */
+ M_MINI, /* Mac Mini */
+ M_MINI_3_1, /* Mac Mini, 3,1th gen */
+ M_MINI_4_1, /* Mac Mini, 4,1th gen */
+ M_MB, /* MacBook */
+ M_MB_2, /* MacBook, 2nd rev. */
+ M_MB_3, /* MacBook, 3rd rev. */
+ M_MB_5_1, /* MacBook, 5th rev. */
+ M_MB_6_1, /* MacBook, 6th rev. */
+ M_MB_7_1, /* MacBook, 7th rev. */
+ M_MB_SR, /* MacBook, 2nd gen, (Santa Rosa) */
+ M_MBA, /* MacBook Air */
+ M_MBA_3, /* Macbook Air, 3rd rev */
+ M_MBP, /* MacBook Pro */
+ M_MBP_2, /* MacBook Pro 2nd gen */
+ M_MBP_2_2, /* MacBook Pro 2,2nd gen */
+ M_MBP_SR, /* MacBook Pro (Santa Rosa) */
+ M_MBP_4, /* MacBook Pro, 4th gen */
+ M_MBP_5_1, /* MacBook Pro, 5,1th gen */
+ M_MBP_5_2, /* MacBook Pro, 5,2th gen */
+ M_MBP_5_3, /* MacBook Pro, 5,3rd gen */
+ M_MBP_6_1, /* MacBook Pro, 6,1th gen */
+ M_MBP_6_2, /* MacBook Pro, 6,2th gen */
+ M_MBP_7_1, /* MacBook Pro, 7,1th gen */
+ M_MBP_8_2, /* MacBook Pro, 8,2nd gen */
+ M_UNKNOWN /* placeholder */
+};
+
+struct efifb_dmi_info {
+ char *optname;
+ unsigned long base;
+ int stride;
+ int width;
+ int height;
+ int flags;
+};
+
+#ifdef CONFIG_SYSFB
+
+void sysfb_disable(void);
+
+#else /* CONFIG_SYSFB */
+
+static inline void sysfb_disable(void)
+{
+}
+
+#endif /* CONFIG_SYSFB */
+
+#ifdef CONFIG_EFI
+
+extern struct efifb_dmi_info efifb_dmi_list[];
+void sysfb_apply_efi_quirks(struct platform_device *pd);
+
+#else /* CONFIG_EFI */
+
+static inline void sysfb_apply_efi_quirks(struct platform_device *pd)
+{
+}
+
+#endif /* CONFIG_EFI */
+
+#ifdef CONFIG_SYSFB_SIMPLEFB
+
+bool sysfb_parse_mode(const struct screen_info *si,
+ struct simplefb_platform_data *mode);
+struct platform_device *sysfb_create_simplefb(const struct screen_info *si,
+ const struct simplefb_platform_data *mode);
+
+#else /* CONFIG_SYSFB_SIMPLEFB */
+
+static inline bool sysfb_parse_mode(const struct screen_info *si,
+ struct simplefb_platform_data *mode)
+{
+ return false;
+}
+
+static inline struct platform_device *sysfb_create_simplefb(const struct screen_info *si,
+ const struct simplefb_platform_data *mode)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+#endif /* CONFIG_SYSFB_SIMPLEFB */
+
+#endif /* _LINUX_SYSFB_H */
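A sketch of the simplefb path these declarations support, assuming a populated global screen_info; whether sysfb_apply_efi_quirks() runs before or after device creation is a detail of the sysfb core, so treat the ordering here as illustrative:

    struct simplefb_platform_data mode;
    struct platform_device *pd;

    if (sysfb_parse_mode(&screen_info, &mode)) {
            pd = sysfb_create_simplefb(&screen_info, &mode);
            if (!IS_ERR(pd))
                    sysfb_apply_efi_quirks(pd);
    }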
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index fa7ee503fb76..fd3fe5c8c17f 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -7,7 +7,7 @@
* Copyright (c) 2007 SUSE Linux Products GmbH
* Copyright (c) 2007 Tejun Heo <teheo@suse.de>
*
- * Please see Documentation/filesystems/sysfs.txt for more information.
+ * Please see Documentation/filesystems/sysfs.rst for more information.
*/
#ifndef _SYSFS_H_
@@ -123,6 +123,13 @@ struct attribute_group {
.show = _name##_show, \
}
+#define __ATTR_RW_MODE(_name, _mode) { \
+ .attr = { .name = __stringify(_name), \
+ .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \
+ .show = _name##_show, \
+ .store = _name##_store, \
+}
+
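__ATTR_RW_MODE() is __ATTR_RW() with the permission bits made explicit, for attributes that must not default to 0644. A sketch with hypothetical show/store callbacks:

    static ssize_t example_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf);
    static ssize_t example_store(struct kobject *kobj,
                                 struct kobj_attribute *attr,
                                 const char *buf, size_t count);

    /* 0600: root-only read/write attribute */
    static struct kobj_attribute example_attr =
            __ATTR_RW_MODE(example, 0600);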
#define __ATTR_WO(_name) { \
.attr = { .name = __stringify(_name), .mode = 0200 }, \
.store = _name##_store, \
@@ -155,13 +162,21 @@ static const struct attribute_group _name##_group = { \
}; \
__ATTRIBUTE_GROUPS(_name)
+#define BIN_ATTRIBUTE_GROUPS(_name) \
+static const struct attribute_group _name##_group = { \
+ .bin_attrs = _name##_attrs, \
+}; \
+__ATTRIBUTE_GROUPS(_name)
+
struct file;
struct vm_area_struct;
+struct address_space;
struct bin_attribute {
struct attribute attr;
size_t size;
void *private;
+ struct address_space *(*f_mapping)(void);
ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *,
char *, loff_t, size_t);
ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *,
@@ -220,6 +235,22 @@ struct bin_attribute bin_attr_##_name = __BIN_ATTR_WO(_name, _size)
#define BIN_ATTR_RW(_name, _size) \
struct bin_attribute bin_attr_##_name = __BIN_ATTR_RW(_name, _size)
+
+#define __BIN_ATTR_ADMIN_RO(_name, _size) { \
+ .attr = { .name = __stringify(_name), .mode = 0400 }, \
+ .read = _name##_read, \
+ .size = _size, \
+}
+
+#define __BIN_ATTR_ADMIN_RW(_name, _size) \
+ __BIN_ATTR(_name, 0600, _name##_read, _name##_write, _size)
+
+#define BIN_ATTR_ADMIN_RO(_name, _size) \
+struct bin_attribute bin_attr_##_name = __BIN_ATTR_ADMIN_RO(_name, _size)
+
+#define BIN_ATTR_ADMIN_RW(_name, _size) \
+struct bin_attribute bin_attr_##_name = __BIN_ATTR_ADMIN_RW(_name, _size)
+
struct sysfs_ops {
ssize_t (*show)(struct kobject *, struct attribute *, char *);
ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t);
@@ -297,9 +328,10 @@ int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name,
struct kobject *target, const char *link_name);
void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name,
const char *link_name);
-int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj,
- struct kobject *target_kobj,
- const char *target_name);
+int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj,
+ struct kobject *target_kobj,
+ const char *target_name,
+ const char *symlink_name);
void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr);
@@ -310,6 +342,22 @@ static inline void sysfs_enable_ns(struct kernfs_node *kn)
return kernfs_enable_ns(kn);
}
+int sysfs_file_change_owner(struct kobject *kobj, const char *name, kuid_t kuid,
+ kgid_t kgid);
+int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid);
+int sysfs_link_change_owner(struct kobject *kobj, struct kobject *targ,
+ const char *name, kuid_t kuid, kgid_t kgid);
+int sysfs_groups_change_owner(struct kobject *kobj,
+ const struct attribute_group **groups,
+ kuid_t kuid, kgid_t kgid);
+int sysfs_group_change_owner(struct kobject *kobj,
+ const struct attribute_group *groups, kuid_t kuid,
+ kgid_t kgid);
+__printf(2, 3)
+int sysfs_emit(char *buf, const char *fmt, ...);
+__printf(3, 4)
+int sysfs_emit_at(char *buf, int at, const char *fmt, ...);
+
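sysfs_emit()/sysfs_emit_at() are PAGE_SIZE-aware replacements for s*printf() in show() callbacks. A typical conversion, with a hypothetical attribute and backing variable:

    static int example_value;   /* made-up value exposed via sysfs */

    static ssize_t example_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf)
    {
            /* sysfs_emit() verifies buf is a page-aligned sysfs buffer
             * and never writes past PAGE_SIZE. */
            return sysfs_emit(buf, "%d\n", example_value);
    }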
#else /* CONFIG_SYSFS */
static inline int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
@@ -500,10 +548,10 @@ static inline void sysfs_remove_link_from_group(struct kobject *kobj,
{
}
-static inline int __compat_only_sysfs_link_entry_to_kobj(
- struct kobject *kobj,
- struct kobject *target_kobj,
- const char *target_name)
+static inline int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj,
+ struct kobject *target_kobj,
+ const char *target_name,
+ const char *symlink_name)
{
return 0;
}
@@ -522,6 +570,51 @@ static inline void sysfs_enable_ns(struct kernfs_node *kn)
{
}
+static inline int sysfs_file_change_owner(struct kobject *kobj,
+ const char *name, kuid_t kuid,
+ kgid_t kgid)
+{
+ return 0;
+}
+
+static inline int sysfs_link_change_owner(struct kobject *kobj,
+ struct kobject *targ,
+ const char *name, kuid_t kuid,
+ kgid_t kgid)
+{
+ return 0;
+}
+
+static inline int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid)
+{
+ return 0;
+}
+
+static inline int sysfs_groups_change_owner(struct kobject *kobj,
+ const struct attribute_group **groups,
+ kuid_t kuid, kgid_t kgid)
+{
+ return 0;
+}
+
+static inline int sysfs_group_change_owner(struct kobject *kobj,
+ const struct attribute_group *groups,
+ kuid_t kuid, kgid_t kgid)
+{
+ return 0;
+}
+
+__printf(2, 3)
+static inline int sysfs_emit(char *buf, const char *fmt, ...)
+{
+ return 0;
+}
+
+__printf(3, 4)
+static inline int sysfs_emit_at(char *buf, int at, const char *fmt, ...)
+{
+ return 0;
+}
#endif /* CONFIG_SYSFS */
static inline int __must_check sysfs_create_file(struct kobject *kobj,
diff --git a/include/linux/syslog.h b/include/linux/syslog.h
index 86af908e2663..955f80e34d4f 100644
--- a/include/linux/syslog.h
+++ b/include/linux/syslog.h
@@ -8,6 +8,8 @@
#ifndef _LINUX_SYSLOG_H
#define _LINUX_SYSLOG_H
+#include <linux/wait.h>
+
/* Close the log. Currently a NOP. */
#define SYSLOG_ACTION_CLOSE 0
/* Open the log. Currently a NOP. */
@@ -35,5 +37,6 @@
#define SYSLOG_FROM_PROC 1
int do_syslog(int type, char __user *buf, int count, int source);
+extern wait_queue_head_t log_wait;
#endif /* _LINUX_SYSLOG_H */
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 8c71874e8485..3a582ec7a2f1 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -30,10 +30,10 @@
#define SYSRQ_ENABLE_RTNICE 0x0100
struct sysrq_key_op {
- void (*handler)(int);
- char *help_msg;
- char *action_msg;
- int enable_mask;
+ void (* const handler)(int);
+ const char * const help_msg;
+ const char * const action_msg;
+ const int enable_mask;
};
#ifdef CONFIG_MAGIC_SYSRQ
@@ -45,11 +45,12 @@ struct sysrq_key_op {
void handle_sysrq(int key);
void __handle_sysrq(int key, bool check_mask);
-int register_sysrq_key(int key, struct sysrq_key_op *op);
-int unregister_sysrq_key(int key, struct sysrq_key_op *op);
-struct sysrq_key_op *__sysrq_get_key_op(int key);
+int register_sysrq_key(int key, const struct sysrq_key_op *op);
+int unregister_sysrq_key(int key, const struct sysrq_key_op *op);
+extern const struct sysrq_key_op *__sysrq_reboot_op;
int sysrq_toggle_support(int enable_mask);
+int sysrq_mask(void);
#else
@@ -61,16 +62,22 @@ static inline void __handle_sysrq(int key, bool check_mask)
{
}
-static inline int register_sysrq_key(int key, struct sysrq_key_op *op)
+static inline int register_sysrq_key(int key, const struct sysrq_key_op *op)
{
return -EINVAL;
}
-static inline int unregister_sysrq_key(int key, struct sysrq_key_op *op)
+static inline int unregister_sysrq_key(int key, const struct sysrq_key_op *op)
{
return -EINVAL;
}
+static inline int sysrq_mask(void)
+{
+ /* Magic SysRq disabled mask */
+ return 0;
+}
+
#endif
#endif /* _LINUX_SYSRQ_H */
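With the ops structure and the registration paths constified, handlers can live in rodata. A sketch under the new signatures; the handler, message strings and key binding are invented for illustration:

    static void sysrq_handle_example(int key)
    {
            pr_info("example sysrq action\n");
    }

    static const struct sysrq_key_op example_sysrq_op = {
            .handler     = sysrq_handle_example,
            .help_msg    = "example(x)",
            .action_msg  = "Example action",
            .enable_mask = SYSRQ_ENABLE_KEYBOARD,
    };

    /* register_sysrq_key('x', &example_sysrq_op); */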
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index 96305a64a5a7..248f4ac95642 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -3,7 +3,7 @@
#define _LINUX_T10_PI_H
#include <linux/types.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
/*
* A T10 PI-capable target device can be formatted with different
@@ -53,4 +53,33 @@ extern const struct blk_integrity_profile t10_pi_type1_ip;
extern const struct blk_integrity_profile t10_pi_type3_crc;
extern const struct blk_integrity_profile t10_pi_type3_ip;
+struct crc64_pi_tuple {
+ __be64 guard_tag;
+ __be16 app_tag;
+ __u8 ref_tag[6];
+};
+
+/**
+ * lower_48_bits() - return bits 0-47 of a number
+ * @n: the number we're accessing
+ */
+static inline u64 lower_48_bits(u64 n)
+{
+ return n & ((1ull << 48) - 1);
+}
+
+static inline u64 ext_pi_ref_tag(struct request *rq)
+{
+ unsigned int shift = ilog2(queue_logical_block_size(rq->q));
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ if (rq->q->integrity.interval_exp)
+ shift = rq->q->integrity.interval_exp;
+#endif
+ return lower_48_bits(blk_rq_pos(rq) >> (shift - SECTOR_SHIFT));
+}
+
+extern const struct blk_integrity_profile ext_pi_type1_crc64;
+extern const struct blk_integrity_profile ext_pi_type3_crc64;
+
#endif
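ext_pi_ref_tag() yields a 48-bit reference tag because crc64_pi_tuple only has six bytes for it. A sketch of seeding a tuple for a request, assuming a struct request *rq and that put_unaligned_be48() is available alongside these helpers:

    struct crc64_pi_tuple pi = { };
    u64 ref = ext_pi_ref_tag(rq);          /* already masked to 48 bits */

    put_unaligned_be48(ref, pi.ref_tag);   /* big-endian 6-byte ref_tag */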
diff --git a/include/linux/task_work.h b/include/linux/task_work.h
index bd9a6a91c097..795ef5a68429 100644
--- a/include/linux/task_work.h
+++ b/include/linux/task_work.h
@@ -13,7 +13,23 @@ init_task_work(struct callback_head *twork, task_work_func_t func)
twork->func = func;
}
-int task_work_add(struct task_struct *task, struct callback_head *twork, bool);
+enum task_work_notify_mode {
+ TWA_NONE,
+ TWA_RESUME,
+ TWA_SIGNAL,
+ TWA_SIGNAL_NO_IPI,
+};
+
+static inline bool task_work_pending(struct task_struct *task)
+{
+ return READ_ONCE(task->task_works);
+}
+
+int task_work_add(struct task_struct *task, struct callback_head *twork,
+ enum task_work_notify_mode mode);
+
+struct callback_head *task_work_cancel_match(struct task_struct *task,
+ bool (*match)(struct callback_head *, void *data), void *data);
struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t);
void task_work_run(void);
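The bool notify flag is gone; callers pick an explicit mode. A sketch of queueing work to run when the target task next returns to user space (function names hypothetical):

    static void example_twork_fn(struct callback_head *cb)
    {
            /* runs in the target task's own context */
    }

    static int example_queue_twork(struct task_struct *task,
                                   struct callback_head *cb)
    {
            init_task_work(cb, example_twork_fn);
            /* TWA_RESUME: no signal/IPI, runs on return to user mode;
             * fails once the task has started exiting. */
            return task_work_add(task, cb, TWA_RESUME);
    }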
diff --git a/include/linux/tboot.h b/include/linux/tboot.h
index 5424bc6feac8..d2279160ef39 100644
--- a/include/linux/tboot.h
+++ b/include/linux/tboot.h
@@ -44,7 +44,7 @@ struct tboot_acpi_generic_address {
/*
* combines Sx info from FADT and FACS tables per ACPI 2.0+ spec
- * (http://www.acpi.info/)
+ * (https://uefi.org/specifications)
*/
struct tboot_acpi_sleep_info {
struct tboot_acpi_generic_address pm1a_cnt_blk;
@@ -121,18 +121,11 @@ struct tboot {
#define TBOOT_UUID {0xff, 0x8d, 0x3c, 0x66, 0xb3, 0xe8, 0x82, 0x4b, 0xbf,\
0xaa, 0x19, 0xea, 0x4d, 0x5, 0x7a, 0x8}
-extern struct tboot *tboot;
-
-static inline int tboot_enabled(void)
-{
- return tboot != NULL;
-}
-
+bool tboot_enabled(void);
extern void tboot_probe(void);
extern void tboot_shutdown(u32 shutdown_type);
extern struct acpi_table_header *tboot_get_dmar_table(
struct acpi_table_header *dmar_tbl);
-extern int tboot_force_iommu(void);
#else
@@ -142,7 +135,6 @@ extern int tboot_force_iommu(void);
#define tboot_sleep(sleep_state, pm1a_control, pm1b_control) \
do { } while (0)
#define tboot_get_dmar_table(dmar_tbl) (dmar_tbl)
-#define tboot_force_iommu() 0
#endif /* !CONFIG_INTEL_TXT */
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 3dc964010fef..41b1da621a45 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -46,6 +46,36 @@ static inline unsigned int inner_tcp_hdrlen(const struct sk_buff *skb)
return inner_tcp_hdr(skb)->doff * 4;
}
+/**
+ * skb_tcp_all_headers - Returns size of all headers for a TCP packet
+ * @skb: buffer
+ *
+ * Used in TX path, for a packet known to be a TCP one.
+ *
+ * if (skb_is_gso(skb)) {
+ * int hlen = skb_tcp_all_headers(skb);
+ * ...
+ */
+static inline int skb_tcp_all_headers(const struct sk_buff *skb)
+{
+ return skb_transport_offset(skb) + tcp_hdrlen(skb);
+}
+
+/**
+ * skb_inner_tcp_all_headers - Returns size of all headers for an encap TCP packet
+ * @skb: buffer
+ *
+ * Used in TX path, for a packet known to be a TCP one.
+ *
+ * if (skb_is_gso(skb) && skb->encapsulation) {
+ * int hlen = skb_inner_tcp_all_headers(skb);
+ * ...
+ */
+static inline int skb_inner_tcp_all_headers(const struct sk_buff *skb)
+{
+ return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+}
+
static inline unsigned int tcp_optlen(const struct sk_buff *skb)
{
return (tcp_hdr(skb)->doff - 5) * 4;
@@ -78,27 +108,6 @@ struct tcp_sack_block {
#define TCP_SACK_SEEN (1 << 0) /*1 = peer is SACK capable, */
#define TCP_DSACK_SEEN (1 << 2) /*1 = DSACK was received from peer*/
-#if IS_ENABLED(CONFIG_MPTCP)
-struct mptcp_options_received {
- u64 sndr_key;
- u64 rcvr_key;
- u64 data_ack;
- u64 data_seq;
- u32 subflow_seq;
- u16 data_len;
- u8 mp_capable : 1,
- mp_join : 1,
- dss : 1;
- u8 use_map:1,
- dsn64:1,
- data_fin:1,
- use_ack:1,
- ack64:1,
- mpc_map:1,
- __unused:2;
-};
-#endif
-
struct tcp_options_received {
/* PAWS/RTTM data */
int ts_recent_stamp;/* Time we stored ts_recent (for aging) */
@@ -113,12 +122,11 @@ struct tcp_options_received {
smc_ok : 1, /* SMC seen on SYN packet */
snd_wscale : 4, /* Window scaling received from sender */
rcv_wscale : 4; /* Window scaling to send to receiver */
+ u8 saw_unknown:1, /* Received unknown option */
+ unused:7;
u8 num_sacks; /* Number of SACK blocks */
u16 user_mss; /* mss requested by user in ioctl */
u16 mss_clamp; /* Maximal mss, negotiated at connection setup */
-#if IS_ENABLED(CONFIG_MPTCP)
- struct mptcp_options_received mptcp;
-#endif
};
static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
@@ -128,11 +136,6 @@ static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
#if IS_ENABLED(CONFIG_SMC)
rx_opt->smc_ok = 0;
#endif
-#if IS_ENABLED(CONFIG_MPTCP)
- rx_opt->mptcp.mp_capable = 0;
- rx_opt->mptcp.mp_join = 0;
- rx_opt->mptcp.dss = 0;
-#endif
}
/* This is the max number of SACKS that we'll generate and process. It's safe
@@ -149,6 +152,9 @@ struct tcp_request_sock {
u64 snt_synack; /* first SYNACK sent time */
bool tfo_listener;
bool is_mptcp;
+#if IS_ENABLED(CONFIG_MPTCP)
+ bool drop_req;
+#endif
u32 txhash;
u32 rcv_isn;
u32 snt_isn;
@@ -158,6 +164,7 @@ struct tcp_request_sock {
* FastOpen it's the seq#
* after data-in-SYN.
*/
+ u8 syn_tos;
};
static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
@@ -246,6 +253,9 @@ struct tcp_sock {
} rack;
u16 advmss; /* Advertised MSS */
u8 compressed_ack;
+ u8 dup_ack_counter:2,
+ tlp_retrans:1, /* TLP is a retransmission */
+ unused:5;
u32 chrono_start; /* Start time in jiffies of a TCP chrono */
u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
u8 chrono_type:2, /* current chronograph type */
@@ -260,15 +270,14 @@ struct tcp_sock {
repair : 1,
frto : 1;/* F-RTO (RFC5682) activated in CA_Loss */
u8 repair_queue;
- u8 syn_data:1, /* SYN includes data */
+ u8 save_syn:2, /* Save headers of SYN packet */
+ syn_data:1, /* SYN includes data */
syn_fastopen:1, /* SYN includes Fast Open option */
syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
syn_fastopen_ch:1, /* Active TFO re-enabling probe */
syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
- save_syn:1, /* Save headers of SYN packet */
- is_cwnd_limited:1,/* forward progress limited by snd_cwnd? */
- syn_smc:1; /* SYN includes SMC */
- u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
+ is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
+ u32 tlp_high_seq; /* snd_nxt at the time of TLP */
u32 tcp_tx_delay; /* delay (in usec) added to TX packets */
u64 tcp_wstamp_ns; /* departure time for next sent data packet */
@@ -286,7 +295,7 @@ struct tcp_sock {
u32 packets_out; /* Packets which are "in flight" */
u32 retrans_out; /* Retransmitted packets out */
u32 max_packets_out; /* max packets_out in last window */
- u32 max_packets_seq; /* right edge of max_packets_out flight */
+ u32 cwnd_usage_seq; /* right edge of cwnd usage tracking flight */
u16 urg_data; /* Saved octet of OOB data and control flags */
u8 ecn_flags; /* ECN status bits. */
@@ -379,6 +388,12 @@ struct tcp_sock {
u8 bpf_sock_ops_cb_flags; /* Control calling BPF programs
* values defined in uapi/linux/tcp.h
*/
+	u8	bpf_chg_cc_inprogress:1; /* Set while inside
+					  * bpf_setsockopt(TCP_CONGESTION),
+					  * to keep bpf_tcp_cc->init() from
+					  * recursing by calling
+					  * bpf_setsockopt(TCP_CONGESTION, "itself").
+					  */
#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) (TP->bpf_sock_ops_cb_flags & ARG)
#else
#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0
@@ -414,6 +429,10 @@ struct tcp_sock {
#if IS_ENABLED(CONFIG_MPTCP)
bool is_mptcp;
#endif
+#if IS_ENABLED(CONFIG_SMC)
+ bool (*smc_hs_congested)(const struct sock *sk);
+ bool syn_smc; /* SYN includes SMC */
+#endif
#ifdef CONFIG_TCP_MD5SIG
/* TCP AF-Specific parts; only used by MD5 Signature support so far */
@@ -429,7 +448,7 @@ struct tcp_sock {
* socket. Used to retransmit SYNACKs etc.
*/
struct request_sock __rcu *fastopen_rsk;
- u32 *saved_syn;
+ struct saved_syn *saved_syn;
};
enum tsq_enum {
@@ -507,7 +526,15 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp)
tp->saved_syn = NULL;
}
-struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk);
+static inline u32 tcp_saved_syn_len(const struct saved_syn *saved_syn)
+{
+ return saved_syn->mac_hdrlen + saved_syn->network_hdrlen +
+ saved_syn->tcp_hdrlen;
+}
+
+struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
+ const struct sk_buff *orig_skb,
+ const struct sk_buff *ack_skb);
static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
{
@@ -522,4 +549,16 @@ static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount,
int shiftlen);
+void __tcp_sock_set_cork(struct sock *sk, bool on);
+void tcp_sock_set_cork(struct sock *sk, bool on);
+int tcp_sock_set_keepcnt(struct sock *sk, int val);
+int tcp_sock_set_keepidle_locked(struct sock *sk, int val);
+int tcp_sock_set_keepidle(struct sock *sk, int val);
+int tcp_sock_set_keepintvl(struct sock *sk, int val);
+void __tcp_sock_set_nodelay(struct sock *sk, bool on);
+void tcp_sock_set_nodelay(struct sock *sk);
+void tcp_sock_set_quickack(struct sock *sk, int val);
+int tcp_sock_set_syncnt(struct sock *sk, int val);
+void tcp_sock_set_user_timeout(struct sock *sk, u32 val);
+
#endif /* _LINUX_TCP_H */
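These helpers give in-kernel TCP users (NFS, RBD and the like) a direct path to common socket options without mocking up a user-space setsockopt() call. A sketch, assuming sk is a kernel-owned TCP socket:

    tcp_sock_set_cork(sk, true);           /* batch small writes */
    tcp_sock_set_nodelay(sk);              /* ...or disable Nagle instead */
    tcp_sock_set_user_timeout(sk, 5000);   /* give up after 5000 ms */
    if (tcp_sock_set_keepidle(sk, 60))     /* seconds before keepalives */
            pr_warn("keepidle rejected\n");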
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index 7a03f68fb982..17eb1c5205d3 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2015-2016, Linaro Limited
+ * Copyright (c) 2015-2022 Linaro Limited
*/
#ifndef __TEE_DRV_H
@@ -20,12 +20,11 @@
* specific TEE driver.
*/
-#define TEE_SHM_MAPPED BIT(0) /* Memory mapped by the kernel */
-#define TEE_SHM_DMA_BUF BIT(1) /* Memory with dma-buf handle */
-#define TEE_SHM_EXT_DMA_BUF BIT(2) /* Memory with dma-buf handle */
-#define TEE_SHM_REGISTER BIT(3) /* Memory registered in secure world */
-#define TEE_SHM_USER_MAPPED BIT(4) /* Memory mapped in user space */
-#define TEE_SHM_POOL BIT(5) /* Memory allocated from pool */
+#define TEE_SHM_DYNAMIC BIT(0) /* Dynamic shared memory registered */
+ /* in secure world */
+#define TEE_SHM_USER_MAPPED BIT(1) /* Memory mapped in user space */
+#define TEE_SHM_POOL BIT(2) /* Memory allocated from pool */
+#define TEE_SHM_PRIV BIT(3) /* Memory private to TEE driver */
struct device;
struct tee_device;
@@ -46,14 +45,16 @@ struct tee_shm_pool;
* and just return with an error code. It is needed for requests
 * that arise from TEE-based kernel drivers that should be
* non-blocking in nature.
+ * @cap_memref_null: flag indicating if the TEE Client support shared
+ * memory buffer with a NULL pointer.
*/
struct tee_context {
struct tee_device *teedev;
- struct list_head list_shm;
void *data;
struct kref refcount;
bool releasing;
bool supp_nowait;
+ bool cap_memref_null;
};
struct tee_param_memref {
@@ -85,7 +86,7 @@ struct tee_param {
* @close_session: close a session
* @invoke_func: invoke a trusted function
* @cancel_req: request cancel of an ongoing invoke or open
- * @supp_revc: called for supplicant to get a command
+ * @supp_recv: called for supplicant to get a command
* @supp_send: called for supplicant to send a response
* @shm_register: register shared memory buffer in TEE
* @shm_unregister: unregister shared memory buffer in TEE
@@ -167,125 +168,89 @@ int tee_device_register(struct tee_device *teedev);
void tee_device_unregister(struct tee_device *teedev);
/**
+ * tee_session_calc_client_uuid() - Calculates client UUID for session
+ * @uuid: Resulting UUID
+ * @connection_method: Connection method for session (TEE_IOCTL_LOGIN_*)
+ * @connection_data: Connection data for opening session
+ *
+ * Based on connection method calculates UUIDv5 based client UUID.
+ *
+ * For group based logins verifies that calling process has specified
+ * credentials.
+ *
+ * @return < 0 on failure
+ */
+int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
+ const u8 connection_data[TEE_IOCTL_UUID_LEN]);
+
+/**
* struct tee_shm - shared memory object
- * @teedev: device used to allocate the object
- * @ctx: context using the object, if NULL the context is gone
- * @link link element
+ * @ctx: context using the object
* @paddr: physical address of the shared memory
* @kaddr: virtual address of the shared memory
* @size: size of shared memory
* @offset: offset of buffer in user space
* @pages: locked pages from userspace
* @num_pages: number of locked pages
- * @dmabuf: dmabuf used to for exporting to user space
+ * @refcount: reference counter
* @flags: defined by TEE_SHM_* in tee_drv.h
- * @id: unique id of a shared memory object on this device
+ * @id: unique id of a shared memory object on this device, shared
+ * with user space
+ * @sec_world_id:
+ * secure world assigned id of this shared memory object, not
+ * used by all drivers
*
* This pool is only supposed to be accessed directly from the TEE
 * subsystem and from drivers that implement their own shm pool manager.
*/
struct tee_shm {
- struct tee_device *teedev;
struct tee_context *ctx;
- struct list_head link;
phys_addr_t paddr;
void *kaddr;
size_t size;
unsigned int offset;
struct page **pages;
size_t num_pages;
- struct dma_buf *dmabuf;
+ refcount_t refcount;
u32 flags;
int id;
+ u64 sec_world_id;
};
/**
- * struct tee_shm_pool_mgr - shared memory manager
+ * struct tee_shm_pool - shared memory pool
* @ops: operations
* @private_data: private data for the shared memory manager
*/
-struct tee_shm_pool_mgr {
- const struct tee_shm_pool_mgr_ops *ops;
+struct tee_shm_pool {
+ const struct tee_shm_pool_ops *ops;
void *private_data;
};
/**
- * struct tee_shm_pool_mgr_ops - shared memory pool manager operations
+ * struct tee_shm_pool_ops - shared memory pool operations
* @alloc: called when allocating shared memory
* @free: called when freeing shared memory
- * @destroy_poolmgr: called when destroying the pool manager
+ * @destroy_pool: called when destroying the pool
*/
-struct tee_shm_pool_mgr_ops {
- int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm,
- size_t size);
- void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm);
- void (*destroy_poolmgr)(struct tee_shm_pool_mgr *poolmgr);
+struct tee_shm_pool_ops {
+ int (*alloc)(struct tee_shm_pool *pool, struct tee_shm *shm,
+ size_t size, size_t align);
+ void (*free)(struct tee_shm_pool *pool, struct tee_shm *shm);
+ void (*destroy_pool)(struct tee_shm_pool *pool);
};
-/**
- * tee_shm_pool_alloc() - Create a shared memory pool from shm managers
- * @priv_mgr: manager for driver private shared memory allocations
- * @dmabuf_mgr: manager for dma-buf shared memory allocations
- *
- * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
- * in @dmabuf, others will use the range provided by @priv.
- *
- * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
- */
-struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
- struct tee_shm_pool_mgr *dmabuf_mgr);
-
/*
- * tee_shm_pool_mgr_alloc_res_mem() - Create a shm manager for reserved
- * memory
+ * tee_shm_pool_alloc_res_mem() - Create a shm manager for reserved memory
* @vaddr: Virtual address of start of pool
* @paddr: Physical address of start of pool
* @size: Size in bytes of the pool
*
- * @returns pointer to a 'struct tee_shm_pool_mgr' or an ERR_PTR on failure.
- */
-struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
- phys_addr_t paddr,
- size_t size,
- int min_alloc_order);
-
-/**
- * tee_shm_pool_mgr_destroy() - Free a shared memory manager
- */
-static inline void tee_shm_pool_mgr_destroy(struct tee_shm_pool_mgr *poolm)
-{
- poolm->ops->destroy_poolmgr(poolm);
-}
-
-/**
- * struct tee_shm_pool_mem_info - holds information needed to create a shared
- * memory pool
- * @vaddr: Virtual address of start of pool
- * @paddr: Physical address of start of pool
- * @size: Size in bytes of the pool
- */
-struct tee_shm_pool_mem_info {
- unsigned long vaddr;
- phys_addr_t paddr;
- size_t size;
-};
-
-/**
- * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
- * memory range
- * @priv_info: Information for driver private shared memory pool
- * @dmabuf_info: Information for dma-buf shared memory pool
- *
- * Start and end of pools will must be page aligned.
- *
- * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
- * in @dmabuf, others will use the range provided by @priv.
- *
* @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
*/
-struct tee_shm_pool *
-tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
- struct tee_shm_pool_mem_info *dmabuf_info);
+struct tee_shm_pool *tee_shm_pool_alloc_res_mem(unsigned long vaddr,
+ phys_addr_t paddr, size_t size,
+ int min_alloc_order);
/**
* tee_shm_pool_free() - Free a shared memory pool
@@ -294,7 +259,10 @@ tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
 * There must be no remaining shared memory allocated from this pool when
* this function is called.
*/
-void tee_shm_pool_free(struct tee_shm_pool *pool);
+static inline void tee_shm_pool_free(struct tee_shm_pool *pool)
+{
+ pool->ops->destroy_pool(pool);
+}
/**
* tee_get_drvdata() - Return driver_data pointer
@@ -302,54 +270,20 @@ void tee_shm_pool_free(struct tee_shm_pool *pool);
*/
void *tee_get_drvdata(struct tee_device *teedev);
-/**
- * tee_shm_alloc() - Allocate shared memory
- * @ctx: Context that allocates the shared memory
- * @size: Requested size of shared memory
- * @flags: Flags setting properties for the requested shared memory.
- *
- * Memory allocated as global shared memory is automatically freed when the
- * TEE file pointer is closed. The @flags field uses the bits defined by
- * TEE_SHM_* above. TEE_SHM_MAPPED must currently always be set. If
- * TEE_SHM_DMA_BUF global shared memory will be allocated and associated
- * with a dma-buf handle, else driver private memory.
- *
- * @returns a pointer to 'struct tee_shm'
- */
-struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);
-
-/**
- * tee_shm_priv_alloc() - Allocate shared memory privately
- * @dev: Device that allocates the shared memory
- * @size: Requested size of shared memory
- *
- * Allocates shared memory buffer that is not associated with any client
- * context. Such buffers are owned by TEE driver and used for internal calls.
- *
- * @returns a pointer to 'struct tee_shm'
- */
-struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size);
+struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size);
+struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size);
-/**
- * tee_shm_register() - Register shared memory buffer
- * @ctx: Context that registers the shared memory
- * @addr: Address is userspace of the shared buffer
- * @length: Length of the shared buffer
- * @flags: Flags setting properties for the requested shared memory.
- *
- * @returns a pointer to 'struct tee_shm'
- */
-struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
- size_t length, u32 flags);
+struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
+ void *addr, size_t length);
/**
- * tee_shm_is_registered() - Check if shared memory object in registered in TEE
+ * tee_shm_is_dynamic() - Check if shared memory object is of the dynamic kind
* @shm: Shared memory handle
- * @returns true if object is registered in TEE
+ * @returns true if object is dynamic shared memory
*/
-static inline bool tee_shm_is_registered(struct tee_shm *shm)
+static inline bool tee_shm_is_dynamic(struct tee_shm *shm)
{
- return shm && (shm->flags & TEE_SHM_REGISTER);
+ return shm && (shm->flags & TEE_SHM_DYNAMIC);
}
/**
@@ -365,24 +299,6 @@ void tee_shm_free(struct tee_shm *shm);
void tee_shm_put(struct tee_shm *shm);
/**
- * tee_shm_va2pa() - Get physical address of a virtual address
- * @shm: Shared memory handle
- * @va: Virtual address to tranlsate
- * @pa: Returned physical address
- * @returns 0 on success and < 0 on failure
- */
-int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa);
-
-/**
- * tee_shm_pa2va() - Get virtual address of a physical address
- * @shm: Shared memory handle
- * @pa: Physical address to tranlsate
- * @va: Returned virtual address
- * @returns 0 on success and < 0 on failure
- */
-int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va);
-
-/**
* tee_shm_get_va() - Get virtual address of a shared memory plus an offset
* @shm: Shared memory handle
* @offs: Offset from start of this shared memory
@@ -577,4 +493,18 @@ struct tee_client_driver {
#define to_tee_client_driver(d) \
container_of(d, struct tee_client_driver, driver)
+/**
+ * teedev_open() - Open a struct tee_device
+ * @teedev: Device to open
+ *
+ * @return a pointer to struct tee_context on success or an ERR_PTR on failure.
+ */
+struct tee_context *teedev_open(struct tee_device *teedev);
+
+/**
+ * teedev_close_context() - closes a struct tee_context
+ * @ctx: The struct tee_context to close
+ */
+void teedev_close_context(struct tee_context *ctx);
+
#endif /*__TEE_DRV_H*/
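Under the reworked pool API a driver provides a single struct tee_shm_pool with a three-function ops vector instead of two pool managers. A skeletal implementation with the allocation strategy left out; everything named example_* is hypothetical:

    static int example_pool_alloc(struct tee_shm_pool *pool,
                                  struct tee_shm *shm, size_t size,
                                  size_t align)
    {
            /* carve an align-aligned range of size bytes out of the
             * backing memory, then set shm->kaddr/paddr/size */
            return -ENOMEM;                /* placeholder */
    }

    static void example_pool_free(struct tee_shm_pool *pool,
                                  struct tee_shm *shm)
    {
            /* return shm's range to the backing memory */
    }

    static void example_pool_destroy(struct tee_shm_pool *pool)
    {
            kfree(pool);
    }

    static const struct tee_shm_pool_ops example_pool_ops = {
            .alloc        = example_pool_alloc,
            .free         = example_pool_free,
            .destroy_pool = example_pool_destroy,
    };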
diff --git a/include/linux/termios_internal.h b/include/linux/termios_internal.h
new file mode 100644
index 000000000000..d77f29e5e2b7
--- /dev/null
+++ b/include/linux/termios_internal.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TERMIOS_CONV_H
+#define _LINUX_TERMIOS_CONV_H
+
+#include <linux/uaccess.h>
+#include <asm/termios.h>
+
+/* intr=^C quit=^\ erase=del kill=^U
+ eof=^D vtime=\0 vmin=\1 sxtc=\0
+ start=^Q stop=^S susp=^Z eol=\0
+ reprint=^R discard=^O werase=^W lnext=^V
+ eol2=\0
+*/
+
+#ifdef VDSUSP
+#define INIT_C_CC_VDSUSP_EXTRA [VDSUSP] = 'Y'-0x40,
+#else
+#define INIT_C_CC_VDSUSP_EXTRA
+#endif
+
+#define INIT_C_CC { \
+ [VINTR] = 'C'-0x40, \
+ [VQUIT] = '\\'-0x40, \
+ [VERASE] = '\177', \
+ [VKILL] = 'U'-0x40, \
+ [VEOF] = 'D'-0x40, \
+ [VSTART] = 'Q'-0x40, \
+ [VSTOP] = 'S'-0x40, \
+ [VSUSP] = 'Z'-0x40, \
+ [VREPRINT] = 'R'-0x40, \
+ [VDISCARD] = 'O'-0x40, \
+ [VWERASE] = 'W'-0x40, \
+ [VLNEXT] = 'V'-0x40, \
+ INIT_C_CC_VDSUSP_EXTRA \
+ [VMIN] = 1 }
+
+int user_termio_to_kernel_termios(struct ktermios *, struct termio __user *);
+int kernel_termios_to_user_termio(struct termio __user *, struct ktermios *);
+#ifdef TCGETS2
+int user_termios_to_kernel_termios(struct ktermios *, struct termios2 __user *);
+int kernel_termios_to_user_termios(struct termios2 __user *, struct ktermios *);
+int user_termios_to_kernel_termios_1(struct ktermios *, struct termios __user *);
+int kernel_termios_to_user_termios_1(struct termios __user *, struct ktermios *);
+#else /* TCGETS2 */
+int user_termios_to_kernel_termios(struct ktermios *, struct termios __user *);
+int kernel_termios_to_user_termios(struct termios __user *, struct ktermios *);
+#endif /* TCGETS2 */
+
+#endif /* _LINUX_TERMIOS_CONV_H */
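The 'X'-0x40 arithmetic in INIT_C_CC above is the standard control-character encoding, mapping a letter to its Ctrl code:

    /* 'C' is 0x43, so 'C' - 0x40 is 0x03: ASCII ETX, i.e. ^C for VINTR. */
    BUILD_BUG_ON('C' - 0x40 != 0x03);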
diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h
index 13770cfe33ad..6673e4d4ac2e 100644
--- a/include/linux/textsearch.h
+++ b/include/linux/textsearch.h
@@ -23,7 +23,7 @@ struct ts_config;
struct ts_state
{
unsigned int offset;
- char cb[40];
+ char cb[48];
};
/**
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 126913c6a53b..9ecc128944a1 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -17,9 +17,6 @@
#include <linux/workqueue.h>
#include <uapi/linux/thermal.h>
-#define THERMAL_TRIPS_NONE -1
-#define THERMAL_MAX_TRIPS 12
-
/* invalid cooling state */
#define THERMAL_CSTATE_INVALID -1UL
@@ -32,39 +29,15 @@
/* use value, which < 0K, to indicate an invalid/uninitialized temperature */
#define THERMAL_TEMP_INVALID -274000
-/* Default Thermal Governor */
-#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
-#define DEFAULT_THERMAL_GOVERNOR "step_wise"
-#elif defined(CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE)
-#define DEFAULT_THERMAL_GOVERNOR "fair_share"
-#elif defined(CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE)
-#define DEFAULT_THERMAL_GOVERNOR "user_space"
-#elif defined(CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR)
-#define DEFAULT_THERMAL_GOVERNOR "power_allocator"
-#endif
-
struct thermal_zone_device;
struct thermal_cooling_device;
struct thermal_instance;
-
-enum thermal_device_mode {
- THERMAL_DEVICE_DISABLED = 0,
- THERMAL_DEVICE_ENABLED,
-};
-
-enum thermal_trip_type {
- THERMAL_TRIP_ACTIVE = 0,
- THERMAL_TRIP_PASSIVE,
- THERMAL_TRIP_HOT,
- THERMAL_TRIP_CRITICAL,
-};
+struct thermal_attr;
enum thermal_trend {
THERMAL_TREND_STABLE, /* temperature is stable */
 	THERMAL_TREND_RAISING, /* temperature is rising */
THERMAL_TREND_DROPPING, /* temperature is dropping */
- THERMAL_TREND_RAISE_FULL, /* apply highest cooling action */
- THERMAL_TREND_DROP_FULL, /* apply lowest cooling action */
};
/* Thermal notification reason */
@@ -77,6 +50,7 @@ enum thermal_notify_event {
THERMAL_DEVICE_UP, /* Thermal device is up after a down event */
THERMAL_DEVICE_POWER_CAPABILITY_CHANGED, /* power capability changed */
THERMAL_TABLE_CHANGED, /* Thermal table(s) changed */
+ THERMAL_EVENT_KEEP_ALIVE, /* Request for user space handler to respond */
};
struct thermal_zone_device_ops {
@@ -86,9 +60,7 @@ struct thermal_zone_device_ops {
struct thermal_cooling_device *);
int (*get_temp) (struct thermal_zone_device *, int *);
int (*set_trips) (struct thermal_zone_device *, int, int);
- int (*get_mode) (struct thermal_zone_device *,
- enum thermal_device_mode *);
- int (*set_mode) (struct thermal_zone_device *,
+ int (*change_mode) (struct thermal_zone_device *,
enum thermal_device_mode);
int (*get_trip_type) (struct thermal_zone_device *, int,
enum thermal_trip_type *);
@@ -100,25 +72,34 @@ struct thermal_zone_device_ops {
int (*set_emul_temp) (struct thermal_zone_device *, int);
int (*get_trend) (struct thermal_zone_device *, int,
enum thermal_trend *);
- int (*notify) (struct thermal_zone_device *, int,
- enum thermal_trip_type);
+ void (*hot)(struct thermal_zone_device *);
+ void (*critical)(struct thermal_zone_device *);
+};
+
+/**
+ * struct thermal_trip - representation of a point in temperature domain
+ * @temperature: temperature value in milliCelsius
+ * @hysteresis: relative hysteresis in milliCelsius
+ * @type: trip point type
+ */
+struct thermal_trip {
+ int temperature;
+ int hysteresis;
+ enum thermal_trip_type type;
};
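Trips are now plain data instead of per-index callbacks, so a driver can describe its zone with a static table; a hypothetical two-trip example (values in milliCelsius):

    static struct thermal_trip example_trips[] = {
            { .temperature = 75000, .hysteresis = 2000,
              .type = THERMAL_TRIP_PASSIVE },
            { .temperature = 95000, .hysteresis = 0,
              .type = THERMAL_TRIP_CRITICAL },
    };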
struct thermal_cooling_device_ops {
int (*get_max_state) (struct thermal_cooling_device *, unsigned long *);
int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *);
int (*set_cur_state) (struct thermal_cooling_device *, unsigned long);
- int (*get_requested_power)(struct thermal_cooling_device *,
- struct thermal_zone_device *, u32 *);
- int (*state2power)(struct thermal_cooling_device *,
- struct thermal_zone_device *, unsigned long, u32 *);
- int (*power2state)(struct thermal_cooling_device *,
- struct thermal_zone_device *, u32, unsigned long *);
+ int (*get_requested_power)(struct thermal_cooling_device *, u32 *);
+ int (*state2power)(struct thermal_cooling_device *, unsigned long, u32 *);
+ int (*power2state)(struct thermal_cooling_device *, u32, unsigned long *);
};
struct thermal_cooling_device {
int id;
- char type[THERMAL_NAME_LENGTH];
+ char *type;
struct device device;
struct device_node *np;
void *devdata;
@@ -130,11 +111,6 @@ struct thermal_cooling_device {
struct list_head node;
};
-struct thermal_attr {
- struct device_attribute attr;
- char name[THERMAL_NAME_LENGTH];
-};
-
/**
* struct thermal_zone_device - structure for a thermal zone
* @id: unique id number for each thermal zone
@@ -143,12 +119,14 @@ struct thermal_attr {
* @trip_temp_attrs: attributes for trip points for sysfs: trip temperature
* @trip_type_attrs: attributes for trip points for sysfs: trip type
* @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
+ * @mode: current mode of this thermal zone
* @devdata: private pointer for device private data
- * @trips: number of trip points the thermal zone supports
+ * @trips: an array of struct thermal_trip
+ * @num_trips: number of trip points the thermal zone supports
 * @trips_disabled:	bitmap for disabled trips
- * @passive_delay: number of milliseconds to wait between polls when
+ * @passive_delay_jiffies: number of jiffies to wait between polls when
* performing passive cooling.
- * @polling_delay: number of milliseconds to wait between polls when
+ * @polling_delay_jiffies: number of jiffies to wait between polls when
* checking whether trip points have been crossed (0 for
* interrupt driven systems)
* @temperature: current temperature. This is only for core code,
@@ -161,9 +139,6 @@ struct thermal_attr {
 *			trip point.
 * @prev_high_trip:	the above current temperature if you've crossed a
 *			passive trip point.
- * @forced_passive: If > 0, temperature at which to switch on all ACPI
- * processor cooling devices. Currently only used by the
- * step-wise governor.
* @need_update: if equals 1, thermal_zone_device_update needs to be invoked.
* @ops: operations this &thermal_zone_device supports
* @tzp: thermal zone parameters
@@ -185,18 +160,19 @@ struct thermal_zone_device {
struct thermal_attr *trip_temp_attrs;
struct thermal_attr *trip_type_attrs;
struct thermal_attr *trip_hyst_attrs;
+ enum thermal_device_mode mode;
void *devdata;
- int trips;
+ struct thermal_trip *trips;
+ int num_trips;
unsigned long trips_disabled; /* bitmap for disabled trips */
- int passive_delay;
- int polling_delay;
+ unsigned long passive_delay_jiffies;
+ unsigned long polling_delay_jiffies;
int temperature;
int last_temperature;
int emul_temperature;
int passive;
int prev_low_trip;
int prev_high_trip;
- unsigned int forced_passive;
atomic_t need_update;
struct thermal_zone_device_ops *ops;
struct thermal_zone_params *tzp;
@@ -318,109 +294,57 @@ struct thermal_zone_params {
int offset;
};
-struct thermal_genl_event {
- u32 orig;
- enum events event;
-};
+/* Function declarations */
+#ifdef CONFIG_THERMAL_OF
+struct thermal_zone_device *thermal_of_zone_register(struct device_node *sensor, int id, void *data,
+ const struct thermal_zone_device_ops *ops);
-/**
- * struct thermal_zone_of_device_ops - callbacks for handling DT based zones
- *
- * Mandatory:
- * @get_temp: a pointer to a function that reads the sensor temperature.
- *
- * Optional:
- * @get_trend: a pointer to a function that reads the sensor temperature trend.
- * @set_trips: a pointer to a function that sets a temperature window. When
- * this window is left the driver must inform the thermal core via
- * thermal_zone_device_update.
- * @set_emul_temp: a pointer to a function that sets sensor emulated
- * temperature.
- * @set_trip_temp: a pointer to a function that sets the trip temperature on
- * hardware.
- */
-struct thermal_zone_of_device_ops {
- int (*get_temp)(void *, int *);
- int (*get_trend)(void *, int, enum thermal_trend *);
- int (*set_trips)(void *, int, int);
- int (*set_emul_temp)(void *, int);
- int (*set_trip_temp)(void *, int, int);
-};
+struct thermal_zone_device *devm_thermal_of_zone_register(struct device *dev, int id, void *data,
+ const struct thermal_zone_device_ops *ops);
-/**
- * struct thermal_trip - representation of a point in temperature domain
- * @np: pointer to struct device_node that this trip point was created from
- * @temperature: temperature value in miliCelsius
- * @hysteresis: relative hysteresis in miliCelsius
- * @type: trip point type
- */
+void thermal_of_zone_unregister(struct thermal_zone_device *tz);
-struct thermal_trip {
- struct device_node *np;
- int temperature;
- int hysteresis;
- enum thermal_trip_type type;
-};
+void devm_thermal_of_zone_unregister(struct device *dev, struct thermal_zone_device *tz);
+
-/* Function declarations */
-#ifdef CONFIG_THERMAL_OF
-struct thermal_zone_device *
-thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
- const struct thermal_zone_of_device_ops *ops);
-void thermal_zone_of_sensor_unregister(struct device *dev,
- struct thermal_zone_device *tz);
-struct thermal_zone_device *devm_thermal_zone_of_sensor_register(
- struct device *dev, int id, void *data,
- const struct thermal_zone_of_device_ops *ops);
-void devm_thermal_zone_of_sensor_unregister(struct device *dev,
- struct thermal_zone_device *tz);
#else
-static inline struct thermal_zone_device *
-thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
- const struct thermal_zone_of_device_ops *ops)
+static inline
+struct thermal_zone_device *thermal_of_zone_register(struct device_node *sensor, int id, void *data,
+ const struct thermal_zone_device_ops *ops)
{
- return ERR_PTR(-ENODEV);
+ return ERR_PTR(-ENOTSUPP);
}
static inline
-void thermal_zone_of_sensor_unregister(struct device *dev,
- struct thermal_zone_device *tz)
+struct thermal_zone_device *devm_thermal_of_zone_register(struct device *dev, int id, void *data,
+ const struct thermal_zone_device_ops *ops)
{
+ return ERR_PTR(-ENOTSUPP);
}
-static inline struct thermal_zone_device *devm_thermal_zone_of_sensor_register(
- struct device *dev, int id, void *data,
- const struct thermal_zone_of_device_ops *ops)
+static inline void thermal_of_zone_unregister(struct thermal_zone_device *tz)
{
- return ERR_PTR(-ENODEV);
}
-static inline
-void devm_thermal_zone_of_sensor_unregister(struct device *dev,
- struct thermal_zone_device *tz)
+static inline void devm_thermal_of_zone_unregister(struct device *dev,
+ struct thermal_zone_device *tz)
{
}
-
#endif
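The replacement OF API drops the separate thermal_zone_of_device_ops and its void-pointer callbacks; sensor drivers now implement the regular zone ops directly. A minimal sketch, with the caveats in the comments (the demo_* names are invented, and the data pointer passed at registration is assumed to come back via tz->devdata, as the converted in-tree drivers use it):

/* Sketch only: demo_* names are hypothetical, not part of this patch. */
static int demo_get_temp(struct thermal_zone_device *tz, int *temp)
{
	struct demo_sensor *s = tz->devdata;	/* data passed at registration */

	*temp = demo_sensor_read_mc(s);		/* invented helper, millicelsius */
	return 0;
}

static const struct thermal_zone_device_ops demo_tz_ops = {
	.get_temp = demo_get_temp,
};

/* in probe(), for sensor cell 0 of the device's DT node: */
tzd = devm_thermal_of_zone_register(dev, 0, sensor, &demo_tz_ops);
if (IS_ERR(tzd))
	return PTR_ERR(tzd);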
-#if IS_ENABLED(CONFIG_THERMAL)
-static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
-{
- return cdev->ops->get_requested_power && cdev->ops->state2power &&
- cdev->ops->power2state;
-}
-
-int power_actor_get_max_power(struct thermal_cooling_device *,
- struct thermal_zone_device *tz, u32 *max_power);
-int power_actor_get_min_power(struct thermal_cooling_device *,
- struct thermal_zone_device *tz, u32 *min_power);
-int power_actor_set_power(struct thermal_cooling_device *,
- struct thermal_instance *, u32);
+#ifdef CONFIG_THERMAL
struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
void *, struct thermal_zone_device_ops *,
struct thermal_zone_params *, int, int);
+
void thermal_zone_device_unregister(struct thermal_zone_device *);
+struct thermal_zone_device *
+thermal_zone_device_register_with_trips(const char *, struct thermal_trip *, int, int,
+ void *, struct thermal_zone_device_ops *,
+ struct thermal_zone_params *, int, int);
+
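With struct thermal_trip public again, the *_with_trips() variant takes the whole trip table up front rather than a bare count, and the core keeps it in @trips/@num_trips. A sketch with invented values (temperatures in milliCelsius; the two trailing arguments are the passive and polling delays in milliseconds, which the core converts into the *_jiffies fields above):

/* Sketch only: values and demo_ops are illustrative. */
static struct thermal_trip demo_trips[] = {
	{ .temperature = 75000, .hysteresis = 2000, .type = THERMAL_TRIP_PASSIVE },
	{ .temperature = 95000, .hysteresis = 0,    .type = THERMAL_TRIP_CRITICAL },
};

tz = thermal_zone_device_register_with_trips("demo", demo_trips,
					     ARRAY_SIZE(demo_trips),
					     0,			/* writable-trip mask */
					     priv, &demo_ops,	/* ops as in the sketch above */
					     NULL,		/* default zone params */
					     1000, 2000);	/* passive/polling delay, ms */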
int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
struct thermal_cooling_device *,
unsigned long, unsigned long,
@@ -429,7 +353,6 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
struct thermal_cooling_device *);
void thermal_zone_device_update(struct thermal_zone_device *,
enum thermal_notify_event);
-void thermal_zone_set_trips(struct thermal_zone_device *);
struct thermal_cooling_device *thermal_cooling_device_register(const char *,
void *, const struct thermal_cooling_device_ops *);
@@ -447,24 +370,10 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
int thermal_zone_get_slope(struct thermal_zone_device *tz);
int thermal_zone_get_offset(struct thermal_zone_device *tz);
-int get_tz_trend(struct thermal_zone_device *, int);
-struct thermal_instance *get_thermal_instance(struct thermal_zone_device *,
- struct thermal_cooling_device *, int);
-void thermal_cdev_update(struct thermal_cooling_device *);
-void thermal_notify_framework(struct thermal_zone_device *, int);
+int thermal_zone_device_enable(struct thermal_zone_device *tz);
+int thermal_zone_device_disable(struct thermal_zone_device *tz);
+void thermal_zone_device_critical(struct thermal_zone_device *tz);
#else
-static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
-{ return false; }
-static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev,
- struct thermal_zone_device *tz, u32 *max_power)
-{ return 0; }
-static inline int power_actor_get_min_power(struct thermal_cooling_device *cdev,
- struct thermal_zone_device *tz,
- u32 *min_power)
-{ return -ENODEV; }
-static inline int power_actor_set_power(struct thermal_cooling_device *cdev,
- struct thermal_instance *tz, u32 power)
-{ return 0; }
static inline struct thermal_zone_device *thermal_zone_device_register(
const char *type, int trips, int mask, void *devdata,
struct thermal_zone_device_ops *ops,
@@ -474,28 +383,14 @@ static inline struct thermal_zone_device *thermal_zone_device_register(
static inline void thermal_zone_device_unregister(
struct thermal_zone_device *tz)
{ }
-static inline int thermal_zone_bind_cooling_device(
- struct thermal_zone_device *tz, int trip,
- struct thermal_cooling_device *cdev,
- unsigned long upper, unsigned long lower,
- unsigned int weight)
-{ return -ENODEV; }
-static inline int thermal_zone_unbind_cooling_device(
- struct thermal_zone_device *tz, int trip,
- struct thermal_cooling_device *cdev)
-{ return -ENODEV; }
-static inline void thermal_zone_device_update(struct thermal_zone_device *tz,
- enum thermal_notify_event event)
-{ }
-static inline void thermal_zone_set_trips(struct thermal_zone_device *tz)
-{ }
static inline struct thermal_cooling_device *
-thermal_cooling_device_register(char *type, void *devdata,
+thermal_cooling_device_register(const char *type, void *devdata,
const struct thermal_cooling_device_ops *ops)
{ return ERR_PTR(-ENODEV); }
static inline struct thermal_cooling_device *
thermal_of_cooling_device_register(struct device_node *np,
- char *type, void *devdata, const struct thermal_cooling_device_ops *ops)
+ const char *type, void *devdata,
+ const struct thermal_cooling_device_ops *ops)
{ return ERR_PTR(-ENODEV); }
static inline struct thermal_cooling_device *
devm_thermal_of_cooling_device_register(struct device *dev,
@@ -520,17 +415,12 @@ static inline int thermal_zone_get_slope(
static inline int thermal_zone_get_offset(
struct thermal_zone_device *tz)
{ return -ENODEV; }
-static inline int get_tz_trend(struct thermal_zone_device *tz, int trip)
+
+static inline int thermal_zone_device_enable(struct thermal_zone_device *tz)
+{ return -ENODEV; }
+
+static inline int thermal_zone_device_disable(struct thermal_zone_device *tz)
{ return -ENODEV; }
-static inline struct thermal_instance *
-get_thermal_instance(struct thermal_zone_device *tz,
- struct thermal_cooling_device *cdev, int trip)
-{ return ERR_PTR(-ENODEV); }
-static inline void thermal_cdev_update(struct thermal_cooling_device *cdev)
-{ }
-static inline void thermal_notify_framework(struct thermal_zone_device *tz,
- int trip)
-{ }
#endif /* CONFIG_THERMAL */
#endif /* __THERMAL_H__ */
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index e93e249a4e9b..9f392ec76f2b 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -9,8 +9,10 @@
#define _LINUX_THREAD_INFO_H
#include <linux/types.h>
+#include <linux/limits.h>
#include <linux/bug.h>
#include <linux/restart_block.h>
+#include <linux/errno.h>
#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
@@ -35,10 +37,42 @@ enum {
GOOD_STACK,
};
+#ifdef CONFIG_GENERIC_ENTRY
+enum syscall_work_bit {
+ SYSCALL_WORK_BIT_SECCOMP,
+ SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT,
+ SYSCALL_WORK_BIT_SYSCALL_TRACE,
+ SYSCALL_WORK_BIT_SYSCALL_EMU,
+ SYSCALL_WORK_BIT_SYSCALL_AUDIT,
+ SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH,
+ SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP,
+};
+
+#define SYSCALL_WORK_SECCOMP BIT(SYSCALL_WORK_BIT_SECCOMP)
+#define SYSCALL_WORK_SYSCALL_TRACEPOINT BIT(SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT)
+#define SYSCALL_WORK_SYSCALL_TRACE BIT(SYSCALL_WORK_BIT_SYSCALL_TRACE)
+#define SYSCALL_WORK_SYSCALL_EMU BIT(SYSCALL_WORK_BIT_SYSCALL_EMU)
+#define SYSCALL_WORK_SYSCALL_AUDIT BIT(SYSCALL_WORK_BIT_SYSCALL_AUDIT)
+#define SYSCALL_WORK_SYSCALL_USER_DISPATCH BIT(SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH)
+#define SYSCALL_WORK_SYSCALL_EXIT_TRAP BIT(SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP)
+#endif
+
#include <asm/thread_info.h>
#ifdef __KERNEL__
+#ifndef arch_set_restart_data
+#define arch_set_restart_data(restart) do { } while (0)
+#endif
+
+static inline long set_restart_fn(struct restart_block *restart,
+ long (*fn)(struct restart_block *))
+{
+ restart->fn = fn;
+ arch_set_restart_data(restart);
+ return -ERESTART_RESTARTBLOCK;
+}
+
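set_restart_fn() exists so restartable syscalls stop open-coding the store of restart->fn plus the new arch hook; it also hands back the -ERESTART_RESTARTBLOCK the caller must return. A sketch modeled on the nanosleep-style users (demo_restart and demo_sleep_until are invented):

static long demo_restart(struct restart_block *restart)
{
	return demo_sleep_until(restart->nanosleep.expires);	/* invented */
}

/* in the syscall, when interrupted by a signal before @expires: */
struct restart_block *restart = &current->restart_block;

restart->nanosleep.expires = expires;
return set_restart_fn(restart, demo_restart);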
#ifndef THREAD_ALIGN
#define THREAD_ALIGN THREAD_SIZE
#endif
@@ -84,6 +118,15 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
return test_bit(flag, (unsigned long *)&ti->flags);
}
+/*
+ * This may be used in noinstr code, and needs to be __always_inline to prevent
+ * inadvertent instrumentation.
+ */
+static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti)
+{
+ return READ_ONCE(ti->flags);
+}
+
#define set_thread_flag(flag) \
set_ti_thread_flag(current_thread_info(), flag)
#define clear_thread_flag(flag) \
@@ -96,6 +139,43 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
test_and_clear_ti_thread_flag(current_thread_info(), flag)
#define test_thread_flag(flag) \
test_ti_thread_flag(current_thread_info(), flag)
+#define read_thread_flags() \
+ read_ti_thread_flags(current_thread_info())
+
+#define read_task_thread_flags(t) \
+ read_ti_thread_flags(task_thread_info(t))
+
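read_thread_flags() lets exit-to-user loops take one READ_ONCE() snapshot of all flags and branch on it, instead of issuing a test_bit() per flag. A sketch of the pattern (the _TIF_* masks are arch-defined; the signal helper is invented):

unsigned long work = read_thread_flags();	/* single racy snapshot */

if (work & _TIF_NEED_RESCHED)
	schedule();
if (work & _TIF_SIGPENDING)
	demo_handle_signal_work();		/* invented helper */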
+#ifdef CONFIG_GENERIC_ENTRY
+#define set_syscall_work(fl) \
+ set_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
+#define test_syscall_work(fl) \
+ test_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
+#define clear_syscall_work(fl) \
+ clear_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
+
+#define set_task_syscall_work(t, fl) \
+ set_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
+#define test_task_syscall_work(t, fl) \
+ test_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
+#define clear_task_syscall_work(t, fl) \
+ clear_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
+
+#else /* CONFIG_GENERIC_ENTRY */
+
+#define set_syscall_work(fl) \
+ set_ti_thread_flag(current_thread_info(), TIF_##fl)
+#define test_syscall_work(fl) \
+ test_ti_thread_flag(current_thread_info(), TIF_##fl)
+#define clear_syscall_work(fl) \
+ clear_ti_thread_flag(current_thread_info(), TIF_##fl)
+
+#define set_task_syscall_work(t, fl) \
+ set_ti_thread_flag(task_thread_info(t), TIF_##fl)
+#define test_task_syscall_work(t, fl) \
+ test_ti_thread_flag(task_thread_info(t), TIF_##fl)
+#define clear_task_syscall_work(t, fl) \
+ clear_ti_thread_flag(task_thread_info(t), TIF_##fl)
+#endif /* !CONFIG_GENERIC_ENTRY */
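Both halves of the #ifdef keep the same call sites working: under CONFIG_GENERIC_ENTRY the names expand to SYSCALL_WORK_BIT_* operations on syscall_work, otherwise they fall back to the matching TIF_* flag. A sketch of the producer/consumer pairing (the seccomp hook name is invented):

/* producer side, e.g. when seccomp is attached to @task: */
set_task_syscall_work(task, SECCOMP);

/* consumer side, e.g. in syscall entry before dispatch: */
if (test_syscall_work(SECCOMP))
	ret = demo_seccomp_check(regs);		/* invented hook */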
#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
@@ -129,15 +209,18 @@ __bad_copy_from(void);
extern void __compiletime_error("copy destination size is too small")
__bad_copy_to(void);
+void __copy_overflow(int size, unsigned long count);
+
static inline void copy_overflow(int size, unsigned long count)
{
- WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+ if (IS_ENABLED(CONFIG_BUG))
+ __copy_overflow(size, count);
}
static __always_inline __must_check bool
check_copy_size(const void *addr, size_t bytes, bool is_source)
{
- int sz = __compiletime_object_size(addr);
+ int sz = __builtin_object_size(addr, 0);
if (unlikely(sz >= 0 && sz < bytes)) {
if (!__builtin_constant_p(bytes))
copy_overflow(sz, bytes);
diff --git a/include/linux/threads.h b/include/linux/threads.h
index 3086dba525e2..c34173e6c5f1 100644
--- a/include/linux/threads.h
+++ b/include/linux/threads.h
@@ -29,7 +29,7 @@
/*
* A maximum of 4 million PIDs should be enough for a while.
- * [NOTE: PID/TIDs are limited to 2^29 ~= 500+ million, see futex.h.]
+ * [NOTE: PID/TIDs are limited to 2^30 ~= 1 billion, see FUTEX_TID_MASK.]
*/
#define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \
(sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT))
@@ -38,7 +38,7 @@
* Define a minimum number of pids per cpu. Heuristically based
* on original pid max of 32k for 32 cpus. Also, increase the
* minimum settable value for pid_max on the running system based
- * on similar defaults. See kernel/pid.c:pidmap_init() for details.
+ * on similar defaults. See kernel/pid.c:pid_idr_init() for details.
*/
#define PIDS_PER_CPU_DEFAULT 1024
#define PIDS_PER_CPU_MIN 8
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index ece782ef5466..90cd08ab2f5d 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -45,6 +45,8 @@ enum tb_cfg_pkg_type {
* @TB_SECURITY_USBONLY: Only tunnel USB controller of the connected
* Thunderbolt dock (and Display Port). All PCIe
* links downstream of the dock are removed.
+ * @TB_SECURITY_NOPCIE: For USB4 systems this level is used when the
+ * PCIe tunneling is disabled from the BIOS.
*/
enum tb_security_level {
TB_SECURITY_NONE,
@@ -52,6 +54,7 @@ enum tb_security_level {
TB_SECURITY_SECURE,
TB_SECURITY_DPONLY,
TB_SECURITY_USBONLY,
+ TB_SECURITY_NOPCIE,
};
/**
@@ -80,7 +83,7 @@ struct tb {
int index;
enum tb_security_level security_level;
size_t nboot_acl;
- unsigned long privdata[0];
+ unsigned long privdata[];
};
extern struct bus_type tb_bus_type;
@@ -143,6 +146,7 @@ struct tb_property_dir *tb_property_parse_dir(const u32 *block,
size_t block_len);
ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
size_t block_len);
+struct tb_property_dir *tb_property_copy_dir(const struct tb_property_dir *dir);
struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid);
void tb_property_free_dir(struct tb_property_dir *dir);
int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
@@ -176,30 +180,34 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
* @route: Route string the other domain can be reached
* @vendor: Vendor ID of the remote domain
 * @device: Device ID of the remote domain
+ * @local_max_hopid: Maximum input HopID of this host
+ * @remote_max_hopid: Maximum input HopID of the remote host
* @lock: Lock to serialize access to the following fields of this structure
* @vendor_name: Name of the vendor (or %NULL if not known)
* @device_name: Name of the device (or %NULL if not known)
+ * @link_speed: Speed of the link in Gb/s
+ * @link_width: Width of the link (1 or 2)
+ * @link_usb4: Downstream link is USB4
* @is_unplugged: The XDomain is unplugged
- * @resume: The XDomain is being resumed
* @needs_uuid: If the XDomain does not have @remote_uuid it will be
* queried first
- * @transmit_path: HopID which the remote end expects us to transmit
- * @transmit_ring: Local ring (hop) where outgoing packets are pushed
- * @receive_path: HopID which we expect the remote end to transmit
- * @receive_ring: Local ring (hop) where incoming packets arrive
* @service_ids: Used to generate IDs for the services
- * @properties: Properties exported by the remote domain
- * @property_block_gen: Generation of @properties
- * @properties_lock: Lock protecting @properties.
- * @get_uuid_work: Work used to retrieve @remote_uuid
- * @uuid_retries: Number of times left @remote_uuid is requested before
- * giving up
- * @get_properties_work: Work used to get remote domain properties
- * @properties_retries: Number of times left to read properties
+ * @in_hopids: Input HopIDs for DMA tunneling
+ * @out_hopids: Output HopIDs for DMA tunneling
+ * @local_property_block: Local block of properties
+ * @local_property_block_gen: Generation of @local_property_block
+ * @local_property_block_len: Length of the @local_property_block in dwords
+ * @remote_properties: Properties exported by the remote domain
+ * @remote_property_block_gen: Generation of @remote_properties
+ * @state: Next XDomain discovery state to run
+ * @state_work: Work used to run the next state
+ * @state_retries: Number of retries remaining for the state
* @properties_changed_work: Work used to notify the remote domain that
* our properties have changed
* @properties_changed_retries: Number of times left to send properties
* changed notification
+ * @bonding_possible: True if lane bonding is possible on local side
+ * @target_link_width: Target link width from the remote host
* @link: Root switch link the remote domain is connected (ICM only)
* @depth: Depth in the chain the remote domain is connected (ICM only)
*
@@ -220,33 +228,53 @@ struct tb_xdomain {
u64 route;
u16 vendor;
u16 device;
+ unsigned int local_max_hopid;
+ unsigned int remote_max_hopid;
struct mutex lock;
const char *vendor_name;
const char *device_name;
+ unsigned int link_speed;
+ unsigned int link_width;
+ bool link_usb4;
bool is_unplugged;
- bool resume;
bool needs_uuid;
- u16 transmit_path;
- u16 transmit_ring;
- u16 receive_path;
- u16 receive_ring;
struct ida service_ids;
- struct tb_property_dir *properties;
- u32 property_block_gen;
- struct delayed_work get_uuid_work;
- int uuid_retries;
- struct delayed_work get_properties_work;
- int properties_retries;
+ struct ida in_hopids;
+ struct ida out_hopids;
+ u32 *local_property_block;
+ u32 local_property_block_gen;
+ u32 local_property_block_len;
+ struct tb_property_dir *remote_properties;
+ u32 remote_property_block_gen;
+ int state;
+ struct delayed_work state_work;
+ int state_retries;
struct delayed_work properties_changed_work;
int properties_changed_retries;
+ bool bonding_possible;
+ u8 target_link_width;
u8 link;
u8 depth;
};
-int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
- u16 transmit_ring, u16 receive_path,
- u16 receive_ring);
-int tb_xdomain_disable_paths(struct tb_xdomain *xd);
+int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd);
+void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd);
+int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid);
+void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid);
+int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid);
+void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid);
+int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
+ int transmit_ring, int receive_path,
+ int receive_ring);
+int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
+ int transmit_ring, int receive_path,
+ int receive_ring);
+
+static inline int tb_xdomain_disable_all_paths(struct tb_xdomain *xd)
+{
+ return tb_xdomain_disable_paths(xd, -1, -1, -1, -1);
+}
+
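DMA path setup is now two-step: reserve input HopIDs, then enable the paths with all four endpoints spelled out; teardown mirrors it, with the -1 wildcards of tb_xdomain_disable_all_paths() tearing everything down at once. A sketch of the expected flow, assuming a negative @hopid asks the allocator for any free ID (names are illustrative):

int in_hopid, ret;

in_hopid = tb_xdomain_alloc_in_hopid(xd, -1);	/* assumed: -1 = any free ID */
if (in_hopid < 0)
	return in_hopid;

ret = tb_xdomain_enable_paths(xd, tx_path, tx_ring, in_hopid, rx_ring);
if (ret) {
	tb_xdomain_release_in_hopid(xd, in_hopid);
	return ret;
}

/* ... use the DMA paths ... */

tb_xdomain_disable_all_paths(xd);
tb_xdomain_release_in_hopid(xd, in_hopid);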
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route);
@@ -344,6 +372,9 @@ void tb_unregister_protocol_handler(struct tb_protocol_handler *handler);
* @prtcvers: Protocol version from the properties directory
* @prtcrevs: Protocol software revision from the properties directory
* @prtcstns: Protocol settings mask from the properties directory
+ * @debugfs_dir: Pointer to the service debugfs directory. Always created
+ * when debugfs is enabled. Can be used by service drivers to
+ * add their own entries under the service.
*
* Each domain exposes set of services it supports as collection of
* properties. For each service there will be one corresponding
@@ -357,6 +388,7 @@ struct tb_service {
u32 prtcvers;
u32 prtcrevs;
u32 prtcstns;
+ struct dentry *debugfs_dir;
};
static inline struct tb_service *tb_service_get(struct tb_service *svc)
@@ -436,9 +468,11 @@ static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
* @msix_ida: Used to allocate MSI-X vectors for rings
* @going_away: The host controller device is about to disappear so when
* this flag is set, avoid touching the hardware anymore.
+ * @iommu_dma_protection: An IOMMU will isolate external-facing ports.
* @interrupt_work: Work scheduled to handle ring interrupt when no
* MSI-X is used.
* @hop_count: Number of rings (end point hops) supported by NHI.
+ * @quirks: NHI specific quirks if any
*/
struct tb_nhi {
spinlock_t lock;
@@ -449,8 +483,10 @@ struct tb_nhi {
struct tb_ring **rx_rings;
struct ida msix_ida;
bool going_away;
+ bool iommu_dma_protection;
struct work_struct interrupt_work;
u32 hop_count;
+ unsigned long quirks;
};
/**
@@ -471,6 +507,8 @@ struct tb_nhi {
* @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
* @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
* @flags: Ring specific flags
+ * @e2e_tx_hop: Transmit HopID when E2E is enabled. Only applicable to
+ * RX ring. For TX ring this should be set to %0.
* @sof_mask: Bit mask used to detect start of frame PDF
* @eof_mask: Bit mask used to detect end of frame PDF
* @start_poll: Called when ring interrupt is triggered to start
@@ -494,6 +532,7 @@ struct tb_ring {
int irq;
u8 vector;
unsigned int flags;
+ int e2e_tx_hop;
u16 sof_mask;
u16 eof_mask;
void (*start_poll)(void *data);
@@ -554,7 +593,8 @@ struct ring_frame {
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
unsigned int flags);
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
- unsigned int flags, u16 sof_mask, u16 eof_mask,
+ unsigned int flags, int e2e_tx_hop,
+ u16 sof_mask, u16 eof_mask,
void (*start_poll)(void *), void *poll_data);
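RX ring allocation gains the explicit @e2e_tx_hop parameter described above. A sketch of a caller (ring size, masks and hop values are illustrative; per the field comment, pass 0 unless end-to-end flow control is in use):

ring = tb_ring_alloc_rx(nhi, -1 /* any hop */, 256, RING_FLAG_FRAME,
			0 /* e2e_tx_hop: unused without E2E */,
			sof_mask, eof_mask, NULL, NULL);
if (!ring)
	return -ENOMEM;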
void tb_ring_start(struct tb_ring *ring);
void tb_ring_stop(struct tb_ring *ring);
diff --git a/include/linux/ti-emif-sram.h b/include/linux/ti-emif-sram.h
index 2fc854155c27..441b2988e66a 100644
--- a/include/linux/ti-emif-sram.h
+++ b/include/linux/ti-emif-sram.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI AM33XX EMIF Routines
*
* Copyright (C) 2016-2017 Texas Instruments Inc.
* Dave Gerlach
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_TI_EMIF_H
#define __LINUX_TI_EMIF_H
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h
index eb6cbdf10e50..44a7f9169ac6 100644
--- a/include/linux/ti_wilink_st.h
+++ b/include/linux/ti_wilink_st.h
@@ -295,7 +295,7 @@ struct bts_header {
u32 magic;
u32 version;
u8 future[24];
- u8 actions[0];
+ u8 actions[];
} __attribute__ ((packed));
/**
@@ -305,7 +305,7 @@ struct bts_header {
struct bts_action {
u16 type;
u16 size;
- u8 data[0];
+ u8 data[];
} __attribute__ ((packed));
struct bts_action_send {
@@ -315,7 +315,7 @@ struct bts_action_send {
struct bts_action_wait {
u32 msec;
u32 size;
- u8 data[0];
+ u8 data[];
} __attribute__ ((packed));
struct bts_action_delay {
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 7340613c7eff..bfd571f18cfd 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -11,6 +11,7 @@
#include <linux/context_tracking_state.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
+#include <linux/rcupdate.h>
#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void __init tick_init(void);
@@ -185,13 +186,17 @@ static inline bool tick_nohz_full_enabled(void)
return tick_nohz_full_running;
}
-static inline bool tick_nohz_full_cpu(int cpu)
-{
- if (!tick_nohz_full_enabled())
- return false;
-
- return cpumask_test_cpu(cpu, tick_nohz_full_mask);
-}
+/*
+ * Check if a CPU is part of the nohz_full subset. Arrange for evaluating
+ * the cpu expression (typically smp_processor_id()) _after_ the static
+ * key.
+ */
+#define tick_nohz_full_cpu(_cpu) ({ \
+ bool __ret = false; \
+ if (tick_nohz_full_enabled()) \
+ __ret = cpumask_test_cpu((_cpu), tick_nohz_full_mask); \
+ __ret; \
+})
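The change from an inline to a macro is about argument evaluation: @_cpu is now only evaluated once tick_nohz_full_enabled() has passed its static-key test, which matters when the argument is itself costly or context-sensitive, e.g.:

/* smp_processor_id() is never evaluated unless nohz_full is running */
if (tick_nohz_full_cpu(smp_processor_id()))
	demo_do_nohz_full_work();	/* invented helper */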
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
{
@@ -207,7 +212,7 @@ extern void tick_nohz_dep_set_task(struct task_struct *tsk,
enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_task(struct task_struct *tsk,
enum tick_dep_bits bit);
-extern void tick_nohz_dep_set_signal(struct signal_struct *signal,
+extern void tick_nohz_dep_set_signal(struct task_struct *tsk,
enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_signal(struct signal_struct *signal,
enum tick_dep_bits bit);
@@ -252,11 +257,11 @@ static inline void tick_dep_clear_task(struct task_struct *tsk,
if (tick_nohz_full_enabled())
tick_nohz_dep_clear_task(tsk, bit);
}
-static inline void tick_dep_set_signal(struct signal_struct *signal,
+static inline void tick_dep_set_signal(struct task_struct *tsk,
enum tick_dep_bits bit)
{
if (tick_nohz_full_enabled())
- tick_nohz_dep_set_signal(signal, bit);
+ tick_nohz_dep_set_signal(tsk, bit);
}
static inline void tick_dep_clear_signal(struct signal_struct *signal,
enum tick_dep_bits bit)
@@ -284,7 +289,7 @@ static inline void tick_dep_set_task(struct task_struct *tsk,
enum tick_dep_bits bit) { }
static inline void tick_dep_clear_task(struct task_struct *tsk,
enum tick_dep_bits bit) { }
-static inline void tick_dep_set_signal(struct signal_struct *signal,
+static inline void tick_dep_set_signal(struct task_struct *tsk,
enum tick_dep_bits bit) { }
static inline void tick_dep_clear_signal(struct signal_struct *signal,
enum tick_dep_bits bit) { }
@@ -300,4 +305,10 @@ static inline void tick_nohz_task_switch(void)
__tick_nohz_task_switch();
}
+static inline void tick_nohz_user_enter_prepare(void)
+{
+ if (tick_nohz_full_cpu(smp_processor_id()))
+ rcu_nocb_flush_deferred_wakeup();
+}
+
#endif
diff --git a/include/linux/tifm.h b/include/linux/tifm.h
index 299cbb8c63bb..44073d06710f 100644
--- a/include/linux/tifm.h
+++ b/include/linux/tifm.h
@@ -124,7 +124,7 @@ struct tifm_adapter {
int (*has_ms_pif)(struct tifm_adapter *fm,
struct tifm_dev *sock);
- struct tifm_dev *sockets[0];
+ struct tifm_dev *sockets[];
};
struct tifm_adapter *tifm_alloc_adapter(unsigned int num_sockets,
diff --git a/include/linux/time.h b/include/linux/time.h
index 8ef5e5cc9f57..16cf4522d6f3 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -3,7 +3,6 @@
#define _LINUX_TIME_H
# include <linux/cache.h>
-# include <linux/seqlock.h>
# include <linux/math64.h>
# include <linux/time64.h>
@@ -22,19 +21,6 @@ extern time64_t mktime64(const unsigned int year, const unsigned int mon,
const unsigned int day, const unsigned int hour,
const unsigned int min, const unsigned int sec);
-/* Some architectures do not supply their own clocksource.
- * This is mainly the case in architectures that get their
- * inter-tick times by reading the counter on their interval
- * timer. Since these timers wrap every tick, they're not really
- * useful as clocksources. Wrapping them to act like one is possible
- * but not very efficient. So we provide a callout these arches
- * can implement for use with the jiffies clocksource to provide
- * finer then tick granular time.
- */
-#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
-extern u32 (*arch_gettimeoffset)(void);
-#endif
-
#ifdef CONFIG_POSIX_TIMERS
extern void clear_itimer(void);
#else
@@ -111,9 +97,6 @@ static inline bool itimerspec64_valid(const struct itimerspec64 *its)
*/
#define time_between32(t, l, h) ((u32)(h) - (u32)(l) >= (u32)(t) - (u32)(l))
-struct timens_offset {
- s64 sec;
- u64 nsec;
-};
+# include <vdso/time.h>
#endif
diff --git a/include/linux/time32.h b/include/linux/time32.h
index cf9320cd2d0b..83a400b6ba99 100644
--- a/include/linux/time32.h
+++ b/include/linux/time32.h
@@ -12,17 +12,7 @@
#include <linux/time64.h>
#include <linux/timex.h>
-typedef s32 old_time32_t;
-
-struct old_timespec32 {
- old_time32_t tv_sec;
- s32 tv_nsec;
-};
-
-struct old_timeval32 {
- old_time32_t tv_sec;
- s32 tv_usec;
-};
+#include <vdso/time32.h>
struct old_itimerspec32 {
struct old_timespec32 it_interval;
diff --git a/include/linux/time64.h b/include/linux/time64.h
index 19125489ae94..f1bcea8c124a 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -3,6 +3,7 @@
#define _LINUX_TIME64_H
#include <linux/math64.h>
+#include <vdso/time64.h>
typedef __s64 time64_t;
typedef __u64 timeu64_t;
@@ -20,20 +21,16 @@ struct itimerspec64 {
};
/* Parameters used to convert the timespec values: */
-#define MSEC_PER_SEC 1000L
-#define USEC_PER_MSEC 1000L
-#define NSEC_PER_USEC 1000L
-#define NSEC_PER_MSEC 1000000L
-#define USEC_PER_SEC 1000000L
-#define NSEC_PER_SEC 1000000000L
-#define FSEC_PER_SEC 1000000000000000LL
+#define PSEC_PER_NSEC 1000L
/* Located here for timespec[64]_valid_strict */
#define TIME64_MAX ((s64)~((u64)1 << 63))
#define TIME64_MIN (-TIME64_MAX - 1)
#define KTIME_MAX ((s64)~((u64)1 << 63))
+#define KTIME_MIN (-KTIME_MAX - 1)
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
+#define KTIME_SEC_MIN (KTIME_MIN / NSEC_PER_SEC)
/*
* Limits for settimeofday():
@@ -132,6 +129,13 @@ static inline bool timespec64_valid_settod(const struct timespec64 *ts)
*/
static inline s64 timespec64_to_ns(const struct timespec64 *ts)
{
+ /* Prevent multiplication overflow / underflow */
+ if (ts->tv_sec >= KTIME_SEC_MAX)
+ return KTIME_MAX;
+
+ if (ts->tv_sec <= KTIME_SEC_MIN)
+ return KTIME_MIN;
+
return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
}
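A worked example of the new clamping: previously a huge tv_sec made the NSEC_PER_SEC multiply overflow, which is undefined behavior for signed s64; now the conversion saturates. Sketch (S64_MAX per linux/limits.h):

struct timespec64 ts = { .tv_sec = S64_MAX, .tv_nsec = 0 };

/* S64_MAX >= KTIME_SEC_MAX, so the multiply is skipped and KTIME_MAX returned */
WARN_ON(timespec64_to_ns(&ts) != KTIME_MAX);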
@@ -141,7 +145,7 @@ static inline s64 timespec64_to_ns(const struct timespec64 *ts)
*
* Returns the timespec64 representation of the nsec parameter.
*/
-extern struct timespec64 ns_to_timespec64(const s64 nsec);
+extern struct timespec64 ns_to_timespec64(s64 nsec);
/**
* timespec64_add_ns - Adds nanoseconds to a timespec64
diff --git a/include/linux/time_namespace.h b/include/linux/time_namespace.h
index 824d54e057eb..3146f1c056c9 100644
--- a/include/linux/time_namespace.h
+++ b/include/linux/time_namespace.h
@@ -4,7 +4,6 @@
#include <linux/sched.h>
-#include <linux/kref.h>
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
#include <linux/err.h>
@@ -18,7 +17,6 @@ struct timens_offsets {
};
struct time_namespace {
- struct kref kref;
struct user_namespace *user_ns;
struct ucounts *ucounts;
struct ns_common ns;
@@ -33,23 +31,25 @@ extern struct time_namespace init_time_ns;
#ifdef CONFIG_TIME_NS
extern int vdso_join_timens(struct task_struct *task,
struct time_namespace *ns);
+extern void timens_commit(struct task_struct *tsk, struct time_namespace *ns);
static inline struct time_namespace *get_time_ns(struct time_namespace *ns)
{
- kref_get(&ns->kref);
+ refcount_inc(&ns->ns.count);
return ns;
}
struct time_namespace *copy_time_ns(unsigned long flags,
struct user_namespace *user_ns,
struct time_namespace *old_ns);
-void free_time_ns(struct kref *kref);
-int timens_on_fork(struct nsproxy *nsproxy, struct task_struct *tsk);
+void free_time_ns(struct time_namespace *ns);
+void timens_on_fork(struct nsproxy *nsproxy, struct task_struct *tsk);
struct vdso_data *arch_get_vdso_data(void *vvar_page);
static inline void put_time_ns(struct time_namespace *ns)
{
- kref_put(&ns->kref, free_time_ns);
+ if (refcount_dec_and_test(&ns->ns.count))
+ free_time_ns(ns);
}
void proc_timens_show_offsets(struct task_struct *p, struct seq_file *m);
@@ -76,6 +76,20 @@ static inline void timens_add_boottime(struct timespec64 *ts)
*ts = timespec64_add(*ts, ns_offsets->boottime);
}
+static inline u64 timens_add_boottime_ns(u64 nsec)
+{
+ struct timens_offsets *ns_offsets = &current->nsproxy->time_ns->offsets;
+
+ return nsec + timespec64_to_ns(&ns_offsets->boottime);
+}
+
+static inline void timens_sub_boottime(struct timespec64 *ts)
+{
+ struct timens_offsets *ns_offsets = &current->nsproxy->time_ns->offsets;
+
+ *ts = timespec64_sub(*ts, ns_offsets->boottime);
+}
+
ktime_t do_timens_ktime_to_host(clockid_t clockid, ktime_t tim,
struct timens_offsets *offsets);
@@ -96,6 +110,11 @@ static inline int vdso_join_timens(struct task_struct *task,
return 0;
}
+static inline void timens_commit(struct task_struct *tsk,
+ struct time_namespace *ns)
+{
+}
+
static inline struct time_namespace *get_time_ns(struct time_namespace *ns)
{
return NULL;
@@ -116,14 +135,22 @@ struct time_namespace *copy_time_ns(unsigned long flags,
return old_ns;
}
-static inline int timens_on_fork(struct nsproxy *nsproxy,
+static inline void timens_on_fork(struct nsproxy *nsproxy,
struct task_struct *tsk)
{
- return 0;
+ return;
}
static inline void timens_add_monotonic(struct timespec64 *ts) { }
static inline void timens_add_boottime(struct timespec64 *ts) { }
+
+static inline u64 timens_add_boottime_ns(u64 nsec)
+{
+ return nsec;
+}
+
+static inline void timens_sub_boottime(struct timespec64 *ts) { }
+
static inline ktime_t timens_ktime_to_host(clockid_t clockid, ktime_t tim)
{
return tim;
diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h
index 754b74a2167f..c6540ceea143 100644
--- a/include/linux/timecounter.h
+++ b/include/linux/timecounter.h
@@ -124,7 +124,7 @@ extern u64 timecounter_read(struct timecounter *tc);
* This allows conversion of cycle counter values which were generated
* in the past.
*/
-extern u64 timecounter_cyc2time(struct timecounter *tc,
+extern u64 timecounter_cyc2time(const struct timecounter *tc,
u64 cycle_tstamp);
#endif
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index b27e2ffa96c1..fe1e467ba046 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -3,6 +3,7 @@
#define _LINUX_TIMEKEEPING_H
#include <linux/errno.h>
+#include <linux/clocksource_ids.h>
/* Included from linux/ktime.h */
@@ -10,8 +11,7 @@ void timekeeping_init(void);
extern int timekeeping_suspended;
/* Architecture timer tick functions: */
-extern void update_process_times(int user);
-extern void xtime_update(unsigned long ticks);
+extern void legacy_timer_tick(unsigned long ticks);
/*
* Get and set timeofday
@@ -177,6 +177,7 @@ static inline u64 ktime_get_raw_ns(void)
extern u64 ktime_get_mono_fast_ns(void);
extern u64 ktime_get_raw_fast_ns(void);
extern u64 ktime_get_boot_fast_ns(void);
+extern u64 ktime_get_tai_fast_ns(void);
extern u64 ktime_get_real_fast_ns(void);
/*
@@ -223,8 +224,20 @@ extern bool timekeeping_rtc_skipresume(void);
extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta);
/*
+ * struct ktime_timestamps - Simultaneous mono/boot/real timestamps
+ * @mono: Monotonic timestamp
+ * @boot: Boottime timestamp
+ * @real: Realtime timestamp
+ */
+struct ktime_timestamps {
+ u64 mono;
+ u64 boot;
+ u64 real;
+};
+
+/**
* struct system_time_snapshot - simultaneous raw/real time capture with
- * counter value
+ * counter value
* @cycles: Clocksource counter value to produce the system times
* @real: Realtime system time
* @raw: Monotonic raw system time
@@ -232,16 +245,17 @@ extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta);
* @cs_was_changed_seq: The sequence number of clocksource change events
*/
struct system_time_snapshot {
- u64 cycles;
- ktime_t real;
- ktime_t raw;
- unsigned int clock_was_set_seq;
- u8 cs_was_changed_seq;
+ u64 cycles;
+ ktime_t real;
+ ktime_t raw;
+ enum clocksource_ids cs_id;
+ unsigned int clock_was_set_seq;
+ u8 cs_was_changed_seq;
};
-/*
+/**
* struct system_device_crosststamp - system/device cross-timestamp
- * (syncronized capture)
+ * (synchronized capture)
* @device: Device time
* @sys_realtime: Realtime simultaneous with device time
* @sys_monoraw: Monotonic raw simultaneous with device time
@@ -252,12 +266,12 @@ struct system_device_crosststamp {
ktime_t sys_monoraw;
};
-/*
+/**
* struct system_counterval_t - system counter value with the pointer to the
- * corresponding clocksource
+ * corresponding clocksource
* @cycles: System counter value
* @cs: Clocksource corresponding to system counter value. Used by
- * timekeeping code to verify comparibility of two cycle values
+ *			timekeeping code to verify comparability of two cycle values
*/
struct system_counterval_t {
u64 cycles;
@@ -280,6 +294,9 @@ extern int get_device_system_crosststamp(
*/
extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
+/* NMI safe mono/boot/realtime timestamps */
+extern void ktime_get_fast_timestamps(struct ktime_timestamps *snap);
+
/*
* Persistent clock related interfaces
*/
@@ -288,6 +305,8 @@ extern int persistent_clock_is_local;
extern void read_persistent_clock64(struct timespec64 *ts);
void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock,
struct timespec64 *boot_offset);
+#ifdef CONFIG_GENERIC_CMOS_UPDATE
extern int update_persistent_clock64(struct timespec64 now);
+#endif
#endif
diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h
deleted file mode 100644
index 266017fc9ee9..000000000000
--- a/include/linux/timekeeping32.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef _LINUX_TIMEKEEPING32_H
-#define _LINUX_TIMEKEEPING32_H
-/*
- * These interfaces are all based on the old timespec type
- * and should get replaced with the timespec64 based versions
- * over time so we can remove the file here.
- */
-
-static inline unsigned long get_seconds(void)
-{
- return ktime_get_real_seconds();
-}
-
-#endif
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 1e6650ed066d..648f00105f58 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -67,6 +67,7 @@ struct timer_list {
#define TIMER_DEFERRABLE 0x00080000
#define TIMER_PINNED 0x00100000
#define TIMER_IRQSAFE 0x00200000
+#define TIMER_INIT_FLAGS (TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE)
#define TIMER_ARRAYSHIFT 22
#define TIMER_ARRAYMASK 0xFFC00000
@@ -164,7 +165,7 @@ static inline void destroy_timer_on_stack(struct timer_list *timer) { }
*/
static inline int timer_pending(const struct timer_list * timer)
{
- return timer->entry.pprev != NULL;
+ return !hlist_unhashed_lockless(&timer->entry);
}
extern void add_timer_on(struct timer_list *timer, int cpu);
@@ -192,19 +193,9 @@ extern int try_to_del_timer_sync(struct timer_list *timer);
#define del_singleshot_timer_sync(t) del_timer_sync(t)
extern void init_timers(void);
-extern void run_local_timers(void);
struct hrtimer;
extern enum hrtimer_restart it_real_fn(struct hrtimer *);
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
-struct ctl_table;
-
-extern unsigned int sysctl_timer_migration;
-int timer_migration_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
-#endif
-
unsigned long __round_jiffies(unsigned long j, int cpu);
unsigned long __round_jiffies_relative(unsigned long j, int cpu);
unsigned long round_jiffies(unsigned long j);
diff --git a/include/linux/timex.h b/include/linux/timex.h
index ce0859763670..3871b06bd302 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -62,6 +62,8 @@
#include <linux/types.h>
#include <linux/param.h>
+unsigned long random_get_entropy_fallback(void);
+
#include <asm/timex.h>
#ifndef random_get_entropy
@@ -74,8 +76,14 @@
*
* By default we use get_cycles() for this purpose, but individual
* architectures may override this in their asm/timex.h header file.
+ * If a given arch does not have get_cycles(), then we fall back to
+ * using random_get_entropy_fallback().
*/
-#define random_get_entropy() get_cycles()
+#ifdef get_cycles
+#define random_get_entropy() ((unsigned long)get_cycles())
+#else
+#define random_get_entropy() random_get_entropy_fallback()
+#endif
#endif
/*
@@ -133,7 +141,7 @@
/*
* kernel variables
- * Note: maximum error = NTP synch distance = dispersion + delay / 2;
+ * Note: maximum error = NTP sync distance = dispersion + delay / 2;
* estimated error = NTP dispersion.
*/
extern unsigned long tick_usec; /* USER_HZ period (usec) */
@@ -157,7 +165,6 @@ extern int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex *
extern void hardpps(const struct timespec64 *, const struct timespec64 *);
int read_current_timer(unsigned long *timer_val);
-void ntp_notify_cmos_timer(void);
/* The clock frequency of the i8253/i8254 PIT */
#define PIT_TICK_RATE 1193182ul
diff --git a/include/linux/tnum.h b/include/linux/tnum.h
index ea627d1ab7e3..1c3948a1d6ad 100644
--- a/include/linux/tnum.h
+++ b/include/linux/tnum.h
@@ -21,7 +21,12 @@ struct tnum {
struct tnum tnum_const(u64 value);
/* A completely unknown value */
extern const struct tnum tnum_unknown;
-/* A value that's unknown except that @min <= value <= @max */
+/* An unknown value that is a superset of @min <= value <= @max.
+ *
+ * Could include values outside the range of [@min, @max].
+ * For example, tnum_range(0, 2) is represented by {0, 1, 2, *3*},
+ * rather than the intended set of {0, 1, 2}.
+ */
struct tnum tnum_range(u64 min, u64 max);
/* Arithmetic and logical ops */
@@ -73,7 +78,18 @@ static inline bool tnum_is_unknown(struct tnum a)
*/
bool tnum_is_aligned(struct tnum a, u64 size);
-/* Returns true if @b represents a subset of @a. */
+/* Returns true if @b represents a subset of @a.
+ *
+ * Note that using tnum_range() as @a requires extra caution, as tnum_in() may
+ * return true unexpectedly due to the limited ability of a tnum to represent a
+ * tight range, e.g.
+ *
+ * tnum_in(tnum_range(0, 2), tnum_const(3)) == true
+ *
+ * As a rule of thumb, if @a is explicitly coded rather than coming from
+ * reg->var_off, it should be in the form of tnum_const(), tnum_range(0, 2**n - 1),
+ * or tnum_range(2**n, 2**(n+1) - 1).
+ */
bool tnum_in(struct tnum a, struct tnum b);
/* Formatting functions. These have snprintf-like semantics: they will write
@@ -86,4 +102,16 @@ int tnum_strn(char *str, size_t size, struct tnum a);
/* Format a tnum as tristate binary expansion */
int tnum_sbin(char *str, size_t size, struct tnum a);
+/* Returns the 32-bit subreg */
+struct tnum tnum_subreg(struct tnum a);
+/* Returns the tnum with the lower 32-bit subreg cleared */
+struct tnum tnum_clear_subreg(struct tnum a);
+/* Returns the tnum with the lower 32-bit subreg set to value */
+struct tnum tnum_const_subreg(struct tnum a, u32 value);
+/* Returns true if 32-bit subreg @a is a known constant */
+static inline bool tnum_subreg_is_const(struct tnum a)
+{
+ return !(tnum_subreg(a)).mask;
+}
+
#endif /* _LINUX_TNUM_H */
diff --git a/include/linux/topology.h b/include/linux/topology.h
index eb2fe6edd73c..4564faafd0e1 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -48,6 +48,7 @@ int arch_update_cpu_topology(void);
/* Conform to ACPI 2.0 SLIT distance definitions */
#define LOCAL_DISTANCE 10
#define REMOTE_DISTANCE 20
+#define DISTANCE_BITS 8
#ifndef node_distance
#define node_distance(from,to) ((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)
#endif
@@ -130,20 +131,11 @@ static inline int numa_node_id(void)
* Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
*/
DECLARE_PER_CPU(int, _numa_mem_);
-extern int _node_numa_mem_[MAX_NUMNODES];
#ifndef set_numa_mem
static inline void set_numa_mem(int node)
{
this_cpu_write(_numa_mem_, node);
- _node_numa_mem_[numa_node_id()] = node;
-}
-#endif
-
-#ifndef node_to_mem_node
-static inline int node_to_mem_node(int node)
-{
- return _node_numa_mem_[node];
}
#endif
@@ -166,7 +158,6 @@ static inline int cpu_to_mem(int cpu)
static inline void set_cpu_numa_mem(int cpu, int node)
{
per_cpu(_numa_mem_, cpu) = node;
- _node_numa_mem_[cpu_to_node(cpu)] = node;
}
#endif
@@ -180,13 +171,6 @@ static inline int numa_mem_id(void)
}
#endif
-#ifndef node_to_mem_node
-static inline int node_to_mem_node(int node)
-{
- return node;
-}
-#endif
-
#ifndef cpu_to_mem
static inline int cpu_to_mem(int cpu)
{
@@ -196,26 +180,60 @@ static inline int cpu_to_mem(int cpu)
#endif /* [!]CONFIG_HAVE_MEMORYLESS_NODES */
+#if defined(topology_die_id) && defined(topology_die_cpumask)
+#define TOPOLOGY_DIE_SYSFS
+#endif
+#if defined(topology_cluster_id) && defined(topology_cluster_cpumask)
+#define TOPOLOGY_CLUSTER_SYSFS
+#endif
+#if defined(topology_book_id) && defined(topology_book_cpumask)
+#define TOPOLOGY_BOOK_SYSFS
+#endif
+#if defined(topology_drawer_id) && defined(topology_drawer_cpumask)
+#define TOPOLOGY_DRAWER_SYSFS
+#endif
+
#ifndef topology_physical_package_id
#define topology_physical_package_id(cpu) ((void)(cpu), -1)
#endif
#ifndef topology_die_id
#define topology_die_id(cpu) ((void)(cpu), -1)
#endif
+#ifndef topology_cluster_id
+#define topology_cluster_id(cpu) ((void)(cpu), -1)
+#endif
#ifndef topology_core_id
#define topology_core_id(cpu) ((void)(cpu), 0)
#endif
+#ifndef topology_book_id
+#define topology_book_id(cpu) ((void)(cpu), -1)
+#endif
+#ifndef topology_drawer_id
+#define topology_drawer_id(cpu) ((void)(cpu), -1)
+#endif
+#ifndef topology_ppin
+#define topology_ppin(cpu) ((void)(cpu), 0ull)
+#endif
#ifndef topology_sibling_cpumask
#define topology_sibling_cpumask(cpu) cpumask_of(cpu)
#endif
#ifndef topology_core_cpumask
#define topology_core_cpumask(cpu) cpumask_of(cpu)
#endif
+#ifndef topology_cluster_cpumask
+#define topology_cluster_cpumask(cpu) cpumask_of(cpu)
+#endif
#ifndef topology_die_cpumask
#define topology_die_cpumask(cpu) cpumask_of(cpu)
#endif
+#ifndef topology_book_cpumask
+#define topology_book_cpumask(cpu) cpumask_of(cpu)
+#endif
+#ifndef topology_drawer_cpumask
+#define topology_drawer_cpumask(cpu) cpumask_of(cpu)
+#endif
-#ifdef CONFIG_SCHED_SMT
+#if defined(CONFIG_SCHED_SMT) && !defined(cpu_smt_mask)
static inline const struct cpumask *cpu_smt_mask(int cpu)
{
return topology_sibling_cpumask(cpu);
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 6241f59e2d6f..7038104463e4 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -32,11 +32,30 @@
#define TOROUT_STRING(s) \
pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s)
#define VERBOSE_TOROUT_STRING(s) \
- do { if (verbose) pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s); } while (0)
-#define VERBOSE_TOROUT_ERRSTRING(s) \
- do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0)
+do { \
+ if (verbose) { \
+ verbose_torout_sleep(); \
+ pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s); \
+ } \
+} while (0)
+#define TOROUT_ERRSTRING(s) \
+ pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s)
+void verbose_torout_sleep(void);
+
+#define torture_init_error(firsterr) \
+({ \
+ int ___firsterr = (firsterr); \
+ \
+ WARN_ONCE(!IS_MODULE(CONFIG_RCU_TORTURE_TEST) && ___firsterr < 0, "Torture-test initialization failed with error code %d\n", ___firsterr); \
+ ___firsterr < 0; \
+})
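torture_init_error() folds the WARN_ONCE and the "< 0" test into one predicate, so each torture module's init path becomes a uniform two-liner per step (the init step here is invented):

firsterr = demo_torture_init_step();	/* any step returning 0 or -errno */
if (torture_init_error(firsterr))
	goto unwind;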
/* Definitions for online/offline exerciser. */
+#ifdef CONFIG_HOTPLUG_CPU
+int torture_num_online_cpus(void);
+#else /* #ifdef CONFIG_HOTPLUG_CPU */
+static inline int torture_num_online_cpus(void) { return 1; }
+#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
typedef void torture_ofl_func(void);
bool torture_offline(int cpu, long *n_onl_attempts, long *n_onl_successes,
unsigned long *sum_offl, int *min_onl, int *max_onl);
@@ -55,6 +74,18 @@ struct torture_random_state {
#define DEFINE_TORTURE_RANDOM_PERCPU(name) \
DEFINE_PER_CPU(struct torture_random_state, name)
unsigned long torture_random(struct torture_random_state *trsp);
+static inline void torture_random_init(struct torture_random_state *trsp)
+{
+ trsp->trs_state = 0;
+ trsp->trs_count = 0;
+}
+
+/* Definitions for high-resolution-timer sleeps. */
+int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_state *trsp);
+int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state *trsp);
+int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state *trsp);
+int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp);
+int torture_hrtimeout_s(u32 baset_s, u32 fuzzt_ms, struct torture_random_state *trsp);
/* Task shuffler, which causes CPUs to occasionally go idle. */
void torture_shuffle_task_register(struct task_struct *tp);
@@ -87,9 +118,9 @@ void _torture_stop_kthread(char *m, struct task_struct **tp);
_torture_stop_kthread("Stopping " #n " task", &(tp))
#ifdef CONFIG_PREEMPTION
-#define torture_preempt_schedule() preempt_schedule()
+#define torture_preempt_schedule() __preempt_schedule()
#else
-#define torture_preempt_schedule()
+#define torture_preempt_schedule() do { } while (0)
#endif
#endif /* __LINUX_TORTURE_H */
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 03e9b184411b..dfeb25a0362d 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -31,6 +31,7 @@ struct tpm_chip;
struct trusted_key_payload;
struct trusted_key_options;
+/* if you add a new hash to this, increment TPM_MAX_HASHES below */
enum tpm_algorithms {
TPM_ALG_ERROR = 0x0000,
TPM_ALG_SHA1 = 0x0004,
@@ -42,6 +43,12 @@ enum tpm_algorithms {
TPM_ALG_SM3_256 = 0x0012,
};
+/*
+ * maximum number of hashing algorithms a TPM can have. This is
+ * basically a count of every hash in tpm_algorithms above
+ */
+#define TPM_MAX_HASHES 5
+
struct tpm_digest {
u16 alg_id;
u8 digest[TPM_MAX_DIGEST_SIZE];
@@ -96,6 +103,7 @@ struct tpm_space {
u8 *context_buf;
u32 session_tbl[3];
u8 *session_buf;
+ u32 buf_size;
};
struct tpm_bios_log {
@@ -145,7 +153,7 @@ struct tpm_chip {
struct dentry *bios_dir[TPM_NUM_EVENT_LOG_FILES];
- const struct attribute_group *groups[3];
+ const struct attribute_group *groups[3 + TPM_MAX_HASHES];
unsigned int groups_cnt;
u32 nr_allocated_banks;
@@ -199,6 +207,7 @@ enum tpm2_return_codes {
TPM2_RC_INITIALIZE = 0x0100, /* RC_VER1 */
TPM2_RC_FAILURE = 0x0101,
TPM2_RC_DISABLED = 0x0120,
+ TPM2_RC_UPGRADE = 0x012D,
TPM2_RC_COMMAND_CODE = 0x0143,
TPM2_RC_TESTING = 0x090A, /* RC_WARN */
TPM2_RC_REFERENCE_H0 = 0x0910,
@@ -261,6 +270,7 @@ enum tpm2_cc_attrs {
#define TPM_VID_INTEL 0x8086
#define TPM_VID_WINBOND 0x1050
#define TPM_VID_STM 0x104A
+#define TPM_VID_ATML 0x1114
enum tpm_chip_flags {
TPM_CHIP_FLAG_TPM2 = BIT(1),
@@ -269,6 +279,7 @@ enum tpm_chip_flags {
TPM_CHIP_FLAG_HAVE_TIMEOUTS = BIT(4),
TPM_CHIP_FLAG_ALWAYS_POWERED = BIT(5),
TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED = BIT(6),
+ TPM_CHIP_FLAG_FIRMWARE_UPGRADE = BIT(7),
};
#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
@@ -297,6 +308,8 @@ struct tpm_buf {
};
enum tpm2_object_attributes {
+ TPM2_OA_FIXED_TPM = BIT(1),
+ TPM2_OA_FIXED_PARENT = BIT(4),
TPM2_OA_USER_WITH_AUTH = BIT(6),
};
@@ -388,6 +401,14 @@ static inline void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value)
tpm_buf_append(buf, (u8 *) &value2, 4);
}
+/*
+ * Check if TPM device is in the firmware upgrade mode.
+ */
+static inline bool tpm_is_firmware_upgrade(struct tpm_chip *chip)
+{
+ return chip->flags & TPM_CHIP_FLAG_FIRMWARE_UPGRADE;
+}
+
static inline u32 tpm2_rc_value(u32 rc)
{
return (rc & BIT(7)) ? rc & 0xff : rc;
@@ -396,6 +417,10 @@ static inline u32 tpm2_rc_value(u32 rc)
#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
extern int tpm_is_tpm2(struct tpm_chip *chip);
+extern __must_check int tpm_try_get_ops(struct tpm_chip *chip);
+extern void tpm_put_ops(struct tpm_chip *chip);
+extern ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_buf *buf,
+ size_t min_rsp_body_length, const char *desc);
extern int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
struct tpm_digest *digest);
extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
@@ -409,7 +434,6 @@ static inline int tpm_is_tpm2(struct tpm_chip *chip)
{
return -ENODEV;
}
-
static inline int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx,
struct tpm_digest *digest)
{
diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h
index 131ea1bad458..20c0ff54b7a0 100644
--- a/include/linux/tpm_eventlog.h
+++ b/include/linux/tpm_eventlog.h
@@ -28,7 +28,7 @@ struct tcpa_event {
u32 event_type;
u8 pcr_value[20]; /* SHA1 */
u32 event_size;
- u8 event_data[0];
+ u8 event_data[];
};
enum tcpa_event_types {
@@ -55,7 +55,7 @@ enum tcpa_event_types {
struct tcpa_pc_event {
u32 event_id;
u32 event_size;
- u8 event_data[0];
+ u8 event_data[];
};
enum tcpa_pc_event_ids {
@@ -81,6 +81,8 @@ struct tcg_efi_specid_event_algs {
u16 digest_size;
} __packed;
+#define TCG_SPECID_SIG "Spec ID Event03"
+
struct tcg_efi_specid_event_head {
u8 signature[16];
u32 platform_class;
@@ -97,12 +99,12 @@ struct tcg_pcr_event {
u32 event_type;
u8 digest[20];
u32 event_size;
- u8 event[0];
+ u8 event[];
} __packed;
struct tcg_event_field {
u32 event_size;
- u8 event[0];
+ u8 event[];
} __packed;
struct tcg_pcr_event2_head {
@@ -155,7 +157,7 @@ struct tcg_algorithm_info {
* Return: size of the event on success, 0 on failure
*/
-static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
+static __always_inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
struct tcg_pcr_event *event_header,
bool do_mapping)
{
@@ -171,6 +173,7 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
int i;
int j;
u32 count, event_type;
+ const u8 zero_digest[sizeof(event_header->digest)] = {0};
marker = event;
marker_start = marker;
@@ -198,10 +201,26 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
count = READ_ONCE(event->count);
event_type = READ_ONCE(event->event_type);
+ /* Verify that it's the log header */
+ if (event_header->pcr_idx != 0 ||
+ event_header->event_type != NO_ACTION ||
+ memcmp(event_header->digest, zero_digest, sizeof(zero_digest))) {
+ size = 0;
+ goto out;
+ }
+
efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
- /* Check if event is malformed. */
- if (count > efispecid->num_algs) {
+ /*
+ * Perform validation of the event in order to identify malformed
+ * events. This function may be asked to parse arbitrary byte sequences
+ * immediately following a valid event log. The caller expects this
+ * function to recognize that the byte sequence is not a valid event
+ * and to return an event size of 0.
+ */
+ if (memcmp(efispecid->signature, TCG_SPECID_SIG,
+ sizeof(TCG_SPECID_SIG)) ||
+ !efispecid->num_algs || count != efispecid->num_algs) {
size = 0;
goto out;
}
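
The validation added above matters because __calc_tpm2_event_size() is used to walk a log in place, returning 0 to stop the walk when the bytes under the cursor are not a valid event. A hedged sketch of such a walk follows; walk_tpm2_log(), log and log_size are hypothetical, and the shape loosely follows the in-kernel event log parsers.

/* Sketch only: iterate over a mapped crypto-agile (TPM 2.0) event log. */
static void walk_tpm2_log(void *log, size_t log_size)
{
	struct tcg_pcr_event *event_header = log;	/* SHA1-format header */
	struct tcg_pcr_event2_head *event;
	int size;

	/* The first crypto-agile event follows the header's event data. */
	event = (void *)event_header->event + event_header->event_size;

	while ((void *)event < log + log_size) {
		size = __calc_tpm2_event_size(event, event_header, false);
		if (!size)
			break;	/* not a valid event: stop the walk */
		/* ... consume the event at 'event' here ... */
		event = (void *)event + size;
	}
}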
diff --git a/include/linux/trace.h b/include/linux/trace.h
index 7fd86d3c691f..b5e16e438448 100644
--- a/include/linux/trace.h
+++ b/include/linux/trace.h
@@ -2,7 +2,10 @@
#ifndef _LINUX_TRACE_H
#define _LINUX_TRACE_H
-#ifdef CONFIG_TRACING
+#define TRACE_EXPORT_FUNCTION BIT(0)
+#define TRACE_EXPORT_EVENT BIT(1)
+#define TRACE_EXPORT_MARKER BIT(2)
+
/*
* The trace export - an export of Ftrace output. The trace_export
* can process traces and export them to a registered destination as
@@ -15,23 +18,68 @@
* next - pointer to the next trace_export
 * write - copy traces which have been dealt with via ->commit() to
* the destination
+ * flags - which ftrace outputs are to be exported
*/
struct trace_export {
struct trace_export __rcu *next;
void (*write)(struct trace_export *, const void *, unsigned int);
+ int flags;
};
+#ifdef CONFIG_TRACING
+
int register_ftrace_export(struct trace_export *export);
int unregister_ftrace_export(struct trace_export *export);
struct trace_array;
void trace_printk_init_buffers(void);
+__printf(3, 4)
int trace_array_printk(struct trace_array *tr, unsigned long ip,
- const char *fmt, ...);
+ const char *fmt, ...);
+int trace_array_init_printk(struct trace_array *tr);
void trace_array_put(struct trace_array *tr);
struct trace_array *trace_array_get_by_name(const char *name);
int trace_array_destroy(struct trace_array *tr);
+
+/* For osnoise tracer */
+int osnoise_arch_register(void);
+void osnoise_arch_unregister(void);
+void osnoise_trace_irq_entry(int id);
+void osnoise_trace_irq_exit(int id, const char *desc);
+
+#else /* CONFIG_TRACING */
+static inline int register_ftrace_export(struct trace_export *export)
+{
+ return -EINVAL;
+}
+static inline int unregister_ftrace_export(struct trace_export *export)
+{
+ return 0;
+}
+static inline void trace_printk_init_buffers(void)
+{
+}
+static inline int trace_array_printk(struct trace_array *tr, unsigned long ip,
+ const char *fmt, ...)
+{
+ return 0;
+}
+static inline int trace_array_init_printk(struct trace_array *tr)
+{
+ return -EINVAL;
+}
+static inline void trace_array_put(struct trace_array *tr)
+{
+}
+static inline struct trace_array *trace_array_get_by_name(const char *name)
+{
+ return NULL;
+}
+static inline int trace_array_destroy(struct trace_array *tr)
+{
+ return 0;
+}
#endif /* CONFIG_TRACING */
#endif /* _LINUX_TRACE_H */
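
With the flags field moved out from under CONFIG_TRACING, a trace_export user selects which ftrace outputs it wants to receive at registration time. A minimal, hedged registration sketch; my_export_write() and my_dev_write() are hypothetical names.

/* Sketch only: forward trace event records to an external destination. */
static void my_export_write(struct trace_export *export, const void *entry,
			    unsigned int size)
{
	my_dev_write(entry, size);	/* hypothetical backend */
}

static struct trace_export my_export = {
	.write	= my_export_write,
	.flags	= TRACE_EXPORT_EVENT,	/* events only, no function/marker */
};

static int __init my_export_init(void)
{
	return register_ftrace_export(&my_export);
}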
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 6c7a10a6d71e..20749bd9db71 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -15,6 +15,7 @@ struct array_buffer;
struct tracer;
struct dentry;
struct bpf_prog;
+union bpf_attr;
const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
unsigned long flags,
@@ -55,6 +56,8 @@ struct trace_event;
int trace_raw_output_prep(struct trace_iterator *iter,
struct trace_event *event);
+extern __printf(2, 3)
+void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);
/*
* The trace entry - the most basic unit of tracing. This is what
@@ -85,6 +88,11 @@ struct trace_iterator {
struct mutex mutex;
struct ring_buffer_iter **buffer_iter;
unsigned long iter_flags;
+ void *temp; /* temp holder */
+ unsigned int temp_size;
+ char *fmt; /* modified format holder */
+ unsigned int fmt_size;
+ long wait_index;
/* trace_seq for __print_flags() and __print_symbolic() etc. */
struct trace_seq tmp_seq;
@@ -146,17 +154,76 @@ enum print_line_t {
enum print_line_t trace_handle_return(struct trace_seq *s);
-void tracing_generic_entry_update(struct trace_entry *entry,
- unsigned short type,
- unsigned long flags,
- int pc);
+static inline void tracing_generic_entry_update(struct trace_entry *entry,
+ unsigned short type,
+ unsigned int trace_ctx)
+{
+ entry->preempt_count = trace_ctx & 0xff;
+ entry->pid = current->pid;
+ entry->type = type;
+ entry->flags = trace_ctx >> 16;
+}
+
+unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);
+
+enum trace_flag_type {
+ TRACE_FLAG_IRQS_OFF = 0x01,
+ TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
+ TRACE_FLAG_NEED_RESCHED = 0x04,
+ TRACE_FLAG_HARDIRQ = 0x08,
+ TRACE_FLAG_SOFTIRQ = 0x10,
+ TRACE_FLAG_PREEMPT_RESCHED = 0x20,
+ TRACE_FLAG_NMI = 0x40,
+ TRACE_FLAG_BH_OFF = 0x80,
+};
+
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
+{
+ unsigned int irq_status = irqs_disabled_flags(irqflags) ?
+ TRACE_FLAG_IRQS_OFF : 0;
+ return tracing_gen_ctx_irq_test(irq_status);
+}
+static inline unsigned int tracing_gen_ctx(void)
+{
+ unsigned long irqflags;
+
+ local_save_flags(irqflags);
+ return tracing_gen_ctx_flags(irqflags);
+}
+#else
+
+static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
+{
+ return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
+}
+static inline unsigned int tracing_gen_ctx(void)
+{
+ return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
+}
+#endif
+
+static inline unsigned int tracing_gen_ctx_dec(void)
+{
+ unsigned int trace_ctx;
+
+ trace_ctx = tracing_gen_ctx();
+ /*
+ * Subtract one from the preemption counter if preemption is enabled,
+ * see trace_event_buffer_reserve() for details.
+ */
+ if (IS_ENABLED(CONFIG_PREEMPTION))
+ trace_ctx--;
+ return trace_ctx;
+}
+
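+/*
+ * To make the packing explicit: tracing_gen_ctx() returns a single word
+ * with the preemption count in the low byte and the TRACE_FLAG_* bits in
+ * the upper 16 bits, which is exactly how tracing_generic_entry_update()
+ * above unpacks it. A small, hedged decode sketch:
+ *
+ *	static void show_trace_ctx_layout(void)
+ *	{
+ *		unsigned int trace_ctx = tracing_gen_ctx();
+ *		unsigned char preempt_count = trace_ctx & 0xff;	// low byte
+ *		unsigned short flags = trace_ctx >> 16;		// TRACE_FLAG_*
+ *
+ *		if (flags & TRACE_FLAG_HARDIRQ)
+ *			pr_info("hardirq, preempt_count=%u\n", preempt_count);
+ *	}
+ */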
struct trace_event_file;
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
struct trace_event_file *trace_file,
int type, unsigned long len,
- unsigned long flags, int pc);
+ unsigned int trace_ctx);
#define TRACE_RECORD_CMDLINE BIT(0)
#define TRACE_RECORD_TGID BIT(1)
@@ -230,8 +297,7 @@ struct trace_event_buffer {
struct ring_buffer_event *event;
struct trace_event_file *trace_file;
void *entry;
- unsigned long flags;
- int pc;
+ unsigned int trace_ctx;
struct pt_regs *regs;
};
@@ -247,8 +313,11 @@ enum {
TRACE_EVENT_FL_NO_SET_FILTER_BIT,
TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
TRACE_EVENT_FL_TRACEPOINT_BIT,
+ TRACE_EVENT_FL_DYNAMIC_BIT,
TRACE_EVENT_FL_KPROBE_BIT,
TRACE_EVENT_FL_UPROBE_BIT,
+ TRACE_EVENT_FL_EPROBE_BIT,
+ TRACE_EVENT_FL_CUSTOM_BIT,
};
/*
@@ -258,8 +327,13 @@ enum {
* NO_SET_FILTER - Set when filter has error and is to be ignored
* IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
* TRACEPOINT - Event is a tracepoint
+ * DYNAMIC - Event is a dynamic event (created at run time)
* KPROBE - Event is a kprobe
* UPROBE - Event is a uprobe
+ * EPROBE - Event is an event probe
+ * CUSTOM - Event is a custom event (to be attached to an existing tracepoint).
+ * This is set while the custom event has not yet been attached
+ * to a tracepoint, and cleared once it is.
*/
enum {
TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
@@ -267,8 +341,11 @@ enum {
TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
+ TRACE_EVENT_FL_DYNAMIC = (1 << TRACE_EVENT_FL_DYNAMIC_BIT),
TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT),
+ TRACE_EVENT_FL_EPROBE = (1 << TRACE_EVENT_FL_EPROBE_BIT),
+ TRACE_EVENT_FL_CUSTOM = (1 << TRACE_EVENT_FL_CUSTOM_BIT),
};
#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)
@@ -284,17 +361,17 @@ struct trace_event_call {
struct trace_event event;
char *print_fmt;
struct event_filter *filter;
- void *mod;
- void *data;
/*
- * bit 0: filter_active
- * bit 1: allow trace by non root (cap any)
- * bit 2: failed to apply filter
- * bit 3: trace internal event (do not enable)
- * bit 4: Event was enabled by module
- * bit 5: use call filter rather than file filter
- * bit 6: Event is a tracepoint
+ * Static events can disappear with modules,
+ * whereas dynamic ones need their own ref count.
*/
+ union {
+ void *module;
+ atomic_t refcnt;
+ };
+ void *data;
+
+ /* See the TRACE_EVENT_FL_* flags above */
int flags; /* static flags of different events */
#ifdef CONFIG_PERF_EVENTS
@@ -307,6 +384,42 @@ struct trace_event_call {
#endif
};
+#ifdef CONFIG_DYNAMIC_EVENTS
+bool trace_event_dyn_try_get_ref(struct trace_event_call *call);
+void trace_event_dyn_put_ref(struct trace_event_call *call);
+bool trace_event_dyn_busy(struct trace_event_call *call);
+#else
+static inline bool trace_event_dyn_try_get_ref(struct trace_event_call *call)
+{
+ /* Without DYNAMIC_EVENTS configured, nothing should be calling this */
+ return false;
+}
+static inline void trace_event_dyn_put_ref(struct trace_event_call *call)
+{
+}
+static inline bool trace_event_dyn_busy(struct trace_event_call *call)
+{
+ /* Nothing should call this without DYNAMIC_EVENTS configured. */
+ return true;
+}
+#endif
+
+static inline bool trace_event_try_get_ref(struct trace_event_call *call)
+{
+ if (call->flags & TRACE_EVENT_FL_DYNAMIC)
+ return trace_event_dyn_try_get_ref(call);
+ else
+ return try_module_get(call->module);
+}
+
+static inline void trace_event_put_ref(struct trace_event_call *call)
+{
+ if (call->flags & TRACE_EVENT_FL_DYNAMIC)
+ trace_event_dyn_put_ref(call);
+ else
+ module_put(call->module);
+}
+
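
Since static (module-backed) and dynamic events now share one union, callers pin an event through the wrappers above rather than touching the union directly. A hedged usage sketch; use_event() is a hypothetical name.

/* Sketch only: keep a trace_event_call alive while using it. */
static int use_event(struct trace_event_call *call)
{
	if (!trace_event_try_get_ref(call))
		return -ENODEV;	/* event (or its module) is going away */

	/* ... 'call' may be dereferenced safely here ... */

	trace_event_put_ref(call);
	return 0;
}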
#ifdef CONFIG_PERF_EVENTS
static inline bool bpf_prog_array_valid(struct trace_event_call *call)
{
@@ -334,7 +447,9 @@ static inline bool bpf_prog_array_valid(struct trace_event_call *call)
static inline const char *
trace_event_name(struct trace_event_call *call)
{
- if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
+ if (call->flags & TRACE_EVENT_FL_CUSTOM)
+ return call->name;
+ else if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
return call->tp ? call->tp->name : NULL;
else
return call->name;
@@ -348,7 +463,6 @@ trace_get_fields(struct trace_event_call *event_call)
return event_call->class->get_fields(event_call);
}
-struct trace_array;
struct trace_subsystem_dir;
enum {
@@ -567,9 +681,9 @@ struct trace_event_file {
} \
early_initcall(trace_init_perf_perm_##name);
-#define PERF_MAX_TRACE_SIZE 2048
+#define PERF_MAX_TRACE_SIZE 8192
-#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
+#define MAX_FILTER_STR_VAL 256U /* Should handle KSYM_SYMBOL_LEN */
enum event_trigger_type {
ETT_NONE = (0),
@@ -579,12 +693,14 @@ enum event_trigger_type {
ETT_EVENT_ENABLE = (1 << 3),
ETT_EVENT_HIST = (1 << 4),
ETT_HIST_ENABLE = (1 << 5),
+ ETT_EVENT_EPROBE = (1 << 6),
};
extern int filter_match_preds(struct event_filter *filter, void *rec);
extern enum event_trigger_type
-event_triggers_call(struct trace_event_file *file, void *rec,
+event_triggers_call(struct trace_event_file *file,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event);
extern void
event_triggers_post_call(struct trace_event_file *file,
@@ -592,6 +708,8 @@ event_triggers_post_call(struct trace_event_file *file,
bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
+bool __trace_trigger_soft_disabled(struct trace_event_file *file);
+
/**
* trace_trigger_soft_disabled - do triggers and test if soft disabled
* @file: The file pointer of the event to test
@@ -601,25 +719,25 @@ bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
* triggers that require testing the fields, it will return true,
* otherwise false.
*/
-static inline bool
+static __always_inline bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
unsigned long eflags = file->flags;
- if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
- if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
- event_triggers_call(file, NULL, NULL);
- if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
- return true;
- if (eflags & EVENT_FILE_FL_PID_FILTER)
- return trace_event_ignore_this_pid(file);
- }
- return false;
+ if (likely(!(eflags & (EVENT_FILE_FL_TRIGGER_MODE |
+ EVENT_FILE_FL_SOFT_DISABLED |
+ EVENT_FILE_FL_PID_FILTER))))
+ return false;
+
+ if (likely(eflags & EVENT_FILE_FL_TRIGGER_COND))
+ return false;
+
+ return __trace_trigger_soft_disabled(file);
}
#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
-int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog);
+int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void __user *info);
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
@@ -629,6 +747,7 @@ void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
u32 *fd_type, const char **buf,
u64 *probe_offset, u64 *probe_addr);
+int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
#else
static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
@@ -636,7 +755,7 @@ static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *c
}
static inline int
-perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog)
+perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie)
{
return -EOPNOTSUPP;
}
@@ -670,12 +789,18 @@ static inline int bpf_get_perf_event_info(const struct perf_event *event,
{
return -EOPNOTSUPP;
}
+static inline int
+bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
+}
#endif
enum {
FILTER_OTHER = 0,
FILTER_STATIC_STRING,
FILTER_DYN_STRING,
+ FILTER_RDYN_STRING,
FILTER_PTR_STRING,
FILTER_TRACE_FN,
FILTER_COMM,
@@ -690,8 +815,6 @@ extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);
-#define is_signed_type(type) (((type)(-1)) < (type)1)
-
int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
int trace_set_clr_event(const char *system, const char *event, int set);
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
@@ -707,7 +830,7 @@ do { \
tracing_record_cmdline(current); \
if (__builtin_constant_p(fmt)) { \
static const char *trace_printk_fmt \
- __attribute__((section("__trace_printk_fmt"))) = \
+ __section("__trace_printk_fmt") = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
__trace_bprintk(ip, trace_printk_fmt, ##args); \
@@ -747,6 +870,9 @@ extern void ftrace_profile_free_filter(struct perf_event *event);
void perf_trace_buf_update(void *record, u16 type);
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);
+int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
+void perf_event_free_bpf_prog(struct perf_event *event);
+
void bpf_trace_run1(struct bpf_prog *prog, u64 arg1);
void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2);
void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2,
@@ -789,4 +915,37 @@ perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
#endif
+#define TRACE_EVENT_STR_MAX 512
+
+/*
+ * gcc warns that you cannot use a va_list in an inlined
+ * function, so make it a macro instead :-/
+ */
+#define __trace_event_vstr_len(fmt, va) \
+({ \
+ va_list __ap; \
+ int __ret; \
+ \
+ va_copy(__ap, *(va)); \
+ __ret = vsnprintf(NULL, 0, fmt, __ap) + 1; \
+ va_end(__ap); \
+ \
+ min(__ret, TRACE_EVENT_STR_MAX); \
+})
+
#endif /* _LINUX_TRACE_EVENT_H */
+
+/*
+ * Note: we keep the TRACE_CUSTOM_EVENT outside the include file ifdef protection.
+ * This is due to the way trace custom events work. If a file includes two
+ * trace event headers under one "CREATE_CUSTOM_TRACE_EVENTS", the first include
+ * will override the TRACE_CUSTOM_EVENT and break the second include.
+ */
+
+#ifndef TRACE_CUSTOM_EVENT
+
+#define DECLARE_CUSTOM_EVENT_CLASS(name, proto, args, tstruct, assign, print)
+#define DEFINE_CUSTOM_EVENT(template, name, proto, args)
+#define TRACE_CUSTOM_EVENT(name, proto, args, struct, assign, print)
+
+#endif /* ifdef TRACE_CUSTOM_EVENT (see note above) */
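
The __trace_event_vstr_len() macro above sizes a printf-style string without consuming the caller's va_list, capping the result at TRACE_EVENT_STR_MAX. A hedged usage sketch; my_vemit() and my_emit() are hypothetical, and the on-stack buffer is for illustration only.

/* Sketch only: size and format a trace string under the cap. */
static void my_vemit(const char *fmt, va_list *va)
{
	int len = __trace_event_vstr_len(fmt, va);	/* includes the NUL */
	char buf[TRACE_EVENT_STR_MAX];

	vsnprintf(buf, len, fmt, *va);	/* 'va' is still usable: macro copies it */
	my_emit(buf, len);		/* hypothetical consumer */
}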
diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h
new file mode 100644
index 000000000000..c303f7a114e9
--- /dev/null
+++ b/include/linux/trace_recursion.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TRACE_RECURSION_H
+#define _LINUX_TRACE_RECURSION_H
+
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+
+#ifdef CONFIG_TRACING
+
+/* Only current can touch trace_recursion */
+
+/*
+ * For function tracing recursion:
+ * The order of these bits is important.
+ *
+ * When function tracing occurs, the following steps are made:
+ * If arch does not support a ftrace feature:
+ * call internal function (uses INTERNAL bits) which calls...
+ * The function callback, which can use the FTRACE bits to
+ * check for recursion.
+ */
+enum {
+ /* Function recursion bits */
+ TRACE_FTRACE_BIT,
+ TRACE_FTRACE_NMI_BIT,
+ TRACE_FTRACE_IRQ_BIT,
+ TRACE_FTRACE_SIRQ_BIT,
+ TRACE_FTRACE_TRANSITION_BIT,
+
+ /* Internal use recursion bits */
+ TRACE_INTERNAL_BIT,
+ TRACE_INTERNAL_NMI_BIT,
+ TRACE_INTERNAL_IRQ_BIT,
+ TRACE_INTERNAL_SIRQ_BIT,
+ TRACE_INTERNAL_TRANSITION_BIT,
+
+ TRACE_BRANCH_BIT,
+/*
+ * Abuse of the trace_recursion.
+ * We need a way to maintain state if we are tracing the function
+ * graph in irq context, because we want to trace a particular function
+ * that was called in irq context while irq tracing is off. Since this
+ * can only be modified by current, we can reuse trace_recursion.
+ */
+ TRACE_IRQ_BIT,
+
+ /* Set if the function is in the set_graph_function file */
+ TRACE_GRAPH_BIT,
+
+ /*
+ * In the very unlikely case that an interrupt came in
+ * at a start of graph tracing, and we want to trace
+ * the function in that interrupt, the depth can be greater
+ * than zero, because of the preempted start of a previous
+ * trace. In an even more unlikely case, depth could be 2
+ * if a softirq interrupted the start of graph tracing,
+ * followed by an interrupt preempting a start of graph
+ * tracing in the softirq, and depth can even be 3
+ * if an NMI came in at the start of an interrupt function
+ * that preempted a softirq start of a function that
+ * preempted normal context! Luckily, it can't be
+ * greater than 3, so the next two bits are a mask
+ * of what the depth is when we set TRACE_GRAPH_BIT.
+ */
+
+ TRACE_GRAPH_DEPTH_START_BIT,
+ TRACE_GRAPH_DEPTH_END_BIT,
+
+ /*
+ * To implement set_graph_notrace, if this bit is set, we ignore
+ * function graph tracing of called functions, until the return
+ * function is called to clear it.
+ */
+ TRACE_GRAPH_NOTRACE_BIT,
+
+ /* Used to prevent recursion recording from recursing. */
+ TRACE_RECORD_RECURSION_BIT,
+};
+
+#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
+#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
+#define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))
+
+#define trace_recursion_depth() \
+ (((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
+#define trace_recursion_set_depth(depth) \
+ do { \
+ current->trace_recursion &= \
+ ~(3 << TRACE_GRAPH_DEPTH_START_BIT); \
+ current->trace_recursion |= \
+ ((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT; \
+ } while (0)
+
+#define TRACE_CONTEXT_BITS 4
+
+#define TRACE_FTRACE_START TRACE_FTRACE_BIT
+
+#define TRACE_LIST_START TRACE_INTERNAL_BIT
+
+#define TRACE_CONTEXT_MASK ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
+
+/*
+ * Used for setting context
+ * NMI = 0
+ * IRQ = 1
+ * SOFTIRQ = 2
+ * NORMAL = 3
+ */
+enum {
+ TRACE_CTX_NMI,
+ TRACE_CTX_IRQ,
+ TRACE_CTX_SOFTIRQ,
+ TRACE_CTX_NORMAL,
+ TRACE_CTX_TRANSITION,
+};
+
+static __always_inline int trace_get_context_bit(void)
+{
+ unsigned char bit = interrupt_context_level();
+
+ return TRACE_CTX_NORMAL - bit;
+}
+
+#ifdef CONFIG_FTRACE_RECORD_RECURSION
+extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip);
+# define do_ftrace_record_recursion(ip, pip) \
+ do { \
+ if (!trace_recursion_test(TRACE_RECORD_RECURSION_BIT)) { \
+ trace_recursion_set(TRACE_RECORD_RECURSION_BIT); \
+ ftrace_record_recursion(ip, pip); \
+ trace_recursion_clear(TRACE_RECORD_RECURSION_BIT); \
+ } \
+ } while (0)
+#else
+# define do_ftrace_record_recursion(ip, pip) do { } while (0)
+#endif
+
+/*
+ * Preemption is guaranteed to be disabled when the returned bit is >= 0.
+ */
+static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsigned long pip,
+ int start)
+{
+ unsigned int val = READ_ONCE(current->trace_recursion);
+ int bit;
+
+ bit = trace_get_context_bit() + start;
+ if (unlikely(val & (1 << bit))) {
+ /*
+ * If an interrupt occurs during a trace, and another trace
+ * happens in that interrupt but before the preempt_count is
+ * updated to reflect the new interrupt context, then this
+ * will think a recursion occurred, and the event will be dropped.
+ * Let a single instance happen via the TRANSITION_BIT to
+ * not drop those events.
+ */
+ bit = TRACE_CTX_TRANSITION + start;
+ if (val & (1 << bit)) {
+ do_ftrace_record_recursion(ip, pip);
+ return -1;
+ }
+ }
+
+ val |= 1 << bit;
+ current->trace_recursion = val;
+ barrier();
+
+ preempt_disable_notrace();
+
+ return bit;
+}
+
+/*
+ * Preemption will be enabled (if it was previously enabled).
+ */
+static __always_inline void trace_clear_recursion(int bit)
+{
+ preempt_enable_notrace();
+ barrier();
+ trace_recursion_clear(bit);
+}
+
+/**
+ * ftrace_test_recursion_trylock - tests for recursion in same context
+ *
+ * Use this for ftrace callbacks. This will detect if the function
+ * tracing recursed in the same context (normal vs. interrupt).
+ *
+ * Returns: -1 if a recursion happened.
+ * >= 0 if no recursion.
+ */
+static __always_inline int ftrace_test_recursion_trylock(unsigned long ip,
+ unsigned long parent_ip)
+{
+ return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START);
+}
+
+/**
+ * ftrace_test_recursion_unlock - called when function callback is complete
+ * @bit: The return of a successful ftrace_test_recursion_trylock()
+ *
+ * This is used at the end of an ftrace callback.
+ */
+static __always_inline void ftrace_test_recursion_unlock(int bit)
+{
+ trace_clear_recursion(bit);
+}
+
+#endif /* CONFIG_TRACING */
+#endif /* _LINUX_TRACE_RECURSION_H */
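
The trylock/unlock pair above is the intended guard for every ftrace callback. A hedged sketch of the canonical pattern; the callback signature is the one used on recent kernels with struct ftrace_regs, and my_ftrace_callback() is a hypothetical name.

/* Sketch only: protect an ftrace callback against same-context recursion. */
static void my_ftrace_callback(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	int bit;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;	/* recursed in this context: drop the event */

	/* ... tracing work; preemption is disabled while 'bit' is held ... */

	ftrace_test_recursion_unlock(bit);
}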
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index 6c30508fca19..5a2c650d9e1c 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -12,7 +12,7 @@
*/
struct trace_seq {
- unsigned char buffer[PAGE_SIZE];
+ char buffer[PAGE_SIZE];
struct seq_buf seq;
int full;
};
@@ -51,7 +51,7 @@ static inline int trace_seq_used(struct trace_seq *s)
* that is about to be written to and then return the result
* of that write.
*/
-static inline unsigned char *
+static inline char *
trace_seq_buffer_ptr(struct trace_seq *s)
{
return s->buffer + seq_buf_used(&s->seq);
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
deleted file mode 100644
index 36fb3bbed6b2..000000000000
--- a/include/linux/tracehook.h
+++ /dev/null
@@ -1,201 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Tracing hooks
- *
- * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
- *
- * This file defines hook entry points called by core code where
- * user tracing/debugging support might need to do something. These
- * entry points are called tracehook_*(). Each hook declared below
- * has a detailed kerneldoc comment giving the context (locking et
- * al) from which it is called, and the meaning of its return value.
- *
- * Each function here typically has only one call site, so it is ok
- * to have some nontrivial tracehook_*() inlines. In all cases, the
- * fast path when no tracing is enabled should be very short.
- *
- * The purpose of this file and the tracehook_* layer is to consolidate
- * the interface that the kernel core and arch code uses to enable any
- * user debugging or tracing facility (such as ptrace). The interfaces
- * here are carefully documented so that maintainers of core and arch
- * code do not need to think about the implementation details of the
- * tracing facilities. Likewise, maintainers of the tracing code do not
- * need to understand all the calling core or arch code in detail, just
- * documented circumstances of each call, such as locking conditions.
- *
- * If the calling core code changes so that locking is different, then
- * it is ok to change the interface documented here. The maintainer of
- * core code changing should notify the maintainers of the tracing code
- * that they need to work out the change.
- *
- * Some tracehook_*() inlines take arguments that the current tracing
- * implementations might not necessarily use. These function signatures
- * are chosen to pass in all the information that is on hand in the
- * caller and might conceivably be relevant to a tracer, so that the
- * core code won't have to be updated when tracing adds more features.
- * If a call site changes so that some of those parameters are no longer
- * already on hand without extra work, then the tracehook_* interface
- * can change so there is no make-work burden on the core code. The
- * maintainer of core code changing should notify the maintainers of the
- * tracing code that they need to work out the change.
- */
-
-#ifndef _LINUX_TRACEHOOK_H
-#define _LINUX_TRACEHOOK_H 1
-
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/security.h>
-#include <linux/task_work.h>
-#include <linux/memcontrol.h>
-#include <linux/blk-cgroup.h>
-struct linux_binprm;
-
-/*
- * ptrace report for syscall entry and exit looks identical.
- */
-static inline int ptrace_report_syscall(struct pt_regs *regs,
- unsigned long message)
-{
- int ptrace = current->ptrace;
-
- if (!(ptrace & PT_PTRACED))
- return 0;
-
- current->ptrace_message = message;
- ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
-
- /*
- * this isn't the same as continuing with a signal, but it will do
- * for normal use. strace only continues with a signal if the
- * stopping signal is not SIGTRAP. -brl
- */
- if (current->exit_code) {
- send_sig(current->exit_code, current, 1);
- current->exit_code = 0;
- }
-
- current->ptrace_message = 0;
- return fatal_signal_pending(current);
-}
-
-/**
- * tracehook_report_syscall_entry - task is about to attempt a system call
- * @regs: user register state of current task
- *
- * This will be called if %TIF_SYSCALL_TRACE or %TIF_SYSCALL_EMU have been set,
- * when the current task has just entered the kernel for a system call.
- * Full user register state is available here. Changing the values
- * in @regs can affect the system call number and arguments to be tried.
- * It is safe to block here, preventing the system call from beginning.
- *
- * Returns zero normally, or nonzero if the calling arch code should abort
- * the system call. That must prevent normal entry so no system call is
- * made. If @task ever returns to user mode after this, its register state
- * is unspecified, but should be something harmless like an %ENOSYS error
- * return. It should preserve enough information so that syscall_rollback()
- * can work (see asm-generic/syscall.h).
- *
- * Called without locks, just after entering kernel mode.
- */
-static inline __must_check int tracehook_report_syscall_entry(
- struct pt_regs *regs)
-{
- return ptrace_report_syscall(regs, PTRACE_EVENTMSG_SYSCALL_ENTRY);
-}
-
-/**
- * tracehook_report_syscall_exit - task has just finished a system call
- * @regs: user register state of current task
- * @step: nonzero if simulating single-step or block-step
- *
- * This will be called if %TIF_SYSCALL_TRACE has been set, when the
- * current task has just finished an attempted system call. Full
- * user register state is available here. It is safe to block here,
- * preventing signals from being processed.
- *
- * If @step is nonzero, this report is also in lieu of the normal
- * trap that would follow the system call instruction because
- * user_enable_block_step() or user_enable_single_step() was used.
- * In this case, %TIF_SYSCALL_TRACE might not be set.
- *
- * Called without locks, just before checking for pending signals.
- */
-static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
-{
- if (step)
- user_single_step_report(regs);
- else
- ptrace_report_syscall(regs, PTRACE_EVENTMSG_SYSCALL_EXIT);
-}
-
-/**
- * tracehook_signal_handler - signal handler setup is complete
- * @stepping: nonzero if debugger single-step or block-step in use
- *
- * Called by the arch code after a signal handler has been set up.
- * Register and stack state reflects the user handler about to run.
- * Signal mask changes have already been made.
- *
- * Called without locks, shortly before returning to user mode
- * (or handling more signals).
- */
-static inline void tracehook_signal_handler(int stepping)
-{
- if (stepping)
- ptrace_notify(SIGTRAP);
-}
-
-/**
- * set_notify_resume - cause tracehook_notify_resume() to be called
- * @task: task that will call tracehook_notify_resume()
- *
- * Calling this arranges that @task will call tracehook_notify_resume()
- * before returning to user mode. If it's already running in user mode,
- * it will enter the kernel and call tracehook_notify_resume() soon.
- * If it's blocked, it will not be woken.
- */
-static inline void set_notify_resume(struct task_struct *task)
-{
-#ifdef TIF_NOTIFY_RESUME
- if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME))
- kick_process(task);
-#endif
-}
-
-/**
- * tracehook_notify_resume - report when about to return to user mode
- * @regs: user-mode registers of @current task
- *
- * This is called when %TIF_NOTIFY_RESUME has been set. Now we are
- * about to return to user mode, and the user state in @regs can be
- * inspected or adjusted. The caller in arch code has cleared
- * %TIF_NOTIFY_RESUME before the call. If the flag gets set again
- * asynchronously, this will be called again before we return to
- * user mode.
- *
- * Called without locks.
- */
-static inline void tracehook_notify_resume(struct pt_regs *regs)
-{
- /*
- * The caller just cleared TIF_NOTIFY_RESUME. This barrier
- * pairs with task_work_add()->set_notify_resume() after
- * hlist_add_head(task->task_works);
- */
- smp_mb__after_atomic();
- if (unlikely(current->task_works))
- task_work_run();
-
-#ifdef CONFIG_KEYS_REQUEST_CACHE
- if (unlikely(current->cached_requested_key)) {
- key_put(current->cached_requested_key);
- current->cached_requested_key = NULL;
- }
-#endif
-
- mem_cgroup_handle_over_high();
- blkcg_maybe_throttle_current();
-}
-
-#endif /* <linux/tracehook.h> */
diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h
index b29950a19205..e7c2276be33e 100644
--- a/include/linux/tracepoint-defs.h
+++ b/include/linux/tracepoint-defs.h
@@ -11,6 +11,8 @@
#include <linux/atomic.h>
#include <linux/static_key.h>
+struct static_call_key;
+
struct trace_print_flags {
unsigned long mask;
const char *name;
@@ -30,6 +32,9 @@ struct tracepoint_func {
struct tracepoint {
const char *name; /* Tracepoint name */
struct static_key key;
+ struct static_call_key *static_call_key;
+ void *static_call_tramp;
+ void *iterator;
int (*regfunc)(void);
void (*unregfunc)(void);
struct tracepoint_func __rcu *funcs;
@@ -48,4 +53,38 @@ struct bpf_raw_event_map {
u32 writable_size;
} __aligned(32);
+/*
+ * If a tracepoint needs to be called from a header file, it is not
+ * recommended to call it directly, as tracepoints in header files
+ * may cause side-effects and bloat the kernel. Instead, use
+ * tracepoint_enabled() to test if the tracepoint is enabled, then if
+ * it is, call a wrapper function defined in a C file that will then
+ * call the tracepoint.
+ *
+ * For "trace_foo_bar()", you would need to create a wrapper function
+ * in a C file to call trace_foo_bar():
+ * void do_trace_foo_bar(args) { trace_foo_bar(args); }
+ * Then in the header file, declare the tracepoint:
+ * DECLARE_TRACEPOINT(foo_bar);
+ * And call your wrapper:
+ * static inline void some_inlined_function() {
+ * [..]
+ * if (tracepoint_enabled(foo_bar))
+ * do_trace_foo_bar(args);
+ * [..]
+ * }
+ *
+ * Note: tracepoint_enabled(foo_bar) is equivalent to trace_foo_bar_enabled()
+ * but is safe to have in headers, where trace_foo_bar_enabled() is not.
+ */
+#define DECLARE_TRACEPOINT(tp) \
+ extern struct tracepoint __tracepoint_##tp
+
+#ifdef CONFIG_TRACEPOINTS
+# define tracepoint_enabled(tp) \
+ static_key_false(&(__tracepoint_##tp).key)
+#else
+# define tracepoint_enabled(tracepoint) false
+#endif
+
#endif
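
Rendering the pattern described in the comment above as a compilable pair of fragments; foo_bar and do_trace_foo_bar() are hypothetical names.

/* Sketch only. In a header: */
DECLARE_TRACEPOINT(foo_bar);
void do_trace_foo_bar(int arg);		/* out-of-line wrapper */

static inline void some_inlined_function(int arg)
{
	if (tracepoint_enabled(foo_bar))	/* cheap static-key test */
		do_trace_foo_bar(arg);
}

/* In a C file, where the full trace header may be included: */
void do_trace_foo_bar(int arg)
{
	trace_foo_bar(arg);
}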
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 1fb11daa5c53..4b33b95eb8be 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -19,6 +19,7 @@
#include <linux/cpumask.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint-defs.h>
+#include <linux/static_call.h>
struct module;
struct tracepoint;
@@ -40,7 +41,17 @@ extern int
tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, void *data,
int prio);
extern int
+tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe, void *data,
+ int prio);
+extern int
tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data);
+static inline int
+tracepoint_probe_register_may_exist(struct tracepoint *tp, void *probe,
+ void *data)
+{
+ return tracepoint_probe_register_prio_may_exist(tp, probe, data,
+ TRACEPOINT_DEFAULT_PRIO);
+}
extern void
for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
void *priv);
@@ -92,7 +103,9 @@ extern int syscall_regfunc(void);
extern void syscall_unregfunc(void);
#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
+#ifndef PARAMS
#define PARAMS(args...) args
+#endif
#define TRACE_DEFINE_ENUM(x)
#define TRACE_DEFINE_SIZEOF(x)
@@ -116,8 +129,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
#define __TRACEPOINT_ENTRY(name) \
static tracepoint_ptr_t __tracepoint_ptr_##name __used \
- __attribute__((section("__tracepoints_ptrs"))) = \
- &__tracepoint_##name
+ __section("__tracepoints_ptrs") = &__tracepoint_##name
#endif
#endif /* _LINUX_TRACEPOINT_H */
@@ -139,7 +151,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
/*
 * Individual subsystems may have a separate configuration to
* enable their tracepoints. By default, this file will create
- * the tracepoints if CONFIG_TRACEPOINT is defined. If a subsystem
+ * the tracepoints if CONFIG_TRACEPOINTS is defined. If a subsystem
* wants to be able to disable its tracepoints from being created
* it can define NOTRACE before including the tracepoint headers.
*/
@@ -149,21 +161,28 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
#ifdef TRACEPOINTS_ENABLED
+#ifdef CONFIG_HAVE_STATIC_CALL
+#define __DO_TRACE_CALL(name, args) \
+ do { \
+ struct tracepoint_func *it_func_ptr; \
+ void *__data; \
+ it_func_ptr = \
+ rcu_dereference_raw((&__tracepoint_##name)->funcs); \
+ if (it_func_ptr) { \
+ __data = (it_func_ptr)->data; \
+ static_call(tp_func_##name)(__data, args); \
+ } \
+ } while (0)
+#else
+#define __DO_TRACE_CALL(name, args) __traceiter_##name(NULL, args)
+#endif /* CONFIG_HAVE_STATIC_CALL */
+
/*
* it_func[0] is never NULL because there is at least one element in the array
 * when the array itself is non-NULL.
- *
- * Note, the proto and args passed in includes "__data" as the first parameter.
- * The reason for this is to handle the "void" prototype. If a tracepoint
- * has a "void" prototype, then it is invalid to declare a function
- * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
- * "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto".
*/
-#define __DO_TRACE(tp, proto, args, cond, rcuidle) \
+#define __DO_TRACE(name, args, cond, rcuidle) \
do { \
- struct tracepoint_func *it_func_ptr; \
- void *it_func; \
- void *__data; \
int __maybe_unused __idx = 0; \
\
if (!(cond)) \
@@ -181,21 +200,13 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
*/ \
if (rcuidle) { \
__idx = srcu_read_lock_notrace(&tracepoint_srcu);\
- rcu_irq_enter_irqson(); \
+ ct_irq_enter_irqson(); \
} \
\
- it_func_ptr = rcu_dereference_raw((tp)->funcs); \
- \
- if (it_func_ptr) { \
- do { \
- it_func = (it_func_ptr)->func; \
- __data = (it_func_ptr)->data; \
- ((void(*)(proto))(it_func))(args); \
- } while ((++it_func_ptr)->func); \
- } \
+ __DO_TRACE_CALL(name, TP_ARGS(args)); \
\
if (rcuidle) { \
- rcu_irq_exit_irqson(); \
+ ct_irq_exit_irqson(); \
srcu_read_unlock_notrace(&tracepoint_srcu, __idx);\
} \
\
@@ -203,17 +214,16 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
} while (0)
#ifndef MODULE
-#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) \
+#define __DECLARE_TRACE_RCU(name, proto, args, cond) \
static inline void trace_##name##_rcuidle(proto) \
{ \
if (static_key_false(&__tracepoint_##name.key)) \
- __DO_TRACE(&__tracepoint_##name, \
- TP_PROTO(data_proto), \
- TP_ARGS(data_args), \
+ __DO_TRACE(name, \
+ TP_ARGS(args), \
TP_CONDITION(cond), 1); \
}
#else
-#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args)
+#define __DECLARE_TRACE_RCU(name, proto, args, cond)
#endif
/*
@@ -228,14 +238,15 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
* even when this tracepoint is off. This code has no purpose other than
* poking RCU a bit.
*/
-#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
+#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \
+ extern int __traceiter_##name(data_proto); \
+ DECLARE_STATIC_CALL(tp_func_##name, __traceiter_##name); \
extern struct tracepoint __tracepoint_##name; \
static inline void trace_##name(proto) \
{ \
if (static_key_false(&__tracepoint_##name.key)) \
- __DO_TRACE(&__tracepoint_##name, \
- TP_PROTO(data_proto), \
- TP_ARGS(data_args), \
+ __DO_TRACE(name, \
+ TP_ARGS(args), \
TP_CONDITION(cond), 0); \
if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \
rcu_read_lock_sched_notrace(); \
@@ -244,7 +255,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
} \
} \
__DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \
- PARAMS(cond), PARAMS(data_proto), PARAMS(data_args)) \
+ PARAMS(cond)) \
static inline int \
register_trace_##name(void (*probe)(data_proto), void *data) \
{ \
@@ -279,24 +290,55 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
* structures, so we create an array of pointers that will be used for iteration
* on the tracepoints.
*/
-#define DEFINE_TRACE_FN(name, reg, unreg) \
- static const char __tpstrtab_##name[] \
- __attribute__((section("__tracepoints_strings"))) = #name; \
- struct tracepoint __tracepoint_##name \
- __attribute__((section("__tracepoints"), used)) = \
- { __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\
- __TRACEPOINT_ENTRY(name);
+#define DEFINE_TRACE_FN(_name, _reg, _unreg, proto, args) \
+ static const char __tpstrtab_##_name[] \
+ __section("__tracepoints_strings") = #_name; \
+ extern struct static_call_key STATIC_CALL_KEY(tp_func_##_name); \
+ int __traceiter_##_name(void *__data, proto); \
+ struct tracepoint __tracepoint_##_name __used \
+ __section("__tracepoints") = { \
+ .name = __tpstrtab_##_name, \
+ .key = STATIC_KEY_INIT_FALSE, \
+ .static_call_key = &STATIC_CALL_KEY(tp_func_##_name), \
+ .static_call_tramp = STATIC_CALL_TRAMP_ADDR(tp_func_##_name), \
+ .iterator = &__traceiter_##_name, \
+ .regfunc = _reg, \
+ .unregfunc = _unreg, \
+ .funcs = NULL }; \
+ __TRACEPOINT_ENTRY(_name); \
+ int __traceiter_##_name(void *__data, proto) \
+ { \
+ struct tracepoint_func *it_func_ptr; \
+ void *it_func; \
+ \
+ it_func_ptr = \
+ rcu_dereference_raw((&__tracepoint_##_name)->funcs); \
+ if (it_func_ptr) { \
+ do { \
+ it_func = READ_ONCE((it_func_ptr)->func); \
+ __data = (it_func_ptr)->data; \
+ ((void(*)(void *, proto))(it_func))(__data, args); \
+ } while ((++it_func_ptr)->func); \
+ } \
+ return 0; \
+ } \
+ DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name);
-#define DEFINE_TRACE(name) \
- DEFINE_TRACE_FN(name, NULL, NULL);
+#define DEFINE_TRACE(name, proto, args) \
+ DEFINE_TRACE_FN(name, NULL, NULL, PARAMS(proto), PARAMS(args));
#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \
- EXPORT_SYMBOL_GPL(__tracepoint_##name)
+ EXPORT_SYMBOL_GPL(__tracepoint_##name); \
+ EXPORT_SYMBOL_GPL(__traceiter_##name); \
+ EXPORT_STATIC_CALL_GPL(tp_func_##name)
#define EXPORT_TRACEPOINT_SYMBOL(name) \
- EXPORT_SYMBOL(__tracepoint_##name)
+ EXPORT_SYMBOL(__tracepoint_##name); \
+ EXPORT_SYMBOL(__traceiter_##name); \
+ EXPORT_STATIC_CALL(tp_func_##name)
+
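
Note that DEFINE_TRACE() and DEFINE_TRACE_FN() now take the prototype and argument list, since they must emit the __traceiter_* iterator function; out-of-line tracepoint definitions have to be updated accordingly. A hedged sketch, with foo_bar as a hypothetical tracepoint name:

/* Sketch only: matches DECLARE_TRACE(foo_bar, TP_PROTO(int arg),
 * TP_ARGS(arg)) in the corresponding header. */
DEFINE_TRACE(foo_bar, TP_PROTO(int arg), TP_ARGS(arg));
EXPORT_TRACEPOINT_SYMBOL_GPL(foo_bar);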
#else /* !TRACEPOINTS_ENABLED */
-#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
+#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \
static inline void trace_##name(proto) \
{ } \
static inline void trace_##name##_rcuidle(proto) \
@@ -322,8 +364,8 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
return false; \
}
-#define DEFINE_TRACE_FN(name, reg, unreg)
-#define DEFINE_TRACE(name)
+#define DEFINE_TRACE_FN(name, reg, unreg, proto, args)
+#define DEFINE_TRACE(name, proto, args)
#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
#define EXPORT_TRACEPOINT_SYMBOL(name)
@@ -362,7 +404,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
static const char *___tp_str __tracepoint_string = str; \
___tp_str; \
})
-#define __tracepoint_string __attribute__((section("__tracepoint_str")))
+#define __tracepoint_string __used __section("__tracepoint_str")
#else
/*
* tracepoint_string() is used to save the string address for userspace
@@ -373,36 +415,15 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
# define __tracepoint_string
#endif
-/*
- * The need for the DECLARE_TRACE_NOARGS() is to handle the prototype
- * (void). "void" is a special value in a function prototype and can
- * not be combined with other arguments. Since the DECLARE_TRACE()
- * macro adds a data element at the beginning of the prototype,
- * we need a way to differentiate "(void *data, proto)" from
- * "(void *data, void)". The second prototype is invalid.
- *
- * DECLARE_TRACE_NOARGS() passes "void" as the tracepoint prototype
- * and "void *__data" as the callback prototype.
- *
- * DECLARE_TRACE() passes "proto" as the tracepoint protoype and
- * "void *__data, proto" as the callback prototype.
- */
-#define DECLARE_TRACE_NOARGS(name) \
- __DECLARE_TRACE(name, void, , \
- cpu_online(raw_smp_processor_id()), \
- void *__data, __data)
-
#define DECLARE_TRACE(name, proto, args) \
__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
cpu_online(raw_smp_processor_id()), \
- PARAMS(void *__data, proto), \
- PARAMS(__data, args))
+ PARAMS(void *__data, proto))
#define DECLARE_TRACE_CONDITION(name, proto, args, cond) \
__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \
- PARAMS(void *__data, proto), \
- PARAMS(__data, args))
+ PARAMS(void *__data, proto))
#define TRACE_EVENT_FLAGS(event, flag)
@@ -454,7 +475,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
* *
* * The declared 'local variable' is called '__entry'
* *
- * * __field(pid_t, prev_prid) is equivalent to a standard declariton:
+ * * __field(pid_t, prev_pid) is equivalent to a standard declaration:
* *
* * pid_t prev_pid;
* *
diff --git a/include/linux/tty.h b/include/linux/tty.h
index bd5fe0e907e8..730c3301d710 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -6,41 +6,18 @@
#include <linux/major.h>
#include <linux/termios.h>
#include <linux/workqueue.h>
+#include <linux/tty_buffer.h>
#include <linux/tty_driver.h>
#include <linux/tty_ldisc.h>
+#include <linux/tty_port.h>
#include <linux/mutex.h>
#include <linux/tty_flags.h>
-#include <linux/seq_file.h>
#include <uapi/linux/tty.h>
#include <linux/rwsem.h>
#include <linux/llist.h>
/*
- * Lock subclasses for tty locks
- *
- * TTY_LOCK_NORMAL is for normal ttys and master ptys.
- * TTY_LOCK_SLAVE is for slave ptys only.
- *
- * Lock subclasses are necessary for handling nested locking with pty pairs.
- * tty locks which use nested locking:
- *
- * legacy_mutex - Nested tty locks are necessary for releasing pty pairs.
- * The stable lock order is master pty first, then slave pty.
- * termios_rwsem - The stable lock order is tty_buffer lock->termios_rwsem.
- * Subclassing this lock enables the slave pty to hold its
- * termios_rwsem when claiming the master tty_buffer lock.
- * tty_buffer lock - slave ptys can claim nested buffer lock when handling
- * signal chars. The stable lock order is slave pty, then
- * master.
- */
-
-enum {
- TTY_LOCK_NORMAL = 0,
- TTY_LOCK_SLAVE,
-};
-
-/*
* (Note: the *_driver.minor_start values 1, 64, 128, 192 are
* hardcoded at present.)
*/
@@ -55,54 +32,6 @@ enum {
*/
#define __DISABLED_CHAR '\0'
-struct tty_buffer {
- union {
- struct tty_buffer *next;
- struct llist_node free;
- };
- int used;
- int size;
- int commit;
- int read;
- int flags;
- /* Data points here */
- unsigned long data[0];
-};
-
-/* Values for .flags field of tty_buffer */
-#define TTYB_NORMAL 1 /* buffer has no flags buffer */
-
-static inline unsigned char *char_buf_ptr(struct tty_buffer *b, int ofs)
-{
- return ((unsigned char *)b->data) + ofs;
-}
-
-static inline char *flag_buf_ptr(struct tty_buffer *b, int ofs)
-{
- return (char *)char_buf_ptr(b, ofs) + b->size;
-}
-
-struct tty_bufhead {
- struct tty_buffer *head; /* Queue head */
- struct work_struct work;
- struct mutex lock;
- atomic_t priority;
- struct tty_buffer sentinel;
- struct llist_head free; /* Free queue head */
- atomic_t mem_used; /* In-use buffers excluding free list */
- int mem_limit;
- struct tty_buffer *tail; /* Active buffer */
-};
-/*
- * When a break, frame error, or parity error happens, these codes are
- * stuffed into the flags buffer.
- */
-#define TTY_NORMAL 0
-#define TTY_BREAK 1
-#define TTY_FRAME 2
-#define TTY_PARITY 3
-#define TTY_OVERRUN 4
-
#define INTR_CHAR(tty) ((tty)->termios.c_cc[VINTR])
#define QUIT_CHAR(tty) ((tty)->termios.c_cc[VQUIT])
#define ERASE_CHAR(tty) ((tty)->termios.c_cc[VERASE])
@@ -188,109 +117,86 @@ struct tty_bufhead {
struct device;
struct signal_struct;
+struct tty_operations;
-/*
- * Port level information. Each device keeps its own port level information
- * so provide a common structure for those ports wanting to use common support
- * routines.
+/**
+ * struct tty_struct - state associated with a tty while open
*
- * The tty port has a different lifetime to the tty so must be kept apart.
- * In addition be careful as tty -> port mappings are valid for the life
- * of the tty object but in many cases port -> tty mappings are valid only
- * until a hangup so don't use the wrong path.
- */
-
-struct tty_port;
-
-struct tty_port_operations {
- /* Return 1 if the carrier is raised */
- int (*carrier_raised)(struct tty_port *port);
- /* Control the DTR line */
- void (*dtr_rts)(struct tty_port *port, int raise);
- /* Called when the last close completes or a hangup finishes
- IFF the port was initialized. Do not use to free resources. Called
- under the port mutex to serialize against activate/shutdowns */
- void (*shutdown)(struct tty_port *port);
- /* Called under the port mutex from tty_port_open, serialized using
- the port mutex */
- /* FIXME: long term getting the tty argument *out* of this would be
- good for consoles */
- int (*activate)(struct tty_port *port, struct tty_struct *tty);
- /* Called on the final put of a port */
- void (*destruct)(struct tty_port *port);
-};
-
-struct tty_port_client_operations {
- int (*receive_buf)(struct tty_port *port, const unsigned char *, const unsigned char *, size_t);
- void (*write_wakeup)(struct tty_port *port);
-};
-
-extern const struct tty_port_client_operations tty_port_default_client_ops;
-
-struct tty_port {
- struct tty_bufhead buf; /* Locked internally */
- struct tty_struct *tty; /* Back pointer */
- struct tty_struct *itty; /* internal back ptr */
- const struct tty_port_operations *ops; /* Port operations */
- const struct tty_port_client_operations *client_ops; /* Port client operations */
- spinlock_t lock; /* Lock protecting tty field */
- int blocked_open; /* Waiting to open */
- int count; /* Usage count */
- wait_queue_head_t open_wait; /* Open waiters */
- wait_queue_head_t delta_msr_wait; /* Modem status change */
- unsigned long flags; /* User TTY flags ASYNC_ */
- unsigned long iflags; /* Internal flags TTY_PORT_ */
- unsigned char console:1, /* port is a console */
- low_latency:1; /* optional: tune for latency */
- struct mutex mutex; /* Locking */
- struct mutex buf_mutex; /* Buffer alloc lock */
- unsigned char *xmit_buf; /* Optional buffer */
- unsigned int close_delay; /* Close port delay */
- unsigned int closing_wait; /* Delay for output */
- int drain_delay; /* Set to zero if no pure time
- based drain is needed else
- set to size of fifo */
- struct kref kref; /* Ref counter */
- void *client_data;
-};
-
-/* tty_port::iflags bits -- use atomic bit ops */
-#define TTY_PORT_INITIALIZED 0 /* device is initialized */
-#define TTY_PORT_SUSPENDED 1 /* device is suspended */
-#define TTY_PORT_ACTIVE 2 /* device is open */
-
-/*
- * uart drivers: use the uart_port::status field and the UPSTAT_* defines
- * for s/w-based flow control steering and carrier detection status
- */
-#define TTY_PORT_CTS_FLOW 3 /* h/w flow control enabled */
-#define TTY_PORT_CHECK_CD 4 /* carrier detect enabled */
-#define TTY_PORT_KOPENED 5 /* device exclusively opened by
- kernel */
-
-/*
- * Where all of the state associated with a tty is kept while the tty
- * is open. Since the termios state should be kept even if the tty
- * has been closed --- for things like the baud rate, etc --- it is
- * not stored here, but rather a pointer to the real state is stored
- * here. Possible the winsize structure should have the same
- * treatment, but (1) the default 80x24 is usually right and (2) it's
- * most often used by a windowing system, which will set the correct
- * size each time the window is created or resized anyway.
- * - TYT, 9/14/92
+ * @kref: reference counting by tty_kref_get() and tty_kref_put(), reaching zero
+ * frees the structure
+ * @dev: class device or %NULL (e.g. ptys, serdev)
+ * @driver: &struct tty_driver operating this tty
+ * @ops: &struct tty_operations of @driver for this tty (open, close, etc.)
+ * @index: index of this tty (e.g. to construct @name like tty12)
+ * @ldisc_sem: protects line discipline changes (@ldisc) -- lock tty not pty
+ * @ldisc: the current line discipline for this tty (n_tty by default)
+ * @atomic_write_lock: protects against concurrent writers, i.e. locks
+ * @write_cnt, @write_buf and similar
+ * @legacy_mutex: leftover from history (BKL -> BTM -> @legacy_mutex),
+ * protecting several operations on this tty
+ * @throttle_mutex: protects against concurrent tty_throttle_safe() and
+ * tty_unthrottle_safe() (but not tty_unthrottle())
+ * @termios_rwsem: protects @termios and @termios_locked
+ * @winsize_mutex: protects @winsize
+ * @termios: termios for the current tty, copied from/to @driver.termios
+ * @termios_locked: locked termios (by %TIOCGLCKTRMIOS and %TIOCSLCKTRMIOS
+ * ioctls)
+ * @name: name of the tty constructed by tty_line_name() (e.g. ttyS3)
+ * @flags: bitwise OR of %TTY_THROTTLED, %TTY_IO_ERROR, ...
+ * @count: count of open processes, reaching zero cancels all the work for
+ * this tty and drops a @kref too (but does not free this tty)
+ * @winsize: size of the terminal "window" (cf. @winsize_mutex)
+ * @flow: flow settings grouped together, see also @flow.unused
+ * @flow.lock: lock for @flow members
+ * @flow.stopped: tty stopped/started by stop_tty()/start_tty()
+ * @flow.tco_stopped: tty stopped/started by %TCOOFF/%TCOON ioctls (it has
+ * precedence over @flow.stopped)
+ * @flow.unused: alignment for Alpha, so that no members other than @flow.* are
+ * modified by the same 64b word store. The @flow's __aligned is
+ * there for the very same reason.
+ * @ctrl: control settings grouped together, see also @ctrl.unused
+ * @ctrl.lock: lock for @ctrl members
+ * @ctrl.pgrp: process group of this tty (setpgrp(2))
+ * @ctrl.session: session of this tty (setsid(2)). Writes are protected by both
+ * @ctrl.lock and @legacy_mutex, readers must use at least one of
+ * them.
+ * @ctrl.pktstatus: packet mode status (bitwise OR of %TIOCPKT_ constants)
+ * @ctrl.packet: packet mode enabled
+ * @ctrl.unused: alignment for Alpha, see @flow.unused for explanation
+ * @hw_stopped: not controlled by the tty layer, under @driver's control for CTS
+ * handling
+ * @receive_room: bytes permitted to feed to @ldisc without any being lost
+ * @flow_change: controls behavior of throttling, see tty_throttle_safe() and
+ * tty_unthrottle_safe()
+ * @link: link to another pty (master -> slave and vice versa)
+ * @fasync: state for %O_ASYNC (for %SIGIO); managed by fasync_helper()
+ * @write_wait: concurrent writers are waiting in this queue until they are
+ * allowed to write
+ * @read_wait: readers wait for data in this queue
+ * @hangup_work: normally a work to perform a hangup (do_tty_hangup()); while
+ * freeing the tty, (re)used to release_one_tty()
+ * @disc_data: pointer to @ldisc's private data (e.g. to &struct n_tty_data)
+ * @driver_data: pointer to @driver's private data (e.g. &struct uart_state)
+ * @files_lock: protects @tty_files list
+ * @tty_files: list of (re)openers of this tty (i.e. linked &struct
+ * tty_file_private)
+ * @closing: when set during close, n_tty processes only START & STOP chars
+ * @write_buf: temporary buffer used during tty_write() to copy user data to
+ * @write_cnt: count of bytes written in tty_write() to @write_buf
+ * @SAK_work: if the tty has a pending do_SAK, it is queued here
+ * @port: persistent storage for this device (i.e. &struct tty_port)
+ *
+ * All of the state associated with a tty while the tty is open. Persistent
+ * storage for tty devices is referenced here as @port and is documented in
+ * &struct tty_port.
*/
-
-struct tty_operations;
-
struct tty_struct {
- int magic;
struct kref kref;
struct device *dev;
struct tty_driver *driver;
const struct tty_operations *ops;
int index;
- /* Protects ldisc changes: Lock tty not pty */
struct ld_semaphore ldisc_sem;
struct tty_ldisc *ldisc;
@@ -299,25 +205,30 @@ struct tty_struct {
struct mutex throttle_mutex;
struct rw_semaphore termios_rwsem;
struct mutex winsize_mutex;
- spinlock_t ctrl_lock;
- spinlock_t flow_lock;
- /* Termios values are protected by the termios rwsem */
struct ktermios termios, termios_locked;
- struct termiox *termiox; /* May be NULL for unsupported */
char name[64];
- struct pid *pgrp; /* Protected by ctrl lock */
- struct pid *session;
unsigned long flags;
int count;
- struct winsize winsize; /* winsize_mutex */
- unsigned long stopped:1, /* flow_lock */
- flow_stopped:1,
- unused:BITS_PER_LONG - 2;
+ struct winsize winsize;
+
+ struct {
+ spinlock_t lock;
+ bool stopped;
+ bool tco_stopped;
+ unsigned long unused[0];
+ } __aligned(sizeof(unsigned long)) flow;
+
+ struct {
+ spinlock_t lock;
+ struct pid *pgrp;
+ struct pid *session;
+ unsigned char pktstatus;
+ bool packet;
+ unsigned long unused[0];
+ } __aligned(sizeof(unsigned long)) ctrl;
+
int hw_stopped;
- unsigned long ctrl_status:8, /* ctrl_lock */
- packet:1,
- unused_ctrl:BITS_PER_LONG - 9;
- unsigned int receive_room; /* Bytes free for queue */
+ unsigned int receive_room;
int flow_change;
struct tty_struct *link;
@@ -327,7 +238,7 @@ struct tty_struct {
struct work_struct hangup_work;
void *disc_data;
void *driver_data;
- spinlock_t files_lock; /* protects tty_files list */
+ spinlock_t files_lock;
struct list_head tty_files;
#define N_TTY_BUF_SIZE 4096
@@ -335,7 +246,6 @@ struct tty_struct {
int closing;
unsigned char *write_buf;
int write_cnt;
- /* If the tty has a pending do_SAK, queue it here - akpm */
struct work_struct SAK_work;
struct tty_port *port;
} __randomize_layout;
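
For orientation, a sketch (not taken from this patch) of how the new @flow sub-struct is meant to be used, mirroring stop_tty(): the stopped flag is only flipped while holding flow.lock, and the driver's @stop() hook runs under the same lock:

	/* hypothetical example; the real code lives in drivers/tty/tty_io.c */
	static void example_stop_tty(struct tty_struct *tty)
	{
		unsigned long flags;

		spin_lock_irqsave(&tty->flow.lock, flags);
		if (!tty->flow.stopped) {
			tty->flow.stopped = true;
			if (tty->ops->stop)
				tty->ops->stop(tty);
		}
		spin_unlock_irqrestore(&tty->flow.lock, flags);
	}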
@@ -347,44 +257,72 @@ struct tty_file_private {
struct list_head list;
};
-/* tty magic number */
-#define TTY_MAGIC 0x5401
-
-/*
- * These bits are used in the flags field of the tty structure.
+/**
+ * DOC: TTY Struct Flags
+ *
+ * These bits are used in the :c:member:`tty_struct.flags` field.
*
* So that interrupts won't be able to mess up the queues,
* copy_to_cooked must be atomic with respect to itself, as must
* tty->write. Thus, you must use the inline functions set_bit() and
* clear_bit() to make things atomic.
+ *
+ * TTY_THROTTLED
+ * Driver input is throttled. The ldisc should call
+ * :c:member:`tty_operations.unthrottle()` in order to resume reception when
+ * it is ready to process more data (at threshold min).
+ *
+ * TTY_IO_ERROR
+ * If set, causes all subsequent userspace read/write calls on the tty to
+ * fail, returning -%EIO. (The tty may also have no ldisc attached.)
+ *
+ * TTY_OTHER_CLOSED
+ * Device is a pty and the other side has closed.
+ *
+ * TTY_EXCLUSIVE
+ * Exclusive open mode (a single opener).
+ *
+ * TTY_DO_WRITE_WAKEUP
+ * If set, causes the driver to call the
+ * :c:member:`tty_ldisc_ops.write_wakeup()` method in order to resume
+ * transmission when it can accept more data to transmit.
+ *
+ * TTY_LDISC_OPEN
+ * Indicates that a line discipline is open. For debugging purposes only.
+ *
+ * TTY_PTY_LOCK
+ * A flag private to pty code to implement %TIOCSPTLCK/%TIOCGPTLCK logic.
+ *
+ * TTY_NO_WRITE_SPLIT
+ * Prevent driver from splitting up writes into smaller chunks (preserve
+ * write boundaries to driver).
+ *
+ * TTY_HUPPED
+ * The TTY was hung up. This is set post :c:member:`tty_operations.hangup()`.
+ *
+ * TTY_HUPPING
+ * The TTY is in the process of hanging up to abort potential readers.
+ *
+ * TTY_LDISC_CHANGING
+ * Line discipline for this TTY is being changed. I/O should not block
+ * when this is set. Use tty_io_nonblock() to check.
+ *
+ * TTY_LDISC_HALTED
+ * Line discipline for this TTY was stopped. No work should be queued to
+ * this ldisc.
*/
-#define TTY_THROTTLED 0 /* Call unthrottle() at threshold min */
-#define TTY_IO_ERROR 1 /* Cause an I/O error (may be no ldisc too) */
-#define TTY_OTHER_CLOSED 2 /* Other side (if any) has closed */
-#define TTY_EXCLUSIVE 3 /* Exclusive open mode */
-#define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */
-#define TTY_LDISC_OPEN 11 /* Line discipline is open */
-#define TTY_PTY_LOCK 16 /* pty private */
-#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
-#define TTY_HUPPED 18 /* Post driver->hangup() */
-#define TTY_HUPPING 19 /* Hangup in progress */
-#define TTY_LDISC_CHANGING 20 /* Change pending - non-block IO */
-#define TTY_LDISC_HALTED 22 /* Line discipline is halted */
-
-/* Values for tty->flow_change */
-#define TTY_THROTTLE_SAFE 1
-#define TTY_UNTHROTTLE_SAFE 2
-
-static inline void __tty_set_flow_change(struct tty_struct *tty, int val)
-{
- tty->flow_change = val;
-}
-
-static inline void tty_set_flow_change(struct tty_struct *tty, int val)
-{
- tty->flow_change = val;
- smp_mb();
-}
+#define TTY_THROTTLED 0
+#define TTY_IO_ERROR 1
+#define TTY_OTHER_CLOSED 2
+#define TTY_EXCLUSIVE 3
+#define TTY_DO_WRITE_WAKEUP 5
+#define TTY_LDISC_OPEN 11
+#define TTY_PTY_LOCK 16
+#define TTY_NO_WRITE_SPLIT 17
+#define TTY_HUPPED 18
+#define TTY_HUPPING 19
+#define TTY_LDISC_CHANGING 20
+#define TTY_LDISC_HALTED 22
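
Per the atomicity requirement above, a minimal sketch of flag manipulation (the example_* helpers are hypothetical):

	static inline void example_mark_throttled(struct tty_struct *tty)
	{
		set_bit(TTY_THROTTLED, &tty->flags);		/* atomic set */
	}

	static inline bool example_tty_hupped(struct tty_struct *tty)
	{
		return test_bit(TTY_HUPPED, &tty->flags);	/* atomic test */
	}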
static inline bool tty_io_nonblock(struct tty_struct *tty, struct file *file)
{
@@ -403,21 +341,20 @@ static inline bool tty_throttled(struct tty_struct *tty)
}
#ifdef CONFIG_TTY
-extern void tty_kref_put(struct tty_struct *tty);
-extern struct pid *tty_get_pgrp(struct tty_struct *tty);
-extern void tty_vhangup_self(void);
-extern void disassociate_ctty(int priv);
-extern dev_t tty_devnum(struct tty_struct *tty);
-extern void proc_clear_tty(struct task_struct *p);
-extern struct tty_struct *get_current_tty(void);
+void tty_kref_put(struct tty_struct *tty);
+struct pid *tty_get_pgrp(struct tty_struct *tty);
+void tty_vhangup_self(void);
+void disassociate_ctty(int priv);
+dev_t tty_devnum(struct tty_struct *tty);
+void proc_clear_tty(struct task_struct *p);
+struct tty_struct *get_current_tty(void);
/* tty_io.c */
-extern int __init tty_init(void);
-extern const char *tty_name(const struct tty_struct *tty);
-extern struct tty_struct *tty_kopen(dev_t device);
-extern void tty_kclose(struct tty_struct *tty);
-extern int tty_dev_name_to_number(const char *name, dev_t *number);
-extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout);
-extern void tty_ldisc_unlock(struct tty_struct *tty);
+int __init tty_init(void);
+const char *tty_name(const struct tty_struct *tty);
+struct tty_struct *tty_kopen_exclusive(dev_t device);
+struct tty_struct *tty_kopen_shared(dev_t device);
+void tty_kclose(struct tty_struct *tty);
+int tty_dev_name_to_number(const char *name, dev_t *number);
#else
static inline void tty_kref_put(struct tty_struct *tty)
{ }
@@ -438,7 +375,7 @@ static inline int __init tty_init(void)
{ return 0; }
static inline const char *tty_name(const struct tty_struct *tty)
{ return "(none)"; }
-static inline struct tty_struct *tty_kopen(dev_t device)
+static inline struct tty_struct *tty_kopen_exclusive(dev_t device)
{ return ERR_PTR(-ENODEV); }
static inline void tty_kclose(struct tty_struct *tty)
{ }
@@ -448,7 +385,7 @@ static inline int tty_dev_name_to_number(const char *name, dev_t *number)
extern struct ktermios tty_std_termios;
-extern int vcs_init(void);
+int vcs_init(void);
extern struct class *tty_class;
@@ -468,58 +405,34 @@ static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
return tty;
}
-extern const char *tty_driver_name(const struct tty_struct *tty);
-extern void tty_wait_until_sent(struct tty_struct *tty, long timeout);
-extern int __tty_check_change(struct tty_struct *tty, int sig);
-extern int tty_check_change(struct tty_struct *tty);
-extern void __stop_tty(struct tty_struct *tty);
-extern void stop_tty(struct tty_struct *tty);
-extern void __start_tty(struct tty_struct *tty);
-extern void start_tty(struct tty_struct *tty);
-extern int tty_register_driver(struct tty_driver *driver);
-extern int tty_unregister_driver(struct tty_driver *driver);
-extern struct device *tty_register_device(struct tty_driver *driver,
- unsigned index, struct device *dev);
-extern struct device *tty_register_device_attr(struct tty_driver *driver,
- unsigned index, struct device *device,
- void *drvdata,
- const struct attribute_group **attr_grp);
-extern void tty_unregister_device(struct tty_driver *driver, unsigned index);
-extern void tty_write_message(struct tty_struct *tty, char *msg);
-extern int tty_send_xchar(struct tty_struct *tty, char ch);
-extern int tty_put_char(struct tty_struct *tty, unsigned char c);
-extern int tty_chars_in_buffer(struct tty_struct *tty);
-extern int tty_write_room(struct tty_struct *tty);
-extern void tty_driver_flush_buffer(struct tty_struct *tty);
-extern void tty_throttle(struct tty_struct *tty);
-extern void tty_unthrottle(struct tty_struct *tty);
-extern int tty_throttle_safe(struct tty_struct *tty);
-extern int tty_unthrottle_safe(struct tty_struct *tty);
-extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws);
-extern int is_current_pgrp_orphaned(void);
-extern void tty_hangup(struct tty_struct *tty);
-extern void tty_vhangup(struct tty_struct *tty);
-extern void tty_vhangup_session(struct tty_struct *tty);
-extern int tty_hung_up_p(struct file *filp);
-extern void do_SAK(struct tty_struct *tty);
-extern void __do_SAK(struct tty_struct *tty);
-extern void tty_open_proc_set_tty(struct file *filp, struct tty_struct *tty);
-extern int tty_signal_session_leader(struct tty_struct *tty, int exit_session);
-extern void session_clear_tty(struct pid *session);
-extern void no_tty(void);
-extern void tty_buffer_free_all(struct tty_port *port);
-extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld);
-extern void tty_buffer_init(struct tty_port *port);
-extern void tty_buffer_set_lock_subclass(struct tty_port *port);
-extern bool tty_buffer_restart_work(struct tty_port *port);
-extern bool tty_buffer_cancel_work(struct tty_port *port);
-extern void tty_buffer_flush_work(struct tty_port *port);
-extern speed_t tty_termios_baud_rate(struct ktermios *termios);
-extern speed_t tty_termios_input_baud_rate(struct ktermios *termios);
-extern void tty_termios_encode_baud_rate(struct ktermios *termios,
- speed_t ibaud, speed_t obaud);
-extern void tty_encode_baud_rate(struct tty_struct *tty,
- speed_t ibaud, speed_t obaud);
+const char *tty_driver_name(const struct tty_struct *tty);
+void tty_wait_until_sent(struct tty_struct *tty, long timeout);
+void stop_tty(struct tty_struct *tty);
+void start_tty(struct tty_struct *tty);
+void tty_write_message(struct tty_struct *tty, char *msg);
+int tty_send_xchar(struct tty_struct *tty, char ch);
+int tty_put_char(struct tty_struct *tty, unsigned char c);
+unsigned int tty_chars_in_buffer(struct tty_struct *tty);
+unsigned int tty_write_room(struct tty_struct *tty);
+void tty_driver_flush_buffer(struct tty_struct *tty);
+void tty_unthrottle(struct tty_struct *tty);
+int tty_throttle_safe(struct tty_struct *tty);
+int tty_unthrottle_safe(struct tty_struct *tty);
+int tty_do_resize(struct tty_struct *tty, struct winsize *ws);
+int tty_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount);
+int is_current_pgrp_orphaned(void);
+void tty_hangup(struct tty_struct *tty);
+void tty_vhangup(struct tty_struct *tty);
+int tty_hung_up_p(struct file *filp);
+void do_SAK(struct tty_struct *tty);
+void __do_SAK(struct tty_struct *tty);
+void no_tty(void);
+speed_t tty_termios_baud_rate(const struct ktermios *termios);
+void tty_termios_encode_baud_rate(struct ktermios *termios, speed_t ibaud,
+ speed_t obaud);
+void tty_encode_baud_rate(struct tty_struct *tty, speed_t ibaud,
+ speed_t obaud);
/**
* tty_get_baud_rate - get tty bit rates
@@ -536,211 +449,40 @@ static inline speed_t tty_get_baud_rate(struct tty_struct *tty)
return tty_termios_baud_rate(&tty->termios);
}
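
To show how the baud helpers combine, a sketch of a driver's @set_termios (example_set_termios() is hypothetical; the hardware-programming step is elided):

	static void example_set_termios(struct tty_struct *tty,
					const struct ktermios *old)
	{
		speed_t baud = tty_termios_baud_rate(&tty->termios);

		if (baud == 0)
			return;	/* B0: a real driver would drop DTR here */

		/* ... program the hardware for baud ... */

		/* write the rate actually in use back into termios */
		tty_termios_encode_baud_rate(&tty->termios, baud, baud);
	}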
-extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old);
-extern int tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b);
-extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt);
-
-extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *);
-extern void tty_ldisc_deref(struct tty_ldisc *);
-extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *);
-extern void tty_ldisc_hangup(struct tty_struct *tty, bool reset);
-extern int tty_ldisc_reinit(struct tty_struct *tty, int disc);
-extern const struct seq_operations tty_ldiscs_seq_ops;
-
-extern void tty_wakeup(struct tty_struct *tty);
-extern void tty_ldisc_flush(struct tty_struct *tty);
-
-extern long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-extern int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg);
-extern long tty_jobctrl_ioctl(struct tty_struct *tty, struct tty_struct *real_tty,
- struct file *file, unsigned int cmd, unsigned long arg);
-extern int tty_perform_flush(struct tty_struct *tty, unsigned long arg);
-extern void tty_default_fops(struct file_operations *fops);
-extern struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx);
-extern int tty_alloc_file(struct file *file);
-extern void tty_add_file(struct tty_struct *tty, struct file *file);
-extern void tty_free_file(struct file *file);
-extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx);
-extern void tty_release_struct(struct tty_struct *tty, int idx);
-extern int tty_release(struct inode *inode, struct file *filp);
-extern void tty_init_termios(struct tty_struct *tty);
-extern void tty_save_termios(struct tty_struct *tty);
-extern int tty_standard_install(struct tty_driver *driver,
- struct tty_struct *tty);
-
-extern struct mutex tty_mutex;
-
-#define tty_is_writelocked(tty) (mutex_is_locked(&tty->atomic_write_lock))
-
-extern void tty_port_init(struct tty_port *port);
-extern void tty_port_link_device(struct tty_port *port,
- struct tty_driver *driver, unsigned index);
-extern struct device *tty_port_register_device(struct tty_port *port,
- struct tty_driver *driver, unsigned index,
- struct device *device);
-extern struct device *tty_port_register_device_attr(struct tty_port *port,
- struct tty_driver *driver, unsigned index,
- struct device *device, void *drvdata,
- const struct attribute_group **attr_grp);
-extern struct device *tty_port_register_device_serdev(struct tty_port *port,
- struct tty_driver *driver, unsigned index,
- struct device *device);
-extern struct device *tty_port_register_device_attr_serdev(struct tty_port *port,
- struct tty_driver *driver, unsigned index,
- struct device *device, void *drvdata,
- const struct attribute_group **attr_grp);
-extern void tty_port_unregister_device(struct tty_port *port,
- struct tty_driver *driver, unsigned index);
-extern int tty_port_alloc_xmit_buf(struct tty_port *port);
-extern void tty_port_free_xmit_buf(struct tty_port *port);
-extern void tty_port_destroy(struct tty_port *port);
-extern void tty_port_put(struct tty_port *port);
-
-static inline struct tty_port *tty_port_get(struct tty_port *port)
-{
- if (port && kref_get_unless_zero(&port->kref))
- return port;
- return NULL;
-}
-
-/* If the cts flow control is enabled, return true. */
-static inline bool tty_port_cts_enabled(struct tty_port *port)
-{
- return test_bit(TTY_PORT_CTS_FLOW, &port->iflags);
-}
-
-static inline void tty_port_set_cts_flow(struct tty_port *port, bool val)
-{
- if (val)
- set_bit(TTY_PORT_CTS_FLOW, &port->iflags);
- else
- clear_bit(TTY_PORT_CTS_FLOW, &port->iflags);
-}
-
-static inline bool tty_port_active(struct tty_port *port)
-{
- return test_bit(TTY_PORT_ACTIVE, &port->iflags);
-}
-
-static inline void tty_port_set_active(struct tty_port *port, bool val)
-{
- if (val)
- set_bit(TTY_PORT_ACTIVE, &port->iflags);
- else
- clear_bit(TTY_PORT_ACTIVE, &port->iflags);
-}
-
-static inline bool tty_port_check_carrier(struct tty_port *port)
-{
- return test_bit(TTY_PORT_CHECK_CD, &port->iflags);
-}
-
-static inline void tty_port_set_check_carrier(struct tty_port *port, bool val)
-{
- if (val)
- set_bit(TTY_PORT_CHECK_CD, &port->iflags);
- else
- clear_bit(TTY_PORT_CHECK_CD, &port->iflags);
-}
-
-static inline bool tty_port_suspended(struct tty_port *port)
-{
- return test_bit(TTY_PORT_SUSPENDED, &port->iflags);
-}
+unsigned char tty_get_char_size(unsigned int cflag);
+unsigned char tty_get_frame_size(unsigned int cflag);
-static inline void tty_port_set_suspended(struct tty_port *port, bool val)
-{
- if (val)
- set_bit(TTY_PORT_SUSPENDED, &port->iflags);
- else
- clear_bit(TTY_PORT_SUSPENDED, &port->iflags);
-}
+void tty_termios_copy_hw(struct ktermios *new, const struct ktermios *old);
+int tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b);
+int tty_set_termios(struct tty_struct *tty, struct ktermios *kt);
-static inline bool tty_port_initialized(struct tty_port *port)
-{
- return test_bit(TTY_PORT_INITIALIZED, &port->iflags);
-}
+void tty_wakeup(struct tty_struct *tty);
-static inline void tty_port_set_initialized(struct tty_port *port, bool val)
-{
- if (val)
- set_bit(TTY_PORT_INITIALIZED, &port->iflags);
- else
- clear_bit(TTY_PORT_INITIALIZED, &port->iflags);
-}
-
-static inline bool tty_port_kopened(struct tty_port *port)
-{
- return test_bit(TTY_PORT_KOPENED, &port->iflags);
-}
-
-static inline void tty_port_set_kopened(struct tty_port *port, bool val)
-{
- if (val)
- set_bit(TTY_PORT_KOPENED, &port->iflags);
- else
- clear_bit(TTY_PORT_KOPENED, &port->iflags);
-}
-
-extern struct tty_struct *tty_port_tty_get(struct tty_port *port);
-extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty);
-extern int tty_port_carrier_raised(struct tty_port *port);
-extern void tty_port_raise_dtr_rts(struct tty_port *port);
-extern void tty_port_lower_dtr_rts(struct tty_port *port);
-extern void tty_port_hangup(struct tty_port *port);
-extern void tty_port_tty_hangup(struct tty_port *port, bool check_clocal);
-extern void tty_port_tty_wakeup(struct tty_port *port);
-extern int tty_port_block_til_ready(struct tty_port *port,
- struct tty_struct *tty, struct file *filp);
-extern int tty_port_close_start(struct tty_port *port,
- struct tty_struct *tty, struct file *filp);
-extern void tty_port_close_end(struct tty_port *port, struct tty_struct *tty);
-extern void tty_port_close(struct tty_port *port,
- struct tty_struct *tty, struct file *filp);
-extern int tty_port_install(struct tty_port *port, struct tty_driver *driver,
- struct tty_struct *tty);
-extern int tty_port_open(struct tty_port *port,
- struct tty_struct *tty, struct file *filp);
-static inline int tty_port_users(struct tty_port *port)
-{
- return port->count + port->blocked_open;
-}
+int tty_mode_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
+int tty_perform_flush(struct tty_struct *tty, unsigned long arg);
+struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx);
+void tty_release_struct(struct tty_struct *tty, int idx);
+void tty_init_termios(struct tty_struct *tty);
+void tty_save_termios(struct tty_struct *tty);
+int tty_standard_install(struct tty_driver *driver,
+ struct tty_struct *tty);
-extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
-extern int tty_unregister_ldisc(int disc);
-extern int tty_set_ldisc(struct tty_struct *tty, int disc);
-extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
-extern void tty_ldisc_release(struct tty_struct *tty);
-extern int __must_check tty_ldisc_init(struct tty_struct *tty);
-extern void tty_ldisc_deinit(struct tty_struct *tty);
-extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
- char *f, int count);
+extern struct mutex tty_mutex;
/* n_tty.c */
-extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
+void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
#ifdef CONFIG_TTY
-extern void __init n_tty_init(void);
+void __init n_tty_init(void);
#else
static inline void n_tty_init(void) { }
#endif
/* tty_audit.c */
#ifdef CONFIG_AUDIT
-extern void tty_audit_add_data(struct tty_struct *tty, const void *data,
- size_t size);
-extern void tty_audit_exit(void);
-extern void tty_audit_fork(struct signal_struct *sig);
-extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
-extern int tty_audit_push(void);
+void tty_audit_exit(void);
+void tty_audit_fork(struct signal_struct *sig);
+int tty_audit_push(void);
#else
-static inline void tty_audit_add_data(struct tty_struct *tty, const void *data,
- size_t size)
-{
-}
-static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
-{
-}
static inline void tty_audit_exit(void)
{
}
@@ -754,44 +496,23 @@ static inline int tty_audit_push(void)
#endif
/* tty_ioctl.c */
-extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg);
+int n_tty_ioctl_helper(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg);
/* vt.c */
-extern int vt_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg);
+int vt_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
-extern long vt_compat_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg);
+long vt_compat_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg);
/* tty_mutex.c */
/* functions for preparation of BKL removal */
-extern void tty_lock(struct tty_struct *tty);
-extern int tty_lock_interruptible(struct tty_struct *tty);
-extern void tty_unlock(struct tty_struct *tty);
-extern void tty_lock_slave(struct tty_struct *tty);
-extern void tty_unlock_slave(struct tty_struct *tty);
-extern void tty_set_lock_subclass(struct tty_struct *tty);
-
-#ifdef CONFIG_PROC_FS
-extern void proc_tty_register_driver(struct tty_driver *);
-extern void proc_tty_unregister_driver(struct tty_driver *);
-#else
-static inline void proc_tty_register_driver(struct tty_driver *d) {}
-static inline void proc_tty_unregister_driver(struct tty_driver *d) {}
-#endif
-
-#define tty_msg(fn, tty, f, ...) \
- fn("%s %s: " f, tty_driver_name(tty), tty_name(tty), ##__VA_ARGS__)
-
-#define tty_debug(tty, f, ...) tty_msg(pr_debug, tty, f, ##__VA_ARGS__)
-#define tty_info(tty, f, ...) tty_msg(pr_info, tty, f, ##__VA_ARGS__)
-#define tty_notice(tty, f, ...) tty_msg(pr_notice, tty, f, ##__VA_ARGS__)
-#define tty_warn(tty, f, ...) tty_msg(pr_warn, tty, f, ##__VA_ARGS__)
-#define tty_err(tty, f, ...) tty_msg(pr_err, tty, f, ##__VA_ARGS__)
-
-#define tty_info_ratelimited(tty, f, ...) \
- tty_msg(pr_info_ratelimited, tty, f, ##__VA_ARGS__)
+void tty_lock(struct tty_struct *tty);
+int tty_lock_interruptible(struct tty_struct *tty);
+void tty_unlock(struct tty_struct *tty);
+void tty_lock_slave(struct tty_struct *tty);
+void tty_unlock_slave(struct tty_struct *tty);
+void tty_set_lock_subclass(struct tty_struct *tty);
#endif
diff --git a/include/linux/tty_buffer.h b/include/linux/tty_buffer.h
new file mode 100644
index 000000000000..1796648c2907
--- /dev/null
+++ b/include/linux/tty_buffer.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TTY_BUFFER_H
+#define _LINUX_TTY_BUFFER_H
+
+#include <linux/atomic.h>
+#include <linux/llist.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+struct tty_buffer {
+ union {
+ struct tty_buffer *next;
+ struct llist_node free;
+ };
+ int used;
+ int size;
+ int commit;
+ int lookahead; /* Lazy update on recv, can become less than "read" */
+ int read;
+ int flags;
+ /* Data points here */
+ unsigned long data[];
+};
+
+/* Values for .flags field of tty_buffer */
+#define TTYB_NORMAL 1 /* buffer has no flags buffer */
+
+static inline unsigned char *char_buf_ptr(struct tty_buffer *b, int ofs)
+{
+ return ((unsigned char *)b->data) + ofs;
+}
+
+static inline char *flag_buf_ptr(struct tty_buffer *b, int ofs)
+{
+ return (char *)char_buf_ptr(b, ofs) + b->size;
+}
+
+struct tty_bufhead {
+ struct tty_buffer *head; /* Queue head */
+ struct work_struct work;
+ struct mutex lock;
+ atomic_t priority;
+ struct tty_buffer sentinel;
+ struct llist_head free; /* Free queue head */
+ atomic_t mem_used; /* In-use buffers excluding free list */
+ int mem_limit;
+ struct tty_buffer *tail; /* Active buffer */
+};
+
+/*
+ * When a break, frame error, or parity error happens, these codes are
+ * stuffed into the flags buffer.
+ */
+#define TTY_NORMAL 0
+#define TTY_BREAK 1
+#define TTY_FRAME 2
+#define TTY_PARITY 3
+#define TTY_OVERRUN 4
+
+#endif
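
A sketch of how the accessors and flag codes relate (example_consume() is hypothetical; the real consumers are in the flip/ldisc code): data bytes live at char_buf_ptr() offsets and, unless TTYB_NORMAL is set, flag_buf_ptr() reaches a parallel array holding one TTY_* code per byte:

	static void example_consume(struct tty_buffer *b)
	{
		while (b->read < b->commit) {
			unsigned char c = *char_buf_ptr(b, b->read);
			char f = (b->flags & TTYB_NORMAL) ?
					TTY_NORMAL : *flag_buf_ptr(b, b->read);

			if (f != TTY_NORMAL)	/* break/frame/parity/overrun */
				pr_debug("flagged byte %#x (flag %d)\n", c, f);
			b->read++;
		}
	}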
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 358446247ccd..e00034118c7b 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -2,255 +2,351 @@
#ifndef _LINUX_TTY_DRIVER_H
#define _LINUX_TTY_DRIVER_H
-/*
- * This structure defines the interface between the low-level tty
- * driver and the tty routines. The following routines can be
- * defined; unless noted otherwise, they are optional, and can be
- * filled in with a null pointer.
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/termios.h>
+#include <linux/seq_file.h>
+
+struct tty_struct;
+struct tty_driver;
+struct serial_icounter_struct;
+struct serial_struct;
+
+/**
+ * struct tty_operations -- interface between driver and tty
+ *
+ * @lookup: ``struct tty_struct *()(struct tty_driver *self, struct file *,
+ * int idx)``
*
- * struct tty_struct * (*lookup)(struct tty_driver *self, struct file *, int idx)
+ * Return the tty device corresponding to @idx, %NULL if there is not
+ * one currently in use and an %ERR_PTR value on error. Called under
+ * %tty_mutex (for now!)
*
- * Return the tty device corresponding to idx, NULL if there is not
- * one currently in use and an ERR_PTR value on error. Called under
- * tty_mutex (for now!)
+ * Optional method. Default behaviour is to use the @self->ttys array.
*
- * Optional method. Default behaviour is to use the ttys array
+ * @install: ``int ()(struct tty_driver *self, struct tty_struct *tty)``
*
- * int (*install)(struct tty_driver *self, struct tty_struct *tty)
+ * Install a new @tty into the @self's internal tables. Used in
+ * conjunction with @lookup and @remove methods.
*
- * Install a new tty into the tty driver internal tables. Used in
- * conjunction with lookup and remove methods.
+ * Optional method. Default behaviour is to use the @self->ttys array.
*
- * Optional method. Default behaviour is to use the ttys array
+ * @remove: ``void ()(struct tty_driver *self, struct tty_struct *tty)``
*
- * void (*remove)(struct tty_driver *self, struct tty_struct *tty)
+ * Remove a closed @tty from the @self's internal tables. Used in
+ * conjunction with the @lookup and @install methods.
*
- * Remove a closed tty from the tty driver internal tables. Used in
- * conjunction with lookup and remove methods.
+ * Optional method. Default behaviour is to use the @self->ttys array.
*
- * Optional method. Default behaviour is to use the ttys array
+ * @open: ``int ()(struct tty_struct *tty, struct file *)``
*
- * int (*open)(struct tty_struct * tty, struct file * filp);
+ * This routine is called when a particular @tty device is opened. This
+ * routine is mandatory; if this routine is not filled in, the attempted
+ * open will fail with %ENODEV.
*
- * This routine is called when a particular tty device is opened.
- * This routine is mandatory; if this routine is not filled in,
- * the attempted open will fail with ENODEV.
+ * Required method. Called with tty lock held. May sleep.
*
- * Required method. Called with tty lock held.
+ * @close: ``void ()(struct tty_struct *tty, struct file *)``
*
- * void (*close)(struct tty_struct * tty, struct file * filp);
+ * This routine is called when a particular @tty device is closed. At the
+ * point of return from this call the driver must make no further ldisc
+ * calls of any kind.
*
- * This routine is called when a particular tty device is closed.
- * Note: called even if the corresponding open() failed.
+ * Remark: called even if the corresponding @open() failed.
*
- * Required method. Called with tty lock held.
+ * Required method. Called with tty lock held. May sleep.
*
- * void (*shutdown)(struct tty_struct * tty);
+ * @shutdown: ``void ()(struct tty_struct *tty)``
*
- * This routine is called under the tty lock when a particular tty device
- * is closed for the last time. It executes before the tty resources
- * are freed so may execute while another function holds a tty kref.
+ * This routine is called under the tty lock when a particular @tty device
+ * is closed for the last time. It executes before the @tty resources
+ * are freed so may execute while another function holds a @tty kref.
*
- * void (*cleanup)(struct tty_struct * tty);
+ * @cleanup: ``void ()(struct tty_struct *tty)``
*
- * This routine is called asynchronously when a particular tty device
+ * This routine is called asynchronously when a particular @tty device
* is closed for the last time freeing up the resources. This is
* actually the second part of shutdown for routines that might sleep.
*
+ * @write: ``int ()(struct tty_struct *tty, const unsigned char *buf,
+ * int count)``
*
- * int (*write)(struct tty_struct * tty,
- * const unsigned char *buf, int count);
- *
- * This routine is called by the kernel to write a series of
- * characters to the tty device. The characters may come from
- * user space or kernel space. This routine will return the
+ * This routine is called by the kernel to write a series (@count) of
+ * characters (@buf) to the @tty device. The characters may come from
+ * user space or kernel space. This routine will return the
* number of characters actually accepted for writing.
*
- * Optional: Required for writable devices.
+ * May occur in parallel in special cases. Because this includes panic
+ * paths, drivers generally shouldn't try to do clever locking here.
*
- * int (*put_char)(struct tty_struct *tty, unsigned char ch);
+ * Optional: Required for writable devices. May not sleep.
*
- * This routine is called by the kernel to write a single
- * character to the tty device. If the kernel uses this routine,
- * it must call the flush_chars() routine (if defined) when it is
- * done stuffing characters into the driver. If there is no room
- * in the queue, the character is ignored.
+ * @put_char: ``int ()(struct tty_struct *tty, unsigned char ch)``
*
- * Optional: Kernel will use the write method if not provided.
+ * This routine is called by the kernel to write a single character @ch to
+ * the @tty device. If the kernel uses this routine, it must call the
+ * @flush_chars() routine (if defined) when it is done stuffing characters
+ * into the driver. If there is no room in the queue, the character is
+ * ignored.
*
- * Note: Do not call this function directly, call tty_put_char
+ * Optional: Kernel will use the @write method if not provided. Do not
+ * call this function directly, call tty_put_char().
*
- * void (*flush_chars)(struct tty_struct *tty);
+ * @flush_chars: ``void ()(struct tty_struct *tty)``
*
- * This routine is called by the kernel after it has written a
- * series of characters to the tty device using put_char().
+ * This routine is called by the kernel after it has written a
+ * series of characters to the tty device using @put_char().
*
- * Optional:
+ * Optional. Do not call this function directly, call
+ * tty_driver_flush_chars().
*
- * Note: Do not call this function directly, call tty_driver_flush_chars
- *
- * int (*write_room)(struct tty_struct *tty);
+ * @write_room: ``unsigned int ()(struct tty_struct *tty)``
*
- * This routine returns the numbers of characters the tty driver
- * will accept for queuing to be written. This number is subject
- * to change as output buffers get emptied, or if the output flow
+ * This routine returns the number of characters the @tty driver
+ * will accept for queuing to be written. This number is subject
+ * to change as output buffers get emptied, or if output flow
 * control is asserted.
*
- * Required if write method is provided else not needed.
+ * The ldisc is responsible for being intelligent about multi-threading of
+ * write_room/write calls.
+ *
+ * Required if the @write method is provided, else not needed. Do not call
+ * this function directly, call tty_write_room().
+ *
+ * @chars_in_buffer: ``unsigned int ()(struct tty_struct *tty)``
+ *
+ * This routine returns the number of characters in the device private
+ * output queue. Used in tty_wait_until_sent() and for poll()
+ * implementation.
+ *
+ * Optional: if not provided, it is assumed there is no queue on the
+ * device. Do not call this function directly, call tty_chars_in_buffer().
+ *
+ * @ioctl: ``int ()(struct tty_struct *tty, unsigned int cmd,
+ * unsigned long arg)``
*
- * Note: Do not call this function directly, call tty_write_room
- *
- * int (*ioctl)(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
+ * This routine allows the @tty driver to implement device-specific
+ * ioctls. If the ioctl number passed in @cmd is not recognized by the
+ * driver, it should return %ENOIOCTLCMD.
*
- * This routine allows the tty driver to implement
- * device-specific ioctls. If the ioctl number passed in cmd
- * is not recognized by the driver, it should return ENOIOCTLCMD.
+ * Optional.
*
- * Optional
+ * @compat_ioctl: ``long ()(struct tty_struct *tty, unsigned int cmd,
+ * unsigned long arg)``
*
- * long (*compat_ioctl)(struct tty_struct *tty,,
- * unsigned int cmd, unsigned long arg);
+ * Implement ioctl processing for a 32-bit process on a 64-bit system.
*
- * implement ioctl processing for 32 bit process on 64 bit system
+ * Optional.
*
- * Optional
- *
- * void (*set_termios)(struct tty_struct *tty, struct ktermios * old);
+ * @set_termios: ``void ()(struct tty_struct *tty, const struct ktermios *old)``
*
- * This routine allows the tty driver to be notified when
- * device's termios settings have changed.
+ * This routine allows the @tty driver to be notified when device's
+ * termios settings have changed. New settings are in @tty->termios.
+ * Previous settings are passed in the @old argument.
*
- * Optional: Called under the termios lock
+ * The API is defined such that the driver should return the actual modes
+ * selected. This means that the driver is responsible for modifying any
+ * bits in @tty->termios it cannot fulfill to indicate the actual modes
+ * being used.
*
+ * Optional. Called under the @tty->termios_rwsem. May sleep.
*
- * void (*set_ldisc)(struct tty_struct *tty);
+ * @set_ldisc: ``void ()(struct tty_struct *tty)``
*
- * This routine allows the tty driver to be notified when the
- * device's termios settings have changed.
+ * This routine allows the @tty driver to be notified when the device's
+ * line discipline is being changed. At the point this is done the
+ * discipline is not yet usable.
*
- * Optional: Called under BKL (currently)
- *
- * void (*throttle)(struct tty_struct * tty);
+ * Optional. Called under the @tty->ldisc_sem and @tty->termios_rwsem.
*
- * This routine notifies the tty driver that input buffers for
- * the line discipline are close to full, and it should somehow
- * signal that no more characters should be sent to the tty.
+ * @throttle: ``void ()(struct tty_struct *tty)``
*
- * Optional: Always invoke via tty_throttle(), called under the
- * termios lock.
- *
- * void (*unthrottle)(struct tty_struct * tty);
+ * This routine notifies the @tty driver that input buffers for the line
+ * discipline are close to full, and it should somehow signal that no more
+ * characters should be sent to the @tty.
*
- * This routine notifies the tty drivers that it should signals
- * that characters can now be sent to the tty without fear of
- * overrunning the input buffers of the line disciplines.
- *
- * Optional: Always invoke via tty_unthrottle(), called under the
- * termios lock.
+ * Serialization, including with @unthrottle(), is the job of the ldisc
+ * layer.
*
- * void (*stop)(struct tty_struct *tty);
+ * Optional: Always invoke via tty_throttle_safe(). Called under the
+ * @tty->termios_rwsem.
*
- * This routine notifies the tty driver that it should stop
- * outputting characters to the tty device.
+ * @unthrottle: ``void ()(struct tty_struct *tty)``
*
- * Called with ->flow_lock held. Serialized with start() method.
+ * This routine notifies the @tty driver that it should signal that
+ * characters can now be sent to the @tty without fear of overrunning the
+ * input buffers of the line disciplines.
*
- * Optional:
+ * Optional. Always invoke via tty_unthrottle(). Called under the
+ * @tty->termios_rwsem.
*
- * Note: Call stop_tty not this method.
- *
- * void (*start)(struct tty_struct *tty);
+ * @stop: ``void ()(struct tty_struct *tty)``
*
- * This routine notifies the tty driver that it resume sending
+ * This routine notifies the @tty driver that it should stop outputting
* characters to the tty device.
*
- * Called with ->flow_lock held. Serialized with stop() method.
+ * Called with @tty->flow.lock held. Serialized with @start() method.
*
- * Optional:
+ * Optional. Always invoke via stop_tty().
*
- * Note: Call start_tty not this method.
- *
- * void (*hangup)(struct tty_struct *tty);
+ * @start: ``void ()(struct tty_struct *tty)``
*
- * This routine notifies the tty driver that it should hang up the
- * tty device.
+ * This routine notifies the @tty driver that it should resume sending
+ * characters to the @tty device.
*
- * Optional:
+ * Called with @tty->flow.lock held. Serialized with @stop() method.
*
- * Called with tty lock held.
+ * Optional. Always invoke via start_tty().
*
- * int (*break_ctl)(struct tty_struct *tty, int state);
+ * @hangup: ``void ()(struct tty_struct *tty)``
*
- * This optional routine requests the tty driver to turn on or
- * off BREAK status on the RS-232 port. If state is -1,
- * then the BREAK status should be turned on; if state is 0, then
- * BREAK should be turned off.
+ * This routine notifies the @tty driver that it should hang up the @tty
+ * device.
*
- * If this routine is implemented, the high-level tty driver will
- * handle the following ioctls: TCSBRK, TCSBRKP, TIOCSBRK,
- * TIOCCBRK.
+ * Optional. Called with tty lock held.
*
- * If the driver sets TTY_DRIVER_HARDWARE_BREAK then the interface
- * will also be called with actual times and the hardware is expected
- * to do the delay work itself. 0 and -1 are still used for on/off.
+ * @break_ctl: ``int ()(struct tty_struct *tty, int state)``
*
- * Optional: Required for TCSBRK/BRKP/etc handling.
+ * This optional routine requests the @tty driver to turn on or off BREAK
+ * status on the RS-232 port. If @state is -1, then the BREAK status
+ * should be turned on; if @state is 0, then BREAK should be turned off.
*
- * void (*wait_until_sent)(struct tty_struct *tty, int timeout);
- *
- * This routine waits until the device has written out all of the
- * characters in its transmitter FIFO.
+ * If this routine is implemented, the high-level tty driver will handle
+ * the following ioctls: %TCSBRK, %TCSBRKP, %TIOCSBRK, %TIOCCBRK.
*
- * Optional: If not provided the device is assumed to have no FIFO
+ * If the driver sets %TTY_DRIVER_HARDWARE_BREAK in tty_alloc_driver(),
+ * then the interface will also be called with actual times and the
+ * hardware is expected to do the delay work itself. 0 and -1 are still
+ * used for on/off.
*
- * Note: Usually correct to call tty_wait_until_sent
+ * Optional: Required for %TCSBRK/%BRKP/etc. handling. May sleep.
*
- * void (*send_xchar)(struct tty_struct *tty, char ch);
+ * @flush_buffer: ``void ()(struct tty_struct *tty)``
*
- * This routine is used to send a high-priority XON/XOFF
- * character to the device.
+ * This routine discards the device's private output buffer. Invoked on
+ * close and hangup, and to implement the %TCOFLUSH ioctl and similar.
*
- * Optional: If not provided then the write method is called under
- * the atomic write lock to keep it serialized with the ldisc.
+ * Optional: if not provided, it is assumed there is no queue on the
+ * device. Do not call this function directly, call
+ * tty_driver_flush_buffer().
*
- * int (*resize)(struct tty_struct *tty, struct winsize *ws)
+ * @wait_until_sent: ``void ()(struct tty_struct *tty, int timeout)``
*
- * Called when a termios request is issued which changes the
- * requested terminal geometry.
+ * This routine waits until the device has written out all of the
+ * characters in its transmitter FIFO, or until @timeout (in jiffies) is
+ * reached.
+ *
+ * Optional: If not provided, the device is assumed to have no FIFO.
+ * Usually correct to invoke via tty_wait_until_sent(). May sleep.
+ *
+ * @send_xchar: ``void ()(struct tty_struct *tty, char ch)``
+ *
+ * This routine is used to send a high-priority XON/XOFF character (@ch)
+ * to the @tty device.
+ *
+ * Optional: If not provided, then the @write method is called under
+ * the @tty->atomic_write_lock to keep it serialized with the ldisc.
+ *
+ * @tiocmget: ``int ()(struct tty_struct *tty)``
+ *
+ * This routine is used to obtain the modem status bits from the @tty
+ * driver.
+ *
+ * Optional: If not provided, then %ENOTTY is returned from the %TIOCMGET
+ * ioctl. Do not call this function directly, call tty_tiocmget().
+ *
+ * @tiocmset: ``int ()(struct tty_struct *tty,
+ * unsigned int set, unsigned int clear)``
+ *
+ * This routine is used to set the modem status bits in the @tty driver.
+ * First, @clear bits should be cleared, then @set bits set.
+ *
+ * Optional: If not provided, then %ENOTTY is returned from the %TIOCMSET
+ * ioctl. Do not call this function directly, call tty_tiocmset().
+ *
+ * @resize: ``int ()(struct tty_struct *tty, struct winsize *ws)``
+ *
+ * Called when a termios request is issued which changes the requested
+ * terminal geometry to @ws.
*
* Optional: the default action is to update the termios structure
* without error. This is usually the correct behaviour. Drivers should
- * not force errors here if they are not resizable objects (eg a serial
+ * not force errors here if they are not resizable objects (e.g. a serial
* line). See tty_do_resize() if you need to wrap the standard method
- * in your own logic - the usual case.
+ * in your own logic -- the usual case.
+ *
+ * @get_icount: ``int ()(struct tty_struct *tty,
+ * struct serial_icounter_struct *icount)``
+ *
+ * Called when the @tty device receives a %TIOCGICOUNT ioctl. Passed a
+ * kernel structure @icount to complete.
+ *
+ * Optional: called only if provided, otherwise %ENOTTY will be returned.
+ *
+ * @get_serial: ``int ()(struct tty_struct *tty, struct serial_struct *p)``
+ *
+ * Called when the @tty device receives a %TIOCGSERIAL ioctl. Passed a
+ * kernel structure @p (&struct serial_struct) to complete.
+ *
+ * Optional: called only if provided, otherwise %ENOTTY will be returned.
+ * Do not call this function directly, call tty_tiocgserial().
*
- * void (*set_termiox)(struct tty_struct *tty, struct termiox *new);
+ * @set_serial: ``int ()(struct tty_struct *tty, struct serial_struct *p)``
*
- * Called when the device receives a termiox based ioctl. Passes down
- * the requested data from user space. This method will not be invoked
- * unless the tty also has a valid tty->termiox pointer.
+ * Called when the @tty device receives a %TIOCSSERIAL ioctl. Passed a
+ * kernel structure @p (&struct serial_struct) to set the values from.
*
- * Optional: Called under the termios lock
+ * Optional: called only if provided, otherwise %ENOTTY will be returned.
+ * Do not call this function directly, call tty_tiocsserial().
*
- * int (*get_icount)(struct tty_struct *tty, struct serial_icounter *icount);
+ * @show_fdinfo: ``void ()(struct tty_struct *tty, struct seq_file *m)``
*
- * Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel
- * structure to complete. This method is optional and will only be called
- * if provided (otherwise EINVAL will be returned).
+ * Called when the @tty device file descriptor receives a fdinfo request
+ * from VFS (to show in /proc/<pid>/fdinfo/). @m should be filled with
+ * information.
+ *
+ * Optional: called only if provided, otherwise nothing is written to @m.
+ * Do not call this function directly, call tty_show_fdinfo().
+ *
+ * @poll_init: ``int ()(struct tty_driver *driver, int line, char *options)``
+ *
+ * kgdboc support (Documentation/dev-tools/kgdb.rst). This routine is
+ * called to initialize the HW for later use by calling @poll_get_char or
+ * @poll_put_char.
+ *
+ * Optional: called only if provided, otherwise skipped as a non-polling
+ * driver.
+ *
+ * @poll_get_char: ``int ()(struct tty_driver *driver, int line)``
+ *
+ * kgdboc support (see @poll_init). @driver should read a character from a
+ * tty identified by @line and return it.
+ *
+ * Optional: called only if @poll_init provided.
+ *
+ * @poll_put_char: ``void ()(struct tty_driver *driver, int line, char ch)``
+ *
+ * kgdboc support (see @poll_init). @driver should write character @ch to
+ * a tty identified by @line.
+ *
+ * Optional: called only if @poll_init provided.
+ *
+ * @proc_show: ``int ()(struct seq_file *m, void *driver)``
+ *
+ * Driver @driver (cast to &struct tty_driver) can show additional info in
+ * /proc/tty/driver/<driver_name>. It is enough to fill @m with the
+ * information.
+ *
+ * Optional: called only if provided, otherwise no /proc entry created.
+ *
+ * This structure defines the interface between the low-level tty driver and
+ * the tty routines. Unless noted otherwise, these routines are optional
+ * and can be filled in with a %NULL pointer.
*/
-
-#include <linux/export.h>
-#include <linux/fs.h>
-#include <linux/list.h>
-#include <linux/cdev.h>
-#include <linux/termios.h>
-#include <linux/seq_file.h>
-
-struct tty_struct;
-struct tty_driver;
-struct serial_icounter_struct;
-struct serial_struct;
-
struct tty_operations {
struct tty_struct * (*lookup)(struct tty_driver *driver,
struct file *filp, int idx);
@@ -264,13 +360,13 @@ struct tty_operations {
const unsigned char *buf, int count);
int (*put_char)(struct tty_struct *tty, unsigned char ch);
void (*flush_chars)(struct tty_struct *tty);
- int (*write_room)(struct tty_struct *tty);
- int (*chars_in_buffer)(struct tty_struct *tty);
+ unsigned int (*write_room)(struct tty_struct *tty);
+ unsigned int (*chars_in_buffer)(struct tty_struct *tty);
int (*ioctl)(struct tty_struct *tty,
unsigned int cmd, unsigned long arg);
long (*compat_ioctl)(struct tty_struct *tty,
unsigned int cmd, unsigned long arg);
- void (*set_termios)(struct tty_struct *tty, struct ktermios * old);
+ void (*set_termios)(struct tty_struct *tty, const struct ktermios *old);
void (*throttle)(struct tty_struct * tty);
void (*unthrottle)(struct tty_struct * tty);
void (*stop)(struct tty_struct *tty);
@@ -285,7 +381,6 @@ struct tty_operations {
int (*tiocmset)(struct tty_struct *tty,
unsigned int set, unsigned int clear);
int (*resize)(struct tty_struct *tty, struct winsize *ws);
- int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew);
int (*get_icount)(struct tty_struct *tty,
struct serial_icounter_struct *icount);
int (*get_serial)(struct tty_struct *tty, struct serial_struct *p);
@@ -296,26 +391,62 @@ struct tty_operations {
int (*poll_get_char)(struct tty_driver *driver, int line);
void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
#endif
- int (*proc_show)(struct seq_file *, void *);
+ int (*proc_show)(struct seq_file *m, void *driver);
} __randomize_layout;
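
Putting the hooks above together, the smallest useful writable driver provides the two required methods plus @write and @write_room (a hedged sketch; the ex_* names are hypothetical and no real hardware is driven):

	static int ex_open(struct tty_struct *tty, struct file *filp)
	{
		return 0;
	}

	static void ex_close(struct tty_struct *tty, struct file *filp)
	{
	}

	static int ex_write(struct tty_struct *tty, const unsigned char *buf,
			    int count)
	{
		return count;		/* pretend all bytes were accepted */
	}

	static unsigned int ex_write_room(struct tty_struct *tty)
	{
		return 4096;		/* hypothetical fixed room */
	}

	static const struct tty_operations example_ops = {
		.open		= ex_open,	/* required */
		.close		= ex_close,	/* required */
		.write		= ex_write,	/* required for writable devices */
		.write_room	= ex_write_room,/* required when .write is set */
	};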
+/**
+ * struct tty_driver -- driver for TTY devices
+ *
+ * @kref: reference counting. Reaching zero frees all the internals and the
+ * driver.
+ * @cdevs: allocated/registered character /dev devices
+ * @owner: module owning this driver. Used drivers cannot be rmmod'ed.
+ * Automatically set by tty_alloc_driver().
+ * @driver_name: name of the driver used in /proc/tty
+ * @name: used for constructing /dev node name
+ * @name_base: used as a number base for constructing /dev node name
+ * @major: major /dev device number (zero for autoassignment)
+ * @minor_start: the first minor /dev device number
+ * @num: number of devices allocated
+ * @type: type of tty driver (%TTY_DRIVER_TYPE_)
+ * @subtype: subtype of tty driver (%SYSTEM_TYPE_, %PTY_TYPE_, %SERIAL_TYPE_)
+ * @init_termios: termios to set to each tty initially (e.g. %tty_std_termios)
+ * @flags: tty driver flags (%TTY_DRIVER_)
+ * @proc_entry: proc fs entry, used internally
+ * @other: driver of the linked tty; only used for the PTY driver
+ * @ttys: array of active &struct tty_struct, set by tty_standard_install()
+ * @ports: array of &struct tty_port; can be set during initialization by
+ * tty_port_link_device() and similar
+ * @termios: storage for termios at each TTY close for the next open
+ * @driver_state: pointer to driver's arbitrary data
+ * @ops: driver hooks for TTYs. Set them using tty_set_operations(). Use &struct
+ * tty_port helpers in them as much as possible.
+ * @tty_drivers: used internally to link tty_drivers together
+ *
+ * The usual handling of &struct tty_driver is to allocate it by
+ * tty_alloc_driver(), set up all the necessary members, and register it by
+ * tty_register_driver(). At last, the driver is torn down by calling
+ * tty_unregister_driver() followed by tty_driver_kref_put().
+ *
+ * The fields required to be set before calling tty_register_driver() include
+ * @driver_name, @name, @type, @subtype, @init_termios, and @ops.
+ */
struct tty_driver {
- int magic; /* magic number for this structure */
- struct kref kref; /* Reference management */
+ struct kref kref;
struct cdev **cdevs;
struct module *owner;
const char *driver_name;
const char *name;
- int name_base; /* offset of printed name */
- int major; /* major device number */
- int minor_start; /* start of minor device number */
- unsigned int num; /* number of devices allocated */
- short type; /* type of tty driver */
- short subtype; /* subtype of tty driver */
- struct ktermios init_termios; /* Initial termios */
- unsigned long flags; /* tty driver flags */
- struct proc_dir_entry *proc_entry; /* /proc fs entry */
- struct tty_driver *other; /* only used for the PTY driver */
+ int name_base;
+ int major;
+ int minor_start;
+ unsigned int num;
+ short type;
+ short subtype;
+ struct ktermios init_termios;
+ unsigned long flags;
+ struct proc_dir_entry *proc_entry;
+ struct tty_driver *other;
/*
* Pointer to the tty data structures
@@ -335,83 +466,75 @@ struct tty_driver {
extern struct list_head tty_drivers;
-extern struct tty_driver *__tty_alloc_driver(unsigned int lines,
- struct module *owner, unsigned long flags);
-extern void put_tty_driver(struct tty_driver *driver);
-extern void tty_set_operations(struct tty_driver *driver,
- const struct tty_operations *op);
-extern struct tty_driver *tty_find_polling_driver(char *name, int *line);
+struct tty_driver *__tty_alloc_driver(unsigned int lines, struct module *owner,
+ unsigned long flags);
+struct tty_driver *tty_find_polling_driver(char *name, int *line);
-extern void tty_driver_kref_put(struct tty_driver *driver);
+void tty_driver_kref_put(struct tty_driver *driver);
/* Use TTY_DRIVER_* flags below */
#define tty_alloc_driver(lines, flags) \
__tty_alloc_driver(lines, THIS_MODULE, flags)
-/*
- * DEPRECATED Do not use this in new code, use tty_alloc_driver instead.
- * (And change the return value checks.)
- */
-static inline struct tty_driver *alloc_tty_driver(unsigned int lines)
-{
- struct tty_driver *ret = tty_alloc_driver(lines, 0);
- if (IS_ERR(ret))
- return NULL;
- return ret;
-}
-
static inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d)
{
kref_get(&d->kref);
return d;
}
-/* tty driver magic number */
-#define TTY_DRIVER_MAGIC 0x5402
+static inline void tty_set_operations(struct tty_driver *driver,
+ const struct tty_operations *op)
+{
+ driver->ops = op;
+}
-/*
- * tty driver flags
- *
- * TTY_DRIVER_RESET_TERMIOS --- requests the tty layer to reset the
- * termios setting when the last process has closed the device.
- * Used for PTY's, in particular.
- *
- * TTY_DRIVER_REAL_RAW --- if set, indicates that the driver will
- * guarantee never not to set any special character handling
- * flags if ((IGNBRK || (!BRKINT && !PARMRK)) && (IGNPAR ||
- * !INPCK)). That is, if there is no reason for the driver to
- * send notifications of parity and break characters up to the
- * line driver, it won't do so. This allows the line driver to
- * optimize for this case if this flag is set. (Note that there
- * is also a promise, if the above case is true, not to signal
- * overruns, either.)
- *
- * TTY_DRIVER_DYNAMIC_DEV --- if set, the individual tty devices need
- * to be registered with a call to tty_register_device() when the
- * device is found in the system and unregistered with a call to
- * tty_unregister_device() so the devices will be show up
- * properly in sysfs. If not set, driver->num entries will be
- * created by the tty core in sysfs when tty_register_driver() is
- * called. This is to be used by drivers that have tty devices
- * that can appear and disappear while the main tty driver is
- * registered with the tty core.
- *
- * TTY_DRIVER_DEVPTS_MEM -- don't use the standard arrays, instead
- * use dynamic memory keyed through the devpts filesystem. This
- * is only applicable to the pty driver.
- *
- * TTY_DRIVER_HARDWARE_BREAK -- hardware handles break signals. Pass
- * the requested timeout to the caller instead of using a simple
- * on/off interface.
- *
- * TTY_DRIVER_DYNAMIC_ALLOC -- do not allocate structures which are
- * needed per line for this driver as it would waste memory.
- * The driver will take care.
- *
- * TTY_DRIVER_UNNUMBERED_NODE -- do not create numbered /dev nodes. In
- * other words create /dev/ttyprintk and not /dev/ttyprintk0.
- * Applicable only when a driver for a single tty device is
- * being allocated.
+/**
+ * DOC: TTY Driver Flags
+ *
+ * TTY_DRIVER_RESET_TERMIOS
+ * Requests the tty layer to reset the termios setting when the last
+ * process has closed the device. Used for PTYs, in particular.
+ *
+ * TTY_DRIVER_REAL_RAW
+ * Indicates that the driver will guarantee not to set any special
+ * character handling flags if this is set for the tty:
+ *
+ * ``(IGNBRK || (!BRKINT && !PARMRK)) && (IGNPAR || !INPCK)``
+ *
+ * That is, if there is no reason for the driver to
+ * send notifications of parity and break characters up to the line
+ * driver, it won't do so. This allows the line driver to optimize for
+ * this case if this flag is set. (Note that there is also a promise, if
+ * the above case is true, not to signal overruns, either.)
+ *
+ * TTY_DRIVER_DYNAMIC_DEV
+ * The individual tty devices need to be registered with a call to
+ * tty_register_device() when the device is found in the system and
+ * unregistered with a call to tty_unregister_device() so the devices will
+ * show up properly in sysfs. If not set, all &tty_driver.num entries
+ * will be created by the tty core in sysfs when tty_register_driver() is
+ * called. This is to be used by drivers that have tty devices that can
+ * appear and disappear while the main tty driver is registered with the
+ * tty core.
+ *
+ * TTY_DRIVER_DEVPTS_MEM
+ * Don't use the standard arrays (&tty_driver.ttys and
+ * &tty_driver.termios), instead use dynamic memory keyed through the
+ * devpts filesystem. This is only applicable to the PTY driver.
+ *
+ * TTY_DRIVER_HARDWARE_BREAK
+ * Hardware handles break signals. Pass the requested timeout to the
+ * &tty_operations.break_ctl instead of using a simple on/off interface.
+ *
+ * TTY_DRIVER_DYNAMIC_ALLOC
+ * Do not allocate structures which are needed per line for this driver
+ * (&tty_driver.ports) as it would waste memory. The driver takes care of
+ * them itself. This is only applicable to the PTY driver.
+ *
+ * TTY_DRIVER_UNNUMBERED_NODE
+ * Do not create numbered ``/dev`` nodes. For example, create
+ * ``/dev/ttyprintk`` and not ``/dev/ttyprintk0``. Applicable only when a
+ * driver for a single tty device is being allocated.
*/
#define TTY_DRIVER_INSTALLED 0x0001
#define TTY_DRIVER_RESET_TERMIOS 0x0002
@@ -443,4 +566,21 @@ static inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d)
/* serial subtype definitions */
#define SERIAL_TYPE_NORMAL 1
+int tty_register_driver(struct tty_driver *driver);
+void tty_unregister_driver(struct tty_driver *driver);
+struct device *tty_register_device(struct tty_driver *driver, unsigned index,
+ struct device *dev);
+struct device *tty_register_device_attr(struct tty_driver *driver,
+ unsigned index, struct device *device, void *drvdata,
+ const struct attribute_group **attr_grp);
+void tty_unregister_device(struct tty_driver *driver, unsigned index);
+
+#ifdef CONFIG_PROC_FS
+void proc_tty_register_driver(struct tty_driver *);
+void proc_tty_unregister_driver(struct tty_driver *);
+#else
+static inline void proc_tty_register_driver(struct tty_driver *d) {}
+static inline void proc_tty_unregister_driver(struct tty_driver *d) {}
+#endif
+
#endif /* #ifdef _LINUX_TTY_DRIVER_H */
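
The lifecycle from the &struct tty_driver kernel-doc above as a sketch (reuses the hypothetical example_ops from earlier; error handling kept minimal):

	static struct tty_driver *example_driver;

	static int __init example_init(void)
	{
		struct tty_driver *drv;
		int ret;

		drv = tty_alloc_driver(4, TTY_DRIVER_REAL_RAW);
		if (IS_ERR(drv))
			return PTR_ERR(drv);

		drv->driver_name = "example";	/* appears in /proc/tty */
		drv->name = "ttyEX";		/* /dev node base name */
		drv->type = TTY_DRIVER_TYPE_SERIAL;
		drv->subtype = SERIAL_TYPE_NORMAL;
		drv->init_termios = tty_std_termios;
		tty_set_operations(drv, &example_ops);

		ret = tty_register_driver(drv);
		if (ret) {
			tty_driver_kref_put(drv);
			return ret;
		}
		example_driver = drv;
		return 0;
	}

	static void __exit example_exit(void)
	{
		tty_unregister_driver(example_driver);
		tty_driver_kref_put(example_driver);
	}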
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index 767f62086bd9..483d41cbcbb7 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -2,17 +2,21 @@
#ifndef _LINUX_TTY_FLIP_H
#define _LINUX_TTY_FLIP_H
-extern int tty_buffer_set_limit(struct tty_port *port, int limit);
-extern int tty_buffer_space_avail(struct tty_port *port);
-extern int tty_buffer_request_room(struct tty_port *port, size_t size);
-extern int tty_insert_flip_string_flags(struct tty_port *port,
+#include <linux/tty_buffer.h>
+#include <linux/tty_port.h>
+
+struct tty_ldisc;
+
+int tty_buffer_set_limit(struct tty_port *port, int limit);
+unsigned int tty_buffer_space_avail(struct tty_port *port);
+int tty_buffer_request_room(struct tty_port *port, size_t size);
+int tty_insert_flip_string_flags(struct tty_port *port,
const unsigned char *chars, const char *flags, size_t size);
-extern int tty_insert_flip_string_fixed_flag(struct tty_port *port,
+int tty_insert_flip_string_fixed_flag(struct tty_port *port,
const unsigned char *chars, char flag, size_t size);
-extern int tty_prepare_flip_string(struct tty_port *port,
- unsigned char **chars, size_t size);
-extern void tty_flip_buffer_push(struct tty_port *port);
-void tty_schedule_flip(struct tty_port *port);
+int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
+ size_t size);
+void tty_flip_buffer_push(struct tty_port *port);
int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag);
static inline int tty_insert_flip_char(struct tty_port *port,
@@ -37,7 +41,10 @@ static inline int tty_insert_flip_string(struct tty_port *port,
return tty_insert_flip_string_fixed_flag(port, chars, TTY_NORMAL, size);
}
-extern void tty_buffer_lock_exclusive(struct tty_port *port);
-extern void tty_buffer_unlock_exclusive(struct tty_port *port);
+int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
+ const char *f, int count);
+
+void tty_buffer_lock_exclusive(struct tty_port *port);
+void tty_buffer_unlock_exclusive(struct tty_port *port);
#endif /* _LINUX_TTY_FLIP_H */
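In a driver's receive path, the flip-buffer helpers above are used to stage incoming data and then kick it towards the line discipline. A minimal sketch with a hypothetical ex_rx() handler:

	/* called from the driver's RX interrupt or poll routine */
	static void ex_rx(struct tty_port *port, const unsigned char *data,
			  size_t len)
	{
		size_t copied = tty_insert_flip_string(port, data, len);

		if (copied < len)		/* buffers full: data was dropped */
			pr_warn("ex_rx: lost %zu bytes\n", len - copied);

		tty_flip_buffer_push(port);	/* hand the data to the ldisc */
	}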
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index b1e6043e9917..dcb61ec11424 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -2,130 +2,14 @@
#ifndef _LINUX_TTY_LDISC_H
#define _LINUX_TTY_LDISC_H
-/*
- * This structure defines the interface between the tty line discipline
- * implementation and the tty routines. The following routines can be
- * defined; unless noted otherwise, they are optional, and can be
- * filled in with a null pointer.
- *
- * int (*open)(struct tty_struct *);
- *
- * This function is called when the line discipline is associated
- * with the tty. The line discipline can use this as an
- * opportunity to initialize any state needed by the ldisc routines.
- *
- * void (*close)(struct tty_struct *);
- *
- * This function is called when the line discipline is being
- * shutdown, either because the tty is being closed or because
- * the tty is being changed to use a new line discipline
- *
- * void (*flush_buffer)(struct tty_struct *tty);
- *
- * This function instructs the line discipline to clear its
- * buffers of any input characters it may have queued to be
- * delivered to the user mode process.
- *
- * ssize_t (*read)(struct tty_struct * tty, struct file * file,
- * unsigned char * buf, size_t nr);
- *
- * This function is called when the user requests to read from
- * the tty. The line discipline will return whatever characters
- * it has buffered up for the user. If this function is not
- * defined, the user will receive an EIO error.
- *
- * ssize_t (*write)(struct tty_struct * tty, struct file * file,
- * const unsigned char * buf, size_t nr);
- *
- * This function is called when the user requests to write to the
- * tty. The line discipline will deliver the characters to the
- * low-level tty device for transmission, optionally performing
- * some processing on the characters first. If this function is
- * not defined, the user will receive an EIO error.
- *
- * int (*ioctl)(struct tty_struct * tty, struct file * file,
- * unsigned int cmd, unsigned long arg);
- *
- * This function is called when the user requests an ioctl which
- * is not handled by the tty layer or the low-level tty driver.
- * It is intended for ioctls which affect line discpline
- * operation. Note that the search order for ioctls is (1) tty
- * layer, (2) tty low-level driver, (3) line discpline. So a
- * low-level driver can "grab" an ioctl request before the line
- * discpline has a chance to see it.
- *
- * int (*compat_ioctl)(struct tty_struct * tty, struct file * file,
- * unsigned int cmd, unsigned long arg);
- *
- * Process ioctl calls from 32-bit process on 64-bit system
- *
- * NOTE: only ioctls that are neither "pointer to compatible
- * structure" nor tty-generic. Something private that takes
- * an integer or a pointer to wordsize-sensitive structure
- * belongs here, but most of ldiscs will happily leave
- * it NULL.
- *
- * void (*set_termios)(struct tty_struct *tty, struct ktermios * old);
- *
- * This function notifies the line discpline that a change has
- * been made to the termios structure.
- *
- * int (*poll)(struct tty_struct * tty, struct file * file,
- * poll_table *wait);
- *
- * This function is called when a user attempts to select/poll on a
- * tty device. It is solely the responsibility of the line
- * discipline to handle poll requests.
- *
- * void (*receive_buf)(struct tty_struct *, const unsigned char *cp,
- * char *fp, int count);
- *
- * This function is called by the low-level tty driver to send
- * characters received by the hardware to the line discpline for
- * processing. <cp> is a pointer to the buffer of input
- * character received by the device. <fp> is a pointer to a
- * pointer of flag bytes which indicate whether a character was
- * received with a parity error, etc. <fp> may be NULL to indicate
- * all data received is TTY_NORMAL.
- *
- * void (*write_wakeup)(struct tty_struct *);
- *
- * This function is called by the low-level tty driver to signal
- * that line discpline should try to send more characters to the
- * low-level driver for transmission. If the line discpline does
- * not have any more data to send, it can just return. If the line
- * discipline does have some data to send, please arise a tasklet
- * or workqueue to do the real data transfer. Do not send data in
- * this hook, it may leads to a deadlock.
- *
- * int (*hangup)(struct tty_struct *)
- *
- * Called on a hangup. Tells the discipline that it should
- * cease I/O to the tty driver. Can sleep. The driver should
- * seek to perform this action quickly but should wait until
- * any pending driver I/O is completed.
- *
- * void (*dcd_change)(struct tty_struct *tty, unsigned int status)
- *
- * Tells the discipline that the DCD pin has changed its status.
- * Used exclusively by the N_PPS (Pulse-Per-Second) line discipline.
- *
- * int (*receive_buf2)(struct tty_struct *, const unsigned char *cp,
- * char *fp, int count);
- *
- * This function is called by the low-level tty driver to send
- * characters received by the hardware to the line discpline for
- * processing. <cp> is a pointer to the buffer of input
- * character received by the device. <fp> is a pointer to a
- * pointer of flag bytes which indicate whether a character was
- * received with a parity error, etc. <fp> may be NULL to indicate
- * all data received is TTY_NORMAL.
- * If assigned, prefer this function for automatic flow control.
- */
+struct tty_struct;
#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/seq_file.h>
/*
* the semaphore definition
@@ -141,7 +25,7 @@ struct ld_semaphore {
#endif
};
-extern void __init_ldsem(struct ld_semaphore *sem, const char *name,
+void __init_ldsem(struct ld_semaphore *sem, const char *name,
struct lock_class_key *key);
#define init_ldsem(sem) \
@@ -152,18 +36,18 @@ do { \
} while (0)
-extern int ldsem_down_read(struct ld_semaphore *sem, long timeout);
-extern int ldsem_down_read_trylock(struct ld_semaphore *sem);
-extern int ldsem_down_write(struct ld_semaphore *sem, long timeout);
-extern int ldsem_down_write_trylock(struct ld_semaphore *sem);
-extern void ldsem_up_read(struct ld_semaphore *sem);
-extern void ldsem_up_write(struct ld_semaphore *sem);
+int ldsem_down_read(struct ld_semaphore *sem, long timeout);
+int ldsem_down_read_trylock(struct ld_semaphore *sem);
+int ldsem_down_write(struct ld_semaphore *sem, long timeout);
+int ldsem_down_write_trylock(struct ld_semaphore *sem);
+void ldsem_up_read(struct ld_semaphore *sem);
+void ldsem_up_write(struct ld_semaphore *sem);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass,
- long timeout);
-extern int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
- long timeout);
+int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass,
+ long timeout);
+int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
+ long timeout);
#else
# define ldsem_down_read_nested(sem, subclass, timeout) \
ldsem_down_read(sem, timeout)
@@ -171,45 +55,196 @@ extern int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
ldsem_down_write(sem, timeout)
#endif
-
+/**
+ * struct tty_ldisc_ops - ldisc operations
+ *
+ * @name: name of this ldisc rendered in /proc/tty/ldiscs
+ * @num: ``N_*`` number (%N_TTY, %N_HDLC, ...) reserved to this ldisc
+ *
+ * @open: [TTY] ``int ()(struct tty_struct *tty)``
+ *
+ * This function is called when the line discipline is associated with the
+ * @tty. No other call into the line discipline for this tty will occur
+ * until it completes successfully. It should initialize any state needed
+ * by the ldisc, and set @tty->receive_room to the maximum amount of data
+ * the line discipline is willing to accept from the driver with a single
+ * call to @receive_buf(). Returning an error will prevent the ldisc from
+ * being attached.
+ *
+ * Can sleep.
+ *
+ * @close: [TTY] ``void ()(struct tty_struct *tty)``
+ *
+ * This function is called when the line discipline is being shutdown,
+ * either because the @tty is being closed or because the @tty is being
+ * changed to use a new line discipline. At the point of execution no
+ * further users will enter the ldisc code for this tty.
+ *
+ * Can sleep.
+ *
+ * @flush_buffer: [TTY] ``void ()(struct tty_struct *tty)``
+ *
+ * This function instructs the line discipline to clear its buffers of any
+ * input characters it may have queued to be delivered to the user mode
+ * process. It may be called at any point between open and close.
+ *
+ * @read: [TTY] ``ssize_t ()(struct tty_struct *tty, struct file *file,
+ * unsigned char *buf, size_t nr)``
+ *
+ * This function is called when the user requests to read from the @tty.
+ * The line discipline will return whatever characters it has buffered up
+ * for the user. If this function is not defined, the user will receive
+ * an %EIO error. Multiple read calls may occur in parallel and the ldisc
+ * must deal with serialization issues.
+ *
+ * Can sleep.
+ *
+ * @write: [TTY] ``ssize_t ()(struct tty_struct *tty, struct file *file,
+ * const unsigned char *buf, size_t nr)``
+ *
+ * This function is called when the user requests to write to the @tty.
+ * The line discipline will deliver the characters to the low-level tty
+ * device for transmission, optionally performing some processing on the
+ * characters first. If this function is not defined, the user will
+ * receive an %EIO error.
+ *
+ * Can sleep.
+ *
+ * @ioctl: [TTY] ``int ()(struct tty_struct *tty, unsigned int cmd,
+ * unsigned long arg)``
+ *
+ * This function is called when the user requests an ioctl which is not
+ * handled by the tty layer or the low-level tty driver. It is intended
+ * for ioctls which affect line discipline operation. Note that the search
+ * order for ioctls is (1) tty layer, (2) tty low-level driver, (3) line
+ * discipline. So a low-level driver can "grab" an ioctl request before
+ * the line discipline has a chance to see it.
+ *
+ * @compat_ioctl: [TTY] ``int ()(struct tty_struct *tty, unsigned int cmd,
+ * unsigned long arg)``
+ *
+ * Process ioctl calls from a 32-bit process on a 64-bit system.
+ *
+ * Note that this hook handles only ioctls that are neither "pointer to
+ * compatible structure" nor tty-generic. Something private that takes an
+ * integer or a pointer to a wordsize-sensitive structure belongs here, but
+ * most ldiscs will happily leave it %NULL.
+ *
+ * @set_termios: [TTY] ``void ()(struct tty_struct *tty, const struct ktermios *old)``
+ *
+ * This function notifies the line discipline that a change has been made
+ * to the termios structure.
+ *
+ * @poll: [TTY] ``int ()(struct tty_struct *tty, struct file *file,
+ * struct poll_table_struct *wait)``
+ *
+ * This function is called when a user attempts to select/poll on a @tty
+ * device. It is solely the responsibility of the line discipline to
+ * handle poll requests.
+ *
+ * @hangup: [TTY] ``void ()(struct tty_struct *tty)``
+ *
+ * Called on a hangup. Tells the discipline that it should cease I/O to
+ * the tty driver. The driver should seek to perform this action quickly
+ * but should wait until any pending driver I/O is completed. No further
+ * calls into the ldisc code will occur.
+ *
+ * Can sleep.
+ *
+ * @receive_buf: [DRV] ``void ()(struct tty_struct *tty,
+ * const unsigned char *cp, const char *fp, int count)``
+ *
+ * This function is called by the low-level tty driver to send characters
+ * received by the hardware to the line discipline for processing. @cp is
+ * a pointer to the buffer of input characters received by the device. @fp
+ * is a pointer to an array of flag bytes which indicate whether a
+ * character was received with a parity error, etc. @fp may be %NULL to
+ * indicate all data received is %TTY_NORMAL.
+ *
+ * @write_wakeup: [DRV] ``void ()(struct tty_struct *tty)``
+ *
+ * This function is called by the low-level tty driver to signal that the
+ * line discipline should try to send more characters to the low-level
+ * driver for transmission. If the line discipline does not have any more
+ * data to send, it can just return. If the line discipline does have some
+ * data to send, please schedule a tasklet or workqueue to do the real data
+ * transfer. Do not send data in this hook, it may lead to a deadlock.
+ *
+ * @dcd_change: [DRV] ``void ()(struct tty_struct *tty, unsigned int status)``
+ *
+ * Tells the discipline that the DCD pin has changed its status. Used
+ * exclusively by the %N_PPS (Pulse-Per-Second) line discipline.
+ *
+ * @receive_buf2: [DRV] ``int ()(struct tty_struct *tty,
+ * const unsigned char *cp, const char *fp, int count)``
+ *
+ * This function is called by the low-level tty driver to send characters
+ * received by the hardware to the line discipline for processing. @cp is a
+ * pointer to the buffer of input characters received by the device. @fp
+ * is a pointer to an array of flag bytes which indicate whether a
+ * character was received with a parity error, etc. @fp may be %NULL to
+ * indicate all data received is %TTY_NORMAL. If assigned, prefer this
+ * function for automatic flow control.
+ *
+ * @lookahead_buf: [DRV] ``void ()(struct tty_struct *tty,
+ * const unsigned char *cp, const char *fp, int count)``
+ *
+ * This function is called by the low-level tty driver for characters
+ * not eaten by ->receive_buf() or ->receive_buf2(). It is useful for
+ * processing high-priority characters such as software flow-control
+ * characters that could otherwise get stuck in the intermediate
+ * buffer until the tty has room to receive them. The ldisc must be
+ * able to handle a later ->receive_buf() or ->receive_buf2() call for
+ * the same characters (e.g. by skipping the actions for high-priority
+ * characters already handled by ->lookahead_buf()).
+ *
+ * @owner: module containing this ldisc (for reference counting)
+ *
+ * This structure defines the interface between the tty line discipline
+ * implementation and the tty routines. The above routines can be defined.
+ * Unless noted otherwise, they are optional, and can be filled in with a %NULL
+ * pointer.
+ *
+ * Hooks marked [TTY] are invoked from the TTY core, the [DRV] ones from the
+ * tty_driver side.
+ */
struct tty_ldisc_ops {
- int magic;
char *name;
int num;
- int flags;
/*
* The following routines are called from above.
*/
- int (*open)(struct tty_struct *);
- void (*close)(struct tty_struct *);
+ int (*open)(struct tty_struct *tty);
+ void (*close)(struct tty_struct *tty);
void (*flush_buffer)(struct tty_struct *tty);
ssize_t (*read)(struct tty_struct *tty, struct file *file,
- unsigned char __user *buf, size_t nr);
+ unsigned char *buf, size_t nr,
+ void **cookie, unsigned long offset);
ssize_t (*write)(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr);
- int (*ioctl)(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg);
- int (*compat_ioctl)(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg);
- void (*set_termios)(struct tty_struct *tty, struct ktermios *old);
- __poll_t (*poll)(struct tty_struct *, struct file *,
- struct poll_table_struct *);
- int (*hangup)(struct tty_struct *tty);
+ int (*ioctl)(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg);
+ int (*compat_ioctl)(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg);
+ void (*set_termios)(struct tty_struct *tty, const struct ktermios *old);
+ __poll_t (*poll)(struct tty_struct *tty, struct file *file,
+ struct poll_table_struct *wait);
+ void (*hangup)(struct tty_struct *tty);
/*
* The following routines are called from below.
*/
- void (*receive_buf)(struct tty_struct *, const unsigned char *cp,
- char *fp, int count);
- void (*write_wakeup)(struct tty_struct *);
- void (*dcd_change)(struct tty_struct *, unsigned int);
- int (*receive_buf2)(struct tty_struct *, const unsigned char *cp,
- char *fp, int count);
+ void (*receive_buf)(struct tty_struct *tty, const unsigned char *cp,
+ const char *fp, int count);
+ void (*write_wakeup)(struct tty_struct *tty);
+ void (*dcd_change)(struct tty_struct *tty, unsigned int status);
+ int (*receive_buf2)(struct tty_struct *tty, const unsigned char *cp,
+ const char *fp, int count);
+ void (*lookahead_buf)(struct tty_struct *tty, const unsigned char *cp,
+ const unsigned char *fp, unsigned int count);
struct module *owner;
-
- int refcount;
};
struct tty_ldisc {
@@ -217,11 +252,19 @@ struct tty_ldisc {
struct tty_struct *tty;
};
-#define TTY_LDISC_MAGIC 0x5403
-
-#define LDISC_FLAG_DEFINED 0x00000001
-
#define MODULE_ALIAS_LDISC(ldisc) \
MODULE_ALIAS("tty-ldisc-" __stringify(ldisc))
+extern const struct seq_operations tty_ldiscs_seq_ops;
+
+struct tty_ldisc *tty_ldisc_ref(struct tty_struct *);
+void tty_ldisc_deref(struct tty_ldisc *);
+struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *);
+
+void tty_ldisc_flush(struct tty_struct *tty);
+
+int tty_register_ldisc(struct tty_ldisc_ops *new_ldisc);
+void tty_unregister_ldisc(struct tty_ldisc_ops *ldisc);
+int tty_set_ldisc(struct tty_struct *tty, int disc);
+
#endif /* _LINUX_TTY_LDISC_H */
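With the reworked API, registering a line discipline is a matter of filling &struct tty_ldisc_ops (the ``N_*`` number now lives in the ops) and calling tty_register_ldisc(). A minimal sketch; the N_EXAMPLE number and ex_* callbacks are hypothetical:

	static struct tty_ldisc_ops ex_ldisc_ops = {
		.owner		= THIS_MODULE,
		.num		= N_EXAMPLE,	/* a reserved N_* number */
		.name		= "ex_ldisc",
		.open		= ex_open,
		.close		= ex_close,
		.receive_buf	= ex_receive_buf,
	};

	static int __init ex_ldisc_init(void)
	{
		return tty_register_ldisc(&ex_ldisc_ops);
	}

	static void __exit ex_ldisc_exit(void)
	{
		tty_unregister_ldisc(&ex_ldisc_ops);
	}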
diff --git a/include/linux/tty_port.h b/include/linux/tty_port.h
new file mode 100644
index 000000000000..fa3c3bdaa234
--- /dev/null
+++ b/include/linux/tty_port.h
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TTY_PORT_H
+#define _LINUX_TTY_PORT_H
+
+#include <linux/kfifo.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/tty_buffer.h>
+#include <linux/wait.h>
+
+struct attribute_group;
+struct tty_driver;
+struct tty_port;
+struct tty_struct;
+
+/**
+ * struct tty_port_operations -- operations on tty_port
+ * @carrier_raised: return 1 if the carrier is raised on @port
+ * @dtr_rts: raise the DTR line if @raise is nonzero, otherwise lower DTR
+ * @shutdown: called when the last close completes or a hangup finishes IFF the
+ * port was initialized. Do not use to free resources. Turn off the device
+ * only. Called under the port mutex to serialize against @activate and
+ * @shutdown.
+ * @activate: called under the port mutex from tty_port_open(), serialized using
+ * the port mutex. Supposed to turn on the device.
+ *
+ * FIXME: long term getting the tty argument *out* of this would be good
+ * for consoles.
+ *
+ * @destruct: called on the final put of a port. Free resources, possibly incl.
+ * the port itself.
+ */
+struct tty_port_operations {
+ int (*carrier_raised)(struct tty_port *port);
+ void (*dtr_rts)(struct tty_port *port, int raise);
+ void (*shutdown)(struct tty_port *port);
+ int (*activate)(struct tty_port *port, struct tty_struct *tty);
+ void (*destruct)(struct tty_port *port);
+};
+
+struct tty_port_client_operations {
+	int (*receive_buf)(struct tty_port *port, const unsigned char *cp,
+			   const unsigned char *fp, size_t count);
+ void (*lookahead_buf)(struct tty_port *port, const unsigned char *cp,
+ const unsigned char *fp, unsigned int count);
+ void (*write_wakeup)(struct tty_port *port);
+};
+
+extern const struct tty_port_client_operations tty_port_default_client_ops;
+
+/**
+ * struct tty_port -- port level information
+ *
+ * @buf: buffer for this port, locked internally
+ * @tty: back pointer to &struct tty_struct, valid only if the tty is open. Use
+ * tty_port_tty_get() to obtain it (and tty_kref_put() to release).
+ * @itty: internal back pointer to &struct tty_struct. Avoid this. It should be
+ * eliminated in the long term.
+ * @ops: tty port operations (like activate, shutdown), see &struct
+ * tty_port_operations
+ * @client_ops: tty port client operations (like receive_buf, write_wakeup).
+ * By default, tty_port_default_client_ops is used.
+ * @lock: lock protecting @tty
+ * @blocked_open: # of procs waiting for open in tty_port_block_til_ready()
+ * @count: usage count
+ * @open_wait: open waiters queue (waiting e.g. for a carrier)
+ * @delta_msr_wait: modem status change queue (waiting for MSR changes)
+ * @flags: user TTY flags (%ASYNC_)
+ * @iflags: internal flags (%TTY_PORT_)
+ * @console: when set, the port is a console
+ * @mutex: locking, for open, shutdown and other port operations
+ * @buf_mutex: @xmit_buf alloc lock
+ * @xmit_buf: optional xmit buffer used by some drivers
+ * @xmit_fifo: optional xmit buffer used by some drivers
+ * @close_delay: delay in jiffies to wait when closing the port
+ * @closing_wait: delay in jiffies for output to be sent before closing
+ * @drain_delay: set to zero if no pure time based drain is needed else set to
+ * size of fifo
+ * @kref: references counter. Reaching zero calls @ops->destruct() if non-%NULL
+ * or frees the port otherwise.
+ * @client_data: pointer to private data, for @client_ops
+ *
+ * Each device keeps its own port level information. &struct tty_port was
+ * introduced as a common structure for such information. As every TTY device
+ * shall have a backing tty_port structure, every driver can use these members.
+ *
+ * The tty port has a different lifetime to the tty so must be kept apart.
+ * In addition, be careful: tty -> port mappings are valid for the life
+ * of the tty object, but in many cases port -> tty mappings are valid only
+ * until a hangup, so don't use the wrong path.
+ *
+ * Tty port shall be initialized by tty_port_init() and shut down either by
+ * tty_port_destroy() (refcounting not used), or tty_port_put() (refcounting).
+ *
+ * There are a lot of helpers around &struct tty_port too. To name the most
+ * significant ones: tty_port_open(), tty_port_close() (or
+ * tty_port_close_start() and tty_port_close_end() separately if need be), and
+ * tty_port_hangup(). These call @ops->activate() and @ops->shutdown() as
+ * needed.
+ */
+struct tty_port {
+ struct tty_bufhead buf;
+ struct tty_struct *tty;
+ struct tty_struct *itty;
+ const struct tty_port_operations *ops;
+ const struct tty_port_client_operations *client_ops;
+ spinlock_t lock;
+ int blocked_open;
+ int count;
+ wait_queue_head_t open_wait;
+ wait_queue_head_t delta_msr_wait;
+ unsigned long flags;
+ unsigned long iflags;
+ unsigned char console:1;
+ struct mutex mutex;
+ struct mutex buf_mutex;
+ unsigned char *xmit_buf;
+ DECLARE_KFIFO_PTR(xmit_fifo, unsigned char);
+ unsigned int close_delay;
+ unsigned int closing_wait;
+ int drain_delay;
+ struct kref kref;
+ void *client_data;
+};
+
+/* tty_port::iflags bits -- use atomic bit ops */
+#define TTY_PORT_INITIALIZED 0 /* device is initialized */
+#define TTY_PORT_SUSPENDED 1 /* device is suspended */
+#define TTY_PORT_ACTIVE 2 /* device is open */
+
+/*
+ * uart drivers: use the uart_port::status field and the UPSTAT_* defines
+ * for s/w-based flow control steering and carrier detection status
+ */
+#define TTY_PORT_CTS_FLOW 3 /* h/w flow control enabled */
+#define TTY_PORT_CHECK_CD 4 /* carrier detect enabled */
+#define TTY_PORT_KOPENED 5 /* device exclusively opened by
+ kernel */
+
+void tty_port_init(struct tty_port *port);
+void tty_port_link_device(struct tty_port *port, struct tty_driver *driver,
+ unsigned index);
+struct device *tty_port_register_device(struct tty_port *port,
+ struct tty_driver *driver, unsigned index,
+ struct device *device);
+struct device *tty_port_register_device_attr(struct tty_port *port,
+ struct tty_driver *driver, unsigned index,
+ struct device *device, void *drvdata,
+ const struct attribute_group **attr_grp);
+struct device *tty_port_register_device_serdev(struct tty_port *port,
+ struct tty_driver *driver, unsigned index,
+ struct device *device);
+struct device *tty_port_register_device_attr_serdev(struct tty_port *port,
+ struct tty_driver *driver, unsigned index,
+ struct device *device, void *drvdata,
+ const struct attribute_group **attr_grp);
+void tty_port_unregister_device(struct tty_port *port,
+ struct tty_driver *driver, unsigned index);
+int tty_port_alloc_xmit_buf(struct tty_port *port);
+void tty_port_free_xmit_buf(struct tty_port *port);
+void tty_port_destroy(struct tty_port *port);
+void tty_port_put(struct tty_port *port);
+
+static inline struct tty_port *tty_port_get(struct tty_port *port)
+{
+ if (port && kref_get_unless_zero(&port->kref))
+ return port;
+ return NULL;
+}
+
+/* Return true if CTS flow control is enabled. */
+static inline bool tty_port_cts_enabled(const struct tty_port *port)
+{
+ return test_bit(TTY_PORT_CTS_FLOW, &port->iflags);
+}
+
+static inline void tty_port_set_cts_flow(struct tty_port *port, bool val)
+{
+ assign_bit(TTY_PORT_CTS_FLOW, &port->iflags, val);
+}
+
+static inline bool tty_port_active(const struct tty_port *port)
+{
+ return test_bit(TTY_PORT_ACTIVE, &port->iflags);
+}
+
+static inline void tty_port_set_active(struct tty_port *port, bool val)
+{
+ assign_bit(TTY_PORT_ACTIVE, &port->iflags, val);
+}
+
+static inline bool tty_port_check_carrier(const struct tty_port *port)
+{
+ return test_bit(TTY_PORT_CHECK_CD, &port->iflags);
+}
+
+static inline void tty_port_set_check_carrier(struct tty_port *port, bool val)
+{
+ assign_bit(TTY_PORT_CHECK_CD, &port->iflags, val);
+}
+
+static inline bool tty_port_suspended(const struct tty_port *port)
+{
+ return test_bit(TTY_PORT_SUSPENDED, &port->iflags);
+}
+
+static inline void tty_port_set_suspended(struct tty_port *port, bool val)
+{
+ assign_bit(TTY_PORT_SUSPENDED, &port->iflags, val);
+}
+
+static inline bool tty_port_initialized(const struct tty_port *port)
+{
+ return test_bit(TTY_PORT_INITIALIZED, &port->iflags);
+}
+
+static inline void tty_port_set_initialized(struct tty_port *port, bool val)
+{
+ assign_bit(TTY_PORT_INITIALIZED, &port->iflags, val);
+}
+
+static inline bool tty_port_kopened(const struct tty_port *port)
+{
+ return test_bit(TTY_PORT_KOPENED, &port->iflags);
+}
+
+static inline void tty_port_set_kopened(struct tty_port *port, bool val)
+{
+ assign_bit(TTY_PORT_KOPENED, &port->iflags, val);
+}
+
+struct tty_struct *tty_port_tty_get(struct tty_port *port);
+void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty);
+int tty_port_carrier_raised(struct tty_port *port);
+void tty_port_raise_dtr_rts(struct tty_port *port);
+void tty_port_lower_dtr_rts(struct tty_port *port);
+void tty_port_hangup(struct tty_port *port);
+void tty_port_tty_hangup(struct tty_port *port, bool check_clocal);
+void tty_port_tty_wakeup(struct tty_port *port);
+int tty_port_block_til_ready(struct tty_port *port, struct tty_struct *tty,
+ struct file *filp);
+int tty_port_close_start(struct tty_port *port, struct tty_struct *tty,
+ struct file *filp);
+void tty_port_close_end(struct tty_port *port, struct tty_struct *tty);
+void tty_port_close(struct tty_port *port, struct tty_struct *tty,
+ struct file *filp);
+int tty_port_install(struct tty_port *port, struct tty_driver *driver,
+ struct tty_struct *tty);
+int tty_port_open(struct tty_port *port, struct tty_struct *tty,
+ struct file *filp);
+
+static inline int tty_port_users(struct tty_port *port)
+{
+ return port->count + port->blocked_open;
+}
+
+#endif
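The helpers declared above let a driver delegate most of its open/close bookkeeping to the port. A minimal sketch, assuming hypothetical ex_activate()/ex_shutdown() callbacks that power the hardware up and down:

	static const struct tty_port_operations ex_port_ops = {
		.activate = ex_activate,	/* bring the hardware up */
		.shutdown = ex_shutdown,	/* shut the hardware down */
	};

	static struct tty_port ex_port;

	static void ex_setup(void)		/* e.g. at probe time */
	{
		tty_port_init(&ex_port);
		ex_port.ops = &ex_port_ops;
	}

	static int ex_install(struct tty_driver *drv, struct tty_struct *tty)
	{
		return tty_port_install(&ex_port, drv, tty); /* sets tty->port */
	}

	static int ex_open(struct tty_struct *tty, struct file *filp)
	{
		return tty_port_open(tty->port, tty, filp);
	}

	static void ex_close(struct tty_struct *tty, struct file *filp)
	{
		tty_port_close(tty->port, tty, filp);
	}

tty_port_open() blocks until the carrier is raised (unless the open is non-blocking) and calls @activate on first open; tty_port_close() runs the close/drain sequence and calls @shutdown on last close.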
diff --git a/include/linux/typecheck.h b/include/linux/typecheck.h
index 20d310331eb5..46b15e2aaefb 100644
--- a/include/linux/typecheck.h
+++ b/include/linux/typecheck.h
@@ -22,4 +22,13 @@
(void)__tmp; \
})
+/*
+ * Check at compile time that something is a pointer type.
+ */
+#define typecheck_pointer(x) \
+({ typeof(x) __dummy; \
+ (void)sizeof(*__dummy); \
+ 1; \
+})
+
#endif /* TYPECHECK_H_INCLUDED */
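typecheck_pointer() is meant for macros that must refuse non-pointer arguments at compile time. A hypothetical sketch:

	/* zero the object a pointer refers to; rejects non-pointer arguments */
	#define zero_object(p) do {			\
		typecheck_pointer(p);			\
		memset((p), 0, sizeof(*(p)));		\
	} while (0)

Passing e.g. a plain integer makes the sizeof(*__dummy) inside typecheck_pointer() fail to compile, giving a clearer diagnostic than the memset() would on its own.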
diff --git a/include/linux/types.h b/include/linux/types.h
index d3021c879179..ea8cf60a8a79 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -14,7 +14,7 @@ typedef u32 __kernel_dev_t;
typedef __kernel_fd_set fd_set;
typedef __kernel_dev_t dev_t;
-typedef __kernel_ino_t ino_t;
+typedef __kernel_ulong_t ino_t;
typedef __kernel_mode_t mode_t;
typedef unsigned short umode_t;
typedef u32 nlink_t;
@@ -167,6 +167,8 @@ typedef struct {
int counter;
} atomic_t;
+#define ATOMIC_INIT(i) { (i) }
+
#ifdef CONFIG_64BIT
typedef struct {
s64 counter;
@@ -187,7 +189,11 @@ struct hlist_node {
struct ustat {
__kernel_daddr_t f_tfree;
- __kernel_ino_t f_tinode;
+#ifdef CONFIG_ARCH_32BIT_USTAT_F_TINODE
+ unsigned int f_tinode;
+#else
+ unsigned long f_tinode;
+#endif
char f_fname[6];
char f_fpack[6];
};
@@ -220,6 +226,7 @@ struct callback_head {
typedef void (*rcu_callback_t)(struct rcu_head *head);
typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func);
+typedef void (*swap_r_func_t)(void *a, void *b, int size, const void *priv);
typedef void (*swap_func_t)(void *a, void *b, int size);
typedef int (*cmp_r_func_t)(const void *a, const void *b, const void *priv);
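The new swap_r_func_t mirrors cmp_r_func_t: both take a priv cookie so sort_r() can thread caller context through comparisons and swaps. A sketch of the comparison side, with a hypothetical direction flag as the shared context:

	#include <linux/sort.h>

	static int cmp_int_dir(const void *a, const void *b, const void *priv)
	{
		int dir = *(const int *)priv;	/* 1 ascending, -1 descending */
		int ai = *(const int *)a, bi = *(const int *)b;

		return dir * ((ai > bi) - (ai < bi));
	}

	/* a NULL swap_func selects the built-in generic swap */
	sort_r(values, nr_values, sizeof(*values), cmp_int_dir, NULL, &dir);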
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 9de5c10293f5..46040d66334a 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -3,33 +3,32 @@
#define _LINUX_U64_STATS_SYNC_H
/*
- * To properly implement 64bits network statistics on 32bit and 64bit hosts,
- * we provide a synchronization point, that is a noop on 64bit or UP kernels.
+ * Protect against 64-bit values tearing on 32-bit architectures. This is
+ * typically used for statistics read/update in different subsystems.
*
* Key points :
- * 1) Use a seqcount on SMP 32bits, with low overhead.
- * 2) Whole thing is a noop on 64bit arches or UP kernels.
- * 3) Write side must ensure mutual exclusion or one seqcount update could
+ *
+ * - Use a seqcount on 32-bit
+ * - The whole thing is a no-op on 64-bit architectures.
+ *
+ * Usage constraints:
+ *
+ * 1) Write side must ensure mutual exclusion, or one seqcount update could
* be lost, thus blocking readers forever.
- * If this synchronization point is not a mutex, but a spinlock or
- * spinlock_bh() or disable_bh() :
- * 3.1) Write side should not sleep.
- * 3.2) Write side should not allow preemption.
- * 3.3) If applicable, interrupts should be disabled.
*
- * 4) If reader fetches several counters, there is no guarantee the whole values
- * are consistent (remember point 1) : this is a noop on 64bit arches anyway)
+ * 2) Write side must disable preemption, or a seqcount reader can preempt the
+ * writer and also spin forever.
*
- * 5) readers are allowed to sleep or be preempted/interrupted : They perform
- * pure reads. But if they have to fetch many values, it's better to not allow
- * preemptions/interruptions to avoid many retries.
+ * 3) Write side must use the _irqsave() variant if other writers, or a reader,
+ * can be invoked from an IRQ context. On 64-bit systems this variant does
+ * not disable interrupts.
*
- * 6) If counter might be written by an interrupt, readers should block interrupts.
- * (On UP, there is no seqcount_t protection, a reader allowing interrupts could
- * read partial values)
+ * 4) If a reader fetches several counters, there is no guarantee the whole
+ * values are consistent w.r.t. each other (remember point #2: seqcounts
+ * are not used on 64-bit architectures).
*
- * 7) For irq and softirq uses, readers can use u64_stats_fetch_begin_irq() and
- * u64_stats_fetch_retry_irq() helpers
+ * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
+ * pure reads.
*
* Usage :
*
@@ -63,7 +62,7 @@
#include <linux/seqlock.h>
struct u64_stats_sync {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32
seqcount_t seq;
#endif
};
@@ -80,6 +79,11 @@ static inline u64 u64_stats_read(const u64_stats_t *p)
return local64_read(&p->v);
}
+static inline void u64_stats_set(u64_stats_t *p, u64 val)
+{
+ local64_set(&p->v, val);
+}
+
static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
local64_add(val, &p->v);
@@ -90,7 +94,22 @@ static inline void u64_stats_inc(u64_stats_t *p)
local64_inc(&p->v);
}
-#else
+static inline void u64_stats_init(struct u64_stats_sync *syncp) { }
+static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp) { }
+static inline void __u64_stats_update_end(struct u64_stats_sync *syncp) { }
+static inline unsigned long __u64_stats_irqsave(void) { return 0; }
+static inline void __u64_stats_irqrestore(unsigned long flags) { }
+static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+ return 0;
+}
+static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+ unsigned int start)
+{
+ return false;
+}
+
+#else /* 64 bit */
typedef struct {
u64 v;
@@ -101,6 +120,11 @@ static inline u64 u64_stats_read(const u64_stats_t *p)
return p->v;
}
+static inline void u64_stats_set(u64_stats_t *p, u64 val)
+{
+ p->v = val;
+}
+
static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
p->v += val;
@@ -110,108 +134,95 @@ static inline void u64_stats_inc(u64_stats_t *p)
{
p->v++;
}
-#endif
static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
seqcount_init(&syncp->seq);
-#endif
}
-static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
+static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp)
{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ preempt_disable_nested();
write_seqcount_begin(&syncp->seq);
-#endif
}
-static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
+static inline void __u64_stats_update_end(struct u64_stats_sync *syncp)
{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
write_seqcount_end(&syncp->seq);
-#endif
+ preempt_enable_nested();
}
-static inline unsigned long
-u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
+static inline unsigned long __u64_stats_irqsave(void)
{
- unsigned long flags = 0;
+ unsigned long flags;
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
local_irq_save(flags);
- write_seqcount_begin(&syncp->seq);
-#endif
return flags;
}
-static inline void
-u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
- unsigned long flags)
+static inline void __u64_stats_irqrestore(unsigned long flags)
{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
- write_seqcount_end(&syncp->seq);
local_irq_restore(flags);
-#endif
}
static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
return read_seqcount_begin(&syncp->seq);
-#else
- return 0;
-#endif
}
-static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+ unsigned int start)
{
-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
- preempt_disable();
-#endif
- return __u64_stats_fetch_begin(syncp);
+ return read_seqcount_retry(&syncp->seq, start);
}
+#endif /* !64 bit */
-static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
- unsigned int start)
+static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
- return read_seqcount_retry(&syncp->seq, start);
-#else
- return false;
-#endif
+ __u64_stats_update_begin(syncp);
+}
+
+static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
+{
+ __u64_stats_update_end(syncp);
+}
+
+static inline unsigned long u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
+{
+ unsigned long flags = __u64_stats_irqsave();
+
+ __u64_stats_update_begin(syncp);
+ return flags;
+}
+
+static inline void u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
+ unsigned long flags)
+{
+ __u64_stats_update_end(syncp);
+ __u64_stats_irqrestore(flags);
+}
+
+static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+ return __u64_stats_fetch_begin(syncp);
}
static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
unsigned int start)
{
-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
- preempt_enable();
-#endif
return __u64_stats_fetch_retry(syncp, start);
}
-/*
- * In case irq handlers can update u64 counters, readers can use following helpers
- * - SMP 32bit arches use seqcount protection, irq safe.
- * - UP 32bit must disable irqs.
- * - 64bit have no problem atomically reading u64 values, irq safe.
- */
+/* Obsolete interfaces */
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
- local_irq_disable();
-#endif
- return __u64_stats_fetch_begin(syncp);
+ return u64_stats_fetch_begin(syncp);
}
static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
unsigned int start)
{
-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
- local_irq_enable();
-#endif
- return __u64_stats_fetch_retry(syncp, start);
+ return u64_stats_fetch_retry(syncp, start);
}
#endif /* _LINUX_U64_STATS_SYNC_H */
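The usual pattern pairs one writer section with a retry loop on the reader side. A minimal sketch of byte counting, assuming a hypothetical ex_stats structure:

	struct ex_stats {
		u64_stats_t		bytes;
		struct u64_stats_sync	syncp;
	};

	/* writer: callers must already be mutually exclusive */
	u64_stats_update_begin(&st->syncp);
	u64_stats_add(&st->bytes, len);
	u64_stats_update_end(&st->syncp);

	/* reader: loop until no writer raced with the fetch */
	unsigned int start;
	u64 bytes;

	do {
		start = u64_stats_fetch_begin(&st->syncp);
		bytes = u64_stats_read(&st->bytes);
	} while (u64_stats_fetch_retry(&st->syncp, start));

On 64-bit kernels the begin/retry calls compile away and the read is a plain load.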
diff --git a/include/linux/u64_stats_sync_api.h b/include/linux/u64_stats_sync_api.h
new file mode 100644
index 000000000000..c72ca63da44b
--- /dev/null
+++ b/include/linux/u64_stats_sync_api.h
@@ -0,0 +1 @@
+#include <linux/u64_stats_sync.h>
diff --git a/include/linux/uacce.h b/include/linux/uacce.h
new file mode 100644
index 000000000000..9ce88c28b0a8
--- /dev/null
+++ b/include/linux/uacce.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_UACCE_H
+#define _LINUX_UACCE_H
+
+#include <linux/cdev.h>
+#include <uapi/misc/uacce/uacce.h>
+
+#define UACCE_NAME "uacce"
+#define UACCE_MAX_REGION 2
+#define UACCE_MAX_NAME_SIZE 64
+
+struct uacce_queue;
+struct uacce_device;
+
+/**
+ * struct uacce_qfile_region - structure of queue file region
+ * @type: type of the region
+ */
+struct uacce_qfile_region {
+ enum uacce_qfrt type;
+};
+
+/**
+ * struct uacce_ops - uacce device operations
+ * @get_available_instances: get available instances left of the device
+ * @get_queue: get a queue from the device
+ * @put_queue: free a queue to the device
+ * @start_queue: make the queue start work after get_queue
+ * @stop_queue: make the queue stop work before put_queue
+ * @is_q_updated: check whether the task is finished
+ * @mmap: mmap addresses of queue to user space
+ * @ioctl: ioctl for user space users of the queue
+ */
+struct uacce_ops {
+ int (*get_available_instances)(struct uacce_device *uacce);
+ int (*get_queue)(struct uacce_device *uacce, unsigned long arg,
+ struct uacce_queue *q);
+ void (*put_queue)(struct uacce_queue *q);
+ int (*start_queue)(struct uacce_queue *q);
+ void (*stop_queue)(struct uacce_queue *q);
+ int (*is_q_updated)(struct uacce_queue *q);
+ int (*mmap)(struct uacce_queue *q, struct vm_area_struct *vma,
+ struct uacce_qfile_region *qfr);
+ long (*ioctl)(struct uacce_queue *q, unsigned int cmd,
+ unsigned long arg);
+};
+
+/**
+ * struct uacce_interface - interface required for uacce_register()
+ * @name: the uacce device name. Will show up in sysfs
+ * @flags: uacce device attributes
+ * @ops: pointer to the struct uacce_ops
+ */
+struct uacce_interface {
+ char name[UACCE_MAX_NAME_SIZE];
+ unsigned int flags;
+ const struct uacce_ops *ops;
+};
+
+enum uacce_q_state {
+ UACCE_Q_ZOMBIE = 0,
+ UACCE_Q_INIT,
+ UACCE_Q_STARTED,
+};
+
+/**
+ * struct uacce_queue
+ * @uacce: pointer to uacce
+ * @priv: private pointer
+ * @wait: wait queue head
+ * @list: index into uacce queues list
+ * @qfrs: pointer of qfr regions
+ * @mutex: protects queue state
+ * @state: queue state machine
+ * @pasid: pasid associated to the mm
+ * @handle: iommu_sva handle returned by iommu_sva_bind_device()
+ */
+struct uacce_queue {
+ struct uacce_device *uacce;
+ void *priv;
+ wait_queue_head_t wait;
+ struct list_head list;
+ struct uacce_qfile_region *qfrs[UACCE_MAX_REGION];
+ struct mutex mutex;
+ enum uacce_q_state state;
+ u32 pasid;
+ struct iommu_sva *handle;
+};
+
+/**
+ * struct uacce_device
+ * @algs: supported algorithms
+ * @api_ver: api version
+ * @ops: pointer to the struct uacce_ops
+ * @qf_pg_num: page numbers of the queue file regions
+ * @parent: pointer to the parent device
+ * @is_vf: whether this is a virtual function
+ * @flags: uacce attributes
+ * @dev_id: id of the uacce device
+ * @cdev: cdev of the uacce
+ * @dev: dev of the uacce
+ * @mutex: protects uacce operation
+ * @priv: private pointer of the uacce
+ * @queues: list of queues
+ * @inode: core vfs
+ */
+struct uacce_device {
+ const char *algs;
+ const char *api_ver;
+ const struct uacce_ops *ops;
+ unsigned long qf_pg_num[UACCE_MAX_REGION];
+ struct device *parent;
+ bool is_vf;
+ u32 flags;
+ u32 dev_id;
+ struct cdev *cdev;
+ struct device dev;
+ struct mutex mutex;
+ void *priv;
+ struct list_head queues;
+ struct inode *inode;
+};
+
+#if IS_ENABLED(CONFIG_UACCE)
+
+struct uacce_device *uacce_alloc(struct device *parent,
+ struct uacce_interface *interface);
+int uacce_register(struct uacce_device *uacce);
+void uacce_remove(struct uacce_device *uacce);
+
+#else /* CONFIG_UACCE */
+
+static inline
+struct uacce_device *uacce_alloc(struct device *parent,
+ struct uacce_interface *interface)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int uacce_register(struct uacce_device *uacce)
+{
+ return -EINVAL;
+}
+
+static inline void uacce_remove(struct uacce_device *uacce) {}
+
+#endif /* CONFIG_UACCE */
+
+#endif /* _LINUX_UACCE_H */
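A parent device driver typically allocates and registers its uacce view at probe time. A minimal sketch; the ex_* names are hypothetical:

	static const struct uacce_ops ex_uacce_ops = {
		.get_queue	= ex_get_queue,
		.put_queue	= ex_put_queue,
		.start_queue	= ex_start_queue,
		.stop_queue	= ex_stop_queue,
		.mmap		= ex_mmap,
	};

	static int ex_probe(struct device *parent)
	{
		struct uacce_interface iface = {
			.name	= "ex_acc",
			.ops	= &ex_uacce_ops,
		};
		struct uacce_device *uacce;

		uacce = uacce_alloc(parent, &iface);
		if (IS_ERR(uacce))
			return PTR_ERR(uacce);

		return uacce_register(uacce);
	}

When CONFIG_UACCE is disabled, the stubs above make the same probe path fail gracefully with -ENODEV/-EINVAL.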
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 67f016010aad..afb18f198843 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -2,11 +2,11 @@
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__
+#include <linux/fault-inject-usercopy.h>
+#include <linux/instrumented.h>
+#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
-#include <linux/kasan-checks.h>
-
-#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
#include <asm/uaccess.h>
@@ -58,18 +58,28 @@
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
- kasan_check_write(to, n);
+ unsigned long res;
+
+ instrument_copy_from_user_before(to, from, n);
check_object_size(to, n, false);
- return raw_copy_from_user(to, from, n);
+ res = raw_copy_from_user(to, from, n);
+ instrument_copy_from_user_after(to, from, n, res);
+ return res;
}
static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ unsigned long res;
+
might_fault();
- kasan_check_write(to, n);
+ instrument_copy_from_user_before(to, from, n);
+ if (should_fail_usercopy())
+ return n;
check_object_size(to, n, false);
- return raw_copy_from_user(to, from, n);
+ res = raw_copy_from_user(to, from, n);
+ instrument_copy_from_user_after(to, from, n, res);
+ return res;
}
/**
@@ -88,7 +98,9 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
- kasan_check_read(from, n);
+ if (should_fail_usercopy())
+ return n;
+ instrument_copy_to_user(to, from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
}
@@ -97,7 +109,9 @@ static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
- kasan_check_read(from, n);
+ if (should_fail_usercopy())
+ return n;
+ instrument_copy_to_user(to, from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
}
@@ -108,9 +122,10 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
might_fault();
- if (likely(access_ok(from, n))) {
- kasan_check_write(to, n);
+ if (!should_fail_usercopy() && likely(access_ok(from, n))) {
+ instrument_copy_from_user_before(to, from, n);
res = raw_copy_from_user(to, from, n);
+ instrument_copy_from_user_after(to, from, n, res);
}
if (unlikely(res))
memset(to + (n - res), 0, res);
@@ -126,8 +141,10 @@ static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
+ if (should_fail_usercopy())
+ return n;
if (access_ok(to, n)) {
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
}
return n;
@@ -140,7 +157,7 @@ _copy_to_user(void __user *, const void *, unsigned long);
static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (likely(check_copy_size(to, n, false)))
+ if (check_copy_size(to, n, false))
n = _copy_from_user(to, from, n);
return n;
}
@@ -148,18 +165,21 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (likely(check_copy_size(from, n, true)))
+ if (check_copy_size(from, n, true))
n = _copy_to_user(to, from, n);
return n;
}
-#ifdef CONFIG_COMPAT
-static __always_inline unsigned long __must_check
-copy_in_user(void __user *to, const void __user *from, unsigned long n)
+
+#ifndef copy_mc_to_kernel
+/*
+ * Without arch opt-in this generic copy_mc_to_kernel() will not handle
+ * #MC (or arch equivalent) during source read.
+ */
+static inline unsigned long __must_check
+copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
- might_fault();
- if (access_ok(to, n) && access_ok(from, n))
- n = raw_copy_in_user(to, from, n);
- return n;
+ memcpy(dst, src, cnt);
+ return 0;
}
#endif
@@ -220,6 +240,28 @@ static inline bool pagefault_disabled(void)
*/
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
+#ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS
+
+/**
+ * probe_subpage_writeable: probe the user range for write faults at sub-page
+ * granularity (e.g. arm64 MTE)
+ * @uaddr: start of address range
+ * @size: size of address range
+ *
+ * Returns 0 on success, or the number of bytes not probed on fault.
+ *
+ * It is expected that the caller checked for the write permission of each
+ * page in the range either by put_user() or GUP. The architecture port can
+ * implement a more efficient get_user() probing if the same sub-page faults
+ * are triggered by either a read or a write.
+ */
+static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size)
+{
+ return 0;
+}
+
+#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */
+
#ifndef ARCH_HAS_NOCACHE_UACCESS
static inline __must_check unsigned long
@@ -301,72 +343,52 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
return 0;
}
-/*
- * probe_kernel_read(): safely attempt to read from a location
- * @dst: pointer to the buffer that shall take the data
- * @src: address to read from
- * @size: size of the data chunk
- *
- * Safely read from address @src to the buffer at @dst. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
-extern long probe_kernel_read(void *dst, const void *src, size_t size);
-extern long probe_kernel_read_strict(void *dst, const void *src, size_t size);
-extern long __probe_kernel_read(void *dst, const void *src, size_t size);
-
-/*
- * probe_user_read(): safely attempt to read from a location in user space
- * @dst: pointer to the buffer that shall take the data
- * @src: address to read from
- * @size: size of the data chunk
- *
- * Safely read from address @src to the buffer at @dst. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
-extern long probe_user_read(void *dst, const void __user *src, size_t size);
-extern long __probe_user_read(void *dst, const void __user *src, size_t size);
-
-/*
- * probe_kernel_write(): safely attempt to write to a location
- * @dst: address to write to
- * @src: pointer to the data that shall be written
- * @size: size of the data chunk
- *
- * Safely write to address @dst from the buffer at @src. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
-extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
-extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
-
-/*
- * probe_user_write(): safely attempt to write to a location in user space
- * @dst: address to write to
- * @src: pointer to the data that shall be written
- * @size: size of the data chunk
- *
- * Safely write to address @dst from the buffer at @src. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
-extern long notrace probe_user_write(void __user *dst, const void *src, size_t size);
-extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size);
-
-extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
-extern long strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr,
- long count);
-extern long __strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
-extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
- long count);
-extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
+bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);
+
+long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
+long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);
+
+long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
+long notrace copy_to_user_nofault(void __user *dst, const void *src,
+ size_t size);
+
+long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
+ long count);
+
+long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
+ long count);
+long strnlen_user_nofault(const void __user *unsafe_addr, long count);
+
+#ifndef __get_kernel_nofault
+#define __get_kernel_nofault(dst, src, type, label) \
+do { \
+ type __user *p = (type __force __user *)(src); \
+ type data; \
+ if (__get_user(data, p)) \
+ goto label; \
+ *(type *)dst = data; \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, label) \
+do { \
+ type __user *p = (type __force __user *)(dst); \
+ type data = *(type *)src; \
+ if (__put_user(data, p)) \
+ goto label; \
+} while (0)
+#endif
/**
- * probe_kernel_address(): safely attempt to read from a location
- * @addr: address to read from
- * @retval: read into this variable
+ * get_kernel_nofault(): safely attempt to read from a location
+ * @val: read into this variable
+ * @ptr: address to read from
*
* Returns 0 on success, or -EFAULT.
*/
-#define probe_kernel_address(addr, retval) \
- probe_kernel_read(&retval, addr, sizeof(retval))
+#define get_kernel_nofault(val, ptr) ({ \
+ const typeof(val) *__gk_ptr = (ptr); \
+ copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
+})
#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
@@ -375,13 +397,20 @@ extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
+#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
+#ifndef user_write_access_begin
+#define user_write_access_begin user_access_begin
+#define user_write_access_end user_access_end
+#endif
+#ifndef user_read_access_begin
+#define user_read_access_begin user_access_begin
+#define user_read_access_end user_access_end
+#endif
#ifdef CONFIG_HARDENED_USERCOPY
-void usercopy_warn(const char *name, const char *detail, bool to_user,
- unsigned long offset, unsigned long len);
void __noreturn usercopy_abort(const char *name, const char *detail,
bool to_user, unsigned long offset,
unsigned long len);
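The *_nofault() helpers replace the old probe_* family for contexts that must not take an unhandled fault, such as debuggers and tracers poking at arbitrary addresses. A minimal sketch using get_kernel_nofault(); the addr variable is assumed to hold a candidate kernel address:

	/* safely peek at a possibly unmapped kernel address */
	unsigned long word;

	if (get_kernel_nofault(word, (unsigned long *)addr))
		pr_info("%px: not mapped\n", (void *)addr);
	else
		pr_info("%px: %#lx\n", (void *)addr, word);

Unlike a plain dereference, a fault here is absorbed via the exception tables and reported as -EFAULT instead of an oops.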
diff --git a/include/linux/ucb1400.h b/include/linux/ucb1400.h
index 0968ef458447..2516082cd3a9 100644
--- a/include/linux/ucb1400.h
+++ b/include/linux/ucb1400.h
@@ -23,7 +23,7 @@
#include <sound/ac97_codec.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
/*
* UCB1400 AC-link registers
@@ -84,8 +84,6 @@ struct ucb1400_gpio {
struct gpio_chip gc;
struct snd_ac97 *ac97;
int gpio_offset;
- int (*gpio_setup)(struct device *dev, int ngpio);
- int (*gpio_teardown)(struct device *dev, int ngpio);
};
struct ucb1400_ts {
diff --git a/include/linux/udp.h b/include/linux/udp.h
index aa84597bdc33..e96da4157d04 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -23,11 +23,6 @@ static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
return (struct udphdr *)skb_transport_header(skb);
}
-static inline struct udphdr *inner_udp_hdr(const struct sk_buff *skb)
-{
- return (struct udphdr *)skb_inner_transport_header(skb);
-}
-
#define UDP_HTABLE_SIZE_MIN (CONFIG_BASE_SMALL ? 128 : 256)
static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
@@ -51,7 +46,9 @@ struct udp_sock {
* different encapsulation layer set
* this
*/
- gro_enabled:1; /* Can accept GRO packets */
+ gro_enabled:1, /* Request GRO aggregation */
+ accept_udp_l4:1,
+ accept_udp_fraglist:1;
/*
* Following member retains the information to create a UDP header
* when the socket is uncorked.
@@ -73,6 +70,7 @@ struct udp_sock {
* For encapsulation sockets.
*/
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+ void (*encap_err_rcv)(struct sock *sk, struct sk_buff *skb, unsigned int udp_offset);
int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb);
void (*encap_destroy)(struct sock *sk);
@@ -131,8 +129,22 @@ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
{
- return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) &&
- skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4;
+ if (!skb_is_gso(skb))
+ return false;
+
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && !udp_sk(sk)->accept_udp_l4)
+ return true;
+
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist)
+ return true;
+
+ return false;
+}
+
+static inline void udp_allow_gso(struct sock *sk)
+{
+ udp_sk(sk)->accept_udp_l4 = 1;
+ udp_sk(sk)->accept_udp_fraglist = 1;
}
#define udp_portaddr_for_each_entry(__sk, list) \
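With the accept_udp_l4/accept_udp_fraglist split, udp_unexpected_gso() only rejects GSO flavours the socket has not opted into. A tunnel driver that can forward segmented packets simply opts in on its encap socket; a minimal sketch with a hypothetical setup helper:

	/* mark a UDP tunnel socket as able to accept both GSO flavours */
	static void ex_tunnel_sock_init(struct sock *sk)
	{
		udp_allow_gso(sk);
	}

Sockets that never call udp_allow_gso() keep the old behaviour: unexpected GSO packets are segmented before delivery.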
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 9576fd8158d7..2e3134b14ffd 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -7,7 +7,7 @@
#include <linux/kernel.h>
#include <linux/thread_info.h>
-#include <crypto/hash.h>
+#include <linux/mm_types.h>
#include <uapi/linux/uio.h>
struct page;
@@ -20,27 +20,38 @@ struct kvec {
enum iter_type {
/* iter types */
- ITER_IOVEC = 4,
- ITER_KVEC = 8,
- ITER_BVEC = 16,
- ITER_PIPE = 32,
- ITER_DISCARD = 64,
+ ITER_IOVEC,
+ ITER_KVEC,
+ ITER_BVEC,
+ ITER_PIPE,
+ ITER_XARRAY,
+ ITER_DISCARD,
+ ITER_UBUF,
};
-struct iov_iter {
- /*
- * Bit 0 is the read/write bit, set if we're writing.
- * Bit 1 is the BVEC_FLAG_NO_REF bit, set if type is a bvec and
- * the caller isn't expecting to drop a page reference when done.
- */
- unsigned int type;
+struct iov_iter_state {
size_t iov_offset;
size_t count;
+ unsigned long nr_segs;
+};
+
+struct iov_iter {
+ u8 iter_type;
+ bool nofault;
+ bool data_source;
+ bool user_backed;
+ union {
+ size_t iov_offset;
+ int last_offset;
+ };
+ size_t count;
union {
const struct iovec *iov;
const struct kvec *kvec;
const struct bio_vec *bvec;
+ struct xarray *xarray;
struct pipe_inode_info *pipe;
+ void __user *ubuf;
};
union {
unsigned long nr_segs;
@@ -48,12 +59,26 @@ struct iov_iter {
unsigned int head;
unsigned int start_head;
};
+ loff_t xarray_start;
};
};
static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
- return i->type & ~(READ | WRITE);
+ return i->iter_type;
+}
+
+static inline void iov_iter_save_state(struct iov_iter *iter,
+ struct iov_iter_state *state)
+{
+ state->iov_offset = iter->iov_offset;
+ state->count = iter->count;
+ state->nr_segs = iter->nr_segs;
+}
+
+static inline bool iter_is_ubuf(const struct iov_iter *i)
+{
+ return iov_iter_type(i) == ITER_UBUF;
}
static inline bool iter_is_iovec(const struct iov_iter *i)
@@ -81,9 +106,19 @@ static inline bool iov_iter_is_discard(const struct iov_iter *i)
return iov_iter_type(i) == ITER_DISCARD;
}
+static inline bool iov_iter_is_xarray(const struct iov_iter *i)
+{
+ return iov_iter_type(i) == ITER_XARRAY;
+}
+
static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
- return i->type & (READ | WRITE);
+ return i->data_source ? WRITE : READ;
+}
+
+static inline bool user_backed_iter(const struct iov_iter *i)
+{
+ return i->user_backed;
}
/*
@@ -112,11 +147,12 @@ static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
};
}
-size_t iov_iter_copy_from_user_atomic(struct page *page,
- struct iov_iter *i, unsigned long offset, size_t bytes);
+size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
+ size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
+size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
+size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
@@ -125,59 +161,62 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
-bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
-bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
+
+static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
+ size_t bytes, struct iov_iter *i)
+{
+ return copy_page_to_iter(&folio->page, offset, bytes, i);
+}
static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(!check_copy_size(addr, bytes, true)))
- return 0;
- else
+ if (check_copy_size(addr, bytes, true))
return _copy_to_iter(addr, bytes, i);
+ return 0;
}
static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(!check_copy_size(addr, bytes, false)))
- return 0;
- else
+ if (check_copy_size(addr, bytes, false))
return _copy_from_iter(addr, bytes, i);
+ return 0;
}
static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(!check_copy_size(addr, bytes, false)))
- return false;
- else
- return _copy_from_iter_full(addr, bytes, i);
+ size_t copied = copy_from_iter(addr, bytes, i);
+ if (likely(copied == bytes))
+ return true;
+ iov_iter_revert(i, copied);
+ return false;
}
static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(!check_copy_size(addr, bytes, false)))
- return 0;
- else
+ if (check_copy_size(addr, bytes, false))
return _copy_from_iter_nocache(addr, bytes, i);
+ return 0;
}
static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(!check_copy_size(addr, bytes, false)))
- return false;
- else
- return _copy_from_iter_full_nocache(addr, bytes, i);
+ size_t copied = copy_from_iter_nocache(addr, bytes, i);
+ if (likely(copied == bytes))
+ return true;
+ iov_iter_revert(i, copied);
+ return false;
}
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
* Note, users like pmem that depend on the stricter semantics of
- * copy_from_iter_flushcache() than copy_from_iter_nocache() must check for
+ * _copy_from_iter_flushcache() than _copy_from_iter_nocache() must check for
* IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
* destination is flushed from the cache on return.
*/
@@ -186,31 +225,15 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
-#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
-size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i);
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
-#define _copy_to_iter_mcsafe _copy_to_iter
+#define _copy_mc_to_iter _copy_to_iter
#endif
-static __always_inline __must_check
-size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
-{
- if (unlikely(!check_copy_size(addr, bytes, false)))
- return 0;
- else
- return _copy_from_iter_flushcache(addr, bytes, i);
-}
-
-static __always_inline __must_check
-size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
-{
- if (unlikely(!check_copy_size(addr, bytes, true)))
- return 0;
- else
- return _copy_to_iter_mcsafe(addr, bytes, i);
-}
-
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
+bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
+ unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
@@ -222,11 +245,14 @@ void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
-ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
+void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
+ loff_t start, size_t count);
+ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
size_t maxsize, unsigned maxpages, size_t *start);
-ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
+ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
+void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
@@ -261,28 +287,68 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
i->count = count;
}
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);
+
+static inline int
+iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
+{
+ size_t shorted = 0;
+ int npages;
+
+ if (iov_iter_count(i) > max_bytes) {
+ shorted = iov_iter_count(i) - max_bytes;
+ iov_iter_truncate(i, max_bytes);
+ }
+ npages = iov_iter_npages(i, maxpages);
+ if (shorted)
+ iov_iter_reexpand(i, iov_iter_count(i) + shorted);
+
+ return npages;
+}
+
+struct csum_state {
+ __wsum csum;
+ size_t off;
+};
+
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
-bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
+
+static __always_inline __must_check
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
+ __wsum *csum, struct iov_iter *i)
+{
+ size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
+ if (likely(copied == bytes))
+ return true;
+ iov_iter_revert(i, copied);
+ return false;
+}
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
struct iov_iter *i);
-ssize_t import_iovec(int type, const struct iovec __user * uvector,
- unsigned nr_segs, unsigned fast_segs,
- struct iovec **iov, struct iov_iter *i);
-
-#ifdef CONFIG_COMPAT
-struct compat_iovec;
-ssize_t compat_import_iovec(int type, const struct compat_iovec __user * uvector,
- unsigned nr_segs, unsigned fast_segs,
- struct iovec **iov, struct iov_iter *i);
-#endif
-
+struct iovec *iovec_from_user(const struct iovec __user *uvector,
+ unsigned long nr_segs, unsigned long fast_segs,
+ struct iovec *fast_iov, bool compat);
+ssize_t import_iovec(int type, const struct iovec __user *uvec,
+ unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
+ struct iov_iter *i);
+ssize_t __import_iovec(int type, const struct iovec __user *uvec,
+ unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
+ struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
struct iovec *iov, struct iov_iter *i);
-int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
- int (*f)(struct kvec *vec, void *context),
- void *context);
+static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
+ void __user *buf, size_t count)
+{
+ WARN_ON(direction & ~(READ | WRITE));
+ *i = (struct iov_iter) {
+ .iter_type = ITER_UBUF,
+ .user_backed = true,
+ .data_source = direction,
+ .ubuf = buf,
+ .count = count
+ };
+}
#endif
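
[Annotation] The new iov_iter_state save/restore pair and the single-segment ITER_UBUF type combine naturally for retry loops. A sketch under the assumption of a plain user buffer (all names other than the iov_iter API are illustrative):

	static ssize_t copy_once(void __user *buf, size_t len, const void *src)
	{
		struct iov_iter iter;
		struct iov_iter_state state;
		size_t copied;

		iov_iter_ubuf(&iter, READ, buf, len);	/* destination iter */
		iov_iter_save_state(&iter, &state);

		copied = copy_to_iter(src, len, &iter);
		if (copied != len) {
			/* rewind so the caller can retry from the start */
			iov_iter_restore(&iter, &state);
			return -EFAULT;
		}
		return copied;
	}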
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 01081c4726c0..47c5962b876b 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -24,10 +24,10 @@ struct uio_map;
* struct uio_mem - description of a UIO memory region
* @name: name of the memory region for identification
* @addr: address of the device's memory rounded to page
- * size (phys_addr is used since addr can be
- * logical, virtual, or physical & phys_addr_t
- * should always be large enough to handle any of
- * the address types)
+ * size (phys_addr is used since addr can be
+ * logical, virtual, or physical & phys_addr_t
+ * should always be large enough to handle any of
+ * the address types)
* @offs: offset of device memory within the page
* @size: size of IO (multiple of page size)
* @memtype: type of memory addr points to
@@ -67,16 +67,16 @@ struct uio_port {
#define MAX_UIO_PORT_REGIONS 5
struct uio_device {
- struct module *owner;
+ struct module *owner;
struct device dev;
- int minor;
- atomic_t event;
- struct fasync_struct *async_queue;
- wait_queue_head_t wait;
- struct uio_info *info;
+ int minor;
+ atomic_t event;
+ struct fasync_struct *async_queue;
+ wait_queue_head_t wait;
+ struct uio_info *info;
struct mutex info_lock;
- struct kobject *map_dir;
- struct kobject *portio_dir;
+ struct kobject *map_dir;
+ struct kobject *portio_dir;
};
/**
@@ -117,12 +117,37 @@ extern int __must_check
struct uio_info *info);
/* use a define to avoid include chaining to get THIS_MODULE */
+
+/**
+ * uio_register_device - register a new userspace IO device
+ * @parent: parent device
+ * @info: UIO device capabilities
+ *
+ * returns zero on success or a negative error code.
+ */
#define uio_register_device(parent, info) \
__uio_register_device(THIS_MODULE, parent, info)
extern void uio_unregister_device(struct uio_info *info);
extern void uio_event_notify(struct uio_info *info);
+extern int __must_check
+ __devm_uio_register_device(struct module *owner,
+ struct device *parent,
+ struct uio_info *info);
+
+/* use a define to avoid include chaining to get THIS_MODULE */
+
+/**
+ * devm_uio_register_device - Resource managed uio_register_device()
+ * @parent: parent device
+ * @info: UIO device capabilities
+ *
+ * returns zero on success or a negative error code.
+ */
+#define devm_uio_register_device(parent, info) \
+ __devm_uio_register_device(THIS_MODULE, parent, info)
+
/* defines for uio_info->irq */
#define UIO_IRQ_CUSTOM -1
#define UIO_IRQ_NONE 0
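
[Annotation] With the devres variant, a driver no longer needs an explicit uio_unregister_device() in its remove path. A minimal probe sketch (the "my_uio" naming and platform wiring are illustrative):

	static int my_uio_probe(struct platform_device *pdev)
	{
		struct uio_info *info;

		info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		info->name = "my_uio";
		info->version = "0";
		info->irq = UIO_IRQ_NONE;

		/* unregistered automatically when the driver detaches */
		return devm_uio_register_device(&pdev->dev, info);
	}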
diff --git a/include/linux/umh.h b/include/linux/umh.h
index 0c08de356d0d..5d1f6129b847 100644
--- a/include/linux/umh.h
+++ b/include/linux/umh.h
@@ -11,10 +11,11 @@
struct cred;
struct file;
-#define UMH_NO_WAIT 0 /* don't wait at all */
-#define UMH_WAIT_EXEC 1 /* wait for the exec, but not the process */
-#define UMH_WAIT_PROC 2 /* wait for the process to complete */
-#define UMH_KILLABLE 4 /* wait for EXEC/PROC killable */
+#define UMH_NO_WAIT 0x00 /* don't wait at all */
+#define UMH_WAIT_EXEC 0x01 /* wait for the exec, but not the process */
+#define UMH_WAIT_PROC 0x02 /* wait for the process to complete */
+#define UMH_KILLABLE 0x04 /* wait for EXEC/PROC killable */
+#define UMH_FREEZABLE 0x08 /* wait for EXEC/PROC freezable */
struct subprocess_info {
struct work_struct work;
@@ -22,10 +23,8 @@ struct subprocess_info {
const char *path;
char **argv;
char **envp;
- struct file *file;
int wait;
int retval;
- pid_t pid;
int (*init)(struct subprocess_info *info, struct cred *new);
void (*cleanup)(struct subprocess_info *info);
void *data;
@@ -40,19 +39,6 @@ call_usermodehelper_setup(const char *path, char **argv, char **envp,
int (*init)(struct subprocess_info *info, struct cred *new),
void (*cleanup)(struct subprocess_info *), void *data);
-struct subprocess_info *call_usermodehelper_setup_file(struct file *file,
- int (*init)(struct subprocess_info *info, struct cred *new),
- void (*cleanup)(struct subprocess_info *), void *data);
-struct umh_info {
- const char *cmdline;
- struct file *pipe_to_umh;
- struct file *pipe_from_umh;
- struct list_head list;
- void (*cleanup)(struct umh_info *info);
- pid_t pid;
-};
-int fork_usermode_blob(void *data, size_t len, struct umh_info *info);
-
extern int
call_usermodehelper_exec(struct subprocess_info *info, int wait);
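
[Annotation] The wait-mode defines become a flag field so UMH_KILLABLE/UMH_FREEZABLE can be OR'ed in. Typical use via the call_usermodehelper() convenience wrapper (helper path and environment are illustrative):

	static int run_helper(void)
	{
		char *argv[] = { "/sbin/myhelper", "--once", NULL };
		char *envp[] = { "HOME=/",
				 "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

		/* wait for the process to exit, but allow fatal signals */
		return call_usermodehelper(argv[0], argv, envp,
					   UMH_WAIT_PROC | UMH_KILLABLE);
	}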
diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
deleted file mode 100644
index 167aa849c0ce..000000000000
--- a/include/linux/unaligned/access_ok.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_ACCESS_OK_H
-#define _LINUX_UNALIGNED_ACCESS_OK_H
-
-#include <linux/kernel.h>
-#include <asm/byteorder.h>
-
-static __always_inline u16 get_unaligned_le16(const void *p)
-{
- return le16_to_cpup((__le16 *)p);
-}
-
-static __always_inline u32 get_unaligned_le32(const void *p)
-{
- return le32_to_cpup((__le32 *)p);
-}
-
-static __always_inline u64 get_unaligned_le64(const void *p)
-{
- return le64_to_cpup((__le64 *)p);
-}
-
-static __always_inline u16 get_unaligned_be16(const void *p)
-{
- return be16_to_cpup((__be16 *)p);
-}
-
-static __always_inline u32 get_unaligned_be32(const void *p)
-{
- return be32_to_cpup((__be32 *)p);
-}
-
-static __always_inline u64 get_unaligned_be64(const void *p)
-{
- return be64_to_cpup((__be64 *)p);
-}
-
-static __always_inline void put_unaligned_le16(u16 val, void *p)
-{
- *((__le16 *)p) = cpu_to_le16(val);
-}
-
-static __always_inline void put_unaligned_le32(u32 val, void *p)
-{
- *((__le32 *)p) = cpu_to_le32(val);
-}
-
-static __always_inline void put_unaligned_le64(u64 val, void *p)
-{
- *((__le64 *)p) = cpu_to_le64(val);
-}
-
-static __always_inline void put_unaligned_be16(u16 val, void *p)
-{
- *((__be16 *)p) = cpu_to_be16(val);
-}
-
-static __always_inline void put_unaligned_be32(u32 val, void *p)
-{
- *((__be32 *)p) = cpu_to_be32(val);
-}
-
-static __always_inline void put_unaligned_be64(u64 val, void *p)
-{
- *((__be64 *)p) = cpu_to_be64(val);
-}
-
-#endif /* _LINUX_UNALIGNED_ACCESS_OK_H */
diff --git a/include/linux/unaligned/be_byteshift.h b/include/linux/unaligned/be_byteshift.h
deleted file mode 100644
index 8bdb8fa01bd4..000000000000
--- a/include/linux/unaligned/be_byteshift.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_BE_BYTESHIFT_H
-#define _LINUX_UNALIGNED_BE_BYTESHIFT_H
-
-#include <linux/types.h>
-
-static inline u16 __get_unaligned_be16(const u8 *p)
-{
- return p[0] << 8 | p[1];
-}
-
-static inline u32 __get_unaligned_be32(const u8 *p)
-{
- return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
-}
-
-static inline u64 __get_unaligned_be64(const u8 *p)
-{
- return (u64)__get_unaligned_be32(p) << 32 |
- __get_unaligned_be32(p + 4);
-}
-
-static inline void __put_unaligned_be16(u16 val, u8 *p)
-{
- *p++ = val >> 8;
- *p++ = val;
-}
-
-static inline void __put_unaligned_be32(u32 val, u8 *p)
-{
- __put_unaligned_be16(val >> 16, p);
- __put_unaligned_be16(val, p + 2);
-}
-
-static inline void __put_unaligned_be64(u64 val, u8 *p)
-{
- __put_unaligned_be32(val >> 32, p);
- __put_unaligned_be32(val, p + 4);
-}
-
-static inline u16 get_unaligned_be16(const void *p)
-{
- return __get_unaligned_be16((const u8 *)p);
-}
-
-static inline u32 get_unaligned_be32(const void *p)
-{
- return __get_unaligned_be32((const u8 *)p);
-}
-
-static inline u64 get_unaligned_be64(const void *p)
-{
- return __get_unaligned_be64((const u8 *)p);
-}
-
-static inline void put_unaligned_be16(u16 val, void *p)
-{
- __put_unaligned_be16(val, p);
-}
-
-static inline void put_unaligned_be32(u32 val, void *p)
-{
- __put_unaligned_be32(val, p);
-}
-
-static inline void put_unaligned_be64(u64 val, void *p)
-{
- __put_unaligned_be64(val, p);
-}
-
-#endif /* _LINUX_UNALIGNED_BE_BYTESHIFT_H */
diff --git a/include/linux/unaligned/be_memmove.h b/include/linux/unaligned/be_memmove.h
deleted file mode 100644
index 7164214a4ba1..000000000000
--- a/include/linux/unaligned/be_memmove.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_BE_MEMMOVE_H
-#define _LINUX_UNALIGNED_BE_MEMMOVE_H
-
-#include <linux/unaligned/memmove.h>
-
-static inline u16 get_unaligned_be16(const void *p)
-{
- return __get_unaligned_memmove16((const u8 *)p);
-}
-
-static inline u32 get_unaligned_be32(const void *p)
-{
- return __get_unaligned_memmove32((const u8 *)p);
-}
-
-static inline u64 get_unaligned_be64(const void *p)
-{
- return __get_unaligned_memmove64((const u8 *)p);
-}
-
-static inline void put_unaligned_be16(u16 val, void *p)
-{
- __put_unaligned_memmove16(val, p);
-}
-
-static inline void put_unaligned_be32(u32 val, void *p)
-{
- __put_unaligned_memmove32(val, p);
-}
-
-static inline void put_unaligned_be64(u64 val, void *p)
-{
- __put_unaligned_memmove64(val, p);
-}
-
-#endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */
diff --git a/include/linux/unaligned/be_struct.h b/include/linux/unaligned/be_struct.h
deleted file mode 100644
index 15ea503a13fc..000000000000
--- a/include/linux/unaligned/be_struct.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_BE_STRUCT_H
-#define _LINUX_UNALIGNED_BE_STRUCT_H
-
-#include <linux/unaligned/packed_struct.h>
-
-static inline u16 get_unaligned_be16(const void *p)
-{
- return __get_unaligned_cpu16((const u8 *)p);
-}
-
-static inline u32 get_unaligned_be32(const void *p)
-{
- return __get_unaligned_cpu32((const u8 *)p);
-}
-
-static inline u64 get_unaligned_be64(const void *p)
-{
- return __get_unaligned_cpu64((const u8 *)p);
-}
-
-static inline void put_unaligned_be16(u16 val, void *p)
-{
- __put_unaligned_cpu16(val, p);
-}
-
-static inline void put_unaligned_be32(u32 val, void *p)
-{
- __put_unaligned_cpu32(val, p);
-}
-
-static inline void put_unaligned_be64(u64 val, void *p)
-{
- __put_unaligned_cpu64(val, p);
-}
-
-#endif /* _LINUX_UNALIGNED_BE_STRUCT_H */
diff --git a/include/linux/unaligned/generic.h b/include/linux/unaligned/generic.h
deleted file mode 100644
index 57d3114656e5..000000000000
--- a/include/linux/unaligned/generic.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_GENERIC_H
-#define _LINUX_UNALIGNED_GENERIC_H
-
-/*
- * Cause a link-time error if we try an unaligned access other than
- * 1,2,4 or 8 bytes long
- */
-extern void __bad_unaligned_access_size(void);
-
-#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({ \
- __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \
- __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)), \
- __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)), \
- __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)), \
- __bad_unaligned_access_size())))); \
- }))
-
-#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({ \
- __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \
- __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)), \
- __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)), \
- __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)), \
- __bad_unaligned_access_size())))); \
- }))
-
-#define __put_unaligned_le(val, ptr) ({ \
- void *__gu_p = (ptr); \
- switch (sizeof(*(ptr))) { \
- case 1: \
- *(u8 *)__gu_p = (__force u8)(val); \
- break; \
- case 2: \
- put_unaligned_le16((__force u16)(val), __gu_p); \
- break; \
- case 4: \
- put_unaligned_le32((__force u32)(val), __gu_p); \
- break; \
- case 8: \
- put_unaligned_le64((__force u64)(val), __gu_p); \
- break; \
- default: \
- __bad_unaligned_access_size(); \
- break; \
- } \
- (void)0; })
-
-#define __put_unaligned_be(val, ptr) ({ \
- void *__gu_p = (ptr); \
- switch (sizeof(*(ptr))) { \
- case 1: \
- *(u8 *)__gu_p = (__force u8)(val); \
- break; \
- case 2: \
- put_unaligned_be16((__force u16)(val), __gu_p); \
- break; \
- case 4: \
- put_unaligned_be32((__force u32)(val), __gu_p); \
- break; \
- case 8: \
- put_unaligned_be64((__force u64)(val), __gu_p); \
- break; \
- default: \
- __bad_unaligned_access_size(); \
- break; \
- } \
- (void)0; })
-
-#endif /* _LINUX_UNALIGNED_GENERIC_H */
diff --git a/include/linux/unaligned/le_byteshift.h b/include/linux/unaligned/le_byteshift.h
deleted file mode 100644
index 1628b75866f0..000000000000
--- a/include/linux/unaligned/le_byteshift.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_LE_BYTESHIFT_H
-#define _LINUX_UNALIGNED_LE_BYTESHIFT_H
-
-#include <linux/types.h>
-
-static inline u16 __get_unaligned_le16(const u8 *p)
-{
- return p[0] | p[1] << 8;
-}
-
-static inline u32 __get_unaligned_le32(const u8 *p)
-{
- return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
-}
-
-static inline u64 __get_unaligned_le64(const u8 *p)
-{
- return (u64)__get_unaligned_le32(p + 4) << 32 |
- __get_unaligned_le32(p);
-}
-
-static inline void __put_unaligned_le16(u16 val, u8 *p)
-{
- *p++ = val;
- *p++ = val >> 8;
-}
-
-static inline void __put_unaligned_le32(u32 val, u8 *p)
-{
- __put_unaligned_le16(val >> 16, p + 2);
- __put_unaligned_le16(val, p);
-}
-
-static inline void __put_unaligned_le64(u64 val, u8 *p)
-{
- __put_unaligned_le32(val >> 32, p + 4);
- __put_unaligned_le32(val, p);
-}
-
-static inline u16 get_unaligned_le16(const void *p)
-{
- return __get_unaligned_le16((const u8 *)p);
-}
-
-static inline u32 get_unaligned_le32(const void *p)
-{
- return __get_unaligned_le32((const u8 *)p);
-}
-
-static inline u64 get_unaligned_le64(const void *p)
-{
- return __get_unaligned_le64((const u8 *)p);
-}
-
-static inline void put_unaligned_le16(u16 val, void *p)
-{
- __put_unaligned_le16(val, p);
-}
-
-static inline void put_unaligned_le32(u32 val, void *p)
-{
- __put_unaligned_le32(val, p);
-}
-
-static inline void put_unaligned_le64(u64 val, void *p)
-{
- __put_unaligned_le64(val, p);
-}
-
-#endif /* _LINUX_UNALIGNED_LE_BYTESHIFT_H */
diff --git a/include/linux/unaligned/le_memmove.h b/include/linux/unaligned/le_memmove.h
deleted file mode 100644
index 9202e864d026..000000000000
--- a/include/linux/unaligned/le_memmove.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_LE_MEMMOVE_H
-#define _LINUX_UNALIGNED_LE_MEMMOVE_H
-
-#include <linux/unaligned/memmove.h>
-
-static inline u16 get_unaligned_le16(const void *p)
-{
- return __get_unaligned_memmove16((const u8 *)p);
-}
-
-static inline u32 get_unaligned_le32(const void *p)
-{
- return __get_unaligned_memmove32((const u8 *)p);
-}
-
-static inline u64 get_unaligned_le64(const void *p)
-{
- return __get_unaligned_memmove64((const u8 *)p);
-}
-
-static inline void put_unaligned_le16(u16 val, void *p)
-{
- __put_unaligned_memmove16(val, p);
-}
-
-static inline void put_unaligned_le32(u32 val, void *p)
-{
- __put_unaligned_memmove32(val, p);
-}
-
-static inline void put_unaligned_le64(u64 val, void *p)
-{
- __put_unaligned_memmove64(val, p);
-}
-
-#endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */
diff --git a/include/linux/unaligned/le_struct.h b/include/linux/unaligned/le_struct.h
deleted file mode 100644
index 9977987883a6..000000000000
--- a/include/linux/unaligned/le_struct.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_LE_STRUCT_H
-#define _LINUX_UNALIGNED_LE_STRUCT_H
-
-#include <linux/unaligned/packed_struct.h>
-
-static inline u16 get_unaligned_le16(const void *p)
-{
- return __get_unaligned_cpu16((const u8 *)p);
-}
-
-static inline u32 get_unaligned_le32(const void *p)
-{
- return __get_unaligned_cpu32((const u8 *)p);
-}
-
-static inline u64 get_unaligned_le64(const void *p)
-{
- return __get_unaligned_cpu64((const u8 *)p);
-}
-
-static inline void put_unaligned_le16(u16 val, void *p)
-{
- __put_unaligned_cpu16(val, p);
-}
-
-static inline void put_unaligned_le32(u32 val, void *p)
-{
- __put_unaligned_cpu32(val, p);
-}
-
-static inline void put_unaligned_le64(u64 val, void *p)
-{
- __put_unaligned_cpu64(val, p);
-}
-
-#endif /* _LINUX_UNALIGNED_LE_STRUCT_H */
diff --git a/include/linux/unaligned/memmove.h b/include/linux/unaligned/memmove.h
deleted file mode 100644
index ac71b53bc6dc..000000000000
--- a/include/linux/unaligned/memmove.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_MEMMOVE_H
-#define _LINUX_UNALIGNED_MEMMOVE_H
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-
-/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
-
-static inline u16 __get_unaligned_memmove16(const void *p)
-{
- u16 tmp;
- memmove(&tmp, p, 2);
- return tmp;
-}
-
-static inline u32 __get_unaligned_memmove32(const void *p)
-{
- u32 tmp;
- memmove(&tmp, p, 4);
- return tmp;
-}
-
-static inline u64 __get_unaligned_memmove64(const void *p)
-{
- u64 tmp;
- memmove(&tmp, p, 8);
- return tmp;
-}
-
-static inline void __put_unaligned_memmove16(u16 val, void *p)
-{
- memmove(p, &val, 2);
-}
-
-static inline void __put_unaligned_memmove32(u32 val, void *p)
-{
- memmove(p, &val, 4);
-}
-
-static inline void __put_unaligned_memmove64(u64 val, void *p)
-{
- memmove(p, &val, 8);
-}
-
-#endif /* _LINUX_UNALIGNED_MEMMOVE_H */
diff --git a/include/linux/unaligned/packed_struct.h b/include/linux/unaligned/packed_struct.h
index c0d817de4df2..f4c8eaf4d012 100644
--- a/include/linux/unaligned/packed_struct.h
+++ b/include/linux/unaligned/packed_struct.h
@@ -1,7 +1,7 @@
#ifndef _LINUX_UNALIGNED_PACKED_STRUCT_H
#define _LINUX_UNALIGNED_PACKED_STRUCT_H
-#include <linux/kernel.h>
+#include <linux/types.h>
struct __una_u16 { u16 x; } __packed;
struct __una_u32 { u32 x; } __packed;
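
[Annotation] With the per-strategy headers (access_ok, byteshift, memmove, struct) deleted, callers are expected to include the arch-selected <asm/unaligned.h> and use one accessor family everywhere. A sketch, assuming that header remains the single entry point at this point in the tree:

	#include <asm/unaligned.h>

	static u32 parse_le32_field(const u8 *buf)
	{
		/* correct for any alignment of buf and any CPU endianness */
		return get_unaligned_le32(buf);
	}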
diff --git a/include/linux/unicode.h b/include/linux/unicode.h
index 990aa97d8049..4d39e6e11a95 100644
--- a/include/linux/unicode.h
+++ b/include/linux/unicode.h
@@ -5,9 +5,52 @@
#include <linux/init.h>
#include <linux/dcache.h>
+struct utf8data;
+struct utf8data_table;
+
+#define UNICODE_MAJ_SHIFT 16
+#define UNICODE_MIN_SHIFT 8
+
+#define UNICODE_AGE(MAJ, MIN, REV) \
+ (((unsigned int)(MAJ) << UNICODE_MAJ_SHIFT) | \
+ ((unsigned int)(MIN) << UNICODE_MIN_SHIFT) | \
+ ((unsigned int)(REV)))
+
+static inline u8 unicode_major(unsigned int age)
+{
+ return (age >> UNICODE_MAJ_SHIFT) & 0xff;
+}
+
+static inline u8 unicode_minor(unsigned int age)
+{
+ return (age >> UNICODE_MIN_SHIFT) & 0xff;
+}
+
+static inline u8 unicode_rev(unsigned int age)
+{
+ return age & 0xff;
+}
+
+/*
+ * Two normalization forms are supported:
+ * 1) NFDI
+ * - Apply unicode normalization form NFD.
+ * - Remove any Default_Ignorable_Code_Point.
+ * 2) NFDICF
+ * - Apply unicode normalization form NFD.
+ * - Remove any Default_Ignorable_Code_Point.
+ * - Apply a full casefold (C + F).
+ */
+enum utf8_normalization {
+ UTF8_NFDI = 0,
+ UTF8_NFDICF,
+ UTF8_NMAX,
+};
+
struct unicode_map {
- const char *charset;
- int version;
+ unsigned int version;
+ const struct utf8data *ntab[UTF8_NMAX];
+ const struct utf8data_table *tables;
};
int utf8_validate(const struct unicode_map *um, const struct qstr *str);
@@ -27,7 +70,10 @@ int utf8_normalize(const struct unicode_map *um, const struct qstr *str,
int utf8_casefold(const struct unicode_map *um, const struct qstr *str,
unsigned char *dest, size_t dlen);
-struct unicode_map *utf8_load(const char *version);
+int utf8_casefold_hash(const struct unicode_map *um, const void *salt,
+ struct qstr *str);
+
+struct unicode_map *utf8_load(unsigned int version);
void utf8_unload(struct unicode_map *um);
#endif /* _LINUX_UNICODE_H */
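
[Annotation] utf8_load() now takes a packed version number instead of a string. A sketch of loading Unicode 12.1.0 and unpacking the age again (the wrapper function is illustrative):

	static int load_utf8_12_1_0(struct unicode_map **out)
	{
		struct unicode_map *um = utf8_load(UNICODE_AGE(12, 1, 0));

		if (IS_ERR(um))
			return PTR_ERR(um);
		/* unicode_major(um->version) == 12, minor == 1, rev == 0 */
		*out = um;
		return 0;
	}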
diff --git a/include/linux/units.h b/include/linux/units.h
index aaf716364ec3..2793a41e73a2 100644
--- a/include/linux/units.h
+++ b/include/linux/units.h
@@ -2,7 +2,34 @@
#ifndef _LINUX_UNITS_H
#define _LINUX_UNITS_H
-#include <linux/kernel.h>
+#include <linux/math.h>
+
+/* Metric prefixes in accordance with Système international (d'unités) */
+#define PETA 1000000000000000ULL
+#define TERA 1000000000000ULL
+#define GIGA 1000000000UL
+#define MEGA 1000000UL
+#define KILO 1000UL
+#define HECTO 100UL
+#define DECA 10UL
+#define DECI 10UL
+#define CENTI 100UL
+#define MILLI 1000UL
+#define MICRO 1000000UL
+#define NANO 1000000000UL
+#define PICO 1000000000000ULL
+#define FEMTO 1000000000000000ULL
+
+#define NANOHZ_PER_HZ 1000000000UL
+#define MICROHZ_PER_HZ 1000000UL
+#define MILLIHZ_PER_HZ 1000UL
+#define HZ_PER_KHZ 1000UL
+#define KHZ_PER_MHZ 1000UL
+#define HZ_PER_MHZ 1000000UL
+
+#define MILLIWATT_PER_WATT 1000UL
+#define MICROWATT_PER_MILLIWATT 1000UL
+#define MICROWATT_PER_WATT 1000000UL
#define ABSOLUTE_ZERO_MILLICELSIUS -273150
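
[Annotation] The named constants are meant to replace bare power-of-ten literals in conversions. Two trivial sketches:

	static unsigned long khz_to_hz(unsigned long khz)
	{
		return khz * HZ_PER_KHZ;		/* 1 kHz = 1000 Hz */
	}

	static unsigned long uw_to_mw(unsigned long microwatt)
	{
		return microwatt / MICROWATT_PER_MILLIWATT;
	}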
diff --git a/include/linux/usb.h b/include/linux/usb.h
index e656e7b4b1e4..9ff1ad4dfad1 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -325,7 +325,7 @@ struct usb_interface_cache {
/* variable-length array of alternate settings for this interface,
* stored in no particular order */
- struct usb_host_interface altsetting[0];
+ struct usb_host_interface altsetting[];
};
#define ref_to_usb_interface_cache(r) \
container_of(r, struct usb_interface_cache, ref)
@@ -341,7 +341,7 @@ struct usb_interface_cache {
* @interface: array of pointers to usb_interface structures, one for each
* interface in the configuration. The number of interfaces is stored
* in desc.bNumInterfaces. These pointers are valid only while the
- * the configuration is active.
+ * configuration is active.
* @intf_cache: array of pointers to usb_interface_cache structures, one
* for each interface in the configuration. These structures exist
* for the entire life of the device.
@@ -422,7 +422,7 @@ struct usb_devmap {
* Allocated per bus (tree of devices) we have:
*/
struct usb_bus {
- struct device *controller; /* host/master side hardware */
+ struct device *controller; /* host side hardware */
struct device *sysdev; /* as seen from firmware or bus */
int busnum; /* Bus number (in order of reg) */
const char *bus_name; /* stable id (PCI slot_name etc) */
@@ -473,12 +473,6 @@ struct usb_dev_state;
struct usb_tt;
-enum usb_device_removable {
- USB_DEVICE_REMOVABLE_UNKNOWN = 0,
- USB_DEVICE_REMOVABLE,
- USB_DEVICE_FIXED,
-};
-
enum usb_port_connect_type {
USB_PORT_CONNECT_TYPE_UNKNOWN = 0,
USB_PORT_CONNECT_TYPE_HOT_PLUG,
@@ -560,6 +554,7 @@ struct usb3_lpm_parameters {
* @speed: device speed: high/full/low (or error)
* @rx_lanes: number of rx lanes in use, USB 3.2 adds dual-lane support
* @tx_lanes: number of tx lanes in use, USB 3.2 adds dual-lane support
+ * @ssp_rate: SuperSpeed Plus phy signaling rate and lane count
* @tt: Transaction Translator info; used with low/full speed dev, highspeed hub
* @ttport: device port on that tt hub
* @toggle: one bit for each endpoint, with ([0] = IN, [1] = OUT) endpoints
@@ -580,6 +575,7 @@ struct usb3_lpm_parameters {
* @devaddr: device address, XHCI: assigned by HW, others: same as devnum
* @can_submit: URBs may be submitted
* @persist_enabled: USB_PERSIST enabled for this device
+ * @reset_in_progress: the device is being reset
* @have_langid: whether string_langid is valid
* @authorized: policy has said we can use it;
* (user space) policy determines if we authorize this device to be
@@ -589,6 +585,7 @@ struct usb3_lpm_parameters {
* @authenticated: Crypto authentication passed
* @wusb: device is Wireless USB
* @lpm_capable: device supports LPM
+ * @lpm_devinit_allow: Allow USB3 device initiated LPM, exit latency is in range
* @usb2_hw_lpm_capable: device can perform USB2 hardware LPM
* @usb2_hw_lpm_besl_capable: device can perform USB2 hardware BESL LPM
* @usb2_hw_lpm_enabled: USB2 hardware LPM is enabled
@@ -620,9 +617,9 @@ struct usb3_lpm_parameters {
* Management to be disabled for this usb_device. This count should only
* be manipulated by those functions, with the bandwidth_mutex is held.
* @hub_delay: cached value consisting of:
- * parent->hub_delay + wHubDelay + tTPTransmissionDelay (40ns)
- *
+ * parent->hub_delay + wHubDelay + tTPTransmissionDelay (40ns)
* Will be used as wValue for SetIsochDelay requests.
+ * @use_generic_driver: ask driver core to reprobe using the generic driver.
*
* Notes:
* Usbcore drivers should not set usbdev->state directly. Instead use
@@ -636,6 +633,7 @@ struct usb_device {
enum usb_device_speed speed;
unsigned int rx_lanes;
unsigned int tx_lanes;
+ enum usb_ssp_rate ssp_rate;
struct usb_tt *tt;
int ttport;
@@ -665,11 +663,13 @@ struct usb_device {
unsigned can_submit:1;
unsigned persist_enabled:1;
+ unsigned reset_in_progress:1;
unsigned have_langid:1;
unsigned authorized:1;
unsigned authenticated:1;
unsigned wusb:1;
unsigned lpm_capable:1;
+ unsigned lpm_devinit_allow:1;
unsigned usb2_hw_lpm_capable:1;
unsigned usb2_hw_lpm_besl_capable:1;
unsigned usb2_hw_lpm_enabled:1;
@@ -701,13 +701,13 @@ struct usb_device {
#endif
struct wusb_dev *wusb_dev;
int slot_id;
- enum usb_device_removable removable;
struct usb2_lpm_parameters l1_params;
struct usb3_lpm_parameters u1_params;
struct usb3_lpm_parameters u2_params;
unsigned lpm_disable_count;
u16 hub_delay;
+ unsigned use_generic_driver:1;
};
#define to_usb_device(d) container_of(d, struct usb_device, dev)
@@ -745,6 +745,8 @@ extern int usb_lock_device_for_reset(struct usb_device *udev,
extern int usb_reset_device(struct usb_device *dev);
extern void usb_queue_reset_device(struct usb_interface *dev);
+extern struct device *usb_intf_get_dma_device(struct usb_interface *intf);
+
#ifdef CONFIG_ACPI
extern int usb_acpi_set_power_state(struct usb_device *hdev, int index,
bool enable);
@@ -838,7 +840,7 @@ extern int usb_free_streams(struct usb_interface *interface,
/* used these for multi-interface device registration */
extern int usb_driver_claim_interface(struct usb_driver *driver,
- struct usb_interface *iface, void *priv);
+ struct usb_interface *iface, void *data);
/**
* usb_interface_claimed - returns true iff an interface is claimed
@@ -1214,6 +1216,7 @@ struct usb_driver {
* struct usb_device_driver - identifies USB device driver to usbcore
* @name: The driver name should be unique among USB drivers,
* and should normally be the same as the module name.
+ * @match: If set, used for better device/driver matching.
* @probe: Called to see if the driver is willing to manage a particular
* device. If it is, probe returns zero and uses dev_set_drvdata()
* to associate driver-specific data with the device. If unwilling
@@ -1226,14 +1229,21 @@ struct usb_driver {
* @dev_groups: Attributes attached to the device that will be created once it
* is bound to the driver.
* @drvwrap: Driver-model core structure wrapper.
+ * @id_table: used with @match() to select better matching driver at
+ * probe() time.
* @supports_autosuspend: if set to 0, the USB core will not allow autosuspend
* for devices bound to this driver.
+ * @generic_subclass: if set to 1, the generic USB driver's probe, disconnect,
+ * resume and suspend functions will be called in addition to the driver's
+ * own, so this part of the setup does not need to be replicated.
*
- * USB drivers must provide all the fields listed above except drvwrap.
+ * USB drivers must provide all the fields listed above except drvwrap,
+ * match, and id_table.
*/
struct usb_device_driver {
const char *name;
+ bool (*match) (struct usb_device *udev);
int (*probe) (struct usb_device *udev);
void (*disconnect) (struct usb_device *udev);
@@ -1241,13 +1251,13 @@ struct usb_device_driver {
int (*resume) (struct usb_device *udev, pm_message_t message);
const struct attribute_group **dev_groups;
struct usbdrv_wrap drvwrap;
+ const struct usb_device_id *id_table;
unsigned int supports_autosuspend:1;
+ unsigned int generic_subclass:1;
};
#define to_usb_device_driver(d) container_of(d, struct usb_device_driver, \
drvwrap.driver)
-extern struct bus_type usb_bus_type;
-
/**
* struct usb_class_driver - identifies a USB driver that wants to use the USB major number
* @name: the usb class device name for this driver. Will show up in sysfs.
@@ -1463,7 +1473,7 @@ typedef void (*usb_complete_t)(struct urb *);
*
* Note that transfer_buffer must still be set if the controller
* does not support DMA (as indicated by hcd_uses_dma()) and when talking
- * to root hub. If you have to trasfer between highmem zone and the device
+ * to root hub. If you have to transfer between highmem zone and the device
* on such controller, create a bounce buffer or bail out with an error.
* If transfer_buffer cannot be set (is in highmem) and the controller is DMA
* capable, assign NULL to it, so that usbmon knows not to use the value.
@@ -1582,7 +1592,7 @@ struct urb {
int error_count; /* (return) number of ISO errors */
void *context; /* (in) context for completion */
usb_complete_t complete; /* (in) completion routine */
- struct usb_iso_packet_descriptor iso_frame_desc[0];
+ struct usb_iso_packet_descriptor iso_frame_desc[];
/* (in) ISO ONLY */
};
@@ -1753,6 +1763,7 @@ static inline int usb_urb_dir_out(struct urb *urb)
return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_OUT;
}
+int usb_pipe_type_check(struct usb_device *dev, unsigned int pipe);
int usb_urb_ep_type_check(const struct urb *urb);
void *usb_alloc_coherent(struct usb_device *dev, size_t size,
@@ -1790,6 +1801,14 @@ extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
int timeout);
/* wrappers around usb_control_msg() for the most common standard requests */
+int usb_control_msg_send(struct usb_device *dev, __u8 endpoint, __u8 request,
+ __u8 requesttype, __u16 value, __u16 index,
+ const void *data, __u16 size, int timeout,
+ gfp_t memflags);
+int usb_control_msg_recv(struct usb_device *dev, __u8 endpoint, __u8 request,
+ __u8 requesttype, __u16 value, __u16 index,
+ void *data, __u16 size, int timeout,
+ gfp_t memflags);
extern int usb_get_descriptor(struct usb_device *dev, unsigned char desctype,
unsigned char descindex, void *buf, int size);
extern int usb_get_status(struct usb_device *dev,
@@ -1954,21 +1973,10 @@ usb_pipe_endpoint(struct usb_device *dev, unsigned int pipe)
return eps[usb_pipeendpoint(pipe)];
}
-/*-------------------------------------------------------------------------*/
-
-static inline __u16
-usb_maxpacket(struct usb_device *udev, int pipe, int is_out)
+static inline u16 usb_maxpacket(struct usb_device *udev, int pipe)
{
- struct usb_host_endpoint *ep;
- unsigned epnum = usb_pipeendpoint(pipe);
+ struct usb_host_endpoint *ep = usb_pipe_endpoint(udev, pipe);
- if (is_out) {
- WARN_ON(usb_pipein(pipe));
- ep = udev->ep_out[epnum];
- } else {
- WARN_ON(usb_pipeout(pipe));
- ep = udev->ep_in[epnum];
- }
if (!ep)
return 0;
@@ -1976,8 +1984,6 @@ usb_maxpacket(struct usb_device *udev, int pipe, int is_out)
return usb_endpoint_maxp(&ep->desc);
}
-/* ----------------------------------------------------------------------- */
-
/* translate USB error codes to codes user space understands */
static inline int usb_translate_errors(int error_code)
{
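
[Annotation] Unlike raw usb_control_msg(), the new send/recv wrappers treat a short transfer as an error and copy through an internal buffer, so stack data is acceptable. A sketch of a standard GET_STATUS read (the wrapper function is illustrative):

	static int read_device_status(struct usb_device *udev, __le16 *status)
	{
		return usb_control_msg_recv(udev, 0, USB_REQ_GET_STATUS,
					    USB_DIR_IN | USB_TYPE_STANDARD |
					    USB_RECIP_DEVICE,
					    0, 0, status, sizeof(*status),
					    USB_CTRL_GET_TIMEOUT, GFP_KERNEL);
	}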
diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h
index ba4b3e3327ff..ca796dc1a984 100644
--- a/include/linux/usb/audio-v2.h
+++ b/include/linux/usb/audio-v2.h
@@ -2,9 +2,6 @@
/*
* Copyright (c) 2010 Daniel Mack <daniel@caiaq.de>
*
- * This software is distributed under the terms of the GNU General Public
- * License ("GPL") version 2, as published by the Free Software Foundation.
- *
* This file holds USB constants and structures defined
* by the USB Device Class Definition for Audio Devices in version 2.0.
* Comments below reference relevant sections of the documents contained
@@ -153,7 +150,33 @@ struct uac2_feature_unit_descriptor {
__u8 bSourceID;
/* bmaControls is actually u32,
* but u8 is needed for the hybrid parser */
- __u8 bmaControls[0]; /* variable length */
+ __u8 bmaControls[]; /* variable length */
+} __attribute__((packed));
+
+#define UAC2_DT_FEATURE_UNIT_SIZE(ch) (6 + ((ch) + 1) * 4)
+
+/* As above, but more useful for defining your own descriptors: */
+#define DECLARE_UAC2_FEATURE_UNIT_DESCRIPTOR(ch) \
+struct uac2_feature_unit_descriptor_##ch { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubtype; \
+ __u8 bUnitID; \
+ __u8 bSourceID; \
+ __le32 bmaControls[ch + 1]; \
+ __u8 iFeature; \
+} __packed
+
+/* 4.7.2.10 Effect Unit Descriptor */
+
+struct uac2_effect_unit_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bUnitID;
+ __le16 wEffectType;
+ __u8 bSourceID;
+ __u8 bmaControls[]; /* variable length */
} __attribute__((packed));
/* 4.9.2 Class-Specific AS Interface Descriptor */
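
[Annotation] The DECLARE_UAC2_FEATURE_UNIT_DESCRIPTOR() macro stamps out a fixed-channel-count variant so gadget drivers can define descriptors statically. A sketch for a 2-channel function (unit IDs and control values are illustrative):

	DECLARE_UAC2_FEATURE_UNIT_DESCRIPTOR(2);

	static struct uac2_feature_unit_descriptor_2 fu_desc = {
		.bLength		= UAC2_DT_FEATURE_UNIT_SIZE(2),
		.bDescriptorType	= USB_DT_CS_INTERFACE,
		.bDescriptorSubtype	= UAC_FEATURE_UNIT,	/* 0x06 */
		.bUnitID		= 5,
		.bSourceID		= 1,
		/* bmaControls[0] = master, [1..2] = per-channel */
	};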
diff --git a/include/linux/usb/audio-v3.h b/include/linux/usb/audio-v3.h
index 6b708434b7f9..c69a6f2e6837 100644
--- a/include/linux/usb/audio-v3.h
+++ b/include/linux/usb/audio-v3.h
@@ -109,7 +109,7 @@ struct uac3_feature_unit_descriptor {
__u8 bSourceID;
/* bmaControls is actually u32,
* but u8 is needed for the hybrid parser */
- __u8 bmaControls[0]; /* variable length */
+ __u8 bmaControls[]; /* variable length */
/* wFeatureDescrStr omitted */
} __attribute__((packed));
diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h
index 170acd500ea1..0747b24a1a7c 100644
--- a/include/linux/usb/audio.h
+++ b/include/linux/usb/audio.h
@@ -6,9 +6,6 @@
* Developed for Thumtronics by Grey Innovation
* Ben Williamson <ben.williamson@greyinnovation.com>
*
- * This software is distributed under the terms of the GNU General Public
- * License ("GPL") version 2, as published by the Free Software Foundation.
- *
* This file holds USB constants and structures defined
* by the USB Device Class Definition for Audio Devices.
* Comments below reference relevant sections of that document:
diff --git a/include/linux/usb/c67x00.h b/include/linux/usb/c67x00.h
index 2fc39e3b7281..45e0757e58f3 100644
--- a/include/linux/usb/c67x00.h
+++ b/include/linux/usb/c67x00.h
@@ -3,21 +3,6 @@
* usb_c67x00.h: platform definitions for the Cypress C67X00 USB chip
*
* Copyright (C) 2006-2008 Barco N.V.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301 USA.
*/
#ifndef _LINUX_USB_C67X00_H
diff --git a/include/linux/usb/cdc-wdm.h b/include/linux/usb/cdc-wdm.h
index 9b895f93d8de..85417f00a89a 100644
--- a/include/linux/usb/cdc-wdm.h
+++ b/include/linux/usb/cdc-wdm.h
@@ -3,20 +3,17 @@
* USB CDC Device Management subdriver
*
* Copyright (c) 2012 Bjørn Mork <bjorn@mork.no>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#ifndef __LINUX_USB_CDC_WDM_H
#define __LINUX_USB_CDC_WDM_H
+#include <linux/wwan.h>
#include <uapi/linux/usb/cdc-wdm.h>
extern struct usb_driver *usb_cdc_wdm_register(struct usb_interface *intf,
struct usb_endpoint_descriptor *ep,
- int bufsize,
+ int bufsize, enum wwan_port_type type,
int (*manage_power)(struct usb_interface *, int));
#endif /* __LINUX_USB_CDC_WDM_H */
diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h
index 35d784cf32a4..0af0db51bc89 100644
--- a/include/linux/usb/cdc.h
+++ b/include/linux/usb/cdc.h
@@ -3,10 +3,6 @@
* USB CDC common helpers
*
* Copyright (c) 2015 Oliver Neukum <oneukum@suse.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#ifndef __LINUX_USB_CDC_H
#define __LINUX_USB_CDC_H
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 1646c06989df..2d207cb4837d 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -46,12 +46,15 @@
#define CDC_NCM_DATA_ALTSETTING_NCM 1
#define CDC_NCM_DATA_ALTSETTING_MBIM 2
-/* CDC NCM subclass 3.2.1 */
+/* CDC NCM subclass 3.3.1 */
#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
+/* CDC NCM subclass 3.3.2 */
+#define USB_CDC_NCM_NDP32_LENGTH_MIN 0x20
+
/* Maximum NTB length */
-#define CDC_NCM_NTB_MAX_SIZE_TX 32768 /* bytes */
-#define CDC_NCM_NTB_MAX_SIZE_RX 32768 /* bytes */
+#define CDC_NCM_NTB_MAX_SIZE_TX 65536 /* bytes */
+#define CDC_NCM_NTB_MAX_SIZE_RX 65536 /* bytes */
/* Initial NTB length */
#define CDC_NCM_NTB_DEF_SIZE_TX 16384 /* bytes */
@@ -84,7 +87,7 @@
/* Driver flags */
#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */
#define CDC_MBIM_FLAG_AVOID_ALTSETTING_TOGGLE 0x04 /* Avoid altsetting toggle during init */
-#define CDC_NCM_FLAG_RESET_NTB16 0x08 /* set NDP16 one more time after altsetting switch */
+#define CDC_NCM_FLAG_PREFER_NTB32 0x08 /* prefer NDP32 over NDP16 */
#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
(x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
@@ -95,6 +98,8 @@ struct cdc_ncm_ctx {
struct hrtimer tx_timer;
struct tasklet_struct bh;
+ struct usbnet *dev;
+
const struct usb_cdc_ncm_desc *func_desc;
const struct usb_cdc_mbim_desc *mbim_desc;
const struct usb_cdc_mbim_extended_desc *mbim_extended_desc;
@@ -113,7 +118,11 @@ struct cdc_ncm_ctx {
u32 timer_interval;
u32 max_ndp_size;
- struct usb_cdc_ncm_ndp16 *delayed_ndp16;
+ u8 is_ndp16;
+ union {
+ struct usb_cdc_ncm_ndp16 *delayed_ndp16;
+ struct usb_cdc_ncm_ndp32 *delayed_ndp32;
+ };
u32 tx_timer_pending;
u32 tx_curr_frame_num;
@@ -150,6 +159,8 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset);
+int cdc_ncm_rx_verify_nth32(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
+int cdc_ncm_rx_verify_ndp32(struct sk_buff *skb_in, int ndpoffset);
struct sk_buff *
cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags);
int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in);
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index 58b83066bea4..969e7dba6358 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -6,13 +6,13 @@
* Wireless USB 1.0 (spread around). Linux has several APIs in C that
* need these:
*
- * - the master/host side Linux-USB kernel driver API;
+ * - the host side Linux-USB kernel driver API;
* - the "usbfs" user space API; and
- * - the Linux "gadget" slave/device/peripheral side driver API.
+ * - the Linux "gadget" device/peripheral side driver API.
*
* USB 2.0 adds an additional "On The Go" (OTG) mode, which lets systems
- * act either as a USB master/host or as a USB slave/device. That means
- * the master and slave side APIs benefit from working well together.
+ * act either as a USB host or as a USB device. That means the host and
+ * device side APIs benefit from working well together.
*
* There's also "Wireless USB", using low power short range radios for
* peripheral interconnection but otherwise building on the USB framework.
@@ -33,65 +33,28 @@
#ifndef __LINUX_USB_CH9_H
#define __LINUX_USB_CH9_H
-#include <linux/device.h>
#include <uapi/linux/usb/ch9.h>
-/**
- * usb_ep_type_string() - Returns human readable-name of the endpoint type.
- * @ep_type: The endpoint type to return human-readable name for. If it's not
- * any of the types: USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT},
- * usually got by usb_endpoint_type(), the string 'unknown' will be returned.
- */
-extern const char *usb_ep_type_string(int ep_type);
+/* USB 3.2 SuperSpeed Plus phy signaling rate generation and lane count */
-/**
- * usb_speed_string() - Returns human readable-name of the speed.
- * @speed: The speed to return human-readable name for. If it's not
- * any of the speeds defined in usb_device_speed enum, string for
- * USB_SPEED_UNKNOWN will be returned.
- */
-extern const char *usb_speed_string(enum usb_device_speed speed);
+enum usb_ssp_rate {
+ USB_SSP_GEN_UNKNOWN = 0,
+ USB_SSP_GEN_2x1,
+ USB_SSP_GEN_1x2,
+ USB_SSP_GEN_2x2,
+};
-/**
- * usb_get_maximum_speed - Get maximum requested speed for a given USB
- * controller.
- * @dev: Pointer to the given USB controller device
- *
- * The function gets the maximum speed string from property "maximum-speed",
- * and returns the corresponding enum usb_device_speed.
- */
-extern enum usb_device_speed usb_get_maximum_speed(struct device *dev);
+struct device;
-/**
- * usb_state_string - Returns human readable name for the state.
- * @state: The state to return a human-readable name for. If it's not
- * any of the states devices in usb_device_state_string enum,
- * the string UNKNOWN will be returned.
- */
+extern const char *usb_ep_type_string(int ep_type);
+extern const char *usb_speed_string(enum usb_device_speed speed);
+extern enum usb_device_speed usb_get_maximum_speed(struct device *dev);
+extern enum usb_ssp_rate usb_get_maximum_ssp_rate(struct device *dev);
extern const char *usb_state_string(enum usb_device_state state);
+unsigned int usb_decode_interval(const struct usb_endpoint_descriptor *epd,
+ enum usb_device_speed speed);
#ifdef CONFIG_TRACING
-/**
- * usb_decode_ctrl - Returns human readable representation of control request.
- * @str: buffer to return a human-readable representation of control request.
- * This buffer should have about 200 bytes.
- * @size: size of str buffer.
- * @bRequestType: matches the USB bmRequestType field
- * @bRequest: matches the USB bRequest field
- * @wValue: matches the USB wValue field (CPU byte order)
- * @wIndex: matches the USB wIndex field (CPU byte order)
- * @wLength: matches the USB wLength field (CPU byte order)
- *
- * Function returns decoded, formatted and human-readable description of
- * control request packet.
- *
- * The usage scenario for this is for tracepoints, so function as a return
- * use the same value as in parameters. This approach allows to use this
- * function in TP_printk
- *
- * Important: wValue, wIndex, wLength parameters before invoking this function
- * should be processed by le16_to_cpu macro.
- */
extern const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType,
__u8 bRequest, __u16 wValue, __u16 wIndex,
__u16 wLength);
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index edd89b7c8f18..ee38835ed77c 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -62,11 +62,13 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_REQUIRES_ALIGNED_DMA BIT(13)
#define CI_HDRC_IMX_IS_HSIC BIT(14)
#define CI_HDRC_PMQOS BIT(15)
+#define CI_HDRC_PHY_VBUS_CONTROL BIT(16)
enum usb_dr_mode dr_mode;
#define CI_HDRC_CONTROLLER_RESET_EVENT 0
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
#define CI_HDRC_IMX_HSIC_ACTIVE_EVENT 2
#define CI_HDRC_IMX_HSIC_SUSPEND_EVENT 3
+#define CI_HDRC_CONTROLLER_VBUS_EVENT 4
int (*notify_event) (struct ci_hdrc *ci, unsigned event);
struct regulator *reg_vbus;
struct usb_otg_caps ci_otg_caps;
@@ -87,6 +89,12 @@ struct ci_hdrc_platform_data {
struct pinctrl_state *pins_default;
struct pinctrl_state *pins_host;
struct pinctrl_state *pins_device;
+
+ /* platform-specific hooks */
+ int (*hub_control)(struct ci_hdrc *ci, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength,
+ bool *done, unsigned long *flags);
+ void (*enter_lpm)(struct ci_hdrc *ci, bool enable);
};
/* Default offset of capability registers */
@@ -98,5 +106,7 @@ struct platform_device *ci_hdrc_add_device(struct device *dev,
struct ci_hdrc_platform_data *platdata);
/* Remove ci hdrc device */
void ci_hdrc_remove_device(struct platform_device *pdev);
+/* Get current available role */
+enum usb_dr_mode ci_hdrc_query_available_role(struct platform_device *pdev);
#endif
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 8675e145ea8b..43ac3fa760db 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -3,20 +3,6 @@
* composite.h -- framework for usb gadgets which are composite devices
*
* Copyright (C) 2006-2008 David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __LINUX_USB_COMPOSITE_H
@@ -249,6 +235,9 @@ int usb_function_activate(struct usb_function *);
int usb_interface_id(struct usb_configuration *, struct usb_function *);
+int config_ep_by_speed_and_alt(struct usb_gadget *g, struct usb_function *f,
+ struct usb_ep *_ep, u8 alt);
+
int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f,
struct usb_ep *_ep);
@@ -268,7 +257,7 @@ int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f,
* @bConfigurationValue: Copied into configuration descriptor.
* @iConfiguration: Copied into configuration descriptor.
* @bmAttributes: Copied into configuration descriptor.
- * @MaxPower: Power consumtion in mA. Used to compute bMaxPower in the
+ * @MaxPower: Power consumption in mA. Used to compute bMaxPower in the
* configuration descriptor after considering the bus speed.
* @cdev: assigned by @usb_add_config() before calling @bind(); this is
* the device associated with this configuration.
@@ -434,7 +423,7 @@ static inline struct usb_composite_driver *to_cdriver(
#define OS_STRING_IDX 0xEE
/**
- * struct usb_composite_device - represents one composite usb gadget
+ * struct usb_composite_dev - represents one composite usb gadget
* @gadget: read-only, abstracts the gadget's usb peripheral controller
* @req: used for control responses; buffer is pre-allocated
* @os_desc_req: used for OS descriptors responses; buffer is pre-allocated
@@ -522,6 +511,8 @@ extern struct usb_string *usb_gstrings_attach(struct usb_composite_dev *cdev,
extern int usb_string_ids_n(struct usb_composite_dev *c, unsigned n);
extern void composite_disconnect(struct usb_gadget *gadget);
+extern void composite_reset(struct usb_gadget *gadget);
+
extern int composite_setup(struct usb_gadget *gadget,
const struct usb_ctrlrequest *ctrl);
extern void composite_suspend(struct usb_gadget *gadget);
@@ -570,8 +561,8 @@ static inline u16 get_default_bcdDevice(void)
{
u16 bcdDevice;
- bcdDevice = bin2bcd((LINUX_VERSION_CODE >> 16 & 0xff)) << 8;
- bcdDevice |= bin2bcd((LINUX_VERSION_CODE >> 8 & 0xff));
+ bcdDevice = bin2bcd(LINUX_VERSION_MAJOR) << 8;
+ bcdDevice |= bin2bcd(LINUX_VERSION_PATCHLEVEL);
return bcdDevice;
}
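
[Annotation] A quick worked example of the new derivation: on a 5.10 kernel, LINUX_VERSION_MAJOR is 5 and LINUX_VERSION_PATCHLEVEL is 10, so

	bcdDevice  = bin2bcd(5) << 8;	/* 0x0500 */
	bcdDevice |= bin2bcd(10);	/* 0x0510 */

which matches what the old LINUX_VERSION_CODE shifts produced for two-digit versions while staying correct for major/patchlevel values that no longer fit the old packing.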
diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
index a15ce99dfc2d..fbabadd3b372 100644
--- a/include/linux/usb/ehci_def.h
+++ b/include/linux/usb/ehci_def.h
@@ -1,20 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2001-2002 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_USB_EHCI_DEF_H
@@ -45,6 +31,7 @@ struct ehci_caps {
#define HCS_PORTROUTED(p) ((p)&(1 << 7)) /* true: port routing */
#define HCS_PPC(p) ((p)&(1 << 4)) /* true: port power control */
#define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
+#define HCS_N_PORTS_MAX 15 /* N_PORTS valid 0x1-0xF */
u32 hcc_params; /* HCCPARAMS - offset 0x8 */
/* EHCI 1.1 addendum */
@@ -126,8 +113,9 @@ struct ehci_regs {
u32 configured_flag;
#define FLAG_CF (1<<0) /* true: we'll support "high speed" */
- /* PORTSC: offset 0x44 */
- u32 port_status[0]; /* up to N_PORTS */
+ union {
+ /* PORTSC: offset 0x44 */
+ u32 port_status[HCS_N_PORTS_MAX]; /* up to N_PORTS */
/* EHCI 1.1 addendum */
#define PORTSC_SUSPEND_STS_ACK 0
#define PORTSC_SUSPEND_STS_NYET 1
@@ -151,7 +139,7 @@ struct ehci_regs {
#define PORT_OWNER (1<<13) /* true: companion hc owns this port */
#define PORT_POWER (1<<12) /* true: has power (see PPC) */
#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */
-/* 11:10 for detecting lowspeed devices (reset vs release ownership) */
+#define PORT_LS_MASK (3<<10) /* Link status (SE0, K or J) */
/* 9 reserved */
#define PORT_LPM (1<<9) /* LPM transaction */
#define PORT_RESET (1<<8) /* reset port */
@@ -164,28 +152,35 @@ struct ehci_regs {
#define PORT_CSC (1<<1) /* connect status change */
#define PORT_CONNECT (1<<0) /* device connected */
#define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC)
-
- u32 reserved3[9];
-
- /* USBMODE: offset 0x68 */
- u32 usbmode; /* USB Device mode */
+ struct {
+ u32 reserved3[9];
+ /* USBMODE: offset 0x68 */
+ u32 usbmode; /* USB Device mode */
+ };
#define USBMODE_SDIS (1<<3) /* Stream disable */
#define USBMODE_BE (1<<2) /* BE/LE endianness select */
#define USBMODE_CM_HC (3<<0) /* host controller mode */
#define USBMODE_CM_IDLE (0<<0) /* idle state */
-
- u32 reserved4[6];
+ };
/* Moorestown has some non-standard registers, partially due to the fact that
* its EHCI controller has both TT and LPM support. HOSTPCx are extensions to
* PORTSCx
*/
- /* HOSTPC: offset 0x84 */
- u32 hostpc[0]; /* HOSTPC extension */
+ union {
+ struct {
+ u32 reserved4;
+ /* HOSTPC: offset 0x84 */
+ u32 hostpc[HCS_N_PORTS_MAX];
#define HOSTPC_PHCD (1<<22) /* Phy clock disable */
#define HOSTPC_PSPD (3<<25) /* Port speed detection */
+ };
+
+ /* Broadcom-proprietary USB_EHCI_INSNREG00 @ 0x80 */
+ u32 brcm_insnreg[4];
+ };
- u32 reserved5[17];
+ u32 reserved5[2];
/* USBMODE_EX: offset 0xc8 */
u32 usbmode_ex; /* USB Device mode extension */
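
Sizing port_status[] to HCS_N_PORTS_MAX (and overlaying the Broadcom register
block as a union) keeps the struct layout fixed while drivers still bound
their loops by the advertised port count. A hedged sketch of walking PORTSC
with the macros above ('caps' and 'regs' are assumed to be ioremapped EHCI
capability/operational register pointers):

static void dump_ports(struct ehci_caps __iomem *caps,
		       struct ehci_regs __iomem *regs)
{
	unsigned int i, n = HCS_N_PORTS(readl(&caps->hcs_params));

	for (i = 0; i < n && i < HCS_N_PORTS_MAX; i++) {
		u32 portsc = readl(&regs->port_status[i]);

		if (portsc & PORT_CONNECT)
			pr_info("port %u connected, line state %#x\n",
				i + 1, (portsc & PORT_LS_MASK) >> 10);
	}
}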
diff --git a/include/linux/usb/ehci_pdriver.h b/include/linux/usb/ehci_pdriver.h
index dd742afdc03f..0f1b166f5aa0 100644
--- a/include/linux/usb/ehci_pdriver.h
+++ b/include/linux/usb/ehci_pdriver.h
@@ -1,20 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2012 Hauke Mehrtens <hauke@hauke-m.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __USB_CORE_EHCI_PDRIVER_H
@@ -50,6 +36,7 @@ struct usb_ehci_pdata {
unsigned no_io_watchdog:1;
unsigned reset_on_resume:1;
unsigned dma_mask_64:1;
+ unsigned spurious_oc:1;
/* Turn on all power and clocks */
int (*power_on)(struct platform_device *pdev);
diff --git a/include/linux/usb/g_hid.h b/include/linux/usb/g_hid.h
index 7581e488c237..d56bfedeb079 100644
--- a/include/linux/usb/g_hid.h
+++ b/include/linux/usb/g_hid.h
@@ -3,20 +3,6 @@
* g_hid.h -- Header file for USB HID gadget driver
*
* Copyright (C) 2010 Fabien Chouteau <fabien.chouteau@barco.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_USB_G_HID_H
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 124462d65eac..dc3092cea99e 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -4,13 +4,12 @@
*
* We call the USB code inside a Linux-based peripheral device a "gadget"
* driver, except for the hardware-specific bus glue. One USB host can
- * master many USB gadgets, but the gadgets are only slaved to one host.
+ * talk to many USB gadgets, but the gadgets are only able to communicate
+ * with one host.
*
*
* (C) Copyright 2002-2004 by David Brownell
* All Rights Reserved.
- *
- * This software is licensed under the GNU GPL version 2.
*/
#ifndef __LINUX_USB_GADGET_H
@@ -42,6 +41,8 @@ struct usb_ep;
* @num_mapped_sgs: number of SG entries mapped to DMA (internal)
* @length: Length of that data
* @stream_id: The stream id, when USB3.0 bulk streams are being used
+ * @is_last: Indicates if this is the last request of a stream_id before
+ * switching to a different stream (required for DWC3 controllers).
* @no_interrupt: If true, hints that no completion irq is needed.
* Helpful sometimes with deep request queues that are handled
* directly by DMA controllers.
@@ -104,6 +105,7 @@ struct usb_request {
unsigned num_mapped_sgs;
unsigned stream_id:16;
+ unsigned is_last:1;
unsigned no_interrupt:1;
unsigned zero:1;
unsigned short_not_ok:1;
@@ -193,7 +195,7 @@ struct usb_ep_caps {
* @name:identifier for the endpoint, such as "ep-a" or "ep9in-bulk"
* @ops: Function pointers used to access hardware-specific operations.
* @ep_list:the gadget's ep_list holds all of its endpoints
- * @caps:The structure describing types and directions supported by endoint.
+ * @caps:The structure describing types and directions supported by endpoint.
* @enabled: The current endpoint enabled/disabled state.
* @claimed: True if this endpoint is claimed by a function.
* @maxpacket:The maximum packet size used on this endpoint. The initial
@@ -319,13 +321,17 @@ struct usb_gadget_ops {
struct usb_gadget_driver *);
int (*udc_stop)(struct usb_gadget *);
void (*udc_set_speed)(struct usb_gadget *, enum usb_device_speed);
+ void (*udc_set_ssp_rate)(struct usb_gadget *gadget,
+ enum usb_ssp_rate rate);
+ void (*udc_async_callbacks)(struct usb_gadget *gadget, bool enable);
struct usb_ep *(*match_ep)(struct usb_gadget *,
struct usb_endpoint_descriptor *,
struct usb_ss_ep_comp_descriptor *);
+ int (*check_config)(struct usb_gadget *gadget);
};
/**
- * struct usb_gadget - represents a usb slave device
+ * struct usb_gadget - represents a usb device
* @work: (internal use) Workqueue to be used for sysfs_notify()
* @udc: struct usb_udc pointer for this gadget
* @ops: Function pointers used to access hardware-specific operations.
@@ -335,6 +341,10 @@ struct usb_gadget_ops {
* @speed: Speed of current connection to USB host.
* @max_speed: Maximal speed the UDC can handle. UDC must support this
* and all slower speeds.
+ * @ssp_rate: Current connected SuperSpeed Plus signaling rate and lane count.
+ * @max_ssp_rate: Maximum SuperSpeed Plus signaling rate and lane count the UDC
+ * can handle. The UDC must support this and all slower speeds and lower
+ * lane counts.
* @state: the state we are now (attached, suspended, configured, etc)
* @name: Identifies the controller hardware type. Used in diagnostics
* and sometimes configuration.
@@ -373,6 +383,8 @@ struct usb_gadget_ops {
* @connected: True if gadget is connected.
* @lpm_capable: If the gadget max_speed is FULL or HIGH, this flag
* indicates that it supports LPM as per the LPM ECN & errata.
+ * @irq: the interrupt number for device controller.
+ * @id_number: a unique ID number for ensuring that gadget names are distinct
*
* Gadgets have a mostly-portable "gadget driver" implementing device
* functions, handling all usb configurations and interfaces. Gadget
@@ -401,6 +413,11 @@ struct usb_gadget {
struct list_head ep_list; /* of usb_ep */
enum usb_device_speed speed;
enum usb_device_speed max_speed;
+
+ /* USB SuperSpeed Plus only */
+ enum usb_ssp_rate ssp_rate;
+ enum usb_ssp_rate max_ssp_rate;
+
enum usb_device_state state;
const char *name;
struct device dev;
@@ -427,9 +444,12 @@ struct usb_gadget {
unsigned deactivated:1;
unsigned connected:1;
unsigned lpm_capable:1;
+ int irq;
+ int id_number;
};
#define work_to_gadget(w) (container_of((w), struct usb_gadget, work))
+/* Interface to the device model */
static inline void set_gadget_data(struct usb_gadget *gadget, void *data)
{ dev_set_drvdata(&gadget->dev, data); }
static inline void *get_gadget_data(struct usb_gadget *gadget)
@@ -438,6 +458,26 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
{
return container_of(dev, struct usb_gadget, dev);
}
+static inline struct usb_gadget *usb_get_gadget(struct usb_gadget *gadget)
+{
+ get_device(&gadget->dev);
+ return gadget;
+}
+static inline void usb_put_gadget(struct usb_gadget *gadget)
+{
+ put_device(&gadget->dev);
+}
+extern void usb_initialize_gadget(struct device *parent,
+ struct usb_gadget *gadget, void (*release)(struct device *dev));
+extern int usb_add_gadget(struct usb_gadget *gadget);
+extern void usb_del_gadget(struct usb_gadget *gadget);
+
+/* Legacy device-model interface */
+extern int usb_add_gadget_udc_release(struct device *parent,
+ struct usb_gadget *gadget, void (*release)(struct device *dev));
+extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget);
+extern void usb_del_gadget_udc(struct usb_gadget *gadget);
+extern char *usb_get_gadget_udc_name(void);
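
The usb_initialize_gadget()/usb_add_gadget() split (moved here from the legacy
block below) lets a UDC driver separate reference-count setup from
registration. A rough sketch of the intended sequence; struct my_udc and
my_udc_release() are hypothetical:

static void my_udc_release(struct device *dev)
{
	struct usb_gadget *gadget = dev_to_usb_gadget(dev);

	kfree(container_of(gadget, struct my_udc, gadget));
}

/* in probe: */
usb_initialize_gadget(parent, &udc->gadget, my_udc_release);
ret = usb_add_gadget(&udc->gadget);
if (ret)
	usb_put_gadget(&udc->gadget);	/* drop the initial reference */
/* in remove: usb_del_gadget(&udc->gadget); then usb_put_gadget() */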
/* iterates the non-control endpoints; 'tmp' is a struct usb_ep pointer */
#define gadget_for_each_ep(tmp, gadget) \
@@ -452,7 +492,7 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
*/
static inline size_t usb_ep_align(struct usb_ep *ep, size_t len)
{
- int max_packet_size = (size_t)usb_endpoint_maxp(ep->desc) & 0x7ff;
+ int max_packet_size = (size_t)usb_endpoint_maxp(ep->desc);
return round_up(len, max_packet_size);
}
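
Dropping the open-coded 0x7ff mask relies on usb_endpoint_maxp() now returning
just the packet-size bits. The helper itself is plain power-of-two rounding;
for example, a 700-byte transfer on a 512-byte bulk endpoint pads to two full
packets:

/* round_up() from <linux/math.h>, shown for the power-of-two case: */
/* round_up(700, 512) == ((700 - 1) | (512 - 1)) + 1 == 1024 */
size_t quirk_len = usb_ep_align(ep, 700);	/* 1024 when maxpacket is 512 */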
@@ -569,6 +609,7 @@ int usb_gadget_connect(struct usb_gadget *gadget);
int usb_gadget_disconnect(struct usb_gadget *gadget);
int usb_gadget_deactivate(struct usb_gadget *gadget);
int usb_gadget_activate(struct usb_gadget *gadget);
+int usb_gadget_check_config(struct usb_gadget *gadget);
#else
static inline int usb_gadget_frame_number(struct usb_gadget *gadget)
{ return 0; }
@@ -592,12 +633,14 @@ static inline int usb_gadget_deactivate(struct usb_gadget *gadget)
{ return 0; }
static inline int usb_gadget_activate(struct usb_gadget *gadget)
{ return 0; }
+static inline int usb_gadget_check_config(struct usb_gadget *gadget)
+{ return 0; }
#endif /* CONFIG_USB_GADGET */
/*-------------------------------------------------------------------------*/
/**
- * struct usb_gadget_driver - driver for usb 'slave' devices
+ * struct usb_gadget_driver - driver for usb gadget devices
* @function: String describing the gadget's function
* @max_speed: Highest speed the driver handles.
* @setup: Invoked for ep0 control requests that aren't handled by
@@ -621,9 +664,9 @@ static inline int usb_gadget_activate(struct usb_gadget *gadget)
* @driver: Driver model state for this driver.
* @udc_name: A name of UDC this driver should be bound to. If udc_name is NULL,
* this driver will be bound to any available UDC.
- * @pending: UDC core private data used for deferred probe of this driver.
- * @match_existing_only: If udc is not found, return an error and don't add this
- * gadget driver to list of pending driver
+ * @match_existing_only: If udc is not found, return an error and fail
+ * the driver registration
+ * @is_bound: Allow a driver to be bound to only one gadget
*
* Devices are disabled till a gadget driver successfully bind()s, which
* means the driver will handle setup() requests needed to enumerate (and
@@ -686,8 +729,8 @@ struct usb_gadget_driver {
struct device_driver driver;
char *udc_name;
- struct list_head pending;
unsigned match_existing_only:1;
+ bool is_bound:1;
};
@@ -697,22 +740,30 @@ struct usb_gadget_driver {
/* driver modules register and unregister, as usual.
* these calls must be made in a context that can sleep.
*
- * these will usually be implemented directly by the hardware-dependent
- * usb bus interface driver, which will only support a single driver.
+ * A gadget driver can be bound to only one gadget at a time.
*/
/**
- * usb_gadget_probe_driver - probe a gadget driver
+ * usb_gadget_register_driver_owner - register a gadget driver
* @driver: the driver being registered
+ * @owner: the driver module
+ * @mod_name: the driver module's build name
* Context: can sleep
*
* Call this in your gadget driver's module initialization function,
- * to tell the underlying usb controller driver about your driver.
+ * to tell the underlying UDC controller driver about your driver.
* The @bind() function will be called to bind it to a gadget before this
* registration call returns. It's expected that the @bind() function will
* be in init sections.
+ *
+ * Use the macro defined below instead of calling this directly.
*/
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver);
+int usb_gadget_register_driver_owner(struct usb_gadget_driver *driver,
+ struct module *owner, const char *mod_name);
+
+/* use a define to avoid include chaining to get THIS_MODULE & friends */
+#define usb_gadget_register_driver(driver) \
+ usb_gadget_register_driver_owner(driver, THIS_MODULE, KBUILD_MODNAME)
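
Existing gadget drivers keep spelling usb_gadget_register_driver(); the macro
just threads module-owner information through. A minimal sketch of a module
using it (my_driver and its callbacks are hypothetical):

static struct usb_gadget_driver my_driver = {
	.function	= "demo",
	.max_speed	= USB_SPEED_HIGH,
	.bind		= my_bind,
	.unbind		= my_unbind,
	.setup		= my_setup,
};

static int __init my_init(void)
{
	/* expands to usb_gadget_register_driver_owner(&my_driver,
	 * THIS_MODULE, KBUILD_MODNAME)
	 */
	return usb_gadget_register_driver(&my_driver);
}
module_init(my_init);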
/**
* usb_gadget_unregister_driver - unregister a gadget driver
@@ -725,16 +776,10 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver);
* it will first disconnect(). The driver is also requested
* to unbind() and clean up any device state, before this procedure
* finally returns. It's expected that the unbind() functions
- * will in in exit sections, so may not be linked in some kernels.
+ * will be in exit sections, so may not be linked in some kernels.
*/
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver);
-extern int usb_add_gadget_udc_release(struct device *parent,
- struct usb_gadget *gadget, void (*release)(struct device *dev));
-extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget);
-extern void usb_del_gadget_udc(struct usb_gadget *gadget);
-extern char *usb_get_gadget_udc_name(void);
-
/*-------------------------------------------------------------------------*/
/* utility to simplify dealing with string descriptors */
@@ -767,12 +812,15 @@ struct usb_gadget_strings {
struct usb_gadget_string_container {
struct list_head list;
- u8 *stash[0];
+ u8 *stash[];
};
/* put descriptor for string with that id into buf (buflen >= 256) */
int usb_gadget_get_string(const struct usb_gadget_strings *table, int id, u8 *buf);
+/* check if the given language identifier is valid */
+bool usb_validate_langid(u16 langid);
+
/*-------------------------------------------------------------------------*/
/* utility to simplify managing config descriptors */
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 712b2a603645..78cd566ee238 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -1,20 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2001-2002 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __USB_CORE_HCD_H
@@ -59,13 +45,14 @@
* USB Host Controller Driver (usb_hcd) framework
*
* Since "struct usb_bus" is so thin, you can't share much code in it.
- * This framework is a layer over that, and should be more sharable.
+ * This framework is a layer over that, and should be more shareable.
*/
/*-------------------------------------------------------------------------*/
struct giveback_urb_bh {
bool running;
+ bool high_prio;
spinlock_t lock;
struct list_head head;
struct tasklet_struct bh;
@@ -124,6 +111,7 @@ struct usb_hcd {
#define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */
#define HCD_FLAG_DEAD 6 /* controller has died? */
#define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */
+#define HCD_FLAG_DEFER_RH_REGISTER 8 /* Defer roothub registration */
/* The flags can be tested using these macros; they are likely to
* be slightly faster than test_bit().
@@ -134,6 +122,7 @@ struct usb_hcd {
#define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING))
#define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING))
#define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD))
+#define HCD_DEFER_RH_REGISTER(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEFER_RH_REGISTER))
/*
* Specifies if interfaces are authorized by default
@@ -228,7 +217,7 @@ struct usb_hcd {
/* The HC driver's private data is stored at the end of
* this structure.
*/
- unsigned long hcd_priv[0]
+ unsigned long hcd_priv[]
__attribute__ ((aligned(sizeof(s64))));
};
@@ -299,7 +288,7 @@ struct hc_driver {
* (optional) these hooks allow an HCD to override the default DMA
* mapping and unmapping routines. In general, they shouldn't be
* necessary unless the host controller has special DMA requirements,
- * such as alignment contraints. If these are not specified, the
+ * such as alignment constraints. If these are not specified, the
* general usb_hcd_(un)?map_urb_for_dma functions will be used instead
* (and it may be a good idea to call these functions in your HCD
* implementation)
@@ -409,7 +398,10 @@ struct hc_driver {
int (*find_raw_port_number)(struct usb_hcd *, int);
/* Call for power on/off the port if necessary */
int (*port_power)(struct usb_hcd *hcd, int portnum, bool enable);
-
+ /* Call for SINGLE_STEP_SET_FEATURE Test for USB2 EH certification */
+#define EHSET_TEST_SINGLE_STEP_SET_FEATURE 0x06
+ int (*submit_single_step_set_feature)(struct usb_hcd *,
+ struct urb *, int);
};
static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd)
@@ -474,12 +466,20 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr,
struct platform_device;
extern void usb_hcd_platform_shutdown(struct platform_device *dev);
+#ifdef CONFIG_USB_HCD_TEST_MODE
+extern int ehset_single_step_set_feature(struct usb_hcd *hcd, int port);
+#else
+static inline int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
+{
+ return 0;
+}
+#endif /* CONFIG_USB_HCD_TEST_MODE */
#ifdef CONFIG_USB_PCI
struct pci_dev;
struct pci_device_id;
extern int usb_hcd_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id);
+ const struct hc_driver *driver);
extern void usb_hcd_pci_remove(struct pci_dev *dev);
extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
@@ -733,10 +733,6 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
/* random stuff */
-#define RUN_CONTEXT (in_irq() ? "in_irq" \
- : (in_interrupt() ? "in_interrupt" : "can sleep"))
-
-
/* This rwsem is for use only by the hub driver and ehci-hcd.
* Nobody else should touch it.
*/
diff --git a/include/linux/usb/input.h b/include/linux/usb/input.h
index 974befa72ac0..5e759b2cf551 100644
--- a/include/linux/usb/input.h
+++ b/include/linux/usb/input.h
@@ -1,10 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005 Dmitry Torokhov
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef __LINUX_USB_INPUT_H
diff --git a/include/linux/usb/isp1301.h b/include/linux/usb/isp1301.h
index dedb3b2473e8..fa986b926a12 100644
--- a/include/linux/usb/isp1301.h
+++ b/include/linux/usb/isp1301.h
@@ -3,16 +3,6 @@
* NXP ISP1301 USB transceiver driver
*
* Copyright (C) 2012 Roland Stigge <stigge@antcom.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_USB_ISP1301_H
diff --git a/include/linux/usb/isp1760.h b/include/linux/usb/isp1760.h
deleted file mode 100644
index b75ded28db81..000000000000
--- a/include/linux/usb/isp1760.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * board initialization should put one of these into dev->platform_data
- * and place the isp1760 onto platform_bus named "isp1760-hcd".
- */
-
-#ifndef __LINUX_USB_ISP1760_H
-#define __LINUX_USB_ISP1760_H
-
-struct isp1760_platform_data {
- unsigned is_isp1761:1; /* Chip is ISP1761 */
- unsigned bus_width_16:1; /* 16/32-bit data bus width */
- unsigned port1_otg:1; /* Port 1 supports OTG */
- unsigned analog_oc:1; /* Analog overcurrent */
- unsigned dack_polarity_high:1; /* DACK active high */
- unsigned dreq_polarity_high:1; /* DREQ active high */
-};
-
-#endif /* __LINUX_USB_ISP1760_H */
diff --git a/include/linux/usb/m66592.h b/include/linux/usb/m66592.h
index 2dfe68183495..5f04de2b47fd 100644
--- a/include/linux/usb/m66592.h
+++ b/include/linux/usb/m66592.h
@@ -3,20 +3,6 @@
* M66592 driver platform data
*
* Copyright (C) 2009 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __LINUX_USB_M66592_H
diff --git a/include/linux/usb/musb-ux500.h b/include/linux/usb/musb-ux500.h
index c4b7ad9850ca..d60dcfc56b5a 100644
--- a/include/linux/usb/musb-ux500.h
+++ b/include/linux/usb/musb-ux500.h
@@ -1,16 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2013 ST-Ericsson AB
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __MUSB_UX500_H__
diff --git a/include/linux/usb/net2280.h b/include/linux/usb/net2280.h
index 08b85caecfaf..f29fe6a1f415 100644
--- a/include/linux/usb/net2280.h
+++ b/include/linux/usb/net2280.h
@@ -5,20 +5,6 @@
*
* Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com)
* Copyright (C) 2003 David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_USB_NET2280_H
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
index dba55ccb9b53..98487fd7ab11 100644
--- a/include/linux/usb/of.h
+++ b/include/linux/usb/of.h
@@ -1,8 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
* OF helpers for usb devices.
- *
- * This file is released under the GPLv2
*/
#ifndef __LINUX_USB_OF_H
diff --git a/include/linux/usb/ohci_pdriver.h b/include/linux/usb/ohci_pdriver.h
index 7eb16cf587ee..2447c78b1766 100644
--- a/include/linux/usb/ohci_pdriver.h
+++ b/include/linux/usb/ohci_pdriver.h
@@ -1,20 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2012 Hauke Mehrtens <hauke@hauke-m.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __USB_CORE_OHCI_PDRIVER_H
diff --git a/include/linux/usb/onboard_hub.h b/include/linux/usb/onboard_hub.h
new file mode 100644
index 000000000000..d9373230556e
--- /dev/null
+++ b/include/linux/usb/onboard_hub.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_USB_ONBOARD_HUB_H
+#define __LINUX_USB_ONBOARD_HUB_H
+
+struct usb_device;
+struct list_head;
+
+#if IS_ENABLED(CONFIG_USB_ONBOARD_HUB)
+void onboard_hub_create_pdevs(struct usb_device *parent_hub, struct list_head *pdev_list);
+void onboard_hub_destroy_pdevs(struct list_head *pdev_list);
+#else
+static inline void onboard_hub_create_pdevs(struct usb_device *parent_hub,
+ struct list_head *pdev_list) {}
+static inline void onboard_hub_destroy_pdevs(struct list_head *pdev_list) {}
+#endif
+
+#endif /* __LINUX_USB_ONBOARD_HUB_H */
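
A hedged sketch of a caller: the hub core (or a platform driver) hands each
onboard hub's lifetime to these helpers, keeping the list head in per-device
state (struct my_hub and its callbacks are hypothetical):

struct my_hub {
	struct usb_device *udev;
	struct list_head pdev_list;
};

static void my_hub_attach(struct my_hub *hub)
{
	INIT_LIST_HEAD(&hub->pdev_list);
	onboard_hub_create_pdevs(hub->udev, &hub->pdev_list);
}

static void my_hub_detach(struct my_hub *hub)
{
	onboard_hub_destroy_pdevs(&hub->pdev_list);
}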
diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h
index e78eb577d0fa..6135d076c53d 100644
--- a/include/linux/usb/otg-fsm.h
+++ b/include/linux/usb/otg-fsm.h
@@ -1,19 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
-/* Copyright (C) 2007,2008 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
+/*
+ * Copyright (C) 2007,2008 Freescale Semiconductor, Inc.
*/
#ifndef __LINUX_USB_OTG_FSM_H
@@ -98,7 +85,7 @@ enum otg_fsm_timer {
* @b_bus_req: TRUE during the time that the Application running on the
* B-device wants to use the bus
*
- * Auxilary inputs (OTG v1.3 only. Obsolete now.)
+ * Auxiliary inputs (OTG v1.3 only. Obsolete now.)
* @a_sess_vld: TRUE if the A-device detects that VBUS is above VA_SESS_VLD
* @b_bus_suspend: TRUE when the A-device detects that the B-device has put
* the bus into suspend
@@ -153,7 +140,7 @@ struct otg_fsm {
int a_bus_req;
int b_bus_req;
- /* Auxilary inputs */
+ /* Auxiliary inputs */
int a_sess_vld;
int b_bus_resume;
int b_bus_suspend;
@@ -177,7 +164,7 @@ struct otg_fsm {
int a_bus_req_inf;
int a_clr_err_inf;
int b_bus_req_inf;
- /* Auxilary informative variables */
+ /* Auxiliary informative variables */
int a_suspend_req_inf;
/* Timeout indicator for timers */
@@ -196,6 +183,7 @@ struct otg_fsm {
struct mutex lock;
u8 *host_req_flag;
struct delayed_work hnp_polling_work;
+ bool hnp_work_inited;
bool state_changed;
};
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index 69f1b6328532..6475f880be37 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -125,8 +125,9 @@ enum usb_dr_mode {
* @dev: Pointer to the given device
*
* The function gets phy interface string from property 'dr_mode',
- * and returns the correspondig enum usb_dr_mode
+ * and returns the corresponding enum usb_dr_mode
*/
extern enum usb_dr_mode usb_get_dr_mode(struct device *dev);
+extern enum usb_dr_mode usb_get_role_switch_default_mode(struct device *dev);
#endif /* __LINUX_USB_OTG_H */
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index a665d7f21142..c59fb79a42e8 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -219,14 +219,17 @@ enum pd_pdo_type {
#define PDO_CURR_MASK 0x3ff
#define PDO_PWR_MASK 0x3ff
-#define PDO_FIXED_DUAL_ROLE BIT(29) /* Power role swap supported */
-#define PDO_FIXED_SUSPEND BIT(28) /* USB Suspend supported (Source) */
-#define PDO_FIXED_HIGHER_CAP BIT(28) /* Requires more than vSafe5V (Sink) */
-#define PDO_FIXED_EXTPOWER BIT(27) /* Externally powered */
-#define PDO_FIXED_USB_COMM BIT(26) /* USB communications capable */
-#define PDO_FIXED_DATA_SWAP BIT(25) /* Data role swap supported */
-#define PDO_FIXED_VOLT_SHIFT 10 /* 50mV units */
-#define PDO_FIXED_CURR_SHIFT 0 /* 10mA units */
+#define PDO_FIXED_DUAL_ROLE BIT(29) /* Power role swap supported */
+#define PDO_FIXED_SUSPEND BIT(28) /* USB Suspend supported (Source) */
+#define PDO_FIXED_HIGHER_CAP BIT(28) /* Requires more than vSafe5V (Sink) */
+#define PDO_FIXED_EXTPOWER BIT(27) /* Externally powered */
+#define PDO_FIXED_USB_COMM BIT(26) /* USB communications capable */
+#define PDO_FIXED_DATA_SWAP BIT(25) /* Data role swap supported */
+#define PDO_FIXED_UNCHUNK_EXT BIT(24) /* Unchunked Extended Message supported (Source) */
+#define PDO_FIXED_FRS_CURR_MASK (BIT(24) | BIT(23)) /* FR_Swap Current (Sink) */
+#define PDO_FIXED_FRS_CURR_SHIFT 23
+#define PDO_FIXED_VOLT_SHIFT 10 /* 50mV units */
+#define PDO_FIXED_CURR_SHIFT 0 /* 10mA units */
#define PDO_FIXED_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_FIXED_VOLT_SHIFT)
#define PDO_FIXED_CURR(ma) ((((ma) / 10) & PDO_CURR_MASK) << PDO_FIXED_CURR_SHIFT)
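
Worked example of the fixed-PDO encoding above: a dual-role 5 V / 3 A source
that is USB-communications capable and allows data-role swap (the fixed-supply
type field in bits 31:30 is zero):

u32 pdo = PDO_FIXED_VOLT(5000)	/* (5000/50) << 10 = 0x19000 */
	| PDO_FIXED_CURR(3000)	/* 3000/10         = 0x0012c */
	| PDO_FIXED_DUAL_ROLE	/* BIT(29) */
	| PDO_FIXED_USB_COMM	/* BIT(26) */
	| PDO_FIXED_DATA_SWAP;	/* BIT(25) */
/* pdo == 0x2601912c */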
@@ -454,15 +457,17 @@ static inline unsigned int rdo_max_power(u32 rdo)
#define PD_T_DB_DETECT 10000 /* 10 - 15 seconds */
#define PD_T_SEND_SOURCE_CAP 150 /* 100 - 200 ms */
#define PD_T_SENDER_RESPONSE 60 /* 24 - 30 ms, relaxed */
+#define PD_T_RECEIVER_RESPONSE 15 /* 15ms max */
#define PD_T_SOURCE_ACTIVITY 45
#define PD_T_SINK_ACTIVITY 135
-#define PD_T_SINK_WAIT_CAP 240
+#define PD_T_SINK_WAIT_CAP 310 /* 310 - 620 ms */
#define PD_T_PS_TRANSITION 500
#define PD_T_SRC_TRANSITION 35
#define PD_T_DRP_SNK 40
#define PD_T_DRP_SRC 30
#define PD_T_PS_SOURCE_OFF 920
#define PD_T_PS_SOURCE_ON 480
+#define PD_T_PS_SOURCE_ON_PRS 450 /* 390 - 480ms */
#define PD_T_PS_HARD_RESET 30
#define PD_T_SRC_RECOVER 760
#define PD_T_SRC_RECOVER_MAX 1000
@@ -471,16 +476,61 @@ static inline unsigned int rdo_max_power(u32 rdo)
#define PD_T_VCONN_SOURCE_ON 100
#define PD_T_SINK_REQUEST 100 /* 100 ms minimum */
#define PD_T_ERROR_RECOVERY 100 /* minimum 25 is insufficient */
-#define PD_T_SRCSWAPSTDBY 625 /* Maximum of 650ms */
-#define PD_T_NEWSRC 250 /* Maximum of 275ms */
+#define PD_T_SRCSWAPSTDBY 625 /* Maximum of 650ms */
+#define PD_T_NEWSRC 250 /* Maximum of 275ms */
+#define PD_T_SWAP_SRC_START 20 /* Minimum of 20ms */
+#define PD_T_BIST_CONT_MODE 50 /* 30 - 60 ms */
+#define PD_T_SINK_TX 16 /* 16 - 20 ms */
+#define PD_T_CHUNK_NOT_SUPP 42 /* 40 - 50 ms */
#define PD_T_DRP_TRY 100 /* 75 - 150 ms */
#define PD_T_DRP_TRYWAIT 600 /* 400 - 800 ms */
#define PD_T_CC_DEBOUNCE 200 /* 100 - 200 ms */
#define PD_T_PD_DEBOUNCE 20 /* 10 - 20 ms */
+#define PD_T_TRY_CC_DEBOUNCE 15 /* 10 - 20 ms */
#define PD_N_CAPS_COUNT (PD_T_NO_RESPONSE / PD_T_SEND_SOURCE_CAP)
#define PD_N_HARD_RESET_COUNT 2
+#define PD_P_SNK_STDBY_MW 2500 /* 2500 mW */
+
+#if IS_ENABLED(CONFIG_TYPEC)
+
+struct usb_power_delivery;
+
+/**
+ * usb_power_delivery_desc - USB Power Delivery Descriptor
+ * @revision: USB Power Delivery Specification Revision
+ * @version: USB Power Delivery Specification Version - optional
+ */
+struct usb_power_delivery_desc {
+ u16 revision;
+ u16 version;
+};
+
+/**
+ * usb_power_delivery_capabilities_desc - Description of USB Power Delivery Capabilities Message
+ * @pdo: The Power Data Objects in the Capability Message
+ * @role: Power role of the capabilities
+ */
+struct usb_power_delivery_capabilities_desc {
+ u32 pdo[PDO_MAX_OBJECTS];
+ enum typec_role role;
+};
+
+struct usb_power_delivery_capabilities *
+usb_power_delivery_register_capabilities(struct usb_power_delivery *pd,
+ struct usb_power_delivery_capabilities_desc *desc);
+void usb_power_delivery_unregister_capabilities(struct usb_power_delivery_capabilities *cap);
+
+struct usb_power_delivery *usb_power_delivery_register(struct device *parent,
+ struct usb_power_delivery_desc *desc);
+void usb_power_delivery_unregister(struct usb_power_delivery *pd);
+
+int usb_power_delivery_link_device(struct usb_power_delivery *pd, struct device *dev);
+void usb_power_delivery_unlink_device(struct usb_power_delivery *pd, struct device *dev);
+
+#endif /* CONFIG_TYPEC */
+
#endif /* __LINUX_USB_PD_H */
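
A hedged sketch of a port driver wiring up the new registration hooks (error
handling elided; 'port' and its fields are hypothetical, TYPEC_SOURCE comes
from <linux/usb/typec.h>, and the 0x0300 revision encoding is assumed to be
the usual BCD style):

static struct usb_power_delivery_desc pd_desc = {
	.revision = 0x0300,		/* PD 3.0, BCD-style (assumed) */
};

static struct usb_power_delivery_capabilities_desc src_caps = {
	.pdo  = { PDO_FIXED_VOLT(5000) | PDO_FIXED_CURR(3000) },
	.role = TYPEC_SOURCE,
};

/* in probe: */
port->pd = usb_power_delivery_register(dev, &pd_desc);
port->pd_src = usb_power_delivery_register_capabilities(port->pd, &src_caps);
usb_power_delivery_link_device(port->pd, &port->typec_dev);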
diff --git a/include/linux/usb/pd_bdo.h b/include/linux/usb/pd_bdo.h
index 033fe3e17141..7c25b88d79f9 100644
--- a/include/linux/usb/pd_bdo.h
+++ b/include/linux/usb/pd_bdo.h
@@ -15,7 +15,7 @@
#define BDO_MODE_CARRIER2 (5 << 28)
#define BDO_MODE_CARRIER3 (6 << 28)
#define BDO_MODE_EYE (7 << 28)
-#define BDO_MODE_TESTDATA (8 << 28)
+#define BDO_MODE_TESTDATA (8U << 28)
#define BDO_MODE_MASK(mode) ((mode) & 0xf0000000)
diff --git a/include/linux/usb/pd_ext_sdb.h b/include/linux/usb/pd_ext_sdb.h
index 0eb83ce19597..b517ebc8f0ff 100644
--- a/include/linux/usb/pd_ext_sdb.h
+++ b/include/linux/usb/pd_ext_sdb.h
@@ -24,8 +24,4 @@ enum usb_pd_ext_sdb_fields {
#define USB_PD_EXT_SDB_EVENT_OVP BIT(3)
#define USB_PD_EXT_SDB_EVENT_CF_CV_MODE BIT(4)
-#define USB_PD_EXT_SDB_PPS_EVENTS (USB_PD_EXT_SDB_EVENT_OCP | \
- USB_PD_EXT_SDB_EVENT_OTP | \
- USB_PD_EXT_SDB_EVENT_OVP)
-
#endif /* __LINUX_USB_PD_EXT_SDB_H */
diff --git a/include/linux/usb/pd_vdo.h b/include/linux/usb/pd_vdo.h
index 35b8e15efaa0..b057250704e8 100644
--- a/include/linux/usb/pd_vdo.h
+++ b/include/linux/usb/pd_vdo.h
@@ -21,22 +21,24 @@
* ----------
* <31:16> :: SVID
* <15> :: VDM type ( 1b == structured, 0b == unstructured )
- * <14:13> :: Structured VDM version (can only be 00 == 1.0 currently)
+ * <14:13> :: Structured VDM version
* <12:11> :: reserved
* <10:8> :: object position (1-7 valid ... used for enter/exit mode only)
* <7:6> :: command type (SVDM only?)
* <5> :: reserved (SVDM), command type (UVDM)
* <4:0> :: command
*/
-#define VDO(vid, type, custom) \
+#define VDO(vid, type, ver, custom) \
(((vid) << 16) | \
((type) << 15) | \
+ ((ver) << 13) | \
((custom) & 0x7FFF))
#define VDO_SVDM_TYPE (1 << 15)
#define VDO_SVDM_VERS(x) ((x) << 13)
#define VDO_OPOS(x) ((x) << 8)
#define VDO_CMDT(x) ((x) << 6)
+#define VDO_SVDM_VERS_MASK VDO_SVDM_VERS(0x3)
#define VDO_OPOS_MASK VDO_OPOS(0x7)
#define VDO_CMDT_MASK VDO_CMDT(0x3)
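
With the SVDM version now a VDO() parameter, an initiator building a
structured Discover Identity header for SVDM 2.0 would write something like
the following (USB_SID_PD, SVDM_VER_2_0 and CMD_DISCOVER_IDENT are defined
elsewhere in this header):

u32 hdr = VDO(USB_SID_PD, 1 /* structured */, SVDM_VER_2_0,
	      CMD_DISCOVER_IDENT);

/* responder side: reject anything newer than it understands */
if (PD_VDO_SVDM(hdr) && PD_VDO_SVDM_VER(hdr) > SVDM_VER_2_0)
	return -EINVAL;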
@@ -74,6 +76,7 @@
#define PD_VDO_VID(vdo) ((vdo) >> 16)
#define PD_VDO_SVDM(vdo) (((vdo) >> 15) & 1)
+#define PD_VDO_SVDM_VER(vdo) (((vdo) >> 13) & 0x3)
#define PD_VDO_OPOS(vdo) (((vdo) >> 8) & 0x7)
#define PD_VDO_CMD(vdo) ((vdo) & 0x1f)
#define PD_VDO_CMDT(vdo) (((vdo) >> 6) & 0x3)
@@ -103,25 +106,50 @@
* --------------------
* <31> :: data capable as a USB host
* <30> :: data capable as a USB device
- * <29:27> :: product type
+ * <29:27> :: product type (UFP / Cable / VPD)
* <26> :: modal operation supported (1b == yes)
- * <25:16> :: Reserved, Shall be set to zero
+ * <25:23> :: product type (DFP) (SVDM version 2.0+ only; set to zero in version 1.0)
+ * <22:21> :: connector type (SVDM version 2.0+ only; set to zero in version 1.0)
+ * <20:16> :: Reserved, Shall be set to zero
* <15:0> :: USB-IF assigned VID for this cable vendor
*/
+
+/* PD Rev2.0 definition */
#define IDH_PTYPE_UNDEF 0
+
+/* SOP Product Type (UFP) */
+#define IDH_PTYPE_NOT_UFP 0
#define IDH_PTYPE_HUB 1
#define IDH_PTYPE_PERIPH 2
+#define IDH_PTYPE_PSD 3
+#define IDH_PTYPE_AMA 5
+
+/* SOP' Product Type (Cable Plug / VPD) */
+#define IDH_PTYPE_NOT_CABLE 0
#define IDH_PTYPE_PCABLE 3
#define IDH_PTYPE_ACABLE 4
-#define IDH_PTYPE_AMA 5
+#define IDH_PTYPE_VPD 6
-#define VDO_IDH(usbh, usbd, ptype, is_modal, vid) \
- ((usbh) << 31 | (usbd) << 30 | ((ptype) & 0x7) << 27 \
- | (is_modal) << 26 | ((vid) & 0xffff))
+/* SOP Product Type (DFP) */
+#define IDH_PTYPE_NOT_DFP 0
+#define IDH_PTYPE_DFP_HUB 1
+#define IDH_PTYPE_DFP_HOST 2
+#define IDH_PTYPE_DFP_PB 3
+
+/* ID Header Mask */
+#define IDH_DFP_MASK GENMASK(25, 23)
+#define IDH_CONN_MASK GENMASK(22, 21)
+
+#define VDO_IDH(usbh, usbd, ufp_cable, is_modal, dfp, conn, vid) \
+ ((usbh) << 31 | (usbd) << 30 | ((ufp_cable) & 0x7) << 27 \
+ | (is_modal) << 26 | ((dfp) & 0x7) << 23 | ((conn) & 0x3) << 21 \
+ | ((vid) & 0xffff))
#define PD_IDH_PTYPE(vdo) (((vdo) >> 27) & 0x7)
#define PD_IDH_VID(vdo) ((vdo) & 0xffff)
#define PD_IDH_MODAL_SUPP(vdo) ((vdo) & (1 << 26))
+#define PD_IDH_DFP_PTYPE(vdo) (((vdo) >> 23) & 0x7)
+#define PD_IDH_CONN_TYPE(vdo) (((vdo) >> 21) & 0x3)
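
For example, a PD 3.0 peripheral behind a receptacle that can also act as a
host-type DFP might encode its ID header as below (the VID is hypothetical;
10b is the receptacle connector-type value from the field description above):

u32 idh = VDO_IDH(1 /* USB host */, 1 /* USB device */,
		  IDH_PTYPE_PERIPH, 1 /* modal ops */,
		  IDH_PTYPE_DFP_HOST, 2 /* 10b == receptacle */,
		  0x1234 /* hypothetical VID */);

/* decode side:
 * PD_IDH_PTYPE(idh)     == IDH_PTYPE_PERIPH
 * PD_IDH_DFP_PTYPE(idh) == IDH_PTYPE_DFP_HOST
 */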
/*
* Cert Stat VDO
@@ -129,6 +157,7 @@
* <31:0> : USB-IF assigned XID for this cable
*/
#define PD_CSTAT_XID(vdo) (vdo)
+#define VDO_CERT(xid) ((xid) & 0xffffffff)
/*
* Product VDO
@@ -140,77 +169,271 @@
#define PD_PRODUCT_PID(vdo) (((vdo) >> 16) & 0xffff)
/*
- * UFP VDO1
+ * UFP VDO (PD Revision 3.0+ only)
* --------
* <31:29> :: UFP VDO version
* <28> :: Reserved
* <27:24> :: Device capability
- * <23:6> :: Reserved
+ * <23:22> :: Connector type (10b == receptacle, 11b == captive plug)
+ * <21:11> :: Reserved
+ * <10:8> :: Vconn power (AMA only)
+ * <7> :: Vconn required (AMA only, 0b == no, 1b == yes)
+ * <6> :: Vbus required (AMA only, 0b == yes, 1b == no)
* <5:3> :: Alternate modes
* <2:0> :: USB highest speed
*/
-#define PD_VDO1_UFP_DEVCAP(vdo) (((vdo) & GENMASK(27, 24)) >> 24)
+#define PD_VDO_UFP_DEVCAP(vdo) (((vdo) & GENMASK(27, 24)) >> 24)
+/* UFP VDO Version */
+#define UFP_VDO_VER1_2 2
+
+/* Device Capability */
#define DEV_USB2_CAPABLE BIT(0)
#define DEV_USB2_BILLBOARD BIT(1)
#define DEV_USB3_CAPABLE BIT(2)
#define DEV_USB4_CAPABLE BIT(3)
+/* Connector Type */
+#define UFP_RECEPTACLE 2
+#define UFP_CAPTIVE 3
+
+/* Vconn Power (AMA only, set to AMA_VCONN_NOT_REQ if Vconn is not required) */
+#define AMA_VCONN_PWR_1W 0
+#define AMA_VCONN_PWR_1W5 1
+#define AMA_VCONN_PWR_2W 2
+#define AMA_VCONN_PWR_3W 3
+#define AMA_VCONN_PWR_4W 4
+#define AMA_VCONN_PWR_5W 5
+#define AMA_VCONN_PWR_6W 6
+
+/* Vconn Required (AMA only) */
+#define AMA_VCONN_NOT_REQ 0
+#define AMA_VCONN_REQ 1
+
+/* Vbus Required (AMA only) */
+#define AMA_VBUS_REQ 0
+#define AMA_VBUS_NOT_REQ 1
+
+/* Alternate Modes */
+#define UFP_ALTMODE_NOT_SUPP 0
+#define UFP_ALTMODE_TBT3 BIT(0)
+#define UFP_ALTMODE_RECFG BIT(1)
+#define UFP_ALTMODE_NO_RECFG BIT(2)
+
+/* USB Highest Speed */
+#define UFP_USB2_ONLY 0
+#define UFP_USB32_GEN1 1
+#define UFP_USB32_4_GEN2 2
+#define UFP_USB4_GEN3 3
+
+#define VDO_UFP(ver, cap, conn, vcpwr, vcr, vbr, alt, spd) \
+ (((ver) & 0x7) << 29 | ((cap) & 0xf) << 24 | ((conn) & 0x3) << 22 \
+ | ((vcpwr) & 0x7) << 8 | (vcr) << 7 | (vbr) << 6 | ((alt) & 0x7) << 3 \
+ | ((spd) & 0x7))
+
/*
- * DFP VDO
+ * DFP VDO (PD Revision 3.0+ only)
* --------
* <31:29> :: DFP VDO version
* <28:27> :: Reserved
* <26:24> :: Host capability
- * <23:5> :: Reserved
+ * <23:22> :: Connector type (10b == receptacle, 11b == captive plug)
+ * <21:5> :: Reserved
* <4:0> :: Port number
*/
#define PD_VDO_DFP_HOSTCAP(vdo) (((vdo) & GENMASK(26, 24)) >> 24)
+#define DFP_VDO_VER1_1 1
#define HOST_USB2_CAPABLE BIT(0)
#define HOST_USB3_CAPABLE BIT(1)
#define HOST_USB4_CAPABLE BIT(2)
+#define DFP_RECEPTACLE 2
+#define DFP_CAPTIVE 3
+
+#define VDO_DFP(ver, cap, conn, pnum) \
+ (((ver) & 0x7) << 29 | ((cap) & 0x7) << 24 | ((conn) & 0x3) << 22 \
+ | ((pnum) & 0x1f))
/*
- * Cable VDO
+ * Cable VDO (for both Passive and Active Cable VDO in PD Rev2.0)
* ---------
* <31:28> :: Cable HW version
* <27:24> :: Cable FW version
* <23:20> :: Reserved, Shall be set to zero
- * <19:18> :: type-C to Type-A/B/C (00b == A, 01 == B, 10 == C)
- * <17> :: Type-C to Plug/Receptacle (0b == plug, 1b == receptacle)
+ * <19:18> :: type-C to Type-A/B/C/Captive (00b == A, 01 == B, 10 == C, 11 == Captive)
+ * <17> :: Reserved, Shall be set to zero
* <16:13> :: cable latency (0001 == <10ns(~1m length))
* <12:11> :: cable termination type (11b == both ends active VCONN req)
* <10> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
* <9> :: SSTX2 Directionality support
* <8> :: SSRX1 Directionality support
* <7> :: SSRX2 Directionality support
- * <6:5> :: Vbus current handling capability
+ * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A)
* <4> :: Vbus through cable (0b == no, 1b == yes)
* <3> :: SOP" controller present? (0b == no, 1b == yes)
* <2:0> :: USB SS Signaling support
+ *
+ * Passive Cable VDO (PD Rev3.0+)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:21> :: VDO version
+ * <20> :: Reserved, Shall be set to zero
+ * <19:18> :: Type-C to Type-C/Captive (10b == C, 11b == Captive)
+ * <17> :: Reserved, Shall be set to zero
+ * <16:13> :: cable latency (0001 == <10ns(~1m length))
+ * <12:11> :: cable termination type (10b == Vconn not req, 01b == Vconn req)
+ * <10:9> :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V)
+ * <8:7> :: Reserved, Shall be set to zero
+ * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A)
+ * <4:3> :: Reserved, Shall be set to zero
+ * <2:0> :: USB highest speed
+ *
+ * Active Cable VDO 1 (PD Rev3.0+)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:21> :: VDO version
+ * <20> :: Reserved, Shall be set to zero
+ * <19:18> :: Connector type (10b == C, 11b == Captive)
+ * <17> :: Reserved, Shall be set to zero
+ * <16:13> :: cable latency (0001 == <10ns(~1m length))
+ * <12:11> :: cable termination type (10b == one end active, 11b == both ends active VCONN req)
+ * <10:9> :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V)
+ * <8> :: SBU supported (0b == supported, 1b == not supported)
+ * <7> :: SBU type (0b == passive, 1b == active)
+ * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A)
+ * <4> :: Vbus through cable (0b == no, 1b == yes)
+ * <3> :: SOP" controller present? (0b == no, 1b == yes)
+ * <2:0> :: USB highest speed
*/
+/* Cable VDO Version */
+#define CABLE_VDO_VER1_0 0
+#define CABLE_VDO_VER1_3 3
+
+/* Connector Type (_ATYPE and _BTYPE are for PD Rev2.0 only) */
#define CABLE_ATYPE 0
#define CABLE_BTYPE 1
#define CABLE_CTYPE 2
-#define CABLE_PLUG 0
-#define CABLE_RECEPTACLE 1
-#define CABLE_CURR_1A5 0
+#define CABLE_CAPTIVE 3
+
+/* Cable Latency */
+#define CABLE_LATENCY_1M 1
+#define CABLE_LATENCY_2M 2
+#define CABLE_LATENCY_3M 3
+#define CABLE_LATENCY_4M 4
+#define CABLE_LATENCY_5M 5
+#define CABLE_LATENCY_6M 6
+#define CABLE_LATENCY_7M 7
+#define CABLE_LATENCY_7M_PLUS 8
+
+/* Cable Termination Type */
+#define PCABLE_VCONN_NOT_REQ 0
+#define PCABLE_VCONN_REQ 1
+#define ACABLE_ONE_END 2
+#define ACABLE_BOTH_END 3
+
+/* Maximum Vbus Voltage */
+#define CABLE_MAX_VBUS_20V 0
+#define CABLE_MAX_VBUS_30V 1
+#define CABLE_MAX_VBUS_40V 2
+#define CABLE_MAX_VBUS_50V 3
+
+/* Active Cable SBU Supported/Type */
+#define ACABLE_SBU_SUPP 0
+#define ACABLE_SBU_NOT_SUPP 1
+#define ACABLE_SBU_PASSIVE 0
+#define ACABLE_SBU_ACTIVE 1
+
+/* Vbus Current Handling Capability */
+#define CABLE_CURR_DEF 0
#define CABLE_CURR_3A 1
#define CABLE_CURR_5A 2
+
+/* USB SuperSpeed Signaling Support (PD Rev2.0) */
#define CABLE_USBSS_U2_ONLY 0
#define CABLE_USBSS_U31_GEN1 1
#define CABLE_USBSS_U31_GEN2 2
-#define VDO_CABLE(hw, fw, cbl, gdr, lat, term, tx1d, tx2d, rx1d, rx2d, cur,\
- vps, sopp, usbss) \
- (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18 \
- | (gdr) << 17 | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 \
- | (tx1d) << 10 | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 \
- | ((cur) & 0x3) << 5 | (vps) << 4 | (sopp) << 3 \
- | ((usbss) & 0x7))
+
+/* USB Highest Speed */
+#define CABLE_USB2_ONLY 0
+#define CABLE_USB32_GEN1 1
+#define CABLE_USB32_4_GEN2 2
+#define CABLE_USB4_GEN3 3
+
+#define VDO_CABLE(hw, fw, cbl, lat, term, tx1d, tx2d, rx1d, rx2d, cur, vps, sopp, usbss) \
+ (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18 \
+ | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 | (tx1d) << 10 \
+ | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 | ((cur) & 0x3) << 5 \
+ | (vps) << 4 | (sopp) << 3 | ((usbss) & 0x7))
+#define VDO_PCABLE(hw, fw, ver, conn, lat, term, vbm, cur, spd) \
+ (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \
+ | ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11 \
+ | ((vbm) & 0x3) << 9 | ((cur) & 0x3) << 5 | ((spd) & 0x7))
+#define VDO_ACABLE1(hw, fw, ver, conn, lat, term, vbm, sbu, sbut, cur, vbt, sopp, spd) \
+ (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \
+ | ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11 \
+ | ((vbm) & 0x3) << 9 | (sbu) << 8 | (sbut) << 7 | ((cur) & 0x3) << 5 \
+ | (vbt) << 4 | (sopp) << 3 | ((spd) & 0x7))
+
+#define VDO_TYPEC_CABLE_TYPE(vdo) (((vdo) >> 18) & 0x3)
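
Worked example with the new passive-cable constructor: a captive 1 m cable
rated for USB4 Gen3, 5 A and 50 V, with no Vconn required (hardware/firmware
version numbers illustrative):

u32 pcable = VDO_PCABLE(1 /* HW ver */, 1 /* FW ver */, CABLE_VDO_VER1_3,
			CABLE_CAPTIVE, CABLE_LATENCY_1M,
			PCABLE_VCONN_NOT_REQ, CABLE_MAX_VBUS_50V,
			CABLE_CURR_5A, CABLE_USB4_GEN3);

/* VDO_TYPEC_CABLE_TYPE(pcable) == CABLE_CAPTIVE */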
/*
- * AMA VDO
+ * Active Cable VDO 2
+ * ---------
+ * <31:24> :: Maximum operating temperature
+ * <23:16> :: Shutdown temperature
+ * <15> :: Reserved, Shall be set to zero
+ * <14:12> :: U3/CLd power
+ * <11> :: U3 to U0 transition mode (0b == direct, 1b == through U3S)
+ * <10> :: Physical connection (0b == copper, 1b == optical)
+ * <9> :: Active element (0b == redriver, 1b == retimer)
+ * <8> :: USB4 supported (0b == yes, 1b == no)
+ * <7:6> :: USB2 hub hops consumed
+ * <5> :: USB2 supported (0b == yes, 1b == no)
+ * <4> :: USB3.2 supported (0b == yes, 1b == no)
+ * <3> :: USB lanes supported (0b == one lane, 1b == two lanes)
+ * <2> :: Optically isolated active cable (0b == no, 1b == yes)
+ * <1> :: Reserved, Shall be set to zero
+ * <0> :: USB gen (0b == gen1, 1b == gen2+)
+ */
+/* U3/CLd Power */
+#define ACAB2_U3_CLD_10MW_PLUS 0
+#define ACAB2_U3_CLD_10MW 1
+#define ACAB2_U3_CLD_5MW 2
+#define ACAB2_U3_CLD_1MW 3
+#define ACAB2_U3_CLD_500UW 4
+#define ACAB2_U3_CLD_200UW 5
+#define ACAB2_U3_CLD_50UW 6
+
+/* Other Active Cable VDO 2 Fields */
+#define ACAB2_U3U0_DIRECT 0
+#define ACAB2_U3U0_U3S 1
+#define ACAB2_PHY_COPPER 0
+#define ACAB2_PHY_OPTICAL 1
+#define ACAB2_REDRIVER 0
+#define ACAB2_RETIMER 1
+#define ACAB2_USB4_SUPP 0
+#define ACAB2_USB4_NOT_SUPP 1
+#define ACAB2_USB2_SUPP 0
+#define ACAB2_USB2_NOT_SUPP 1
+#define ACAB2_USB32_SUPP 0
+#define ACAB2_USB32_NOT_SUPP 1
+#define ACAB2_LANES_ONE 0
+#define ACAB2_LANES_TWO 1
+#define ACAB2_OPT_ISO_NO 0
+#define ACAB2_OPT_ISO_YES 1
+#define ACAB2_GEN_1 0
+#define ACAB2_GEN_2_PLUS 1
+
+#define VDO_ACABLE2(mtemp, stemp, u3p, trans, phy, ele, u4, hops, u2, u32, lane, iso, gen) \
+ (((mtemp) & 0xff) << 24 | ((stemp) & 0xff) << 16 | ((u3p) & 0x7) << 12 \
+ | (trans) << 11 | (phy) << 10 | (ele) << 9 | (u4) << 8 \
+ | ((hops) & 0x3) << 6 | (u2) << 5 | (u32) << 4 | (lane) << 3 \
+ | (iso) << 2 | (gen))
+
+/*
+ * AMA VDO (PD Rev2.0)
* ---------
* <31:28> :: Cable HW version
* <27:24> :: Cable FW version
@@ -233,23 +456,45 @@
#define PD_VDO_AMA_VCONN_REQ(vdo) (((vdo) >> 4) & 1)
#define PD_VDO_AMA_VBUS_REQ(vdo) (((vdo) >> 3) & 1)
-#define AMA_VCONN_PWR_1W 0
-#define AMA_VCONN_PWR_1W5 1
-#define AMA_VCONN_PWR_2W 2
-#define AMA_VCONN_PWR_3W 3
-#define AMA_VCONN_PWR_4W 4
-#define AMA_VCONN_PWR_5W 5
-#define AMA_VCONN_PWR_6W 6
#define AMA_USBSS_U2_ONLY 0
#define AMA_USBSS_U31_GEN1 1
#define AMA_USBSS_U31_GEN2 2
#define AMA_USBSS_BBONLY 3
/*
+ * VPD VDO
+ * ---------
+ * <31:28> :: HW version
+ * <27:24> :: FW version
+ * <23:21> :: VDO version
+ * <20:17> :: Reserved, Shall be set to zero
+ * <16:15> :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V)
+ * <14> :: Charge through current support (0b == 3A, 1b == 5A)
+ * <13> :: Reserved, Shall be set to zero
+ * <12:7> :: Vbus impedance
+ * <6:1> :: Ground impedance
+ * <0> :: Charge through support (0b == no, 1b == yes)
+ */
+#define VPD_VDO_VER1_0 0
+#define VPD_MAX_VBUS_20V 0
+#define VPD_MAX_VBUS_30V 1
+#define VPD_MAX_VBUS_40V 2
+#define VPD_MAX_VBUS_50V 3
+#define VPDCT_CURR_3A 0
+#define VPDCT_CURR_5A 1
+#define VPDCT_NOT_SUPP 0
+#define VPDCT_SUPP 1
+
+#define VDO_VPD(hw, fw, ver, vbm, curr, vbi, gi, ct) \
+ (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \
+ | ((vbm) & 0x3) << 15 | (curr) << 14 | ((vbi) & 0x3f) << 7 \
+ | ((gi) & 0x3f) << 1 | (ct))
+
+/*
* SVDM Discover SVIDs request -> response
*
* Request is properly formatted VDM Header with discover SVIDs command.
- * Response is a set of SVIDs of all all supported SVIDs with all zero's to
+ * Response is the set of all supported SVIDs, with all zeros to
 * mark the end of SVIDs. If more than 12 SVIDs are supported, the command SHOULD be
* repeated.
*/
diff --git a/include/linux/usb/phy_companion.h b/include/linux/usb/phy_companion.h
index 407f530061cd..862aaeca2319 100644
--- a/include/linux/usb/phy_companion.h
+++ b/include/linux/usb/phy_companion.h
@@ -2,19 +2,9 @@
/*
* phy-companion.h -- phy companion to indicate the comparator part of PHY
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com
*
* Author: Kishon Vijay Abraham I <kishon@ti.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __DRIVERS_PHY_COMPANION_H
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 22c1f579afe3..eeb7c2157c72 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -32,7 +32,7 @@
#define USB_QUIRK_DELAY_INIT BIT(6)
/*
- * For high speed and super speed interupt endpoints, the USB 2.0 and
+ * For high speed and super speed interrupt endpoints, the USB 2.0 and
* USB 3.0 spec require the interval in microframes
* (1 microframe = 125 microseconds) to be calculated as
* interval = 2 ^ (bInterval-1).
@@ -69,7 +69,7 @@
/* Hub needs extra delay after resetting its port. */
#define USB_QUIRK_HUB_SLOW_RESET BIT(14)
-/* device has blacklisted endpoints */
-#define USB_QUIRK_ENDPOINT_BLACKLIST BIT(15)
+/* device has endpoints that should be ignored */
+#define USB_QUIRK_ENDPOINT_IGNORE BIT(15)
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/usb/r8152.h b/include/linux/usb/r8152.h
new file mode 100644
index 000000000000..20d88b1defc3
--- /dev/null
+++ b/include/linux/usb/r8152.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 Realtek Semiconductor Corp. All rights reserved.
+ */
+
+#ifndef __LINUX_R8152_H
+#define __LINUX_R8152_H
+
+#define RTL8152_REQT_READ 0xc0
+#define RTL8152_REQT_WRITE 0x40
+#define RTL8152_REQ_GET_REGS 0x05
+#define RTL8152_REQ_SET_REGS 0x05
+
+#define BYTE_EN_DWORD 0xff
+#define BYTE_EN_WORD 0x33
+#define BYTE_EN_BYTE 0x11
+#define BYTE_EN_SIX_BYTES 0x3f
+#define BYTE_EN_START_MASK 0x0f
+#define BYTE_EN_END_MASK 0xf0
+
+#define MCU_TYPE_PLA 0x0100
+#define MCU_TYPE_USB 0x0000
+
+/* Define these values to match your device */
+#define VENDOR_ID_REALTEK 0x0bda
+#define VENDOR_ID_MICROSOFT 0x045e
+#define VENDOR_ID_SAMSUNG 0x04e8
+#define VENDOR_ID_LENOVO 0x17ef
+#define VENDOR_ID_LINKSYS 0x13b1
+#define VENDOR_ID_NVIDIA 0x0955
+#define VENDOR_ID_TPLINK 0x2357
+
+#if IS_REACHABLE(CONFIG_USB_RTL8152)
+extern u8 rtl8152_get_version(struct usb_interface *intf);
+#endif
+
+#endif /* __LINUX_R8152_H */
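
IS_REACHABLE() hides the declaration when r8152 is modular but the caller is
built in, so consumers must guard call sites the same way. A sketch of the
usage pattern this export enables, where a generic CDC driver defers to the
vendor driver ('intf' is the probed usb_interface):

#if IS_REACHABLE(CONFIG_USB_RTL8152)
	if (rtl8152_get_version(intf))
		return -ENODEV;	/* let the r8152 driver claim it */
#endif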
diff --git a/include/linux/usb/r8a66597.h b/include/linux/usb/r8a66597.h
index c0753d026bbf..f0fa7ddadbaa 100644
--- a/include/linux/usb/r8a66597.h
+++ b/include/linux/usb/r8a66597.h
@@ -5,20 +5,6 @@
* Copyright (C) 2009 Renesas Solutions Corp.
*
* Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __LINUX_USB_R8A66597_H
diff --git a/include/linux/usb/rndis_host.h b/include/linux/usb/rndis_host.h
index 809bccd08455..489cfb1d00f6 100644
--- a/include/linux/usb/rndis_host.h
+++ b/include/linux/usb/rndis_host.h
@@ -2,20 +2,6 @@
/*
* Host Side support for RNDIS Networking Links
* Copyright (C) 2005 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_USB_RNDIS_HOST_H
@@ -197,6 +183,7 @@ struct rndis_keepalive_c { /* IN (optionally OUT) */
/* Flags for driver_info::data */
#define RNDIS_DRIVER_DATA_POLL_STATUS 1 /* poll status before control */
+#define RNDIS_DRIVER_DATA_DST_MAC_FIXUP 2 /* device ignores configured MAC address */
extern void rndis_status(struct usbnet *dev, struct urb *urb);
extern int
diff --git a/include/linux/usb/role.h b/include/linux/usb/role.h
index efac3af83d6b..b5deafd91f67 100644
--- a/include/linux/usb/role.h
+++ b/include/linux/usb/role.h
@@ -13,8 +13,9 @@ enum usb_role {
USB_ROLE_DEVICE,
};
-typedef int (*usb_role_switch_set_t)(struct device *dev, enum usb_role role);
-typedef enum usb_role (*usb_role_switch_get_t)(struct device *dev);
+typedef int (*usb_role_switch_set_t)(struct usb_role_switch *sw,
+ enum usb_role role);
+typedef enum usb_role (*usb_role_switch_get_t)(struct usb_role_switch *sw);
/**
* struct usb_role_switch_desc - USB Role Switch Descriptor
@@ -25,6 +26,8 @@ typedef enum usb_role (*usb_role_switch_get_t)(struct device *dev);
* @set: Callback for setting the role
* @get: Callback for getting the role (optional)
* @allow_userspace_control: If true userspace may change the role through sysfs
+ * @driver_data: Private data pointer
+ * @name: Name for the switch (optional)
*
* @usb2_port and @usb3_port will point to the USB host port and @udc to the USB
* device controller behind the USB connector with the role switch. If
@@ -40,6 +43,8 @@ struct usb_role_switch_desc {
usb_role_switch_set_t set;
usb_role_switch_get_t get;
bool allow_userspace_control;
+ void *driver_data;
+ const char *name;
};
@@ -57,6 +62,10 @@ struct usb_role_switch *
usb_role_switch_register(struct device *parent,
const struct usb_role_switch_desc *desc);
void usb_role_switch_unregister(struct usb_role_switch *sw);
+
+void usb_role_switch_set_drvdata(struct usb_role_switch *sw, void *data);
+void *usb_role_switch_get_drvdata(struct usb_role_switch *sw);
+const char *usb_role_string(enum usb_role role);
#else
static inline int usb_role_switch_set_role(struct usb_role_switch *sw,
enum usb_role role)
@@ -83,6 +92,12 @@ fwnode_usb_role_switch_get(struct fwnode_handle *node)
static inline void usb_role_switch_put(struct usb_role_switch *sw) { }
static inline struct usb_role_switch *
+usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline struct usb_role_switch *
usb_role_switch_register(struct device *parent,
const struct usb_role_switch_desc *desc)
{
@@ -90,6 +105,22 @@ usb_role_switch_register(struct device *parent,
}
static inline void usb_role_switch_unregister(struct usb_role_switch *sw) { }
+
+static inline void
+usb_role_switch_set_drvdata(struct usb_role_switch *sw, void *data)
+{
+}
+
+static inline void *usb_role_switch_get_drvdata(struct usb_role_switch *sw)
+{
+ return NULL;
+}
+
+static inline const char *usb_role_string(enum usb_role role)
+{
+ return "unknown";
+}
+
#endif
#endif /* __LINUX_USB_ROLE_H */
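
A hedged sketch of a provider driver adapting to the reworked callbacks — which now receive the switch itself rather than a struct device — together with the new driver_data and name fields; all "my_*" names are illustrative:

#include <linux/usb/role.h>

struct my_phy {
	enum usb_role role;
};

static int my_role_set(struct usb_role_switch *sw, enum usb_role role)
{
	struct my_phy *phy = usb_role_switch_get_drvdata(sw);

	phy->role = role;	/* program the actual mux/PHY here */
	return 0;
}

static struct usb_role_switch *my_register(struct device *parent,
					   struct my_phy *phy)
{
	struct usb_role_switch_desc desc = {
		.set = my_role_set,
		.driver_data = phy,	/* made available via get_drvdata() */
		.name = "my-role-switch",
	};

	return usb_role_switch_register(parent, &desc);
}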
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 14cac4a1ae8f..f7bfedb740f5 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -4,11 +4,6 @@
*
* Copyright (C) 1999 - 2012
* Greg Kroah-Hartman (greg@kroah.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
*/
#ifndef __LINUX_USB_SERIAL_H
@@ -17,7 +12,6 @@
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/serial.h>
-#include <linux/sysrq.h>
#include <linux/kfifo.h>
/* The maximum number of ports one device can grab at once */
@@ -63,7 +57,6 @@
* @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this
* port.
* @flags: usb serial port flags
- * @write_wait: a wait_queue_head_t used by the port.
* @work: work queue entry for the line discipline waking up.
* @dev: pointer to the serial device
*
@@ -109,7 +102,6 @@ struct usb_serial_port {
int tx_bytes;
unsigned long flags;
- wait_queue_head_t write_wait;
struct work_struct work;
unsigned long sysrq; /* sysrq timeout */
struct device dev;
@@ -133,6 +125,8 @@ static inline void usb_set_serial_port_data(struct usb_serial_port *port,
* @dev: pointer to the struct usb_device for this device
* @type: pointer to the struct usb_serial_driver for this device
* @interface: pointer to the struct usb_interface for this device
+ * @sibling: pointer to the struct usb_interface of any sibling interface
+ * @suspend_count: number of suspended (sibling) interfaces
* @num_ports: the number of ports this device has
* @num_interrupt_in: number of interrupt in endpoints we have
* @num_interrupt_out: number of interrupt out endpoints we have
@@ -148,8 +142,9 @@ struct usb_serial {
struct usb_device *dev;
struct usb_serial_driver *type;
struct usb_interface *interface;
+ struct usb_interface *sibling;
+ unsigned int suspend_count;
unsigned char disconnected:1;
- unsigned char suspending:1;
unsigned char attached:1;
unsigned char minors_reserved:1;
unsigned char num_ports;
@@ -213,7 +208,7 @@ struct usb_serial_endpoints {
* Return 0 to continue on with the initialization sequence. Anything
* else will abort it.
* @attach: pointer to the driver's attach function.
- * This will be called when the struct usb_serial structure is fully set
+ * This will be called when the struct usb_serial structure is fully
* set up. Do any local initialization of the device, or any private
* memory structure allocation at this point in time.
* @disconnect: pointer to the driver's disconnect function. This will be
@@ -263,7 +258,7 @@ struct usb_serial_driver {
void (*release)(struct usb_serial *serial);
int (*port_probe)(struct usb_serial_port *port);
- int (*port_remove)(struct usb_serial_port *port);
+ void (*port_remove)(struct usb_serial_port *port);
int (*suspend)(struct usb_serial *serial, pm_message_t message);
int (*resume)(struct usb_serial *serial);
@@ -276,15 +271,15 @@ struct usb_serial_driver {
int (*write)(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
/* Called only by the tty layer */
- int (*write_room)(struct tty_struct *tty);
+ unsigned int (*write_room)(struct tty_struct *tty);
int (*ioctl)(struct tty_struct *tty,
unsigned int cmd, unsigned long arg);
- int (*get_serial)(struct tty_struct *tty, struct serial_struct *ss);
+ void (*get_serial)(struct tty_struct *tty, struct serial_struct *ss);
int (*set_serial)(struct tty_struct *tty, struct serial_struct *ss);
- void (*set_termios)(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old);
+ void (*set_termios)(struct tty_struct *tty, struct usb_serial_port *port,
+ const struct ktermios *old);
void (*break_ctl)(struct tty_struct *tty, int break_state);
- int (*chars_in_buffer)(struct tty_struct *tty);
+ unsigned int (*chars_in_buffer)(struct tty_struct *tty);
void (*wait_until_sent)(struct tty_struct *tty, long timeout);
bool (*tx_empty)(struct usb_serial_port *port);
void (*throttle)(struct tty_struct *tty);
@@ -316,19 +311,19 @@ struct usb_serial_driver {
#define to_usb_serial_driver(d) \
container_of(d, struct usb_serial_driver, driver)
-extern int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[],
+int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[],
const char *name, const struct usb_device_id *id_table);
-extern void usb_serial_deregister_drivers(struct usb_serial_driver *const serial_drivers[]);
-extern void usb_serial_port_softint(struct usb_serial_port *port);
+void usb_serial_deregister_drivers(struct usb_serial_driver *const serial_drivers[]);
+void usb_serial_port_softint(struct usb_serial_port *port);
-extern int usb_serial_suspend(struct usb_interface *intf, pm_message_t message);
-extern int usb_serial_resume(struct usb_interface *intf);
+int usb_serial_suspend(struct usb_interface *intf, pm_message_t message);
+int usb_serial_resume(struct usb_interface *intf);
/* USB Serial console functions */
#ifdef CONFIG_USB_SERIAL_CONSOLE
-extern void usb_serial_console_init(int minor);
-extern void usb_serial_console_exit(void);
-extern void usb_serial_console_disconnect(struct usb_serial *serial);
+void usb_serial_console_init(int minor);
+void usb_serial_console_exit(void);
+void usb_serial_console_disconnect(struct usb_serial *serial);
#else
static inline void usb_serial_console_init(int minor) { }
static inline void usb_serial_console_exit(void) { }
@@ -336,45 +331,52 @@ static inline void usb_serial_console_disconnect(struct usb_serial *serial) {}
#endif
/* Functions needed by other parts of the usbserial core */
-extern struct usb_serial_port *usb_serial_port_get_by_minor(unsigned int minor);
-extern void usb_serial_put(struct usb_serial *serial);
-extern int usb_serial_generic_open(struct tty_struct *tty,
- struct usb_serial_port *port);
-extern int usb_serial_generic_write_start(struct usb_serial_port *port,
- gfp_t mem_flags);
-extern int usb_serial_generic_write(struct tty_struct *tty,
- struct usb_serial_port *port, const unsigned char *buf, int count);
-extern void usb_serial_generic_close(struct usb_serial_port *port);
-extern int usb_serial_generic_resume(struct usb_serial *serial);
-extern int usb_serial_generic_write_room(struct tty_struct *tty);
-extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty);
-extern void usb_serial_generic_wait_until_sent(struct tty_struct *tty,
- long timeout);
-extern void usb_serial_generic_read_bulk_callback(struct urb *urb);
-extern void usb_serial_generic_write_bulk_callback(struct urb *urb);
-extern void usb_serial_generic_throttle(struct tty_struct *tty);
-extern void usb_serial_generic_unthrottle(struct tty_struct *tty);
-extern int usb_serial_generic_tiocmiwait(struct tty_struct *tty,
- unsigned long arg);
-extern int usb_serial_generic_get_icount(struct tty_struct *tty,
- struct serial_icounter_struct *icount);
-extern int usb_serial_generic_register(void);
-extern void usb_serial_generic_deregister(void);
-extern int usb_serial_generic_submit_read_urbs(struct usb_serial_port *port,
- gfp_t mem_flags);
-extern void usb_serial_generic_process_read_urb(struct urb *urb);
-extern int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port,
- void *dest, size_t size);
-extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port,
- unsigned int ch);
-extern int usb_serial_handle_break(struct usb_serial_port *port);
-extern void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
- struct tty_struct *tty,
- unsigned int status);
+struct usb_serial_port *usb_serial_port_get_by_minor(unsigned int minor);
+void usb_serial_put(struct usb_serial *serial);
+
+int usb_serial_claim_interface(struct usb_serial *serial, struct usb_interface *intf);
+
+int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port);
+int usb_serial_generic_write_start(struct usb_serial_port *port, gfp_t mem_flags);
+int usb_serial_generic_write(struct tty_struct *tty, struct usb_serial_port *port,
+ const unsigned char *buf, int count);
+void usb_serial_generic_close(struct usb_serial_port *port);
+int usb_serial_generic_resume(struct usb_serial *serial);
+unsigned int usb_serial_generic_write_room(struct tty_struct *tty);
+unsigned int usb_serial_generic_chars_in_buffer(struct tty_struct *tty);
+void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout);
+void usb_serial_generic_read_bulk_callback(struct urb *urb);
+void usb_serial_generic_write_bulk_callback(struct urb *urb);
+void usb_serial_generic_throttle(struct tty_struct *tty);
+void usb_serial_generic_unthrottle(struct tty_struct *tty);
+int usb_serial_generic_tiocmiwait(struct tty_struct *tty, unsigned long arg);
+int usb_serial_generic_get_icount(struct tty_struct *tty, struct serial_icounter_struct *icount);
+int usb_serial_generic_register(void);
+void usb_serial_generic_deregister(void);
+int usb_serial_generic_submit_read_urbs(struct usb_serial_port *port, gfp_t mem_flags);
+void usb_serial_generic_process_read_urb(struct urb *urb);
+int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port, void *dest, size_t size);
+
+#if defined(CONFIG_USB_SERIAL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch);
+int usb_serial_handle_break(struct usb_serial_port *port);
+#else
+static inline int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
+{
+ return 0;
+}
+static inline int usb_serial_handle_break(struct usb_serial_port *port)
+{
+ return 0;
+}
+#endif
+
+void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
+ struct tty_struct *tty, unsigned int status);
-extern int usb_serial_bus_register(struct usb_serial_driver *device);
-extern void usb_serial_bus_deregister(struct usb_serial_driver *device);
+int usb_serial_bus_register(struct usb_serial_driver *device);
+void usb_serial_bus_deregister(struct usb_serial_driver *device);
extern struct bus_type usb_serial_bus_type;
extern struct tty_driver *usb_serial_tty_driver;
@@ -388,7 +390,7 @@ static inline void usb_serial_debug_data(struct device *dev,
}
/*
- * Macro for reporting errors in write path to avoid inifinite loop
+ * Macro for reporting errors in write path to avoid infinite loop
* when port is used as a console.
*/
#define dev_err_console(usport, fmt, ...) \
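
For context, a skeleton showing the updated ops in use — write_room() and chars_in_buffer() now return unsigned int, and port_remove() no longer returns a value; this is a sketch, not a real driver:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb/serial.h>

static unsigned int my_write_room(struct tty_struct *tty)
{
	return 64;	/* bytes the driver could accept right now */
}

static void my_port_remove(struct usb_serial_port *port)
{
	kfree(usb_get_serial_port_data(port));	/* nothing to return anymore */
}

static struct usb_serial_driver my_serial_device = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "my_serial",
	},
	.num_ports	= 1,
	.write_room	= my_write_room,
	.port_remove	= my_port_remove,
};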
diff --git a/include/linux/usb/storage.h b/include/linux/usb/storage.h
index e0240f864548..2827ce72e502 100644
--- a/include/linux/usb/storage.h
+++ b/include/linux/usb/storage.h
@@ -9,8 +9,6 @@
*
* This file contains definitions taken from the
* USB Mass Storage Class Specification Overview
- *
- * Distributed under the terms of the GNU GPL, version two.
*/
/* Storage subclass codes */
diff --git a/include/linux/usb/tcpci.h b/include/linux/usb/tcpci.h
new file mode 100644
index 000000000000..17657451c762
--- /dev/null
+++ b/include/linux/usb/tcpci.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2017 Google, Inc
+ *
+ * USB Type-C Port Controller Interface.
+ */
+
+#ifndef __LINUX_USB_TCPCI_H
+#define __LINUX_USB_TCPCI_H
+
+#include <linux/usb/typec.h>
+#include <linux/usb/tcpm.h>
+
+#define TCPC_VENDOR_ID 0x0
+#define TCPC_PRODUCT_ID 0x2
+#define TCPC_BCD_DEV 0x4
+#define TCPC_TC_REV 0x6
+#define TCPC_PD_REV 0x8
+#define TCPC_PD_INT_REV 0xa
+
+#define TCPC_ALERT 0x10
+#define TCPC_ALERT_EXTND BIT(14)
+#define TCPC_ALERT_EXTENDED_STATUS BIT(13)
+#define TCPC_ALERT_VBUS_DISCNCT BIT(11)
+#define TCPC_ALERT_RX_BUF_OVF BIT(10)
+#define TCPC_ALERT_FAULT BIT(9)
+#define TCPC_ALERT_V_ALARM_LO BIT(8)
+#define TCPC_ALERT_V_ALARM_HI BIT(7)
+#define TCPC_ALERT_TX_SUCCESS BIT(6)
+#define TCPC_ALERT_TX_DISCARDED BIT(5)
+#define TCPC_ALERT_TX_FAILED BIT(4)
+#define TCPC_ALERT_RX_HARD_RST BIT(3)
+#define TCPC_ALERT_RX_STATUS BIT(2)
+#define TCPC_ALERT_POWER_STATUS BIT(1)
+#define TCPC_ALERT_CC_STATUS BIT(0)
+
+#define TCPC_ALERT_MASK 0x12
+#define TCPC_POWER_STATUS_MASK 0x14
+#define TCPC_FAULT_STATUS_MASK 0x15
+
+#define TCPC_EXTENDED_STATUS_MASK 0x16
+#define TCPC_EXTENDED_STATUS_MASK_VSAFE0V BIT(0)
+
+#define TCPC_ALERT_EXTENDED_MASK 0x17
+#define TCPC_SINK_FAST_ROLE_SWAP BIT(0)
+
+#define TCPC_CONFIG_STD_OUTPUT 0x18
+
+#define TCPC_TCPC_CTRL 0x19
+#define TCPC_TCPC_CTRL_ORIENTATION BIT(0)
+#define PLUG_ORNT_CC1 0
+#define PLUG_ORNT_CC2 1
+#define TCPC_TCPC_CTRL_BIST_TM BIT(1)
+#define TCPC_TCPC_CTRL_EN_LK4CONN_ALRT BIT(6)
+
+#define TCPC_EXTENDED_STATUS 0x20
+#define TCPC_EXTENDED_STATUS_VSAFE0V BIT(0)
+
+#define TCPC_ROLE_CTRL 0x1a
+#define TCPC_ROLE_CTRL_DRP BIT(6)
+#define TCPC_ROLE_CTRL_RP_VAL_SHIFT 4
+#define TCPC_ROLE_CTRL_RP_VAL_MASK 0x3
+#define TCPC_ROLE_CTRL_RP_VAL_DEF 0x0
+#define TCPC_ROLE_CTRL_RP_VAL_1_5 0x1
+#define TCPC_ROLE_CTRL_RP_VAL_3_0 0x2
+#define TCPC_ROLE_CTRL_CC2_SHIFT 2
+#define TCPC_ROLE_CTRL_CC2_MASK 0x3
+#define TCPC_ROLE_CTRL_CC1_SHIFT 0
+#define TCPC_ROLE_CTRL_CC1_MASK 0x3
+#define TCPC_ROLE_CTRL_CC_RA 0x0
+#define TCPC_ROLE_CTRL_CC_RP 0x1
+#define TCPC_ROLE_CTRL_CC_RD 0x2
+#define TCPC_ROLE_CTRL_CC_OPEN 0x3
+
+#define TCPC_FAULT_CTRL 0x1b
+
+#define TCPC_POWER_CTRL 0x1c
+#define TCPC_POWER_CTRL_VCONN_ENABLE BIT(0)
+#define TCPC_POWER_CTRL_BLEED_DISCHARGE BIT(3)
+#define TCPC_POWER_CTRL_AUTO_DISCHARGE BIT(4)
+#define TCPC_DIS_VOLT_ALRM BIT(5)
+#define TCPC_POWER_CTRL_VBUS_VOLT_MON BIT(6)
+#define TCPC_FAST_ROLE_SWAP_EN BIT(7)
+
+#define TCPC_CC_STATUS 0x1d
+#define TCPC_CC_STATUS_TOGGLING BIT(5)
+#define TCPC_CC_STATUS_TERM BIT(4)
+#define TCPC_CC_STATUS_TERM_RP 0
+#define TCPC_CC_STATUS_TERM_RD 1
+#define TCPC_CC_STATE_SRC_OPEN 0
+#define TCPC_CC_STATUS_CC2_SHIFT 2
+#define TCPC_CC_STATUS_CC2_MASK 0x3
+#define TCPC_CC_STATUS_CC1_SHIFT 0
+#define TCPC_CC_STATUS_CC1_MASK 0x3
+
+#define TCPC_POWER_STATUS 0x1e
+#define TCPC_POWER_STATUS_DBG_ACC_CON BIT(7)
+#define TCPC_POWER_STATUS_UNINIT BIT(6)
+#define TCPC_POWER_STATUS_SOURCING_VBUS BIT(4)
+#define TCPC_POWER_STATUS_VBUS_DET BIT(3)
+#define TCPC_POWER_STATUS_VBUS_PRES BIT(2)
+#define TCPC_POWER_STATUS_VCONN_PRES BIT(1)
+#define TCPC_POWER_STATUS_SINKING_VBUS BIT(0)
+
+#define TCPC_FAULT_STATUS 0x1f
+
+#define TCPC_ALERT_EXTENDED 0x21
+
+#define TCPC_COMMAND 0x23
+#define TCPC_CMD_WAKE_I2C 0x11
+#define TCPC_CMD_DISABLE_VBUS_DETECT 0x22
+#define TCPC_CMD_ENABLE_VBUS_DETECT 0x33
+#define TCPC_CMD_DISABLE_SINK_VBUS 0x44
+#define TCPC_CMD_SINK_VBUS 0x55
+#define TCPC_CMD_DISABLE_SRC_VBUS 0x66
+#define TCPC_CMD_SRC_VBUS_DEFAULT 0x77
+#define TCPC_CMD_SRC_VBUS_HIGH 0x88
+#define TCPC_CMD_LOOK4CONNECTION 0x99
+#define TCPC_CMD_RXONEMORE 0xAA
+#define TCPC_CMD_I2C_IDLE 0xFF
+
+#define TCPC_DEV_CAP_1 0x24
+#define TCPC_DEV_CAP_2 0x26
+#define TCPC_STD_INPUT_CAP 0x28
+#define TCPC_STD_OUTPUT_CAP 0x29
+
+#define TCPC_MSG_HDR_INFO 0x2e
+#define TCPC_MSG_HDR_INFO_DATA_ROLE BIT(3)
+#define TCPC_MSG_HDR_INFO_PWR_ROLE BIT(0)
+#define TCPC_MSG_HDR_INFO_REV_SHIFT 1
+#define TCPC_MSG_HDR_INFO_REV_MASK 0x3
+
+#define TCPC_RX_DETECT 0x2f
+#define TCPC_RX_DETECT_HARD_RESET BIT(5)
+#define TCPC_RX_DETECT_SOP BIT(0)
+#define TCPC_RX_DETECT_SOP1 BIT(1)
+#define TCPC_RX_DETECT_SOP2 BIT(2)
+#define TCPC_RX_DETECT_DBG1 BIT(3)
+#define TCPC_RX_DETECT_DBG2 BIT(4)
+
+#define TCPC_RX_BYTE_CNT 0x30
+#define TCPC_RX_BUF_FRAME_TYPE 0x31
+#define TCPC_RX_BUF_FRAME_TYPE_SOP 0
+#define TCPC_RX_HDR 0x32
+#define TCPC_RX_DATA 0x34 /* through 0x4f */
+
+#define TCPC_TRANSMIT 0x50
+#define TCPC_TRANSMIT_RETRY_SHIFT 4
+#define TCPC_TRANSMIT_RETRY_MASK 0x3
+#define TCPC_TRANSMIT_TYPE_SHIFT 0
+#define TCPC_TRANSMIT_TYPE_MASK 0x7
+
+#define TCPC_TX_BYTE_CNT 0x51
+#define TCPC_TX_HDR 0x52
+#define TCPC_TX_DATA 0x54 /* through 0x6f */
+
+#define TCPC_VBUS_VOLTAGE 0x70
+#define TCPC_VBUS_VOLTAGE_MASK 0x3ff
+#define TCPC_VBUS_VOLTAGE_LSB_MV 25
+#define TCPC_VBUS_SINK_DISCONNECT_THRESH 0x72
+#define TCPC_VBUS_SINK_DISCONNECT_THRESH_LSB_MV 25
+#define TCPC_VBUS_SINK_DISCONNECT_THRESH_MAX 0x3ff
+#define TCPC_VBUS_STOP_DISCHARGE_THRESH 0x74
+#define TCPC_VBUS_VOLTAGE_ALARM_HI_CFG 0x76
+#define TCPC_VBUS_VOLTAGE_ALARM_LO_CFG 0x78
+
+/* I2C_WRITE_BYTE_COUNT + 1 when TX_BUF_BYTE_x is only accessible through I2C_WRITE_BYTE_COUNT */
+#define TCPC_TRANSMIT_BUFFER_MAX_LEN 31
+
+#define tcpc_presenting_rd(reg, cc) \
+ (!(TCPC_ROLE_CTRL_DRP & (reg)) && \
+ (((reg) & (TCPC_ROLE_CTRL_## cc ##_MASK << TCPC_ROLE_CTRL_## cc ##_SHIFT)) == \
+ (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_## cc ##_SHIFT)))
+
+struct tcpci;
+
+/*
+ * @TX_BUF_BYTE_x_hidden:
+ * Optional; Set when TX_BUF_BYTE_x can only be accessed through I2C_WRITE_BYTE_COUNT.
+ * @frs_sourcing_vbus:
+ * Optional; Callback to perform chip specific operations when FRS
+ * is sourcing vbus.
+ * @auto_discharge_disconnect:
+ * Optional; Enables TCPC to autonomously discharge vbus on disconnect.
+ * @vbus_vsafe0v:
+ * Optional; Set when TCPC can detect whether vbus is at VSAFE0V.
+ * @set_partner_usb_comm_capable:
+ * Optional; The USB Communications Capable bit indicates if port
+ * partner is capable of communication over the USB data lines
+ * (e.g. D+/- or SS Tx/Rx). Called to notify the status of the bit.
+ */
+struct tcpci_data {
+ struct regmap *regmap;
+ unsigned char TX_BUF_BYTE_x_hidden:1;
+ unsigned char auto_discharge_disconnect:1;
+ unsigned char vbus_vsafe0v:1;
+
+ int (*init)(struct tcpci *tcpci, struct tcpci_data *data);
+ int (*set_vconn)(struct tcpci *tcpci, struct tcpci_data *data,
+ bool enable);
+ int (*start_drp_toggling)(struct tcpci *tcpci, struct tcpci_data *data,
+ enum typec_cc_status cc);
+ int (*set_vbus)(struct tcpci *tcpci, struct tcpci_data *data, bool source, bool sink);
+ void (*frs_sourcing_vbus)(struct tcpci *tcpci, struct tcpci_data *data);
+ void (*set_partner_usb_comm_capable)(struct tcpci *tcpci, struct tcpci_data *data,
+ bool capable);
+};
+
+struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data);
+void tcpci_unregister_port(struct tcpci *tcpci);
+irqreturn_t tcpci_irq(struct tcpci *tcpci);
+
+struct tcpm_port;
+struct tcpm_port *tcpci_get_tcpm_port(struct tcpci *tcpci);
+
+static inline enum typec_cc_status tcpci_to_typec_cc(unsigned int cc, bool sink)
+{
+ switch (cc) {
+ case 0x1:
+ return sink ? TYPEC_CC_RP_DEF : TYPEC_CC_RA;
+ case 0x2:
+ return sink ? TYPEC_CC_RP_1_5 : TYPEC_CC_RD;
+ case 0x3:
+ if (sink)
+ return TYPEC_CC_RP_3_0;
+ fallthrough;
+ case 0x0:
+ default:
+ return TYPEC_CC_OPEN;
+ }
+}
+#endif /* __LINUX_USB_TCPCI_H */
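
A rough sketch of how a vendor glue driver sits on top of this interface; the regmap config is a placeholder and the "my_*" names are illustrative:

#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/usb/tcpci.h>

static const struct regmap_config my_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

static int my_tcpci_init(struct tcpci *tcpci, struct tcpci_data *data)
{
	return 0;	/* chip-specific register setup would go here */
}

static int my_probe(struct i2c_client *client)
{
	struct tcpci_data *data;
	struct tcpci *tcpci;

	data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->regmap = devm_regmap_init_i2c(client, &my_regmap_config);
	if (IS_ERR(data->regmap))
		return PTR_ERR(data->regmap);

	data->init = my_tcpci_init;
	tcpci = tcpci_register_port(&client->dev, data);

	return PTR_ERR_OR_ZERO(tcpci);
}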
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index e7979c01c351..bffc8d3e14ad 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -19,6 +19,10 @@ enum typec_cc_status {
TYPEC_CC_RP_3_0,
};
+/* Collision Avoidance */
+#define SINK_TX_NG TYPEC_CC_RP_1_5
+#define SINK_TX_OK TYPEC_CC_RP_3_0
+
enum typec_cc_polarity {
TYPEC_POLARITY_CC1,
TYPEC_POLARITY_CC2,
@@ -62,6 +66,8 @@ enum tcpm_transmit_type {
* For example, some tcpcs may include BC1.2 charger detection
* and use that in this case.
* @set_cc: Called to set value of CC pins
+ * @apply_rc: Optional; Needed to move TCPCI based chipset to APPLY_RC state
+ * as stated by the TCPCI specification.
* @get_cc: Called to read current CC pin values
* @set_polarity:
* Called to set polarity
@@ -78,7 +84,36 @@ enum tcpm_transmit_type {
* automatically if a connection is established.
* @try_role: Optional; called to set a preferred role
* @pd_transmit:Called to transmit PD message
- * @mux: Pointer to multiplexer data
+ * @set_bist_data: Turn on/off bist data mode for compliance testing
+ * @enable_frs:
+ * Optional; Called to enable/disable PD 3.0 fast role swap.
+ * Enabling frs is accessory dependent as not all PD3.0
+ * accessories support fast role swap.
+ * @frs_sourcing_vbus:
+ * Optional; Called to notify that vbus is now being sourced.
+ * Low level drivers can perform chip specific operations, if any.
+ * @enable_auto_vbus_discharge:
+ * Optional; TCPCI spec based TCPC implementations can optionally
+ * support hardware to autonomously discharge vbus upon disconnecting
+ * as sink or source. TCPM signals TCPC to enable the mechanism upon
+ * entering connected state and signals disabling upon disconnect.
+ * @set_auto_vbus_discharge_threshold:
+ * Mandatory when enable_auto_vbus_discharge is implemented. TCPM
+ * calls this function to allow lower-level drivers to program the
+ * vbus threshold voltage below which the vbus discharge circuit
+ * will be turned on. requested_vbus_voltage is set to 0 when vbus
+ * is expected to disappear, e.g. during PR_SWAP or HARD_RESET.
+ * @is_vbus_vsafe0v:
+ * Optional; TCPCI spec based TCPC implementations are expected to
+ * detect VSAFE0V voltage level at vbus. When detection of VSAFE0V
+ * is supported by TCPC, set this callback for TCPM to query
+ * whether vbus is at VSAFE0V when needed.
+ * Returns true when vbus is at VSAFE0V, false otherwise.
+ * @set_partner_usb_comm_capable:
+ * Optional; The USB Communications Capable bit indicates if port
+ * partner is capable of communication over the USB data lines
+ * (e.g. D+/- or SS Tx/Rx). Called to notify the status of the bit.
*/
struct tcpc_dev {
struct fwnode_handle *fwnode;
@@ -87,6 +122,8 @@ struct tcpc_dev {
int (*get_vbus)(struct tcpc_dev *dev);
int (*get_current_limit)(struct tcpc_dev *dev);
int (*set_cc)(struct tcpc_dev *dev, enum typec_cc_status cc);
+ int (*apply_rc)(struct tcpc_dev *dev, enum typec_cc_status cc,
+ enum typec_cc_polarity polarity);
int (*get_cc)(struct tcpc_dev *dev, enum typec_cc_status *cc1,
enum typec_cc_status *cc2);
int (*set_polarity)(struct tcpc_dev *dev,
@@ -102,7 +139,15 @@ struct tcpc_dev {
enum typec_cc_status cc);
int (*try_role)(struct tcpc_dev *dev, int role);
int (*pd_transmit)(struct tcpc_dev *dev, enum tcpm_transmit_type type,
- const struct pd_message *msg);
+ const struct pd_message *msg, unsigned int negotiated_rev);
+ int (*set_bist_data)(struct tcpc_dev *dev, bool on);
+ int (*enable_frs)(struct tcpc_dev *dev, bool enable);
+ void (*frs_sourcing_vbus)(struct tcpc_dev *dev);
+ int (*enable_auto_vbus_discharge)(struct tcpc_dev *dev, bool enable);
+ int (*set_auto_vbus_discharge_threshold)(struct tcpc_dev *dev, enum typec_pwr_opmode mode,
+ bool pps_active, u32 requested_vbus_voltage);
+ bool (*is_vbus_vsafe0v)(struct tcpc_dev *dev);
+ void (*set_partner_usb_comm_capable)(struct tcpc_dev *dev, bool enable);
};
struct tcpm_port;
@@ -112,6 +157,8 @@ void tcpm_unregister_port(struct tcpm_port *port);
void tcpm_vbus_change(struct tcpm_port *port);
void tcpm_cc_change(struct tcpm_port *port);
+void tcpm_sink_frs(struct tcpm_port *port);
+void tcpm_sourcing_vbus(struct tcpm_port *port);
void tcpm_pd_receive(struct tcpm_port *port,
const struct pd_message *msg);
void tcpm_pd_transmit_complete(struct tcpm_port *port,
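
To show where the new optional callbacks plug in, a hedged sketch of a low-level TCPC driver registering with TCPM; the mandatory callbacks (init, get_vbus, set_cc, pd_transmit, ...) are elided and the names are illustrative:

#include <linux/usb/tcpm.h>

struct my_chip {
	struct tcpc_dev tcpc;
	struct tcpm_port *port;
	struct device *dev;
};

static bool my_is_vbus_vsafe0v(struct tcpc_dev *dev)
{
	/* Read the chip's extended status register here. */
	return true;
}

static int my_register_with_tcpm(struct my_chip *chip)
{
	chip->tcpc.is_vbus_vsafe0v = my_is_vbus_vsafe0v;	/* optional */

	chip->port = tcpm_register_port(chip->dev, &chip->tcpc);
	return PTR_ERR_OR_ZERO(chip->port);
}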
diff --git a/include/linux/usb/tegra_usb_phy.h b/include/linux/usb/tegra_usb_phy.h
index c29d1b4c9381..46e73584b6e6 100644
--- a/include/linux/usb/tegra_usb_phy.h
+++ b/include/linux/usb/tegra_usb_phy.h
@@ -1,16 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __TEGRA_USB_PHY_H
@@ -18,6 +8,7 @@
#include <linux/clk.h>
#include <linux/gpio.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/usb/otg.h>
@@ -30,6 +21,7 @@
* enter host mode
* requires_extra_tuning_parameters: true if xcvr_hsslew, hssquelch_level
* and hsdiscon_level should be set for adequate signal quality
+ * requires_pmc_ao_power_up: true if USB AO is powered down by default
*/
struct tegra_phy_soc_config {
@@ -37,6 +29,7 @@ struct tegra_phy_soc_config {
bool has_hostpc;
bool requires_usbmode_setup;
bool requires_extra_tuning_parameters;
+ bool requires_pmc_ao_power_up;
};
struct tegra_utmip_config {
@@ -62,6 +55,7 @@ enum tegra_usb_phy_port_speed {
struct tegra_xtal_freq;
struct tegra_usb_phy {
+ int irq;
int instance;
const struct tegra_xtal_freq *freq;
void __iomem *regs;
@@ -70,6 +64,7 @@ struct tegra_usb_phy {
struct clk *pll_u;
struct clk *pad_clk;
struct regulator *vbus;
+ struct regmap *pmc_regmap;
enum usb_dr_mode mode;
void *config;
const struct tegra_phy_soc_config *soc_config;
@@ -79,6 +74,8 @@ struct tegra_usb_phy {
bool is_ulpi_phy;
struct gpio_desc *reset_gpio;
struct reset_control *pad_rst;
+ bool wakeup_enabled;
+ bool pad_wakeup;
bool powered_on;
};
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
index c358b3fd05c9..7751bedcae5d 100644
--- a/include/linux/usb/typec.h
+++ b/include/linux/usb/typec.h
@@ -17,10 +17,13 @@ struct typec_partner;
struct typec_cable;
struct typec_plug;
struct typec_port;
+struct typec_altmode_ops;
struct fwnode_handle;
struct device;
+struct usb_power_delivery;
+
enum typec_port_type {
TYPEC_PORT_SRC,
TYPEC_PORT_SNK,
@@ -51,6 +54,16 @@ enum typec_role {
TYPEC_SOURCE,
};
+static inline int is_sink(enum typec_role role)
+{
+ return role == TYPEC_SINK;
+}
+
+static inline int is_source(enum typec_role role)
+{
+ return role == TYPEC_SOURCE;
+}
+
enum typec_pwr_opmode {
TYPEC_PWR_MODE_USB,
TYPEC_PWR_MODE_1_5A,
@@ -73,6 +86,20 @@ enum typec_orientation {
};
/*
+ * struct enter_usb_data - Enter_USB Message details
+ * @eudo: Enter_USB Data Object
+ * @active_link_training: Active Cable Plug Link Training
+ *
+ * @active_link_training is a flag that should be set with uni-directional SBRX
+ * communication, and left 0 with passive cables and with bi-directional SBRX
+ * communication.
+ */
+struct enter_usb_data {
+ u32 eudo;
+ unsigned char active_link_training:1;
+};
+
+/*
* struct usb_pd_identity - USB Power Delivery identity data
* @id_header: ID Header VDO
* @cert_stat: Cert Stat VDO
@@ -112,15 +139,23 @@ struct typec_altmode_desc {
enum typec_port_data roles;
};
+void typec_partner_set_pd_revision(struct typec_partner *partner, u16 pd_revision);
+int typec_partner_set_num_altmodes(struct typec_partner *partner, int num_altmodes);
struct typec_altmode
*typec_partner_register_altmode(struct typec_partner *partner,
const struct typec_altmode_desc *desc);
+int typec_plug_set_num_altmodes(struct typec_plug *plug, int num_altmodes);
struct typec_altmode
*typec_plug_register_altmode(struct typec_plug *plug,
const struct typec_altmode_desc *desc);
struct typec_altmode
*typec_port_register_altmode(struct typec_port *port,
const struct typec_altmode_desc *desc);
+
+void typec_port_register_altmodes(struct typec_port *port,
+ const struct typec_altmode_ops *ops, void *drvdata,
+ struct typec_altmode **altmodes, size_t n);
+
void typec_unregister_altmode(struct typec_altmode *altmode);
struct typec_port *typec_altmode2port(struct typec_altmode *alt);
@@ -148,6 +183,7 @@ struct typec_plug_desc {
* @type: The plug type from USB PD Cable VDO
* @active: Is the cable active or passive
* @identity: Result of Discover Identity command
+ * @pd_revision: USB Power Delivery Specification revision if supported
*
* Represents USB Type-C Cable attached to USB Type-C port.
*/
@@ -155,6 +191,8 @@ struct typec_cable_desc {
enum typec_plug_type type;
unsigned int active:1;
struct usb_pd_identity *identity;
+ u16 pd_revision; /* 0300H = "3.0" */
};
/*
@@ -162,15 +200,22 @@ struct typec_cable_desc {
* @usb_pd: USB Power Delivery support
* @accessory: Audio, Debug or none.
* @identity: Discover Identity command data
+ * @pd_revision: USB Power Delivery Specification Revision if supported
*
* Details about a partner that is attached to USB Type-C port. If @identity
* member exists when partner is registered, a directory named "identity" is
* created in sysfs for the partner device.
+ *
+ * @pd_revision is based on the setting of the "Specification Revision" field
+ * in the message header on the initial "Source Capabilities" message received
+ * from the partner, or a "Request" message received from the partner, depending
+ * on whether our port is a Sink or a Source.
*/
struct typec_partner_desc {
unsigned int usb_pd:1;
enum typec_accessory accessory;
struct usb_pd_identity *identity;
+ u16 pd_revision; /* 0300H = "3.0" */
};
/**
@@ -180,6 +225,8 @@ struct typec_partner_desc {
* @pr_set: Set Power Role
* @vconn_set: Source VCONN
* @port_type_set: Set port type
+ * @pd_get: Get available USB Power Delivery Capabilities.
+ * @pd_set: Set USB Power Delivery Capabilities.
*/
struct typec_operations {
int (*try_role)(struct typec_port *port, int role);
@@ -188,6 +235,14 @@ struct typec_operations {
int (*vconn_set)(struct typec_port *port, enum typec_role role);
int (*port_type_set)(struct typec_port *port,
enum typec_port_type type);
+ struct usb_power_delivery **(*pd_get)(struct typec_port *port);
+ int (*pd_set)(struct typec_port *port, struct usb_power_delivery *pd);
+};
+
+enum usb_pd_svdm_ver {
+ SVDM_VER_1_0 = 0,
+ SVDM_VER_2_0 = 1,
+ SVDM_VER_MAX = SVDM_VER_2_0,
};
/*
@@ -196,12 +251,12 @@ struct typec_operations {
* @data: Supported data role of the port
* @revision: USB Type-C Specification release. Binary coded decimal
* @pd_revision: USB Power Delivery Specification revision if supported
+ * @svdm_version: USB PD Structured VDM version if supported
* @prefer_role: Initial role preference (DRP ports).
* @accessory: Supported Accessory Modes
- * @sw: Cable plug orientation switch
- * @mux: Multiplexer switch for Alternate/Accessory Modes
* @fwnode: Optional fwnode of the port
* @driver_data: Private pointer for driver specific info
+ * @pd: Optional USB Power Delivery Support
* @ops: Port operations vector
*
* Static capabilities of a single USB Type-C port.
@@ -211,12 +266,16 @@ struct typec_capability {
enum typec_port_data data;
u16 revision; /* 0120H = "1.2" */
u16 pd_revision; /* 0300H = "3.0" */
+ enum usb_pd_svdm_ver svdm_version;
int prefer_role;
enum typec_accessory accessory[TYPEC_MAX_ACCESSORY];
+ unsigned int orientation_aware:1;
struct fwnode_handle *fwnode;
void *driver_data;
+ struct usb_power_delivery *pd;
+
const struct typec_operations *ops;
};
@@ -255,7 +314,21 @@ int typec_set_mode(struct typec_port *port, int mode);
void *typec_get_drvdata(struct typec_port *port);
+int typec_get_fw_cap(struct typec_capability *cap,
+ struct fwnode_handle *fwnode);
+
+int typec_find_pwr_opmode(const char *name);
+int typec_find_orientation(const char *name);
int typec_find_port_power_role(const char *name);
int typec_find_power_role(const char *name);
int typec_find_port_data_role(const char *name);
+
+void typec_partner_set_svdm_version(struct typec_partner *partner,
+ enum usb_pd_svdm_ver svdm_version);
+int typec_get_negotiated_svdm_version(struct typec_port *port);
+
+int typec_port_set_usb_power_delivery(struct typec_port *port, struct usb_power_delivery *pd);
+int typec_partner_set_usb_power_delivery(struct typec_partner *partner,
+ struct usb_power_delivery *pd);
+
#endif /* __LINUX_USB_TYPEC_H */
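
A hedged sketch of a port driver combining typec_get_fw_cap() with the extended capability fields added here; the ops table and probe signature are illustrative:

#include <linux/err.h>
#include <linux/usb/typec.h>

static int my_try_role(struct typec_port *port, int role)
{
	return 0;
}

static const struct typec_operations my_typec_ops = {
	.try_role = my_try_role,
};

static int my_port_probe(struct device *dev, struct fwnode_handle *fwnode)
{
	struct typec_capability cap = { };
	struct typec_port *port;
	int ret;

	ret = typec_get_fw_cap(&cap, fwnode);	/* type/data/prefer-role from FW */
	if (ret)
		return ret;

	cap.revision = 0x0120;		/* Type-C 1.2 */
	cap.pd_revision = 0x0300;	/* PD 3.0 */
	cap.svdm_version = SVDM_VER_2_0;
	cap.fwnode = fwnode;
	cap.ops = &my_typec_ops;

	port = typec_register_port(dev, &cap);
	return PTR_ERR_OR_ZERO(port);
}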
diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h
index 923ff3af0628..350d49012659 100644
--- a/include/linux/usb/typec_altmode.h
+++ b/include/linux/usb/typec_altmode.h
@@ -95,13 +95,7 @@ enum {
*
* Port drivers can use TYPEC_MODE_AUDIO and TYPEC_MODE_DEBUG as the mode
* value for typec_set_mode() when accessory modes are supported.
- */
-enum {
- TYPEC_MODE_AUDIO = TYPEC_STATE_MODAL, /* Audio Accessory */
- TYPEC_MODE_DEBUG, /* Debug Accessory */
-};
-
-/*
+ *
* USB4 also requires that the pins on the connector are repurposed, just like
* Alternate Modes. USB4 mode is however not entered with the Enter Mode Command
* like the Alternate Modes are, but instead with a special Enter_USB Message.
@@ -112,9 +106,11 @@ enum {
* state values, just like the Accessory Modes.
*/
enum {
- TYPEC_MODE_USB2 = TYPEC_MODE_DEBUG, /* USB 2.0 mode */
+ TYPEC_MODE_USB2 = TYPEC_STATE_MODAL, /* USB 2.0 mode */
TYPEC_MODE_USB3, /* USB 3.2 mode */
- TYPEC_MODE_USB4 /* USB4 mode */
+ TYPEC_MODE_USB4, /* USB4 mode */
+ TYPEC_MODE_AUDIO, /* Audio Accessory */
+ TYPEC_MODE_DEBUG, /* Debug Accessory */
};
#define TYPEC_MODAL_STATE(_state_) ((_state_) + TYPEC_STATE_MODAL)
@@ -126,16 +122,9 @@ void typec_altmode_put_plug(struct typec_altmode *plug);
struct typec_altmode *typec_match_altmode(struct typec_altmode **altmodes,
size_t n, u16 svid, u8 mode);
-struct typec_altmode *
-typec_altmode_register_notifier(struct device *dev, u16 svid, u8 mode,
- struct notifier_block *nb);
-
-void typec_altmode_unregister_notifier(struct typec_altmode *adev,
- struct notifier_block *nb);
-
/**
* typec_altmode_get_orientation - Get cable plug orientation
- * altmode: Handle to the alternate mode
+ * @altmode: Handle to the alternate mode
*/
static inline enum typec_orientation
typec_altmode_get_orientation(struct typec_altmode *altmode)
@@ -144,6 +133,16 @@ typec_altmode_get_orientation(struct typec_altmode *altmode)
}
/**
+ * typec_altmode_get_svdm_version - Get negotiated SVDM version
+ * @altmode: Handle to the alternate mode
+ */
+static inline int
+typec_altmode_get_svdm_version(struct typec_altmode *altmode)
+{
+ return typec_get_negotiated_svdm_version(typec_altmode2port(altmode));
+}
+
+/**
* struct typec_altmode_driver - USB Type-C alternate mode device driver
* @id_table: Null terminated array of SVIDs
* @probe: Callback for device binding
@@ -163,10 +162,26 @@ struct typec_altmode_driver {
#define to_altmode_driver(d) container_of(d, struct typec_altmode_driver, \
driver)
+/**
+ * typec_altmode_register_driver - registers a USB Type-C alternate mode
+ * device driver
+ * @drv: pointer to struct typec_altmode_driver
+ *
+ * These drivers will be bound to the partner alternate mode devices. They will
+ * handle all SVID specific communication.
+ */
#define typec_altmode_register_driver(drv) \
__typec_altmode_register_driver(drv, THIS_MODULE)
int __typec_altmode_register_driver(struct typec_altmode_driver *drv,
struct module *module);
+/**
+ * typec_altmode_unregister_driver - unregisters a USB Type-C alternate mode
+ * device driver
+ * @drv: pointer to struct typec_altmode_driver
+ *
+ * These drivers will be bound to the partner alternate mode devices. They will
+ * handle all SVID specific communication.
+ */
void typec_altmode_unregister_driver(struct typec_altmode_driver *drv);
#define module_typec_altmode_driver(__typec_altmode_driver) \
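
A skeleton alternate mode driver for an imaginary SVID (0xff01), showing how the macro ties a probe/remove pair to an id table:

#include <linux/module.h>
#include <linux/usb/typec_altmode.h>

static const struct typec_device_id my_id_table[] = {
	{ 0xff01, TYPEC_ANY_MODE },	/* illustrative SVID */
	{ }
};
MODULE_DEVICE_TABLE(typec, my_id_table);

static int my_altmode_probe(struct typec_altmode *alt)
{
	return 0;	/* enter the mode, set up class devices, etc. */
}

static void my_altmode_remove(struct typec_altmode *alt)
{
}

static struct typec_altmode_driver my_altmode_driver = {
	.id_table	= my_id_table,
	.probe		= my_altmode_probe,
	.remove		= my_altmode_remove,
	.driver = {
		.name	= "my_altmode",
		.owner	= THIS_MODULE,
	},
};
module_typec_altmode_driver(my_altmode_driver);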
diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h
index fc4c7edb2e8a..8d09c2f0a9b8 100644
--- a/include/linux/usb/typec_dp.h
+++ b/include/linux/usb/typec_dp.h
@@ -73,6 +73,11 @@ enum {
#define DP_CAP_USB BIT(7)
#define DP_CAP_DFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(15, 8)) >> 8)
#define DP_CAP_UFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(23, 16)) >> 16)
+/* Get pin assignment taking plug & receptacle into consideration */
+#define DP_CAP_PIN_ASSIGN_UFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \
+ DP_CAP_UFP_D_PIN_ASSIGN(_cap_) : DP_CAP_DFP_D_PIN_ASSIGN(_cap_))
+#define DP_CAP_PIN_ASSIGN_DFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \
+ DP_CAP_DFP_D_PIN_ASSIGN(_cap_) : DP_CAP_UFP_D_PIN_ASSIGN(_cap_))
/* DisplayPort Status Update VDO bits */
#define DP_STATUS_CONNECTION(_status_) ((_status_) & 3)
@@ -97,7 +102,7 @@ enum {
#define DP_CONF_PIN_ASSIGNEMENT_SHIFT 8
#define DP_CONF_PIN_ASSIGNEMENT_MASK GENMASK(15, 8)
-/* Helper for setting/getting the pin assignement value to the configuration */
+/* Helper for setting/getting the pin assignment value to the configuration */
#define DP_CONF_SET_PIN_ASSIGN(_a_) ((_a_) << 8)
#define DP_CONF_GET_PIN_ASSIGN(_conf_) (((_conf_) & GENMASK(15, 8)) >> 8)
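
A short sketch of the new helpers in use; they select the UFP_D or DFP_D byte depending on whether the partner's capability VDO describes a receptacle or a plug. The function name is illustrative:

#include <linux/usb/typec_dp.h>

static u8 my_remote_ufp_d_pins(u32 partner_cap)
{
	/* Yields the pin-assignment bitmap we can offer the UFP_D side. */
	return DP_CAP_PIN_ASSIGN_UFP_D(partner_cap);
}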
diff --git a/include/linux/usb/typec_mux.h b/include/linux/usb/typec_mux.h
index be7292c0be5e..9292f0e07846 100644
--- a/include/linux/usb/typec_mux.h
+++ b/include/linux/usb/typec_mux.h
@@ -3,32 +3,44 @@
#ifndef __USB_TYPEC_MUX
#define __USB_TYPEC_MUX
+#include <linux/property.h>
#include <linux/usb/typec.h>
struct device;
struct typec_mux;
+struct typec_mux_dev;
struct typec_switch;
+struct typec_switch_dev;
struct typec_altmode;
struct fwnode_handle;
-typedef int (*typec_switch_set_fn_t)(struct typec_switch *sw,
+typedef int (*typec_switch_set_fn_t)(struct typec_switch_dev *sw,
enum typec_orientation orientation);
struct typec_switch_desc {
struct fwnode_handle *fwnode;
typec_switch_set_fn_t set;
+ const char *name;
void *drvdata;
};
-struct typec_switch *typec_switch_get(struct device *dev);
+struct typec_switch *fwnode_typec_switch_get(struct fwnode_handle *fwnode);
void typec_switch_put(struct typec_switch *sw);
-struct typec_switch *
+int typec_switch_set(struct typec_switch *sw,
+ enum typec_orientation orientation);
+
+static inline struct typec_switch *typec_switch_get(struct device *dev)
+{
+ return fwnode_typec_switch_get(dev_fwnode(dev));
+}
+
+struct typec_switch_dev *
typec_switch_register(struct device *parent,
const struct typec_switch_desc *desc);
-void typec_switch_unregister(struct typec_switch *sw);
+void typec_switch_unregister(struct typec_switch_dev *sw);
-void typec_switch_set_drvdata(struct typec_switch *sw, void *data);
-void *typec_switch_get_drvdata(struct typec_switch *sw);
+void typec_switch_set_drvdata(struct typec_switch_dev *sw, void *data);
+void *typec_switch_get_drvdata(struct typec_switch_dev *sw);
struct typec_mux_state {
struct typec_altmode *alt;
@@ -36,23 +48,64 @@ struct typec_mux_state {
void *data;
};
-typedef int (*typec_mux_set_fn_t)(struct typec_mux *mux,
+typedef int (*typec_mux_set_fn_t)(struct typec_mux_dev *mux,
struct typec_mux_state *state);
struct typec_mux_desc {
struct fwnode_handle *fwnode;
typec_mux_set_fn_t set;
+ const char *name;
void *drvdata;
};
-struct typec_mux *
-typec_mux_get(struct device *dev, const struct typec_altmode_desc *desc);
+#if IS_ENABLED(CONFIG_TYPEC)
+
+struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode,
+ const struct typec_altmode_desc *desc);
void typec_mux_put(struct typec_mux *mux);
-struct typec_mux *
+int typec_mux_set(struct typec_mux *mux, struct typec_mux_state *state);
+
+struct typec_mux_dev *
typec_mux_register(struct device *parent, const struct typec_mux_desc *desc);
-void typec_mux_unregister(struct typec_mux *mux);
+void typec_mux_unregister(struct typec_mux_dev *mux);
+
+void typec_mux_set_drvdata(struct typec_mux_dev *mux, void *data);
+void *typec_mux_get_drvdata(struct typec_mux_dev *mux);
+
+#else
+
+static inline struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode,
+ const struct typec_altmode_desc *desc)
+{
+ return NULL;
+}
+
+static inline void typec_mux_put(struct typec_mux *mux) {}
+
+static inline int typec_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
+{
+ return 0;
+}
+
+static inline struct typec_mux_dev *
+typec_mux_register(struct device *parent, const struct typec_mux_desc *desc)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+static inline void typec_mux_unregister(struct typec_mux_dev *mux) {}
+
+static inline void typec_mux_set_drvdata(struct typec_mux_dev *mux, void *data) {}
+static inline void *typec_mux_get_drvdata(struct typec_mux_dev *mux)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+#endif /* CONFIG_TYPEC */
-void typec_mux_set_drvdata(struct typec_mux *mux, void *data);
-void *typec_mux_get_drvdata(struct typec_mux *mux);
+static inline struct typec_mux *
+typec_mux_get(struct device *dev, const struct typec_altmode_desc *desc)
+{
+ return fwnode_typec_mux_get(dev_fwnode(dev), desc);
+}
#endif /* __USB_TYPEC_MUX */
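
Consumer-side sketch: with the stubs above this compiles with CONFIG_TYPEC disabled and degrades to a no-op (fwnode_typec_mux_get() returns NULL, typec_mux_set() returns 0). The function name is illustrative:

#include <linux/err.h>
#include <linux/usb/typec_altmode.h>
#include <linux/usb/typec_mux.h>

static int my_mux_to_usb(struct device *dev)
{
	struct typec_mux_state state = { .mode = TYPEC_STATE_USB };
	struct typec_mux *mux;
	int ret;

	mux = typec_mux_get(dev, NULL);	/* NULL: not altmode-specific */
	if (IS_ERR(mux))
		return PTR_ERR(mux);

	ret = typec_mux_set(mux, &state);
	typec_mux_put(mux);
	return ret;
}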
diff --git a/include/linux/usb/typec_retimer.h b/include/linux/usb/typec_retimer.h
new file mode 100644
index 000000000000..5e036b3360e2
--- /dev/null
+++ b/include/linux/usb/typec_retimer.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __USB_TYPEC_RETIMER
+#define __USB_TYPEC_RETIMER
+
+#include <linux/property.h>
+#include <linux/usb/typec.h>
+
+struct device;
+struct typec_retimer;
+struct typec_altmode;
+struct fwnode_handle;
+
+struct typec_retimer_state {
+ struct typec_altmode *alt;
+ unsigned long mode;
+ void *data;
+};
+
+typedef int (*typec_retimer_set_fn_t)(struct typec_retimer *retimer,
+ struct typec_retimer_state *state);
+
+struct typec_retimer_desc {
+ struct fwnode_handle *fwnode;
+ typec_retimer_set_fn_t set;
+ const char *name;
+ void *drvdata;
+};
+
+struct typec_retimer *fwnode_typec_retimer_get(struct fwnode_handle *fwnode);
+void typec_retimer_put(struct typec_retimer *retimer);
+int typec_retimer_set(struct typec_retimer *retimer, struct typec_retimer_state *state);
+
+static inline struct typec_retimer *typec_retimer_get(struct device *dev)
+{
+ return fwnode_typec_retimer_get(dev_fwnode(dev));
+}
+
+struct typec_retimer *
+typec_retimer_register(struct device *parent, const struct typec_retimer_desc *desc);
+void typec_retimer_unregister(struct typec_retimer *retimer);
+
+void *typec_retimer_get_drvdata(struct typec_retimer *retimer);
+
+#endif /* __USB_TYPEC_RETIMER */
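
Provider-side sketch of registering a retimer with this API; the names are illustrative:

#include <linux/usb/typec_retimer.h>

static int my_retimer_set(struct typec_retimer *retimer,
			  struct typec_retimer_state *state)
{
	/* Program the retimer for state->mode / state->alt here. */
	return 0;
}

static struct typec_retimer *my_retimer_register(struct device *parent)
{
	struct typec_retimer_desc desc = {
		.fwnode	= dev_fwnode(parent),
		.set	= my_retimer_set,
		.name	= "my-retimer",
	};

	return typec_retimer_register(parent, &desc);
}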
diff --git a/include/linux/usb/typec_tbt.h b/include/linux/usb/typec_tbt.h
new file mode 100644
index 000000000000..63dd44b72e0c
--- /dev/null
+++ b/include/linux/usb/typec_tbt.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __USB_TYPEC_TBT_H
+#define __USB_TYPEC_TBT_H
+
+#include <linux/usb/typec_altmode.h>
+
+#define USB_TYPEC_VENDOR_INTEL 0x8087
+/* Alias for convenience */
+#define USB_TYPEC_TBT_SID USB_TYPEC_VENDOR_INTEL
+
+/* Connector state for Thunderbolt3 */
+#define TYPEC_TBT_MODE TYPEC_STATE_MODAL
+
+/**
+ * struct typec_thunderbolt_data - Thunderbolt3 Alt Mode specific data
+ * @device_mode: Device Discover Mode VDO
+ * @cable_mode: Cable Discover Mode VDO
+ * @enter_vdo: Enter Mode VDO
+ */
+struct typec_thunderbolt_data {
+ u32 device_mode;
+ u32 cable_mode;
+ u32 enter_vdo;
+};
+
+/* TBT3 Device Discover Mode VDO bits */
+#define TBT_MODE BIT(0)
+#define TBT_ADAPTER(_vdo_) (((_vdo_) & BIT(16)) >> 16)
+#define TBT_ADAPTER_LEGACY 0
+#define TBT_ADAPTER_TBT3 1
+#define TBT_INTEL_SPECIFIC_B0 BIT(26)
+#define TBT_VENDOR_SPECIFIC_B0 BIT(30)
+#define TBT_VENDOR_SPECIFIC_B1 BIT(31)
+
+#define TBT_SET_ADAPTER(a) (((a) & 1) << 16)
+
+/* TBT3 Cable Discover Mode VDO bits */
+#define TBT_CABLE_SPEED(_vdo_) (((_vdo_) & GENMASK(18, 16)) >> 16)
+#define TBT_CABLE_USB3_GEN1 1
+#define TBT_CABLE_USB3_PASSIVE 2
+#define TBT_CABLE_10_AND_20GBPS 3
+#define TBT_CABLE_ROUNDED_SUPPORT(_vdo_) \
+ (((_vdo_) & GENMASK(20, 19)) >> 19)
+#define TBT_GEN3_NON_ROUNDED 0
+#define TBT_GEN3_GEN4_ROUNDED_NON_ROUNDED 1
+#define TBT_CABLE_OPTICAL BIT(21)
+#define TBT_CABLE_RETIMER BIT(22)
+#define TBT_CABLE_LINK_TRAINING BIT(23)
+
+#define TBT_SET_CABLE_SPEED(_s_) (((_s_) & GENMASK(2, 0)) << 16)
+#define TBT_SET_CABLE_ROUNDED(_g_) (((_g_) & GENMASK(1, 0)) << 19)
+
+/* TBT3 Device Enter Mode VDO bits */
+#define TBT_ENTER_MODE_CABLE_SPEED(s) TBT_SET_CABLE_SPEED(s)
+#define TBT_ENTER_MODE_ACTIVE_CABLE BIT(24)
+
+#endif /* __USB_TYPEC_TBT_H */
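
A decoding sketch for the Cable Discover Mode VDO bits defined above; the helper names are illustrative:

#include <linux/usb/typec_tbt.h>

static bool my_cable_is_optical(const struct typec_thunderbolt_data *data)
{
	return data->cable_mode & TBT_CABLE_OPTICAL;
}

static bool my_cable_is_20gbps(const struct typec_thunderbolt_data *data)
{
	return TBT_CABLE_SPEED(data->cable_mode) == TBT_CABLE_10_AND_20GBPS;
}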
diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h
index 36c2982780ad..5050f502c1ed 100644
--- a/include/linux/usb/ulpi.h
+++ b/include/linux/usb/ulpi.h
@@ -3,10 +3,6 @@
* ulpi.h -- ULPI defines and function prototypes
*
* Copyright (C) 2010 Nokia Corporation
- *
- * This software is distributed under the terms of the GNU General
- * Public License ("GPL") as published by the Free Software Foundation,
- * version 2 of that License.
*/
#ifndef __LINUX_USB_ULPI_H
diff --git a/include/linux/usb/usb338x.h b/include/linux/usb/usb338x.h
index 20020c1336d5..70a7e3cdb3c9 100644
--- a/include/linux/usb/usb338x.h
+++ b/include/linux/usb/usb338x.h
@@ -6,17 +6,6 @@
* Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com)
* Copyright (C) 2003 David Brownell
* Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_USB_USB338X_H
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index b0bff3083278..9f08a584d707 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -4,25 +4,17 @@
*
* Copyright (C) 2000-2005 by David Brownell <dbrownell@users.sourceforge.net>
* Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_USB_USBNET_H
#define __LINUX_USB_USBNET_H
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/usb.h>
+
/* interface from usbnet core to each USB networking link we handle */
struct usbnet {
/* housekeeping */
@@ -53,6 +45,9 @@ struct usbnet {
u32 hard_mtu; /* count any extra framing */
size_t rx_urb_size; /* size for rx urbs */
struct mii_if_info mii;
+ long rx_speed; /* If MII not used */
+ long tx_speed; /* If MII not used */
+# define SPEED_UNSET -1
/* various kinds of pending driver work */
struct sk_buff_head rxq;
@@ -65,8 +60,6 @@ struct usbnet {
struct usb_anchor deferred;
struct tasklet_struct bh;
- struct pcpu_sw_netstats __percpu *stats64;
-
struct work_struct kevent;
unsigned long flags;
# define EVENT_TX_HALT 0
@@ -207,11 +200,13 @@ struct cdc_state {
struct usb_interface *data;
};
+extern void usbnet_cdc_update_filter(struct usbnet *dev);
extern int usbnet_generic_cdc_bind(struct usbnet *, struct usb_interface *);
extern int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf);
extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *);
extern void usbnet_cdc_unbind(struct usbnet *, struct usb_interface *);
extern void usbnet_cdc_status(struct usbnet *, struct urb *);
+extern int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb);
/* CDC and RNDIS support the same host-chosen packet filters for IN transfers */
#define DEFAULT_FILTER (USB_CDC_PACKET_TYPE_BROADCAST \
@@ -266,13 +261,16 @@ extern void usbnet_pause_rx(struct usbnet *);
extern void usbnet_resume_rx(struct usbnet *);
extern void usbnet_purge_paused_rxq(struct usbnet *);
-extern int usbnet_get_link_ksettings(struct net_device *net,
+extern int usbnet_get_link_ksettings_mii(struct net_device *net,
struct ethtool_link_ksettings *cmd);
-extern int usbnet_set_link_ksettings(struct net_device *net,
+extern int usbnet_set_link_ksettings_mii(struct net_device *net,
const struct ethtool_link_ksettings *cmd);
+extern int usbnet_get_link_ksettings_internal(struct net_device *net,
+ struct ethtool_link_ksettings *cmd);
extern u32 usbnet_get_link(struct net_device *net);
extern u32 usbnet_get_msglevel(struct net_device *);
extern void usbnet_set_msglevel(struct net_device *, u32);
+extern void usbnet_set_rx_mode(struct net_device *net);
extern void usbnet_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
extern int usbnet_nway_reset(struct net_device *net);
@@ -283,7 +281,5 @@ extern int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags);
extern void usbnet_status_stop(struct usbnet *dev);
extern void usbnet_update_max_qlen(struct usbnet *dev);
-extern void usbnet_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats);
#endif /* __LINUX_USB_USBNET_H */
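
Minidriver sketch for a device without an MDIO PHY: link speed goes through the new rx_speed/tx_speed fields (assumed here to be in bits per second) and the _internal ksettings helper:

#include <linux/ethtool.h>
#include <linux/usb/usbnet.h>

static void my_update_speed(struct usbnet *dev, u32 mbps)
{
	dev->rx_speed = (long)mbps * 1000000;
	dev->tx_speed = (long)mbps * 1000000;
}

static const struct ethtool_ops my_ethtool_ops = {
	.get_link		= usbnet_get_link,
	.get_link_ksettings	= usbnet_get_link_ksettings_internal,
};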
diff --git a/include/linux/usb/xhci-dbgp.h b/include/linux/usb/xhci-dbgp.h
index 0a37f1283bf0..171fd74b1cfc 100644
--- a/include/linux/usb/xhci-dbgp.h
+++ b/include/linux/usb/xhci-dbgp.h
@@ -5,17 +5,13 @@
* Copyright (C) 2016 Intel Corporation
*
* Author: Lu Baolu <baolu.lu@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_XHCI_DBGP_H
#define __LINUX_XHCI_DBGP_H
#ifdef CONFIG_EARLY_PRINTK_USB_XDBC
-int __init early_xdbc_parse_parameter(char *s);
+int __init early_xdbc_parse_parameter(char *s, int keep_early);
int __init early_xdbc_setup_hardware(void);
void __init early_xdbc_register_console(void);
#else
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 000a5954b2e8..712363c7a2e8 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -84,6 +84,10 @@
/* Cannot handle REPORT_LUNS */ \
US_FLAG(ALWAYS_SYNC, 0x20000000) \
/* lies about caching, so always sync */ \
+ US_FLAG(NO_SAME, 0x40000000) \
+ /* Cannot handle WRITE_SAME */ \
+ US_FLAG(SENSE_AFTER_SYNC, 0x80000000) \
+ /* Do REQUEST_SENSE after SYNCHRONIZE_CACHE */ \
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
@@ -92,6 +96,6 @@ enum { US_DO_ALL_FLAGS };
#include <linux/usb/storage.h>
extern int usb_usual_ignore_device(struct usb_interface *intf);
-extern struct usb_device_id usb_storage_usb_ids[];
+extern const struct usb_device_id usb_storage_usb_ids[];
#endif /* __LINUX_USB_USUAL_H */
diff --git a/include/linux/usbdevice_fs.h b/include/linux/usbdevice_fs.h
index 79aab0065ec8..14ea197ce37f 100644
--- a/include/linux/usbdevice_fs.h
+++ b/include/linux/usbdevice_fs.h
@@ -69,7 +69,7 @@ struct usbdevfs_urb32 {
compat_int_t error_count;
compat_uint_t signr;
compat_caddr_t usercontext; /* unused */
- struct usbdevfs_iso_packet_desc iso_frame_desc[0];
+ struct usbdevfs_iso_packet_desc iso_frame_desc[];
};
struct usbdevfs_ioctl32 {
diff --git a/include/linux/user_events.h b/include/linux/user_events.h
new file mode 100644
index 000000000000..592a3fbed98e
--- /dev/null
+++ b/include/linux/user_events.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2021, Microsoft Corporation.
+ *
+ * Authors:
+ * Beau Belgrave <beaub@linux.microsoft.com>
+ */
+#ifndef _UAPI_LINUX_USER_EVENTS_H
+#define _UAPI_LINUX_USER_EVENTS_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#ifdef __KERNEL__
+#include <linux/uio.h>
+#else
+#include <sys/uio.h>
+#endif
+
+#define USER_EVENTS_SYSTEM "user_events"
+#define USER_EVENTS_PREFIX "u:"
+
+/* Create dynamic location entry within a 32-bit value */
+#define DYN_LOC(offset, size) ((size) << 16 | (offset))
+
+/*
+ * Describes an event registration and stores the results of the registration.
+ * This structure is passed to the DIAG_IOCSREG ioctl, callers at a minimum
+ * must set the size and name_args before invocation.
+ */
+struct user_reg {
+
+ /* Input: Size of the user_reg structure being used */
+ __u32 size;
+
+ /* Input: Pointer to string with event name, description and flags */
+ __u64 name_args;
+
+ /* Output: Bitwise index of the event within the status page */
+ __u32 status_bit;
+
+ /* Output: Index of the event to use when writing data */
+ __u32 write_index;
+} __attribute__((__packed__));
+
+#define DIAG_IOC_MAGIC '*'
+
+/* Requests to register a user_event */
+#define DIAG_IOCSREG _IOWR(DIAG_IOC_MAGIC, 0, struct user_reg*)
+
+/* Requests to delete a user_event */
+#define DIAG_IOCSDEL _IOW(DIAG_IOC_MAGIC, 1, char*)
+
+#endif /* _UAPI_LINUX_USER_EVENTS_H */
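
A userspace sketch of the registration flow: fill struct user_reg and issue DIAG_IOCSREG on the tracing "user_events_data" file; the path and the event format string are illustrative:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/user_events.h>

int main(void)
{
	struct user_reg reg = { 0 };
	int fd = open("/sys/kernel/tracing/user_events_data", O_RDWR);

	if (fd < 0)
		return 1;

	reg.size = sizeof(reg);
	reg.name_args = (__u64)(uintptr_t)"my_event u32 count";

	if (ioctl(fd, DIAG_IOCSREG, &reg) == -1)
		return 1;

	printf("write_index=%u status_bit=%u\n",
	       reg.write_index, reg.status_bit);
	return 0;
}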
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 6ef1c7109fc4..45f09bec02c4 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -50,20 +50,34 @@ enum ucount_type {
UCOUNT_INOTIFY_INSTANCES,
UCOUNT_INOTIFY_WATCHES,
#endif
+#ifdef CONFIG_FANOTIFY
+ UCOUNT_FANOTIFY_GROUPS,
+ UCOUNT_FANOTIFY_MARKS,
+#endif
UCOUNT_COUNTS,
};
+enum rlimit_type {
+ UCOUNT_RLIMIT_NPROC,
+ UCOUNT_RLIMIT_MSGQUEUE,
+ UCOUNT_RLIMIT_SIGPENDING,
+ UCOUNT_RLIMIT_MEMLOCK,
+ UCOUNT_RLIMIT_COUNTS,
+};
+
struct user_namespace {
struct uid_gid_map uid_map;
struct uid_gid_map gid_map;
struct uid_gid_map projid_map;
- atomic_t count;
struct user_namespace *parent;
int level;
kuid_t owner;
kgid_t group;
struct ns_common ns;
unsigned long flags;
+ /* parent_could_setfcap: true if the creator of this ns had CAP_SETFCAP
+ * in its effective capability set at the child ns creation time. */
+ bool parent_could_setfcap;
#ifdef CONFIG_KEYS
/* List of joinable keyrings in this namespace. Modification access of
@@ -86,30 +100,58 @@ struct user_namespace {
struct ctl_table_header *sysctls;
#endif
struct ucounts *ucounts;
- int ucount_max[UCOUNT_COUNTS];
+ long ucount_max[UCOUNT_COUNTS];
+ long rlimit_max[UCOUNT_RLIMIT_COUNTS];
} __randomize_layout;
struct ucounts {
struct hlist_node node;
struct user_namespace *ns;
kuid_t uid;
- int count;
- atomic_t ucount[UCOUNT_COUNTS];
+ atomic_t count;
+ atomic_long_t ucount[UCOUNT_COUNTS];
+ atomic_long_t rlimit[UCOUNT_RLIMIT_COUNTS];
};
extern struct user_namespace init_user_ns;
+extern struct ucounts init_ucounts;
bool setup_userns_sysctls(struct user_namespace *ns);
void retire_userns_sysctls(struct user_namespace *ns);
struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid, enum ucount_type type);
void dec_ucount(struct ucounts *ucounts, enum ucount_type type);
+struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid);
+struct ucounts * __must_check get_ucounts(struct ucounts *ucounts);
+void put_ucounts(struct ucounts *ucounts);
+
+static inline long get_rlimit_value(struct ucounts *ucounts, enum rlimit_type type)
+{
+ return atomic_long_read(&ucounts->rlimit[type]);
+}
+
+long inc_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v);
+bool dec_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v);
+long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type);
+void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum rlimit_type type);
+bool is_rlimit_overlimit(struct ucounts *ucounts, enum rlimit_type type, unsigned long max);
+
+static inline long get_userns_rlimit_max(struct user_namespace *ns, enum rlimit_type type)
+{
+ return READ_ONCE(ns->rlimit_max[type]);
+}
+
+static inline void set_userns_rlimit_max(struct user_namespace *ns,
+ enum rlimit_type type, unsigned long max)
+{
+ ns->rlimit_max[type] = max <= LONG_MAX ? max : LONG_MAX;
+}
#ifdef CONFIG_USER_NS
static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
{
if (ns)
- atomic_inc(&ns->count);
+ refcount_inc(&ns->ns.count);
return ns;
}
@@ -119,7 +161,7 @@ extern void __put_user_ns(struct user_namespace *ns);
static inline void put_user_ns(struct user_namespace *ns)
{
- if (ns && atomic_dec_and_test(&ns->count))
+ if (ns && refcount_dec_and_test(&ns->ns.count))
__put_user_ns(ns);
}
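To make the intended use of the new rlimit accounting concrete, here is a hedged kernel-side sketch of charging one unit against a per-user limit; charge_one() and the limit plumbing are illustrative, not existing helpers:

/* Sketch: charge one unit of UCOUNT_RLIMIT_SIGPENDING against @ucounts. */
static bool charge_one(struct ucounts *ucounts, long limit)
{
	long count = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);

	/*
	 * The increment is accounted up the namespace hierarchy; overflow
	 * is reported as LONG_MAX. Back out the charge on excess.
	 */
	if (count > limit) {
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
		return false;
	}
	return true;
}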
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index ac9d71e24b81..9df0b9a762cc 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -14,6 +14,14 @@
#include <linux/userfaultfd.h> /* linux/include/uapi/linux/userfaultfd.h */
#include <linux/fcntl.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <asm-generic/pgtable_uffd.h>
+#include <linux/hugetlb_inline.h>
+
+/* The set of all possible UFFD-related VM flags. */
+#define __VM_UFFD_FLAGS (VM_UFFD_MISSING | VM_UFFD_WP | VM_UFFD_MINOR)
/*
* CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
@@ -32,13 +40,41 @@ extern int sysctl_unprivileged_userfaultfd;
extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);
+/*
+ * The mode of operation for __mcopy_atomic and its helpers.
+ *
+ * This is almost an implementation detail (mcopy_atomic below doesn't take this
+ * as a parameter), but it's exposed here because memory-kind-specific
+ * implementations (e.g. hugetlbfs) need to know the mode of operation.
+ */
+enum mcopy_atomic_mode {
+ /* A normal copy_from_user into the destination range. */
+ MCOPY_ATOMIC_NORMAL,
+ /* Don't copy; map the destination range to the zero page. */
+ MCOPY_ATOMIC_ZEROPAGE,
+ /* Just install pte(s) with the existing page(s) in the page cache. */
+ MCOPY_ATOMIC_CONTINUE,
+};
+
+extern int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+ struct vm_area_struct *dst_vma,
+ unsigned long dst_addr, struct page *page,
+ bool newly_allocated, bool wp_copy);
+
extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
unsigned long src_start, unsigned long len,
- bool *mmap_changing);
+ atomic_t *mmap_changing, __u64 mode);
extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
unsigned long dst_start,
unsigned long len,
- bool *mmap_changing);
+ atomic_t *mmap_changing);
+extern ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long dst_start,
+ unsigned long len, atomic_t *mmap_changing);
+extern int mwriteprotect_range(struct mm_struct *dst_mm,
+ unsigned long start, unsigned long len,
+ bool enable_wp, atomic_t *mmap_changing);
+extern void uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *vma,
+ unsigned long start, unsigned long len, bool enable_wp);
/* mm helpers */
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
@@ -47,14 +83,83 @@ static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
}
+/*
+ * Never enable huge pmd sharing on some uffd registered vmas:
+ *
+ * - VM_UFFD_WP VMAs, because write protect information is per pgtable entry.
+ *
+ * - VM_UFFD_MINOR VMAs, because otherwise we would never get minor faults for
+ * VMAs which share huge pmds. (If you have two mappings to the same
+ * underlying pages, and fault in the non-UFFD-registered one with a write,
+ * with huge pmd sharing this would *also* set up the second UFFD-registered
+ * mapping, and we'd not get minor faults.)
+ */
+static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma)
+{
+ return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
+}
+
+/*
+ * Don't do fault around for either WP or MINOR registered uffd ranges. For a
+ * MINOR registered range, fault around would be a disaster: ptes could be
+ * installed without notification. For WP it should mostly be fine as long as
+ * fault around checks for pte_none() before installation; to be safe, we
+ * forbid it there as well.
+ */
+static inline bool uffd_disable_fault_around(struct vm_area_struct *vma)
+{
+ return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
+}
+
static inline bool userfaultfd_missing(struct vm_area_struct *vma)
{
return vma->vm_flags & VM_UFFD_MISSING;
}
+static inline bool userfaultfd_wp(struct vm_area_struct *vma)
+{
+ return vma->vm_flags & VM_UFFD_WP;
+}
+
+static inline bool userfaultfd_minor(struct vm_area_struct *vma)
+{
+ return vma->vm_flags & VM_UFFD_MINOR;
+}
+
+static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
+ pte_t pte)
+{
+ return userfaultfd_wp(vma) && pte_uffd_wp(pte);
+}
+
+static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
+ pmd_t pmd)
+{
+ return userfaultfd_wp(vma) && pmd_uffd_wp(pmd);
+}
+
static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
- return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP);
+ return vma->vm_flags & __VM_UFFD_FLAGS;
+}
+
+static inline bool vma_can_userfault(struct vm_area_struct *vma,
+ unsigned long vm_flags)
+{
+ if ((vm_flags & VM_UFFD_MINOR) &&
+ (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
+ return false;
+#ifndef CONFIG_PTE_MARKER_UFFD_WP
+ /*
+ * If the user requested uffd-wp but pte markers for uffd-wp are
+ * not enabled, only anonymous memory is supported; shmem &
+ * hugetlbfs are not.
+ */
+ if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma))
+ return false;
+#endif
+ return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
+ vma_is_shmem(vma);
}
extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *);
@@ -70,9 +175,8 @@ extern bool userfaultfd_remove(struct vm_area_struct *vma,
unsigned long start,
unsigned long end);
-extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct list_head *uf);
+extern int userfaultfd_unmap_prep(struct mm_struct *mm, unsigned long start,
+ unsigned long end, struct list_head *uf);
extern void userfaultfd_unmap_complete(struct mm_struct *mm,
struct list_head *uf);
@@ -96,6 +200,29 @@ static inline bool userfaultfd_missing(struct vm_area_struct *vma)
return false;
}
+static inline bool userfaultfd_wp(struct vm_area_struct *vma)
+{
+ return false;
+}
+
+static inline bool userfaultfd_minor(struct vm_area_struct *vma)
+{
+ return false;
+}
+
+static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
+ pte_t pte)
+{
+ return false;
+}
+
+static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
+ pmd_t pmd)
+{
+ return false;
+}
+
static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
return false;
@@ -130,7 +257,7 @@ static inline bool userfaultfd_remove(struct vm_area_struct *vma,
return true;
}
-static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
+static inline int userfaultfd_unmap_prep(struct mm_struct *mm,
unsigned long start, unsigned long end,
struct list_head *uf)
{
@@ -142,6 +269,56 @@ static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
{
}
+static inline bool uffd_disable_fault_around(struct vm_area_struct *vma)
+{
+ return false;
+}
+
#endif /* CONFIG_USERFAULTFD */
+static inline bool pte_marker_entry_uffd_wp(swp_entry_t entry)
+{
+#ifdef CONFIG_PTE_MARKER_UFFD_WP
+ return is_pte_marker_entry(entry) &&
+ (pte_marker_get(entry) & PTE_MARKER_UFFD_WP);
+#else
+ return false;
+#endif
+}
+
+static inline bool pte_marker_uffd_wp(pte_t pte)
+{
+#ifdef CONFIG_PTE_MARKER_UFFD_WP
+ swp_entry_t entry;
+
+ if (!is_swap_pte(pte))
+ return false;
+
+ entry = pte_to_swp_entry(pte);
+
+ return pte_marker_entry_uffd_wp(entry);
+#else
+ return false;
+#endif
+}
+
+/*
+ * Returns true if this is a swap pte and was uffd-wp wr-protected in either
+ * form (a pte marker or a normal swap pte), false otherwise.
+ */
+static inline bool pte_swp_uffd_wp_any(pte_t pte)
+{
+#ifdef CONFIG_PTE_MARKER_UFFD_WP
+ if (!is_swap_pte(pte))
+ return false;
+
+ if (pte_swp_uffd_wp(pte))
+ return true;
+
+ if (pte_marker_uffd_wp(pte))
+ return true;
+#endif
+ return false;
+}
+
#endif /* _LINUX_USERFAULTFD_K_H */
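A rough sketch of how the three mcopy_atomic_mode variants map onto the exported entry points above (the real dispatch lives in fs/userfaultfd.c; this wrapper and its name are illustrative):

/* Illustrative dispatch over the three fill modes. */
static ssize_t uffd_fill(struct mm_struct *mm, unsigned long dst,
			 unsigned long src, unsigned long len,
			 atomic_t *mmap_changing, enum mcopy_atomic_mode mode)
{
	switch (mode) {
	case MCOPY_ATOMIC_NORMAL:	/* UFFDIO_COPY */
		return mcopy_atomic(mm, dst, src, len, mmap_changing, 0);
	case MCOPY_ATOMIC_ZEROPAGE:	/* UFFDIO_ZEROPAGE */
		return mfill_zeropage(mm, dst, len, mmap_changing);
	case MCOPY_ATOMIC_CONTINUE:	/* UFFDIO_CONTINUE */
	default:
		return mcopy_continue(mm, dst, len, mmap_changing);
	}
}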
diff --git a/include/linux/usermode_driver.h b/include/linux/usermode_driver.h
new file mode 100644
index 000000000000..ad970416260d
--- /dev/null
+++ b/include/linux/usermode_driver.h
@@ -0,0 +1,19 @@
+#ifndef __LINUX_USERMODE_DRIVER_H__
+#define __LINUX_USERMODE_DRIVER_H__
+
+#include <linux/umh.h>
+#include <linux/path.h>
+
+struct umd_info {
+ const char *driver_name;
+ struct file *pipe_to_umh;
+ struct file *pipe_from_umh;
+ struct path wd;
+ struct pid *tgid;
+};
+int umd_load_blob(struct umd_info *info, const void *data, size_t len);
+int umd_unload_blob(struct umd_info *info);
+int fork_usermode_driver(struct umd_info *info);
+void umd_cleanup_helper(struct umd_info *info);
+
+#endif /* __LINUX_USERMODE_DRIVER_H__ */
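The expected call sequence for this small API, as a hedged sketch (the blob contents, error handling, and names here are placeholders):

/* Sketch: load an embedded userspace blob and fork it as a driver. */
static struct umd_info my_umd = { .driver_name = "my_umd" };

static int start_my_umd(const void *blob, size_t len)
{
	int err;

	err = umd_load_blob(&my_umd, blob, len);
	if (err)
		return err;

	err = fork_usermode_driver(&my_umd);
	if (err) {
		umd_unload_blob(&my_umd);
		return err;
	}

	/* Talk to the helper via my_umd.pipe_to_umh / pipe_from_umh;
	 * call umd_cleanup_helper() during teardown. */
	return 0;
}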
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
index 44429d9142ca..bf7613ba412b 100644
--- a/include/linux/utsname.h
+++ b/include/linux/utsname.h
@@ -4,13 +4,13 @@
#include <linux/sched.h>
-#include <linux/kref.h>
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
#include <linux/err.h>
#include <uapi/linux/utsname.h>
enum uts_proc {
+ UTS_PROC_ARCH,
UTS_PROC_OSTYPE,
UTS_PROC_OSRELEASE,
UTS_PROC_VERSION,
@@ -22,7 +22,6 @@ struct user_namespace;
extern struct user_namespace init_user_ns;
struct uts_namespace {
- struct kref kref;
struct new_utsname name;
struct user_namespace *user_ns;
struct ucounts *ucounts;
@@ -33,16 +32,17 @@ extern struct uts_namespace init_uts_ns;
#ifdef CONFIG_UTS_NS
static inline void get_uts_ns(struct uts_namespace *ns)
{
- kref_get(&ns->kref);
+ refcount_inc(&ns->ns.count);
}
extern struct uts_namespace *copy_utsname(unsigned long flags,
struct user_namespace *user_ns, struct uts_namespace *old_ns);
-extern void free_uts_ns(struct kref *kref);
+extern void free_uts_ns(struct uts_namespace *ns);
static inline void put_uts_ns(struct uts_namespace *ns)
{
- kref_put(&ns->kref, free_uts_ns);
+ if (refcount_dec_and_test(&ns->ns.count))
+ free_uts_ns(ns);
}
void uts_ns_init(void);
diff --git a/include/linux/uuid.h b/include/linux/uuid.h
index 0c631e2a73b6..8cdc0d3567cd 100644
--- a/include/linux/uuid.h
+++ b/include/linux/uuid.h
@@ -43,6 +43,16 @@ static inline void guid_copy(guid_t *dst, const guid_t *src)
memcpy(dst, src, sizeof(guid_t));
}
+static inline void import_guid(guid_t *dst, const __u8 *src)
+{
+ memcpy(dst, src, sizeof(guid_t));
+}
+
+static inline void export_guid(__u8 *dst, const guid_t *src)
+{
+ memcpy(dst, src, sizeof(guid_t));
+}
+
static inline bool guid_is_null(const guid_t *guid)
{
return guid_equal(guid, &guid_null);
@@ -58,12 +68,23 @@ static inline void uuid_copy(uuid_t *dst, const uuid_t *src)
memcpy(dst, src, sizeof(uuid_t));
}
+static inline void import_uuid(uuid_t *dst, const __u8 *src)
+{
+ memcpy(dst, src, sizeof(uuid_t));
+}
+
+static inline void export_uuid(__u8 *dst, const uuid_t *src)
+{
+ memcpy(dst, src, sizeof(uuid_t));
+}
+
static inline bool uuid_is_null(const uuid_t *uuid)
{
return uuid_equal(uuid, &uuid_null);
}
void generate_random_uuid(unsigned char uuid[16]);
+void generate_random_guid(unsigned char guid[16]);
extern void guid_gen(guid_t *u);
extern void uuid_gen(uuid_t *u);
@@ -77,9 +98,6 @@ int guid_parse(const char *uuid, guid_t *u);
int uuid_parse(const char *uuid, uuid_t *u);
/* backwards compatibility, don't use in new code */
-#define uuid_le_gen(u) guid_gen(u)
-#define uuid_le_to_bin(guid, u) guid_parse(guid, u)
-
static inline int uuid_le_cmp(const guid_t u1, const guid_t u2)
{
return memcmp(&u1, &u2, sizeof(guid_t));
diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h
index ff56c443180c..db8a7d118093 100644
--- a/include/linux/vbox_utils.h
+++ b/include/linux/vbox_utils.h
@@ -16,6 +16,7 @@ struct vbg_dev;
__printf(1, 2) void vbg_info(const char *fmt, ...);
__printf(1, 2) void vbg_warn(const char *fmt, ...);
__printf(1, 2) void vbg_err(const char *fmt, ...);
+__printf(1, 2) void vbg_err_ratelimited(const char *fmt, ...);
/* Only use backdoor logging for non-dynamic debug builds */
#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
new file mode 100644
index 000000000000..6d0f5e4e82c2
--- /dev/null
+++ b/include/linux/vdpa.h
@@ -0,0 +1,513 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VDPA_H
+#define _LINUX_VDPA_H
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/vhost_iotlb.h>
+#include <linux/virtio_net.h>
+#include <linux/if_ether.h>
+
+/**
+ * struct vdpa_callback - vDPA callback definition.
+ * @callback: interrupt callback function
+ * @private: the data passed to the callback function
+ */
+struct vdpa_callback {
+ irqreturn_t (*callback)(void *data);
+ void *private;
+};
+
+/**
+ * struct vdpa_notification_area - vDPA notification area
+ * @addr: base address of the notification area
+ * @size: size of the notification area
+ */
+struct vdpa_notification_area {
+ resource_size_t addr;
+ resource_size_t size;
+};
+
+/**
+ * struct vdpa_vq_state_split - vDPA split virtqueue state
+ * @avail_index: available index
+ */
+struct vdpa_vq_state_split {
+ u16 avail_index;
+};
+
+/**
+ * struct vdpa_vq_state_packed - vDPA packed virtqueue state
+ * @last_avail_counter: last driver ring wrap counter observed by device
+ * @last_avail_idx: device available index
+ * @last_used_counter: device ring wrap counter
+ * @last_used_idx: used index
+ */
+struct vdpa_vq_state_packed {
+ u16 last_avail_counter:1;
+ u16 last_avail_idx:15;
+ u16 last_used_counter:1;
+ u16 last_used_idx:15;
+};
+
+struct vdpa_vq_state {
+ union {
+ struct vdpa_vq_state_split split;
+ struct vdpa_vq_state_packed packed;
+ };
+};
+
+struct vdpa_mgmt_dev;
+
+/**
+ * struct vdpa_device - representation of a vDPA device
+ * @dev: underlying device
+ * @dma_dev: the actual device that is performing DMA
+ * @driver_override: driver name to force a match; do not set directly,
+ * because core frees it; use driver_set_override() to
+ * set or clear it.
+ * @config: the configuration ops for this device.
+ * @cf_lock: Protects get and set access to configuration layout.
+ * @index: device index
+ * @features_valid: whether features were initialized (used for legacy guests)
+ * @ngroups: the number of virtqueue groups
+ * @nas: the number of address spaces
+ * @use_va: indicate whether virtual address must be used by this device
+ * @nvqs: maximum number of supported virtqueues
+ * @mdev: management device pointer; the caller must set this up when
+ * registering the device from the dev_add() mgmtdev ops callback,
+ * before invoking _vdpa_register_device().
+ */
+struct vdpa_device {
+ struct device dev;
+ struct device *dma_dev;
+ const char *driver_override;
+ const struct vdpa_config_ops *config;
+ struct rw_semaphore cf_lock; /* Protects get/set config */
+ unsigned int index;
+ bool features_valid;
+ bool use_va;
+ u32 nvqs;
+ struct vdpa_mgmt_dev *mdev;
+ unsigned int ngroups;
+ unsigned int nas;
+};
+
+/**
+ * struct vdpa_iova_range - the IOVA range support by the device
+ * @first: start of the IOVA range
+ * @last: end of the IOVA range
+ */
+struct vdpa_iova_range {
+ u64 first;
+ u64 last;
+};
+
+struct vdpa_dev_set_config {
+ u64 device_features;
+ struct {
+ u8 mac[ETH_ALEN];
+ u16 mtu;
+ u16 max_vq_pairs;
+ } net;
+ u64 mask;
+};
+
+/**
+ * struct vdpa_map_file - file area backing a device memory mapping
+ * @file: vma->vm_file for the mapping
+ * @offset: mapping offset in the vm_file
+ */
+struct vdpa_map_file {
+ struct file *file;
+ u64 offset;
+};
+
+/**
+ * struct vdpa_config_ops - operations for configuring a vDPA device.
+ * Note: vDPA device drivers are required to implement all of the
+ * operations unless an operation is marked optional in the following
+ * list.
+ *
+ * @set_vq_address: Set the address of virtqueue
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * @desc_area: address of desc area
+ * @driver_area: address of driver area
+ * @device_area: address of device area
+ * Returns integer: success (0) or error (< 0)
+ * @set_vq_num: Set the size of virtqueue
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * @num: the size of virtqueue
+ * @kick_vq: Kick the virtqueue
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * @set_vq_cb: Set the interrupt callback function for
+ * a virtqueue
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * @cb: virtio-vdev interrupt callback structure
+ * @set_vq_ready: Set ready status for a virtqueue
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * @ready: ready (true) or not ready (false)
+ * @get_vq_ready: Get ready status for a virtqueue
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * Returns boolean: ready (true) or not (false)
+ * @set_vq_state: Set the state for a virtqueue
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * @state: pointer to set virtqueue state (last_avail_idx)
+ * Returns integer: success (0) or error (< 0)
+ * @get_vq_state: Get the state for a virtqueue
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * @state: pointer to returned state (last_avail_idx)
+ * @get_vq_notification: Get the notification area for a virtqueue (optional)
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * Returns the notification area
+ * @get_vq_irq: Get the irq number of a virtqueue (optional,
+ * but must be implemented if vq irq offloading is required)
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * Returns int: irq number of a virtqueue,
+ * negative number if no irq assigned.
+ * @get_vq_align: Get the virtqueue align requirement
+ * for the device
+ * @vdev: vdpa device
+ * Returns the virtqueue alignment requirement
+ * @get_vq_group: Get the group id for a specific
+ * virtqueue (optional)
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * Returns u32: group id for this virtqueue
+ * @get_device_features: Get virtio features supported by the device
+ * @vdev: vdpa device
+ * Returns the virtio features supported by the
+ * device
+ * @set_driver_features: Set virtio features supported by the driver
+ * @vdev: vdpa device
+ * @features: features supported by the driver
+ * Returns integer: success (0) or error (< 0)
+ * @get_driver_features: Get the virtio driver features in action
+ * @vdev: vdpa device
+ * Returns the virtio features accepted
+ * @set_config_cb: Set the config interrupt callback
+ * @vdev: vdpa device
+ * @cb: virtio-vdev interrupt callback structure
+ * @get_vq_num_max: Get the max size of virtqueue
+ * @vdev: vdpa device
+ * Returns u16: max size of virtqueue
+ * @get_vq_num_min: Get the min size of virtqueue (optional)
+ * @vdev: vdpa device
+ * Returns u16: min size of virtqueue
+ * @get_device_id: Get virtio device id
+ * @vdev: vdpa device
+ * Returns u32: virtio device id
+ * @get_vendor_id: Get id for the vendor that provides this device
+ * @vdev: vdpa device
+ * Returns u32: virtio vendor id
+ * @get_status: Get the device status
+ * @vdev: vdpa device
+ * Returns u8: virtio device status
+ * @set_status: Set the device status
+ * @vdev: vdpa device
+ * @status: virtio device status
+ * @reset: Reset device
+ * @vdev: vdpa device
+ * Returns integer: success (0) or error (< 0)
+ * @suspend: Suspend the device (optional)
+ * @vdev: vdpa device
+ * Returns integer: success (0) or error (< 0)
+ * @get_config_size: Get the size of the configuration space, including
+ * fields that are conditional on feature bits.
+ * @vdev: vdpa device
+ * Returns size_t: configuration size
+ * @get_config: Read from device specific configuration space
+ * @vdev: vdpa device
+ * @offset: offset from the beginning of
+ * configuration space
+ * @buf: buffer to read into
+ * @len: the length to read from
+ * configuration space
+ * @set_config: Write to device specific configuration space
+ * @vdev: vdpa device
+ * @offset: offset from the beginning of
+ * configuration space
+ * @buf: buffer used to write from
+ * @len: the length to write to
+ * configuration space
+ * @get_generation: Get device config generation (optional)
+ * @vdev: vdpa device
+ * Returns u32: device generation
+ * @get_iova_range: Get supported iova range (optional)
+ * @vdev: vdpa device
+ * Returns the iova range supported by
+ * the device.
+ * @set_group_asid: Set address space identifier for a
+ * virtqueue group (optional)
+ * @vdev: vdpa device
+ * @group: virtqueue group
+ * @asid: address space id for this group
+ * Returns integer: success (0) or error (< 0)
+ * @set_map: Set device memory mapping (optional)
+ * Needed for devices that use device-
+ * specific DMA translation (on-chip IOMMU)
+ * @vdev: vdpa device
+ * @asid: address space identifier
+ * @iotlb: vhost memory mapping to be
+ * used by the vDPA
+ * Returns integer: success (0) or error (< 0)
+ * @dma_map: Map an area of PA to IOVA (optional)
+ * Needed for devices that use device-
+ * specific DMA translation (on-chip IOMMU)
+ * and prefer incremental map.
+ * @vdev: vdpa device
+ * @asid: address space identifier
+ * @iova: iova to be mapped
+ * @size: size of the area
+ * @pa: physical address for the map
+ * @perm: device access permission (VHOST_MAP_XX)
+ * Returns integer: success (0) or error (< 0)
+ * @dma_unmap: Unmap an area of IOVA (optional but
+ * must be implemented with dma_map)
+ * Needed for devices that use device-
+ * specific DMA translation (on-chip IOMMU)
+ * and prefer incremental unmap.
+ * @vdev: vdpa device
+ * @asid: address space identifier
+ * @iova: iova to be unmapped
+ * @size: size of the area
+ * Returns integer: success (0) or error (< 0)
+ * @free: Free resources that belong to vDPA (optional)
+ * @vdev: vdpa device
+ */
+struct vdpa_config_ops {
+ /* Virtqueue ops */
+ int (*set_vq_address)(struct vdpa_device *vdev,
+ u16 idx, u64 desc_area, u64 driver_area,
+ u64 device_area);
+ void (*set_vq_num)(struct vdpa_device *vdev, u16 idx, u32 num);
+ void (*kick_vq)(struct vdpa_device *vdev, u16 idx);
+ void (*set_vq_cb)(struct vdpa_device *vdev, u16 idx,
+ struct vdpa_callback *cb);
+ void (*set_vq_ready)(struct vdpa_device *vdev, u16 idx, bool ready);
+ bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx);
+ int (*set_vq_state)(struct vdpa_device *vdev, u16 idx,
+ const struct vdpa_vq_state *state);
+ int (*get_vq_state)(struct vdpa_device *vdev, u16 idx,
+ struct vdpa_vq_state *state);
+ int (*get_vendor_vq_stats)(struct vdpa_device *vdev, u16 idx,
+ struct sk_buff *msg,
+ struct netlink_ext_ack *extack);
+ struct vdpa_notification_area
+ (*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
+ /* vq irq is not expected to be changed once DRIVER_OK is set */
+ int (*get_vq_irq)(struct vdpa_device *vdev, u16 idx);
+
+ /* Device ops */
+ u32 (*get_vq_align)(struct vdpa_device *vdev);
+ u32 (*get_vq_group)(struct vdpa_device *vdev, u16 idx);
+ u64 (*get_device_features)(struct vdpa_device *vdev);
+ int (*set_driver_features)(struct vdpa_device *vdev, u64 features);
+ u64 (*get_driver_features)(struct vdpa_device *vdev);
+ void (*set_config_cb)(struct vdpa_device *vdev,
+ struct vdpa_callback *cb);
+ u16 (*get_vq_num_max)(struct vdpa_device *vdev);
+ u16 (*get_vq_num_min)(struct vdpa_device *vdev);
+ u32 (*get_device_id)(struct vdpa_device *vdev);
+ u32 (*get_vendor_id)(struct vdpa_device *vdev);
+ u8 (*get_status)(struct vdpa_device *vdev);
+ void (*set_status)(struct vdpa_device *vdev, u8 status);
+ int (*reset)(struct vdpa_device *vdev);
+ int (*suspend)(struct vdpa_device *vdev);
+ size_t (*get_config_size)(struct vdpa_device *vdev);
+ void (*get_config)(struct vdpa_device *vdev, unsigned int offset,
+ void *buf, unsigned int len);
+ void (*set_config)(struct vdpa_device *vdev, unsigned int offset,
+ const void *buf, unsigned int len);
+ u32 (*get_generation)(struct vdpa_device *vdev);
+ struct vdpa_iova_range (*get_iova_range)(struct vdpa_device *vdev);
+
+ /* DMA ops */
+ int (*set_map)(struct vdpa_device *vdev, unsigned int asid,
+ struct vhost_iotlb *iotlb);
+ int (*dma_map)(struct vdpa_device *vdev, unsigned int asid,
+ u64 iova, u64 size, u64 pa, u32 perm, void *opaque);
+ int (*dma_unmap)(struct vdpa_device *vdev, unsigned int asid,
+ u64 iova, u64 size);
+ int (*set_group_asid)(struct vdpa_device *vdev, unsigned int group,
+ unsigned int asid);
+
+ /* Free device resources */
+ void (*free)(struct vdpa_device *vdev);
+};
+
+struct vdpa_device *__vdpa_alloc_device(struct device *parent,
+ const struct vdpa_config_ops *config,
+ unsigned int ngroups, unsigned int nas,
+ size_t size, const char *name,
+ bool use_va);
+
+/**
+ * vdpa_alloc_device - allocate and initialize a vDPA device
+ *
+ * @dev_struct: the type of the parent structure
+ * @member: the name of struct vdpa_device within the @dev_struct
+ * @parent: the parent device
+ * @config: the bus operations that are supported by this device
+ * @ngroups: the number of virtqueue groups supported by this device
+ * @nas: the number of address spaces
+ * @name: name of the vdpa device
+ * @use_va: indicate whether virtual address must be used by this device
+ *
+ * Returns the allocated data structure or an ERR_PTR() on error
+ */
+#define vdpa_alloc_device(dev_struct, member, parent, config, ngroups, nas, \
+ name, use_va) \
+ container_of((__vdpa_alloc_device( \
+ parent, config, ngroups, nas, \
+ (sizeof(dev_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof( \
+ dev_struct, member))), name, use_va)), \
+ dev_struct, member)
+
+int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
+void vdpa_unregister_device(struct vdpa_device *vdev);
+
+int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
+void _vdpa_unregister_device(struct vdpa_device *vdev);
+
+/**
+ * struct vdpa_driver - operations for a vDPA driver
+ * @driver: underlying device driver
+ * @probe: the function to call when a device is found. Returns 0 or -errno.
+ * @remove: the function to call when a device is removed.
+ */
+struct vdpa_driver {
+ struct device_driver driver;
+ int (*probe)(struct vdpa_device *vdev);
+ void (*remove)(struct vdpa_device *vdev);
+};
+
+#define vdpa_register_driver(drv) \
+ __vdpa_register_driver(drv, THIS_MODULE)
+int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner);
+void vdpa_unregister_driver(struct vdpa_driver *drv);
+
+#define module_vdpa_driver(__vdpa_driver) \
+ module_driver(__vdpa_driver, vdpa_register_driver, \
+ vdpa_unregister_driver)
+
+static inline struct vdpa_driver *drv_to_vdpa(struct device_driver *driver)
+{
+ return container_of(driver, struct vdpa_driver, driver);
+}
+
+static inline struct vdpa_device *dev_to_vdpa(struct device *_dev)
+{
+ return container_of(_dev, struct vdpa_device, dev);
+}
+
+static inline void *vdpa_get_drvdata(const struct vdpa_device *vdev)
+{
+ return dev_get_drvdata(&vdev->dev);
+}
+
+static inline void vdpa_set_drvdata(struct vdpa_device *vdev, void *data)
+{
+ dev_set_drvdata(&vdev->dev, data);
+}
+
+static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev)
+{
+ return vdev->dma_dev;
+}
+
+static inline int vdpa_reset(struct vdpa_device *vdev)
+{
+ const struct vdpa_config_ops *ops = vdev->config;
+ int ret;
+
+ down_write(&vdev->cf_lock);
+ vdev->features_valid = false;
+ ret = ops->reset(vdev);
+ up_write(&vdev->cf_lock);
+ return ret;
+}
+
+static inline int vdpa_set_features_unlocked(struct vdpa_device *vdev, u64 features)
+{
+ const struct vdpa_config_ops *ops = vdev->config;
+ int ret;
+
+ vdev->features_valid = true;
+ ret = ops->set_driver_features(vdev, features);
+
+ return ret;
+}
+
+static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
+{
+ int ret;
+
+ down_write(&vdev->cf_lock);
+ ret = vdpa_set_features_unlocked(vdev, features);
+ up_write(&vdev->cf_lock);
+
+ return ret;
+}
+
+void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
+ void *buf, unsigned int len);
+void vdpa_set_config(struct vdpa_device *dev, unsigned int offset,
+ const void *buf, unsigned int length);
+void vdpa_set_status(struct vdpa_device *vdev, u8 status);
+
+/**
+ * struct vdpa_mgmtdev_ops - vdpa device ops
+ * @dev_add: Add a vdpa device using alloc and register
+ * @mdev: parent device to use for device addition
+ * @name: name of the new vdpa device
+ * @config: config attributes to apply to the device under creation
+ * The driver needs to add a new device using _vdpa_register_device()
+ * after fully initializing the vdpa device. The driver must return 0
+ * on success or an appropriate error code.
+ * @dev_del: Remove a vdpa device using unregister
+ * @mdev: parent device to use for device removal
+ * @dev: vdpa device to remove
+ * The driver needs to remove the specified device by calling
+ * _vdpa_unregister_device().
+ */
+struct vdpa_mgmtdev_ops {
+ int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name,
+ const struct vdpa_dev_set_config *config);
+ void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev);
+};
+
+/**
+ * struct vdpa_mgmt_dev - vdpa management device
+ * @device: Management parent device
+ * @ops: operations supported by management device
+ * @id_table: Pointer to device id table of supported ids
+ * @config_attr_mask: bit mask of attributes of type enum vdpa_attr that
+ * the management device supports during the dev_add callback
+ * @list: list entry
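+ * @supported_features: features supported by the device
+ * @max_supported_vqs: maximum number of virtqueues supported by the device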
+ */
+struct vdpa_mgmt_dev {
+ struct device *device;
+ const struct vdpa_mgmtdev_ops *ops;
+ struct virtio_device_id *id_table;
+ u64 config_attr_mask;
+ struct list_head list;
+ u64 supported_features;
+ u32 max_supported_vqs;
+};
+
+int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev);
+void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev);
+
+#endif /* _LINUX_VDPA_H */
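Putting the allocation macro and registration entry points together, a compressed, hedged sketch of a parent driver creating a vDPA device (struct my_vdpa, my_ops, and the queue counts are illustrative):

struct my_vdpa {
	struct vdpa_device vdpa;	/* embedded vDPA device */
	/* driver-private state ... */
};

static int my_probe(struct device *parent, const struct vdpa_config_ops *my_ops)
{
	struct my_vdpa *my;

	/* One virtqueue group, one address space, physical addressing. */
	my = vdpa_alloc_device(struct my_vdpa, vdpa, parent, my_ops,
			       1, 1, "my_vdpa", false);
	if (IS_ERR(my))
		return PTR_ERR(my);

	return vdpa_register_device(&my->vdpa, 2 /* nvqs */);
}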
diff --git a/include/linux/verification.h b/include/linux/verification.h
index 911ab7c2b1ab..f34e50ebcf60 100644
--- a/include/linux/verification.h
+++ b/include/linux/verification.h
@@ -8,6 +8,8 @@
#ifndef _LINUX_VERIFICATION_H
#define _LINUX_VERIFICATION_H
+#include <linux/types.h>
+
/*
* Indicate that both builtin trusted keys and secondary trusted keys
* should be used.
@@ -15,6 +17,14 @@
#define VERIFY_USE_SECONDARY_KEYRING ((struct key *)1UL)
#define VERIFY_USE_PLATFORM_KEYRING ((struct key *)2UL)
+static inline int system_keyring_id_check(u64 id)
+{
+ if (id > (unsigned long)VERIFY_USE_PLATFORM_KEYRING)
+ return -EINVAL;
+
+ return 0;
+}
+
/*
* The use to which an asymmetric key is being put.
*/
diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
index 9aced11e9000..a54046bf37e5 100644
--- a/include/linux/vermagic.h
+++ b/include/linux/vermagic.h
@@ -1,5 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VERMAGIC_H
+#define _LINUX_VERMAGIC_H
+
+#ifndef INCLUDE_VERMAGIC
+#error "This header can be included from kernel/module.c or *.mod.c only"
+#endif
+
#include <generated/utsrelease.h>
+#include <asm/vermagic.h>
/* Simple sanity version stamp for modules. */
#ifdef CONFIG_SMP
@@ -7,7 +15,7 @@
#else
#define MODULE_VERMAGIC_SMP ""
#endif
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_BUILD
#define MODULE_VERMAGIC_PREEMPT "preempt "
#elif defined(CONFIG_PREEMPT_RT)
#define MODULE_VERMAGIC_PREEMPT "preempt_rt "
@@ -24,14 +32,11 @@
#else
#define MODULE_VERMAGIC_MODVERSIONS ""
#endif
-#ifndef MODULE_ARCH_VERMAGIC
-#define MODULE_ARCH_VERMAGIC ""
-#endif
-#ifdef RANDSTRUCT_PLUGIN
-#include <generated/randomize_layout_hash.h>
-#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
+#ifdef RANDSTRUCT
+#include <generated/randstruct_hash.h>
+#define MODULE_RANDSTRUCT "RANDSTRUCT_" RANDSTRUCT_HASHED_SEED
#else
-#define MODULE_RANDSTRUCT_PLUGIN
+#define MODULE_RANDSTRUCT
#endif
#define VERMAGIC_STRING \
@@ -39,5 +44,6 @@
MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
MODULE_ARCH_VERMAGIC \
- MODULE_RANDSTRUCT_PLUGIN
+ MODULE_RANDSTRUCT
+#endif /* _LINUX_VERMAGIC_H */
diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h
index 0e130b5077a5..2f9dd072f11f 100644
--- a/include/linux/vexpress.h
+++ b/include/linux/vexpress.h
@@ -10,38 +10,8 @@
#include <linux/device.h>
#include <linux/regmap.h>
-#define VEXPRESS_SITE_MB 0
-#define VEXPRESS_SITE_DB1 1
-#define VEXPRESS_SITE_DB2 2
-#define VEXPRESS_SITE_MASTER 0xf
-
-/* Config infrastructure */
-
-void vexpress_config_set_master(u32 site);
-u32 vexpress_config_get_master(void);
-
-void vexpress_config_lock(void *arg);
-void vexpress_config_unlock(void *arg);
-
-int vexpress_config_get_topo(struct device_node *node, u32 *site,
- u32 *position, u32 *dcc);
-
-/* Config bridge API */
-
-struct vexpress_config_bridge_ops {
- struct regmap * (*regmap_init)(struct device *dev, void *context);
- void (*regmap_exit)(struct regmap *regmap, void *context);
-};
-
-struct device *vexpress_config_bridge_register(struct device *parent,
- struct vexpress_config_bridge_ops *ops, void *context);
-
/* Config regmap API */
struct regmap *devm_regmap_init_vexpress_config(struct device *dev);
-/* Platform control */
-
-void vexpress_flags_set(u32 data);
-
#endif
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index e42a711a2800..e7cebeb875dd 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -14,121 +14,203 @@
#include <linux/workqueue.h>
#include <linux/poll.h>
#include <uapi/linux/vfio.h>
+#include <linux/iova_bitmap.h>
+
+struct kvm;
+
+/*
+ * VFIO devices can be placed in a set; this allows all devices to share this
+ * structure and the VFIO core will provide a lock that is held around
+ * open_device()/close_device() for all devices in the set.
+ */
+struct vfio_device_set {
+ void *set_id;
+ struct mutex lock;
+ struct list_head device_list;
+ unsigned int device_count;
+};
+
+struct vfio_device {
+ struct device *dev;
+ const struct vfio_device_ops *ops;
+ /*
+ * mig_ops/log_ops is a static property of the vfio_device which must
+ * be set prior to registering the vfio_device.
+ */
+ const struct vfio_migration_ops *mig_ops;
+ const struct vfio_log_ops *log_ops;
+ struct vfio_group *group;
+ struct vfio_device_set *dev_set;
+ struct list_head dev_set_list;
+ unsigned int migration_flags;
+ /* Driver must reference the kvm during open_device or never touch it */
+ struct kvm *kvm;
+
+ /* Members below here are private, not for driver use */
+ unsigned int index;
+ struct device device; /* device.kref covers object life cycle */
+ refcount_t refcount; /* user count on registered device */
+ unsigned int open_count;
+ struct completion comp;
+ struct list_head group_next;
+ struct list_head iommu_entry;
+};
/**
* struct vfio_device_ops - VFIO bus driver device callbacks
*
- * @open: Called when userspace creates new file descriptor for device
- * @release: Called when userspace releases file descriptor for device
+ * @init: initialize private fields in device structure
+ * @release: Reclaim private fields in device structure
+ * @open_device: Called when the first file descriptor is opened for this device
+ * @close_device: Opposite of open_device
* @read: Perform read(2) on device file descriptor
* @write: Perform write(2) on device file descriptor
* @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_*
* operations documented below
* @mmap: Perform mmap(2) on a region of the device file descriptor
* @request: Request for the bus driver to release the device
+ * @match: Optional device name match callback (return: 0 for no-match, >0 for
+ * match, -errno for abort (e.g. a match with insufficient or incorrect
+ * additional args))
+ * @dma_unmap: Called when userspace unmaps IOVA from the container
+ * this device is attached to.
+ * @device_feature: Optional, fill in the VFIO_DEVICE_FEATURE ioctl
*/
struct vfio_device_ops {
char *name;
- int (*open)(void *device_data);
- void (*release)(void *device_data);
- ssize_t (*read)(void *device_data, char __user *buf,
+ int (*init)(struct vfio_device *vdev);
+ void (*release)(struct vfio_device *vdev);
+ int (*open_device)(struct vfio_device *vdev);
+ void (*close_device)(struct vfio_device *vdev);
+ ssize_t (*read)(struct vfio_device *vdev, char __user *buf,
size_t count, loff_t *ppos);
- ssize_t (*write)(void *device_data, const char __user *buf,
+ ssize_t (*write)(struct vfio_device *vdev, const char __user *buf,
size_t count, loff_t *size);
- long (*ioctl)(void *device_data, unsigned int cmd,
+ long (*ioctl)(struct vfio_device *vdev, unsigned int cmd,
unsigned long arg);
- int (*mmap)(void *device_data, struct vm_area_struct *vma);
- void (*request)(void *device_data, unsigned int count);
+ int (*mmap)(struct vfio_device *vdev, struct vm_area_struct *vma);
+ void (*request)(struct vfio_device *vdev, unsigned int count);
+ int (*match)(struct vfio_device *vdev, char *buf);
+ void (*dma_unmap)(struct vfio_device *vdev, u64 iova, u64 length);
+ int (*device_feature)(struct vfio_device *device, u32 flags,
+ void __user *arg, size_t argsz);
};
-extern struct iommu_group *vfio_iommu_group_get(struct device *dev);
-extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev);
-
-extern int vfio_add_group_dev(struct device *dev,
- const struct vfio_device_ops *ops,
- void *device_data);
-
-extern void *vfio_del_group_dev(struct device *dev);
-extern struct vfio_device *vfio_device_get_from_dev(struct device *dev);
-extern void vfio_device_put(struct vfio_device *device);
-extern void *vfio_device_data(struct vfio_device *device);
-
/**
- * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks
+ * @migration_set_state: Optional callback to change the migration state for
+ * devices that support migration. It's mandatory for
+ * VFIO_DEVICE_FEATURE_MIGRATION migration support.
+ * The returned FD is used for data transfer according to the FSM
+ * definition. The driver is responsible for ensuring the FD reaches end
+ * of stream or error whenever the migration FSM leaves a data transfer
+ * state or before close_device() returns.
+ * @migration_get_state: Optional callback to get the migration state for
+ * devices that support migration. It's mandatory for
+ * VFIO_DEVICE_FEATURE_MIGRATION migration support.
*/
-struct vfio_iommu_driver_ops {
- char *name;
- struct module *owner;
- void *(*open)(unsigned long arg);
- void (*release)(void *iommu_data);
- ssize_t (*read)(void *iommu_data, char __user *buf,
- size_t count, loff_t *ppos);
- ssize_t (*write)(void *iommu_data, const char __user *buf,
- size_t count, loff_t *size);
- long (*ioctl)(void *iommu_data, unsigned int cmd,
- unsigned long arg);
- int (*mmap)(void *iommu_data, struct vm_area_struct *vma);
- int (*attach_group)(void *iommu_data,
- struct iommu_group *group);
- void (*detach_group)(void *iommu_data,
- struct iommu_group *group);
- int (*pin_pages)(void *iommu_data, unsigned long *user_pfn,
- int npage, int prot,
- unsigned long *phys_pfn);
- int (*unpin_pages)(void *iommu_data,
- unsigned long *user_pfn, int npage);
- int (*register_notifier)(void *iommu_data,
- unsigned long *events,
- struct notifier_block *nb);
- int (*unregister_notifier)(void *iommu_data,
- struct notifier_block *nb);
+struct vfio_migration_ops {
+ struct file *(*migration_set_state)(
+ struct vfio_device *device,
+ enum vfio_device_mig_state new_state);
+ int (*migration_get_state)(struct vfio_device *device,
+ enum vfio_device_mig_state *curr_state);
};
-extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
-
-extern void vfio_unregister_iommu_driver(
- const struct vfio_iommu_driver_ops *ops);
+/**
+ * @log_start: Optional callback to ask the device to start DMA logging.
+ * @log_stop: Optional callback to ask the device to stop DMA logging.
+ * @log_read_and_clear: Optional callback to ask the device to read
+ * and clear the dirty DMAs in some given range.
+ *
+ * The vfio core implementation of the DEVICE_FEATURE_DMA_LOGGING_ set
+ * of features does not track logging state relative to the device,
+ * therefore the device implementation of vfio_log_ops must handle
+ * arbitrary user requests. This includes rejecting subsequent calls
+ * to log_start without an intervening log_stop, as well as graceful
+ * handling of log_stop and log_read_and_clear from invalid states.
+ */
+struct vfio_log_ops {
+ int (*log_start)(struct vfio_device *device,
+ struct rb_root_cached *ranges, u32 nnodes, u64 *page_size);
+ int (*log_stop)(struct vfio_device *device);
+ int (*log_read_and_clear)(struct vfio_device *device,
+ unsigned long iova, unsigned long length,
+ struct iova_bitmap *dirty);
+};
-/*
- * External user API
+/**
+ * vfio_check_feature - Validate user input for the VFIO_DEVICE_FEATURE ioctl
+ * @flags: Arg from the device_feature op
+ * @argsz: Arg from the device_feature op
+ * @supported_ops: Combination of VFIO_DEVICE_FEATURE_GET and SET the driver
+ * supports
+ * @minsz: Minimum data size the driver accepts
+ *
+ * For use in a driver's device_feature op. Checks that the inputs to the
+ * VFIO_DEVICE_FEATURE ioctl are correct for the driver's feature. Returns 1 if
+ * the driver should execute the get or set, otherwise the relevant
+ * value should be returned.
*/
-extern struct vfio_group *vfio_group_get_external_user(struct file *filep);
-extern void vfio_group_put_external_user(struct vfio_group *group);
-extern bool vfio_external_group_match_file(struct vfio_group *group,
- struct file *filep);
-extern int vfio_external_user_iommu_id(struct vfio_group *group);
-extern long vfio_external_check_extension(struct vfio_group *group,
- unsigned long arg);
+static inline int vfio_check_feature(u32 flags, size_t argsz, u32 supported_ops,
+ size_t minsz)
+{
+ if ((flags & (VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_SET)) &
+ ~supported_ops)
+ return -EINVAL;
+ if (flags & VFIO_DEVICE_FEATURE_PROBE)
+ return 0;
+ /* Without PROBE one of GET or SET must be requested */
+ if (!(flags & (VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_SET)))
+ return -EINVAL;
+ if (argsz < minsz)
+ return -EINVAL;
+ return 1;
+}
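For example, a driver's device_feature op for a hypothetical GET-only feature with a u64 payload might validate its inputs like this (a sketch; the payload and names are illustrative):

static int my_device_feature(struct vfio_device *device, u32 flags,
			     void __user *arg, size_t argsz)
{
	u64 payload = 0;	/* illustrative feature data */
	int ret;

	ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_GET,
				 sizeof(payload));
	if (ret != 1)
		return ret;	/* 0 for a successful PROBE, or -errno */

	if (copy_to_user(arg, &payload, sizeof(payload)))
		return -EFAULT;
	return 0;
}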
-#define VFIO_PIN_PAGES_MAX_ENTRIES (PAGE_SIZE/sizeof(unsigned long))
+struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev,
+ const struct vfio_device_ops *ops);
+#define vfio_alloc_device(dev_struct, member, dev, ops) \
+ container_of(_vfio_alloc_device(sizeof(struct dev_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof( \
+ struct dev_struct, member)), \
+ dev, ops), \
+ struct dev_struct, member)
+
+int vfio_init_device(struct vfio_device *device, struct device *dev,
+ const struct vfio_device_ops *ops);
+void vfio_free_device(struct vfio_device *device);
+static inline void vfio_put_device(struct vfio_device *device)
+{
+ put_device(&device->device);
+}
-extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn,
- int npage, int prot, unsigned long *phys_pfn);
-extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn,
- int npage);
+int vfio_register_group_dev(struct vfio_device *device);
+int vfio_register_emulated_iommu_dev(struct vfio_device *device);
+void vfio_unregister_group_dev(struct vfio_device *device);
-/* each type has independent events */
-enum vfio_notify_type {
- VFIO_IOMMU_NOTIFY = 0,
- VFIO_GROUP_NOTIFY = 1,
-};
+int vfio_assign_device_set(struct vfio_device *device, void *set_id);
-/* events for VFIO_IOMMU_NOTIFY */
-#define VFIO_IOMMU_NOTIFY_DMA_UNMAP BIT(0)
+int vfio_mig_get_next_state(struct vfio_device *device,
+ enum vfio_device_mig_state cur_fsm,
+ enum vfio_device_mig_state new_fsm,
+ enum vfio_device_mig_state *next_fsm);
-/* events for VFIO_GROUP_NOTIFY */
-#define VFIO_GROUP_NOTIFY_SET_KVM BIT(0)
+/*
+ * External user API
+ */
+struct iommu_group *vfio_file_iommu_group(struct file *file);
+bool vfio_file_is_group(struct file *file);
+bool vfio_file_enforced_coherent(struct file *file);
+void vfio_file_set_kvm(struct file *file, struct kvm *kvm);
+bool vfio_file_has_dev(struct file *file, struct vfio_device *device);
-extern int vfio_register_notifier(struct device *dev,
- enum vfio_notify_type type,
- unsigned long *required_events,
- struct notifier_block *nb);
-extern int vfio_unregister_notifier(struct device *dev,
- enum vfio_notify_type type,
- struct notifier_block *nb);
+#define VFIO_PIN_PAGES_MAX_ENTRIES (PAGE_SIZE/sizeof(unsigned long))
-struct kvm;
-extern void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm);
+int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
+ int npage, int prot, struct page **pages);
+void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage);
+int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova,
+ void *data, size_t len, bool write);
/*
* Sub-module helpers
@@ -137,25 +219,24 @@ struct vfio_info_cap {
struct vfio_info_cap_header *buf;
size_t size;
};
-extern struct vfio_info_cap_header *vfio_info_cap_add(
- struct vfio_info_cap *caps, size_t size, u16 id, u16 version);
-extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset);
+struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
+ size_t size, u16 id,
+ u16 version);
+void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset);
-extern int vfio_info_add_capability(struct vfio_info_cap *caps,
- struct vfio_info_cap_header *cap,
- size_t size);
+int vfio_info_add_capability(struct vfio_info_cap *caps,
+ struct vfio_info_cap_header *cap, size_t size);
-extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr,
- int num_irqs, int max_irq_type,
- size_t *data_size);
+int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr,
+ int num_irqs, int max_irq_type,
+ size_t *data_size);
struct pci_dev;
#if IS_ENABLED(CONFIG_VFIO_SPAPR_EEH)
-extern void vfio_spapr_pci_eeh_open(struct pci_dev *pdev);
-extern void vfio_spapr_pci_eeh_release(struct pci_dev *pdev);
-extern long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group,
- unsigned int cmd,
- unsigned long arg);
+void vfio_spapr_pci_eeh_open(struct pci_dev *pdev);
+void vfio_spapr_pci_eeh_release(struct pci_dev *pdev);
+long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group, unsigned int cmd,
+ unsigned long arg);
#else
static inline void vfio_spapr_pci_eeh_open(struct pci_dev *pdev)
{
@@ -189,10 +270,9 @@ struct virqfd {
struct virqfd **pvirqfd;
};
-extern int vfio_virqfd_enable(void *opaque,
- int (*handler)(void *, void *),
- void (*thread)(void *, void *),
- void *data, struct virqfd **pvirqfd, int fd);
-extern void vfio_virqfd_disable(struct virqfd **pvirqfd);
+int vfio_virqfd_enable(void *opaque, int (*handler)(void *, void *),
+ void (*thread)(void *, void *), void *data,
+ struct virqfd **pvirqfd, int fd);
+void vfio_virqfd_disable(struct virqfd **pvirqfd);
#endif /* VFIO_H */
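A hedged sketch of the reworked pinning API from a driver's perspective; the single-page cycle below is illustrative (prot flags come from <linux/iommu.h>):

#include <linux/iommu.h>

/* Pin one page of user IOVA for DMA, use it, then release it. */
static int my_dma_cycle(struct vfio_device *device, dma_addr_t iova)
{
	struct page *page;
	int ret;

	ret = vfio_pin_pages(device, iova, 1, IOMMU_READ | IOMMU_WRITE, &page);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... DMA or CPU access to the pinned page ... */

	vfio_unpin_pages(device, iova, 1);
	return 0;
}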
diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h
new file mode 100644
index 000000000000..367fd79226a3
--- /dev/null
+++ b/include/linux/vfio_pci_core.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * Derived from original vfio:
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ * Author: Tom Lyon, pugs@cisco.com
+ */
+
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/vfio.h>
+#include <linux/irqbypass.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+#include <linux/notifier.h>
+
+#ifndef VFIO_PCI_CORE_H
+#define VFIO_PCI_CORE_H
+
+#define VFIO_PCI_OFFSET_SHIFT 40
+#define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT)
+#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
+#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
+
+struct vfio_pci_core_device;
+struct vfio_pci_region;
+
+struct vfio_pci_regops {
+ ssize_t (*rw)(struct vfio_pci_core_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite);
+ void (*release)(struct vfio_pci_core_device *vdev,
+ struct vfio_pci_region *region);
+ int (*mmap)(struct vfio_pci_core_device *vdev,
+ struct vfio_pci_region *region,
+ struct vm_area_struct *vma);
+ int (*add_capability)(struct vfio_pci_core_device *vdev,
+ struct vfio_pci_region *region,
+ struct vfio_info_cap *caps);
+};
+
+struct vfio_pci_region {
+ u32 type;
+ u32 subtype;
+ const struct vfio_pci_regops *ops;
+ void *data;
+ size_t size;
+ u32 flags;
+};
+
+struct vfio_pci_core_device {
+ struct vfio_device vdev;
+ struct pci_dev *pdev;
+ void __iomem *barmap[PCI_STD_NUM_BARS];
+ bool bar_mmap_supported[PCI_STD_NUM_BARS];
+ u8 *pci_config_map;
+ u8 *vconfig;
+ struct perm_bits *msi_perm;
+ spinlock_t irqlock;
+ struct mutex igate;
+ struct vfio_pci_irq_ctx *ctx;
+ int num_ctx;
+ int irq_type;
+ int num_regions;
+ struct vfio_pci_region *region;
+ u8 msi_qmax;
+ u8 msix_bar;
+ u16 msix_size;
+ u32 msix_offset;
+ u32 rbar[7];
+ bool pci_2_3;
+ bool virq_disabled;
+ bool reset_works;
+ bool extended_caps;
+ bool bardirty;
+ bool has_vga;
+ bool needs_reset;
+ bool nointx;
+ bool needs_pm_restore;
+ bool pm_intx_masked;
+ bool pm_runtime_engaged;
+ struct pci_saved_state *pci_saved_state;
+ struct pci_saved_state *pm_save;
+ int ioeventfds_nr;
+ struct eventfd_ctx *err_trigger;
+ struct eventfd_ctx *req_trigger;
+ struct eventfd_ctx *pm_wake_eventfd_ctx;
+ struct list_head dummy_resources_list;
+ struct mutex ioeventfds_lock;
+ struct list_head ioeventfds_list;
+ struct vfio_pci_vf_token *vf_token;
+ struct list_head sriov_pfs_item;
+ struct vfio_pci_core_device *sriov_pf_core_dev;
+ struct notifier_block nb;
+ struct mutex vma_lock;
+ struct list_head vma_list;
+ struct rw_semaphore memory_lock;
+};
+
+/* Will be exported for vfio pci drivers usage */
+int vfio_pci_core_register_dev_region(struct vfio_pci_core_device *vdev,
+ unsigned int type, unsigned int subtype,
+ const struct vfio_pci_regops *ops,
+ size_t size, u32 flags, void *data);
+void vfio_pci_core_set_params(bool nointxmask, bool is_disable_vga,
+ bool is_disable_idle_d3);
+void vfio_pci_core_close_device(struct vfio_device *core_vdev);
+int vfio_pci_core_init_dev(struct vfio_device *core_vdev);
+void vfio_pci_core_release_dev(struct vfio_device *core_vdev);
+int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev);
+void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev);
+extern const struct pci_error_handlers vfio_pci_core_err_handlers;
+int vfio_pci_core_sriov_configure(struct vfio_pci_core_device *vdev,
+ int nr_virtfn);
+long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
+ unsigned long arg);
+int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags,
+ void __user *arg, size_t argsz);
+ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
+ size_t count, loff_t *ppos);
+ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *buf,
+ size_t count, loff_t *ppos);
+int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma);
+void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count);
+int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf);
+int vfio_pci_core_enable(struct vfio_pci_core_device *vdev);
+void vfio_pci_core_disable(struct vfio_pci_core_device *vdev);
+void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev);
+pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+
+#endif /* VFIO_PCI_CORE_H */
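A minimal sketch of how a vendor PCI driver is expected to build on this core layer, delegating most callbacks to the exported helpers (my_open_device is an illustrative wrapper around vfio_pci_core_enable()/vfio_pci_core_finish_enable()):

static const struct vfio_device_ops my_vfio_pci_ops = {
	.name		= "my-vfio-pci",
	.init		= vfio_pci_core_init_dev,
	.release	= vfio_pci_core_release_dev,
	.open_device	= my_open_device,
	.close_device	= vfio_pci_core_close_device,
	.ioctl		= vfio_pci_core_ioctl,
	.device_feature	= vfio_pci_core_ioctl_feature,
	.read		= vfio_pci_core_read,
	.write		= vfio_pci_core_write,
	.mmap		= vfio_pci_core_mmap,
	.request	= vfio_pci_core_request,
	.match		= vfio_pci_core_match,
};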
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 553b34c8b5f7..b4b9137f9792 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -33,6 +33,8 @@
#include <video/vga.h>
+struct pci_dev;
+
/* Legacy VGA regions */
#define VGA_RSRC_NONE 0x00
#define VGA_RSRC_LEGACY_IO 0x01
@@ -42,42 +44,45 @@
#define VGA_RSRC_NORMAL_IO 0x04
#define VGA_RSRC_NORMAL_MEM 0x08
-/* Passing that instead of a pci_dev to use the system "default"
- * device, that is the one used by vgacon. Archs will probably
- * have to provide their own vga_default_device();
- */
-#define VGA_DEFAULT_DEVICE (NULL)
-
-struct pci_dev;
-
-/* For use by clients */
-
-/**
- * vga_set_legacy_decoding
- *
- * @pdev: pci device of the VGA card
- * @decodes: bit mask of what legacy regions the card decodes
- *
- * Indicates to the arbiter if the card decodes legacy VGA IOs,
- * legacy VGA Memory, both, or none. All cards default to both,
- * the card driver (fbdev for example) should tell the arbiter
- * if it has disabled legacy decoding, so the card can be left
- * out of the arbitration process (and can be safe to take
- * interrupts at any time.
- */
-#if defined(CONFIG_VGA_ARB)
-extern void vga_set_legacy_decoding(struct pci_dev *pdev,
- unsigned int decodes);
-#else
+#ifdef CONFIG_VGA_ARB
+void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes);
+int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible);
+void vga_put(struct pci_dev *pdev, unsigned int rsrc);
+struct pci_dev *vga_default_device(void);
+void vga_set_default_device(struct pci_dev *pdev);
+int vga_remove_vgacon(struct pci_dev *pdev);
+int vga_client_register(struct pci_dev *pdev,
+ unsigned int (*set_decode)(struct pci_dev *pdev, bool state));
+#else /* CONFIG_VGA_ARB */
static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
- unsigned int decodes) { };
-#endif
-
-#if defined(CONFIG_VGA_ARB)
-extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible);
-#else
-static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) { return 0; }
-#endif
+ unsigned int decodes)
+{
+};
+static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc,
+ int interruptible)
+{
+ return 0;
+}
+static inline void vga_put(struct pci_dev *pdev, unsigned int rsrc)
+{
+}
+static inline struct pci_dev *vga_default_device(void)
+{
+ return NULL;
+}
+static inline void vga_set_default_device(struct pci_dev *pdev)
+{
+}
+static inline int vga_remove_vgacon(struct pci_dev *pdev)
+{
+ return 0;
+}
+static inline int vga_client_register(struct pci_dev *pdev,
+ unsigned int (*set_decode)(struct pci_dev *pdev, bool state))
+{
+ return 0;
+}
+#endif /* CONFIG_VGA_ARB */
/**
* vga_get_interruptible
@@ -109,52 +114,9 @@ static inline int vga_get_uninterruptible(struct pci_dev *pdev,
return vga_get(pdev, rsrc, 0);
}
-#if defined(CONFIG_VGA_ARB)
-extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc);
-#else
-static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; }
-#endif
-
-#if defined(CONFIG_VGA_ARB)
-extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
-#else
-#define vga_put(pdev, rsrc)
-#endif
-
-
-#ifdef CONFIG_VGA_ARB
-extern struct pci_dev *vga_default_device(void);
-extern void vga_set_default_device(struct pci_dev *pdev);
-extern int vga_remove_vgacon(struct pci_dev *pdev);
-#else
-static inline struct pci_dev *vga_default_device(void) { return NULL; };
-static inline void vga_set_default_device(struct pci_dev *pdev) { };
-static inline int vga_remove_vgacon(struct pci_dev *pdev) { return 0; };
-#endif
-
-/*
- * Architectures should define this if they have several
- * independent PCI domains that can afford concurrent VGA
- * decoding
- */
-#ifndef __ARCH_HAS_VGA_CONFLICT
-static inline int vga_conflicts(struct pci_dev *p1, struct pci_dev *p2)
-{
- return 1;
-}
-#endif
-
-#if defined(CONFIG_VGA_ARB)
-int vga_client_register(struct pci_dev *pdev, void *cookie,
- void (*irq_set_state)(void *cookie, bool state),
- unsigned int (*set_vga_decode)(void *cookie, bool state));
-#else
-static inline int vga_client_register(struct pci_dev *pdev, void *cookie,
- void (*irq_set_state)(void *cookie, bool state),
- unsigned int (*set_vga_decode)(void *cookie, bool state))
+static inline void vga_client_unregister(struct pci_dev *pdev)
{
- return 0;
+ vga_client_register(pdev, NULL);
}
-#endif
#endif /* LINUX_VGA_H */
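An illustrative, hedged use of the consolidated arbiter API from a driver probe path (my_set_decode is a placeholder decode callback):

static unsigned int my_set_decode(struct pci_dev *pdev, bool state)
{
	/* Report which legacy ranges this card still decodes. */
	return state ? VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM :
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static int my_vga_probe(struct pci_dev *pdev)
{
	int ret = vga_client_register(pdev, my_set_decode);

	if (ret)
		return ret;

	if (!vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO)) {
		/* ... program legacy VGA registers ... */
		vga_put(pdev, VGA_RSRC_LEGACY_IO);
	}
	return 0;
}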
diff --git a/include/linux/vhost_iotlb.h b/include/linux/vhost_iotlb.h
new file mode 100644
index 000000000000..e79a40838998
--- /dev/null
+++ b/include/linux/vhost_iotlb.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VHOST_IOTLB_H
+#define _LINUX_VHOST_IOTLB_H
+
+#include <linux/interval_tree_generic.h>
+
+struct vhost_iotlb_map {
+ struct rb_node rb;
+ struct list_head link;
+ u64 start;
+ u64 last;
+ u64 size;
+ u64 addr;
+#define VHOST_MAP_RO 0x1
+#define VHOST_MAP_WO 0x2
+#define VHOST_MAP_RW 0x3
+ u32 perm;
+ u32 flags_padding;
+ u64 __subtree_last;
+ void *opaque;
+};
+
+#define VHOST_IOTLB_FLAG_RETIRE 0x1
+
+struct vhost_iotlb {
+ struct rb_root_cached root;
+ struct list_head list;
+ unsigned int limit;
+ unsigned int nmaps;
+ unsigned int flags;
+};
+
+int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb, u64 start, u64 last,
+ u64 addr, unsigned int perm, void *opaque);
+int vhost_iotlb_add_range(struct vhost_iotlb *iotlb, u64 start, u64 last,
+ u64 addr, unsigned int perm);
+void vhost_iotlb_del_range(struct vhost_iotlb *iotlb, u64 start, u64 last);
+
+void vhost_iotlb_init(struct vhost_iotlb *iotlb, unsigned int limit,
+ unsigned int flags);
+struct vhost_iotlb *vhost_iotlb_alloc(unsigned int limit, unsigned int flags);
+void vhost_iotlb_free(struct vhost_iotlb *iotlb);
+void vhost_iotlb_reset(struct vhost_iotlb *iotlb);
+
+struct vhost_iotlb_map *
+vhost_iotlb_itree_first(struct vhost_iotlb *iotlb, u64 start, u64 last);
+struct vhost_iotlb_map *
+vhost_iotlb_itree_next(struct vhost_iotlb_map *map, u64 start, u64 last);
+
+void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
+ struct vhost_iotlb_map *map);
+#endif
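
The declarations above amount to a small interval-tree-keyed map with a list
for iteration and an optional retire policy (VHOST_IOTLB_FLAG_RETIRE). A
minimal sketch of how a backend might consume it; the helper name and error
policy are hypothetical, not part of this patch:

	#include <linux/vhost_iotlb.h>

	static int handle_iotlb_update(struct vhost_iotlb *iotlb, u64 start,
				       u64 last, u64 uaddr, u32 perm)
	{
		struct vhost_iotlb_map *map;

		/* Drop any overlapping translation, then insert the new one. */
		vhost_iotlb_del_range(iotlb, start, last);
		if (vhost_iotlb_add_range(iotlb, start, last, uaddr, perm))
			return -ENOMEM;

		/* Look the mapping back up, e.g. to translate a descriptor. */
		map = vhost_iotlb_itree_first(iotlb, start, last);
		return map ? 0 : -EFAULT;
	}
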
diff --git a/include/linux/via-core.h b/include/linux/via-core.h
index 9e802deedb2d..8737599b9148 100644
--- a/include/linux/via-core.h
+++ b/include/linux/via-core.h
@@ -47,7 +47,6 @@ struct via_port_cfg {
/*
* Allow subdevs to register suspend/resume hooks.
*/
-#ifdef CONFIG_PM
struct viafb_pm_hooks {
struct list_head list;
int (*suspend)(void *private);
@@ -57,7 +56,6 @@ struct viafb_pm_hooks {
void viafb_pm_register(struct viafb_pm_hooks *hooks);
void viafb_pm_unregister(struct viafb_pm_hooks *hooks);
-#endif /* CONFIG_PM */
/*
* This is the global viafb "device" containing stuff needed by
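
With the CONFIG_PM guard gone, subdevice drivers can register their hooks
unconditionally instead of compiling the calls out when CONFIG_PM is unset.
A hedged sketch of a subdevice registering hooks (the callbacks are
hypothetical):

	#include <linux/via-core.h>

	static int viafb_sub_suspend(void *private)
	{
		/* quiesce the subdevice hardware */
		return 0;
	}

	static int viafb_sub_resume(void *private)
	{
		/* reprogram the subdevice hardware */
		return 0;
	}

	static struct viafb_pm_hooks sub_pm_hooks = {
		.suspend = viafb_sub_suspend,
		.resume = viafb_sub_resume,
	};

	/* in the subdevice probe path: viafb_pm_register(&sub_pm_hooks); */
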
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 16c0ed6c50a7..219037f4c08d 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -57,6 +57,7 @@
#define __LINUX_VIDEODEV2_H
#include <linux/time.h> /* need struct timeval */
+#include <linux/kernel.h>
#include <uapi/linux/videodev2.h>
#endif /* __LINUX_VIDEODEV2_H */
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 15f906e4a748..dcab9c7e8784 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -9,10 +9,9 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/gfp.h>
-#include <linux/vringh.h>
/**
- * virtqueue - a queue to register buffers for sending or receiving.
+ * struct virtqueue - a queue to register buffers for sending or receiving.
* @list: the chain of virtqueues for this device
* @callback: the function to call when buffers are consumed (can be NULL).
* @name: the name of this virtqueue (mainly for debugging)
@@ -20,6 +19,8 @@
* @priv: a pointer for the virtqueue implementation to use.
* @index: the zero-based ordinal number for this queue.
* @num_free: number of elements we expect to be able to fit.
+ * @num_max: the maximum number of elements supported by the device.
+ * @reset: whether the vq is currently in the reset state.
*
* A note on @num_free: with indirect buffers, each buffer needs one
* element in the queue, otherwise a buffer will need one element per
@@ -32,7 +33,9 @@ struct virtqueue {
struct virtio_device *vdev;
unsigned int index;
unsigned int num_free;
+ unsigned int num_max;
void *priv;
+ bool reset;
};
int virtqueue_add_outbuf(struct virtqueue *vq,
@@ -90,8 +93,11 @@ dma_addr_t virtqueue_get_desc_addr(struct virtqueue *vq);
dma_addr_t virtqueue_get_avail_addr(struct virtqueue *vq);
dma_addr_t virtqueue_get_used_addr(struct virtqueue *vq);
+int virtqueue_resize(struct virtqueue *vq, u32 num,
+ void (*recycle)(struct virtqueue *vq, void *buf));
+
/**
- * virtio_device - representation of a device using virtio
+ * struct virtio_device - representation of a device using virtio
* @index: unique position on the virtio bus
* @failed: saved value for VIRTIO_CONFIG_S_FAILED bit (for restore)
* @config_enabled: configuration change reporting enabled
@@ -111,6 +117,7 @@ struct virtio_device {
bool config_enabled;
bool config_change_pending;
spinlock_t config_lock;
+ spinlock_t vqs_list_lock; /* Protects VQs list access */
struct device dev;
struct virtio_device_id id;
const struct virtio_config_ops *config;
@@ -128,17 +135,20 @@ static inline struct virtio_device *dev_to_virtio(struct device *_dev)
void virtio_add_status(struct virtio_device *dev, unsigned int status);
int register_virtio_device(struct virtio_device *dev);
void unregister_virtio_device(struct virtio_device *dev);
+bool is_virtio_device(struct device *dev);
void virtio_break_device(struct virtio_device *dev);
+void __virtio_unbreak_device(struct virtio_device *dev);
+
+void __virtqueue_break(struct virtqueue *_vq);
+void __virtqueue_unbreak(struct virtqueue *_vq);
void virtio_config_changed(struct virtio_device *dev);
-void virtio_config_disable(struct virtio_device *dev);
-void virtio_config_enable(struct virtio_device *dev);
-int virtio_finalize_features(struct virtio_device *dev);
#ifdef CONFIG_PM_SLEEP
int virtio_device_freeze(struct virtio_device *dev);
int virtio_device_restore(struct virtio_device *dev);
#endif
+void virtio_reset_device(struct virtio_device *dev);
size_t virtio_max_dma_size(struct virtio_device *vdev);
@@ -146,7 +156,7 @@ size_t virtio_max_dma_size(struct virtio_device *vdev);
list_for_each_entry(vq, &vdev->vqs, list)
/**
- * virtio_driver - operations for a virtio I/O driver
+ * struct virtio_driver - operations for a virtio I/O driver
* @driver: underlying device driver (populate name and owner).
* @id_table: the ids serviced by this driver.
* @feature_table: an array of feature numbers supported by this driver.
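
The new virtqueue_resize() hands every still-queued buffer back through the
caller's recycle callback before the ring is reprovisioned. A minimal
sketch, assuming the driver queued kmalloc'ed tokens it can simply free
(shrink_rx_ring() is hypothetical):

	#include <linux/virtio.h>
	#include <linux/slab.h>

	static void recycle_buf(struct virtqueue *vq, void *buf)
	{
		kfree(buf);	/* token passed to virtqueue_add_*() earlier */
	}

	static int shrink_rx_ring(struct virtqueue *vq)
	{
		/* May fail if the transport lacks per-vq reset support. */
		return virtqueue_resize(vq, 128, recycle_buf);
	}
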
diff --git a/include/linux/virtio_anchor.h b/include/linux/virtio_anchor.h
new file mode 100644
index 000000000000..432e6c00b3ca
--- /dev/null
+++ b/include/linux/virtio_anchor.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VIRTIO_ANCHOR_H
+#define _LINUX_VIRTIO_ANCHOR_H
+
+#ifdef CONFIG_VIRTIO_ANCHOR
+struct virtio_device;
+
+bool virtio_require_restricted_mem_acc(struct virtio_device *dev);
+extern bool (*virtio_check_mem_acc_cb)(struct virtio_device *dev);
+
+static inline void virtio_set_mem_acc_cb(bool (*func)(struct virtio_device *))
+{
+ virtio_check_mem_acc_cb = func;
+}
+#else
+#define virtio_set_mem_acc_cb(func) do { } while (0)
+#endif
+
+#endif /* _LINUX_VIRTIO_ANCHOR_H */
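
The anchor is a single global hook: a platform installs one predicate and
virtio core consults it per device. The canned helper declared above can be
installed directly; a sketch of an early platform init doing so (the init
function name is hypothetical):

	#include <linux/virtio_anchor.h>

	static int __init my_platform_anchor_init(void)
	{
		/* Require restricted memory access for all virtio devices. */
		virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);
		return 0;
	}
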
diff --git a/include/linux/virtio_caif.h b/include/linux/virtio_caif.h
index 5d2d3124ca3d..ea722479510c 100644
--- a/include/linux/virtio_caif.h
+++ b/include/linux/virtio_caif.h
@@ -11,9 +11,9 @@
#include <linux/types.h>
struct virtio_caif_transf_config {
- u16 headroom;
- u16 tailroom;
- u32 mtu;
+ __virtio16 headroom;
+ __virtio16 tailroom;
+ __virtio32 mtu;
u8 reserved[4];
};
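
With the fields now carrying __virtio16/__virtio32 types, readers must go
through the endian-aware accessors instead of assuming native byte order.
A hedged sketch (caif_get_mtu() is hypothetical) using the reworked
virtio_cread() from the virtio_config.h change below:

	#include <linux/virtio_caif.h>
	#include <linux/virtio_config.h>

	static u32 caif_get_mtu(struct virtio_device *vdev)
	{
		u32 mtu;

		/* The __virtio32 member type now selects the conversion. */
		virtio_cread(vdev, struct virtio_caif_transf_config, mtu, &mtu);
		return mtu;
	}
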
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index bb4cc4910750..4b517649cfe8 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -6,10 +6,16 @@
#include <linux/bug.h>
#include <linux/virtio.h>
#include <linux/virtio_byteorder.h>
+#include <linux/compiler_types.h>
#include <uapi/linux/virtio_config.h>
struct irq_affinity;
+struct virtio_shm_region {
+ u64 addr;
+ u64 len;
+};
+
/**
* virtio_config_ops - operations for configuring a virtio device
* Note: Do not assume that a transport implements all of the operations
@@ -51,13 +57,19 @@ struct irq_affinity;
* include a NULL entry for vqs unused by driver
* Returns 0 on success or error status
* @del_vqs: free virtqueues found by find_vqs().
+ * @synchronize_cbs: synchronize with the virtqueue callbacks (optional)
+ * The function guarantees that all memory operations on the
+ * queue before it are visible to the vring_interrupt() that is
+ * called after it.
+ * vdev: the virtio_device
* @get_features: get the array of feature bits for this device.
* vdev: the virtio_device
* Returns the first 64 feature bits (all we currently need).
* @finalize_features: confirm what device features we'll be using.
* vdev: the virtio_device
- * This gives the final feature bits for the device: it can change
+ * This sends the driver feature bits to the device: it can change
* the dev->feature bits if it wants.
+ * Note: despite the name this can be called any number of times.
* Returns 0 on success or error status
* @bus_name: return the bus name associated with the device (optional)
* vdev: the virtio_device
@@ -65,6 +77,19 @@ struct irq_affinity;
* the caller can then copy.
* @set_vq_affinity: set the affinity for a virtqueue (optional).
* @get_vq_affinity: get the affinity for a virtqueue (optional).
+ * @get_shm_region: get a shared memory region based on the index.
+ * @disable_vq_and_reset: reset a queue individually (optional).
+ * vq: the virtqueue
+ * Returns 0 on success or error status
+ * disable_vq_and_reset will guarantee that the callbacks are disabled and
+ * synchronized.
+ * Except for the callback, the caller should guarantee that the vring is
+ * not accessed by any functions of virtqueue.
+ * @enable_vq_after_reset: enable a reset queue
+ * vq: the virtqueue
+ * Returns 0 on success or error status
+ * If disable_vq_and_reset is set, then enable_vq_after_reset must also be
+ * set.
*/
typedef void vq_callback_t(struct virtqueue *);
struct virtio_config_ops {
@@ -81,6 +106,7 @@ struct virtio_config_ops {
const char * const names[], const bool *ctx,
struct irq_affinity *desc);
void (*del_vqs)(struct virtio_device *);
+ void (*synchronize_cbs)(struct virtio_device *);
u64 (*get_features)(struct virtio_device *vdev);
int (*finalize_features)(struct virtio_device *vdev);
const char *(*bus_name)(struct virtio_device *vdev);
@@ -88,6 +114,10 @@ struct virtio_config_ops {
const struct cpumask *cpu_mask);
const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
int index);
+ bool (*get_shm_region)(struct virtio_device *vdev,
+ struct virtio_shm_region *region, u8 id);
+ int (*disable_vq_and_reset)(struct virtqueue *vq);
+ int (*enable_vq_after_reset)(struct virtqueue *vq);
};
/* If driver didn't advertise the feature, it will never appear. */
@@ -162,16 +192,16 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
}
/**
- * virtio_has_iommu_quirk - determine whether this device has the iommu quirk
+ * virtio_has_dma_quirk - determine whether this device has the DMA quirk
* @vdev: the device
*/
-static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev)
+static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
{
/*
* Note the reverse polarity of the quirk feature (compared to most
* other features), this is for compatibility with legacy systems.
*/
- return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
+ return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
}
static inline
@@ -208,8 +238,27 @@ int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
}
/**
+ * virtio_synchronize_cbs - synchronize with virtqueue callbacks
+ * @dev: the virtio device
+ */
+static inline
+void virtio_synchronize_cbs(struct virtio_device *dev)
+{
+ if (dev->config->synchronize_cbs) {
+ dev->config->synchronize_cbs(dev);
+ } else {
+ /*
+ * A best effort fallback to synchronize with
+ * interrupts, preemption and softirq disabled
+ * regions. See comment above synchronize_rcu().
+ */
+ synchronize_rcu();
+ }
+}
+
+/**
* virtio_device_ready - enable vq use in probe function
- * @vdev: the device
+ * @dev: the virtio device
*
* Driver must call this to use vqs in the probe function.
*
@@ -220,7 +269,29 @@ void virtio_device_ready(struct virtio_device *dev)
{
unsigned status = dev->config->get_status(dev);
- BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
+ WARN_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
+
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+ /*
+ * The virtio_synchronize_cbs() makes sure vring_interrupt()
+ * will see the driver specific setup if it sees vq->broken
+ * as false (even if the notifications come before DRIVER_OK).
+ */
+ virtio_synchronize_cbs(dev);
+ __virtio_unbreak_device(dev);
+#endif
+ /*
+ * The transport should ensure the visibility of vq->broken
+ * before setting DRIVER_OK. See the comments for the transport
+ * specific set_status() method.
+ *
+ * A well behaved device will only notify a virtqueue after
+ * DRIVER_OK, so the device must "see" the coherent memory
+ * write that cleared vq->broken, which the driver performs
+ * before setting DRIVER_OK. The driver's subsequent
+ * vring_interrupt() will then see vq->broken as false and
+ * no notification is lost.
+ */
dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}
@@ -235,7 +306,7 @@ const char *virtio_bus_name(struct virtio_device *vdev)
/**
* virtqueue_set_affinity - setting affinity for a virtqueue
* @vq: the virtqueue
- * @cpu: the cpu no.
+ * @cpu_mask: the cpu mask
*
 * Note that this function is best-effort: the affinity hint may not be set
* due to config support, irq type and sharing.
@@ -250,6 +321,15 @@ int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
return 0;
}
+static inline
+bool virtio_get_shm_region(struct virtio_device *vdev,
+ struct virtio_shm_region *region, u8 id)
+{
+ if (!vdev->config->get_shm_region)
+ return false;
+ return vdev->config->get_shm_region(vdev, region, id);
+}
+
static inline bool virtio_is_little_endian(struct virtio_device *vdev)
{
return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
@@ -287,70 +367,133 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
}
+#define virtio_to_cpu(vdev, x) \
+ _Generic((x), \
+ __u8: (x), \
+ __virtio16: virtio16_to_cpu((vdev), (x)), \
+ __virtio32: virtio32_to_cpu((vdev), (x)), \
+ __virtio64: virtio64_to_cpu((vdev), (x)) \
+ )
+
+#define cpu_to_virtio(vdev, x, m) \
+ _Generic((m), \
+ __u8: (x), \
+ __virtio16: cpu_to_virtio16((vdev), (x)), \
+ __virtio32: cpu_to_virtio32((vdev), (x)), \
+ __virtio64: cpu_to_virtio64((vdev), (x)) \
+ )
+
+#define __virtio_native_type(structname, member) \
+ typeof(virtio_to_cpu(NULL, ((structname*)0)->member))
+
/* Config space accessors. */
#define virtio_cread(vdev, structname, member, ptr) \
do { \
+ typeof(((structname*)0)->member) virtio_cread_v; \
+ \
might_sleep(); \
- /* Must match the member's type, and be integer */ \
- if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \
- (*ptr) = 1; \
+ /* Sanity check: must match the member's type */ \
+ typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \
\
- switch (sizeof(*ptr)) { \
+ switch (sizeof(virtio_cread_v)) { \
case 1: \
- *(ptr) = virtio_cread8(vdev, \
- offsetof(structname, member)); \
- break; \
case 2: \
- *(ptr) = virtio_cread16(vdev, \
- offsetof(structname, member)); \
- break; \
case 4: \
- *(ptr) = virtio_cread32(vdev, \
- offsetof(structname, member)); \
- break; \
- case 8: \
- *(ptr) = virtio_cread64(vdev, \
- offsetof(structname, member)); \
+ vdev->config->get((vdev), \
+ offsetof(structname, member), \
+ &virtio_cread_v, \
+ sizeof(virtio_cread_v)); \
break; \
default: \
- BUG(); \
+ __virtio_cread_many((vdev), \
+ offsetof(structname, member), \
+ &virtio_cread_v, \
+ 1, \
+ sizeof(virtio_cread_v)); \
+ break; \
} \
+ *(ptr) = virtio_to_cpu(vdev, virtio_cread_v); \
} while(0)
/* Config space accessors. */
#define virtio_cwrite(vdev, structname, member, ptr) \
do { \
+ typeof(((structname*)0)->member) virtio_cwrite_v = \
+ cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \
+ \
might_sleep(); \
- /* Must match the member's type, and be integer */ \
- if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \
- BUG_ON((*ptr) == 1); \
+ /* Sanity check: must match the member's type */ \
+ typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \
\
- switch (sizeof(*ptr)) { \
+ vdev->config->set((vdev), offsetof(structname, member), \
+ &virtio_cwrite_v, \
+ sizeof(virtio_cwrite_v)); \
+ } while(0)
+
+/*
+ * Nothing virtio-specific about these, but let's worry about generalizing
+ * these later.
+ */
+#define virtio_le_to_cpu(x) \
+ _Generic((x), \
+ __u8: (u8)(x), \
+ __le16: (u16)le16_to_cpu(x), \
+ __le32: (u32)le32_to_cpu(x), \
+ __le64: (u64)le64_to_cpu(x) \
+ )
+
+#define virtio_cpu_to_le(x, m) \
+ _Generic((m), \
+ __u8: (x), \
+ __le16: cpu_to_le16(x), \
+ __le32: cpu_to_le32(x), \
+ __le64: cpu_to_le64(x) \
+ )
+
+/* LE (e.g. modern) Config space accessors. */
+#define virtio_cread_le(vdev, structname, member, ptr) \
+ do { \
+ typeof(((structname*)0)->member) virtio_cread_v; \
+ \
+ might_sleep(); \
+ /* Sanity check: must match the member's type */ \
+ typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \
+ \
+ switch (sizeof(virtio_cread_v)) { \
case 1: \
- virtio_cwrite8(vdev, \
- offsetof(structname, member), \
- *(ptr)); \
- break; \
case 2: \
- virtio_cwrite16(vdev, \
- offsetof(structname, member), \
- *(ptr)); \
- break; \
case 4: \
- virtio_cwrite32(vdev, \
- offsetof(structname, member), \
- *(ptr)); \
- break; \
- case 8: \
- virtio_cwrite64(vdev, \
- offsetof(structname, member), \
- *(ptr)); \
+ vdev->config->get((vdev), \
+ offsetof(structname, member), \
+ &virtio_cread_v, \
+ sizeof(virtio_cread_v)); \
break; \
default: \
- BUG(); \
+ __virtio_cread_many((vdev), \
+ offsetof(structname, member), \
+ &virtio_cread_v, \
+ 1, \
+ sizeof(virtio_cread_v)); \
+ break; \
} \
+ *(ptr) = virtio_le_to_cpu(virtio_cread_v); \
} while(0)
+#define virtio_cwrite_le(vdev, structname, member, ptr) \
+ do { \
+ typeof(((structname*)0)->member) virtio_cwrite_v = \
+ virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \
+ \
+ might_sleep(); \
+ /* Sanity check: must match the member's type */ \
+ typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \
+ \
+ vdev->config->set((vdev), offsetof(structname, member), \
+ &virtio_cwrite_v, \
+ sizeof(virtio_cwrite_v)); \
+ } while(0)
+
+
/* Read @count fields, @bytes each. */
static inline void __virtio_cread_many(struct virtio_device *vdev,
unsigned int offset,
@@ -399,53 +542,60 @@ static inline void virtio_cwrite8(struct virtio_device *vdev,
static inline u16 virtio_cread16(struct virtio_device *vdev,
unsigned int offset)
{
- u16 ret;
+ __virtio16 ret;
might_sleep();
vdev->config->get(vdev, offset, &ret, sizeof(ret));
- return virtio16_to_cpu(vdev, (__force __virtio16)ret);
+ return virtio16_to_cpu(vdev, ret);
}
static inline void virtio_cwrite16(struct virtio_device *vdev,
unsigned int offset, u16 val)
{
+ __virtio16 v;
+
might_sleep();
- val = (__force u16)cpu_to_virtio16(vdev, val);
- vdev->config->set(vdev, offset, &val, sizeof(val));
+ v = cpu_to_virtio16(vdev, val);
+ vdev->config->set(vdev, offset, &v, sizeof(v));
}
static inline u32 virtio_cread32(struct virtio_device *vdev,
unsigned int offset)
{
- u32 ret;
+ __virtio32 ret;
might_sleep();
vdev->config->get(vdev, offset, &ret, sizeof(ret));
- return virtio32_to_cpu(vdev, (__force __virtio32)ret);
+ return virtio32_to_cpu(vdev, ret);
}
static inline void virtio_cwrite32(struct virtio_device *vdev,
unsigned int offset, u32 val)
{
+ __virtio32 v;
+
might_sleep();
- val = (__force u32)cpu_to_virtio32(vdev, val);
- vdev->config->set(vdev, offset, &val, sizeof(val));
+ v = cpu_to_virtio32(vdev, val);
+ vdev->config->set(vdev, offset, &v, sizeof(v));
}
static inline u64 virtio_cread64(struct virtio_device *vdev,
unsigned int offset)
{
- u64 ret;
+ __virtio64 ret;
+
__virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
- return virtio64_to_cpu(vdev, (__force __virtio64)ret);
+ return virtio64_to_cpu(vdev, ret);
}
static inline void virtio_cwrite64(struct virtio_device *vdev,
unsigned int offset, u64 val)
{
+ __virtio64 v;
+
might_sleep();
- val = (__force u64)cpu_to_virtio64(vdev, val);
- vdev->config->set(vdev, offset, &val, sizeof(val));
+ v = cpu_to_virtio64(vdev, val);
+ vdev->config->set(vdev, offset, &v, sizeof(v));
}
/* Conditional config space accessors. */
@@ -459,4 +609,15 @@ static inline void virtio_cwrite64(struct virtio_device *vdev,
_r; \
})
+/* Conditional config space accessors. */
+#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr) \
+ ({ \
+ int _r = 0; \
+ if (!virtio_has_feature(vdev, fbit)) \
+ _r = -ENOENT; \
+ else \
+ virtio_cread_le((vdev), structname, member, ptr); \
+ _r; \
+ })
+
#endif /* _LINUX_VIRTIO_CONFIG_H */
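
Putting the pieces together, a driver for a hypothetical modern device can
read a feature-gated little-endian config field in one call; struct
my_virtio_cfg and MY_F_QDEPTH are illustrative only:

	#include <linux/virtio_config.h>

	struct my_virtio_cfg {
		__le32 queue_depth;
	};
	#define MY_F_QDEPTH 50	/* hypothetical device feature bit */

	static u32 my_queue_depth(struct virtio_device *vdev)
	{
		u32 depth;

		if (virtio_cread_le_feature(vdev, MY_F_QDEPTH,
					    struct my_virtio_cfg, queue_depth,
					    &depth))
			depth = 64;	/* feature not offered: use a fallback */
		return depth;
	}
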
diff --git a/include/linux/virtio_dma_buf.h b/include/linux/virtio_dma_buf.h
new file mode 100644
index 000000000000..a2fdf217ac62
--- /dev/null
+++ b/include/linux/virtio_dma_buf.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * dma-bufs for virtio exported objects
+ *
+ * Copyright (C) 2020 Google, Inc.
+ */
+
+#ifndef _LINUX_VIRTIO_DMA_BUF_H
+#define _LINUX_VIRTIO_DMA_BUF_H
+
+#include <linux/dma-buf.h>
+#include <linux/uuid.h>
+#include <linux/virtio.h>
+
+/**
+ * struct virtio_dma_buf_ops - operations possible on exported object dma-buf
+ * @ops: the base dma_buf_ops. ops.attach MUST be virtio_dma_buf_attach.
+ * @device_attach: [optional] callback invoked by virtio_dma_buf_attach during
+ * all attach operations.
+ * @get_uuid: [required] callback to get the uuid of the exported object.
+ */
+struct virtio_dma_buf_ops {
+ struct dma_buf_ops ops;
+ int (*device_attach)(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach);
+ int (*get_uuid)(struct dma_buf *dma_buf, uuid_t *uuid);
+};
+
+int virtio_dma_buf_attach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach);
+
+struct dma_buf *virtio_dma_buf_export
+ (const struct dma_buf_export_info *exp_info);
+bool is_virtio_dma_buf(struct dma_buf *dma_buf);
+int virtio_dma_buf_get_uuid(struct dma_buf *dma_buf, uuid_t *uuid);
+
+#endif /* _LINUX_VIRTIO_DMA_BUF_H */
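
Exporters embed their dma_buf_ops in struct virtio_dma_buf_ops and must
route attach through virtio_dma_buf_attach(), which is also how importers
recognize these buffers. A partial sketch; the UUID source and the elided
ops are placeholders:

	#include <linux/virtio_dma_buf.h>

	static uuid_t my_obj_uuid;	/* filled in when the host exports the object */

	static int my_get_uuid(struct dma_buf *dma_buf, uuid_t *uuid)
	{
		uuid_copy(uuid, &my_obj_uuid);
		return 0;
	}

	static const struct virtio_dma_buf_ops my_dmabuf_ops = {
		.ops = {
			.attach = virtio_dma_buf_attach,	/* required */
			/* .map_dma_buf, .unmap_dma_buf, .release, ... */
		},
		.get_uuid = my_get_uuid,
	};

The exporter then points dma_buf_export_info.ops at &my_dmabuf_ops.ops and
calls virtio_dma_buf_export().
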
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 0d1fe9297ac6..a960de68ac69 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -3,11 +3,31 @@
#define _LINUX_VIRTIO_NET_H
#include <linux/if_vlan.h>
+#include <uapi/linux/tcp.h>
+#include <uapi/linux/udp.h>
#include <uapi/linux/virtio_net.h>
+static inline bool virtio_net_hdr_match_proto(__be16 protocol, __u8 gso_type)
+{
+ switch (gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+ case VIRTIO_NET_HDR_GSO_TCPV4:
+ return protocol == cpu_to_be16(ETH_P_IP);
+ case VIRTIO_NET_HDR_GSO_TCPV6:
+ return protocol == cpu_to_be16(ETH_P_IPV6);
+ case VIRTIO_NET_HDR_GSO_UDP:
+ return protocol == cpu_to_be16(ETH_P_IP) ||
+ protocol == cpu_to_be16(ETH_P_IPV6);
+ default:
+ return false;
+ }
+}
+
static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
const struct virtio_net_hdr *hdr)
{
+ if (skb->protocol)
+ return 0;
+
switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
case VIRTIO_NET_HDR_GSO_UDP:
@@ -28,17 +48,26 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
bool little_endian)
{
unsigned int gso_type = 0;
+ unsigned int thlen = 0;
+ unsigned int p_off = 0;
+ unsigned int ip_proto;
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
gso_type = SKB_GSO_TCPV4;
+ ip_proto = IPPROTO_TCP;
+ thlen = sizeof(struct tcphdr);
break;
case VIRTIO_NET_HDR_GSO_TCPV6:
gso_type = SKB_GSO_TCPV6;
+ ip_proto = IPPROTO_TCP;
+ thlen = sizeof(struct tcphdr);
break;
case VIRTIO_NET_HDR_GSO_UDP:
gso_type = SKB_GSO_UDP;
+ ip_proto = IPPROTO_UDP;
+ thlen = sizeof(struct udphdr);
break;
default:
return -EINVAL;
@@ -51,22 +80,43 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
return -EINVAL;
}
+ skb_reset_mac_header(skb);
+
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
- u16 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
- u16 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
+ u32 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
+ u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
+ u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16));
+
+ if (!pskb_may_pull(skb, needed))
+ return -EINVAL;
if (!skb_partial_csum_set(skb, start, off))
return -EINVAL;
+
+ p_off = skb_transport_offset(skb) + thlen;
+ if (!pskb_may_pull(skb, p_off))
+ return -EINVAL;
} else {
/* gso packets without NEEDS_CSUM do not set transport_offset.
* probe and drop if does not match one of the above types.
*/
if (gso_type && skb->network_header) {
- if (!skb->protocol)
- virtio_net_hdr_set_proto(skb, hdr);
+ struct flow_keys_basic keys;
+
+ if (!skb->protocol) {
+ __be16 protocol = dev_parse_header_protocol(skb);
+
+ if (!protocol)
+ virtio_net_hdr_set_proto(skb, hdr);
+ else if (!virtio_net_hdr_match_proto(protocol, hdr->gso_type))
+ return -EINVAL;
+ else
+ skb->protocol = protocol;
+ }
retry:
- skb_probe_transport_header(skb);
- if (!skb_transport_header_was_set(skb)) {
+ if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
+ NULL, 0, 0, 0,
+ 0)) {
/* UFO does not specify ipv4 or 6: try both */
if (gso_type & SKB_GSO_UDP &&
skb->protocol == htons(ETH_P_IP)) {
@@ -75,18 +125,38 @@ retry:
}
return -EINVAL;
}
+
+ p_off = keys.control.thoff + thlen;
+ if (!pskb_may_pull(skb, p_off) ||
+ keys.basic.ip_proto != ip_proto)
+ return -EINVAL;
+
+ skb_set_transport_header(skb, keys.control.thoff);
+ } else if (gso_type) {
+ p_off = thlen;
+ if (!pskb_may_pull(skb, p_off))
+ return -EINVAL;
}
}
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size);
+ unsigned int nh_off = p_off;
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
- skb_shinfo(skb)->gso_size = gso_size;
- skb_shinfo(skb)->gso_type = gso_type;
+ /* UFO may not include transport header in gso_size. */
+ if (gso_type & SKB_GSO_UDP)
+ nh_off -= thlen;
- /* Header must be checked, and gso_segs computed. */
- skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
- skb_shinfo(skb)->gso_segs = 0;
+ /* Too small packets are not really GSO ones. */
+ if (skb->len - nh_off > gso_size) {
+ shinfo->gso_size = gso_size;
+ shinfo->gso_type = gso_type;
+
+ /* Header must be checked, and gso_segs computed. */
+ shinfo->gso_type |= SKB_GSO_DODGY;
+ shinfo->gso_segs = 0;
+ }
}
return 0;
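
The net effect of the additions above is that virtio_net_hdr_to_skb() no
longer trusts the header: it bounds-checks csum_start/csum_offset with
pskb_may_pull(), probes the transport header with the flow dissector and
cross-checks it against the claimed GSO type, and refuses to mark
undersized packets as GSO. A consumer still only needs the usual call; a
sketch (my_rx() is hypothetical):

	#include <linux/virtio_net.h>

	static int my_rx(struct sk_buff *skb,
			 const struct virtio_net_hdr *hdr, bool little_endian)
	{
		if (virtio_net_hdr_to_skb(skb, hdr, little_endian))
			return -EINVAL;	/* malformed or lying header: drop */
		return 0;
	}
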
diff --git a/include/linux/virtio_pci_legacy.h b/include/linux/virtio_pci_legacy.h
new file mode 100644
index 000000000000..a8dc757d0367
--- /dev/null
+++ b/include/linux/virtio_pci_legacy.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VIRTIO_PCI_LEGACY_H
+#define _LINUX_VIRTIO_PCI_LEGACY_H
+
+#include "linux/mod_devicetable.h"
+#include <linux/pci.h>
+#include <linux/virtio_pci.h>
+
+struct virtio_pci_legacy_device {
+ struct pci_dev *pci_dev;
+
+ /* Where to read and clear interrupt */
+ u8 __iomem *isr;
+ /* The IO mapping for the PCI config space (legacy mode only) */
+ void __iomem *ioaddr;
+
+ struct virtio_device_id id;
+};
+
+u64 vp_legacy_get_features(struct virtio_pci_legacy_device *ldev);
+u64 vp_legacy_get_driver_features(struct virtio_pci_legacy_device *ldev);
+void vp_legacy_set_features(struct virtio_pci_legacy_device *ldev,
+ u32 features);
+u8 vp_legacy_get_status(struct virtio_pci_legacy_device *ldev);
+void vp_legacy_set_status(struct virtio_pci_legacy_device *ldev,
+ u8 status);
+u16 vp_legacy_queue_vector(struct virtio_pci_legacy_device *ldev,
+ u16 idx, u16 vector);
+u16 vp_legacy_config_vector(struct virtio_pci_legacy_device *ldev,
+ u16 vector);
+void vp_legacy_set_queue_address(struct virtio_pci_legacy_device *ldev,
+ u16 index, u32 queue_pfn);
+bool vp_legacy_get_queue_enable(struct virtio_pci_legacy_device *ldev,
+ u16 idx);
+u16 vp_legacy_get_queue_size(struct virtio_pci_legacy_device *ldev,
+ u16 idx);
+int vp_legacy_probe(struct virtio_pci_legacy_device *ldev);
+void vp_legacy_remove(struct virtio_pci_legacy_device *ldev);
+
+#endif
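
The library is deliberately thin: the caller owns the struct, fills in
pci_dev, and drives the device through the accessors. A hedged bring-up
sketch (my_legacy_setup() is hypothetical):

	#include <linux/virtio_pci_legacy.h>

	static int my_legacy_setup(struct virtio_pci_legacy_device *ldev,
				   struct pci_dev *pci_dev)
	{
		u64 features;
		int err;

		ldev->pci_dev = pci_dev;
		err = vp_legacy_probe(ldev);	/* maps the legacy I/O BAR */
		if (err)
			return err;

		vp_legacy_set_status(ldev, 0);	/* reset the device */
		features = vp_legacy_get_features(ldev);
		/* ...negotiate features, program queue PFNs, set DRIVER_OK... */
		return 0;
	}
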
diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h
new file mode 100644
index 000000000000..c4eeb79b0139
--- /dev/null
+++ b/include/linux/virtio_pci_modern.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VIRTIO_PCI_MODERN_H
+#define _LINUX_VIRTIO_PCI_MODERN_H
+
+#include <linux/pci.h>
+#include <linux/virtio_pci.h>
+
+struct virtio_pci_modern_common_cfg {
+ struct virtio_pci_common_cfg cfg;
+
+ __le16 queue_notify_data; /* read-write */
+ __le16 queue_reset; /* read-write */
+};
+
+struct virtio_pci_modern_device {
+ struct pci_dev *pci_dev;
+
+ struct virtio_pci_common_cfg __iomem *common;
+ /* Device-specific data (non-legacy mode) */
+ void __iomem *device;
+ /* Base of vq notifications (non-legacy mode). */
+ void __iomem *notify_base;
+ /* Physical base of vq notifications */
+ resource_size_t notify_pa;
+ /* Where to read and clear interrupt */
+ u8 __iomem *isr;
+
+ /* So we can sanity-check accesses. */
+ size_t notify_len;
+ size_t device_len;
+
+ /* Capability for when we need to map notifications per-vq. */
+ int notify_map_cap;
+
+ /* Multiply queue_notify_off by this value. (non-legacy mode). */
+ u32 notify_offset_multiplier;
+
+ int modern_bars;
+
+ struct virtio_device_id id;
+};
+
+/*
+ * Type-safe wrappers for io accesses.
+ * Use these to enforce at compile time the following spec requirement:
+ *
+ * The driver MUST access each field using the “natural” access
+ * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
+ * for 16-bit fields and 8-bit accesses for 8-bit fields.
+ */
+static inline u8 vp_ioread8(const u8 __iomem *addr)
+{
+ return ioread8(addr);
+}
+static inline u16 vp_ioread16(const __le16 __iomem *addr)
+{
+ return ioread16(addr);
+}
+
+static inline u32 vp_ioread32(const __le32 __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
+{
+ iowrite8(value, addr);
+}
+
+static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
+{
+ iowrite16(value, addr);
+}
+
+static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
+{
+ iowrite32(value, addr);
+}
+
+static inline void vp_iowrite64_twopart(u64 val,
+ __le32 __iomem *lo,
+ __le32 __iomem *hi)
+{
+ vp_iowrite32((u32)val, lo);
+ vp_iowrite32(val >> 32, hi);
+}
+
+u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev);
+u64 vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev);
+void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
+ u64 features);
+u32 vp_modern_generation(struct virtio_pci_modern_device *mdev);
+u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev);
+void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
+ u8 status);
+u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
+ u16 idx, u16 vector);
+u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
+ u16 vector);
+void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
+ u16 index, u64 desc_addr, u64 driver_addr,
+ u64 device_addr);
+void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
+ u16 idx, bool enable);
+bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
+ u16 idx);
+void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
+ u16 idx, u16 size);
+u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
+ u16 idx);
+u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev);
+void __iomem *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev,
+ u16 index, resource_size_t *pa);
+int vp_modern_probe(struct virtio_pci_modern_device *mdev);
+void vp_modern_remove(struct virtio_pci_modern_device *mdev);
+int vp_modern_get_queue_reset(struct virtio_pci_modern_device *mdev, u16 index);
+void vp_modern_set_queue_reset(struct virtio_pci_modern_device *mdev, u16 index);
+#endif
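
The typed wrappers turn the spec's "natural access width" rule into a
compile-time property. For example, the __le16 msix_config field of the
common config only type-checks through vp_ioread16():

	#include <linux/virtio_pci_modern.h>

	static u16 read_msix_config(struct virtio_pci_modern_device *mdev)
	{
		/* vp_ioread32(&mdev->common->msix_config) would trigger an
		 * incompatible-pointer-type diagnostic: the argument is
		 * __le16 __iomem *, not __le32 __iomem *. */
		return vp_ioread16(&mdev->common->msix_config);
	}
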
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index 3dc70adfe5f5..8b8af1a38991 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -46,16 +46,15 @@ static inline void virtio_wmb(bool weak_barriers)
dma_wmb();
}
-static inline void virtio_store_mb(bool weak_barriers,
- __virtio16 *p, __virtio16 v)
-{
- if (weak_barriers) {
- virt_store_mb(*p, v);
- } else {
- WRITE_ONCE(*p, v);
- mb();
- }
-}
+#define virtio_store_mb(weak_barriers, p, v) \
+do { \
+ if (weak_barriers) { \
+ virt_store_mb(*p, v); \
+ } else { \
+ WRITE_ONCE(*p, v); \
+ mb(); \
+ } \
+} while (0)
struct virtio_device;
struct virtqueue;
@@ -77,16 +76,6 @@ struct virtqueue *vring_create_virtqueue(unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name);
-/* Creates a virtqueue with a custom layout. */
-struct virtqueue *__vring_new_virtqueue(unsigned int index,
- struct vring vring,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool ctx,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name);
-
/*
* Creates a virtqueue with a standard layout but a caller-allocated
* ring.
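
Turning virtio_store_mb() into a macro removes the __virtio16-only typing,
so it can publish a 16-bit ring field of any endian-annotated type with the
required ordering. A minimal sketch storing a little-endian value
(publish_le16() is hypothetical):

	#include <linux/virtio_ring.h>

	static void publish_le16(__le16 *p, u16 val, bool weak_barriers)
	{
		/* virt_store_mb() for weak barriers, else WRITE_ONCE + mb() */
		virtio_store_mb(weak_barriers, p, cpu_to_le16(val));
	}
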
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 71c81e0dc8f2..35d7eedb5e8e 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -36,6 +36,7 @@ struct virtio_vsock_sock {
u32 rx_bytes;
u32 buf_alloc;
struct list_head rx_queue;
+ u32 msg_count;
};
struct virtio_vsock_pkt {
@@ -48,6 +49,7 @@ struct virtio_vsock_pkt {
u32 len;
u32 off;
bool reply;
+ bool tap_delivered;
};
struct virtio_vsock_pkt_info {
@@ -79,8 +81,17 @@ virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
struct msghdr *msg,
size_t len, int flags);
+int
+virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ size_t len);
+ssize_t
+virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ int flags);
s64 virtio_transport_stream_has_data(struct vsock_sock *vsk);
s64 virtio_transport_stream_has_space(struct vsock_sock *vsk);
+u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk);
int virtio_transport_do_socket_init(struct vsock_sock *vsk,
struct vsock_sock *psk);
diff --git a/include/linux/visorbus.h b/include/linux/visorbus.h
deleted file mode 100644
index 0d8bd6769b13..000000000000
--- a/include/linux/visorbus.h
+++ /dev/null
@@ -1,344 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (C) 2010 - 2013 UNISYS CORPORATION
- * All rights reserved.
- */
-
-/*
- * This header file is to be included by other kernel mode components that
- * implement a particular kind of visor_device. Each of these other kernel
- * mode components is called a visor device driver. Refer to visortemplate
- * for a minimal sample visor device driver.
- *
- * There should be nothing in this file that is private to the visorbus
- * bus implementation itself.
- */
-
-#ifndef __VISORBUS_H__
-#define __VISORBUS_H__
-
-#include <linux/device.h>
-
-#define VISOR_CHANNEL_SIGNATURE ('L' << 24 | 'N' << 16 | 'C' << 8 | 'E')
-
-/*
- * enum channel_serverstate
- * @CHANNELSRV_UNINITIALIZED: Channel is in an undefined state.
- * @CHANNELSRV_READY: Channel has been initialized by server.
- */
-enum channel_serverstate {
- CHANNELSRV_UNINITIALIZED = 0,
- CHANNELSRV_READY = 1
-};
-
-/*
- * enum channel_clientstate
- * @CHANNELCLI_DETACHED:
- * @CHANNELCLI_DISABLED: Client can see channel but is NOT allowed to use it
- * unless given TBD* explicit request
- * (should actually be < DETACHED).
- * @CHANNELCLI_ATTACHING: Legacy EFI client request for EFI server to attach.
- * @CHANNELCLI_ATTACHED: Idle, but client may want to use channel any time.
- * @CHANNELCLI_BUSY: Client either wants to use or is using channel.
- * @CHANNELCLI_OWNED: "No worries" state - client can access channel
- * anytime.
- */
-enum channel_clientstate {
- CHANNELCLI_DETACHED = 0,
- CHANNELCLI_DISABLED = 1,
- CHANNELCLI_ATTACHING = 2,
- CHANNELCLI_ATTACHED = 3,
- CHANNELCLI_BUSY = 4,
- CHANNELCLI_OWNED = 5
-};
-
-/*
- * Values for VISOR_CHANNEL_PROTOCOL.Features: This define exists so that
- * a guest can look at the FeatureFlags in the io channel, and configure the
- * driver to use interrupts or not based on this setting. All feature bits for
- * all channels should be defined here. The io channel feature bits are defined
- * below.
- */
-#define VISOR_DRIVER_ENABLES_INTS (0x1ULL << 1)
-#define VISOR_CHANNEL_IS_POLLING (0x1ULL << 3)
-#define VISOR_IOVM_OK_DRIVER_DISABLING_INTS (0x1ULL << 4)
-#define VISOR_DRIVER_DISABLES_INTS (0x1ULL << 5)
-#define VISOR_DRIVER_ENHANCED_RCVBUF_CHECKING (0x1ULL << 6)
-
-/*
- * struct channel_header - Common Channel Header
- * @signature: Signature.
- * @legacy_state: DEPRECATED - being replaced by.
- * @header_size: sizeof(struct channel_header).
- * @size: Total size of this channel in bytes.
- * @features: Flags to modify behavior.
- * @chtype: Channel type: data, bus, control, etc..
- * @partition_handle: ID of guest partition.
- * @handle: Device number of this channel in client.
- * @ch_space_offset: Offset in bytes to channel specific area.
- * @version_id: Struct channel_header Version ID.
- * @partition_index: Index of guest partition.
- * @zone_uuid: Guid of Channel's zone.
- * @cli_str_offset: Offset from channel header to null-terminated
- * ClientString (0 if ClientString not present).
- * @cli_state_boot: CHANNEL_CLIENTSTATE of pre-boot EFI client of this
- * channel.
- * @cmd_state_cli: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see
- * ServerStateUp, ServerStateDown, etc).
- * @cli_state_os: CHANNEL_CLIENTSTATE of Guest OS client of this channel.
- * @ch_characteristic: CHANNEL_CHARACTERISTIC_<xxx>.
- * @cmd_state_srv: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see
- * ServerStateUp, ServerStateDown, etc).
- * @srv_state: CHANNEL_SERVERSTATE.
- * @cli_error_boot: Bits to indicate err states for boot clients, so err
- * messages can be throttled.
- * @cli_error_os: Bits to indicate err states for OS clients, so err
- * messages can be throttled.
- * @filler: Pad out to 128 byte cacheline.
- * @recover_channel: Please add all new single-byte values below here.
- */
-struct channel_header {
- u64 signature;
- u32 legacy_state;
- /* SrvState, CliStateBoot, and CliStateOS below */
- u32 header_size;
- u64 size;
- u64 features;
- guid_t chtype;
- u64 partition_handle;
- u64 handle;
- u64 ch_space_offset;
- u32 version_id;
- u32 partition_index;
- guid_t zone_guid;
- u32 cli_str_offset;
- u32 cli_state_boot;
- u32 cmd_state_cli;
- u32 cli_state_os;
- u32 ch_characteristic;
- u32 cmd_state_srv;
- u32 srv_state;
- u8 cli_error_boot;
- u8 cli_error_os;
- u8 filler[1];
- u8 recover_channel;
-} __packed;
-
-#define VISOR_CHANNEL_ENABLE_INTS (0x1ULL << 0)
-
-/*
- * struct signal_queue_header - Subheader for the Signal Type variation of the
- * Common Channel.
- * @version: SIGNAL_QUEUE_HEADER Version ID.
- * @chtype: Queue type: storage, network.
- * @size: Total size of this queue in bytes.
- * @sig_base_offset: Offset to signal queue area.
- * @features: Flags to modify behavior.
- * @num_sent: Total # of signals placed in this queue.
- * @num_overflows: Total # of inserts failed due to full queue.
- * @signal_size: Total size of a signal for this queue.
- * @max_slots: Max # of slots in queue, 1 slot is always empty.
- * @max_signals: Max # of signals in queue (MaxSignalSlots-1).
- * @head: Queue head signal #.
- * @num_received: Total # of signals removed from this queue.
- * @tail: Queue tail signal.
- * @reserved1: Reserved field.
- * @reserved2: Reserved field.
- * @client_queue:
- * @num_irq_received: Total # of Interrupts received. This is incremented by the
- * ISR in the guest windows driver.
- * @num_empty: Number of times that visor_signal_remove is called and
- * returned Empty Status.
- * @errorflags: Error bits set during SignalReinit to denote trouble with
- * client's fields.
- * @filler: Pad out to 64 byte cacheline.
- */
-struct signal_queue_header {
- /* 1st cache line */
- u32 version;
- u32 chtype;
- u64 size;
- u64 sig_base_offset;
- u64 features;
- u64 num_sent;
- u64 num_overflows;
- u32 signal_size;
- u32 max_slots;
- u32 max_signals;
- u32 head;
- /* 2nd cache line */
- u64 num_received;
- u32 tail;
- u32 reserved1;
- u64 reserved2;
- u64 client_queue;
- u64 num_irq_received;
- u64 num_empty;
- u32 errorflags;
- u8 filler[12];
-} __packed;
-
-/* VISORCHANNEL Guids */
-/* {414815ed-c58c-11da-95a9-00e08161165f} */
-#define VISOR_VHBA_CHANNEL_GUID \
- GUID_INIT(0x414815ed, 0xc58c, 0x11da, \
- 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
-#define VISOR_VHBA_CHANNEL_GUID_STR \
- "414815ed-c58c-11da-95a9-00e08161165f"
-struct visorchipset_state {
- u32 created:1;
- u32 attached:1;
- u32 configured:1;
- u32 running:1;
- /* Remaining bits in this 32-bit word are reserved. */
-};
-
-/**
- * struct visor_device - A device type for things "plugged" into the visorbus
- * bus
- * @visorchannel: Points to the channel that the device is
- * associated with.
- * @channel_type_guid: Identifies the channel type to the bus driver.
- * @device: Device struct meant for use by the bus driver
- * only.
- * @list_all: Used by the bus driver to enumerate devices.
- * @timer: Timer fired periodically to do interrupt-type
- * activity.
- * @being_removed: Indicates that the device is being removed from
- * the bus. Private bus driver use only.
- * @visordriver_callback_lock: Used by the bus driver to lock when adding and
- * removing devices.
- * @pausing:                   Indicates that a change towards a paused state
- * is in progress. Only modified by the bus driver.
- * @resuming: Indicates that a change towards a running state
- * is in progress. Only modified by the bus driver.
- * @chipset_bus_no: Private field used by the bus driver.
- * @chipset_dev_no:            Private field used by the bus driver.
- * @state: Used to indicate the current state of the
- * device.
- * @inst: Unique GUID for this instance of the device.
- * @name: Name of the device.
- * @pending_msg_hdr: For private use by bus driver to respond to
- * hypervisor requests.
- * @vbus_hdr_info: A pointer to header info. Private use by bus
- * driver.
- * @partition_guid:            Indicates client partition id. This should be the
- * same across all visor_devices in the current
- * guest. Private use by bus driver only.
- */
-struct visor_device {
- struct visorchannel *visorchannel;
- guid_t channel_type_guid;
- /* These fields are for private use by the bus driver only. */
- struct device device;
- struct list_head list_all;
- struct timer_list timer;
- bool timer_active;
- bool being_removed;
- struct mutex visordriver_callback_lock; /* synchronize probe/remove */
- bool pausing;
- bool resuming;
- u32 chipset_bus_no;
- u32 chipset_dev_no;
- struct visorchipset_state state;
- guid_t inst;
- u8 *name;
- struct controlvm_message_header *pending_msg_hdr;
- void *vbus_hdr_info;
- guid_t partition_guid;
- struct dentry *debugfs_dir;
- struct dentry *debugfs_bus_info;
-};
-
-#define to_visor_device(x) container_of(x, struct visor_device, device)
-
-typedef void (*visorbus_state_complete_func) (struct visor_device *dev,
- int status);
-
-/*
- * This struct describes a specific visor channel, by providing its GUID, name,
- * and sizes.
- */
-struct visor_channeltype_descriptor {
- const guid_t guid;
- const char *name;
- u64 min_bytes;
- u32 version;
-};
-
-/**
- * struct visor_driver - Information provided by each visor driver when it
- * registers with the visorbus driver
- * @name: Name of the visor driver.
- * @owner: The module owner.
- * @channel_types: Types of channels handled by this driver, ending with
- * a zero GUID. Our specialized BUS.match() method knows
- * about this list, and uses it to determine whether this
- * driver will in fact handle a new device that it has
- * detected.
- * @probe: Called when a new device comes online, by our probe()
- * function specified by driver.probe() (triggered
- * ultimately by some call to driver_register(),
- * bus_add_driver(), or driver_attach()).
- * @remove: Called when a new device is removed, by our remove()
- * function specified by driver.remove() (triggered
- * ultimately by some call to device_release_driver()).
- * @channel_interrupt: Called periodically, whenever there is a possibility
- * that "something interesting" may have happened to the
- * channel.
- * @pause: Called to initiate a change of the device's state. If
- * the return value is < 0, there was an error and the
- * state transition will NOT occur. If the return value
- * is >= 0, then the state transition was INITIATED
- * successfully, and complete_func() will be called (or
- * was just called) with the final status when either the
- * state transition fails or completes successfully.
- * @resume: Behaves similar to pause.
- * @driver: Private reference to the device driver. For use by bus
- * driver only.
- */
-struct visor_driver {
- const char *name;
- struct module *owner;
- struct visor_channeltype_descriptor *channel_types;
- int (*probe)(struct visor_device *dev);
- void (*remove)(struct visor_device *dev);
- void (*channel_interrupt)(struct visor_device *dev);
- int (*pause)(struct visor_device *dev,
- visorbus_state_complete_func complete_func);
- int (*resume)(struct visor_device *dev,
- visorbus_state_complete_func complete_func);
-
- /* These fields are for private use by the bus driver only. */
- struct device_driver driver;
-};
-
-#define to_visor_driver(x) (container_of(x, struct visor_driver, driver))
-
-int visor_check_channel(struct channel_header *ch, struct device *dev,
- const guid_t *expected_uuid, char *chname,
- u64 expected_min_bytes, u32 expected_version,
- u64 expected_signature);
-
-int visorbus_register_visor_driver(struct visor_driver *drv);
-void visorbus_unregister_visor_driver(struct visor_driver *drv);
-int visorbus_read_channel(struct visor_device *dev,
- unsigned long offset, void *dest,
- unsigned long nbytes);
-int visorbus_write_channel(struct visor_device *dev,
- unsigned long offset, void *src,
- unsigned long nbytes);
-int visorbus_enable_channel_interrupts(struct visor_device *dev);
-void visorbus_disable_channel_interrupts(struct visor_device *dev);
-
-int visorchannel_signalremove(struct visorchannel *channel, u32 queue,
- void *msg);
-int visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
- void *msg);
-bool visorchannel_signalempty(struct visorchannel *channel, u32 queue);
-const guid_t *visorchannel_get_guid(struct visorchannel *channel);
-
-#define BUS_ROOT_DEVICE UINT_MAX
-struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
- struct visor_device *from);
-#endif
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 47a3441cf4c4..3518dba1e02f 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -20,21 +20,35 @@
#define HIGHMEM_ZONE(xx)
#endif
-#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, HIGHMEM_ZONE(xx) xx##_MOVABLE
+#ifdef CONFIG_ZONE_DEVICE
+#define DEVICE_ZONE(xx) xx##_DEVICE,
+#else
+#define DEVICE_ZONE(xx)
+#endif
+
+#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, \
+ HIGHMEM_ZONE(xx) xx##_MOVABLE, DEVICE_ZONE(xx)
enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
- FOR_ALL_ZONES(PGALLOC),
- FOR_ALL_ZONES(ALLOCSTALL),
- FOR_ALL_ZONES(PGSCAN_SKIP),
+ FOR_ALL_ZONES(PGALLOC)
+ FOR_ALL_ZONES(ALLOCSTALL)
+ FOR_ALL_ZONES(PGSCAN_SKIP)
PGFREE, PGACTIVATE, PGDEACTIVATE, PGLAZYFREE,
PGFAULT, PGMAJFAULT,
PGLAZYFREED,
PGREFILL,
+ PGREUSE,
PGSTEAL_KSWAPD,
PGSTEAL_DIRECT,
+ PGDEMOTE_KSWAPD,
+ PGDEMOTE_DIRECT,
PGSCAN_KSWAPD,
PGSCAN_DIRECT,
PGSCAN_DIRECT_THROTTLE,
+ PGSCAN_ANON,
+ PGSCAN_FILE,
+ PGSTEAL_ANON,
+ PGSTEAL_FILE,
#ifdef CONFIG_NUMA
PGSCAN_ZONE_RECLAIM_FAILED,
#endif
@@ -52,6 +66,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#endif
#ifdef CONFIG_MIGRATION
PGMIGRATE_SUCCESS, PGMIGRATE_FAIL,
+ THP_MIGRATION_SUCCESS,
+ THP_MIGRATION_FAIL,
+ THP_MIGRATION_SPLIT,
#endif
#ifdef CONFIG_COMPACTION
COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED,
@@ -63,6 +80,10 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#ifdef CONFIG_HUGETLB_PAGE
HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
+#ifdef CONFIG_CMA
+ CMA_ALLOC_SUCCESS,
+ CMA_ALLOC_FAIL,
+#endif
UNEVICTABLE_PGCULLED, /* culled to noreclaim list */
UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */
UNEVICTABLE_PGRESCUED, /* rescued from noreclaim list */
@@ -73,14 +94,20 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
THP_FAULT_ALLOC,
THP_FAULT_FALLBACK,
+ THP_FAULT_FALLBACK_CHARGE,
THP_COLLAPSE_ALLOC,
THP_COLLAPSE_ALLOC_FAILED,
THP_FILE_ALLOC,
+ THP_FILE_FALLBACK,
+ THP_FILE_FALLBACK_CHARGE,
THP_FILE_MAPPED,
THP_SPLIT_PAGE,
THP_SPLIT_PAGE_FAILED,
THP_DEFERRED_SPLIT_PAGE,
THP_SPLIT_PMD,
+ THP_SCAN_EXCEED_NONE_PTE,
+ THP_SCAN_EXCEED_SWAP_PTE,
+ THP_SCAN_EXCEED_SHARED_PTE,
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
THP_SPLIT_PUD,
#endif
@@ -102,19 +129,31 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
NR_TLB_LOCAL_FLUSH_ALL,
NR_TLB_LOCAL_FLUSH_ONE,
#endif /* CONFIG_DEBUG_TLBFLUSH */
-#ifdef CONFIG_DEBUG_VM_VMACACHE
- VMACACHE_FIND_CALLS,
- VMACACHE_FIND_HITS,
-#endif
#ifdef CONFIG_SWAP
SWAP_RA,
SWAP_RA_HIT,
+#ifdef CONFIG_KSM
+ KSM_SWPIN_COPY,
+#endif
+#endif
+#ifdef CONFIG_KSM
+ COW_KSM,
+#endif
+#ifdef CONFIG_ZSWAP
+ ZSWPIN,
+ ZSWPOUT,
+#endif
+#ifdef CONFIG_X86
+ DIRECT_MAP_LEVEL2_SPLIT,
+ DIRECT_MAP_LEVEL3_SPLIT,
#endif
NR_VM_EVENT_ITEMS
};
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define THP_FILE_ALLOC ({ BUILD_BUG(); 0; })
+#define THP_FILE_FALLBACK ({ BUILD_BUG(); 0; })
+#define THP_FILE_FALLBACK_CHARGE ({ BUILD_BUG(); 0; })
#define THP_FILE_MAPPED ({ BUILD_BUG(); 0; })
#endif
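
For reference, each zone helper now supplies its own trailing comma, which
is why the FOR_ALL_ZONES() uses in the enum lost theirs. On a configuration
with every zone enabled, the first use expands roughly as:

	/* FOR_ALL_ZONES(PGALLOC) ~> */
	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL,
	PGALLOC_HIGH, PGALLOC_MOVABLE, PGALLOC_DEVICE,
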
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
deleted file mode 100644
index 6fce268a4588..000000000000
--- a/include/linux/vmacache.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_VMACACHE_H
-#define __LINUX_VMACACHE_H
-
-#include <linux/sched.h>
-#include <linux/mm.h>
-
-static inline void vmacache_flush(struct task_struct *tsk)
-{
- memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
-}
-
-extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
-extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
- unsigned long addr);
-
-#ifndef CONFIG_MMU
-extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
- unsigned long start,
- unsigned long end);
-#endif
-
-static inline void vmacache_invalidate(struct mm_struct *mm)
-{
- mm->vmacache_seqnum++;
-}
-
-#endif /* __LINUX_VMACACHE_H */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 0507a162ccd0..096d48aa3437 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -22,31 +22,24 @@ struct notifier_block; /* in notifier.h */
#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT 0x00000010 /* dma_alloc_coherent */
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
-#define VM_NO_GUARD 0x00000040 /* don't add guard page */
+#define VM_NO_GUARD 0x00000040 /* ***DANGEROUS*** don't add guard page */
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
+#define VM_FLUSH_RESET_PERMS 0x00000100 /* reset direct map and flush TLB on unmap, can't be freed in atomic context */
+#define VM_MAP_PUT_PAGES 0x00000200 /* put pages and free array in vfree */
+#define VM_ALLOW_HUGE_VMAP 0x00000400 /* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */
-/*
- * VM_KASAN is used slighly differently depending on CONFIG_KASAN_VMALLOC.
- *
- * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after
- * shadow memory has been mapped. It's used to handle allocation errors so that
- * we don't try to poison shadow on free if it was never allocated.
- *
- * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to
- * determine which allocations need the module shadow freed.
- */
-
-/*
- * Memory with VM_FLUSH_RESET_PERMS cannot be freed in an interrupt or with
- * vfree_atomic().
- */
-#define VM_FLUSH_RESET_PERMS 0x00000100 /* Reset direct map and flush TLB on unmap */
+#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+ !defined(CONFIG_KASAN_VMALLOC)
+#define VM_DEFER_KMEMLEAK 0x00000800 /* defer kmemleak object creation */
+#else
+#define VM_DEFER_KMEMLEAK 0
+#endif
/* bits [20..32] reserved for arch specific ioremap internals */
/*
* Maximum alignment for ioremap() regions.
- * Can be overriden by arch-specific value.
+ * Can be overridden by arch-specific value.
*/
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER (7 + PAGE_SHIFT) /* 128 pages */
@@ -58,6 +51,9 @@ struct vm_struct {
unsigned long size;
unsigned long flags;
struct page **pages;
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
+ unsigned int page_order;
+#endif
unsigned int nr_pages;
phys_addr_t phys_addr;
const void *caller;
@@ -71,25 +67,66 @@ struct vmap_area {
struct list_head list; /* address sorted list */
/*
- * The following three variables can be packed, because
- * a vmap_area object is always one of the three states:
- * 1) in "free" tree (root is vmap_area_root)
- * 2) in "busy" tree (root is free_vmap_area_root)
- * 3) in purge list (head is vmap_purge_list)
+ * The following two variables can be packed, because
+ * a vmap_area object can be either:
+ * 1) in "free" tree (root is free_vmap_area_root)
+ * 2) or "busy" tree (root is vmap_area_root)
*/
union {
unsigned long subtree_max_size; /* in "free" tree */
struct vm_struct *vm; /* in "busy" tree */
- struct llist_node purge_list; /* in purge list */
};
};
+/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
+#ifndef arch_vmap_p4d_supported
+static inline bool arch_vmap_p4d_supported(pgprot_t prot)
+{
+ return false;
+}
+#endif
+
+#ifndef arch_vmap_pud_supported
+static inline bool arch_vmap_pud_supported(pgprot_t prot)
+{
+ return false;
+}
+#endif
+
+#ifndef arch_vmap_pmd_supported
+static inline bool arch_vmap_pmd_supported(pgprot_t prot)
+{
+ return false;
+}
+#endif
+
+#ifndef arch_vmap_pte_range_map_size
+static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
+ u64 pfn, unsigned int max_page_shift)
+{
+ return PAGE_SIZE;
+}
+#endif
+
+#ifndef arch_vmap_pte_supported_shift
+static inline int arch_vmap_pte_supported_shift(unsigned long size)
+{
+ return PAGE_SHIFT;
+}
+#endif
+
+#ifndef arch_vmap_pgprot_tagged
+static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
+{
+ return prot;
+}
+#endif
+
/*
* Highlevel APIs for driver use
*/
extern void vm_unmap_ram(const void *mem, unsigned int count);
-extern void *vm_map_ram(struct page **pages, unsigned int count,
- int node, pgprot_t prot);
+extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);
#ifdef CONFIG_MMU
@@ -102,47 +139,57 @@ static inline void vmalloc_init(void)
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif
-extern void *vmalloc(unsigned long size);
-extern void *vzalloc(unsigned long size);
-extern void *vmalloc_user(unsigned long size);
-extern void *vmalloc_node(unsigned long size, int node);
-extern void *vzalloc_node(unsigned long size, int node);
-extern void *vmalloc_user_node_flags(unsigned long size, int node, gfp_t flags);
-extern void *vmalloc_exec(unsigned long size);
-extern void *vmalloc_32(unsigned long size);
-extern void *vmalloc_32_user(unsigned long size);
-extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
+extern void *vmalloc(unsigned long size) __alloc_size(1);
+extern void *vzalloc(unsigned long size) __alloc_size(1);
+extern void *vmalloc_user(unsigned long size) __alloc_size(1);
+extern void *vmalloc_node(unsigned long size, int node) __alloc_size(1);
+extern void *vzalloc_node(unsigned long size, int node) __alloc_size(1);
+extern void *vmalloc_32(unsigned long size) __alloc_size(1);
+extern void *vmalloc_32_user(unsigned long size) __alloc_size(1);
+extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
pgprot_t prot, unsigned long vm_flags, int node,
- const void *caller);
-#ifndef CONFIG_MMU
-extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
-static inline void *__vmalloc_node_flags_caller(unsigned long size, int node,
- gfp_t flags, void *caller)
-{
- return __vmalloc_node_flags(size, node, flags);
-}
-#else
-extern void *__vmalloc_node_flags_caller(unsigned long size,
- int node, gfp_t flags, void *caller);
-#endif
+ const void *caller) __alloc_size(1);
+void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
+ int node, const void *caller) __alloc_size(1);
+void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
+
+extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
+extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2);
+extern void *__vcalloc(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
+extern void *vcalloc(size_t n, size_t size) __alloc_size(1, 2);
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
extern void *vmap(struct page **pages, unsigned int count,
unsigned long flags, pgprot_t prot);
+void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);
extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
unsigned long uaddr, void *kaddr,
- unsigned long size);
+ unsigned long pgoff, unsigned long size);
extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long pgoff);
-void vmalloc_sync_mappings(void);
-void vmalloc_sync_unmappings(void);
+
+/*
+ * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
+ * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
+ * needs to be called.
+ */
+#ifndef ARCH_PAGE_TABLE_SYNC_MASK
+#define ARCH_PAGE_TABLE_SYNC_MASK 0
+#endif
+
+/*
+ * There is no default implementation for arch_sync_kernel_mappings(); the
+ * compiler is relied upon to optimize calls out when
+ * ARCH_PAGE_TABLE_SYNC_MASK is 0.
+ */
+void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
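[Editor's note: a hedged sketch of the arch side, for a hypothetical port that keeps per-process copies of the kernel page tables; the arch/foo paths and the sync helper are assumptions. The mask makes generic vmalloc/ioremap code call arch_sync_kernel_mappings() whenever it modifies the named levels:]

/* arch/foo/include/asm/pgtable.h */
#define ARCH_PAGE_TABLE_SYNC_MASK	(PGTBL_PGD_MODIFIED | PGTBL_P4D_MODIFIED)

/* arch/foo/mm/pgtable.c */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
	/*
	 * Propagate the updated top-level kernel entries into every
	 * process's page-table copy (assumed helper below).
	 */
	foo_sync_kernel_pgds(start, end);
}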
/*
* Lowlevel-APIs (not for driver use!)
@@ -161,22 +208,33 @@ static inline size_t get_vm_area_size(const struct vm_struct *area)
extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
unsigned long flags, const void *caller);
-extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
- unsigned long start, unsigned long end);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
unsigned long flags,
unsigned long start, unsigned long end,
const void *caller);
+void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
+struct vmap_area *find_vmap_area(unsigned long addr);
+
+static inline bool is_vm_area_hugepages(const void *addr)
+{
+ /*
+ * This may not tell with 100% certainty whether the area is mapped
+ * with > PAGE_SIZE page table entries: if for some reason the
+ * architecture indicates larger sizes are available but decides not
+ * to use them, nothing prevents that. This only indicates the size
+ * of the physical pages allocated in the vmalloc layer.
+ */
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
+ return find_vm_area(addr)->page_order > 0;
+#else
+ return false;
+#endif
+}
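[Editor's note: usage sketch. vmalloc_huge() above merely permits huge mappings, so a caller that depends on them can verify what it actually got; SZ_2M comes from linux/sizes.h:]

void *buf = vmalloc_huge(SZ_2M, GFP_KERNEL);

if (buf && is_vm_area_hugepages(buf))
	pr_debug("vmalloc area backed by order > 0 pages\n");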
-extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
- struct page **pages);
#ifdef CONFIG_MMU
-extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
- pgprot_t prot, struct page **pages);
-extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
-extern void unmap_kernel_range(unsigned long addr, unsigned long size);
+void vunmap_range(unsigned long addr, unsigned long end);
static inline void set_vm_flush_reset_perms(void *addr)
{
struct vm_struct *vm = find_vm_area(addr);
@@ -184,36 +242,18 @@ static inline void set_vm_flush_reset_perms(void *addr)
if (vm)
vm->flags |= VM_FLUSH_RESET_PERMS;
}
+
#else
-static inline int
-map_kernel_range_noflush(unsigned long start, unsigned long size,
- pgprot_t prot, struct page **pages)
-{
- return size >> PAGE_SHIFT;
-}
-static inline void
-unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
-{
-}
-static inline void
-unmap_kernel_range(unsigned long addr, unsigned long size)
-{
-}
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif
-/* Allocate/destroy a 'vmalloc' VM area. */
-extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
-extern void free_vm_area(struct vm_struct *area);
-
-/* for /dev/kmem */
+/* for /proc/kcore */
extern long vread(char *buf, char *addr, unsigned long count);
-extern long vwrite(char *buf, char *addr, unsigned long count);
/*
- * Internals. Dont't use..
+ * Internals. Don't use..
*/
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
@@ -251,4 +291,10 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);
+#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
+bool vmalloc_dump_obj(void *object);
+#else
+static inline bool vmalloc_dump_obj(void *object) { return false; }
+#endif
+
#endif /* _LINUX_VMALLOC_H */
diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h
index 6d28bc433c1c..6a2f51ebbfd3 100644
--- a/include/linux/vmpressure.h
+++ b/include/linux/vmpressure.h
@@ -37,7 +37,7 @@ extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
extern void vmpressure_init(struct vmpressure *vmpr);
extern void vmpressure_cleanup(struct vmpressure *vmpr);
extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
-extern struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr);
+extern struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr);
extern int vmpressure_register_event(struct mem_cgroup *memcg,
struct eventfd_ctx *eventfd,
const char *args);
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 292485f3d24d..19cf5b6892ce 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -8,6 +8,7 @@
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>
+#include <linux/mmdebug.h>
extern int sysctl_stat_interval;
@@ -16,8 +17,8 @@ extern int sysctl_stat_interval;
#define DISABLE_NUMA_STAT 0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
-extern int sysctl_vm_numa_stat_handler(struct ctl_table *table,
- int write, void __user *buffer, size_t *length, loff_t *ppos);
+int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
+ void *buffer, size_t *length, loff_t *ppos);
#endif
struct reclaim_stat {
@@ -26,9 +27,11 @@ struct reclaim_stat {
unsigned nr_congested;
unsigned nr_writeback;
unsigned nr_immediate;
- unsigned nr_activate[2];
+ unsigned nr_pageout;
+ unsigned nr_activate[ANON_AND_FILE];
unsigned nr_ref_keep;
unsigned nr_unmap_fail;
+ unsigned nr_lazyfree_fail;
};
enum writeback_stat_item {
@@ -122,12 +125,6 @@ static inline void vm_events_fold_cpu(int cpu)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif
-#ifdef CONFIG_DEBUG_VM_VMACACHE
-#define count_vm_vmacache_event(x) count_vm_event(x)
-#else
-#define count_vm_vmacache_event(x) do {} while (0)
-#endif
-
#define __count_zid_vm_events(item, zid, delta) \
__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
@@ -135,34 +132,27 @@ static inline void vm_events_fold_cpu(int cpu)
* Zone and node-based page accounting with per cpu differentials.
*/
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
-extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
+extern atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
#ifdef CONFIG_NUMA
-static inline void zone_numa_state_add(long x, struct zone *zone,
- enum numa_stat_item item)
+static inline void zone_numa_event_add(long x, struct zone *zone,
+ enum numa_stat_item item)
{
- atomic_long_add(x, &zone->vm_numa_stat[item]);
- atomic_long_add(x, &vm_numa_stat[item]);
+ atomic_long_add(x, &zone->vm_numa_event[item]);
+ atomic_long_add(x, &vm_numa_event[item]);
}
-static inline unsigned long global_numa_state(enum numa_stat_item item)
+static inline unsigned long zone_numa_event_state(struct zone *zone,
+ enum numa_stat_item item)
{
- long x = atomic_long_read(&vm_numa_stat[item]);
-
- return x;
+ return atomic_long_read(&zone->vm_numa_event[item]);
}
-static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
- enum numa_stat_item item)
+static inline unsigned long
+global_numa_event_state(enum numa_stat_item item)
{
- long x = atomic_long_read(&zone->vm_numa_stat[item]);
- int cpu;
-
- for_each_online_cpu(cpu)
- x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];
-
- return x;
+ return atomic_long_read(&vm_numa_event[item]);
}
#endif /* CONFIG_NUMA */
@@ -190,7 +180,8 @@ static inline unsigned long global_zone_page_state(enum zone_stat_item item)
return x;
}
-static inline unsigned long global_node_page_state(enum node_stat_item item)
+static inline
+unsigned long global_node_page_state_pages(enum node_stat_item item)
{
long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
@@ -200,6 +191,13 @@ static inline unsigned long global_node_page_state(enum node_stat_item item)
return x;
}
+static inline unsigned long global_node_page_state(enum node_stat_item item)
+{
+ VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
+
+ return global_node_page_state_pages(item);
+}
+
static inline unsigned long zone_page_state(struct zone *zone,
enum zone_stat_item item)
{
@@ -225,7 +223,7 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
#ifdef CONFIG_SMP
int cpu;
for_each_online_cpu(cpu)
- x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
+ x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item];
if (x < 0)
x = 0;
@@ -234,15 +232,38 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
}
#ifdef CONFIG_NUMA
-extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
+/* See __count_vm_event comment on why raw_cpu_inc is used. */
+static inline void
+__count_numa_event(struct zone *zone, enum numa_stat_item item)
+{
+ struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
+
+ raw_cpu_inc(pzstats->vm_numa_event[item]);
+}
+
+static inline void
+__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
+{
+ struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
+
+ raw_cpu_add(pzstats->vm_numa_event[item], delta);
+}
+
extern unsigned long sum_zone_node_page_state(int node,
enum zone_stat_item item);
-extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
+extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
enum node_stat_item item);
+extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
+ enum node_stat_item item);
+extern void fold_vm_numa_events(void);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
+#define node_page_state_pages(node, item) global_node_page_state_pages(item)
+static inline void fold_vm_numa_events(void)
+{
+}
#endif /* CONFIG_NUMA */
#ifdef CONFIG_SMP
@@ -274,10 +295,10 @@ void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);
struct ctl_table;
-int vmstat_refresh(struct ctl_table *, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp,
+ loff_t *ppos);
-void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
+void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);
int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
@@ -298,6 +319,17 @@ static inline void __mod_zone_page_state(struct zone *zone,
static inline void __mod_node_page_state(struct pglist_data *pgdat,
enum node_stat_item item, int delta)
{
+ if (vmstat_item_in_bytes(item)) {
+ /*
+ * Only cgroups use subpage accounting right now; at
+ * the global level, these items still change in
+ * multiples of whole pages. Store them as pages
+ * internally to keep the per-cpu counters compact.
+ */
+ VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
+ delta >>= PAGE_SHIFT;
+ }
+
node_page_state_add(delta, pgdat, item);
}
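[Editor's note: a worked example of the byte/page conversion above. Byte-counted items carry a _B suffix (e.g. NR_SLAB_RECLAIMABLE_B); callers pass bytes, which must be whole-page multiples at the node level; pgdat is an assumed variable:]

/* Sketch: add one page worth of reclaimable slab, passed in bytes. */
__mod_node_page_state(pgdat, NR_SLAB_RECLAIMABLE_B, PAGE_SIZE);
/* stored internally as PAGE_SIZE >> PAGE_SHIFT == 1 page */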
@@ -374,9 +406,81 @@ static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }
static inline void drain_zonestat(struct zone *zone,
- struct per_cpu_pageset *pset) { }
+ struct per_cpu_zonestat *pzstats) { }
#endif /* CONFIG_SMP */
+static inline void __zone_stat_mod_folio(struct folio *folio,
+ enum zone_stat_item item, long nr)
+{
+ __mod_zone_page_state(folio_zone(folio), item, nr);
+}
+
+static inline void __zone_stat_add_folio(struct folio *folio,
+ enum zone_stat_item item)
+{
+ __mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
+}
+
+static inline void __zone_stat_sub_folio(struct folio *folio,
+ enum zone_stat_item item)
+{
+ __mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
+}
+
+static inline void zone_stat_mod_folio(struct folio *folio,
+ enum zone_stat_item item, long nr)
+{
+ mod_zone_page_state(folio_zone(folio), item, nr);
+}
+
+static inline void zone_stat_add_folio(struct folio *folio,
+ enum zone_stat_item item)
+{
+ mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
+}
+
+static inline void zone_stat_sub_folio(struct folio *folio,
+ enum zone_stat_item item)
+{
+ mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
+}
+
+static inline void __node_stat_mod_folio(struct folio *folio,
+ enum node_stat_item item, long nr)
+{
+ __mod_node_page_state(folio_pgdat(folio), item, nr);
+}
+
+static inline void __node_stat_add_folio(struct folio *folio,
+ enum node_stat_item item)
+{
+ __mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
+}
+
+static inline void __node_stat_sub_folio(struct folio *folio,
+ enum node_stat_item item)
+{
+ __mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
+}
+
+static inline void node_stat_mod_folio(struct folio *folio,
+ enum node_stat_item item, long nr)
+{
+ mod_node_page_state(folio_pgdat(folio), item, nr);
+}
+
+static inline void node_stat_add_folio(struct folio *folio,
+ enum node_stat_item item)
+{
+ mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
+}
+
+static inline void node_stat_sub_folio(struct folio *folio,
+ enum node_stat_item item)
+{
+ mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
+}
+
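[Editor's note: these folio wrappers replace open-coded mod_*_page_state(..., folio_nr_pages(folio)) pairs at call sites. A sketch of typical writeback-style usage, with the surrounding locking omitted:]

/* Account an entire folio dirty, then clean, in one call each. */
node_stat_add_folio(folio, NR_FILE_DIRTY);
/* ... writeback completes ... */
node_stat_sub_folio(folio, NR_FILE_DIRTY);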
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
int migratetype)
{
@@ -403,7 +507,7 @@ static inline const char *numa_stat_name(enum numa_stat_item item)
static inline const char *node_stat_name(enum node_stat_item item)
{
return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
- NR_VM_NUMA_STAT_ITEMS +
+ NR_VM_NUMA_EVENT_ITEMS +
item];
}
@@ -415,7 +519,7 @@ static inline const char *lru_list_name(enum lru_list lru)
static inline const char *writeback_stat_name(enum writeback_stat_item item)
{
return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
- NR_VM_NUMA_STAT_ITEMS +
+ NR_VM_NUMA_EVENT_ITEMS +
NR_VM_NODE_STAT_ITEMS +
item];
}
@@ -424,11 +528,126 @@ static inline const char *writeback_stat_name(enum writeback_stat_item item)
static inline const char *vm_event_name(enum vm_event_item item)
{
return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
- NR_VM_NUMA_STAT_ITEMS +
+ NR_VM_NUMA_EVENT_ITEMS +
NR_VM_NODE_STAT_ITEMS +
NR_VM_WRITEBACK_STAT_ITEMS +
item];
}
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
+#ifdef CONFIG_MEMCG
+
+void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+ int val);
+
+static inline void mod_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __mod_lruvec_state(lruvec, idx, val);
+ local_irq_restore(flags);
+}
+
+void __mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val);
+
+static inline void mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __mod_lruvec_page_state(page, idx, val);
+ local_irq_restore(flags);
+}
+
+#else
+
+static inline void __mod_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
+{
+ __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+}
+
+static inline void mod_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
+{
+ mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+}
+
+static inline void __mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
+{
+ __mod_node_page_state(page_pgdat(page), idx, val);
+}
+
+static inline void mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
+{
+ mod_node_page_state(page_pgdat(page), idx, val);
+}
+
+#endif /* CONFIG_MEMCG */
+
+static inline void __inc_lruvec_page_state(struct page *page,
+ enum node_stat_item idx)
+{
+ __mod_lruvec_page_state(page, idx, 1);
+}
+
+static inline void __dec_lruvec_page_state(struct page *page,
+ enum node_stat_item idx)
+{
+ __mod_lruvec_page_state(page, idx, -1);
+}
+
+static inline void __lruvec_stat_mod_folio(struct folio *folio,
+ enum node_stat_item idx, int val)
+{
+ __mod_lruvec_page_state(&folio->page, idx, val);
+}
+
+static inline void __lruvec_stat_add_folio(struct folio *folio,
+ enum node_stat_item idx)
+{
+ __lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
+}
+
+static inline void __lruvec_stat_sub_folio(struct folio *folio,
+ enum node_stat_item idx)
+{
+ __lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
+}
+
+static inline void inc_lruvec_page_state(struct page *page,
+ enum node_stat_item idx)
+{
+ mod_lruvec_page_state(page, idx, 1);
+}
+
+static inline void dec_lruvec_page_state(struct page *page,
+ enum node_stat_item idx)
+{
+ mod_lruvec_page_state(page, idx, -1);
+}
+
+static inline void lruvec_stat_mod_folio(struct folio *folio,
+ enum node_stat_item idx, int val)
+{
+ mod_lruvec_page_state(&folio->page, idx, val);
+}
+
+static inline void lruvec_stat_add_folio(struct folio *folio,
+ enum node_stat_item idx)
+{
+ lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
+}
+
+static inline void lruvec_stat_sub_folio(struct folio *folio,
+ enum node_stat_item idx)
+{
+ lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
+}
#endif /* _LINUX_VMSTAT_H */
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index fefb5292403b..6fb663b36f72 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -12,15 +12,20 @@
#include <linux/bits.h>
/* Register offsets. */
-#define VMCI_STATUS_ADDR 0x00
-#define VMCI_CONTROL_ADDR 0x04
-#define VMCI_ICR_ADDR 0x08
-#define VMCI_IMR_ADDR 0x0c
-#define VMCI_DATA_OUT_ADDR 0x10
-#define VMCI_DATA_IN_ADDR 0x14
-#define VMCI_CAPS_ADDR 0x18
-#define VMCI_RESULT_LOW_ADDR 0x1c
-#define VMCI_RESULT_HIGH_ADDR 0x20
+#define VMCI_STATUS_ADDR 0x00
+#define VMCI_CONTROL_ADDR 0x04
+#define VMCI_ICR_ADDR 0x08
+#define VMCI_IMR_ADDR 0x0c
+#define VMCI_DATA_OUT_ADDR 0x10
+#define VMCI_DATA_IN_ADDR 0x14
+#define VMCI_CAPS_ADDR 0x18
+#define VMCI_RESULT_LOW_ADDR 0x1c
+#define VMCI_RESULT_HIGH_ADDR 0x20
+#define VMCI_DATA_OUT_LOW_ADDR 0x24
+#define VMCI_DATA_OUT_HIGH_ADDR 0x28
+#define VMCI_DATA_IN_LOW_ADDR 0x2c
+#define VMCI_DATA_IN_HIGH_ADDR 0x30
+#define VMCI_GUEST_PAGE_SHIFT 0x34
/* Max number of devices. */
#define VMCI_MAX_DEVICES 1
@@ -39,17 +44,27 @@
#define VMCI_CAPS_DATAGRAM BIT(2)
#define VMCI_CAPS_NOTIFICATIONS BIT(3)
#define VMCI_CAPS_PPN64 BIT(4)
+#define VMCI_CAPS_DMA_DATAGRAM BIT(5)
/* Interrupt Cause register bits. */
#define VMCI_ICR_DATAGRAM BIT(0)
#define VMCI_ICR_NOTIFICATION BIT(1)
+#define VMCI_ICR_DMA_DATAGRAM BIT(2)
/* Interrupt Mask register bits. */
#define VMCI_IMR_DATAGRAM BIT(0)
#define VMCI_IMR_NOTIFICATION BIT(1)
+#define VMCI_IMR_DMA_DATAGRAM BIT(2)
-/* Maximum MSI/MSI-X interrupt vectors in the device. */
-#define VMCI_MAX_INTRS 2
+/*
+ * Maximum MSI/MSI-X interrupt vectors in the device.
+ * If VMCI_CAPS_DMA_DATAGRAM is supported by the device,
+ * VMCI_MAX_INTRS_DMA_DATAGRAM vectors are available,
+ * otherwise only VMCI_MAX_INTRS_NOTIFICATION.
+ */
+#define VMCI_MAX_INTRS_NOTIFICATION 2
+#define VMCI_MAX_INTRS_DMA_DATAGRAM 3
+#define VMCI_MAX_INTRS VMCI_MAX_INTRS_DMA_DATAGRAM
/*
* Supported interrupt vectors. There is one for each ICR value above,
@@ -58,6 +73,7 @@
enum {
VMCI_INTR_DATAGRAM = 0,
VMCI_INTR_NOTIFICATION = 1,
+ VMCI_INTR_DMA_DATAGRAM = 2,
};
/*
@@ -66,7 +82,7 @@ enum {
* consists of at least two pages, the memory limit also dictates the
* number of queue pairs a guest can create.
*/
-#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)
+#define VMCI_MAX_GUEST_QP_MEMORY ((size_t)(128 * 1024 * 1024))
#define VMCI_MAX_GUEST_QP_COUNT (VMCI_MAX_GUEST_QP_MEMORY / PAGE_SIZE / 2)
/*
@@ -80,7 +96,53 @@ enum {
* too much kernel memory (especially on vmkernel). We limit a queuepair to
* 32 KB, or 16 KB per queue for symmetrical pairs.
*/
-#define VMCI_MAX_PINNED_QP_MEMORY (32 * 1024)
+#define VMCI_MAX_PINNED_QP_MEMORY ((size_t)(32 * 1024))
+
+/*
+ * The version of the VMCI device that supports MMIO access to registers
+ * requests 256KB for BAR1 whereas the version of VMCI that supports
+ * MSI/MSI-X only requests 8KB. The layout of the larger 256KB region is:
+ * - the first 128KB are used for MSI/MSI-X.
+ * - the following 64KB are used for MMIO register access.
+ * - the remaining 64KB are unused.
+ */
+#define VMCI_WITH_MMIO_ACCESS_BAR_SIZE ((size_t)(256 * 1024))
+#define VMCI_MMIO_ACCESS_OFFSET ((size_t)(128 * 1024))
+#define VMCI_MMIO_ACCESS_SIZE ((size_t)(64 * 1024))
+
+/*
+ * For VMCI devices supporting the VMCI_CAPS_DMA_DATAGRAM capability, the
+ * sending and receiving of datagrams can be performed using DMA to/from
+ * a driver-allocated buffer.
+ * Sending and receiving are handled as follows:
+ * - when sending datagrams, the driver initializes the buffer so that its
+ * data part refers to the outgoing VMCI datagram, sets the busy flag
+ * to 1 and writes the address of the buffer to VMCI_DATA_OUT_HIGH_ADDR
+ * and VMCI_DATA_OUT_LOW_ADDR. Writing to VMCI_DATA_OUT_LOW_ADDR triggers
+ * the device processing of the buffer. When the device has processed the
+ * buffer, it writes the result value to the buffer and then clears the
+ * busy flag.
+ * - when receiving datagrams, the driver initializes the buffer so that its
+ * data part describes the receive buffer, clears the busy flag and
+ * writes the address of the buffer to VMCI_DATA_IN_HIGH_ADDR and
+ * VMCI_DATA_IN_LOW_ADDR. Writing to VMCI_DATA_IN_LOW_ADDR triggers the
+ * device processing of the buffer. The device copies as many available
+ * datagrams into the buffer as possible and then sets the busy flag.
+ * When the busy flag is set, the driver processes the datagrams in the
+ * buffer.
+ */
+struct vmci_data_in_out_header {
+ uint32_t busy;
+ uint32_t opcode;
+ uint32_t size;
+ uint32_t rsvd;
+ uint64_t result;
+};
+
+struct vmci_sg_elem {
+ uint64_t addr;
+ uint64_t size;
+};
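[Editor's note: a hedged sketch of the send half of that protocol, not the actual driver. The device struct, tx_buf/tx_buf_pa fields, dg_size and the vmci_write_reg() helper are assumed names; a real driver adds locking, DMA mapping and timeouts:]

struct vmci_data_in_out_header *hdr = vmci_dev->tx_buf;

hdr->opcode = 0;
hdr->size = dg_size;		/* bytes of the outgoing datagram */
hdr->rsvd = 0;
hdr->busy = 1;			/* buffer handed to the device */

/* High half first; the write to the LOW register triggers processing. */
vmci_write_reg(vmci_dev, upper_32_bits(tx_buf_pa), VMCI_DATA_OUT_HIGH_ADDR);
vmci_write_reg(vmci_dev, lower_32_bits(tx_buf_pa), VMCI_DATA_OUT_LOW_ADDR);

while (READ_ONCE(hdr->busy))	/* device clears busy when done */
	cpu_relax();

/* hdr->result now holds the send result. */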
/*
* We have a fixed set of resource IDs available in the VMX.
@@ -159,7 +221,7 @@ static inline bool vmci_handle_is_invalid(struct vmci_handle h)
*/
#define VMCI_ANON_SRC_CONTEXT_ID VMCI_INVALID_ID
#define VMCI_ANON_SRC_RESOURCE_ID VMCI_INVALID_ID
-static const struct vmci_handle VMCI_ANON_SRC_HANDLE = {
+static const struct vmci_handle __maybe_unused VMCI_ANON_SRC_HANDLE = {
.context = VMCI_ANON_SRC_CONTEXT_ID,
.resource = VMCI_ANON_SRC_RESOURCE_ID
};
diff --git a/include/linux/vringh.h b/include/linux/vringh.h
index d237087eb257..212892cf9822 100644
--- a/include/linux/vringh.h
+++ b/include/linux/vringh.h
@@ -14,6 +14,11 @@
#include <linux/virtio_byteorder.h>
#include <linux/uio.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
+#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
+#include <linux/dma-direction.h>
+#include <linux/vhost_iotlb.h>
+#endif
#include <asm/barrier.h>
/* virtio_ring with information needed for host access. */
@@ -39,6 +44,12 @@ struct vringh {
/* The vring (note: it may contain user pointers!) */
struct vring vring;
+ /* IOTLB for this vring */
+ struct vhost_iotlb *iotlb;
+
+ /* spinlock to synchronize IOTLB accesses */
+ spinlock_t *iotlb_lock;
+
/* The function to call to notify the guest about added buffers */
void (*notify)(struct vringh *);
};
@@ -98,9 +109,9 @@ struct vringh_kiov {
/* Helpers for userspace vrings. */
int vringh_init_user(struct vringh *vrh, u64 features,
unsigned int num, bool weak_barriers,
- struct vring_desc __user *desc,
- struct vring_avail __user *avail,
- struct vring_used __user *used);
+ vring_desc_t __user *desc,
+ vring_avail_t __user *avail,
+ vring_used_t __user *used);
static inline void vringh_iov_init(struct vringh_iov *iov,
struct iovec *iovec, unsigned num)
@@ -189,6 +200,19 @@ static inline void vringh_kiov_cleanup(struct vringh_kiov *kiov)
kiov->iov = NULL;
}
+static inline size_t vringh_kiov_length(struct vringh_kiov *kiov)
+{
+ size_t len = 0;
+ int i;
+
+ for (i = kiov->i; i < kiov->used; i++)
+ len += kiov->iov[i].iov_len;
+
+ return len;
+}
+
+void vringh_kiov_advance(struct vringh_kiov *kiov, size_t len);
+
int vringh_getdesc_kern(struct vringh *vrh,
struct vringh_kiov *riov,
struct vringh_kiov *wiov,
@@ -248,4 +272,40 @@ static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val)
{
return __cpu_to_virtio64(vringh_is_little_endian(vrh), val);
}
+
+#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
+
+void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb,
+ spinlock_t *iotlb_lock);
+
+int vringh_init_iotlb(struct vringh *vrh, u64 features,
+ unsigned int num, bool weak_barriers,
+ struct vring_desc *desc,
+ struct vring_avail *avail,
+ struct vring_used *used);
+
+int vringh_getdesc_iotlb(struct vringh *vrh,
+ struct vringh_kiov *riov,
+ struct vringh_kiov *wiov,
+ u16 *head,
+ gfp_t gfp);
+
+ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
+ struct vringh_kiov *riov,
+ void *dst, size_t len);
+ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
+ struct vringh_kiov *wiov,
+ const void *src, size_t len);
+
+void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num);
+
+int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len);
+
+bool vringh_notify_enable_iotlb(struct vringh *vrh);
+void vringh_notify_disable_iotlb(struct vringh *vrh);
+
+int vringh_need_notify_iotlb(struct vringh *vrh);
+
+#endif /* CONFIG_VHOST_IOTLB */
+
#endif /* _LINUX_VRINGH_H */
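[Editor's note: a consume cycle with the new *_iotlb entry points, sketched under assumed setup: vrh initialized via vringh_init_iotlb(), riov backed by an iovec array via vringh_kiov_init(), and iotlb/iotlb_lock/buf/len provided by the caller; error handling trimmed:]

struct vringh_kiov riov;
u16 head;
int ret;

/* One-time setup: attach the IOTLB and its lock to the vring. */
vringh_set_iotlb(&vrh, iotlb, &iotlb_lock);

ret = vringh_getdesc_iotlb(&vrh, &riov, NULL, &head, GFP_KERNEL);
if (ret == 1) {				/* 1: descriptor chain found */
	vringh_iov_pull_iotlb(&vrh, &riov, buf, len);
	vringh_complete_iotlb(&vrh, head, len);
	if (vringh_need_notify_iotlb(&vrh) > 0)
		vringh_notify(&vrh);	/* existing notify helper */
}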
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index 8dc77e40bc03..c1f5aebef170 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -16,20 +16,9 @@
#include <linux/consolemap.h>
#include <linux/notifier.h>
-/*
- * Presently, a lot of graphics programs do not restore the contents of
- * the higher font pages. Defining this flag will avoid use of them, but
- * will lose support for PIO_FONTRESET. Note that many font operations are
- * not likely to work with these programs anyway; they need to be
- * fixed. The linux/Documentation directory includes a code snippet
- * to save and restore the text font.
- */
-#ifdef CONFIG_VGA_CONSOLE
-#define BROKEN_GRAPHICS_PROGRAMS 1
-#endif
+void kd_mksound(unsigned int hz, unsigned int ticks);
+int kbd_rate(struct kbd_repeat *rep);
-extern void kd_mksound(unsigned int hz, unsigned int ticks);
-extern int kbd_rate(struct kbd_repeat *rep);
extern int fg_console, last_console, want_console;
/* console.c */
@@ -41,7 +30,6 @@ struct vc_data *vc_deallocate(unsigned int console);
void reset_palette(struct vc_data *vc);
void do_blank_screen(int entering_gfx);
void do_unblank_screen(int leaving_gfx);
-void unblank_screen(void);
void poke_blanked_console(void);
int con_font_op(struct vc_data *vc, struct console_font_op *op);
int con_set_cmap(unsigned char __user *cmap);
@@ -73,8 +61,6 @@ int con_set_default_unimap(struct vc_data *vc);
void con_free_unimap(struct vc_data *vc);
int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc);
-#define vc_translate(vc, c) ((vc)->vc_translate[(c) | \
- ((vc)->vc_toggle_meta ? 0x80 : 0)])
#else
static inline int con_set_trans_old(unsigned char __user *table)
{
@@ -123,7 +109,6 @@ int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc)
return 0;
}
-#define vc_translate(vc, c) (c)
#endif
/* vt.c */
@@ -131,11 +116,11 @@ void vt_event_post(unsigned int event, unsigned int old, unsigned int new);
int vt_waitactive(int n);
void change_console(struct vc_data *new_vc);
void reset_vc(struct vc_data *vc);
-extern int do_unbind_con_driver(const struct consw *csw, int first, int last,
- int deflt);
+int do_unbind_con_driver(const struct consw *csw, int first, int last,
+ int deflt);
int vty_init(const struct file_operations *console_fops);
-extern char vt_dont_switch;
+extern bool vt_dont_switch;
extern int default_utf8;
extern int global_cursor_default;
@@ -146,7 +131,7 @@ struct vt_spawn_console {
};
extern struct vt_spawn_console vt_spawn_con;
-extern int vt_move_to_console(unsigned int vt, int alloc);
+int vt_move_to_console(unsigned int vt, int alloc);
/* Interfaces for VC notification of character events (for accessibility etc) */
@@ -155,35 +140,33 @@ struct vt_notifier_param {
unsigned int c; /* Printed char */
};
-extern int register_vt_notifier(struct notifier_block *nb);
-extern int unregister_vt_notifier(struct notifier_block *nb);
+int register_vt_notifier(struct notifier_block *nb);
+int unregister_vt_notifier(struct notifier_block *nb);
-extern void hide_boot_cursor(bool hide);
+void hide_boot_cursor(bool hide);
/* keyboard provided interfaces */
-extern int vt_do_diacrit(unsigned int cmd, void __user *up, int eperm);
-extern int vt_do_kdskbmode(int console, unsigned int arg);
-extern int vt_do_kdskbmeta(int console, unsigned int arg);
-extern int vt_do_kbkeycode_ioctl(int cmd, struct kbkeycode __user *user_kbkc,
- int perm);
-extern int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe,
- int perm, int console);
-extern int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb,
- int perm);
-extern int vt_do_kdskled(int console, int cmd, unsigned long arg, int perm);
-extern int vt_do_kdgkbmode(int console);
-extern int vt_do_kdgkbmeta(int console);
-extern void vt_reset_unicode(int console);
-extern int vt_get_shift_state(void);
-extern void vt_reset_keyboard(int console);
-extern int vt_get_leds(int console, int flag);
-extern int vt_get_kbd_mode_bit(int console, int bit);
-extern void vt_set_kbd_mode_bit(int console, int bit);
-extern void vt_clr_kbd_mode_bit(int console, int bit);
-extern void vt_set_led_state(int console, int leds);
-extern void vt_set_led_state(int console, int leds);
-extern void vt_kbd_con_start(int console);
-extern void vt_kbd_con_stop(int console);
+int vt_do_diacrit(unsigned int cmd, void __user *up, int eperm);
+int vt_do_kdskbmode(unsigned int console, unsigned int arg);
+int vt_do_kdskbmeta(unsigned int console, unsigned int arg);
+int vt_do_kbkeycode_ioctl(int cmd, struct kbkeycode __user *user_kbkc,
+ int perm);
+int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
+ unsigned int console);
+int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm);
+int vt_do_kdskled(unsigned int console, int cmd, unsigned long arg, int perm);
+int vt_do_kdgkbmode(unsigned int console);
+int vt_do_kdgkbmeta(unsigned int console);
+void vt_reset_unicode(unsigned int console);
+int vt_get_shift_state(void);
+void vt_reset_keyboard(unsigned int console);
+int vt_get_leds(unsigned int console, int flag);
+int vt_get_kbd_mode_bit(unsigned int console, int bit);
+void vt_set_kbd_mode_bit(unsigned int console, int bit);
+void vt_clr_kbd_mode_bit(unsigned int console, int bit);
+void vt_set_led_state(unsigned int console, int leds);
+void vt_kbd_con_start(unsigned int console);
+void vt_kbd_con_stop(unsigned int console);
void vc_scrolldelta_helper(struct vc_data *c, int lines,
unsigned int rolled_over, void *_base, unsigned int size);
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index 2cdeca062db3..3684487d01e1 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -3,12 +3,46 @@
#define _LINUX_KERNEL_VTIME_H
#include <linux/context_tracking_state.h>
+#include <linux/sched.h>
+
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/vtime.h>
#endif
+/*
+ * Common vtime APIs
+ */
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void vtime_account_kernel(struct task_struct *tsk);
+extern void vtime_account_idle(struct task_struct *tsk);
+#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+extern void arch_vtime_task_switch(struct task_struct *tsk);
+extern void vtime_user_enter(struct task_struct *tsk);
+extern void vtime_user_exit(struct task_struct *tsk);
+extern void vtime_guest_enter(struct task_struct *tsk);
+extern void vtime_guest_exit(struct task_struct *tsk);
+extern void vtime_init_idle(struct task_struct *tsk, int cpu);
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
+static inline void vtime_user_enter(struct task_struct *tsk) { }
+static inline void vtime_user_exit(struct task_struct *tsk) { }
+static inline void vtime_guest_enter(struct task_struct *tsk) { }
+static inline void vtime_guest_exit(struct task_struct *tsk) { }
+static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
+#endif
-struct task_struct;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+extern void vtime_account_irq(struct task_struct *tsk, unsigned int offset);
+extern void vtime_account_softirq(struct task_struct *tsk);
+extern void vtime_account_hardirq(struct task_struct *tsk);
+extern void vtime_flush(struct task_struct *tsk);
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+static inline void vtime_account_irq(struct task_struct *tsk, unsigned int offset) { }
+static inline void vtime_account_softirq(struct task_struct *tsk) { }
+static inline void vtime_account_hardirq(struct task_struct *tsk) { }
+static inline void vtime_flush(struct task_struct *tsk) { }
+#endif
/*
* vtime_accounting_enabled_this_cpu() definitions/declarations
@@ -18,6 +52,18 @@ struct task_struct;
static inline bool vtime_accounting_enabled_this_cpu(void) { return true; }
extern void vtime_task_switch(struct task_struct *prev);
+static __always_inline void vtime_account_guest_enter(void)
+{
+ vtime_account_kernel(current);
+ current->flags |= PF_VCPU;
+}
+
+static __always_inline void vtime_account_guest_exit(void)
+{
+ vtime_account_kernel(current);
+ current->flags &= ~PF_VCPU;
+}
+
#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)
/*
@@ -49,70 +95,68 @@ static inline void vtime_task_switch(struct task_struct *prev)
vtime_task_switch_generic(prev);
}
+static __always_inline void vtime_account_guest_enter(void)
+{
+ if (vtime_accounting_enabled_this_cpu())
+ vtime_guest_enter(current);
+ else
+ current->flags |= PF_VCPU;
+}
+
+static __always_inline void vtime_account_guest_exit(void)
+{
+ if (vtime_accounting_enabled_this_cpu())
+ vtime_guest_exit(current);
+ else
+ current->flags &= ~PF_VCPU;
+}
+
#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
-static inline bool vtime_accounting_enabled_cpu(int cpu) {return false; }
static inline bool vtime_accounting_enabled_this_cpu(void) { return false; }
static inline void vtime_task_switch(struct task_struct *prev) { }
-#endif
-
-/*
- * Common vtime APIs
- */
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-extern void vtime_account_kernel(struct task_struct *tsk);
-extern void vtime_account_idle(struct task_struct *tsk);
-#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
-static inline void vtime_account_kernel(struct task_struct *tsk) { }
-#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-extern void arch_vtime_task_switch(struct task_struct *tsk);
-extern void vtime_user_enter(struct task_struct *tsk);
-extern void vtime_user_exit(struct task_struct *tsk);
-extern void vtime_guest_enter(struct task_struct *tsk);
-extern void vtime_guest_exit(struct task_struct *tsk);
-extern void vtime_init_idle(struct task_struct *tsk, int cpu);
-#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
-static inline void vtime_user_enter(struct task_struct *tsk) { }
-static inline void vtime_user_exit(struct task_struct *tsk) { }
-static inline void vtime_guest_enter(struct task_struct *tsk) { }
-static inline void vtime_guest_exit(struct task_struct *tsk) { }
-static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
-#endif
+static __always_inline void vtime_account_guest_enter(void)
+{
+ current->flags |= PF_VCPU;
+}
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-extern void vtime_account_irq_enter(struct task_struct *tsk);
-static inline void vtime_account_irq_exit(struct task_struct *tsk)
+static __always_inline void vtime_account_guest_exit(void)
{
- /* On hard|softirq exit we always account to hard|softirq cputime */
- vtime_account_kernel(tsk);
+ current->flags &= ~PF_VCPU;
}
-extern void vtime_flush(struct task_struct *tsk);
-#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
-static inline void vtime_account_irq_exit(struct task_struct *tsk) { }
-static inline void vtime_flush(struct task_struct *tsk) { }
+
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-extern void irqtime_account_irq(struct task_struct *tsk);
+extern void irqtime_account_irq(struct task_struct *tsk, unsigned int offset);
#else
-static inline void irqtime_account_irq(struct task_struct *tsk) { }
+static inline void irqtime_account_irq(struct task_struct *tsk, unsigned int offset) { }
#endif
-static inline void account_irq_enter_time(struct task_struct *tsk)
+static inline void account_softirq_enter(struct task_struct *tsk)
+{
+ vtime_account_irq(tsk, SOFTIRQ_OFFSET);
+ irqtime_account_irq(tsk, SOFTIRQ_OFFSET);
+}
+
+static inline void account_softirq_exit(struct task_struct *tsk)
+{
+ vtime_account_softirq(tsk);
+ irqtime_account_irq(tsk, 0);
+}
+
+static inline void account_hardirq_enter(struct task_struct *tsk)
{
- vtime_account_irq_enter(tsk);
- irqtime_account_irq(tsk);
+ vtime_account_irq(tsk, HARDIRQ_OFFSET);
+ irqtime_account_irq(tsk, HARDIRQ_OFFSET);
}
-static inline void account_irq_exit_time(struct task_struct *tsk)
+static inline void account_hardirq_exit(struct task_struct *tsk)
{
- vtime_account_irq_exit(tsk);
- irqtime_account_irq(tsk);
+ vtime_account_hardirq(tsk);
+ irqtime_account_irq(tsk, 0);
}
#endif /* _LINUX_KERNEL_VTIME_H */
diff --git a/include/linux/w1.h b/include/linux/w1.h
index cebf3464bc03..9a2a0ef39018 100644
--- a/include/linux/w1.h
+++ b/include/linux/w1.h
@@ -269,7 +269,7 @@ struct w1_family {
struct list_head family_entry;
u8 fid;
- struct w1_family_ops *fops;
+ const struct w1_family_ops *fops;
const struct of_device_id *of_match_table;
@@ -280,7 +280,7 @@ int w1_register_family(struct w1_family *family);
void w1_unregister_family(struct w1_family *family);
/**
- * module_w1_driver() - Helper macro for registering a 1-Wire families
+ * module_w1_family() - Helper macro for registering a 1-Wire family
* @__w1_family: w1_family struct
*
* Helper macro for 1-Wire families which do not do anything special in module
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 3283c8d02137..7f5a51aae0a7 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -20,6 +20,9 @@ int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int
#define WQ_FLAG_EXCLUSIVE 0x01
#define WQ_FLAG_WOKEN 0x02
#define WQ_FLAG_BOOKMARK 0x04
+#define WQ_FLAG_CUSTOM 0x08
+#define WQ_FLAG_DONE 0x10
+#define WQ_FLAG_PRIORITY 0x20
/*
* A single wait-queue entry structure:
@@ -53,7 +56,7 @@ struct task_struct;
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
- .head = { &(name).head, &(name).head } }
+ .head = LIST_HEAD_INIT(name.head) }
#define DECLARE_WAIT_QUEUE_HEAD(name) \
struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
@@ -162,11 +165,20 @@ static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
+extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
- list_add(&wq_entry->entry, &wq_head->head);
+ struct list_head *head = &wq_head->head;
+ struct wait_queue_entry *wq;
+
+ list_for_each_entry(wq, &wq_head->head, entry) {
+ if (!(wq->flags & WQ_FLAG_PRIORITY))
+ break;
+ head = &wq->entry;
+ }
+ list_add(&wq_entry->entry, head);
}
/*
@@ -205,6 +217,7 @@ void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
+void __wake_up_pollfree(struct wait_queue_head *wq_head);
#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
@@ -233,6 +246,31 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
#define wake_up_interruptible_sync_poll_locked(x, m) \
__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
+/**
+ * wake_up_pollfree - signal that a polled waitqueue is going away
+ * @wq_head: the wait queue head
+ *
+ * In the very rare cases where a ->poll() implementation uses a waitqueue whose
+ * lifetime is tied to a task rather than to the 'struct file' being polled,
+ * this function must be called before the waitqueue is freed so that
+ * non-blocking polls (e.g. epoll) are notified that the queue is going away.
+ *
+ * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
+ * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
+ */
+static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+ /*
+ * For performance reasons, we don't always take the queue lock here.
+ * Therefore, we might race with someone removing the last entry from
+ * the queue, and proceed while they still hold the queue lock.
+ * However, rcu_read_lock() is required to be held in such cases, so we
+ * can safely proceed with an RCU-delayed free.
+ */
+ if (waitqueue_active(wq_head))
+ __wake_up_pollfree(wq_head);
+}
+
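[Editor's note: the documented contract has two halves: notify, then RCU-delay the free. A hedged sketch of a teardown path honoring both; struct foo_thread and the function name are illustrative assumptions:]

static void foo_thread_free(struct foo_thread *t)
{
	/* Tell non-blocking pollers (e.g. epoll) the queue is going away. */
	wake_up_pollfree(&t->wait);

	/* RCU-delay the free so racing waiters can still touch ->wait. */
	synchronize_rcu();
	kfree(t);
}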
#define ___wait_cond_timeout(condition) \
({ \
bool __cond = (condition); \
@@ -243,7 +281,7 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
#define ___wait_is_interruptible(state) \
(!__builtin_constant_p(state) || \
- state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
+ (state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
@@ -323,8 +361,8 @@ do { \
} while (0)
#define __wait_event_freezable(wq_head, condition) \
- ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
- freezable_schedule())
+ ___wait_event(wq_head, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE), \
+ 0, 0, schedule())
/**
* wait_event_freezable - sleep (or freeze) until a condition gets true
@@ -382,8 +420,8 @@ do { \
#define __wait_event_freezable_timeout(wq_head, condition, timeout) \
___wait_event(wq_head, ___wait_cond_timeout(condition), \
- TASK_INTERRUPTIBLE, 0, timeout, \
- __ret = freezable_schedule_timeout(__ret))
+ (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 0, timeout, \
+ __ret = schedule_timeout(__ret))
/*
* like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
@@ -506,10 +544,11 @@ do { \
\
hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \
HRTIMER_MODE_REL); \
- if ((timeout) != KTIME_MAX) \
- hrtimer_start_range_ns(&__t.timer, timeout, \
- current->timer_slack_ns, \
- HRTIMER_MODE_REL); \
+ if ((timeout) != KTIME_MAX) { \
+ hrtimer_set_expires_range_ns(&__t.timer, timeout, \
+ current->timer_slack_ns); \
+ hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL); \
+ } \
\
__ret = ___wait_event(wq_head, condition, state, 0, 0, \
if (!__t.task) { \
@@ -603,8 +642,8 @@ do { \
#define __wait_event_freezable_exclusive(wq, condition) \
- ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
- freezable_schedule())
+ ___wait_event(wq, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 1, 0,\
+ schedule())
#define wait_event_freezable_exclusive(wq, condition) \
({ \
@@ -893,6 +932,34 @@ extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
__ret; \
})
+#define __wait_event_state(wq, condition, state) \
+ ___wait_event(wq, condition, state, 0, 0, schedule())
+
+/**
+ * wait_event_state - sleep until a condition gets true
+ * @wq_head: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @state: state to sleep in
+ *
+ * The process is put to sleep (@state) until the @condition evaluates to true
+ * or a signal is received (when allowed by @state). The @condition is checked
+ * each time the waitqueue @wq_head is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a signal
+ * (when allowed by @state) and 0 if @condition evaluated to true.
+ */
+#define wait_event_state(wq_head, condition, state) \
+({ \
+ int __ret = 0; \
+ might_sleep(); \
+ if (!(condition)) \
+ __ret = __wait_event_state(wq_head, condition, state); \
+ __ret; \
+})
+
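[Editor's note: wait_event_state() generalizes the interruptible/killable variants by letting the caller pick the sleep state. A usage sketch combining it with the TASK_FREEZABLE flag used elsewhere in this patch; wq and done are assumed:]

ret = wait_event_state(wq, READ_ONCE(done),
		       TASK_KILLABLE | TASK_FREEZABLE);
if (ret)
	return ret;	/* -ERESTARTSYS: fatal signal arrived first */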
#define __wait_event_killable_timeout(wq_head, condition, timeout) \
___wait_event(wq_head, ___wait_cond_timeout(condition), \
TASK_KILLABLE, 0, timeout, \
@@ -1124,7 +1191,7 @@ do { \
* Waitqueues which are removed from the waitqueue_head at wakeup time
*/
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
-void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
+bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
@@ -1148,4 +1215,7 @@ int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, i
(wait)->flags = 0; \
} while (0)
+typedef int (*task_call_f)(struct task_struct *p, void *arg);
+extern int task_call_func(struct task_struct *p, task_call_f func, void *arg);
+
#endif /* _LINUX_WAIT_H */
diff --git a/include/linux/wait_api.h b/include/linux/wait_api.h
new file mode 100644
index 000000000000..4e930548935a
--- /dev/null
+++ b/include/linux/wait_api.h
@@ -0,0 +1 @@
+#include <linux/wait.h>
diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h
index 7dec36aecbd9..7725b7579b78 100644
--- a/include/linux/wait_bit.h
+++ b/include/linux/wait_bit.h
@@ -71,7 +71,7 @@ static inline int
wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
might_sleep();
- if (!test_bit(bit, word))
+ if (!test_bit_acquire(bit, word))
return 0;
return out_of_line_wait_on_bit(word, bit,
bit_wait,
@@ -96,7 +96,7 @@ static inline int
wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
{
might_sleep();
- if (!test_bit(bit, word))
+ if (!test_bit_acquire(bit, word))
return 0;
return out_of_line_wait_on_bit(word, bit,
bit_wait_io,
@@ -123,7 +123,7 @@ wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
unsigned long timeout)
{
might_sleep();
- if (!test_bit(bit, word))
+ if (!test_bit_acquire(bit, word))
return 0;
return out_of_line_wait_on_bit_timeout(word, bit,
bit_wait_timeout,
@@ -151,7 +151,7 @@ wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
unsigned mode)
{
might_sleep();
- if (!test_bit(bit, word))
+ if (!test_bit_acquire(bit, word))
return 0;
return out_of_line_wait_on_bit(word, bit, action, mode);
}
diff --git a/include/linux/watch_queue.h b/include/linux/watch_queue.h
new file mode 100644
index 000000000000..fc6bba20273b
--- /dev/null
+++ b/include/linux/watch_queue.h
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/* User-mappable watch queue
+ *
+ * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * See Documentation/core-api/watch_queue.rst
+ */
+
+#ifndef _LINUX_WATCH_QUEUE_H
+#define _LINUX_WATCH_QUEUE_H
+
+#include <uapi/linux/watch_queue.h>
+#include <linux/kref.h>
+#include <linux/rcupdate.h>
+
+#ifdef CONFIG_WATCH_QUEUE
+
+struct cred;
+
+struct watch_type_filter {
+ enum watch_notification_type type;
+ __u32 subtype_filter[1]; /* Bitmask of subtypes to filter on */
+ __u32 info_filter; /* Filter on watch_notification::info */
+ __u32 info_mask; /* Mask of relevant bits in info_filter */
+};
+
+struct watch_filter {
+ union {
+ struct rcu_head rcu;
+ /* Bitmask of accepted types */
+ DECLARE_BITMAP(type_filter, WATCH_TYPE__NR);
+ };
+ u32 nr_filters; /* Number of filters */
+ struct watch_type_filter filters[];
+};
+
+struct watch_queue {
+ struct rcu_head rcu;
+ struct watch_filter __rcu *filter;
+ struct pipe_inode_info *pipe; /* The pipe we're using as a buffer */
+ struct hlist_head watches; /* Contributory watches */
+ struct page **notes; /* Preallocated notifications */
+ unsigned long *notes_bitmap; /* Allocation bitmap for notes */
+ struct kref usage; /* Object usage count */
+ spinlock_t lock;
+ unsigned int nr_notes; /* Number of notes */
+ unsigned int nr_pages; /* Number of pages in notes[] */
+ bool defunct; /* T when queues closed */
+};
+
+/*
+ * Representation of a watch on an object.
+ */
+struct watch {
+ union {
+ struct rcu_head rcu;
+ u32 info_id; /* ID to be OR'd in to info field */
+ };
+ struct watch_queue __rcu *queue; /* Queue to post events to */
+ struct hlist_node queue_node; /* Link in queue->watches */
+ struct watch_list __rcu *watch_list;
+ struct hlist_node list_node; /* Link in watch_list->watchers */
+ const struct cred *cred; /* Creds of the owner of the watch */
+ void *private; /* Private data for the watched object */
+ u64 id; /* Internal identifier */
+ struct kref usage; /* Object usage count */
+};
+
+/*
+ * List of watches on an object.
+ */
+struct watch_list {
+ struct rcu_head rcu;
+ struct hlist_head watchers;
+ void (*release_watch)(struct watch *);
+ spinlock_t lock;
+};
+
+extern void __post_watch_notification(struct watch_list *,
+ struct watch_notification *,
+ const struct cred *,
+ u64);
+extern struct watch_queue *get_watch_queue(int);
+extern void put_watch_queue(struct watch_queue *);
+extern void init_watch(struct watch *, struct watch_queue *);
+extern int add_watch_to_object(struct watch *, struct watch_list *);
+extern int remove_watch_from_object(struct watch_list *, struct watch_queue *, u64, bool);
+extern long watch_queue_set_size(struct pipe_inode_info *, unsigned int);
+extern long watch_queue_set_filter(struct pipe_inode_info *,
+ struct watch_notification_filter __user *);
+extern int watch_queue_init(struct pipe_inode_info *);
+extern void watch_queue_clear(struct watch_queue *);
+
+static inline void init_watch_list(struct watch_list *wlist,
+ void (*release_watch)(struct watch *))
+{
+ INIT_HLIST_HEAD(&wlist->watchers);
+ spin_lock_init(&wlist->lock);
+ wlist->release_watch = release_watch;
+}
+
+static inline void post_watch_notification(struct watch_list *wlist,
+ struct watch_notification *n,
+ const struct cred *cred,
+ u64 id)
+{
+ if (unlikely(wlist))
+ __post_watch_notification(wlist, n, cred, id);
+}
+
+static inline void remove_watch_list(struct watch_list *wlist, u64 id)
+{
+ if (wlist) {
+ remove_watch_from_object(wlist, NULL, id, true);
+ kfree_rcu(wlist, rcu);
+ }
+}
+
+/**
+ * watch_sizeof - Calculate the information part of the size of a watch record,
+ * given the structure size.
+ */
+#define watch_sizeof(STRUCT) (sizeof(STRUCT) << WATCH_INFO_LENGTH__SHIFT)
+
+#else
+static inline int watch_queue_init(struct pipe_inode_info *pipe)
+{
+ return -ENOPKG;
+}
+
+#endif
+
+#endif /* _LINUX_WATCH_QUEUE_H */
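[Editor's note: putting the new API together, a watched object embeds a watch_list, initializes it once, and posts notifications through it. A hedged sketch of the producer side; struct foo_object and the META subtype choice are illustrative assumptions:]

struct foo_object {
	struct watch_list watchers;
};

static void foo_object_init(struct foo_object *obj)
{
	init_watch_list(&obj->watchers, NULL);	/* no release hook */
}

static void foo_notify_removal(struct foo_object *obj, u64 id)
{
	struct watch_notification n = {
		.type	 = WATCH_TYPE_META,
		.subtype = WATCH_META_REMOVAL_NOTIFICATION,
		.info	 = watch_sizeof(n),
	};

	/* Delivered to every watch on the list, filtered per queue. */
	post_watch_notification(&obj->watchers, &n, current_cred(), id);
}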
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index 417d9f37077a..99660197a36c 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -37,15 +37,15 @@ struct watchdog_governor;
*
* The watchdog_ops structure contains a list of low-level operations
* that control a watchdog device. It also contains the module that owns
- * these operations. The start and stop function are mandatory, all other
+ * these operations. The start function is mandatory; all other
* functions are optional.
*/
struct watchdog_ops {
struct module *owner;
/* mandatory operations */
int (*start)(struct watchdog_device *);
- int (*stop)(struct watchdog_device *);
/* optional operations */
+ int (*stop)(struct watchdog_device *);
int (*ping)(struct watchdog_device *);
unsigned int (*status)(struct watchdog_device *);
int (*set_timeout)(struct watchdog_device *, unsigned int);
@@ -107,6 +107,7 @@ struct watchdog_device {
unsigned int max_hw_heartbeat_ms;
struct notifier_block reboot_nb;
struct notifier_block restart_nb;
+ struct notifier_block pm_nb;
void *driver_data;
struct watchdog_core_data *wd_data;
unsigned long status;
@@ -116,6 +117,7 @@ struct watchdog_device {
#define WDOG_STOP_ON_REBOOT 2 /* Should be stopped on reboot */
#define WDOG_HW_RUNNING 3 /* True if HW watchdog running */
#define WDOG_STOP_ON_UNREGISTER 4 /* Should be stopped on unregister */
+#define WDOG_NO_PING_ON_SUSPEND 5 /* Ping worker should be stopped on suspend */
struct list_head deferred;
};
@@ -156,6 +158,12 @@ static inline void watchdog_stop_on_unregister(struct watchdog_device *wdd)
set_bit(WDOG_STOP_ON_UNREGISTER, &wdd->status);
}
+/* Use the following function to stop the wdog ping worker when suspending */
+static inline void watchdog_stop_ping_on_suspend(struct watchdog_device *wdd)
+{
+ set_bit(WDOG_NO_PING_ON_SUSPEND, &wdd->status);
+}
+
/* Use the following function to check if a timeout value is invalid */
static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigned int t)
{
@@ -209,6 +217,10 @@ extern int watchdog_init_timeout(struct watchdog_device *wdd,
unsigned int timeout_parm, struct device *dev);
extern int watchdog_register_device(struct watchdog_device *);
extern void watchdog_unregister_device(struct watchdog_device *);
+int watchdog_dev_suspend(struct watchdog_device *wdd);
+int watchdog_dev_resume(struct watchdog_device *wdd);
+
+int watchdog_set_last_hw_keepalive(struct watchdog_device *, unsigned int);
/* devres register variant */
int devm_watchdog_register_device(struct device *dev, struct watchdog_device *);
diff --git a/include/linux/wimax/debug.h b/include/linux/wimax/debug.h
deleted file mode 100644
index 4dd2c1cea6a9..000000000000
--- a/include/linux/wimax/debug.h
+++ /dev/null
@@ -1,491 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Linux WiMAX
- * Collection of tools to manage debug operations.
- *
- * Copyright (C) 2005-2007 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * Don't #include this file directly, read on!
- *
- * EXECUTING DEBUGGING ACTIONS OR NOT
- *
- * The main thing this framework provides is decission power to take a
- * debug action (like printing a message) if the current debug level
- * allows it.
- *
- * The decission power is at two levels: at compile-time (what does
- * not make it is compiled out) and at run-time. The run-time
- * selection is done per-submodule (as they are declared by the user
- * of the framework).
- *
- * A call to d_test(L) (L being the target debug level) returns true
- * if the action should be taken because the current debug levels
- * allow it (both compile and run time).
- *
- * It follows that a call to d_test() that can be determined to be
- * always false at compile time will get the code depending on it
- * compiled out by optimization.
- *
- * DEBUG LEVELS
- *
- * It is up to the caller to define how much a debugging level is.
- *
- * Convention sets 0 as "no debug" (so an action marked as debug level 0
- * will always be taken). The increasing debug levels are used for
- * increased verbosity.
- *
- * USAGE
- *
- * Group the code in modules and submodules inside each module [which
- * in most cases maps to Linux modules and .c files that compose
- * those].
- *
- * For each module, there is:
- *
- * - a MODULENAME (single word, legal C identifier)
- *
- * - a debug-levels.h header file that declares the list of
- * submodules and that is included by all .c files that use
- * the debugging tools. The file name can be anything.
- *
- * - some (optional) .c code to manipulate the runtime debug levels
- * through debugfs.
- *
- * The debug-levels.h file would look like:
- *
- * #ifndef __debug_levels__h__
- * #define __debug_levels__h__
- *
- * #define D_MODULENAME modulename
- * #define D_MASTER 10
- *
- * #include <linux/wimax/debug.h>
- *
- * enum d_module {
- * D_SUBMODULE_DECLARE(submodule_1),
- * D_SUBMODULE_DECLARE(submodule_2),
- * ...
- * D_SUBMODULE_DECLARE(submodule_N)
- * };
- *
- * #endif
- *
- * D_MASTER is the maximum compile-time debug level; any debug actions
- * above this will be out. D_MODULENAME is the module name (legal C
- * identifier), which has to be unique for each module (to avoid
- * namespace collisions during linkage). Note those #defines need to
- * be done before #including debug.h
- *
- * We declare N different submodules whose debug level can be
- * independently controlled during runtime.
- *
- * In a .c file of the module (and only in one of them), define the
- * following code:
- *
- * struct d_level D_LEVEL[] = {
- * D_SUBMODULE_DEFINE(submodule_1),
- * D_SUBMODULE_DEFINE(submodule_2),
- * ...
- * D_SUBMODULE_DEFINE(submodule_N),
- * };
- * size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
- *
- * Externs for d_level_MODULENAME and d_level_size_MODULENAME are used
- * and declared in this file using the D_LEVEL and D_LEVEL_SIZE macros
- * #defined also in this file.
- *
- * To manipulate from user space the levels, create a debugfs dentry
- * and then register each submodule with:
- *
- * d_level_register_debugfs("PREFIX_", submodule_X, parent);
- *
- * Where PREFIX_ is a name of your choosing. This will create a debugfs
- * file with a single numeric value that can be used to tweak it. To
- * remove the entries, just use debugfs_remove_recursive() on 'parent'.
- *
- * NOTE: remember that even if this will show attached to some
- * particular instance of a device, the settings are *global*.
- *
- * On each submodule (for example, .c files), the debug infrastructure
- * should be included like this:
- *
- * #define D_SUBMODULE submodule_x // matches one in debug-levels.h
- * #include "debug-levels.h"
- *
- * after #including all your include files.
- *
- * Now you can use the d_*() macros below [d_test(), d_fnstart(),
- * d_fnend(), d_printf(), d_dump()].
- *
- * If their debug level is greater than D_MASTER, they will be
- * compiled out.
- *
- * If their debug level is lower than or equal to D_MASTER but greater
- * than the current debug level of their submodule, they'll be
- * ignored.
- *
- * Otherwise, the action will be performed.
- */
-#ifndef __debug__h__
-#define __debug__h__
-
-#include <linux/types.h>
-#include <linux/slab.h>
-
-struct device;
-
-/* Backend stuff */
-
-/*
- * Debug backend: generate a message header from a 'struct device'
- *
- * @head: buffer where to place the header
- * @head_size: length of @head
- * @dev: pointer to device used to generate a header from. If NULL,
- * an empty ("") header is generated.
- */
-static inline
-void __d_head(char *head, size_t head_size,
- struct device *dev)
-{
- if (dev == NULL)
- head[0] = 0;
- else if ((unsigned long)dev < 4096) {
- printk(KERN_ERR "E: Corrupt dev %p\n", dev);
- WARN_ON(1);
- } else
- snprintf(head, head_size, "%s %s: ",
- dev_driver_string(dev), dev_name(dev));
-}
-
-
-/*
- * Debug backend: log some message if debugging is enabled
- *
- * @l: intended debug level
- * @tag: tag to prefix the message with
- * @dev: 'struct device' associated to this message
- * @f: printf-like format and arguments
- *
- * Note this is optimized out if it doesn't pass the compile-time
- * check; however, it is *always* compiled. This is useful to make
- * sure the printf-like formats and variables are always checked and
- * they don't get bit rot if you have all the debugging disabled.
- */
-#define _d_printf(l, tag, dev, f, a...) \
-do { \
- char head[64]; \
- if (!d_test(l)) \
- break; \
- __d_head(head, sizeof(head), dev); \
- printk(KERN_ERR "%s%s%s: " f, head, __func__, tag, ##a); \
-} while (0)
-
-
-/*
- * CPP syntactic sugar to generate A_B-like symbol names when one of
- * the arguments is a preprocessor #define.
- */
-#define __D_PASTE__(varname, modulename) varname##_##modulename
-#define __D_PASTE(varname, modulename) (__D_PASTE__(varname, modulename))
-#define _D_SUBMODULE_INDEX(_name) (D_SUBMODULE_DECLARE(_name))
-
-
-/*
- * Store a submodule's runtime debug level and name
- */
-struct d_level {
- u8 level;
- const char *name;
-};
-
-
-/*
- * List of available submodules and their debug levels
- *
- * We call them d_level_MODULENAME and d_level_size_MODULENAME; the
- * macros D_LEVEL and D_LEVEL_SIZE contain the name already for
- * convenience.
- *
- * This array and the size are defined on some .c file that is part of
- * the current module.
- */
-#define D_LEVEL __D_PASTE(d_level, D_MODULENAME)
-#define D_LEVEL_SIZE __D_PASTE(d_level_size, D_MODULENAME)
-
-extern struct d_level D_LEVEL[];
-extern size_t D_LEVEL_SIZE;
-
-
-/*
- * Frontend stuff
- *
- *
- * Stuff you need to declare prior to using the actual "debug" actions
- * (defined below).
- */
-
-#ifndef D_MODULENAME
-#error D_MODULENAME is not defined in your debug-levels.h file
-/**
- * D_MODULENAME - Name of the current module
- *
- * #define in your module's debug-levels.h, making sure it is
- * unique. This has to be a legal C identifier.
- */
-#define D_MODULENAME undefined_modulename
-#endif
-
-
-#ifndef D_MASTER
-#warning D_MASTER not defined, but debug.h included! [see docs]
-/**
- * D_MASTER - Compile time maximum debug level
- *
- * #define in your debug-levels.h file to the maximum debug level the
- * runtime code will be allowed to have. This allows you to provide a
- * main knob.
- *
- * Anything above that level will be optimized out of the compile.
- *
- * Defaults to zero (no debug code compiled in).
- *
- * Maximum one definition per module (in the debug-levels.h file).
- */
-#define D_MASTER 0
-#endif
-
-#ifndef D_SUBMODULE
-#error D_SUBMODULE not defined, but debug.h included! [see docs]
-/**
- * D_SUBMODULE - Name of the current submodule
- *
- * #define in your submodule .c file before #including debug-levels.h
- * to the name of the current submodule as previously declared and
- * defined with D_SUBMODULE_DECLARE() (in your module's
- * debug-levels.h) and D_SUBMODULE_DEFINE().
- *
- * This is used to provide runtime-control over the debug levels.
- *
- * Maximum one per .c file! Can be shared among different .c files
- * (meaning they belong to the same submodule categorization).
- */
-#define D_SUBMODULE undefined_module
-#endif
-
-
-/**
- * D_SUBMODULE_DECLARE - Declare a submodule for runtime debug level control
- *
- * @_name: name of the submodule, restricted to the chars that make up a
- * valid C identifier ([a-zA-Z0-9_]).
- *
- * Declare in the module's debug-levels.h header file as:
- *
- * enum d_module {
- * D_SUBMODULE_DECLARE(submodule_1),
- * D_SUBMODULE_DECLARE(submodule_2),
- * D_SUBMODULE_DECLARE(submodule_3),
- * };
- *
- * Some corresponding .c file needs to have a matching
- * D_SUBMODULE_DEFINE().
- */
-#define D_SUBMODULE_DECLARE(_name) __D_SUBMODULE_##_name
-
-
-/**
- * D_SUBMODULE_DEFINE - Define a submodule for runtime debug level control
- *
- * @_name: name of the submodule, restricted to the chars that make up a
- * valid C identifier ([a-zA-Z0-9_]).
- *
- * Use once per module (in some .c file) as:
- *
- * static
- * struct d_level d_level_SUBMODULENAME[] = {
- * D_SUBMODULE_DEFINE(submodule_1),
- * D_SUBMODULE_DEFINE(submodule_2),
- * D_SUBMODULE_DEFINE(submodule_3),
- * };
- * size_t d_level_size_SUBMODULENAME = ARRAY_SIZE(d_level_SUBMODULENAME);
- *
- * Matching D_SUBMODULE_DECLARE()s have to be present in a
- * debug-levels.h header file.
- */
-#define D_SUBMODULE_DEFINE(_name) \
-[__D_SUBMODULE_##_name] = { \
- .level = 0, \
- .name = #_name \
-}
-
-
-
-/* The actual "debug" operations */
-
-
-/**
- * d_test - Returns true if debugging should be enabled
- *
- * @l: intended debug level (unsigned)
- *
- * If the master debug switch is enabled and the current settings are
- * higher or equal to the requested level, then debugging
- * output/actions should be enabled.
- *
- * NOTE:
- *
- * This needs to be coded so that it can be evaluated at compile
- * time; this is why the ugly BUG_ON() is placed in there, so the
- * D_MASTER evaluation compiles it all out if it is compile-time false.
- */
-#define d_test(l) \
-({ \
- unsigned __l = l; /* type enforcer */ \
- (D_MASTER) >= __l \
- && ({ \
- BUG_ON(_D_SUBMODULE_INDEX(D_SUBMODULE) >= D_LEVEL_SIZE);\
- D_LEVEL[_D_SUBMODULE_INDEX(D_SUBMODULE)].level >= __l; \
- }); \
-})
-
-
-/**
- * d_fnstart - log message at function start if debugging enabled
- *
- * @l: intended debug level
- * @_dev: 'struct device' pointer, NULL if none (for context)
- * @f: printf-like format and arguments
- */
-#define d_fnstart(l, _dev, f, a...) _d_printf(l, " FNSTART", _dev, f, ## a)
-
-
-/**
- * d_fnend - log message at function end if debugging enabled
- *
- * @l: intended debug level
- * @_dev: 'struct device' pointer, NULL if none (for context)
- * @f: printf-like format and arguments
- */
-#define d_fnend(l, _dev, f, a...) _d_printf(l, " FNEND", _dev, f, ## a)
-
-
-/**
- * d_printf - log message if debugging enabled
- *
- * @l: intended debug level
- * @_dev: 'struct device' pointer, NULL if none (for context)
- * @f: printf-like format and arguments
- */
-#define d_printf(l, _dev, f, a...) _d_printf(l, "", _dev, f, ## a)
-
-
-/**
- * d_dump - log buffer hex dump if debugging enabled
- *
- * @l: intended debug level
- * @dev: 'struct device' pointer, NULL if none (for context)
- * @ptr: pointer to the buffer to dump
- * @size: size of the buffer in bytes
- */
-#define d_dump(l, dev, ptr, size) \
-do { \
- char head[64]; \
- if (!d_test(l)) \
- break; \
- __d_head(head, sizeof(head), dev); \
- print_hex_dump(KERN_ERR, head, 0, 16, 1, \
- ((void *) ptr), (size), 0); \
-} while (0)
-
-
-/**
- * d_level_register_debugfs - Export a submodule's debug level over
- * debugfs as PREFIXSUBMODULE
- *
- * @prefix: string to prefix the name with
- * @name: name of the submodule (not a string, just the name)
- * @parent: debugfs parent dentry
- *
- * For removing, just use debugfs_remove_recursive() on the parent.
- */
-#define d_level_register_debugfs(prefix, name, parent) \
-({ \
- debugfs_create_u8( \
- prefix #name, 0600, parent, \
- &(D_LEVEL[__D_SUBMODULE_ ## name].level)); \
-})
-
-
-static inline
-void d_submodule_set(struct d_level *d_level, size_t d_level_size,
- const char *submodule, u8 level, const char *tag)
-{
- struct d_level *itr, *top;
- int index = -1;
-
- for (itr = d_level, top = itr + d_level_size; itr < top; itr++) {
- index++;
- if (itr->name == NULL) {
- printk(KERN_ERR "%s: itr->name NULL?? (%p, #%d)\n",
- tag, itr, index);
- continue;
- }
- if (!strcmp(itr->name, submodule)) {
- itr->level = level;
- return;
- }
- }
- printk(KERN_ERR "%s: unknown submodule %s\n", tag, submodule);
-}
-
-
-/**
- * d_parse_params - Parse a string with debug parameters from the
- * command line
- *
- * @d_level: level structure (D_LEVEL)
- * @d_level_size: number of items in the level structure
- * (D_LEVEL_SIZE).
- * @_params: string with the parameters; this is a space (not tab!)
- * separated list of NAME:VALUE, where NAME is the name of the
- * submodule and VALUE is its debug level.
- * @tag: string for error messages (example: MODULE.ARGNAME).
- */
-static inline
-void d_parse_params(struct d_level *d_level, size_t d_level_size,
- const char *_params, const char *tag)
-{
- char submodule[130], *params, *params_orig, *token, *colon;
- unsigned level, tokens;
-
- if (_params == NULL)
- return;
- params_orig = kstrdup(_params, GFP_KERNEL);
- params = params_orig;
- while (1) {
- token = strsep(&params, " ");
- if (token == NULL)
- break;
- if (*token == '\0') /* eat joint spaces */
- continue;
- /* kernel's sscanf %s eats until whitespace, so we
- * replace : by \n so it doesn't get eaten later by
- * strsep */
- colon = strchr(token, ':');
- if (colon != NULL)
- *colon = '\n';
- tokens = sscanf(token, "%s\n%u", submodule, &level);
- if (colon != NULL)
- *colon = ':'; /* set back, for error messages */
- if (tokens == 2)
- d_submodule_set(d_level, d_level_size,
- submodule, level, tag);
- else
- printk(KERN_ERR "%s: can't parse '%s' as a "
- "SUBMODULE:LEVEL (%d tokens)\n",
- tag, token, tokens);
- }
- kfree(params_orig);
-}
-
-#endif /* #ifndef __debug__h__ */
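For context on what this deletion removes, a condensed sketch of the usage the comments above describe (module and submodule names are illustrative, and the header no longer exists after this change):

/* debug-levels.h */
#ifndef __debug_levels__h__
#define __debug_levels__h__

#define D_MODULENAME example
#define D_MASTER 10

#include <linux/wimax/debug.h>

enum d_module {
	D_SUBMODULE_DECLARE(rx),
	D_SUBMODULE_DECLARE(tx),
};

#endif

/* example-rx.c */
#define D_SUBMODULE rx
#include "debug-levels.h"

struct d_level D_LEVEL[] = {
	D_SUBMODULE_DEFINE(rx),
	D_SUBMODULE_DEFINE(tx),
};
size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);

static void example_rx(struct device *dev)
{
	d_fnstart(3, dev, "(dev %p)\n", dev);
	d_printf(2, dev, "frame received\n");
	d_fnend(3, dev, "(dev %p) = void\n", dev);
}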
diff --git a/include/linux/wireless.h b/include/linux/wireless.h
index 2d1b54556eff..e6e34d74dda0 100644
--- a/include/linux/wireless.h
+++ b/include/linux/wireless.h
@@ -26,7 +26,15 @@ struct compat_iw_point {
struct __compat_iw_event {
__u16 len; /* Real length of this stuff */
__u16 cmd; /* Wireless IOCTL */
- compat_caddr_t pointer;
+
+ union {
+ compat_caddr_t pointer;
+
+ /* we need ptr_bytes to make memcpy() run-time destination
+ * buffer bounds checking happy, nothing special
+ */
+ DECLARE_FLEX_ARRAY(__u8, ptr_bytes);
+ };
};
#define IW_EV_COMPAT_LCP_LEN offsetof(struct __compat_iw_event, pointer)
#define IW_EV_COMPAT_POINT_OFF offsetof(struct compat_iw_point, length)
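A sketch of the pattern this hunk applies, with illustrative names: the union overlays the fixed member with a flexible byte array, so a fortified memcpy() treats writes that run past it as intentional trailing data (the caller must have allocated room for that payload behind the header):

#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_event {
	__u16 len;
	__u16 cmd;
	union {
		__u32 pointer;
		/* lets fortified memcpy() accept writes that extend
		 * past "pointer" into the trailing payload */
		DECLARE_FLEX_ARRAY(__u8, ptr_bytes);
	};
};

static void example_fill(struct example_event *ev, const void *data,
			 size_t n)
{
	/* n may exceed sizeof(ev->pointer); the flex array keeps
	 * run-time destination bounds checking quiet */
	memcpy(ev->ptr_bytes, data, n);
}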
diff --git a/include/linux/wkup_m3_ipc.h b/include/linux/wkup_m3_ipc.h
index e497e621dbb7..5e1b26f988e2 100644
--- a/include/linux/wkup_m3_ipc.h
+++ b/include/linux/wkup_m3_ipc.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI Wakeup M3 for AMx3 SoCs Power Management Routines
*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Dave Gerlach <d-gerlach@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _LINUX_WKUP_M3_IPC_H
@@ -33,7 +25,13 @@ struct wkup_m3_ipc {
int mem_type;
unsigned long resume_addr;
+ int vtt_conf;
+ int isolation_conf;
int state;
+ u32 halt;
+
+ unsigned long volt_scale_offsets;
+ const char *sd_fw_name;
struct completion sync_complete;
struct mbox_client mbox_client;
@@ -41,6 +39,7 @@ struct wkup_m3_ipc {
struct wkup_m3_ipc_ops *ops;
int is_rtc_only;
+ struct dentry *dbg_path;
};
struct wkup_m3_wakeup_src {
@@ -48,6 +47,12 @@ struct wkup_m3_wakeup_src {
char src[10];
};
+struct wkup_m3_scale_data_header {
+ u16 magic;
+ u8 sleep_offset;
+ u8 wake_offset;
+} __packed;
+
struct wkup_m3_ipc_ops {
void (*set_mem_type)(struct wkup_m3_ipc *m3_ipc, int mem_type);
void (*set_resume_address)(struct wkup_m3_ipc *m3_ipc, void *addr);
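A sketch of how the packed scale-data header added above might be validated against a loaded firmware image; EXAMPLE_SD_MAGIC and the function are illustrative, not the driver's actual logic:

#include <linux/firmware.h>
#include <linux/wkup_m3_ipc.h>

#define EXAMPLE_SD_MAGIC	0x2e1b	/* illustrative magic value */

static int example_check_scale_data(const struct firmware *fw,
				    unsigned long offset)
{
	const struct wkup_m3_scale_data_header *hdr;

	/* the header must fit inside the firmware image */
	if (offset + sizeof(*hdr) > fw->size)
		return -EINVAL;

	hdr = (const void *)(fw->data + offset);
	if (hdr->magic != EXAMPLE_SD_MAGIC)
		return -EINVAL;

	return 0;
}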
diff --git a/include/linux/wm97xx.h b/include/linux/wm97xx.h
index 58e082dadc68..332d2b0f29b9 100644
--- a/include/linux/wm97xx.h
+++ b/include/linux/wm97xx.h
@@ -254,9 +254,6 @@ struct wm97xx_mach_ops {
int (*acc_startup) (struct wm97xx *);
void (*acc_shutdown) (struct wm97xx *);
- /* interrupt mask control - required for accelerated operation */
- void (*irq_enable) (struct wm97xx *, int enable);
-
/* GPIO pin used for accelerated operation */
int irq_gpio;
@@ -281,7 +278,6 @@ struct wm97xx {
unsigned long ts_reader_min_interval; /* Minimum interval */
unsigned int pen_irq; /* Pen IRQ number in use */
struct workqueue_struct *ts_workq;
- struct work_struct pen_event_work;
u16 acc_slot; /* AC97 slot used for acc touch data */
u16 acc_rate; /* acc touch data rate */
unsigned pen_is_down:1; /* Pen is down */
@@ -294,7 +290,6 @@ struct wm97xx {
struct wm97xx_batt_pdata {
int batt_aux;
int temp_aux;
- int charge_gpio;
int min_voltage;
int max_voltage;
int batt_div;
diff --git a/include/linux/wmi.h b/include/linux/wmi.h
index 8ef7e7faea1e..b88d7b58e61e 100644
--- a/include/linux/wmi.h
+++ b/include/linux/wmi.h
@@ -35,9 +35,10 @@ extern int set_required_buffer_size(struct wmi_device *wdev, u64 length);
struct wmi_driver {
struct device_driver driver;
const struct wmi_device_id *id_table;
+ bool no_notify_data;
int (*probe)(struct wmi_device *wdev, const void *context);
- int (*remove)(struct wmi_device *wdev);
+ void (*remove)(struct wmi_device *wdev);
void (*notify)(struct wmi_device *device, union acpi_object *data);
long (*filter_callback)(struct wmi_device *wdev, unsigned int cmd,
struct wmi_ioctl_buffer *arg);
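A sketch of a driver against the updated interface; the GUID and all "example" names are illustrative. remove() can no longer fail, matching how the driver core ignores removal return values:

#include <linux/module.h>
#include <linux/wmi.h>

static const struct wmi_device_id example_wmi_id_table[] = {
	{ .guid_string = "12345678-1234-1234-1234-123456789ABC" },
	{ }
};

static int example_wmi_probe(struct wmi_device *wdev, const void *context)
{
	return 0;
}

/* previously returned int; the value was ignored, so it is now void */
static void example_wmi_remove(struct wmi_device *wdev)
{
}

static struct wmi_driver example_wmi_driver = {
	.driver = { .name = "example-wmi" },
	.id_table = example_wmi_id_table,
	.probe = example_wmi_probe,
	.remove = example_wmi_remove,
};
module_wmi_driver(example_wmi_driver);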
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index e48554e6526c..a0143dd24430 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -29,7 +29,7 @@ void delayed_work_timer_fn(struct timer_list *t);
enum {
WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
- WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */
+ WORK_STRUCT_INACTIVE_BIT= 1, /* work item is inactive */
WORK_STRUCT_PWQ_BIT = 2, /* data points to pwq */
WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -42,7 +42,7 @@ enum {
WORK_STRUCT_COLOR_BITS = 4,
WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
- WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
+ WORK_STRUCT_INACTIVE = 1 << WORK_STRUCT_INACTIVE_BIT,
WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT,
WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -51,19 +51,14 @@ enum {
WORK_STRUCT_STATIC = 0,
#endif
- /*
- * The last color is no color used for works which don't
- * participate in workqueue flushing.
- */
- WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1,
- WORK_NO_COLOR = WORK_NR_COLORS,
+ WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS),
/* not bound to any CPU, prefer the local CPU */
WORK_CPU_UNBOUND = NR_CPUS,
/*
- * Reserve 7 bits off of pwq pointer w/ debugobjects turned off.
- * This makes pwqs aligned to 256 bytes and allows 15 workqueue
+ * Reserve 8 bits off of pwq pointer w/ debugobjects turned off.
+ * This makes pwqs aligned to 256 bytes and allows 16 workqueue
* flush colors.
*/
WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
@@ -311,7 +306,7 @@ enum {
WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */
WQ_HIGHPRI = 1 << 4, /* high priority */
WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */
- WQ_SYSFS = 1 << 6, /* visible in sysfs, see wq_sysfs_register() */
+ WQ_SYSFS = 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */
/*
* Per-cpu workqueues are generally preferred because they tend to
@@ -324,7 +319,7 @@ enum {
* to execute and tries to keep idle cores idle to conserve power;
* however, for example, a per-cpu work item scheduled from an
* interrupt handler on an idle CPU will force the scheduler to
- * excute the work item on that CPU breaking the idleness, which in
+ * execute the work item on that CPU breaking the idleness, which in
* turn may lead to more scheduling choices which are sub-optimal
* in terms of power consumption.
*
@@ -404,15 +399,14 @@ extern struct workqueue_struct *system_freezable_power_efficient_wq;
* RETURNS:
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
-struct workqueue_struct *alloc_workqueue(const char *fmt,
- unsigned int flags,
- int max_active, ...);
+__printf(1, 4) struct workqueue_struct *
+alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
/**
* alloc_ordered_workqueue - allocate an ordered workqueue
* @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
- * @args...: args for @fmt
+ * @args: args for @fmt
*
* Allocate an ordered workqueue. An ordered workqueue executes at
* most one work item at any given time in the queued order. They are
@@ -451,7 +445,7 @@ extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
-extern void flush_workqueue(struct workqueue_struct *wq);
+extern void __flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern int schedule_on_each_cpu(work_func_t func);
@@ -459,6 +453,7 @@ extern int schedule_on_each_cpu(work_func_t func);
int execute_in_process_context(work_func_t fn, struct execute_work *);
extern bool flush_work(struct work_struct *work);
+extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);
extern bool flush_delayed_work(struct delayed_work *dwork);
@@ -475,7 +470,8 @@ extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
-extern void show_workqueue_state(void);
+extern void show_all_workqueues(void);
+extern void show_one_workqueue(struct workqueue_struct *wq);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
/**
@@ -568,15 +564,23 @@ static inline bool schedule_work(struct work_struct *work)
return queue_work(system_wq, work);
}
+/*
+ * Detect attempt to flush system-wide workqueues at compile time when possible.
+ *
+ * See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
+ * for reasons and steps for converting system-wide workqueues into local workqueues.
+ */
+extern void __warn_flushing_systemwide_wq(void)
+ __compiletime_warning("Please avoid flushing system-wide workqueues.");
+
/**
* flush_scheduled_work - ensure that any scheduled work has run to completion.
*
* Forces execution of the kernel-global workqueue and blocks until its
* completion.
*
- * Think twice before calling this function! It's very easy to get into
- * trouble if you don't take great care. Either of the following situations
- * will lead to deadlock:
+ * It's very easy to get into trouble if you don't take great care.
+ * Either of the following situations will lead to deadlock:
*
* One of the work items currently on the workqueue needs to acquire
* a lock held by your code or its caller.
@@ -591,11 +595,51 @@ static inline bool schedule_work(struct work_struct *work)
* need to know that a particular work item isn't queued and isn't running.
* In such cases you should use cancel_delayed_work_sync() or
* cancel_work_sync() instead.
+ *
+ * Please stop calling this function! A conversion to stop flushing system-wide
+ * workqueues is in progress. This function will be removed after all in-tree
+ * users stopped calling this function.
*/
-static inline void flush_scheduled_work(void)
-{
- flush_workqueue(system_wq);
-}
+/*
+ * The background of commit 771c035372a036f8 ("deprecate the
+ * '__deprecated' attribute warnings entirely and for good") is that,
+ * since Linus builds all modules between every single pull he does,
+ * the standard kernel build needs to be _clean_ in order to be able to
+ * notice when new problems happen. Therefore, don't emit warning while
+ * there are in-tree users.
+ */
+#define flush_scheduled_work() \
+({ \
+ if (0) \
+ __warn_flushing_systemwide_wq(); \
+ __flush_workqueue(system_wq); \
+})
+
+/*
+ * Although there is no longer in-tree caller, for now just emit warning
+ * in order to give out-of-tree callers time to update.
+ */
+#define flush_workqueue(wq) \
+({ \
+ struct workqueue_struct *_wq = (wq); \
+ \
+ if ((__builtin_constant_p(_wq == system_wq) && \
+ _wq == system_wq) || \
+ (__builtin_constant_p(_wq == system_highpri_wq) && \
+ _wq == system_highpri_wq) || \
+ (__builtin_constant_p(_wq == system_long_wq) && \
+ _wq == system_long_wq) || \
+ (__builtin_constant_p(_wq == system_unbound_wq) && \
+ _wq == system_unbound_wq) || \
+ (__builtin_constant_p(_wq == system_freezable_wq) && \
+ _wq == system_freezable_wq) || \
+ (__builtin_constant_p(_wq == system_power_efficient_wq) && \
+ _wq == system_power_efficient_wq) || \
+ (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
+ _wq == system_freezable_power_efficient_wq)) \
+ __warn_flushing_systemwide_wq(); \
+ __flush_workqueue(_wq); \
+})
/**
* schedule_delayed_work_on - queue work in global workqueue on CPU after delay
@@ -665,7 +709,7 @@ int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif
-int __init workqueue_init_early(void);
-int __init workqueue_init(void);
+void __init workqueue_init_early(void);
+void __init workqueue_init(void);
#endif
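A sketch of the conversion the new warnings push callers toward: allocate a driver-local workqueue and flush that instead of a system-wide one ("example" names are illustrative):

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int example_setup(void)
{
	example_wq = alloc_workqueue("example_wq", 0, 0);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}

static void example_teardown(void)
{
	/*
	 * Was flush_scheduled_work(); flushing only our own queue
	 * cannot deadlock on unrelated system-wide work items and
	 * does not trigger __warn_flushing_systemwide_wq().
	 */
	flush_workqueue(example_wq);
	destroy_workqueue(example_wq);
}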
diff --git a/include/linux/workqueue_api.h b/include/linux/workqueue_api.h
new file mode 100644
index 000000000000..77debb5d2760
--- /dev/null
+++ b/include/linux/workqueue_api.h
@@ -0,0 +1 @@
+#include <linux/workqueue.h>
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index a19d845dd7eb..06f9291b6fd5 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -11,27 +11,18 @@
#include <linux/flex_proportions.h>
#include <linux/backing-dev-defs.h>
#include <linux/blk_types.h>
-#include <linux/blk-cgroup.h>
struct bio;
DECLARE_PER_CPU(int, dirty_throttle_leaks);
/*
- * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
- *
- * (thresh - thresh/DIRTY_FULL_SCOPE, thresh)
- *
- * Further beyond, all dirtier tasks will enter a loop waiting (possibly long
- * time) for the dirty pages to drop, unless written enough pages.
- *
* The global dirty threshold is normally equal to the global dirty limit,
* except when the system suddenly allocates a lot of anonymous memory and
* knocks down the global dirty threshold quickly, in which case the global
* dirty limit will follow down slowly to prevent livelocking all dirtier tasks.
*/
#define DIRTY_SCOPE 8
-#define DIRTY_FULL_SCOPE (DIRTY_SCOPE / 2)
struct backing_dev_info;
@@ -69,6 +60,7 @@ struct writeback_control {
unsigned for_reclaim:1; /* Invoked from the page allocator */
unsigned range_cyclic:1; /* range_start is cyclic */
unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
+ unsigned unpinned_fscache_wb:1; /* Cleared I_PINNING_FSCACHE_WB */
/*
* When writeback IOs are bounced through async layers, only the
@@ -80,6 +72,13 @@ struct writeback_control {
unsigned punt_to_cgroup:1; /* cgrp punting, see __REQ_CGROUP_PUNT */
+	/*
+	 * To enable batching of swap writes to non-block-device backends,
+	 * "swap_plug" can be set to point to a 'struct swap_iocb *'. When
+	 * all swap writes have been submitted, swap_write_unplug() should
+	 * be called if the swap_iocb is not NULL.
+	 */
+ struct swap_iocb **swap_plug;
+
#ifdef CONFIG_CGROUP_WRITEBACK
struct bdi_writeback *wb; /* wb this writeback is issued under */
struct inode *inode; /* inode being written out */
@@ -94,9 +93,9 @@ struct writeback_control {
#endif
};
-static inline int wbc_to_write_flags(struct writeback_control *wbc)
+static inline blk_opf_t wbc_to_write_flags(struct writeback_control *wbc)
{
- int flags = 0;
+ blk_opf_t flags = 0;
if (wbc->punt_to_cgroup)
flags = REQ_CGROUP_PUNT;
@@ -109,15 +108,12 @@ static inline int wbc_to_write_flags(struct writeback_control *wbc)
return flags;
}
-static inline struct cgroup_subsys_state *
-wbc_blkcg_css(struct writeback_control *wbc)
-{
#ifdef CONFIG_CGROUP_WRITEBACK
- if (wbc->wb)
- return wbc->wb->blkcg_css;
-#endif
- return blkcg_root_css;
-}
+#define wbc_blkcg_css(wbc) \
+ ((wbc)->wb ? (wbc)->wb->blkcg_css : blkcg_root_css)
+#else
+#define wbc_blkcg_css(wbc) (blkcg_root_css)
+#endif /* CONFIG_CGROUP_WRITEBACK */
/*
* A wb_domain represents a domain that wb's (bdi_writeback's) belong to
@@ -197,6 +193,7 @@ void wakeup_flusher_threads(enum wb_reason reason);
void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);
+void inode_io_list_del(struct inode *inode);
/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
@@ -217,9 +214,10 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
void wbc_detach_inode(struct writeback_control *wbc);
void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
size_t bytes);
-int cgroup_writeback_by_id(u64 bdi_id, int memcg_id, unsigned long nr_pages,
+int cgroup_writeback_by_id(u64 bdi_id, int memcg_id,
enum wb_reason reason, struct wb_completion *done);
void cgroup_writeback_umount(void);
+bool cleanup_offline_cgwb(struct bdi_writeback *wb);
/**
* inode_attach_wb - associate an inode with its wb
@@ -334,14 +332,9 @@ static inline void cgroup_writeback_umount(void)
/*
* mm/page-writeback.c
*/
-#ifdef CONFIG_BLOCK
void laptop_io_completion(struct backing_dev_info *info);
void laptop_sync_completion(void);
-void laptop_mode_sync(struct work_struct *work);
void laptop_mode_timer_fn(struct timer_list *t);
-#else
-static inline void laptop_sync_completion(void) { }
-#endif
bool node_dirty_ok(struct pglist_data *pgdat);
int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -351,41 +344,26 @@ void wb_domain_exit(struct wb_domain *dom);
extern struct wb_domain global_wb_domain;
/* These are exported to sysctl. */
-extern int dirty_background_ratio;
-extern unsigned long dirty_background_bytes;
-extern int vm_dirty_ratio;
-extern unsigned long vm_dirty_bytes;
extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
extern unsigned int dirtytime_expire_interval;
-extern int vm_highmem_is_dirtyable;
-extern int block_dump;
extern int laptop_mode;
-extern int dirty_background_ratio_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
-extern int dirty_background_bytes_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
-extern int dirty_ratio_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
-extern int dirty_bytes_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
int dirtytime_interval_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
-
-struct ctl_table;
-int dirty_writeback_centisecs_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
+ void *buffer, size_t *lenp, loff_t *ppos);
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
-void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time);
+void wb_update_bandwidth(struct bdi_writeback *wb);
+
+/* Invoke balance dirty pages in async mode. */
+#define BDP_ASYNC 0x0001
+
void balance_dirty_pages_ratelimited(struct address_space *mapping);
+int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,
+ unsigned int flags);
+
bool wb_over_bg_thresh(struct bdi_writeback *wb);
typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
@@ -403,7 +381,14 @@ void writeback_set_ratelimit(void);
void tag_pages_for_writeback(struct address_space *mapping,
pgoff_t start, pgoff_t end);
-void account_page_redirty(struct page *page);
+bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio);
+void folio_account_redirty(struct folio *folio);
+static inline void account_page_redirty(struct page *page)
+{
+ folio_account_redirty(page_folio(page));
+}
+bool folio_redirty_for_writepage(struct writeback_control *, struct folio *);
+bool redirty_page_for_writepage(struct writeback_control *, struct page *);
void sb_mark_inode_writeback(struct inode *inode);
void sb_clear_inode_writeback(struct inode *inode);
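A sketch of calling the new async throttling entry point added above; the caller is illustrative. With BDP_ASYNC the function does not sleep, leaving the caller to retry from a context that is allowed to block:

#include <linux/writeback.h>

static int example_dirty_nowait(struct address_space *mapping)
{
	/*
	 * A nonzero return (e.g. -EAGAIN) means throttling was
	 * needed but skipped; bounce the request to a context
	 * that may sleep.
	 */
	return balance_dirty_pages_ratelimited_flags(mapping, BDP_ASYNC);
}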
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index d7554252404c..bb763085479a 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -18,6 +18,22 @@
#define __LINUX_WW_MUTEX_H
#include <linux/mutex.h>
+#include <linux/rtmutex.h>
+
+#if defined(CONFIG_DEBUG_MUTEXES) || \
+ (defined(CONFIG_PREEMPT_RT) && defined(CONFIG_DEBUG_RT_MUTEXES))
+#define DEBUG_WW_MUTEXES
+#endif
+
+#ifndef CONFIG_PREEMPT_RT
+#define WW_MUTEX_BASE mutex
+#define ww_mutex_base_init(l,n,k) __mutex_init(l,n,k)
+#define ww_mutex_base_is_locked(b) mutex_is_locked((b))
+#else
+#define WW_MUTEX_BASE rt_mutex
+#define ww_mutex_base_init(l,n,k) __rt_mutex_init(l,n,k)
+#define ww_mutex_base_is_locked(b) rt_mutex_base_is_locked(&(b)->rtmutex)
+#endif
struct ww_class {
atomic_long_t stamp;
@@ -28,16 +44,24 @@ struct ww_class {
unsigned int is_wait_die;
};
+struct ww_mutex {
+ struct WW_MUTEX_BASE base;
+ struct ww_acquire_ctx *ctx;
+#ifdef DEBUG_WW_MUTEXES
+ struct ww_class *ww_class;
+#endif
+};
+
struct ww_acquire_ctx {
struct task_struct *task;
unsigned long stamp;
unsigned int acquired;
unsigned short wounded;
unsigned short is_wait_die;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
unsigned int done_acquire;
struct ww_class *ww_class;
- struct ww_mutex *contending_lock;
+ void *contending_lock;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -48,56 +72,35 @@ struct ww_acquire_ctx {
#endif
};
-struct ww_mutex {
- struct mutex base;
- struct ww_acquire_ctx *ctx;
-#ifdef CONFIG_DEBUG_MUTEXES
- struct ww_class *ww_class;
-#endif
-};
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \
- , .ww_class = class
-#else
-# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class)
-#endif
-
#define __WW_CLASS_INITIALIZER(ww_class, _is_wait_die) \
{ .stamp = ATOMIC_LONG_INIT(0) \
, .acquire_name = #ww_class "_acquire" \
, .mutex_name = #ww_class "_mutex" \
, .is_wait_die = _is_wait_die }
-#define __WW_MUTEX_INITIALIZER(lockname, class) \
- { .base = __MUTEX_INITIALIZER(lockname.base) \
- __WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
-
#define DEFINE_WD_CLASS(classname) \
struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 1)
#define DEFINE_WW_CLASS(classname) \
struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 0)
-#define DEFINE_WW_MUTEX(mutexname, ww_class) \
- struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
-
/**
* ww_mutex_init - initialize the w/w mutex
* @lock: the mutex to be initialized
* @ww_class: the w/w class the mutex should belong to
*
* Initialize the w/w mutex to unlocked state and associate it with the given
- * class.
+ * class. A static initializer macro for w/w mutexes is not provided;
+ * this function is the only way to properly initialize a w/w mutex.
*
* It is not allowed to initialize an already locked mutex.
*/
static inline void ww_mutex_init(struct ww_mutex *lock,
struct ww_class *ww_class)
{
- __mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
+ ww_mutex_base_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
lock->ctx = NULL;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
lock->ww_class = ww_class;
#endif
}
@@ -134,7 +137,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
ctx->acquired = 0;
ctx->wounded = false;
ctx->is_wait_die = ww_class->is_wait_die;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
ctx->ww_class = ww_class;
ctx->done_acquire = 0;
ctx->contending_lock = NULL;
@@ -164,7 +167,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
*/
static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
{
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
lockdep_assert_held(ctx);
DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
@@ -181,9 +184,10 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
*/
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
{
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
mutex_release(&ctx->dep_map, _THIS_IP_);
-
+#endif
+#ifdef DEBUG_WW_MUTEXES
DEBUG_LOCKS_WARN_ON(ctx->acquired);
if (!IS_ENABLED(CONFIG_PROVE_LOCKING))
/*
@@ -289,7 +293,7 @@ static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
int ret;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
#endif
ret = ww_mutex_lock(lock, ctx);
@@ -325,7 +329,7 @@ static inline int __must_check
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
{
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
#endif
return ww_mutex_lock_interruptible(lock, ctx);
@@ -333,17 +337,8 @@ ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
extern void ww_mutex_unlock(struct ww_mutex *lock);
-/**
- * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context
- * @lock: mutex to lock
- *
- * Trylocks a mutex without acquire context, so no deadlock detection is
- * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
- */
-static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
-{
- return mutex_trylock(&lock->base);
-}
+extern int __must_check ww_mutex_trylock(struct ww_mutex *lock,
+ struct ww_acquire_ctx *ctx);
/***
* ww_mutex_destroy - mark a w/w mutex unusable
@@ -355,7 +350,9 @@ static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
*/
static inline void ww_mutex_destroy(struct ww_mutex *lock)
{
+#ifndef CONFIG_PREEMPT_RT
mutex_destroy(&lock->base);
+#endif
}
/**
@@ -366,7 +363,7 @@ static inline void ww_mutex_destroy(struct ww_mutex *lock)
*/
static inline bool ww_mutex_is_locked(struct ww_mutex *lock)
{
- return mutex_is_locked(&lock->base);
+ return ww_mutex_base_is_locked(&lock->base);
}
#endif
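A sketch of the resulting usage, with illustrative names: static initialization is gone, so every w/w mutex goes through ww_mutex_init(), and the trylock now carries the acquire context:

#include <linux/ww_mutex.h>

static DEFINE_WD_CLASS(example_ww_class);

struct example_obj {
	struct ww_mutex lock;
};

static void example_obj_init(struct example_obj *obj)
{
	/*
	 * DEFINE_WW_MUTEX() was removed; runtime init also picks the
	 * right base lock (mutex vs. rt_mutex) on PREEMPT_RT.
	 */
	ww_mutex_init(&obj->lock, &example_ww_class);
}

static int example_trylock(struct example_obj *obj,
			   struct ww_acquire_ctx *ctx)
{
	/* the trylock now participates in deadlock detection via ctx */
	return ww_mutex_trylock(&obj->lock, ctx);
}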
diff --git a/include/linux/wwan.h b/include/linux/wwan.h
new file mode 100644
index 000000000000..5ce2acf444fb
--- /dev/null
+++ b/include/linux/wwan.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */
+
+#ifndef __WWAN_H
+#define __WWAN_H
+
+#include <linux/poll.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+
+/**
+ * enum wwan_port_type - WWAN port types
+ * @WWAN_PORT_AT: AT commands
+ * @WWAN_PORT_MBIM: Mobile Broadband Interface Model control
+ * @WWAN_PORT_QMI: Qcom modem/MSM interface for modem control
+ * @WWAN_PORT_QCDM: Qcom Modem diagnostic interface
+ * @WWAN_PORT_FIREHOSE: XML based command protocol
+ *
+ * @WWAN_PORT_MAX: Highest supported port type
+ * @WWAN_PORT_UNKNOWN: Special value to indicate an unknown port type
+ * @__WWAN_PORT_MAX: Internal use
+ */
+enum wwan_port_type {
+ WWAN_PORT_AT,
+ WWAN_PORT_MBIM,
+ WWAN_PORT_QMI,
+ WWAN_PORT_QCDM,
+ WWAN_PORT_FIREHOSE,
+
+ /* Add new port types above this line */
+
+ __WWAN_PORT_MAX,
+ WWAN_PORT_MAX = __WWAN_PORT_MAX - 1,
+ WWAN_PORT_UNKNOWN,
+};
+
+struct device;
+struct file;
+struct netlink_ext_ack;
+struct sk_buff;
+struct wwan_port;
+
+/**
+ * struct wwan_port_ops - The WWAN port operations
+ * @start: The routine for starting the WWAN port device.
+ * @stop: The routine for stopping the WWAN port device.
+ * @tx: Non-blocking routine that sends WWAN port protocol data to the device.
+ * @tx_blocking: Optional blocking routine that sends WWAN port protocol data
+ * to the device.
+ * @tx_poll: Optional routine that sets additional TX poll flags.
+ *
+ * The wwan_port_ops structure contains a list of low-level operations
+ * that control a WWAN port device. All functions are mandatory unless specified otherwise.
+ */
+struct wwan_port_ops {
+ int (*start)(struct wwan_port *port);
+ void (*stop)(struct wwan_port *port);
+ int (*tx)(struct wwan_port *port, struct sk_buff *skb);
+
+ /* Optional operations */
+ int (*tx_blocking)(struct wwan_port *port, struct sk_buff *skb);
+ __poll_t (*tx_poll)(struct wwan_port *port, struct file *filp,
+ poll_table *wait);
+};
+
+/**
+ * wwan_create_port - Add a new WWAN port
+ * @parent: Device to use as parent and shared by all WWAN ports
+ * @type: WWAN port type
+ * @ops: WWAN port operations
+ * @drvdata: Pointer to caller driver data
+ *
+ * Allocate and register a new WWAN port. The port will be automatically exposed
+ * to user space as a character device and attached to the right virtual WWAN
+ * device, based on the parent pointer. The parent pointer is the device shared
+ * by all components of the same WWAN modem (e.g. USB dev, PCI dev, MHI
+ * controller...).
+ *
+ * drvdata will be placed in the WWAN port device driver data and can be
+ * retrieved with wwan_port_get_drvdata().
+ *
+ * This function must be balanced with a call to wwan_remove_port().
+ *
+ * Returns a valid pointer to wwan_port on success or an ERR_PTR() on failure.
+ */
+struct wwan_port *wwan_create_port(struct device *parent,
+ enum wwan_port_type type,
+ const struct wwan_port_ops *ops,
+ void *drvdata);
+
+/**
+ * wwan_remove_port - Remove a WWAN port
+ * @port: WWAN port to remove
+ *
+ * Remove a previously created port.
+ */
+void wwan_remove_port(struct wwan_port *port);
+
+/**
+ * wwan_port_rx - Receive data from the WWAN port
+ * @port: WWAN port for which data is received
+ * @skb: Pointer to the rx buffer
+ *
+ * A port driver calls this function upon data reception (MBIM, AT...).
+ */
+void wwan_port_rx(struct wwan_port *port, struct sk_buff *skb);
+
+/**
+ * wwan_port_txoff - Stop TX on WWAN port
+ * @port: WWAN port for which TX must be stopped
+ *
+ * Used for TX flow control, a port driver calls this function to indicate TX
+ * is temporarily unavailable (e.g. due to ring buffer fullness).
+ */
+void wwan_port_txoff(struct wwan_port *port);
+
+
+/**
+ * wwan_port_txon - Restart TX on WWAN port
+ * @port: WWAN port for which TX must be restarted
+ *
+ * Used for TX flow control, a port driver calls this function to indicate TX
+ * is available again.
+ */
+void wwan_port_txon(struct wwan_port *port);
+
+/**
+ * wwan_port_get_drvdata - Retrieve driver data from a WWAN port
+ * @port: Related WWAN port
+ */
+void *wwan_port_get_drvdata(struct wwan_port *port);
+
+/**
+ * struct wwan_netdev_priv - WWAN core network device private data
+ * @link_id: WWAN device data link id
+ * @drv_priv: driver private data area, size is determined in &wwan_ops
+ */
+struct wwan_netdev_priv {
+ u32 link_id;
+
+ /* must be last */
+ u8 drv_priv[] __aligned(sizeof(void *));
+};
+
+static inline void *wwan_netdev_drvpriv(struct net_device *dev)
+{
+ return ((struct wwan_netdev_priv *)netdev_priv(dev))->drv_priv;
+}
+
+/*
+ * Used to indicate that the WWAN core should not create a default network
+ * link.
+ */
+#define WWAN_NO_DEFAULT_LINK U32_MAX
+
+/**
+ * struct wwan_ops - WWAN device ops
+ * @priv_size: size of private netdev data area
+ * @setup: set up a new netdev
+ * @newlink: register the new netdev
+ * @dellink: remove the given netdev
+ */
+struct wwan_ops {
+ unsigned int priv_size;
+ void (*setup)(struct net_device *dev);
+ int (*newlink)(void *ctxt, struct net_device *dev,
+ u32 if_id, struct netlink_ext_ack *extack);
+ void (*dellink)(void *ctxt, struct net_device *dev,
+ struct list_head *head);
+};
+
+int wwan_register_ops(struct device *parent, const struct wwan_ops *ops,
+ void *ctxt, u32 def_link_id);
+
+void wwan_unregister_ops(struct device *parent);
+
+#ifdef CONFIG_WWAN_DEBUGFS
+struct dentry *wwan_get_debugfs_dir(struct device *parent);
+void wwan_put_debugfs_dir(struct dentry *dir);
+#else
+static inline struct dentry *wwan_get_debugfs_dir(struct device *parent)
+{
+ return ERR_PTR(-ENODEV);
+}
+static inline void wwan_put_debugfs_dir(struct dentry *dir) {}
+#endif
+
+#endif /* __WWAN_H */
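A sketch of registering an AT port with the API above ("example" names are illustrative; a real tx() would queue the skb toward the modem instead of dropping it):

#include <linux/skbuff.h>
#include <linux/wwan.h>

static int example_start(struct wwan_port *port)
{
	return 0;
}

static void example_stop(struct wwan_port *port)
{
}

static int example_tx(struct wwan_port *port, struct sk_buff *skb)
{
	/* hand the skb to the modem here; we just consume it */
	kfree_skb(skb);
	return 0;
}

static const struct wwan_port_ops example_port_ops = {
	.start = example_start,
	.stop = example_stop,
	.tx = example_tx,
};

static struct wwan_port *example_attach(struct device *parent, void *drvdata)
{
	/*
	 * The core creates the character device and links it to the
	 * virtual WWAN device derived from @parent.
	 */
	return wwan_create_port(parent, WWAN_PORT_AT, &example_port_ops,
				drvdata);
}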
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index f73e1775ded0..44dd6d6e01bc 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -9,12 +9,14 @@
* See Documentation/core-api/xarray.rst for how to use the XArray.
*/
+#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
+#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/types.h>
@@ -32,8 +34,8 @@
* The following internal entries have a special meaning:
*
* 0-62: Sibling entries
- * 256: Zero entry
- * 257: Retry entry
+ * 256: Retry entry
+ * 257: Zero entry
*
* Errors are also represented as internal entries, but use the negative
* space (-4094 to -2). They're never stored in the slots array; only
@@ -229,9 +231,10 @@ static inline int xa_err(void *entry)
*
* This structure is used either directly or via the XA_LIMIT() macro
* to communicate the range of IDs that are valid for allocation.
- * Two common ranges are predefined for you:
+ * Three common ranges are predefined for you:
* * xa_limit_32b - [0 - UINT_MAX]
* * xa_limit_31b - [0 - INT_MAX]
+ * * xa_limit_16b - [0 - USHRT_MAX]
*/
struct xa_limit {
u32 max;
@@ -242,6 +245,7 @@ struct xa_limit {
#define xa_limit_32b XA_LIMIT(0, UINT_MAX)
#define xa_limit_31b XA_LIMIT(0, INT_MAX)
+#define xa_limit_16b XA_LIMIT(0, USHRT_MAX)
typedef unsigned __bitwise xa_mark_t;
#define XA_MARK_0 ((__force xa_mark_t)0U)
@@ -576,13 +580,14 @@ void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
*
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs.
- * Return: The entry which used to be at this index.
+ * Return: The old entry at this index or xa_err() if an error happened.
*/
static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
void *entry, gfp_t gfp)
{
void *curr;
+ might_alloc(gfp);
xa_lock_bh(xa);
curr = __xa_store(xa, index, entry, gfp);
xa_unlock_bh(xa);
@@ -602,13 +607,14 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
*
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts.
- * Return: The entry which used to be at this index.
+ * Return: The old entry at this index or xa_err() if an error happened.
*/
static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
void *entry, gfp_t gfp)
{
void *curr;
+ might_alloc(gfp);
xa_lock_irq(xa);
curr = __xa_store(xa, index, entry, gfp);
xa_unlock_irq(xa);
@@ -684,6 +690,7 @@ static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
{
void *curr;
+ might_alloc(gfp);
xa_lock(xa);
curr = __xa_cmpxchg(xa, index, old, entry, gfp);
xa_unlock(xa);
@@ -711,6 +718,7 @@ static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index,
{
void *curr;
+ might_alloc(gfp);
xa_lock_bh(xa);
curr = __xa_cmpxchg(xa, index, old, entry, gfp);
xa_unlock_bh(xa);
@@ -738,6 +746,7 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
{
void *curr;
+ might_alloc(gfp);
xa_lock_irq(xa);
curr = __xa_cmpxchg(xa, index, old, entry, gfp);
xa_unlock_irq(xa);
@@ -767,6 +776,7 @@ static inline int __must_check xa_insert(struct xarray *xa,
{
int err;
+ might_alloc(gfp);
xa_lock(xa);
err = __xa_insert(xa, index, entry, gfp);
xa_unlock(xa);
@@ -796,6 +806,7 @@ static inline int __must_check xa_insert_bh(struct xarray *xa,
{
int err;
+ might_alloc(gfp);
xa_lock_bh(xa);
err = __xa_insert(xa, index, entry, gfp);
xa_unlock_bh(xa);
@@ -825,6 +836,7 @@ static inline int __must_check xa_insert_irq(struct xarray *xa,
{
int err;
+ might_alloc(gfp);
xa_lock_irq(xa);
err = __xa_insert(xa, index, entry, gfp);
xa_unlock_irq(xa);
@@ -854,6 +866,7 @@ static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
{
int err;
+ might_alloc(gfp);
xa_lock(xa);
err = __xa_alloc(xa, id, entry, limit, gfp);
xa_unlock(xa);
@@ -883,6 +896,7 @@ static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
{
int err;
+ might_alloc(gfp);
xa_lock_bh(xa);
err = __xa_alloc(xa, id, entry, limit, gfp);
xa_unlock_bh(xa);
@@ -912,6 +926,7 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
{
int err;
+ might_alloc(gfp);
xa_lock_irq(xa);
err = __xa_alloc(xa, id, entry, limit, gfp);
xa_unlock_irq(xa);
@@ -945,6 +960,7 @@ static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
{
int err;
+ might_alloc(gfp);
xa_lock(xa);
err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
xa_unlock(xa);
@@ -978,6 +994,7 @@ static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
{
int err;
+ might_alloc(gfp);
xa_lock_bh(xa);
err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
xa_unlock_bh(xa);
@@ -1011,6 +1028,7 @@ static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
{
int err;
+ might_alloc(gfp);
xa_lock_irq(xa);
err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
xa_unlock_irq(xa);
@@ -1286,6 +1304,8 @@ static inline bool xa_is_advanced(const void *entry)
*/
typedef void (*xa_update_node_t)(struct xa_node *node);
+void xa_delete_node(struct xa_node *, xa_update_node_t);
+
/*
* The xa_state is opaque to its users. It contains various different pieces
* of state involved in the current operation on the XArray. It should be
@@ -1313,6 +1333,7 @@ struct xa_state {
struct xa_node *xa_node;
struct xa_node *xa_alloc;
xa_update_node_t xa_update;
+ struct list_lru *xa_lru;
};
/*
@@ -1332,7 +1353,8 @@ struct xa_state {
.xa_pad = 0, \
.xa_node = XAS_RESTART, \
.xa_alloc = NULL, \
- .xa_update = NULL \
+ .xa_update = NULL, \
+ .xa_lru = NULL, \
}
/**
@@ -1501,10 +1523,33 @@ void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t);
void xas_init_marks(const struct xa_state *);
bool xas_nomem(struct xa_state *, gfp_t);
+void xas_destroy(struct xa_state *);
void xas_pause(struct xa_state *);
void xas_create_range(struct xa_state *);
+#ifdef CONFIG_XARRAY_MULTI
+int xa_get_order(struct xarray *, unsigned long index);
+void xas_split(struct xa_state *, void *entry, unsigned int order);
+void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t);
+#else
+static inline int xa_get_order(struct xarray *xa, unsigned long index)
+{
+ return 0;
+}
+
+static inline void xas_split(struct xa_state *xas, void *entry,
+ unsigned int order)
+{
+ xas_store(xas, entry);
+}
+
+static inline void xas_split_alloc(struct xa_state *xas, void *entry,
+ unsigned int order, gfp_t gfp)
+{
+}
+#endif
+
/**
* xas_reload() - Refetch an entry from the xarray.
* @xas: XArray operation state.
@@ -1522,10 +1567,21 @@ void xas_create_range(struct xa_state *);
static inline void *xas_reload(struct xa_state *xas)
{
struct xa_node *node = xas->xa_node;
-
- if (node)
- return xa_entry(xas->xa, node, xas->xa_offset);
- return xa_head(xas->xa);
+ void *entry;
+ char offset;
+
+ if (!node)
+ return xa_head(xas->xa);
+ if (IS_ENABLED(CONFIG_XARRAY_MULTI)) {
+ offset = (xas->xa_index >> node->shift) & XA_CHUNK_MASK;
+ entry = xa_entry(xas->xa, node, offset);
+ if (!xa_is_sibling(entry))
+ return entry;
+ offset = xa_to_sibling(entry);
+ } else {
+ offset = xas->xa_offset;
+ }
+ return xa_entry(xas->xa, node, offset);
}
/**
@@ -1544,6 +1600,24 @@ static inline void xas_set(struct xa_state *xas, unsigned long index)
}
/**
+ * xas_advance() - Skip over sibling entries.
+ * @xas: XArray operation state.
+ * @index: Index of last sibling entry.
+ *
+ * Move the operation state to refer to the last sibling entry.
+ * This is useful for loops that normally want to see sibling
+ * entries but sometimes want to skip them. Use xas_set() if you
+ * want to move to an index which is not part of this entry.
+ */
+static inline void xas_advance(struct xa_state *xas, unsigned long index)
+{
+ unsigned char shift = xas_is_node(xas) ? xas->xa_node->shift : 0;
+
+ xas->xa_index = index;
+ xas->xa_offset = (index >> shift) & XA_CHUNK_MASK;
+}
+
+/**
* xas_set_order() - Set up XArray operation state for a multislot entry.
* @xas: XArray operation state.
* @index: Target of the operation.
@@ -1576,6 +1650,11 @@ static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
xas->xa_update = update;
}
+static inline void xas_set_lru(struct xa_state *xas, struct list_lru *lru)
+{
+ xas->xa_lru = lru;
+}
+
/**
* xas_next_entry() - Advance iterator to next present entry.
* @xas: XArray operation state.
@@ -1648,6 +1727,7 @@ static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
xa_mark_t mark)
{
struct xa_node *node = xas->xa_node;
+ void *entry;
unsigned int offset;
if (unlikely(xas_not_node(node) || node->shift))
@@ -1659,7 +1739,10 @@ static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
return NULL;
if (offset == XA_CHUNK_SIZE)
return xas_find_marked(xas, max, mark);
- return xa_entry(xas->xa, node, offset);
+ entry = xa_entry(xas->xa, node, offset);
+ if (!entry)
+ return xas_find_marked(xas, max, mark);
+ return entry;
}
/*
@@ -1710,13 +1793,12 @@ enum {
* @xas: XArray operation state.
* @entry: Entry retrieved from the array.
*
- * The loop body will be executed for each entry in the XArray that lies
- * within the range specified by @xas. If the loop completes successfully,
- * any entries that lie in this range will be replaced by @entry. The caller
- * may break out of the loop; if they do so, the contents of the XArray will
- * be unchanged. The operation may fail due to an out of memory condition.
- * The caller may also call xa_set_err() to exit the loop while setting an
- * error to record the reason.
+ * The loop body will be executed for each entry in the XArray that
+ * lies within the range specified by @xas. If the loop terminates
+ * normally, @entry will be %NULL. The user may break out of the loop,
+ * which will leave @entry set to the conflicting entry. The caller
+ * may also call xa_set_err() to exit the loop while setting an error
+ * to record the reason.
*/
#define xas_for_each_conflict(xas, entry) \
while ((entry = xas_find_conflict(xas)))
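A sketch of the new 16-bit allocation limit in use ("example" names are illustrative):

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(example_ids);

static int example_get_id(void *object, u32 *id)
{
	/* xa_limit_16b constrains the allocated index to [0, USHRT_MAX] */
	return xa_alloc(&example_ids, id, object, xa_limit_16b,
			GFP_KERNEL);
}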
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 6dad031be3c2..4c379d23ec6e 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -15,6 +15,8 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/user_namespace.h>
#include <uapi/linux/xattr.h>
struct inode;
@@ -33,7 +35,8 @@ struct xattr_handler {
int (*get)(const struct xattr_handler *, struct dentry *dentry,
struct inode *inode, const char *name, void *buffer,
size_t size);
- int (*set)(const struct xattr_handler *, struct dentry *dentry,
+ int (*set)(const struct xattr_handler *,
+ struct user_namespace *mnt_userns, struct dentry *dentry,
struct inode *inode, const char *name, const void *buffer,
size_t size, int flags);
};
@@ -47,18 +50,30 @@ struct xattr {
};
ssize_t __vfs_getxattr(struct dentry *, struct inode *, const char *, void *, size_t);
-ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
+ssize_t vfs_getxattr(struct user_namespace *, struct dentry *, const char *,
+ void *, size_t);
ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
-int __vfs_setxattr(struct dentry *, struct inode *, const char *, const void *, size_t, int);
-int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
-int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int);
-int __vfs_removexattr(struct dentry *, const char *);
-int vfs_removexattr(struct dentry *, const char *);
+int __vfs_setxattr(struct user_namespace *, struct dentry *, struct inode *,
+ const char *, const void *, size_t, int);
+int __vfs_setxattr_noperm(struct user_namespace *, struct dentry *,
+ const char *, const void *, size_t, int);
+int __vfs_setxattr_locked(struct user_namespace *, struct dentry *,
+ const char *, const void *, size_t, int,
+ struct inode **);
+int vfs_setxattr(struct user_namespace *, struct dentry *, const char *,
+ const void *, size_t, int);
+int __vfs_removexattr(struct user_namespace *, struct dentry *, const char *);
+int __vfs_removexattr_locked(struct user_namespace *, struct dentry *,
+ const char *, struct inode **);
+int vfs_removexattr(struct user_namespace *, struct dentry *, const char *);
ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
-ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name,
+ssize_t vfs_getxattr_alloc(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name,
char **xattr_value, size_t size, gfp_t flags);
+int xattr_supported_namespace(struct inode *inode, const char *prefix);
+
static inline const char *xattr_prefix(const struct xattr_handler *handler)
{
return handler->prefix ?: handler->name;
@@ -73,7 +88,7 @@ struct simple_xattr {
struct list_head list;
char *name;
size_t size;
- char value[0];
+ char value[];
};
/*
@@ -94,7 +109,7 @@ static inline void simple_xattrs_free(struct simple_xattrs *xattrs)
list_for_each_entry_safe(xattr, node, &xattrs->head, list) {
kfree(xattr->name);
- kfree(xattr);
+ kvfree(xattr);
}
}
@@ -102,7 +117,8 @@ struct simple_xattr *simple_xattr_alloc(const void *value, size_t size);
int simple_xattr_get(struct simple_xattrs *xattrs, const char *name,
void *buffer, size_t size);
int simple_xattr_set(struct simple_xattrs *xattrs, const char *name,
- const void *value, size_t size, int flags);
+ const void *value, size_t size, int flags,
+ ssize_t *removed_size);
ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs, char *buffer,
size_t size);
void simple_xattr_list_add(struct simple_xattrs *xattrs,
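A sketch of an xattr_handler written against the new ->set() signature; the handler is illustrative and stores nothing. The mnt_userns argument identifies the user namespace of an idmapped mount so permission helpers can map i_uid/i_gid:

#include <linux/xattr.h>

static int example_xattr_set(const struct xattr_handler *handler,
			     struct user_namespace *mnt_userns,
			     struct dentry *dentry, struct inode *inode,
			     const char *name, const void *value,
			     size_t size, int flags)
{
	/* a NULL value means "remove the attribute" */
	if (!value)
		return -EOPNOTSUPP;

	/* store the attribute here, consulting mnt_userns for
	 * ownership-based permission decisions */
	return 0;
}

static const struct xattr_handler example_xattr_handler = {
	.prefix = "user.",
	.set = example_xattr_set,
};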
diff --git a/include/linux/xxhash.h b/include/linux/xxhash.h
index 52b073fea17f..df42511438d0 100644
--- a/include/linux/xxhash.h
+++ b/include/linux/xxhash.h
@@ -34,7 +34,7 @@
* ("BSD").
*
* You can contact the author at:
- * - xxHash homepage: http://cyan4973.github.io/xxHash/
+ * - xxHash homepage: https://cyan4973.github.io/xxHash/
* - xxHash source repository: https://github.com/Cyan4973/xxHash
*/
diff --git a/include/linux/xz.h b/include/linux/xz.h
index 64cffa6ddfce..7285ca5d56e9 100644
--- a/include/linux/xz.h
+++ b/include/linux/xz.h
@@ -2,7 +2,7 @@
* XZ decompressor
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
- * Igor Pavlov <http://7-zip.org/>
+ * Igor Pavlov <https://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
@@ -28,7 +28,7 @@
* enum xz_mode - Operation mode
*
* @XZ_SINGLE: Single-call mode. This uses less RAM than
- * than multi-call modes, because the LZMA2
+ * multi-call modes, because the LZMA2
* dictionary doesn't need to be allocated as
* part of the decoder state. All required data
* structures are allocated at initialization,
@@ -234,6 +234,112 @@ XZ_EXTERN void xz_dec_reset(struct xz_dec *s);
XZ_EXTERN void xz_dec_end(struct xz_dec *s);
/*
+ * Decompressor for MicroLZMA, an LZMA variant with a very minimal header.
+ * See xz_dec_microlzma_alloc() below for details.
+ *
+ * These functions aren't used or available in preboot code and thus aren't
+ * marked with XZ_EXTERN. This avoids warnings about static functions that
+ * are never defined.
+ */
+/**
+ * struct xz_dec_microlzma - Opaque type to hold the MicroLZMA decoder state
+ */
+struct xz_dec_microlzma;
+
+/**
+ * xz_dec_microlzma_alloc() - Allocate memory for the MicroLZMA decoder
+ * @mode:       XZ_SINGLE or XZ_PREALLOC
+ * @dict_size:  LZMA dictionary size. This must be at least 4 KiB and
+ *              at most 3 GiB.
+ *
+ * In contrast to xz_dec_init(), this function only allocates the memory
+ * and remembers the dictionary size. xz_dec_microlzma_reset() must be used
+ * before calling xz_dec_microlzma_run().
+ *
+ * The amount of allocated memory is a little less than 30 KiB with XZ_SINGLE.
+ * With XZ_PREALLOC, a dictionary buffer of dict_size bytes is also allocated.
+ *
+ * On success, xz_dec_microlzma_alloc() returns a pointer to
+ * struct xz_dec_microlzma. If memory allocation fails or
+ * dict_size is invalid, NULL is returned.
+ *
+ * The compressed format supported by this decoder is a raw LZMA stream
+ * whose first byte (always 0x00) has been replaced with bitwise-negation
+ * of the LZMA properties (lc/lp/pb) byte. For example, if lc/lp/pb is
+ * 3/0/2, the first byte is 0xA2. This way the first byte can never be 0x00.
+ * Just like with LZMA2, lc + lp <= 4 must be true. The LZMA end-of-stream
+ * marker must not be used. The unused values are reserved for future use.
+ * This MicroLZMA header format was created for use in EROFS but may be used
+ * by others too.
+ */
+extern struct xz_dec_microlzma *xz_dec_microlzma_alloc(enum xz_mode mode,
+ uint32_t dict_size);
+
+/**
+ * xz_dec_microlzma_reset() - Reset the MicroLZMA decoder state
+ * @s:          Decoder state allocated using xz_dec_microlzma_alloc()
+ * @comp_size:  Compressed size of the input stream
+ * @uncomp_size: Uncompressed size of the input stream. A value smaller
+ *              than the real uncompressed size of the input stream can
+ *              be specified if uncomp_size_is_exact is set to false.
+ *              uncomp_size can never be set to a value larger than the
+ *              expected real uncompressed size because it would eventually
+ *              result in XZ_DATA_ERROR.
+ * @uncomp_size_is_exact: This is an int instead of bool to avoid
+ *              requiring stdbool.h. This should normally be set to true.
+ *              When this is set to false, error detection is weaker.
+ */
+extern void xz_dec_microlzma_reset(struct xz_dec_microlzma *s,
+ uint32_t comp_size, uint32_t uncomp_size,
+ int uncomp_size_is_exact);
+
+/**
+ * xz_dec_microlzma_run() - Run the MicroLZMA decoder
+ * @s:          Decoder state initialized using xz_dec_microlzma_reset()
+ * @b: Input and output buffers
+ *
+ * This works similarly to xz_dec_run() with a few important differences.
+ * Only the differences are documented here.
+ *
+ * The only possible return values are XZ_OK, XZ_STREAM_END, and
+ * XZ_DATA_ERROR. This function cannot return XZ_BUF_ERROR: if no progress
+ * is possible due to lack of input data or output space, this function will
+ * keep returning XZ_OK. Thus, the calling code must be written so that it
+ * will eventually provide input and output space matching (or exceeding) the
+ * comp_size and uncomp_size arguments given to xz_dec_microlzma_reset().
+ * If the caller cannot do this (for example, if the input file is truncated
+ * or otherwise corrupt), the caller must detect this error by itself to
+ * avoid an infinite loop.
+ *
+ * If the compressed data seems to be corrupt, XZ_DATA_ERROR is returned.
+ * This can also happen when incorrect dictionary, uncompressed, or
+ * compressed sizes have been specified.
+ *
+ * With XZ_PREALLOC only: As an extra feature, b->out may be NULL to skip over
+ * uncompressed data. This way the caller doesn't need to provide a temporary
+ * output buffer for the bytes that will be ignored.
+ *
+ * With XZ_SINGLE only: In contrast to xz_dec_run(), the return value XZ_OK
+ * is also possible and thus XZ_SINGLE is actually a limited multi-call mode.
+ * After XZ_OK the bytes decoded so far may be read from the output buffer.
+ * It is possible to continue decoding but the variables b->out and b->out_pos
+ * MUST NOT be changed by the caller. Increasing the value of b->out_size is
+ * allowed to make more output space available; one doesn't need to provide
+ * space for the whole uncompressed data on the first call. The input buffer
+ * may be changed normally like with XZ_PREALLOC. This way input data can be
+ * provided from non-contiguous memory.
+ */
+extern enum xz_ret xz_dec_microlzma_run(struct xz_dec_microlzma *s,
+ struct xz_buf *b);
+
+/**
+ * xz_dec_microlzma_end() - Free the memory allocated for the decoder state
+ * @s: Decoder state allocated using xz_dec_microlzma_alloc().
+ * If s is NULL, this function does nothing.
+ */
+extern void xz_dec_microlzma_end(struct xz_dec_microlzma *s);
+
+/*
* Standalone build (userspace build or in-kernel build for boot time use)
* needs a CRC32 implementation. For normal in-kernel use, kernel's own
* CRC32 module is used instead, and users of this module don't need to
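A minimal single-shot sketch of the MicroLZMA API documented above, assuming the whole compressed input and a buffer for the whole output are already in memory (decompress_microlzma and the 1 MiB dictionary size are illustrative; the dictionary must be at least as large as the one used at compression time):

	#include <linux/xz.h>
	#include <linux/errno.h>

	static int decompress_microlzma(const u8 *in, size_t in_size,
					u8 *out, size_t out_size)
	{
		struct xz_dec_microlzma *s;
		struct xz_buf b = {
			.in = in, .in_pos = 0, .in_size = in_size,
			.out = out, .out_pos = 0, .out_size = out_size,
		};
		enum xz_ret ret;

		s = xz_dec_microlzma_alloc(XZ_PREALLOC, 1024 * 1024);
		if (s == NULL)
			return -ENOMEM;

		/* Both sizes are exact here, so pass true for the last arg. */
		xz_dec_microlzma_reset(s, in_size, out_size, true);
		ret = xz_dec_microlzma_run(s, &b);
		xz_dec_microlzma_end(s);

		/*
		 * With the full input and output supplied up front, anything
		 * other than XZ_STREAM_END indicates corrupt data.
		 */
		return ret == XZ_STREAM_END ? 0 : -EINVAL;
	}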
diff --git a/include/linux/z2_battery.h b/include/linux/z2_battery.h
index eaba53ff387c..9e8be7a7cd25 100644
--- a/include/linux/z2_battery.h
+++ b/include/linux/z2_battery.h
@@ -6,7 +6,6 @@ struct z2_battery_info {
int batt_I2C_bus;
int batt_I2C_addr;
int batt_I2C_reg;
- int charge_gpio;
int min_voltage;
int max_voltage;
int batt_div;
diff --git a/include/linux/zbud.h b/include/linux/zbud.h
deleted file mode 100644
index b1eaf6e31735..000000000000
--- a/include/linux/zbud.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ZBUD_H_
-#define _ZBUD_H_
-
-#include <linux/types.h>
-
-struct zbud_pool;
-
-struct zbud_ops {
- int (*evict)(struct zbud_pool *pool, unsigned long handle);
-};
-
-struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops);
-void zbud_destroy_pool(struct zbud_pool *pool);
-int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
- unsigned long *handle);
-void zbud_free(struct zbud_pool *pool, unsigned long handle);
-int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries);
-void *zbud_map(struct zbud_pool *pool, unsigned long handle);
-void zbud_unmap(struct zbud_pool *pool, unsigned long handle);
-u64 zbud_get_pool_size(struct zbud_pool *pool);
-
-#endif /* _ZBUD_H_ */
diff --git a/include/linux/zlib.h b/include/linux/zlib.h
index c757d848a758..78ede944c082 100644
--- a/include/linux/zlib.h
+++ b/include/linux/zlib.h
@@ -23,7 +23,7 @@
The data format used by the zlib library is described by RFCs (Request for
- Comments) 1950 to 1952 in the files http://www.ietf.org/rfc/rfc1950.txt
+ Comments) 1950 to 1952 in the files https://www.ietf.org/rfc/rfc1950.txt
(zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format).
*/
diff --git a/include/linux/zorro.h b/include/linux/zorro.h
index 63fbba0740c2..db7416ed6057 100644
--- a/include/linux/zorro.h
+++ b/include/linux/zorro.h
@@ -29,7 +29,6 @@
struct zorro_dev {
struct ExpansionRom rom;
zorro_id id;
- struct zorro_driver *driver; /* which driver has allocated this device */
struct device dev; /* Generic device interface */
u16 slotaddr;
u16 slotsize;
@@ -41,13 +40,6 @@ struct zorro_dev {
/*
- * Zorro bus
- */
-
-extern struct bus_type zorro_bus_type;
-
-
- /*
* Zorro device drivers
*/
@@ -70,11 +62,6 @@ struct zorro_driver {
/* New-style probing */
extern int zorro_register_driver(struct zorro_driver *);
extern void zorro_unregister_driver(struct zorro_driver *);
-extern const struct zorro_device_id *zorro_match_device(const struct zorro_device_id *ids, const struct zorro_dev *z);
-static inline struct zorro_driver *zorro_dev_driver(const struct zorro_dev *z)
-{
- return z->driver;
-}
extern unsigned int zorro_num_autocon; /* # of autoconfig devices found */
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
index 51bf43076165..e8997010612a 100644
--- a/include/linux/zpool.h
+++ b/include/linux/zpool.h
@@ -73,6 +73,7 @@ u64 zpool_get_total_size(struct zpool *pool);
* @malloc: allocate mem from a pool.
* @free: free mem from a pool.
* @shrink: shrink the pool.
+ * @sleep_mapped: whether the zpool driver can sleep during map.
* @map: map a handle.
* @unmap: unmap a handle.
* @total_size: get total size of a pool.
@@ -100,6 +101,7 @@ struct zpool_driver {
int (*shrink)(void *pool, unsigned int pages,
unsigned int *reclaimed);
+ bool sleep_mapped;
void *(*map)(void *pool, unsigned long handle,
enum zpool_mapmode mm);
void (*unmap)(void *pool, unsigned long handle);
@@ -112,5 +114,6 @@ void zpool_register_driver(struct zpool_driver *driver);
int zpool_unregister_driver(struct zpool_driver *driver);
bool zpool_evictable(struct zpool *pool);
+bool zpool_can_sleep_mapped(struct zpool *pool);
#endif
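The new sleep_mapped flag exists for callers that may sleep while holding a mapped handle. A sketch of the intended pattern, loosely modeled on how a zpool user such as zswap can use it (get_obj and the tmp buffer are illustrative, not part of the API):

	#include <linux/zpool.h>
	#include <linux/string.h>

	static void *get_obj(struct zpool *pool, unsigned long handle,
			     void *tmp, size_t len)
	{
		void *src = zpool_map_handle(pool, handle, ZPOOL_MM_RO);

		if (zpool_can_sleep_mapped(pool))
			return src;	/* caller unmaps when done */

		/*
		 * The driver may hold a per-CPU lock while mapped, so copy
		 * the object out and unmap before anything that might sleep.
		 */
		memcpy(tmp, src, len);
		zpool_unmap_handle(pool, handle);
		return tmp;
	}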
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 2219cce81ca4..2a430e713ce5 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -20,7 +20,6 @@
* zsmalloc mapping modes
*
* NOTE: These only make a difference when a mapped object spans pages.
- * They also have no effect when PGTABLE_MAPPING is selected.
*/
enum zs_mapmode {
ZS_MM_RW, /* normal read-write mapping */
@@ -36,7 +35,7 @@ enum zs_mapmode {
struct zs_pool_stats {
/* How many pages were migrated (freed) */
- unsigned long pages_compacted;
+ atomic_long_t pages_compacted;
};
struct zs_pool;
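Since pages_compacted is now an atomic_long_t, readers go through the atomic accessors. A small sketch using zs_pool_stats(), which zsmalloc already exposes (compacted_pages is a hypothetical helper):

	#include <linux/zsmalloc.h>
	#include <linux/atomic.h>

	static unsigned long compacted_pages(struct zs_pool *pool)
	{
		struct zs_pool_stats stats;

		zs_pool_stats(pool, &stats);
		return atomic_long_read(&stats.pages_compacted);
	}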
diff --git a/include/linux/zstd.h b/include/linux/zstd.h
index 249575e2485f..113408eef6ec 100644
--- a/include/linux/zstd.h
+++ b/include/linux/zstd.h
@@ -1,138 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of https://github.com/facebook/zstd.
- * An additional grant of patent rights can be found in the PATENTS file in the
- * same directory.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of https://github.com/facebook/zstd) and
+ * the GPLv2 (found in the COPYING file in the root directory of
+ * https://github.com/facebook/zstd). You may select, at your option, one of the
+ * above-listed licenses.
*/
-#ifndef ZSTD_H
-#define ZSTD_H
+#ifndef LINUX_ZSTD_H
+#define LINUX_ZSTD_H
-/* ====== Dependency ======*/
-#include <linux/types.h> /* size_t */
+/**
+ * This is a kernel-style API that wraps the upstream zstd API, which cannot be
+ * used directly because the symbols aren't exported. It exposes the minimal
+ * functionality which is currently required by users of zstd in the kernel.
+ * Expose extra functions from lib/zstd/zstd.h as needed.
+ */
+/* ====== Dependency ====== */
+#include <linux/types.h>
+#include <linux/zstd_errors.h>
+#include <linux/zstd_lib.h>
-/*-*****************************************************************************
- * Introduction
+/* ====== Helper Functions ====== */
+/**
+ * zstd_compress_bound() - maximum compressed size in worst case scenario
+ * @src_size: The size of the data to compress.
*
- * zstd, short for Zstandard, is a fast lossless compression algorithm,
- * targeting real-time compression scenarios at zlib-level and better
- * compression ratios. The zstd compression library provides in-memory
- * compression and decompression functions. The library supports compression
- * levels from 1 up to ZSTD_maxCLevel() which is 22. Levels >= 20, labeled
- * ultra, should be used with caution, as they require more memory.
- * Compression can be done in:
- * - a single step, reusing a context (described as Explicit memory management)
- * - unbounded multiple steps (described as Streaming compression)
- * The compression ratio achievable on small data can be highly improved using
- * compression with a dictionary in:
- * - a single step (described as Simple dictionary API)
- * - a single step, reusing a dictionary (described as Fast dictionary API)
- ******************************************************************************/
-
-/*====== Helper functions ======*/
+ * Return: The maximum compressed size in the worst case scenario.
+ */
+size_t zstd_compress_bound(size_t src_size);
/**
- * enum ZSTD_ErrorCode - zstd error codes
+ * zstd_is_error() - tells if a size_t function result is an error code
+ * @code: The function result to check for error.
*
- * Functions that return size_t can be checked for errors using ZSTD_isError()
- * and the ZSTD_ErrorCode can be extracted using ZSTD_getErrorCode().
+ * Return: Non-zero iff the code is an error.
+ */
+unsigned int zstd_is_error(size_t code);
+
+/**
+ * enum zstd_error_code - zstd error codes
*/
-typedef enum {
- ZSTD_error_no_error,
- ZSTD_error_GENERIC,
- ZSTD_error_prefix_unknown,
- ZSTD_error_version_unsupported,
- ZSTD_error_parameter_unknown,
- ZSTD_error_frameParameter_unsupported,
- ZSTD_error_frameParameter_unsupportedBy32bits,
- ZSTD_error_frameParameter_windowTooLarge,
- ZSTD_error_compressionParameter_unsupported,
- ZSTD_error_init_missing,
- ZSTD_error_memory_allocation,
- ZSTD_error_stage_wrong,
- ZSTD_error_dstSize_tooSmall,
- ZSTD_error_srcSize_wrong,
- ZSTD_error_corruption_detected,
- ZSTD_error_checksum_wrong,
- ZSTD_error_tableLog_tooLarge,
- ZSTD_error_maxSymbolValue_tooLarge,
- ZSTD_error_maxSymbolValue_tooSmall,
- ZSTD_error_dictionary_corrupted,
- ZSTD_error_dictionary_wrong,
- ZSTD_error_dictionaryCreation_failed,
- ZSTD_error_maxCode
-} ZSTD_ErrorCode;
+typedef ZSTD_ErrorCode zstd_error_code;
/**
- * ZSTD_maxCLevel() - maximum compression level available
+ * zstd_get_error_code() - translates an error function result to an error code
+ * @code: The function result for which zstd_is_error(code) is true.
*
- * Return: Maximum compression level available.
+ * Return: A unique error code for this error.
*/
-int ZSTD_maxCLevel(void);
+zstd_error_code zstd_get_error_code(size_t code);
+
/**
- * ZSTD_compressBound() - maximum compressed size in worst case scenario
- * @srcSize: The size of the data to compress.
+ * zstd_get_error_name() - translates an error function result to a string
+ * @code: The function result for which zstd_is_error(code) is true.
*
- * Return: The maximum compressed size in the worst case scenario.
+ * Return: An error string corresponding to the error code.
*/
-size_t ZSTD_compressBound(size_t srcSize);
+const char *zstd_get_error_name(size_t code);
+
/**
- * ZSTD_isError() - tells if a size_t function result is an error code
- * @code: The function result to check for error.
+ * zstd_min_clevel() - minimum allowed compression level
*
- * Return: Non-zero iff the code is an error.
+ * Return: The minimum allowed compression level.
*/
-static __attribute__((unused)) unsigned int ZSTD_isError(size_t code)
-{
- return code > (size_t)-ZSTD_error_maxCode;
-}
+int zstd_min_clevel(void);
+
/**
- * ZSTD_getErrorCode() - translates an error function result to a ZSTD_ErrorCode
- * @functionResult: The result of a function for which ZSTD_isError() is true.
+ * zstd_max_clevel() - maximum allowed compression level
*
- * Return: The ZSTD_ErrorCode corresponding to the functionResult or 0
- * if the functionResult isn't an error.
+ * Return: The maximum allowed compression level.
*/
-static __attribute__((unused)) ZSTD_ErrorCode ZSTD_getErrorCode(
- size_t functionResult)
-{
- if (!ZSTD_isError(functionResult))
- return (ZSTD_ErrorCode)0;
- return (ZSTD_ErrorCode)(0 - functionResult);
-}
+int zstd_max_clevel(void);
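The helpers above replace the old inline ZSTD_isError()/ZSTD_getErrorCode() pair. A small illustrative wrapper showing the intended checking pattern (check_zstd_result is a hypothetical name, not part of the header):

	#include <linux/zstd.h>
	#include <linux/errno.h>
	#include <linux/printk.h>

	static ssize_t check_zstd_result(size_t ret)
	{
		if (zstd_is_error(ret)) {
			pr_debug("zstd error: %s (%d)\n",
				 zstd_get_error_name(ret),
				 (int)zstd_get_error_code(ret));
			return -EINVAL;
		}
		return ret;	/* a valid size */
	}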
+
+/* ====== Parameter Selection ====== */
/**
- * enum ZSTD_strategy - zstd compression search strategy
+ * enum zstd_strategy - zstd compression search strategy
*
- * From faster to stronger.
+ * From faster to stronger. See zstd_lib.h.
*/
-typedef enum {
- ZSTD_fast,
- ZSTD_dfast,
- ZSTD_greedy,
- ZSTD_lazy,
- ZSTD_lazy2,
- ZSTD_btlazy2,
- ZSTD_btopt,
- ZSTD_btopt2
-} ZSTD_strategy;
+typedef ZSTD_strategy zstd_strategy;
/**
- * struct ZSTD_compressionParameters - zstd compression parameters
+ * struct zstd_compression_parameters - zstd compression parameters
* @windowLog: Log of the largest match distance. Larger means more
* compression, and more memory needed during decompression.
- * @chainLog: Fully searched segment. Larger means more compression, slower,
- * and more memory (useless for fast).
+ * @chainLog: Fully searched segment. Larger means more compression,
+ * slower, and more memory (useless for fast).
* @hashLog: Dispatch table. Larger means more compression,
* slower, and more memory.
* @searchLog: Number of searches. Larger means more compression and slower.
@@ -141,1017 +100,348 @@ typedef enum {
* @targetLength: Acceptable match size for optimal parser (only). Larger means
* more compression, and slower.
* @strategy: The zstd compression strategy.
+ *
+ * See zstd_lib.h.
*/
-typedef struct {
- unsigned int windowLog;
- unsigned int chainLog;
- unsigned int hashLog;
- unsigned int searchLog;
- unsigned int searchLength;
- unsigned int targetLength;
- ZSTD_strategy strategy;
-} ZSTD_compressionParameters;
+typedef ZSTD_compressionParameters zstd_compression_parameters;
/**
- * struct ZSTD_frameParameters - zstd frame parameters
- * @contentSizeFlag: Controls whether content size will be present in the frame
- * header (when known).
- * @checksumFlag: Controls whether a 32-bit checksum is generated at the end
- * of the frame for error detection.
- * @noDictIDFlag: Controls whether dictID will be saved into the frame header
- * when using dictionary compression.
+ * struct zstd_frame_parameters - zstd frame parameters
+ * @contentSizeFlag: Controls whether content size will be present in the
+ * frame header (when known).
+ * @checksumFlag: Controls whether a 32-bit checksum is generated at the
+ * end of the frame for error detection.
+ * @noDictIDFlag: Controls whether dictID will be saved into the frame
+ * header when using dictionary compression.
*
- * The default value is all fields set to 0.
+ * The default value is all fields set to 0. See zstd_lib.h.
*/
-typedef struct {
- unsigned int contentSizeFlag;
- unsigned int checksumFlag;
- unsigned int noDictIDFlag;
-} ZSTD_frameParameters;
+typedef ZSTD_frameParameters zstd_frame_parameters;
/**
- * struct ZSTD_parameters - zstd parameters
+ * struct zstd_parameters - zstd parameters
* @cParams: The compression parameters.
* @fParams: The frame parameters.
*/
-typedef struct {
- ZSTD_compressionParameters cParams;
- ZSTD_frameParameters fParams;
-} ZSTD_parameters;
+typedef ZSTD_parameters zstd_parameters;
/**
- * ZSTD_getCParams() - returns ZSTD_compressionParameters for selected level
- * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel().
- * @estimatedSrcSize: The estimated source size to compress or 0 if unknown.
- * @dictSize: The dictionary size or 0 if a dictionary isn't being used.
+ * zstd_get_params() - returns zstd_parameters for selected level
+ * @level: The compression level
+ * @estimated_src_size: The estimated source size to compress or 0
+ * if unknown.
*
- * Return: The selected ZSTD_compressionParameters.
+ * Return: The selected zstd_parameters.
*/
-ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel,
- unsigned long long estimatedSrcSize, size_t dictSize);
+zstd_parameters zstd_get_params(int level,
+ unsigned long long estimated_src_size);
-/**
- * ZSTD_getParams() - returns ZSTD_parameters for selected level
- * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel().
- * @estimatedSrcSize: The estimated source size to compress or 0 if unknown.
- * @dictSize: The dictionary size or 0 if a dictionary isn't being used.
- *
- * The same as ZSTD_getCParams() except also selects the default frame
- * parameters (all zero).
- *
- * Return: The selected ZSTD_parameters.
- */
-ZSTD_parameters ZSTD_getParams(int compressionLevel,
- unsigned long long estimatedSrcSize, size_t dictSize);
+/* ====== Single-pass Compression ====== */
-/*-*************************************
- * Explicit memory management
- **************************************/
+typedef ZSTD_CCtx zstd_cctx;
/**
- * ZSTD_CCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_CCtx
- * @cParams: The compression parameters to be used for compression.
+ * zstd_cctx_workspace_bound() - max memory needed to initialize a zstd_cctx
+ * @parameters: The compression parameters to be used.
*
* If multiple compression parameters might be used, the caller must call
- * ZSTD_CCtxWorkspaceBound() for each set of parameters and use the maximum
+ * zstd_cctx_workspace_bound() for each set of parameters and use the maximum
* size.
*
- * Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initCCtx().
+ * Return: A lower bound on the size of the workspace that is passed to
+ * zstd_init_cctx().
*/
-size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams);
+size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *parameters);
/**
- * struct ZSTD_CCtx - the zstd compression context
- *
- * When compressing many times it is recommended to allocate a context just once
- * and reuse it for each successive compression operation.
- */
-typedef struct ZSTD_CCtx_s ZSTD_CCtx;
-/**
- * ZSTD_initCCtx() - initialize a zstd compression context
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace. Use ZSTD_CCtxWorkspaceBound() to
- * determine how large the workspace must be.
- *
- * Return: A compression context emplaced into workspace.
- */
-ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize);
-
-/**
- * ZSTD_compressCCtx() - compress src into dst
- * @ctx: The context. Must have been initialized with a workspace at
- * least as large as ZSTD_CCtxWorkspaceBound(params.cParams).
- * @dst: The buffer to compress src into.
- * @dstCapacity: The size of the destination buffer. May be any size, but
- * ZSTD_compressBound(srcSize) is guaranteed to be large enough.
- * @src: The data to compress.
- * @srcSize: The size of the data to compress.
- * @params: The parameters to use for compression. See ZSTD_getParams().
- *
- * Return: The compressed size or an error, which can be checked using
- * ZSTD_isError().
- */
-size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize, ZSTD_parameters params);
-
-/**
- * ZSTD_DCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_DCtx
- *
- * Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initDCtx().
- */
-size_t ZSTD_DCtxWorkspaceBound(void);
-
-/**
- * struct ZSTD_DCtx - the zstd decompression context
- *
- * When decompressing many times it is recommended to allocate a context just
- * once and reuse it for each successive decompression operation.
- */
-typedef struct ZSTD_DCtx_s ZSTD_DCtx;
-/**
- * ZSTD_initDCtx() - initialize a zstd decompression context
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace. Use ZSTD_DCtxWorkspaceBound() to
- * determine how large the workspace must be.
- *
- * Return: A decompression context emplaced into workspace.
- */
-ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize);
-
-/**
- * ZSTD_decompressDCtx() - decompress zstd compressed src into dst
- * @ctx: The decompression context.
- * @dst: The buffer to decompress src into.
- * @dstCapacity: The size of the destination buffer. Must be at least as large
- * as the decompressed size. If the caller cannot upper bound the
- * decompressed size, then it's better to use the streaming API.
- * @src: The zstd compressed data to decompress. Multiple concatenated
- * frames and skippable frames are allowed.
- * @srcSize: The exact size of the data to decompress.
- *
- * Return: The decompressed size or an error, which can be checked using
- * ZSTD_isError().
- */
-size_t ZSTD_decompressDCtx(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-
-/*-************************
- * Simple dictionary API
- **************************/
-
-/**
- * ZSTD_compress_usingDict() - compress src into dst using a dictionary
- * @ctx: The context. Must have been initialized with a workspace at
- * least as large as ZSTD_CCtxWorkspaceBound(params.cParams).
- * @dst: The buffer to compress src into.
- * @dstCapacity: The size of the destination buffer. May be any size, but
- * ZSTD_compressBound(srcSize) is guaranteed to be large enough.
- * @src: The data to compress.
- * @srcSize: The size of the data to compress.
- * @dict: The dictionary to use for compression.
- * @dictSize: The size of the dictionary.
- * @params: The parameters to use for compression. See ZSTD_getParams().
- *
- * Compression using a predefined dictionary. The same dictionary must be used
- * during decompression.
- *
- * Return: The compressed size or an error, which can be checked using
- * ZSTD_isError().
- */
-size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize, const void *dict, size_t dictSize,
- ZSTD_parameters params);
-
-/**
- * ZSTD_decompress_usingDict() - decompress src into dst using a dictionary
- * @ctx: The decompression context.
- * @dst: The buffer to decompress src into.
- * @dstCapacity: The size of the destination buffer. Must be at least as large
- * as the decompressed size. If the caller cannot upper bound the
- * decompressed size, then it's better to use the streaming API.
- * @src: The zstd compressed data to decompress. Multiple concatenated
- * frames and skippable frames are allowed.
- * @srcSize: The exact size of the data to decompress.
- * @dict: The dictionary to use for decompression. The same dictionary
- * must've been used to compress the data.
- * @dictSize: The size of the dictionary.
- *
- * Return: The decompressed size or an error, which can be checked using
- * ZSTD_isError().
- */
-size_t ZSTD_decompress_usingDict(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize, const void *dict, size_t dictSize);
-
-/*-**************************
- * Fast dictionary API
- ***************************/
-
-/**
- * ZSTD_CDictWorkspaceBound() - memory needed to initialize a ZSTD_CDict
- * @cParams: The compression parameters to be used for compression.
+ * zstd_init_cctx() - initialize a zstd compression context
+ * @workspace: The workspace to emplace the context into. It must outlive
+ * the returned context.
+ * @workspace_size: The size of workspace. Use zstd_cctx_workspace_bound() to
+ * determine how large the workspace must be.
*
- * Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initCDict().
- */
-size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams);
-
-/**
- * struct ZSTD_CDict - a digested dictionary to be used for compression
+ * Return: A zstd compression context or NULL on error.
*/
-typedef struct ZSTD_CDict_s ZSTD_CDict;
+zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size);
/**
- * ZSTD_initCDict() - initialize a digested dictionary for compression
- * @dictBuffer: The dictionary to digest. The buffer is referenced by the
- * ZSTD_CDict so it must outlive the returned ZSTD_CDict.
- * @dictSize: The size of the dictionary.
- * @params: The parameters to use for compression. See ZSTD_getParams().
- * @workspace: The workspace. It must outlive the returned ZSTD_CDict.
- * @workspaceSize: The workspace size. Must be at least
- * ZSTD_CDictWorkspaceBound(params.cParams).
+ * zstd_compress_cctx() - compress src into dst with the initialized parameters
+ * @cctx: The context. Must have been initialized with zstd_init_cctx().
+ * @dst: The buffer to compress src into.
+ * @dst_capacity: The size of the destination buffer. May be any size, but
+ * zstd_compress_bound(src_size) is guaranteed to be large enough.
+ * @src: The data to compress.
+ * @src_size: The size of the data to compress.
+ * @parameters: The compression parameters to be used.
*
- * When compressing multiple messages / blocks with the same dictionary it is
- * recommended to load it just once. The ZSTD_CDict merely references the
- * dictBuffer, so it must outlive the returned ZSTD_CDict.
- *
- * Return: The digested dictionary emplaced into workspace.
+ * Return: The compressed size or an error, which can be checked using
+ * zstd_is_error().
*/
-ZSTD_CDict *ZSTD_initCDict(const void *dictBuffer, size_t dictSize,
- ZSTD_parameters params, void *workspace, size_t workspaceSize);
+size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
+ const void *src, size_t src_size, const zstd_parameters *parameters);
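A hedged sketch of the single-pass flow just documented: size the workspace from the chosen parameters, emplace a context in it, then compress (one_shot_compress and the fixed level 3 are illustrative choices, not part of the header):

	#include <linux/zstd.h>
	#include <linux/vmalloc.h>
	#include <linux/errno.h>

	static ssize_t one_shot_compress(const void *src, size_t src_size,
					 void *dst, size_t dst_capacity)
	{
		zstd_parameters params = zstd_get_params(3, src_size);
		size_t ws_size = zstd_cctx_workspace_bound(&params.cParams);
		void *ws = vmalloc(ws_size);
		zstd_cctx *cctx;
		size_t ret;

		if (!ws)
			return -ENOMEM;
		cctx = zstd_init_cctx(ws, ws_size);
		if (!cctx) {
			vfree(ws);
			return -EINVAL;
		}
		ret = zstd_compress_cctx(cctx, dst, dst_capacity,
					 src, src_size, &params);
		vfree(ws);
		return zstd_is_error(ret) ? -EINVAL : ret;
	}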
-/**
- * ZSTD_compress_usingCDict() - compress src into dst using a ZSTD_CDict
- * @ctx: The context. Must have been initialized with a workspace at
- * least as large as ZSTD_CCtxWorkspaceBound(cParams) where
- * cParams are the compression parameters used to initialize the
- * cdict.
- * @dst: The buffer to compress src into.
- * @dstCapacity: The size of the destination buffer. May be any size, but
- * ZSTD_compressBound(srcSize) is guaranteed to be large enough.
- * @src: The data to compress.
- * @srcSize: The size of the data to compress.
- * @cdict: The digested dictionary to use for compression.
- * @params: The parameters to use for compression. See ZSTD_getParams().
- *
- * Compression using a digested dictionary. The same dictionary must be used
- * during decompression.
- *
- * Return: The compressed size or an error, which can be checked using
- * ZSTD_isError().
- */
-size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize, const ZSTD_CDict *cdict);
+/* ====== Single-pass Decompression ====== */
+typedef ZSTD_DCtx zstd_dctx;
/**
- * ZSTD_DDictWorkspaceBound() - memory needed to initialize a ZSTD_DDict
+ * zstd_dctx_workspace_bound() - max memory needed to initialize a zstd_dctx
*
- * Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initDDict().
- */
-size_t ZSTD_DDictWorkspaceBound(void);
-
-/**
- * struct ZSTD_DDict - a digested dictionary to be used for decompression
+ * Return: A lower bound on the size of the workspace that is passed to
+ * zstd_init_dctx().
*/
-typedef struct ZSTD_DDict_s ZSTD_DDict;
+size_t zstd_dctx_workspace_bound(void);
/**
- * ZSTD_initDDict() - initialize a digested dictionary for decompression
- * @dictBuffer: The dictionary to digest. The buffer is referenced by the
- * ZSTD_DDict so it must outlive the returned ZSTD_DDict.
- * @dictSize: The size of the dictionary.
- * @workspace: The workspace. It must outlive the returned ZSTD_DDict.
- * @workspaceSize: The workspace size. Must be at least
- * ZSTD_DDictWorkspaceBound().
- *
- * When decompressing multiple messages / blocks with the same dictionary it is
- * recommended to load it just once. The ZSTD_DDict merely references the
- * dictBuffer, so it must outlive the returned ZSTD_DDict.
+ * zstd_init_dctx() - initialize a zstd decompression context
+ * @workspace: The workspace to emplace the context into. It must outlive
+ * the returned context.
+ * @workspace_size: The size of workspace. Use zstd_dctx_workspace_bound() to
+ * determine how large the workspace must be.
*
- * Return: The digested dictionary emplaced into workspace.
+ * Return: A zstd decompression context or NULL on error.
*/
-ZSTD_DDict *ZSTD_initDDict(const void *dictBuffer, size_t dictSize,
- void *workspace, size_t workspaceSize);
+zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size);
/**
- * ZSTD_decompress_usingDDict() - decompress src into dst using a ZSTD_DDict
- * @ctx: The decompression context.
- * @dst: The buffer to decompress src into.
- * @dstCapacity: The size of the destination buffer. Must be at least as large
- * as the decompressed size. If the caller cannot upper bound the
- * decompressed size, then it's better to use the streaming API.
- * @src: The zstd compressed data to decompress. Multiple concatenated
- * frames and skippable frames are allowed.
- * @srcSize: The exact size of the data to decompress.
- * @ddict: The digested dictionary to use for decompression. The same
- * dictionary must've been used to compress the data.
+ * zstd_decompress_dctx() - decompress zstd compressed src into dst
+ * @dctx: The decompression context.
+ * @dst: The buffer to decompress src into.
+ * @dst_capacity: The size of the destination buffer. Must be at least as large
+ * as the decompressed size. If the caller cannot upper bound the
+ * decompressed size, then it's better to use the streaming API.
+ * @src: The zstd compressed data to decompress. Multiple concatenated
+ * frames and skippable frames are allowed.
+ * @src_size: The exact size of the data to decompress.
*
- * Return: The decompressed size or an error, which can be checked using
- * ZSTD_isError().
+ * Return: The decompressed size or an error, which can be checked using
+ * zstd_is_error().
*/
-size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst,
- size_t dstCapacity, const void *src, size_t srcSize,
- const ZSTD_DDict *ddict);
+size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
+ const void *src, size_t src_size);
-
-/*-**************************
- * Streaming
- ***************************/
+/* ====== Streaming Buffers ====== */
/**
- * struct ZSTD_inBuffer - input buffer for streaming
+ * struct zstd_in_buffer - input buffer for streaming
* @src: Start of the input buffer.
* @size: Size of the input buffer.
* @pos: Position where reading stopped. Will be updated.
* Necessarily 0 <= pos <= size.
+ *
+ * See zstd_lib.h.
*/
-typedef struct ZSTD_inBuffer_s {
- const void *src;
- size_t size;
- size_t pos;
-} ZSTD_inBuffer;
+typedef ZSTD_inBuffer zstd_in_buffer;
/**
- * struct ZSTD_outBuffer - output buffer for streaming
+ * struct zstd_out_buffer - output buffer for streaming
* @dst: Start of the output buffer.
* @size: Size of the output buffer.
* @pos: Position where writing stopped. Will be updated.
* Necessarily 0 <= pos <= size.
+ *
+ * See zstd_lib.h.
*/
-typedef struct ZSTD_outBuffer_s {
- void *dst;
- size_t size;
- size_t pos;
-} ZSTD_outBuffer;
+typedef ZSTD_outBuffer zstd_out_buffer;
+/* ====== Streaming Compression ====== */
-
-/*-*****************************************************************************
- * Streaming compression - HowTo
- *
- * A ZSTD_CStream object is required to track streaming operation.
- * Use ZSTD_initCStream() to initialize a ZSTD_CStream object.
- * ZSTD_CStream objects can be reused multiple times on consecutive compression
- * operations. It is recommended to re-use ZSTD_CStream in situations where many
- * streaming operations will be achieved consecutively. Use one separate
- * ZSTD_CStream per thread for parallel execution.
- *
- * Use ZSTD_compressStream() repetitively to consume input stream.
- * The function will automatically update both `pos` fields.
- * Note that it may not consume the entire input, in which case `pos < size`,
- * and it's up to the caller to present again remaining data.
- * It returns a hint for the preferred number of bytes to use as an input for
- * the next function call.
- *
- * At any moment, it's possible to flush whatever data remains within internal
- * buffer, using ZSTD_flushStream(). `output->pos` will be updated. There might
- * still be some content left within the internal buffer if `output->size` is
- * too small. It returns the number of bytes left in the internal buffer and
- * must be called until it returns 0.
- *
- * ZSTD_endStream() instructs to finish a frame. It will perform a flush and
- * write frame epilogue. The epilogue is required for decoders to consider a
- * frame completed. Similar to ZSTD_flushStream(), it may not be able to flush
- * the full content if `output->size` is too small. In which case, call again
- * ZSTD_endStream() to complete the flush. It returns the number of bytes left
- * in the internal buffer and must be called until it returns 0.
- ******************************************************************************/
+typedef ZSTD_CStream zstd_cstream;
/**
- * ZSTD_CStreamWorkspaceBound() - memory needed to initialize a ZSTD_CStream
- * @cParams: The compression parameters to be used for compression.
+ * zstd_cstream_workspace_bound() - memory needed to initialize a zstd_cstream
+ * @cparams: The compression parameters to be used for compression.
*
* Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initCStream() and ZSTD_initCStream_usingCDict().
- */
-size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams);
-
-/**
- * struct ZSTD_CStream - the zstd streaming compression context
- */
-typedef struct ZSTD_CStream_s ZSTD_CStream;
-
-/*===== ZSTD_CStream management functions =====*/
-/**
- * ZSTD_initCStream() - initialize a zstd streaming compression context
- * @params: The zstd compression parameters.
- * @pledgedSrcSize: If params.fParams.contentSizeFlag == 1 then the caller must
- * pass the source size (zero means empty source). Otherwise,
- * the caller may optionally pass the source size, or zero if
- * unknown.
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace.
- * Use ZSTD_CStreamWorkspaceBound(params.cParams) to determine
- * how large the workspace must be.
- *
- * Return: The zstd streaming compression context.
+ * zstd_init_cstream().
*/
-ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params,
- unsigned long long pledgedSrcSize, void *workspace,
- size_t workspaceSize);
+size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams);
/**
- * ZSTD_initCStream_usingCDict() - initialize a streaming compression context
- * @cdict: The digested dictionary to use for compression.
- * @pledgedSrcSize: Optionally the source size, or zero if unknown.
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace. Call ZSTD_CStreamWorkspaceBound()
- * with the cParams used to initialize the cdict to determine
- * how large the workspace must be.
+ * zstd_init_cstream() - initialize a zstd streaming compression context
+ * @parameters: The zstd parameters to use for compression.
+ * @pledged_src_size: If parameters->fParams.contentSizeFlag == 1 then the
+ * caller must pass the source size (zero means empty source).
+ * Otherwise, the caller may optionally pass the source
+ * size, or zero if unknown.
+ * @workspace: The workspace to emplace the context into. It must outlive
+ * the returned context.
+ * @workspace_size: The size of workspace.
+ * Use zstd_cstream_workspace_bound(&parameters->cParams)
+ * to determine how large the workspace must be.
*
- * Return: The zstd streaming compression context.
+ * Return: The zstd streaming compression context or NULL on error.
*/
-ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict,
- unsigned long long pledgedSrcSize, void *workspace,
- size_t workspaceSize);
+zstd_cstream *zstd_init_cstream(const zstd_parameters *parameters,
+ unsigned long long pledged_src_size, void *workspace, size_t workspace_size);
-/*===== Streaming compression functions =====*/
/**
- * ZSTD_resetCStream() - reset the context using parameters from creation
- * @zcs: The zstd streaming compression context to reset.
- * @pledgedSrcSize: Optionally the source size, or zero if unknown.
+ * zstd_reset_cstream() - reset the context using parameters from creation
+ * @cstream: The zstd streaming compression context to reset.
+ * @pledged_src_size: Optionally the source size, or zero if unknown.
*
* Resets the context using the parameters from creation. Skips dictionary
- * loading, since it can be reused. If `pledgedSrcSize` is non-zero the frame
+ * loading, since it can be reused. If `pledged_src_size` is non-zero the frame
* content size is always written into the frame header.
*
- * Return: Zero or an error, which can be checked using ZSTD_isError().
+ * Return: Zero or an error, which can be checked using
+ * zstd_is_error().
*/
-size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize);
+size_t zstd_reset_cstream(zstd_cstream *cstream,
+ unsigned long long pledged_src_size);
+
/**
- * ZSTD_compressStream() - streaming compress some of input into output
- * @zcs: The zstd streaming compression context.
- * @output: Destination buffer. `output->pos` is updated to indicate how much
- * compressed data was written.
- * @input: Source buffer. `input->pos` is updated to indicate how much data was
- * read. Note that it may not consume the entire input, in which case
- * `input->pos < input->size`, and it's up to the caller to present
- * remaining data again.
+ * zstd_compress_stream() - streaming compress some of input into output
+ * @cstream: The zstd streaming compression context.
+ * @output: Destination buffer. `output->pos` is updated to indicate how much
+ * compressed data was written.
+ * @input: Source buffer. `input->pos` is updated to indicate how much data
+ * was read. Note that it may not consume the entire input, in which
+ * case `input->pos < input->size`, and it's up to the caller to
+ * present remaining data again.
*
* The `input` and `output` buffers may be any size. Guaranteed to make some
* forward progress if `input` and `output` are not empty.
*
- * Return: A hint for the number of bytes to use as the input for the next
- * function call or an error, which can be checked using
- * ZSTD_isError().
+ * Return: A hint for the number of bytes to use as the input for the next
+ * function call or an error, which can be checked using
+ * zstd_is_error().
*/
-size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output,
- ZSTD_inBuffer *input);
+size_t zstd_compress_stream(zstd_cstream *cstream, zstd_out_buffer *output,
+ zstd_in_buffer *input);
+
/**
- * ZSTD_flushStream() - flush internal buffers into output
- * @zcs: The zstd streaming compression context.
- * @output: Destination buffer. `output->pos` is updated to indicate how much
- * compressed data was written.
+ * zstd_flush_stream() - flush internal buffers into output
+ * @cstream: The zstd streaming compression context.
+ * @output: Destination buffer. `output->pos` is updated to indicate how much
+ * compressed data was written.
*
- * ZSTD_flushStream() must be called until it returns 0, meaning all the data
- * has been flushed. Since ZSTD_flushStream() causes a block to be ended,
+ * zstd_flush_stream() must be called until it returns 0, meaning all the data
+ * has been flushed. Since zstd_flush_stream() causes a block to be ended,
* calling it too often will degrade the compression ratio.
*
- * Return: The number of bytes still present within internal buffers or an
- * error, which can be checked using ZSTD_isError().
+ * Return: The number of bytes still present within internal buffers or an
+ * error, which can be checked using zstd_is_error().
*/
-size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output);
-/**
- * ZSTD_endStream() - flush internal buffers into output and end the frame
- * @zcs: The zstd streaming compression context.
- * @output: Destination buffer. `output->pos` is updated to indicate how much
- * compressed data was written.
- *
- * ZSTD_endStream() must be called until it returns 0, meaning all the data has
- * been flushed and the frame epilogue has been written.
- *
- * Return: The number of bytes still present within internal buffers or an
- * error, which can be checked using ZSTD_isError().
- */
-size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output);
+size_t zstd_flush_stream(zstd_cstream *cstream, zstd_out_buffer *output);
/**
- * ZSTD_CStreamInSize() - recommended size for the input buffer
- *
- * Return: The recommended size for the input buffer.
- */
-size_t ZSTD_CStreamInSize(void);
-/**
- * ZSTD_CStreamOutSize() - recommended size for the output buffer
+ * zstd_end_stream() - flush internal buffers into output and end the frame
+ * @cstream: The zstd streaming compression context.
+ * @output: Destination buffer. `output->pos` is updated to indicate how much
+ * compressed data was written.
*
- * When the output buffer is at least this large, it is guaranteed to be large
- * enough to flush at least one complete compressed block.
+ * zstd_end_stream() must be called until it returns 0, meaning all the data has
+ * been flushed and the frame epilogue has been written.
*
- * Return: The recommended size for the output buffer.
+ * Return: The number of bytes still present within internal buffers or an
+ * error, which can be checked using zstd_is_error().
*/
-size_t ZSTD_CStreamOutSize(void);
+size_t zstd_end_stream(zstd_cstream *cstream, zstd_out_buffer *output);
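A compact sketch of the streaming-compression loop these three functions form (stream_compress is a hypothetical helper; a real caller would drain output->pos into its sink whenever the output buffer fills, which this sketch omits):

	#include <linux/zstd.h>
	#include <linux/errno.h>

	static int stream_compress(zstd_cstream *cstream,
				   zstd_out_buffer *output,
				   zstd_in_buffer *input)
	{
		size_t ret;

		while (input->pos < input->size) {
			/* Assumes output never fills; real code drains it. */
			ret = zstd_compress_stream(cstream, output, input);
			if (zstd_is_error(ret))
				return -EIO;
		}
		do {	/* flush and write the frame epilogue */
			ret = zstd_end_stream(cstream, output);
			if (zstd_is_error(ret))
				return -EIO;
		} while (ret != 0);	/* 0 means fully flushed */

		return 0;
	}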
+/* ====== Streaming Decompression ====== */
-
-/*-*****************************************************************************
- * Streaming decompression - HowTo
- *
- * A ZSTD_DStream object is required to track streaming operations.
- * Use ZSTD_initDStream() to initialize a ZSTD_DStream object.
- * ZSTD_DStream objects can be re-used multiple times.
- *
- * Use ZSTD_decompressStream() repetitively to consume your input.
- * The function will update both `pos` fields.
- * If `input->pos < input->size`, some input has not been consumed.
- * It's up to the caller to present again remaining data.
- * If `output->pos < output->size`, decoder has flushed everything it could.
- * Returns 0 iff a frame is completely decoded and fully flushed.
- * Otherwise it returns a suggested next input size that will never load more
- * than the current frame.
- ******************************************************************************/
+typedef ZSTD_DStream zstd_dstream;
/**
- * ZSTD_DStreamWorkspaceBound() - memory needed to initialize a ZSTD_DStream
- * @maxWindowSize: The maximum window size allowed for compressed frames.
+ * zstd_dstream_workspace_bound() - memory needed to initialize a zstd_dstream
+ * @max_window_size: The maximum window size allowed for compressed frames.
*
- * Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initDStream() and ZSTD_initDStream_usingDDict().
+ * Return: A lower bound on the size of the workspace that is passed
+ * to zstd_init_dstream().
*/
-size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize);
+size_t zstd_dstream_workspace_bound(size_t max_window_size);
/**
- * struct ZSTD_DStream - the zstd streaming decompression context
- */
-typedef struct ZSTD_DStream_s ZSTD_DStream;
-/*===== ZSTD_DStream management functions =====*/
-/**
- * ZSTD_initDStream() - initialize a zstd streaming decompression context
- * @maxWindowSize: The maximum window size allowed for compressed frames.
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace.
- * Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine
- * how large the workspace must be.
- *
- * Return: The zstd streaming decompression context.
- */
-ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace,
- size_t workspaceSize);
-/**
- * ZSTD_initDStream_usingDDict() - initialize streaming decompression context
- * @maxWindowSize: The maximum window size allowed for compressed frames.
- * @ddict: The digested dictionary to use for decompression.
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace.
- * Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine
- * how large the workspace must be.
+ * zstd_init_dstream() - initialize a zstd streaming decompression context
+ * @max_window_size: The maximum window size allowed for compressed frames.
+ * @workspace: The workspace to emplace the context into. It must outlive
+ * the returned context.
+ * @workspace_size: The size of workspace.
+ * Use zstd_dstream_workspace_bound(max_window_size) to
+ * determine how large the workspace must be.
*
- * Return: The zstd streaming decompression context.
+ * Return: The zstd streaming decompression context or NULL on error.
*/
-ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize,
- const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize);
+zstd_dstream *zstd_init_dstream(size_t max_window_size, void *workspace,
+ size_t workspace_size);
-/*===== Streaming decompression functions =====*/
/**
- * ZSTD_resetDStream() - reset the context using parameters from creation
- * @zds: The zstd streaming decompression context to reset.
+ * zstd_reset_dstream() - reset the context using parameters from creation
+ * @dstream: The zstd streaming decompression context to reset.
*
* Resets the context using the parameters from creation. Skips dictionary
* loading, since it can be reused.
*
- * Return: Zero or an error, which can be checked using ZSTD_isError().
+ * Return: Zero or an error, which can be checked using zstd_is_error().
*/
-size_t ZSTD_resetDStream(ZSTD_DStream *zds);
+size_t zstd_reset_dstream(zstd_dstream *dstream);
+
/**
- * ZSTD_decompressStream() - streaming decompress some of input into output
- * @zds: The zstd streaming decompression context.
- * @output: Destination buffer. `output.pos` is updated to indicate how much
- * decompressed data was written.
- * @input: Source buffer. `input.pos` is updated to indicate how much data was
- * read. Note that it may not consume the entire input, in which case
- * `input.pos < input.size`, and it's up to the caller to present
- * remaining data again.
+ * zstd_decompress_stream() - streaming decompress some of input into output
+ * @dstream: The zstd streaming decompression context.
+ * @output: Destination buffer. `output.pos` is updated to indicate how much
+ * decompressed data was written.
+ * @input: Source buffer. `input.pos` is updated to indicate how much data was
+ * read. Note that it may not consume the entire input, in which case
+ * `input.pos < input.size`, and it's up to the caller to present
+ * remaining data again.
*
* The `input` and `output` buffers may be any size. Guaranteed to make some
* forward progress if `input` and `output` are not empty.
- * ZSTD_decompressStream() will not consume the last byte of the frame until
+ * zstd_decompress_stream() will not consume the last byte of the frame until
* the entire frame is flushed.
*
- * Return: Returns 0 iff a frame is completely decoded and fully flushed.
- * Otherwise returns a hint for the number of bytes to use as the input
- * for the next function call or an error, which can be checked using
- * ZSTD_isError(). The size hint will never load more than the frame.
+ * Return: Returns 0 iff a frame is completely decoded and fully flushed.
+ * Otherwise returns a hint for the number of bytes to use as the
+ * input for the next function call or an error, which can be checked
+ * using zstd_is_error(). The size hint will never load more than the
+ * frame.
*/
-size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output,
- ZSTD_inBuffer *input);
+size_t zstd_decompress_stream(zstd_dstream *dstream, zstd_out_buffer *output,
+ zstd_in_buffer *input);
-/**
- * ZSTD_DStreamInSize() - recommended size for the input buffer
- *
- * Return: The recommended size for the input buffer.
- */
-size_t ZSTD_DStreamInSize(void);
-/**
- * ZSTD_DStreamOutSize() - recommended size for the output buffer
- *
- * When the output buffer is at least this large, it is guaranteed to be large
- * enough to flush at least one complete decompressed block.
- *
- * Return: The recommended size for the output buffer.
- */
-size_t ZSTD_DStreamOutSize(void);
-
-
-/* --- Constants ---*/
-#define ZSTD_MAGICNUMBER 0xFD2FB528 /* >= v0.8.0 */
-#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50U
-
-#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
-#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
-
-#define ZSTD_WINDOWLOG_MAX_32 27
-#define ZSTD_WINDOWLOG_MAX_64 27
-#define ZSTD_WINDOWLOG_MAX \
- ((unsigned int)(sizeof(size_t) == 4 \
- ? ZSTD_WINDOWLOG_MAX_32 \
- : ZSTD_WINDOWLOG_MAX_64))
-#define ZSTD_WINDOWLOG_MIN 10
-#define ZSTD_HASHLOG_MAX ZSTD_WINDOWLOG_MAX
-#define ZSTD_HASHLOG_MIN 6
-#define ZSTD_CHAINLOG_MAX (ZSTD_WINDOWLOG_MAX+1)
-#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN
-#define ZSTD_HASHLOG3_MAX 17
-#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1)
-#define ZSTD_SEARCHLOG_MIN 1
-/* only for ZSTD_fast, other strategies are limited to 6 */
-#define ZSTD_SEARCHLENGTH_MAX 7
-/* only for ZSTD_btopt, other strategies are limited to 4 */
-#define ZSTD_SEARCHLENGTH_MIN 3
-#define ZSTD_TARGETLENGTH_MIN 4
-#define ZSTD_TARGETLENGTH_MAX 999
-
-/* for static allocation */
-#define ZSTD_FRAMEHEADERSIZE_MAX 18
-#define ZSTD_FRAMEHEADERSIZE_MIN 6
-static const size_t ZSTD_frameHeaderSize_prefix = 5;
-static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN;
-static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX;
-/* magic number + skippable frame length */
-static const size_t ZSTD_skippableHeaderSize = 8;
-
-
-/*-*************************************
- * Compressed size functions
- **************************************/
-
-/**
- * ZSTD_findFrameCompressedSize() - returns the size of a compressed frame
- * @src: Source buffer. It should point to the start of a zstd encoded frame
- * or a skippable frame.
- * @srcSize: The size of the source buffer. It must be at least as large as the
- * size of the frame.
- *
- * Return: The compressed size of the frame pointed to by `src` or an error,
- * which can be check with ZSTD_isError().
- * Suitable to pass to ZSTD_decompress() or similar functions.
- */
-size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize);
-
-/*-*************************************
- * Decompressed size functions
- **************************************/
-/**
- * ZSTD_getFrameContentSize() - returns the content size in a zstd frame header
- * @src: It should point to the start of a zstd encoded frame.
- * @srcSize: The size of the source buffer. It must be at least as large as the
- * frame header. `ZSTD_frameHeaderSize_max` is always large enough.
- *
- * Return: The frame content size stored in the frame header if known.
- * `ZSTD_CONTENTSIZE_UNKNOWN` if the content size isn't stored in the
- * frame header. `ZSTD_CONTENTSIZE_ERROR` on invalid input.
- */
-unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
+/* ====== Frame Inspection Functions ====== */
/**
- * ZSTD_findDecompressedSize() - returns decompressed size of a series of frames
- * @src: It should point to the start of a series of zstd encoded and/or
- * skippable frames.
- * @srcSize: The exact size of the series of frames.
+ * zstd_find_frame_compressed_size() - returns the size of a compressed frame
+ * @src: Source buffer. It should point to the start of a zstd encoded
+ * frame or a skippable frame.
+ * @src_size: The size of the source buffer. It must be at least as large as the
+ * size of the frame.
*
- * If any zstd encoded frame in the series doesn't have the frame content size
- * set, `ZSTD_CONTENTSIZE_UNKNOWN` is returned. But frame content size is always
- * set when using ZSTD_compress(). The decompressed size can be very large.
- * If the source is untrusted, the decompressed size could be wrong or
- * intentionally modified. Always ensure the result fits within the
- * application's authorized limits. ZSTD_findDecompressedSize() handles multiple
- * frames, and so it must traverse the input to read each frame header. This is
- * efficient as most of the data is skipped, however it does mean that all frame
- * data must be present and valid.
- *
- * Return: Decompressed size of all the data contained in the frames if known.
- * `ZSTD_CONTENTSIZE_UNKNOWN` if the decompressed size is unknown.
- * `ZSTD_CONTENTSIZE_ERROR` if an error occurred.
- */
-unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize);
-
-/*-*************************************
- * Advanced compression functions
- **************************************/
-/**
- * ZSTD_checkCParams() - ensure parameter values remain within authorized range
- * @cParams: The zstd compression parameters.
- *
- * Return: Zero or an error, which can be checked using ZSTD_isError().
+ * Return: The compressed size of the frame pointed to by `src` or an error,
+ * which can be checked with zstd_is_error().
+ * Suitable to pass to zstd_decompress() or similar functions.
*/
-size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams);
+size_t zstd_find_frame_compressed_size(const void *src, size_t src_size);
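For illustration, a minimal sketch of walking a buffer of concatenated zstd and/or skippable frames with this function; walk_frames() is a hypothetical helper, not part of this header:

#include <linux/errno.h>
#include <linux/zstd.h>

/* Hypothetical helper: count concatenated zstd/skippable frames. */
static int walk_frames(const void *src, size_t src_size)
{
	const char *p = src;
	int nb_frames = 0;

	while (src_size > 0) {
		size_t frame_size =
			zstd_find_frame_compressed_size(p, src_size);

		if (zstd_is_error(frame_size))
			return -EINVAL;
		p += frame_size;
		src_size -= frame_size;
		nb_frames++;
	}
	return nb_frames;
}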
/**
- * ZSTD_adjustCParams() - optimize parameters for a given srcSize and dictSize
- * @srcSize: Optionally the estimated source size, or zero if unknown.
- * @dictSize: Optionally the estimated dictionary size, or zero if unknown.
- *
- * Return: The optimized parameters.
- */
-ZSTD_compressionParameters ZSTD_adjustCParams(
- ZSTD_compressionParameters cParams, unsigned long long srcSize,
- size_t dictSize);
-
-/*--- Advanced decompression functions ---*/
-
-/**
- * ZSTD_isFrame() - returns true iff the buffer starts with a valid frame
- * @buffer: The source buffer to check.
- * @size: The size of the source buffer, must be at least 4 bytes.
- *
- * Return: True iff the buffer starts with a zstd or skippable frame identifier.
- */
-unsigned int ZSTD_isFrame(const void *buffer, size_t size);
-
-/**
- * ZSTD_getDictID_fromDict() - returns the dictionary id stored in a dictionary
- * @dict: The dictionary buffer.
- * @dictSize: The size of the dictionary buffer.
- *
- * Return: The dictionary id stored within the dictionary or 0 if the
- * dictionary is not a zstd dictionary. If it returns 0 the
- * dictionary can still be loaded as a content-only dictionary.
- */
-unsigned int ZSTD_getDictID_fromDict(const void *dict, size_t dictSize);
-
-/**
- * ZSTD_getDictID_fromDDict() - returns the dictionary id stored in a ZSTD_DDict
- * @ddict: The ddict to find the id of.
- *
- * Return: The dictionary id stored within `ddict` or 0 if the dictionary is not
- * a zstd dictionary. If it returns 0 `ddict` will be loaded as a
- * content-only dictionary.
- */
-unsigned int ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict);
-
-/**
- * ZSTD_getDictID_fromFrame() - returns the dictionary id stored in a zstd frame
- * @src: Source buffer. It must be a zstd encoded frame.
- * @srcSize: The size of the source buffer. It must be at least as large as the
- * frame header. `ZSTD_frameHeaderSize_max` is always large enough.
- *
- * Return: The dictionary id required to decompress the frame stored within
- * `src` or 0 if the dictionary id could not be decoded. It can return
- * 0 if the frame does not require a dictionary, the dictionary id
- * wasn't stored in the frame, `src` is not a zstd frame, or `srcSize`
- * is too small.
- */
-unsigned int ZSTD_getDictID_fromFrame(const void *src, size_t srcSize);
-
-/**
- * struct ZSTD_frameParams - zstd frame parameters stored in the frame header
- * @frameContentSize: The frame content size, or 0 if not present.
+ * struct zstd_frame_params - zstd frame parameters stored in the frame header
+ * @frameContentSize: The frame content size, or ZSTD_CONTENTSIZE_UNKNOWN if not
+ * present.
* @windowSize: The window size, or 0 if the frame is a skippable frame.
+ * @blockSizeMax: The maximum block size.
+ * @frameType: The frame type (zstd or skippable).
+ * @headerSize: The size of the frame header.
* @dictID: The dictionary id, or 0 if not present.
* @checksumFlag: Whether a checksum was used.
+ *
+ * See zstd_lib.h.
*/
-typedef struct {
- unsigned long long frameContentSize;
- unsigned int windowSize;
- unsigned int dictID;
- unsigned int checksumFlag;
-} ZSTD_frameParams;
+typedef ZSTD_frameHeader zstd_frame_header;
/**
- * ZSTD_getFrameParams() - extracts parameters from a zstd or skippable frame
- * @fparamsPtr: On success the frame parameters are written here.
- * @src: The source buffer. It must point to a zstd or skippable frame.
- * @srcSize: The size of the source buffer. `ZSTD_frameHeaderSize_max` is
- * always large enough to succeed.
+ * zstd_get_frame_header() - extracts parameters from a zstd or skippable frame
+ * @params: On success the frame parameters are written here.
+ * @src: The source buffer. It must point to a zstd or skippable frame.
+ * @src_size: The size of the source buffer.
*
- * Return: 0 on success. If more data is required it returns how many bytes
- * must be provided to make forward progress. Otherwise it returns
- * an error, which can be checked using ZSTD_isError().
+ * Return: 0 on success. If more data is required it returns how many bytes
+ * must be provided to make forward progress. Otherwise it returns
+ * an error, which can be checked using zstd_is_error().
*/
-size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src,
- size_t srcSize);
-
-/*-*****************************************************************************
- * Buffer-less and synchronous inner streaming functions
- *
- * This is an advanced API, giving full control over buffer management, for
- * users which need direct control over memory.
- * But it's also a complex one, with many restrictions (documented below).
- * Prefer using normal streaming API for an easier experience
- ******************************************************************************/
-
-/*-*****************************************************************************
- * Buffer-less streaming compression (synchronous mode)
- *
- * A ZSTD_CCtx object is required to track streaming operations.
- * Use ZSTD_initCCtx() to initialize a context.
- * ZSTD_CCtx object can be re-used multiple times within successive compression
- * operations.
- *
- * Start by initializing a context.
- * Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary
- * compression,
- * or ZSTD_compressBegin_advanced(), for finer parameter control.
- * It's also possible to duplicate a reference context which has already been
- * initialized, using ZSTD_copyCCtx()
- *
- * Then, consume your input using ZSTD_compressContinue().
- * There are some important considerations to keep in mind when using this
- * advanced function :
- * - ZSTD_compressContinue() has no internal buffer. It uses externally provided
- * buffer only.
- * - Interface is synchronous : input is consumed entirely and produce 1+
- * (or more) compressed blocks.
- * - Caller must ensure there is enough space in `dst` to store compressed data
- * under worst case scenario. Worst case evaluation is provided by
- * ZSTD_compressBound().
- * ZSTD_compressContinue() doesn't guarantee recover after a failed
- * compression.
- * - ZSTD_compressContinue() presumes prior input ***is still accessible and
- * unmodified*** (up to maximum distance size, see WindowLog).
- * It remembers all previous contiguous blocks, plus one separated memory
- * segment (which can itself consists of multiple contiguous blocks)
- * - ZSTD_compressContinue() detects that prior input has been overwritten when
- * `src` buffer overlaps. In which case, it will "discard" the relevant memory
- * section from its history.
- *
- * Finish a frame with ZSTD_compressEnd(), which will write the last block(s)
- * and optional checksum. It's possible to use srcSize==0, in which case, it
- * will write a final empty block to end the frame. Without last block mark,
- * frames will be considered unfinished (corrupted) by decoders.
- *
- * `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress some new
- * frame.
- ******************************************************************************/
-
-/*===== Buffer-less streaming compression functions =====*/
-size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel);
-size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict,
- size_t dictSize, int compressionLevel);
-size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict,
- size_t dictSize, ZSTD_parameters params,
- unsigned long long pledgedSrcSize);
-size_t ZSTD_copyCCtx(ZSTD_CCtx *cctx, const ZSTD_CCtx *preparedCCtx,
- unsigned long long pledgedSrcSize);
-size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict,
- unsigned long long pledgedSrcSize);
-size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-
-
-
-/*-*****************************************************************************
- * Buffer-less streaming decompression (synchronous mode)
- *
- * A ZSTD_DCtx object is required to track streaming operations.
- * Use ZSTD_initDCtx() to initialize a context.
- * A ZSTD_DCtx object can be re-used multiple times.
- *
- * First typical operation is to retrieve frame parameters, using
- * ZSTD_getFrameParams(). It fills a ZSTD_frameParams structure which provide
- * important information to correctly decode the frame, such as the minimum
- * rolling buffer size to allocate to decompress data (`windowSize`), and the
- * dictionary ID used.
- * Note: content size is optional, it may not be present. 0 means unknown.
- * Note that these values could be wrong, either because of data malformation,
- * or because an attacker is spoofing deliberate false information. As a
- * consequence, check that values remain within valid application range,
- * especially `windowSize`, before allocation. Each application can set its own
- * limit, depending on local restrictions. For extended interoperability, it is
- * recommended to support at least 8 MB.
- * Frame parameters are extracted from the beginning of the compressed frame.
- * Data fragment must be large enough to ensure successful decoding, typically
- * `ZSTD_frameHeaderSize_max` bytes.
- * Result: 0: successful decoding, the `ZSTD_frameParams` structure is filled.
- * >0: `srcSize` is too small, provide at least this many bytes.
- * errorCode, which can be tested using ZSTD_isError().
- *
- * Start decompression, with ZSTD_decompressBegin() or
- * ZSTD_decompressBegin_usingDict(). Alternatively, you can copy a prepared
- * context, using ZSTD_copyDCtx().
- *
- * Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue()
- * alternatively.
- * ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize'
- * to ZSTD_decompressContinue().
- * ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will
- * fail.
- *
- * The result of ZSTD_decompressContinue() is the number of bytes regenerated
- * within 'dst' (necessarily <= dstCapacity). It can be zero, which is not an
- * error; it just means ZSTD_decompressContinue() has decoded some metadata
- * item. It can also be an error code, which can be tested with ZSTD_isError().
- *
- * ZSTD_decompressContinue() needs previous data blocks during decompression, up
- * to `windowSize`. They should preferably be located contiguously, prior to
- * current block. Alternatively, a round buffer of sufficient size is also
- * possible. Sufficient size is determined by frame parameters.
- * ZSTD_decompressContinue() is very sensitive to contiguity, if 2 blocks don't
- * follow each other, make sure that either the compressor breaks contiguity at
- * the same place, or that previous contiguous segment is large enough to
- * properly handle maximum back-reference.
- *
- * A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
- * Context can then be reset to start a new decompression.
- *
- * Note: it's possible to know if next input to present is a header or a block,
- * using ZSTD_nextInputType(). This information is not required to properly
- * decode a frame.
- *
- * == Special case: skippable frames ==
- *
- * Skippable frames allow integration of user-defined data into a flow of
- * concatenated frames. Skippable frames will be ignored (skipped) by a
- * decompressor. The format of skippable frames is as follows:
- * a) Skippable frame ID - 4 Bytes, Little endian format, any value from
- * 0x184D2A50 to 0x184D2A5F
- * b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
- * c) Frame Content - any content (User Data) of length equal to Frame Size
- * For skippable frames ZSTD_decompressContinue() always returns 0.
- * For skippable frames ZSTD_getFrameParams() returns fparamsPtr->windowLog==0
- * what means that a frame is skippable.
- * Note: If fparamsPtr->frameContentSize==0, it is ambiguous: the frame might
- * actually be a zstd encoded frame with no content. For purposes of
- * decompression, it is valid in both cases to skip the frame using
- * ZSTD_findFrameCompressedSize() to find its size in bytes.
- * It also returns frame size as fparamsPtr->frameContentSize.
- ******************************************************************************/
-
-/*===== Buffer-less streaming decompression functions =====*/
-size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx);
-size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict,
- size_t dictSize);
-void ZSTD_copyDCtx(ZSTD_DCtx *dctx, const ZSTD_DCtx *preparedDCtx);
-size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx);
-size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-typedef enum {
- ZSTDnit_frameHeader,
- ZSTDnit_blockHeader,
- ZSTDnit_block,
- ZSTDnit_lastBlock,
- ZSTDnit_checksum,
- ZSTDnit_skippableFrame
-} ZSTD_nextInputType_e;
-ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx);
-
-/*-*****************************************************************************
- * Block functions
- *
- * Block functions produce and decode raw zstd blocks, without frame metadata.
- * Frame metadata cost is typically ~18 bytes, which can be non-negligible for
- * very small blocks (< 100 bytes). User will have to take in charge required
- * information to regenerate data, such as compressed and content sizes.
- *
- * A few rules to respect:
- * - Compressing and decompressing require a context structure
- * + Use ZSTD_initCCtx() and ZSTD_initDCtx()
- * - It is necessary to init context before starting
- * + compression : ZSTD_compressBegin()
- * + decompression : ZSTD_decompressBegin()
- * + variants _usingDict() are also allowed
- * + copyCCtx() and copyDCtx() work too
- * - Block size is limited, it must be <= ZSTD_getBlockSizeMax()
- * + If you need to compress more, cut data into multiple blocks
- * + Consider using the regular ZSTD_compress() instead, as frame metadata
- * costs become negligible when source size is large.
- * - When a block is considered not compressible enough, ZSTD_compressBlock()
- * result will be zero. In which case, nothing is produced into `dst`.
- * + User must test for such outcome and deal directly with uncompressed data
- * + ZSTD_decompressBlock() doesn't accept uncompressed data as input!!!
- * + In case of multiple successive blocks, decoder must be informed of
- * uncompressed block existence to follow proper history. Use
- * ZSTD_insertBlock() in such a case.
- ******************************************************************************/
-
-/* Define for static allocation */
-#define ZSTD_BLOCKSIZE_ABSOLUTEMAX (128 * 1024)
-/*===== Raw zstd block functions =====*/
-size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx);
-size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart,
- size_t blockSize);
+size_t zstd_get_frame_header(zstd_frame_header *params, const void *src,
+ size_t src_size);
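A sketch of the typical use: read the header and validate `windowSize` against an application limit before allocating. check_frame() and MY_WINDOW_LIMIT are hypothetical; the 8 MB figure follows the interoperability recommendation in the removed documentation above.

#include <linux/errno.h>
#include <linux/zstd.h>

#define MY_WINDOW_LIMIT (8 * 1024 * 1024)	/* hypothetical policy: 8 MB */

static int check_frame(const void *src, size_t src_size)
{
	zstd_frame_header params;
	size_t ret = zstd_get_frame_header(&params, src, src_size);

	if (zstd_is_error(ret))
		return -EINVAL;
	if (ret != 0)
		return -EAGAIN;	/* need at least `ret` more input bytes */
	if (params.windowSize > MY_WINDOW_LIMIT)
		return -E2BIG;	/* untrusted input: enforce our own limit */
	return 0;
}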
-#endif /* ZSTD_H */
+#endif /* LINUX_ZSTD_H */
diff --git a/include/linux/zstd_errors.h b/include/linux/zstd_errors.h
new file mode 100644
index 000000000000..58b6dd45a969
--- /dev/null
+++ b/include/linux/zstd_errors.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_ERRORS_H_398273423
+#define ZSTD_ERRORS_H_398273423
+
+
+/*===== dependency =====*/
+#include <linux/types.h> /* size_t */
+
+
+/* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */
+#define ZSTDERRORLIB_VISIBILITY
+#define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
+
+/*-*********************************************
+ * Error codes list
+ *-*********************************************
+ * Error codes _values_ are pinned down since v1.3.1 only.
+ * Therefore, don't rely on values if you may link to any version < v1.3.1.
+ *
+ * Only values < 100 are considered stable.
+ *
+ * note 1 : this API shall be used with static linking only.
+ * dynamic linking is not yet officially supported.
+ * note 2 : Prefer relying on the enum rather than on its value whenever possible.
+ * This is the only supported way to use the error list < v1.3.1.
+ * note 3 : ZSTD_isError() is always correct, whatever the library version.
+ **********************************************/
+typedef enum {
+ ZSTD_error_no_error = 0,
+ ZSTD_error_GENERIC = 1,
+ ZSTD_error_prefix_unknown = 10,
+ ZSTD_error_version_unsupported = 12,
+ ZSTD_error_frameParameter_unsupported = 14,
+ ZSTD_error_frameParameter_windowTooLarge = 16,
+ ZSTD_error_corruption_detected = 20,
+ ZSTD_error_checksum_wrong = 22,
+ ZSTD_error_dictionary_corrupted = 30,
+ ZSTD_error_dictionary_wrong = 32,
+ ZSTD_error_dictionaryCreation_failed = 34,
+ ZSTD_error_parameter_unsupported = 40,
+ ZSTD_error_parameter_outOfBound = 42,
+ ZSTD_error_tableLog_tooLarge = 44,
+ ZSTD_error_maxSymbolValue_tooLarge = 46,
+ ZSTD_error_maxSymbolValue_tooSmall = 48,
+ ZSTD_error_stage_wrong = 60,
+ ZSTD_error_init_missing = 62,
+ ZSTD_error_memory_allocation = 64,
+ ZSTD_error_workSpace_tooSmall = 66,
+ ZSTD_error_dstSize_tooSmall = 70,
+ ZSTD_error_srcSize_wrong = 72,
+ ZSTD_error_dstBuffer_null = 74,
+ /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
+ ZSTD_error_frameIndex_tooLarge = 100,
+ ZSTD_error_seekableIO = 102,
+ ZSTD_error_dstBuffer_wrong = 104,
+ ZSTD_error_srcBuffer_wrong = 105,
+ ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
+} ZSTD_ErrorCode;
+
+/*! ZSTD_getErrorCode() :
+ convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
+ which can be used to compare with enum list published above */
+ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
+ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); /*< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */
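For illustration, a sketch mapping a `size_t` result onto the stable enum values above; the translation into errno values is a hypothetical policy, not part of this header:

#include <linux/errno.h>
#include <linux/zstd_errors.h>

static int zstd_result_to_errno(size_t result)
{
	switch (ZSTD_getErrorCode(result)) {
	case ZSTD_error_no_error:
		return 0;
	case ZSTD_error_dstSize_tooSmall:
		return -ENOSPC;	/* caller could retry with a larger dst */
	case ZSTD_error_memory_allocation:
		return -ENOMEM;
	default:
		return -EINVAL;
	}
}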
+
+
+
+#endif /* ZSTD_ERRORS_H_398273423 */
diff --git a/include/linux/zstd_lib.h b/include/linux/zstd_lib.h
new file mode 100644
index 000000000000..b8c7dbf98390
--- /dev/null
+++ b/include/linux/zstd_lib.h
@@ -0,0 +1,2432 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_H_235446
+#define ZSTD_H_235446
+
+/* ====== Dependency ======*/
+#include <linux/limits.h> /* INT_MAX */
+#include <linux/types.h> /* size_t */
+
+
+/* ===== ZSTDLIB_API : control library symbols visibility ===== */
+#define ZSTDLIB_VISIBILITY
+#define ZSTDLIB_API ZSTDLIB_VISIBILITY
+
+
+/* *****************************************************************************
+ Introduction
+
+ zstd, short for Zstandard, is a fast lossless compression algorithm, targeting
+ real-time compression scenarios at zlib-level and better compression ratios.
+ The zstd compression library provides in-memory compression and decompression
+ functions.
+
+ The library supports regular compression levels from 1 up to ZSTD_maxCLevel(),
+ which is currently 22. Levels >= 20, labeled `--ultra`, should be used with
+ caution, as they require more memory. The library also offers negative
+ compression levels, which extend the range of speed vs. ratio preferences.
+ The lower the level, the faster the speed (at the cost of compression).
+
+ Compression can be done in:
+ - a single step (described as Simple API)
+ - a single step, reusing a context (described as Explicit context)
+ - unbounded multiple steps (described as Streaming compression)
+
+  The compression ratio achievable on small data can be greatly improved by
+  using a dictionary. Dictionary compression can be performed in:
+ - a single step (described as Simple dictionary API)
+ - a single step, reusing a dictionary (described as Bulk-processing
+ dictionary API)
+
+ Advanced experimental functions can be accessed using
+ `#define ZSTD_STATIC_LINKING_ONLY` before including zstd.h.
+
+ Advanced experimental APIs should never be used with a dynamically-linked
+ library. They are not "stable"; their definitions or signatures may change in
+ the future. Only static linking is allowed.
+*******************************************************************************/
+
+/*------ Version ------*/
+#define ZSTD_VERSION_MAJOR 1
+#define ZSTD_VERSION_MINOR 4
+#define ZSTD_VERSION_RELEASE 10
+#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
+
+/*! ZSTD_versionNumber() :
+ * Return runtime library version, the value is (MAJOR*100*100 + MINOR*100 + RELEASE). */
+ZSTDLIB_API unsigned ZSTD_versionNumber(void);
+
+#define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
+#define ZSTD_QUOTE(str) #str
+#define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str)
+#define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)
+
+/*! ZSTD_versionString() :
+ * Return runtime library version, like "1.4.5". Requires v1.3.0+. */
+ZSTDLIB_API const char* ZSTD_versionString(void);
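A tiny sketch comparing the compile-time and runtime versions; the pr_warn() reporting is illustrative only:

#include <linux/printk.h>

static void zstd_version_sanity_check(void)
{
	/* Both values use the MAJOR*100*100 + MINOR*100 + RELEASE encoding. */
	if (ZSTD_versionNumber() != ZSTD_VERSION_NUMBER)
		pr_warn("zstd: runtime %s does not match compiled %s\n",
			ZSTD_versionString(), ZSTD_VERSION_STRING);
}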
+
+/* *************************************
+ * Default constant
+ ***************************************/
+#ifndef ZSTD_CLEVEL_DEFAULT
+# define ZSTD_CLEVEL_DEFAULT 3
+#endif
+
+/* *************************************
+ * Constants
+ ***************************************/
+
+/* All magic numbers are supposed to be read/written to/from files/memory using the little-endian convention */
+#define ZSTD_MAGICNUMBER 0xFD2FB528 /* valid since v0.8.0 */
+#define ZSTD_MAGIC_DICTIONARY 0xEC30A437 /* valid since v0.7.0 */
+#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50 /* all 16 values, from 0x184D2A50 to 0x184D2A5F, signal the beginning of a skippable frame */
+#define ZSTD_MAGIC_SKIPPABLE_MASK 0xFFFFFFF0
+
+#define ZSTD_BLOCKSIZELOG_MAX 17
+#define ZSTD_BLOCKSIZE_MAX (1<<ZSTD_BLOCKSIZELOG_MAX)
+
+
+
+/* *************************************
+* Simple API
+***************************************/
+/*! ZSTD_compress() :
+ * Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
+ * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
+ * @return : compressed size written into `dst` (<= `dstCapacity`),
+ * or an error code if it fails (which can be tested using ZSTD_isError()). */
+ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel);
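A minimal one-shot sketch, assuming the caller supplies a destination sized with ZSTD_compressBound(); one_shot_compress() is a hypothetical wrapper:

#include <linux/errno.h>

static ssize_t one_shot_compress(void *dst, size_t dst_capacity,
				 const void *src, size_t src_size)
{
	/* dst_capacity >= ZSTD_compressBound(src_size) keeps the fast path. */
	size_t const ret = ZSTD_compress(dst, dst_capacity, src, src_size,
					 ZSTD_CLEVEL_DEFAULT);

	return ZSTD_isError(ret) ? -EINVAL : (ssize_t)ret;
}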
+
+/*! ZSTD_decompress() :
+ * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
+ * `dstCapacity` is an upper bound of originalSize to regenerate.
+ * If the user cannot establish a maximum upper bound, it's better to use streaming mode to decompress the data.
+ * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
+ * or an errorCode if it fails (which can be tested using ZSTD_isError()). */
+ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,
+ const void* src, size_t compressedSize);
+
+/*! ZSTD_getFrameContentSize() : requires v1.3.0+
+ * `src` should point to the start of a ZSTD encoded frame.
+ * `srcSize` must be at least as large as the frame header.
+ * hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
+ * @return : - decompressed size of `src` frame content, if known
+ * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
+ * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
+ * note 1 : a 0 return value means the frame is valid but "empty".
+ * note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode.
+ * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * Optionally, application can rely on some implicit limit,
+ * as ZSTD_decompress() only needs an upper bound of decompressed size.
+ * (For example, the data may be guaranteed to be cut into blocks <= 16 KB.)
+ * note 3 : decompressed size is always present when compression is completed using single-pass functions,
+ * such as ZSTD_compress(), ZSTD_compressCCtx(), ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
+ * note 4 : decompressed size can be very large (64-bits value),
+ * potentially larger than what local system can handle as a single memory segment.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
+ * Always ensure return value fits within application's authorized limits.
+ * Each application can set its own limits.
+ * note 6 : This function replaces ZSTD_getDecompressedSize() */
+#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
+#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
+ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
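A sketch pairing the content-size query with ZSTD_decompress(), honoring both sentinel values above; the errno mapping and decompress_one_frame() name are hypothetical:

#include <linux/errno.h>

static ssize_t decompress_one_frame(void *dst, size_t dst_capacity,
				    const void *src, size_t src_size)
{
	unsigned long long const content =
		ZSTD_getFrameContentSize(src, src_size);
	size_t ret;

	if (content == ZSTD_CONTENTSIZE_ERROR)
		return -EINVAL;
	if (content == ZSTD_CONTENTSIZE_UNKNOWN)
		return -EOPNOTSUPP;	/* fall back to streaming mode */
	if (content > dst_capacity)
		return -ENOSPC;		/* enforce the application's limit */
	ret = ZSTD_decompress(dst, dst_capacity, src, src_size);
	return ZSTD_isError(ret) ? -EINVAL : (ssize_t)ret;
}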
+
+/*! ZSTD_getDecompressedSize() :
+ * NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().
+ * Both functions work the same way, but ZSTD_getDecompressedSize() blends
+ * "empty", "unknown" and "error" results to the same return value (0),
+ * while ZSTD_getFrameContentSize() gives them separate return values.
+ * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */
+ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
+
+/*! ZSTD_findFrameCompressedSize() :
+ * `src` should point to the start of a ZSTD frame or skippable frame.
+ * `srcSize` must be >= first frame size
+ * @return : the compressed size of the first frame starting at `src`,
+ * suitable to pass as `srcSize` to `ZSTD_decompress` or similar,
+ * or an error code if input is invalid */
+ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
+
+
+/*====== Helper functions ======*/
+#define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
+ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
+ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
+ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */
+ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed */
+ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */
+
+
+/* *************************************
+* Explicit context
+***************************************/
+/*= Compression context
+ * When compressing many times,
+ * it is recommended to allocate a context just once,
+ * and re-use it for each successive compression operation.
+ * This will make the workload friendlier for the system's memory.
+ * Note : re-using context is just a speed / resource optimization.
+ * It doesn't change the compression ratio, which remains identical.
+ * Note 2 : In multi-threaded environments,
+ * use one different context per thread for parallel execution.
+ */
+typedef struct ZSTD_CCtx_s ZSTD_CCtx;
+ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
+ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /* accept NULL pointer */
+
+/*! ZSTD_compressCCtx() :
+ * Same as ZSTD_compress(), using an explicit ZSTD_CCtx.
+ * Important : in order to behave similarly to `ZSTD_compress()`,
+ * this function compresses at requested compression level,
+ * __ignoring any other parameter__ .
+ * If any advanced parameter was set using the advanced API,
+ * they will all be reset. Only `compressionLevel` remains.
+ */
+ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel);
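A sketch of the recommended reuse pattern, one context across several independent buffers. The struct chunk layout and compress_chunks() are hypothetical, and this assumes the allocating helpers ZSTD_createCCtx()/ZSTD_freeCCtx() are usable in this build:

#include <linux/errno.h>

struct chunk { void *dst; size_t dst_cap; const void *src; size_t src_len; };

static int compress_chunks(struct chunk *c, int n)
{
	ZSTD_CCtx *cctx = ZSTD_createCCtx();
	int i;

	if (!cctx)
		return -ENOMEM;
	for (i = 0; i < n; i++) {
		size_t const r = ZSTD_compressCCtx(cctx,
						   c[i].dst, c[i].dst_cap,
						   c[i].src, c[i].src_len, 3);

		if (ZSTD_isError(r)) {
			ZSTD_freeCCtx(cctx);
			return -EINVAL;
		}
	}
	ZSTD_freeCCtx(cctx);	/* also accepts NULL */
	return 0;
}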
+
+/*= Decompression context
+ * When decompressing many times,
+ * it is recommended to allocate a context only once,
+ * and re-use it for each successive compression operation.
+ * This will make workload friendlier for system's memory.
+ * Use one context per thread for parallel execution. */
+typedef struct ZSTD_DCtx_s ZSTD_DCtx;
+ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void);
+ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx); /* accept NULL pointer */
+
+/*! ZSTD_decompressDCtx() :
+ * Same as ZSTD_decompress(),
+ * requires an allocated ZSTD_DCtx.
+ * Compatible with sticky parameters.
+ */
+ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
+
+/* *************************************
+* Advanced compression API
+***************************************/
+
+/* API design :
+ * Parameters are pushed one by one into an existing context,
+ * using ZSTD_CCtx_set*() functions.
+ * Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame.
+ * "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` !
+ * __They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()__ .
+ *
+ * It's possible to reset all parameters to "default" using ZSTD_CCtx_reset().
+ *
+ * This API supersedes all other "advanced" API entry points in the experimental section.
+ * In the future, we expect to remove API entry points from the experimental section which are redundant with this API.
+ */
+
+
+/* Compression strategies, listed from fastest to strongest */
+typedef enum { ZSTD_fast=1,
+ ZSTD_dfast=2,
+ ZSTD_greedy=3,
+ ZSTD_lazy=4,
+ ZSTD_lazy2=5,
+ ZSTD_btlazy2=6,
+ ZSTD_btopt=7,
+ ZSTD_btultra=8,
+ ZSTD_btultra2=9
+ /* note : new strategies _might_ be added in the future.
+ Only the order (from fast to strong) is guaranteed */
+} ZSTD_strategy;
+
+
+typedef enum {
+
+ /* compression parameters
+ * Note: When compressing with a ZSTD_CDict these parameters are superseded
+ * by the parameters used to construct the ZSTD_CDict.
+ * See ZSTD_CCtx_refCDict() for more info (superseded-by-cdict). */
+ ZSTD_c_compressionLevel=100, /* Set compression parameters according to pre-defined cLevel table.
+ * Note that exact compression parameters are dynamically determined,
+ * depending on both compression level and srcSize (when known).
+ * Default level is ZSTD_CLEVEL_DEFAULT==3.
+ * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT.
+ * Note 1 : it's possible to pass a negative compression level.
+ * Note 2 : setting a level does not automatically set all other compression parameters
+ * to default. Setting this will however eventually dynamically impact the compression
+ * parameters which have not been manually set. The manually set
+ * ones will 'stick'. */
+ /* Advanced compression parameters :
+ * It's possible to pin down compression parameters to some specific values.
+ * In which case, these values are no longer dynamically selected by the compressor */
+ ZSTD_c_windowLog=101, /* Maximum allowed back-reference distance, expressed as power of 2.
+ * This will set a memory budget for streaming decompression,
+ * with larger values requiring more memory
+ * and typically compressing more.
+ * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
+ * Special: value 0 means "use default windowLog".
+ * Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT
+ * requires explicitly allowing such size at streaming decompression stage. */
+ ZSTD_c_hashLog=102, /* Size of the initial probe table, as a power of 2.
+ * Resulting memory usage is (1 << (hashLog+2)).
+ * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
+ * Larger tables improve compression ratio of strategies <= dFast,
+ * and improve speed of strategies > dFast.
+ * Special: value 0 means "use default hashLog". */
+ ZSTD_c_chainLog=103, /* Size of the multi-probe search table, as a power of 2.
+ * Resulting memory usage is (1 << (chainLog+2)).
+ * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX.
+ * Larger tables result in better and slower compression.
+ * This parameter is useless for "fast" strategy.
+ * It's still useful when using "dfast" strategy,
+ * in which case it defines a secondary probe table.
+ * Special: value 0 means "use default chainLog". */
+ ZSTD_c_searchLog=104, /* Number of search attempts, as a power of 2.
+ * More attempts result in better and slower compression.
+ * This parameter is useless for "fast" and "dFast" strategies.
+ * Special: value 0 means "use default searchLog". */
+ ZSTD_c_minMatch=105, /* Minimum size of searched matches.
+ * Note that Zstandard can still find matches of smaller size,
+ * it just tweaks its search algorithm to look for this size and larger.
+ * Larger values increase compression and decompression speed, but decrease ratio.
+ * Must be clamped between ZSTD_MINMATCH_MIN and ZSTD_MINMATCH_MAX.
+ * Note that currently, for all strategies < btopt, the effective minimum is 4,
+ * and for all strategies > fast, the effective maximum is 6.
+ * Special: value 0 means "use default minMatchLength". */
+ ZSTD_c_targetLength=106, /* Impact of this field depends on strategy.
+ * For strategies btopt, btultra & btultra2:
+ * Length of Match considered "good enough" to stop search.
+ * Larger values make compression stronger, and slower.
+ * For strategy fast:
+ * Distance between match sampling.
+ * Larger values make compression faster, and weaker.
+ * Special: value 0 means "use default targetLength". */
+ ZSTD_c_strategy=107, /* See ZSTD_strategy enum definition.
+ * The higher the value of selected strategy, the more complex it is,
+ * resulting in stronger and slower compression.
+ * Special: value 0 means "use default strategy". */
+
+ /* LDM mode parameters */
+ ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching.
+ * This parameter is designed to improve compression ratio
+ * for large inputs, by finding large matches at long distance.
+ * It increases memory usage and window size.
+ * Note: enabling this parameter increases default ZSTD_c_windowLog to 128 MB
+ * except when expressly set to a different value.
+ * Note: will be enabled by default if ZSTD_c_windowLog >= 128 MB and
+ * compression strategy >= ZSTD_btopt (== compression level 16+) */
+ ZSTD_c_ldmHashLog=161, /* Size of the table for long distance matching, as a power of 2.
+ * Larger values increase memory usage and compression ratio,
+ * but decrease compression speed.
+ * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX
+ * default: windowlog - 7.
+ * Special: value 0 means "automatically determine hashlog". */
+ ZSTD_c_ldmMinMatch=162, /* Minimum match size for long distance matcher.
+ * Values that are too large or too small usually decrease the compression ratio.
+ * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX.
+ * Special: value 0 means "use default value" (default: 64). */
+ ZSTD_c_ldmBucketSizeLog=163, /* Log size of each bucket in the LDM hash table for collision resolution.
+ * Larger values improve collision resolution but decrease compression speed.
+ * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX.
+ * Special: value 0 means "use default value" (default: 3). */
+ ZSTD_c_ldmHashRateLog=164, /* Frequency of inserting/looking up entries into the LDM hash table.
+ * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN).
+ * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage.
+ * Larger values improve compression speed.
+ * Deviating far from default value will likely result in a compression ratio decrease.
+ * Special: value 0 means "automatically determine hashRateLog". */
+
+ /* frame parameters */
+ ZSTD_c_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1)
+ * Content size must be known at the beginning of compression.
+ * This is automatically the case when using ZSTD_compress2(),
+ * For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */
+ ZSTD_c_checksumFlag=201, /* A 32-bits checksum of content is written at end of frame (default:0) */
+ ZSTD_c_dictIDFlag=202, /* When applicable, dictionary's ID is written into frame header (default:1) */
+
+ /* multi-threading parameters */
+ /* These parameters are only active if multi-threading is enabled (compiled with build macro ZSTD_MULTITHREAD).
+ * Otherwise, trying to set any value other than the default (0) is a no-op that returns an error.
+ * In a situation where it's unknown if the linked library supports multi-threading or not,
+ * setting ZSTD_c_nbWorkers to any value >= 1 and consulting the return value provides a quick way to check this property.
+ */
+ ZSTD_c_nbWorkers=400, /* Select how many threads will be spawned to compress in parallel.
+ * When nbWorkers >= 1, triggers asynchronous mode when invoking ZSTD_compressStream*() :
+ * ZSTD_compressStream*() consumes input and flush output if possible, but immediately gives back control to caller,
+ * while compression is performed in parallel, within worker thread(s).
+ * (note : a strong exception to this rule is when first invocation of ZSTD_compressStream2() sets ZSTD_e_end :
+ * in which case, ZSTD_compressStream2() delegates to ZSTD_compress2(), which is always a blocking call).
+ * More workers improve speed, but also increase memory usage.
+ * Default value is `0`, aka "single-threaded mode" : no worker is spawned,
+ * compression is performed inside Caller's thread, and all invocations are blocking */
+ ZSTD_c_jobSize=401, /* Size of a compression job. This value is enforced only when nbWorkers >= 1.
+ * Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads.
+ * 0 means default, which is dynamically determined based on compression parameters.
+ * Job size must be at least the overlap size, or 1 MB, whichever is larger.
+ * The minimum size is automatically and transparently enforced. */
+ ZSTD_c_overlapLog=402, /* Control the overlap size, as a fraction of window size.
+ * The overlap size is an amount of data reloaded from previous job at the beginning of a new job.
+ * It helps preserve compression ratio, while each job is compressed in parallel.
+ * This value is enforced only when nbWorkers >= 1.
+ * Larger values increase compression ratio, but decrease speed.
+ * Possible values range from 0 to 9 :
+ * - 0 means "default" : value will be determined by the library, depending on strategy
+ * - 1 means "no overlap"
+ * - 9 means "full overlap", using a full window size.
+ * Each intermediate rank increases/decreases load size by a factor 2 :
+ * 9: full window; 8: w/2; 7: w/4; 6: w/8; 5:w/16; 4: w/32; 3:w/64; 2:w/128; 1:no overlap; 0:default
+ * default value varies between 6 and 9, depending on strategy */
+
+ /* note : additional experimental parameters are also available
+ * within the experimental section of the API.
+ * At the time of this writing, they include :
+ * ZSTD_c_rsyncable
+ * ZSTD_c_format
+ * ZSTD_c_forceMaxWindow
+ * ZSTD_c_forceAttachDict
+ * ZSTD_c_literalCompressionMode
+ * ZSTD_c_targetCBlockSize
+ * ZSTD_c_srcSizeHint
+ * ZSTD_c_enableDedicatedDictSearch
+ * ZSTD_c_stableInBuffer
+ * ZSTD_c_stableOutBuffer
+ * ZSTD_c_blockDelimiters
+ * ZSTD_c_validateSequences
+ * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
+ * note : never ever use experimentalParam? names directly;
+ * also, the enums values themselves are unstable and can still change.
+ */
+ ZSTD_c_experimentalParam1=500,
+ ZSTD_c_experimentalParam2=10,
+ ZSTD_c_experimentalParam3=1000,
+ ZSTD_c_experimentalParam4=1001,
+ ZSTD_c_experimentalParam5=1002,
+ ZSTD_c_experimentalParam6=1003,
+ ZSTD_c_experimentalParam7=1004,
+ ZSTD_c_experimentalParam8=1005,
+ ZSTD_c_experimentalParam9=1006,
+ ZSTD_c_experimentalParam10=1007,
+ ZSTD_c_experimentalParam11=1008,
+ ZSTD_c_experimentalParam12=1009
+} ZSTD_cParameter;
+
+typedef struct {
+ size_t error;
+ int lowerBound;
+ int upperBound;
+} ZSTD_bounds;
+
+/*! ZSTD_cParam_getBounds() :
+ * All parameters must belong to an interval with lower and upper bounds,
+ * otherwise they will either trigger an error or be automatically clamped.
+ * @return : a structure, ZSTD_bounds, which contains
+ * - an error status field, which must be tested using ZSTD_isError()
+ * - lower and upper bounds, both inclusive
+ */
+ZSTDLIB_API ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter cParam);
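For illustration, a sketch clamping a requested windowLog into the advertised interval; clamped_window_log() is a hypothetical helper:

static int clamped_window_log(int want)
{
	ZSTD_bounds const b = ZSTD_cParam_getBounds(ZSTD_c_windowLog);

	if (ZSTD_isError(b.error))
		return 0;	/* 0 asks the library for its default */
	if (want < b.lowerBound)
		return b.lowerBound;
	if (want > b.upperBound)
		return b.upperBound;
	return want;
}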
+
+/*! ZSTD_CCtx_setParameter() :
+ * Set one compression parameter, selected by enum ZSTD_cParameter.
+ * All parameters have valid bounds. Bounds can be queried using ZSTD_cParam_getBounds().
+ * Providing a value beyond these bounds will either clamp it or trigger an error (depending on the parameter).
+ * Setting a parameter is generally only possible during frame initialization (before starting compression).
+ * Exception : when using multi-threading mode (nbWorkers >= 1),
+ * the following parameters can be updated _during_ compression (within same frame):
+ * => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.
+ * new parameters will be active for next job only (after a flush()).
+ * @return : an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value);
+
+/*! ZSTD_CCtx_setPledgedSrcSize() :
+ * Total input data size to be compressed as a single frame.
+ * The value will be written into the frame header, unless explicitly forbidden using ZSTD_c_contentSizeFlag.
+ * This value is also checked at end of frame, and triggers an error if not respected.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame.
+ * In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN.
+ * ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame.
+ * Note 2 : pledgedSrcSize is only valid once, for the next frame.
+ * It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN.
+ * Note 3 : Whenever all input data is provided and consumed in a single round,
+ * for example with ZSTD_compress2(),
+ * or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end),
+ * this value is automatically overridden by srcSize instead.
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize);
+
+typedef enum {
+ ZSTD_reset_session_only = 1,
+ ZSTD_reset_parameters = 2,
+ ZSTD_reset_session_and_parameters = 3
+} ZSTD_ResetDirective;
+
+/*! ZSTD_CCtx_reset() :
+ * There are 2 different things that can be reset, independently or jointly :
+ * - The session : will stop compressing current frame, and make CCtx ready to start a new one.
+ * Useful after an error, or to interrupt any ongoing compression.
+ * Any internal data not yet flushed is cancelled.
+ * Compression parameters and dictionary remain unchanged.
+ * They will be used to compress next frame.
+ * Resetting session never fails.
+ * - The parameters : changes all parameters back to "default".
+ * This removes any reference to any dictionary too.
+ * Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing)
+ * otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())
+ * - Both : similar to resetting the session, followed by resetting parameters.
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset);
+
+/*! ZSTD_compress2() :
+ * Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.
+ * ZSTD_compress2() always starts a new frame.
+ * Should cctx hold data from a previously unfinished frame, everything about it is forgotten.
+ * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
+ * - The function is always blocking, returns when compression is completed.
+ * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
+ * @return : compressed size written into `dst` (<= `dstCapacity`),
+ * or an error code if it fails (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_compress2( ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
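A sketch of the advanced one-shot flow described above: reset, push sticky parameters, then ZSTD_compress2(). The parameter values and advanced_compress() name are illustrative:

#include <linux/errno.h>

static ssize_t advanced_compress(ZSTD_CCtx *cctx,
				 void *dst, size_t dst_cap,
				 const void *src, size_t src_len)
{
	size_t ret;

	ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);
	/* Each setter returns a size_t testable with ZSTD_isError(). */
	ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
	ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
	ret = ZSTD_compress2(cctx, dst, dst_cap, src, src_len);
	return ZSTD_isError(ret) ? -EINVAL : (ssize_t)ret;
}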
+
+
+/* *************************************
+* Advanced decompression API
+***************************************/
+
+/* The advanced API pushes parameters one by one into an existing DCtx context.
+ * Parameters are sticky, and remain valid for all following frames
+ * using the same DCtx context.
+ * It's possible to reset parameters to default values using ZSTD_DCtx_reset().
+ * Note : This API is compatible with existing ZSTD_decompressDCtx() and ZSTD_decompressStream().
+ * Therefore, no new decompression function is necessary.
+ */
+
+typedef enum {
+
+ ZSTD_d_windowLogMax=100, /* Select a size limit (in power of 2) beyond which
+ * the streaming API will refuse to allocate memory buffer
+ * in order to protect the host from unreasonable memory requirements.
+ * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
+ * By default, a decompression context accepts window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT).
+ * Special: value 0 means "use default maximum windowLog". */
+
+ /* note : additional experimental parameters are also available
+ * within the experimental section of the API.
+ * At the time of this writing, they include :
+ * ZSTD_d_format
+ * ZSTD_d_stableOutBuffer
+ * ZSTD_d_forceIgnoreChecksum
+ * ZSTD_d_refMultipleDDicts
+ * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
+ * note : never ever use experimentalParam? names directly
+ */
+ ZSTD_d_experimentalParam1=1000,
+ ZSTD_d_experimentalParam2=1001,
+ ZSTD_d_experimentalParam3=1002,
+ ZSTD_d_experimentalParam4=1003
+
+} ZSTD_dParameter;
+
+/*! ZSTD_dParam_getBounds() :
+ * All parameters must belong to an interval with lower and upper bounds,
+ * otherwise they will either trigger an error or be automatically clamped.
+ * @return : a structure, ZSTD_bounds, which contains
+ * - an error status field, which must be tested using ZSTD_isError()
+ * - both lower and upper bounds, inclusive
+ */
+ZSTDLIB_API ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam);
+
+/*! ZSTD_DCtx_setParameter() :
+ * Set one decompression parameter, selected by enum ZSTD_dParameter.
+ * All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds().
+ * Providing a value beyond these bounds will either clamp it or trigger an error (depending on the parameter).
+ * Setting a parameter is only possible during frame initialization (before starting decompression).
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int value);
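A tiny sketch capping the window the streaming decoder will accept, per ZSTD_d_windowLogMax above; limit_decoder_window() is hypothetical:

#include <linux/errno.h>

static int limit_decoder_window(ZSTD_DCtx *dctx, int max_window_log)
{
	size_t const ret = ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax,
						  max_window_log);

	return ZSTD_isError(ret) ? -EINVAL : 0;
}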
+
+/*! ZSTD_DCtx_reset() :
+ * Return a DCtx to a clean state.
+ * Session and parameters can be reset jointly or separately.
+ * Parameters can only be reset when no active frame is being decompressed.
+ * @return : 0, or an error code, which can be tested with ZSTD_isError()
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset);
+
+
+/* **************************
+* Streaming
+****************************/
+
+typedef struct ZSTD_inBuffer_s {
+ const void* src; /*< start of input buffer */
+ size_t size; /*< size of input buffer */
+ size_t pos; /*< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */
+} ZSTD_inBuffer;
+
+typedef struct ZSTD_outBuffer_s {
+ void* dst; /*< start of output buffer */
+ size_t size; /*< size of output buffer */
+ size_t pos; /*< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */
+} ZSTD_outBuffer;
+
+
+
+/*-***********************************************************************
+* Streaming compression - HowTo
+*
+* A ZSTD_CStream object is required to track streaming operation.
+* Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
+* ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
+* Re-using a ZSTD_CStream is recommended, since it plays nicer with the system's memory by re-using already allocated memory.
+*
+* For parallel execution, use one separate ZSTD_CStream per thread.
+*
+* note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing.
+*
+* Parameters are sticky : when starting a new compression on the same context,
+* it will re-use the same sticky parameters as previous compression session.
+* When in doubt, it's recommended to fully initialize the context before usage.
+* Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(),
+* ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to
+* set more specific parameters, the pledged source size, or load a dictionary.
+*
+* Use ZSTD_compressStream2() with ZSTD_e_continue as many times as necessary to
+* consume input stream. The function will automatically update both `pos`
+* fields within `input` and `output`.
+* Note that the function may not consume the entire input, for example, because
+* the output buffer is already full, in which case `input.pos < input.size`.
+* The caller must check if input has been entirely consumed.
+* If not, the caller must make some room to receive more compressed data,
+* and then present the remaining input data again.
+* note: ZSTD_e_continue is guaranteed to make some forward progress when called,
+* but doesn't guarantee maximal forward progress. This is especially relevant
+* when compressing with multiple threads. The call won't block if it can
+* consume some input, but if it can't it will wait for some, but not all,
+* output to be flushed.
+* @return : provides a minimum amount of data remaining to be flushed from internal buffers
+* or an error code, which can be tested using ZSTD_isError().
+*
+* At any moment, it's possible to flush whatever data might remain stuck within internal buffer,
+* using ZSTD_compressStream2() with ZSTD_e_flush. `output->pos` will be updated.
+* Note that, if `output->size` is too small, a single invocation with ZSTD_e_flush might not be enough (return code > 0).
+* In which case, make some room to receive more compressed data, and call again ZSTD_compressStream2() with ZSTD_e_flush.
+* You must continue calling ZSTD_compressStream2() with ZSTD_e_flush until it returns 0, at which point you can change the
+* operation.
+* note: ZSTD_e_flush will flush as much output as possible, meaning when compressing with multiple threads, it will
+* block until the flush is complete or the output buffer is full.
+* @return : 0 if internal buffers are entirely flushed,
+* >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
+* or an error code, which can be tested using ZSTD_isError().
+*
+* Calling ZSTD_compressStream2() with ZSTD_e_end instructs to finish a frame.
+* It will perform a flush and write frame epilogue.
+* The epilogue is required for decoders to consider a frame completed.
+* The flush operation is the same, and follows the same rules as calling ZSTD_compressStream2() with ZSTD_e_flush.
+* You must continue calling ZSTD_compressStream2() with ZSTD_e_end until it returns 0, at which point you are free to
+* start a new frame.
+* note: ZSTD_e_end will flush as much output as possible, meaning when compressing with multiple threads, it will
+* block until the flush is complete or the output buffer is full.
+* @return : 0 if frame fully completed and fully flushed,
+* >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
+* or an error code, which can be tested using ZSTD_isError().
+*
+* *******************************************************************/
+
+typedef ZSTD_CCtx ZSTD_CStream; /*< CCtx and CStream are now effectively same object (>= v1.3.0) */
+ /* Continue to distinguish them for compatibility with older versions <= v1.2.0 */
+/*===== ZSTD_CStream management functions =====*/
+ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
+ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs); /* accept NULL pointer */
+
+/*===== Streaming compression functions =====*/
+typedef enum {
+ ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal compression ratio */
+ ZSTD_e_flush=1, /* flush any data provided so far,
+ * it creates (at least) one new block, that can be decoded immediately on reception;
+ * frame will continue: any future data can still reference previously compressed data, improving compression.
+ * note : multithreaded compression will block to flush as much output as possible. */
+ ZSTD_e_end=2 /* flush any remaining data _and_ close current frame.
+ * note that frame is only closed after compressed data is fully flushed (return value == 0).
+ * After that point, any additional data starts a new frame.
+ * note : each frame is independent (does not reference any content from previous frame).
+ * note : multithreaded compression will block to flush as much output as possible. */
+} ZSTD_EndDirective;
+
+/*! ZSTD_compressStream2() :
+ * Behaves about the same as ZSTD_compressStream, with additional control over the end directive.
+ * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
+ * - Compression parameters cannot be changed once compression is started (except for a list of exceptions in multi-threading mode)
+ * - output->pos must be <= dstCapacity, input->pos must be <= srcSize
+ * - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
+ * - endOp must be a valid directive
+ * - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.
+ * - When nbWorkers>=1, function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads, flushes to output whatever is available,
+ * and then immediately returns, just indicating that there is some data remaining to be flushed.
+ * The function nonetheless guarantees forward progress : it will return only after it reads or writes at least one byte.
+ * - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.
+ * - @return provides a minimum amount of data remaining to be flushed from internal buffers
+ * or an error code, which can be tested using ZSTD_isError().
+ * if @return != 0, flush is not fully completed, there is still some data left within internal buffers.
+ * This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.
+ * For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.
+ * - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),
+ * only ZSTD_e_end or ZSTD_e_flush operations are allowed.
+ * Before starting a new compression job, or changing compression parameters,
+ * it is required to fully flush internal buffers.
+ */
+ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input,
+ ZSTD_EndDirective endOp);
+
+
+/* These buffer sizes are softly recommended.
+ * They are not required : ZSTD_compressStream*() happily accepts any buffer size, for both input and output.
+ * Respecting the recommended size just makes it a bit easier for ZSTD_compressStream*(),
+ * reducing the amount of memory shuffling and buffering, resulting in minor performance savings.
+ *
+ * However, note that these recommendations are from the perspective of a C caller program.
+ * If the streaming interface is invoked from some other language,
+ * especially managed ones such as Java or Go, through a foreign function interface such as jni or cgo,
+ * a major performance rule is to reduce crossings of such an interface to an absolute minimum.
+ * It's not rare for more time to end up being spent on the interface than on compression itself.
+ * In such cases, prefer using buffers as large as practical,
+ * for both input and output, to reduce the number of round trips.
+ */
+ZSTDLIB_API size_t ZSTD_CStreamInSize(void); /*< recommended size for input buffer */
+ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /*< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block. */
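+
+/* Illustrative sketch (example only, mirroring the usage described above) :
+ * a minimal single-threaded streaming-compression loop built on
+ * ZSTD_compressStream2() and the recommended buffer sizes.
+ * `read_chunk()` and `write_all()` are hypothetical I/O helpers ;
+ * cleanup on error paths is omitted for brevity. */
+#if 0
+#include <stdlib.h>
+static int example_stream_compress(int level)
+{
+    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+    size_t const inCap  = ZSTD_CStreamInSize();
+    size_t const outCap = ZSTD_CStreamOutSize();
+    void* const inBuf   = malloc(inCap);
+    void* const outBuf  = malloc(outCap);
+    int lastChunk;
+    if (cctx == NULL || inBuf == NULL || outBuf == NULL) return -1;
+    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
+    do {
+        size_t const readSize = read_chunk(inBuf, inCap);   /* hypothetical */
+        ZSTD_inBuffer input = { inBuf, readSize, 0 };
+        ZSTD_EndDirective const mode = (readSize < inCap) ? ZSTD_e_end : ZSTD_e_continue;
+        size_t remaining;
+        lastChunk = (readSize < inCap);
+        do {   /* drain : consume all input ; for ZSTD_e_end, flush until @return == 0 */
+            ZSTD_outBuffer output = { outBuf, outCap, 0 };
+            remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
+            if (ZSTD_isError(remaining)) return -1;
+            write_all(outBuf, output.pos);                  /* hypothetical */
+        } while (lastChunk ? (remaining != 0) : (input.pos != input.size));
+    } while (!lastChunk);
+    free(outBuf); free(inBuf);
+    ZSTD_freeCCtx(cctx);
+    return 0;
+}
+#endif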
+
+
+/* *****************************************************************************
+ * The following is a legacy streaming API.
+ * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2().
+ * It is redundant, but remains fully supported.
+ * Advanced parameters and dictionary compression can only be used through the
+ * new API.
+ ******************************************************************************/
+
+/*!
+ * Equivalent to:
+ *
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
+ * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ */
+ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
+/*!
+ * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).
+ * NOTE: The return value is different. ZSTD_compressStream() returns a hint for
+ * the next read size (if non-zero and not an error). ZSTD_compressStream2()
+ * returns the minimum nb of bytes left to flush (if non-zero and not an error).
+ */
+ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush). */
+ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */
+ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+
+
+/*-***************************************************************************
+* Streaming decompression - HowTo
+*
+* A ZSTD_DStream object is required to track streaming operations.
+* Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
+* ZSTD_DStream objects can be re-used multiple times.
+*
+* Use ZSTD_initDStream() to start a new decompression operation.
+* @return : recommended first input size
+* Alternatively, use advanced API to set specific properties.
+*
+* Use ZSTD_decompressStream() repetitively to consume your input.
+* The function will update both `pos` fields.
+* If `input.pos < input.size`, some input has not been consumed.
+* It's up to the caller to present again remaining data.
+* The function tries to flush all data decoded immediately, respecting output buffer size.
+* If `output.pos < output.size`, decoder has flushed everything it could.
+* But if `output.pos == output.size`, there might be some data left within internal buffers.
+* In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer.
+* Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.
+* @return : 0 when a frame is completely decoded and fully flushed,
+* or an error code, which can be tested using ZSTD_isError(),
+* or any other value > 0, which means there is still some decoding or flushing to do to complete current frame :
+* the return value is a suggested next input size (just a hint for better latency)
+* that will never request more than the remaining frame size.
+* *******************************************************************************/
+
+typedef ZSTD_DCtx ZSTD_DStream; /*< DCtx and DStream are now effectively same object (>= v1.3.0) */
+ /* For compatibility with versions <= v1.2.0, prefer differentiating them. */
+/*===== ZSTD_DStream management functions =====*/
+ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
+ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds); /* accept NULL pointer */
+
+/*===== Streaming decompression functions =====*/
+
+/* This function is redundant with the advanced API and equivalent to:
+ *
+ * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ * ZSTD_DCtx_refDDict(zds, NULL);
+ */
+ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
+
+ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+
+ZSTDLIB_API size_t ZSTD_DStreamInSize(void); /*!< recommended size for input buffer */
+ZSTDLIB_API size_t ZSTD_DStreamOutSize(void); /*!< recommended size for output buffer. Guarantee to successfully flush at least one complete block in all circumstances. */
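+
+/* Illustrative sketch (example only, mirroring the HowTo above) :
+ * a minimal streaming-decompression loop.
+ * `read_chunk()` and `write_all()` are hypothetical I/O helpers ;
+ * cleanup on error paths is omitted for brevity. */
+#if 0
+#include <stdlib.h>
+static int example_stream_decompress(void)
+{
+    ZSTD_DStream* const zds = ZSTD_createDStream();
+    size_t const inCap  = ZSTD_DStreamInSize();
+    size_t const outCap = ZSTD_DStreamOutSize();
+    void* const inBuf   = malloc(inCap);
+    void* const outBuf  = malloc(outCap);
+    size_t readSize;
+    size_t lastRet = 0;
+    if (zds == NULL || inBuf == NULL || outBuf == NULL) return -1;
+    while ((readSize = read_chunk(inBuf, inCap)) != 0) {    /* hypothetical */
+        ZSTD_inBuffer input = { inBuf, readSize, 0 };
+        while (input.pos < input.size) {
+            ZSTD_outBuffer output = { outBuf, outCap, 0 };
+            lastRet = ZSTD_decompressStream(zds, &output, &input);
+            if (ZSTD_isError(lastRet)) return -1;
+            write_all(outBuf, output.pos);                  /* hypothetical */
+        }
+    }
+    free(outBuf); free(inBuf);
+    ZSTD_freeDStream(zds);
+    return (lastRet == 0) ? 0 : -1;  /* non-zero : input ended inside a frame */
+}
+#endif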
+
+
+/* ************************
+* Simple dictionary API
+***************************/
+/*! ZSTD_compress_usingDict() :
+ * Compression at an explicit compression level using a Dictionary.
+ * A dictionary can be any arbitrary data segment (also called a prefix),
+ * or a buffer with specified information (see dictBuilder/zdict.h).
+ * Note : This function loads the dictionary, resulting in significant startup delay.
+ * It's intended for a dictionary used only once.
+ * Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */
+ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ int compressionLevel);
+
+/*! ZSTD_decompress_usingDict() :
+ * Decompression using a known Dictionary.
+ * Dictionary must be identical to the one used during compression.
+ * Note : This function loads the dictionary, resulting in significant startup delay.
+ * It's intended for a dictionary used only once.
+ * Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */
+ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize);
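+
+/* Illustrative sketch (example only) : one-shot round trip using a shared
+ * dictionary buffer. Both sides must use the exact same `dict` content.
+ * Error/NULL handling is simplified for brevity. */
+#if 0
+#include <stdlib.h>
+static void example_dict_roundtrip(const void* dict, size_t dictSize,
+                                   const void* src, size_t srcSize)
+{
+    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+    size_t const bound = ZSTD_compressBound(srcSize);
+    void* const dst = malloc(bound);
+    void* const out = malloc(srcSize);
+    size_t const cSize = ZSTD_compress_usingDict(cctx, dst, bound,
+                                                 src, srcSize,
+                                                 dict, dictSize, 3);
+    if (!ZSTD_isError(cSize)) {
+        size_t const dSize = ZSTD_decompress_usingDict(dctx, out, srcSize,
+                                                       dst, cSize,
+                                                       dict, dictSize);
+        (void)dSize;   /* == srcSize on success */
+    }
+    free(out); free(dst);
+    ZSTD_freeDCtx(dctx); ZSTD_freeCCtx(cctx);
+}
+#endif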
+
+
+/* *********************************
+ * Bulk processing dictionary API
+ **********************************/
+typedef struct ZSTD_CDict_s ZSTD_CDict;
+
+/*! ZSTD_createCDict() :
+ * When compressing multiple messages or blocks using the same dictionary,
+ * it's recommended to digest the dictionary only once, since it's a costly operation.
+ * ZSTD_createCDict() will create a state from digesting a dictionary.
+ * The resulting state can be used for future compression operations with very limited startup cost.
+ * ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
+ * @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict.
+ * Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content.
+ * Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer,
+ * in which case the only thing that it transports is the @compressionLevel.
+ * This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively,
+ * expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
+ int compressionLevel);
+
+/*! ZSTD_freeCDict() :
+ * Function frees memory allocated by ZSTD_createCDict().
+ * If a NULL pointer is passed, no operation is performed. */
+ZSTDLIB_API size_t ZSTD_freeCDict(ZSTD_CDict* CDict);
+
+/*! ZSTD_compress_usingCDict() :
+ * Compression using a digested Dictionary.
+ * Recommended when same dictionary is used multiple times.
+ * Note : compression level is _decided at dictionary creation time_,
+ * and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */
+ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict);
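+
+/* Illustrative sketch (example only) : digest the dictionary once, then reuse
+ * it for many small messages. The message arrays and output handling are
+ * hypothetical. */
+#if 0
+static void example_cdict_reuse(const void* dict, size_t dictSize,
+                                const void* const* msgs, const size_t* msgSizes,
+                                size_t nbMsgs, void* dst, size_t dstCapacity)
+{
+    ZSTD_CDict* const cdict = ZSTD_createCDict(dict, dictSize, 3);  /* costly, done once */
+    ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
+    size_t i;
+    if (cdict == NULL || cctx == NULL) goto cleanup;
+    for (i = 0; i < nbMsgs; i++) {   /* cheap per message */
+        size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
+                                                      msgs[i], msgSizes[i], cdict);
+        if (ZSTD_isError(cSize)) break;
+        /* ... store or transmit dst[0..cSize) ... */
+    }
+cleanup:
+    ZSTD_freeCCtx(cctx);
+    ZSTD_freeCDict(cdict);
+}
+#endif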
+
+
+typedef struct ZSTD_DDict_s ZSTD_DDict;
+
+/*! ZSTD_createDDict() :
+ * Create a digested dictionary, ready to start decompression operation without startup delay.
+ * dictBuffer can be released after DDict creation, as its content is copied inside DDict. */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
+
+/*! ZSTD_freeDDict() :
+ * Function frees memory allocated with ZSTD_createDDict()
+ * If a NULL pointer is passed, no operation is performed. */
+ZSTDLIB_API size_t ZSTD_freeDDict(ZSTD_DDict* ddict);
+
+/*! ZSTD_decompress_usingDDict() :
+ * Decompression using a digested Dictionary.
+ * Recommended when same dictionary is used multiple times. */
+ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_DDict* ddict);
+
+
+/* ******************************
+ * Dictionary helper functions
+ *******************************/
+
+/*! ZSTD_getDictID_fromDict() :
+ * Provides the dictID stored within dictionary.
+ * if @return == 0, the dictionary is not conformant with Zstandard specification.
+ * It can still be loaded, but as a content-only dictionary. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
+
+/*! ZSTD_getDictID_fromDDict() :
+ * Provides the dictID of the dictionary loaded into `ddict`.
+ * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
+
+/*! ZSTD_getDictID_fromFrame() :
+ * Provides the dictID required to decompress the frame stored within `src`.
+ * If @return == 0, the dictID could not be decoded.
+ * This could be for one of the following reasons :
+ * - The frame does not require a dictionary to be decoded (most common case).
+ * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information.
+ * Note : this use case also happens when using a non-conformant dictionary.
+ * - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
+ * - This is not a Zstandard frame.
+ * When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
+
+
+/* *****************************************************************************
+ * Advanced dictionary and prefix API
+ *
+ * This API allows dictionaries to be used with ZSTD_compress2(),
+ * ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and
+ * only reset when the context is reset with ZSTD_reset_parameters or
+ * ZSTD_reset_session_and_parameters. Prefixes are single-use.
+ ******************************************************************************/
+
+
+/*! ZSTD_CCtx_loadDictionary() :
+ * Create an internal CDict from `dict` buffer.
+ * Decompression will have to use same dictionary.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,
+ * meaning "return to no-dictionary mode".
+ * Note 1 : Dictionary is sticky, it will be used for all future compressed frames.
+ * To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters).
+ * Note 2 : Loading a dictionary involves building tables.
+ * It's also a CPU consuming operation, with non-negligible impact on latency.
+ * Tables are dependent on compression parameters, and for this reason,
+ * compression parameters can no longer be changed after loading a dictionary.
+ * Note 3 :`dict` content will be copied internally.
+ * Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.
+ * In such a case, dictionary buffer must outlive its users.
+ * Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
+ * to precisely select how dictionary content must be interpreted. */
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
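+
+/* Illustrative sketch (example only) : per Note 2 above, set compression
+ * parameters *before* loading the dictionary, since loading freezes them. */
+#if 0
+static size_t example_load_then_compress(ZSTD_CCtx* cctx,
+                                         void* dst, size_t dstCapacity,
+                                         const void* src, size_t srcSize,
+                                         const void* dict, size_t dictSize)
+{
+    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19); /* first : params */
+    ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);            /* then : dictionary */
+    return ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
+}
+#endif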
+
+/*! ZSTD_CCtx_refCDict() :
+ * Reference a prepared dictionary, to be used for all next compressed frames.
+ * Note that compression parameters are enforced from within CDict,
+ * and supersede any compression parameter previously set within CCtx.
+ * The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs.
+ * The ignored parameters will be used again if the CCtx is returned to no-dictionary mode.
+ * The dictionary will remain valid for future compressed frames using same CCtx.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special : Referencing a NULL CDict means "return to no-dictionary mode".
+ * Note 1 : Currently, only one dictionary can be managed.
+ * Referencing a new dictionary effectively "discards" any previous one.
+ * Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */
+ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
+
+/*! ZSTD_CCtx_refPrefix() :
+ * Reference a prefix (single-usage dictionary) for next compressed frame.
+ * A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end).
+ * Decompression will need same prefix to properly regenerate data.
+ * Compressing with a prefix is similar in outcome to performing a diff and compressing it,
+ * but performs much faster, especially during decompression (compression speed is tunable with compression level).
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary
+ * Note 1 : Prefix buffer is referenced. It **must** outlive compression.
+ * Its content must remain unmodified during compression.
+ * Note 2 : If the intention is to diff some large src data blob with some prior version of itself,
+ * ensure that the window size is large enough to contain the entire source.
+ * See ZSTD_c_windowLog.
+ * Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.
+ * It's a CPU consuming operation, with non-negligible impact on latency.
+ * If there is a need to use the same prefix multiple times, consider loadDictionary instead.
+ * Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent).
+ * Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */
+ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
+ const void* prefix, size_t prefixSize);
+
+/*! ZSTD_DCtx_loadDictionary() :
+ * Create an internal DDict from dict buffer,
+ * to be used to decompress next frames.
+ * The dictionary remains valid for all future frames, until explicitly invalidated.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
+ * meaning "return to no-dictionary mode".
+ * Note 1 : Loading a dictionary involves building tables,
+ * which has a non-negligible impact on CPU usage and latency.
+ * It's recommended to "load once, use many times", to amortize the cost.
+ * Note 2 :`dict` content will be copied internally, so `dict` can be released after loading.
+ * Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead.
+ * Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of
+ * how dictionary content is loaded and interpreted.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+
+/*! ZSTD_DCtx_refDDict() :
+ * Reference a prepared dictionary, to be used to decompress next frames.
+ * The dictionary remains active for decompression of future frames using same DCtx.
+ *
+ * If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function
+ * will store the DDict references in a table, and the DDict used for decompression
+ * will be determined at decompression time, as per the dict ID in the frame.
+ * The memory for the table is allocated on the first call to refDDict, and can be
+ * freed with ZSTD_freeDCtx().
+ *
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Note 1 : Currently, only one dictionary can be managed.
+ * Referencing a new dictionary effectively "discards" any previous one.
+ * Special: referencing a NULL DDict means "return to no-dictionary mode".
+ * Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
+
+/*! ZSTD_DCtx_refPrefix() :
+ * Reference a prefix (single-usage dictionary) to decompress next frame.
+ * This is the reverse operation of ZSTD_CCtx_refPrefix(),
+ * and must use the same prefix as the one used during compression.
+ * Prefix is **only used once**. Reference is discarded at end of frame.
+ * End of frame is reached when ZSTD_decompressStream() returns 0.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary
+ * Note 2 : Prefix buffer is referenced. It **must** outlive decompression.
+ * Prefix buffer must remain unmodified up to the end of frame,
+ * reached when ZSTD_decompressStream() returns 0.
+ * Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).
+ * Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section)
+ * Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost.
+ * A full dictionary is more costly, as it requires building tables.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx,
+ const void* prefix, size_t prefixSize);
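+
+/* Illustrative sketch (example only) : diff-like compression of `newV`
+ * against `prevV` using a prefix. The decoder must reference the exact same
+ * prefix before each frame. */
+#if 0
+static size_t example_prefix_roundtrip(ZSTD_CCtx* cctx, ZSTD_DCtx* dctx,
+                                       const void* prevV, size_t prevSize,
+                                       const void* newV, size_t newSize,
+                                       void* dst, size_t dstCapacity,
+                                       void* out /* >= newSize bytes */)
+{
+    size_t cSize, dSize;
+    ZSTD_CCtx_refPrefix(cctx, prevV, prevSize);     /* single-use reference */
+    cSize = ZSTD_compress2(cctx, dst, dstCapacity, newV, newSize);
+    if (ZSTD_isError(cSize)) return cSize;
+    ZSTD_DCtx_refPrefix(dctx, prevV, prevSize);     /* must match compression side */
+    dSize = ZSTD_decompressDCtx(dctx, out, newSize, dst, cSize);
+    return dSize;                                   /* == newSize on success */
+}
+#endif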
+
+/* === Memory management === */
+
+/*! ZSTD_sizeof_*() :
+ * These functions give the _current_ memory usage of selected object.
+ * Note that object memory usage can evolve (increase or decrease) over time. */
+ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
+ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
+ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
+ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
+ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
+
+#endif /* ZSTD_H_235446 */
+
+
+/* **************************************************************************************
+ * ADVANCED AND EXPERIMENTAL FUNCTIONS
+ ****************************************************************************************
+ * The definitions in the following section are considered experimental.
+ * They are provided for advanced scenarios.
+ * They should never be used with a dynamic library, as prototypes may change in the future.
+ * Use them only in association with static linking.
+ * ***************************************************************************************/
+
+#if !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
+#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
+
+/* **************************************************************************************
+ * experimental API (static linking only)
+ ****************************************************************************************
+ * The following symbols and constants
+ * are not planned to join "stable API" status in the near future.
+ * They can still change in future versions.
+ * Some of them are planned to remain in the static_only section indefinitely.
+ * Some of them might be removed in the future (especially when redundant with existing stable functions)
+ * ***************************************************************************************/
+
+#define ZSTD_FRAMEHEADERSIZE_PREFIX(format) ((format) == ZSTD_f_zstd1 ? 5 : 1) /* minimum input size required to query frame header size */
+#define ZSTD_FRAMEHEADERSIZE_MIN(format) ((format) == ZSTD_f_zstd1 ? 6 : 2)
+#define ZSTD_FRAMEHEADERSIZE_MAX 18 /* can be useful for static allocation */
+#define ZSTD_SKIPPABLEHEADERSIZE 8
+
+/* compression parameter bounds */
+#define ZSTD_WINDOWLOG_MAX_32 30
+#define ZSTD_WINDOWLOG_MAX_64 31
+#define ZSTD_WINDOWLOG_MAX ((int)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64))
+#define ZSTD_WINDOWLOG_MIN 10
+#define ZSTD_HASHLOG_MAX ((ZSTD_WINDOWLOG_MAX < 30) ? ZSTD_WINDOWLOG_MAX : 30)
+#define ZSTD_HASHLOG_MIN 6
+#define ZSTD_CHAINLOG_MAX_32 29
+#define ZSTD_CHAINLOG_MAX_64 30
+#define ZSTD_CHAINLOG_MAX ((int)(sizeof(size_t) == 4 ? ZSTD_CHAINLOG_MAX_32 : ZSTD_CHAINLOG_MAX_64))
+#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN
+#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1)
+#define ZSTD_SEARCHLOG_MIN 1
+#define ZSTD_MINMATCH_MAX 7 /* only for ZSTD_fast, other strategies are limited to 6 */
+#define ZSTD_MINMATCH_MIN 3 /* only for ZSTD_btopt+, faster strategies are limited to 4 */
+#define ZSTD_TARGETLENGTH_MAX ZSTD_BLOCKSIZE_MAX
+#define ZSTD_TARGETLENGTH_MIN 0 /* note : comparing this constant to an unsigned results in a tautological test */
+#define ZSTD_STRATEGY_MIN ZSTD_fast
+#define ZSTD_STRATEGY_MAX ZSTD_btultra2
+
+
+#define ZSTD_OVERLAPLOG_MIN 0
+#define ZSTD_OVERLAPLOG_MAX 9
+
+#define ZSTD_WINDOWLOG_LIMIT_DEFAULT 27 /* by default, the streaming decoder will refuse any frame
+ * requiring larger than (1<<ZSTD_WINDOWLOG_LIMIT_DEFAULT) window size,
+ * to preserve host's memory from unreasonable requirements.
+ * This limit can be overridden using ZSTD_DCtx_setParameter(,ZSTD_d_windowLogMax,).
+ * The limit does not apply for one-pass decoders (such as ZSTD_decompress()), since no additional memory is allocated */
+
+
+/* LDM parameter bounds */
+#define ZSTD_LDM_HASHLOG_MIN ZSTD_HASHLOG_MIN
+#define ZSTD_LDM_HASHLOG_MAX ZSTD_HASHLOG_MAX
+#define ZSTD_LDM_MINMATCH_MIN 4
+#define ZSTD_LDM_MINMATCH_MAX 4096
+#define ZSTD_LDM_BUCKETSIZELOG_MIN 1
+#define ZSTD_LDM_BUCKETSIZELOG_MAX 8
+#define ZSTD_LDM_HASHRATELOG_MIN 0
+#define ZSTD_LDM_HASHRATELOG_MAX (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
+
+/* Advanced parameter bounds */
+#define ZSTD_TARGETCBLOCKSIZE_MIN 64
+#define ZSTD_TARGETCBLOCKSIZE_MAX ZSTD_BLOCKSIZE_MAX
+#define ZSTD_SRCSIZEHINT_MIN 0
+#define ZSTD_SRCSIZEHINT_MAX INT_MAX
+
+/* internal */
+#define ZSTD_HASHLOG3_MAX 17
+
+
+/* --- Advanced types --- */
+
+typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params;
+
+typedef struct {
+ unsigned int offset; /* The offset of the match. (NOT the same as the offset code)
+ * If offset == 0 and matchLength == 0, this sequence represents the last
+ * literals in the block of litLength size.
+ */
+
+ unsigned int litLength; /* Literal length of the sequence. */
+ unsigned int matchLength; /* Match length of the sequence. */
+
+ /* Note: Users of this API may provide a sequence with matchLength == litLength == offset == 0.
+ * In this case, we will treat the sequence as a marker for a block boundary.
+ */
+
+ unsigned int rep; /* Represents which repeat offset is represented by the field 'offset'.
+ * Ranges from [0, 3].
+ *
+ * Repeat offsets are essentially previous offsets from previous sequences sorted in
+ * recency order. For more detail, see doc/zstd_compression_format.md
+ *
+ * If rep == 0, then 'offset' does not contain a repeat offset.
+ * If rep > 0:
+ * If litLength != 0:
+ * rep == 1 --> offset == repeat_offset_1
+ * rep == 2 --> offset == repeat_offset_2
+ * rep == 3 --> offset == repeat_offset_3
+ * If litLength == 0:
+ * rep == 1 --> offset == repeat_offset_2
+ * rep == 2 --> offset == repeat_offset_3
+ * rep == 3 --> offset == repeat_offset_1 - 1
+ *
+ * Note: This field is optional. ZSTD_generateSequences() will calculate the value of
+ * 'rep', but repeat offsets do not necessarily need to be calculated from an external
+ * sequence provider's perspective. For example, ZSTD_compressSequences() does not
+ * use this 'rep' field at all (as of now).
+ */
+} ZSTD_Sequence;
+
+typedef struct {
+ unsigned windowLog; /*< largest match distance : larger == more compression, more memory needed during decompression */
+ unsigned chainLog; /*< fully searched segment : larger == more compression, slower, more memory (useless for fast) */
+ unsigned hashLog; /*< dispatch table : larger == faster, more memory */
+ unsigned searchLog; /*< nb of searches : larger == more compression, slower */
+ unsigned minMatch; /*< match length searched : larger == faster decompression, sometimes less compression */
+ unsigned targetLength; /*< acceptable match size for optimal parser (only) : larger == more compression, slower */
+ ZSTD_strategy strategy; /*< see ZSTD_strategy definition above */
+} ZSTD_compressionParameters;
+
+typedef struct {
+ int contentSizeFlag; /*< 1: content size will be in frame header (when known) */
+ int checksumFlag; /*< 1: generate a 32-bits checksum using XXH64 algorithm at end of frame, for error detection */
+ int noDictIDFlag; /*< 1: no dictID will be saved into frame header (dictID is only useful for dictionary compression) */
+} ZSTD_frameParameters;
+
+typedef struct {
+ ZSTD_compressionParameters cParams;
+ ZSTD_frameParameters fParams;
+} ZSTD_parameters;
+
+typedef enum {
+ ZSTD_dct_auto = 0, /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */
+ ZSTD_dct_rawContent = 1, /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */
+ ZSTD_dct_fullDict = 2 /* refuses to load a dictionary if it does not respect Zstandard's specification, starting with ZSTD_MAGIC_DICTIONARY */
+} ZSTD_dictContentType_e;
+
+typedef enum {
+ ZSTD_dlm_byCopy = 0, /*< Copy dictionary content internally */
+ ZSTD_dlm_byRef = 1 /*< Reference dictionary content -- the dictionary buffer must outlive its users. */
+} ZSTD_dictLoadMethod_e;
+
+typedef enum {
+ ZSTD_f_zstd1 = 0, /* zstd frame format, specified in zstd_compression_format.md (default) */
+ ZSTD_f_zstd1_magicless = 1 /* Variant of zstd frame format, without initial 4-bytes magic number.
+ * Useful to save 4 bytes per generated frame.
+                                 * Decoder cannot recognise this format automatically, requiring this instruction. */
+} ZSTD_format_e;
+
+typedef enum {
+ /* Note: this enum controls ZSTD_d_forceIgnoreChecksum */
+ ZSTD_d_validateChecksum = 0,
+ ZSTD_d_ignoreChecksum = 1
+} ZSTD_forceIgnoreChecksum_e;
+
+typedef enum {
+ /* Note: this enum controls ZSTD_d_refMultipleDDicts */
+ ZSTD_rmd_refSingleDDict = 0,
+ ZSTD_rmd_refMultipleDDicts = 1
+} ZSTD_refMultipleDDicts_e;
+
+typedef enum {
+ /* Note: this enum and the behavior it controls are effectively internal
+ * implementation details of the compressor. They are expected to continue
+ * to evolve and should be considered only in the context of extremely
+ * advanced performance tuning.
+ *
+ * Zstd currently supports the use of a CDict in three ways:
+ *
+ * - The contents of the CDict can be copied into the working context. This
+ * means that the compression can search both the dictionary and input
+ * while operating on a single set of internal tables. This makes
+ * the compression faster per-byte of input. However, the initial copy of
+ * the CDict's tables incurs a fixed cost at the beginning of the
+ * compression. For small compressions (< 8 KB), that copy can dominate
+ * the cost of the compression.
+ *
+ * - The CDict's tables can be used in-place. In this model, compression is
+ * slower per input byte, because the compressor has to search two sets of
+ * tables. However, this model incurs no start-up cost (as long as the
+ * working context's tables can be reused). For small inputs, this can be
+ * faster than copying the CDict's tables.
+ *
+ * - The CDict's tables are not used at all, and instead we use the working
+ * context alone to reload the dictionary and use params based on the source
+ * size. See ZSTD_compress_insertDictionary() and ZSTD_compress_usingDict().
+ * This method is effective when the dictionary sizes are very small relative
+ * to the input size, and the input size is fairly large to begin with.
+ *
+ * Zstd has a simple internal heuristic that selects which strategy to use
+ * at the beginning of a compression. However, if experimentation shows that
+ * Zstd is making poor choices, it is possible to override that choice with
+ * this enum.
+ */
+ ZSTD_dictDefaultAttach = 0, /* Use the default heuristic. */
+ ZSTD_dictForceAttach = 1, /* Never copy the dictionary. */
+ ZSTD_dictForceCopy = 2, /* Always copy the dictionary. */
+ ZSTD_dictForceLoad = 3 /* Always reload the dictionary */
+} ZSTD_dictAttachPref_e;
+
+typedef enum {
+ ZSTD_lcm_auto = 0, /*< Automatically determine the compression mode based on the compression level.
+ * Negative compression levels will be uncompressed, and positive compression
+ * levels will be compressed. */
+ ZSTD_lcm_huffman = 1, /*< Always attempt Huffman compression. Uncompressed literals will still be
+ * emitted if Huffman compression is not profitable. */
+ ZSTD_lcm_uncompressed = 2 /*< Always emit uncompressed literals. */
+} ZSTD_literalCompressionMode_e;
+
+
+/* *************************************
+* Frame size functions
+***************************************/
+
+/*! ZSTD_findDecompressedSize() :
+ * `src` should point to the start of a series of ZSTD encoded and/or skippable frames
+ * `srcSize` must be the _exact_ size of this series
+ * (i.e. there should be a frame boundary at `src + srcSize`)
+ * @return : - decompressed size of all data in all successive frames
+ * - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
+ * - if an error occurred: ZSTD_CONTENTSIZE_ERROR
+ *
+ * note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
+ * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 2 : decompressed size is always present when compression is done with ZSTD_compress()
+ * note 3 : decompressed size can be very large (64-bits value),
+ * potentially larger than what local system can handle as a single memory segment.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
+ * Always ensure result fits within application's authorized limits.
+ * Each application can set its own limits.
+ * note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
+ * read each contained frame header. This is fast as most of the data is skipped,
+ * however it does mean that all frame data must be present and valid. */
+ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
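+
+/* Illustrative sketch (example only) : size the destination buffer from the
+ * frame header(s), enforcing an application-defined cap per note 4 above.
+ * `MY_APP_LIMIT` is a hypothetical application constant. */
+#if 0
+#include <stdlib.h>
+static void* example_alloc_for_decompression(const void* src, size_t srcSize)
+{
+    unsigned long long const total = ZSTD_findDecompressedSize(src, srcSize);
+    if (total == ZSTD_CONTENTSIZE_ERROR) return NULL;    /* invalid input */
+    if (total == ZSTD_CONTENTSIZE_UNKNOWN) return NULL;  /* fall back to streaming */
+    if (total > MY_APP_LIMIT) return NULL;               /* untrusted field : cap it */
+    return malloc((size_t)total);
+}
+#endif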
+
+/*! ZSTD_decompressBound() :
+ * `src` should point to the start of a series of ZSTD encoded and/or skippable frames
+ * `srcSize` must be the _exact_ size of this series
+ * (i.e. there should be a frame boundary at `src + srcSize`)
+ * @return : - upper-bound for the decompressed size of all data in all successive frames
+ * - if an error occurred: ZSTD_CONTENTSIZE_ERROR
+ *
+ * note 1 : an error can occur if `src` contains an invalid or incorrectly formatted frame.
+ * note 2 : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`.
+ * in this case, `ZSTD_findDecompressedSize` and `ZSTD_decompressBound` return the same value.
+ * note 3 : when the decompressed size field isn't available, the upper-bound for that frame is calculated by:
+ * upper-bound = # blocks * min(128 KB, Window_Size)
+ */
+ZSTDLIB_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);
+
+/*! ZSTD_frameHeaderSize() :
+ * srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX(ZSTD_f_zstd1).
+ * @return : size of the Frame Header,
+ * or an error code (if srcSize is too small) */
+ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
+
+typedef enum {
+ ZSTD_sf_noBlockDelimiters = 0, /* Representation of ZSTD_Sequence has no block delimiters, sequences only */
+ ZSTD_sf_explicitBlockDelimiters = 1 /* Representation of ZSTD_Sequence contains explicit block delimiters */
+} ZSTD_sequenceFormat_e;
+
+/*! ZSTD_generateSequences() :
+ * Generate sequences using ZSTD_compress2, given a source buffer.
+ *
+ * Each block will end with a dummy sequence
+ * with offset == 0, matchLength == 0, and litLength == length of last literals.
+ * litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0)
+ * simply acts as a block delimiter.
+ *
+ * zc can be used to insert custom compression params.
+ * This function invokes ZSTD_compress2().
+ *
+ * The output of this function can be fed into ZSTD_compressSequences() with CCtx
+ * setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters
+ * @return : number of sequences generated
+ */
+
+ZSTDLIB_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
+ size_t outSeqsSize, const void* src, size_t srcSize);
+
+/*! ZSTD_mergeBlockDelimiters() :
+ * Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals
+ * by merging them into the literals of the next sequence.
+ *
+ * As such, the final generated result has no explicit representation of block boundaries,
+ * and the final last literals segment is not represented in the sequences.
+ *
+ * The output of this function can be fed into ZSTD_compressSequences() with CCtx
+ * setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters
+ * @return : number of sequences left after merging
+ */
+ZSTDLIB_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize);
+
+/*! ZSTD_compressSequences() :
+ * Compress an array of ZSTD_Sequence, generated from the original source buffer, into dst.
+ * If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.)
+ * The entire source is compressed into a single frame.
+ *
+ * The compression behavior changes based on cctx params. In particular:
+ * If ZSTD_c_blockDelimiters == ZSTD_sf_noBlockDelimiters, the array of ZSTD_Sequence is expected to contain
+ * no block delimiters (defined in ZSTD_Sequence). Block boundaries are roughly determined based on
+ * the block size derived from the cctx, and sequences may be split. This is the default setting.
+ *
+ * If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain
+ * block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.
+ *
+ * If ZSTD_c_validateSequences == 0, this function will blindly accept the sequences provided. Invalid sequences cause undefined
+ * behavior. If ZSTD_c_validateSequences == 1, and a sequence is invalid (see doc/zstd_compression_format.md for
+ * specifics regarding offset/matchlength requirements), the function will bail out and return an error.
+ *
+ * In addition to the two adjustable experimental params, there are other important cctx params.
+ * - ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN.
+ * - ZSTD_c_compressionLevel accordingly adjusts the strength of the entropy coder, as it would in typical compression.
+ * - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset
+ * is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md
+ *
+ * Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused.
+ * Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly,
+ * and cannot emit an RLE block that disagrees with the repcode history
+ * @return : final compressed size or a ZSTD error.
+ */
+ZSTDLIB_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize,
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ const void* src, size_t srcSize);
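+
+/* Illustrative sketch (example only) : round-trip the sequence-level API by
+ * generating sequences with explicit block delimiters, then re-compressing
+ * them. The `maxSeqs` sizing is an assumption, not a documented bound. */
+#if 0
+#include <stdlib.h>
+static size_t example_sequence_pipeline(ZSTD_CCtx* cctx,
+                                        void* dst, size_t dstCapacity,
+                                        const void* src, size_t srcSize)
+{
+    size_t const maxSeqs = srcSize / 3 + 32;   /* assumption : generous upper bound */
+    ZSTD_Sequence* const seqs = malloc(maxSeqs * sizeof(ZSTD_Sequence));
+    size_t nbSeqs, cSize;
+    if (seqs == NULL) return (size_t)-1;       /* simplistic error signalling */
+    nbSeqs = ZSTD_generateSequences(cctx, seqs, maxSeqs, src, srcSize);
+    ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
+    cSize = ZSTD_compressSequences(cctx, dst, dstCapacity, seqs, nbSeqs, src, srcSize);
+    free(seqs);
+    return cSize;
+}
+#endif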
+
+
+/*! ZSTD_writeSkippableFrame() :
+ * Generates a zstd skippable frame containing data given by src, and writes it to dst buffer.
+ *
+ * Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number,
+ * ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15.
+ * As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so
+ * the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.
+ *
+ * Returns an error if destination buffer is not large enough, if the source size is not representable
+ * with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid).
+ *
+ * @return : number of bytes written or a ZSTD error.
+ */
+ZSTDLIB_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, unsigned magicVariant);
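+
+/* Illustrative sketch (example only) : prepend a small metadata blob, as a
+ * skippable frame, to a compressed frame. Decoders that handle a series of
+ * frames skip it when decompressing. */
+#if 0
+static size_t example_with_metadata(ZSTD_CCtx* cctx,
+                                    void* dst, size_t dstCapacity,
+                                    const void* meta, size_t metaSize,
+                                    const void* src, size_t srcSize)
+{
+    size_t const skipSize = ZSTD_writeSkippableFrame(dst, dstCapacity,
+                                                     meta, metaSize, 0 /* magicVariant */);
+    size_t cSize;
+    if (ZSTD_isError(skipSize)) return skipSize;
+    cSize = ZSTD_compress2(cctx, (char*)dst + skipSize, dstCapacity - skipSize,
+                           src, srcSize);
+    if (ZSTD_isError(cSize)) return cSize;
+    return skipSize + cSize;   /* total bytes written into dst */
+}
+#endif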
+
+
+/* *************************************
+* Memory management
+***************************************/
+
+/*! ZSTD_estimate*() :
+ * These functions make it possible to estimate memory usage
+ * of a future {D,C}Ctx, before its creation.
+ *
+ * ZSTD_estimateCCtxSize() will provide a memory budget large enough
+ * for any compression level up to selected one.
+ * Note : Unlike ZSTD_estimateCStreamSize*(), this estimate
+ * does not include space for a window buffer.
+ * Therefore, the estimation is only guaranteed for single-shot compressions, not streaming.
+ * The estimate will assume the input may be arbitrarily large,
+ * which is the worst case.
+ *
+ * When srcSize can be bound by a known and rather "small" value,
+ * this fact can be used to provide a tighter estimation
+ * because the CCtx compression context will need less memory.
+ * This tighter estimation can be provided by more advanced functions
+ * ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(),
+ * and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter().
+ * Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits.
+ *
+ * Note 2 : only single-threaded compression is supported.
+ * ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
+ */
+ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
+ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
+ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
+ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
+
+/*! ZSTD_estimateCStreamSize() :
+ * ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.
+ * It will also consider src size to be arbitrarily "large", which is worst case.
+ * If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
+ * ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
+ * ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
+ * Note : CStream size estimation is only correct for single-threaded compression.
+ * ZSTD_DStream memory budget depends on window Size.
+ * This information can be passed manually, using ZSTD_estimateDStreamSize,
+ * or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame();
+ * Note : if streaming is initialized with ZSTD_init?Stream_usingDict(),
+ * an internal ?Dict will be created, whose additional size is not estimated here.
+ * In this case, get total size by adding ZSTD_estimate?DictSize */
+ZSTDLIB_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
+ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
+ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
+ZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
+ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
+
+/*! ZSTD_estimate?DictSize() :
+ * ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict().
+ * ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().
+ * Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller.
+ */
+ZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
+ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);
+
+/*! ZSTD_initStatic*() :
+ * Initialize an object using a pre-allocated fixed-size buffer.
+ * workspace: The memory area to emplace the object into.
+ * Provided pointer *must be 8-bytes aligned*.
+ * Buffer must outlive object.
+ * workspaceSize: Use ZSTD_estimate*Size() to determine
+ * how large workspace must be to support target scenario.
+ * @return : pointer to object (same address as workspace, just different type),
+ * or NULL if error (size too small, incorrect alignment, etc.)
+ * Note : zstd will never resize nor malloc() when using a static buffer.
+ * If the object requires more memory than available,
+ * zstd will just error out (typically ZSTD_error_memory_allocation).
+ * Note 2 : there is no corresponding "free" function.
+ * Since workspace is allocated externally, it must be freed externally too.
+ * Note 3 : cParams : use ZSTD_getCParams() to convert a compression level
+ * into its associated cParams.
+ * Limitation 1 : currently not compatible with internal dictionary creation, triggered by
+ * ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict().
+ * Limitation 2 : static cctx currently not compatible with multi-threading.
+ * Limitation 3 : static dctx is incompatible with legacy support.
+ */
+ZSTDLIB_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
+ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); /*< same as ZSTD_initStaticCCtx() */
+
+ZSTDLIB_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);
+ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); /*< same as ZSTD_initStaticDCtx() */
+
+ZSTDLIB_API const ZSTD_CDict* ZSTD_initStaticCDict(
+ void* workspace, size_t workspaceSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams);
+
+ZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict(
+ void* workspace, size_t workspaceSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType);
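+
+/* Illustrative sketch (example only) : carve a CCtx out of a caller-owned
+ * buffer. malloc() is assumed to return memory aligned to at least 8 bytes,
+ * per the workspace requirement above. */
+#if 0
+#include <stdlib.h>
+static size_t example_static_cctx(void* dst, size_t dstCapacity,
+                                  const void* src, size_t srcSize)
+{
+    int const level = 3;
+    size_t const wkspSize = ZSTD_estimateCCtxSize(level);
+    void* const wksp = malloc(wkspSize);
+    ZSTD_CCtx* const cctx = wksp ? ZSTD_initStaticCCtx(wksp, wkspSize) : NULL;
+    size_t cSize = (size_t)-1;   /* simplistic error signalling */
+    if (cctx != NULL)
+        cSize = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, level);
+    free(wksp);   /* no ZSTD_free*() counterpart : the workspace is caller-owned */
+    return cSize;
+}
+#endif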
+
+
+/*! Custom memory allocation :
+ * These prototypes make it possible to pass your own allocation/free functions.
+ * ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.
+ * All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.
+ */
+typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
+typedef void (*ZSTD_freeFunction) (void* opaque, void* address);
+typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
+static
+__attribute__((__unused__))
+ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /*< this constant defers to stdlib's functions */
+
+ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
+ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
+ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
+ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
+
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams,
+ ZSTD_customMem customMem);
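+
+/* Illustrative sketch (example only) : a counting allocator threaded through
+ * `opaque`, to observe how much memory a context requests. */
+#if 0
+#include <stdlib.h>
+static void* counting_alloc(void* opaque, size_t size)
+{
+    *(size_t*)opaque += size;   /* tally every allocation */
+    return malloc(size);
+}
+static void counting_free(void* opaque, void* address)
+{
+    (void)opaque;
+    free(address);
+}
+static ZSTD_CCtx* example_create_counting_cctx(size_t* totalAllocated)
+{
+    ZSTD_customMem const cmem = { counting_alloc, counting_free, totalAllocated };
+    return ZSTD_createCCtx_advanced(cmem);
+}
+#endif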
+
+/*! Thread pool :
+ * These prototypes make it possible to share a thread pool among multiple compression contexts.
+ * This can limit resources for applications with multiple threads where each one uses
+ * a threaded compression mode (via ZSTD_c_nbWorkers parameter).
+ * ZSTD_createThreadPool creates a new thread pool with a given number of threads.
+ * Note that such a pool must outlive every context that uses it.
+ * ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value
+ * to use an internal thread pool).
+ * ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer.
+ */
+typedef struct POOL_ctx_s ZSTD_threadPool;
+ZSTDLIB_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads);
+ZSTDLIB_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool); /* accept NULL pointer */
+ZSTDLIB_API size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool);
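+
+/* Illustrative sketch (example only) : two multithreaded contexts sharing one
+ * 4-thread pool. The pool must outlive every context that references it. */
+#if 0
+static void example_shared_pool(ZSTD_CCtx* cctx1, ZSTD_CCtx* cctx2)
+{
+    ZSTD_threadPool* const pool = ZSTD_createThreadPool(4);
+    if (pool == NULL) return;
+    ZSTD_CCtx_setParameter(cctx1, ZSTD_c_nbWorkers, 2);   /* enable MT mode */
+    ZSTD_CCtx_refThreadPool(cctx1, pool);
+    ZSTD_CCtx_setParameter(cctx2, ZSTD_c_nbWorkers, 2);
+    ZSTD_CCtx_refThreadPool(cctx2, pool);
+    /* ... run compressions on both contexts ... */
+    ZSTD_freeThreadPool(pool);   /* only after the contexts are done with it */
+}
+#endif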
+
+
+/*
+ * This API is temporary and is expected to change or disappear in the future!
+ */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2(
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ const ZSTD_CCtx_params* cctxParams,
+ ZSTD_customMem customMem);
+
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_customMem customMem);
+
+
+/* *************************************
+* Advanced compression functions
+***************************************/
+
+/*! ZSTD_createCDict_byReference() :
+ * Create a digested dictionary for compression
+ * Dictionary content is just referenced, not duplicated.
+ * As a consequence, `dictBuffer` **must** outlive CDict,
+ * and its content must remain unmodified throughout the lifetime of CDict.
+ * note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
+
+/*! ZSTD_getDictID_fromCDict() :
+ * Provides the dictID of the dictionary loaded into `cdict`.
+ * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict);
+
+/*! ZSTD_getCParams() :
+ * @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
+ * `estimatedSrcSize` value is optional, select 0 if not known */
+ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
+
+/*! ZSTD_getParams() :
+ * same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
+ * All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */
+ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
+
+/*! ZSTD_checkCParams() :
+ * Ensure param values remain within authorized range.
+ * @return 0 on success, or an error code (can be checked with ZSTD_isError()) */
+ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
+
+/*! ZSTD_adjustCParams() :
+ * optimize params for a given `srcSize` and `dictSize`.
+ * `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN.
+ * `dictSize` must be `0` when there is no dictionary.
+ * cPar can be invalid : all parameters will be clamped within valid range in the @return struct.
+ * This function never fails (wide contract) */
+ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
+
+/*! ZSTD_compress_advanced() :
+ * Note : this function is now DEPRECATED.
+ * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.
+ * This prototype will be marked as deprecated and generate compilation warning on reaching v1.5.x */
+ZSTDLIB_API size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ ZSTD_parameters params);
+
+/*! ZSTD_compress_usingCDict_advanced() :
+ * Note : this function is now REDUNDANT.
+ * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.
+ * This prototype will be marked as deprecated and generate compilation warning in some future version */
+ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict,
+ ZSTD_frameParameters fParams);
+
+
+/*! ZSTD_CCtx_loadDictionary_byReference() :
+ * Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx.
+ * It saves some memory, but also requires that `dict` outlives its usage within `cctx` */
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
+
+/*! ZSTD_CCtx_loadDictionary_advanced() :
+ * Same as ZSTD_CCtx_loadDictionary(), but gives finer control over
+ * how to load the dictionary (by copy ? by reference ?)
+ * and how to interpret it (automatic ? force raw mode ? full mode only ?) */
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
+
+/*! ZSTD_CCtx_refPrefix_advanced() :
+ * Same as ZSTD_CCtx_refPrefix(), but gives finer control over
+ * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
+ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
+
+/* === experimental parameters === */
+/* these parameters can be used with ZSTD_CCtx_setParameter()
+ * they are not guaranteed to remain supported in the future */
+
+/* Enables rsyncable mode,
+ * which makes compressed files more rsync friendly
+ * by adding periodic synchronization points to the compressed data.
+ * The target average block size is ZSTD_c_jobSize / 2.
+ * It's possible to modify the job size to increase or decrease
+ * the granularity of the synchronization point.
+ * Once the jobSize is smaller than the window size,
+ * it will result in compression ratio degradation.
+ * NOTE 1: rsyncable mode only works when multithreading is enabled.
+ * NOTE 2: rsyncable performs poorly in combination with long range mode,
+ * since it will decrease the effectiveness of synchronization points,
+ * though mileage may vary.
+ * NOTE 3: Rsyncable mode limits maximum compression speed to ~400 MB/s.
+ * If the selected compression level is already running significantly slower,
+ * the overall speed won't be significantly impacted.
+ */
+#define ZSTD_c_rsyncable ZSTD_c_experimentalParam1
+
+/* Select a compression format.
+ * The value must be of type ZSTD_format_e.
+ * See ZSTD_format_e enum definition for details */
+#define ZSTD_c_format ZSTD_c_experimentalParam2
+
+/* Force back-reference distances to remain < windowSize,
+ * even when referencing into Dictionary content (default:0) */
+#define ZSTD_c_forceMaxWindow ZSTD_c_experimentalParam3
+
+/* Controls whether the contents of a CDict
+ * are used in place, or copied into the working context.
+ * Accepts values from the ZSTD_dictAttachPref_e enum.
+ * See the comments on that enum for an explanation of the feature. */
+#define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4
+
+/* Controls how the literals are compressed (default is auto).
+ * The value must be of type ZSTD_literalCompressionMode_e.
+ * See ZSTD_literalCompressionMode_e enum definition for details.
+ */
+#define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5
+
+/* Tries to fit compressed block size to be around targetCBlockSize.
+ * No target when targetCBlockSize == 0.
+ * There is no guarantee on compressed block size (default:0) */
+#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6
+
+/* User's best guess of source size.
+ * Hint is not valid when srcSizeHint == 0.
+ * There is no guarantee that hint is close to actual source size,
+ * but compression ratio may regress significantly if the guess considerably underestimates it */
+#define ZSTD_c_srcSizeHint ZSTD_c_experimentalParam7
+
+/* Controls whether the new and experimental "dedicated dictionary search
+ * structure" can be used. This feature is still rough around the edges, be
+ * prepared for surprising behavior!
+ *
+ * How to use it:
+ *
+ * When using a CDict, whether to use this feature or not is controlled at
+ * CDict creation, and it must be set in a CCtxParams set passed into that
+ * construction (via ZSTD_createCDict_advanced2()). A compression will then
+ * use the feature or not based on how the CDict was constructed; the value of
+ * this param, set in the CCtx, will have no effect.
+ *
+ * However, when a dictionary buffer is passed into a CCtx, such as via
+ * ZSTD_CCtx_loadDictionary(), this param can be set on the CCtx to control
+ * whether the CDict that is created internally can use the feature or not.
+ *
+ * What it does:
+ *
+ * Normally, the internal data structures of the CDict are analogous to what
+ * would be stored in a CCtx after compressing the contents of a dictionary.
+ * To an approximation, a compression using a dictionary can then use those
+ * data structures to simply continue what is effectively a streaming
+ * compression where the simulated compression of the dictionary left off.
+ * Which is to say, the search structures in the CDict are normally the same
+ * format as in the CCtx.
+ *
+ * It is possible to do better, since the CDict is not like a CCtx: the search
+ * structures are written once during CDict creation, and then are only read
+ * after that, while the search structures in the CCtx are both read and
+ * written as the compression goes along. This means we can choose a search
+ * structure for the dictionary that is read-optimized.
+ *
+ * This feature enables the use of that different structure.
+ *
+ * Note that some of the members of the ZSTD_compressionParameters struct have
+ * different semantics and constraints in the dedicated search structure. It is
+ * highly recommended that you simply set a compression level in the CCtxParams
+ * you pass into the CDict creation call, and avoid messing with the cParams
+ * directly.
+ *
+ * Effects:
+ *
+ * This will only have any effect when the selected ZSTD_strategy
+ * implementation supports this feature. Currently, that's limited to
+ * ZSTD_greedy, ZSTD_lazy, and ZSTD_lazy2.
+ *
+ * Note that this means that the CDict tables can no longer be copied into the
+ * CCtx, so the dict attachment mode ZSTD_dictForceCopy will no longer be
+ * useable. The dictionary can only be attached or reloaded.
+ *
+ * In general, you should expect compression to be faster--sometimes very much
+ * so--and CDict creation to be slightly slower. Eventually, we will probably
+ * make this mode the default.
+ */
+#define ZSTD_c_enableDedicatedDictSearch ZSTD_c_experimentalParam8
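+
+/* Example : an illustrative sketch (not from the upstream header) of enabling
+ * the dedicated dictionary search structure at CDict creation. It assumes the
+ * experimental ZSTD_createCDict_advanced2() prototype and ZSTD_defaultCMem;
+ * `dictBuf`/`dictLen` are placeholders, error handling omitted.
+ *
+ *   ZSTD_CCtx_params* const p = ZSTD_createCCtxParams();
+ *   ZSTD_CCtxParams_setParameter(p, ZSTD_c_compressionLevel, 3);
+ *   ZSTD_CCtxParams_setParameter(p, ZSTD_c_enableDedicatedDictSearch, 1);
+ *   ZSTD_CDict* const cdict = ZSTD_createCDict_advanced2(dictBuf, dictLen,
+ *           ZSTD_dlm_byCopy, ZSTD_dct_auto, p, ZSTD_defaultCMem);
+ *   ZSTD_freeCCtxParams(p);
+ */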
+
+/* ZSTD_c_stableInBuffer
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * Tells the compressor that the ZSTD_inBuffer will ALWAYS be the same
+ * between calls, except for the modifications that zstd makes to pos (the
+ * caller must not modify pos). This is checked by the compressor, and
+ * compression will fail if it ever changes. This means the only flush
+ * mode that makes sense is ZSTD_e_end, so zstd will error if ZSTD_e_end
+ * is not used. The data in the ZSTD_inBuffer in the range [src, src + pos)
+ * MUST not be modified during compression or you will get data corruption.
+ *
+ * When this flag is enabled zstd won't allocate an input window buffer,
+ * because the user guarantees it can reference the ZSTD_inBuffer until
+ * the frame is complete. But, it will still allocate an output buffer
+ * large enough to fit a block (see ZSTD_c_stableOutBuffer). This will also
+ * avoid the memcpy() from the input buffer to the input window buffer.
+ *
+ * NOTE: ZSTD_compressStream2() will error if ZSTD_e_end is not used.
+ * That means this flag cannot be used with ZSTD_compressStream().
+ *
+ * NOTE: So long as the ZSTD_inBuffer always points to valid memory, using
+ * this flag is ALWAYS memory safe, and will never access out-of-bounds
+ * memory. However, compression WILL fail if you violate the preconditions.
+ *
+ * WARNING: The data in the ZSTD_inBuffer in the range [src, src + pos) MUST
+ * not be modified during compression or you will get data corruption. This
+ * is because zstd needs to reference data in the ZSTD_inBuffer to find
+ * matches. Normally zstd maintains its own window buffer for this purpose,
+ * but passing this flag tells zstd to use the user provided buffer.
+ */
+#define ZSTD_c_stableInBuffer ZSTD_c_experimentalParam9
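+
+/* Example : an illustrative sketch (not from the upstream header) of
+ * single-frame compression with a stable input buffer. The whole input is
+ * presented once and flushed with ZSTD_e_end, the only mode this flag
+ * permits; `src`/`srcSize`/`dst`/`dstCapacity` are placeholders.
+ *
+ *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_stableInBuffer, 1);
+ *   ZSTD_inBuffer  in  = { src, srcSize, 0 };
+ *   ZSTD_outBuffer out = { dst, dstCapacity, 0 };
+ *   size_t remaining;
+ *   do {
+ *       remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
+ *   } while (remaining != 0 && !ZSTD_isError(remaining));
+ */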
+
+/* ZSTD_c_stableOutBuffer
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * Tells the compressor that the ZSTD_outBuffer will not be resized between
+ * calls. Specifically: (out.size - out.pos) will never grow. This gives the
+ * compressor the freedom to say: If the compressed data doesn't fit in the
+ * output buffer then return ZSTD_error_dstSizeTooSmall. This allows the
+ * compressor to always write directly into the output buffer, instead of
+ * compressing into an internal buffer and copying to the output buffer.
+ *
+ * When this flag is enabled zstd won't allocate an output buffer, because
+ * it can write directly to the ZSTD_outBuffer. It will still allocate the
+ * input window buffer (see ZSTD_c_stableInBuffer).
+ *
+ * Zstd will check that (out.size - out.pos) never grows and return an error
+ * if it does. While not strictly necessary, this should prevent surprises.
+ */
+#define ZSTD_c_stableOutBuffer ZSTD_c_experimentalParam10
+
+/* ZSTD_c_blockDelimiters
+ * Default is 0 == ZSTD_sf_noBlockDelimiters.
+ *
+ * For use with sequence compression API: ZSTD_compressSequences().
+ *
+ * Designates whether or not the given array of ZSTD_Sequence contains block delimiters
+ * and last literals, which are defined as sequences with offset == 0 and matchLength == 0.
+ * See the definition of ZSTD_Sequence for more specifics.
+ */
+#define ZSTD_c_blockDelimiters ZSTD_c_experimentalParam11
+
+/* ZSTD_c_validateSequences
+ * Default is 0 == disabled. Set to 1 to enable sequence validation.
+ *
+ * For use with sequence compression API: ZSTD_compressSequences().
+ * Designates whether or not we validate sequences provided to ZSTD_compressSequences()
+ * during function execution.
+ *
+ * Without validation, providing a sequence that does not conform to the zstd spec will cause
+ * undefined behavior, and may produce a corrupted block.
+ *
+ * With validation enabled, if a sequence is invalid (see doc/zstd_compression_format.md for
+ * specifics regarding offset/matchlength requirements) then the function will bail out and
+ * return an error.
+ */
+#define ZSTD_c_validateSequences ZSTD_c_experimentalParam12
+
+/*! ZSTD_CCtx_getParameter() :
+ * Get the requested compression parameter value, selected by enum ZSTD_cParameter,
+ * and store it into int* value.
+ * @return : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value);
+
+
+/*! ZSTD_CCtx_params :
+ * Quick howto :
+ * - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure
+ * - ZSTD_CCtxParams_setParameter() : Push parameters one by one into
+ * an existing ZSTD_CCtx_params structure.
+ * This is similar to
+ * ZSTD_CCtx_setParameter().
+ * - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to
+ * an existing CCtx.
+ * These parameters will be applied to
+ * all subsequent frames.
+ * - ZSTD_compressStream2() : Do compression using the CCtx.
+ * - ZSTD_freeCCtxParams() : Free the memory, accept NULL pointer.
+ *
+ * This can be used with ZSTD_estimateCCtxSize_usingCCtxParams()
+ * for static allocation of CCtx for single-threaded compression.
+ */
+ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
+ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params); /* accept NULL pointer */
+
+/*! ZSTD_CCtxParams_reset() :
+ * Reset params to default values.
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params);
+
+/*! ZSTD_CCtxParams_init() :
+ * Initializes the compression parameters of cctxParams according to
+ * compression level. All other parameters are reset to their default values.
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel);
+
+/*! ZSTD_CCtxParams_init_advanced() :
+ * Initializes the compression and frame parameters of cctxParams according to
+ * params. All other parameters are reset to their default values.
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
+
+/*! ZSTD_CCtxParams_setParameter() :
+ * Similar to ZSTD_CCtx_setParameter.
+ * Set one compression parameter, selected by enum ZSTD_cParameter.
+ * Parameters must be applied to a ZSTD_CCtx using
+ * ZSTD_CCtx_setParametersUsingCCtxParams().
+ * @result : a code representing success or failure (which can be tested with
+ * ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value);
+
+/*! ZSTD_CCtxParams_getParameter() :
+ * Similar to ZSTD_CCtx_getParameter.
+ * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);
+
+/*! ZSTD_CCtx_setParametersUsingCCtxParams() :
+ * Apply a set of ZSTD_CCtx_params to the compression context.
+ * This can be done even after compression is started.
+ * If nbWorkers==0, this will have no impact until a new compression is started.
+ * If nbWorkers>=1, new parameters will be picked up at the next job,
+ * with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated).
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(
+ ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params);
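+
+/* Example : the quick howto above, as an illustrative sketch (not from the
+ * upstream header); `cctx` is an existing ZSTD_CCtx*, error handling omitted.
+ * The params object is copied on application, so it may be freed immediately.
+ *
+ *   ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
+ *   ZSTD_CCtxParams_setParameter(params, ZSTD_c_compressionLevel, 19);
+ *   ZSTD_CCtxParams_setParameter(params, ZSTD_c_checksumFlag, 1);
+ *   ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);
+ *   ZSTD_freeCCtxParams(params);
+ */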
+
+/*! ZSTD_compressStream2_simpleArgs() :
+ * Same as ZSTD_compressStream2(),
+ * but using only integral types as arguments.
+ * This variant might be helpful for binders from dynamic languages
+ * which have troubles handling structures containing memory pointers.
+ */
+ZSTDLIB_API size_t ZSTD_compressStream2_simpleArgs (
+ ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos,
+ ZSTD_EndDirective endOp);
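+
+/* Example : an illustrative sketch (not from the upstream header); the
+ * integral-types variant tracks positions through explicit size_t variables
+ * instead of ZSTD_inBuffer/ZSTD_outBuffer structures.
+ *
+ *   size_t dstPos = 0, srcPos = 0;
+ *   size_t const r = ZSTD_compressStream2_simpleArgs(cctx,
+ *                        dst, dstCapacity, &dstPos,
+ *                        src, srcSize, &srcPos, ZSTD_e_end);
+ */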
+
+
+/* *************************************
+* Advanced decompression functions
+***************************************/
+
+/*! ZSTD_isFrame() :
+ * Tells if the content of `buffer` starts with a valid Frame Identifier.
+ * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
+ * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
+ * Note 3 : Skippable Frame Identifiers are considered valid. */
+ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size);
+
+/*! ZSTD_createDDict_byReference() :
+ * Create a digested dictionary, ready to start decompression operation without startup delay.
+ * Dictionary content is referenced, and therefore stays in dictBuffer.
+ * It is important that dictBuffer outlives DDict:
+ * it must remain read accessible throughout the lifetime of DDict */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
+
+/*! ZSTD_DCtx_loadDictionary_byReference() :
+ * Same as ZSTD_DCtx_loadDictionary(),
+ * but references `dict` content instead of copying it into `dctx`.
+ * This saves memory if `dict` remains around.
+ * However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+
+/*! ZSTD_DCtx_loadDictionary_advanced() :
+ * Same as ZSTD_DCtx_loadDictionary(),
+ * but gives direct control over
+ * how to load the dictionary (by copy ? by reference ?)
+ * and how to interpret it (automatic ? force raw mode ? full mode only ?). */
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
+
+/*! ZSTD_DCtx_refPrefix_advanced() :
+ * Same as ZSTD_DCtx_refPrefix(), but gives finer control over
+ * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
+ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
+
+/*! ZSTD_DCtx_setMaxWindowSize() :
+ * Refuses to allocate internal buffers for frames requiring a window size larger than the provided limit.
+ * This protects a decoder context from reserving too much memory for itself (potential attack scenario).
+ * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
+ * By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT)
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);
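+
+/* Example : an illustrative sketch (not from the upstream header) capping
+ * streaming decompression at a 128 MB window to bound worst-case memory use;
+ * the exact limit is application policy.
+ *
+ *   ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+ *   ZSTD_DCtx_setMaxWindowSize(dctx, (size_t)1 << 27);
+ */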
+
+/*! ZSTD_DCtx_getParameter() :
+ * Get the requested decompression parameter value, selected by enum ZSTD_dParameter,
+ * and store it into int* value.
+ * @return : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value);
+
+/* ZSTD_d_format
+ * experimental parameter,
+ * allowing selection between ZSTD_format_e input compression formats
+ */
+#define ZSTD_d_format ZSTD_d_experimentalParam1
+/* ZSTD_d_stableOutBuffer
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * Tells the decompressor that the ZSTD_outBuffer will ALWAYS be the same
+ * between calls, except for the modifications that zstd makes to pos (the
+ * caller must not modify pos). This is checked by the decompressor, and
+ * decompression will fail if it ever changes. Therefore the ZSTD_outBuffer
+ * MUST be large enough to fit the entire decompressed frame. This will be
+ * checked when the frame content size is known. The data in the ZSTD_outBuffer
+ * in the range [dst, dst + pos) MUST not be modified during decompression
+ * or you will get data corruption.
+ *
+ * When this flag is enabled zstd won't allocate an output buffer, because
+ * it can write directly to the ZSTD_outBuffer, but it will still allocate
+ * an input buffer large enough to fit any compressed block. This will also
+ * avoid the memcpy() from the internal output buffer to the ZSTD_outBuffer.
+ * If you need to avoid the input buffer allocation use the buffer-less
+ * streaming API.
+ *
+ * NOTE: So long as the ZSTD_outBuffer always points to valid memory, using
+ * this flag is ALWAYS memory safe, and will never access out-of-bounds
+ * memory. However, decompression WILL fail if you violate the preconditions.
+ *
+ * WARNING: The data in the ZSTD_outBuffer in the range [dst, dst + pos) MUST
+ * not be modified during decompression or you will get data corruption. This
+ * is because zstd needs to reference data in the ZSTD_outBuffer to regenerate
+ * matches. Normally zstd maintains its own buffer for this purpose, but passing
+ * this flag tells zstd to use the user provided buffer.
+ */
+#define ZSTD_d_stableOutBuffer ZSTD_d_experimentalParam2
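+
+/* Example : an illustrative sketch (not from the upstream header) of
+ * decompressing a frame of known content size directly into a caller-owned
+ * buffer. The ZSTD_getFrameContentSize() result must first be checked against
+ * ZSTD_CONTENTSIZE_UNKNOWN / ZSTD_CONTENTSIZE_ERROR (omitted here).
+ *
+ *   unsigned long long const fcs = ZSTD_getFrameContentSize(src, srcSize);
+ *   ZSTD_DCtx_setParameter(dctx, ZSTD_d_stableOutBuffer, 1);
+ *   ZSTD_outBuffer out = { dst, (size_t)fcs, 0 };
+ *   ZSTD_inBuffer  in  = { src, srcSize, 0 };
+ *   size_t const r = ZSTD_decompressStream(dctx, &out, &in);
+ */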
+
+/* ZSTD_d_forceIgnoreChecksum
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * Tells the decompressor to skip checksum validation during decompression, regardless
+ * of whether checksumming was specified during compression. This offers some
+ * slight performance benefits, and may be useful for debugging.
+ * Param has values of type ZSTD_forceIgnoreChecksum_e
+ */
+#define ZSTD_d_forceIgnoreChecksum ZSTD_d_experimentalParam3
+
+/* ZSTD_d_refMultipleDDicts
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * If enabled and dctx is allocated on the heap, then additional memory will be allocated
+ * to store references to multiple ZSTD_DDict. That is, multiple calls of ZSTD_DCtx_refDDict()
+ * using a given ZSTD_DCtx, rather than overwriting the previous DDict reference, will instead
+ * store all references. At decompression time, the appropriate dictID is selected
+ * from the set of DDicts based on the dictID in the frame.
+ *
+ * Usage is simply calling ZSTD_DCtx_refDDict() on multiple dict buffers.
+ *
+ * Param has values of type ZSTD_refMultipleDDicts_e
+ *
+ * WARNING: Enabling this parameter and calling ZSTD_DCtx_refDDict() will trigger memory
+ * allocation for the hash table. ZSTD_freeDCtx() also frees this memory.
+ * Memory is allocated as per ZSTD_DCtx::customMem.
+ *
+ * Although this function allocates memory for the table, the user is still responsible for
+ * memory management of the underlying ZSTD_DDict* themselves.
+ */
+#define ZSTD_d_refMultipleDDicts ZSTD_d_experimentalParam4
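+
+/* Example : an illustrative sketch (not from the upstream header); with the
+ * parameter enabled, each referenced DDict is retained, and the dictID in the
+ * frame header selects the right one at decompression time.
+ *
+ *   ZSTD_DCtx_setParameter(dctx, ZSTD_d_refMultipleDDicts, 1);
+ *   ZSTD_DCtx_refDDict(dctx, ddict1);
+ *   ZSTD_DCtx_refDDict(dctx, ddict2);   (both remain referenced)
+ */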
+
+
+/*! ZSTD_DCtx_setFormat() :
+ * Instruct the decoder context about what kind of data to decode next.
+ * This instruction is mandatory to decode data without a fully-formed header,
+ * such as ZSTD_f_zstd1_magicless for example.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()). */
+ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
+
+/*! ZSTD_decompressStream_simpleArgs() :
+ * Same as ZSTD_decompressStream(),
+ * but using only integral types as arguments.
+ * This can be helpful for binders from dynamic languages
+ * which have troubles handling structures containing memory pointers.
+ */
+ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs (
+ ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos);
+
+
+/* ******************************************************************
+* Advanced streaming functions
+* Warning : most of these functions are now redundant with the Advanced API.
+* Once Advanced API reaches "stable" status,
+* redundant functions will be deprecated, and then at some point removed.
+********************************************************************/
+
+/*===== Advanced Streaming compression functions =====*/
+
+/*! ZSTD_initCStream_srcSize() :
+ * This function is deprecated, and equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
+ * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ *
+ * pledgedSrcSize must be correct. If it is not known at init time, use
+ * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,
+ * "0" also disables frame content size field. It may be enabled in the future.
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t
+ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
+ int compressionLevel,
+ unsigned long long pledgedSrcSize);
+
+/*! ZSTD_initCStream_usingDict() :
+ * This function is deprecated, and is equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
+ *
+ * Creates an internal CDict (incompatible with static CCtx), except if
+ * dict == NULL or dictSize < 8, in which case no dict is used.
+ * Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if
+ * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t
+ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize,
+ int compressionLevel);
+
+/*! ZSTD_initCStream_advanced() :
+ * This function is deprecated, and is approximately equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * // Pseudocode: Set each zstd parameter and leave the rest as-is.
+ * for ((param, value) : params) {
+ * ZSTD_CCtx_setParameter(zcs, param, value);
+ * }
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
+ *
+ * dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy.
+ * pledgedSrcSize must be correct.
+ * If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t
+ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize,
+ ZSTD_parameters params,
+ unsigned long long pledgedSrcSize);
+
+/*! ZSTD_initCStream_usingCDict() :
+ * This function is deprecated, and equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_refCDict(zcs, cdict);
+ *
+ * note : cdict will just be referenced, and must outlive compression session
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
+
+/*! ZSTD_initCStream_usingCDict_advanced() :
+ * This function is DEPRECATED, and is approximately equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * // Pseudocode: Set each zstd frame parameter and leave the rest as-is.
+ * for ((fParam, value) : fParams) {
+ * ZSTD_CCtx_setParameter(zcs, fParam, value);
+ * }
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ * ZSTD_CCtx_refCDict(zcs, cdict);
+ *
+ * same as ZSTD_initCStream_usingCDict(), with control over frame parameters.
+ * pledgedSrcSize must be correct. If srcSize is not known at init time, use
+ * value ZSTD_CONTENTSIZE_UNKNOWN.
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t
+ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
+ const ZSTD_CDict* cdict,
+ ZSTD_frameParameters fParams,
+ unsigned long long pledgedSrcSize);
+
+/*! ZSTD_resetCStream() :
+ * This function is deprecated, and is equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ *
+ * Start a new frame, using the same parameters as the previous frame.
+ * This is typically useful to skip the dictionary loading stage, since the dictionary is re-used in-place.
+ * Note that zcs must be initialized at least once before using ZSTD_resetCStream().
+ * If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.
+ * If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.
+ * For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs,
+ * but it will change to mean "empty" in a future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError())
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
+
+
+typedef struct {
+ unsigned long long ingested; /* nb input bytes read and buffered */
+ unsigned long long consumed; /* nb input bytes actually compressed */
+ unsigned long long produced; /* nb of compressed bytes generated and buffered */
+ unsigned long long flushed; /* nb of compressed bytes flushed : not provided; can be tracked from caller side */
+ unsigned currentJobID; /* MT only : latest started job nb */
+ unsigned nbActiveWorkers; /* MT only : nb of workers actively compressing at probe time */
+} ZSTD_frameProgression;
+
+/* ZSTD_getFrameProgression() :
+ * tells how much data has been ingested (read from input),
+ * consumed (input actually compressed) and produced (output) for the current frame.
+ * Note : (ingested - consumed) is amount of input data buffered internally, not yet compressed.
+ * Aggregates progression inside active worker threads.
+ */
+ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx);
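+
+/* Example : an illustrative sketch (not from the upstream header) of probing
+ * progression from another thread while a multi-threaded compression runs;
+ * all fields are cumulative byte counts for the current frame.
+ *
+ *   ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
+ *   unsigned long long const stillBuffered = fp.ingested - fp.consumed;
+ */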
+
+/*! ZSTD_toFlushNow() :
+ * Tell how many bytes are ready to be flushed immediately.
+ * Useful for multithreading scenarios (nbWorkers >= 1).
+ * Probe the oldest active job, defined as oldest job not yet entirely flushed,
+ * and check its output buffer.
+ * @return : amount of data stored in oldest job and ready to be flushed immediately.
+ * if @return == 0, it means either :
+ * + there is no active job (could be checked with ZSTD_frameProgression()), or
+ * + oldest job is still actively compressing data,
+ * but everything it has produced has also been flushed so far,
+ * therefore flush speed is limited by production speed of oldest job
+ * irrespective of the speed of concurrent (and newer) jobs.
+ */
+ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
+
+
+/*===== Advanced Streaming decompression functions =====*/
+
+/*!
+ * This function is deprecated, and is equivalent to:
+ *
+ * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ * ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
+ *
+ * note: no dictionary will be used if dict == NULL or dictSize < 8
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
+
+/*!
+ * This function is deprecated, and is equivalent to:
+ *
+ * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ * ZSTD_DCtx_refDDict(zds, ddict);
+ *
+ * note : ddict is referenced, it must outlive decompression session
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
+
+/*!
+ * This function is deprecated, and is equivalent to:
+ *
+ * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ *
+ * re-use decompression parameters from previous init; saves dictionary loading
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
+
+
+/* *******************************************************************
+* Buffer-less and synchronous inner streaming functions
+*
+* This is an advanced API, giving full control over buffer management, for users who need direct control over memory.
+* But it's also a complex one, with several restrictions, documented below.
+* Prefer normal streaming API for an easier experience.
+********************************************************************* */
+
+/*
+ Buffer-less streaming compression (synchronous mode)
+
+ A ZSTD_CCtx object is required to track streaming operations.
+ Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resources.
+ ZSTD_CCtx object can be re-used multiple times within successive compression operations.
+
+ Start by initializing a context.
+ Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression,
+ or ZSTD_compressBegin_advanced(), for finer parameter control.
+ It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx()
+
+ Then, consume your input using ZSTD_compressContinue().
+ There are some important considerations to keep in mind when using this advanced function :
+ - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.
+ - Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.
+ - Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario.
+ Worst case evaluation is provided by ZSTD_compressBound().
+ ZSTD_compressContinue() doesn't guarantee recovery after a failed compression.
+ - ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog).
+ It remembers all previous contiguous blocks, plus one separated memory segment (which can itself consist of multiple contiguous blocks)
+ - ZSTD_compressContinue() detects that prior input has been overwritten when `src` buffer overlaps.
+ In which case, it will "discard" the relevant memory section from its history.
+
+ Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.
+ It's possible to use srcSize==0, in which case it will write a final empty block to end the frame.
+ Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
+
+ `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
+*/
+
+/*===== Buffer-less streaming compression functions =====*/
+ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /*< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
+ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /*< note: fails if cdict==NULL */
+ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
+ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /*< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
+
+ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
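+
+/* Example : an illustrative sketch (not from the upstream header) of the
+ * buffer-less compression loop described above; `chunks[]`/`nbChunks` and the
+ * unsigned char* `dst` are placeholders, ZSTD_isError() checks omitted.
+ *
+ *   ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+ *   size_t pos = 0;
+ *   ZSTD_compressBegin(cctx, 3);
+ *   for (size_t i = 0; i < nbChunks; i++)
+ *       pos += ZSTD_compressContinue(cctx, dst + pos, dstCapacity - pos,
+ *                                    chunks[i].ptr, chunks[i].size);
+ *   pos += ZSTD_compressEnd(cctx, dst + pos, dstCapacity - pos, NULL, 0);
+ */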
+
+
+/*
+ Buffer-less streaming decompression (synchronous mode)
+
+ A ZSTD_DCtx object is required to track streaming operations.
+ Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
+ A ZSTD_DCtx object can be re-used multiple times.
+
+ First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
+ Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
+ Data fragment must be large enough to ensure successful decoding.
+ `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
+ @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
+ >0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
+ or an error code, which can be tested using ZSTD_isError().
+
+ It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
+ such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
+ Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
+ As a consequence, check that values remain within valid application range.
+ For example, do not allocate memory blindly, check that `windowSize` is within expectation.
+ Each application can set its own limits, depending on local restrictions.
+ For extended interoperability, it is recommended to support `windowSize` of at least 8 MB.
+
+ ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.
+ ZSTD_decompressContinue() is very sensitive to contiguity,
+ if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
+ or that previous contiguous segment is large enough to properly handle maximum back-reference distance.
+ There are multiple ways to guarantee this condition.
+
+ The most memory efficient way is to use a round buffer of sufficient size.
+ Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
+ which can @return an error code if the required value is too large for the current system (in 32-bit mode).
+ In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,
+ up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
+ whose maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`.
+ At which point, decoding can resume from the beginning of the buffer.
+ Note that already decoded data stored in the buffer should be flushed before being overwritten.
+
+ There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.
+
+ Finally, if you control the compression process, you can also ignore all buffer size rules,
+ as long as the encoder and decoder progress in "lock-step",
+ aka use exactly the same buffer sizes, break contiguity at the same place, etc.
+
+ Once buffers are set up, start decompression, with ZSTD_decompressBegin().
+ If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().
+
+ Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternately.
+ ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
+ ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
+
+ @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
+ It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
+ It can also be an error code, which can be tested with ZSTD_isError().
+
+ A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
+ Context can then be reset to start a new decompression.
+
+ Note : it's possible to know if next input to present is a header or a block, using ZSTD_nextInputType().
+ This information is not required to properly decode a frame.
+
+ == Special case : skippable frames ==
+
+ Skippable frames allow integration of user-defined data into a flow of concatenated frames.
+ Skippable frames will be ignored (skipped) by decompressor.
+ The format of skippable frames is as follows :
+ a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F
+ b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
+ c) Frame Content - any content (User Data) of length equal to Frame Size
+ For skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.
+ For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content.
+*/
+
+/*===== Buffer-less streaming decompression functions =====*/
+typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
+typedef struct {
+ unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
+ unsigned long long windowSize; /* can be very large; bounded above by frameContentSize */
+ unsigned blockSizeMax;
+ ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
+ unsigned headerSize;
+ unsigned dictID;
+ unsigned checksumFlag;
+} ZSTD_frameHeader;
+
+/*! ZSTD_getFrameHeader() :
+ * decode Frame Header, or requires larger `srcSize`.
+ * @return : 0, `zfhPtr` is correctly filled,
+ * >0, `srcSize` is too small, value is wanted `srcSize` amount,
+ * or an error code, which can be tested using ZSTD_isError() */
+ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /*< doesn't consume input */
+/*! ZSTD_getFrameHeader_advanced() :
+ * same as ZSTD_getFrameHeader(),
+ * with added capability to select a format (like ZSTD_f_zstd1_magicless) */
+ZSTDLIB_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
+ZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /*< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
+
+ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+ZSTDLIB_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
+
+ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
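+
+/* Example : an illustrative sketch (not from the upstream header) of the
+ * decompression loop described above, using one contiguous output buffer
+ * rather than a round buffer; the input is assumed fully in memory and
+ * ZSTD_isError() checks are omitted.
+ *
+ *   ZSTD_frameHeader zfh;
+ *   ZSTD_getFrameHeader(&zfh, src, srcSize);   (validate zfh.windowSize!)
+ *   ZSTD_decompressBegin(dctx);
+ *   size_t srcPos = 0, dstPos = 0, next;
+ *   while ((next = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) {
+ *       size_t const got = ZSTD_decompressContinue(dctx,
+ *               dst + dstPos, dstCapacity - dstPos, src + srcPos, next);
+ *       srcPos += next; dstPos += got;
+ *   }
+ */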
+
+/* misc */
+ZSTDLIB_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
+typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
+ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
+
+
+
+
+/* ============================ */
+/* Block level API */
+/* ============================ */
+
+/*!
+ Block functions produce and decode raw zstd blocks, without frame metadata.
+ Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).
+ But users must then manage the metadata needed to regenerate the data, such as compressed and content sizes.
+
+ A few rules to respect :
+ - Compressing and decompressing require a context structure
+ + Use ZSTD_createCCtx() and ZSTD_createDCtx()
+ - It is necessary to initialize the context before starting
+ + compression : any ZSTD_compressBegin*() variant, including with dictionary
+ + decompression : any ZSTD_decompressBegin*() variant, including with dictionary
+ + copyCCtx() and copyDCtx() can be used too
+ - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
+ + If input is larger than a block size, it's necessary to split input data into multiple blocks
+ + For inputs larger than a single block, consider using regular ZSTD_compress() instead.
+ Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.
+ - When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) !
+ ===> In which case, nothing is produced into `dst` !
+ + User __must__ test for such outcome and deal directly with uncompressed data
+ + A block cannot be declared incompressible if ZSTD_compressBlock() return value was != 0.
+ Doing so would mess up the statistics history, leading to potential data corruption.
+ + ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !!
+ + In case of multiple successive blocks, should some of them be uncompressed,
+ decoder must be informed of their existence in order to follow proper history.
+ Use ZSTD_insertBlock() for such a case.
+*/
+
+/*===== Raw zstd block functions =====*/
+ZSTDLIB_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx);
+ZSTDLIB_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /*< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
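+
+/* Example : an illustrative sketch (not from the upstream header) of the
+ * decode-side handling of raw blocks: when the application's own framing says
+ * the encoder stored a block uncompressed, it must be inserted, not decoded.
+ * `block_is_raw`, `blockPtr` and `blockSize` are placeholders.
+ *
+ *   if (block_is_raw) {
+ *       memcpy(dst, blockPtr, blockSize);
+ *       ZSTD_insertBlock(dctx, dst, blockSize);
+ *   } else {
+ *       ZSTD_decompressBlock(dctx, dst, dstCapacity, blockPtr, blockSize);
+ *   }
+ */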
+
+
+#endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
+
diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h
index adcc6a97db61..8ce066c035cf 100644
--- a/include/math-emu/op-common.h
+++ b/include/math-emu/op-common.h
@@ -308,7 +308,7 @@ do { \
\
case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \
R##_e = X##_e; \
- /* Fall through */ \
+ fallthrough; \
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NORMAL): \
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \
@@ -319,7 +319,7 @@ do { \
\
case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NORMAL): \
R##_e = Y##_e; \
- /* Fall through */ \
+ fallthrough; \
case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_NAN): \
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \
case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \
@@ -338,7 +338,7 @@ do { \
FP_SET_EXCEPTION(FP_EX_INVALID | FP_EX_INVALID_ISI); \
break; \
} \
- /* FALLTHRU */ \
+ fallthrough; \
\
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO): \
@@ -417,7 +417,7 @@ do { \
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \
R##_s = X##_s; \
- /* Fall through */ \
+ fallthrough; \
\
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_INF): \
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \
@@ -431,7 +431,7 @@ do { \
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \
case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \
R##_s = Y##_s; \
- /* Fall through */ \
+ fallthrough; \
\
case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_INF): \
case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \
@@ -497,7 +497,7 @@ do { \
\
case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \
FP_SET_EXCEPTION(FP_EX_DIVZERO); \
- /* Fall through */ \
+ fallthrough; \
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO): \
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \
R##_c = FP_CLS_INF; \
@@ -662,12 +662,14 @@ do { \
if (X##_e < 0) \
{ \
FP_SET_EXCEPTION(FP_EX_INEXACT); \
+ fallthrough; \
case FP_CLS_ZERO: \
r = 0; \
} \
else if (X##_e >= rsize - (rsigned > 0 || X##_s) \
|| (!rsigned && X##_s)) \
{ /* overflow */ \
+ fallthrough; \
case FP_CLS_NAN: \
case FP_CLS_INF: \
if (rsigned == 2) \
@@ -767,6 +769,7 @@ do { \
if (X##_e >= rsize - (rsigned > 0 || X##_s) \
|| (!rsigned && X##_s)) \
{ /* overflow */ \
+ fallthrough; \
case FP_CLS_NAN: \
case FP_CLS_INF: \
if (!rsigned) \
diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h
index 139e93be13b0..b1c839734124 100644
--- a/include/media/cec-notifier.h
+++ b/include/media/cec-notifier.h
@@ -2,7 +2,7 @@
/*
* cec-notifier.h - notify CEC drivers of physical address changes
*
- * Copyright 2016 Russell King <rmk+kernel@arm.linux.org.uk>
+ * Copyright 2016 Russell King.
* Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
@@ -20,31 +20,14 @@ struct cec_notifier;
#if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER)
/**
- * cec_notifier_get_conn - find or create a new cec_notifier for the given
- * device and connector tuple.
- * @dev: device that sends the events.
- * @conn: the connector name from which the event occurs
- *
- * If a notifier for device @dev already exists, then increase the refcount
- * and return that notifier.
- *
- * If it doesn't exist, then allocate a new notifier struct and return a
- * pointer to that new struct.
- *
- * Return NULL if the memory could not be allocated.
- */
-struct cec_notifier *cec_notifier_get_conn(struct device *dev,
- const char *conn);
-
-/**
* cec_notifier_conn_register - find or create a new cec_notifier for the given
* HDMI device and connector tuple.
* @hdmi_dev: HDMI device that sends the events.
- * @conn_name: the connector name from which the event occurs. May be NULL
+ * @port_name: the connector name from which the event occurs. May be NULL
* if there is always only one HDMI connector created by the HDMI device.
* @conn_info: the connector info from which the event occurs (may be NULL)
*
- * If a notifier for device @dev and connector @conn_name already exists, then
+ * If a notifier for device @hdmi_dev and connector @port_name already exists, then
* increase the refcount and return that notifier.
*
* If it doesn't exist, then allocate a new notifier struct and return a
@@ -53,7 +36,7 @@ struct cec_notifier *cec_notifier_get_conn(struct device *dev,
* Return NULL if the memory could not be allocated.
*/
struct cec_notifier *
-cec_notifier_conn_register(struct device *hdmi_dev, const char *conn_name,
+cec_notifier_conn_register(struct device *hdmi_dev, const char *port_name,
const struct cec_connector_info *conn_info);
/**
@@ -67,11 +50,11 @@ void cec_notifier_conn_unregister(struct cec_notifier *n);
* cec_notifier_cec_adap_register - find or create a new cec_notifier for the
* given device.
* @hdmi_dev: HDMI device that sends the events.
- * @conn_name: the connector name from which the event occurs. May be NULL
+ * @port_name: the connector name from which the event occurs. May be NULL
* if there is always only one HDMI connector created by the HDMI device.
* @adap: the cec adapter that registered this notifier.
*
- * If a notifier for device @dev and connector @conn_name already exists, then
+ * If a notifier for device @hdmi_dev and connector @port_name already exists, then
* increase the refcount and return that notifier.
*
* If it doesn't exist, then allocate a new notifier struct and return a
@@ -80,7 +63,7 @@ void cec_notifier_conn_unregister(struct cec_notifier *n);
* Return NULL if the memory could not be allocated.
*/
struct cec_notifier *
-cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *conn_name,
+cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *port_name,
struct cec_adapter *adap);
/**
@@ -125,15 +108,9 @@ void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n,
struct device *cec_notifier_parse_hdmi_phandle(struct device *dev);
#else
-static inline struct cec_notifier *cec_notifier_get_conn(struct device *dev,
- const char *conn)
-{
- /* A non-NULL pointer is expected on success */
- return (struct cec_notifier *)0xdeadfeed;
-}
static inline struct cec_notifier *
-cec_notifier_conn_register(struct device *hdmi_dev, const char *conn_name,
+cec_notifier_conn_register(struct device *hdmi_dev, const char *port_name,
const struct cec_connector_info *conn_info)
{
/* A non-NULL pointer is expected on success */
@@ -145,7 +122,7 @@ static inline void cec_notifier_conn_unregister(struct cec_notifier *n)
}
static inline struct cec_notifier *
-cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *conn_name,
+cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *port_name,
struct cec_adapter *adap)
{
/* A non-NULL pointer is expected on success */
diff --git a/include/media/cec-pin.h b/include/media/cec-pin.h
index 88c8b016eb09..483bc4769fe9 100644
--- a/include/media/cec-pin.h
+++ b/include/media/cec-pin.h
@@ -13,7 +13,8 @@
/**
* struct cec_pin_ops - low-level CEC pin operations
- * @read: read the CEC pin. Return true if high, false if low.
+ * @read: read the CEC pin. Returns > 0 if high, 0 if low, or a
+ * negative error code.
* @low: drive the CEC pin low.
* @high: stop driving the CEC pin. The pull-up will drive the pin
* high, unless someone else is driving the pin low.
@@ -22,13 +23,10 @@
* @free: optional. Free any allocated resources. Called when the
* adapter is deleted.
* @status: optional, log status information.
- * @read_hpd: read the HPD pin. Return true if high, false if low or
- * an error if negative. If NULL or -ENOTTY is returned,
- * then this is not supported.
- * @read_5v: read the 5V pin. Return true if high, false if low or
- * an error if negative. If NULL or -ENOTTY is returned,
- * then this is not supported.
- *
+ * @read_hpd: optional. Read the HPD pin. Returns > 0 if high, 0 if low,
+ * or a negative error code.
+ * @read_5v: optional. Read the 5V pin. Returns > 0 if high, 0 if low,
+ * or a negative error code.
* @received: optional. High-level CEC message callback. Allows the driver
* to process CEC messages.
*
@@ -36,7 +34,7 @@
* cec pin framework to manipulate the CEC pin.
*/
struct cec_pin_ops {
- bool (*read)(struct cec_adapter *adap);
+ int (*read)(struct cec_adapter *adap);
void (*low)(struct cec_adapter *adap);
void (*high)(struct cec_adapter *adap);
bool (*enable_irq)(struct cec_adapter *adap);
diff --git a/include/media/cec.h b/include/media/cec.h
index 972bc8cd4384..abee41ae02d0 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -26,13 +26,17 @@
* @dev: cec device
* @cdev: cec character device
* @minor: device node minor number
+ * @lock: lock to serialize open/release and registration
* @registered: the device was correctly registered
* @unregistered: the device was unregistered
- * @fhs_lock: lock to control access to the filehandle list
+ * @lock_fhs: lock to control access to @fhs
* @fhs: the list of open filehandles (cec_fh)
*
* This structure represents a cec-related device node.
*
+ * To add or remove filehandles from @fhs the @lock must be taken first,
+ * followed by @lock_fhs. It is safe to access @fhs if either lock is held.
+ *
* The @parent is a physical device. It must be set by core or device drivers
* before registering the node.
*/
@@ -43,10 +47,13 @@ struct cec_devnode {
/* device info */
int minor;
+ /* serialize open/release and registration */
+ struct mutex lock;
bool registered;
bool unregistered;
+ /* protect access to fhs */
+ struct mutex lock_fhs;
struct list_head fhs;
- struct mutex lock;
};
struct cec_adapter;
@@ -111,6 +118,7 @@ struct cec_adap_ops {
int (*adap_monitor_all_enable)(struct cec_adapter *adap, bool enable);
int (*adap_monitor_pin_enable)(struct cec_adapter *adap, bool enable);
int (*adap_log_addr)(struct cec_adapter *adap, u8 logical_addr);
+ void (*adap_configured)(struct cec_adapter *adap, bool configured);
int (*adap_transmit)(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg);
void (*adap_status)(struct cec_adapter *adap, struct seq_file *file);
@@ -144,6 +152,69 @@ struct cec_adap_ops {
*/
#define CEC_MAX_MSG_TX_QUEUE_SZ (18 * 1)
+/**
+ * struct cec_adapter - cec adapter structure
+ * @owner: module owner
+ * @name: name of the CEC adapter
+ * @devnode: device node for the /dev/cecX device
+ * @lock: mutex controlling access to this structure
+ * @rc: remote control device
+ * @transmit_queue: queue of pending transmits
+ * @transmit_queue_sz: number of pending transmits
+ * @wait_queue: queue of transmits waiting for a reply
+ * @transmitting: CEC messages currently being transmitted
+ * @transmit_in_progress: true if a transmit is in progress
+ * @transmit_in_progress_aborted: true if the transmit currently in progress
+ * is to be aborted. This happens if the logical address is
+ * invalidated while the transmit is ongoing. In that
+ * case the transmit will finish, but will not be
+ * retransmitted, and will be marked as ABORTED.
+ * @xfer_timeout_ms: the transfer timeout in ms.
+ * If 0, then timeout after 2.1 ms.
+ * @kthread_config: kthread used to configure a CEC adapter
+ * @config_completion: used to signal completion of the config kthread
+ * @kthread: main CEC processing thread
+ * @kthread_waitq: main CEC processing wait_queue
+ * @ops: cec adapter ops
+ * @priv: cec driver's private data
+ * @capabilities: cec adapter capabilities
+ * @available_log_addrs: maximum number of available logical addresses
+ * @phys_addr: the current physical address
+ * @needs_hpd: if true, then the HDMI HotPlug Detect pin must be high
+ * in order to transmit or receive CEC messages. This is usually a HW
+ * limitation.
+ * @is_enabled: the CEC adapter is enabled
+ * @is_configuring: the CEC adapter is configuring (i.e. claiming LAs)
+ * @must_reconfigure: while configuring, the PA changed, so reclaim LAs
+ * @is_configured: the CEC adapter is configured (i.e. has claimed LAs)
+ * @cec_pin_is_high: if true then the CEC pin is high. Only used with the
+ * CEC pin framework.
+ * @adap_controls_phys_addr: if true, then the CEC adapter controls the
+ * physical address, i.e. the CEC hardware can detect HPD changes and
+ * read the EDID and is not dependent on an external HDMI driver.
+ * Drivers that need this can set this field to true after the
+ * cec_allocate_adapter() call.
+ * @last_initiator: the initiator of the last transmitted message.
+ * @monitor_all_cnt: number of filehandles monitoring all msgs
+ * @monitor_pin_cnt: number of filehandles monitoring pin changes
+ * @follower_cnt: number of filehandles in follower mode
+ * @cec_follower: filehandle of the exclusive follower
+ * @cec_initiator: filehandle of the exclusive initiator
+ * @passthrough: if true, then the exclusive follower is in
+ * passthrough mode.
+ * @log_addrs: current logical addresses
+ * @conn_info: current connector info
+ * @tx_timeouts: number of transmit timeouts
+ * @notifier: CEC notifier
+ * @pin: CEC pin status struct
+ * @cec_dir: debugfs cec directory
+ * @status_file: debugfs cec status file
+ * @error_inj_file: debugfs cec error injection file
+ * @sequence: transmit sequence counter
+ * @input_phys: remote control input_phys name
+ *
+ * This structure represents a cec adapter.
+ */
struct cec_adapter {
struct module *owner;
char name[32];
@@ -156,13 +227,14 @@ struct cec_adapter {
struct list_head wait_queue;
struct cec_data *transmitting;
bool transmit_in_progress;
+ bool transmit_in_progress_aborted;
+ unsigned int xfer_timeout_ms;
struct task_struct *kthread_config;
struct completion config_completion;
struct task_struct *kthread;
wait_queue_head_t kthread_waitq;
- wait_queue_head_t waitq;
const struct cec_adap_ops *ops;
void *priv;
@@ -171,9 +243,12 @@ struct cec_adapter {
u16 phys_addr;
bool needs_hpd;
+ bool is_enabled;
bool is_configuring;
+ bool must_reconfigure;
bool is_configured;
bool cec_pin_is_high;
+ bool adap_controls_phys_addr;
u8 last_initiator;
u32 monitor_all_cnt;
u32 monitor_pin_cnt;
@@ -194,10 +269,7 @@ struct cec_adapter {
#endif
struct dentry *cec_dir;
- struct dentry *status_file;
- struct dentry *error_inj_file;
- u16 phys_addrs[15];
u32 sequence;
char input_phys[32];
diff --git a/include/media/davinci/vpbe_display.h b/include/media/davinci/vpbe_display.h
index 56d05a855140..d8751ea926a2 100644
--- a/include/media/davinci/vpbe_display.h
+++ b/include/media/davinci/vpbe_display.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef VPBE_DISPLAY_H
#define VPBE_DISPLAY_H
@@ -69,13 +69,13 @@ struct vpbe_layer {
struct vpbe_disp_buffer *cur_frm;
/* Pointer pointing to next v4l2_buffer */
struct vpbe_disp_buffer *next_frm;
- /* videobuf specific parameters
- * Buffer queue used in video-buf
+ /* vb2 specific parameters
+ * Buffer queue used in vb2
*/
struct vb2_queue buffer_queue;
/* Queue of filled frames */
struct list_head dma_queue;
- /* Used in video-buf */
+ /* Used for video buffer handling */
spinlock_t irqlock;
/* V4l2 specific parameters */
/* Identifies video device for this layer */
diff --git a/include/media/davinci/vpbe_osd.h b/include/media/davinci/vpbe_osd.h
index e1b1c76aa50f..a4fc4f2a56fb 100644
--- a/include/media/davinci/vpbe_osd.h
+++ b/include/media/davinci/vpbe_osd.h
@@ -54,9 +54,9 @@ enum osd_win_layer {
* @PIXFMT_4BPP: 4-bits-per-pixel bitmap
* @PIXFMT_8BPP: 8-bits-per-pixel bitmap
* @PIXFMT_RGB565: 16-bits-per-pixel RGB565
- * @PIXFMT_YCbCrI: YUV 4:2:2
+ * @PIXFMT_YCBCRI: YUV 4:2:2
* @PIXFMT_RGB888: 24-bits-per-pixel RGB888
- * @PIXFMT_YCrCbI: YUV 4:2:2 with chroma swap
+ * @PIXFMT_YCRCBI: YUV 4:2:2 with chroma swap
* @PIXFMT_NV12: YUV 4:2:0 planar
* @PIXFMT_OSD_ATTR: OSD Attribute Window pixel format (4bpp)
*
@@ -210,7 +210,7 @@ enum osd_cursor_h_width {
};
/**
- * enum davinci_cursor_v_width
+ * enum osd_cursor_v_width
* @V_WIDTH_1: vertical line width is 1 line
* @V_WIDTH_2: vertical line width is 2 lines
* @V_WIDTH_4: vertical line width is 4 lines
diff --git a/include/media/davinci/vpif_types.h b/include/media/davinci/vpif_types.h
index 8439e46fb993..d03e5c54347a 100644
--- a/include/media/davinci/vpif_types.h
+++ b/include/media/davinci/vpif_types.h
@@ -48,8 +48,6 @@ struct vpif_display_config {
int i2c_adapter_id;
struct vpif_display_chan_config chan_config[VPIF_DISPLAY_MAX_CHANNELS];
const char *card_name;
- struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */
- int *asd_sizes; /* 0-terminated array of asd group sizes */
};
struct vpif_input {
diff --git a/include/media/dmxdev.h b/include/media/dmxdev.h
index baafa3b8aca4..63219a699370 100644
--- a/include/media/dmxdev.h
+++ b/include/media/dmxdev.h
@@ -21,7 +21,6 @@
#include <linux/types.h>
#include <linux/spinlock.h>
-#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/wait.h>
diff --git a/include/media/drv-intf/s3c_camif.h b/include/media/drv-intf/s3c_camif.h
index d1200b40f53a..f746851a5ce6 100644
--- a/include/media/drv-intf/s3c_camif.h
+++ b/include/media/drv-intf/s3c_camif.h
@@ -35,8 +35,4 @@ struct s3c_camif_plat_data {
int (*gpio_put)(void);
};
-/* Platform default helper functions */
-int s3c_camif_gpio_get(void);
-int s3c_camif_gpio_put(void);
-
#endif /* MEDIA_S3C_CAMIF_ */
diff --git a/include/media/drv-intf/soc_mediabus.h b/include/media/drv-intf/soc_mediabus.h
deleted file mode 100644
index 361f8852c9fc..000000000000
--- a/include/media/drv-intf/soc_mediabus.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * SoC-camera Media Bus API extensions
- *
- * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- */
-
-#ifndef SOC_MEDIABUS_H
-#define SOC_MEDIABUS_H
-
-#include <linux/videodev2.h>
-#include <linux/v4l2-mediabus.h>
-
-/**
- * enum soc_mbus_packing - data packing types on the media-bus
- * @SOC_MBUS_PACKING_NONE: no packing, bit-for-bit transfer to RAM, one
- * sample represents one pixel
- * @SOC_MBUS_PACKING_2X8_PADHI: 16 bits transferred in 2 8-bit samples, in the
- * possibly incomplete byte high bits are padding
- * @SOC_MBUS_PACKING_2X8_PADLO: as above, but low bits are padding
- * @SOC_MBUS_PACKING_EXTEND16: sample width (e.g., 10 bits) has to be extended
- * to 16 bits
- * @SOC_MBUS_PACKING_VARIABLE: compressed formats with variable packing
- * @SOC_MBUS_PACKING_1_5X8: used for packed YUV 4:2:0 formats, where 4
- * pixels occupy 6 bytes in RAM
- * @SOC_MBUS_PACKING_EXTEND32: sample width (e.g., 24 bits) has to be extended
- * to 32 bits
- */
-enum soc_mbus_packing {
- SOC_MBUS_PACKING_NONE,
- SOC_MBUS_PACKING_2X8_PADHI,
- SOC_MBUS_PACKING_2X8_PADLO,
- SOC_MBUS_PACKING_EXTEND16,
- SOC_MBUS_PACKING_VARIABLE,
- SOC_MBUS_PACKING_1_5X8,
- SOC_MBUS_PACKING_EXTEND32,
-};
-
-/**
- * enum soc_mbus_order - sample order on the media bus
- * @SOC_MBUS_ORDER_LE: least significant sample first
- * @SOC_MBUS_ORDER_BE: most significant sample first
- */
-enum soc_mbus_order {
- SOC_MBUS_ORDER_LE,
- SOC_MBUS_ORDER_BE,
-};
-
-/**
- * enum soc_mbus_layout - planes layout in memory
- * @SOC_MBUS_LAYOUT_PACKED: color components packed
- * @SOC_MBUS_LAYOUT_PLANAR_2Y_U_V: YUV components stored in 3 planes (4:2:2)
- * @SOC_MBUS_LAYOUT_PLANAR_2Y_C: YUV components stored in a luma and a
- * chroma plane (C plane is half the size
- * of Y plane)
- * @SOC_MBUS_LAYOUT_PLANAR_Y_C: YUV components stored in a luma and a
- * chroma plane (C plane is the same size
- * as Y plane)
- */
-enum soc_mbus_layout {
- SOC_MBUS_LAYOUT_PACKED = 0,
- SOC_MBUS_LAYOUT_PLANAR_2Y_U_V,
- SOC_MBUS_LAYOUT_PLANAR_2Y_C,
- SOC_MBUS_LAYOUT_PLANAR_Y_C,
-};
-
-/**
- * struct soc_mbus_pixelfmt - Data format on the media bus
- * @fourcc: Fourcc code, that will be obtained if the data is
- * stored in memory in the following way:
- * @packing: Type of sample-packing, that has to be used
- * @order: Sample order when storing in memory
- * @bits_per_sample: How many bits the bridge has to sample
- */
-struct soc_mbus_pixelfmt {
- u32 fourcc;
- enum soc_mbus_packing packing;
- enum soc_mbus_order order;
- enum soc_mbus_layout layout;
- u8 bits_per_sample;
-};
-
-/**
- * struct soc_mbus_lookup - Lookup FOURCC IDs by mediabus codes for pass-through
- * @code: mediabus pixel-code
- * @fmt: pixel format description
- */
-struct soc_mbus_lookup {
- u32 code;
- struct soc_mbus_pixelfmt fmt;
-};
-
-const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc(
- u32 code,
- const struct soc_mbus_lookup *lookup,
- int n);
-const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc(
- u32 code);
-s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf);
-s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf,
- u32 bytes_per_line, u32 height);
-int soc_mbus_samples_per_pixel(const struct soc_mbus_pixelfmt *mf,
- unsigned int *numerator, unsigned int *denominator);
-unsigned int soc_mbus_config_compatible(const struct v4l2_mbus_config *cfg,
- unsigned int flags);
-
-#endif
diff --git a/include/media/dvb-usb-ids.h b/include/media/dvb-usb-ids.h
index 800d473b03c4..1b7d10f3d4aa 100644
--- a/include/media/dvb-usb-ids.h
+++ b/include/media/dvb-usb-ids.h
@@ -10,80 +10,88 @@
#ifndef _DVB_USB_IDS_H_
#define _DVB_USB_IDS_H_
+#include <linux/usb.h>
+
+#define DVB_USB_DEV(pid, vid) \
+ [vid] = { USB_DEVICE(USB_VID_ ## pid, USB_PID_ ## vid) }
+
+#define DVB_USB_DEV_VER(pid, vid, lo, hi) \
+ [vid] = { USB_DEVICE_VER(USB_VID_ ## pid, USB_PID_ ## vid, lo, hi) }
+
/* Vendor IDs */
-#define USB_VID_ADSTECH 0x06e1
-#define USB_VID_AFATECH 0x15a4
+
+#define USB_VID_774 0x7a69
+#define USB_VID_ADSTECH 0x06e1
+#define USB_VID_AFATECH 0x15a4
#define USB_VID_ALCOR_MICRO 0x058f
#define USB_VID_ALINK 0x05e3
+#define USB_VID_AME 0x06be
#define USB_VID_AMT 0x1c73
#define USB_VID_ANCHOR 0x0547
-#define USB_VID_ANSONIC 0x10b9
+#define USB_VID_ANSONIC 0x10b9
#define USB_VID_ANUBIS_ELECTRONIC 0x10fd
#define USB_VID_ASUS 0x0b05
#define USB_VID_AVERMEDIA 0x07ca
+#define USB_VID_AZUREWAVE 0x13d3
#define USB_VID_COMPRO 0x185b
#define USB_VID_COMPRO_UNK 0x145f
#define USB_VID_CONEXANT 0x0572
-#define USB_VID_CYPRESS 0x04b4
-#define USB_VID_DEXATEK 0x1d19
+#define USB_VID_CYPRESS 0x04b4
+#define USB_VID_DEXATEK 0x1d19
#define USB_VID_DIBCOM 0x10b8
#define USB_VID_DPOSH 0x1498
#define USB_VID_DVICO 0x0fe9
#define USB_VID_E3C 0x18b4
#define USB_VID_ELGATO 0x0fd9
#define USB_VID_EMPIA 0xeb1a
+#define USB_VID_EVOLUTEPC 0x1e59
#define USB_VID_GENPIX 0x09c0
+#define USB_VID_GIGABYTE 0x1044
+#define USB_VID_GOTVIEW 0x1fe1
#define USB_VID_GRANDTEC 0x5032
#define USB_VID_GTEK 0x1f4d
-#define USB_VID_HANFTEK 0x15f4
+#define USB_VID_HAMA 0x147f
+#define USB_VID_HANFTEK 0x15f4
#define USB_VID_HAUPPAUGE 0x2040
+#define USB_VID_HUMAX_COEX 0x10b9
#define USB_VID_HYPER_PALTEK 0x1025
#define USB_VID_INTEL 0x8086
-#define USB_VID_ITETECH 0x048d
+#define USB_VID_ITETECH 0x048d
#define USB_VID_KWORLD 0xeb2a
#define USB_VID_KWORLD_2 0x1b80
#define USB_VID_KYE 0x0458
-#define USB_VID_LEADTEK 0x0413
+#define USB_VID_LEADTEK 0x0413
#define USB_VID_LITEON 0x04ca
#define USB_VID_MEDION 0x1660
+#define USB_VID_MICROSOFT 0x045e
#define USB_VID_MIGLIA 0x18f3
#define USB_VID_MSI 0x0db0
#define USB_VID_MSI_2 0x1462
#define USB_VID_OPERA1 0x695c
-#define USB_VID_PINNACLE 0x2304
#define USB_VID_PCTV 0x2013
+#define USB_VID_PINNACLE 0x2304
#define USB_VID_PIXELVIEW 0x1554
-#define USB_VID_REALTEK 0x0bda
+#define USB_VID_PROF_1 0x3011
+#define USB_VID_PROF_2 0x3034
+#define USB_VID_REALTEK 0x0bda
+#define USB_VID_SONY 0x1415
+#define USB_VID_TECHNISAT 0x14f7
#define USB_VID_TECHNOTREND 0x0b48
-#define USB_VID_TERRATEC 0x0ccd
#define USB_VID_TELESTAR 0x10b9
-#define USB_VID_VISIONPLUS 0x13d3
-#define USB_VID_SONY 0x1415
-#define USB_PID_TEVII_S421 0xd421
-#define USB_PID_TEVII_S480_1 0xd481
-#define USB_PID_TEVII_S480_2 0xd482
-#define USB_PID_TEVII_S630 0xd630
-#define USB_PID_TEVII_S632 0xd632
-#define USB_PID_TEVII_S650 0xd650
-#define USB_PID_TEVII_S660 0xd660
-#define USB_PID_TEVII_S662 0xd662
-#define USB_VID_TWINHAN 0x1822
+#define USB_VID_TERRATEC 0x0ccd
+#define USB_VID_TERRATEC_2 0x153b
+#define USB_VID_TEVII 0x9022
+#define USB_VID_TWINHAN 0x1822
#define USB_VID_ULTIMA_ELECTRONIC 0x05d8
-#define USB_VID_UNIWILL 0x1584
+#define USB_VID_UNIWILL 0x1584
+#define USB_VID_VISIONPLUS 0x13d3
#define USB_VID_WIDEVIEW 0x14aa
-#define USB_VID_GIGABYTE 0x1044
-#define USB_VID_YUAN 0x1164
#define USB_VID_XTENSIONS 0x1ae7
+#define USB_VID_YUAN 0x1164
#define USB_VID_ZYDAS 0x0ace
-#define USB_VID_HUMAX_COEX 0x10b9
-#define USB_VID_774 0x7a69
-#define USB_VID_EVOLUTEPC 0x1e59
-#define USB_VID_AZUREWAVE 0x13d3
-#define USB_VID_TECHNISAT 0x14f7
-#define USB_VID_HAMA 0x147f
-#define USB_VID_MICROSOFT 0x045e
/* Product IDs */
+
#define USB_PID_ADSTECH_USB2_COLD 0xa333
#define USB_PID_ADSTECH_USB2_WARM 0xa334
#define USB_PID_AFATECH_AF9005 0x9020
@@ -94,337 +102,370 @@
#define USB_PID_AFATECH_AF9035_1002 0x1002
#define USB_PID_AFATECH_AF9035_1003 0x1003
#define USB_PID_AFATECH_AF9035_9035 0x9035
-#define USB_PID_TREKSTOR_DVBT 0x901b
-#define USB_PID_TREKSTOR_TERRES_2_0 0xC803
-#define USB_VID_ALINK_DTU 0xf170
+#define USB_PID_ALINK_DTU 0xf170
+#define USB_PID_AME_DTV5100 0xa232
+#define USB_PID_ANCHOR_NEBULA_DIGITV 0x0201
#define USB_PID_ANSONIC_DVBT_USB 0x6000
+#define USB_PID_ANUBIS_LIFEVIEW_TV_WALKER_TWIN_COLD 0x0514
+#define USB_PID_ANUBIS_LIFEVIEW_TV_WALKER_TWIN_WARM 0x0513
+#define USB_PID_ANUBIS_MSI_DIGI_VOX_MINI_II 0x1513
#define USB_PID_ANYSEE 0x861f
-#define USB_PID_AZUREWAVE_AD_TU700 0x3237
-#define USB_PID_AZUREWAVE_6007 0x0ccd
-#define USB_PID_AVERMEDIA_DVBT_USB_COLD 0x0001
-#define USB_PID_AVERMEDIA_DVBT_USB_WARM 0x0002
+#define USB_PID_ASUS_U3000 0x171f
+#define USB_PID_ASUS_U3000H 0x1736
+#define USB_PID_ASUS_U3100 0x173f
+#define USB_PID_ASUS_U3100MINI_PLUS 0x1779
+#define USB_PID_AVERMEDIA_1867 0x1867
+#define USB_PID_AVERMEDIA_A309 0xa309
+#define USB_PID_AVERMEDIA_A310 0xa310
+#define USB_PID_AVERMEDIA_A805 0xa805
+#define USB_PID_AVERMEDIA_A815M 0x815a
+#define USB_PID_AVERMEDIA_A835 0xa835
+#define USB_PID_AVERMEDIA_A835B_1835 0x1835
+#define USB_PID_AVERMEDIA_A835B_2835 0x2835
+#define USB_PID_AVERMEDIA_A835B_3835 0x3835
+#define USB_PID_AVERMEDIA_A835B_4835 0x4835
+#define USB_PID_AVERMEDIA_A850 0x850a
+#define USB_PID_AVERMEDIA_A850T 0x850b
+#define USB_PID_AVERMEDIA_A867 0xa867
+#define USB_PID_AVERMEDIA_B835 0xb835
#define USB_PID_AVERMEDIA_DVBT_USB2_COLD 0xa800
#define USB_PID_AVERMEDIA_DVBT_USB2_WARM 0xa801
+#define USB_PID_AVERMEDIA_EXPRESS 0xb568
+#define USB_PID_AVERMEDIA_H335 0x0335
+#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R 0x0039
+#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R_ATSC 0x1039
+#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R_DVBT 0x2039
+#define USB_PID_AVERMEDIA_MCE_USB_M038 0x1228
+#define USB_PID_AVERMEDIA_TD110 0xa110
+#define USB_PID_AVERMEDIA_TD310 0x1871
+#define USB_PID_AVERMEDIA_TWINSTAR 0x0825
+#define USB_PID_AVERMEDIA_VOLAR 0xa807
+#define USB_PID_AVERMEDIA_VOLAR_2 0xb808
+#define USB_PID_AVERMEDIA_VOLAR_A868R 0xa868
+#define USB_PID_AVERMEDIA_VOLAR_X 0xa815
+#define USB_PID_AVERMEDIA_VOLAR_X_2 0x8150
+#define USB_PID_AZUREWAVE_6007 0x0ccd
+#define USB_PID_AZUREWAVE_AD_TU700 0x3237
+#define USB_PID_AZUREWAVE_AZ6027 0x3275
+#define USB_PID_AZUREWAVE_TWINHAN_VP7049 0x3219
#define USB_PID_COMPRO_DVBU2000_COLD 0xd000
-#define USB_PID_COMPRO_DVBU2000_WARM 0xd001
#define USB_PID_COMPRO_DVBU2000_UNK_COLD 0x010c
#define USB_PID_COMPRO_DVBU2000_UNK_WARM 0x010d
+#define USB_PID_COMPRO_DVBU2000_WARM 0xd001
#define USB_PID_COMPRO_VIDEOMATE_U500 0x1e78
#define USB_PID_COMPRO_VIDEOMATE_U500_PC 0x1e80
#define USB_PID_CONCEPTRONIC_CTVDIGRCU 0xe397
#define USB_PID_CONEXANT_D680_DMB 0x86d6
-#define USB_PID_CREATIX_CTX1921 0x1921
+#define USB_PID_CPYTO_REDI_PC50A 0xa803
+#define USB_PID_CTVDIGDUAL_V2 0xe410
+#define USB_PID_CYPRESS_DW2101 0x2101
+#define USB_PID_CYPRESS_DW2102 0x2102
+#define USB_PID_CYPRESS_DW2104 0x2104
+#define USB_PID_CYPRESS_DW3101 0x3101
+#define USB_PID_CYPRESS_OPERA1_COLD 0x2830
#define USB_PID_DELOCK_USB2_DVBT 0xb803
+#define USB_PID_DIBCOM_ANCHOR_2135_COLD 0x2131
#define USB_PID_DIBCOM_HOOK_DEFAULT 0x0064
#define USB_PID_DIBCOM_HOOK_DEFAULT_REENUM 0x0065
#define USB_PID_DIBCOM_MOD3000_COLD 0x0bb8
#define USB_PID_DIBCOM_MOD3000_WARM 0x0bb9
#define USB_PID_DIBCOM_MOD3001_COLD 0x0bc6
#define USB_PID_DIBCOM_MOD3001_WARM 0x0bc7
-#define USB_PID_DIBCOM_STK7700P 0x1e14
+#define USB_PID_DIBCOM_NIM7090 0x1bb2
+#define USB_PID_DIBCOM_NIM8096MD 0x1fa8
+#define USB_PID_DIBCOM_NIM9090M 0x2383
+#define USB_PID_DIBCOM_NIM9090MD 0x2384
+#define USB_PID_DIBCOM_STK7070P 0x1ebc
+#define USB_PID_DIBCOM_STK7070PD 0x1ebe
+#define USB_PID_DIBCOM_STK7700D 0x1ef0
+#define USB_PID_DIBCOM_STK7700P 0x1e14
#define USB_PID_DIBCOM_STK7700P_PC 0x1e78
-#define USB_PID_DIBCOM_STK7700D 0x1ef0
#define USB_PID_DIBCOM_STK7700_U7000 0x7001
-#define USB_PID_DIBCOM_STK7070P 0x1ebc
-#define USB_PID_DIBCOM_STK7070PD 0x1ebe
-#define USB_PID_DIBCOM_STK807XP 0x1f90
+#define USB_PID_DIBCOM_STK7770P 0x1e80
+#define USB_PID_DIBCOM_STK807XP 0x1f90
#define USB_PID_DIBCOM_STK807XPVR 0x1f98
-#define USB_PID_DIBCOM_STK8096GP 0x1fa0
-#define USB_PID_DIBCOM_STK8096PVR 0x1faa
-#define USB_PID_DIBCOM_NIM8096MD 0x1fa8
-#define USB_PID_DIBCOM_TFE8096P 0x1f9C
-#define USB_PID_DIBCOM_ANCHOR_2135_COLD 0x2131
-#define USB_PID_DIBCOM_STK7770P 0x1e80
-#define USB_PID_DIBCOM_NIM7090 0x1bb2
+#define USB_PID_DIBCOM_STK8096GP 0x1fa0
+#define USB_PID_DIBCOM_STK8096PVR 0x1faa
#define USB_PID_DIBCOM_TFE7090PVR 0x1bb4
-#define USB_PID_DIBCOM_TFE7790P 0x1e6e
-#define USB_PID_DIBCOM_NIM9090M 0x2383
-#define USB_PID_DIBCOM_NIM9090MD 0x2384
+#define USB_PID_DIBCOM_TFE7790P 0x1e6e
+#define USB_PID_DIBCOM_TFE8096P 0x1f9C
+#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_COLD 0xdb54
+#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_WARM 0xdb55
#define USB_PID_DPOSH_M9206_COLD 0x9206
#define USB_PID_DPOSH_M9206_WARM 0xa090
+#define USB_PID_DVICO_BLUEBIRD_DUAL_1_COLD 0xdb50
+#define USB_PID_DVICO_BLUEBIRD_DUAL_1_WARM 0xdb51
+#define USB_PID_DVICO_BLUEBIRD_DUAL_2_COLD 0xdb58
+#define USB_PID_DVICO_BLUEBIRD_DUAL_2_WARM 0xdb59
+#define USB_PID_DVICO_BLUEBIRD_DUAL_4 0xdb78
+#define USB_PID_DVICO_BLUEBIRD_DUAL_4_REV_2 0xdb98
+#define USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2 0xdb70
+#define USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM 0xdb71
+#define USB_PID_DVICO_BLUEBIRD_LG064F_COLD 0xd500
+#define USB_PID_DVICO_BLUEBIRD_LG064F_WARM 0xd501
+#define USB_PID_DVICO_BLUEBIRD_LGDT 0xd820
+#define USB_PID_DVICO_BLUEBIRD_LGZ201_COLD 0xdb00
+#define USB_PID_DVICO_BLUEBIRD_LGZ201_WARM 0xdb01
+#define USB_PID_DVICO_BLUEBIRD_TH7579_COLD 0xdb10
+#define USB_PID_DVICO_BLUEBIRD_TH7579_WARM 0xdb11
#define USB_PID_E3C_EC168 0x1689
#define USB_PID_E3C_EC168_2 0xfffa
#define USB_PID_E3C_EC168_3 0xfffb
#define USB_PID_E3C_EC168_4 0x1001
#define USB_PID_E3C_EC168_5 0x1002
+#define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011
+#define USB_PID_ELGATO_EYETV_DTT 0x0021
+#define USB_PID_ELGATO_EYETV_DTT_2 0x003f
+#define USB_PID_ELGATO_EYETV_DTT_Dlx 0x0020
+#define USB_PID_ELGATO_EYETV_SAT 0x002a
+#define USB_PID_ELGATO_EYETV_SAT_V2 0x0025
+#define USB_PID_ELGATO_EYETV_SAT_V3 0x0036
+#define USB_PID_EMPIA_DIGIVOX_MINI_SL_COLD 0xe360
+#define USB_PID_EMPIA_DIGIVOX_MINI_SL_WARM 0xe361
+#define USB_PID_EMPIA_VSTREAM_COLD 0x17de
+#define USB_PID_EMPIA_VSTREAM_WARM 0x17df
+#define USB_PID_EVOLUTEPC_TVWAY_PLUS 0x0002
+#define USB_PID_EVOLVEO_XTRATV_STICK 0xa115
#define USB_PID_FREECOM_DVBT 0x0160
#define USB_PID_FREECOM_DVBT_2 0x0161
-#define USB_PID_UNIWILL_STK7700P 0x6003
+#define USB_PID_FRIIO_WHITE 0x0001
+#define USB_PID_GENIATECH_SU3000 0x3000
+#define USB_PID_GENIATECH_T220 0xd220
+#define USB_PID_GENIATECH_X3M_SPC1400HD 0x3100
#define USB_PID_GENIUS_TVGO_DVB_T03 0x4012
+#define USB_PID_GENPIX_8PSK_REV_1_COLD 0x0200
+#define USB_PID_GENPIX_8PSK_REV_1_WARM 0x0201
+#define USB_PID_GENPIX_8PSK_REV_2 0x0202
+#define USB_PID_GENPIX_SKYWALKER_1 0x0203
+#define USB_PID_GENPIX_SKYWALKER_2 0x0206
+#define USB_PID_GENPIX_SKYWALKER_CW3K 0x0204
+#define USB_PID_GIGABYTE_U7000 0x7001
+#define USB_PID_GIGABYTE_U8000 0x7002
+#define USB_PID_GOTVIEW_SAT_HD 0x5456
+#define USB_PID_GRANDTEC_DVBT_USB2_COLD 0x0bc6
+#define USB_PID_GRANDTEC_DVBT_USB2_WARM 0x0bc7
#define USB_PID_GRANDTEC_DVBT_USB_COLD 0x0fa0
#define USB_PID_GRANDTEC_DVBT_USB_WARM 0x0fa1
-#define USB_PID_GOTVIEW_SAT_HD 0x5456
+#define USB_PID_GRANDTEC_MOD3000_COLD 0x0bb8
+#define USB_PID_GRANDTEC_MOD3000_WARM 0x0bb9
+#define USB_PID_HAMA_DVBT_HYBRID 0x2758
+#define USB_PID_HANFTEK_UMT_010_COLD 0x0001
+#define USB_PID_HANFTEK_UMT_010_WARM 0x0015
+#define USB_PID_HAUPPAUGE_MAX_S2 0xd900
+#define USB_PID_HAUPPAUGE_MYTV_T 0x7080
+#define USB_PID_HAUPPAUGE_NOVA_TD_STICK 0x9580
+#define USB_PID_HAUPPAUGE_NOVA_TD_STICK_52009 0x5200
+#define USB_PID_HAUPPAUGE_NOVA_T_500 0x9941
+#define USB_PID_HAUPPAUGE_NOVA_T_500_2 0x9950
+#define USB_PID_HAUPPAUGE_NOVA_T_500_3 0x8400
+#define USB_PID_HAUPPAUGE_NOVA_T_STICK 0x7050
+#define USB_PID_HAUPPAUGE_NOVA_T_STICK_2 0x7060
+#define USB_PID_HAUPPAUGE_NOVA_T_STICK_3 0x7070
+#define USB_PID_HAUPPAUGE_TIGER_ATSC 0xb200
+#define USB_PID_HAUPPAUGE_TIGER_ATSC_B210 0xb210
+#define USB_PID_HAUPPAUGE_WINTV_NOVA_T_USB2_COLD 0x9300
+#define USB_PID_HAUPPAUGE_WINTV_NOVA_T_USB2_WARM 0x9301
+#define USB_PID_HUMAX_DVB_T_STICK_HIGH_SPEED_COLD 0x5000
+#define USB_PID_HUMAX_DVB_T_STICK_HIGH_SPEED_WARM 0x5001
#define USB_PID_INTEL_CE9500 0x9500
#define USB_PID_ITETECH_IT9135 0x9135
#define USB_PID_ITETECH_IT9135_9005 0x9005
#define USB_PID_ITETECH_IT9135_9006 0x9006
#define USB_PID_ITETECH_IT9303 0x9306
-#define USB_PID_KWORLD_399U 0xe399
-#define USB_PID_KWORLD_399U_2 0xe400
#define USB_PID_KWORLD_395U 0xe396
#define USB_PID_KWORLD_395U_2 0xe39b
#define USB_PID_KWORLD_395U_3 0xe395
#define USB_PID_KWORLD_395U_4 0xe39a
+#define USB_PID_KWORLD_399U 0xe399
+#define USB_PID_KWORLD_399U_2 0xe400
#define USB_PID_KWORLD_MC810 0xc810
-#define USB_PID_KWORLD_PC160_2T 0xc160
+#define USB_PID_KWORLD_PC160_2T 0xc160
#define USB_PID_KWORLD_PC160_T 0xc161
#define USB_PID_KWORLD_UB383_T 0xe383
#define USB_PID_KWORLD_UB499_2T_T09 0xe409
#define USB_PID_KWORLD_VSTREAM_COLD 0x17de
-#define USB_PID_KWORLD_VSTREAM_WARM 0x17df
+#define USB_PID_KYE_DVB_T_COLD 0x701e
+#define USB_PID_KYE_DVB_T_WARM 0x701f
+#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_COLD 0x6025
+#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_H 0x60f6
+#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_STK7700P 0x6f00
+#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_STK7700P_2 0x6f01
+#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_WARM 0x6026
+#define USB_PID_LITEON_DVB_T_COLD 0xf000
+#define USB_PID_LITEON_DVB_T_WARM 0xf001
+#define USB_PID_MEDION_CREATIX_CTX1921 0x1921
+#define USB_PID_MEDION_MD95700 0x0932
+#define USB_PID_MICROSOFT_XBOX_ONE_TUNER 0x02d5
+#define USB_PID_MIGLIA_WT220U_ZAP250_COLD 0x0220
+#define USB_PID_MSI_DIGIVOX_DUO 0x8801
+#define USB_PID_MSI_DIGI_VOX_MINI_III 0x8807
+#define USB_PID_MSI_MEGASKY580 0x5580
+#define USB_PID_MSI_MEGASKY580_55801 0x5581
+#define USB_PID_MYGICA_D689 0xd811
+#define USB_PID_MYGICA_T230 0xc688
+#define USB_PID_MYGICA_T230A 0x689a
+#define USB_PID_MYGICA_T230C 0xc689
+#define USB_PID_MYGICA_T230C2 0xc68a
+#define USB_PID_MYGICA_T230C2_LITE 0xc69a
+#define USB_PID_MYGICA_T230C_LITE 0xc699
+#define USB_PID_NOXON_DAB_STICK 0x00b3
+#define USB_PID_NOXON_DAB_STICK_REV2 0x00e0
+#define USB_PID_NOXON_DAB_STICK_REV3 0x00b4
+#define USB_PID_OPERA1_WARM 0x3829
+#define USB_PID_PCTV_2002E 0x025c
+#define USB_PID_PCTV_2002E_SE 0x025d
+#define USB_PID_PCTV_200E 0x020e
+#define USB_PID_PCTV_78E 0x025a
+#define USB_PID_PCTV_79E 0x0262
+#define USB_PID_PCTV_DIBCOM_STK8096PVR 0x1faa
+#define USB_PID_PCTV_PINNACLE_PCTV282E 0x0248
+#define USB_PID_PCTV_PINNACLE_PCTV73ESE 0x0245
+#define USB_PID_PINNACLE_EXPRESSCARD_320CX 0x022e
+#define USB_PID_PINNACLE_PCTV2000E 0x022c
+#define USB_PID_PINNACLE_PCTV282E 0x0248
+#define USB_PID_PINNACLE_PCTV340E 0x023d
+#define USB_PID_PINNACLE_PCTV340E_SE 0x023e
+#define USB_PID_PINNACLE_PCTV71E 0x022b
+#define USB_PID_PINNACLE_PCTV72E 0x0236
+#define USB_PID_PINNACLE_PCTV73A 0x0243
+#define USB_PID_PINNACLE_PCTV73E 0x0237
+#define USB_PID_PINNACLE_PCTV73ESE 0x0245
+#define USB_PID_PINNACLE_PCTV74E 0x0246
+#define USB_PID_PINNACLE_PCTV801E 0x023a
+#define USB_PID_PINNACLE_PCTV801E_SE 0x023b
+#define USB_PID_PINNACLE_PCTV_400E 0x020f
+#define USB_PID_PINNACLE_PCTV_450E 0x0222
+#define USB_PID_PINNACLE_PCTV_452E 0x021f
+#define USB_PID_PINNACLE_PCTV_DUAL_DIVERSITY_DVB_T 0x0229
+#define USB_PID_PINNACLE_PCTV_DVB_T_FLASH 0x0228
+#define USB_PID_PIXELVIEW_SBTVD 0x5010
#define USB_PID_PROF_1100 0xb012
-#define USB_PID_TERRATEC_CINERGY_S 0x0064
-#define USB_PID_TERRATEC_CINERGY_T_USB_XE 0x0055
-#define USB_PID_TERRATEC_CINERGY_T_USB_XE_REV2 0x0069
-#define USB_PID_TERRATEC_CINERGY_T_STICK 0x0093
-#define USB_PID_TERRATEC_CINERGY_T_STICK_RC 0x0097
-#define USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC 0x0099
-#define USB_PID_TERRATEC_CINERGY_T_STICK_BLACK_REV1 0x00a9
-#define USB_PID_TERRATEC_CINERGY_TC2_STICK 0x10b2
-#define USB_PID_TWINHAN_VP7041_COLD 0x3201
-#define USB_PID_TWINHAN_VP7041_WARM 0x3202
-#define USB_PID_TWINHAN_VP7020_COLD 0x3203
-#define USB_PID_TWINHAN_VP7020_WARM 0x3204
-#define USB_PID_TWINHAN_VP7045_COLD 0x3205
-#define USB_PID_TWINHAN_VP7045_WARM 0x3206
-#define USB_PID_TWINHAN_VP7021_COLD 0x3207
-#define USB_PID_TWINHAN_VP7021_WARM 0x3208
-#define USB_PID_TWINHAN_VP7049 0x3219
-#define USB_PID_TINYTWIN 0x3226
-#define USB_PID_TINYTWIN_2 0xe402
-#define USB_PID_TINYTWIN_3 0x9016
-#define USB_PID_DNTV_TINYUSB2_COLD 0x3223
-#define USB_PID_DNTV_TINYUSB2_WARM 0x3224
-#define USB_PID_ULTIMA_TVBOX_COLD 0x8105
-#define USB_PID_ULTIMA_TVBOX_WARM 0x8106
-#define USB_PID_ULTIMA_TVBOX_AN2235_COLD 0x8107
-#define USB_PID_ULTIMA_TVBOX_AN2235_WARM 0x8108
-#define USB_PID_ULTIMA_TVBOX_ANCHOR_COLD 0x2235
-#define USB_PID_ULTIMA_TVBOX_USB2_COLD 0x8109
-#define USB_PID_ULTIMA_TVBOX_USB2_WARM 0x810a
-#define USB_PID_ARTEC_T14_COLD 0x810b
-#define USB_PID_ARTEC_T14_WARM 0x810c
-#define USB_PID_ARTEC_T14BR 0x810f
-#define USB_PID_ULTIMA_TVBOX_USB2_FX_COLD 0x8613
-#define USB_PID_ULTIMA_TVBOX_USB2_FX_WARM 0x1002
-#define USB_PID_UNK_HYPER_PALTEK_COLD 0x005e
-#define USB_PID_UNK_HYPER_PALTEK_WARM 0x005f
-#define USB_PID_HANFTEK_UMT_010_COLD 0x0001
-#define USB_PID_HANFTEK_UMT_010_WARM 0x0015
-#define USB_PID_DTT200U_COLD 0x0201
-#define USB_PID_DTT200U_WARM 0x0301
-#define USB_PID_WT220U_ZAP250_COLD 0x0220
-#define USB_PID_WT220U_COLD 0x0222
-#define USB_PID_WT220U_WARM 0x0221
-#define USB_PID_WT220U_FC_COLD 0x0225
-#define USB_PID_WT220U_FC_WARM 0x0226
-#define USB_PID_WT220U_ZL0353_COLD 0x022a
-#define USB_PID_WT220U_ZL0353_WARM 0x022b
-#define USB_PID_WINTV_NOVA_T_USB2_COLD 0x9300
-#define USB_PID_WINTV_NOVA_T_USB2_WARM 0x9301
-#define USB_PID_HAUPPAUGE_NOVA_T_500 0x9941
-#define USB_PID_HAUPPAUGE_NOVA_T_500_2 0x9950
-#define USB_PID_HAUPPAUGE_NOVA_T_500_3 0x8400
-#define USB_PID_HAUPPAUGE_NOVA_T_STICK 0x7050
-#define USB_PID_HAUPPAUGE_NOVA_T_STICK_2 0x7060
-#define USB_PID_HAUPPAUGE_NOVA_T_STICK_3 0x7070
-#define USB_PID_HAUPPAUGE_MYTV_T 0x7080
-#define USB_PID_HAUPPAUGE_NOVA_TD_STICK 0x9580
-#define USB_PID_HAUPPAUGE_NOVA_TD_STICK_52009 0x5200
-#define USB_PID_HAUPPAUGE_TIGER_ATSC 0xb200
-#define USB_PID_HAUPPAUGE_TIGER_ATSC_B210 0xb210
-#define USB_PID_AVERMEDIA_EXPRESS 0xb568
-#define USB_PID_AVERMEDIA_VOLAR 0xa807
-#define USB_PID_AVERMEDIA_VOLAR_2 0xb808
-#define USB_PID_AVERMEDIA_VOLAR_A868R 0xa868
-#define USB_PID_AVERMEDIA_MCE_USB_M038 0x1228
-#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R 0x0039
-#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R_ATSC 0x1039
-#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R_DVBT 0x2039
-#define USB_PID_AVERMEDIA_VOLAR_X 0xa815
-#define USB_PID_AVERMEDIA_VOLAR_X_2 0x8150
-#define USB_PID_AVERMEDIA_A309 0xa309
-#define USB_PID_AVERMEDIA_A310 0xa310
-#define USB_PID_AVERMEDIA_A850 0x850a
-#define USB_PID_AVERMEDIA_A850T 0x850b
-#define USB_PID_AVERMEDIA_A805 0xa805
-#define USB_PID_AVERMEDIA_A815M 0x815a
-#define USB_PID_AVERMEDIA_A835 0xa835
-#define USB_PID_AVERMEDIA_B835 0xb835
-#define USB_PID_AVERMEDIA_A835B_1835 0x1835
-#define USB_PID_AVERMEDIA_A835B_2835 0x2835
-#define USB_PID_AVERMEDIA_A835B_3835 0x3835
-#define USB_PID_AVERMEDIA_A835B_4835 0x4835
-#define USB_PID_AVERMEDIA_1867 0x1867
-#define USB_PID_AVERMEDIA_A867 0xa867
-#define USB_PID_AVERMEDIA_H335 0x0335
-#define USB_PID_AVERMEDIA_TD110 0xa110
-#define USB_PID_AVERMEDIA_TD310 0x1871
-#define USB_PID_AVERMEDIA_TWINSTAR 0x0825
-#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
-#define USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM 0x3009
-#define USB_PID_TECHNOTREND_CONNECT_CT3650 0x300d
-#define USB_PID_TECHNOTREND_CONNECT_S2_4600 0x3011
-#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI 0x3012
+#define USB_PID_PROF_7500 0x7500
+#define USB_PID_PROLECTRIX_DV107669 0xd803
+#define USB_PID_REALTEK_RTL2831U 0x2831
+#define USB_PID_REALTEK_RTL2832U 0x2832
+#define USB_PID_SIGMATEK_DVB_110 0x6610
+#define USB_PID_SONY_PLAYTV 0x0003
+#define USB_PID_SVEON_STV20 0xe39d
+#define USB_PID_SVEON_STV20_RTL2832U 0xd39d
+#define USB_PID_SVEON_STV21 0xd3b0
+#define USB_PID_SVEON_STV22 0xe401
+#define USB_PID_SVEON_STV22_IT9137 0xe411
+#define USB_PID_SVEON_STV27 0xd3af
+#define USB_PID_TECHNISAT_AIRSTAR_TELESTICK_2 0x0004
+#define USB_PID_TECHNISAT_USB2_CABLESTAR_HDCI 0x0003
+#define USB_PID_TECHNISAT_USB2_DVB_S2 0x0500
+#define USB_PID_TECHNISAT_USB2_HDCI_V1 0x0001
+#define USB_PID_TECHNISAT_USB2_HDCI_V2 0x0002
+#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI 0x3012
#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI_2 0x3015
-#define USB_PID_TECHNOTREND_TVSTICK_CT2_4400 0x3014
+#define USB_PID_TECHNOTREND_CONNECT_CT3650 0x300d
+#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
+#define USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM 0x3009
+#define USB_PID_TECHNOTREND_CONNECT_S2_3600 0x3007
+#define USB_PID_TECHNOTREND_CONNECT_S2_3650_CI 0x300a
+#define USB_PID_TECHNOTREND_CONNECT_S2_4600 0x3011
#define USB_PID_TECHNOTREND_CONNECT_S2_4650_CI 0x3017
+#define USB_PID_TECHNOTREND_TVSTICK_CT2_4400 0x3014
+#define USB_PID_TELESTAR_STARSTICK_2 0x8000
#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY 0x005a
#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY_2 0x0081
-#define USB_PID_TERRATEC_CINERGY_HT_USB_XE 0x0058
#define USB_PID_TERRATEC_CINERGY_HT_EXPRESS 0x0060
-#define USB_PID_TERRATEC_CINERGY_T_EXPRESS 0x0062
-#define USB_PID_TERRATEC_CINERGY_T_XXS 0x0078
-#define USB_PID_TERRATEC_CINERGY_T_XXS_2 0x00ab
+#define USB_PID_TERRATEC_CINERGY_HT_USB_XE 0x0058
+#define USB_PID_TERRATEC_CINERGY_S 0x0064
+#define USB_PID_TERRATEC_CINERGY_S2_1 0x1181
+#define USB_PID_TERRATEC_CINERGY_S2_2 0x1182
+#define USB_PID_TERRATEC_CINERGY_S2_BOX 0x0105
#define USB_PID_TERRATEC_CINERGY_S2_R1 0x00a8
#define USB_PID_TERRATEC_CINERGY_S2_R2 0x00b0
#define USB_PID_TERRATEC_CINERGY_S2_R3 0x0102
#define USB_PID_TERRATEC_CINERGY_S2_R4 0x0105
+#define USB_PID_TERRATEC_CINERGY_T2 0x0038
+#define USB_PID_TERRATEC_CINERGY_TC2_STICK 0x10b2
+#define USB_PID_TERRATEC_CINERGY_T_EXPRESS 0x0062
+#define USB_PID_TERRATEC_CINERGY_T_STICK 0x0093
+#define USB_PID_TERRATEC_CINERGY_T_STICK_BLACK_REV1 0x00a9
+#define USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC 0x0099
+#define USB_PID_TERRATEC_CINERGY_T_STICK_RC 0x0097
+#define USB_PID_TERRATEC_CINERGY_T_USB_XE 0x0055
+#define USB_PID_TERRATEC_CINERGY_T_USB_XE_REV2 0x0069
+#define USB_PID_TERRATEC_CINERGY_T_XXS 0x0078
+#define USB_PID_TERRATEC_CINERGY_T_XXS_2 0x00ab
+#define USB_PID_TERRATEC_DVBS2CI_V1 0x10a4
+#define USB_PID_TERRATEC_DVBS2CI_V2 0x10ac
#define USB_PID_TERRATEC_H7 0x10b4
#define USB_PID_TERRATEC_H7_2 0x10a3
#define USB_PID_TERRATEC_H7_3 0x10a5
#define USB_PID_TERRATEC_T1 0x10ae
#define USB_PID_TERRATEC_T3 0x10a0
#define USB_PID_TERRATEC_T5 0x10a1
-#define USB_PID_NOXON_DAB_STICK 0x00b3
-#define USB_PID_NOXON_DAB_STICK_REV2 0x00e0
-#define USB_PID_NOXON_DAB_STICK_REV3 0x00b4
-#define USB_PID_PINNACLE_EXPRESSCARD_320CX 0x022e
-#define USB_PID_PINNACLE_PCTV2000E 0x022c
-#define USB_PID_PINNACLE_PCTV_DVB_T_FLASH 0x0228
-#define USB_PID_PINNACLE_PCTV_DUAL_DIVERSITY_DVB_T 0x0229
-#define USB_PID_PINNACLE_PCTV71E 0x022b
-#define USB_PID_PINNACLE_PCTV72E 0x0236
-#define USB_PID_PINNACLE_PCTV73E 0x0237
-#define USB_PID_PINNACLE_PCTV310E 0x3211
-#define USB_PID_PINNACLE_PCTV801E 0x023a
-#define USB_PID_PINNACLE_PCTV801E_SE 0x023b
-#define USB_PID_PINNACLE_PCTV340E 0x023d
-#define USB_PID_PINNACLE_PCTV340E_SE 0x023e
-#define USB_PID_PINNACLE_PCTV73A 0x0243
-#define USB_PID_PINNACLE_PCTV73ESE 0x0245
-#define USB_PID_PINNACLE_PCTV74E 0x0246
-#define USB_PID_PINNACLE_PCTV282E 0x0248
-#define USB_PID_PIXELVIEW_SBTVD 0x5010
-#define USB_PID_PCTV_200E 0x020e
-#define USB_PID_PCTV_400E 0x020f
-#define USB_PID_PCTV_450E 0x0222
-#define USB_PID_PCTV_452E 0x021f
-#define USB_PID_PCTV_78E 0x025a
-#define USB_PID_PCTV_79E 0x0262
-#define USB_PID_REALTEK_RTL2831U 0x2831
-#define USB_PID_REALTEK_RTL2832U 0x2832
-#define USB_PID_TECHNOTREND_CONNECT_S2_3600 0x3007
-#define USB_PID_TECHNOTREND_CONNECT_S2_3650_CI 0x300a
-#define USB_PID_NEBULA_DIGITV 0x0201
-#define USB_PID_DVICO_BLUEBIRD_LGDT 0xd820
-#define USB_PID_DVICO_BLUEBIRD_LG064F_COLD 0xd500
-#define USB_PID_DVICO_BLUEBIRD_LG064F_WARM 0xd501
-#define USB_PID_DVICO_BLUEBIRD_LGZ201_COLD 0xdb00
-#define USB_PID_DVICO_BLUEBIRD_LGZ201_WARM 0xdb01
-#define USB_PID_DVICO_BLUEBIRD_TH7579_COLD 0xdb10
-#define USB_PID_DVICO_BLUEBIRD_TH7579_WARM 0xdb11
-#define USB_PID_DVICO_BLUEBIRD_DUAL_1_COLD 0xdb50
-#define USB_PID_DVICO_BLUEBIRD_DUAL_1_WARM 0xdb51
-#define USB_PID_DVICO_BLUEBIRD_DUAL_2_COLD 0xdb58
-#define USB_PID_DVICO_BLUEBIRD_DUAL_2_WARM 0xdb59
-#define USB_PID_DVICO_BLUEBIRD_DUAL_4 0xdb78
-#define USB_PID_DVICO_BLUEBIRD_DUAL_4_REV_2 0xdb98
-#define USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2 0xdb70
-#define USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM 0xdb71
-#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_COLD 0xdb54
-#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_WARM 0xdb55
-#define USB_PID_MEDION_MD95700 0x0932
-#define USB_PID_MSI_MEGASKY580 0x5580
-#define USB_PID_MSI_MEGASKY580_55801 0x5581
-#define USB_PID_KYE_DVB_T_COLD 0x701e
-#define USB_PID_KYE_DVB_T_WARM 0x701f
-#define USB_PID_LITEON_DVB_T_COLD 0xf000
-#define USB_PID_LITEON_DVB_T_WARM 0xf001
-#define USB_PID_DIGIVOX_MINI_SL_COLD 0xe360
-#define USB_PID_DIGIVOX_MINI_SL_WARM 0xe361
-#define USB_PID_GRANDTEC_DVBT_USB2_COLD 0x0bc6
-#define USB_PID_GRANDTEC_DVBT_USB2_WARM 0x0bc7
+#define USB_PID_TEVII_S421 0xd421
+#define USB_PID_TEVII_S480_1 0xd481
+#define USB_PID_TEVII_S480_2 0xd482
+#define USB_PID_TEVII_S482_1 0xd483
+#define USB_PID_TEVII_S482_2 0xd484
+#define USB_PID_TEVII_S630 0xd630
+#define USB_PID_TEVII_S632 0xd632
+#define USB_PID_TEVII_S650 0xd650
+#define USB_PID_TEVII_S660 0xd660
+#define USB_PID_TEVII_S662 0xd662
+#define USB_PID_TINYTWIN 0x3226
+#define USB_PID_TINYTWIN_2 0xe402
+#define USB_PID_TINYTWIN_3 0x9016
+#define USB_PID_TREKSTOR_DVBT 0x901b
+#define USB_PID_TREKSTOR_TERRES_2_0 0xC803
+#define USB_PID_TURBOX_DTT_2000 0xd3a4
+#define USB_PID_TWINHAN_VP7021_WARM 0x3208
+#define USB_PID_TWINHAN_VP7041_COLD 0x3201
+#define USB_PID_TWINHAN_VP7041_WARM 0x3202
+#define USB_PID_ULTIMA_ARTEC_T14BR 0x810f
+#define USB_PID_ULTIMA_ARTEC_T14_COLD 0x810b
+#define USB_PID_ULTIMA_ARTEC_T14_WARM 0x810c
+#define USB_PID_ULTIMA_TVBOX_AN2235_COLD 0x8107
+#define USB_PID_ULTIMA_TVBOX_AN2235_WARM 0x8108
+#define USB_PID_ULTIMA_TVBOX_ANCHOR_COLD 0x2235
+#define USB_PID_ULTIMA_TVBOX_COLD 0x8105
+#define USB_PID_ULTIMA_TVBOX_USB2_COLD 0x8109
+#define USB_PID_ULTIMA_TVBOX_USB2_FX_COLD 0x8613
+#define USB_PID_ULTIMA_TVBOX_USB2_FX_WARM 0x1002
+#define USB_PID_ULTIMA_TVBOX_USB2_WARM 0x810a
+#define USB_PID_ULTIMA_TVBOX_WARM 0x8106
+#define USB_PID_UNIWILL_STK7700P 0x6003
+#define USB_PID_UNK_HYPER_PALTEK_COLD 0x005e
+#define USB_PID_UNK_HYPER_PALTEK_WARM 0x005f
+#define USB_PID_VISIONPLUS_PINNACLE_PCTV310E 0x3211
+#define USB_PID_VISIONPLUS_TINYUSB2_COLD 0x3223
+#define USB_PID_VISIONPLUS_TINYUSB2_WARM 0x3224
+#define USB_PID_VISIONPLUS_VP7020_COLD 0x3203
+#define USB_PID_VISIONPLUS_VP7020_WARM 0x3204
+#define USB_PID_VISIONPLUS_VP7021_COLD 0x3207
+#define USB_PID_VISIONPLUS_VP7041_COLD 0x3201
+#define USB_PID_VISIONPLUS_VP7041_WARM 0x3202
+#define USB_PID_VISIONPLUS_VP7045_COLD 0x3205
+#define USB_PID_VISIONPLUS_VP7045_WARM 0x3206
+#define USB_PID_WIDEVIEW_DTT200U_COLD 0x0201
+#define USB_PID_WIDEVIEW_DTT200U_WARM 0x0301
+#define USB_PID_WIDEVIEW_DVBT_USB_COLD 0x0001
+#define USB_PID_WIDEVIEW_DVBT_USB_WARM 0x0002
+#define USB_PID_WIDEVIEW_WT220U_COLD 0x0222
+#define USB_PID_WIDEVIEW_WT220U_FC_COLD 0x0225
+#define USB_PID_WIDEVIEW_WT220U_FC_WARM 0x0226
+#define USB_PID_WIDEVIEW_WT220U_WARM 0x0221
+#define USB_PID_WIDEVIEW_WT220U_ZAP250_COLD 0x0220
+#define USB_PID_WIDEVIEW_WT220U_ZL0353_COLD 0x022a
+#define USB_PID_WIDEVIEW_WT220U_ZL0353_WARM 0x022b
#define USB_PID_WINFAST_DTV2000DS 0x6a04
#define USB_PID_WINFAST_DTV2000DS_PLUS 0x6f12
-#define USB_PID_WINFAST_DTV_DONGLE_COLD 0x6025
-#define USB_PID_WINFAST_DTV_DONGLE_WARM 0x6026
-#define USB_PID_WINFAST_DTV_DONGLE_STK7700P 0x6f00
-#define USB_PID_WINFAST_DTV_DONGLE_H 0x60f6
-#define USB_PID_WINFAST_DTV_DONGLE_STK7700P_2 0x6f01
-#define USB_PID_WINFAST_DTV_DONGLE_GOLD 0x6029
+#define USB_PID_WINFAST_DTV_DONGLE_GOLD 0x6029
#define USB_PID_WINFAST_DTV_DONGLE_MINID 0x6f0f
-#define USB_PID_GENPIX_8PSK_REV_1_COLD 0x0200
-#define USB_PID_GENPIX_8PSK_REV_1_WARM 0x0201
-#define USB_PID_GENPIX_8PSK_REV_2 0x0202
-#define USB_PID_GENPIX_SKYWALKER_1 0x0203
-#define USB_PID_GENPIX_SKYWALKER_CW3K 0x0204
-#define USB_PID_GENPIX_SKYWALKER_2 0x0206
-#define USB_PID_SIGMATEK_DVB_110 0x6610
-#define USB_PID_MSI_DIGI_VOX_MINI_II 0x1513
-#define USB_PID_MSI_DIGIVOX_DUO 0x8801
-#define USB_PID_OPERA1_COLD 0x2830
-#define USB_PID_OPERA1_WARM 0x3829
-#define USB_PID_LIFEVIEW_TV_WALKER_TWIN_COLD 0x0514
-#define USB_PID_LIFEVIEW_TV_WALKER_TWIN_WARM 0x0513
-#define USB_PID_GIGABYTE_U7000 0x7001
-#define USB_PID_GIGABYTE_U8000 0x7002
-#define USB_PID_ASUS_U3000 0x171f
-#define USB_PID_ASUS_U3000H 0x1736
-#define USB_PID_ASUS_U3100 0x173f
-#define USB_PID_ASUS_U3100MINI_PLUS 0x1779
+#define USB_PID_WINTV_SOLOHD 0x0264
+#define USB_PID_WINTV_SOLOHD_2 0x8268
+#define USB_PID_XTENSIONS_XD_380 0x0381
#define USB_PID_YUAN_EC372S 0x1edc
-#define USB_PID_YUAN_STK7700PH 0x1f08
-#define USB_PID_YUAN_PD378S 0x2edc
#define USB_PID_YUAN_MC770 0x0871
+#define USB_PID_YUAN_PD378S 0x2edc
#define USB_PID_YUAN_STK7700D 0x1efc
-#define USB_PID_YUAN_STK7700D_2 0x1e8c
-#define USB_PID_DW2102 0x2102
-#define USB_PID_DW2104 0x2104
-#define USB_PID_DW3101 0x3101
-#define USB_PID_XTENSIONS_XD_380 0x0381
-#define USB_PID_TELESTAR_STARSTICK_2 0x8000
-#define USB_PID_MSI_DIGI_VOX_MINI_III 0x8807
-#define USB_PID_SONY_PLAYTV 0x0003
-#define USB_PID_MYGICA_D689 0xd811
-#define USB_PID_MYGICA_T230 0xc688
-#define USB_PID_MYGICA_T230C 0xc689
-#define USB_PID_MYGICA_T230C2 0xc68a
-#define USB_PID_MYGICA_T230C_LITE 0xc699
-#define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011
-#define USB_PID_ELGATO_EYETV_DTT 0x0021
-#define USB_PID_ELGATO_EYETV_DTT_2 0x003f
-#define USB_PID_ELGATO_EYETV_DTT_Dlx 0x0020
-#define USB_PID_ELGATO_EYETV_SAT 0x002a
-#define USB_PID_ELGATO_EYETV_SAT_V2 0x0025
-#define USB_PID_ELGATO_EYETV_SAT_V3 0x0036
-#define USB_PID_DVB_T_USB_STICK_HIGH_SPEED_COLD 0x5000
-#define USB_PID_DVB_T_USB_STICK_HIGH_SPEED_WARM 0x5001
-#define USB_PID_FRIIO_WHITE 0x0001
-#define USB_PID_TVWAY_PLUS 0x0002
-#define USB_PID_SVEON_STV20 0xe39d
-#define USB_PID_SVEON_STV20_RTL2832U 0xd39d
-#define USB_PID_SVEON_STV21 0xd3b0
-#define USB_PID_SVEON_STV22 0xe401
-#define USB_PID_SVEON_STV22_IT9137 0xe411
-#define USB_PID_AZUREWAVE_AZ6027 0x3275
-#define USB_PID_TERRATEC_DVBS2CI_V1 0x10a4
-#define USB_PID_TERRATEC_DVBS2CI_V2 0x10ac
-#define USB_PID_TECHNISAT_USB2_HDCI_V1 0x0001
-#define USB_PID_TECHNISAT_USB2_HDCI_V2 0x0002
-#define USB_PID_TECHNISAT_USB2_CABLESTAR_HDCI 0x0003
-#define USB_PID_TECHNISAT_AIRSTAR_TELESTICK_2 0x0004
-#define USB_PID_TECHNISAT_USB2_DVB_S2 0x0500
-#define USB_PID_CPYTO_REDI_PC50A 0xa803
-#define USB_PID_CTVDIGDUAL_V2 0xe410
-#define USB_PID_PCTV_2002E 0x025c
-#define USB_PID_PCTV_2002E_SE 0x025d
-#define USB_PID_SVEON_STV27 0xd3af
-#define USB_PID_TURBOX_DTT_2000 0xd3a4
-#define USB_PID_WINTV_SOLOHD 0x0264
-#define USB_PID_WINTV_SOLOHD_2 0x8268
-#define USB_PID_EVOLVEO_XTRATV_STICK 0xa115
-#define USB_PID_HAMA_DVBT_HYBRID 0x2758
-#define USB_PID_XBOX_ONE_TUNER 0x02d5
-#define USB_PID_PROLECTRIX_DV107669 0xd803
+#define USB_PID_YUAN_STK7700D_2 0x1e8c
+#define USB_PID_YUAN_STK7700PH 0x1f08
+
#endif
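The DVB_USB_DEV() macro introduced above builds a usb_device_id entry at the array index named by its second argument, so a driver's ID table can be referenced symbolically by product. Note that the first parameter selects the USB_VID_* define even though it is named pid in the header. A minimal sketch of the intended usage, assuming a hypothetical driver (the enum and table names below are illustrative, not part of this patch):

    #include <linux/module.h>
    #include <linux/usb.h>
    #include <media/dvb-usb-ids.h>

    /* The enum supplies the array index consumed by DVB_USB_DEV(). */
    enum {
    	ANSONIC_DVBT_USB,
    };

    static const struct usb_device_id example_id_table[] = {
    	/* expands to [ANSONIC_DVBT_USB] =
    	 * { USB_DEVICE(USB_VID_ANSONIC, USB_PID_ANSONIC_DVBT_USB) } */
    	DVB_USB_DEV(ANSONIC, ANSONIC_DVBT_USB),
    	{ }	/* terminator */
    };
    MODULE_DEVICE_TABLE(usb, example_id_table);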
diff --git a/include/media/dvb_frontend.h b/include/media/dvb_frontend.h
index 0d76fa4551b3..e7c44870f20d 100644
--- a/include/media/dvb_frontend.h
+++ b/include/media/dvb_frontend.h
@@ -364,6 +364,10 @@ struct dvb_frontend_internal_info {
* allocated by the driver.
* @init: callback function used to initialize the tuner device.
* @sleep: callback function used to put the tuner to sleep.
+ * @suspend: callback function used to inform the demod that the kernel
+ * will suspend.
+ * @resume: callback function used to inform the demod that the kernel is
+ * resuming from suspend.
* @write: callback function used by some demod legacy drivers to
* allow other drivers to write data into their registers.
* Should not be used on new drivers.
@@ -443,6 +447,8 @@ struct dvb_frontend_ops {
int (*init)(struct dvb_frontend* fe);
int (*sleep)(struct dvb_frontend* fe);
+ int (*suspend)(struct dvb_frontend *fe);
+ int (*resume)(struct dvb_frontend *fe);
int (*write)(struct dvb_frontend* fe, const u8 buf[], int len);
@@ -755,7 +761,8 @@ void dvb_frontend_detach(struct dvb_frontend *fe);
* &dvb_frontend_ops.tuner_ops.suspend\(\) is available, it calls it. Otherwise,
* it will call &dvb_frontend_ops.tuner_ops.sleep\(\), if available.
*
- * It will also call &dvb_frontend_ops.sleep\(\) to put the demod to suspend.
+ * It will also call &dvb_frontend_ops.suspend\(\) to suspend the demod,
+ * if available. Otherwise it will call &dvb_frontend_ops.sleep\(\).
*
* The drivers should also call dvb_frontend_suspend\(\) as part of their
* handler for the &device_driver.suspend\(\).
@@ -769,7 +776,9 @@ int dvb_frontend_suspend(struct dvb_frontend *fe);
*
* This function resumes the usual operation of the tuner after resume.
*
- * In order to resume the frontend, it calls the demod &dvb_frontend_ops.init\(\).
+ * In order to resume the frontend, it calls the demod
+ * &dvb_frontend_ops.resume\(\) if available. Otherwise it calls the demod
+ * &dvb_frontend_ops.init\(\).
*
 * If &dvb_frontend_ops.tuner_ops.resume\(\) is available, it calls it.
 * Otherwise, it will call &dvb_frontend_ops.tuner_ops.init\(\), if available.
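With the new &dvb_frontend_ops.suspend\(\) and &dvb_frontend_ops.resume\(\) callbacks, a demod driver can opt into dedicated power-management paths instead of the sleep/init fallbacks. A hedged sketch of how a driver might wire them up (every example_* name here is hypothetical):

    #include <media/dvb_frontend.h>

    /* Hypothetical demod driver: when these callbacks are present,
     * dvb_frontend_suspend()/dvb_frontend_resume() call them instead
     * of falling back to .sleep()/.init(). */
    static int example_demod_suspend(struct dvb_frontend *fe)
    {
    	/* save state and power the demod down for system suspend */
    	return 0;
    }

    static int example_demod_resume(struct dvb_frontend *fe)
    {
    	/* restore register state after system resume */
    	return 0;
    }

    static const struct dvb_frontend_ops example_demod_ops = {
    	.info		= { .name = "Example demod" },
    	.suspend	= example_demod_suspend,
    	.resume		= example_demod_resume,
    };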
diff --git a/include/media/dvbdev.h b/include/media/dvbdev.h
index 551325858de3..2f6b0861322a 100644
--- a/include/media/dvbdev.h
+++ b/include/media/dvbdev.h
@@ -293,8 +293,8 @@ static inline void dvb_register_media_controller(struct dvb_adapter *adap,
*
* @adap: pointer to &struct dvb_adapter
*/
-static inline struct media_device
-*dvb_get_media_controller(struct dvb_adapter *adap)
+static inline struct media_device *
+dvb_get_media_controller(struct dvb_adapter *adap)
{
return adap->mdev;
}
@@ -321,7 +321,7 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
int dvb_generic_open(struct inode *inode, struct file *file);
/**
- * dvb_generic_close - Digital TV close function, used by DVB devices
+ * dvb_generic_release - Digital TV close function, used by DVB devices
*
* @inode: pointer to &struct inode.
* @file: pointer to &struct file.
@@ -385,7 +385,7 @@ struct i2c_client;
* with dvb_module_probe() should use dvb_module_release() to unbind.
*
* Return:
- * On success, return an &struct i2c_client, pointing the the bound
+ * On success, return an &struct i2c_client, pointing to the bound
* I2C device. %NULL otherwise.
*
* .. note::
@@ -421,7 +421,7 @@ void dvb_module_release(struct i2c_client *client);
* dvb_attach - attaches a DVB frontend into the DVB core.
*
* @FUNCTION: function on a frontend module to be called.
- * @ARGS...: @FUNCTION arguments.
+ * @ARGS: @FUNCTION arguments.
*
* This ancillary function loads a frontend module in runtime and runs
* the @FUNCTION function there, with @ARGS.
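Since dvb_attach\(\) resolves @FUNCTION at runtime and drops the module reference when the call returns NULL, callers only need to check the return value. A brief sketch under the assumption of a hypothetical attach symbol (example_demod_attach does not exist in the tree):

    #include <media/dvbdev.h>
    #include <media/dvb_frontend.h>

    /* Hypothetical attach symbol, as a real demod module would export. */
    extern struct dvb_frontend *example_demod_attach(struct i2c_adapter *i2c);

    static int example_bind_frontend(struct i2c_adapter *i2c,
    				     struct dvb_frontend **fe)
    {
    	/* Loads the module providing example_demod_attach and calls it;
    	 * NULL means the frontend could not be attached. */
    	*fe = dvb_attach(example_demod_attach, i2c);
    	return *fe ? 0 : -ENODEV;
    }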
diff --git a/include/media/frame_vector.h b/include/media/frame_vector.h
new file mode 100644
index 000000000000..bfed1710dc24
--- /dev/null
+++ b/include/media/frame_vector.h
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef _MEDIA_FRAME_VECTOR_H
+#define _MEDIA_FRAME_VECTOR_H
+
+/* Container for pinned pfns / pages in frame_vector.c */
+struct frame_vector {
+ unsigned int nr_allocated; /* Number of frames we have space for */
+ unsigned int nr_frames; /* Number of frames stored in ptrs array */
+ bool got_ref; /* Did we pin pages by getting page ref? */
+ bool is_pfns; /* Does array contain pages or pfns? */
+ void *ptrs[]; /* Array of pinned pfns / pages. Use
+ * pfns_vector_pages() or pfns_vector_pfns()
+ * for access */
+};
+
+struct frame_vector *frame_vector_create(unsigned int nr_frames);
+void frame_vector_destroy(struct frame_vector *vec);
+int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
+ struct frame_vector *vec);
+void put_vaddr_frames(struct frame_vector *vec);
+int frame_vector_to_pages(struct frame_vector *vec);
+void frame_vector_to_pfns(struct frame_vector *vec);
+
+static inline unsigned int frame_vector_count(struct frame_vector *vec)
+{
+ return vec->nr_frames;
+}
+
+static inline struct page **frame_vector_pages(struct frame_vector *vec)
+{
+ if (vec->is_pfns) {
+ int err = frame_vector_to_pages(vec);
+
+ if (err)
+ return ERR_PTR(err);
+ }
+ return (struct page **)(vec->ptrs);
+}
+
+static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
+{
+ if (!vec->is_pfns)
+ frame_vector_to_pfns(vec);
+ return (unsigned long *)(vec->ptrs);
+}
+
+#endif /* _MEDIA_FRAME_VECTOR_H */
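Taken together, the declarations above form a small pin/convert/release API. A minimal usage sketch, assuming the caller owns a valid userspace range (the helper name is illustrative):

    #include <linux/err.h>
    #include <media/frame_vector.h>

    /* Hypothetical helper: pin nr_frames user pages starting at 'start'.
     * The caller later undoes the pin with put_vaddr_frames() and frees
     * the container with frame_vector_destroy(). */
    static struct frame_vector *example_pin_user_range(unsigned long start,
    						       unsigned int nr_frames)
    {
    	struct frame_vector *vec;
    	int ret;

    	vec = frame_vector_create(nr_frames);
    	if (!vec)
    		return ERR_PTR(-ENOMEM);

    	/* On success, ret is the number of frames actually pinned. */
    	ret = get_vaddr_frames(start, nr_frames, vec);
    	if (ret < 0) {
    		frame_vector_destroy(vec);
    		return ERR_PTR(ret);
    	}

    	/* frame_vector_pages() converts stored pfns to struct page
    	 * pointers on demand. */
    	return vec;
    }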
diff --git a/include/media/fwht-ctrls.h b/include/media/fwht-ctrls.h
deleted file mode 100644
index 615027410e47..000000000000
--- a/include/media/fwht-ctrls.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * These are the FWHT state controls for use with stateless FWHT
- * codec drivers.
- *
- * It turns out that these structs are not stable yet and will undergo
- * more changes. So keep them private until they are stable and ready to
- * become part of the official public API.
- */
-
-#ifndef _FWHT_CTRLS_H_
-#define _FWHT_CTRLS_H_
-
-#define V4L2_CTRL_TYPE_FWHT_PARAMS 0x0105
-
-#define V4L2_CID_MPEG_VIDEO_FWHT_PARAMS (V4L2_CID_MPEG_BASE + 292)
-
-struct v4l2_ctrl_fwht_params {
- __u64 backward_ref_ts;
- __u32 version;
- __u32 width;
- __u32 height;
- __u32 flags;
- __u32 colorspace;
- __u32 xfer_func;
- __u32 ycbcr_enc;
- __u32 quantization;
-};
-
-
-#endif
diff --git a/include/media/h264-ctrls.h b/include/media/h264-ctrls.h
deleted file mode 100644
index e877bf1d537c..000000000000
--- a/include/media/h264-ctrls.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * These are the H.264 state controls for use with stateless H.264
- * codec drivers.
- *
- * It turns out that these structs are not stable yet and will undergo
- * more changes. So keep them private until they are stable and ready to
- * become part of the official public API.
- */
-
-#ifndef _H264_CTRLS_H_
-#define _H264_CTRLS_H_
-
-#include <linux/videodev2.h>
-
-/* Our pixel format isn't stable at the moment */
-#define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4') /* H264 parsed slices */
-
-/*
- * This is put insanely high to avoid conflicting with controls that
- * would be added during the phase where those controls are not
- * stable. It should be fixed eventually.
- */
-#define V4L2_CID_MPEG_VIDEO_H264_SPS (V4L2_CID_MPEG_BASE+1000)
-#define V4L2_CID_MPEG_VIDEO_H264_PPS (V4L2_CID_MPEG_BASE+1001)
-#define V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX (V4L2_CID_MPEG_BASE+1002)
-#define V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS (V4L2_CID_MPEG_BASE+1003)
-#define V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS (V4L2_CID_MPEG_BASE+1004)
-#define V4L2_CID_MPEG_VIDEO_H264_DECODE_MODE (V4L2_CID_MPEG_BASE+1005)
-#define V4L2_CID_MPEG_VIDEO_H264_START_CODE (V4L2_CID_MPEG_BASE+1006)
-
-/* enum v4l2_ctrl_type type values */
-#define V4L2_CTRL_TYPE_H264_SPS 0x0110
-#define V4L2_CTRL_TYPE_H264_PPS 0x0111
-#define V4L2_CTRL_TYPE_H264_SCALING_MATRIX 0x0112
-#define V4L2_CTRL_TYPE_H264_SLICE_PARAMS 0x0113
-#define V4L2_CTRL_TYPE_H264_DECODE_PARAMS 0x0114
-
-enum v4l2_mpeg_video_h264_decode_mode {
- V4L2_MPEG_VIDEO_H264_DECODE_MODE_SLICE_BASED,
- V4L2_MPEG_VIDEO_H264_DECODE_MODE_FRAME_BASED,
-};
-
-enum v4l2_mpeg_video_h264_start_code {
- V4L2_MPEG_VIDEO_H264_START_CODE_NONE,
- V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B,
-};
-
-#define V4L2_H264_SPS_CONSTRAINT_SET0_FLAG 0x01
-#define V4L2_H264_SPS_CONSTRAINT_SET1_FLAG 0x02
-#define V4L2_H264_SPS_CONSTRAINT_SET2_FLAG 0x04
-#define V4L2_H264_SPS_CONSTRAINT_SET3_FLAG 0x08
-#define V4L2_H264_SPS_CONSTRAINT_SET4_FLAG 0x10
-#define V4L2_H264_SPS_CONSTRAINT_SET5_FLAG 0x20
-
-#define V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE 0x01
-#define V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS 0x02
-#define V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO 0x04
-#define V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED 0x08
-#define V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY 0x10
-#define V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD 0x20
-#define V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE 0x40
-
-struct v4l2_ctrl_h264_sps {
- __u8 profile_idc;
- __u8 constraint_set_flags;
- __u8 level_idc;
- __u8 seq_parameter_set_id;
- __u8 chroma_format_idc;
- __u8 bit_depth_luma_minus8;
- __u8 bit_depth_chroma_minus8;
- __u8 log2_max_frame_num_minus4;
- __u8 pic_order_cnt_type;
- __u8 log2_max_pic_order_cnt_lsb_minus4;
- __u8 max_num_ref_frames;
- __u8 num_ref_frames_in_pic_order_cnt_cycle;
- __s32 offset_for_ref_frame[255];
- __s32 offset_for_non_ref_pic;
- __s32 offset_for_top_to_bottom_field;
- __u16 pic_width_in_mbs_minus1;
- __u16 pic_height_in_map_units_minus1;
- __u32 flags;
-};
-
-#define V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE 0x0001
-#define V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT 0x0002
-#define V4L2_H264_PPS_FLAG_WEIGHTED_PRED 0x0004
-#define V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT 0x0008
-#define V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED 0x0010
-#define V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT 0x0020
-#define V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE 0x0040
-#define V4L2_H264_PPS_FLAG_PIC_SCALING_MATRIX_PRESENT 0x0080
-
-struct v4l2_ctrl_h264_pps {
- __u8 pic_parameter_set_id;
- __u8 seq_parameter_set_id;
- __u8 num_slice_groups_minus1;
- __u8 num_ref_idx_l0_default_active_minus1;
- __u8 num_ref_idx_l1_default_active_minus1;
- __u8 weighted_bipred_idc;
- __s8 pic_init_qp_minus26;
- __s8 pic_init_qs_minus26;
- __s8 chroma_qp_index_offset;
- __s8 second_chroma_qp_index_offset;
- __u16 flags;
-};
-
-struct v4l2_ctrl_h264_scaling_matrix {
- __u8 scaling_list_4x4[6][16];
- __u8 scaling_list_8x8[6][64];
-};
-
-struct v4l2_h264_weight_factors {
- __s16 luma_weight[32];
- __s16 luma_offset[32];
- __s16 chroma_weight[32][2];
- __s16 chroma_offset[32][2];
-};
-
-struct v4l2_h264_pred_weight_table {
- __u16 luma_log2_weight_denom;
- __u16 chroma_log2_weight_denom;
- struct v4l2_h264_weight_factors weight_factors[2];
-};
-
-#define V4L2_H264_SLICE_TYPE_P 0
-#define V4L2_H264_SLICE_TYPE_B 1
-#define V4L2_H264_SLICE_TYPE_I 2
-#define V4L2_H264_SLICE_TYPE_SP 3
-#define V4L2_H264_SLICE_TYPE_SI 4
-
-#define V4L2_H264_SLICE_FLAG_FIELD_PIC 0x01
-#define V4L2_H264_SLICE_FLAG_BOTTOM_FIELD 0x02
-#define V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED 0x04
-#define V4L2_H264_SLICE_FLAG_SP_FOR_SWITCH 0x08
-
-struct v4l2_ctrl_h264_slice_params {
- /* Size in bytes, including header */
- __u32 size;
-
- /* Offset in bytes to the start of slice in the OUTPUT buffer. */
- __u32 start_byte_offset;
-
- /* Offset in bits to slice_data() from the beginning of this slice. */
- __u32 header_bit_size;
-
- __u16 first_mb_in_slice;
- __u8 slice_type;
- __u8 pic_parameter_set_id;
- __u8 colour_plane_id;
- __u8 redundant_pic_cnt;
- __u16 frame_num;
- __u16 idr_pic_id;
- __u16 pic_order_cnt_lsb;
- __s32 delta_pic_order_cnt_bottom;
- __s32 delta_pic_order_cnt0;
- __s32 delta_pic_order_cnt1;
-
- struct v4l2_h264_pred_weight_table pred_weight_table;
- /* Size in bits of dec_ref_pic_marking() syntax element. */
- __u32 dec_ref_pic_marking_bit_size;
- /* Size in bits of pic order count syntax. */
- __u32 pic_order_cnt_bit_size;
-
- __u8 cabac_init_idc;
- __s8 slice_qp_delta;
- __s8 slice_qs_delta;
- __u8 disable_deblocking_filter_idc;
- __s8 slice_alpha_c0_offset_div2;
- __s8 slice_beta_offset_div2;
- __u8 num_ref_idx_l0_active_minus1;
- __u8 num_ref_idx_l1_active_minus1;
- __u32 slice_group_change_cycle;
-
- /*
- * Entries on each list are indices into
- * v4l2_ctrl_h264_decode_params.dpb[].
- */
- __u8 ref_pic_list0[32];
- __u8 ref_pic_list1[32];
-
- __u32 flags;
-};
-
-#define V4L2_H264_DPB_ENTRY_FLAG_VALID 0x01
-#define V4L2_H264_DPB_ENTRY_FLAG_ACTIVE 0x02
-#define V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM 0x04
-
-struct v4l2_h264_dpb_entry {
- __u64 reference_ts;
- __u16 frame_num;
- __u16 pic_num;
- /* Note that field is indicated by v4l2_buffer.field */
- __s32 top_field_order_cnt;
- __s32 bottom_field_order_cnt;
- __u32 flags; /* V4L2_H264_DPB_ENTRY_FLAG_* */
-};
-
-#define V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC 0x01
-
-struct v4l2_ctrl_h264_decode_params {
- struct v4l2_h264_dpb_entry dpb[16];
- __u16 num_slices;
- __u16 nal_ref_idc;
- __s32 top_field_order_cnt;
- __s32 bottom_field_order_cnt;
- __u32 flags; /* V4L2_H264_DECODE_PARAM_FLAG_* */
-};
-
-#endif
diff --git a/include/media/hevc-ctrls.h b/include/media/hevc-ctrls.h
deleted file mode 100644
index 1009cf0891cc..000000000000
--- a/include/media/hevc-ctrls.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * These are the HEVC state controls for use with stateless HEVC
- * codec drivers.
- *
- * It turns out that these structs are not stable yet and will undergo
- * more changes. So keep them private until they are stable and ready to
- * become part of the official public API.
- */
-
-#ifndef _HEVC_CTRLS_H_
-#define _HEVC_CTRLS_H_
-
-#include <linux/videodev2.h>
-
-/* The pixel format isn't stable at the moment and will likely be renamed. */
-#define V4L2_PIX_FMT_HEVC_SLICE v4l2_fourcc('S', '2', '6', '5') /* HEVC parsed slices */
-
-#define V4L2_CID_MPEG_VIDEO_HEVC_SPS (V4L2_CID_MPEG_BASE + 1008)
-#define V4L2_CID_MPEG_VIDEO_HEVC_PPS (V4L2_CID_MPEG_BASE + 1009)
-#define V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS (V4L2_CID_MPEG_BASE + 1010)
-#define V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE (V4L2_CID_MPEG_BASE + 1015)
-#define V4L2_CID_MPEG_VIDEO_HEVC_START_CODE (V4L2_CID_MPEG_BASE + 1016)
-
-/* enum v4l2_ctrl_type type values */
-#define V4L2_CTRL_TYPE_HEVC_SPS 0x0120
-#define V4L2_CTRL_TYPE_HEVC_PPS 0x0121
-#define V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS 0x0122
-
-enum v4l2_mpeg_video_hevc_decode_mode {
- V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
- V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_FRAME_BASED,
-};
-
-enum v4l2_mpeg_video_hevc_start_code {
- V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE,
- V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B,
-};
-
-#define V4L2_HEVC_SLICE_TYPE_B 0
-#define V4L2_HEVC_SLICE_TYPE_P 1
-#define V4L2_HEVC_SLICE_TYPE_I 2
-
-#define V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE (1ULL << 0)
-#define V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED (1ULL << 1)
-#define V4L2_HEVC_SPS_FLAG_AMP_ENABLED (1ULL << 2)
-#define V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET (1ULL << 3)
-#define V4L2_HEVC_SPS_FLAG_PCM_ENABLED (1ULL << 4)
-#define V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED (1ULL << 5)
-#define V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT (1ULL << 6)
-#define V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED (1ULL << 7)
-#define V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED (1ULL << 8)
-
-/* The controls are not stable at the moment and will likely be reworked. */
-struct v4l2_ctrl_hevc_sps {
- /* ISO/IEC 23008-2, ITU-T Rec. H.265: Sequence parameter set */
- __u16 pic_width_in_luma_samples;
- __u16 pic_height_in_luma_samples;
- __u8 bit_depth_luma_minus8;
- __u8 bit_depth_chroma_minus8;
- __u8 log2_max_pic_order_cnt_lsb_minus4;
- __u8 sps_max_dec_pic_buffering_minus1;
- __u8 sps_max_num_reorder_pics;
- __u8 sps_max_latency_increase_plus1;
- __u8 log2_min_luma_coding_block_size_minus3;
- __u8 log2_diff_max_min_luma_coding_block_size;
- __u8 log2_min_luma_transform_block_size_minus2;
- __u8 log2_diff_max_min_luma_transform_block_size;
- __u8 max_transform_hierarchy_depth_inter;
- __u8 max_transform_hierarchy_depth_intra;
- __u8 pcm_sample_bit_depth_luma_minus1;
- __u8 pcm_sample_bit_depth_chroma_minus1;
- __u8 log2_min_pcm_luma_coding_block_size_minus3;
- __u8 log2_diff_max_min_pcm_luma_coding_block_size;
- __u8 num_short_term_ref_pic_sets;
- __u8 num_long_term_ref_pics_sps;
- __u8 chroma_format_idc;
-
- __u8 padding;
-
- __u64 flags;
-};
-
-#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT (1ULL << 0)
-#define V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT (1ULL << 1)
-#define V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED (1ULL << 2)
-#define V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT (1ULL << 3)
-#define V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED (1ULL << 4)
-#define V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED (1ULL << 5)
-#define V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED (1ULL << 6)
-#define V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT (1ULL << 7)
-#define V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED (1ULL << 8)
-#define V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED (1ULL << 9)
-#define V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED (1ULL << 10)
-#define V4L2_HEVC_PPS_FLAG_TILES_ENABLED (1ULL << 11)
-#define V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED (1ULL << 12)
-#define V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED (1ULL << 13)
-#define V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 14)
-#define V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED (1ULL << 15)
-#define V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER (1ULL << 16)
-#define V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT (1ULL << 17)
-#define V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT (1ULL << 18)
-
-struct v4l2_ctrl_hevc_pps {
- /* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture parameter set */
- __u8 num_extra_slice_header_bits;
- __s8 init_qp_minus26;
- __u8 diff_cu_qp_delta_depth;
- __s8 pps_cb_qp_offset;
- __s8 pps_cr_qp_offset;
- __u8 num_tile_columns_minus1;
- __u8 num_tile_rows_minus1;
- __u8 column_width_minus1[20];
- __u8 row_height_minus1[22];
- __s8 pps_beta_offset_div2;
- __s8 pps_tc_offset_div2;
- __u8 log2_parallel_merge_level_minus2;
-
- __u8 padding[4];
- __u64 flags;
-};
-
-#define V4L2_HEVC_DPB_ENTRY_RPS_ST_CURR_BEFORE 0x01
-#define V4L2_HEVC_DPB_ENTRY_RPS_ST_CURR_AFTER 0x02
-#define V4L2_HEVC_DPB_ENTRY_RPS_LT_CURR 0x03
-
-#define V4L2_HEVC_DPB_ENTRIES_NUM_MAX 16
-
-struct v4l2_hevc_dpb_entry {
- __u64 timestamp;
- __u8 rps;
- __u8 field_pic;
- __u16 pic_order_cnt[2];
- __u8 padding[2];
-};
-
-struct v4l2_hevc_pred_weight_table {
- __s8 delta_luma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
- __s8 luma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
- __s8 delta_chroma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
- __s8 chroma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
-
- __s8 delta_luma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
- __s8 luma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
- __s8 delta_chroma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
- __s8 chroma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
-
- __u8 padding[6];
-
- __u8 luma_log2_weight_denom;
- __s8 delta_chroma_log2_weight_denom;
-};
-
-#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_LUMA (1ULL << 0)
-#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_CHROMA (1ULL << 1)
-#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_TEMPORAL_MVP_ENABLED (1ULL << 2)
-#define V4L2_HEVC_SLICE_PARAMS_FLAG_MVD_L1_ZERO (1ULL << 3)
-#define V4L2_HEVC_SLICE_PARAMS_FLAG_CABAC_INIT (1ULL << 4)
-#define V4L2_HEVC_SLICE_PARAMS_FLAG_COLLOCATED_FROM_L0 (1ULL << 5)
-#define V4L2_HEVC_SLICE_PARAMS_FLAG_USE_INTEGER_MV (1ULL << 6)
-#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED (1ULL << 7)
-#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 8)
-
-struct v4l2_ctrl_hevc_slice_params {
- __u32 bit_size;
- __u32 data_bit_offset;
-
- /* ISO/IEC 23008-2, ITU-T Rec. H.265: NAL unit header */
- __u8 nal_unit_type;
- __u8 nuh_temporal_id_plus1;
-
- /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
- __u8 slice_type;
- __u8 colour_plane_id;
- __u16 slice_pic_order_cnt;
- __u8 num_ref_idx_l0_active_minus1;
- __u8 num_ref_idx_l1_active_minus1;
- __u8 collocated_ref_idx;
- __u8 five_minus_max_num_merge_cand;
- __s8 slice_qp_delta;
- __s8 slice_cb_qp_offset;
- __s8 slice_cr_qp_offset;
- __s8 slice_act_y_qp_offset;
- __s8 slice_act_cb_qp_offset;
- __s8 slice_act_cr_qp_offset;
- __s8 slice_beta_offset_div2;
- __s8 slice_tc_offset_div2;
-
- /* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture timing SEI message */
- __u8 pic_struct;
-
- /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
- __u8 num_active_dpb_entries;
- __u8 ref_idx_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
- __u8 ref_idx_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
-
- __u8 num_rps_poc_st_curr_before;
- __u8 num_rps_poc_st_curr_after;
- __u8 num_rps_poc_lt_curr;
-
- __u8 padding;
-
- /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
- struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
-
- /* ISO/IEC 23008-2, ITU-T Rec. H.265: Weighted prediction parameter */
- struct v4l2_hevc_pred_weight_table pred_weight_table;
-
- __u64 flags;
-};
-
-#endif
diff --git a/include/media/i2c/adv7343.h b/include/media/i2c/adv7343.h
index e4142b1ef8cd..d35d3e925795 100644
--- a/include/media/i2c/adv7343.h
+++ b/include/media/i2c/adv7343.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ADV7343 header file
*
* Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed .as is. WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef ADV7343_H
@@ -21,7 +13,7 @@
#define ADV7343_SVIDEO_ID (2)
/**
- * adv7343_power_mode - power mode configuration.
+ * struct adv7343_power_mode - power mode configuration.
* @sleep_mode: on enable the current consumption is reduced to micro ampere
* level. All DACs and the internal PLL circuit are disabled.
* Registers can be read from and written in sleep mode.
diff --git a/include/media/i2c/adv7393.h b/include/media/i2c/adv7393.h
index b28edf351842..c73b36321d06 100644
--- a/include/media/i2c/adv7393.h
+++ b/include/media/i2c/adv7393.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ADV7393 header file
*
@@ -7,15 +8,6 @@
* Based on ADV7343 driver,
*
* Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed .as is. WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef ADV7393_H
diff --git a/include/media/i2c/ir-kbd-i2c.h b/include/media/i2c/ir-kbd-i2c.h
index 9f47d6a48cff..0b58f8b9e7a4 100644
--- a/include/media/i2c/ir-kbd-i2c.h
+++ b/include/media/i2c/ir-kbd-i2c.h
@@ -35,6 +35,7 @@ enum ir_kbd_get_key_fn {
IR_KBD_GET_KEY_PIXELVIEW,
IR_KBD_GET_KEY_HAUP,
IR_KBD_GET_KEY_KNC1,
+ IR_KBD_GET_KEY_GENIATECH,
IR_KBD_GET_KEY_FUSIONHDTV,
IR_KBD_GET_KEY_HAUP_XVR,
IR_KBD_GET_KEY_AVERMEDIA_CARDBUS,
diff --git a/include/media/i2c/m5mols.h b/include/media/i2c/m5mols.h
index 9cec5a09e125..a56ae353c891 100644
--- a/include/media/i2c/m5mols.h
+++ b/include/media/i2c/m5mols.h
@@ -14,15 +14,11 @@
/**
* struct m5mols_platform_data - platform data for M-5MOLS driver
- * @gpio_reset: GPIO driving the reset pin of M-5MOLS
- * @reset_polarity: active state for gpio_reset pin, 0 or 1
* @set_power: an additional callback to the board setup code
* to be called after enabling and before disabling
* the sensor's supply regulators
*/
struct m5mols_platform_data {
- int gpio_reset;
- u8 reset_polarity;
int (*set_power)(struct device *dev, int on);
};
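
For context on the callback that remains after the GPIO fields were dropped: a minimal sketch of board code filling in @set_power, assuming a hypothetical board file (the function and variable names here are illustrative, not from any in-tree board):

#include <linux/device.h>
#include <media/i2c/m5mols.h>

/* Hypothetical board hook, called after the sensor's supply regulators
 * are enabled (on != 0) and before they are disabled (on == 0). */
static int board_m5mols_set_power(struct device *dev, int on)
{
	/* Board-specific sequencing (enable lines, shared clocks) goes here. */
	dev_dbg(dev, "m5mols power %s\n", on ? "on" : "off");
	return 0;
}

static struct m5mols_platform_data board_m5mols_pdata = {
	.set_power = board_m5mols_set_power,
};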
diff --git a/include/media/i2c/mt9p031.h b/include/media/i2c/mt9p031.h
index 7c29c53aa988..f933cd0be8e5 100644
--- a/include/media/i2c/mt9p031.h
+++ b/include/media/i2c/mt9p031.h
@@ -10,6 +10,7 @@ struct v4l2_subdev;
* @target_freq: Pixel clock frequency
*/
struct mt9p031_platform_data {
+ unsigned int pixclk_pol:1;
int ext_freq;
int target_freq;
};
diff --git a/include/media/i2c/mt9t112.h b/include/media/i2c/mt9t112.h
index cc80d5cc2104..825b4a169da8 100644
--- a/include/media/i2c/mt9t112.h
+++ b/include/media/i2c/mt9t112.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 */
/* mt9t112 Camera
*
* Copyright (C) 2009 Renesas Solutions Corp.
@@ -14,7 +14,7 @@ struct mt9t112_pll_divider {
};
/**
- * mt9t112_platform_data - mt9t112 driver interface
+ * struct mt9t112_platform_data - mt9t112 driver interface
* @flags: Sensor media bus configuration.
* @divider: Sensor PLL configuration
*/
diff --git a/include/media/i2c/noon010pc30.h b/include/media/i2c/noon010pc30.h
index a035d2d9a564..1880dad25cf0 100644
--- a/include/media/i2c/noon010pc30.h
+++ b/include/media/i2c/noon010pc30.h
@@ -10,15 +10,12 @@
#define NOON010PC30_H
/**
+ * struct noon010pc30_platform_data - platform data
* @clk_rate: the clock frequency in Hz
- * @gpio_nreset: GPIO driving nRESET pin
- * @gpio_nstby: GPIO driving nSTBY pin
*/
struct noon010pc30_platform_data {
unsigned long clk_rate;
- int gpio_nreset;
- int gpio_nstby;
};
#endif /* NOON010PC30_H */
diff --git a/include/media/i2c/ov2659.h b/include/media/i2c/ov2659.h
index 4216adc1ede2..c9ea318a8fc3 100644
--- a/include/media/i2c/ov2659.h
+++ b/include/media/i2c/ov2659.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Omnivision OV2659 CMOS Image Sensor driver
*
@@ -5,19 +6,6 @@
*
* Benoit Parrot <bparrot@ti.com>
* Lad, Prabhakar <prabhakar.csengg@gmail.com>
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef OV2659_H
diff --git a/include/media/i2c/ov772x.h b/include/media/i2c/ov772x.h
index a1702d420087..26f363ea4001 100644
--- a/include/media/i2c/ov772x.h
+++ b/include/media/i2c/ov772x.h
@@ -46,7 +46,7 @@ struct ov772x_edge_ctrl {
}
/**
- * ov772x_camera_info - ov772x driver interface structure
+ * struct ov772x_camera_info - ov772x driver interface structure
* @flags: Sensor configuration flags
* @edgectrl: Sensor edge control
*/
diff --git a/include/media/i2c/s5c73m3.h b/include/media/i2c/s5c73m3.h
index ccb9e5448762..a51f1025ba1c 100644
--- a/include/media/i2c/s5c73m3.h
+++ b/include/media/i2c/s5c73m3.h
@@ -35,6 +35,7 @@ struct s5c73m3_gpio {
* @mclk_frequency: sensor's master clock frequency in Hz
* @gpio_reset: GPIO driving RESET pin
* @gpio_stby: GPIO driving STBY pin
+ * @bus_type: bus type
* @nlanes: maximum number of MIPI-CSI lanes used
* @horiz_flip: default horizontal image flip value, non zero to enable
* @vert_flip: default vertical image flip value, non zero to enable
diff --git a/include/media/i2c/s5k4ecgx.h b/include/media/i2c/s5k4ecgx.h
index fccb7be8ed8f..92202eb35249 100644
--- a/include/media/i2c/s5k4ecgx.h
+++ b/include/media/i2c/s5k4ecgx.h
@@ -11,7 +11,7 @@
/**
* struct s5k4ecgx_gpio - data structure describing a GPIO
- * @gpio : GPIO number
+ * @gpio: GPIO number
* @level: indicates active state of the @gpio
*/
struct s5k4ecgx_gpio {
@@ -20,9 +20,9 @@ struct s5k4ecgx_gpio {
};
/**
- * struct ss5k4ecgx_platform_data- s5k4ecgx driver platform data
+ * struct s5k4ecgx_platform_data - s5k4ecgx driver platform data
* @gpio_reset: GPIO driving RESET pin
- * @gpio_stby : GPIO driving STBY pin
+ * @gpio_stby: GPIO driving STBY pin
*/
struct s5k4ecgx_platform_data {
diff --git a/include/media/i2c/s5k6aa.h b/include/media/i2c/s5k6aa.h
index fd78e85e8b78..eb3444d8b731 100644
--- a/include/media/i2c/s5k6aa.h
+++ b/include/media/i2c/s5k6aa.h
@@ -28,6 +28,7 @@ struct s5k6aa_gpio {
* @mclk_frequency: sensor's master clock frequency in Hz
* @gpio_reset: GPIO driving RESET pin
* @gpio_stby: GPIO driving STBY pin
+ * @bus_type: bus type
* @nlanes: maximum number of MIPI-CSI lanes used
* @horiz_flip: default horizontal image flip value, non zero to enable
* @vert_flip: default vertical image flip value, non zero to enable
diff --git a/include/media/i2c/smiapp.h b/include/media/i2c/smiapp.h
deleted file mode 100644
index 80f8251d87a3..000000000000
--- a/include/media/i2c/smiapp.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * include/media/i2c/smiapp.h
- *
- * Generic driver for SMIA/SMIA++ compliant camera modules
- *
- * Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@iki.fi>
- */
-
-#ifndef __SMIAPP_H_
-#define __SMIAPP_H_
-
-#include <media/v4l2-subdev.h>
-
-#define SMIAPP_NAME "smiapp"
-
-#define SMIAPP_DFL_I2C_ADDR (0x20 >> 1) /* Default I2C Address */
-#define SMIAPP_ALT_I2C_ADDR (0x6e >> 1) /* Alternate I2C Address */
-
-#define SMIAPP_CSI_SIGNALLING_MODE_CCP2_DATA_CLOCK 0
-#define SMIAPP_CSI_SIGNALLING_MODE_CCP2_DATA_STROBE 1
-#define SMIAPP_CSI_SIGNALLING_MODE_CSI2 2
-
-/*
- * Sometimes due to board layout considerations the camera module can be
- * mounted rotated. The typical rotation used is 180 degrees which can be
- * corrected by giving a default H-FLIP and V-FLIP in the sensor readout.
- * FIXME: rotation also changes the bayer pattern.
- */
-enum smiapp_module_board_orient {
- SMIAPP_MODULE_BOARD_ORIENT_0 = 0,
- SMIAPP_MODULE_BOARD_ORIENT_180,
-};
-
-struct smiapp_flash_strobe_parms {
- u8 mode;
- u32 strobe_width_high_us;
- u16 strobe_delay;
- u16 stobe_start_point;
- u8 trigger;
-};
-
-struct smiapp_hwconfig {
- /*
- * Change the cci address if i2c_addr_alt is set.
- * Both default and alternate cci addr need to be present
- */
- unsigned short i2c_addr_dfl; /* Default i2c addr */
- unsigned short i2c_addr_alt; /* Alternate i2c addr */
-
- uint32_t ext_clk; /* sensor external clk */
-
- unsigned int lanes; /* Number of CSI-2 lanes */
- uint32_t csi_signalling_mode; /* SMIAPP_CSI_SIGNALLING_MODE_* */
- uint64_t *op_sys_clock;
-
- enum smiapp_module_board_orient module_board_orient;
-
- struct smiapp_flash_strobe_parms *strobe_setup;
-};
-
-#endif /* __SMIAPP_H_ */
diff --git a/include/media/i2c/tvp514x.h b/include/media/i2c/tvp514x.h
index 0c1bb04bdbcb..837efff0a6a0 100644
--- a/include/media/i2c/tvp514x.h
+++ b/include/media/i2c/tvp514x.h
@@ -29,10 +29,7 @@
#define PAL_NUM_ACTIVE_PIXELS (720)
#define PAL_NUM_ACTIVE_LINES (576)
-/**
- * enum tvp514x_input - enum for different decoder input pin
- * configuration.
- */
+/* enum for different decoder input pin configuration */
enum tvp514x_input {
/*
* CVBS input selection
@@ -69,11 +66,7 @@ enum tvp514x_input {
INPUT_INVALID
};
-/**
- * enum tvp514x_output - enum for output format
- * supported.
- *
- */
+/* enum for supported output formats */
enum tvp514x_output {
OUTPUT_10BIT_422_EMBEDDED_SYNC = 0,
OUTPUT_20BIT_422_SEPERATE_SYNC,
diff --git a/include/media/i2c/tw9910.h b/include/media/i2c/tw9910.h
index 92d31bd1afe6..77da94f909e3 100644
--- a/include/media/i2c/tw9910.h
+++ b/include/media/i2c/tw9910.h
@@ -13,9 +13,7 @@
#ifndef __TW9910_H__
#define __TW9910_H__
-/**
- * tw9910_mpout_pin - MPOUT (multi-purpose output) pin functions
- */
+/* MPOUT (multi-purpose output) pin functions */
enum tw9910_mpout_pin {
TW9910_MPO_VLOSS,
TW9910_MPO_HLOCK,
@@ -28,10 +26,10 @@ enum tw9910_mpout_pin {
};
/**
- * tw9910_video_info - tw9910 driver interface structure
+ * struct tw9910_video_info - tw9910 driver interface structure
* @buswidth: Parallel data bus width (8 or 16).
* @mpout: Selected function of MPOUT (multi-purpose output) pin.
- * See &enum tw9910_mpout_pin
+ * See enum tw9910_mpout_pin
*/
struct tw9910_video_info {
unsigned long buswidth;
diff --git a/include/media/i2c/wm8775.h b/include/media/i2c/wm8775.h
index 83675817639e..6ccdeb3817ab 100644
--- a/include/media/i2c/wm8775.h
+++ b/include/media/i2c/wm8775.h
@@ -23,7 +23,7 @@
struct wm8775_platform_data {
/*
- * FIXME: Instead, we should parametrize the params
+ * FIXME: Instead, we should parameterize the params
* that need different settings between ivtv, pvrusb2, and Nova-S
*/
bool is_nova_s;
diff --git a/include/media/media-dev-allocator.h b/include/media/media-dev-allocator.h
index b35ea6062596..2ab54d426c64 100644
--- a/include/media/media-dev-allocator.h
+++ b/include/media/media-dev-allocator.h
@@ -19,7 +19,7 @@
struct usb_device;
-#if defined(CONFIG_MEDIA_CONTROLLER) && defined(CONFIG_USB)
+#if defined(CONFIG_MEDIA_CONTROLLER) && IS_ENABLED(CONFIG_USB)
/**
* media_device_usb_allocate() - Allocate and return struct &media device
*
diff --git a/include/media/media-device.h b/include/media/media-device.h
index fa0895430720..86716ee7cc6c 100644
--- a/include/media/media-device.h
+++ b/include/media/media-device.h
@@ -13,12 +13,13 @@
#include <linux/list.h>
#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
#include <media/media-devnode.h>
#include <media/media-entity.h>
struct ida;
-struct device;
struct media_device;
/**
@@ -128,7 +129,7 @@ struct media_device_ops {
*
* Use-case: find tuner entity connected to the decoder
* entity and check if it is available, and activate the
- * the link between them from @enable_source and deactivate
+ * link between them from @enable_source and deactivate
* from @disable_source.
*
* .. note::
@@ -181,8 +182,7 @@ struct media_device {
atomic_t request_id;
};
-/* We don't need to include pci.h or usb.h here */
-struct pci_dev;
+/* We don't need to include usb.h here */
struct usb_device;
#ifdef CONFIG_MEDIA_CONTROLLER
@@ -192,21 +192,6 @@ struct usb_device;
#define MEDIA_DEV_NOTIFY_POST_LINK_CH 1
/**
- * media_entity_enum_init - Initialise an entity enumeration
- *
- * @ent_enum: Entity enumeration to be initialised
- * @mdev: The related media device
- *
- * Return: zero on success or a negative error code.
- */
-static inline __must_check int media_entity_enum_init(
- struct media_entity_enum *ent_enum, struct media_device *mdev)
-{
- return __media_entity_enum_init(ent_enum,
- mdev->entity_internal_idx_max + 1);
-}
-
-/**
* media_device_init() - Initializes a media device element
*
* @mdev: pointer to struct &media_device
@@ -219,6 +204,15 @@ static inline __must_check int media_entity_enum_init(
* So drivers need to first initialize the media device, register any entity
* within the media device, create pad to pad links and then finally register
* the media device by calling media_device_register() as a final step.
+ *
+ * The caller is responsible for initializing the media device before
+ * registration. The following fields must be set:
+ *
+ * - dev must point to the parent device
+ * - model must be filled with the device model name
+ *
+ * The bus_info field is set by media_device_init() for PCI and platform devices
+ * if the field begins with '\0'.
*/
void media_device_init(struct media_device *mdev);
@@ -243,28 +237,25 @@ void media_device_cleanup(struct media_device *mdev);
* The caller is responsible for initializing the &media_device structure
* before registration. The following fields of &media_device must be set:
*
- * - &media_entity.dev must point to the parent device (usually a &pci_dev,
- * &usb_interface or &platform_device instance).
- *
- * - &media_entity.model must be filled with the device model name as a
+ * - &media_device.model must be filled with the device model name as a
* NUL-terminated UTF-8 string. The device/model revision must not be
* stored in this field.
*
* The following fields are optional:
*
- * - &media_entity.serial is a unique serial number stored as a
+ * - &media_device.serial is a unique serial number stored as a
* NUL-terminated ASCII string. The field is big enough to store a GUID
* in text form. If the hardware doesn't provide a unique serial number
* this field must be left empty.
*
- * - &media_entity.bus_info represents the location of the device in the
+ * - &media_device.bus_info represents the location of the device in the
* system as a NUL-terminated ASCII string. For PCI/PCIe devices
- * &media_entity.bus_info must be set to "PCI:" (or "PCIe:") followed by
+ * &media_device.bus_info must be set to "PCI:" (or "PCIe:") followed by
 * the value of pci_name(). For USB devices, the usb_make_path() function
* must be used. This field is used by applications to distinguish between
* otherwise identical devices that don't provide a serial number.
*
- * - &media_entity.hw_revision is the hardware device revision in a
+ * - &media_device.hw_revision is the hardware device revision in a
* driver-specific format. When possible the revision should be formatted
* with the KERNEL_VERSION() macro.
*
@@ -496,4 +487,27 @@ static inline void __media_device_usb_init(struct media_device *mdev,
#define media_device_usb_init(mdev, udev, name) \
__media_device_usb_init(mdev, udev, name, KBUILD_MODNAME)
+/**
+ * media_set_bus_info() - Set bus_info field
+ *
+ * @bus_info: Variable where to write the bus info (char array)
+ * @bus_info_size: Length of the bus_info
+ * @dev: Related struct device
+ *
+ * Sets bus information based on @dev. This is currently done for PCI and
+ * platform devices. dev is required to be non-NULL for this to happen.
+ *
+ * This function is not meant to be called from drivers.
+ */
+static inline void
+media_set_bus_info(char *bus_info, size_t bus_info_size, struct device *dev)
+{
+ if (!dev)
+ strscpy(bus_info, "no bus info", bus_info_size);
+ else if (dev_is_platform(dev))
+ snprintf(bus_info, bus_info_size, "platform:%s", dev_name(dev));
+ else if (dev_is_pci(dev))
+ snprintf(bus_info, bus_info_size, "PCI:%s", dev_name(dev));
+}
+
#endif
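
To make the initialization contract above concrete, a minimal sketch of a platform driver's probe path; the driver structure and names (mydrv) are hypothetical, and only the documented fields are touched before media_device_init():

#include <linux/platform_device.h>
#include <linux/string.h>
#include <media/media-device.h>

/* Hypothetical driver state embedding the media device. */
struct mydrv {
	struct media_device mdev;
};

static int mydrv_media_init(struct platform_device *pdev, struct mydrv *drv)
{
	/* Per the contract above: dev and model must be set first. */
	drv->mdev.dev = &pdev->dev;
	strscpy(drv->mdev.model, "mydrv", sizeof(drv->mdev.model));

	/* bus_info starts zeroed, so media_device_init() fills it in as
	 * "platform:<dev_name>" via media_set_bus_info(). */
	media_device_init(&drv->mdev);

	/* Entities and pad-to-pad links would be registered here. */
	return media_device_register(&drv->mdev);
}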
diff --git a/include/media/media-devnode.h b/include/media/media-devnode.h
index 6393842c6b21..d27c1c646c28 100644
--- a/include/media/media-devnode.h
+++ b/include/media/media-devnode.h
@@ -39,7 +39,7 @@ struct media_device;
* @poll: pointer to the function that implements poll() syscall
* @ioctl: pointer to the function that implements ioctl() syscall
* @compat_ioctl: pointer to the function that will handle 32 bits userspace
- * calls to the the ioctl() syscall on a Kernel compiled with 64 bits.
+ * calls to the ioctl() syscall on a Kernel compiled with 64 bits.
* @open: pointer to the function that implements open() syscall
* @release: pointer to the function that will release the resources allocated
* by the @open function.
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 8cb2c504a05c..28c9de8a1f34 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -13,10 +13,12 @@
#include <linux/bitmap.h>
#include <linux/bug.h>
+#include <linux/container_of.h>
#include <linux/fwnode.h>
-#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/media.h>
+#include <linux/minmax.h>
+#include <linux/types.h>
/* Enums used internally at the media controller to represent graphs */
@@ -98,12 +100,34 @@ struct media_graph {
/**
* struct media_pipeline - Media pipeline related information
*
- * @streaming_count: Streaming start count - streaming stop count
- * @graph: Media graph walk during pipeline start / stop
+ * @allocated: Media pipeline allocated and freed by the framework
+ * @mdev: The media device the pipeline is part of
+ * @pads: List of media_pipeline_pad
+ * @start_count: Media pipeline start - stop count
*/
struct media_pipeline {
- int streaming_count;
- struct media_graph graph;
+ bool allocated;
+ struct media_device *mdev;
+ struct list_head pads;
+ int start_count;
+};
+
+/**
+ * struct media_pipeline_pad - A pad part of a media pipeline
+ *
+ * @list: Entry in the media_pipeline pads list
+ * @pipe: The media_pipeline that the pad is part of
+ * @pad: The media pad
+ *
+ * This structure associates a pad with a media pipeline. Instances of
+ * media_pipeline_pad are created by media_pipeline_start() when it builds the
+ * pipeline, and stored in the &media_pipeline.pads list. media_pipeline_stop()
+ * removes the entries from the list and deletes them.
+ */
+struct media_pipeline_pad {
+ struct list_head list;
+ struct media_pipeline *pipe;
+ struct media_pad *pad;
};
/**
@@ -155,7 +179,7 @@ struct media_link {
* uniquely identified by the pad number.
* @PAD_SIGNAL_ANALOG:
* The pad contains an analog signal. It can be Radio Frequency,
- * Intermediate Frequency, a baseband signal or sub-cariers.
+ * Intermediate Frequency, a baseband signal or sub-carriers.
* Tuner inputs, IF-PLL demodulators, composite and s-video signals
* should use it.
* @PAD_SIGNAL_DV:
@@ -185,6 +209,8 @@ enum media_pad_signal_type {
* @flags: Pad flags, as defined in
* :ref:`include/uapi/linux/media.h <media_header>`
* (seek for ``MEDIA_PAD_FL_*``)
+ * @pipe: Pipeline this pad belongs to. Use media_entity_pipeline() to
+ * access this field.
*/
struct media_pad {
struct media_gobj graph_obj; /* must be first field in struct */
@@ -192,6 +218,12 @@ struct media_pad {
u16 index;
enum media_pad_signal_type sig_type;
unsigned long flags;
+
+ /*
+ * The fields below are private, and should only be accessed via
+ * appropriate functions.
+ */
+ struct media_pipeline *pipe;
};
/**
@@ -205,6 +237,14 @@ struct media_pad {
* @link_validate: Return whether a link is valid from the entity point of
* view. The media_pipeline_start() function
* validates all links by calling this operation. Optional.
+ * @has_pad_interdep: Return whether two pads inside the entity are
+ * interdependent. If two pads are interdependent they are
+ * part of the same pipeline and enabling one of the pads
+ * means that the other pad will become "locked" and
+ * doesn't allow configuration changes. pad0 and pad1 are
+ * guaranteed to not both be sinks or sources.
+ * Optional: If the operation isn't implemented all pads
+ * will be considered as interdependent.
*
* .. note::
*
@@ -212,11 +252,14 @@ struct media_pad {
* mutex held.
*/
struct media_entity_operations {
- int (*get_fwnode_pad)(struct fwnode_endpoint *endpoint);
+ int (*get_fwnode_pad)(struct media_entity *entity,
+ struct fwnode_endpoint *endpoint);
int (*link_setup)(struct media_entity *entity,
const struct media_pad *local,
const struct media_pad *remote, u32 flags);
int (*link_validate)(struct media_link *link);
+ bool (*has_pad_interdep)(struct media_entity *entity, unsigned int pad0,
+ unsigned int pad1);
};
/**
@@ -266,9 +309,7 @@ enum media_entity_type {
* @pads: Pads array with the size defined by @num_pads.
* @links: List of data links.
* @ops: Entity operations.
- * @stream_count: Stream count for the entity.
* @use_count: Use count for the entity.
- * @pipe: Pipeline this entity belongs to.
* @info: Union with devnode information. Kept just for backward
* compatibility.
* @info.dev: Contains device major and minor info.
@@ -281,10 +322,9 @@ enum media_entity_type {
*
* .. note::
*
- * @stream_count and @use_count reference counts must never be
- * negative, but are signed integers on purpose: a simple ``WARN_ON(<0)``
- * check can be used to detect reference count bugs that would make them
- * negative.
+ * The @use_count reference count must never be negative, but is a signed
+ * integer on purpose: a simple ``WARN_ON(<0)`` check can be used to detect
+ * reference count bugs that would make it negative.
*/
struct media_entity {
struct media_gobj graph_obj; /* must be first field in struct */
@@ -303,11 +343,8 @@ struct media_entity {
const struct media_entity_operations *ops;
- int stream_count;
int use_count;
- struct media_pipeline *pipe;
-
union {
struct {
u32 major;
@@ -317,6 +354,18 @@ struct media_entity {
};
/**
+ * media_entity_for_each_pad - Iterate on all pads in an entity
+ * @entity: The entity the pads belong to
+ * @iter: The iterator pad
+ *
+ * Iterate on all pads in a media entity.
+ */
+#define media_entity_for_each_pad(entity, iter) \
+ for (iter = (entity)->pads; \
+ iter < &(entity)->pads[(entity)->num_pads]; \
+ ++iter)
+
+/**
* struct media_interface - A media interface graph object.
*
* @graph_obj: embedded graph object
@@ -427,15 +476,15 @@ static inline bool is_media_entity_v4l2_subdev(struct media_entity *entity)
}
/**
- * __media_entity_enum_init - Initialise an entity enumeration
+ * media_entity_enum_init - Initialise an entity enumeration
*
* @ent_enum: Entity enumeration to be initialised
- * @idx_max: Maximum number of entities in the enumeration
+ * @mdev: The related media device
*
- * Return: Returns zero on success or a negative error code.
+ * Return: zero on success or a negative error code.
*/
-__must_check int __media_entity_enum_init(struct media_entity_enum *ent_enum,
- int idx_max);
+__must_check int media_entity_enum_init(struct media_entity_enum *ent_enum,
+ struct media_device *mdev);
/**
* media_entity_enum_cleanup - Release resources of an entity enumeration
@@ -655,6 +704,10 @@ int media_entity_pads_init(struct media_entity *entity, u16 num_pads,
*
* This function must be called during the cleanup phase after unregistering
* the entity (currently, it does nothing).
+ *
+ * Calling media_entity_cleanup() on a media_entity whose memory has been
+ * zeroed but that has not been initialized with media_entity_pads_init() is
+ * valid and is a no-op.
*/
#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
static inline void media_entity_cleanup(struct media_entity *entity) {}
@@ -802,7 +855,7 @@ int __media_entity_setup_link(struct media_link *link, u32 flags);
* @flags: the requested new link flags
*
* The only configurable property is the %MEDIA_LNK_FL_ENABLED link flag
- * flag to enable/disable a link. Links marked with the
+ * to enable/disable a link. Links marked with the
* %MEDIA_LNK_FL_IMMUTABLE link flag can not be enabled or disabled.
*
* When a link is enabled or disabled, the media framework calls the
@@ -845,7 +898,7 @@ struct media_link *media_entity_find_link(struct media_pad *source,
struct media_pad *sink);
/**
- * media_entity_remote_pad - Find the pad at the remote end of a link
+ * media_pad_remote_pad_first - Find the first pad at the remote end of a link
* @pad: Pad at the local end of the link
*
* Search for a remote pad connected to the given pad by iterating over all
@@ -854,7 +907,135 @@ struct media_link *media_entity_find_link(struct media_pad *source,
* Return: returns a pointer to the pad at the remote end of the first found
* enabled link, or %NULL if no enabled link has been found.
*/
-struct media_pad *media_entity_remote_pad(const struct media_pad *pad);
+struct media_pad *media_pad_remote_pad_first(const struct media_pad *pad);
+
+/**
+ * media_pad_remote_pad_unique - Find a remote pad connected to a pad
+ * @pad: The pad
+ *
+ * Search for and return a remote pad connected to @pad through an enabled
+ * link. If multiple (or no) remote pads are found, an error is returned.
+ *
+ * The uniqueness constraint makes this helper function suitable for entities
+ * that support a single active source at a time on a given pad.
+ *
+ * Return: A pointer to the remote pad, or one of the following error pointers
+ * if an error occurs:
+ *
+ * * -ENOTUNIQ - Multiple links are enabled
+ * * -ENOLINK - No connected pad found
+ */
+struct media_pad *media_pad_remote_pad_unique(const struct media_pad *pad);
+
+/**
+ * media_entity_remote_pad_unique - Find a remote pad connected to an entity
+ * @entity: The entity
+ * @type: The type of pad to find (MEDIA_PAD_FL_SINK or MEDIA_PAD_FL_SOURCE)
+ *
+ * Search for and return a remote pad of @type connected to @entity through an
+ * enabled link. If multiple (or no) remote pads match these criteria, an error
+ * is returned.
+ *
+ * The uniqueness constraint makes this helper function suitable for entities
+ * that support a single active source or sink at a time.
+ *
+ * Return: A pointer to the remote pad, or one of the following error pointers
+ * if an error occurs:
+ *
+ * * -ENOTUNIQ - Multiple links are enabled
+ * * -ENOLINK - No connected pad found
+ */
+struct media_pad *
+media_entity_remote_pad_unique(const struct media_entity *entity,
+ unsigned int type);
+
+/**
+ * media_entity_remote_source_pad_unique - Find a remote source pad connected to
+ * an entity
+ * @entity: The entity
+ *
+ * Search for and return a remote source pad connected to @entity through an
+ * enabled link. If multiple (or no) remote pads match these criteria, an error
+ * is returned.
+ *
+ * The uniqueness constraint makes this helper function suitable for entities
+ * that support a single active source at a time.
+ *
+ * Return: A pointer to the remote pad, or one of the following error pointers
+ * if an error occurs:
+ *
+ * * -ENOTUNIQ - Multiple links are enabled
+ * * -ENOLINK - No connected pad found
+ */
+static inline struct media_pad *
+media_entity_remote_source_pad_unique(const struct media_entity *entity)
+{
+ return media_entity_remote_pad_unique(entity, MEDIA_PAD_FL_SOURCE);
+}
+
+/**
+ * media_pad_is_streaming - Test if a pad is part of a streaming pipeline
+ * @pad: The pad
+ *
+ * Return: True if the pad is part of a pipeline started with the
+ * media_pipeline_start() function, false otherwise.
+ */
+static inline bool media_pad_is_streaming(const struct media_pad *pad)
+{
+ return pad->pipe;
+}
+
+/**
+ * media_entity_is_streaming - Test if an entity is part of a streaming pipeline
+ * @entity: The entity
+ *
+ * Return: True if the entity is part of a pipeline started with the
+ * media_pipeline_start() function, false otherwise.
+ */
+static inline bool media_entity_is_streaming(const struct media_entity *entity)
+{
+ struct media_pad *pad;
+
+ media_entity_for_each_pad(entity, pad) {
+ if (media_pad_is_streaming(pad))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * media_entity_pipeline - Get the media pipeline an entity is part of
+ * @entity: The entity
+ *
+ * DEPRECATED: use media_pad_pipeline() instead.
+ *
+ * This function returns the media pipeline that an entity has been associated
+ * with when constructing the pipeline with media_pipeline_start(). The pointer
+ * remains valid until media_pipeline_stop() is called.
+ *
+ * In general, entities can be part of multiple pipelines, when carrying
+ * multiple streams (either on different pads, or on the same pad using
+ * multiplexed streams). This function is to be used only for entities that
+ * do not support multiple pipelines.
+ *
+ * Return: The media_pipeline the entity is part of, or NULL if the entity is
+ * not part of any pipeline.
+ */
+struct media_pipeline *media_entity_pipeline(struct media_entity *entity);
+
+/**
+ * media_pad_pipeline - Get the media pipeline a pad is part of
+ * @pad: The pad
+ *
+ * This function returns the media pipeline that a pad has been associated
+ * with when constructing the pipeline with media_pipeline_start(). The pointer
+ * remains valid until media_pipeline_stop() is called.
+ *
+ * Return: The media_pipeline the pad is part of, or NULL if the pad is
+ * not part of any pipeline.
+ */
+struct media_pipeline *media_pad_pipeline(struct media_pad *pad);
/**
* media_entity_get_fwnode_pad - Get pad number from fwnode
@@ -884,6 +1065,11 @@ int media_entity_get_fwnode_pad(struct media_entity *entity,
*
* @graph: Media graph structure that will be used to walk the graph
* @mdev: Pointer to the &media_device that contains the object
+ *
+ * The caller is required to hold the media_device graph_mutex during the graph
+ * walk until the graph state is released.
+ *
+ * Returns zero on success or a negative error code otherwise.
*/
__must_check int media_graph_walk_init(
struct media_graph *graph, struct media_device *mdev);
@@ -929,53 +1115,66 @@ struct media_entity *media_graph_walk_next(struct media_graph *graph);
/**
* media_pipeline_start - Mark a pipeline as streaming
- * @entity: Starting entity
- * @pipe: Media pipeline to be assigned to all entities in the pipeline.
+ * @pad: Starting pad
+ * @pipe: Media pipeline to be assigned to all pads in the pipeline.
*
- * Mark all entities connected to a given entity through enabled links, either
+ * Mark all pads connected to a given pad through enabled links, either
* directly or indirectly, as streaming. The given pipeline object is assigned
- * to every entity in the pipeline and stored in the media_entity pipe field.
+ * to every pad in the pipeline and stored in the media_pad pipe field.
*
* Calls to this function can be nested, in which case the same number of
* media_pipeline_stop() calls will be required to stop streaming. The
* pipeline pointer must be identical for all nested calls to
* media_pipeline_start().
*/
-__must_check int media_pipeline_start(struct media_entity *entity,
+__must_check int media_pipeline_start(struct media_pad *pad,
struct media_pipeline *pipe);
/**
* __media_pipeline_start - Mark a pipeline as streaming
*
- * @entity: Starting entity
- * @pipe: Media pipeline to be assigned to all entities in the pipeline.
+ * @pad: Starting pad
+ * @pipe: Media pipeline to be assigned to all pads in the pipeline.
*
 * .. note:: This is the non-locking version of media_pipeline_start()
*/
-__must_check int __media_pipeline_start(struct media_entity *entity,
+__must_check int __media_pipeline_start(struct media_pad *pad,
struct media_pipeline *pipe);
/**
* media_pipeline_stop - Mark a pipeline as not streaming
- * @entity: Starting entity
+ * @pad: Starting pad
*
- * Mark all entities connected to a given entity through enabled links, either
- * directly or indirectly, as not streaming. The media_entity pipe field is
+ * Mark all pads connected to a given pad through enabled links, either
+ * directly or indirectly, as not streaming. The media_pad pipe field is
* reset to %NULL.
*
* If multiple calls to media_pipeline_start() have been made, the same
* number of calls to this function are required to mark the pipeline as not
* streaming.
*/
-void media_pipeline_stop(struct media_entity *entity);
+void media_pipeline_stop(struct media_pad *pad);
/**
* __media_pipeline_stop - Mark a pipeline as not streaming
*
- * @entity: Starting entity
+ * @pad: Starting pad
*
* .. note:: This is the non-locking version of media_pipeline_stop()
*/
-void __media_pipeline_stop(struct media_entity *entity);
+void __media_pipeline_stop(struct media_pad *pad);
+
+/**
+ * media_pipeline_alloc_start - Mark a pipeline as streaming
+ * @pad: Starting pad
+ *
+ * media_pipeline_alloc_start() is similar to media_pipeline_start() but instead
+ * of working on a given pipeline the function will use an existing pipeline if
+ * the pad is already part of a pipeline, or allocate a new pipeline.
+ *
+ * Calls to media_pipeline_alloc_start() must be matched with
+ * media_pipeline_stop().
+ */
+__must_check int media_pipeline_alloc_start(struct media_pad *pad);
/**
* media_devnode_create() - creates and initializes a device node interface
@@ -1010,7 +1209,6 @@ __must_check media_devnode_create(struct media_device *mdev,
* removed.
*/
void media_devnode_remove(struct media_intf_devnode *devnode);
-struct media_link *
/**
* media_create_intf_link() - creates a link between an entity and an interface
@@ -1041,6 +1239,7 @@ struct media_link *
* the interface and media_device_register_entity() should be called for the
* interface that will be part of the link.
*/
+struct media_link *
__must_check media_create_intf_link(struct media_entity *entity,
struct media_interface *intf,
u32 flags);
@@ -1101,4 +1300,53 @@ void media_remove_intf_links(struct media_interface *intf);
(((entity)->ops && (entity)->ops->operation) ? \
(entity)->ops->operation((entity) , ##args) : -ENOIOCTLCMD)
+/**
+ * media_create_ancillary_link() - create an ancillary link between two
+ * instances of &media_entity
+ *
+ * @primary: pointer to the primary &media_entity
+ * @ancillary: pointer to the ancillary &media_entity
+ *
+ * Create an ancillary link between two entities, indicating that they
+ * represent two connected pieces of hardware that form a single logical unit.
+ * A typical example is a camera lens controller being linked to the sensor that
+ * it is supporting.
+ *
+ * The function sets both MEDIA_LNK_FL_ENABLED and MEDIA_LNK_FL_IMMUTABLE for
+ * the new link.
+ */
+struct media_link *
+media_create_ancillary_link(struct media_entity *primary,
+ struct media_entity *ancillary);
+
+/**
+ * __media_entity_next_link() - Iterate through a &media_entity's links
+ *
+ * @entity: pointer to the &media_entity
+ * @link: pointer to a &media_link to hold the iterated values
+ * @link_type: one of the MEDIA_LNK_FL_LINK_TYPE flags
+ *
+ * Return the next link against an entity matching a specific link type. This
+ * allows iteration through an entity's links whilst guaranteeing all of the
+ * returned links are of the given type.
+ */
+struct media_link *__media_entity_next_link(struct media_entity *entity,
+ struct media_link *link,
+ unsigned long link_type);
+
+/**
+ * for_each_media_entity_data_link() - Iterate through an entity's data links
+ *
+ * @entity: pointer to the &media_entity
+ * @link: pointer to a &media_link to hold the iterated values
+ *
+ * Iterate over a &media_entity's data links
+ */
+#define for_each_media_entity_data_link(entity, link) \
+ for (link = __media_entity_next_link(entity, NULL, \
+ MEDIA_LNK_FL_DATA_LINK); \
+ link; \
+ link = __media_entity_next_link(entity, link, \
+ MEDIA_LNK_FL_DATA_LINK))
+
#endif
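
As an illustration of the entity-to-pad migration above, a sketch of a start-streaming path using the new pad-based calls; the driver structure and pad layout are hypothetical, while the function signatures are the ones declared in this header:

#include <linux/err.h>
#include <media/media-entity.h>

/* Hypothetical driver state: one pad and an embedded pipeline. */
struct mydrv_video {
	struct media_pad pad;
	struct media_pipeline pipe;
};

static int mydrv_start_streaming(struct mydrv_video *vid)
{
	struct media_pad *remote;
	int ret;

	/* media_pipeline_start() now takes the starting pad. */
	ret = media_pipeline_start(&vid->pad, &vid->pipe);
	if (ret)
		return ret;

	/* Exactly one enabled link is expected on this pad. */
	remote = media_pad_remote_pad_unique(&vid->pad);
	if (IS_ERR(remote)) {
		media_pipeline_stop(&vid->pad);
		return PTR_ERR(remote); /* -ENOTUNIQ or -ENOLINK */
	}

	return 0;
}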
diff --git a/include/media/mipi-csi2.h b/include/media/mipi-csi2.h
new file mode 100644
index 000000000000..c3d8f12234b1
--- /dev/null
+++ b/include/media/mipi-csi2.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * MIPI CSI-2 Data Types
+ *
+ * Copyright (C) 2022 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#ifndef _MEDIA_MIPI_CSI2_H
+#define _MEDIA_MIPI_CSI2_H
+
+/* Short packet data types */
+#define MIPI_CSI2_DT_FS 0x00
+#define MIPI_CSI2_DT_FE 0x01
+#define MIPI_CSI2_DT_LS 0x02
+#define MIPI_CSI2_DT_LE 0x03
+#define MIPI_CSI2_DT_GENERIC_SHORT(n) (0x08 + (n)) /* 0..7 */
+
+/* Long packet data types */
+#define MIPI_CSI2_DT_NULL 0x10
+#define MIPI_CSI2_DT_BLANKING 0x11
+#define MIPI_CSI2_DT_EMBEDDED_8B 0x12
+#define MIPI_CSI2_DT_YUV420_8B 0x18
+#define MIPI_CSI2_DT_YUV420_10B 0x19
+#define MIPI_CSI2_DT_YUV420_8B_LEGACY 0x1a
+#define MIPI_CSI2_DT_YUV420_8B_CS 0x1c
+#define MIPI_CSI2_DT_YUV420_10B_CS 0x1d
+#define MIPI_CSI2_DT_YUV422_8B 0x1e
+#define MIPI_CSI2_DT_YUV422_10B 0x1f
+#define MIPI_CSI2_DT_RGB444 0x20
+#define MIPI_CSI2_DT_RGB555 0x21
+#define MIPI_CSI2_DT_RGB565 0x22
+#define MIPI_CSI2_DT_RGB666 0x23
+#define MIPI_CSI2_DT_RGB888 0x24
+#define MIPI_CSI2_DT_RAW28 0x26
+#define MIPI_CSI2_DT_RAW24 0x27
+#define MIPI_CSI2_DT_RAW6 0x28
+#define MIPI_CSI2_DT_RAW7 0x29
+#define MIPI_CSI2_DT_RAW8 0x2a
+#define MIPI_CSI2_DT_RAW10 0x2b
+#define MIPI_CSI2_DT_RAW12 0x2c
+#define MIPI_CSI2_DT_RAW14 0x2d
+#define MIPI_CSI2_DT_RAW16 0x2e
+#define MIPI_CSI2_DT_RAW20 0x2f
+#define MIPI_CSI2_DT_USER_DEFINED(n) (0x30 + (n)) /* 0..7 */
+
+#endif /* _MEDIA_MIPI_CSI2_H */
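
As one way these codes might be consumed, a hypothetical helper (not part of the header) mapping the RAW long-packet data types above to bits per pixel:

#include <linux/types.h>
#include <media/mipi-csi2.h>

/* Hypothetical helper: bits per pixel for the RAW data types above,
 * or 0 for anything this sketch does not cover. */
static unsigned int mipi_csi2_raw_bpp(u8 data_type)
{
	switch (data_type) {
	case MIPI_CSI2_DT_RAW6:  return 6;
	case MIPI_CSI2_DT_RAW7:  return 7;
	case MIPI_CSI2_DT_RAW8:  return 8;
	case MIPI_CSI2_DT_RAW10: return 10;
	case MIPI_CSI2_DT_RAW12: return 12;
	case MIPI_CSI2_DT_RAW14: return 14;
	case MIPI_CSI2_DT_RAW16: return 16;
	case MIPI_CSI2_DT_RAW20: return 20;
	case MIPI_CSI2_DT_RAW24: return 24;
	case MIPI_CSI2_DT_RAW28: return 28;
	default:                 return 0;
	}
}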
diff --git a/include/media/mpeg2-ctrls.h b/include/media/mpeg2-ctrls.h
deleted file mode 100644
index 6601455b3d5e..000000000000
--- a/include/media/mpeg2-ctrls.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * These are the MPEG2 state controls for use with stateless MPEG-2
- * codec drivers.
- *
- * It turns out that these structs are not stable yet and will undergo
- * more changes. So keep them private until they are stable and ready to
- * become part of the official public API.
- */
-
-#ifndef _MPEG2_CTRLS_H_
-#define _MPEG2_CTRLS_H_
-
-#define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (V4L2_CID_MPEG_BASE+250)
-#define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (V4L2_CID_MPEG_BASE+251)
-
-/* enum v4l2_ctrl_type type values */
-#define V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS 0x0103
-#define V4L2_CTRL_TYPE_MPEG2_QUANTIZATION 0x0104
-
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_I 1
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_P 2
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_B 3
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_D 4
-
-struct v4l2_mpeg2_sequence {
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */
- __u16 horizontal_size;
- __u16 vertical_size;
- __u32 vbv_buffer_size;
-
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */
- __u16 profile_and_level_indication;
- __u8 progressive_sequence;
- __u8 chroma_format;
-};
-
-struct v4l2_mpeg2_picture {
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */
- __u8 picture_coding_type;
-
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */
- __u8 f_code[2][2];
- __u8 intra_dc_precision;
- __u8 picture_structure;
- __u8 top_field_first;
- __u8 frame_pred_frame_dct;
- __u8 concealment_motion_vectors;
- __u8 q_scale_type;
- __u8 intra_vlc_format;
- __u8 alternate_scan;
- __u8 repeat_first_field;
- __u16 progressive_frame;
-};
-
-struct v4l2_ctrl_mpeg2_slice_params {
- __u32 bit_size;
- __u32 data_bit_offset;
- __u64 backward_ref_ts;
- __u64 forward_ref_ts;
-
- struct v4l2_mpeg2_sequence sequence;
- struct v4l2_mpeg2_picture picture;
-
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */
- __u32 quantiser_scale_code;
-};
-
-struct v4l2_ctrl_mpeg2_quantization {
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Quant matrix extension */
- __u8 load_intra_quantiser_matrix;
- __u8 load_non_intra_quantiser_matrix;
- __u8 load_chroma_intra_quantiser_matrix;
- __u8 load_chroma_non_intra_quantiser_matrix;
-
- __u8 intra_quantiser_matrix[64];
- __u8 non_intra_quantiser_matrix[64];
- __u8 chroma_intra_quantiser_matrix[64];
- __u8 chroma_non_intra_quantiser_matrix[64];
-};
-
-#endif
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 1f695d9c200a..803349599c27 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -59,7 +59,6 @@ enum rc_filter_type {
* @rc: rcdev for this lirc chardev
* @carrier_low: when setting the carrier range, first the low end must be
* set with an ioctl and then the high end with another ioctl
- * @send_timeout_reports: report timeouts in lirc raw IR.
* @rawir: queue for incoming raw IR
* @scancodes: queue for incoming decoded scancodes
* @wait_poll: poll struct for lirc device
@@ -72,7 +71,6 @@ struct lirc_fh {
struct list_head list;
struct rc_dev *rc;
int carrier_low;
- bool send_timeout_reports;
DECLARE_KFIFO_PTR(rawir, unsigned int);
DECLARE_KFIFO_PTR(scancodes, struct lirc_scancode);
wait_queue_head_t wait_poll;
@@ -128,13 +126,11 @@ struct lirc_fh {
* @timeout: optional time after which device stops sending data
* @min_timeout: minimum timeout supported by device
* @max_timeout: maximum timeout supported by device
- * @rx_resolution : resolution (in ns) of input sampler
- * @tx_resolution: resolution (in ns) of output sampler
+ * @rx_resolution: resolution (in us) of input sampler
+ * @tx_resolution: resolution (in us) of output sampler
* @lirc_dev: lirc device
* @lirc_cdev: lirc char cdev
- * @gap_start: time when gap starts
- * @gap_duration: duration of initial gap
- * @gap: true if we're in a gap
+ * @gap_start: start time for gap after timeout if non-zero
* @lirc_fh_lock: protects lirc_fh list
* @lirc_fh: list of open files
* @registered: set to true by rc_register_device(), false by
@@ -151,13 +147,13 @@ struct lirc_fh {
* @tx_ir: transmit IR
* @s_idle: enable/disable hardware idle mode, upon which,
* device doesn't interrupt host until it sees IR pulses
- * @s_learning_mode: enable wide band receiver used for learning
+ * @s_wideband_receiver: enable wide band receiver used for learning
* @s_carrier_report: enable carrier reports
* @s_filter: set the scancode filter
* @s_wakeup_filter: set the wakeup scancode filter. If the mask is zero
* then wakeup should be disabled. wakeup_protocol will be set to
* a valid protocol if mask is nonzero.
- * @s_timeout: set hardware timeout in ns
+ * @s_timeout: set hardware timeout in us
*/
struct rc_dev {
struct device dev;
@@ -192,7 +188,7 @@ struct rc_dev {
struct timer_list timer_repeat;
u32 last_keycode;
enum rc_proto last_protocol;
- u32 last_scancode;
+ u64 last_scancode;
u8 last_toggle;
u32 timeout;
u32 min_timeout;
@@ -203,8 +199,6 @@ struct rc_dev {
struct device lirc_dev;
struct cdev lirc_cdev;
ktime_t gap_start;
- u64 gap_duration;
- bool gap;
spinlock_t lirc_fh_lock;
struct list_head lirc_fh;
#endif
@@ -218,7 +212,7 @@ struct rc_dev {
int (*s_rx_carrier_range)(struct rc_dev *dev, u32 min, u32 max);
int (*tx_ir)(struct rc_dev *dev, unsigned *txbuf, unsigned n);
void (*s_idle)(struct rc_dev *dev, bool enable);
- int (*s_learning_mode)(struct rc_dev *dev, int enable);
+ int (*s_wideband_receiver)(struct rc_dev *dev, int enable);
int (*s_carrier_report) (struct rc_dev *dev, int enable);
int (*s_filter)(struct rc_dev *dev,
struct rc_scancode_filter *filter);
@@ -284,12 +278,12 @@ int devm_rc_register_device(struct device *parent, struct rc_dev *dev);
void rc_unregister_device(struct rc_dev *dev);
void rc_repeat(struct rc_dev *dev);
-void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u32 scancode,
+void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u64 scancode,
u8 toggle);
void rc_keydown_notimeout(struct rc_dev *dev, enum rc_proto protocol,
- u32 scancode, u8 toggle);
+ u64 scancode, u8 toggle);
void rc_keyup(struct rc_dev *dev);
-u32 rc_g_keycode_from_table(struct rc_dev *dev, u32 scancode);
+u32 rc_g_keycode_from_table(struct rc_dev *dev, u64 scancode);
/*
* From rc-raw.c
@@ -304,16 +298,16 @@ struct ir_raw_event {
u8 duty_cycle;
unsigned pulse:1;
- unsigned reset:1;
+ unsigned overflow:1;
unsigned timeout:1;
unsigned carrier_report:1;
};
-#define IR_DEFAULT_TIMEOUT MS_TO_NS(125)
-#define IR_MAX_DURATION 500000000 /* 500 ms */
#define US_TO_NS(usec) ((usec) * 1000)
#define MS_TO_US(msec) ((msec) * 1000)
-#define MS_TO_NS(msec) ((msec) * 1000 * 1000)
+#define IR_MAX_DURATION MS_TO_US(500)
+#define IR_DEFAULT_TIMEOUT MS_TO_US(125)
+#define IR_MAX_TIMEOUT LIRC_VALUE_MASK
void ir_raw_event_handle(struct rc_dev *dev);
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev);
@@ -327,9 +321,9 @@ int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode,
struct ir_raw_event *events, unsigned int max);
int ir_raw_encode_carrier(enum rc_proto protocol);
-static inline void ir_raw_event_reset(struct rc_dev *dev)
+static inline void ir_raw_event_overflow(struct rc_dev *dev)
{
- ir_raw_event_store(dev, &((struct ir_raw_event) { .reset = true }));
+ ir_raw_event_store(dev, &((struct ir_raw_event) { .overflow = true }));
dev->idle = true;
ir_raw_event_handle(dev);
}
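
Tying the unit change (ns to us) and the reset-to-overflow rename together, a sketch of a receive path under the new conventions; the handler name and its inputs are hypothetical, and the struct's u32 duration field is assumed since it is not shown in this hunk:

#include <linux/minmax.h>
#include <media/rc-core.h>

/* Hypothetical receive handler: duration_us comes from the hardware
 * sampler, fifo_overflowed from a status register. */
static void mydrv_ir_rx(struct rc_dev *rc, u32 duration_us,
			bool pulse, bool fifo_overflowed)
{
	struct ir_raw_event ev = {};

	if (fifo_overflowed) {
		/* Replaces the old ir_raw_event_reset(). */
		ir_raw_event_overflow(rc);
		return;
	}

	/* Durations are now in microseconds, capped at 500 ms. */
	ev.duration = min_t(u32, duration_us, IR_MAX_DURATION);
	ev.pulse = pulse;

	ir_raw_event_store(rc, &ev);
	ir_raw_event_handle(rc);
}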
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index f99575a0d29c..793b54342dff 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -41,36 +41,85 @@
#define RC_PROTO_BIT_RCMM32 BIT_ULL(RC_PROTO_RCMM32)
#define RC_PROTO_BIT_XBOX_DVD BIT_ULL(RC_PROTO_XBOX_DVD)
-/* All rc protocols for which we have decoders */
+#if IS_ENABLED(CONFIG_IR_RC5_DECODER)
+#define __RC_PROTO_RC5_CODEC \
+ (RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC5X_20 | RC_PROTO_BIT_RC5_SZ)
+#else
+#define __RC_PROTO_RC5_CODEC 0
+#endif
+
+#if IS_ENABLED(CONFIG_IR_JVC_DECODER)
+#define __RC_PROTO_JVC_CODEC RC_PROTO_BIT_JVC
+#else
+#define __RC_PROTO_JVC_CODEC 0
+#endif
+#if IS_ENABLED(CONFIG_IR_SONY_DECODER)
+#define __RC_PROTO_SONY_CODEC \
+ (RC_PROTO_BIT_SONY12 | RC_PROTO_BIT_SONY15 | RC_PROTO_BIT_SONY20)
+#else
+#define __RC_PROTO_SONY_CODEC 0
+#endif
+#if IS_ENABLED(CONFIG_IR_NEC_DECODER)
+#define __RC_PROTO_NEC_CODEC \
+ (RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32)
+#else
+#define __RC_PROTO_NEC_CODEC 0
+#endif
+#if IS_ENABLED(CONFIG_IR_SANYO_DECODER)
+#define __RC_PROTO_SANYO_CODEC RC_PROTO_BIT_SANYO
+#else
+#define __RC_PROTO_SANYO_CODEC 0
+#endif
+#if IS_ENABLED(CONFIG_IR_MCE_KBD_DECODER)
+#define __RC_PROTO_MCE_KBD_CODEC \
+ (RC_PROTO_BIT_MCIR2_KBD | RC_PROTO_BIT_MCIR2_MSE)
+#else
+#define __RC_PROTO_MCE_KBD_CODEC 0
+#endif
+#if IS_ENABLED(CONFIG_IR_RC6_DECODER)
+#define __RC_PROTO_RC6_CODEC \
+ (RC_PROTO_BIT_RC6_0 | RC_PROTO_BIT_RC6_6A_20 | \
+ RC_PROTO_BIT_RC6_6A_24 | RC_PROTO_BIT_RC6_6A_32 | \
+ RC_PROTO_BIT_RC6_MCE)
+#else
+#define __RC_PROTO_RC6_CODEC 0
+#endif
+#if IS_ENABLED(CONFIG_IR_SHARP_DECODER)
+#define __RC_PROTO_SHARP_CODEC RC_PROTO_BIT_SHARP
+#else
+#define __RC_PROTO_SHARP_CODEC 0
+#endif
+#if IS_ENABLED(CONFIG_IR_XMP_DECODER)
+#define __RC_PROTO_XMP_CODEC RC_PROTO_BIT_XMP
+#else
+#define __RC_PROTO_XMP_CODEC 0
+#endif
+#if IS_ENABLED(CONFIG_IR_IMON_DECODER)
+#define __RC_PROTO_IMON_CODEC RC_PROTO_BIT_IMON
+#else
+#define __RC_PROTO_IMON_CODEC 0
+#endif
+#if IS_ENABLED(CONFIG_IR_RCMM_DECODER)
+#define __RC_PROTO_RCMM_CODEC \
+ (RC_PROTO_BIT_RCMM12 | RC_PROTO_BIT_RCMM24 | RC_PROTO_BIT_RCMM32)
+#else
+#define __RC_PROTO_RCMM_CODEC 0
+#endif
+
+/* All kernel-based codecs have encoders and decoders */
#define RC_PROTO_BIT_ALL_IR_DECODER \
- (RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC5X_20 | \
- RC_PROTO_BIT_RC5_SZ | RC_PROTO_BIT_JVC | \
- RC_PROTO_BIT_SONY12 | RC_PROTO_BIT_SONY15 | \
- RC_PROTO_BIT_SONY20 | RC_PROTO_BIT_NEC | \
- RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32 | \
- RC_PROTO_BIT_SANYO | RC_PROTO_BIT_MCIR2_KBD | \
- RC_PROTO_BIT_MCIR2_MSE | \
- RC_PROTO_BIT_RC6_0 | RC_PROTO_BIT_RC6_6A_20 | \
- RC_PROTO_BIT_RC6_6A_24 | RC_PROTO_BIT_RC6_6A_32 | \
- RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_SHARP | \
- RC_PROTO_BIT_XMP | RC_PROTO_BIT_IMON | \
- RC_PROTO_BIT_RCMM12 | RC_PROTO_BIT_RCMM24 | \
- RC_PROTO_BIT_RCMM32)
+ (__RC_PROTO_RC5_CODEC | __RC_PROTO_JVC_CODEC | __RC_PROTO_SONY_CODEC | \
+ __RC_PROTO_NEC_CODEC | __RC_PROTO_SANYO_CODEC | \
+ __RC_PROTO_MCE_KBD_CODEC | __RC_PROTO_RC6_CODEC | \
+ __RC_PROTO_SHARP_CODEC | __RC_PROTO_XMP_CODEC | \
+ __RC_PROTO_IMON_CODEC | __RC_PROTO_RCMM_CODEC)
#define RC_PROTO_BIT_ALL_IR_ENCODER \
- (RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC5X_20 | \
- RC_PROTO_BIT_RC5_SZ | RC_PROTO_BIT_JVC | \
- RC_PROTO_BIT_SONY12 | RC_PROTO_BIT_SONY15 | \
- RC_PROTO_BIT_SONY20 | RC_PROTO_BIT_NEC | \
- RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32 | \
- RC_PROTO_BIT_SANYO | RC_PROTO_BIT_MCIR2_KBD | \
- RC_PROTO_BIT_MCIR2_MSE | \
- RC_PROTO_BIT_RC6_0 | RC_PROTO_BIT_RC6_6A_20 | \
- RC_PROTO_BIT_RC6_6A_24 | \
- RC_PROTO_BIT_RC6_6A_32 | RC_PROTO_BIT_RC6_MCE | \
- RC_PROTO_BIT_SHARP | RC_PROTO_BIT_IMON | \
- RC_PROTO_BIT_RCMM12 | RC_PROTO_BIT_RCMM24 | \
- RC_PROTO_BIT_RCMM32)
+ (__RC_PROTO_RC5_CODEC | __RC_PROTO_JVC_CODEC | __RC_PROTO_SONY_CODEC | \
+ __RC_PROTO_NEC_CODEC | __RC_PROTO_SANYO_CODEC | \
+ __RC_PROTO_MCE_KBD_CODEC | __RC_PROTO_RC6_CODEC | \
+ __RC_PROTO_SHARP_CODEC | __RC_PROTO_XMP_CODEC | \
+ __RC_PROTO_IMON_CODEC | __RC_PROTO_RCMM_CODEC)
#define RC_SCANCODE_UNKNOWN(x) (x)
#define RC_SCANCODE_OTHER(x) (x)
@@ -85,11 +134,11 @@
/**
* struct rc_map_table - represents a scancode/keycode pair
*
- * @scancode: scan code (u32)
+ * @scancode: scan code (u64)
* @keycode: Linux input keycode
*/
struct rc_map_table {
- u32 scancode;
+ u64 scancode;
u32 keycode;
};
@@ -126,6 +175,13 @@ struct rc_map_list {
struct rc_map map;
};
+#ifdef CONFIG_MEDIA_CEC_RC
+/*
+ * rc_map_list from rc-cec.c
+ */
+extern struct rc_map_list cec_map;
+#endif
+
/* Routines from rc-map.c */
/**
@@ -175,6 +231,7 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_CEC "rc-cec"
#define RC_MAP_CINERGY "rc-cinergy"
#define RC_MAP_CINERGY_1400 "rc-cinergy-1400"
+#define RC_MAP_CT_90405 "rc-ct-90405"
#define RC_MAP_D680_DMB "rc-d680-dmb"
#define RC_MAP_DELOCK_61959 "rc-delock-61959"
#define RC_MAP_DIB0700_NEC_TABLE "rc-dib0700-nec"
@@ -214,15 +271,19 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_IT913X_V2 "rc-it913x-v2"
#define RC_MAP_KAIOMY "rc-kaiomy"
#define RC_MAP_KHADAS "rc-khadas"
+#define RC_MAP_KHAMSIN "rc-khamsin"
#define RC_MAP_KWORLD_315U "rc-kworld-315u"
#define RC_MAP_KWORLD_PC150U "rc-kworld-pc150u"
#define RC_MAP_KWORLD_PLUS_TV_ANALOG "rc-kworld-plus-tv-analog"
#define RC_MAP_LEADTEK_Y04G0051 "rc-leadtek-y04g0051"
#define RC_MAP_LME2510 "rc-lme2510"
#define RC_MAP_MANLI "rc-manli"
+#define RC_MAP_MECOOL_KII_PRO "rc-mecool-kii-pro"
+#define RC_MAP_MECOOL_KIII_PRO "rc-mecool-kiii-pro"
#define RC_MAP_MEDION_X10 "rc-medion-x10"
#define RC_MAP_MEDION_X10_DIGITAINER "rc-medion-x10-digitainer"
#define RC_MAP_MEDION_X10_OR2X "rc-medion-x10-or2x"
+#define RC_MAP_MINIX_NEO "rc-minix-neo"
#define RC_MAP_MSI_DIGIVOX_II "rc-msi-digivox-ii"
#define RC_MAP_MSI_DIGIVOX_III "rc-msi-digivox-iii"
#define RC_MAP_MSI_TVANYWHERE "rc-msi-tvanywhere"
@@ -233,6 +294,7 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_NPGTECH "rc-npgtech"
#define RC_MAP_ODROID "rc-odroid"
#define RC_MAP_PCTV_SEDNA "rc-pctv-sedna"
+#define RC_MAP_PINE64 "rc-pine64"
#define RC_MAP_PINNACLE_COLOR "rc-pinnacle-color"
#define RC_MAP_PINNACLE_GREY "rc-pinnacle-grey"
#define RC_MAP_PINNACLE_PCTV_HD "rc-pinnacle-pctv-hd"
@@ -251,7 +313,6 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_SNAPSTREAM_FIREFLY "rc-snapstream-firefly"
#define RC_MAP_STREAMZAP "rc-streamzap"
#define RC_MAP_SU3000 "rc-su3000"
-#define RC_MAP_TANGO "rc-tango"
#define RC_MAP_TANIX_TX3MINI "rc-tanix-tx3mini"
#define RC_MAP_TANIX_TX5MAX "rc-tanix-tx5max"
#define RC_MAP_TBS_NEC "rc-tbs-nec"
@@ -274,11 +335,13 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_VIDEOMATE_K100 "rc-videomate-k100"
#define RC_MAP_VIDEOMATE_S350 "rc-videomate-s350"
#define RC_MAP_VIDEOMATE_TV_PVR "rc-videomate-tv-pvr"
+#define RC_MAP_KII_PRO "rc-videostrong-kii-pro"
#define RC_MAP_WETEK_HUB "rc-wetek-hub"
#define RC_MAP_WETEK_PLAY2 "rc-wetek-play2"
#define RC_MAP_WINFAST "rc-winfast"
#define RC_MAP_WINFAST_USBII_DELUXE "rc-winfast-usbii-deluxe"
#define RC_MAP_X96MAX "rc-x96max"
+#define RC_MAP_XBOX_360 "rc-xbox-360"
#define RC_MAP_XBOX_DVD "rc-xbox-dvd"
#define RC_MAP_ZX_IRDEC "rc-zx-irdec"
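
With scancodes widened to u64 in struct rc_map_table, a keymap sketch; the table and its name string are hypothetical, and the rc_map field names (scan, size, rc_proto, name) are written from memory of struct rc_map rather than from this hunk, so treat them as assumptions:

#include <linux/module.h>
#include <media/rc-map.h>

/* Hypothetical keymap; scancodes are now u64, keycodes stay u32. */
static struct rc_map_table my_board_table[] = {
	{ 0x800f0400, KEY_NUMERIC_0 },
	{ 0x800f0401, KEY_NUMERIC_1 },
};

static struct rc_map_list my_board_map = {
	.map = {
		.scan     = my_board_table,
		.size     = ARRAY_SIZE(my_board_table),
		.rc_proto = RC_PROTO_RC6_MCE,
		.name     = "rc-my-board",	/* hypothetical map name */
	},
};

/* Registration would go through the rc-map.c routines referenced
 * above (rc_map_register()/rc_map_unregister()). */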
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
deleted file mode 100644
index 331c343a5b5a..000000000000
--- a/include/media/soc_camera.h
+++ /dev/null
@@ -1,397 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * camera image capture (abstract) bus driver header
- *
- * Copyright (C) 2006, Sascha Hauer, Pengutronix
- * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
- */
-
-#ifndef SOC_CAMERA_H
-#define SOC_CAMERA_H
-
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/mutex.h>
-#include <linux/pm.h>
-#include <linux/videodev2.h>
-#include <media/videobuf2-v4l2.h>
-#include <media/v4l2-async.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-
-struct file;
-struct soc_camera_desc;
-struct soc_camera_async_client;
-
-struct soc_camera_device {
- struct list_head list; /* list of all registered devices */
- struct soc_camera_desc *sdesc;
- struct device *pdev; /* Platform device */
- struct device *parent; /* Camera host device */
- struct device *control; /* E.g., the i2c client */
- s32 user_width;
- s32 user_height;
- u32 bytesperline; /* for padding, zero if unused */
- u32 sizeimage;
- enum v4l2_colorspace colorspace;
- unsigned char iface; /* Host number */
- unsigned char devnum; /* Device number per host */
- struct soc_camera_sense *sense; /* See comment in struct definition */
- struct video_device *vdev;
- struct v4l2_ctrl_handler ctrl_handler;
- const struct soc_camera_format_xlate *current_fmt;
- struct soc_camera_format_xlate *user_formats;
- int num_user_formats;
- enum v4l2_field field; /* Preserve field over close() */
- void *host_priv; /* Per-device host private data */
- /* soc_camera.c private count. Only accessed with .host_lock held */
- int use_count;
- struct file *streamer; /* stream owner */
- struct v4l2_clk *clk;
- /* Asynchronous subdevice management */
- struct soc_camera_async_client *sasc;
- /* video buffer queue */
- struct vb2_queue vb2_vidq;
-};
-
-/* Host supports programmable stride */
-#define SOCAM_HOST_CAP_STRIDE (1 << 0)
-
-enum soc_camera_subdev_role {
- SOCAM_SUBDEV_DATA_SOURCE = 1,
- SOCAM_SUBDEV_DATA_SINK,
- SOCAM_SUBDEV_DATA_PROCESSOR,
-};
-
-struct soc_camera_async_subdev {
- struct v4l2_async_subdev asd;
- enum soc_camera_subdev_role role;
-};
-
-struct soc_camera_host {
- struct v4l2_device v4l2_dev;
- struct list_head list;
- struct mutex host_lock; /* Main synchronisation lock */
- struct mutex clk_lock; /* Protect pipeline modifications */
- unsigned char nr; /* Host number */
- u32 capabilities;
- struct soc_camera_device *icd; /* Currently attached client */
- void *priv;
- const char *drv_name;
- struct soc_camera_host_ops *ops;
- struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */
- unsigned int *asd_sizes; /* 0-terminated array of asd group sizes */
-};
-
-struct soc_camera_host_ops {
- struct module *owner;
- int (*add)(struct soc_camera_device *);
- void (*remove)(struct soc_camera_device *);
- int (*clock_start)(struct soc_camera_host *);
- void (*clock_stop)(struct soc_camera_host *);
- /*
- * .get_formats() is called for each client device format, but
- * .put_formats() is only called once. Further, if any of the calls to
- * .get_formats() fail, .put_formats() will not be called at all, the
- * failing .get_formats() must then clean up internally.
- */
- int (*get_formats)(struct soc_camera_device *, unsigned int,
- struct soc_camera_format_xlate *);
- void (*put_formats)(struct soc_camera_device *);
- int (*get_selection)(struct soc_camera_device *, struct v4l2_selection *);
- int (*set_selection)(struct soc_camera_device *, struct v4l2_selection *);
- /*
- * The difference to .set_selection() is, that .set_liveselection is not allowed
- * to change the output sizes
- */
- int (*set_liveselection)(struct soc_camera_device *, struct v4l2_selection *);
- int (*set_fmt)(struct soc_camera_device *, struct v4l2_format *);
- int (*try_fmt)(struct soc_camera_device *, struct v4l2_format *);
- int (*init_videobuf2)(struct vb2_queue *,
- struct soc_camera_device *);
- int (*querycap)(struct soc_camera_host *, struct v4l2_capability *);
- int (*set_bus_param)(struct soc_camera_device *);
- int (*get_parm)(struct soc_camera_device *, struct v4l2_streamparm *);
- int (*set_parm)(struct soc_camera_device *, struct v4l2_streamparm *);
- int (*enum_framesizes)(struct soc_camera_device *, struct v4l2_frmsizeenum *);
- __poll_t (*poll)(struct file *, poll_table *);
-};
-
-#define SOCAM_SENSOR_INVERT_PCLK (1 << 0)
-#define SOCAM_SENSOR_INVERT_MCLK (1 << 1)
-#define SOCAM_SENSOR_INVERT_HSYNC (1 << 2)
-#define SOCAM_SENSOR_INVERT_VSYNC (1 << 3)
-#define SOCAM_SENSOR_INVERT_DATA (1 << 4)
-
-struct i2c_board_info;
-struct regulator_bulk_data;
-
-struct soc_camera_subdev_desc {
- /* Per camera SOCAM_SENSOR_* bus flags */
- unsigned long flags;
-
- /* sensor driver private platform data */
- void *drv_priv;
-
- /*
- * Set unbalanced_power to true to deal with legacy drivers, failing to
- * balance their calls to subdevice's .s_power() method. clock_state is
- * then used internally by helper functions, it shouldn't be touched by
- * drivers or the platform code.
- */
- bool unbalanced_power;
- unsigned long clock_state;
-
- /* Optional callbacks to power on or off and reset the sensor */
- int (*power)(struct device *, int);
- int (*reset)(struct device *);
-
- /*
- * some platforms may support different data widths than the sensors
- * native ones due to different data line routing. Let the board code
- * overwrite the width flags.
- */
- int (*set_bus_param)(struct soc_camera_subdev_desc *, unsigned long flags);
- unsigned long (*query_bus_param)(struct soc_camera_subdev_desc *);
- void (*free_bus)(struct soc_camera_subdev_desc *);
-
- /* Optional regulators that have to be managed on power on/off events */
- struct v4l2_subdev_platform_data sd_pdata;
-};
-
-struct soc_camera_host_desc {
- /* Camera bus id, used to match a camera and a bus */
- int bus_id;
- int i2c_adapter_id;
- struct i2c_board_info *board_info;
- const char *module_name;
-
- /*
- * For non-I2C devices platform has to provide methods to add a device
- * to the system and to remove it
- */
- int (*add_device)(struct soc_camera_device *);
- void (*del_device)(struct soc_camera_device *);
-};
-
-/*
- * Platform data for "soc-camera-pdrv"
- * This MUST be kept binary-identical to struct soc_camera_link below, until
- * it is completely replaced by this one, after which we can split it into its
- * two components.
- */
-struct soc_camera_desc {
- struct soc_camera_subdev_desc subdev_desc;
- struct soc_camera_host_desc host_desc;
-};
-
-/* Prepare to replace this struct: don't change its layout any more! */
-struct soc_camera_link {
- /*
- * Subdevice part - keep at top and compatible to
- * struct soc_camera_subdev_desc
- */
-
- /* Per camera SOCAM_SENSOR_* bus flags */
- unsigned long flags;
-
- void *priv;
-
- /* Set by platforms to handle misbehaving drivers */
- bool unbalanced_power;
- /* Used by soc-camera helper functions */
- unsigned long clock_state;
-
- /* Optional callbacks to power on or off and reset the sensor */
- int (*power)(struct device *, int);
- int (*reset)(struct device *);
- /*
- * Some platforms may support data widths different from the sensor's
- * native ones due to different data line routing. Let the board code
- * override the width flags.
- */
- int (*set_bus_param)(struct soc_camera_link *, unsigned long flags);
- unsigned long (*query_bus_param)(struct soc_camera_link *);
- void (*free_bus)(struct soc_camera_link *);
-
- /* Optional regulators that have to be managed on power on/off events */
- struct regulator_bulk_data *regulators;
- int num_regulators;
-
- void *host_priv;
-
- /*
- * Host part - keep at bottom and compatible to
- * struct soc_camera_host_desc
- */
-
- /* Camera bus id, used to match a camera and a bus */
- int bus_id;
- int i2c_adapter_id;
- struct i2c_board_info *board_info;
- const char *module_name;
-
- /*
- * For non-I2C devices the platform has to provide methods to add a device
- * to the system and to remove it
- */
- int (*add_device)(struct soc_camera_device *);
- void (*del_device)(struct soc_camera_device *);
-};
-
-static inline struct soc_camera_host *to_soc_camera_host(
- const struct device *dev)
-{
- struct v4l2_device *v4l2_dev = dev_get_drvdata(dev);
-
- return container_of(v4l2_dev, struct soc_camera_host, v4l2_dev);
-}
-
-static inline struct soc_camera_desc *to_soc_camera_desc(
- const struct soc_camera_device *icd)
-{
- return icd->sdesc;
-}
-
-static inline struct device *to_soc_camera_control(
- const struct soc_camera_device *icd)
-{
- return icd->control;
-}
-
-static inline struct v4l2_subdev *soc_camera_to_subdev(
- const struct soc_camera_device *icd)
-{
- struct device *control = to_soc_camera_control(icd);
- return dev_get_drvdata(control);
-}
-
-int soc_camera_host_register(struct soc_camera_host *ici);
-void soc_camera_host_unregister(struct soc_camera_host *ici);
-
-const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc(
- struct soc_camera_device *icd, unsigned int fourcc);
-
-/**
- * struct soc_camera_format_xlate - match between host and sensor formats
- * @code: code of a sensor provided format
- * @host_fmt: host format after host translation from code
- *
- * Host and sensor translation structure. Used in the table of host and sensor
- * format matches in soc_camera_device. A host can override the generic list
- * generation by implementing get_formats(), and use it for format checks and
- * format setup.
- */
-struct soc_camera_format_xlate {
- u32 code;
- const struct soc_mbus_pixelfmt *host_fmt;
-};
-
-#define SOCAM_SENSE_PCLK_CHANGED (1 << 0)
-
-/**
- * This struct can be attached to struct soc_camera_device by the host driver
- * to request sense from the camera, for example, when calling .set_fmt(). The
- * host can then check which flags are set and verify the respective values,
- * if any. For example, if SOCAM_SENSE_PCLK_CHANGED is set, the pixel clock has
- * changed during this operation. After completion the host should detach sense.
- *
- * @flags		ORed SOCAM_SENSE_* flags
- * @master_clock	if the host wants to be informed about a pixel-clock
- *			change, it should set master_clock.
- * @pixel_clock_max	maximum pixel clock frequency supported by the host;
- *			the camera is not allowed to exceed this.
- * @pixel_clock		if the camera driver changed the pixel clock during this
- *			operation, it sets SOCAM_SENSE_PCLK_CHANGED, uses
- *			master_clock to calculate the new pixel clock and
- *			sets this field.
- */
-struct soc_camera_sense {
- unsigned long flags;
- unsigned long master_clock;
- unsigned long pixel_clock_max;
- unsigned long pixel_clock;
-};
-
-#define SOCAM_DATAWIDTH(x) BIT((x) - 1)
-#define SOCAM_DATAWIDTH_4 SOCAM_DATAWIDTH(4)
-#define SOCAM_DATAWIDTH_8 SOCAM_DATAWIDTH(8)
-#define SOCAM_DATAWIDTH_9 SOCAM_DATAWIDTH(9)
-#define SOCAM_DATAWIDTH_10 SOCAM_DATAWIDTH(10)
-#define SOCAM_DATAWIDTH_12 SOCAM_DATAWIDTH(12)
-#define SOCAM_DATAWIDTH_15 SOCAM_DATAWIDTH(15)
-#define SOCAM_DATAWIDTH_16 SOCAM_DATAWIDTH(16)
-#define SOCAM_DATAWIDTH_18 SOCAM_DATAWIDTH(18)
-#define SOCAM_DATAWIDTH_24 SOCAM_DATAWIDTH(24)
-
-#define SOCAM_DATAWIDTH_MASK (SOCAM_DATAWIDTH_4 | SOCAM_DATAWIDTH_8 | \
- SOCAM_DATAWIDTH_9 | SOCAM_DATAWIDTH_10 | \
- SOCAM_DATAWIDTH_12 | SOCAM_DATAWIDTH_15 | \
- SOCAM_DATAWIDTH_16 | SOCAM_DATAWIDTH_18 | \
- SOCAM_DATAWIDTH_24)
-
-static inline void soc_camera_limit_side(int *start, int *length,
- unsigned int start_min,
- unsigned int length_min, unsigned int length_max)
-{
- if (*length < length_min)
- *length = length_min;
- else if (*length > length_max)
- *length = length_max;
-
- if (*start < start_min)
- *start = start_min;
- else if (*start > start_min + length_max - *length)
- *start = start_min + length_max - *length;
-}
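
As a worked illustration of the clamping above, suppose start_min = 16, with
length clamped to 32..640: a request of *start = 700, *length = 800 first
reduces the length to 640 and then pulls the start back to 16 so the window
still fits. A minimal sketch, with all numeric limits invented for the example
(v4l2_rect width/height are copied through ints to match the helper's
signature):

	static void example_clamp_crop(struct v4l2_rect *rect)
	{
		int left = rect->left, width = rect->width;
		int top = rect->top, height = rect->height;

		/* Horizontal: start >= 16, width clamped to 32..640 */
		soc_camera_limit_side(&left, &width, 16, 32, 640);
		/* Vertical: start >= 8, height clamped to 32..480 */
		soc_camera_limit_side(&top, &height, 8, 32, 480);

		rect->left = left;
		rect->width = width;
		rect->top = top;
		rect->height = height;
	}
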
-
-unsigned long soc_camera_apply_board_flags(struct soc_camera_subdev_desc *ssdd,
- const struct v4l2_mbus_config *cfg);
-
-int soc_camera_power_init(struct device *dev, struct soc_camera_subdev_desc *ssdd);
-int soc_camera_power_on(struct device *dev, struct soc_camera_subdev_desc *ssdd,
- struct v4l2_clk *clk);
-int soc_camera_power_off(struct device *dev, struct soc_camera_subdev_desc *ssdd,
- struct v4l2_clk *clk);
-
-static inline int soc_camera_set_power(struct device *dev,
- struct soc_camera_subdev_desc *ssdd, struct v4l2_clk *clk, bool on)
-{
- return on ? soc_camera_power_on(dev, ssdd, clk)
- : soc_camera_power_off(dev, ssdd, clk);
-}
-
-/* This is only temporary here - until v4l2-subdev begins to link to video_device */
-#include <linux/i2c.h>
-static inline struct video_device *soc_camera_i2c_to_vdev(const struct i2c_client *client)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
- return icd ? icd->vdev : NULL;
-}
-
-static inline struct soc_camera_subdev_desc *soc_camera_i2c_to_desc(const struct i2c_client *client)
-{
- return client->dev.platform_data;
-}
-
-static inline struct v4l2_subdev *soc_camera_vdev_to_subdev(struct video_device *vdev)
-{
- struct soc_camera_device *icd = video_get_drvdata(vdev);
- return soc_camera_to_subdev(icd);
-}
-
-static inline struct soc_camera_device *soc_camera_from_vb2q(const struct vb2_queue *vq)
-{
- return container_of(vq, struct soc_camera_device, vb2_vidq);
-}
-
-static inline u32 soc_camera_grp_id(const struct soc_camera_device *icd)
-{
- return (icd->iface << 8) | (icd->devnum + 1);
-}
-
-void soc_camera_lock(struct vb2_queue *vq);
-void soc_camera_unlock(struct vb2_queue *vq);
-
-#endif
diff --git a/include/media/tpg/v4l2-tpg.h b/include/media/tpg/v4l2-tpg.h
index eb191e85d363..a55088921d1d 100644
--- a/include/media/tpg/v4l2-tpg.h
+++ b/include/media/tpg/v4l2-tpg.h
@@ -210,6 +210,7 @@ struct tpg_data {
bool show_square;
bool insert_sav;
bool insert_eav;
+ bool insert_hdmi_video_guard_band;
/* Test pattern movement */
enum tpg_move_mode mv_hor_mode;
@@ -241,7 +242,7 @@ void tpg_log_status(struct tpg_data *tpg);
void tpg_set_font(const u8 *f);
void tpg_gen_text(const struct tpg_data *tpg,
- u8 *basep[TPG_MAX_PLANES][2], int y, int x, char *text);
+ u8 *basep[TPG_MAX_PLANES][2], int y, int x, const char *text);
void tpg_calc_text_basep(struct tpg_data *tpg,
u8 *basep[TPG_MAX_PLANES][2], unsigned p, u8 *vbuf);
unsigned tpg_g_interleaved_plane(const struct tpg_data *tpg, unsigned buf_line);
@@ -252,6 +253,7 @@ void tpg_fillbuffer(struct tpg_data *tpg, v4l2_std_id std,
bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc);
void tpg_s_crop_compose(struct tpg_data *tpg, const struct v4l2_rect *crop,
const struct v4l2_rect *compose);
+const char *tpg_g_color_order(const struct tpg_data *tpg);
static inline void tpg_s_pattern(struct tpg_data *tpg, enum tpg_pattern pattern)
{
@@ -324,6 +326,7 @@ static inline void tpg_s_saturation(struct tpg_data *tpg,
static inline void tpg_s_hue(struct tpg_data *tpg,
s16 hue)
{
+ hue = clamp_t(s16, hue, -128, 128);
if (tpg->hue == hue)
return;
tpg->hue = hue;
@@ -589,6 +592,21 @@ static inline void tpg_s_insert_eav(struct tpg_data *tpg, bool insert_eav)
tpg->insert_eav = insert_eav;
}
+/*
+ * This inserts 4 pixels of the RGB color 0xab55ab at the left hand side of the
+ * image. This is only done for 3 or 4 byte RGB pixel formats. This pixel value
+ * equals the Video Guard Band value as defined by HDMI (see section 5.2.2.1
+ * in the HDMI 1.3 Specification) that precedes the first actual pixel. If the
+ * HDMI receiver doesn't handle this correctly, then it might keep skipping
+ * these Video Guard Band patterns and end up with a shorter video line. So this
+ * is a nice pattern to test with.
+ */
+static inline void tpg_s_insert_hdmi_video_guard_band(struct tpg_data *tpg,
+ bool insert_hdmi_video_guard_band)
+{
+ tpg->insert_hdmi_video_guard_band = insert_hdmi_video_guard_band;
+}
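
A short sketch of how a TPG-based test driver might enable this together with
the SAV/EAV insertion helpers from this header (the wrapper function itself is
illustrative, not part of the API):

	static void example_enable_timing_patterns(struct tpg_data *tpg)
	{
		/* Emit SAV/EAV sync codes plus the 4-pixel 0xab55ab guard
		 * band, so an HDMI receiver can be checked for correct
		 * guard-band handling.
		 */
		tpg_s_insert_sav(tpg, true);
		tpg_s_insert_eav(tpg, true);
		tpg_s_insert_hdmi_video_guard_band(tpg, true);
	}
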
+
void tpg_update_mv_step(struct tpg_data *tpg);
static inline void tpg_s_mv_hor_mode(struct tpg_data *tpg,
diff --git a/include/media/tuner.h b/include/media/tuner.h
index ff85d7227f91..a7796e0a3659 100644
--- a/include/media/tuner.h
+++ b/include/media/tuner.h
@@ -132,6 +132,7 @@
#define TUNER_SONY_BTF_PG472Z 89 /* PAL+SECAM */
#define TUNER_SONY_BTF_PK467Z 90 /* NTSC_JP */
#define TUNER_SONY_BTF_PB463Z 91 /* NTSC */
+#define TUNER_SI2157 92
/* tv card specific */
#define TDA9887_PRESENT (1<<0)
diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h
index 8319284c93cb..25eb1d138c06 100644
--- a/include/media/v4l2-async.h
+++ b/include/media/v4l2-async.h
@@ -11,6 +11,7 @@
#include <linux/list.h>
#include <linux/mutex.h>
+struct dentry;
struct device;
struct device_node;
struct v4l2_device;
@@ -21,18 +22,13 @@ struct v4l2_async_notifier;
* enum v4l2_async_match_type - type of asynchronous subdevice logic to be used
* in order to identify a match
*
- * @V4L2_ASYNC_MATCH_CUSTOM: Match will use the logic provided by &struct
- * v4l2_async_subdev.match ops
- * @V4L2_ASYNC_MATCH_DEVNAME: Match will use the device name
* @V4L2_ASYNC_MATCH_I2C: Match will check for I2C adapter ID and address
* @V4L2_ASYNC_MATCH_FWNODE: Match will use firmware node
*
- * This enum is used by the asyncrhronous sub-device logic to define the
+ * This enum is used by the asynchronous sub-device logic to define the
* algorithm that will be used to match an asynchronous device.
*/
enum v4l2_async_match_type {
- V4L2_ASYNC_MATCH_CUSTOM,
- V4L2_ASYNC_MATCH_DEVNAME,
V4L2_ASYNC_MATCH_I2C,
V4L2_ASYNC_MATCH_FWNODE,
};
@@ -45,9 +41,6 @@ enum v4l2_async_match_type {
* @match.fwnode:
* pointer to &struct fwnode_handle to be matched.
* Used if @match_type is %V4L2_ASYNC_MATCH_FWNODE.
- * @match.device_name:
- * string containing the device name to be matched.
- * Used if @match_type is %V4L2_ASYNC_MATCH_DEVNAME.
* @match.i2c: embedded struct with I2C parameters to be matched.
* Both @match.i2c.adapter_id and @match.i2c.address
* should be matched.
@@ -58,15 +51,6 @@ enum v4l2_async_match_type {
* @match.i2c.address:
* I2C address to be matched.
* Used if @match_type is %V4L2_ASYNC_MATCH_I2C.
- * @match.custom:
- * Driver-specific match criteria.
- * Used if @match_type is %V4L2_ASYNC_MATCH_CUSTOM.
- * @match.custom.match:
- * Driver-specific match function to be used if
- * %V4L2_ASYNC_MATCH_CUSTOM.
- * @match.custom.priv:
- * Driver-specific private struct with match parameters
- * to be used if %V4L2_ASYNC_MATCH_CUSTOM.
* @asd_list: used to add struct v4l2_async_subdev objects to the
* master notifier @asd_list
* @list: used to link struct v4l2_async_subdev objects, waiting to be
@@ -80,16 +64,10 @@ struct v4l2_async_subdev {
enum v4l2_async_match_type match_type;
union {
struct fwnode_handle *fwnode;
- const char *device_name;
struct {
int adapter_id;
unsigned short address;
} i2c;
- struct {
- bool (*match)(struct device *dev,
- struct v4l2_async_subdev *sd);
- void *priv;
- } custom;
} match;
/* v4l2-async core private: not to be used by drivers */
@@ -103,6 +81,7 @@ struct v4l2_async_subdev {
* @complete: All subdevices have been probed successfully. The complete
* callback is only executed for the root notifier.
* @unbind: a subdevice is leaving
+ * @destroy: the asd is about to be freed
*/
struct v4l2_async_notifier_operations {
int (*bound)(struct v4l2_async_notifier *notifier,
@@ -112,6 +91,7 @@ struct v4l2_async_notifier_operations {
void (*unbind)(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
struct v4l2_async_subdev *asd);
+ void (*destroy)(struct v4l2_async_subdev *asd);
};
/**
@@ -138,157 +118,163 @@ struct v4l2_async_notifier {
};
/**
- * v4l2_async_notifier_init - Initialize a notifier.
+ * v4l2_async_debug_init - Initialize debugging tools.
+ *
+ * @debugfs_dir: pointer to the parent debugfs &struct dentry
+ */
+void v4l2_async_debug_init(struct dentry *debugfs_dir);
+
+/**
+ * v4l2_async_nf_init - Initialize a notifier.
*
* @notifier: pointer to &struct v4l2_async_notifier
*
* This function initializes the notifier @asd_list. It must be called
- * before the first call to @v4l2_async_notifier_add_subdev.
+ * before adding a subdevice to a notifier, using one of:
+ * v4l2_async_nf_add_fwnode_remote(),
+ * v4l2_async_nf_add_fwnode(),
+ * v4l2_async_nf_add_i2c(),
+ * __v4l2_async_nf_add_subdev() or
+ * v4l2_async_nf_parse_fwnode_endpoints().
*/
-void v4l2_async_notifier_init(struct v4l2_async_notifier *notifier);
+void v4l2_async_nf_init(struct v4l2_async_notifier *notifier);
/**
- * v4l2_async_notifier_add_subdev - Add an async subdev to the
+ * __v4l2_async_nf_add_subdev - Add an async subdev to the
* notifier's master asd list.
*
* @notifier: pointer to &struct v4l2_async_notifier
* @asd: pointer to &struct v4l2_async_subdev
*
- * Call this function before registering a notifier to link the
- * provided asd to the notifiers master @asd_list.
+ * \warning: Drivers should avoid using this function and instead use one of:
+ * v4l2_async_nf_add_fwnode(),
+ * v4l2_async_nf_add_fwnode_remote() or
+ * v4l2_async_nf_add_i2c().
+ *
+ * Call this function before registering a notifier to link the provided @asd to
+ * the notifier's master @asd_list. The @asd must be allocated with k*alloc() as
+ * it will be freed by the framework when the notifier is destroyed.
*/
-int v4l2_async_notifier_add_subdev(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev *asd);
+int __v4l2_async_nf_add_subdev(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_subdev *asd);
+struct v4l2_async_subdev *
+__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier,
+ struct fwnode_handle *fwnode,
+ unsigned int asd_struct_size);
/**
- * v4l2_async_notifier_add_fwnode_subdev - Allocate and add a fwnode async
+ * v4l2_async_nf_add_fwnode - Allocate and add a fwnode async
* subdev to the notifier's master asd_list.
*
* @notifier: pointer to &struct v4l2_async_notifier
- * @fwnode: fwnode handle of the sub-device to be matched
- * @asd_struct_size: size of the driver's async sub-device struct, including
- * sizeof(struct v4l2_async_subdev). The &struct
- * v4l2_async_subdev shall be the first member of
- * the driver's async sub-device struct, i.e. both
- * begin at the same memory address.
+ * @fwnode: fwnode handle of the sub-device to be matched, pointer to
+ * &struct fwnode_handle
+ * @type: Type of the driver's async sub-device struct. The &struct
+ * v4l2_async_subdev shall be the first member of the driver's async
+ * sub-device struct, i.e. both begin at the same memory address.
*
 * Allocate a fwnode-matched asd of size sizeof(@type) and add it to the
 * notifier's @asd_list. The function also takes a reference on the fwnode,
 * which is released later at notifier cleanup time.
*/
-struct v4l2_async_subdev *
-v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
- struct fwnode_handle *fwnode,
- unsigned int asd_struct_size);
+#define v4l2_async_nf_add_fwnode(notifier, fwnode, type) \
+ ((type *)__v4l2_async_nf_add_fwnode(notifier, fwnode, sizeof(type)))
+struct v4l2_async_subdev *
+__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
+ struct fwnode_handle *endpoint,
+ unsigned int asd_struct_size);
/**
- * v4l2_async_notifier_add_fwnode_remote_subdev - Allocate and add a fwnode
+ * v4l2_async_nf_add_fwnode_remote - Allocate and add a fwnode
* remote async subdev to the
* notifier's master asd_list.
*
- * @notif: pointer to &struct v4l2_async_notifier
- * @endpoint: local endpoint pointing to the remote sub-device to be matched
- * @asd: Async sub-device struct allocated by the caller. The &struct
- * v4l2_async_subdev shall be the first member of the driver's async
- * sub-device struct, i.e. both begin at the same memory address.
+ * @notifier: pointer to &struct v4l2_async_notifier
+ * @ep: local endpoint pointing to the remote sub-device to be matched,
+ * pointer to &struct fwnode_handle
+ * @type: Type of the driver's async sub-device struct. The &struct
+ * v4l2_async_subdev shall be the first member of the driver's async
+ * sub-device struct, i.e. both begin at the same memory address.
*
 * Gets the remote endpoint of a given local endpoint, sets it up for fwnode
 * matching and adds the async sub-device to the notifier's @asd_list. The
 * function also takes a reference on the fwnode, which is released later at
 * notifier cleanup time.
*
- * This is just like @v4l2_async_notifier_add_fwnode_subdev, but with the
- * exception that the fwnode refers to a local endpoint, not the remote one, and
- * the function relies on the caller to allocate the async sub-device struct.
+ * This is just like v4l2_async_nf_add_fwnode(), but with the
+ * exception that the fwnode refers to a local endpoint, not the remote one.
*/
-int
-v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif,
- struct fwnode_handle *endpoint,
- struct v4l2_async_subdev *asd);
+#define v4l2_async_nf_add_fwnode_remote(notifier, ep, type) \
+ ((type *)__v4l2_async_nf_add_fwnode_remote(notifier, ep, sizeof(type)))
+struct v4l2_async_subdev *
+__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier,
+ int adapter_id, unsigned short address,
+ unsigned int asd_struct_size);
/**
- * v4l2_async_notifier_add_i2c_subdev - Allocate and add an i2c async
+ * v4l2_async_nf_add_i2c - Allocate and add an i2c async
* subdev to the notifier's master asd_list.
*
* @notifier: pointer to &struct v4l2_async_notifier
- * @adapter_id: I2C adapter ID to be matched
+ * @adapter: I2C adapter ID to be matched
* @address: I2C address of sub-device to be matched
- * @asd_struct_size: size of the driver's async sub-device struct, including
- * sizeof(struct v4l2_async_subdev). The &struct
- * v4l2_async_subdev shall be the first member of
- * the driver's async sub-device struct, i.e. both
- * begin at the same memory address.
+ * @type: Type of the driver's async sub-device struct. The &struct
+ * v4l2_async_subdev shall be the first member of the driver's async
+ * sub-device struct, i.e. both begin at the same memory address.
*
- * Same as above but for I2C matched sub-devices.
+ * Same as v4l2_async_nf_add_fwnode() but for I2C matched
+ * sub-devices.
*/
-struct v4l2_async_subdev *
-v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
- int adapter_id, unsigned short address,
- unsigned int asd_struct_size);
-
-/**
- * v4l2_async_notifier_add_devname_subdev - Allocate and add a device-name
- * async subdev to the notifier's master asd_list.
- *
- * @notifier: pointer to &struct v4l2_async_notifier
- * @device_name: device name string to be matched
- * @asd_struct_size: size of the driver's async sub-device struct, including
- * sizeof(struct v4l2_async_subdev). The &struct
- * v4l2_async_subdev shall be the first member of
- * the driver's async sub-device struct, i.e. both
- * begin at the same memory address.
- *
- * Same as above but for device-name matched sub-devices.
- */
-struct v4l2_async_subdev *
-v4l2_async_notifier_add_devname_subdev(struct v4l2_async_notifier *notifier,
- const char *device_name,
- unsigned int asd_struct_size);
+#define v4l2_async_nf_add_i2c(notifier, adapter, address, type) \
+ ((type *)__v4l2_async_nf_add_i2c(notifier, adapter, address, \
+ sizeof(type)))
/**
- * v4l2_async_notifier_register - registers a subdevice asynchronous notifier
+ * v4l2_async_nf_register - registers a subdevice asynchronous notifier
*
* @v4l2_dev: pointer to &struct v4l2_device
* @notifier: pointer to &struct v4l2_async_notifier
*/
-int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
- struct v4l2_async_notifier *notifier);
+int v4l2_async_nf_register(struct v4l2_device *v4l2_dev,
+ struct v4l2_async_notifier *notifier);
/**
- * v4l2_async_subdev_notifier_register - registers a subdevice asynchronous
+ * v4l2_async_subdev_nf_register - registers a subdevice asynchronous
* notifier for a sub-device
*
* @sd: pointer to &struct v4l2_subdev
* @notifier: pointer to &struct v4l2_async_notifier
*/
-int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
- struct v4l2_async_notifier *notifier);
+int v4l2_async_subdev_nf_register(struct v4l2_subdev *sd,
+ struct v4l2_async_notifier *notifier);
/**
- * v4l2_async_notifier_unregister - unregisters a subdevice
+ * v4l2_async_nf_unregister - unregisters a subdevice
* asynchronous notifier
*
* @notifier: pointer to &struct v4l2_async_notifier
*/
-void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier);
+void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier);
/**
- * v4l2_async_notifier_cleanup - clean up notifier resources
+ * v4l2_async_nf_cleanup - clean up notifier resources
* @notifier: the notifier the resources of which are to be cleaned up
*
* Release memory resources related to a notifier, including the async
* sub-devices allocated for the purposes of the notifier but not the notifier
* itself. The user is responsible for calling this function to clean up the
* notifier after calling
- * @v4l2_async_notifier_add_subdev,
- * @v4l2_async_notifier_parse_fwnode_endpoints or
- * @v4l2_fwnode_reference_parse_sensor_common.
+ * v4l2_async_nf_add_fwnode_remote(),
+ * v4l2_async_nf_add_fwnode(),
+ * v4l2_async_nf_add_i2c(),
+ * __v4l2_async_nf_add_subdev() or
+ * v4l2_async_nf_parse_fwnode_endpoints().
*
- * There is no harm from calling v4l2_async_notifier_cleanup in other
+ * There is no harm from calling v4l2_async_nf_cleanup() in other
* cases as long as its memory has been zeroed after it has been
* allocated.
*/
-void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier);
+void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier);
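
Putting the lifecycle together, a probe-time sketch using the helpers above
(the my_sensor_asd struct, the endpoint handle and the error label are
illustrative assumptions):

	struct my_sensor_asd {
		struct v4l2_async_subdev asd;	/* must be the first member */
		/* driver-private per-subdev state may follow */
	};

	static int example_setup_notifier(struct v4l2_device *v4l2_dev,
					  struct v4l2_async_notifier *notifier,
					  struct fwnode_handle *endpoint)
	{
		struct my_sensor_asd *asd;
		int ret;

		v4l2_async_nf_init(notifier);

		/* Allocates the asd and takes a reference on the remote fwnode */
		asd = v4l2_async_nf_add_fwnode_remote(notifier, endpoint,
						      struct my_sensor_asd);
		if (IS_ERR(asd)) {
			ret = PTR_ERR(asd);
			goto err_cleanup;
		}

		ret = v4l2_async_nf_register(v4l2_dev, notifier);
		if (ret)
			goto err_cleanup;

		return 0;

	err_cleanup:
		v4l2_async_nf_cleanup(notifier);
		return ret;
	}

On remove, the matching teardown is v4l2_async_nf_unregister() followed by
v4l2_async_nf_cleanup().
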
/**
* v4l2_async_register_subdev - registers a sub-device to the asynchronous
@@ -299,16 +285,16 @@ void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier);
int v4l2_async_register_subdev(struct v4l2_subdev *sd);
/**
- * v4l2_async_register_subdev_sensor_common - registers a sensor sub-device to
- * the asynchronous sub-device
- * framework and parse set up common
- * sensor related devices
+ * v4l2_async_register_subdev_sensor - registers a sensor sub-device to the
+ * asynchronous sub-device framework and
+ * parses and sets up common sensor-related
+ * devices
*
* @sd: pointer to struct &v4l2_subdev
*
* This function is just like v4l2_async_register_subdev() with the exception
* that calling it will also parse firmware interfaces for remote references
- * using v4l2_async_notifier_parse_fwnode_sensor_common() and registers the
+ * using v4l2_async_nf_parse_fwnode_sensor() and register the
* async sub-devices. The sub-device is similarly unregistered by calling
* v4l2_async_unregister_subdev().
*
@@ -318,7 +304,7 @@ int v4l2_async_register_subdev(struct v4l2_subdev *sd);
* to register it.
*/
int __must_check
-v4l2_async_register_subdev_sensor_common(struct v4l2_subdev *sd);
+v4l2_async_register_subdev_sensor(struct v4l2_subdev *sd);
/**
* v4l2_async_unregister_subdev - unregisters a sub-device to the asynchronous
diff --git a/include/media/v4l2-clk.h b/include/media/v4l2-clk.h
deleted file mode 100644
index d9d21a43a834..000000000000
--- a/include/media/v4l2-clk.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * V4L2 clock service
- *
- * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * ATTENTION: This is a temporary API and it shall be replaced by the generic
- * clock API, when the latter becomes widely available.
- */
-
-#ifndef MEDIA_V4L2_CLK_H
-#define MEDIA_V4L2_CLK_H
-
-#include <linux/atomic.h>
-#include <linux/export.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-
-struct module;
-struct device;
-
-struct clk;
-struct v4l2_clk {
- struct list_head list;
- const struct v4l2_clk_ops *ops;
- const char *dev_id;
- int enable;
- struct mutex lock; /* Protect the enable count */
- atomic_t use_count;
- struct clk *clk;
- void *priv;
-};
-
-struct v4l2_clk_ops {
- struct module *owner;
- int (*enable)(struct v4l2_clk *clk);
- void (*disable)(struct v4l2_clk *clk);
- unsigned long (*get_rate)(struct v4l2_clk *clk);
- int (*set_rate)(struct v4l2_clk *clk, unsigned long);
-};
-
-struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
- const char *dev_name,
- void *priv);
-void v4l2_clk_unregister(struct v4l2_clk *clk);
-struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id);
-void v4l2_clk_put(struct v4l2_clk *clk);
-int v4l2_clk_enable(struct v4l2_clk *clk);
-void v4l2_clk_disable(struct v4l2_clk *clk);
-unsigned long v4l2_clk_get_rate(struct v4l2_clk *clk);
-int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate);
-
-struct module;
-
-struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
- unsigned long rate, struct module *owner);
-void v4l2_clk_unregister_fixed(struct v4l2_clk *clk);
-
-static inline struct v4l2_clk *v4l2_clk_register_fixed(const char *dev_id,
- unsigned long rate)
-{
- return __v4l2_clk_register_fixed(dev_id, rate, THIS_MODULE);
-}
-
-#define V4L2_CLK_NAME_SIZE 64
-
-#define v4l2_clk_name_i2c(name, size, adap, client) snprintf(name, size, \
- "%d-%04x", adap, client)
-
-#define v4l2_clk_name_of(name, size, node) snprintf(name, size, \
- "of-%pOF", node)
-
-#endif
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 150ee16ebd81..1bdaea248089 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -175,7 +175,8 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
*
* @sd: pointer to &struct v4l2_subdev
* @client: pointer to struct i2c_client
- * @devname: the name of the device; if NULL, the I²C device's name will be used
+ * @devname: the name of the device; if NULL, the I²C device driver's name
+ * will be used
* @postfix: sub-device specific string to put right after the I²C device name;
* may be NULL
*/
@@ -519,6 +520,31 @@ int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, u32 pixelformat,
u32 width, u32 height);
+/**
+ * v4l2_get_link_freq - Get link rate from transmitter
+ *
+ * @handler: The transmitter's control handler
+ * @mul: The multiplier between pixel rate and link frequency. Bits per pixel on
+ * D-PHY, samples per clock on parallel. 0 otherwise.
+ * @div: The divisor between pixel rate and link frequency. Number of data lanes
+ * times two on D-PHY, 1 on parallel. 0 otherwise.
+ *
+ * This function is intended for obtaining the link frequency from the
+ * transmitter sub-devices. It returns the link rate, either from the
+ * V4L2_CID_LINK_FREQ control implemented by the transmitter, or a value
+ * calculated from the V4L2_CID_PIXEL_RATE control of the transmitter.
+ *
+ * Returns link frequency on success, otherwise a negative error code:
+ * -ENOENT: Link frequency or pixel rate control not found
+ * -EINVAL: Invalid link frequency value
+ */
+s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
+ unsigned int div);
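
For instance, on a D-PHY (CSI-2) bus @mul is the bits per pixel and @div is
twice the lane count; a sketch with an assumed RAW10 format on two data lanes:

	static s64 example_csi2_link_freq(struct v4l2_subdev *sensor)
	{
		/* RAW10: mul = 10 bits per pixel; 2 lanes: div = 2 * 2 */
		return v4l2_get_link_freq(sensor->ctrl_handler, 10, 2 * 2);
	}
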
+
+void v4l2_simplify_fraction(u32 *numerator, u32 *denominator,
+ unsigned int n_terms, unsigned int threshold);
+u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator);
+
static inline u64 v4l2_buffer_get_timestamp(const struct v4l2_buffer *buf)
{
/*
@@ -539,4 +565,33 @@ static inline void v4l2_buffer_set_timestamp(struct v4l2_buffer *buf,
buf->timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
}
+static inline bool v4l2_is_colorspace_valid(__u32 colorspace)
+{
+ return colorspace > V4L2_COLORSPACE_DEFAULT &&
+ colorspace < V4L2_COLORSPACE_LAST;
+}
+
+static inline bool v4l2_is_xfer_func_valid(__u32 xfer_func)
+{
+ return xfer_func > V4L2_XFER_FUNC_DEFAULT &&
+ xfer_func < V4L2_XFER_FUNC_LAST;
+}
+
+static inline bool v4l2_is_ycbcr_enc_valid(__u8 ycbcr_enc)
+{
+ return ycbcr_enc > V4L2_YCBCR_ENC_DEFAULT &&
+ ycbcr_enc < V4L2_YCBCR_ENC_LAST;
+}
+
+static inline bool v4l2_is_hsv_enc_valid(__u8 hsv_enc)
+{
+ return hsv_enc == V4L2_HSV_ENC_180 || hsv_enc == V4L2_HSV_ENC_256;
+}
+
+static inline bool v4l2_is_quant_valid(__u8 quantization)
+{
+ return quantization == V4L2_QUANTIZATION_FULL_RANGE ||
+ quantization == V4L2_QUANTIZATION_LIM_RANGE;
+}
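
These predicates are handy for sanitizing user-supplied colorimetry, e.g. in a
TRY_FMT handler; a minimal sketch (the fall-back-to-default policy is the
driver's choice, not mandated by the API):

	static void example_sanitize_colorimetry(struct v4l2_pix_format *pix)
	{
		if (!v4l2_is_colorspace_valid(pix->colorspace))
			pix->colorspace = V4L2_COLORSPACE_DEFAULT;
		if (!v4l2_is_xfer_func_valid(pix->xfer_func))
			pix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
		if (!v4l2_is_ycbcr_enc_valid(pix->ycbcr_enc))
			pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
		if (!v4l2_is_quant_valid(pix->quantization))
			pix->quantization = V4L2_QUANTIZATION_DEFAULT;
	}
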
+
#endif /* V4L2_COMMON_H_ */
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 7db9e719a583..e59d9a234631 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -13,26 +13,17 @@
#include <linux/videodev2.h>
#include <media/media-request.h>
-/*
- * Include the stateless codec compound control definitions.
- * This will move to the public headers once this API is fully stable.
- */
-#include <media/mpeg2-ctrls.h>
-#include <media/fwht-ctrls.h>
-#include <media/h264-ctrls.h>
-#include <media/vp8-ctrls.h>
-#include <media/hevc-ctrls.h>
-
/* forward references */
struct file;
+struct poll_table_struct;
+struct v4l2_ctrl;
struct v4l2_ctrl_handler;
struct v4l2_ctrl_helper;
-struct v4l2_ctrl;
-struct video_device;
+struct v4l2_fh;
+struct v4l2_fwnode_device_properties;
struct v4l2_subdev;
struct v4l2_subscribed_event;
-struct v4l2_fh;
-struct poll_table_struct;
+struct video_device;
/**
* union v4l2_ctrl_ptr - A pointer to a control value.
@@ -42,18 +33,24 @@ struct poll_table_struct;
* @p_u16: Pointer to a 16-bit unsigned value.
* @p_u32: Pointer to a 32-bit unsigned value.
* @p_char: Pointer to a string.
- * @p_mpeg2_slice_params: Pointer to a MPEG2 slice parameters structure.
- * @p_mpeg2_quantization: Pointer to a MPEG2 quantization data structure.
+ * @p_mpeg2_sequence: Pointer to a MPEG2 sequence structure.
+ * @p_mpeg2_picture: Pointer to a MPEG2 picture structure.
+ * @p_mpeg2_quantisation: Pointer to a MPEG2 quantisation data structure.
* @p_fwht_params: Pointer to a FWHT stateless parameters structure.
* @p_h264_sps: Pointer to a struct v4l2_ctrl_h264_sps.
* @p_h264_pps: Pointer to a struct v4l2_ctrl_h264_pps.
* @p_h264_scaling_matrix: Pointer to a struct v4l2_ctrl_h264_scaling_matrix.
* @p_h264_slice_params: Pointer to a struct v4l2_ctrl_h264_slice_params.
* @p_h264_decode_params: Pointer to a struct v4l2_ctrl_h264_decode_params.
- * @p_vp8_frame_header: Pointer to a VP8 frame header structure.
+ * @p_h264_pred_weights: Pointer to a struct v4l2_ctrl_h264_pred_weights.
+ * @p_vp8_frame: Pointer to a VP8 frame params structure.
+ * @p_vp9_compressed_hdr_probs: Pointer to a VP9 frame compressed header probs structure.
+ * @p_vp9_frame: Pointer to a VP9 frame params structure.
* @p_hevc_sps: Pointer to an HEVC sequence parameter set structure.
* @p_hevc_pps: Pointer to an HEVC picture parameter set structure.
* @p_hevc_slice_params: Pointer to an HEVC slice parameters structure.
+ * @p_hdr10_cll: Pointer to an HDR10 Content Light Level structure.
+ * @p_hdr10_mastering: Pointer to an HDR10 Mastering Display structure.
* @p_area: Pointer to an area.
* @p: Pointer to a compound value.
* @p_const: Pointer to a constant compound value.
@@ -65,18 +62,24 @@ union v4l2_ctrl_ptr {
u16 *p_u16;
u32 *p_u32;
char *p_char;
- struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params;
- struct v4l2_ctrl_mpeg2_quantization *p_mpeg2_quantization;
+ struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence;
+ struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture;
+ struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quantisation;
struct v4l2_ctrl_fwht_params *p_fwht_params;
struct v4l2_ctrl_h264_sps *p_h264_sps;
struct v4l2_ctrl_h264_pps *p_h264_pps;
struct v4l2_ctrl_h264_scaling_matrix *p_h264_scaling_matrix;
struct v4l2_ctrl_h264_slice_params *p_h264_slice_params;
struct v4l2_ctrl_h264_decode_params *p_h264_decode_params;
- struct v4l2_ctrl_vp8_frame_header *p_vp8_frame_header;
+ struct v4l2_ctrl_h264_pred_weights *p_h264_pred_weights;
+ struct v4l2_ctrl_vp8_frame *p_vp8_frame;
struct v4l2_ctrl_hevc_sps *p_hevc_sps;
struct v4l2_ctrl_hevc_pps *p_hevc_pps;
struct v4l2_ctrl_hevc_slice_params *p_hevc_slice_params;
+ struct v4l2_ctrl_vp9_compressed_hdr *p_vp9_compressed_hdr_probs;
+ struct v4l2_ctrl_vp9_frame *p_vp9_frame;
+ struct v4l2_ctrl_hdr10_cll_info *p_hdr10_cll;
+ struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering;
struct v4l2_area *p_area;
void *p;
const void *p_const;
@@ -118,21 +121,19 @@ struct v4l2_ctrl_ops {
* struct v4l2_ctrl_type_ops - The control type operations that the driver
* has to provide.
*
- * @equal: return true if both values are equal.
- * @init: initialize the value.
+ * @equal: return true if all ctrl->elems array elements are equal.
+ * @init: initialize the value for array elements from from_idx to ctrl->elems.
* @log: log the value.
- * @validate: validate the value. Return 0 on success and a negative value
- * otherwise.
+ * @validate: validate the value for ctrl->new_elems array elements.
+ * Return 0 on success and a negative value otherwise.
*/
struct v4l2_ctrl_type_ops {
- bool (*equal)(const struct v4l2_ctrl *ctrl, u32 idx,
- union v4l2_ctrl_ptr ptr1,
- union v4l2_ctrl_ptr ptr2);
- void (*init)(const struct v4l2_ctrl *ctrl, u32 idx,
+ bool (*equal)(const struct v4l2_ctrl *ctrl,
+ union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2);
+ void (*init)(const struct v4l2_ctrl *ctrl, u32 from_idx,
union v4l2_ctrl_ptr ptr);
void (*log)(const struct v4l2_ctrl *ctrl);
- int (*validate)(const struct v4l2_ctrl *ctrl, u32 idx,
- union v4l2_ctrl_ptr ptr);
+ int (*validate)(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr);
};
/**
@@ -176,6 +177,8 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
* and/or has type %V4L2_CTRL_TYPE_STRING. In other words, &struct
* v4l2_ext_control uses field p to point to the data.
* @is_array: If set, then this control contains an N-dimensional array.
+ * @is_dyn_array: If set, then this control contains a dynamically sized 1-dimensional array.
+ * If this is set, then @is_array is also set.
* @has_volatiles: If set, then one or more members of the cluster are volatile.
* Drivers should never touch this flag.
* @call_notify: If set, then call the handler's notify function whenever the
@@ -196,6 +199,9 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
* @step: The control's step value for non-menu controls.
* @elems: The number of elements in the N-dimensional array.
* @elem_size: The size in bytes of the control.
+ * @new_elems: The number of elements in p_new. This is the same as @elems,
+ * except for dynamic arrays. In that case it is in the range of
+ * 1 to @p_array_alloc_elems.
* @dims: The size of each dimension.
* @nr_of_dims:The number of dimensions in @dims.
* @menu_skip_mask: The control's skip mask for menu controls. This makes it
@@ -214,15 +220,20 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
* :math:`ceil(\frac{maximum - minimum}{step}) + 1`.
* Used only if the @type is %V4L2_CTRL_TYPE_INTEGER_MENU.
* @flags: The control's flags.
- * @cur: Structure to store the current value.
- * @cur.val: The control's current value, if the @type is represented via
- * a u32 integer (see &enum v4l2_ctrl_type).
- * @val: The control's new s32 value.
* @priv: The control's private pointer. For use by the driver. It is
* untouched by the control framework. Note that this pointer is
* not freed when the control is deleted. Should this be needed
* then a new internal bitfield can be added to tell the framework
* to free this pointer.
+ * @p_array: Pointer to the allocated array. Only valid if @is_array is true.
+ * @p_array_alloc_elems: The number of elements in the allocated
+ * array for both the cur and new values. So @p_array is actually
+ * sized for 2 * @p_array_alloc_elems * @elem_size. Only valid if
+ * @is_array is true.
+ * @cur: Structure to store the current value.
+ * @cur.val: The control's current value, if the @type is represented via
+ * a u32 integer (see &enum v4l2_ctrl_type).
+ * @val: The control's new s32 value.
* @p_def: The control's default value represented via a union which
* provides a standard way of accessing control types
* through a pointer (for compound controls only).
@@ -251,6 +262,7 @@ struct v4l2_ctrl {
unsigned int is_string:1;
unsigned int is_ptr:1;
unsigned int is_array:1;
+ unsigned int is_dyn_array:1;
unsigned int has_volatiles:1;
unsigned int call_notify:1;
unsigned int manual_mode_value:8;
@@ -263,6 +275,7 @@ struct v4l2_ctrl {
s64 minimum, maximum, default_value;
u32 elems;
u32 elem_size;
+ u32 new_elems;
u32 dims[V4L2_CTRL_MAX_DIMS];
u32 nr_of_dims;
union {
@@ -275,6 +288,8 @@ struct v4l2_ctrl {
};
unsigned long flags;
void *priv;
+ void *p_array;
+ u32 p_array_alloc_elems;
s32 val;
struct {
s32 val;
@@ -300,12 +315,24 @@ struct v4l2_ctrl {
* the control has been applied. This prevents applying controls
* from a cluster with multiple controls twice (when the first
* control of a cluster is applied, they all are).
- * @req: If set, this refers to another request that sets this control.
+ * @p_req_valid: If set, then p_req contains the control value for the request.
+ * @p_req_array_enomem: If set, then p_req is invalid since allocating space for
+ * an array failed. Attempting to read this value shall
+ * result in ENOMEM. Only valid if ctrl->is_array is true.
+ * @p_req_array_alloc_elems: The number of elements allocated for the
+ * array. Only valid if @p_req_valid and ctrl->is_array are
+ * true.
+ * @p_req_elems: The number of elements in @p_req. This is the same as
+ * ctrl->elems, except for dynamic arrays. In that case it is in
+ * the range of 1 to @p_req_array_alloc_elems. Only valid if
+ * @p_req_valid is true.
* @p_req: If the control handler containing this control reference
* is bound to a media request, then this points to the
- * value of the control that should be applied when the request
+ * value of the control that must be applied when the request
* is executed, or to the value of the control at the time
- * that the request was completed.
+ * that the request was completed. If @p_req_valid is false,
+ * then this control was never set for this request and the
+ * control will not be updated when this request is applied.
*
* Each control handler has a list of these refs. The list_head is used to
* keep a sorted-by-control-ID list of all controls, while the next pointer
@@ -318,7 +345,10 @@ struct v4l2_ctrl_ref {
struct v4l2_ctrl_helper *helper;
bool from_other_dev;
bool req_done;
- struct v4l2_ctrl_ref *req;
+ bool p_req_valid;
+ bool p_req_array_enomem;
+ u32 p_req_array_alloc_elems;
+ u32 p_req_elems;
union v4l2_ctrl_ptr p_req;
};
@@ -345,7 +375,7 @@ struct v4l2_ctrl_ref {
* @error: The error code of the first failed control addition.
* @request_is_queued: True if the request was queued.
* @requests: List to keep track of open control handler request objects.
- * For the parent control handler (@req_obj.req == NULL) this
+ * For the parent control handler (@req_obj.ops == NULL) this
* is the list header. When the parent control handler is
* removed, it has to unbind and put all these requests since
* they refer to the parent.
@@ -685,7 +715,9 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
* @p_def: The control's default value.
*
 * Same as v4l2_ctrl_new_std(), but with support for compound controls, thanks
- * to the @p_def field.
+ * to the @p_def field. Use v4l2_ctrl_ptr_create() to create @p_def from a
+ * pointer. Use v4l2_ctrl_ptr_create(NULL) if the default value of the
+ * compound control should be all zeroes.
*
*/
struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl,
@@ -924,6 +956,59 @@ static inline int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
}
/**
+ * __v4l2_ctrl_modify_dimensions() - Unlocked variant of v4l2_ctrl_modify_dimensions()
+ *
+ * @ctrl: The control to update.
+ * @dims: The control's new dimensions.
+ *
+ * Update the dimensions of an array control on the fly. The elements of the
+ * array are reset to their default value, even if the dimensions are
+ * unchanged.
+ *
+ * An error is returned if @dims is invalid for this control.
+ *
+ * The caller is responsible for acquiring the control handler mutex on behalf
+ * of __v4l2_ctrl_modify_dimensions().
+ *
+ * Note: calling this function when the same control is used in pending requests
+ * is untested. It should work (a request with the wrong size of the control
+ * will drop that control silently), but it will be very confusing.
+ */
+int __v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl,
+ u32 dims[V4L2_CTRL_MAX_DIMS]);
+
+/**
+ * v4l2_ctrl_modify_dimensions() - Update the dimensions of an array control.
+ *
+ * @ctrl: The control to update.
+ * @dims: The control's new dimensions.
+ *
+ * Update the dimensions of an array control on the fly. The elements of the
+ * array are reset to their default value, even if the dimensions are
+ * unchanged.
+ *
+ * An error is returned if @dims is invalid for this control type.
+ *
+ * This function assumes that the control handler is not locked and will
+ * take the lock itself.
+ *
+ * Note: calling this function when the same control is used in pending requests
+ * is untested. It should work (a request with the wrong size of the control
+ * will drop that control silently), but it will be very confusing.
+ */
+static inline int v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl,
+ u32 dims[V4L2_CTRL_MAX_DIMS])
+{
+ int rval;
+
+ v4l2_ctrl_lock(ctrl);
+ rval = __v4l2_ctrl_modify_dimensions(ctrl, dims);
+ v4l2_ctrl_unlock(ctrl);
+
+ return rval;
+}
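
A minimal sketch for the common one-dimensional case (the wrapper name is
illustrative); unused dimensions stay zero:

	static int example_resize_array_ctrl(struct v4l2_ctrl *ctrl, u32 elems)
	{
		u32 dims[V4L2_CTRL_MAX_DIMS] = { elems };

		return v4l2_ctrl_modify_dimensions(ctrl, dims);
	}
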
+
+/**
* v4l2_ctrl_notify() - Function to set a notify callback for a control.
*
* @ctrl: The control.
@@ -1113,45 +1198,54 @@ static inline int v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
}
/**
- * __v4l2_ctrl_s_ctrl_area() - Unlocked variant of v4l2_ctrl_s_ctrl_area().
+ * __v4l2_ctrl_s_ctrl_compound() - Unlocked variant to set a compound control
*
- * @ctrl: The control.
- * @area: The new area.
+ * @ctrl: The control.
+ * @type: The type of the data.
+ * @p: The new compound payload.
*
- * This sets the control's new area safely by going through the control
- * framework. This function assumes the control's handler is already locked,
- * allowing it to be used from within the &v4l2_ctrl_ops functions.
+ * This sets the control's new compound payload safely by going through the
+ * control framework. This function assumes the control's handler is already
+ * locked, allowing it to be used from within the &v4l2_ctrl_ops functions.
*
- * This function is for area type controls only.
+ * This function is for compound type controls only.
*/
-int __v4l2_ctrl_s_ctrl_area(struct v4l2_ctrl *ctrl,
- const struct v4l2_area *area);
+int __v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl,
+ enum v4l2_ctrl_type type, const void *p);
/**
- * v4l2_ctrl_s_ctrl_area() - Helper function to set a control's area value
- * from within a driver.
+ * v4l2_ctrl_s_ctrl_compound() - Helper function to set a compound control
+ * from within a driver.
*
- * @ctrl: The control.
- * @area: The new area.
+ * @ctrl: The control.
+ * @type: The type of the data.
+ * @p: The new compound payload.
*
- * This sets the control's new area safely by going through the control
- * framework. This function will lock the control's handler, so it cannot be
- * used from within the &v4l2_ctrl_ops functions.
+ * This sets the control's new compound payload safely by going through the
+ * control framework. This function will lock the control's handler, so it
+ * cannot be used from within the &v4l2_ctrl_ops functions.
*
- * This function is for area type controls only.
+ * This function is for compound type controls only.
*/
-static inline int v4l2_ctrl_s_ctrl_area(struct v4l2_ctrl *ctrl,
- const struct v4l2_area *area)
+static inline int v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl,
+ enum v4l2_ctrl_type type,
+ const void *p)
{
int rval;
v4l2_ctrl_lock(ctrl);
- rval = __v4l2_ctrl_s_ctrl_area(ctrl, area);
+ rval = __v4l2_ctrl_s_ctrl_compound(ctrl, type, p);
v4l2_ctrl_unlock(ctrl);
return rval;
}
+/* Helper defines for area type controls */
+#define __v4l2_ctrl_s_ctrl_area(ctrl, area) \
+ __v4l2_ctrl_s_ctrl_compound((ctrl), V4L2_CTRL_TYPE_AREA, (area))
+#define v4l2_ctrl_s_ctrl_area(ctrl, area) \
+ v4l2_ctrl_s_ctrl_compound((ctrl), V4L2_CTRL_TYPE_AREA, (area))
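
With these defines the old area calls keep working unchanged; a sketch of
setting an area control from a driver (the 1120 nm unit cell size is an
invented value):

	static int example_set_unit_cell(struct v4l2_ctrl *ctrl)
	{
		struct v4l2_area area = {
			.width = 1120,
			.height = 1120,
		};

		return v4l2_ctrl_s_ctrl_area(ctrl, &area);
	}
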
+
/* Internal helper functions that deal with control events. */
extern const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops;
@@ -1278,7 +1372,7 @@ static inline void v4l2_ctrl_request_hdl_put(struct v4l2_ctrl_handler *hdl)
}
/**
- * v4l2_ctrl_request_ctrl_find() - Find a control with the given ID.
+ * v4l2_ctrl_request_hdl_ctrl_find() - Find a control with the given ID.
*
* @hdl: The control handler from the request.
* @id: The ID of the control to find.
@@ -1417,4 +1511,73 @@ int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
*/
int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd);
+/**
+ * v4l2_ctrl_new_fwnode_properties() - Register controls for the device
+ * properties
+ *
+ * @hdl: pointer to &struct v4l2_ctrl_handler to register controls on
+ * @ctrl_ops: pointer to &struct v4l2_ctrl_ops to register controls with
+ * @p: pointer to &struct v4l2_fwnode_device_properties
+ *
+ * This function registers controls associated with device properties, using
+ * the property values contained in the @p parameter, if the property has been
+ * set to a value.
+ *
+ * Currently the following v4l2 controls are parsed and registered:
+ * - V4L2_CID_CAMERA_ORIENTATION
+ * - V4L2_CID_CAMERA_SENSOR_ROTATION
+ *
+ * Controls already registered by the caller with the @hdl control handler are
+ * not overwritten. Callers should register the controls they want to handle
+ * themselves before calling this function.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+int v4l2_ctrl_new_fwnode_properties(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ctrl_ops,
+ const struct v4l2_fwnode_device_properties *p);
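
A probe-time sketch, assuming the properties are parsed with
v4l2_fwnode_device_parse() from <media/v4l2-fwnode.h>:

	static int example_register_props(struct device *dev,
					  struct v4l2_ctrl_handler *hdl,
					  const struct v4l2_ctrl_ops *ops)
	{
		struct v4l2_fwnode_device_properties props;
		int ret;

		ret = v4l2_fwnode_device_parse(dev, &props);
		if (ret)
			return ret;

		return v4l2_ctrl_new_fwnode_properties(hdl, ops, &props);
	}
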
+
+/**
+ * v4l2_ctrl_type_op_equal - Default v4l2_ctrl_type_ops equal callback.
+ *
+ * @ctrl: The v4l2_ctrl pointer.
+ * @ptr1: A v4l2 control value.
+ * @ptr2: A v4l2 control value.
+ *
+ * Return: true if values are equal, otherwise false.
+ */
+bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl,
+ union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2);
+
+/**
+ * v4l2_ctrl_type_op_init - Default v4l2_ctrl_type_ops init callback.
+ *
+ * @ctrl: The v4l2_ctrl pointer.
+ * @from_idx: Starting element index.
+ * @ptr: The v4l2 control value.
+ *
+ * Return: void
+ */
+void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx,
+ union v4l2_ctrl_ptr ptr);
+
+/**
+ * v4l2_ctrl_type_op_log - Default v4l2_ctrl_type_ops log callback.
+ *
+ * @ctrl: The v4l2_ctrl pointer.
+ *
+ * Return: void
+ */
+void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl);
+
+/**
+ * v4l2_ctrl_type_op_validate - Default v4l2_ctrl_type_ops validate callback.
+ *
+ * @ctrl: The v4l2_ctrl pointer.
+ * @ptr: The v4l2 control value.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr);
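
Since the defaults are exported, a driver-specific &struct v4l2_ctrl_type_ops
can override just one callback and delegate the rest; a sketch with an assumed
custom logger:

	static void my_driver_ctrl_log(const struct v4l2_ctrl *ctrl)
	{
		/* driver-specific pretty-printing would go here */
	}

	static const struct v4l2_ctrl_type_ops example_type_ops = {
		.equal		= v4l2_ctrl_type_op_equal,
		.init		= v4l2_ctrl_type_op_init,
		.log		= my_driver_ctrl_log,
		.validate	= v4l2_ctrl_type_op_validate,
	};
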
+
#endif
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 48531e57cc5a..e0a13505f88d 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -24,7 +24,7 @@
/**
* enum vfl_devnode_type - type of V4L2 device node
*
- * @VFL_TYPE_GRABBER: for video input/output devices
+ * @VFL_TYPE_VIDEO: for video input/output devices
* @VFL_TYPE_VBI: for vertical blank data (i.e. closed captions, teletext)
* @VFL_TYPE_RADIO: for radio tuners
* @VFL_TYPE_SUBDEV: for V4L2 subdevices
@@ -33,7 +33,7 @@
* @VFL_TYPE_MAX: number of VFL types, must always be last in the enum
*/
enum vfl_devnode_type {
- VFL_TYPE_GRABBER = 0,
+ VFL_TYPE_VIDEO,
VFL_TYPE_VBI,
VFL_TYPE_RADIO,
VFL_TYPE_SUBDEV,
@@ -43,8 +43,8 @@ enum vfl_devnode_type {
};
/**
- * enum vfl_direction - Identifies if a &struct video_device corresponds
- * to a receiver, a transmitter or a mem-to-mem device.
+ * enum vfl_devnode_direction - Identifies if a &struct video_device
+ * corresponds to a receiver, a transmitter or a mem-to-mem device.
*
* @VFL_DIR_RX: device is a receiver.
* @VFL_DIR_TX: device is a transmitter.
@@ -82,11 +82,18 @@ struct v4l2_ctrl_handler;
* but the old crop API will still work as expected in order to preserve
* backwards compatibility.
* Never set this flag for new drivers.
+ * @V4L2_FL_SUBDEV_RO_DEVNODE:
+ * indicates that the video device node is registered in read-only mode.
+ * The flag only applies to device nodes registered for sub-devices, it is
+ * set by the core when the sub-devices device nodes are registered with
+ * v4l2_device_register_ro_subdev_nodes() and used by the sub-device ioctl
+ * handler to restrict access to some ioctl calls.
*/
enum v4l2_video_device_flags {
V4L2_FL_REGISTERED = 0,
V4L2_FL_USES_V4L2_FH = 1,
V4L2_FL_QUIRK_INVERTED_CROP = 2,
+ V4L2_FL_SUBDEV_RO_DEVNODE = 3,
};
/* Priority helper functions */
@@ -253,8 +260,7 @@ struct v4l2_file_operations {
* Only set @dev_parent if that can't be deduced from @v4l2_dev.
*/
-struct video_device
-{
+struct video_device {
#if defined(CONFIG_MEDIA_CONTROLLER)
struct media_entity entity;
struct media_intf_devnode *intf_devnode;
@@ -533,4 +539,106 @@ static inline int video_is_registered(struct video_device *vdev)
return test_bit(V4L2_FL_REGISTERED, &vdev->flags);
}
+#if defined(CONFIG_MEDIA_CONTROLLER)
+
+/**
+ * video_device_pipeline_start - Mark a pipeline as streaming
+ * @vdev: Starting video device
+ * @pipe: Media pipeline to be assigned to all entities in the pipeline.
+ *
+ * Mark all entities connected to a given video device through enabled links,
+ * either directly or indirectly, as streaming. The given pipeline object is
+ * assigned to every pad in the pipeline and stored in the media_pad pipe
+ * field.
+ *
+ * Calls to this function can be nested, in which case the same number of
+ * video_device_pipeline_stop() calls will be required to stop streaming. The
+ * pipeline pointer must be identical for all nested calls to
+ * video_device_pipeline_start().
+ *
+ * The video device must contain a single pad.
+ *
+ * This is a convenience wrapper around media_pipeline_start().
+ */
+__must_check int video_device_pipeline_start(struct video_device *vdev,
+ struct media_pipeline *pipe);
+
+/**
+ * __video_device_pipeline_start - Mark a pipeline as streaming
+ * @vdev: Starting video device
+ * @pipe: Media pipeline to be assigned to all entities in the pipeline.
+ *
+ * .. note:: This is the non-locking version of video_device_pipeline_start()
+ *
+ * The video device must contain a single pad.
+ *
+ * This is a convenience wrapper around __media_pipeline_start().
+ */
+__must_check int __video_device_pipeline_start(struct video_device *vdev,
+ struct media_pipeline *pipe);
+
+/**
+ * video_device_pipeline_stop - Mark a pipeline as not streaming
+ * @vdev: Starting video device
+ *
+ * Mark all entities connected to a given video device through enabled links,
+ * either directly or indirectly, as not streaming. The media_pad pipe field
+ * is reset to %NULL.
+ *
+ * If multiple calls to video_device_pipeline_start() have been made, the same
+ * number of calls to this function are required to mark the pipeline as not
+ * streaming.
+ *
+ * The video device must contain a single pad.
+ *
+ * This is a convenience wrapper around media_pipeline_stop().
+ */
+void video_device_pipeline_stop(struct video_device *vdev);
+
+/**
+ * __video_device_pipeline_stop - Mark a pipeline as not streaming
+ * @vdev: Starting video device
+ *
+ * .. note:: This is the non-locking version of video_device_pipeline_stop()
+ *
+ * The video device must contain a single pad.
+ *
+ * This is a convenience wrapper around __media_pipeline_stop().
+ */
+void __video_device_pipeline_stop(struct video_device *vdev);
+
+/**
+ * video_device_pipeline_alloc_start - Mark a pipeline as streaming
+ * @vdev: Starting video device
+ *
+ * video_device_pipeline_alloc_start() is similar to video_device_pipeline_start()
+ * but instead of working on a given pipeline the function will use an
+ * existing pipeline if the video device is already part of a pipeline, or
+ * allocate a new pipeline.
+ *
+ * Calls to video_device_pipeline_alloc_start() must be matched with
+ * video_device_pipeline_stop().
+ */
+__must_check int video_device_pipeline_alloc_start(struct video_device *vdev);
+
+/**
+ * video_device_pipeline - Get the media pipeline a video device is part of
+ * @vdev: The video device
+ *
+ * This function returns the media pipeline that a video device has been
+ * associated with when constructing the pipeline with
+ * video_device_pipeline_start(). The pointer remains valid until
+ * video_device_pipeline_stop() is called.
+ *
+ * Return: The media_pipeline the video device is part of, or NULL if the video
+ * device is not part of any pipeline.
+ *
+ * The video device must contain a single pad.
+ *
+ * This is a convenience wrapper around media_entity_pipeline().
+ */
+struct media_pipeline *video_device_pipeline(struct video_device *vdev);
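
A sketch of the expected start/stop pairing in a capture driver (the
surrounding vb2 plumbing is omitted; the pipe object is assumed to live in the
driver's private state):

	static int example_start_streaming(struct video_device *vdev,
					   struct media_pipeline *pipe)
	{
		int ret = video_device_pipeline_start(vdev, pipe);

		if (ret)
			return ret;

		/* ... program and start the hardware; on a later failure,
		 * and in the stop path, call video_device_pipeline_stop(vdev)
		 */
		return 0;
	}
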
+
+#endif /* CONFIG_MEDIA_CONTROLLER */
+
#endif /* _V4L2_DEV_H */
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index 95353ae476a1..8a8977a33ec1 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -174,14 +174,56 @@ int __must_check v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
void v4l2_device_unregister_subdev(struct v4l2_subdev *sd);
/**
- * v4l2_device_register_subdev_nodes - Registers device nodes for all subdevs
- * of the v4l2 device that are marked with
- * the %V4L2_SUBDEV_FL_HAS_DEVNODE flag.
+ * __v4l2_device_register_subdev_nodes - Registers device nodes for
+ * all subdevs of the v4l2 device that are marked with the
+ * %V4L2_SUBDEV_FL_HAS_DEVNODE flag.
*
* @v4l2_dev: pointer to struct v4l2_device
+ * @read_only: subdevices read-only flag. True to register the subdevices
+ * device nodes in read-only mode, false to allow full access to the
+ * subdevice userspace API.
*/
int __must_check
-v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev);
+__v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev,
+ bool read_only);
+
+/**
+ * v4l2_device_register_subdev_nodes - Registers subdevices device nodes with
+ * unrestricted access to the subdevice userspace operations
+ *
+ * Internally calls __v4l2_device_register_subdev_nodes(). See its documentation
+ * for more details.
+ *
+ * @v4l2_dev: pointer to struct v4l2_device
+ */
+static inline int __must_check
+v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
+{
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+ return __v4l2_device_register_subdev_nodes(v4l2_dev, false);
+#else
+ return 0;
+#endif
+}
+
+/**
+ * v4l2_device_register_ro_subdev_nodes - Registers subdevices device nodes
+ * in read-only mode
+ *
+ * Internally calls __v4l2_device_register_subdev_nodes(). See its documentation
+ * for more details.
+ *
+ * @v4l2_dev: pointer to struct v4l2_device
+ */
+static inline int __must_check
+v4l2_device_register_ro_subdev_nodes(struct v4l2_device *v4l2_dev)
+{
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+ return __v4l2_device_register_subdev_nodes(v4l2_dev, true);
+#else
+ return 0;
+#endif
+}
/**
* v4l2_subdev_notify - Sends a notification to v4l2_device.
@@ -240,7 +282,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
* @f: operation function that will be called if @cond matches.
* The operation functions are defined in groups, according to
* each element at &struct v4l2_subdev_ops.
- * @args...: arguments for @f.
+ * @args: arguments for @f.
*
* Ignore any errors.
*
@@ -265,7 +307,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
* @f: operation function that will be called if @cond matches.
* The operation functions are defined in groups, according to
* each element at &struct v4l2_subdev_ops.
- * @args...: arguments for @f.
+ * @args: arguments for @f.
*
* Ignore any errors.
*
@@ -293,7 +335,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
* @f: operation function that will be called if @cond matches.
* The operation functions are defined in groups, according to
* each element at &struct v4l2_subdev_ops.
- * @args...: arguments for @f.
+ * @args: arguments for @f.
*
* Return:
*
@@ -328,7 +370,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
* @f: operation function that will be called if @cond matches.
* The operation functions are defined in groups, according to
* each element at &struct v4l2_subdev_ops.
- * @args...: arguments for @f.
+ * @args: arguments for @f.
*
* Return:
*
@@ -359,7 +401,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
* @f: operation function that will be called if @cond matches.
* The operation functions are defined in groups, according to
* each element at &struct v4l2_subdev_ops.
- * @args...: arguments for @f.
+ * @args: arguments for @f.
*
* Ignore any errors.
*
@@ -388,7 +430,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
* @f: operation function that will be called if @cond matches.
* The operation functions are defined in groups, according to
* each element at &struct v4l2_subdev_ops.
- * @args...: arguments for @f.
+ * @args: arguments for @f.
*
* Return:
*
@@ -419,7 +461,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
* @f: operation function that will be called if @cond matches.
* The operation functions are defined in groups, according to
* each element at &struct v4l2_subdev_ops.
- * @args...: arguments for @f.
+ * @args: arguments for @f.
*
* Ignore any errors.
*
@@ -447,7 +489,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
* @f: operation function that will be called if @cond matches.
* The operation functions are defined in groups, according to
* each element at &struct v4l2_subdev_ops.
- * @args...: arguments for @f.
+ * @args: arguments for @f.
*
* Return:
*
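As a usage sketch, a hypothetical driver could register read-only sub-device
nodes from its async notifier complete callback; struct foo_dev and its members
are assumptions for the example.

	static int foo_notifier_complete(struct v4l2_async_notifier *notifier)
	{
		struct foo_dev *foo = container_of(notifier, struct foo_dev, notifier);

		/*
		 * Expose /dev/v4l-subdev* nodes, but reject configuration from
		 * userspace; the driver keeps full control of the pipeline.
		 */
		return v4l2_device_register_ro_subdev_nodes(&foo->v4l2_dev);
	}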
diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
index 2cc0cabc124f..8fa963326bf6 100644
--- a/include/media/v4l2-dv-timings.h
+++ b/include/media/v4l2-dv-timings.h
@@ -224,7 +224,7 @@ static inline bool can_reduce_fps(struct v4l2_bt_timings *bt)
}
/**
- * struct v4l2_hdmi_rx_colorimetry - describes the HDMI colorimetry information
+ * struct v4l2_hdmi_colorimetry - describes the HDMI colorimetry information
* @colorspace: enum v4l2_colorspace, the colorspace
* @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
* @quantization: enum v4l2_quantization, colorspace quantization
diff --git a/include/media/v4l2-event.h b/include/media/v4l2-event.h
index 3f0281d06ec7..4ffa914ade3a 100644
--- a/include/media/v4l2-event.h
+++ b/include/media/v4l2-event.h
@@ -101,7 +101,7 @@ int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
*
* .. note::
* The driver's only responsibility is to fill in the type and the data
- * fields.The other fields will be filled in by V4L2.
+ * fields. The other fields will be filled in by V4L2.
*/
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev);
@@ -116,11 +116,20 @@ void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev);
*
* .. note::
* The driver's only responsibility is to fill in the type and the data
- * fields.The other fields will be filled in by V4L2.
+ * fields. The other fields will be filled in by V4L2.
*/
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev);
/**
+ * v4l2_event_wake_all - Wake all filehandles.
+ *
+ * Used when unregistering a video device.
+ *
+ * @vdev: pointer to &struct video_device
+ */
+void v4l2_event_wake_all(struct video_device *vdev);
+
+/**
* v4l2_event_pending - Check if an event is available
*
* @fh: pointer to &struct v4l2_fh
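A short sketch of the note above: the driver fills in only the type (and, where
applicable, the data) and lets the core complete the rest. The helper name is
invented for the example.

	static void foo_queue_eos_event(struct video_device *vdev)
	{
		static const struct v4l2_event ev = {
			.type = V4L2_EVENT_EOS,	/* driver fills type (and data) only */
		};

		/* The remaining fields are filled in by the V4L2 core. */
		v4l2_event_queue(vdev, &ev);
	}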
diff --git a/include/media/v4l2-fh.h b/include/media/v4l2-fh.h
index 53b4dbb4ae8e..b5b3e00c8e6a 100644
--- a/include/media/v4l2-fh.h
+++ b/include/media/v4l2-fh.h
@@ -53,9 +53,7 @@ struct v4l2_fh {
unsigned int navailable;
u32 sequence;
-#if IS_ENABLED(CONFIG_V4L2_MEM2MEM_DEV)
struct v4l2_m2m_ctx *m2m_ctx;
-#endif
};
/**
diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h
index f6a7bcd13197..394d798f3dfa 100644
--- a/include/media/v4l2-fwnode.h
+++ b/include/media/v4l2-fwnode.h
@@ -20,74 +20,24 @@
#include <linux/types.h>
#include <media/v4l2-mediabus.h>
-#include <media/v4l2-subdev.h>
struct fwnode_handle;
struct v4l2_async_notifier;
struct v4l2_async_subdev;
-#define V4L2_FWNODE_CSI2_MAX_DATA_LANES 4
-
-/**
- * struct v4l2_fwnode_bus_mipi_csi2 - MIPI CSI-2 bus data structure
- * @flags: media bus (V4L2_MBUS_*) flags
- * @data_lanes: an array of physical data lane indexes
- * @clock_lane: physical lane index of the clock lane
- * @num_data_lanes: number of data lanes
- * @lane_polarities: polarity of the lanes. The order is the same of
- * the physical lanes.
- */
-struct v4l2_fwnode_bus_mipi_csi2 {
- unsigned int flags;
- unsigned char data_lanes[V4L2_FWNODE_CSI2_MAX_DATA_LANES];
- unsigned char clock_lane;
- unsigned short num_data_lanes;
- bool lane_polarities[1 + V4L2_FWNODE_CSI2_MAX_DATA_LANES];
-};
-
-/**
- * struct v4l2_fwnode_bus_parallel - parallel data bus data structure
- * @flags: media bus (V4L2_MBUS_*) flags
- * @bus_width: bus width in bits
- * @data_shift: data shift in bits
- */
-struct v4l2_fwnode_bus_parallel {
- unsigned int flags;
- unsigned char bus_width;
- unsigned char data_shift;
-};
-
-/**
- * struct v4l2_fwnode_bus_mipi_csi1 - CSI-1/CCP2 data bus structure
- * @clock_inv: polarity of clock/strobe signal
- * false - not inverted, true - inverted
- * @strobe: false - data/clock, true - data/strobe
- * @lane_polarity: the polarities of the clock (index 0) and data lanes
- * index (1)
- * @data_lane: the number of the data lane
- * @clock_lane: the number of the clock lane
- */
-struct v4l2_fwnode_bus_mipi_csi1 {
- unsigned char clock_inv:1;
- unsigned char strobe:1;
- bool lane_polarity[2];
- unsigned char data_lane;
- unsigned char clock_lane;
-};
-
/**
* struct v4l2_fwnode_endpoint - the endpoint data structure
* @base: fwnode endpoint of the v4l2_fwnode
* @bus_type: bus type
- * @bus: union with bus configuration data structure
- * @bus.parallel: embedded &struct v4l2_fwnode_bus_parallel.
+ * @bus: bus configuration data structure
+ * @bus.parallel: embedded &struct v4l2_mbus_config_parallel.
* Used if the bus is parallel.
- * @bus.mipi_csi1: embedded &struct v4l2_fwnode_bus_mipi_csi1.
+ * @bus.mipi_csi1: embedded &struct v4l2_mbus_config_mipi_csi1.
* Used if the bus is MIPI Alliance's Camera Serial
* Interface version 1 (MIPI CSI1) or Standard
* Mobile Imaging Architecture's Compact Camera Port 2
* (SMIA CCP2).
- * @bus.mipi_csi2: embedded &struct v4l2_fwnode_bus_mipi_csi2.
+ * @bus.mipi_csi2: embedded &struct v4l2_mbus_config_mipi_csi2.
* Used if the bus is MIPI Alliance's Camera Serial
* Interface version 2 (MIPI CSI2).
* @link_frequencies: array of supported link frequencies
@@ -95,32 +45,143 @@ struct v4l2_fwnode_bus_mipi_csi1 {
*/
struct v4l2_fwnode_endpoint {
struct fwnode_endpoint base;
- /*
- * Fields below this line will be zeroed by
- * v4l2_fwnode_endpoint_parse()
- */
enum v4l2_mbus_type bus_type;
- union {
- struct v4l2_fwnode_bus_parallel parallel;
- struct v4l2_fwnode_bus_mipi_csi1 mipi_csi1;
- struct v4l2_fwnode_bus_mipi_csi2 mipi_csi2;
+ struct {
+ struct v4l2_mbus_config_parallel parallel;
+ struct v4l2_mbus_config_mipi_csi1 mipi_csi1;
+ struct v4l2_mbus_config_mipi_csi2 mipi_csi2;
} bus;
u64 *link_frequencies;
unsigned int nr_of_link_frequencies;
};
/**
+ * V4L2_FWNODE_PROPERTY_UNSET - identify a non initialized property
+ *
+ * All properties in &struct v4l2_fwnode_device_properties are initialized
+ * to this value.
+ */
+#define V4L2_FWNODE_PROPERTY_UNSET (-1U)
+
+/**
+ * enum v4l2_fwnode_orientation - possible device orientation
+ * @V4L2_FWNODE_ORIENTATION_FRONT: device installed on the front side
+ * @V4L2_FWNODE_ORIENTATION_BACK: device installed on the back side
+ * @V4L2_FWNODE_ORIENTATION_EXTERNAL: device externally located
+ */
+enum v4l2_fwnode_orientation {
+ V4L2_FWNODE_ORIENTATION_FRONT,
+ V4L2_FWNODE_ORIENTATION_BACK,
+ V4L2_FWNODE_ORIENTATION_EXTERNAL
+};
+
+/**
+ * struct v4l2_fwnode_device_properties - fwnode device properties
+ * @orientation: device orientation. See &enum v4l2_fwnode_orientation
+ * @rotation: device rotation
+ */
+struct v4l2_fwnode_device_properties {
+ enum v4l2_fwnode_orientation orientation;
+ unsigned int rotation;
+};
+
+/**
* struct v4l2_fwnode_link - a link between two endpoints
* @local_node: pointer to device_node of this endpoint
* @local_port: identifier of the port this endpoint belongs to
+ * @local_id: identifier of this endpoint under its port
* @remote_node: pointer to device_node of the remote endpoint
* @remote_port: identifier of the port the remote endpoint belongs to
+ * @remote_id: identifier of the remote endpoint under its port
*/
struct v4l2_fwnode_link {
struct fwnode_handle *local_node;
unsigned int local_port;
+ unsigned int local_id;
struct fwnode_handle *remote_node;
unsigned int remote_port;
+ unsigned int remote_id;
+};
+
+/**
+ * enum v4l2_connector_type - connector type
+ * @V4L2_CONN_UNKNOWN: unknown connector type, no V4L2 connector configuration
+ * @V4L2_CONN_COMPOSITE: analog composite connector
+ * @V4L2_CONN_SVIDEO: analog svideo connector
+ */
+enum v4l2_connector_type {
+ V4L2_CONN_UNKNOWN,
+ V4L2_CONN_COMPOSITE,
+ V4L2_CONN_SVIDEO,
+};
+
+/**
+ * struct v4l2_connector_link - connector link data structure
+ * @head: structure to be used to add the link to the
+ * &struct v4l2_fwnode_connector
+ * @fwnode_link: &struct v4l2_fwnode_link link between the connector and the
+ * device the connector belongs to.
+ */
+struct v4l2_connector_link {
+ struct list_head head;
+ struct v4l2_fwnode_link fwnode_link;
+};
+
+/**
+ * struct v4l2_fwnode_connector_analog - analog connector data structure
+ * @sdtv_stds: sdtv standards this connector supports, set to V4L2_STD_ALL
+ * if no restrictions are specified.
+ */
+struct v4l2_fwnode_connector_analog {
+ v4l2_std_id sdtv_stds;
+};
+
+/**
+ * struct v4l2_fwnode_connector - the connector data structure
+ * @name: the connector device name
+ * @label: optional connector label
+ * @type: connector type
+ * @links: list of all connector &struct v4l2_connector_link links
+ * @nr_of_links: total number of links
+ * @connector: connector configuration
+ * @connector.analog: analog connector configuration
+ * &struct v4l2_fwnode_connector_analog
+ */
+struct v4l2_fwnode_connector {
+ const char *name;
+ const char *label;
+ enum v4l2_connector_type type;
+ struct list_head links;
+ unsigned int nr_of_links;
+
+ union {
+ struct v4l2_fwnode_connector_analog analog;
+ /* future connectors */
+ } connector;
+};
+
+/**
+ * enum v4l2_fwnode_bus_type - Video bus types defined by firmware properties
+ * @V4L2_FWNODE_BUS_TYPE_GUESS: Default value if no bus-type fwnode property
+ * @V4L2_FWNODE_BUS_TYPE_CSI2_CPHY: MIPI CSI-2 bus, C-PHY physical layer
+ * @V4L2_FWNODE_BUS_TYPE_CSI1: MIPI CSI-1 bus
+ * @V4L2_FWNODE_BUS_TYPE_CCP2: SMIA Compact Camera Port 2 bus
+ * @V4L2_FWNODE_BUS_TYPE_CSI2_DPHY: MIPI CSI-2 bus, D-PHY physical layer
+ * @V4L2_FWNODE_BUS_TYPE_PARALLEL: Camera Parallel Interface bus
+ * @V4L2_FWNODE_BUS_TYPE_BT656: BT.656 video format bus-type
+ * @V4L2_FWNODE_BUS_TYPE_DPI: Video Parallel Interface bus
+ * @NR_OF_V4L2_FWNODE_BUS_TYPE: Number of bus-types
+ */
+enum v4l2_fwnode_bus_type {
+ V4L2_FWNODE_BUS_TYPE_GUESS = 0,
+ V4L2_FWNODE_BUS_TYPE_CSI2_CPHY,
+ V4L2_FWNODE_BUS_TYPE_CSI1,
+ V4L2_FWNODE_BUS_TYPE_CCP2,
+ V4L2_FWNODE_BUS_TYPE_CSI2_DPHY,
+ V4L2_FWNODE_BUS_TYPE_PARALLEL,
+ V4L2_FWNODE_BUS_TYPE_BT656,
+ V4L2_FWNODE_BUS_TYPE_DPI,
+ NR_OF_V4L2_FWNODE_BUS_TYPE
};
/**
@@ -129,24 +190,35 @@ struct v4l2_fwnode_link {
* @vep: pointer to the V4L2 fwnode data structure
*
* This function parses the V4L2 fwnode endpoint specific parameters from the
- * firmware. The caller is responsible for assigning @vep.bus_type to a valid
- * media bus type. The caller may also set the default configuration for the
- * endpoint --- a configuration that shall be in line with the DT binding
- * documentation. Should a device support multiple bus types, the caller may
- * call this function once the correct type is found --- with a default
- * configuration valid for that type.
- *
- * As a compatibility means guessing the bus type is also supported by setting
- * @vep.bus_type to V4L2_MBUS_UNKNOWN. The caller may not provide a default
- * configuration in this case as the defaults are specific to a given bus type.
- * This functionality is deprecated and should not be used in new drivers and it
- * is only supported for CSI-2 D-PHY, parallel and Bt.656 buses.
+ * firmware. There are two ways to use this function, either by letting it
+ * obtain the type of the bus (by setting the @vep.bus_type field to
+ * V4L2_MBUS_UNKNOWN) or specifying the bus type explicitly to one of the &enum
+ * v4l2_mbus_type types.
+ *
+ * When @vep.bus_type is V4L2_MBUS_UNKNOWN, the function will use the "bus-type"
+ * property to determine the type when it is available. The caller is
+ * responsible for validating the contents of @vep.bus_type field after the call
+ * returns.
+ *
+ * As a deprecated functionality to support older DT bindings without "bus-type"
+ * property for devices that support multiple types, if the "bus-type" property
+ * does not exist, the function will attempt to guess the type based on the
+ * endpoint properties available. NEVER RELY ON GUESSING THE BUS TYPE IN NEW
+ * DRIVERS OR BINDINGS.
+ *
+ * It is also possible to set @vep.bus_type corresponding to an actual bus. In
+ * this case the function will only attempt to parse properties related to this
+ * bus, and it will return an error if the value of the "bus-type" property
+ * corresponds to a different bus.
+ *
+ * The caller is required to initialise all fields of @vep, either with
+ * explicit values or by zeroing them.
*
* The function does not change the V4L2 fwnode endpoint state if it fails.
*
- * NOTE: This function does not parse properties the size of which is variable
- * without a low fixed limit. Please use v4l2_fwnode_endpoint_alloc_parse() in
- * new drivers instead.
+ * NOTE: This function does not parse "link-frequencies" property as its size is
+ * not known in advance. Please use v4l2_fwnode_endpoint_alloc_parse() if you
+ * need properties of variable size.
*
* Return: %0 on success or a negative error code on failure:
* %-ENOMEM on memory allocation failure
@@ -172,18 +244,29 @@ void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep);
* @vep: pointer to the V4L2 fwnode data structure
*
* This function parses the V4L2 fwnode endpoint specific parameters from the
- * firmware. The caller is responsible for assigning @vep.bus_type to a valid
- * media bus type. The caller may also set the default configuration for the
- * endpoint --- a configuration that shall be in line with the DT binding
- * documentation. Should a device support multiple bus types, the caller may
- * call this function once the correct type is found --- with a default
- * configuration valid for that type.
- *
- * As a compatibility means guessing the bus type is also supported by setting
- * @vep.bus_type to V4L2_MBUS_UNKNOWN. The caller may not provide a default
- * configuration in this case as the defaults are specific to a given bus type.
- * This functionality is deprecated and should not be used in new drivers and it
- * is only supported for CSI-2 D-PHY, parallel and Bt.656 buses.
+ * firmware. There are two ways to use this function, either by letting it
+ * obtain the type of the bus (by setting the @vep.bus_type field to
+ * V4L2_MBUS_UNKNOWN) or specifying the bus type explicitly to one of the &enum
+ * v4l2_mbus_type types.
+ *
+ * When @vep.bus_type is V4L2_MBUS_UNKNOWN, the function will use the "bus-type"
+ * property to determine the type when it is available. The caller is
+ * responsible for validating the contents of @vep.bus_type field after the call
+ * returns.
+ *
+ * As a deprecated functionality to support older DT bindings without "bus-type"
+ * property for devices that support multiple types, if the "bus-type" property
+ * does not exist, the function will attempt to guess the type based on the
+ * endpoint properties available. NEVER RELY ON GUESSING THE BUS TYPE IN NEW
+ * DRIVERS OR BINDINGS.
+ *
+ * It is also possible to set @vep.bus_type corresponding to an actual bus. In
+ * this case the function will only attempt to parse properties related to this
+ * bus, and it will return an error if the value of the "bus-type" property
+ * corresponds to a different bus.
+ *
+ * The caller is required to initialise all fields of @vep, either with
+ * explicit values or by zeroing them.
*
* The function does not change the V4L2 fwnode endpoint state if it fails.
*
@@ -234,6 +317,83 @@ int v4l2_fwnode_parse_link(struct fwnode_handle *fwnode,
void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link);
/**
+ * v4l2_fwnode_connector_free() - free the V4L2 connector acquired memory
+ * @connector: the V4L2 connector resources of which are to be released
+ *
+ * Free all allocated memory and put all links acquired by
+ * v4l2_fwnode_connector_parse() and v4l2_fwnode_connector_add_link().
+ *
+ * It is safe to call this function with NULL argument or on a V4L2 connector
+ * the parsing of which failed.
+ */
+void v4l2_fwnode_connector_free(struct v4l2_fwnode_connector *connector);
+
+/**
+ * v4l2_fwnode_connector_parse() - initialize the 'struct v4l2_fwnode_connector'
+ * @fwnode: pointer to the subdev endpoint's fwnode handle to which the
+ * connector is connected, or to the connector endpoint fwnode handle.
+ * @connector: pointer to the V4L2 fwnode connector data structure
+ *
+ * Fill the &struct v4l2_fwnode_connector with the connector type, label and
+ * all &enum v4l2_connector_type specific connector data. The label is optional
+ * so it is set to %NULL if no one was found. The function initialize the links
+ * to zero. Adding links to the connector is done by calling
+ * v4l2_fwnode_connector_add_link().
+ *
+ * The memory allocated for the label must be freed when no longer needed.
+ * Freeing the memory is done by v4l2_fwnode_connector_free().
+ *
+ * Return:
+ * * %0 on success or a negative error code on failure:
+ * * %-EINVAL if @fwnode is invalid
+ * * %-ENOTCONN if connector type is unknown or connector device can't be found
+ */
+int v4l2_fwnode_connector_parse(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_connector *connector);
+
+/**
+ * v4l2_fwnode_connector_add_link - add a link between a connector node and
+ * a v4l2-subdev node.
+ * @fwnode: pointer to the subdev endpoint's fwnode handle to which the
+ * connector is connected
+ * @connector: pointer to the V4L2 fwnode connector data structure
+ *
+ * Add a new &struct v4l2_connector_link link to the
+ * &struct v4l2_fwnode_connector connector links list. The link local_node
+ * points to the connector node, the remote_node to the host v4l2 (sub)dev.
+ *
+ * The references taken to remote_node and local_node must be dropped and the
+ * allocated memory must be freed when no longer needed. Both are done by calling
+ * v4l2_fwnode_connector_free().
+ *
+ * Return:
+ * * %0 on success or a negative error code on failure:
+ * * %-EINVAL if @fwnode or @connector is invalid or @connector type is unknown
+ * * %-ENOMEM on link memory allocation failure
+ * * %-ENOTCONN if remote connector device can't be found
+ * * %-ENOLINK if link parsing between v4l2 (sub)dev and connector fails
+ */
+int v4l2_fwnode_connector_add_link(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_connector *connector);
+
+/**
+ * v4l2_fwnode_device_parse() - parse fwnode device properties
+ * @dev: pointer to &struct device
+ * @props: pointer to &struct v4l2_fwnode_device_properties where to store the
+ * parsed properties values
+ *
+ * This function parses and validates the V4L2 fwnode device properties from the
+ * firmware interface, and fills the &struct v4l2_fwnode_device_properties
+ * provided by the caller.
+ *
+ * Return:
+ * %0 on success
+ * %-EINVAL if a parsed property value is not valid
+ */
+int v4l2_fwnode_device_parse(struct device *dev,
+ struct v4l2_fwnode_device_properties *props);
+
+/**
* typedef parse_endpoint_func - Driver's callback function to be called on
* each V4L2 fwnode endpoint.
*
@@ -252,7 +412,7 @@ typedef int (*parse_endpoint_func)(struct device *dev,
struct v4l2_async_subdev *asd);
/**
- * v4l2_async_notifier_parse_fwnode_endpoints - Parse V4L2 fwnode endpoints in a
+ * v4l2_async_nf_parse_fwnode_endpoints - Parse V4L2 fwnode endpoints in a
* device node
* @dev: the device the endpoints of which are to be parsed
* @notifier: notifier for @dev
@@ -264,6 +424,10 @@ typedef int (*parse_endpoint_func)(struct device *dev,
* @parse_endpoint: Driver's callback function called on each V4L2 fwnode
* endpoint. Optional.
*
+ * DEPRECATED! This function is deprecated. Don't use it in new drivers.
+ * Instead, see an example in the cio2_parse_firmware() function in
+ * drivers/media/pci/intel/ipu3/ipu3-cio2.c.
+ *
* Parse the fwnode endpoints of the @dev device and populate the async sub-
* devices list in the notifier. The @parse_endpoint callback function is
* called for each endpoint with the corresponding async sub-device pointer to
@@ -281,7 +445,7 @@ typedef int (*parse_endpoint_func)(struct device *dev,
* to retain that configuration, the user needs to allocate memory for it.
*
* Any notifier populated using this function must be released with a call to
- * v4l2_async_notifier_cleanup() after it has been unregistered and the async
+ * v4l2_async_nf_cleanup() after it has been unregistered and the async
* sub-devices are no longer in use, even if the function returned an error.
*
* Return: %0 on success, including when no async sub-devices are found
@@ -290,120 +454,31 @@ typedef int (*parse_endpoint_func)(struct device *dev,
* Other error codes as returned by @parse_endpoint
*/
int
-v4l2_async_notifier_parse_fwnode_endpoints(struct device *dev,
- struct v4l2_async_notifier *notifier,
- size_t asd_struct_size,
- parse_endpoint_func parse_endpoint);
+v4l2_async_nf_parse_fwnode_endpoints(struct device *dev,
+ struct v4l2_async_notifier *notifier,
+ size_t asd_struct_size,
+ parse_endpoint_func parse_endpoint);
-/**
- * v4l2_async_notifier_parse_fwnode_endpoints_by_port - Parse V4L2 fwnode
- * endpoints of a port in a
- * device node
- * @dev: the device the endpoints of which are to be parsed
- * @notifier: notifier for @dev
- * @asd_struct_size: size of the driver's async sub-device struct, including
- * sizeof(struct v4l2_async_subdev). The &struct
- * v4l2_async_subdev shall be the first member of
- * the driver's async sub-device struct, i.e. both
- * begin at the same memory address.
- * @port: port number where endpoints are to be parsed
- * @parse_endpoint: Driver's callback function called on each V4L2 fwnode
- * endpoint. Optional.
- *
- * This function is just like v4l2_async_notifier_parse_fwnode_endpoints() with
- * the exception that it only parses endpoints in a given port. This is useful
- * on devices that have both sinks and sources: the async sub-devices connected
- * to sources have already been configured by another driver (on capture
- * devices). In this case the driver must know which ports to parse.
- *
- * Parse the fwnode endpoints of the @dev device on a given @port and populate
- * the async sub-devices list of the notifier. The @parse_endpoint callback
- * function is called for each endpoint with the corresponding async sub-device
- * pointer to let the caller initialize the driver-specific part of the async
- * sub-device structure.
- *
- * The notifier memory shall be zeroed before this function is called on the
- * notifier the first time.
- *
- * This function may not be called on a registered notifier and may be called on
- * a notifier only once per port.
- *
- * The &struct v4l2_fwnode_endpoint passed to the callback function
- * @parse_endpoint is released once the function is finished. If there is a need
- * to retain that configuration, the user needs to allocate memory for it.
- *
- * Any notifier populated using this function must be released with a call to
- * v4l2_async_notifier_cleanup() after it has been unregistered and the async
- * sub-devices are no longer in use, even if the function returned an error.
- *
- * Return: %0 on success, including when no async sub-devices are found
- * %-ENOMEM if memory allocation failed
- * %-EINVAL if graph or endpoint parsing failed
- * Other error codes as returned by @parse_endpoint
- */
-int
-v4l2_async_notifier_parse_fwnode_endpoints_by_port(struct device *dev,
- struct v4l2_async_notifier *notifier,
- size_t asd_struct_size,
- unsigned int port,
- parse_endpoint_func parse_endpoint);
+/* Helper macros to access the connector links. */
-/**
- * v4l2_fwnode_reference_parse_sensor_common - parse common references on
- * sensors for async sub-devices
- * @dev: the device node the properties of which are parsed for references
- * @notifier: the async notifier where the async subdevs will be added
+/** v4l2_connector_first_link - Helper macro to get the first
+ * &struct v4l2_fwnode_connector link
+ * @v4l2c: &struct v4l2_fwnode_connector owning the connector links
*
- * Parse common sensor properties for remote devices related to the
- * sensor and set up async sub-devices for them.
- *
- * Any notifier populated using this function must be released with a call to
- * v4l2_async_notifier_release() after it has been unregistered and the async
- * sub-devices are no longer in use, even in the case the function returned an
- * error.
- *
- * Return: 0 on success
- * -ENOMEM if memory allocation failed
- * -EINVAL if property parsing failed
+ * This macro returns the first added &struct v4l2_connector_link connector
+ * link or %NULL if the connector has no links.
*/
-int v4l2_async_notifier_parse_fwnode_sensor_common(struct device *dev,
- struct v4l2_async_notifier *notifier);
+#define v4l2_connector_first_link(v4l2c) \
+ list_first_entry_or_null(&(v4l2c)->links, \
+ struct v4l2_connector_link, head)
-/**
- * v4l2_async_register_fwnode_subdev - registers a sub-device to the
- * asynchronous sub-device framework
- * and parses fwnode endpoints
+/** v4l2_connector_last_link - Helper macro to get the last
+ * &struct v4l2_fwnode_connector link
+ * @v4l2c: &struct v4l2_fwnode_connector owning the connector links
*
- * @sd: pointer to struct &v4l2_subdev
- * @asd_struct_size: size of the driver's async sub-device struct, including
- * sizeof(struct v4l2_async_subdev). The &struct
- * v4l2_async_subdev shall be the first member of
- * the driver's async sub-device struct, i.e. both
- * begin at the same memory address.
- * @ports: array of port id's to parse for fwnode endpoints. If NULL, will
- * parse all ports owned by the sub-device.
- * @num_ports: number of ports in @ports array. Ignored if @ports is NULL.
- * @parse_endpoint: Driver's callback function called on each V4L2 fwnode
- * endpoint. Optional.
- *
- * This function is just like v4l2_async_register_subdev() with the
- * exception that calling it will also allocate a notifier for the
- * sub-device, parse the sub-device's firmware node endpoints using
- * v4l2_async_notifier_parse_fwnode_endpoints() or
- * v4l2_async_notifier_parse_fwnode_endpoints_by_port(), and
- * registers the sub-device notifier. The sub-device is similarly
- * unregistered by calling v4l2_async_unregister_subdev().
- *
- * While registered, the subdev module is marked as in-use.
- *
- * An error is returned if the module is no longer loaded on any attempts
- * to register it.
+ * This macro returns the last added &struct v4l2_connector_link connector link.
*/
-int
-v4l2_async_register_fwnode_subdev(struct v4l2_subdev *sd,
- size_t asd_struct_size,
- unsigned int *ports,
- unsigned int num_ports,
- parse_endpoint_func parse_endpoint);
+#define v4l2_connector_last_link(v4l2c) \
+ list_last_entry(&(v4l2c)->links, struct v4l2_connector_link, head)
#endif /* _V4L2_FWNODE_H */
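As a sketch of the explicit-bus-type usage described above (the deprecated
guessing mode is deliberately avoided), a hypothetical CSI-2 sensor driver
might parse its endpoint like this; the foo_* name is an assumption.

	static int foo_parse_endpoint(struct device *dev)
	{
		struct v4l2_fwnode_endpoint vep = {
			/* Explicit bus type: a conflicting "bus-type" property fails. */
			.bus_type = V4L2_MBUS_CSI2_DPHY,
		};
		struct fwnode_handle *ep;
		int ret;

		ep = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
		if (!ep)
			return -ENODEV;

		ret = v4l2_fwnode_endpoint_parse(ep, &vep);
		fwnode_handle_put(ep);
		if (ret)
			return ret;

		dev_dbg(dev, "%u data lanes\n", vep.bus.mipi_csi2.num_data_lanes);
		return 0;
	}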
diff --git a/include/media/v4l2-h264.h b/include/media/v4l2-h264.h
new file mode 100644
index 000000000000..0d9eaa956123
--- /dev/null
+++ b/include/media/v4l2-h264.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper functions for H264 codecs.
+ *
+ * Copyright (c) 2019 Collabora, Ltd.
+ *
+ * Author: Boris Brezillon <boris.brezillon@collabora.com>
+ */
+
+#ifndef _MEDIA_V4L2_H264_H
+#define _MEDIA_V4L2_H264_H
+
+#include <media/v4l2-ctrls.h>
+
+/**
+ * struct v4l2_h264_reflist_builder - Reference list builder object
+ *
+ * @refs.top_field_order_cnt: top field order count
+ * @refs.bottom_field_order_cnt: bottom field order count
+ * @refs.frame_num: reference frame number
+ * @refs.longterm: set to true for a long term reference
+ * @refs: array of references
+ * @cur_pic_order_count: picture order count of the frame being decoded
+ * @cur_pic_fields: fields present in the frame being decoded
+ * @unordered_reflist: unordered list of references. Will be used to generate
+ * ordered P/B0/B1 lists
+ * @num_valid: number of valid references in the refs array
+ *
+ * This object stores the context of the P/B0/B1 reference list builder.
+ * This procedure is described in section '8.2.4 Decoding process for reference
+ * picture lists construction' of the H264 spec.
+ */
+struct v4l2_h264_reflist_builder {
+ struct {
+ s32 top_field_order_cnt;
+ s32 bottom_field_order_cnt;
+ int frame_num;
+ u16 longterm : 1;
+ } refs[V4L2_H264_NUM_DPB_ENTRIES];
+
+ s32 cur_pic_order_count;
+ u8 cur_pic_fields;
+
+ struct v4l2_h264_reference unordered_reflist[V4L2_H264_REF_LIST_LEN];
+ u8 num_valid;
+};
+
+void
+v4l2_h264_init_reflist_builder(struct v4l2_h264_reflist_builder *b,
+ const struct v4l2_ctrl_h264_decode_params *dec_params,
+ const struct v4l2_ctrl_h264_sps *sps,
+ const struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES]);
+
+/**
+ * v4l2_h264_build_b_ref_lists() - Build the B0/B1 reference lists
+ *
+ * @builder: reference list builder context
+ * @b0_reflist: 32-entry array used to store the B0 reference list. Each entry
+ * is a v4l2_h264_reference structure
+ * @b1_reflist: 32-entry array used to store the B1 reference list. Each entry
+ * is a v4l2_h264_reference structure
+ *
+ * This function builds the B0/B1 reference lists. This procedure is described
+ * in section '8.2.4 Decoding process for reference picture lists construction'
+ * of the H264 spec. This function can be used by H264 decoder drivers that
+ * need to pass B0/B1 reference lists to the hardware.
+ */
+void
+v4l2_h264_build_b_ref_lists(const struct v4l2_h264_reflist_builder *builder,
+ struct v4l2_h264_reference *b0_reflist,
+ struct v4l2_h264_reference *b1_reflist);
+
+/**
+ * v4l2_h264_build_p_ref_list() - Build the P reference list
+ *
+ * @builder: reference list builder context
+ * @reflist: 32-entry array used to store the P reference list. Each entry
+ * is a v4l2_h264_reference structure
+ *
+ * This function builds the P reference list. This procedure is described in
+ * section '8.2.4 Decoding process for reference picture lists construction'
+ * of the H264 spec. This function can be used by H264 decoder drivers that
+ * need to pass a P reference list to the hardware.
+ */
+void
+v4l2_h264_build_p_ref_list(const struct v4l2_h264_reflist_builder *builder,
+ struct v4l2_h264_reference *reflist);
+
+#endif /* _MEDIA_V4L2_H264_H */
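A condensed sketch of how a hypothetical stateless decoder driver might use the
builder; dec, sps and the foo_* hardware helper are assumptions for the example.

	static void foo_build_ref_lists(const struct v4l2_ctrl_h264_decode_params *dec,
					const struct v4l2_ctrl_h264_sps *sps)
	{
		struct v4l2_h264_reflist_builder builder;
		struct v4l2_h264_reference p[V4L2_H264_REF_LIST_LEN];
		struct v4l2_h264_reference b0[V4L2_H264_REF_LIST_LEN];
		struct v4l2_h264_reference b1[V4L2_H264_REF_LIST_LEN];

		v4l2_h264_init_reflist_builder(&builder, dec, sps, dec->dpb);

		v4l2_h264_build_p_ref_list(&builder, p);	/* for P slices */
		v4l2_h264_build_b_ref_lists(&builder, b0, b1);	/* for B slices */

		foo_hw_program_reflists(p, b0, b1);	/* assumed hardware helper */
	}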
diff --git a/include/media/v4l2-image-sizes.h b/include/media/v4l2-image-sizes.h
index 450f4f5d3d6a..24a7a0bb59ab 100644
--- a/include/media/v4l2-image-sizes.h
+++ b/include/media/v4l2-image-sizes.h
@@ -10,6 +10,12 @@
#define CIF_WIDTH 352
#define CIF_HEIGHT 288
+#define HD_720_WIDTH 1280
+#define HD_720_HEIGHT 720
+
+#define HD_1080_WIDTH 1920
+#define HD_1080_HEIGHT 1080
+
#define QCIF_WIDTH 176
#define QCIF_HEIGHT 144
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 86878fba332b..edb733f21604 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -686,6 +686,16 @@ long int v4l2_compat_ioctl32(struct file *file, unsigned int cmd,
unsigned long arg);
#endif
+unsigned int v4l2_compat_translate_cmd(unsigned int cmd);
+int v4l2_compat_get_user(void __user *arg, void *parg, unsigned int cmd);
+int v4l2_compat_put_user(void __user *arg, void *parg, unsigned int cmd);
+int v4l2_compat_get_array_args(struct file *file, void *mbuf,
+ void __user *user_ptr, size_t array_size,
+ unsigned int cmd, void *arg);
+int v4l2_compat_put_array_args(struct file *file, void __user *user_ptr,
+ void *mbuf, size_t array_size,
+ unsigned int cmd, void *arg);
+
/**
* typedef v4l2_kioctl - Typedef used to pass an ioctl handler.
*
diff --git a/include/media/v4l2-jpeg.h b/include/media/v4l2-jpeg.h
new file mode 100644
index 000000000000..2dba843ce3bd
--- /dev/null
+++ b/include/media/v4l2-jpeg.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * V4L2 JPEG helpers header
+ *
+ * Copyright (C) 2019 Pengutronix, Philipp Zabel <kernel@pengutronix.de>
+ *
+ * For reference, see JPEG ITU-T.81 (ISO/IEC 10918-1)
+ */
+
+#ifndef _V4L2_JPEG_H
+#define _V4L2_JPEG_H
+
+#include <linux/v4l2-controls.h>
+
+#define V4L2_JPEG_MAX_COMPONENTS 4
+#define V4L2_JPEG_MAX_TABLES 4
+
+/**
+ * struct v4l2_jpeg_reference - reference into the JPEG buffer
+ * @start: pointer to the start of the referenced segment or table
+ * @length: size of the referenced segment or table
+ *
+ * When referencing marker segments, start points right after the marker code,
+ * and length is the size of the segment parameters, excluding the marker code.
+ */
+struct v4l2_jpeg_reference {
+ u8 *start;
+ size_t length;
+};
+
+/* B.2.2 Frame header syntax */
+
+/**
+ * struct v4l2_jpeg_frame_component_spec - frame component-specification
+ * @component_identifier: C[i]
+ * @horizontal_sampling_factor: H[i]
+ * @vertical_sampling_factor: V[i]
+ * @quantization_table_selector: quantization table destination selector Tq[i]
+ */
+struct v4l2_jpeg_frame_component_spec {
+ u8 component_identifier;
+ u8 horizontal_sampling_factor;
+ u8 vertical_sampling_factor;
+ u8 quantization_table_selector;
+};
+
+/**
+ * struct v4l2_jpeg_frame_header - JPEG frame header
+ * @height: Y
+ * @width: X
+ * @precision: P
+ * @num_components: Nf
+ * @component: component-specification, see v4l2_jpeg_frame_component_spec
+ * @subsampling: decoded subsampling from component-specification
+ */
+struct v4l2_jpeg_frame_header {
+ u16 height;
+ u16 width;
+ u8 precision;
+ u8 num_components;
+ struct v4l2_jpeg_frame_component_spec component[V4L2_JPEG_MAX_COMPONENTS];
+ enum v4l2_jpeg_chroma_subsampling subsampling;
+};
+
+/* B.2.3 Scan header syntax */
+
+/**
+ * struct v4l2_jpeg_scan_component_spec - scan component-specification
+ * @component_selector: Cs[j]
+ * @dc_entropy_coding_table_selector: Td[j]
+ * @ac_entropy_coding_table_selector: Ta[j]
+ */
+struct v4l2_jpeg_scan_component_spec {
+ u8 component_selector;
+ u8 dc_entropy_coding_table_selector;
+ u8 ac_entropy_coding_table_selector;
+};
+
+/**
+ * struct v4l2_jpeg_scan_header - JPEG scan header
+ * @num_components: Ns
+ * @component: component-specification, see v4l2_jpeg_scan_component_spec
+ */
+struct v4l2_jpeg_scan_header {
+ u8 num_components; /* Ns */
+ struct v4l2_jpeg_scan_component_spec component[V4L2_JPEG_MAX_COMPONENTS];
+ /* Ss, Se, Ah, and Al are not used by any driver */
+};
+
+/**
+ * enum v4l2_jpeg_app14_tf - APP14 transform flag
+ * According to Rec. ITU-T T.872 (06/2012) 6.5.3, the APP14 segment carries
+ * color encoding information: it contains a transform flag which may take the
+ * values 0, 1 or 2, interpreted as follows:
+ * @V4L2_JPEG_APP14_TF_CMYK_RGB: CMYK for images encoded with four components,
+ * RGB for images encoded with three components
+ * @V4L2_JPEG_APP14_TF_YCBCR: an image encoded with three components using YCbCr
+ * @V4L2_JPEG_APP14_TF_YCCK: an image encoded with four components using YCCK
+ * @V4L2_JPEG_APP14_TF_UNKNOWN: indicates the APP14 segment is not present
+ */
+enum v4l2_jpeg_app14_tf {
+ V4L2_JPEG_APP14_TF_CMYK_RGB = 0,
+ V4L2_JPEG_APP14_TF_YCBCR = 1,
+ V4L2_JPEG_APP14_TF_YCCK = 2,
+ V4L2_JPEG_APP14_TF_UNKNOWN = -1,
+};
+
+/**
+ * struct v4l2_jpeg_header - parsed JPEG header
+ * @sof: pointer to frame header and size
+ * @sos: pointer to scan header and size
+ * @num_dht: number of entries in @dht
+ * @dht: pointers to huffman tables and sizes
+ * @num_dqt: number of entries in @dqt
+ * @dqt: pointers to quantization tables and sizes
+ * @frame: parsed frame header
+ * @scan: pointer to parsed scan header, optional
+ * @quantization_tables: references to four quantization tables, optional
+ * @huffman_tables: references to four Huffman tables in DC0, DC1, AC0, AC1
+ * order, optional
+ * @restart_interval: number of MCUs per restart interval, Ri
+ * @ecs_offset: buffer offset in bytes to the entropy coded segment
+ * @app14_tf: transform flag from app14 data
+ *
+ * When this structure is passed to v4l2_jpeg_parse_header, the optional scan,
+ * quantization_tables, and huffman_tables pointers must be initialized to NULL
+ * or point at valid memory.
+ */
+struct v4l2_jpeg_header {
+ struct v4l2_jpeg_reference sof;
+ struct v4l2_jpeg_reference sos;
+ unsigned int num_dht;
+ struct v4l2_jpeg_reference dht[V4L2_JPEG_MAX_TABLES];
+ unsigned int num_dqt;
+ struct v4l2_jpeg_reference dqt[V4L2_JPEG_MAX_TABLES];
+
+ struct v4l2_jpeg_frame_header frame;
+ struct v4l2_jpeg_scan_header *scan;
+ struct v4l2_jpeg_reference *quantization_tables;
+ struct v4l2_jpeg_reference *huffman_tables;
+ u16 restart_interval;
+ size_t ecs_offset;
+ enum v4l2_jpeg_app14_tf app14_tf;
+};
+
+int v4l2_jpeg_parse_header(void *buf, size_t len, struct v4l2_jpeg_header *out);
+
+int v4l2_jpeg_parse_frame_header(void *buf, size_t len,
+ struct v4l2_jpeg_frame_header *frame_header);
+int v4l2_jpeg_parse_scan_header(void *buf, size_t len,
+ struct v4l2_jpeg_scan_header *scan_header);
+int v4l2_jpeg_parse_quantization_tables(void *buf, size_t len, u8 precision,
+ struct v4l2_jpeg_reference *q_tables);
+int v4l2_jpeg_parse_huffman_tables(void *buf, size_t len,
+ struct v4l2_jpeg_reference *huffman_tables);
+
+#endif
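A sketch of parsing a complete header before programming a hypothetical
decoder; the FOO_MAX_* limits are assumptions for the example.

	static int foo_check_jpeg(void *buf, size_t len)
	{
		struct v4l2_jpeg_header hdr = {};	/* optional pointers stay NULL */
		int ret;

		ret = v4l2_jpeg_parse_header(buf, len, &hdr);
		if (ret)
			return ret;

		if (hdr.frame.width > FOO_MAX_WIDTH ||
		    hdr.frame.height > FOO_MAX_HEIGHT)
			return -EINVAL;

		/* The entropy coded segment starts at buf + hdr.ecs_offset. */
		return 0;
	}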
diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h
index 384960249f01..c181685923d5 100644
--- a/include/media/v4l2-mc.h
+++ b/include/media/v4l2-mc.h
@@ -12,6 +12,7 @@
#include <media/media-device.h>
#include <media/v4l2-dev.h>
+#include <media/v4l2-subdev.h>
#include <linux/types.h>
/* We don't need to include pci.h or usb.h here */
@@ -84,25 +85,85 @@ void v4l_disable_media_source(struct video_device *vdev);
*/
int v4l_vb2q_enable_media_source(struct vb2_queue *q);
+/**
+ * v4l2_create_fwnode_links_to_pad - Create fwnode-based links from a
+ * source subdev to a sink subdev pad.
+ *
+ * @src_sd: pointer to a source subdev
+ * @sink: pointer to a subdev sink pad
+ * @flags: the link flags
+ *
+ * This function searches for fwnode endpoint connections from a source
+ * subdevice to a single sink pad, and if suitable connections are found,
+ * translates them into media links to that pad. The function can be
+ * called by the sink subdevice, in its v4l2-async notifier subdev bound
+ * callback, to create links from a bound source subdevice.
+ *
+ * The @flags argument specifies the link flags. The caller shall ensure that
+ * the flags are valid regardless of the number of links that may be created.
+ * For instance, setting the MEDIA_LNK_FL_ENABLED flag will cause all created
+ * links to be enabled, which isn't valid if more than one link is created.
+ *
+ * .. note::
+ *
+ * Any sink subdevice that calls this function must implement the
+ * .get_fwnode_pad media operation in order to verify endpoints passed
+ * to the sink are owned by the sink.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd,
+ struct media_pad *sink, u32 flags);
/**
- * v4l2_pipeline_pm_use - Update the use count of an entity
- * @entity: The entity
- * @use: Use (1) or stop using (0) the entity
+ * v4l2_create_fwnode_links - Create fwnode-based links from a source
+ * subdev to a sink subdev.
*
- * Update the use count of all entities in the pipeline and power entities on or
- * off accordingly.
+ * @src_sd: pointer to a source subdevice
+ * @sink_sd: pointer to a sink subdevice
*
- * This function is intended to be called in video node open (use ==
- * 1) and release (use == 0). It uses struct media_entity.use_count to
- * track the power status. The use of this function should be paired
- * with v4l2_pipeline_link_notify().
+ * This function searches for any and all fwnode endpoint connections
+ * between source and sink subdevices, and translates them into media
+ * links. The function can be called by the sink subdevice, in its
+ * v4l2-async notifier subdev bound callback, to create all links from
+ * a bound source subdevice.
*
- * Return 0 on success or a negative error code on failure. Powering entities
- * off is assumed to never fail. No failure can occur when the use parameter is
- * set to 0.
+ * .. note::
+ *
+ * Any sink subdevice that calls this function must implement the
+ * .get_fwnode_pad media operation in order to verify endpoints passed
+ * to the sink are owned by the sink.
+ *
+ * Return 0 on success or a negative error code on failure.
*/
-int v4l2_pipeline_pm_use(struct media_entity *entity, int use);
+int v4l2_create_fwnode_links(struct v4l2_subdev *src_sd,
+ struct v4l2_subdev *sink_sd);
+
+/**
+ * v4l2_pipeline_pm_get - Increase the use count of a pipeline
+ * @entity: The root entity of a pipeline
+ *
+ * Update the use count of all entities in the pipeline and power entities on.
+ *
+ * This function is intended to be called in video node open. It uses
+ * struct media_entity.use_count to track the power status. The use
+ * of this function should be paired with v4l2_pipeline_link_notify().
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+int v4l2_pipeline_pm_get(struct media_entity *entity);
+
+/**
+ * v4l2_pipeline_pm_put - Decrease the use count of a pipeline
+ * @entity: The root entity of a pipeline
+ *
+ * Update the use count of all entities in the pipeline and power entities off.
+ *
+ * This function is intended to be called in video node release. It uses
+ * struct media_entity.use_count to track the power status. The use
+ * of this function should be paired with v4l2_pipeline_link_notify().
+ */
+void v4l2_pipeline_pm_put(struct media_entity *entity);
/**
@@ -114,7 +175,7 @@ int v4l2_pipeline_pm_use(struct media_entity *entity, int use);
* React to link management on powered pipelines by updating the use count of
* all entities in the source and sink sides of the link. Entities are powered
* on or off accordingly. The use of this function should be paired
- * with v4l2_pipeline_pm_use().
+ * with v4l2_pipeline_pm_{get,put}().
*
* Return 0 on success or a negative error code on failure. Powering entities
* off is assumed to never fail. This function will not fail for disconnection
@@ -144,11 +205,14 @@ static inline int v4l_vb2q_enable_media_source(struct vb2_queue *q)
return 0;
}
-static inline int v4l2_pipeline_pm_use(struct media_entity *entity, int use)
+static inline int v4l2_pipeline_pm_get(struct media_entity *entity)
{
return 0;
}
+static inline void v4l2_pipeline_pm_put(struct media_entity *entity)
+{}
+
static inline int v4l2_pipeline_link_notify(struct media_link *link, u32 flags,
unsigned int notification)
{
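A sketch of the intended open/release pairing for the new pipeline PM helpers;
the foo_* file operations are invented for the example.

	static int foo_open(struct file *file)
	{
		struct video_device *vdev = video_devdata(file);

		/* Powers up every entity in the pipeline. */
		return v4l2_pipeline_pm_get(&vdev->entity);
	}

	static int foo_release(struct file *file)
	{
		struct video_device *vdev = video_devdata(file);

		v4l2_pipeline_pm_put(&vdev->entity);	/* never fails */
		return 0;
	}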
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
index 45f88f0248c4..f67a74daf799 100644
--- a/include/media/v4l2-mediabus.h
+++ b/include/media/v4l2-mediabus.h
@@ -11,9 +11,31 @@
#include <linux/v4l2-mediabus.h>
#include <linux/bitops.h>
+/*
+ * How to use the V4L2_MBUS_* flags:
+ * Flags are defined for each of the possible states and values of a media
+ * bus configuration parameter. One and only one bit of each group of flags
+ * shall be set by the users of the v4l2_subdev_pad_ops.get_mbus_config
+ * operation to ensure that no conflicting settings are specified when
+ * reporting the media bus configuration. For example, it is invalid to set or
+ * clear both the V4L2_MBUS_HSYNC_ACTIVE_HIGH and the
+ * V4L2_MBUS_HSYNC_ACTIVE_LOW flag at the same time. Instead either flag
+ * V4L2_MBUS_HSYNC_ACTIVE_HIGH or flag V4L2_MBUS_HSYNC_ACTIVE_LOW shall be set.
+ *
+ * TODO: replace the existing V4L2_MBUS_* flags with structures of fields
+ * to avoid conflicting settings.
+ *
+ * For example:
+ * #define V4L2_MBUS_HSYNC_ACTIVE_HIGH BIT(2)
+ * #define V4L2_MBUS_HSYNC_ACTIVE_LOW BIT(3)
+ * will be replaced by a field whose value reports the intended active state of
+ * the signal:
+ * unsigned int v4l2_mbus_hsync_active : 1;
+ */
+
/* Parallel flags */
/*
- * Can the client run in master or in slave mode. By "Master mode" an operation
+ * The client runs in master or in slave mode. By "Master mode" an operation
* mode is meant, when the client (e.g., a camera sensor) is producing
* horizontal and vertical synchronisation. In "Slave mode" the host is
* providing these signals to the slave.
@@ -45,28 +67,57 @@
#define V4L2_MBUS_DATA_ENABLE_LOW BIT(15)
/* Serial flags */
-/* How many lanes the client can use */
-#define V4L2_MBUS_CSI2_1_LANE BIT(0)
-#define V4L2_MBUS_CSI2_2_LANE BIT(1)
-#define V4L2_MBUS_CSI2_3_LANE BIT(2)
-#define V4L2_MBUS_CSI2_4_LANE BIT(3)
-/* On which channels it can send video data */
-#define V4L2_MBUS_CSI2_CHANNEL_0 BIT(4)
-#define V4L2_MBUS_CSI2_CHANNEL_1 BIT(5)
-#define V4L2_MBUS_CSI2_CHANNEL_2 BIT(6)
-#define V4L2_MBUS_CSI2_CHANNEL_3 BIT(7)
-/* Does it support only continuous or also non-continuous clock mode */
-#define V4L2_MBUS_CSI2_CONTINUOUS_CLOCK BIT(8)
-#define V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK BIT(9)
-
-#define V4L2_MBUS_CSI2_LANES (V4L2_MBUS_CSI2_1_LANE | \
- V4L2_MBUS_CSI2_2_LANE | \
- V4L2_MBUS_CSI2_3_LANE | \
- V4L2_MBUS_CSI2_4_LANE)
-#define V4L2_MBUS_CSI2_CHANNELS (V4L2_MBUS_CSI2_CHANNEL_0 | \
- V4L2_MBUS_CSI2_CHANNEL_1 | \
- V4L2_MBUS_CSI2_CHANNEL_2 | \
- V4L2_MBUS_CSI2_CHANNEL_3)
+/* Clock non-continuous mode support. */
+#define V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK BIT(0)
+
+#define V4L2_MBUS_CSI2_MAX_DATA_LANES 8
+
+/**
+ * struct v4l2_mbus_config_mipi_csi2 - MIPI CSI-2 data bus configuration
+ * @flags: media bus (V4L2_MBUS_*) flags
+ * @data_lanes: an array of physical data lane indexes
+ * @clock_lane: physical lane index of the clock lane
+ * @num_data_lanes: number of data lanes
+ * @lane_polarities: polarity of the lanes. The order is the same as that of
+ * the physical lanes.
+ */
+struct v4l2_mbus_config_mipi_csi2 {
+ unsigned int flags;
+ unsigned char data_lanes[V4L2_MBUS_CSI2_MAX_DATA_LANES];
+ unsigned char clock_lane;
+ unsigned char num_data_lanes;
+ bool lane_polarities[1 + V4L2_MBUS_CSI2_MAX_DATA_LANES];
+};
+
+/**
+ * struct v4l2_mbus_config_parallel - parallel data bus configuration
+ * @flags: media bus (V4L2_MBUS_*) flags
+ * @bus_width: bus width in bits
+ * @data_shift: data shift in bits
+ */
+struct v4l2_mbus_config_parallel {
+ unsigned int flags;
+ unsigned char bus_width;
+ unsigned char data_shift;
+};
+
+/**
+ * struct v4l2_mbus_config_mipi_csi1 - CSI-1/CCP2 data bus configuration
+ * @clock_inv: polarity of clock/strobe signal
+ * false - not inverted, true - inverted
+ * @strobe: false - data/clock, true - data/strobe
+ * @lane_polarity: the polarities of the clock (index 0) and data
+ * (index 1) lanes
+ * @data_lane: the number of the data lane
+ * @clock_lane: the number of the clock lane
+ */
+struct v4l2_mbus_config_mipi_csi1 {
+ unsigned char clock_inv:1;
+ unsigned char strobe:1;
+ bool lane_polarity[2];
+ unsigned char data_lane;
+ unsigned char clock_lane;
+};
/**
* enum v4l2_mbus_type - media bus type
@@ -78,6 +129,8 @@
* @V4L2_MBUS_CCP2: CCP2 (Compact Camera Port 2)
* @V4L2_MBUS_CSI2_DPHY: MIPI CSI-2 serial interface, with D-PHY
* @V4L2_MBUS_CSI2_CPHY: MIPI CSI-2 serial interface, with C-PHY
+ * @V4L2_MBUS_DPI: MIPI VIDEO DPI interface
+ * @V4L2_MBUS_INVALID: invalid bus type (keep as last)
*/
enum v4l2_mbus_type {
V4L2_MBUS_UNKNOWN,
@@ -87,16 +140,32 @@ enum v4l2_mbus_type {
V4L2_MBUS_CCP2,
V4L2_MBUS_CSI2_DPHY,
V4L2_MBUS_CSI2_CPHY,
+ V4L2_MBUS_DPI,
+ V4L2_MBUS_INVALID,
};
/**
* struct v4l2_mbus_config - media bus configuration
- * @type: in: interface type
- * @flags: in / out: configuration flags, depending on @type
+ * @type: interface type
+ * @bus: bus configuration data structure
+ * @bus.parallel: embedded &struct v4l2_mbus_config_parallel.
+ * Used if the bus is parallel or BT.656.
+ * @bus.mipi_csi1: embedded &struct v4l2_mbus_config_mipi_csi1.
+ * Used if the bus is MIPI Alliance's Camera Serial
+ * Interface version 1 (MIPI CSI1) or Standard
+ * Mobile Imaging Architecture's Compact Camera Port 2
+ * (SMIA CCP2).
+ * @bus.mipi_csi2: embedded &struct v4l2_mbus_config_mipi_csi2.
+ * Used if the bus is MIPI Alliance's Camera Serial
+ * Interface version 2 (MIPI CSI2).
*/
struct v4l2_mbus_config {
enum v4l2_mbus_type type;
- unsigned int flags;
+ union {
+ struct v4l2_mbus_config_parallel parallel;
+ struct v4l2_mbus_config_mipi_csi1 mipi_csi1;
+ struct v4l2_mbus_config_mipi_csi2 mipi_csi2;
+ } bus;
};
/**
@@ -120,7 +189,7 @@ v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt,
}
/**
- * v4l2_fill_pix_format - Ancillary routine that fills a &struct
+ * v4l2_fill_mbus_format - Ancillary routine that fills a &struct
* v4l2_mbus_framefmt from a &struct v4l2_pix_format and a
* data format code.
*
@@ -143,7 +212,7 @@ static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt,
}
/**
- * v4l2_fill_pix_format - Ancillary routine that fills a &struct
+ * v4l2_fill_pix_format_mplane - Ancillary routine that fills a &struct
* v4l2_pix_format_mplane fields from a media bus structure.
*
* @pix_mp_fmt: pointer to &struct v4l2_pix_format_mplane to be filled
@@ -163,7 +232,7 @@ v4l2_fill_pix_format_mplane(struct v4l2_pix_format_mplane *pix_mp_fmt,
}
/**
- * v4l2_fill_pix_format - Ancillary routine that fills a &struct
+ * v4l2_fill_mbus_format_mplane - Ancillary routine that fills a &struct
* v4l2_mbus_framefmt from a &struct v4l2_pix_format_mplane.
*
* @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be filled
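With the union shown above, a hypothetical CSI-2 sensor's .get_mbus_config
operation might report its configuration as in this sketch; the lane count and
flag are example values.

	static int foo_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
				       struct v4l2_mbus_config *cfg)
	{
		cfg->type = V4L2_MBUS_CSI2_DPHY;
		cfg->bus.mipi_csi2.num_data_lanes = 2;	/* example value */
		cfg->bus.mipi_csi2.flags = V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;

		return 0;
	}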
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 1d85e24791e4..bb9de6a899e0 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -80,6 +80,10 @@ struct v4l2_m2m_queue_ctx {
* for an existing frame. This is always true unless
* V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF is set, which
* indicates slicing support.
+ * @is_draining: indicates the device is in the draining phase
+ * @last_src_buf: indicates the last source buffer for draining
+ * @next_buf_last: the next queued capture buffer will be tagged as last
+ * @has_stopped: indicates the device has been stopped
* @m2m_dev: opaque pointer to the internal data to handle M2M context
* @cap_q_ctx: Capture (output to memory) queue context
* @out_q_ctx: Output (input from memory) queue context
@@ -98,6 +102,11 @@ struct v4l2_m2m_ctx {
bool new_frame;
+ bool is_draining;
+ struct vb2_v4l2_buffer *last_src_buf;
+ bool next_buf_last;
+ bool has_stopped;
+
/* internal use only */
struct v4l2_m2m_dev *m2m_dev;
@@ -216,6 +225,108 @@ v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
}
/**
+ * v4l2_m2m_clear_state() - clear encoding/decoding state
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ */
+static inline void
+v4l2_m2m_clear_state(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ m2m_ctx->next_buf_last = false;
+ m2m_ctx->is_draining = false;
+ m2m_ctx->has_stopped = false;
+}
+
+/**
+ * v4l2_m2m_mark_stopped() - set current encoding/decoding state as stopped
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ */
+static inline void
+v4l2_m2m_mark_stopped(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ m2m_ctx->next_buf_last = false;
+ m2m_ctx->is_draining = false;
+ m2m_ctx->has_stopped = true;
+}
+
+/**
+ * v4l2_m2m_dst_buf_is_last() - return whether the next queued capture buffer
+ * ends the draining phase of the current encoding/decoding session
+ *
+ * This last capture buffer should be tagged with V4L2_BUF_FLAG_LAST to notify
+ * the end of the capture session.
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ */
+static inline bool
+v4l2_m2m_dst_buf_is_last(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ return m2m_ctx->is_draining && m2m_ctx->next_buf_last;
+}
+
+/**
+ * v4l2_m2m_has_stopped() - return the current encoding/decoding session
+ * stopped state
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ */
+static inline bool
+v4l2_m2m_has_stopped(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ return m2m_ctx->has_stopped;
+}
+
+/**
+ * v4l2_m2m_is_last_draining_src_buf() - return the output buffer draining
+ * state in the current encoding/decoding session
+ *
+ * This identifies the last output buffer queued before a session stop was
+ * requested; once that buffer has been processed, the encoding/decoding
+ * session actually enters the stopped state.
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @vbuf: pointer to struct &v4l2_buffer
+ */
+static inline bool
+v4l2_m2m_is_last_draining_src_buf(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_v4l2_buffer *vbuf)
+{
+ return m2m_ctx->is_draining && vbuf == m2m_ctx->last_src_buf;
+}
+
+/**
+ * v4l2_m2m_last_buffer_done() - marks the buffer with LAST flag and DONE
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @vbuf: pointer to struct &v4l2_buffer
+ */
+void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_v4l2_buffer *vbuf);
+
+/**
+ * v4l2_m2m_suspend() - stop new jobs from being run and wait for current job
+ * to finish
+ *
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ *
+ * Called by a driver in the suspend hook. Stop new jobs from being run, and
+ * wait for current running job to finish.
+ */
+void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev);
+
+/**
+ * v4l2_m2m_resume() - resume job running and try to run a queued job
+ *
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ *
+ * Called by a driver in the resume hook. This reverts the operation of
+ * v4l2_m2m_suspend() and allows job to be run. Also try to run a queued job if
+ * there is any.
+ */
+void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev);
+
+/**
* v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
*
* @file: pointer to struct &file
@@ -313,6 +424,46 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type);
/**
+ * v4l2_m2m_update_start_streaming_state() - update the encoding/decoding
+ * session state when a start of streaming of a video queue is requested
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @q: queue
+ */
+void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_queue *q);
+
+/**
+ * v4l2_m2m_update_stop_streaming_state() - update the encoding/decoding
+ * session state when a stop of streaming of a video queue is requested
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @q: queue
+ */
+void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_queue *q);
+
+/**
+ * v4l2_m2m_encoder_cmd() - execute an encoder command
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @ec: pointer to the encoder command
+ */
+int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_encoder_cmd *ec);
+
+/**
+ * v4l2_m2m_decoder_cmd() - execute a decoder command
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @dc: pointer to the decoder command
+ */
+int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_decoder_cmd *dc);
+
+/**
* v4l2_m2m_poll() - poll replacement, for destination buffers only
*
* @file: pointer to struct &file
@@ -335,15 +486,20 @@ __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
* @vma: pointer to struct &vm_area_struct
*
* Call from driver's mmap() function. Will handle mmap() for both queues
- * seamlessly for videobuffer, which will receive normal per-queue offsets and
- * proper videobuf queue pointers. The differentiation is made outside videobuf
- * by adding a predefined offset to buffers from one of the queues and
- * subtracting it before passing it back to videobuf. Only drivers (and
+ * seamlessly for the video buffer, which will receive normal per-queue offsets
+ * and proper vb2 queue pointers. The differentiation is made outside
+ * vb2 by adding a predefined offset to buffers from one of the queues
+ * and subtracting it before passing it back to vb2. Only drivers (and
* thus applications) receive modified offsets.
*/
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct vm_area_struct *vma);
+#ifndef CONFIG_MMU
+unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+#endif
/**
* v4l2_m2m_init() - initialize per-driver m2m data
*
@@ -388,7 +544,7 @@ void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);
* @m2m_dev: opaque pointer to the internal data to handle M2M context
* @drv_priv: driver's instance private data
* @queue_init: a callback for queue type-specific initialization function
- * to be used for initializing videobuf_queues
+ * to be used for initializing vb2_queues
*
* Usually called from driver's ``open()`` function.
*/
@@ -423,7 +579,7 @@ void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);
* @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
* @vbuf: pointer to struct &vb2_v4l2_buffer
*
- * Call from videobuf_queue_ops->ops->buf_queue, videobuf_queue_ops callback.
+ * Call from vb2_queue_ops->ops->buf_queue, vb2_queue_ops callback.
*/
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
struct vb2_v4l2_buffer *vbuf);
@@ -704,6 +860,10 @@ int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
enum v4l2_buf_type type);
int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
enum v4l2_buf_type type);
+int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *fh,
+ struct v4l2_encoder_cmd *ec);
+int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
struct v4l2_encoder_cmd *ec);
int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
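For a codec driver with no special command handling, the new helpers can plug straight into the ioctl table; a sketch for the encoder case (the surrounding ops are elided):

#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>

static const struct v4l2_ioctl_ops my_ioctl_ops = {
	/* ... querycap, format, buffer and streaming ioctls ... */
	.vidioc_try_encoder_cmd	= v4l2_m2m_ioctl_try_encoder_cmd,
	.vidioc_encoder_cmd	= v4l2_m2m_ioctl_encoder_cmd,
};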
diff --git a/include/media/v4l2-rect.h b/include/media/v4l2-rect.h
index 8800a640c224..bd587d0c0dc3 100644
--- a/include/media/v4l2-rect.h
+++ b/include/media/v4l2-rect.h
@@ -184,4 +184,24 @@ static inline bool v4l2_rect_overlap(const struct v4l2_rect *r1,
return true;
}
+/**
+ * v4l2_rect_enclosed() - is r1 enclosed in r2?
+ * @r1: rectangle.
+ * @r2: rectangle.
+ *
+ * Returns true if @r1 is enclosed in @r2.
+ */
+static inline bool v4l2_rect_enclosed(struct v4l2_rect *r1,
+ struct v4l2_rect *r2)
+{
+ if (r1->left < r2->left || r1->top < r2->top)
+ return false;
+ if (r1->left + r1->width > r2->left + r2->width)
+ return false;
+ if (r1->top + r1->height > r2->top + r2->height)
+ return false;
+
+ return true;
+}
+
#endif
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 761aa83a3f3c..2f80c9c818ed 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -162,6 +162,9 @@ struct v4l2_subdev_io_pin_config {
* @s_gpio: set GPIO pins. Very simple right now, might need to be extended with
* a direction argument if needed.
*
+ * @command: called by in-kernel drivers in order to call functions internal
+ * to subdev drivers that do not have a separate callback.
+ *
* @ioctl: called at the end of ioctl() syscall handler at the V4L2 core.
* used to provide support for private ioctls used on the driver.
*
@@ -193,6 +196,7 @@ struct v4l2_subdev_core_ops {
int (*load_fw)(struct v4l2_subdev *sd);
int (*reset)(struct v4l2_subdev *sd, u32 val);
int (*s_gpio)(struct v4l2_subdev *sd, u32 val);
+ long (*command)(struct v4l2_subdev *sd, unsigned int cmd, void *arg);
long (*ioctl)(struct v4l2_subdev *sd, unsigned int cmd, void *arg);
#ifdef CONFIG_COMPAT
long (*compat_ioctl32)(struct v4l2_subdev *sd, unsigned int cmd,
@@ -309,7 +313,18 @@ struct v4l2_subdev_audio_ops {
};
/**
- * enum v4l2_mbus_frame_desc_entry - media bus frame description flags
+ * struct v4l2_mbus_frame_desc_entry_csi2
+ *
+ * @vc: CSI-2 virtual channel
+ * @dt: CSI-2 data type ID
+ */
+struct v4l2_mbus_frame_desc_entry_csi2 {
+ u8 vc;
+ u8 dt;
+};
+
+/**
+ * enum v4l2_mbus_frame_desc_flags - media bus frame description flags
*
* @V4L2_MBUS_FRAME_DESC_FL_LEN_MAX:
* Indicates that &struct v4l2_mbus_frame_desc_entry->length field
@@ -331,26 +346,65 @@ enum v4l2_mbus_frame_desc_flags {
* %FRAME_DESC_FL_BLOB is not set.
* @length: number of octets per frame, valid if @flags
* %V4L2_MBUS_FRAME_DESC_FL_LEN_MAX is set.
+ * @bus: Bus-specific frame descriptor parameters
+ * @bus.csi2: CSI-2-specific bus configuration
*/
struct v4l2_mbus_frame_desc_entry {
enum v4l2_mbus_frame_desc_flags flags;
u32 pixelcode;
u32 length;
+ union {
+ struct v4l2_mbus_frame_desc_entry_csi2 csi2;
+ } bus;
};
-#define V4L2_FRAME_DESC_ENTRY_MAX 4
+/*
+ * If this number is too small, it should be dropped altogether and the
+ * API switched to a dynamic number of frame descriptor entries.
+ */
+#define V4L2_FRAME_DESC_ENTRY_MAX 8
+
+/**
+ * enum v4l2_mbus_frame_desc_type - media bus frame description type
+ *
+ * @V4L2_MBUS_FRAME_DESC_TYPE_UNDEFINED:
+ * Undefined frame desc type. Drivers should not use this; it exists
+ * only for backwards compatibility.
+ * @V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL:
+ * Parallel media bus.
+ * @V4L2_MBUS_FRAME_DESC_TYPE_CSI2:
+ * CSI-2 media bus. Frame desc parameters must be set in
+ * &struct v4l2_mbus_frame_desc_entry->bus.csi2.
+ */
+enum v4l2_mbus_frame_desc_type {
+ V4L2_MBUS_FRAME_DESC_TYPE_UNDEFINED = 0,
+ V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL,
+ V4L2_MBUS_FRAME_DESC_TYPE_CSI2,
+};
/**
* struct v4l2_mbus_frame_desc - media bus data frame description
+ * @type: type of the bus (enum v4l2_mbus_frame_desc_type)
* @entry: frame descriptors array
* @num_entries: number of entries in @entry array
*/
struct v4l2_mbus_frame_desc {
+ enum v4l2_mbus_frame_desc_type type;
struct v4l2_mbus_frame_desc_entry entry[V4L2_FRAME_DESC_ENTRY_MAX];
unsigned short num_entries;
};
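For illustration, a CSI-2 transmitter subdev might fill the descriptor in its get_frame_desc pad op along these lines (the pixel code and data type are arbitrary example values):

#include <linux/media-bus-format.h>
#include <linux/string.h>
#include <media/v4l2-subdev.h>

static int my_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
			     struct v4l2_mbus_frame_desc *fd)
{
	memset(fd, 0, sizeof(*fd));

	fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;
	fd->num_entries = 1;
	fd->entry[0].pixelcode = MEDIA_BUS_FMT_SRGGB10_1X10;
	fd->entry[0].bus.csi2.vc = 0;		/* virtual channel 0 */
	fd->entry[0].bus.csi2.dt = 0x2b;	/* CSI-2 RAW10 data type */

	return 0;
}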
/**
+ * enum v4l2_subdev_pre_streamon_flags - Flags for pre_streamon subdev core op
+ *
+ * @V4L2_SUBDEV_PRE_STREAMON_FL_MANUAL_LP: Set the transmitter to either LP-11
+ * or LP-111 mode before the call to s_stream().
+ */
+enum v4l2_subdev_pre_streamon_flags {
+ V4L2_SUBDEV_PRE_STREAMON_FL_MANUAL_LP = BIT(0),
+};
+
+/**
* struct v4l2_subdev_video_ops - Callbacks used when v4l device was opened
* in video mode.
*
@@ -381,7 +435,7 @@ struct v4l2_mbus_frame_desc {
* OUTPUT device. This is ignored by video capture devices.
*
* @g_input_status: get input status. Same as the status field in the
- * &struct &v4l2_input
+ * &struct v4l2_input
*
* @s_stream: used to notify the driver that a video stream will start or has
* stopped.
@@ -402,15 +456,22 @@ struct v4l2_mbus_frame_desc {
*
* @query_dv_timings: callback for VIDIOC_QUERY_DV_TIMINGS() ioctl handler code.
*
- * @g_mbus_config: get supported mediabus configurations
- *
- * @s_mbus_config: set a certain mediabus configuration. This operation is added
- * for compatibility with soc-camera drivers and should not be used by new
- * software.
- *
* @s_rx_buffer: set a host allocated memory buffer for the subdev. The subdev
* can adjust @size to a lower value and must not write more data to the
* buffer starting at @data than the original value of @size.
+ *
+ * @pre_streamon: May be called before streaming is actually started, to help
+ * initialise the bus. Current usage is to set a CSI-2 transmitter to
+ * LP-11 or LP-111 mode before streaming. See &enum
+ * v4l2_subdev_pre_streamon_flags.
+ *
+ * pre_streamon shall return an error if it cannot perform the operation as
+ * indicated by the flags argument. In particular, -EACCES indicates lack
+ * of support for the operation. The caller shall call post_streamoff for
+ * each successful call of pre_streamon.
+ *
+ * @post_streamoff: Called after streaming is stopped, but if and only if
+ * pre_streamon was called earlier.
*/
struct v4l2_subdev_video_ops {
int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config);
@@ -435,12 +496,10 @@ struct v4l2_subdev_video_ops {
struct v4l2_dv_timings *timings);
int (*query_dv_timings)(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings);
- int (*g_mbus_config)(struct v4l2_subdev *sd,
- struct v4l2_mbus_config *cfg);
- int (*s_mbus_config)(struct v4l2_subdev *sd,
- const struct v4l2_mbus_config *cfg);
int (*s_rx_buffer)(struct v4l2_subdev *sd, void *buf,
unsigned int *size);
+ int (*pre_streamon)(struct v4l2_subdev *sd, u32 flags);
+ int (*post_streamoff)(struct v4l2_subdev *sd);
};
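From the receiver's point of view, the call sequence could look like this sketch (source is the remote transmitter subdev; my_csi2_start_stream() is hypothetical and error handling is abbreviated):

static int my_csi2_start_stream(struct v4l2_subdev *source)
{
	int ret;

	/* Ask the transmitter for manual LP-11/LP-111 entry, if supported. */
	ret = v4l2_subdev_call(source, video, pre_streamon,
			       V4L2_SUBDEV_PRE_STREAMON_FL_MANUAL_LP);
	if (ret < 0 && ret != -ENOIOCTLCMD)
		return ret;	/* e.g. -EACCES: operation not supported */

	ret = v4l2_subdev_call(source, video, s_stream, 1);
	if (ret < 0 && ret != -ENOIOCTLCMD) {
		/* Balance the successful pre_streamon (a no-op if the
		 * op isn't implemented). */
		v4l2_subdev_call(source, video, post_streamoff);
		return ret;
	}

	return 0;
}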
/**
@@ -566,9 +625,9 @@ struct v4l2_subdev_ir_parameters {
*
* @rx_read: Reads received codes or pulse width data.
* The semantics are similar to a non-blocking read() call.
- * @rx_g_parameters: Get the current operating parameters and state of the
+ * @rx_g_parameters: Get the current operating parameters and state of
* the IR receiver.
- * @rx_s_parameters: Set the current operating parameters and state of the
+ * @rx_s_parameters: Set the current operating parameters and state of
* the IR receiver. It is recommended to call
* [rt]x_g_parameters first to fill out the current state, and only change
* the fields that need to be changed. Upon return, the actual device
@@ -582,9 +641,9 @@ struct v4l2_subdev_ir_parameters {
*
* @tx_write: Writes codes or pulse width data for transmission.
* The semantics are similar to a non-blocking write() call.
- * @tx_g_parameters: Get the current operating parameters and state of the
+ * @tx_g_parameters: Get the current operating parameters and state of
* the IR transmitter.
- * @tx_s_parameters: Set the current operating parameters and state of the
+ * @tx_s_parameters: Set the current operating parameters and state of
* the IR transmitter. It is recommended to call
* [rt]x_g_parameters first to fill out the current state, and only change
* the fields that need to be changed. Upon return, the actual device
@@ -626,6 +685,9 @@ struct v4l2_subdev_ir_ops {
* This structure only needs to be passed to the pad op if the 'which' field
* of the main argument is set to %V4L2_SUBDEV_FORMAT_TRY. For
* %V4L2_SUBDEV_FORMAT_ACTIVE it is safe to pass %NULL.
+ *
+ * Note: This struct is also used in active state, and the 'try' prefix is
+ * historical and to be removed.
*/
struct v4l2_subdev_pad_config {
struct v4l2_mbus_framefmt try_fmt;
@@ -634,6 +696,24 @@ struct v4l2_subdev_pad_config {
};
/**
+ * struct v4l2_subdev_state - Used for storing subdev state information.
+ *
+ * @_lock: default for 'lock'
+ * @lock: mutex for the state. May be replaced by the user.
+ * @pads: &struct v4l2_subdev_pad_config array
+ *
+ * This structure only needs to be passed to the pad op if the 'which' field
+ * of the main argument is set to %V4L2_SUBDEV_FORMAT_TRY. For
+ * %V4L2_SUBDEV_FORMAT_ACTIVE it is safe to pass %NULL.
+ */
+struct v4l2_subdev_state {
+ /* lock for the struct v4l2_subdev_state fields */
+ struct mutex _lock;
+ struct mutex *lock;
+ struct v4l2_subdev_pad_config *pads;
+};
+
+/**
* struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations
*
* @init_cfg: initialize the pad config to default values
@@ -670,30 +750,43 @@ struct v4l2_subdev_pad_config {
*
* @set_frame_desc: set the low level media bus frame parameters, @fd array
* may be adjusted by the subdev driver to device capabilities.
+ *
+ * @get_mbus_config: get the media bus configuration of a remote sub-device.
+ * The media bus configuration is usually retrieved from the
+ * firmware interface at sub-device probe time, immediately
+ * applied to the hardware and eventually adjusted by the
+ * driver. Remote sub-devices (usually video receivers) shall
+ * use this operation to query the transmitting end bus
+ * configuration in order to adjust their own one accordingly.
+ * Callers should make sure they get the most up-to-date
+ * configuration possible from the remote end, typically calling
+ * this operation as close as possible to stream-on time. The
+ * operation shall fail if the pad index it has been called on
+ * is not valid or in case of unrecoverable failures.
*/
struct v4l2_subdev_pad_ops {
int (*init_cfg)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg);
+ struct v4l2_subdev_state *state);
int (*enum_mbus_code)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_mbus_code_enum *code);
int (*enum_frame_size)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_frame_size_enum *fse);
int (*enum_frame_interval)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_frame_interval_enum *fie);
int (*get_fmt)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format);
int (*set_fmt)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format);
int (*get_selection)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_selection *sel);
int (*set_selection)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_selection *sel);
int (*get_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid);
int (*set_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid);
@@ -710,6 +803,8 @@ struct v4l2_subdev_pad_ops {
struct v4l2_mbus_frame_desc *fd);
int (*set_frame_desc)(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_mbus_frame_desc *fd);
+ int (*get_mbus_config)(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_config *config);
};
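A receiver could query the transmitting end along these lines, shortly before stream-on (a sketch; discovery of the remote subdev and pad is omitted):

#include <media/v4l2-mediabus.h>
#include <media/v4l2-subdev.h>

static int my_sync_bus_config(struct v4l2_subdev *remote,
			      unsigned int remote_pad)
{
	struct v4l2_mbus_config cfg = { 0 };
	int ret;

	ret = v4l2_subdev_call(remote, pad, get_mbus_config,
			       remote_pad, &cfg);
	if (ret == -ENOIOCTLCMD)
		return 0;	/* op not implemented: keep static defaults */
	if (ret)
		return ret;

	/* ... adjust the receiver's lane/clock configuration from cfg ... */
	return 0;
}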
/**
@@ -836,8 +931,14 @@ struct v4l2_subdev_platform_data {
* @asd: Pointer to respective &struct v4l2_async_subdev.
* @notifier: Pointer to the managing notifier.
* @subdev_notifier: A sub-device notifier implicitly registered for the sub-
- * device using v4l2_device_register_sensor_subdev().
+ * device using v4l2_async_register_subdev_sensor().
* @pdata: common part of subdevice platform data
+ * @state_lock: A pointer to a lock used for all the subdev's states, set by the
+ * driver. This is optional. If NULL, each state instance will get
+ * a lock of its own.
+ * @active_state: Active state for the subdev (NULL for subdevs tracking the
+ * state internally). Initialized by calling
+ * v4l2_subdev_init_finalize().
*
* Each instance of a subdev driver should create this struct, either
* stand-alone or embedded in a larger struct.
@@ -869,6 +970,20 @@ struct v4l2_subdev {
struct v4l2_async_notifier *notifier;
struct v4l2_async_notifier *subdev_notifier;
struct v4l2_subdev_platform_data *pdata;
+ struct mutex *state_lock;
+
+ /*
+ * The fields below are private, and should only be accessed via
+ * appropriate functions.
+ */
+
+ /*
+ * TODO: active_state should most likely be changed from a pointer to an
+ * embedded field. For the time being it's kept as a pointer to more
+ * easily catch uses of active_state in the cases where the driver
+ * doesn't support it.
+ */
+ struct v4l2_subdev_state *active_state;
};
@@ -900,14 +1015,14 @@ struct v4l2_subdev {
* struct v4l2_subdev_fh - Used for storing subdev information per file handle
*
* @vfh: pointer to &struct v4l2_fh
- * @pad: pointer to &struct v4l2_subdev_pad_config
+ * @state: pointer to &struct v4l2_subdev_state
* @owner: module pointer to the owner of this file handle
*/
struct v4l2_subdev_fh {
struct v4l2_fh vfh;
struct module *owner;
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
- struct v4l2_subdev_pad_config *pad;
+ struct v4l2_subdev_state *state;
#endif
};
@@ -923,59 +1038,79 @@ struct v4l2_subdev_fh {
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
/**
- * v4l2_subdev_get_try_format - ancillary routine to call
+ * v4l2_subdev_get_pad_format - ancillary routine to call
* &struct v4l2_subdev_pad_config->try_fmt
*
* @sd: pointer to &struct v4l2_subdev
- * @cfg: pointer to &struct v4l2_subdev_pad_config array.
- * @pad: index of the pad in the @cfg array.
+ * @state: pointer to &struct v4l2_subdev_state
+ * @pad: index of the pad in the &struct v4l2_subdev_state->pads array
*/
-static inline struct v4l2_mbus_framefmt
-*v4l2_subdev_get_try_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- unsigned int pad)
+static inline struct v4l2_mbus_framefmt *
+v4l2_subdev_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ unsigned int pad)
{
+ if (WARN_ON(!state))
+ return NULL;
if (WARN_ON(pad >= sd->entity.num_pads))
pad = 0;
- return &cfg[pad].try_fmt;
+ return &state->pads[pad].try_fmt;
}
/**
- * v4l2_subdev_get_try_crop - ancillary routine to call
+ * v4l2_subdev_get_pad_crop - ancillary routine to call
* &struct v4l2_subdev_pad_config->try_crop
*
* @sd: pointer to &struct v4l2_subdev
- * @cfg: pointer to &struct v4l2_subdev_pad_config array.
- * @pad: index of the pad in the @cfg array.
+ * @state: pointer to &struct v4l2_subdev_state.
+ * @pad: index of the pad in the &struct v4l2_subdev_state->pads array.
*/
-static inline struct v4l2_rect
-*v4l2_subdev_get_try_crop(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- unsigned int pad)
+static inline struct v4l2_rect *
+v4l2_subdev_get_pad_crop(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ unsigned int pad)
{
+ if (WARN_ON(!state))
+ return NULL;
if (WARN_ON(pad >= sd->entity.num_pads))
pad = 0;
- return &cfg[pad].try_crop;
+ return &state->pads[pad].try_crop;
}
/**
- * v4l2_subdev_get_try_crop - ancillary routine to call
+ * v4l2_subdev_get_pad_compose - ancillary routine to call
* &struct v4l2_subdev_pad_config->try_compose
*
* @sd: pointer to &struct v4l2_subdev
- * @cfg: pointer to &struct v4l2_subdev_pad_config array.
- * @pad: index of the pad in the @cfg array.
+ * @state: pointer to &struct v4l2_subdev_state.
+ * @pad: index of the pad in the &struct v4l2_subdev_state->pads array.
*/
-static inline struct v4l2_rect
-*v4l2_subdev_get_try_compose(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- unsigned int pad)
+static inline struct v4l2_rect *
+v4l2_subdev_get_pad_compose(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ unsigned int pad)
{
+ if (WARN_ON(!state))
+ return NULL;
if (WARN_ON(pad >= sd->entity.num_pads))
pad = 0;
- return &cfg[pad].try_compose;
+ return &state->pads[pad].try_compose;
}
-#endif
+
+/*
+ * Temporary helpers until uses of the v4l2_subdev_get_try_* functions have
+ * been renamed
+ */
+#define v4l2_subdev_get_try_format(sd, state, pad) \
+ v4l2_subdev_get_pad_format(sd, state, pad)
+
+#define v4l2_subdev_get_try_crop(sd, state, pad) \
+ v4l2_subdev_get_pad_crop(sd, state, pad)
+
+#define v4l2_subdev_get_try_compose(sd, state, pad) \
+ v4l2_subdev_get_pad_compose(sd, state, pad)
+
+#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
extern const struct v4l2_file_operations v4l2_subdev_fops;
@@ -1028,6 +1163,23 @@ static inline void *v4l2_get_subdev_hostdata(const struct v4l2_subdev *sd)
#ifdef CONFIG_MEDIA_CONTROLLER
/**
+ * v4l2_subdev_get_fwnode_pad_1_to_1 - Get pad number from a subdev fwnode
+ * endpoint, assuming 1:1 port:pad
+ *
+ * @entity: Pointer to the subdev entity
+ * @endpoint: Pointer to a parsed fwnode endpoint
+ *
+ * This function can be used as the .get_fwnode_pad operation for
+ * subdevices that map port numbers and pad indexes 1:1. If the endpoint
+ * is owned by the subdevice, the function returns the endpoint port
+ * number.
+ *
+ * Returns the endpoint port number on success or a negative error code.
+ */
+int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity,
+ struct fwnode_endpoint *endpoint);
+
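Typical usage is simply to point the entity operations at the helper; a sketch:

#include <media/media-entity.h>
#include <media/v4l2-subdev.h>

static const struct media_entity_operations my_entity_ops = {
	.get_fwnode_pad	= v4l2_subdev_get_fwnode_pad_1_to_1,
	.link_validate	= v4l2_subdev_link_validate,
};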
+/**
* v4l2_subdev_link_validate_default - validates a media link
*
* @sd: pointer to &struct v4l2_subdev
@@ -1057,20 +1209,167 @@ int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
int v4l2_subdev_link_validate(struct media_link *link);
/**
- * v4l2_subdev_alloc_pad_config - Allocates memory for pad config
+ * __v4l2_subdev_state_alloc - allocate v4l2_subdev_state
+ *
+ * @sd: pointer to &struct v4l2_subdev for which the state is being allocated.
+ * @lock_name: name of the state lock
+ * @key: lock_class_key for the lock
+ *
+ * Must call __v4l2_subdev_state_free() when state is no longer needed.
+ *
+ * Not to be called directly by the drivers.
+ */
+struct v4l2_subdev_state *__v4l2_subdev_state_alloc(struct v4l2_subdev *sd,
+ const char *lock_name,
+ struct lock_class_key *key);
+
+/**
+ * __v4l2_subdev_state_free - free a v4l2_subdev_state
+ *
+ * @state: v4l2_subdev_state to be freed.
*
- * @sd: pointer to struct v4l2_subdev
+ * Not to be called directly by the drivers.
*/
-struct
-v4l2_subdev_pad_config *v4l2_subdev_alloc_pad_config(struct v4l2_subdev *sd);
+void __v4l2_subdev_state_free(struct v4l2_subdev_state *state);
/**
- * v4l2_subdev_free_pad_config - Frees memory allocated by
- * v4l2_subdev_alloc_pad_config().
+ * v4l2_subdev_init_finalize() - Finalizes the initialization of the subdevice
+ * @sd: The subdev
+ *
+ * This function finalizes the initialization of the subdev, including
+ * allocation of the active state for the subdev.
*
- * @cfg: pointer to &struct v4l2_subdev_pad_config
+ * This function must be called by the subdev drivers that use the centralized
+ * active state, after the subdev struct has been initialized and
+ * media_entity_pads_init() has been called, but before registering the
+ * subdev.
+ *
+ * The user must call v4l2_subdev_cleanup() when the subdev is being removed.
*/
-void v4l2_subdev_free_pad_config(struct v4l2_subdev_pad_config *cfg);
+#define v4l2_subdev_init_finalize(sd) \
+ ({ \
+ static struct lock_class_key __key; \
+ const char *name = KBUILD_BASENAME \
+ ":" __stringify(__LINE__) ":sd->active_state->lock"; \
+ __v4l2_subdev_init_finalize(sd, name, &__key); \
+ })
+
+int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name,
+ struct lock_class_key *key);
+
+/**
+ * v4l2_subdev_cleanup() - Releases the resources allocated by the subdevice
+ * @sd: The subdevice
+ *
+ * This function will release the resources allocated in
+ * v4l2_subdev_init_finalize.
+ */
+void v4l2_subdev_cleanup(struct v4l2_subdev *sd);
+
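A sketch of the ordering described above, with a hypothetical struct my_sensor holding the subdev and its pad (v4l2_subdev_init() is assumed to have run already):

#include <media/media-entity.h>
#include <media/v4l2-async.h>
#include <media/v4l2-subdev.h>

struct my_sensor {
	struct v4l2_subdev sd;
	struct media_pad pad;
};

static int my_subdev_register(struct my_sensor *priv)
{
	int ret;

	ret = media_entity_pads_init(&priv->sd.entity, 1, &priv->pad);
	if (ret)
		return ret;

	ret = v4l2_subdev_init_finalize(&priv->sd);	/* allocates the active state */
	if (ret)
		goto err_entity;

	ret = v4l2_async_register_subdev(&priv->sd);
	if (ret)
		goto err_state;

	return 0;

err_state:
	v4l2_subdev_cleanup(&priv->sd);
err_entity:
	media_entity_cleanup(&priv->sd.entity);
	return ret;
}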
+/**
+ * v4l2_subdev_lock_state() - Locks the subdev state
+ * @state: The subdevice state
+ *
+ * Locks the given subdev state.
+ *
+ * The state must be unlocked with v4l2_subdev_unlock_state() after use.
+ */
+static inline void v4l2_subdev_lock_state(struct v4l2_subdev_state *state)
+{
+ mutex_lock(state->lock);
+}
+
+/**
+ * v4l2_subdev_unlock_state() - Unlocks the subdev state
+ * @state: The subdevice state
+ *
+ * Unlocks the given subdev state.
+ */
+static inline void v4l2_subdev_unlock_state(struct v4l2_subdev_state *state)
+{
+ mutex_unlock(state->lock);
+}
+
+/**
+ * v4l2_subdev_get_unlocked_active_state() - Checks that the active subdev state
+ * is unlocked and returns it
+ * @sd: The subdevice
+ *
+ * Returns the active state for the subdevice, or NULL if the subdev does not
+ * support active state. If the state is not NULL, calls
+ * lockdep_assert_not_held() to issue a warning if the state is locked.
+ *
+ * This function is to be used e.g. when getting the active state for the sole
+ * purpose of passing it forward, without accessing the state fields.
+ */
+static inline struct v4l2_subdev_state *
+v4l2_subdev_get_unlocked_active_state(struct v4l2_subdev *sd)
+{
+ if (sd->active_state)
+ lockdep_assert_not_held(sd->active_state->lock);
+ return sd->active_state;
+}
+
+/**
+ * v4l2_subdev_get_locked_active_state() - Checks that the active subdev state
+ * is locked and returns it
+ *
+ * @sd: The subdevice
+ *
+ * Returns the active state for the subdevice, or NULL if the subdev does not
+ * support active state. If the state is not NULL, calls lockdep_assert_held()
+ * to issue a warning if the state is not locked.
+ *
+ * This function is to be used when the caller knows that the active state is
+ * already locked.
+ */
+static inline struct v4l2_subdev_state *
+v4l2_subdev_get_locked_active_state(struct v4l2_subdev *sd)
+{
+ if (sd->active_state)
+ lockdep_assert_held(sd->active_state->lock);
+ return sd->active_state;
+}
+
+/**
+ * v4l2_subdev_lock_and_get_active_state() - Locks and returns the active subdev
+ * state for the subdevice
+ * @sd: The subdevice
+ *
+ * Returns the locked active state for the subdevice, or NULL if the subdev
+ * does not support active state.
+ *
+ * The state must be unlocked with v4l2_subdev_unlock_state() after use.
+ */
+static inline struct v4l2_subdev_state *
+v4l2_subdev_lock_and_get_active_state(struct v4l2_subdev *sd)
+{
+ if (sd->active_state)
+ v4l2_subdev_lock_state(sd->active_state);
+ return sd->active_state;
+}
+
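A sketch of the resulting driver pattern for touching the active state outside of pad ops (my_get_active_code() is illustrative):

static int my_get_active_code(struct v4l2_subdev *sd, u32 *code)
{
	struct v4l2_subdev_state *state;
	struct v4l2_mbus_framefmt *fmt;

	state = v4l2_subdev_lock_and_get_active_state(sd);
	if (!state)
		return -ENODEV;	/* subdev doesn't use the active state */

	fmt = v4l2_subdev_get_pad_format(sd, state, 0);
	*code = fmt->code;

	v4l2_subdev_unlock_state(state);
	return 0;
}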
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+
+/**
+ * v4l2_subdev_get_fmt() - Fill format based on state
+ * @sd: subdevice
+ * @state: subdevice state
+ * @format: pointer to &struct v4l2_subdev_format
+ *
+ * Fill @format->format field based on the information in the @format struct.
+ *
+ * This function can be used by subdev drivers that support the active state to
+ * implement v4l2_subdev_pad_ops.get_fmt if the driver does not need to do
+ * anything special in its get_fmt op.
+ *
+ * Returns 0 on success, error value otherwise.
+ */
+int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format);
+
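Drivers using the active state can then assign the helper directly; a sketch:

static const struct v4l2_subdev_pad_ops my_pad_ops = {
	.get_fmt = v4l2_subdev_get_fmt,
	/* .set_fmt = my_set_fmt, ... */
};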
+#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
+
#endif /* CONFIG_MEDIA_CONTROLLER */
/**
@@ -1093,7 +1392,7 @@ extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers;
* @f: callback function to be called.
* The callback functions are defined in groups, according to
* each element at &struct v4l2_subdev_ops.
- * @args...: arguments for @f.
+ * @args: arguments for @f.
*
* Example: err = v4l2_subdev_call(sd, video, s_std, norm);
*/
@@ -1115,6 +1414,70 @@ extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers;
})
/**
+ * v4l2_subdev_call_state_active - call an operation of a v4l2_subdev which
+ * takes state as a parameter, passing the
+ * subdev its active state.
+ *
+ * @sd: pointer to the &struct v4l2_subdev
+ * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
+ * Each element there groups a set of callbacks functions.
+ * @f: callback function to be called.
+ * The callback functions are defined in groups, according to
+ * each element at &struct v4l2_subdev_ops.
+ * @args: arguments for @f.
+ *
+ * This is similar to v4l2_subdev_call(), except that this version can only be
+ * used for ops that take a subdev state as a parameter. The macro will get the
+ * active state, lock it before calling the op and unlock it after the call.
+ */
+#define v4l2_subdev_call_state_active(sd, o, f, args...) \
+ ({ \
+ int __result; \
+ struct v4l2_subdev_state *state; \
+ state = v4l2_subdev_get_unlocked_active_state(sd); \
+ if (state) \
+ v4l2_subdev_lock_state(state); \
+ __result = v4l2_subdev_call(sd, o, f, state, ##args); \
+ if (state) \
+ v4l2_subdev_unlock_state(state); \
+ __result; \
+ })
+
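Usage reduces to a one-liner; for instance, a hypothetical helper reading the active format:

static int my_read_active_fmt(struct v4l2_subdev *sd,
			      struct v4l2_subdev_format *fmt)
{
	fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
	/* The macro locks the active state, calls get_fmt, then unlocks. */
	return v4l2_subdev_call_state_active(sd, pad, get_fmt, fmt);
}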
+/**
+ * v4l2_subdev_call_state_try - call an operation of a v4l2_subdev which
+ * takes state as a parameter, passing the
+ * subdev a newly allocated try state.
+ *
+ * @sd: pointer to the &struct v4l2_subdev
+ * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
+ * Each element there groups a set of callbacks functions.
+ * @f: callback function to be called.
+ * The callback functions are defined in groups, according to
+ * each element at &struct v4l2_subdev_ops.
+ * @args: arguments for @f.
+ *
+ * This is similar to v4l2_subdev_call_state_active(), except that as this
+ * version allocates a new state, this is only usable for
+ * V4L2_SUBDEV_FORMAT_TRY use cases.
+ *
+ * Note: only legacy non-MC drivers may need this macro.
+ */
+#define v4l2_subdev_call_state_try(sd, o, f, args...) \
+ ({ \
+ int __result; \
+ static struct lock_class_key __key; \
+ const char *name = KBUILD_BASENAME \
+ ":" __stringify(__LINE__) ":state->lock"; \
+ struct v4l2_subdev_state *state = \
+ __v4l2_subdev_state_alloc(sd, name, &__key); \
+ v4l2_subdev_lock_state(state); \
+ __result = v4l2_subdev_call(sd, o, f, state, ##args); \
+ v4l2_subdev_unlock_state(state); \
+ __v4l2_subdev_state_free(state); \
+ __result; \
+ })
+
+/**
* v4l2_subdev_has_op - Checks if a subdev defines a certain operation.
*
* @sd: pointer to the &struct v4l2_subdev
@@ -1138,4 +1501,4 @@ extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers;
void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
const struct v4l2_event *ev);
-#endif
+#endif /* _V4L2_SUBDEV_H */
diff --git a/include/media/v4l2-uvc.h b/include/media/v4l2-uvc.h
new file mode 100644
index 000000000000..f83e31661333
--- /dev/null
+++ b/include/media/v4l2-uvc.h
@@ -0,0 +1,359 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * v4l2 uvc internal API header
+ *
+ * Some commonly needed functions for uvc drivers
+ */
+
+#ifndef __LINUX_V4L2_UVC_H
+#define __LINUX_V4L2_UVC_H
+
+/* ------------------------------------------------------------------------
+ * GUIDs
+ */
+#define UVC_GUID_UVC_CAMERA \
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}
+#define UVC_GUID_UVC_OUTPUT \
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}
+#define UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT \
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
+#define UVC_GUID_UVC_PROCESSING \
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01}
+#define UVC_GUID_UVC_SELECTOR \
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02}
+#define UVC_GUID_EXT_GPIO_CONTROLLER \
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x03}
+
+#define UVC_GUID_FORMAT_MJPEG \
+ { 'M', 'J', 'P', 'G', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_YUY2 \
+ { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_YUY2_ISIGHT \
+ { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_NV12 \
+ { 'N', 'V', '1', '2', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_YV12 \
+ { 'Y', 'V', '1', '2', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_I420 \
+ { 'I', '4', '2', '0', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_UYVY \
+ { 'U', 'Y', 'V', 'Y', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y800 \
+ { 'Y', '8', '0', '0', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y8 \
+ { 'Y', '8', ' ', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y10 \
+ { 'Y', '1', '0', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y12 \
+ { 'Y', '1', '2', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y16 \
+ { 'Y', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_BY8 \
+ { 'B', 'Y', '8', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_BA81 \
+ { 'B', 'A', '8', '1', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_GBRG \
+ { 'G', 'B', 'R', 'G', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_GRBG \
+ { 'G', 'R', 'B', 'G', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_RGGB \
+ { 'R', 'G', 'G', 'B', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_BG16 \
+ { 'B', 'G', '1', '6', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_GB16 \
+ { 'G', 'B', '1', '6', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_RG16 \
+ { 'R', 'G', '1', '6', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_GR16 \
+ { 'G', 'R', '1', '6', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_RGBP \
+ { 'R', 'G', 'B', 'P', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_BGR3 \
+ { 0x7d, 0xeb, 0x36, 0xe4, 0x4f, 0x52, 0xce, 0x11, \
+ 0x9f, 0x53, 0x00, 0x20, 0xaf, 0x0b, 0xa7, 0x70}
+#define UVC_GUID_FORMAT_M420 \
+ { 'M', '4', '2', '0', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+
+#define UVC_GUID_FORMAT_H264 \
+ { 'H', '2', '6', '4', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_H265 \
+ { 'H', '2', '6', '5', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y8I \
+ { 'Y', '8', 'I', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y12I \
+ { 'Y', '1', '2', 'I', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Z16 \
+ { 'Z', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_RW10 \
+ { 'R', 'W', '1', '0', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_INVZ \
+ { 'I', 'N', 'V', 'Z', 0x90, 0x2d, 0x58, 0x4a, \
+ 0x92, 0x0b, 0x77, 0x3f, 0x1f, 0x2c, 0x55, 0x6b}
+#define UVC_GUID_FORMAT_INZI \
+ { 'I', 'N', 'Z', 'I', 0x66, 0x1a, 0x42, 0xa2, \
+ 0x90, 0x65, 0xd0, 0x18, 0x14, 0xa8, 0xef, 0x8a}
+#define UVC_GUID_FORMAT_INVI \
+ { 'I', 'N', 'V', 'I', 0xdb, 0x57, 0x49, 0x5e, \
+ 0x8e, 0x3f, 0xf4, 0x79, 0x53, 0x2b, 0x94, 0x6f}
+#define UVC_GUID_FORMAT_CNF4 \
+ { 'C', ' ', ' ', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+
+#define UVC_GUID_FORMAT_D3DFMT_L8 \
+ {0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_KSMEDIA_L8_IR \
+ {0x32, 0x00, 0x00, 0x00, 0x02, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+
+#define UVC_GUID_FORMAT_HEVC \
+ { 'H', 'E', 'V', 'C', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+
+/* ------------------------------------------------------------------------
+ * Video formats
+ */
+
+struct uvc_format_desc {
+ char *name;
+ u8 guid[16];
+ u32 fcc;
+};
+
+static struct uvc_format_desc uvc_fmts[] = {
+ {
+ .name = "YUV 4:2:2 (YUYV)",
+ .guid = UVC_GUID_FORMAT_YUY2,
+ .fcc = V4L2_PIX_FMT_YUYV,
+ },
+ {
+ .name = "YUV 4:2:2 (YUYV)",
+ .guid = UVC_GUID_FORMAT_YUY2_ISIGHT,
+ .fcc = V4L2_PIX_FMT_YUYV,
+ },
+ {
+ .name = "YUV 4:2:0 (NV12)",
+ .guid = UVC_GUID_FORMAT_NV12,
+ .fcc = V4L2_PIX_FMT_NV12,
+ },
+ {
+ .name = "MJPEG",
+ .guid = UVC_GUID_FORMAT_MJPEG,
+ .fcc = V4L2_PIX_FMT_MJPEG,
+ },
+ {
+ .name = "YVU 4:2:0 (YV12)",
+ .guid = UVC_GUID_FORMAT_YV12,
+ .fcc = V4L2_PIX_FMT_YVU420,
+ },
+ {
+ .name = "YUV 4:2:0 (I420)",
+ .guid = UVC_GUID_FORMAT_I420,
+ .fcc = V4L2_PIX_FMT_YUV420,
+ },
+ {
+ .name = "YUV 4:2:0 (M420)",
+ .guid = UVC_GUID_FORMAT_M420,
+ .fcc = V4L2_PIX_FMT_M420,
+ },
+ {
+ .name = "YUV 4:2:2 (UYVY)",
+ .guid = UVC_GUID_FORMAT_UYVY,
+ .fcc = V4L2_PIX_FMT_UYVY,
+ },
+ {
+ .name = "Greyscale 8-bit (Y800)",
+ .guid = UVC_GUID_FORMAT_Y800,
+ .fcc = V4L2_PIX_FMT_GREY,
+ },
+ {
+ .name = "Greyscale 8-bit (Y8 )",
+ .guid = UVC_GUID_FORMAT_Y8,
+ .fcc = V4L2_PIX_FMT_GREY,
+ },
+ {
+ .name = "Greyscale 8-bit (D3DFMT_L8)",
+ .guid = UVC_GUID_FORMAT_D3DFMT_L8,
+ .fcc = V4L2_PIX_FMT_GREY,
+ },
+ {
+ .name = "IR 8-bit (L8_IR)",
+ .guid = UVC_GUID_FORMAT_KSMEDIA_L8_IR,
+ .fcc = V4L2_PIX_FMT_GREY,
+ },
+ {
+ .name = "Greyscale 10-bit (Y10 )",
+ .guid = UVC_GUID_FORMAT_Y10,
+ .fcc = V4L2_PIX_FMT_Y10,
+ },
+ {
+ .name = "Greyscale 12-bit (Y12 )",
+ .guid = UVC_GUID_FORMAT_Y12,
+ .fcc = V4L2_PIX_FMT_Y12,
+ },
+ {
+ .name = "Greyscale 16-bit (Y16 )",
+ .guid = UVC_GUID_FORMAT_Y16,
+ .fcc = V4L2_PIX_FMT_Y16,
+ },
+ {
+ .name = "BGGR Bayer (BY8 )",
+ .guid = UVC_GUID_FORMAT_BY8,
+ .fcc = V4L2_PIX_FMT_SBGGR8,
+ },
+ {
+ .name = "BGGR Bayer (BA81)",
+ .guid = UVC_GUID_FORMAT_BA81,
+ .fcc = V4L2_PIX_FMT_SBGGR8,
+ },
+ {
+ .name = "GBRG Bayer (GBRG)",
+ .guid = UVC_GUID_FORMAT_GBRG,
+ .fcc = V4L2_PIX_FMT_SGBRG8,
+ },
+ {
+ .name = "GRBG Bayer (GRBG)",
+ .guid = UVC_GUID_FORMAT_GRBG,
+ .fcc = V4L2_PIX_FMT_SGRBG8,
+ },
+ {
+ .name = "RGGB Bayer (RGGB)",
+ .guid = UVC_GUID_FORMAT_RGGB,
+ .fcc = V4L2_PIX_FMT_SRGGB8,
+ },
+ {
+ .name = "RGB565",
+ .guid = UVC_GUID_FORMAT_RGBP,
+ .fcc = V4L2_PIX_FMT_RGB565,
+ },
+ {
+ .name = "BGR 8:8:8 (BGR3)",
+ .guid = UVC_GUID_FORMAT_BGR3,
+ .fcc = V4L2_PIX_FMT_BGR24,
+ },
+ {
+ .name = "H.264",
+ .guid = UVC_GUID_FORMAT_H264,
+ .fcc = V4L2_PIX_FMT_H264,
+ },
+ {
+ .name = "H.265",
+ .guid = UVC_GUID_FORMAT_H265,
+ .fcc = V4L2_PIX_FMT_HEVC,
+ },
+ {
+ .name = "Greyscale 8 L/R (Y8I)",
+ .guid = UVC_GUID_FORMAT_Y8I,
+ .fcc = V4L2_PIX_FMT_Y8I,
+ },
+ {
+ .name = "Greyscale 12 L/R (Y12I)",
+ .guid = UVC_GUID_FORMAT_Y12I,
+ .fcc = V4L2_PIX_FMT_Y12I,
+ },
+ {
+ .name = "Depth data 16-bit (Z16)",
+ .guid = UVC_GUID_FORMAT_Z16,
+ .fcc = V4L2_PIX_FMT_Z16,
+ },
+ {
+ .name = "Bayer 10-bit (SRGGB10P)",
+ .guid = UVC_GUID_FORMAT_RW10,
+ .fcc = V4L2_PIX_FMT_SRGGB10P,
+ },
+ {
+ .name = "Bayer 16-bit (SBGGR16)",
+ .guid = UVC_GUID_FORMAT_BG16,
+ .fcc = V4L2_PIX_FMT_SBGGR16,
+ },
+ {
+ .name = "Bayer 16-bit (SGBRG16)",
+ .guid = UVC_GUID_FORMAT_GB16,
+ .fcc = V4L2_PIX_FMT_SGBRG16,
+ },
+ {
+ .name = "Bayer 16-bit (SRGGB16)",
+ .guid = UVC_GUID_FORMAT_RG16,
+ .fcc = V4L2_PIX_FMT_SRGGB16,
+ },
+ {
+ .name = "Bayer 16-bit (SGRBG16)",
+ .guid = UVC_GUID_FORMAT_GR16,
+ .fcc = V4L2_PIX_FMT_SGRBG16,
+ },
+ {
+ .name = "Depth data 16-bit (Z16)",
+ .guid = UVC_GUID_FORMAT_INVZ,
+ .fcc = V4L2_PIX_FMT_Z16,
+ },
+ {
+ .name = "Greyscale 10-bit (Y10 )",
+ .guid = UVC_GUID_FORMAT_INVI,
+ .fcc = V4L2_PIX_FMT_Y10,
+ },
+ {
+ .name = "IR:Depth 26-bit (INZI)",
+ .guid = UVC_GUID_FORMAT_INZI,
+ .fcc = V4L2_PIX_FMT_INZI,
+ },
+ {
+ .name = "4-bit Depth Confidence (Packed)",
+ .guid = UVC_GUID_FORMAT_CNF4,
+ .fcc = V4L2_PIX_FMT_CNF4,
+ },
+ {
+ .name = "HEVC",
+ .guid = UVC_GUID_FORMAT_HEVC,
+ .fcc = V4L2_PIX_FMT_HEVC,
+ },
+};
+
+static inline struct uvc_format_desc *uvc_format_by_guid(const u8 guid[16])
+{
+ unsigned int len = ARRAY_SIZE(uvc_fmts);
+ unsigned int i;
+
+ for (i = 0; i < len; ++i) {
+ if (memcmp(guid, uvc_fmts[i].guid, 16) == 0)
+ return &uvc_fmts[i];
+ }
+
+ return NULL;
+}
+
+#endif /* __LINUX_V4L2_UVC_H */
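A short usage sketch, mapping a descriptor GUID to a V4L2 fourcc (my_guid_to_fcc() is illustrative):

static u32 my_guid_to_fcc(const u8 guid[16])
{
	struct uvc_format_desc *fmt = uvc_format_by_guid(guid);

	return fmt ? fmt->fcc : 0;	/* 0: GUID not recognized */
}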
diff --git a/include/media/v4l2-vp9.h b/include/media/v4l2-vp9.h
new file mode 100644
index 000000000000..05478ad6d4ab
--- /dev/null
+++ b/include/media/v4l2-vp9.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper functions for vp9 codecs.
+ *
+ * Copyright (c) 2021 Collabora, Ltd.
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@collabora.com>
+ */
+
+#ifndef _MEDIA_V4L2_VP9_H
+#define _MEDIA_V4L2_VP9_H
+
+#include <media/v4l2-ctrls.h>
+
+/**
+ * struct v4l2_vp9_frame_mv_context - motion vector-related probabilities
+ *
+ * @joint: motion vector joint probabilities.
+ * @sign: motion vector sign probabilities.
+ * @classes: motion vector class probabilities.
+ * @class0_bit: motion vector class0 bit probabilities.
+ * @bits: motion vector bits probabilities.
+ * @class0_fr: motion vector class0 fractional bit probabilities.
+ * @fr: motion vector fractional bit probabilities.
+ * @class0_hp: motion vector class0 high precision fractional bit probabilities.
+ * @hp: motion vector high precision fractional bit probabilities.
+ *
+ * A member of v4l2_vp9_frame_context.
+ */
+struct v4l2_vp9_frame_mv_context {
+ u8 joint[3];
+ u8 sign[2];
+ u8 classes[2][10];
+ u8 class0_bit[2];
+ u8 bits[2][10];
+ u8 class0_fr[2][2][3];
+ u8 fr[2][3];
+ u8 class0_hp[2];
+ u8 hp[2];
+};
+
+/**
+ * struct v4l2_vp9_frame_context - frame probabilities, including motion-vector related
+ *
+ * @tx8: TX 8x8 probabilities.
+ * @tx16: TX 16x16 probabilities.
+ * @tx32: TX 32x32 probabilities.
+ * @coef: coefficient probabilities.
+ * @skip: skip probabilities.
+ * @inter_mode: inter mode probabilities.
+ * @interp_filter: interpolation filter probabilities.
+ * @is_inter: is inter-block probabilities.
+ * @comp_mode: compound prediction mode probabilities.
+ * @single_ref: single ref probabilities.
+ * @comp_ref: compound ref probabilities.
+ * @y_mode: Y prediction mode probabilities.
+ * @uv_mode: UV prediction mode probabilities.
+ * @partition: partition probabilities.
+ * @mv: motion vector probabilities.
+ *
+ * Drivers which need to keep track of frame context(s) can use this struct.
+ * The members correspond to probability tables, which are specified only implicitly in the
+ * vp9 spec. Section 10.5 "Default probability tables" contains all the types of involved
+ * tables, i.e. the actual tables are of the same kind, and when they are reset (which is
+ * mandated by the spec sometimes) they are overwritten with values from the default tables.
+ */
+struct v4l2_vp9_frame_context {
+ u8 tx8[2][1];
+ u8 tx16[2][2];
+ u8 tx32[2][3];
+ u8 coef[4][2][2][6][6][3];
+ u8 skip[3];
+ u8 inter_mode[7][3];
+ u8 interp_filter[4][2];
+ u8 is_inter[4];
+ u8 comp_mode[5];
+ u8 single_ref[5][2];
+ u8 comp_ref[5];
+ u8 y_mode[4][9];
+ u8 uv_mode[10][9];
+ u8 partition[16][3];
+
+ struct v4l2_vp9_frame_mv_context mv;
+};
+
+/**
+ * struct v4l2_vp9_frame_symbol_counts - pointers to arrays of symbol counts
+ *
+ * @partition: partition counts.
+ * @skip: skip counts.
+ * @intra_inter: is inter-block counts.
+ * @tx32p: TX32 counts.
+ * @tx16p: TX16 counts.
+ * @tx8p: TX8 counts.
+ * @y_mode: Y prediction mode counts.
+ * @uv_mode: UV prediction mode counts.
+ * @comp: compound prediction mode counts.
+ * @comp_ref: compound ref counts.
+ * @single_ref: single ref counts.
+ * @mv_mode: inter mode counts.
+ * @filter: interpolation filter counts.
+ * @mv_joint: motion vector joint counts.
+ * @sign: motion vector sign counts.
+ * @classes: motion vector class counts.
+ * @class0: motion vector class0 bit counts.
+ * @bits: motion vector bits counts.
+ * @class0_fp: motion vector class0 fractional bit counts.
+ * @fp: motion vector fractional bit counts.
+ * @class0_hp: motion vector class0 high precision fractional bit counts.
+ * @hp: motion vector high precision fractional bit counts.
+ * @coeff: coefficient counts.
+ * @eob: eob counts
+ *
+ * The fields correspond to what is specified in section 8.3 "Clear counts process" of the spec.
+ * Different pieces of hardware can report the counts in different order, so we cannot rely on
+ * simply overlaying a struct on a relevant block of memory. Instead we provide pointers to
+ * arrays, an array of pointers to arrays in the case of @coeff, and an array of pointers
+ * in the case of @eob.
+ */
+struct v4l2_vp9_frame_symbol_counts {
+ u32 (*partition)[16][4];
+ u32 (*skip)[3][2];
+ u32 (*intra_inter)[4][2];
+ u32 (*tx32p)[2][4];
+ u32 (*tx16p)[2][4];
+ u32 (*tx8p)[2][2];
+ u32 (*y_mode)[4][10];
+ u32 (*uv_mode)[10][10];
+ u32 (*comp)[5][2];
+ u32 (*comp_ref)[5][2];
+ u32 (*single_ref)[5][2][2];
+ u32 (*mv_mode)[7][4];
+ u32 (*filter)[4][3];
+ u32 (*mv_joint)[4];
+ u32 (*sign)[2][2];
+ u32 (*classes)[2][11];
+ u32 (*class0)[2][2];
+ u32 (*bits)[2][10][2];
+ u32 (*class0_fp)[2][2][4];
+ u32 (*fp)[2][4];
+ u32 (*class0_hp)[2][2];
+ u32 (*hp)[2][2];
+ u32 (*coeff[4][2][2][6][6])[3];
+ u32 *eob[4][2][2][6][6][2];
+};
+
+extern const u8 v4l2_vp9_kf_y_mode_prob[10][10][9]; /* Section 10.4 of the spec */
+extern const u8 v4l2_vp9_kf_partition_probs[16][3]; /* Section 10.4 of the spec */
+extern const u8 v4l2_vp9_kf_uv_mode_prob[10][9]; /* Section 10.4 of the spec */
+extern const struct v4l2_vp9_frame_context v4l2_vp9_default_probs; /* Section 10.5 of the spec */
+
+/**
+ * v4l2_vp9_fw_update_probs() - Perform forward update of vp9 probabilities
+ *
+ * @probs: current probabilities values
+ * @deltas: delta values from compressed header
+ * @dec_params: vp9 frame decoding parameters
+ *
+ * This function performs forward updates of probabilities for the vp9 boolean decoder.
+ * The frame header can contain a directive to update the probabilities; if so, the deltas
+ * are provided in the header, too. Userspace parses those deltas and passes the resulting
+ * struct to the kernel.
+ */
+void v4l2_vp9_fw_update_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas,
+ const struct v4l2_ctrl_vp9_frame *dec_params);
+
+/**
+ * v4l2_vp9_reset_frame_ctx() - Reset appropriate frame context
+ *
+ * @dec_params: vp9 frame decoding parameters
+ * @frame_context: array of the 4 frame contexts
+ *
+ * This function resets appropriate frame contexts, based on what's in dec_params.
+ *
+ * Returns the frame context index after the update, which might be reset to zero if
+ * mandated by the spec.
+ */
+u8 v4l2_vp9_reset_frame_ctx(const struct v4l2_ctrl_vp9_frame *dec_params,
+ struct v4l2_vp9_frame_context *frame_context);
+
+/**
+ * v4l2_vp9_adapt_coef_probs() - Perform backward update of vp9 coefficients probabilities
+ *
+ * @probs: current probabilities values
+ * @counts: values of symbol counts after the current frame has been decoded
+ * @use_128: if true, 128 is used as the update factor, otherwise 112
+ * @frame_is_intra: flag indicating that FrameIsIntra is true
+ *
+ * This function performs backward updates of coefficients probabilities for the vp9 boolean
+ * decoder. After a frame has been decoded the counts of how many times a given symbol has
+ * occurred are known and are used to update the probability of each symbol.
+ */
+void v4l2_vp9_adapt_coef_probs(struct v4l2_vp9_frame_context *probs,
+ struct v4l2_vp9_frame_symbol_counts *counts,
+ bool use_128,
+ bool frame_is_intra);
+
+/**
+ * v4l2_vp9_adapt_noncoef_probs() - Perform backward update of vp9 non-coefficients probabilities
+ *
+ * @probs: current probabilities values
+ * @counts: values of symbol counts after the current frame has been decoded
+ * @reference_mode: specifies the type of inter prediction to be used. See
+ * &v4l2_vp9_reference_mode for more details
+ * @interpolation_filter: specifies the filter selection used for performing inter prediction.
+ * See &v4l2_vp9_interpolation_filter for more details
+ * @tx_mode: specifies the TX mode. See &v4l2_vp9_tx_mode for more details
+ * @flags: combination of V4L2_VP9_FRAME_FLAG_* flags
+ *
+ * This function performs backward updates of non-coefficients probabilities for the vp9 boolean
+ * decoder. After a frame has been decoded the counts of how many times a given symbol has
+ * occurred are known and are used to update the probability of each symbol.
+ */
+void v4l2_vp9_adapt_noncoef_probs(struct v4l2_vp9_frame_context *probs,
+ struct v4l2_vp9_frame_symbol_counts *counts,
+ u8 reference_mode, u8 interpolation_filter, u8 tx_mode,
+ u32 flags);
+
+/**
+ * v4l2_vp9_seg_feat_enabled() - Check if a segmentation feature is enabled
+ *
+ * @feature_enabled: array of 8-bit flags (for all segments)
+ * @feature: id of the feature to check
+ * @segid: id of the segment to look up
+ *
+ * This function returns true if a given feature is active in a given segment.
+ */
+bool
+v4l2_vp9_seg_feat_enabled(const u8 *feature_enabled,
+ unsigned int feature,
+ unsigned int segid);
+
+#endif /* _MEDIA_V4L2_VP9_H */
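Putting the helpers together, the per-frame probability flow in a stateless vp9 decoder might look like this sketch (struct my_vp9_ctx is hypothetical; intra-only and error-resilient frame handling, and the choice of the use_128 factor, are simplified):

#include <media/v4l2-vp9.h>

struct my_vp9_ctx {
	struct v4l2_vp9_frame_context frame_ctx[4];
	struct v4l2_vp9_frame_symbol_counts counts;
};

static void my_vp9_update_probs(struct my_vp9_ctx *ctx,
				const struct v4l2_ctrl_vp9_frame *dec_params,
				const struct v4l2_ctrl_vp9_compressed_hdr *hdr)
{
	u8 idx = v4l2_vp9_reset_frame_ctx(dec_params, ctx->frame_ctx);
	struct v4l2_vp9_frame_context *probs = &ctx->frame_ctx[idx];
	bool intra = dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME;

	/* Forward update from the compressed-header deltas. */
	v4l2_vp9_fw_update_probs(probs, hdr, dec_params);

	/* ... the hardware decodes the frame and fills ctx->counts ... */

	/* Backward adaptation from the observed symbol counts. */
	v4l2_vp9_adapt_coef_probs(probs, &ctx->counts, false, intra);
	if (!intra)
		v4l2_vp9_adapt_noncoef_probs(probs, &ctx->counts,
					     dec_params->reference_mode,
					     dec_params->interpolation_filter,
					     hdr->tx_mode, dec_params->flags);
}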
diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h
index b89d5e31f172..930ff8d454fc 100644
--- a/include/media/videobuf-dma-sg.h
+++ b/include/media/videobuf-dma-sg.h
@@ -31,7 +31,7 @@
* does memory allocation too using vmalloc_32().
*
* videobuf_dma_*()
- * see Documentation/DMA-API-HOWTO.txt, these functions to
+ * see Documentation/core-api/dma-api-howto.rst, these functions do
* basically the same. The map function does also build a
* scatterlist for the buffer (and unmap frees it ...)
*
@@ -60,7 +60,7 @@ struct videobuf_dmabuf {
/* common */
struct scatterlist *sglist;
int sglen;
- int nr_pages;
+ unsigned long nr_pages;
int direction;
};
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index a2b2208b02da..3253bd2f6fee 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -18,6 +18,7 @@
#include <linux/dma-buf.h>
#include <linux/bitops.h>
#include <media/media-request.h>
+#include <media/frame_vector.h>
#define VB2_MAX_FRAME (32)
#define VB2_MAX_PLANES (8)
@@ -45,6 +46,7 @@ enum vb2_memory {
struct vb2_fileio_data;
struct vb2_threadio_data;
+struct vb2_buffer;
/**
* struct vb2_mem_ops - memory handling/memory allocator operations.
@@ -52,10 +54,8 @@ struct vb2_threadio_data;
* return ERR_PTR() on failure or a pointer to allocator private,
* per-buffer data on success; the returned private structure
* will then be passed as @buf_priv argument to other ops in this
- * structure. Additional gfp_flags to use when allocating the
- * are also passed to this operation. These flags are from the
- * gfp_flags field of vb2_queue. The size argument to this function
- * shall be *page aligned*.
+ * structure. The size argument to this function shall be
+ * *page aligned*.
* @put: inform the allocator that the buffer will no longer be used;
* usually will result in the allocator freeing the buffer (if
* no other users of this buffer are present); the @buf_priv
@@ -65,7 +65,7 @@ struct vb2_threadio_data;
* DMABUF memory types.
* @get_userptr: acquire userspace memory for a hardware operation; used for
* USERPTR memory types; vaddr is the address passed to the
- * videobuf layer when queuing a video buffer of USERPTR type;
+ * videobuf2 layer when queuing a video buffer of USERPTR type;
* should return an allocator private per-buffer structure
* associated with the buffer on success, ERR_PTR() on failure;
* the returned private structure will then be passed as @buf_priv
@@ -97,7 +97,7 @@ struct vb2_threadio_data;
* associated with the passed private structure or NULL if not
* available.
* @num_users: return the current number of users of a memory buffer;
- * return 1 if the videobuf layer (or actually the driver using
+ * return 1 if the videobuf2 layer (or actually the driver using
* it) is the only user.
* @mmap: setup a userspace mapping for a given memory buffer under
* the provided virtual memory region.
@@ -116,31 +116,33 @@ struct vb2_threadio_data;
* map_dmabuf, unmap_dmabuf.
*/
struct vb2_mem_ops {
- void *(*alloc)(struct device *dev, unsigned long attrs,
- unsigned long size,
- enum dma_data_direction dma_dir,
- gfp_t gfp_flags);
+ void *(*alloc)(struct vb2_buffer *vb,
+ struct device *dev,
+ unsigned long size);
void (*put)(void *buf_priv);
- struct dma_buf *(*get_dmabuf)(void *buf_priv, unsigned long flags);
-
- void *(*get_userptr)(struct device *dev, unsigned long vaddr,
- unsigned long size,
- enum dma_data_direction dma_dir);
+ struct dma_buf *(*get_dmabuf)(struct vb2_buffer *vb,
+ void *buf_priv,
+ unsigned long flags);
+
+ void *(*get_userptr)(struct vb2_buffer *vb,
+ struct device *dev,
+ unsigned long vaddr,
+ unsigned long size);
void (*put_userptr)(void *buf_priv);
void (*prepare)(void *buf_priv);
void (*finish)(void *buf_priv);
- void *(*attach_dmabuf)(struct device *dev,
+ void *(*attach_dmabuf)(struct vb2_buffer *vb,
+ struct device *dev,
struct dma_buf *dbuf,
- unsigned long size,
- enum dma_data_direction dma_dir);
+ unsigned long size);
void (*detach_dmabuf)(void *buf_priv);
int (*map_dmabuf)(void *buf_priv);
void (*unmap_dmabuf)(void *buf_priv);
- void *(*vaddr)(void *buf_priv);
- void *(*cookie)(void *buf_priv);
+ void *(*vaddr)(struct vb2_buffer *vb, void *buf_priv);
+ void *(*cookie)(struct vb2_buffer *vb, void *buf_priv);
unsigned int (*num_users)(void *buf_priv);
@@ -153,9 +155,11 @@ struct vb2_mem_ops {
* @dbuf: dma_buf - shared buffer object.
* @dbuf_mapped: flag to show whether dbuf is mapped or not
* @bytesused: number of bytes occupied by data in the plane (payload).
- * @length: size of this plane (NOT the payload) in bytes.
+ * @length: size of this plane (NOT the payload) in bytes. The maximum
+ * valid size is UINT_MAX - PAGE_SIZE.
* @min_length: minimum required size of this plane (NOT the payload) in bytes.
- * @length is always greater or equal to @min_length.
+ * @length is always greater than or equal to @min_length, and like
+ * @length, it is limited to UINT_MAX - PAGE_SIZE.
* @m: Union with memtype-specific data.
* @m.offset: when memory in the associated struct vb2_buffer is
* %VB2_MEMORY_MMAP, equals the offset from the start of
@@ -206,11 +210,11 @@ enum vb2_io_modes {
* enum vb2_buffer_state - current video buffer state.
* @VB2_BUF_STATE_DEQUEUED: buffer under userspace control.
* @VB2_BUF_STATE_IN_REQUEST: buffer is queued in media request.
- * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf.
- * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver.
+ * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf2.
+ * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf2, but not in driver.
* @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used
* in a hardware operation.
- * @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but
+ * @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf2, but
* not yet dequeued to userspace.
* @VB2_BUF_STATE_ERROR: same as above, but the operation on the buffer
* has ended with an error, which will be reported
@@ -263,6 +267,10 @@ struct vb2_buffer {
* after the 'buf_finish' op is called.
* copied_timestamp: the timestamp of this capture buffer was copied
* from an output buffer.
+ * skip_cache_sync_on_prepare: when set, the buffer's ->prepare() function
+ * skips cache sync/invalidation.
+ * skip_cache_sync_on_finish: when set, the buffer's ->finish() function
+ * skips cache sync/invalidation.
* queued_entry: entry on the queued buffers list, which holds
* all buffers queued from userspace
* done_entry: entry on the list that stores all buffers ready
@@ -273,6 +281,8 @@ struct vb2_buffer {
unsigned int synced:1;
unsigned int prepared:1;
unsigned int copied_timestamp:1;
+ unsigned int skip_cache_sync_on_prepare:1;
+ unsigned int skip_cache_sync_on_finish:1;
struct vb2_plane planes[VB2_MAX_PLANES];
struct list_head queued_entry;
@@ -456,7 +466,7 @@ struct vb2_buf_ops {
};
/**
- * struct vb2_queue - a videobuf queue.
+ * struct vb2_queue - a videobuf2 queue.
*
* @type: private buffer type whose content is defined by the vb2-core
* caller. For example, for V4L2, it should match
@@ -491,6 +501,11 @@ struct vb2_buf_ops {
* @uses_requests: requests are used for this queue. Set to 1 the first time
* a request is queued. Set to 0 when the queue is canceled.
* If this is 1, then you cannot queue buffers directly.
+ * @allow_cache_hints: when set, user-space can pass cache management hints in
+ * order to skip cache flush/invalidation on ->prepare() and/or
+ * ->finish().
+ * @non_coherent_mem: when set, the queue will attempt to allocate buffers
+ * using non-coherent memory.
* @lock: pointer to a mutex that protects the &struct vb2_queue. The
* driver can set this to a mutex to let the v4l2 core serialize
* the queuing ioctls. If the driver wants to handle locking
@@ -509,8 +524,11 @@ struct vb2_buf_ops {
* by the vb2 core.
* @buf_struct_size: size of the driver-specific buffer structure;
* "0" indicates the driver doesn't want to use a custom buffer
- * structure type. for example, ``sizeof(struct vb2_v4l2_buffer)``
- * will be used for v4l2.
+ *		structure type. In that case, the size of a subsystem-specific
+ *		struct is used instead (for V4L2 that is
+ *		``sizeof(struct vb2_v4l2_buffer)``). The first field of the
+ * driver-specific buffer structure must be the subsystem-specific
+ * struct (vb2_v4l2_buffer in the case of V4L2).
* @timestamp_flags: Timestamp flags; ``V4L2_BUF_FLAG_TIMESTAMP_*`` and
* ``V4L2_BUF_FLAG_TSTAMP_SRC_*``
* @gfp_flags: additional gfp flags used when allocating the buffers.
@@ -526,7 +544,7 @@ struct vb2_buf_ops {
* @mmap_lock: private mutex used when buffers are allocated/freed/mmapped
* @memory: current memory type used
* @dma_dir: DMA mapping direction.
- * @bufs: videobuf buffer structures
+ * @bufs: videobuf2 buffer structures
* @num_buffers: number of allocated/used buffers
* @queued_list: list of buffers currently queued from userspace
* @queued_count: number of buffers queued and ready for streaming.
@@ -549,21 +567,25 @@ struct vb2_buf_ops {
* when a buffer with the %V4L2_BUF_FLAG_LAST is dequeued.
* @fileio: file io emulator internal data, used only if emulator is active
* @threadio: thread io internal data, used only if thread is active
+ * @name:	queue name, used for logging purposes. Initialized automatically
+ * if left empty by drivers.
*/
struct vb2_queue {
unsigned int type;
unsigned int io_modes;
struct device *dev;
unsigned long dma_attrs;
- unsigned bidirectional:1;
- unsigned fileio_read_once:1;
- unsigned fileio_write_immediately:1;
- unsigned allow_zero_bytesused:1;
- unsigned quirk_poll_must_check_waiting_for_buffers:1;
- unsigned supports_requests:1;
- unsigned requires_requests:1;
- unsigned uses_qbuf:1;
- unsigned uses_requests:1;
+ unsigned int bidirectional:1;
+ unsigned int fileio_read_once:1;
+ unsigned int fileio_write_immediately:1;
+ unsigned int allow_zero_bytesused:1;
+ unsigned int quirk_poll_must_check_waiting_for_buffers:1;
+ unsigned int supports_requests:1;
+ unsigned int requires_requests:1;
+ unsigned int uses_qbuf:1;
+ unsigned int uses_requests:1;
+ unsigned int allow_cache_hints:1;
+ unsigned int non_coherent_mem:1;
struct mutex *lock;
void *owner;
@@ -609,6 +631,8 @@ struct vb2_queue {
struct vb2_fileio_data *fileio;
struct vb2_threadio_data *threadio;
+ char name[32];
+
#ifdef CONFIG_VIDEO_ADV_DEBUG
/*
* Counters for how often these queue-related ops are
@@ -623,6 +647,17 @@ struct vb2_queue {
};
/**
+ * vb2_queue_allows_cache_hints() - Return true if the queue allows cache
+ * and memory consistency hints.
+ *
+ * @q: pointer to &struct vb2_queue with videobuf2 queue
+ */
+static inline bool vb2_queue_allows_cache_hints(struct vb2_queue *q)
+{
+ return q->allow_cache_hints && q->memory == VB2_MEMORY_MMAP;
+}
+
+/**
* vb2_plane_vaddr() - Return a kernel virtual address of a given plane.
* @vb: pointer to &struct vb2_buffer to which the plane in
* question belongs to.
@@ -648,7 +683,7 @@ void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no);
void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no);
/**
- * vb2_buffer_done() - inform videobuf that an operation on a buffer
+ * vb2_buffer_done() - inform videobuf2 that an operation on a buffer
* is finished.
* @vb: pointer to &struct vb2_buffer to be used.
* @state: state of the buffer, as defined by &enum vb2_buffer_state.
@@ -716,6 +751,8 @@ void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb);
* vb2_core_reqbufs() - Initiate streaming.
* @q: pointer to &struct vb2_queue with videobuf2 queue.
* @memory: memory type, as defined by &enum vb2_memory.
+ * @flags: auxiliary queue/buffer management flags. Currently, the only
+ * used flag is %V4L2_MEMORY_FLAG_NON_COHERENT.
* @count: requested buffer count.
*
* Videobuf2 core helper to implement VIDIOC_REQBUF() operation. It is called
@@ -740,12 +777,13 @@ void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb);
* Return: returns zero on success; an error code otherwise.
*/
int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
- unsigned int *count);
+ unsigned int flags, unsigned int *count);
/**
* vb2_core_create_bufs() - Allocate buffers and any required auxiliary structs
* @q: pointer to &struct vb2_queue with videobuf2 queue.
* @memory: memory type, as defined by &enum vb2_memory.
+ * @flags: auxiliary queue/buffer management flags.
* @count: requested buffer count.
* @requested_planes: number of planes requested.
* @requested_sizes: array with the size of the planes.
@@ -763,7 +801,8 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
* Return: returns zero on success; an error code otherwise.
*/
int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
- unsigned int *count, unsigned int requested_planes,
+ unsigned int flags, unsigned int *count,
+ unsigned int requested_planes,
const unsigned int requested_sizes[]);
/**
@@ -1014,7 +1053,7 @@ __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
loff_t *ppos, int nonblock);
/**
- * vb2_read() - implements write() syscall logic.
+ * vb2_write() - implements write() syscall logic.
* @q: pointer to &struct vb2_queue with videobuf2 queue.
 * @data:	pointer to the target userspace buffer
* @count: number of bytes to write
@@ -1116,8 +1155,15 @@ static inline void *vb2_get_drv_priv(struct vb2_queue *q)
static inline void vb2_set_plane_payload(struct vb2_buffer *vb,
unsigned int plane_no, unsigned long size)
{
- if (plane_no < vb->num_planes)
+ /*
+ * size must never be larger than the buffer length, so
+ * warn and clamp to the buffer length if that's the case.
+ */
+ if (plane_no < vb->num_planes) {
+ if (WARN_ON_ONCE(size > vb->planes[plane_no].length))
+ size = vb->planes[plane_no].length;
vb->planes[plane_no].bytesused = size;
+ }
}
/**
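To illustrate why the new clamp matters (a hypothetical completion path, not from this patch): a capture driver typically sets the payload from a hardware byte count just before completing the buffer, so a bogus count now trips a one-shot warning instead of leaking an oversized payload to user-space.

static void example_buffer_complete(struct vb2_v4l2_buffer *vbuf,
				    unsigned int hw_bytes)
{
	vbuf->vb2_buf.timestamp = ktime_get_ns();
	/* Clamped (with a WARN_ON_ONCE) if hw_bytes exceeds the plane. */
	vb2_set_plane_payload(&vbuf->vb2_buf, 0, hw_bytes);
	vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
}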
diff --git a/include/media/videobuf2-dma-contig.h b/include/media/videobuf2-dma-contig.h
index 5604818d137e..5be313cbf7d7 100644
--- a/include/media/videobuf2-dma-contig.h
+++ b/include/media/videobuf2-dma-contig.h
@@ -25,7 +25,7 @@ vb2_dma_contig_plane_dma_addr(struct vb2_buffer *vb, unsigned int plane_no)
}
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size);
-void vb2_dma_contig_clear_max_seg_size(struct device *dev);
+static inline void vb2_dma_contig_clear_max_seg_size(struct device *dev) { }
extern const struct vb2_mem_ops vb2_dma_contig_memops;
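With the clear call reduced to an empty inline stub, callers no longer need a teardown path for it. A probe-time sketch (example_probe() is hypothetical):

static int example_probe(struct platform_device *pdev)
{
	int ret;

	/* Allow the DMA layer to merge segments for dma-contig buffers. */
	ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* No matching vb2_dma_contig_clear_max_seg_size() is needed on the
	 * error/remove paths anymore; it is a no-op now. */
	return 0;
}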
diff --git a/include/media/videobuf2-dvb.h b/include/media/videobuf2-dvb.h
index 8605366ec87c..2d577b945637 100644
--- a/include/media/videobuf2-dvb.h
+++ b/include/media/videobuf2-dvb.h
@@ -24,7 +24,7 @@ struct vb2_dvb {
struct dvb_frontend *frontend;
struct vb2_queue dvbq;
- /* video-buf-dvb state info */
+ /* vb2-dvb state info */
struct mutex lock;
int nfeeds;
diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h
index 59bf33a12648..5a845887850b 100644
--- a/include/media/videobuf2-v4l2.h
+++ b/include/media/videobuf2-v4l2.h
@@ -23,6 +23,8 @@
#error VB2_MAX_PLANES != VIDEO_MAX_PLANES
#endif
+struct video_device;
+
/**
* struct vb2_v4l2_buffer - video buffer information for v4l2.
*
@@ -61,20 +63,14 @@ struct vb2_v4l2_buffer {
container_of(vb, struct vb2_v4l2_buffer, vb2_buf)
/**
- * vb2_find_timestamp() - Find buffer with given timestamp in the queue
+ * vb2_find_buffer() - Find a buffer with given timestamp
*
* @q: pointer to &struct vb2_queue with videobuf2 queue.
* @timestamp: the timestamp to find.
- * @start_idx: the start index (usually 0) in the buffer array to start
- * searching from. Note that there may be multiple buffers
- * with the same timestamp value, so you can restart the search
- * by setting @start_idx to the previously found index + 1.
*
- * Returns the buffer index of the buffer with the given @timestamp, or
- * -1 if no buffer with @timestamp was found.
+ * Returns the buffer with the given @timestamp, or NULL if not found.
*/
-int vb2_find_timestamp(const struct vb2_queue *q, u64 timestamp,
- unsigned int start_idx);
+struct vb2_buffer *vb2_find_buffer(struct vb2_queue *q, u64 timestamp);
int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b);
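Callers migrate from the index-based lookup to the new pointer-returning helper roughly like this (a sketch; the old code shape is shown in the comment):

static struct vb2_buffer *example_lookup(struct vb2_queue *q, u64 timestamp)
{
	/* Before:
	 *	int idx = vb2_find_timestamp(q, timestamp, 0);
	 *	if (idx < 0) return NULL;
	 *	return q->bufs[idx];
	 */
	return vb2_find_buffer(q, timestamp);	/* NULL if not found */
}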
@@ -237,6 +233,19 @@ int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type);
int __must_check vb2_queue_init(struct vb2_queue *q);
/**
+ * vb2_queue_init_name() - initialize a videobuf2 queue with a name
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @name: the queue name
+ *
+ * This function initializes the vb2_queue exactly like vb2_queue_init(),
+ * and additionally sets the queue name. The queue name is used for logging
+ * purpose, and should uniquely identify the queue within the context of the
+ * device it belongs to. This is useful to attribute kernel log messages to the
+ * right queue for m2m devices or other devices that handle multiple queues.
+ */
+int __must_check vb2_queue_init_name(struct vb2_queue *q, const char *name);
+
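For an m2m device, the two queues might be named in the driver's queue_init callback; a sketch with hypothetical queue names:

static int example_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
				  struct vb2_queue *dst_vq)
{
	int ret;

	/* ... fill in src_vq and dst_vq fields as usual ... */

	ret = vb2_queue_init_name(src_vq, "example-source");
	if (ret)
		return ret;

	return vb2_queue_init_name(dst_vq, "example-sink");
}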
+/**
* vb2_queue_release() - stop streaming, release the queue and free memory
* @q: pointer to &struct vb2_queue with videobuf2 queue.
*
@@ -247,6 +256,22 @@ int __must_check vb2_queue_init(struct vb2_queue *q);
void vb2_queue_release(struct vb2_queue *q);
/**
+ * vb2_queue_change_type() - change the type of an inactive vb2_queue
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @type: the type to change to (V4L2_BUF_TYPE_VIDEO_*)
+ *
+ * This function changes the type of the vb2_queue. This is only possible
+ * if the queue is not busy (i.e. no buffers have been allocated).
+ *
+ * vb2_queue_change_type() can be used to support multiple buffer types using
+ * the same queue. The driver can implement v4l2_ioctl_ops.vidioc_reqbufs and
+ * v4l2_ioctl_ops.vidioc_create_bufs functions and call vb2_queue_change_type()
+ * before calling vb2_ioctl_reqbufs() or vb2_ioctl_create_bufs(), and thus
+ * "lock" the buffer type until the buffers have been released.
+ */
+int vb2_queue_change_type(struct vb2_queue *q, unsigned int type);
+
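The pattern described above looks roughly like this in a driver's reqbufs handler (a sketch; the example_ctx/video_drvdata() wiring is assumed):

static int example_vidioc_reqbufs(struct file *file, void *priv,
				  struct v4l2_requestbuffers *rb)
{
	struct example_ctx *ctx = video_drvdata(file);
	int ret;

	/* Fails with -EBUSY if buffers are already allocated. */
	ret = vb2_queue_change_type(&ctx->queue, rb->type);
	if (ret)
		return ret;

	return vb2_ioctl_reqbufs(file, priv, rb);
}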
+/**
* vb2_poll() - implements poll userspace operation
* @q: pointer to &struct vb2_queue with videobuf2 queue.
* @file: file argument passed to the poll file operation handler
@@ -271,10 +296,29 @@ __poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait);
* The following functions are not part of the vb2 core API, but are simple
* helper functions that you can use in your struct v4l2_file_operations,
* struct v4l2_ioctl_ops and struct vb2_ops. They will serialize if vb2_queue->lock
- * or video_device->lock is set, and they will set and test vb2_queue->owner
- * to check if the calling filehandle is permitted to do the queuing operation.
+ * or video_device->lock is set, and they will set and test the queue owner
+ * (vb2_queue->owner) to check if the calling filehandle is permitted to do the
+ * queuing operation.
*/
+/**
+ * vb2_queue_is_busy() - check if the queue is busy
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @file: file through which the vb2 queue access is performed
+ *
+ * The queue is considered busy if it has an owner and the owner is not the
+ * @file.
+ *
+ * Queue ownership is acquired and checked by some of the v4l2_ioctl_ops helpers
+ * below. Drivers can also use this function directly when they need to
+ * open-code ioctl handlers, for instance to add additional checks between the
+ * queue ownership test and the call to the corresponding vb2 operation.
+ */
+static inline bool vb2_queue_is_busy(struct vb2_queue *q, struct file *file)
+{
+ return q->owner && q->owner != file->private_data;
+}
+
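An open-coded handler using the helper could look like this (a sketch; the extra validation step is the point of open-coding):

static int example_vidioc_create_bufs(struct file *file, void *priv,
				      struct v4l2_create_buffers *create)
{
	struct example_ctx *ctx = video_drvdata(file);

	if (vb2_queue_is_busy(&ctx->queue, file))
		return -EBUSY;

	/* driver-specific checks on 'create' would go here */

	return vb2_create_bufs(&ctx->queue, create);
}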
/* struct v4l2_ioctl_ops helpers */
int vb2_ioctl_reqbufs(struct file *file, void *priv,
@@ -307,6 +351,21 @@ unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
#endif
/**
+ * vb2_video_unregister_device - unregister the video device and release queue
+ *
+ * @vdev: pointer to &struct video_device
+ *
+ * If the driver uses vb2_fop_release()/_vb2_fop_release(), then it should use
+ * vb2_video_unregister_device() instead of video_unregister_device().
+ *
+ * This function will call video_unregister_device() and then release the
+ * vb2_queue if streaming is in progress. This stops streaming and
+ * simplifies the unbind sequence, since after this call all subdevs
+ * will have stopped streaming as well.
+ */
+void vb2_video_unregister_device(struct video_device *vdev);
+
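In a platform driver's remove path this is a one-line substitution (a sketch; struct example_dev is hypothetical):

static int example_remove(struct platform_device *pdev)
{
	struct example_dev *dev = platform_get_drvdata(pdev);

	/* Instead of video_unregister_device(): this also stops any active
	 * streaming, so subdevs are idle for the rest of the unbind. */
	vb2_video_unregister_device(&dev->vdev);

	return 0;
}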
+/**
* vb2_ops_wait_prepare - helper function to lock a struct &vb2_queue
*
* @vq: pointer to &struct vb2_queue
diff --git a/include/media/vp8-ctrls.h b/include/media/vp8-ctrls.h
deleted file mode 100644
index 53cba826e482..000000000000
--- a/include/media/vp8-ctrls.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * These are the VP8 state controls for use with stateless VP8
- * codec drivers.
- *
- * It turns out that these structs are not stable yet and will undergo
- * more changes. So keep them private until they are stable and ready to
- * become part of the official public API.
- */
-
-#ifndef _VP8_CTRLS_H_
-#define _VP8_CTRLS_H_
-
-#include <linux/types.h>
-
-#define V4L2_PIX_FMT_VP8_FRAME v4l2_fourcc('V', 'P', '8', 'F')
-
-#define V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER (V4L2_CID_MPEG_BASE + 2000)
-#define V4L2_CTRL_TYPE_VP8_FRAME_HEADER 0x301
-
-#define V4L2_VP8_SEGMENT_HEADER_FLAG_ENABLED 0x01
-#define V4L2_VP8_SEGMENT_HEADER_FLAG_UPDATE_MAP 0x02
-#define V4L2_VP8_SEGMENT_HEADER_FLAG_UPDATE_FEATURE_DATA 0x04
-#define V4L2_VP8_SEGMENT_HEADER_FLAG_DELTA_VALUE_MODE 0x08
-
-struct v4l2_vp8_segment_header {
- __s8 quant_update[4];
- __s8 lf_update[4];
- __u8 segment_probs[3];
- __u8 padding;
- __u32 flags;
-};
-
-#define V4L2_VP8_LF_HEADER_ADJ_ENABLE 0x01
-#define V4L2_VP8_LF_HEADER_DELTA_UPDATE 0x02
-#define V4L2_VP8_LF_FILTER_TYPE_SIMPLE 0x04
-struct v4l2_vp8_loopfilter_header {
- __s8 ref_frm_delta[4];
- __s8 mb_mode_delta[4];
- __u8 sharpness_level;
- __u8 level;
- __u16 padding;
- __u32 flags;
-};
-
-struct v4l2_vp8_quantization_header {
- __u8 y_ac_qi;
- __s8 y_dc_delta;
- __s8 y2_dc_delta;
- __s8 y2_ac_delta;
- __s8 uv_dc_delta;
- __s8 uv_ac_delta;
- __u16 padding;
-};
-
-struct v4l2_vp8_entropy_header {
- __u8 coeff_probs[4][8][3][11];
- __u8 y_mode_probs[4];
- __u8 uv_mode_probs[3];
- __u8 mv_probs[2][19];
- __u8 padding[3];
-};
-
-struct v4l2_vp8_entropy_coder_state {
- __u8 range;
- __u8 value;
- __u8 bit_count;
- __u8 padding;
-};
-
-#define V4L2_VP8_FRAME_HEADER_FLAG_KEY_FRAME 0x01
-#define V4L2_VP8_FRAME_HEADER_FLAG_EXPERIMENTAL 0x02
-#define V4L2_VP8_FRAME_HEADER_FLAG_SHOW_FRAME 0x04
-#define V4L2_VP8_FRAME_HEADER_FLAG_MB_NO_SKIP_COEFF 0x08
-#define V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_GOLDEN 0x10
-#define V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_ALT 0x20
-
-#define VP8_FRAME_IS_KEY_FRAME(hdr) \
- (!!((hdr)->flags & V4L2_VP8_FRAME_HEADER_FLAG_KEY_FRAME))
-
-struct v4l2_ctrl_vp8_frame_header {
- struct v4l2_vp8_segment_header segment_header;
- struct v4l2_vp8_loopfilter_header lf_header;
- struct v4l2_vp8_quantization_header quant_header;
- struct v4l2_vp8_entropy_header entropy_header;
- struct v4l2_vp8_entropy_coder_state coder_state;
-
- __u16 width;
- __u16 height;
-
- __u8 horizontal_scale;
- __u8 vertical_scale;
-
- __u8 version;
- __u8 prob_skip_false;
- __u8 prob_intra;
- __u8 prob_last;
- __u8 prob_gf;
- __u8 num_dct_parts;
-
- __u32 first_part_size;
- __u32 first_part_header_bits;
- __u32 dct_part_sizes[8];
-
- __u64 last_frame_ts;
- __u64 golden_frame_ts;
- __u64 alt_frame_ts;
-
- __u64 flags;
-};
-
-#endif
diff --git a/include/media/vsp1.h b/include/media/vsp1.h
index cc1b0d42ce95..48f4a5023d81 100644
--- a/include/media/vsp1.h
+++ b/include/media/vsp1.h
@@ -51,6 +51,7 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index,
* @dst: destination rectangle on the display (integer coordinates)
* @alpha: alpha value (0: fully transparent, 255: fully opaque)
* @zpos: Z position of the plane (from 0 to number of planes minus 1)
+ * @premult: true for premultiplied alpha
*/
struct vsp1_du_atomic_config {
u32 pixelformat;
@@ -60,6 +61,7 @@ struct vsp1_du_atomic_config {
struct v4l2_rect dst;
unsigned int alpha;
unsigned int zpos;
+ bool premult;
};
/**
diff --git a/include/memory/renesas-rpc-if.h b/include/memory/renesas-rpc-if.h
new file mode 100644
index 000000000000..9c0ad64b8d29
--- /dev/null
+++ b/include/memory/renesas-rpc-if.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Renesas RPC-IF core driver
+ *
+ * Copyright (C) 2018~2019 Renesas Solutions Corp.
+ * Copyright (C) 2019 Macronix International Co., Ltd.
+ * Copyright (C) 2019-2020 Cogent Embedded, Inc.
+ */
+
+#ifndef __RENESAS_RPC_IF_H
+#define __RENESAS_RPC_IF_H
+
+#include <linux/pm_runtime.h>
+#include <linux/types.h>
+
+enum rpcif_data_dir {
+ RPCIF_NO_DATA,
+ RPCIF_DATA_IN,
+ RPCIF_DATA_OUT,
+};
+
+struct rpcif_op {
+ struct {
+ u8 buswidth;
+ u8 opcode;
+ bool ddr;
+ } cmd, ocmd;
+
+ struct {
+ u8 nbytes;
+ u8 buswidth;
+ bool ddr;
+ u64 val;
+ } addr;
+
+ struct {
+ u8 ncycles;
+ u8 buswidth;
+ } dummy;
+
+ struct {
+ u8 nbytes;
+ u8 buswidth;
+ bool ddr;
+ u32 val;
+ } option;
+
+ struct {
+ u8 buswidth;
+ unsigned int nbytes;
+ enum rpcif_data_dir dir;
+ bool ddr;
+ union {
+ void *in;
+ const void *out;
+ } buf;
+ } data;
+};
+
+enum rpcif_type {
+ RPCIF_RCAR_GEN3,
+ RPCIF_RZ_G2L,
+};
+
+struct rpcif {
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *dirmap;
+ struct regmap *regmap;
+ struct reset_control *rstc;
+ size_t size;
+ enum rpcif_type type;
+ enum rpcif_data_dir dir;
+ u8 bus_size;
+ u8 xfer_size;
+ void *buffer;
+ u32 xferlen;
+ u32 smcr;
+ u32 smadr;
+ u32 command; /* DRCMR or SMCMR */
+ u32 option; /* DROPR or SMOPR */
+ u32 enable; /* DRENR or SMENR */
+ u32 dummy; /* DRDMCR or SMDMCR */
+ u32 ddr; /* DRDRENR or SMDRENR */
+};
+
+int rpcif_sw_init(struct rpcif *rpc, struct device *dev);
+int rpcif_hw_init(struct rpcif *rpc, bool hyperflash);
+void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs,
+ size_t *len);
+int rpcif_manual_xfer(struct rpcif *rpc);
+ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf);
+
+static inline void rpcif_enable_rpm(struct rpcif *rpc)
+{
+ pm_runtime_enable(rpc->dev);
+}
+
+static inline void rpcif_disable_rpm(struct rpcif *rpc)
+{
+ pm_runtime_disable(rpc->dev);
+}
+
+#endif // __RENESAS_RPC_IF_H
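A front-end driver (SPI flash or HyperFlash) would be expected to drive this API roughly as follows. This is a sketch of the call order only, with error handling trimmed and all example_* names hypothetical:

static int example_rpcif_attach(struct rpcif *rpc, struct device *dev)
{
	int ret;

	ret = rpcif_sw_init(rpc, dev);		/* map resources, regmap */
	if (ret)
		return ret;

	rpcif_enable_rpm(rpc);

	ret = rpcif_hw_init(rpc, false);	/* false: SPI flash mode */
	if (ret)
		rpcif_disable_rpm(rpc);

	return ret;
}

static int example_rpcif_xfer(struct rpcif *rpc, struct rpcif_op *op)
{
	u64 offs = 0;
	size_t len = op->data.nbytes;

	rpcif_prepare(rpc, op, &offs, &len);	/* program transfer registers */
	return rpcif_manual_xfer(rpc);
}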
diff --git a/include/misc/ocxl-config.h b/include/misc/ocxl-config.h
index 3526fa996a22..ccfd3b463517 100644
--- a/include/misc/ocxl-config.h
+++ b/include/misc/ocxl-config.h
@@ -41,5 +41,6 @@
#define OCXL_DVSEC_VENDOR_CFG_VERS 0x0C
#define OCXL_DVSEC_VENDOR_TLX_VERS 0x10
#define OCXL_DVSEC_VENDOR_DLX_VERS 0x20
+#define OCXL_DVSEC_VENDOR_RESET_RELOAD 0x38
#endif /* _OCXL_CONFIG_H_ */
diff --git a/include/misc/ocxl.h b/include/misc/ocxl.h
index 06dd5839e438..3ed736da02c8 100644
--- a/include/misc/ocxl.h
+++ b/include/misc/ocxl.h
@@ -62,8 +62,7 @@ struct ocxl_context;
// Device detection & initialisation
/**
- * Open an OpenCAPI function on an OpenCAPI device
- *
+ * ocxl_function_open() - Open an OpenCAPI function on an OpenCAPI device
* @dev: The PCI device that contains the function
*
* Returns an opaque pointer to the function, or an error pointer (check with IS_ERR)
@@ -71,8 +70,7 @@ struct ocxl_context;
struct ocxl_fn *ocxl_function_open(struct pci_dev *dev);
/**
- * Get the list of AFUs associated with a PCI function device
- *
+ * ocxl_function_afu_list() - Get the list of AFUs associated with a PCI function device
* Returns a list of struct ocxl_afu *
*
* @fn: The OpenCAPI function containing the AFUs
@@ -80,8 +78,7 @@ struct ocxl_fn *ocxl_function_open(struct pci_dev *dev);
struct list_head *ocxl_function_afu_list(struct ocxl_fn *fn);
/**
- * Fetch an AFU instance from an OpenCAPI function
- *
+ * ocxl_function_fetch_afu() - Fetch an AFU instance from an OpenCAPI function
* @fn: The OpenCAPI function to get the AFU from
* @afu_idx: The index of the AFU to get
*
@@ -92,23 +89,20 @@ struct list_head *ocxl_function_afu_list(struct ocxl_fn *fn);
struct ocxl_afu *ocxl_function_fetch_afu(struct ocxl_fn *fn, u8 afu_idx);
/**
- * Take a reference to an AFU
- *
+ * ocxl_afu_get() - Take a reference to an AFU
* @afu: The AFU to increment the reference count on
*/
void ocxl_afu_get(struct ocxl_afu *afu);
/**
- * Release a reference to an AFU
- *
+ * ocxl_afu_put() - Release a reference to an AFU
* @afu: The AFU to decrement the reference count on
*/
void ocxl_afu_put(struct ocxl_afu *afu);
/**
- * Get the configuration information for an OpenCAPI function
- *
+ * ocxl_function_config() - Get the configuration information for an OpenCAPI function
* @fn: The OpenCAPI function to get the config for
*
* Returns the function config, or NULL on error
@@ -116,8 +110,7 @@ void ocxl_afu_put(struct ocxl_afu *afu);
const struct ocxl_fn_config *ocxl_function_config(struct ocxl_fn *fn);
/**
- * Close an OpenCAPI function
- *
+ * ocxl_function_close() - Close an OpenCAPI function
* This will free any AFUs previously retrieved from the function, and
 * detach any associated contexts. The contexts must be freed by the caller.
*
@@ -129,8 +122,7 @@ void ocxl_function_close(struct ocxl_fn *fn);
// Context allocation
/**
- * Allocate an OpenCAPI context
- *
+ * ocxl_context_alloc() - Allocate an OpenCAPI context
* @context: The OpenCAPI context to allocate, must be freed with ocxl_context_free
* @afu: The AFU the context belongs to
* @mapping: The mapping to unmap when the context is closed (may be NULL)
@@ -139,14 +131,13 @@ int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu,
struct address_space *mapping);
/**
- * Free an OpenCAPI context
- *
+ * ocxl_context_free() - Free an OpenCAPI context
* @ctx: The OpenCAPI context to free
*/
void ocxl_context_free(struct ocxl_context *ctx);
/**
- * Grant access to an MM to an OpenCAPI context
+ * ocxl_context_attach() - Grant access to an MM to an OpenCAPI context
* @ctx: The OpenCAPI context to attach
* @amr: The value of the AMR register to restrict access
* @mm: The mm to attach to the context
@@ -157,7 +148,7 @@ int ocxl_context_attach(struct ocxl_context *ctx, u64 amr,
struct mm_struct *mm);
/**
- * Detach an MM from an OpenCAPI context
+ * ocxl_context_detach() - Detach an MM from an OpenCAPI context
 * @ctx: The OpenCAPI context to detach
*
* Returns 0 on success, negative on failure
@@ -167,25 +158,25 @@ int ocxl_context_detach(struct ocxl_context *ctx);
// AFU IRQs
/**
- * Allocate an IRQ associated with an AFU context
+ * ocxl_afu_irq_alloc() - Allocate an IRQ associated with an AFU context
* @ctx: the AFU context
* @irq_id: out, the IRQ ID
*
* Returns 0 on success, negative on failure
*/
-extern int ocxl_afu_irq_alloc(struct ocxl_context *ctx, int *irq_id);
+int ocxl_afu_irq_alloc(struct ocxl_context *ctx, int *irq_id);
/**
- * Frees an IRQ associated with an AFU context
+ * ocxl_afu_irq_free() - Frees an IRQ associated with an AFU context
* @ctx: the AFU context
* @irq_id: the IRQ ID
*
* Returns 0 on success, negative on failure
*/
-extern int ocxl_afu_irq_free(struct ocxl_context *ctx, int irq_id);
+int ocxl_afu_irq_free(struct ocxl_context *ctx, int irq_id);
/**
- * Gets the address of the trigger page for an IRQ
+ * ocxl_afu_irq_get_addr() - Gets the address of the trigger page for an IRQ
* This can then be provided to an AFU which will write to that
* page to trigger the IRQ.
* @ctx: The AFU context that the IRQ is associated with
@@ -193,10 +184,10 @@ extern int ocxl_afu_irq_free(struct ocxl_context *ctx, int irq_id);
*
* returns the trigger page address, or 0 if the IRQ is not valid
*/
-extern u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, int irq_id);
+u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, int irq_id);
/**
- * Provide a callback to be called when an IRQ is triggered
+ * ocxl_irq_set_handler() - Provide a callback to be called when an IRQ is triggered
* @ctx: The AFU context that the IRQ is associated with
* @irq_id: The IRQ ID
* @handler: the callback to be called when the IRQ is triggered
@@ -213,8 +204,7 @@ int ocxl_irq_set_handler(struct ocxl_context *ctx, int irq_id,
// AFU Metadata
/**
- * Get a pointer to the config for an AFU
- *
+ * ocxl_afu_config() - Get a pointer to the config for an AFU
* @afu: a pointer to the AFU to get the config for
*
* Returns a pointer to the AFU config
@@ -222,27 +212,24 @@ int ocxl_irq_set_handler(struct ocxl_context *ctx, int irq_id,
struct ocxl_afu_config *ocxl_afu_config(struct ocxl_afu *afu);
/**
- * Assign opaque hardware specific information to an OpenCAPI AFU.
- *
- * @dev: The PCI device associated with the OpenCAPI device
+ * ocxl_afu_set_private() - Assign opaque hardware specific information to an OpenCAPI AFU.
+ * @afu: The OpenCAPI AFU
* @private: the opaque hardware specific information to assign to the driver
*/
void ocxl_afu_set_private(struct ocxl_afu *afu, void *private);
/**
- * Fetch the hardware specific information associated with an external OpenCAPI
- * AFU. This may be consumed by an external OpenCAPI driver.
- *
- * @afu: The AFU
+ * ocxl_afu_get_private() - Fetch the hardware specific information associated with
+ * an external OpenCAPI AFU. This may be consumed by an external OpenCAPI driver.
+ * @afu: The OpenCAPI AFU
*
* Returns the opaque pointer associated with the device, or NULL if not set
*/
-void *ocxl_afu_get_private(struct ocxl_afu *dev);
+void *ocxl_afu_get_private(struct ocxl_afu *afu);
// Global MMIO
/**
- * Read a 32 bit value from global MMIO
- *
+ * ocxl_global_mmio_read32() - Read a 32 bit value from global MMIO
* @afu: The AFU
* @offset: The Offset from the start of MMIO
* @endian: the endianness that the MMIO data is in
@@ -251,11 +238,10 @@ void *ocxl_afu_get_private(struct ocxl_afu *dev);
* Returns 0 for success, negative on error
*/
int ocxl_global_mmio_read32(struct ocxl_afu *afu, size_t offset,
- enum ocxl_endian endian, u32 *val);
+ enum ocxl_endian endian, u32 *val);
/**
- * Read a 64 bit value from global MMIO
- *
+ * ocxl_global_mmio_read64() - Read a 64 bit value from global MMIO
* @afu: The AFU
* @offset: The Offset from the start of MMIO
* @endian: the endianness that the MMIO data is in
@@ -264,11 +250,10 @@ int ocxl_global_mmio_read32(struct ocxl_afu *afu, size_t offset,
* Returns 0 for success, negative on error
*/
int ocxl_global_mmio_read64(struct ocxl_afu *afu, size_t offset,
- enum ocxl_endian endian, u64 *val);
+ enum ocxl_endian endian, u64 *val);
/**
- * Write a 32 bit value to global MMIO
- *
+ * ocxl_global_mmio_write32() - Write a 32 bit value to global MMIO
* @afu: The AFU
* @offset: The Offset from the start of MMIO
* @endian: the endianness that the MMIO data is in
@@ -277,11 +262,10 @@ int ocxl_global_mmio_read64(struct ocxl_afu *afu, size_t offset,
* Returns 0 for success, negative on error
*/
int ocxl_global_mmio_write32(struct ocxl_afu *afu, size_t offset,
- enum ocxl_endian endian, u32 val);
+ enum ocxl_endian endian, u32 val);
/**
- * Write a 64 bit value to global MMIO
- *
+ * ocxl_global_mmio_write64() - Write a 64 bit value to global MMIO
* @afu: The AFU
* @offset: The Offset from the start of MMIO
* @endian: the endianness that the MMIO data is in
@@ -290,11 +274,10 @@ int ocxl_global_mmio_write32(struct ocxl_afu *afu, size_t offset,
* Returns 0 for success, negative on error
*/
int ocxl_global_mmio_write64(struct ocxl_afu *afu, size_t offset,
- enum ocxl_endian endian, u64 val);
+ enum ocxl_endian endian, u64 val);
/**
- * Set bits in a 32 bit global MMIO register
- *
+ * ocxl_global_mmio_set32() - Set bits in a 32 bit global MMIO register
* @afu: The AFU
* @offset: The Offset from the start of MMIO
* @endian: the endianness that the MMIO data is in
@@ -303,11 +286,10 @@ int ocxl_global_mmio_write64(struct ocxl_afu *afu, size_t offset,
* Returns 0 for success, negative on error
*/
int ocxl_global_mmio_set32(struct ocxl_afu *afu, size_t offset,
- enum ocxl_endian endian, u32 mask);
+ enum ocxl_endian endian, u32 mask);
/**
- * Set bits in a 64 bit global MMIO register
- *
+ * ocxl_global_mmio_set64() - Set bits in a 64 bit global MMIO register
* @afu: The AFU
* @offset: The Offset from the start of MMIO
* @endian: the endianness that the MMIO data is in
@@ -316,11 +298,10 @@ int ocxl_global_mmio_set32(struct ocxl_afu *afu, size_t offset,
* Returns 0 for success, negative on error
*/
int ocxl_global_mmio_set64(struct ocxl_afu *afu, size_t offset,
- enum ocxl_endian endian, u64 mask);
+ enum ocxl_endian endian, u64 mask);
/**
- * Set bits in a 32 bit global MMIO register
- *
+ * ocxl_global_mmio_clear32() - Clear bits in a 32 bit global MMIO register
* @afu: The AFU
* @offset: The Offset from the start of MMIO
* @endian: the endianness that the MMIO data is in
@@ -329,11 +310,10 @@ int ocxl_global_mmio_set64(struct ocxl_afu *afu, size_t offset,
* Returns 0 for success, negative on error
*/
int ocxl_global_mmio_clear32(struct ocxl_afu *afu, size_t offset,
- enum ocxl_endian endian, u32 mask);
+ enum ocxl_endian endian, u32 mask);
/**
- * Set bits in a 64 bit global MMIO register
- *
+ * ocxl_global_mmio_clear64() - Clear bits in a 64 bit global MMIO register
* @afu: The AFU
* @offset: The Offset from the start of MMIO
* @endian: the endianness that the MMIO data is in
@@ -342,7 +322,7 @@ int ocxl_global_mmio_clear32(struct ocxl_afu *afu, size_t offset,
* Returns 0 for success, negative on error
*/
int ocxl_global_mmio_clear64(struct ocxl_afu *afu, size_t offset,
- enum ocxl_endian endian, u64 mask);
+ enum ocxl_endian endian, u64 mask);
// Functions left here are for compatibility with the cxlflash driver
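Taken together, the set/clear helpers give a driver read-modify-write access to AFU control registers. A sketch with a hypothetical register layout (EXAMPLE_* names are not from any real AFU):

#define EXAMPLE_CTRL		0x10	/* hypothetical AFU register */
#define EXAMPLE_CTRL_ENABLE	0x1

static int example_afu_enable(struct ocxl_afu *afu, bool on)
{
	if (on)
		return ocxl_global_mmio_set64(afu, EXAMPLE_CTRL,
					      OCXL_LITTLE_ENDIAN,
					      EXAMPLE_CTRL_ENABLE);

	return ocxl_global_mmio_clear64(afu, EXAMPLE_CTRL,
					OCXL_LITTLE_ENDIAN,
					EXAMPLE_CTRL_ENABLE);
}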
@@ -467,7 +447,7 @@ void ocxl_link_release(struct pci_dev *dev, void *link_handle);
* defined
*/
int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
- u64 amr, struct mm_struct *mm,
+ u64 amr, u16 bdf, struct mm_struct *mm,
void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
void *xsl_err_data);
@@ -480,14 +460,8 @@ int ocxl_link_remove_pe(void *link_handle, int pasid);
* Allocate an AFU interrupt associated to the link.
*
* 'hw_irq' is the hardware interrupt number
- * 'obj_handle' is the 64-bit object handle to be passed to the AFU to
- * trigger the interrupt.
- * On P9, 'obj_handle' is an address, which, if written, triggers the
- * interrupt. It is an MMIO address which needs to be remapped (one
- * page).
- */
-int ocxl_link_irq_alloc(void *link_handle, int *hw_irq,
- u64 *obj_handle);
+ */
+int ocxl_link_irq_alloc(void *link_handle, int *hw_irq);
/*
* Free a previously allocated AFU interrupt
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
index a71378007e61..c80539be1542 100644
--- a/include/net/6lowpan.h
+++ b/include/net/6lowpan.h
@@ -138,7 +138,7 @@ struct lowpan_dev {
struct lowpan_iphc_ctx_table ctx;
/* must be last */
- u8 priv[0] __aligned(sizeof(void *));
+ u8 priv[] __aligned(sizeof(void *));
};
struct lowpan_802154_neigh {
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index 03614de86942..13abe013af21 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * include/net/9p/9p.h
- *
* 9P protocol definitions.
*
* Copyright (C) 2005 by Latchesar Ionkov <lucho@ionkov.net>
@@ -32,13 +30,13 @@
*/
enum p9_debug_flags {
- P9_DEBUG_ERROR = (1<<0),
- P9_DEBUG_9P = (1<<2),
+ P9_DEBUG_ERROR = (1<<0),
+ P9_DEBUG_9P = (1<<2),
P9_DEBUG_VFS = (1<<3),
P9_DEBUG_CONV = (1<<4),
P9_DEBUG_MUX = (1<<5),
P9_DEBUG_TRANS = (1<<6),
- P9_DEBUG_SLABS = (1<<7),
+ P9_DEBUG_SLABS = (1<<7),
P9_DEBUG_FCALL = (1<<8),
P9_DEBUG_FID = (1<<9),
P9_DEBUG_PKT = (1<<10),
@@ -317,8 +315,8 @@ enum p9_qid_t {
};
/* 9P Magic Numbers */
-#define P9_NOTAG (u16)(~0)
-#define P9_NOFID (u32)(~0)
+#define P9_NOTAG ((u16)(~0))
+#define P9_NOFID ((u32)(~0))
#define P9_MAXWELEM 16
/* Minimal header size: size[4] type[1] tag[2] */
@@ -333,6 +331,9 @@ enum p9_qid_t {
/* size of header for zero copy read/write */
#define P9_ZC_HDR_SZ 4096
+/* maximum length of an error string */
+#define P9_ERRMAX 128
+
/**
* struct p9_qid - file system entity information
* @type: 8-bit type &p9_qid_t
@@ -553,6 +554,4 @@ struct p9_fcall {
int p9_errstr2errno(char *errstr, int len);
int p9_error_init(void);
-int p9_trans_fd_init(void);
-void p9_trans_fd_exit(void);
#endif /* NET_9P_H */
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index acc60d8a3b3b..78ebcf782ce5 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * include/net/9p/client.h
- *
* 9P Client Definitions
*
* Copyright (C) 2008 by Eric Van Hensbergen <ericvh@gmail.com>
@@ -13,6 +11,7 @@
#include <linux/utsname.h>
#include <linux/idr.h>
+#include <linux/tracepoint-defs.h>
/* Number of requests per row */
#define P9_ROW_MAXTAG 255
@@ -23,7 +22,7 @@
* @p9_proto_2000L: 9P2000.L extension
*/
-enum p9_proto_versions{
+enum p9_proto_versions {
p9_proto_legacy,
p9_proto_2000u,
p9_proto_2000L,
@@ -73,17 +72,15 @@ enum p9_req_status_t {
* @wq: wait_queue for the client to block on for this request
* @tc: the request fcall structure
* @rc: the response fcall structure
- * @aux: transport specific data (provided for trans_fd migration)
* @req_list: link for higher level objects to chain requests
*/
struct p9_req_t {
int status;
int t_err;
- struct kref refcount;
+ refcount_t refcount;
wait_queue_head_t wq;
struct p9_fcall tc;
struct p9_fcall rc;
- void *aux;
struct list_head req_list;
};
@@ -142,10 +139,16 @@ struct p9_client {
*
* TODO: This needs lots of explanation.
*/
+enum fid_source {
+ FID_FROM_OTHER,
+ FID_FROM_INODE,
+ FID_FROM_DENTRY,
+};
struct p9_fid {
struct p9_client *clnt;
u32 fid;
+ refcount_t count;
int mode;
struct p9_qid qid;
u32 iounit;
@@ -154,6 +157,7 @@ struct p9_fid {
void *rdir;
struct hlist_node dlist; /* list of all fids attached to a dentry */
+ struct hlist_node ilist;
};
/**
@@ -200,6 +204,8 @@ int p9_client_fsync(struct p9_fid *fid, int datasync);
int p9_client_remove(struct p9_fid *fid);
int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags);
int p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err);
+int p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
+ int *err);
int p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err);
int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset);
int p9dirent_read(struct p9_client *clnt, char *buf, int len,
@@ -212,36 +218,80 @@ struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid,
u64 request_mask);
int p9_client_mknod_dotl(struct p9_fid *oldfid, const char *name, int mode,
- dev_t rdev, kgid_t gid, struct p9_qid *);
+ dev_t rdev, kgid_t gid, struct p9_qid *qid);
int p9_client_mkdir_dotl(struct p9_fid *fid, const char *name, int mode,
- kgid_t gid, struct p9_qid *);
+ kgid_t gid, struct p9_qid *qid);
int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status);
int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *fl);
void p9_fcall_fini(struct p9_fcall *fc);
-struct p9_req_t *p9_tag_lookup(struct p9_client *, u16);
+struct p9_req_t *p9_tag_lookup(struct p9_client *c, u16 tag);
static inline void p9_req_get(struct p9_req_t *r)
{
- kref_get(&r->refcount);
+ refcount_inc(&r->refcount);
}
static inline int p9_req_try_get(struct p9_req_t *r)
{
- return kref_get_unless_zero(&r->refcount);
+ return refcount_inc_not_zero(&r->refcount);
}
-int p9_req_put(struct p9_req_t *r);
+int p9_req_put(struct p9_client *c, struct p9_req_t *r);
+
+/* We cannot have the real tracepoints in header files,
+ * so use wrapper functions instead. */
+DECLARE_TRACEPOINT(9p_fid_ref);
+void do_trace_9p_fid_get(struct p9_fid *fid);
+void do_trace_9p_fid_put(struct p9_fid *fid);
+
+/* fid reference counting helpers:
+ * - fids used for any length of time should always be referenced through
+ * p9_fid_get(), and released with p9_fid_put()
+ * - v9fs_fid_lookup() or similar will automatically call get for you
+ * and also require a put
+ * - the *_fid_add() helpers will stash the fid in the inode,
+ * at which point it is the responsibility of evict_inode()
+ * to call the put
+ * - the last put will automatically send a clunk to the server
+ */
+static inline struct p9_fid *p9_fid_get(struct p9_fid *fid)
+{
+ if (tracepoint_enabled(9p_fid_ref))
+ do_trace_9p_fid_get(fid);
+
+ refcount_inc(&fid->count);
+
+ return fid;
+}
+
+static inline int p9_fid_put(struct p9_fid *fid)
+{
+ if (!fid || IS_ERR(fid))
+ return 0;
+
+ if (tracepoint_enabled(9p_fid_ref))
+ do_trace_9p_fid_put(fid);
+
+ if (!refcount_dec_and_test(&fid->count))
+ return 0;
+
+ return p9_client_clunk(fid);
+}
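The rules above translate into a simple hold/release pattern around any multi-step operation; a sketch:

static int example_op(struct p9_fid *fid)
{
	int err;

	fid = p9_fid_get(fid);		/* hold across the operation */

	err = p9_client_fsync(fid, 0);	/* any request(s) against the fid */

	p9_fid_put(fid);		/* the final put clunks it on the server */
	return err;
}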
void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status);
-int p9_parse_header(struct p9_fcall *, int32_t *, int8_t *, int16_t *, int);
-int p9stat_read(struct p9_client *, char *, int, struct p9_wstat *);
-void p9stat_free(struct p9_wstat *);
+int p9_parse_header(struct p9_fcall *pdu, int32_t *size, int8_t *type,
+ int16_t *tag, int rewind);
+int p9stat_read(struct p9_client *clnt, char *buf, int len,
+ struct p9_wstat *st);
+void p9stat_free(struct p9_wstat *stbuf);
int p9_is_proto_dotu(struct p9_client *clnt);
int p9_is_proto_dotl(struct p9_client *clnt);
-struct p9_fid *p9_client_xattrwalk(struct p9_fid *, const char *, u64 *);
-int p9_client_xattrcreate(struct p9_fid *, const char *, u64, int);
+struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
+ const char *attr_name, u64 *attr_size);
+int p9_client_xattrcreate(struct p9_fid *fid, const char *name,
+ u64 attr_size, int flags);
int p9_client_readlink(struct p9_fid *fid, char **target);
int p9_client_init(void);
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index 98a2be2de04a..766ec07c9599 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * include/net/9p/transport.h
- *
* Transport Definition
*
* Copyright (C) 2005 by Latchesar Ionkov <lucho@ionkov.net>
@@ -11,6 +9,8 @@
#ifndef NET_9P_TRANSPORT_H
#define NET_9P_TRANSPORT_H
+#include <linux/module.h>
+
#define P9_DEF_MIN_RESVPORT (665U)
#define P9_DEF_MAX_RESVPORT (1023U)
@@ -19,13 +19,17 @@
* @list: used to maintain a list of currently available transports
* @name: the human-readable name of the transport
* @maxsize: transport provided maximum packet size
+ * @pooled_rbuffers: currently only set for the RDMA transport, which pulls
+ *		     its response buffers from a shared pool; accordingly we
+ *		     are less flexible when choosing the response message
+ *		     size in this case
* @def: set if this transport should be considered the default
* @create: member function to create a new connection on this transport
* @close: member function to discard a connection on this transport
* @request: member function to issue a request to the transport
* @cancel: member function to cancel a request (if it hasn't been sent)
* @cancelled: member function to notify that a cancelled request will not
- * not receive a reply
+ * receive a reply
*
* This is the basic API for a transport module which is registered by the
* transport module with the 9P core network module and used by the client
@@ -38,21 +42,28 @@ struct p9_trans_module {
struct list_head list;
char *name; /* name of transport */
int maxsize; /* max message size of transport */
+ bool pooled_rbuffers;
int def; /* this transport should be default */
struct module *owner;
- int (*create)(struct p9_client *, const char *, char *);
- void (*close) (struct p9_client *);
- int (*request) (struct p9_client *, struct p9_req_t *req);
- int (*cancel) (struct p9_client *, struct p9_req_t *req);
- int (*cancelled)(struct p9_client *, struct p9_req_t *req);
- int (*zc_request)(struct p9_client *, struct p9_req_t *,
- struct iov_iter *, struct iov_iter *, int , int, int);
- int (*show_options)(struct seq_file *, struct p9_client *);
+ int (*create)(struct p9_client *client,
+ const char *devname, char *args);
+ void (*close)(struct p9_client *client);
+ int (*request)(struct p9_client *client, struct p9_req_t *req);
+ int (*cancel)(struct p9_client *client, struct p9_req_t *req);
+ int (*cancelled)(struct p9_client *client, struct p9_req_t *req);
+ int (*zc_request)(struct p9_client *client, struct p9_req_t *req,
+ struct iov_iter *uidata, struct iov_iter *uodata,
+ int inlen, int outlen, int in_hdr_len);
+ int (*show_options)(struct seq_file *m, struct p9_client *client);
};
void v9fs_register_trans(struct p9_trans_module *m);
void v9fs_unregister_trans(struct p9_trans_module *m);
-struct p9_trans_module *v9fs_get_trans_by_name(char *s);
+struct p9_trans_module *v9fs_get_trans_by_name(const char *s);
struct p9_trans_module *v9fs_get_default_trans(void);
void v9fs_put_trans(struct p9_trans_module *m);
+
+#define MODULE_ALIAS_9P(transport) \
+ MODULE_ALIAS("9p-" transport)
+
#endif /* NET_9P_TRANSPORT_H */
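A transport module built against this API now spells out its callbacks with named parameters and can advertise a module alias. A minimal registration sketch; all p9_example_* callbacks are hypothetical no-ops for illustration only:

static int p9_example_create(struct p9_client *client, const char *devname,
			     char *args)
{
	return -EOPNOTSUPP;
}

static void p9_example_close(struct p9_client *client)
{
}

static int p9_example_request(struct p9_client *client, struct p9_req_t *req)
{
	return -EOPNOTSUPP;
}

static int p9_example_cancel(struct p9_client *client, struct p9_req_t *req)
{
	return 0;
}

static struct p9_trans_module p9_example_trans = {
	.name		= "example",
	.maxsize	= 64 * 1024,
	.pooled_rbuffers = false,
	.def		= 0,
	.owner		= THIS_MODULE,
	.create		= p9_example_create,
	.close		= p9_example_close,
	.request	= p9_example_request,
	.cancel		= p9_example_cancel,
};

static int __init p9_example_init(void)
{
	v9fs_register_trans(&p9_example_trans);
	return 0;
}

static void __exit p9_example_exit(void)
{
	v9fs_unregister_trans(&p9_example_trans);
}

module_init(p9_example_init);
module_exit(p9_example_exit);
MODULE_ALIAS_9P("example");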
diff --git a/include/net/Space.h b/include/net/Space.h
index 9cce0d80d37a..08ca9cef0213 100644
--- a/include/net/Space.h
+++ b/include/net/Space.h
@@ -8,23 +8,13 @@ struct net_device *ultra_probe(int unit);
struct net_device *wd_probe(int unit);
struct net_device *ne_probe(int unit);
struct net_device *fmv18x_probe(int unit);
-struct net_device *i82596_probe(int unit);
struct net_device *ni65_probe(int unit);
struct net_device *sonic_probe(int unit);
struct net_device *smc_init(int unit);
-struct net_device *atarilance_probe(int unit);
-struct net_device *sun3lance_probe(int unit);
-struct net_device *sun3_82586_probe(int unit);
-struct net_device *apne_probe(int unit);
struct net_device *cs89x0_probe(int unit);
-struct net_device *mvme147lance_probe(int unit);
struct net_device *tc515_probe(int unit);
struct net_device *lance_probe(int unit);
struct net_device *cops_probe(int unit);
-struct net_device *ltpc_probe(void);
/* Fibre Channel adapters */
int iph5526_probe(struct net_device *dev);
-
-/* SBNI adapters */
-int sbni_probe(int unit);
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 71347a90a9d1..61f2ceb3939e 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -7,6 +7,7 @@
*/
#include <linux/refcount.h>
+#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
@@ -30,17 +31,21 @@ struct tc_action {
atomic_t tcfa_bindcnt;
int tcfa_action;
struct tcf_t tcfa_tm;
- struct gnet_stats_basic_packed tcfa_bstats;
- struct gnet_stats_basic_packed tcfa_bstats_hw;
+ struct gnet_stats_basic_sync tcfa_bstats;
+ struct gnet_stats_basic_sync tcfa_bstats_hw;
struct gnet_stats_queue tcfa_qstats;
struct net_rate_estimator __rcu *tcfa_rate_est;
spinlock_t tcfa_lock;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats_hw;
struct gnet_stats_queue __percpu *cpu_qstats;
struct tc_cookie __rcu *act_cookie;
struct tcf_chain __rcu *goto_chain;
u32 tcfa_flags;
+ u8 hw_stats;
+ u8 used_hw_stats;
+ bool used_hw_stats_valid;
+ u32 in_hw_count;
};
#define tcf_index common.tcfa_index
#define tcf_refcnt common.tcfa_refcnt
@@ -52,6 +57,17 @@ struct tc_action {
#define tcf_rate_est common.tcfa_rate_est
#define tcf_lock common.tcfa_lock
+#define TCA_ACT_HW_STATS_ANY (TCA_ACT_HW_STATS_IMMEDIATE | \
+ TCA_ACT_HW_STATS_DELAYED)
+
+/* Reserve 16 bits for user-space. See TCA_ACT_FLAGS_NO_PERCPU_STATS. */
+#define TCA_ACT_FLAGS_USER_BITS 16
+#define TCA_ACT_FLAGS_USER_MASK 0xffff
+#define TCA_ACT_FLAGS_POLICE (1U << TCA_ACT_FLAGS_USER_BITS)
+#define TCA_ACT_FLAGS_BIND (1U << (TCA_ACT_FLAGS_USER_BITS + 1))
+#define TCA_ACT_FLAGS_REPLACE (1U << (TCA_ACT_FLAGS_USER_BITS + 2))
+#define TCA_ACT_FLAGS_NO_RTNL (1U << (TCA_ACT_FLAGS_USER_BITS + 3))
+
/* Update lastuse only if needed, to avoid dirtying a cache line.
* We use a temp variable to avoid fetching jiffies twice.
*/
@@ -69,10 +85,21 @@ static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm)
{
dtm->install = jiffies_to_clock_t(jiffies - stm->install);
dtm->lastuse = jiffies_to_clock_t(jiffies - stm->lastuse);
- dtm->firstuse = jiffies_to_clock_t(jiffies - stm->firstuse);
+ dtm->firstuse = stm->firstuse ?
+ jiffies_to_clock_t(jiffies - stm->firstuse) : 0;
dtm->expires = jiffies_to_clock_t(stm->expires);
}
+static inline enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
+{
+ if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY))
+ return FLOW_ACTION_HW_STATS_DONT_CARE;
+ else if (!hw_stats)
+ return FLOW_ACTION_HW_STATS_DISABLED;
+
+ return hw_stats;
+}
+
#ifdef CONFIG_NET_CLS_ACT
#define ACT_P_CREATED 1
@@ -84,6 +111,7 @@ struct tc_action_ops {
struct list_head head;
char kind[IFNAMSIZ];
enum tca_id id; /* identifier should match kind */
+ unsigned int net_id;
size_t size;
struct module *owner;
int (*act)(struct sk_buff *, const struct tc_action *,
@@ -92,20 +120,23 @@ struct tc_action_ops {
void (*cleanup)(struct tc_action *);
int (*lookup)(struct net *net, struct tc_action **a, u32 index);
int (*init)(struct net *net, struct nlattr *nla,
- struct nlattr *est, struct tc_action **act, int ovr,
- int bind, bool rtnl_held, struct tcf_proto *tp,
+ struct nlattr *est, struct tc_action **act,
+ struct tcf_proto *tp,
u32 flags, struct netlink_ext_ack *extack);
int (*walk)(struct net *, struct sk_buff *,
struct netlink_callback *, int,
const struct tc_action_ops *,
struct netlink_ext_ack *);
- void (*stats_update)(struct tc_action *, u64, u32, u64, bool);
+ void (*stats_update)(struct tc_action *, u64, u64, u64, u64, bool);
size_t (*get_fill_size)(const struct tc_action *act);
struct net_device *(*get_dev)(const struct tc_action *a,
tc_action_priv_destructor *destructor);
struct psample_group *
(*get_psample_group)(const struct tc_action *a,
tc_action_priv_destructor *destructor);
+ int (*offload_act_setup)(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind,
+ struct netlink_ext_ack *extack);
};
struct tc_action_net {
@@ -159,17 +190,11 @@ int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
struct nlattr *est, struct tc_action **a,
const struct tc_action_ops *ops, int bind,
u32 flags);
-void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);
-
+void tcf_idr_insert_many(struct tc_action *actions[]);
void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
struct tc_action **a, int bind);
-int __tcf_idr_release(struct tc_action *a, bool bind, bool strict);
-
-static inline int tcf_idr_release(struct tc_action *a, bool bind)
-{
- return __tcf_idr_release(a, bind, false);
-}
+int tcf_idr_release(struct tc_action *a, bool bind);
int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
int tcf_unregister_action(struct tc_action_ops *a,
@@ -178,16 +203,18 @@ int tcf_action_destroy(struct tc_action *actions[], int bind);
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
int nr_actions, struct tcf_result *res);
int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
- struct nlattr *est, char *name, int ovr, int bind,
- struct tc_action *actions[], size_t *attr_size,
- bool rtnl_held, struct netlink_ext_ack *extack);
+ struct nlattr *est,
+ struct tc_action *actions[], int init_res[], size_t *attr_size,
+ u32 flags, u32 fl_flags, struct netlink_ext_ack *extack);
+struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, bool police,
+ bool rtnl_held,
+ struct netlink_ext_ack *extack);
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
struct nlattr *nla, struct nlattr *est,
- char *name, int ovr, int bind,
- bool rtnl_held,
- struct netlink_ext_ack *extack);
+ struct tc_action_ops *a_o, int *init_res,
+ u32 flags, struct netlink_ext_ack *extack);
int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
- int ref);
+ int ref, bool terse);
int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
@@ -195,7 +222,7 @@ static inline void tcf_action_update_bstats(struct tc_action *a,
struct sk_buff *skb)
{
if (likely(a->cpu_bstats)) {
- bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), skb);
+ bstats_update(this_cpu_ptr(a->cpu_bstats), skb);
return;
}
spin_lock(&a->tcfa_lock);
@@ -225,25 +252,43 @@ static inline void tcf_action_inc_overlimit_qstats(struct tc_action *a)
spin_unlock(&a->tcfa_lock);
}
-void tcf_action_update_stats(struct tc_action *a, u64 bytes, u32 packets,
- bool drop, bool hw);
+void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
+ u64 drops, bool hw);
int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
+int tcf_action_update_hw_stats(struct tc_action *action);
+int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
+ void *cb_priv, bool add);
int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
struct tcf_chain **handle,
struct netlink_ext_ack *newchain);
struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
struct tcf_chain *newchain);
+
+#ifdef CONFIG_INET
+DECLARE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
+#endif
+
+int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));
+
+#else /* !CONFIG_NET_CLS_ACT */
+
+static inline int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
+					  void *cb_priv, bool add)
+{
+	return 0;
+}
+
#endif /* CONFIG_NET_CLS_ACT */
static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
- u64 packets, u64 lastuse, bool hw)
+ u64 packets, u64 drops,
+ u64 lastuse, bool hw)
{
#ifdef CONFIG_NET_CLS_ACT
if (!a->ops->stats_update)
return;
- a->ops->stats_update(a, bytes, packets, lastuse, hw);
+ a->ops->stats_update(a, bytes, packets, drops, lastuse, hw);
#endif
}
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index a088349dd94f..c04f359655b8 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -64,6 +64,8 @@ struct ifa6_config {
const struct in6_addr *pfx;
unsigned int plen;
+ u8 ifa_proto;
+
const struct in6_addr *peer_pfx;
u32 rt_priority;
@@ -90,12 +92,18 @@ int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr);
#endif
+int ipv6_chk_rpl_srh_loop(struct net *net, const struct in6_addr *segs,
+ unsigned char nsegs);
+
bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
const unsigned int prefix_len,
struct net_device *dev);
int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev);
+struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr,
+ struct net_device *dev);
+
struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
const struct in6_addr *addr,
struct net_device *dev, int strict);
@@ -103,8 +111,6 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
int ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
const struct in6_addr *daddr, unsigned int srcprefs,
struct in6_addr *saddr);
-int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
- u32 banned_flags);
int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
u32 banned_flags);
bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
@@ -229,7 +235,6 @@ void ipv6_mc_unmap(struct inet6_dev *idev);
void ipv6_mc_remap(struct inet6_dev *idev);
void ipv6_mc_init_dev(struct inet6_dev *idev);
void ipv6_mc_destroy_dev(struct inet6_dev *idev);
-int ipv6_mc_check_icmpv6(struct sk_buff *skb);
int ipv6_mc_check_mld(struct sk_buff *skb);
void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp);
@@ -273,6 +278,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex,
const struct in6_addr *addr);
int ipv6_sock_ac_drop(struct sock *sk, int ifindex,
const struct in6_addr *addr);
+void __ipv6_sock_ac_close(struct sock *sk);
void ipv6_sock_ac_close(struct sock *sk);
int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr);
@@ -399,6 +405,9 @@ static inline bool ip6_ignore_linkdown(const struct net_device *dev)
{
const struct inet6_dev *idev = __in6_dev_get(dev);
+ if (unlikely(!idev))
+ return true;
+
return !!idev->cnf.ignore_routes_with_linkdown;
}
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 1abae3c340a5..b69ca695935c 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -16,6 +16,12 @@ struct sock;
struct socket;
struct rxrpc_call;
+enum rxrpc_interruptibility {
+ RXRPC_INTERRUPTIBLE, /* Call is interruptible */
+ RXRPC_PREINTERRUPTIBLE, /* Call can be cancelled whilst waiting for a slot */
+ RXRPC_UNINTERRUPTIBLE, /* Call should not be interruptible at all */
+};
+
/*
* Debug ID counter for tracing.
*/
@@ -41,31 +47,29 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
gfp_t,
rxrpc_notify_rx_t,
bool,
- bool,
+ enum rxrpc_interruptibility,
unsigned int);
int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
struct msghdr *, size_t,
rxrpc_notify_end_tx_t);
int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *,
- struct iov_iter *, bool, u32 *, u16 *);
+ struct iov_iter *, size_t *, bool, u32 *, u16 *);
bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
u32, int, const char *);
void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
struct sockaddr_rxrpc *);
-u64 rxrpc_kernel_get_rtt(struct socket *, struct rxrpc_call *);
+bool rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *, u32 *);
int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
rxrpc_user_attach_call_t, unsigned long, gfp_t,
unsigned int);
void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
-bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *,
- u32 *);
-void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
+bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
-bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
- ktime_t *);
bool rxrpc_kernel_call_is_complete(struct rxrpc_call *);
void rxrpc_kernel_set_max_life(struct socket *, struct rxrpc_call *,
unsigned long);
+int rxrpc_sock_set_min_security_level(struct sock *sk, unsigned int val);
+
#endif /* _NET_RXRPC_H */
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 17e10fba2152..480fa579787e 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -16,18 +16,16 @@ void wait_for_unix_gc(void);
struct sock *unix_get_socket(struct file *filp);
struct sock *unix_peer_get(struct sock *sk);
-#define UNIX_HASH_SIZE 256
+#define UNIX_HASH_MOD (256 - 1)
+#define UNIX_HASH_SIZE (256 * 2)
#define UNIX_HASH_BITS 8
extern unsigned int unix_tot_inflight;
-extern spinlock_t unix_table_lock;
-extern struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
struct unix_address {
refcount_t refcnt;
int len;
- unsigned int hash;
- struct sockaddr_un name[0];
+ struct sockaddr_un name[];
};
struct unix_skb_parms {
@@ -42,7 +40,7 @@ struct unix_skb_parms {
} __randomize_layout;
struct scm_stat {
- u32 nr_fds;
+ atomic_t nr_fds;
};
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
@@ -70,6 +68,9 @@ struct unix_sock {
struct socket_wq peer_wq;
wait_queue_entry_t peer_wake;
struct scm_stat scm_stat;
+#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+ struct sk_buff *oob_skb;
+#endif
};
static inline struct unix_sock *unix_sk(const struct sock *sk)
@@ -82,6 +83,10 @@ static inline struct unix_sock *unix_sk(const struct sock *sk)
long unix_inq_len(struct sock *sk);
long unix_outq_len(struct sock *sk);
+int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
+ int flags);
+int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
+ int flags);
#ifdef CONFIG_SYSCTL
int unix_sysctl_register(struct net *net);
void unix_sysctl_unregister(struct net *net);
@@ -89,4 +94,16 @@ void unix_sysctl_unregister(struct net *net);
static inline int unix_sysctl_register(struct net *net) { return 0; }
static inline void unix_sysctl_unregister(struct net *net) {}
#endif
+
+#ifdef CONFIG_BPF_SYSCALL
+extern struct proto unix_dgram_proto;
+extern struct proto unix_stream_proto;
+
+int unix_dgram_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
+int unix_stream_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
+void __init unix_bpf_build_proto(void);
+#else
+static inline void __init unix_bpf_build_proto(void)
+{}
+#endif
#endif
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index b1c717286993..568a87c5e0d0 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/workqueue.h>
+#include <net/sock.h>
#include <uapi/linux/vm_sockets.h>
#include "vsock_addr.h"
@@ -77,6 +78,7 @@ struct vsock_sock {
s64 vsock_stream_has_data(struct vsock_sock *vsk);
s64 vsock_stream_has_space(struct vsock_sock *vsk);
struct sock *vsock_create_connected(struct sock *parent);
+void vsock_data_ready(struct sock *sk);
/**** TRANSPORT ****/
@@ -134,6 +136,15 @@ struct vsock_transport {
u64 (*stream_rcvhiwat)(struct vsock_sock *);
bool (*stream_is_active)(struct vsock_sock *);
bool (*stream_allow)(u32 cid, u32 port);
+ int (*set_rcvlowat)(struct vsock_sock *vsk, int val);
+
+ /* SEQ_PACKET. */
+ ssize_t (*seqpacket_dequeue)(struct vsock_sock *vsk, struct msghdr *msg,
+ int flags);
+ int (*seqpacket_enqueue)(struct vsock_sock *vsk, struct msghdr *msg,
+ size_t len);
+ bool (*seqpacket_allow)(u32 remote_cid);
+ u32 (*seqpacket_has_data)(struct vsock_sock *vsk);
/* Notification. */
int (*notify_poll_in)(struct vsock_sock *, size_t, bool *);
@@ -197,7 +208,8 @@ struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr);
struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
struct sockaddr_vm *dst);
void vsock_remove_sock(struct vsock_sock *vsk);
-void vsock_for_each_connected_socket(void (*fn)(struct sock *sk));
+void vsock_for_each_connected_socket(struct vsock_transport *transport,
+ void (*fn)(struct sock *sk));
int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk);
bool vsock_find_cid(unsigned int cid);
diff --git a/include/net/amt.h b/include/net/amt.h
new file mode 100644
index 000000000000..c881bc8b673b
--- /dev/null
+++ b/include/net/amt.h
@@ -0,0 +1,408 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2021 Taehee Yoo <ap420073@gmail.com>
+ */
+#ifndef _NET_AMT_H_
+#define _NET_AMT_H_
+
+#include <linux/siphash.h>
+#include <linux/jhash.h>
+#include <linux/netdevice.h>
+#include <net/gro_cells.h>
+#include <net/rtnetlink.h>
+
+enum amt_msg_type {
+ AMT_MSG_DISCOVERY = 1,
+ AMT_MSG_ADVERTISEMENT,
+ AMT_MSG_REQUEST,
+ AMT_MSG_MEMBERSHIP_QUERY,
+ AMT_MSG_MEMBERSHIP_UPDATE,
+ AMT_MSG_MULTICAST_DATA,
+ AMT_MSG_TEARDOWN,
+ __AMT_MSG_MAX,
+};
+
+#define AMT_MSG_MAX (__AMT_MSG_MAX - 1)
+
+enum amt_ops {
+ /* A*B */
+ AMT_OPS_INT,
+ /* A+B */
+ AMT_OPS_UNI,
+ /* A-B */
+ AMT_OPS_SUB,
+ /* B-A */
+ AMT_OPS_SUB_REV,
+ __AMT_OPS_MAX,
+};
+
+#define AMT_OPS_MAX (__AMT_OPS_MAX - 1)
+
+enum amt_filter {
+ AMT_FILTER_FWD,
+ AMT_FILTER_D_FWD,
+ AMT_FILTER_FWD_NEW,
+ AMT_FILTER_D_FWD_NEW,
+ AMT_FILTER_ALL,
+ AMT_FILTER_NONE_NEW,
+ AMT_FILTER_BOTH,
+ AMT_FILTER_BOTH_NEW,
+ __AMT_FILTER_MAX,
+};
+
+#define AMT_FILTER_MAX (__AMT_FILTER_MAX - 1)
+
+enum amt_act {
+ AMT_ACT_GMI,
+ AMT_ACT_GMI_ZERO,
+ AMT_ACT_GT,
+ AMT_ACT_STATUS_FWD_NEW,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ AMT_ACT_STATUS_NONE_NEW,
+ __AMT_ACT_MAX,
+};
+
+#define AMT_ACT_MAX (__AMT_ACT_MAX - 1)
+
+enum amt_status {
+ AMT_STATUS_INIT,
+ AMT_STATUS_SENT_DISCOVERY,
+ AMT_STATUS_RECEIVED_DISCOVERY,
+ AMT_STATUS_SENT_ADVERTISEMENT,
+ AMT_STATUS_RECEIVED_ADVERTISEMENT,
+ AMT_STATUS_SENT_REQUEST,
+ AMT_STATUS_RECEIVED_REQUEST,
+ AMT_STATUS_SENT_QUERY,
+ AMT_STATUS_RECEIVED_QUERY,
+ AMT_STATUS_SENT_UPDATE,
+ AMT_STATUS_RECEIVED_UPDATE,
+ __AMT_STATUS_MAX,
+};
+
+#define AMT_STATUS_MAX (__AMT_STATUS_MAX - 1)
+
+/* Gateway events only */
+enum amt_event {
+ AMT_EVENT_NONE,
+ AMT_EVENT_RECEIVE,
+ AMT_EVENT_SEND_DISCOVERY,
+ AMT_EVENT_SEND_REQUEST,
+ __AMT_EVENT_MAX,
+};
+
+struct amt_header {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 type:4,
+ version:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 version:4,
+ type:4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
+
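The paired bitfield declarations above let the same wire octet decode identically on little- and big-endian hosts: the 4-bit version field always lands in the high nibble on the wire. An equivalent hand-decoded form, for illustration only (the helper is hypothetical):

	static inline void amt_example_parse_octet(u8 octet, u8 *version, u8 *type)
	{
		*version = octet >> 4;	/* high nibble: protocol version */
		*type = octet & 0x0f;	/* low nibble: message type */
	}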
+struct amt_header_discovery {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u32 type:4,
+ version:4,
+ reserved:24;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u32 version:4,
+ type:4,
+ reserved:24;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+} __packed;
+
+struct amt_header_advertisement {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u32 type:4,
+ version:4,
+ reserved:24;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u32 version:4,
+ type:4,
+ reserved:24;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+ __be32 ip4;
+} __packed;
+
+struct amt_header_request {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u32 type:4,
+ version:4,
+ reserved1:7,
+ p:1,
+ reserved2:16;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u32 version:4,
+ type:4,
+ p:1,
+ reserved1:7,
+ reserved2:16;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+} __packed;
+
+struct amt_header_membership_query {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 type:4,
+ version:4,
+ reserved:6,
+ l:1,
+ g:1,
+ response_mac:48;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u64 version:4,
+ type:4,
+ g:1,
+ l:1,
+ reserved:6,
+ response_mac:48;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+} __packed;
+
+struct amt_header_membership_update {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 type:4,
+ version:4,
+ reserved:8,
+ response_mac:48;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u64 version:4,
+ type:4,
+ reserved:8,
+ response_mac:48;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+} __packed;
+
+struct amt_header_mcast_data {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u16 type:4,
+ version:4,
+ reserved:8;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u16 version:4,
+ type:4,
+ reserved:8;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
+
+struct amt_headers {
+ union {
+ struct amt_header_discovery discovery;
+ struct amt_header_advertisement advertisement;
+ struct amt_header_request request;
+ struct amt_header_membership_query query;
+ struct amt_header_membership_update update;
+ struct amt_header_mcast_data data;
+ };
+} __packed;
+
+struct amt_gw_headers {
+ union {
+ struct amt_header_discovery discovery;
+ struct amt_header_request request;
+ struct amt_header_membership_update update;
+ };
+} __packed;
+
+struct amt_relay_headers {
+ union {
+ struct amt_header_advertisement advertisement;
+ struct amt_header_membership_query query;
+ struct amt_header_mcast_data data;
+ };
+} __packed;
+
+struct amt_skb_cb {
+ struct amt_tunnel_list *tunnel;
+};
+
+struct amt_tunnel_list {
+ struct list_head list;
+ /* Protect all resources under an amt_tunnel_list */
+ spinlock_t lock;
+ struct amt_dev *amt;
+ u32 nr_groups;
+ u32 nr_sources;
+ enum amt_status status;
+ struct delayed_work gc_wq;
+ __be16 source_port;
+ __be32 ip4;
+ __be32 nonce;
+ siphash_key_t key;
+ u64 mac:48,
+ reserved:16;
+ struct rcu_head rcu;
+ struct hlist_head groups[];
+};
+
+union amt_addr {
+ __be32 ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr ip6;
+#endif
+};
+
+/* RFC 3810
+ *
+ * When the router is in EXCLUDE mode, the router state is represented
+ * by the notation EXCLUDE (X,Y), where X is called the "Requested List"
+ * and Y is called the "Exclude List". All sources, except those from
+ * the Exclude List, will be forwarded by the router.
+ */
+enum amt_source_status {
+ AMT_SOURCE_STATUS_NONE,
+ /* Node of Requested List */
+ AMT_SOURCE_STATUS_FWD,
+ /* Node of Exclude List */
+ AMT_SOURCE_STATUS_D_FWD,
+};
+
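Restating the RFC 3810 rule above as code: in EXCLUDE mode, a source is forwarded unless it sits on the Exclude List. A hypothetical sketch, not part of this header:

	static inline bool amt_example_should_forward(enum amt_source_status status)
	{
		/* only Exclude List entries (D_FWD) are dropped */
		return status != AMT_SOURCE_STATUS_D_FWD;
	}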
+/* protected by gnode->lock */
+struct amt_source_node {
+ struct hlist_node node;
+ struct amt_group_node *gnode;
+ struct delayed_work source_timer;
+ union amt_addr source_addr;
+ enum amt_source_status status;
+#define AMT_SOURCE_OLD 0
+#define AMT_SOURCE_NEW 1
+ u8 flags;
+ struct rcu_head rcu;
+};
+
+/* Protected by amt_tunnel_list->lock */
+struct amt_group_node {
+ struct amt_dev *amt;
+ union amt_addr group_addr;
+ union amt_addr host_addr;
+ bool v6;
+ u8 filter_mode;
+ u32 nr_sources;
+ struct amt_tunnel_list *tunnel_list;
+ struct hlist_node node;
+ struct delayed_work group_timer;
+ struct rcu_head rcu;
+ struct hlist_head sources[];
+};
+
+#define AMT_MAX_EVENTS 16
+struct amt_events {
+ enum amt_event event;
+ struct sk_buff *skb;
+};
+
+struct amt_dev {
+ struct net_device *dev;
+ struct net_device *stream_dev;
+ struct net *net;
+ /* Global lock for amt device */
+ spinlock_t lock;
+ /* Used only in relay mode */
+ struct list_head tunnel_list;
+ struct gro_cells gro_cells;
+
+ /* Protected by RTNL */
+ struct delayed_work discovery_wq;
+ /* Protected by RTNL */
+ struct delayed_work req_wq;
+ /* Protected by RTNL */
+ struct delayed_work secret_wq;
+ struct work_struct event_wq;
+ /* AMT status */
+ enum amt_status status;
+ /* Generated key */
+ siphash_key_t key;
+ struct socket __rcu *sock;
+ u32 max_groups;
+ u32 max_sources;
+ u32 hash_buckets;
+ u32 hash_seed;
+ /* Default 128 */
+ u32 max_tunnels;
+ /* Default 128 */
+ u32 nr_tunnels;
+ /* Gateway or Relay mode */
+ u32 mode;
+ /* Default 2268 */
+ __be16 relay_port;
+ /* Default 2268 */
+ __be16 gw_port;
+ /* Outer local ip */
+ __be32 local_ip;
+ /* Outer remote ip */
+ __be32 remote_ip;
+ /* Outer discovery ip */
+ __be32 discovery_ip;
+ /* Only used in gateway mode */
+ __be32 nonce;
+ /* Gateway sent request and received query */
+ bool ready4;
+ bool ready6;
+ u8 req_cnt;
+ u8 qi;
+ u64 qrv;
+ u64 qri;
+ /* Used only in gateway mode */
+ u64 mac:48,
+ reserved:16;
+ /* AMT gateway side message handler queue */
+ struct amt_events events[AMT_MAX_EVENTS];
+ u8 event_idx;
+ u8 nr_events;
+};
+
+#define AMT_TOS 0xc0
+#define AMT_IPHDR_OPTS 4
+#define AMT_IP6HDR_OPTS 8
+#define AMT_GC_INTERVAL (30 * 1000)
+#define AMT_MAX_GROUP 32
+#define AMT_MAX_SOURCE 128
+#define AMT_HSIZE_SHIFT 8
+#define AMT_HSIZE (1 << AMT_HSIZE_SHIFT)
+
+#define AMT_DISCOVERY_TIMEOUT 5000
+#define AMT_INIT_REQ_TIMEOUT 1
+#define AMT_INIT_QUERY_INTERVAL 125
+#define AMT_MAX_REQ_TIMEOUT 120
+#define AMT_MAX_REQ_COUNT 3
+#define AMT_SECRET_TIMEOUT 60000
+#define IANA_AMT_UDP_PORT 2268
+#define AMT_MAX_TUNNELS 128
+#define AMT_MAX_REQS 128
+#define AMT_GW_HLEN (sizeof(struct iphdr) + \
+ sizeof(struct udphdr) + \
+ sizeof(struct amt_gw_headers))
+#define AMT_RELAY_HLEN (sizeof(struct iphdr) + \
+ sizeof(struct udphdr) + \
+ sizeof(struct amt_relay_headers))
+
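As a worked check of the two header-length macros above, assuming IPv4 without options: sizeof(struct iphdr) is 20, sizeof(struct udphdr) is 8, and the largest member of each packed union is 12 bytes (a 64-bit bitfield plus a 32-bit nonce), so AMT_GW_HLEN and AMT_RELAY_HLEN both evaluate to 20 + 8 + 12 = 40 bytes of encapsulation overhead.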
+static inline bool netif_is_amt(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops && !strcmp(dev->rtnl_link_ops->kind, "amt");
+}
+
+static inline u64 amt_gmi(const struct amt_dev *amt)
+{
+ return ((amt->qrv * amt->qi) + amt->qri) * 1000;
+}
+
+#endif /* _NET_AMT_H_ */
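A worked example of amt_gmi() above, assuming the common IGMPv3 defaults (the header only spells out qi's default of 125): qrv = 2, qi = 125 seconds and qri = 10 seconds give

	((2 * 125) + 10) * 1000 = 260000

that is, a group membership interval of 260 seconds, expressed in milliseconds.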
diff --git a/include/net/arp.h b/include/net/arp.h
index 4950191f6b2b..d7ef4ec71dfe 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -53,13 +53,7 @@ static inline void __ipv4_confirm_neigh(struct net_device *dev, u32 key)
rcu_read_lock_bh();
n = __ipv4_neigh_lookup_noref(dev, key);
- if (n) {
- unsigned long now = jiffies;
-
- /* avoid dirtying neighbour */
- if (READ_ONCE(n->confirmed) != now)
- WRITE_ONCE(n->confirmed, now);
- }
+ neigh_confirm(n);
rcu_read_unlock_bh();
}
@@ -71,6 +65,7 @@ void arp_send(int type, int ptype, __be32 dest_ip,
const unsigned char *src_hw, const unsigned char *th);
int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir);
void arp_ifdown(struct net_device *dev);
+int arp_invalidate(struct net_device *dev, __be32 ip, bool force);
struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
struct net_device *dev, __be32 src_ip,
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 8b7eb46ad72d..f8cf3629a419 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -187,18 +187,12 @@ typedef struct {
typedef struct ax25_route {
struct ax25_route *next;
- refcount_t refcount;
ax25_address callsign;
struct net_device *dev;
ax25_digi *digipeat;
char ip_mode;
} ax25_route;
-static inline void ax25_hold_route(ax25_route *ax25_rt)
-{
- refcount_inc(&ax25_rt->refcount);
-}
-
void __ax25_put_route(ax25_route *ax25_rt);
extern rwlock_t ax25_route_lock;
@@ -213,12 +207,6 @@ static inline void ax25_route_lock_unuse(void)
read_unlock(&ax25_route_lock);
}
-static inline void ax25_put_route(ax25_route *ax25_rt)
-{
- if (refcount_dec_and_test(&ax25_rt->refcount))
- __ax25_put_route(ax25_rt);
-}
-
typedef struct {
char slave; /* slave_mode? */
struct timer_list slave_timer; /* timeout timer */
@@ -229,13 +217,18 @@ struct ctl_table;
typedef struct ax25_dev {
struct ax25_dev *next;
+
struct net_device *dev;
+ netdevice_tracker dev_tracker;
+
struct net_device *forward;
struct ctl_table_header *sysheader;
int values[AX25_MAX_VALUES];
#if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER)
ax25_dama_info dama;
#endif
+ refcount_t refcount;
+ bool device_up;
} ax25_dev;
typedef struct ax25_cb {
@@ -243,6 +236,7 @@ typedef struct ax25_cb {
ax25_address source_addr, dest_addr;
ax25_digi *digipeat;
ax25_dev *ax25_dev;
+ netdevice_tracker dev_tracker;
unsigned char iamdigi;
unsigned char state, modulus, pidincl;
unsigned short vs, vr, va;
@@ -290,6 +284,17 @@ static __inline__ void ax25_cb_put(ax25_cb *ax25)
}
}
+static inline void ax25_dev_hold(ax25_dev *ax25_dev)
+{
+ refcount_inc(&ax25_dev->refcount);
+}
+
+static inline void ax25_dev_put(ax25_dev *ax25_dev)
+{
+ if (refcount_dec_and_test(&ax25_dev->refcount)) {
+ kfree(ax25_dev);
+ }
+}
static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev)
{
skb->dev = dev;
@@ -304,7 +309,7 @@ extern spinlock_t ax25_list_lock;
void ax25_cb_add(ax25_cb *);
struct sock *ax25_find_listener(ax25_address *, int, struct net_device *, int);
struct sock *ax25_get_socket(ax25_address *, ax25_address *, int);
-ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *,
+ax25_cb *ax25_find_cb(const ax25_address *, ax25_address *, ax25_digi *,
struct net_device *);
void ax25_send_to_raw(ax25_address *, struct sk_buff *, int);
void ax25_destroy_socket(ax25_cb *);
@@ -384,10 +389,11 @@ struct ax25_linkfail {
void ax25_linkfail_register(struct ax25_linkfail *lf);
void ax25_linkfail_release(struct ax25_linkfail *lf);
-int __must_check ax25_listen_register(ax25_address *, struct net_device *);
-void ax25_listen_release(ax25_address *, struct net_device *);
+int __must_check ax25_listen_register(const ax25_address *,
+ struct net_device *);
+void ax25_listen_release(const ax25_address *, struct net_device *);
int(*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *);
-int ax25_listen_mine(ax25_address *, struct net_device *);
+int ax25_listen_mine(const ax25_address *, struct net_device *);
void ax25_link_failed(ax25_cb *, int);
int ax25_protocol_is_registered(unsigned int);
@@ -401,8 +407,8 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb);
extern const struct header_ops ax25_header_ops;
/* ax25_out.c */
-ax25_cb *ax25_send_frame(struct sk_buff *, int, ax25_address *, ax25_address *,
- ax25_digi *, struct net_device *);
+ax25_cb *ax25_send_frame(struct sk_buff *, int, const ax25_address *,
+ ax25_address *, ax25_digi *, struct net_device *);
void ax25_output(ax25_cb *, int, struct sk_buff *);
void ax25_kick(ax25_cb *);
void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int);
diff --git a/include/net/ax88796.h b/include/net/ax88796.h
index aa52b2e8ff7b..303100f08ab8 100644
--- a/include/net/ax88796.h
+++ b/include/net/ax88796.h
@@ -8,6 +8,8 @@
#ifndef __NET_AX88796_PLAT_H
#define __NET_AX88796_PLAT_H
+#include <linux/types.h>
+
struct sk_buff;
struct net_device;
struct platform_device;
@@ -32,10 +34,13 @@ struct ax_plat_data {
const unsigned char *buf, int star_page);
void (*block_input)(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset);
- /* returns nonzero if a pending interrupt request might by caused by
- * the ax88786. Handles all interrupts if set to NULL
+ /* returns nonzero if a pending interrupt request might be caused by
+ * the ax88796. Handles all interrupts if set to NULL
*/
int (*check_irq)(struct platform_device *pdev);
};
+/* exported from ax88796.c for xsurf100.c */
+extern void ax_NS8390_reinit(struct net_device *dev);
+
#endif /* __NET_AX88796_PLAT_H */
diff --git a/include/net/bareudp.h b/include/net/bareudp.h
new file mode 100644
index 000000000000..17610c8d6361
--- /dev/null
+++ b/include/net/bareudp.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __NET_BAREUDP_H
+#define __NET_BAREUDP_H
+
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <net/rtnetlink.h>
+
+static inline bool netif_is_bareudp(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops &&
+ !strcmp(dev->rtnl_link_ops->kind, "bareudp");
+}
+
+#endif
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index e42bb8e03c09..bcc5a4cd2c17 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -41,6 +41,8 @@
#define BLUETOOTH_VER_1_1 1
#define BLUETOOTH_VER_1_2 2
#define BLUETOOTH_VER_2_0 3
+#define BLUETOOTH_VER_2_1 4
+#define BLUETOOTH_VER_4_0 6
/* Reserved for core and drivers use */
#define BT_SKB_RESERVE 8
@@ -53,6 +55,8 @@
#define BTPROTO_CMTP 5
#define BTPROTO_HIDP 6
#define BTPROTO_AVDTP 7
+#define BTPROTO_ISO 8
+#define BTPROTO_LAST BTPROTO_ISO
#define SOL_HCI 0
#define SOL_L2CAP 6
@@ -121,6 +125,102 @@ struct bt_voice {
#define BT_SNDMTU 12
#define BT_RCVMTU 13
+#define BT_PHY 14
+
+#define BT_PHY_BR_1M_1SLOT 0x00000001
+#define BT_PHY_BR_1M_3SLOT 0x00000002
+#define BT_PHY_BR_1M_5SLOT 0x00000004
+#define BT_PHY_EDR_2M_1SLOT 0x00000008
+#define BT_PHY_EDR_2M_3SLOT 0x00000010
+#define BT_PHY_EDR_2M_5SLOT 0x00000020
+#define BT_PHY_EDR_3M_1SLOT 0x00000040
+#define BT_PHY_EDR_3M_3SLOT 0x00000080
+#define BT_PHY_EDR_3M_5SLOT 0x00000100
+#define BT_PHY_LE_1M_TX 0x00000200
+#define BT_PHY_LE_1M_RX 0x00000400
+#define BT_PHY_LE_2M_TX 0x00000800
+#define BT_PHY_LE_2M_RX 0x00001000
+#define BT_PHY_LE_CODED_TX 0x00002000
+#define BT_PHY_LE_CODED_RX 0x00004000
+
+#define BT_MODE 15
+
+#define BT_MODE_BASIC 0x00
+#define BT_MODE_ERTM 0x01
+#define BT_MODE_STREAMING 0x02
+#define BT_MODE_LE_FLOWCTL 0x03
+#define BT_MODE_EXT_FLOWCTL 0x04
+
+#define BT_PKT_STATUS 16
+
+#define BT_SCM_PKT_STATUS 0x03
+
+#define BT_ISO_QOS 17
+
+#define BT_ISO_QOS_CIG_UNSET 0xff
+#define BT_ISO_QOS_CIS_UNSET 0xff
+
+#define BT_ISO_QOS_BIG_UNSET 0xff
+#define BT_ISO_QOS_BIS_UNSET 0xff
+
+struct bt_iso_io_qos {
+ __u32 interval;
+ __u16 latency;
+ __u16 sdu;
+ __u8 phy;
+ __u8 rtn;
+};
+
+struct bt_iso_qos {
+ union {
+ __u8 cig;
+ __u8 big;
+ };
+ union {
+ __u8 cis;
+ __u8 bis;
+ };
+ union {
+ __u8 sca;
+ __u8 sync_interval;
+ };
+ __u8 packing;
+ __u8 framing;
+ struct bt_iso_io_qos in;
+ struct bt_iso_io_qos out;
+};
+
+#define BT_ISO_PHY_1M 0x01
+#define BT_ISO_PHY_2M 0x02
+#define BT_ISO_PHY_CODED 0x04
+#define BT_ISO_PHY_ANY (BT_ISO_PHY_1M | BT_ISO_PHY_2M | \
+ BT_ISO_PHY_CODED)
+
+#define BT_CODEC 19
+
+struct bt_codec_caps {
+ __u8 len;
+ __u8 data[];
+} __packed;
+
+struct bt_codec {
+ __u8 id;
+ __u16 cid;
+ __u16 vid;
+ __u8 data_path;
+ __u8 num_caps;
+} __packed;
+
+struct bt_codecs {
+ __u8 num_codecs;
+ struct bt_codec codecs[];
+} __packed;
+
+#define BT_CODEC_CVSD 0x02
+#define BT_CODEC_TRANSPARENT 0x03
+#define BT_CODEC_MSBC 0x05
+
+#define BT_ISO_BASE 20
__printf(1, 2)
void bt_info(const char *fmt, ...);
@@ -128,6 +228,12 @@ __printf(1, 2)
void bt_warn(const char *fmt, ...);
__printf(1, 2)
void bt_err(const char *fmt, ...);
+#if IS_ENABLED(CONFIG_BT_FEATURE_DEBUG)
+void bt_dbg_set(bool enable);
+bool bt_dbg_get(void);
+__printf(1, 2)
+void bt_dbg(const char *fmt, ...);
+#endif
__printf(1, 2)
void bt_warn_ratelimited(const char *fmt, ...);
__printf(1, 2)
@@ -136,21 +242,28 @@ void bt_err_ratelimited(const char *fmt, ...);
#define BT_INFO(fmt, ...) bt_info(fmt "\n", ##__VA_ARGS__)
#define BT_WARN(fmt, ...) bt_warn(fmt "\n", ##__VA_ARGS__)
#define BT_ERR(fmt, ...) bt_err(fmt "\n", ##__VA_ARGS__)
+
+#if IS_ENABLED(CONFIG_BT_FEATURE_DEBUG)
+#define BT_DBG(fmt, ...) bt_dbg(fmt "\n", ##__VA_ARGS__)
+#else
#define BT_DBG(fmt, ...) pr_debug(fmt "\n", ##__VA_ARGS__)
+#endif
+
+#define bt_dev_name(hdev) ((hdev) ? (hdev)->name : "null")
#define bt_dev_info(hdev, fmt, ...) \
- BT_INFO("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+ BT_INFO("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
#define bt_dev_warn(hdev, fmt, ...) \
- BT_WARN("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+ BT_WARN("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
#define bt_dev_err(hdev, fmt, ...) \
- BT_ERR("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+ BT_ERR("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
#define bt_dev_dbg(hdev, fmt, ...) \
- BT_DBG("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+ BT_DBG("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
#define bt_dev_warn_ratelimited(hdev, fmt, ...) \
- bt_warn_ratelimited("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+ bt_warn_ratelimited("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
#define bt_dev_err_ratelimited(hdev, fmt, ...) \
- bt_err_ratelimited("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+ bt_err_ratelimited("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
/* Connection and socket states */
enum {
@@ -250,6 +363,7 @@ struct bt_sock {
struct sock *parent;
unsigned long flags;
void (*skb_msg_name)(struct sk_buff *, void *, int *);
+ void (*skb_put_cmsg)(struct sk_buff *, struct msghdr *, struct sock *);
};
enum {
@@ -276,7 +390,7 @@ int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
__poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
-int bt_sock_wait_ready(struct sock *sk, unsigned long flags);
+int bt_sock_wait_ready(struct sock *sk, unsigned int msg_flags);
void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh);
void bt_accept_unlink(struct sock *sk);
@@ -299,6 +413,10 @@ struct l2cap_ctrl {
struct l2cap_chan *chan;
};
+struct sco_ctrl {
+ u8 pkt_status;
+};
+
struct hci_dev;
typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
@@ -309,6 +427,7 @@ typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status,
#define HCI_REQ_SKB BIT(1)
struct hci_ctrl {
+ struct sock *sk;
u16 opcode;
u8 req_flags;
u8 req_event;
@@ -318,6 +437,11 @@ struct hci_ctrl {
};
};
+struct mgmt_ctrl {
+ struct hci_dev *hdev;
+ u16 opcode;
+};
+
struct bt_skb_cb {
u8 pkt_type;
u8 force_active;
@@ -325,7 +449,9 @@ struct bt_skb_cb {
u8 incoming:1;
union {
struct l2cap_ctrl l2cap;
+ struct sco_ctrl sco;
struct hci_ctrl hci;
+ struct mgmt_ctrl mgmt;
};
};
#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
@@ -333,6 +459,8 @@ struct bt_skb_cb {
#define hci_skb_pkt_type(skb) bt_cb((skb))->pkt_type
#define hci_skb_expect(skb) bt_cb((skb))->expect
#define hci_skb_opcode(skb) bt_cb((skb))->hci.opcode
+#define hci_skb_event(skb) bt_cb((skb))->hci.req_event
+#define hci_skb_sk(skb) bt_cb((skb))->hci.sk
static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how)
{
@@ -372,7 +500,73 @@ out:
return NULL;
}
+/* Shall not be called with lock_sock held */
+static inline struct sk_buff *bt_skb_sendmsg(struct sock *sk,
+ struct msghdr *msg,
+ size_t len, size_t mtu,
+ size_t headroom, size_t tailroom)
+{
+ struct sk_buff *skb;
+ size_t size = min_t(size_t, len, mtu);
+ int err;
+
+ skb = bt_skb_send_alloc(sk, size + headroom + tailroom,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ if (!skb)
+ return ERR_PTR(err);
+
+ skb_reserve(skb, headroom);
+ skb_tailroom_reserve(skb, mtu, tailroom);
+
+ if (!copy_from_iter_full(skb_put(skb, size), size, &msg->msg_iter)) {
+ kfree_skb(skb);
+ return ERR_PTR(-EFAULT);
+ }
+
+ skb->priority = sk->sk_priority;
+
+ return skb;
+}
+
+/* Similar to bt_skb_sendmsg but can split the msg into multiple fragments
+ * accourding to the MTU.
+ */
+static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk,
+ struct msghdr *msg,
+ size_t len, size_t mtu,
+ size_t headroom, size_t tailroom)
+{
+ struct sk_buff *skb, **frag;
+
+ skb = bt_skb_sendmsg(sk, msg, len, mtu, headroom, tailroom);
+ if (IS_ERR(skb))
+ return skb;
+
+ len -= skb->len;
+ if (!len)
+ return skb;
+
+ /* Add remaining data over MTU as continuation fragments */
+ frag = &skb_shinfo(skb)->frag_list;
+ while (len) {
+ struct sk_buff *tmp;
+
+ tmp = bt_skb_sendmsg(sk, msg, len, mtu, headroom, tailroom);
+ if (IS_ERR(tmp))
+ return skb;
+
+ len -= tmp->len;
+
+ *frag = tmp;
+ frag = &(*frag)->next;
+ }
+
+ return skb;
+}
+
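A usage sketch for bt_skb_sendmmsg() above, assuming a caller that owns its own transmit path; the function, MTU and head/tailroom values below are illustrative rather than an existing in-tree caller:

	static int example_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
	{
		struct sk_buff *skb;

		/* 512-byte MTU, no extra head/tailroom: placeholder values */
		skb = bt_skb_sendmmsg(sk, msg, len, 512, 0, 0);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* skb carries any over-MTU remainder on its frag_list;
		 * hand it to the protocol's transmit queue here.
		 */
		return 0;
	}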
int bt_to_errno(u16 code);
+__u8 bt_status(int err);
void hci_sock_set_flag(struct sock *sk, int nr);
void hci_sock_clear_flag(struct sock *sk, int nr);
@@ -410,8 +604,30 @@ static inline void sco_exit(void)
}
#endif
+#if IS_ENABLED(CONFIG_BT_LE)
+int iso_init(void);
+int iso_exit(void);
+bool iso_enabled(void);
+#else
+static inline int iso_init(void)
+{
+ return 0;
+}
+
+static inline int iso_exit(void)
+{
+ return 0;
+}
+
+static inline bool iso_enabled(void)
+{
+ return false;
+}
+#endif
+
int mgmt_init(void);
void mgmt_exit(void);
+void mgmt_cleanup(struct sock *sk);
void bt_sock_reclassify_lock(struct sock *sk, int proto);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 6293bdd7d862..e004ba04a9ae 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -36,7 +36,7 @@
#define HCI_MAX_AMP_ASSOC_SIZE 672
-#define HCI_MAX_CSB_DATA_SIZE 252
+#define HCI_MAX_CPB_DATA_SIZE 252
/* HCI dev events */
#define HCI_DEV_REG 1
@@ -53,6 +53,9 @@
#define HCI_NOTIFY_CONN_ADD 1
#define HCI_NOTIFY_CONN_DEL 2
#define HCI_NOTIFY_VOICE_SETTING 3
+#define HCI_NOTIFY_ENABLE_SCO_CVSD 4
+#define HCI_NOTIFY_ENABLE_SCO_TRANSP 5
+#define HCI_NOTIFY_DISABLE_SCO 6
/* HCI bus types */
#define HCI_VIRTUAL 0
@@ -65,6 +68,7 @@
#define HCI_SPI 7
#define HCI_I2C 8
#define HCI_SMD 9
+#define HCI_VIRTIO 10
/* HCI controller types */
#define HCI_PRIMARY 0x00
@@ -115,7 +119,7 @@ enum {
* wrongly configured local features that will require forcing
* them to enable this mode. Getting RSSI information with the
* inquiry responses is preferred since it allows for a better
- * user expierence.
+ * user experience.
*
* This quirk must be set before hci_register_dev is called.
*/
@@ -142,7 +146,7 @@ enum {
/* When this quirk is set, an external configuration step
* is required and will be indicated with the controller
- * configuation.
+ * configuration.
*
* This quirk can be set before hci_register_dev is called or
* during the hdev->setup vendor callback.
@@ -205,6 +209,60 @@ enum {
*
*/
HCI_QUIRK_NON_PERSISTENT_SETUP,
+
+ /* When this quirk is set, wide band speech is supported by
+ * the driver. Since no reliable mechanism exists to report
+ * this from the hardware, a driver flag is used to convey
+ * this support.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
+
+ /* When this quirk is set, the controller has validated that
+ * LE states reported through the HCI_LE_READ_SUPPORTED_STATES are
+ * valid. This mechanism is necessary as many controllers have
+ * been seen as having trouble initiating a connectable
+ * advertisement despite the state combination being reported as
+ * supported.
+ */
+ HCI_QUIRK_VALID_LE_STATES,
+
+ /*
+ * When this quirk is set, then the hci_suspend_notifier is not
+ * registered. This is intended for devices which drop completely
+ * from the bus on system-suspend and which will show up as a new
+ * HCI after resume.
+ */
+ HCI_QUIRK_NO_SUSPEND_NOTIFIER,
+
+ /*
+ * When this quirk is set, LE tx power is not queried on startup
+ * and the min/max tx power values default to HCI_TX_POWER_INVALID.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER,
+
+ /* When this quirk is set, HCI_OP_SET_EVENT_FLT requests with
+ * HCI_FLT_CLEAR_ALL are ignored and event filtering is
+ * completely avoided. A subset of the CSR controller
+ * clones struggle with this and instantly lock up.
+ *
+ * Note that devices using this must (separately) disable
+ * runtime suspend, because event filtering takes place there.
+ */
+ HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL,
+
+ /*
+ * When this quirk is set, use of the
+ * HCI_OP_ENHANCED_SETUP_SYNC_CONN command to set up SCO
+ * connections is disabled.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN,
};
/* HCI device flags */
@@ -236,6 +294,7 @@ enum {
HCI_MGMT_DEV_CLASS_EVENTS,
HCI_MGMT_LOCAL_NAME_EVENTS,
HCI_MGMT_OOB_DATA_EVENTS,
+ HCI_MGMT_EXP_FEATURE_EVENTS,
};
/*
@@ -257,6 +316,7 @@ enum {
HCI_USER_CHANNEL,
HCI_EXT_CONFIGURED,
HCI_LE_ADV,
+ HCI_LE_PER_ADV,
HCI_LE_SCAN,
HCI_SSP_ENABLED,
HCI_SC_ENABLED,
@@ -277,13 +337,26 @@ enum {
HCI_FAST_CONNECTABLE,
HCI_BREDR_ENABLED,
HCI_LE_SCAN_INTERRUPTED,
+ HCI_WIDEBAND_SPEECH_ENABLED,
+ HCI_EVENT_FILTER_CONFIGURED,
+ HCI_PA_SYNC,
HCI_DUT_MODE,
HCI_VENDOR_DIAG,
HCI_FORCE_BREDR_SMP,
HCI_FORCE_STATIC_ADDR,
HCI_LL_RPA_RESOLUTION,
+ HCI_ENABLE_LL_PRIVACY,
HCI_CMD_PENDING,
+ HCI_FORCE_NO_MITM,
+ HCI_QUALITY_REPORT,
+ HCI_OFFLOAD_CODECS_ENABLED,
+ HCI_LE_SIMULTANEOUS_ROLES,
+ HCI_CMD_DRAIN_WORKQUEUE,
+
+ HCI_MESH_EXPERIMENTAL,
+ HCI_MESH,
+ HCI_MESH_SENDING,
__HCI_NUM_FLAGS,
};
@@ -293,6 +366,7 @@ enum {
#define HCI_PAIRING_TIMEOUT msecs_to_jiffies(60000) /* 60 seconds */
#define HCI_INIT_TIMEOUT msecs_to_jiffies(10000) /* 10 seconds */
#define HCI_CMD_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+#define HCI_NCMD_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */
#define HCI_ACL_TX_TIMEOUT msecs_to_jiffies(45000) /* 45 seconds */
#define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
#define HCI_POWER_OFF_TIMEOUT msecs_to_jiffies(5000) /* 5 seconds */
@@ -418,6 +492,7 @@ enum {
#define LMP_EXT_INQ 0x01
#define LMP_SIMUL_LE_BR 0x02
#define LMP_SIMPLE_PAIR 0x08
+#define LMP_ERR_DATA_REPORTING 0x20
#define LMP_NO_FLUSH 0x40
#define LMP_LSTO 0x01
@@ -425,10 +500,10 @@ enum {
#define LMP_EXTFEATURES 0x80
/* Extended LMP features */
-#define LMP_CSB_MASTER 0x01
-#define LMP_CSB_SLAVE 0x02
-#define LMP_SYNC_TRAIN 0x04
-#define LMP_SYNC_SCAN 0x08
+#define LMP_CPB_CENTRAL 0x01
+#define LMP_CPB_PERIPHERAL 0x02
+#define LMP_SYNC_TRAIN 0x04
+#define LMP_SYNC_SCAN 0x08
#define LMP_SC 0x01
#define LMP_PING 0x02
@@ -442,18 +517,19 @@ enum {
/* LE features */
#define HCI_LE_ENCRYPTION 0x01
#define HCI_LE_CONN_PARAM_REQ_PROC 0x02
-#define HCI_LE_SLAVE_FEATURES 0x08
+#define HCI_LE_PERIPHERAL_FEATURES 0x08
#define HCI_LE_PING 0x10
#define HCI_LE_DATA_LEN_EXT 0x20
-#define HCI_LE_PHY_2M 0x01
-#define HCI_LE_PHY_CODED 0x08
-#define HCI_LE_EXT_ADV 0x10
+#define HCI_LE_LL_PRIVACY 0x40
#define HCI_LE_EXT_SCAN_POLICY 0x80
#define HCI_LE_PHY_2M 0x01
#define HCI_LE_PHY_CODED 0x08
+#define HCI_LE_EXT_ADV 0x10
+#define HCI_LE_PERIODIC_ADV 0x20
#define HCI_LE_CHAN_SEL_ALG2 0x40
-#define HCI_LE_CIS_MASTER 0x10
-#define HCI_LE_CIS_SLAVE 0x20
+#define HCI_LE_CIS_CENTRAL 0x10
+#define HCI_LE_CIS_PERIPHERAL 0x20
+#define HCI_LE_ISO_BROADCASTER 0x40
/* Connection modes */
#define HCI_CM_ACTIVE 0x0000
@@ -510,6 +586,7 @@ enum {
#define HCI_ERROR_CONNECTION_TIMEOUT 0x08
#define HCI_ERROR_REJ_LIMITED_RESOURCES 0x0d
#define HCI_ERROR_REJ_BAD_ADDR 0x0f
+#define HCI_ERROR_INVALID_PARAMETERS 0x12
#define HCI_ERROR_REMOTE_USER_TERM 0x13
#define HCI_ERROR_REMOTE_LOW_RESOURCES 0x14
#define HCI_ERROR_REMOTE_POWER_OFF 0x15
@@ -518,6 +595,7 @@ enum {
#define HCI_ERROR_INVALID_LL_PARAMS 0x1e
#define HCI_ERROR_UNSPECIFIED 0x1f
#define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c
+#define HCI_ERROR_CANCELLED_BY_HOST 0x44
/* Flow control modes */
#define HCI_FLOW_CTL_MODE_PACKET_BASED 0x00
@@ -546,6 +624,7 @@ enum {
#define EIR_SSP_RAND_R192 0x0F /* Simple Pairing Randomizer R-192 */
#define EIR_DEVICE_ID 0x10 /* device ID */
#define EIR_APPEARANCE 0x19 /* Device appearance */
+#define EIR_SERVICE_DATA 0x16 /* Service Data */
#define EIR_LE_BDADDR 0x1B /* LE Bluetooth device address */
#define EIR_LE_ROLE 0x1C /* LE role */
#define EIR_SSP_HASH_C256 0x1D /* Simple Pairing Hash C-256 */
@@ -825,23 +904,57 @@ struct hci_cp_logical_link_cancel {
__u8 flow_spec_id;
} __packed;
+#define HCI_OP_ENHANCED_SETUP_SYNC_CONN 0x043d
+struct hci_coding_format {
+ __u8 id;
+ __le16 cid;
+ __le16 vid;
+} __packed;
+
+struct hci_cp_enhanced_setup_sync_conn {
+ __le16 handle;
+ __le32 tx_bandwidth;
+ __le32 rx_bandwidth;
+ struct hci_coding_format tx_coding_format;
+ struct hci_coding_format rx_coding_format;
+ __le16 tx_codec_frame_size;
+ __le16 rx_codec_frame_size;
+ __le32 in_bandwidth;
+ __le32 out_bandwidth;
+ struct hci_coding_format in_coding_format;
+ struct hci_coding_format out_coding_format;
+ __le16 in_coded_data_size;
+ __le16 out_coded_data_size;
+ __u8 in_pcm_data_format;
+ __u8 out_pcm_data_format;
+ __u8 in_pcm_sample_payload_msb_pos;
+ __u8 out_pcm_sample_payload_msb_pos;
+ __u8 in_data_path;
+ __u8 out_data_path;
+ __u8 in_transport_unit_size;
+ __u8 out_transport_unit_size;
+ __le16 max_latency;
+ __le16 pkt_type;
+ __u8 retrans_effort;
+} __packed;
+
struct hci_rp_logical_link_cancel {
__u8 status;
__u8 phy_handle;
__u8 flow_spec_id;
} __packed;
-#define HCI_OP_SET_CSB 0x0441
-struct hci_cp_set_csb {
+#define HCI_OP_SET_CPB 0x0441
+struct hci_cp_set_cpb {
__u8 enable;
__u8 lt_addr;
__u8 lpo_allowed;
__le16 packet_type;
__le16 interval_min;
__le16 interval_max;
- __le16 csb_sv_tout;
+ __le16 cpb_sv_tout;
} __packed;
-struct hci_rp_set_csb {
+struct hci_rp_set_cpb {
__u8 status;
__u8 lt_addr;
__le16 interval;
@@ -932,10 +1045,14 @@ struct hci_cp_sniff_subrate {
#define HCI_OP_RESET 0x0c03
#define HCI_OP_SET_EVENT_FLT 0x0c05
-struct hci_cp_set_event_flt {
- __u8 flt_type;
- __u8 cond_type;
- __u8 condition[0];
+#define HCI_SET_EVENT_FLT_SIZE 9
+struct hci_cp_set_event_filter {
+ __u8 flt_type;
+ __u8 cond_type;
+ struct {
+ bdaddr_t bdaddr;
+ __u8 auto_accept;
+ } __packed addr_conn_flt;
} __packed;
/* Filter types */
@@ -949,8 +1066,9 @@ struct hci_cp_set_event_flt {
#define HCI_CONN_SETUP_ALLOW_BDADDR 0x02
/* CONN_SETUP Conditions */
-#define HCI_CONN_SETUP_AUTO_OFF 0x01
-#define HCI_CONN_SETUP_AUTO_ON 0x02
+#define HCI_CONN_SETUP_AUTO_OFF 0x01
+#define HCI_CONN_SETUP_AUTO_ON 0x02
+#define HCI_CONN_SETUP_AUTO_ON_WITH_RS 0x03
#define HCI_OP_READ_STORED_LINK_KEY 0x0c0d
struct hci_cp_read_stored_link_key {
@@ -959,8 +1077,8 @@ struct hci_cp_read_stored_link_key {
} __packed;
struct hci_rp_read_stored_link_key {
__u8 status;
- __u8 max_keys;
- __u8 num_keys;
+ __le16 max_keys;
+ __le16 num_keys;
} __packed;
#define HCI_OP_DELETE_STORED_LINK_KEY 0x0c12
@@ -970,7 +1088,7 @@ struct hci_cp_delete_stored_link_key {
} __packed;
struct hci_rp_delete_stored_link_key {
__u8 status;
- __u8 num_keys;
+ __le16 num_keys;
} __packed;
#define HCI_MAX_NAME_LENGTH 248
@@ -1086,6 +1204,19 @@ struct hci_rp_read_inq_rsp_tx_power {
__s8 tx_power;
} __packed;
+#define HCI_OP_READ_DEF_ERR_DATA_REPORTING 0x0c5a
+ #define ERR_DATA_REPORTING_DISABLED 0x00
+ #define ERR_DATA_REPORTING_ENABLED 0x01
+struct hci_rp_read_def_err_data_reporting {
+ __u8 status;
+ __u8 err_data_reporting;
+} __packed;
+
+#define HCI_OP_WRITE_DEF_ERR_DATA_REPORTING 0x0c5b
+struct hci_cp_write_def_err_data_reporting {
+ __u8 err_data_reporting;
+} __packed;
+
#define HCI_OP_SET_EVENT_MASK_PAGE_2 0x0c63
#define HCI_OP_READ_LOCATION_DATA 0x0c64
@@ -1120,14 +1251,14 @@ struct hci_rp_delete_reserved_lt_addr {
__u8 lt_addr;
} __packed;
-#define HCI_OP_SET_CSB_DATA 0x0c76
-struct hci_cp_set_csb_data {
+#define HCI_OP_SET_CPB_DATA 0x0c76
+struct hci_cp_set_cpb_data {
__u8 lt_addr;
__u8 fragment;
__u8 data_length;
- __u8 data[HCI_MAX_CSB_DATA_SIZE];
+ __u8 data[HCI_MAX_CPB_DATA_SIZE];
} __packed;
-struct hci_rp_set_csb_data {
+struct hci_rp_set_cpb_data {
__u8 status;
__u8 lt_addr;
} __packed;
@@ -1186,6 +1317,14 @@ struct hci_rp_read_local_oob_ext_data {
__u8 rand256[16];
} __packed;
+#define HCI_CONFIGURE_DATA_PATH 0x0c83
+struct hci_op_configure_data_path {
+ __u8 direction;
+ __u8 data_path_id;
+ __u8 vnd_len;
+ __u8 vnd_data[];
+} __packed;
+
#define HCI_OP_READ_LOCAL_VERSION 0x1001
struct hci_rp_read_local_version {
__u8 status;
@@ -1243,6 +1382,83 @@ struct hci_rp_read_data_block_size {
} __packed;
#define HCI_OP_READ_LOCAL_CODECS 0x100b
+struct hci_std_codecs {
+ __u8 num;
+ __u8 codec[];
+} __packed;
+
+struct hci_vnd_codec {
+ /* company id */
+ __le16 cid;
+ /* vendor codec id */
+ __le16 vid;
+} __packed;
+
+struct hci_vnd_codecs {
+ __u8 num;
+ struct hci_vnd_codec codec[];
+} __packed;
+
+struct hci_rp_read_local_supported_codecs {
+ __u8 status;
+ struct hci_std_codecs std_codecs;
+ struct hci_vnd_codecs vnd_codecs;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_PAIRING_OPTS 0x100c
+struct hci_rp_read_local_pairing_opts {
+ __u8 status;
+ __u8 pairing_opts;
+ __u8 max_key_size;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_CODECS_V2 0x100d
+struct hci_std_codec_v2 {
+ __u8 id;
+ __u8 transport;
+} __packed;
+
+struct hci_std_codecs_v2 {
+ __u8 num;
+ struct hci_std_codec_v2 codec[];
+} __packed;
+
+struct hci_vnd_codec_v2 {
+ __u8 id;
+ __le16 cid;
+ __le16 vid;
+ __u8 transport;
+} __packed;
+
+struct hci_vnd_codecs_v2 {
+ __u8 num;
+ struct hci_vnd_codec_v2 codec[];
+} __packed;
+
+struct hci_rp_read_local_supported_codecs_v2 {
+ __u8 status;
+ struct hci_std_codecs_v2 std_codecs;
+ struct hci_vnd_codecs_v2 vendor_codecs;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_CODEC_CAPS 0x100e
+struct hci_op_read_local_codec_caps {
+ __u8 id;
+ __le16 cid;
+ __le16 vid;
+ __u8 transport;
+ __u8 direction;
+} __packed;
+
+struct hci_codec_caps {
+ __u8 len;
+ __u8 data[];
+} __packed;
+
+struct hci_rp_read_local_codec_caps {
+ __u8 status;
+ __u8 num_caps;
+} __packed;
#define HCI_OP_READ_PAGE_SCAN_ACTIVITY 0x0c1b
struct hci_rp_read_page_scan_activity {
@@ -1335,7 +1551,7 @@ struct hci_rp_read_local_amp_assoc {
__u8 status;
__u8 phy_handle;
__le16 rem_len;
- __u8 frag[0];
+ __u8 frag[];
} __packed;
#define HCI_OP_WRITE_REMOTE_AMP_ASSOC 0x140b
@@ -1343,7 +1559,7 @@ struct hci_cp_write_remote_amp_assoc {
__u8 phy_handle;
__le16 len_so_far;
__le16 rem_len;
- __u8 frag[0];
+ __u8 frag[];
} __packed;
struct hci_rp_write_remote_amp_assoc {
__u8 status;
@@ -1434,7 +1650,7 @@ struct hci_cp_le_set_scan_enable {
} __packed;
#define HCI_LE_USE_PEER_ADDR 0x00
-#define HCI_LE_USE_WHITELIST 0x01
+#define HCI_LE_USE_ACCEPT_LIST 0x01
#define HCI_OP_LE_CREATE_CONN 0x200d
struct hci_cp_le_create_conn {
@@ -1454,22 +1670,22 @@ struct hci_cp_le_create_conn {
#define HCI_OP_LE_CREATE_CONN_CANCEL 0x200e
-#define HCI_OP_LE_READ_WHITE_LIST_SIZE 0x200f
-struct hci_rp_le_read_white_list_size {
+#define HCI_OP_LE_READ_ACCEPT_LIST_SIZE 0x200f
+struct hci_rp_le_read_accept_list_size {
__u8 status;
__u8 size;
} __packed;
-#define HCI_OP_LE_CLEAR_WHITE_LIST 0x2010
+#define HCI_OP_LE_CLEAR_ACCEPT_LIST 0x2010
-#define HCI_OP_LE_ADD_TO_WHITE_LIST 0x2011
-struct hci_cp_le_add_to_white_list {
+#define HCI_OP_LE_ADD_TO_ACCEPT_LIST 0x2011
+struct hci_cp_le_add_to_accept_list {
__u8 bdaddr_type;
bdaddr_t bdaddr;
} __packed;
-#define HCI_OP_LE_DEL_FROM_WHITE_LIST 0x2012
-struct hci_cp_le_del_from_white_list {
+#define HCI_OP_LE_DEL_FROM_ACCEPT_LIST 0x2012
+struct hci_cp_le_del_from_accept_list {
__u8 bdaddr_type;
bdaddr_t bdaddr;
} __packed;
@@ -1588,6 +1804,8 @@ struct hci_rp_le_read_resolv_list_size {
#define HCI_OP_LE_SET_ADDR_RESOLV_ENABLE 0x202d
+#define HCI_OP_LE_SET_RPA_TIMEOUT 0x202e
+
#define HCI_OP_LE_READ_MAX_DATA_LEN 0x202f
struct hci_rp_le_read_max_data_len {
__u8 status;
@@ -1613,7 +1831,7 @@ struct hci_cp_le_set_ext_scan_params {
__u8 own_addr_type;
__u8 filter_policy;
__u8 scanning_phys;
- __u8 data[0];
+ __u8 data[];
} __packed;
#define LE_SCAN_PHY_1M 0x01
@@ -1641,7 +1859,7 @@ struct hci_cp_le_ext_create_conn {
__u8 peer_addr_type;
bdaddr_t peer_addr;
__u8 phys;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct hci_cp_le_ext_conn_param {
@@ -1655,6 +1873,22 @@ struct hci_cp_le_ext_conn_param {
__le16 max_ce_len;
} __packed;
+#define HCI_OP_LE_PA_CREATE_SYNC 0x2044
+struct hci_cp_le_pa_create_sync {
+ __u8 options;
+ __u8 sid;
+ __u8 addr_type;
+ bdaddr_t addr;
+ __le16 skip;
+ __le16 sync_timeout;
+ __u8 sync_cte_type;
+} __packed;
+
+#define HCI_OP_LE_PA_TERM_SYNC 0x2046
+struct hci_cp_le_pa_term_sync {
+ __le16 handle;
+} __packed;
+
#define HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS 0x203b
struct hci_rp_le_read_num_supported_adv_sets {
__u8 status;
@@ -1689,26 +1923,21 @@ struct hci_rp_le_set_ext_adv_params {
__u8 tx_power;
} __packed;
-#define HCI_OP_LE_SET_EXT_ADV_ENABLE 0x2039
-struct hci_cp_le_set_ext_adv_enable {
- __u8 enable;
- __u8 num_of_sets;
- __u8 data[0];
-} __packed;
-
struct hci_cp_ext_adv_set {
__u8 handle;
__le16 duration;
__u8 max_events;
} __packed;
+#define HCI_MAX_EXT_AD_LENGTH 251
+
#define HCI_OP_LE_SET_EXT_ADV_DATA 0x2037
struct hci_cp_le_set_ext_adv_data {
__u8 handle;
__u8 operation;
__u8 frag_pref;
__u8 length;
- __u8 data[HCI_MAX_AD_LENGTH];
+ __u8 data[];
} __packed;
#define HCI_OP_LE_SET_EXT_SCAN_RSP_DATA 0x2038
@@ -1717,13 +1946,46 @@ struct hci_cp_le_set_ext_scan_rsp_data {
__u8 operation;
__u8 frag_pref;
__u8 length;
- __u8 data[HCI_MAX_AD_LENGTH];
+ __u8 data[];
+} __packed;
+
+#define HCI_OP_LE_SET_EXT_ADV_ENABLE 0x2039
+struct hci_cp_le_set_ext_adv_enable {
+ __u8 enable;
+ __u8 num_of_sets;
+ __u8 data[];
+} __packed;
+
+#define HCI_OP_LE_SET_PER_ADV_PARAMS 0x203e
+struct hci_cp_le_set_per_adv_params {
+ __u8 handle;
+ __le16 min_interval;
+ __le16 max_interval;
+ __le16 periodic_properties;
+} __packed;
+
+#define HCI_MAX_PER_AD_LENGTH 252
+
+#define HCI_OP_LE_SET_PER_ADV_DATA 0x203f
+struct hci_cp_le_set_per_adv_data {
+ __u8 handle;
+ __u8 operation;
+ __u8 length;
+ __u8 data[];
+} __packed;
+
+#define HCI_OP_LE_SET_PER_ADV_ENABLE 0x2040
+struct hci_cp_le_set_per_adv_enable {
+ __u8 enable;
+ __u8 handle;
} __packed;
#define LE_SET_ADV_DATA_OP_COMPLETE 0x03
#define LE_SET_ADV_DATA_NO_FRAG 0x01
+#define HCI_OP_LE_REMOVE_ADV_SET 0x203c
+
#define HCI_OP_LE_CLEAR_ADV_SETS 0x203d
#define HCI_OP_LE_SET_ADV_SET_RAND_ADDR 0x2035
@@ -1732,6 +1994,23 @@ struct hci_cp_le_set_adv_set_rand_addr {
bdaddr_t bdaddr;
} __packed;
+#define HCI_OP_LE_READ_TRANSMIT_POWER 0x204b
+struct hci_rp_le_read_transmit_power {
+ __u8 status;
+ __s8 min_le_tx_power;
+ __s8 max_le_tx_power;
+} __packed;
+
+#define HCI_NETWORK_PRIVACY 0x00
+#define HCI_DEVICE_PRIVACY 0x01
+
+#define HCI_OP_LE_SET_PRIVACY_MODE 0x204e
+struct hci_cp_le_set_privacy_mode {
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __u8 mode;
+} __packed;
+
#define HCI_OP_LE_READ_BUFFER_SIZE_V2 0x2060
struct hci_rp_le_read_buffer_size_v2 {
__u8 status;
@@ -1757,32 +2036,32 @@ struct hci_rp_le_read_iso_tx_sync {
#define HCI_OP_LE_SET_CIG_PARAMS 0x2062
struct hci_cis_params {
__u8 cis_id;
- __le16 m_sdu;
- __le16 s_sdu;
- __u8 m_phy;
- __u8 s_phy;
- __u8 m_rtn;
- __u8 s_rtn;
+ __le16 c_sdu;
+ __le16 p_sdu;
+ __u8 c_phy;
+ __u8 p_phy;
+ __u8 c_rtn;
+ __u8 p_rtn;
} __packed;
struct hci_cp_le_set_cig_params {
__u8 cig_id;
- __u8 m_interval[3];
- __u8 s_interval[3];
+ __u8 c_interval[3];
+ __u8 p_interval[3];
__u8 sca;
__u8 packing;
__u8 framing;
- __le16 m_latency;
- __le16 s_latency;
+ __le16 c_latency;
+ __le16 p_latency;
__u8 num_cis;
- struct hci_cis_params cis[0];
+ struct hci_cis_params cis[];
} __packed;
struct hci_rp_le_set_cig_params {
__u8 status;
__u8 cig_id;
__u8 num_handles;
- __le16 handle[0];
+ __le16 handle[];
} __packed;
#define HCI_OP_LE_CREATE_CIS 0x2064
@@ -1793,7 +2072,7 @@ struct hci_cis {
struct hci_cp_le_create_cis {
__u8 num_cis;
- struct hci_cis cis[0];
+ struct hci_cis cis[];
} __packed;
#define HCI_OP_LE_REMOVE_CIG 0x2065
@@ -1812,7 +2091,78 @@ struct hci_cp_le_reject_cis {
__u8 reason;
} __packed;
+#define HCI_OP_LE_CREATE_BIG 0x2068
+struct hci_bis {
+ __u8 sdu_interval[3];
+ __le16 sdu;
+ __le16 latency;
+ __u8 rtn;
+ __u8 phy;
+ __u8 packing;
+ __u8 framing;
+ __u8 encryption;
+ __u8 bcode[16];
+} __packed;
+
+struct hci_cp_le_create_big {
+ __u8 handle;
+ __u8 adv_handle;
+ __u8 num_bis;
+ struct hci_bis bis;
+} __packed;
+
+#define HCI_OP_LE_TERM_BIG 0x206a
+struct hci_cp_le_term_big {
+ __u8 handle;
+ __u8 reason;
+} __packed;
+
+#define HCI_OP_LE_BIG_CREATE_SYNC 0x206b
+struct hci_cp_le_big_create_sync {
+ __u8 handle;
+ __le16 sync_handle;
+ __u8 encryption;
+ __u8 bcode[16];
+ __u8 mse;
+ __le16 timeout;
+ __u8 num_bis;
+ __u8 bis[0];
+} __packed;
+
+#define HCI_OP_LE_BIG_TERM_SYNC 0x206c
+struct hci_cp_le_big_term_sync {
+ __u8 handle;
+} __packed;
+
+#define HCI_OP_LE_SETUP_ISO_PATH 0x206e
+struct hci_cp_le_setup_iso_path {
+ __le16 handle;
+ __u8 direction;
+ __u8 path;
+ __u8 codec;
+ __le16 codec_cid;
+ __le16 codec_vid;
+ __u8 delay[3];
+ __u8 codec_cfg_len;
+ __u8 codec_cfg[0];
+} __packed;
+
+struct hci_rp_le_setup_iso_path {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_SET_HOST_FEATURE 0x2074
+struct hci_cp_le_set_host_feature {
+ __u8 bit_number;
+ __u8 bit_value;
+} __packed;
+
/* ---- HCI Events ---- */
+struct hci_ev_status {
+ __u8 status;
+} __packed;
+
#define HCI_EV_INQUIRY_COMPLETE 0x01
#define HCI_EV_INQUIRY_RESULT 0x02
@@ -1825,6 +2175,11 @@ struct inquiry_info {
__le16 clock_offset;
} __packed;
+struct hci_ev_inquiry_result {
+ __u8 num;
+ struct inquiry_info info[];
+};
+
#define HCI_EV_CONN_COMPLETE 0x03
struct hci_ev_conn_complete {
__u8 status;
@@ -1936,8 +2291,8 @@ struct hci_comp_pkts_info {
} __packed;
struct hci_ev_num_comp_pkts {
- __u8 num_hndl;
- struct hci_comp_pkts_info handles[0];
+ __u8 num;
+ struct hci_comp_pkts_info handles[];
} __packed;
#define HCI_EV_MODE_CHANGE 0x14
@@ -1986,7 +2341,7 @@ struct hci_ev_pscan_rep_mode {
} __packed;
#define HCI_EV_INQUIRY_RESULT_WITH_RSSI 0x22
-struct inquiry_info_with_rssi {
+struct inquiry_info_rssi {
bdaddr_t bdaddr;
__u8 pscan_rep_mode;
__u8 pscan_period_mode;
@@ -1994,7 +2349,7 @@ struct inquiry_info_with_rssi {
__le16 clock_offset;
__s8 rssi;
} __packed;
-struct inquiry_info_with_rssi_and_pscan_mode {
+struct inquiry_info_rssi_pscan {
bdaddr_t bdaddr;
__u8 pscan_rep_mode;
__u8 pscan_period_mode;
@@ -2003,6 +2358,10 @@ struct inquiry_info_with_rssi_and_pscan_mode {
__le16 clock_offset;
__s8 rssi;
} __packed;
+struct hci_ev_inquiry_result_rssi {
+ __u8 num;
+ __u8 data[];
+} __packed;
#define HCI_EV_REMOTE_EXT_FEATURES 0x23
struct hci_ev_remote_ext_features {
@@ -2057,6 +2416,11 @@ struct extended_inquiry_info {
__u8 data[240];
} __packed;
+struct hci_ev_ext_inquiry_result {
+ __u8 num;
+ struct extended_inquiry_info info[];
+} __packed;
+
#define HCI_EV_KEY_REFRESH_COMPLETE 0x30
struct hci_ev_key_refresh_complete {
__u8 status;
@@ -2170,7 +2534,7 @@ struct hci_comp_blocks_info {
struct hci_ev_num_comp_blocks {
__le16 num_blocks;
__u8 num_hndl;
- struct hci_comp_blocks_info handles[0];
+ struct hci_comp_blocks_info handles[];
} __packed;
#define HCI_EV_SYNC_TRAIN_COMPLETE 0x4F
@@ -2178,7 +2542,7 @@ struct hci_ev_sync_train_complete {
__u8 status;
} __packed;
-#define HCI_EV_SLAVE_PAGE_RESP_TIMEOUT 0x54
+#define HCI_EV_PERIPHERAL_PAGE_RESP_TIMEOUT 0x54
#define HCI_EV_LE_CONN_COMPLETE 0x01
struct hci_ev_le_conn_complete {
@@ -2217,16 +2581,23 @@ struct hci_ev_le_conn_complete {
#define LE_EXT_ADV_SCAN_RSP 0x0008
#define LE_EXT_ADV_LEGACY_PDU 0x0010
-#define ADDR_LE_DEV_PUBLIC 0x00
-#define ADDR_LE_DEV_RANDOM 0x01
+#define ADDR_LE_DEV_PUBLIC 0x00
+#define ADDR_LE_DEV_RANDOM 0x01
+#define ADDR_LE_DEV_PUBLIC_RESOLVED 0x02
+#define ADDR_LE_DEV_RANDOM_RESOLVED 0x03
#define HCI_EV_LE_ADVERTISING_REPORT 0x02
struct hci_ev_le_advertising_info {
- __u8 evt_type;
+ __u8 type;
__u8 bdaddr_type;
bdaddr_t bdaddr;
__u8 length;
- __u8 data[0];
+ __u8 data[];
+} __packed;
+
+struct hci_ev_le_advertising_report {
+ __u8 num;
+ struct hci_ev_le_advertising_info info[];
} __packed;
#define HCI_EV_LE_CONN_UPDATE_COMPLETE 0x03
@@ -2272,7 +2643,7 @@ struct hci_ev_le_data_len_change {
#define HCI_EV_LE_DIRECT_ADV_REPORT 0x0B
struct hci_ev_le_direct_adv_info {
- __u8 evt_type;
+ __u8 type;
__u8 bdaddr_type;
bdaddr_t bdaddr;
__u8 direct_addr_type;
@@ -2280,6 +2651,11 @@ struct hci_ev_le_direct_adv_info {
__s8 rssi;
} __packed;
+struct hci_ev_le_direct_adv_report {
+ __u8 num;
+ struct hci_ev_le_direct_adv_info info[];
+} __packed;
+
#define HCI_EV_LE_PHY_UPDATE_COMPLETE 0x0c
struct hci_ev_le_phy_update_complete {
__u8 status;
@@ -2289,8 +2665,8 @@ struct hci_ev_le_phy_update_complete {
} __packed;
#define HCI_EV_LE_EXT_ADV_REPORT 0x0d
-struct hci_ev_le_ext_adv_report {
- __le16 evt_type;
+struct hci_ev_le_ext_adv_info {
+ __le16 type;
__u8 bdaddr_type;
bdaddr_t bdaddr;
__u8 primary_phy;
@@ -2298,11 +2674,28 @@ struct hci_ev_le_ext_adv_report {
__u8 sid;
__u8 tx_power;
__s8 rssi;
- __le16 interval;
- __u8 direct_addr_type;
+ __le16 interval;
+ __u8 direct_addr_type;
bdaddr_t direct_addr;
- __u8 length;
- __u8 data[0];
+ __u8 length;
+ __u8 data[];
+} __packed;
+
+struct hci_ev_le_ext_adv_report {
+ __u8 num;
+ struct hci_ev_le_ext_adv_info info[];
+} __packed;
+
+#define HCI_EV_LE_PA_SYNC_ESTABLISHED 0x0e
+struct hci_ev_le_pa_sync_established {
+ __u8 status;
+ __le16 handle;
+ __u8 sid;
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __u8 phy;
+ __le16 interval;
+ __u8 clock_accuracy;
} __packed;
#define HCI_EV_LE_ENHANCED_CONN_COMPLETE 0x0a
@@ -2334,17 +2727,17 @@ struct hci_evt_le_cis_established {
__le16 handle;
__u8 cig_sync_delay[3];
__u8 cis_sync_delay[3];
- __u8 m_latency[3];
- __u8 s_latency[3];
- __u8 m_phy;
- __u8 s_phy;
+ __u8 c_latency[3];
+ __u8 p_latency[3];
+ __u8 c_phy;
+ __u8 p_phy;
__u8 nse;
- __u8 m_bn;
- __u8 s_bn;
- __u8 m_ft;
- __u8 s_ft;
- __le16 m_mtu;
- __le16 s_mtu;
+ __u8 c_bn;
+ __u8 p_bn;
+ __u8 c_ft;
+ __u8 p_ft;
+ __le16 c_mtu;
+ __le16 p_mtu;
__le16 interval;
} __packed;
@@ -2356,13 +2749,62 @@ struct hci_evt_le_cis_req {
__u8 cis_id;
} __packed;
+#define HCI_EVT_LE_CREATE_BIG_COMPLETE 0x1b
+struct hci_evt_le_create_big_complete {
+ __u8 status;
+ __u8 handle;
+ __u8 sync_delay[3];
+ __u8 transport_delay[3];
+ __u8 phy;
+ __u8 nse;
+ __u8 bn;
+ __u8 pto;
+ __u8 irc;
+ __le16 max_pdu;
+ __le16 interval;
+ __u8 num_bis;
+ __le16 bis_handle[];
+} __packed;
+
+#define HCI_EVT_LE_BIG_SYNC_ESTABILISHED 0x1d
+struct hci_evt_le_big_sync_estabilished {
+ __u8 status;
+ __u8 handle;
+ __u8 latency[3];
+ __u8 nse;
+ __u8 bn;
+ __u8 pto;
+ __u8 irc;
+ __le16 max_pdu;
+ __le16 interval;
+ __u8 num_bis;
+ __le16 bis[];
+} __packed;
+
+#define HCI_EVT_LE_BIG_INFO_ADV_REPORT 0x22
+struct hci_evt_le_big_info_adv_report {
+ __le16 sync_handle;
+ __u8 num_bis;
+ __u8 nse;
+ __le16 iso_interval;
+ __u8 bn;
+ __u8 pto;
+ __u8 irc;
+ __le16 max_pdu;
+ __u8 sdu_interval[3];
+ __le16 max_sdu;
+ __u8 phy;
+ __u8 framing;
+ __u8 encryption;
+} __packed;
+
#define HCI_EV_VENDOR 0xff
/* Internal events generated by Bluetooth stack */
#define HCI_EV_STACK_INTERNAL 0xfd
struct hci_ev_stack_internal {
__u16 type;
- __u8 data[0];
+ __u8 data[];
} __packed;
#define HCI_EV_SI_DEVICE 0x01
@@ -2409,7 +2851,7 @@ struct hci_sco_hdr {
struct hci_iso_hdr {
__le16 handle;
__le16 dlen;
- __u8 data[0];
+ __u8 data[];
} __packed;
/* ISO data packet status flags */
@@ -2465,4 +2907,15 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
#define hci_iso_data_len(h) ((h) & 0x3fff)
#define hci_iso_data_flags(h) ((h) >> 14)
+/* codec transport types */
+#define HCI_TRANSPORT_SCO_ESCO 0x01
+
+/* le24 support */
+static inline void hci_cpu_to_le24(__u32 val, __u8 dst[3])
+{
+ dst[0] = val & 0xff;
+ dst[1] = (val & 0xff00) >> 8;
+ dst[2] = (val & 0xff0000) >> 16;
+}
+
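For symmetry with hci_cpu_to_le24() above, the reverse conversion is the obvious mirror; this helper is shown for illustration and is not part of the header:

	static inline __u32 example_hci_le24_to_cpu(const __u8 src[3])
	{
		return src[0] | (src[1] << 8) | (src[2] << 16);
	}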
#endif /* __HCI_H */
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 89ecf0a80aa1..c54bc71254af 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -25,15 +25,20 @@
#ifndef __HCI_CORE_H
#define __HCI_CORE_H
+#include <linux/idr.h>
#include <linux/leds.h>
#include <linux/rculist.h>
#include <net/bluetooth/hci.h>
+#include <net/bluetooth/hci_sync.h>
#include <net/bluetooth/hci_sock.h>
/* HCI priority */
#define HCI_PRIO_MAX 7
+/* HCI maximum id value */
+#define HCI_MAX_ID 10000
+
/* HCI Core structures */
struct inquiry_data {
bdaddr_t bdaddr;
@@ -86,6 +91,34 @@ struct discovery_state {
u8 (*uuids)[16];
unsigned long scan_start;
unsigned long scan_duration;
+ unsigned long name_resolve_timeout;
+};
+
+#define SUSPEND_NOTIFIER_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+
+enum suspend_tasks {
+ SUSPEND_PAUSE_DISCOVERY,
+ SUSPEND_UNPAUSE_DISCOVERY,
+
+ SUSPEND_PAUSE_ADVERTISING,
+ SUSPEND_UNPAUSE_ADVERTISING,
+
+ SUSPEND_SCAN_DISABLE,
+ SUSPEND_SCAN_ENABLE,
+ SUSPEND_DISCONNECTING,
+
+ SUSPEND_POWERING_DOWN,
+
+ SUSPEND_PREPARE_NOTIFIER,
+
+ SUSPEND_SET_ADV_FILTER,
+ __SUSPEND_NUM_TASKS
+};
+
+enum suspended_state {
+ BT_RUNNING = 0,
+ BT_SUSPEND_DISCONNECT,
+ BT_SUSPEND_CONFIGURE_WAKE,
};
struct hci_conn_hash {
@@ -93,8 +126,9 @@ struct hci_conn_hash {
unsigned int acl_num;
unsigned int amp_num;
unsigned int sco_num;
+ unsigned int iso_num;
unsigned int le_num;
- unsigned int le_num_slave;
+ unsigned int le_num_peripheral;
};
struct bdaddr_list {
@@ -103,6 +137,17 @@ struct bdaddr_list {
u8 bdaddr_type;
};
+struct codec_list {
+ struct list_head list;
+ u8 id;
+ __u16 cid;
+ __u16 vid;
+ u8 transport;
+ u8 num_caps;
+ u32 len;
+ struct hci_codec_caps caps[];
+};
+
struct bdaddr_list_with_irk {
struct list_head list;
bdaddr_t bdaddr;
@@ -111,6 +156,20 @@ struct bdaddr_list_with_irk {
u8 local_irk[16];
};
+/* Bitmask of connection flags */
+enum hci_conn_flags {
+ HCI_CONN_FLAG_REMOTE_WAKEUP = 1,
+ HCI_CONN_FLAG_DEVICE_PRIVACY = 2,
+};
+typedef u8 hci_conn_flags_t;
+
+struct bdaddr_list_with_flags {
+ struct list_head list;
+ bdaddr_t bdaddr;
+ u8 bdaddr_type;
+ hci_conn_flags_t flags;
+};
+
struct bt_uuid {
struct list_head list;
u8 uuid[16];
@@ -176,17 +235,26 @@ struct oob_data {
struct adv_info {
struct list_head list;
- bool pending;
+ bool enabled;
+ bool pending;
+ bool periodic;
+ __u8 mesh;
__u8 instance;
__u32 flags;
__u16 timeout;
__u16 remaining_time;
__u16 duration;
__u16 adv_data_len;
- __u8 adv_data[HCI_MAX_AD_LENGTH];
+ __u8 adv_data[HCI_MAX_EXT_AD_LENGTH];
+ bool adv_data_changed;
__u16 scan_rsp_len;
- __u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
+ __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH];
+ bool scan_rsp_changed;
+ __u16 per_adv_data_len;
+ __u8 per_adv_data[HCI_MAX_PER_AD_LENGTH];
__s8 tx_power;
+ __u32 min_interval;
+ __u32 max_interval;
bdaddr_t random_addr;
bool rpa_expired;
struct delayed_work rpa_expired_cb;
@@ -195,8 +263,65 @@ struct adv_info {
#define HCI_MAX_ADV_INSTANCES 5
#define HCI_DEFAULT_ADV_DURATION 2
+#define HCI_ADV_TX_POWER_NO_PREFERENCE 0x7F
+
+#define DATA_CMP(_d1, _l1, _d2, _l2) \
+ (_l1 == _l2 ? memcmp(_d1, _d2, _l1) : _l1 - _l2)
+
+#define ADV_DATA_CMP(_adv, _data, _len) \
+ DATA_CMP((_adv)->adv_data, (_adv)->adv_data_len, _data, _len)
+
+#define SCAN_RSP_CMP(_adv, _data, _len) \
+ DATA_CMP((_adv)->scan_rsp_data, (_adv)->scan_rsp_len, _data, _len)
+
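Reading the comparison macros above: DATA_CMP() orders by content when the lengths match and by length difference otherwise, so a zero result means the two blobs are identical. A hypothetical caller tracking advertising-data changes might do:

	if (ADV_DATA_CMP(adv, new_data, new_len) != 0)
		adv->adv_data_changed = true;	/* new payload differs */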
+struct monitored_device {
+ struct list_head list;
+
+ bdaddr_t bdaddr;
+ __u8 addr_type;
+ __u16 handle;
+ bool notified;
+};
+
+struct adv_pattern {
+ struct list_head list;
+ __u8 ad_type;
+ __u8 offset;
+ __u8 length;
+ __u8 value[HCI_MAX_AD_LENGTH];
+};
+
+struct adv_rssi_thresholds {
+ __s8 low_threshold;
+ __s8 high_threshold;
+ __u16 low_threshold_timeout;
+ __u16 high_threshold_timeout;
+ __u8 sampling_period;
+};
+
+struct adv_monitor {
+ struct list_head patterns;
+ struct adv_rssi_thresholds rssi;
+ __u16 handle;
+
+ enum {
+ ADV_MONITOR_STATE_NOT_REGISTERED,
+ ADV_MONITOR_STATE_REGISTERED,
+ ADV_MONITOR_STATE_OFFLOADED
+ } state;
+};
+
+#define HCI_MIN_ADV_MONITOR_HANDLE 1
+#define HCI_MAX_ADV_MONITOR_NUM_HANDLES 32
+#define HCI_MAX_ADV_MONITOR_NUM_PATTERNS 16
+#define HCI_ADV_MONITOR_EXT_NONE 1
+#define HCI_ADV_MONITOR_EXT_MSFT 2
+
#define HCI_MAX_SHORT_NAME_LENGTH 10
+#define HCI_CONN_HANDLE_UNSET 0xffff
+#define HCI_CONN_HANDLE_MAX 0x0eff
+
/* Min encryption key size to match with SMP */
#define HCI_MIN_ENC_KEY_SIZE 7
@@ -244,10 +369,12 @@ struct hci_dev {
__u8 max_page;
__u8 features[HCI_MAX_PAGES][8];
__u8 le_features[8];
- __u8 le_white_list_size;
+ __u8 le_accept_list_size;
__u8 le_resolv_list_size;
__u8 le_num_of_adv_sets;
__u8 le_states[8];
+ __u8 mesh_ad_types[16];
+ __u8 mesh_send_ref;
__u8 commands[64];
__u8 hci_ver;
__u16 hci_rev;
@@ -256,10 +383,11 @@ struct hci_dev {
__u16 lmp_subver;
__u16 voice_setting;
__u8 num_iac;
- __u8 stored_max_keys;
- __u8 stored_num_keys;
+ __u16 stored_max_keys;
+ __u16 stored_num_keys;
__u8 io_capability;
__s8 inq_tx_power;
+ __u8 err_data_reporting;
__u16 page_scan_interval;
__u16 page_scan_window;
__u8 page_scan_type;
@@ -269,6 +397,14 @@ struct hci_dev {
__u8 le_scan_type;
__u16 le_scan_interval;
__u16 le_scan_window;
+ __u16 le_scan_int_suspend;
+ __u16 le_scan_window_suspend;
+ __u16 le_scan_int_discovery;
+ __u16 le_scan_window_discovery;
+ __u16 le_scan_int_adv_monitor;
+ __u16 le_scan_window_adv_monitor;
+ __u16 le_scan_int_connect;
+ __u16 le_scan_window_connect;
__u16 le_conn_min_interval;
__u16 le_conn_max_interval;
__u16 le_conn_latency;
@@ -286,15 +422,33 @@ struct hci_dev {
__u16 conn_info_max_age;
__u16 auth_payload_timeout;
__u8 min_enc_key_size;
+ __u8 max_enc_key_size;
+ __u8 pairing_opts;
__u8 ssp_debug_mode;
__u8 hw_error_code;
__u32 clock;
+ __u16 advmon_allowlist_duration;
+ __u16 advmon_no_filter_duration;
+ __u8 enable_advmon_interleave_scan;
__u16 devid_source;
__u16 devid_vendor;
__u16 devid_product;
__u16 devid_version;
+ __u8 def_page_scan_type;
+ __u16 def_page_scan_int;
+ __u16 def_page_scan_window;
+ __u8 def_inq_scan_type;
+ __u16 def_inq_scan_int;
+ __u16 def_inq_scan_window;
+ __u16 def_br_lsto;
+ __u16 def_page_timeout;
+ __u16 def_multi_adv_rotation_duration;
+ __u16 def_le_autoconnect_timeout;
+ __s8 min_le_tx_power;
+ __s8 max_le_tx_power;
+
__u16 pkt_type;
__u16 esco_type;
__u16 link_policy;
@@ -327,13 +481,16 @@ struct hci_dev {
unsigned int acl_cnt;
unsigned int sco_cnt;
unsigned int le_cnt;
+ unsigned int iso_cnt;
unsigned int acl_mtu;
unsigned int sco_mtu;
unsigned int le_mtu;
+ unsigned int iso_mtu;
unsigned int acl_pkts;
unsigned int sco_pkts;
unsigned int le_pkts;
+ unsigned int iso_pkts;
__u16 block_len;
__u16 block_mtu;
@@ -353,6 +510,11 @@ struct hci_dev {
struct work_struct power_on;
struct delayed_work power_off;
struct work_struct error_reset;
+ struct work_struct cmd_sync_work;
+ struct list_head cmd_sync_work_list;
+ struct mutex cmd_sync_work_lock;
+ struct work_struct cmd_sync_cancel_work;
+ struct work_struct reenable_adv_work;
__u16 discov_timeout;
struct delayed_work discov_off;
@@ -360,16 +522,12 @@ struct hci_dev {
struct delayed_work service_cache;
struct delayed_work cmd_timer;
+ struct delayed_work ncmd_timer;
struct work_struct rx_work;
struct work_struct cmd_work;
struct work_struct tx_work;
- struct work_struct discov_update;
- struct work_struct bg_scan_update;
- struct work_struct scan_update;
- struct work_struct connectable_update;
- struct work_struct discoverable_update;
struct delayed_work le_scan_disable;
struct delayed_work le_scan_restart;
@@ -378,6 +536,7 @@ struct hci_dev {
struct sk_buff_head cmd_q;
struct sk_buff *sent_cmd;
+ struct sk_buff *recv_event;
struct mutex req_lock;
wait_queue_head_t req_wait_q;
@@ -389,22 +548,39 @@ struct hci_dev {
void *smp_bredr_data;
struct discovery_state discovery;
+
+ int discovery_old_state;
+ bool discovery_paused;
+ int advertising_old_state;
+ bool advertising_paused;
+
+ struct notifier_block suspend_notifier;
+ enum suspended_state suspend_state_next;
+ enum suspended_state suspend_state;
+ bool scanning_paused;
+ bool suspended;
+ u8 wake_reason;
+ bdaddr_t wake_addr;
+ u8 wake_addr_type;
+
struct hci_conn_hash conn_hash;
+ struct list_head mesh_pending;
struct list_head mgmt_pending;
- struct list_head blacklist;
- struct list_head whitelist;
+ struct list_head reject_list;
+ struct list_head accept_list;
struct list_head uuids;
struct list_head link_keys;
struct list_head long_term_keys;
struct list_head identity_resolving_keys;
struct list_head remote_oob_data;
- struct list_head le_white_list;
+ struct list_head le_accept_list;
struct list_head le_resolv_list;
struct list_head le_conn_params;
struct list_head pend_le_conns;
struct list_head pend_le_reports;
struct list_head blocked_keys;
+ struct list_head local_codecs;
struct hci_dev_stats stat;
@@ -419,12 +595,15 @@ struct hci_dev {
struct rfkill *rfkill;
DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);
+ hci_conn_flags_t conn_flags;
__s8 adv_tx_power;
- __u8 adv_data[HCI_MAX_AD_LENGTH];
+ __u8 adv_data[HCI_MAX_EXT_AD_LENGTH];
__u8 adv_data_len;
- __u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
+ __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH];
__u8 scan_rsp_data_len;
+ __u8 per_adv_data[HCI_MAX_PER_AD_LENGTH];
+ __u8 per_adv_data_len;
struct list_head adv_instances;
unsigned int adv_instance_cnt;
@@ -432,15 +611,42 @@ struct hci_dev {
__u16 adv_instance_timeout;
struct delayed_work adv_instance_expire;
+ struct idr adv_monitors_idr;
+ unsigned int adv_monitors_cnt;
+
__u8 irk[16];
__u32 rpa_timeout;
struct delayed_work rpa_expired;
bdaddr_t rpa;
+ struct delayed_work mesh_send_done;
+
+ enum {
+ INTERLEAVE_SCAN_NONE,
+ INTERLEAVE_SCAN_NO_FILTER,
+ INTERLEAVE_SCAN_ALLOWLIST
+ } interleave_scan_state;
+
+ struct delayed_work interleave_scan;
+
+ struct list_head monitored_devices;
+ bool advmon_pend_notify;
+
#if IS_ENABLED(CONFIG_BT_LEDS)
struct led_trigger *power_led;
#endif
+#if IS_ENABLED(CONFIG_BT_MSFTEXT)
+ __u16 msft_opcode;
+ void *msft_data;
+ bool msft_curve_validity;
+#endif
+
+#if IS_ENABLED(CONFIG_BT_AOSPEXT)
+ bool aosp_capable;
+ bool aosp_quality_report;
+#endif
+
int (*open)(struct hci_dev *hdev);
int (*close)(struct hci_dev *hdev);
int (*flush)(struct hci_dev *hdev);
@@ -453,10 +659,23 @@ struct hci_dev {
int (*set_diag)(struct hci_dev *hdev, bool enable);
int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
void (*cmd_timeout)(struct hci_dev *hdev);
+ bool (*wakeup)(struct hci_dev *hdev);
+ int (*set_quality_report)(struct hci_dev *hdev, bool enable);
+ int (*get_data_path_id)(struct hci_dev *hdev, __u8 *data_path);
+ int (*get_codec_config_data)(struct hci_dev *hdev, __u8 type,
+ struct bt_codec *codec, __u8 *vnd_len,
+ __u8 **vnd_data);
};
#define HCI_PHY_HANDLE(handle) (handle & 0xff)
+enum conn_reasons {
+ CONN_REASON_PAIR_DEVICE,
+ CONN_REASON_L2CAP_CHAN,
+ CONN_REASON_SCO_CONNECT,
+ CONN_REASON_ISO_CONNECT,
+};
+
struct hci_conn {
struct list_head list;
@@ -470,7 +689,9 @@ struct hci_conn {
__u8 init_addr_type;
bdaddr_t resp_addr;
__u8 resp_addr_type;
+ __u8 adv_instance;
__u16 handle;
+ __u16 sync_handle;
__u16 state;
__u8 mode;
__u8 type;
@@ -501,13 +722,18 @@ struct hci_conn {
__u16 le_supv_timeout;
__u8 le_adv_data[HCI_MAX_AD_LENGTH];
__u8 le_adv_data_len;
+ __u8 le_per_adv_data[HCI_MAX_PER_AD_LENGTH];
+ __u8 le_per_adv_data_len;
__u8 le_tx_phy;
__u8 le_rx_phy;
__s8 rssi;
__s8 tx_power;
__s8 max_tx_power;
+ struct bt_iso_qos iso_qos;
unsigned long flags;
+ enum conn_reasons conn_reason;
+
__u32 clock;
__u16 clock_accuracy;
@@ -534,13 +760,17 @@ struct hci_conn {
struct hci_dev *hdev;
void *l2cap_data;
void *sco_data;
+ void *iso_data;
struct amp_mgr *amp_mgr;
struct hci_conn *link;
+ struct bt_codec codec;
void (*connect_cfm_cb) (struct hci_conn *conn, u8 status);
void (*security_cfm_cb) (struct hci_conn *conn, u8 status);
void (*disconn_cfm_cb) (struct hci_conn *conn, u8 reason);
+
+ void (*cleanup)(struct hci_conn *conn);
};
struct hci_chan {
@@ -550,6 +780,7 @@ struct hci_chan {
struct sk_buff_head data_q;
unsigned int sent;
__u8 state;
+ bool amp;
};
struct hci_conn_params {
@@ -575,6 +806,8 @@ struct hci_conn_params {
struct hci_conn *conn;
bool explicit_connect;
+ hci_conn_flags_t flags;
+ u8 privacy_mode;
};
extern struct list_head hci_dev_list;
@@ -594,9 +827,17 @@ extern struct mutex hci_cb_list_lock;
do { \
hci_dev_clear_flag(hdev, HCI_LE_SCAN); \
hci_dev_clear_flag(hdev, HCI_LE_ADV); \
+ hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);\
hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); \
+ hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); \
} while (0)
+#define hci_dev_le_state_simultaneous(hdev) \
+ (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) && \
+ (hdev->le_states[4] & 0x08) && /* Central */ \
+ (hdev->le_states[4] & 0x40) && /* Peripheral */ \
+ (hdev->le_states[3] & 0x10)) /* Simultaneous */
+
/* ----- HCI interface to upper protocols ----- */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
int l2cap_disconn_ind(struct hci_conn *hcon);
@@ -617,6 +858,21 @@ static inline void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
}
#endif
+#if IS_ENABLED(CONFIG_BT_LE)
+int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
+void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
+#else
+static inline int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ __u8 *flags)
+{
+ return 0;
+}
+static inline void iso_recv(struct hci_conn *hcon, struct sk_buff *skb,
+ u16 flags)
+{
+}
+#endif
+
/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */
#define INQUIRY_ENTRY_AGE_MAX (HZ*60) /* 60 seconds */
@@ -701,6 +957,7 @@ enum {
HCI_CONN_NEW_LINK_KEY,
HCI_CONN_SCANNING,
HCI_CONN_AUTH_FAILURE,
+ HCI_CONN_PER_ADV,
};
static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
@@ -731,12 +988,15 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
case LE_LINK:
h->le_num++;
if (c->role == HCI_ROLE_SLAVE)
- h->le_num_slave++;
+ h->le_num_peripheral++;
break;
case SCO_LINK:
case ESCO_LINK:
h->sco_num++;
break;
+ case ISO_LINK:
+ h->iso_num++;
+ break;
}
}
@@ -757,12 +1017,15 @@ static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
case LE_LINK:
h->le_num--;
if (c->role == HCI_ROLE_SLAVE)
- h->le_num_slave--;
+ h->le_num_peripheral--;
break;
case SCO_LINK:
case ESCO_LINK:
h->sco_num--;
break;
+ case ISO_LINK:
+ h->iso_num--;
+ break;
}
}
@@ -779,6 +1042,8 @@ static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
case SCO_LINK:
case ESCO_LINK:
return h->sco_num;
+ case ISO_LINK:
+ return h->iso_num;
default:
return 0;
}
@@ -788,7 +1053,7 @@ static inline unsigned int hci_conn_count(struct hci_dev *hdev)
{
struct hci_conn_hash *c = &hdev->conn_hash;
- return c->acl_num + c->amp_num + c->sco_num + c->le_num;
+ return c->acl_num + c->amp_num + c->sco_num + c->le_num + c->iso_num;
}
static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle)
@@ -811,6 +1076,29 @@ static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle)
return type;
}
+static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev,
+ bdaddr_t *ba,
+ __u8 big, __u8 bis)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (bacmp(&c->dst, ba) || c->type != ISO_LINK)
+ continue;
+
+ if (c->iso_qos.big == big && c->iso_qos.bis == bis) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+
static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
__u16 handle)
{
@@ -874,6 +1162,76 @@ static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev,
return NULL;
}
+static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev,
+ bdaddr_t *ba,
+ __u8 ba_type)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type != ISO_LINK)
+ continue;
+
+ if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *hci_conn_hash_lookup_cig(struct hci_dev *hdev,
+ __u8 handle)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type != ISO_LINK)
+ continue;
+
+ if (handle == c->iso_qos.cig) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev,
+ __u8 handle)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK)
+ continue;
+
+ if (handle == c->iso_qos.big) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
__u8 type, __u16 state)
{
@@ -894,6 +1252,27 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
return NULL;
}
+typedef void (*hci_conn_func_t)(struct hci_conn *conn, void *data);
+static inline void hci_conn_hash_list_state(struct hci_dev *hdev,
+ hci_conn_func_t func, __u8 type,
+ __u16 state, void *data)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ if (!func)
+ return;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == type && c->state == state)
+ func(c, data);
+ }
+
+ rcu_read_unlock();
+}
+
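Aside: a sketch of the new iterator — the callback runs under the RCU read
lock for every connection matching type and state. Both example_* functions
are hypothetical:

static void example_count_conn(struct hci_conn *conn, void *data)
{
	(*(unsigned int *)data)++;
}

static unsigned int example_count_connected_iso(struct hci_dev *hdev)
{
	unsigned int n = 0;

	hci_conn_hash_list_state(hdev, example_count_conn, ISO_LINK,
				 BT_CONNECTED, &n);
	return n;
}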
static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
{
struct hci_conn_hash *h = &hdev->conn_hash;
@@ -917,6 +1296,8 @@ static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
int hci_disconnect(struct hci_conn *conn, __u8 reason);
bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);
+bool hci_iso_setup_path(struct hci_conn *conn);
+int hci_le_create_cis(struct hci_conn *conn);
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
u8 role);
@@ -931,14 +1312,27 @@ struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level,
- u16 conn_timeout);
+ u16 conn_timeout,
+ enum conn_reasons conn_reason);
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
- u8 dst_type, u8 sec_level, u16 conn_timeout,
- u8 role, bdaddr_t *direct_rpa);
+ u8 dst_type, bool dst_resolved, u8 sec_level,
+ u16 conn_timeout, u8 role);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
- u8 sec_level, u8 auth_type);
+ u8 sec_level, u8 auth_type,
+ enum conn_reasons conn_reason);
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
- __u16 setting);
+ __u16 setting, struct bt_codec *codec);
+struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
+ __u8 dst_type, struct bt_iso_qos *qos);
+struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
+ __u8 dst_type, struct bt_iso_qos *qos);
+struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
+ __u8 dst_type, struct bt_iso_qos *qos,
+ __u8 data_len, __u8 *data);
+int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
+ __u8 sid);
+int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos,
+ __u16 sync_handle, __u8 num_bis, __u8 bis[]);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
@@ -947,7 +1341,7 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
-void hci_le_conn_failed(struct hci_conn *conn, u8 status);
+void hci_conn_failed(struct hci_conn *conn, u8 status);
/*
* hci_conn_get() and hci_conn_put() are used to control the life-time of an
@@ -1058,13 +1452,27 @@ static inline void hci_set_drvdata(struct hci_dev *hdev, void *data)
dev_set_drvdata(&hdev->dev, data);
}
+static inline void *hci_get_priv(struct hci_dev *hdev)
+{
+ return (char *)hdev + sizeof(*hdev);
+}
+
struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, u8 src_type);
-struct hci_dev *hci_alloc_dev(void);
+struct hci_dev *hci_alloc_dev_priv(int sizeof_priv);
+
+static inline struct hci_dev *hci_alloc_dev(void)
+{
+ return hci_alloc_dev_priv(0);
+}
+
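Aside: a driver-side sketch of the new private-data allocation; struct
example_priv is a hypothetical driver type:

struct example_priv {
	int irq;
};

static struct hci_dev *example_alloc(void)
{
	struct example_priv *priv;
	struct hci_dev *hdev;

	hdev = hci_alloc_dev_priv(sizeof(*priv));
	if (!hdev)
		return NULL;

	priv = hci_get_priv(hdev);	/* points just past struct hci_dev */
	priv->irq = -1;
	return hdev;
}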
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
void hci_unregister_dev(struct hci_dev *hdev);
+void hci_release_dev(struct hci_dev *hdev);
+int hci_register_suspend_notifier(struct hci_dev *hdev);
+int hci_unregister_suspend_notifier(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_reset_dev(struct hci_dev *hdev);
@@ -1072,6 +1480,21 @@ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb);
__printf(2, 3) void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...);
__printf(2, 3) void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...);
+
+static inline void hci_set_msft_opcode(struct hci_dev *hdev, __u16 opcode)
+{
+#if IS_ENABLED(CONFIG_BT_MSFTEXT)
+ hdev->msft_opcode = opcode;
+#endif
+}
+
+static inline void hci_set_aosp_capable(struct hci_dev *hdev)
+{
+#if IS_ENABLED(CONFIG_BT_AOSPEXT)
+ hdev->aosp_capable = true;
+#endif
+}
+
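Aside: these setters compile away when the corresponding Kconfig option is
off, so a vendor probe path can call them unconditionally. A sketch, with
0xFC1E standing in for whatever vendor-specific opcode the hardware uses:

static void example_vendor_setup(struct hci_dev *hdev)
{
	hci_set_msft_opcode(hdev, 0xFC1E);	/* illustrative opcode only */
	hci_set_aosp_capable(hdev);
}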
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_do_close(struct hci_dev *hdev);
@@ -1090,12 +1513,19 @@ struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list,
struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
struct list_head *list, bdaddr_t *bdaddr,
u8 type);
+struct bdaddr_list_with_flags *
+hci_bdaddr_list_lookup_with_flags(struct list_head *list, bdaddr_t *bdaddr,
+ u8 type);
int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type);
int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
- u8 type, u8 *peer_irk, u8 *local_irk);
+ u8 type, u8 *peer_irk, u8 *local_irk);
+int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
+ u8 type, u32 flags);
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type);
int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
- u8 type);
+ u8 type);
+int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
+ u8 type);
void hci_bdaddr_list_clear(struct list_head *list);
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
@@ -1149,12 +1579,30 @@ int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
void hci_adv_instances_clear(struct hci_dev *hdev);
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance);
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance);
-int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
+struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
+ u32 flags, u16 adv_data_len, u8 *adv_data,
+ u16 scan_rsp_len, u8 *scan_rsp_data,
+ u16 timeout, u16 duration, s8 tx_power,
+ u32 min_interval, u32 max_interval,
+ u8 mesh_handle);
+struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
+ u32 flags, u8 data_len, u8 *data,
+ u32 min_interval, u32 max_interval);
+int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
u16 adv_data_len, u8 *adv_data,
- u16 scan_rsp_len, u8 *scan_rsp_data,
- u16 timeout, u16 duration);
+ u16 scan_rsp_len, u8 *scan_rsp_data);
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired);
+u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance);
+bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance);
+
+void hci_adv_monitors_clear(struct hci_dev *hdev);
+void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor);
+int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor);
+int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle);
+int hci_remove_all_adv_monitor(struct hci_dev *hdev);
+bool hci_is_adv_monitoring(struct hci_dev *hdev);
+int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev);
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
@@ -1177,6 +1625,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_le_capable(dev) ((dev)->features[0][4] & LMP_LE)
#define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR)
#define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC)
+#define lmp_esco_2m_capable(dev) ((dev)->features[0][5] & LMP_EDR_ESCO_2M)
#define lmp_ext_inq_capable(dev) ((dev)->features[0][6] & LMP_EXT_INQ)
#define lmp_le_br_capable(dev) (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR))
#define lmp_ssp_capable(dev) ((dev)->features[0][6] & LMP_SIMPLE_PAIR)
@@ -1191,8 +1640,8 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_edr_5slot_capable(dev) ((dev)->features[0][5] & LMP_EDR_5SLOT)
/* ----- Extended LMP capabilities ----- */
-#define lmp_csb_master_capable(dev) ((dev)->features[2][0] & LMP_CSB_MASTER)
-#define lmp_csb_slave_capable(dev) ((dev)->features[2][0] & LMP_CSB_SLAVE)
+#define lmp_cpb_central_capable(dev) ((dev)->features[2][0] & LMP_CPB_CENTRAL)
+#define lmp_cpb_peripheral_capable(dev) ((dev)->features[2][0] & LMP_CPB_PERIPHERAL)
#define lmp_sync_train_capable(dev) ((dev)->features[2][0] & LMP_SYNC_TRAIN)
#define lmp_sync_scan_capable(dev) ((dev)->features[2][0] & LMP_SYNC_SCAN)
#define lmp_sc_capable(dev) ((dev)->features[2][1] & LMP_SC)
@@ -1208,6 +1657,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
!hci_dev_test_flag(dev, HCI_AUTO_OFF))
#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
hci_dev_test_flag(dev, HCI_SC_ENABLED))
+#define rpa_valid(dev) (bacmp(&dev->rpa, BDADDR_ANY) && \
+ !hci_dev_test_flag(dev, HCI_RPA_EXPIRED))
+#define adv_rpa_valid(adv) (bacmp(&adv->random_addr, BDADDR_ANY) && \
+ !adv->rpa_expired)
#define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M))
@@ -1218,6 +1671,22 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED))
+#define ll_privacy_capable(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY)
+
+/* Use LL Privacy based address resolution if supported */
+#define use_ll_privacy(dev) (ll_privacy_capable(dev) && \
+ hci_dev_test_flag(dev, HCI_ENABLE_LL_PRIVACY))
+
+#define privacy_mode_capable(dev) (use_ll_privacy(dev) && \
+ ((dev)->commands[39] & 0x04))
+
+/* Use enhanced synchronous connection if the command is supported and its
+ * quirk has not been set.
+ */
+#define enhanced_sync_conn_capable(dev) \
+ (((dev)->commands[29] & 0x08) && \
+ !test_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &(dev)->quirks))
+
/* Use ext scanning if set ext scan param and ext scan enable is supported */
#define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \
((dev)->commands[37] & 0x40))
@@ -1227,6 +1696,27 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
/* Extended advertising support */
#define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))
+/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 1789:
+ *
+ * C24: Mandatory if the LE Controller supports Connection State and either
+ * LE Feature (LL Privacy) or LE Feature (Extended Advertising) is supported
+ */
+#define use_enhanced_conn_complete(dev) (ll_privacy_capable(dev) || \
+ ext_adv_capable(dev))
+
+/* Periodic advertising support */
+#define per_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_PERIODIC_ADV))
+
+/* CIS Master/Slave and BIS support */
+#define iso_capable(dev) (cis_capable(dev) || bis_capable(dev))
+#define cis_capable(dev) \
+ (cis_central_capable(dev) || cis_peripheral_capable(dev))
+#define cis_central_capable(dev) \
+ ((dev)->le_features[3] & HCI_LE_CIS_CENTRAL)
+#define cis_peripheral_capable(dev) \
+ ((dev)->le_features[3] & HCI_LE_CIS_PERIPHERAL)
+#define bis_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_BROADCASTER)
+
/* ----- HCI protocols ----- */
#define HCI_PROTO_DEFER 0x01
@@ -1241,6 +1731,9 @@ static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
case ESCO_LINK:
return sco_connect_ind(hdev, bdaddr, flags);
+ case ISO_LINK:
+ return iso_connect_ind(hdev, bdaddr, flags);
+
default:
BT_ERR("unknown link type %d", type);
return -EINVAL;
@@ -1320,16 +1813,34 @@ static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
conn->security_cfm_cb(conn, status);
}
-static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
- __u8 encrypt)
+static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
{
struct hci_cb *cb;
+ __u8 encrypt;
+
+ if (conn->state == BT_CONFIG) {
+ if (!status)
+ conn->state = BT_CONNECTED;
+
+ hci_connect_cfm(conn, status);
+ hci_conn_drop(conn);
+ return;
+ }
- if (conn->sec_level == BT_SECURITY_SDP)
- conn->sec_level = BT_SECURITY_LOW;
+ if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
+ encrypt = 0x00;
+ else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
+ encrypt = 0x02;
+ else
+ encrypt = 0x01;
- if (conn->pending_sec_level > conn->sec_level)
- conn->sec_level = conn->pending_sec_level;
+ if (!status) {
+ if (conn->sec_level == BT_SECURITY_SDP)
+ conn->sec_level = BT_SECURITY_LOW;
+
+ if (conn->pending_sec_level > conn->sec_level)
+ conn->sec_level = conn->pending_sec_level;
+ }
mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
@@ -1367,43 +1878,6 @@ static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
mutex_unlock(&hci_cb_list_lock);
}
-static inline void *eir_get_data(u8 *eir, size_t eir_len, u8 type,
- size_t *data_len)
-{
- size_t parsed = 0;
-
- if (eir_len < 2)
- return NULL;
-
- while (parsed < eir_len - 1) {
- u8 field_len = eir[0];
-
- if (field_len == 0)
- break;
-
- parsed += field_len + 1;
-
- if (parsed > eir_len)
- break;
-
- if (eir[1] != type) {
- eir += field_len + 1;
- continue;
- }
-
- /* Zero length data */
- if (field_len == 1)
- return NULL;
-
- if (data_len)
- *data_len = field_len - 1;
-
- return &eir[2];
- }
-
- return NULL;
-}
-
static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)
{
if (addr_type != ADDR_LE_DEV_RANDOM)
@@ -1460,10 +1934,6 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);
-struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
- const void *param, u32 timeout);
-struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
- const void *param, u8 event, u32 timeout);
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param);
@@ -1471,11 +1941,12 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
const void *param);
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags);
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
+void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb);
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
+void *hci_recv_event_data(struct hci_dev *hdev, __u8 event);
-struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
- const void *param, u32 timeout);
+u32 hci_conn_get_phy(struct hci_conn *conn);
/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
@@ -1492,6 +1963,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event);
#define HCI_MGMT_NO_HDEV BIT(1)
#define HCI_MGMT_UNTRUSTED BIT(2)
#define HCI_MGMT_UNCONFIGURED BIT(3)
+#define HCI_MGMT_HDEV_OPTIONAL BIT(4)
struct hci_mgmt_handler {
int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
@@ -1529,8 +2001,15 @@ void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);
#define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04
#define DISCOV_BREDR_INQUIRY_LEN 0x08
#define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */
-#define DISCOV_LE_FAST_ADV_INT_MIN 100 /* msec */
-#define DISCOV_LE_FAST_ADV_INT_MAX 150 /* msec */
+#define DISCOV_LE_FAST_ADV_INT_MIN 0x00A0 /* 100 msec */
+#define DISCOV_LE_FAST_ADV_INT_MAX 0x00F0 /* 150 msec */
+#define DISCOV_LE_PER_ADV_INT_MIN 0x00A0 /* 200 msec */
+#define DISCOV_LE_PER_ADV_INT_MAX 0x00A0 /* 200 msec */
+#define DISCOV_LE_ADV_MESH_MIN 0x00A0 /* 100 msec */
+#define DISCOV_LE_ADV_MESH_MAX 0x00A0 /* 100 msec */
+#define INTERVAL_TO_MS(x) (((x) * 10) / 0x10)
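Aside: a worked example of the conversion — the macro assumes the 0.625 ms
granularity of LE advertising intervals, so INTERVAL_TO_MS(0x00A0) =
(160 * 10) / 16 = 100 ms. The periodic-advertising defines above use 1.25 ms
units instead (0x00A0 there is 200 ms), which this macro does not convert.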
+
+#define NAME_RESOLVE_DURATION msecs_to_jiffies(10240) /* 10.24 sec */
void mgmt_fill_version_info(void *ver);
int mgmt_new_settings(struct hci_dev *hdev);
@@ -1542,7 +2021,7 @@ void __mgmt_power_off(struct hci_dev *hdev);
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
bool persistent);
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
- u32 flags, u8 *name, u8 name_len);
+ u8 *name, u8 name_len);
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type, u8 reason,
bool mgmt_connected);
@@ -1573,7 +2052,6 @@ int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 entered);
void mgmt_auth_failed(struct hci_conn *conn, u8 status);
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
-void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
u8 status);
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
@@ -1581,10 +2059,14 @@ void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status);
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status);
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
- u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len);
+ u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
+ u64 instant);
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, s8 rssi, u8 *name, u8 name_len);
void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
+void mgmt_suspending(struct hci_dev *hdev, u8 state);
+void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
+ u8 addr_type);
bool mgmt_powering_down(struct hci_dev *hdev);
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent);
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent);
@@ -1595,15 +2077,17 @@ void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
u16 max_interval, u16 latency, u16 timeout);
void mgmt_smp_complete(struct hci_conn *conn, bool complete);
bool mgmt_get_connectable(struct hci_dev *hdev);
-void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status);
-void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status);
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev);
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
u8 instance);
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
u8 instance);
+void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle);
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip);
+void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
+ bdaddr_t *bdaddr, u8 addr_type);
+int hci_abort_conn(struct hci_conn *conn, u8 reason);
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
u16 to_multiplier);
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
@@ -1616,4 +2100,9 @@ void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
#define SCO_AIRMODE_CVSD 0x0000
#define SCO_AIRMODE_TRANSP 0x0003
+#define LOCAL_CODEC_ACL_MASK BIT(0)
+#define LOCAL_CODEC_SCO_MASK BIT(1)
+
+#define TRANSPORT_TYPE_MAX 0x04
+
#endif /* __HCI_CORE_H */
diff --git a/include/net/bluetooth/hci_sock.h b/include/net/bluetooth/hci_sock.h
index 8e9138acdae1..9949870f7d78 100644
--- a/include/net/bluetooth/hci_sock.h
+++ b/include/net/bluetooth/hci_sock.h
@@ -31,8 +31,8 @@
#define HCI_TIME_STAMP 3
/* CMSG flags */
-#define HCI_CMSG_DIR 0x0001
-#define HCI_CMSG_TSTAMP 0x0002
+#define HCI_CMSG_DIR 0x01
+#define HCI_CMSG_TSTAMP 0x02
struct sockaddr_hci {
sa_family_t hci_family;
@@ -144,19 +144,19 @@ struct hci_dev_req {
struct hci_dev_list_req {
__u16 dev_num;
- struct hci_dev_req dev_req[0]; /* hci_dev_req structures */
+ struct hci_dev_req dev_req[]; /* hci_dev_req structures */
};
struct hci_conn_list_req {
__u16 dev_id;
__u16 conn_num;
- struct hci_conn_info conn_info[0];
+ struct hci_conn_info conn_info[];
};
struct hci_conn_info_req {
bdaddr_t bdaddr;
__u8 type;
- struct hci_conn_info conn_info[0];
+ struct hci_conn_info conn_info[];
};
struct hci_auth_info_req {
diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
new file mode 100644
index 000000000000..17f5a4c32f36
--- /dev/null
+++ b/include/net/bluetooth/hci_sync.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * BlueZ - Bluetooth protocol stack for Linux
+ *
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+typedef int (*hci_cmd_sync_work_func_t)(struct hci_dev *hdev, void *data);
+typedef void (*hci_cmd_sync_work_destroy_t)(struct hci_dev *hdev, void *data,
+ int err);
+
+struct hci_cmd_sync_work_entry {
+ struct list_head list;
+ hci_cmd_sync_work_func_t func;
+ void *data;
+ hci_cmd_sync_work_destroy_t destroy;
+};
+
+struct adv_info;
+
+/* Functions with the "sync" suffix shall not be called with hdev->lock held,
+ * as they wait for the command to complete; in the meantime an event could be
+ * received which attempts to acquire hdev->lock, causing a deadlock.
+ */
+struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout);
+struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout);
+struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout);
+struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout,
+ struct sock *sk);
+int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout);
+int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout,
+ struct sock *sk);
+
+void hci_cmd_sync_init(struct hci_dev *hdev);
+void hci_cmd_sync_clear(struct hci_dev *hdev);
+void hci_cmd_sync_cancel(struct hci_dev *hdev, int err);
+void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err);
+
+int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+
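Aside: a sketch of deferring work onto the new cmd_sync machinery; the
queued callback runs from hdev->cmd_sync_work, where the *_sync helpers are
safe to call. The example_* names are hypothetical:

static int example_sync(struct hci_dev *hdev, void *data)
{
	/* Runs from the cmd_sync work item, not under hdev->lock */
	return hci_update_name_sync(hdev);
}

static int example_defer(struct hci_dev *hdev)
{
	return hci_cmd_sync_queue(hdev, example_sync, NULL, NULL);
}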
+int hci_update_eir_sync(struct hci_dev *hdev);
+int hci_update_class_sync(struct hci_dev *hdev);
+int hci_update_name_sync(struct hci_dev *hdev);
+int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode);
+
+int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
+ bool use_rpa, struct adv_info *adv_instance,
+ u8 *own_addr_type, bdaddr_t *rand_addr);
+
+int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
+ bool rpa, u8 *own_addr_type);
+
+int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance);
+int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance);
+int hci_update_adv_data(struct hci_dev *hdev, u8 instance);
+int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
+ bool force);
+
+int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance);
+int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance);
+int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance);
+int hci_enable_advertising_sync(struct hci_dev *hdev);
+int hci_enable_advertising(struct hci_dev *hdev);
+
+int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len,
+ u8 *data, u32 flags, u16 min_interval,
+ u16 max_interval, u16 sync_interval);
+
+int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
+ u8 instance, bool force);
+int hci_disable_advertising_sync(struct hci_dev *hdev);
+int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk,
+ u8 instance, bool force);
+int hci_update_passive_scan_sync(struct hci_dev *hdev);
+int hci_update_passive_scan(struct hci_dev *hdev);
+int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle);
+int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type);
+int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val);
+int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp);
+
+int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable);
+int hci_update_scan_sync(struct hci_dev *hdev);
+int hci_update_scan(struct hci_dev *hdev);
+
+int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul);
+int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
+ struct sock *sk);
+int hci_remove_ext_adv_instance(struct hci_dev *hdev, u8 instance);
+struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev, bool ext,
+ struct sock *sk);
+
+int hci_reset_sync(struct hci_dev *hdev);
+int hci_dev_open_sync(struct hci_dev *hdev);
+int hci_dev_close_sync(struct hci_dev *hdev);
+
+int hci_powered_update_sync(struct hci_dev *hdev);
+int hci_set_powered_sync(struct hci_dev *hdev, u8 val);
+
+int hci_update_discoverable_sync(struct hci_dev *hdev);
+int hci_update_discoverable(struct hci_dev *hdev);
+
+int hci_update_connectable_sync(struct hci_dev *hdev);
+
+int hci_start_discovery_sync(struct hci_dev *hdev);
+int hci_stop_discovery_sync(struct hci_dev *hdev);
+
+int hci_suspend_sync(struct hci_dev *hdev);
+int hci_resume_sync(struct hci_dev *hdev);
+
+struct hci_conn;
+
+int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason);
+
+int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn);
+
+int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle);
+
+int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason);
+
+int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle);
+
+int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle);
diff --git a/include/net/bluetooth/iso.h b/include/net/bluetooth/iso.h
new file mode 100644
index 000000000000..3f4fe8b78e1b
--- /dev/null
+++ b/include/net/bluetooth/iso.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * BlueZ - Bluetooth protocol stack for Linux
+ *
+ * Copyright (C) 2022 Intel Corporation
+ */
+
+#ifndef __ISO_H
+#define __ISO_H
+
+/* ISO defaults */
+#define ISO_DEFAULT_MTU 251
+#define ISO_MAX_NUM_BIS 0x1f
+
+/* ISO socket broadcast address */
+struct sockaddr_iso_bc {
+ bdaddr_t bc_bdaddr;
+ __u8 bc_bdaddr_type;
+ __u8 bc_sid;
+ __u8 bc_num_bis;
+ __u8 bc_bis[ISO_MAX_NUM_BIS];
+};
+
+/* ISO socket address */
+struct sockaddr_iso {
+ sa_family_t iso_family;
+ bdaddr_t iso_bdaddr;
+ __u8 iso_bdaddr_type;
+ struct sockaddr_iso_bc iso_bc[];
+};
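Aside: a userspace-view sketch of filling the new address structure;
AF_BLUETOOTH and BDADDR_LE_PUBLIC come from the existing Bluetooth headers,
and example_fill_iso_addr() is hypothetical:

static void example_fill_iso_addr(struct sockaddr_iso *sa,
				  const bdaddr_t *dst)
{
	memset(sa, 0, sizeof(*sa));
	sa->iso_family = AF_BLUETOOTH;
	memcpy(&sa->iso_bdaddr, dst, sizeof(*dst));
	sa->iso_bdaddr_type = BDADDR_LE_PUBLIC;
}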
+
+#endif /* __ISO_H */
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 093aedebdf0c..2f766e3437ce 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -47,6 +47,7 @@
#define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF
#define L2CAP_BREDR_MAX_PAYLOAD 1019 /* 3-DH5 packet */
#define L2CAP_LE_MIN_MTU 23
+#define L2CAP_ECRED_CONN_SCID_MAX 5
#define L2CAP_DISC_TIMEOUT msecs_to_jiffies(100)
#define L2CAP_DISC_REJ_TIMEOUT msecs_to_jiffies(5000)
@@ -119,6 +120,10 @@ struct l2cap_conninfo {
#define L2CAP_LE_CONN_REQ 0x14
#define L2CAP_LE_CONN_RSP 0x15
#define L2CAP_LE_CREDITS 0x16
+#define L2CAP_ECRED_CONN_REQ 0x17
+#define L2CAP_ECRED_CONN_RSP 0x18
+#define L2CAP_ECRED_RECONF_REQ 0x19
+#define L2CAP_ECRED_RECONF_RSP 0x1a
/* L2CAP extended feature mask */
#define L2CAP_FEAT_FLOWCTL 0x00000001
@@ -202,6 +207,7 @@ struct l2cap_hdr {
__le16 len;
__le16 cid;
} __packed;
+#define L2CAP_LEN_SIZE 2
#define L2CAP_HDR_SIZE 4
#define L2CAP_ENH_HDR_SIZE 6
#define L2CAP_EXT_HDR_SIZE 8
@@ -290,6 +296,8 @@ struct l2cap_conn_rsp {
#define L2CAP_CR_LE_ENCRYPTION 0x0008
#define L2CAP_CR_LE_INVALID_SCID 0x0009
#define L2CAP_CR_LE_SCID_IN_USE 0X000A
+#define L2CAP_CR_LE_UNACCEPT_PARAMS 0x000B
+#define L2CAP_CR_LE_INVALID_PARAMS 0x000C
/* connect/create channel status */
#define L2CAP_CS_NO_INFO 0x0000
@@ -299,14 +307,14 @@ struct l2cap_conn_rsp {
struct l2cap_conf_req {
__le16 dcid;
__le16 flags;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct l2cap_conf_rsp {
__le16 scid;
__le16 flags;
__le16 result;
- __u8 data[0];
+ __u8 data[];
} __packed;
#define L2CAP_CONF_SUCCESS 0x0000
@@ -322,7 +330,7 @@ struct l2cap_conf_rsp {
struct l2cap_conf_opt {
__u8 type;
__u8 len;
- __u8 val[0];
+ __u8 val[];
} __packed;
#define L2CAP_CONF_OPT_SIZE 2
@@ -359,6 +367,7 @@ struct l2cap_conf_rfc {
* ever be used in the BR/EDR configuration phase.
*/
#define L2CAP_MODE_LE_FLOWCTL 0x80
+#define L2CAP_MODE_EXT_FLOWCTL 0x81
struct l2cap_conf_efs {
__u8 id;
@@ -392,7 +401,7 @@ struct l2cap_info_req {
struct l2cap_info_rsp {
__le16 type;
__le16 result;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct l2cap_create_chan_req {
@@ -483,6 +492,40 @@ struct l2cap_le_credits {
__le16 credits;
} __packed;
+#define L2CAP_ECRED_MIN_MTU 64
+#define L2CAP_ECRED_MIN_MPS 64
+#define L2CAP_ECRED_MAX_CID 5
+
+struct l2cap_ecred_conn_req {
+ __le16 psm;
+ __le16 mtu;
+ __le16 mps;
+ __le16 credits;
+ __le16 scid[];
+} __packed;
+
+struct l2cap_ecred_conn_rsp {
+ __le16 mtu;
+ __le16 mps;
+ __le16 credits;
+ __le16 result;
+ __le16 dcid[];
+} __packed;
+
+struct l2cap_ecred_reconf_req {
+ __le16 mtu;
+ __le16 mps;
+ __le16 scid[];
+} __packed;
+
+#define L2CAP_RECONF_SUCCESS 0x0000
+#define L2CAP_RECONF_INVALID_MTU 0x0001
+#define L2CAP_RECONF_INVALID_MPS 0x0002
+
+struct l2cap_ecred_reconf_rsp {
+ __le16 result;
+} __packed;
+
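Aside: the ECRED request and response structures end in flexible arrays of
CIDs, one per channel, capped at L2CAP_ECRED_MAX_CID. A hypothetical length
helper as a sketch:

static size_t example_ecred_conn_req_len(unsigned int num_scid)
{
	return sizeof(struct l2cap_ecred_conn_req) +
	       num_scid * sizeof(__le16);
}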
/* ----- L2CAP channels and connections ----- */
struct l2cap_seq_list {
__u16 head;
@@ -620,9 +663,12 @@ struct l2cap_ops {
void (*suspend) (struct l2cap_chan *chan);
void (*set_shutdown) (struct l2cap_chan *chan);
long (*get_sndtimeo) (struct l2cap_chan *chan);
+ struct pid *(*get_peer_pid) (struct l2cap_chan *chan);
struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
unsigned long hdr_len,
unsigned long len, int nb);
+ int (*filter) (struct l2cap_chan *chan,
+ struct sk_buff *skb);
};
struct l2cap_conn {
@@ -724,6 +770,7 @@ enum {
FLAG_EFS_ENABLE,
FLAG_DEFER_SETUP,
FLAG_LE_CONN_REQ_SENT,
+ FLAG_ECRED_CONN_REQ_SENT,
FLAG_PENDING_SECURITY,
FLAG_HOLD_HCI_CONN,
};
@@ -800,6 +847,7 @@ enum {
};
void l2cap_chan_hold(struct l2cap_chan *c);
+struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c);
void l2cap_chan_put(struct l2cap_chan *c);
static inline void l2cap_chan_lock(struct l2cap_chan *chan)
@@ -917,12 +965,14 @@ static inline long l2cap_chan_no_get_sndtimeo(struct l2cap_chan *chan)
}
extern bool disable_ertm;
+extern bool enable_ecred;
int l2cap_init_sockets(void);
void l2cap_cleanup_sockets(void);
bool l2cap_is_socket(struct socket *sock);
void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan);
+void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan);
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan);
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm);
@@ -932,6 +982,7 @@ struct l2cap_chan *l2cap_chan_create(void);
void l2cap_chan_close(struct l2cap_chan *chan, int reason);
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
bdaddr_t *dst, u8 dst_type);
+int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu);
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator);
@@ -939,6 +990,9 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan);
int l2cap_ertm_init(struct l2cap_chan *chan);
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
+typedef void (*l2cap_chan_func_t)(struct l2cap_chan *chan, void *data);
+void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
+ void *data);
void l2cap_chan_del(struct l2cap_chan *chan, int err);
void l2cap_send_conn_req(struct l2cap_chan *chan);
void l2cap_move_start(struct l2cap_chan *chan);
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index a90666af05bd..743f6f59dff8 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -52,6 +52,12 @@ struct mgmt_hdr {
__le16 len;
} __packed;
+struct mgmt_tlv {
+ __le16 type;
+ __u8 length;
+ __u8 value[];
+} __packed;
+
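Aside: mgmt_tlv entries are meant to be walked sequentially, each advancing
by sizeof(header) + length. A hedged, bounds-checked parsing sketch with a
hypothetical helper name:

static const struct mgmt_tlv *example_next_tlv(const u8 *buf, size_t len,
					       size_t *off)
{
	const struct mgmt_tlv *tlv;

	if (len < *off || len - *off < sizeof(*tlv))
		return NULL;

	tlv = (const struct mgmt_tlv *)(buf + *off);

	if (len - *off - sizeof(*tlv) < tlv->length)
		return NULL;

	*off += sizeof(*tlv) + tlv->length;
	return tlv;
}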
struct mgmt_addr_info {
bdaddr_t bdaddr;
__u8 type;
@@ -70,14 +76,14 @@ struct mgmt_rp_read_version {
struct mgmt_rp_read_commands {
__le16 num_commands;
__le16 num_events;
- __le16 opcodes[0];
+ __le16 opcodes[];
} __packed;
#define MGMT_OP_READ_INDEX_LIST 0x0003
#define MGMT_READ_INDEX_LIST_SIZE 0
struct mgmt_rp_read_index_list {
__le16 num_controllers;
- __le16 index[0];
+ __le16 index[];
} __packed;
/* Reserve one extra byte for names in management messages so that they
@@ -101,7 +107,8 @@ struct mgmt_rp_read_index_list {
#define MGMT_SETTING_PRIVACY 0x00002000
#define MGMT_SETTING_CONFIGURATION 0x00004000
#define MGMT_SETTING_STATIC_ADDRESS 0x00008000
-#define MGMT_SETTING_PHY_CONFIGURATION 0x00010000
+#define MGMT_SETTING_PHY_CONFIGURATION 0x00010000
+#define MGMT_SETTING_WIDEBAND_SPEECH 0x00020000
#define MGMT_OP_READ_INFO 0x0004
#define MGMT_READ_INFO_SIZE 0
@@ -182,7 +189,7 @@ struct mgmt_link_key_info {
struct mgmt_cp_load_link_keys {
__u8 debug_keys;
__le16 key_count;
- struct mgmt_link_key_info keys[0];
+ struct mgmt_link_key_info keys[];
} __packed;
#define MGMT_LOAD_LINK_KEYS_SIZE 3
@@ -195,7 +202,7 @@ struct mgmt_cp_load_link_keys {
struct mgmt_ltk_info {
struct mgmt_addr_info addr;
__u8 type;
- __u8 master;
+ __u8 initiator;
__u8 enc_size;
__le16 ediv;
__le64 rand;
@@ -205,7 +212,7 @@ struct mgmt_ltk_info {
#define MGMT_OP_LOAD_LONG_TERM_KEYS 0x0013
struct mgmt_cp_load_long_term_keys {
__le16 key_count;
- struct mgmt_ltk_info keys[0];
+ struct mgmt_ltk_info keys[];
} __packed;
#define MGMT_LOAD_LONG_TERM_KEYS_SIZE 2
@@ -222,7 +229,7 @@ struct mgmt_rp_disconnect {
#define MGMT_GET_CONNECTIONS_SIZE 0
struct mgmt_rp_get_connections {
__le16 conn_count;
- struct mgmt_addr_info addr[0];
+ struct mgmt_addr_info addr[];
} __packed;
#define MGMT_OP_PIN_CODE_REPLY 0x0016
@@ -412,7 +419,7 @@ struct mgmt_irk_info {
#define MGMT_OP_LOAD_IRKS 0x0030
struct mgmt_cp_load_irks {
__le16 irk_count;
- struct mgmt_irk_info irks[0];
+ struct mgmt_irk_info irks[];
} __packed;
#define MGMT_LOAD_IRKS_SIZE 2
@@ -464,7 +471,7 @@ struct mgmt_conn_param {
#define MGMT_OP_LOAD_CONN_PARAM 0x0035
struct mgmt_cp_load_conn_param {
__le16 param_count;
- struct mgmt_conn_param params[0];
+ struct mgmt_conn_param params[];
} __packed;
#define MGMT_LOAD_CONN_PARAM_SIZE 2
@@ -472,7 +479,7 @@ struct mgmt_cp_load_conn_param {
#define MGMT_READ_UNCONF_INDEX_LIST_SIZE 0
struct mgmt_rp_read_unconf_index_list {
__le16 num_controllers;
- __le16 index[0];
+ __le16 index[];
} __packed;
#define MGMT_OPTION_EXTERNAL_CONFIG 0x00000001
@@ -503,7 +510,7 @@ struct mgmt_cp_start_service_discovery {
__u8 type;
__s8 rssi;
__le16 uuid_count;
- __u8 uuids[0][16];
+ __u8 uuids[][16];
} __packed;
#define MGMT_START_SERVICE_DISCOVERY_SIZE 4
@@ -515,7 +522,7 @@ struct mgmt_cp_read_local_oob_ext_data {
struct mgmt_rp_read_local_oob_ext_data {
__u8 type;
__le16 eir_len;
- __u8 eir[0];
+ __u8 eir[];
} __packed;
#define MGMT_OP_READ_EXT_INDEX_LIST 0x003C
@@ -526,7 +533,7 @@ struct mgmt_rp_read_ext_index_list {
__le16 index;
__u8 type;
__u8 bus;
- } entry[0];
+ } entry[];
} __packed;
#define MGMT_OP_READ_ADV_FEATURES 0x0003D
@@ -537,7 +544,7 @@ struct mgmt_rp_read_adv_features {
__u8 max_scan_rsp_len;
__u8 max_instances;
__u8 num_instances;
- __u8 instance[0];
+ __u8 instance[];
} __packed;
#define MGMT_OP_ADD_ADVERTISING 0x003E
@@ -548,7 +555,7 @@ struct mgmt_cp_add_advertising {
__le16 timeout;
__u8 adv_data_len;
__u8 scan_rsp_len;
- __u8 data[0];
+ __u8 data[];
} __packed;
#define MGMT_ADD_ADVERTISING_SIZE 11
struct mgmt_rp_add_advertising {
@@ -565,6 +572,13 @@ struct mgmt_rp_add_advertising {
#define MGMT_ADV_FLAG_SEC_1M BIT(7)
#define MGMT_ADV_FLAG_SEC_2M BIT(8)
#define MGMT_ADV_FLAG_SEC_CODED BIT(9)
+#define MGMT_ADV_FLAG_CAN_SET_TX_POWER BIT(10)
+#define MGMT_ADV_FLAG_HW_OFFLOAD BIT(11)
+#define MGMT_ADV_PARAM_DURATION BIT(12)
+#define MGMT_ADV_PARAM_TIMEOUT BIT(13)
+#define MGMT_ADV_PARAM_INTERVALS BIT(14)
+#define MGMT_ADV_PARAM_TX_POWER BIT(15)
+#define MGMT_ADV_PARAM_SCAN_RSP BIT(16)
#define MGMT_ADV_FLAG_SEC_MASK (MGMT_ADV_FLAG_SEC_1M | MGMT_ADV_FLAG_SEC_2M | \
MGMT_ADV_FLAG_SEC_CODED)
@@ -602,7 +616,7 @@ struct mgmt_rp_read_ext_info {
__le32 supported_settings;
__le32 current_settings;
__le16 eir_len;
- __u8 eir[0];
+ __u8 eir[];
} __packed;
#define MGMT_OP_SET_APPEARANCE 0x0043
@@ -612,7 +626,7 @@ struct mgmt_cp_set_appearance {
#define MGMT_SET_APPEARANCE_SIZE 2
#define MGMT_OP_GET_PHY_CONFIGURATION 0x0044
-struct mgmt_rp_get_phy_confguration {
+struct mgmt_rp_get_phy_configuration {
__le32 supported_phys;
__le32 configurable_phys;
__le32 selected_phys;
@@ -649,7 +663,7 @@ struct mgmt_rp_get_phy_confguration {
MGMT_PHY_LE_CODED_RX)
#define MGMT_OP_SET_PHY_CONFIGURATION 0x0045
-struct mgmt_cp_set_phy_confguration {
+struct mgmt_cp_set_phy_configuration {
__le32 selected_phys;
} __packed;
#define MGMT_SET_PHY_CONFIGURATION_SIZE 4
@@ -667,15 +681,204 @@ struct mgmt_blocked_key_info {
struct mgmt_cp_set_blocked_keys {
__le16 key_count;
- struct mgmt_blocked_key_info keys[0];
+ struct mgmt_blocked_key_info keys[];
} __packed;
#define MGMT_OP_SET_BLOCKED_KEYS_SIZE 2
+#define MGMT_OP_SET_WIDEBAND_SPEECH 0x0047
+
+#define MGMT_CAP_SEC_FLAGS 0x01
+#define MGMT_CAP_MAX_ENC_KEY_SIZE 0x02
+#define MGMT_CAP_SMP_MAX_ENC_KEY_SIZE 0x03
+#define MGMT_CAP_LE_TX_PWR 0x04
+
+#define MGMT_OP_READ_CONTROLLER_CAP 0x0048
+#define MGMT_READ_CONTROLLER_CAP_SIZE 0
+struct mgmt_rp_read_controller_cap {
+ __le16 cap_len;
+ __u8 cap[];
+} __packed;
+
+#define MGMT_OP_READ_EXP_FEATURES_INFO 0x0049
+#define MGMT_READ_EXP_FEATURES_INFO_SIZE 0
+struct mgmt_rp_read_exp_features_info {
+ __le16 feature_count;
+ struct {
+ __u8 uuid[16];
+ __le32 flags;
+ } features[];
+} __packed;
+
+#define MGMT_OP_SET_EXP_FEATURE 0x004a
+struct mgmt_cp_set_exp_feature {
+ __u8 uuid[16];
+ __u8 param[];
+} __packed;
+#define MGMT_SET_EXP_FEATURE_SIZE 16
+struct mgmt_rp_set_exp_feature {
+ __u8 uuid[16];
+ __le32 flags;
+} __packed;
+
+#define MGMT_OP_READ_DEF_SYSTEM_CONFIG 0x004b
+#define MGMT_READ_DEF_SYSTEM_CONFIG_SIZE 0
+
+#define MGMT_OP_SET_DEF_SYSTEM_CONFIG 0x004c
+#define MGMT_SET_DEF_SYSTEM_CONFIG_SIZE 0
+
+#define MGMT_OP_READ_DEF_RUNTIME_CONFIG 0x004d
+#define MGMT_READ_DEF_RUNTIME_CONFIG_SIZE 0
+
+#define MGMT_OP_SET_DEF_RUNTIME_CONFIG 0x004e
+#define MGMT_SET_DEF_RUNTIME_CONFIG_SIZE 0
+
+#define MGMT_OP_GET_DEVICE_FLAGS 0x004F
+#define MGMT_GET_DEVICE_FLAGS_SIZE 7
+struct mgmt_cp_get_device_flags {
+ struct mgmt_addr_info addr;
+} __packed;
+struct mgmt_rp_get_device_flags {
+ struct mgmt_addr_info addr;
+ __le32 supported_flags;
+ __le32 current_flags;
+} __packed;
+
+#define MGMT_OP_SET_DEVICE_FLAGS 0x0050
+#define MGMT_SET_DEVICE_FLAGS_SIZE 11
+struct mgmt_cp_set_device_flags {
+ struct mgmt_addr_info addr;
+ __le32 current_flags;
+} __packed;
+struct mgmt_rp_set_device_flags {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS BIT(0)
+
+#define MGMT_OP_READ_ADV_MONITOR_FEATURES 0x0051
+#define MGMT_READ_ADV_MONITOR_FEATURES_SIZE 0
+struct mgmt_rp_read_adv_monitor_features {
+ __le32 supported_features;
+ __le32 enabled_features;
+ __le16 max_num_handles;
+ __u8 max_num_patterns;
+ __le16 num_handles;
+ __le16 handles[];
+} __packed;
+
+struct mgmt_adv_pattern {
+ __u8 ad_type;
+ __u8 offset;
+ __u8 length;
+ __u8 value[31];
+} __packed;
+
+#define MGMT_OP_ADD_ADV_PATTERNS_MONITOR 0x0052
+struct mgmt_cp_add_adv_patterns_monitor {
+ __u8 pattern_count;
+ struct mgmt_adv_pattern patterns[];
+} __packed;
+#define MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE 1
+struct mgmt_rp_add_adv_patterns_monitor {
+ __le16 monitor_handle;
+} __packed;
+
+#define MGMT_OP_REMOVE_ADV_MONITOR 0x0053
+struct mgmt_cp_remove_adv_monitor {
+ __le16 monitor_handle;
+} __packed;
+#define MGMT_REMOVE_ADV_MONITOR_SIZE 2
+struct mgmt_rp_remove_adv_monitor {
+ __le16 monitor_handle;
+} __packed;
+
+#define MGMT_OP_ADD_EXT_ADV_PARAMS 0x0054
+struct mgmt_cp_add_ext_adv_params {
+ __u8 instance;
+ __le32 flags;
+ __le16 duration;
+ __le16 timeout;
+ __le32 min_interval;
+ __le32 max_interval;
+ __s8 tx_power;
+} __packed;
+#define MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE 18
+struct mgmt_rp_add_ext_adv_params {
+ __u8 instance;
+ __s8 tx_power;
+ __u8 max_adv_data_len;
+ __u8 max_scan_rsp_len;
+} __packed;
+
+#define MGMT_OP_ADD_EXT_ADV_DATA 0x0055
+struct mgmt_cp_add_ext_adv_data {
+ __u8 instance;
+ __u8 adv_data_len;
+ __u8 scan_rsp_len;
+ __u8 data[];
+} __packed;
+#define MGMT_ADD_EXT_ADV_DATA_SIZE 3
+struct mgmt_rp_add_ext_adv_data {
+ __u8 instance;
+} __packed;
+
+struct mgmt_adv_rssi_thresholds {
+ __s8 high_threshold;
+ __le16 high_threshold_timeout;
+ __s8 low_threshold;
+ __le16 low_threshold_timeout;
+ __u8 sampling_period;
+} __packed;
+
+#define MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI 0x0056
+struct mgmt_cp_add_adv_patterns_monitor_rssi {
+ struct mgmt_adv_rssi_thresholds rssi;
+ __u8 pattern_count;
+ struct mgmt_adv_pattern patterns[];
+} __packed;
+#define MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE 8
+
+#define MGMT_OP_SET_MESH_RECEIVER 0x0057
+struct mgmt_cp_set_mesh {
+ __u8 enable;
+ __le16 window;
+ __le16 period;
+ __u8 num_ad_types;
+ __u8 ad_types[];
+} __packed;
+#define MGMT_SET_MESH_RECEIVER_SIZE 6
+
+#define MGMT_OP_MESH_READ_FEATURES 0x0058
+#define MGMT_MESH_READ_FEATURES_SIZE 0
+#define MESH_HANDLES_MAX 3
+struct mgmt_rp_mesh_read_features {
+ __le16 index;
+ __u8 max_handles;
+ __u8 used_handles;
+ __u8 handles[MESH_HANDLES_MAX];
+} __packed;
+
+#define MGMT_OP_MESH_SEND 0x0059
+struct mgmt_cp_mesh_send {
+ struct mgmt_addr_info addr;
+ __le64 instant;
+ __le16 delay;
+ __u8 cnt;
+ __u8 adv_data_len;
+ __u8 adv_data[];
+} __packed;
+#define MGMT_MESH_SEND_SIZE 19
+
+#define MGMT_OP_MESH_SEND_CANCEL 0x005A
+struct mgmt_cp_mesh_send_cancel {
+ __u8 handle;
+} __packed;
+#define MGMT_MESH_SEND_CANCEL_SIZE 1
+
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
__u8 status;
- __u8 data[0];
+ __u8 data[];
} __packed;
#define MGMT_EV_CMD_STATUS 0x0002
@@ -723,7 +926,7 @@ struct mgmt_ev_device_connected {
struct mgmt_addr_info addr;
__le32 flags;
__le16 eir_len;
- __u8 eir[0];
+ __u8 eir[];
} __packed;
#define MGMT_DEV_DISCONN_UNKNOWN 0x00
@@ -731,6 +934,7 @@ struct mgmt_ev_device_connected {
#define MGMT_DEV_DISCONN_LOCAL_HOST 0x02
#define MGMT_DEV_DISCONN_REMOTE 0x03
#define MGMT_DEV_DISCONN_AUTH_FAILURE 0x04
+#define MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND 0x05
#define MGMT_EV_DEVICE_DISCONNECTED 0x000C
struct mgmt_ev_device_disconnected {
@@ -768,9 +972,11 @@ struct mgmt_ev_auth_failed {
__u8 status;
} __packed;
-#define MGMT_DEV_FOUND_CONFIRM_NAME 0x01
-#define MGMT_DEV_FOUND_LEGACY_PAIRING 0x02
-#define MGMT_DEV_FOUND_NOT_CONNECTABLE 0x04
+#define MGMT_DEV_FOUND_CONFIRM_NAME 0x01
+#define MGMT_DEV_FOUND_LEGACY_PAIRING 0x02
+#define MGMT_DEV_FOUND_NOT_CONNECTABLE 0x04
+#define MGMT_DEV_FOUND_INITIATED_CONN 0x08
+#define MGMT_DEV_FOUND_NAME_REQUEST_FAILED 0x10
#define MGMT_EV_DEVICE_FOUND 0x0012
struct mgmt_ev_device_found {
@@ -778,7 +984,7 @@ struct mgmt_ev_device_found {
__s8 rssi;
__le32 flags;
__le16 eir_len;
- __u8 eir[0];
+ __u8 eir[];
} __packed;
#define MGMT_EV_DISCOVERING 0x0013
@@ -873,7 +1079,7 @@ struct mgmt_ev_ext_index {
struct mgmt_ev_local_oob_data_updated {
__u8 type;
__le16 eir_len;
- __u8 eir[0];
+ __u8 eir[];
} __packed;
#define MGMT_EV_ADVERTISING_ADDED 0x0023
@@ -889,10 +1095,80 @@ struct mgmt_ev_advertising_removed {
#define MGMT_EV_EXT_INFO_CHANGED 0x0025
struct mgmt_ev_ext_info_changed {
__le16 eir_len;
- __u8 eir[0];
+ __u8 eir[];
} __packed;
#define MGMT_EV_PHY_CONFIGURATION_CHANGED 0x0026
struct mgmt_ev_phy_configuration_changed {
__le32 selected_phys;
} __packed;
+
+#define MGMT_EV_EXP_FEATURE_CHANGED 0x0027
+struct mgmt_ev_exp_feature_changed {
+ __u8 uuid[16];
+ __le32 flags;
+} __packed;
+
+#define MGMT_EV_DEVICE_FLAGS_CHANGED 0x002a
+struct mgmt_ev_device_flags_changed {
+ struct mgmt_addr_info addr;
+ __le32 supported_flags;
+ __le32 current_flags;
+} __packed;
+
+#define MGMT_EV_ADV_MONITOR_ADDED 0x002b
+struct mgmt_ev_adv_monitor_added {
+ __le16 monitor_handle;
+} __packed;
+
+#define MGMT_EV_ADV_MONITOR_REMOVED 0x002c
+struct mgmt_ev_adv_monitor_removed {
+ __le16 monitor_handle;
+} __packed;
+
+#define MGMT_EV_CONTROLLER_SUSPEND 0x002d
+struct mgmt_ev_controller_suspend {
+ __u8 suspend_state;
+} __packed;
+
+#define MGMT_EV_CONTROLLER_RESUME 0x002e
+struct mgmt_ev_controller_resume {
+ __u8 wake_reason;
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_WAKE_REASON_NON_BT_WAKE 0x0
+#define MGMT_WAKE_REASON_UNEXPECTED 0x1
+#define MGMT_WAKE_REASON_REMOTE_WAKE 0x2
+
+#define MGMT_EV_ADV_MONITOR_DEVICE_FOUND 0x002f
+struct mgmt_ev_adv_monitor_device_found {
+ __le16 monitor_handle;
+ struct mgmt_addr_info addr;
+ __s8 rssi;
+ __le32 flags;
+ __le16 eir_len;
+ __u8 eir[];
+} __packed;
+
+#define MGMT_EV_ADV_MONITOR_DEVICE_LOST 0x0030
+struct mgmt_ev_adv_monitor_device_lost {
+ __le16 monitor_handle;
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_EV_MESH_DEVICE_FOUND 0x0031
+struct mgmt_ev_mesh_device_found {
+ struct mgmt_addr_info addr;
+ __s8 rssi;
+ __le64 instant;
+ __le32 flags;
+ __le16 eir_len;
+ __u8 eir[];
+} __packed;
+
+#define MGMT_EV_MESH_PACKET_CMPLT 0x0032
+struct mgmt_ev_mesh_pkt_cmplt {
+ __u8 handle;
+} __packed;
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index da4acefe39c8..99d26879b02a 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -34,7 +34,6 @@
#define RFCOMM_DEFAULT_MTU 127
#define RFCOMM_DEFAULT_CREDITS 7
-#define RFCOMM_MAX_L2CAP_MTU 1013
#define RFCOMM_MAX_CREDITS 40
#define RFCOMM_SKB_HEAD_RESERVE 8
@@ -356,7 +355,7 @@ struct rfcomm_dev_info {
struct rfcomm_dev_list_req {
u16 dev_num;
- struct rfcomm_dev_info dev_info[0];
+ struct rfcomm_dev_info dev_info[];
};
int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h
index f40ddb4264fc..1aa2e14b6c94 100644
--- a/include/net/bluetooth/sco.h
+++ b/include/net/bluetooth/sco.h
@@ -46,4 +46,6 @@ struct sco_conninfo {
__u8 dev_class[3];
};
+#define SCO_CMSG_PKT_STATUS 0x01
+
#endif /* __SCO_H */
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index c8696a230b7d..a016f275cb01 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -15,8 +15,6 @@
#define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW)
#define AD_TIMER_INTERVAL 100 /*msec*/
-#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02}
-
#define AD_LACP_SLOW 0
#define AD_LACP_FAST 1
@@ -262,7 +260,7 @@ struct ad_system {
struct ad_bond_info {
struct ad_system system; /* 802.3ad system structure */
struct bond_3ad_stats stats;
- u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */
+ atomic_t agg_select_timer; /* Timer to select aggregator after all adapters' handshakes */
u16 aggregator_identifier;
};
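The u32 to atomic_t change lets the aggregator-selection countdown be decremented without holding the mode lock. A hedged sketch of the countdown test this type supports (surrounding state machine omitted; function name hypothetical):

static bool agg_select_timer_expired(struct ad_bond_info *ad)
{
	/* armed (non-zero) and this decrement reached zero */
	return atomic_read(&ad->agg_select_timer) &&
	       atomic_dec_and_test(&ad->agg_select_timer);
}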
@@ -290,7 +288,7 @@ static inline const char *bond_3ad_churn_desc(churn_state_t state)
}
/* ========== AD Exported functions to the main bonding code ========== */
-void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution);
+void bond_3ad_initialize(struct bonding *bond);
void bond_3ad_bind_slave(struct slave *slave);
void bond_3ad_unbind_slave(struct slave *slave);
void bond_3ad_state_machine_handler(struct work_struct *);
@@ -303,6 +301,7 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave);
int bond_3ad_set_carrier(struct bonding *bond);
+void bond_3ad_update_lacp_active(struct bonding *bond);
void bond_3ad_update_lacp_rate(struct bonding *bond);
void bond_3ad_update_ad_actor_settings(struct bonding *bond);
int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats);
diff --git a/include/net/bond_alb.h b/include/net/bond_alb.h
index b3504fcd773d..191c36afa1f4 100644
--- a/include/net/bond_alb.h
+++ b/include/net/bond_alb.h
@@ -126,7 +126,7 @@ struct tlb_slave_info {
struct alb_bond_info {
struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */
u32 unbalanced_load;
- int tx_rebalance_counter;
+ atomic_t tx_rebalance_counter;
int lp_counter;
/* -------- rlb parameters -------- */
int rlb_enabled;
@@ -158,6 +158,10 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave);
int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+struct slave *bond_xmit_alb_slave_get(struct bonding *bond,
+ struct sk_buff *skb);
+struct slave *bond_xmit_tlb_slave_get(struct bonding *bond,
+ struct sk_buff *skb);
void bond_alb_monitor(struct work_struct *);
int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr);
void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id);
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
index 9d382f2f0bc5..69292ecc0325 100644
--- a/include/net/bond_options.h
+++ b/include/net/bond_options.h
@@ -7,6 +7,14 @@
#ifndef _NET_BOND_OPTIONS_H
#define _NET_BOND_OPTIONS_H
+#include <linux/bits.h>
+#include <linux/limits.h>
+#include <linux/types.h>
+#include <linux/string.h>
+
+struct netlink_ext_ack;
+struct nlattr;
+
#define BOND_OPT_MAX_NAMELEN 32
#define BOND_OPT_VALID(opt) ((opt) < BOND_OPT_LAST)
#define BOND_MODE_ALL_EX(x) (~(x))
@@ -64,19 +72,30 @@ enum {
BOND_OPT_AD_USER_PORT_KEY,
BOND_OPT_NUM_PEER_NOTIF_ALIAS,
BOND_OPT_PEER_NOTIF_DELAY,
+ BOND_OPT_LACP_ACTIVE,
+ BOND_OPT_MISSED_MAX,
+ BOND_OPT_NS_TARGETS,
+ BOND_OPT_PRIO,
BOND_OPT_LAST
};
/* This structure is used for storing option values and for passing option
* values when changing an option. The logic when used as an arg is as follows:
- * - if string != NULL -> parse it, if the opt is RAW type then return it, else
- * return the parse result
- * - if string == NULL -> parse value
+ * - if value != ULLONG_MAX -> parse value
+ * - if string != NULL -> parse string
+ * - if the opt is RAW data and length less than maxlen,
+ * copy the data to extra storage
*/
+
+#define BOND_OPT_EXTRA_MAXLEN 16
struct bond_opt_value {
char *string;
u64 value;
u32 flags;
+ union {
+ char extra[BOND_OPT_EXTRA_MAXLEN];
+ struct net_device *slave_dev;
+ };
};
struct bonding;
@@ -100,7 +119,8 @@ struct bond_option {
};
int __bond_opt_set(struct bonding *bond, unsigned int option,
- struct bond_opt_value *val);
+ struct bond_opt_value *val,
+ struct nlattr *bad_attr, struct netlink_ext_ack *extack);
int __bond_opt_set_notify(struct bonding *bond, unsigned int option,
struct bond_opt_value *val);
int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf);
@@ -116,18 +136,29 @@ const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val);
* When value is ULLONG_MAX then string will be used.
*/
static inline void __bond_opt_init(struct bond_opt_value *optval,
- char *string, u64 value)
+ char *string, u64 value,
+ void *extra, size_t extra_len)
{
memset(optval, 0, sizeof(*optval));
optval->value = ULLONG_MAX;
- if (value == ULLONG_MAX)
- optval->string = string;
- else
+ if (value != ULLONG_MAX)
optval->value = value;
+ else if (string)
+ optval->string = string;
+
+ if (extra && extra_len <= BOND_OPT_EXTRA_MAXLEN)
+ memcpy(optval->extra, extra, extra_len);
}
-#define bond_opt_initval(optval, value) __bond_opt_init(optval, NULL, value)
-#define bond_opt_initstr(optval, str) __bond_opt_init(optval, str, ULLONG_MAX)
+#define bond_opt_initval(optval, value) __bond_opt_init(optval, NULL, value, NULL, 0)
+#define bond_opt_initstr(optval, str) __bond_opt_init(optval, str, ULLONG_MAX, NULL, 0)
+#define bond_opt_initextra(optval, extra, extra_len) \
+ __bond_opt_init(optval, NULL, ULLONG_MAX, extra, extra_len)
+#define bond_opt_slave_initval(optval, slave_dev, value) \
+ __bond_opt_init(optval, NULL, value, slave_dev, sizeof(struct net_device *))
void bond_option_arp_ip_targets_clear(struct bonding *bond);
+#if IS_ENABLED(CONFIG_IPV6)
+void bond_option_ns_ip6_targets_clear(struct bonding *bond);
+#endif
#endif /* _NET_BOND_OPTIONS_H */
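With the reworked initializers, a numeric value takes precedence, a string is parsed only when no value is given, and extra data is copied only when it fits in BOND_OPT_EXTRA_MAXLEN (16 bytes, exactly one struct in6_addr). A usage sketch under those assumptions (the values are made up):

struct bond_opt_value newval;
struct in6_addr target = IN6ADDR_ANY_INIT;	/* illustrative payload */

bond_opt_initval(&newval, 1);			/* numeric value wins */
bond_opt_initstr(&newval, "802.3ad");		/* string only if no value */
bond_opt_initextra(&newval, &target, sizeof(target)); /* 16 bytes, fits */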
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 3d56b026bb9e..e999f851738b 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -29,8 +29,11 @@
#include <net/bond_3ad.h>
#include <net/bond_alb.h>
#include <net/bond_options.h>
+#include <net/ipv6.h>
+#include <net/addrconf.h>
#define BOND_MAX_ARP_TARGETS 16
+#define BOND_MAX_NS_TARGETS BOND_MAX_ARP_TARGETS
#define BOND_DEFAULT_MIIMON 100
@@ -86,6 +89,11 @@
#define bond_for_each_slave_rcu(bond, pos, iter) \
netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
+#define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \
+ NETIF_F_GSO_ESP)
+
+#define BOND_TLS_FEATURES (NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX)
+
#ifdef CONFIG_NET_POLL_CONTROLLER
extern atomic_t netpoll_block_tx;
@@ -116,6 +124,7 @@ struct bond_params {
int xmit_policy;
int miimon;
u8 num_peer_notif;
+ u8 missed_max;
int arp_interval;
int arp_validate;
int arp_all_targets;
@@ -124,6 +133,7 @@ struct bond_params {
int updelay;
int downdelay;
int peer_notif_delay;
+ int lacp_active;
int lacp_fast;
unsigned int min_links;
int ad_select;
@@ -139,22 +149,21 @@ struct bond_params {
struct reciprocal_value reciprocal_packets_per_slave;
u16 ad_actor_sys_prio;
u16 ad_user_port_key;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr ns_targets[BOND_MAX_NS_TARGETS];
+#endif
/* 2 bytes of padding: see ether_addr_equal_64bits() */
u8 ad_actor_system[ETH_ALEN + 2];
};
-struct bond_parm_tbl {
- char *modename;
- int mode;
-};
-
struct slave {
struct net_device *dev; /* first - useful for panic debug */
struct bonding *bond; /* our master */
int delay;
- /* all three in jiffies */
+ /* all four in jiffies */
unsigned long last_link_up;
+ unsigned long last_tx;
unsigned long last_rx;
unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
s8 link; /* one of BOND_LINK_XXXX */
@@ -170,6 +179,7 @@ struct slave {
u32 speed;
u16 queue_id;
u8 perm_hwaddr[MAX_ADDR_LEN];
+ int prio;
struct ad_slave_info *ad_info;
struct tlb_slave_info tlb_info;
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -180,10 +190,15 @@ struct slave {
struct rtnl_link_stats64 slave_stats;
};
+static inline struct slave *to_slave(struct kobject *kobj)
+{
+ return container_of(kobj, struct slave, kobj);
+}
+
struct bond_up_slave {
unsigned int count;
struct rcu_head rcu;
- struct slave *arr[0];
+ struct slave *arr[];
};
/*
@@ -191,6 +206,11 @@ struct bond_up_slave {
*/
#define BOND_LINK_NOCHANGE -1
+struct bond_ipsec {
+ struct list_head list;
+ struct xfrm_state *xs;
+};
+
/*
* Here are the locking policies for the two bonding locks:
* Get rcu_read_lock when reading or RTNL when writing slave list.
@@ -200,7 +220,8 @@ struct bonding {
struct slave __rcu *curr_active_slave;
struct slave __rcu *current_arp_slave;
struct slave __rcu *primary_slave;
- struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
+ struct bond_up_slave __rcu *usable_slaves;
+ struct bond_up_slave __rcu *all_slaves;
bool force_primary;
s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
int (*recv_probe)(const struct sk_buff *, struct bonding *,
@@ -221,7 +242,7 @@ struct bonding {
char proc_file_name[IFNAMSIZ];
#endif /* CONFIG_PROC_FS */
struct list_head bond_list;
- u32 rr_tx_counter;
+ u32 __percpu *rr_tx_counter;
struct ad_bond_info ad_info;
struct alb_bond_info alb_info;
struct bond_params params;
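Making rr_tx_counter per-CPU avoids bouncing one shared cache line between CPUs on every round-robin transmit; occasional cross-CPU ordering races are harmless for rate spreading. A sketch of the access pattern implied by the new type (function name hypothetical):

static u32 rr_next_tx_id(struct bonding *bond)
{
	/* each CPU bumps its own counter, no shared cache line */
	return this_cpu_inc_return(*bond->rr_tx_counter);
}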
@@ -237,7 +258,12 @@ struct bonding {
struct dentry *debug_dir;
#endif /* CONFIG_DEBUG_FS */
struct rtnl_link_stats64 bond_stats;
- struct lock_class_key stats_lock_key;
+#ifdef CONFIG_XFRM_OFFLOAD
+ struct list_head ipsec_list;
+ /* protecting ipsec_list */
+ spinlock_t ipsec_lock;
+#endif /* CONFIG_XFRM_OFFLOAD */
+ struct bpf_prog *xdp_prog;
};
#define bond_slave_get_rcu(dev) \
@@ -254,6 +280,8 @@ struct bond_vlan_tag {
unsigned short vlan_id;
};
+bool bond_sk_check(struct bonding *bond);
+
/**
* Returns NULL if the net_device does not belong to any of the bond's slaves
*
@@ -326,7 +354,7 @@ static inline bool bond_uses_primary(struct bonding *bond)
static inline struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
{
- struct slave *slave = rcu_dereference(bond->curr_active_slave);
+ struct slave *slave = rcu_dereference_rtnl(bond->curr_active_slave);
return bond_uses_primary(bond) && slave ? slave->dev : NULL;
}
@@ -479,6 +507,15 @@ static inline int bond_is_ip_target_ok(__be32 addr)
return !ipv4_is_lbcast(addr) && !ipv4_is_zeronet(addr);
}
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int bond_is_ip6_target_ok(struct in6_addr *addr)
+{
+ return !ipv6_addr_any(addr) &&
+ !ipv6_addr_loopback(addr) &&
+ !ipv6_addr_is_multicast(addr);
+}
+#endif
+
/* Get the oldest arp which we've received on this slave for bond's
* arp_targets.
*/
@@ -504,19 +541,28 @@ static inline unsigned long slave_last_rx(struct bonding *bond,
return slave->last_rx;
}
+static inline void slave_update_last_tx(struct slave *slave)
+{
+ WRITE_ONCE(slave->last_tx, jiffies);
+}
+
+static inline unsigned long slave_last_tx(struct slave *slave)
+{
+ return READ_ONCE(slave->last_tx);
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
-static inline void bond_netpoll_send_skb(const struct slave *slave,
+static inline netdev_tx_t bond_netpoll_send_skb(const struct slave *slave,
struct sk_buff *skb)
{
- struct netpoll *np = slave->np;
-
- if (np)
- netpoll_send_skb(np, skb);
+ return netpoll_send_skb(slave->np, skb);
}
#else
-static inline void bond_netpoll_send_skb(const struct slave *slave,
+static inline netdev_tx_t bond_netpoll_send_skb(const struct slave *slave,
struct sk_buff *skb)
{
+ BUG();
+ return NETDEV_TX_OK;
}
#endif
@@ -609,8 +655,8 @@ struct bond_net {
struct class_attribute class_attr_bonding_masters;
};
-int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
-void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
+int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
+netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
int bond_create(struct net *net, const char *name);
int bond_create_sysfs(struct bond_net *net);
void bond_destroy_sysfs(struct bond_net *net);
@@ -680,20 +726,6 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond,
}
/* Caller must hold rcu_read_lock() for read */
-static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
- const u8 *mac)
-{
- struct list_head *iter;
- struct slave *tmp;
-
- bond_for_each_slave_rcu(bond, tmp, iter)
- if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
- return tmp;
-
- return NULL;
-}
-
-/* Caller must hold rcu_read_lock() for read */
static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
{
struct list_head *iter;
@@ -730,23 +762,38 @@ static inline int bond_get_targets_ip(__be32 *targets, __be32 ip)
return -1;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int bond_get_targets_ip6(struct in6_addr *targets, struct in6_addr *ip)
+{
+ int i;
+
+ for (i = 0; i < BOND_MAX_NS_TARGETS; i++)
+ if (ipv6_addr_equal(&targets[i], ip))
+ return i;
+ else if (ipv6_addr_any(&targets[i]))
+ break;
+
+ return -1;
+}
+#endif
+
/* exported from bond_main.c */
extern unsigned int bond_net_id;
-extern const struct bond_parm_tbl bond_lacp_tbl[];
-extern const struct bond_parm_tbl xmit_hashtype_tbl[];
-extern const struct bond_parm_tbl arp_validate_tbl[];
-extern const struct bond_parm_tbl arp_all_targets_tbl[];
-extern const struct bond_parm_tbl fail_over_mac_tbl[];
-extern const struct bond_parm_tbl pri_reselect_tbl[];
-extern struct bond_parm_tbl ad_select_tbl[];
/* exported from bond_netlink.c */
extern struct rtnl_link_ops bond_link_ops;
-static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
+/* exported from bond_sysfs_slave.c */
+extern const struct sysfs_ops slave_sysfs_ops;
+
+/* exported from bond_3ad.c */
+extern const u8 lacpdu_mcast_addr[];
+
+static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
{
- atomic_long_inc(&dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(dev);
dev_kfree_skb_any(skb);
+ return NET_XMIT_DROP;
}
#endif /* _NET_BONDING_H */
diff --git a/include/net/bpf_sk_storage.h b/include/net/bpf_sk_storage.h
index 8e4f831d2e52..2926f1f00d65 100644
--- a/include/net/bpf_sk_storage.h
+++ b/include/net/bpf_sk_storage.h
@@ -3,21 +3,61 @@
#ifndef _BPF_SK_STORAGE_H
#define _BPF_SK_STORAGE_H
+#include <linux/rculist.h>
+#include <linux/list.h>
+#include <linux/hash.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/bpf.h>
+#include <net/sock.h>
+#include <uapi/linux/sock_diag.h>
+#include <uapi/linux/btf.h>
+#include <linux/bpf_local_storage.h>
+
struct sock;
void bpf_sk_storage_free(struct sock *sk);
extern const struct bpf_func_proto bpf_sk_storage_get_proto;
extern const struct bpf_func_proto bpf_sk_storage_delete_proto;
+extern const struct bpf_func_proto bpf_sk_storage_get_tracing_proto;
+extern const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto;
+
+struct bpf_local_storage_elem;
+struct bpf_sk_storage_diag;
+struct sk_buff;
+struct nlattr;
#ifdef CONFIG_BPF_SYSCALL
int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk);
+struct bpf_sk_storage_diag *
+bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs);
+void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag);
+int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
+ struct sock *sk, struct sk_buff *skb,
+ int stg_array_type,
+ unsigned int *res_diag_size);
#else
static inline int bpf_sk_storage_clone(const struct sock *sk,
struct sock *newsk)
{
return 0;
}
+static inline struct bpf_sk_storage_diag *
+bpf_sk_storage_diag_alloc(const struct nlattr *nla)
+{
+ return NULL;
+}
+static inline void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag)
+{
+}
+static inline int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
+ struct sock *sk, struct sk_buff *skb,
+ int stg_array_type,
+ unsigned int *res_diag_size)
+{
+ return 0;
+}
#endif
#endif /* _BPF_SK_STORAGE_H */
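The diag API above follows an alloc/put/free lifecycle. A hedged sketch of how a sock_diag handler might drive it; the attribute id is an assumption taken from the companion inet_diag plumbing, and error handling is trimmed:

static int dump_sk_storages(struct sock *sk, struct sk_buff *skb,
			    const struct nlattr *nla_stgs)
{
	struct bpf_sk_storage_diag *diag;
	unsigned int res_size = 0;
	int err;

	diag = bpf_sk_storage_diag_alloc(nla_stgs); /* NULL or ERR_PTR on failure */
	if (IS_ERR_OR_NULL(diag))
		return diag ? PTR_ERR(diag) : 0;

	err = bpf_sk_storage_diag_put(diag, sk, skb,
				      INET_DIAG_REQ_SK_BPF_STORAGES, /* assumed id */
				      &res_size);
	bpf_sk_storage_diag_free(diag);
	return err;
}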
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 86e028388bad..f90f0021f5f2 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -23,6 +23,8 @@
*/
#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))
+#define BUSY_POLL_BUDGET 8
+
#ifdef CONFIG_NET_RX_BUSY_POLL
struct napi_struct;
@@ -31,19 +33,19 @@ extern unsigned int sysctl_net_busy_poll __read_mostly;
static inline bool net_busy_loop_on(void)
{
- return sysctl_net_busy_poll;
+ return READ_ONCE(sysctl_net_busy_poll);
}
static inline bool sk_can_busy_loop(const struct sock *sk)
{
- return sk->sk_ll_usec && !signal_pending(current);
+ return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
}
bool sk_busy_loop_end(void *p, unsigned long start_time);
void napi_busy_loop(unsigned int napi_id,
bool (*loop_end)(void *, unsigned long),
- void *loop_end_arg);
+ void *loop_end_arg, bool prefer_busy_poll, u16 budget);
#else /* CONFIG_NET_RX_BUSY_POLL */
static inline unsigned long net_busy_loop_on(void)
@@ -105,7 +107,9 @@ static inline void sk_busy_loop(struct sock *sk, int nonblock)
unsigned int napi_id = READ_ONCE(sk->sk_napi_id);
if (napi_id >= MIN_NAPI_ID)
- napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk);
+ napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk,
+ READ_ONCE(sk->sk_prefer_busy_poll),
+ READ_ONCE(sk->sk_busy_poll_budget) ?: BUSY_POLL_BUDGET);
#endif
}
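The ?: fallback above means a zero sk_busy_poll_budget selects the default BUSY_POLL_BUDGET of 8. The per-socket fields are expected to be set from userspace; a sketch for a given socket fd, assuming the SO_PREFER_BUSY_POLL and SO_BUSY_POLL_BUDGET socket options that accompany this interface:

int on = 1, budget = 64;

setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL, &on, sizeof(on));
setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET, &budget, sizeof(budget));
/* a budget of 0 would fall back to BUSY_POLL_BUDGET (8) in-kernel */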
@@ -114,7 +118,11 @@ static inline void skb_mark_napi_id(struct sk_buff *skb,
struct napi_struct *napi)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
- skb->napi_id = napi->napi_id;
+ /* If the skb was already marked with a valid NAPI ID, avoid overwriting
+ * it.
+ */
+ if (skb->napi_id < MIN_NAPI_ID)
+ skb->napi_id = napi->napi_id;
#endif
}
@@ -122,18 +130,47 @@ static inline void skb_mark_napi_id(struct sk_buff *skb,
static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
+ if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id))
+ WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
+#endif
+ sk_rx_queue_update(sk, skb);
+}
+
+/* Variant of sk_mark_napi_id() for passive flow setup,
+ * as sk->sk_napi_id and sk->sk_rx_queue_mapping content
+ * needs to be set.
+ */
+static inline void sk_mark_napi_id_set(struct sock *sk,
+ const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
sk_rx_queue_set(sk, skb);
}
+static inline void __sk_mark_napi_id_once(struct sock *sk, unsigned int napi_id)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ if (!READ_ONCE(sk->sk_napi_id))
+ WRITE_ONCE(sk->sk_napi_id, napi_id);
+#endif
+}
+
/* variant used for unconnected sockets */
static inline void sk_mark_napi_id_once(struct sock *sk,
const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
- if (!READ_ONCE(sk->sk_napi_id))
- WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
+ __sk_mark_napi_id_once(sk, skb->napi_id);
+#endif
+}
+
+static inline void sk_mark_napi_id_once_xdp(struct sock *sk,
+ const struct xdp_buff *xdp)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ __sk_mark_napi_id_once(sk, xdp->rxq->napi_id);
#endif
}
diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
index 48ecca8530ff..b655d8666f55 100644
--- a/include/net/caif/caif_dev.h
+++ b/include/net/caif/caif_dev.h
@@ -119,7 +119,7 @@ void caif_free_client(struct cflayer *adap_layer);
* The link_support layer is used to add any Link Layer specific
* framing.
*/
-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
struct cflayer *link_support, int head_room,
struct cflayer **layer, int (**rcv_func)(
struct sk_buff *, struct net_device *,
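The void to int change means enrollment can now fail cleanly, and callers are expected to propagate the error rather than continue with a half-registered device. A sketch of the assumed calling convention (negative errno on failure):

int err;

err = caif_enroll_dev(dev, caifdev, link_support, head_room,
		      &layer, &rcv_func);
if (err)
	return err;	/* assumed: nothing was enrolled on failure */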
diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
deleted file mode 100644
index 552cf68d28d2..000000000000
--- a/include/net/caif/caif_hsi.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Author: Daniel Martensson / daniel.martensson@stericsson.com
- * Dmitry.Tarnyagin / dmitry.tarnyagin@stericsson.com
- */
-
-#ifndef CAIF_HSI_H_
-#define CAIF_HSI_H_
-
-#include <net/caif/caif_layer.h>
-#include <net/caif/caif_device.h>
-#include <linux/atomic.h>
-
-/*
- * Maximum number of CAIF frames that can reside in the same HSI frame.
- */
-#define CFHSI_MAX_PKTS 15
-
-/*
- * Maximum number of bytes used for the frame that can be embedded in the
- * HSI descriptor.
- */
-#define CFHSI_MAX_EMB_FRM_SZ 96
-
-/*
- * Decides if HSI buffers should be prefilled with 0xFF pattern for easier
- * debugging. Both TX and RX buffers will be filled before the transfer.
- */
-#define CFHSI_DBG_PREFILL 0
-
-/* Structure describing a HSI packet descriptor. */
-#pragma pack(1) /* Byte alignment. */
-struct cfhsi_desc {
- u8 header;
- u8 offset;
- u16 cffrm_len[CFHSI_MAX_PKTS];
- u8 emb_frm[CFHSI_MAX_EMB_FRM_SZ];
-};
-#pragma pack() /* Default alignment. */
-
-/* Size of the complete HSI packet descriptor. */
-#define CFHSI_DESC_SZ (sizeof(struct cfhsi_desc))
-
-/*
- * Size of the complete HSI packet descriptor excluding the optional embedded
- * CAIF frame.
- */
-#define CFHSI_DESC_SHORT_SZ (CFHSI_DESC_SZ - CFHSI_MAX_EMB_FRM_SZ)
-
-/*
- * Maximum bytes transferred in one transfer.
- */
-#define CFHSI_MAX_CAIF_FRAME_SZ 4096
-
-#define CFHSI_MAX_PAYLOAD_SZ (CFHSI_MAX_PKTS * CFHSI_MAX_CAIF_FRAME_SZ)
-
-/* Size of the complete HSI TX buffer. */
-#define CFHSI_BUF_SZ_TX (CFHSI_DESC_SZ + CFHSI_MAX_PAYLOAD_SZ)
-
-/* Size of the complete HSI RX buffer. */
-#define CFHSI_BUF_SZ_RX ((2 * CFHSI_DESC_SZ) + CFHSI_MAX_PAYLOAD_SZ)
-
-/* Bitmasks for the HSI descriptor. */
-#define CFHSI_PIGGY_DESC (0x01 << 7)
-
-#define CFHSI_TX_STATE_IDLE 0
-#define CFHSI_TX_STATE_XFER 1
-
-#define CFHSI_RX_STATE_DESC 0
-#define CFHSI_RX_STATE_PAYLOAD 1
-
-/* Bitmasks for power management. */
-#define CFHSI_WAKE_UP 0
-#define CFHSI_WAKE_UP_ACK 1
-#define CFHSI_WAKE_DOWN_ACK 2
-#define CFHSI_AWAKE 3
-#define CFHSI_WAKELOCK_HELD 4
-#define CFHSI_SHUTDOWN 5
-#define CFHSI_FLUSH_FIFO 6
-
-#ifndef CFHSI_INACTIVITY_TOUT
-#define CFHSI_INACTIVITY_TOUT (1 * HZ)
-#endif /* CFHSI_INACTIVITY_TOUT */
-
-#ifndef CFHSI_WAKE_TOUT
-#define CFHSI_WAKE_TOUT (3 * HZ)
-#endif /* CFHSI_WAKE_TOUT */
-
-#ifndef CFHSI_MAX_RX_RETRIES
-#define CFHSI_MAX_RX_RETRIES (10 * HZ)
-#endif
-
-/* Structure implemented by the CAIF HSI driver. */
-struct cfhsi_cb_ops {
- void (*tx_done_cb) (struct cfhsi_cb_ops *drv);
- void (*rx_done_cb) (struct cfhsi_cb_ops *drv);
- void (*wake_up_cb) (struct cfhsi_cb_ops *drv);
- void (*wake_down_cb) (struct cfhsi_cb_ops *drv);
-};
-
-/* Structure implemented by HSI device. */
-struct cfhsi_ops {
- int (*cfhsi_up) (struct cfhsi_ops *dev);
- int (*cfhsi_down) (struct cfhsi_ops *dev);
- int (*cfhsi_tx) (u8 *ptr, int len, struct cfhsi_ops *dev);
- int (*cfhsi_rx) (u8 *ptr, int len, struct cfhsi_ops *dev);
- int (*cfhsi_wake_up) (struct cfhsi_ops *dev);
- int (*cfhsi_wake_down) (struct cfhsi_ops *dev);
- int (*cfhsi_get_peer_wake) (struct cfhsi_ops *dev, bool *status);
- int (*cfhsi_fifo_occupancy) (struct cfhsi_ops *dev, size_t *occupancy);
- int (*cfhsi_rx_cancel)(struct cfhsi_ops *dev);
- struct cfhsi_cb_ops *cb_ops;
-};
-
-/* Structure holds status of received CAIF frames processing */
-struct cfhsi_rx_state {
- int state;
- int nfrms;
- int pld_len;
- int retries;
- bool piggy_desc;
-};
-
-/* Priority mapping */
-enum {
- CFHSI_PRIO_CTL = 0,
- CFHSI_PRIO_VI,
- CFHSI_PRIO_VO,
- CFHSI_PRIO_BEBK,
- CFHSI_PRIO_LAST,
-};
-
-struct cfhsi_config {
- u32 inactivity_timeout;
- u32 aggregation_timeout;
- u32 head_align;
- u32 tail_align;
- u32 q_high_mark;
- u32 q_low_mark;
-};
-
-/* Structure implemented by CAIF HSI drivers. */
-struct cfhsi {
- struct caif_dev_common cfdev;
- struct net_device *ndev;
- struct platform_device *pdev;
- struct sk_buff_head qhead[CFHSI_PRIO_LAST];
- struct cfhsi_cb_ops cb_ops;
- struct cfhsi_ops *ops;
- int tx_state;
- struct cfhsi_rx_state rx_state;
- struct cfhsi_config cfg;
- int rx_len;
- u8 *rx_ptr;
- u8 *tx_buf;
- u8 *rx_buf;
- u8 *rx_flip_buf;
- spinlock_t lock;
- int flow_off_sent;
- struct list_head list;
- struct work_struct wake_up_work;
- struct work_struct wake_down_work;
- struct work_struct out_of_sync_work;
- struct workqueue_struct *wq;
- wait_queue_head_t wake_up_wait;
- wait_queue_head_t wake_down_wait;
- wait_queue_head_t flush_fifo_wait;
- struct timer_list inactivity_timer;
- struct timer_list rx_slowpath_timer;
-
- /* TX aggregation */
- int aggregation_len;
- struct timer_list aggregation_timer;
-
- unsigned long bits;
-};
-extern struct platform_driver cfhsi_driver;
-
-/**
- * enum ifla_caif_hsi - CAIF HSI NetlinkRT parameters.
- * @IFLA_CAIF_HSI_INACTIVITY_TOUT: Inactivity timeout before
- * taking the HSI wakeline down, in milliseconds.
- * When using RT Netlink to create, destroy or configure a CAIF HSI interface,
- * enum ifla_caif_hsi is used to specify the configuration attributes.
- */
-enum ifla_caif_hsi {
- __IFLA_CAIF_HSI_UNSPEC,
- __IFLA_CAIF_HSI_INACTIVITY_TOUT,
- __IFLA_CAIF_HSI_AGGREGATION_TOUT,
- __IFLA_CAIF_HSI_HEAD_ALIGN,
- __IFLA_CAIF_HSI_TAIL_ALIGN,
- __IFLA_CAIF_HSI_QHIGH_WATERMARK,
- __IFLA_CAIF_HSI_QLOW_WATERMARK,
- __IFLA_CAIF_HSI_MAX
-};
-
-struct cfhsi_ops *cfhsi_get_ops(void);
-
-#endif /* CAIF_HSI_H_ */
diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h
index 064094101cb5..51f7bb42a936 100644
--- a/include/net/caif/caif_layer.h
+++ b/include/net/caif/caif_layer.h
@@ -156,7 +156,7 @@ struct cflayer {
* CAIF packets upwards in the stack.
* Packet handling rules:
* - The CAIF packet (cfpkt) ownership is passed to the
- * called receive function. This means that the the
+ * called receive function. This means that the
* packet cannot be accessed after passing it to the
* above layer using up->receive().
*
@@ -184,7 +184,7 @@ struct cflayer {
* CAIF packet downwards in the stack.
* Packet handling rules:
* - The CAIF packet (cfpkt) ownership is passed to the
- * transmit function. This means that the the packet
+ * transmit function. This means that the packet
* cannot be accessed after passing it to the below
* layer using dn->transmit().
*
diff --git a/include/net/caif/caif_spi.h b/include/net/caif/caif_spi.h
deleted file mode 100644
index a0bf4cbce71b..000000000000
--- a/include/net/caif/caif_spi.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Author: Daniel Martensson / Daniel.Martensson@stericsson.com
- */
-
-#ifndef CAIF_SPI_H_
-#define CAIF_SPI_H_
-
-#include <net/caif/caif_device.h>
-
-#define SPI_CMD_WR 0x00
-#define SPI_CMD_RD 0x01
-#define SPI_CMD_EOT 0x02
-#define SPI_CMD_IND 0x04
-
-#define SPI_DMA_BUF_LEN 8192
-
-#define WL_SZ 2 /* 16 bits. */
-#define SPI_CMD_SZ 4 /* 32 bits. */
-#define SPI_IND_SZ 4 /* 32 bits. */
-
-#define SPI_XFER 0
-#define SPI_SS_ON 1
-#define SPI_SS_OFF 2
-#define SPI_TERMINATE 3
-
-/* Minimum time between different levels is 50 microseconds. */
-#define MIN_TRANSITION_TIME_USEC 50
-
-/* Defines for calculating duration of SPI transfers for a particular
- * number of bytes.
- */
-#define SPI_MASTER_CLK_MHZ 13
-#define SPI_XFER_TIME_USEC(bytes, clk) (((bytes) * 8) / clk)
-
-/* Normally this should be aligned on the modem in order to benefit from full
- * duplex transfers. However a size of 8188 provokes errors when running with
- * the modem. These errors occur when packet sizes approaches 4 kB of data.
- */
-#define CAIF_MAX_SPI_FRAME 4092
-
-/* Maximum number of uplink CAIF frames that can reside in the same SPI frame.
- * This number should correspond with the modem setting. The application side
- * CAIF accepts any number of embedded downlink CAIF frames.
- */
-#define CAIF_MAX_SPI_PKTS 9
-
-/* Decides if SPI buffers should be prefilled with 0xFF pattern for easier
- * debugging. Both TX and RX buffers will be filled before the transfer.
- */
-#define CFSPI_DBG_PREFILL 0
-
-/* Structure describing a SPI transfer. */
-struct cfspi_xfer {
- u16 tx_dma_len;
- u16 rx_dma_len;
- void *va_tx[2];
- dma_addr_t pa_tx[2];
- void *va_rx;
- dma_addr_t pa_rx;
-};
-
-/* Structure implemented by the SPI interface. */
-struct cfspi_ifc {
- void (*ss_cb) (bool assert, struct cfspi_ifc *ifc);
- void (*xfer_done_cb) (struct cfspi_ifc *ifc);
- void *priv;
-};
-
-/* Structure implemented by SPI clients. */
-struct cfspi_dev {
- int (*init_xfer) (struct cfspi_xfer *xfer, struct cfspi_dev *dev);
- void (*sig_xfer) (bool xfer, struct cfspi_dev *dev);
- struct cfspi_ifc *ifc;
- char *name;
- u32 clk_mhz;
- void *priv;
-};
-
-/* Enumeration describing the CAIF SPI state. */
-enum cfspi_state {
- CFSPI_STATE_WAITING = 0,
- CFSPI_STATE_AWAKE,
- CFSPI_STATE_FETCH_PKT,
- CFSPI_STATE_GET_NEXT,
- CFSPI_STATE_INIT_XFER,
- CFSPI_STATE_WAIT_ACTIVE,
- CFSPI_STATE_SIG_ACTIVE,
- CFSPI_STATE_WAIT_XFER_DONE,
- CFSPI_STATE_XFER_DONE,
- CFSPI_STATE_WAIT_INACTIVE,
- CFSPI_STATE_SIG_INACTIVE,
- CFSPI_STATE_DELIVER_PKT,
- CFSPI_STATE_MAX,
-};
-
-/* Structure implemented by SPI physical interfaces. */
-struct cfspi {
- struct caif_dev_common cfdev;
- struct net_device *ndev;
- struct platform_device *pdev;
- struct sk_buff_head qhead;
- struct sk_buff_head chead;
- u16 cmd;
- u16 tx_cpck_len;
- u16 tx_npck_len;
- u16 rx_cpck_len;
- u16 rx_npck_len;
- struct cfspi_ifc ifc;
- struct cfspi_xfer xfer;
- struct cfspi_dev *dev;
- unsigned long state;
- struct work_struct work;
- struct workqueue_struct *wq;
- struct list_head list;
- int flow_off_sent;
- u32 qd_low_mark;
- u32 qd_high_mark;
- struct completion comp;
- wait_queue_head_t wait;
- spinlock_t lock;
- bool flow_stop;
- bool slave;
- bool slave_talked;
-#ifdef CONFIG_DEBUG_FS
- enum cfspi_state dbg_state;
- u16 pcmd;
- u16 tx_ppck_len;
- u16 rx_ppck_len;
- struct dentry *dbgfs_dir;
- struct dentry *dbgfs_state;
- struct dentry *dbgfs_frame;
-#endif /* CONFIG_DEBUG_FS */
-};
-
-extern int spi_frm_align;
-extern int spi_up_head_align;
-extern int spi_up_tail_align;
-extern int spi_down_head_align;
-extern int spi_down_tail_align;
-extern struct platform_driver cfspi_spi_driver;
-
-void cfspi_dbg_state(struct cfspi *cfspi, int state);
-int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len);
-int cfspi_xmitlen(struct cfspi *cfspi);
-int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len);
-int cfspi_spi_remove(struct platform_device *pdev);
-int cfspi_spi_probe(struct platform_device *pdev);
-int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len);
-int cfspi_xmitlen(struct cfspi *cfspi);
-int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len);
-void cfspi_xfer(struct work_struct *work);
-
-#endif /* CAIF_SPI_H_ */
diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
index 2aa5e91d8457..8819ff4db35a 100644
--- a/include/net/caif/cfcnfg.h
+++ b/include/net/caif/cfcnfg.h
@@ -62,7 +62,7 @@ void cfcnfg_remove(struct cfcnfg *cfg);
* @fcs: Specify if checksum is used in CAIF Framing Layer.
* @head_room: Head space needed by link specific protocol.
*/
-void
+int
cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
struct net_device *dev, struct cflayer *phy_layer,
enum cfcnfg_phy_preference pref,
diff --git a/include/net/caif/cfserl.h b/include/net/caif/cfserl.h
index 14a55e03bb3c..67cce8757175 100644
--- a/include/net/caif/cfserl.h
+++ b/include/net/caif/cfserl.h
@@ -9,4 +9,5 @@
#include <net/caif/caif_layer.h>
struct cflayer *cfserl_create(int instance, bool use_stx);
+void cfserl_release(struct cflayer *layer);
#endif
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index f22bd6c838a3..e09ff87146c1 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -7,9 +7,11 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
+#include <linux/ethtool.h>
+#include <uapi/linux/rfkill.h>
#include <linux/netdevice.h>
#include <linux/debugfs.h>
#include <linux/list.h>
@@ -20,6 +22,7 @@
#include <linux/if_ether.h>
#include <linux/ieee80211.h>
#include <linux/net.h>
+#include <linux/rfkill.h>
#include <net/regulatory.h>
/**
@@ -72,12 +75,12 @@ struct wiphy;
*
* @IEEE80211_CHAN_DISABLED: This channel is disabled.
* @IEEE80211_CHAN_NO_IR: do not initiate radiation, this includes
- * sending probe requests or beaconing.
+ * sending probe requests or beaconing.
* @IEEE80211_CHAN_RADAR: Radar detection is required on this channel.
* @IEEE80211_CHAN_NO_HT40PLUS: extension channel above this channel
- * is not permitted.
+ * is not permitted.
* @IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel
- * is not permitted.
+ * is not permitted.
* @IEEE80211_CHAN_NO_OFDM: OFDM is not allowed on this channel.
* @IEEE80211_CHAN_NO_80MHZ: If the driver supports 80 MHz on the band,
* this flag indicates that an 80 MHz channel cannot use this
@@ -95,7 +98,23 @@ struct wiphy;
* on this channel.
* @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted
* on this channel.
- *
+ * @IEEE80211_CHAN_NO_HE: HE operation is not permitted on this channel.
+ * @IEEE80211_CHAN_1MHZ: 1 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_2MHZ: 2 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_4MHZ: 4 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_8MHZ: 8 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_16MHZ: 16 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_NO_320MHZ: If the driver supports 320 MHz on the band,
+ * this flag indicates that a 320 MHz channel cannot use this
+ * channel as the control or any of the secondary channels.
+ * This may be due to the driver or due to regulatory bandwidth
+ * restrictions.
+ * @IEEE80211_CHAN_NO_EHT: EHT operation is not permitted on this channel.
*/
enum ieee80211_channel_flags {
IEEE80211_CHAN_DISABLED = 1<<0,
@@ -111,6 +130,14 @@ enum ieee80211_channel_flags {
IEEE80211_CHAN_IR_CONCURRENT = 1<<10,
IEEE80211_CHAN_NO_20MHZ = 1<<11,
IEEE80211_CHAN_NO_10MHZ = 1<<12,
+ IEEE80211_CHAN_NO_HE = 1<<13,
+ IEEE80211_CHAN_1MHZ = 1<<14,
+ IEEE80211_CHAN_2MHZ = 1<<15,
+ IEEE80211_CHAN_4MHZ = 1<<16,
+ IEEE80211_CHAN_8MHZ = 1<<17,
+ IEEE80211_CHAN_16MHZ = 1<<18,
+ IEEE80211_CHAN_NO_320MHZ = 1<<19,
+ IEEE80211_CHAN_NO_EHT = 1<<20,
};
#define IEEE80211_CHAN_NO_HT40 \
@@ -126,6 +153,7 @@ enum ieee80211_channel_flags {
* with cfg80211.
*
* @center_freq: center frequency in MHz
+ * @freq_offset: offset from @center_freq, in kHz
* @hw_value: hardware-specific value for the channel
* @flags: channel flags from &enum ieee80211_channel_flags.
* @orig_flags: channel flags at registration time, used by regulatory
@@ -147,6 +175,7 @@ enum ieee80211_channel_flags {
struct ieee80211_channel {
enum nl80211_band band;
u32 center_freq;
+ u16 freq_offset;
u16 hw_value;
u32 flags;
int max_antenna_gain;
@@ -250,13 +279,36 @@ struct ieee80211_rate {
* struct ieee80211_he_obss_pd - AP settings for spatial reuse
*
* @enable: is the feature enabled.
+ * @sr_ctrl: The SR Control field of SRP element.
+ * @non_srg_max_offset: non-SRG maximum tx power offset
* @min_offset: minimal tx power offset an associated station shall use
* @max_offset: maximum tx power offset an associated station shall use
+ * @bss_color_bitmap: bitmap that indicates the BSS color values used by
+ * members of the SRG
+ * @partial_bssid_bitmap: bitmap that indicates the partial BSSID values
+ * used by members of the SRG
*/
struct ieee80211_he_obss_pd {
bool enable;
+ u8 sr_ctrl;
+ u8 non_srg_max_offset;
u8 min_offset;
u8 max_offset;
+ u8 bss_color_bitmap[8];
+ u8 partial_bssid_bitmap[8];
+};
+
+/**
+ * struct cfg80211_he_bss_color - AP settings for BSS coloring
+ *
+ * @color: the current color.
+ * @enabled: HE BSS color is used
+ * @partial: define the AID equation.
+ */
+struct cfg80211_he_bss_color {
+ u8 color;
+ bool enabled;
+ bool partial;
};
/**
@@ -316,7 +368,50 @@ struct ieee80211_sta_he_cap {
};
/**
- * struct ieee80211_sband_iftype_data
+ * struct ieee80211_eht_mcs_nss_supp - EHT max supported NSS per MCS
+ *
+ * See P802.11be_D1.3 Table 9-401k - "Subfields of the Supported EHT-MCS
+ * and NSS Set field"
+ *
+ * @only_20mhz: MCS/NSS support for 20 MHz-only STA.
+ * @bw: MCS/NSS support for 80, 160 and 320 MHz
+ * @bw._80: MCS/NSS support for BW <= 80 MHz
+ * @bw._160: MCS/NSS support for BW = 160 MHz
+ * @bw._320: MCS/NSS support for BW = 320 MHz
+ */
+struct ieee80211_eht_mcs_nss_supp {
+ union {
+ struct ieee80211_eht_mcs_nss_supp_20mhz_only only_20mhz;
+ struct {
+ struct ieee80211_eht_mcs_nss_supp_bw _80;
+ struct ieee80211_eht_mcs_nss_supp_bw _160;
+ struct ieee80211_eht_mcs_nss_supp_bw _320;
+ } __packed bw;
+ } __packed;
+} __packed;
+
+#define IEEE80211_EHT_PPE_THRES_MAX_LEN 32
+
+/**
+ * struct ieee80211_sta_eht_cap - STA's EHT capabilities
+ *
+ * This structure describes most essential parameters needed
+ * to describe 802.11be EHT capabilities for a STA.
+ *
+ * @has_eht: true iff EHT data is valid.
+ * @eht_cap_elem: Fixed portion of the eht capabilities element.
+ * @eht_mcs_nss_supp: The supported NSS/MCS combinations.
+ * @eht_ppe_thres: Holds the PPE Thresholds data.
+ */
+struct ieee80211_sta_eht_cap {
+ bool has_eht;
+ struct ieee80211_eht_cap_elem_fixed eht_cap_elem;
+ struct ieee80211_eht_mcs_nss_supp eht_mcs_nss_supp;
+ u8 eht_ppe_thres[IEEE80211_EHT_PPE_THRES_MAX_LEN];
+};
+
+/**
+ * struct ieee80211_sband_iftype_data - sband data per interface type
*
* This structure encapsulates sband data that is relevant for the
* interface types defined in @types_mask. Each type in the
@@ -324,10 +419,22 @@ struct ieee80211_sta_he_cap {
*
* @types_mask: interface types mask
* @he_cap: holds the HE capabilities
+ * @he_6ghz_capa: HE 6 GHz capabilities, must be filled in for a
+ * 6 GHz band channel (and 0 may be a valid value).
+ * @eht_cap: STA's EHT capabilities
+ * @vendor_elems: vendor element(s) to advertise
+ * @vendor_elems.data: vendor element(s) data
+ * @vendor_elems.len: vendor element(s) length
*/
struct ieee80211_sband_iftype_data {
u16 types_mask;
struct ieee80211_sta_he_cap he_cap;
+ struct ieee80211_he_6ghz_capa he_6ghz_capa;
+ struct ieee80211_sta_eht_cap eht_cap;
+ struct {
+ const u8 *data;
+ unsigned int len;
+ } vendor_elems;
};
/**
@@ -385,12 +492,28 @@ struct ieee80211_edmg {
};
/**
+ * struct ieee80211_sta_s1g_cap - STA's S1G capabilities
+ *
+ * This structure describes most essential parameters needed
+ * to describe 802.11ah S1G capabilities for a STA.
+ *
+ * @s1g: is STA an S1G STA
+ * @cap: S1G capabilities information
+ * @nss_mcs: Supported NSS MCS set
+ */
+struct ieee80211_sta_s1g_cap {
+ bool s1g;
+ u8 cap[10]; /* use S1G_CAPAB_ */
+ u8 nss_mcs[5];
+};
+
+/**
* struct ieee80211_supported_band - frequency band definition
*
* This structure describes a frequency band a wiphy
* is able to operate in.
*
- * @channels: Array of channels the hardware can operate in
+ * @channels: Array of channels the hardware can operate with
* in this band.
* @band: the band this structure represents
* @n_channels: Number of channels in @channels
@@ -400,7 +523,9 @@ struct ieee80211_edmg {
* @n_bitrates: Number of bitrates in @bitrates
* @ht_cap: HT capabilities in this band
* @vht_cap: VHT capabilities in this band
+ * @s1g_cap: S1G capabilities in this band
* @edmg_cap: EDMG capabilities in this band
* @n_iftype_data: number of iftype data entries
* @iftype_data: interface type data entries. Note that the bits in
* @types_mask inside this structure cannot overlap (i.e. only
@@ -415,6 +540,7 @@ struct ieee80211_supported_band {
int n_bitrates;
struct ieee80211_sta_ht_cap ht_cap;
struct ieee80211_sta_vht_cap vht_cap;
+ struct ieee80211_sta_s1g_cap s1g_cap;
struct ieee80211_edmg edmg_cap;
u16 n_iftype_data;
const struct ieee80211_sband_iftype_data *iftype_data;
@@ -468,15 +594,43 @@ ieee80211_get_he_iftype_cap(const struct ieee80211_supported_band *sband,
}
/**
- * ieee80211_get_he_sta_cap - return HE capabilities for an sband's STA
+ * ieee80211_get_he_6ghz_capa - return HE 6 GHz capabilities
* @sband: the sband to search for the STA on
+ * @iftype: the iftype to search for
*
- * Return: pointer to the struct ieee80211_sta_he_cap, or NULL is none found
+ * Return: the 6 GHz capabilities
*/
-static inline const struct ieee80211_sta_he_cap *
-ieee80211_get_he_sta_cap(const struct ieee80211_supported_band *sband)
+static inline __le16
+ieee80211_get_he_6ghz_capa(const struct ieee80211_supported_band *sband,
+ enum nl80211_iftype iftype)
{
- return ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_STATION);
+ const struct ieee80211_sband_iftype_data *data =
+ ieee80211_get_sband_iftype_data(sband, iftype);
+
+ if (WARN_ON(!data || !data->he_cap.has_he))
+ return 0;
+
+ return data->he_6ghz_capa.capa;
+}
+
+/**
+ * ieee80211_get_eht_iftype_cap - return EHT capabilities for an sband's iftype
+ * @sband: the sband to search for the iftype on
+ * @iftype: enum nl80211_iftype
+ *
+ * Return: pointer to the struct ieee80211_sta_eht_cap, or NULL if none found
+ */
+static inline const struct ieee80211_sta_eht_cap *
+ieee80211_get_eht_iftype_cap(const struct ieee80211_supported_band *sband,
+ enum nl80211_iftype iftype)
+{
+ const struct ieee80211_sband_iftype_data *data =
+ ieee80211_get_sband_iftype_data(sband, iftype);
+
+ if (data && data->eht_cap.has_eht)
+ return &data->eht_cap;
+
+ return NULL;
}
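A hedged usage sketch for the new accessor, with sband assumed valid for the current band:

const struct ieee80211_sta_eht_cap *eht_cap;

eht_cap = ieee80211_get_eht_iftype_cap(sband, NL80211_IFTYPE_STATION);
if (!eht_cap)
	return false;	/* no EHT for this band/iftype combination */
/* eht_cap->eht_cap_elem and eht_cap->eht_mcs_nss_supp are valid here */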
/**
@@ -589,6 +743,7 @@ struct key_params {
* If edmg is requested (i.e. the .channels member is non-zero),
* chan will define the primary channel and all other
* parameters are ignored.
+ * @freq1_offset: offset from @center_freq1, in kHz
*/
struct cfg80211_chan_def {
struct ieee80211_channel *chan;
@@ -596,6 +751,80 @@ struct cfg80211_chan_def {
u32 center_freq1;
u32 center_freq2;
struct ieee80211_edmg edmg;
+ u16 freq1_offset;
+};
+
+/*
+ * cfg80211_bitrate_mask - masks for bitrate control
+ */
+struct cfg80211_bitrate_mask {
+ struct {
+ u32 legacy;
+ u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
+ u16 vht_mcs[NL80211_VHT_NSS_MAX];
+ u16 he_mcs[NL80211_HE_NSS_MAX];
+ enum nl80211_txrate_gi gi;
+ enum nl80211_he_gi he_gi;
+ enum nl80211_he_ltf he_ltf;
+ } control[NUM_NL80211_BANDS];
+};
+
+/**
+ * struct cfg80211_tid_cfg - TID specific configuration
+ * @config_override: Flag to notify driver to reset TID configuration
+ * of the peer.
+ * @tids: bitmap of TIDs to modify
+ * @mask: bitmap of attributes indicating which parameter changed,
+ * similar to &nl80211_tid_config_supp.
+ * @noack: noack configuration value for the TID
+ * @retry_long: retry count value
+ * @retry_short: retry count value
+ * @ampdu: Enable/Disable MPDU aggregation
+ * @rtscts: Enable/Disable RTS/CTS
+ * @amsdu: Enable/Disable MSDU aggregation
+ * @txrate_type: Tx bitrate mask type
+ * @txrate_mask: Tx bitrate to be applied for the TID
+ */
+struct cfg80211_tid_cfg {
+ bool config_override;
+ u8 tids;
+ u64 mask;
+ enum nl80211_tid_config noack;
+ u8 retry_long, retry_short;
+ enum nl80211_tid_config ampdu;
+ enum nl80211_tid_config rtscts;
+ enum nl80211_tid_config amsdu;
+ enum nl80211_tx_rate_setting txrate_type;
+ struct cfg80211_bitrate_mask txrate_mask;
+};
+
+/**
+ * struct cfg80211_tid_config - TID configuration
+ * @peer: Station's MAC address
+ * @n_tid_conf: Number of TID specific configurations to be applied
+ * @tid_conf: Configuration change info
+ */
+struct cfg80211_tid_config {
+ const u8 *peer;
+ u32 n_tid_conf;
+ struct cfg80211_tid_cfg tid_conf[];
+};
+
+/**
+ * struct cfg80211_fils_aad - FILS AAD data
+ * @macaddr: STA MAC address
+ * @kek: FILS KEK
+ * @kek_len: FILS KEK length
+ * @snonce: STA Nonce
+ * @anonce: AP Nonce
+ */
+struct cfg80211_fils_aad {
+ const u8 *macaddr;
+ const u8 *kek;
+ u8 kek_len;
+ const u8 *snonce;
+ const u8 *anonce;
};
/**
@@ -650,6 +879,7 @@ cfg80211_chandef_identical(const struct cfg80211_chan_def *chandef1,
return (chandef1->chan == chandef2->chan &&
chandef1->width == chandef2->width &&
chandef1->center_freq1 == chandef2->center_freq1 &&
+ chandef1->freq1_offset == chandef2->freq1_offset &&
chandef1->center_freq2 == chandef2->center_freq2);
}
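freq1_offset exists because S1G channels sit on sub-MHz boundaries that the u32 MHz fields cannot express. An illustrative 1 MHz channel centered at 902.5 MHz would be described as (values illustrative, chan assumed to carry the matching center_freq/freq_offset pair):

struct cfg80211_chan_def def = {
	.chan = chan,			/* chan->freq_offset == 500 assumed */
	.width = NL80211_CHAN_WIDTH_1,	/* 1 MHz S1G channel */
	.center_freq1 = 902,		/* whole-MHz part */
	.freq1_offset = 500,		/* +500 kHz, i.e. 902.5 MHz */
};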
@@ -709,19 +939,18 @@ int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
enum nl80211_iftype iftype);
/**
- * ieee80211_chandef_rate_flags - returns rate flags for a channel
+ * ieee80211_chanwidth_rate_flags - return rate flags for channel width
+ * @width: the channel width of the channel
*
* In some channel types, not all rates may be used - for example CCK
* rates may not be used in 5/10 MHz channels.
*
- * @chandef: channel definition for the channel
- *
- * Returns: rate flags which apply for this channel
+ * Returns: rate flags which apply for this channel width
*/
static inline enum ieee80211_rate_flags
-ieee80211_chandef_rate_flags(struct cfg80211_chan_def *chandef)
+ieee80211_chanwidth_rate_flags(enum nl80211_chan_width width)
{
- switch (chandef->width) {
+ switch (width) {
case NL80211_CHAN_WIDTH_5:
return IEEE80211_RATE_SUPPORTS_5MHZ;
case NL80211_CHAN_WIDTH_10:
@@ -733,6 +962,20 @@ ieee80211_chandef_rate_flags(struct cfg80211_chan_def *chandef)
}
/**
+ * ieee80211_chandef_rate_flags - returns rate flags for a channel
+ * @chandef: channel definition for the channel
+ *
+ * See ieee80211_chanwidth_rate_flags().
+ *
+ * Returns: rate flags which apply for this channel
+ */
+static inline enum ieee80211_rate_flags
+ieee80211_chandef_rate_flags(struct cfg80211_chan_def *chandef)
+{
+ return ieee80211_chanwidth_rate_flags(chandef->width);
+}
+
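A typical consumer masks the band's bitrate table against these flags; a minimal sketch, with chandef and sband taken from the surrounding context:

enum ieee80211_rate_flags required = ieee80211_chandef_rate_flags(&chandef);
int i;

for (i = 0; i < sband->n_bitrates; i++) {
	if (required && !(sband->bitrates[i].flags & required))
		continue;	/* e.g. CCK rates dropped on 5/10 MHz */
	/* rate i is usable at this channel width */
}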
+/**
* ieee80211_chandef_max_power - maximum transmission power for the chandef
*
* In some regulations, the transmit power may depend on the configured channel
@@ -760,6 +1003,17 @@ ieee80211_chandef_max_power(struct cfg80211_chan_def *chandef)
}
/**
+ * cfg80211_any_usable_channels - check for usable channels
+ * @wiphy: the wiphy to check for
+ * @band_mask: which bands to check on
+ * @prohibited_flags: which channels to not consider usable,
+ * %IEEE80211_CHAN_DISABLED is always taken into account
+ */
+bool cfg80211_any_usable_channels(struct wiphy *wiphy,
+ unsigned long band_mask,
+ u32 prohibited_flags);
+
+/**
* enum survey_info_flags - survey information flags
*
* @SURVEY_INFO_NOISE_DBM: noise (in dBm) was filled in
@@ -822,6 +1076,7 @@ struct survey_info {
};
#define CFG80211_MAX_WEP_KEYS 4
+#define CFG80211_MAX_NUM_AKM_SUITES 10
/**
* struct cfg80211_crypto_settings - Crypto settings
@@ -842,6 +1097,8 @@ struct survey_info {
* protocol frames.
* @control_port_over_nl80211: TRUE if userspace expects to exchange control
* port frames over NL80211 instead of the network interface.
+ * @control_port_no_preauth: disables pre-auth rx over the nl80211 control
+ * port for mac80211
* @wep_keys: static WEP keys, if not NULL points to an array of
* CFG80211_MAX_WEP_KEYS WEP keys
* @wep_tx_key: key index (0..3) of the default TX static WEP key
@@ -849,6 +1106,21 @@ struct survey_info {
* @sae_pwd: password for SAE authentication (for devices supporting SAE
* offload)
* @sae_pwd_len: length of SAE password (for devices supporting SAE offload)
+ * @sae_pwe: The mechanisms allowed for SAE PWE derivation:
+ *
+ * NL80211_SAE_PWE_UNSPECIFIED
+ * Not-specified, used to indicate userspace did not specify any
+ * preference. The driver should follow its internal policy in
+ * such a scenario.
+ *
+ * NL80211_SAE_PWE_HUNT_AND_PECK
+ * Allow hunting-and-pecking loop only
+ *
+ * NL80211_SAE_PWE_HASH_TO_ELEMENT
+ * Allow hash-to-element only
+ *
+ * NL80211_SAE_PWE_BOTH
+ * Allow either hunting-and-pecking loop or hash-to-element
*/
struct cfg80211_crypto_settings {
u32 wpa_versions;
@@ -856,20 +1128,53 @@ struct cfg80211_crypto_settings {
int n_ciphers_pairwise;
u32 ciphers_pairwise[NL80211_MAX_NR_CIPHER_SUITES];
int n_akm_suites;
- u32 akm_suites[NL80211_MAX_NR_AKM_SUITES];
+ u32 akm_suites[CFG80211_MAX_NUM_AKM_SUITES];
bool control_port;
__be16 control_port_ethertype;
bool control_port_no_encrypt;
bool control_port_over_nl80211;
+ bool control_port_no_preauth;
struct key_params *wep_keys;
int wep_tx_key;
const u8 *psk;
const u8 *sae_pwd;
u8 sae_pwd_len;
+ enum nl80211_sae_pwe_mechanism sae_pwe;
+};
+
+/**
+ * struct cfg80211_mbssid_config - AP settings for multi bssid
+ *
+ * @tx_wdev: pointer to the transmitted interface in the MBSSID set
+ * @index: index of this AP in the multi bssid group.
+ * @ema: set to true if the beacons should be sent out in EMA mode.
+ */
+struct cfg80211_mbssid_config {
+ struct wireless_dev *tx_wdev;
+ u8 index;
+ bool ema;
+};
+
+/**
+ * struct cfg80211_mbssid_elems - Multiple BSSID elements
+ *
+ * @cnt: Number of elements in array %elems.
+ *
+ * @elem: Array of multiple BSSID element(s) to be added into Beacon frames.
+ * @elem.data: Data for multiple BSSID elements.
+ * @elem.len: Length of data.
+ */
+struct cfg80211_mbssid_elems {
+ u8 cnt;
+ struct {
+ const u8 *data;
+ size_t len;
+ } elem[];
};
/**
* struct cfg80211_beacon_data - beacon data
+ * @link_id: the link ID for the AP MLD link sending this beacon
* @head: head portion of beacon (before TIM IE)
* or %NULL if not changed
* @tail: tail portion of beacon (after TIM IE)
@@ -886,6 +1191,7 @@ struct cfg80211_crypto_settings {
* @assocresp_ies_len: length of assocresp_ies in octets
* @probe_resp_len: length of probe response template (@probe_resp)
* @probe_resp: probe response template (AP mode only)
+ * @mbssid_ies: multiple BSSID elements
* @ftm_responder: enable FTM responder functionality; -1 for no change
* (which also implies no change in LCI/civic location data)
* @lci: Measurement Report element content, starting with Measurement Token
@@ -894,8 +1200,13 @@ struct cfg80211_crypto_settings {
* Token (measurement type 11)
* @lci_len: LCI data length
* @civicloc_len: Civic location data length
+ * @he_bss_color: BSS Color settings
+ * @he_bss_color_valid: indicates whether bss color
+ * attribute is present in beacon data or not.
*/
struct cfg80211_beacon_data {
+ unsigned int link_id;
+
const u8 *head, *tail;
const u8 *beacon_ies;
const u8 *proberesp_ies;
@@ -903,6 +1214,7 @@ struct cfg80211_beacon_data {
const u8 *probe_resp;
const u8 *lci;
const u8 *civicloc;
+ struct cfg80211_mbssid_elems *mbssid_ies;
s8 ftm_responder;
size_t head_len, tail_len;
@@ -912,6 +1224,8 @@ struct cfg80211_beacon_data {
size_t probe_resp_len;
size_t lci_len;
size_t civicloc_len;
+ struct cfg80211_he_bss_color he_bss_color;
+ bool he_bss_color_valid;
};
struct mac_address {
@@ -934,27 +1248,37 @@ struct cfg80211_acl_data {
struct mac_address mac_addrs[];
};
-/*
- * cfg80211_bitrate_mask - masks for bitrate control
+/**
+ * struct cfg80211_fils_discovery - FILS discovery parameters from
+ * IEEE Std 802.11ai-2016, Annex C.3 MIB detail.
+ *
+ * @min_interval: Minimum packet interval in TUs (0 - 10000)
+ * @max_interval: Maximum packet interval in TUs (0 - 10000)
+ * @tmpl_len: Template length
+ * @tmpl: Template data for FILS discovery frame including the action
+ * frame headers.
*/
-struct cfg80211_bitrate_mask {
- struct {
- u32 legacy;
- u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
- u16 vht_mcs[NL80211_VHT_NSS_MAX];
- enum nl80211_txrate_gi gi;
- } control[NUM_NL80211_BANDS];
+struct cfg80211_fils_discovery {
+ u32 min_interval;
+ u32 max_interval;
+ size_t tmpl_len;
+ const u8 *tmpl;
};
/**
- * enum cfg80211_ap_settings_flags - AP settings flags
+ * struct cfg80211_unsol_bcast_probe_resp - Unsolicited broadcast probe
+ * response parameters in 6GHz.
*
- * Used by cfg80211_ap_settings
- *
- * @AP_SETTINGS_EXTERNAL_AUTH_SUPPORT: AP supports external authentication
+ * @interval: Packet interval in TUs. Maximum allowed is 20 TU, as mentioned
+ * in IEEE P802.11ax/D6.0 26.17.2.3.2 - AP behavior for fast passive
+ * scanning
+ * @tmpl_len: Template length
+ * @tmpl: Template data for probe response
*/
-enum cfg80211_ap_settings_flags {
- AP_SETTINGS_EXTERNAL_AUTH_SUPPORT = BIT(0),
+struct cfg80211_unsol_bcast_probe_resp {
+ u32 interval;
+ size_t tmpl_len;
+ const u8 *tmpl;
};
/**
@@ -985,11 +1309,19 @@ enum cfg80211_ap_settings_flags {
* @ht_cap: HT capabilities (or %NULL if HT isn't enabled)
* @vht_cap: VHT capabilities (or %NULL if VHT isn't enabled)
* @he_cap: HE capabilities (or %NULL if HE isn't enabled)
+ * @eht_cap: EHT capabilities (or %NULL if EHT isn't enabled)
+ * @eht_oper: EHT operation IE (or %NULL if EHT isn't enabled)
* @ht_required: stations must support HT
* @vht_required: stations must support VHT
* @twt_responder: Enable Target Wait Time
+ * @he_required: stations must support HE
+ * @sae_h2e_required: stations must support direct H2E technique in SAE
* @flags: flags, as defined in enum cfg80211_ap_settings_flags
* @he_obss_pd: OBSS Packet Detection settings
+ * @he_oper: HE operation IE (or %NULL if HE isn't enabled)
+ * @fils_discovery: FILS discovery transmission parameters
+ * @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
+ * @mbssid_config: AP settings for multiple bssid
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -1014,10 +1346,16 @@ struct cfg80211_ap_settings {
const struct ieee80211_ht_cap *ht_cap;
const struct ieee80211_vht_cap *vht_cap;
const struct ieee80211_he_cap_elem *he_cap;
- bool ht_required, vht_required;
+ const struct ieee80211_he_operation *he_oper;
+ const struct ieee80211_eht_cap_elem *eht_cap;
+ const struct ieee80211_eht_operation *eht_oper;
+ bool ht_required, vht_required, he_required, sae_h2e_required;
bool twt_responder;
u32 flags;
struct ieee80211_he_obss_pd he_obss_pd;
+ struct cfg80211_fils_discovery fils_discovery;
+ struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
+ struct cfg80211_mbssid_config mbssid_config;
};
/**
@@ -1049,7 +1387,26 @@ struct cfg80211_csa_settings {
u8 count;
};
-#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
+/**
+ * struct cfg80211_color_change_settings - color change settings
+ *
+ * Used for bss color change
+ *
+ * @beacon_color_change: beacon data while performing the color countdown
+ * @counter_offset_beacon: offsets of the counters within the beacon (tail)
+ * @counter_offset_presp: offsets of the counters within the probe response
+ * @beacon_next: beacon data to be used after the color change
+ * @count: number of beacons until the color change
+ * @color: the color used after the change
+ */
+struct cfg80211_color_change_settings {
+ struct cfg80211_beacon_data beacon_color_change;
+ u16 counter_offset_beacon;
+ u16 counter_offset_presp;
+ struct cfg80211_beacon_data beacon_next;
+ u8 count;
+ u8 color;
+};
/**
* struct iface_combination_params - input parameters for interface combinations
@@ -1080,6 +1437,7 @@ struct iface_combination_params {
* @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp)
* @STATION_PARAM_APPLY_CAPABILITY: apply new capability
* @STATION_PARAM_APPLY_PLINK_STATE: apply new plink state
+ * @STATION_PARAM_APPLY_STA_TXPOWER: apply tx power for STA
*
* Not all station parameters have in-band "no change" signalling,
* for those that don't these flags will be used.
@@ -1088,7 +1446,6 @@ enum station_parameters_apply_mask {
STATION_PARAM_APPLY_UAPSD = BIT(0),
STATION_PARAM_APPLY_CAPABILITY = BIT(1),
STATION_PARAM_APPLY_PLINK_STATE = BIT(2),
- STATION_PARAM_APPLY_STA_TXPOWER = BIT(3),
};
/**
@@ -1112,14 +1469,66 @@ struct sta_txpwr {
};
/**
- * struct station_parameters - station parameters
+ * struct link_station_parameters - link station parameters
*
- * Used to change and create a new station.
+ * Used to change and create a new link station.
*
- * @vlan: vlan interface station should belong to
+ * @mld_mac: MAC address of the station
+ * @link_id: the link id (-1 for non-MLD station)
+ * @link_mac: MAC address of the link
* @supported_rates: supported rates in IEEE 802.11 format
* (or NULL for no change)
* @supported_rates_len: number of supported rates
+ * @ht_capa: HT capabilities of station
+ * @vht_capa: VHT capabilities of station
+ * @opmode_notif: operating mode field from Operating Mode Notification
+ * @opmode_notif_used: information if operating mode field is used
+ * @he_capa: HE capabilities of station
+ * @he_capa_len: the length of the HE capabilities
+ * @txpwr: transmit power for an associated station
+ * @txpwr_set: txpwr field is set
+ * @he_6ghz_capa: HE 6 GHz Band capabilities of station
+ * @eht_capa: EHT capabilities of station
+ * @eht_capa_len: the length of the EHT capabilities
+ */
+struct link_station_parameters {
+ const u8 *mld_mac;
+ int link_id;
+ const u8 *link_mac;
+ const u8 *supported_rates;
+ u8 supported_rates_len;
+ const struct ieee80211_ht_cap *ht_capa;
+ const struct ieee80211_vht_cap *vht_capa;
+ u8 opmode_notif;
+ bool opmode_notif_used;
+ const struct ieee80211_he_cap_elem *he_capa;
+ u8 he_capa_len;
+ struct sta_txpwr txpwr;
+ bool txpwr_set;
+ const struct ieee80211_he_6ghz_capa *he_6ghz_capa;
+ const struct ieee80211_eht_cap_elem *eht_capa;
+ u8 eht_capa_len;
+};
+
+/**
+ * struct link_station_del_parameters - link station deletion parameters
+ *
+ * Used to delete a link station entry (or all stations).
+ *
+ * @mld_mac: MAC address of the station
+ * @link_id: the link id
+ */
+struct link_station_del_parameters {
+ const u8 *mld_mac;
+ u32 link_id;
+};
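For illustration only: filling the parameters for one MLD link might look like the following sketch (function and variable names are hypothetical):

	#include <linux/string.h>
	#include <net/cfg80211.h>

	static void example_fill_link(struct link_station_parameters *lsp,
				      const u8 *mld_addr, const u8 *link_addr)
	{
		memset(lsp, 0, sizeof(*lsp));
		lsp->mld_mac = mld_addr;
		lsp->link_id = 0;	/* -1 would mean a non-MLD station */
		lsp->link_mac = link_addr;
		lsp->txpwr_set = false;	/* leave TX power unchanged */
	}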
+
+/**
+ * struct station_parameters - station parameters
+ *
+ * Used to change and create a new station.
+ *
+ * @vlan: vlan interface station should belong to
* @sta_flags_mask: station flags that changed
* (bitmask of BIT(%NL80211_STA_FLAG_...))
* @sta_flags_set: station flags values
@@ -1130,8 +1539,6 @@ struct sta_txpwr {
* @peer_aid: mesh peer AID or zero for no change
* @plink_action: plink action to take
* @plink_state: set the peer link state for a station
- * @ht_capa: HT capabilities of station
- * @vht_capa: VHT capabilities of station
* @uapsd_queues: bitmap of queues configured for uapsd. same format
* as the AC bitmap in the QoS info field
* @max_sp: max Service Period. same format as the MAX_SP in the
@@ -1148,15 +1555,11 @@ struct sta_txpwr {
* @supported_channels_len: number of supported channels
* @supported_oper_classes: supported oper classes in IEEE 802.11 format
* @supported_oper_classes_len: number of supported operating classes
- * @opmode_notif: operating mode field from Operating Mode Notification
- * @opmode_notif_used: information if operating mode field is used
* @support_p2p_ps: information if station supports P2P PS mechanism
- * @he_capa: HE capabilities of station
- * @he_capa_len: the length of the HE capabilities
* @airtime_weight: airtime scheduler weight for this station
+ * @link_sta_params: link-related parameters.
*/
struct station_parameters {
- const u8 *supported_rates;
struct net_device *vlan;
u32 sta_flags_mask, sta_flags_set;
u32 sta_modify_mask;
@@ -1164,11 +1567,8 @@ struct station_parameters {
u16 aid;
u16 vlan_id;
u16 peer_aid;
- u8 supported_rates_len;
u8 plink_action;
u8 plink_state;
- const struct ieee80211_ht_cap *ht_capa;
- const struct ieee80211_vht_cap *vht_capa;
u8 uapsd_queues;
u8 max_sp;
enum nl80211_mesh_power_mode local_pm;
@@ -1179,13 +1579,9 @@ struct station_parameters {
u8 supported_channels_len;
const u8 *supported_oper_classes;
u8 supported_oper_classes_len;
- u8 opmode_notif;
- bool opmode_notif_used;
int support_p2p_ps;
- const struct ieee80211_he_cap_elem *he_capa;
- u8 he_capa_len;
u16 airtime_weight;
- struct sta_txpwr txpwr;
+ struct link_station_parameters link_sta_params;
};
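With this split, the per-link attributes (rates, HT/VHT/HE/EHT capabilities, TX power) now live in @link_sta_params. A non-MLD add-station call would set them roughly as in this hedged sketch (names are hypothetical):

	#include <net/cfg80211.h>

	static void example_sta_params(struct station_parameters *params,
				       const u8 *rates, u8 n_rates)
	{
		params->link_sta_params.link_id = -1;	/* non-MLD station */
		params->link_sta_params.supported_rates = rates;
		params->link_sta_params.supported_rates_len = n_rates;
	}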
/**
@@ -1251,7 +1647,7 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
enum cfg80211_station_type statype);
/**
- * enum station_info_rate_flags - bitrate info flags
+ * enum rate_info_flags - bitrate info flags
*
* Used by the driver to indicate the specific rate transmission
* type for 802.11n transmissions.
@@ -1262,6 +1658,8 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
* @RATE_INFO_FLAGS_DMG: 60GHz MCS
* @RATE_INFO_FLAGS_HE_MCS: HE MCS information
* @RATE_INFO_FLAGS_EDMG: 60GHz MCS in EDMG mode
+ * @RATE_INFO_FLAGS_EXTENDED_SC_DMG: 60GHz extended SC MCS
+ * @RATE_INFO_FLAGS_EHT_MCS: EHT MCS information
*/
enum rate_info_flags {
RATE_INFO_FLAGS_MCS = BIT(0),
@@ -1270,6 +1668,8 @@ enum rate_info_flags {
RATE_INFO_FLAGS_DMG = BIT(3),
RATE_INFO_FLAGS_HE_MCS = BIT(4),
RATE_INFO_FLAGS_EDMG = BIT(5),
+ RATE_INFO_FLAGS_EXTENDED_SC_DMG = BIT(6),
+ RATE_INFO_FLAGS_EHT_MCS = BIT(7),
};
/**
@@ -1284,6 +1684,8 @@ enum rate_info_flags {
* @RATE_INFO_BW_80: 80 MHz bandwidth
* @RATE_INFO_BW_160: 160 MHz bandwidth
* @RATE_INFO_BW_HE_RU: bandwidth determined by HE RU allocation
+ * @RATE_INFO_BW_320: 320 MHz bandwidth
+ * @RATE_INFO_BW_EHT_RU: bandwidth determined by EHT RU allocation
*/
enum rate_info_bw {
RATE_INFO_BW_20 = 0,
@@ -1293,6 +1695,8 @@ enum rate_info_bw {
RATE_INFO_BW_80,
RATE_INFO_BW_160,
RATE_INFO_BW_HE_RU,
+ RATE_INFO_BW_320,
+ RATE_INFO_BW_EHT_RU,
};
/**
@@ -1310,6 +1714,9 @@ enum rate_info_bw {
* @he_ru_alloc: HE RU allocation (from &enum nl80211_he_ru_alloc,
* only valid if bw is %RATE_INFO_BW_HE_RU)
* @n_bonded_ch: In case of EDMG the number of bonded channels (1-4)
+ * @eht_gi: EHT guard interval (from &enum nl80211_eht_gi)
+ * @eht_ru_alloc: EHT RU allocation (from &enum nl80211_eht_ru_alloc,
+ * only valid if bw is %RATE_INFO_BW_EHT_RU)
*/
struct rate_info {
u8 flags;
@@ -1321,10 +1728,12 @@ struct rate_info {
u8 he_dcm;
u8 he_ru_alloc;
u8 n_bonded_ch;
+ u8 eht_gi;
+ u8 eht_ru_alloc;
};
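For illustration, a driver reporting a 2-stream EHT MCS 13 rate on a 320 MHz channel might fill the structure like this (values are illustrative only):

	#include <net/cfg80211.h>

	static const struct rate_info example_eht_rate = {
		.flags = RATE_INFO_FLAGS_EHT_MCS,
		.mcs = 13,
		.nss = 2,
		.bw = RATE_INFO_BW_320,
	};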
/**
- * enum station_info_rate_flags - bitrate info flags
+ * enum bss_param_flags - BSS parameter flags
*
* Used by the driver to indicate the BSS parameters in use,
* see &struct sta_bss_parameters.
@@ -1472,6 +1881,7 @@ struct cfg80211_tid_stats {
* an FCS error. This counter should be incremented only when TA of the
* received packet with an FCS error matches the peer MAC address.
* @airtime_link_metric: mesh airtime link metric.
+ * @connected_to_as: true if mesh STA has a path to authentication server
*/
struct station_info {
u64 filled;
@@ -1529,6 +1939,56 @@ struct station_info {
u32 fcs_err_count;
u32 airtime_link_metric;
+
+ u8 connected_to_as;
+};
+
+/**
+ * struct cfg80211_sar_sub_specs - SAR sub-spec limit
+ * @power: power limitation in units of 0.25 dBm
+ * @freq_range_index: index of the frequency range the power limitation applies to
+ */
+struct cfg80211_sar_sub_specs {
+ s32 power;
+ u32 freq_range_index;
+};
+
+/**
+ * struct cfg80211_sar_specs - SAR limit specs
+ * @type: SAR limit type, e.g. power specified in units of 0.25 dBm
+ * @num_sub_specs: number of SAR sub-specs
+ * @sub_specs: memory to hold the SAR sub-specs
+ */
+struct cfg80211_sar_specs {
+ enum nl80211_sar_type type;
+ u32 num_sub_specs;
+ struct cfg80211_sar_sub_specs sub_specs[];
+};
+
+
+/**
+ * struct cfg80211_sar_freq_ranges - SAR frequency ranges
+ * @start_freq: start range edge frequency
+ * @end_freq: end range edge frequency
+ */
+struct cfg80211_sar_freq_ranges {
+ u32 start_freq;
+ u32 end_freq;
+};
+
+/**
+ * struct cfg80211_sar_capa - SAR limit capability
+ * @type: SAR limit type, e.g. power specified in units of 0.25 dBm
+ * @num_freq_ranges: number of frequency ranges
+ * @freq_ranges: memory to hold the freq ranges.
+ *
+ * Note: the WLAN driver may append new ranges or split an existing
+ * range into smaller ones and then append them.
+ */
+struct cfg80211_sar_capa {
+ enum nl80211_sar_type type;
+ u32 num_freq_ranges;
+ const struct cfg80211_sar_freq_ranges *freq_ranges;
};
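A driver advertising SAR support would typically provide a static capability table and point wiphy->sar_capa at it before registration; a minimal sketch, with illustrative MHz bounds:

	#include <linux/kernel.h>
	#include <net/cfg80211.h>

	static const struct cfg80211_sar_freq_ranges drv_sar_ranges[] = {
		{ .start_freq = 2412, .end_freq = 2484 },
		{ .start_freq = 5180, .end_freq = 5850 },
	};

	static const struct cfg80211_sar_capa drv_sar_capa = {
		.type = NL80211_SAR_TYPE_POWER,
		.num_freq_ranges = ARRAY_SIZE(drv_sar_ranges),
		.freq_ranges = drv_sar_ranges,
	};

	/* before wiphy_register(): wiphy->sar_capa = &drv_sar_capa; */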
#if IS_ENABLED(CONFIG_CFG80211)
@@ -1655,8 +2115,9 @@ struct mpath_info {
* (or NULL for no change)
* @basic_rates_len: number of basic rates
* @ap_isolate: do not forward packets between connected stations
+ * (0 = no, 1 = yes, -1 = do not change)
* @ht_opmode: HT Operation mode
- * (u16 = opmode, -1 = do not change)
+ * (u16 = opmode, -1 = do not change)
* @p2p_ctwindow: P2P CT Window (-1 = no change)
* @p2p_opp_ps: P2P opportunistic PS (-1 = no change)
*/
@@ -1740,10 +2201,18 @@ struct bss_parameters {
* @plink_timeout: If no tx activity is seen from a STA we've established
* peering with for longer than this time (in seconds), then remove it
* from the STA's list of peers. Default is 30 minutes.
+ * @dot11MeshConnectedToAuthServer: if set to true then this mesh STA
+ * will advertise that it is connected to a authentication server
+ * in the mesh formation field.
* @dot11MeshConnectedToMeshGate: if set to true, advertise that this STA is
* connected to a mesh gate in mesh formation info. If false, the
* value in mesh formation is determined by the presence of root paths
* in the mesh path table
+ * @dot11MeshNolearn: Try to avoid multi-hop path discovery (e.g. PREQ/PREP
+ * for HWMP) if the destination is a direct neighbor. Note that this might
+ * not be the optimal decision as a multi-hop route might be better. So
+ * if using this setting you will likely also want to disable
+ * dot11MeshForwarding and use another mesh routing protocol on top.
*/
struct mesh_config {
u16 dot11MeshRetryTimeout;
@@ -1764,6 +2233,7 @@ struct mesh_config {
u16 dot11MeshHWMPnetDiameterTraversalTime;
u8 dot11MeshHWMPRootMode;
bool dot11MeshConnectedToMeshGate;
+ bool dot11MeshConnectedToAuthServer;
u16 dot11MeshHWMPRannInterval;
bool dot11MeshGateAnnouncementProtocol;
bool dot11MeshForwarding;
@@ -1775,6 +2245,7 @@ struct mesh_config {
enum nl80211_mesh_power_mode power_mode;
u16 dot11MeshAwakeWindowDuration;
u32 plink_timeout;
+ bool dot11MeshNolearn;
};
/**
@@ -1845,6 +2316,7 @@ struct ocb_setup {
* @cwmax: Maximum contention window [a value of the form 2^n-1 in the range
* 1..32767]
* @aifs: Arbitration interframe space [0..255]
+ * @link_id: link_id or -1 for non-MLD
*/
struct ieee80211_txq_params {
enum nl80211_ac ac;
@@ -1852,6 +2324,7 @@ struct ieee80211_txq_params {
u16 cwmin;
u16 cwmax;
u8 aifs;
+ int link_id;
};
/**
@@ -1903,6 +2376,27 @@ struct cfg80211_scan_info {
};
/**
+ * struct cfg80211_scan_6ghz_params - relevant for 6 GHz only
+ *
+ * @short_ssid: short ssid to scan for
+ * @bssid: bssid to scan for
+ * @channel_idx: idx of the channel in the channel array in the scan request
+ * to which the above info is relevant
+ * @unsolicited_probe: the AP transmits unsolicited probe response every 20 TU
+ * @short_ssid_valid: @short_ssid is valid and can be used
+ * @psc_no_listen: when set, and the channel is a PSC channel, no need to wait
+ * 20 TUs before starting to send probe requests.
+ */
+struct cfg80211_scan_6ghz_params {
+ u32 short_ssid;
+ u32 channel_idx;
+ u8 bssid[ETH_ALEN];
+ bool unsolicited_probe;
+ bool short_ssid_valid;
+ bool psc_no_listen;
+};
+
+/**
* struct cfg80211_scan_request - scan request description
*
* @ssids: SSIDs to scan for (active scan only)
@@ -1929,6 +2423,10 @@ struct cfg80211_scan_info {
* @mac_addr_mask: MAC address mask used with randomisation, bits that
* are 0 in the mask should be randomised, bits that are 1 should
* be taken from the @mac_addr
+ * @scan_6ghz: relevant for split scan request only,
+ * true if this is the second scan request
+ * @n_6ghz_params: number of 6 GHz params
+ * @scan_6ghz_params: 6 GHz params
* @bssid: BSSID to scan for (most commonly, the wildcard BSSID)
*/
struct cfg80211_scan_request {
@@ -1956,9 +2454,12 @@ struct cfg80211_scan_request {
struct cfg80211_scan_info info;
bool notified;
bool no_cck;
+ bool scan_6ghz;
+ u32 n_6ghz_params;
+ struct cfg80211_scan_6ghz_params *scan_6ghz_params;
/* keep last */
- struct ieee80211_channel *channels[0];
+ struct ieee80211_channel *channels[];
};
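The channels[0] to channels[] conversion makes this a proper C99 flexible array member, which pairs with struct_size() for overflow-safe allocation; a minimal sketch:

	#include <linux/overflow.h>
	#include <linux/slab.h>
	#include <net/cfg80211.h>

	static struct cfg80211_scan_request *example_alloc_scan_req(u32 n_channels)
	{
		struct cfg80211_scan_request *req;

		/* one allocation covers the struct and its channels[] tail */
		req = kzalloc(struct_size(req, channels, n_channels), GFP_KERNEL);
		if (req)
			req->n_channels = n_channels;
		return req;
	}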
static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask)
@@ -2032,8 +2533,8 @@ struct cfg80211_bss_select_adjust {
* @ie_len: length of ie in octets
* @flags: bit field of flags controlling operation
* @match_sets: sets of parameters to be matched for a scan result
- * entry to be considered valid and to be passed to the host
- * (others are filtered out).
+ * entry to be considered valid and to be passed to the host
+ * (others are filtered out).
* If omitted, all results are passed.
* @n_match_sets: number of match sets
* @report_results: indicates that results were reported for this request
@@ -2104,7 +2605,7 @@ struct cfg80211_sched_scan_request {
struct list_head list;
/* keep last */
- struct ieee80211_channel *channels[0];
+ struct ieee80211_channel *channels[];
};
/**
@@ -2226,7 +2727,7 @@ struct cfg80211_bss {
u8 bssid_index;
u8 max_bssid_indicator;
- u8 priv[0] __aligned(sizeof(void *));
+ u8 priv[] __aligned(sizeof(void *));
};
/**
@@ -2251,7 +2752,7 @@ const struct element *ieee80211_bss_get_elem(struct cfg80211_bss *bss, u8 id);
*/
static inline const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 id)
{
- return (void *)ieee80211_bss_get_elem(bss, id);
+ return (const void *)ieee80211_bss_get_elem(bss, id);
}
@@ -2274,6 +2775,12 @@ static inline const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 id)
* Authentication algorithm number, i.e., starting at the Authentication
* transaction sequence number field.
* @auth_data_len: Length of auth_data buffer in octets
+ * @link_id: if >= 0, indicates authentication should be done as an MLD,
+ * the interface address is included as the MLD address and the
+ * necessary link (with the given link_id) will be created (and
+ * given an MLD address) by the driver
+ * @ap_mld_addr: AP MLD address in case of authentication request with
+ * an AP MLD, valid iff @link_id >= 0
*/
struct cfg80211_auth_request {
struct cfg80211_bss *bss;
@@ -2281,9 +2788,25 @@ struct cfg80211_auth_request {
size_t ie_len;
enum nl80211_auth_type auth_type;
const u8 *key;
- u8 key_len, key_idx;
+ u8 key_len;
+ s8 key_idx;
const u8 *auth_data;
size_t auth_data_len;
+ s8 link_id;
+ const u8 *ap_mld_addr;
+};
+
+/**
+ * struct cfg80211_assoc_link - per-link information for MLO association
+ * @bss: the BSS pointer, see also &struct cfg80211_assoc_request::bss;
+ * if this is %NULL for a link, that link is not requested
+ * @elems: extra elements for the per-STA profile for this link
+ * @elems_len: length of the elements
+ */
+struct cfg80211_assoc_link {
+ struct cfg80211_bss *bss;
+ const u8 *elems;
+ size_t elems_len;
};
/**
@@ -2296,12 +2819,20 @@ struct cfg80211_auth_request {
* authentication capability. Drivers can offload authentication to
* userspace if this flag is set. Only applicable for cfg80211_connect()
* request (connect callback).
+ * @ASSOC_REQ_DISABLE_HE: Disable HE
+ * @ASSOC_REQ_DISABLE_EHT: Disable EHT
+ * @CONNECT_REQ_MLO_SUPPORT: Userspace indicates support for handling MLD links.
+ * Drivers shall disable MLO features for the current association if this
+ * flag is not set.
*/
enum cfg80211_assoc_req_flags {
ASSOC_REQ_DISABLE_HT = BIT(0),
ASSOC_REQ_DISABLE_VHT = BIT(1),
ASSOC_REQ_USE_RRM = BIT(2),
CONNECT_REQ_EXTERNAL_AUTH_SUPPORT = BIT(3),
+ ASSOC_REQ_DISABLE_HE = BIT(4),
+ ASSOC_REQ_DISABLE_EHT = BIT(5),
+ CONNECT_REQ_MLO_SUPPORT = BIT(6),
};
/**
@@ -2313,6 +2844,8 @@ enum cfg80211_assoc_req_flags {
* given a reference that it must give back to cfg80211_send_rx_assoc()
* or to cfg80211_assoc_timeout(). To ensure proper refcounting, new
* association requests while already associating must be rejected.
+ * This also applies to the @links.bss parameter, which is used instead
+ * of this one (it is %NULL) for MLO associations.
* @ie: Extra IEs to add to (Re)Association Request frame or %NULL
* @ie_len: Length of ie buffer in octets
* @use_mfp: Use management frame protection (IEEE 802.11w) in this association
@@ -2335,6 +2868,13 @@ enum cfg80211_assoc_req_flags {
* @fils_nonces: FILS nonces (part of AAD) for protecting (Re)Association
* Request/Response frame or %NULL if FILS is not used. This field starts
* with 16 octets of STA Nonce followed by 16 octets of AP Nonce.
+ * @s1g_capa: S1G capability override
+ * @s1g_capa_mask: S1G capability override mask
+ * @links: per-link information for MLO connections
+ * @link_id: >= 0 for MLO connections, where links are given, and indicates
+ * the link on which the association request should be sent
+ * @ap_mld_addr: AP MLD address in case of MLO association request,
+ * valid iff @link_id >= 0
*/
struct cfg80211_assoc_request {
struct cfg80211_bss *bss;
@@ -2349,6 +2889,10 @@ struct cfg80211_assoc_request {
const u8 *fils_kek;
size_t fils_kek_len;
const u8 *fils_nonces;
+ struct ieee80211_s1g_cap s1g_capa, s1g_capa_mask;
+ struct cfg80211_assoc_link links[IEEE80211_MLD_MAX_NUM_LINKS];
+ const u8 *ap_mld_addr;
+ s8 link_id;
};
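For an MLO association, @bss stays %NULL and the per-link BSS entries are given in @links instead; a hedged sketch of filling the request (names are hypothetical, references held as documented above):

	#include <net/cfg80211.h>

	static void example_fill_mlo_assoc(struct cfg80211_assoc_request *req,
					   const u8 *ap_mld_addr,
					   struct cfg80211_bss *bss0,
					   struct cfg80211_bss *bss1)
	{
		req->bss = NULL;		/* unused for MLO, see @bss docs */
		req->ap_mld_addr = ap_mld_addr;
		req->link_id = 0;		/* link the request is sent on */
		req->links[0].bss = bss0;
		req->links[1].bss = bss1;	/* remaining entries stay NULL */
	}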
/**
@@ -2357,7 +2901,7 @@ struct cfg80211_assoc_request {
* This structure provides information needed to complete IEEE 802.11
* deauthentication.
*
- * @bssid: the BSSID of the BSS to deauthenticate from
+ * @bssid: the BSSID or AP MLD address to deauthenticate from
* @ie: Extra IEs to add to Deauthentication frame or %NULL
* @ie_len: Length of ie buffer in octets
* @reason_code: The reason code for the deauthentication
@@ -2378,7 +2922,7 @@ struct cfg80211_deauth_request {
* This structure provides information needed to complete IEEE 802.11
* disassociation.
*
- * @bss: the BSS to disassociate from
+ * @ap_addr: the BSSID or AP MLD address to disassociate from
* @ie: Extra IEs to add to Disassociation frame or %NULL
* @ie_len: Length of ie buffer in octets
* @reason_code: The reason code for the disassociation
@@ -2386,7 +2930,7 @@ struct cfg80211_deauth_request {
* Disassociation frame is to be transmitted.
*/
struct cfg80211_disassoc_request {
- struct cfg80211_bss *bss;
+ const u8 *ap_addr;
const u8 *ie;
size_t ie_len;
u16 reason_code;
@@ -2426,7 +2970,7 @@ struct cfg80211_disassoc_request {
* will be used in ht_capa. Un-supported values will be ignored.
* @ht_capa_mask: The bits of ht_capa which are to be used.
* @wep_keys: static WEP keys, if not NULL points to an array of
- * CFG80211_MAX_WEP_KEYS WEP keys
+ * CFG80211_MAX_WEP_KEYS WEP keys
* @wep_tx_key: key index (0..3) of the default TX static WEP key
*/
struct cfg80211_ibss_params {
@@ -2631,6 +3175,17 @@ enum wiphy_params_flags {
* @cache_id: 2-octet cache identifier advertized by a FILS AP identifying the
* scope of PMKSA. This is valid only if @ssid_len is non-zero (may be
* %NULL).
+ * @pmk_lifetime: Maximum lifetime for PMKSA in seconds
+ * (dot11RSNAConfigPMKLifetime) or 0 if not specified.
+ * The configured PMKSA must not be used for PMKSA caching after
+ * expiration and any keys derived from this PMK become invalid on
+ * expiration, i.e., the current association must be dropped if the PMK
+ * used for it expires.
+ * @pmk_reauth_threshold: Threshold time for reauthentication (percentage of
+ * PMK lifetime, dot11RSNAConfigPMKReauthThreshold) or 0 if not specified.
+ * Drivers are expected to trigger a full authentication instead of using
+ * this PMKSA for caching when reassociating to a new BSS after this
+ * threshold to generate a new PMK before the current one expires.
*/
struct cfg80211_pmksa {
const u8 *bssid;
@@ -2640,6 +3195,8 @@ struct cfg80211_pmksa {
const u8 *ssid;
size_t ssid_len;
const u8 *cache_id;
+ u32 pmk_lifetime;
+ u8 pmk_reauth_threshold;
};
/**
@@ -2814,12 +3371,17 @@ struct cfg80211_wowlan_wakeup {
/**
* struct cfg80211_gtk_rekey_data - rekey data
- * @kek: key encryption key (NL80211_KEK_LEN bytes)
- * @kck: key confirmation key (NL80211_KCK_LEN bytes)
+ * @kek: key encryption key (@kek_len bytes)
+ * @kck: key confirmation key (@kck_len bytes)
* @replay_ctr: replay counter (NL80211_REPLAY_CTR_LEN bytes)
+ * @kek_len: length of kek
+ * @kck_len: length of kck
+ * @akm: akm (oui, id)
*/
struct cfg80211_gtk_rekey_data {
const u8 *kek, *kck, *replay_ctr;
+ u32 akm;
+ u8 kek_len, kck_len;
};
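With variable-length KEK/KCK, callers now pass explicit lengths; a minimal sketch assuming standard-length keys (larger keys require %WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK, see below):

	#include <net/cfg80211.h>

	static void example_fill_rekey(struct cfg80211_gtk_rekey_data *rekey,
				       const u8 *kek, const u8 *kck,
				       const u8 *replay_ctr)
	{
		rekey->kek = kek;
		rekey->kek_len = NL80211_KEK_LEN;
		rekey->kck = kck;
		rekey->kck_len = NL80211_KCK_LEN;
		rekey->replay_ctr = replay_ctr;
	}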
/**
@@ -2851,6 +3413,9 @@ struct cfg80211_update_ft_ies_params {
* @dont_wait_for_ack: tells the low level not to wait for an ack
* @n_csa_offsets: length of csa_offsets array
* @csa_offsets: array of all the csa offsets in the frame
+ * @link_id: for MLO, the link ID to transmit on, -1 if not given; note
+ * that the link ID isn't validated (much), it's in range but the
+ * link might not exist (or be used by the receiver STA)
*/
struct cfg80211_mgmt_tx_params {
struct ieee80211_channel *chan;
@@ -2862,6 +3427,7 @@ struct cfg80211_mgmt_tx_params {
bool dont_wait_for_ack;
int n_csa_offsets;
const u16 *csa_offsets;
+ int link_id;
};
/**
@@ -3175,6 +3741,7 @@ struct cfg80211_pmsr_ftm_result {
* @type: type of the measurement reported, note that we only support reporting
* one type at a time, but you can report multiple results separately and
* they're all aggregated for userspace.
+ * @ftm: FTM result
*/
struct cfg80211_pmsr_result {
u64 host_time, ap_tsf;
@@ -3204,6 +3771,17 @@ struct cfg80211_pmsr_result {
* @ftmr_retries: number of retries for FTM request
* @request_lci: request LCI information
* @request_civicloc: request civic location information
+ * @trigger_based: use trigger based ranging for the measurement
+ * If neither @trigger_based nor @non_trigger_based is set,
+ * EDCA based ranging will be used.
+ * @non_trigger_based: use non trigger based ranging for the measurement
+ * If neither @trigger_based nor @non_trigger_based is set,
+ * EDCA based ranging will be used.
+ * @lmr_feedback: negotiate for I2R LMR feedback. Only valid if either
+ * @trigger_based or @non_trigger_based is set.
+ * @bss_color: the bss color of the responder. Optional. Set to zero to
+ * indicate the driver should set the BSS color. Only valid if
+ * @non_trigger_based or @trigger_based is set.
*
* See also nl80211 for the respective attribute documentation.
*/
@@ -3213,11 +3791,15 @@ struct cfg80211_pmsr_ftm_request_peer {
u8 requested:1,
asap:1,
request_lci:1,
- request_civicloc:1;
+ request_civicloc:1,
+ trigger_based:1,
+ non_trigger_based:1,
+ lmr_feedback:1;
u8 num_bursts_exp;
u8 burst_duration;
u8 ftms_per_burst;
u8 ftmr_retries;
+ u8 bss_color;
};
/**
@@ -3293,6 +3875,21 @@ struct cfg80211_update_owe_info {
};
/**
+ * struct mgmt_frame_regs - management frame registrations data
+ * @global_stypes: bitmap of management frame subtypes registered
+ * for the entire device
+ * @interface_stypes: bitmap of management frame subtypes registered
+ * for the given interface
+ * @global_mcast_stypes: mcast RX is needed globally for these subtypes
+ * @interface_mcast_stypes: mcast RX is needed on this interface
+ * for these subtypes
+ */
+struct mgmt_frame_regs {
+ u32 global_stypes, interface_stypes;
+ u32 global_mcast_stypes, interface_mcast_stypes;
+};
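Bit N of each bitmap corresponds to management frame subtype N; a driver's handler might check for probe request registrations as in this sketch (the handler name and the firmware hook are hypothetical):

	#include <linux/bits.h>
	#include <linux/ieee80211.h>
	#include <net/cfg80211.h>

	static void drv_update_mgmt_frame_registrations(struct wiphy *wiphy,
							struct wireless_dev *wdev,
							struct mgmt_frame_regs *upd)
	{
		if (upd->interface_stypes & BIT(IEEE80211_STYPE_PROBE_REQ >> 4)) {
			/* enable probe request RX filtering in firmware */
		}
	}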
+
+/**
* struct cfg80211_ops - backend description for wireless configuration
*
* This struct is registered by fullmac card drivers and/or wireless stacks
@@ -3301,9 +3898,10 @@ struct cfg80211_update_owe_info {
* All callbacks except where otherwise noted should return 0
* on success or a negative error code.
*
- * All operations are currently invoked under rtnl for consistency with the
- * wireless extensions but this is subject to reevaluation as soon as this
- * code is used more widely and we have a first user without wext.
+ * All operations are invoked with the wiphy mutex held. The RTNL may be
+ * held in addition (due to wireless extensions) but this cannot be relied
+ * upon except in cases where documented below. Note that due to ordering,
+ * the RTNL also cannot be acquired in any handlers.
*
* @suspend: wiphy device needs to be suspended. The variable @wow will
* be %NULL or contain the enabled Wake-on-Wireless triggers that are
@@ -3318,27 +3916,48 @@ struct cfg80211_update_owe_info {
* the new netdev in the wiphy's network namespace! Returns the struct
* wireless_dev, or an ERR_PTR. For P2P device wdevs, the driver must
* also set the address member in the wdev.
+ * This additionally holds the RTNL to be able to do netdev changes.
*
* @del_virtual_intf: remove the virtual interface
+ * This additionally holds the RTNL to be able to do netdev changes.
*
* @change_virtual_intf: change type/configuration of virtual interface,
* keep the struct wireless_dev's iftype updated.
+ * This additionally holds the RTNL to be able to do netdev changes.
+ *
+ * @add_intf_link: Add a new MLO link to the given interface. Note that
+ * the wdev->links[] data structure has been updated, so the new link
+ * address is available.
+ * @del_intf_link: Remove an MLO link from the given interface.
*
* @add_key: add a key with the given parameters. @mac_addr will be %NULL
- * when adding a group key.
+ * when adding a group key. @link_id will be -1 for non-MLO connection.
+ * For MLO connection, @link_id will be >= 0 for group key and -1 for
+ * pairwise key, @mac_addr will be peer's MLD address for MLO pairwise key.
*
* @get_key: get information about the key with the given parameters.
* @mac_addr will be %NULL when requesting information for a group
* key. All pointers given to the @callback function need not be valid
* after it returns. This function should return an error if it is
* not possible to retrieve the key, -ENOENT if it doesn't exist.
+ * @link_id will be -1 for non-MLO connection. For MLO connection,
+ * @link_id will be >= 0 for group key and -1 for pairwise key, @mac_addr
+ * will be peer's MLD address for MLO pairwise key.
*
* @del_key: remove a key given the @mac_addr (%NULL for a group key)
- * and @key_index, return -ENOENT if the key doesn't exist.
+ * and @key_index, return -ENOENT if the key doesn't exist. @link_id will
+ * be -1 for non-MLO connection. For MLO connection, @link_id will be >= 0
+ * for group key and -1 for pairwise key, @mac_addr will be peer's MLD
+ * address for MLO pairwise key.
+ *
+ * @set_default_key: set the default key on an interface. @link_id will be >= 0
+ * for MLO connection and -1 for non-MLO connection.
*
- * @set_default_key: set the default key on an interface
+ * @set_default_mgmt_key: set the default management frame key on an interface.
+ * @link_id will be >= 0 for MLO connection and -1 for non-MLO connection.
*
- * @set_default_mgmt_key: set the default management frame key on an interface
+ * @set_default_beacon_key: set the default Beacon frame key on an interface.
+ * @link_id will be >= 0 for MLO connection and -1 for non-MLO connection.
*
* @set_rekey_data: give the data necessary for GTK rekeying to the driver
*
@@ -3458,8 +4077,6 @@ struct cfg80211_update_owe_info {
* @get_tx_power: store the current TX power into the dbm variable;
* return 0 if successful
*
- * @set_wds_peer: set the WDS peer for a WDS interface
- *
* @rfkill_poll: polls the hw rfkill line, use cfg80211 reporting
* functions to adjust rfkill hw state
*
@@ -3514,8 +4131,8 @@ struct cfg80211_update_owe_info {
* The driver should not call cfg80211_sched_scan_stopped() for a requested
* stop (when this method returns 0).
*
- * @mgmt_frame_register: Notify driver that a management frame type was
- * registered. The callback is allowed to sleep.
+ * @update_mgmt_frame_registrations: Notify the driver that management frame
+ * registrations were updated. The callback is allowed to sleep.
*
* @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device.
* Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may
@@ -3639,6 +4256,30 @@ struct cfg80211_update_owe_info {
*
* @probe_mesh_link: Probe direct Mesh peer's link quality by sending data frame
* and overrule HWMP path selection algorithm.
+ * @set_tid_config: TID specific configuration, this can be peer or BSS specific
+ * This callback may sleep.
+ * @reset_tid_config: Reset TID specific configuration for the peer, for the
+ * given TIDs. This callback may sleep.
+ *
+ * @set_sar_specs: Update the SAR (TX power) settings.
+ *
+ * @color_change: Initiate a color change.
+ *
+ * @set_fils_aad: Set FILS AAD data to the AP driver so that the driver can use
+ * those to decrypt (Re)Association Request and encrypt (Re)Association
+ * Response frame.
+ *
+ * @set_radar_background: Configure dedicated offchannel chain available for
+ * radar/CAC detection on some hw. This chain can't be used to transmit
+ * or receive frames and it is bound to a running wdev.
+ * Background radar/CAC detection allows avoiding the CAC downtime
+ * otherwise incurred when switching to a different channel during
+ * CAC detection on the selected radar channel.
+ * The caller is expected to set chandef pointer to NULL in order to
+ * disable background CAC/radar detection.
+ * @add_link_station: Add a link to a station.
+ * @mod_link_station: Modify a link of a station.
+ * @del_link_station: Remove a link of a station.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -3657,27 +4298,40 @@ struct cfg80211_ops {
enum nl80211_iftype type,
struct vif_params *params);
+ int (*add_intf_link)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ unsigned int link_id);
+ void (*del_intf_link)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ unsigned int link_id);
+
int (*add_key)(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- struct key_params *params);
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, struct key_params *params);
int (*get_key)(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- void *cookie,
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, void *cookie,
void (*callback)(void *cookie, struct key_params*));
int (*del_key)(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr);
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr);
int (*set_default_key)(struct wiphy *wiphy,
- struct net_device *netdev,
+ struct net_device *netdev, int link_id,
u8 key_index, bool unicast, bool multicast);
int (*set_default_mgmt_key)(struct wiphy *wiphy,
- struct net_device *netdev,
+ struct net_device *netdev, int link_id,
u8 key_index);
+ int (*set_default_beacon_key)(struct wiphy *wiphy,
+ struct net_device *netdev,
+ int link_id,
+ u8 key_index);
int (*start_ap)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ap_settings *settings);
int (*change_beacon)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_beacon_data *info);
- int (*stop_ap)(struct wiphy *wiphy, struct net_device *dev);
+ int (*stop_ap)(struct wiphy *wiphy, struct net_device *dev,
+ unsigned int link_id);
int (*add_station)(struct wiphy *wiphy, struct net_device *dev,
@@ -3773,9 +4427,6 @@ struct cfg80211_ops {
int (*get_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev,
int *dbm);
- int (*set_wds_peer)(struct wiphy *wiphy, struct net_device *dev,
- const u8 *addr);
-
void (*rfkill_poll)(struct wiphy *wiphy);
#ifdef CONFIG_NL80211_TESTMODE
@@ -3788,6 +4439,7 @@ struct cfg80211_ops {
int (*set_bitrate_mask)(struct wiphy *wiphy,
struct net_device *dev,
+ unsigned int link_id,
const u8 *peer,
const struct cfg80211_bitrate_mask *mask);
@@ -3831,9 +4483,9 @@ struct cfg80211_ops {
struct net_device *dev,
u32 rate, u32 pkts, u32 intvl);
- void (*mgmt_frame_register)(struct wiphy *wiphy,
- struct wireless_dev *wdev,
- u16 frame_type, bool reg);
+ void (*update_mgmt_frame_registrations)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct mgmt_frame_regs *upd);
int (*set_antenna)(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant);
int (*get_antenna)(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant);
@@ -3863,6 +4515,7 @@ struct cfg80211_ops {
int (*get_channel)(struct wiphy *wiphy,
struct wireless_dev *wdev,
+ unsigned int link_id,
struct cfg80211_chan_def *chandef);
int (*start_p2p_device)(struct wiphy *wiphy,
@@ -3899,6 +4552,7 @@ struct cfg80211_ops {
struct cfg80211_qos_map *qos_map);
int (*set_ap_chanwidth)(struct wiphy *wiphy, struct net_device *dev,
+ unsigned int link_id,
struct cfg80211_chan_def *chandef);
int (*add_tx_ts)(struct wiphy *wiphy, struct net_device *dev,
@@ -3945,7 +4599,8 @@ struct cfg80211_ops {
struct net_device *dev,
const u8 *buf, size_t len,
const u8 *dest, const __be16 proto,
- const bool noencrypt);
+ const bool noencrypt, int link_id,
+ u64 *cookie);
int (*get_ftm_responder_stats)(struct wiphy *wiphy,
struct net_device *dev,
@@ -3959,6 +4614,25 @@ struct cfg80211_ops {
struct cfg80211_update_owe_info *owe_info);
int (*probe_mesh_link)(struct wiphy *wiphy, struct net_device *dev,
const u8 *buf, size_t len);
+ int (*set_tid_config)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_tid_config *tid_conf);
+ int (*reset_tid_config)(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, u8 tids);
+ int (*set_sar_specs)(struct wiphy *wiphy,
+ struct cfg80211_sar_specs *sar);
+ int (*color_change)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_color_change_settings *params);
+ int (*set_fils_aad)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_fils_aad *fils_aad);
+ int (*set_radar_background)(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef);
+ int (*add_link_station)(struct wiphy *wiphy, struct net_device *dev,
+ struct link_station_parameters *params);
+ int (*mod_link_station)(struct wiphy *wiphy, struct net_device *dev,
+ struct link_station_parameters *params);
+ int (*del_link_station)(struct wiphy *wiphy, struct net_device *dev,
+ struct link_station_del_parameters *params);
};
/*
@@ -3969,6 +4643,8 @@ struct cfg80211_ops {
/**
* enum wiphy_flags - wiphy capability flags
*
+ * @WIPHY_FLAG_SPLIT_SCAN_6GHZ: if set to true, the scan request will be split
+ * into two, first for the legacy bands and second for the 6 GHz (UHB) band.
* @WIPHY_FLAG_NETNS_OK: if not set, do not allow changing the netns of this
* wiphy at all
* @WIPHY_FLAG_PS_ON_BY_DEFAULT: if set to true, powersave will be enabled
@@ -4007,11 +4683,16 @@ struct cfg80211_ops {
* beaconing mode (AP, IBSS, Mesh, ...).
* @WIPHY_FLAG_HAS_STATIC_WEP: The device supports static WEP key installation
* before connection.
+ * @WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK: The device supports larger KEK and KCK keys
+ * @WIPHY_FLAG_SUPPORTS_MLO: This is a temporary flag gating the MLO APIs,
+ * in order to not have them reachable in normal drivers, until we have
+ * complete feature/interface combinations/etc. advertisement. No driver
+ * should set this flag for now.
*/
enum wiphy_flags {
- /* use hole at 0 */
- /* use hole at 1 */
- /* use hole at 2 */
+ WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK = BIT(0),
+ WIPHY_FLAG_SUPPORTS_MLO = BIT(1),
+ WIPHY_FLAG_SPLIT_SCAN_6GHZ = BIT(2),
WIPHY_FLAG_NETNS_OK = BIT(3),
WIPHY_FLAG_PS_ON_BY_DEFAULT = BIT(4),
WIPHY_FLAG_4ADDR_AP = BIT(5),
@@ -4331,19 +5012,32 @@ struct wiphy_vendor_command {
* 802.11-2012 8.4.2.29 for the defined fields.
* @extended_capabilities_mask: mask of the valid values
* @extended_capabilities_len: length of the extended capabilities
+ * @eml_capabilities: EML capabilities (for MLO)
+ * @mld_capa_and_ops: MLD capabilities and operations (for MLO)
*/
struct wiphy_iftype_ext_capab {
enum nl80211_iftype iftype;
const u8 *extended_capabilities;
const u8 *extended_capabilities_mask;
u8 extended_capabilities_len;
+ u16 eml_capabilities;
+ u16 mld_capa_and_ops;
};
/**
+ * cfg80211_get_iftype_ext_capa - lookup interface type extended capability
+ * @wiphy: the wiphy to look up from
+ * @type: the interface type to look up
+ */
+const struct wiphy_iftype_ext_capab *
+cfg80211_get_iftype_ext_capa(struct wiphy *wiphy, enum nl80211_iftype type);
+
+/**
* struct cfg80211_pmsr_capabilities - cfg80211 peer measurement capabilities
* @max_peers: maximum number of peers in a single measurement
* @report_ap_tsf: can report assoc AP's TSF for radio resource measurement
* @randomize_mac_addr: can randomize MAC address for measurement
+ * @ftm: FTM measurement data
* @ftm.supported: FTM measurement is supported
* @ftm.asap: ASAP-mode is supported
* @ftm.non_asap: non-ASAP-mode is supported
@@ -4356,6 +5050,8 @@ struct wiphy_iftype_ext_capab {
* forbid using the value 15 to let the responder pick)
* @ftm.max_ftms_per_burst: maximum FTMs per burst supported (set to 0 if
* not limited)
+ * @ftm.trigger_based: trigger based ranging measurement is supported
+ * @ftm.non_trigger_based: non trigger based ranging measurement is supported
*/
struct cfg80211_pmsr_capabilities {
unsigned int max_peers;
@@ -4371,24 +5067,50 @@ struct cfg80211_pmsr_capabilities {
asap:1,
non_asap:1,
request_lci:1,
- request_civicloc:1;
+ request_civicloc:1,
+ trigger_based:1,
+ non_trigger_based:1;
} ftm;
};
/**
+ * struct wiphy_iftype_akm_suites - This structure encapsulates supported akm
+ * suites for interface types defined in @iftypes_mask. Each type in the
+ * @iftypes_mask must be unique across all instances of iftype_akm_suites.
+ *
+ * @iftypes_mask: bitmask of interface types
+ * @akm_suites: points to an array of supported akm suites
+ * @n_akm_suites: number of supported AKM suites
+ */
+struct wiphy_iftype_akm_suites {
+ u16 iftypes_mask;
+ const u32 *akm_suites;
+ int n_akm_suites;
+};
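For example, a driver restricting station-mode interfaces to PSK/SAE might advertise the following (a hedged sketch; the array names are hypothetical):

	#include <linux/ieee80211.h>
	#include <linux/kernel.h>
	#include <net/cfg80211.h>

	static const u32 sta_akms[] = {
		WLAN_AKM_SUITE_PSK,
		WLAN_AKM_SUITE_SAE,
	};

	static const struct wiphy_iftype_akm_suites drv_iftype_akms[] = {
		{
			.iftypes_mask = BIT(NL80211_IFTYPE_STATION) |
					BIT(NL80211_IFTYPE_P2P_CLIENT),
			.akm_suites = sta_akms,
			.n_akm_suites = ARRAY_SIZE(sta_akms),
		},
	};

	/* at probe: wiphy->iftype_akm_suites = drv_iftype_akms;
	 *           wiphy->num_iftype_akm_suites = ARRAY_SIZE(drv_iftype_akms);
	 */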
+
+/**
* struct wiphy - wireless hardware description
+ * @mtx: mutex for the data (structures) of this device
* @reg_notifier: the driver's regulatory notification callback,
* note that if your driver uses wiphy_apply_custom_regulatory()
* the reg_notifier's request can be passed as NULL
* @regd: the driver's regulatory domain, if one was requested via
- * the regulatory_hint() API. This can be used by the driver
+ * the regulatory_hint() API. This can be used by the driver
* on the reg_notifier() if it chooses to ignore future
* regulatory domain changes caused by other drivers.
* @signal_type: signal type reported in &struct cfg80211_bss.
* @cipher_suites: supported cipher suites
* @n_cipher_suites: number of supported cipher suites
- * @akm_suites: supported AKM suites
+ * @akm_suites: supported AKM suites. These are the default AKMs supported if
+ * AKMs are not advertised for a specific interface type in
+ * @iftype_akm_suites.
* @n_akm_suites: number of supported AKM suites
+ * @iftype_akm_suites: array of supported akm suites info per interface type.
+ * Note that the bits in @iftypes_mask inside this structure cannot
+ * overlap (i.e. only one occurrence of each type is allowed across all
+ * instances of iftype_akm_suites).
+ * @num_iftype_akm_suites: number of interface types for which supported akm
+ * suites are specified separately.
* @retry_short: Retry limit for short frames (dot11ShortRetryLimit)
* @retry_long: Retry limit for long frames (dot11LongRetryLimit)
* @frag_threshold: Fragmentation threshold (dot11FragmentationThreshold);
@@ -4409,10 +5131,11 @@ struct cfg80211_pmsr_capabilities {
* the same number of arbitrary MAC addresses.
* @registered: protects ->resume and ->suspend sysfs callbacks against
* unregister hardware
- * @debugfsdir: debugfs directory used for this wiphy, will be renamed
- * automatically on wiphy renames
- * @dev: (virtual) struct device for this wiphy
- * @registered: helps synchronize suspend/resume with wiphy unregister
+ * @debugfsdir: debugfs directory used for this wiphy (ieee80211/<wiphyname>).
+ * It will be renamed automatically on wiphy renames
+ * @dev: (virtual) struct device for this wiphy. The item in
+ * /sys/class/ieee80211/ points to this. You need to use set_wiphy_dev()
+ * (see below).
* @wext: wireless extension handlers
* @priv: driver private data (sized according to wiphy_new() parameter)
* @interface_modes: bitmask of interfaces types valid for this wiphy,
@@ -4523,12 +5246,6 @@ struct cfg80211_pmsr_capabilities {
* and probe responses. This value should be set if the driver
* wishes to limit the number of csa counters. Default (0) means
* infinite.
- * @max_adj_channel_rssi_comp: max offset of between the channel on which the
- * frame was sent and the channel on which the frame was heard for which
- * the reported rssi is still valid. If a driver is able to compensate the
- * low rssi when a frame is heard on different channel, then it should set
- * this variable to the maximal offset for which it can compensate.
- * This value should be set in MHz.
* @bss_select_support: bitmask indicating the BSS selection criteria supported
* by the driver in the .connect() callback. The bit position maps to the
* attribute indices defined in &enum nl80211_bss_select_attr.
@@ -4542,17 +5259,50 @@ struct cfg80211_pmsr_capabilities {
* @txq_memory_limit: configuration internal TX queue memory limit
* @txq_quantum: configuration of internal TX queue scheduler quantum
*
+ * @tx_queue_len: allow setting transmit queue len for drivers not using
+ * wake_tx_queue
+ *
* @support_mbssid: can HW support association with nontransmitted AP
* @support_only_he_mbssid: don't parse MBSSID elements if it is not
* HE AP, in order to avoid compatibility issues.
* @support_mbssid must be set for this to have any effect.
*
* @pmsr_capa: peer measurement capabilities
+ *
+ * @tid_config_support: describes the per-TID config support that the
+ * device has
+ * @tid_config_support.vif: bitmap of attributes (configurations)
+ * supported by the driver for each vif
+ * @tid_config_support.peer: bitmap of attributes (configurations)
+ * supported by the driver for each peer
+ * @tid_config_support.max_retry: maximum supported retry count for
+ * long/short retry configuration
+ *
+ * @max_data_retry_count: maximum supported per TID retry count for
+ * configuration through the %NL80211_TID_CONFIG_ATTR_RETRY_SHORT and
+ * %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes
+ * @sar_capa: SAR control capabilities
+ * @rfkill: a pointer to the rfkill structure
+ *
+ * @mbssid_max_interfaces: maximum number of interfaces supported by the driver
+ * in a multiple BSSID set. This field must be set to a non-zero value
+ * by the driver to advertise MBSSID support.
+ * @ema_max_profile_periodicity: maximum profile periodicity supported by
+ * the driver. Setting this field to a non-zero value indicates that the
+ * driver supports enhanced multi-BSSID advertisements (EMA AP).
+ * @max_num_akm_suites: maximum number of AKM suites allowed for
+ * configuration through %NL80211_CMD_CONNECT, %NL80211_CMD_ASSOCIATE and
+ * %NL80211_CMD_START_AP. Set to NL80211_MAX_NR_AKM_SUITES if not set by
+ * driver. If set by driver minimum allowed value is
+ * NL80211_MAX_NR_AKM_SUITES in order to avoid compatibility issues with
+ * legacy userspace and maximum allowed value is
+ * CFG80211_MAX_NUM_AKM_SUITES.
*/
struct wiphy {
+ struct mutex mtx;
+
/* assign these fields before you register the wiphy */
- /* permanent MAC address(es) */
u8 perm_addr[ETH_ALEN];
u8 addr_mask[ETH_ALEN];
@@ -4595,6 +5345,9 @@ struct wiphy {
int n_akm_suites;
const u32 *akm_suites;
+ const struct wiphy_iftype_akm_suites *iftype_akm_suites;
+ unsigned int num_iftype_akm_suites;
+
u8 retry_short;
u8 retry_long;
u32 frag_threshold;
@@ -4616,11 +5369,6 @@ struct wiphy {
u32 available_antennas_tx;
u32 available_antennas_rx;
- /*
- * Bitmap of supported protocols for probe response offloading
- * see &enum nl80211_probe_resp_offload_support_attr. Only valid
- * when the wiphy flag @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD is set.
- */
u32 probe_resp_offload;
const u8 *extended_capabilities, *extended_capabilities_mask;
@@ -4629,16 +5377,10 @@ struct wiphy {
const struct wiphy_iftype_ext_capab *iftype_ext_capab;
unsigned int num_iftype_ext_capab;
- /* If multiple wiphys are registered and you're handed e.g.
- * a regular netdev with assigned ieee80211_ptr, you won't
- * know whether it points to a wiphy your driver has registered
- * or not. Assign this to something global to your driver to
- * help determine whether you own this wiphy or not. */
const void *privid;
struct ieee80211_supported_band *bands[NUM_NL80211_BANDS];
- /* Lets us get back the wiphy on the callback */
void (*reg_notifier)(struct wiphy *wiphy,
struct regulatory_request *request);
@@ -4646,14 +5388,10 @@ struct wiphy {
const struct ieee80211_regdomain __rcu *regd;
- /* the item in /sys/class/ieee80211/ points to this,
- * you need use set_wiphy_dev() (see below) */
struct device dev;
- /* protects ->resume, ->suspend sysfs callbacks against unregister hw */
bool registered;
- /* dir in debugfs: ieee80211/<wiphyname> */
struct dentry *debugfsdir;
const struct ieee80211_ht_cap *ht_capa_mod_mask;
@@ -4661,7 +5399,6 @@ struct wiphy {
struct list_head wdev_list;
- /* the network namespace this phy lives in currently */
possible_net_t _net;
#ifdef CONFIG_CFG80211_WEXT
@@ -4677,7 +5414,6 @@ struct wiphy {
u16 max_ap_assoc_sta;
u8 max_num_csa_counters;
- u8 max_adj_channel_rssi_comp;
u32 bss_select_support;
@@ -4687,12 +5423,29 @@ struct wiphy {
u32 txq_memory_limit;
u32 txq_quantum;
+ unsigned long tx_queue_len;
+
u8 support_mbssid:1,
support_only_he_mbssid:1;
const struct cfg80211_pmsr_capabilities *pmsr_capa;
- char priv[0] __aligned(NETDEV_ALIGN);
+ struct {
+ u64 peer, vif;
+ u8 max_retry;
+ } tid_config_support;
+
+ u8 max_data_retry_count;
+
+ const struct cfg80211_sar_capa *sar_capa;
+
+ struct rfkill *rfkill;
+
+ u8 mbssid_max_interfaces;
+ u8 ema_max_profile_periodicity;
+ u16 max_num_akm_suites;
+
+ char priv[] __aligned(NETDEV_ALIGN);
};
static inline struct net *wiphy_net(struct wiphy *wiphy)
@@ -4806,6 +5559,37 @@ static inline struct wiphy *wiphy_new(const struct cfg80211_ops *ops,
*/
int wiphy_register(struct wiphy *wiphy);
+/* this is a define for better error reporting (file/line) */
+#define lockdep_assert_wiphy(wiphy) lockdep_assert_held(&(wiphy)->mtx)
+
+/**
+ * rcu_dereference_wiphy - rcu_dereference with debug checking
+ * @wiphy: the wiphy to check the locking on
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Do an rcu_dereference(p), but check caller either holds rcu_read_lock()
+ * or RTNL. Note: Please prefer wiphy_dereference() or rcu_dereference().
+ */
+#define rcu_dereference_wiphy(wiphy, p) \
+ rcu_dereference_check(p, lockdep_is_held(&wiphy->mtx))
+
+/**
+ * wiphy_dereference - fetch RCU pointer when updates are prevented by wiphy mtx
+ * @wiphy: the wiphy to check the locking on
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Return the value of the specified RCU-protected pointer, but omit the
+ * READ_ONCE(), because caller holds the wiphy mutex used for updates.
+ */
+#define wiphy_dereference(wiphy, p) \
+ rcu_dereference_protected(p, lockdep_is_held(&wiphy->mtx))
+
+/**
+ * get_wiphy_regdom - get custom regdomain for the given wiphy
+ * @wiphy: the wiphy to get the regdomain from
+ */
+const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy);
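Typical usage inside a cfg80211 op, where the wiphy mutex is already held (the private structure and its RCU pointer are hypothetical):

	#include <net/cfg80211.h>

	struct drv_priv {
		struct ieee80211_regdomain __rcu *my_regd;
	};

	static const struct ieee80211_regdomain *
	example_get_regd(struct wiphy *wiphy, struct drv_priv *priv)
	{
		lockdep_assert_wiphy(wiphy);	/* updates prevented here */
		return wiphy_dereference(wiphy, priv->my_regd);
	}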
+
/**
* wiphy_unregister - deregister a wiphy from cfg80211
*
@@ -4831,13 +5615,45 @@ struct cfg80211_cached_keys;
struct cfg80211_cqm_config;
/**
+ * wiphy_lock - lock the wiphy
+ * @wiphy: the wiphy to lock
+ *
+ * This is mostly exposed so it can be done around registering and
+ * unregistering netdevs that aren't created through cfg80211 calls,
+ * since that requires locking in cfg80211 when the notifier is
+ * called, but that cannot differentiate which way it's called.
+ *
+ * When cfg80211 ops are called, the wiphy is already locked.
+ */
+static inline void wiphy_lock(struct wiphy *wiphy)
+ __acquires(&wiphy->mtx)
+{
+ mutex_lock(&wiphy->mtx);
+ __acquire(&wiphy->mtx);
+}
+
+/**
+ * wiphy_unlock - unlock the wiphy again
+ * @wiphy: the wiphy to unlock
+ */
+static inline void wiphy_unlock(struct wiphy *wiphy)
+ __releases(&wiphy->mtx)
+{
+ __release(&wiphy->mtx);
+ mutex_unlock(&wiphy->mtx);
+}
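A minimal usage sketch for a driver touching wiphy-protected state outside of a cfg80211 op (inside ops, the lock is already held):

	#include <net/cfg80211.h>

	static void example_with_wiphy_locked(struct wiphy *wiphy)
	{
		wiphy_lock(wiphy);
		/* register/unregister driver-created netdevs, or modify
		 * state that the wiphy mutex protects */
		wiphy_unlock(wiphy);
	}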
+
+/**
* struct wireless_dev - wireless device state
*
* For netdevs, this structure must be allocated by the driver
* that uses the ieee80211_ptr field in struct net_device (this
* is intentional so it can be allocated along with the netdev.)
* It need not be registered then as netdev registration will
- * be intercepted by cfg80211 to see the new wireless device.
+ * be intercepted by cfg80211 to see the new wireless device;
+ * however, drivers must lock the wiphy before registering or
+ * unregistering netdevs if they pre-create any netdevs (in ops
+ * called from cfg80211, the wiphy is already locked.)
*
* For non-netdev uses, it must also be allocated by the driver
* in response to the cfg80211 callbacks that require it, as
@@ -4846,20 +5662,16 @@ struct cfg80211_cqm_config;
*
* @wiphy: pointer to hardware description
* @iftype: interface type
+ * @registered: is this wdev already registered with cfg80211
+ * @registering: indicates we're doing registration under wiphy lock
+ * for the notifier
* @list: (private) Used to collect the interfaces
* @netdev: (private) Used to reference back to the netdev, may be %NULL
* @identifier: (private) Identifier used in nl80211 to identify this
* wireless device if it has no netdev
- * @current_bss: (private) Used by the internal configuration code
- * @chandef: (private) Used by the internal configuration code to track
- * the user-set channel definition.
- * @preset_chandef: (private) Used by the internal configuration code to
- * track the channel to be used for AP later
+ * @u: union containing data specific to @iftype
+ * @connected: indicates if connected or not (STA mode)
* @bssid: (private) Used by the internal configuration code
- * @ssid: (private) Used by the internal configuration code
- * @ssid_len: (private) Used by the internal configuration code
- * @mesh_id_len: (private) Used by the internal configuration code
- * @mesh_id_up_len: (private) Used by the internal configuration code
* @wext: (private) Used by the internal wireless extensions compat code
* @wext.ibss: (private) IBSS data part of wext handling
* @wext.connect: (private) connection handling data
@@ -4877,7 +5689,8 @@ struct cfg80211_cqm_config;
* netdev and may otherwise be used by driver read-only, will be updated
* by cfg80211 on change_interface
* @mgmt_registrations: list of registrations for management frames
- * @mgmt_registrations_lock: lock for the list
+ * @mgmt_registrations_need_update: mgmt registrations were updated,
+ * need to propagate the update to the driver
* @mtx: mutex used to lock data in this struct, may be used by drivers;
* some API functions require it to be held
* @beacon_interval: beacon interval used on this device for transmitting
@@ -4898,8 +5711,6 @@ struct cfg80211_cqm_config;
* @conn_owner_nlportid: (private) connection owner socket port ID
* @disconnect_wk: (private) auto-disconnect work
* @disconnect_bssid: (private) the BSSID to use for auto-disconnect
- * @ibss_fixed: (private) IBSS is using fixed BSSID
- * @ibss_dfs_possible: (private) IBSS may change to a DFS channel
* @event_list: (private) list for internal event processing
* @event_lock: (private) lock for event list
* @owner_nlportid: (private) owner socket port ID
@@ -4908,6 +5719,11 @@ struct cfg80211_cqm_config;
* @pmsr_list: (private) peer measurement requests
* @pmsr_lock: (private) peer measurements requests/results lock
* @pmsr_free_wk: (private) peer measurements cleanup work
+ * @unprot_beacon_reported: (private) timestamp of last
+ * unprotected beacon report
+ * @links: array of %IEEE80211_MLD_MAX_NUM_LINKS elements containing @addr
+ * @ap and @client for each link
+ * @valid_links: bitmap describing what elements of @links are valid
*/
struct wireless_dev {
struct wiphy *wiphy;
@@ -4920,17 +5736,15 @@ struct wireless_dev {
u32 identifier;
struct list_head mgmt_registrations;
- spinlock_t mgmt_registrations_lock;
+ u8 mgmt_registrations_need_update:1;
struct mutex mtx;
- bool use_4addr, is_running;
+ bool use_4addr, is_running, registered, registering;
u8 address[ETH_ALEN] __aligned(sizeof(u16));
/* currently used for IBSS and SME - might be rearranged later */
- u8 ssid[IEEE80211_MAX_SSID_LEN];
- u8 ssid_len, mesh_id_len, mesh_id_up_len;
struct cfg80211_conn *conn;
struct cfg80211_cached_keys *connect_keys;
enum ieee80211_bss_type conn_bss_type;
@@ -4942,23 +5756,17 @@ struct wireless_dev {
struct list_head event_list;
spinlock_t event_lock;
- struct cfg80211_internal_bss *current_bss; /* associated / joined */
- struct cfg80211_chan_def preset_chandef;
- struct cfg80211_chan_def chandef;
-
- bool ibss_fixed;
- bool ibss_dfs_possible;
+ u8 connected:1;
bool ps;
int ps_timeout;
- int beacon_interval;
-
u32 ap_unexpected_nlportid;
u32 owner_nlportid;
bool nl_owner_dead;
+ /* FIXME: need to rework radar detection for MLO */
bool cac_started;
unsigned long cac_start_time;
unsigned int cac_time_ms;
@@ -4984,9 +5792,55 @@ struct wireless_dev {
struct list_head pmsr_list;
spinlock_t pmsr_lock;
struct work_struct pmsr_free_wk;
+
+ unsigned long unprot_beacon_reported;
+
+ union {
+ struct {
+ u8 connected_addr[ETH_ALEN] __aligned(2);
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len;
+ } client;
+ struct {
+ int beacon_interval;
+ struct cfg80211_chan_def preset_chandef;
+ struct cfg80211_chan_def chandef;
+ u8 id[IEEE80211_MAX_SSID_LEN];
+ u8 id_len, id_up_len;
+ } mesh;
+ struct {
+ struct cfg80211_chan_def preset_chandef;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len;
+ } ap;
+ struct {
+ struct cfg80211_internal_bss *current_bss;
+ struct cfg80211_chan_def chandef;
+ int beacon_interval;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len;
+ } ibss;
+ struct {
+ struct cfg80211_chan_def chandef;
+ } ocb;
+ } u;
+
+ struct {
+ u8 addr[ETH_ALEN] __aligned(2);
+ union {
+ struct {
+ unsigned int beacon_interval;
+ struct cfg80211_chan_def chandef;
+ } ap;
+ struct {
+ struct cfg80211_internal_bss *current_bss;
+ } client;
+ };
+ } links[IEEE80211_MLD_MAX_NUM_LINKS];
+ u16 valid_links;
};
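Mode-specific state that used to sit directly in the wdev now lives in the @u union, keyed by @iftype; a sketch of reading it safely:

	#include <net/cfg80211.h>

	static u8 example_wdev_ssid_len(struct wireless_dev *wdev)
	{
		switch (wdev->iftype) {
		case NL80211_IFTYPE_STATION:
			return wdev->u.client.ssid_len;
		case NL80211_IFTYPE_AP:
			return wdev->u.ap.ssid_len;
		case NL80211_IFTYPE_ADHOC:
			return wdev->u.ibss.ssid_len;
		default:
			return 0;
		}
	}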
-static inline u8 *wdev_address(struct wireless_dev *wdev)
+static inline const u8 *wdev_address(struct wireless_dev *wdev)
{
if (wdev->netdev)
return wdev->netdev->dev_addr;
@@ -5013,35 +5867,148 @@ static inline void *wdev_priv(struct wireless_dev *wdev)
}
/**
+ * wdev_chandef - return chandef pointer from wireless_dev
+ * @wdev: the wdev
+ * @link_id: the link ID for MLO
+ *
+ * Return: The chandef depending on the mode, or %NULL.
+ */
+struct cfg80211_chan_def *wdev_chandef(struct wireless_dev *wdev,
+ unsigned int link_id);
+
+static inline void WARN_INVALID_LINK_ID(struct wireless_dev *wdev,
+ unsigned int link_id)
+{
+ WARN_ON(link_id && !wdev->valid_links);
+ WARN_ON(wdev->valid_links &&
+ !(wdev->valid_links & BIT(link_id)));
+}
+
+#define for_each_valid_link(link_info, link_id) \
+ for (link_id = 0; \
+ link_id < ((link_info)->valid_links ? \
+ ARRAY_SIZE((link_info)->links) : 1); \
+ link_id++) \
+ if (!(link_info)->valid_links || \
+ ((link_info)->valid_links & BIT(link_id)))
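For a non-MLD wdev (valid_links == 0) the macro visits exactly link_id 0, so MLO-aware code can use one loop for both cases; a sketch:

	#include <net/cfg80211.h>

	static unsigned int example_count_beaconing_links(struct wireless_dev *wdev)
	{
		unsigned int link_id, n = 0;

		for_each_valid_link(wdev, link_id)
			if (wdev->links[link_id].ap.beacon_interval)
				n++;
		return n;
	}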
+
+/**
* DOC: Utility functions
*
* cfg80211 offers a number of utility functions that can be useful.
*/
/**
+ * ieee80211_channel_equal - compare two struct ieee80211_channel
+ *
+ * @a: 1st struct ieee80211_channel
+ * @b: 2nd struct ieee80211_channel
+ * Return: true if center frequency of @a == @b
+ */
+static inline bool
+ieee80211_channel_equal(struct ieee80211_channel *a,
+ struct ieee80211_channel *b)
+{
+ return (a->center_freq == b->center_freq &&
+ a->freq_offset == b->freq_offset);
+}
+
+/**
+ * ieee80211_channel_to_khz - convert ieee80211_channel to frequency in KHz
+ * @chan: struct ieee80211_channel to convert
+ * Return: The corresponding frequency (in KHz)
+ */
+static inline u32
+ieee80211_channel_to_khz(const struct ieee80211_channel *chan)
+{
+ return MHZ_TO_KHZ(chan->center_freq) + chan->freq_offset;
+}
+
+/**
+ * ieee80211_s1g_channel_width - get allowed channel width from @chan
+ *
+ * Only allowed for band NL80211_BAND_S1GHZ
+ * @chan: channel
+ * Return: The allowed channel width for this center_freq
+ */
+enum nl80211_chan_width
+ieee80211_s1g_channel_width(const struct ieee80211_channel *chan);
+
+/**
+ * ieee80211_channel_to_freq_khz - convert channel number to frequency
+ * @chan: channel number
+ * @band: band, necessary due to channel number overlap
+ * Return: The corresponding frequency (in KHz), or 0 if the conversion failed.
+ */
+u32 ieee80211_channel_to_freq_khz(int chan, enum nl80211_band band);
+
+/**
* ieee80211_channel_to_frequency - convert channel number to frequency
* @chan: channel number
* @band: band, necessary due to channel number overlap
* Return: The corresponding frequency (in MHz), or 0 if the conversion failed.
*/
-int ieee80211_channel_to_frequency(int chan, enum nl80211_band band);
+static inline int
+ieee80211_channel_to_frequency(int chan, enum nl80211_band band)
+{
+ return KHZ_TO_MHZ(ieee80211_channel_to_freq_khz(chan, band));
+}
+
+/**
+ * ieee80211_freq_khz_to_channel - convert frequency to channel number
+ * @freq: center frequency in KHz
+ * Return: The corresponding channel, or 0 if the conversion failed.
+ */
+int ieee80211_freq_khz_to_channel(u32 freq);
/**
* ieee80211_frequency_to_channel - convert frequency to channel number
- * @freq: center frequency
+ * @freq: center frequency in MHz
* Return: The corresponding channel, or 0 if the conversion failed.
*/
-int ieee80211_frequency_to_channel(int freq);
+static inline int
+ieee80211_frequency_to_channel(int freq)
+{
+ return ieee80211_freq_khz_to_channel(MHZ_TO_KHZ(freq));
+}
+
+/**
+ * ieee80211_get_channel_khz - get channel struct from wiphy for specified
+ * frequency
+ * @wiphy: the struct wiphy to get the channel for
+ * @freq: the center frequency (in KHz) of the channel
+ * Return: The channel struct from @wiphy at @freq.
+ */
+struct ieee80211_channel *
+ieee80211_get_channel_khz(struct wiphy *wiphy, u32 freq);
/**
* ieee80211_get_channel - get channel struct from wiphy for specified frequency
*
* @wiphy: the struct wiphy to get the channel for
- * @freq: the center frequency of the channel
- *
+ * @freq: the center frequency (in MHz) of the channel
* Return: The channel struct from @wiphy at @freq.
*/
-struct ieee80211_channel *ieee80211_get_channel(struct wiphy *wiphy, int freq);
+static inline struct ieee80211_channel *
+ieee80211_get_channel(struct wiphy *wiphy, int freq)
+{
+ return ieee80211_get_channel_khz(wiphy, MHZ_TO_KHZ(freq));
+}
+
+/**
+ * cfg80211_channel_is_psc - Check if the channel is a 6 GHz PSC
+ * @chan: control channel to check
+ *
+ * The Preferred Scanning Channels (PSC) are defined in
+ * Draft IEEE P802.11ax/D5.0, 26.17.2.3.3
+ */
+static inline bool cfg80211_channel_is_psc(struct ieee80211_channel *chan)
+{
+ if (chan->band != NL80211_BAND_6GHZ)
+ return false;
+
+ return ieee80211_frequency_to_channel(chan->center_freq) % 16 == 5;
+}
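For example, 6 GHz channel 37 (center 6135 MHz) satisfies 37 % 16 == 5 and is a PSC, while channel 33 is not. A hedged usage sketch, with add_to_scan_list() a made-up helper:

	/* prefer PSC channels when building a 6 GHz scan list */
	if (cfg80211_channel_is_psc(chan))
		add_to_scan_list(chan);	/* hypothetical helper */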
/**
* ieee80211_get_response_rate - get basic rate for a given rate
@@ -5055,7 +6022,7 @@ struct ieee80211_channel *ieee80211_get_channel(struct wiphy *wiphy, int freq);
* which is, for this function, given as a bitmap of indices of
* rates in the band's bitrate table.
*/
-struct ieee80211_rate *
+const struct ieee80211_rate *
ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
u32 basic_rates, int bitrate);
@@ -5074,7 +6041,7 @@ u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband,
* Radiotap parsing functions -- for controlled injection support
*
* Implemented in net/wireless/radiotap.c
- * Documentation in Documentation/networking/radiotap-headers.txt
+ * Documentation in Documentation/networking/radiotap-headers.rst
*/
struct radiotap_align_size {
@@ -5201,11 +6168,12 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
* @addr: the device MAC address
* @iftype: the virtual interface type
* @data_offset: offset of payload after the 802.11 header
+ * @is_amsdu: true if the 802.11 header is A-MSDU
* Return: 0 on success. Non-zero on error.
*/
int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
const u8 *addr, enum nl80211_iftype iftype,
- u8 data_offset);
+ u8 data_offset, bool is_amsdu);
/**
* ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3
@@ -5217,7 +6185,7 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
enum nl80211_iftype iftype)
{
- return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0);
+ return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0, false);
}
/**
@@ -5229,7 +6197,7 @@ static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
*
* @skb: The input A-MSDU frame without any headers.
* @list: The output list of 802.3 frames. It must be allocated and
- * initialized by by the caller.
+ * initialized by the caller.
* @addr: The device MAC address.
* @iftype: The device interface type.
* @extra_headroom: The hardware extra headroom for SKBs in the @list.
@@ -5313,9 +6281,9 @@ cfg80211_find_ie_match(u8 eid, const u8 *ies, unsigned int len,
(!match_len && match_offset)))
return NULL;
- return (void *)cfg80211_find_elem_match(eid, ies, len,
- match, match_len,
- match_offset ?
+ return (const void *)cfg80211_find_elem_match(eid, ies, len,
+ match, match_len,
+ match_offset ?
match_offset - 2 : 0);
}
@@ -5442,7 +6410,7 @@ static inline const u8 *
cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
const u8 *ies, unsigned int len)
{
- return (void *)cfg80211_find_vendor_elem(oui, oui_type, ies, len);
+ return (const void *)cfg80211_find_vendor_elem(oui, oui_type, ies, len);
}
/**
@@ -5467,9 +6435,9 @@ void cfg80211_send_layer2_update(struct net_device *dev, const u8 *addr);
* @wiphy: the wireless device giving the hint (used only for reporting
* conflicts)
* @alpha2: the ISO/IEC 3166 alpha2 the driver claims its regulatory domain
- * should be in. If @rd is set this should be NULL. Note that if you
- * set this to NULL you should still set rd->alpha2 to some accepted
- * alpha2.
+ * should be in. If @rd is set this should be NULL. Note that if you
+ * set this to NULL you should still set rd->alpha2 to some accepted
+ * alpha2.
*
* Wireless drivers can use this function to hint to the wireless core
* what it believes should be the current regulatory domain by
@@ -5504,18 +6472,18 @@ int regulatory_set_wiphy_regd(struct wiphy *wiphy,
struct ieee80211_regdomain *rd);
/**
- * regulatory_set_wiphy_regd_sync_rtnl - set regdom for self-managed drivers
+ * regulatory_set_wiphy_regd_sync - set regdom for self-managed drivers
* @wiphy: the wireless device we want to process the regulatory domain on
* @rd: the regulatory domain information to use for this wiphy
*
- * This functions requires the RTNL to be held and applies the new regdomain
- * synchronously to this wiphy. For more details see
- * regulatory_set_wiphy_regd().
+ * This function requires the RTNL and the wiphy mutex to be held and
+ * applies the new regdomain synchronously to this wiphy. For more details
+ * see regulatory_set_wiphy_regd().
*
* Return: 0 on success. -EINVAL, -EPERM
*/
-int regulatory_set_wiphy_regd_sync_rtnl(struct wiphy *wiphy,
- struct ieee80211_regdomain *rd);
+int regulatory_set_wiphy_regd_sync(struct wiphy *wiphy,
+ struct ieee80211_regdomain *rd);
/**
* wiphy_apply_custom_regulatory - apply a custom driver regulatory domain
@@ -5633,7 +6601,7 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid);
void cfg80211_sched_scan_stopped(struct wiphy *wiphy, u64 reqid);
/**
- * cfg80211_sched_scan_stopped_rtnl - notify that the scheduled scan has stopped
+ * cfg80211_sched_scan_stopped_locked - notify that the scheduled scan has stopped
*
* @wiphy: the wiphy on which the scheduled scan stopped
* @reqid: identifier for the related scheduled scan request
@@ -5641,9 +6609,9 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy, u64 reqid);
* The driver can call this function to inform cfg80211 that the
* scheduled scan had to be stopped, for whatever reason. The driver
* is then called back via the sched_scan_stop operation when done.
- * This function should be called with rtnl locked.
+ * This function should be called with the wiphy mutex held.
*/
-void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy, u64 reqid);
+void cfg80211_sched_scan_stopped_locked(struct wiphy *wiphy, u64 reqid);
/**
* cfg80211_inform_bss_frame_data - inform cfg80211 of a received BSS frame
@@ -5753,6 +6721,19 @@ enum cfg80211_bss_frame_type {
};
/**
+ * cfg80211_get_ies_channel_number - returns the channel number from ies
+ * @ie: IEs
+ * @ielen: length of IEs
+ * @band: enum nl80211_band of the channel
+ * @ftype: frame type
+ *
+ * Returns the channel number, or -1 if none could be determined.
+ */
+int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
+ enum nl80211_band band,
+ enum cfg80211_bss_frame_type ftype);
+
+/**
* cfg80211_inform_bss_data - inform cfg80211 of a new BSS
*
* @wiphy: the wiphy reporting the BSS
@@ -5940,16 +6921,36 @@ void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len);
void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr);
/**
- * cfg80211_rx_assoc_resp - notification of processed association response
- * @dev: network device
+ * struct cfg80211_rx_assoc_resp - association response data
* @bss: the BSS that association was requested with, ownership of the pointer
- * moves to cfg80211 in this call
+ * moves to cfg80211 in the call to cfg80211_rx_assoc_resp()
* @buf: (Re)Association Response frame (header + body)
* @len: length of the frame data
* @uapsd_queues: bitmap of queues configured for uapsd. Same format
* as the AC bitmap in the QoS info field
* @req_ies: information elements from the (Re)Association Request frame
* @req_ies_len: length of req_ies data
+ * @ap_mld_addr: AP MLD address (in case of MLO)
+ * @links: per-link information indexed by link ID, use links[0] for
+ * non-MLO connections
+ */
+struct cfg80211_rx_assoc_resp {
+ const u8 *buf;
+ size_t len;
+ const u8 *req_ies;
+ size_t req_ies_len;
+ int uapsd_queues;
+ const u8 *ap_mld_addr;
+ struct {
+ const u8 *addr;
+ struct cfg80211_bss *bss;
+ } links[IEEE80211_MLD_MAX_NUM_LINKS];
+};
+
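A minimal driver-side sketch for the non-MLO case follows; frame, frame_len and bss are placeholders, and the bss reference is consumed by the call as noted above:

	struct cfg80211_rx_assoc_resp resp = {
		.buf = frame,
		.len = frame_len,
		.uapsd_queues = -1,	/* assumption: no U-APSD information */
	};

	resp.links[0].bss = bss;	/* ownership moves to cfg80211 */
	cfg80211_rx_assoc_resp(dev, &resp);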
+/**
+ * cfg80211_rx_assoc_resp - notification of processed association response
+ * @dev: network device
+ * @data: association response data, &struct cfg80211_rx_assoc_resp
*
* After being asked to associate via cfg80211_ops::assoc() the driver must
* call either this function or cfg80211_auth_timeout().
@@ -5957,53 +6958,61 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr);
* This function may sleep. The caller must hold the corresponding wdev's mutex.
*/
void cfg80211_rx_assoc_resp(struct net_device *dev,
- struct cfg80211_bss *bss,
- const u8 *buf, size_t len,
- int uapsd_queues,
- const u8 *req_ies, size_t req_ies_len);
+ struct cfg80211_rx_assoc_resp *data);
/**
- * cfg80211_assoc_timeout - notification of timed out association
- * @dev: network device
- * @bss: The BSS entry with which association timed out.
- *
- * This function may sleep. The caller must hold the corresponding wdev's mutex.
+ * struct cfg80211_assoc_failure - association failure data
+ * @ap_mld_addr: AP MLD address, or %NULL
+ * @bss: list of BSSes, must use entry 0 for non-MLO connections
+ * (@ap_mld_addr is %NULL)
+ * @timeout: indicates the association failed due to timeout, otherwise
+ * the association was abandoned for a reason reported through some
+ * other API (e.g. deauth RX)
*/
-void cfg80211_assoc_timeout(struct net_device *dev, struct cfg80211_bss *bss);
+struct cfg80211_assoc_failure {
+ const u8 *ap_mld_addr;
+ struct cfg80211_bss *bss[IEEE80211_MLD_MAX_NUM_LINKS];
+ bool timeout;
+};
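For instance, a driver reporting a plain (non-MLO) association timeout might do the following; bss is a placeholder for the entry used in the attempt:

	struct cfg80211_assoc_failure data = {
		.timeout = true,	/* timed out rather than abandoned */
	};

	data.bss[0] = bss;
	cfg80211_assoc_failure(dev, &data);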
/**
- * cfg80211_abandon_assoc - notify cfg80211 of abandoned association attempt
+ * cfg80211_assoc_failure - notification of association failure
* @dev: network device
- * @bss: The BSS entry with which association was abandoned.
+ * @data: data describing the association failure
*
- * Call this whenever - for reasons reported through other API, like deauth RX,
- * an association attempt was abandoned.
* This function may sleep. The caller must hold the corresponding wdev's mutex.
*/
-void cfg80211_abandon_assoc(struct net_device *dev, struct cfg80211_bss *bss);
+void cfg80211_assoc_failure(struct net_device *dev,
+ struct cfg80211_assoc_failure *data);
/**
* cfg80211_tx_mlme_mgmt - notification of transmitted deauth/disassoc frame
* @dev: network device
* @buf: 802.11 frame (header + body)
* @len: length of the frame data
+ * @reconnect: immediate reconnect is desired (include the nl80211 attribute)
*
* This function is called whenever deauthentication has been processed in
* station mode. This includes both received deauthentication frames and
* locally generated ones. This function may sleep. The caller must hold the
* corresponding wdev's mutex.
*/
-void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len);
+void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len,
+ bool reconnect);
/**
* cfg80211_rx_unprot_mlme_mgmt - notification of unprotected mlme mgmt frame
* @dev: network device
- * @buf: deauthentication frame (header + body)
+ * @buf: received management frame (header + body)
* @len: length of the frame data
*
* This function is called whenever a received deauthentication or disassoc
* frame has been dropped in station mode because of MFP being used but the
- * frame was not protected. This function may sleep.
+ * frame was not protected. This is also used to notify reception of a Beacon
+ * frame that was dropped because it did not include a valid MME MIC while
+ * beacon protection was enabled (BIGTK configured in station mode).
+ *
+ * This function may sleep.
*/
void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev,
const u8 *buf, size_t len);
@@ -6044,12 +7053,14 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
struct ieee80211_channel *channel, gfp_t gfp);
/**
- * cfg80211_notify_new_candidate - notify cfg80211 of a new mesh peer candidate
+ * cfg80211_notify_new_peer_candidate - notify cfg80211 of a new mesh peer
+ * candidate
*
* @dev: network device
* @macaddr: the MAC address of the new candidate
* @ie: information elements advertised by the peer candidate
* @ie_len: length of the information elements buffer
+ * @sig_dbm: signal level in dBm
* @gfp: allocation flags
*
* This function notifies cfg80211 that the mesh peer candidate has been
@@ -6075,11 +7086,19 @@ void cfg80211_notify_new_peer_candidate(struct net_device *dev,
*/
/**
- * wiphy_rfkill_set_hw_state - notify cfg80211 about hw block state
+ * wiphy_rfkill_set_hw_state_reason - notify cfg80211 about hw block state
* @wiphy: the wiphy
* @blocked: block status
+ * @reason: one of reasons in &enum rfkill_hard_block_reasons
*/
-void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked);
+void wiphy_rfkill_set_hw_state_reason(struct wiphy *wiphy, bool blocked,
+ enum rfkill_hard_block_reasons reason);
+
+static inline void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked)
+{
+ wiphy_rfkill_set_hw_state_reason(wiphy, blocked,
+ RFKILL_HARD_BLOCK_SIGNAL);
+}
/**
* wiphy_rfkill_start_polling - start polling rfkill
@@ -6091,7 +7110,10 @@ void wiphy_rfkill_start_polling(struct wiphy *wiphy);
* wiphy_rfkill_stop_polling - stop polling rfkill
* @wiphy: the wiphy
*/
-void wiphy_rfkill_stop_polling(struct wiphy *wiphy);
+static inline void wiphy_rfkill_stop_polling(struct wiphy *wiphy)
+{
+ rfkill_pause_polling(wiphy->rfkill);
+}
/**
* DOC: Vendor commands
@@ -6173,7 +7195,7 @@ cfg80211_vendor_cmd_alloc_reply_skb(struct wiphy *wiphy, int approxlen)
int cfg80211_vendor_cmd_reply(struct sk_buff *skb);
/**
- * cfg80211_vendor_cmd_get_sender
+ * cfg80211_vendor_cmd_get_sender - get the current sender netlink ID
* @wiphy: the wiphy
*
* Return the current netlink port ID in a vendor command handler.
@@ -6403,13 +7425,6 @@ struct cfg80211_fils_resp_params {
* indicate that this is a failure, but without a status code.
* @timeout_reason is used to report the reason for the timeout in that
* case.
- * @bssid: The BSSID of the AP (may be %NULL)
- * @bss: Entry of bss to which STA got connected to, can be obtained through
- * cfg80211_get_bss() (may be %NULL). But it is recommended to store the
- * bss from the connect_request and hold a reference to it and return
- * through this param to avoid a warning if the bss is expired during the
- * connection, esp. for those drivers implementing connect op.
- * Only one parameter among @bssid and @bss needs to be specified.
* @req_ie: Association request IEs (may be %NULL)
* @req_ie_len: Association request IEs length
* @resp_ie: Association response IEs (may be %NULL)
@@ -6421,17 +7436,41 @@ struct cfg80211_fils_resp_params {
* not known. This value is used only if @status < 0 to indicate that the
* failure is due to a timeout and not due to explicit rejection by the AP.
* This value is ignored in other cases (@status >= 0).
+ * @valid_links: For MLO connection, bitmask of the valid link IDs. Otherwise
+ * zero.
+ * @ap_mld_addr: For MLO connection, MLD address of the AP. Otherwise %NULL.
+ * @links: For MLO connection, contains link info for the valid links indicated
+ * using @valid_links. For non-MLO connection, links[0] contains the
+ * connected AP info.
+ * @links.addr: For MLO connection, MAC address of the STA link. Otherwise
+ * %NULL.
+ * @links.bssid: For MLO connection, MAC address of the AP link. For non-MLO
+ * connection, links[0].bssid points to the BSSID of the AP (may be %NULL).
+ * @links.bss: For MLO connection, entry of bss to which STA link is connected.
+ * For non-MLO connection, links[0].bss points to entry of bss to which STA
+ * is connected. It can be obtained through cfg80211_get_bss() (may be
+ * %NULL). It is recommended to store the bss from the connect_request and
+ * hold a reference to it and return through this param to avoid a warning
+ * if the bss is expired during the connection, esp. for those drivers
+ * implementing the connect op. Only one parameter among @links.bssid and
+ * @links.bss needs to be specified.
*/
struct cfg80211_connect_resp_params {
int status;
- const u8 *bssid;
- struct cfg80211_bss *bss;
const u8 *req_ie;
size_t req_ie_len;
const u8 *resp_ie;
size_t resp_ie_len;
struct cfg80211_fils_resp_params fils;
enum nl80211_timeout_reason timeout_reason;
+
+ const u8 *ap_mld_addr;
+ u16 valid_links;
+ struct {
+ const u8 *addr;
+ const u8 *bssid;
+ struct cfg80211_bss *bss;
+ } links[IEEE80211_MLD_MAX_NUM_LINKS];
};
/**
@@ -6501,8 +7540,8 @@ cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
memset(&params, 0, sizeof(params));
params.status = status;
- params.bssid = bssid;
- params.bss = bss;
+ params.links[0].bssid = bssid;
+ params.links[0].bss = bss;
params.req_ie = req_ie;
params.req_ie_len = req_ie_len;
params.resp_ie = resp_ie;
@@ -6573,24 +7612,40 @@ cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid,
/**
* struct cfg80211_roam_info - driver initiated roaming information
*
- * @channel: the channel of the new AP
- * @bss: entry of bss to which STA got roamed (may be %NULL if %bssid is set)
- * @bssid: the BSSID of the new AP (may be %NULL if %bss is set)
* @req_ie: association request IEs (maybe be %NULL)
* @req_ie_len: association request IEs length
* @resp_ie: association response IEs (may be %NULL)
* @resp_ie_len: assoc response IEs length
* @fils: FILS related roaming information.
+ * @valid_links: For MLO roaming, bitmask of the new valid links. Otherwise
+ * zero.
+ * @ap_mld_addr: For MLO roaming, MLD address of the new AP. Otherwise %NULL.
+ * @links: For MLO roaming, contains new link info for the valid links set in
+ * @valid_links. For non-MLO roaming, links[0] contains the new AP info.
+ * @links.addr: For MLO roaming, MAC address of the STA link. Otherwise %NULL.
+ * @links.bssid: For MLO roaming, MAC address of the new AP link. For non-MLO
+ * roaming, links[0].bssid points to the BSSID of the new AP. May be
+ * %NULL if %links.bss is set.
+ * @links.channel: the channel of the new AP.
+ * @links.bss: For MLO roaming, entry of new bss to which STA link got
+ * roamed. For non-MLO roaming, links[0].bss points to entry of bss to
+ * which STA got roamed (may be %NULL if %links.bssid is set)
*/
struct cfg80211_roam_info {
- struct ieee80211_channel *channel;
- struct cfg80211_bss *bss;
- const u8 *bssid;
const u8 *req_ie;
size_t req_ie_len;
const u8 *resp_ie;
size_t resp_ie_len;
struct cfg80211_fils_resp_params fils;
+
+ const u8 *ap_mld_addr;
+ u16 valid_links;
+ struct {
+ const u8 *addr;
+ const u8 *bssid;
+ struct ieee80211_channel *channel;
+ struct cfg80211_bss *bss;
+ } links[IEEE80211_MLD_MAX_NUM_LINKS];
};
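A non-MLO roam report then only fills links[0]; this sketch assumes the info is handed to cfg80211_roamed(), which is declared elsewhere in this header, and uses placeholder variables throughout:

	struct cfg80211_roam_info info = {};

	info.links[0].channel = new_chan;
	info.links[0].bss = new_bss;	/* or .bssid when no bss reference is held */
	cfg80211_roamed(dev, &info, GFP_KERNEL);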
/**
@@ -6757,6 +7812,80 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
gfp_t gfp);
/**
+ * struct cfg80211_rx_info - received management frame info
+ *
+ * @freq: Frequency on which the frame was received in kHz
+ * @sig_dbm: signal strength in dBm, or 0 if unknown
+ * @have_link_id: indicates the frame was received on a link of
+ * an MLD, i.e. the @link_id field is valid
+ * @link_id: the ID of the link the frame was received on
+ * @buf: Management frame (header + body)
+ * @len: length of the frame data
+ * @flags: flags, as defined in enum nl80211_rxmgmt_flags
+ * @rx_tstamp: Hardware timestamp of frame RX in nanoseconds
+ * @ack_tstamp: Hardware timestamp of ack TX in nanoseconds
+ */
+struct cfg80211_rx_info {
+ int freq;
+ int sig_dbm;
+ bool have_link_id;
+ u8 link_id;
+ const u8 *buf;
+ size_t len;
+ u32 flags;
+ u64 rx_tstamp;
+ u64 ack_tstamp;
+};
+
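A hedged sketch of reporting an Action frame received on link 1 of an MLD; frequency, signal level and buffers are placeholders, and the timestamp fields are simply left at 0 when the hardware cannot provide them:

	bool handled;
	struct cfg80211_rx_info info = {
		.freq = MHZ_TO_KHZ(2437),	/* illustrative */
		.sig_dbm = -55,
		.have_link_id = true,
		.link_id = 1,
		.buf = frame,
		.len = frame_len,
	};

	handled = cfg80211_rx_mgmt_ext(wdev, &info);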
+/**
+ * cfg80211_rx_mgmt_ext - management frame notification with extended info
+ * @wdev: wireless device receiving the frame
+ * @info: RX info as defined in struct cfg80211_rx_info
+ *
+ * This function is called whenever an Action frame is received for a station
+ * mode interface, but is not processed in kernel.
+ *
+ * Return: %true if a user space application has registered for this frame.
+ * For action frames, that makes it responsible for rejecting unrecognized
+ * action frames; %false otherwise, in which case for action frames the
+ * driver is responsible for rejecting the frame.
+ */
+bool cfg80211_rx_mgmt_ext(struct wireless_dev *wdev,
+ struct cfg80211_rx_info *info);
+
+/**
+ * cfg80211_rx_mgmt_khz - notification of received, unprocessed management frame
+ * @wdev: wireless device receiving the frame
+ * @freq: Frequency on which the frame was received in KHz
+ * @sig_dbm: signal strength in dBm, or 0 if unknown
+ * @buf: Management frame (header + body)
+ * @len: length of the frame data
+ * @flags: flags, as defined in enum nl80211_rxmgmt_flags
+ *
+ * This function is called whenever an Action frame is received for a station
+ * mode interface, but is not processed in kernel.
+ *
+ * Return: %true if a user space application has registered for this frame.
+ * For action frames, that makes it responsible for rejecting unrecognized
+ * action frames; %false otherwise, in which case for action frames the
+ * driver is responsible for rejecting the frame.
+ */
+static inline bool cfg80211_rx_mgmt_khz(struct wireless_dev *wdev, int freq,
+ int sig_dbm, const u8 *buf, size_t len,
+ u32 flags)
+{
+ struct cfg80211_rx_info info = {
+ .freq = freq,
+ .sig_dbm = sig_dbm,
+ .buf = buf,
+ .len = len,
+ .flags = flags
+ };
+
+ return cfg80211_rx_mgmt_ext(wdev, &info);
+}
+
+/**
* cfg80211_rx_mgmt - notification of received, unprocessed management frame
* @wdev: wireless device receiving the frame
* @freq: Frequency on which the frame was received in MHz
@@ -6773,8 +7902,52 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
* action frames; %false otherwise, in which case for action frames the
* driver is responsible for rejecting the frame.
*/
-bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm,
- const u8 *buf, size_t len, u32 flags);
+static inline bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq,
+ int sig_dbm, const u8 *buf, size_t len,
+ u32 flags)
+{
+ struct cfg80211_rx_info info = {
+ .freq = MHZ_TO_KHZ(freq),
+ .sig_dbm = sig_dbm,
+ .buf = buf,
+ .len = len,
+ .flags = flags
+ };
+
+ return cfg80211_rx_mgmt_ext(wdev, &info);
+}
+
+/**
+ * struct cfg80211_tx_status - TX status for management frame information
+ *
+ * @cookie: Cookie returned by cfg80211_ops::mgmt_tx()
+ * @tx_tstamp: hardware TX timestamp in nanoseconds
+ * @ack_tstamp: hardware ack RX timestamp in nanoseconds
+ * @buf: Management frame (header + body)
+ * @len: length of the frame data
+ * @ack: Whether frame was acknowledged
+ */
+struct cfg80211_tx_status {
+ u64 cookie;
+ u64 tx_tstamp;
+ u64 ack_tstamp;
+ const u8 *buf;
+ size_t len;
+ bool ack;
+};
+
+/**
+ * cfg80211_mgmt_tx_status_ext - TX status notification with extended info
+ * @wdev: wireless device receiving the frame
+ * @status: TX status data
+ * @gfp: context flags
+ *
+ * This function is called whenever a management frame was requested to be
+ * transmitted with cfg80211_ops::mgmt_tx() to report the TX status of the
+ * transmission attempt with extended info.
+ */
+void cfg80211_mgmt_tx_status_ext(struct wireless_dev *wdev,
+ struct cfg80211_tx_status *status, gfp_t gfp);
/**
* cfg80211_mgmt_tx_status - notification of TX status for management frame
@@ -6789,9 +7962,37 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm,
* transmitted with cfg80211_ops::mgmt_tx() to report the TX status of the
* transmission attempt.
*/
-void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
- const u8 *buf, size_t len, bool ack, gfp_t gfp);
+static inline void cfg80211_mgmt_tx_status(struct wireless_dev *wdev,
+ u64 cookie, const u8 *buf,
+ size_t len, bool ack, gfp_t gfp)
+{
+ struct cfg80211_tx_status status = {
+ .cookie = cookie,
+ .buf = buf,
+ .len = len,
+ .ack = ack
+ };
+ cfg80211_mgmt_tx_status_ext(wdev, &status, gfp);
+}
+
+/**
+ * cfg80211_control_port_tx_status - notification of TX status for control
+ * port frames
+ * @wdev: wireless device receiving the frame
+ * @cookie: Cookie returned by cfg80211_ops::tx_control_port()
+ * @buf: Data frame (header + body)
+ * @len: length of the frame data
+ * @ack: Whether frame was acknowledged
+ * @gfp: context flags
+ *
+ * This function is called whenever a control port frame was requested to be
+ * transmitted with cfg80211_ops::tx_control_port() to report the TX status of
+ * the transmission attempt.
+ */
+void cfg80211_control_port_tx_status(struct wireless_dev *wdev, u64 cookie,
+ const u8 *buf, size_t len, bool ack,
+ gfp_t gfp);
/**
* cfg80211_rx_control_port - notification about a received control port frame
@@ -6866,15 +8067,33 @@ void cfg80211_cqm_txe_notify(struct net_device *dev, const u8 *peer,
void cfg80211_cqm_beacon_loss_notify(struct net_device *dev, gfp_t gfp);
/**
- * cfg80211_radar_event - radar detection event
+ * __cfg80211_radar_event - radar detection event
* @wiphy: the wiphy
* @chandef: chandef for the current channel
+ * @offchan: the radar has been detected on the offchannel chain
* @gfp: context flags
*
* This function is called when a radar is detected on the current channel.
*/
-void cfg80211_radar_event(struct wiphy *wiphy,
- struct cfg80211_chan_def *chandef, gfp_t gfp);
+void __cfg80211_radar_event(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ bool offchan, gfp_t gfp);
+
+static inline void
+cfg80211_radar_event(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ gfp_t gfp)
+{
+ __cfg80211_radar_event(wiphy, chandef, false, gfp);
+}
+
+static inline void
+cfg80211_background_radar_event(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ gfp_t gfp)
+{
+ __cfg80211_radar_event(wiphy, chandef, true, gfp);
+}
/**
* cfg80211_sta_opmode_change_notify - STA's ht/vht operation mode change event
@@ -6905,6 +8124,14 @@ void cfg80211_cac_event(struct net_device *netdev,
const struct cfg80211_chan_def *chandef,
enum nl80211_radar_event event, gfp_t gfp);
+/**
+ * cfg80211_background_cac_abort - Channel Availability Check offchan abort event
+ * @wiphy: the wiphy
+ *
+ * This function is called by the driver when a Channel Availability Check
+ * (CAC) is aborted by an offchannel dedicated chain.
+ */
+void cfg80211_background_cac_abort(struct wiphy *wiphy);
/**
* cfg80211_gtk_rekey_notify - notify userspace about driver rekeying
@@ -6973,6 +8200,21 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
bool is_valid_ack_signal, gfp_t gfp);
/**
+ * cfg80211_report_obss_beacon_khz - report beacon from other APs
+ * @wiphy: The wiphy that received the beacon
+ * @frame: the frame
+ * @len: length of the frame
+ * @freq: frequency the frame was received on in KHz
+ * @sig_dbm: signal strength in dBm, or 0 if unknown
+ *
+ * Use this function to report to userspace when a beacon was
+ * received. It is not useful to call this when there is no
+ * netdev that is in AP/GO mode.
+ */
+void cfg80211_report_obss_beacon_khz(struct wiphy *wiphy, const u8 *frame,
+ size_t len, int freq, int sig_dbm);
+
+/**
* cfg80211_report_obss_beacon - report beacon from other APs
* @wiphy: The wiphy that received the beacon
* @frame: the frame
@@ -6984,9 +8226,13 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
* received. It is not useful to call this when there is no
* netdev that is in AP/GO mode.
*/
-void cfg80211_report_obss_beacon(struct wiphy *wiphy,
- const u8 *frame, size_t len,
- int freq, int sig_dbm);
+static inline void cfg80211_report_obss_beacon(struct wiphy *wiphy,
+ const u8 *frame, size_t len,
+ int freq, int sig_dbm)
+{
+ cfg80211_report_obss_beacon_khz(wiphy, frame, len, MHZ_TO_KHZ(freq),
+ sig_dbm);
+}
/**
* cfg80211_reg_can_beacon - check if beaconing is allowed
@@ -7012,7 +8258,7 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
* also checks if IR-relaxation conditions apply, to allow beaconing under
* more permissive conditions.
*
- * Requires the RTNL to be held.
+ * Requires the wiphy mutex to be held.
*/
bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef,
@@ -7022,18 +8268,22 @@ bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
* cfg80211_ch_switch_notify - update wdev channel and notify userspace
* @dev: the device which switched channels
* @chandef: the new channel definition
+ * @link_id: the link ID for MLO, must be 0 for non-MLO
*
* Caller must acquire wdev_lock, therefore must only be called from sleepable
* driver context!
*/
void cfg80211_ch_switch_notify(struct net_device *dev,
- struct cfg80211_chan_def *chandef);
+ struct cfg80211_chan_def *chandef,
+ unsigned int link_id);
/*
* cfg80211_ch_switch_started_notify - notify channel switch start
* @dev: the device on which the channel switch started
* @chandef: the future channel definition
+ * @link_id: the link ID for MLO, must be 0 for non-MLO
* @count: the number of TBTTs until the channel switch happens
+ * @quiet: whether or not immediate quiet was requested by the AP
*
* Inform the userspace about the channel switch that has just
* started, so that it can take appropriate actions (eg. starting
@@ -7041,7 +8291,8 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
*/
void cfg80211_ch_switch_started_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
- u8 count);
+ unsigned int link_id, u8 count,
+ bool quiet);
/**
* ieee80211_operating_class_to_band - convert operating class to band
@@ -7065,6 +8316,19 @@ bool ieee80211_operating_class_to_band(u8 operating_class,
bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
u8 *op_class);
+/**
+ * ieee80211_chandef_to_khz - convert chandef to frequency in KHz
+ *
+ * @chandef: the chandef to convert
+ *
+ * Returns the center frequency of chandef (1st segment) in KHz.
+ */
+static inline u32
+ieee80211_chandef_to_khz(const struct cfg80211_chan_def *chandef)
+{
+ return MHZ_TO_KHZ(chandef->center_freq1) + chandef->freq1_offset;
+}
+
/*
* cfg80211_tdls_oper_request - request userspace to perform TDLS operation
* @dev: the device on which the operation is requested
@@ -7096,20 +8360,49 @@ u32 cfg80211_calculate_bitrate(struct rate_info *rate);
* cfg80211_unregister_wdev - remove the given wdev
* @wdev: struct wireless_dev to remove
*
- * Call this function only for wdevs that have no netdev assigned,
- * e.g. P2P Devices. It removes the device from the list so that
- * it can no longer be used. It is necessary to call this function
- * even when cfg80211 requests the removal of the interface by
- * calling the del_virtual_intf() callback. The function must also
- * be called when the driver wishes to unregister the wdev, e.g.
- * when the device is unbound from the driver.
+ * This function removes the device so it can no longer be used. It is necessary
+ * to call this function even when cfg80211 requests the removal of the device
+ * by calling the del_virtual_intf() callback. The function must also be called
+ * when the driver wishes to unregister the wdev, e.g. when the hardware device
+ * is unbound from the driver.
*
- * Requires the RTNL to be held.
+ * Requires the RTNL and wiphy mutex to be held.
*/
void cfg80211_unregister_wdev(struct wireless_dev *wdev);
/**
- * struct cfg80211_ft_event - FT Information Elements
+ * cfg80211_register_netdevice - register the given netdev
+ * @dev: the netdev to register
+ *
+ * Note: In contexts coming from cfg80211 callbacks, you must call this rather
+ * than register_netdevice(); register_netdev() is impossible as the RTNL is
+ * held. Otherwise, both register_netdevice() and register_netdev() are usable
+ * as well.
+ *
+ * Requires the RTNL and wiphy mutex to be held.
+ */
+int cfg80211_register_netdevice(struct net_device *dev);
+
+/**
+ * cfg80211_unregister_netdevice - unregister the given netdev
+ * @dev: the netdev to unregister
+ *
+ * Note: In contexts coming from cfg80211 callbacks, you must call this rather
+ * than unregister_netdevice(); unregister_netdev() is impossible as the RTNL
+ * is held. Otherwise, both unregister_netdevice() and unregister_netdev() are
+ * usable as well.
+ *
+ * Requires the RTNL and wiphy mutex to be held.
+ */
+static inline void cfg80211_unregister_netdevice(struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_CFG80211)
+ cfg80211_unregister_wdev(dev->ieee80211_ptr);
+#endif
+}
+
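A typical spot for these calls is a driver's interface-creation path invoked from a cfg80211 callback, where the RTNL and the wiphy mutex are already held. A minimal sketch with placeholder names:

	dev->ieee80211_ptr = wdev;
	err = cfg80211_register_netdevice(dev);
	if (err)
		goto err_free_netdev;	/* hypothetical error label */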
+/**
+ * struct cfg80211_ft_event_params - FT Information Elements
* @ies: FT IEs
* @ies_len: length of the FT IE in bytes
* @target_ap: target AP's MAC address
@@ -7462,6 +8755,18 @@ bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
bool is_4addr, u8 check_swif);
+/**
+ * cfg80211_assoc_comeback - notification of association that was
+ * temporarily rejected with a comeback
+ * @netdev: network device
+ * @ap_addr: AP (MLD) address that rejected the association
+ * @timeout: timeout interval value in TUs.
+ *
+ * This function may sleep. The caller must hold the corresponding wdev's mutex.
+ */
+void cfg80211_assoc_comeback(struct net_device *netdev,
+ const u8 *ap_addr, u32 timeout);
+
/* Logging, debugging and troubleshooting/diagnostic helpers. */
/* wiphy_printk helpers, similar to dev_printk */
@@ -7482,6 +8787,8 @@ bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
dev_notice(&(wiphy)->dev, format, ##args)
#define wiphy_info(wiphy, format, args...) \
dev_info(&(wiphy)->dev, format, ##args)
+#define wiphy_info_once(wiphy, format, args...) \
+ dev_info_once(&(wiphy)->dev, format, ##args)
#define wiphy_err_ratelimited(wiphy, format, args...) \
dev_err_ratelimited(&(wiphy)->dev, format, ##args)
@@ -7523,4 +8830,77 @@ void cfg80211_update_owe_info_event(struct net_device *netdev,
struct cfg80211_update_owe_info *owe_info,
gfp_t gfp);
+/**
+ * cfg80211_bss_flush - resets all the scan entries
+ * @wiphy: the wiphy
+ */
+void cfg80211_bss_flush(struct wiphy *wiphy);
+
+/**
+ * cfg80211_bss_color_notify - notify about bss color event
+ * @dev: network device
+ * @gfp: allocation flags
+ * @cmd: the actual event we want to notify
+ * @count: the number of TBTTs until the color change happens
+ * @color_bitmap: representations of the colors that the local BSS is aware of
+ */
+int cfg80211_bss_color_notify(struct net_device *dev, gfp_t gfp,
+ enum nl80211_commands cmd, u8 count,
+ u64 color_bitmap);
+
+/**
+ * cfg80211_obss_color_collision_notify - notify about bss color collision
+ * @dev: network device
+ * @color_bitmap: representations of the colors that the local BSS is aware of
+ * @gfp: allocation flags
+ */
+static inline int cfg80211_obss_color_collision_notify(struct net_device *dev,
+ u64 color_bitmap, gfp_t gfp)
+{
+ return cfg80211_bss_color_notify(dev, gfp,
+ NL80211_CMD_OBSS_COLOR_COLLISION,
+ 0, color_bitmap);
+}
+
+/**
+ * cfg80211_color_change_started_notify - notify color change start
+ * @dev: the device on which the color is switched
+ * @count: the number of TBTTs until the color change happens
+ *
+ * Inform the userspace about the color change that has started.
+ */
+static inline int cfg80211_color_change_started_notify(struct net_device *dev,
+ u8 count)
+{
+ return cfg80211_bss_color_notify(dev, GFP_KERNEL,
+ NL80211_CMD_COLOR_CHANGE_STARTED,
+ count, 0);
+}
+
+/**
+ * cfg80211_color_change_aborted_notify - notify color change abort
+ * @dev: the device on which the color is switched
+ *
+ * Inform the userspace about the color change that has been aborted.
+ */
+static inline int cfg80211_color_change_aborted_notify(struct net_device *dev)
+{
+ return cfg80211_bss_color_notify(dev, GFP_KERNEL,
+ NL80211_CMD_COLOR_CHANGE_ABORTED,
+ 0, 0);
+}
+
+/**
+ * cfg80211_color_change_notify - notify color change completion
+ * @dev: the device on which the color was switched
+ *
+ * Inform the userspace about the color change that has completed.
+ */
+static inline int cfg80211_color_change_notify(struct net_device *dev)
+{
+ return cfg80211_bss_color_notify(dev, GFP_KERNEL,
+ NL80211_CMD_COLOR_CHANGE_COMPLETED,
+ 0, 0);
+}
+
#endif /* __NET_CFG80211_H */
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index 6f86073a5d7d..d8d8719315fd 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -203,8 +203,8 @@ struct wpan_phy {
/* PHY depended MAC PIB values */
- /* 802.15.4 acronym: Tdsym in usec */
- u8 symbol_duration;
+ /* 802.15.4 acronym: Tdsym in nsec */
+ u32 symbol_duration;
/* lifs and sifs periods timing */
u16 lifs_period;
u16 sifs_period;
@@ -214,7 +214,7 @@ struct wpan_phy {
/* the network namespace this phy lives in currently */
possible_net_t _net;
- char priv[0] __aligned(NETDEV_ALIGN);
+ char priv[] __aligned(NETDEV_ALIGN);
};
static inline struct net *wpan_phy_net(struct wpan_phy *wpan_phy)
@@ -227,6 +227,16 @@ static inline void wpan_phy_net_set(struct wpan_phy *wpan_phy, struct net *net)
write_pnet(&wpan_phy->_net, net);
}
+/**
+ * struct ieee802154_addr - IEEE802.15.4 device address
+ * @mode: Address mode from frame header. Can be one of:
+ * - @IEEE802154_ADDR_NONE
+ * - @IEEE802154_ADDR_SHORT
+ * - @IEEE802154_ADDR_LONG
+ * @pan_id: The PAN ID this address belongs to
+ * @short_addr: address if @mode is @IEEE802154_ADDR_SHORT
+ * @extended_addr: address if @mode is @IEEE802154_ADDR_LONG
+ */
struct ieee802154_addr {
u8 mode;
__le16 pan_id;
@@ -363,6 +373,7 @@ struct wpan_dev {
#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev)
+#if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN)
static inline int
wpan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
const struct ieee802154_addr *daddr,
@@ -373,6 +384,7 @@ wpan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
return wpan_dev->header_ops->create(skb, dev, daddr, saddr, len);
}
+#endif
struct wpan_phy *
wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size);
@@ -405,4 +417,6 @@ static inline const char *wpan_phy_name(struct wpan_phy *phy)
return dev_name(&phy->dev);
}
+void ieee802154_configure_durations(struct wpan_phy *phy);
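A hypothetical driver sketch: after reprogramming channel and page, let the core recompute Tdsym (now kept in nanoseconds) and the derived timings:

	phy->current_page = page;
	phy->current_channel = channel;
	ieee802154_configure_durations(phy);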
+
#endif /* __NET_CFG802154_H */
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 97bf4885a962..6bc783b7a06c 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -22,39 +22,39 @@
#include <asm/checksum.h>
#ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
-static inline
+static __always_inline
__wsum csum_and_copy_from_user (const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr)
+ int len)
{
- if (access_ok(src, len))
- return csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
-
- if (len)
- *err_ptr = -EFAULT;
-
- return sum;
+ if (copy_from_user(dst, src, len))
+ return 0;
+ return csum_partial(dst, len, ~0U);
}
#endif
#ifndef HAVE_CSUM_COPY_USER
-static __inline__ __wsum csum_and_copy_to_user
-(const void *src, void __user *dst, int len, __wsum sum, int *err_ptr)
+static __always_inline __wsum csum_and_copy_to_user
+(const void *src, void __user *dst, int len)
{
- sum = csum_partial(src, len, sum);
+ __wsum sum = csum_partial(src, len, ~0U);
- if (access_ok(dst, len)) {
- if (copy_to_user(dst, src, len) == 0)
- return sum;
- }
- if (len)
- *err_ptr = -EFAULT;
+ if (copy_to_user(dst, src, len) == 0)
+ return sum;
+ return 0;
+}
+#endif
- return (__force __wsum)-1; /* invalid checksum */
+#ifndef _HAVE_ARCH_CSUM_AND_COPY
+static __always_inline __wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len)
+{
+ memcpy(dst, src, len);
+ return csum_partial(dst, len, 0);
}
#endif
#ifndef HAVE_ARCH_CSUM_ADD
-static inline __wsum csum_add(__wsum csum, __wsum addend)
+static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
{
u32 res = (__force u32)csum;
res += (__force u32)addend;
@@ -62,12 +62,12 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
}
#endif
-static inline __wsum csum_sub(__wsum csum, __wsum addend)
+static __always_inline __wsum csum_sub(__wsum csum, __wsum addend)
{
return csum_add(csum, ~addend);
}
-static inline __sum16 csum16_add(__sum16 csum, __be16 addend)
+static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend)
{
u16 res = (__force u16)csum;
@@ -75,53 +75,58 @@ static inline __sum16 csum16_add(__sum16 csum, __be16 addend)
return (__force __sum16)(res + (res < (__force u16)addend));
}
-static inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
+static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
{
return csum16_add(csum, ~addend);
}
-static inline __wsum
-csum_block_add(__wsum csum, __wsum csum2, int offset)
+#ifndef HAVE_ARCH_CSUM_SHIFT
+static __always_inline __wsum csum_shift(__wsum sum, int offset)
{
- u32 sum = (__force u32)csum2;
-
/* rotate sum to align it with a 16b boundary */
if (offset & 1)
- sum = ror32(sum, 8);
+ return (__force __wsum)ror32((__force u32)sum, 8);
+ return sum;
+}
+#endif
- return csum_add(csum, (__force __wsum)sum);
+static __always_inline __wsum
+csum_block_add(__wsum csum, __wsum csum2, int offset)
+{
+ return csum_add(csum, csum_shift(csum2, offset));
}
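The offset argument is the byte position at which the second block started; an odd offset triggers the rotation in csum_shift(). A sketch of folding two partial sums together, as if csum_partial() had covered both buffers back to back:

	__wsum s1 = csum_partial(buf1, len1, 0);
	__wsum s2 = csum_partial(buf2, len2, 0);
	__wsum whole = csum_block_add(s1, s2, len1);	/* buf2 started at len1 */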
-static inline __wsum
+static __always_inline __wsum
csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
{
return csum_block_add(csum, csum2, offset);
}
-static inline __wsum
+static __always_inline __wsum
csum_block_sub(__wsum csum, __wsum csum2, int offset)
{
return csum_block_add(csum, ~csum2, offset);
}
-static inline __wsum csum_unfold(__sum16 n)
+static __always_inline __wsum csum_unfold(__sum16 n)
{
return (__force __wsum)n;
}
-static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
+static __always_inline
+__wsum csum_partial_ext(const void *buff, int len, __wsum sum)
{
return csum_partial(buff, len, sum);
}
#define CSUM_MANGLED_0 ((__force __sum16)0xffff)
-static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
+static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
{
*sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
}
-static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
+static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
{
__wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);
@@ -134,11 +139,16 @@ static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
* m : old value of a 16bit field
* m' : new value of a 16bit field
*/
-static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
+static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
{
*sum = ~csum16_add(csum16_sub(~(*sum), old), new);
}
+static inline void csum_replace(__wsum *csum, __wsum old, __wsum new)
+{
+ *csum = csum_add(csum_sub(*csum, old), new);
+}
+
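These helpers perform incremental checksum updates in the style of RFC 1624, avoiding a full recomputation. A hedged sketch of rewriting an IPv4 destination address, with iph and new_daddr as placeholders:

	csum_replace4(&iph->check, iph->daddr, new_daddr);
	iph->daddr = new_daddr;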
struct sk_buff;
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
__be32 from, __be32 to, bool pseudohdr);
@@ -148,16 +158,16 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
__wsum diff, bool pseudohdr);
-static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
- __be16 from, __be16 to,
- bool pseudohdr)
+static __always_inline
+void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
+ __be16 from, __be16 to, bool pseudohdr)
{
inet_proto_csum_replace4(sum, skb, (__force __be32)from,
(__force __be32)to, pseudohdr);
}
-static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
- int start, int offset)
+static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum,
+ int start, int offset)
{
__sum16 *psum = (__sum16 *)(ptr + offset);
__wsum delta;
@@ -173,9 +183,13 @@ static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
return delta;
}
-static inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
+static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
{
*psum = csum_fold(csum_sub(delta, (__force __wsum)*psum));
}
+static __always_inline __wsum wsum_negate(__wsum val)
+{
+ return (__force __wsum)-((__force u32)val);
+}
#endif
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
index 428b6725b248..53dd7d988a2d 100644
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -150,18 +150,6 @@ static inline int cipso_v4_doi_walk(u32 *skip_cnt,
{
return 0;
}
-
-static inline int cipso_v4_doi_domhsh_add(struct cipso_v4_doi *doi_def,
- const char *domain)
-{
- return -ENOSYS;
-}
-
-static inline int cipso_v4_doi_domhsh_remove(struct cipso_v4_doi *doi_def,
- const char *domain)
-{
- return 0;
-}
#endif /* CONFIG_NETLABEL */
/*
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index 4295de3e6a4b..7e78e7d6f015 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -45,9 +45,14 @@ static inline void sock_update_classid(struct sock_cgroup_data *skcd)
sock_cgroup_set_classid(skcd, classid);
}
+static inline u32 __task_get_classid(struct task_struct *task)
+{
+ return task_cls_state(task)->classid;
+}
+
static inline u32 task_get_classid(const struct sk_buff *skb)
{
- u32 classid = task_cls_state(current)->classid;
+ u32 classid = __task_get_classid(current);
/* Due to the nature of the classifier it is required to ignore all
* packets originating from softirq context as accessing `current'
diff --git a/include/net/codel.h b/include/net/codel.h
index a6e428f80135..5fed2f16cb8d 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -44,8 +44,6 @@
#include <linux/types.h>
#include <linux/ktime.h>
#include <linux/skbuff.h>
-#include <net/pkt_sched.h>
-#include <net/inet_ecn.h>
/* Controlling Queue Delay (CoDel) algorithm
* =========================================
@@ -102,6 +100,9 @@ static inline u32 codel_time_to_us(codel_time_t val)
* @interval: width of moving time window
* @mtu: device mtu, or minimal queue backlog in bytes.
* @ecn: is Explicit Congestion Notification enabled
+ * @ce_threshold_selector: apply ce_threshold to packets matching this value
+ * in the diffserv/ECN byte of the IP header
+ * @ce_threshold_mask: mask to apply to ce_threshold_selector comparison
*/
struct codel_params {
codel_time_t target;
@@ -109,6 +110,8 @@ struct codel_params {
codel_time_t interval;
u32 mtu;
bool ecn;
+ u8 ce_threshold_selector;
+ u8 ce_threshold_mask;
};
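A sketch of how a qdisc might use the new fields, mirroring an L4S-style setup where only ECT(1) traffic is CE-marked at the lower threshold; the constants come from <net/inet_ecn.h> and the policy choice is the qdisc's, not mandated here:

	params->ce_threshold_selector = INET_ECN_ECT_1;	/* match ECT(1) only */
	params->ce_threshold_mask = INET_ECN_MASK;	/* compare the two ECN bits */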
/**
diff --git a/include/net/codel_impl.h b/include/net/codel_impl.h
index d289b91dcd65..78a27ac73070 100644
--- a/include/net/codel_impl.h
+++ b/include/net/codel_impl.h
@@ -49,11 +49,15 @@
* Implemented on linux by Dave Taht and Eric Dumazet
*/
+#include <net/inet_ecn.h>
+
static void codel_params_init(struct codel_params *params)
{
params->interval = MS2TIME(100);
params->target = MS2TIME(5);
params->ce_threshold = CODEL_DISABLED_THRESHOLD;
+ params->ce_threshold_mask = 0;
+ params->ce_threshold_selector = 0;
params->ecn = false;
}
@@ -246,9 +250,19 @@ static struct sk_buff *codel_dequeue(void *ctx,
vars->rec_inv_sqrt);
}
end:
- if (skb && codel_time_after(vars->ldelay, params->ce_threshold) &&
- INET_ECN_set_ce(skb))
- stats->ce_mark++;
+ if (skb && codel_time_after(vars->ldelay, params->ce_threshold)) {
+ bool set_ce = true;
+
+ if (params->ce_threshold_mask) {
+ int dsfield = skb_get_dsfield(skb);
+
+ set_ce = (dsfield >= 0 &&
+ (((u8)dsfield & params->ce_threshold_mask) ==
+ params->ce_threshold_selector));
+ }
+ if (set_ce && INET_ECN_set_ce(skb))
+ stats->ce_mark++;
+ }
return skb;
}
diff --git a/include/net/codel_qdisc.h b/include/net/codel_qdisc.h
index 098630f83a55..7d3d9219f4fe 100644
--- a/include/net/codel_qdisc.h
+++ b/include/net/codel_qdisc.h
@@ -49,6 +49,9 @@
* Implemented on linux by Dave Taht and Eric Dumazet
*/
+#include <net/codel.h>
+#include <net/pkt_sched.h>
+
/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */
struct codel_skb_cb {
codel_time_t enqueue_time;
diff --git a/include/net/compat.h b/include/net/compat.h
index f277653c7e17..84c163f40f38 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -5,8 +5,6 @@
struct sock;
-#if defined(CONFIG_COMPAT)
-
#include <linux/compat.h>
struct compat_msghdr {
@@ -30,27 +28,68 @@ struct compat_cmsghdr {
compat_int_t cmsg_type;
};
-#else /* defined(CONFIG_COMPAT) */
-/*
- * To avoid compiler warnings:
- */
-#define compat_msghdr msghdr
-#define compat_mmsghdr mmsghdr
-#endif /* defined(CONFIG_COMPAT) */
+struct compat_rtentry {
+ u32 rt_pad1;
+ struct sockaddr rt_dst; /* target address */
+ struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */
+ struct sockaddr rt_genmask; /* target network mask (IP) */
+ unsigned short rt_flags;
+ short rt_pad2;
+ u32 rt_pad3;
+ unsigned char rt_tos;
+ unsigned char rt_class;
+ short rt_pad4;
+ short rt_metric; /* +1 for binary compatibility! */
+ compat_uptr_t rt_dev; /* forcing the device at add */
+ u32 rt_mtu; /* per route MTU/Window */
+ u32 rt_window; /* Window clamping */
+ unsigned short rt_irtt; /* Initial RTT */
+};
+int __get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr *msg,
+ struct sockaddr __user **save_addr);
int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
struct sockaddr __user **, struct iovec **);
-struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval);
int put_cmsg_compat(struct msghdr*, int, int, int, void *);
int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *,
unsigned char *, int);
-int compat_mc_setsockopt(struct sock *, int, int, char __user *, unsigned int,
- int (*)(struct sock *, int, int, char __user *,
- unsigned int));
-int compat_mc_getsockopt(struct sock *, int, int, char __user *, int __user *,
- int (*)(struct sock *, int, int, char __user *,
- int __user *));
+struct compat_group_req {
+ __u32 gr_interface;
+ struct __kernel_sockaddr_storage gr_group
+ __aligned(4);
+} __packed;
+
+struct compat_group_source_req {
+ __u32 gsr_interface;
+ struct __kernel_sockaddr_storage gsr_group
+ __aligned(4);
+ struct __kernel_sockaddr_storage gsr_source
+ __aligned(4);
+} __packed;
+
+struct compat_group_filter {
+ union {
+ struct {
+ __u32 gf_interface_aux;
+ struct __kernel_sockaddr_storage gf_group_aux
+ __aligned(4);
+ __u32 gf_fmode_aux;
+ __u32 gf_numsrc_aux;
+ struct __kernel_sockaddr_storage gf_slist[1]
+ __aligned(4);
+ } __packed;
+ struct {
+ __u32 gf_interface;
+ struct __kernel_sockaddr_storage gf_group
+ __aligned(4);
+ __u32 gf_fmode;
+ __u32 gf_numsrc;
+ struct __kernel_sockaddr_storage gf_slist_flex[]
+ __aligned(4);
+ } __packed;
+ };
+} __packed;
#endif /* NET_COMPAT_H */
diff --git a/include/net/datalink.h b/include/net/datalink.h
index a9663229b913..c837ffc7ebf8 100644
--- a/include/net/datalink.h
+++ b/include/net/datalink.h
@@ -2,6 +2,13 @@
#ifndef _NET_INET_DATALINK_H_
#define _NET_INET_DATALINK_H_
+#include <linux/list.h>
+
+struct llc_sap;
+struct net_device;
+struct packet_type;
+struct sk_buff;
+
struct datalink_proto {
unsigned char type[8];
@@ -12,7 +19,7 @@ struct datalink_proto {
int (*rcvfunc)(struct sk_buff *, struct net_device *,
struct packet_type *, struct net_device *);
int (*request)(struct datalink_proto *, struct sk_buff *,
- unsigned char *);
+ const unsigned char *);
struct list_head node;
};
diff --git a/include/net/dcbevent.h b/include/net/dcbevent.h
index 43e34131a53f..02700262f71a 100644
--- a/include/net/dcbevent.h
+++ b/include/net/dcbevent.h
@@ -8,6 +8,8 @@
#ifndef _DCB_EVENT_H
#define _DCB_EVENT_H
+struct notifier_block;
+
enum dcbevent_notif_type {
DCB_APP_EVENT = 1,
};
diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h
index e4ad58c4062c..2b2d86fb3131 100644
--- a/include/net/dcbnl.h
+++ b/include/net/dcbnl.h
@@ -10,6 +10,8 @@
#include <linux/dcbnl.h>
+struct net_device;
+
struct dcb_app_type {
int ifindex;
struct dcb_app app;
diff --git a/include/net/devlink.h b/include/net/devlink.h
index ce5cea428fdc..ba6b8b094943 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -16,70 +16,111 @@
#include <linux/workqueue.h>
#include <linux/refcount.h>
#include <net/net_namespace.h>
+#include <net/flow_offload.h>
#include <uapi/linux/devlink.h>
+#include <linux/xarray.h>
+#include <linux/firmware.h>
-struct devlink_ops;
-
-struct devlink {
- struct list_head list;
- struct list_head port_list;
- struct list_head sb_list;
- struct list_head dpipe_table_list;
- struct list_head resource_list;
- struct list_head param_list;
- struct list_head region_list;
- u32 snapshot_id;
- struct list_head reporter_list;
- struct mutex reporters_lock; /* protects reporter_list */
- struct devlink_dpipe_headers *dpipe_headers;
- struct list_head trap_list;
- struct list_head trap_group_list;
- const struct devlink_ops *ops;
- struct device *dev;
- possible_net_t _net;
- struct mutex lock;
- u8 reload_failed:1,
- reload_enabled:1,
- registered:1;
- char priv[0] __aligned(NETDEV_ALIGN);
-};
+struct devlink;
+struct devlink_linecard;
struct devlink_port_phys_attrs {
u32 port_number; /* Same value as "split group".
* A physical port which is visible to the user
* for a given port flavour.
*/
- u32 split_subport_number;
+ u32 split_subport_number; /* If the port is split, this is the subport number. */
};
+/**
+ * struct devlink_port_pci_pf_attrs - devlink port's PCI PF attributes
+ * @controller: Associated controller number
+ * @pf: Associated PCI PF number for this port.
+ * @external: when set, indicates if a port is for an external controller
+ */
struct devlink_port_pci_pf_attrs {
- u16 pf; /* Associated PCI PF for this port. */
+ u32 controller;
+ u16 pf;
+ u8 external:1;
};
+/**
+ * struct devlink_port_pci_vf_attrs - devlink port's PCI VF attributes
+ * @controller: Associated controller number
+ * @pf: Associated PCI PF number for this port.
+ * @vf: Associated PCI VF for of the PCI PF for this port.
+ * @external: when set, indicates if a port is for an external controller
+ */
struct devlink_port_pci_vf_attrs {
- u16 pf; /* Associated PCI PF for this port. */
- u16 vf; /* Associated PCI VF for of the PCI PF for this port. */
+ u32 controller;
+ u16 pf;
+ u16 vf;
+ u8 external:1;
+};
+
+/**
+ * struct devlink_port_pci_sf_attrs - devlink port's PCI SF attributes
+ * @controller: Associated controller number
+ * @sf: Associated PCI SF for of the PCI PF for this port.
+ * @pf: Associated PCI PF number for this port.
+ * @external: when set, indicates if a port is for an external controller
+ */
+struct devlink_port_pci_sf_attrs {
+ u32 controller;
+ u32 sf;
+ u16 pf;
+ u8 external:1;
};
+/**
+ * struct devlink_port_attrs - devlink port object
+ * @flavour: flavour of the port
+ * @split: indicates if this is split port
+ * @splittable: indicates if the port can be split.
+ * @lanes: maximum number of lanes the port supports. 0 value is not passed to netlink.
+ * @switch_id: if the port is part of switch, this is buffer with ID, otherwise this is NULL
+ * @phys: physical port attributes
+ * @pci_pf: PCI PF port attributes
+ * @pci_vf: PCI VF port attributes
+ * @pci_sf: PCI SF port attributes
+ */
struct devlink_port_attrs {
- u8 set:1,
- split:1,
- switch_port:1;
+ u8 split:1,
+ splittable:1;
+ u32 lanes;
enum devlink_port_flavour flavour;
struct netdev_phys_item_id switch_id;
union {
struct devlink_port_phys_attrs phys;
struct devlink_port_pci_pf_attrs pci_pf;
struct devlink_port_pci_vf_attrs pci_vf;
+ struct devlink_port_pci_sf_attrs pci_sf;
+ };
+};
+
+struct devlink_rate {
+ struct list_head list;
+ enum devlink_rate_type type;
+ struct devlink *devlink;
+ void *priv;
+ u64 tx_share;
+ u64 tx_max;
+
+ struct devlink_rate *parent;
+ union {
+ struct devlink_port *devlink_port;
+ struct {
+ char *name;
+ refcount_t refcnt;
+ };
};
};
struct devlink_port {
struct list_head list;
- struct list_head param_list;
+ struct list_head region_list;
struct devlink *devlink;
unsigned int index;
- bool registered;
spinlock_t type_lock; /* Protects type and type_dev
* pointer consistency.
*/
@@ -87,7 +128,61 @@ struct devlink_port {
enum devlink_port_type desired_type;
void *type_dev;
struct devlink_port_attrs attrs;
+ u8 attrs_set:1,
+ switch_port:1,
+ registered:1,
+ initialized:1;
struct delayed_work type_warn_dw;
+ struct list_head reporter_list;
+ struct mutex reporters_lock; /* Protects reporter_list */
+
+ struct devlink_rate *devlink_rate;
+ struct devlink_linecard *linecard;
+};
+
+struct devlink_port_new_attrs {
+ enum devlink_port_flavour flavour;
+ unsigned int port_index;
+ u32 controller;
+ u32 sfnum;
+ u16 pfnum;
+ u8 port_index_valid:1,
+ controller_valid:1,
+ sfnum_valid:1;
+};
+
+/**
+ * struct devlink_linecard_ops - Linecard operations
+ * @provision: callback to provision the linecard slot with a certain
+ * type of linecard. As a result of this operation, the
+ * driver is expected to eventually (possibly after
+ * the function call returns) call one of:
+ * devlink_linecard_provision_set()
+ * devlink_linecard_provision_fail()
+ * @unprovision: callback to unprovision the linecard slot. As a result
+ * of this operation, the driver is expected to eventually
+ * (possibly after the function call returns) call one of:
+ * devlink_linecard_provision_clear()
+ * devlink_linecard_provision_fail()
+ * @same_provision: callback to ask the driver if the linecard is already
+ * provisioned in the same way the user is asking for it
+ * to be provisioned.
+ * @types_count: callback to get number of supported types
+ * @types_get: callback to get next type in list
+ */
+struct devlink_linecard_ops {
+ int (*provision)(struct devlink_linecard *linecard, void *priv,
+ const char *type, const void *type_priv,
+ struct netlink_ext_ack *extack);
+ int (*unprovision)(struct devlink_linecard *linecard, void *priv,
+ struct netlink_ext_ack *extack);
+ bool (*same_provision)(struct devlink_linecard *linecard, void *priv,
+ const char *type, const void *type_priv);
+ unsigned int (*types_count)(struct devlink_linecard *linecard,
+ void *priv);
+ void (*types_get)(struct devlink_linecard *linecard,
+ void *priv, unsigned int index, const char **type,
+ const void **type_priv);
};
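
A minimal sketch of how a driver might implement these callbacks, assuming a hypothetical foo driver with a static list of supported linecard types; provisioning completes asynchronously and is reported back through the devlink_linecard_provision_*() helpers declared later in this diff.

static const char * const foo_lc_types[] = { "16x100G", "8x200G" };

static int foo_lc_provision(struct devlink_linecard *linecard, void *priv,
			    const char *type, const void *type_priv,
			    struct netlink_ext_ack *extack)
{
	/* Start (possibly asynchronous) provisioning; on completion the
	 * driver calls devlink_linecard_provision_set(linecard, type),
	 * or devlink_linecard_provision_fail(linecard) on error.
	 */
	return foo_lc_start_provision(priv, type); /* hypothetical helper */
}

static int foo_lc_unprovision(struct devlink_linecard *linecard, void *priv,
			      struct netlink_ext_ack *extack)
{
	return foo_lc_start_unprovision(priv); /* hypothetical helper */
}

static unsigned int foo_lc_types_count(struct devlink_linecard *linecard,
				       void *priv)
{
	return ARRAY_SIZE(foo_lc_types);
}

static void foo_lc_types_get(struct devlink_linecard *linecard, void *priv,
			     unsigned int index, const char **type,
			     const void **type_priv)
{
	*type = foo_lc_types[index];
	*type_priv = NULL; /* no per-type private data in this sketch */
}

static const struct devlink_linecard_ops foo_lc_ops = {
	.provision	= foo_lc_provision,
	.unprovision	= foo_lc_unprovision,
	.types_count	= foo_lc_types_count,
	.types_get	= foo_lc_types_get,
};
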
struct devlink_sb_pool_info {
@@ -303,35 +398,10 @@ devlink_resource_size_params_init(struct devlink_resource_size_params *size_para
typedef u64 devlink_resource_occ_get_t(void *priv);
-/**
- * struct devlink_resource - devlink resource
- * @name: name of the resource
- * @id: id, per devlink instance
- * @size: size of the resource
- * @size_new: updated size of the resource, reload is needed
- * @size_valid: valid in case the total size of the resource is valid
- * including its children
- * @parent: parent resource
- * @size_params: size parameters
- * @list: parent list
- * @resource_list: list of child resources
- */
-struct devlink_resource {
- const char *name;
- u64 id;
- u64 size;
- u64 size_new;
- bool size_valid;
- struct devlink_resource *parent;
- struct devlink_resource_size_params size_params;
- struct list_head list;
- struct list_head resource_list;
- devlink_resource_occ_get_t *occ_get;
- void *occ_get_priv;
-};
-
#define DEVLINK_RESOURCE_ID_PARENT_TOP 0
+#define DEVLINK_RESOURCE_GENERIC_NAME_PORTS "physical_ports"
+
#define __DEVLINK_PARAM_MAX_STRING_VALUE 32
enum devlink_param_type {
DEVLINK_PARAM_TYPE_U8,
@@ -355,6 +425,25 @@ struct devlink_param_gset_ctx {
};
/**
+ * struct devlink_flash_notify - devlink dev flash notify data
+ * @status_msg: current status string
+ * @component: firmware component being updated
+ * @done: amount of work completed out of the total
+ * @total: total amount of work expected to be done
+ * @timeout: expected max timeout in seconds
+ *
+ * These values are passed to userspace to display the current
+ * progress of a firmware update.
+ */
+struct devlink_flash_notify {
+ const char *status_msg;
+ const char *component;
+ unsigned long done;
+ unsigned long total;
+ unsigned long timeout;
+};
+
+/**
* struct devlink_param - devlink configuration parameter data
* @name: name of the parameter
* @generic: indicates if the parameter is generic or driver specific
@@ -389,7 +478,6 @@ struct devlink_param_item {
const struct devlink_param *param;
union devlink_param_value driverinit_value;
bool driverinit_value_valid;
- bool published;
};
enum devlink_param_generic_id {
@@ -403,6 +491,13 @@ enum devlink_param_generic_id {
DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE,
DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_REMOTE_DEV_RESET,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP,
+ DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
+ DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
 /* add new param generic ids above here */
__DEVLINK_PARAM_GENERIC_ID_MAX,
@@ -440,6 +535,27 @@ enum devlink_param_generic_id {
#define DEVLINK_PARAM_GENERIC_ENABLE_ROCE_NAME "enable_roce"
#define DEVLINK_PARAM_GENERIC_ENABLE_ROCE_TYPE DEVLINK_PARAM_TYPE_BOOL
+#define DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_NAME "enable_remote_dev_reset"
+#define DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_ENABLE_ETH_NAME "enable_eth"
+#define DEVLINK_PARAM_GENERIC_ENABLE_ETH_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_ENABLE_RDMA_NAME "enable_rdma"
+#define DEVLINK_PARAM_GENERIC_ENABLE_RDMA_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_ENABLE_VNET_NAME "enable_vnet"
+#define DEVLINK_PARAM_GENERIC_ENABLE_VNET_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_ENABLE_IWARP_NAME "enable_iwarp"
+#define DEVLINK_PARAM_GENERIC_ENABLE_IWARP_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_NAME "io_eq_size"
+#define DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_TYPE DEVLINK_PARAM_TYPE_U32
+
+#define DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_NAME "event_eq_size"
+#define DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_TYPE DEVLINK_PARAM_TYPE_U32
+
#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
{ \
.id = DEVLINK_PARAM_GENERIC_ID_##_id, \
@@ -479,6 +595,8 @@ enum devlink_param_generic_id {
#define DEVLINK_INFO_VERSION_GENERIC_FW "fw"
/* Control processor FW version */
#define DEVLINK_INFO_VERSION_GENERIC_FW_MGMT "fw.mgmt"
+/* FW interface specification version */
+#define DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API "fw.mgmt.api"
/* Data path microcode controlling high-speed packet processing */
#define DEVLINK_INFO_VERSION_GENERIC_FW_APP "fw.app"
/* UNDI software version */
@@ -489,11 +607,68 @@ enum devlink_param_generic_id {
#define DEVLINK_INFO_VERSION_GENERIC_FW_PSID "fw.psid"
/* RoCE FW version */
#define DEVLINK_INFO_VERSION_GENERIC_FW_ROCE "fw.roce"
+/* Firmware bundle identifier */
+#define DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID "fw.bundle_id"
+
+/**
+ * struct devlink_flash_update_params - Flash Update parameters
+ * @fw: pointer to the firmware data to update from
+ * @component: the flash component to update
+ * @overwrite_mask: which types of flash update are supported (may be %0)
+ *
+ * With the exception of fw, drivers must opt in to parameters by
+ * setting the appropriate bit in the supported_flash_update_params field
+ * of their devlink_ops structure.
+ */
+struct devlink_flash_update_params {
+ const struct firmware *fw;
+ const char *component;
+ u32 overwrite_mask;
+};
+
+#define DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK BIT(0)
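
A sketch of the new flash_update calling convention under these definitions: the firmware object arrives pre-requested in params->fw, and a driver opts in to the overwrite mask via supported_flash_update_params (foo_ helpers hypothetical).

static int foo_flash_update(struct devlink *devlink,
			    struct devlink_flash_update_params *params,
			    struct netlink_ext_ack *extack)
{
	void *fdev = devlink_priv(devlink);

	/* The core already holds the firmware; consume data and size. */
	return foo_fw_write(fdev, params->fw->data, params->fw->size,
			    params->overwrite_mask); /* hypothetical helper */
}

static const struct devlink_ops foo_devlink_ops = {
	.supported_flash_update_params =
		DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
	.flash_update = foo_flash_update,
};
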
struct devlink_region;
struct devlink_info_req;
-typedef void devlink_snapshot_data_dest_t(const void *data);
+/**
+ * struct devlink_region_ops - Region operations
+ * @name: region name
+ * @destructor: callback used to free snapshot memory when deleting
+ * @snapshot: callback to request an immediate snapshot. On success,
+ * the data variable must be updated to point to the snapshot data.
+ * The function will be called while the devlink instance lock is
+ * held.
+ * @priv: Pointer to driver private data for the region operation
+ */
+struct devlink_region_ops {
+ const char *name;
+ void (*destructor)(const void *data);
+ int (*snapshot)(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data);
+ void *priv;
+};
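
For instance, a minimal region whose snapshot callback allocates a buffer, fills it from the device, and passes ownership to devlink, which frees it via the destructor (the region size and foo_read_region() are hypothetical):

#include <linux/slab.h>

#define FOO_REGION_SIZE 4096

static int foo_region_snapshot(struct devlink *devlink,
			       const struct devlink_region_ops *ops,
			       struct netlink_ext_ack *extack, u8 **data)
{
	u8 *snapshot = kzalloc(FOO_REGION_SIZE, GFP_KERNEL);

	if (!snapshot)
		return -ENOMEM;

	foo_read_region(devlink_priv(devlink), snapshot, FOO_REGION_SIZE);

	*data = snapshot; /* devlink takes ownership on success */
	return 0;
}

static const struct devlink_region_ops foo_region_ops = {
	.name		= "foo-registers",
	.snapshot	= foo_region_snapshot,
	.destructor	= kfree,	/* matches the kzalloc() above */
};
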
+
+/**
+ * struct devlink_port_region_ops - Region operations for a port
+ * @name: region name
+ * @destructor: callback used to free snapshot memory when deleting
+ * @snapshot: callback to request an immediate snapshot. On success,
+ * the data variable must be updated to point to the snapshot data.
+ * The function will be called while the devlink instance lock is
+ * held.
+ * @priv: Pointer to driver private data for the region operation
+ */
+struct devlink_port_region_ops {
+ const char *name;
+ void (*destructor)(const void *data);
+ int (*snapshot)(struct devlink_port *port,
+ const struct devlink_port_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data);
+ void *priv;
+};
struct devlink_fmsg;
struct devlink_health_reporter;
@@ -511,6 +686,7 @@ enum devlink_health_reporter_state {
* @dump: callback to dump an object
* if priv_ctx is NULL, run a full dump
* @diagnose: callback to diagnose the current status
+ * @test: callback to trigger a test event
*/
struct devlink_health_reporter_ops {
@@ -523,6 +699,51 @@ struct devlink_health_reporter_ops {
int (*diagnose)(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg,
struct netlink_ext_ack *extack);
+ int (*test)(struct devlink_health_reporter *reporter,
+ struct netlink_ext_ack *extack);
+};
+
+/**
+ * struct devlink_trap_metadata - Packet trap metadata.
+ * @trap_name: Trap name.
+ * @trap_group_name: Trap group name.
+ * @input_dev: Input netdevice.
+ * @dev_tracker: refcount tracker for @input_dev.
+ * @fa_cookie: Flow action user cookie.
+ * @trap_type: Trap type.
+ */
+struct devlink_trap_metadata {
+ const char *trap_name;
+ const char *trap_group_name;
+
+ struct net_device *input_dev;
+ netdevice_tracker dev_tracker;
+
+ const struct flow_action_cookie *fa_cookie;
+ enum devlink_trap_type trap_type;
+};
+
+/**
+ * struct devlink_trap_policer - Immutable packet trap policer attributes.
+ * @id: Policer identifier.
+ * @init_rate: Initial rate in packets / sec.
+ * @init_burst: Initial burst size in packets.
+ * @max_rate: Maximum rate.
+ * @min_rate: Minimum rate.
+ * @max_burst: Maximum burst size.
+ * @min_burst: Minimum burst size.
+ *
+ * Describes immutable attributes of packet trap policers that drivers register
+ * with devlink.
+ */
+struct devlink_trap_policer {
+ u32 id;
+ u64 init_rate;
+ u64 init_burst;
+ u64 max_rate;
+ u64 min_rate;
+ u64 max_burst;
+ u64 min_burst;
};
/**
@@ -530,6 +751,7 @@ struct devlink_health_reporter_ops {
* @name: Trap group name.
* @id: Trap group identifier.
* @generic: Whether the trap group is generic or not.
+ * @init_policer_id: Initial policer identifier.
*
* Describes immutable attributes of packet trap groups that drivers register
* with devlink.
@@ -538,9 +760,11 @@ struct devlink_trap_group {
const char *name;
u16 id;
bool generic;
+ u32 init_policer_id;
};
#define DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT BIT(0)
+#define DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE BIT(1)
/**
* struct devlink_trap - Immutable packet trap attributes.
@@ -549,7 +773,7 @@ struct devlink_trap_group {
* @generic: Whether the trap is generic or not.
* @id: Trap identifier.
* @name: Trap name.
- * @group: Immutable packet trap group attributes.
+ * @init_group_id: Initial group identifier.
* @metadata_cap: Metadata types that can be provided by the trap.
*
* Describes immutable attributes of packet traps that drivers register with
@@ -561,7 +785,7 @@ struct devlink_trap {
bool generic;
u16 id;
const char *name;
- struct devlink_trap_group group;
+ u16 init_group_id;
u32 metadata_cap;
};
@@ -596,6 +820,71 @@ enum devlink_trap_generic_id {
DEVLINK_TRAP_GENERIC_ID_NON_ROUTABLE,
DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
DEVLINK_TRAP_GENERIC_ID_OVERLAY_SMAC_MC,
+ DEVLINK_TRAP_GENERIC_ID_INGRESS_FLOW_ACTION_DROP,
+ DEVLINK_TRAP_GENERIC_ID_EGRESS_FLOW_ACTION_DROP,
+ DEVLINK_TRAP_GENERIC_ID_STP,
+ DEVLINK_TRAP_GENERIC_ID_LACP,
+ DEVLINK_TRAP_GENERIC_ID_LLDP,
+ DEVLINK_TRAP_GENERIC_ID_IGMP_QUERY,
+ DEVLINK_TRAP_GENERIC_ID_IGMP_V1_REPORT,
+ DEVLINK_TRAP_GENERIC_ID_IGMP_V2_REPORT,
+ DEVLINK_TRAP_GENERIC_ID_IGMP_V3_REPORT,
+ DEVLINK_TRAP_GENERIC_ID_IGMP_V2_LEAVE,
+ DEVLINK_TRAP_GENERIC_ID_MLD_QUERY,
+ DEVLINK_TRAP_GENERIC_ID_MLD_V1_REPORT,
+ DEVLINK_TRAP_GENERIC_ID_MLD_V2_REPORT,
+ DEVLINK_TRAP_GENERIC_ID_MLD_V1_DONE,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_DHCP,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_DHCP,
+ DEVLINK_TRAP_GENERIC_ID_ARP_REQUEST,
+ DEVLINK_TRAP_GENERIC_ID_ARP_RESPONSE,
+ DEVLINK_TRAP_GENERIC_ID_ARP_OVERLAY,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_NEIGH_SOLICIT,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_NEIGH_ADVERT,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_BFD,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_BFD,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_OSPF,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_OSPF,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_BGP,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_BGP,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_VRRP,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_VRRP,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_PIM,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_PIM,
+ DEVLINK_TRAP_GENERIC_ID_UC_LB,
+ DEVLINK_TRAP_GENERIC_ID_LOCAL_ROUTE,
+ DEVLINK_TRAP_GENERIC_ID_EXTERNAL_ROUTE,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_UC_DIP_LINK_LOCAL_SCOPE,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_DIP_ALL_NODES,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_DIP_ALL_ROUTERS,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_ROUTER_SOLICIT,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_ROUTER_ADVERT,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_REDIRECT,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_ROUTER_ALERT,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_ROUTER_ALERT,
+ DEVLINK_TRAP_GENERIC_ID_PTP_EVENT,
+ DEVLINK_TRAP_GENERIC_ID_PTP_GENERAL,
+ DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_SAMPLE,
+ DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_TRAP,
+ DEVLINK_TRAP_GENERIC_ID_EARLY_DROP,
+ DEVLINK_TRAP_GENERIC_ID_VXLAN_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_LLC_SNAP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_VLAN_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_PPPOE_PPP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_MPLS_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_ARP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_IP_1_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_IP_N_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_GRE_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_UDP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_TCP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_IPSEC_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_SCTP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_DCCP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_GTP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_ESP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_NEXTHOP,
+ DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER,
/* Add new generic trap IDs above */
__DEVLINK_TRAP_GENERIC_ID_MAX,
@@ -608,8 +897,30 @@ enum devlink_trap_generic_id {
enum devlink_trap_group_generic_id {
DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS,
DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_L3_EXCEPTIONS,
DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS,
DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_DROPS,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_STP,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_LACP,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_LLDP,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_MC_SNOOPING,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_DHCP,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_NEIGH_DISCOVERY,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_BFD,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_OSPF,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_BGP,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_VRRP,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_PIM,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_UC_LB,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_LOCAL_DELIVERY,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_EXTERNAL_DELIVERY,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_IPV6,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_EVENT,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_GENERAL,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_SAMPLE,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_TRAP,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_PARSER_ERROR_DROPS,
/* Add new generic trap group IDs above */
__DEVLINK_TRAP_GROUP_GENERIC_ID_MAX,
@@ -671,28 +982,203 @@ enum devlink_trap_group_generic_id {
"decap_error"
#define DEVLINK_TRAP_GENERIC_NAME_OVERLAY_SMAC_MC \
"overlay_smac_is_mc"
+#define DEVLINK_TRAP_GENERIC_NAME_INGRESS_FLOW_ACTION_DROP \
+ "ingress_flow_action_drop"
+#define DEVLINK_TRAP_GENERIC_NAME_EGRESS_FLOW_ACTION_DROP \
+ "egress_flow_action_drop"
+#define DEVLINK_TRAP_GENERIC_NAME_STP \
+ "stp"
+#define DEVLINK_TRAP_GENERIC_NAME_LACP \
+ "lacp"
+#define DEVLINK_TRAP_GENERIC_NAME_LLDP \
+ "lldp"
+#define DEVLINK_TRAP_GENERIC_NAME_IGMP_QUERY \
+ "igmp_query"
+#define DEVLINK_TRAP_GENERIC_NAME_IGMP_V1_REPORT \
+ "igmp_v1_report"
+#define DEVLINK_TRAP_GENERIC_NAME_IGMP_V2_REPORT \
+ "igmp_v2_report"
+#define DEVLINK_TRAP_GENERIC_NAME_IGMP_V3_REPORT \
+ "igmp_v3_report"
+#define DEVLINK_TRAP_GENERIC_NAME_IGMP_V2_LEAVE \
+ "igmp_v2_leave"
+#define DEVLINK_TRAP_GENERIC_NAME_MLD_QUERY \
+ "mld_query"
+#define DEVLINK_TRAP_GENERIC_NAME_MLD_V1_REPORT \
+ "mld_v1_report"
+#define DEVLINK_TRAP_GENERIC_NAME_MLD_V2_REPORT \
+ "mld_v2_report"
+#define DEVLINK_TRAP_GENERIC_NAME_MLD_V1_DONE \
+ "mld_v1_done"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_DHCP \
+ "ipv4_dhcp"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_DHCP \
+ "ipv6_dhcp"
+#define DEVLINK_TRAP_GENERIC_NAME_ARP_REQUEST \
+ "arp_request"
+#define DEVLINK_TRAP_GENERIC_NAME_ARP_RESPONSE \
+ "arp_response"
+#define DEVLINK_TRAP_GENERIC_NAME_ARP_OVERLAY \
+ "arp_overlay"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_NEIGH_SOLICIT \
+ "ipv6_neigh_solicit"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_NEIGH_ADVERT \
+ "ipv6_neigh_advert"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_BFD \
+ "ipv4_bfd"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_BFD \
+ "ipv6_bfd"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_OSPF \
+ "ipv4_ospf"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_OSPF \
+ "ipv6_ospf"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_BGP \
+ "ipv4_bgp"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_BGP \
+ "ipv6_bgp"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_VRRP \
+ "ipv4_vrrp"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_VRRP \
+ "ipv6_vrrp"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_PIM \
+ "ipv4_pim"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_PIM \
+ "ipv6_pim"
+#define DEVLINK_TRAP_GENERIC_NAME_UC_LB \
+ "uc_loopback"
+#define DEVLINK_TRAP_GENERIC_NAME_LOCAL_ROUTE \
+ "local_route"
+#define DEVLINK_TRAP_GENERIC_NAME_EXTERNAL_ROUTE \
+ "external_route"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_UC_DIP_LINK_LOCAL_SCOPE \
+ "ipv6_uc_dip_link_local_scope"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_DIP_ALL_NODES \
+ "ipv6_dip_all_nodes"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_DIP_ALL_ROUTERS \
+ "ipv6_dip_all_routers"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_ROUTER_SOLICIT \
+ "ipv6_router_solicit"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_ROUTER_ADVERT \
+ "ipv6_router_advert"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_REDIRECT \
+ "ipv6_redirect"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_ROUTER_ALERT \
+ "ipv4_router_alert"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_ROUTER_ALERT \
+ "ipv6_router_alert"
+#define DEVLINK_TRAP_GENERIC_NAME_PTP_EVENT \
+ "ptp_event"
+#define DEVLINK_TRAP_GENERIC_NAME_PTP_GENERAL \
+ "ptp_general"
+#define DEVLINK_TRAP_GENERIC_NAME_FLOW_ACTION_SAMPLE \
+ "flow_action_sample"
+#define DEVLINK_TRAP_GENERIC_NAME_FLOW_ACTION_TRAP \
+ "flow_action_trap"
+#define DEVLINK_TRAP_GENERIC_NAME_EARLY_DROP \
+ "early_drop"
+#define DEVLINK_TRAP_GENERIC_NAME_VXLAN_PARSING \
+ "vxlan_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_LLC_SNAP_PARSING \
+ "llc_snap_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_VLAN_PARSING \
+ "vlan_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_PPPOE_PPP_PARSING \
+ "pppoe_ppp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_MPLS_PARSING \
+ "mpls_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_ARP_PARSING \
+ "arp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_IP_1_PARSING \
+ "ip_1_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_IP_N_PARSING \
+ "ip_n_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_GRE_PARSING \
+ "gre_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_UDP_PARSING \
+ "udp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_TCP_PARSING \
+ "tcp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_IPSEC_PARSING \
+ "ipsec_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_SCTP_PARSING \
+ "sctp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_DCCP_PARSING \
+ "dccp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_GTP_PARSING \
+ "gtp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_ESP_PARSING \
+ "esp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_BLACKHOLE_NEXTHOP \
+ "blackhole_nexthop"
+#define DEVLINK_TRAP_GENERIC_NAME_DMAC_FILTER \
+ "dmac_filter"
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_L2_DROPS \
"l2_drops"
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_L3_DROPS \
"l3_drops"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_L3_EXCEPTIONS \
+ "l3_exceptions"
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_BUFFER_DROPS \
"buffer_drops"
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_TUNNEL_DROPS \
"tunnel_drops"
-
-#define DEVLINK_TRAP_GENERIC(_type, _init_action, _id, _group, _metadata_cap) \
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_ACL_DROPS \
+ "acl_drops"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_STP \
+ "stp"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_LACP \
+ "lacp"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_LLDP \
+ "lldp"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_MC_SNOOPING \
+ "mc_snooping"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_DHCP \
+ "dhcp"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_NEIGH_DISCOVERY \
+ "neigh_discovery"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_BFD \
+ "bfd"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_OSPF \
+ "ospf"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_BGP \
+ "bgp"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_VRRP \
+ "vrrp"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_PIM \
+ "pim"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_UC_LB \
+ "uc_loopback"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_LOCAL_DELIVERY \
+ "local_delivery"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_EXTERNAL_DELIVERY \
+ "external_delivery"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_IPV6 \
+ "ipv6"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_PTP_EVENT \
+ "ptp_event"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_PTP_GENERAL \
+ "ptp_general"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_ACL_SAMPLE \
+ "acl_sample"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_ACL_TRAP \
+ "acl_trap"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_PARSER_ERROR_DROPS \
+ "parser_error_drops"
+
+#define DEVLINK_TRAP_GENERIC(_type, _init_action, _id, _group_id, \
+ _metadata_cap) \
{ \
.type = DEVLINK_TRAP_TYPE_##_type, \
.init_action = DEVLINK_TRAP_ACTION_##_init_action, \
.generic = true, \
.id = DEVLINK_TRAP_GENERIC_ID_##_id, \
.name = DEVLINK_TRAP_GENERIC_NAME_##_id, \
- .group = _group, \
+ .init_group_id = _group_id, \
.metadata_cap = _metadata_cap, \
}
-#define DEVLINK_TRAP_DRIVER(_type, _init_action, _id, _name, _group, \
+#define DEVLINK_TRAP_DRIVER(_type, _init_action, _id, _name, _group_id, \
_metadata_cap) \
{ \
.type = DEVLINK_TRAP_TYPE_##_type, \
@@ -700,27 +1186,56 @@ enum devlink_trap_group_generic_id {
.generic = false, \
.id = _id, \
.name = _name, \
- .group = _group, \
+ .init_group_id = _group_id, \
.metadata_cap = _metadata_cap, \
}
-#define DEVLINK_TRAP_GROUP_GENERIC(_id) \
+#define DEVLINK_TRAP_GROUP_GENERIC(_id, _policer_id) \
{ \
.name = DEVLINK_TRAP_GROUP_GENERIC_NAME_##_id, \
.id = DEVLINK_TRAP_GROUP_GENERIC_ID_##_id, \
.generic = true, \
+ .init_policer_id = _policer_id, \
+ }
+
+#define DEVLINK_TRAP_POLICER(_id, _rate, _burst, _max_rate, _min_rate, \
+ _max_burst, _min_burst) \
+ { \
+ .id = _id, \
+ .init_rate = _rate, \
+ .init_burst = _burst, \
+ .max_rate = _max_rate, \
+ .min_rate = _min_rate, \
+ .max_burst = _max_burst, \
+ .min_burst = _min_burst, \
}
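
Tying the three macros together, a hedged sketch of a driver's static trap tables: the policer is referenced by the group's init_policer_id, and the trap references its group by init_group_id (all values illustrative only).

static const struct devlink_trap_policer foo_trap_policers_arr[] = {
	/* id, init_rate, init_burst, max_rate, min_rate, max_burst, min_burst */
	DEVLINK_TRAP_POLICER(1, 1000, 128, 8000, 100, 1024, 16),
};

static const struct devlink_trap_group foo_trap_groups_arr[] = {
	/* Group bound to policer 1 at registration time. */
	DEVLINK_TRAP_GROUP_GENERIC(ACL_DROPS, 1),
};

static const struct devlink_trap foo_traps_arr[] = {
	DEVLINK_TRAP_GENERIC(DROP, DROP, INGRESS_FLOW_ACTION_DROP,
			     DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_DROPS,
			     DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT),
};

Registration would typically go policers first, then groups, then traps, using the devl_trap_policers_register(), devl_trap_groups_register() and devl_traps_register() declarations near the end of this diff.
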
+enum {
+ /* device supports reload operations */
+ DEVLINK_F_RELOAD = 1UL << 0,
+};
+
struct devlink_ops {
+ /**
+ * @supported_flash_update_params:
+ * mask of parameters supported by the driver's .flash_update
+	 * implementation.
+ */
+ u32 supported_flash_update_params;
+ unsigned long reload_actions;
+ unsigned long reload_limits;
int (*reload_down)(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
struct netlink_ext_ack *extack);
- int (*reload_up)(struct devlink *devlink,
+ int (*reload_up)(struct devlink *devlink, enum devlink_reload_action action,
+ enum devlink_reload_limit limit, u32 *actions_performed,
struct netlink_ext_ack *extack);
int (*port_type_set)(struct devlink_port *devlink_port,
enum devlink_port_type port_type);
- int (*port_split)(struct devlink *devlink, unsigned int port_index,
+ int (*port_split)(struct devlink *devlink, struct devlink_port *port,
unsigned int count, struct netlink_ext_ack *extack);
- int (*port_unsplit)(struct devlink *devlink, unsigned int port_index,
+ int (*port_unsplit)(struct devlink *devlink, struct devlink_port *port,
struct netlink_ext_ack *extack);
int (*sb_pool_get)(struct devlink *devlink, unsigned int sb_index,
u16 pool_index,
@@ -772,8 +1287,15 @@ struct devlink_ops {
struct netlink_ext_ack *extack);
int (*info_get)(struct devlink *devlink, struct devlink_info_req *req,
struct netlink_ext_ack *extack);
- int (*flash_update)(struct devlink *devlink, const char *file_name,
- const char *component,
+ /**
+ * @flash_update: Device flash update function
+ *
+ * Used to perform a flash update for the device. The set of
+ * parameters supported by the driver should be set in
+ * supported_flash_update_params.
+ */
+ int (*flash_update)(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack);
/**
* @trap_init: Trap initialization function.
@@ -798,7 +1320,8 @@ struct devlink_ops {
*/
int (*trap_action_set)(struct devlink *devlink,
const struct devlink_trap *trap,
- enum devlink_trap_action action);
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack);
/**
* @trap_group_init: Trap group initialization function.
*
@@ -807,50 +1330,250 @@ struct devlink_ops {
*/
int (*trap_group_init)(struct devlink *devlink,
const struct devlink_trap_group *group);
-};
-
-static inline void *devlink_priv(struct devlink *devlink)
-{
- BUG_ON(!devlink);
- return &devlink->priv;
-}
-
-static inline struct devlink *priv_to_devlink(void *priv)
-{
- BUG_ON(!priv);
- return container_of(priv, struct devlink, priv);
-}
+ /**
+ * @trap_group_set: Trap group parameters set function.
+ *
+ * Note: @policer can be NULL when a policer is being unbound from
+ * @group.
+ */
+ int (*trap_group_set)(struct devlink *devlink,
+ const struct devlink_trap_group *group,
+ const struct devlink_trap_policer *policer,
+ struct netlink_ext_ack *extack);
+ /**
+ * @trap_group_action_set: Trap group action set function.
+ *
+ * If this callback is populated, it will take precedence over looping
+ * over all traps in a group and calling .trap_action_set().
+ */
+ int (*trap_group_action_set)(struct devlink *devlink,
+ const struct devlink_trap_group *group,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack);
+ /**
+ * @trap_drop_counter_get: Trap drop counter get function.
+ *
+	 * Should be used by device drivers to report the number of packets
+	 * that have been dropped and cannot be passed to the devlink
+ * subsystem by the underlying device.
+ */
+ int (*trap_drop_counter_get)(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ u64 *p_drops);
+ /**
+ * @trap_policer_init: Trap policer initialization function.
+ *
+ * Should be used by device drivers to initialize the trap policer in
+ * the underlying device.
+ */
+ int (*trap_policer_init)(struct devlink *devlink,
+ const struct devlink_trap_policer *policer);
+ /**
+ * @trap_policer_fini: Trap policer de-initialization function.
+ *
+ * Should be used by device drivers to de-initialize the trap policer
+ * in the underlying device.
+ */
+ void (*trap_policer_fini)(struct devlink *devlink,
+ const struct devlink_trap_policer *policer);
+ /**
+ * @trap_policer_set: Trap policer parameters set function.
+ */
+ int (*trap_policer_set)(struct devlink *devlink,
+ const struct devlink_trap_policer *policer,
+ u64 rate, u64 burst,
+ struct netlink_ext_ack *extack);
+ /**
+ * @trap_policer_counter_get: Trap policer counter get function.
+ *
+	 * Should be used by device drivers to report the number of packets dropped
+ * by the policer.
+ */
+ int (*trap_policer_counter_get)(struct devlink *devlink,
+ const struct devlink_trap_policer *policer,
+ u64 *p_drops);
+ /**
+ * @port_function_hw_addr_get: Port function's hardware address get function.
+ *
+ * Should be used by device drivers to report the hardware address of a function managed
+	 * by the devlink port. The driver should return -EOPNOTSUPP if it doesn't support port
+ * function handling for a particular port.
+ *
+ * Note: @extack can be NULL when port notifier queries the port function.
+ */
+ int (*port_function_hw_addr_get)(struct devlink_port *port, u8 *hw_addr,
+ int *hw_addr_len,
+ struct netlink_ext_ack *extack);
+ /**
+ * @port_function_hw_addr_set: Port function's hardware address set function.
+ *
+ * Should be used by device drivers to set the hardware address of a function managed
+	 * by the devlink port. The driver should return -EOPNOTSUPP if it doesn't support port
+ * function handling for a particular port.
+ */
+ int (*port_function_hw_addr_set)(struct devlink_port *port,
+ const u8 *hw_addr, int hw_addr_len,
+ struct netlink_ext_ack *extack);
+ /**
+ * port_new() - Add a new port function of a specified flavor
+ * @devlink: Devlink instance
+ * @attrs: attributes of the new port
+ * @extack: extack for reporting error messages
+ * @new_port_index: index of the new port
+ *
+ * Devlink core will call this device driver function upon user request
+ * to create a new port function of a specified flavor and optional
+	 * attributes.
+ *
+ * Notes:
+ * - Called without devlink instance lock being held. Drivers must
+	 *   implement their own means of synchronization.
+ * - On success, drivers must register a port with devlink core
+ *
+ * Return: 0 on success, negative value otherwise.
+ */
+ int (*port_new)(struct devlink *devlink,
+ const struct devlink_port_new_attrs *attrs,
+ struct netlink_ext_ack *extack,
+ unsigned int *new_port_index);
+ /**
+ * port_del() - Delete a port function
+ * @devlink: Devlink instance
+ * @port_index: port function index to delete
+ * @extack: extack for reporting error messages
+ *
+ * Devlink core will call this device driver function upon user request
+	 * to delete a previously created port function.
+ *
+ * Notes:
+ * - Called without devlink instance lock being held. Drivers must
+	 *   implement their own means of synchronization.
+ * - On success, drivers must unregister the corresponding devlink
+ * port
+ *
+ * Return: 0 on success, negative value otherwise.
+ */
+ int (*port_del)(struct devlink *devlink, unsigned int port_index,
+ struct netlink_ext_ack *extack);
+ /**
+ * port_fn_state_get() - Get the state of a port function
+ * @devlink: Devlink instance
+ * @port: The devlink port
+ * @state: Admin configured state
+ * @opstate: Current operational state
+ * @extack: extack for reporting error messages
+ *
+ * Reports the admin and operational state of a devlink port function
+ *
+ * Return: 0 on success, negative value otherwise.
+ */
+ int (*port_fn_state_get)(struct devlink_port *port,
+ enum devlink_port_fn_state *state,
+ enum devlink_port_fn_opstate *opstate,
+ struct netlink_ext_ack *extack);
+ /**
+ * port_fn_state_set() - Set the admin state of a port function
+ * @devlink: Devlink instance
+ * @port: The devlink port
+ * @state: Admin state
+ * @extack: extack for reporting error messages
+ *
+ * Set the admin state of a devlink port function
+ *
+ * Return: 0 on success, negative value otherwise.
+ */
+ int (*port_fn_state_set)(struct devlink_port *port,
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack);
-static inline struct devlink_port *
-netdev_to_devlink_port(struct net_device *dev)
-{
- if (dev->netdev_ops->ndo_get_devlink_port)
- return dev->netdev_ops->ndo_get_devlink_port(dev);
- return NULL;
-}
+ /**
+ * Rate control callbacks.
+ */
+ int (*rate_leaf_tx_share_set)(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_share, struct netlink_ext_ack *extack);
+ int (*rate_leaf_tx_max_set)(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_max, struct netlink_ext_ack *extack);
+ int (*rate_node_tx_share_set)(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_share, struct netlink_ext_ack *extack);
+ int (*rate_node_tx_max_set)(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_max, struct netlink_ext_ack *extack);
+ int (*rate_node_new)(struct devlink_rate *rate_node, void **priv,
+ struct netlink_ext_ack *extack);
+ int (*rate_node_del)(struct devlink_rate *rate_node, void *priv,
+ struct netlink_ext_ack *extack);
+ int (*rate_leaf_parent_set)(struct devlink_rate *child,
+ struct devlink_rate *parent,
+ void *priv_child, void *priv_parent,
+ struct netlink_ext_ack *extack);
+ int (*rate_node_parent_set)(struct devlink_rate *child,
+ struct devlink_rate *parent,
+ void *priv_child, void *priv_parent,
+ struct netlink_ext_ack *extack);
+ /**
+	 * selftest_check() - queries if a selftest is supported
+ * @devlink: devlink instance
+ * @id: test index
+ * @extack: extack for reporting error messages
+ *
+ * Return: true if test is supported by the driver
+ */
+ bool (*selftest_check)(struct devlink *devlink, unsigned int id,
+ struct netlink_ext_ack *extack);
+ /**
+ * selftest_run() - Runs a selftest
+ * @devlink: devlink instance
+ * @id: test index
+ * @extack: extack for reporting error messages
+ *
+ * Return: status of the test
+ */
+ enum devlink_selftest_status
+ (*selftest_run)(struct devlink *devlink, unsigned int id,
+ struct netlink_ext_ack *extack);
+};
-static inline struct devlink *netdev_to_devlink(struct net_device *dev)
-{
- struct devlink_port *devlink_port = netdev_to_devlink_port(dev);
+void *devlink_priv(struct devlink *devlink);
+struct devlink *priv_to_devlink(void *priv);
+struct device *devlink_to_dev(const struct devlink *devlink);
- if (devlink_port)
- return devlink_port->devlink;
- return NULL;
-}
+/* Devlink instance explicit locking */
+void devl_lock(struct devlink *devlink);
+int devl_trylock(struct devlink *devlink);
+void devl_unlock(struct devlink *devlink);
+void devl_assert_locked(struct devlink *devlink);
+bool devl_lock_is_held(struct devlink *devlink);
struct ib_device;
struct net *devlink_net(const struct devlink *devlink);
-void devlink_net_set(struct devlink *devlink, struct net *net);
-struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size);
-int devlink_register(struct devlink *devlink, struct device *dev);
+/* This call is intended for software devices that can create
+ * devlink instances in namespaces other than init_net.
+ *
+ * Drivers that operate on real HW must use devlink_alloc() instead.
+ */
+struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
+ size_t priv_size, struct net *net,
+ struct device *dev);
+static inline struct devlink *devlink_alloc(const struct devlink_ops *ops,
+ size_t priv_size,
+ struct device *dev)
+{
+ return devlink_alloc_ns(ops, priv_size, &init_net, dev);
+}
+void devlink_set_features(struct devlink *devlink, u64 features);
+void devlink_register(struct devlink *devlink);
void devlink_unregister(struct devlink *devlink);
-void devlink_reload_enable(struct devlink *devlink);
-void devlink_reload_disable(struct devlink *devlink);
void devlink_free(struct devlink *devlink);
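
Taken together, these declarations sketch the reworked instance lifecycle: allocation now takes the parent device, registration merely publishes the instance, and reload capability is advertised via devlink_set_features(). A hedged probe/remove outline (foo_ names hypothetical, foo_devlink_ops as in the earlier flash sketch):

struct foo_priv { int placeholder; };	/* hypothetical driver state */

static int foo_probe(struct device *dev)
{
	struct devlink *devlink;

	devlink = devlink_alloc(&foo_devlink_ops, sizeof(struct foo_priv),
				dev);
	if (!devlink)
		return -ENOMEM;

	/* ... register ports, params, regions, traps ... */

	devlink_set_features(devlink, DEVLINK_F_RELOAD);
	devlink_register(devlink); /* publish last; registration cannot fail */
	return 0;
}

static void foo_remove(struct devlink *devlink)
{
	devlink_unregister(devlink); /* unpublish first */
	/* ... unregister ports, params, regions, traps ... */
	devlink_free(devlink);
}
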
+void devlink_port_init(struct devlink *devlink,
+ struct devlink_port *devlink_port);
+void devlink_port_fini(struct devlink_port *devlink_port);
+int devl_port_register(struct devlink *devlink,
+ struct devlink_port *devlink_port,
+ unsigned int port_index);
int devlink_port_register(struct devlink *devlink,
struct devlink_port *devlink_port,
unsigned int port_index);
+void devl_port_unregister(struct devlink_port *devlink_port);
void devlink_port_unregister(struct devlink_port *devlink_port);
void devlink_port_type_eth_set(struct devlink_port *devlink_port,
struct net_device *netdev);
@@ -858,32 +1581,50 @@ void devlink_port_type_ib_set(struct devlink_port *devlink_port,
struct ib_device *ibdev);
void devlink_port_type_clear(struct devlink_port *devlink_port);
void devlink_port_attrs_set(struct devlink_port *devlink_port,
- enum devlink_port_flavour flavour,
- u32 port_number, bool split,
- u32 split_subport_number,
- const unsigned char *switch_id,
- unsigned char switch_id_len);
-void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port,
- const unsigned char *switch_id,
- unsigned char switch_id_len, u16 pf);
-void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port,
- const unsigned char *switch_id,
- unsigned char switch_id_len,
- u16 pf, u16 vf);
+ struct devlink_port_attrs *devlink_port_attrs);
+void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u32 controller,
+ u16 pf, bool external);
+void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 controller,
+ u16 pf, u16 vf, bool external);
+void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port,
+ u32 controller, u16 pf, u32 sf,
+ bool external);
+int devl_rate_leaf_create(struct devlink_port *port, void *priv);
+void devl_rate_leaf_destroy(struct devlink_port *devlink_port);
+void devl_rate_nodes_destroy(struct devlink *devlink);
+void devlink_port_linecard_set(struct devlink_port *devlink_port,
+ struct devlink_linecard *linecard);
+struct devlink_linecard *
+devlink_linecard_create(struct devlink *devlink, unsigned int linecard_index,
+ const struct devlink_linecard_ops *ops, void *priv);
+void devlink_linecard_destroy(struct devlink_linecard *linecard);
+void devlink_linecard_provision_set(struct devlink_linecard *linecard,
+ const char *type);
+void devlink_linecard_provision_clear(struct devlink_linecard *linecard);
+void devlink_linecard_provision_fail(struct devlink_linecard *linecard);
+void devlink_linecard_activate(struct devlink_linecard *linecard);
+void devlink_linecard_deactivate(struct devlink_linecard *linecard);
+void devlink_linecard_nested_dl_set(struct devlink_linecard *linecard,
+ struct devlink *nested_devlink);
+int devl_sb_register(struct devlink *devlink, unsigned int sb_index,
+ u32 size, u16 ingress_pools_count,
+ u16 egress_pools_count, u16 ingress_tc_count,
+ u16 egress_tc_count);
int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
u32 size, u16 ingress_pools_count,
u16 egress_pools_count, u16 ingress_tc_count,
u16 egress_tc_count);
+void devl_sb_unregister(struct devlink *devlink, unsigned int sb_index);
void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index);
-int devlink_dpipe_table_register(struct devlink *devlink,
- const char *table_name,
- struct devlink_dpipe_table_ops *table_ops,
- void *priv, bool counter_control_extern);
-void devlink_dpipe_table_unregister(struct devlink *devlink,
- const char *table_name);
-int devlink_dpipe_headers_register(struct devlink *devlink,
- struct devlink_dpipe_headers *dpipe_headers);
-void devlink_dpipe_headers_unregister(struct devlink *devlink);
+int devl_dpipe_table_register(struct devlink *devlink,
+ const char *table_name,
+ struct devlink_dpipe_table_ops *table_ops,
+ void *priv, bool counter_control_extern);
+void devl_dpipe_table_unregister(struct devlink *devlink,
+ const char *table_name);
+void devl_dpipe_headers_register(struct devlink *devlink,
+ struct devlink_dpipe_headers *dpipe_headers);
+void devl_dpipe_headers_unregister(struct devlink *devlink);
bool devlink_dpipe_table_counter_enabled(struct devlink *devlink,
const char *table_name);
int devlink_dpipe_entry_ctx_prepare(struct devlink_dpipe_dump_ctx *dump_ctx);
@@ -899,24 +1640,40 @@ extern struct devlink_dpipe_header devlink_dpipe_header_ethernet;
extern struct devlink_dpipe_header devlink_dpipe_header_ipv4;
extern struct devlink_dpipe_header devlink_dpipe_header_ipv6;
+int devl_resource_register(struct devlink *devlink,
+ const char *resource_name,
+ u64 resource_size,
+ u64 resource_id,
+ u64 parent_resource_id,
+ const struct devlink_resource_size_params *size_params);
int devlink_resource_register(struct devlink *devlink,
const char *resource_name,
u64 resource_size,
u64 resource_id,
u64 parent_resource_id,
const struct devlink_resource_size_params *size_params);
-void devlink_resources_unregister(struct devlink *devlink,
- struct devlink_resource *resource);
-int devlink_resource_size_get(struct devlink *devlink,
- u64 resource_id,
- u64 *p_resource_size);
+void devl_resources_unregister(struct devlink *devlink);
+void devlink_resources_unregister(struct devlink *devlink);
+int devl_resource_size_get(struct devlink *devlink,
+ u64 resource_id,
+ u64 *p_resource_size);
+int devl_dpipe_table_resource_set(struct devlink *devlink,
+ const char *table_name, u64 resource_id,
+ u64 resource_units);
int devlink_dpipe_table_resource_set(struct devlink *devlink,
const char *table_name, u64 resource_id,
u64 resource_units);
+void devl_resource_occ_get_register(struct devlink *devlink,
+ u64 resource_id,
+ devlink_resource_occ_get_t *occ_get,
+ void *occ_get_priv);
void devlink_resource_occ_get_register(struct devlink *devlink,
u64 resource_id,
devlink_resource_occ_get_t *occ_get,
void *occ_get_priv);
+void devl_resource_occ_get_unregister(struct devlink *devlink,
+ u64 resource_id);
+
void devlink_resource_occ_get_unregister(struct devlink *devlink,
u64 resource_id);
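
A sketch under the new locked (devl_) API: register a resource and hook an occupancy getter, assuming the caller holds the instance lock via devl_lock() (the IDs, sizes and foo_ helpers are hypothetical).

#define FOO_RESOURCE_ID_FDB 1

static u64 foo_fdb_occ_get(void *priv)
{
	return foo_fdb_count(priv); /* hypothetical helper */
}

/* Caller must hold the devlink instance lock (devl_lock()). */
static int foo_resources_init(struct devlink *devlink, void *priv)
{
	struct devlink_resource_size_params params;
	int err;

	devlink_resource_size_params_init(&params, 0, 65536, 1,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	err = devl_resource_register(devlink, "fdb", 32768,
				     FOO_RESOURCE_ID_FDB,
				     DEVLINK_RESOURCE_ID_PARENT_TOP, &params);
	if (err)
		return err;

	devl_resource_occ_get_register(devlink, FOO_RESOURCE_ID_FDB,
				       foo_fdb_occ_get, priv);
	return 0;
}
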
int devlink_params_register(struct devlink *devlink,
@@ -925,52 +1682,66 @@ int devlink_params_register(struct devlink *devlink,
void devlink_params_unregister(struct devlink *devlink,
const struct devlink_param *params,
size_t params_count);
-void devlink_params_publish(struct devlink *devlink);
-void devlink_params_unpublish(struct devlink *devlink);
-int devlink_port_params_register(struct devlink_port *devlink_port,
- const struct devlink_param *params,
- size_t params_count);
-void devlink_port_params_unregister(struct devlink_port *devlink_port,
- const struct devlink_param *params,
- size_t params_count);
+int devlink_param_register(struct devlink *devlink,
+ const struct devlink_param *param);
+void devlink_param_unregister(struct devlink *devlink,
+ const struct devlink_param *param);
int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
union devlink_param_value *init_val);
int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
union devlink_param_value init_val);
-int
-devlink_port_param_driverinit_value_get(struct devlink_port *devlink_port,
- u32 param_id,
- union devlink_param_value *init_val);
-int devlink_port_param_driverinit_value_set(struct devlink_port *devlink_port,
- u32 param_id,
- union devlink_param_value init_val);
void devlink_param_value_changed(struct devlink *devlink, u32 param_id);
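
For example, a driverinit-only generic parameter needs no get/set callbacks; the driver registers it and seeds a default, then reads the value back with devlink_param_driverinit_value_get() during (re)initialization. A sketch, assuming the ENABLE_ETH semantics fit the device:

static const struct devlink_param foo_params[] = {
	DEVLINK_PARAM_GENERIC(ENABLE_ETH,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, NULL),
};

static int foo_params_init(struct devlink *devlink)
{
	union devlink_param_value val;
	int err;

	err = devlink_params_register(devlink, foo_params,
				      ARRAY_SIZE(foo_params));
	if (err)
		return err;

	val.vbool = true; /* default: ethernet function enabled */
	return devlink_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
					val);
}
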
-void devlink_port_param_value_changed(struct devlink_port *devlink_port,
- u32 param_id);
-void devlink_param_value_str_fill(union devlink_param_value *dst_val,
- const char *src);
-struct devlink_region *devlink_region_create(struct devlink *devlink,
- const char *region_name,
- u32 region_max_snapshots,
- u64 region_size);
+struct devlink_region *devl_region_create(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ u32 region_max_snapshots,
+ u64 region_size);
+struct devlink_region *
+devlink_region_create(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size);
+struct devlink_region *
+devlink_port_region_create(struct devlink_port *port,
+ const struct devlink_port_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size);
+void devl_region_destroy(struct devlink_region *region);
void devlink_region_destroy(struct devlink_region *region);
-u32 devlink_region_snapshot_id_get(struct devlink *devlink);
+void devlink_port_region_destroy(struct devlink_region *region);
+
+int devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id);
+void devlink_region_snapshot_id_put(struct devlink *devlink, u32 id);
int devlink_region_snapshot_create(struct devlink_region *region,
- u8 *data, u32 snapshot_id,
- devlink_snapshot_data_dest_t *data_destructor);
+ u8 *data, u32 snapshot_id);
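
Snapshot IDs are now explicitly acquired and released; a sketch of a driver-initiated capture (for example from an error handler), where the data buffer was kmalloc'ed to match the region destructor:

static void foo_capture_snapshot(struct devlink *devlink,
				 struct devlink_region *region, u8 *data)
{
	u32 snapshot_id;

	if (devlink_region_snapshot_id_get(devlink, &snapshot_id)) {
		kfree(data);
		return;
	}

	/* On success devlink owns @data; on failure we still do. */
	if (devlink_region_snapshot_create(region, data, snapshot_id))
		kfree(data);

	/* Drop our id reference; a created snapshot keeps it alive. */
	devlink_region_snapshot_id_put(devlink, snapshot_id);
}
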
int devlink_info_serial_number_put(struct devlink_info_req *req,
const char *sn);
int devlink_info_driver_name_put(struct devlink_info_req *req,
const char *name);
+int devlink_info_board_serial_number_put(struct devlink_info_req *req,
+ const char *bsn);
+
+enum devlink_info_version_type {
+ DEVLINK_INFO_VERSION_TYPE_NONE,
+ DEVLINK_INFO_VERSION_TYPE_COMPONENT, /* May be used as flash update
+ * component by name.
+ */
+};
+
int devlink_info_version_fixed_put(struct devlink_info_req *req,
const char *version_name,
const char *version_value);
int devlink_info_version_stored_put(struct devlink_info_req *req,
const char *version_name,
const char *version_value);
+int devlink_info_version_stored_put_ext(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value,
+ enum devlink_info_version_type version_type);
int devlink_info_version_running_put(struct devlink_info_req *req,
const char *version_name,
const char *version_value);
+int devlink_info_version_running_put_ext(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value,
+ enum devlink_info_version_type version_type);
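
A compact info_get sketch using these helpers; the version name constants come from the generic list earlier in this diff, while the string values are placeholders.

static int foo_info_get(struct devlink *devlink, struct devlink_info_req *req,
			struct netlink_ext_ack *extack)
{
	int err;

	err = devlink_info_serial_number_put(req, "SN-0123"); /* placeholder */
	if (err)
		return err;

	err = devlink_info_version_running_put(req,
					DEVLINK_INFO_VERSION_GENERIC_FW_MGMT,
					"1.2.3"); /* placeholder */
	if (err)
		return err;

	/* Versions pending activation (e.g. after reboot) go to "stored". */
	return devlink_info_version_stored_put(req,
				DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID,
				"bundle-4.5"); /* placeholder */
}
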
int devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg);
int devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg);
@@ -981,12 +1752,14 @@ int devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg);
int devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg,
const char *name);
int devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg);
+int devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg,
+ const char *name);
+int devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg);
-int devlink_fmsg_bool_put(struct devlink_fmsg *fmsg, bool value);
-int devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value);
int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value);
-int devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value);
int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value);
+int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
+ u16 value_len);
int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name,
bool value);
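
As an example of the fmsg pair helpers, a minimal diagnose callback emitting two key/value pairs (the counter values are placeholders):

static int foo_reporter_diagnose(struct devlink_health_reporter *reporter,
				 struct devlink_fmsg *fmsg,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = devlink_fmsg_u32_pair_put(fmsg, "tx_timeouts",
					0 /* placeholder */);
	if (err)
		return err;

	return devlink_fmsg_bool_pair_put(fmsg, "fw_responsive", true);
}
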
@@ -1004,11 +1777,19 @@ int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
struct devlink_health_reporter *
devlink_health_reporter_create(struct devlink *devlink,
const struct devlink_health_reporter_ops *ops,
- u64 graceful_period, bool auto_recover,
- void *priv);
+ u64 graceful_period, void *priv);
+
+struct devlink_health_reporter *
+devlink_port_health_reporter_create(struct devlink_port *port,
+ const struct devlink_health_reporter_ops *ops,
+ u64 graceful_period, void *priv);
+
void
devlink_health_reporter_destroy(struct devlink_health_reporter *reporter);
+void
+devlink_port_health_reporter_destroy(struct devlink_health_reporter *reporter);
+
void *
devlink_health_reporter_priv(struct devlink_health_reporter *reporter);
int devlink_health_report(struct devlink_health_reporter *reporter,
@@ -1020,31 +1801,65 @@ void
devlink_health_reporter_recovery_done(struct devlink_health_reporter *reporter);
bool devlink_is_reload_failed(const struct devlink *devlink);
+void devlink_remote_reload_actions_performed(struct devlink *devlink,
+ enum devlink_reload_limit limit,
+ u32 actions_performed);
-void devlink_flash_update_begin_notify(struct devlink *devlink);
-void devlink_flash_update_end_notify(struct devlink *devlink);
void devlink_flash_update_status_notify(struct devlink *devlink,
const char *status_msg,
const char *component,
unsigned long done,
unsigned long total);
-
+void devlink_flash_update_timeout_notify(struct devlink *devlink,
+ const char *status_msg,
+ const char *component,
+ unsigned long timeout);
+
+int devl_traps_register(struct devlink *devlink,
+ const struct devlink_trap *traps,
+ size_t traps_count, void *priv);
int devlink_traps_register(struct devlink *devlink,
const struct devlink_trap *traps,
size_t traps_count, void *priv);
+void devl_traps_unregister(struct devlink *devlink,
+ const struct devlink_trap *traps,
+ size_t traps_count);
void devlink_traps_unregister(struct devlink *devlink,
const struct devlink_trap *traps,
size_t traps_count);
-void devlink_trap_report(struct devlink *devlink,
- struct sk_buff *skb, void *trap_ctx,
- struct devlink_port *in_devlink_port);
+void devlink_trap_report(struct devlink *devlink, struct sk_buff *skb,
+ void *trap_ctx, struct devlink_port *in_devlink_port,
+ const struct flow_action_cookie *fa_cookie);
void *devlink_trap_ctx_priv(void *trap_ctx);
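
In a driver's receive path the report call now also carries the optional flow action cookie; a hedged sketch, where trap_ctx is the per-trap context the driver stored from its trap_init callback:

static void foo_rx_trapped(struct devlink *devlink, struct sk_buff *skb,
			   void *trap_ctx, struct devlink_port *in_port)
{
	/* NULL cookie: this trap carries no flow action cookie metadata. */
	devlink_trap_report(devlink, skb, trap_ctx, in_port, NULL);
}
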
+int devl_trap_groups_register(struct devlink *devlink,
+ const struct devlink_trap_group *groups,
+ size_t groups_count);
+int devlink_trap_groups_register(struct devlink *devlink,
+ const struct devlink_trap_group *groups,
+ size_t groups_count);
+void devl_trap_groups_unregister(struct devlink *devlink,
+ const struct devlink_trap_group *groups,
+ size_t groups_count);
+void devlink_trap_groups_unregister(struct devlink *devlink,
+ const struct devlink_trap_group *groups,
+ size_t groups_count);
+int
+devl_trap_policers_register(struct devlink *devlink,
+ const struct devlink_trap_policer *policers,
+ size_t policers_count);
+void
+devl_trap_policers_unregister(struct devlink *devlink,
+ const struct devlink_trap_policer *policers,
+ size_t policers_count);
#if IS_ENABLED(CONFIG_NET_DEVLINK)
-void devlink_compat_running_version(struct net_device *dev,
+struct devlink *__must_check devlink_try_get(struct devlink *devlink);
+void devlink_put(struct devlink *devlink);
+
+void devlink_compat_running_version(struct devlink *devlink,
char *buf, size_t len);
-int devlink_compat_flash_update(struct net_device *dev, const char *file_name);
+int devlink_compat_flash_update(struct devlink *devlink, const char *file_name);
int devlink_compat_phys_port_name_get(struct net_device *dev,
char *name, size_t len);
int devlink_compat_switch_id_get(struct net_device *dev,
@@ -1052,13 +1867,22 @@ int devlink_compat_switch_id_get(struct net_device *dev,
#else
+static inline struct devlink *devlink_try_get(struct devlink *devlink)
+{
+ return NULL;
+}
+
+static inline void devlink_put(struct devlink *devlink)
+{
+}
+
static inline void
-devlink_compat_running_version(struct net_device *dev, char *buf, size_t len)
+devlink_compat_running_version(struct devlink *devlink, char *buf, size_t len)
{
}
static inline int
-devlink_compat_flash_update(struct net_device *dev, const char *file_name)
+devlink_compat_flash_update(struct devlink *devlink, const char *file_name)
{
return -EOPNOTSUPP;
}
diff --git a/include/net/dn.h b/include/net/dn.h
deleted file mode 100644
index 56ab0726c641..000000000000
--- a/include/net/dn.h
+++ /dev/null
@@ -1,231 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NET_DN_H
-#define _NET_DN_H
-
-#include <linux/dn.h>
-#include <net/sock.h>
-#include <net/flow.h>
-#include <asm/byteorder.h>
-#include <asm/unaligned.h>
-
-struct dn_scp /* Session Control Port */
-{
- unsigned char state;
-#define DN_O 1 /* Open */
-#define DN_CR 2 /* Connect Receive */
-#define DN_DR 3 /* Disconnect Reject */
-#define DN_DRC 4 /* Discon. Rej. Complete*/
-#define DN_CC 5 /* Connect Confirm */
-#define DN_CI 6 /* Connect Initiate */
-#define DN_NR 7 /* No resources */
-#define DN_NC 8 /* No communication */
-#define DN_CD 9 /* Connect Delivery */
-#define DN_RJ 10 /* Rejected */
-#define DN_RUN 11 /* Running */
-#define DN_DI 12 /* Disconnect Initiate */
-#define DN_DIC 13 /* Disconnect Complete */
-#define DN_DN 14 /* Disconnect Notificat */
-#define DN_CL 15 /* Closed */
-#define DN_CN 16 /* Closed Notification */
-
- __le16 addrloc;
- __le16 addrrem;
- __u16 numdat;
- __u16 numoth;
- __u16 numoth_rcv;
- __u16 numdat_rcv;
- __u16 ackxmt_dat;
- __u16 ackxmt_oth;
- __u16 ackrcv_dat;
- __u16 ackrcv_oth;
- __u8 flowrem_sw;
- __u8 flowloc_sw;
-#define DN_SEND 2
-#define DN_DONTSEND 1
-#define DN_NOCHANGE 0
- __u16 flowrem_dat;
- __u16 flowrem_oth;
- __u16 flowloc_dat;
- __u16 flowloc_oth;
- __u8 services_rem;
- __u8 services_loc;
- __u8 info_rem;
- __u8 info_loc;
-
- __u16 segsize_rem;
- __u16 segsize_loc;
-
- __u8 nonagle;
- __u8 multi_ireq;
- __u8 accept_mode;
- unsigned long seg_total; /* Running total of current segment */
-
- struct optdata_dn conndata_in;
- struct optdata_dn conndata_out;
- struct optdata_dn discdata_in;
- struct optdata_dn discdata_out;
- struct accessdata_dn accessdata;
-
- struct sockaddr_dn addr; /* Local address */
- struct sockaddr_dn peer; /* Remote address */
-
- /*
- * In this case the RTT estimation is not specified in the
- * docs, nor is any back off algorithm. Here we follow well
- * known tcp algorithms with a few small variations.
- *
- * snd_window: Max number of packets we send before we wait for
- * an ack to come back. This will become part of a
- * more complicated scheme when we support flow
- * control.
- *
- * nsp_srtt: Round-Trip-Time (x8) in jiffies. This is a rolling
- * average.
- * nsp_rttvar: Round-Trip-Time-Varience (x4) in jiffies. This is the
- * varience of the smoothed average (but calculated in
- * a simpler way than for normal statistical varience
- * calculations).
- *
- * nsp_rxtshift: Backoff counter. Value is zero normally, each time
- * a packet is lost is increases by one until an ack
- * is received. Its used to index an array of backoff
- * multipliers.
- */
-#define NSP_MIN_WINDOW 1
-#define NSP_MAX_WINDOW (0x07fe)
- unsigned long max_window;
- unsigned long snd_window;
-#define NSP_INITIAL_SRTT (HZ)
- unsigned long nsp_srtt;
-#define NSP_INITIAL_RTTVAR (HZ*3)
- unsigned long nsp_rttvar;
-#define NSP_MAXRXTSHIFT 12
- unsigned long nsp_rxtshift;
-
- /*
- * Output queues, one for data, one for otherdata/linkservice
- */
- struct sk_buff_head data_xmit_queue;
- struct sk_buff_head other_xmit_queue;
-
- /*
- * Input queue for other data
- */
- struct sk_buff_head other_receive_queue;
- int other_report;
-
- /*
- * Stuff to do with the slow timer
- */
- unsigned long stamp; /* time of last transmit */
- unsigned long persist;
- int (*persist_fxn)(struct sock *sk);
- unsigned long keepalive;
- void (*keepalive_fxn)(struct sock *sk);
-
-};
-
-static inline struct dn_scp *DN_SK(struct sock *sk)
-{
- return (struct dn_scp *)(sk + 1);
-}
-
-/*
- * src,dst : Source and Destination DECnet addresses
- * hops : Number of hops through the network
- * dst_port, src_port : NSP port numbers
- * services, info : Useful data extracted from conninit messages
- * rt_flags : Routing flags byte
- * nsp_flags : NSP layer flags byte
- * segsize : Size of segment
- * segnum : Number, for data, otherdata and linkservice
- * xmit_count : Number of times we've transmitted this skb
- * stamp : Time stamp of most recent transmission, used in RTT calculations
- * iif: Input interface number
- *
- * As a general policy, this structure keeps all addresses in network
- * byte order, and all else in host byte order. Thus dst, src, dst_port
- * and src_port are in network order. All else is in host order.
- *
- */
-#define DN_SKB_CB(skb) ((struct dn_skb_cb *)(skb)->cb)
-struct dn_skb_cb {
- __le16 dst;
- __le16 src;
- __u16 hops;
- __le16 dst_port;
- __le16 src_port;
- __u8 services;
- __u8 info;
- __u8 rt_flags;
- __u8 nsp_flags;
- __u16 segsize;
- __u16 segnum;
- __u16 xmit_count;
- unsigned long stamp;
- int iif;
-};
-
-static inline __le16 dn_eth2dn(unsigned char *ethaddr)
-{
- return get_unaligned((__le16 *)(ethaddr + 4));
-}
-
-static inline __le16 dn_saddr2dn(struct sockaddr_dn *saddr)
-{
- return *(__le16 *)saddr->sdn_nodeaddr;
-}
-
-static inline void dn_dn2eth(unsigned char *ethaddr, __le16 addr)
-{
- __u16 a = le16_to_cpu(addr);
- ethaddr[0] = 0xAA;
- ethaddr[1] = 0x00;
- ethaddr[2] = 0x04;
- ethaddr[3] = 0x00;
- ethaddr[4] = (__u8)(a & 0xff);
- ethaddr[5] = (__u8)(a >> 8);
-}
-
-static inline void dn_sk_ports_copy(struct flowidn *fld, struct dn_scp *scp)
-{
- fld->fld_sport = scp->addrloc;
- fld->fld_dport = scp->addrrem;
-}
-
-unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu);
-void dn_register_sysctl(void);
-void dn_unregister_sysctl(void);
-
-#define DN_MENUVER_ACC 0x01
-#define DN_MENUVER_USR 0x02
-#define DN_MENUVER_PRX 0x04
-#define DN_MENUVER_UIC 0x08
-
-struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr);
-struct sock *dn_find_by_skb(struct sk_buff *skb);
-#define DN_ASCBUF_LEN 9
-char *dn_addr2asc(__u16, char *);
-int dn_destroy_timer(struct sock *sk);
-
-int dn_sockaddr2username(struct sockaddr_dn *addr, unsigned char *buf,
- unsigned char type);
-int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *addr,
- unsigned char *type);
-
-void dn_start_slow_timer(struct sock *sk);
-void dn_stop_slow_timer(struct sock *sk);
-
-extern __le16 decnet_address;
-extern int decnet_debug_level;
-extern int decnet_time_wait;
-extern int decnet_dn_count;
-extern int decnet_di_count;
-extern int decnet_dr_count;
-extern int decnet_no_fc_max_cwnd;
-
-extern long sysctl_decnet_mem[3];
-extern int sysctl_decnet_wmem[3];
-extern int sysctl_decnet_rmem[3];
-
-#endif /* _NET_DN_H */
diff --git a/include/net/dn_dev.h b/include/net/dn_dev.h
deleted file mode 100644
index 595b4f6c1eb1..000000000000
--- a/include/net/dn_dev.h
+++ /dev/null
@@ -1,199 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NET_DN_DEV_H
-#define _NET_DN_DEV_H
-
-
-struct dn_dev;
-
-struct dn_ifaddr {
- struct dn_ifaddr __rcu *ifa_next;
- struct dn_dev *ifa_dev;
- __le16 ifa_local;
- __le16 ifa_address;
- __u32 ifa_flags;
- __u8 ifa_scope;
- char ifa_label[IFNAMSIZ];
- struct rcu_head rcu;
-};
-
-#define DN_DEV_S_RU 0 /* Run - working normally */
-#define DN_DEV_S_CR 1 /* Circuit Rejected */
-#define DN_DEV_S_DS 2 /* Data Link Start */
-#define DN_DEV_S_RI 3 /* Routing Layer Initialize */
-#define DN_DEV_S_RV 4 /* Routing Layer Verify */
-#define DN_DEV_S_RC 5 /* Routing Layer Complete */
-#define DN_DEV_S_OF 6 /* Off */
-#define DN_DEV_S_HA 7 /* Halt */
-
-
-/*
- * The dn_dev_parms structure contains the set of parameters
- * for each device (hence inclusion in the dn_dev structure)
- * and an array is used to store the defaults for the supported
- * device types (in dn_dev.c).
- *
- * The type field matches the ARPHRD_ constants and is used in
- * searching the list for supported devices when new devices
- * come up.
- *
- * The mode field is used to find out if a device is broadcast,
- * multipoint, or pointopoint. Please note that DECnet regards
- * devices differently from the rest of the kernel, so the normal
- * IFF_xxx flags are invalid here. For devices which can be any
- * combination of the previously mentioned attributes, you can set
- * this on a per device basis by installing an up() routine.
- *
- * The device state field defines the initial state in which the
- * device will come up. In the dn_dev structure, it is the actual
- * state.
- *
- * Things have changed here. I've killed timer1 since it's a user space
- * issue for a routing daemon to sort out. The kernel does
- * not need to be bothered with it.
- *
- * Timers:
- * t2 - Rate limit timer, min time between routing and hello messages
- * t3 - Hello timer, send hello messages when it expires
- *
- * Callbacks:
- * up() - Called to initialize device, return value can veto use of
- * device with DECnet.
- * down() - Called to turn device off when it goes down
- * timer3() - Called once for each ifaddr when timer 3 goes off
- *
- * sysctl - Hook for sysctl things
- *
- */
-struct dn_dev_parms {
- int type; /* ARPHRD_xxx */
- int mode; /* Broadcast, Unicast, Multipoint */
-#define DN_DEV_BCAST 1
-#define DN_DEV_UCAST 2
-#define DN_DEV_MPOINT 4
- int state; /* Initial state */
- int forwarding; /* 0=EndNode, 1=L1Router, 2=L2Router */
- unsigned long t2; /* Default value of t2 */
- unsigned long t3; /* Default value of t3 */
- int priority; /* Priority to be a router */
- char *name; /* Name for sysctl */
- int (*up)(struct net_device *);
- void (*down)(struct net_device *);
- void (*timer3)(struct net_device *, struct dn_ifaddr *ifa);
- void *sysctl;
-};
-
-
-struct dn_dev {
- struct dn_ifaddr __rcu *ifa_list;
- struct net_device *dev;
- struct dn_dev_parms parms;
- char use_long;
- struct timer_list timer;
- unsigned long t3;
- struct neigh_parms *neigh_parms;
- __u8 addr[ETH_ALEN];
- struct neighbour *router; /* Default router on circuit */
- struct neighbour *peer; /* Peer on pointopoint links */
- unsigned long uptime; /* Time device went up in jiffies */
-};
-
-struct dn_short_packet {
- __u8 msgflg;
- __le16 dstnode;
- __le16 srcnode;
- __u8 forward;
-} __packed;
-
-struct dn_long_packet {
- __u8 msgflg;
- __u8 d_area;
- __u8 d_subarea;
- __u8 d_id[6];
- __u8 s_area;
- __u8 s_subarea;
- __u8 s_id[6];
- __u8 nl2;
- __u8 visit_ct;
- __u8 s_class;
- __u8 pt;
-} __packed;
-
-/*------------------------- DRP - Routing messages ---------------------*/
-
-struct endnode_hello_message {
- __u8 msgflg;
- __u8 tiver[3];
- __u8 id[6];
- __u8 iinfo;
- __le16 blksize;
- __u8 area;
- __u8 seed[8];
- __u8 neighbor[6];
- __le16 timer;
- __u8 mpd;
- __u8 datalen;
- __u8 data[2];
-} __packed;
-
-struct rtnode_hello_message {
- __u8 msgflg;
- __u8 tiver[3];
- __u8 id[6];
- __u8 iinfo;
- __le16 blksize;
- __u8 priority;
- __u8 area;
- __le16 timer;
- __u8 mpd;
-} __packed;
-
-
-void dn_dev_init(void);
-void dn_dev_cleanup(void);
-
-int dn_dev_ioctl(unsigned int cmd, void __user *arg);
-
-void dn_dev_devices_off(void);
-void dn_dev_devices_on(void);
-
-void dn_dev_init_pkt(struct sk_buff *skb);
-void dn_dev_veri_pkt(struct sk_buff *skb);
-void dn_dev_hello(struct sk_buff *skb);
-
-void dn_dev_up(struct net_device *);
-void dn_dev_down(struct net_device *);
-
-int dn_dev_set_default(struct net_device *dev, int force);
-struct net_device *dn_dev_get_default(void);
-int dn_dev_bind_default(__le16 *addr);
-
-int register_dnaddr_notifier(struct notifier_block *nb);
-int unregister_dnaddr_notifier(struct notifier_block *nb);
-
-static inline int dn_dev_islocal(struct net_device *dev, __le16 addr)
-{
- struct dn_dev *dn_db;
- struct dn_ifaddr *ifa;
- int res = 0;
-
- rcu_read_lock();
- dn_db = rcu_dereference(dev->dn_ptr);
- if (dn_db == NULL) {
- printk(KERN_DEBUG "dn_dev_islocal: Called for non DECnet device\n");
- goto out;
- }
-
- for (ifa = rcu_dereference(dn_db->ifa_list);
- ifa != NULL;
- ifa = rcu_dereference(ifa->ifa_next))
- if ((addr ^ ifa->ifa_local) == 0) {
- res = 1;
- break;
- }
-out:
- rcu_read_unlock();
- return res;
-}
-
-#endif /* _NET_DN_DEV_H */
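
The dn_dev_parms table described above is searched by hardware type when an
interface comes up. A hedged userspace sketch of that lookup (dn_params and
find_params are invented names; only the search pattern mirrors dn_dev.c):

#include <stddef.h>

#define ARPHRD_ETHER	1
#define ARPHRD_LOOPBACK	772

struct demo_parms {
	int type;		/* ARPHRD_xxx, as in dn_dev_parms */
	int mode;		/* DN_DEV_BCAST and friends */
	unsigned long t2, t3;	/* default timer values, in seconds */
};

/* Illustrative defaults table, one entry per supported device type. */
static const struct demo_parms dn_params[] = {
	{ .type = ARPHRD_ETHER,    .mode = 1 /* BCAST */, .t2 = 1, .t3 = 10 },
	{ .type = ARPHRD_LOOPBACK, .mode = 1 /* BCAST */, .t2 = 1, .t3 = 10 },
};

/* Linear search by hardware type, mirroring the lookup described above. */
static const struct demo_parms *find_params(int arphrd_type)
{
	size_t i;

	for (i = 0; i < sizeof(dn_params) / sizeof(dn_params[0]); i++)
		if (dn_params[i].type == arphrd_type)
			return &dn_params[i];
	return NULL;	/* device type not supported by DECnet */
}
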
diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h
deleted file mode 100644
index 6dd2213c5eb2..000000000000
--- a/include/net/dn_fib.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NET_DN_FIB_H
-#define _NET_DN_FIB_H
-
-#include <linux/netlink.h>
-#include <linux/refcount.h>
-
-extern const struct nla_policy rtm_dn_policy[];
-
-struct dn_fib_res {
- struct fib_rule *r;
- struct dn_fib_info *fi;
- unsigned char prefixlen;
- unsigned char nh_sel;
- unsigned char type;
- unsigned char scope;
-};
-
-struct dn_fib_nh {
- struct net_device *nh_dev;
- unsigned int nh_flags;
- unsigned char nh_scope;
- int nh_weight;
- int nh_power;
- int nh_oif;
- __le16 nh_gw;
-};
-
-struct dn_fib_info {
- struct dn_fib_info *fib_next;
- struct dn_fib_info *fib_prev;
- int fib_treeref;
- refcount_t fib_clntref;
- int fib_dead;
- unsigned int fib_flags;
- int fib_protocol;
- __le16 fib_prefsrc;
- __u32 fib_priority;
- __u32 fib_metrics[RTAX_MAX];
- int fib_nhs;
- int fib_power;
- struct dn_fib_nh fib_nh[0];
-#define dn_fib_dev fib_nh[0].nh_dev
-};
-
-
-#define DN_FIB_RES_RESET(res) ((res).nh_sel = 0)
-#define DN_FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel])
-
-#define DN_FIB_RES_PREFSRC(res) ((res).fi->fib_prefsrc ? : __dn_fib_res_prefsrc(&res))
-#define DN_FIB_RES_GW(res) (DN_FIB_RES_NH(res).nh_gw)
-#define DN_FIB_RES_DEV(res) (DN_FIB_RES_NH(res).nh_dev)
-#define DN_FIB_RES_OIF(res) (DN_FIB_RES_NH(res).nh_oif)
-
-typedef struct {
- __le16 datum;
-} dn_fib_key_t;
-
-typedef struct {
- __le16 datum;
-} dn_fib_hash_t;
-
-typedef struct {
- __u16 datum;
-} dn_fib_idx_t;
-
-struct dn_fib_node {
- struct dn_fib_node *fn_next;
- struct dn_fib_info *fn_info;
-#define DN_FIB_INFO(f) ((f)->fn_info)
- dn_fib_key_t fn_key;
- u8 fn_type;
- u8 fn_scope;
- u8 fn_state;
-};
-
-
-struct dn_fib_table {
- struct hlist_node hlist;
- u32 n;
-
- int (*insert)(struct dn_fib_table *t, struct rtmsg *r,
- struct nlattr *attrs[], struct nlmsghdr *n,
- struct netlink_skb_parms *req);
- int (*delete)(struct dn_fib_table *t, struct rtmsg *r,
- struct nlattr *attrs[], struct nlmsghdr *n,
- struct netlink_skb_parms *req);
- int (*lookup)(struct dn_fib_table *t, const struct flowidn *fld,
- struct dn_fib_res *res);
- int (*flush)(struct dn_fib_table *t);
- int (*dump)(struct dn_fib_table *t, struct sk_buff *skb, struct netlink_callback *cb);
-
- unsigned char data[0];
-};
-
-#ifdef CONFIG_DECNET_ROUTER
-/*
- * dn_fib.c
- */
-void dn_fib_init(void);
-void dn_fib_cleanup(void);
-
-int dn_fib_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
-struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r,
- struct nlattr *attrs[],
- const struct nlmsghdr *nlh, int *errp);
-int dn_fib_semantic_match(int type, struct dn_fib_info *fi,
- const struct flowidn *fld, struct dn_fib_res *res);
-void dn_fib_release_info(struct dn_fib_info *fi);
-void dn_fib_flush(void);
-void dn_fib_select_multipath(const struct flowidn *fld, struct dn_fib_res *res);
-
-/*
- * dn_tables.c
- */
-struct dn_fib_table *dn_fib_get_table(u32 n, int creat);
-struct dn_fib_table *dn_fib_empty_table(void);
-void dn_fib_table_init(void);
-void dn_fib_table_cleanup(void);
-
-/*
- * dn_rules.c
- */
-void dn_fib_rules_init(void);
-void dn_fib_rules_cleanup(void);
-unsigned int dnet_addr_type(__le16 addr);
-int dn_fib_lookup(struct flowidn *fld, struct dn_fib_res *res);
-
-int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb);
-
-void dn_fib_free_info(struct dn_fib_info *fi);
-
-static inline void dn_fib_info_put(struct dn_fib_info *fi)
-{
- if (refcount_dec_and_test(&fi->fib_clntref))
- dn_fib_free_info(fi);
-}
-
-static inline void dn_fib_res_put(struct dn_fib_res *res)
-{
- if (res->fi)
- dn_fib_info_put(res->fi);
- if (res->r)
- fib_rule_put(res->r);
-}
-
-#else /* Endnode */
-
-#define dn_fib_init() do { } while(0)
-#define dn_fib_cleanup() do { } while(0)
-
-#define dn_fib_lookup(fl, res) (-ESRCH)
-#define dn_fib_info_put(fi) do { } while(0)
-#define dn_fib_select_multipath(fl, res) do { } while(0)
-#define dn_fib_rules_policy(saddr,res,flags) (0)
-#define dn_fib_res_put(res) do { } while(0)
-
-#endif /* CONFIG_DECNET_ROUTER */
-
-static inline __le16 dnet_make_mask(int n)
-{
- if (n)
- return cpu_to_le16(~((1 << (16 - n)) - 1));
- return cpu_to_le16(0);
-}
-
-#endif /* _NET_DN_FIB_H */
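
dnet_make_mask() above turns a prefix length into a 16-bit netmask before
byte-swapping it to little-endian. A standalone sketch with two worked values
(the function name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as dnet_make_mask(), minus the cpu_to_le16() wrapper. */
static uint16_t demo_make_mask(int n)
{
	if (n)
		return (uint16_t)~((1 << (16 - n)) - 1);
	return 0;
}

int main(void)
{
	printf("mask(6)  = 0x%04x\n", demo_make_mask(6));  /* 0xfc00: area */
	printf("mask(16) = 0x%04x\n", demo_make_mask(16)); /* 0xffff: host */
	return 0;
}
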
diff --git a/include/net/dn_neigh.h b/include/net/dn_neigh.h
deleted file mode 100644
index 2e3e7793973a..000000000000
--- a/include/net/dn_neigh.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NET_DN_NEIGH_H
-#define _NET_DN_NEIGH_H
-
-/*
- * The position of the first two fields of
- * this structure is critical - SJW
- */
-struct dn_neigh {
- struct neighbour n;
- __le16 addr;
- unsigned long flags;
-#define DN_NDFLAG_R1 0x0001 /* Router L1 */
-#define DN_NDFLAG_R2 0x0002 /* Router L2 */
-#define DN_NDFLAG_P3 0x0004 /* Phase III Node */
- unsigned long blksize;
- __u8 priority;
-};
-
-void dn_neigh_init(void);
-void dn_neigh_cleanup(void);
-int dn_neigh_router_hello(struct net *net, struct sock *sk, struct sk_buff *skb);
-int dn_neigh_endnode_hello(struct net *net, struct sock *sk, struct sk_buff *skb);
-void dn_neigh_pointopoint_hello(struct sk_buff *skb);
-int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
-int dn_to_neigh_output(struct net *net, struct sock *sk, struct sk_buff *skb);
-
-extern struct neigh_table dn_neigh_table;
-
-#endif /* _NET_DN_NEIGH_H */
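
The layout warning at the top of dn_neigh.h exists because the generic
neighbour code hands back a struct neighbour * that DECnet re-interprets as
the containing structure. A hedged sketch of that embedding pattern (the
types here are stand-ins, not the real kernel definitions):

#include <stddef.h>
#include <assert.h>

struct neighbour {	/* stand-in for the kernel's struct neighbour */
	int refcnt;
};

struct demo_dn_neigh {
	struct neighbour n;	/* must stay first for the cast to be valid */
	unsigned short addr;
	unsigned long flags;
};

/* Convert a generic entry to its DECnet wrapper; safe only because
 * offsetof(struct demo_dn_neigh, n) == 0. */
static struct demo_dn_neigh *to_dn_neigh(struct neighbour *neigh)
{
	return (struct demo_dn_neigh *)neigh;
}

int main(void)
{
	struct demo_dn_neigh dn = { .addr = 1025 };

	assert(to_dn_neigh(&dn.n) == &dn);
	return 0;
}
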
diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h
deleted file mode 100644
index f83932b864a9..000000000000
--- a/include/net/dn_nsp.h
+++ /dev/null
@@ -1,195 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _NET_DN_NSP_H
-#define _NET_DN_NSP_H
-/******************************************************************************
- (c) 1995-1998 E.M. Serrat emserrat@geocities.com
-
-*******************************************************************************/
-/* dn_nsp.c functions prototyping */
-
-void dn_nsp_send_data_ack(struct sock *sk);
-void dn_nsp_send_oth_ack(struct sock *sk);
-void dn_send_conn_ack(struct sock *sk);
-void dn_send_conn_conf(struct sock *sk, gfp_t gfp);
-void dn_nsp_send_disc(struct sock *sk, unsigned char type,
- unsigned short reason, gfp_t gfp);
-void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type,
- unsigned short reason);
-void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval);
-void dn_nsp_send_conninit(struct sock *sk, unsigned char flags);
-
-void dn_nsp_output(struct sock *sk);
-int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb,
- struct sk_buff_head *q, unsigned short acknum);
-void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp,
- int oob);
-unsigned long dn_nsp_persist(struct sock *sk);
-int dn_nsp_xmit_timeout(struct sock *sk);
-
-int dn_nsp_rx(struct sk_buff *);
-int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
-
-struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
-struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock,
- long timeo, int *err);
-
-#define NSP_REASON_OK 0 /* No error */
-#define NSP_REASON_NR 1 /* No resources */
-#define NSP_REASON_UN 2 /* Unrecognised node name */
-#define NSP_REASON_SD 3 /* Node shutting down */
-#define NSP_REASON_ID 4 /* Invalid destination end user */
-#define NSP_REASON_ER 5 /* End user lacks resources */
-#define NSP_REASON_OB 6 /* Object too busy */
-#define NSP_REASON_US 7 /* Unspecified error */
-#define NSP_REASON_TP 8 /* Third-Party abort */
-#define NSP_REASON_EA 9 /* End user has aborted the link */
-#define NSP_REASON_IF 10 /* Invalid node name format */
-#define NSP_REASON_LS 11 /* Local node shutdown */
-#define NSP_REASON_LL 32 /* Node lacks logical-link resources */
-#define NSP_REASON_LE 33 /* End user lacks logical-link resources */
-#define NSP_REASON_UR 34 /* Unacceptable RQSTRID or PASSWORD field */
-#define NSP_REASON_UA 36 /* Unacceptable ACCOUNT field */
-#define NSP_REASON_TM 38 /* End user timed out logical link */
-#define NSP_REASON_NU 39 /* Node unreachable */
-#define NSP_REASON_NL 41 /* No-link message */
-#define NSP_REASON_DC 42 /* Disconnect confirm */
-#define NSP_REASON_IO 43 /* Image data field overflow */
-
-#define NSP_DISCINIT 0x38
-#define NSP_DISCCONF 0x48
-
-/*------------------------- NSP - messages ------------------------------*/
-/* Data Messages */
-/*---------------*/
-
-/* Data Messages (data segment/interrupt/link service) */
-
-struct nsp_data_seg_msg {
- __u8 msgflg;
- __le16 dstaddr;
- __le16 srcaddr;
-} __packed;
-
-struct nsp_data_opt_msg {
- __le16 acknum;
- __le16 segnum;
- __le16 lsflgs;
-} __packed;
-
-struct nsp_data_opt_msg1 {
- __le16 acknum;
- __le16 segnum;
-} __packed;
-
-
-/* Acknowledgment Message (data/other data) */
-struct nsp_data_ack_msg {
- __u8 msgflg;
- __le16 dstaddr;
- __le16 srcaddr;
- __le16 acknum;
-} __packed;
-
-/* Connect Acknowledgment Message */
-struct nsp_conn_ack_msg {
- __u8 msgflg;
- __le16 dstaddr;
-} __packed;
-
-
-/* Connect Initiate/Retransmit Initiate/Connect Confirm */
-struct nsp_conn_init_msg {
- __u8 msgflg;
-#define NSP_CI 0x18 /* Connect Initiate */
-#define NSP_RCI 0x68 /* Retrans. Conn Init */
- __le16 dstaddr;
- __le16 srcaddr;
- __u8 services;
-#define NSP_FC_NONE 0x00 /* Flow Control None */
-#define NSP_FC_SRC 0x04 /* Seg Req. Count */
-#define NSP_FC_SCMC 0x08 /* Sess. Control Mess */
-#define NSP_FC_MASK 0x0c /* FC type mask */
- __u8 info;
- __le16 segsize;
-} __packed;
-
-/* Disconnect Initiate/Disconnect Confirm */
-struct nsp_disconn_init_msg {
- __u8 msgflg;
- __le16 dstaddr;
- __le16 srcaddr;
- __le16 reason;
-} __packed;
-
-
-
-struct srcobj_fmt {
- __u8 format;
- __u8 task;
- __le16 grpcode;
- __le16 usrcode;
- __u8 dlen;
-} __packed;
-
-/*
- * A collection of functions for manipulating the sequence
- * numbers used in NSP. Similar in operation to the functions
- * of the same name in TCP.
- */
-static __inline__ int dn_before(__u16 seq1, __u16 seq2)
-{
- seq1 &= 0x0fff;
- seq2 &= 0x0fff;
-
- return (int)((seq1 - seq2) & 0x0fff) > 2048;
-}
-
-
-static __inline__ int dn_after(__u16 seq1, __u16 seq2)
-{
- seq1 &= 0x0fff;
- seq2 &= 0x0fff;
-
- return (int)((seq2 - seq1) & 0x0fff) > 2048;
-}
-
-static __inline__ int dn_equal(__u16 seq1, __u16 seq2)
-{
- return ((seq1 ^ seq2) & 0x0fff) == 0;
-}
-
-static __inline__ int dn_before_or_equal(__u16 seq1, __u16 seq2)
-{
- return (dn_before(seq1, seq2) || dn_equal(seq1, seq2));
-}
-
-static __inline__ void seq_add(__u16 *seq, __u16 off)
-{
- (*seq) += off;
- (*seq) &= 0x0fff;
-}
-
-static __inline__ int seq_next(__u16 seq1, __u16 seq2)
-{
- return dn_equal(seq1 + 1, seq2);
-}
-
-/*
- * Can we delay the ack?
- */
-static __inline__ int sendack(__u16 seq)
-{
- return (int)((seq & 0x1000) ? 0 : 1);
-}
-
-/*
- * Is the socket congested?
- */
-static __inline__ int dn_congested(struct sock *sk)
-{
- return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
-}
-
-#define DN_MAX_NSP_DATA_HEADER (11)
-
-#endif /* _NET_DN_NSP_H */
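
The sequence helpers above compare 12-bit, wrapping NSP sequence numbers: one
value precedes another when the modular distance exceeds half the space
(2048). A quick standalone check of that property (demo_before is an invented
name):

#include <assert.h>
#include <stdint.h>

/* Same test as dn_before(): does seq1 precede seq2 modulo 4096? */
static int demo_before(uint16_t seq1, uint16_t seq2)
{
	seq1 &= 0x0fff;
	seq2 &= 0x0fff;
	return (int)((seq1 - seq2) & 0x0fff) > 2048;
}

int main(void)
{
	assert(demo_before(10, 20));		/* 10 comes before 20 */
	assert(!demo_before(20, 10));
	assert(demo_before(0x0ffe, 0x0001));	/* wraps: 4094 precedes 1 */
	return 0;
}
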
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
deleted file mode 100644
index 6f1e94ac0bdf..000000000000
--- a/include/net/dn_route.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _NET_DN_ROUTE_H
-#define _NET_DN_ROUTE_H
-
-/******************************************************************************
- (c) 1995-1998 E.M. Serrat emserrat@geocities.com
-
-*******************************************************************************/
-
-struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
-int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *,
- struct sock *sk, int flags);
-int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
-void dn_rt_cache_flush(int delay);
-int dn_route_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt, struct net_device *orig_dev);
-
-/* Masks for flags field */
-#define DN_RT_F_PID 0x07 /* Mask for packet type */
-#define DN_RT_F_PF 0x80 /* Padding Follows */
-#define DN_RT_F_VER 0x40 /* Version =0 discard packet if ==1 */
-#define DN_RT_F_IE 0x20 /* Intra Ethernet, Reserved in short pkt */
-#define DN_RT_F_RTS 0x10 /* Packet is being returned to sender */
-#define DN_RT_F_RQR 0x08 /* Return packet to sender upon non-delivery */
-
-/* Mask for types of routing packets */
-#define DN_RT_PKT_MSK 0x06
-/* Types of routing packets */
-#define DN_RT_PKT_SHORT 0x02 /* Short routing packet */
-#define DN_RT_PKT_LONG 0x06 /* Long routing packet */
-
-/* Mask for control/routing selection */
-#define DN_RT_PKT_CNTL 0x01 /* Set to 1 if a control packet */
-/* Types of control packets */
-#define DN_RT_CNTL_MSK 0x0f /* Mask for control packets */
-#define DN_RT_PKT_INIT 0x01 /* Initialisation packet */
-#define DN_RT_PKT_VERI 0x03 /* Verification Message */
-#define DN_RT_PKT_HELO 0x05 /* Hello and Test Message */
-#define DN_RT_PKT_L1RT 0x07 /* Level 1 Routing Message */
-#define DN_RT_PKT_L2RT 0x09 /* Level 2 Routing Message */
-#define DN_RT_PKT_ERTH 0x0b /* Ethernet Router Hello */
-#define DN_RT_PKT_EEDH 0x0d /* Ethernet EndNode Hello */
-
-/* Values for info field in hello message */
-#define DN_RT_INFO_TYPE 0x03 /* Type mask */
-#define DN_RT_INFO_L1RT 0x02 /* L1 Router */
-#define DN_RT_INFO_L2RT 0x01 /* L2 Router */
-#define DN_RT_INFO_ENDN 0x03 /* EndNode */
-#define DN_RT_INFO_VERI 0x04 /* Verification Reqd. */
-#define DN_RT_INFO_RJCT 0x08 /* Reject Flag, Reserved */
-#define DN_RT_INFO_VFLD 0x10 /* Verification Failed, Reserved */
-#define DN_RT_INFO_NOML 0x20 /* No Multicast traffic accepted */
-#define DN_RT_INFO_BLKR 0x40 /* Blocking Requested */
-
-/*
- * The fl structure is what we used to look up the route.
- * The rt_saddr & rt_daddr entries are the same as key.saddr & key.daddr
- * except for local input routes, where the rt_saddr = fl.fld_dst and
- * rt_daddr = fl.fld_src to allow the route to be used for returning
- * packets to the originating host.
- */
-struct dn_route {
- struct dst_entry dst;
- struct dn_route __rcu *dn_next;
-
- struct neighbour *n;
-
- struct flowidn fld;
-
- __le16 rt_saddr;
- __le16 rt_daddr;
- __le16 rt_gateway;
- __le16 rt_local_src; /* Source used for forwarding packets */
- __le16 rt_src_map;
- __le16 rt_dst_map;
-
- unsigned int rt_flags;
- unsigned int rt_type;
-};
-
-static inline bool dn_is_input_route(struct dn_route *rt)
-{
- return rt->fld.flowidn_iif != 0;
-}
-
-static inline bool dn_is_output_route(struct dn_route *rt)
-{
- return rt->fld.flowidn_iif == 0;
-}
-
-void dn_route_init(void);
-void dn_route_cleanup(void);
-
-#include <net/sock.h>
-#include <linux/if_arp.h>
-
-static inline void dn_rt_send(struct sk_buff *skb)
-{
- dev_queue_xmit(skb);
-}
-
-static inline void dn_rt_finish_output(struct sk_buff *skb, char *dst, char *src)
-{
- struct net_device *dev = skb->dev;
-
- if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK))
- dst = NULL;
-
- if (dev_hard_header(skb, dev, ETH_P_DNA_RT, dst, src, skb->len) >= 0)
- dn_rt_send(skb);
- else
- kfree_skb(skb);
-}
-
-#endif /* _NET_DN_ROUTE_H */
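
The flag masks above carve the first routing-header byte into a control/data
discriminator plus a packet-type field. A hedged sketch of how a receive path
might classify that byte (classify() is illustrative; the real dispatch lived
in net/decnet/dn_route.c):

#include <stdio.h>

#define DN_RT_PKT_CNTL	0x01
#define DN_RT_CNTL_MSK	0x0f
#define DN_RT_PKT_MSK	0x06
#define DN_RT_PKT_SHORT	0x02
#define DN_RT_PKT_LONG	0x06
#define DN_RT_PKT_HELO	0x05
#define DN_RT_PKT_EEDH	0x0d

static const char *classify(unsigned char msgflg)
{
	if (msgflg & DN_RT_PKT_CNTL) {		/* control packet */
		switch (msgflg & DN_RT_CNTL_MSK) {
		case DN_RT_PKT_HELO: return "hello/test";
		case DN_RT_PKT_EEDH: return "endnode hello";
		default:	     return "other control";
		}
	}
	switch (msgflg & DN_RT_PKT_MSK) {	/* data packet */
	case DN_RT_PKT_SHORT: return "short data";
	case DN_RT_PKT_LONG:  return "long data";
	default:	      return "unknown";
	}
}

int main(void)
{
	printf("0x0d -> %s\n", classify(0x0d));	/* ethernet endnode hello */
	printf("0x02 -> %s\n", classify(0x02));	/* short routing packet */
	return 0;
}
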
diff --git a/include/net/drop_monitor.h b/include/net/drop_monitor.h
deleted file mode 100644
index 2ab668461463..000000000000
--- a/include/net/drop_monitor.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-#ifndef _NET_DROP_MONITOR_H_
-#define _NET_DROP_MONITOR_H_
-
-#include <linux/ktime.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-
-/**
- * struct net_dm_hw_metadata - Hardware-supplied packet metadata.
- * @trap_group_name: Hardware trap group name.
- * @trap_name: Hardware trap name.
- * @input_dev: Input netdevice.
- */
-struct net_dm_hw_metadata {
- const char *trap_group_name;
- const char *trap_name;
- struct net_device *input_dev;
-};
-
-#if IS_ENABLED(CONFIG_NET_DROP_MONITOR)
-void net_dm_hw_report(struct sk_buff *skb,
- const struct net_dm_hw_metadata *hw_metadata);
-#else
-static inline void
-net_dm_hw_report(struct sk_buff *skb,
- const struct net_dm_hw_metadata *hw_metadata)
-{
-}
-#endif
-
-#endif /* _NET_DROP_MONITOR_H_ */
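
drop_monitor.h above shows the standard kernel stub pattern: a real
declaration when the feature is compiled in, an empty static inline
otherwise, so call sites never need their own #ifdefs. The same pattern in
generic form (CONFIG_FOO and foo_report are placeholders):

/* Real function when the feature is enabled, no-op stub otherwise;
 * either way, callers just call foo_report() unconditionally. */
#ifdef CONFIG_FOO
void foo_report(int event);		/* implemented in foo.c */
#else
static inline void foo_report(int event)
{
	(void)event;			/* explicitly unused */
}
#endif
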
diff --git a/include/net/dropreason.h b/include/net/dropreason.h
new file mode 100644
index 000000000000..c1cbcdbaf149
--- /dev/null
+++ b/include/net/dropreason.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _LINUX_DROPREASON_H
+#define _LINUX_DROPREASON_H
+
+#define DEFINE_DROP_REASON(FN, FNe) \
+ FN(NOT_SPECIFIED) \
+ FN(NO_SOCKET) \
+ FN(PKT_TOO_SMALL) \
+ FN(TCP_CSUM) \
+ FN(SOCKET_FILTER) \
+ FN(UDP_CSUM) \
+ FN(NETFILTER_DROP) \
+ FN(OTHERHOST) \
+ FN(IP_CSUM) \
+ FN(IP_INHDR) \
+ FN(IP_RPFILTER) \
+ FN(UNICAST_IN_L2_MULTICAST) \
+ FN(XFRM_POLICY) \
+ FN(IP_NOPROTO) \
+ FN(SOCKET_RCVBUFF) \
+ FN(PROTO_MEM) \
+ FN(TCP_MD5NOTFOUND) \
+ FN(TCP_MD5UNEXPECTED) \
+ FN(TCP_MD5FAILURE) \
+ FN(SOCKET_BACKLOG) \
+ FN(TCP_FLAGS) \
+ FN(TCP_ZEROWINDOW) \
+ FN(TCP_OLD_DATA) \
+ FN(TCP_OVERWINDOW) \
+ FN(TCP_OFOMERGE) \
+ FN(TCP_RFC7323_PAWS) \
+ FN(TCP_INVALID_SEQUENCE) \
+ FN(TCP_RESET) \
+ FN(TCP_INVALID_SYN) \
+ FN(TCP_CLOSE) \
+ FN(TCP_FASTOPEN) \
+ FN(TCP_OLD_ACK) \
+ FN(TCP_TOO_OLD_ACK) \
+ FN(TCP_ACK_UNSENT_DATA) \
+ FN(TCP_OFO_QUEUE_PRUNE) \
+ FN(TCP_OFO_DROP) \
+ FN(IP_OUTNOROUTES) \
+ FN(BPF_CGROUP_EGRESS) \
+ FN(IPV6DISABLED) \
+ FN(NEIGH_CREATEFAIL) \
+ FN(NEIGH_FAILED) \
+ FN(NEIGH_QUEUEFULL) \
+ FN(NEIGH_DEAD) \
+ FN(TC_EGRESS) \
+ FN(QDISC_DROP) \
+ FN(CPU_BACKLOG) \
+ FN(XDP) \
+ FN(TC_INGRESS) \
+ FN(UNHANDLED_PROTO) \
+ FN(SKB_CSUM) \
+ FN(SKB_GSO_SEG) \
+ FN(SKB_UCOPY_FAULT) \
+ FN(DEV_HDR) \
+ FN(DEV_READY) \
+ FN(FULL_RING) \
+ FN(NOMEM) \
+ FN(HDR_TRUNC) \
+ FN(TAP_FILTER) \
+ FN(TAP_TXFILTER) \
+ FN(ICMP_CSUM) \
+ FN(INVALID_PROTO) \
+ FN(IP_INADDRERRORS) \
+ FN(IP_INNOROUTES) \
+ FN(PKT_TOO_BIG) \
+ FNe(MAX)
+
+/**
+ * enum skb_drop_reason - the reasons for skb drops
+ *
+ * The reason an skb was dropped, as passed to kfree_skb_reason().
+ */
+enum skb_drop_reason {
+ /**
+ * @SKB_NOT_DROPPED_YET: skb is not dropped yet (used for no-drop case)
+ */
+ SKB_NOT_DROPPED_YET = 0,
+ /** @SKB_DROP_REASON_NOT_SPECIFIED: drop reason is not specified */
+ SKB_DROP_REASON_NOT_SPECIFIED,
+ /** @SKB_DROP_REASON_NO_SOCKET: socket not found */
+ SKB_DROP_REASON_NO_SOCKET,
+ /** @SKB_DROP_REASON_PKT_TOO_SMALL: packet size is too small */
+ SKB_DROP_REASON_PKT_TOO_SMALL,
+ /** @SKB_DROP_REASON_TCP_CSUM: TCP checksum error */
+ SKB_DROP_REASON_TCP_CSUM,
+ /** @SKB_DROP_REASON_SOCKET_FILTER: dropped by socket filter */
+ SKB_DROP_REASON_SOCKET_FILTER,
+ /** @SKB_DROP_REASON_UDP_CSUM: UDP checksum error */
+ SKB_DROP_REASON_UDP_CSUM,
+ /** @SKB_DROP_REASON_NETFILTER_DROP: dropped by netfilter */
+ SKB_DROP_REASON_NETFILTER_DROP,
+ /**
+ * @SKB_DROP_REASON_OTHERHOST: packet doesn't belong to the current host
+ * (interface is in promisc mode)
+ */
+ SKB_DROP_REASON_OTHERHOST,
+ /** @SKB_DROP_REASON_IP_CSUM: IP checksum error */
+ SKB_DROP_REASON_IP_CSUM,
+ /**
+ * @SKB_DROP_REASON_IP_INHDR: there is something wrong with the IP header (see
+ * IPSTATS_MIB_INHDRERRORS)
+ */
+ SKB_DROP_REASON_IP_INHDR,
+ /**
+ * @SKB_DROP_REASON_IP_RPFILTER: IP rpfilter validation failed; see the
+ * documentation for rp_filter in ip-sysctl.rst for more information
+ */
+ SKB_DROP_REASON_IP_RPFILTER,
+ /**
+ * @SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST: the L2 destination address is
+ * multicast, but the L3 destination is unicast.
+ */
+ SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST,
+ /** @SKB_DROP_REASON_XFRM_POLICY: xfrm policy check failed */
+ SKB_DROP_REASON_XFRM_POLICY,
+ /** @SKB_DROP_REASON_IP_NOPROTO: no support for IP protocol */
+ SKB_DROP_REASON_IP_NOPROTO,
+ /** @SKB_DROP_REASON_SOCKET_RCVBUFF: socket receive buff is full */
+ SKB_DROP_REASON_SOCKET_RCVBUFF,
+ /**
+ * @SKB_DROP_REASON_PROTO_MEM: protocol memory limitation, such as a UDP
+ * packet dropped because udp_memory_allocated was exceeded.
+ */
+ SKB_DROP_REASON_PROTO_MEM,
+ /**
+ * @SKB_DROP_REASON_TCP_MD5NOTFOUND: no MD5 hash where one was expected,
+ * corresponding to LINUX_MIB_TCPMD5NOTFOUND
+ */
+ SKB_DROP_REASON_TCP_MD5NOTFOUND,
+ /**
+ * @SKB_DROP_REASON_TCP_MD5UNEXPECTED: MD5 hash and we're not expecting
+ * one, corresponding to LINUX_MIB_TCPMD5UNEXPECTED
+ */
+ SKB_DROP_REASON_TCP_MD5UNEXPECTED,
+ /**
+ * @SKB_DROP_REASON_TCP_MD5FAILURE: MD5 hash is present but wrong, corresponding
+ * to LINUX_MIB_TCPMD5FAILURE
+ */
+ SKB_DROP_REASON_TCP_MD5FAILURE,
+ /**
+ * @SKB_DROP_REASON_SOCKET_BACKLOG: failed to add skb to the socket backlog
+ * (see LINUX_MIB_TCPBACKLOGDROP)
+ */
+ SKB_DROP_REASON_SOCKET_BACKLOG,
+ /** @SKB_DROP_REASON_TCP_FLAGS: TCP flags invalid */
+ SKB_DROP_REASON_TCP_FLAGS,
+ /**
+ * @SKB_DROP_REASON_TCP_ZEROWINDOW: TCP receive window size is zero,
+ * see LINUX_MIB_TCPZEROWINDOWDROP
+ */
+ SKB_DROP_REASON_TCP_ZEROWINDOW,
+ /**
+ * @SKB_DROP_REASON_TCP_OLD_DATA: the TCP data received was already
+ * received before (a spurious retransmit may have happened); see
+ * LINUX_MIB_DELAYEDACKLOST
+ */
+ SKB_DROP_REASON_TCP_OLD_DATA,
+ /**
+ * @SKB_DROP_REASON_TCP_OVERWINDOW: the TCP data is out of window,
+ * the seq of the first byte exceeds the right edge of the
+ * receive window
+ */
+ SKB_DROP_REASON_TCP_OVERWINDOW,
+ /**
+ * @SKB_DROP_REASON_TCP_OFOMERGE: the data of skb is already in the ofo
+ * queue, corresponding to LINUX_MIB_TCPOFOMERGE
+ */
+ SKB_DROP_REASON_TCP_OFOMERGE,
+ /**
+ * @SKB_DROP_REASON_TCP_RFC7323_PAWS: PAWS check, corresponding to
+ * LINUX_MIB_PAWSESTABREJECTED
+ */
+ SKB_DROP_REASON_TCP_RFC7323_PAWS,
+ /** @SKB_DROP_REASON_TCP_INVALID_SEQUENCE: Not acceptable SEQ field */
+ SKB_DROP_REASON_TCP_INVALID_SEQUENCE,
+ /** @SKB_DROP_REASON_TCP_RESET: Invalid RST packet */
+ SKB_DROP_REASON_TCP_RESET,
+ /**
+ * @SKB_DROP_REASON_TCP_INVALID_SYN: Incoming packet has unexpected
+ * SYN flag
+ */
+ SKB_DROP_REASON_TCP_INVALID_SYN,
+ /** @SKB_DROP_REASON_TCP_CLOSE: TCP socket in CLOSE state */
+ SKB_DROP_REASON_TCP_CLOSE,
+ /** @SKB_DROP_REASON_TCP_FASTOPEN: dropped by FASTOPEN request socket */
+ SKB_DROP_REASON_TCP_FASTOPEN,
+ /** @SKB_DROP_REASON_TCP_OLD_ACK: TCP ACK is old, but in window */
+ SKB_DROP_REASON_TCP_OLD_ACK,
+ /** @SKB_DROP_REASON_TCP_TOO_OLD_ACK: TCP ACK is too old */
+ SKB_DROP_REASON_TCP_TOO_OLD_ACK,
+ /**
+ * @SKB_DROP_REASON_TCP_ACK_UNSENT_DATA: TCP ACK for data we haven't
+ * sent yet
+ */
+ SKB_DROP_REASON_TCP_ACK_UNSENT_DATA,
+ /** @SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE: pruned from TCP OFO queue */
+ SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE,
+ /** @SKB_DROP_REASON_TCP_OFO_DROP: data already in receive queue */
+ SKB_DROP_REASON_TCP_OFO_DROP,
+ /** @SKB_DROP_REASON_IP_OUTNOROUTES: route lookup failed */
+ SKB_DROP_REASON_IP_OUTNOROUTES,
+ /**
+ * @SKB_DROP_REASON_BPF_CGROUP_EGRESS: dropped by BPF_PROG_TYPE_CGROUP_SKB
+ * eBPF program
+ */
+ SKB_DROP_REASON_BPF_CGROUP_EGRESS,
+ /** @SKB_DROP_REASON_IPV6DISABLED: IPv6 is disabled on the device */
+ SKB_DROP_REASON_IPV6DISABLED,
+ /** @SKB_DROP_REASON_NEIGH_CREATEFAIL: failed to create neigh entry */
+ SKB_DROP_REASON_NEIGH_CREATEFAIL,
+ /** @SKB_DROP_REASON_NEIGH_FAILED: neigh entry in failed state */
+ SKB_DROP_REASON_NEIGH_FAILED,
+ /** @SKB_DROP_REASON_NEIGH_QUEUEFULL: arp_queue for neigh entry is full */
+ SKB_DROP_REASON_NEIGH_QUEUEFULL,
+ /** @SKB_DROP_REASON_NEIGH_DEAD: neigh entry is dead */
+ SKB_DROP_REASON_NEIGH_DEAD,
+ /** @SKB_DROP_REASON_TC_EGRESS: dropped in TC egress HOOK */
+ SKB_DROP_REASON_TC_EGRESS,
+ /**
+ * @SKB_DROP_REASON_QDISC_DROP: dropped by the qdisc on packet output
+ * (failed to enqueue to the current qdisc)
+ */
+ SKB_DROP_REASON_QDISC_DROP,
+ /**
+ * @SKB_DROP_REASON_CPU_BACKLOG: failed to enqueue the skb to the per CPU
+ * backlog queue. This can be caused by the backlog queue being full
+ * (see netdev_max_backlog in net.rst) or by the RPS flow limit
+ */
+ SKB_DROP_REASON_CPU_BACKLOG,
+ /** @SKB_DROP_REASON_XDP: dropped by XDP in input path */
+ SKB_DROP_REASON_XDP,
+ /** @SKB_DROP_REASON_TC_INGRESS: dropped in TC ingress HOOK */
+ SKB_DROP_REASON_TC_INGRESS,
+ /** @SKB_DROP_REASON_UNHANDLED_PROTO: protocol not implemented or not supported */
+ SKB_DROP_REASON_UNHANDLED_PROTO,
+ /** @SKB_DROP_REASON_SKB_CSUM: sk_buff checksum computation error */
+ SKB_DROP_REASON_SKB_CSUM,
+ /** @SKB_DROP_REASON_SKB_GSO_SEG: gso segmentation error */
+ SKB_DROP_REASON_SKB_GSO_SEG,
+ /**
+ * @SKB_DROP_REASON_SKB_UCOPY_FAULT: failed to copy data from user space,
+ * e.g., via zerocopy_sg_from_iter() or skb_orphan_frags_rx()
+ */
+ SKB_DROP_REASON_SKB_UCOPY_FAULT,
+ /** @SKB_DROP_REASON_DEV_HDR: device driver specific header/metadata is invalid */
+ SKB_DROP_REASON_DEV_HDR,
+ /**
+ * @SKB_DROP_REASON_DEV_READY: the device is not ready to xmit/recv due to
+ * one of its data structures not being up/ready/initialized,
+ * e.g., IFF_UP is not set, or the driver-specific tun->tfiles[txq]
+ * is not initialized
+ */
+ SKB_DROP_REASON_DEV_READY,
+ /** @SKB_DROP_REASON_FULL_RING: ring buffer is full */
+ SKB_DROP_REASON_FULL_RING,
+ /** @SKB_DROP_REASON_NOMEM: error due to OOM */
+ SKB_DROP_REASON_NOMEM,
+ /**
+ * @SKB_DROP_REASON_HDR_TRUNC: failed to truncate/extract the header from
+ * networking data, e.g., failed to pull the protocol header from
+ * frags via pskb_may_pull()
+ */
+ SKB_DROP_REASON_HDR_TRUNC,
+ /**
+ * @SKB_DROP_REASON_TAP_FILTER: dropped by (ebpf) filter directly attached
+ * to tun/tap, e.g., via TUNSETFILTEREBPF
+ */
+ SKB_DROP_REASON_TAP_FILTER,
+ /**
+ * @SKB_DROP_REASON_TAP_TXFILTER: dropped by tx filter implemented at
+ * tun/tap, e.g., check_filter()
+ */
+ SKB_DROP_REASON_TAP_TXFILTER,
+ /** @SKB_DROP_REASON_ICMP_CSUM: ICMP checksum error */
+ SKB_DROP_REASON_ICMP_CSUM,
+ /**
+ * @SKB_DROP_REASON_INVALID_PROTO: the packet doesn't follow RFC 2211,
+ * such as a broadcast ICMP_TIMESTAMP
+ */
+ SKB_DROP_REASON_INVALID_PROTO,
+ /**
+ * @SKB_DROP_REASON_IP_INADDRERRORS: host unreachable, corresponding to
+ * IPSTATS_MIB_INADDRERRORS
+ */
+ SKB_DROP_REASON_IP_INADDRERRORS,
+ /**
+ * @SKB_DROP_REASON_IP_INNOROUTES: network unreachable, corresponding to
+ * IPSTATS_MIB_INNOROUTES
+ */
+ SKB_DROP_REASON_IP_INNOROUTES,
+ /**
+ * @SKB_DROP_REASON_PKT_TOO_BIG: packet size is too big (may exceed the
+ * MTU)
+ */
+ SKB_DROP_REASON_PKT_TOO_BIG,
+ /**
+ * @SKB_DROP_REASON_MAX: the maximum drop reason value, which shouldn't be
+ * used as a real 'reason'
+ */
+ SKB_DROP_REASON_MAX,
+};
+
+#define SKB_DR_INIT(name, reason) \
+ enum skb_drop_reason name = SKB_DROP_REASON_##reason
+#define SKB_DR(name) \
+ SKB_DR_INIT(name, NOT_SPECIFIED)
+#define SKB_DR_SET(name, reason) \
+ (name = SKB_DROP_REASON_##reason)
+#define SKB_DR_OR(name, reason) \
+ do { \
+ if (name == SKB_DROP_REASON_NOT_SPECIFIED || \
+ name == SKB_NOT_DROPPED_YET) \
+ SKB_DR_SET(name, reason); \
+ } while (0)
+
+extern const char * const drop_reasons[];
+
+#endif
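
DEFINE_DROP_REASON() above is an X-macro: each consumer supplies its own
FN()/FNe() expansion, so the enum and the drop_reasons[] string table
declared at the bottom can be generated from a single list and can never
drift apart. A self-contained sketch of the technique on a reduced reason
list (all DEMO_* names are invented):

#include <stdio.h>

#define DEFINE_DEMO_REASON(FN, FNe)	\
	FN(NOT_SPECIFIED)		\
	FN(NO_SOCKET)			\
	FNe(MAX)

enum demo_drop_reason {
	DEMO_NOT_DROPPED_YET = 0,
#define ENUM(name) DEMO_DROP_REASON_##name,
	DEFINE_DEMO_REASON(ENUM, ENUM)
#undef ENUM
};

/* Stringifying expansion keyed by enum value, in the spirit of how the
 * core builds drop_reasons[]. Index 0 stays NULL. */
#define STR(name) [DEMO_DROP_REASON_##name] = #name,
static const char * const demo_reasons[] = {
	DEFINE_DEMO_REASON(STR, STR)
};
#undef STR

int main(void)
{
	enum demo_drop_reason reason = DEMO_DROP_REASON_NOT_SPECIFIED;

	/* Like SKB_DR_SET(reason, NO_SOCKET) once a lookup fails. */
	reason = DEMO_DROP_REASON_NO_SOCKET;
	printf("dropped: %s\n", demo_reasons[reason]);
	return 0;
}
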
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 63495e3443ac..ee369670e20e 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -44,10 +44,22 @@ struct phylink_link_state;
#define DSA_TAG_PROTO_KSZ8795_VALUE 14
#define DSA_TAG_PROTO_OCELOT_VALUE 15
#define DSA_TAG_PROTO_AR9331_VALUE 16
+#define DSA_TAG_PROTO_RTL4_A_VALUE 17
+#define DSA_TAG_PROTO_HELLCREEK_VALUE 18
+#define DSA_TAG_PROTO_XRS700X_VALUE 19
+#define DSA_TAG_PROTO_OCELOT_8021Q_VALUE 20
+#define DSA_TAG_PROTO_SEVILLE_VALUE 21
+#define DSA_TAG_PROTO_BRCM_LEGACY_VALUE 22
+#define DSA_TAG_PROTO_SJA1110_VALUE 23
+#define DSA_TAG_PROTO_RTL8_4_VALUE 24
+#define DSA_TAG_PROTO_RTL8_4T_VALUE 25
+#define DSA_TAG_PROTO_RZN1_A5PSW_VALUE 26
+#define DSA_TAG_PROTO_LAN937X_VALUE 27
enum dsa_tag_protocol {
DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
DSA_TAG_PROTO_BRCM = DSA_TAG_PROTO_BRCM_VALUE,
+ DSA_TAG_PROTO_BRCM_LEGACY = DSA_TAG_PROTO_BRCM_LEGACY_VALUE,
DSA_TAG_PROTO_BRCM_PREPEND = DSA_TAG_PROTO_BRCM_PREPEND_VALUE,
DSA_TAG_PROTO_DSA = DSA_TAG_PROTO_DSA_VALUE,
DSA_TAG_PROTO_EDSA = DSA_TAG_PROTO_EDSA_VALUE,
@@ -63,48 +75,67 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_KSZ8795 = DSA_TAG_PROTO_KSZ8795_VALUE,
DSA_TAG_PROTO_OCELOT = DSA_TAG_PROTO_OCELOT_VALUE,
DSA_TAG_PROTO_AR9331 = DSA_TAG_PROTO_AR9331_VALUE,
+ DSA_TAG_PROTO_RTL4_A = DSA_TAG_PROTO_RTL4_A_VALUE,
+ DSA_TAG_PROTO_HELLCREEK = DSA_TAG_PROTO_HELLCREEK_VALUE,
+ DSA_TAG_PROTO_XRS700X = DSA_TAG_PROTO_XRS700X_VALUE,
+ DSA_TAG_PROTO_OCELOT_8021Q = DSA_TAG_PROTO_OCELOT_8021Q_VALUE,
+ DSA_TAG_PROTO_SEVILLE = DSA_TAG_PROTO_SEVILLE_VALUE,
+ DSA_TAG_PROTO_SJA1110 = DSA_TAG_PROTO_SJA1110_VALUE,
+ DSA_TAG_PROTO_RTL8_4 = DSA_TAG_PROTO_RTL8_4_VALUE,
+ DSA_TAG_PROTO_RTL8_4T = DSA_TAG_PROTO_RTL8_4T_VALUE,
+ DSA_TAG_PROTO_RZN1_A5PSW = DSA_TAG_PROTO_RZN1_A5PSW_VALUE,
+ DSA_TAG_PROTO_LAN937X = DSA_TAG_PROTO_LAN937X_VALUE,
};
-struct packet_type;
struct dsa_switch;
struct dsa_device_ops {
struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
- struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt);
- int (*flow_dissect)(const struct sk_buff *skb, __be16 *proto,
- int *offset);
- /* Used to determine which traffic should match the DSA filter in
- * eth_type_trans, and which, if any, should bypass it and be processed
- * as regular on the master net device.
- */
- bool (*filter)(const struct sk_buff *skb, struct net_device *dev);
- unsigned int overhead;
+ struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev);
+ void (*flow_dissect)(const struct sk_buff *skb, __be16 *proto,
+ int *offset);
+ int (*connect)(struct dsa_switch *ds);
+ void (*disconnect)(struct dsa_switch *ds);
+ unsigned int needed_headroom;
+ unsigned int needed_tailroom;
const char *name;
enum dsa_tag_protocol proto;
+ /* Some tagging protocols either mangle or shift the destination MAC
+ * address, in which case the DSA master would drop packets on ingress
+ * if the destination MAC address it then parses is not in its RX
+ * filter.
+ */
+ bool promisc_on_master;
+};
+
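
For orientation, a hedged sketch of a tagger written against the reworked ops
above: .rcv has lost its packet_type argument, and the old catch-all
'overhead' is now split into needed_headroom/needed_tailroom. Everything
below (the names and the no-op handlers) is illustrative, assumes kernel
context, and leans on the module_dsa_tag_driver() helper from this header;
real taggers live in net/dsa/tag_*.c:

#define DEMO_TAG_LEN 4

static struct sk_buff *demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* A real tagger would insert a DEMO_TAG_LEN byte tag here;
	 * room for it is guaranteed by .needed_headroom below. */
	return skb;
}

static struct sk_buff *demo_rcv(struct sk_buff *skb, struct net_device *dev)
{
	/* Note the reduced signature: no struct packet_type argument.
	 * A real tagger would parse the tag and steer skb->dev to the
	 * right slave port here. */
	return skb;
}

static const struct dsa_device_ops demo_netdev_ops = {
	.name			= "demo",
	.proto			= DSA_TAG_PROTO_NONE,	/* placeholder */
	.xmit			= demo_xmit,
	.rcv			= demo_rcv,
	.needed_headroom	= DEMO_TAG_LEN,
};

module_dsa_tag_driver(demo_netdev_ops);
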
+/* This structure defines the control interfaces that are overlaid by the
+ * DSA layer on top of the DSA CPU/management net_device instance. This is
+ * used by the core net_device layer while calling various net_device_ops
+ * function pointers.
+ */
+struct dsa_netdevice_ops {
+ int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr,
+ int cmd);
};
#define DSA_TAG_DRIVER_ALIAS "dsa_tag-"
#define MODULE_ALIAS_DSA_TAG_DRIVER(__proto) \
MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS __stringify(__proto##_VALUE))
-struct dsa_skb_cb {
- struct sk_buff *clone;
-};
-
-struct __dsa_skb_cb {
- struct dsa_skb_cb cb;
- u8 priv[48 - sizeof(struct dsa_skb_cb)];
+struct dsa_lag {
+ struct net_device *dev;
+ unsigned int id;
+ struct mutex fdb_lock;
+ struct list_head fdbs;
+ refcount_t refcount;
};
-#define DSA_SKB_CB(skb) ((struct dsa_skb_cb *)((skb)->cb))
-
-#define DSA_SKB_CB_PRIV(skb) \
- ((void *)(skb)->cb + offsetof(struct __dsa_skb_cb, priv))
-
struct dsa_switch_tree {
struct list_head list;
+ /* List of switch ports */
+ struct list_head ports;
+
/* Notifier chain for switch-wide events */
struct raw_notifier_head nh;
@@ -114,6 +145,19 @@ struct dsa_switch_tree {
/* Number of switches attached to this tree */
struct kref refcount;
+ /* Maps offloaded LAG netdevs to a zero-based linear ID for
+ * drivers that need it.
+ */
+ struct dsa_lag **lags;
+
+ /* Tagging protocol operations */
+ const struct dsa_device_ops *tag_ops;
+
+ /* Default tagging protocol preferred by the switches in this
+ * tree.
+ */
+ enum dsa_tag_protocol default_proto;
+
/* Has this tree been applied to the hardware? */
bool setup;
@@ -123,16 +167,55 @@ struct dsa_switch_tree {
*/
struct dsa_platform_data *pd;
- /* List of switch ports */
- struct list_head ports;
-
/* List of DSA links composing the routing table */
struct list_head rtable;
+
+ /* Length of "lags" array */
+ unsigned int lags_len;
+
+ /* Track the largest switch index within a tree */
+ unsigned int last_switch;
};
-/* TC matchall action types, only mirroring for now */
+/* LAG IDs are one-based, the dst->lags array is zero-based */
+#define dsa_lags_foreach_id(_id, _dst) \
+ for ((_id) = 1; (_id) <= (_dst)->lags_len; (_id)++) \
+ if ((_dst)->lags[(_id) - 1])
+
+#define dsa_lag_foreach_port(_dp, _dst, _lag) \
+ list_for_each_entry((_dp), &(_dst)->ports, list) \
+ if (dsa_port_offloads_lag((_dp), (_lag)))
+
+#define dsa_hsr_foreach_port(_dp, _ds, _hsr) \
+ list_for_each_entry((_dp), &(_ds)->dst->ports, list) \
+ if ((_dp)->ds == (_ds) && (_dp)->hsr_dev == (_hsr))
+
+static inline struct dsa_lag *dsa_lag_by_id(struct dsa_switch_tree *dst,
+ unsigned int id)
+{
+ /* DSA LAG IDs are one-based, dst->lags is zero-based */
+ return dst->lags[id - 1];
+}
+
+static inline int dsa_lag_id(struct dsa_switch_tree *dst,
+ struct net_device *lag_dev)
+{
+ unsigned int id;
+
+ dsa_lags_foreach_id(id, dst) {
+ struct dsa_lag *lag = dsa_lag_by_id(dst, id);
+
+ if (lag->dev == lag_dev)
+ return lag->id;
+ }
+
+ return -ENODEV;
+}
+
+/* TC matchall action types */
enum dsa_port_mall_action_type {
DSA_PORT_MALL_MIRROR,
+ DSA_PORT_MALL_POLICER,
};
/* TC mirroring entry */
@@ -141,6 +224,12 @@ struct dsa_mall_mirror_tc_entry {
bool ingress;
};
+/* TC port policer entry */
+struct dsa_mall_policer_tc_entry {
+ u32 burst;
+ u64 rate_bytes_per_sec;
+};
+
/* TC matchall entry */
struct dsa_mall_tc_entry {
struct list_head list;
@@ -148,9 +237,16 @@ struct dsa_mall_tc_entry {
enum dsa_port_mall_action_type type;
union {
struct dsa_mall_mirror_tc_entry mirror;
+ struct dsa_mall_policer_tc_entry policer;
};
};
+struct dsa_bridge {
+ struct net_device *dev;
+ unsigned int num;
+ bool tx_fwd_offload;
+ refcount_t refcount;
+};
struct dsa_port {
/* A CPU port is physically connected to a master device.
@@ -161,14 +257,18 @@ struct dsa_port {
struct net_device *slave;
};
- /* CPU port tagging operations used by master or slave devices */
+ /* Copy of the tagging protocol operations, for quicker access
+ * in the data path. Valid only for the CPU ports.
+ */
const struct dsa_device_ops *tag_ops;
/* Copies for faster access in master receive hot path */
struct dsa_switch_tree *dst;
- struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt);
- bool (*filter)(const struct sk_buff *skb, struct net_device *dev);
+ struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev);
+
+ struct dsa_switch *ds;
+
+ unsigned int index;
enum {
DSA_PORT_TYPE_UNUSED = 0,
@@ -177,29 +277,45 @@ struct dsa_port {
DSA_PORT_TYPE_USER,
} type;
- struct dsa_switch *ds;
- unsigned int index;
const char *name;
struct dsa_port *cpu_dp;
- const char *mac;
+ u8 mac[ETH_ALEN];
+
+ u8 stp_state;
+
+ /* Warning: the following bit fields are not atomic, and updating them
+ * can only be done from code paths where concurrency is not possible
+ * (probe time or under rtnl_lock).
+ */
+ u8 vlan_filtering:1;
+
+ /* Managed by DSA on user ports and by drivers on CPU and DSA ports */
+ u8 learning:1;
+
+ u8 lag_tx_enabled:1;
+
+ /* Master state bits, valid only on CPU ports */
+ u8 master_admin_up:1;
+ u8 master_oper_up:1;
+
+ /* Valid only on user ports */
+ u8 cpu_port_in_lag:1;
+
+ u8 setup:1;
+
struct device_node *dn;
unsigned int ageing_time;
- bool vlan_filtering;
- u8 stp_state;
- struct net_device *bridge_dev;
+
+ struct dsa_bridge *bridge;
struct devlink_port devlink_port;
struct phylink *pl;
struct phylink_config pl_config;
+ struct dsa_lag *lag;
+ struct net_device *hsr_dev;
struct list_head list;
/*
- * Give the switch driver somewhere to hang its per-port private data
- * structures (accessible from the tagger).
- */
- void *priv;
-
- /*
* Original copy of the master netdev ethtool_ops
*/
const struct ethtool_ops *orig_ethtool_ops;
@@ -207,9 +323,18 @@ struct dsa_port {
/*
* Original copy of the master netdev net_device_ops
*/
- const struct net_device_ops *orig_ndo_ops;
+ const struct dsa_netdevice_ops *netdev_ops;
- bool setup;
+ /* List of MAC addresses that must be forwarded on this port.
+ * These are only valid on CPU ports and DSA links.
+ */
+ struct mutex addr_lists_lock;
+ struct list_head fdbs;
+ struct list_head mdbs;
+
+ /* List of VLANs that CPU and DSA ports are members of. */
+ struct mutex vlans_lock;
+ struct list_head vlans;
};
/* TODO: ideally DSA ports would have a single dp->link_dp member,
@@ -223,9 +348,37 @@ struct dsa_link {
struct list_head list;
};
-struct dsa_switch {
- bool setup;
+enum dsa_db_type {
+ DSA_DB_PORT,
+ DSA_DB_LAG,
+ DSA_DB_BRIDGE,
+};
+
+struct dsa_db {
+ enum dsa_db_type type;
+
+ union {
+ const struct dsa_port *dp;
+ struct dsa_lag lag;
+ struct dsa_bridge bridge;
+ };
+};
+
+struct dsa_mac_addr {
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
+ refcount_t refcount;
+ struct list_head list;
+ struct dsa_db db;
+};
+
+struct dsa_vlan {
+ u16 vid;
+ refcount_t refcount;
+ struct list_head list;
+};
+struct dsa_switch {
struct device *dev;
/*
@@ -234,6 +387,59 @@ struct dsa_switch {
struct dsa_switch_tree *dst;
unsigned int index;
+ /* Warning: the following bit fields are not atomic, and updating them
+ * can only be done from code paths where concurrency is not possible
+ * (probe time or under rtnl_lock).
+ */
+ u32 setup:1;
+
+ /* Disallow bridge core from requesting different VLAN awareness
+ * settings on ports if not hardware-supported
+ */
+ u32 vlan_filtering_is_global:1;
+
+ /* Keep VLAN filtering enabled on ports not offloading any upper */
+ u32 needs_standalone_vlan_filtering:1;
+
+ /* Pass .port_vlan_add and .port_vlan_del to drivers even for bridges
+ * that have vlan_filtering=0. All drivers should ideally set this (and
+ * then the option would get removed), but it is unknown whether this
+ * would break things or not.
+ */
+ u32 configure_vlan_while_not_filtering:1;
+
+ /* If the switch driver always programs the CPU port as egress tagged
+ * despite the VLAN configuration indicating otherwise, then setting
+ * @untag_bridge_pvid will force the DSA receive path to pop the
+ * bridge's default_pvid VLAN tagged frames to offer a consistent
+ * behavior between a vlan_filtering=0 and vlan_filtering=1 bridge
+ * device.
+ */
+ u32 untag_bridge_pvid:1;
+
+ /* Let DSA manage the FDB entries towards the
+ * CPU, based on the software bridge database.
+ */
+ u32 assisted_learning_on_cpu_port:1;
+
+ /* In case vlan_filtering_is_global is set, the VLAN awareness state
+ * should be retrieved from here and not from the per-port settings.
+ */
+ u32 vlan_filtering:1;
+
+ /* For switches that only have the MRU configurable. To ensure the
+ * configured MTU is not exceeded, normalization of MRU on all bridged
+ * interfaces is needed.
+ */
+ u32 mtu_enforcement_ingress:1;
+
+ /* Drivers that isolate the FDBs of multiple bridges must set this
+ * to true to receive the bridge as an argument in .port_fdb_{add,del}
+ * and .port_mdb_{add,del}. Otherwise, the bridge.num will always be
+ * passed as zero.
+ */
+ u32 fdb_isolation:1;
+
/* Listener for switch fabric events */
struct notifier_block nb;
@@ -243,6 +449,8 @@ struct dsa_switch {
*/
void *priv;
+ void *tagger_data;
+
/*
* Configuration data for this switch.
*/
@@ -263,28 +471,31 @@ struct dsa_switch {
unsigned int ageing_time_min;
unsigned int ageing_time_max;
+ /* Storage for drivers using tag_8021q */
+ struct dsa_8021q_context *tag_8021q_ctx;
+
/* devlink used to represent this switch device */
struct devlink *devlink;
/* Number of switch port queues */
unsigned int num_tx_queues;
- /* Disallow bridge core from requesting different VLAN awareness
- * settings on ports if not hardware-supported
- */
- bool vlan_filtering_is_global;
-
- /* In case vlan_filtering_is_global is set, the VLAN awareness state
- * should be retrieved from here and not from the per-port settings.
+ /* Drivers that benefit from having an ID associated with each
+ * offloaded LAG should set this to the maximum number of
+ * supported IDs. DSA will then maintain a mapping of _at
+ * least_ this many IDs, accessible to drivers via
+ * dsa_lag_id().
*/
- bool vlan_filtering;
+ unsigned int num_lag_ids;
- /* MAC PCS does not provide link state change interrupt, and requires
- * polling. Flag passed on to PHYLINK.
+ /* Drivers that support bridge forwarding offload or FDB isolation
+ * should set this to the maximum number of bridges spanning the same
+ * switch tree (or all trees, in the case of cross-tree bridging
+ * support) that can be offloaded.
*/
- bool pcs_poll;
+ unsigned int max_num_bridges;
- size_t num_ports;
+ unsigned int num_ports;
};
static inline struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
@@ -299,6 +510,32 @@ static inline struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
return NULL;
}
+static inline bool dsa_port_is_dsa(struct dsa_port *port)
+{
+ return port->type == DSA_PORT_TYPE_DSA;
+}
+
+static inline bool dsa_port_is_cpu(struct dsa_port *port)
+{
+ return port->type == DSA_PORT_TYPE_CPU;
+}
+
+static inline bool dsa_port_is_user(struct dsa_port *dp)
+{
+ return dp->type == DSA_PORT_TYPE_USER;
+}
+
+static inline bool dsa_port_is_unused(struct dsa_port *dp)
+{
+ return dp->type == DSA_PORT_TYPE_UNUSED;
+}
+
+static inline bool dsa_port_master_is_operational(struct dsa_port *dp)
+{
+ return dsa_port_is_cpu(dp) && dp->master_admin_up &&
+ dp->master_oper_up;
+}
+
static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
{
return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
@@ -319,14 +556,64 @@ static inline bool dsa_is_user_port(struct dsa_switch *ds, int p)
return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_USER;
}
+#define dsa_tree_for_each_user_port(_dp, _dst) \
+ list_for_each_entry((_dp), &(_dst)->ports, list) \
+ if (dsa_port_is_user((_dp)))
+
+#define dsa_tree_for_each_user_port_continue_reverse(_dp, _dst) \
+ list_for_each_entry_continue_reverse((_dp), &(_dst)->ports, list) \
+ if (dsa_port_is_user((_dp)))
+
+#define dsa_tree_for_each_cpu_port(_dp, _dst) \
+ list_for_each_entry((_dp), &(_dst)->ports, list) \
+ if (dsa_port_is_cpu((_dp)))
+
+#define dsa_switch_for_each_port(_dp, _ds) \
+ list_for_each_entry((_dp), &(_ds)->dst->ports, list) \
+ if ((_dp)->ds == (_ds))
+
+#define dsa_switch_for_each_port_safe(_dp, _next, _ds) \
+ list_for_each_entry_safe((_dp), (_next), &(_ds)->dst->ports, list) \
+ if ((_dp)->ds == (_ds))
+
+#define dsa_switch_for_each_port_continue_reverse(_dp, _ds) \
+ list_for_each_entry_continue_reverse((_dp), &(_ds)->dst->ports, list) \
+ if ((_dp)->ds == (_ds))
+
+#define dsa_switch_for_each_available_port(_dp, _ds) \
+ dsa_switch_for_each_port((_dp), (_ds)) \
+ if (!dsa_port_is_unused((_dp)))
+
+#define dsa_switch_for_each_user_port(_dp, _ds) \
+ dsa_switch_for_each_port((_dp), (_ds)) \
+ if (dsa_port_is_user((_dp)))
+
+#define dsa_switch_for_each_cpu_port(_dp, _ds) \
+ dsa_switch_for_each_port((_dp), (_ds)) \
+ if (dsa_port_is_cpu((_dp)))
+
+#define dsa_switch_for_each_cpu_port_continue_reverse(_dp, _ds) \
+ dsa_switch_for_each_port_continue_reverse((_dp), (_ds)) \
+ if (dsa_port_is_cpu((_dp)))
+
static inline u32 dsa_user_ports(struct dsa_switch *ds)
{
+ struct dsa_port *dp;
u32 mask = 0;
- int p;
- for (p = 0; p < ds->num_ports; p++)
- if (dsa_is_user_port(ds, p))
- mask |= BIT(p);
+ dsa_switch_for_each_user_port(dp, ds)
+ mask |= BIT(dp->index);
+
+ return mask;
+}
+
+static inline u32 dsa_cpu_ports(struct dsa_switch *ds)
+{
+ struct dsa_port *cpu_dp;
+ u32 mask = 0;
+
+ dsa_switch_for_each_cpu_port(cpu_dp, ds)
+ mask |= BIT(cpu_dp->index);
return mask;
}
@@ -366,6 +653,50 @@ static inline unsigned int dsa_upstream_port(struct dsa_switch *ds, int port)
return dsa_towards_port(ds, cpu_dp->ds->index, cpu_dp->index);
}
+/* Return true if this is the local port used to reach the CPU port */
+static inline bool dsa_is_upstream_port(struct dsa_switch *ds, int port)
+{
+ if (dsa_is_unused_port(ds, port))
+ return false;
+
+ return port == dsa_upstream_port(ds, port);
+}
+
+/* Return true if this is a DSA port leading away from the CPU */
+static inline bool dsa_is_downstream_port(struct dsa_switch *ds, int port)
+{
+ return dsa_is_dsa_port(ds, port) && !dsa_is_upstream_port(ds, port);
+}
+
+/* Return the local port used to reach the CPU port */
+static inline unsigned int dsa_switch_upstream_port(struct dsa_switch *ds)
+{
+ struct dsa_port *dp;
+
+ dsa_switch_for_each_available_port(dp, ds) {
+ return dsa_upstream_port(ds, dp->index);
+ }
+
+ return ds->num_ports;
+}
+
+/* Return true if @upstream_ds is an upstream switch of @downstream_ds, meaning
+ * that the routing port from @downstream_ds to @upstream_ds is also the port
+ * which @downstream_ds uses to reach its dedicated CPU.
+ */
+static inline bool dsa_switch_is_upstream_of(struct dsa_switch *upstream_ds,
+ struct dsa_switch *downstream_ds)
+{
+ int routing_port;
+
+ if (upstream_ds == downstream_ds)
+ return true;
+
+ routing_port = dsa_routing_port(downstream_ds, upstream_ds->index);
+
+ return dsa_is_upstream_port(downstream_ds, routing_port);
+}
+
static inline bool dsa_port_is_vlan_filtering(const struct dsa_port *dp)
{
const struct dsa_switch *ds = dp->ds;
@@ -376,15 +707,157 @@ static inline bool dsa_port_is_vlan_filtering(const struct dsa_port *dp)
return dp->vlan_filtering;
}
+static inline unsigned int dsa_port_lag_id_get(struct dsa_port *dp)
+{
+ return dp->lag ? dp->lag->id : 0;
+}
+
+static inline struct net_device *dsa_port_lag_dev_get(struct dsa_port *dp)
+{
+ return dp->lag ? dp->lag->dev : NULL;
+}
+
+static inline bool dsa_port_offloads_lag(struct dsa_port *dp,
+ const struct dsa_lag *lag)
+{
+ return dsa_port_lag_dev_get(dp) == lag->dev;
+}
+
+static inline struct net_device *dsa_port_to_master(const struct dsa_port *dp)
+{
+ if (dp->cpu_port_in_lag)
+ return dsa_port_lag_dev_get(dp->cpu_dp);
+
+ return dp->cpu_dp->master;
+}
+
+static inline
+struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp)
+{
+ if (!dp->bridge)
+ return NULL;
+
+ if (dp->lag)
+ return dp->lag->dev;
+ else if (dp->hsr_dev)
+ return dp->hsr_dev;
+
+ return dp->slave;
+}
+
+static inline struct net_device *
+dsa_port_bridge_dev_get(const struct dsa_port *dp)
+{
+ return dp->bridge ? dp->bridge->dev : NULL;
+}
+
+static inline unsigned int dsa_port_bridge_num_get(struct dsa_port *dp)
+{
+ return dp->bridge ? dp->bridge->num : 0;
+}
+
+static inline bool dsa_port_bridge_same(const struct dsa_port *a,
+ const struct dsa_port *b)
+{
+ struct net_device *br_a = dsa_port_bridge_dev_get(a);
+ struct net_device *br_b = dsa_port_bridge_dev_get(b);
+
+ /* Standalone ports are not in the same bridge with one another */
+ return (!br_a || !br_b) ? false : (br_a == br_b);
+}
+
+static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp,
+ const struct net_device *dev)
+{
+ return dsa_port_to_bridge_port(dp) == dev;
+}
+
+static inline bool
+dsa_port_offloads_bridge_dev(struct dsa_port *dp,
+ const struct net_device *bridge_dev)
+{
+ /* True if the DSA port is connected to a bridge and the event
+ * was emitted for that bridge.
+ */
+ return dsa_port_bridge_dev_get(dp) == bridge_dev;
+}
+
+static inline bool dsa_port_offloads_bridge(struct dsa_port *dp,
+ const struct dsa_bridge *bridge)
+{
+ return dsa_port_bridge_dev_get(dp) == bridge->dev;
+}
+
+/* Returns true if any port of this tree offloads the given net_device */
+static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst,
+ const struct net_device *dev)
+{
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dsa_port_offloads_bridge_port(dp, dev))
+ return true;
+
+ return false;
+}
+
+/* Returns true if any port of this tree offloads the given bridge */
+static inline bool
+dsa_tree_offloads_bridge_dev(struct dsa_switch_tree *dst,
+ const struct net_device *bridge_dev)
+{
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dsa_port_offloads_bridge_dev(dp, bridge_dev))
+ return true;
+
+ return false;
+}
+
+static inline bool dsa_port_tree_same(const struct dsa_port *a,
+ const struct dsa_port *b)
+{
+ return a->ds->dst == b->ds->dst;
+}
+
typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
bool is_static, void *data);
struct dsa_switch_ops {
+ /*
+ * Tagging protocol helpers called for the CPU ports and DSA links.
+ * @get_tag_protocol retrieves the initial tagging protocol and is
+ * mandatory. Switches which can operate using multiple tagging
+ * protocols should implement @change_tag_protocol and report in
+ * @get_tag_protocol the tagger in current use.
+ */
enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mprot);
+ int (*change_tag_protocol)(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto);
+ /*
+ * Method for switch drivers to connect to the tagging protocol driver
+ * in current use. The switch driver can provide handlers for certain
+ * types of packets for switch management.
+ */
+ int (*connect_tag_protocol)(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto);
+
+ int (*port_change_master)(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack);
+ /* Optional switch-wide initialization and destruction methods */
int (*setup)(struct dsa_switch *ds);
void (*teardown)(struct dsa_switch *ds);
+
+ /* Per-port initialization and destruction methods. Mandatory if the
+ * driver registers devlink port regions, optional otherwise.
+ */
+ int (*port_setup)(struct dsa_switch *ds, int port);
+ void (*port_teardown)(struct dsa_switch *ds, int port);
+
u32 (*get_phy_flags)(struct dsa_switch *ds, int port);
/*
@@ -405,9 +878,14 @@ struct dsa_switch_ops {
/*
* PHYLINK integration
*/
+ void (*phylink_get_caps)(struct dsa_switch *ds, int port,
+ struct phylink_config *config);
void (*phylink_validate)(struct dsa_switch *ds, int port,
unsigned long *supported,
struct phylink_link_state *state);
+ struct phylink_pcs *(*phylink_mac_select_pcs)(struct dsa_switch *ds,
+ int port,
+ phy_interface_t iface);
int (*phylink_mac_link_state)(struct dsa_switch *ds, int port,
struct phylink_link_state *state);
void (*phylink_mac_config)(struct dsa_switch *ds, int port,
@@ -420,11 +898,13 @@ struct dsa_switch_ops {
void (*phylink_mac_link_up)(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev);
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause);
void (*phylink_fixed_state)(struct dsa_switch *ds, int port,
struct phylink_link_state *state);
/*
- * ethtool hardware statistics.
+ * Port statistics counters.
*/
void (*get_strings)(struct dsa_switch *ds, int port,
u32 stringset, uint8_t *data);
@@ -433,6 +913,21 @@ struct dsa_switch_ops {
int (*get_sset_count)(struct dsa_switch *ds, int port, int sset);
void (*get_ethtool_phy_stats)(struct dsa_switch *ds,
int port, uint64_t *data);
+ void (*get_eth_phy_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_eth_phy_stats *phy_stats);
+ void (*get_eth_mac_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats);
+ void (*get_eth_ctrl_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats);
+ void (*get_rmon_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges);
+ void (*get_stats64)(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s);
+ void (*get_pause_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats);
+ void (*self_test)(struct dsa_switch *ds, int port,
+ struct ethtool_test *etest, u64 *data);
/*
* ethtool Wake-on-LAN
@@ -449,6 +944,18 @@ struct dsa_switch_ops {
struct ethtool_ts_info *ts);
/*
+ * DCB ops
+ */
+ int (*port_get_default_prio)(struct dsa_switch *ds, int port);
+ int (*port_set_default_prio)(struct dsa_switch *ds, int port,
+ u8 prio);
+ int (*port_get_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp);
+ int (*port_add_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp,
+ u8 prio);
+ int (*port_del_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp,
+ u8 prio);
+
+ /*
* Suspend and resume
*/
int (*suspend)(struct dsa_switch *ds);
@@ -484,49 +991,77 @@ struct dsa_switch_ops {
struct ethtool_regs *regs, void *p);
/*
+ * Upper device tracking.
+ */
+ int (*port_prechangeupper)(struct dsa_switch *ds, int port,
+ struct netdev_notifier_changeupper_info *info);
+
+ /*
* Bridge integration
*/
int (*set_ageing_time)(struct dsa_switch *ds, unsigned int msecs);
int (*port_bridge_join)(struct dsa_switch *ds, int port,
- struct net_device *bridge);
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack);
void (*port_bridge_leave)(struct dsa_switch *ds, int port,
- struct net_device *bridge);
+ struct dsa_bridge bridge);
void (*port_stp_state_set)(struct dsa_switch *ds, int port,
u8 state);
+ int (*port_mst_state_set)(struct dsa_switch *ds, int port,
+ const struct switchdev_mst_state *state);
void (*port_fast_age)(struct dsa_switch *ds, int port);
- int (*port_egress_floods)(struct dsa_switch *ds, int port,
- bool unicast, bool multicast);
+ int (*port_vlan_fast_age)(struct dsa_switch *ds, int port, u16 vid);
+ int (*port_pre_bridge_flags)(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack);
+ int (*port_bridge_flags)(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack);
+ void (*port_set_host_flood)(struct dsa_switch *ds, int port,
+ bool uc, bool mc);
/*
* VLAN support
*/
int (*port_vlan_filtering)(struct dsa_switch *ds, int port,
- bool vlan_filtering);
- int (*port_vlan_prepare)(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan);
- void (*port_vlan_add)(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan);
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack);
+ int (*port_vlan_add)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
int (*port_vlan_del)(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
+ int (*vlan_msti_set)(struct dsa_switch *ds, struct dsa_bridge bridge,
+ const struct switchdev_vlan_msti *msti);
+
/*
* Forwarding database
*/
int (*port_fdb_add)(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid);
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
int (*port_fdb_del)(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid);
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
int (*port_fdb_dump)(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data);
+ int (*lag_fdb_add)(struct dsa_switch *ds, struct dsa_lag lag,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
+ int (*lag_fdb_del)(struct dsa_switch *ds, struct dsa_lag lag,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
/*
* Multicast database
*/
- int (*port_mdb_prepare)(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
- void (*port_mdb_add)(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+ int (*port_mdb_add)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
int (*port_mdb_del)(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
/*
* RXNFC
*/
@@ -538,21 +1073,41 @@ struct dsa_switch_ops {
/*
* TC integration
*/
+ int (*cls_flower_add)(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+ int (*cls_flower_del)(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+ int (*cls_flower_stats)(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
int (*port_mirror_add)(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress);
+ bool ingress, struct netlink_ext_ack *extack);
void (*port_mirror_del)(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror);
+ int (*port_policer_add)(struct dsa_switch *ds, int port,
+ struct dsa_mall_policer_tc_entry *policer);
+ void (*port_policer_del)(struct dsa_switch *ds, int port);
int (*port_setup_tc)(struct dsa_switch *ds, int port,
enum tc_setup_type type, void *type_data);
/*
* Cross-chip operations
*/
- int (*crosschip_bridge_join)(struct dsa_switch *ds, int sw_index,
- int port, struct net_device *br);
- void (*crosschip_bridge_leave)(struct dsa_switch *ds, int sw_index,
- int port, struct net_device *br);
+ int (*crosschip_bridge_join)(struct dsa_switch *ds, int tree_index,
+ int sw_index, int port,
+ struct dsa_bridge bridge,
+ struct netlink_ext_ack *extack);
+ void (*crosschip_bridge_leave)(struct dsa_switch *ds, int tree_index,
+ int sw_index, int port,
+ struct dsa_bridge bridge);
+ int (*crosschip_lag_change)(struct dsa_switch *ds, int sw_index,
+ int port);
+ int (*crosschip_lag_join)(struct dsa_switch *ds, int sw_index,
+ int port, struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack);
+ int (*crosschip_lag_leave)(struct dsa_switch *ds, int sw_index,
+ int port, struct dsa_lag lag);
/*
* PTP functionality
@@ -561,16 +1116,108 @@ struct dsa_switch_ops {
struct ifreq *ifr);
int (*port_hwtstamp_set)(struct dsa_switch *ds, int port,
struct ifreq *ifr);
- bool (*port_txtstamp)(struct dsa_switch *ds, int port,
- struct sk_buff *clone, unsigned int type);
+ void (*port_txtstamp)(struct dsa_switch *ds, int port,
+ struct sk_buff *skb);
bool (*port_rxtstamp)(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type);
- /* Devlink parameters */
+ /* Devlink parameters, etc. */
int (*devlink_param_get)(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx);
int (*devlink_param_set)(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx);
+ int (*devlink_info_get)(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+ int (*devlink_sb_pool_get)(struct dsa_switch *ds,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info);
+ int (*devlink_sb_pool_set)(struct dsa_switch *ds, unsigned int sb_index,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack);
+ int (*devlink_sb_port_pool_get)(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold);
+ int (*devlink_sb_port_pool_set)(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold,
+ struct netlink_ext_ack *extack);
+ int (*devlink_sb_tc_pool_bind_get)(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold);
+ int (*devlink_sb_tc_pool_bind_set)(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack);
+ int (*devlink_sb_occ_snapshot)(struct dsa_switch *ds,
+ unsigned int sb_index);
+ int (*devlink_sb_occ_max_clear)(struct dsa_switch *ds,
+ unsigned int sb_index);
+ int (*devlink_sb_occ_port_pool_get)(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max);
+ int (*devlink_sb_occ_tc_port_bind_get)(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max);
+
+ /*
+ * MTU change functionality. Switches can also adjust their MRU through
+ * this method. By MTU, we mean the SDU (L2 payload) length.
+ * If the switch needs to account for the DSA tag on the CPU port, this
+ * method needs to do so privately.
+ */
+ int (*port_change_mtu)(struct dsa_switch *ds, int port,
+ int new_mtu);
+ int (*port_max_mtu)(struct dsa_switch *ds, int port);
+
+ /*
+ * LAG integration
+ */
+ int (*port_lag_change)(struct dsa_switch *ds, int port);
+ int (*port_lag_join)(struct dsa_switch *ds, int port,
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack);
+ int (*port_lag_leave)(struct dsa_switch *ds, int port,
+ struct dsa_lag lag);
+
+ /*
+ * HSR integration
+ */
+ int (*port_hsr_join)(struct dsa_switch *ds, int port,
+ struct net_device *hsr);
+ int (*port_hsr_leave)(struct dsa_switch *ds, int port,
+ struct net_device *hsr);
+
+ /*
+ * MRP integration
+ */
+ int (*port_mrp_add)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_mrp *mrp);
+ int (*port_mrp_del)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_mrp *mrp);
+ int (*port_mrp_add_ring_role)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_ring_role_mrp *mrp);
+ int (*port_mrp_del_ring_role)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_ring_role_mrp *mrp);
+
+ /*
+ * tag_8021q operations
+ */
+ int (*tag_8021q_vlan_add)(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags);
+ int (*tag_8021q_vlan_del)(struct dsa_switch *ds, int port, u16 vid);
+
+ /*
+ * DSA master tracking operations
+ */
+ void (*master_state_change)(struct dsa_switch *ds,
+ const struct net_device *master,
+ bool operational);
};
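
For orientation, a minimal driver against this reworked ops table only has to supply @get_tag_protocol plus whatever the hardware supports; everything else may stay NULL. A hypothetical sketch (all foo_* names are invented):

static enum dsa_tag_protocol
foo_get_tag_protocol(struct dsa_switch *ds, int port,
		     enum dsa_tag_protocol mprot)
{
	return DSA_TAG_PROTO_NONE;	/* no tagging on this hardware */
}

static int foo_setup(struct dsa_switch *ds)
{
	return 0;			/* switch-wide init goes here */
}

static int foo_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	/* program the per-port maximum SDU length into the hardware */
	return 0;
}

static const struct dsa_switch_ops foo_switch_ops = {
	.get_tag_protocol	= foo_get_tag_protocol,
	.setup			= foo_setup,
	.port_change_mtu	= foo_port_change_mtu,
};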
#define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes) \
@@ -602,11 +1249,44 @@ void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
void *occ_get_priv);
void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
u64 resource_id);
+struct devlink_region *
+dsa_devlink_region_create(struct dsa_switch *ds,
+ const struct devlink_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size);
+struct devlink_region *
+dsa_devlink_port_region_create(struct dsa_switch *ds,
+ int port,
+ const struct devlink_port_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size);
+void dsa_devlink_region_destroy(struct devlink_region *region);
+
+struct dsa_port *dsa_port_from_netdev(struct net_device *netdev);
struct dsa_devlink_priv {
struct dsa_switch *ds;
};
+static inline struct dsa_switch *dsa_devlink_to_ds(struct devlink *dl)
+{
+ struct dsa_devlink_priv *dl_priv = devlink_priv(dl);
+
+ return dl_priv->ds;
+}
+
+static inline
+struct dsa_switch *dsa_devlink_port_to_ds(struct devlink_port *port)
+{
+ struct devlink *dl = port->devlink;
+ struct dsa_devlink_priv *dl_priv = devlink_priv(dl);
+
+ return dl_priv->ds;
+}
+
+static inline int dsa_devlink_port_to_port(struct devlink_port *port)
+{
+ return port->index;
+}
+
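
The region helpers and the three inline accessors above are meant to be combined. A hypothetical sketch, assuming a driver-defined foo_region_ops and FOO_REGION_SIZE, of the per-port setup/teardown pair that the @port_setup/@port_teardown comment earlier declares mandatory for port regions:

static int foo_port_setup(struct dsa_switch *ds, int port)
{
	struct foo_priv *priv = ds->priv;
	struct devlink_region *region;

	region = dsa_devlink_port_region_create(ds, port, &foo_region_ops,
						1, FOO_REGION_SIZE);
	if (IS_ERR(region))
		return PTR_ERR(region);

	priv->port_region[port] = region;	/* driver bookkeeping */
	return 0;
}

static void foo_port_teardown(struct dsa_switch *ds, int port)
{
	struct foo_priv *priv = ds->priv;

	dsa_devlink_region_destroy(priv->port_region[port]);
}

Inside the region's snapshot callback, dsa_devlink_port_to_ds() and dsa_devlink_port_to_port() recover the (ds, port) pair from the struct devlink_port.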
struct dsa_switch_driver {
struct list_head list;
const struct dsa_switch_ops *ops;
@@ -614,8 +1294,15 @@ struct dsa_switch_driver {
struct net_device *dsa_dev_to_net_device(struct device *dev);
+bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
+bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+
/* Keep inline for faster access in hot path */
-static inline bool netdev_uses_dsa(struct net_device *dev)
+static inline bool netdev_uses_dsa(const struct net_device *dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
return dev->dsa_ptr && dev->dsa_ptr->rcv;
@@ -623,86 +1310,97 @@ static inline bool netdev_uses_dsa(struct net_device *dev)
return false;
}
-static inline bool dsa_can_decode(const struct sk_buff *skb,
- struct net_device *dev)
+/* All DSA tags that push the EtherType to the right (basically all except tail
+ * tags, which don't break dissection) can be treated the same from the
+ * perspective of the flow dissector.
+ *
+ * We need to return:
+ * - offset: the (B - A) difference between:
+ * A. the position of the real EtherType and
+ * B. the current skb->data (aka ETH_HLEN bytes into the frame, aka 2 bytes
+ * after the normal EtherType was supposed to be)
+ * The offset in bytes is exactly equal to the tagger overhead (and half of
+ * that, in __be16 shorts).
+ *
+ * - proto: the value of the real EtherType.
+ */
+static inline void dsa_tag_generic_flow_dissect(const struct sk_buff *skb,
+ __be16 *proto, int *offset)
{
#if IS_ENABLED(CONFIG_NET_DSA)
- return !dev->dsa_ptr->filter || dev->dsa_ptr->filter(skb, dev);
+ const struct dsa_device_ops *ops = skb->dev->dsa_ptr->tag_ops;
+ int tag_len = ops->needed_headroom;
+
+ *offset = tag_len;
+ *proto = ((__be16 *)skb->data)[(tag_len / 2) - 1];
#endif
- return false;
}
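
To make the offset/proto contract concrete, consider a hypothetical tagger with needed_headroom == 4, i.e. a 4-byte header inserted between the source address and the EtherType:

/*
 *   DA (6) | SA (6) | DSA tag (4) | real EtherType (2) | payload ...
 *
 * skb->data sits ETH_HLEN == 14 bytes into the frame, so the real
 * EtherType (frame bytes 16..17) is found at skb->data[2..3]:
 *
 *   *offset = 4;                          tagger overhead in bytes
 *   *proto  = ((__be16 *)skb->data)[1];   (4 / 2) - 1 == index 1
 */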
-void dsa_unregister_switch(struct dsa_switch *ds);
-int dsa_register_switch(struct dsa_switch *ds);
-#ifdef CONFIG_PM_SLEEP
-int dsa_switch_suspend(struct dsa_switch *ds);
-int dsa_switch_resume(struct dsa_switch *ds);
-#else
-static inline int dsa_switch_suspend(struct dsa_switch *ds)
-{
- return 0;
-}
-static inline int dsa_switch_resume(struct dsa_switch *ds)
+#if IS_ENABLED(CONFIG_NET_DSA)
+static inline int __dsa_netdevice_ops_check(struct net_device *dev)
{
+ int err = -EOPNOTSUPP;
+
+ if (!dev->dsa_ptr)
+ return err;
+
+ if (!dev->dsa_ptr->netdev_ops)
+ return err;
+
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-enum dsa_notifier_type {
- DSA_PORT_REGISTER,
- DSA_PORT_UNREGISTER,
-};
+static inline int dsa_ndo_eth_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
+{
+ const struct dsa_netdevice_ops *ops;
+ int err;
-struct dsa_notifier_info {
- struct net_device *dev;
-};
+ err = __dsa_netdevice_ops_check(dev);
+ if (err)
+ return err;
-struct dsa_notifier_register_info {
- struct dsa_notifier_info info; /* must be first */
- struct net_device *master;
- unsigned int port_number;
- unsigned int switch_number;
-};
+ ops = dev->dsa_ptr->netdev_ops;
-static inline struct net_device *
-dsa_notifier_info_to_dev(const struct dsa_notifier_info *info)
+ return ops->ndo_eth_ioctl(dev, ifr, cmd);
+}
+#else
+static inline int dsa_ndo_eth_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
{
- return info->dev;
+ return -EOPNOTSUPP;
}
+#endif
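
The wrapper is written so callers can probe it unconditionally: -EOPNOTSUPP means "no DSA handler registered, take the normal path". A hypothetical call site:

static int handle_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err;

	/* give a DSA-registered handler first shot at the ioctl */
	err = dsa_ndo_eth_ioctl(dev, ifr, cmd);
	if (err != -EOPNOTSUPP)
		return err;

	if (dev->netdev_ops->ndo_eth_ioctl)
		return dev->netdev_ops->ndo_eth_ioctl(dev, ifr, cmd);

	return -EOPNOTSUPP;
}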
-#if IS_ENABLED(CONFIG_NET_DSA)
-int register_dsa_notifier(struct notifier_block *nb);
-int unregister_dsa_notifier(struct notifier_block *nb);
-int call_dsa_notifiers(unsigned long val, struct net_device *dev,
- struct dsa_notifier_info *info);
+void dsa_unregister_switch(struct dsa_switch *ds);
+int dsa_register_switch(struct dsa_switch *ds);
+void dsa_switch_shutdown(struct dsa_switch *ds);
+struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
+void dsa_flush_workqueue(void);
+#ifdef CONFIG_PM_SLEEP
+int dsa_switch_suspend(struct dsa_switch *ds);
+int dsa_switch_resume(struct dsa_switch *ds);
#else
-static inline int register_dsa_notifier(struct notifier_block *nb)
+static inline int dsa_switch_suspend(struct dsa_switch *ds)
{
return 0;
}
-
-static inline int unregister_dsa_notifier(struct notifier_block *nb)
+static inline int dsa_switch_resume(struct dsa_switch *ds)
{
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
-static inline int call_dsa_notifiers(unsigned long val, struct net_device *dev,
- struct dsa_notifier_info *info)
+#if IS_ENABLED(CONFIG_NET_DSA)
+bool dsa_slave_dev_check(const struct net_device *dev);
+#else
+static inline bool dsa_slave_dev_check(const struct net_device *dev)
{
- return NOTIFY_DONE;
+ return false;
}
#endif
-/* Broadcom tag specific helpers to insert and extract queue/port number */
-#define BRCM_TAG_SET_PORT_QUEUE(p, q) ((p) << 8 | q)
-#define BRCM_TAG_GET_PORT(v) ((v) >> 8)
-#define BRCM_TAG_GET_QUEUE(v) ((v) & 0xff)
-
-
netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev);
-int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data);
-int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data);
-int dsa_port_get_phy_sset_count(struct dsa_port *dp);
void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up);
struct dsa_tag_driver {
@@ -735,7 +1433,7 @@ module_exit(dsa_tag_driver_module_exit)
/**
* module_dsa_tag_drivers() - Helper macro for registering DSA tag
* drivers
- * @__ops_array: Array of tag driver strucutres
+ * @__ops_array: Array of tag driver structures
*
* Helper macro for DSA tag drivers which do not do anything special
* in module init/exit. Each module may only use this macro once, and
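
A typical single-tagger module then reduces to the following, assuming the DSA_TAG_DRIVER()/DSA_TAG_DRIVER_NAME() helpers defined earlier in this header (foo_netdev_ops is a placeholder tagger):

DSA_TAG_DRIVER(foo_netdev_ops);

static struct dsa_tag_driver *foo_tag_drivers[] = {
	&DSA_TAG_DRIVER_NAME(foo_netdev_ops),
};
module_dsa_tag_drivers(foo_tag_drivers);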
diff --git a/include/net/dst.h b/include/net/dst.h
index 3448cf865ede..00b479ce6b99 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -18,6 +18,7 @@
#include <linux/refcount.h>
#include <net/neighbour.h>
#include <asm/processor.h>
+#include <linux/indirect_call_wrapper.h>
struct sk_buff;
@@ -35,7 +36,6 @@ struct dst_entry {
int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
unsigned short flags;
-#define DST_HOST 0x0001
#define DST_NOXFRM 0x0002
#define DST_NOPOLICY 0x0004
#define DST_NOCOUNT 0x0008
@@ -77,6 +77,7 @@ struct dst_entry {
#ifndef CONFIG_64BIT
atomic_t __refcnt; /* 32-bit offset 64 */
#endif
+ netdevice_tracker dev_tracker;
};
struct dst_metrics {
@@ -194,9 +195,11 @@ dst_feature(const struct dst_entry *dst, u32 feature)
return dst_metric(dst, RTAX_FEATURES) & feature;
}
+INDIRECT_CALLABLE_DECLARE(unsigned int ip6_mtu(const struct dst_entry *));
+INDIRECT_CALLABLE_DECLARE(unsigned int ipv4_mtu(const struct dst_entry *));
static inline u32 dst_mtu(const struct dst_entry *dst)
{
- return dst->ops->mtu(dst);
+ return INDIRECT_CALL_INET(dst->ops->mtu, ip6_mtu, ipv4_mtu, dst);
}
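
INDIRECT_CALL_INET() trades the retpoline-expensive indirect branch for direct-call tests against the two overwhelmingly common implementations. Not the literal macro expansion, but a sketch of the effect when both CONFIG_INET and CONFIG_IPV6 are built in:

static inline u32 dst_mtu_sketch(const struct dst_entry *dst)
{
	unsigned int (*mtu)(const struct dst_entry *) = dst->ops->mtu;

	if (mtu == ip6_mtu)
		return ip6_mtu(dst);	/* direct, predictable call */
	if (mtu == ipv4_mtu)
		return ipv4_mtu(dst);	/* direct, predictable call */
	return mtu(dst);		/* rare: genuine indirect call */
}

The same pattern is applied to dst_output(), dst_input() and dst_check() further down.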
/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
@@ -215,7 +218,7 @@ dst_allfrag(const struct dst_entry *dst)
static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
- return dst_metric(dst, RTAX_LOCK) & (1<<metric);
+ return dst_metric(dst, RTAX_LOCK) & (1 << metric);
}
static inline void dst_hold(struct dst_entry *dst)
@@ -236,12 +239,6 @@ static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
}
}
-static inline void dst_hold_and_use(struct dst_entry *dst, unsigned long time)
-{
- dst_hold(dst);
- dst_use_noref(dst, time);
-}
-
static inline struct dst_entry *dst_clone(struct dst_entry *dst)
{
if (dst)
@@ -275,6 +272,7 @@ static inline void skb_dst_drop(struct sk_buff *skb)
static inline void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst)
{
+ nskb->slow_gro |= !!refdst;
nskb->_skb_refdst = refdst;
if (!(nskb->_skb_refdst & SKB_DST_NOREF))
dst_clone(skb_dst(nskb));
@@ -314,6 +312,7 @@ static inline bool skb_dst_force(struct sk_buff *skb)
dst = NULL;
skb->_skb_refdst = (unsigned long)dst;
+ skb->slow_gro |= !!dst;
}
return skb->_skb_refdst != 0UL;
@@ -401,7 +400,13 @@ static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, co
static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
struct sk_buff *skb)
{
- struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL);
+ struct neighbour *n;
+
+ if (WARN_ON_ONCE(!dst->ops->neigh_lookup))
+ return NULL;
+
+ n = dst->ops->neigh_lookup(dst, skb, NULL);
+
return IS_ERR(n) ? NULL : n;
}
@@ -430,22 +435,36 @@ static inline void dst_set_expires(struct dst_entry *dst, int timeout)
dst->expires = expires;
}
+INDIRECT_CALLABLE_DECLARE(int ip6_output(struct net *, struct sock *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int ip_output(struct net *, struct sock *,
+ struct sk_buff *));
/* Output packet to network from transport. */
static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- return skb_dst(skb)->output(net, sk, skb);
+ return INDIRECT_CALL_INET(skb_dst(skb)->output,
+ ip6_output, ip_output,
+ net, sk, skb);
}
+INDIRECT_CALLABLE_DECLARE(int ip6_input(struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int ip_local_deliver(struct sk_buff *));
/* Input packet from network to transport. */
static inline int dst_input(struct sk_buff *skb)
{
- return skb_dst(skb)->input(skb);
+ return INDIRECT_CALL_INET(skb_dst(skb)->input,
+ ip6_input, ip_local_deliver, skb);
}
+INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *,
+ u32));
+INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
+ u32));
static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
if (dst->obsolete)
- dst = dst->ops->check(dst, cookie);
+ dst = INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check,
+ ipv4_dst_check, dst, cookie);
return dst;
}
@@ -528,14 +547,15 @@ static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
}
-static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
- struct dst_entry *encap_dst,
- int headroom)
-{
- u32 encap_mtu = dst_mtu(encap_dst);
-
- if (skb->len > encap_mtu - headroom)
- skb_dst_update_pmtu_no_confirm(skb, encap_mtu - headroom);
-}
+struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie);
+void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu, bool confirm_neigh);
+void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb);
+u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old);
+struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr);
+unsigned int dst_blackhole_mtu(const struct dst_entry *dst);
#endif /* _NET_DST_H */
diff --git a/include/net/dst_cache.h b/include/net/dst_cache.h
index 67634675e919..df6622a5fe98 100644
--- a/include/net/dst_cache.h
+++ b/include/net/dst_cache.h
@@ -80,6 +80,17 @@ static inline void dst_cache_reset(struct dst_cache *dst_cache)
}
/**
+ * dst_cache_reset_now - invalidate the cache contents immediately
+ * @dst_cache: the cache
+ *
+ * The caller must be sure there are no concurrent users, as this frees
+ * all cached dst entries immediately, rather than waiting for the next
+ * per-cpu usage as dst_cache_reset() does. Most callers should use the
+ * faster, lazily freed dst_cache_reset() instead.
+ */
+void dst_cache_reset_now(struct dst_cache *dst_cache);
+
+/**
* dst_cache_init - initialize the cache, allocating the required storage
* @dst_cache: the cache
* @gfp: allocation flags
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index 56cb3c38569a..a454cf4327fe 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -4,11 +4,14 @@
#include <linux/skbuff.h>
#include <net/ip_tunnels.h>
+#include <net/macsec.h>
#include <net/dst.h>
enum metadata_type {
METADATA_IP_TUNNEL,
METADATA_HW_PORT_MUX,
+ METADATA_MACSEC,
+ METADATA_XFRM,
};
struct hw_port_info {
@@ -16,12 +19,23 @@ struct hw_port_info {
u32 port_id;
};
+struct macsec_info {
+ sci_t sci;
+};
+
+struct xfrm_md_info {
+ u32 if_id;
+ int link;
+};
+
struct metadata_dst {
struct dst_entry dst;
enum metadata_type type;
union {
struct ip_tunnel_info tun_info;
struct hw_port_info port_info;
+ struct macsec_info macsec_info;
+ struct xfrm_md_info xfrm_info;
} u;
};
@@ -45,12 +59,35 @@ skb_tunnel_info(const struct sk_buff *skb)
return &md_dst->u.tun_info;
dst = skb_dst(skb);
- if (dst && dst->lwtstate)
+ if (dst && dst->lwtstate &&
+ (dst->lwtstate->type == LWTUNNEL_ENCAP_IP ||
+ dst->lwtstate->type == LWTUNNEL_ENCAP_IP6))
return lwt_tun_info(dst->lwtstate);
return NULL;
}
+static inline struct xfrm_md_info *lwt_xfrm_info(struct lwtunnel_state *lwt)
+{
+ return (struct xfrm_md_info *)lwt->data;
+}
+
+static inline struct xfrm_md_info *skb_xfrm_md_info(const struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ struct dst_entry *dst;
+
+ if (md_dst && md_dst->type == METADATA_XFRM)
+ return &md_dst->u.xfrm_info;
+
+ dst = skb_dst(skb);
+ if (dst && dst->lwtstate &&
+ dst->lwtstate->type == LWTUNNEL_ENCAP_XFRM)
+ return lwt_xfrm_info(dst->lwtstate);
+
+ return NULL;
+}
+
static inline bool skb_valid_dst(const struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
@@ -80,6 +117,12 @@ static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a,
return memcmp(&a->u.tun_info, &b->u.tun_info,
sizeof(a->u.tun_info) +
a->u.tun_info.options_len);
+ case METADATA_MACSEC:
+ return memcmp(&a->u.macsec_info, &b->u.macsec_info,
+ sizeof(a->u.macsec_info));
+ case METADATA_XFRM:
+ return memcmp(&a->u.xfrm_info, &b->u.xfrm_info,
+ sizeof(a->u.xfrm_info));
default:
return 1;
}
@@ -121,8 +164,20 @@ static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb)
memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
sizeof(struct ip_tunnel_info) + md_size);
+#ifdef CONFIG_DST_CACHE
+ /* Unclone the dst cache if there is one */
+ if (new_md->u.tun_info.dst_cache.cache) {
+ int ret;
+
+ ret = dst_cache_init(&new_md->u.tun_info.dst_cache, GFP_ATOMIC);
+ if (ret) {
+ metadata_dst_free(new_md);
+ return ERR_PTR(ret);
+ }
+ }
+#endif
+
skb_dst_drop(skb);
- dst_hold(&new_md->dst);
skb_dst_set(skb, &new_md->dst);
return new_md;
}
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 443863c7b8da..88ff7bb2bb9b 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -53,9 +53,11 @@ static inline int dst_entries_get_slow(struct dst_ops *dst)
return percpu_counter_sum_positive(&dst->pcpuc_entries);
}
+#define DST_PERCPU_COUNTER_BATCH 32
static inline void dst_entries_add(struct dst_ops *dst, int val)
{
- percpu_counter_add(&dst->pcpuc_entries, val);
+ percpu_counter_add_batch(&dst->pcpuc_entries, val,
+ DST_PERCPU_COUNTER_BATCH);
}
static inline int dst_entries_init(struct dst_ops *dst)
diff --git a/include/net/erspan.h b/include/net/erspan.h
index b39643ef4c95..6cb4cbd6a48f 100644
--- a/include/net/erspan.h
+++ b/include/net/erspan.h
@@ -2,7 +2,19 @@
#define __LINUX_ERSPAN_H
/*
- * GRE header for ERSPAN encapsulation (8 octets [34:41]) -- 8 bytes
+ * GRE header for ERSPAN type I encapsulation (4 octets [34:37])
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |0|0|0|0|0|00000|000000000|00000| Protocol Type for ERSPAN |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * The Type I ERSPAN frame format is based on the barebones IP + GRE
+ * encapsulation (as described above) on top of the raw mirrored frame.
+ * There is no extra ERSPAN header.
+ *
+ *
+ * GRE header for ERSPAN type II and III encapsulation (8 octets [34:41])
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -43,9 +55,12 @@
* | Platform Specific Info |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
- * GRE proto ERSPAN type II = 0x88BE, type III = 0x22EB
+ * GRE proto ERSPAN type I/II = 0x88BE, type III = 0x22EB
*/
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/skbuff.h>
#include <uapi/linux/erspan.h>
#define ERSPAN_VERSION 0x1 /* ERSPAN type II */
@@ -139,6 +154,9 @@ static inline u8 get_hwid(const struct erspan_md2 *md2)
static inline int erspan_hdr_len(int version)
{
+ if (version == 0)
+ return 0;
+
return sizeof(struct erspan_base_hdr) +
(version == 1 ? ERSPAN_V1_MDSIZE : ERSPAN_V2_MDSIZE);
}
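
Worked values, using the sizes defined in this header (4-byte erspan_base_hdr, ERSPAN_V1_MDSIZE == 4, ERSPAN_V2_MDSIZE == 8):

/*
 *   erspan_hdr_len(0) == 0     type I:   no ERSPAN header at all
 *   erspan_hdr_len(1) == 8     type II:  base header + 4-byte index
 *   erspan_hdr_len(2) == 12    type III: base header + 8-byte md2
 */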
diff --git a/include/net/esp.h b/include/net/esp.h
index 117652eb6ea3..322950727dd0 100644
--- a/include/net/esp.h
+++ b/include/net/esp.h
@@ -5,12 +5,29 @@
#include <linux/skbuff.h>
struct ip_esp_hdr;
+struct xfrm_state;
static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
{
return (struct ip_esp_hdr *)skb_transport_header(skb);
}
+static inline void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
+{
+ /* Fill padding... */
+ if (tfclen) {
+ memset(tail, 0, tfclen);
+ tail += tfclen;
+ }
+ do {
+ int i;
+ for (i = 0; i < plen - 2; i++)
+ tail[i] = i + 1;
+ } while (0);
+ tail[plen - 2] = plen - 2;
+ tail[plen - 1] = proto;
+}
+
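
The trailer layout follows the ESP self-describing padding of RFC 4303. A worked example for plen == 6, tfclen == 0 and proto == IPPROTO_TCP (6):

/*
 *   tail[0..3] = 01 02 03 04     pad bytes count up from 1
 *   tail[4]    = 04              pad length  (plen - 2)
 *   tail[5]    = 06              next header (IPPROTO_TCP)
 */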
struct esp_info {
struct ip_esp_hdr *esph;
__be64 seqno;
diff --git a/include/net/espintcp.h b/include/net/espintcp.h
index dd7026a00066..0335bbd76552 100644
--- a/include/net/espintcp.h
+++ b/include/net/espintcp.h
@@ -25,6 +25,7 @@ struct espintcp_ctx {
struct espintcp_msg partial;
void (*saved_data_ready)(struct sock *sk);
void (*saved_write_space)(struct sock *sk);
+ void (*saved_destruct)(struct sock *sk);
struct work_struct work;
bool tx_running;
};
diff --git a/include/net/ethoc.h b/include/net/ethoc.h
index 78519ed42ab4..73810f3ca492 100644
--- a/include/net/ethoc.h
+++ b/include/net/ethoc.h
@@ -10,6 +10,9 @@
#ifndef LINUX_NET_ETHOC_H
#define LINUX_NET_ETHOC_H 1
+#include <linux/if.h>
+#include <linux/types.h>
+
struct ethoc_platform_data {
u8 hwaddr[IFHWADDRLEN];
s8 phy_id;
diff --git a/include/net/failover.h b/include/net/failover.h
index bb15438f39c7..f2b42b4b9cd6 100644
--- a/include/net/failover.h
+++ b/include/net/failover.h
@@ -25,6 +25,7 @@ struct failover_ops {
struct failover {
struct list_head list;
struct net_device __rcu *failover_dev;
+ netdevice_tracker dev_tracker;
struct failover_ops __rcu *ops;
};
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index a259050f84af..82da359bca03 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -10,6 +10,7 @@
#include <net/flow.h>
#include <net/rtnetlink.h>
#include <net/fib_notifier.h>
+#include <linux/indirect_call_wrapper.h>
struct fib_kuid_range {
kuid_t start;
@@ -68,7 +69,7 @@ struct fib_rules_ops {
int (*action)(struct fib_rule *,
struct flowi *, int,
struct fib_lookup_arg *);
- bool (*suppress)(struct fib_rule *,
+ bool (*suppress)(struct fib_rule *, int,
struct fib_lookup_arg *);
int (*match)(struct fib_rule *,
struct flowi *, int);
@@ -90,7 +91,6 @@ struct fib_rules_ops {
void (*flush_cache)(struct fib_rules_ops *ops);
int nlgroup;
- const struct nla_policy *policy;
struct list_head rules_list;
struct module *owner;
struct net *fro_net;
@@ -102,26 +102,6 @@ struct fib_rule_notifier_info {
struct fib_rule *rule;
};
-#define FRA_GENERIC_POLICY \
- [FRA_UNSPEC] = { .strict_start_type = FRA_DPORT_RANGE + 1 }, \
- [FRA_IIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \
- [FRA_OIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \
- [FRA_PRIORITY] = { .type = NLA_U32 }, \
- [FRA_FWMARK] = { .type = NLA_U32 }, \
- [FRA_TUN_ID] = { .type = NLA_U64 }, \
- [FRA_FWMASK] = { .type = NLA_U32 }, \
- [FRA_TABLE] = { .type = NLA_U32 }, \
- [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \
- [FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 }, \
- [FRA_GOTO] = { .type = NLA_U32 }, \
- [FRA_L3MDEV] = { .type = NLA_U8 }, \
- [FRA_UID_RANGE] = { .len = sizeof(struct fib_rule_uid_range) }, \
- [FRA_PROTOCOL] = { .type = NLA_U8 }, \
- [FRA_IP_PROTO] = { .type = NLA_U8 }, \
- [FRA_SPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) }, \
- [FRA_DPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) }
-
-
static inline void fib_rule_get(struct fib_rule *rule)
{
refcount_inc(&rule->refcnt);
@@ -203,4 +183,23 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack);
int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack);
+
+INDIRECT_CALLABLE_DECLARE(int fib6_rule_match(struct fib_rule *rule,
+ struct flowi *fl, int flags));
+INDIRECT_CALLABLE_DECLARE(int fib4_rule_match(struct fib_rule *rule,
+ struct flowi *fl, int flags));
+
+INDIRECT_CALLABLE_DECLARE(int fib6_rule_action(struct fib_rule *rule,
+ struct flowi *flp, int flags,
+ struct fib_lookup_arg *arg));
+INDIRECT_CALLABLE_DECLARE(int fib4_rule_action(struct fib_rule *rule,
+ struct flowi *flp, int flags,
+ struct fib_lookup_arg *arg));
+
+INDIRECT_CALLABLE_DECLARE(bool fib6_rule_suppress(struct fib_rule *rule,
+ int flags,
+ struct fib_lookup_arg *arg));
+INDIRECT_CALLABLE_DECLARE(bool fib4_rule_suppress(struct fib_rule *rule,
+ int flags,
+ struct fib_lookup_arg *arg));
#endif
diff --git a/include/net/firewire.h b/include/net/firewire.h
index 299e5df38552..8fbff8d77865 100644
--- a/include/net/firewire.h
+++ b/include/net/firewire.h
@@ -2,6 +2,8 @@
#ifndef _NET_FIREWIRE_H
#define _NET_FIREWIRE_H
+#include <linux/types.h>
+
/* Pseudo L2 address */
#define FWNET_ALEN 16
union fwnet_hwaddr {
@@ -11,8 +13,7 @@ union fwnet_hwaddr {
__be64 uniq_id; /* EUI-64 */
u8 max_rec; /* max packet size */
u8 sspd; /* max speed */
- __be16 fifo_hi; /* hi 16bits of FIFO addr */
- __be32 fifo_lo; /* lo 32bits of FIFO addr */
+ u8 fifo[6]; /* FIFO addr */
} __packed uc;
};
diff --git a/include/net/flow.h b/include/net/flow.h
index a50fb77a0b27..2f0da4f0318b 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -29,6 +29,7 @@ struct flowi_tunnel {
struct flowi_common {
int flowic_oif;
int flowic_iif;
+ int flowic_l3mdev;
__u32 flowic_mark;
__u8 flowic_tos;
__u8 flowic_scope;
@@ -36,7 +37,6 @@ struct flowi_common {
__u8 flowic_flags;
#define FLOWI_FLAG_ANYSRC 0x01
#define FLOWI_FLAG_KNOWN_NH 0x02
-#define FLOWI_FLAG_SKIP_NH_OIF 0x04
__u32 flowic_secid;
kuid_t flowic_uid;
struct flowi_tunnel flowic_tun_key;
@@ -54,12 +54,6 @@ union flowi_uli {
__u8 code;
} icmpt;
- struct {
- __le16 dport;
- __le16 sport;
- } dnports;
-
- __be32 spi;
__be32 gre_key;
struct {
@@ -71,6 +65,7 @@ struct flowi4 {
struct flowi_common __fl_common;
#define flowi4_oif __fl_common.flowic_oif
#define flowi4_iif __fl_common.flowic_iif
+#define flowi4_l3mdev __fl_common.flowic_l3mdev
#define flowi4_mark __fl_common.flowic_mark
#define flowi4_tos __fl_common.flowic_tos
#define flowi4_scope __fl_common.flowic_scope
@@ -90,7 +85,6 @@ struct flowi4 {
#define fl4_dport uli.ports.dport
#define fl4_icmp_type uli.icmpt.type
#define fl4_icmp_code uli.icmpt.code
-#define fl4_ipsec_spi uli.spi
#define fl4_mh_type uli.mht.type
#define fl4_gre_key uli.gre_key
} __attribute__((__aligned__(BITS_PER_LONG/8)));
@@ -104,6 +98,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
{
fl4->flowi4_oif = oif;
fl4->flowi4_iif = LOOPBACK_IFINDEX;
+ fl4->flowi4_l3mdev = 0;
fl4->flowi4_mark = mark;
fl4->flowi4_tos = tos;
fl4->flowi4_scope = scope;
@@ -116,6 +111,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
fl4->saddr = saddr;
fl4->fl4_dport = dport;
fl4->fl4_sport = sport;
+ fl4->flowi4_multipath_hash = 0;
}
/* Reset some input parameters after previous lookup */
@@ -133,6 +129,7 @@ struct flowi6 {
struct flowi_common __fl_common;
#define flowi6_oif __fl_common.flowic_oif
#define flowi6_iif __fl_common.flowic_iif
+#define flowi6_l3mdev __fl_common.flowic_l3mdev
#define flowi6_mark __fl_common.flowic_mark
#define flowi6_scope __fl_common.flowic_scope
#define flowi6_proto __fl_common.flowic_proto
@@ -149,36 +146,20 @@ struct flowi6 {
#define fl6_dport uli.ports.dport
#define fl6_icmp_type uli.icmpt.type
#define fl6_icmp_code uli.icmpt.code
-#define fl6_ipsec_spi uli.spi
#define fl6_mh_type uli.mht.type
#define fl6_gre_key uli.gre_key
__u32 mp_hash;
} __attribute__((__aligned__(BITS_PER_LONG/8)));
-struct flowidn {
- struct flowi_common __fl_common;
-#define flowidn_oif __fl_common.flowic_oif
-#define flowidn_iif __fl_common.flowic_iif
-#define flowidn_mark __fl_common.flowic_mark
-#define flowidn_scope __fl_common.flowic_scope
-#define flowidn_proto __fl_common.flowic_proto
-#define flowidn_flags __fl_common.flowic_flags
- __le16 daddr;
- __le16 saddr;
- union flowi_uli uli;
-#define fld_sport uli.ports.sport
-#define fld_dport uli.ports.dport
-} __attribute__((__aligned__(BITS_PER_LONG/8)));
-
struct flowi {
union {
struct flowi_common __fl_common;
struct flowi4 ip4;
struct flowi6 ip6;
- struct flowidn dn;
} u;
#define flowi_oif u.__fl_common.flowic_oif
#define flowi_iif u.__fl_common.flowic_iif
+#define flowi_l3mdev u.__fl_common.flowic_l3mdev
#define flowi_mark u.__fl_common.flowic_mark
#define flowi_tos u.__fl_common.flowic_tos
#define flowi_scope u.__fl_common.flowic_scope
@@ -194,32 +175,19 @@ static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
return container_of(fl4, struct flowi, u.ip4);
}
-static inline struct flowi *flowi6_to_flowi(struct flowi6 *fl6)
+static inline struct flowi_common *flowi4_to_flowi_common(struct flowi4 *fl4)
{
- return container_of(fl6, struct flowi, u.ip6);
+ return &(fl4->__fl_common);
}
-static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn)
+static inline struct flowi *flowi6_to_flowi(struct flowi6 *fl6)
{
- return container_of(fldn, struct flowi, u.dn);
+ return container_of(fl6, struct flowi, u.ip6);
}
-typedef unsigned long flow_compare_t;
-
-static inline unsigned int flow_key_size(u16 family)
+static inline struct flowi_common *flowi6_to_flowi_common(struct flowi6 *fl6)
{
- switch (family) {
- case AF_INET:
- BUILD_BUG_ON(sizeof(struct flowi4) % sizeof(flow_compare_t));
- return sizeof(struct flowi4) / sizeof(flow_compare_t);
- case AF_INET6:
- BUILD_BUG_ON(sizeof(struct flowi6) % sizeof(flow_compare_t));
- return sizeof(struct flowi6) / sizeof(flow_compare_t);
- case AF_DECnet:
- BUILD_BUG_ON(sizeof(struct flowidn) % sizeof(flow_compare_t));
- return sizeof(struct flowidn) / sizeof(flow_compare_t);
- }
- return 0;
+ return &(fl6->__fl_common);
}
__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys);
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 628383915827..5ccf52ef8809 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -8,6 +8,8 @@
#include <linux/string.h>
#include <uapi/linux/if_ether.h>
+struct bpf_prog;
+struct net;
struct sk_buff;
/**
@@ -57,15 +59,29 @@ struct flow_dissector_key_vlan {
__be16 vlan_tci;
};
__be16 vlan_tpid;
+ __be16 vlan_eth_type;
+ u16 padding;
};
-struct flow_dissector_key_mpls {
+struct flow_dissector_mpls_lse {
u32 mpls_ttl:8,
mpls_bos:1,
mpls_tc:3,
mpls_label:20;
};
+#define FLOW_DIS_MPLS_MAX 7
+struct flow_dissector_key_mpls {
+ struct flow_dissector_mpls_lse ls[FLOW_DIS_MPLS_MAX]; /* Label Stack */
+ u8 used_lses; /* One bit set for each Label Stack Entry in use */
+};
+
+static inline void dissector_set_mpls_lse(struct flow_dissector_key_mpls *mpls,
+ int lse_index)
+{
+ mpls->used_lses |= 1 << lse_index;
+}
+
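
Each bit of @used_lses marks one valid entry of @ls; a dissector that parsed the two outermost labels of a stack would end up with (sketch):

static void mpls_lse_example(void)
{
	struct flow_dissector_key_mpls mpls = {};

	dissector_set_mpls_lse(&mpls, 0);	/* outermost LSE parsed */
	dissector_set_mpls_lse(&mpls, 1);	/* second LSE parsed */

	/* mpls.used_lses == 0x3; only ls[0] and ls[1] carry data */
}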
#define FLOW_DIS_TUN_OPTS_MAX 255
/**
* struct flow_dissector_key_enc_opts:
@@ -163,6 +179,22 @@ struct flow_dissector_key_ports {
};
/**
+ * struct flow_dissector_key_ports_range
+ * @tp: port number from packet
+ * @tp_min: min port number in range
+ * @tp_max: max port number in range
+ */
+struct flow_dissector_key_ports_range {
+ union {
+ struct flow_dissector_key_ports tp;
+ struct {
+ struct flow_dissector_key_ports tp_min;
+ struct flow_dissector_key_ports tp_max;
+ };
+ };
+};
+
+/**
* flow_dissector_key_icmp:
* type: ICMP type
* code: ICMP code
@@ -229,6 +261,42 @@ struct flow_dissector_key_ct {
u32 ct_labels[4];
};
+/**
+ * struct flow_dissector_key_hash:
+ * @hash: hash value
+ */
+struct flow_dissector_key_hash {
+ u32 hash;
+};
+
+/**
+ * struct flow_dissector_key_num_of_vlans:
+ * @num_of_vlans: num_of_vlans value
+ */
+struct flow_dissector_key_num_of_vlans {
+ u8 num_of_vlans;
+};
+
+/**
+ * struct flow_dissector_key_pppoe:
+ * @session_id: pppoe session id
+ * @ppp_proto: ppp protocol
+ * @type: pppoe eth type
+ */
+struct flow_dissector_key_pppoe {
+ __be16 session_id;
+ __be16 ppp_proto;
+ __be16 type;
+};
+
+/**
+ * struct flow_dissector_key_l2tpv3:
+ * @session_id: identifier for a l2tp session
+ */
+struct flow_dissector_key_l2tpv3 {
+ __be32 session_id;
+};
+
enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */
FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */
@@ -257,6 +325,10 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_ENC_OPTS, /* struct flow_dissector_key_enc_opts */
FLOW_DISSECTOR_KEY_META, /* struct flow_dissector_key_meta */
FLOW_DISSECTOR_KEY_CT, /* struct flow_dissector_key_ct */
+ FLOW_DISSECTOR_KEY_HASH, /* struct flow_dissector_key_hash */
+ FLOW_DISSECTOR_KEY_NUM_OF_VLANS, /* struct flow_dissector_key_num_of_vlans */
+ FLOW_DISSECTOR_KEY_PPPOE, /* struct flow_dissector_key_pppoe */
+ FLOW_DISSECTOR_KEY_L2TPV3, /* struct flow_dissector_key_l2tpv3 */
FLOW_DISSECTOR_KEY_MAX,
};
@@ -264,6 +336,7 @@ enum flow_dissector_key_id {
#define FLOW_DISSECTOR_F_PARSE_1ST_FRAG BIT(0)
#define FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL BIT(1)
#define FLOW_DISSECTOR_F_STOP_AT_ENCAP BIT(2)
+#define FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP BIT(3)
struct flow_dissector_key {
enum flow_dissector_key_id key_id;
@@ -327,7 +400,7 @@ static inline bool flow_keys_have_l4(const struct flow_keys *keys)
u32 flow_hash_from_keys(struct flow_keys *keys);
void skb_flow_get_icmp_tci(const struct sk_buff *skb,
struct flow_dissector_key_icmp *key_icmp,
- void *data, int thoff, int hlen);
+ const void *data, int thoff, int hlen);
static inline bool dissector_uses_key(const struct flow_dissector *flow_dissector,
enum flow_dissector_key_id key_id)
@@ -345,8 +418,8 @@ static inline void *skb_flow_dissector_target(struct flow_dissector *flow_dissec
struct bpf_flow_dissector {
struct bpf_flow_keys *flow_keys;
const struct sk_buff *skb;
- void *data;
- void *data_end;
+ const void *data;
+ const void *data_end;
};
static inline void
@@ -357,4 +430,9 @@ flow_dissector_init_keys(struct flow_dissector_key_control *key_control,
memset(key_basic, 0, sizeof(*key_basic));
}
+#ifdef CONFIG_BPF_SYSCALL
+int flow_dissector_bpf_prog_attach_check(struct net *net,
+ struct bpf_prog *prog);
+#endif /* CONFIG_BPF_SYSCALL */
+
#endif
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index c6f7bd22db60..e343f9f8363e 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -3,8 +3,8 @@
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/netlink.h>
#include <net/flow_dissector.h>
-#include <linux/rhashtable.h>
struct flow_match {
struct flow_dissector *dissector;
@@ -48,6 +48,10 @@ struct flow_match_ports {
struct flow_dissector_key_ports *key, *mask;
};
+struct flow_match_ports_range {
+ struct flow_dissector_key_ports_range *key, *mask;
+};
+
struct flow_match_icmp {
struct flow_dissector_key_icmp *key, *mask;
};
@@ -68,6 +72,18 @@ struct flow_match_enc_opts {
struct flow_dissector_key_enc_opts *key, *mask;
};
+struct flow_match_ct {
+ struct flow_dissector_key_ct *key, *mask;
+};
+
+struct flow_match_pppoe {
+ struct flow_dissector_key_pppoe *key, *mask;
+};
+
+struct flow_match_l2tpv3 {
+ struct flow_dissector_key_l2tpv3 *key, *mask;
+};
+
struct flow_rule;
void flow_rule_match_meta(const struct flow_rule *rule,
@@ -90,6 +106,8 @@ void flow_rule_match_ip(const struct flow_rule *rule,
struct flow_match_ip *out);
void flow_rule_match_ports(const struct flow_rule *rule,
struct flow_match_ports *out);
+void flow_rule_match_ports_range(const struct flow_rule *rule,
+ struct flow_match_ports_range *out);
void flow_rule_match_tcp(const struct flow_rule *rule,
struct flow_match_tcp *out);
void flow_rule_match_icmp(const struct flow_rule *rule,
@@ -110,6 +128,12 @@ void flow_rule_match_enc_keyid(const struct flow_rule *rule,
struct flow_match_enc_keyid *out);
void flow_rule_match_enc_opts(const struct flow_rule *rule,
struct flow_match_enc_opts *out);
+void flow_rule_match_ct(const struct flow_rule *rule,
+ struct flow_match_ct *out);
+void flow_rule_match_pppoe(const struct flow_rule *rule,
+ struct flow_match_pppoe *out);
+void flow_rule_match_l2tpv3(const struct flow_rule *rule,
+ struct flow_match_l2tpv3 *out);
enum flow_action_id {
FLOW_ACTION_ACCEPT = 0,
@@ -130,14 +154,23 @@ enum flow_action_id {
FLOW_ACTION_CSUM,
FLOW_ACTION_MARK,
FLOW_ACTION_PTYPE,
+ FLOW_ACTION_PRIORITY,
FLOW_ACTION_WAKE,
FLOW_ACTION_QUEUE,
FLOW_ACTION_SAMPLE,
FLOW_ACTION_POLICE,
FLOW_ACTION_CT,
+ FLOW_ACTION_CT_METADATA,
FLOW_ACTION_MPLS_PUSH,
FLOW_ACTION_MPLS_POP,
FLOW_ACTION_MPLS_MANGLE,
+ FLOW_ACTION_GATE,
+ FLOW_ACTION_PPPOE_PUSH,
+ FLOW_ACTION_JUMP,
+ FLOW_ACTION_PIPE,
+ FLOW_ACTION_VLAN_PUSH_ETH,
+ FLOW_ACTION_VLAN_POP_ETH,
+ FLOW_ACTION_CONTINUE,
NUM_FLOW_ACTIONS,
};
@@ -154,10 +187,41 @@ enum flow_action_mangle_base {
FLOW_ACT_MANGLE_HDR_TYPE_UDP,
};
+enum flow_action_hw_stats_bit {
+ FLOW_ACTION_HW_STATS_IMMEDIATE_BIT,
+ FLOW_ACTION_HW_STATS_DELAYED_BIT,
+ FLOW_ACTION_HW_STATS_DISABLED_BIT,
+
+ FLOW_ACTION_HW_STATS_NUM_BITS
+};
+
+enum flow_action_hw_stats {
+ FLOW_ACTION_HW_STATS_IMMEDIATE =
+ BIT(FLOW_ACTION_HW_STATS_IMMEDIATE_BIT),
+ FLOW_ACTION_HW_STATS_DELAYED = BIT(FLOW_ACTION_HW_STATS_DELAYED_BIT),
+ FLOW_ACTION_HW_STATS_ANY = FLOW_ACTION_HW_STATS_IMMEDIATE |
+ FLOW_ACTION_HW_STATS_DELAYED,
+ FLOW_ACTION_HW_STATS_DISABLED =
+ BIT(FLOW_ACTION_HW_STATS_DISABLED_BIT),
+ FLOW_ACTION_HW_STATS_DONT_CARE = BIT(FLOW_ACTION_HW_STATS_NUM_BITS) - 1,
+};
+
typedef void (*action_destr)(void *priv);
+struct flow_action_cookie {
+ u32 cookie_len;
+ u8 cookie[];
+};
+
+struct flow_action_cookie *flow_action_cookie_create(void *data,
+ unsigned int len,
+ gfp_t gfp);
+void flow_action_cookie_destroy(struct flow_action_cookie *cookie);
+
struct flow_action_entry {
enum flow_action_id id;
+ u32 hw_index;
+ enum flow_action_hw_stats hw_stats;
action_destr destructor;
void *destructor_priv;
union {
@@ -168,7 +232,12 @@ struct flow_action_entry {
__be16 proto;
u8 prio;
} vlan;
- struct { /* FLOW_ACTION_PACKET_EDIT */
+ struct { /* FLOW_ACTION_VLAN_PUSH_ETH */
+ unsigned char dst[ETH_ALEN];
+ unsigned char src[ETH_ALEN];
+ } vlan_push_eth;
+ struct { /* FLOW_ACTION_MANGLE */
+ /* FLOW_ACTION_ADD */
enum flow_action_mangle_base htype;
u32 offset;
u32 mask;
@@ -178,6 +247,7 @@ struct flow_action_entry {
u32 csum_flags; /* FLOW_ACTION_CSUM */
u32 mark; /* FLOW_ACTION_MARK */
u16 ptype; /* FLOW_ACTION_PTYPE */
+ u32 priority; /* FLOW_ACTION_PRIORITY */
struct { /* FLOW_ACTION_QUEUE */
u32 ctx;
u32 index;
@@ -190,13 +260,30 @@ struct flow_action_entry {
bool truncate;
} sample;
struct { /* FLOW_ACTION_POLICE */
- s64 burst;
+ u32 burst;
u64 rate_bytes_ps;
+ u64 peakrate_bytes_ps;
+ u32 avrate;
+ u16 overhead;
+ u64 burst_pkt;
+ u64 rate_pkt_ps;
+ u32 mtu;
+ struct {
+ enum flow_action_id act_id;
+ u32 extval;
+ } exceed, notexceed;
} police;
struct { /* FLOW_ACTION_CT */
int action;
u16 zone;
+ struct nf_flowtable *flow_table;
} ct;
+ struct {
+ unsigned long cookie;
+ u32 mark;
+ u32 labels[4];
+ bool orig_dir;
+ } ct_metadata;
struct { /* FLOW_ACTION_MPLS_PUSH */
u32 label;
__be16 proto;
@@ -213,12 +300,24 @@ struct flow_action_entry {
u8 bos;
u8 ttl;
} mpls_mangle;
+ struct {
+ s32 prio;
+ u64 basetime;
+ u64 cycletime;
+ u64 cycletimeext;
+ u32 num_entries;
+ struct action_gate_entry *entries;
+ } gate;
+ struct { /* FLOW_ACTION_PPPOE_PUSH */
+ u16 sid;
+ } pppoe;
};
+ struct flow_action_cookie *cookie; /* user defined action cookie */
};
struct flow_action {
unsigned int num_entries;
- struct flow_action_entry entries[0];
+ struct flow_action_entry entries[];
};
static inline bool flow_action_has_entries(const struct flow_action *action)
@@ -227,7 +326,7 @@ static inline bool flow_action_has_entries(const struct flow_action *action)
}
/**
- * flow_action_has_one_action() - check if exactly one action is present
+ * flow_offload_has_one_action() - check if exactly one action is present
* @action: tc filter flow offload action
*
* Returns true if exactly one action is present.
@@ -237,8 +336,89 @@ static inline bool flow_offload_has_one_action(const struct flow_action *action)
return action->num_entries == 1;
}
+static inline bool flow_action_is_last_entry(const struct flow_action *action,
+ const struct flow_action_entry *entry)
+{
+ return entry == &action->entries[action->num_entries - 1];
+}
+
#define flow_action_for_each(__i, __act, __actions) \
- for (__i = 0, __act = &(__actions)->entries[0]; __i < (__actions)->num_entries; __act = &(__actions)->entries[++__i])
+ for (__i = 0, __act = &(__actions)->entries[0]; \
+ __i < (__actions)->num_entries; \
+ __act = &(__actions)->entries[++__i])
+
+static inline bool
+flow_action_mixed_hw_stats_check(const struct flow_action *action,
+ struct netlink_ext_ack *extack)
+{
+ const struct flow_action_entry *action_entry;
+ u8 last_hw_stats;
+ int i;
+
+ if (flow_offload_has_one_action(action))
+ return true;
+
+ flow_action_for_each(i, action_entry, action) {
+ if (i && action_entry->hw_stats != last_hw_stats) {
+ NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
+ return false;
+ }
+ last_hw_stats = action_entry->hw_stats;
+ }
+ return true;
+}
+
+static inline const struct flow_action_entry *
+flow_action_first_entry_get(const struct flow_action *action)
+{
+ WARN_ON(!flow_action_has_entries(action));
+ return &action->entries[0];
+}
+
+static inline bool
+__flow_action_hw_stats_check(const struct flow_action *action,
+ struct netlink_ext_ack *extack,
+ bool check_allow_bit,
+ enum flow_action_hw_stats_bit allow_bit)
+{
+ const struct flow_action_entry *action_entry;
+
+ if (!flow_action_has_entries(action))
+ return true;
+ if (!flow_action_mixed_hw_stats_check(action, extack))
+ return false;
+
+ action_entry = flow_action_first_entry_get(action);
+
+ /* Zero is not a legal value for hw_stats, catch anyone passing it */
+ WARN_ON_ONCE(!action_entry->hw_stats);
+
+ if (!check_allow_bit &&
+ ~action_entry->hw_stats & FLOW_ACTION_HW_STATS_ANY) {
+ NL_SET_ERR_MSG_MOD(extack, "Driver supports only default HW stats type \"any\"");
+ return false;
+ } else if (check_allow_bit &&
+ !(action_entry->hw_stats & BIT(allow_bit))) {
+ NL_SET_ERR_MSG_MOD(extack, "Driver does not support selected HW stats type");
+ return false;
+ }
+ return true;
+}
+
+static inline bool
+flow_action_hw_stats_check(const struct flow_action *action,
+ struct netlink_ext_ack *extack,
+ enum flow_action_hw_stats_bit allow_bit)
+{
+ return __flow_action_hw_stats_check(action, extack, true, allow_bit);
+}
+
+static inline bool
+flow_action_basic_hw_stats_check(const struct flow_action *action,
+ struct netlink_ext_ack *extack)
+{
+ return __flow_action_hw_stats_check(action, extack, false, 0);
+}
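
A hypothetical driver use of these checks, for hardware that only implements delayed (periodically polled) counters; struct foo_priv and the foo_* functions are invented:

static int foo_flower_replace(struct foo_priv *priv,
			      struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);

	if (!flow_action_hw_stats_check(&rule->action, f->common.extack,
					FLOW_ACTION_HW_STATS_DELAYED_BIT))
		return -EOPNOTSUPP;

	return foo_program_rule(priv, rule);	/* hypothetical */
}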
struct flow_rule {
struct flow_match match;
@@ -256,15 +436,28 @@ static inline bool flow_rule_match_key(const struct flow_rule *rule,
struct flow_stats {
u64 pkts;
u64 bytes;
+ u64 drops;
u64 lastused;
+ enum flow_action_hw_stats used_hw_stats;
+ bool used_hw_stats_valid;
};
static inline void flow_stats_update(struct flow_stats *flow_stats,
- u64 bytes, u64 pkts, u64 lastused)
+ u64 bytes, u64 pkts,
+ u64 drops, u64 lastused,
+ enum flow_action_hw_stats used_hw_stats)
{
flow_stats->pkts += pkts;
flow_stats->bytes += bytes;
+ flow_stats->drops += drops;
flow_stats->lastused = max_t(u64, flow_stats->lastused, lastused);
+
+ /* The driver should pass a value with at most one bit set.
+ * Passing FLOW_ACTION_HW_STATS_ANY is invalid.
+ */
+ WARN_ON(used_hw_stats == FLOW_ACTION_HW_STATS_ANY);
+ flow_stats->used_hw_stats |= used_hw_stats;
+ flow_stats->used_hw_stats_valid = true;
}
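
On the reporting side, a driver's stats callback now passes the drop count and the statistics type it actually used; foo_hw_counters() stands in for the hardware readout:

static void foo_flower_stats(struct foo_priv *priv,
			     struct flow_cls_offload *f)
{
	u64 bytes, pkts, drops, lastused;

	foo_hw_counters(priv, f->cookie, &bytes, &pkts, &drops, &lastused);

	flow_stats_update(&f->stats, bytes, pkts, drops, lastused,
			  FLOW_ACTION_HW_STATS_DELAYED);
}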
enum flow_block_command {
@@ -276,6 +469,8 @@ enum flow_block_binder_type {
FLOW_BLOCK_BINDER_TYPE_UNSPEC,
FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
+ FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
+ FLOW_BLOCK_BINDER_TYPE_RED_MARK,
};
struct flow_block {
@@ -294,12 +489,26 @@ struct flow_block_offload {
struct list_head cb_list;
struct list_head *driver_block_list;
struct netlink_ext_ack *extack;
+ struct Qdisc *sch;
+ struct list_head *cb_list_head;
};
enum tc_setup_type;
typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data,
void *cb_priv);
+struct flow_block_cb;
+
+struct flow_block_indr {
+ struct list_head list;
+ struct net_device *dev;
+ struct Qdisc *sch;
+ enum flow_block_binder_type binder_type;
+ void *data;
+ void *cb_priv;
+ void (*cleanup)(struct flow_block_cb *block_cb);
+};
+
struct flow_block_cb {
struct list_head driver_list;
struct list_head list;
@@ -307,12 +516,21 @@ struct flow_block_cb {
void *cb_ident;
void *cb_priv;
void (*release)(void *cb_priv);
+ struct flow_block_indr indr;
unsigned int refcnt;
};
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
void *cb_ident, void *cb_priv,
void (*release)(void *cb_priv));
+struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
+ void *cb_ident, void *cb_priv,
+ void (*release)(void *cb_priv),
+ struct flow_block_offload *bo,
+ struct net_device *dev,
+ struct Qdisc *sch, void *data,
+ void *indr_cb_priv,
+ void (*cleanup)(struct flow_block_cb *block_cb));
void flow_block_cb_free(struct flow_block_cb *block_cb);
struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
@@ -334,6 +552,13 @@ static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
list_move(&block_cb->list, &offload->cb_list);
}
+static inline void flow_indr_block_cb_remove(struct flow_block_cb *block_cb,
+ struct flow_block_offload *offload)
+{
+ list_del(&block_cb->indr.list);
+ list_move(&block_cb->list, &offload->cb_list);
+}
+
bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
struct list_head *driver_block_list);
@@ -366,6 +591,23 @@ struct flow_cls_offload {
u32 classid;
};
+enum offload_act_command {
+ FLOW_ACT_REPLACE,
+ FLOW_ACT_DESTROY,
+ FLOW_ACT_STATS,
+};
+
+struct flow_offload_action {
+ struct netlink_ext_ack *extack; /* NULL in FLOW_ACT_STATS process */
+ enum offload_act_command command;
+ enum flow_action_id id;
+ u32 index;
+ struct flow_stats stats;
+ struct flow_action action;
+};
+
+struct flow_offload_action *offload_action_alloc(unsigned int num_actions);
+
static inline struct flow_rule *
flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
{
@@ -377,39 +619,18 @@ static inline void flow_block_init(struct flow_block *flow_block)
INIT_LIST_HEAD(&flow_block->cb_list);
}
-typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
- enum tc_setup_type type, void *type_data);
-
-typedef void flow_indr_block_cmd_t(struct net_device *dev,
- flow_indr_block_bind_cb_t *cb, void *cb_priv,
- enum flow_block_command command);
-
-struct flow_indr_block_entry {
- flow_indr_block_cmd_t *cb;
- struct list_head list;
-};
-
-void flow_indr_add_block_cb(struct flow_indr_block_entry *entry);
-
-void flow_indr_del_block_cb(struct flow_indr_block_entry *entry);
-
-int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
- flow_indr_block_bind_cb_t *cb,
- void *cb_ident);
-
-void __flow_indr_block_cb_unregister(struct net_device *dev,
- flow_indr_block_bind_cb_t *cb,
- void *cb_ident);
-
-int flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
- flow_indr_block_bind_cb_t *cb, void *cb_ident);
-
-void flow_indr_block_cb_unregister(struct net_device *dev,
- flow_indr_block_bind_cb_t *cb,
- void *cb_ident);
-
-void flow_indr_block_call(struct net_device *dev,
- struct flow_block_offload *bo,
- enum flow_block_command command);
+typedef int flow_indr_block_bind_cb_t(struct net_device *dev, struct Qdisc *sch, void *cb_priv,
+ enum tc_setup_type type, void *type_data,
+ void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb));
+
+int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
+void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
+ void (*release)(void *cb_priv));
+int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
+ enum tc_setup_type type, void *data,
+ struct flow_block_offload *bo,
+ void (*cleanup)(struct flow_block_cb *block_cb));
+bool flow_indr_dev_exists(void);
#endif /* _NET_FLOW_OFFLOAD_H */
diff --git a/include/net/fq.h b/include/net/fq.h
index 2ad85e683041..07b5aff6ec58 100644
--- a/include/net/fq.h
+++ b/include/net/fq.h
@@ -7,6 +7,10 @@
#ifndef __NET_SCHED_FQ_H
#define __NET_SCHED_FQ_H
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
struct fq_tin;
/**
@@ -19,8 +23,6 @@ struct fq_tin;
* @flowchain: can be linked to fq_tin's new_flows or old_flows. Used for DRR++
* (deficit round robin) based round robin queuing similar to the one
* found in net/sched/sch_fq_codel.c
- * @backlogchain: can be linked to other fq_flow and fq. Used to keep track of
- * fat flows and efficient head-dropping if packet limit is reached
* @queue: sk_buff queue to hold packets
* @backlog: number of bytes pending in the queue. The number of packets can be
* found in @queue.qlen
@@ -29,7 +31,6 @@ struct fq_tin;
struct fq_flow {
struct fq_tin *tin;
struct list_head flowchain;
- struct list_head backlogchain;
struct sk_buff_head queue;
u32 backlog;
int deficit;
@@ -47,6 +48,8 @@ struct fq_flow {
struct fq_tin {
struct list_head new_flows;
struct list_head old_flows;
+ struct list_head tin_list;
+ struct fq_flow default_flow;
u32 backlog_bytes;
u32 backlog_packets;
u32 overlimit;
@@ -59,17 +62,16 @@ struct fq_tin {
/**
* struct fq - main container for fair queuing purposes
*
- * @backlogs: linked to fq_flows. Used to maintain fat flows for efficient
- * head-dropping when @backlog reaches @limit
* @limit: max number of packets that can be queued across all flows
* @backlog: number of packets queued across all flows
*/
struct fq {
struct fq_flow *flows;
- struct list_head backlogs;
+ unsigned long *flows_bitmap;
+
+ struct list_head tin_backlog;
spinlock_t lock;
u32 flows_cnt;
- siphash_key_t perturbation;
u32 limit;
u32 memory_limit;
u32 memory_usage;
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
index 38a9a3d1222b..524b510f1c68 100644
--- a/include/net/fq_impl.h
+++ b/include/net/fq_impl.h
@@ -11,35 +11,37 @@
/* functions that are embedded into includer */
-static void fq_adjust_removal(struct fq *fq,
- struct fq_flow *flow,
- struct sk_buff *skb)
+
+static void
+__fq_adjust_removal(struct fq *fq, struct fq_flow *flow, unsigned int packets,
+ unsigned int bytes, unsigned int truesize)
{
struct fq_tin *tin = flow->tin;
+ int idx;
- tin->backlog_bytes -= skb->len;
- tin->backlog_packets--;
- flow->backlog -= skb->len;
- fq->backlog--;
- fq->memory_usage -= skb->truesize;
-}
+ tin->backlog_bytes -= bytes;
+ tin->backlog_packets -= packets;
+ flow->backlog -= bytes;
+ fq->backlog -= packets;
+ fq->memory_usage -= truesize;
-static void fq_rejigger_backlog(struct fq *fq, struct fq_flow *flow)
-{
- struct fq_flow *i;
+ if (flow->backlog)
+ return;
- if (flow->backlog == 0) {
- list_del_init(&flow->backlogchain);
- } else {
- i = flow;
+ if (flow == &tin->default_flow) {
+ list_del_init(&tin->tin_list);
+ return;
+ }
- list_for_each_entry_continue(i, &fq->backlogs, backlogchain)
- if (i->backlog < flow->backlog)
- break;
+ idx = flow - fq->flows;
+ __clear_bit(idx, fq->flows_bitmap);
+}
- list_move_tail(&flow->backlogchain,
- &i->backlogchain);
- }
+static void fq_adjust_removal(struct fq *fq,
+ struct fq_flow *flow,
+ struct sk_buff *skb)
+{
+ __fq_adjust_removal(fq, flow, 1, skb->len, skb->truesize);
}
static struct sk_buff *fq_flow_dequeue(struct fq *fq,
@@ -54,11 +56,37 @@ static struct sk_buff *fq_flow_dequeue(struct fq *fq,
return NULL;
fq_adjust_removal(fq, flow, skb);
- fq_rejigger_backlog(fq, flow);
return skb;
}
+static int fq_flow_drop(struct fq *fq, struct fq_flow *flow,
+ fq_skb_free_t free_func)
+{
+ unsigned int packets = 0, bytes = 0, truesize = 0;
+ struct fq_tin *tin = flow->tin;
+ struct sk_buff *skb;
+ int pending;
+
+ lockdep_assert_held(&fq->lock);
+
+ pending = min_t(int, 32, skb_queue_len(&flow->queue) / 2);
+ do {
+ skb = __skb_dequeue(&flow->queue);
+ if (!skb)
+ break;
+
+ packets++;
+ bytes += skb->len;
+ truesize += skb->truesize;
+ free_func(fq, tin, flow, skb);
+ } while (packets < pending);
+
+ __fq_adjust_removal(fq, flow, packets, bytes, truesize);
+
+ return packets;
+}
+
static struct sk_buff *fq_tin_dequeue(struct fq *fq,
struct fq_tin *tin,
fq_tin_dequeue_t dequeue_func)
@@ -108,15 +136,14 @@ begin:
static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb)
{
- u32 hash = skb_get_hash_perturb(skb, &fq->perturbation);
+ u32 hash = skb_get_hash(skb);
return reciprocal_scale(hash, fq->flows_cnt);
}
static struct fq_flow *fq_flow_classify(struct fq *fq,
struct fq_tin *tin, u32 idx,
- struct sk_buff *skb,
- fq_flow_get_default_t get_default_func)
+ struct sk_buff *skb)
{
struct fq_flow *flow;
@@ -124,7 +151,7 @@ static struct fq_flow *fq_flow_classify(struct fq *fq,
flow = &fq->flows[idx];
if (flow->tin && flow->tin != tin) {
- flow = get_default_func(fq, tin, idx, skb);
+ flow = &tin->default_flow;
tin->collisions++;
fq->collisions++;
}
@@ -135,36 +162,56 @@ static struct fq_flow *fq_flow_classify(struct fq *fq,
return flow;
}
-static void fq_recalc_backlog(struct fq *fq,
- struct fq_tin *tin,
- struct fq_flow *flow)
+static struct fq_flow *fq_find_fattest_flow(struct fq *fq)
{
- struct fq_flow *i;
+ struct fq_tin *tin;
+ struct fq_flow *flow = NULL;
+ u32 len = 0;
+ int i;
- if (list_empty(&flow->backlogchain))
- list_add_tail(&flow->backlogchain, &fq->backlogs);
+ for_each_set_bit(i, fq->flows_bitmap, fq->flows_cnt) {
+ struct fq_flow *cur = &fq->flows[i];
+ unsigned int cur_len;
- i = flow;
- list_for_each_entry_continue_reverse(i, &fq->backlogs,
- backlogchain)
- if (i->backlog > flow->backlog)
- break;
+ cur_len = cur->backlog;
+ if (cur_len <= len)
+ continue;
+
+ flow = cur;
+ len = cur_len;
+ }
+
+ list_for_each_entry(tin, &fq->tin_backlog, tin_list) {
+ unsigned int cur_len = tin->default_flow.backlog;
- list_move(&flow->backlogchain, &i->backlogchain);
+ if (cur_len <= len)
+ continue;
+
+ flow = &tin->default_flow;
+ len = cur_len;
+ }
+
+ return flow;
}
static void fq_tin_enqueue(struct fq *fq,
struct fq_tin *tin, u32 idx,
struct sk_buff *skb,
- fq_skb_free_t free_func,
- fq_flow_get_default_t get_default_func)
+ fq_skb_free_t free_func)
{
struct fq_flow *flow;
bool oom;
lockdep_assert_held(&fq->lock);
- flow = fq_flow_classify(fq, tin, idx, skb, get_default_func);
+ flow = fq_flow_classify(fq, tin, idx, skb);
+
+ if (!flow->backlog) {
+ if (flow != &tin->default_flow)
+ __set_bit(idx, fq->flows_bitmap);
+ else if (list_empty(&tin->tin_list))
+ list_add(&tin->tin_list, &fq->tin_backlog);
+ }
flow->tin = tin;
flow->backlog += skb->len;
@@ -173,8 +220,6 @@ static void fq_tin_enqueue(struct fq *fq,
fq->memory_usage += skb->truesize;
fq->backlog++;
- fq_recalc_backlog(fq, tin, flow);
-
if (list_empty(&flow->flowchain)) {
flow->deficit = fq->quantum;
list_add_tail(&flow->flowchain,
@@ -184,18 +229,13 @@ static void fq_tin_enqueue(struct fq *fq,
__skb_queue_tail(&flow->queue, skb);
oom = (fq->memory_usage > fq->memory_limit);
while (fq->backlog > fq->limit || oom) {
- flow = list_first_entry_or_null(&fq->backlogs,
- struct fq_flow,
- backlogchain);
+ flow = fq_find_fattest_flow(fq);
if (!flow)
return;
- skb = fq_flow_dequeue(fq, flow);
- if (!skb)
+ if (!fq_flow_drop(fq, flow, free_func))
return;
- free_func(fq, flow->tin, flow, skb);
-
flow->tin->overlimit++;
fq->overlimit++;
if (oom) {
@@ -224,8 +264,6 @@ static void fq_flow_filter(struct fq *fq,
fq_adjust_removal(fq, flow, skb);
free_func(fq, tin, flow, skb);
}
-
- fq_rejigger_backlog(fq, flow);
}
static void fq_tin_filter(struct fq *fq,
@@ -248,16 +286,18 @@ static void fq_flow_reset(struct fq *fq,
struct fq_flow *flow,
fq_skb_free_t free_func)
{
+ struct fq_tin *tin = flow->tin;
struct sk_buff *skb;
while ((skb = fq_flow_dequeue(fq, flow)))
- free_func(fq, flow->tin, flow, skb);
+ free_func(fq, tin, flow, skb);
- if (!list_empty(&flow->flowchain))
+ if (!list_empty(&flow->flowchain)) {
list_del_init(&flow->flowchain);
-
- if (!list_empty(&flow->backlogchain))
- list_del_init(&flow->backlogchain);
+ if (list_empty(&tin->new_flows) &&
+ list_empty(&tin->old_flows))
+ list_del_init(&tin->tin_list);
+ }
flow->tin = NULL;
@@ -283,6 +323,7 @@ static void fq_tin_reset(struct fq *fq,
fq_flow_reset(fq, flow, free_func);
}
+ WARN_ON_ONCE(!list_empty(&tin->tin_list));
WARN_ON_ONCE(tin->backlog_bytes);
WARN_ON_ONCE(tin->backlog_packets);
}
@@ -290,7 +331,6 @@ static void fq_tin_reset(struct fq *fq,
static void fq_flow_init(struct fq_flow *flow)
{
INIT_LIST_HEAD(&flow->flowchain);
- INIT_LIST_HEAD(&flow->backlogchain);
__skb_queue_head_init(&flow->queue);
}
@@ -298,6 +338,8 @@ static void fq_tin_init(struct fq_tin *tin)
{
INIT_LIST_HEAD(&tin->new_flows);
INIT_LIST_HEAD(&tin->old_flows);
+ INIT_LIST_HEAD(&tin->tin_list);
+ fq_flow_init(&tin->default_flow);
}
static int fq_init(struct fq *fq, int flows_cnt)
@@ -305,10 +347,9 @@ static int fq_init(struct fq *fq, int flows_cnt)
int i;
memset(fq, 0, sizeof(fq[0]));
- INIT_LIST_HEAD(&fq->backlogs);
spin_lock_init(&fq->lock);
+ INIT_LIST_HEAD(&fq->tin_backlog);
fq->flows_cnt = max_t(u32, flows_cnt, 1);
- get_random_bytes(&fq->perturbation, sizeof(fq->perturbation));
fq->quantum = 300;
fq->limit = 8192;
fq->memory_limit = 16 << 20; /* 16 MBytes */
@@ -317,6 +358,13 @@ static int fq_init(struct fq *fq, int flows_cnt)
if (!fq->flows)
return -ENOMEM;
+ fq->flows_bitmap = bitmap_zalloc(fq->flows_cnt, GFP_KERNEL);
+ if (!fq->flows_bitmap) {
+ kvfree(fq->flows);
+ fq->flows = NULL;
+ return -ENOMEM;
+ }
+
for (i = 0; i < fq->flows_cnt; i++)
fq_flow_init(&fq->flows[i]);
@@ -333,6 +381,9 @@ static void fq_reset(struct fq *fq,
kvfree(fq->flows);
fq->flows = NULL;
+
+ bitmap_free(fq->flows_bitmap);
+ fq->flows_bitmap = NULL;
}
#endif
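
With the sorted backlogchain gone, enqueue and dequeue no longer rebalance a list; standalone flows with backlog are flagged in flows_bitmap (tins with a backlogged default_flow sit on tin_backlog), and only the overlimit drop path pays for a scan, as in fq_find_fattest_flow() above. A standalone, runnable model of that scan, with plain word-testing standing in for the kernel's for_each_set_bit():

#include <stdio.h>

#define NFLOWS 16

int main(void)
{
	unsigned int backlog[NFLOWS] = { [3] = 700, [9] = 1500, [12] = 400 };
	unsigned long bitmap = (1UL << 3) | (1UL << 9) | (1UL << 12);
	unsigned int len = 0;
	int i, fattest = -1;

	/* visit only flows whose activity bit is set, keep the largest */
	for (i = 0; i < NFLOWS; i++) {
		if (!(bitmap & (1UL << i)) || backlog[i] <= len)
			continue;
		fattest = i;
		len = backlog[i];
	}
	printf("fattest flow: %d (%u bytes)\n", fattest, len);
	return 0;
}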
diff --git a/include/net/garp.h b/include/net/garp.h
index 4d9a0c6a2e5f..59a07b171def 100644
--- a/include/net/garp.h
+++ b/include/net/garp.h
@@ -2,6 +2,8 @@
#ifndef _NET_GARP_H
#define _NET_GARP_H
+#include <linux/if_ether.h>
+#include <linux/types.h>
#include <net/stp.h>
#define GARP_PROTOCOL_ID 0x1
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 1424e02cef90..7aa2b8e1fb29 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -7,14 +7,17 @@
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
-/* Note: this used to be in include/uapi/linux/gen_stats.h */
-struct gnet_stats_basic_packed {
- __u64 bytes;
- __u64 packets;
-};
-
-struct gnet_stats_basic_cpu {
- struct gnet_stats_basic_packed bstats;
+/* Throughput stats.
+ * Must be initialized beforehand with gnet_stats_basic_sync_init().
+ *
+ * If no reads can ever occur in parallel with writes (e.g. stack-allocated
+ * bstats), then the internal stat values can be written to and read
+ * from directly. Otherwise, use _bstats_set/update() for writes and
+ * gnet_stats_add_basic() for reads.
+ */
+struct gnet_stats_basic_sync {
+ u64_stats_t bytes;
+ u64_stats_t packets;
struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));
@@ -34,6 +37,7 @@ struct gnet_dump {
struct tc_stats tc_stats;
};
+void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b);
int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
struct gnet_dump *d, int padattr);
@@ -42,41 +46,38 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
spinlock_t *lock, struct gnet_dump *d,
int padattr);
-int gnet_stats_copy_basic(const seqcount_t *running,
- struct gnet_dump *d,
- struct gnet_stats_basic_cpu __percpu *cpu,
- struct gnet_stats_basic_packed *b);
-void __gnet_stats_copy_basic(const seqcount_t *running,
- struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu,
- struct gnet_stats_basic_packed *b);
-int gnet_stats_copy_basic_hw(const seqcount_t *running,
- struct gnet_dump *d,
- struct gnet_stats_basic_cpu __percpu *cpu,
- struct gnet_stats_basic_packed *b);
+int gnet_stats_copy_basic(struct gnet_dump *d,
+ struct gnet_stats_basic_sync __percpu *cpu,
+ struct gnet_stats_basic_sync *b, bool running);
+void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
+ struct gnet_stats_basic_sync __percpu *cpu,
+ struct gnet_stats_basic_sync *b, bool running);
+int gnet_stats_copy_basic_hw(struct gnet_dump *d,
+ struct gnet_stats_basic_sync __percpu *cpu,
+ struct gnet_stats_basic_sync *b, bool running);
int gnet_stats_copy_rate_est(struct gnet_dump *d,
struct net_rate_estimator __rcu **ptr);
int gnet_stats_copy_queue(struct gnet_dump *d,
struct gnet_stats_queue __percpu *cpu_q,
struct gnet_stats_queue *q, __u32 qlen);
-void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
- const struct gnet_stats_queue __percpu *cpu_q,
- const struct gnet_stats_queue *q, __u32 qlen);
+void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
+ const struct gnet_stats_queue __percpu *cpu_q,
+ const struct gnet_stats_queue *q);
int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
int gnet_stats_finish_copy(struct gnet_dump *d);
-int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
+ struct gnet_stats_basic_sync __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est,
spinlock_t *lock,
- seqcount_t *running, struct nlattr *opt);
+ bool running, struct nlattr *opt);
void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
-int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
+ struct gnet_stats_basic_sync __percpu *cpu_bstats,
struct net_rate_estimator __rcu **ptr,
spinlock_t *lock,
- seqcount_t *running, struct nlattr *opt);
+ bool running, struct nlattr *opt);
bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
struct gnet_stats_rate_est64 *sample);
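
The packed/percpu pair is replaced by one structure whose u64_stats_t counters are guarded by @syncp, giving 32-bit readers tear-free snapshots. A sketch of the underlying writer/reader pattern using the generic u64_stats API; in-tree code would normally go through _bstats_update() and gnet_stats_add_basic() rather than open-coding this:

static void my_bstats_update(struct gnet_stats_basic_sync *b,
			     unsigned int bytes, unsigned int packets)
{
	u64_stats_update_begin(&b->syncp);
	u64_stats_add(&b->bytes, bytes);
	u64_stats_add(&b->packets, packets);
	u64_stats_update_end(&b->syncp);
}

static void my_bstats_read(struct gnet_stats_basic_sync *b,
			   u64 *bytes, u64 *packets)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&b->syncp);
		*bytes = u64_stats_read(&b->bytes);
		*packets = u64_stats_read(&b->packets);
	} while (u64_stats_fetch_retry(&b->syncp, start));
}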
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 74950663bb00..9f97f73615b6 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -11,9 +11,11 @@
/**
* struct genl_multicast_group - generic netlink multicast group
* @name: name of the multicast group, names are per-family
+ * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM)
*/
struct genl_multicast_group {
char name[GENL_NAMSIZ];
+ u8 flags;
};
struct genl_ops;
@@ -35,19 +37,25 @@ struct genl_info;
* do additional, common, filtering and return an error
* @post_doit: called after an operation's doit callback, it may
* undo operations done by pre_doit, for example release locks
- * @mcast_bind: a socket bound to the given multicast group (which
- * is given as the offset into the groups array)
- * @mcast_unbind: a socket was unbound from the given multicast group.
- * Note that unbind() will not be called symmetrically if the
- * generic netlink family is removed while there are still open
- * sockets.
- * @attrbuf: buffer to store parsed attributes (private)
+ * @module: pointer to the owning module (set to THIS_MODULE)
* @mcgrps: multicast groups used by this family
* @n_mcgrps: number of multicast groups
+ * @resv_start_op: first operation for which reserved fields of the header
+ * can be validated and policies are required (see below);
+ * new families should leave this field at zero
* @mcgrp_offset: starting number of multicast group IDs in this family
* (private)
* @ops: the operations supported by this family
* @n_ops: number of operations supported by this family
+ * @small_ops: the small-struct operations supported by this family
+ * @n_small_ops: number of small-struct operations supported by this family
+ *
+ * Attribute policies (the combination of @policy and @maxattr fields)
+ * can be attached at the family level or at the operation level.
+ * If both are present the per-operation policy takes precedence.
+ * For operations before @resv_start_op, lack of policy means that the core
+ * will perform no attribute parsing or validation. For newer operations,
+ * if a policy is not provided, the core will reject all TLV attributes.
*/
struct genl_family {
int id; /* private */
@@ -55,8 +63,13 @@ struct genl_family {
char name[GENL_NAMSIZ];
unsigned int version;
unsigned int maxattr;
- bool netnsok;
- bool parallel_ops;
+ unsigned int mcgrp_offset; /* private */
+ u8 netnsok:1;
+ u8 parallel_ops:1;
+ u8 n_ops;
+ u8 n_small_ops;
+ u8 n_mcgrps;
+ u8 resv_start_op;
const struct nla_policy *policy;
int (*pre_doit)(const struct genl_ops *ops,
struct sk_buff *skb,
@@ -64,14 +77,9 @@ struct genl_family {
void (*post_doit)(const struct genl_ops *ops,
struct sk_buff *skb,
struct genl_info *info);
- int (*mcast_bind)(struct net *net, int group);
- void (*mcast_unbind)(struct net *net, int group);
- struct nlattr ** attrbuf; /* private */
const struct genl_ops * ops;
+ const struct genl_small_ops *small_ops;
const struct genl_multicast_group *mcgrps;
- unsigned int n_ops;
- unsigned int n_mcgrps;
- unsigned int mcgrp_offset; /* private */
struct module *module;
};
@@ -111,13 +119,12 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net)
#define GENL_SET_ERR_MSG(info, msg) NL_SET_ERR_MSG((info)->extack, msg)
-static inline int genl_err_attr(struct genl_info *info, int err,
- const struct nlattr *attr)
-{
- info->extack->bad_attr = attr;
-
- return err;
-}
+/* Report that a root attribute is missing */
+#define GENL_REQ_ATTR_CHECK(info, attr) ({ \
+ struct genl_info *__info = (info); \
+ \
+ NL_REQ_ATTR_CHECK(__info->extack, NULL, __info->attrs, (attr)); \
+})
enum genl_validate_flags {
GENL_DONT_VALIDATE_STRICT = BIT(0),
@@ -126,28 +133,34 @@ enum genl_validate_flags {
};
/**
- * struct genl_info - info that is available during dumpit op call
- * @family: generic netlink family - for internal genl code usage
- * @ops: generic netlink ops - for internal genl code usage
- * @attrs: netlink attributes
+ * struct genl_small_ops - generic netlink operations (small version)
+ * @cmd: command identifier
+ * @internal_flags: flags used by the family
+ * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM)
+ * @validate: validation flags from enum genl_validate_flags
+ * @doit: standard command callback
+ * @dumpit: callback for dumpers
+ *
+ * This is a cut-down version of struct genl_ops for users who don't need
+ * most of the ancillary infra and want to save space.
*/
-struct genl_dumpit_info {
- const struct genl_family *family;
- const struct genl_ops *ops;
- struct nlattr **attrs;
+struct genl_small_ops {
+ int (*doit)(struct sk_buff *skb, struct genl_info *info);
+ int (*dumpit)(struct sk_buff *skb, struct netlink_callback *cb);
+ u8 cmd;
+ u8 internal_flags;
+ u8 flags;
+ u8 validate;
};
-static inline const struct genl_dumpit_info *
-genl_dumpit_info(struct netlink_callback *cb)
-{
- return cb->data;
-}
-
/**
* struct genl_ops - generic netlink operations
* @cmd: command identifier
* @internal_flags: flags used by the family
- * @flags: flags
+ * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM)
+ * @maxattr: maximum number of attributes supported
+ * @policy: netlink policy (takes precedence over family policy)
+ * @validate: validation flags from enum genl_validate_flags
* @doit: standard command callback
* @start: start callback for dumps
* @dumpit: callback for dumpers
@@ -160,12 +173,32 @@ struct genl_ops {
int (*dumpit)(struct sk_buff *skb,
struct netlink_callback *cb);
int (*done)(struct netlink_callback *cb);
+ const struct nla_policy *policy;
+ unsigned int maxattr;
u8 cmd;
u8 internal_flags;
u8 flags;
u8 validate;
};
+/**
+ * struct genl_dumpit_info - info that is available during dumpit op call
+ * @family: generic netlink family - for internal genl code usage
+ * @op: generic netlink ops - for internal genl code usage
+ * @attrs: netlink attributes
+ */
+struct genl_dumpit_info {
+ const struct genl_family *family;
+ struct genl_ops op;
+ struct nlattr **attrs;
+};
+
+static inline const struct genl_dumpit_info *
+genl_dumpit_info(struct netlink_callback *cb)
+{
+ return cb->data;
+}
+
int genl_register_family(struct genl_family *family);
int genl_unregister_family(const struct genl_family *family);
void genl_notify(const struct genl_family *family, struct sk_buff *skb,
@@ -330,6 +363,7 @@ int genlmsg_multicast_allns(const struct genl_family *family,
/**
* genlmsg_unicast - unicast a netlink message
+ * @net: network namespace to look up @portid in
* @skb: netlink message as socket buffer
* @portid: netlink portid of the destination socket
*/
@@ -349,7 +383,7 @@ static inline int genlmsg_reply(struct sk_buff *skb, struct genl_info *info)
}
/**
- * gennlmsg_data - head of message payload
+ * genlmsg_data - head of message payload
* @gnlh: genetlink message header
*/
static inline void *genlmsg_data(const struct genlmsghdr *gnlh)
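
Taken together, the small-ops and @resv_start_op additions let a family shrink its op table while opting new commands into strict validation. A hedged sketch of a family definition; all MY_* and my_* names are hypothetical:

static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {};

static int my_get_doit(struct sk_buff *skb, struct genl_info *info);

static const struct genl_small_ops my_small_ops[] = {
	{
		.cmd		= MY_CMD_GET,
		.flags		= GENL_ADMIN_PERM,
		.validate	= GENL_DONT_VALIDATE_STRICT,
		.doit		= my_get_doit,
	},
};

static struct genl_family my_family = {
	.name		= "my_family",
	.version	= 1,
	.maxattr	= MY_ATTR_MAX,
	.policy		= my_policy,
	.module		= THIS_MODULE,
	.small_ops	= my_small_ops,
	.n_small_ops	= ARRAY_SIZE(my_small_ops),
	/* commands from here on get strict validation and required policy */
	.resv_start_op	= MY_CMD_GET + 1,
};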
diff --git a/include/net/gre.h b/include/net/gre.h
index b60f212c16c6..4e209708b754 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -106,17 +106,6 @@ static inline __be16 gre_tnl_flags_to_gre_flags(__be16 tflags)
return flags;
}
-static inline __sum16 gre_checksum(struct sk_buff *skb)
-{
- __wsum csum;
-
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- csum = lco_csum(skb);
- else
- csum = skb_checksum(skb, 0, skb->len, 0);
- return csum_fold(csum);
-}
-
static inline void gre_build_header(struct sk_buff *skb, int hdr_len,
__be16 flags, __be16 proto,
__be32 key, __be32 seq)
@@ -146,7 +135,13 @@ static inline void gre_build_header(struct sk_buff *skb, int hdr_len,
!(skb_shinfo(skb)->gso_type &
(SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
*ptr = 0;
- *(__sum16 *)ptr = gre_checksum(skb);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ *(__sum16 *)ptr = csum_fold(lco_csum(skb));
+ } else {
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = sizeof(*greh);
+ }
}
}
}
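
For skbs that are not CHECKSUM_PARTIAL, gre_build_header() now defers the work: it marks the skb CHECKSUM_PARTIAL and points csum_start/csum_offset at the GRE checksum field, so the sum is produced once later (possibly in hardware) instead of walking the packet here. The LCO branch still folds immediately; a runnable userspace model of that 16-bit one's-complement fold behind csum_fold():

#include <stdint.h>
#include <stdio.h>

/* Collapse a 32-bit one's-complement accumulator into the final
 * 16-bit Internet checksum, as csum_fold() does. */
static uint16_t fold_csum(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add carries back in */
	sum = (sum & 0xffff) + (sum >> 16);	/* at most one more carry */
	return (uint16_t)~sum;
}

int main(void)
{
	/* partial sum over a toy payload of two 16-bit words */
	uint32_t sum = 0x0001 + 0xf203;

	printf("folded checksum: 0x%04x\n", fold_csum(sum));
	return 0;
}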
diff --git a/include/net/gro.h b/include/net/gro.h
new file mode 100644
index 000000000000..a4fab706240d
--- /dev/null
+++ b/include/net/gro.h
@@ -0,0 +1,450 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _NET_IPV6_GRO_H
+#define _NET_IPV6_GRO_H
+
+#include <linux/indirect_call_wrapper.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+#include <linux/skbuff.h>
+#include <net/udp.h>
+
+struct napi_gro_cb {
+ /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
+ void *frag0;
+
+ /* Length of frag0. */
+ unsigned int frag0_len;
+
+ /* This indicates where we are processing relative to skb->data. */
+ int data_offset;
+
+ /* This is non-zero if the packet cannot be merged with the new skb. */
+ u16 flush;
+
+ /* Save the IP ID here and check when we get to the transport layer */
+ u16 flush_id;
+
+ /* Number of segments aggregated. */
+ u16 count;
+
+ /* Used in ipv6_gro_receive() and foo-over-udp */
+ u16 proto;
+
+ /* jiffies when first packet was created/queued */
+ unsigned long age;
+
+/* Used in napi_gro_cb::free */
+#define NAPI_GRO_FREE 1
+#define NAPI_GRO_FREE_STOLEN_HEAD 2
+ /* portion of the cb set to zero at every gro iteration */
+ struct_group(zeroed,
+
+ /* Start offset for remote checksum offload */
+ u16 gro_remcsum_start;
+
+ /* This is non-zero if the packet may be of the same flow. */
+ u8 same_flow:1;
+
+ /* Used in tunnel GRO receive */
+ u8 encap_mark:1;
+
+ /* GRO checksum is valid */
+ u8 csum_valid:1;
+
+ /* Number of checksums via CHECKSUM_UNNECESSARY */
+ u8 csum_cnt:3;
+
+ /* Free the skb? */
+ u8 free:2;
+
+ /* Used in foo-over-udp, set in udp[46]_gro_receive */
+ u8 is_ipv6:1;
+
+ /* Used in GRE, set in fou/gue_gro_receive */
+ u8 is_fou:1;
+
+ /* Used to determine if flush_id can be ignored */
+ u8 is_atomic:1;
+
+ /* Number of gro_receive callbacks this packet already went through */
+ u8 recursion_counter:4;
+
+ /* GRO is done by frag_list pointer chaining. */
+ u8 is_flist:1;
+ );
+
+ /* used to support CHECKSUM_COMPLETE for tunneling protocols */
+ __wsum csum;
+
+ /* used in skb_gro_receive() slow path */
+ struct sk_buff *last;
+};
+
+#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
+
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+ return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
+static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
+ struct list_head *head,
+ struct sk_buff *skb)
+{
+ if (unlikely(gro_recursion_inc_test(skb))) {
+ NAPI_GRO_CB(skb)->flush |= 1;
+ return NULL;
+ }
+
+ return cb(head, skb);
+}
+
+typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
+ struct sk_buff *);
+static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
+ struct sock *sk,
+ struct list_head *head,
+ struct sk_buff *skb)
+{
+ if (unlikely(gro_recursion_inc_test(skb))) {
+ NAPI_GRO_CB(skb)->flush |= 1;
+ return NULL;
+ }
+
+ return cb(sk, head, skb);
+}
+
+static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
+{
+ return NAPI_GRO_CB(skb)->data_offset;
+}
+
+static inline unsigned int skb_gro_len(const struct sk_buff *skb)
+{
+ return skb->len - NAPI_GRO_CB(skb)->data_offset;
+}
+
+static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
+{
+ NAPI_GRO_CB(skb)->data_offset += len;
+}
+
+static inline void *skb_gro_header_fast(struct sk_buff *skb,
+ unsigned int offset)
+{
+ return NAPI_GRO_CB(skb)->frag0 + offset;
+}
+
+static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
+{
+ return NAPI_GRO_CB(skb)->frag0_len < hlen;
+}
+
+static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
+{
+ NAPI_GRO_CB(skb)->frag0 = NULL;
+ NAPI_GRO_CB(skb)->frag0_len = 0;
+}
+
+static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
+ unsigned int offset)
+{
+ if (!pskb_may_pull(skb, hlen))
+ return NULL;
+
+ skb_gro_frag0_invalidate(skb);
+ return skb->data + offset;
+}
+
+static inline void *skb_gro_header(struct sk_buff *skb,
+ unsigned int hlen, unsigned int offset)
+{
+ void *ptr;
+
+ ptr = skb_gro_header_fast(skb, offset);
+ if (skb_gro_header_hard(skb, hlen))
+ ptr = skb_gro_header_slow(skb, hlen, offset);
+ return ptr;
+}
+
+static inline void *skb_gro_network_header(struct sk_buff *skb)
+{
+ return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
+ skb_network_offset(skb);
+}
+
+static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
+{
+ const struct iphdr *iph = skb_gro_network_header(skb);
+
+ return csum_tcpudp_nofold(iph->saddr, iph->daddr,
+ skb_gro_len(skb), proto, 0);
+}
+
+static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
+ const void *start, unsigned int len)
+{
+ if (NAPI_GRO_CB(skb)->csum_valid)
+ NAPI_GRO_CB(skb)->csum = wsum_negate(csum_partial(start, len,
+ wsum_negate(NAPI_GRO_CB(skb)->csum)));
+}
+
+/* GRO checksum functions. These are logical equivalents of the normal
+ * checksum functions (in skbuff.h) except that they operate on the GRO
+ * offsets and fields in sk_buff.
+ */
+
+__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
+
+static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
+{
+ return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
+}
+
+static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
+ bool zero_okay,
+ __sum16 check)
+{
+ return ((skb->ip_summed != CHECKSUM_PARTIAL ||
+ skb_checksum_start_offset(skb) <
+ skb_gro_offset(skb)) &&
+ !skb_at_gro_remcsum_start(skb) &&
+ NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+ (!zero_okay || check));
+}
+
+static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
+ __wsum psum)
+{
+ if (NAPI_GRO_CB(skb)->csum_valid &&
+ !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
+ return 0;
+
+ NAPI_GRO_CB(skb)->csum = psum;
+
+ return __skb_gro_checksum_complete(skb);
+}
+
+static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
+{
+ if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
+ /* Consume a checksum from CHECKSUM_UNNECESSARY */
+ NAPI_GRO_CB(skb)->csum_cnt--;
+ } else {
+ /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
+ * verified a new top level checksum or an encapsulated one
+ * during GRO. This saves work if we fall back to the normal path.
+ */
+ __skb_incr_checksum_unnecessary(skb);
+ }
+}
+
+#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
+ compute_pseudo) \
+({ \
+ __sum16 __ret = 0; \
+ if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
+ __ret = __skb_gro_checksum_validate_complete(skb, \
+ compute_pseudo(skb, proto)); \
+ if (!__ret) \
+ skb_gro_incr_csum_unnecessary(skb); \
+ __ret; \
+})
+
+#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
+ __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
+
+#define skb_gro_checksum_validate_zero_check(skb, proto, check, \
+ compute_pseudo) \
+ __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
+
+#define skb_gro_checksum_simple_validate(skb) \
+ __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
+
+static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
+{
+ return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+ !NAPI_GRO_CB(skb)->csum_valid);
+}
+
+static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
+ __wsum pseudo)
+{
+ NAPI_GRO_CB(skb)->csum = ~pseudo;
+ NAPI_GRO_CB(skb)->csum_valid = 1;
+}
+
+#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo) \
+do { \
+ if (__skb_gro_checksum_convert_check(skb)) \
+ __skb_gro_checksum_convert(skb, \
+ compute_pseudo(skb, proto)); \
+} while (0)
+
+struct gro_remcsum {
+ int offset;
+ __wsum delta;
+};
+
+static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
+{
+ grc->offset = 0;
+ grc->delta = 0;
+}
+
+static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
+ unsigned int off, size_t hdrlen,
+ int start, int offset,
+ struct gro_remcsum *grc,
+ bool nopartial)
+{
+ __wsum delta;
+ size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
+
+ BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
+
+ if (!nopartial) {
+ NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
+ return ptr;
+ }
+
+ ptr = skb_gro_header(skb, off + plen, off);
+ if (!ptr)
+ return NULL;
+
+ delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
+ start, offset);
+
+ /* Adjust skb->csum since we changed the packet */
+ NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
+
+ grc->offset = off + hdrlen + offset;
+ grc->delta = delta;
+
+ return ptr;
+}
+
+static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
+ struct gro_remcsum *grc)
+{
+ void *ptr;
+ size_t plen = grc->offset + sizeof(u16);
+
+ if (!grc->delta)
+ return;
+
+ ptr = skb_gro_header(skb, plen, grc->offset);
+ if (!ptr)
+ return;
+
+ remcsum_unadjust((__sum16 *)ptr, grc->delta);
+}
+
+#ifdef CONFIG_XFRM_OFFLOAD
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
+{
+ if (PTR_ERR(pp) != -EINPROGRESS)
+ NAPI_GRO_CB(skb)->flush |= flush;
+}
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+ struct sk_buff *pp,
+ int flush,
+ struct gro_remcsum *grc)
+{
+ if (PTR_ERR(pp) != -EINPROGRESS) {
+ NAPI_GRO_CB(skb)->flush |= flush;
+ skb_gro_remcsum_cleanup(skb, grc);
+ skb->remcsum_offload = 0;
+ }
+}
+#else
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
+{
+ NAPI_GRO_CB(skb)->flush |= flush;
+}
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+ struct sk_buff *pp,
+ int flush,
+ struct gro_remcsum *grc)
+{
+ NAPI_GRO_CB(skb)->flush |= flush;
+ skb_gro_remcsum_cleanup(skb, grc);
+ skb->remcsum_offload = 0;
+}
+#endif
+
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
+
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));
+
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
+
+#define indirect_call_gro_receive_inet(cb, f2, f1, head, skb) \
+({ \
+ unlikely(gro_recursion_inc_test(skb)) ? \
+ NAPI_GRO_CB(skb)->flush |= 1, NULL : \
+ INDIRECT_CALL_INET(cb, f2, f1, head, skb); \
+})
+
+struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
+ struct udphdr *uh, struct sock *sk);
+int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
+
+static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
+{
+ struct udphdr *uh;
+ unsigned int hlen, off;
+
+ off = skb_gro_offset(skb);
+ hlen = off + sizeof(*uh);
+ uh = skb_gro_header(skb, hlen, off);
+
+ return uh;
+}
+
+static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
+{
+ const struct ipv6hdr *iph = skb_gro_network_header(skb);
+
+ return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
+ skb_gro_len(skb), proto, 0));
+}
+
+int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
+
+/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
+static inline void gro_normal_list(struct napi_struct *napi)
+{
+ if (!napi->rx_count)
+ return;
+ netif_receive_skb_list_internal(&napi->rx_list);
+ INIT_LIST_HEAD(&napi->rx_list);
+ napi->rx_count = 0;
+}
+
+/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
+ * pass the whole batch up to the stack.
+ */
+static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
+{
+ list_add_tail(&skb->list, &napi->rx_list);
+ napi->rx_count += segs;
+ if (napi->rx_count >= READ_ONCE(gro_normal_batch))
+ gro_normal_list(napi);
+}
+
+
+#endif /* _NET_IPV6_GRO_H */
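
A gro_receive callback typically needs its header mapped before it can compare packets; skb_gro_header() above tries the frag0 fast path first and only then falls back to the linearizing slow path. A skeleton under a hypothetical foohdr protocol, mirroring the shape of udp_gro_udphdr():

struct foohdr {				/* hypothetical protocol header */
	__be16 id;
	__be16 len;
};

static struct sk_buff *foo_gro_receive(struct list_head *head,
				       struct sk_buff *skb)
{
	unsigned int off = skb_gro_offset(skb);
	struct foohdr *fh;

	fh = skb_gro_header(skb, off + sizeof(*fh), off);
	if (unlikely(!fh)) {
		NAPI_GRO_CB(skb)->flush = 1;	/* cannot aggregate */
		return NULL;
	}

	/* ... match 'fh' against the packets already held on 'head' ... */
	return NULL;
}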
diff --git a/include/net/gtp.h b/include/net/gtp.h
index 0e16ebb2a82d..2a503f035d18 100644
--- a/include/net/gtp.h
+++ b/include/net/gtp.h
@@ -2,13 +2,22 @@
#ifndef _GTP_H_
#define _GTP_H_
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <net/rtnetlink.h>
+
/* General GTP protocol related definitions. */
#define GTP0_PORT 3386
#define GTP1U_PORT 2152
+/* GTP message types */
+#define GTP_ECHO_REQ 1 /* Echo Request */
+#define GTP_ECHO_RSP 2 /* Echo Response */
#define GTP_TPDU 255
+#define GTPIE_RECOVERY 14
+
struct gtp0_header { /* According to GSM TS 09.60. */
__u8 flags;
__u8 type;
@@ -27,6 +36,43 @@ struct gtp1_header { /* According to 3GPP TS 29.060. */
__be32 tid;
} __attribute__ ((packed));
+struct gtp1_header_long { /* According to 3GPP TS 29.060. */
+ __u8 flags;
+ __u8 type;
+ __be16 length;
+ __be32 tid;
+ __be16 seq;
+ __u8 npdu;
+ __u8 next;
+} __packed;
+
+/* GTP Information Element */
+struct gtp_ie {
+ __u8 tag;
+ __u8 val;
+} __packed;
+
+struct gtp0_packet {
+ struct gtp0_header gtp0_h;
+ struct gtp_ie ie;
+} __packed;
+
+struct gtp1u_packet {
+ struct gtp1_header_long gtp1u_h;
+ struct gtp_ie ie;
+} __packed;
+
+struct gtp_pdu_session_info { /* According to 3GPP TS 38.415. */
+ u8 pdu_type;
+ u8 qfi;
+};
+
+static inline bool netif_is_gtp(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops &&
+ !strcmp(dev->rtnl_link_ops->kind, "gtp");
+}
+
#define GTP1_F_NPDU 0x01
#define GTP1_F_SEQ 0x02
#define GTP1_F_EXTHDR 0x04
diff --git a/include/net/gue.h b/include/net/gue.h
index 3a6595bfa641..dfca298bec9c 100644
--- a/include/net/gue.h
+++ b/include/net/gue.h
@@ -21,7 +21,7 @@
* | |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
- * C bit indicates contol message when set, data message when unset.
+ * C bit indicates control message when set, data message when unset.
* For a control message, proto/ctype is interpreted as a type of
* control message. For data messages, proto/ctype is the IP protocol
* of the next header.
@@ -30,6 +30,9 @@
* may refer to options placed after this field.
*/
+#include <asm/byteorder.h>
+#include <linux/types.h>
+
struct guehdr {
union {
struct {
diff --git a/include/net/hwbm.h b/include/net/hwbm.h
index c81444611a22..aa495decec35 100644
--- a/include/net/hwbm.h
+++ b/include/net/hwbm.h
@@ -2,6 +2,8 @@
#ifndef _HWBM_H
#define _HWBM_H
+#include <linux/mutex.h>
+
struct hwbm_pool {
/* Capacity of the pool */
int size;
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 9ac2d2672a93..caddf4a59ad1 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -46,12 +46,17 @@ static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32
#if IS_ENABLED(CONFIG_NF_NAT)
void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info);
#else
-#define icmp_ndo_send icmp_send
+static inline void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+{
+ struct ip_options opts = { 0 };
+ __icmp_send(skb_in, type, code, info, &opts);
+}
#endif
int icmp_rcv(struct sk_buff *skb);
int icmp_err(struct sk_buff *skb, u32 info);
int icmp_init(void);
void icmp_out_count(struct net *net, unsigned char type);
+bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr);
#endif /* _ICMP_H */
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index 459d355f6506..598f53d2a3a0 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2017 Intel Deutschland GmbH
- * Copyright (c) 2018-2019 Intel Corporation
+ * Copyright (c) 2018-2019, 2021 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -43,6 +43,11 @@ struct ieee80211_radiotap_header {
* @it_present: (first) present word
*/
__le32 it_present;
+
+ /**
+ * @it_optional: all remaining presence bitmaps
+ */
+ __le32 it_optional[];
} __packed;
/* version is always 0 */
@@ -117,6 +122,8 @@ enum ieee80211_radiotap_tx_flags {
IEEE80211_RADIOTAP_F_TX_CTS = 0x0002,
IEEE80211_RADIOTAP_F_TX_RTS = 0x0004,
IEEE80211_RADIOTAP_F_TX_NOACK = 0x0008,
+ IEEE80211_RADIOTAP_F_TX_NOSEQNO = 0x0010,
+ IEEE80211_RADIOTAP_F_TX_ORDER = 0x0020,
};
/* for IEEE80211_RADIOTAP_MCS "have" flags */
@@ -358,7 +365,7 @@ enum ieee80211_radiotap_zero_len_psdu_type {
*/
static inline u16 ieee80211_get_radiotap_len(const char *data)
{
- struct ieee80211_radiotap_header *hdr = (void *)data;
+ const struct ieee80211_radiotap_header *hdr = (const void *)data;
return get_unaligned_le16(&hdr->it_len);
}
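
With it_optional[] the variable chain of presence words is now addressable past it_present. A hedged sketch of counting those words, assuming bit 31 is the radiotap EXT bit (IEEE80211_RADIOTAP_EXT, defined elsewhere in this header) and leaving the it_len bounds checking that a real parser must perform to the caller:

static int radiotap_present_words(const struct ieee80211_radiotap_header *hdr)
{
	const __le32 *present = &hdr->it_present;
	int n = 1;

	/* each set EXT bit announces another __le32 bitmap following */
	while (get_unaligned_le32(present) & BIT(31)) {
		present++;
		n++;
	}
	return n;
}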
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index d0d188c3294b..03b64bf876a4 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -15,6 +15,22 @@
#ifndef IEEE802154_NETDEVICE_H
#define IEEE802154_NETDEVICE_H
+#define IEEE802154_REQUIRED_SIZE(struct_type, member) \
+ (offsetof(typeof(struct_type), member) + \
+ sizeof(((typeof(struct_type) *)(NULL))->member))
+
+#define IEEE802154_ADDR_OFFSET \
+ offsetof(typeof(struct sockaddr_ieee802154), addr)
+
+#define IEEE802154_MIN_NAMELEN (IEEE802154_ADDR_OFFSET + \
+ IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, addr_type))
+
+#define IEEE802154_NAMELEN_SHORT (IEEE802154_ADDR_OFFSET + \
+ IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, short_addr))
+
+#define IEEE802154_NAMELEN_LONG (IEEE802154_ADDR_OFFSET + \
+ IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, hwaddr))
+
#include <net/af_ieee802154.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
@@ -165,6 +181,33 @@ static inline void ieee802154_devaddr_to_raw(void *raw, __le64 addr)
memcpy(raw, &temp, IEEE802154_ADDR_LEN);
}
+static inline int
+ieee802154_sockaddr_check_size(struct sockaddr_ieee802154 *daddr, int len)
+{
+ struct ieee802154_addr_sa *sa;
+ int ret = 0;
+
+ sa = &daddr->addr;
+ if (len < IEEE802154_MIN_NAMELEN)
+ return -EINVAL;
+ switch (sa->addr_type) {
+ case IEEE802154_ADDR_NONE:
+ break;
+ case IEEE802154_ADDR_SHORT:
+ if (len < IEEE802154_NAMELEN_SHORT)
+ ret = -EINVAL;
+ break;
+ case IEEE802154_ADDR_LONG:
+ if (len < IEEE802154_NAMELEN_LONG)
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
static inline void ieee802154_addr_from_sa(struct ieee802154_addr *a,
const struct ieee802154_addr_sa *sa)
{
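
The new helper centralizes the sockaddr length check that address-family code previously open-coded per addr_type. A sketch of a bind() path using it; my_dgram_bind is hypothetical:

static int my_dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
{
	struct sockaddr_ieee802154 *addr =
		(struct sockaddr_ieee802154 *)uaddr;
	int err;

	err = ieee802154_sockaddr_check_size(addr, len);
	if (err < 0)
		return err;

	/* every field implied by addr->addr.addr_type is now in bounds */
	return 0;
}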
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index a01981d7108f..c8490729b4ae 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -64,6 +64,14 @@ struct inet6_ifaddr {
struct hlist_node addr_lst;
struct list_head if_list;
+ /*
+ * Used to safely traverse idev->addr_list in process context
+ * if the idev->lock needed to protect idev->addr_list cannot be held.
+ * In that case, add the items to this list temporarily and iterate
+ * without holding idev->lock.
+ * See addrconf_ifdown and dev_forward_change.
+ */
+ struct list_head if_list_aux;
struct list_head tmp_list;
struct inet6_ifaddr *ifpub;
@@ -71,6 +79,8 @@ struct inet6_ifaddr {
bool tokenized;
+ u8 ifa_proto;
+
struct rcu_head rcu;
struct in6_addr peer_addr;
};
@@ -78,12 +88,10 @@ struct inet6_ifaddr {
struct ip6_sf_socklist {
unsigned int sl_max;
unsigned int sl_count;
- struct in6_addr sl_addr[0];
+ struct rcu_head rcu;
+ struct in6_addr sl_addr[];
};
-#define IP6_SFLSIZE(count) (sizeof(struct ip6_sf_socklist) + \
- (count) * sizeof(struct in6_addr))
-
#define IP6_SFBLOCK 10 /* allocate this many at once */
struct ipv6_mc_socklist {
@@ -91,18 +99,18 @@ struct ipv6_mc_socklist {
int ifindex;
unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */
struct ipv6_mc_socklist __rcu *next;
- rwlock_t sflock;
- struct ip6_sf_socklist *sflist;
+ struct ip6_sf_socklist __rcu *sflist;
struct rcu_head rcu;
};
struct ip6_sf_list {
- struct ip6_sf_list *sf_next;
+ struct ip6_sf_list __rcu *sf_next;
struct in6_addr sf_addr;
unsigned long sf_count[2]; /* include/exclude counts */
unsigned char sf_gsresp; /* include in g & s response? */
unsigned char sf_oldin; /* change state */
unsigned char sf_crcount; /* retrans. left to send */
+ struct rcu_head rcu;
};
#define MAF_TIMER_RUNNING 0x01
@@ -114,19 +122,19 @@ struct ip6_sf_list {
struct ifmcaddr6 {
struct in6_addr mca_addr;
struct inet6_dev *idev;
- struct ifmcaddr6 *next;
- struct ip6_sf_list *mca_sources;
- struct ip6_sf_list *mca_tomb;
+ struct ifmcaddr6 __rcu *next;
+ struct ip6_sf_list __rcu *mca_sources;
+ struct ip6_sf_list __rcu *mca_tomb;
unsigned int mca_sfmode;
unsigned char mca_crcount;
unsigned long mca_sfcount[2];
- struct timer_list mca_timer;
+ struct delayed_work mca_work;
unsigned int mca_flags;
int mca_users;
refcount_t mca_refcnt;
- spinlock_t mca_lock;
unsigned long mca_cstamp;
unsigned long mca_tstamp;
+ struct rcu_head rcu;
};
/* Anycast stuff */
@@ -162,12 +170,12 @@ struct ipv6_devstat {
struct inet6_dev {
struct net_device *dev;
+ netdevice_tracker dev_tracker;
struct list_head addr_list;
- struct ifmcaddr6 *mc_list;
- struct ifmcaddr6 *mc_tomb;
- spinlock_t mc_lock;
+ struct ifmcaddr6 __rcu *mc_list;
+ struct ifmcaddr6 __rcu *mc_tomb;
unsigned char mc_qrv; /* Query Robustness Variable */
unsigned char mc_gq_running;
@@ -179,9 +187,18 @@ struct inet6_dev {
unsigned long mc_qri; /* Query Response Interval */
unsigned long mc_maxdelay;
- struct timer_list mc_gq_timer; /* general query timer */
- struct timer_list mc_ifc_timer; /* interface change timer */
- struct timer_list mc_dad_timer; /* dad complete mc timer */
+ struct delayed_work mc_gq_work; /* general query work */
+ struct delayed_work mc_ifc_work; /* interface change work */
+ struct delayed_work mc_dad_work; /* dad complete mc work */
+ struct delayed_work mc_query_work; /* mld query work */
+ struct delayed_work mc_report_work; /* mld report work */
+
+ struct sk_buff_head mc_query_queue; /* mld query queue */
+ struct sk_buff_head mc_report_queue; /* mld report queue */
+
+ spinlock_t mc_query_lock; /* mld query queue lock */
+ spinlock_t mc_report_lock; /* mld report queue lock */
+ struct mutex mc_lock; /* mld global lock */
struct ifacaddr6 *ac_list;
rwlock_t lock;
@@ -190,7 +207,6 @@ struct inet6_dev {
int dead;
u32 desync_factor;
- u8 rndid[8];
struct list_head tempaddr_list;
struct in6_addr token;
@@ -205,6 +221,8 @@ struct inet6_dev {
unsigned long tstamp; /* ipv6InterfaceTable update timestamp */
struct rcu_head rcu;
+
+ unsigned int ra_mtu;
};
static inline void ipv6_eth_mc_map(const struct in6_addr *addr, char *buf)
diff --git a/include/net/ila.h b/include/net/ila.h
index f98dcd5791b0..73ebe5eab272 100644
--- a/include/net/ila.h
+++ b/include/net/ila.h
@@ -8,6 +8,8 @@
#ifndef _NET_ILA_H
#define _NET_ILA_H
+struct sk_buff;
+
int ila_xlat_outgoing(struct sk_buff *skb);
int ila_xlat_incoming(struct sk_buff *skb);
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 7392f959a405..025bd8d3c769 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -11,6 +11,8 @@
#include <linux/types.h>
+struct flowi;
+struct flowi6;
struct request_sock;
struct sk_buff;
struct sock;
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index fe96bf247aac..56f1286583d3 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -85,9 +85,8 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
int iif, int sdif,
bool *refcounted)
{
- struct sock *sk = skb_steal_sock(skb);
+ struct sock *sk = skb_steal_sock(skb, refcounted);
- *refcounted = true;
if (sk)
return sk;
@@ -104,15 +103,24 @@ struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
const int dif);
int inet6_hash(struct sock *sk);
-#endif /* IS_ENABLED(CONFIG_IPV6) */
-#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif, __sdif) \
- (((__sk)->sk_portpair == (__ports)) && \
- ((__sk)->sk_family == AF_INET6) && \
- ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \
- ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \
- (((__sk)->sk_bound_dev_if == (__dif)) || \
- ((__sk)->sk_bound_dev_if == (__sdif))) && \
- net_eq(sock_net(__sk), (__net)))
+static inline bool inet6_match(struct net *net, const struct sock *sk,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ const __portpair ports,
+ const int dif, const int sdif)
+{
+ if (!net_eq(sock_net(sk), net) ||
+ sk->sk_family != AF_INET6 ||
+ sk->sk_portpair != ports ||
+ !ipv6_addr_equal(&sk->sk_v6_daddr, saddr) ||
+ !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
+ return false;
+
+ /* READ_ONCE() paired with WRITE_ONCE() in sock_bindtoindex_locked() */
+ return inet_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif,
+ sdif);
+}
+#endif /* IS_ENABLED(CONFIG_IPV6) */
#endif /* _INET6_HASHTABLES_H */
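
inet6_match() replaces the INET6_MATCH() macro with a typed inline that also folds in inet_sk_bound_dev_eq(), so l3mdev/bound-device scoping is decided in one place. A simplified sketch of the established-lookup loop shape it slots into; refcounting and the nulls-restart protocol are elided:

	struct sock *sk;
	const struct hlist_nulls_node *node;

	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (likely(inet6_match(net, sk, saddr, daddr, ports, dif, sdif)))
			return sk;		/* refcount handling elided */
	}
	return NULL;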
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index ae2ba897675c..cec453c18f1d 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -3,6 +3,10 @@
#define _INET_COMMON_H
#include <linux/indirect_call_wrapper.h>
+#include <linux/net.h>
+#include <linux/netdev_features.h>
+#include <linux/types.h>
+#include <net/sock.h>
extern const struct proto_ops inet_stream_ops;
extern const struct proto_ops inet_dgram_ops;
@@ -12,6 +16,8 @@ extern const struct proto_ops inet_dgram_ops;
*/
struct msghdr;
+struct net;
+struct page;
struct sock;
struct sockaddr;
struct socket;
@@ -35,8 +41,16 @@ int inet_shutdown(struct socket *sock, int how);
int inet_listen(struct socket *sock, int backlog);
void inet_sock_destruct(struct sock *sk);
int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
+/* Don't allocate port at this moment, defer to connect. */
+#define BIND_FORCE_ADDRESS_NO_PORT (1 << 0)
+/* Grab and release socket lock. */
+#define BIND_WITH_LOCK (1 << 1)
+/* Called from BPF program. */
+#define BIND_FROM_BPF (1 << 2)
+/* Skip CAP_NET_BIND_SERVICE check. */
+#define BIND_NO_CAP_NET_BIND_SERVICE (1 << 3)
int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
- bool force_bind_address_no_port, bool with_lock);
+ u32 flags);
int inet_getname(struct socket *sock, struct sockaddr *uaddr,
int peer);
int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
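
__inet_bind()'s two booleans become a flags word so that later callers (BPF hooks, capability-exempt binds) do not keep widening the signature. A short usage sketch, equivalent to the old force_bind_address_no_port=true, with_lock=true call:

	err = __inet_bind(sk, uaddr, addr_len,
			  BIND_WITH_LOCK | BIND_FORCE_ADDRESS_NO_PORT);
	if (err)
		return err;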
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 895546058a20..c2b15f7e5516 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -16,6 +16,7 @@
#include <linux/timer.h>
#include <linux/poll.h>
#include <linux/kernel.h>
+#include <linux/sockptr.h>
#include <net/inet_sock.h>
#include <net/request_sock.h>
@@ -24,6 +25,7 @@
#undef INET_CSK_CLEAR_TIMERS
struct inet_bind_bucket;
+struct inet_bind2_bucket;
struct tcp_congestion_ops;
/*
@@ -45,17 +47,9 @@ struct inet_connection_sock_af_ops {
u16 net_frag_header_len;
u16 sockaddr_len;
int (*setsockopt)(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen);
+ sockptr_t optval, unsigned int optlen);
int (*getsockopt)(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
-#ifdef CONFIG_COMPAT
- int (*compat_setsockopt)(struct sock *sk,
- int level, int optname,
- char __user *optval, unsigned int optlen);
- int (*compat_getsockopt)(struct sock *sk,
- int level, int optname,
- char __user *optval, int __user *optlen);
-#endif
void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
void (*mtu_reduced)(struct sock *sk);
};
@@ -64,6 +58,7 @@ struct inet_connection_sock_af_ops {
*
* @icsk_accept_queue: FIFO of established children
* @icsk_bind_hash: Bind node
+ * @icsk_bind2_hash: Bind node in the bhash2 table
* @icsk_timeout: Timeout
* @icsk_retransmit_timer: Resend (no ack)
* @icsk_rto: Retransmit timeout
@@ -73,7 +68,6 @@ struct inet_connection_sock_af_ops {
* @icsk_ulp_ops: Pluggable ULP control hook
* @icsk_ulp_data: ULP private data
* @icsk_clean_acked: Clean acked data hook
- * @icsk_listen_portaddr_node hash to the portaddr listener hashtable
* @icsk_ca_state: Congestion control state
* @icsk_retransmits: Number of unrecovered [RTO] timeouts
* @icsk_pending: Scheduled timer event
@@ -83,25 +77,30 @@ struct inet_connection_sock_af_ops {
* @icsk_ext_hdr_len: Network protocol overhead (IP/IPv6 options)
* @icsk_ack: Delayed ACK control data
* @icsk_mtup; MTU probing control data
+ * @icsk_probes_tstamp: Probe timestamp (cleared by non-zero window ack)
+ * @icsk_user_timeout: TCP_USER_TIMEOUT value
*/
struct inet_connection_sock {
/* inet_sock has to be the first member! */
struct inet_sock icsk_inet;
struct request_sock_queue icsk_accept_queue;
struct inet_bind_bucket *icsk_bind_hash;
+ struct inet_bind2_bucket *icsk_bind2_hash;
unsigned long icsk_timeout;
struct timer_list icsk_retransmit_timer;
struct timer_list icsk_delack_timer;
__u32 icsk_rto;
+ __u32 icsk_rto_min;
+ __u32 icsk_delack_max;
__u32 icsk_pmtu_cookie;
const struct tcp_congestion_ops *icsk_ca_ops;
const struct inet_connection_sock_af_ops *icsk_af_ops;
const struct tcp_ulp_ops *icsk_ulp_ops;
void __rcu *icsk_ulp_data;
void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq);
- struct hlist_node icsk_listen_portaddr_node;
unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
- __u8 icsk_ca_state:6,
+ __u8 icsk_ca_state:5,
+ icsk_ca_initialized:1,
icsk_ca_setsockopt:1,
icsk_ca_dst_locked:1;
__u8 icsk_retransmits;
@@ -114,7 +113,7 @@ struct inet_connection_sock {
__u8 pending; /* ACK is pending */
__u8 quick; /* Scheduled number of quick acks */
__u8 pingpong; /* The session is interactive */
- __u8 blocked; /* Delayed ACK was blocked by socket lock */
+ __u8 retry; /* Number of attempts */
__u32 ato; /* Predicted tick of soft clock */
unsigned long timeout; /* Currently scheduled timeout */
__u32 lrcvtime; /* timestamp of last received data packet */
@@ -122,27 +121,27 @@ struct inet_connection_sock {
__u16 rcv_mss; /* MSS used for delayed ACK decisions */
} icsk_ack;
struct {
- int enabled;
-
/* Range of MTUs to search */
int search_high;
int search_low;
/* Information on the current probe. */
- int probe_size;
+ u32 probe_size:31,
+ /* Is the MTUP feature enabled for this connection? */
+ enabled:1;
u32 probe_timestamp;
} icsk_mtup;
+ u32 icsk_probes_tstamp;
u32 icsk_user_timeout;
u64 icsk_ca_priv[104 / sizeof(u64)];
-#define ICSK_CA_PRIV_SIZE (13 * sizeof(u64))
+#define ICSK_CA_PRIV_SIZE sizeof_field(struct inet_connection_sock, icsk_ca_priv)
};
#define ICSK_TIME_RETRANS 1 /* Retransmit timer */
#define ICSK_TIME_DACK 2 /* Delayed ack timer */
#define ICSK_TIME_PROBE0 3 /* Zero window probe timer */
-#define ICSK_TIME_EARLY_RETRANS 4 /* Early retransmit timer */
#define ICSK_TIME_LOSS_PROBE 5 /* Tail loss probe timer */
#define ICSK_TIME_REO_TIMEOUT 6 /* Reordering timer */
@@ -202,7 +201,8 @@ static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
#endif
} else if (what == ICSK_TIME_DACK) {
- icsk->icsk_ack.blocked = icsk->icsk_ack.pending = 0;
+ icsk->icsk_ack.pending = 0;
+ icsk->icsk_ack.retry = 0;
#ifdef INET_CSK_CLEAR_TIMERS
sk_stop_timer(sk, &icsk->icsk_delack_timer);
#endif
@@ -227,8 +227,7 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
}
if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 ||
- what == ICSK_TIME_EARLY_RETRANS || what == ICSK_TIME_LOSS_PROBE ||
- what == ICSK_TIME_REO_TIMEOUT) {
+ what == ICSK_TIME_LOSS_PROBE || what == ICSK_TIME_REO_TIMEOUT) {
icsk->icsk_pending = what;
icsk->icsk_timeout = jiffies + when;
sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
@@ -284,9 +283,24 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
}
-void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
+bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
+static inline unsigned long
+reqsk_timeout(struct request_sock *req, unsigned long max_timeout)
+{
+ u64 timeout = (u64)req->timeout << req->num_timeout;
+
+ return (unsigned long)min_t(u64, timeout, max_timeout);
+}
+
+static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
+{
+ /* The below has to be done to allow calling inet_csk_destroy_sock */
+ sock_set_flag(sk, SOCK_DEAD);
+ this_cpu_inc(*sk->sk_prot->orphan_count);
+}
+
void inet_csk_destroy_sock(struct sock *sk);
void inet_csk_prepare_forced_close(struct sock *sk);
@@ -299,19 +313,18 @@ static inline __poll_t inet_csk_listen_poll(const struct sock *sk)
(EPOLLIN | EPOLLRDNORM) : 0;
}
-int inet_csk_listen_start(struct sock *sk, int backlog);
+int inet_csk_listen_start(struct sock *sk);
void inet_csk_listen_stop(struct sock *sk);
void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
-int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen);
-int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen);
+/* update the fast reuse flag when adding a socket */
+void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
+ struct sock *sk);
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
-#define TCP_PINGPONG_THRESH 3
+#define TCP_PINGPONG_THRESH 1
static inline void inet_csk_enter_pingpong_mode(struct sock *sk)
{
@@ -328,11 +341,9 @@ static inline bool inet_csk_in_pingpong_mode(struct sock *sk)
return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
}
-static inline void inet_csk_inc_pingpong_cnt(struct sock *sk)
+static inline bool inet_csk_has_ulp(struct sock *sk)
{
- struct inet_connection_sock *icsk = inet_csk(sk);
-
- if (icsk->icsk_ack.pingpong < U8_MAX)
- icsk->icsk_ack.pingpong++;
+ return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops;
}
+
#endif /* _INET_CONNECTION_SOCK_H */
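
reqsk_timeout() is classic exponential backoff done in 64 bits so the shift cannot wrap before the clamp. A runnable model with illustrative millisecond values (the kernel operates on jiffies):

#include <stdio.h>

/* Mirror the clamp in reqsk_timeout(): the base timeout doubles per
 * retransmission and saturates at max_timeout. */
static unsigned long reqsk_timeout_demo(unsigned long timeout_base,
					int num_timeout,
					unsigned long max_timeout)
{
	unsigned long long t = (unsigned long long)timeout_base << num_timeout;

	return t < max_timeout ? (unsigned long)t : max_timeout;
}

int main(void)
{
	for (int n = 0; n < 8; n++)
		printf("retries=%d timeout=%lums\n", n,
		       reqsk_timeout_demo(1000, n, 120000));
	return 0;
}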
diff --git a/include/net/inet_dscp.h b/include/net/inet_dscp.h
new file mode 100644
index 000000000000..72f250dffada
--- /dev/null
+++ b/include/net/inet_dscp.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * inet_dscp.h: helpers for handling differentiated services codepoints (DSCP)
+ *
+ * DSCP is defined in RFC 2474:
+ *
+ * 0 1 2 3 4 5 6 7
+ * +---+---+---+---+---+---+---+---+
+ * | DSCP | CU |
+ * +---+---+---+---+---+---+---+---+
+ *
+ * DSCP: differentiated services codepoint
+ * CU: currently unused
+ *
+ * The whole DSCP + CU bits form the DS field.
+ * The DS field is also commonly called TOS or Traffic Class (for IPv6).
+ *
+ * Note: the CU bits are now used for Explicit Congestion Notification
+ * (RFC 3168).
+ */
+
+#ifndef _INET_DSCP_H
+#define _INET_DSCP_H
+
+#include <linux/types.h>
+
+/* Special type for storing DSCP values.
+ *
+ * A dscp_t variable stores a DS field with the CU (ECN) bits cleared.
+ * Using dscp_t makes it possible to strictly separate DSCP and ECN bits, thus avoiding
+ * bugs where ECN bits are erroneously taken into account during FIB lookups
+ * or policy routing.
+ *
+ * Note: to get the real DSCP value contained in a dscp_t variable one would
+ * have to do a bit shift after calling inet_dscp_to_dsfield(). We could have
+ * a helper for that, but there's currently no users.
+ */
+typedef u8 __bitwise dscp_t;
+
+#define INET_DSCP_MASK 0xfc
+
+static inline dscp_t inet_dsfield_to_dscp(__u8 dsfield)
+{
+ return (__force dscp_t)(dsfield & INET_DSCP_MASK);
+}
+
+static inline __u8 inet_dscp_to_dsfield(dscp_t dscp)
+{
+ return (__force __u8)dscp;
+}
+
+static inline bool inet_validate_dscp(__u8 val)
+{
+ return !(val & ~INET_DSCP_MASK);
+}
+
+#endif /* _INET_DSCP_H */
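
Because dscp_t is annotated __bitwise, sparse flags any direct mixing with plain __u8, forcing every conversion through the helpers above. A short usage sketch; user_dsfield and rule are hypothetical:

	/* validating a user-supplied dsfield before storing it */
	if (!inet_validate_dscp(user_dsfield))
		return -EINVAL;		/* ECN bits must be clear */
	rule->dscp = inet_dsfield_to_dscp(user_dsfield);

	/* and converting back when building a header */
	iph->tos = inet_dscp_to_dsfield(rule->dscp);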
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index c8e2bebd8d93..ea32393464a2 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -4,9 +4,11 @@
#include <linux/ip.h>
#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
#include <net/inet_sock.h>
#include <net/dsfield.h>
+#include <net/checksum.h>
enum {
INET_ECN_NOT_ECT = 0,
@@ -74,8 +76,8 @@ static inline void INET_ECN_dontxmit(struct sock *sk)
static inline int IP_ECN_set_ce(struct iphdr *iph)
{
- u32 check = (__force u32)iph->check;
u32 ecn = (iph->tos + 1) & INET_ECN_MASK;
+ __be16 check_add;
/*
* After the last operation we have (in binary):
@@ -92,13 +94,24 @@ static inline int IP_ECN_set_ce(struct iphdr *iph)
* INET_ECN_ECT_1 => check += htons(0xFFFD)
* INET_ECN_ECT_0 => check += htons(0xFFFE)
*/
- check += (__force u16)htons(0xFFFB) + (__force u16)htons(ecn);
+ check_add = (__force __be16)((__force u16)htons(0xFFFB) +
+ (__force u16)htons(ecn));
- iph->check = (__force __sum16)(check + (check>=0xFFFF));
+ iph->check = csum16_add(iph->check, check_add);
iph->tos |= INET_ECN_CE;
return 1;
}
+static inline int IP_ECN_set_ect1(struct iphdr *iph)
+{
+ if ((iph->tos & INET_ECN_MASK) != INET_ECN_ECT_0)
+ return 0;
+
+ iph->check = csum16_add(iph->check, htons(0x1));
+ iph->tos ^= INET_ECN_MASK;
+ return 1;
+}
+
static inline void IP_ECN_clear(struct iphdr *iph)
{
iph->tos &= ~INET_ECN_MASK;
@@ -134,6 +147,22 @@ static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
return 1;
}
+static inline int IP6_ECN_set_ect1(struct sk_buff *skb, struct ipv6hdr *iph)
+{
+ __be32 from, to;
+
+ if ((ipv6_get_dsfield(iph) & INET_ECN_MASK) != INET_ECN_ECT_0)
+ return 0;
+
+ from = *(__be32 *)iph;
+ to = from ^ htonl(INET_ECN_MASK << 20);
+ *(__be32 *)iph = to;
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from),
+ (__force __wsum)to);
+ return 1;
+}
+
static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner)
{
dscp &= ~INET_ECN_MASK;
@@ -142,7 +171,7 @@ static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner)
static inline int INET_ECN_set_ce(struct sk_buff *skb)
{
- switch (skb->protocol) {
+ switch (skb_protocol(skb, true)) {
case cpu_to_be16(ETH_P_IP):
if (skb_network_header(skb) + sizeof(struct iphdr) <=
skb_tail_pointer(skb))
@@ -159,6 +188,42 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
return 0;
}
+static inline int skb_get_dsfield(struct sk_buff *skb)
+{
+ switch (skb_protocol(skb, true)) {
+ case cpu_to_be16(ETH_P_IP):
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+ break;
+ return ipv4_get_dsfield(ip_hdr(skb));
+
+ case cpu_to_be16(ETH_P_IPV6):
+ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
+ break;
+ return ipv6_get_dsfield(ipv6_hdr(skb));
+ }
+
+ return -1;
+}
+
+static inline int INET_ECN_set_ect1(struct sk_buff *skb)
+{
+ switch (skb_protocol(skb, true)) {
+ case cpu_to_be16(ETH_P_IP):
+ if (skb_network_header(skb) + sizeof(struct iphdr) <=
+ skb_tail_pointer(skb))
+ return IP_ECN_set_ect1(ip_hdr(skb));
+ break;
+
+ case cpu_to_be16(ETH_P_IPV6):
+ if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
+ skb_tail_pointer(skb))
+ return IP6_ECN_set_ect1(skb, ipv6_hdr(skb));
+ break;
+ }
+
+ return 0;
+}
+
/*
* RFC 6040 4.2
* To decapsulate the inner header at the tunnel egress, a compliant
@@ -208,8 +273,12 @@ static inline int INET_ECN_decapsulate(struct sk_buff *skb,
int rc;
rc = __INET_ECN_decapsulate(outer, inner, &set_ce);
- if (!rc && set_ce)
- INET_ECN_set_ce(skb);
+ if (!rc) {
+ if (set_ce)
+ INET_ECN_set_ce(skb);
+ else if ((outer & INET_ECN_MASK) == INET_ECN_ECT_1)
+ INET_ECN_set_ect1(skb);
+ }
return rc;
}
@@ -219,12 +288,16 @@ static inline int IP_ECN_decapsulate(const struct iphdr *oiph,
{
__u8 inner;
- if (skb->protocol == htons(ETH_P_IP))
+ switch (skb_protocol(skb, true)) {
+ case htons(ETH_P_IP):
inner = ip_hdr(skb)->tos;
- else if (skb->protocol == htons(ETH_P_IPV6))
+ break;
+ case htons(ETH_P_IPV6):
inner = ipv6_get_dsfield(ipv6_hdr(skb));
- else
+ break;
+ default:
return 0;
+ }
return INET_ECN_decapsulate(skb, oiph->tos, inner);
}
@@ -234,12 +307,16 @@ static inline int IP6_ECN_decapsulate(const struct ipv6hdr *oipv6h,
{
__u8 inner;
- if (skb->protocol == htons(ETH_P_IP))
+ switch (skb_protocol(skb, true)) {
+ case htons(ETH_P_IP):
inner = ip_hdr(skb)->tos;
- else if (skb->protocol == htons(ETH_P_IPV6))
+ break;
+ case htons(ETH_P_IPV6):
inner = ipv6_get_dsfield(ipv6_hdr(skb));
- else
+ break;
+ default:
return 0;
+ }
return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner);
}
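The csum16_add() conversion above is plain RFC 1624 incremental-update arithmetic. A self-contained userspace check of the ECT(0) -> CE case; the sketch folds bytes in host order, so the htons() pairs from the kernel code are unnecessary, and the header bytes are made up. The kernel function additionally bails out for non-ECT packets, which this sketch does not show.

#include <assert.h>
#include <stdint.h>

static uint16_t csum16_add(uint16_t csum, uint16_t addend)
{
	uint16_t res = csum + addend;

	return res + (res < addend);		/* fold end-around carry */
}

static uint16_t ip_csum(const uint8_t *hdr, int len)
{
	uint32_t sum = 0;
	int i;

	for (i = 0; i < len; i += 2)
		sum += (uint16_t)(hdr[i] << 8 | hdr[i + 1]);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return ~sum & 0xffff;
}

int main(void)
{
	uint8_t hdr[20] = { 0x45, 0x02 /* tos = ECT(0) */, 0x00, 0x54,
			    0x12, 0x34, 0x40, 0x00, 0x40, 0x06, 0x00, 0x00,
			    0xc0, 0x00, 0x02, 0x01, 0xc0, 0x00, 0x02, 0x02 };
	uint16_t check, ecn = (hdr[1] + 1) & 3;	/* 3 for ECT(0) */

	check = ip_csum(hdr, 20);
	/* incremental update: tos ECT(0) -> CE adds 1 to the header sum,
	 * so add 0xFFFB + 3 = 0xFFFE (one's-complement -1) to the check
	 */
	check = csum16_add(check, 0xFFFB + ecn);
	hdr[1] |= 3;				/* INET_ECN_CE */
	assert(check == ip_csum(hdr, 20));	/* matches full recompute */
	return 0;
}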
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index bac79e817776..0b0876610553 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -4,6 +4,9 @@
#include <linux/rhashtable-types.h>
#include <linux/completion.h>
+#include <linux/in6.h>
+#include <linux/rbtree_types.h>
+#include <linux/refcount.h>
/* Per netns frag queues directory */
struct fqdir {
@@ -21,6 +24,7 @@ struct fqdir {
/* Keep atomic mem on separate cachelines in structs that include it */
atomic_long_t mem ____cacheline_aligned_in_smp;
struct work_struct destroy_work;
+ struct llist_node free_list;
};
/**
@@ -69,6 +73,7 @@ struct frag_v6_compare_key {
* @stamp: timestamp of the last received fragment
* @len: total length of the original datagram
* @meat: length of received fragments so far
+ * @mono_delivery_time: stamp has a mono delivery time (EDT)
* @flags: fragment queue flags
* @max_size: maximum received fragment size
* @fqdir: pointer to struct fqdir
@@ -89,6 +94,7 @@ struct inet_frag_queue {
ktime_t stamp;
int len;
int meat;
+ u8 mono_delivery_time;
__u8 flags;
u16 max_size;
struct fqdir *fqdir;
@@ -116,8 +122,15 @@ int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net);
static inline void fqdir_pre_exit(struct fqdir *fqdir)
{
- fqdir->high_thresh = 0; /* prevent creation of new frags */
- fqdir->dead = true;
+ /* Prevent creation of new frags.
+ * Pairs with READ_ONCE() in inet_frag_find().
+ */
+ WRITE_ONCE(fqdir->high_thresh, 0);
+
+ /* Pairs with READ_ONCE() in inet_frag_kill(), ip_expire()
+ * and ip6frag_expire_frag_queue().
+ */
+ WRITE_ONCE(fqdir->dead, true);
}
void fqdir_exit(struct fqdir *fqdir);
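The WRITE_ONCE() annotations only help if every lockless reader uses a matching READ_ONCE(). A minimal userspace rendition of the pattern, with the macros reduced to their volatile-access core (the real definitions live in the kernel's rwonce.h):

#include <stdbool.h>

#define READ_ONCE(x)	(*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

struct fqdir_like {
	long high_thresh;
	bool dead;
};

/* writer side, cf. fqdir_pre_exit(): the volatile access may not be
 * torn, fused, or reordered by the compiler
 */
static void pre_exit(struct fqdir_like *fqdir)
{
	WRITE_ONCE(fqdir->high_thresh, 0);	/* no new frag queues */
	WRITE_ONCE(fqdir->dead, true);
}

/* reader side, cf. inet_frag_kill(): a fresh load on every call, so a
 * concurrent pre_exit() is observed instead of a cached value
 */
static bool can_schedule_timer(struct fqdir_like *fqdir)
{
	return !READ_ONCE(fqdir->dead);
}

int main(void)
{
	struct fqdir_like fq = { .high_thresh = 1000, .dead = false };

	pre_exit(&fq);
	return can_schedule_timer(&fq);		/* 0 after pre_exit() */
}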
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index d0019d3395cf..3af1e927247d 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -23,6 +23,7 @@
#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
+#include <net/ip.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
@@ -90,7 +91,31 @@ struct inet_bind_bucket {
struct hlist_head owners;
};
-static inline struct net *ib_net(struct inet_bind_bucket *ib)
+struct inet_bind2_bucket {
+ possible_net_t ib_net;
+ int l3mdev;
+ unsigned short port;
+#if IS_ENABLED(CONFIG_IPV6)
+ unsigned short family;
+#endif
+ union {
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr v6_rcv_saddr;
+#endif
+ __be32 rcv_saddr;
+ };
+ /* Node in the bhash2 inet_bind_hashbucket chain */
+ struct hlist_node node;
+ /* List of sockets hashed to this bucket */
+ struct hlist_head owners;
+};
+
+static inline struct net *ib_net(const struct inet_bind_bucket *ib)
+{
+ return read_pnet(&ib->ib_net);
+}
+
+static inline struct net *ib2_net(const struct inet_bind2_bucket *ib)
{
return read_pnet(&ib->ib_net);
}
@@ -111,11 +136,7 @@ struct inet_bind_hashbucket {
#define LISTENING_NULLS_BASE (1U << 29)
struct inet_listen_hashbucket {
spinlock_t lock;
- unsigned int count;
- union {
- struct hlist_head head;
- struct hlist_nulls_head nulls_head;
- };
+ struct hlist_nulls_head nulls_head;
};
/* This is for listening sockets, thus all sockets which possess wildcards. */
@@ -137,31 +158,32 @@ struct inet_hashinfo {
* TCP hash as well as the others for fast bind/connect.
*/
struct kmem_cache *bind_bucket_cachep;
+ /* This bind table is hashed by local port */
struct inet_bind_hashbucket *bhash;
+ struct kmem_cache *bind2_bucket_cachep;
+ /* This bind table is hashed by local port and sk->sk_rcv_saddr (ipv4)
+ * or sk->sk_v6_rcv_saddr (ipv6). This 2nd bind table is used
+ * primarily for expediting bind conflict resolution.
+ */
+ struct inet_bind_hashbucket *bhash2;
unsigned int bhash_size;
/* The 2nd listener table hashed by local port and address */
unsigned int lhash2_mask;
struct inet_listen_hashbucket *lhash2;
- /* All the above members are written once at bootup and
- * never written again _or_ are predominantly read-access.
- *
- * Now align to a new cache line as all the following members
- * might be often dirty.
- */
- /* All sockets in TCP_LISTEN state will be in listening_hash.
- * This is the only table where wildcard'd TCP sockets can
- * exist. listening_hash is only hashed by local port number.
- * If lhash2 is initialized, the same socket will also be hashed
- * to lhash2 by port and address.
- */
- struct inet_listen_hashbucket listening_hash[INET_LHTABLE_SIZE]
- ____cacheline_aligned_in_smp;
+ bool pernet;
};
-#define inet_lhash2_for_each_icsk_rcu(__icsk, list) \
- hlist_for_each_entry_rcu(__icsk, list, icsk_listen_portaddr_node)
+static inline struct inet_hashinfo *tcp_or_dccp_get_hashinfo(const struct sock *sk)
+{
+#if IS_ENABLED(CONFIG_IP_DCCP)
+ return sk->sk_prot->h.hashinfo ? :
+ sock_net(sk)->ipv4.tcp_death_row.hashinfo;
+#else
+ return sock_net(sk)->ipv4.tcp_death_row.hashinfo;
+#endif
+}
static inline struct inet_listen_hashbucket *
inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash)
@@ -185,22 +207,21 @@ static inline spinlock_t *inet_ehash_lockp(
int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);
+static inline void inet_hashinfo2_free_mod(struct inet_hashinfo *h)
+{
+ kfree(h->lhash2);
+ h->lhash2 = NULL;
+}
+
static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
kvfree(hashinfo->ehash_locks);
hashinfo->ehash_locks = NULL;
}
-static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
- int dif, int sdif)
-{
-#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
- return inet_bound_dev_eq(!!net->ipv4.sysctl_tcp_l3mdev_accept,
- bound_dev_if, dif, sdif);
-#else
- return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
-#endif
-}
+struct inet_hashinfo *inet_pernet_hashinfo_alloc(struct inet_hashinfo *hashinfo,
+ unsigned int ehash_entries);
+void inet_pernet_hashinfo_free(struct inet_hashinfo *hashinfo);
struct inet_bind_bucket *
inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
@@ -209,40 +230,76 @@ inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
void inet_bind_bucket_destroy(struct kmem_cache *cachep,
struct inet_bind_bucket *tb);
+bool inet_bind_bucket_match(const struct inet_bind_bucket *tb,
+ const struct net *net, unsigned short port,
+ int l3mdev);
+
+struct inet_bind2_bucket *
+inet_bind2_bucket_create(struct kmem_cache *cachep, struct net *net,
+ struct inet_bind_hashbucket *head,
+ unsigned short port, int l3mdev,
+ const struct sock *sk);
+
+void inet_bind2_bucket_destroy(struct kmem_cache *cachep,
+ struct inet_bind2_bucket *tb);
+
+struct inet_bind2_bucket *
+inet_bind2_bucket_find(const struct inet_bind_hashbucket *head,
+ const struct net *net,
+ unsigned short port, int l3mdev,
+ const struct sock *sk);
+
+bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb,
+ const struct net *net, unsigned short port,
+ int l3mdev, const struct sock *sk);
+
static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
const u32 bhash_size)
{
return (lport + net_hash_mix(net)) & (bhash_size - 1);
}
-void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
- const unsigned short snum);
-
-/* These can have wildcards, don't try too hard. */
-static inline u32 inet_lhashfn(const struct net *net, const unsigned short num)
+static inline struct inet_bind_hashbucket *
+inet_bhashfn_portaddr(const struct inet_hashinfo *hinfo, const struct sock *sk,
+ const struct net *net, unsigned short port)
{
- return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
-}
+ u32 hash;
-static inline int inet_sk_listen_hashfn(const struct sock *sk)
-{
- return inet_lhashfn(sock_net(sk), inet_sk(sk)->inet_num);
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6)
+ hash = ipv6_portaddr_hash(net, &sk->sk_v6_rcv_saddr, port);
+ else
+#endif
+ hash = ipv4_portaddr_hash(net, sk->sk_rcv_saddr, port);
+ return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
}
+struct inet_bind_hashbucket *
+inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, int port);
+
+/* This should be called whenever a socket's sk_rcv_saddr (ipv4) or
+ * sk_v6_rcv_saddr (ipv6) changes after it has been bound. The socket's
+ * rcv_saddr field should already have been updated when this is called.
+ */
+int inet_bhash2_update_saddr(struct inet_bind_hashbucket *prev_saddr, struct sock *sk);
+
+void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
+ struct inet_bind2_bucket *tb2, unsigned short port);
+
/* Caller must disable local BH processing. */
int __inet_inherit_port(const struct sock *sk, struct sock *child);
void inet_put_port(struct sock *sk);
-void inet_hashinfo_init(struct inet_hashinfo *h);
void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
unsigned long numentries, int scale,
unsigned long low_limit,
unsigned long high_limit);
int inet_hashinfo2_init_mod(struct inet_hashinfo *h);
-bool inet_ehash_insert(struct sock *sk, struct sock *osk);
-bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
+bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk);
+bool inet_ehash_nolisten(struct sock *sk, struct sock *osk,
+ bool *found_dup_sk);
int __inet_hash(struct sock *sk, struct sock *osk);
int inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);
@@ -282,7 +339,6 @@ static inline struct sock *inet_lookup_listener(struct net *net,
((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif
-#if (BITS_PER_LONG == 64)
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
const __addrpair __name = (__force __addrpair) ( \
@@ -294,24 +350,20 @@ static inline struct sock *inet_lookup_listener(struct net *net,
(((__force __u64)(__be32)(__daddr)) << 32) | \
((__force __u64)(__be32)(__saddr)))
#endif /* __BIG_ENDIAN */
-#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
- (((__sk)->sk_portpair == (__ports)) && \
- ((__sk)->sk_addrpair == (__cookie)) && \
- (((__sk)->sk_bound_dev_if == (__dif)) || \
- ((__sk)->sk_bound_dev_if == (__sdif))) && \
- net_eq(sock_net(__sk), (__net)))
-#else /* 32-bit arch */
-#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
- const int __name __deprecated __attribute__((unused))
-#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
- (((__sk)->sk_portpair == (__ports)) && \
- ((__sk)->sk_daddr == (__saddr)) && \
- ((__sk)->sk_rcv_saddr == (__daddr)) && \
- (((__sk)->sk_bound_dev_if == (__dif)) || \
- ((__sk)->sk_bound_dev_if == (__sdif))) && \
- net_eq(sock_net(__sk), (__net)))
-#endif /* 64-bit arch */
+static inline bool inet_match(struct net *net, const struct sock *sk,
+ const __addrpair cookie, const __portpair ports,
+ int dif, int sdif)
+{
+ if (!net_eq(sock_net(sk), net) ||
+ sk->sk_portpair != ports ||
+ sk->sk_addrpair != cookie)
+ return false;
+
+ /* READ_ONCE() paired with WRITE_ONCE() in sock_bindtoindex_locked() */
+ return inet_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif,
+ sdif);
+}
/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
* not check it for lookups anymore, thanks Alexey. -DaveM
@@ -379,10 +431,9 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
const int sdif,
bool *refcounted)
{
- struct sock *sk = skb_steal_sock(skb);
+ struct sock *sk = skb_steal_sock(skb, refcounted);
const struct iphdr *iph = ip_hdr(skb);
- *refcounted = true;
if (sk)
return sk;
@@ -413,7 +464,7 @@ static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
}
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
- struct sock *sk, u32 port_offset,
+ struct sock *sk, u64 port_offset,
int (*check_established)(struct inet_timewait_death_row *,
struct sock *, __u16,
struct inet_timewait_sock **));
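The one-word address cookie is what lets inet_match() above compare both addresses at once. A userspace sketch of the little-endian layout from the hunk (the big-endian variant simply swaps the halves):

#include <assert.h>
#include <stdint.h>

/* cf. INET_ADDR_COOKIE() on little-endian: daddr in the high half */
static uint64_t addr_cookie_le(uint32_t saddr_be, uint32_t daddr_be)
{
	return ((uint64_t)daddr_be << 32) | saddr_be;
}

int main(void)
{
	uint32_t saddr = 0x0100a8c0;	/* 192.168.0.1, network order on LE */
	uint32_t daddr = 0x0200a8c0;	/* 192.168.0.2 */
	uint64_t cookie = addr_cookie_le(saddr, daddr);

	/* one 64-bit compare replaces two 32-bit ones on the fast path */
	assert(cookie == addr_cookie_le(saddr, daddr));
	assert(cookie != addr_cookie_le(daddr, saddr));
	return 0;
}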
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 34c4436fd18f..bf5654ce711e 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -52,7 +52,7 @@ struct ip_options {
unsigned char router_alert;
unsigned char cipso;
unsigned char __pad2;
- unsigned char __data[0];
+ unsigned char __data[];
};
struct ip_options_rcu {
@@ -107,7 +107,8 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb)
{
- if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)
+ if (!sk->sk_mark &&
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept))
return skb->mark;
return sk->sk_mark;
@@ -116,14 +117,15 @@ static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb)
static inline int inet_request_bound_dev_if(const struct sock *sk,
struct sk_buff *skb)
{
+ int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
#ifdef CONFIG_NET_L3_MASTER_DEV
struct net *net = sock_net(sk);
- if (!sk->sk_bound_dev_if && net->ipv4.sysctl_tcp_l3mdev_accept)
+ if (!bound_dev_if && READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept))
return l3mdev_master_ifindex_by_index(net, skb->skb_iif);
#endif
- return sk->sk_bound_dev_if;
+ return bound_dev_if;
}
static inline int inet_sk_bound_l3mdev(const struct sock *sk)
@@ -131,7 +133,7 @@ static inline int inet_sk_bound_l3mdev(const struct sock *sk)
#ifdef CONFIG_NET_L3_MASTER_DEV
struct net *net = sock_net(sk);
- if (!net->ipv4.sysctl_tcp_l3mdev_accept)
+ if (!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept))
return l3mdev_master_ifindex_by_index(net,
sk->sk_bound_dev_if);
#endif
@@ -147,6 +149,17 @@ static inline bool inet_bound_dev_eq(bool l3mdev_accept, int bound_dev_if,
return bound_dev_if == dif || bound_dev_if == sdif;
}
+static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+ int dif, int sdif)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept),
+ bound_dev_if, dif, sdif);
+#else
+ return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
+#endif
+}
+
struct inet_cork {
unsigned int flags;
__be32 addr;
@@ -207,11 +220,10 @@ struct inet_sock {
__be32 inet_saddr;
__s16 uc_ttl;
__u16 cmsg_flags;
+ struct ip_options_rcu __rcu *inet_opt;
__be16 inet_sport;
__u16 inet_id;
- struct ip_options_rcu __rcu *inet_opt;
- int rx_dst_ifindex;
__u8 tos;
__u8 min_ttl;
__u8 mc_ttl;
@@ -225,6 +237,7 @@ struct inet_sock {
mc_all:1,
nodefrag:1;
__u8 bind_address_no_port:1,
+ recverr_rfc4884:1,
defer_connect:1; /* Indicates that fastopen_connect is set
* and cookie exists so we defer connect
* until first data frame is written
@@ -252,6 +265,11 @@ struct inet_sock {
#define IP_CMSG_CHECKSUM BIT(7)
#define IP_CMSG_RECVFRAGSIZE BIT(8)
+static inline bool sk_is_inet(struct sock *sk)
+{
+ return sk->sk_family == AF_INET || sk->sk_family == AF_INET6;
+}
+
/**
* sk_to_full_sk - Access to a full socket
* @sk: pointer to a socket
@@ -295,13 +313,6 @@ static inline void __inet_sk_copy_descendant(struct sock *sk_to,
memcpy(inet_sk(sk_to) + 1, inet_sk(sk_from) + 1,
sk_from->sk_prot->obj_size - ancestor_size);
}
-#if !(IS_ENABLED(CONFIG_IPV6))
-static inline void inet_sk_copy_descendant(struct sock *sk_to,
- const struct sock *sk_from)
-{
- __inet_sk_copy_descendant(sk_to, sk_from, sizeof(struct inet_sock));
-}
-#endif
int inet_sk_rebuild_header(struct sock *sk);
@@ -375,8 +386,20 @@ static inline bool inet_get_convert_csum(struct sock *sk)
static inline bool inet_can_nonlocal_bind(struct net *net,
struct inet_sock *inet)
{
- return net->ipv4.sysctl_ip_nonlocal_bind ||
+ return READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind) ||
inet->freebind || inet->transparent;
}
+static inline bool inet_addr_valid_or_nonlocal(struct net *net,
+ struct inet_sock *inet,
+ __be32 addr,
+ int addr_type)
+{
+ return inet_can_nonlocal_bind(net, inet) ||
+ addr == htonl(INADDR_ANY) ||
+ addr_type == RTN_LOCAL ||
+ addr_type == RTN_MULTICAST ||
+ addr_type == RTN_BROADCAST;
+}
+
#endif /* _INET_SOCK_H */
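inet_addr_valid_or_nonlocal() is just a policy OR-chain. The decision logic as a standalone sketch, with a stand-in subset of the RTN_* values:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

enum rtn_type { RTN_UNICAST = 1, RTN_LOCAL = 2, RTN_BROADCAST = 3,
		RTN_MULTICAST = 5 };		/* subset, cf. rtnetlink.h */

#define INADDR_ANY_BE 0x00000000u

/* cf. inet_can_nonlocal_bind(): any one knob enables nonlocal binds */
static bool can_nonlocal_bind(bool sysctl_nonlocal, bool freebind,
			      bool transparent)
{
	return sysctl_nonlocal || freebind || transparent;
}

static bool addr_valid_or_nonlocal(bool nonlocal, uint32_t addr_be,
				   enum rtn_type addr_type)
{
	return nonlocal || addr_be == INADDR_ANY_BE ||
	       addr_type == RTN_LOCAL || addr_type == RTN_MULTICAST ||
	       addr_type == RTN_BROADCAST;
}

int main(void)
{
	/* a plain socket may bind a wildcard or locally owned address... */
	assert(addr_valid_or_nonlocal(false, INADDR_ANY_BE, RTN_UNICAST));
	assert(addr_valid_or_nonlocal(false, 0x0100a8c0, RTN_LOCAL));
	/* ...but a foreign unicast address only with nonlocal binding */
	assert(!addr_valid_or_nonlocal(false, 0x08080808, RTN_UNICAST));
	assert(addr_valid_or_nonlocal(can_nonlocal_bind(true, false, false),
				      0x08080808, RTN_UNICAST));
	return 0;
}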
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index dfd919b3119e..5b47545f22d3 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -65,10 +65,9 @@ struct inet_timewait_sock {
/* these three are in inet_sock */
__be16 tw_sport;
/* And these are ours. */
- unsigned int tw_kill : 1,
- tw_transparent : 1,
+ unsigned int tw_transparent : 1,
tw_flowlabel : 20,
- tw_pad : 2, /* 2 bits hole */
+ tw_pad : 3, /* 3 bits hole */
tw_tos : 8;
u32 tw_txhash;
u32 tw_priority;
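Widening tw_pad keeps the bitfield at exactly one 32-bit word once tw_kill is dropped; the arithmetic, as a compile-and-run check:

#include <assert.h>

int main(void)
{
	/* old layout: kill(1) + transparent(1) + flowlabel(20) + pad(2) + tos(8) */
	assert(1 + 1 + 20 + 2 + 8 == 32);
	/* new layout: transparent(1) + flowlabel(20) + pad(3) + tos(8) */
	assert(1 + 20 + 3 + 8 == 32);
	return 0;
}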
diff --git a/include/net/ioam6.h b/include/net/ioam6.h
new file mode 100644
index 000000000000..781d2d8b2f29
--- /dev/null
+++ b/include/net/ioam6.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * IPv6 IOAM implementation
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+
+#ifndef _NET_IOAM6_H
+#define _NET_IOAM6_H
+
+#include <linux/net.h>
+#include <linux/ipv6.h>
+#include <linux/ioam6.h>
+#include <linux/rhashtable-types.h>
+
+struct ioam6_namespace {
+ struct rhash_head head;
+ struct rcu_head rcu;
+
+ struct ioam6_schema __rcu *schema;
+
+ __be16 id;
+ __be32 data;
+ __be64 data_wide;
+};
+
+struct ioam6_schema {
+ struct rhash_head head;
+ struct rcu_head rcu;
+
+ struct ioam6_namespace __rcu *ns;
+
+ u32 id;
+ int len;
+ __be32 hdr;
+
+ u8 data[];
+};
+
+struct ioam6_pernet_data {
+ struct mutex lock;
+ struct rhashtable namespaces;
+ struct rhashtable schemas;
+};
+
+static inline struct ioam6_pernet_data *ioam6_pernet(struct net *net)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ return net->ipv6.ioam6_data;
+#else
+ return NULL;
+#endif
+}
+
+struct ioam6_namespace *ioam6_namespace(struct net *net, __be16 id);
+void ioam6_fill_trace_data(struct sk_buff *skb,
+ struct ioam6_namespace *ns,
+ struct ioam6_trace_hdr *trace,
+ bool is_input);
+
+int ioam6_init(void);
+void ioam6_exit(void);
+
+int ioam6_iptunnel_init(void);
+void ioam6_iptunnel_exit(void);
+
+#endif /* _NET_IOAM6_H */
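The two per-netns rhashtables map namespace and schema ids to their objects. A hedged kernel-style sketch of what a lookup such as ioam6_namespace() plausibly does with the generic rhashtable API; the rhashtable_params values are assumptions for illustration, not copied from net/ipv6/ioam6.c:

/* Hedged sketch only: look up an IOAM namespace by its __be16 id. */
#include <linux/rhashtable.h>
#include <net/ioam6.h>

static const struct rhashtable_params rht_ns_params = {
	.key_offset	= offsetof(struct ioam6_namespace, id),
	.key_len	= sizeof(__be16),
	.head_offset	= offsetof(struct ioam6_namespace, head),
	.automatic_shrinking = true,
};

static struct ioam6_namespace *lookup_ns(struct net *net, __be16 id)
{
	struct ioam6_pernet_data *nsdata = ioam6_pernet(net);

	if (!nsdata)		/* !CONFIG_IPV6 */
		return NULL;
	return rhashtable_lookup_fast(&nsdata->namespaces, &id,
				      rht_ns_params);
}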
diff --git a/include/net/ip.h b/include/net/ip.h
index 5b317c9f4470..038097c2a152 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -23,6 +23,8 @@
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
+#include <linux/sockptr.h>
+#include <linux/static_key.h>
#include <net/inet_sock.h>
#include <net/route.h>
@@ -30,6 +32,7 @@
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/netns/hash.h>
+#include <net/lwtunnel.h>
#define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU 68 /* RFC 791 */
@@ -53,6 +56,7 @@ struct inet_skb_parm {
#define IPSKB_DOREDIRECT BIT(5)
#define IPSKB_FRAG_PMTU BIT(6)
#define IPSKB_L3SLAVE BIT(7)
+#define IPSKB_NOPOLICY BIT(8)
u16 frag_max_size;
};
@@ -90,7 +94,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
ipcm->sockc.mark = inet->sk.sk_mark;
ipcm->sockc.tsflags = inet->sk.sk_tsflags;
- ipcm->oif = inet->sk.sk_bound_dev_if;
+ ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
ipcm->addr = inet->inet_saddr;
}
@@ -98,7 +102,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))
/* return enslaved device index if relevant */
-static inline int inet_sdif(struct sk_buff *skb)
+static inline int inet_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
@@ -150,7 +154,7 @@ int igmp_mc_init(void);
int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
__be32 saddr, __be32 daddr,
- struct ip_options_rcu *opt);
+ struct ip_options_rcu *opt, u8 tos);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
struct net_device *orig_dev);
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
@@ -231,11 +235,7 @@ struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
struct ipcm_cookie *ipc, struct rtable **rtp,
struct inet_cork *cork, unsigned int flags);
-static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb,
- struct flowi *fl)
-{
- return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
-}
+int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
@@ -293,7 +293,11 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
#define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
-u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
+static inline u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
+{
+ return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
+}
+
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
@@ -353,7 +357,7 @@ static inline bool sysctl_dev_name_is_allowed(const char *name)
static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
- return port < net->ipv4.sysctl_ip_prot_sock;
+ return port < READ_ONCE(net->ipv4.sysctl_ip_prot_sock);
}
#else
@@ -380,7 +384,7 @@ void ipfrag_init(void);
void ip_static_sysctl_init(void);
#define IP4_REPLY_MARK(net, mark) \
- ((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)
+ (READ_ONCE((net)->ipv4.sysctl_fwmark_reflect) ? (mark) : 0)
static inline bool ip_is_fragment(const struct iphdr *iph)
{
@@ -438,26 +442,49 @@ static inline bool ip_sk_ignore_df(const struct sock *sk)
static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
bool forwarding)
{
+ const struct rtable *rt = container_of(dst, struct rtable, dst);
struct net *net = dev_net(dst->dev);
+ unsigned int mtu;
- if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
+ if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
ip_mtu_locked(dst) ||
- !forwarding)
- return dst_mtu(dst);
+ !forwarding) {
+ mtu = rt->rt_pmtu;
+ if (mtu && time_before(jiffies, rt->dst.expires))
+ goto out;
+ }
+
+ /* 'forwarding = true' case should always honour route mtu */
+ mtu = dst_metric_raw(dst, RTAX_MTU);
+ if (mtu)
+ goto out;
+
+ mtu = READ_ONCE(dst->dev->mtu);
+
+ if (unlikely(ip_mtu_locked(dst))) {
+ if (rt->rt_uses_gateway && mtu > 576)
+ mtu = 576;
+ }
+
+out:
+ mtu = min_t(unsigned int, mtu, IP_MAX_MTU);
- return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
+ return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
const struct sk_buff *skb)
{
+ unsigned int mtu;
+
if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
}
- return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
+ mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
+ return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
}
struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
@@ -491,7 +518,6 @@ void ip_dst_metrics_put(struct dst_entry *dst)
kfree(p);
}
-u32 ip_idents_reserve(u32 hash, int segs);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
@@ -499,19 +525,18 @@ static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
{
struct iphdr *iph = ip_hdr(skb);
+ /* We had many attacks based on IPID, use the private
+ * generator as much as we can.
+ */
+ if (sk && inet_sk(sk)->inet_daddr) {
+ iph->id = htons(inet_sk(sk)->inet_id);
+ inet_sk(sk)->inet_id += segs;
+ return;
+ }
if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
- /* This is only to work around buggy Windows95/2000
- * VJ compression implementations. If the ID field
- * does not change, they drop every other packet in
- * a TCP stream using header compression.
- */
- if (sk && inet_sk(sk)->inet_daddr) {
- iph->id = htons(inet_sk(sk)->inet_id);
- inet_sk(sk)->inet_id += segs;
- } else {
- iph->id = 0;
- }
+ iph->id = 0;
} else {
+ /* Unfortunately we need the big hammer to get a suitable IPID */
__ip_select_ident(net, iph, segs);
}
}
@@ -542,14 +567,6 @@ static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}
-static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
-{
- const struct iphdr *iph = skb_gro_network_header(skb);
-
- return csum_tcpudp_nofold(iph->saddr, iph->daddr,
- skb_gro_len(skb), proto, 0);
-}
-
/*
* Map a multicast IP onto multicast MAC for type ethernet.
*/
@@ -695,7 +712,7 @@ int ip_forward(struct sk_buff *skb);
*/
void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
- __be32 daddr, struct rtable *rt, int is_frag);
+ __be32 daddr, struct rtable *rt);
int __ip_options_echo(struct net *net, struct ip_options *dopt,
struct sk_buff *skb, const struct ip_options *sopt);
@@ -711,9 +728,7 @@ int __ip_options_compile(struct net *net, struct ip_options *opt,
int ip_options_compile(struct net *net, struct ip_options *opt,
struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
- unsigned char *data, int optlen);
-int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
- unsigned char __user *data, int optlen);
+ sockptr_t data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
@@ -727,14 +742,15 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
struct ipcm_cookie *ipc, bool allow_ipv6);
-int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
+DECLARE_STATIC_KEY_FALSE(ip4_min_ttl);
+int do_ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+ unsigned int optlen);
+int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
unsigned int optlen);
+int do_ip_getsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, sockptr_t optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
int __user *optlen);
-int compat_ip_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen);
-int compat_ip_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
void (*destructor)(struct sock *));
@@ -765,4 +781,11 @@ static inline bool inetdev_valid_mtu(unsigned int mtu)
return likely(mtu >= IPV4_MIN_MTU);
}
+void ip_sock_set_freebind(struct sock *sk);
+int ip_sock_set_mtu_discover(struct sock *sk, int val);
+void ip_sock_set_pktinfo(struct sock *sk);
+void ip_sock_set_recverr(struct sock *sk);
+void ip_sock_set_tos(struct sock *sk, int val);
+void __ip_sock_set_tos(struct sock *sk, int val);
+
#endif /* _IP_H */
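The reordered ip_select_ident_segs() prefers the private per-socket counter, then zero for atomic (DF) datagrams whose id is never used, and only then the global generator. The same decision order as a userspace sketch (the enum values are illustrative):

#include <stdbool.h>
#include <stdio.h>

enum ipid_src { IPID_SOCKET_COUNTER, IPID_ZERO, IPID_GENERATOR };

static enum ipid_src pick_ipid(bool connected_sk, bool df_set,
			       bool ignore_df)
{
	if (connected_sk)		/* sk && inet_sk(sk)->inet_daddr */
		return IPID_SOCKET_COUNTER;
	if (df_set && !ignore_df)	/* atomic datagram: id is unused */
		return IPID_ZERO;
	return IPID_GENERATOR;		/* the "big hammer" */
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_ipid(true, true, false),	/* 0: socket counter */
	       pick_ipid(false, true, false),	/* 1: zero */
	       pick_ipid(false, false, false));	/* 2: generator */
	return 0;
}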
diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h
index 7bec95df4f80..c8a96b888277 100644
--- a/include/net/ip6_checksum.h
+++ b/include/net/ip6_checksum.h
@@ -43,14 +43,6 @@ static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto)
skb->len, proto, 0));
}
-static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
-{
- const struct ipv6hdr *iph = skb_gro_network_header(skb);
-
- return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
- skb_gro_len(skb), proto, 0));
-}
-
static __inline__ __sum16 tcp_v6_check(int len,
const struct in6_addr *saddr,
const struct in6_addr *daddr,
@@ -65,25 +57,19 @@ static inline void __tcp_v6_send_check(struct sk_buff *skb,
{
struct tcphdr *th = tcp_hdr(skb);
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
- skb->csum_start = skb_transport_header(skb) - skb->head;
- skb->csum_offset = offsetof(struct tcphdr, check);
- } else {
- th->check = tcp_v6_check(skb->len, saddr, daddr,
- csum_partial(th, th->doff << 2,
- skb->csum));
- }
+ th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct tcphdr, check);
}
-#if IS_ENABLED(CONFIG_IPV6)
-static inline void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
+static inline void tcp_v6_gso_csum_prep(struct sk_buff *skb)
{
- struct ipv6_pinfo *np = inet6_sk(sk);
+ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct tcphdr *th = tcp_hdr(skb);
- __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr);
+ ipv6h->payload_len = 0;
+ th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
}
-#endif
static inline __sum16 udp_v6_check(int len,
const struct in6_addr *saddr,
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index fd60a8ac02ee..6268963d9599 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -19,6 +19,8 @@
#include <net/netlink.h>
#include <net/inetpeer.h>
#include <net/fib_notifier.h>
+#include <linux/indirect_call_wrapper.h>
+#include <uapi/linux/bpf.h>
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
#define FIB6_TABLE_HASHSZ 256
@@ -65,6 +67,7 @@ struct fib6_config {
struct nl_info fc_nlinfo;
struct nlattr *fc_encap;
u16 fc_encap_type;
+ bool fc_is_fdb;
};
struct fib6_node {
@@ -164,7 +167,7 @@ struct fib6_info {
struct fib6_node __rcu *fib6_node;
/* Multipath routes:
- * siblings is a list of fib6_info that have the the same metric/weight,
+ * siblings is a list of fib6_info that have the same metric/weight,
* destination, but not the same gateway. nsiblings is just a cache
* to speed up lookup.
*/
@@ -187,23 +190,26 @@ struct fib6_info {
u32 fib6_metric;
u8 fib6_protocol;
u8 fib6_type;
+
+ u8 offload;
+ u8 trap;
+ u8 offload_failed;
+
u8 should_flush:1,
dst_nocount:1,
dst_nopolicy:1,
- dst_host:1,
fib6_destroying:1,
- offload:1,
- trap:1,
- unused:1;
+ unused:4;
struct rcu_head rcu;
struct nexthop *nh;
- struct fib6_nh fib6_nh[0];
+ struct fib6_nh fib6_nh[];
};
struct rt6_info {
struct dst_entry dst;
struct fib6_info __rcu *from;
+ int sernum;
struct rt6key rt6i_dst;
struct rt6key rt6i_src;
@@ -264,7 +270,7 @@ static inline bool fib6_check_expired(const struct fib6_info *f6i)
return false;
}
-/* Function to safely get fn->sernum for passed in rt
+/* Function to safely get fn->fn_sernum for passed in rt
* and store result in passed in cookie.
* Return true if we can get cookie safely
* Return false if not
@@ -278,8 +284,8 @@ static inline bool fib6_get_cookie_safe(const struct fib6_info *f6i,
fn = rcu_dereference(f6i->fib6_node);
if (fn) {
- *cookie = fn->fn_sernum;
- /* pairs with smp_wmb() in fib6_update_sernum_upto_root() */
+ *cookie = READ_ONCE(fn->fn_sernum);
+ /* pairs with smp_wmb() in __fib6_update_sernum_upto_root() */
smp_rmb();
status = true;
}
@@ -292,6 +298,9 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt)
struct fib6_info *from;
u32 cookie = 0;
+ if (rt->sernum)
+ return rt->sernum;
+
rcu_read_lock();
from = rcu_dereference(rt->from);
@@ -331,13 +340,6 @@ static inline void fib6_info_release(struct fib6_info *f6i)
call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
}
-static inline void fib6_info_hw_flags_set(struct fib6_info *f6i, bool offload,
- bool trap)
-{
- f6i->offload = offload;
- f6i->trap = trap;
-}
-
enum fib6_walk_state {
#ifdef CONFIG_IPV6_SUBTREES
FWS_S,
@@ -367,9 +369,8 @@ struct rt6_statistics {
__u32 fib_rt_cache; /* cached rt entries in exception table */
__u32 fib_discarded_routes; /* total number of routes delete */
- /* The following stats are not protected by any lock */
+ /* The following stat is not protected by any lock */
atomic_t fib_rt_alloc; /* total number of routes alloced */
- atomic_t fib_rt_uncache; /* rt entries in uncached list */
};
#define RTN_TL_ROOT 0x0001
@@ -486,6 +487,7 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
struct fib6_config *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack);
void fib6_nh_release(struct fib6_nh *fib6_nh);
+void fib6_nh_release_dsts(struct fib6_nh *fib6_nh);
int call_fib6_entry_notifiers(struct net *net,
enum fib_event_type event_type,
@@ -540,6 +542,50 @@ static inline bool fib6_metric_locked(struct fib6_info *f6i, int metric)
{
return !!(f6i->fib6_metrics->metrics[RTAX_LOCK - 1] & (1 << metric));
}
+void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i,
+ bool offload, bool trap, bool offload_failed);
+
+#if IS_BUILTIN(CONFIG_IPV6) && defined(CONFIG_BPF_SYSCALL)
+struct bpf_iter__ipv6_route {
+ __bpf_md_ptr(struct bpf_iter_meta *, meta);
+ __bpf_md_ptr(struct fib6_info *, rt);
+};
+#endif
+
+INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_output(struct net *net,
+ struct fib6_table *table,
+ struct flowi6 *fl6,
+ const struct sk_buff *skb,
+ int flags));
+INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_input(struct net *net,
+ struct fib6_table *table,
+ struct flowi6 *fl6,
+ const struct sk_buff *skb,
+ int flags));
+INDIRECT_CALLABLE_DECLARE(struct rt6_info *__ip6_route_redirect(struct net *net,
+ struct fib6_table *table,
+ struct flowi6 *fl6,
+ const struct sk_buff *skb,
+ int flags));
+INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_lookup(struct net *net,
+ struct fib6_table *table,
+ struct flowi6 *fl6,
+ const struct sk_buff *skb,
+ int flags));
+static inline struct rt6_info *pol_lookup_func(pol_lookup_t lookup,
+ struct net *net,
+ struct fib6_table *table,
+ struct flowi6 *fl6,
+ const struct sk_buff *skb,
+ int flags)
+{
+ return INDIRECT_CALL_4(lookup,
+ ip6_pol_route_output,
+ ip6_pol_route_input,
+ ip6_pol_route_lookup,
+ __ip6_route_redirect,
+ net, table, fl6, skb, flags);
+}
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
static inline bool fib6_has_custom_rules(const struct net *net)
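pol_lookup_func() leans on INDIRECT_CALL_4() to avoid a retpoline-priced indirect call whenever the target is one of the four declared lookup functions. Simplified from include/linux/indirect_call_wrapper.h:

/* Compare the pointer against the likely targets and call directly,
 * falling back to the indirect call only for an unknown target.
 */
#define likely(x) __builtin_expect(!!(x), 1)

#define INDIRECT_CALL_1(f, f1, ...)					\
	({ likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); })
#define INDIRECT_CALL_2(f, f2, f1, ...)					\
	({ likely(f == f2) ? f2(__VA_ARGS__)				\
			   : INDIRECT_CALL_1(f, f1, __VA_ARGS__); })
/* INDIRECT_CALL_3/4 nest the same way, so pol_lookup_func() costs at
 * most four pointer compares before reaching a direct call.
 */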
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index b69c16cbbf71..035d61d50a98 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -2,6 +2,16 @@
#ifndef _NET_IP6_ROUTE_H
#define _NET_IP6_ROUTE_H
+#include <net/addrconf.h>
+#include <net/flow.h>
+#include <net/ip6_fib.h>
+#include <net/sock.h>
+#include <net/lwtunnel.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/route.h>
+#include <net/nexthop.h>
+
struct route_info {
__u8 type;
__u8 length;
@@ -16,19 +26,9 @@ struct route_info {
reserved_h:3;
#endif
__be32 lifetime;
- __u8 prefix[0]; /* 0,8 or 16 */
+ __u8 prefix[]; /* 0,8 or 16 */
};
-#include <net/addrconf.h>
-#include <net/flow.h>
-#include <net/ip6_fib.h>
-#include <net/sock.h>
-#include <net/lwtunnel.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/route.h>
-#include <net/nexthop.h>
-
#define RT6_LOOKUP_F_IFACE 0x00000001
#define RT6_LOOKUP_F_REACHABLE 0x00000002
#define RT6_LOOKUP_F_HAS_SADDR 0x00000004
@@ -118,12 +118,13 @@ void ip6_route_init_special_entries(void);
int ip6_route_init(void);
void ip6_route_cleanup(void);
-int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg);
+int ipv6_route_ioctl(struct net *net, unsigned int cmd,
+ struct in6_rtmsg *rtmsg);
int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack);
int ip6_ins_rt(struct net *net, struct fib6_info *f6i);
-int ip6_del_rt(struct net *net, struct fib6_info *f6i);
+int ip6_del_rt(struct net *net, struct fib6_info *f6i, bool skip_notify);
void rt6_flush_exceptions(struct fib6_info *f6i);
void rt6_age_exceptions(struct fib6_info *f6i, struct fib6_gc_args *gc_args,
@@ -173,7 +174,8 @@ struct fib6_info *rt6_get_dflt_router(struct net *net,
struct net_device *dev);
struct fib6_info *rt6_add_dflt_router(struct net *net,
const struct in6_addr *gwaddr,
- struct net_device *dev, unsigned int pref);
+ struct net_device *dev, unsigned int pref,
+ u32 defrtr_usr_metric);
void rt6_purge_dflt_routers(struct net *net);
@@ -254,19 +256,27 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
return rt->rt6i_flags & RTF_ANYCAST ||
(rt->rt6i_dst.plen < 127 &&
+ !(rt->rt6i_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) &&
ipv6_addr_equal(&rt->rt6i_dst.addr, daddr));
}
int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *));
-static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
+static inline unsigned int ip6_skb_dst_mtu(const struct sk_buff *skb)
{
- struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
+ const struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
inet6_sk(skb->sk) : NULL;
+ const struct dst_entry *dst = skb_dst(skb);
+ unsigned int mtu;
- return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ?
- skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
+ if (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) {
+ mtu = READ_ONCE(dst->dev->mtu);
+ mtu -= lwtunnel_headroom(dst->lwtstate, mtu);
+ } else {
+ mtu = dst_mtu(dst);
+ }
+ return mtu;
}
static inline bool ip6_sk_accept_pmtu(const struct sock *sk)
@@ -306,15 +316,16 @@ static inline bool rt6_duplicate_nexthop(struct fib6_info *a, struct fib6_info *
!lwtunnel_cmp_encap(nha->fib_nh_lws, nhb->fib_nh_lws);
}
-static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
+static inline unsigned int ip6_dst_mtu_maybe_forward(const struct dst_entry *dst,
+ bool forwarding)
{
struct inet6_dev *idev;
unsigned int mtu;
- if (dst_metric_locked(dst, RTAX_MTU)) {
+ if (!forwarding || dst_metric_locked(dst, RTAX_MTU)) {
mtu = dst_metric_raw(dst, RTAX_MTU);
if (mtu)
- return mtu;
+ goto out;
}
mtu = IPV6_MIN_MTU;
@@ -324,7 +335,8 @@ static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
mtu = idev->cnf.mtu6;
rcu_read_unlock();
- return mtu;
+out:
+ return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
u32 ip6_mtu_from_fib6(const struct fib6_result *res,
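All three MTU helpers now end by subtracting lwtunnel_headroom(), so the MTU handed back to callers already reserves room for the tunnel encapsulation a lightweight tunnel state will add. Illustrative numbers only:

#include <assert.h>

static unsigned int usable_mtu(unsigned int link_mtu,
			       unsigned int encap_headroom)
{
	return link_mtu - encap_headroom;
}

int main(void)
{
	/* e.g. a 1500-byte link minus 40 bytes of outer IPv6 header */
	assert(usable_mtu(1500, 40) == 1460);
	return 0;
}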
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 028eaea1c854..74b369bddf49 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -46,6 +46,7 @@ struct __ip6_tnl_parm {
struct ip6_tnl {
struct ip6_tnl __rcu *next; /* next tunnel in list */
struct net_device *dev; /* virtual device associated with tunnel */
+ netdevice_tracker dev_tracker;
struct net *net; /* netns for packet i/o */
struct __ip6_tnl_parm parms; /* tunnel configuration parameters */
struct flowi fl; /* flowi template for xmit */
@@ -57,7 +58,7 @@ struct ip6_tnl {
/* These fields used only by GRE */
__u32 i_seqno; /* The last seen seqno */
- __u32 o_seqno; /* The last output seqno */
+ atomic_t o_seqno; /* The last output seqno */
int hlen; /* tun_hlen + encap_hlen */
int tun_hlen; /* Precalculated header length */
int encap_hlen; /* Encap header length (FOU,GUE) */
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 6a1ae49809de..a378eff827c7 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -17,6 +17,7 @@
#include <linux/rcupdate.h>
#include <net/fib_notifier.h>
#include <net/fib_rules.h>
+#include <net/inet_dscp.h>
#include <net/inetpeer.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
@@ -24,7 +25,7 @@
struct fib_config {
u8 fc_dst_len;
- u8 fc_tos;
+ dscp_t fc_dscp;
u8 fc_protocol;
u8 fc_scope;
u8 fc_type;
@@ -79,6 +80,7 @@ struct fnhe_hash_bucket {
struct fib_nh_common {
struct net_device *nhc_dev;
+ netdevice_tracker nhc_dev_tracker;
int nhc_oif;
unsigned char nhc_scope;
u8 nhc_family;
@@ -111,6 +113,7 @@ struct fib_nh {
int nh_saddr_genid;
#define fib_nh_family nh_common.nhc_family
#define fib_nh_dev nh_common.nhc_dev
+#define fib_nh_dev_tracker nh_common.nhc_dev_tracker
#define fib_nh_oif nh_common.nhc_oif
#define fib_nh_flags nh_common.nhc_flags
#define fib_nh_lws nh_common.nhc_lwtstate
@@ -133,7 +136,7 @@ struct fib_info {
struct hlist_node fib_lhash;
struct list_head nh_list;
struct net *fib_net;
- int fib_treeref;
+ refcount_t fib_treeref;
refcount_t fib_clntref;
unsigned int fib_flags;
unsigned char fib_dead;
@@ -153,7 +156,7 @@ struct fib_info {
bool nh_updated;
struct nexthop *nh;
struct rcu_head rcu;
- struct fib_nh fib_nh[0];
+ struct fib_nh fib_nh[];
};
@@ -209,11 +212,12 @@ struct fib_rt_info {
u32 tb_id;
__be32 dst;
int dst_len;
- u8 tos;
+ dscp_t dscp;
u8 type;
u8 offload:1,
trap:1,
- unused:6;
+ offload_failed:1,
+ unused:5;
};
struct fib_entry_notifier_info {
@@ -221,7 +225,7 @@ struct fib_entry_notifier_info {
u32 dst;
int dst_len;
struct fib_info *fi;
- u8 tos;
+ dscp_t dscp;
u8 type;
u32 tb_id;
};
@@ -250,14 +254,13 @@ struct fib_table {
int tb_num_default;
struct rcu_head rcu;
unsigned long *tb_data;
- unsigned long __data[0];
+ unsigned long __data[];
};
struct fib_dump_filter {
u32 table_id;
/* filter_set is an optimization that an entry is set */
bool filter_set;
- bool dump_all_families;
bool dump_routes;
bool dump_exceptions;
unsigned char protocol;
@@ -438,7 +441,7 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
#ifdef CONFIG_IP_ROUTE_CLASSID
static inline int fib_num_tclassid_users(struct net *net)
{
- return net->ipv4.fib_num_tclassid_users;
+ return atomic_read(&net->ipv4.fib_num_tclassid_users);
}
#else
static inline int fib_num_tclassid_users(struct net *net)
@@ -448,6 +451,16 @@ static inline int fib_num_tclassid_users(struct net *net)
#endif
int fib_unmerge(struct net *net);
+static inline bool nhc_l3mdev_matches_dev(const struct fib_nh_common *nhc,
+ const struct net_device *dev)
+{
+ if (nhc->nhc_dev == dev ||
+ l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex)
+ return true;
+
+ return false;
+}
+
/* Exported by fib_semantics.c */
int ip_fib_check_default(__be32 gw, struct net_device *dev);
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
@@ -456,6 +469,49 @@ int fib_sync_up(struct net_device *dev, unsigned char nh_flags);
void fib_sync_mtu(struct net_device *dev, u32 orig_mtu);
void fib_nhc_update_mtu(struct fib_nh_common *nhc, u32 new, u32 orig);
+/* Fields used for sysctl_fib_multipath_hash_fields.
+ * Common to IPv4 and IPv6.
+ *
+ * Add new fields at the end. This is user API.
+ */
+#define FIB_MULTIPATH_HASH_FIELD_SRC_IP BIT(0)
+#define FIB_MULTIPATH_HASH_FIELD_DST_IP BIT(1)
+#define FIB_MULTIPATH_HASH_FIELD_IP_PROTO BIT(2)
+#define FIB_MULTIPATH_HASH_FIELD_FLOWLABEL BIT(3)
+#define FIB_MULTIPATH_HASH_FIELD_SRC_PORT BIT(4)
+#define FIB_MULTIPATH_HASH_FIELD_DST_PORT BIT(5)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP BIT(6)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP BIT(7)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO BIT(8)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL BIT(9)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT BIT(10)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT BIT(11)
+
+#define FIB_MULTIPATH_HASH_FIELD_OUTER_MASK \
+ (FIB_MULTIPATH_HASH_FIELD_SRC_IP | \
+ FIB_MULTIPATH_HASH_FIELD_DST_IP | \
+ FIB_MULTIPATH_HASH_FIELD_IP_PROTO | \
+ FIB_MULTIPATH_HASH_FIELD_FLOWLABEL | \
+ FIB_MULTIPATH_HASH_FIELD_SRC_PORT | \
+ FIB_MULTIPATH_HASH_FIELD_DST_PORT)
+
+#define FIB_MULTIPATH_HASH_FIELD_INNER_MASK \
+ (FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
+
+#define FIB_MULTIPATH_HASH_FIELD_ALL_MASK \
+ (FIB_MULTIPATH_HASH_FIELD_OUTER_MASK | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_MASK)
+
+#define FIB_MULTIPATH_HASH_FIELD_DEFAULT_MASK \
+ (FIB_MULTIPATH_HASH_FIELD_SRC_IP | \
+ FIB_MULTIPATH_HASH_FIELD_DST_IP | \
+ FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
+
#ifdef CONFIG_IP_ROUTE_MULTIPATH
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
const struct sk_buff *skb, struct flow_keys *flkeys);
@@ -470,8 +526,9 @@ int fib_nh_init(struct net *net, struct fib_nh *fib_nh,
struct fib_config *cfg, int nh_weight,
struct netlink_ext_ack *extack);
void fib_nh_release(struct net *net, struct fib_nh *fib_nh);
-int fib_nh_common_init(struct fib_nh_common *nhc, struct nlattr *fc_encap,
- u16 fc_encap_type, void *cfg, gfp_t gfp_flags,
+int fib_nh_common_init(struct net *net, struct fib_nh_common *nhc,
+ struct nlattr *fc_encap, u16 fc_encap_type,
+ void *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack);
void fib_nh_common_release(struct fib_nh_common *nhc);
@@ -479,6 +536,8 @@ void fib_nh_common_release(struct fib_nh_common *nhc);
void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri);
void fib_trie_init(void);
struct fib_table *fib_trie_table(u32 id, struct fib_table *alias);
+bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags,
+ const struct flowi4 *flp);
static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
{
@@ -541,5 +600,5 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh,
u8 rt_family, unsigned char *flags, bool skip_oif);
int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh,
- int nh_weight, u8 rt_family);
+ int nh_weight, u8 rt_family, u32 nh_tclassid);
#endif /* _NET_FIB_H */
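The hash-field defines above are a userspace-visible bitmask (written to the fib_multipath_hash_fields sysctl). Composing one, with the same bit positions:

#include <assert.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

#define HASH_SRC_IP		BIT(0)
#define HASH_DST_IP		BIT(1)
#define HASH_INNER_SRC_PORT	BIT(10)
#define HASH_INNER_DST_PORT	BIT(11)

int main(void)
{
	/* hash on outer addresses plus inner L4 ports */
	unsigned int fields = HASH_SRC_IP | HASH_DST_IP |
			      HASH_INNER_SRC_PORT | HASH_INNER_DST_PORT;

	/* value for net.ipv4.fib_multipath_hash_fields */
	printf("0x%04x\n", fields);
	assert(fields == 0x0c03);
	return 0;
}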
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 236503a50759..fca357679816 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -54,6 +54,7 @@ struct ip_tunnel_key {
__be32 label; /* Flow Label for IPv6 */
__be16 tp_src;
__be16 tp_dst;
+ __u8 flow_flags;
};
/* Flags for ip_tunnel_info mode. */
@@ -104,7 +105,10 @@ struct metadata_dst;
struct ip_tunnel {
struct ip_tunnel __rcu *next;
struct hlist_node hash_node;
+
struct net_device *dev;
+ netdevice_tracker dev_tracker;
+
struct net *net; /* netns for packet i/o */
unsigned long err_time; /* Time when the last ICMP error
@@ -113,7 +117,7 @@ struct ip_tunnel {
/* These four fields used only by GRE */
u32 i_seqno; /* The last seen seqno */
- u32 o_seqno; /* The last output seqno */
+ atomic_t o_seqno; /* The last output seqno */
int tun_hlen; /* Precalculated header length */
/* These four fields used only by ERSPAN */
@@ -240,11 +244,19 @@ static inline __be32 tunnel_id_to_key32(__be64 tun_id)
static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
int proto,
__be32 daddr, __be32 saddr,
- __be32 key, __u8 tos, int oif,
- __u32 mark, __u32 tun_inner_hash)
+ __be32 key, __u8 tos,
+ struct net *net, int oif,
+ __u32 mark, __u32 tun_inner_hash,
+ __u8 flow_flags)
{
memset(fl4, 0, sizeof(*fl4));
- fl4->flowi4_oif = oif;
+
+ if (oif) {
+ fl4->flowi4_l3mdev = l3mdev_master_upper_ifindex_by_index_rcu(net, oif);
+ /* Legacy VRF/l3mdev use case */
+ fl4->flowi4_oif = fl4->flowi4_l3mdev ? 0 : oif;
+ }
+
fl4->daddr = daddr;
fl4->saddr = saddr;
fl4->flowi4_tos = tos;
@@ -252,6 +264,7 @@ static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
fl4->fl4_gre_key = key;
fl4->flowi4_mark = mark;
fl4->flowi4_multipath_hash = tun_inner_hash;
+ fl4->flowi4_flags = flow_flags;
}
int ip_tunnel_init(struct net_device *dev);
@@ -269,12 +282,12 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const u8 proto, int tunnel_hlen);
-int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
+int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
+int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
-void ip_tunnel_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *tot);
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
int link, __be16 flags,
__be32 remote, __be32 local,
@@ -289,6 +302,15 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p, __u32 fwmark);
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);
+bool ip_tunnel_netlink_encap_parms(struct nlattr *data[],
+ struct ip_tunnel_encap *encap);
+
+void ip_tunnel_netlink_parms(struct nlattr *data[],
+ struct ip_tunnel_parm *parms);
+
+extern const struct header_ops ip_tunnel_header_ops;
+__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb);
+
struct ip_tunnel_encap_ops {
size_t (*encap_hlen)(struct ip_tunnel_encap *e);
int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
@@ -374,9 +396,11 @@ static inline int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
const struct sk_buff *skb)
{
- if (skb->protocol == htons(ETH_P_IP))
+ __be16 payload_protocol = skb_protocol(skb, true);
+
+ if (payload_protocol == htons(ETH_P_IP))
return iph->tos;
- else if (skb->protocol == htons(ETH_P_IPV6))
+ else if (payload_protocol == htons(ETH_P_IPV6))
return ipv6_get_dsfield((const struct ipv6hdr *)iph);
else
return 0;
@@ -385,9 +409,11 @@ static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph,
const struct sk_buff *skb)
{
- if (skb->protocol == htons(ETH_P_IP))
+ __be16 payload_protocol = skb_protocol(skb, true);
+
+ if (payload_protocol == htons(ETH_P_IP))
return iph->ttl;
- else if (skb->protocol == htons(ETH_P_IPV6))
+ else if (payload_protocol == htons(ETH_P_IPV6))
return ((const struct ipv6hdr *)iph)->hop_limit;
else
return 0;
@@ -416,6 +442,8 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
u8 tos, u8 ttl, __be16 df, bool xnet);
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
gfp_t flags);
+int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
+ int headroom, bool reply);
int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
@@ -441,8 +469,8 @@ static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
u64_stats_update_begin(&tstats->syncp);
- tstats->tx_bytes += pkt_len;
- tstats->tx_packets++;
+ u64_stats_add(&tstats->tx_bytes, pkt_len);
+ u64_stats_inc(&tstats->tx_packets);
u64_stats_update_end(&tstats->syncp);
put_cpu_ptr(tstats);
} else {
@@ -472,9 +500,11 @@ static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
const void *from, int len,
__be16 flags)
{
- memcpy(ip_tunnel_info_opts(info), from, len);
info->options_len = len;
- info->key.tun_flags |= flags;
+ if (len > 0) {
+ memcpy(ip_tunnel_info_opts(info), from, len);
+ info->key.tun_flags |= flags;
+ }
}
static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
@@ -520,7 +550,6 @@ static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
__be16 flags)
{
info->options_len = 0;
- info->key.tun_flags |= flags;
}
#endif /* CONFIG_INET */
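The switch to u64_stats_add()/u64_stats_inc() pairs with seqcount-protected readers, which is what keeps the 64-bit counters tear-free on 32-bit. A kernel-style sketch of the matching reader loop, not tied to any particular driver:

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

static void fetch_tx_stats(const struct pcpu_sw_netstats *tstats,
			   u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		/* snapshot under the syncp seqcount, retry on a race */
		start = u64_stats_fetch_begin(&tstats->syncp);
		*packets = u64_stats_read(&tstats->tx_packets);
		*bytes = u64_stats_read(&tstats->tx_bytes);
	} while (u64_stats_fetch_retry(&tstats->syncp, start));
}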
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 83be2d93b407..ff1804a0c469 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -14,6 +14,7 @@
#include <linux/spinlock.h> /* for struct rwlock_t */
#include <linux/atomic.h> /* for struct atomic_t */
#include <linux/refcount.h> /* for struct refcount_t */
+#include <linux/workqueue.h>
#include <linux/compiler.h>
#include <linux/timer.h>
@@ -24,9 +25,6 @@
#include <linux/ip.h>
#include <linux/ipv6.h> /* for struct ipv6hdr */
#include <net/ipv6.h>
-#if IS_ENABLED(CONFIG_IP_VS_IPV6)
-#include <linux/netfilter_ipv6/ip6_tables.h>
-#endif
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
@@ -874,6 +872,7 @@ struct netns_ipvs {
struct ip_vs_stats tot_stats; /* Statistics & est. */
int num_services; /* no of virtual services */
+ int num_services6; /* IPv6 virtual services */
/* Trash for destinations */
struct list_head dest_trash;
@@ -885,6 +884,8 @@ struct netns_ipvs {
atomic_t conn_out_counter;
#ifdef CONFIG_SYSCTL
+ /* delayed work for expiring no dest connections */
+ struct delayed_work expire_nodest_conn_work;
/* 1/rate drop and drop-entry variables */
struct delayed_work defense_work; /* Work handler */
int drop_rate;
@@ -930,6 +931,7 @@ struct netns_ipvs {
int sysctl_conn_reuse_mode;
int sysctl_schedule_icmp;
int sysctl_ignore_tunneled;
+ int sysctl_run_estimation;
/* ip_vs_lblc */
int sysctl_lblc_expiration;
@@ -960,6 +962,7 @@ struct netns_ipvs {
* are not supported when synchronization is enabled.
*/
unsigned int mixed_address_family_dests;
+ unsigned int hooks_afmask; /* &1=AF_INET, &2=AF_INET6 */
};
#define DEFAULT_SYNC_THRESHOLD 3
@@ -1049,6 +1052,11 @@ static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
return ipvs->sysctl_conn_reuse_mode;
}
+static inline int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_expire_nodest_conn;
+}
+
static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs)
{
return ipvs->sysctl_schedule_icmp;
@@ -1064,6 +1072,11 @@ static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
return ipvs->sysctl_cache_bypass;
}
+static inline int sysctl_run_estimation(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_run_estimation;
+}
+
#else
static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
@@ -1136,6 +1149,11 @@ static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
return 1;
}
+static inline int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs)
+{
+ return 0;
+}
+
static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs)
{
return 0;
@@ -1151,6 +1169,11 @@ static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
return 0;
}
+static inline int sysctl_run_estimation(struct netns_ipvs *ipvs)
+{
+ return 1;
+}
+
#endif
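All of the accessors in this block follow one shape: with CONFIG_SYSCTL they return the per-netns tunable, without it they return the compile-time default (1 for run_estimation, so estimation stays on; 0 for expire_nodest_conn). Reduced to a sketch with a made-up knob:

	#ifdef CONFIG_SYSCTL
	static inline int sysctl_example_knob(struct netns_ipvs *ipvs)
	{
		return ipvs->sysctl_example_knob;	/* hypothetical field */
	}
	#else
	static inline int sysctl_example_knob(struct netns_ipvs *ipvs)
	{
		return 1;	/* built-in default when sysctls are compiled out */
	}
	#endif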
/* IPVS core functions
@@ -1505,6 +1528,22 @@ static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; }
#endif
+#ifdef CONFIG_SYSCTL
+/* Enqueue delayed work for expiring no dest connections
+ * Only run when the expire_nodest_conn sysctl is set to 1.
+ */
+static inline void ip_vs_enqueue_expire_nodest_conns(struct netns_ipvs *ipvs)
+{
+ if (sysctl_expire_nodest_conn(ipvs))
+ queue_delayed_work(system_long_wq,
+ &ipvs->expire_nodest_conn_work, 1);
+}
+
+void ip_vs_expire_nodest_conn_flush(struct netns_ipvs *ipvs);
+#else
+static inline void ip_vs_enqueue_expire_nodest_conns(struct netns_ipvs *ipvs) {}
+#endif
+
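ip_vs_enqueue_expire_nodest_conns() relies on queue_delayed_work() being a no-op when the work is already pending, so packet-path callers may invoke it freely. A sketch of the matching handler side, assuming the handler re-arms itself while expirable connections remain (the real flush logic is in the IPVS core, not shown here):

	static void demo_expire_work_fn(struct work_struct *work)
	{
		struct delayed_work *dwork = to_delayed_work(work);
		struct netns_ipvs *ipvs = container_of(dwork, struct netns_ipvs,
						       expire_nodest_conn_work);

		/* ... expire connections whose destination is gone ... */

		/* Re-arm while there is more to do (illustrative policy). */
		if (0 /* more_work_pending(ipvs), hypothetical */)
			queue_delayed_work(system_long_wq,
					   &ipvs->expire_nodest_conn_work, 1);
	}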
#define IP_VS_DFWD_METHOD(dest) (atomic_read(&(dest)->conn_flags) & \
IP_VS_CONN_F_FWD_MASK)
@@ -1624,18 +1663,16 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
}
#endif /* CONFIG_IP_VS_NFCT */
-/* Really using conntrack? */
-static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
- struct sk_buff *skb)
+/* Using old conntrack that can not be redirected to another real server? */
+static inline bool ip_vs_conn_uses_old_conntrack(struct ip_vs_conn *cp,
+ struct sk_buff *skb)
{
#ifdef CONFIG_IP_VS_NFCT
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
- if (!(cp->flags & IP_VS_CONN_F_NFCT))
- return false;
ct = nf_ct_get(skb, &ctinfo);
- if (ct)
+ if (ct && nf_ct_is_confirmed(ct))
return true;
#endif
return false;
@@ -1670,6 +1707,9 @@ static inline void ip_vs_unregister_conntrack(struct ip_vs_service *svc)
#endif
}
+int ip_vs_register_hooks(struct netns_ipvs *ipvs, unsigned int af);
+void ip_vs_unregister_hooks(struct netns_ipvs *ipvs, unsigned int af);
+
static inline int
ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
{
@@ -1683,4 +1723,15 @@ ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
atomic_read(&dest->inactconns);
}
+#ifdef CONFIG_IP_VS_PROTO_TCP
+INDIRECT_CALLABLE_DECLARE(int
+ tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ struct ip_vs_conn *cp, struct ip_vs_iphdr *iph));
+#endif
+
+#ifdef CONFIG_IP_VS_PROTO_UDP
+INDIRECT_CALLABLE_DECLARE(int
+ udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ struct ip_vs_conn *cp, struct ip_vs_iphdr *iph));
+#endif
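The INDIRECT_CALLABLE_DECLARE() annotations make tcp_snat_handler/udp_snat_handler visible to the indirect-call wrappers from <linux/indirect_call_wrapper.h>, which turn a retpoline-expensive function-pointer call into likely direct calls. A sketch of a call site using them, matching how the IPVS output path invokes pp->snat_handler:

	#include <linux/indirect_call_wrapper.h>

	static int demo_snat(struct sk_buff *skb, struct ip_vs_protocol *pp,
			     struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
	{
		/* Tries the two common handlers as direct calls before
		 * falling back to the indirect pp->snat_handler.
		 */
		return INDIRECT_CALL_2(pp->snat_handler,
				       tcp_snat_handler, udp_snat_handler,
				       skb, pp, cp, iph);
	}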
#endif /* _NET_IP_VS_H */
diff --git a/include/net/ipcomp.h b/include/net/ipcomp.h
index fee6fc451597..8660a2a6d1fc 100644
--- a/include/net/ipcomp.h
+++ b/include/net/ipcomp.h
@@ -2,11 +2,13 @@
#ifndef _NET_IPCOMP_H
#define _NET_IPCOMP_H
+#include <linux/skbuff.h>
#include <linux/types.h>
#define IPCOMP_SCRATCH_SIZE 65400
struct crypto_comp;
+struct ip_comp_hdr;
struct ipcomp_data {
u16 threshold;
@@ -20,7 +22,7 @@ struct xfrm_state;
int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb);
int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb);
void ipcomp_destroy(struct xfrm_state *x);
-int ipcomp_init_state(struct xfrm_state *x);
+int ipcomp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack);
static inline struct ip_comp_hdr *ip_comp_hdr(const struct sk_buff *skb)
{
diff --git a/include/net/ipconfig.h b/include/net/ipconfig.h
index e3534299bd2a..8276897d0c2e 100644
--- a/include/net/ipconfig.h
+++ b/include/net/ipconfig.h
@@ -7,6 +7,8 @@
/* The following are initdata: */
+#include <linux/types.h>
+
extern int ic_proto_enabled; /* Protocols enabled (see IC_xxx) */
extern int ic_set_manually; /* IPconfig parameters set manually */
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index cec1a54401f2..37943ba3a73c 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -15,12 +15,14 @@
#include <linux/refcount.h>
#include <linux/jump_label_ratelimit.h>
#include <net/if_inet6.h>
-#include <net/ndisc.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
+#include <net/inet_dscp.h>
#include <net/snmp.h>
#include <net/netns/hash.h>
+struct ip_tunnel_info;
+
#define SIN6_LEN_RFC2133 24
#define IPV6_MAXPLEN 65535
@@ -30,6 +32,7 @@
*/
#define NEXTHDR_HOP 0 /* Hop-by-hop option header. */
+#define NEXTHDR_IPV4 4 /* IPv4 in IPv6 */
#define NEXTHDR_TCP 6 /* TCP segment. */
#define NEXTHDR_UDP 17 /* UDP message. */
#define NEXTHDR_IPV6 41 /* IPv6 in IPv6 */
@@ -148,6 +151,17 @@ struct frag_hdr {
__be32 identification;
};
+/*
+ * Jumbo payload option, as described in RFC 2675, section 2.
+ */
+struct hop_jumbo_hdr {
+ u8 nexthdr;
+ u8 hdrlen;
+ u8 tlv_type; /* IPV6_TLV_JUMBO, 0xC2 */
+ u8 tlv_len; /* 4 */
+ __be32 jumbo_payload_len;
+};
+
#define IP6_MF 0x0001
#define IP6_OFFSET 0xFFF8
@@ -344,9 +358,9 @@ struct ipcm6_cookie {
struct sockcm_cookie sockc;
__s16 hlimit;
__s16 tclass;
+ __u16 gso_size;
__s8 dontfrag;
struct ipv6_txoptions *opt;
- __u16 gso_size;
};
static inline void ipcm6_init(struct ipcm6_cookie *ipc6)
@@ -390,23 +404,26 @@ static inline void txopt_put(struct ipv6_txoptions *opt)
kfree_rcu(opt, rcu);
}
+#if IS_ENABLED(CONFIG_IPV6)
struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label);
extern struct static_key_false_deferred ipv6_flowlabel_exclusive;
static inline struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk,
__be32 label)
{
- if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key))
+ if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key) &&
+ READ_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl))
return __fl6_sock_lookup(sk, label) ? : ERR_PTR(-ENOENT);
return NULL;
}
+#endif
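The extra READ_ONCE(...->flowlabel_has_excl) check narrows the slow path to network namespaces that actually created exclusive flowlabel leases, while the static branch keeps the common case (no exclusive leases anywhere) free of any lookup. The static-key idiom, reduced to a sketch with an illustrative key:

	#include <linux/jump_label.h>

	DEFINE_STATIC_KEY_FALSE(demo_feature_key);	/* hypothetical key */

	static inline void demo_hot_path(void)
	{
		/* Compiles to a NOP until static_branch_enable() patches it in. */
		if (static_branch_unlikely(&demo_feature_key))
			; /* rare, feature-specific work */
	}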
struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
struct ip6_flowlabel *fl,
struct ipv6_txoptions *fopt);
void fl6_free_socklist(struct sock *sk);
-int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen);
+int ipv6_flowlabel_opt(struct sock *sk, sockptr_t optval, int optlen);
int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
int flags);
int ip6_flowlabel_init(void);
@@ -434,14 +451,55 @@ struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
struct ipv6_txoptions *opt,
int newtype,
struct ipv6_opt_hdr *newopt);
-struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
- struct ipv6_txoptions *opt);
+struct ipv6_txoptions *__ipv6_fixup_options(struct ipv6_txoptions *opt_space,
+ struct ipv6_txoptions *opt);
+
+static inline struct ipv6_txoptions *
+ipv6_fixup_options(struct ipv6_txoptions *opt_space, struct ipv6_txoptions *opt)
+{
+ if (!opt)
+ return NULL;
+ return __ipv6_fixup_options(opt_space, opt);
+}
bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
const struct inet6_skb_parm *opt);
struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
struct ipv6_txoptions *opt);
+/* This helper is specialized for BIG TCP needs.
+ * It assumes the hop_jumbo_hdr will immediately follow the IPv6 header.
+ * It assumes headers are already in skb->head.
+ * Returns 0, or IPPROTO_TCP if a BIG TCP packet is there.
+ */
+static inline int ipv6_has_hopopt_jumbo(const struct sk_buff *skb)
+{
+ const struct hop_jumbo_hdr *jhdr;
+ const struct ipv6hdr *nhdr;
+
+ if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
+ return 0;
+
+ if (skb->protocol != htons(ETH_P_IPV6))
+ return 0;
+
+ if (skb_network_offset(skb) +
+ sizeof(struct ipv6hdr) +
+ sizeof(struct hop_jumbo_hdr) > skb_headlen(skb))
+ return 0;
+
+ nhdr = ipv6_hdr(skb);
+
+ if (nhdr->nexthdr != NEXTHDR_HOP)
+ return 0;
+
+ jhdr = (const struct hop_jumbo_hdr *) (nhdr + 1);
+ if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
+ jhdr->nexthdr != IPPROTO_TCP)
+ return 0;
+ return jhdr->nexthdr;
+}
+
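ipv6_has_hopopt_jumbo() is the receive-side check for BIG TCP: a packet longer than GRO_LEGACY_MAX_SIZE cannot represent its length in the 16-bit IPv6 payload_len, so the real length travels in the RFC 2675 jumbogram TLV right after the IPv6 header. A hedged sketch of the matching construction a transmit path would perform (field values follow the struct's own comments; the helper name is illustrative):

	static void demo_fill_jumbo(struct hop_jumbo_hdr *h, u32 payload_len)
	{
		h->nexthdr = IPPROTO_TCP;	/* what follows the HBH header */
		h->hdrlen = 0;			/* (0 + 1) * 8 == 8 bytes total */
		h->tlv_type = IPV6_TLV_JUMBO;	/* 0xC2 */
		h->tlv_len = 4;			/* size of jumbo_payload_len */
		h->jumbo_payload_len = htonl(payload_len);
	}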
static inline bool ipv6_accept_ra(struct inet6_dev *idev)
{
/* If forwarding is enabled, RA are not accepted unless the special
@@ -908,7 +966,6 @@ static inline int ip6_default_np_autolabel(struct net *net)
}
}
#else
-static inline void ip6_set_txhash(struct sock *sk) { }
static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
__be32 flowlabel, bool autolabel,
struct flowi6 *fl6)
@@ -926,11 +983,19 @@ static inline int ip6_multipath_hash_policy(const struct net *net)
{
return net->ipv6.sysctl.multipath_hash_policy;
}
+static inline u32 ip6_multipath_hash_fields(const struct net *net)
+{
+ return net->ipv6.sysctl.multipath_hash_fields;
+}
#else
static inline int ip6_multipath_hash_policy(const struct net *net)
{
return 0;
}
+static inline u32 ip6_multipath_hash_fields(const struct net *net)
+{
+ return 0;
+}
#endif
/*
@@ -957,6 +1022,11 @@ static inline u8 ip6_tclass(__be32 flowinfo)
return ntohl(flowinfo & IPV6_TCLASS_MASK) >> IPV6_TCLASS_SHIFT;
}
+static inline dscp_t ip6_dscp(__be32 flowinfo)
+{
+ return inet_dsfield_to_dscp(ip6_tclass(flowinfo));
+}
+
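ip6_dscp() simply composes two existing helpers: ip6_tclass() extracts the 8-bit Traffic Class from the flow-info word, and inet_dsfield_to_dscp() masks off the two ECN bits, leaving the 6-bit DSCP as the type-safe dscp_t. Worked example: a Traffic Class of 0xb9 (DSCP 46/EF with ECT(1)) yields a dscp_t holding 0xb8, since 0xb9 & 0xfc == 0xb8.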
static inline __be32 ip6_make_flowinfo(unsigned int tclass, __be32 flowlabel)
{
return htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel;
@@ -993,7 +1063,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
int ip6_append_data(struct sock *sk,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
- void *from, int length, int transhdrlen,
+ void *from, size_t length, int transhdrlen,
struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
struct rt6_info *rt, unsigned int flags);
@@ -1009,8 +1079,8 @@ struct sk_buff *__ip6_make_skb(struct sock *sk, struct sk_buff_head *queue,
struct sk_buff *ip6_make_skb(struct sock *sk,
int getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb),
- void *from, int length, int transhdrlen,
- struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
+ void *from, size_t length, int transhdrlen,
+ struct ipcm6_cookie *ipc6,
struct rt6_info *rt, unsigned int flags,
struct inet_cork_full *cork);
@@ -1027,6 +1097,12 @@ struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, st
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst,
bool connected);
+struct dst_entry *ip6_dst_lookup_tunnel(struct sk_buff *skb,
+ struct net_device *dev,
+ struct net *net, struct socket *sock,
+ struct in6_addr *saddr,
+ const struct ip_tunnel_info *info,
+ u8 protocol, bool use_cache);
struct dst_entry *ip6_blackhole_route(struct net *net,
struct dst_entry *orig_dst);
@@ -1078,15 +1154,16 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
/*
* socket options (ipv6_sockglue.c)
*/
-
-int ipv6_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen);
+DECLARE_STATIC_KEY_FALSE(ip6_min_hopcount);
+
+int do_ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+ unsigned int optlen);
+int ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+ unsigned int optlen);
+int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, sockptr_t optlen);
int ipv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
-int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen);
-int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen);
int __ip6_datagram_connect(struct sock *sk, struct sockaddr *addr,
int addr_len);
@@ -1105,11 +1182,15 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);
+void inet6_cleanup_sock(struct sock *sk);
+void inet6_sock_destruct(struct sock *sk);
int inet6_release(struct socket *sock);
int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
int peer);
int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+int inet6_compat_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg);
int inet6_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk);
@@ -1129,9 +1210,10 @@ struct group_filter;
int ip6_mc_source(int add, int omode, struct sock *sk,
struct group_source_req *pgsr);
-int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf);
+int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
+ struct sockaddr_storage *list);
int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
- struct group_filter __user *optval, int __user *optlen);
+ sockptr_t optval, size_t ss_offset);
#ifdef CONFIG_PROC_FS
int ac6_proc_init(struct net *net);
@@ -1169,4 +1251,96 @@ int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
const struct in6_addr *addr, unsigned int mode);
int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
const struct in6_addr *addr);
+
+static inline int ip6_sock_set_v6only(struct sock *sk)
+{
+ if (inet_sk(sk)->inet_num)
+ return -EINVAL;
+ lock_sock(sk);
+ sk->sk_ipv6only = true;
+ release_sock(sk);
+ return 0;
+}
+
+static inline void ip6_sock_set_recverr(struct sock *sk)
+{
+ lock_sock(sk);
+ inet6_sk(sk)->recverr = true;
+ release_sock(sk);
+}
+
+static inline int __ip6_sock_set_addr_preferences(struct sock *sk, int val)
+{
+ unsigned int pref = 0;
+ unsigned int prefmask = ~0;
+
+ /* check PUBLIC/TMP/PUBTMP_DEFAULT conflicts */
+ switch (val & (IPV6_PREFER_SRC_PUBLIC |
+ IPV6_PREFER_SRC_TMP |
+ IPV6_PREFER_SRC_PUBTMP_DEFAULT)) {
+ case IPV6_PREFER_SRC_PUBLIC:
+ pref |= IPV6_PREFER_SRC_PUBLIC;
+ prefmask &= ~(IPV6_PREFER_SRC_PUBLIC |
+ IPV6_PREFER_SRC_TMP);
+ break;
+ case IPV6_PREFER_SRC_TMP:
+ pref |= IPV6_PREFER_SRC_TMP;
+ prefmask &= ~(IPV6_PREFER_SRC_PUBLIC |
+ IPV6_PREFER_SRC_TMP);
+ break;
+ case IPV6_PREFER_SRC_PUBTMP_DEFAULT:
+ prefmask &= ~(IPV6_PREFER_SRC_PUBLIC |
+ IPV6_PREFER_SRC_TMP);
+ break;
+ case 0:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* check HOME/COA conflicts */
+ switch (val & (IPV6_PREFER_SRC_HOME | IPV6_PREFER_SRC_COA)) {
+ case IPV6_PREFER_SRC_HOME:
+ prefmask &= ~IPV6_PREFER_SRC_COA;
+ break;
+ case IPV6_PREFER_SRC_COA:
+ pref |= IPV6_PREFER_SRC_COA;
+ break;
+ case 0:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* check CGA/NONCGA conflicts */
+ switch (val & (IPV6_PREFER_SRC_CGA|IPV6_PREFER_SRC_NONCGA)) {
+ case IPV6_PREFER_SRC_CGA:
+ case IPV6_PREFER_SRC_NONCGA:
+ case 0:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ inet6_sk(sk)->srcprefs = (inet6_sk(sk)->srcprefs & prefmask) | pref;
+ return 0;
+}
+
+static inline int ip6_sock_set_addr_preferences(struct sock *sk, bool val)
+{
+ int ret;
+
+ lock_sock(sk);
+ ret = __ip6_sock_set_addr_preferences(sk, val);
+ release_sock(sk);
+ return ret;
+}
+
+static inline void ip6_sock_set_recvpktinfo(struct sock *sk)
+{
+ lock_sock(sk);
+ inet6_sk(sk)->rxopt.bits.rxinfo = true;
+ release_sock(sk);
+}
+
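These ip6_sock_set_*() helpers exist so in-kernel socket users can flip options without round-tripping through setsockopt(): take the socket lock, set the field, release. A sketch of a typical caller, e.g. a kernel networking client configuring a freshly created socket (function name illustrative):

	static int demo_setup_sock(struct socket *sock)
	{
		int err = ip6_sock_set_v6only(sock->sk);

		if (err)
			return err;
		ip6_sock_set_recverr(sock->sk);
		ip6_sock_set_recvpktinfo(sock->sk);
		return 0;
	}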
#endif /* _NET_IPV6_H */
diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h
index a21e8b1381a1..5052c66e22d2 100644
--- a/include/net/ipv6_frag.h
+++ b/include/net/ipv6_frag.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IPV6_FRAG_H
#define _IPV6_FRAG_H
+#include <linux/icmpv6.h>
#include <linux/kernel.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
@@ -67,7 +68,8 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
struct sk_buff *head;
rcu_read_lock();
- if (fq->q.fqdir->dead)
+ /* Paired with the WRITE_ONCE() in fqdir_pre_exit(). */
+ if (READ_ONCE(fq->q.fqdir->dead))
goto out_rcu_unlock;
spin_lock(&fq->q.lock);
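The READ_ONCE() is the reader half of a data-race annotation: fqdir->dead is written without the reader's locks held, so both sides need the ONCE accessors to keep the compiler (and KCSAN) honest. The writer half, mirroring the fqdir_pre_exit() pairing named in the comment, looks like this sketch:

	/* Teardown path, paired with the READ_ONCE() above: */
	static void demo_fqdir_pre_exit(struct fqdir *fqdir)
	{
		WRITE_ONCE(fqdir->dead, true);
		/* ... then wait a grace period before freeing anything ... */
	}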
@@ -108,5 +110,35 @@ out_rcu_unlock:
rcu_read_unlock();
inet_frag_put(&fq->q);
}
+
+/* Check if the upper layer header is truncated in the first fragment. */
+static inline bool
+ipv6frag_thdr_truncated(struct sk_buff *skb, int start, u8 *nexthdrp)
+{
+ u8 nexthdr = *nexthdrp;
+ __be16 frag_off;
+ int offset;
+
+ offset = ipv6_skip_exthdr(skb, start, &nexthdr, &frag_off);
+ if (offset < 0 || (frag_off & htons(IP6_OFFSET)))
+ return false;
+ switch (nexthdr) {
+ case NEXTHDR_TCP:
+ offset += sizeof(struct tcphdr);
+ break;
+ case NEXTHDR_UDP:
+ offset += sizeof(struct udphdr);
+ break;
+ case NEXTHDR_ICMP:
+ offset += sizeof(struct icmp6hdr);
+ break;
+ default:
+ offset += 1;
+ }
+ if (offset > skb->len)
+ return true;
+ return false;
+}
+
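ipv6frag_thdr_truncated() walks the extension-header chain from 'start' and then verifies that at least the fixed part of the upper-layer header fits inside the first fragment; the fragment handlers use it to drop fragments that split the TCP/UDP/ICMPv6 header itself. A hedged sketch of a call site (in the real callers, start points just past the fragment header):

	u8 nexthdr = fhdr->nexthdr;	/* fhdr: the frag_hdr just parsed */
	int start = skb_transport_offset(skb) + sizeof(struct frag_hdr);

	if (ipv6frag_thdr_truncated(skb, start, &nexthdr)) {
		kfree_skb(skb);		/* transport header is split: drop */
		return -EINVAL;
	}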
#endif
#endif
diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
index 3e7d2c0e79ca..c48186bf4737 100644
--- a/include/net/ipv6_stubs.h
+++ b/include/net/ipv6_stubs.h
@@ -47,8 +47,9 @@ struct ipv6_stub {
struct fib6_config *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack);
void (*fib6_nh_release)(struct fib6_nh *fib6_nh);
+ void (*fib6_nh_release_dsts)(struct fib6_nh *fib6_nh);
void (*fib6_update_sernum)(struct net *net, struct fib6_info *rt);
- int (*ip6_del_rt)(struct net *net, struct fib6_info *rt);
+ int (*ip6_del_rt)(struct net *net, struct fib6_info *rt, bool skip_notify);
void (*fib6_rt_update)(struct net *net, struct fib6_info *rt,
struct nl_info *info);
@@ -56,19 +57,34 @@ struct ipv6_stub {
void (*ndisc_send_na)(struct net_device *dev, const struct in6_addr *daddr,
const struct in6_addr *solicited_addr,
bool router, bool solicited, bool override, bool inc_opt);
+#if IS_ENABLED(CONFIG_XFRM)
+ void (*xfrm6_local_rxpmtu)(struct sk_buff *skb, u32 mtu);
+ int (*xfrm6_udp_encap_rcv)(struct sock *sk, struct sk_buff *skb);
+ int (*xfrm6_rcv_encap)(struct sk_buff *skb, int nexthdr, __be32 spi,
+ int encap_type);
+#endif
struct neigh_table *nd_tbl;
+
+ int (*ipv6_fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct net *, struct sock *, struct sk_buff *));
+ struct net_device *(*ipv6_dev_find)(struct net *net, const struct in6_addr *addr,
+ struct net_device *dev);
};
extern const struct ipv6_stub *ipv6_stub __read_mostly;
/* A stub used by bpf helpers. Similarly ugly as ipv6_stub */
struct ipv6_bpf_stub {
int (*inet6_bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len,
- bool force_bind_address_no_port, bool with_lock);
+ u32 flags);
struct sock *(*udp6_lib_lookup)(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif, int sdif, struct udp_table *tbl,
struct sk_buff *skb);
+ int (*ipv6_setsockopt)(struct sock *sk, int level, int optname,
+ sockptr_t optval, unsigned int optlen);
+ int (*ipv6_getsockopt)(struct sock *sk, int level, int optname,
+ sockptr_t optval, sockptr_t optlen);
};
extern const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
diff --git a/include/net/ipx.h b/include/net/ipx.h
deleted file mode 100644
index 9d1342807b59..000000000000
--- a/include/net/ipx.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NET_INET_IPX_H_
-#define _NET_INET_IPX_H_
-/*
- * The following information is in its entirety obtained from:
- *
- * Novell 'IPX Router Specification' Version 1.10
- * Part No. 107-000029-001
- *
- * Which is available from ftp.novell.com
- */
-
-#include <linux/netdevice.h>
-#include <net/datalink.h>
-#include <linux/ipx.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/refcount.h>
-
-struct ipx_address {
- __be32 net;
- __u8 node[IPX_NODE_LEN];
- __be16 sock;
-};
-
-#define ipx_broadcast_node "\377\377\377\377\377\377"
-#define ipx_this_node "\0\0\0\0\0\0"
-
-#define IPX_MAX_PPROP_HOPS 8
-
-struct ipxhdr {
- __be16 ipx_checksum __packed;
-#define IPX_NO_CHECKSUM cpu_to_be16(0xFFFF)
- __be16 ipx_pktsize __packed;
- __u8 ipx_tctrl;
- __u8 ipx_type;
-#define IPX_TYPE_UNKNOWN 0x00
-#define IPX_TYPE_RIP 0x01 /* may also be 0 */
-#define IPX_TYPE_SAP 0x04 /* may also be 0 */
-#define IPX_TYPE_SPX 0x05 /* SPX protocol */
-#define IPX_TYPE_NCP 0x11 /* $lots for docs on this (SPIT) */
-#define IPX_TYPE_PPROP 0x14 /* complicated flood fill brdcast */
- struct ipx_address ipx_dest __packed;
- struct ipx_address ipx_source __packed;
-};
-
-/* From af_ipx.c */
-extern int sysctl_ipx_pprop_broadcasting;
-
-struct ipx_interface {
- /* IPX address */
- __be32 if_netnum;
- unsigned char if_node[IPX_NODE_LEN];
- refcount_t refcnt;
-
- /* physical device info */
- struct net_device *if_dev;
- struct datalink_proto *if_dlink;
- __be16 if_dlink_type;
-
- /* socket support */
- unsigned short if_sknum;
- struct hlist_head if_sklist;
- spinlock_t if_sklist_lock;
-
- /* administrative overhead */
- int if_ipx_offset;
- unsigned char if_internal;
- unsigned char if_primary;
-
- struct list_head node; /* node in ipx_interfaces list */
-};
-
-struct ipx_route {
- __be32 ir_net;
- struct ipx_interface *ir_intrfc;
- unsigned char ir_routed;
- unsigned char ir_router_node[IPX_NODE_LEN];
- struct list_head node; /* node in ipx_routes list */
- refcount_t refcnt;
-};
-
-struct ipx_cb {
- u8 ipx_tctrl;
- __be32 ipx_dest_net;
- __be32 ipx_source_net;
- struct {
- __be32 netnum;
- int index;
- } last_hop;
-};
-
-#include <net/sock.h>
-
-struct ipx_sock {
- /* struct sock has to be the first member of ipx_sock */
- struct sock sk;
- struct ipx_address dest_addr;
- struct ipx_interface *intrfc;
- __be16 port;
-#ifdef CONFIG_IPX_INTERN
- unsigned char node[IPX_NODE_LEN];
-#endif
- unsigned short type;
- /*
- * To handle special ncp connection-handling sockets for mars_nwe,
- * the connection number must be stored in the socket.
- */
- unsigned short ipx_ncp_conn;
-};
-
-static inline struct ipx_sock *ipx_sk(struct sock *sk)
-{
- return (struct ipx_sock *)sk;
-}
-
-#define IPX_SKB_CB(__skb) ((struct ipx_cb *)&((__skb)->cb[0]))
-
-#define IPX_MIN_EPHEMERAL_SOCKET 0x4000
-#define IPX_MAX_EPHEMERAL_SOCKET 0x7fff
-
-extern struct list_head ipx_routes;
-extern rwlock_t ipx_routes_lock;
-
-extern struct list_head ipx_interfaces;
-struct ipx_interface *ipx_interfaces_head(void);
-extern spinlock_t ipx_interfaces_lock;
-
-extern struct ipx_interface *ipx_primary_net;
-
-int ipx_proc_init(void);
-void ipx_proc_exit(void);
-
-const char *ipx_frame_name(__be16);
-const char *ipx_device_name(struct ipx_interface *intrfc);
-
-static __inline__ void ipxitf_hold(struct ipx_interface *intrfc)
-{
- refcount_inc(&intrfc->refcnt);
-}
-
-void ipxitf_down(struct ipx_interface *intrfc);
-struct ipx_interface *ipxitf_find_using_net(__be32 net);
-int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb, char *node);
-__be16 ipx_cksum(struct ipxhdr *packet, int length);
-int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc,
- unsigned char *node);
-void ipxrtr_del_routes(struct ipx_interface *intrfc);
-int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
- struct msghdr *msg, size_t len, int noblock);
-int ipxrtr_route_skb(struct sk_buff *skb);
-struct ipx_route *ipxrtr_lookup(__be32 net);
-int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
-
-static __inline__ void ipxitf_put(struct ipx_interface *intrfc)
-{
- if (refcount_dec_and_test(&intrfc->refcnt))
- ipxitf_down(intrfc);
-}
-
-static __inline__ void ipxrtr_hold(struct ipx_route *rt)
-{
- refcount_inc(&rt->refcnt);
-}
-
-static __inline__ void ipxrtr_put(struct ipx_route *rt)
-{
- if (refcount_dec_and_test(&rt->refcnt))
- kfree(rt);
-}
-#endif /* _NET_INET_IPX_H_ */
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index 14a490246be9..df85d19fbf84 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -112,10 +112,12 @@ enum iucv_tx_notify {
struct iucv_sock {
struct sock sk;
- char src_user_id[8];
- char src_name[8];
- char dst_user_id[8];
- char dst_name[8];
+ struct_group(init,
+ char src_user_id[8];
+ char src_name[8];
+ char dst_user_id[8];
+ char dst_name[8];
+ );
struct list_head accept_q;
spinlock_t accept_q_lock;
struct sock *parent;
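struct_group() (from <linux/stddef.h>) wraps the four 8-byte id/name arrays in a tagged anonymous union, so they remain individually addressable while also being reachable as the single member 'init'. Whole-block copies then pass FORTIFY_SOURCE bounds checking instead of looking like an overread of src_user_id:

	/* Zero all four fields in one bounds-checked operation: */
	memset(&iucv->init, 0, sizeof(iucv->init));

	/* Individual members keep working exactly as before: */
	memcpy(iucv->src_name, name, 8);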
@@ -128,11 +130,12 @@ struct iucv_sock {
u8 flags;
u16 msglimit;
u16 msglimit_peer;
+ atomic_t skbs_in_xmit;
atomic_t msg_sent;
atomic_t msg_recv;
atomic_t pendings;
int transport;
- void (*sk_txnotify)(struct sk_buff *skb,
+ void (*sk_txnotify)(struct sock *sk,
enum iucv_tx_notify n);
};
@@ -158,12 +161,4 @@ struct iucv_sock_list {
atomic_t autobind_name;
};
-__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
- poll_table *wait);
-void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
-void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
-void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
-void iucv_accept_unlink(struct sock *sk);
-struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock);
-
#endif /* __IUCV_H */
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index e942372b077b..031c661aa14d 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -10,6 +10,16 @@
#include <net/dst.h>
#include <net/fib_rules.h>
+enum l3mdev_type {
+ L3MDEV_TYPE_UNSPEC,
+ L3MDEV_TYPE_VRF,
+ __L3MDEV_TYPE_MAX
+};
+
+#define L3MDEV_TYPE_MAX (__L3MDEV_TYPE_MAX - 1)
+
+typedef int (*lookup_by_table_id_t)(struct net *net, u32 table_id);
+
/**
* struct l3mdev_ops - l3mdev operations
*
@@ -37,6 +47,15 @@ struct l3mdev_ops {
#ifdef CONFIG_NET_L3_MASTER_DEV
+int l3mdev_table_lookup_register(enum l3mdev_type l3type,
+ lookup_by_table_id_t fn);
+
+void l3mdev_table_lookup_unregister(enum l3mdev_type l3type,
+ lookup_by_table_id_t fn);
+
+int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type, struct net *net,
+ u32 table_id);
+
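The registration pair lets an l3mdev implementation (currently only VRF) publish a table-id-to-ifindex lookup without the core linking against it directly. A hedged sketch of the provider side (names illustrative; the real callback lives in drivers/net/vrf.c):

	static int demo_ifindex_by_table_id(struct net *net, u32 table_id)
	{
		/* map the FIB table to the owning device, or fail */
		return -ENODEV;
	}

	static int __init demo_register(void)
	{
		return l3mdev_table_lookup_register(L3MDEV_TYPE_VRF,
						    demo_ifindex_by_table_id);
	}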
int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
struct fib_lookup_arg *arg);
@@ -281,6 +300,26 @@ struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb)
}
static inline
+int l3mdev_table_lookup_register(enum l3mdev_type l3type,
+ lookup_by_table_id_t fn)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline
+void l3mdev_table_lookup_unregister(enum l3mdev_type l3type,
+ lookup_by_table_id_t fn)
+{
+}
+
+static inline
+int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type, struct net *net,
+ u32 table_id)
+{
+ return -ENODEV;
+}
+
+static inline
int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
struct fib_lookup_arg *arg)
{
diff --git a/include/net/lapb.h b/include/net/lapb.h
index ccc3d1f020b0..124ee122f2c8 100644
--- a/include/net/lapb.h
+++ b/include/net/lapb.h
@@ -92,6 +92,7 @@ struct lapb_cb {
unsigned short n2, n2count;
unsigned short t1, t2;
struct timer_list t1timer, t2timer;
+ bool t1timer_running, t2timer_running;
/* Internal control information */
struct sk_buff_head write_queue;
@@ -103,6 +104,7 @@ struct lapb_cb {
struct lapb_frame frmr_data;
unsigned char frmr_type;
+ spinlock_t lock;
refcount_t refcnt;
};
diff --git a/include/net/llc.h b/include/net/llc.h
index df282d9b4017..e250dca03963 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -72,7 +72,9 @@ struct llc_sap {
static inline
struct hlist_head *llc_sk_dev_hash(struct llc_sap *sap, int ifindex)
{
- return &sap->sk_dev_hash[ifindex % LLC_SK_DEV_HASH_ENTRIES];
+ u32 bucket = hash_32(ifindex, LLC_SK_DEV_HASH_BITS);
+
+ return &sap->sk_dev_hash[bucket];
}
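The switch from 'ifindex % LLC_SK_DEV_HASH_ENTRIES' to hash_32() matters because ifindexes are assigned sequentially and can be influenced by an unprivileged user creating interfaces; a plain modulo lets many sockets be forced into one bucket, while hash_32(val, bits) multiplies by a large odd constant and keeps the top bits, spreading consecutive values. The primitive itself:

	#include <linux/hash.h>

	u32 bucket = hash_32(ifindex, 6);	/* 6 bits -> one of 64 buckets */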
static inline
@@ -133,7 +135,7 @@ static inline void llc_sap_put(struct llc_sap *sap)
struct llc_sap *llc_sap_find(unsigned char sap_value);
int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
- unsigned char *dmac, unsigned char dsap);
+ const unsigned char *dmac, unsigned char dsap);
void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb);
void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb);
diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
index e766300b3e99..3e1f76786d7b 100644
--- a/include/net/llc_c_ac.h
+++ b/include/net/llc_c_ac.h
@@ -16,6 +16,13 @@
* Connection state transition actions
* (Fb = F bit; Pb = P bit; Xb = X bit)
*/
+
+#include <linux/types.h>
+
+struct sk_buff;
+struct sock;
+struct timer_list;
+
#define LLC_CONN_AC_CLR_REMOTE_BUSY 1
#define LLC_CONN_AC_CONN_IND 2
#define LLC_CONN_AC_CONN_CONFIRM 3
diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
index 48f3f891b2f9..53823d61d8b6 100644
--- a/include/net/llc_c_st.h
+++ b/include/net/llc_c_st.h
@@ -11,6 +11,10 @@
*
* See the GNU General Public License for more details.
*/
+
+#include <net/llc_c_ac.h>
+#include <net/llc_c_ev.h>
+
/* Connection component state management */
/* connection states */
#define LLC_CONN_OUT_OF_SVC 0 /* prior to allocation */
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index ea985aa7a6c5..2c1ea3414640 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -38,6 +38,7 @@ struct llc_sock {
struct llc_addr laddr; /* lsap/mac pair */
struct llc_addr daddr; /* dsap/mac pair */
struct net_device *dev; /* device to send to remote */
+ netdevice_tracker dev_tracker;
u32 copied_seq; /* head of yet unread data */
u8 retry_count; /* number of retries */
u8 ack_must_be_send;
diff --git a/include/net/llc_if.h b/include/net/llc_if.h
index 8d5c543cd620..c72570a21a4f 100644
--- a/include/net/llc_if.h
+++ b/include/net/llc_if.h
@@ -62,7 +62,8 @@
#define LLC_STATUS_CONFLICT 7 /* disconnect conn */
#define LLC_STATUS_RESET_DONE 8 /* */
-int llc_establish_connection(struct sock *sk, u8 *lmac, u8 *dmac, u8 dsap);
+int llc_establish_connection(struct sock *sk, const u8 *lmac, u8 *dmac,
+ u8 dsap);
int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb);
int llc_send_disc(struct sock *sk);
#endif /* LLC_IF_H */
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index c0f0a13ed818..49aa79c7b278 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -15,9 +15,11 @@
#include <linux/if_ether.h>
/* Lengths of frame formats */
-#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */
-#define LLC_PDU_LEN_S 4
-#define LLC_PDU_LEN_U 3 /* header and 1 control byte */
+#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */
+#define LLC_PDU_LEN_S 4
+#define LLC_PDU_LEN_U 3 /* header and 1 control byte */
+/* header and 1 control byte and XID info */
+#define LLC_PDU_LEN_U_XID (LLC_PDU_LEN_U + sizeof(struct llc_xid_info))
/* Known SAP addresses */
#define LLC_GLOBAL_SAP 0xFF
#define LLC_NULL_SAP 0x00 /* not network-layer visible */
@@ -50,9 +52,10 @@
#define LLC_PDU_TYPE_U_MASK 0x03 /* 8-bit control field */
#define LLC_PDU_TYPE_MASK 0x03
-#define LLC_PDU_TYPE_I 0 /* first bit */
-#define LLC_PDU_TYPE_S 1 /* first two bits */
-#define LLC_PDU_TYPE_U 3 /* first two bits */
+#define LLC_PDU_TYPE_I 0 /* first bit */
+#define LLC_PDU_TYPE_S 1 /* first two bits */
+#define LLC_PDU_TYPE_U 3 /* first two bits */
+#define LLC_PDU_TYPE_U_XID 4 /* private type for detecting XID commands */
#define LLC_PDU_TYPE_IS_I(pdu) \
((!(pdu->ctrl_1 & LLC_PDU_TYPE_I_MASK)) ? 1 : 0)
@@ -230,9 +233,18 @@ static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb)
static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
u8 ssap, u8 dsap, u8 cr)
{
- const int hlen = type == LLC_PDU_TYPE_U ? 3 : 4;
+ int hlen = 4; /* default value for I and S types */
struct llc_pdu_un *pdu;
+ switch (type) {
+ case LLC_PDU_TYPE_U:
+ hlen = 3;
+ break;
+ case LLC_PDU_TYPE_U_XID:
+ hlen = 6;
+ break;
+ }
+
skb_push(skb, hlen);
skb_reset_network_header(skb);
pdu = llc_pdu_un_hdr(skb);
@@ -374,7 +386,10 @@ static inline void llc_pdu_init_as_xid_cmd(struct sk_buff *skb,
xid_info->fmt_id = LLC_XID_FMT_ID; /* 0x81 */
xid_info->type = svcs_supported;
xid_info->rw = rx_window << 1; /* size of receive window */
- skb_put(skb, sizeof(struct llc_xid_info));
+
+ /* no need to push/put since llc_pdu_header_init() has already
+ * pushed 3 + 3 bytes
+ */
}
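The sizing is the point of the LLC_PDU_TYPE_U_XID change: an XID PDU is a 3-byte U-format header immediately followed by the 3-byte struct llc_xid_info, so llc_pdu_header_init() must reserve all 6 bytes up front (LLC_PDU_LEN_U_XID = 3 + 3). Previously only 3 bytes were pushed and the skb_put() removed above appended the XID info afterwards, which could overrun the tailroom of a tightly sized skb.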
/**
diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
index a61b98c108ee..f71790305bc9 100644
--- a/include/net/llc_s_ac.h
+++ b/include/net/llc_s_ac.h
@@ -11,6 +11,10 @@
*
* See the GNU General Public License for more details.
*/
+
+struct llc_sap;
+struct sk_buff;
+
/* SAP component actions */
#define SAP_ACT_UNITDATA_IND 1
#define SAP_ACT_SEND_UI 2
diff --git a/include/net/llc_s_ev.h b/include/net/llc_s_ev.h
index 84db3a59ed28..fb7df1d70af3 100644
--- a/include/net/llc_s_ev.h
+++ b/include/net/llc_s_ev.h
@@ -13,6 +13,7 @@
*/
#include <linux/skbuff.h>
+#include <net/llc.h>
/* Defines SAP component events */
/* Types of events (possible values in 'ev->type') */
diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
index c4359e203013..ed5b2fa40d32 100644
--- a/include/net/llc_s_st.h
+++ b/include/net/llc_s_st.h
@@ -12,6 +12,12 @@
* See the GNU General Public License for more details.
*/
+#include <linux/types.h>
+#include <net/llc_s_ac.h>
+#include <net/llc_s_ev.h>
+
+struct llc_sap_state_trans;
+
#define LLC_NR_SAP_STATES 2 /* size of state table */
/* structures and types */
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index 5d6c5b1fc695..6f15e6fa154e 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -30,11 +30,11 @@ struct lwtunnel_state {
int (*orig_output)(struct net *net, struct sock *sk, struct sk_buff *skb);
int (*orig_input)(struct sk_buff *);
struct rcu_head rcu;
- __u8 data[0];
+ __u8 data[];
};
struct lwtunnel_encap_ops {
- int (*build_state)(struct nlattr *encap,
+ int (*build_state)(struct net *net, struct nlattr *encap,
unsigned int family, const void *cfg,
struct lwtunnel_state **ts,
struct netlink_ext_ack *extack);
@@ -51,6 +51,9 @@ struct lwtunnel_encap_ops {
};
#ifdef CONFIG_LWTUNNEL
+
+DECLARE_STATIC_KEY_FALSE(nf_hooks_lwtunnel_enabled);
+
void lwtstate_free(struct lwtunnel_state *lws);
static inline struct lwtunnel_state *
@@ -113,7 +116,7 @@ int lwtunnel_valid_encap_type(u16 encap_type,
struct netlink_ext_ack *extack);
int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len,
struct netlink_ext_ack *extack);
-int lwtunnel_build_state(u16 encap_type,
+int lwtunnel_build_state(struct net *net, u16 encap_type,
struct nlattr *encap,
unsigned int family, const void *cfg,
struct lwtunnel_state **lws,
@@ -209,7 +212,7 @@ static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len,
return 0;
}
-static inline int lwtunnel_build_state(u16 encap_type,
+static inline int lwtunnel_build_state(struct net *net, u16 encap_type,
struct nlattr *encap,
unsigned int family, const void *cfg,
struct lwtunnel_state **lws,
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 77e6b5a83b06..ac2bad57933f 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -7,7 +7,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2022 Intel Corporation
*/
#ifndef MAC80211_H
@@ -18,6 +18,7 @@
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/ieee80211.h>
+#include <linux/lockdep.h>
#include <net/cfg80211.h>
#include <net/codel.h>
#include <net/ieee80211_radiotap.h>
@@ -125,6 +126,22 @@
* via the usual ieee80211_tx_dequeue).
*/
+/**
+ * DOC: HW timestamping
+ *
+ * Timing Measurement and Fine Timing Measurement require accurate timestamps
+ * of the action frames TX/RX and their respective acks.
+ *
+ * To report hardware timestamps for Timing Measurement or Fine Timing
+ * Measurement frame RX, the low level driver should set the SKB's hwtstamp
+ * field to the frame RX timestamp and report the ack TX timestamp in the
+ * ieee80211_rx_status struct.
+ *
+ * Similarly, to report hardware timestamps for Timing Measurement or Fine
+ * Timing Measurement frame TX, the driver should set the SKB's hwtstamp field
+ * to the frame TX timestamp and report the ack RX timestamp in the
+ * ieee80211_tx_status struct.
+ */
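In code, the RX half of that contract amounts to the driver stamping the skb before handing it to mac80211; a minimal hedged sketch (reading the hardware clock is of course device-specific):

	static void demo_rx_frame(struct ieee80211_hw *hw, struct sk_buff *skb,
				  u64 hw_rx_ns)
	{
		skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(hw_rx_ns);
		ieee80211_rx_irqsafe(hw, skb);
	}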
struct device;
/**
@@ -230,7 +247,7 @@ struct ieee80211_chanctx_conf {
bool radar_enabled;
- u8 drv_priv[0] __aligned(sizeof(void *));
+ u8 drv_priv[] __aligned(sizeof(void *));
};
/**
@@ -261,11 +278,13 @@ enum ieee80211_chanctx_switch_mode {
* done.
*
* @vif: the vif that should be switched from old_ctx to new_ctx
+ * @link_conf: the link conf that's switching
* @old_ctx: the old context to which the vif was assigned
* @new_ctx: the new context to which the vif must be assigned
*/
struct ieee80211_vif_chanctx_switch {
struct ieee80211_vif *vif;
+ struct ieee80211_bss_conf *link_conf;
struct ieee80211_chanctx_conf *old_ctx;
struct ieee80211_chanctx_conf *new_ctx;
};
@@ -273,8 +292,8 @@ struct ieee80211_vif_chanctx_switch {
/**
* enum ieee80211_bss_change - BSS change notification flags
*
- * These flags are used with the bss_info_changed() callback
- * to indicate which BSS parameter changed.
+ * These flags are used with the bss_info_changed(), link_info_changed()
+ * and vif_cfg_changed() callbacks to indicate which parameter(s) changed.
*
* @BSS_CHANGED_ASSOC: association status changed (associated/disassociated),
* also implies a change in the AID.
@@ -316,6 +335,10 @@ struct ieee80211_vif_chanctx_switch {
* functionality changed for this BSS (AP mode).
* @BSS_CHANGED_TWT: TWT status changed
* @BSS_CHANGED_HE_OBSS_PD: OBSS Packet Detection status changed.
+ * @BSS_CHANGED_HE_BSS_COLOR: BSS Color has changed
+ * @BSS_CHANGED_FILS_DISCOVERY: FILS discovery status changed.
+ * @BSS_CHANGED_UNSOL_BCAST_PROBE_RESP: Unsolicited broadcast probe response
+ * status changed.
*
*/
enum ieee80211_bss_change {
@@ -348,6 +371,9 @@ enum ieee80211_bss_change {
BSS_CHANGED_FTM_RESPONDER = 1<<26,
BSS_CHANGED_TWT = 1<<27,
BSS_CHANGED_HE_OBSS_PD = 1<<28,
+ BSS_CHANGED_HE_BSS_COLOR = 1<<29,
+ BSS_CHANGED_FILS_DISCOVERY = 1<<30,
+ BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = 1<<31,
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -489,14 +515,26 @@ struct ieee80211_ftm_responder_params {
};
/**
+ * struct ieee80211_fils_discovery - FILS discovery parameters from
+ * IEEE Std 802.11ai-2016, Annex C.3 MIB detail.
+ *
+ * @min_interval: Minimum packet interval in TUs (0 - 10000)
+ * @max_interval: Maximum packet interval in TUs (0 - 10000)
+ */
+struct ieee80211_fils_discovery {
+ u32 min_interval;
+ u32 max_interval;
+};
+
+/**
* struct ieee80211_bss_conf - holds the BSS's changing parameters
*
* This structure keeps information about a BSS (and an association
* to that BSS) that can change during the lifetime of the BSS.
*
- * @bss_color: 6-bit value to mark inter-BSS frame, if BSS supports HE
+ * @addr: (link) address used locally
+ * @link_id: link ID, or 0 for non-MLO
* @htc_trig_based_pkt_ext: default PE in 4us units, if BSS supports HE
- * @multi_sta_back_32bit: supports BA bitmap of 32-bits in Multi-STA BACK
* @uora_exists: is the UORA element advertised by AP
* @ack_enabled: indicates support to receive a multi-TID that solicits either
* ACK, BACK or both
@@ -507,11 +545,8 @@ struct ieee80211_ftm_responder_params {
* mode only, set if the AP advertises TWT responder role)
* @twt_responder: does this BSS support TWT requester (relevant for managed
* mode only, set if the AP advertises TWT responder role)
- * @assoc: association status
- * @ibss_joined: indicates whether this station is part of an IBSS
- * or not
- * @ibss_creator: indicates if a new IBSS network is being created
- * @aid: association ID number, valid only when @assoc is true
+ * @twt_protected: does this BSS support protected TWT frames
+ * @twt_broadcast: does this BSS support broadcast TWT
* @use_cts_prot: use CTS protection
* @use_short_preamble: use 802.11b short preamble
* @use_short_slot: use short slot time (only relevant for ERP)
@@ -532,6 +567,8 @@ struct ieee80211_ftm_responder_params {
* IMPORTANT: These three sync_* parameters would possibly be out of sync
* by the time the driver will use them. The synchronized view is currently
* guaranteed only in certain callbacks.
+ * Note also that this is not used with MLD associations, mac80211 doesn't
+ * know how to track beacons for all of the links for this.
* @beacon_int: beacon interval
* @assoc_capability: capabilities taken from assoc resp
* @basic_rates: bitmap of basic rates, each bit stands for an
@@ -557,23 +594,9 @@ struct ieee80211_ftm_responder_params {
* threshold event and can't be enabled simultaneously with it.
* @cqm_rssi_high: Connection quality monitor RSSI upper threshold.
* @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis
- * @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The
- * may filter ARP queries targeted for other addresses than listed here.
- * The driver must allow ARP queries targeted for all address listed here
- * to pass through. An empty list implies no ARP queries need to pass.
- * @arp_addr_cnt: Number of addresses currently on the list. Note that this
- * may be larger than %IEEE80211_BSS_ARP_ADDR_LIST_LEN (the arp_addr_list
- * array size), it's up to the driver what to do in that case.
* @qos: This is a QoS-enabled BSS.
- * @idle: This interface is idle. There's also a global idle flag in the
- * hardware config which may be more appropriate depending on what
- * your driver/device needs to do.
- * @ps: power-save mode (STA only). This flag is NOT affected by
- * offchannel/dynamic_ps operations.
- * @ssid: The SSID of the current vif. Valid in AP and IBSS mode.
- * @ssid_len: Length of SSID given in @ssid.
* @hidden_ssid: The SSID of the current vif is hidden. Only valid in AP-mode.
- * @txpower: TX power in dBm
+ * @txpower: TX power in dBm. INT_MIN means not configured.
* @txpower_type: TX power adjustment used to control per packet Transmit
* Power Control (TPC) in lower driver for the current vif. In particular
* TPC is enabled if value passed in %txpower_type is
@@ -602,25 +625,47 @@ struct ieee80211_ftm_responder_params {
* nontransmitted BSSIDs
* @profile_periodicity: the least number of beacon frames need to be received
* in order to discover all the nontransmitted BSSIDs in the set.
- * @he_operation: HE operation information of the AP we are connected to
+ * @he_oper: HE operation information of the BSS (AP/Mesh) or of the AP we are
+ * connected to (STA)
* @he_obss_pd: OBSS Packet Detection parameters.
+ * @he_bss_color: BSS coloring settings, if BSS supports HE
+ * @fils_discovery: FILS discovery configuration
+ * @unsol_bcast_probe_resp_interval: Unsolicited broadcast probe response
+ * interval.
+ * @beacon_tx_rate: The configured beacon transmit rate that needs to be passed
+ * to driver when rate control is offloaded to firmware.
+ * @power_type: power type of BSS for 6 GHz
+ * @tx_pwr_env: transmit power envelope array of BSS.
+ * @tx_pwr_env_num: number of @tx_pwr_env.
+ * @pwr_reduction: power constraint of BSS.
+ * @eht_support: does this BSS support EHT
+ * @csa_active: marks whether a channel switch is going on. Internally it is
+ * write-protected by sdata_lock and local->mtx so holding either is fine
+ * for read access.
+ * @mu_mimo_owner: indicates interface owns MU-MIMO capability
+ * @chanctx_conf: The channel context this interface is assigned to, or %NULL
+ * when it is not assigned. This pointer is RCU-protected due to the TX
+ * path needing to access it; even though the netdev carrier will always
+ * be off when it is %NULL there can still be races and packets could be
+ * processed after it switches back to %NULL.
+ * @color_change_active: marks whether a color change is ongoing. Internally it is
+ * write-protected by sdata_lock and local->mtx so holding either is fine
+ * for read access.
+ * @color_change_color: the bss color that will be used after the change.
*/
struct ieee80211_bss_conf {
const u8 *bssid;
- u8 bss_color;
+ unsigned int link_id;
+ u8 addr[ETH_ALEN] __aligned(2);
u8 htc_trig_based_pkt_ext;
- bool multi_sta_back_32bit;
bool uora_exists;
- bool ack_enabled;
u8 uora_ocw_range;
u16 frame_time_rts_th;
bool he_support;
bool twt_requester;
bool twt_responder;
- /* association related data */
- bool assoc, ibss_joined;
- bool ibss_creator;
- u16 aid;
+ bool twt_protected;
+ bool twt_broadcast;
/* erp related data */
bool use_cts_prot;
bool use_short_preamble;
@@ -642,13 +687,7 @@ struct ieee80211_bss_conf {
s32 cqm_rssi_high;
struct cfg80211_chan_def chandef;
struct ieee80211_mu_group_data mu_group;
- __be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
- int arp_addr_cnt;
bool qos;
- bool idle;
- bool ps;
- u8 ssid[IEEE80211_MAX_SSID_LEN];
- size_t ssid_len;
bool hidden_ssid;
int txpower;
enum nl80211_tx_power_setting txpower_type;
@@ -665,8 +704,27 @@ struct ieee80211_bss_conf {
u8 bssid_indicator;
bool ema_ap;
u8 profile_periodicity;
- struct ieee80211_he_operation he_operation;
+ struct {
+ u32 params;
+ u16 nss_set;
+ } he_oper;
struct ieee80211_he_obss_pd he_obss_pd;
+ struct cfg80211_he_bss_color he_bss_color;
+ struct ieee80211_fils_discovery fils_discovery;
+ u32 unsol_bcast_probe_resp_interval;
+ struct cfg80211_bitrate_mask beacon_tx_rate;
+ enum ieee80211_ap_reg_power power_type;
+ struct ieee80211_tx_pwr_env tx_pwr_env[IEEE80211_TPE_MAX_IE_COUNT];
+ u8 tx_pwr_env_num;
+ u8 pwr_reduction;
+ bool eht_support;
+
+ bool csa_active;
+ bool mu_mimo_owner;
+ struct ieee80211_chanctx_conf __rcu *chanctx_conf;
+
+ bool color_change_active;
+ u8 color_change_color;
};
/**
@@ -713,9 +771,8 @@ struct ieee80211_bss_conf {
* @IEEE80211_TX_INTFL_OFFCHAN_TX_OK: Internal to mac80211. Used to indicate
* that a frame can be transmitted while the queues are stopped for
* off-channel operation.
- * @IEEE80211_TX_INTFL_NEED_TXPROCESSING: completely internal to mac80211,
- * used to indicate that a pending frame requires TX processing before
- * it can be sent out.
+ * @IEEE80211_TX_CTL_HW_80211_ENCAP: This frame uses hardware encapsulation
+ * (header conversion)
* @IEEE80211_TX_INTFL_RETRIED: completely internal to mac80211,
* used to indicate that a frame was already retried due to PS
* @IEEE80211_TX_INTFL_DONT_ENCRYPT: completely internal to mac80211,
@@ -784,7 +841,7 @@ enum mac80211_tx_info_flags {
IEEE80211_TX_STAT_AMPDU_NO_BACK = BIT(11),
IEEE80211_TX_CTL_RATE_CTRL_PROBE = BIT(12),
IEEE80211_TX_INTFL_OFFCHAN_TX_OK = BIT(13),
- IEEE80211_TX_INTFL_NEED_TXPROCESSING = BIT(14),
+ IEEE80211_TX_CTL_HW_80211_ENCAP = BIT(14),
IEEE80211_TX_INTFL_RETRIED = BIT(15),
IEEE80211_TX_INTFL_DONT_ENCRYPT = BIT(16),
IEEE80211_TX_CTL_NO_PS_BUFFER = BIT(17),
@@ -805,6 +862,8 @@ enum mac80211_tx_info_flags {
#define IEEE80211_TX_CTL_STBC_SHIFT 23
+#define IEEE80211_TX_RC_S1G_MCS IEEE80211_TX_RC_VHT_MCS
+
/**
* enum mac80211_tx_control_flags - flags to describe transmit control
*
@@ -816,6 +875,22 @@ enum mac80211_tx_info_flags {
* @IEEE80211_TX_CTRL_AMSDU: This frame is an A-MSDU frame
* @IEEE80211_TX_CTRL_FAST_XMIT: This frame is going through the fast_xmit path
* @IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP: This frame skips mesh path lookup
+ * @IEEE80211_TX_INTCFL_NEED_TXPROCESSING: completely internal to mac80211,
+ * used to indicate that a pending frame requires TX processing before
+ * it can be sent out.
+ * @IEEE80211_TX_CTRL_NO_SEQNO: Do not overwrite the sequence number that
+ * has already been assigned to this frame.
+ * @IEEE80211_TX_CTRL_DONT_REORDER: This frame should not be reordered
+ * relative to other frames that have this flag set, independent
+ * of their QoS TID or other priority field values.
+ * @IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX: first MLO TX, used mostly internally
+ * for sequence number assignment
+ * @IEEE80211_TX_CTRL_MLO_LINK: If not @IEEE80211_LINK_UNSPECIFIED, this
+ * frame should be transmitted on the specific link. This really is
+ * only relevant for frames that do not have data present, and is
+ * also not used for 802.3 format frames. Note that even if the frame
+ * is on a specific link, address translation might still apply if
+ * it's intended for an MLD.
*
* These flags are used in tx_info->control.flags.
*/
@@ -826,6 +901,27 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_CTRL_AMSDU = BIT(3),
IEEE80211_TX_CTRL_FAST_XMIT = BIT(4),
IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP = BIT(5),
+ IEEE80211_TX_INTCFL_NEED_TXPROCESSING = BIT(6),
+ IEEE80211_TX_CTRL_NO_SEQNO = BIT(7),
+ IEEE80211_TX_CTRL_DONT_REORDER = BIT(8),
+ IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX = BIT(9),
+ IEEE80211_TX_CTRL_MLO_LINK = 0xf0000000,
+};
+
+#define IEEE80211_LINK_UNSPECIFIED 0xf
+#define IEEE80211_TX_CTRL_MLO_LINK_UNSPEC \
+ u32_encode_bits(IEEE80211_LINK_UNSPECIFIED, \
+ IEEE80211_TX_CTRL_MLO_LINK)
+
+/**
+ * enum mac80211_tx_status_flags - flags to describe transmit status
+ *
+ * @IEEE80211_TX_STATUS_ACK_SIGNAL_VALID: ACK signal is valid
+ *
+ * These flags are used in tx_info->status.flags.
+ */
+enum mac80211_tx_status_flags {
+ IEEE80211_TX_STATUS_ACK_SIGNAL_VALID = BIT(0),
};
/*
@@ -964,7 +1060,9 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate)
* (3) TX status information - driver tells mac80211 what happened
*
* @flags: transmit info flags, defined above
- * @band: the band to transmit on (use for checking for races)
+ * @band: the band to transmit on (use e.g. for checking for races),
+ * not valid if the interface is an MLD since we won't know which
+ * link the frame will be transmitted on
* @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC
* @ack_frame_id: internal frame ID for TX status, used internally
* @tx_time_est: TX time estimate in units of 4us, used internally
@@ -989,8 +1087,9 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate)
* @status.ampdu_ack_len: AMPDU ack length
* @status.ampdu_len: AMPDU length
* @status.antenna: (legacy, kept only for iwlegacy)
- * @status.tx_time: airtime consumed for transmission
- * @status.is_valid_ack_signal: ACK signal is valid
+ * @status.tx_time: airtime consumed for transmission; note this is only
+ * used for WMM AC, not for airtime fairness
+ * @status.flags: status flags, see &enum mac80211_tx_status_flags
* @status.status_driver_data: driver use area
* @ack: union part for pure ACK data
* @ack.cookie: cookie for the ACK
@@ -1043,8 +1142,8 @@ struct ieee80211_tx_info {
u8 ampdu_len;
u8 antenna;
u16 tx_time;
- bool is_valid_ack_signal;
- void *status_driver_data[19 / sizeof(void *)];
+ u8 flags;
+ void *status_driver_data[18 / sizeof(void *)];
} status;
struct {
struct ieee80211_tx_rate driver_rates[
@@ -1075,19 +1174,46 @@ ieee80211_info_get_tx_time_est(struct ieee80211_tx_info *info)
return info->tx_time_est << 2;
}
+/**
+ * struct ieee80211_rate_status - mrr stage for status path
+ *
+ * This struct is used in struct ieee80211_tx_status to give drivers a
+ * dynamic way to report the rates and power levels used per packet.
+ *
+ * @rate_idx: The actual used rate.
+ * @try_count: How often the rate was tried.
+ * @tx_power_idx: An idx into the ieee80211_hw->tx_power_levels list of the
+ * corresponding wifi hardware. The idx shall point to the power level
+ * that was used when sending the packet.
+ */
+struct ieee80211_rate_status {
+ struct rate_info rate_idx;
+ u8 try_count;
+ u8 tx_power_idx;
+};
+
/**
* struct ieee80211_tx_status - extended tx status info for rate control
*
* @sta: Station that the packet was transmitted for
* @info: Basic tx status information
* @skb: Packet skb (can be NULL if not provided by the driver)
- * @rate: The TX rate that was used when sending the packet
+ * @rates: Mrr stages that were used when sending the packet
+ * @n_rates: Number of mrr stages (count of instances for @rates)
+ * @free_list: list where processed skbs are stored to be free'd by the driver
+ * @ack_hwtstamp: Hardware timestamp of the received ack in nanoseconds
+ * Only needed for Timing measurement and Fine timing measurement action
+ * frames. Only reported by devices that have timestamping enabled.
*/
struct ieee80211_tx_status {
struct ieee80211_sta *sta;
struct ieee80211_tx_info *info;
struct sk_buff *skb;
- struct rate_info *rate;
+ struct ieee80211_rate_status *rates;
+ ktime_t ack_hwtstamp;
+ u8 n_rates;
+
+ struct list_head *free_list;
};
/**
@@ -1130,9 +1256,9 @@ static inline struct ieee80211_rx_status *IEEE80211_SKB_RXCB(struct sk_buff *skb
* in the TX status but the rate control information (it does clear
* the count since you need to fill that in anyway).
*
- * NOTE: You can only use this function if you do NOT use
- * info->driver_data! Use info->rate_driver_data
- * instead if you need only the less space that allows.
+ * NOTE: While the rates array is kept intact, this will wipe all of the
+ * driver_data fields in info, so it's up to the driver to restore
+ * any fields it needs after calling this helper.
*/
static inline void
ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
@@ -1147,12 +1273,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
/* clear the rate counts */
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++)
info->status.rates[i].count = 0;
-
- BUILD_BUG_ON(
- offsetof(struct ieee80211_tx_info, status.ack_signal) != 20);
- memset(&info->status.ampdu_ack_len, 0,
- sizeof(struct ieee80211_tx_info) -
- offsetof(struct ieee80211_tx_info, status.ampdu_ack_len));
+ memset_after(&info->status, 0, rates);
}
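memset_after(obj, v, member), from <linux/string.h>, zeroes everything in *obj strictly after 'member' using offsetofend(), replacing the old open-coded offsetof arithmetic and its BUILD_BUG_ON guard. Conceptually the call above expands to:

	memset((u8 *)&info->status +
	       offsetofend(typeof(info->status), rates), 0,
	       sizeof(info->status) -
	       offsetofend(typeof(info->status), rates));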
@@ -1248,6 +1369,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* the "0-length PSDU" field included there. The value for it is
* in &struct ieee80211_rx_status. Note that if this value isn't
* known the frame shouldn't be reported.
+ * @RX_FLAG_8023: the frame has an 802.3 header (decap offload performed by
+ * hardware or driver)
*/
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = BIT(0),
@@ -1280,6 +1403,7 @@ enum mac80211_rx_flags {
RX_FLAG_RADIOTAP_HE_MU = BIT(27),
RX_FLAG_RADIOTAP_LSIG = BIT(28),
RX_FLAG_NO_PSDU = BIT(29),
+ RX_FLAG_8023 = BIT(30),
};
/**
@@ -1324,12 +1448,16 @@ enum mac80211_rx_encoding {
* (TSF) timer when the first data symbol (MPDU) arrived at the hardware.
* @boottime_ns: CLOCK_BOOTTIME timestamp the frame was received at, this is
* needed only for beacons and probe responses that update the scan cache.
+ * @ack_tx_hwtstamp: Hardware timestamp for the ack TX in nanoseconds. Only
+ * needed for Timing measurement and Fine timing measurement action frames.
+ * Only reported by devices that have timestamping enabled.
* @device_timestamp: arbitrary timestamp for the device, mac80211 doesn't use
* it but can store it and pass it back to the driver for synchronisation
* @band: the active band when this frame was received
* @freq: frequency the radio was tuned to when receiving this frame, in MHz
* This field must be set for management frames, but isn't strictly needed
* for data (other) frames - for those it only affects radiotap reporting.
+ * @freq_offset: @freq has a positive offset of 500 kHz.
* @signal: signal strength when receiving this frame, either in dBm, in dB or
* unspecified depending on the hardware capabilities flags
* @IEEE80211_HW_SIGNAL_*
@@ -1353,14 +1481,21 @@ enum mac80211_rx_encoding {
* each A-MPDU but the same for each subframe within one A-MPDU
* @ampdu_delimiter_crc: A-MPDU delimiter CRC
* @zero_length_psdu_type: radiotap type of the 0-length PSDU
+ * @link_valid: if the link which is identified by @link_id is valid. This flag
+ * is set only when connection is MLO.
+ * @link_id: id of the link used to receive the packet. This is used along with
+ * @link_valid.
*/
struct ieee80211_rx_status {
u64 mactime;
- u64 boottime_ns;
+ union {
+ u64 boottime_ns;
+ ktime_t ack_tx_hwtstamp;
+ };
u32 device_timestamp;
u32 ampdu_reference;
u32 flag;
- u16 freq;
+ u16 freq: 13, freq_offset: 1;
u8 enc_flags;
u8 encoding:2, bw:3, he_ru:3;
u8 he_gi:2, he_dcm:1;
@@ -1374,8 +1509,16 @@ struct ieee80211_rx_status {
s8 chain_signal[IEEE80211_MAX_CHAINS];
u8 ampdu_delimiter_crc;
u8 zero_length_psdu_type;
+ u8 link_valid:1, link_id:4;
};
+static inline u32
+ieee80211_rx_status_to_khz(struct ieee80211_rx_status *rx_status)
+{
+ return MHZ_TO_KHZ(rx_status->freq) +
+ (rx_status->freq_offset ? 500 : 0);
+}
+
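The 13-bit @freq plus the 500 kHz offset flag exists for S1G channels, whose center frequencies fall on 0.5 MHz boundaries that a whole-MHz field cannot express. Worked example: an S1G channel centered at 902.5 MHz is reported as freq = 902 and freq_offset = 1, so ieee80211_rx_status_to_khz() returns 902 * 1000 + 500 = 902500 kHz.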
/**
* struct ieee80211_vendor_radiotap - vendor radiotap data information
* @present: presence bitmap for this vendor namespace
@@ -1585,6 +1728,65 @@ enum ieee80211_vif_flags {
IEEE80211_VIF_GET_NOA_UPDATE = BIT(3),
};
+
+/**
+ * enum ieee80211_offload_flags - virtual interface offload flags
+ *
+ * @IEEE80211_OFFLOAD_ENCAP_ENABLED: tx encapsulation offload is enabled
+ * The driver supports sending frames passed as 802.3 frames by mac80211.
+ * It must also support sending 802.11 packets for the same interface.
+ * @IEEE80211_OFFLOAD_ENCAP_4ADDR: support 4-address mode encapsulation offload
+ * @IEEE80211_OFFLOAD_DECAP_ENABLED: rx encapsulation offload is enabled
+ * The driver supports passing received 802.11 frames as 802.3 frames to
+ * mac80211.
+ */
+
+enum ieee80211_offload_flags {
+ IEEE80211_OFFLOAD_ENCAP_ENABLED = BIT(0),
+ IEEE80211_OFFLOAD_ENCAP_4ADDR = BIT(1),
+ IEEE80211_OFFLOAD_DECAP_ENABLED = BIT(2),
+};
+
+/**
+ * struct ieee80211_vif_cfg - interface configuration
+ * @assoc: association status
+ * @ibss_joined: indicates whether this station is part of an IBSS or not
+ * @ibss_creator: indicates if a new IBSS network is being created
+ * @ps: power-save mode (STA only). This flag is NOT affected by
+ * offchannel/dynamic_ps operations.
+ * @aid: association ID number, valid only when @assoc is true
+ * @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The
+ * hardware may filter ARP queries targeted for other addresses than listed here.
+ * The driver must allow ARP queries targeted for all addresses listed here
+ * to pass through. An empty list implies no ARP queries need to pass.
+ * @arp_addr_cnt: Number of addresses currently on the list. Note that this
+ * may be larger than %IEEE80211_BSS_ARP_ADDR_LIST_LEN (the arp_addr_list
+ * array size), it's up to the driver what to do in that case.
+ * @ssid: The SSID of the current vif. Valid in AP and IBSS mode.
+ * @ssid_len: Length of SSID given in @ssid.
+ * @s1g: BSS is S1G BSS (affects Association Request format).
+ * @idle: This interface is idle. There's also a global idle flag in the
+ * hardware config which may be more appropriate depending on what
+ * your driver/device needs to do.
+ * @ap_addr: AP MLD address, or BSSID for non-MLO connections
+ * (station mode only)
+ */
+struct ieee80211_vif_cfg {
+ /* association related data */
+ bool assoc, ibss_joined;
+ bool ibss_creator;
+ bool ps;
+ u16 aid;
+
+ __be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
+ int arp_addr_cnt;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ size_t ssid_len;
+ bool s1g;
+ bool idle;
+ u8 ap_addr[ETH_ALEN] __aligned(2);
+};
+
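As a sketch of how a driver might consume this structure, the following (hypothetical) handler walks @arp_addr_list when .vif_cfg_changed reports %BSS_CHANGED_ARP_FILTER; pr_debug() stands in for a real firmware command.

#include <net/mac80211.h>

/* Sketch: push the ARP filter list to (imaginary) firmware. */
static void example_push_arp_filter(struct ieee80211_vif *vif)
{
        struct ieee80211_vif_cfg *cfg = &vif->cfg;
        int i, n = min_t(int, cfg->arp_addr_cnt,
                         IEEE80211_BSS_ARP_ADDR_LIST_LEN);

        for (i = 0; i < n; i++)
                pr_debug("ARP filter slot %d: %pI4\n", i,
                         &cfg->arp_addr_list[i]);
}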
/**
* struct ieee80211_vif - per-interface data
*
@@ -1592,66 +1794,83 @@ enum ieee80211_vif_flags {
* use during the life of a virtual interface.
*
* @type: type of this virtual interface
+ * @cfg: vif configuration, see &struct ieee80211_vif_cfg
* @bss_conf: BSS configuration for this interface, either our own
* or the BSS we're associated to
+ * @link_conf: in case of MLD, the per-link BSS configuration,
+ * indexed by link ID
+ * @valid_links: bitmap of valid links, or 0 for non-MLO.
+ * @active_links: The bitmap of active links, or 0 for non-MLO.
+ * The driver shouldn't change this directly, but use the
+ * API calls meant for that purpose.
* @addr: address of this interface
* @p2p: indicates whether this AP or STA interface is a p2p
* interface, i.e. a GO or p2p-sta respectively
- * @csa_active: marks whether a channel switch is going on. Internally it is
- * write-protected by sdata_lock and local->mtx so holding either is fine
- * for read access.
- * @mu_mimo_owner: indicates interface owns MU-MIMO capability
* @driver_flags: flags/capabilities the driver has for this interface,
* these need to be set (or cleared) when the interface is added
* or, if supported by the driver, the interface type is changed
* at runtime, mac80211 will never touch this field
+ * @offload_flags: hardware 802.3 -> 802.11 encapsulation offload flags for
+ * this interface, see &enum ieee80211_offload_flags. These are initialized
+ * by mac80211 before calling .add_interface, .change_interface or
+ * .update_vif_offload and updated by the driver within these ops, based on
+ * supported features or runtime change restrictions.
* @hw_queue: hardware queue for each AC
* @cab_queue: content-after-beacon (DTIM beacon really) queue, AP mode only
- * @chanctx_conf: The channel context this interface is assigned to, or %NULL
- * when it is not assigned. This pointer is RCU-protected due to the TX
- * path needing to access it; even though the netdev carrier will always
- * be off when it is %NULL there can still be races and packets could be
- * processed after it switches back to %NULL.
* @debugfs_dir: debugfs dentry, can be used by drivers to create own per
* interface debug files. Note that it will be NULL for the virtual
* monitor interface (if that is requested.)
* @probe_req_reg: probe requests should be reported to mac80211 for this
* interface.
+ * @rx_mcast_action_reg: multicast Action frames should be reported to mac80211
+ * for this interface.
* @drv_priv: data area for driver use, will always be aligned to
* sizeof(void \*).
* @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
* @txqs_stopped: per AC flag to indicate that intermediate TXQs are stopped,
* protected by fq->lock.
+ * @mbssid_tx_vif: Pointer to the transmitting interface if MBSSID is enabled.
*/
struct ieee80211_vif {
enum nl80211_iftype type;
+ struct ieee80211_vif_cfg cfg;
struct ieee80211_bss_conf bss_conf;
+ struct ieee80211_bss_conf __rcu *link_conf[IEEE80211_MLD_MAX_NUM_LINKS];
+ u16 valid_links, active_links;
u8 addr[ETH_ALEN] __aligned(2);
bool p2p;
- bool csa_active;
- bool mu_mimo_owner;
u8 cab_queue;
u8 hw_queue[IEEE80211_NUM_ACS];
struct ieee80211_txq *txq;
- struct ieee80211_chanctx_conf __rcu *chanctx_conf;
-
u32 driver_flags;
+ u32 offload_flags;
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *debugfs_dir;
#endif
- unsigned int probe_req_reg;
+ bool probe_req_reg;
+ bool rx_mcast_action_reg;
bool txqs_stopped[IEEE80211_NUM_ACS];
+ struct ieee80211_vif *mbssid_tx_vif;
+
/* must be last */
- u8 drv_priv[0] __aligned(sizeof(void *));
+ u8 drv_priv[] __aligned(sizeof(void *));
};
+#define for_each_vif_active_link(vif, link, link_id) \
+ for (link_id = 0; link_id < ARRAY_SIZE((vif)->link_conf); link_id++) \
+ if ((!(vif)->active_links || \
+ (vif)->active_links & BIT(link_id)) && \
+ (link = rcu_dereference((vif)->link_conf[link_id])))
+
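Since the macro dereferences the link_conf pointers with rcu_dereference(), callers must hold the RCU read lock; a minimal usage sketch:

#include <net/mac80211.h>

/* Sketch: dump the BSSID of every active link of a vif. */
static void example_dump_links(struct ieee80211_vif *vif)
{
        struct ieee80211_bss_conf *link_conf;
        unsigned int link_id;

        rcu_read_lock();
        for_each_vif_active_link(vif, link_conf, link_id)
                pr_debug("link %u: bssid %pM\n", link_id, link_conf->bssid);
        rcu_read_unlock();
}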
static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
{
#ifdef CONFIG_MAC80211_MESH
@@ -1679,14 +1898,24 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
*
* This can be used by mac80211 drivers with direct cfg80211 APIs
 * (like the vendor commands) that need to get the wdev for a vif.
- *
- * Note that this function may return %NULL if the given wdev isn't
- * associated with a vif that the driver knows about (e.g. monitor
- * or AP_VLAN interfaces.)
+ * This can also be useful to get the netdev associated with a vif.
*/
struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif);
/**
+ * lockdep_vif_mutex_held - for lockdep checks on link pointers
+ * @vif: the interface to check
+ */
+static inline bool lockdep_vif_mutex_held(struct ieee80211_vif *vif)
+{
+ return lockdep_is_held(&ieee80211_vif_to_wdev(vif)->mtx);
+}
+
+#define link_conf_dereference_protected(vif, link_id) \
+ rcu_dereference_protected((vif)->link_conf[link_id], \
+ lockdep_vif_mutex_held(vif))
+
+/**
* enum ieee80211_key_flags - key flags
*
* These flags are used for communication about keys between the driver
@@ -1767,6 +1996,7 @@ enum ieee80211_key_flags {
* - Temporal Authenticator Rx MIC Key (64 bits)
* @icv_len: The ICV length for this key type
* @iv_len: The IV length for this key type
+ * @link_id: the link ID for MLO, or -1 for non-MLO or pairwise keys
*/
struct ieee80211_key_conf {
atomic64_t tx_pn;
@@ -1776,8 +2006,9 @@ struct ieee80211_key_conf {
u8 hw_key_idx;
s8 keyidx;
u16 flags;
+ s8 link_id;
u8 keylen;
- u8 key[0];
+ u8 key[];
};
#define IEEE80211_MAX_PN_LEN 16
@@ -1825,36 +2056,6 @@ struct ieee80211_key_seq {
};
/**
- * struct ieee80211_cipher_scheme - cipher scheme
- *
- * This structure contains a cipher scheme information defining
- * the secure packet crypto handling.
- *
- * @cipher: a cipher suite selector
- * @iftype: a cipher iftype bit mask indicating an allowed cipher usage
- * @hdr_len: a length of a security header used the cipher
- * @pn_len: a length of a packet number in the security header
- * @pn_off: an offset of pn from the beginning of the security header
- * @key_idx_off: an offset of key index byte in the security header
- * @key_idx_mask: a bit mask of key_idx bits
- * @key_idx_shift: a bit shift needed to get key_idx
- * key_idx value calculation:
- * (sec_header_base[key_idx_off] & key_idx_mask) >> key_idx_shift
- * @mic_len: a mic length in bytes
- */
-struct ieee80211_cipher_scheme {
- u32 cipher;
- u16 iftype;
- u8 hdr_len;
- u8 pn_len;
- u8 pn_off;
- u8 key_idx_off;
- u8 key_idx_mask;
- u8 key_idx_shift;
- u8 mic_len;
-};
-
-/**
* enum set_key_cmd - key command
*
* Used with the set_key() callback in &struct ieee80211_ops, this
@@ -1893,6 +2094,7 @@ enum ieee80211_sta_state {
* @IEEE80211_STA_RX_BW_80: station can receive up to 80 MHz
* @IEEE80211_STA_RX_BW_160: station can receive up to 160 MHz
* (including 80+80 MHz)
+ * @IEEE80211_STA_RX_BW_320: station can receive up to 320 MHz
*
* Implementation note: 20 must be zero to be initialized
* correctly, the values must be sorted.
@@ -1902,6 +2104,7 @@ enum ieee80211_sta_rx_bandwidth {
IEEE80211_STA_RX_BW_40,
IEEE80211_STA_RX_BW_80,
IEEE80211_STA_RX_BW_160,
+ IEEE80211_STA_RX_BW_320,
};
/**
@@ -1941,6 +2144,77 @@ struct ieee80211_sta_txpwr {
};
/**
+ * struct ieee80211_sta_aggregates - info that is aggregated from active links
+ *
+ * Used for any per-link data that needs to be aggregated and updated in the
+ * main &struct ieee80211_sta when updated or the active links change.
+ *
+ * @max_amsdu_len: indicates the maximal length of an A-MSDU in bytes.
+ * This field is always valid for packets with a VHT preamble.
+ * For packets with a HT preamble, additional limits apply:
+ *
+ * * If the skb is transmitted as part of a BA agreement, the
+ * A-MSDU maximal size is min(max_amsdu_len, 4065) bytes.
+ * * If the skb is not part of a BA agreement, the A-MSDU maximal
+ * size is min(max_amsdu_len, 7935) bytes.
+ *
+ * Both additional HT limits must be enforced by the low level
+ * driver. This is defined by the spec (IEEE 802.11-2012 section
+ * 8.3.2.2 NOTE 2).
+ * @max_rc_amsdu_len: Maximum A-MSDU size in bytes recommended by rate control.
+ * @max_tid_amsdu_len: Maximum A-MSDU size in bytes for this TID
+ */
+struct ieee80211_sta_aggregates {
+ u16 max_amsdu_len;
+
+ u16 max_rc_amsdu_len;
+ u16 max_tid_amsdu_len[IEEE80211_NUM_TIDS];
+};
+
+/**
+ * struct ieee80211_link_sta - station link-specific info
+ * All link-specific info for a STA link, whether for a non-MLD STA (a
+ * single entry) or an MLD STA (multiple entries), is stored here.
+ *
+ * @addr: MAC address of the link STA. For a non-MLO STA this is the same as
+ * the addr in ieee80211_sta. For an MLO link STA it can be the same as or
+ * differ from the addr in ieee80211_sta (which represents the MLD STA addr)
+ * @link_id: the link ID for this link STA (0 for deflink)
+ * @smps_mode: current SMPS mode (off, static or dynamic)
+ * @supp_rates: Bitmap of supported rates
+ * @ht_cap: HT capabilities of this STA; restricted to our own capabilities
+ * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
+ * @he_cap: HE capabilities of this STA
+ * @he_6ghz_capa: on 6 GHz, holds the HE 6 GHz band capabilities
+ * @eht_cap: EHT capabilities of this STA
+ * @bandwidth: current bandwidth the station can receive with
+ * @rx_nss: in HT/VHT, the maximum number of spatial streams the
+ * station can receive at the moment, changed by operating mode
+ * notifications and capabilities. The value is only valid after
+ * the station moves to associated state.
+ * @txpwr: the station tx power configuration
+ *
+ */
+struct ieee80211_link_sta {
+ u8 addr[ETH_ALEN];
+ u8 link_id;
+ enum ieee80211_smps_mode smps_mode;
+
+ u32 supp_rates[NUM_NL80211_BANDS];
+ struct ieee80211_sta_ht_cap ht_cap;
+ struct ieee80211_sta_vht_cap vht_cap;
+ struct ieee80211_sta_he_cap he_cap;
+ struct ieee80211_he_6ghz_capa he_6ghz_capa;
+ struct ieee80211_sta_eht_cap eht_cap;
+
+ struct ieee80211_sta_aggregates agg;
+
+ u8 rx_nss;
+ enum ieee80211_sta_rx_bandwidth bandwidth;
+ struct ieee80211_sta_txpwr txpwr;
+};
+
+/**
* struct ieee80211_sta - station table entry
*
* A station table entry represents a station we are possibly
@@ -1949,13 +2223,11 @@ struct ieee80211_sta_txpwr {
* either be protected by rcu_read_lock() explicitly or implicitly,
* or you must take good care to not use such a pointer after a
* call to your sta_remove callback that removed it.
+ * This also represents the MLD STA in case of an MLO association
+ * and holds pointers to the various link STAs.
*
* @addr: MAC address
* @aid: AID we assigned to the station if we're an AP
- * @supp_rates: Bitmap of supported rates (per band)
- * @ht_cap: HT capabilities of this STA; restricted to our own capabilities
- * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
- * @he_cap: HE capabilities of this STA
* @max_rx_aggregation_subframes: maximal amount of frames in a single AMPDU
* that this station is allowed to transmit to us.
* Can be modified by driver.
@@ -1967,73 +2239,81 @@ struct ieee80211_sta_txpwr {
* if wme is supported. The bits order is like in
* IEEE80211_WMM_IE_STA_QOSINFO_AC_*.
* @max_sp: max Service Period. Only valid if wme is supported.
- * @bandwidth: current bandwidth the station can receive with
- * @rx_nss: in HT/VHT, the maximum number of spatial streams the
- * station can receive at the moment, changed by operating mode
- * notifications and capabilities. The value is only valid after
- * the station moves to associated state.
- * @smps_mode: current SMPS mode (off, static or dynamic)
* @rates: rate control selection table
* @tdls: indicates whether the STA is a TDLS peer
* @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
* valid if the STA is a TDLS peer in the first place.
* @mfp: indicates whether the STA uses management frame protection or not.
+ * @mlo: indicates whether the STA is an MLO station.
* @max_amsdu_subframes: indicates the maximal number of MSDUs in a single
* A-MSDU. Taken from the Extended Capabilities element. 0 means
* unlimited.
+ * @cur: currently valid data as aggregated from the active links.
+ * For a non-MLO STA it will point to the deflink data. For an MLO STA,
+ * ieee80211_sta_recalc_aggregates() must be called to update it.
* @support_p2p_ps: indicates whether the STA supports P2P PS mechanism or not.
- * @max_rc_amsdu_len: Maximum A-MSDU size in bytes recommended by rate control.
- * @max_tid_amsdu_len: Maximum A-MSDU size in bytes for this TID
* @txq: per-TID data TX queues (if driver uses the TXQ abstraction); note that
* the last entry (%IEEE80211_NUM_TIDS) is used for non-data frames
+ * @deflink: This holds the default link STA information. For a non-MLO STA,
+ * all link-specific STA information is accessed through @deflink or
+ * through link[0], which points to the address of @deflink. For an MLO
+ * STA, the first added link STA will point to @deflink.
+ * @link: references to the link STA entries. For a non-MLO STA, all links
+ * but the first, i.e. link[0], are NULL by default, and link information
+ * is accessed via @deflink or link[0]. For an MLO STA, the first link STA
+ * being added will point its link pointer to the @deflink address, and
+ * the remaining ones are allocated, with the address assigned to
+ * link[link_id], where link_id is the ID assigned by the AP.
+ * @valid_links: bitmap of valid links, or 0 for non-MLO
*/
struct ieee80211_sta {
- u32 supp_rates[NUM_NL80211_BANDS];
u8 addr[ETH_ALEN];
u16 aid;
- struct ieee80211_sta_ht_cap ht_cap;
- struct ieee80211_sta_vht_cap vht_cap;
- struct ieee80211_sta_he_cap he_cap;
u16 max_rx_aggregation_subframes;
bool wme;
u8 uapsd_queues;
u8 max_sp;
- u8 rx_nss;
- enum ieee80211_sta_rx_bandwidth bandwidth;
- enum ieee80211_smps_mode smps_mode;
struct ieee80211_sta_rates __rcu *rates;
bool tdls;
bool tdls_initiator;
bool mfp;
+ bool mlo;
u8 max_amsdu_subframes;
- /**
- * @max_amsdu_len:
- * indicates the maximal length of an A-MSDU in bytes.
- * This field is always valid for packets with a VHT preamble.
- * For packets with a HT preamble, additional limits apply:
- *
- * * If the skb is transmitted as part of a BA agreement, the
- * A-MSDU maximal size is min(max_amsdu_len, 4065) bytes.
- * * If the skb is not part of a BA agreement, the A-MSDU maximal
- * size is min(max_amsdu_len, 7935) bytes.
- *
- * Both additional HT limits must be enforced by the low level
- * driver. This is defined by the spec (IEEE 802.11-2012 section
- * 8.3.2.2 NOTE 2).
- */
- u16 max_amsdu_len;
+ struct ieee80211_sta_aggregates *cur;
+
bool support_p2p_ps;
- u16 max_rc_amsdu_len;
- u16 max_tid_amsdu_len[IEEE80211_NUM_TIDS];
- struct ieee80211_sta_txpwr txpwr;
struct ieee80211_txq *txq[IEEE80211_NUM_TIDS + 1];
+ u16 valid_links;
+ struct ieee80211_link_sta deflink;
+ struct ieee80211_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+
/* must be last */
- u8 drv_priv[0] __aligned(sizeof(void *));
+ u8 drv_priv[] __aligned(sizeof(void *));
};
+#ifdef CONFIG_LOCKDEP
+bool lockdep_sta_mutex_held(struct ieee80211_sta *pubsta);
+#else
+static inline bool lockdep_sta_mutex_held(struct ieee80211_sta *pubsta)
+{
+ return true;
+}
+#endif
+
+#define link_sta_dereference_protected(sta, link_id) \
+ rcu_dereference_protected((sta)->link[link_id], \
+ lockdep_sta_mutex_held(sta))
+
+#define for_each_sta_active_link(vif, sta, link_sta, link_id) \
+ for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) \
+ if ((!(vif)->active_links || \
+ (vif)->active_links & BIT(link_id)) && \
+ ((link_sta) = link_sta_dereference_protected(sta, link_id)))
+
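A usage sketch: because the macro resolves the pointers with link_sta_dereference_protected(), it is meant for contexts where the sta mutex is held (e.g. from the .sta_state or .change_sta_links callbacks), not for the RCU-protected data path.

#include <net/mac80211.h>

/* Sketch: log the per-link address and NSS of every active link STA. */
static void example_sync_link_stas(struct ieee80211_vif *vif,
                                   struct ieee80211_sta *sta)
{
        struct ieee80211_link_sta *link_sta;
        unsigned int link_id;

        for_each_sta_active_link(vif, sta, link_sta, link_id)
                pr_debug("link %u: addr %pM nss %u\n",
                         link_id, link_sta->addr, link_sta->rx_nss);
}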
/**
* enum sta_notify_cmd - sta notify command
*
@@ -2077,7 +2357,7 @@ struct ieee80211_txq {
u8 ac;
/* must be last */
- u8 drv_priv[0] __aligned(sizeof(void *));
+ u8 drv_priv[] __aligned(sizeof(void *));
};
/**
@@ -2301,6 +2581,24 @@ struct ieee80211_txq {
* aggregating MPDUs with the same keyid, allowing mac80211 to keep Tx
* A-MPDU sessions active while rekeying with Extended Key ID.
*
+ * @IEEE80211_HW_SUPPORTS_TX_ENCAP_OFFLOAD: Hardware supports tx encapsulation
+ * offload
+ *
+ * @IEEE80211_HW_SUPPORTS_RX_DECAP_OFFLOAD: Hardware supports rx decapsulation
+ * offload
+ *
+ * @IEEE80211_HW_SUPPORTS_CONC_MON_RX_DECAP: Hardware supports concurrent rx
+ * decapsulation offload and passing raw 802.11 frames for monitor iface.
+ * If this is supported, the driver must pass both 802.3 frames for real
+ * usage and 802.11 frames with %RX_FLAG_ONLY_MONITOR set for monitor to
+ * the stack.
+ *
+ * @IEEE80211_HW_DETECTS_COLOR_COLLISION: HW/driver has support for BSS color
+ * collision detection and doesn't need it in software.
+ *
+ * @IEEE80211_HW_MLO_MCAST_MULTI_LINK_TX: Hardware/driver handles transmitting
+ * multicast frames on all links, mac80211 should not do that.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2353,6 +2651,11 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_MULTI_BSSID,
IEEE80211_HW_SUPPORTS_ONLY_HE_MULTI_BSSID,
IEEE80211_HW_AMPDU_KEYBORDER_SUPPORT,
+ IEEE80211_HW_SUPPORTS_TX_ENCAP_OFFLOAD,
+ IEEE80211_HW_SUPPORTS_RX_DECAP_OFFLOAD,
+ IEEE80211_HW_SUPPORTS_CONC_MON_RX_DECAP,
+ IEEE80211_HW_DETECTS_COLOR_COLLISION,
+ IEEE80211_HW_MLO_MCAST_MULTI_LINK_TX,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -2468,9 +2771,6 @@ enum ieee80211_hw_flags {
* deliver to a WMM STA during any Service Period triggered by the WMM STA.
* Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct values.
*
- * @n_cipher_schemes: a size of an array of cipher schemes definitions.
- * @cipher_schemes: a pointer to an array of cipher scheme definitions
- * supported by HW.
* @max_nan_de_entries: maximum number of NAN DE functions supported by the
* device.
*
@@ -2482,6 +2782,12 @@ enum ieee80211_hw_flags {
* refilling deficit of each TXQ.
*
* @max_mtu: the max mtu could be set.
+ *
+ * @tx_power_levels: a list of power levels supported by the wifi hardware.
+ * The power levels can be specified either as integers or fractions.
+ * The power level at idx 0 shall be the maximum positive power level.
+ *
+ * @max_txpwr_levels_idx: the maximum valid idx of 'tx_power_levels' list.
*/
struct ieee80211_hw {
struct ieee80211_conf conf;
@@ -2514,12 +2820,12 @@ struct ieee80211_hw {
netdev_features_t netdev_features;
u8 uapsd_queues;
u8 uapsd_max_sp_len;
- u8 n_cipher_schemes;
- const struct ieee80211_cipher_scheme *cipher_schemes;
u8 max_nan_de_entries;
u8 tx_sk_pacing_shift;
u8 weight_multiplier;
u32 max_mtu;
+ const s8 *tx_power_levels;
+ u8 max_txpwr_levels_idx;
};
static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw,
@@ -2703,15 +3009,15 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* for devices that support offload of data packets (e.g. ARP responses).
*
* Mac80211 drivers should set the @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 flag
- * when they are able to replace in-use PTK keys according to to following
+ * when they are able to replace in-use PTK keys according to the following
* requirements:
- * 1) They do not hand over frames decrypted with the old key to
- mac80211 once the call to set_key() with command %DISABLE_KEY has been
- completed when also setting @IEEE80211_KEY_FLAG_GENERATE_IV for any key,
+ * 1) They do not hand over frames decrypted with the old key to mac80211
+ once the call to set_key() with command %DISABLE_KEY has been completed,
2) either drop or continue to use the old key for any outgoing frames queued
at the time of the key deletion (including re-transmits),
3) never send out a frame queued prior to the set_key() %SET_KEY command
- encrypted with the new key and
+ encrypted with the new key when also needing
+ @IEEE80211_KEY_FLAG_GENERATE_IV and
4) never send out a frame unencrypted when it should be encrypted.
Mac80211 will not queue any new frames for a deleted key to the driver.
*/
@@ -3087,6 +3393,8 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* @FIF_PSPOLL: pass PS Poll frames
*
* @FIF_PROBE_REQ: pass probe request frames
+ *
+ * @FIF_MCAST_ACTION: pass multicast Action frames
*/
enum ieee80211_filter_flags {
FIF_ALLMULTI = 1<<1,
@@ -3097,6 +3405,7 @@ enum ieee80211_filter_flags {
FIF_OTHER_BSS = 1<<6,
FIF_PSPOLL = 1<<7,
FIF_PROBE_REQ = 1<<8,
+ FIF_MCAST_ACTION = 1<<9,
};
/**
@@ -3113,7 +3422,10 @@ enum ieee80211_filter_flags {
* @IEEE80211_AMPDU_RX_START: start RX aggregation
* @IEEE80211_AMPDU_RX_STOP: stop RX aggregation
* @IEEE80211_AMPDU_TX_START: start TX aggregation, the driver must either
- * call ieee80211_start_tx_ba_cb_irqsafe() or return the special
+ * call ieee80211_start_tx_ba_cb_irqsafe(), or return the special status
+ * %IEEE80211_AMPDU_TX_START_DELAY_ADDBA to delay the ADDBA request until
+ * after ieee80211_start_tx_ba_cb_irqsafe() is called, or just return the special
* status %IEEE80211_AMPDU_TX_START_IMMEDIATE.
* @IEEE80211_AMPDU_TX_OPERATIONAL: TX aggregation has become operational
* @IEEE80211_AMPDU_TX_STOP_CONT: stop TX aggregation but continue transmitting
@@ -3139,6 +3451,7 @@ enum ieee80211_ampdu_mlme_action {
};
#define IEEE80211_AMPDU_TX_START_IMMEDIATE 1
+#define IEEE80211_AMPDU_TX_START_DELAY_ADDBA 2
/**
* struct ieee80211_ampdu_params - AMPDU action parameters
@@ -3218,7 +3531,7 @@ enum ieee80211_roc_type {
};
/**
- * enum ieee80211_reconfig_complete_type - reconfig type
+ * enum ieee80211_reconfig_type - reconfig type
*
* This enum is used by the reconfig_complete() callback to indicate what
* reconfiguration type was completed.
@@ -3234,6 +3547,21 @@ enum ieee80211_reconfig_type {
};
/**
+ * struct ieee80211_prep_tx_info - prepare TX information
+ * @duration: if non-zero, hint about the required duration,
+ * only used with the mgd_prepare_tx() method.
+ * @subtype: frame subtype (auth, (re)assoc, deauth, disassoc)
+ * @success: whether the frame exchange was successful, only
+ * used with the mgd_complete_tx() method, and then only
+ * valid for auth and (re)assoc.
+ */
+struct ieee80211_prep_tx_info {
+ u16 duration;
+ u16 subtype;
+ u8 success:1;
+};
+
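A sketch of how a driver might consume this structure in its .mgd_prepare_tx callback; example_stay_on_channel() is an invented, stubbed stand-in for whatever session-protection primitive the device offers, and the 100 TU fallback is an assumption.

#include <net/mac80211.h>

/* hypothetical device-specific helper; stubbed for the sketch */
static void example_stay_on_channel(void *drv_priv, u16 duration_tu)
{
}

static void example_mgd_prepare_tx(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif,
                                   struct ieee80211_prep_tx_info *info)
{
        /* fall back to an assumed 100 TU when mac80211 gives no hint */
        u16 duration = info->duration ?: 100;

        example_stay_on_channel(hw->priv, duration);
}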
+/**
* struct ieee80211_ops - callbacks from mac80211 to the driver
*
* This structure contains various callbacks that the driver may
@@ -3323,6 +3651,22 @@ enum ieee80211_reconfig_type {
* for association indication. The @changed parameter indicates which
* of the bss parameters has changed when a call is made. The callback
* can sleep.
+ * Note: this callback is called if @vif_cfg_changed or @link_info_changed
+ * are not implemented.
+ *
+ * @vif_cfg_changed: Handler for configuration requests related to interface
+ * (MLD) parameters from &struct ieee80211_vif_cfg that vary during the
+ * lifetime of the interface (e.g. assoc status, IP addresses, etc.)
+ * The @changed parameter indicates which value changed.
+ * The callback can sleep.
+ *
+ * @link_info_changed: Handler for configuration requests related to link
+ * parameters from &struct ieee80211_bss_conf that are related to an
+ * individual link. e.g. legacy/HT/VHT/... rate information.
+ * The @changed parameter indicates which value changed, and the @link_id
+ * parameter indicates the link ID. Note that the @link_id will be 0 for
+ * non-MLO connections.
+ * The callback can sleep.
*
* @prepare_multicast: Prepare for multicast filter configuration.
* This callback is optional, and its return value is passed
@@ -3448,6 +3792,10 @@ enum ieee80211_reconfig_type {
* in AP mode, this callback will not be called when the flag
* %IEEE80211_HW_AP_LINK_PS is set. Must be atomic.
*
+ * @sta_set_txpwr: Configure the station tx power. This callback sets the tx
+ * power for the station.
+ * This callback can sleep.
+ *
* @sta_state: Notifies low level driver about state transition of a
* station (which can be the AP, a client, IBSS/WDS/mesh peer etc.)
* This callback is mutually exclusive with @sta_add/@sta_remove.
@@ -3641,9 +3989,13 @@ enum ieee80211_reconfig_type {
* frame in case that no beacon was heard from the AP/P2P GO.
* The callback will be called before each transmission and upon return
* mac80211 will transmit the frame right away.
- * If duration is greater than zero, mac80211 hints to the driver the
- * duration for which the operation is requested.
+ * Additional information is passed in the &struct ieee80211_prep_tx_info
+ * data. If duration there is greater than zero, mac80211 hints to the
+ * driver the duration for which the operation is requested.
* The callback is optional and can (should!) sleep.
+ * @mgd_complete_tx: Notify the driver that the response frame for a previously
+ * transmitted frame announced with @mgd_prepare_tx was received, the data
+ * is filled similarly to @mgd_prepare_tx though the duration is not used.
*
* @mgd_protect_tdls_discover: Protect a TDLS discovery session. After sending
* a TDLS discovery-request, we expect a reply to arrive on the AP's
@@ -3698,7 +4050,7 @@ enum ieee80211_reconfig_type {
* decremented, and when they reach 1 the driver must call
* ieee80211_csa_finish(). Drivers which use ieee80211_beacon_get()
* get the csa counter decremented by mac80211, but must check if it is
- * 1 using ieee80211_csa_is_complete() after the beacon has been
+ * 1 using ieee80211_beacon_cntdwn_is_complete() after the beacon has been
* transmitted and then call ieee80211_csa_finish().
* If the CSA count starts as zero or 1, this function will not be called,
* since there won't be any time to beacon before the switch anyway.
@@ -3773,6 +4125,45 @@ enum ieee80211_reconfig_type {
*
* @start_pmsr: start peer measurement (e.g. FTM) (this call can sleep)
* @abort_pmsr: abort peer measurement (this call can sleep)
+ * @set_tid_config: Apply TID specific configurations. This callback may sleep.
+ * @reset_tid_config: Reset TID specific configuration for the peer.
+ * This callback may sleep.
+ * @update_vif_offload: Update virtual interface offload flags
+ * This callback may sleep.
+ * @sta_set_4addr: Called to notify the driver when a station starts/stops using
+ * 4-address mode
+ * @set_sar_specs: Update the SAR (TX power) settings.
+ * @sta_set_decap_offload: Called to notify the driver when a station is allowed
+ * to use rx decapsulation offload
+ * @add_twt_setup: Update hw with TWT agreement parameters received from the peer.
+ * This callback allows the hw to check if requested parameters
+ * are supported and if there is enough room for a new agreement.
+ * The hw is expected to set agreement result in the req_type field of
+ * twt structure.
+ * @twt_teardown_request: Update the hw with TWT teardown request received
+ * from the peer.
+ * @set_radar_background: Configure dedicated offchannel chain available for
+ * radar/CAC detection on some hw. This chain can't be used to transmit
+ * or receive frames and it is bound to a running wdev.
+ * Background radar/CAC detection avoids the CAC downtime when
+ * switching to a different channel during CAC detection on the selected
+ * radar channel.
+ * The caller is expected to set chandef pointer to NULL in order to
+ * disable background CAC/radar detection.
+ * @net_fill_forward_path: Called from .ndo_fill_forward_path in order to
+ * resolve a path for hardware flow offloading
+ * @change_vif_links: Change the valid links on an interface, note that while
+ * removing the old link information is still valid (link_conf pointer),
+ * but may immediately disappear after the function returns. The old or
+ * new links bitmaps may be 0 if going from/to a non-MLO situation.
+ * The @old array contains pointers to the old bss_conf structures
+ * that were already removed, in case they're needed.
+ * This callback can sleep.
+ * @change_sta_links: Change the valid links of a station, similar to
+ * @change_vif_links. This callback can sleep.
+ * Note that a sta can also be inserted or removed with valid links,
+ * i.e. passed to @sta_add/@sta_state with sta->valid_links not zero.
+ * In fact, it cannot change between having valid_links and not having them.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -3796,10 +4187,19 @@ struct ieee80211_ops {
void (*bss_info_changed)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed);
+ u64 changed);
+ void (*vif_cfg_changed)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u64 changed);
+ void (*link_info_changed)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u64 changed);
- int (*start_ap)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
- void (*stop_ap)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+ int (*start_ap)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
+ void (*stop_ap)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
u64 (*prepare_multicast)(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list);
@@ -3882,7 +4282,8 @@ struct ieee80211_ops {
struct ieee80211_sta *sta,
struct station_info *sinfo);
int (*conf_tx)(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 ac,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
const struct ieee80211_tx_queue_params *params);
u64 (*get_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void (*set_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -3984,7 +4385,10 @@ struct ieee80211_ops {
void (*mgd_prepare_tx)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- u16 duration);
+ struct ieee80211_prep_tx_info *info);
+ void (*mgd_complete_tx)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_prep_tx_info *info);
void (*mgd_protect_tdls_discover)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
@@ -3998,9 +4402,11 @@ struct ieee80211_ops {
u32 changed);
int (*assign_vif_chanctx)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx);
void (*unassign_vif_chanctx)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx);
int (*switch_vif_chanctx)(struct ieee80211_hw *hw,
struct ieee80211_vif_chanctx_switch *vifs,
@@ -4077,6 +4483,42 @@ struct ieee80211_ops {
struct cfg80211_pmsr_request *request);
void (*abort_pmsr)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *request);
+ int (*set_tid_config)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct cfg80211_tid_config *tid_conf);
+ int (*reset_tid_config)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u8 tids);
+ void (*update_vif_offload)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+ void (*sta_set_4addr)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enabled);
+ int (*set_sar_specs)(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar);
+ void (*sta_set_decap_offload)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enabled);
+ void (*add_twt_setup)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct ieee80211_twt_setup *twt);
+ void (*twt_teardown_request)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u8 flowid);
+ int (*set_radar_background)(struct ieee80211_hw *hw,
+ struct cfg80211_chan_def *chandef);
+ int (*net_fill_forward_path)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct net_device_path_ctx *ctx,
+ struct net_device_path *path);
+ int (*change_vif_links)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 old_links, u16 new_links,
+ struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]);
+ int (*change_sta_links)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links);
};
/**
@@ -4313,6 +4755,31 @@ void ieee80211_free_hw(struct ieee80211_hw *hw);
void ieee80211_restart_hw(struct ieee80211_hw *hw);
/**
+ * ieee80211_rx_list - receive frame and store processed skbs in a list
+ *
+ * Use this function to hand received frames to mac80211. The receive
+ * buffer in @skb must start with an IEEE 802.11 header. In case a
+ * paged @skb is used, the driver is recommended to put the ieee80211
+ * header of the frame on the linear part of the @skb to avoid memory
+ * allocation and/or memcpy by the stack.
+ *
+ * This function may not be called in IRQ context. Calls to this function
+ * for a single hardware must be synchronized against each other. Calls to
+ * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
+ * mixed for a single hardware. Must not run concurrently with
+ * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ *
+ * This function must be called with BHs disabled and the RCU read lock held.
+ *
+ * @hw: the hardware this frame came in on
+ * @sta: the station the frame was received from, or %NULL
+ * @skb: the buffer to receive, owned by mac80211 after this call
+ * @list: the destination list
+ */
+void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ struct sk_buff *skb, struct list_head *list);
+
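A sketch of the intended batching pattern from a NAPI poll loop (where BHs are already disabled); example_fetch_skb() is an invented helper standing in for popping frames off the device's RX ring.

#include <linux/netdevice.h>
#include <net/mac80211.h>

/* hypothetical: pop the next received frame off the RX ring */
static struct sk_buff *example_fetch_skb(struct ieee80211_hw *hw)
{
        return NULL;
}

static int example_napi_poll(struct ieee80211_hw *hw, int budget)
{
        LIST_HEAD(rx_list);
        struct sk_buff *skb;
        int done = 0;

        rcu_read_lock();
        while (done < budget && (skb = example_fetch_skb(hw)) != NULL) {
                ieee80211_rx_list(hw, NULL, skb, &rx_list);
                done++;
        }
        rcu_read_unlock();

        /* hand the whole batch to the network stack in one go */
        netif_receive_skb_list(&rx_list);
        return done;
}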
+/**
* ieee80211_rx_napi - receive frame from NAPI context
*
* Use this function to hand received frames to mac80211. The receive
@@ -4660,6 +5127,26 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
struct sk_buff *skb);
/**
+ * ieee80211_tx_status_8023 - transmit status callback for 802.3 frame format
+ *
+ * Call this function for all transmitted data frames after their transmit
+ * completion. This callback should only be called for data frames which
+ * are using driver's (or hardware's) offload capability of encap/decap
+ * 802.11 frames.
+ *
+ * This function may not be called in IRQ context. Calls to this function
+ * for a single hardware must be synchronized against each other and all
+ * calls in the same tx status family.
+ *
+ * @hw: the hardware the frame was transmitted by
+ * @vif: the interface for which the frame was transmitted
+ * @skb: the frame that was transmitted, owned by mac80211 after this call
+ */
+void ieee80211_tx_status_8023(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb);
+
+/**
* ieee80211_report_low_ack - report non-responding station
*
* When operating in AP-mode, call this function to report a non-responding
@@ -4670,21 +5157,23 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
*/
void ieee80211_report_low_ack(struct ieee80211_sta *sta, u32 num_packets);
-#define IEEE80211_MAX_CSA_COUNTERS_NUM 2
+#define IEEE80211_MAX_CNTDWN_COUNTERS_NUM 2
/**
* struct ieee80211_mutable_offsets - mutable beacon offsets
* @tim_offset: position of TIM element
* @tim_length: size of TIM element
- * @csa_counter_offs: array of IEEE80211_MAX_CSA_COUNTERS_NUM offsets
- * to CSA counters. This array can contain zero values which
+ * @cntdwn_counter_offs: array of IEEE80211_MAX_CNTDWN_COUNTERS_NUM offsets
+ * to countdown counters. This array can contain zero values which
* should be ignored.
+ * @mbssid_off: position of the multiple bssid element
*/
struct ieee80211_mutable_offsets {
u16 tim_offset;
u16 tim_length;
- u16 csa_counter_offs[IEEE80211_MAX_CSA_COUNTERS_NUM];
+ u16 cntdwn_counter_offs[IEEE80211_MAX_CNTDWN_COUNTERS_NUM];
+ u16 mbssid_off;
};
/**
@@ -4693,6 +5182,7 @@ struct ieee80211_mutable_offsets {
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @offs: &struct ieee80211_mutable_offsets pointer to struct that will
* receive the offsets that may be updated by the driver.
+ * @link_id: the link id to which the beacon belongs (or 0 for a non-MLD AP)
*
* If the driver implements beaconing modes, it must use this function to
* obtain the beacon template.
@@ -4709,7 +5199,8 @@ struct ieee80211_mutable_offsets {
struct sk_buff *
ieee80211_beacon_get_template(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- struct ieee80211_mutable_offsets *offs);
+ struct ieee80211_mutable_offsets *offs,
+ unsigned int link_id);
/**
* ieee80211_beacon_get_tim - beacon generation function
@@ -4720,6 +5211,7 @@ ieee80211_beacon_get_template(struct ieee80211_hw *hw,
* @tim_length: pointer to variable that will receive the TIM IE length,
* (including the ID and length bytes!).
* Set to 0 if invalid (in non-AP modes).
+ * @link_id: the link id to which the beacon belongs (or 0 for a non-MLD AP)
*
* If the driver implements beaconing modes, it must use this function to
* obtain the beacon frame.
@@ -4735,49 +5227,52 @@ ieee80211_beacon_get_template(struct ieee80211_hw *hw,
*/
struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- u16 *tim_offset, u16 *tim_length);
+ u16 *tim_offset, u16 *tim_length,
+ unsigned int link_id);
/**
* ieee80211_beacon_get - beacon generation function
* @hw: pointer obtained from ieee80211_alloc_hw().
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: the link id to which the beacon belongs (or 0 for a non-MLD AP)
*
* See ieee80211_beacon_get_tim().
*
* Return: See ieee80211_beacon_get_tim().
*/
static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ unsigned int link_id)
{
- return ieee80211_beacon_get_tim(hw, vif, NULL, NULL);
+ return ieee80211_beacon_get_tim(hw, vif, NULL, NULL, link_id);
}
/**
- * ieee80211_csa_update_counter - request mac80211 to decrement the csa counter
+ * ieee80211_beacon_update_cntdwn - request mac80211 to decrement the beacon countdown
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
*
- * The csa counter should be updated after each beacon transmission.
+ * The beacon counter should be updated after each beacon transmission.
* This function is called implicitly when
* ieee80211_beacon_get/ieee80211_beacon_get_tim are called, however if the
* beacon frames are generated by the device, the driver should call this
- * function after each beacon transmission to sync mac80211's csa counters.
+ * function after each beacon transmission to sync mac80211's beacon countdown.
*
- * Return: new csa counter value
+ * Return: new countdown value
*/
-u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif);
+u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif);
/**
- * ieee80211_csa_set_counter - request mac80211 to set csa counter
+ * ieee80211_beacon_set_cntdwn - request mac80211 to set beacon countdown
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @counter: the new value for the counter
*
- * The csa counter can be changed by the device, this API should be
+ * The beacon countdown can be changed by the device, this API should be
 * used by the device driver to update the countdown in mac80211.
*
- * It should never be used together with ieee80211_csa_update_counter(),
+ * It should never be used together with ieee80211_beacon_update_cntdwn(),
* as it will cause a race condition around the counter value.
*/
-void ieee80211_csa_set_counter(struct ieee80211_vif *vif, u8 counter);
+void ieee80211_beacon_set_cntdwn(struct ieee80211_vif *vif, u8 counter);
/**
* ieee80211_csa_finish - notify mac80211 about channel switch
@@ -4790,13 +5285,22 @@ void ieee80211_csa_set_counter(struct ieee80211_vif *vif, u8 counter);
void ieee80211_csa_finish(struct ieee80211_vif *vif);
/**
- * ieee80211_csa_is_complete - find out if counters reached 1
+ * ieee80211_beacon_cntdwn_is_complete - find out if countdown reached 1
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
*
- * This function returns whether the channel switch counters reached zero.
+ * This function returns whether the countdown reached zero.
*/
-bool ieee80211_csa_is_complete(struct ieee80211_vif *vif);
+bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif);
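The usual driver pattern for device-generated beacons, sketched under the assumption that this runs from a per-beacon completion interrupt while a countdown (e.g. CSA) is active:

#include <net/mac80211.h>

/* Sketch: sync mac80211's countdown from a beacon-done interrupt and
 * finish the channel switch once the countdown completes. */
static void example_beacon_done_irq(struct ieee80211_vif *vif)
{
        if (ieee80211_beacon_cntdwn_is_complete(vif))
                ieee80211_csa_finish(vif);
        else
                ieee80211_beacon_update_cntdwn(vif);
}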
+/**
+ * ieee80211_color_change_finish - notify mac80211 about color change
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * After a color change announcement was scheduled and the counter in this
+ * announcement hits 1, this function must be called by the driver to
+ * notify mac80211 that the color can be changed
+ */
+void ieee80211_color_change_finish(struct ieee80211_vif *vif);
/**
* ieee80211_proberesp_get - retrieve a Probe Response template
@@ -4834,6 +5338,9 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
* ieee80211_nullfunc_get - retrieve a nullfunc template
* @hw: pointer obtained from ieee80211_alloc_hw().
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: If the vif is an MLD, get a frame with the link addresses
+ * for the given link ID. For a link_id < 0 you get a frame with
+ * MLD addresses, however useful that might be.
* @qos_ok: QoS NDP is acceptable to the caller, this should be set
* if at all possible
*
@@ -4851,7 +5358,7 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
*/
struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- bool qos_ok);
+ int link_id, bool qos_ok);
/**
* ieee80211_probereq_get - retrieve a Probe Request template
@@ -5160,6 +5667,26 @@ void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid,
const u8 *replay_ctr, gfp_t gfp);
/**
+ * ieee80211_key_mic_failure - increment MIC failure counter for the key
+ *
+ * Note: this is really only safe if no other RX function is called
+ * at the same time.
+ *
+ * @keyconf: the key in question
+ */
+void ieee80211_key_mic_failure(struct ieee80211_key_conf *keyconf);
+
+/**
+ * ieee80211_key_replay - increment replay counter for the key
+ *
+ * Note: this is really only safe if no other RX function is called
+ * at the same time.
+ *
+ * @keyconf: the key in question
+ */
+void ieee80211_key_replay(struct ieee80211_key_conf *keyconf);
+
+/**
* ieee80211_wake_queue - wake specific queue
* @hw: pointer as obtained from ieee80211_alloc_hw().
* @queue: queue number (counted from zero).
@@ -5251,11 +5778,15 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw);
* @IEEE80211_IFACE_ITER_RESUME_ALL: During resume, iterate over all
* interfaces, even if they haven't been re-added to the driver yet.
* @IEEE80211_IFACE_ITER_ACTIVE: Iterate only active interfaces (netdev is up).
+ * @IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER: Skip any interfaces where SDATA
+ * is not in the driver. This may, for instance, avoid crashes during
+ * firmware recovery.
*/
enum ieee80211_interface_iteration_flags {
IEEE80211_IFACE_ITER_NORMAL = 0,
IEEE80211_IFACE_ITER_RESUME_ALL = BIT(0),
IEEE80211_IFACE_ITER_ACTIVE = BIT(1),
+ IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER = BIT(2),
};
/**
@@ -5324,23 +5855,44 @@ void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw,
void *data);
/**
- * ieee80211_iterate_active_interfaces_rtnl - iterate active interfaces
+ * ieee80211_iterate_active_interfaces_mtx - iterate active interfaces
*
* This function iterates over the interfaces associated with a given
* hardware that are currently active and calls the callback for them.
- * This version can only be used while holding the RTNL.
+ * This version can only be used while holding the wiphy mutex.
+ * The driver must not call this with a lock held that it can also take in
+ * response to callbacks from mac80211, and it must not call this within
+ * callbacks made by mac80211 - both would result in deadlocks.
*
* @hw: the hardware struct of which the interfaces should be iterated over
* @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
* @iterator: the iterator function to call, cannot sleep
* @data: first argument of the iterator function
*/
-void ieee80211_iterate_active_interfaces_rtnl(struct ieee80211_hw *hw,
- u32 iter_flags,
- void (*iterator)(void *data,
+void ieee80211_iterate_active_interfaces_mtx(struct ieee80211_hw *hw,
+ u32 iter_flags,
+ void (*iterator)(void *data,
u8 *mac,
struct ieee80211_vif *vif),
- void *data);
+ void *data);
+
+/**
+ * ieee80211_iterate_stations - iterate stations
+ *
+ * This function iterates over all stations associated with a given
+ * hardware that are currently uploaded to the driver and calls the callback
+ * function for them.
+ * This function allows the iterator function to sleep; when the iterator
+ * function must be atomic, @ieee80211_iterate_stations_atomic can be used
+ * instead.
+ *
+ * @hw: the hardware struct of which the stations should be iterated over
+ * @iterator: the iterator function to call, can sleep
+ * @data: first argument of the iterator function
+ */
+void ieee80211_iterate_stations(struct ieee80211_hw *hw,
+ void (*iterator)(void *data,
+ struct ieee80211_sta *sta),
+ void *data);
/**
* ieee80211_iterate_stations_atomic - iterate stations
@@ -5478,6 +6030,22 @@ struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw,
const u8 *localaddr);
/**
+ * ieee80211_find_sta_by_link_addrs - find STA by link addresses
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @addr: remote station's link address
+ * @localaddr: local link address, use %NULL for any (but avoid that)
+ * @link_id: pointer to obtain the link ID if the STA is found,
+ * may be %NULL if the link ID is not needed
+ *
+ * Obtain the STA by link address, must use RCU protection.
+ */
+struct ieee80211_sta *
+ieee80211_find_sta_by_link_addrs(struct ieee80211_hw *hw,
+ const u8 *addr,
+ const u8 *localaddr,
+ unsigned int *link_id);
+
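A lookup sketch for an RX path that only knows the link addresses from its receive descriptor; the RCU requirement is the one stated above, and the example_ name is invented.

#include <net/mac80211.h>

/* Sketch: map (transmitter, receiver) link addresses to the MLD STA. */
static void example_lookup_link_sta(struct ieee80211_hw *hw,
                                    const u8 *ta, const u8 *ra)
{
        struct ieee80211_sta *sta;
        unsigned int link_id;

        rcu_read_lock();
        sta = ieee80211_find_sta_by_link_addrs(hw, ta, ra, &link_id);
        if (sta)
                pr_debug("frame from %pM on link %u of MLD %pM\n",
                         ta, link_id, sta->addr);
        rcu_read_unlock();
}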
+/**
* ieee80211_sta_block_awake - block station from waking up
* @hw: the hardware
* @pubsta: the station
@@ -5553,9 +6121,22 @@ void ieee80211_sta_eosp(struct ieee80211_sta *pubsta);
void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid);
/**
+ * ieee80211_sta_recalc_aggregates - recalculate aggregate data after a change
+ * @pubsta: the station
+ *
+ * Call this function after changing a per-link aggregate data as referenced in
+ * &struct ieee80211_sta_aggregates by accessing the agg field of
+ * &struct ieee80211_link_sta.
+ *
+ * With non-MLO, the data in deflink will be referenced directly. In that case
+ * there is no need to call this function.
+ */
+void ieee80211_sta_recalc_aggregates(struct ieee80211_sta *pubsta);
+
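For example, a (hypothetical) rate-control decision lowering the recommended A-MSDU size on one link would be followed by a recalculation so the TX path sees it through sta->cur; the 3839-byte cap is an arbitrary example value.

#include <net/mac80211.h>

/* Sketch: cap the rate-control A-MSDU length on one link, then refresh
 * the aggregated view. */
static void example_cap_amsdu(struct ieee80211_sta *sta,
                              struct ieee80211_link_sta *link_sta)
{
        link_sta->agg.max_rc_amsdu_len = 3839;

        /* for non-MLO, sta->cur already points at deflink.agg */
        ieee80211_sta_recalc_aggregates(sta);
}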
+/**
* ieee80211_sta_register_airtime - register airtime usage for a sta/tid
*
- * Register airtime usage for a given sta on a given tid. The driver can call
+ * Register airtime usage for a given sta on a given tid. The driver must call
* this function to notify mac80211 that a station used a certain amount of
* airtime. This information will be used by the TXQ scheduler to schedule
* stations in a way that ensures airtime fairness.
@@ -5714,6 +6295,17 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif);
void ieee80211_connection_loss(struct ieee80211_vif *vif);
/**
+ * ieee80211_disconnect - request disconnection
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @reconnect: immediate reconnect is desired
+ *
+ * Request disconnection from the current network and, if enabled, send a
+ * hint to the higher layers that immediate reconnect is desired.
+ */
+void ieee80211_disconnect(struct ieee80211_vif *vif, bool reconnect);
+
+/**
* ieee80211_resume_disconnect - disconnect from AP after resume
*
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
@@ -5737,6 +6329,16 @@ void ieee80211_connection_loss(struct ieee80211_vif *vif);
void ieee80211_resume_disconnect(struct ieee80211_vif *vif);
/**
+ * ieee80211_hw_restart_disconnect - disconnect from AP after
+ * hardware restart
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * Instructs mac80211 to disconnect from the AP after
+ * hardware restart.
+ */
+void ieee80211_hw_restart_disconnect(struct ieee80211_vif *vif);
+
+/**
* ieee80211_cqm_rssi_notify - inform a configured connection quality monitoring
* rssi threshold triggered
*
@@ -5780,15 +6382,28 @@ void ieee80211_radar_detected(struct ieee80211_hw *hw);
void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success);
/**
+ * ieee80211_channel_switch_disconnect - disconnect due to channel switch error
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @block_tx: if %true, do not send deauth frame.
+ *
+ * Instruct mac80211 to disconnect due to a channel switch error. The channel
+ * switch can request to block the tx, so we need to make sure we do not send
+ * a deauth frame in this case.
+ */
+void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif,
+ bool block_tx);
+
+/**
* ieee80211_request_smps - request SM PS transition
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: link ID for MLO, or 0
* @smps_mode: new SM PS mode
*
* This allows the driver to request an SM PS transition in managed
* mode. This is useful when the driver has more information than
* the stack about possible interference, for example by bluetooth.
*/
-void ieee80211_request_smps(struct ieee80211_vif *vif,
+void ieee80211_request_smps(struct ieee80211_vif *vif, unsigned int link_id,
enum ieee80211_smps_mode smps_mode);
/**
@@ -5964,12 +6579,19 @@ enum rate_control_capabilities {
* otherwise the NSS difference doesn't bother us.
*/
RATE_CTRL_CAPA_VHT_EXT_NSS_BW = BIT(0),
+ /**
+ * @RATE_CTRL_CAPA_AMPDU_TRIGGER:
+ * mac80211 should start A-MPDU sessions on tx
+ */
+ RATE_CTRL_CAPA_AMPDU_TRIGGER = BIT(1),
};
struct rate_control_ops {
unsigned long capa;
const char *name;
- void *(*alloc)(struct ieee80211_hw *hw, struct dentry *debugfsdir);
+ void *(*alloc)(struct ieee80211_hw *hw);
+ void (*add_debugfs)(struct ieee80211_hw *hw, void *priv,
+ struct dentry *debugfsdir);
void (*free)(void *priv);
void *(*alloc_sta)(void *priv, struct ieee80211_sta *sta, gfp_t gfp);
@@ -6002,7 +6624,7 @@ static inline int rate_supported(struct ieee80211_sta *sta,
enum nl80211_band band,
int index)
{
- return (sta == NULL || sta->supp_rates[band] & BIT(index));
+ return (sta == NULL || sta->deflink.supp_rates[band] & BIT(index));
}
static inline s8
@@ -6113,6 +6735,7 @@ ieee80211_vif_type_p2p(struct ieee80211_vif *vif)
* ieee80211_update_mu_groups - set the VHT MU-MIMO groud data
*
* @vif: the specified virtual interface
+ * @link_id: the link ID for MLO, otherwise 0
* @membership: 64 bits array - a bit is set if station is member of the group
* @position: 2 bits per group id indicating the position in the group
*
@@ -6121,7 +6744,7 @@ ieee80211_vif_type_p2p(struct ieee80211_vif *vif)
* matching GroupId management frame.
* Calls to this function need to be serialized with RX path.
*/
-void ieee80211_update_mu_groups(struct ieee80211_vif *vif,
+void ieee80211_update_mu_groups(struct ieee80211_vif *vif, unsigned int link_id,
const u8 *membership, const u8 *position);
void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif,
@@ -6169,6 +6792,20 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
int band, struct ieee80211_sta **sta);
/**
+ * ieee80211_parse_tx_radiotap - Sanity-check and parse the radiotap header
+ * of injected frames.
+ *
+ * To accurately parse and take into account rate and retransmission fields,
+ * you must initialize the chandef field in the ieee80211_tx_info structure
+ * of the skb before calling this function.
+ *
+ * @skb: packet injected by userspace
+ * @dev: the &struct device of this 802.11 device
+ */
+bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
+ struct net_device *dev);
+
+/**
* struct ieee80211_noa_data - holds temporary data for tracking P2P NoA state
*
* @next_tsf: TSF timestamp of the next absent state change
@@ -6216,7 +6853,7 @@ int ieee80211_parse_p2p_noa(const struct ieee80211_p2p_noa_attr *attr,
void ieee80211_update_p2p_noa(struct ieee80211_noa_data *data, u32 tsf);
/**
- * ieee80211_tdls_oper - request userspace to perform a TDLS operation
+ * ieee80211_tdls_oper_request - request userspace to perform a TDLS operation
* @vif: virtual interface
* @peer: the peer's destination address
* @oper: the requested TDLS operation
@@ -6277,7 +6914,7 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid);
*
* Note that this must be called in an rcu_read_lock() critical section,
* which can only be released after the SKB was handled. Some pointers in
- * skb->cb, e.g. the key pointer, are protected by by RCU and thus the
+ * skb->cb, e.g. the key pointer, are protected by RCU and thus the
* critical section must persist not just for the duration of this call
* but for the duration of the frame handling.
* However, also note that while in the wake_tx_queue() method,
@@ -6479,5 +7116,113 @@ u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
u32 ieee80211_calc_tx_airtime(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info,
int len);
+/**
+ * ieee80211_set_hw_80211_encap - enable hardware encapsulation offloading.
+ *
+ * This function is used to notify mac80211 that a vif can be passed raw 802.3
+ * frames. The driver needs to then handle the 802.11 encapsulation inside the
+ * hardware or firmware.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @enable: indicate if the feature should be turned on or off
+ */
+bool ieee80211_set_hw_80211_encap(struct ieee80211_vif *vif, bool enable);
+
+/**
+ * ieee80211_get_fils_discovery_tmpl - Get FILS discovery template.
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * The driver is responsible for freeing the returned skb.
+ *
+ * Return: FILS discovery template. %NULL on error.
+ */
+struct sk_buff *ieee80211_get_fils_discovery_tmpl(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_get_unsol_bcast_probe_resp_tmpl - Get unsolicited broadcast
+ * probe response template.
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * The driver is responsible for freeing the returned skb.
+ *
+ * Return: Unsolicited broadcast probe response template. %NULL on error.
+ */
+struct sk_buff *
+ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+/**
+ * ieeee80211_obss_color_collision_notify - notify userland about a BSS color
+ * collision.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @color_bitmap: a 64 bit bitmap representing the colors that the local BSS is
+ * aware of.
+ * @gfp: allocation flags
+ */
+void
+ieeee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
+ u64 color_bitmap, gfp_t gfp);
+
+/**
+ * ieee80211_is_tx_data - check if frame is a data frame
+ *
+ * The function is used to check if a frame is a data frame. Frames with
+ * hardware encapsulation enabled are data frames.
+ *
+ * @skb: the frame to be transmitted.
+ */
+static inline bool ieee80211_is_tx_data(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *) skb->data;
+
+ return info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP ||
+ ieee80211_is_data(hdr->frame_control);
+}
+
+/**
+ * ieee80211_set_active_links - set active links in client mode
+ * @vif: interface to set active links on
+ * @active_links: the new active links bitmap
+ *
+ * This changes the active links on an interface. The interface
+ * must be in client mode (in AP mode, all links are always active),
+ * and @active_links must be a subset of the vif's valid_links.
+ *
+ * If a link is switched off and another is switched on at the same
+ * time (e.g. active_links going from 0x1 to 0x10) then you will get
+ * a sequence of calls like
+ * - change_vif_links(0x11)
+ * - unassign_vif_chanctx(link_id=0)
+ * - change_sta_links(0x11) for each affected STA (the AP)
+ * (TDLS connections on now inactive links should be torn down)
+ * - remove group keys on the old link (link_id 0)
+ * - add new group keys (GTK/IGTK/BIGTK) on the new link (link_id 4)
+ * - change_sta_links(0x10) for each affected STA (the AP)
+ * - assign_vif_chanctx(link_id=4)
+ * - change_vif_links(0x10)
+ *
+ * Note: This function acquires some mac80211 locks and must not
+ * be called with any driver locks held that could cause a
+ * lock dependency inversion. Best call it without locks.
+ */
+int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links);
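
For illustration, a driver-triggered switch from link 0 to link 4 (matching the 0x1 to 0x10 example above) could look like this sketch; the wrapper name and the warning are ours:

/* Sketch: request that link 4 become the only active link. Per the
 * note above, call without any driver locks held.
 */
static int example_switch_to_link4(struct ieee80211_vif *vif)
{
	int ret;

	ret = ieee80211_set_active_links(vif, BIT(4));	/* 0x10 */
	if (ret)
		pr_warn("active link switch failed: %d\n", ret);
	return ret;
}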
+
+/**
+ * ieee80211_set_active_links_async - asynchronously set active links
+ * @vif: interface to set active links on
+ * @active_links: the new active links bitmap
+ *
+ * See ieee80211_set_active_links() for more information. The only
+ * difference here is that the link change is triggered asynchronously
+ * and can be requested from any context; the link switch is only
+ * completed after this function returns.
+ */
+void ieee80211_set_active_links_async(struct ieee80211_vif *vif,
+ u16 active_links);
#endif /* MAC80211_H */
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index d524ffb9eb25..bdac0ddbdcdb 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -464,6 +464,12 @@ void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb,
* ieee802154_wake_queue - wake ieee802154 queue
* @hw: pointer as obtained from ieee802154_alloc_hw().
*
+ * Transceivers usually have either one transmit framebuffer or one framebuffer
+ * for both transmitting and receiving. Hence, the core currently only handles
+ * one frame at a time for each phy, which means we had to stop the queue to
+ * keep new skbs from arriving during the transmission. The queue then needs
+ * to be woken up after the operation.
+ *
* Drivers should use this function instead of netif_wake_queue.
*/
void ieee802154_wake_queue(struct ieee802154_hw *hw);
@@ -472,6 +478,12 @@ void ieee802154_wake_queue(struct ieee802154_hw *hw);
* ieee802154_stop_queue - stop ieee802154 queue
* @hw: pointer as obtained from ieee802154_alloc_hw().
*
+ * Transceivers usually have either one transmit framebuffer or one framebuffer
+ * for both transmitting and receiving. Hence, the core currently only handles
+ * one frame at a time for each phy, which means we need to tell upper layers to
+ * stop giving us new skbs while we are busy with the transmitted one. The queue
+ * must then be stopped before transmitting.
+ *
* Drivers should use this function instead of netif_stop_queue.
*/
void ieee802154_stop_queue(struct ieee802154_hw *hw);
@@ -486,4 +498,23 @@ void ieee802154_stop_queue(struct ieee802154_hw *hw);
void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
bool ifs_handling);
+/**
+ * ieee802154_xmit_error - offloaded frame transmission failed
+ *
+ * @hw: pointer as obtained from ieee802154_alloc_hw().
+ * @skb: buffer for transmission
+ * @reason: error code
+ */
+void ieee802154_xmit_error(struct ieee802154_hw *hw, struct sk_buff *skb,
+ int reason);
+
+/**
+ * ieee802154_xmit_hw_error - frame could not be offloaded to the transmitter
+ * because of a hardware error (bus error, timeout, etc.)
+ *
+ * @hw: pointer as obtained from ieee802154_alloc_hw().
+ * @skb: buffer for transmission
+ */
+void ieee802154_xmit_hw_error(struct ieee802154_hw *hw, struct sk_buff *skb);
+
#endif /* NET_MAC802154_H */
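
Tying the helpers above together, a transceiver driver's asynchronous completion path might look roughly like this; only the ieee802154_* calls are real API, the handler itself is a sketch:

/* Sketch: report the outcome of an offloaded transmission. Both calls
 * hand the skb back to the core, which also wakes the queue that was
 * stopped for this transmission.
 */
static void example_xmit_done(struct ieee802154_hw *hw,
			      struct sk_buff *skb, int err)
{
	if (!err)
		ieee802154_xmit_complete(hw, skb, false);
	else
		ieee802154_xmit_error(hw, skb, err);
}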
diff --git a/include/net/macsec.h b/include/net/macsec.h
index 92e43db8b566..5b9c61c4d3a6 100644
--- a/include/net/macsec.h
+++ b/include/net/macsec.h
@@ -11,18 +11,63 @@
#include <uapi/linux/if_link.h>
#include <uapi/linux/if_macsec.h>
-typedef u64 __bitwise sci_t;
+#define MACSEC_DEFAULT_PN_LEN 4
+#define MACSEC_XPN_PN_LEN 8
#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
+#define MACSEC_SCI_LEN 8
+#define MACSEC_PORT_ES (htons(0x0001))
+
+#define MACSEC_TCI_VERSION 0x80
+#define MACSEC_TCI_ES 0x40 /* end station */
+#define MACSEC_TCI_SC 0x20 /* SCI present */
+#define MACSEC_TCI_SCB 0x10 /* epon */
+#define MACSEC_TCI_E 0x08 /* encryption */
+#define MACSEC_TCI_C 0x04 /* changed text */
+#define MACSEC_AN_MASK 0x03 /* association number */
+#define MACSEC_TCI_CONFID (MACSEC_TCI_E | MACSEC_TCI_C)
+
+#define MACSEC_DEFAULT_ICV_LEN 16
+
+typedef u64 __bitwise sci_t;
+typedef u32 __bitwise ssci_t;
+
+struct metadata_dst;
+
+typedef union salt {
+ struct {
+ u32 ssci;
+ u64 pn;
+ } __packed;
+ u8 bytes[MACSEC_SALT_LEN];
+} __packed salt_t;
+
+typedef union pn {
+ struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u32 lower;
+ u32 upper;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u32 upper;
+ u32 lower;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ };
+ u64 full64;
+} pn_t;
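
The pn union lets XPN code maintain the full 64-bit packet number while legacy (32-bit PN) paths keep reading the correct half, with endianness handled by the bitfield guards above; a minimal sketch:

/* Sketch: advance the 64-bit packet number and return the low 32 bits
 * as carried in a non-XPN SecTAG.
 */
static u32 example_advance_pn(pn_t *pn)
{
	pn->full64++;		/* single 64-bit counter */
	return pn->lower;	/* low half, correct on either endianness */
}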
+
/**
* struct macsec_key - SA key
* @id: user-provided key identifier
* @tfm: crypto struct, key storage
+ * @salt: salt used to generate IV in XPN cipher suites
*/
struct macsec_key {
u8 id[MACSEC_KEYID_LEN];
struct crypto_aead *tfm;
+ salt_t salt;
};
struct macsec_rx_sc_stats {
@@ -58,18 +103,34 @@ struct macsec_tx_sc_stats {
__u64 OutOctetsEncrypted;
};
+struct macsec_dev_stats {
+ __u64 OutPktsUntagged;
+ __u64 InPktsUntagged;
+ __u64 OutPktsTooLong;
+ __u64 InPktsNoTag;
+ __u64 InPktsBadTag;
+ __u64 InPktsUnknownSCI;
+ __u64 InPktsNoSCI;
+ __u64 InPktsOverrun;
+};
+
/**
* struct macsec_rx_sa - receive secure association
* @active: whether this SA is currently in use
* @next_pn: packet number expected for the next packet
* @lock: protects next_pn manipulations
* @key: key structure
+ * @ssci: short secure channel identifier
* @stats: per-SA stats
*/
struct macsec_rx_sa {
struct macsec_key key;
+ ssci_t ssci;
spinlock_t lock;
- u32 next_pn;
+ union {
+ pn_t next_pn_halves;
+ u64 next_pn;
+ };
refcount_t refcnt;
bool active;
struct macsec_rx_sa_stats __percpu *stats;
@@ -110,12 +171,17 @@ struct macsec_rx_sc {
* @next_pn: packet number to use for the next packet
* @lock: protects next_pn manipulations
* @key: key structure
+ * @ssci: short secure channel identifier
* @stats: per-SA stats
*/
struct macsec_tx_sa {
struct macsec_key key;
+ ssci_t ssci;
spinlock_t lock;
- u32 next_pn;
+ union {
+ pn_t next_pn_halves;
+ u64 next_pn;
+ };
refcount_t refcnt;
bool active;
struct macsec_tx_sa_stats __percpu *stats;
@@ -132,6 +198,7 @@ struct macsec_tx_sa {
* @scb: single copy broadcast flag
* @sa: array of secure associations
* @stats: stats for this TXSC
+ * @md_dst: MACsec offload metadata dst
*/
struct macsec_tx_sc {
bool active;
@@ -142,6 +209,7 @@ struct macsec_tx_sc {
bool scb;
struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
struct pcpu_tx_sc_stats __percpu *stats;
+ struct metadata_dst *md_dst;
};
/**
@@ -152,6 +220,7 @@ struct macsec_tx_sc {
* @key_len: length of keys used by the cipher suite
* @icv_len: length of ICV used by the cipher suite
* @validate_frames: validation mode
+ * @xpn: enable XPN for this SecY
* @operational: MAC_Operational flag
* @protect_frames: enable protection for this SecY
* @replay_protect: enable packet number checks on receive
@@ -166,6 +235,7 @@ struct macsec_secy {
u16 key_len;
u16 icv_len;
enum macsec_validation_type validate_frames;
+ bool xpn;
bool operational;
bool protect_frames;
bool replay_protect;
@@ -178,21 +248,29 @@ struct macsec_secy {
* struct macsec_context - MACsec context for hardware offloading
*/
struct macsec_context {
- struct phy_device *phydev;
+ union {
+ struct net_device *netdev;
+ struct phy_device *phydev;
+ };
enum macsec_offload offload;
struct macsec_secy *secy;
struct macsec_rx_sc *rx_sc;
struct {
unsigned char assoc_num;
- u8 key[MACSEC_KEYID_LEN];
+ u8 key[MACSEC_MAX_KEY_LEN];
union {
struct macsec_rx_sa *rx_sa;
struct macsec_tx_sa *tx_sa;
};
} sa;
-
- u8 prepare:1;
+ union {
+ struct macsec_tx_sc_stats *tx_sc_stats;
+ struct macsec_tx_sa_stats *tx_sa_stats;
+ struct macsec_rx_sc_stats *rx_sc_stats;
+ struct macsec_rx_sa_stats *rx_sa_stats;
+ struct macsec_dev_stats *dev_stats;
+ } stats;
};
/**
@@ -217,8 +295,21 @@ struct macsec_ops {
int (*mdo_add_txsa)(struct macsec_context *ctx);
int (*mdo_upd_txsa)(struct macsec_context *ctx);
int (*mdo_del_txsa)(struct macsec_context *ctx);
+ /* Statistics */
+ int (*mdo_get_dev_stats)(struct macsec_context *ctx);
+ int (*mdo_get_tx_sc_stats)(struct macsec_context *ctx);
+ int (*mdo_get_tx_sa_stats)(struct macsec_context *ctx);
+ int (*mdo_get_rx_sc_stats)(struct macsec_context *ctx);
+ int (*mdo_get_rx_sa_stats)(struct macsec_context *ctx);
};
void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);
+static inline bool macsec_send_sci(const struct macsec_secy *secy)
+{
+ const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+
+ return tx_sc->send_sci ||
+ (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
+}
#endif /* _NET_MACSEC_H_ */
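
As a usage sketch, macsec_send_sci() decides whether the 8-byte SCI is carried in the SecTAG, which changes the tag length; the 6-byte base (TCI/AN, SL, 32-bit PN) mirrors what the MACsec driver uses, but this helper is illustrative only:

/* Sketch: SecTAG length, excluding the 2-byte MACsec ethertype. */
static unsigned int example_sectag_len(const struct macsec_secy *secy)
{
	unsigned int len = 6;		/* TCI/AN (1) + SL (1) + PN (4) */

	if (macsec_send_sci(secy))
		len += MACSEC_SCI_LEN;	/* explicit 8-byte SCI */
	return len;
}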
diff --git a/include/net/mctp.h b/include/net/mctp.h
new file mode 100644
index 000000000000..82800d521c3d
--- /dev/null
+++ b/include/net/mctp.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Management Component Transport Protocol (MCTP)
+ *
+ * Copyright (c) 2021 Code Construct
+ * Copyright (c) 2021 Google
+ */
+
+#ifndef __NET_MCTP_H
+#define __NET_MCTP_H
+
+#include <linux/bits.h>
+#include <linux/mctp.h>
+#include <linux/netdevice.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+
+/* MCTP packet definitions */
+struct mctp_hdr {
+ u8 ver;
+ u8 dest;
+ u8 src;
+ u8 flags_seq_tag;
+};
+
+#define MCTP_VER_MIN 1
+#define MCTP_VER_MAX 1
+
+/* Definitions for flags_seq_tag field */
+#define MCTP_HDR_FLAG_SOM BIT(7)
+#define MCTP_HDR_FLAG_EOM BIT(6)
+#define MCTP_HDR_FLAG_TO BIT(3)
+#define MCTP_HDR_FLAGS GENMASK(5, 3)
+#define MCTP_HDR_SEQ_SHIFT 4
+#define MCTP_HDR_SEQ_MASK GENMASK(1, 0)
+#define MCTP_HDR_TAG_SHIFT 0
+#define MCTP_HDR_TAG_MASK GENMASK(2, 0)
+
+#define MCTP_INITIAL_DEFAULT_NET 1
+
+static inline bool mctp_address_unicast(mctp_eid_t eid)
+{
+ return eid >= 8 && eid < 255;
+}
+
+static inline bool mctp_address_broadcast(mctp_eid_t eid)
+{
+ return eid == 255;
+}
+
+static inline bool mctp_address_null(mctp_eid_t eid)
+{
+ return eid == 0;
+}
+
+static inline bool mctp_address_matches(mctp_eid_t match, mctp_eid_t eid)
+{
+ return match == eid || match == MCTP_ADDR_ANY;
+}
+
+static inline struct mctp_hdr *mctp_hdr(struct sk_buff *skb)
+{
+ return (struct mctp_hdr *)skb_network_header(skb);
+}
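
A short sketch of how the shift/mask definitions above decompose the packed flags_seq_tag byte; the helper names are ours:

/* Sketch: extract the 2-bit sequence number and 3-bit tag. */
static u8 example_mctp_seq(const struct mctp_hdr *hdr)
{
	return (hdr->flags_seq_tag >> MCTP_HDR_SEQ_SHIFT) & MCTP_HDR_SEQ_MASK;
}

static u8 example_mctp_tag(const struct mctp_hdr *hdr)
{
	return (hdr->flags_seq_tag >> MCTP_HDR_TAG_SHIFT) & MCTP_HDR_TAG_MASK;
}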
+
+/* socket implementation */
+struct mctp_sock {
+ struct sock sk;
+
+ /* bind() params */
+ unsigned int bind_net;
+ mctp_eid_t bind_addr;
+ __u8 bind_type;
+
+ /* sendmsg()/recvmsg() uses struct sockaddr_mctp_ext */
+ bool addr_ext;
+
+ /* list of mctp_sk_key, for incoming tag lookup. Updates are protected
+ * by sk->net->keys_lock
+ */
+ struct hlist_head keys;
+
+ /* mechanism for expiring allocated keys; will release an allocated
+ * tag, and any netdev state for a request/response pairing
+ */
+ struct timer_list key_expiry;
+};
+
+/* Key for matching incoming packets to sockets or reassembly contexts.
+ * Packets are matched on (src,dest,tag).
+ *
+ * Lifetime / locking requirements:
+ *
+ * - individual key data (i.e., the struct itself) is protected by key->lock;
+ * changes must be made with that lock held.
+ *
+ * - the lookup fields: peer_addr, local_addr and tag are set before the
+ * key is added to lookup lists, and never updated.
+ *
+ * - a ref to the key must be held (through key->refs) if a pointer to the
+ * key is to be accessed after key->lock is released.
+ *
+ * - a mctp_sk_key contains a reference to a struct sock; this is valid
+ * for the life of the key. On sock destruction (through unhash), the key is
+ * removed from lists (see below), and marked invalid.
+ *
+ * - these mctp_sk_keys appear on two lists:
+ * 1) the struct mctp_sock->keys list
+ * 2) the struct netns_mctp->keys list
+ *
+ * presence on these lists requires a (single) refcount to be held; both
+ * lists are updated as a single operation.
+ *
+ * Updates and lookups in either list are performed under the
+ * netns_mctp->keys lock. Lookup functions will need to lock the key and
+ * take a reference before unlocking the keys_lock. Consequently, the list's
+ * keys_lock *cannot* be acquired with the individual key->lock held.
+ *
+ * - a key may have a sk_buff attached as part of an in-progress message
+ * reassembly (->reasm_head). The reasm data is protected by the individual
+ * key->lock.
+ *
+ * - there are two destruction paths for a mctp_sk_key:
+ *
+ * - through socket unhash (see mctp_sk_unhash). This performs the list
+ * removal under keys_lock.
+ *
+ * - where a key is established to receive a reply message: after receiving
+ * the (complete) reply, or during reassembly errors. Here, we clean up
+ * the reassembly context (marking reasm_dead, to prevent another from
+ * starting), and remove the socket from the netns & socket lists.
+ *
+ * - through an expiry timeout, on a per-socket timer
+ */
+struct mctp_sk_key {
+ mctp_eid_t peer_addr;
+ mctp_eid_t local_addr; /* MCTP_ADDR_ANY for local owned tags */
+ __u8 tag; /* incoming tag match; invert TO for local */
+
+ /* we hold a ref to sk when set */
+ struct sock *sk;
+
+ /* routing lookup list */
+ struct hlist_node hlist;
+
+ /* per-socket list */
+ struct hlist_node sklist;
+
+ /* lock protects against concurrent updates to the reassembly and
+ * expiry data below.
+ */
+ spinlock_t lock;
+
+ /* Keys are referenced during the output path, which may sleep */
+ refcount_t refs;
+
+ /* incoming fragment reassembly context */
+ struct sk_buff *reasm_head;
+ struct sk_buff **reasm_tailp;
+ bool reasm_dead;
+ u8 last_seq;
+
+ /* key validity */
+ bool valid;
+
+ /* expiry timeout; valid (above) cleared on expiry */
+ unsigned long expiry;
+
+ /* free to use for device flow state tracking. Initialised to
+ * zero on initial key creation
+ */
+ unsigned long dev_flow_state;
+ struct mctp_dev *dev;
+
+ /* a tag allocated with the SIOCMCTPALLOCTAG ioctl will not expire
+ * automatically on timeout or response; instead, SIOCMCTPDROPTAG
+ * is used.
+ */
+ bool manual_alloc;
+};
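
The rules above translate into a lookup pattern roughly like the following sketch; struct netns_mctp (with its keys list and keys_lock) lives in net/netns/mctp.h, the match test is deliberately simplified, and the real code uses the irqsave locking variants:

/* Sketch: find a key under keys_lock and take a reference before the
 * list lock is dropped; callers that then touch reassembly state must
 * additionally take key->lock.
 */
static struct mctp_sk_key *example_key_lookup(struct netns_mctp *mns,
					      mctp_eid_t peer, u8 tag)
{
	struct mctp_sk_key *key;

	spin_lock(&mns->keys_lock);
	hlist_for_each_entry(key, &mns->keys, hlist) {
		if (key->peer_addr == peer && key->tag == tag) {
			refcount_inc(&key->refs);
			spin_unlock(&mns->keys_lock);
			return key;
		}
	}
	spin_unlock(&mns->keys_lock);
	return NULL;
}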
+
+struct mctp_skb_cb {
+ unsigned int magic;
+ unsigned int net;
+ int ifindex; /* extended/direct addressing if set */
+ mctp_eid_t src;
+ unsigned char halen;
+ unsigned char haddr[MAX_ADDR_LEN];
+};
+
+/* skb control-block accessors with a little extra debugging for initial
+ * development.
+ *
+ * TODO: remove checks & mctp_skb_cb->magic; replace callers of __mctp_cb
+ * with mctp_cb().
+ *
+ * __mctp_cb() is only for the initial ingress code; we should see ->magic set
+ * at all times after this.
+ */
+static inline struct mctp_skb_cb *__mctp_cb(struct sk_buff *skb)
+{
+ struct mctp_skb_cb *cb = (void *)skb->cb;
+
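+	/* 0x4d435450 spells "MCTP" in ASCII */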
+ cb->magic = 0x4d435450;
+ return cb;
+}
+
+static inline struct mctp_skb_cb *mctp_cb(struct sk_buff *skb)
+{
+ struct mctp_skb_cb *cb = (void *)skb->cb;
+
+ BUILD_BUG_ON(sizeof(struct mctp_skb_cb) > sizeof(skb->cb));
+ WARN_ON(cb->magic != 0x4d435450);
+	return cb;
+}
+
+/* If CONFIG_MCTP_FLOWS, we may add one of these as a SKB extension,
+ * indicating the flow to the device driver.
+ */
+struct mctp_flow {
+ struct mctp_sk_key *key;
+};
+
+/* Route definition.
+ *
+ * These are held in the pernet->mctp.routes list, with RCU protection for
+ * removed routes. We hold a reference to the netdev; routes need to be
+ * dropped on NETDEV_UNREGISTER events.
+ *
+ * Updates to the route table are performed under rtnl; all reads under RCU,
+ * so routes cannot be referenced over an RCU grace period. Specifically, a
+ * caller cannot block between mctp_route_lookup() and mctp_route_release().
+ */
+struct mctp_route {
+ mctp_eid_t min, max;
+
+ struct mctp_dev *dev;
+ unsigned int mtu;
+ unsigned char type;
+ int (*output)(struct mctp_route *route,
+ struct sk_buff *skb);
+
+ struct list_head list;
+ refcount_t refs;
+ struct rcu_head rcu;
+};
+
+/* route interfaces */
+struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
+ mctp_eid_t daddr);
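
Given the rule above, a send path is bracketed by RCU with no sleeping in between; mctp_route_release() is the counterpart named in the comment (declared elsewhere), and this is a sketch rather than the real transmit code:

/* Sketch: look up a route and transmit without blocking in between. */
static int example_route_xmit(struct net *net, unsigned int dnet,
			      mctp_eid_t daddr, struct sk_buff *skb)
{
	struct mctp_route *rt;
	int rc = -EHOSTUNREACH;

	rcu_read_lock();
	rt = mctp_route_lookup(net, dnet, daddr);
	if (rt) {
		rc = rt->output(rt, skb);	/* must not sleep */
		mctp_route_release(rt);		/* drops the lookup ref */
	}
	rcu_read_unlock();
	return rc;
}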
+
+int mctp_local_output(struct sock *sk, struct mctp_route *rt,
+ struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag);
+
+void mctp_key_unref(struct mctp_sk_key *key);
+struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
+ mctp_eid_t daddr, mctp_eid_t saddr,
+ bool manual, u8 *tagp);
+
+/* routing <--> device interface */
+unsigned int mctp_default_net(struct net *net);
+int mctp_default_net_set(struct net *net, unsigned int index);
+int mctp_route_add_local(struct mctp_dev *mdev, mctp_eid_t addr);
+int mctp_route_remove_local(struct mctp_dev *mdev, mctp_eid_t addr);
+void mctp_route_remove_dev(struct mctp_dev *mdev);
+
+/* neighbour definitions */
+enum mctp_neigh_source {
+ MCTP_NEIGH_STATIC,
+ MCTP_NEIGH_DISCOVER,
+};
+
+struct mctp_neigh {
+ struct mctp_dev *dev;
+ mctp_eid_t eid;
+ enum mctp_neigh_source source;
+
+ unsigned char ha[MAX_ADDR_LEN];
+
+ struct list_head list;
+ struct rcu_head rcu;
+};
+
+int mctp_neigh_init(void);
+void mctp_neigh_exit(void);
+
+// ret_hwaddr may be NULL, otherwise must have space for MAX_ADDR_LEN
+int mctp_neigh_lookup(struct mctp_dev *dev, mctp_eid_t eid,
+ void *ret_hwaddr);
+void mctp_neigh_remove_dev(struct mctp_dev *mdev);
+
+int mctp_routes_init(void);
+void mctp_routes_exit(void);
+
+void mctp_device_init(void);
+void mctp_device_exit(void);
+
+#endif /* __NET_MCTP_H */
diff --git a/include/net/mctpdevice.h b/include/net/mctpdevice.h
new file mode 100644
index 000000000000..5c0d04b5c12c
--- /dev/null
+++ b/include/net/mctpdevice.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Management Component Transport Protocol (MCTP) - device
+ * definitions.
+ *
+ * Copyright (c) 2021 Code Construct
+ * Copyright (c) 2021 Google
+ */
+
+#ifndef __NET_MCTPDEVICE_H
+#define __NET_MCTPDEVICE_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/refcount.h>
+
+struct mctp_sk_key;
+
+struct mctp_dev {
+ struct net_device *dev;
+
+ refcount_t refs;
+
+ unsigned int net;
+
+ const struct mctp_netdev_ops *ops;
+
+	/* Only modified under RTNL; reads are done with addrs_lock held */
+ u8 *addrs;
+ size_t num_addrs;
+ spinlock_t addrs_lock;
+
+ struct rcu_head rcu;
+};
+
+struct mctp_netdev_ops {
+ void (*release_flow)(struct mctp_dev *dev,
+ struct mctp_sk_key *key);
+};
+
+#define MCTP_INITIAL_DEFAULT_NET 1
+
+struct mctp_dev *mctp_dev_get_rtnl(const struct net_device *dev);
+struct mctp_dev *__mctp_dev_get(const struct net_device *dev);
+
+int mctp_register_netdev(struct net_device *dev,
+ const struct mctp_netdev_ops *ops);
+void mctp_unregister_netdev(struct net_device *dev);
+
+void mctp_dev_hold(struct mctp_dev *mdev);
+void mctp_dev_put(struct mctp_dev *mdev);
+
+void mctp_dev_set_key(struct mctp_dev *dev, struct mctp_sk_key *key);
+void mctp_dev_release_key(struct mctp_dev *dev, struct mctp_sk_key *key);
+
+#endif /* __NET_MCTPDEVICE_H */
diff --git a/include/net/mip6.h b/include/net/mip6.h
index f1c28971c362..67cd7e50804c 100644
--- a/include/net/mip6.h
+++ b/include/net/mip6.h
@@ -25,7 +25,7 @@ struct ip6_mh {
__u8 ip6mh_reserved;
__u16 ip6mh_cksum;
/* Followed by type specific messages */
- __u8 data[0];
+ __u8 data[];
} __packed;
#define IP6_MH_TYPE_BRR 0 /* Binding Refresh Request */
diff --git a/include/net/mld.h b/include/net/mld.h
index b0f5b3105ef0..c07359808493 100644
--- a/include/net/mld.h
+++ b/include/net/mld.h
@@ -24,12 +24,12 @@ struct mld2_grec {
__u8 grec_auxwords;
__be16 grec_nsrcs;
struct in6_addr grec_mca;
- struct in6_addr grec_src[0];
+ struct in6_addr grec_src[];
};
struct mld2_report {
struct icmp6hdr mld2r_hdr;
- struct mld2_grec mld2r_grec[0];
+ struct mld2_grec mld2r_grec[];
};
#define mld2r_type mld2r_hdr.icmp6_type
@@ -55,7 +55,7 @@ struct mld2_query {
#endif
__u8 mld2q_qqic;
__be16 mld2q_nsrcs;
- struct in6_addr mld2q_srcs[0];
+ struct in6_addr mld2q_srcs[];
};
#define mld2q_type mld2q_hdr.icmp6_type
@@ -92,6 +92,9 @@ struct mld2_query {
#define MLD_EXP_MIN_LIMIT 32768UL
#define MLDV1_MRD_MAX_COMPAT (MLD_EXP_MIN_LIMIT - 1)
+#define MLD_MAX_QUEUE 8
+#define MLD_MAX_SKBS 32
+
static inline unsigned long mldv2_mrc(const struct mld2_query *mlh2)
{
/* RFC3810, 5.1.3. Maximum Response Code */
diff --git a/include/net/mpls.h b/include/net/mpls.h
index ccaf238e8ea7..0bb7944e7b08 100644
--- a/include/net/mpls.h
+++ b/include/net/mpls.h
@@ -8,6 +8,7 @@
#include <linux/if_ether.h>
#include <linux/netdevice.h>
+#include <linux/mpls.h>
#define MPLS_HLEN 4
@@ -25,4 +26,20 @@ static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb)
{
return (struct mpls_shim_hdr *)skb_network_header(skb);
}
+
+static inline struct mpls_shim_hdr mpls_entry_encode(u32 label,
+ unsigned int ttl,
+ unsigned int tc,
+ bool bos)
+{
+ struct mpls_shim_hdr result;
+
+ result.label_stack_entry =
+ cpu_to_be32((label << MPLS_LS_LABEL_SHIFT) |
+ (tc << MPLS_LS_TC_SHIFT) |
+ (bos ? (1 << MPLS_LS_S_SHIFT) : 0) |
+ (ttl << MPLS_LS_TTL_SHIFT));
+ return result;
+}
+
#endif
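
For example, pushing a single bottom-of-stack label in front of a packet; the label/TTL values are illustrative:

/* Sketch: prepend one MPLS LSE (label 100, TTL 64, TC 0, S=1). */
static void example_push_label(struct sk_buff *skb)
{
	struct mpls_shim_hdr *hdr;

	skb_push(skb, MPLS_HLEN);
	skb_reset_network_header(skb);
	hdr = mpls_hdr(skb);
	*hdr = mpls_entry_encode(100, 64, 0, true);
}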
diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h
index 6b4759eae158..0c71c27979fb 100644
--- a/include/net/mpls_iptunnel.h
+++ b/include/net/mpls_iptunnel.h
@@ -6,12 +6,15 @@
#ifndef _NET_MPLS_IPTUNNEL_H
#define _NET_MPLS_IPTUNNEL_H 1
+#include <linux/types.h>
+#include <net/lwtunnel.h>
+
struct mpls_iptunnel_encap {
u8 labels;
u8 ttl_propagate;
u8 default_ttl;
u8 reserved1;
- u32 label[0];
+ u32 label[];
};
static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate)
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index c971d25431ea..412479ebf5ad 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -12,32 +12,92 @@
#include <linux/tcp.h>
#include <linux/types.h>
+struct mptcp_info;
+struct mptcp_sock;
+struct seq_file;
+
/* MPTCP sk_buff extension data */
struct mptcp_ext {
- u64 data_ack;
+ union {
+ u64 data_ack;
+ u32 data_ack32;
+ };
u64 data_seq;
u32 subflow_seq;
u16 data_len;
+ __sum16 csum;
u8 use_map:1,
dsn64:1,
data_fin:1,
use_ack:1,
ack64:1,
mpc_map:1,
- __unused:2;
- /* one byte hole */
+ frozen:1,
+ reset_transient:1;
+ u8 reset_reason:4,
+ csum_reqd:1,
+ infinite_map:1;
+};
+
+#define MPTCPOPT_HMAC_LEN 20
+#define MPTCP_RM_IDS_MAX 8
+
+struct mptcp_rm_list {
+ u8 ids[MPTCP_RM_IDS_MAX];
+ u8 nr;
+};
+
+struct mptcp_addr_info {
+ u8 id;
+ sa_family_t family;
+ __be16 port;
+ union {
+ struct in_addr addr;
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ struct in6_addr addr6;
+#endif
+ };
};
struct mptcp_out_options {
#if IS_ENABLED(CONFIG_MPTCP)
u16 suboptions;
- u64 sndr_key;
- u64 rcvr_key;
- struct mptcp_ext ext_copy;
+ struct mptcp_rm_list rm_list;
+ u8 join_id;
+ u8 backup;
+ u8 reset_reason:4,
+ reset_transient:1,
+ csum_reqd:1,
+ allow_join_id0:1;
+ union {
+ struct {
+ u64 sndr_key;
+ u64 rcvr_key;
+ u64 data_seq;
+ u32 subflow_seq;
+ u16 data_len;
+ __sum16 csum;
+ };
+ struct {
+ struct mptcp_addr_info addr;
+ u64 ahmac;
+ };
+ struct {
+ struct mptcp_ext ext_copy;
+ u64 fail_seq;
+ };
+ struct {
+ u32 nonce;
+ u32 token;
+ u64 thmac;
+ u8 hmac[MPTCPOPT_HMAC_LEN];
+ };
+ };
#endif
};
#ifdef CONFIG_MPTCP
+extern struct request_sock_ops mptcp_subflow_request_sock_ops;
void mptcp_init(void);
@@ -51,20 +111,25 @@ static inline bool rsk_is_mptcp(const struct request_sock *req)
return tcp_rsk(req)->is_mptcp;
}
-void mptcp_parse_option(const struct sk_buff *skb, const unsigned char *ptr,
- int opsize, struct tcp_options_received *opt_rx);
+static inline bool rsk_drop_req(const struct request_sock *req)
+{
+ return tcp_rsk(req)->is_mptcp && tcp_rsk(req)->drop_req;
+}
+
+void mptcp_space(const struct sock *ssk, int *space, int *full_space);
bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
unsigned int *size, struct mptcp_out_options *opts);
-void mptcp_rcv_synsent(struct sock *sk);
bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
struct mptcp_out_options *opts);
bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
unsigned int *size, unsigned int remaining,
struct mptcp_out_options *opts);
-void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
- struct tcp_options_received *opt_rx);
+bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb);
-void mptcp_write_options(__be32 *ptr, struct mptcp_out_options *opts);
+void mptcp_write_options(struct tcphdr *th, __be32 *ptr, struct tcp_sock *tp,
+ struct mptcp_out_options *opts);
+
+void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info);
/* move the skb extension ownership, with the assumption that 'to' is
* newly allocated
@@ -83,6 +148,19 @@ static inline void mptcp_skb_ext_move(struct sk_buff *to,
from->active_extensions = 0;
}
+static inline void mptcp_skb_ext_copy(struct sk_buff *to,
+ struct sk_buff *from)
+{
+ struct mptcp_ext *from_ext;
+
+ from_ext = skb_ext_find(from, SKB_EXT_MPTCP);
+ if (!from_ext)
+ return;
+
+ from_ext->frozen = 1;
+ skb_ext_copy(to, from);
+}
+
static inline bool mptcp_ext_matches(const struct mptcp_ext *to_ext,
const struct mptcp_ext *from_ext)
{
@@ -106,6 +184,20 @@ static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
skb_ext_find(from, SKB_EXT_MPTCP));
}
+void mptcp_seq_show(struct seq_file *seq);
+int mptcp_subflow_init_cookie_req(struct request_sock *req,
+ const struct sock *sk_listener,
+ struct sk_buff *skb);
+
+__be32 mptcp_get_reset_option(const struct sk_buff *skb);
+
+static inline __be32 mptcp_reset_option(const struct sk_buff *skb)
+{
+ if (skb_ext_exist(skb, SKB_EXT_MPTCP))
+ return mptcp_get_reset_option(skb);
+
+ return htonl(0u);
+}
#else
static inline void mptcp_init(void)
@@ -122,10 +214,9 @@ static inline bool rsk_is_mptcp(const struct request_sock *req)
return false;
}
-static inline void mptcp_parse_option(const struct sk_buff *skb,
- const unsigned char *ptr, int opsize,
- struct tcp_options_received *opt_rx)
+static inline bool rsk_drop_req(const struct request_sock *req)
{
+ return false;
}
static inline bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
@@ -135,10 +226,6 @@ static inline bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
return false;
}
-static inline void mptcp_rcv_synsent(struct sock *sk)
-{
-}
-
static inline bool mptcp_synack_options(const struct request_sock *req,
unsigned int *size,
struct mptcp_out_options *opts)
@@ -155,10 +242,10 @@ static inline bool mptcp_established_options(struct sock *sk,
return false;
}
-static inline void mptcp_incoming_options(struct sock *sk,
- struct sk_buff *skb,
- struct tcp_options_received *opt_rx)
+static inline bool mptcp_incoming_options(struct sock *sk,
+ struct sk_buff *skb)
{
+ return true;
}
static inline void mptcp_skb_ext_move(struct sk_buff *to,
@@ -166,12 +253,28 @@ static inline void mptcp_skb_ext_move(struct sk_buff *to,
{
}
+static inline void mptcp_skb_ext_copy(struct sk_buff *to,
+ struct sk_buff *from)
+{
+}
+
static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
const struct sk_buff *from)
{
return true;
}
+static inline void mptcp_space(const struct sock *ssk, int *s, int *fs) { }
+static inline void mptcp_seq_show(struct seq_file *seq) { }
+
+static inline int mptcp_subflow_init_cookie_req(struct request_sock *req,
+ const struct sock *sk_listener,
+ struct sk_buff *skb)
+{
+ return 0; /* TCP fallback */
+}
+
+static inline __be32 mptcp_reset_option(const struct sk_buff *skb) { return htonl(0u); }
#endif /* CONFIG_MPTCP */
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
@@ -182,4 +285,14 @@ static inline int mptcpv6_init(void) { return 0; }
static inline void mptcpv6_handle_mapped(struct sock *sk, bool mapped) { }
#endif
+#if defined(CONFIG_MPTCP) && defined(CONFIG_BPF_SYSCALL)
+struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk);
+#else
+static inline struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk) { return NULL; }
+#endif
+
+#if !IS_ENABLED(CONFIG_MPTCP)
+struct mptcp_sock { };
+#endif
+
#endif /* __NET_MPTCP_H */
diff --git a/include/net/mrp.h b/include/net/mrp.h
index 1c308c034e1a..92cd3fb6cf9d 100644
--- a/include/net/mrp.h
+++ b/include/net/mrp.h
@@ -2,6 +2,10 @@
#ifndef _NET_MRP_H
#define _NET_MRP_H
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
#define MRP_END_MARK 0x0
struct mrp_pdu_hdr {
diff --git a/include/net/ncsi.h b/include/net/ncsi.h
index fbefe80361ee..08a50d9acb0a 100644
--- a/include/net/ncsi.h
+++ b/include/net/ncsi.h
@@ -2,6 +2,8 @@
#ifndef __NET_NCSI_H
#define __NET_NCSI_H
+#include <linux/types.h>
+
/*
* The NCSI device states as seen from outside. More NCSI device states are
* only visible internally (in net/ncsi/internal.h). When the NCSI device
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index b5ebeb3b0de0..da7eec8669ec 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -41,6 +41,7 @@ enum {
ND_OPT_DNSSL = 31, /* RFC6106 */
ND_OPT_6CO = 34, /* RFC6775 */
ND_OPT_CAPTIVE_PORTAL = 37, /* RFC7710 */
+ ND_OPT_PREF64 = 38, /* RFC8781 */
__ND_OPT_MAX
};
@@ -80,12 +81,12 @@ extern struct neigh_table nd_tbl;
struct nd_msg {
struct icmp6hdr icmph;
struct in6_addr target;
- __u8 opt[0];
+ __u8 opt[];
};
struct rs_msg {
struct icmp6hdr icmph;
- __u8 opt[0];
+ __u8 opt[];
};
struct ra_msg {
@@ -98,7 +99,7 @@ struct rd_msg {
struct icmp6hdr icmph;
struct in6_addr target;
struct in6_addr dest;
- __u8 opt[0];
+ __u8 opt[];
};
struct nd_opt_hdr {
@@ -136,7 +137,7 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
u8 *opt, int opt_len,
struct ndisc_options *ndopts);
-void __ndisc_fill_addr_option(struct sk_buff *skb, int type, void *data,
+void __ndisc_fill_addr_option(struct sk_buff *skb, int type, const void *data,
int data_len, int pad);
#define NDISC_OPS_REDIRECT_DATA_SPACE 2
@@ -410,13 +411,7 @@ static inline void __ipv6_confirm_neigh(struct net_device *dev,
rcu_read_lock_bh();
n = __ipv6_neigh_lookup_noref(dev, pkey);
- if (n) {
- unsigned long now = jiffies;
-
- /* avoid dirtying neighbour */
- if (READ_ONCE(n->confirmed) != now)
- WRITE_ONCE(n->confirmed, now);
- }
+ neigh_confirm(n);
rcu_read_unlock_bh();
}
@@ -427,13 +422,7 @@ static inline void __ipv6_confirm_neigh_stub(struct net_device *dev,
rcu_read_lock_bh();
n = __ipv6_neigh_lookup_noref_stub(dev, pkey);
- if (n) {
- unsigned long now = jiffies;
-
- /* avoid dirtying neighbour */
- if (READ_ONCE(n->confirmed) != now)
- WRITE_ONCE(n->confirmed, now);
- }
+ neigh_confirm(n);
rcu_read_unlock_bh();
}
@@ -458,10 +447,15 @@ void ndisc_cleanup(void);
int ndisc_rcv(struct sk_buff *skb);
+struct sk_buff *ndisc_ns_create(struct net_device *dev, const struct in6_addr *solicit,
+ const struct in6_addr *saddr, u64 nonce);
void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
const struct in6_addr *daddr, const struct in6_addr *saddr,
u64 nonce);
+void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
+ const struct in6_addr *saddr);
+
void ndisc_send_rs(struct net_device *dev,
const struct in6_addr *saddr, const struct in6_addr *daddr);
void ndisc_send_na(struct net_device *dev, const struct in6_addr *daddr,
@@ -486,14 +480,14 @@ int igmp6_late_init(void);
void igmp6_cleanup(void);
void igmp6_late_cleanup(void);
-int igmp6_event_query(struct sk_buff *skb);
+void igmp6_event_query(struct sk_buff *skb);
-int igmp6_event_report(struct sk_buff *skb);
+void igmp6_event_report(struct sk_buff *skb);
#ifdef CONFIG_SYSCTL
int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+ void *buffer, size_t *lenp, loff_t *ppos);
int ndisc_ifinfo_sysctl_strategy(struct ctl_table *ctl,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 8ec77bfdc1a4..20745cf7ae1a 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -48,6 +48,7 @@ enum {
NEIGH_VAR_RETRANS_TIME,
NEIGH_VAR_BASE_REACHABLE_TIME,
NEIGH_VAR_DELAY_PROBE_TIME,
+ NEIGH_VAR_INTERVAL_PROBE_TIME_MS,
NEIGH_VAR_GC_STALETIME,
NEIGH_VAR_QUEUE_LEN_BYTES,
NEIGH_VAR_PROXY_QLEN,
@@ -70,6 +71,7 @@ enum {
struct neigh_parms {
possible_net_t net;
struct net_device *dev;
+ netdevice_tracker dev_tracker;
struct list_head list;
int (*neigh_setup)(struct neighbour *);
struct neigh_table *tbl;
@@ -81,6 +83,7 @@ struct neigh_parms {
struct rcu_head rcu_head;
int reachable_time;
+ int qlen;
int data[NEIGH_VAR_DATA_MAX];
DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
};
@@ -144,19 +147,21 @@ struct neighbour {
struct timer_list timer;
unsigned long used;
atomic_t probes;
- __u8 flags;
- __u8 nud_state;
- __u8 type;
- __u8 dead;
+ u8 nud_state;
+ u8 type;
+ u8 dead;
u8 protocol;
+ u32 flags;
seqlock_t ha_lock;
unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))] __aligned(8);
struct hh_cache hh;
int (*output)(struct neighbour *, struct sk_buff *);
const struct neigh_ops *ops;
struct list_head gc_list;
+ struct list_head managed_list;
struct rcu_head rcu;
struct net_device *dev;
+ netdevice_tracker dev_tracker;
u8 primary_key[0];
} __randomize_layout;
@@ -172,9 +177,10 @@ struct pneigh_entry {
struct pneigh_entry *next;
possible_net_t net;
struct net_device *dev;
- u8 flags;
+ netdevice_tracker dev_tracker;
+ u32 flags;
u8 protocol;
- u8 key[0];
+ u8 key[];
};
/*
@@ -204,6 +210,7 @@ struct neigh_table {
int (*pconstructor)(struct pneigh_entry *);
void (*pdestructor)(struct pneigh_entry *);
void (*proxy_redo)(struct sk_buff *skb);
+ int (*is_multicast)(const void *pkey);
bool (*allow_add)(const struct net_device *dev,
struct netlink_ext_ack *extack);
char *id;
@@ -215,11 +222,13 @@ struct neigh_table {
int gc_thresh3;
unsigned long last_flush;
struct delayed_work gc_work;
+ struct delayed_work managed_work;
struct timer_list proxy_timer;
struct sk_buff_head proxy_queue;
atomic_t entries;
atomic_t gc_entries;
struct list_head gc_list;
+ struct list_head managed_list;
rwlock_t lock;
unsigned long last_rand;
struct neigh_statistics __percpu *stats;
@@ -249,20 +258,24 @@ static inline void *neighbour_priv(const struct neighbour *n)
}
/* flags for neigh_update() */
-#define NEIGH_UPDATE_F_OVERRIDE 0x00000001
-#define NEIGH_UPDATE_F_WEAK_OVERRIDE 0x00000002
-#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER 0x00000004
-#define NEIGH_UPDATE_F_EXT_LEARNED 0x20000000
-#define NEIGH_UPDATE_F_ISROUTER 0x40000000
-#define NEIGH_UPDATE_F_ADMIN 0x80000000
+#define NEIGH_UPDATE_F_OVERRIDE BIT(0)
+#define NEIGH_UPDATE_F_WEAK_OVERRIDE BIT(1)
+#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER BIT(2)
+#define NEIGH_UPDATE_F_USE BIT(3)
+#define NEIGH_UPDATE_F_MANAGED BIT(4)
+#define NEIGH_UPDATE_F_EXT_LEARNED BIT(5)
+#define NEIGH_UPDATE_F_ISROUTER BIT(6)
+#define NEIGH_UPDATE_F_ADMIN BIT(7)
+
+/* In-kernel representation for NDA_FLAGS_EXT flags: */
+#define NTF_OLD_MASK 0xff
+#define NTF_EXT_SHIFT 8
+#define NTF_EXT_MASK (NTF_EXT_MANAGED)
+
+#define NTF_MANAGED (NTF_EXT_MANAGED << NTF_EXT_SHIFT)
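
The split keeps the legacy 8-bit NTF_* flags intact while new flags arrive via the NDA_FLAGS_EXT attribute; folding the two netlink values into the in-kernel u32 looks roughly like this (attribute parsing elided):

/* Sketch: combine ndm_flags (u8) with masked NDA_FLAGS_EXT bits. */
static u32 example_neigh_flags(u8 ndm_flags, u32 ext_flags)
{
	return ndm_flags | ((ext_flags & NTF_EXT_MASK) << NTF_EXT_SHIFT);
}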
extern const struct nla_policy nda_policy[];
-static inline bool neigh_key_eq16(const struct neighbour *n, const void *pkey)
-{
- return *(const u16 *)n->primary_key == *(const u16 *)pkey;
-}
-
static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey)
{
return *(const u32 *)n->primary_key == *(const u32 *)pkey;
@@ -308,6 +321,17 @@ static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl,
return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev);
}
+static inline void neigh_confirm(struct neighbour *n)
+{
+ if (n) {
+ unsigned long now = jiffies;
+
+ /* avoid dirtying neighbour */
+ if (READ_ONCE(n->confirmed) != now)
+ WRITE_ONCE(n->confirmed, now);
+ }
+}
+
void neigh_table_init(int index, struct neigh_table *tbl);
int neigh_table_clear(int index, struct neigh_table *tbl);
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
@@ -323,7 +347,8 @@ static inline struct neighbour *neigh_create(struct neigh_table *tbl,
return __neigh_create(tbl, pkey, dev, true);
}
void neigh_destroy(struct neighbour *neigh);
-int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
+int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
+ const bool immediate_ok);
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags,
u32 nlmsg_pid);
void __neigh_set_probe_once(struct neighbour *neigh);
@@ -392,13 +417,12 @@ void *neigh_seq_next(struct seq_file *, void *, loff_t *);
void neigh_seq_stop(struct seq_file *, void *);
int neigh_proc_dointvec(struct ctl_table *ctl, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+ void *buffer, size_t *lenp, loff_t *ppos);
int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
- void __user *buffer,
+ void *buffer,
size_t *lenp, loff_t *ppos);
int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos);
+ void *buffer, size_t *lenp, loff_t *ppos);
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
proc_handler *proc_handler);
@@ -434,17 +458,24 @@ static inline struct neighbour * neigh_clone(struct neighbour *neigh)
#define neigh_hold(n) refcount_inc(&(n)->refcnt)
-static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
+static __always_inline int neigh_event_send_probe(struct neighbour *neigh,
+ struct sk_buff *skb,
+ const bool immediate_ok)
{
unsigned long now = jiffies;
-
+
if (READ_ONCE(neigh->used) != now)
WRITE_ONCE(neigh->used, now);
- if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE)))
- return __neigh_event_send(neigh, skb);
+ if (!(neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE)))
+ return __neigh_event_send(neigh, skb, immediate_ok);
return 0;
}
+static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
+{
+ return neigh_event_send_probe(neigh, skb, true);
+}
+
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
{
@@ -504,10 +535,15 @@ static inline int neigh_output(struct neighbour *n, struct sk_buff *skb,
{
const struct hh_cache *hh = &n->hh;
- if ((n->nud_state & NUD_CONNECTED) && hh->hh_len && !skip_cache)
+ /* n->nud_state and hh->hh_len could be changed under us.
+ * neigh_hh_output() is taking care of the race later.
+ */
+ if (!skip_cache &&
+ (READ_ONCE(n->nud_state) & NUD_CONNECTED) &&
+ READ_ONCE(hh->hh_len))
return neigh_hh_output(hh, skb);
- else
- return n->output(n, skb);
+
+ return n->output(n, skb);
}
static inline struct neighbour *
diff --git a/include/net/net_debug.h b/include/net/net_debug.h
new file mode 100644
index 000000000000..1e74684cbbdb
--- /dev/null
+++ b/include/net/net_debug.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_NET_DEBUG_H
+#define _LINUX_NET_DEBUG_H
+
+#include <linux/bug.h>
+#include <linux/kern_levels.h>
+
+struct net_device;
+
+__printf(3, 4) __cold
+void netdev_printk(const char *level, const struct net_device *dev,
+ const char *format, ...);
+__printf(2, 3) __cold
+void netdev_emerg(const struct net_device *dev, const char *format, ...);
+__printf(2, 3) __cold
+void netdev_alert(const struct net_device *dev, const char *format, ...);
+__printf(2, 3) __cold
+void netdev_crit(const struct net_device *dev, const char *format, ...);
+__printf(2, 3) __cold
+void netdev_err(const struct net_device *dev, const char *format, ...);
+__printf(2, 3) __cold
+void netdev_warn(const struct net_device *dev, const char *format, ...);
+__printf(2, 3) __cold
+void netdev_notice(const struct net_device *dev, const char *format, ...);
+__printf(2, 3) __cold
+void netdev_info(const struct net_device *dev, const char *format, ...);
+
+#define netdev_level_once(level, dev, fmt, ...) \
+do { \
+ static bool __section(".data.once") __print_once; \
+ \
+ if (!__print_once) { \
+ __print_once = true; \
+ netdev_printk(level, dev, fmt, ##__VA_ARGS__); \
+ } \
+} while (0)
+
+#define netdev_emerg_once(dev, fmt, ...) \
+ netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
+#define netdev_alert_once(dev, fmt, ...) \
+ netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
+#define netdev_crit_once(dev, fmt, ...) \
+ netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
+#define netdev_err_once(dev, fmt, ...) \
+ netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__)
+#define netdev_warn_once(dev, fmt, ...) \
+ netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
+#define netdev_notice_once(dev, fmt, ...) \
+ netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
+#define netdev_info_once(dev, fmt, ...) \
+ netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__)
+
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
+#define netdev_dbg(__dev, format, args...) \
+do { \
+ dynamic_netdev_dbg(__dev, format, ##args); \
+} while (0)
+#elif defined(DEBUG)
+#define netdev_dbg(__dev, format, args...) \
+ netdev_printk(KERN_DEBUG, __dev, format, ##args)
+#else
+#define netdev_dbg(__dev, format, args...) \
+({ \
+ if (0) \
+ netdev_printk(KERN_DEBUG, __dev, format, ##args); \
+})
+#endif
+
+#if defined(VERBOSE_DEBUG)
+#define netdev_vdbg netdev_dbg
+#else
+
+#define netdev_vdbg(dev, format, args...) \
+({ \
+ if (0) \
+ netdev_printk(KERN_DEBUG, dev, format, ##args); \
+ 0; \
+})
+#endif
+
+/* netif printk helpers, similar to netdev_printk */
+
+#define netif_printk(priv, type, level, dev, fmt, args...) \
+do { \
+ if (netif_msg_##type(priv)) \
+ netdev_printk(level, (dev), fmt, ##args); \
+} while (0)
+
+#define netif_level(level, priv, type, dev, fmt, args...) \
+do { \
+ if (netif_msg_##type(priv)) \
+ netdev_##level(dev, fmt, ##args); \
+} while (0)
+
+#define netif_emerg(priv, type, dev, fmt, args...) \
+ netif_level(emerg, priv, type, dev, fmt, ##args)
+#define netif_alert(priv, type, dev, fmt, args...) \
+ netif_level(alert, priv, type, dev, fmt, ##args)
+#define netif_crit(priv, type, dev, fmt, args...) \
+ netif_level(crit, priv, type, dev, fmt, ##args)
+#define netif_err(priv, type, dev, fmt, args...) \
+ netif_level(err, priv, type, dev, fmt, ##args)
+#define netif_warn(priv, type, dev, fmt, args...) \
+ netif_level(warn, priv, type, dev, fmt, ##args)
+#define netif_notice(priv, type, dev, fmt, args...) \
+ netif_level(notice, priv, type, dev, fmt, ##args)
+#define netif_info(priv, type, dev, fmt, args...) \
+ netif_level(info, priv, type, dev, fmt, ##args)
+
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
+#define netif_dbg(priv, type, netdev, format, args...) \
+do { \
+ if (netif_msg_##type(priv)) \
+ dynamic_netdev_dbg(netdev, format, ##args); \
+} while (0)
+#elif defined(DEBUG)
+#define netif_dbg(priv, type, dev, format, args...) \
+ netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
+#else
+#define netif_dbg(priv, type, dev, format, args...) \
+({ \
+ if (0) \
+ netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
+ 0; \
+})
+#endif
+
+/* if @cond then downgrade to debug, else print at @level */
+#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \
+ do { \
+ if (cond) \
+ netif_dbg(priv, type, netdev, fmt, ##args); \
+ else \
+ netif_ ## level(priv, type, netdev, fmt, ##args); \
+ } while (0)
+
+#if defined(VERBOSE_DEBUG)
+#define netif_vdbg netif_dbg
+#else
+#define netif_vdbg(priv, type, dev, format, args...) \
+({ \
+ if (0) \
+ netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
+ 0; \
+})
+#endif
+
+#if defined(CONFIG_DEBUG_NET)
+#define DEBUG_NET_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
+#else
+#define DEBUG_NET_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
+#endif
+
+#endif /* _LINUX_NET_DEBUG_H */
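
Usage mirrors the dev_* and netdev_* printk helpers; as a sketch, netif_cond_dbg() can demote an expected failure to debug level while keeping unexpected ones at error level (struct example_priv is a stand-in for a driver private struct with a msg_enable field):

struct example_priv {
	u32 msg_enable;		/* NETIF_MSG_* bits */
};

/* Sketch: -EAGAIN is expected and logged at debug, the rest as errors. */
static void example_log_tx_status(struct example_priv *priv,
				  struct net_device *dev, int err)
{
	netif_cond_dbg(priv, tx_err, dev, err == -EAGAIN, err,
		       "tx failed: %d\n", err);
}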
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 854d39ef1ca3..8c3587d5c308 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -22,17 +22,22 @@
#include <net/netns/nexthop.h>
#include <net/netns/ieee802154_6lowpan.h>
#include <net/netns/sctp.h>
-#include <net/netns/dccp.h>
#include <net/netns/netfilter.h>
-#include <net/netns/x_tables.h>
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netns/conntrack.h>
#endif
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+#include <net/netns/flow_table.h>
+#endif
#include <net/netns/nftables.h>
#include <net/netns/xfrm.h>
#include <net/netns/mpls.h>
#include <net/netns/can.h>
#include <net/netns/xdp.h>
+#include <net/netns/smc.h>
+#include <net/netns/bpf.h>
+#include <net/netns/mctp.h>
+#include <net/net_trackers.h>
#include <linux/ns_common.h>
#include <linux/idr.h>
#include <linux/skbuff.h>
@@ -59,12 +64,9 @@ struct net {
refcount_t passive; /* To decide when the network
* namespace should be freed.
*/
- refcount_t count; /* To decided when the network
- * namespace should be shut down.
- */
spinlock_t rules_mod_lock;
- unsigned int dev_unreg_count;
+ atomic_t dev_unreg_count;
unsigned int dev_base_seq; /* protected by rtnl_mutex */
int ifindex;
@@ -89,6 +91,7 @@ struct net {
struct idr netns_ids;
struct ns_common ns;
+ struct ref_tracker_dir refcnt_tracker;
struct list_head dev_base_head;
struct proc_dir_entry *proc_net;
@@ -120,7 +123,9 @@ struct net {
struct netns_core core;
struct netns_mib mib;
struct netns_packet packet;
+#if IS_ENABLED(CONFIG_UNIX)
struct netns_unix unx;
+#endif
struct netns_nexthop nexthop;
struct netns_ipv4 ipv4;
#if IS_ENABLED(CONFIG_IPV6)
@@ -132,29 +137,16 @@ struct net {
#if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE)
struct netns_sctp sctp;
#endif
-#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
- struct netns_dccp dccp;
-#endif
#ifdef CONFIG_NETFILTER
struct netns_nf nf;
- struct netns_xt xt;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct netns_ct ct;
#endif
#if defined(CONFIG_NF_TABLES) || defined(CONFIG_NF_TABLES_MODULE)
struct netns_nftables nft;
#endif
-#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
- struct netns_nf_frag nf_frag;
- struct ctl_table_header *nf_frag_frags_hdr;
-#endif
- struct sock *nfnl;
- struct sock *nfnl_stash;
-#if IS_ENABLED(CONFIG_NETFILTER_NETLINK_ACCT)
- struct list_head nfnl_acct_list;
-#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
- struct list_head nfct_timeout_list;
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ struct netns_ft ft;
#endif
#endif
#ifdef CONFIG_WEXT_CORE
@@ -162,12 +154,16 @@ struct net {
#endif
struct net_generic __rcu *gen;
- struct bpf_prog __rcu *flow_dissector_prog;
+ /* Used to store attached BPF programs */
+ struct netns_bpf bpf;
/* Note : following structs are cache line aligned */
#ifdef CONFIG_XFRM
struct netns_xfrm xfrm;
#endif
+
+ u64 net_cookie; /* written once */
+
#if IS_ENABLED(CONFIG_IP_VS)
struct netns_ipvs *ipvs;
#endif
@@ -180,10 +176,16 @@ struct net {
#ifdef CONFIG_XDP_SOCKETS
struct netns_xdp xdp;
#endif
+#if IS_ENABLED(CONFIG_MCTP)
+ struct netns_mctp mctp;
+#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
struct sock *crypto_nlsk;
#endif
struct sock *diag_nlsk;
+#if IS_ENABLED(CONFIG_SMC)
+ struct netns_smc smc;
+#endif
} __randomize_layout;
#include <linux/seq_file_net.h>
@@ -198,6 +200,9 @@ struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);
void net_ns_barrier(void);
+
+struct ns_common *get_net_ns(struct ns_common *ns);
+struct net *get_net_ns_by_fd(int fd);
#else /* CONFIG_NET_NS */
#include <linux/sched.h>
#include <linux/nsproxy.h>
@@ -217,13 +222,22 @@ static inline void net_ns_get_ownership(const struct net *net,
}
static inline void net_ns_barrier(void) {}
+
+static inline struct ns_common *get_net_ns(struct ns_common *ns)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline struct net *get_net_ns_by_fd(int fd)
+{
+ return ERR_PTR(-EINVAL);
+}
#endif /* CONFIG_NET_NS */
extern struct list_head net_namespace_list;
struct net *get_net_ns_by_pid(pid_t pid);
-struct net *get_net_ns_by_fd(int fd);
#ifdef CONFIG_SYSCTL
void ipx_register_sysctl(void);
@@ -236,9 +250,10 @@ void ipx_unregister_sysctl(void);
#ifdef CONFIG_NET_NS
void __put_net(struct net *net);
+/* Try using get_net_track() instead */
static inline struct net *get_net(struct net *net)
{
- refcount_inc(&net->count);
+ refcount_inc(&net->ns.count);
return net;
}
@@ -249,14 +264,15 @@ static inline struct net *maybe_get_net(struct net *net)
* exists. If the reference count is zero this
* function fails and returns NULL.
*/
- if (!refcount_inc_not_zero(&net->count))
+ if (!refcount_inc_not_zero(&net->ns.count))
net = NULL;
return net;
}
+/* Try using put_net_track() instead */
static inline void put_net(struct net *net)
{
- if (refcount_dec_and_test(&net->count))
+ if (refcount_dec_and_test(&net->ns.count))
__put_net(net);
}
@@ -268,7 +284,7 @@ int net_eq(const struct net *net1, const struct net *net2)
static inline int check_net(const struct net *net)
{
- return refcount_read(&net->count) != 0;
+ return refcount_read(&net->ns.count) != 0;
}
void net_drop_ns(void *);
@@ -304,6 +320,36 @@ static inline int check_net(const struct net *net)
#endif
+static inline void netns_tracker_alloc(struct net *net,
+ netns_tracker *tracker, gfp_t gfp)
+{
+#ifdef CONFIG_NET_NS_REFCNT_TRACKER
+ ref_tracker_alloc(&net->refcnt_tracker, tracker, gfp);
+#endif
+}
+
+static inline void netns_tracker_free(struct net *net,
+ netns_tracker *tracker)
+{
+#ifdef CONFIG_NET_NS_REFCNT_TRACKER
+ ref_tracker_free(&net->refcnt_tracker, tracker);
+#endif
+}
+
+static inline struct net *get_net_track(struct net *net,
+ netns_tracker *tracker, gfp_t gfp)
+{
+ get_net(net);
+ netns_tracker_alloc(net, tracker, gfp);
+ return net;
+}
+
+static inline void put_net_track(struct net *net, netns_tracker *tracker)
+{
+ netns_tracker_free(net, tracker);
+ put_net(net);
+}
+
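
The tracked variants pair the refcount operation with a ref_tracker entry so leaked references can be attributed; typical usage keeps the tracker next to the pointer it covers (the containing struct here is made up):

/* Sketch: hold a tracked netns reference for an object's lifetime. */
struct example_obj {
	struct net *net;
	netns_tracker ns_tracker;
};

static void example_obj_init(struct example_obj *e, struct net *net)
{
	e->net = get_net_track(net, &e->ns_tracker, GFP_KERNEL);
}

static void example_obj_fini(struct example_obj *e)
{
	put_net_track(e->net, &e->ns_tracker);
}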
typedef struct {
#ifdef CONFIG_NET_NS
struct net *net;
@@ -408,7 +454,6 @@ int register_pernet_device(struct pernet_operations *);
void unregister_pernet_device(struct pernet_operations *);
struct ctl_table;
-struct ctl_table_header;
#ifdef CONFIG_SYSCTL
int net_sysctl_init(void);
@@ -432,6 +477,13 @@ static inline int rt_genid_ipv4(const struct net *net)
return atomic_read(&net->ipv4.rt_genid);
}
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int rt_genid_ipv6(const struct net *net)
+{
+ return atomic_read(&net->ipv6.fib6_sernum);
+}
+#endif
+
static inline void rt_genid_bump_ipv4(struct net *net)
{
atomic_inc(&net->ipv4.rt_genid);
@@ -469,4 +521,10 @@ static inline void fnhe_genid_bump(struct net *net)
atomic_inc(&net->fnhe_genid);
}
+#ifdef CONFIG_NET
+void net_ns_init(void);
+#else
+static inline void net_ns_init(void) {}
+#endif
+
#endif /* __NET_NET_NAMESPACE_H */
diff --git a/include/net/net_trackers.h b/include/net/net_trackers.h
new file mode 100644
index 000000000000..d94c76cf15a9
--- /dev/null
+++ b/include/net/net_trackers.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_NET_TRACKERS_H
+#define __NET_NET_TRACKERS_H
+#include <linux/ref_tracker.h>
+
+#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
+typedef struct ref_tracker *netdevice_tracker;
+#else
+typedef struct {} netdevice_tracker;
+#endif
+
+#ifdef CONFIG_NET_NS_REFCNT_TRACKER
+typedef struct ref_tracker *netns_tracker;
+#else
+typedef struct {} netns_tracker;
+#endif
+
+#endif /* __NET_NET_TRACKERS_H */
diff --git a/include/net/netevent.h b/include/net/netevent.h
index 4107016c3bb4..1be3757a8b7f 100644
--- a/include/net/netevent.h
+++ b/include/net/netevent.h
@@ -14,6 +14,7 @@
struct dst_entry;
struct neighbour;
+struct notifier_block;
struct netevent_redirect {
struct dst_entry *old;
diff --git a/include/net/netfilter/ipv4/nf_defrag_ipv4.h b/include/net/netfilter/ipv4/nf_defrag_ipv4.h
index bcbd724cc048..7fda9ce9f694 100644
--- a/include/net/netfilter/ipv4/nf_defrag_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_defrag_ipv4.h
@@ -3,6 +3,7 @@
#define _NF_DEFRAG_IPV4_H
struct net;
-int nf_defrag_ipv4_enable(struct net *);
+int nf_defrag_ipv4_enable(struct net *net);
+void nf_defrag_ipv4_disable(struct net *net);
#endif /* _NF_DEFRAG_IPV4_H */
diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h
index 40e0e0623f46..c653fcb88354 100644
--- a/include/net/netfilter/ipv4/nf_reject.h
+++ b/include/net/netfilter/ipv4/nf_reject.h
@@ -8,8 +8,8 @@
#include <net/netfilter/nf_reject.h>
void nf_send_unreach(struct sk_buff *skb_in, int code, int hook);
-void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook);
-
+void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ int hook);
const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
struct tcphdr *_oth, int hook);
struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
@@ -18,4 +18,14 @@ struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
const struct tcphdr *oth);
+struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook, u8 code);
+struct sk_buff *nf_reject_skb_v4_tcp_reset(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook);
+
#endif /* _IPV4_NF_REJECT_H */
diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
index 7b3c873f8839..e95483192d1b 100644
--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
@@ -4,7 +4,4 @@
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
-#include <linux/sysctl.h>
-extern struct ctl_table nf_ct_ipv6_sysctl_table[];
-
#endif /* _NF_CONNTRACK_IPV6_H*/
diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
index 6d31cd041143..ceadf8ba25a4 100644
--- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
@@ -5,7 +5,8 @@
#include <linux/skbuff.h>
#include <linux/types.h>
-int nf_defrag_ipv6_enable(struct net *);
+int nf_defrag_ipv6_enable(struct net *net);
+void nf_defrag_ipv6_disable(struct net *net);
int nf_ct_frag6_init(void);
void nf_ct_frag6_cleanup(void);
@@ -13,4 +14,9 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user);
struct inet_frags_ctl;
+struct nft_ct_frag6_pernet {
+ struct ctl_table_header *nf_frag_frags_hdr;
+ struct fqdir *fqdir;
+};
+
#endif /* _NF_DEFRAG_IPV6_H */
diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h
index 4a3ef9ebdf6f..d729344ba644 100644
--- a/include/net/netfilter/ipv6/nf_reject.h
+++ b/include/net/netfilter/ipv6/nf_reject.h
@@ -7,9 +7,8 @@
void nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
unsigned int hooknum);
-
-void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook);
-
+void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ int hook);
const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
struct tcphdr *otcph,
unsigned int *otcplen, int hook);
@@ -20,4 +19,13 @@ void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
const struct sk_buff *oldskb,
const struct tcphdr *oth, unsigned int otcplen);
+struct sk_buff *nf_reject_skb_v6_tcp_reset(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook);
+struct sk_buff *nf_reject_skb_v6_unreach(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook, u8 code);
+
#endif /* _IPV6_NF_REJECT_H */
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 9f551f3b69c6..6a2019aaa464 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -43,10 +43,27 @@ union nf_conntrack_expect_proto {
/* insert expect proto private data here */
};
+struct nf_conntrack_net_ecache {
+ struct delayed_work dwork;
+ spinlock_t dying_lock;
+ struct hlist_nulls_head dying_list;
+};
+
struct nf_conntrack_net {
+ /* only used when new connection is allocated: */
+ atomic_t count;
+ unsigned int expect_count;
+
+ /* only used from work queues, configuration plane, and so on: */
unsigned int users4;
unsigned int users6;
unsigned int users_bridge;
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_header *sysctl_header;
+#endif
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ struct nf_conntrack_net_ecache ecache;
+#endif
};
#include <linux/types.h>
@@ -62,6 +79,8 @@ struct nf_conn {
* Hint, SKB address this struct and refcnt via skb->_nfct and
* helpers nf_conntrack_get() and nf_conntrack_put().
* Helper nf_ct_put() equals nf_conntrack_put() by dec refcnt,
+ * except that the latter uses internal indirection and does not
+ * result in a conntrack module dependency.
* Beware: nf_ct_get() is different and does not inc the refcnt.
*/
struct nf_conntrack ct_general;
@@ -80,14 +99,13 @@ struct nf_conn {
/* Have we seen traffic both ways yet? (bitset) */
unsigned long status;
- u16 cpu;
possible_net_t ct_net;
#if IS_ENABLED(CONFIG_NF_NAT)
struct hlist_node nat_bysource;
#endif
/* all members below initialized via memset */
- u8 __nfct_init_offset[0];
+ struct { } __nfct_init_offset;
/* If we were expected by an expectation, this will be it */
struct nf_conn *master;
@@ -155,11 +173,13 @@ nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
return (struct nf_conn *)(nfct & NFCT_PTRMASK);
}
+void nf_ct_destroy(struct nf_conntrack *nfct);
+
/* decrement reference count on a conntrack */
static inline void nf_ct_put(struct nf_conn *ct)
{
- WARN_ON(!ct);
- nf_conntrack_put(&ct->ct_general);
+ if (ct && refcount_dec_and_test(&ct->ct_general.use))
+ nf_ct_destroy(&ct->ct_general);
}
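
With the refcount decrement now open-coded and nf_ct_destroy() exported, callers no longer pull in nf_conntrack_put()'s indirection. A sketch (caller name hypothetical, assuming the refcount_t-based ct_general.use from this series) of the usual lookup/hold/release pattern:

/* Sketch, not from this patch: hold a conntrack beyond the skb's
 * lifetime, then release it through the new nf_ct_put(). */
static void my_inspect(struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo); /* no refcnt taken */

	if (!ct || !refcount_inc_not_zero(&ct->ct_general.use))
		return;	/* no entry, or it is about to be destroyed */

	/* ... use ct independently of the skb ... */

	nf_ct_put(ct);	/* last reference falls through to nf_ct_destroy() */
}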
/* Protocol module loading */
@@ -214,13 +234,16 @@ static inline bool nf_ct_kill(struct nf_conn *ct)
return nf_ct_delete(ct, 0, 0);
}
-/* Set all unconfirmed conntrack as dying */
-void nf_ct_unconfirmed_destroy(struct net *);
+struct nf_ct_iter_data {
+ struct net *net;
+ void *data;
+ u32 portid;
+ int report;
+};
/* Iterate over all conntracks: if iter returns true, it's deleted. */
-void nf_ct_iterate_cleanup_net(struct net *net,
- int (*iter)(struct nf_conn *i, void *data),
- void *data, u32 portid, int report);
+void nf_ct_iterate_cleanup_net(int (*iter)(struct nf_conn *i, void *data),
+ const struct nf_ct_iter_data *iter_data);
/* also set unconfirmed conntracks as dying. Only use in module exit path. */
void nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data),
@@ -262,14 +285,14 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
/* jiffies until ct expires, 0 if already expired */
static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
{
- s32 timeout = ct->timeout - nfct_time_stamp;
+ s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
- return timeout > 0 ? timeout : 0;
+ return max(timeout, 0);
}
static inline bool nf_ct_is_expired(const struct nf_conn *ct)
{
- return (__s32)(ct->timeout - nfct_time_stamp) <= 0;
+ return (__s32)(READ_ONCE(ct->timeout) - nfct_time_stamp) <= 0;
}
/* use after obtaining a reference count */
@@ -279,6 +302,18 @@ static inline bool nf_ct_should_gc(const struct nf_conn *ct)
!nf_ct_is_dying(ct);
}
+#define NF_CT_DAY (86400 * HZ)
+
+/* Set an arbitrary timeout large enough to never expire; this saves
+ * us a check for the IPS_OFFLOAD_BIT from the packet path via
+ * nf_ct_is_expired().
+ */
+static inline void nf_ct_offload_timeout(struct nf_conn *ct)
+{
+ if (nf_ct_expires(ct) < NF_CT_DAY / 2)
+ WRITE_ONCE(ct->timeout, nfct_time_stamp + NF_CT_DAY);
+}
+
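
The helper relies on absolute 32-bit jiffies stamps and wrap-safe signed subtraction. A standalone userspace re-expression (not kernel code; HZ value assumed) of the arithmetic behind nf_ct_expires() and the half-day refresh:

/* Standalone demo (userspace) of the s32 jiffies arithmetic used by
 * nf_ct_expires()/nf_ct_offload_timeout(); HZ and 'now' are stand-ins
 * for the kernel's jiffies clock. */
#include <stdint.h>
#include <stdio.h>

#define HZ 1000u
#define NF_CT_DAY (86400 * HZ)

static uint32_t now;			/* stand-in for nfct_time_stamp */

static uint32_t expires(uint32_t timeout)
{
	int32_t delta = (int32_t)(timeout - now);	/* wrap-safe */
	return delta > 0 ? (uint32_t)delta : 0;
}

int main(void)
{
	uint32_t timeout;

	now = 0xffffff00u;		/* just before 2^32 wraparound */
	timeout = now + 30 * HZ;	/* absolute stamp, wraps past 0 */
	printf("expires in %u jiffies\n", expires(timeout));

	/* offload refresh: top up only when under half a day remains */
	if (expires(timeout) < NF_CT_DAY / 2)
		timeout = now + NF_CT_DAY;
	printf("refreshed: %u jiffies left\n", expires(timeout));
	return 0;
}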
struct kernel_param;
int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp);
@@ -286,7 +321,7 @@ int nf_conntrack_hash_resize(unsigned int hashsize);
extern struct hlist_nulls_head *nf_conntrack_hash;
extern unsigned int nf_conntrack_htable_size;
-extern seqcount_t nf_conntrack_generation;
+extern seqcount_spinlock_t nf_conntrack_generation;
extern unsigned int nf_conntrack_max;
/* must be called with rcu read lock held */
@@ -312,6 +347,7 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
void nf_ct_tmpl_free(struct nf_conn *tmpl);
u32 nf_ct_get_id(const struct nf_conn *ct);
+u32 nf_conntrack_count(const struct net *net);
static inline void
nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info)
@@ -319,6 +355,13 @@ nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info)
skb_set_nfct(skb, (unsigned long)ct | info);
}
+extern unsigned int nf_conntrack_net_id;
+
+static inline struct nf_conntrack_net *nf_ct_pernet(const struct net *net)
+{
+ return net_generic(net, nf_conntrack_net_id);
+}
+
#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_ADD_ATOMIC(net, count, v) this_cpu_add((net)->ct.stat->count, (v))
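
Per-netns conntrack state is now reached through net_generic() via the exported nf_conntrack_net_id. A sketch (helper name and threshold hypothetical) of reading the new per-netns allocation counter:

/* Sketch, not from this patch: per-netns state access through the
 * new nf_ct_pernet() helper. */
#include <net/netfilter/nf_conntrack.h>

static bool my_ct_table_busy(const struct net *net)
{
	const struct nf_conntrack_net *cnet = nf_ct_pernet(net);

	/* count is only touched when a new conntrack is allocated */
	return atomic_read(&cnet->count) > nf_conntrack_max / 2;
}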
diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h
index f7a060c6eb28..4b2b7f8914ea 100644
--- a/include/net/netfilter/nf_conntrack_acct.h
+++ b/include/net/netfilter/nf_conntrack_acct.h
@@ -65,9 +65,19 @@ static inline void nf_ct_set_acct(struct net *net, bool enable)
#endif
}
+void nf_ct_acct_add(struct nf_conn *ct, u32 dir, unsigned int packets,
+ unsigned int bytes);
+
+static inline void nf_ct_acct_update(struct nf_conn *ct, u32 dir,
+ unsigned int bytes)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ nf_ct_acct_add(ct, dir, 1, bytes);
+#endif
+}
+
void nf_conntrack_acct_pernet_init(struct net *net);
-int nf_conntrack_acct_init(void);
void nf_conntrack_acct_fini(void);
#endif /* _NF_CONNTRACK_ACCT_H */
diff --git a/include/net/netfilter/nf_conntrack_act_ct.h b/include/net/netfilter/nf_conntrack_act_ct.h
new file mode 100644
index 000000000000..078d3c52c03f
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_act_ct.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _NF_CONNTRACK_ACT_CT_H
+#define _NF_CONNTRACK_ACT_CT_H
+
+#include <net/netfilter/nf_conntrack.h>
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+
+struct nf_conn_act_ct_ext {
+ int ifindex[IP_CT_DIR_MAX];
+};
+
+static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_find(const struct nf_conn *ct)
+{
+#if IS_ENABLED(CONFIG_NET_ACT_CT)
+ return nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT);
+#else
+ return NULL;
+#endif
+}
+
+static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *ct)
+{
+#if IS_ENABLED(CONFIG_NET_ACT_CT)
+ struct nf_conn_act_ct_ext *act_ct = nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT);
+
+ if (act_ct)
+ return act_ct;
+
+ act_ct = nf_ct_ext_add(ct, NF_CT_EXT_ACT_CT, GFP_ATOMIC);
+ return act_ct;
+#else
+ return NULL;
+#endif
+}
+
+static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
+{
+#if IS_ENABLED(CONFIG_NET_ACT_CT)
+ struct nf_conn_act_ct_ext *act_ct_ext;
+
+ act_ct_ext = nf_conn_act_ct_ext_find(ct);
+ if (dev_net(skb->dev) == &init_net && act_ct_ext)
+ act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
+#endif
+}
+
+#endif /* _NF_CONNTRACK_ACT_CT_H */
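
nf_conn_act_ct_ext_add() is a find-or-add helper, so it is safe to call on every packet. A sketch (function name hypothetical) of attaching the extension and then stamping the ingress ifindex:

/* Sketch, not from this patch: attach the act_ct extension when a
 * connection is seen, then record the ingress device per direction. */
#include <net/netfilter/nf_conntrack_act_ct.h>

static void my_ct_seen(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo)
{
	if (!nf_conn_act_ct_ext_add(ct))	/* find-or-add, GFP_ATOMIC */
		return;				/* !CONFIG_NET_ACT_CT or OOM */

	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
}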
diff --git a/include/net/netfilter/nf_conntrack_bpf.h b/include/net/netfilter/nf_conntrack_bpf.h
new file mode 100644
index 000000000000..2d0da478c8e0
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_bpf.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _NF_CONNTRACK_BPF_H
+#define _NF_CONNTRACK_BPF_H
+
+#include <linux/kconfig.h>
+#include <net/netfilter/nf_conntrack.h>
+
+struct nf_conn___init {
+ struct nf_conn ct;
+};
+
+#if (IS_BUILTIN(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
+ (IS_MODULE(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
+
+extern int register_nf_conntrack_bpf(void);
+extern void cleanup_nf_conntrack_bpf(void);
+
+#else
+
+static inline int register_nf_conntrack_bpf(void)
+{
+ return 0;
+}
+
+static inline void cleanup_nf_conntrack_bpf(void)
+{
+}
+
+#endif
+
+#if (IS_BUILTIN(CONFIG_NF_NAT) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
+ (IS_MODULE(CONFIG_NF_NAT) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
+
+extern int register_nf_nat_bpf(void);
+
+#else
+
+static inline int register_nf_nat_bpf(void)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* _NF_CONNTRACK_BPF_H */
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 09f2efea0b97..b2b9de70d9f4 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -30,7 +30,6 @@ void nf_conntrack_cleanup_net(struct net *net);
void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list);
void nf_conntrack_proto_pernet_init(struct net *net);
-void nf_conntrack_proto_pernet_fini(struct net *net);
int nf_conntrack_proto_init(void);
void nf_conntrack_proto_fini(void);
@@ -59,9 +58,14 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
int ret = NF_ACCEPT;
if (ct) {
- if (!nf_ct_is_confirmed(ct))
+ if (!nf_ct_is_confirmed(ct)) {
ret = __nf_conntrack_confirm(skb);
- if (likely(ret == NF_ACCEPT))
+
+ if (ret == NF_ACCEPT)
+ ct = (struct nf_conn *)skb_nfct(skb);
+ }
+
+ if (ret == NF_ACCEPT && nf_ct_ecache_exist(ct))
nf_ct_deliver_cached_events(ct);
}
return ret;
@@ -80,4 +84,17 @@ void nf_conntrack_lock(spinlock_t *lock);
extern spinlock_t nf_conntrack_expect_lock;
+/* code shared between ctnetlink and nf_conntrack_bpf */
+
+static inline void __nf_ct_set_timeout(struct nf_conn *ct, u64 timeout)
+{
+ if (timeout > INT_MAX)
+ timeout = INT_MAX;
+ WRITE_ONCE(ct->timeout, nfct_time_stamp + (u32)timeout);
+}
+
+int __nf_ct_change_timeout(struct nf_conn *ct, u64 cta_timeout);
+void __nf_ct_change_status(struct nf_conn *ct, unsigned long on, unsigned long off);
+int nf_ct_change_status_common(struct nf_conn *ct, unsigned int status);
+
#endif /* _NF_CONNTRACK_CORE_H */
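
__nf_ct_set_timeout() clamps the u64 value supplied over netlink before converting it into an absolute 32-bit stamp. A standalone userspace demo (not kernel code) of that clamping:

/* Standalone demo (userspace) of __nf_ct_set_timeout()'s behaviour:
 * u64 netlink timeouts are capped at INT_MAX jiffies before being
 * turned into an absolute 32-bit expiry stamp. */
#include <stdint.h>
#include <stdio.h>
#include <limits.h>

static uint32_t set_timeout(uint32_t now, uint64_t timeout)
{
	if (timeout > INT_MAX)
		timeout = INT_MAX;
	return now + (uint32_t)timeout;	/* absolute expiry stamp */
}

int main(void)
{
	uint32_t now = 1000;

	printf("huge timeout -> stamp %u\n", set_timeout(now, ~0ULL));
	printf("30s @ HZ=100 -> stamp %u\n", set_timeout(now, 3000));
	return 0;
}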
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
index 9645b47fa7e4..e227d997fc71 100644
--- a/include/net/netfilter/nf_conntrack_count.h
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -10,6 +10,7 @@ struct nf_conncount_data;
struct nf_conncount_list {
spinlock_t list_lock;
+ u32 last_gc; /* jiffies at most recent gc */
struct list_head head; /* connections with the same filtering key */
unsigned int count; /* length of list */
};
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index eb81f9195e28..0c1dac318e02 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -14,17 +14,15 @@
#include <net/netfilter/nf_conntrack_extend.h>
enum nf_ct_ecache_state {
- NFCT_ECACHE_UNKNOWN, /* destroy event not sent */
NFCT_ECACHE_DESTROY_FAIL, /* tried but failed to send destroy event */
NFCT_ECACHE_DESTROY_SENT, /* sent destroy event after failure */
};
struct nf_conntrack_ecache {
unsigned long cache; /* bitops want long */
- u16 missed; /* missed events */
u16 ctmask; /* bitmask of ct events to be delivered */
u16 expmask; /* bitmask of expect events to be delivered */
- enum nf_ct_ecache_state state:8;/* ecache state */
+ u32 missed; /* missed events */
u32 portid; /* netlink portid of destroyer */
};
@@ -38,28 +36,12 @@ nf_ct_ecache_find(const struct nf_conn *ct)
#endif
}
-static inline struct nf_conntrack_ecache *
-nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
+static inline bool nf_ct_ecache_exist(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
- struct net *net = nf_ct_net(ct);
- struct nf_conntrack_ecache *e;
-
- if (!ctmask && !expmask && net->ct.sysctl_events) {
- ctmask = ~0;
- expmask = ~0;
- }
- if (!ctmask && !expmask)
- return NULL;
-
- e = nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp);
- if (e) {
- e->ctmask = ctmask;
- e->expmask = expmask;
- }
- return e;
+ return nf_ct_ext_exist(ct, NF_CT_EXT_ECACHE);
#else
- return NULL;
+ return false;
#endif
}
@@ -72,19 +54,26 @@ struct nf_ct_event {
int report;
};
+struct nf_exp_event {
+ struct nf_conntrack_expect *exp;
+ u32 portid;
+ int report;
+};
+
struct nf_ct_event_notifier {
- int (*fcn)(unsigned int events, struct nf_ct_event *item);
+ int (*ct_event)(unsigned int events, const struct nf_ct_event *item);
+ int (*exp_event)(unsigned int events, const struct nf_exp_event *item);
};
-int nf_conntrack_register_notifier(struct net *net,
- struct nf_ct_event_notifier *nb);
-void nf_conntrack_unregister_notifier(struct net *net,
- struct nf_ct_event_notifier *nb);
+void nf_conntrack_register_notifier(struct net *net,
+ const struct nf_ct_event_notifier *nb);
+void nf_conntrack_unregister_notifier(struct net *net);
void nf_ct_deliver_cached_events(struct nf_conn *ct);
int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
u32 portid, int report);
+bool nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp);
#else
static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct)
@@ -99,6 +88,10 @@ static inline int nf_conntrack_eventmask_report(unsigned int eventmask,
return 0;
}
+static inline bool nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
+{
+ return false;
+}
#endif
static inline void
@@ -124,59 +117,38 @@ nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct,
u32 portid, int report)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
- const struct net *net = nf_ct_net(ct);
-
- if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
- return 0;
-
- return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
-#else
- return 0;
+ if (nf_ct_ecache_exist(ct))
+ return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
#endif
+ return 0;
}
static inline int
nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
- const struct net *net = nf_ct_net(ct);
-
- if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
- return 0;
-
- return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
-#else
- return 0;
+ if (nf_ct_ecache_exist(ct))
+ return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
#endif
+ return 0;
}
#ifdef CONFIG_NF_CONNTRACK_EVENTS
-
-struct nf_exp_event {
- struct nf_conntrack_expect *exp;
- u32 portid;
- int report;
-};
-
-struct nf_exp_event_notifier {
- int (*fcn)(unsigned int events, struct nf_exp_event *item);
-};
-
-int nf_ct_expect_register_notifier(struct net *net,
- struct nf_exp_event_notifier *nb);
-void nf_ct_expect_unregister_notifier(struct net *net,
- struct nf_exp_event_notifier *nb);
-
void nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
struct nf_conntrack_expect *exp,
u32 portid, int report);
+void nf_conntrack_ecache_work(struct net *net, enum nf_ct_ecache_state state);
+
void nf_conntrack_ecache_pernet_init(struct net *net);
void nf_conntrack_ecache_pernet_fini(struct net *net);
-int nf_conntrack_ecache_init(void);
-void nf_conntrack_ecache_fini(void);
+struct nf_conntrack_net_ecache *nf_conn_pernet_ecache(const struct net *net);
+static inline bool nf_conntrack_ecache_dwork_pending(const struct net *net)
+{
+ return net->ct.ecache_dwork_pending;
+}
#else /* CONFIG_NF_CONNTRACK_EVENTS */
static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e,
@@ -186,43 +158,18 @@ static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e,
{
}
-static inline void nf_conntrack_ecache_pernet_init(struct net *net)
+static inline void nf_conntrack_ecache_work(struct net *net,
+ enum nf_ct_ecache_state s)
{
}
-static inline void nf_conntrack_ecache_pernet_fini(struct net *net)
+static inline void nf_conntrack_ecache_pernet_init(struct net *net)
{
}
-static inline int nf_conntrack_ecache_init(void)
-{
- return 0;
-}
-
-static inline void nf_conntrack_ecache_fini(void)
+static inline void nf_conntrack_ecache_pernet_fini(struct net *net)
{
}
-
+static inline bool nf_conntrack_ecache_dwork_pending(const struct net *net) { return false; }
#endif /* CONFIG_NF_CONNTRACK_EVENTS */
-
-static inline void nf_conntrack_ecache_delayed_work(struct net *net)
-{
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
- if (!delayed_work_pending(&net->ct.ecache_dwork)) {
- schedule_delayed_work(&net->ct.ecache_dwork, HZ);
- net->ct.ecache_dwork_pending = true;
- }
-#endif
-}
-
-static inline void nf_conntrack_ecache_work(struct net *net)
-{
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
- if (net->ct.ecache_dwork_pending) {
- net->ct.ecache_dwork_pending = false;
- mod_delayed_work(system_wq, &net->ct.ecache_dwork, 0);
- }
-#endif
-}
-
#endif /*_NF_CONNTRACK_ECACHE_H*/
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 5ae5295aa46d..0b247248b032 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -28,24 +28,18 @@ enum nf_ct_ext_id {
#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
NF_CT_EXT_SYNPROXY,
#endif
+#if IS_ENABLED(CONFIG_NET_ACT_CT)
+ NF_CT_EXT_ACT_CT,
+#endif
NF_CT_EXT_NUM,
};
-#define NF_CT_EXT_HELPER_TYPE struct nf_conn_help
-#define NF_CT_EXT_NAT_TYPE struct nf_conn_nat
-#define NF_CT_EXT_SEQADJ_TYPE struct nf_conn_seqadj
-#define NF_CT_EXT_ACCT_TYPE struct nf_conn_acct
-#define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache
-#define NF_CT_EXT_TSTAMP_TYPE struct nf_conn_tstamp
-#define NF_CT_EXT_TIMEOUT_TYPE struct nf_conn_timeout
-#define NF_CT_EXT_LABELS_TYPE struct nf_conn_labels
-#define NF_CT_EXT_SYNPROXY_TYPE struct nf_conn_synproxy
-
/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {
u8 offset[NF_CT_EXT_NUM];
u8 len;
- char data[0];
+ unsigned int gen_id;
+ char data[] __aligned(8);
};
static inline bool __nf_ct_ext_exist(const struct nf_ct_ext *ext, u8 id)
@@ -58,33 +52,28 @@ static inline bool nf_ct_ext_exist(const struct nf_conn *ct, u8 id)
return (ct->ext && __nf_ct_ext_exist(ct->ext, id));
}
-static inline void *__nf_ct_ext_find(const struct nf_conn *ct, u8 id)
+void *__nf_ct_ext_find(const struct nf_ct_ext *ext, u8 id);
+
+static inline void *nf_ct_ext_find(const struct nf_conn *ct, u8 id)
{
- if (!nf_ct_ext_exist(ct, id))
+ struct nf_ct_ext *ext = ct->ext;
+
+ if (!ext || !__nf_ct_ext_exist(ext, id))
return NULL;
+ if (unlikely(ext->gen_id))
+ return __nf_ct_ext_find(ext, id);
+
return (void *)ct->ext + ct->ext->offset[id];
}
-#define nf_ct_ext_find(ext, id) \
- ((id##_TYPE *)__nf_ct_ext_find((ext), (id)))
-
-/* Destroy all relationships */
-void nf_ct_ext_destroy(struct nf_conn *ct);
/* Add this type, returns pointer to data or NULL. */
void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp);
-struct nf_ct_ext_type {
- /* Destroys relationships (can be NULL). */
- void (*destroy)(struct nf_conn *ct);
-
- enum nf_ct_ext_id id;
-
- /* Length and min alignment. */
- u8 len;
- u8 align;
-};
+/* ext genid: if ext->gen_id != nf_conntrack_ext_genid, the extensions
+ * cannot be used anymore unless the conntrack has the CONFIRMED bit set.
+ */
+extern atomic_t nf_conntrack_ext_genid;
+void nf_ct_ext_bump_genid(void);
-int nf_ct_extend_register(const struct nf_ct_ext_type *type);
-void nf_ct_extend_unregister(const struct nf_ct_ext_type *type);
#endif /* _NF_CONNTRACK_EXTEND_H */
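
All extensions live in a single allocation, addressed by per-id byte offsets from the blob base. A standalone userspace model of the lookup (simplified: two fixed ids, no gen_id handling):

/* Standalone demo (userspace) of the offset-array layout behind
 * nf_ct_ext_find(): one allocation, per-id byte offsets into data[].
 * Offset 0 doubles as "extension not present". */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

enum { EXT_A, EXT_B, EXT_NUM };

struct ext {
	uint8_t offset[EXT_NUM];	/* 0 means "not present" */
	uint8_t len;
	char data[];
};

static void *ext_find(struct ext *e, int id)
{
	if (!e || !e->offset[id])
		return NULL;
	return (char *)e + e->offset[id];
}

int main(void)
{
	/* layout: header, then 8 bytes for EXT_A, 16 bytes for EXT_B */
	size_t hdr = sizeof(struct ext);
	struct ext *e = calloc(1, hdr + 8 + 16);

	e->offset[EXT_A] = hdr;
	e->offset[EXT_B] = hdr + 8;
	strcpy(ext_find(e, EXT_B), "hello ext B");
	printf("%s\n", (char *)ext_find(e, EXT_B));
	free(e);
	return 0;
}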
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 37f0fbefb060..9939c366f720 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -177,4 +177,5 @@ void nf_nat_helper_unregister(struct nf_conntrack_nat_helper *nat);
int nf_nat_helper_try_module_get(const char *name, u16 l3num,
u8 protonum);
void nf_nat_helper_put(struct nf_conntrack_helper *helper);
+void nf_ct_set_auto_assign_helper_warned(struct net *net);
#endif /*_NF_CONNTRACK_HELPER_H*/
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 4cad1f0a327a..1f47bef51722 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -32,7 +32,7 @@ struct nf_conntrack_l4proto {
/* convert protoinfo to nfnetink attributes */
int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla,
- struct nf_conn *ct);
+ struct nf_conn *ct, bool destroy);
/* convert nfnetlink attributes to protoinfo */
int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct);
@@ -42,7 +42,8 @@ struct nf_conntrack_l4proto {
/* Calculate tuple nlattr size */
unsigned int (*nlattr_tuple_size)(void);
int (*nlattr_to_tuple)(struct nlattr *tb[],
- struct nf_conntrack_tuple *t);
+ struct nf_conntrack_tuple *t,
+ u_int32_t flags);
const struct nla_policy *nla_policy;
struct {
@@ -152,27 +153,32 @@ const struct nf_conntrack_l4proto *nf_ct_l4proto_find(u8 l4proto);
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple);
int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
- struct nf_conntrack_tuple *t);
+ struct nf_conntrack_tuple *t,
+ u_int32_t flags);
unsigned int nf_ct_port_nlattr_tuple_size(void);
extern const struct nla_policy nf_ct_port_nla_policy[];
#ifdef CONFIG_SYSCTL
-__printf(3, 4) __cold
+__printf(4, 5) __cold
void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
const struct nf_conn *ct,
+ const struct nf_hook_state *state,
const char *fmt, ...);
-__printf(5, 6) __cold
+__printf(4, 5) __cold
void nf_l4proto_log_invalid(const struct sk_buff *skb,
- struct net *net,
- u16 pf, u8 protonum,
+ const struct nf_hook_state *state,
+ u8 protonum,
const char *fmt, ...);
#else
-static inline __printf(5, 6) __cold
-void nf_l4proto_log_invalid(const struct sk_buff *skb, struct net *net,
- u16 pf, u8 protonum, const char *fmt, ...) {}
-static inline __printf(3, 4) __cold
+static inline __printf(4, 5) __cold
+void nf_l4proto_log_invalid(const struct sk_buff *skb,
+ const struct nf_hook_state *state,
+ u8 protonum,
+ const char *fmt, ...) {}
+static inline __printf(4, 5) __cold
void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
const struct nf_conn *ct,
+ const struct nf_hook_state *state,
const char *fmt, ...) { }
#endif /* CONFIG_SYSCTL */
@@ -201,6 +207,20 @@ static inline struct nf_icmp_net *nf_icmpv6_pernet(struct net *net)
{
return &net->ct.nf_ct_proto.icmpv6;
}
+
+/* Caller must check nf_ct_protonum(ct) is IPPROTO_TCP before calling. */
+static inline void nf_ct_set_tcp_be_liberal(struct nf_conn *ct)
+{
+ ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+ ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+}
+
+/* Caller must check nf_ct_protonum(ct) is IPPROTO_TCP before calling. */
+static inline bool nf_conntrack_tcp_established(const struct nf_conn *ct)
+{
+ return ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED &&
+ test_bit(IPS_ASSURED_BIT, &ct->status);
+}
#endif
#ifdef CONFIG_NF_CT_PROTO_DCCP
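
nf_conntrack_tcp_established() gates offload-style decisions; as the comments above note, callers must first verify the conntrack is TCP. A sketch (helper name hypothetical):

/* Sketch, not from this patch: only treat fully established, assured
 * TCP flows as candidates for offload-like handling. */
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>

static bool my_flow_offloadable(const struct nf_conn *ct)
{
	if (nf_ct_protonum(ct) != IPPROTO_TCP)
		return false;

	return nf_conntrack_tcp_established(ct);
}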
diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h
index ba916411c4e1..66bab6c60d12 100644
--- a/include/net/netfilter/nf_conntrack_labels.h
+++ b/include/net/netfilter/nf_conntrack_labels.h
@@ -17,10 +17,18 @@ struct nf_conn_labels {
unsigned long bits[NF_CT_LABELS_MAX_SIZE / sizeof(long)];
};
+/* Can't use nf_ct_ext_find(): the flow dissector cannot use symbols
+ * exported by the nf_conntrack module.
+ */
static inline struct nf_conn_labels *nf_ct_labels_find(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_LABELS
- return nf_ct_ext_find(ct, NF_CT_EXT_LABELS);
+ struct nf_ct_ext *ext = ct->ext;
+
+ if (!ext || !__nf_ct_ext_exist(ext, NF_CT_EXT_LABELS))
+ return NULL;
+
+ return (void *)ct->ext + ct->ext->offset[NF_CT_EXT_LABELS];
#else
return NULL;
#endif
@@ -45,12 +53,9 @@ int nf_connlabels_replace(struct nf_conn *ct,
#ifdef CONFIG_NF_CONNTRACK_LABELS
int nf_conntrack_labels_init(void);
-void nf_conntrack_labels_fini(void);
int nf_connlabels_get(struct net *net, unsigned int bit);
void nf_connlabels_put(struct net *net);
#else
-static inline int nf_conntrack_labels_init(void) { return 0; }
-static inline void nf_conntrack_labels_fini(void) {}
static inline int nf_connlabels_get(struct net *net, unsigned int bit) { return 0; }
static inline void nf_connlabels_put(struct net *net) {}
#endif
diff --git a/include/net/netfilter/nf_conntrack_seqadj.h b/include/net/netfilter/nf_conntrack_seqadj.h
index 0a10b50537ae..883c414b768e 100644
--- a/include/net/netfilter/nf_conntrack_seqadj.h
+++ b/include/net/netfilter/nf_conntrack_seqadj.h
@@ -42,7 +42,4 @@ int nf_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo, unsigned int protoff);
s32 nf_ct_seq_offset(const struct nf_conn *ct, enum ip_conntrack_dir, u32 seq);
-int nf_conntrack_seqadj_init(void);
-void nf_conntrack_seqadj_fini(void);
-
#endif /* _NF_CONNTRACK_SEQADJ_H */
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
index 6dd72396f534..9fdaba911de6 100644
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -14,15 +14,7 @@
struct nf_ct_timeout {
__u16 l3num;
const struct nf_conntrack_l4proto *l4proto;
- char data[0];
-};
-
-struct ctnl_timeout {
- struct list_head head;
- struct rcu_head rcu_head;
- refcount_t refcnt;
- char name[CTNL_TIMEOUT_NAME_MAX];
- struct nf_ct_timeout timeout;
+ char data[];
};
struct nf_conn_timeout {
@@ -89,23 +81,11 @@ static inline unsigned int *nf_ct_timeout_lookup(const struct nf_conn *ct)
}
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-int nf_conntrack_timeout_init(void);
-void nf_conntrack_timeout_fini(void);
void nf_ct_untimeout(struct net *net, struct nf_ct_timeout *timeout);
int nf_ct_set_timeout(struct net *net, struct nf_conn *ct, u8 l3num, u8 l4num,
const char *timeout_name);
void nf_ct_destroy_timeout(struct nf_conn *ct);
#else
-static inline int nf_conntrack_timeout_init(void)
-{
- return 0;
-}
-
-static inline void nf_conntrack_timeout_fini(void)
-{
- return;
-}
-
static inline int nf_ct_set_timeout(struct net *net, struct nf_conn *ct,
u8 l3num, u8 l4num,
const char *timeout_name)
@@ -120,8 +100,12 @@ static inline void nf_ct_destroy_timeout(struct nf_conn *ct)
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-extern struct nf_ct_timeout *(*nf_ct_timeout_find_get_hook)(struct net *net, const char *name);
-extern void (*nf_ct_timeout_put_hook)(struct nf_ct_timeout *timeout);
+struct nf_ct_timeout_hooks {
+ struct nf_ct_timeout *(*timeout_find_get)(struct net *net, const char *name);
+ void (*timeout_put)(struct nf_ct_timeout *timeout);
+};
+
+extern const struct nf_ct_timeout_hooks __rcu *nf_ct_timeout_hook;
#endif
#endif /* _NF_CONNTRACK_TIMEOUT_H */
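
The two separate function pointers collapse into one RCU-managed ops struct, so readers dereference once and always get a matching find_get/put pair. A sketch (wrapper name hypothetical, requires CONFIG_NF_CONNTRACK_TIMEOUT) of consuming the hook:

/* Sketch, not from this patch: dereference the RCU-published hook
 * struct once, so both callbacks come from the same registration. */
#include <linux/rcupdate.h>
#include <net/netfilter/nf_conntrack_timeout.h>

static struct nf_ct_timeout *my_timeout_get(struct net *net, const char *name)
{
	const struct nf_ct_timeout_hooks *h;
	struct nf_ct_timeout *t = NULL;

	rcu_read_lock();
	h = rcu_dereference(nf_ct_timeout_hook);
	if (h)
		t = h->timeout_find_get(net, name);
	rcu_read_unlock();
	return t;
}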
diff --git a/include/net/netfilter/nf_conntrack_timestamp.h b/include/net/netfilter/nf_conntrack_timestamp.h
index 820ea34b6029..57138d974a9f 100644
--- a/include/net/netfilter/nf_conntrack_timestamp.h
+++ b/include/net/netfilter/nf_conntrack_timestamp.h
@@ -40,21 +40,8 @@ struct nf_conn_tstamp *nf_ct_tstamp_ext_add(struct nf_conn *ct, gfp_t gfp)
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
void nf_conntrack_tstamp_pernet_init(struct net *net);
-
-int nf_conntrack_tstamp_init(void);
-void nf_conntrack_tstamp_fini(void);
#else
static inline void nf_conntrack_tstamp_pernet_init(struct net *net) {}
-
-static inline int nf_conntrack_tstamp_init(void)
-{
- return 0;
-}
-
-static inline void nf_conntrack_tstamp_fini(void)
-{
- return;
-}
#endif /* CONFIG_NF_CONNTRACK_TIMESTAMP */
#endif /* _NF_CONNTRACK_TSTAMP_H */
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index e0f709d9d547..cd982f4a0f50 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -10,12 +10,45 @@
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <net/flow_offload.h>
#include <net/dst.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_defs.h>
struct nf_flowtable;
struct nf_flow_rule;
struct flow_offload;
enum flow_offload_tuple_dir;
+struct nf_flow_key {
+ struct flow_dissector_key_meta meta;
+ struct flow_dissector_key_control control;
+ struct flow_dissector_key_control enc_control;
+ struct flow_dissector_key_basic basic;
+ struct flow_dissector_key_vlan vlan;
+ struct flow_dissector_key_vlan cvlan;
+ union {
+ struct flow_dissector_key_ipv4_addrs ipv4;
+ struct flow_dissector_key_ipv6_addrs ipv6;
+ };
+ struct flow_dissector_key_keyid enc_key_id;
+ union {
+ struct flow_dissector_key_ipv4_addrs enc_ipv4;
+ struct flow_dissector_key_ipv6_addrs enc_ipv6;
+ };
+ struct flow_dissector_key_tcp tcp;
+ struct flow_dissector_key_ports tp;
+} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
+
+struct nf_flow_match {
+ struct flow_dissector dissector;
+ struct nf_flow_key key;
+ struct nf_flow_key mask;
+};
+
+struct nf_flow_rule {
+ struct nf_flow_match match;
+ struct flow_rule *rule;
+};
+
struct nf_flowtable_type {
struct list_head list;
int family;
@@ -33,7 +66,8 @@ struct nf_flowtable_type {
};
enum nf_flowtable_flags {
- NF_FLOWTABLE_HW_OFFLOAD = 0x1,
+ NF_FLOWTABLE_HW_OFFLOAD = 0x1, /* NFT_FLOWTABLE_HW_OFFLOAD */
+ NF_FLOWTABLE_COUNTER = 0x2, /* NFT_FLOWTABLE_COUNTER */
};
struct nf_flowtable {
@@ -44,6 +78,7 @@ struct nf_flowtable {
struct delayed_work gc_work;
unsigned int flags;
struct flow_block flow_block;
+ struct rw_semaphore flow_block_lock; /* Guards flow_block */
possible_net_t net;
};
@@ -55,9 +90,19 @@ static inline bool nf_flowtable_hw_offload(struct nf_flowtable *flowtable)
enum flow_offload_tuple_dir {
FLOW_OFFLOAD_DIR_ORIGINAL = IP_CT_DIR_ORIGINAL,
FLOW_OFFLOAD_DIR_REPLY = IP_CT_DIR_REPLY,
- FLOW_OFFLOAD_DIR_MAX = IP_CT_DIR_MAX
+};
+#define FLOW_OFFLOAD_DIR_MAX IP_CT_DIR_MAX
+
+enum flow_offload_xmit_type {
+ FLOW_OFFLOAD_XMIT_UNSPEC = 0,
+ FLOW_OFFLOAD_XMIT_NEIGH,
+ FLOW_OFFLOAD_XMIT_XFRM,
+ FLOW_OFFLOAD_XMIT_DIRECT,
+ FLOW_OFFLOAD_XMIT_TC,
};
+#define NF_FLOW_TABLE_ENCAP_MAX 2
+
struct flow_offload_tuple {
union {
struct in_addr src_v4;
@@ -76,11 +121,34 @@ struct flow_offload_tuple {
u8 l3proto;
u8 l4proto;
- u8 dir;
+ struct {
+ u16 id;
+ __be16 proto;
+ } encap[NF_FLOW_TABLE_ENCAP_MAX];
- u16 mtu;
+ /* All members above are keys for lookups, see flow_offload_hash(). */
+ struct { } __hash;
- struct dst_entry *dst_cache;
+ u8 dir:2,
+ xmit_type:3,
+ encap_num:2,
+ in_vlan_ingress:2;
+ u16 mtu;
+ union {
+ struct {
+ struct dst_entry *dst_cache;
+ u32 dst_cookie;
+ };
+ struct {
+ u32 ifidx;
+ u32 hw_ifidx;
+ u8 h_source[ETH_ALEN];
+ u8 h_dest[ETH_ALEN];
+ } out;
+ struct {
+ u32 iifidx;
+ } tc;
+ };
};
struct flow_offload_tuple_rhash {
@@ -95,7 +163,7 @@ enum nf_flow_flags {
NF_FLOW_HW,
NF_FLOW_HW_DYING,
NF_FLOW_HW_DEAD,
- NF_FLOW_HW_REFRESH,
+ NF_FLOW_HW_PENDING,
};
enum flow_offload_type {
@@ -115,6 +183,8 @@ struct flow_offload {
#define NF_FLOW_TIMEOUT (30 * HZ)
#define nf_flowtable_time_stamp (u32)jiffies
+unsigned long flow_offload_get_timeout(struct flow_offload *flow);
+
static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
{
return (__s32)(timeout - nf_flowtable_time_stamp);
@@ -122,19 +192,87 @@ static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
struct nf_flow_route {
struct {
- struct dst_entry *dst;
+ struct dst_entry *dst;
+ struct {
+ u32 ifindex;
+ struct {
+ u16 id;
+ __be16 proto;
+ } encap[NF_FLOW_TABLE_ENCAP_MAX];
+ u8 num_encaps:2,
+ ingress_vlans:2;
+ } in;
+ struct {
+ u32 ifindex;
+ u32 hw_ifindex;
+ u8 h_source[ETH_ALEN];
+ u8 h_dest[ETH_ALEN];
+ } out;
+ enum flow_offload_xmit_type xmit_type;
} tuple[FLOW_OFFLOAD_DIR_MAX];
};
struct flow_offload *flow_offload_alloc(struct nf_conn *ct);
void flow_offload_free(struct flow_offload *flow);
+static inline int
+nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
+ flow_setup_cb_t *cb, void *cb_priv)
+{
+ struct flow_block *block = &flow_table->flow_block;
+ struct flow_block_cb *block_cb;
+ int err = 0;
+
+ down_write(&flow_table->flow_block_lock);
+ block_cb = flow_block_cb_lookup(block, cb, cb_priv);
+ if (block_cb) {
+ err = -EEXIST;
+ goto unlock;
+ }
+
+ block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL);
+ if (IS_ERR(block_cb)) {
+ err = PTR_ERR(block_cb);
+ goto unlock;
+ }
+
+ list_add_tail(&block_cb->list, &block->cb_list);
+
+unlock:
+ up_write(&flow_table->flow_block_lock);
+ return err;
+}
+
+static inline void
+nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
+ flow_setup_cb_t *cb, void *cb_priv)
+{
+ struct flow_block *block = &flow_table->flow_block;
+ struct flow_block_cb *block_cb;
+
+ down_write(&flow_table->flow_block_lock);
+ block_cb = flow_block_cb_lookup(block, cb, cb_priv);
+ if (block_cb) {
+ list_del(&block_cb->list);
+ flow_block_cb_free(block_cb);
+ } else {
+ WARN_ON(true);
+ }
+ up_write(&flow_table->flow_block_lock);
+}
+
int flow_offload_route_init(struct flow_offload *flow,
const struct nf_flow_route *route);
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
+void flow_offload_refresh(struct nf_flowtable *flow_table,
+ struct flow_offload *flow);
+
struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
struct flow_offload_tuple *tuple);
+void nf_flow_table_gc_run(struct nf_flowtable *flow_table);
+void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
+ struct net_device *dev);
void nf_flow_table_cleanup(struct net_device *dev);
int nf_flow_table_init(struct nf_flowtable *flow_table);
@@ -142,12 +280,12 @@ void nf_flow_table_free(struct nf_flowtable *flow_table);
void flow_offload_teardown(struct flow_offload *flow);
-int nf_flow_snat_port(const struct flow_offload *flow,
- struct sk_buff *skb, unsigned int thoff,
- u8 protocol, enum flow_offload_tuple_dir dir);
-int nf_flow_dnat_port(const struct flow_offload *flow,
- struct sk_buff *skb, unsigned int thoff,
- u8 protocol, enum flow_offload_tuple_dir dir);
+void nf_flow_snat_port(const struct flow_offload *flow,
+ struct sk_buff *skb, unsigned int thoff,
+ u8 protocol, enum flow_offload_tuple_dir dir);
+void nf_flow_dnat_port(const struct flow_offload *flow,
+ struct sk_buff *skb, unsigned int thoff,
+ u8 protocol, enum flow_offload_tuple_dir dir);
struct flow_ports {
__be16 source, dest;
@@ -169,6 +307,8 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
struct flow_offload *flow);
void nf_flow_table_offload_flush(struct nf_flowtable *flowtable);
+void nf_flow_table_offload_flush_cleanup(struct nf_flowtable *flowtable);
+
int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
struct net_device *dev,
enum flow_block_command cmd);
@@ -182,4 +322,41 @@ int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
int nf_flow_table_offload_init(void);
void nf_flow_table_offload_exit(void);
+static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
+{
+ __be16 proto;
+
+ proto = *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
+ sizeof(struct pppoe_hdr)));
+ switch (proto) {
+ case htons(PPP_IP):
+ return htons(ETH_P_IP);
+ case htons(PPP_IPV6):
+ return htons(ETH_P_IPV6);
+ }
+
+ return 0;
+}
+
+#define NF_FLOW_TABLE_STAT_INC(net, count) __this_cpu_inc((net)->ft.stat->count)
+#define NF_FLOW_TABLE_STAT_DEC(net, count) __this_cpu_dec((net)->ft.stat->count)
+#define NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count) \
+ this_cpu_inc((net)->ft.stat->count)
+#define NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count) \
+ this_cpu_dec((net)->ft.stat->count)
+
+#ifdef CONFIG_NF_FLOW_TABLE_PROCFS
+int nf_flow_table_init_proc(struct net *net);
+void nf_flow_table_fini_proc(struct net *net);
+#else
+static inline int nf_flow_table_init_proc(struct net *net)
+{
+ return 0;
+}
+
+static inline void nf_flow_table_fini_proc(struct net *net)
+{
+}
+#endif /* CONFIG_NF_FLOW_TABLE_PROCFS */
+
#endif /* _NF_FLOW_TABLE_H */
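
Callback (un)registration on the flow_block is now serialized by flow_block_lock, and a duplicate add returns -EEXIST. A sketch (driver function names hypothetical) of wiring an offload callback into a flowtable:

/* Sketch, not from this patch: a hypothetical driver registering its
 * flow offload callback, tolerating an already-registered callback. */
#include <net/netfilter/nf_flow_table.h>

static int my_setup_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	/* program hardware from the offload request in type_data */
	return 0;
}

static int my_ft_enable(struct nf_flowtable *ft, void *priv)
{
	int err = nf_flow_table_offload_add_cb(ft, my_setup_cb, priv);

	return err == -EEXIST ? 0 : err;	/* double add is harmless */
}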
diff --git a/include/net/netfilter/nf_hooks_lwtunnel.h b/include/net/netfilter/nf_hooks_lwtunnel.h
new file mode 100644
index 000000000000..52e27920f829
--- /dev/null
+++ b/include/net/netfilter/nf_hooks_lwtunnel.h
@@ -0,0 +1,7 @@
+#include <linux/sysctl.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_SYSCTL
+int nf_hooks_lwtunnel_sysctl_handler(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos);
+#endif
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index 0d3920896d50..e55eedc84ed7 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -68,7 +68,6 @@ void nf_log_unbind_pf(struct net *net, u_int8_t pf);
int nf_logger_find_get(int pf, enum nf_log_type type);
void nf_logger_put(int pf, enum nf_log_type type);
-void nf_logger_request_module(int pf, enum nf_log_type type);
#define MODULE_ALIAS_NF_LOGGER(family, type) \
MODULE_ALIAS("nf-logger-" __stringify(family) "-" __stringify(type))
@@ -99,27 +98,4 @@ struct nf_log_buf;
struct nf_log_buf *nf_log_buf_open(void);
__printf(2, 3) int nf_log_buf_add(struct nf_log_buf *m, const char *f, ...);
void nf_log_buf_close(struct nf_log_buf *m);
-
-/* common logging functions */
-int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb,
- u8 proto, int fragment, unsigned int offset);
-int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
- u8 proto, int fragment, unsigned int offset,
- unsigned int logflags);
-void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
- struct sock *sk);
-void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
- unsigned int hooknum, const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const struct nf_loginfo *loginfo,
- const char *prefix);
-void nf_log_l2packet(struct net *net, u_int8_t pf,
- __be16 protocol,
- unsigned int hooknum,
- const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const struct nf_loginfo *loginfo, const char *prefix);
-
#endif /* _NF_LOG_H */
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index 0d412dd63707..e9eb01e99d2f 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -104,9 +104,7 @@ unsigned int
nf_nat_inet_fn(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state);
-int nf_xfrm_me_harder(struct net *n, struct sk_buff *s, unsigned int family);
-
-static inline int nf_nat_initialized(struct nf_conn *ct,
+static inline int nf_nat_initialized(const struct nf_conn *ct,
enum nf_nat_manip_type manip)
{
if (manip == NF_NAT_MANIP_SRC)
diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h
index efae84646353..44c421b9be85 100644
--- a/include/net/netfilter/nf_nat_helper.h
+++ b/include/net/netfilter/nf_nat_helper.h
@@ -38,4 +38,5 @@ bool nf_nat_mangle_udp_packet(struct sk_buff *skb, struct nf_conn *ct,
* to port ct->master->saved_proto. */
void nf_nat_follow_master(struct nf_conn *ct, struct nf_conntrack_expect *this);
+u16 nf_nat_exp_find_port(struct nf_conntrack_expect *exp, u16 port);
#endif
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 47088083667b..c81021ab07aa 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -14,7 +14,10 @@ struct nf_queue_entry {
struct sk_buff *skb;
unsigned int id;
unsigned int hook_index; /* index in hook_entries->hook[] */
-
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ struct net_device *physin;
+ struct net_device *physout;
+#endif
struct nf_hook_state state;
u16 size; /* sizeof(entry) + saved route keys */
@@ -30,17 +33,17 @@ struct nf_queue_handler {
void (*nf_hook_drop)(struct net *net);
};
-void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
-void nf_unregister_queue_handler(struct net *net);
+void nf_register_queue_handler(const struct nf_queue_handler *qh);
+void nf_unregister_queue_handler(void);
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
-void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
-void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
+bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
+void nf_queue_entry_free(struct nf_queue_entry *entry);
static inline void init_hashrandom(u32 *jhash_initval)
{
while (*jhash_initval == 0)
- *jhash_initval = prandom_u32();
+ *jhash_initval = get_random_u32();
}
static inline u32 hash_v4(const struct iphdr *iph, u32 initval)
diff --git a/include/net/netfilter/nf_reject.h b/include/net/netfilter/nf_reject.h
index 9051c3a0c8e7..7c669792fb9c 100644
--- a/include/net/netfilter/nf_reject.h
+++ b/include/net/netfilter/nf_reject.h
@@ -5,12 +5,28 @@
#include <linux/types.h>
#include <uapi/linux/in.h>
-static inline bool nf_reject_verify_csum(__u8 proto)
+static inline bool nf_reject_verify_csum(struct sk_buff *skb, int dataoff,
+ __u8 proto)
{
/* Skip protocols that don't use 16-bit one's complement checksum
* of the entire payload.
*/
switch (proto) {
+ /* Protocols with optional checksums. */
+ case IPPROTO_UDP: {
+ const struct udphdr *udp_hdr;
+ struct udphdr _udp_hdr;
+
+ udp_hdr = skb_header_pointer(skb, dataoff,
+ sizeof(_udp_hdr),
+ &_udp_hdr);
+ if (!udp_hdr || udp_hdr->check)
+ return true;
+
+ return false;
+ }
+ case IPPROTO_GRE:
+
/* Protocols with other integrity checks. */
case IPPROTO_AH:
case IPPROTO_ESP:
@@ -19,9 +35,6 @@ static inline bool nf_reject_verify_csum(__u8 proto)
/* Protocols with partial checksums. */
case IPPROTO_UDPLITE:
case IPPROTO_DCCP:
-
- /* Protocols with optional checksums. */
- case IPPROTO_GRE:
return false;
}
return true;
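
UDP moves out of the blanket "optional checksum" bucket: a zero UDP checksum means "not computed" (legal over IPv4), so verification is skipped only in that case. A standalone userspace re-expression (abridged protocol list; not kernel code) of the decision:

/* Standalone demo (userspace) of the nf_reject_verify_csum() logic:
 * UDP is exempt only when its optional checksum field is zero. */
#include <stdio.h>
#include <netinet/in.h>

static int verify_csum(int proto, unsigned short udp_check)
{
	switch (proto) {
	case IPPROTO_UDP:
		return udp_check != 0;	/* zero = checksum absent */
	case IPPROTO_GRE:		/* optional checksum */
	case IPPROTO_AH:		/* own integrity checks */
	case IPPROTO_ESP:
	case IPPROTO_UDPLITE:		/* partial checksums */
	case IPPROTO_DCCP:
		return 0;
	default:
		return 1;		/* e.g. TCP: always verify */
	}
}

int main(void)
{
	printf("udp check=0      -> verify? %d\n", verify_csum(IPPROTO_UDP, 0));
	printf("udp check=0xabc  -> verify? %d\n", verify_csum(IPPROTO_UDP, 0xabc));
	printf("tcp              -> verify? %d\n", verify_csum(IPPROTO_TCP, 0));
	return 0;
}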
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 4170c033d461..cdb7db9b0e25 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -13,42 +13,62 @@
#include <net/netfilter/nf_flow_table.h>
#include <net/netlink.h>
#include <net/flow_offload.h>
+#include <net/netns/generic.h>
+
+#define NFT_MAX_HOOKS (NF_INET_INGRESS + 1)
struct module;
#define NFT_JUMP_STACK_SIZE 16
+enum {
+ NFT_PKTINFO_L4PROTO = (1 << 0),
+ NFT_PKTINFO_INNER = (1 << 1),
+};
+
struct nft_pktinfo {
struct sk_buff *skb;
- bool tprot_set;
+ const struct nf_hook_state *state;
+ u8 flags;
u8 tprot;
- /* for x_tables compatibility */
- struct xt_action_param xt;
+ u16 fragoff;
+ unsigned int thoff;
+ unsigned int inneroff;
};
+static inline struct sock *nft_sk(const struct nft_pktinfo *pkt)
+{
+ return pkt->state->sk;
+}
+
+static inline unsigned int nft_thoff(const struct nft_pktinfo *pkt)
+{
+ return pkt->thoff;
+}
+
static inline struct net *nft_net(const struct nft_pktinfo *pkt)
{
- return pkt->xt.state->net;
+ return pkt->state->net;
}
static inline unsigned int nft_hook(const struct nft_pktinfo *pkt)
{
- return pkt->xt.state->hook;
+ return pkt->state->hook;
}
static inline u8 nft_pf(const struct nft_pktinfo *pkt)
{
- return pkt->xt.state->pf;
+ return pkt->state->pf;
}
static inline const struct net_device *nft_in(const struct nft_pktinfo *pkt)
{
- return pkt->xt.state->in;
+ return pkt->state->in;
}
static inline const struct net_device *nft_out(const struct nft_pktinfo *pkt)
{
- return pkt->xt.state->out;
+ return pkt->state->out;
}
static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
@@ -56,16 +76,15 @@ static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
const struct nf_hook_state *state)
{
pkt->skb = skb;
- pkt->xt.state = state;
+ pkt->state = state;
}
-static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt)
{
- pkt->tprot_set = false;
+ pkt->flags = 0;
pkt->tprot = 0;
- pkt->xt.thoff = 0;
- pkt->xt.fragoff = 0;
+ pkt->thoff = 0;
+ pkt->fragoff = 0;
}
/**
@@ -86,6 +105,8 @@ struct nft_data {
};
} __attribute__((aligned(__alignof__(u64))));
+#define NFT_REG32_NUM 20
+
/**
* struct nft_regs - nf_tables register set
*
@@ -96,11 +117,22 @@ struct nft_data {
*/
struct nft_regs {
union {
- u32 data[20];
+ u32 data[NFT_REG32_NUM];
struct nft_verdict verdict;
};
};
+struct nft_regs_track {
+ struct {
+ const struct nft_expr *selector;
+ const struct nft_expr *bitwise;
+ u8 num_reg;
+ } regs[NFT_REG32_NUM];
+
+ const struct nft_expr *cur;
+ const struct nft_expr *last;
+};
+
/* Store/load an u8, u16 or u64 integer to/from the u32 data register.
*
* Note, when using concatenations, register allocation happens at 32-bit
@@ -125,11 +157,26 @@ static inline void nft_reg_store16(u32 *dreg, u16 val)
*(u16 *)dreg = val;
}
+static inline void nft_reg_store_be16(u32 *dreg, __be16 val)
+{
+ nft_reg_store16(dreg, (__force __u16)val);
+}
+
static inline u16 nft_reg_load16(const u32 *sreg)
{
return *(u16 *)sreg;
}
+static inline __be16 nft_reg_load_be16(const u32 *sreg)
+{
+ return (__force __be16)nft_reg_load16(sreg);
+}
+
+static inline __be32 nft_reg_load_be32(const u32 *sreg)
+{
+ return *(__force __be32 *)sreg;
+}
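
Registers are plain u32 slots; 16-bit values are stored by pointer punning, and the new __be16/__be32 variants only add __force casts to keep sparse quiet. A standalone userspace demo (without the sparse annotations; mirrors the kernel helpers, which zero the full register before the halfword store):

/* Standalone demo (userspace) of 16-bit loads/stores into a u32
 * register slot, mirroring nft_reg_store16()/nft_reg_load16(). */
#include <stdint.h>
#include <stdio.h>

static void reg_store16(uint32_t *dreg, uint16_t val)
{
	*dreg = 0;			/* zero the whole 32-bit register */
	*(uint16_t *)dreg = val;	/* then store the halfword */
}

static uint16_t reg_load16(const uint32_t *sreg)
{
	return *(const uint16_t *)sreg;
}

int main(void)
{
	uint32_t regs[20] = { 0 };	/* NFT_REG32_NUM-sized register file */

	reg_store16(&regs[3], 0xbeef);
	printf("reg3 = 0x%04x\n", reg_load16(&regs[3]));
	return 0;
}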
+
static inline void nft_reg_store64(u32 *dreg, u64 val)
{
put_unaligned(val, (u64 *)dreg);
@@ -143,16 +190,11 @@ static inline u64 nft_reg_load64(const u32 *sreg)
static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
unsigned int len)
{
+ if (len % NFT_REG32_SIZE)
+ dst[len / NFT_REG32_SIZE] = 0;
memcpy(dst, src, len);
}
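
When the copy length is not a multiple of the 32-bit register size, the tail of the last touched register must be zeroed so stale bytes never leak into later comparisons. A standalone userspace demo (not kernel code) of the padding:

/* Standalone demo (userspace) of nft_data_copy()'s tail padding:
 * the last partially-written register is cleared before the memcpy. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define REG32_SIZE 4

static void data_copy(uint32_t *dst, const void *src, unsigned int len)
{
	if (len % REG32_SIZE)
		dst[len / REG32_SIZE] = 0;	/* clear stale tail bytes */
	memcpy(dst, src, len);
}

int main(void)
{
	uint32_t regs[4];
	const uint8_t key[6] = { 1, 2, 3, 4, 5, 6 };	/* e.g. a MAC address */

	memset(regs, 0xff, sizeof(regs));	/* stale register contents */
	data_copy(regs, key, sizeof(key));
	printf("reg1 = 0x%08x (upper half zeroed)\n", regs[1]);
	return 0;
}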
-static inline void nft_data_debug(const struct nft_data *data)
-{
- pr_debug("data[0]=%x data[1]=%x data[2]=%x data[3]=%x\n",
- data->data[0], data->data[1],
- data->data[2], data->data[3]);
-}
-
/**
* struct nft_ctx - nf_tables rule/set context
*
@@ -179,13 +221,18 @@ struct nft_ctx {
bool report;
};
+enum nft_data_desc_flags {
+ NFT_DATA_DESC_SETELEM = (1 << 0),
+};
+
struct nft_data_desc {
enum nft_data_types type;
+ unsigned int size;
unsigned int len;
+ unsigned int flags;
};
-int nft_data_init(const struct nft_ctx *ctx,
- struct nft_data *data, unsigned int size,
+int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data,
struct nft_data_desc *desc, const struct nlattr *nla);
void nft_data_hold(const struct nft_data *data, enum nft_data_types type);
void nft_data_release(const struct nft_data *data, enum nft_data_types type);
@@ -203,14 +250,13 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
}
int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest);
-unsigned int nft_parse_register(const struct nlattr *attr);
int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);
-int nft_validate_register_load(enum nft_registers reg, unsigned int len);
-int nft_validate_register_store(const struct nft_ctx *ctx,
- enum nft_registers reg,
- const struct nft_data *data,
- enum nft_data_types type, unsigned int len);
+int nft_parse_register_load(const struct nlattr *attr, u8 *sreg, u32 len);
+int nft_parse_register_store(const struct nft_ctx *ctx,
+ const struct nlattr *attr, u8 *dreg,
+ const struct nft_data *data,
+ enum nft_data_types type, unsigned int len);
/**
* struct nft_userdata - user defined data associated with an object
@@ -224,7 +270,7 @@ int nft_validate_register_store(const struct nft_ctx *ctx,
*/
struct nft_userdata {
u8 len;
- unsigned char data[0];
+ unsigned char data[];
};
/**
@@ -243,6 +289,10 @@ struct nft_set_elem {
u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)];
struct nft_data val;
} key_end;
+ union {
+ u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)];
+ struct nft_data val;
+ } data;
void *priv;
};
@@ -266,6 +316,7 @@ struct nft_set_iter {
* @size: number of set elements
* @field_len: length of each field in concatenation, bytes
* @field_count: number of concatenated fields in element
+ * @expr: set must support expressions
*/
struct nft_set_desc {
unsigned int klen;
@@ -273,6 +324,7 @@ struct nft_set_desc {
unsigned int size;
u8 field_len[NFT_REG32_COUNT];
u8 field_count;
+ bool expr;
};
/**
@@ -302,8 +354,35 @@ struct nft_set_estimate {
enum nft_set_class space;
};
+#define NFT_EXPR_MAXATTR 16
+#define NFT_EXPR_SIZE(size) (sizeof(struct nft_expr) + \
+ ALIGN(size, __alignof__(struct nft_expr)))
+
+/**
+ * struct nft_expr - nf_tables expression
+ *
+ * @ops: expression ops
+ * @data: expression private data
+ */
+struct nft_expr {
+ const struct nft_expr_ops *ops;
+ unsigned char data[]
+ __attribute__((aligned(__alignof__(u64))));
+};
+
+static inline void *nft_expr_priv(const struct nft_expr *expr)
+{
+ return (void *)expr->data;
+}
+
+int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src);
+void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr);
+int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
+ const struct nft_expr *expr);
+bool nft_expr_reduce_bitwise(struct nft_regs_track *track,
+ const struct nft_expr *expr);
+
struct nft_set_ext;
-struct nft_expr;
/**
* struct nft_set_ops - nf_tables set operations
@@ -385,20 +464,29 @@ struct nft_set_ops {
* struct nft_set_type - nf_tables set type
*
* @ops: set ops for this type
- * @list: used internally
- * @owner: module reference
* @features: features supported by the implementation
*/
struct nft_set_type {
const struct nft_set_ops ops;
- struct list_head list;
- struct module *owner;
u32 features;
};
#define to_set_type(o) container_of(o, struct nft_set_type, ops)
-int nft_register_set(struct nft_set_type *type);
-void nft_unregister_set(struct nft_set_type *type);
+struct nft_set_elem_expr {
+ u8 size;
+ unsigned char data[]
+ __attribute__((aligned(__alignof__(struct nft_expr))));
+};
+
+#define nft_setelem_expr_at(__elem_expr, __offset) \
+ ((struct nft_expr *)&__elem_expr->data[__offset])
+
+#define nft_setelem_expr_foreach(__expr, __elem_expr, __size) \
+ for (__expr = nft_setelem_expr_at(__elem_expr, 0), __size = 0; \
+ __size < (__elem_expr)->size; \
+ __size += (__expr)->ops->size, __expr = ((void *)(__expr)) + (__expr)->ops->size)
+
+#define NFT_SET_EXPR_MAX 2
/**
* struct nft_set - nf_tables set instance
@@ -423,6 +511,7 @@ void nft_unregister_set(struct nft_set_type *type);
* @policy: set parameterization (see enum nft_set_policies)
* @udlen: user data length
* @udata: user data
+ * @expr: stateful expression
* @ops: set ops
* @flags: set flags
* @genmask: generation mask
@@ -457,6 +546,9 @@ struct nft_set {
genmask:2;
u8 klen;
u8 dlen;
+ u8 num_exprs;
+ struct nft_expr *exprs[NFT_SET_EXPR_MAX];
+ struct list_head catchall_list;
unsigned char data[]
__attribute__((aligned(__alignof__(u64))));
};
@@ -482,6 +574,10 @@ struct nft_set *nft_set_lookup_global(const struct net *net,
const struct nlattr *nla_set_id,
u8 genmask);
+struct nft_set_ext *nft_set_catchall_lookup(const struct net *net,
+ const struct nft_set *set);
+void *nft_set_catchall_gc(const struct nft_set *set);
+
static inline unsigned long nft_set_gc_interval(const struct nft_set *set)
{
return set->gc_int ? msecs_to_jiffies(set->gc_int) : HZ;
@@ -521,7 +617,7 @@ void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);
* @NFT_SET_EXT_TIMEOUT: element timeout
* @NFT_SET_EXT_EXPIRATION: element expiration time
* @NFT_SET_EXT_USERDATA: user data associated with the element
- * @NFT_SET_EXT_EXPR: expression assiociated with the element
+ * @NFT_SET_EXT_EXPRESSIONS: expressions associated with the element
* @NFT_SET_EXT_OBJREF: stateful object reference associated with element
* @NFT_SET_EXT_NUM: number of extension types
*/
@@ -533,7 +629,7 @@ enum nft_set_extensions {
NFT_SET_EXT_TIMEOUT,
NFT_SET_EXT_EXPIRATION,
NFT_SET_EXT_USERDATA,
- NFT_SET_EXT_EXPR,
+ NFT_SET_EXT_EXPRESSIONS,
NFT_SET_EXT_OBJREF,
NFT_SET_EXT_NUM
};
@@ -560,6 +656,7 @@ extern const struct nft_set_ext_type nft_set_ext_types[];
struct nft_set_ext_tmpl {
u16 len;
u8 offset[NFT_SET_EXT_NUM];
+ u8 ext_len[NFT_SET_EXT_NUM];
};
/**
@@ -572,7 +669,7 @@ struct nft_set_ext_tmpl {
struct nft_set_ext {
u8 genmask;
u8 offset[NFT_SET_EXT_NUM];
- char data[0];
+ char data[];
};
static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl)
@@ -581,18 +678,23 @@ static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl)
tmpl->len = sizeof(struct nft_set_ext);
}
-static inline void nft_set_ext_add_length(struct nft_set_ext_tmpl *tmpl, u8 id,
- unsigned int len)
+static inline int nft_set_ext_add_length(struct nft_set_ext_tmpl *tmpl, u8 id,
+ unsigned int len)
{
tmpl->len = ALIGN(tmpl->len, nft_set_ext_types[id].align);
- BUG_ON(tmpl->len > U8_MAX);
+ if (tmpl->len > U8_MAX)
+ return -EINVAL;
+
tmpl->offset[id] = tmpl->len;
- tmpl->len += nft_set_ext_types[id].len + len;
+ tmpl->ext_len[id] = nft_set_ext_types[id].len + len;
+ tmpl->len += tmpl->ext_len[id];
+
+ return 0;
}
-static inline void nft_set_ext_add(struct nft_set_ext_tmpl *tmpl, u8 id)
+static inline int nft_set_ext_add(struct nft_set_ext_tmpl *tmpl, u8 id)
{
- nft_set_ext_add_length(tmpl, id, 0);
+ return nft_set_ext_add_length(tmpl, id, 0);
}
static inline void nft_set_ext_init(struct nft_set_ext *ext,
@@ -651,9 +753,9 @@ static inline struct nft_userdata *nft_set_ext_userdata(const struct nft_set_ext
return nft_set_ext(ext, NFT_SET_EXT_USERDATA);
}
-static inline struct nft_expr *nft_set_ext_expr(const struct nft_set_ext *ext)
+static inline struct nft_set_elem_expr *nft_set_ext_expr(const struct nft_set_ext *ext)
{
- return nft_set_ext(ext, NFT_SET_EXT_EXPR);
+ return nft_set_ext(ext, NFT_SET_EXT_EXPRESSIONS);
}
static inline bool nft_set_elem_expired(const struct nft_set_ext *ext)
@@ -673,10 +775,16 @@ static inline struct nft_object **nft_set_ext_obj(const struct nft_set_ext *ext)
return nft_set_ext(ext, NFT_SET_EXT_OBJREF);
}
+struct nft_expr *nft_set_elem_expr_alloc(const struct nft_ctx *ctx,
+ const struct nft_set *set,
+ const struct nlattr *attr);
+
void *nft_set_elem_init(const struct nft_set *set,
const struct nft_set_ext_tmpl *tmpl,
const u32 *key, const u32 *key_end, const u32 *data,
u64 timeout, u64 expiration, gfp_t gfp);
+int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_expr *expr_array[]);
void nft_set_elem_destroy(const struct nft_set *set, void *elem,
bool destroy_expr);
@@ -792,7 +900,6 @@ struct nft_offload_ctx;
* @validate: validate expression, called during loop detection
* @data: extra data to attach to this expression operation
*/
-struct nft_expr;
struct nft_expr_ops {
void (*eval)(const struct nft_expr *expr,
struct nft_regs *regs,
@@ -818,43 +925,20 @@ struct nft_expr_ops {
int (*validate)(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data);
+ bool (*reduce)(struct nft_regs_track *track,
+ const struct nft_expr *expr);
bool (*gc)(struct net *net,
const struct nft_expr *expr);
int (*offload)(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_expr *expr);
- u32 offload_flags;
+ bool (*offload_action)(const struct nft_expr *expr);
+ void (*offload_stats)(struct nft_expr *expr,
+ const struct flow_stats *stats);
const struct nft_expr_type *type;
void *data;
};
-#define NFT_EXPR_MAXATTR 16
-#define NFT_EXPR_SIZE(size) (sizeof(struct nft_expr) + \
- ALIGN(size, __alignof__(struct nft_expr)))
-
-/**
- * struct nft_expr - nf_tables expression
- *
- * @ops: expression ops
- * @data: expression private data
- */
-struct nft_expr {
- const struct nft_expr_ops *ops;
- unsigned char data[]
- __attribute__((aligned(__alignof__(u64))));
-};
-
-static inline void *nft_expr_priv(const struct nft_expr *expr)
-{
- return (void *)expr->data;
-}
-
-struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
- const struct nlattr *nla);
-void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr);
-int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
- const struct nft_expr *expr);
-
/**
* struct nft_rule - nf_tables rule
*
@@ -890,11 +974,37 @@ static inline struct nft_expr *nft_expr_last(const struct nft_rule *rule)
return (struct nft_expr *)&rule->data[rule->dlen];
}
+static inline bool nft_expr_more(const struct nft_rule *rule,
+ const struct nft_expr *expr)
+{
+ return expr != nft_expr_last(rule) && expr->ops;
+}
+
static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule)
{
return (void *)&rule->data[rule->dlen];
}
+void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule);
+
+static inline void nft_set_elem_update_expr(const struct nft_set_ext *ext,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_set_elem_expr *elem_expr;
+ struct nft_expr *expr;
+ u32 size;
+
+ if (__nft_set_ext_exists(ext, NFT_SET_EXT_EXPRESSIONS)) {
+ elem_expr = nft_set_ext_expr(ext);
+ nft_setelem_expr_foreach(expr, elem_expr, size) {
+ expr->ops->eval(expr, regs, pkt);
+ if (regs->verdict.code == NFT_BREAK)
+ return;
+ }
+ }
+}
+
/*
* The last pointer isn't really necessary, but the compiler isn't able to
* determine that the result of nft_expr_last() is always the same since it
@@ -905,12 +1015,21 @@ static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule)
(expr) != (last); \
(expr) = nft_expr_next(expr))
-enum nft_chain_flags {
- NFT_BASE_CHAIN = 0x1,
- NFT_CHAIN_HW_OFFLOAD = 0x2,
+#define NFT_CHAIN_POLICY_UNSET U8_MAX
+
+struct nft_rule_dp {
+ u64 is_last:1,
+ dlen:12,
+ handle:42; /* for tracing */
+ unsigned char data[]
+ __attribute__((aligned(__alignof__(struct nft_expr))));
};
-#define NFT_CHAIN_POLICY_UNSET U8_MAX
+struct nft_rule_blob {
+ unsigned long size;
+ unsigned char data[]
+ __attribute__((aligned(__alignof__(struct nft_rule_dp))));
+};
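
Rules are now packed back to back in a single blob; each rule's header squeezes an end marker, the expression-data length (up to 4095 bytes) and a 42-bit handle for tracing into one 64-bit word. A standalone userspace demo (not kernel code) of the packing:

/* Standalone demo (userspace) of the nft_rule_dp header packing:
 * is_last(1) + dlen(12) + handle(42) = 55 bits, one u64 word. */
#include <stdint.h>
#include <stdio.h>

struct rule_hdr {
	uint64_t is_last:1,
		 dlen:12,
		 handle:42;
};

int main(void)
{
	struct rule_hdr h = { .is_last = 0, .dlen = 4095, .handle = 12345 };

	printf("sizeof(header) = %zu bytes\n", sizeof(h));
	printf("max dlen = %u, handle = %llu\n",
	       (unsigned)h.dlen, (unsigned long long)h.handle);
	return 0;
}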
/**
* struct nft_chain - nf_tables chain
@@ -925,20 +1044,23 @@ enum nft_chain_flags {
* @name: name of the chain
*/
struct nft_chain {
- struct nft_rule *__rcu *rules_gen_0;
- struct nft_rule *__rcu *rules_gen_1;
+ struct nft_rule_blob __rcu *blob_gen_0;
+ struct nft_rule_blob __rcu *blob_gen_1;
struct list_head rules;
struct list_head list;
struct rhlist_head rhlhead;
struct nft_table *table;
u64 handle;
u32 use;
- u8 flags:6,
+ u8 flags:5,
+ bound:1,
genmask:2;
char *name;
+ u16 udlen;
+ u8 *udata;
/* Only used during control plane commit phase: */
- struct nft_rule **rules_next;
+ struct nft_rule_blob *blob_next;
};
int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain);
@@ -968,7 +1090,7 @@ struct nft_chain_type {
int family;
struct module *owner;
unsigned int hook_mask;
- nf_hookfn *hooks[NF_MAX_HOOKS];
+ nf_hookfn *hooks[NFT_MAX_HOOKS];
int (*ops_register)(struct net *net, const struct nf_hook_ops *ops);
void (*ops_unregister)(struct net *net, const struct nf_hook_ops *ops);
};
@@ -978,6 +1100,14 @@ int nft_chain_validate_dependency(const struct nft_chain *chain,
int nft_chain_validate_hooks(const struct nft_chain *chain,
unsigned int hook_flags);
+static inline bool nft_chain_is_bound(struct nft_chain *chain)
+{
+ return (chain->flags & NFT_CHAIN_BINDING) && chain->bound;
+}
+
+void nft_chain_del(struct nft_chain *chain);
+void nf_tables_chain_destroy(struct nft_ctx *ctx);
+
struct nft_stats {
u64 bytes;
u64 pkts;
@@ -1019,7 +1149,7 @@ static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chai
static inline bool nft_is_base_chain(const struct nft_chain *chain)
{
- return chain->flags & NFT_BASE_CHAIN;
+ return chain->flags & NFT_CHAIN_BASE;
}
int __nft_release_basechain(struct nft_ctx *ctx);
@@ -1056,9 +1186,23 @@ struct nft_table {
u16 family:6,
flags:8,
genmask:2;
+ u32 nlpid;
char *name;
+ u16 udlen;
+ u8 *udata;
};
+static inline bool nft_table_has_owner(const struct nft_table *table)
+{
+ return table->flags & NFT_TABLE_F_OWNER;
+}
+
+static inline bool nft_base_chain_netdev(int family, u32 hooknum)
+{
+ return family == NFPROTO_NETDEV ||
+ (family == NFPROTO_INET && hooknum == NF_INET_INGRESS);
+}
+
void nft_register_chain_type(const struct nft_chain_type *);
void nft_unregister_chain_type(const struct nft_chain_type *);
@@ -1098,6 +1242,8 @@ struct nft_object {
u32 genmask:2,
use:30;
u64 handle;
+ u16 udlen;
+ u8 *udata;
/* runtime data below here */
const struct nft_object_ops *ops ____cacheline_aligned;
unsigned char data[]
@@ -1118,7 +1264,7 @@ struct nft_object *nft_obj_lookup(const struct net *net,
void nft_obj_notify(struct net *net, const struct nft_table *table,
struct nft_object *obj, u32 portid, u32 seq,
- int event, int family, int report, gfp_t gfp);
+ int event, u16 flags, int family, int report, gfp_t gfp);
/**
* struct nft_object_type - stateful object type
@@ -1218,24 +1364,28 @@ void nft_unregister_flowtable_type(struct nf_flowtable_type *type);
/**
* struct nft_traceinfo - nft tracing information and state
*
+ * @trace: other struct members are initialised
+ * @nf_trace: copy of skb->nf_trace before rule evaluation
+ * @type: event type (enum nft_trace_types)
+ * @skbid: hash of skb to be used as trace id
+ * @packet_dumped: packet headers sent in a previous traceinfo message
* @pkt: pktinfo currently processed
* @basechain: base chain currently processed
* @chain: chain currently processed
* @rule: rule that was evaluated
* @verdict: verdict given by rule
- * @type: event type (enum nft_trace_types)
- * @packet_dumped: packet headers sent in a previous traceinfo message
- * @trace: other struct members are initialised
*/
struct nft_traceinfo {
+ bool trace;
+ bool nf_trace;
+ bool packet_dumped;
+ enum nft_trace_types type:8;
+ u32 skbid;
const struct nft_pktinfo *pkt;
const struct nft_base_chain *basechain;
const struct nft_chain *chain;
- const struct nft_rule *rule;
+ const struct nft_rule_dp *rule;
const struct nft_verdict *verdict;
- enum nft_trace_types type;
- bool packet_dumped;
- bool trace;
};
void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt,
@@ -1253,9 +1403,6 @@ void nft_trace_notify(struct nft_traceinfo *info);
#define MODULE_ALIAS_NFT_EXPR(name) \
MODULE_ALIAS("nft-expr-" name)
-#define MODULE_ALIAS_NFT_SET() \
- MODULE_ALIAS("nft-set")
-
#define MODULE_ALIAS_NFT_OBJ(type) \
MODULE_ALIAS("nft-obj-" __stringify(type))
@@ -1385,7 +1532,7 @@ struct nft_trans {
int msg_type;
bool put_net;
struct nft_ctx ctx;
- char data[0];
+ char data[];
};
struct nft_trans_rule {
@@ -1419,6 +1566,7 @@ struct nft_trans_chain {
char *name;
struct nft_stats __percpu *stats;
u8 policy;
+ u32 chain_id;
};
#define nft_trans_chain_update(trans) \
@@ -1429,16 +1577,15 @@ struct nft_trans_chain {
(((struct nft_trans_chain *)trans->data)->stats)
#define nft_trans_chain_policy(trans) \
(((struct nft_trans_chain *)trans->data)->policy)
+#define nft_trans_chain_id(trans) \
+ (((struct nft_trans_chain *)trans->data)->chain_id)
struct nft_trans_table {
bool update;
- bool enable;
};
#define nft_trans_table_update(trans) \
(((struct nft_trans_table *)trans->data)->update)
-#define nft_trans_table_enable(trans) \
- (((struct nft_trans_table *)trans->data)->enable)
struct nft_trans_elem {
struct nft_set *set;
@@ -1468,14 +1615,74 @@ struct nft_trans_obj {
struct nft_trans_flowtable {
struct nft_flowtable *flowtable;
+ bool update;
+ struct list_head hook_list;
+ u32 flags;
};
#define nft_trans_flowtable(trans) \
(((struct nft_trans_flowtable *)trans->data)->flowtable)
+#define nft_trans_flowtable_update(trans) \
+ (((struct nft_trans_flowtable *)trans->data)->update)
+#define nft_trans_flowtable_hooks(trans) \
+ (((struct nft_trans_flowtable *)trans->data)->hook_list)
+#define nft_trans_flowtable_flags(trans) \
+ (((struct nft_trans_flowtable *)trans->data)->flags)
int __init nft_chain_filter_init(void);
void nft_chain_filter_fini(void);
void __init nft_chain_route_init(void);
void nft_chain_route_fini(void);
+
+void nf_tables_trans_destroy_flush_work(void);
+
+int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result);
+__be64 nf_jiffies64_to_msecs(u64 input);
+
+#ifdef CONFIG_MODULES
+__printf(2, 3) int nft_request_module(struct net *net, const char *fmt, ...);
+#else
+static inline int nft_request_module(struct net *net, const char *fmt, ...) { return -ENOENT; }
+#endif
+
+struct nftables_pernet {
+ struct list_head tables;
+ struct list_head commit_list;
+ struct list_head module_list;
+ struct list_head notify_list;
+ struct mutex commit_mutex;
+ u64 table_handle;
+ unsigned int base_seq;
+ u8 validate_state;
+};
+
+extern unsigned int nf_tables_net_id;
+
+static inline struct nftables_pernet *nft_pernet(const struct net *net)
+{
+ return net_generic(net, nf_tables_net_id);
+}
+
+#define __NFT_REDUCE_READONLY 1UL
+#define NFT_REDUCE_READONLY (void *)__NFT_REDUCE_READONLY
+
+static inline bool nft_reduce_is_readonly(const struct nft_expr *expr)
+{
+ return expr->ops->reduce == NFT_REDUCE_READONLY;
+}
+
+void nft_reg_track_update(struct nft_regs_track *track,
+ const struct nft_expr *expr, u8 dreg, u8 len);
+void nft_reg_track_cancel(struct nft_regs_track *track, u8 dreg, u8 len);
+void __nft_reg_track_cancel(struct nft_regs_track *track, u8 dreg);
+
+static inline bool nft_reg_track_cmp(struct nft_regs_track *track,
+ const struct nft_expr *expr, u8 dreg)
+{
+ return track->regs[dreg].selector &&
+ track->regs[dreg].selector->ops == expr->ops &&
+ track->regs[dreg].num_reg == 0;
+}
+
#endif /* _NET_NF_TABLES_H */
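The nf_tables per-netns state that this diff strips out of struct netns_nftables (see further down) now lives in net_generic() storage keyed by nf_tables_net_id, reached through nft_pernet(). A minimal sketch of the registration pattern such storage depends on; the example_* names and init logic are illustrative assumptions, only net_generic() and the .id/.size mechanism are real:

/* Per-netns storage registered via pernet_operations. Passing .id
 * and .size makes the core allocate the blob per namespace and hand
 * out the slot id used by net_generic().
 */
static unsigned int example_net_id __read_mostly;

struct example_pernet {
        struct list_head tables;
        struct mutex commit_mutex;
};

static int __net_init example_net_init(struct net *net)
{
        struct example_pernet *p = net_generic(net, example_net_id);

        INIT_LIST_HEAD(&p->tables);
        mutex_init(&p->commit_mutex);
        return 0;
}

static struct pernet_operations example_net_ops = {
        .init = example_net_init,
        .id   = &example_net_id,
        .size = sizeof(struct example_pernet),
};

/* registered once at module init: register_pernet_subsys(&example_net_ops); */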
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index 29e7e1021267..1223af68cd9a 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -3,9 +3,11 @@
#define _NET_NF_TABLES_CORE_H
#include <net/netfilter/nf_tables.h>
+#include <linux/indirect_call_wrapper.h>
extern struct nft_expr_type nft_imm_type;
extern struct nft_expr_type nft_cmp_type;
+extern struct nft_expr_type nft_counter_type;
extern struct nft_expr_type nft_lookup_type;
extern struct nft_expr_type nft_bitwise_type;
extern struct nft_expr_type nft_byteorder_type;
@@ -15,50 +17,60 @@ extern struct nft_expr_type nft_range_type;
extern struct nft_expr_type nft_meta_type;
extern struct nft_expr_type nft_rt_type;
extern struct nft_expr_type nft_exthdr_type;
+extern struct nft_expr_type nft_last_type;
#ifdef CONFIG_NETWORK_SECMARK
extern struct nft_object_type nft_secmark_obj_type;
#endif
+extern struct nft_object_type nft_counter_obj_type;
int nf_tables_core_module_init(void);
void nf_tables_core_module_exit(void);
+struct nft_bitwise_fast_expr {
+ u32 mask;
+ u32 xor;
+ u8 sreg;
+ u8 dreg;
+};
+
struct nft_cmp_fast_expr {
u32 data;
- enum nft_registers sreg:8;
+ u32 mask;
+ u8 sreg;
+ u8 len;
+ bool inv;
+};
+
+struct nft_cmp16_fast_expr {
+ struct nft_data data;
+ struct nft_data mask;
+ u8 sreg;
u8 len;
+ bool inv;
};
struct nft_immediate_expr {
struct nft_data data;
- enum nft_registers dreg:8;
+ u8 dreg;
u8 dlen;
};
-/* Calculate the mask for the nft_cmp_fast expression. On big endian the
- * mask needs to include the *upper* bytes when interpreting that data as
- * something smaller than the full u32, therefore a cpu_to_le32 is done.
- */
-static inline u32 nft_cmp_fast_mask(unsigned int len)
-{
- return cpu_to_le32(~0U >> (sizeof_field(struct nft_cmp_fast_expr,
- data) * BITS_PER_BYTE - len));
-}
-
extern const struct nft_expr_ops nft_cmp_fast_ops;
+extern const struct nft_expr_ops nft_cmp16_fast_ops;
struct nft_payload {
enum nft_payload_bases base:8;
u8 offset;
u8 len;
- enum nft_registers dreg:8;
+ u8 dreg;
};
struct nft_payload_set {
enum nft_payload_bases base:8;
u8 offset;
u8 len;
- enum nft_registers sreg:8;
+ u8 sreg;
u8 csum_type;
u8 csum_offset;
u8 csum_flags;
@@ -66,15 +78,50 @@ struct nft_payload_set {
extern const struct nft_expr_ops nft_payload_fast_ops;
+extern const struct nft_expr_ops nft_bitwise_fast_ops;
+
extern struct static_key_false nft_counters_enabled;
extern struct static_key_false nft_trace_enabled;
-extern struct nft_set_type nft_set_rhash_type;
-extern struct nft_set_type nft_set_hash_type;
-extern struct nft_set_type nft_set_hash_fast_type;
-extern struct nft_set_type nft_set_rbtree_type;
-extern struct nft_set_type nft_set_bitmap_type;
-extern struct nft_set_type nft_set_pipapo_type;
+extern const struct nft_set_type nft_set_rhash_type;
+extern const struct nft_set_type nft_set_hash_type;
+extern const struct nft_set_type nft_set_hash_fast_type;
+extern const struct nft_set_type nft_set_rbtree_type;
+extern const struct nft_set_type nft_set_bitmap_type;
+extern const struct nft_set_type nft_set_pipapo_type;
+extern const struct nft_set_type nft_set_pipapo_avx2_type;
+
+#ifdef CONFIG_RETPOLINE
+bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+bool nft_hash_lookup_fast(const struct net *net,
+ const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+bool nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+#else
+static inline bool
+nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext)
+{
+ return set->ops->lookup(net, set, key, ext);
+}
+#endif
+
+/* called from nft_pipapo_avx2.c */
+bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+/* called from nft_set_pipapo.c */
+bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+
+void nft_counter_init_seqcount(void);
struct nft_expr;
struct nft_regs;
@@ -99,4 +146,6 @@ void nft_dynset_eval(const struct nft_expr *expr,
struct nft_regs *regs, const struct nft_pktinfo *pkt);
void nft_rt_get_eval(const struct nft_expr *expr,
struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_counter_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ const struct nft_pktinfo *pkt);
#endif /* _NET_NF_TABLES_CORE_H */
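The CONFIG_RETPOLINE block above exports the concrete set lookup functions so the hot path can avoid a retpolined indirect call. A sketch of what the out-of-line nft_set_do_lookup() plausibly looks like, assuming the INDIRECT_CALL_4() helper from <linux/indirect_call_wrapper.h> included above; the real definition lives in net/netfilter and may order or extend the candidate list differently:

/* Compare set->ops->lookup against the known implementations and
 * call the match directly; fall back to the indirect call otherwise.
 */
bool nft_set_do_lookup(const struct net *net, const struct nft_set *set,
                       const u32 *key, const struct nft_set_ext **ext)
{
        return INDIRECT_CALL_4(set->ops->lookup,
                               nft_rhash_lookup, nft_hash_lookup_fast,
                               nft_bitmap_lookup, nft_rbtree_lookup,
                               net, set, key, ext);
}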
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index ed7b511f0a59..c4a6147b0ef8 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -5,26 +5,24 @@
#include <net/netfilter/nf_tables.h>
#include <net/ip.h>
-static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt)
{
struct iphdr *ip;
ip = ip_hdr(pkt->skb);
- pkt->tprot_set = true;
+ pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = ip->protocol;
- pkt->xt.thoff = ip_hdrlen(pkt->skb);
- pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
+ pkt->thoff = ip_hdrlen(pkt->skb);
+ pkt->fragoff = ntohs(ip->frag_off) & IP_OFFSET;
}
-static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
{
struct iphdr *iph, _iph;
u32 len, thoff;
- iph = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*iph),
- &_iph);
+ iph = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb),
+ sizeof(*iph), &_iph);
if (!iph)
return -1;
@@ -33,24 +31,56 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
len = ntohs(iph->tot_len);
thoff = iph->ihl * 4;
- if (skb->len < len)
+ if (pkt->skb->len < len)
return -1;
else if (len < thoff)
return -1;
- pkt->tprot_set = true;
+ pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = iph->protocol;
- pkt->xt.thoff = thoff;
- pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET;
+ pkt->thoff = thoff;
+ pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET;
return 0;
}
-static inline void nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline void nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
{
- if (__nft_set_pktinfo_ipv4_validate(pkt, skb) < 0)
- nft_set_pktinfo_unspec(pkt, skb);
+ if (__nft_set_pktinfo_ipv4_validate(pkt) < 0)
+ nft_set_pktinfo_unspec(pkt);
+}
+
+static inline int nft_set_pktinfo_ipv4_ingress(struct nft_pktinfo *pkt)
+{
+ struct iphdr *iph;
+ u32 len, thoff;
+
+ if (!pskb_may_pull(pkt->skb, sizeof(*iph)))
+ return -1;
+
+ iph = ip_hdr(pkt->skb);
+ if (iph->ihl < 5 || iph->version != 4)
+ goto inhdr_error;
+
+ len = ntohs(iph->tot_len);
+ thoff = iph->ihl * 4;
+ if (pkt->skb->len < len) {
+ __IP_INC_STATS(nft_net(pkt), IPSTATS_MIB_INTRUNCATEDPKTS);
+ return -1;
+ } else if (len < thoff) {
+ goto inhdr_error;
+ }
+
+ pkt->flags = NFT_PKTINFO_L4PROTO;
+ pkt->tprot = iph->protocol;
+ pkt->thoff = thoff;
+ pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET;
+
+ return 0;
+
+inhdr_error:
+ __IP_INC_STATS(nft_net(pkt), IPSTATS_MIB_INHDRERRORS);
+ return -1;
}
#endif
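With the sk_buff reachable as pkt->skb, the per-family initialisers drop their skb argument; callers are expected to seed the generic pktinfo first. A sketch of the resulting call sequence in an IPv4 base-chain hook, assuming nft_set_pktinfo() and nft_do_chain() as declared in nf_tables.h; the hook function itself is illustrative:

static unsigned int example_ipv4_hook(void *priv, struct sk_buff *skb,
                                      const struct nf_hook_state *state)
{
        struct nft_pktinfo pkt;

        nft_set_pktinfo(&pkt, skb, state);      /* stores skb/state into pkt */
        nft_set_pktinfo_ipv4(&pkt);             /* now only needs pkt */

        return nft_do_chain(&pkt, priv);
}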
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index d0f1c537b017..ec7eaeaf4f04 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -6,8 +6,7 @@
#include <net/ipv6.h>
#include <net/netfilter/nf_tables.h>
-static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt)
{
unsigned int flags = IP6_FH_F_AUTH;
int protohdr, thoff = 0;
@@ -15,18 +14,17 @@ static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
if (protohdr < 0) {
- nft_set_pktinfo_unspec(pkt, skb);
+ nft_set_pktinfo_unspec(pkt);
return;
}
- pkt->tprot_set = true;
+ pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = protohdr;
- pkt->xt.thoff = thoff;
- pkt->xt.fragoff = frag_off;
+ pkt->thoff = thoff;
+ pkt->fragoff = frag_off;
}
-static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
{
#if IS_ENABLED(CONFIG_IPV6)
unsigned int flags = IP6_FH_F_AUTH;
@@ -36,8 +34,8 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
int protohdr;
u32 pkt_len;
- ip6h = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*ip6h),
- &_ip6h);
+ ip6h = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb),
+ sizeof(*ip6h), &_ip6h);
if (!ip6h)
return -1;
@@ -45,17 +43,17 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
return -1;
pkt_len = ntohs(ip6h->payload_len);
- if (pkt_len + sizeof(*ip6h) > skb->len)
+ if (pkt_len + sizeof(*ip6h) > pkt->skb->len)
return -1;
protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
if (protohdr < 0)
return -1;
- pkt->tprot_set = true;
+ pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = protohdr;
- pkt->xt.thoff = thoff;
- pkt->xt.fragoff = frag_off;
+ pkt->thoff = thoff;
+ pkt->fragoff = frag_off;
return 0;
#else
@@ -63,11 +61,55 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
#endif
}
-static inline void nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline void nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
{
- if (__nft_set_pktinfo_ipv6_validate(pkt, skb) < 0)
- nft_set_pktinfo_unspec(pkt, skb);
+ if (__nft_set_pktinfo_ipv6_validate(pkt) < 0)
+ nft_set_pktinfo_unspec(pkt);
+}
+
+static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ unsigned int flags = IP6_FH_F_AUTH;
+ unsigned short frag_off;
+ unsigned int thoff = 0;
+ struct inet6_dev *idev;
+ struct ipv6hdr *ip6h;
+ int protohdr;
+ u32 pkt_len;
+
+ if (!pskb_may_pull(pkt->skb, sizeof(*ip6h)))
+ return -1;
+
+ ip6h = ipv6_hdr(pkt->skb);
+ if (ip6h->version != 6)
+ goto inhdr_error;
+
+ pkt_len = ntohs(ip6h->payload_len);
+ if (pkt_len + sizeof(*ip6h) > pkt->skb->len) {
+ idev = __in6_dev_get(nft_in(pkt));
+ __IP6_INC_STATS(nft_net(pkt), idev, IPSTATS_MIB_INTRUNCATEDPKTS);
+ return -1;
+ }
+
+ protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
+ if (protohdr < 0)
+ goto inhdr_error;
+
+ pkt->flags = NFT_PKTINFO_L4PROTO;
+ pkt->tprot = protohdr;
+ pkt->thoff = thoff;
+ pkt->fragoff = frag_off;
+
+ return 0;
+
+inhdr_error:
+ idev = __in6_dev_get(nft_in(pkt));
+ __IP6_INC_STATS(nft_net(pkt), idev, IPSTATS_MIB_INHDRERRORS);
+ return -1;
+#else
+ return -1;
+#endif
}
#endif
diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
index ea7d1d78b92d..3568b6a2f5f0 100644
--- a/include/net/netfilter/nf_tables_offload.h
+++ b/include/net/netfilter/nf_tables_offload.h
@@ -4,11 +4,16 @@
#include <net/flow_offload.h>
#include <net/netfilter/nf_tables.h>
+enum nft_offload_reg_flags {
+ NFT_OFFLOAD_F_NETWORK2HOST = (1 << 0),
+};
+
struct nft_offload_reg {
u32 key;
u32 len;
u32 base_offset;
u32 offset;
+ u32 flags;
struct nft_data data;
struct nft_data mask;
};
@@ -37,6 +42,7 @@ void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
struct nft_flow_key {
struct flow_dissector_key_basic basic;
+ struct flow_dissector_key_control control;
union {
struct flow_dissector_key_ipv4_addrs ipv4;
struct flow_dissector_key_ipv6_addrs ipv6;
@@ -44,6 +50,7 @@ struct nft_flow_key {
struct flow_dissector_key_ports tp;
struct flow_dissector_key_ip ip;
struct flow_dissector_key_vlan vlan;
+ struct flow_dissector_key_vlan cvlan;
struct flow_dissector_key_eth_addrs eth_addrs;
struct flow_dissector_key_meta meta;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
@@ -60,23 +67,32 @@ struct nft_flow_rule {
struct flow_rule *rule;
};
-#define NFT_OFFLOAD_F_ACTION (1 << 0)
+void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
+ enum flow_dissector_key_id addr_type);
struct nft_rule;
struct nft_flow_rule *nft_flow_rule_create(struct net *net, const struct nft_rule *rule);
+int nft_flow_rule_stats(const struct nft_chain *chain, const struct nft_rule *rule);
void nft_flow_rule_destroy(struct nft_flow_rule *flow);
int nft_flow_rule_offload_commit(struct net *net);
-#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \
+#define NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, __flags) \
(__reg)->base_offset = \
offsetof(struct nft_flow_key, __base); \
(__reg)->offset = \
offsetof(struct nft_flow_key, __base.__field); \
(__reg)->len = __len; \
(__reg)->key = __key; \
+ (__reg)->flags = __flags;
+
+#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \
+ NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, 0)
+
+#define NFT_OFFLOAD_MATCH_EXACT(__key, __base, __field, __len, __reg) \
+ NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \
memset(&(__reg)->mask, 0xff, (__reg)->len);
-int nft_chain_offload_priority(struct nft_base_chain *basechain);
+bool nft_chain_offload_support(const struct nft_base_chain *basechain);
int nft_offload_init(void);
void nft_offload_exit(void);
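NFT_OFFLOAD_MATCH_FLAGS() lets an expression attach per-register flags such as NFT_OFFLOAD_F_NETWORK2HOST, with the old NFT_OFFLOAD_MATCH() reduced to the zero-flags case. A usage sketch, assuming the caller shape of an expression's offload callback; example_vlan_offload() and the dreg plumbing are illustrative, while vlan_tci is the network-order TCI view in struct flow_dissector_key_vlan:

static int example_vlan_offload(struct nft_offload_ctx *ctx, u8 dreg)
{
        struct nft_offload_reg *reg = &ctx->regs[dreg];

        /* TCI is matched in network byte order, so ask the offload
         * core to convert it to host order for the driver.
         */
        NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan, vlan_tci,
                                sizeof(__be16), reg,
                                NFT_OFFLOAD_F_NETWORK2HOST);
        return 0;
}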
diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h
index 628b6fa579cd..eed099eae672 100644
--- a/include/net/netfilter/nft_fib.h
+++ b/include/net/netfilter/nft_fib.h
@@ -5,7 +5,7 @@
#include <net/netfilter/nf_tables.h>
struct nft_fib {
- enum nft_registers dreg:8;
+ u8 dreg;
u8 result;
u32 flags;
};
@@ -37,4 +37,7 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
void nft_fib_store_result(void *reg, const struct nft_fib *priv,
const struct net_device *dev);
+
+bool nft_fib_reduce(struct nft_regs_track *track,
+ const struct nft_expr *expr);
#endif
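The dreg narrowing above pairs with the new ->reduce() infrastructure in nf_tables.h: nft_fib_reduce() lets a fib expression skip re-evaluation when the tracked register already holds the same lookup result. A simplified sketch of that callback's shape, built on nft_expr_priv() and the tracking helpers declared earlier; the in-tree nft_fib_reduce() additionally accounts for bitwise munging of the tracked register:

static bool example_fib_reduce(struct nft_regs_track *track,
                               const struct nft_expr *expr)
{
        const struct nft_fib *priv = nft_expr_priv(expr);
        const struct nft_fib *last;

        if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
                nft_reg_track_update(track, expr, priv->dreg, sizeof(u32));
                return false;
        }

        last = nft_expr_priv(track->regs[priv->dreg].selector);
        if (priv->result != last->result || priv->flags != last->flags) {
                nft_reg_track_update(track, expr, priv->dreg, sizeof(u32));
                return false;
        }

        /* Same selector, same parameters: the destination register
         * already holds the value this expression would produce.
         */
        return true;
}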
diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
index 07e2fd507963..9b51cc67de54 100644
--- a/include/net/netfilter/nft_meta.h
+++ b/include/net/netfilter/nft_meta.h
@@ -6,9 +6,10 @@
struct nft_meta {
enum nft_meta_keys key:8;
+ u8 len;
union {
- enum nft_registers dreg:8;
- enum nft_registers sreg:8;
+ u8 dreg;
+ u8 sreg;
};
};
@@ -43,4 +44,6 @@ int nft_meta_set_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data);
+bool nft_meta_get_reduce(struct nft_regs_track *track,
+ const struct nft_expr *expr);
#endif
diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h
index 832ab69efda5..4c3809e141f4 100644
--- a/include/net/netfilter/xt_rateest.h
+++ b/include/net/netfilter/xt_rateest.h
@@ -6,7 +6,7 @@
struct xt_rateest {
/* keep lock and bstats on same cache line to speedup xt_rateest_tg() */
- struct gnet_stats_basic_packed bstats;
+ struct gnet_stats_basic_sync bstats;
spinlock_t lock;
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 56c365dc6dc7..6bfa972f2fbf 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -142,7 +142,7 @@
* Attribute Misc:
* nla_memcpy(dest, nla, count) copy attribute into memory
* nla_memcmp(nla, data, size) compare attribute with memory area
- * nla_strlcpy(dst, nla, size) copy attribute to a sized string
+ * nla_strscpy(dst, nla, size) copy attribute to a sized string
* nla_strcmp(nla, str) compare attribute with string
*
* Attribute Parsing:
@@ -181,19 +181,29 @@ enum {
NLA_S64,
NLA_BITFIELD32,
NLA_REJECT,
- NLA_EXACT_LEN,
- NLA_EXACT_LEN_WARN,
- NLA_MIN_LEN,
+ NLA_BE16,
+ NLA_BE32,
__NLA_TYPE_MAX,
};
#define NLA_TYPE_MAX (__NLA_TYPE_MAX - 1)
+struct netlink_range_validation {
+ u64 min, max;
+};
+
+struct netlink_range_validation_signed {
+ s64 min, max;
+};
+
enum nla_policy_validation {
NLA_VALIDATE_NONE,
NLA_VALIDATE_RANGE,
+ NLA_VALIDATE_RANGE_WARN_TOO_LONG,
NLA_VALIDATE_MIN,
NLA_VALIDATE_MAX,
+ NLA_VALIDATE_MASK,
+ NLA_VALIDATE_RANGE_PTR,
NLA_VALIDATE_FUNCTION,
};
@@ -213,79 +223,98 @@ enum nla_policy_validation {
* NLA_NUL_STRING Maximum length of string (excluding NUL)
* NLA_FLAG Unused
* NLA_BINARY Maximum length of attribute payload
- * NLA_MIN_LEN Minimum length of attribute payload
+ * (but see also below with the validation type)
* NLA_NESTED,
* NLA_NESTED_ARRAY Length verification is done by checking len of
* nested header (or empty); len field is used if
- * validation_data is also used, for the max attr
+ * nested_policy is also used, for the max attr
* number in the nested policy.
* NLA_U8, NLA_U16,
* NLA_U32, NLA_U64,
* NLA_S8, NLA_S16,
* NLA_S32, NLA_S64,
+ * NLA_BE16, NLA_BE32,
* NLA_MSECS Leaving the length field zero will verify that the
* given type fits; using it verifies minimum length,
* just like "All other"
* NLA_BITFIELD32 Unused
* NLA_REJECT Unused
- * NLA_EXACT_LEN Attribute must have exactly this length, otherwise
- * it is rejected.
- * NLA_EXACT_LEN_WARN Attribute should have exactly this length, a warning
- * is logged if it is longer, shorter is rejected.
- * NLA_MIN_LEN Minimum length of attribute payload
* All other Minimum length of attribute payload
*
- * Meaning of `validation_data' field:
+ * Meaning of validation union:
* NLA_BITFIELD32 This is a 32-bit bitmap/bitselector attribute and
- * validation data must point to a u32 value of valid
- * flags
- * NLA_REJECT This attribute is always rejected and validation data
+ * `bitfield32_valid' is the u32 value of valid flags
+ * NLA_REJECT This attribute is always rejected and `reject_message'
* may point to a string to report as the error instead
* of the generic one in extended ACK.
- * NLA_NESTED Points to a nested policy to validate, must also set
- * `len' to the max attribute number.
+ * NLA_NESTED `nested_policy' points to a nested policy to
+ * validate, must also set `len' to the max attribute number.
+ * Use the provided NLA_POLICY_NESTED() macro.
* Note that nla_parse() will validate, but of course not
* parse, the nested sub-policies.
- * NLA_NESTED_ARRAY Points to a nested policy to validate, must also set
- * `len' to the max attribute number. The difference to
- * NLA_NESTED is the structure - NLA_NESTED has the
- * nested attributes directly inside, while an array has
- * the nested attributes at another level down and the
- * attributes directly in the nesting don't matter.
- * All other Unused - but note that it's a union
- *
- * Meaning of `min' and `max' fields, use via NLA_POLICY_MIN, NLA_POLICY_MAX
- * and NLA_POLICY_RANGE:
+ * NLA_NESTED_ARRAY `nested_policy' points to a nested policy to validate,
+ * must also set `len' to the max attribute number. Use
+ * the provided NLA_POLICY_NESTED_ARRAY() macro.
+ * The difference to NLA_NESTED is the structure:
+ * NLA_NESTED has the nested attributes directly inside
+ * while an array has the nested attributes at another
+ * level down and the attribute types directly in the
+ * nesting don't matter.
* NLA_U8,
* NLA_U16,
* NLA_U32,
* NLA_U64,
+ * NLA_BE16,
+ * NLA_BE32,
* NLA_S8,
* NLA_S16,
* NLA_S32,
- * NLA_S64 These are used depending on the validation_type
- * field, if that is min/max/range then the minimum,
- * maximum and both are used (respectively) to check
+ * NLA_S64 The `min' and `max' fields are used depending on the
+ * validation_type field, if that is min/max/range then
+ * the min, max or both are used (respectively) to check
* the value of the integer attribute.
* Note that in the interest of code simplicity and
* struct size both limits are s16, so you cannot
* enforce a range that doesn't fall within the range
* of s16 - do that as usual in the code instead.
+ * Use the NLA_POLICY_MIN(), NLA_POLICY_MAX() and
+ * NLA_POLICY_RANGE() macros.
+ * NLA_U8,
+ * NLA_U16,
+ * NLA_U32,
+ * NLA_U64 If the validation_type field instead is set to
+ * NLA_VALIDATE_RANGE_PTR, `range' must be a pointer
+ * to a struct netlink_range_validation that indicates
+ * the min/max values.
+ * Use NLA_POLICY_FULL_RANGE().
+ * NLA_S8,
+ * NLA_S16,
+ * NLA_S32,
+ * NLA_S64 If the validation_type field instead is set to
+ * NLA_VALIDATE_RANGE_PTR, `range_signed' must be a
+ * pointer to a struct netlink_range_validation_signed
+ * that indicates the min/max values.
+ * Use NLA_POLICY_FULL_RANGE_SIGNED().
+ *
+ * NLA_BINARY If the validation type is like the ones for integers
+ * above, then the min/max length (not the value, as for
+ * integers) of the attribute is enforced.
+ *
* All other Unused - but note that it's a union
*
* Meaning of `validate' field, use via NLA_POLICY_VALIDATE_FN:
- * NLA_BINARY Validation function called for the attribute,
- * not compatible with use of the validation_data
- * as in NLA_BITFIELD32, NLA_REJECT, NLA_NESTED and
- * NLA_NESTED_ARRAY.
+ * NLA_BINARY Validation function called for the attribute.
* All other Unused - but note that it's a union
*
* Example:
+ *
+ * static const u32 myvalidflags = 0xff231023;
+ *
* static const struct nla_policy my_policy[ATTR_MAX+1] = {
* [ATTR_FOO] = { .type = NLA_U16 },
* [ATTR_BAR] = { .type = NLA_STRING, .len = BARSIZ },
- * [ATTR_BAZ] = { .type = NLA_EXACT_LEN, .len = sizeof(struct mystruct) },
- * [ATTR_GOO] = { .type = NLA_BITFIELD32, .validation_data = &myvalidflags },
+ * [ATTR_BAZ] = NLA_POLICY_EXACT_LEN(sizeof(struct mystruct)),
+ * [ATTR_GOO] = NLA_POLICY_BITFIELD32(myvalidflags),
* };
*/
struct nla_policy {
@@ -293,22 +322,20 @@ struct nla_policy {
u8 validation_type;
u16 len;
union {
- const void *validation_data;
- struct {
- s16 min, max;
- };
- int (*validate)(const struct nlattr *attr,
- struct netlink_ext_ack *extack);
- /* This entry is special, and used for the attribute at index 0
+ /**
+ * @strict_start_type: first attribute to validate strictly
+ *
+ * This entry is special, and used for the attribute at index 0
* only, and specifies special data about the policy, namely it
* specifies the "boundary type" where strict length validation
* starts for any attribute types >= this value, also, strict
* nesting validation starts here.
*
* Additionally, it means that NLA_UNSPEC is actually NLA_REJECT
- * for any types >= this, so need to use NLA_MIN_LEN to get the
- * previous pure { .len = xyz } behaviour. The advantage of this
- * is that types not specified in the policy will be rejected.
+ * for any types >= this, so you need to use NLA_POLICY_MIN_LEN()
+ * to get the previous pure { .len = xyz } behaviour. The advantage
+ * of this is that types not specified in the policy will be
+ * rejected.
*
* For completely new families it should be set to 1 so that the
* validation is enforced for all attributes. For existing ones
@@ -317,57 +344,103 @@ struct nla_policy {
* was added to enforce strict validation from thereon.
*/
u16 strict_start_type;
+
+ /* private: use NLA_POLICY_*() to set */
+ const u32 bitfield32_valid;
+ const u32 mask;
+ const char *reject_message;
+ const struct nla_policy *nested_policy;
+ struct netlink_range_validation *range;
+ struct netlink_range_validation_signed *range_signed;
+ struct {
+ s16 min, max;
+ };
+ int (*validate)(const struct nlattr *attr,
+ struct netlink_ext_ack *extack);
};
};
-#define NLA_POLICY_EXACT_LEN(_len) { .type = NLA_EXACT_LEN, .len = _len }
-#define NLA_POLICY_EXACT_LEN_WARN(_len) { .type = NLA_EXACT_LEN_WARN, \
- .len = _len }
-#define NLA_POLICY_MIN_LEN(_len) { .type = NLA_MIN_LEN, .len = _len }
-
#define NLA_POLICY_ETH_ADDR NLA_POLICY_EXACT_LEN(ETH_ALEN)
#define NLA_POLICY_ETH_ADDR_COMPAT NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN)
#define _NLA_POLICY_NESTED(maxattr, policy) \
- { .type = NLA_NESTED, .validation_data = policy, .len = maxattr }
+ { .type = NLA_NESTED, .nested_policy = policy, .len = maxattr }
#define _NLA_POLICY_NESTED_ARRAY(maxattr, policy) \
- { .type = NLA_NESTED_ARRAY, .validation_data = policy, .len = maxattr }
+ { .type = NLA_NESTED_ARRAY, .nested_policy = policy, .len = maxattr }
#define NLA_POLICY_NESTED(policy) \
_NLA_POLICY_NESTED(ARRAY_SIZE(policy) - 1, policy)
#define NLA_POLICY_NESTED_ARRAY(policy) \
_NLA_POLICY_NESTED_ARRAY(ARRAY_SIZE(policy) - 1, policy)
+#define NLA_POLICY_BITFIELD32(valid) \
+ { .type = NLA_BITFIELD32, .bitfield32_valid = valid }
+
+#define __NLA_IS_UINT_TYPE(tp) \
+ (tp == NLA_U8 || tp == NLA_U16 || tp == NLA_U32 || tp == NLA_U64)
+#define __NLA_IS_SINT_TYPE(tp) \
+ (tp == NLA_S8 || tp == NLA_S16 || tp == NLA_S32 || tp == NLA_S64)
+#define __NLA_IS_BEINT_TYPE(tp) \
+ (tp == NLA_BE16 || tp == NLA_BE32)
#define __NLA_ENSURE(condition) BUILD_BUG_ON_ZERO(!(condition))
-#define NLA_ENSURE_INT_TYPE(tp) \
- (__NLA_ENSURE(tp == NLA_S8 || tp == NLA_U8 || \
- tp == NLA_S16 || tp == NLA_U16 || \
- tp == NLA_S32 || tp == NLA_U32 || \
- tp == NLA_S64 || tp == NLA_U64) + tp)
+#define NLA_ENSURE_UINT_TYPE(tp) \
+ (__NLA_ENSURE(__NLA_IS_UINT_TYPE(tp)) + tp)
+#define NLA_ENSURE_UINT_OR_BINARY_TYPE(tp) \
+ (__NLA_ENSURE(__NLA_IS_UINT_TYPE(tp) || \
+ tp == NLA_MSECS || \
+ tp == NLA_BINARY) + tp)
+#define NLA_ENSURE_SINT_TYPE(tp) \
+ (__NLA_ENSURE(__NLA_IS_SINT_TYPE(tp)) + tp)
+#define NLA_ENSURE_INT_OR_BINARY_TYPE(tp) \
+ (__NLA_ENSURE(__NLA_IS_UINT_TYPE(tp) || \
+ __NLA_IS_SINT_TYPE(tp) || \
+ __NLA_IS_BEINT_TYPE(tp) || \
+ tp == NLA_MSECS || \
+ tp == NLA_BINARY) + tp)
#define NLA_ENSURE_NO_VALIDATION_PTR(tp) \
(__NLA_ENSURE(tp != NLA_BITFIELD32 && \
tp != NLA_REJECT && \
tp != NLA_NESTED && \
tp != NLA_NESTED_ARRAY) + tp)
+#define NLA_ENSURE_BEINT_TYPE(tp) \
+ (__NLA_ENSURE(__NLA_IS_BEINT_TYPE(tp)) + tp)
#define NLA_POLICY_RANGE(tp, _min, _max) { \
- .type = NLA_ENSURE_INT_TYPE(tp), \
+ .type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
.validation_type = NLA_VALIDATE_RANGE, \
.min = _min, \
.max = _max \
}
+#define NLA_POLICY_FULL_RANGE(tp, _range) { \
+ .type = NLA_ENSURE_UINT_OR_BINARY_TYPE(tp), \
+ .validation_type = NLA_VALIDATE_RANGE_PTR, \
+ .range = _range, \
+}
+
+#define NLA_POLICY_FULL_RANGE_SIGNED(tp, _range) { \
+ .type = NLA_ENSURE_SINT_TYPE(tp), \
+ .validation_type = NLA_VALIDATE_RANGE_PTR, \
+ .range_signed = _range, \
+}
+
#define NLA_POLICY_MIN(tp, _min) { \
- .type = NLA_ENSURE_INT_TYPE(tp), \
+ .type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
.validation_type = NLA_VALIDATE_MIN, \
.min = _min, \
}
#define NLA_POLICY_MAX(tp, _max) { \
- .type = NLA_ENSURE_INT_TYPE(tp), \
+ .type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
.validation_type = NLA_VALIDATE_MAX, \
.max = _max, \
}
+#define NLA_POLICY_MASK(tp, _mask) { \
+ .type = NLA_ENSURE_UINT_TYPE(tp), \
+ .validation_type = NLA_VALIDATE_MASK, \
+ .mask = _mask, \
+}
+
#define NLA_POLICY_VALIDATE_FN(tp, fn, ...) { \
.type = NLA_ENSURE_NO_VALIDATION_PTR(tp), \
.validation_type = NLA_VALIDATE_FUNCTION, \
@@ -375,6 +448,15 @@ struct nla_policy {
.len = __VA_ARGS__ + 0, \
}
+#define NLA_POLICY_EXACT_LEN(_len) NLA_POLICY_RANGE(NLA_BINARY, _len, _len)
+#define NLA_POLICY_EXACT_LEN_WARN(_len) { \
+ .type = NLA_BINARY, \
+ .validation_type = NLA_VALIDATE_RANGE_WARN_TOO_LONG, \
+ .min = _len, \
+ .max = _len \
+}
+#define NLA_POLICY_MIN_LEN(_len) NLA_POLICY_MIN(NLA_BINARY, _len)
+
/**
* struct nl_info - netlink source information
* @nlh: Netlink message header of original request
@@ -439,7 +521,7 @@ int __nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
struct netlink_ext_ack *extack);
int nla_policy_len(const struct nla_policy *, int);
struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype);
-size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize);
+ssize_t nla_strscpy(char *dst, const struct nlattr *nla, size_t dstsize);
char *nla_strdup(const struct nlattr *nla, gfp_t flags);
int nla_memcpy(void *dest, const struct nlattr *src, int count);
int nla_memcmp(const struct nlattr *nla, const void *data, size_t size);
@@ -674,7 +756,7 @@ static inline int __nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
* @hdrlen: length of family specific header
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
- * @validate: validation strictness
+ * @policy: validation policy
* @extack: extended ACK report struct
*
* See nla_parse()
@@ -694,6 +776,7 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
* @hdrlen: length of family specific header
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
* @extack: extended ACK report struct
*
* See nla_parse_deprecated()
@@ -713,6 +796,7 @@ static inline int nlmsg_parse_deprecated(const struct nlmsghdr *nlh, int hdrlen,
* @hdrlen: length of family specific header
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
* @extack: extended ACK report struct
*
* See nla_parse_deprecated_strict()
@@ -748,7 +832,6 @@ static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
* @len: length of attribute stream
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
- * @validate: validation strictness
* @extack: extended ACK report struct
*
* Validates all attributes in the specified attribute stream against the
@@ -772,7 +855,6 @@ static inline int nla_validate_deprecated(const struct nlattr *head, int len,
* @len: length of attribute stream
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
- * @validate: validation strictness
* @extack: extended ACK report struct
*
* Validates all attributes in the specified attribute stream against the
@@ -820,7 +902,7 @@ static inline int nlmsg_validate_deprecated(const struct nlmsghdr *nlh,
*/
static inline int nlmsg_report(const struct nlmsghdr *nlh)
{
- return !!(nlh->nlmsg_flags & NLM_F_ECHO);
+ return nlh ? !!(nlh->nlmsg_flags & NLM_F_ECHO) : 0;
}
/**
@@ -1466,6 +1548,21 @@ static inline int nla_put_in6_addr(struct sk_buff *skb, int attrtype,
}
/**
+ * nla_put_bitfield32 - Add a bitfield32 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: value carrying bits
+ * @selector: selector of valid bits
+ */
+static inline int nla_put_bitfield32(struct sk_buff *skb, int attrtype,
+ __u32 value, __u32 selector)
+{
+ struct nla_bitfield32 tmp = { value, selector, };
+
+ return nla_put(skb, attrtype, sizeof(tmp), &tmp);
+}
+
+/**
* nla_get_u32 - return payload of u32 attribute
* @nla: u32 netlink attribute
*/
@@ -1861,4 +1958,26 @@ static inline bool nla_is_last(const struct nlattr *nla, int rem)
return nla->nla_len == rem;
}
+void nla_get_range_unsigned(const struct nla_policy *pt,
+ struct netlink_range_validation *range);
+void nla_get_range_signed(const struct nla_policy *pt,
+ struct netlink_range_validation_signed *range);
+
+struct netlink_policy_dump_state;
+
+int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate,
+ const struct nla_policy *policy,
+ unsigned int maxtype);
+int netlink_policy_dump_get_policy_idx(struct netlink_policy_dump_state *state,
+ const struct nla_policy *policy,
+ unsigned int maxtype);
+bool netlink_policy_dump_loop(struct netlink_policy_dump_state *state);
+int netlink_policy_dump_write(struct sk_buff *skb,
+ struct netlink_policy_dump_state *state);
+int netlink_policy_dump_attr_size_estimate(const struct nla_policy *pt);
+int netlink_policy_dump_write_attr(struct sk_buff *skb,
+ const struct nla_policy *pt,
+ int nestattr);
+void netlink_policy_dump_free(struct netlink_policy_dump_state *state);
+
#endif
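The validation union rework replaces ad-hoc validation_data pointers with typed fields and typed macros. A sketch of a policy using several of the new forms; the EX_ATTR_* names and the range values are made up for illustration:

enum {
        EX_ATTR_UNSPEC,
        EX_ATTR_FLAGS,
        EX_ATTR_RATE,
        EX_ATTR_ADDR,
        EX_ATTR_NAME,
        __EX_ATTR_MAX,
};
#define EX_ATTR_MAX (__EX_ATTR_MAX - 1)

/* full-range validation, beyond the s16 min/max limit noted above */
static struct netlink_range_validation example_rate_range = {
        .min = 1,
        .max = 1000000,
};

static const struct nla_policy example_policy[EX_ATTR_MAX + 1] = {
        [EX_ATTR_FLAGS] = NLA_POLICY_MASK(NLA_U32, 0x000000ff),
        [EX_ATTR_RATE]  = NLA_POLICY_FULL_RANGE(NLA_U32, &example_rate_range),
        [EX_ATTR_ADDR]  = NLA_POLICY_EXACT_LEN(16),
        [EX_ATTR_NAME]  = { .type = NLA_NUL_STRING, .len = 31 },
};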
diff --git a/include/net/netns/bpf.h b/include/net/netns/bpf.h
new file mode 100644
index 000000000000..2c01a278d1eb
--- /dev/null
+++ b/include/net/netns/bpf.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * BPF programs attached to network namespace
+ */
+
+#ifndef __NETNS_BPF_H__
+#define __NETNS_BPF_H__
+
+#include <linux/list.h>
+
+struct bpf_prog;
+struct bpf_prog_array;
+
+enum netns_bpf_attach_type {
+ NETNS_BPF_INVALID = -1,
+ NETNS_BPF_FLOW_DISSECTOR = 0,
+ NETNS_BPF_SK_LOOKUP,
+ MAX_NETNS_BPF_ATTACH_TYPE
+};
+
+struct netns_bpf {
+ /* Array of programs to run compiled from progs or links */
+ struct bpf_prog_array __rcu *run_array[MAX_NETNS_BPF_ATTACH_TYPE];
+ struct bpf_prog *progs[MAX_NETNS_BPF_ATTACH_TYPE];
+ struct list_head links[MAX_NETNS_BPF_ATTACH_TYPE];
+};
+
+#endif /* __NETNS_BPF_H__ */
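run_array is what the packet path consults; progs and links feed the control plane. A sketch of the read side, assuming struct net carries this as net->bpf and that the caller holds rcu_read_lock(); the helper name is illustrative, and the flow dissector only ever installs one program per netns:

static inline struct bpf_prog *
example_netns_bpf_prog(struct net *net, enum netns_bpf_attach_type type)
{
        struct bpf_prog_array *run_array;

        run_array = rcu_dereference(net->bpf.run_array[type]);
        if (!run_array)
                return NULL;

        return READ_ONCE(run_array->items[0].prog);
}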
diff --git a/include/net/netns/can.h b/include/net/netns/can.h
index b6ab7d1530d7..48b79f7e6236 100644
--- a/include/net/netns/can.h
+++ b/include/net/netns/can.h
@@ -7,6 +7,7 @@
#define __NETNS_CAN_H__
#include <linux/spinlock.h>
+#include <linux/timer.h>
struct can_dev_rcv_lists;
struct can_pkg_stats;
@@ -15,7 +16,6 @@ struct can_rcv_lists_stats;
struct netns_can {
#if IS_ENABLED(CONFIG_PROC_FS)
struct proc_dir_entry *proc_dir;
- struct proc_dir_entry *pde_version;
struct proc_dir_entry *pde_stats;
struct proc_dir_entry *pde_reset_stats;
struct proc_dir_entry *pde_rcvlist_all;
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 806454e767bf..e1290c159184 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -24,9 +24,13 @@ struct nf_generic_net {
struct nf_tcp_net {
unsigned int timeouts[TCP_CONNTRACK_TIMEOUT_MAX];
- int tcp_loose;
- int tcp_be_liberal;
- int tcp_max_retrans;
+ u8 tcp_loose;
+ u8 tcp_be_liberal;
+ u8 tcp_max_retrans;
+ u8 tcp_ignore_invalid_rst;
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ unsigned int offload_timeout;
+#endif
};
enum udp_conntrack {
@@ -37,6 +41,9 @@ enum udp_conntrack {
struct nf_udp_net {
unsigned int timeouts[UDP_CT_MAX];
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ unsigned int offload_timeout;
+#endif
};
struct nf_icmp_net {
@@ -45,7 +52,7 @@ struct nf_icmp_net {
#ifdef CONFIG_NF_CT_PROTO_DCCP
struct nf_dccp_net {
- int dccp_loose;
+ u8 dccp_loose;
unsigned int dccp_timeout[CT_DCCP_MAX + 1];
};
#endif
@@ -86,34 +93,19 @@ struct nf_ip_net {
#endif
};
-struct ct_pcpu {
- spinlock_t lock;
- struct hlist_nulls_head unconfirmed;
- struct hlist_nulls_head dying;
-};
-
struct netns_ct {
- atomic_t count;
- unsigned int expect_count;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
- struct delayed_work ecache_dwork;
+ u8 ctnetlink_has_listener;
bool ecache_dwork_pending;
#endif
- bool auto_assign_helper_warned;
-#ifdef CONFIG_SYSCTL
- struct ctl_table_header *sysctl_header;
-#endif
- unsigned int sysctl_log_invalid; /* Log invalid packets */
- int sysctl_events;
- int sysctl_acct;
- int sysctl_auto_assign_helper;
- int sysctl_tstamp;
- int sysctl_checksum;
+ u8 sysctl_log_invalid; /* Log invalid packets */
+ u8 sysctl_events;
+ u8 sysctl_acct;
+ u8 sysctl_tstamp;
+ u8 sysctl_checksum;
- struct ct_pcpu __percpu *pcpu_lists;
struct ip_conntrack_stat __percpu *stat;
struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
- struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
struct nf_ip_net nf_ct_proto;
#if defined(CONFIG_NF_CONNTRACK_LABELS)
unsigned int labels_used;
diff --git a/include/net/netns/core.h b/include/net/netns/core.h
index 36c2d998a43c..8249060cf5d0 100644
--- a/include/net/netns/core.h
+++ b/include/net/netns/core.h
@@ -2,6 +2,8 @@
#ifndef __NETNS_CORE_H__
#define __NETNS_CORE_H__
+#include <linux/types.h>
+
struct ctl_table_header;
struct prot_inuse;
@@ -10,9 +12,9 @@ struct netns_core {
struct ctl_table_header *sysctl_hdr;
int sysctl_somaxconn;
+ u8 sysctl_txrehash;
#ifdef CONFIG_PROC_FS
- int __percpu *sock_inuse;
struct prot_inuse __percpu *prot_inuse;
#endif
};
diff --git a/include/net/netns/dccp.h b/include/net/netns/dccp.h
deleted file mode 100644
index cdbc4f5b8390..000000000000
--- a/include/net/netns/dccp.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __NETNS_DCCP_H__
-#define __NETNS_DCCP_H__
-
-struct sock;
-
-struct netns_dccp {
- struct sock *v4_ctl_sk;
- struct sock *v6_ctl_sk;
-};
-
-#endif
diff --git a/include/net/netns/flow_table.h b/include/net/netns/flow_table.h
new file mode 100644
index 000000000000..1c5fc657e267
--- /dev/null
+++ b/include/net/netns/flow_table.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NETNS_FLOW_TABLE_H
+#define __NETNS_FLOW_TABLE_H
+
+struct nf_flow_table_stat {
+ unsigned int count_wq_add;
+ unsigned int count_wq_del;
+ unsigned int count_wq_stats;
+};
+
+struct netns_ft {
+ struct nf_flow_table_stat __percpu *stat;
+};
+#endif
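These counters are per-CPU, so the update side stays lock-free. A sketch of an increment, assuming struct net exposes this as net->ft (as in the kernel) and that a small wrapper macro sits next to the flow table code; the helper name here is an assumption:

static inline void example_ft_count_wq_add(struct net *net)
{
        /* this_cpu_inc() is preemption-safe, no explicit locking */
        this_cpu_inc(net->ft.stat->count_wq_add);
}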
diff --git a/include/net/netns/generic.h b/include/net/netns/generic.h
index 8a1ab47c3fb3..00c399edeed1 100644
--- a/include/net/netns/generic.h
+++ b/include/net/netns/generic.h
@@ -8,6 +8,7 @@
#include <linux/bug.h>
#include <linux/rcupdate.h>
+#include <net/net_namespace.h>
/*
* Generic net pointers are to be used by modules to put some private
@@ -32,7 +33,7 @@ struct net_generic {
struct rcu_head rcu;
} s;
- void *ptr[0];
+ DECLARE_FLEX_ARRAY(void *, ptr);
};
};
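The ptr[0] conversion above needs DECLARE_FLEX_ARRAY() (from <linux/stddef.h>) because a true flexible array member may not be the only member of a struct inside a union; the macro wraps it in an anonymous struct with an empty leading member. Roughly, the union ends up as:

/* conceptual expansion of DECLARE_FLEX_ARRAY(void *, ptr) inside
 * the union - the empty struct makes the flexible array legal
 */
union example_net_generic {
        struct {
                unsigned int len;
                struct rcu_head rcu;
        } s;
        struct {
                struct { } __empty_ptr;
                void *ptr[];
        };
};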
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 08b98414d94e..1b8004679445 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -9,9 +9,9 @@
#include <linux/uidgid.h>
#include <net/inet_frag.h>
#include <linux/rcupdate.h>
+#include <linux/seqlock.h>
#include <linux/siphash.h>
-struct tcpm_hash_bucket;
struct ctl_table_header;
struct ipv4_devconf;
struct fib_rules_ops;
@@ -32,8 +32,9 @@ struct ping_group_range {
struct inet_hashinfo;
struct inet_timewait_death_row {
- atomic_t tw_count;
+ refcount_t tw_refcount;
+ /* Padding to avoid false sharing, tw_refcount is written often */
struct inet_hashinfo *hashinfo ____cacheline_aligned_in_smp;
int sysctl_max_tw_buckets;
};
@@ -41,6 +42,8 @@ struct inet_timewait_death_row {
struct tcp_fastopen_context;
struct netns_ipv4 {
+ struct inet_timewait_death_row tcp_death_row;
+
#ifdef CONFIG_SYSCTL
struct ctl_table_header *forw_hdr;
struct ctl_table_header *frags_hdr;
@@ -54,68 +57,64 @@ struct netns_ipv4 {
struct mutex ra_mutex;
#ifdef CONFIG_IP_MULTIPLE_TABLES
struct fib_rules_ops *rules_ops;
- bool fib_has_custom_rules;
- unsigned int fib_rules_require_fldissect;
struct fib_table __rcu *fib_main;
struct fib_table __rcu *fib_default;
+ unsigned int fib_rules_require_fldissect;
+ bool fib_has_custom_rules;
#endif
bool fib_has_custom_local_routes;
+ bool fib_offload_disabled;
#ifdef CONFIG_IP_ROUTE_CLASSID
- int fib_num_tclassid_users;
+ atomic_t fib_num_tclassid_users;
#endif
struct hlist_head *fib_table_hash;
- bool fib_offload_disabled;
struct sock *fibnl;
- struct sock * __percpu *icmp_sk;
struct sock *mc_autojoin_sk;
struct inet_peer_base *peers;
- struct sock * __percpu *tcp_sk;
struct fqdir *fqdir;
-#ifdef CONFIG_NETFILTER
- struct xt_table *iptable_filter;
- struct xt_table *iptable_mangle;
- struct xt_table *iptable_raw;
- struct xt_table *arptable_filter;
-#ifdef CONFIG_SECURITY
- struct xt_table *iptable_security;
-#endif
- struct xt_table *nat_table;
-#endif
- int sysctl_icmp_echo_ignore_all;
- int sysctl_icmp_echo_ignore_broadcasts;
- int sysctl_icmp_ignore_bogus_error_responses;
+ u8 sysctl_icmp_echo_ignore_all;
+ u8 sysctl_icmp_echo_enable_probe;
+ u8 sysctl_icmp_echo_ignore_broadcasts;
+ u8 sysctl_icmp_ignore_bogus_error_responses;
+ u8 sysctl_icmp_errors_use_inbound_ifaddr;
int sysctl_icmp_ratelimit;
int sysctl_icmp_ratemask;
- int sysctl_icmp_errors_use_inbound_ifaddr;
+
+ u32 ip_rt_min_pmtu;
+ int ip_rt_mtu_expires;
+ int ip_rt_min_advmss;
struct local_ports ip_local_ports;
- int sysctl_tcp_ecn;
- int sysctl_tcp_ecn_fallback;
+ u8 sysctl_tcp_ecn;
+ u8 sysctl_tcp_ecn_fallback;
- int sysctl_ip_default_ttl;
- int sysctl_ip_no_pmtu_disc;
- int sysctl_ip_fwd_use_pmtu;
- int sysctl_ip_fwd_update_priority;
- int sysctl_ip_nonlocal_bind;
+ u8 sysctl_ip_default_ttl;
+ u8 sysctl_ip_no_pmtu_disc;
+ u8 sysctl_ip_fwd_use_pmtu;
+ u8 sysctl_ip_fwd_update_priority;
+ u8 sysctl_ip_nonlocal_bind;
+ u8 sysctl_ip_autobind_reuse;
/* Shall we try to damage output packets if routing dev changes? */
- int sysctl_ip_dynaddr;
- int sysctl_ip_early_demux;
+ u8 sysctl_ip_dynaddr;
+ u8 sysctl_ip_early_demux;
#ifdef CONFIG_NET_L3_MASTER_DEV
- int sysctl_raw_l3mdev_accept;
+ u8 sysctl_raw_l3mdev_accept;
#endif
- int sysctl_tcp_early_demux;
- int sysctl_udp_early_demux;
+ u8 sysctl_tcp_early_demux;
+ u8 sysctl_udp_early_demux;
- int sysctl_fwmark_reflect;
- int sysctl_tcp_fwmark_accept;
+ u8 sysctl_nexthop_compat_mode;
+
+ u8 sysctl_fwmark_reflect;
+ u8 sysctl_tcp_fwmark_accept;
#ifdef CONFIG_NET_L3_MASTER_DEV
- int sysctl_tcp_l3mdev_accept;
+ u8 sysctl_tcp_l3mdev_accept;
#endif
- int sysctl_tcp_mtu_probing;
+ u8 sysctl_tcp_mtu_probing;
int sysctl_tcp_mtu_probe_floor;
int sysctl_tcp_base_mss;
int sysctl_tcp_min_snd_mss;
@@ -123,73 +122,80 @@ struct netns_ipv4 {
u32 sysctl_tcp_probe_interval;
int sysctl_tcp_keepalive_time;
- int sysctl_tcp_keepalive_probes;
int sysctl_tcp_keepalive_intvl;
+ u8 sysctl_tcp_keepalive_probes;
- int sysctl_tcp_syn_retries;
- int sysctl_tcp_synack_retries;
- int sysctl_tcp_syncookies;
+ u8 sysctl_tcp_syn_retries;
+ u8 sysctl_tcp_synack_retries;
+ u8 sysctl_tcp_syncookies;
+ u8 sysctl_tcp_migrate_req;
+ u8 sysctl_tcp_comp_sack_nr;
int sysctl_tcp_reordering;
- int sysctl_tcp_retries1;
- int sysctl_tcp_retries2;
- int sysctl_tcp_orphan_retries;
+ u8 sysctl_tcp_retries1;
+ u8 sysctl_tcp_retries2;
+ u8 sysctl_tcp_orphan_retries;
+ u8 sysctl_tcp_tw_reuse;
int sysctl_tcp_fin_timeout;
unsigned int sysctl_tcp_notsent_lowat;
- int sysctl_tcp_tw_reuse;
- int sysctl_tcp_sack;
- int sysctl_tcp_window_scaling;
- int sysctl_tcp_timestamps;
- int sysctl_tcp_early_retrans;
- int sysctl_tcp_recovery;
- int sysctl_tcp_thin_linear_timeouts;
- int sysctl_tcp_slow_start_after_idle;
- int sysctl_tcp_retrans_collapse;
- int sysctl_tcp_stdurg;
- int sysctl_tcp_rfc1337;
- int sysctl_tcp_abort_on_overflow;
- int sysctl_tcp_fack;
+ u8 sysctl_tcp_sack;
+ u8 sysctl_tcp_window_scaling;
+ u8 sysctl_tcp_timestamps;
+ u8 sysctl_tcp_early_retrans;
+ u8 sysctl_tcp_recovery;
+ u8 sysctl_tcp_thin_linear_timeouts;
+ u8 sysctl_tcp_slow_start_after_idle;
+ u8 sysctl_tcp_retrans_collapse;
+ u8 sysctl_tcp_stdurg;
+ u8 sysctl_tcp_rfc1337;
+ u8 sysctl_tcp_abort_on_overflow;
+ u8 sysctl_tcp_fack; /* obsolete */
int sysctl_tcp_max_reordering;
- int sysctl_tcp_dsack;
- int sysctl_tcp_app_win;
int sysctl_tcp_adv_win_scale;
- int sysctl_tcp_frto;
- int sysctl_tcp_nometrics_save;
- int sysctl_tcp_no_ssthresh_metrics_save;
- int sysctl_tcp_moderate_rcvbuf;
- int sysctl_tcp_tso_win_divisor;
- int sysctl_tcp_workaround_signed_windows;
+ u8 sysctl_tcp_dsack;
+ u8 sysctl_tcp_app_win;
+ u8 sysctl_tcp_frto;
+ u8 sysctl_tcp_nometrics_save;
+ u8 sysctl_tcp_no_ssthresh_metrics_save;
+ u8 sysctl_tcp_moderate_rcvbuf;
+ u8 sysctl_tcp_tso_win_divisor;
+ u8 sysctl_tcp_workaround_signed_windows;
int sysctl_tcp_limit_output_bytes;
int sysctl_tcp_challenge_ack_limit;
- int sysctl_tcp_min_tso_segs;
int sysctl_tcp_min_rtt_wlen;
- int sysctl_tcp_autocorking;
+ u8 sysctl_tcp_min_tso_segs;
+ u8 sysctl_tcp_tso_rtt_log;
+ u8 sysctl_tcp_autocorking;
+ u8 sysctl_tcp_reflect_tos;
int sysctl_tcp_invalid_ratelimit;
int sysctl_tcp_pacing_ss_ratio;
int sysctl_tcp_pacing_ca_ratio;
int sysctl_tcp_wmem[3];
int sysctl_tcp_rmem[3];
- int sysctl_tcp_comp_sack_nr;
+ unsigned int sysctl_tcp_child_ehash_entries;
unsigned long sysctl_tcp_comp_sack_delay_ns;
- struct inet_timewait_death_row tcp_death_row;
+ unsigned long sysctl_tcp_comp_sack_slack_ns;
int sysctl_max_syn_backlog;
int sysctl_tcp_fastopen;
const struct tcp_congestion_ops __rcu *tcp_congestion_control;
struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
- spinlock_t tcp_fastopen_ctx_lock;
unsigned int sysctl_tcp_fastopen_blackhole_timeout;
atomic_t tfo_active_disable_times;
unsigned long tfo_active_disable_stamp;
+ u32 tcp_challenge_timestamp;
+ u32 tcp_challenge_count;
int sysctl_udp_wmem_min;
int sysctl_udp_rmem_min;
+ u8 sysctl_fib_notify_on_flag_change;
+
#ifdef CONFIG_NET_L3_MASTER_DEV
- int sysctl_udp_l3mdev_accept;
+ u8 sysctl_udp_l3mdev_accept;
#endif
+ u8 sysctl_igmp_llm_reports;
int sysctl_igmp_max_memberships;
int sysctl_igmp_max_msf;
- int sysctl_igmp_llm_reports;
int sysctl_igmp_qrv;
struct ping_group_range ping_group_range;
@@ -210,8 +216,9 @@ struct netns_ipv4 {
#endif
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- int sysctl_fib_multipath_use_neigh;
- int sysctl_fib_multipath_hash_policy;
+ u32 sysctl_fib_multipath_hash_fields;
+ u8 sysctl_fib_multipath_use_neigh;
+ u8 sysctl_fib_multipath_hash_policy;
#endif
struct fib_notifier_ops *notifier_ops;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 5ec054473d81..b4af4837d80b 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -20,7 +20,6 @@ struct netns_sysctl_ipv6 {
struct ctl_table_header *frags_hdr;
struct ctl_table_header *xfrm6_hdr;
#endif
- int bindv6only;
int flush_delay;
int ip6_rt_max_size;
int ip6_rt_gc_min_interval;
@@ -29,45 +28,44 @@ struct netns_sysctl_ipv6 {
int ip6_rt_gc_elasticity;
int ip6_rt_mtu_expires;
int ip6_rt_min_advmss;
- int multipath_hash_policy;
- int flowlabel_consistency;
- int auto_flowlabels;
+ u32 multipath_hash_fields;
+ u8 multipath_hash_policy;
+ u8 bindv6only;
+ u8 flowlabel_consistency;
+ u8 auto_flowlabels;
int icmpv6_time;
- int icmpv6_echo_ignore_all;
- int icmpv6_echo_ignore_multicast;
- int icmpv6_echo_ignore_anycast;
+ u8 icmpv6_echo_ignore_all;
+ u8 icmpv6_echo_ignore_multicast;
+ u8 icmpv6_echo_ignore_anycast;
DECLARE_BITMAP(icmpv6_ratemask, ICMPV6_MSG_MAX + 1);
unsigned long *icmpv6_ratemask_ptr;
- int anycast_src_echo_reply;
- int ip_nonlocal_bind;
- int fwmark_reflect;
+ u8 anycast_src_echo_reply;
+ u8 ip_nonlocal_bind;
+ u8 fwmark_reflect;
+ u8 flowlabel_state_ranges;
int idgen_retries;
int idgen_delay;
- int flowlabel_state_ranges;
int flowlabel_reflect;
int max_dst_opts_cnt;
int max_hbh_opts_cnt;
int max_dst_opts_len;
int max_hbh_opts_len;
int seg6_flowlabel;
+ u32 ioam6_id;
+ u64 ioam6_id_wide;
bool skip_notify_on_dev_down;
+ u8 fib_notify_on_flag_change;
};
struct netns_ipv6 {
+ /* Keep ip6_dst_ops at the beginning of netns_ipv6 */
+ struct dst_ops ip6_dst_ops;
+
struct netns_sysctl_ipv6 sysctl;
struct ipv6_devconf *devconf_all;
struct ipv6_devconf *devconf_dflt;
struct inet_peer_base *peers;
struct fqdir *fqdir;
-#ifdef CONFIG_NETFILTER
- struct xt_table *ip6table_filter;
- struct xt_table *ip6table_mangle;
- struct xt_table *ip6table_raw;
-#ifdef CONFIG_SECURITY
- struct xt_table *ip6table_security;
-#endif
- struct xt_table *ip6table_nat;
-#endif
struct fib6_info *fib6_null_entry;
struct rt6_info *ip6_null_entry;
struct rt6_statistics *rt6_stats;
@@ -75,14 +73,14 @@ struct netns_ipv6 {
struct hlist_head *fib_table_hash;
struct fib6_table *fib6_main_tbl;
struct list_head fib6_walkers;
- struct dst_ops ip6_dst_ops;
rwlock_t fib6_walker_lock;
spinlock_t fib6_gc_lock;
- unsigned int ip6_rt_gc_expire;
- unsigned long ip6_rt_last_gc;
+ atomic_t ip6_rt_gc_expire;
+ unsigned long ip6_rt_last_gc;
+ unsigned char flowlabel_has_excl;
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
- unsigned int fib6_rules_require_fldissect;
bool fib6_has_custom_rules;
+ unsigned int fib6_rules_require_fldissect;
#ifdef CONFIG_IPV6_SUBTREES
unsigned int fib6_routes_require_src;
#endif
@@ -91,11 +89,15 @@ struct netns_ipv6 {
struct fib6_table *fib6_local_tbl;
struct fib_rules_ops *fib6_rules_ops;
#endif
- struct sock * __percpu *icmp_sk;
struct sock *ndisc_sk;
struct sock *tcp_sk;
struct sock *igmp_sk;
struct sock *mc_autojoin_sk;
+
+ struct hlist_head *inet6_addr_lst;
+ spinlock_t addrconf_hash_lock;
+ struct delayed_work addr_chk_work;
+
#ifdef CONFIG_IPV6_MROUTE
#ifndef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
struct mr_table *mrt6;
@@ -115,6 +117,7 @@ struct netns_ipv6 {
spinlock_t lock;
u32 seq;
} ip6addrlbl_table;
+ struct ioam6_pernet_data *ioam6_data;
};
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
diff --git a/include/net/netns/mctp.h b/include/net/netns/mctp.h
new file mode 100644
index 000000000000..1db8f9aaddb4
--- /dev/null
+++ b/include/net/netns/mctp.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MCTP per-net structures
+ */
+
+#ifndef __NETNS_MCTP_H__
+#define __NETNS_MCTP_H__
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+struct netns_mctp {
+ /* Only updated under RTNL, entries freed via RCU */
+ struct list_head routes;
+
+ /* Bound sockets: list of sockets bound by type.
+ * This list is updated from non-atomic contexts (under bind_lock),
+ * and read (under rcu) in packet rx
+ */
+ struct mutex bind_lock;
+ struct hlist_head binds;
+
+ /* tag allocations. This list is read and updated from atomic contexts,
+ * but elements are freed after an RCU grace period
+ */
+ spinlock_t keys_lock;
+ struct hlist_head keys;
+
+ /* MCTP network */
+ unsigned int default_net;
+
+ /* neighbour table */
+ struct mutex neigh_lock;
+ struct list_head neighbours;
+};
+
+#endif /* __NETNS_MCTP_H__ */
diff --git a/include/net/netns/mib.h b/include/net/netns/mib.h
index b5fdb108d602..7e373664b1e7 100644
--- a/include/net/netns/mib.h
+++ b/include/net/netns/mib.h
@@ -5,28 +5,41 @@
#include <net/snmp.h>
struct netns_mib {
- DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics);
+#if IS_ENABLED(CONFIG_IPV6)
+ DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics);
+#endif
+
+ DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
DEFINE_SNMP_STAT(struct linux_mib, net_statistics);
- DEFINE_SNMP_STAT(struct udp_mib, udp_statistics);
- DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics);
- DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics);
- DEFINE_SNMP_STAT_ATOMIC(struct icmpmsg_mib, icmpmsg_statistics);
+ DEFINE_SNMP_STAT(struct udp_mib, udp_statistics);
#if IS_ENABLED(CONFIG_IPV6)
- struct proc_dir_entry *proc_net_devsnmp6;
DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6);
- DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6);
- DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics);
- DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics);
- DEFINE_SNMP_STAT_ATOMIC(struct icmpv6msg_mib, icmpv6msg_statistics);
#endif
+
#ifdef CONFIG_XFRM_STATISTICS
DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics);
#endif
#if IS_ENABLED(CONFIG_TLS)
DEFINE_SNMP_STAT(struct linux_tls_mib, tls_statistics);
#endif
+#ifdef CONFIG_MPTCP
+ DEFINE_SNMP_STAT(struct mptcp_mib, mptcp_statistics);
+#endif
+
+ DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics);
+#if IS_ENABLED(CONFIG_IPV6)
+ DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6);
+#endif
+
+ DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics);
+ DEFINE_SNMP_STAT_ATOMIC(struct icmpmsg_mib, icmpmsg_statistics);
+#if IS_ENABLED(CONFIG_IPV6)
+ DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics);
+ DEFINE_SNMP_STAT_ATOMIC(struct icmpv6msg_mib, icmpv6msg_statistics);
+ struct proc_dir_entry *proc_net_devsnmp6;
+#endif
};
#endif
diff --git a/include/net/netns/mpls.h b/include/net/netns/mpls.h
index a7bdcfbb0b28..19ad2574b267 100644
--- a/include/net/netns/mpls.h
+++ b/include/net/netns/mpls.h
@@ -6,6 +6,8 @@
#ifndef __NETNS_MPLS_H__
#define __NETNS_MPLS_H__
+#include <linux/types.h>
+
struct mpls_route;
struct ctl_table_header;
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index ca043342c0eb..02bbdc577f8e 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -12,7 +12,6 @@ struct netns_nf {
#if defined CONFIG_PROC_FS
struct proc_dir_entry *proc_netfilter;
#endif
- const struct nf_queue_handler __rcu *queue_handler;
const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO];
#ifdef CONFIG_SYSCTL
struct ctl_table_header *nf_log_dir_header;
@@ -25,14 +24,11 @@ struct netns_nf {
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
struct nf_hook_entries __rcu *hooks_bridge[NF_INET_NUMHOOKS];
#endif
-#if IS_ENABLED(CONFIG_DECNET)
- struct nf_hook_entries __rcu *hooks_decnet[NF_DN_NUMHOOKS];
-#endif
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
- bool defrag_ipv4;
+ unsigned int defrag_ipv4_users;
#endif
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
- bool defrag_ipv6;
+ unsigned int defrag_ipv6_users;
#endif
};
#endif
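
The bool-to-counter change above turns "defrag enabled" into a use count, so
several subsystems can request defragmentation independently and the hooks are
only torn down with the last user. A sketch of the intended pattern, assuming
a module-local mutex serializes enable/disable; the hook (un)registration
helpers are hypothetical:

	static DEFINE_MUTEX(defrag_mutex);

	static int nf_defrag_ipv4_enable(struct net *net)
	{
		int err = 0;

		mutex_lock(&defrag_mutex);
		if (net->nf.defrag_ipv4_users == 0)
			err = register_defrag_hooks(net);	/* hypothetical */
		if (!err)
			net->nf.defrag_ipv4_users++;
		mutex_unlock(&defrag_mutex);
		return err;
	}

	static void nf_defrag_ipv4_disable(struct net *net)
	{
		mutex_lock(&defrag_mutex);
		if (--net->nf.defrag_ipv4_users == 0)
			unregister_defrag_hooks(net);	/* hypothetical */
		mutex_unlock(&defrag_mutex);
	}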
diff --git a/include/net/netns/nexthop.h b/include/net/netns/nexthop.h
index c712ee5eebd9..434239b37014 100644
--- a/include/net/netns/nexthop.h
+++ b/include/net/netns/nexthop.h
@@ -6,6 +6,7 @@
#ifndef __NETNS_NEXTHOP_H__
#define __NETNS_NEXTHOP_H__
+#include <linux/notifier.h>
#include <linux/rbtree.h>
struct netns_nexthop {
@@ -14,5 +15,6 @@ struct netns_nexthop {
unsigned int seq; /* protected by rtnl_mutex */
u32 last_id_allocated;
+ struct blocking_notifier_head notifier_chain;
};
#endif
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index a1a8d45adb42..8c77832d0240 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -5,13 +5,7 @@
#include <linux/list.h>
struct netns_nftables {
- struct list_head tables;
- struct list_head commit_list;
- struct list_head module_list;
- struct mutex commit_mutex;
- unsigned int base_seq;
u8 gencursor;
- u8 validate_state;
};
#endif
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
index d8d02e4188d1..a681147aecd8 100644
--- a/include/net/netns/sctp.h
+++ b/include/net/netns/sctp.h
@@ -2,6 +2,9 @@
#ifndef __NETNS_SCTP_H__
#define __NETNS_SCTP_H__
+#include <linux/timer.h>
+#include <net/snmp.h>
+
struct sock;
struct proc_dir_entry;
struct sctp_mib;
@@ -22,6 +25,14 @@ struct netns_sctp {
*/
struct sock *ctl_sock;
+ /* UDP tunneling listening sock. */
+ struct sock *udp4_sock;
+ struct sock *udp6_sock;
+ /* UDP tunneling listening port. */
+ int udp_port;
+ /* UDP tunneling remote encap port. */
+ int encap_port;
+
/* This is the global local address list.
* We actively maintain this complete list of addresses on
* the system by catching address add/delete events.
@@ -76,6 +87,9 @@ struct netns_sctp {
/* HB.interval - 30 seconds */
unsigned int hb_interval;
+ /* The interval for PLPMTUD probe timer */
+ unsigned int probe_interval;
+
/* Association.Max.Retrans - 10 attempts
* Path.Max.Retrans - 5 attempts (per destination address)
* Max.Init.Retransmits - 8 attempts
diff --git a/include/net/netns/smc.h b/include/net/netns/smc.h
new file mode 100644
index 000000000000..582212ada3ba
--- /dev/null
+++ b/include/net/netns/smc.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NETNS_SMC_H__
+#define __NETNS_SMC_H__
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+
+struct smc_stats_rsn;
+struct smc_stats;
+struct netns_smc {
+ /* per cpu counters for SMC */
+ struct smc_stats __percpu *smc_stats;
+ /* protect fback_rsn */
+ struct mutex mutex_fback_rsn;
+ struct smc_stats_rsn *fback_rsn;
+
+ bool limit_smc_hs; /* constraint on handshake */
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_header *smc_hdr;
+#endif
+ unsigned int sysctl_autocorking_size;
+ unsigned int sysctl_smcr_buf_type;
+ int sysctl_smcr_testlink_time;
+ int sysctl_wmem;
+ int sysctl_rmem;
+};
+#endif
diff --git a/include/net/netns/unix.h b/include/net/netns/unix.h
index 91a3d7e39198..9859d134d5a8 100644
--- a/include/net/netns/unix.h
+++ b/include/net/netns/unix.h
@@ -5,8 +5,16 @@
#ifndef __NETNS_UNIX_H__
#define __NETNS_UNIX_H__
+#include <linux/spinlock.h>
+
+struct unix_table {
+ spinlock_t *locks;
+ struct hlist_head *buckets;
+};
+
struct ctl_table_header;
struct netns_unix {
+ struct unix_table table;
int sysctl_max_dgram_qlen;
struct ctl_table_header *ctl;
};
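
The new unix_table splits the bound-socket hash into parallel arrays of bucket
heads and per-bucket spinlocks, so lookups in unrelated buckets no longer
contend on one global lock. An illustrative access pattern; the hash
computation is elided, and only the unix_table layout comes from this header:

	static void unix_insert_bound(struct net *net, struct sock *sk,
				      unsigned int hash)
	{
		spin_lock(&net->unx.table.locks[hash]);
		sk_add_node(sk, &net->unx.table.buckets[hash]);
		spin_unlock(&net->unx.table.locks[hash]);
	}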
diff --git a/include/net/netns/x_tables.h b/include/net/netns/x_tables.h
deleted file mode 100644
index 9bc5a12fdbb0..000000000000
--- a/include/net/netns/x_tables.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __NETNS_X_TABLES_H
-#define __NETNS_X_TABLES_H
-
-#include <linux/list.h>
-#include <linux/netfilter_defs.h>
-
-struct ebt_table;
-
-struct netns_xt {
- struct list_head tables[NFPROTO_NUMPROTO];
- bool notrack_deprecated_warning;
- bool clusterip_deprecated_warning;
-#if defined(CONFIG_BRIDGE_NF_EBTABLES) || \
- defined(CONFIG_BRIDGE_NF_EBTABLES_MODULE)
- struct ebt_table *broute_table;
- struct ebt_table *frame_filter;
- struct ebt_table *frame_nat;
-#endif
-};
-#endif
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 59f45b1e9dac..bd7c3be4af5d 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -42,6 +42,7 @@ struct netns_xfrm {
struct hlist_head __rcu *state_bydst;
struct hlist_head __rcu *state_bysrc;
struct hlist_head __rcu *state_byspi;
+ struct hlist_head __rcu *state_byseq;
unsigned int state_hmask;
unsigned int state_num;
struct work_struct state_hash_work;
@@ -64,6 +65,9 @@ struct netns_xfrm {
u32 sysctl_aevent_rseqth;
int sysctl_larval_drop;
u32 sysctl_acq_expires;
+
+ u8 policy_default[XFRM_POLICY_MAX];
+
#ifdef CONFIG_SYSCTL
struct ctl_table_header *sysctl_hdr;
#endif
@@ -72,7 +76,10 @@ struct netns_xfrm {
#if IS_ENABLED(CONFIG_IPV6)
struct dst_ops xfrm6_dst_ops;
#endif
- spinlock_t xfrm_state_lock;
+ spinlock_t xfrm_state_lock;
+ seqcount_spinlock_t xfrm_state_hash_generation;
+ seqcount_spinlock_t xfrm_policy_hash_generation;
+
spinlock_t xfrm_policy_lock;
struct mutex xfrm_cfg_mutex;
};
diff --git a/include/net/netrom.h b/include/net/netrom.h
index 80f15b1c1a48..f0565a5987d1 100644
--- a/include/net/netrom.h
+++ b/include/net/netrom.h
@@ -14,6 +14,7 @@
#include <net/sock.h>
#include <linux/refcount.h>
#include <linux/seq_file.h>
+#include <net/ax25.h>
#define NR_NETWORK_LEN 15
#define NR_TRANSPORT_LEN 5
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 331ebbc94fe7..28085b995ddc 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -10,6 +10,7 @@
#define __LINUX_NEXTHOP_H
#include <linux/netdevice.h>
+#include <linux/notifier.h>
#include <linux/route.h>
#include <linux/types.h>
#include <net/ip_fib.h>
@@ -26,6 +27,7 @@ struct nh_config {
u8 nh_family;
u8 nh_protocol;
u8 nh_blackhole;
+ u8 nh_fdb;
u32 nh_flags;
int nh_ifindex;
@@ -38,6 +40,12 @@ struct nh_config {
struct nlattr *nh_grp;
u16 nh_grp_type;
+ u16 nh_grp_res_num_buckets;
+ unsigned long nh_grp_res_idle_timer;
+ unsigned long nh_grp_res_unbalanced_timer;
+ bool nh_grp_res_has_num_buckets;
+ bool nh_grp_res_has_idle_timer;
+ bool nh_grp_res_has_unbalanced_timer;
struct nlattr *nh_encap;
u16 nh_encap_type;
@@ -52,6 +60,7 @@ struct nh_info {
u8 family;
bool reject_nh;
+ bool fdb_nh;
union {
struct fib_nh_common fib_nhc;
@@ -60,26 +69,71 @@ struct nh_info {
};
};
+struct nh_res_bucket {
+ struct nh_grp_entry __rcu *nh_entry;
+ atomic_long_t used_time;
+ unsigned long migrated_time;
+ bool occupied;
+ u8 nh_flags;
+};
+
+struct nh_res_table {
+ struct net *net;
+ u32 nhg_id;
+ struct delayed_work upkeep_dw;
+
+ /* List of NHGEs that have too few buckets ("uw" for underweight).
+ * Reclaimed buckets will be given to entries in this list.
+ */
+ struct list_head uw_nh_entries;
+ unsigned long unbalanced_since;
+
+ u32 idle_timer;
+ u32 unbalanced_timer;
+
+ u16 num_nh_buckets;
+ struct nh_res_bucket nh_buckets[];
+};
+
struct nh_grp_entry {
struct nexthop *nh;
u8 weight;
- atomic_t upper_bound;
+
+ union {
+ struct {
+ atomic_t upper_bound;
+ } hthr;
+ struct {
+ /* Member on uw_nh_entries. */
+ struct list_head uw_nh_entry;
+
+ u16 count_buckets;
+ u16 wants_buckets;
+ } res;
+ };
struct list_head nh_list;
struct nexthop *nh_parent; /* nexthop of group with this entry */
};
struct nh_group {
+ struct nh_group *spare; /* spare group for removals */
u16 num_nh;
- bool mpath;
+ bool is_multipath;
+ bool hash_threshold;
+ bool resilient;
+ bool fdb_nh;
bool has_v4;
- struct nh_grp_entry nh_entries[0];
+
+ struct nh_res_table __rcu *res_table;
+ struct nh_grp_entry nh_entries[];
};
struct nexthop {
struct rb_node rb_node; /* entry on netns rbtree */
struct list_head fi_list; /* v4 entries using nh */
struct list_head f6i_list; /* v6 entries using nh */
+ struct list_head fdb_list; /* fdb entries using this nh */
struct list_head grp_list; /* nh group entries using this nh */
struct net *net;
@@ -98,6 +152,79 @@ struct nexthop {
};
};
+enum nexthop_event_type {
+ NEXTHOP_EVENT_DEL,
+ NEXTHOP_EVENT_REPLACE,
+ NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
+ NEXTHOP_EVENT_BUCKET_REPLACE,
+};
+
+enum nh_notifier_info_type {
+ NH_NOTIFIER_INFO_TYPE_SINGLE,
+ NH_NOTIFIER_INFO_TYPE_GRP,
+ NH_NOTIFIER_INFO_TYPE_RES_TABLE,
+ NH_NOTIFIER_INFO_TYPE_RES_BUCKET,
+};
+
+struct nh_notifier_single_info {
+ struct net_device *dev;
+ u8 gw_family;
+ union {
+ __be32 ipv4;
+ struct in6_addr ipv6;
+ };
+ u8 is_reject:1,
+ is_fdb:1,
+ has_encap:1;
+};
+
+struct nh_notifier_grp_entry_info {
+ u8 weight;
+ u32 id;
+ struct nh_notifier_single_info nh;
+};
+
+struct nh_notifier_grp_info {
+ u16 num_nh;
+ bool is_fdb;
+ struct nh_notifier_grp_entry_info nh_entries[];
+};
+
+struct nh_notifier_res_bucket_info {
+ u16 bucket_index;
+ unsigned int idle_timer_ms;
+ bool force;
+ struct nh_notifier_single_info old_nh;
+ struct nh_notifier_single_info new_nh;
+};
+
+struct nh_notifier_res_table_info {
+ u16 num_nh_buckets;
+ struct nh_notifier_single_info nhs[];
+};
+
+struct nh_notifier_info {
+ struct net *net;
+ struct netlink_ext_ack *extack;
+ u32 id;
+ enum nh_notifier_info_type type;
+ union {
+ struct nh_notifier_single_info *nh;
+ struct nh_notifier_grp_info *nh_grp;
+ struct nh_notifier_res_table_info *nh_res_table;
+ struct nh_notifier_res_bucket_info *nh_res_bucket;
+ };
+};
+
+int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack);
+int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb);
+void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap);
+void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
+ bool offload, bool trap);
+void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
+ unsigned long *activity);
+
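
For illustration, the shape of a driver consuming this notifier; the event and
info types come from the enums above, everything prefixed my_ is hypothetical,
and errors are translated with notifier_from_errno():

	static int my_nh_event(struct notifier_block *nb, unsigned long event,
			       void *ptr)
	{
		struct nh_notifier_info *info = ptr;
		int err;

		switch (event) {
		case NEXTHOP_EVENT_REPLACE:
			if (info->type != NH_NOTIFIER_INFO_TYPE_SINGLE)
				return NOTIFY_DONE;
			err = my_program_nh(info->id, info->nh);
			return notifier_from_errno(err);
		case NEXTHOP_EVENT_DEL:
			my_unprogram_nh(info->id);
			return NOTIFY_OK;
		default:
			return NOTIFY_DONE;
		}
	}

	static struct notifier_block my_nh_nb = {
		.notifier_call	= my_nh_event,
	};

	/* in probe: err = register_nexthop_notifier(net, &my_nh_nb, extack); */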
/* caller is holding rcu or rtnl; no reference taken to nexthop */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id);
void nexthop_free_rcu(struct rcu_head *head);
@@ -119,13 +246,39 @@ static inline bool nexthop_cmp(const struct nexthop *nh1,
return nh1 == nh2;
}
+static inline bool nexthop_is_fdb(const struct nexthop *nh)
+{
+ if (nh->is_group) {
+ const struct nh_group *nh_grp;
+
+ nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+ return nh_grp->fdb_nh;
+ } else {
+ const struct nh_info *nhi;
+
+ nhi = rcu_dereference_rtnl(nh->nh_info);
+ return nhi->fdb_nh;
+ }
+}
+
+static inline bool nexthop_has_v4(const struct nexthop *nh)
+{
+ if (nh->is_group) {
+ struct nh_group *nh_grp;
+
+ nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+ return nh_grp->has_v4;
+ }
+ return false;
+}
+
static inline bool nexthop_is_multipath(const struct nexthop *nh)
{
if (nh->is_group) {
struct nh_group *nh_grp;
nh_grp = rcu_dereference_rtnl(nh->nh_grp);
- return nh_grp->mpath;
+ return nh_grp->is_multipath;
}
return false;
}
@@ -136,21 +289,20 @@ static inline unsigned int nexthop_num_path(const struct nexthop *nh)
{
unsigned int rc = 1;
- if (nexthop_is_multipath(nh)) {
+ if (nh->is_group) {
struct nh_group *nh_grp;
nh_grp = rcu_dereference_rtnl(nh->nh_grp);
- rc = nh_grp->num_nh;
+ if (nh_grp->is_multipath)
+ rc = nh_grp->num_nh;
}
return rc;
}
static inline
-struct nexthop *nexthop_mpath_select(const struct nexthop *nh, int nhsel)
+struct nexthop *nexthop_mpath_select(const struct nh_group *nhg, int nhsel)
{
- const struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
-
/* for_nexthops macros in fib_semantics.c grab a pointer to
* the nexthop before checking nhsel
*/
@@ -173,7 +325,7 @@ int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh,
struct fib_nh_common *nhc = &nhi->fib_nhc;
int weight = nhg->nh_entries[i].weight;
- if (fib_add_nexthop(skb, nhc, weight, rt_family) < 0)
+ if (fib_add_nexthop(skb, nhc, weight, rt_family, 0) < 0)
return -EMSGSIZE;
}
@@ -185,12 +337,14 @@ static inline bool nexthop_is_blackhole(const struct nexthop *nh)
{
const struct nh_info *nhi;
- if (nexthop_is_multipath(nh)) {
- if (nexthop_num_path(nh) > 1)
- return false;
- nh = nexthop_mpath_select(nh, 0);
- if (!nh)
+ if (nh->is_group) {
+ struct nh_group *nh_grp;
+
+ nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+ if (nh_grp->num_nh > 1)
return false;
+
+ nh = nh_grp->nh_entries[0].nh;
}
nhi = rcu_dereference_rtnl(nh->nh_info);
@@ -216,16 +370,79 @@ struct fib_nh_common *nexthop_fib_nhc(struct nexthop *nh, int nhsel)
BUILD_BUG_ON(offsetof(struct fib_nh, nh_common) != 0);
BUILD_BUG_ON(offsetof(struct fib6_nh, nh_common) != 0);
- if (nexthop_is_multipath(nh)) {
- nh = nexthop_mpath_select(nh, nhsel);
- if (!nh)
- return NULL;
+ if (nh->is_group) {
+ struct nh_group *nh_grp;
+
+ nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+ if (nh_grp->is_multipath) {
+ nh = nexthop_mpath_select(nh_grp, nhsel);
+ if (!nh)
+ return NULL;
+ }
}
nhi = rcu_dereference_rtnl(nh->nh_info);
return &nhi->fib_nhc;
}
+/* called from fib_table_lookup with rcu_lock */
+static inline
+struct fib_nh_common *nexthop_get_nhc_lookup(const struct nexthop *nh,
+ int fib_flags,
+ const struct flowi4 *flp,
+ int *nhsel)
+{
+ struct nh_info *nhi;
+
+ if (nh->is_group) {
+ struct nh_group *nhg = rcu_dereference(nh->nh_grp);
+ int i;
+
+ for (i = 0; i < nhg->num_nh; i++) {
+ struct nexthop *nhe = nhg->nh_entries[i].nh;
+
+ nhi = rcu_dereference(nhe->nh_info);
+ if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) {
+ *nhsel = i;
+ return &nhi->fib_nhc;
+ }
+ }
+ } else {
+ nhi = rcu_dereference(nh->nh_info);
+ if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) {
+ *nhsel = 0;
+ return &nhi->fib_nhc;
+ }
+ }
+
+ return NULL;
+}
+
+static inline bool nexthop_uses_dev(const struct nexthop *nh,
+ const struct net_device *dev)
+{
+ struct nh_info *nhi;
+
+ if (nh->is_group) {
+ struct nh_group *nhg = rcu_dereference(nh->nh_grp);
+ int i;
+
+ for (i = 0; i < nhg->num_nh; i++) {
+ struct nexthop *nhe = nhg->nh_entries[i].nh;
+
+ nhi = rcu_dereference(nhe->nh_info);
+ if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev))
+ return true;
+ }
+ } else {
+ nhi = rcu_dereference(nh->nh_info);
+ if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev))
+ return true;
+ }
+
+ return false;
+}
+
static inline unsigned int fib_info_num_path(const struct fib_info *fi)
{
if (unlikely(fi->nh))
@@ -259,12 +476,16 @@ static inline struct fib_nh *fib_info_nh(struct fib_info *fi, int nhsel)
int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
struct netlink_ext_ack *extack);
+/* Caller should either hold rcu_read_lock(), or RTNL. */
static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
{
struct nh_info *nhi;
- if (nexthop_is_multipath(nh)) {
- nh = nexthop_mpath_select(nh, 0);
+ if (nh->is_group) {
+ struct nh_group *nh_grp;
+
+ nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+ nh = nexthop_mpath_select(nh_grp, 0);
if (!nh)
return NULL;
}
@@ -276,6 +497,29 @@ static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
return NULL;
}
+/* Variant of nexthop_fib6_nh().
+ * Caller should either hold rcu_read_lock_bh(), or RTNL.
+ */
+static inline struct fib6_nh *nexthop_fib6_nh_bh(struct nexthop *nh)
+{
+ struct nh_info *nhi;
+
+ if (nh->is_group) {
+ struct nh_group *nh_grp;
+
+ nh_grp = rcu_dereference_bh_rtnl(nh->nh_grp);
+ nh = nexthop_mpath_select(nh_grp, 0);
+ if (!nh)
+ return NULL;
+ }
+
+ nhi = rcu_dereference_bh_rtnl(nh->nh_info);
+ if (nhi->family == AF_INET6)
+ return &nhi->fib6_nh;
+
+ return NULL;
+}
+
static inline struct net_device *fib6_info_nh_dev(struct fib6_info *f6i)
{
struct fib6_nh *fib6_nh;
@@ -304,4 +548,32 @@ static inline void nexthop_path_fib6_result(struct fib6_result *res, int hash)
int nexthop_for_each_fib6_nh(struct nexthop *nh,
int (*cb)(struct fib6_nh *nh, void *arg),
void *arg);
+
+static inline int nexthop_get_family(struct nexthop *nh)
+{
+ struct nh_info *nhi = rcu_dereference_rtnl(nh->nh_info);
+
+ return nhi->family;
+}
+
+static inline
+struct fib_nh_common *nexthop_fdb_nhc(struct nexthop *nh)
+{
+ struct nh_info *nhi = rcu_dereference_rtnl(nh->nh_info);
+
+ return &nhi->fib_nhc;
+}
+
+static inline struct fib_nh_common *nexthop_path_fdb_result(struct nexthop *nh,
+ int hash)
+{
+ struct nh_info *nhi;
+ struct nexthop *nhp;
+
+ nhp = nexthop_select_path(nh, hash);
+ if (unlikely(!nhp))
+ return NULL;
+ nhi = rcu_dereference(nhp->nh_info);
+ return &nhi->fib_nhc;
+}
#endif
diff --git a/include/net/nfc/digital.h b/include/net/nfc/digital.h
index 963db96bcbbb..bb3e8fdc0692 100644
--- a/include/net/nfc/digital.h
+++ b/include/net/nfc/digital.h
@@ -191,7 +191,7 @@ struct digital_poll_tech {
struct nfc_digital_dev {
struct nfc_dev *nfc_dev;
- struct nfc_digital_ops *ops;
+ const struct nfc_digital_ops *ops;
u32 protocols;
@@ -236,7 +236,7 @@ struct nfc_digital_dev {
void (*skb_add_crc)(struct sk_buff *skb);
};
-struct nfc_digital_dev *nfc_digital_allocate_device(struct nfc_digital_ops *ops,
+struct nfc_digital_dev *nfc_digital_allocate_device(const struct nfc_digital_ops *ops,
__u32 supported_protocols,
__u32 driver_capabilities,
int tx_headroom,
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index b35f37a57686..756c11084f65 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -118,7 +118,7 @@ struct nfc_hci_dev {
struct sk_buff_head msg_rx_queue;
- struct nfc_hci_ops *ops;
+ const struct nfc_hci_ops *ops;
struct nfc_llc *llc;
@@ -151,7 +151,7 @@ struct nfc_hci_dev {
};
/* hci device allocation */
-struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
+struct nfc_hci_dev *nfc_hci_allocate_device(const struct nfc_hci_ops *ops,
struct nfc_hci_init_data *init_data,
unsigned long quirks,
u32 protocols,
@@ -168,7 +168,7 @@ void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata);
void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev);
static inline int nfc_hci_set_vendor_cmds(struct nfc_hci_dev *hdev,
- struct nfc_vendor_cmd *cmds,
+ const struct nfc_vendor_cmd *cmds,
int n_cmds)
{
return nfc_set_vendor_cmds(hdev->ndev, cmds, n_cmds);
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index 6ab5a83f597c..e82f55f543bb 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -25,6 +25,8 @@
#define NCI_MAX_PARAM_LEN 251
#define NCI_MAX_PAYLOAD_SIZE 255
#define NCI_MAX_PACKET_SIZE 258
+#define NCI_MAX_LARGE_PARAMS_NCI_v2 15
+#define NCI_VER_2_MASK 0x20
/* NCI Status Codes */
#define NCI_STATUS_OK 0x00
@@ -131,6 +133,9 @@
#define NCI_LF_CON_BITR_F_212 0x02
#define NCI_LF_CON_BITR_F_424 0x04
+/* NCI 2.x Feature Enable Bit */
+#define NCI_FEATURE_DISABLE 0x00
+
/* NCI Reset types */
#define NCI_RESET_TYPE_KEEP_CONFIG 0x00
#define NCI_RESET_TYPE_RESET_CONFIG 0x01
@@ -220,6 +225,11 @@ struct nci_core_reset_cmd {
} __packed;
#define NCI_OP_CORE_INIT_CMD nci_opcode_pack(NCI_GID_CORE, 0x01)
+/* To support NCI 2.x */
+struct nci_core_init_v2_cmd {
+ u8 feature1;
+ u8 feature2;
+};
#define NCI_OP_CORE_SET_CONFIG_CMD nci_opcode_pack(NCI_GID_CORE, 0x02)
struct set_config_param {
@@ -244,13 +254,13 @@ struct dest_spec_params {
struct core_conn_create_dest_spec_params {
__u8 type;
__u8 length;
- __u8 value[0];
+ __u8 value[];
} __packed;
struct nci_core_conn_create_cmd {
__u8 destination_type;
__u8 number_destination_params;
- struct core_conn_create_dest_spec_params params[0];
+ struct core_conn_create_dest_spec_params params[];
} __packed;
#define NCI_OP_CORE_CONN_CLOSE_CMD nci_opcode_pack(NCI_GID_CORE, 0x05)
@@ -321,7 +331,7 @@ struct nci_core_init_rsp_1 {
__u8 status;
__le32 nfcc_features;
__u8 num_supported_rf_interfaces;
- __u8 supported_rf_interfaces[0]; /* variable size array */
+ __u8 supported_rf_interfaces[]; /* variable size array */
/* continued in nci_core_init_rsp_2 */
} __packed;
@@ -334,11 +344,25 @@ struct nci_core_init_rsp_2 {
__le32 manufact_specific_info;
} __packed;
+/* To support NCI 2.x */
+struct nci_core_init_rsp_nci_ver2 {
+ u8 status;
+ __le32 nfcc_features;
+ u8 max_logical_connections;
+ __le16 max_routing_table_size;
+ u8 max_ctrl_pkt_payload_len;
+ u8 max_data_pkt_hci_payload_len;
+ u8 number_of_hci_credit;
+ __le16 max_nfc_v_frame_size;
+ u8 num_supported_rf_interfaces;
+ u8 supported_rf_interfaces[];
+} __packed;
+
#define NCI_OP_CORE_SET_CONFIG_RSP nci_opcode_pack(NCI_GID_CORE, 0x02)
struct nci_core_set_config_rsp {
__u8 status;
__u8 num_params;
- __u8 params_id[0]; /* variable size array */
+ __u8 params_id[]; /* variable size array */
} __packed;
#define NCI_OP_CORE_CONN_CREATE_RSP nci_opcode_pack(NCI_GID_CORE, 0x04)
@@ -372,6 +396,16 @@ struct nci_nfcee_discover_rsp {
/* --------------------------- */
/* ---- NCI Notifications ---- */
/* --------------------------- */
+#define NCI_OP_CORE_RESET_NTF nci_opcode_pack(NCI_GID_CORE, 0x00)
+struct nci_core_reset_ntf {
+ u8 reset_trigger;
+ u8 config_status;
+ u8 nci_ver;
+ u8 manufact_id;
+ u8 manufacturer_specific_len;
+ __le32 manufact_specific_info;
+} __packed;
+
#define NCI_OP_CORE_CONN_CREDITS_NTF nci_opcode_pack(NCI_GID_CORE, 0x06)
struct conn_credit_entry {
__u8 conn_id;
@@ -501,18 +535,18 @@ struct nci_rf_nfcee_action_ntf {
__u8 nfcee_id;
__u8 trigger;
__u8 supported_data_length;
- __u8 supported_data[0];
+ __u8 supported_data[];
} __packed;
#define NCI_OP_NFCEE_DISCOVER_NTF nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x00)
struct nci_nfcee_supported_protocol {
__u8 num_protocol;
- __u8 supported_protocol[0];
+ __u8 supported_protocol[];
} __packed;
struct nci_nfcee_information_tlv {
__u8 num_tlv;
- __u8 information_tlv[0];
+ __u8 information_tlv[];
} __packed;
struct nci_nfcee_discover_ntf {
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 43c9c5d2bedb..ea8595651c38 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -30,6 +30,7 @@ enum nci_flag {
NCI_UP,
NCI_DATA_EXCHANGE,
NCI_DATA_EXCHANGE_TO,
+ NCI_UNREG,
};
/* NCI device states */
@@ -82,10 +83,10 @@ struct nci_ops {
void (*hci_cmd_received)(struct nci_dev *ndev, u8 pipe, u8 cmd,
struct sk_buff *skb);
- struct nci_driver_ops *prop_ops;
+ const struct nci_driver_ops *prop_ops;
size_t n_prop_ops;
- struct nci_driver_ops *core_ops;
+ const struct nci_driver_ops *core_ops;
size_t n_core_ops;
};
@@ -194,7 +195,7 @@ struct nci_hci_dev {
/* NCI Core structures */
struct nci_dev {
struct nfc_dev *nfc_dev;
- struct nci_ops *ops;
+ const struct nci_ops *ops;
struct nci_hci_dev *hci_dev;
int tx_headroom;
@@ -267,7 +268,7 @@ struct nci_dev {
};
/* ----- NCI Devices ----- */
-struct nci_dev *nci_allocate_device(struct nci_ops *ops,
+struct nci_dev *nci_allocate_device(const struct nci_ops *ops,
__u32 supported_protocols,
int tx_headroom,
int tx_tailroom);
@@ -276,28 +277,31 @@ int nci_register_device(struct nci_dev *ndev);
void nci_unregister_device(struct nci_dev *ndev);
int nci_request(struct nci_dev *ndev,
void (*req)(struct nci_dev *ndev,
- unsigned long opt),
- unsigned long opt, __u32 timeout);
-int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, __u8 *payload);
-int nci_core_cmd(struct nci_dev *ndev, __u16 opcode, size_t len, __u8 *payload);
+ const void *opt),
+ const void *opt, __u32 timeout);
+int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len,
+ const __u8 *payload);
+int nci_core_cmd(struct nci_dev *ndev, __u16 opcode, size_t len,
+ const __u8 *payload);
int nci_core_reset(struct nci_dev *ndev);
int nci_core_init(struct nci_dev *ndev);
int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb);
-int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val);
+int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, const __u8 *val);
int nci_nfcee_discover(struct nci_dev *ndev, u8 action);
int nci_nfcee_mode_set(struct nci_dev *ndev, u8 nfcee_id, u8 nfcee_mode);
int nci_core_conn_create(struct nci_dev *ndev, u8 destination_type,
u8 number_destination_params,
size_t params_len,
- struct core_conn_create_dest_spec_params *params);
+ const struct core_conn_create_dest_spec_params *params);
int nci_core_conn_close(struct nci_dev *ndev, u8 conn_id);
-int nci_nfcc_loopback(struct nci_dev *ndev, void *data, size_t data_len,
+int nci_nfcc_loopback(struct nci_dev *ndev, const void *data, size_t data_len,
struct sk_buff **resp);
struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev);
+void nci_hci_deallocate(struct nci_dev *ndev);
int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event,
const u8 *param, size_t param_len);
int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate,
@@ -342,7 +346,7 @@ static inline void *nci_get_drvdata(struct nci_dev *ndev)
}
static inline int nci_set_vendor_cmds(struct nci_dev *ndev,
- struct nfc_vendor_cmd *cmds,
+ const struct nfc_vendor_cmd *cmds,
int n_cmds)
{
return nfc_set_vendor_cmds(ndev->nfc_dev, cmds, n_cmds);
@@ -359,7 +363,7 @@ int nci_core_rsp_packet(struct nci_dev *ndev, __u16 opcode,
int nci_core_ntf_packet(struct nci_dev *ndev, __u16 opcode,
struct sk_buff *skb);
void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb);
-int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload);
+int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, const void *payload);
int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb);
int nci_conn_max_data_pkt_payload_size(struct nci_dev *ndev, __u8 conn_id);
void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
@@ -377,7 +381,7 @@ void nci_req_complete(struct nci_dev *ndev, int result);
struct nci_conn_info *nci_get_conn_info_by_conn_id(struct nci_dev *ndev,
int conn_id);
int nci_get_conn_info_by_dest_type_params(struct nci_dev *ndev, u8 dest_type,
- struct dest_spec_params *params);
+ const struct dest_spec_params *params);
/* ----- NCI status code ----- */
int nci_to_errno(__u8 code);
@@ -430,8 +434,6 @@ struct nci_uart_ops {
int (*open)(struct nci_uart *nci_uart);
void (*close)(struct nci_uart *nci_uart);
int (*recv)(struct nci_uart *nci_uart, struct sk_buff *skb);
- int (*recv_buf)(struct nci_uart *nci_uart, const u8 *data, char *flags,
- int count);
int (*send)(struct nci_uart *nci_uart, struct sk_buff *skb);
void (*tx_start)(struct nci_uart *nci_uart);
void (*tx_done)(struct nci_uart *nci_uart);
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 5d277d68fd8d..5dee575fbe86 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -146,7 +146,7 @@ struct nfc_evt_transaction {
u32 aid_len;
u8 aid[NFC_MAX_AID_LENGTH];
u8 params_len;
- u8 params[0];
+ u8 params[];
} __packed;
struct nfc_genl_data {
@@ -188,17 +188,17 @@ struct nfc_dev {
struct rfkill *rfkill;
- struct nfc_vendor_cmd *vendor_cmds;
+ const struct nfc_vendor_cmd *vendor_cmds;
int n_vendor_cmds;
- struct nfc_ops *ops;
+ const struct nfc_ops *ops;
struct genl_info *cur_cmd_info;
};
#define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev)
extern struct class nfc_class;
-struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
+struct nfc_dev *nfc_allocate_device(const struct nfc_ops *ops,
u32 supported_protocols,
int tx_headroom,
int tx_tailroom);
@@ -245,7 +245,7 @@ static inline void nfc_set_drvdata(struct nfc_dev *dev, void *data)
*
* @dev: The nfc device
*/
-static inline void *nfc_get_drvdata(struct nfc_dev *dev)
+static inline void *nfc_get_drvdata(const struct nfc_dev *dev)
{
return dev_get_drvdata(&dev->dev);
}
@@ -255,7 +255,7 @@ static inline void *nfc_get_drvdata(struct nfc_dev *dev)
*
* @dev: The nfc device whose name to return
*/
-static inline const char *nfc_device_name(struct nfc_dev *dev)
+static inline const char *nfc_device_name(const struct nfc_dev *dev)
{
return dev_name(&dev->dev);
}
@@ -266,7 +266,7 @@ struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk,
struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp);
int nfc_set_remote_general_bytes(struct nfc_dev *dev,
- u8 *gt, u8 gt_len);
+ const u8 *gt, u8 gt_len);
u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, size_t *gb_len);
int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
@@ -280,7 +280,7 @@ int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
u8 comm_mode, u8 rf_mode);
int nfc_tm_activated(struct nfc_dev *dev, u32 protocol, u8 comm_mode,
- u8 *gb, size_t gb_len);
+ const u8 *gb, size_t gb_len);
int nfc_tm_deactivated(struct nfc_dev *dev);
int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb);
@@ -297,7 +297,7 @@ void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb,
u8 payload_type, u8 direction);
static inline int nfc_set_vendor_cmds(struct nfc_dev *dev,
- struct nfc_vendor_cmd *cmds,
+ const struct nfc_vendor_cmd *cmds,
int n_cmds)
{
if (dev->vendor_cmds || dev->n_vendor_cmds)
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index ddcee128f5d9..f5850b569c52 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -19,6 +19,8 @@
*
*/
+#include <linux/types.h>
+
#define NL802154_GENL_NAME "nl802154"
enum nl802154_commands {
@@ -56,9 +58,6 @@ enum nl802154_commands {
NL802154_CMD_SET_WPAN_PHY_NETNS,
- /* add new commands above here */
-
-#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
NL802154_CMD_SET_SEC_PARAMS,
NL802154_CMD_GET_SEC_KEY, /* can dump */
NL802154_CMD_NEW_SEC_KEY,
@@ -72,7 +71,8 @@ enum nl802154_commands {
NL802154_CMD_GET_SEC_LEVEL, /* can dump */
NL802154_CMD_NEW_SEC_LEVEL,
NL802154_CMD_DEL_SEC_LEVEL,
-#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
+ /* add new commands above here */
/* used to define NL802154_CMD_MAX below */
__NL802154_CMD_AFTER_LAST,
@@ -150,10 +150,9 @@ enum nl802154_attrs {
};
enum nl802154_iftype {
- /* for backwards compatibility TODO */
- NL802154_IFTYPE_UNSPEC = -1,
+ NL802154_IFTYPE_UNSPEC = (~(__u32)0),
- NL802154_IFTYPE_NODE,
+ NL802154_IFTYPE_NODE = 0,
NL802154_IFTYPE_MONITOR,
NL802154_IFTYPE_COORD,
diff --git a/include/net/p8022.h b/include/net/p8022.h
index c2bacc66bfbc..b690ffcad66b 100644
--- a/include/net/p8022.h
+++ b/include/net/p8022.h
@@ -1,6 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NET_P8022_H
#define _NET_P8022_H
+
+struct net_device;
+struct packet_type;
+struct sk_buff;
+
struct datalink_proto *
register_8022_client(unsigned char type,
int (*func)(struct sk_buff *skb,
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index cfbed00ba7ee..813c93499f20 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -45,7 +45,10 @@
* Please note DMA-sync-for-CPU is still
* device driver responsibility
*/
-#define PP_FLAG_ALL (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)
+#define PP_FLAG_PAGE_FRAG BIT(2) /* for page frag feature */
+#define PP_FLAG_ALL (PP_FLAG_DMA_MAP |\
+ PP_FLAG_DMA_SYNC_DEV |\
+ PP_FLAG_PAGE_FRAG)
/*
* Fast allocation side cache array/stack
@@ -65,7 +68,7 @@
#define PP_ALLOC_CACHE_REFILL 64
struct pp_alloc_cache {
u32 count;
- void *cache[PP_ALLOC_CACHE_SIZE];
+ struct page *cache[PP_ALLOC_CACHE_SIZE];
};
struct page_pool_params {
@@ -77,8 +80,73 @@ struct page_pool_params {
enum dma_data_direction dma_dir; /* DMA mapping direction */
unsigned int max_len; /* max DMA sync memory size */
unsigned int offset; /* DMA addr offset */
+ void (*init_callback)(struct page *page, void *arg);
+ void *init_arg;
};
+#ifdef CONFIG_PAGE_POOL_STATS
+struct page_pool_alloc_stats {
+ u64 fast; /* fast path allocations */
+ u64 slow; /* slow-path order 0 allocations */
+ u64 slow_high_order; /* slow-path high order allocations */
+ u64 empty; /* failed refills due to empty ptr ring, forcing
+ * slow path allocation
+ */
+ u64 refill; /* allocations via successful refill */
+ u64 waive; /* failed refills due to numa zone mismatch */
+};
+
+struct page_pool_recycle_stats {
+ u64 cached; /* recycling placed page in the cache. */
+ u64 cache_full; /* cache was full */
+ u64 ring; /* recycling placed page back into ptr ring */
+ u64 ring_full; /* page was released from page-pool because
+ * PTR ring was full.
+ */
+ u64 released_refcnt; /* page released because of elevated
+ * refcnt
+ */
+};
+
+/* This struct wraps the above stats structs so users of the
+ * page_pool_get_stats API can pass a single argument when requesting the
+ * stats for the page pool.
+ */
+struct page_pool_stats {
+ struct page_pool_alloc_stats alloc_stats;
+ struct page_pool_recycle_stats recycle_stats;
+};
+
+int page_pool_ethtool_stats_get_count(void);
+u8 *page_pool_ethtool_stats_get_strings(u8 *data);
+u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);
+
+/*
+ * Drivers that wish to harvest page pool stats and report them to users
+ * (perhaps via ethtool, debugfs, or another mechanism) can allocate a
+ * struct page_pool_stats and call page_pool_get_stats() to get stats for the specified pool.
+ */
+bool page_pool_get_stats(struct page_pool *pool,
+ struct page_pool_stats *stats);
+#else
+
+static inline int page_pool_ethtool_stats_get_count(void)
+{
+ return 0;
+}
+
+static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
+{
+ return data;
+}
+
+static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
+{
+ return data;
+}
+
+#endif
+
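
One way a driver might wire these helpers into its ethtool ops, assuming
CONFIG_PAGE_POOL_STATS is set; my_priv and the my_* callbacks are invented,
and the pool pointer lives wherever the driver keeps it:

	static int my_get_sset_count(struct net_device *dev, int sset)
	{
		if (sset == ETH_SS_STATS)
			return page_pool_ethtool_stats_get_count();
		return -EOPNOTSUPP;
	}

	static void my_get_strings(struct net_device *dev, u32 sset, u8 *data)
	{
		if (sset == ETH_SS_STATS)
			data = page_pool_ethtool_stats_get_strings(data);
	}

	static void my_get_ethtool_stats(struct net_device *dev,
					 struct ethtool_stats *stats, u64 *data)
	{
		struct my_priv *priv = netdev_priv(dev);	/* hypothetical */
		struct page_pool_stats pp_stats = {};

		if (page_pool_get_stats(priv->pool, &pp_stats))
			data = page_pool_ethtool_stats_get(data, &pp_stats);
	}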
struct page_pool {
struct page_pool_params p;
@@ -88,6 +156,15 @@ struct page_pool {
unsigned long defer_warn;
u32 pages_state_hold_cnt;
+ unsigned int frag_offset;
+ struct page *frag_page;
+ long frag_users;
+
+#ifdef CONFIG_PAGE_POOL_STATS
+ /* these stats are incremented while in softirq context */
+ struct page_pool_alloc_stats alloc_stats;
+#endif
+ u32 xdp_mem_id;
/*
* Data structure for allocation side
@@ -117,6 +194,10 @@ struct page_pool {
*/
struct ptr_ring ring;
+#ifdef CONFIG_PAGE_POOL_STATS
+ /* recycle stats are per-cpu to avoid locking */
+ struct page_pool_recycle_stats __percpu *recycle_stats;
+#endif
atomic_t pages_state_release_cnt;
/* A page_pool is strictly tied to a single RX-queue being
@@ -137,6 +218,18 @@ static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
return page_pool_alloc_pages(pool, gfp);
}
+struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
+ unsigned int size, gfp_t gfp);
+
+static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
+ unsigned int *offset,
+ unsigned int size)
+{
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ return page_pool_alloc_frag(pool, offset, size, gfp);
+}
+
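
An RX-refill sketch built on the frag API above: several sub-page buffers are
carved from one pool page and the pool tracks outstanding users through
pp_frag_count. The 2048-byte buffer size and the my_rx_desc layout are
assumptions:

	struct my_rx_desc {			/* hypothetical */
		struct page *page;
		unsigned int page_offset;
		dma_addr_t dma;
	};

	static int my_refill_rx_buf(struct page_pool *pool,
				    struct my_rx_desc *desc)
	{
		unsigned int offset;
		struct page *page;

		page = page_pool_dev_alloc_frag(pool, &offset, 2048);
		if (!page)
			return -ENOMEM;

		desc->page = page;
		desc->page_offset = offset;
		desc->dma = page_pool_get_dma_addr(page) + offset;
		return 0;
	}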
/* get the stored dma direction. A driver might decide to treat this locally and
* avoid the extra cache line from page_pool to determine the direction
*/
@@ -146,60 +239,126 @@ inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
return pool->p.dma_dir;
}
+bool page_pool_return_skb_page(struct page *page);
+
struct page_pool *page_pool_create(const struct page_pool_params *params);
+struct xdp_mem_info;
+
#ifdef CONFIG_PAGE_POOL
void page_pool_destroy(struct page_pool *pool);
-void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
+void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
+ struct xdp_mem_info *mem);
+void page_pool_release_page(struct page_pool *pool, struct page *page);
+void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+ int count);
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
}
static inline void page_pool_use_xdp_mem(struct page_pool *pool,
- void (*disconnect)(void *))
+ void (*disconnect)(void *),
+ struct xdp_mem_info *mem)
+{
+}
+static inline void page_pool_release_page(struct page_pool *pool,
+ struct page *page)
+{
+}
+
+static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+ int count)
{
}
#endif
-/* Never call this directly, use helpers below */
-void __page_pool_put_page(struct page_pool *pool, struct page *page,
- unsigned int dma_sync_size, bool allow_direct);
+void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
+ unsigned int dma_sync_size,
+ bool allow_direct);
+
+static inline void page_pool_fragment_page(struct page *page, long nr)
+{
+ atomic_long_set(&page->pp_frag_count, nr);
+}
+
+static inline long page_pool_defrag_page(struct page *page, long nr)
+{
+ long ret;
+
+ /* If nr == pp_frag_count then we have cleared all remaining
+	 * references to the page. No need to actually overwrite it; instead
+ * we can leave this to be overwritten by the calling function.
+ *
+ * The main advantage to doing this is that an atomic_read is
+ * generally a much cheaper operation than an atomic update,
+ * especially when dealing with a page that may be partitioned
+ * into only 2 or 3 pieces.
+ */
+ if (atomic_long_read(&page->pp_frag_count) == nr)
+ return 0;
+
+ ret = atomic_long_sub_return(nr, &page->pp_frag_count);
+ WARN_ON(ret < 0);
+ return ret;
+}
+
+static inline bool page_pool_is_last_frag(struct page_pool *pool,
+ struct page *page)
+{
+ /* If fragments aren't enabled or count is 0 we were the last user */
+ return !(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
+ (page_pool_defrag_page(page, 1) == 0);
+}
static inline void page_pool_put_page(struct page_pool *pool,
- struct page *page, bool allow_direct)
+ struct page *page,
+ unsigned int dma_sync_size,
+ bool allow_direct)
{
/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
* allow registering MEM_TYPE_PAGE_POOL, but shield linker.
*/
#ifdef CONFIG_PAGE_POOL
- __page_pool_put_page(pool, page, -1, allow_direct);
+ if (!page_pool_is_last_frag(pool, page))
+ return;
+
+ page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);
#endif
}
-/* Very limited use-cases allow recycle direct */
+
+/* Same as above but will try to sync the entire area pool->max_len */
+static inline void page_pool_put_full_page(struct page_pool *pool,
+ struct page *page, bool allow_direct)
+{
+ page_pool_put_page(pool, page, -1, allow_direct);
+}
+
+/* Same as above, but the caller must guarantee a safe context, e.g. NAPI */
static inline void page_pool_recycle_direct(struct page_pool *pool,
struct page *page)
{
- __page_pool_put_page(pool, page, -1, true);
+ page_pool_put_full_page(pool, page, true);
}
-/* Disconnects a page (from a page_pool). API users can have a need
- * to disconnect a page (from a page_pool), to allow it to be used as
- * a regular page (that will eventually be returned to the normal
- * page-allocator via put_page).
- */
-void page_pool_unmap_page(struct page_pool *pool, struct page *page);
-static inline void page_pool_release_page(struct page_pool *pool,
- struct page *page)
+#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT \
+ (sizeof(dma_addr_t) > sizeof(unsigned long))
+
+static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
-#ifdef CONFIG_PAGE_POOL
- page_pool_unmap_page(pool, page);
-#endif
+ dma_addr_t ret = page->dma_addr;
+
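+	/* Shift twice by 16: a single "<< 32" would be undefined in the
+	 * (dead) branch on configs where dma_addr_t is only 32 bits wide.
+	 */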
+ if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
+ ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;
+
+ return ret;
}
-static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
+static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
- return page->dma_addr;
+ page->dma_addr = addr;
+ if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
+ page->dma_addr_upper = upper_32_bits(addr);
}
static inline bool is_page_pool_compiled_in(void)
@@ -223,4 +382,23 @@ static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
if (unlikely(pool->p.nid != new_nid))
page_pool_update_nid(pool, new_nid);
}
+
+static inline void page_pool_ring_lock(struct page_pool *pool)
+ __acquires(&pool->ring.producer_lock)
+{
+ if (in_serving_softirq())
+ spin_lock(&pool->ring.producer_lock);
+ else
+ spin_lock_bh(&pool->ring.producer_lock);
+}
+
+static inline void page_pool_ring_unlock(struct page_pool *pool)
+ __releases(&pool->ring.producer_lock)
+{
+ if (in_serving_softirq())
+ spin_unlock(&pool->ring.producer_lock);
+ else
+ spin_unlock_bh(&pool->ring.producer_lock);
+}
+
#endif /* _NET_PAGE_POOL_H */
diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h
index 27b1ab5e4e6d..645dddf5ce77 100644
--- a/include/net/phonet/pep.h
+++ b/include/net/phonet/pep.h
@@ -10,6 +10,9 @@
#ifndef NET_PHONET_PEP_H
#define NET_PHONET_PEP_H
+#include <linux/skbuff.h>
+#include <net/phonet/phonet.h>
+
struct pep_sock {
struct pn_sock pn_sk;
diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h
index a27bdc6cfeab..862f1719b523 100644
--- a/include/net/phonet/phonet.h
+++ b/include/net/phonet/phonet.h
@@ -10,6 +10,10 @@
#ifndef AF_PHONET_H
#define AF_PHONET_H
+#include <linux/phonet.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+
/*
* The lower layers may not require more space, ever. Make sure it's
* enough.
diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h
index 05b49d4d2b11..e9dc8dca5817 100644
--- a/include/net/phonet/pn_dev.h
+++ b/include/net/phonet/pn_dev.h
@@ -10,6 +10,11 @@
#ifndef PN_DEV_H
#define PN_DEV_H
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+struct net;
+
struct phonet_device_list {
struct list_head list;
struct mutex lock;
diff --git a/include/net/pie.h b/include/net/pie.h
index fd5a37cb7993..3fe2361e03b4 100644
--- a/include/net/pie.h
+++ b/include/net/pie.h
@@ -8,7 +8,7 @@
#include <net/inet_ecn.h>
#include <net/pkt_sched.h>
-#define MAX_PROB U64_MAX
+#define MAX_PROB (U64_MAX >> BITS_PER_BYTE)
#define DTIME_INVALID U64_MAX
#define QUEUE_THRESHOLD 16384
#define DQCOUNT_INVALID -1
@@ -38,16 +38,15 @@ struct pie_params {
/**
* struct pie_vars - contains pie variables
- * @qdelay: current queue delay
- * @qdelay_old: queue delay in previous qdelay calculation
- * @burst_time: burst time allowance
- * @dq_tstamp: timestamp at which dq rate was last calculated
- * @prob: drop probability
- * @accu_prob: accumulated drop probability
- * @dq_count: number of bytes dequeued in a measurement cycle
- * @avg_dq_rate: calculated average dq rate
- * @qlen_old: queue length during previous qdelay calculation
- * @accu_prob_overflows: number of times accu_prob overflows
+ * @qdelay: current queue delay
+ * @qdelay_old: queue delay in previous qdelay calculation
+ * @burst_time: burst time allowance
+ * @dq_tstamp: timestamp at which dq rate was last calculated
+ * @prob: drop probability
+ * @accu_prob: accumulated drop probability
+ * @dq_count: number of bytes dequeued in a measurement cycle
+ * @avg_dq_rate: calculated average dq rate
+ * @backlog_old: queue backlog during previous qdelay calculation
*/
struct pie_vars {
psched_time_t qdelay;
@@ -58,8 +57,7 @@ struct pie_vars {
u64 accu_prob;
u64 dq_count;
u32 avg_dq_rate;
- u32 qlen_old;
- u8 accu_prob_overflows;
+ u32 backlog_old;
};
/**
@@ -107,7 +105,6 @@ static inline void pie_vars_init(struct pie_vars *vars)
vars->accu_prob = 0;
vars->dq_count = DQCOUNT_INVALID;
vars->avg_dq_rate = 0;
- vars->accu_prob_overflows = 0;
}
static inline struct pie_skb_cb *get_pie_cb(const struct sk_buff *skb)
@@ -127,12 +124,12 @@ static inline void pie_set_enqueue_time(struct sk_buff *skb)
}
bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
- struct pie_vars *vars, u32 qlen, u32 packet_size);
+ struct pie_vars *vars, u32 backlog, u32 packet_size);
void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
- struct pie_vars *vars, u32 qlen);
+ struct pie_vars *vars, u32 backlog);
void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
- u32 qlen);
+ u32 backlog);
#endif
diff --git a/include/net/ping.h b/include/net/ping.h
index 2fe78874318c..e4ff3911cbf5 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -71,12 +71,12 @@ void ping_err(struct sk_buff *skb, int offset, u32 info);
int ping_getfrag(void *from, char *to, int offset, int fraglen, int odd,
struct sk_buff *);
-int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
+int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int flags, int *addr_len);
int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
void *user_icmph, size_t icmph_len);
int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
-bool ping_rcv(struct sk_buff *skb);
+enum skb_drop_reason ping_rcv(struct sk_buff *skb);
#ifdef CONFIG_PROC_FS
void *ping_seq_start(struct seq_file *seq, loff_t *pos, sa_family_t family);
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index a972244ab193..4cabb32a2ad9 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -23,7 +23,7 @@ struct tcf_walker {
};
int register_tcf_proto_ops(struct tcf_proto_ops *ops);
-int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
+void unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
struct tcf_block_ext_info {
enum flow_block_binder_type binder_type;
@@ -32,6 +32,12 @@ struct tcf_block_ext_info {
u32 block_index;
};
+struct tcf_qevent {
+ struct tcf_block *block;
+ struct tcf_block_ext_info info;
+ struct tcf_proto __rcu *filter_chain;
+};
+
struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
@@ -42,7 +48,7 @@ void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
- struct tcf_proto *tp, bool rtnl_held);
+ struct tcf_proto *tp);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
@@ -70,8 +76,23 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
return block->q;
}
-int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
- struct tcf_result *res, bool compat_mode);
+int tcf_classify(struct sk_buff *skb,
+ const struct tcf_block *block,
+ const struct tcf_proto *tp, struct tcf_result *res,
+ bool compat_mode);
+
+static inline bool tc_cls_stats_dump(struct tcf_proto *tp,
+ struct tcf_walker *arg,
+ void *filter)
+{
+ if (arg->count >= arg->skip && arg->fn(tp, filter, arg) < 0) {
+ arg->stop = 1;
+ return false;
+ }
+
+ arg->count++;
+ return true;
+}
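
For illustration, the shape of a classifier ->walk() built on the helper
above, patterned after the cls_* implementations; my_head and my_filter are
invented:

	static void my_cls_walk(struct tcf_proto *tp, struct tcf_walker *arg,
				bool rtnl_held)
	{
		struct my_head *head = rtnl_dereference(tp->root);
		struct my_filter *f;

		list_for_each_entry(f, &head->filters, list) {
			if (!tc_cls_stats_dump(tp, arg, f))
				break;
		}
	}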
#else
static inline bool tcf_block_shared(struct tcf_block *block)
@@ -128,11 +149,14 @@ void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
{
}
-static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+static inline int tcf_classify(struct sk_buff *skb,
+ const struct tcf_block *block,
+ const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode)
{
return TC_ACT_UNSPEC;
}
+
#endif
static inline unsigned long
@@ -186,12 +210,25 @@ tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
__tcf_unbind_filter(q, r);
}
+static inline void tc_cls_bind_class(u32 classid, unsigned long cl,
+ void *q, struct tcf_result *res,
+ unsigned long base)
+{
+ if (res->classid == classid) {
+ if (cl)
+ __tcf_bind_filter(q, res, base);
+ else
+ __tcf_unbind_filter(q, res);
+ }
+}
+
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
__u32 type; /* for backward compat(TCA_OLD_COMPAT) */
int nr_actions;
struct tc_action **actions;
- struct net *net;
+ struct net *net;
+ netns_tracker ns_tracker;
#endif
/* Map to export classifier specific extension TLV types to the
* generic extensions API. Unsupported extensions must be set to 0.
@@ -206,6 +243,9 @@ static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
#ifdef CONFIG_NET_CLS_ACT
exts->type = 0;
exts->nr_actions = 0;
+	/* Note: we do not yet own a reference on net.
+ * This reference might be taken later from tcf_exts_get_net().
+ */
exts->net = net;
exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
GFP_KERNEL);
@@ -225,6 +265,8 @@ static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
exts->net = maybe_get_net(exts->net);
+ if (exts->net)
+ netns_tracker_alloc(exts->net, &exts->ns_tracker, GFP_KERNEL);
return exts->net != NULL;
#else
return true;
@@ -235,7 +277,7 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
if (exts->net)
- put_net(exts->net);
+ put_net_track(exts->net, &exts->ns_tracker);
#endif
}
@@ -247,22 +289,31 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts)
for (; 0; (void)(i), (void)(a), (void)(exts))
#endif
+#define tcf_act_for_each_action(i, a, actions) \
+ for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = actions[i]); i++)
+
static inline void
-tcf_exts_stats_update(const struct tcf_exts *exts,
- u64 bytes, u64 packets, u64 lastuse)
+tcf_exts_hw_stats_update(const struct tcf_exts *exts,
+ u64 bytes, u64 packets, u64 drops, u64 lastuse,
+ u8 used_hw_stats, bool used_hw_stats_valid)
{
#ifdef CONFIG_NET_CLS_ACT
int i;
- preempt_disable();
-
for (i = 0; i < exts->nr_actions; i++) {
struct tc_action *a = exts->actions[i];
- tcf_action_stats_update(a, bytes, packets, lastuse, true);
- }
+		/* only update actions that take their stats from hw */
+ if (tcf_action_update_hw_stats(a)) {
+ preempt_disable();
+ tcf_action_stats_update(a, bytes, packets, drops,
+ lastuse, true);
+ preempt_enable();
- preempt_enable();
+ a->used_hw_stats = used_hw_stats;
+ a->used_hw_stats_valid = used_hw_stats_valid;
+ }
+ }
#endif
}
@@ -304,15 +355,22 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
struct nlattr **tb, struct nlattr *rate_tlv,
- struct tcf_exts *exts, bool ovr, bool rtnl_held,
+ struct tcf_exts *exts, u32 flags,
struct netlink_ext_ack *extack);
+int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
+ struct nlattr *rate_tlv, struct tcf_exts *exts,
+ u32 flags, u32 fl_flags, struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
+int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
/**
* struct tcf_pkt_info - packet information
+ *
+ * @ptr: start of the pkt data
+ * @nexthdr: offset of the next header
*/
struct tcf_pkt_info {
unsigned char * ptr;
@@ -331,6 +389,7 @@ struct tcf_ematch_ops;
* @ops: the operations lookup table of the corresponding ematch module
* @datalen: length of the ematch specific configuration data
* @data: ematch specific data
+ * @net: the network namespace
*/
struct tcf_ematch {
struct tcf_ematch_ops * ops;
@@ -488,13 +547,17 @@ tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
char indev[IFNAMSIZ];
struct net_device *dev;
- if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
- NL_SET_ERR_MSG(extack, "Interface name too long");
+ if (nla_strscpy(indev, indev_tlv, IFNAMSIZ) < 0) {
+ NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
+ "Interface name too long");
return -EINVAL;
}
dev = __dev_get_by_name(net, indev);
- if (!dev)
+ if (!dev) {
+ NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
+ "Network device not found");
return -ENODEV;
+ }
return dev->ifindex;
}
@@ -508,9 +571,13 @@ tcf_match_indev(struct sk_buff *skb, int ifindex)
return ifindex == skb->skb_iif;
}
-int tc_setup_flow_action(struct flow_action *flow_action,
- const struct tcf_exts *exts, bool rtnl_held);
-void tc_cleanup_flow_action(struct flow_action *flow_action);
+int tc_setup_offload_action(struct flow_action *flow_action,
+ const struct tcf_exts *exts,
+ struct netlink_ext_ack *extack);
+void tc_cleanup_offload_action(struct flow_action *flow_action);
+int tc_setup_action(struct flow_action *flow_action,
+ struct tc_action *actions[],
+ struct netlink_ext_ack *extack);
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
void *type_data, bool err_stop, bool rtnl_held);
@@ -531,6 +598,49 @@ int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
void *cb_priv, u32 *flags, unsigned int *in_hw_count);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
+#ifdef CONFIG_NET_CLS_ACT
+int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
+ enum flow_block_binder_type binder_type,
+ struct nlattr *block_index_attr,
+ struct netlink_ext_ack *extack);
+void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
+int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
+ struct netlink_ext_ack *extack);
+struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
+ struct sk_buff **to_free, int *ret);
+int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
+#else
+static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
+ enum flow_block_binder_type binder_type,
+ struct nlattr *block_index_attr,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
+{
+}
+
+static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static inline struct sk_buff *
+tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
+ struct sk_buff **to_free, int *ret)
+{
+ return skb;
+}
+
+static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
+{
+ return 0;
+}
+#endif
+
struct tc_cls_u32_knode {
struct tcf_exts *exts;
struct tcf_result *res;
@@ -638,6 +748,17 @@ tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
cls_common->extack = extack;
}
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
+{
+ struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+
+ if (tc_skb_ext)
+ memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));
+ return tc_skb_ext;
+}
+#endif
+
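
Callers get a zeroed extension back and fill in only what they need. A hedged
sketch; the chain member lives in struct tc_skb_ext (linux/skbuff.h), which is
not shown in this hunk:

	static int my_set_chain_ext(struct sk_buff *skb, u32 chain)
	{
		struct tc_skb_ext *ext = tc_skb_ext_alloc(skb);

		if (!ext)
			return -ENOMEM;
		ext->chain = chain;
		return 0;
	}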
enum tc_matchall_command {
TC_CLSMATCHALL_REPLACE,
TC_CLSMATCHALL_DESTROY,
@@ -687,7 +808,7 @@ struct tc_cookie {
};
struct tc_qopt_offload_stats {
- struct gnet_stats_basic_packed *bstats;
+ struct gnet_stats_basic_sync *bstats;
struct gnet_stats_queue *qstats;
};
@@ -712,6 +833,41 @@ struct tc_mq_qopt_offload {
};
};
+enum tc_htb_command {
+ /* Root */
+ TC_HTB_CREATE, /* Initialize HTB offload. */
+ TC_HTB_DESTROY, /* Destroy HTB offload. */
+
+ /* Classes */
+ /* Allocate qid and create leaf. */
+ TC_HTB_LEAF_ALLOC_QUEUE,
+ /* Convert leaf to inner, preserve and return qid, create new leaf. */
+ TC_HTB_LEAF_TO_INNER,
+ /* Delete leaf, while siblings remain. */
+ TC_HTB_LEAF_DEL,
+ /* Delete leaf, convert parent to leaf, preserving qid. */
+ TC_HTB_LEAF_DEL_LAST,
+ /* TC_HTB_LEAF_DEL_LAST, but delete driver data on hardware errors. */
+ TC_HTB_LEAF_DEL_LAST_FORCE,
+ /* Modify parameters of a node. */
+ TC_HTB_NODE_MODIFY,
+
+ /* Class qdisc */
+ TC_HTB_LEAF_QUERY_QUEUE, /* Query qid by classid. */
+};
+
+struct tc_htb_qopt_offload {
+ struct netlink_ext_ack *extack;
+ enum tc_htb_command command;
+ u32 parent_classid;
+ u16 classid;
+ u16 qid;
+ u64 rate;
+ u64 ceil;
+};
+
+#define TC_HTB_CLASSID_ROOT U32_MAX
+
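
A sketch of the driver side of this offload: the ndo_setup_tc() handler
dispatches on opt->command. Only the command names and the
tc_htb_qopt_offload fields come from this header; the my_* helpers are
hypothetical:

	static int my_setup_tc_htb(struct net_device *dev,
				   struct tc_htb_qopt_offload *opt)
	{
		switch (opt->command) {
		case TC_HTB_CREATE:
			return my_htb_root_create(dev, opt->extack);
		case TC_HTB_LEAF_ALLOC_QUEUE:
			/* pick a hardware queue, report it via opt->qid */
			return my_htb_leaf_alloc(dev, opt->classid, opt->rate,
						 opt->ceil, &opt->qid);
		case TC_HTB_NODE_MODIFY:
			return my_htb_node_modify(dev, opt->classid,
						  opt->rate, opt->ceil);
		case TC_HTB_DESTROY:
			my_htb_destroy(dev);
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	}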
enum tc_red_command {
TC_RED_REPLACE,
TC_RED_DESTROY,
@@ -727,6 +883,7 @@ struct tc_red_qopt_offload_params {
u32 limit;
bool is_ecn;
bool is_harddrop;
+ bool is_nodrop;
struct gnet_stats_queue *qstats;
};
@@ -771,7 +928,7 @@ struct tc_gred_qopt_offload_params {
};
struct tc_gred_qopt_offload_stats {
- struct gnet_stats_basic_packed bstats[MAX_DPs];
+ struct gnet_stats_basic_sync bstats[MAX_DPs];
struct gnet_stats_queue qstats[MAX_DPs];
struct red_stats *xstats[MAX_DPs];
};
@@ -863,6 +1020,7 @@ enum tc_tbf_command {
TC_TBF_REPLACE,
TC_TBF_DESTROY,
TC_TBF_STATS,
+ TC_TBF_GRAFT,
};
struct tc_tbf_qopt_offload_replace_params {
@@ -878,7 +1036,34 @@ struct tc_tbf_qopt_offload {
union {
struct tc_tbf_qopt_offload_replace_params replace_params;
struct tc_qopt_offload_stats stats;
+ u32 child_handle;
+ };
+};
+
+enum tc_fifo_command {
+ TC_FIFO_REPLACE,
+ TC_FIFO_DESTROY,
+ TC_FIFO_STATS,
+};
+
+struct tc_fifo_qopt_offload {
+ enum tc_fifo_command command;
+ u32 handle;
+ u32 parent;
+ union {
+ struct tc_qopt_offload_stats stats;
};
};
+#ifdef CONFIG_NET_CLS_ACT
+DECLARE_STATIC_KEY_FALSE(tc_skb_ext_tc);
+void tc_skb_ext_tc_enable(void);
+void tc_skb_ext_tc_disable(void);
+#define tc_skb_ext_tc_enabled() static_branch_unlikely(&tc_skb_ext_tc)
+#else /* CONFIG_NET_CLS_ACT */
+static inline void tc_skb_ext_tc_enable(void) { }
+static inline void tc_skb_ext_tc_disable(void) { }
+#define tc_skb_ext_tc_enabled() false
+#endif
+
#endif
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 6a70845bd9ab..38207873eda6 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -11,6 +11,7 @@
#include <uapi/linux/pkt_sched.h>
#define DEFAULT_TX_QUEUE_LEN 1000
+#define STAB_SIZE_LOG_MAX 30
struct qdisc_walker {
int stop;
@@ -19,12 +20,14 @@ struct qdisc_walker {
int (*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};
-#define QDISC_ALIGNTO 64
-#define QDISC_ALIGN(len) (((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1))
-
static inline void *qdisc_priv(struct Qdisc *q)
{
- return (char *) q + QDISC_ALIGN(sizeof(struct Qdisc));
+ return &q->privdata;
+}
+
+static inline struct Qdisc *qdisc_from_priv(void *priv)
+{
+ return container_of(priv, struct Qdisc, privdata);
}
/*
@@ -60,12 +63,6 @@ static inline psched_time_t psched_get_time(void)
return PSCHED_NS2TICKS(ktime_get_ns());
}
-static inline psched_tdiff_t
-psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
-{
- return min(tv1 - tv2, bound);
-}
-
struct qdisc_watchdog {
u64 last_expires;
struct hrtimer timer;
@@ -75,7 +72,15 @@ struct qdisc_watchdog {
void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
clockid_t clockid);
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
-void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
+
+void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
+ u64 delta_ns);
+
+static inline void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd,
+ u64 expires)
+{
+ return qdisc_watchdog_schedule_range_ns(wd, expires, 0ULL);
+}
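The range variant lets a qdisc pass an explicit slack window to the
underlying hrtimer so nearby expirations can be coalesced; the plain
_ns() call above is now just the zero-slack case. A minimal usage
sketch, where q->watchdog is the qdisc's struct qdisc_watchdog and the
100 us slack is an illustrative value:

	/* fire at 'expires', tolerating up to 100 us of lateness */
	qdisc_watchdog_schedule_range_ns(&q->watchdog, expires,
					 100 * NSEC_PER_USEC);
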
static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
psched_time_t expires)
@@ -95,7 +100,7 @@ struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
struct netlink_ext_ack *extack);
int register_qdisc(struct Qdisc_ops *qops);
-int unregister_qdisc(struct Qdisc_ops *qops);
+void unregister_qdisc(struct Qdisc_ops *qops);
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);
@@ -118,27 +123,11 @@ void __qdisc_run(struct Qdisc *q);
static inline void qdisc_run(struct Qdisc *q)
{
if (qdisc_run_begin(q)) {
- /* NOLOCK qdisc must check 'state' under the qdisc seqlock
- * to avoid racing with dev_qdisc_reset()
- */
- if (!(q->flags & TCQ_F_NOLOCK) ||
- likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
- __qdisc_run(q);
+ __qdisc_run(q);
qdisc_run_end(q);
}
}
-static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
-{
- /* We need to take extra care in case the skb came via
- * vlan accelerated path. In that case, use skb->vlan_proto
- * as the original vlan header was already stripped.
- */
- if (skb_vlan_tag_present(skb))
- return skb->vlan_proto;
- return skb->protocol;
-}
-
/* Calculate the maximal size of a packet seen by the hard_start_xmit
routine of this device.
*/
@@ -152,6 +141,11 @@ static inline struct net *qdisc_net(struct Qdisc *q)
return dev_net(q->dev_queue->dev);
}
+struct tc_query_caps_base {
+ enum tc_setup_type type;
+ void *caps;
+};
+
struct tc_cbs_qopt_offload {
u8 enable;
s32 queue;
@@ -166,6 +160,10 @@ struct tc_etf_qopt_offload {
s32 queue;
};
+struct tc_taprio_caps {
+ bool supports_queue_max_sdu:1;
+};
+
struct tc_taprio_sched_entry {
u8 command; /* TC_TAPRIO_CMD_* */
@@ -179,14 +177,72 @@ struct tc_taprio_qopt_offload {
ktime_t base_time;
u64 cycle_time;
u64 cycle_time_extension;
+ u32 max_sdu[TC_MAX_QUEUE];
size_t num_entries;
- struct tc_taprio_sched_entry entries[0];
+ struct tc_taprio_sched_entry entries[];
};
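With the zero-length array converted to a C99 flexible array member,
callers size the offload with struct_size() at allocation time. A
minimal sketch of the allocation pattern (not necessarily taprio's
exact code path):

	struct tc_taprio_qopt_offload *offload;

	offload = kzalloc(struct_size(offload, entries, num_entries),
			  GFP_KERNEL);
	if (!offload)
		return NULL;
	offload->num_entries = num_entries;
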
+#if IS_ENABLED(CONFIG_NET_SCH_TAPRIO)
+
/* Reference counting */
struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
*offload);
void taprio_offload_free(struct tc_taprio_qopt_offload *offload);
+#else
+
+/* Reference counting */
+static inline struct tc_taprio_qopt_offload *
+taprio_offload_get(struct tc_taprio_qopt_offload *offload)
+{
+ return NULL;
+}
+
+static inline void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
+{
+}
+
+#endif
+
+/* Ensure skb_mstamp_ns, which might have been populated with the txtime, is
+ * not mistaken for a software timestamp, because this will otherwise prevent
+ * the dispatch of hardware timestamps to the socket.
+ */
+static inline void skb_txtime_consumed(struct sk_buff *skb)
+{
+ skb->tstamp = ktime_set(0, 0);
+}
+
+struct tc_skb_cb {
+ struct qdisc_skb_cb qdisc_cb;
+
+ u16 mru;
+ u8 post_ct:1;
+ u8 post_ct_snat:1;
+ u8 post_ct_dnat:1;
+ u16 zone; /* Only valid if post_ct = true */
+};
+
+static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
+{
+ struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb;
+
+ BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
+ return cb;
+}
+
+static inline bool tc_qdisc_stats_dump(struct Qdisc *sch,
+ unsigned long cl,
+ struct qdisc_walker *arg)
+{
+ if (arg->count >= arg->skip && arg->fn(sch, cl, arg) < 0) {
+ arg->stop = 1;
+ return false;
+ }
+
+ arg->count++;
+ return true;
+}
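tc_qdisc_stats_dump() folds the usual skip/count/stop bookkeeping of a
class walk into one helper. A minimal sketch of a qdisc .walk()
callback built on it, assuming a hypothetical qdisc whose classes are
numbered 1..foo_num_classes():

	static void foo_walk(struct Qdisc *sch, struct qdisc_walker *arg)
	{
		unsigned long cl;

		if (arg->stop)
			return;

		for (cl = 1; cl <= foo_num_classes(sch); cl++)	/* hypothetical */
			if (!tc_qdisc_stats_dump(sch, cl, arg))
				return;
	}
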
+
#endif
diff --git a/include/net/pptp.h b/include/net/pptp.h
index 383e25ca53a7..e63176bdd4c8 100644
--- a/include/net/pptp.h
+++ b/include/net/pptp.h
@@ -2,6 +2,9 @@
#ifndef _NET_PPTP_H
#define _NET_PPTP_H
+#include <linux/types.h>
+#include <net/gre.h>
+
#define PPP_LCP_ECHOREQ 0x09
#define PPP_LCP_ECHOREP 0x0A
#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 2b778e1d2d8f..6aef8cb11cc8 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -35,15 +35,12 @@
/* This is used to register protocols. */
struct net_protocol {
- int (*early_demux)(struct sk_buff *skb);
- int (*early_demux_handler)(struct sk_buff *skb);
int (*handler)(struct sk_buff *skb);
/* This returns an error if we weren't able to handle the error. */
int (*err_handler)(struct sk_buff *skb, u32 info);
unsigned int no_policy:1,
- netns_ok:1,
/* does the protocol do more stringent
* icmp tag validation than simple
* socket lookup?
@@ -53,8 +50,6 @@ struct net_protocol {
#if IS_ENABLED(CONFIG_IPV6)
struct inet6_protocol {
- void (*early_demux)(struct sk_buff *skb);
- void (*early_demux_handler)(struct sk_buff *skb);
int (*handler)(struct sk_buff *skb);
/* This returns an error if we weren't able to handle the error. */
diff --git a/include/net/psample.h b/include/net/psample.h
index 68ae16bb0a4a..0509d2d6be67 100644
--- a/include/net/psample.h
+++ b/include/net/psample.h
@@ -14,22 +14,35 @@ struct psample_group {
struct rcu_head rcu;
};
+struct psample_metadata {
+ u32 trunc_size;
+ int in_ifindex;
+ int out_ifindex;
+ u16 out_tc;
+ u64 out_tc_occ; /* bytes */
+ u64 latency; /* nanoseconds */
+ u8 out_tc_valid:1,
+ out_tc_occ_valid:1,
+ latency_valid:1,
+ unused:5;
+};
+
struct psample_group *psample_group_get(struct net *net, u32 group_num);
void psample_group_take(struct psample_group *group);
void psample_group_put(struct psample_group *group);
+struct sk_buff;
+
#if IS_ENABLED(CONFIG_PSAMPLE)
void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
- u32 trunc_size, int in_ifindex, int out_ifindex,
- u32 sample_rate);
+ u32 sample_rate, const struct psample_metadata *md);
#else
static inline void psample_sample_packet(struct psample_group *group,
- struct sk_buff *skb, u32 trunc_size,
- int in_ifindex, int out_ifindex,
- u32 sample_rate)
+ struct sk_buff *skb, u32 sample_rate,
+ const struct psample_metadata *md)
{
}
diff --git a/include/net/psnap.h b/include/net/psnap.h
index 7cb0c8ab4171..88802b0754ad 100644
--- a/include/net/psnap.h
+++ b/include/net/psnap.h
@@ -2,6 +2,11 @@
#ifndef _NET_PSNAP_H
#define _NET_PSNAP_H
+struct datalink_proto;
+struct sk_buff;
+struct packet_type;
+struct net_device;
+
struct datalink_proto *
register_snap_client(const unsigned char *desc,
int (*rcvfunc)(struct sk_buff *, struct net_device *,
diff --git a/include/net/raw.h b/include/net/raw.h
index 8ad8df594853..5e665934ebc7 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -20,9 +20,8 @@
extern struct proto raw_prot;
extern struct raw_hashinfo raw_v4_hashinfo;
-struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
- unsigned short num, __be32 raddr,
- __be32 laddr, int dif, int sdif);
+bool raw_v4_match(struct net *net, struct sock *sk, unsigned short num,
+ __be32 raddr, __be32 laddr, int dif, int sdif);
int raw_abort(struct sock *sk, int err);
void raw_icmp_error(struct sk_buff *, int, u32);
@@ -33,10 +32,19 @@ int raw_rcv(struct sock *, struct sk_buff *);
#define RAW_HTABLE_SIZE MAX_INET_PROTOS
struct raw_hashinfo {
- rwlock_t lock;
- struct hlist_head ht[RAW_HTABLE_SIZE];
+ spinlock_t lock;
+ struct hlist_nulls_head ht[RAW_HTABLE_SIZE];
};
+static inline void raw_hashinfo_init(struct raw_hashinfo *hashinfo)
+{
+ int i;
+
+ spin_lock_init(&hashinfo->lock);
+ for (i = 0; i < RAW_HTABLE_SIZE; i++)
+ INIT_HLIST_NULLS_HEAD(&hashinfo->ht[i], i);
+}
+
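Moving the buckets to hlist_nulls lets an RCU reader detect that a
socket was moved to another chain mid-walk: the nulls value closing
each list encodes the bucket index passed to INIT_HLIST_NULLS_HEAD()
above. A hedged reader-side sketch (not the exact raw.c lookup loop):

	struct hlist_nulls_head *hlist = &hashinfo->ht[hash];
	struct hlist_nulls_node *hnode;
	struct sock *sk;

	sk_nulls_for_each_rcu(sk, hnode, hlist) {
		if (raw_v4_match(net, sk, num, raddr, laddr, dif, sdif))
			break;	/* found a matching raw socket */
	}
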
#ifdef CONFIG_PROC_FS
int raw_proc_init(void);
void raw_proc_exit(void);
@@ -75,7 +83,7 @@ static inline bool raw_sk_bound_dev_eq(struct net *net, int bound_dev_if,
int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
- return inet_bound_dev_eq(!!net->ipv4.sysctl_raw_l3mdev_accept,
+ return inet_bound_dev_eq(READ_ONCE(net->ipv4.sysctl_raw_l3mdev_accept),
bound_dev_if, dif, sdif);
#else
return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
diff --git a/include/net/rawv6.h b/include/net/rawv6.h
index 53d86b6055e8..bc70909625f6 100644
--- a/include/net/rawv6.h
+++ b/include/net/rawv6.h
@@ -3,11 +3,12 @@
#define _NET_RAWV6_H
#include <net/protocol.h>
+#include <net/raw.h>
extern struct raw_hashinfo raw_v6_hashinfo;
-struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
- unsigned short num, const struct in6_addr *loc_addr,
- const struct in6_addr *rmt_addr, int dif, int sdif);
+bool raw_v6_match(struct net *net, struct sock *sk, unsigned short num,
+ const struct in6_addr *loc_addr,
+ const struct in6_addr *rmt_addr, int dif, int sdif);
int raw_abort(struct sock *sk, int err);
diff --git a/include/net/red.h b/include/net/red.h
index 9665582c4687..425364de0df7 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -122,7 +122,6 @@ struct red_stats {
u32 forced_drop; /* Forced drops, qavg > max_thresh */
u32 forced_mark; /* Forced marks, qavg > max_thresh */
u32 pdrop; /* Drops due to queue limits */
- u32 other; /* Drops due to drop() calls */
};
struct red_parms {
@@ -168,17 +167,65 @@ static inline void red_set_vars(struct red_vars *v)
v->qcount = -1;
}
-static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog)
+static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog,
+ u8 Scell_log, u8 *stab)
{
- if (fls(qth_min) + Wlog > 32)
+ if (fls(qth_min) + Wlog >= 32)
return false;
- if (fls(qth_max) + Wlog > 32)
+ if (fls(qth_max) + Wlog >= 32)
+ return false;
+ if (Scell_log >= 32)
return false;
if (qth_max < qth_min)
return false;
+ if (stab) {
+ int i;
+
+ for (i = 0; i < RED_STAB_SIZE; i++)
+ if (stab[i] >= 32)
+ return false;
+ }
return true;
}
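The bound tightened from > 32 to >= 32 because qth_min/qth_max are
later shifted left by Wlog and compared against the scaled average
queue length, and the shifted value needs headroom in 32 bits. A
worked example of a configuration the stricter check rejects:

	/* qth_max = 0x00040000 -> fls(qth_max) = 19; with Wlog = 13,
	 * 19 + 13 == 32, so qth_max << Wlog already occupies bit 31
	 * and the EWMA arithmetic on the scaled average can overflow.
	 * red_check_params() now returns false for this combination.
	 */
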
+static inline int red_get_flags(unsigned char qopt_flags,
+ unsigned char historic_mask,
+ struct nlattr *flags_attr,
+ unsigned char supported_mask,
+ struct nla_bitfield32 *p_flags,
+ unsigned char *p_userbits,
+ struct netlink_ext_ack *extack)
+{
+ struct nla_bitfield32 flags;
+
+ if (qopt_flags && flags_attr) {
+ NL_SET_ERR_MSG_MOD(extack, "flags should be passed either through qopt, or through a dedicated attribute");
+ return -EINVAL;
+ }
+
+ if (flags_attr) {
+ flags = nla_get_bitfield32(flags_attr);
+ } else {
+ flags.selector = historic_mask;
+ flags.value = qopt_flags & historic_mask;
+ }
+
+ *p_flags = flags;
+ *p_userbits = qopt_flags & ~historic_mask;
+ return 0;
+}
+
+static inline int red_validate_flags(unsigned char flags,
+ struct netlink_ext_ack *extack)
+{
+ if ((flags & TC_RED_NODROP) && !(flags & TC_RED_ECN)) {
+ NL_SET_ERR_MSG_MOD(extack, "nodrop mode is only meaningful with ECN");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
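The two helpers are meant to be used as a pair when parsing a
RED-style configuration: red_get_flags() reconciles the legacy qopt
bits with the newer bitfield32 netlink attribute, after which
red_validate_flags() rejects unsupported combinations. A minimal
sketch of the calling pattern; the FOO_* masks and TCA_FOO_FLAGS
attribute are hypothetical per-qdisc definitions:

	struct nla_bitfield32 flags_bf;
	unsigned char userbits;
	int err;

	err = red_get_flags(opt->flags, FOO_HISTORIC_FLAGS,
			    tb[TCA_FOO_FLAGS], FOO_SUPPORTED_FLAGS,
			    &flags_bf, &userbits, extack);
	if (err)
		return err;

	err = red_validate_flags(flags_bf.value, extack);
	if (err)
		return err;
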
static inline void red_set_parms(struct red_parms *p,
u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
u8 Scell_log, u8 *stab, u32 max_P)
@@ -247,7 +294,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms
int shift;
/*
- * The problem: ideally, average length queue recalcultion should
+	 * The problem: ideally, average queue length recalculation should
* be done over constant clock intervals. This is too expensive, so
* that the calculation is driven by outgoing packets.
* When the queue is idle we have to model this clock by hand.
@@ -316,7 +363,7 @@ static inline unsigned long red_calc_qavg(const struct red_parms *p,
static inline u32 red_random(const struct red_parms *p)
{
- return reciprocal_divide(prandom_u32(), p->max_P_reciprocal);
+ return reciprocal_divide(get_random_u32(), p->max_P_reciprocal);
}
static inline int red_mark_probability(const struct red_parms *p,
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index 3469750df0f4..896191f420d5 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -1,3 +1,4 @@
+
#ifndef __NET_REGULATORY_H
#define __NET_REGULATORY_H
/*
@@ -19,6 +20,8 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/ieee80211.h>
+#include <linux/nl80211.h>
#include <linux/rcupdate.h>
/**
@@ -44,7 +47,7 @@ enum environment_cap {
* and potentially inform users of which devices specifically
* caused the conflicts.
* @initiator: indicates who sent this request, could be any of
- * of those set in nl80211_reg_initiator (%NL80211_REGDOM_SET_BY_*)
+ * those set in nl80211_reg_initiator (%NL80211_REGDOM_SET_BY_*)
* @alpha2: the ISO / IEC 3166 alpha2 country code of the requested
* regulatory domain. We have a few special codes:
* 00 - World regulatory domain
@@ -231,13 +234,6 @@ struct ieee80211_regdomain {
struct ieee80211_reg_rule reg_rules[];
};
-#define MHZ_TO_KHZ(freq) ((freq) * 1000)
-#define KHZ_TO_MHZ(freq) ((freq) / 1000)
-#define DBI_TO_MBI(gain) ((gain) * 100)
-#define MBI_TO_DBI(gain) ((gain) / 100)
-#define DBM_TO_MBM(gain) ((gain) * 100)
-#define MBM_TO_DBM(gain) ((gain) / 100)
-
#define REG_RULE_EXT(start, end, bw, gain, eirp, dfs_cac, reg_flags) \
{ \
.freq_range.start_freq_khz = MHZ_TO_KHZ(start), \
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index cf8b33213bbc..144c39db9898 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -41,6 +41,13 @@ struct request_sock_ops {
int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);
+struct saved_syn {
+ u32 mac_hdrlen;
+ u32 network_hdrlen;
+ u32 tcp_hdrlen;
+ u8 data[];
+};
+
/* struct request_sock - mini sock to represent a connection request
*/
struct request_sock {
@@ -54,15 +61,16 @@ struct request_sock {
struct request_sock *dl_next;
u16 mss;
u8 num_retrans; /* number of retransmits */
- u8 cookie_ts:1; /* syncookie: encode tcpopts in timestamp */
+ u8 syncookie:1; /* syncookie: encode tcpopts in timestamp */
u8 num_timeout:7; /* number of timeouts */
u32 ts_recent;
struct timer_list rsk_timer;
const struct request_sock_ops *rsk_ops;
struct sock *sk;
- u32 *saved_syn;
+ struct saved_syn *saved_syn;
u32 secid;
u32 peer_secid;
+ u32 timeout;
};
static inline struct request_sock *inet_reqsk(const struct sock *sk)
@@ -97,6 +105,7 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
sk_node_init(&req_to_sk(req)->sk_node);
sk_tx_queue_clear(req_to_sk(req));
req->saved_syn = NULL;
+ req->timeout = 0;
req->num_timeout = 0;
req->num_retrans = 0;
req->sk = NULL;
diff --git a/include/net/rose.h b/include/net/rose.h
index cf517d306a28..23267b4efcfa 100644
--- a/include/net/rose.h
+++ b/include/net/rose.h
@@ -9,6 +9,7 @@
#define _ROSE_H
#include <linux/rose.h>
+#include <net/ax25.h>
#include <net/sock.h>
#define ROSE_ADDR_LEN 5
@@ -131,7 +132,8 @@ struct rose_sock {
ax25_address source_digis[ROSE_MAX_DIGIS];
ax25_address dest_digis[ROSE_MAX_DIGIS];
struct rose_neigh *neighbour;
- struct net_device *device;
+ struct net_device *device;
+ netdevice_tracker dev_tracker;
unsigned int lci, rand;
unsigned char state, condition, qbitincl, defer;
unsigned char cause, diagnostic;
@@ -162,8 +164,8 @@ extern int sysctl_rose_link_fail_timeout;
extern int sysctl_rose_maximum_vcs;
extern int sysctl_rose_window_size;
-int rosecmp(rose_address *, rose_address *);
-int rosecmpm(rose_address *, rose_address *, unsigned short);
+int rosecmp(const rose_address *, const rose_address *);
+int rosecmpm(const rose_address *, const rose_address *, unsigned short);
char *rose2asc(char *buf, const rose_address *);
struct sock *rose_find_socket(unsigned int, struct rose_neigh *);
void rose_kill_by_neigh(struct rose_neigh *);
@@ -205,8 +207,8 @@ extern const struct seq_operations rose_node_seqops;
extern struct seq_operations rose_route_seqops;
void rose_add_loopback_neigh(void);
-int __must_check rose_add_loopback_node(rose_address *);
-void rose_del_loopback_node(rose_address *);
+int __must_check rose_add_loopback_node(const rose_address *);
+void rose_del_loopback_node(const rose_address *);
void rose_rt_device_down(struct net_device *);
void rose_link_device_down(struct net_device *);
struct net_device *rose_dev_first(void);
diff --git a/include/net/route.h b/include/net/route.h
index a9c60fc68e36..6e92dd5bcd61 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -43,6 +43,20 @@
#define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE))
#define RT_CONN_FLAGS_TOS(sk,tos) (RT_TOS(tos) | sock_flag(sk, SOCK_LOCALROUTE))
+static inline __u8 ip_sock_rt_scope(const struct sock *sk)
+{
+ if (sock_flag(sk, SOCK_LOCALROUTE))
+ return RT_SCOPE_LINK;
+
+ return RT_SCOPE_UNIVERSE;
+}
+
+static inline __u8 ip_sock_rt_tos(const struct sock *sk)
+{
+ return RT_TOS(inet_sk(sk)->tos);
+}
+
+struct ip_tunnel_info;
struct fib_nh;
struct fib_info;
struct uncached_list;
@@ -128,6 +142,12 @@ static inline struct rtable *__ip_route_output_key(struct net *net,
struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
const struct sock *sk);
+struct rtable *ip_route_output_tunnel(struct sk_buff *skb,
+ struct net_device *dev,
+ struct net *net, __be32 *saddr,
+ const struct ip_tunnel_info *info,
+ u8 protocol, bool use_cache);
+
struct dst_entry *ipv4_blackhole_route(struct net *net,
struct dst_entry *dst_orig);
@@ -159,7 +179,7 @@ static inline struct rtable *ip_route_output_ports(struct net *net, struct flowi
sk ? inet_sk_flowi_flags(sk) : 0,
daddr, saddr, dport, sport, sock_net_uid(net, sk));
if (sk)
- security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
+ security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
return ip_route_output_flow(net, fl4, sk);
}
@@ -181,10 +201,6 @@ int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
struct in_device *in_dev, u32 *itag);
int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
u8 tos, struct net_device *devin);
-int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src,
- u8 tos, struct net_device *devin,
- struct fib_result *res);
-
int ip_route_use_hint(struct sk_buff *skb, __be32 dst, __be32 src,
u8 tos, struct net_device *devin,
const struct sk_buff *hint);
@@ -224,8 +240,7 @@ void ip_rt_multicast_event(struct in_device *);
int ip_rt_ioctl(struct net *, unsigned int cmd, struct rtentry *rt);
void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
struct rtable *rt_dst_alloc(struct net_device *dev,
- unsigned int flags, u16 type,
- bool nopolicy, bool noxfrm, bool will_cache);
+ unsigned int flags, u16 type, bool noxfrm);
struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt);
struct in_ifaddr;
@@ -282,41 +297,40 @@ static inline char rt_tos2priority(u8 tos)
* ip_route_newports() calls.
*/
-static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32 src,
- u32 tos, int oif, u8 protocol,
+static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst,
+ __be32 src, int oif, u8 protocol,
__be16 sport, __be16 dport,
- struct sock *sk)
+ const struct sock *sk)
{
__u8 flow_flags = 0;
if (inet_sk(sk)->transparent)
flow_flags |= FLOWI_FLAG_ANYSRC;
- flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
- protocol, flow_flags, dst, src, dport, sport,
- sk->sk_uid);
+ flowi4_init_output(fl4, oif, sk->sk_mark, ip_sock_rt_tos(sk),
+ ip_sock_rt_scope(sk), protocol, flow_flags, dst,
+ src, dport, sport, sk->sk_uid);
}
-static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
- __be32 dst, __be32 src, u32 tos,
- int oif, u8 protocol,
+static inline struct rtable *ip_route_connect(struct flowi4 *fl4, __be32 dst,
+ __be32 src, int oif, u8 protocol,
__be16 sport, __be16 dport,
struct sock *sk)
{
struct net *net = sock_net(sk);
struct rtable *rt;
- ip_route_connect_init(fl4, dst, src, tos, oif, protocol,
- sport, dport, sk);
+ ip_route_connect_init(fl4, dst, src, oif, protocol, sport, dport, sk);
if (!dst || !src) {
rt = __ip_route_output_key(net, fl4);
if (IS_ERR(rt))
return rt;
ip_rt_put(rt);
- flowi4_update_output(fl4, oif, tos, fl4->daddr, fl4->saddr);
+ flowi4_update_output(fl4, oif, fl4->flowi4_tos, fl4->daddr,
+ fl4->saddr);
}
- security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
+ security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
return ip_route_output_flow(net, fl4, sk);
}
@@ -332,7 +346,7 @@ static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable
flowi4_update_output(fl4, sk->sk_bound_dev_if,
RT_CONN_FLAGS(sk), fl4->daddr,
fl4->saddr);
- security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
+ security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
return ip_route_output_flow(sock_net(sk), fl4, sk);
}
return rt;
@@ -354,7 +368,7 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
struct net *net = dev_net(dst->dev);
if (hoplimit == 0)
- hoplimit = net->ipv4.sysctl_ip_default_ttl;
+ hoplimit = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
return hoplimit;
}
@@ -363,7 +377,7 @@ static inline struct neighbour *ip_neigh_gw4(struct net_device *dev,
{
struct neighbour *neigh;
- neigh = __ipv4_neigh_lookup_noref(dev, daddr);
+ neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)daddr);
if (unlikely(!neigh))
neigh = __neigh_create(&arp_tbl, &daddr, dev, false);
diff --git a/include/net/rpl.h b/include/net/rpl.h
new file mode 100644
index 000000000000..308ef0a05cae
--- /dev/null
+++ b/include/net/rpl.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RPL implementation
+ *
+ * Author:
+ * (C) 2020 Alexander Aring <alex.aring@gmail.com>
+ */
+
+#ifndef _NET_RPL_H
+#define _NET_RPL_H
+
+#include <linux/rpl.h>
+
+#if IS_ENABLED(CONFIG_IPV6_RPL_LWTUNNEL)
+extern int rpl_init(void);
+extern void rpl_exit(void);
+#else
+static inline int rpl_init(void)
+{
+ return 0;
+}
+
+static inline void rpl_exit(void) {}
+#endif
+
+/* Worst-case decompression memory usage: ipv6 address (16) + pad 7 */
+#define IPV6_RPL_SRH_WORST_SWAP_SIZE (sizeof(struct in6_addr) + 7)
+
+size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri,
+ unsigned char cmpre);
+
+void ipv6_rpl_srh_decompress(struct ipv6_rpl_sr_hdr *outhdr,
+ const struct ipv6_rpl_sr_hdr *inhdr,
+ const struct in6_addr *daddr, unsigned char n);
+
+void ipv6_rpl_srh_compress(struct ipv6_rpl_sr_hdr *outhdr,
+ const struct ipv6_rpl_sr_hdr *inhdr,
+ const struct in6_addr *daddr, unsigned char n);
+
+#endif /* _NET_RPL_H */
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index e2091bb2b3a8..bf8bb3357825 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -10,9 +10,23 @@ typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *,
typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
enum rtnl_link_flags {
- RTNL_FLAG_DOIT_UNLOCKED = 1,
+ RTNL_FLAG_DOIT_UNLOCKED = BIT(0),
+ RTNL_FLAG_BULK_DEL_SUPPORTED = BIT(1),
};
+enum rtnl_kinds {
+ RTNL_KIND_NEW,
+ RTNL_KIND_DEL,
+ RTNL_KIND_GET,
+ RTNL_KIND_SET
+};
+#define RTNL_KIND_MASK 0x3
+
+static inline enum rtnl_kinds rtnl_msgtype_kind(int msgtype)
+{
+ return msgtype & RTNL_KIND_MASK;
+}
+
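The two-bit mask works because rtnetlink message types are laid out in
NEW/DEL/GET/SET groups of four, for example:

	rtnl_msgtype_kind(RTM_NEWLINK);	/* 16 & 0x3 == 0 -> RTNL_KIND_NEW */
	rtnl_msgtype_kind(RTM_DELLINK);	/* 17 & 0x3 == 1 -> RTNL_KIND_DEL */
	rtnl_msgtype_kind(RTM_GETLINK);	/* 18 & 0x3 == 2 -> RTNL_KIND_GET */
	rtnl_msgtype_kind(RTM_SETLINK);	/* 19 & 0x3 == 3 -> RTNL_KIND_SET */
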
void rtnl_register(int protocol, int msgtype,
rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
int rtnl_register_module(struct module *owner, int protocol, int msgtype,
@@ -33,9 +47,13 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
*
* @list: Used internally
* @kind: Identifier
+ * @netns_refund: Physical device, move to init_net on netns exit
* @maxtype: Highest device specific netlink attribute number
* @policy: Netlink policy for device specific attribute validation
* @validate: Optional validation function for netlink/changelink parameters
+ * @alloc: netdev allocation function, may be %NULL; when set, it is used
+ * in place of alloc_netdev_mqs(), in which case @priv_size
+ * and @setup are unused. Returns a netdev or ERR_PTR().
* @priv_size: sizeof net_device private space
* @setup: net_device setup function
* @newlink: Function for configuring and registering a new device
@@ -62,8 +80,14 @@ struct rtnl_link_ops {
const char *kind;
size_t priv_size;
+ struct net_device *(*alloc)(struct nlattr *tb[],
+ const char *ifname,
+ unsigned char name_assign_type,
+ unsigned int num_tx_queues,
+ unsigned int num_rx_queues);
void (*setup)(struct net_device *dev);
+ bool netns_refund;
unsigned int maxtype;
const struct nla_policy *policy;
int (*validate)(struct nlattr *tb[],
@@ -143,10 +167,11 @@ struct rtnl_af_ops {
u32 ext_filter_mask);
int (*validate_link_af)(const struct net_device *dev,
- const struct nlattr *attr);
+ const struct nlattr *attr,
+ struct netlink_ext_ack *extack);
int (*set_link_af)(struct net_device *dev,
- const struct nlattr *attr);
-
+ const struct nlattr *attr,
+ struct netlink_ext_ack *extack);
int (*fill_stats_af)(struct sk_buff *skb,
const struct net_device *dev);
size_t (*get_stats_af_size)(const struct net_device *dev);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 151208704ed2..d5517719af4e 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -36,8 +36,23 @@ struct qdisc_rate_table {
enum qdisc_state_t {
__QDISC_STATE_SCHED,
__QDISC_STATE_DEACTIVATED,
+ __QDISC_STATE_MISSED,
+ __QDISC_STATE_DRAINING,
};
+enum qdisc_state2_t {
+ /* Only for !TCQ_F_NOLOCK qdisc. Never access it directly.
+ * Use qdisc_run_begin/end() or qdisc_is_running() instead.
+ */
+ __QDISC_STATE2_RUNNING,
+};
+
+#define QDISC_STATE_MISSED BIT(__QDISC_STATE_MISSED)
+#define QDISC_STATE_DRAINING BIT(__QDISC_STATE_DRAINING)
+
+#define QDISC_STATE_NON_EMPTY (QDISC_STATE_MISSED | \
+ QDISC_STATE_DRAINING)
+
struct qdisc_size_table {
struct rcu_head rcu;
struct list_head list;
@@ -89,9 +104,9 @@ struct Qdisc {
struct netdev_queue *dev_queue;
struct net_rate_estimator __rcu *rate_est;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
- int padded;
+ int pad;
refcount_t refcnt;
/*
@@ -99,19 +114,20 @@ struct Qdisc {
*/
struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
struct qdisc_skb_head q;
- struct gnet_stats_basic_packed bstats;
- seqcount_t running;
+ struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats;
unsigned long state;
+ unsigned long state2; /* must be written under qdisc spinlock */
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
spinlock_t busylock ____cacheline_aligned_in_smp;
spinlock_t seqlock;
- /* for NOLOCK qdisc, true if there are no enqueued skbs */
- bool empty;
struct rcu_head rcu;
+ netdevice_tracker dev_tracker;
+ /* private data */
+ long privdata[] ____cacheline_aligned;
};
static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
@@ -134,11 +150,20 @@ static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
return NULL;
}
+/* For !TCQ_F_NOLOCK qdisc: callers must either call this within a qdisc
+ * root_lock section, or provide their own memory barriers -- ordering
+ * against qdisc_run_begin/end() atomic bit operations.
+ */
static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_NOLOCK)
return spin_is_locked(&qdisc->seqlock);
- return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
+ return test_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
+}
+
+static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
+{
+ return !(READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY);
}
static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
@@ -149,32 +174,53 @@ static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
if (qdisc_is_percpu_stats(qdisc))
- return READ_ONCE(qdisc->empty);
+ return nolock_qdisc_is_empty(qdisc);
return !READ_ONCE(qdisc->q.qlen);
}
+/* For !TCQ_F_NOLOCK qdisc, qdisc_run_begin/end() must be invoked with
+ * the qdisc root lock acquired.
+ */
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_NOLOCK) {
- if (!spin_trylock(&qdisc->seqlock))
+ if (spin_trylock(&qdisc->seqlock))
+ return true;
+
+ /* No need to insist if the MISSED flag was already set.
+ * Note that test_and_set_bit() also gives us memory ordering
+ * guarantees wrt potential earlier enqueue() and below
+ * spin_trylock(), both of which are necessary to prevent races
+ */
+ if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
return false;
- WRITE_ONCE(qdisc->empty, false);
- } else if (qdisc_is_running(qdisc)) {
- return false;
+
+ /* Try to take the lock again to make sure that we will either
+ * grab it or the CPU that still has it will see MISSED set
+ * when testing it in qdisc_run_end()
+ */
+ return spin_trylock(&qdisc->seqlock);
}
- /* Variant of write_seqcount_begin() telling lockdep a trylock
- * was attempted.
- */
- raw_write_seqcount_begin(&qdisc->running);
- seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
- return true;
+ return !__test_and_set_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}
static inline void qdisc_run_end(struct Qdisc *qdisc)
{
- write_seqcount_end(&qdisc->running);
- if (qdisc->flags & TCQ_F_NOLOCK)
+ if (qdisc->flags & TCQ_F_NOLOCK) {
spin_unlock(&qdisc->seqlock);
+
+ /* spin_unlock() only has store-release semantic. The unlock
+ * and test_bit() ordering is a store-load ordering, so a full
+ * memory barrier is needed here.
+ */
+ smp_mb();
+
+ if (unlikely(test_bit(__QDISC_STATE_MISSED,
+ &qdisc->state)))
+ __netif_schedule(qdisc);
+ } else {
+ __clear_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
+ }
}
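The trylock/MISSED handshake above is easiest to read as an
interleaving; a sketch of the race it closes on a NOLOCK qdisc:

	/* CPU0 (new xmit)                CPU1 (current owner)
	 *
	 * qdisc_run_begin()
	 *   spin_trylock() fails
	 *   test_and_set_bit(MISSED)
	 *   spin_trylock() fails again
	 *   -> return false              qdisc_run_end()
	 *                                  spin_unlock()
	 *                                  smp_mb()
	 *                                  test_bit(MISSED) is set
	 *                                  -> __netif_schedule(qdisc)
	 *
	 * Whichever CPU wins the second trylock, the packet queued by
	 * CPU0 is guaranteed to be picked up by somebody.
	 */
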
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
@@ -207,7 +253,8 @@ struct Qdisc_class_ops {
int (*change)(struct Qdisc *, u32, u32,
struct nlattr **, unsigned long *,
struct netlink_ext_ack *);
- int (*delete)(struct Qdisc *, unsigned long);
+ int (*delete)(struct Qdisc *, unsigned long,
+ struct netlink_ext_ack *);
void (*walk)(struct Qdisc *, struct qdisc_walker * arg);
/* Filter manipulation */
@@ -254,6 +301,8 @@ struct Qdisc_ops {
struct netlink_ext_ack *extack);
void (*attach)(struct Qdisc *sch);
int (*change_tx_queue_len)(struct Qdisc *, unsigned int);
+ void (*change_real_num_tx)(struct Qdisc *sch,
+ unsigned int new_real_tx);
int (*dump)(struct Qdisc *, struct sk_buff *);
int (*dump_stats)(struct Qdisc *, struct gnet_dump *);
@@ -277,11 +326,6 @@ struct tcf_result {
};
const struct tcf_proto *goto_tp;
- /* used in the skb_tc_reinsert function */
- struct {
- bool ingress;
- struct gnet_stats_queue *qstats;
- };
};
};
@@ -303,7 +347,7 @@ struct tcf_proto_ops {
int (*change)(struct net *net, struct sk_buff *,
struct tcf_proto*, unsigned long,
u32 handle, struct nlattr **,
- void **, bool, bool,
+ void **, u32,
struct netlink_ext_ack *);
int (*delete)(struct tcf_proto *tp, void *arg,
bool *last, bool rtnl_held,
@@ -330,6 +374,10 @@ struct tcf_proto_ops {
int (*dump)(struct net*, struct tcf_proto*, void *,
struct sk_buff *skb, struct tcmsg*,
bool);
+ int (*terse_dump)(struct net *net,
+ struct tcf_proto *tp, void *fh,
+ struct sk_buff *skb,
+ struct tcmsg *t, bool rtnl_held);
int (*tmplt_dump)(struct sk_buff *skb,
struct net *net,
void *tmplt_priv);
@@ -407,6 +455,7 @@ struct tcf_block {
struct mutex lock;
struct list_head chain_list;
u32 index; /* block index for shared blocks */
+ u32 classid; /* which class this block belongs to */
refcount_t refcnt;
struct net *net;
struct Qdisc *q;
@@ -426,7 +475,6 @@ struct tcf_block {
struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
};
-#ifdef CONFIG_PROVE_LOCKING
static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
return lockdep_is_held(&chain->filter_chain_lock);
@@ -436,17 +484,6 @@ static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
return lockdep_is_held(&tp->lock);
}
-#else
-static inline bool lockdep_tcf_chain_is_locked(struct tcf_block *chain)
-{
- return true;
-}
-
-static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
-{
- return true;
-}
-#endif /* #ifdef CONFIG_PROVE_LOCKING */
#define tcf_chain_dereference(p, chain) \
rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))
@@ -458,15 +495,10 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
struct qdisc_skb_cb *qcb;
- BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
+ BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
-static inline int qdisc_qlen_cpu(const struct Qdisc *q)
-{
- return this_cpu_ptr(q->cpu_qstats)->qlen;
-}
-
static inline int qdisc_qlen(const struct Qdisc *q)
{
return q->q.qlen;
@@ -514,25 +546,6 @@ static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
return qdisc->dev_queue->qdisc_sleeping;
}
-/* The qdisc root lock is a mechanism by which to top level
- * of a qdisc tree can be locked from any qdisc node in the
- * forest. This allows changing the configuration of some
- * aspect of the qdisc tree while blocking out asynchronous
- * qdisc access in the packet processing paths.
- *
- * It is only legal to do this when the root will not change
- * on us. Otherwise we'll potentially lock the wrong qdisc
- * root. This is enforced by holding the RTNL semaphore, which
- * all users of this lock accessor must do.
- */
-static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
-{
- struct Qdisc *root = qdisc_root(qdisc);
-
- ASSERT_RTNL();
- return qdisc_lock(root);
-}
-
static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
struct Qdisc *root = qdisc_root_sleeping(qdisc);
@@ -541,27 +554,25 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
return qdisc_lock(root);
}
-static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
-{
- struct Qdisc *root = qdisc_root_sleeping(qdisc);
-
- ASSERT_RTNL();
- return &root->running;
-}
-
static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->dev;
}
-static inline void sch_tree_lock(const struct Qdisc *q)
+static inline void sch_tree_lock(struct Qdisc *q)
{
- spin_lock_bh(qdisc_root_sleeping_lock(q));
+ if (q->flags & TCQ_F_MQROOT)
+ spin_lock_bh(qdisc_lock(q));
+ else
+ spin_lock_bh(qdisc_root_sleeping_lock(q));
}
-static inline void sch_tree_unlock(const struct Qdisc *q)
+static inline void sch_tree_unlock(struct Qdisc *q)
{
- spin_unlock_bh(qdisc_root_sleeping_lock(q));
+ if (q->flags & TCQ_F_MQROOT)
+ spin_unlock_bh(qdisc_lock(q));
+ else
+ spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
extern struct Qdisc noop_qdisc;
@@ -629,6 +640,8 @@ void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
int dev_qdisc_change_tx_queue_len(struct net_device *dev);
+void dev_qdisc_change_real_num_tx(struct net_device *dev,
+ unsigned int new_real_tx);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
@@ -664,6 +677,9 @@ qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
{
}
#endif
+void qdisc_offload_query_caps(struct net_device *dev,
+ enum tc_setup_type type,
+ void *caps, size_t caps_len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops,
struct netlink_ext_ack *extack);
@@ -675,22 +691,6 @@ void __qdisc_calculate_pkt_len(struct sk_buff *skb,
const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);
-static inline void skb_reset_tc(struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_CLS_ACT
- skb->tc_redirected = 0;
-#endif
-}
-
-static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_CLS_ACT
- return skb->tc_redirected;
-#else
- return false;
-#endif
-}
-
static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
@@ -726,11 +726,6 @@ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
}
}
-static inline void qdisc_reset_all_tx(struct net_device *dev)
-{
- qdisc_reset_all_tx_gt(dev, 0);
-}
-
/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
@@ -811,14 +806,16 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return sch->enqueue(skb, sch, to_free);
}
-static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
+static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
__u64 bytes, __u32 packets)
{
- bstats->bytes += bytes;
- bstats->packets += packets;
+ u64_stats_update_begin(&bstats->syncp);
+ u64_stats_add(&bstats->bytes, bytes);
+ u64_stats_add(&bstats->packets, packets);
+ u64_stats_update_end(&bstats->syncp);
}
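On the reader side, gnet_stats_basic_sync counters are sampled under
the same syncp sequence so 64-bit values stay consistent on 32-bit
machines. A minimal sketch of a consistent read (a hypothetical
helper, not one this patch adds):

	static inline u64 foo_bstats_bytes(const struct gnet_stats_basic_sync *b)
	{
		unsigned int start;
		u64 bytes;

		do {
			start = u64_stats_fetch_begin(&b->syncp);
			bytes = u64_stats_read(&b->bytes);
		} while (u64_stats_fetch_retry(&b->syncp, start));

		return bytes;
	}
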
-static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
+static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
const struct sk_buff *skb)
{
_bstats_update(bstats,
@@ -826,26 +823,10 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}
-static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
- __u64 bytes, __u32 packets)
-{
- u64_stats_update_begin(&bstats->syncp);
- _bstats_update(&bstats->bstats, bytes, packets);
- u64_stats_update_end(&bstats->syncp);
-}
-
-static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
- const struct sk_buff *skb)
-{
- u64_stats_update_begin(&bstats->syncp);
- bstats_update(&bstats->bstats, skb);
- u64_stats_update_end(&bstats->syncp);
-}
-
static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
const struct sk_buff *skb)
{
- bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
+ bstats_update(this_cpu_ptr(sch->cpu_bstats), skb);
}
static inline void qdisc_bstats_update(struct Qdisc *sch,
@@ -934,10 +915,9 @@ static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
__u32 *backlog)
{
struct gnet_stats_queue qstats = { 0 };
- __u32 len = qdisc_qlen_sum(sch);
- __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
- *qlen = qstats.qlen;
+ gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats);
+ *qlen = qstats.qlen + qdisc_qlen(sch);
*backlog = qstats.backlog;
}
@@ -958,13 +938,6 @@ static inline void qdisc_purge_queue(struct Qdisc *sch)
qdisc_tree_reduce_backlog(sch, qlen, backlog);
}
-static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
-{
- qh->head = NULL;
- qh->tail = NULL;
- qh->qlen = 0;
-}
-
static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
struct qdisc_skb_head *qh)
{
@@ -1062,12 +1035,6 @@ static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
return 0;
}
-static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
- struct sk_buff **to_free)
-{
- return __qdisc_queue_drop_head(sch, &sch->q, to_free);
-}
-
static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
const struct qdisc_skb_head *qh = &sch->q;
@@ -1161,7 +1128,6 @@ static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
static inline void qdisc_reset_queue(struct Qdisc *sch)
{
__qdisc_reset_queue(&sch->q);
- sch->qstats.backlog = 0;
}
static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
@@ -1173,7 +1139,7 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
old = *pold;
*pold = new;
if (old != NULL)
- qdisc_tree_flush_backlog(old);
+ qdisc_purge_queue(old);
sch_tree_unlock(sch);
return old;
@@ -1230,6 +1196,7 @@ struct psched_ratecfg {
u64 rate_bytes_ps; /* bytes per second */
u32 mult;
u16 overhead;
+ u16 mpu;
u8 linklayer;
u8 shift;
};
@@ -1239,6 +1206,9 @@ static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
{
len += r->overhead;
+ if (len < r->mpu)
+ len = r->mpu;
+
if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;
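The new mpu clamp bills undersized frames at the link's minimum packet
unit before the length is converted to transmission time; on ATM the
clamped length is additionally rounded up to whole 53-byte cells. A
worked example with mpu = 64, zero overhead, and a 40-byte TCP ACK:

	/* len = max(40, 64) = 64 bytes
	 * Ethernet: t = 64 * r->mult >> r->shift
	 * ATM:      DIV_ROUND_UP(64, 48) = 2 cells -> 2 * 53 = 106 bytes
	 *           t = 106 * r->mult >> r->shift
	 */
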
@@ -1261,23 +1231,39 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
res->rate = min_t(u64, r->rate_bytes_ps, ~0U);
res->overhead = r->overhead;
+ res->mpu = r->mpu;
res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}
+struct psched_pktrate {
+ u64 rate_pkts_ps; /* packets per second */
+ u32 mult;
+ u8 shift;
+};
+
+static inline u64 psched_pkt2t_ns(const struct psched_pktrate *r,
+ unsigned int pkt_num)
+{
+ return ((u64)pkt_num * r->mult) >> r->shift;
+}
+
+void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);
+
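psched_ppscfg_precompute() folds a packets-per-second rate into the
mult/shift pair so the per-packet transmission budget above costs one
multiply and one shift, mirroring what psched_ratecfg_precompute()
does for byte rates. A minimal usage sketch:

	struct psched_pktrate ppsrate;
	u64 delay_ns;

	psched_ppscfg_precompute(&ppsrate, 1000);	/* 1000 packets/sec */
	delay_ns = psched_pkt2t_ns(&ppsrate, 1);	/* ~1 ms per packet */
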
/* Mini Qdisc serves the specific needs of the ingress/clsact Qdisc.
 * The fast path only needs to access the filter list and update stats
*/
struct mini_Qdisc {
struct tcf_proto *filter_list;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ struct tcf_block *block;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
- struct rcu_head rcu;
+ unsigned long rcu_state;
};
static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
const struct sk_buff *skb)
{
- bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
+ bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}
static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
@@ -1295,10 +1281,11 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
struct mini_Qdisc __rcu **p_miniq);
+void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
+ struct tcf_block *block);
-static inline int skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
-{
- return res->ingress ? netif_receive_skb(skb) : dev_queue_xmit(skb);
-}
+void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);
+
+int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));
#endif
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index e8df72e1627a..2058fabffbf6 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -59,6 +59,7 @@ enum sctp_verb {
SCTP_CMD_HB_TIMERS_START, /* Start the heartbeat timers. */
SCTP_CMD_HB_TIMER_UPDATE, /* Update a heartbeat timer. */
SCTP_CMD_HB_TIMERS_STOP, /* Stop the heartbeat timers. */
+ SCTP_CMD_PROBE_TIMER_UPDATE, /* Update a probe timer. */
SCTP_CMD_TRANSPORT_HB_SENT, /* Reset the status of a transport. */
SCTP_CMD_TRANSPORT_IDLE, /* Do manipulations on idle transport */
SCTP_CMD_TRANSPORT_ON, /* Mark the transport as active. */
@@ -68,7 +69,6 @@ enum sctp_verb {
SCTP_CMD_ASSOC_FAILED, /* Handle association failure. */
SCTP_CMD_DISCARD_PACKET, /* Discard the whole packet. */
SCTP_CMD_GEN_SHUTDOWN, /* Generate a SHUTDOWN chunk. */
- SCTP_CMD_UPDATE_ASSOC, /* Update association information. */
SCTP_CMD_PURGE_OUTQUEUE, /* Purge all data waiting to be sent. */
SCTP_CMD_SETUP_T2, /* Hi-level, setup T2-shutdown parms. */
SCTP_CMD_RTO_PENDING, /* Set transport's rto_pending. */
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 15b4d9aec7ff..5859e0a16a58 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -77,6 +77,7 @@ enum sctp_event_timeout {
SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD,
SCTP_EVENT_TIMEOUT_HEARTBEAT,
SCTP_EVENT_TIMEOUT_RECONF,
+ SCTP_EVENT_TIMEOUT_PROBE,
SCTP_EVENT_TIMEOUT_SACK,
SCTP_EVENT_TIMEOUT_AUTOCLOSE,
};
@@ -200,6 +201,23 @@ enum sctp_sock_state {
SCTP_SS_CLOSING = TCP_CLOSE_WAIT,
};
+enum sctp_plpmtud_state {
+ SCTP_PL_DISABLED,
+ SCTP_PL_BASE,
+ SCTP_PL_SEARCH,
+ SCTP_PL_COMPLETE,
+ SCTP_PL_ERROR,
+};
+
+#define SCTP_BASE_PLPMTU 1200
+#define SCTP_MAX_PLPMTU 9000
+#define SCTP_MIN_PLPMTU 512
+
+#define SCTP_MAX_PROBES 3
+
+#define SCTP_PL_BIG_STEP 32
+#define SCTP_PL_MIN_STEP 4
+
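Roughly, the PLPMTUD probe state machine walks these constants as
follows; this is a sketch only, see sctp_transport_pl_send() and
sctp_transport_pl_recv() for the authoritative logic:

	/* BASE:     probe at SCTP_BASE_PLPMTU (1200); after SCTP_MAX_PROBES
	 *           failed probes enter ERROR and fall back to
	 *           SCTP_MIN_PLPMTU (512).
	 * SEARCH:   each acked probe raises probe_size by SCTP_PL_BIG_STEP
	 *           (32), switching to SCTP_PL_MIN_STEP (4) near the upper
	 *           bound, capped at SCTP_MAX_PLPMTU (9000).
	 * COMPLETE: the search has converged and pmtu only changes again
	 *           on fresh probe evidence.
	 */
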
/* These functions map various type to printable names. */
const char *sctp_cname(const union sctp_subtype id); /* chunk types */
const char *sctp_oname(const union sctp_subtype id); /* other events */
@@ -286,6 +304,8 @@ enum { SCTP_MAX_GABS = 16 };
* functions simpler to write.
*/
+#define SCTP_DEFAULT_UDP_PORT 9899 /* default UDP tunneling port */
+
/* These are the values for pf exposure; UNUSED keeps compatibility with old
* applications by default.
*/
@@ -340,8 +360,7 @@ enum {
#define SCTP_SCOPE_POLICY_MAX SCTP_SCOPE_POLICY_LINK
/* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>,
- * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24,
- * 192.88.99.0/24.
+ * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24.
* Also, RFC 8.4, non-unicast addresses are not considered valid SCTP
* addresses.
*/
@@ -349,15 +368,16 @@ enum {
((htonl(INADDR_BROADCAST) == a) || \
ipv4_is_multicast(a) || \
ipv4_is_zeronet(a) || \
- ipv4_is_test_198(a) || \
ipv4_is_anycast_6to4(a))
/* Flags used for the bind address copy functions. */
-#define SCTP_ADDR6_ALLOWED 0x00000001 /* IPv6 address is allowed by
+#define SCTP_ADDR4_ALLOWED 0x00000001 /* IPv4 address is allowed by
local sock family */
-#define SCTP_ADDR4_PEERSUPP 0x00000002 /* IPv4 address is supported by
+#define SCTP_ADDR6_ALLOWED 0x00000002 /* IPv6 address is allowed by
+ local sock family */
+#define SCTP_ADDR4_PEERSUPP 0x00000004 /* IPv4 address is supported by
peer */
-#define SCTP_ADDR6_PEERSUPP 0x00000004 /* IPv6 address is supported by
+#define SCTP_ADDR6_PEERSUPP 0x00000008 /* IPv6 address is supported by
peer */
/* Reasons to retransmit. */
@@ -420,4 +440,6 @@ enum {
*/
#define SCTP_AUTH_RANDOM_LENGTH 32
+#define SCTP_PROBE_TIMER_MIN 5000
+
#endif /* __sctp_constants_h__ */
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 3ab5c6bbb90b..a04999ee99b0 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -84,6 +84,8 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *addr,
struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
int sctp_register_pf(struct sctp_pf *, sa_family_t);
void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
+int sctp_udp_sock_start(struct net *net);
+void sctp_udp_sock_stop(struct net *net);
/*
* sctp/socket.c
@@ -101,21 +103,20 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
struct sctp_association *asoc);
extern struct percpu_counter sctp_sockets_allocated;
int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
-struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
+struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int *);
+typedef int (*sctp_callback_t)(struct sctp_endpoint *, struct sctp_transport *, void *);
void sctp_transport_walk_start(struct rhashtable_iter *iter);
void sctp_transport_walk_stop(struct rhashtable_iter *iter);
struct sctp_transport *sctp_transport_get_next(struct net *net,
struct rhashtable_iter *iter);
struct sctp_transport *sctp_transport_get_idx(struct net *net,
struct rhashtable_iter *iter, int pos);
-int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
- struct net *net,
+int sctp_transport_lookup_process(sctp_callback_t cb, struct net *net,
const union sctp_addr *laddr,
const union sctp_addr *paddr, void *p);
-int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
- int (*cb_done)(struct sctp_transport *, void *),
- struct net *net, int *pos, void *p);
+int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
+ struct net *net, int *pos, void *p);
int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p);
int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
struct sctp_info *info);
@@ -143,6 +144,8 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *,
struct sctphdr *, struct sctp_association **,
struct sctp_transport **);
void sctp_err_finish(struct sock *, struct sctp_transport *);
+int sctp_udp_v4_err(struct sock *sk, struct sk_buff *skb);
+int sctp_udp_v6_err(struct sock *sk, struct sk_buff *skb);
void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
struct sctp_transport *t, __u32 pmtu);
void sctp_icmp_redirect(struct sock *, struct sctp_transport *,
@@ -291,7 +294,7 @@ atomic_dec(&sctp_dbg_objcnt_## name)
#define SCTP_DBG_OBJCNT(name) \
atomic_t sctp_dbg_objcnt_## name = ATOMIC_INIT(0)
-/* Macro to help create new entries in in the global array of
+/* Macro to help create new entries in the global array of
* objcnt counters.
*/
#define SCTP_DBG_OBJCNT_ENTRY(name) \
@@ -412,7 +415,7 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
/* Tests if the list has one and only one entry. */
static inline int sctp_list_single_entry(struct list_head *head)
{
- return (head->next != head) && (head->next == head->prev);
+ return list_is_singular(head);
}
static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk)
@@ -506,8 +509,8 @@ static inline int sctp_ep_hashfn(struct net *net, __u16 lport)
return (net_hash_mix(net) + lport) & (sctp_ep_hashsize - 1);
}
-#define sctp_for_each_hentry(epb, head) \
- hlist_for_each_entry(epb, head, node)
+#define sctp_for_each_hentry(ep, head) \
+ hlist_for_each_entry(ep, head, node)
/* Is a socket of this style? */
#define sctp_style(sk, style) __sctp_style((sk), (SCTP_SOCKET_##style))
@@ -571,15 +574,19 @@ static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *
/* Calculate the max payload size given an MTU, or the total overhead if
 * the given MTU is zero
*/
-static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp,
- __u32 mtu, __u32 extra)
+static inline __u32 __sctp_mtu_payload(const struct sctp_sock *sp,
+ const struct sctp_transport *t,
+ __u32 mtu, __u32 extra)
{
__u32 overhead = sizeof(struct sctphdr) + extra;
- if (sp)
+ if (sp) {
overhead += sp->pf->af->net_header_len;
- else
+ if (sp->udp_port && (!t || t->encap_port))
+ overhead += sizeof(struct udphdr);
+ } else {
overhead += sizeof(struct ipv6hdr);
+ }
if (WARN_ON_ONCE(mtu && mtu <= overhead))
mtu = overhead;
@@ -587,6 +594,12 @@ static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp,
return mtu ? mtu - overhead : overhead;
}
+static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp,
+ __u32 mtu, __u32 extra)
+{
+ return __sctp_mtu_payload(sp, NULL, mtu, extra);
+}
+
static inline __u32 sctp_dst_mtu(const struct dst_entry *dst)
{
return SCTP_TRUNC4(max_t(__u32, dst_mtu(dst),
@@ -610,9 +623,57 @@ static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize)
return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize);
}
+static inline int sctp_transport_pl_hlen(struct sctp_transport *t)
+{
+ return __sctp_mtu_payload(sctp_sk(t->asoc->base.sk), t, 0, 0) -
+ sizeof(struct sctphdr);
+}
+
+static inline void sctp_transport_pl_reset(struct sctp_transport *t)
+{
+ if (t->probe_interval && (t->param_flags & SPP_PMTUD_ENABLE) &&
+ (t->state == SCTP_ACTIVE || t->state == SCTP_UNKNOWN)) {
+ if (t->pl.state == SCTP_PL_DISABLED) {
+ t->pl.state = SCTP_PL_BASE;
+ t->pl.pmtu = SCTP_BASE_PLPMTU;
+ t->pl.probe_size = SCTP_BASE_PLPMTU;
+ sctp_transport_reset_probe_timer(t);
+ }
+ } else {
+ if (t->pl.state != SCTP_PL_DISABLED) {
+ if (del_timer(&t->probe_timer))
+ sctp_transport_put(t);
+ t->pl.state = SCTP_PL_DISABLED;
+ }
+ }
+}
+
+static inline void sctp_transport_pl_update(struct sctp_transport *t)
+{
+ if (t->pl.state == SCTP_PL_DISABLED)
+ return;
+
+ t->pl.state = SCTP_PL_BASE;
+ t->pl.pmtu = SCTP_BASE_PLPMTU;
+ t->pl.probe_size = SCTP_BASE_PLPMTU;
+ sctp_transport_reset_probe_timer(t);
+}
+
+static inline bool sctp_transport_pl_enabled(struct sctp_transport *t)
+{
+ return t->pl.state != SCTP_PL_DISABLED;
+}
+
static inline bool sctp_newsk_ready(const struct sock *sk)
{
return sock_flag(sk, SOCK_DEAD) || sk->sk_socket;
}
+static inline void sctp_sock_set_nodelay(struct sock *sk)
+{
+ lock_sock(sk);
+ sctp_sk(sk)->nodelay = true;
+ release_sock(sk);
+}
+
#endif /* __net_sctp_h__ */
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 5c491a3bc27e..f37c7a558d6d 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -151,6 +151,7 @@ sctp_state_fn_t sctp_sf_cookie_wait_icmp_abort;
/* Prototypes for timeout event state functions. */
sctp_state_fn_t sctp_sf_do_6_3_3_rtx;
sctp_state_fn_t sctp_sf_send_reconf;
+sctp_state_fn_t sctp_sf_send_probe;
sctp_state_fn_t sctp_sf_do_6_2_sack;
sctp_state_fn_t sctp_sf_autoclose_timer_expire;
@@ -221,12 +222,17 @@ struct sctp_chunk *sctp_make_violation_paramlen(
struct sctp_chunk *sctp_make_violation_max_retrans(
const struct sctp_association *asoc,
const struct sctp_chunk *chunk);
+struct sctp_chunk *sctp_make_new_encap_port(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk);
struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
- const struct sctp_transport *transport);
+ const struct sctp_transport *transport,
+ __u32 probe_size);
struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
const void *payload,
const size_t paylen);
+struct sctp_chunk *sctp_make_pad(const struct sctp_association *asoc, int len);
struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
__be16 cause_code, const void *payload,
@@ -307,6 +313,7 @@ int sctp_do_sm(struct net *net, enum sctp_event_type event_type,
void sctp_generate_t3_rtx_event(struct timer_list *t);
void sctp_generate_heartbeat_event(struct timer_list *t);
void sctp_generate_reconf_event(struct timer_list *t);
+void sctp_generate_probe_event(struct timer_list *t);
void sctp_generate_proto_unreach_event(struct timer_list *t);
void sctp_ootb_pkt_free(struct sctp_packet *packet);
@@ -377,10 +384,11 @@ sctp_vtag_verify(const struct sctp_chunk *chunk,
* Verification Tag value does not match the receiver's own
* tag value, the receiver shall silently discard the packet...
*/
- if (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag)
- return 1;
+ if (ntohl(chunk->sctp_hdr->vtag) != asoc->c.my_vtag)
+ return 0;
- return 0;
+ chunk->transport->encap_port = SCTP_INPUT_CB(chunk->skb)->encap_port;
+ return 1;
}
/* Check VTAG of the packet matches the sender's own tag and the T bit is
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 314a2fa21d6b..350f250b0dc7 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -177,6 +177,10 @@ struct sctp_sock {
* will be inherited by all new associations.
*/
__u32 hbinterval;
+ __u32 probe_interval;
+
+ __be16 udp_port;
+ __be16 encap_port;
/* This is the max_retrans value for new associations. */
__u16 pathmaxrxt;
@@ -226,12 +230,14 @@ struct sctp_sock {
data_ready_signalled:1;
atomic_t pd_mode;
+
+ /* Fields after this point will be skipped on copies, like on accept
+ * and peeloff operations
+ */
+
/* Receive to here while partial delivery is in effect. */
struct sk_buff_head pd_lobby;
- /* These must be the last fields, as they will skipped on copies,
- * like on accept and peeloff operations
- */
struct list_head auto_asconf_list;
int do_auto_asconf;
};
@@ -326,7 +332,7 @@ struct sctp_cookie {
* the association TCB is re-constructed from the cookie.
*/
__u32 raw_addr_list_len;
- struct sctp_init_chunk peer_init[0];
+ struct sctp_init_chunk peer_init[];
};
@@ -380,6 +386,7 @@ struct sctp_sender_hb_info {
union sctp_addr daddr;
unsigned long sent_at;
__u64 hb_nonce;
+ __u32 probe_size;
};
int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
@@ -431,23 +438,13 @@ struct sctp_af {
int (*setsockopt) (struct sock *sk,
int level,
int optname,
- char __user *optval,
+ sockptr_t optval,
unsigned int optlen);
int (*getsockopt) (struct sock *sk,
int level,
int optname,
char __user *optval,
int __user *optlen);
- int (*compat_setsockopt) (struct sock *sk,
- int level,
- int optname,
- char __user *optval,
- unsigned int optlen);
- int (*compat_getsockopt) (struct sock *sk,
- int level,
- int optname,
- char __user *optval,
- int __user *optlen);
void (*get_dst) (struct sctp_transport *t,
union sctp_addr *saddr,
struct flowi *fl,
@@ -466,7 +463,7 @@ struct sctp_af {
int saddr);
void (*from_sk) (union sctp_addr *,
struct sock *sk);
- void (*from_addr_param) (union sctp_addr *,
+ bool (*from_addr_param) (union sctp_addr *,
union sctp_addr_param *,
__be16 port, int iif);
int (*to_addr_param) (const union sctp_addr *,
@@ -661,6 +658,7 @@ struct sctp_chunk {
data_accepted:1, /* At least 1 chunk accepted */
auth:1, /* IN: was auth'ed | OUT: needs auth */
has_asconf:1, /* IN: have seen an asconf before */
+ pmtu_probe:1, /* Used by PLPMTUD, can be set in a HB chunk */
tsn_missing_report:2, /* Data chunk missing counter. */
fast_retransmit:2; /* Is this chunk fast retransmitted? */
};
@@ -863,6 +861,7 @@ struct sctp_transport {
* the destination address every heartbeat interval.
*/
unsigned long hbinterval;
+ unsigned long probe_interval;
/* SACK delay timeout */
unsigned long sackdelay;
@@ -885,6 +884,8 @@ struct sctp_transport {
*/
unsigned long last_time_ecne_reduced;
+ __be16 encap_port;
+
/* This is the max_retrans value for the transport and will
* be initialized from the assocs value. This can be changed
* using the SCTP_SET_PEER_ADDR_PARAMS socket option.
@@ -937,6 +938,9 @@ struct sctp_transport {
/* Timer to handle reconf chunk rtx */
struct timer_list reconf_timer;
+ /* Timer to send a probe HB packet for PLPMTUD */
+ struct timer_list probe_timer;
+
/* Since we're using per-destination retransmission timers
* (see above), we're also using per-destination "transmitted"
* queues. This probably ought to be a private struct
@@ -979,6 +983,14 @@ struct sctp_transport {
char cacc_saw_newack;
} cacc;
+ struct {
+ __u16 pmtu;
+ __u16 probe_size;
+ __u16 probe_high;
+ __u8 probe_count;
+ __u8 state;
+ } pl; /* plpmtud related */
+
/* 64-bit random number sent with heartbeat. */
__u64 hb_nonce;
@@ -996,6 +1008,8 @@ void sctp_transport_free(struct sctp_transport *);
void sctp_transport_reset_t3_rtx(struct sctp_transport *);
void sctp_transport_reset_hb_timer(struct sctp_transport *);
void sctp_transport_reset_reconf_timer(struct sctp_transport *transport);
+void sctp_transport_reset_probe_timer(struct sctp_transport *transport);
+void sctp_transport_reset_raise_timer(struct sctp_transport *transport);
int sctp_transport_hold(struct sctp_transport *);
void sctp_transport_put(struct sctp_transport *);
void sctp_transport_update_rto(struct sctp_transport *, __u32);
@@ -1010,6 +1024,8 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
void sctp_transport_immediate_rtx(struct sctp_transport *);
void sctp_transport_dst_release(struct sctp_transport *t);
void sctp_transport_dst_confirm(struct sctp_transport *t);
+void sctp_transport_pl_send(struct sctp_transport *t);
+bool sctp_transport_pl_recv(struct sctp_transport *t);
/* This is the structure we use to queue packets as they come into
@@ -1125,13 +1141,14 @@ static inline void sctp_outq_cork(struct sctp_outq *q)
*/
struct sctp_input_cb {
union {
- struct inet_skb_parm h4;
+ struct inet_skb_parm h4;
#if IS_ENABLED(CONFIG_IPV6)
- struct inet6_skb_parm h6;
+ struct inet6_skb_parm h6;
#endif
} header;
struct sctp_chunk *chunk;
struct sctp_af *af;
+ __be16 encap_port;
};
#define SCTP_INPUT_CB(__skb) ((struct sctp_input_cb *)&((__skb)->cb[0]))
@@ -1226,10 +1243,6 @@ enum sctp_endpoint_type {
*/
struct sctp_ep_common {
- /* Fields to help us manage our entries in the hash tables. */
- struct hlist_node node;
- int hashent;
-
/* Runtime type information. What kind of endpoint is this? */
enum sctp_endpoint_type type;
@@ -1281,6 +1294,10 @@ struct sctp_endpoint {
/* Common substructure for endpoint and association. */
struct sctp_ep_common base;
+ /* Fields to help us manage our entries in the hash tables. */
+ struct hlist_node node;
+ int hashent;
+
/* Associations: A list of current associations and mappings
* to the data consumers for each association. This
* may be in the form of a hash table or other
@@ -1337,16 +1354,7 @@ struct sctp_endpoint {
reconf_enable:1;
__u8 strreset_enable;
-
- /* Security identifiers from incoming (INIT). These are set by
- * security_sctp_assoc_request(). These will only be used by
- * SCTP TCP type sockets and peeled off connections as they
- * cause a new socket to be generated. security_sctp_sk_clone()
- * will then plug these into the new socket.
- */
-
- u32 secid;
- u32 peer_secid;
+ struct rcu_head rcu;
};
/* Recover the outer endpoint structure. */
@@ -1362,7 +1370,7 @@ static inline struct sctp_endpoint *sctp_ep(struct sctp_ep_common *base)
struct sctp_endpoint *sctp_endpoint_new(struct sock *, gfp_t);
void sctp_endpoint_free(struct sctp_endpoint *);
void sctp_endpoint_put(struct sctp_endpoint *);
-void sctp_endpoint_hold(struct sctp_endpoint *);
+int sctp_endpoint_hold(struct sctp_endpoint *ep);
void sctp_endpoint_add_asoc(struct sctp_endpoint *, struct sctp_association *);
struct sctp_association *sctp_endpoint_lookup_assoc(
const struct sctp_endpoint *ep,
@@ -1398,7 +1406,7 @@ struct sctp_stream_priorities {
struct list_head prio_sched;
/* List of streams scheduled */
struct list_head active;
- /* The next stream stream in line */
+ /* The next stream in line */
struct sctp_stream_out_ext *next;
__u16 prio;
};
@@ -1460,7 +1468,7 @@ struct sctp_stream {
struct {
/* List of streams scheduled */
struct list_head rr_list;
- /* The next stream stream in line */
+ /* The next stream in line */
struct sctp_stream_out_ext *rr_next;
};
};
@@ -1770,7 +1778,7 @@ struct sctp_association {
int max_burst;
/* This is the max_retrans value for the association. This value will
- * be initialized initialized from system defaults, but can be
+ * be initialized from system defaults, but can be
* modified by the SCTP_ASSOCINFO socket option.
*/
int max_retrans;
@@ -1797,6 +1805,9 @@ struct sctp_association {
* will be inherited by all new transports.
*/
unsigned long hbinterval;
+ unsigned long probe_interval;
+
+ __be16 encap_port;
/* This is the max_retrans value for new transports in the
* association.
@@ -2083,6 +2094,16 @@ struct sctp_association {
__u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
__u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
+ /* Security identifiers from incoming (INIT). These are set by
+ * security_sctp_assoc_request(). These will only be used by
+ * SCTP TCP type sockets and peeled off connections as they
+ * cause a new socket to be generated. security_sctp_sk_clone()
+ * will then plug these into the new socket.
+ */
+
+ u32 secid;
+ u32 peer_secid;
+
struct rcu_head rcu;
};
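
[Editor's note] The PLPMTUD additions above follow the usual SCTP inheritance chain: probe_interval is configured on the socket, copied into new associations, and then into each transport, where the pl state machine and probe_timer live. A minimal sketch of gating the probe timer on the new helper; example_plpmtud_start is hypothetical:

static void example_plpmtud_start(struct sctp_transport *t)
{
	/* pl.state == SCTP_PL_DISABLED means probing is off for this peer. */
	if (!sctp_transport_pl_enabled(t))
		return;

	/* probe_interval was inherited from the association/socket. */
	sctp_transport_reset_probe_timer(t);
}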
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index 0b032b92da0b..994e984eef32 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -80,7 +80,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
struct sctp_chunk *chunk,
gfp_t gfp);
-void sctp_ulpevent_nofity_peer_addr_change(struct sctp_transport *transport,
+void sctp_ulpevent_notify_peer_addr_change(struct sctp_transport *transport,
int state, int error);
struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
index d7d2495f83c2..21e7fa2a1813 100644
--- a/include/net/secure_seq.h
+++ b/include/net/secure_seq.h
@@ -4,8 +4,10 @@
#include <linux/types.h>
-u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+struct net;
+
+u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
+u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
__be16 dport);
u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
__be16 sport, __be16 dport);
diff --git a/include/net/seg6.h b/include/net/seg6.h
index 640724b35273..af668f17b398 100644
--- a/include/net/seg6.h
+++ b/include/net/seg6.h
@@ -57,10 +57,31 @@ extern void seg6_iptunnel_exit(void);
extern int seg6_local_init(void);
extern void seg6_local_exit(void);
-extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len);
+extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced);
+extern struct ipv6_sr_hdr *seg6_get_srh(struct sk_buff *skb, int flags);
+extern void seg6_icmp_srh(struct sk_buff *skb, struct inet6_skb_parm *opt);
extern int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
int proto);
extern int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh);
extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
u32 tbl_id);
+
+/* If the packet which invoked an ICMP error contains an SRH, return
+ * the true destination address from within the SRH; otherwise return
+ * NULL so the caller falls back to the destination address in the
+ * IPv6 header.
+ */
+static inline const struct in6_addr *seg6_get_daddr(struct sk_buff *skb,
+ struct inet6_skb_parm *opt)
+{
+ struct ipv6_sr_hdr *srh;
+
+ if (opt->flags & IP6SKB_SEG6) {
+ srh = (struct ipv6_sr_hdr *)(skb->data + opt->srhoff);
+ return &srh->segments[0];
+ }
+
+ return NULL;
+}
+
#endif
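
[Editor's note] The IP6SKB_SEG6 flag consumed by seg6_get_daddr() above is set by the new seg6_icmp_srh() while parsing the offending packet echoed inside an ICMPv6 error. A sketch of the intended caller-side fallback; example_icmp_daddr is hypothetical:

static const struct in6_addr *example_icmp_daddr(struct sk_buff *skb,
						 struct inet6_skb_parm *opt)
{
	const struct in6_addr *daddr = seg6_get_daddr(skb, opt);

	/* No SRH in the inner packet: use the IPv6 header's daddr. */
	return daddr ?: &ipv6_hdr(skb)->daddr;
}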
diff --git a/include/net/selftests.h b/include/net/selftests.h
new file mode 100644
index 000000000000..e65e8d230d33
--- /dev/null
+++ b/include/net/selftests.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_SELFTESTS
+#define _NET_SELFTESTS
+
+#include <linux/ethtool.h>
+
+#if IS_ENABLED(CONFIG_NET_SELFTESTS)
+
+void net_selftest(struct net_device *ndev, struct ethtool_test *etest,
+ u64 *buf);
+int net_selftest_get_count(void);
+void net_selftest_get_strings(u8 *data);
+
+#else
+
+static inline void net_selftest(struct net_device *ndev, struct ethtool_test *etest,
+ u64 *buf)
+{
+}
+
+static inline int net_selftest_get_count(void)
+{
+ return 0;
+}
+
+static inline void net_selftest_get_strings(u8 *data)
+{
+}
+
+#endif
+#endif /* _NET_SELFTESTS */
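
[Editor's note] Drivers opt into these generic selftests by routing three ethtool callbacks at the new helpers. A sketch of the wiring; the ethtool_ops members are real, the example_* names are not:

static int example_get_sset_count(struct net_device *ndev, int sset)
{
	return sset == ETH_SS_TEST ? net_selftest_get_count() : -EOPNOTSUPP;
}

static void example_get_strings(struct net_device *ndev, u32 sset, u8 *data)
{
	if (sset == ETH_SS_TEST)
		net_selftest_get_strings(data);
}

static const struct ethtool_ops example_ethtool_ops = {
	.self_test	= net_selftest,
	.get_sset_count	= example_get_sset_count,
	.get_strings	= example_get_strings,
};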
diff --git a/include/net/smc.h b/include/net/smc.h
index 646feb4bc75f..c926d3313e05 100644
--- a/include/net/smc.h
+++ b/include/net/smc.h
@@ -11,6 +11,13 @@
#ifndef _SMC_H
#define _SMC_H
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+struct sock;
+
#define SMC_MAX_PNETID_LEN 16 /* Max. length of PNET id */
struct smc_hashinfo {
@@ -37,6 +44,8 @@ struct smcd_dmb {
#define ISM_EVENT_GID 1
#define ISM_EVENT_SWR 2
+#define ISM_RESERVED_VLANID 0x1FFF
+
#define ISM_ERROR 0xFFFF
struct smcd_event {
@@ -63,6 +72,8 @@ struct smcd_ops {
int (*move_data)(struct smcd_dev *dev, u64 dmb_tok, unsigned int idx,
bool sf, unsigned int offset, void *data,
unsigned int size);
+ u8* (*get_system_eid)(void);
+ u16 (*get_chid)(struct smcd_dev *dev);
};
struct smcd_dev {
@@ -90,5 +101,5 @@ int smcd_register_dev(struct smcd_dev *smcd);
void smcd_unregister_dev(struct smcd_dev *smcd);
void smcd_free_dev(struct smcd_dev *smcd);
void smcd_handle_event(struct smcd_dev *dev, struct smcd_event *event);
-void smcd_handle_irq(struct smcd_dev *dev, unsigned int bit);
+void smcd_handle_irq(struct smcd_dev *dev, unsigned int bit, u16 dmbemask);
#endif /* _SMC_H */
diff --git a/include/net/sock.h b/include/net/sock.h
index 328564525526..5db02546941c 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -56,17 +56,19 @@
#include <linux/wait.h>
#include <linux/cgroup-defs.h>
#include <linux/rbtree.h>
-#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>
-
+#include <linux/sockptr.h>
+#include <linux/indirect_call_wrapper.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
+#include <linux/llist.h>
#include <net/dst.h>
#include <net/checksum.h>
#include <net/tcp_states.h>
#include <linux/net_tstamp.h>
#include <net/l3mdev.h>
+#include <uapi/linux/socket.h>
/*
* This structure really needs to be cleaned up.
@@ -159,9 +161,6 @@ typedef __u64 __bitwise __addrpair;
* for struct sock and struct inet_timewait_sock.
*/
struct sock_common {
- /* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned
- * address on 64bit arches : cf INET_MATCH()
- */
union {
__addrpair skc_addrpair;
struct {
@@ -225,7 +224,7 @@ struct sock_common {
struct hlist_nulls_node skc_nulls_node;
};
unsigned short skc_tx_queue_mapping;
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
unsigned short skc_rx_queue_mapping;
#endif
union {
@@ -245,7 +244,8 @@ struct sock_common {
/* public: */
};
-struct bpf_sk_storage;
+struct bpf_local_storage;
+struct sk_filter;
/**
* struct sock - network layer representation of sockets
@@ -257,10 +257,11 @@ struct bpf_sk_storage;
* @sk_rcvbuf: size of receive buffer in bytes
* @sk_wq: sock wait queue and async head
* @sk_rx_dst: receive input route used by early demux
+ * @sk_rx_dst_ifindex: ifindex for @sk_rx_dst
+ * @sk_rx_dst_cookie: cookie for @sk_rx_dst
* @sk_dst_cache: destination cache
* @sk_dst_pending_confirm: need to confirm neighbour
* @sk_policy: flow policy
- * @sk_rx_skb_cache: cache copy of recently accessed RX skb
* @sk_receive_queue: incoming packets
* @sk_wmem_alloc: transmit queue bytes committed
* @sk_tsq_flags: TCP Small Queues flags
@@ -268,6 +269,7 @@ struct bpf_sk_storage;
* @sk_omem_alloc: "o" is "option" or "other"
* @sk_wmem_queued: persistent queue size
* @sk_forward_alloc: space allocated forward
+ * @sk_reserved_mem: space reserved and non-reclaimable for the socket
* @sk_napi_id: id of the last napi context to receive data for sk
* @sk_ll_usec: usecs to busypoll when there is no data
* @sk_allocation: allocation mode
@@ -280,9 +282,7 @@ struct bpf_sk_storage;
* @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
* @sk_no_check_rx: allow zero checksum in RX packets
* @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
- * @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
- * @sk_route_forced_caps: static, forced route capabilities
- * (set in tcp_init_sock())
+ * @sk_gso_disabled: if set, NETIF_F_GSO_MASK is forbidden.
* @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
* @sk_gso_max_size: Maximum GSO segment size to build
* @sk_gso_max_segs: Maximum number of GSO segments
@@ -300,20 +300,26 @@ struct bpf_sk_storage;
* @sk_ack_backlog: current listen backlog
* @sk_max_ack_backlog: listen backlog set in listen()
* @sk_uid: user id of owner
+ * @sk_prefer_busy_poll: prefer busypolling over softirq processing
+ * @sk_busy_poll_budget: napi processing budget when busypolling
* @sk_priority: %SO_PRIORITY setting
* @sk_type: socket type (%SOCK_STREAM, etc)
* @sk_protocol: which protocol this socket belongs in this network family
+ * @sk_peer_lock: lock protecting @sk_peer_pid and @sk_peer_cred
* @sk_peer_pid: &struct pid for this socket's peer
* @sk_peer_cred: %SO_PEERCRED setting
* @sk_rcvlowat: %SO_RCVLOWAT setting
* @sk_rcvtimeo: %SO_RCVTIMEO setting
* @sk_sndtimeo: %SO_SNDTIMEO setting
* @sk_txhash: computed flow hash for use on transmit
+ * @sk_txrehash: enable TX hash rethink
* @sk_filter: socket filtering instructions
* @sk_timer: sock cleanup timer
* @sk_stamp: time stamp of last packet received
* @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
- * @sk_tsflags: SO_TIMESTAMPING socket options
+ * @sk_tsflags: SO_TIMESTAMPING flags
+ * @sk_bind_phc: SO_TIMESTAMPING bind PHC index of PTP virtual clock
+ * for timestamping
* @sk_tskey: counter to disambiguate concurrent tstamp requests
* @sk_zckey: counter to order MSG_ZEROCOPY notifications
* @sk_socket: Identd and reporting IO signals
@@ -322,7 +328,6 @@ struct bpf_sk_storage;
* @sk_peek_off: current peek_offset value
* @sk_send_head: front of stuff to transmit
* @tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head]
- * @sk_tx_skb_cache: cache copy of recently accessed TX skb
* @sk_security: used by security modules
* @sk_mark: generic packet mark
* @sk_cgrp_data: cgroup data for this cgroup
@@ -342,6 +347,8 @@ struct bpf_sk_storage;
* @sk_txtime_deadline_mode: set deadline mode for SO_TXTIME
* @sk_txtime_report_errors: set report errors mode for SO_TXTIME
* @sk_txtime_unused: unused txtime flags
+ * @ns_tracker: tracker for netns reference
+ * @sk_bind2_node: bind node in the bhash2 table
*/
struct sock {
/*
@@ -353,7 +360,7 @@ struct sock {
#define sk_nulls_node __sk_common.skc_nulls_node
#define sk_refcnt __sk_common.skc_refcnt
#define sk_tx_queue_mapping __sk_common.skc_tx_queue_mapping
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
#define sk_rx_queue_mapping __sk_common.skc_rx_queue_mapping
#endif
@@ -383,11 +390,15 @@ struct sock {
#define sk_flags __sk_common.skc_flags
#define sk_rxhash __sk_common.skc_rxhash
+ /* early demux fields */
+ struct dst_entry __rcu *sk_rx_dst;
+ int sk_rx_dst_ifindex;
+ u32 sk_rx_dst_cookie;
+
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
- struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
/*
* The backlog queue is special, it is always used with
@@ -403,9 +414,11 @@ struct sock {
struct sk_buff *head;
struct sk_buff *tail;
} sk_backlog;
+
#define sk_rmem_alloc sk_backlog.rmem_alloc
int sk_forward_alloc;
+ u32 sk_reserved_mem;
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sk_ll_usec;
/* ===== mostly read cache line ===== */
@@ -423,7 +436,7 @@ struct sock {
#ifdef CONFIG_XFRM
struct xfrm_policy __rcu *sk_policy[2];
#endif
- struct dst_entry *sk_rx_dst;
+
struct dst_entry __rcu *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
@@ -436,7 +449,6 @@ struct sock {
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
};
- struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
@@ -450,8 +462,6 @@ struct sock {
unsigned long sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
- netdev_features_t sk_route_nocaps;
- netdev_features_t sk_route_forced_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
@@ -461,7 +471,7 @@ struct sock {
* Because of non atomicity rules, all
* changes are protected by socket lock.
*/
- u8 sk_padding : 1,
+ u8 sk_gso_disabled : 1,
sk_kern_sock : 1,
sk_no_check_tx : 1,
sk_no_check_rx : 1,
@@ -478,8 +488,16 @@ struct sock {
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
+ u8 sk_txrehash;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ u8 sk_prefer_busy_poll;
+ u16 sk_busy_poll_budget;
+#endif
+ spinlock_t sk_peer_lock;
+ int sk_bind_phc;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
+
long sk_rcvtimeo;
ktime_t sk_stamp;
#if BITS_PER_LONG==32
@@ -487,7 +505,7 @@ struct sock {
#endif
u16 sk_tsflags;
u8 sk_shutdown;
- u32 sk_tskey;
+ atomic_t sk_tskey;
atomic_t sk_zckey;
u8 sk_clockid;
@@ -516,9 +534,11 @@ struct sock {
void (*sk_destruct)(struct sock *sk);
struct sock_reuseport __rcu *sk_reuseport_cb;
#ifdef CONFIG_BPF_SYSCALL
- struct bpf_sk_storage __rcu *sk_bpf_storage;
+ struct bpf_local_storage __rcu *sk_bpf_storage;
#endif
struct rcu_head sk_rcu;
+ netns_tracker ns_tracker;
+ struct hlist_node sk_bind2_node;
};
enum sk_pacing {
@@ -527,10 +547,109 @@ enum sk_pacing {
SK_PACING_FQ = 2,
};
+/* flag bits in sk_user_data
+ *
+ * - SK_USER_DATA_NOCOPY: Pointer stored in sk_user_data might
+ * not be suitable for copying when cloning the socket. For instance,
+ * it can point to a reference counted object. sk_user_data bottom
+ * bit is set if pointer must not be copied.
+ *
+ * - SK_USER_DATA_BPF: Mark whether sk_user_data field is
+ * managed/owned by a BPF reuseport array. This bit should be set
+ * when the socket owning sk_user_data is added to a BPF
+ * reuseport array.
+ *
+ * - SK_USER_DATA_PSOCK: Mark whether the pointer stored in
+ * sk_user_data points to a psock. This bit should be set
+ * when sk_user_data is assigned to a psock object.
+ */
+#define SK_USER_DATA_NOCOPY 1UL
+#define SK_USER_DATA_BPF 2UL
+#define SK_USER_DATA_PSOCK 4UL
+#define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF |\
+ SK_USER_DATA_PSOCK)
+
+/**
+ * sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied
+ * @sk: socket
+ */
+static inline bool sk_user_data_is_nocopy(const struct sock *sk)
+{
+ return ((uintptr_t)sk->sk_user_data & SK_USER_DATA_NOCOPY);
+}
+
#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
-#define rcu_dereference_sk_user_data(sk) rcu_dereference(__sk_user_data((sk)))
-#define rcu_assign_sk_user_data(sk, ptr) rcu_assign_pointer(__sk_user_data((sk)), ptr)
+/**
+ * __locked_read_sk_user_data_with_flags - return the pointer
+ * only if all of the argument flags have been set in sk_user_data;
+ * otherwise return NULL
+ *
+ * @sk: socket
+ * @flags: flag bits
+ *
+ * The caller must be holding sk->sk_callback_lock.
+ */
+static inline void *
+__locked_read_sk_user_data_with_flags(const struct sock *sk,
+ uintptr_t flags)
+{
+ uintptr_t sk_user_data =
+ (uintptr_t)rcu_dereference_check(__sk_user_data(sk),
+ lockdep_is_held(&sk->sk_callback_lock));
+
+ WARN_ON_ONCE(flags & SK_USER_DATA_PTRMASK);
+
+ if ((sk_user_data & flags) == flags)
+ return (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
+ return NULL;
+}
+
+/**
+ * __rcu_dereference_sk_user_data_with_flags - return the pointer
+ * only if all of the argument flags have been set in sk_user_data;
+ * otherwise return NULL
+ *
+ * @sk: socket
+ * @flags: flag bits
+ */
+static inline void *
+__rcu_dereference_sk_user_data_with_flags(const struct sock *sk,
+ uintptr_t flags)
+{
+ uintptr_t sk_user_data = (uintptr_t)rcu_dereference(__sk_user_data(sk));
+
+ WARN_ON_ONCE(flags & SK_USER_DATA_PTRMASK);
+
+ if ((sk_user_data & flags) == flags)
+ return (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
+ return NULL;
+}
+
+#define rcu_dereference_sk_user_data(sk) \
+ __rcu_dereference_sk_user_data_with_flags(sk, 0)
+#define __rcu_assign_sk_user_data_with_flags(sk, ptr, flags) \
+({ \
+ uintptr_t __tmp1 = (uintptr_t)(ptr), \
+ __tmp2 = (uintptr_t)(flags); \
+ WARN_ON_ONCE(__tmp1 & ~SK_USER_DATA_PTRMASK); \
+ WARN_ON_ONCE(__tmp2 & SK_USER_DATA_PTRMASK); \
+ rcu_assign_pointer(__sk_user_data((sk)), \
+ __tmp1 | __tmp2); \
+})
+#define rcu_assign_sk_user_data(sk, ptr) \
+ __rcu_assign_sk_user_data_with_flags(sk, ptr, 0)
+
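+[Editor's note] Putting the flag-bit machinery together: the low pointer bits of sk_user_data carry metadata, so writers must go through the masking macros. A minimal sketch storing a pointer that must not survive socket cloning; example_attach/example_peek are hypothetical, and in real code the object needs its own lifetime guarantee before the pointer is used outside the RCU section:
+
+static void example_attach(struct sock *sk, void *obj)
+{
+	__rcu_assign_sk_user_data_with_flags(sk, obj, SK_USER_DATA_NOCOPY);
+}
+
+static void *example_peek(const struct sock *sk)
+{
+	void *obj;
+
+	rcu_read_lock();
+	/* Returns NULL unless SK_USER_DATA_NOCOPY is set on the stored value. */
+	obj = __rcu_dereference_sk_user_data_with_flags(sk, SK_USER_DATA_NOCOPY);
+	rcu_read_unlock();
+	return obj;
+}
+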
+static inline
+struct net *sock_net(const struct sock *sk)
+{
+ return read_pnet(&sk->sk_net);
+}
+
+static inline
+void sock_net_set(struct sock *sk, struct net *net)
+{
+ write_pnet(&sk->sk_net, net);
+}
/*
* SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK
@@ -545,7 +664,7 @@ enum sk_pacing {
int sk_set_peek_off(struct sock *sk, int val);
-static inline int sk_peek_offset(struct sock *sk, int flags)
+static inline int sk_peek_offset(const struct sock *sk, int flags)
{
if (unlikely(flags & MSG_PEEK)) {
return READ_ONCE(sk->sk_peek_off);
@@ -625,11 +744,6 @@ static inline void sk_node_init(struct hlist_node *node)
node->pprev = NULL;
}
-static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
-{
- node->pprev = NULL;
-}
-
static inline void __sk_del_node(struct sock *sk)
{
__hlist_del(&sk->sk_node);
@@ -753,6 +867,16 @@ static inline void sk_add_bind_node(struct sock *sk,
hlist_add_head(&sk->sk_bind_node, list);
}
+static inline void __sk_del_bind2_node(struct sock *sk)
+{
+ __hlist_del(&sk->sk_bind2_node);
+}
+
+static inline void sk_add_bind2_node(struct sock *sk, struct hlist_head *list)
+{
+ hlist_add_head(&sk->sk_bind2_node, list);
+}
+
#define sk_for_each(__sk, list) \
hlist_for_each_entry(__sk, list, sk_node)
#define sk_for_each_rcu(__sk, list) \
@@ -770,6 +894,8 @@ static inline void sk_add_bind_node(struct sock *sk,
hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
#define sk_for_each_bound(__sk, list) \
hlist_for_each_entry(__sk, list, sk_bind_node)
+#define sk_for_each_bound_bhash2(__sk, list) \
+ hlist_for_each_entry(__sk, list, sk_bind2_node)
/**
* sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
@@ -785,7 +911,7 @@ static inline void sk_add_bind_node(struct sock *sk,
({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;}); \
pos = rcu_dereference(hlist_next_rcu(pos)))
-static inline struct user_namespace *sk_user_ns(struct sock *sk)
+static inline struct user_namespace *sk_user_ns(const struct sock *sk)
{
/* Careful: only use this in a context where these parameters
 * cannot change and must all be valid, such as recvmsg from
@@ -810,7 +936,6 @@ enum sock_flags {
SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
- SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
SOCK_MEMALLOC, /* VM depends on this socket for swapping */
SOCK_TIMESTAMPING_RX_SOFTWARE, /* %SOF_TIMESTAMPING_RX_SOFTWARE */
SOCK_FASYNC, /* fasync() active */
@@ -827,11 +952,12 @@ enum sock_flags {
SOCK_TXTIME,
SOCK_XDP, /* XDP is attached */
SOCK_TSTAMP_NEW, /* Indicates 64 bit timestamps always */
+ SOCK_RCVMARK, /* Receive SO_MARK ancillary data with packet */
};
#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
-static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
+static inline void sock_copy_flags(struct sock *nsk, const struct sock *osk)
{
nsk->sk_flags = osk->sk_flags;
}
@@ -846,6 +972,15 @@ static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
__clear_bit(flag, &sk->sk_flags);
}
+static inline void sock_valbool_flag(struct sock *sk, enum sock_flags bit,
+ int valbool)
+{
+ if (valbool)
+ sock_set_flag(sk, bit);
+ else
+ sock_reset_flag(sk, bit);
+}
+
static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
{
return test_bit(flag, &sk->sk_flags);
@@ -857,6 +992,8 @@ static inline int sk_memalloc_socks(void)
{
return static_branch_unlikely(&memalloc_socks_key);
}
+
+void __receive_sock(struct file *file);
#else
static inline int sk_memalloc_socks(void)
@@ -864,6 +1001,8 @@ static inline int sk_memalloc_socks(void)
return 0;
}
+static inline void __receive_sock(struct file *file)
+{ }
#endif
static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
@@ -881,6 +1020,10 @@ static inline void sk_acceptq_added(struct sock *sk)
WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
}
+/* Note: If you think the test should be:
+ * return READ_ONCE(sk->sk_ack_backlog) >= READ_ONCE(sk->sk_max_ack_backlog);
+ * Then please take a look at commit 64a146513f8f ("[NET]: Revert incorrect accept queue backlog changes.")
+ */
static inline bool sk_acceptq_is_full(const struct sock *sk)
{
return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
@@ -955,12 +1098,18 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+INDIRECT_CALLABLE_DECLARE(int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb));
+INDIRECT_CALLABLE_DECLARE(int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb));
+
static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
if (sk_memalloc_socks() && skb_pfmemalloc(skb))
return __sk_backlog_rcv(sk, skb);
- return sk->sk_backlog_rcv(sk, skb);
+ return INDIRECT_CALL_INET(sk->sk_backlog_rcv,
+ tcp_v6_do_rcv,
+ tcp_v4_do_rcv,
+ sk, skb);
}
static inline void sk_incoming_cpu_update(struct sock *sk)
@@ -1061,6 +1210,7 @@ struct inet_hashinfo;
struct raw_hashinfo;
struct smc_hashinfo;
struct module;
+struct sk_psock;
/*
* caches using SLAB_TYPESAFE_BY_RCU should let .next pointer from nulls nodes
@@ -1097,36 +1247,31 @@ struct proto {
void (*destroy)(struct sock *sk);
void (*shutdown)(struct sock *sk, int how);
int (*setsockopt)(struct sock *sk, int level,
- int optname, char __user *optval,
+ int optname, sockptr_t optval,
unsigned int optlen);
int (*getsockopt)(struct sock *sk, int level,
int optname, char __user *optval,
int __user *option);
void (*keepalive)(struct sock *sk, int valbool);
#ifdef CONFIG_COMPAT
- int (*compat_setsockopt)(struct sock *sk,
- int level,
- int optname, char __user *optval,
- unsigned int optlen);
- int (*compat_getsockopt)(struct sock *sk,
- int level,
- int optname, char __user *optval,
- int __user *option);
int (*compat_ioctl)(struct sock *sk,
unsigned int cmd, unsigned long arg);
#endif
int (*sendmsg)(struct sock *sk, struct msghdr *msg,
size_t len);
int (*recvmsg)(struct sock *sk, struct msghdr *msg,
- size_t len, int noblock, int flags,
- int *addr_len);
+ size_t len, int flags, int *addr_len);
int (*sendpage)(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
int (*bind)(struct sock *sk,
- struct sockaddr *uaddr, int addr_len);
+ struct sockaddr *addr, int addr_len);
+ int (*bind_add)(struct sock *sk,
+ struct sockaddr *addr, int addr_len);
int (*backlog_rcv) (struct sock *sk,
struct sk_buff *skb);
+ bool (*bpf_bypass_getsockopt)(int level,
+ int optname);
void (*release_cb)(struct sock *sk);
@@ -1135,19 +1280,31 @@ struct proto {
void (*unhash)(struct sock *sk);
void (*rehash)(struct sock *sk);
int (*get_port)(struct sock *sk, unsigned short snum);
+ void (*put_port)(struct sock *sk);
+#ifdef CONFIG_BPF_SYSCALL
+ int (*psock_update_sk_prot)(struct sock *sk,
+ struct sk_psock *psock,
+ bool restore);
+#endif
/* Keeping track of sockets in use */
#ifdef CONFIG_PROC_FS
unsigned int inuse_idx;
#endif
+#if IS_ENABLED(CONFIG_MPTCP)
+ int (*forward_alloc_get)(const struct sock *sk);
+#endif
+
bool (*stream_memory_free)(const struct sock *sk, int wake);
- bool (*stream_memory_read)(const struct sock *sk);
+ bool (*sock_is_readable)(struct sock *sk);
/* Memory pressure */
void (*enter_memory_pressure)(struct sock *sk);
void (*leave_memory_pressure)(struct sock *sk);
atomic_long_t *memory_allocated; /* Current allocated memory. */
+ int __percpu *per_cpu_fw_alloc;
struct percpu_counter *sockets_allocated; /* Current number of sockets. */
+
/*
* Pressure flag: try to collapse.
* Technical note: it is used by multiple contexts non atomically.
@@ -1171,7 +1328,7 @@ struct proto {
unsigned int useroffset; /* Usercopy region offset */
unsigned int usersize; /* Usercopy region size */
- struct percpu_counter *orphan_count;
+ unsigned int __percpu *orphan_count;
struct request_sock_ops *rsk_prot;
struct timewait_sock_ops *twsk_prot;
@@ -1223,13 +1380,25 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */
+INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int wake));
+
+static inline int sk_forward_alloc_get(const struct sock *sk)
+{
+#if IS_ENABLED(CONFIG_MPTCP)
+ if (sk->sk_prot->forward_alloc_get)
+ return sk->sk_prot->forward_alloc_get(sk);
+#endif
+ return sk->sk_forward_alloc;
+}
+
static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
{
if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
return false;
return sk->sk_prot->stream_memory_free ?
- sk->sk_prot->stream_memory_free(sk, wake) : true;
+ INDIRECT_CALL_INET_1(sk->sk_prot->stream_memory_free,
+ tcp_stream_memory_free, sk, wake) : true;
}
static inline bool sk_stream_memory_free(const struct sock *sk)
@@ -1277,31 +1446,60 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
}
static inline long
-sk_memory_allocated(const struct sock *sk)
+proto_memory_allocated(const struct proto *prot)
{
- return atomic_long_read(sk->sk_prot->memory_allocated);
+ return max(0L, atomic_long_read(prot->memory_allocated));
}
static inline long
+sk_memory_allocated(const struct sock *sk)
+{
+ return proto_memory_allocated(sk->sk_prot);
+}
+
+/* 1 MB per cpu, in page units */
+#define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
+
+static inline void
sk_memory_allocated_add(struct sock *sk, int amt)
{
- return atomic_long_add_return(amt, sk->sk_prot->memory_allocated);
+ int local_reserve;
+
+ preempt_disable();
+ local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
+ if (local_reserve >= SK_MEMORY_PCPU_RESERVE) {
+ __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
+ atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
+ }
+ preempt_enable();
}
static inline void
sk_memory_allocated_sub(struct sock *sk, int amt)
{
- atomic_long_sub(amt, sk->sk_prot->memory_allocated);
+ int local_reserve;
+
+ preempt_disable();
+ local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
+ if (local_reserve <= -SK_MEMORY_PCPU_RESERVE) {
+ __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
+ atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
+ }
+ preempt_enable();
}
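
[Editor's note] Worked numbers for the per-CPU reserve above, as a sketch assuming 4 KiB pages. Each CPU accumulates forward-alloc locally and only folds it into the shared atomic when the local balance crosses the 1 MiB threshold, which is also why the shared counter may lag and proto_memory_allocated() clamps at zero:

/* Assuming PAGE_SHIFT == 12:
 *
 *   SK_MEMORY_PCPU_RESERVE = 1 << (20 - 12) = 256 pages (1 MiB)
 *
 *   charge 100 pages -> local_reserve = 100        (stays per-CPU)
 *   charge 300 pages -> local_reserve = 400 >= 256
 *                    -> atomic_long_add(400, memory_allocated),
 *                       local_reserve reset to 0
 */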
+#define SK_ALLOC_PERCPU_COUNTER_BATCH 16
+
static inline void sk_sockets_allocated_dec(struct sock *sk)
{
- percpu_counter_dec(sk->sk_prot->sockets_allocated);
+ percpu_counter_add_batch(sk->sk_prot->sockets_allocated, -1,
+ SK_ALLOC_PERCPU_COUNTER_BATCH);
}
static inline void sk_sockets_allocated_inc(struct sock *sk)
{
- percpu_counter_inc(sk->sk_prot->sockets_allocated);
+ percpu_counter_add_batch(sk->sk_prot->sockets_allocated, 1,
+ SK_ALLOC_PERCPU_COUNTER_BATCH);
}
static inline u64
@@ -1316,12 +1514,6 @@ proto_sockets_allocated_sum_positive(struct proto *prot)
return percpu_counter_sum_positive(prot->sockets_allocated);
}
-static inline long
-proto_memory_allocated(struct proto *prot)
-{
- return atomic_long_read(prot->memory_allocated);
-}
-
static inline bool
proto_memory_pressure(struct proto *prot)
{
@@ -1332,13 +1524,32 @@ proto_memory_pressure(struct proto *prot)
#ifdef CONFIG_PROC_FS
-/* Called with local bh disabled */
-void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
+#define PROTO_INUSE_NR 64 /* should be enough for the first time */
+struct prot_inuse {
+ int all;
+ int val[PROTO_INUSE_NR];
+};
+
+static inline void sock_prot_inuse_add(const struct net *net,
+ const struct proto *prot, int val)
+{
+ this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
+}
+
+static inline void sock_inuse_add(const struct net *net, int val)
+{
+ this_cpu_add(net->core.prot_inuse->all, val);
+}
+
int sock_prot_inuse_get(struct net *net, struct proto *proto);
int sock_inuse_get(struct net *net);
#else
-static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
- int inc)
+static inline void sock_prot_inuse_add(const struct net *net,
+ const struct proto *prot, int val)
+{
+}
+
+static inline void sock_inuse_add(const struct net *net, int val)
{
}
#endif
@@ -1363,8 +1574,6 @@ static inline int __sk_prot_rehash(struct sock *sk)
#define RCV_SHUTDOWN 1
#define SEND_SHUTDOWN 2
-#define SOCK_SNDBUF_LOCK 1
-#define SOCK_RCVBUF_LOCK 2
#define SOCK_BINDADDR_LOCK 4
#define SOCK_BINDPORT_LOCK 8
@@ -1391,30 +1600,18 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind);
void __sk_mem_reduce_allocated(struct sock *sk, int amount);
void __sk_mem_reclaim(struct sock *sk, int amount);
-/* We used to have PAGE_SIZE here, but systems with 64KB pages
- * do not necessarily have 16x time more memory than 4KB ones.
- */
-#define SK_MEM_QUANTUM 4096
-#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
#define SK_MEM_SEND 0
#define SK_MEM_RECV 1
-/* sysctl_mem values are in pages, we convert them in SK_MEM_QUANTUM units */
+/* sysctl_mem values are in pages */
static inline long sk_prot_mem_limits(const struct sock *sk, int index)
{
- long val = sk->sk_prot->sysctl_mem[index];
-
-#if PAGE_SIZE > SK_MEM_QUANTUM
- val <<= PAGE_SHIFT - SK_MEM_QUANTUM_SHIFT;
-#elif PAGE_SIZE < SK_MEM_QUANTUM
- val >>= SK_MEM_QUANTUM_SHIFT - PAGE_SHIFT;
-#endif
- return val;
+ return READ_ONCE(sk->sk_prot->sysctl_mem[index]);
}
static inline int sk_mem_pages(int amt)
{
- return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
+ return (amt + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
static inline bool sk_has_account(struct sock *sk)
@@ -1425,36 +1622,56 @@ static inline bool sk_has_account(struct sock *sk)
static inline bool sk_wmem_schedule(struct sock *sk, int size)
{
+ int delta;
+
if (!sk_has_account(sk))
return true;
- return size <= sk->sk_forward_alloc ||
- __sk_mem_schedule(sk, size, SK_MEM_SEND);
+ delta = size - sk->sk_forward_alloc;
+ return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_SEND);
}
static inline bool
sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
{
+ int delta;
+
if (!sk_has_account(sk))
return true;
- return size<= sk->sk_forward_alloc ||
- __sk_mem_schedule(sk, size, SK_MEM_RECV) ||
+ delta = size - sk->sk_forward_alloc;
+ return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_RECV) ||
skb_pfmemalloc(skb);
}
+static inline int sk_unused_reserved_mem(const struct sock *sk)
+{
+ int unused_mem;
+
+ if (likely(!sk->sk_reserved_mem))
+ return 0;
+
+ unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued -
+ atomic_read(&sk->sk_rmem_alloc);
+
+ return unused_mem > 0 ? unused_mem : 0;
+}
+
static inline void sk_mem_reclaim(struct sock *sk)
{
+ int reclaimable;
+
if (!sk_has_account(sk))
return;
- if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk, sk->sk_forward_alloc);
+
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
+
+ if (reclaimable >= (int)PAGE_SIZE)
+ __sk_mem_reclaim(sk, reclaimable);
}
-static inline void sk_mem_reclaim_partial(struct sock *sk)
+static inline void sk_mem_reclaim_final(struct sock *sk)
{
- if (!sk_has_account(sk))
- return;
- if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
+ sk->sk_reserved_mem = 0;
+ sk_mem_reclaim(sk);
}
static inline void sk_mem_charge(struct sock *sk, int size)
@@ -1469,42 +1686,7 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
if (!sk_has_account(sk))
return;
sk->sk_forward_alloc += size;
-
- /* Avoid a possible overflow.
- * TCP send queues can make this happen, if sk_mem_reclaim()
- * is not called and more than 2 GBytes are released at once.
- *
- * If we reach 2 MBytes, reclaim 1 MBytes right now, there is
- * no need to hold that much forward allocation anyway.
- */
- if (unlikely(sk->sk_forward_alloc >= 1 << 21))
- __sk_mem_reclaim(sk, 1 << 20);
-}
-
-DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
-static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
-{
- sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
- sk_wmem_queued_add(sk, -skb->truesize);
- sk_mem_uncharge(sk, skb->truesize);
- if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
- !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
- skb_ext_reset(skb);
- skb_zcopy_clear(skb, true);
- sk->sk_tx_skb_cache = skb;
- return;
- }
- __kfree_skb(skb);
-}
-
-static inline void sock_release_ownership(struct sock *sk)
-{
- if (sk->sk_lock.owned) {
- sk->sk_lock.owned = 0;
-
- /* The sk_lock has mutex_unlock() semantics: */
- mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
- }
+ sk_mem_reclaim(sk);
}
/*
@@ -1526,13 +1708,11 @@ do { \
lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
} while (0)
-#ifdef CONFIG_LOCKDEP
static inline bool lockdep_sock_is_held(const struct sock *sk)
{
return lockdep_is_held(&sk->sk_lock) ||
lockdep_is_held(&sk->sk_lock.slock);
}
-#endif
void lock_sock_nested(struct sock *sk, int subclass);
@@ -1541,6 +1721,7 @@ static inline void lock_sock(struct sock *sk)
lock_sock_nested(sk, 0);
}
+void __lock_sock(struct sock *sk);
void __release_sock(struct sock *sk);
void release_sock(struct sock *sk);
@@ -1551,7 +1732,37 @@ void release_sock(struct sock *sk);
SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
-bool lock_sock_fast(struct sock *sk);
+bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);
+
+/**
+ * lock_sock_fast - fast version of lock_sock
+ * @sk: socket
+ *
+ * This version should be used for very small sections, where the
+ * process won't block. It returns false if the fast path is taken:
+ *
+ * sk_lock.slock locked, owned = 0, BH disabled
+ *
+ * and true if the slow path is taken:
+ *
+ * sk_lock.slock unlocked, owned = 1, BH enabled
+ */
+static inline bool lock_sock_fast(struct sock *sk)
+{
+ /* The sk_lock has mutex_lock() semantics here. */
+ mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+
+ return __lock_sock_fast(sk);
+}
+
+/* fast socket lock variant for caller already holding a [different] socket lock */
+static inline bool lock_sock_fast_nested(struct sock *sk)
+{
+ mutex_acquire(&sk->sk_lock.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
+
+ return __lock_sock_fast(sk);
+}
+
/**
* unlock_sock_fast - complement of lock_sock_fast
* @sk: socket
@@ -1561,13 +1772,22 @@ bool lock_sock_fast(struct sock *sk);
* If slow mode is on, we call regular release_sock()
*/
static inline void unlock_sock_fast(struct sock *sk, bool slow)
+ __releases(&sk->sk_lock.slock)
{
- if (slow)
+ if (slow) {
release_sock(sk);
- else
+ __release(&sk->sk_lock.slock);
+ } else {
+ mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
spin_unlock_bh(&sk->sk_lock.slock);
+ }
}
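
[Editor's note] With lock_sock_fast() inlined above, the lockdep mutex annotations are now balanced across both the fast and slow paths. The canonical usage pattern, sketched; example_touch is hypothetical:

static void example_touch(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);

	/* short, non-blocking critical section */
	WRITE_ONCE(sk->sk_err_soft, 0);

	unlock_sock_fast(sk, slow);
}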
+void sockopt_lock_sock(struct sock *sk);
+void sockopt_release_sock(struct sock *sk);
+bool sockopt_ns_capable(struct user_namespace *ns, int cap);
+bool sockopt_capable(int cap);
+
/* Used by processes to "lock" a socket state, so that
* interrupts and bottom half handlers won't change it
* from under us. It essentially blocks any incoming
@@ -1600,12 +1820,23 @@ static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
return sk->sk_lock.owned;
}
+static inline void sock_release_ownership(struct sock *sk)
+{
+ if (sock_owned_by_user_nocheck(sk)) {
+ sk->sk_lock.owned = 0;
+
+ /* The sk_lock has mutex_unlock() semantics: */
+ mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
+ }
+}
+
/* no reclassification while locks are held */
static inline bool sock_allow_reclassification(const struct sock *csk)
{
struct sock *sk = (struct sock *)csk;
- return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
+ return !sock_owned_by_user_nocheck(sk) &&
+ !spin_is_locked(&sk->sk_lock.slock);
}
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
@@ -1626,27 +1857,45 @@ void sock_rfree(struct sk_buff *skb);
void sock_efree(struct sk_buff *skb);
#ifdef CONFIG_INET
void sock_edemux(struct sk_buff *skb);
+void sock_pfree(struct sk_buff *skb);
#else
#define sock_edemux sock_efree
#endif
+int sk_setsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, unsigned int optlen);
int sock_setsockopt(struct socket *sock, int level, int op,
- char __user *optval, unsigned int optlen);
+ sockptr_t optval, unsigned int optlen);
+int sk_getsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, sockptr_t optlen);
int sock_getsockopt(struct socket *sock, int level, int op,
char __user *optval, int __user *optlen);
int sock_gettstamp(struct socket *sock, void __user *userstamp,
bool timeval, bool time32);
-struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
- int noblock, int *errcode);
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
unsigned long data_len, int noblock,
int *errcode, int max_page_order);
+
+static inline struct sk_buff *sock_alloc_send_skb(struct sock *sk,
+ unsigned long size,
+ int noblock, int *errcode)
+{
+ return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
+}
+
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
void sock_kfree_s(struct sock *sk, void *mem, int size);
void sock_kzfree_s(struct sock *sk, void *mem, int size);
void sk_send_sigurg(struct sock *sk);
+static inline void sock_replace_proto(struct sock *sk, struct proto *proto)
+{
+ if (sk->sk_socket)
+ clear_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
+ WRITE_ONCE(sk->sk_prot, proto);
+}
+
struct sockcm_cookie {
u64 transmit_time;
u32 mark;
@@ -1676,8 +1925,6 @@ int sock_no_getname(struct socket *, struct sockaddr *, int);
int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
int sock_no_listen(struct socket *, int);
int sock_no_shutdown(struct socket *, int);
-int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *);
-int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
@@ -1697,11 +1944,7 @@ int sock_common_getsockopt(struct socket *sock, int level, int optname,
int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags);
int sock_common_setsockopt(struct socket *sock, int level, int optname,
- char __user *optval, unsigned int optlen);
-int compat_sock_common_getsockopt(struct socket *sock, int level,
- int optname, char __user *optval, int __user *optlen);
-int compat_sock_common_setsockopt(struct socket *sock, int level,
- int optname, char __user *optval, unsigned int optlen);
+ sockptr_t optval, unsigned int optlen);
void sk_common_release(struct sock *sk);
@@ -1779,40 +2022,54 @@ static inline int sk_tx_queue_get(const struct sock *sk)
return -1;
}
-static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+static inline void __sk_rx_queue_set(struct sock *sk,
+ const struct sk_buff *skb,
+ bool force_set)
{
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
if (skb_rx_queue_recorded(skb)) {
u16 rx_queue = skb_get_rx_queue(skb);
- if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING))
- return;
-
- sk->sk_rx_queue_mapping = rx_queue;
+ if (force_set ||
+ unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
+ WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue);
}
#endif
}
+static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+{
+ __sk_rx_queue_set(sk, skb, true);
+}
+
+static inline void sk_rx_queue_update(struct sock *sk, const struct sk_buff *skb)
+{
+ __sk_rx_queue_set(sk, skb, false);
+}
+
static inline void sk_rx_queue_clear(struct sock *sk)
{
-#ifdef CONFIG_XPS
- sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
+ WRITE_ONCE(sk->sk_rx_queue_mapping, NO_QUEUE_MAPPING);
#endif
}
-#ifdef CONFIG_XPS
static inline int sk_rx_queue_get(const struct sock *sk)
{
- if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
- return sk->sk_rx_queue_mapping;
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
+ if (sk) {
+ int res = READ_ONCE(sk->sk_rx_queue_mapping);
+
+ if (res != NO_QUEUE_MAPPING)
+ return res;
+ }
+#endif
return -1;
}
-#endif
static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
- sk_tx_queue_clear(sk);
sk->sk_socket = sock;
}
@@ -1859,20 +2116,24 @@ static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
static inline u32 net_tx_rndhash(void)
{
- u32 v = prandom_u32();
+ u32 v = get_random_u32();
return v ?: 1;
}
static inline void sk_set_txhash(struct sock *sk)
{
- sk->sk_txhash = net_tx_rndhash();
+ /* This pairs with READ_ONCE() in skb_set_hash_from_sk() */
+ WRITE_ONCE(sk->sk_txhash, net_tx_rndhash());
}
-static inline void sk_rethink_txhash(struct sock *sk)
+static inline bool sk_rethink_txhash(struct sock *sk)
{
- if (sk->sk_txhash)
+ if (sk->sk_txhash && sk->sk_txrehash == SOCK_TXREHASH_ENABLED) {
sk_set_txhash(sk);
+ return true;
+ }
+ return false;
}
static inline struct dst_entry *
@@ -1895,12 +2156,10 @@ sk_dst_get(struct sock *sk)
return dst;
}
-static inline void dst_negative_advice(struct sock *sk)
+static inline void __dst_negative_advice(struct sock *sk)
{
struct dst_entry *ndst, *dst = __sk_dst_get(sk);
- sk_rethink_txhash(sk);
-
if (dst && dst->ops->negative_advice) {
ndst = dst->ops->negative_advice(dst);
@@ -1912,6 +2171,12 @@ static inline void dst_negative_advice(struct sock *sk)
}
}
+static inline void dst_negative_advice(struct sock *sk)
+{
+ sk_rethink_txhash(sk);
+ __dst_negative_advice(sk);
+}
+
static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
@@ -1962,13 +2227,10 @@ static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
{
if (skb_get_dst_pending_confirm(skb)) {
struct sock *sk = skb->sk;
- unsigned long now = jiffies;
- /* avoid dirtying neighbour */
- if (READ_ONCE(n->confirmed) != now)
- WRITE_ONCE(n->confirmed, now);
if (sk && READ_ONCE(sk->sk_dst_pending_confirm))
WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ neigh_confirm(n);
}
}
@@ -1981,10 +2243,10 @@ static inline bool sk_can_gso(const struct sock *sk)
void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
-static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
+static inline void sk_gso_disable(struct sock *sk)
{
- sk->sk_route_nocaps |= flags;
- sk->sk_route_caps &= ~flags;
+ sk->sk_gso_disabled = 1;
+ sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
}
static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
@@ -2030,9 +2292,7 @@ static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *fro
if (err)
return err;
- skb->len += copy;
- skb->data_len += copy;
- skb->truesize += copy;
+ skb_len_add(skb, copy);
sk_wmem_queued_add(sk, copy);
sk_mem_charge(sk, copy);
return 0;
@@ -2131,9 +2391,12 @@ static inline void sock_poll_wait(struct file *filp, struct socket *sock,
static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
{
- if (sk->sk_txhash) {
+ /* This pairs with WRITE_ONCE() in sk_set_txhash() */
+ u32 txhash = READ_ONCE(sk->sk_txhash);
+
+ if (txhash) {
skb->l4_hash = 1;
- skb->hash = sk->sk_txhash;
+ skb->hash = txhash;
}
}
@@ -2156,17 +2419,46 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
sk_mem_charge(sk, skb->truesize);
}
+static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
+{
+ if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
+ skb_orphan(skb);
+ skb->destructor = sock_efree;
+ skb->sk = sk;
+ return true;
+ }
+ return false;
+}
+
+static inline void skb_prepare_for_gro(struct sk_buff *skb)
+{
+ if (skb->destructor != sock_wfree) {
+ skb_orphan(skb);
+ return;
+ }
+ skb->slow_gro = 1;
+}
+
void sk_reset_timer(struct sock *sk, struct timer_list *timer,
unsigned long expires);
void sk_stop_timer(struct sock *sk, struct timer_list *timer);
+void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer);
+
int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
struct sk_buff *skb, unsigned int flags,
void (*destructor)(struct sock *sk,
struct sk_buff *skb));
int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
-int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+
+int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb,
+ enum skb_drop_reason *reason);
+
+static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ return sock_queue_rcv_skb_reason(sk, skb, NULL);
+}
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
@@ -2178,12 +2470,19 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
static inline int sock_error(struct sock *sk)
{
int err;
- if (likely(!sk->sk_err))
+
+ /* Avoid an atomic operation for the common case.
+ * This is racy since another cpu/thread can change sk_err under us.
+ */
+ if (likely(data_race(!sk->sk_err)))
return 0;
+
err = xchg(&sk->sk_err, 0);
return -err;
}
+void sk_error_report(struct sock *sk);
+
static inline unsigned long sock_wspace(struct sock *sk)
{
int amt = 0;
@@ -2245,31 +2544,32 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
return;
val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
+ val = max_t(u32, val, sk_unused_reserved_mem(sk));
WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
}
-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
- bool force_schedule);
-
/**
* sk_page_frag - return an appropriate page_frag
* @sk: socket
*
* Use the per task page_frag instead of the per socket one for
- * optimization when we know that we're in the normal context and owns
+ * optimization when we know that we're in process context and own
* everything that's associated with %current.
*
- * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest
- * inside other socket operations and end up recursing into sk_page_frag()
- * while it's already in use.
+ * Both direct reclaim and page faults can nest inside other
+ * socket operations and end up recursing into sk_page_frag()
+ * while it's already in use: explicitly avoid task page_frag
+ * usage if the caller is potentially doing any of them.
+ * This assumes that page fault handlers use the GFP_NOFS flags.
*
* Return: a per task page_frag if context allows that,
* otherwise a per socket one.
*/
static inline struct page_frag *sk_page_frag(struct sock *sk)
{
- if (gfpflags_normal_context(sk->sk_allocation))
+ if ((sk->sk_allocation & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC | __GFP_FS)) ==
+ (__GFP_DIRECT_RECLAIM | __GFP_FS))
return &current->task_frag;
return &sk->sk_frag;
@@ -2290,6 +2590,11 @@ static inline gfp_t gfp_any(void)
return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}
+static inline gfp_t gfp_memcg_charge(void)
+{
+ return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
+}
+
static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
{
return noblock ? 0 : sk->sk_rcvtimeo;
@@ -2404,20 +2709,21 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
__sock_recv_wifi_status(msg, sk, skb);
}
-void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
- struct sk_buff *skb);
+void __sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb);
#define SK_DEFAULT_STAMP (-1L * NSEC_PER_SEC)
-static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
- struct sk_buff *skb)
+static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb)
{
-#define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \
- (1UL << SOCK_RCVTSTAMP))
+#define FLAGS_RECV_CMSGS ((1UL << SOCK_RXQ_OVFL) | \
+ (1UL << SOCK_RCVTSTAMP) | \
+ (1UL << SOCK_RCVMARK))
#define TSFLAGS_ANY (SOF_TIMESTAMPING_SOFTWARE | \
SOF_TIMESTAMPING_RAW_HARDWARE)
- if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
- __sock_recv_ts_and_drops(msg, sk, skb);
+ if (sk->sk_flags & FLAGS_RECV_CMSGS || sk->sk_tsflags & TSFLAGS_ANY)
+ __sock_recv_cmsgs(msg, sk, skb);
else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
sock_write_timestamp(sk, skb->tstamp);
else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
@@ -2442,7 +2748,7 @@ static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
__sock_tx_timestamp(tsflags, tx_flags);
if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
- *tskey = sk->sk_tskey++;
+ *tskey = atomic_inc_return(&sk->sk_tskey) - 1;
}
if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
*tx_flags |= SKBTX_WIFI_STATUS;
@@ -2460,7 +2766,11 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
&skb_shinfo(skb)->tskey);
}
-DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
+static inline bool sk_is_tcp(const struct sock *sk)
+{
+ return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP;
+}
+
/**
* sk_eat_skb - Release a skb if it is no longer needed
* @sk: socket to eat this skb from
@@ -2472,47 +2782,56 @@ DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
__skb_unlink(skb, &sk->sk_receive_queue);
- if (static_branch_unlikely(&tcp_rx_skb_cache_key) &&
- !sk->sk_rx_skb_cache) {
- sk->sk_rx_skb_cache = skb;
- skb_orphan(skb);
- return;
- }
__kfree_skb(skb);
}
-static inline
-struct net *sock_net(const struct sock *sk)
+static inline bool
+skb_sk_is_prefetched(struct sk_buff *skb)
{
- return read_pnet(&sk->sk_net);
+#ifdef CONFIG_INET
+ return skb->destructor == sock_pfree;
+#else
+ return false;
+#endif /* CONFIG_INET */
}
-static inline
-void sock_net_set(struct sock *sk, struct net *net)
+/* This helper checks if a socket is a full socket,
+ * i.e. _not_ a timewait or request socket.
+ */
+static inline bool sk_fullsock(const struct sock *sk)
{
- write_pnet(&sk->sk_net, net);
+ return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
+}
+
+static inline bool
+sk_is_refcounted(struct sock *sk)
+{
+ /* Only full sockets have sk->sk_flags. */
+ return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE);
}
-static inline struct sock *skb_steal_sock(struct sk_buff *skb)
+/**
+ * skb_steal_sock - steal a socket from an sk_buff
+ * @skb: sk_buff to steal the socket from
+ * @refcounted: is set to true if the socket is reference-counted
+ */
+static inline struct sock *
+skb_steal_sock(struct sk_buff *skb, bool *refcounted)
{
if (skb->sk) {
struct sock *sk = skb->sk;
+ *refcounted = true;
+ if (skb_sk_is_prefetched(skb))
+ *refcounted = sk_is_refcounted(sk);
skb->destructor = NULL;
skb->sk = NULL;
return sk;
}
+ *refcounted = false;
return NULL;
}
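
A minimal call-site sketch for the reworked two-argument skb_steal_sock();
the wrapper name below is illustrative and not part of this diff, while
skb_steal_sock() and sock_put() are the helpers shown/used above:

	static void demux_deliver(struct sk_buff *skb)
	{
		bool refcounted;
		struct sock *sk = skb_steal_sock(skb, &refcounted);

		if (!sk)
			return;		/* nothing was prefetched onto this skb */

		/* ... deliver skb to sk ... */

		if (refcounted)		/* no ref is held for SOCK_RCU_FREE sockets */
			sock_put(sk);
	}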
-/* This helper checks if a socket is a full socket,
- * ie _not_ a timewait or request socket.
- */
-static inline bool sk_fullsock(const struct sock *sk)
-{
- return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
-}
-
/* Checks if this SKB belongs to an HW offloaded socket
* and whether any SW fallbacks are required based on dev.
* Check decrypted mark in case skb_orphan() cleared socket.
@@ -2575,24 +2894,25 @@ extern int sysctl_optmem_max;
extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;
+#define SKB_FRAG_PAGE_ORDER get_order(32768)
DECLARE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
{
/* Does this proto have per netns sysctl_wmem ? */
if (proto->sysctl_wmem_offset)
- return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset);
+ return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset));
- return *proto->sysctl_wmem;
+ return READ_ONCE(*proto->sysctl_wmem);
}
static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
{
/* Does this proto have per netns sysctl_rmem ? */
if (proto->sysctl_rmem_offset)
- return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset);
+ return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset));
- return *proto->sysctl_rmem;
+ return READ_ONCE(*proto->sysctl_rmem);
}
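
The READ_ONCE() additions matter because these sysctls may be rewritten
while lockless readers run. A standalone model of the per-netns fallback
itself, with stand-in types (the struct layout and values are illustrative,
not the kernel's):

	#include <stdio.h>
	#include <stddef.h>

	struct netns { int other_knob; int sysctl_tcp_rmem0; };

	struct proto_model {
		int *sysctl_rmem;			/* global default */
		unsigned long sysctl_rmem_offset;	/* 0 = no per-netns value */
	};

	static int get_rmem0(struct netns *net, const struct proto_model *p)
	{
		if (p->sysctl_rmem_offset)	/* per-netns value wins */
			return *(int *)((char *)net + p->sysctl_rmem_offset);
		return *p->sysctl_rmem;		/* fall back to the global knob */
	}

	int main(void)
	{
		int global_rmem = 4096;
		struct netns net = { .sysctl_tcp_rmem0 = 8192 };
		struct proto_model no_ns = { .sysctl_rmem = &global_rmem };
		struct proto_model with_ns = {
			.sysctl_rmem = &global_rmem,
			.sysctl_rmem_offset =
				offsetof(struct netns, sysctl_tcp_rmem0),
		};

		/* prints "4096 8192" */
		printf("%d %d\n", get_rmem0(&net, &no_ns),
		       get_rmem0(&net, &with_ns));
		return 0;
	}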
/* Default TCP Small queue budget is ~1 ms of data (1sec >> 10)
@@ -2613,13 +2933,14 @@ static inline void sk_pacing_shift_update(struct sock *sk, int val)
*/
static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
{
+ int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
int mdif;
- if (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif)
+ if (!bound_dev_if || bound_dev_if == dif)
return true;
mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif);
- if (mdif && mdif == sk->sk_bound_dev_if)
+ if (mdif && mdif == bound_dev_if)
return true;
return false;
@@ -2627,4 +2948,31 @@ static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
void sock_def_readable(struct sock *sk);
+int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk);
+void sock_set_timestamp(struct sock *sk, int optname, bool valbool);
+int sock_set_timestamping(struct sock *sk, int optname,
+ struct so_timestamping timestamping);
+
+void sock_enable_timestamps(struct sock *sk);
+void sock_no_linger(struct sock *sk);
+void sock_set_keepalive(struct sock *sk);
+void sock_set_priority(struct sock *sk, u32 priority);
+void sock_set_rcvbuf(struct sock *sk, int val);
+void sock_set_mark(struct sock *sk, u32 val);
+void sock_set_reuseaddr(struct sock *sk);
+void sock_set_reuseport(struct sock *sk);
+void sock_set_sndtimeo(struct sock *sk, s64 secs);
+
+int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
+
+int sock_get_timeout(long timeo, void *optval, bool old_timeval);
+int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
+ sockptr_t optval, int optlen, bool old_timeval);
+
+static inline bool sk_is_readable(struct sock *sk)
+{
+ if (sk->sk_prot->sock_is_readable)
+ return sk->sk_prot->sock_is_readable(sk);
+ return false;
+}
#endif /* _SOCK_H */
diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
index 43f4a818d88f..efc9085c6892 100644
--- a/include/net/sock_reuseport.h
+++ b/include/net/sock_reuseport.h
@@ -13,8 +13,9 @@ extern spinlock_t reuseport_lock;
struct sock_reuseport {
struct rcu_head rcu;
- u16 max_socks; /* length of socks */
- u16 num_socks; /* elements in socks */
+ u16 max_socks; /* length of socks */
+ u16 num_socks; /* elements in socks */
+ u16 num_closed_socks; /* closed elements in socks */
/* The last synq overflow event timestamp of this
* reuse->socks[] group.
*/
@@ -24,37 +25,38 @@ struct sock_reuseport {
unsigned int bind_inany:1;
unsigned int has_conns:1;
struct bpf_prog __rcu *prog; /* optional BPF sock selector */
- struct sock *socks[0]; /* array of sock pointers */
+ struct sock *socks[]; /* array of sock pointers */
};
extern int reuseport_alloc(struct sock *sk, bool bind_inany);
extern int reuseport_add_sock(struct sock *sk, struct sock *sk2,
bool bind_inany);
extern void reuseport_detach_sock(struct sock *sk);
+void reuseport_stop_listen_sock(struct sock *sk);
extern struct sock *reuseport_select_sock(struct sock *sk,
u32 hash,
struct sk_buff *skb,
int hdr_len);
+struct sock *reuseport_migrate_sock(struct sock *sk,
+ struct sock *migrating_sk,
+ struct sk_buff *skb);
extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
extern int reuseport_detach_prog(struct sock *sk);
-static inline bool reuseport_has_conns(struct sock *sk, bool set)
+static inline bool reuseport_has_conns(struct sock *sk)
{
struct sock_reuseport *reuse;
bool ret = false;
rcu_read_lock();
reuse = rcu_dereference(sk->sk_reuseport_cb);
- if (reuse) {
- if (set)
- reuse->has_conns = 1;
- ret = reuse->has_conns;
- }
+ if (reuse && reuse->has_conns)
+ ret = true;
rcu_read_unlock();
return ret;
}
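
A call-site sketch of the split get/set API; the wrapper functions are
illustrative, only reuseport_has_conns() and reuseport_has_conns_set()
come from this header:

	static void example_on_connect(struct sock *sk)
	{
		/* Old code set the flag via reuseport_has_conns(sk, true);
		 * the setter is now a separate out-of-line call. */
		reuseport_has_conns_set(sk);
	}

	static bool example_score_socket(struct sock *sk)
	{
		/* Readers keep the RCU-protected inline, minus the bool arg. */
		return reuseport_has_conns(sk);
	}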
-int reuseport_get_id(struct sock_reuseport *reuse);
+void reuseport_has_conns_set(struct sock *sk);
#endif /* _SOCK_REUSEPORT_H */
diff --git a/include/net/stp.h b/include/net/stp.h
index 2914e6d53490..528103fce2c0 100644
--- a/include/net/stp.h
+++ b/include/net/stp.h
@@ -2,6 +2,8 @@
#ifndef _NET_STP_H
#define _NET_STP_H
+#include <linux/if_ether.h>
+
struct stp_proto {
unsigned char group_address[ETH_ALEN];
void (*rcv)(const struct stp_proto *, struct sk_buff *,
diff --git a/include/net/strparser.h b/include/net/strparser.h
index 1d20b98493a1..41e2ce9e9e10 100644
--- a/include/net/strparser.h
+++ b/include/net/strparser.h
@@ -54,10 +54,35 @@ struct strp_msg {
int offset;
};
+struct _strp_msg {
+ /* Internal cb structure. struct strp_msg must be first for passing
+ * to upper layer.
+ */
+ struct strp_msg strp;
+ int accum_len;
+};
+
+struct sk_skb_cb {
+#define SK_SKB_CB_PRIV_LEN 20
+ unsigned char data[SK_SKB_CB_PRIV_LEN];
+ /* align strp on cache line boundary within skb->cb[] */
+ unsigned char pad[4];
+ struct _strp_msg strp;
+
+ /* strp users' data follows */
+ struct tls_msg {
+ u8 control;
+ } tls;
+ /* temp_reg is a temporary register used for bpf_convert_data_end_access
+ * when dst_reg == src_reg.
+ */
+ u64 temp_reg;
+};
+
static inline struct strp_msg *strp_msg(struct sk_buff *skb)
{
return (struct strp_msg *)((void *)skb->cb +
- offsetof(struct qdisc_skb_cb, data));
+ offsetof(struct sk_skb_cb, strp));
}
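
A standalone model of the sk_skb_cb layout that strp_msg() now points into;
the structs are stand-ins sized like the header above, and 48 bytes is the
size of skb->cb[]:

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	struct strp_msg_m { int full_len; int offset; };
	struct _strp_msg_m { struct strp_msg_m strp; int accum_len; };

	struct sk_skb_cb_m {
		unsigned char data[20];
		unsigned char pad[4];
		struct _strp_msg_m strp;	/* strp_msg must sit first inside */
		struct { uint8_t control; } tls;
		uint64_t temp_reg;
	};

	int main(void)
	{
		/* prints "strp at 24, struct is 48 bytes" on LP64 */
		printf("strp at %zu, struct is %zu bytes (skb->cb[] holds 48)\n",
		       offsetof(struct sk_skb_cb_m, strp),
		       sizeof(struct sk_skb_cb_m));
		return 0;
	}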
/* Structure for an attached lower socket */
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index aee86a189432..7dcdc97c0bc3 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -16,30 +16,36 @@
#define SWITCHDEV_F_SKIP_EOPNOTSUPP BIT(1)
#define SWITCHDEV_F_DEFER BIT(2)
-struct switchdev_trans {
- bool ph_prepare;
-};
-
-static inline bool switchdev_trans_ph_prepare(struct switchdev_trans *trans)
-{
- return trans && trans->ph_prepare;
-}
-
-static inline bool switchdev_trans_ph_commit(struct switchdev_trans *trans)
-{
- return trans && !trans->ph_prepare;
-}
-
enum switchdev_attr_id {
SWITCHDEV_ATTR_ID_UNDEFINED,
SWITCHDEV_ATTR_ID_PORT_STP_STATE,
+ SWITCHDEV_ATTR_ID_PORT_MST_STATE,
SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS,
SWITCHDEV_ATTR_ID_PORT_MROUTER,
SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
+ SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL,
SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
+ SWITCHDEV_ATTR_ID_BRIDGE_MST,
+ SWITCHDEV_ATTR_ID_MRP_PORT_ROLE,
+ SWITCHDEV_ATTR_ID_VLAN_MSTI,
+};
+
+struct switchdev_mst_state {
+ u16 msti;
+ u8 state;
+};
+
+struct switchdev_brport_flags {
+ unsigned long val;
+ unsigned long mask;
+};
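
switchdev_brport_flags replaces the bare bitmask with a val/mask pair so a
request can say which bits it actually touches. A standalone model of the
conventional masked read-modify-write (bit names illustrative; this is not
code from the header):

	#include <stdio.h>

	#define BR_LEARNING	(1UL << 0)
	#define BR_FLOOD	(1UL << 1)

	struct brport_flags { unsigned long val, mask; };

	static unsigned long apply_flags(unsigned long cur, struct brport_flags f)
	{
		/* Touch only the bits named in .mask; take their value from .val. */
		return (cur & ~f.mask) | (f.val & f.mask);
	}

	int main(void)
	{
		unsigned long cur = BR_LEARNING | BR_FLOOD;
		/* Clear BR_FLOOD, leave BR_LEARNING untouched. */
		struct brport_flags f = { .val = 0, .mask = BR_FLOOD };

		printf("%#lx -> %#lx\n", cur, apply_flags(cur, f)); /* 0x3 -> 0x1 */
		return 0;
	}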
+
+struct switchdev_vlan_msti {
+ u16 vid;
+ u16 msti;
};
struct switchdev_attr {
@@ -50,11 +56,16 @@ struct switchdev_attr {
void (*complete)(struct net_device *dev, int err, void *priv);
union {
u8 stp_state; /* PORT_STP_STATE */
- unsigned long brport_flags; /* PORT_{PRE}_BRIDGE_FLAGS */
+ struct switchdev_mst_state mst_state; /* PORT_MST_STATE */
+ struct switchdev_brport_flags brport_flags; /* PORT_BRIDGE_FLAGS */
bool mrouter; /* PORT_MROUTER */
clock_t ageing_time; /* BRIDGE_AGEING_TIME */
bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */
+ u16 vlan_protocol; /* BRIDGE_VLAN_PROTOCOL */
+ bool mst; /* BRIDGE_MST */
bool mc_disabled; /* MC_DISABLED */
+ u8 mrp_port_role; /* MRP_PORT_ROLE */
+ struct switchdev_vlan_msti vlan_msti; /* VLAN_MSTI */
} u;
};
@@ -63,9 +74,17 @@ enum switchdev_obj_id {
SWITCHDEV_OBJ_ID_PORT_VLAN,
SWITCHDEV_OBJ_ID_PORT_MDB,
SWITCHDEV_OBJ_ID_HOST_MDB,
+ SWITCHDEV_OBJ_ID_MRP,
+ SWITCHDEV_OBJ_ID_RING_TEST_MRP,
+ SWITCHDEV_OBJ_ID_RING_ROLE_MRP,
+ SWITCHDEV_OBJ_ID_RING_STATE_MRP,
+ SWITCHDEV_OBJ_ID_IN_TEST_MRP,
+ SWITCHDEV_OBJ_ID_IN_ROLE_MRP,
+ SWITCHDEV_OBJ_ID_IN_STATE_MRP,
};
struct switchdev_obj {
+ struct list_head list;
struct net_device *orig_dev;
enum switchdev_obj_id id;
u32 flags;
@@ -77,8 +96,14 @@ struct switchdev_obj {
struct switchdev_obj_port_vlan {
struct switchdev_obj obj;
u16 flags;
- u16 vid_begin;
- u16 vid_end;
+ u16 vid;
+ /* If set, the notifier signifies a change of one of the following
+ * flags for a VLAN that already exists:
+ * - BRIDGE_VLAN_INFO_PVID
+ * - BRIDGE_VLAN_INFO_UNTAGGED
+ * Entries with BRIDGE_VLAN_INFO_BRENTRY unset are not notified at all.
+ */
+ bool changed;
};
#define SWITCHDEV_OBJ_PORT_VLAN(OBJ) \
@@ -94,14 +119,105 @@ struct switchdev_obj_port_mdb {
#define SWITCHDEV_OBJ_PORT_MDB(OBJ) \
container_of((OBJ), struct switchdev_obj_port_mdb, obj)
+
+/* SWITCHDEV_OBJ_ID_MRP */
+struct switchdev_obj_mrp {
+ struct switchdev_obj obj;
+ struct net_device *p_port;
+ struct net_device *s_port;
+ u32 ring_id;
+ u16 prio;
+};
+
+#define SWITCHDEV_OBJ_MRP(OBJ) \
+ container_of((OBJ), struct switchdev_obj_mrp, obj)
+
+/* SWITCHDEV_OBJ_ID_RING_TEST_MRP */
+struct switchdev_obj_ring_test_mrp {
+ struct switchdev_obj obj;
+	/* The value is in microseconds; a value of 0 means stop */
+ u32 interval;
+ u8 max_miss;
+ u32 ring_id;
+ u32 period;
+ bool monitor;
+};
+
+#define SWITCHDEV_OBJ_RING_TEST_MRP(OBJ) \
+ container_of((OBJ), struct switchdev_obj_ring_test_mrp, obj)
+
+/* SWITCHDEV_OBJ_ID_RING_ROLE_MRP */
+struct switchdev_obj_ring_role_mrp {
+ struct switchdev_obj obj;
+ u8 ring_role;
+ u32 ring_id;
+ u8 sw_backup;
+};
+
+#define SWITCHDEV_OBJ_RING_ROLE_MRP(OBJ) \
+ container_of((OBJ), struct switchdev_obj_ring_role_mrp, obj)
+
+struct switchdev_obj_ring_state_mrp {
+ struct switchdev_obj obj;
+ u8 ring_state;
+ u32 ring_id;
+};
+
+#define SWITCHDEV_OBJ_RING_STATE_MRP(OBJ) \
+ container_of((OBJ), struct switchdev_obj_ring_state_mrp, obj)
+
+/* SWITCHDEV_OBJ_ID_IN_TEST_MRP */
+struct switchdev_obj_in_test_mrp {
+ struct switchdev_obj obj;
+	/* The value is in microseconds; a value of 0 means stop */
+ u32 interval;
+ u32 in_id;
+ u32 period;
+ u8 max_miss;
+};
+
+#define SWITCHDEV_OBJ_IN_TEST_MRP(OBJ) \
+ container_of((OBJ), struct switchdev_obj_in_test_mrp, obj)
+
+/* SWITCHDEV_OBJ_ID_IN_ROLE_MRP */
+struct switchdev_obj_in_role_mrp {
+ struct switchdev_obj obj;
+ struct net_device *i_port;
+ u32 ring_id;
+ u16 in_id;
+ u8 in_role;
+ u8 sw_backup;
+};
+
+#define SWITCHDEV_OBJ_IN_ROLE_MRP(OBJ) \
+ container_of((OBJ), struct switchdev_obj_in_role_mrp, obj)
+
+struct switchdev_obj_in_state_mrp {
+ struct switchdev_obj obj;
+ u32 in_id;
+ u8 in_state;
+};
+
+#define SWITCHDEV_OBJ_IN_STATE_MRP(OBJ) \
+ container_of((OBJ), struct switchdev_obj_in_state_mrp, obj)
+
typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj);
+struct switchdev_brport {
+ struct net_device *dev;
+ const void *ctx;
+ struct notifier_block *atomic_nb;
+ struct notifier_block *blocking_nb;
+ bool tx_fwd_offload;
+};
+
enum switchdev_notifier_type {
SWITCHDEV_FDB_ADD_TO_BRIDGE = 1,
SWITCHDEV_FDB_DEL_TO_BRIDGE,
SWITCHDEV_FDB_ADD_TO_DEVICE,
SWITCHDEV_FDB_DEL_TO_DEVICE,
SWITCHDEV_FDB_OFFLOADED,
+ SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
SWITCHDEV_PORT_OBJ_ADD, /* Blocking. */
SWITCHDEV_PORT_OBJ_DEL, /* Blocking. */
@@ -112,35 +228,46 @@ enum switchdev_notifier_type {
SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE,
SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE,
SWITCHDEV_VXLAN_FDB_OFFLOADED,
+
+ SWITCHDEV_BRPORT_OFFLOADED,
+ SWITCHDEV_BRPORT_UNOFFLOADED,
};
struct switchdev_notifier_info {
struct net_device *dev;
struct netlink_ext_ack *extack;
+ const void *ctx;
};
+/* Remember to update br_switchdev_fdb_populate() when adding
+ * new members to this structure
+ */
struct switchdev_notifier_fdb_info {
struct switchdev_notifier_info info; /* must be first */
const unsigned char *addr;
u16 vid;
u8 added_by_user:1,
+ is_local:1,
offloaded:1;
};
struct switchdev_notifier_port_obj_info {
struct switchdev_notifier_info info; /* must be first */
const struct switchdev_obj *obj;
- struct switchdev_trans *trans;
bool handled;
};
struct switchdev_notifier_port_attr_info {
struct switchdev_notifier_info info; /* must be first */
const struct switchdev_attr *attr;
- struct switchdev_trans *trans;
bool handled;
};
+struct switchdev_notifier_brport_info {
+ struct switchdev_notifier_info info; /* must be first */
+ const struct switchdev_brport brport;
+};
+
static inline struct net_device *
switchdev_notifier_info_to_dev(const struct switchdev_notifier_info *info)
{
@@ -153,11 +280,29 @@ switchdev_notifier_info_to_extack(const struct switchdev_notifier_info *info)
return info->extack;
}
+static inline bool
+switchdev_fdb_is_dynamically_learned(const struct switchdev_notifier_fdb_info *fdb_info)
+{
+ return !fdb_info->added_by_user && !fdb_info->is_local;
+}
+
#ifdef CONFIG_NET_SWITCHDEV
+int switchdev_bridge_port_offload(struct net_device *brport_dev,
+ struct net_device *dev, const void *ctx,
+ struct notifier_block *atomic_nb,
+ struct notifier_block *blocking_nb,
+ bool tx_fwd_offload,
+ struct netlink_ext_ack *extack);
+void switchdev_bridge_port_unoffload(struct net_device *brport_dev,
+ const void *ctx,
+ struct notifier_block *atomic_nb,
+ struct notifier_block *blocking_nb);
+
void switchdev_deferred_process(void);
int switchdev_port_attr_set(struct net_device *dev,
- const struct switchdev_attr *attr);
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack);
int switchdev_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack);
@@ -180,33 +325,76 @@ void switchdev_port_fwd_mark_set(struct net_device *dev,
struct net_device *group_dev,
bool joining);
+int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
+ const struct switchdev_notifier_fdb_info *fdb_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
+ const struct switchdev_notifier_fdb_info *fdb_info));
+
int switchdev_handle_port_obj_add(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
- int (*add_cb)(struct net_device *dev,
+ int (*add_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack));
+int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*add_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack));
int switchdev_handle_port_obj_del(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
- int (*del_cb)(struct net_device *dev,
+ int (*del_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj));
+int switchdev_handle_port_obj_del_foreign(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*del_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj));
int switchdev_handle_port_attr_set(struct net_device *dev,
struct switchdev_notifier_port_attr_info *port_attr_info,
bool (*check_cb)(const struct net_device *dev),
- int (*set_cb)(struct net_device *dev,
+ int (*set_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
- struct switchdev_trans *trans));
+ struct netlink_ext_ack *extack));
#else
+static inline int
+switchdev_bridge_port_offload(struct net_device *brport_dev,
+ struct net_device *dev, const void *ctx,
+ struct notifier_block *atomic_nb,
+ struct notifier_block *blocking_nb,
+ bool tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void
+switchdev_bridge_port_unoffload(struct net_device *brport_dev,
+ const void *ctx,
+ struct notifier_block *atomic_nb,
+ struct notifier_block *blocking_nb)
+{
+}
+
static inline void switchdev_deferred_process(void)
{
}
static inline int switchdev_port_attr_set(struct net_device *dev,
- const struct switchdev_attr *attr)
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
@@ -264,12 +452,36 @@ call_switchdev_blocking_notifiers(unsigned long val,
}
static inline int
+switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
+ const struct switchdev_notifier_fdb_info *fdb_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
+ const struct switchdev_notifier_fdb_info *fdb_info))
+{
+ return 0;
+}
+
+static inline int
switchdev_handle_port_obj_add(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
- int (*add_cb)(struct net_device *dev,
+ int (*add_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack))
+{
+ return 0;
+}
+
+static inline int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*add_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack))
{
return 0;
@@ -279,7 +491,19 @@ static inline int
switchdev_handle_port_obj_del(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
- int (*del_cb)(struct net_device *dev,
+ int (*del_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj))
+{
+ return 0;
+}
+
+static inline int
+switchdev_handle_port_obj_del_foreign(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*del_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj))
{
return 0;
@@ -289,9 +513,9 @@ static inline int
switchdev_handle_port_attr_set(struct net_device *dev,
struct switchdev_notifier_port_attr_info *port_attr_info,
bool (*check_cb)(const struct net_device *dev),
- int (*set_cb)(struct net_device *dev,
+ int (*set_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
- struct switchdev_trans *trans))
+ struct netlink_ext_ack *extack))
{
return 0;
}
diff --git a/include/net/tc_act/tc_ct.h b/include/net/tc_act/tc_ct.h
index bdc20ab3b88d..8250d6f0a462 100644
--- a/include/net/tc_act/tc_ct.h
+++ b/include/net/tc_act/tc_ct.h
@@ -25,6 +25,9 @@ struct tcf_ct_params {
u16 ct_action;
struct rcu_head rcu;
+
+ struct tcf_ct_flow_table *ct_ft;
+ struct nf_flowtable *nf_ft;
};
struct tcf_ct {
@@ -33,8 +36,10 @@ struct tcf_ct {
};
#define to_ct(a) ((struct tcf_ct *)a)
-#define to_ct_params(a) ((struct tcf_ct_params *) \
- rtnl_dereference((to_ct(a)->params)))
+#define to_ct_params(a) \
+ ((struct tcf_ct_params *) \
+ rcu_dereference_protected(to_ct(a)->params, \
+ lockdep_is_held(&a->tcfa_lock)))
static inline uint16_t tcf_ct_zone(const struct tc_action *a)
{
@@ -46,11 +51,36 @@ static inline int tcf_ct_action(const struct tc_action *a)
return to_ct_params(a)->ct_action;
}
+static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
+{
+ return to_ct_params(a)->nf_ft;
+}
+
#else
static inline uint16_t tcf_ct_zone(const struct tc_action *a) { return 0; }
static inline int tcf_ct_action(const struct tc_action *a) { return 0; }
+static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
+{
+ return NULL;
+}
#endif /* CONFIG_NF_CONNTRACK */
+#if IS_ENABLED(CONFIG_NET_ACT_CT)
+static inline void
+tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie)
+{
+ enum ip_conntrack_info ctinfo = cookie & NFCT_INFOMASK;
+ struct nf_conn *ct;
+
+ ct = (struct nf_conn *)(cookie & NFCT_PTRMASK);
+ nf_conntrack_get(&ct->ct_general);
+ nf_ct_set(skb, ct, ctinfo);
+}
+#else
+static inline void
+tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie) { }
+#endif
+
static inline bool is_tcf_ct(const struct tc_action *a)
{
#if defined(CONFIG_NET_CLS_ACT) && IS_ENABLED(CONFIG_NF_CONNTRACK)
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
index eb8f01c819e6..832efd40e023 100644
--- a/include/net/tc_act/tc_gact.h
+++ b/include/net/tc_act/tc_gact.h
@@ -59,4 +59,19 @@ static inline u32 tcf_gact_goto_chain_index(const struct tc_action *a)
return READ_ONCE(a->tcfa_action) & TC_ACT_EXT_VAL_MASK;
}
+static inline bool is_tcf_gact_continue(const struct tc_action *a)
+{
+ return __is_tcf_gact_act(a, TC_ACT_UNSPEC, false);
+}
+
+static inline bool is_tcf_gact_reclassify(const struct tc_action *a)
+{
+ return __is_tcf_gact_act(a, TC_ACT_RECLASSIFY, false);
+}
+
+static inline bool is_tcf_gact_pipe(const struct tc_action *a)
+{
+ return __is_tcf_gact_act(a, TC_ACT_PIPE, false);
+}
+
#endif /* __NET_TC_GACT_H */
diff --git a/include/net/tc_act/tc_gate.h b/include/net/tc_act/tc_gate.h
new file mode 100644
index 000000000000..c8fa11ebb397
--- /dev/null
+++ b/include/net/tc_act/tc_gate.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright 2020 NXP */
+
+#ifndef __NET_TC_GATE_H
+#define __NET_TC_GATE_H
+
+#include <net/act_api.h>
+#include <linux/tc_act/tc_gate.h>
+
+struct action_gate_entry {
+ u8 gate_state;
+ u32 interval;
+ s32 ipv;
+ s32 maxoctets;
+};
+
+struct tcfg_gate_entry {
+ int index;
+ u8 gate_state;
+ u32 interval;
+ s32 ipv;
+ s32 maxoctets;
+ struct list_head list;
+};
+
+struct tcf_gate_params {
+ s32 tcfg_priority;
+ u64 tcfg_basetime;
+ u64 tcfg_cycletime;
+ u64 tcfg_cycletime_ext;
+ u32 tcfg_flags;
+ s32 tcfg_clockid;
+ size_t num_entries;
+ struct list_head entries;
+};
+
+#define GATE_ACT_GATE_OPEN BIT(0)
+#define GATE_ACT_PENDING BIT(1)
+
+struct tcf_gate {
+ struct tc_action common;
+ struct tcf_gate_params param;
+ u8 current_gate_status;
+ ktime_t current_close_time;
+ u32 current_entry_octets;
+ s32 current_max_octets;
+ struct tcfg_gate_entry *next_entry;
+ struct hrtimer hitimer;
+ enum tk_offsets tk_offset;
+};
+
+#define to_gate(a) ((struct tcf_gate *)a)
+
+static inline bool is_tcf_gate(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (a->ops && a->ops->id == TCA_ID_GATE)
+ return true;
+#endif
+ return false;
+}
+
+static inline s32 tcf_gate_prio(const struct tc_action *a)
+{
+ s32 tcfg_prio;
+
+ tcfg_prio = to_gate(a)->param.tcfg_priority;
+
+ return tcfg_prio;
+}
+
+static inline u64 tcf_gate_basetime(const struct tc_action *a)
+{
+ u64 tcfg_basetime;
+
+ tcfg_basetime = to_gate(a)->param.tcfg_basetime;
+
+ return tcfg_basetime;
+}
+
+static inline u64 tcf_gate_cycletime(const struct tc_action *a)
+{
+ u64 tcfg_cycletime;
+
+ tcfg_cycletime = to_gate(a)->param.tcfg_cycletime;
+
+ return tcfg_cycletime;
+}
+
+static inline u64 tcf_gate_cycletimeext(const struct tc_action *a)
+{
+ u64 tcfg_cycletimeext;
+
+ tcfg_cycletimeext = to_gate(a)->param.tcfg_cycletime_ext;
+
+ return tcfg_cycletimeext;
+}
+
+static inline u32 tcf_gate_num_entries(const struct tc_action *a)
+{
+ u32 num_entries;
+
+ num_entries = to_gate(a)->param.num_entries;
+
+ return num_entries;
+}
+
+static inline struct action_gate_entry
+ *tcf_gate_get_list(const struct tc_action *a)
+{
+ struct action_gate_entry *oe;
+ struct tcf_gate_params *p;
+ struct tcfg_gate_entry *entry;
+ u32 num_entries;
+ int i = 0;
+
+ p = &to_gate(a)->param;
+ num_entries = p->num_entries;
+
+ list_for_each_entry(entry, &p->entries, list)
+ i++;
+
+ if (i != num_entries)
+ return NULL;
+
+ oe = kcalloc(num_entries, sizeof(*oe), GFP_ATOMIC);
+ if (!oe)
+ return NULL;
+
+ i = 0;
+ list_for_each_entry(entry, &p->entries, list) {
+ oe[i].gate_state = entry->gate_state;
+ oe[i].interval = entry->interval;
+ oe[i].ipv = entry->ipv;
+ oe[i].maxoctets = entry->maxoctets;
+ i++;
+ }
+
+ return oe;
+}
+#endif
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index 1cace4c69e44..32ce8ea36950 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -10,6 +10,7 @@ struct tcf_mirred {
int tcfm_eaction;
bool tcfm_mac_header_xmit;
struct net_device __rcu *tcfm_dev;
+ netdevice_tracker tcfm_dev_tracker;
struct list_head tcfm_list;
};
#define to_mirred(a) ((struct tcf_mirred *)a)
diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h
index 748cf87a4d7e..3e02709a1df6 100644
--- a/include/net/tc_act/tc_pedit.h
+++ b/include/net/tc_act/tc_pedit.h
@@ -14,6 +14,7 @@ struct tcf_pedit {
struct tc_action common;
unsigned char tcfp_nkeys;
unsigned char tcfp_flags;
+ u32 tcfp_off_max_hint;
struct tc_pedit_key *tcfp_keys;
struct tcf_pedit_key_ex *tcfp_keys_ex;
};
diff --git a/include/net/tc_act/tc_police.h b/include/net/tc_act/tc_police.h
index cfdc7cb82cad..283bde711a42 100644
--- a/include/net/tc_act/tc_police.h
+++ b/include/net/tc_act/tc_police.h
@@ -10,10 +10,13 @@ struct tcf_police_params {
s64 tcfp_burst;
u32 tcfp_mtu;
s64 tcfp_mtu_ptoks;
+ s64 tcfp_pkt_burst;
struct psched_ratecfg rate;
bool rate_present;
struct psched_ratecfg peak;
bool peak_present;
+ struct psched_pktrate ppsrate;
+ bool pps_present;
struct rcu_head rcu;
};
@@ -24,6 +27,7 @@ struct tcf_police {
spinlock_t tcfp_lock ____cacheline_aligned_in_smp;
s64 tcfp_toks;
s64 tcfp_ptoks;
+ s64 tcfp_pkttoks;
s64 tcfp_t_c;
};
@@ -54,17 +58,135 @@ static inline u64 tcf_police_rate_bytes_ps(const struct tc_action *act)
struct tcf_police *police = to_police(act);
struct tcf_police_params *params;
- params = rcu_dereference_bh_rtnl(police->params);
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
return params->rate.rate_bytes_ps;
}
-static inline s64 tcf_police_tcfp_burst(const struct tc_action *act)
+static inline u32 tcf_police_burst(const struct tc_action *act)
{
struct tcf_police *police = to_police(act);
struct tcf_police_params *params;
+ u32 burst;
- params = rcu_dereference_bh_rtnl(police->params);
- return params->tcfp_burst;
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+
+ /*
+ * "rate" bytes "burst" nanoseconds
+ * ------------ * -------------------
+ * 1 second 2^6 ticks
+ *
+ * ------------------------------------
+ * NSEC_PER_SEC nanoseconds
+ * ------------------------
+ * 2^6 ticks
+ *
+ * "rate" bytes "burst" nanoseconds 2^6 ticks
+ * = ------------ * ------------------- * ------------------------
+ * 1 second 2^6 ticks NSEC_PER_SEC nanoseconds
+ *
+ * "rate" * "burst"
+ * = ---------------- bytes/nanosecond
+ * NSEC_PER_SEC^2
+ *
+ *
+ * "rate" * "burst"
+ * = ---------------- bytes/second
+ * NSEC_PER_SEC
+ */
+ burst = div_u64(params->tcfp_burst * params->rate.rate_bytes_ps,
+ NSEC_PER_SEC);
+
+ return burst;
+}
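
A standalone numeric check of the conversion above. The 2^6 tick scaling is
omitted because, as the derivation shows, it cancels; the rate and burst
values are illustrative:

	#include <stdio.h>
	#include <stdint.h>

	#define NSEC_PER_SEC 1000000000ULL

	int main(void)
	{
		uint64_t rate_bytes_ps = 125000000;	/* 1 Gbit/s */
		uint64_t burst_bytes   = 10000;		/* configured burst */

		/* What the kernel stores: time to send the burst, in ns. */
		uint64_t tcfp_burst = burst_bytes * NSEC_PER_SEC / rate_bytes_ps;

		/* What tcf_police_burst() reports back: bytes again. */
		uint64_t reported = tcfp_burst * rate_bytes_ps / NSEC_PER_SEC;

		/* prints "tcfp_burst=80000ns reported=10000 bytes" */
		printf("tcfp_burst=%lluns reported=%llu bytes\n",
		       (unsigned long long)tcfp_burst,
		       (unsigned long long)reported);
		return 0;
	}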
+
+static inline u64 tcf_police_rate_pkt_ps(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+ return params->ppsrate.rate_pkts_ps;
+}
+
+static inline u32 tcf_police_burst_pkt(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+ u32 burst;
+
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+
+ /*
+ * "rate" pkts "burst" nanoseconds
+ * ------------ * -------------------
+ * 1 second 2^6 ticks
+ *
+ * ------------------------------------
+ * NSEC_PER_SEC nanoseconds
+ * ------------------------
+ * 2^6 ticks
+ *
+ * "rate" pkts "burst" nanoseconds 2^6 ticks
+ * = ------------ * ------------------- * ------------------------
+ * 1 second 2^6 ticks NSEC_PER_SEC nanoseconds
+ *
+ * "rate" * "burst"
+ * = ---------------- pkts/nanosecond
+ * NSEC_PER_SEC^2
+ *
+ *
+ * "rate" * "burst"
+ * = ---------------- pkts/second
+ * NSEC_PER_SEC
+ */
+ burst = div_u64(params->tcfp_pkt_burst * params->ppsrate.rate_pkts_ps,
+ NSEC_PER_SEC);
+
+ return burst;
+}
+
+static inline u32 tcf_police_tcfp_mtu(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+ return params->tcfp_mtu;
+}
+
+static inline u64 tcf_police_peakrate_bytes_ps(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+ return params->peak.rate_bytes_ps;
+}
+
+static inline u32 tcf_police_tcfp_ewma_rate(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+ return params->tcfp_ewma_rate;
+}
+
+static inline u16 tcf_police_rate_overhead(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+ return params->rate.overhead;
}
#endif /* __NET_TC_POLICE_H */
diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
index b22a1f641f02..dc1079f28e13 100644
--- a/include/net/tc_act/tc_skbedit.h
+++ b/include/net/tc_act/tc_skbedit.h
@@ -17,6 +17,7 @@ struct tcf_skbedit_params {
u32 mark;
u32 mask;
u16 queue_mapping;
+ u16 mapping_mod;
u16 ptype;
struct rcu_head rcu;
};
@@ -27,8 +28,8 @@ struct tcf_skbedit {
};
#define to_skbedit(a) ((struct tcf_skbedit *)a)
-/* Return true iff action is mark */
-static inline bool is_tcf_skbedit_mark(const struct tc_action *a)
+/* Return true iff action is the one identified by FLAG. */
+static inline bool is_tcf_skbedit_with_flag(const struct tc_action *a, u32 flag)
{
#ifdef CONFIG_NET_CLS_ACT
u32 flags;
@@ -37,12 +38,18 @@ static inline bool is_tcf_skbedit_mark(const struct tc_action *a)
rcu_read_lock();
flags = rcu_dereference(to_skbedit(a)->params)->flags;
rcu_read_unlock();
- return flags == SKBEDIT_F_MARK;
+ return flags == flag;
}
#endif
return false;
}
+/* Return true iff action is mark */
+static inline bool is_tcf_skbedit_mark(const struct tc_action *a)
+{
+ return is_tcf_skbedit_with_flag(a, SKBEDIT_F_MARK);
+}
+
static inline u32 tcf_skbedit_mark(const struct tc_action *a)
{
u32 mark;
@@ -57,17 +64,7 @@ static inline u32 tcf_skbedit_mark(const struct tc_action *a)
/* Return true iff action is ptype */
static inline bool is_tcf_skbedit_ptype(const struct tc_action *a)
{
-#ifdef CONFIG_NET_CLS_ACT
- u32 flags;
-
- if (a->ops && a->ops->id == TCA_ID_SKBEDIT) {
- rcu_read_lock();
- flags = rcu_dereference(to_skbedit(a)->params)->flags;
- rcu_read_unlock();
- return flags == SKBEDIT_F_PTYPE;
- }
-#endif
- return false;
+ return is_tcf_skbedit_with_flag(a, SKBEDIT_F_PTYPE);
}
static inline u32 tcf_skbedit_ptype(const struct tc_action *a)
@@ -81,4 +78,33 @@ static inline u32 tcf_skbedit_ptype(const struct tc_action *a)
return ptype;
}
+/* Return true iff action is priority */
+static inline bool is_tcf_skbedit_priority(const struct tc_action *a)
+{
+ return is_tcf_skbedit_with_flag(a, SKBEDIT_F_PRIORITY);
+}
+
+static inline u32 tcf_skbedit_priority(const struct tc_action *a)
+{
+ u32 priority;
+
+ rcu_read_lock();
+ priority = rcu_dereference(to_skbedit(a)->params)->priority;
+ rcu_read_unlock();
+
+ return priority;
+}
+
+/* Return true iff action is queue_mapping */
+static inline bool is_tcf_skbedit_queue_mapping(const struct tc_action *a)
+{
+ return is_tcf_skbedit_with_flag(a, SKBEDIT_F_QUEUE_MAPPING);
+}
+
+/* Return true iff action is inheritdsfield */
+static inline bool is_tcf_skbedit_inheritdsfield(const struct tc_action *a)
+{
+ return is_tcf_skbedit_with_flag(a, SKBEDIT_F_INHERITDSFIELD);
+}
+
#endif /* __NET_TC_SKBEDIT_H */
diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
index 0689d9bcdf84..879fe8cff581 100644
--- a/include/net/tc_act/tc_tunnel_key.h
+++ b/include/net/tc_act/tc_tunnel_key.h
@@ -28,8 +28,10 @@ static inline bool is_tcf_tunnel_set(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
struct tcf_tunnel_key *t = to_tunnel_key(a);
- struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
+ struct tcf_tunnel_key_params *params;
+ params = rcu_dereference_protected(t->params,
+ lockdep_is_held(&a->tcfa_lock));
if (a->ops && a->ops->id == TCA_ID_TUNNEL_KEY)
return params->tcft_action == TCA_TUNNEL_KEY_ACT_SET;
#endif
@@ -40,8 +42,10 @@ static inline bool is_tcf_tunnel_release(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
struct tcf_tunnel_key *t = to_tunnel_key(a);
- struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
+ struct tcf_tunnel_key_params *params;
+ params = rcu_dereference_protected(t->params,
+ lockdep_is_held(&a->tcfa_lock));
if (a->ops && a->ops->id == TCA_ID_TUNNEL_KEY)
return params->tcft_action == TCA_TUNNEL_KEY_ACT_RELEASE;
#endif
@@ -52,7 +56,10 @@ static inline struct ip_tunnel_info *tcf_tunnel_info(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
struct tcf_tunnel_key *t = to_tunnel_key(a);
- struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
+ struct tcf_tunnel_key_params *params;
+
+ params = rcu_dereference_protected(t->params,
+ lockdep_is_held(&a->tcfa_lock));
return &params->tcft_enc_metadata->u.tun_info;
#else
@@ -69,7 +76,7 @@ tcf_tunnel_info_copy(const struct tc_action *a)
if (tun) {
size_t tun_size = sizeof(*tun) + tun->options_len;
struct ip_tunnel_info *tun_copy = kmemdup(tun, tun_size,
- GFP_KERNEL);
+ GFP_ATOMIC);
return tun_copy;
}
diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
index 4e2502408c31..904eddfc1826 100644
--- a/include/net/tc_act/tc_vlan.h
+++ b/include/net/tc_act/tc_vlan.h
@@ -11,9 +11,12 @@
struct tcf_vlan_params {
int tcfv_action;
+ unsigned char tcfv_push_dst[ETH_ALEN];
+ unsigned char tcfv_push_src[ETH_ALEN];
u16 tcfv_push_vid;
__be16 tcfv_push_proto;
u8 tcfv_push_prio;
+ bool tcfv_push_prio_exists;
struct rcu_head rcu;
};
@@ -75,4 +78,14 @@ static inline u8 tcf_vlan_push_prio(const struct tc_action *a)
return tcfv_push_prio;
}
+
+static inline void tcf_vlan_push_eth(unsigned char *src, unsigned char *dest,
+ const struct tc_action *a)
+{
+ rcu_read_lock();
+ memcpy(dest, rcu_dereference(to_vlan(a)->vlan_p)->tcfv_push_dst, ETH_ALEN);
+ memcpy(src, rcu_dereference(to_vlan(a)->vlan_p)->tcfv_push_src, ETH_ALEN);
+ rcu_read_unlock();
+}
+
#endif /* __NET_TC_VLAN_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index a5ea27df3c2b..14d45661a84d 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -23,9 +23,9 @@
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
-#include <linux/cryptohash.h>
#include <linux/kref.h>
#include <linux/ktime.h>
+#include <linux/indirect_call_wrapper.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
@@ -48,10 +48,12 @@
extern struct inet_hashinfo tcp_hashinfo;
-extern struct percpu_counter tcp_orphan_count;
+DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
+int tcp_orphan_count_sum(void);
+
void tcp_time_wait(struct sock *sk, int state, int timeo);
-#define MAX_TCP_HEADER (128 + MAX_HEADER)
+#define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
#define TCP_MIN_SND_MSS 48
#define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
@@ -126,6 +128,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
* to combine FIN-WAIT-2 timeout with
* TIME-WAIT timer.
*/
+#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */
#define TCP_DELACK_MAX ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */
#if HZ >= 100
@@ -250,6 +253,8 @@ extern long sysctl_tcp_mem[3];
#define TCP_RACK_NO_DUPTHRESH 0x4 /* Do not use DUPACK threshold in RACK */
extern atomic_long_t tcp_memory_allocated;
+DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
+
extern struct percpu_counter tcp_sockets_allocated;
extern unsigned long tcp_memory_pressure;
@@ -287,21 +292,18 @@ static inline bool tcp_out_of_memory(struct sock *sk)
return false;
}
-void sk_forced_mem_schedule(struct sock *sk, int size);
-
-static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
+static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
- struct percpu_counter *ocp = sk->sk_prot->orphan_count;
- int orphans = percpu_counter_read_positive(ocp);
-
- if (orphans << shift > sysctl_tcp_max_orphans) {
- orphans = percpu_counter_sum_positive(ocp);
- if (orphans << shift > sysctl_tcp_max_orphans)
- return true;
- }
- return false;
+ sk_wmem_queued_add(sk, -skb->truesize);
+ if (!skb_zcopy_pure(skb))
+ sk_mem_uncharge(sk, skb->truesize);
+ else
+ sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
+ __kfree_skb(skb);
}
+void sk_forced_mem_schedule(struct sock *sk, int size);
+
bool tcp_check_oom(struct sock *sk, int shift);
@@ -321,9 +323,12 @@ void tcp_shutdown(struct sock *sk, int how);
int tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);
+void tcp_remove_empty_skb(struct sock *sk);
int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
+int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
+ size_t size, struct ubuf_info *uarg);
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
int flags);
int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
@@ -343,9 +348,12 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
+void tcp_twsk_purge(struct list_head *net_exit_list, int family);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
+struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+ bool force_schedule);
void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
static inline void tcp_dec_quickack_mode(struct sock *sk,
@@ -385,30 +393,37 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
int tcp_child_process(struct sock *parent, struct sock *child,
struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
-void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
+void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
+void __tcp_close(struct sock *sk, long timeout);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
-void tcp_init_transfer(struct sock *sk, int bpf_op);
+void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
__poll_t tcp_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
+int do_tcp_getsockopt(struct sock *sk, int level,
+ int optname, sockptr_t optval, sockptr_t optlen);
int tcp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
-int tcp_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen);
-int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen);
-int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen);
+bool tcp_bpf_bypass_getsockopt(int level, int optname);
+int do_tcp_setsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, unsigned int optlen);
+int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+ unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
-int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
+int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int flags, int *addr_len);
int tcp_set_rcvlowat(struct sock *sk, int val);
+int tcp_set_window_clamp(struct sock *sk, int val);
+void tcp_update_recv_tstamps(struct sk_buff *skb,
+ struct scm_timestamping_internal *tss);
+void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
+ struct scm_timestamping_internal *tss);
void tcp_data_ready(struct sock *sk);
#ifdef CONFIG_MMU
int tcp_mmap(struct file *file, struct socket *sock,
@@ -426,6 +441,7 @@ u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
struct tcphdr *th, u32 *cookie);
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
struct tcphdr *th, u32 *cookie);
+u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss);
u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
const struct tcp_request_sock_ops *af_ops,
struct sock *sk, struct tcphdr *th);
@@ -436,6 +452,7 @@ u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
+void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
struct request_sock *req,
@@ -457,7 +474,8 @@ enum tcp_synack_type {
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
struct request_sock *req,
struct tcp_fastopen_cookie *foc,
- enum tcp_synack_type synack_type);
+ enum tcp_synack_type synack_type,
+ struct sk_buff *syn_skb);
int tcp_disconnect(struct sock *sk, int flags);
void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
@@ -471,6 +489,9 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
+struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
+ const struct tcp_request_sock_ops *af_ops,
+ struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
/* Syncookies use a monotonic timer which increments every 60 seconds.
@@ -570,6 +591,8 @@ __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */
+void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
+void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
@@ -605,9 +628,10 @@ void tcp_skb_collapse_tstamp(struct sk_buff *skb,
/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
-void tcp_reset(struct sock *sk);
+void tcp_reset(struct sock *sk, struct sk_buff *skb);
void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
void tcp_fin(struct sock *sk);
+void tcp_check_space(struct sock *sk);
/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
@@ -624,6 +648,7 @@ static inline void tcp_clear_xmit_timers(struct sock *sk)
unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);
+u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);
/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
@@ -654,13 +679,15 @@ void tcp_get_info(struct sock *, struct tcp_info *);
/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
sk_read_actor_t recv_actor);
+int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
+struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
+void tcp_read_done(struct sock *sk, size_t len);
void tcp_initialize_rcv_mss(struct sock *sk);
int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);
-void tcp_init_buffer_space(struct sock *sk);
static inline void tcp_bound_rto(const struct sock *sk)
{
@@ -675,6 +702,10 @@ static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
+ /* mptcp hooks are only on the slow path */
+ if (sk_is_mptcp((struct sock *)tp))
+ return;
+
tp->pred_flags = htonl((tp->tcp_header_len << 26) |
ntohl(TCP_FLAG_ACK) |
snd_wnd);
@@ -700,7 +731,7 @@ static inline void tcp_fast_path_check(struct sock *sk)
static inline u32 tcp_rto_min(struct sock *sk)
{
const struct dst_entry *dst = __sk_dst_get(sk);
- u32 rto_min = TCP_RTO_MIN;
+ u32 rto_min = inet_csk(sk)->icsk_rto_min;
if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
@@ -859,10 +890,11 @@ struct tcp_skb_cb {
__u32 ack_seq; /* Sequence number ACK'd */
union {
struct {
+#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
/* There is space for up to 24 bytes */
- __u32 in_flight:30,/* Bytes in flight at transmit */
- is_app_limited:1, /* cwnd not fully used? */
- unused:1;
+ __u32 is_app_limited:1, /* cwnd not fully used? */
+ delivered_ce:20,
+ unused:11;
/* pkts S/ACKed so far upon tx of skb, incl retrans: */
__u32 delivered;
/* start of send pipeline phase */
@@ -876,35 +908,12 @@ struct tcp_skb_cb {
struct inet6_skb_parm h6;
#endif
} header; /* For incoming skbs */
- struct {
- __u32 flags;
- struct sock *sk_redir;
- void *data_end;
- } bpf;
};
};
#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
-static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
-{
- TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
-}
-
-static inline bool tcp_skb_bpf_ingress(const struct sk_buff *skb)
-{
- return TCP_SKB_CB(skb)->bpf.flags & BPF_F_INGRESS;
-}
-
-static inline struct sock *tcp_skb_bpf_redirect_fetch(struct sk_buff *skb)
-{
- return TCP_SKB_CB(skb)->bpf.sk_redir;
-}
-
-static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb)
-{
- TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
-}
+extern const struct inet_connection_sock_af_ops ipv4_specific;
#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
@@ -931,17 +940,14 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb)
#endif
return 0;
}
-#endif
-static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
-{
-#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
- if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
- skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
- return true;
+extern const struct inet_connection_sock_af_ops ipv6_specific;
+
+INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
+INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
+void tcp_v6_early_demux(struct sk_buff *skb);
+
#endif
- return false;
-}
/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v4_sdif(struct sk_buff *skb)
@@ -986,7 +992,8 @@ static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
const struct sk_buff *from)
{
return likely(tcp_skb_can_collapse_to(to) &&
- mptcp_skb_can_collapse(to, from));
+ mptcp_skb_can_collapse(to, from) &&
+ skb_pure_zcopy_same(to, from));
}
/* Events passed to congestion control interface */
@@ -1040,7 +1047,9 @@ struct ack_sample {
struct rate_sample {
u64 prior_mstamp; /* starting timestamp for interval */
u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
+ u32 prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
s32 delivered; /* number of packets delivered over interval */
+	s32 delivered_ce;	/* number of packets delivered w/ CE marks */
long interval_us; /* time for tp->delivered to incr "delivered" */
u32 snd_interval_us; /* snd interval for delivered packets */
u32 rcv_interval_us; /* rcv interval for delivered packets */
@@ -1048,50 +1057,63 @@ struct rate_sample {
int losses; /* number of packets marked lost upon ACK */
u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */
u32 prior_in_flight; /* in flight before this ACK */
+ u32 last_end_seq; /* end_seq of most recently ACKed packet */
bool is_app_limited; /* is sample from packet with bubble in pipe? */
bool is_retrans; /* is sample from retransmission? */
bool is_ack_delayed; /* is this (likely) a delayed ACK? */
};
struct tcp_congestion_ops {
- struct list_head list;
- u32 key;
- u32 flags;
-
- /* initialize private data (optional) */
- void (*init)(struct sock *sk);
- /* cleanup private data (optional) */
- void (*release)(struct sock *sk);
+/* fast path fields are put first to fill one cache line */
/* return slow start threshold (required) */
u32 (*ssthresh)(struct sock *sk);
+
/* do new cwnd calculation (required) */
void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
+
/* call before changing ca_state (optional) */
void (*set_state)(struct sock *sk, u8 new_state);
+
/* call when cwnd event occurs (optional) */
void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
+
/* call when ack arrives (optional) */
void (*in_ack_event)(struct sock *sk, u32 flags);
- /* new value of cwnd after loss (required) */
- u32 (*undo_cwnd)(struct sock *sk);
+
/* hook for packet ack accounting (optional) */
void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
+
/* override sysctl_tcp_min_tso_segs */
u32 (*min_tso_segs)(struct sock *sk);
- /* returns the multiplier used in tcp_sndbuf_expand (optional) */
- u32 (*sndbuf_expand)(struct sock *sk);
+
/* call when packets are delivered to update cwnd and pacing rate,
* after all the ca_state processing. (optional)
*/
void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
+
+
+ /* new value of cwnd after loss (required) */
+ u32 (*undo_cwnd)(struct sock *sk);
+ /* returns the multiplier used in tcp_sndbuf_expand (optional) */
+ u32 (*sndbuf_expand)(struct sock *sk);
+
+/* control/slow paths put last */
/* get info for inet_diag (optional) */
size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
union tcp_cc_info *info);
- char name[TCP_CA_NAME_MAX];
- struct module *owner;
-};
+ char name[TCP_CA_NAME_MAX];
+ struct module *owner;
+ struct list_head list;
+ u32 key;
+ u32 flags;
+
+ /* initialize private data (optional) */
+ void (*init)(struct sock *sk);
+ /* cleanup private data (optional) */
+ void (*release)(struct sock *sk);
+} ____cacheline_aligned_in_smp;
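
A minimal kernel-style sketch of a tcp_congestion_ops instance with only
the hooks marked (required) above filled in; "example" is a made-up name,
and tcp_reno_cong_avoid()/tcp_reno_undo_cwnd() are the stock Reno helpers:

	static u32 example_ssthresh(struct sock *sk)
	{
		/* halve cwnd on loss, never below 2 */
		return max(tcp_snd_cwnd(tcp_sk(sk)) >> 1, 2U);
	}

	static void example_cong_avoid(struct sock *sk, u32 ack, u32 acked)
	{
		tcp_reno_cong_avoid(sk, ack, acked);
	}

	static u32 example_undo_cwnd(struct sock *sk)
	{
		return tcp_reno_undo_cwnd(sk);
	}

	static struct tcp_congestion_ops example_cc __read_mostly = {
		.ssthresh	= example_ssthresh,
		.cong_avoid	= example_cong_avoid,
		.undo_cwnd	= example_undo_cwnd,
		.name		= "example",
		.owner		= THIS_MODULE,
	};
	/* registered via tcp_register_congestion_control(&example_cc) */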
int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
@@ -1105,7 +1127,7 @@ void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
- bool reinit, bool cap_net_admin);
+ bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
@@ -1133,15 +1155,6 @@ static inline bool tcp_ca_needs_ecn(const struct sock *sk)
return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}
-static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
-{
- struct inet_connection_sock *icsk = inet_csk(sk);
-
- if (icsk->icsk_ca_ops->set_state)
- icsk->icsk_ca_ops->set_state(sk, ca_state);
- icsk->icsk_ca_state = ca_state;
-}
-
static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1150,6 +1163,9 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
icsk->icsk_ca_ops->cwnd_event(sk, event);
}
+/* From tcp_cong.c */
+void tcp_set_ca_state(struct sock *sk, const u8 ca_state);
+
/* From tcp_rate.c */
void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
@@ -1158,6 +1174,11 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
bool is_sack_reneg, struct rate_sample *rs);
void tcp_rate_check_app_limited(struct sock *sk);
+static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
+{
+ return t1 > t2 || (t1 == t2 && after(seq1, seq2));
+}
+
/* These functions determine how the current flow behaves in respect of SACK
* handling. SACK is negotiated with the peer, and therefore it can vary
* between different flows.
@@ -1201,9 +1222,20 @@ static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
#define TCP_INFINITE_SSTHRESH 0x7fffffff
+static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
+{
+ return tp->snd_cwnd;
+}
+
+static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
+{
+ WARN_ON_ONCE((int)val <= 0);
+ tp->snd_cwnd = val;
+}
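
A call-site sketch of the accessor conversion (the surrounding statement is
illustrative):

	/* before: tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); */
	tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_ssthresh));

Routing every write through tcp_snd_cwnd_set() lets the WARN_ON_ONCE()
catch zero or negative cwnd assignments at the point of the store.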
+
static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
- return tp->snd_cwnd < tp->snd_ssthresh;
+ return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
}
static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
@@ -1229,8 +1261,8 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
return tp->snd_ssthresh;
else
return max(tp->snd_ssthresh,
- ((tp->snd_cwnd >> 1) +
- (tp->snd_cwnd >> 2)));
+ ((tcp_snd_cwnd(tp) >> 1) +
+ (tcp_snd_cwnd(tp) >> 2)));
}
/* Use define here intentionally to get WARN_ON location shown at the caller */
@@ -1270,11 +1302,14 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
+ if (tp->is_cwnd_limited)
+ return true;
+
/* If in slow start, ensure cwnd grows to twice what was ACKed. */
if (tcp_in_slow_start(tp))
- return tp->snd_cwnd < 2 * tp->max_packets_out;
+ return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;
- return tp->is_cwnd_limited;
+ return false;
}
/* BBR congestion control needs pacing.
@@ -1288,26 +1323,22 @@ static inline bool tcp_needs_internal_pacing(const struct sock *sk)
return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
}
-/* Return in jiffies the delay before one skb is sent.
- * If @skb is NULL, we look at EDT for next packet being sent on the socket.
+/* Estimate in how many jiffies the next packet for this flow can be sent.
+ * Scheduling a retransmit timer earlier than that would be silly.
*/
-static inline unsigned long tcp_pacing_delay(const struct sock *sk,
- const struct sk_buff *skb)
+static inline unsigned long tcp_pacing_delay(const struct sock *sk)
{
- s64 pacing_delay = skb ? skb->tstamp : tcp_sk(sk)->tcp_wstamp_ns;
-
- pacing_delay -= tcp_sk(sk)->tcp_clock_cache;
+ s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;
- return pacing_delay > 0 ? nsecs_to_jiffies(pacing_delay) : 0;
+ return delay > 0 ? nsecs_to_jiffies(delay) : 0;
}
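/*
 * Worked example (assumed values, HZ=1000): if tcp_wstamp_ns is
 * 5,000,000 ns past tcp_clock_cache, nsecs_to_jiffies() gives 5, so
 * the retransmit timer armed below is pushed out by 5 jiffies rather
 * than firing before the pacing release time of the next packet.
 */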
static inline void tcp_reset_xmit_timer(struct sock *sk,
const int what,
unsigned long when,
- const unsigned long max_when,
- const struct sk_buff *skb)
+ const unsigned long max_when)
{
- inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk, skb),
+ inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
max_when);
}
@@ -1326,7 +1357,9 @@ static inline unsigned long tcp_probe0_base(const struct sock *sk)
static inline unsigned long tcp_probe0_when(const struct sock *sk,
unsigned long max_when)
{
- u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;
+ u8 backoff = min_t(u8, ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1,
+ inet_csk(sk)->icsk_backoff);
+ u64 when = (u64)tcp_probe0_base(sk) << backoff;
return (unsigned long)min_t(u64, when, max_when);
}
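/*
 * Worked arithmetic (assuming the common TCP_RTO_MAX = 120*HZ and
 * TCP_RTO_MIN = HZ/5): TCP_RTO_MAX / TCP_RTO_MIN = 600, ilog2(600) = 9,
 * so the shift is clamped to 10.  TCP_RTO_MIN << 10 already exceeds
 * TCP_RTO_MAX, so the clamp never changes the min_t() result - it only
 * keeps a runaway icsk_backoff from shifting the u64 by >= 64 bits,
 * which is undefined behaviour.
 */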
@@ -1335,8 +1368,7 @@ static inline void tcp_check_probe_timer(struct sock *sk)
{
if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- tcp_probe0_base(sk), TCP_RTO_MAX,
- NULL);
+ tcp_probe0_base(sk), TCP_RTO_MAX);
}
static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
@@ -1364,7 +1396,10 @@ static inline bool tcp_checksum_complete(struct sk_buff *skb)
__skb_checksum_complete(skb);
}
-bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
+bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
+ enum skb_drop_reason *reason);
+
int tcp_filter(struct sock *sk, struct sk_buff *skb);
void tcp_set_state(struct sock *sk, int state);
void tcp_done(struct sock *sk);
@@ -1376,7 +1411,6 @@ static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
rx_opt->num_sacks = 0;
}
-u32 tcp_default_init_rwnd(u32 mss);
void tcp_cwnd_restart(struct sock *sk, s32 delta);
static inline void tcp_slow_start_after_idle_check(struct sock *sk)
@@ -1385,8 +1419,8 @@ static inline void tcp_slow_start_after_idle_check(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
s32 delta;
- if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out ||
- ca_ops->cong_control)
+ if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
+ tp->packets_out || ca_ops->cong_control)
return;
delta = tcp_jiffies32 - tp->lsndtime;
if (delta > inet_csk(sk)->icsk_rto)
@@ -1401,7 +1435,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space,
static inline int tcp_win_from_space(const struct sock *sk, int space)
{
- int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;
+ int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale);
return tcp_adv_win_scale <= 0 ?
(space>>(-tcp_adv_win_scale)) :
@@ -1421,6 +1455,49 @@ static inline int tcp_full_space(const struct sock *sk)
return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}
+static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
+{
+ int unused_mem = sk_unused_reserved_mem(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
+ if (unused_mem)
+ tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
+ tcp_win_from_space(sk, unused_mem));
+}
+
+void tcp_cleanup_rbuf(struct sock *sk, int copied);
+
+/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
+ * If 87.5% (7/8) of the space has been consumed, we want to override
+ * SO_RCVLOWAT constraint, since we are receiving skbs with too small
+ * len/truesize ratio.
+ */
+static inline bool tcp_rmem_pressure(const struct sock *sk)
+{
+ int rcvbuf, threshold;
+
+ if (tcp_under_memory_pressure(sk))
+ return true;
+
+ rcvbuf = READ_ONCE(sk->sk_rcvbuf);
+ threshold = rcvbuf - (rcvbuf >> 3);
+
+ return atomic_read(&sk->sk_rmem_alloc) > threshold;
+}
+
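/*
 * Worked example (assumed rcvbuf): with sk_rcvbuf = 1 MiB the threshold
 * computed above is 1 MiB - (1 MiB >> 3) = 896 KiB, i.e. rmem pressure
 * is reported once 87.5% of the receive buffer is allocated.
 */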
+static inline bool tcp_epollin_ready(const struct sock *sk, int target)
+{
+ const struct tcp_sock *tp = tcp_sk(sk);
+ int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
+
+ if (avail <= 0)
+ return false;
+
+ return (avail >= target) || tcp_rmem_pressure(sk) ||
+ (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss);
+}
+
extern void tcp_openreq_init_rwin(struct request_sock *req,
const struct sock *sk_listener,
const struct dst_entry *dst);
@@ -1432,21 +1509,24 @@ static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
struct net *net = sock_net((struct sock *)tp);
- return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
+ return tp->keepalive_intvl ? :
+ READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
}
static inline int keepalive_time_when(const struct tcp_sock *tp)
{
struct net *net = sock_net((struct sock *)tp);
- return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
+ return tp->keepalive_time ? :
+ READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
}
static inline int keepalive_probes(const struct tcp_sock *tp)
{
struct net *net = sock_net((struct sock *)tp);
- return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
+ return tp->keepalive_probes ? :
+ READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
}
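/*
 * Sketch of the writer side these READ_ONCE() conversions pair with
 * (assumed sysctl handler, not shown in this patch):
 *
 *	WRITE_ONCE(net->ipv4.sysctl_tcp_keepalive_time, val);
 *
 * The knobs are read on fast paths without the socket lock, so the
 * annotated pair keeps the loads single-copy atomic and marks the
 * race as intentional for KCSAN.
 */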
static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
@@ -1459,7 +1539,8 @@ static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
static inline int tcp_fin_time(const struct sock *sk)
{
- int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
+ int fin_timeout = tcp_sk(sk)->linger2 ? :
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
const int rto = inet_csk(sk)->icsk_rto;
if (fin_timeout < (rto << 2) - (rto >> 1))
@@ -1547,6 +1628,7 @@ struct tcp_md5sig_key {
u8 keylen;
u8 family; /* AF_INET or AF_INET6 */
u8 prefixlen;
+ u8 flags;
union tcp_md5_addr addr;
int l3index; /* set if key added with L3 scope */
u8 key[TCP_MD5SIG_MAXKEYLEN];
@@ -1592,10 +1674,10 @@ struct tcp_md5sig_pool {
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
const struct sock *sk, const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
- int family, u8 prefixlen, int l3index,
+ int family, u8 prefixlen, int l3index, u8 flags,
const u8 *newkey, u8 newkeylen, gfp_t gfp);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
- int family, u8 prefixlen, int l3index);
+ int family, u8 prefixlen, int l3index, u8 flags);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
const struct sock *addr_sk);
@@ -1614,6 +1696,12 @@ tcp_md5_do_lookup(const struct sock *sk, int l3index,
return __tcp_md5_do_lookup(sk, l3index, addr, family);
}
+enum skb_drop_reason
+tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
+ const void *saddr, const void *daddr,
+ int family, int dif, int sdif);
+
#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *
@@ -1622,6 +1710,14 @@ tcp_md5_do_lookup(const struct sock *sk, int l3index,
{
return NULL;
}
+
+static inline enum skb_drop_reason
+tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
+ const void *saddr, const void *daddr,
+ int family, int dif, int sdif)
+{
+ return SKB_NOT_DROPPED_YET;
+}
#define tcp_twsk_md5_key(twsk) NULL
#endif
@@ -1657,6 +1753,8 @@ void tcp_fastopen_destroy_cipher(struct sock *sk);
void tcp_fastopen_ctx_destroy(struct net *net);
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
void *primary_key, void *backup_key);
+int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
+ u64 *key);
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
@@ -1678,7 +1776,6 @@ struct tcp_fastopen_context {
struct rcu_head rcu;
};
-extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
void tcp_fastopen_active_disable(struct sock *sk);
bool tcp_fastopen_active_should_disable(struct sock *sk);
void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
@@ -1756,11 +1853,6 @@ static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
return skb_rb_last(&sk->tcp_rtx_queue);
}
-static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
-{
- return skb_peek(&sk->sk_write_queue);
-}
-
static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
return skb_peek_tail(&sk->sk_write_queue);
@@ -1839,7 +1931,7 @@ static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct soc
{
list_del(&skb->tcp_tsorted_anchor);
tcp_rtx_queue_unlink(skb, sk);
- sk_wmem_free_skb(sk, skb);
+ tcp_wmem_free_skb(sk, skb);
}
static inline void tcp_push_pending_frames(struct sock *sk)
@@ -1940,6 +2032,10 @@ void tcp_v4_destroy_sock(struct sock *sk);
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
netdev_features_t features);
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
+INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
+INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
int tcp_gro_complete(struct sk_buff *skb);
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
@@ -1947,21 +2043,10 @@ void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
{
struct net *net = sock_net((struct sock *)tp);
- return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
+ return tp->notsent_lowat ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
}
-/* @wake is one when sk_stream_write_space() calls us.
- * This sends EPOLLOUT only if notsent_bytes is half the limit.
- * This mimics the strategy used in sock_def_write_space().
- */
-static inline bool tcp_stream_memory_free(const struct sock *sk, int wake)
-{
- const struct tcp_sock *tp = tcp_sk(sk);
- u32 notsent_bytes = READ_ONCE(tp->write_seq) -
- READ_ONCE(tp->snd_nxt);
-
- return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
-}
+bool tcp_stream_memory_free(const struct sock *sk, int wake);
#ifdef CONFIG_PROC_FS
int tcp4_proc_init(void);
@@ -1984,7 +2069,7 @@ struct tcp_sock_af_ops {
const struct sk_buff *skb);
int (*md5_parse)(struct sock *sk,
int optname,
- char __user *optval,
+ sockptr_t optval,
int optlen);
#endif
};
@@ -1999,21 +2084,21 @@ struct tcp_request_sock_ops {
const struct sock *sk,
const struct sk_buff *skb);
#endif
- void (*init_req)(struct request_sock *req,
- const struct sock *sk_listener,
- struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
__u32 (*cookie_init_seq)(const struct sk_buff *skb,
__u16 *mss);
#endif
- struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
- const struct request_sock *req);
+ struct dst_entry *(*route_req)(const struct sock *sk,
+ struct sk_buff *skb,
+ struct flowi *fl,
+ struct request_sock *req);
u32 (*init_seq)(const struct sk_buff *skb);
u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl, struct request_sock *req,
struct tcp_fastopen_cookie *foc,
- enum tcp_synack_type synack_type);
+ enum tcp_synack_type synack_type,
+ struct sk_buff *syn_skb);
};
extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
@@ -2049,7 +2134,7 @@ void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
u32 reo_wnd);
-extern void tcp_rack_mark_lost(struct sock *sk);
+extern bool tcp_rack_mark_lost(struct sock *sk);
extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
u64 xmit_time);
extern void tcp_rack_reo_timeout(struct sock *sk);
@@ -2132,9 +2217,13 @@ static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
u16 segs_in;
segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
- tp->segs_in += segs_in;
+
+ /* We update these fields while other threads might
+ * read them from tcp_get_info()
+ */
+ WRITE_ONCE(tp->segs_in, tp->segs_in + segs_in);
if (skb->len > tcp_hdrlen(skb))
- tp->data_segs_in += segs_in;
+ WRITE_ONCE(tp->data_segs_in, tp->data_segs_in + segs_in);
}
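/*
 * Hedged reader-side sketch: tcp_get_info() and friends can now
 * snapshot the counters without the socket lock, e.g.
 *
 *	u32 segs = READ_ONCE(tp->segs_in);
 *
 * The WRITE_ONCE()/READ_ONCE() pair prevents load/store tearing.
 */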
/*
@@ -2192,17 +2281,41 @@ void tcp_update_ulp(struct sock *sk, struct proto *p,
__MODULE_INFO(alias, alias_userspace, name); \
__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
+#ifdef CONFIG_NET_SOCK_MSG
struct sk_msg;
struct sk_psock;
-int tcp_bpf_init(struct sock *sk);
-void tcp_bpf_reinit(struct sock *sk);
+#ifdef CONFIG_BPF_SYSCALL
+struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
+int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
+void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
+#endif /* CONFIG_BPF_SYSCALL */
+
int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes,
int flags);
-int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len);
-int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
- struct msghdr *msg, int len, int flags);
+#endif /* CONFIG_NET_SOCK_MSG */
+
+#if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG)
+static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
+{
+}
+#endif
+
+#ifdef CONFIG_CGROUP_BPF
+static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
+ struct sk_buff *skb,
+ unsigned int end_offset)
+{
+ skops->skb = skb;
+ skops->skb_data_end = skb->data + end_offset;
+}
+#else
+static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
+ struct sk_buff *skb,
+ unsigned int end_offset)
+{
+}
+#endif
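/*
 * Hedged usage note: callers typically hand in the offset just past
 * the TCP header, e.g. (sketch)
 *
 *	bpf_skops_init_skb(&sock_ops, skb, tcp_hdrlen(skb));
 *
 * so a sockops program may inspect bytes in [skb->data, skb_data_end).
 */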
/* Call BPF_SOCK_OPS program that returns an int. If the return value
* is < 0, then the BPF op failed (for example if the loaded BPF
@@ -2276,7 +2389,7 @@ static inline u32 tcp_timeout_init(struct sock *sk)
if (timeout <= 0)
timeout = TCP_TIMEOUT_INIT;
- return timeout;
+ return min_t(int, timeout, TCP_RTO_MAX);
}
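/*
 * Worked example (values in jiffies, TCP_RTO_MAX assumed 120*HZ): a
 * BPF program returning an enormous timeout is now clamped to
 * TCP_RTO_MAX, so it can no longer park the initial retransmit timer
 * arbitrarily far in the future.
 */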
static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
diff --git a/include/net/tls.h b/include/net/tls.h
index bf9eb4823933..154949c7b0c8 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -39,7 +39,6 @@
#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
-#include <linux/skmsg.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
@@ -50,6 +49,17 @@
#include <crypto/aead.h>
#include <uapi/linux/tls.h>
+struct tls_rec;
+
+struct tls_cipher_size_desc {
+ unsigned int iv;
+ unsigned int key;
+ unsigned int salt;
+ unsigned int tag;
+ unsigned int rec_seq;
+};
+
+extern const struct tls_cipher_size_desc tls_cipher_size_desc[];
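/*
 * Hedged usage sketch (the indexing convention is an assumption, not
 * shown in this hunk): lookups are per negotiated cipher type, e.g.
 *
 *	const struct tls_cipher_size_desc *sz;
 *
 *	sz = &tls_cipher_size_desc[cipher_type - TLS_CIPHER_AES_GCM_128];
 *
 * after which buffers are sized from sz->iv, sz->key, sz->salt, etc.
 */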
/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE ((size_t)1 << 14)
@@ -64,9 +74,11 @@
#define TLS_AAD_SPACE_SIZE 13
#define MAX_IV_SIZE 16
+#define TLS_TAG_SIZE 16
#define TLS_MAX_REC_SEQ_SIZE 8
+#define TLS_MAX_AAD_SIZE TLS_AAD_SPACE_SIZE
-/* For AES-CCM, the full 16-bytes of IV is made of '4' fields of given sizes.
+/* For CCM mode, the full 16 bytes of IV are made of 4 fields of given sizes.
*
* IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
*
@@ -74,15 +86,7 @@
* Hence b0 contains (3 - 1) = 2.
*/
#define TLS_AES_CCM_IV_B0_BYTE 2
-
-#define __TLS_INC_STATS(net, field) \
- __SNMP_INC_STATS((net)->mib.tls_statistics, field)
-#define TLS_INC_STATS(net, field) \
- SNMP_INC_STATS((net)->mib.tls_statistics, field)
-#define __TLS_DEC_STATS(net, field) \
- __SNMP_DEC_STATS((net)->mib.tls_statistics, field)
-#define TLS_DEC_STATS(net, field) \
- SNMP_DEC_STATS((net)->mib.tls_statistics, field)
+#define TLS_SM4_CCM_IV_B0_BYTE 2
enum {
TLS_BASE,
@@ -92,37 +96,6 @@ enum {
TLS_NUM_CONFIG,
};
-/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
- * allocated or mapped for each TLS record. After encryption, the records are
- * stored in a linked list.
- */
-struct tls_rec {
- struct list_head list;
- int tx_ready;
- int tx_flags;
-
- struct sk_msg msg_plaintext;
- struct sk_msg msg_encrypted;
-
- /* AAD | msg_plaintext.sg.data | sg_tag */
- struct scatterlist sg_aead_in[2];
- /* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
- struct scatterlist sg_aead_out[2];
-
- char content_type;
- struct scatterlist sg_content_type;
-
- char aad_space[TLS_AAD_SPACE_SIZE];
- u8 iv_data[MAX_IV_SIZE];
- struct aead_request aead_req;
- u8 aead_req_ctx[];
-};
-
-struct tls_msg {
- struct strp_msg rxm;
- u8 control;
-};
-
struct tx_work {
struct delayed_work work;
struct sock *sk;
@@ -135,6 +108,8 @@ struct tls_sw_context_tx {
struct tls_rec *open_rec;
struct list_head tx_list;
atomic_t encrypt_pending;
+ /* protect crypto_wait with encrypt_pending */
+ spinlock_t encrypt_compl_lock;
int async_notify;
u8 async_capable:1;
@@ -143,19 +118,38 @@ struct tls_sw_context_tx {
unsigned long tx_bitmask;
};
+struct tls_strparser {
+ struct sock *sk;
+
+ u32 mark : 8;
+ u32 stopped : 1;
+ u32 copy_mode : 1;
+ u32 msg_ready : 1;
+
+ struct strp_msg stm;
+
+ struct sk_buff *anchor;
+ struct work_struct work;
+};
+
struct tls_sw_context_rx {
struct crypto_aead *aead_recv;
struct crypto_wait async_wait;
- struct strparser strp;
struct sk_buff_head rx_list; /* list of decrypted 'data' records */
void (*saved_data_ready)(struct sock *sk);
- struct sk_buff *recv_pkt;
- u8 control;
+ u8 reader_present;
u8 async_capable:1;
- u8 decrypted:1;
+ u8 zc_capable:1;
+ u8 reader_contended:1;
+
+ struct tls_strparser strp;
+
atomic_t decrypt_pending;
- bool async_notify;
+ /* protect crypto_wait with decrypt_pending */
+ spinlock_t decrypt_compl_lock;
+ struct sk_buff_head async_hold;
+ struct wait_queue_head wq;
};
struct tls_record_info {
@@ -177,6 +171,8 @@ struct tls_offload_context_tx {
struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
void (*sk_destruct)(struct sock *sk);
+ struct work_struct destruct_work;
+ struct tls_context *ctx;
u8 driver_state[] __aligned(8);
/* The TLS layer reserves room for driver specific state
* Currently the belief is that there is not enough
@@ -189,12 +185,22 @@ struct tls_offload_context_tx {
(sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
enum tls_context_flags {
- TLS_RX_SYNC_RUNNING = 0,
+ /* tls_device_down was called after the netdev went down, device state
+ * was released, and kTLS works in software, even though rx_conf is
+ * still TLS_HW (needed for transition).
+ */
+ TLS_RX_DEV_DEGRADED = 0,
/* Unlike RX where resync is driven entirely by the core in TX only
* the driver knows when things went out of sync, so we need the flag
* to be atomic.
*/
TLS_TX_SYNC_SCHED = 1,
+ /* tls_dev_del was called for the RX side, device state was released,
+ * but tls_ctx->netdev might still be kept, because TX-side driver
+ * resources might not be released yet. Used to prevent the second
+ * tls_dev_del call in tls_device_down if it happens simultaneously.
+ */
+ TLS_RX_DEV_CLOSED = 2,
};
struct cipher_context {
@@ -207,6 +213,9 @@ union tls_crypto_context {
union {
struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
+ struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305;
+ struct tls12_crypto_info_sm4_gcm sm4_gcm;
+ struct tls12_crypto_info_sm4_ccm sm4_ccm;
};
};
@@ -229,6 +238,8 @@ struct tls_context {
u8 tx_conf:3;
u8 rx_conf:3;
+ u8 zerocopy_sendfile:1;
+ u8 rx_no_pad:1;
int (*push_pending_record)(struct sock *sk, int flags);
void (*sk_write_space)(struct sock *sk);
@@ -236,7 +247,7 @@ struct tls_context {
void *priv_ctx_tx;
void *priv_ctx_rx;
- struct net_device *netdev;
+ struct net_device __rcu *netdev;
/* rw cache line */
struct cipher_context tx;
@@ -255,6 +266,7 @@ struct tls_context {
/* cache cold stuff */
struct proto *sk_proto;
+ struct sock *sk;
void (*sk_destruct)(struct sock *sk);
@@ -287,11 +299,20 @@ struct tlsdev_ops {
enum tls_offload_sync_type {
TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
+ TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC = 2,
};
#define TLS_DEVICE_RESYNC_NH_START_IVAL 2
#define TLS_DEVICE_RESYNC_NH_MAX_IVAL 128
+#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX 13
+struct tls_offload_resync_async {
+ atomic64_t req;
+ u16 loglen;
+ u16 rcd_delta;
+ u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
+};
+
struct tls_offload_context_rx {
/* sw must be the first member of tls_offload_context_rx */
struct tls_sw_context_rx sw;
@@ -310,6 +331,10 @@ struct tls_offload_context_rx {
u32 decrypted_failed;
u32 decrypted_tgt;
} resync_nh;
+ /* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC */
+ struct {
+ struct tls_offload_resync_async *resync_async;
+ };
};
u8 driver_state[] __aligned(8);
/* The TLS layer reserves room for driver specific state
@@ -322,42 +347,6 @@ struct tls_offload_context_rx {
#define TLS_OFFLOAD_CONTEXT_SIZE_RX \
(sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)
-struct tls_context *tls_ctx_create(struct sock *sk);
-void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
-void update_sk_prot(struct sock *sk, struct tls_context *ctx);
-
-int wait_on_pending_writer(struct sock *sk, long *timeo);
-int tls_sk_query(struct sock *sk, int optname, char __user *optval,
- int __user *optlen);
-int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
- unsigned int optlen);
-
-int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
-void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
-void tls_sw_strparser_done(struct tls_context *tls_ctx);
-int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
-int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
- int offset, size_t size, int flags);
-int tls_sw_sendpage(struct sock *sk, struct page *page,
- int offset, size_t size, int flags);
-void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
-void tls_sw_release_resources_tx(struct sock *sk);
-void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
-void tls_sw_free_resources_rx(struct sock *sk);
-void tls_sw_release_resources_rx(struct sock *sk);
-void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
-int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len);
-bool tls_sw_stream_read(const struct sock *sk);
-ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
- struct pipe_inode_info *pipe,
- size_t len, unsigned int flags);
-
-int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
-int tls_device_sendpage(struct sock *sk, struct page *page,
- int offset, size_t size, int flags);
-int tls_tx_records(struct sock *sk, int flags);
-
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
u32 seq, u64 *p_record_sn);
@@ -371,59 +360,12 @@ static inline u32 tls_record_start_seq(struct tls_record_info *rec)
return rec->end_seq - rec->len;
}
-int tls_push_sg(struct sock *sk, struct tls_context *ctx,
- struct scatterlist *sg, u16 first_offset,
- int flags);
-int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
- int flags);
-void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
-
-static inline struct tls_msg *tls_msg(struct sk_buff *skb)
-{
- return (struct tls_msg *)strp_msg(skb);
-}
-
-static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
-{
- return !!ctx->partially_sent_record;
-}
-
-static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
-{
- return tls_ctx->pending_open_record_frags;
-}
-
-static inline bool is_tx_ready(struct tls_sw_context_tx *ctx)
-{
- struct tls_rec *rec;
-
- rec = list_first_entry(&ctx->tx_list, struct tls_rec, list);
- if (!rec)
- return false;
-
- return READ_ONCE(rec->tx_ready);
-}
-
-static inline u16 tls_user_config(struct tls_context *ctx, bool tx)
-{
- u16 config = tx ? ctx->tx_conf : ctx->rx_conf;
-
- switch (config) {
- case TLS_BASE:
- return TLS_CONF_BASE;
- case TLS_SW:
- return TLS_CONF_SW;
- case TLS_HW:
- return TLS_CONF_HW;
- case TLS_HW_RECORD:
- return TLS_CONF_HW_RECORD;
- }
- return 0;
-}
-
struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
struct sk_buff *skb);
+struct sk_buff *
+tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
+ struct sk_buff *skb);
static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
@@ -436,25 +378,6 @@ static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
#endif
}
-static inline void tls_err_abort(struct sock *sk, int err)
-{
- sk->sk_err = err;
- sk->sk_error_report(sk);
-}
-
-static inline bool tls_bigint_increment(unsigned char *seq, int len)
-{
- int i;
-
- for (i = len - 1; i >= 0; i--) {
- ++seq[i];
- if (seq[i] != 0)
- break;
- }
-
- return (i == -1);
-}
-
static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -465,81 +388,6 @@ static inline struct tls_context *tls_get_ctx(const struct sock *sk)
return (__force void *)icsk->icsk_ulp_data;
}
-static inline void tls_advance_record_sn(struct sock *sk,
- struct tls_prot_info *prot,
- struct cipher_context *ctx)
-{
- if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
- tls_err_abort(sk, EBADMSG);
-
- if (prot->version != TLS_1_3_VERSION)
- tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
- prot->iv_size);
-}
-
-static inline void tls_fill_prepend(struct tls_context *ctx,
- char *buf,
- size_t plaintext_len,
- unsigned char record_type,
- int version)
-{
- struct tls_prot_info *prot = &ctx->prot_info;
- size_t pkt_len, iv_size = prot->iv_size;
-
- pkt_len = plaintext_len + prot->tag_size;
- if (version != TLS_1_3_VERSION) {
- pkt_len += iv_size;
-
- memcpy(buf + TLS_NONCE_OFFSET,
- ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size);
- }
-
- /* we cover nonce explicit here as well, so buf should be of
- * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
- */
- buf[0] = version == TLS_1_3_VERSION ?
- TLS_RECORD_TYPE_DATA : record_type;
- /* Note that VERSION must be TLS_1_2 for both TLS1.2 and TLS1.3 */
- buf[1] = TLS_1_2_VERSION_MINOR;
- buf[2] = TLS_1_2_VERSION_MAJOR;
- /* we can use IV for nonce explicit according to spec */
- buf[3] = pkt_len >> 8;
- buf[4] = pkt_len & 0xFF;
-}
-
-static inline void tls_make_aad(char *buf,
- size_t size,
- char *record_sequence,
- int record_sequence_size,
- unsigned char record_type,
- int version)
-{
- if (version != TLS_1_3_VERSION) {
- memcpy(buf, record_sequence, record_sequence_size);
- buf += 8;
- } else {
- size += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
- }
-
- buf[0] = version == TLS_1_3_VERSION ?
- TLS_RECORD_TYPE_DATA : record_type;
- buf[1] = TLS_1_2_VERSION_MAJOR;
- buf[2] = TLS_1_2_VERSION_MINOR;
- buf[3] = size >> 8;
- buf[4] = size & 0xFF;
-}
-
-static inline void xor_iv_with_seq(int version, char *iv, char *seq)
-{
- int i;
-
- if (version == TLS_1_3_VERSION) {
- for (i = 0; i < 8; i++)
- iv[i + 4] ^= seq[i];
- }
-}
-
-
static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
const struct tls_context *tls_ctx)
{
@@ -567,8 +415,14 @@ static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
return !!tls_sw_ctx_tx(ctx);
}
-void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
-void tls_device_write_space(struct sock *sk, struct tls_context *ctx);
+static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+
+ if (!ctx)
+ return false;
+ return !!tls_sw_ctx_rx(ctx);
+}
static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
@@ -576,7 +430,6 @@ tls_offload_ctx_rx(const struct tls_context *tls_ctx)
return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}
-#if IS_ENABLED(CONFIG_TLS_DEVICE)
static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
enum tls_offload_ctx_dir direction)
{
@@ -591,15 +444,39 @@ tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
{
return __tls_driver_ctx(tls_get_ctx(sk), direction);
}
-#endif
+#define RESYNC_REQ BIT(0)
+#define RESYNC_REQ_ASYNC BIT(1)
/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
- atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1);
+ atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
+}
+
+/* Log all TLS record header TCP sequences in [seq, seq+len] */
+static inline void
+tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+
+ atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
+ ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
+ rx_ctx->resync_async->loglen = 0;
+ rx_ctx->resync_async->rcd_delta = 0;
+}
+
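/*
 * Layout of the atomic request word built above, derived from the
 * shifts: bits 63..32 carry the TCP sequence, bits 31..16 the length
 * of the range to log, bit 1 is RESYNC_REQ_ASYNC and bit 0 RESYNC_REQ.
 * Packing everything into one atomic64_t lets the datapath pick the
 * request up without locking.
 */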
+static inline void
+tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+
+ atomic64_set(&rx_ctx->resync_async->req,
+ ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}
static inline void
@@ -621,35 +498,11 @@ static inline bool tls_offload_tx_resync_pending(struct sock *sk)
return ret;
}
-int __net_init tls_proc_init(struct net *net);
-void __net_exit tls_proc_fini(struct net *net);
-
-int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
- unsigned char *record_type);
-int decrypt_skb(struct sock *sk, struct sk_buff *skb,
- struct scatterlist *sgout);
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);
-struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
- struct net_device *dev,
- struct sk_buff *skb);
-
-int tls_sw_fallback_init(struct sock *sk,
- struct tls_offload_context_tx *offload_ctx,
- struct tls_crypto_info *crypto_info);
-
#ifdef CONFIG_TLS_DEVICE
-void tls_device_init(void);
-void tls_device_cleanup(void);
void tls_device_sk_destruct(struct sock *sk);
-int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
-void tls_device_free_resources_tx(struct sock *sk);
-int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
-void tls_device_offload_cleanup_rx(struct sock *sk);
-void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);
-int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
- struct sk_buff *skb, struct strp_msg *rxm);
static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
{
@@ -658,33 +511,5 @@ static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
return false;
return tls_get_ctx(sk)->rx_conf == TLS_HW;
}
-#else
-static inline void tls_device_init(void) {}
-static inline void tls_device_cleanup(void) {}
-
-static inline int
-tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void tls_device_free_resources_tx(struct sock *sk) {}
-
-static inline int
-tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void tls_device_offload_cleanup_rx(struct sock *sk) {}
-static inline void
-tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {}
-
-static inline int
-tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
- struct sk_buff *skb, struct strp_msg *rxm)
-{
- return 0;
-}
#endif
#endif /* _TLS_OFFLOAD_H */
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index a8f6020f1196..b830463e3dff 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -3,6 +3,7 @@
#define _TRANSP_V6_H
#include <net/checksum.h>
+#include <net/sock.h>
/* IPv6 transport protocols */
extern struct proto rawv6_prot;
@@ -12,6 +13,7 @@ extern struct proto tcpv6_prot;
extern struct proto pingv6_prot;
struct flowi6;
+struct ipcm6_cookie;
/* extension headers */
int ipv6_exthdrs_init(void);
@@ -56,9 +58,6 @@ ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp,
#define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006)
-/* address family specific functions */
-extern const struct inet_connection_sock_af_ops ipv4_specific;
-
void inet6_destroy_sock(struct sock *sk);
#define IPV6_SEQ_DGRAM_HEADER \
diff --git a/include/net/tso.h b/include/net/tso.h
index 7e166a570349..62c98a9c60f1 100644
--- a/include/net/tso.h
+++ b/include/net/tso.h
@@ -4,21 +4,22 @@
#include <net/ip.h>
-#define TSO_HEADER_SIZE 128
+#define TSO_HEADER_SIZE 256
struct tso_t {
- int next_frag_idx;
- void *data;
- size_t size;
- u16 ip_id;
- bool ipv6;
- u32 tcp_seq;
+ int next_frag_idx;
+ int size;
+ void *data;
+ u16 ip_id;
+ u8 tlen; /* transport header len */
+ bool ipv6;
+ u32 tcp_seq;
};
-int tso_count_descs(struct sk_buff *skb);
-void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
+int tso_count_descs(const struct sk_buff *skb);
+void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso,
int size, bool is_last);
-void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size);
-void tso_start(struct sk_buff *skb, struct tso_t *tso);
+void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size);
+int tso_start(struct sk_buff *skb, struct tso_t *tso);
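/*
 * Hedged driver-loop sketch (variable names hypothetical): tso_start()
 * now returns the total header length, which a driver can use when
 * carving per-segment headers:
 *
 *	struct tso_t tso;
 *	int hdr_len = tso_start(skb, &tso);
 *
 *	while (total_len > 0) {
 *		int size = min(total_len, mss);
 *
 *		tso_build_hdr(skb, hdr, &tso, size, size == total_len);
 *		tso_build_data(skb, &tso, size);
 *		total_len -= size;
 *	}
 */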
#endif /* _TSO_H */
diff --git a/include/net/tun_proto.h b/include/net/tun_proto.h
index 2ea3deba4c99..7b0de7852908 100644
--- a/include/net/tun_proto.h
+++ b/include/net/tun_proto.h
@@ -1,7 +1,8 @@
#ifndef __NET_TUN_PROTO_H
#define __NET_TUN_PROTO_H
-#include <linux/kernel.h>
+#include <linux/if_ether.h>
+#include <linux/types.h>
/* One byte protocol values as defined by VXLAN-GPE and NSH. These will
* hopefully get a shared IANA registry.
diff --git a/include/net/udp.h b/include/net/udp.h
index e55d5f765807..fee053bcd17c 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -27,6 +27,7 @@
#include <linux/ipv6.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
+#include <linux/indirect_call_wrapper.h>
/**
* struct udp_skb_cb - UDP(-Lite) private variables
@@ -94,6 +95,7 @@ static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
extern struct proto udp_prot;
extern atomic_long_t udp_memory_allocated;
+DECLARE_PER_CPU(int, udp_memory_per_cpu_fw_alloc);
/* sysctl variables for udp */
extern long sysctl_udp_mem[3];
@@ -163,29 +165,14 @@ static inline void udp_csum_pull_header(struct sk_buff *skb)
UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
}
-typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
+typedef struct sock *(*udp_lookup_t)(const struct sk_buff *skb, __be16 sport,
__be16 dport);
-struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
- struct udphdr *uh, struct sock *sk);
-int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
+void udp_v6_early_demux(struct sk_buff *skb);
+INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *));
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
- netdev_features_t features);
-
-static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
-{
- struct udphdr *uh;
- unsigned int hlen, off;
-
- off = skb_gro_offset(skb);
- hlen = off + sizeof(*uh);
- uh = skb_gro_header_fast(skb, off);
- if (skb_gro_header_hard(skb, hlen))
- uh = skb_gro_header_slow(skb, hlen, off);
-
- return uh;
-}
+ netdev_features_t features, bool is_ipv6);
/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
static inline int udp_lib_hash(struct sock *sk)
@@ -252,7 +239,7 @@ static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
- return inet_bound_dev_eq(!!net->ipv4.sysctl_udp_l3mdev_accept,
+ return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_udp_l3mdev_accept),
bound_dev_if, dif, sdif);
#else
return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
@@ -260,18 +247,18 @@ static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
}
/* net/ipv4/udp.c */
-void udp_destruct_sock(struct sock *sk);
+void udp_destruct_common(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
-struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
- int noblock, int *off, int *err);
+struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags, int *off,
+ int *err);
static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
- int noblock, int *err)
+ int *err)
{
int off = 0;
- return __skb_recv_udp(sk, flags, noblock, &off, err);
+ return __skb_recv_udp(sk, flags, &off, err);
}
int udp_v4_early_demux(struct sk_buff *skb);
@@ -299,14 +286,14 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen,
+ sockptr_t optval, unsigned int optlen,
int (*push_pending_frames)(struct sock *));
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif);
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif, int sdif,
struct udp_table *tbl, struct sk_buff *skb);
-struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
+struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
__be16 sport, __be16 dport);
struct sock *udp6_lib_lookup(struct net *net,
const struct in6_addr *saddr, __be16 sport,
@@ -317,8 +304,9 @@ struct sock *__udp6_lib_lookup(struct net *net,
const struct in6_addr *daddr, __be16 dport,
int dif, int sdif, struct udp_table *tbl,
struct sk_buff *skb);
-struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
+struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
__be16 sport, __be16 dport);
+int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
/* UDP uses skb->dev_scratch to cache as much information as possible and avoid
* possibly multiple cache miss on dequeue()
@@ -440,6 +428,7 @@ struct udp_seq_afinfo {
struct udp_iter_state {
struct seq_net_private p;
int bucket;
+ struct udp_seq_afinfo *bpf_seq_afinfo;
};
void *udp_seq_start(struct seq_file *seq, loff_t *pos);
@@ -459,6 +448,7 @@ void udp_init(void);
DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void);
+void udp_encap_disable(void);
#if IS_ENABLED(CONFIG_IPV6)
DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void);
@@ -480,8 +470,9 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
* CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial
* packets in udp_gro_complete_segment. As does UDP GSO, verified by
* udp_send_skb. But when those packets are looped in dev_loopback_xmit
- * their ip_summed is set to CHECKSUM_UNNECESSARY. Reset in this
- * specific case, where PARTIAL is both correct and required.
+ * their ip_summed CHECKSUM_NONE is changed to CHECKSUM_UNNECESSARY.
+ * Reset in this specific case, where PARTIAL is both correct and
+ * required.
*/
if (skb->pkt_type == PACKET_LOOPBACK)
skb->ip_summed = CHECKSUM_PARTIAL;
@@ -503,4 +494,33 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
return segs;
}
+static inline void udp_post_segment_fix_csum(struct sk_buff *skb)
+{
+ /* UDP-lite can't land here - no GRO */
+ WARN_ON_ONCE(UDP_SKB_CB(skb)->partial_cov);
+
+ /* UDP packets generated with UDP_SEGMENT and traversing:
+ *
+ * UDP tunnel(xmit) -> veth (segmentation) -> veth (gro) -> UDP tunnel (rx)
+ *
+ * can reach a UDP socket with CHECKSUM_NONE, because
+ * __iptunnel_pull_header() converts CHECKSUM_PARTIAL into NONE.
+ * SKB_GSO_UDP_L4 or SKB_GSO_FRAGLIST packets with no UDP tunnel will
+ * have a valid checksum, as the GRO engine validates the UDP csum
+ * before the aggregation and nobody strips such info in between.
+ * Instead of adding another check in the tunnel fastpath, we can force
+ * a valid csum after the segmentation.
+ * Additionally fixup the UDP CB.
+ */
+ UDP_SKB_CB(skb)->cscov = skb->len;
+ if (skb->ip_summed == CHECKSUM_NONE && !skb->csum_valid)
+ skb->csum_valid = 1;
+}
+
+#ifdef CONFIG_BPF_SYSCALL
+struct sk_psock;
+struct proto *udp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
+int udp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
+#endif
+
#endif /* _UDP_H */
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index 4b1f95e08307..72394f441dad 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -67,6 +67,9 @@ static inline int udp_sock_create(struct net *net,
typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk,
struct sk_buff *skb);
+typedef void (*udp_tunnel_encap_err_rcv_t)(struct sock *sk,
+ struct sk_buff *skb,
+ unsigned int udp_offset);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
struct list_head *head,
@@ -80,6 +83,7 @@ struct udp_tunnel_sock_cfg {
__u8 encap_type;
udp_tunnel_encap_rcv_t encap_rcv;
udp_tunnel_encap_err_lookup_t encap_err_lookup;
+ udp_tunnel_encap_err_rcv_t encap_err_rcv;
udp_tunnel_encap_destroy_t encap_destroy;
udp_tunnel_gro_receive_t gro_receive;
udp_tunnel_gro_complete_t gro_complete;
@@ -106,15 +110,16 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
* call this function to perform Tx offloads on outgoing traffic.
*/
enum udp_parsable_tunnel_type {
- UDP_TUNNEL_TYPE_VXLAN, /* RFC 7348 */
- UDP_TUNNEL_TYPE_GENEVE, /* draft-ietf-nvo3-geneve */
- UDP_TUNNEL_TYPE_VXLAN_GPE, /* draft-ietf-nvo3-vxlan-gpe */
+ UDP_TUNNEL_TYPE_VXLAN = BIT(0), /* RFC 7348 */
+ UDP_TUNNEL_TYPE_GENEVE = BIT(1), /* draft-ietf-nvo3-geneve */
+ UDP_TUNNEL_TYPE_VXLAN_GPE = BIT(2), /* draft-ietf-nvo3-vxlan-gpe */
};
struct udp_tunnel_info {
unsigned short type;
sa_family_t sa_family;
__be16 port;
+ u8 hw_priv;
};
/* Notify network devices of offloadable types */
@@ -128,12 +133,16 @@ void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type);
static inline void udp_tunnel_get_rx_info(struct net_device *dev)
{
ASSERT_RTNL();
+ if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
+ return;
call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev);
}
static inline void udp_tunnel_drop_rx_info(struct net_device *dev)
{
ASSERT_RTNL();
+ if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
+ return;
call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev);
}
@@ -143,14 +152,12 @@ void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb
__be16 df, __be16 src_port, __be16 dst_port,
bool xnet, bool nocheck);
-#if IS_ENABLED(CONFIG_IPV6)
int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb,
struct net_device *dev, struct in6_addr *saddr,
struct in6_addr *daddr,
__u8 prio, __u8 ttl, __be32 label,
__be16 src_port, __be16 dst_port, bool nocheck);
-#endif
void udp_tunnel_sock_release(struct socket *sock);
@@ -178,9 +185,198 @@ static inline void udp_tunnel_encap_enable(struct socket *sock)
#if IS_ENABLED(CONFIG_IPV6)
if (sock->sk->sk_family == PF_INET6)
ipv6_stub->udpv6_encap_enable();
- else
#endif
- udp_encap_enable();
+ udp_encap_enable();
}
+#define UDP_TUNNEL_NIC_MAX_TABLES 4
+
+enum udp_tunnel_nic_info_flags {
+ /* Device callbacks may sleep */
+ UDP_TUNNEL_NIC_INFO_MAY_SLEEP = BIT(0),
+ /* Device only supports offloads when it's open, all ports
+ * will be removed before close and re-added after open.
+ */
+ UDP_TUNNEL_NIC_INFO_OPEN_ONLY = BIT(1),
+ /* Device supports only IPv4 tunnels */
+ UDP_TUNNEL_NIC_INFO_IPV4_ONLY = BIT(2),
+ /* Device has hard-coded the IANA VXLAN port (4789) as VXLAN.
+ * This port must not be counted towards n_entries of any table.
+ * Driver will not receive any callback associated with port 4789.
+ */
+ UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = BIT(3),
+};
+
+struct udp_tunnel_nic;
+
+#define UDP_TUNNEL_NIC_MAX_SHARING_DEVICES (U16_MAX / 2)
+
+struct udp_tunnel_nic_shared {
+ struct udp_tunnel_nic *udp_tunnel_nic_info;
+
+ struct list_head devices;
+};
+
+struct udp_tunnel_nic_shared_node {
+ struct net_device *dev;
+ struct list_head list;
+};
+
+/**
+ * struct udp_tunnel_nic_info - driver UDP tunnel offload information
+ * @set_port: callback for adding a new port
+ * @unset_port: callback for removing a port
+ * @sync_table: callback for syncing the entire port table at once
+ * @shared: reference to device global state (optional)
+ * @flags: device flags from enum udp_tunnel_nic_info_flags
+ * @tables: UDP port tables this device has
+ * @tables.n_entries: number of entries in this table
+ * @tables.tunnel_types: types of tunnels this table accepts
+ *
+ * Drivers are expected to provide either @set_port and @unset_port callbacks
+ * or the @sync_table callback. Callbacks are invoked with rtnl lock held.
+ *
+ * Devices which (misguidedly) share the UDP tunnel port table across multiple
+ * netdevs should allocate an instance of struct udp_tunnel_nic_shared and
+ * point @shared at it.
+ * There must never be more than %UDP_TUNNEL_NIC_MAX_SHARING_DEVICES devices
+ * sharing a table.
+ *
+ * Known limitations:
+ * - UDP tunnel port notifications are fundamentally best-effort -
+ * it is likely the driver will see both skbs which use a UDP tunnel
+ * port while not being tunneled skbs, and tunneled skbs from other ports -
+ * drivers should only use these ports for non-critical RX-side offloads,
+ * e.g. the checksum offload;
+ * - none of the devices care about the socket family at present, so we don't
+ * track it. Please extend this code if you care.
+ */
+struct udp_tunnel_nic_info {
+ /* one-by-one */
+ int (*set_port)(struct net_device *dev,
+ unsigned int table, unsigned int entry,
+ struct udp_tunnel_info *ti);
+ int (*unset_port)(struct net_device *dev,
+ unsigned int table, unsigned int entry,
+ struct udp_tunnel_info *ti);
+
+ /* all at once */
+ int (*sync_table)(struct net_device *dev, unsigned int table);
+
+ struct udp_tunnel_nic_shared *shared;
+
+ unsigned int flags;
+
+ struct udp_tunnel_nic_table_info {
+ unsigned int n_entries;
+ unsigned int tunnel_types;
+ } tables[UDP_TUNNEL_NIC_MAX_TABLES];
+};
+
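/*
 * Hedged example (driver names hypothetical): a NIC with a single
 * 8-entry port table accepting VXLAN and GENEVE could describe itself
 * to the core as:
 *
 *	static const struct udp_tunnel_nic_info foo_udp_tunnels = {
 *		.set_port	= foo_udp_tunnel_set_port,
 *		.unset_port	= foo_udp_tunnel_unset_port,
 *		.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
 *		.tables		= {
 *			{
 *				.n_entries	= 8,
 *				.tunnel_types	= UDP_TUNNEL_TYPE_VXLAN |
 *						  UDP_TUNNEL_TYPE_GENEVE,
 *			},
 *		},
 *	};
 */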
+/* UDP tunnel module dependencies
+ *
+ * Tunnel drivers are expected to have a hard dependency on the udp_tunnel
+ * module. NIC drivers are not; they just attach their
+ * struct udp_tunnel_nic_info to the netdev and wait for callbacks to come.
+ * Loading a tunnel driver will cause the udp_tunnel module to be loaded
+ * and only then will all the required state structures be allocated.
+ * Since we want a weak dependency from the drivers and the core to udp_tunnel
+ * we call things through the following stubs.
+ */
+struct udp_tunnel_nic_ops {
+ void (*get_port)(struct net_device *dev, unsigned int table,
+ unsigned int idx, struct udp_tunnel_info *ti);
+ void (*set_port_priv)(struct net_device *dev, unsigned int table,
+ unsigned int idx, u8 priv);
+ void (*add_port)(struct net_device *dev, struct udp_tunnel_info *ti);
+ void (*del_port)(struct net_device *dev, struct udp_tunnel_info *ti);
+ void (*reset_ntf)(struct net_device *dev);
+
+ size_t (*dump_size)(struct net_device *dev, unsigned int table);
+ int (*dump_write)(struct net_device *dev, unsigned int table,
+ struct sk_buff *skb);
+};
+
+#ifdef CONFIG_INET
+extern const struct udp_tunnel_nic_ops *udp_tunnel_nic_ops;
+#else
+#define udp_tunnel_nic_ops ((struct udp_tunnel_nic_ops *)NULL)
+#endif
+
+static inline void
+udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table,
+ unsigned int idx, struct udp_tunnel_info *ti)
+{
+ /* This helper is used from .sync_table, we indicate empty entries
+ * by zeroed @ti. Drivers which need to know the details of a port
+ * when it gets deleted should use the .set_port / .unset_port
+ * callbacks.
+ * Zero out here, otherwise !CONFIG_INET causes uninitialized warnings.
+ */
+ memset(ti, 0, sizeof(*ti));
+
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->get_port(dev, table, idx, ti);
+}
+
+static inline void
+udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
+ unsigned int idx, u8 priv)
+{
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->set_port_priv(dev, table, idx, priv);
+}
+
+static inline void
+udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti)
+{
+ if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
+ return;
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->add_port(dev, ti);
+}
+
+static inline void
+udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti)
+{
+ if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
+ return;
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->del_port(dev, ti);
+}
+
+/**
+ * udp_tunnel_nic_reset_ntf() - device-originating reset notification
+ * @dev: network interface device structure
+ *
+ * Called by the driver to inform the core that the entire UDP tunnel port
+ * state has been lost, usually due to device reset. Core will assume device
+ * forgot all the ports and issue .set_port and .sync_table callbacks as
+ * necessary.
+ *
+ * This function must be called with rtnl lock held, and will issue all
+ * the callbacks before returning.
+ */
+static inline void udp_tunnel_nic_reset_ntf(struct net_device *dev)
+{
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->reset_ntf(dev);
+}
+
+static inline size_t
+udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table)
+{
+ if (!udp_tunnel_nic_ops)
+ return 0;
+ return udp_tunnel_nic_ops->dump_size(dev, table);
+}
+
+static inline int
+udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table,
+ struct sk_buff *skb)
+{
+ if (!udp_tunnel_nic_ops)
+ return 0;
+ return udp_tunnel_nic_ops->dump_write(dev, table, skb);
+}
#endif
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 9185e45b997f..299c14ce2bb9 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -6,6 +6,7 @@
#define _UDPLITE_H
#include <net/ip6_checksum.h>
+#include <net/udp.h>
/* UDP-Lite socket options */
#define UDPLITE_SEND_CSCOV 10 /* sender partial coverage (as sent) */
@@ -24,14 +25,6 @@ static __inline__ int udplite_getfrag(void *from, char *to, int offset,
return copy_from_iter_full(to, len, &msg->msg_iter) ? 0 : -EFAULT;
}
-/* Designate sk as UDP-Lite socket */
-static inline int udplite_sk_init(struct sock *sk)
-{
- udp_init_sock(sk);
- udp_sk(sk)->pcflag = UDPLITE_BIT;
- return 0;
-}
-
/*
* Checksumming routines
*/
@@ -70,49 +63,6 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
return 0;
}
-/* Slow-path computation of checksum. Socket is locked. */
-static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
-{
- const struct udp_sock *up = udp_sk(skb->sk);
- int cscov = up->len;
- __wsum csum = 0;
-
- if (up->pcflag & UDPLITE_SEND_CC) {
- /*
- * Sender has set `partial coverage' option on UDP-Lite socket.
- * The special case "up->pcslen == 0" signifies full coverage.
- */
- if (up->pcslen < up->len) {
- if (0 < up->pcslen)
- cscov = up->pcslen;
- udp_hdr(skb)->len = htons(up->pcslen);
- }
- /*
- * NOTE: Causes for the error case `up->pcslen > up->len':
- * (i) Application error (will not be penalized).
- * (ii) Payload too big for send buffer: data is split
- * into several packets, each with its own header.
- * In this case (e.g. last segment), coverage may
- * exceed packet length.
- * Since packets with coverage length > packet length are
- * illegal, we fall back to the defaults here.
- */
- }
-
- skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
-
- skb_queue_walk(&sk->sk_write_queue, skb) {
- const int off = skb_transport_offset(skb);
- const int len = skb->len - off;
-
- csum = skb_checksum(skb, off, (cscov > len)? len : cscov, csum);
-
- if ((cscov -= len) <= 0)
- break;
- }
- return csum;
-}
-
/* Fast-path computation of checksum. Socket may not be locked. */
static inline __wsum udplite_csum(struct sk_buff *skb)
{
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 373aadcfea21..bca5b01af247 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -7,8 +7,10 @@
#include <net/dst_metadata.h>
#include <net/rtnetlink.h>
#include <net/switchdev.h>
+#include <net/nexthop.h>
#define IANA_VXLAN_UDP_PORT 4789
+#define IANA_VXLAN_GPE_UDP_PORT 4790
/* VXLAN protocol (RFC 7348) header:
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -120,6 +122,9 @@ struct vxlanhdr_gbp {
#define VXLAN_GBP_POLICY_APPLIED (BIT(3) << 16)
#define VXLAN_GBP_ID_MASK (0xFFFF)
+#define VXLAN_GBP_MASK (VXLAN_GBP_DONT_LEARN | VXLAN_GBP_POLICY_APPLIED | \
+ VXLAN_GBP_ID_MASK)
+
/*
* VXLAN Generic Protocol Extension (VXLAN_F_GPE):
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -222,11 +227,56 @@ struct vxlan_config {
enum ifla_vxlan_df df;
};
+enum {
+ VXLAN_VNI_STATS_RX,
+ VXLAN_VNI_STATS_RX_DROPS,
+ VXLAN_VNI_STATS_RX_ERRORS,
+ VXLAN_VNI_STATS_TX,
+ VXLAN_VNI_STATS_TX_DROPS,
+ VXLAN_VNI_STATS_TX_ERRORS,
+};
+
+struct vxlan_vni_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_drops;
+ u64 rx_errors;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_drops;
+ u64 tx_errors;
+};
+
+struct vxlan_vni_stats_pcpu {
+ struct vxlan_vni_stats stats;
+ struct u64_stats_sync syncp;
+};
+
struct vxlan_dev_node {
struct hlist_node hlist;
struct vxlan_dev *vxlan;
};
+struct vxlan_vni_node {
+ struct rhash_head vnode;
+ struct vxlan_dev_node hlist4; /* vni hash table for IPv4 socket */
+#if IS_ENABLED(CONFIG_IPV6)
+ struct vxlan_dev_node hlist6; /* vni hash table for IPv6 socket */
+#endif
+ struct list_head vlist;
+ __be32 vni;
+ union vxlan_addr remote_ip; /* default remote ip for this vni */
+ struct vxlan_vni_stats_pcpu __percpu *stats;
+
+ struct rcu_head rcu;
+};
+
+struct vxlan_vni_group {
+ struct rhashtable vni_hash;
+ struct list_head vni_list;
+ u32 num_vnis;
+};
+
/* Pseudo network device */
struct vxlan_dev {
struct vxlan_dev_node hlist4; /* vni hash table for IPv4 socket */
@@ -249,6 +299,8 @@ struct vxlan_dev {
struct vxlan_config cfg;
+ struct vxlan_vni_group __rcu *vnigrp;
+
struct hlist_head fdb_head[FDB_HASH_SIZE];
};
@@ -269,6 +321,7 @@ struct vxlan_dev {
#define VXLAN_F_GPE 0x4000
#define VXLAN_F_IPV6_LINKLOCAL 0x8000
#define VXLAN_F_TTL_INHERIT 0x10000
+#define VXLAN_F_VNIFILTER 0x20000
/* Flags that are used in the receive path. These flags must match in
* order for a socket to be shareable
@@ -278,7 +331,8 @@ struct vxlan_dev {
VXLAN_F_UDP_ZERO_CSUM6_RX | \
VXLAN_F_REMCSUM_RX | \
VXLAN_F_REMCSUM_NOPARTIAL | \
- VXLAN_F_COLLECT_METADATA)
+ VXLAN_F_COLLECT_METADATA | \
+ VXLAN_F_VNIFILTER)
/* Flags that can be set together with VXLAN_F_GPE. */
#define VXLAN_F_ALLOWED_GPE (VXLAN_F_GPE | \
@@ -287,7 +341,8 @@ struct vxlan_dev {
VXLAN_F_UDP_ZERO_CSUM_TX | \
VXLAN_F_UDP_ZERO_CSUM6_TX | \
VXLAN_F_UDP_ZERO_CSUM6_RX | \
- VXLAN_F_COLLECT_METADATA)
+ VXLAN_F_COLLECT_METADATA | \
+ VXLAN_F_VNIFILTER)
struct net_device *vxlan_dev_create(struct net *net, const char *name,
u8 name_assign_type, struct vxlan_config *conf);
@@ -487,4 +542,28 @@ static inline void vxlan_flag_attr_error(int attrtype,
#undef VXLAN_FLAG
}
+static inline bool vxlan_fdb_nh_path_select(struct nexthop *nh,
+ int hash,
+ struct vxlan_rdst *rdst)
+{
+ struct fib_nh_common *nhc;
+
+ nhc = nexthop_path_fdb_result(nh, hash);
+ if (unlikely(!nhc))
+ return false;
+
+ switch (nhc->nhc_gw_family) {
+ case AF_INET:
+ rdst->remote_ip.sin.sin_addr.s_addr = nhc->nhc_gw.ipv4;
+ rdst->remote_ip.sa.sa_family = AF_INET;
+ break;
+ case AF_INET6:
+ rdst->remote_ip.sin6.sin6_addr = nhc->nhc_gw.ipv6;
+ rdst->remote_ip.sa.sa_family = AF_INET6;
+ break;
+ }
+
+ return true;
+}
+
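/*
 * Hedged usage sketch: the transmit path resolves one remote per
 * packet, typically seeding the ECMP choice with the flow hash, e.g.
 *
 *	if (!vxlan_fdb_nh_path_select(nh, skb_get_hash(skb), &rdst))
 *		goto drop;
 */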
#endif
diff --git a/include/net/wimax.h b/include/net/wimax.h
deleted file mode 100644
index 24ba7e89c26c..000000000000
--- a/include/net/wimax.h
+++ /dev/null
@@ -1,503 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Linux WiMAX
- * Kernel space API for accessing WiMAX devices
- *
- * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * The WiMAX stack provides an API for controlling and managing the
- * system's WiMAX devices. This API affects the control plane; the
- * data plane is accessed via the network stack (netdev).
- *
- * Parts of the WiMAX stack API and notifications are exported to
- * user space via Generic Netlink. In user space, libwimax (part of
- * the wimax-tools package) provides a shim layer for accessing those
- * calls.
- *
- * The API is standardized for all WiMAX devices and different drivers
- * implement the backend support for it. However, device-specific
- * messaging pipes are provided that can be used to issue commands and
- * receive notifications in free form.
- *
- * Currently the messaging pipes are the only means of control as it
- * is not known (due to the lack of more devices in the market) what
- * will be a good abstraction layer. Expect this to change as more
- * devices show up in the market. This API is designed to be growable in
- * order to address this problem.
- *
- * USAGE
- *
- * Embed a `struct wimax_dev` at the beginning of the device's
- * private structure, initialize and register it. For details, see
- * `struct wimax_dev`'s documentation.
- *
- * Once this is done, wimax-tools's libwimaxll can be used to
- * communicate with the driver from user space. Your user space
- * application does not have to use libwimaxll and can talk the
- * generic netlink protocol directly if desired.
- *
- * Remember this is a very low level API that will provide all of
- * WiMAX features. Other daemons and services running in user space
- * are the expected clients of it. They offer a higher level API that
- * applications should use (an example of this is the Intel's WiMAX
- * Network Service for the i2400m).
- *
- * DESIGN
- *
- * Although not set in stone, this very basic interface is mostly
- * complete. Remember this is meant to grow as new common
- * operations are decided upon. New operations will be added to the
- * interface, the intent being to keep backwards compatibility as much
- * as possible.
- *
- * This layer implements a set of calls to control a WiMAX device,
- * exposing a frontend to the rest of the kernel and user space (via
- * generic netlink) and a backend implementation in the driver through
- * function pointers.
- *
- * WiMAX devices have a state, and a kernel-only API allows the
- * drivers to manipulate that state. State transitions are atomic, and
- * only some of them are allowed (see `enum wimax_st`).
- *
- * Most API calls will set the state automatically; in most cases
- * drivers have to only report state changes due to external
- * conditions.
- *
- * All API operations are 'atomic', serialized through a mutex in the
- * `struct wimax_dev`.
- *
- * EXPORTING TO USER SPACE THROUGH GENERIC NETLINK
- *
- * The API is exported to user space using generic netlink (other
- * methods can be added as needed).
- *
- * There is a Generic Netlink Family named "WiMAX", where interfaces
- * supporting the WiMAX interface receive commands and broadcast their
- * signals over a multicast group named "msg".
- *
- * Mapping to the source/destination interface is done by an interface
- * index attribute.
- *
- * For user-to-kernel traffic (commands) we use a function call
- * marshalling mechanism, where a message X with attributes A, B, C
- * sent from user space to kernel space means executing the WiMAX API
- * call wimax_X(A, B, C), sending the results back as a message.
- *
- * Kernel-to-user (notifications or signals) communication is sent
- * over multicast groups. This allows multiple applications to
- * monitor them.
- *
- * Each command/signal gets assigned its own attribute policy. This
- * way the validator will verify that all the attributes in there are
- * only the ones that should be for each command/signal. Think of an
- * attribute as mapping to a type+argument name for each command/signal.
- *
- * If we had a single policy for *all* commands/signals, after running
- * the validator we'd have to check "does this attribute belong in
- * here"? for each one. It can be done manually, but it's just easier
- * to have the validator do that job with multiple policies. As well,
- * it makes it easier to later expand each command/signal signature
- * without affecting others and keeping the namespace more or less
- * sane. Not that it is too complicated, but it makes it even easier.
- *
- * No state information is maintained in the kernel for each user
- * space connection (the connection is stateless).
- *
- * TESTING FOR THE INTERFACE AND VERSIONING
- *
- * If network interface X is a WiMAX device, there will be a Generic
- * Netlink family named "WiMAX X" and the device will present a
- * "wimax" directory in it's network sysfs directory
- * (/sys/class/net/DEVICE/wimax) [used by HAL].
- *
- * The absence of any of these means the device does not support
- * this WiMAX API.
- *
- * By querying the generic netlink controller, versioning information
- * and the multicast groups available can be found. Applications using
- * the interface can either rely on that or use the generic netlink
- * controller to figure out which generic netlink commands/signals are
- * supported.
- *
- * NOTE: this versioning is a last resort to avoid hard
- * incompatibilities. It is the intention of the design of this
- * stack not to introduce backward incompatible changes.
- *
- * The version code has to fit in one byte (restrictions imposed by
- * generic netlink); we use `version / 10` for the major version and
- * `version % 10` for the minor. This gives 9 minors for each major
- * and 25 majors.
- *
- * The version change protocol is as follows:
- *
- * - Major version: needs to be increased if an existing message/API
- * call is changed or removed. Doesn't need to be changed if a new
- * message is added.
- *
- * - Minor version: needs to be increased if new messages/API calls
- *   are added, or for some other change that doesn't impact the
- *   user-kernel interface much (like some kind of bug fix); exactly
- *   when is largely left to common sense.
- *
- * User space code should not try to work if the major version it was
- * compiled for differs from what the kernel offers. As well, if the
- * minor version of the kernel interface is lower than the one user
- * space is expecting (the one it was compiled for), the kernel
- * might be missing API calls; user space shall be ready to handle
- * said condition. Use the generic netlink controller operations to
- * find which ones are supported and which are not.
- *
- * libwimaxll:wimaxll_open() takes care of checking versions.
- *
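The one-byte encoding above is plain integer arithmetic; a minimal illustrative decoding (the helper names are made up for this sketch and were never part of the removed API):

/* major = version / 10, minor = version % 10 */
static inline unsigned int wimax_ver_major(unsigned char v)
{
	return v / 10;
}

static inline unsigned int wimax_ver_minor(unsigned char v)
{
	return v % 10;
}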
- * THE OPERATIONS:
- *
- * Each operation is defined in its own file (drivers/net/wimax/op-*.c)
- * for clarity. The parts needed for an operation are:
- *
- * - a function pointer in `struct wimax_dev`: optional, as the
- * operation might be implemented by the stack and not by the
- * driver.
- *
- * All function pointers are named wimax_dev->op_*(), and drivers
- * must implement them except where noted otherwise.
- *
- * - When exported to user space, a `struct nla_policy` to define the
- * attributes of the generic netlink command and a `struct genl_ops`
- * to define the operation.
- *
- * All the declarations for the operation codes (WIMAX_GNL_OP_<NAME>)
- * and generic netlink attributes (WIMAX_GNL_<NAME>_*) are declared in
- * include/linux/wimax.h; this file is intended to be cloned by user
- * space to gain access to those declarations.
- *
- * A few caveats to remember:
- *
- * - Need to define attribute numbers starting at 1; otherwise it
- * fails.
- *
- * - the `struct genl_family` requires a maximum attribute id; when
- * defining the `struct nla_policy` for each message, it has to have
- * an array size of WIMAX_GNL_ATTR_MAX+1.
- *
- * The op_*() function pointers will not be called if the wimax_dev is
- * in a state <= %WIMAX_ST_UNINITIALIZED. The exception is:
- *
- * - op_reset: can be called at any time after wimax_dev_add() has
- * been called.
- *
- * THE PIPE INTERFACE:
- *
- * This interface is kept intentionally simple. The driver can send
- * and receive free-form messages to/from user space through a
- * pipe. See drivers/net/wimax/op-msg.c for details.
- *
- * The kernel-to-user messages are sent with
- * wimax_msg(). User-to-kernel messages are delivered via
- * wimax_dev->op_msg_from_user().
- *
- * RFKILL:
- *
- * RFKILL support is built into the wimax_dev layer; the driver just
- * needs to call wimax_report_rfkill_{hw,sw}() to inform of changes in
- * the hardware or software RF kill switches. When the stack wants to
- * turn the radio off, it will call wimax_dev->op_rfkill_sw_toggle(),
- * which the driver implements.
- *
- * User space can set the software RF Kill switch by calling
- * wimax_rfkill().
- *
- * The code for now only supports devices that don't require polling;
- * if the device needs to be polled, create a self-rearming delayed
- * work struct for polling or look into adding polled support to the
- * WiMAX stack.
- *
- * When initializing the hardware (_probe), after calling
- * wimax_dev_add(), query the device for its RF Kill switch status
- * and feed it back to the WiMAX stack using
- * wimax_report_rfkill_{hw,sw}(). If any switch is missing, always
- * report it as ON.
- *
- * NOTE: the wimax stack uses an inverted terminology to that of the
- * RFKILL subsystem:
- *
- * - ON: radio is ON, RFKILL is DISABLED or OFF.
- * - OFF: radio is OFF, RFKILL is ENABLED or ON.
- *
- * MISCELLANEOUS OPS:
- *
- * wimax_reset() can be used to reset the device to power on state; by
- * default it issues a warm reset that maintains the same device
- * node. If that is not possible, it falls back to a cold reset
- * (device reconnect). The driver implements the backend to this
- * through wimax_dev->op_reset().
- */
-
-#ifndef __NET__WIMAX_H__
-#define __NET__WIMAX_H__
-
-#include <linux/wimax.h>
-#include <net/genetlink.h>
-#include <linux/netdevice.h>
-
-struct net_device;
-struct genl_info;
-struct wimax_dev;
-
-/**
- * struct wimax_dev - Generic WiMAX device
- *
- * @net_dev: [fill] Pointer to the &struct net_device this WiMAX
- * device implements.
- *
- * @op_msg_from_user: [fill] Driver-specific operation to
- * handle a raw message from user space to the driver. The
- * driver can send messages to user space with
- * wimax_msg_to_user().
- *
- * @op_rfkill_sw_toggle: [fill] Driver-specific operation to act on
- * userspace (or any other agent) requesting the WiMAX device to
- * change the RF Kill software switch (WIMAX_RF_ON or
- * WIMAX_RF_OFF).
- * If such hardware support is not present, it is assumed the
- * radio cannot be switched off and it is always on (and the stack
- * will error out when trying to switch it off). In such case,
- * this function pointer can be left as NULL.
- *
- * @op_reset: [fill] Driver specific operation to reset the
- * device.
- * This operation should always first attempt a warm reset that
- * does not disconnect the device from the bus, and return 0.
- * If that fails, it should resort to some sort of cold or bus
- * reset (even if it implies a bus disconnection and device
- * disappearance). In that case, -ENODEV should be returned to
- * indicate the device is gone.
- * This operation has to be synchronous, and return only when the
- * reset is complete. In case of having had to resort to bus/cold
- * reset implying a device disconnection, the call is allowed to
- * return immediately.
- * NOTE: wimax_dev->mutex is NOT locked when this op is being
- * called; however, wimax_dev->mutex_reset IS locked to ensure
- * serialization of calls to wimax_reset().
- * See wimax_reset()'s documentation.
- *
- * @name: [fill] A way to identify this device. We need to register a
- * name with many subsystems (rfkill, workqueue creation, etc).
- * We can't use the network device name as that
- * might change and in some instances we don't know it yet (until
- * we call register_netdev()). So we generate a unique one
- * using the driver name and device bus id, place it here and use
- * it across the board. Recommended naming:
- * DRIVERNAME-BUSNAME:BUSID (dev->bus->name, dev->bus_id).
- *
- * @id_table_node: [private] link to the list of wimax devices kept by
- * id-table.c. Protected by its own spinlock.
- *
- * @mutex: [private] Serializes all concurrent access and execution of
- * operations.
- *
- * @mutex_reset: [private] Serializes reset operations. Needs to be a
- * different mutex because as part of the reset operation, the
- * driver has to call back into the stack to do things such as
- * state change, that require wimax_dev->mutex.
- *
- * @state: [private] Current state of the WiMAX device.
- *
- * @rfkill: [private] integration into the RF-Kill infrastructure.
- *
- * @rf_sw: [private] State of the software radio switch (OFF/ON)
- *
- * @rf_hw: [private] State of the hardware radio switch (OFF/ON)
- *
- * @debugfs_dentry: [private] Used to hook up a debugfs entry. This
- * shows up in the debugfs root as wimax\:DEVICENAME.
- *
- * Description:
- * This structure defines a common interface to access all WiMAX
- * devices from different vendors and provides a common API as well as
- * a free-form device-specific messaging channel.
- *
- * Usage:
- * 1. Embed a &struct wimax_dev at *the beginning* of the network
- * device structure so that netdev_priv() points to it.
- *
- * 2. memset() it to zero
- *
- * 3. Initialize with wimax_dev_init(). This will leave the WiMAX
- * device in the %__WIMAX_ST_NULL state.
- *
- * 4. Fill all the fields marked with [fill]; once called
- * wimax_dev_add(), those fields CANNOT be modified.
- *
- * 5. Call wimax_dev_add() *after* registering the network
- * device. This will leave the WiMAX device in the %WIMAX_ST_DOWN
- * state.
- * Protect the driver's net_device->open() against succeeding if
- * the wimax device state is lower than %WIMAX_ST_DOWN.
- *
- * 6. Select when the device is going to be turned on/initialized;
- * for example, it could be initialized on 'ifconfig up' (when the
- * netdev op 'open()' is called on the driver).
- *
- * When the device is initialized (at `ifconfig up` time, or right
- * after calling wimax_dev_add() from _probe()), make sure the
- * following steps are taken:
- *
- * a. Move the device to %WIMAX_ST_UNINITIALIZED. This is needed so
- * some API calls that shouldn't work until the device is ready
- * can be blocked.
- *
- * b. Initialize the device. Make sure to turn the SW radio switch
- * off and move the device to state %WIMAX_ST_RADIO_OFF when
- * done. When just initialized, a device should be left in RADIO
- *    OFF state until user space decides to turn it on.
- *
- * c. Query the device for the state of the hardware rfkill switch
- *    and call wimax_report_rfkill_hw() and wimax_report_rfkill_sw()
- * as needed. See below.
- *
- * wimax_dev_rm() undoes wimax_dev_add(); call it before unregistering
- * the network device. Once wimax_dev_add() is called, the driver can
- * get called on the wimax_dev->op_* function pointers.
- *
- * CONCURRENCY:
- *
- * The stack provides a mutex for each device that will disallow API
- * calls happening concurrently; thus, op calls into the driver
- * through the wimax_dev->op*() function pointers will always be
- * serialized and *never* concurrent.
- *
- * For locking, wimax_dev->mutex is taken; (most) operations in
- * the API have to check for wimax_dev_is_ready() to return 0 before
- * continuing (this is done internally).
- *
- * REFERENCE COUNTING:
- *
- * The WiMAX device is reference counted by the associated network
- * device. The only operation that can be used to reference the device
- * is wimax_dev_get_by_genl_info(), and the reference it acquires has
- * to be released with dev_put(wimax_dev->net_dev).
- *
- * RFKILL:
- *
- * At startup, both HW and SW radio switches are assumed to be off.
- *
- * At initialization time [after calling wimax_dev_add()], have the
- * driver query the device for the status of the software and hardware
- * RF kill switches and call wimax_report_rfkill_hw() and
- * wimax_report_rfkill_sw() to indicate their state. If any is
- * missing, just call it to indicate it is ON (radio always on).
- *
- * Whenever the driver detects a change in the state of the RF kill
- * switches, it should call wimax_report_rfkill_hw() or
- * wimax_report_rfkill_sw() to report it to the stack.
- */
-struct wimax_dev {
- struct net_device *net_dev;
- struct list_head id_table_node;
- struct mutex mutex; /* Protects all members and API calls */
- struct mutex mutex_reset;
- enum wimax_st state;
-
- int (*op_msg_from_user)(struct wimax_dev *wimax_dev,
- const char *,
- const void *, size_t,
- const struct genl_info *info);
- int (*op_rfkill_sw_toggle)(struct wimax_dev *wimax_dev,
- enum wimax_rf_state);
- int (*op_reset)(struct wimax_dev *wimax_dev);
-
- struct rfkill *rfkill;
- unsigned int rf_hw;
- unsigned int rf_sw;
- char name[32];
-
- struct dentry *debugfs_dentry;
-};
-
-
-
-/*
- * WiMAX stack public API for device drivers
- * -----------------------------------------
- *
- * These functions are not exported to user space.
- */
-void wimax_dev_init(struct wimax_dev *);
-int wimax_dev_add(struct wimax_dev *, struct net_device *);
-void wimax_dev_rm(struct wimax_dev *);
-
-static inline
-struct wimax_dev *net_dev_to_wimax(struct net_device *net_dev)
-{
- return netdev_priv(net_dev);
-}
-
-static inline
-struct device *wimax_dev_to_dev(struct wimax_dev *wimax_dev)
-{
- return wimax_dev->net_dev->dev.parent;
-}
-
-void wimax_state_change(struct wimax_dev *, enum wimax_st);
-enum wimax_st wimax_state_get(struct wimax_dev *);
-
-/*
- * Radio Switch state reporting.
- *
- * enum wimax_rf_state is declared in linux/wimax.h so the exports
- * to user space can use it.
- */
-void wimax_report_rfkill_hw(struct wimax_dev *, enum wimax_rf_state);
-void wimax_report_rfkill_sw(struct wimax_dev *, enum wimax_rf_state);
-
-
-/*
- * Free-form messaging to/from user space
- *
- * Sending a message:
- *
- * wimax_msg(wimax_dev, pipe_name, buf, buf_size, GFP_KERNEL);
- *
- * Broken up:
- *
- * skb = wimax_msg_alloc(wimax_dev, pipe_name, buf_size, GFP_KERNEL);
- * ...fill up skb...
- * wimax_msg_send(wimax_dev, pipe_name, skb);
- *
- * Be sure not to modify skb->data in the middle (ie: don't use
- * skb_push()/skb_pull()/skb_reserve() on the skb).
- *
- * "pipe_name" is any string, that can be interpreted as the name of
- * the pipe or recipient; the interpretation of it is driver
- * specific, so the recipient can multiplex it as wished. It can be
- * NULL, in which case it won't be used; an example is a "diagnostics" tag to
- * send diagnostics information that a device-specific diagnostics
- * tool would be interested in.
- */
-struct sk_buff *wimax_msg_alloc(struct wimax_dev *, const char *, const void *,
- size_t, gfp_t);
-int wimax_msg_send(struct wimax_dev *, struct sk_buff *);
-int wimax_msg(struct wimax_dev *, const char *, const void *, size_t, gfp_t);
-
-const void *wimax_msg_data_len(struct sk_buff *, size_t *);
-const void *wimax_msg_data(struct sk_buff *);
-ssize_t wimax_msg_len(struct sk_buff *);
-
-
-/*
- * WiMAX stack user space API
- * --------------------------
- *
- * This API is what gets exported to user space for general
- * operations. As well, they can be called from within the kernel
- * (with a properly referenced `struct wimax_dev`).
- *
- * Properly referenced means: the 'struct net_device' that embeds the
- * device's control structure and (as such) the 'struct wimax_dev' is
- * referenced by the caller.
- */
-int wimax_rfkill(struct wimax_dev *, enum wimax_rf_state);
-int wimax_reset(struct wimax_dev *);
-
-#endif /* #ifndef __NET__WIMAX_H__ */
diff --git a/include/net/xdp.h b/include/net/xdp.h
index 40c6d3398458..55dbc68bfffc 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -6,6 +6,8 @@
#ifndef __LINUX_NET_XDP_H__
#define __LINUX_NET_XDP_H__
+#include <linux/skbuff.h> /* skb_shared_info */
+
/**
* DOC: XDP RX-queue information
*
@@ -13,13 +15,13 @@
* level RX-ring queues. It is information that is specific to how
* the driver has configured a given RX-ring queue.
*
- * Each xdp_buff frame received in the driver carry a (pointer)
+ * Each xdp_buff frame received in the driver carries a (pointer)
* reference to this xdp_rxq_info structure. This provides the XDP
* data-path read-access to RX-info for both kernel and bpf-side
* (limited subset).
*
* For now, direct access is only safe while running in NAPI/softirq
- * context. Contents is read-mostly and must not be updated during
+ * context. Contents are read-mostly and must not be updated during
* driver NAPI/softirq poll.
*
* The driver usage API is a register and unregister API.
@@ -28,8 +30,8 @@
* can be attached as long as it doesn't change the underlying
* RX-ring. If the RX-ring does change significantly, the NIC driver
* naturally needs to stop the RX-ring before purging and reallocating
- * memory. In that process the driver MUST call unregistor (which
- * also apply for driver shutdown and unload). The register API is
+ * memory. In that process the driver MUST call unregister (which
+ * also applies for driver shutdown and unload). The register API is
* also mandatory during RX-ring setup.
*/
@@ -37,7 +39,7 @@ enum xdp_mem_type {
MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */
MEM_TYPE_PAGE_ORDER0, /* Orig XDP full page model */
MEM_TYPE_PAGE_POOL,
- MEM_TYPE_ZERO_COPY,
+ MEM_TYPE_XSK_BUFF_POOL,
MEM_TYPE_MAX,
};
@@ -52,36 +54,162 @@ struct xdp_mem_info {
struct page_pool;
-struct zero_copy_allocator {
- void (*free)(struct zero_copy_allocator *zca, unsigned long handle);
-};
-
struct xdp_rxq_info {
struct net_device *dev;
u32 queue_index;
u32 reg_state;
struct xdp_mem_info mem;
+ unsigned int napi_id;
+ u32 frag_size;
} ____cacheline_aligned; /* perf critical, avoid false-sharing */
+struct xdp_txq_info {
+ struct net_device *dev;
+};
+
+enum xdp_buff_flags {
+ XDP_FLAGS_HAS_FRAGS = BIT(0), /* non-linear xdp buff */
+ XDP_FLAGS_FRAGS_PF_MEMALLOC = BIT(1), /* xdp paged memory is under
+ * pressure
+ */
+};
+
struct xdp_buff {
void *data;
void *data_end;
void *data_meta;
void *data_hard_start;
- unsigned long handle;
struct xdp_rxq_info *rxq;
+ struct xdp_txq_info *txq;
+ u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom */
+ u32 flags; /* supported values defined in xdp_buff_flags */
};
+static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp)
+{
+ return !!(xdp->flags & XDP_FLAGS_HAS_FRAGS);
+}
+
+static __always_inline void xdp_buff_set_frags_flag(struct xdp_buff *xdp)
+{
+ xdp->flags |= XDP_FLAGS_HAS_FRAGS;
+}
+
+static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp)
+{
+ xdp->flags &= ~XDP_FLAGS_HAS_FRAGS;
+}
+
+static __always_inline bool xdp_buff_is_frag_pfmemalloc(struct xdp_buff *xdp)
+{
+ return !!(xdp->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
+}
+
+static __always_inline void xdp_buff_set_frag_pfmemalloc(struct xdp_buff *xdp)
+{
+ xdp->flags |= XDP_FLAGS_FRAGS_PF_MEMALLOC;
+}
+
+static __always_inline void
+xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)
+{
+ xdp->frame_sz = frame_sz;
+ xdp->rxq = rxq;
+ xdp->flags = 0;
+}
+
+static __always_inline void
+xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
+ int headroom, int data_len, const bool meta_valid)
+{
+ unsigned char *data = hard_start + headroom;
+
+ xdp->data_hard_start = hard_start;
+ xdp->data = data;
+ xdp->data_end = data + data_len;
+ xdp->data_meta = meta_valid ? data : data + 1;
+}
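
xdp_init_buff() carries the per-ring invariants and xdp_prepare_buff() the per-packet ones. A sketch of how a driver RX loop might wire them up, assuming a page-per-packet layout; ring, page and frame_len are placeholder driver state:

struct xdp_buff xdp;

/* once per NAPI cycle: frame_sz and rxq are constant for the ring */
xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_rxq);

/* per packet: point at the received data, no metadata yet */
xdp_prepare_buff(&xdp, page_address(page), XDP_PACKET_HEADROOM,
		 frame_len, false);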
+
+/* Reserve memory area at the end of the data area.
+ *
+ * This macro reserves tailroom in the XDP buffer by limiting the
+ * XDP/BPF data access to data_hard_end. Notice the same area (and
+ * size) is used for XDP_PASS, when constructing the SKB via build_skb().
+ */
+#define xdp_data_hard_end(xdp) \
+ ((xdp)->data_hard_start + (xdp)->frame_sz - \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
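The reserved tailroom makes the usable payload bound a one-line computation; an illustrative helper (my_xdp_tailroom is not an in-tree name):

/* Sketch: room left between the payload end and the reserved
 * skb_shared_info area.
 */
static inline int my_xdp_tailroom(struct xdp_buff *xdp)
{
	return xdp_data_hard_end(xdp) - xdp->data_end;
}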
+static inline struct skb_shared_info *
+xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
+{
+ return (struct skb_shared_info *)xdp_data_hard_end(xdp);
+}
+
+static __always_inline unsigned int xdp_get_buff_len(struct xdp_buff *xdp)
+{
+ unsigned int len = xdp->data_end - xdp->data;
+ struct skb_shared_info *sinfo;
+
+ if (likely(!xdp_buff_has_frags(xdp)))
+ goto out;
+
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ len += sinfo->xdp_frags_size;
+out:
+ return len;
+}
+
struct xdp_frame {
void *data;
u16 len;
u16 headroom;
- u16 metasize;
+ u32 metasize; /* uses lower 8 bits */
/* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time,
* while mem info is valid on remote CPU.
*/
struct xdp_mem_info mem;
struct net_device *dev_rx; /* used by cpumap */
+ u32 frame_sz;
+ u32 flags; /* supported values defined in xdp_buff_flags */
+};
+
+static __always_inline bool xdp_frame_has_frags(struct xdp_frame *frame)
+{
+ return !!(frame->flags & XDP_FLAGS_HAS_FRAGS);
+}
+
+static __always_inline bool xdp_frame_is_frag_pfmemalloc(struct xdp_frame *frame)
+{
+ return !!(frame->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
+}
+
+#define XDP_BULK_QUEUE_SIZE 16
+struct xdp_frame_bulk {
+ int count;
+ void *xa;
+ void *q[XDP_BULK_QUEUE_SIZE];
+};
+
+static __always_inline void xdp_frame_bulk_init(struct xdp_frame_bulk *bq)
+{
+ /* bq->count will be zeroed when bq->xa gets updated */
+ bq->xa = NULL;
+}
+
+static inline struct skb_shared_info *
+xdp_get_shared_info_from_frame(struct xdp_frame *frame)
+{
+ void *data_hard_start = frame->data - frame->headroom - sizeof(*frame);
+
+ return (struct skb_shared_info *)(data_hard_start + frame->frame_sz -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+}
+
+struct xdp_cpumap_stats {
+ unsigned int redirect;
+ unsigned int pass;
+ unsigned int drop;
};
/* Clear kernel pointers in xdp_frame */
@@ -91,33 +219,85 @@ static inline void xdp_scrub_frame(struct xdp_frame *frame)
frame->dev_rx = NULL;
}
+static inline void
+xdp_update_skb_shared_info(struct sk_buff *skb, u8 nr_frags,
+ unsigned int size, unsigned int truesize,
+ bool pfmemalloc)
+{
+ skb_shinfo(skb)->nr_frags = nr_frags;
+
+ skb->len += size;
+ skb->data_len += size;
+ skb->truesize += truesize;
+ skb->pfmemalloc |= pfmemalloc;
+}
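
A typical caller copies the fragment totals straight out of the shared info of a multi-buffer xdp_buff; a hedged sketch that assumes page-sized truesize per fragment:

struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);

xdp_update_skb_shared_info(skb, sinfo->nr_frags,
			   sinfo->xdp_frags_size,
			   sinfo->nr_frags * PAGE_SIZE,
			   xdp_buff_is_frag_pfmemalloc(xdp));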
+
+/* Avoids inlining WARN macro in fast-path */
+void xdp_warn(const char *msg, const char *func, const int line);
+#define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__)
+
struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);
+struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
+ struct sk_buff *skb,
+ struct net_device *dev);
+struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
+ struct net_device *dev);
+int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp);
+struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf);
-/* Convert xdp_buff to xdp_frame */
static inline
-struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
+void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
{
- struct xdp_frame *xdp_frame;
- int metasize;
- int headroom;
+ xdp->data_hard_start = frame->data - frame->headroom - sizeof(*frame);
+ xdp->data = frame->data;
+ xdp->data_end = frame->data + frame->len;
+ xdp->data_meta = frame->data - frame->metasize;
+ xdp->frame_sz = frame->frame_sz;
+ xdp->flags = frame->flags;
+}
- if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY)
- return xdp_convert_zc_to_xdp_frame(xdp);
+static inline
+int xdp_update_frame_from_buff(struct xdp_buff *xdp,
+ struct xdp_frame *xdp_frame)
+{
+ int metasize, headroom;
/* Assure headroom is available for storing info */
headroom = xdp->data - xdp->data_hard_start;
metasize = xdp->data - xdp->data_meta;
metasize = metasize > 0 ? metasize : 0;
if (unlikely((headroom - metasize) < sizeof(*xdp_frame)))
- return NULL;
+ return -ENOSPC;
- /* Store info in top of packet */
- xdp_frame = xdp->data_hard_start;
+ /* Catch if driver didn't reserve tailroom for skb_shared_info */
+ if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
+ XDP_WARN("Driver BUG: missing reserved tailroom");
+ return -ENOSPC;
+ }
xdp_frame->data = xdp->data;
xdp_frame->len = xdp->data_end - xdp->data;
xdp_frame->headroom = headroom - sizeof(*xdp_frame);
xdp_frame->metasize = metasize;
+ xdp_frame->frame_sz = xdp->frame_sz;
+ xdp_frame->flags = xdp->flags;
+
+ return 0;
+}
+
+/* Convert xdp_buff to xdp_frame */
+static inline
+struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
+{
+ struct xdp_frame *xdp_frame;
+
+ if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
+ return xdp_convert_zc_to_xdp_frame(xdp);
+
+ /* Store info in top of packet */
+ xdp_frame = xdp->data_hard_start;
+ if (unlikely(xdp_update_frame_from_buff(xdp, xdp_frame) < 0))
+ return NULL;
/* rxq only valid until napi_schedule ends, convert to xdp_mem_info */
xdp_frame->mem = xdp->rxq->mem;
@@ -125,9 +305,14 @@ struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
return xdp_frame;
}
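
The usual consumer is a driver's XDP_TX or redirect path: convert in place, then hand the frame to hardware. A hedged sketch; my_driver_xmit_xdp() is a placeholder, not a real API:

struct xdp_frame *xdpf = xdp_convert_buff_to_frame(&xdp);

if (unlikely(!xdpf))
	return;		/* headroom too small, or missing tailroom */

if (my_driver_xmit_xdp(xdpf) < 0)
	xdp_return_frame(xdpf);	/* give the memory back on TX failure */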
+void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
+ struct xdp_buff *xdp);
void xdp_return_frame(struct xdp_frame *xdpf);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
void xdp_return_buff(struct xdp_buff *xdp);
+void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq);
+void xdp_return_frame_bulk(struct xdp_frame *xdpf,
+ struct xdp_frame_bulk *bq);
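
The bulk API amortizes the per-frame return cost, e.g. on TX completion. A sketch under the assumption that frames[] and n come from the completion ring; callers are expected to hold rcu_read_lock() around the loop:

struct xdp_frame_bulk bq;
int i;

xdp_frame_bulk_init(&bq);
rcu_read_lock();
for (i = 0; i < n; i++)
	xdp_return_frame_bulk(frames[i], &bq);
xdp_flush_frame_bulk(&bq);	/* release anything still queued */
rcu_read_unlock();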
/* When sending xdp_frame into the network stack, then there is no
* return point callback, which is needed to release e.g. DMA-mapping
@@ -138,20 +323,60 @@ void __xdp_release_frame(void *data, struct xdp_mem_info *mem);
static inline void xdp_release_frame(struct xdp_frame *xdpf)
{
struct xdp_mem_info *mem = &xdpf->mem;
+ struct skb_shared_info *sinfo;
+ int i;
/* Currently only page_pool needs this */
- if (mem->type == MEM_TYPE_PAGE_POOL)
- __xdp_release_frame(xdpf->data, mem);
+ if (mem->type != MEM_TYPE_PAGE_POOL)
+ return;
+
+ if (likely(!xdp_frame_has_frags(xdpf)))
+ goto out;
+
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ struct page *page = skb_frag_page(&sinfo->frags[i]);
+
+ __xdp_release_frame(page_address(page), mem);
+ }
+out:
+ __xdp_release_frame(xdpf->data, mem);
+}
+
+static __always_inline unsigned int xdp_get_frame_len(struct xdp_frame *xdpf)
+{
+ struct skb_shared_info *sinfo;
+ unsigned int len = xdpf->len;
+
+ if (likely(!xdp_frame_has_frags(xdpf)))
+ goto out;
+
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ len += sinfo->xdp_frags_size;
+out:
+ return len;
+}
+
+int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
+ struct net_device *dev, u32 queue_index,
+ unsigned int napi_id, u32 frag_size);
+static inline int
+xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
+ struct net_device *dev, u32 queue_index,
+ unsigned int napi_id)
+{
+ return __xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id, 0);
}
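
Registration is per RX ring and now carries the NAPI id for busy polling. A hedged setup sketch; ring, netdev and the page_pool are hypothetical driver state:

err = xdp_rxq_info_reg(&ring->xdp_rxq, netdev, ring->index,
		       ring->napi.napi_id);
if (err)
	return err;

err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_POOL,
				 ring->page_pool);
if (err)
	xdp_rxq_info_unreg(&ring->xdp_rxq);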
-int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
- struct net_device *dev, u32 queue_index);
void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
enum xdp_mem_type type, void *allocator);
void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq);
+int xdp_reg_mem_model(struct xdp_mem_info *mem,
+ enum xdp_mem_type type, void *allocator);
+void xdp_unreg_mem_model(struct xdp_mem_info *mem);
/* Drivers not supporting XDP metadata can use this helper, which
* rejects any room expansion for metadata as a result.
@@ -168,17 +393,20 @@ xdp_data_meta_unsupported(const struct xdp_buff *xdp)
return unlikely(xdp->data_meta > xdp->data);
}
+static inline bool xdp_metalen_invalid(unsigned long metalen)
+{
+ return (metalen & (sizeof(__u32) - 1)) || (metalen > 32);
+}
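
The helper encodes both constraints on XDP metadata (a multiple of 4 bytes, at most 32 bytes). Illustrative use when accepting a metadata length from a BPF program:

if (xdp_metalen_invalid(metalen))
	return -EACCES;	/* reject odd-sized or oversized metadata */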
+
struct xdp_attachment_info {
struct bpf_prog *prog;
u32 flags;
};
struct netdev_bpf;
-int xdp_attachment_query(struct xdp_attachment_info *info,
- struct netdev_bpf *bpf);
-bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
- struct netdev_bpf *bpf);
void xdp_attachment_setup(struct xdp_attachment_info *info,
struct netdev_bpf *bpf);
+#define DEV_MAP_BULK_SIZE XDP_BULK_QUEUE_SIZE
+
#endif /* __LINUX_NET_XDP_H__ */
diff --git a/include/net/xdp_priv.h b/include/net/xdp_priv.h
index a9d5b7603b89..c9df68d5f258 100644
--- a/include/net/xdp_priv.h
+++ b/include/net/xdp_priv.h
@@ -3,6 +3,7 @@
#define __LINUX_NET_XDP_PRIV_H__
#include <linux/rhashtable.h>
+#include <net/xdp.h>
/* Private to net/core/xdp.c, but used by trace/events/xdp.h */
struct xdp_mem_allocator {
@@ -10,7 +11,6 @@ struct xdp_mem_allocator {
union {
void *allocator;
struct page_pool *page_pool;
- struct zero_copy_allocator *zc_alloc;
};
struct rhash_head node;
struct rcu_head rcu;
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index e86ec48ef627..3057e1a4a11c 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -6,6 +6,7 @@
#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H
+#include <linux/bpf.h>
#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
@@ -15,80 +16,39 @@
struct net_device;
struct xsk_queue;
-
-/* Masks for xdp_umem_page flags.
- * The low 12-bits of the addr will be 0 since this is the page address, so we
- * can use them for flags.
- */
-#define XSK_NEXT_PG_CONTIG_SHIFT 0
-#define XSK_NEXT_PG_CONTIG_MASK (1ULL << XSK_NEXT_PG_CONTIG_SHIFT)
-
-struct xdp_umem_page {
- void *addr;
- dma_addr_t dma;
-};
-
-struct xdp_umem_fq_reuse {
- u32 nentries;
- u32 length;
- u64 handles[];
-};
-
-/* Flags for the umem flags field.
- *
- * The NEED_WAKEUP flag is 1 due to the reuse of the flags field for public
- * flags. See include/uapi/linux/if_xdp.h.
- */
-#define XDP_UMEM_USES_NEED_WAKEUP (1 << 1)
+struct xdp_buff;
struct xdp_umem {
- struct xsk_queue *fq;
- struct xsk_queue *cq;
- struct xdp_umem_page *pages;
- u64 chunk_mask;
+ void *addrs;
u64 size;
u32 headroom;
- u32 chunk_size_nohr;
+ u32 chunk_size;
+ u32 chunks;
+ u32 npgs;
struct user_struct *user;
- unsigned long address;
refcount_t users;
- struct work_struct work;
- struct page **pgs;
- u32 npgs;
- u16 queue_id;
- u8 need_wakeup;
u8 flags;
- int id;
- struct net_device *dev;
- struct xdp_umem_fq_reuse *fq_reuse;
bool zc;
- spinlock_t xsk_list_lock;
- struct list_head xsk_list;
+ struct page **pgs;
+ int id;
+ struct list_head xsk_dma_list;
+ struct work_struct work;
};
-/* Nodes are linked in the struct xdp_sock map_list field, and used to
- * track which maps a certain socket reside in.
- */
-
struct xsk_map {
struct bpf_map map;
spinlock_t lock; /* Synchronize map updates */
- struct xdp_sock *xsk_map[];
-};
-
-struct xsk_map_node {
- struct list_head node;
- struct xsk_map *map;
- struct xdp_sock **map_entry;
+ struct xdp_sock __rcu *xsk_map[];
};
struct xdp_sock {
/* struct sock must be the first member of struct xdp_sock */
struct sock sk;
- struct xsk_queue *rx;
+ struct xsk_queue *rx ____cacheline_aligned_in_smp;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
+ struct xsk_buff_pool *pool;
u16 queue_id;
bool zc;
enum {
@@ -96,277 +56,38 @@ struct xdp_sock {
XSK_BOUND,
XSK_UNBOUND,
} state;
- /* Protects multiple processes in the control path */
- struct mutex mutex;
+
struct xsk_queue *tx ____cacheline_aligned_in_smp;
- struct list_head list;
- /* Mutual exclusion of NAPI TX thread and sendmsg error paths
- * in the SKB destructor callback.
- */
- spinlock_t tx_completion_lock;
+ struct list_head tx_list;
/* Protects generic receive. */
spinlock_t rx_lock;
+
+ /* Statistics */
u64 rx_dropped;
+ u64 rx_queue_full;
+
struct list_head map_list;
/* Protects map_list */
spinlock_t map_list_lock;
+ /* Protects multiple processes in the control path */
+ struct mutex mutex;
+ struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
+ struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
};
-struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
-int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
-bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
-/* Used from netdev driver */
-bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
-bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
-void xsk_umem_release_addr(struct xdp_umem *umem);
-void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
-bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
-void xsk_umem_consume_tx_done(struct xdp_umem *umem);
-struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
-struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
- struct xdp_umem_fq_reuse *newq);
-void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
-struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
-void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
-void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
-void xsk_clear_rx_need_wakeup(struct xdp_umem *umem);
-void xsk_clear_tx_need_wakeup(struct xdp_umem *umem);
-bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem);
-void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
- struct xdp_sock **map_entry);
-int xsk_map_inc(struct xsk_map *map);
-void xsk_map_put(struct xsk_map *map);
+int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);
-static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
- u32 key)
-{
- struct xsk_map *m = container_of(map, struct xsk_map, map);
- struct xdp_sock *xs;
-
- if (key >= map->max_entries)
- return NULL;
-
- xs = READ_ONCE(m->xsk_map[key]);
- return xs;
-}
-
-static inline u64 xsk_umem_extract_addr(u64 addr)
-{
- return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
-}
-
-static inline u64 xsk_umem_extract_offset(u64 addr)
-{
- return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
-}
-
-static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
-{
- return xsk_umem_extract_addr(addr) + xsk_umem_extract_offset(addr);
-}
-
-static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
-{
- unsigned long page_addr;
-
- addr = xsk_umem_add_offset_to_addr(addr);
- page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr;
-
- return (char *)(page_addr & PAGE_MASK) + (addr & ~PAGE_MASK);
-}
-
-static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
-{
- addr = xsk_umem_add_offset_to_addr(addr);
-
- return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK);
-}
-
-/* Reuse-queue aware version of FILL queue helpers */
-static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
-{
- struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
-
- if (rq->length >= cnt)
- return true;
-
- return xsk_umem_has_addrs(umem, cnt - rq->length);
-}
-
-static inline bool xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
-{
- struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
-
- if (!rq->length)
- return xsk_umem_peek_addr(umem, addr);
-
- *addr = rq->handles[rq->length - 1];
- return addr;
-}
-
-static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
-{
- struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
-
- if (!rq->length)
- xsk_umem_release_addr(umem);
- else
- rq->length--;
-}
-
-static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
-{
- struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
-
- rq->handles[rq->length++] = addr;
-}
-
-/* Handle the offset appropriately depending on aligned or unaligned mode.
- * For unaligned mode, we store the offset in the upper 16-bits of the address.
- * For aligned mode, we simply add the offset to the address.
- */
-static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address,
- u64 offset)
-{
- if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG)
- return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
- else
- return address + offset;
-}
#else
+
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
return -ENOTSUPP;
}
-static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
-{
- return false;
-}
-
-static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
-{
- return false;
-}
-
-static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
-{
- return NULL;
-}
-
-static inline void xsk_umem_release_addr(struct xdp_umem *umem)
-{
-}
-
-static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
-{
-}
-
-static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
- struct xdp_desc *desc)
-{
- return false;
-}
-
-static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
-{
-}
-
-static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
-{
- return NULL;
-}
-
-static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
- struct xdp_umem *umem,
- struct xdp_umem_fq_reuse *newq)
-{
- return NULL;
-}
-static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
-{
-}
-
-static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
- u16 queue_id)
-{
- return NULL;
-}
-
-static inline u64 xsk_umem_extract_addr(u64 addr)
-{
- return 0;
-}
-
-static inline u64 xsk_umem_extract_offset(u64 addr)
-{
- return 0;
-}
-
-static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
-{
- return 0;
-}
-
-static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
-{
- return NULL;
-}
-
-static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
-{
- return 0;
-}
-
-static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
-{
- return false;
-}
-
-static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
-{
- return NULL;
-}
-
-static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
-{
-}
-
-static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
-{
-}
-
-static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
-{
-}
-
-static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
-{
-}
-
-static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
-{
-}
-
-static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
-{
-}
-
-static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
-{
- return false;
-}
-
-static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
- u64 offset)
-{
- return 0;
-}
-
static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
return -EOPNOTSUPP;
@@ -376,11 +97,6 @@ static inline void __xsk_map_flush(void)
{
}
-static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
- u32 key)
-{
- return NULL;
-}
#endif /* CONFIG_XDP_SOCKETS */
#endif /* _LINUX_XDP_SOCK_H */
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
new file mode 100644
index 000000000000..9c0d860609ba
--- /dev/null
+++ b/include/net/xdp_sock_drv.h
@@ -0,0 +1,289 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Interface for implementing AF_XDP zero-copy support in drivers.
+ * Copyright(c) 2020 Intel Corporation.
+ */
+
+#ifndef _LINUX_XDP_SOCK_DRV_H
+#define _LINUX_XDP_SOCK_DRV_H
+
+#include <net/xdp_sock.h>
+#include <net/xsk_buff_pool.h>
+
+#define XDP_UMEM_MIN_CHUNK_SHIFT 11
+#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)
+
+#ifdef CONFIG_XDP_SOCKETS
+
+void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
+bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
+u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
+void xsk_tx_release(struct xsk_buff_pool *pool);
+struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
+ u16 queue_id);
+void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
+void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
+void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
+void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
+bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);
+
+static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
+{
+ return XDP_PACKET_HEADROOM + pool->headroom;
+}
+
+static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
+{
+ return pool->chunk_size;
+}
+
+static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
+{
+ return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
+}
+
+static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
+ struct xdp_rxq_info *rxq)
+{
+ xp_set_rxq_info(pool, rxq);
+}
+
+static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ return pool->heads[0].xdp.rxq->napi_id;
+#else
+ return 0;
+#endif
+}
+
+static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
+ unsigned long attrs)
+{
+ xp_dma_unmap(pool, attrs);
+}
+
+static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
+ struct device *dev, unsigned long attrs)
+{
+ struct xdp_umem *umem = pool->umem;
+
+ return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
+}
+
+static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
+{
+ struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+ return xp_get_dma(xskb);
+}
+
+static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
+{
+ struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+ return xp_get_frame_dma(xskb);
+}
+
+static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
+{
+ return xp_alloc(pool);
+}
+
+/* Returns as many entries as possible up to max. 0 <= N <= max. */
+static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+{
+ return xp_alloc_batch(pool, xdp, max);
+}
+
+static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
+{
+ return xp_can_alloc(pool, count);
+}
+
+static inline void xsk_buff_free(struct xdp_buff *xdp)
+{
+ struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+ xp_free(xskb);
+}
+
+static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
+{
+ xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
+ xdp->data_meta = xdp->data;
+ xdp->data_end = xdp->data + size;
+}
+
+static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
+ u64 addr)
+{
+ return xp_raw_get_dma(pool, addr);
+}
+
+static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
+{
+ return xp_raw_get_data(pool, addr);
+}
+
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
+{
+ struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+ if (!pool->dma_need_sync)
+ return;
+
+ xp_dma_sync_for_cpu(xskb);
+}
+
+static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
+ dma_addr_t dma,
+ size_t size)
+{
+ xp_dma_sync_for_device(pool, dma, size);
+}
+
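Putting the driver-facing pieces together, a hedged zero-copy RX sketch: allocate from the pool, post the DMA address to hardware, then size and sync the buffer before running XDP. len stands for the completed descriptor length; the flow, not any particular driver, is what is shown:

struct xdp_buff *xdp = xsk_buff_alloc(pool);

if (!xdp)
	return -ENOMEM;		/* fill ring empty, try again later */

/* post xsk_buff_xdp_get_dma(xdp) to the NIC RX ring; on completion: */
xsk_buff_set_size(xdp, len);
xsk_buff_dma_sync_for_cpu(xdp, pool);
/* run the XDP program; on XDP_DROP: xsk_buff_free(xdp) */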
+#else
+
+static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
+{
+}
+
+static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
+ struct xdp_desc *desc)
+{
+ return false;
+}
+
+static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max)
+{
+ return 0;
+}
+
+static inline void xsk_tx_release(struct xsk_buff_pool *pool)
+{
+}
+
+static inline struct xsk_buff_pool *
+xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
+{
+ return NULL;
+}
+
+static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
+{
+}
+
+static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
+{
+}
+
+static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
+{
+}
+
+static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
+{
+}
+
+static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
+{
+ return false;
+}
+
+static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
+{
+ return 0;
+}
+
+static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
+{
+ return 0;
+}
+
+static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
+{
+ return 0;
+}
+
+static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
+ struct xdp_rxq_info *rxq)
+{
+}
+
+static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
+{
+ return 0;
+}
+
+static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
+ unsigned long attrs)
+{
+}
+
+static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
+ struct device *dev, unsigned long attrs)
+{
+ return 0;
+}
+
+static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
+{
+ return 0;
+}
+
+static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
+{
+ return 0;
+}
+
+static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
+{
+ return NULL;
+}
+
+static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+{
+ return 0;
+}
+
+static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
+{
+ return false;
+}
+
+static inline void xsk_buff_free(struct xdp_buff *xdp)
+{
+}
+
+static inline void xsk_buff_discard(struct xdp_buff *xdp)
+{
+}
+
+static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
+{
+}
+
+static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
+ u64 addr)
+{
+ return 0;
+}
+
+static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
+{
+ return NULL;
+}
+
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
+{
+}
+
+static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
+ dma_addr_t dma,
+ size_t size)
+{
+}
+
+#endif /* CONFIG_XDP_SOCKETS */
+
+#endif /* _LINUX_XDP_SOCK_DRV_H */
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 8f71c111e65a..dbc81f5eb553 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -15,6 +15,7 @@
#include <linux/audit.h>
#include <linux/slab.h>
#include <linux/refcount.h>
+#include <linux/sockptr.h>
#include <net/sock.h>
#include <net/dst.h>
@@ -125,11 +126,17 @@ struct xfrm_state_walk {
struct xfrm_address_filter *filter;
};
-struct xfrm_state_offload {
+enum {
+ XFRM_DEV_OFFLOAD_IN = 1,
+ XFRM_DEV_OFFLOAD_OUT,
+};
+
+struct xfrm_dev_offload {
struct net_device *dev;
+ netdevice_tracker dev_tracker;
+ struct net_device *real_dev;
unsigned long offload_handle;
- unsigned int num_exthdrs;
- u8 flags;
+ u8 dir : 2;
};
struct xfrm_mode {
@@ -143,6 +150,12 @@ enum {
XFRM_MODE_FLAG_TUNNEL = 1,
};
+enum xfrm_replay_mode {
+ XFRM_REPLAY_MODE_LEGACY,
+ XFRM_REPLAY_MODE_BMP,
+ XFRM_REPLAY_MODE_ESN,
+};
+
/* Full description of state of transformer. */
struct xfrm_state {
possible_net_t xs_net;
@@ -152,6 +165,7 @@ struct xfrm_state {
};
struct hlist_node bysrc;
struct hlist_node byspi;
+ struct hlist_node byseq;
refcount_t refcnt;
spinlock_t lock;
@@ -191,6 +205,11 @@ struct xfrm_state {
struct xfrm_algo_aead *aead;
const char *geniv;
+ /* mapping change rate limiting */
+ __be16 new_mapping_sport;
+ u32 new_mapping; /* seconds */
+ u32 mapping_maxage; /* seconds for input SA */
+
/* Data for encapsulator */
struct xfrm_encap_tmpl *encap;
struct sock __rcu *encap_sk;
@@ -212,9 +231,8 @@ struct xfrm_state {
struct xfrm_replay_state preplay;
struct xfrm_replay_state_esn *preplay_esn;
- /* The functions for replay detection. */
- const struct xfrm_replay *repl;
-
+ /* replay detection mode */
+ enum xfrm_replay_mode repl_mode;
/* internal flag that only holds state for delayed aevent at the
* moment
*/
@@ -233,7 +251,7 @@ struct xfrm_state {
struct xfrm_lifetime_cur curlft;
struct hrtimer mtimer;
- struct xfrm_state_offload xso;
+ struct xfrm_dev_offload xso;
/* used to fix curlft->add_time when changing date */
long saved_tmo;
@@ -294,21 +312,15 @@ struct km_event {
struct net *net;
};
-struct xfrm_replay {
- void (*advance)(struct xfrm_state *x, __be32 net_seq);
- int (*check)(struct xfrm_state *x,
- struct sk_buff *skb,
- __be32 net_seq);
- int (*recheck)(struct xfrm_state *x,
- struct sk_buff *skb,
- __be32 net_seq);
- void (*notify)(struct xfrm_state *x, int event);
- int (*overflow)(struct xfrm_state *x, struct sk_buff *skb);
+struct xfrm_if_decode_session_result {
+ struct net *net;
+ u32 if_id;
};
struct xfrm_if_cb {
- struct xfrm_if *(*decode_session)(struct sk_buff *skb,
- unsigned short family);
+ bool (*decode_session)(struct sk_buff *skb,
+ unsigned short family,
+ struct xfrm_if_decode_session_result *res);
};
void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
@@ -361,11 +373,6 @@ struct xfrm_state_afinfo {
const struct xfrm_type *type_dstopts;
int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
- int (*output_finish)(struct sock *sk, struct sk_buff *skb);
- int (*extract_input)(struct xfrm_state *x,
- struct sk_buff *skb);
- int (*extract_output)(struct xfrm_state *x,
- struct sk_buff *skb);
int (*transport_finish)(struct sk_buff *skb,
int async);
void (*local_error)(struct sk_buff *skb, u32 mtu);
@@ -377,7 +384,8 @@ struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family);
struct xfrm_input_afinfo {
- unsigned int family;
+ u8 family;
+ bool is_ipip;
int (*callback)(struct sk_buff *skb, u8 protocol,
int err);
};
@@ -389,7 +397,6 @@ void xfrm_flush_gc(void);
void xfrm_state_delete_tunnel(struct xfrm_state *x);
struct xfrm_type {
- char *description;
struct module *owner;
u8 proto;
u8 flags;
@@ -398,20 +405,19 @@ struct xfrm_type {
#define XFRM_TYPE_LOCAL_COADDR 4
#define XFRM_TYPE_REMOTE_COADDR 8
- int (*init_state)(struct xfrm_state *x);
+ int (*init_state)(struct xfrm_state *x,
+ struct netlink_ext_ack *extack);
void (*destructor)(struct xfrm_state *);
int (*input)(struct xfrm_state *, struct sk_buff *skb);
int (*output)(struct xfrm_state *, struct sk_buff *pskb);
int (*reject)(struct xfrm_state *, struct sk_buff *,
const struct flowi *);
- int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
};
int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
struct xfrm_type_offload {
- char *description;
struct module *owner;
u8 proto;
void (*encap)(struct xfrm_state *, struct sk_buff *pskb);
@@ -584,8 +590,8 @@ struct xfrm_mgr {
bool (*is_alive)(const struct km_event *c);
};
-int xfrm_register_km(struct xfrm_mgr *km);
-int xfrm_unregister_km(struct xfrm_mgr *km);
+void xfrm_register_km(struct xfrm_mgr *km);
+void xfrm_unregister_km(struct xfrm_mgr *km);
struct xfrm_tunnel_skb_cb {
union {
@@ -946,7 +952,7 @@ struct xfrm_dst {
static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
{
#ifdef CONFIG_XFRM
- if (dst->xfrm) {
+ if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;
return xdst->path;
@@ -958,7 +964,7 @@ static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
{
#ifdef CONFIG_XFRM
- if (dst->xfrm) {
+ if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
return xdst->child;
}
@@ -986,6 +992,7 @@ void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
struct xfrm_if_parms {
int link; /* ifindex of underlying L2 interface */
u32 if_id; /* interface identifier */
+ bool collect_md;
};
struct xfrm_if {
@@ -1011,8 +1018,9 @@ struct xfrm_offload {
#define CRYPTO_FALLBACK 8
#define XFRM_GSO_SEGMENT 16
#define XFRM_GRO 32
-#define XFRM_ESP_NO_TRAILER 64
+/* 64 is free */
#define XFRM_DEV_RESUME 128
+#define XFRM_XMIT 256
__u32 status;
#define CRYPTO_SUCCESS 1
@@ -1025,6 +1033,7 @@ struct xfrm_offload {
#define CRYPTO_INVALID_PROTOCOL 128
__u8 proto;
+ __u8 inner_ipproto;
};
struct sec_path {
@@ -1087,6 +1096,27 @@ xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, un
int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
unsigned short family);
+static inline bool __xfrm_check_nopolicy(struct net *net, struct sk_buff *skb,
+ int dir)
+{
+ if (!net->xfrm.policy_count[dir] && !secpath_exists(skb))
+ return net->xfrm.policy_default[dir] == XFRM_USERPOLICY_ACCEPT;
+
+ return false;
+}
+
+static inline bool __xfrm_check_dev_nopolicy(struct sk_buff *skb,
+ int dir, unsigned short family)
+{
+ if (dir != XFRM_POLICY_OUT && family == AF_INET) {
+ /* same dst may be used for traffic originating from
+ * devices with different policy settings.
+ */
+ return IPCB(skb)->flags & IPSKB_NOPOLICY;
+ }
+ return skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY);
+}
+
static inline int __xfrm_policy_check2(struct sock *sk, int dir,
struct sk_buff *skb,
unsigned int family, int reverse)
@@ -1097,9 +1127,9 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
if (sk && sk->sk_policy[XFRM_POLICY_IN])
return __xfrm_policy_check(sk, ndir, skb, family);
- return (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) ||
- (skb_dst(skb)->flags & DST_NOPOLICY) ||
- __xfrm_policy_check(sk, ndir, skb, family);
+ return __xfrm_check_nopolicy(net, skb, dir) ||
+ __xfrm_check_dev_nopolicy(skb, dir, family) ||
+ __xfrm_policy_check(sk, ndir, skb, family);
}
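The rework above replaces the old three-way OR with two named helpers, and the fast path now also requires the per-namespace default policy to be XFRM_USERPOLICY_ACCEPT; on IPv4 input the per-device check reads IPCB(skb)->flags rather than the dst, since one dst may be shared by devices with different policy settings. A minimal userspace model of the short-circuit order (plain C99; all names are illustrative stand-ins, not kernel API):

#include <stdbool.h>
#include <stdio.h>

#define ACCEPT 1	/* stands in for XFRM_USERPOLICY_ACCEPT */

/* Mirrors __xfrm_check_nopolicy(): fast path only when no policies are
 * installed, the skb carries no secpath, and the default says accept. */
static bool check_nopolicy(int policy_count, bool has_secpath,
			   int policy_default)
{
	if (!policy_count && !has_secpath)
		return policy_default == ACCEPT;
	return false;
}

static bool policy_check(int policy_count, bool has_secpath,
			 int policy_default, bool dev_nopolicy,
			 bool full_check_result)
{
	if (check_nopolicy(policy_count, has_secpath, policy_default))
		return true;		/* no policies, default accept */
	if (dev_nopolicy)
		return true;		/* device opted out of policy */
	return full_check_result;	/* fall back to the full check */
}

int main(void)
{
	/* No policies installed + default accept: skip the full check. */
	printf("%d\n", policy_check(0, false, ACCEPT, false, false)); /* 1 */
	/* Same state but default block: the full check must run. */
	printf("%d\n", policy_check(0, false, 0, false, false));      /* 0 */
	return 0;
}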
static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
@@ -1151,9 +1181,12 @@ static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
struct net *net = dev_net(skb->dev);
- return !net->xfrm.policy_count[XFRM_POLICY_OUT] ||
- (skb_dst(skb)->flags & DST_NOXFRM) ||
- __xfrm_route_forward(skb, family);
+ if (!net->xfrm.policy_count[XFRM_POLICY_OUT] &&
+ net->xfrm.policy_default[XFRM_POLICY_OUT] == XFRM_USERPOLICY_ACCEPT)
+ return true;
+
+ return (skb_dst(skb)->flags & DST_NOXFRM) ||
+ __xfrm_route_forward(skb, family);
}
static inline int xfrm4_route_forward(struct sk_buff *skb)
@@ -1170,6 +1203,8 @@ int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk);
static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
{
+ if (!sk_fullsock(osk))
+ return 0;
sk->sk_policy[0] = NULL;
sk->sk_policy[1] = NULL;
if (unlikely(osk->sk_policy[0] || osk->sk_policy[1]))
@@ -1406,6 +1441,8 @@ struct xfrm4_protocol {
struct xfrm6_protocol {
int (*handler)(struct sk_buff *skb);
+ int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
+ int encap_type);
int (*cb_handler)(struct sk_buff *skb, int err);
int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info);
@@ -1417,6 +1454,7 @@ struct xfrm6_protocol {
/* XFRM tunnel handlers. */
struct xfrm_tunnel {
int (*handler)(struct sk_buff *skb);
+ int (*cb_handler)(struct sk_buff *skb, int err);
int (*err_handler)(struct sk_buff *skb, u32 info);
struct xfrm_tunnel __rcu *next;
@@ -1425,6 +1463,7 @@ struct xfrm_tunnel {
struct xfrm6_tunnel {
int (*handler)(struct sk_buff *skb);
+ int (*cb_handler)(struct sk_buff *skb, int err);
int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info);
struct xfrm6_tunnel __rcu *next;
@@ -1542,9 +1581,10 @@ int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_vali
void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
-int xfrm_init_replay(struct xfrm_state *x);
+int xfrm_init_replay(struct xfrm_state *x, struct netlink_ext_ack *extack);
u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
-int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload);
+int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload,
+ struct netlink_ext_ack *extack);
int xfrm_init_state(struct xfrm_state *x);
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
@@ -1554,7 +1594,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
int xfrm_trans_queue(struct sk_buff *skb,
int (*finish)(struct net *, struct sock *,
struct sk_buff *));
-int xfrm_output_resume(struct sk_buff *skb, int err);
+int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err);
int xfrm_output(struct sock *sk, struct sk_buff *skb);
#if IS_ENABLED(CONFIG_NET_PKTGEN)
@@ -1562,13 +1602,11 @@ int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb);
#endif
void xfrm_local_error(struct sk_buff *skb, int mtu);
-int xfrm4_extract_header(struct sk_buff *skb);
int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
int encap_type);
int xfrm4_transport_finish(struct sk_buff *skb, int async);
int xfrm4_rcv(struct sk_buff *skb);
-int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
{
@@ -1578,18 +1616,17 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
return xfrm_input(skb, nexthdr, spi, 0);
}
-int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
-int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb);
int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
-int xfrm6_extract_header(struct sk_buff *skb);
int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
struct ip6_tnl *t);
+int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
+ int encap_type);
int xfrm6_transport_finish(struct sk_buff *skb, int async);
int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t);
int xfrm6_rcv(struct sk_buff *skb);
@@ -1602,18 +1639,17 @@ int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
-int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
-int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb);
-int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
- u8 **prevhdr);
#ifdef CONFIG_XFRM
+void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu);
int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
-int xfrm_user_policy(struct sock *sk, int optname,
- u8 __user *optval, int optlen);
+int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
+int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval,
+ int optlen);
#else
-static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
+static inline int xfrm_user_policy(struct sock *sk, int optname,
+ sockptr_t optval, int optlen)
{
return -ENOPROTOOPT;
}
@@ -1632,13 +1668,16 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
void *);
void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
-struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
- u8 type, int dir,
+struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net,
+ const struct xfrm_mark *mark,
+ u32 if_id, u8 type, int dir,
struct xfrm_selector *sel,
struct xfrm_sec_ctx *ctx, int delete,
int *err);
-struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, u8,
- int dir, u32 id, int delete, int *err);
+struct xfrm_policy *xfrm_policy_byid(struct net *net,
+ const struct xfrm_mark *mark, u32 if_id,
+ u8 type, int dir, u32 id, int delete,
+ int *err);
int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
void xfrm_policy_hash_rebuild(struct net *net);
u32 xfrm_get_acqseq(void);
@@ -1656,14 +1695,15 @@ int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
const struct xfrm_migrate *m, int num_bundles,
const struct xfrm_kmaddress *k,
const struct xfrm_encap_tmpl *encap);
-struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net);
+struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
+ u32 if_id);
struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
struct xfrm_migrate *m,
struct xfrm_encap_tmpl *encap);
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
struct xfrm_migrate *m, int num_bundles,
struct xfrm_kmaddress *k, struct net *net,
- struct xfrm_encap_tmpl *encap);
+ struct xfrm_encap_tmpl *encap, u32 if_id);
#endif
int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
@@ -1714,6 +1754,12 @@ static inline int xfrm_policy_id2dir(u32 index)
}
#ifdef CONFIG_XFRM
+void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq);
+int xfrm_replay_check(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
+void xfrm_replay_notify(struct xfrm_state *x, int event);
+int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm_replay_recheck(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
+
static inline int xfrm_aevent_is_on(struct net *net)
{
struct sock *nlsk;
@@ -1766,21 +1812,17 @@ static inline unsigned int xfrm_replay_state_esn_len(struct xfrm_replay_state_es
static inline int xfrm_replay_clone(struct xfrm_state *x,
struct xfrm_state *orig)
{
- x->replay_esn = kzalloc(xfrm_replay_state_esn_len(orig->replay_esn),
+
+ x->replay_esn = kmemdup(orig->replay_esn,
+ xfrm_replay_state_esn_len(orig->replay_esn),
GFP_KERNEL);
if (!x->replay_esn)
return -ENOMEM;
-
- x->replay_esn->bmp_len = orig->replay_esn->bmp_len;
- x->replay_esn->replay_window = orig->replay_esn->replay_window;
-
- x->preplay_esn = kmemdup(x->replay_esn,
- xfrm_replay_state_esn_len(x->replay_esn),
+ x->preplay_esn = kmemdup(orig->preplay_esn,
+ xfrm_replay_state_esn_len(orig->preplay_esn),
GFP_KERNEL);
- if (!x->preplay_esn) {
- kfree(x->replay_esn);
+ if (!x->preplay_esn)
return -ENOMEM;
- }
return 0;
}
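The clone now kmemdup()s both windows straight from orig, which copies the bitmap contents as well (the old kzalloc() path left x->replay_esn's bitmap zeroed and only duplicated bmp_len and replay_window), and it drops the partial-free error branch. A standalone sketch of the allocate-and-copy idiom kmemdup() provides (userspace stand-in, not the kernel function):

#include <stdlib.h>
#include <string.h>

/* Allocate and copy in one step, instead of a zeroing allocation
 * followed by field-by-field assignment. */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}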
@@ -1846,12 +1888,13 @@ void xfrm_dev_resume(struct sk_buff *skb);
void xfrm_dev_backlog(struct softnet_data *sd);
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
- struct xfrm_user_offload *xuo);
+ struct xfrm_user_offload *xuo,
+ struct netlink_ext_ack *extack);
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
{
- struct xfrm_state_offload *xso = &x->xso;
+ struct xfrm_dev_offload *xso = &x->xso;
if (xso->dev && xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn)
xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
@@ -1877,7 +1920,7 @@ static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
static inline void xfrm_dev_state_delete(struct xfrm_state *x)
{
- struct xfrm_state_offload *xso = &x->xso;
+ struct xfrm_dev_offload *xso = &x->xso;
if (xso->dev)
xso->dev->xfrmdev_ops->xdo_dev_state_delete(x);
@@ -1885,14 +1928,14 @@ static inline void xfrm_dev_state_delete(struct xfrm_state *x)
static inline void xfrm_dev_state_free(struct xfrm_state *x)
{
- struct xfrm_state_offload *xso = &x->xso;
+ struct xfrm_dev_offload *xso = &x->xso;
struct net_device *dev = xso->dev;
if (dev && dev->xfrmdev_ops) {
if (dev->xfrmdev_ops->xdo_dev_state_free)
dev->xfrmdev_ops->xdo_dev_state_free(x);
xso->dev = NULL;
- dev_put(dev);
+ netdev_put(dev, &xso->dev_tracker);
}
}
#else
@@ -1909,7 +1952,7 @@ static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_fea
return skb;
}
-static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)
+static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo, struct netlink_ext_ack *extack)
{
return 0;
}
@@ -1992,4 +2035,53 @@ static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
return 0;
}
+
+extern const int xfrm_msg_min[XFRM_NR_MSGTYPES];
+extern const struct nla_policy xfrma_policy[XFRMA_MAX+1];
+
+struct xfrm_translator {
+ /* Allocate frag_list and put compat translation there */
+ int (*alloc_compat)(struct sk_buff *skb, const struct nlmsghdr *src);
+
+ /* Allocate nlmsg with 64-bit translation of received 32-bit message */
+ struct nlmsghdr *(*rcv_msg_compat)(const struct nlmsghdr *nlh,
+ int maxtype, const struct nla_policy *policy,
+ struct netlink_ext_ack *extack);
+
+ /* Translate 32-bit user_policy from sockptr */
+ int (*xlate_user_policy_sockptr)(u8 **pdata32, int optlen);
+
+ struct module *owner;
+};
+
+#if IS_ENABLED(CONFIG_XFRM_USER_COMPAT)
+extern int xfrm_register_translator(struct xfrm_translator *xtr);
+extern int xfrm_unregister_translator(struct xfrm_translator *xtr);
+extern struct xfrm_translator *xfrm_get_translator(void);
+extern void xfrm_put_translator(struct xfrm_translator *xtr);
+#else
+static inline struct xfrm_translator *xfrm_get_translator(void)
+{
+ return NULL;
+}
+static inline void xfrm_put_translator(struct xfrm_translator *xtr)
+{
+}
+#endif
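A sketch of how a compat module might hook into this interface, using only the struct layout and the register/unregister declarations above; the callback body is a stub and the module boilerplate is assumed, not taken from the real net/xfrm/xfrm_compat.c:

#include <linux/err.h>
#include <linux/module.h>
#include <net/xfrm.h>

/* Stub translator: rejects every 32-bit message. The other two
 * callbacks are left NULL here; a real translator implements all
 * three. Illustrative only. */
static struct nlmsghdr *stub_rcv_msg_compat(const struct nlmsghdr *nlh,
					    int maxtype,
					    const struct nla_policy *policy,
					    struct netlink_ext_ack *extack)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static struct xfrm_translator stub_xtr = {
	.rcv_msg_compat	= stub_rcv_msg_compat,
	.owner		= THIS_MODULE,
};

static int __init stub_init(void)
{
	return xfrm_register_translator(&stub_xtr);
}

static void __exit stub_exit(void)
{
	xfrm_unregister_translator(&stub_xtr);
}

module_init(stub_init);
module_exit(stub_exit);
MODULE_LICENSE("GPL");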
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline bool xfrm6_local_dontfrag(const struct sock *sk)
+{
+ int proto;
+
+ if (!sk || sk->sk_family != AF_INET6)
+ return false;
+
+ proto = sk->sk_protocol;
+ if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
+ return inet6_sk(sk)->dontfrag;
+
+ return false;
+}
+#endif
#endif /* _NET_XFRM_H */
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
new file mode 100644
index 000000000000..f787c3f524b0
--- /dev/null
+++ b/include/net/xsk_buff_pool.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 Intel Corporation. */
+
+#ifndef XSK_BUFF_POOL_H_
+#define XSK_BUFF_POOL_H_
+
+#include <linux/if_xdp.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/bpf.h>
+#include <net/xdp.h>
+
+struct xsk_buff_pool;
+struct xdp_rxq_info;
+struct xsk_queue;
+struct xdp_desc;
+struct xdp_umem;
+struct xdp_sock;
+struct device;
+struct page;
+
+struct xdp_buff_xsk {
+ struct xdp_buff xdp;
+ dma_addr_t dma;
+ dma_addr_t frame_dma;
+ struct xsk_buff_pool *pool;
+ u64 orig_addr;
+ struct list_head free_list_node;
+};
+
+struct xsk_dma_map {
+ dma_addr_t *dma_pages;
+ struct device *dev;
+ struct net_device *netdev;
+ refcount_t users;
+ struct list_head list; /* Protected by the RTNL_LOCK */
+ u32 dma_pages_cnt;
+ bool dma_need_sync;
+};
+
+struct xsk_buff_pool {
+ /* Members only used in the control path first. */
+ struct device *dev;
+ struct net_device *netdev;
+ struct list_head xsk_tx_list;
+ /* Protects modifications to the xsk_tx_list */
+ spinlock_t xsk_tx_list_lock;
+ refcount_t users;
+ struct xdp_umem *umem;
+ struct work_struct work;
+ struct list_head free_list;
+ u32 heads_cnt;
+ u16 queue_id;
+
+ /* Data path members as close to free_heads at the end as possible. */
+ struct xsk_queue *fq ____cacheline_aligned_in_smp;
+ struct xsk_queue *cq;
+ /* For performance reasons, each buff pool has its own array of dma_pages
+ * even when they are identical.
+ */
+ dma_addr_t *dma_pages;
+ struct xdp_buff_xsk *heads;
+ struct xdp_desc *tx_descs;
+ u64 chunk_mask;
+ u64 addrs_cnt;
+ u32 free_list_cnt;
+ u32 dma_pages_cnt;
+ u32 free_heads_cnt;
+ u32 headroom;
+ u32 chunk_size;
+ u32 chunk_shift;
+ u32 frame_len;
+ u8 cached_need_wakeup;
+ bool uses_need_wakeup;
+ bool dma_need_sync;
+ bool unaligned;
+ void *addrs;
+ /* Mutual exclusion of the completion ring in the SKB mode. Two cases to protect:
+ * NAPI TX thread and sendmsg error paths in the SKB destructor callback and when
+ * sockets share a single cq when the same netdev and queue id is shared.
+ */
+ spinlock_t cq_lock;
+ struct xdp_buff_xsk *free_heads[];
+};
+
+/* Masks for xdp_umem_page flags.
+ * The low 12-bits of the addr will be 0 since this is the page address, so we
+ * can use them for flags.
+ */
+#define XSK_NEXT_PG_CONTIG_SHIFT 0
+#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)
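Since the stored addresses are page addresses, bit 0 is free to mark whether the next page is physically contiguous. A standalone demo of the packing, assuming 4 KiB pages (plain C, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define PG_CONTIG_MASK 0x1ULL	/* mirrors XSK_NEXT_PG_CONTIG_MASK */

int main(void)
{
	uint64_t page_addr = 0x7f3a00042000ULL;	/* 4 KiB aligned: low 12 bits clear */
	uint64_t tagged = page_addr | PG_CONTIG_MASK;

	/* Mask the flag off to recover the address, keep it to test contiguity. */
	printf("addr   %#llx\n", (unsigned long long)(tagged & ~PG_CONTIG_MASK));
	printf("contig %llu\n", (unsigned long long)(tagged & PG_CONTIG_MASK));
	return 0;
}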
+
+/* AF_XDP core. */
+struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
+ struct xdp_umem *umem);
+int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
+ u16 queue_id, u16 flags);
+int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
+ struct net_device *dev, u16 queue_id);
+int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
+void xp_destroy(struct xsk_buff_pool *pool);
+void xp_get_pool(struct xsk_buff_pool *pool);
+bool xp_put_pool(struct xsk_buff_pool *pool);
+void xp_clear_dev(struct xsk_buff_pool *pool);
+void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
+void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
+
+/* AF_XDP, and XDP core. */
+void xp_free(struct xdp_buff_xsk *xskb);
+
+static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
+ u64 addr)
+{
+ xskb->orig_addr = addr;
+ xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
+}
+
+static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
+ dma_addr_t *dma_pages, u64 addr)
+{
+ xskb->frame_dma = (dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) +
+ (addr & ~PAGE_MASK);
+ xskb->dma = xskb->frame_dma + pool->headroom + XDP_PACKET_HEADROOM;
+}
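Worked example of the split above with 4 KiB pages: umem address 0x2345 indexes dma_pages[2] and keeps in-page offset 0x345; xskb->dma then adds pool->headroom plus XDP_PACKET_HEADROOM on top of frame_dma. A standalone version of the frame_dma arithmetic (values illustrative):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1ULL << PAGE_SHIFT) - 1))

int main(void)
{
	uint64_t dma_pages[] = { 0xA0000000ULL, 0xA0001000ULL, 0xB8000000ULL };
	uint64_t addr = 0x2345;			/* umem-relative address */
	uint64_t frame_dma = dma_pages[addr >> PAGE_SHIFT] + (addr & ~PAGE_MASK);

	/* page index 2, in-page offset 0x345 -> 0xB8000345 */
	printf("frame_dma = %#llx\n", (unsigned long long)frame_dma);
	return 0;
}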
+
+/* AF_XDP ZC drivers, via xdp_sock_buff.h */
+void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
+int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
+ unsigned long attrs, struct page **pages, u32 nr_pages);
+void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
+struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
+u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
+bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
+void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
+dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
+static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
+{
+ return xskb->dma;
+}
+
+static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
+{
+ return xskb->frame_dma;
+}
+
+void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
+static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
+{
+ xp_dma_sync_for_cpu_slow(xskb);
+}
+
+void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
+ size_t size);
+static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
+ dma_addr_t dma, size_t size)
+{
+ if (!pool->dma_need_sync)
+ return;
+
+ xp_dma_sync_for_device_slow(pool, dma, size);
+}
+
+static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
+ u64 addr, u32 len)
+{
+ bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;
+
+ if (likely(!cross_pg))
+ return false;
+
+ if (pool->dma_pages_cnt) {
+ return !(pool->dma_pages[addr >> PAGE_SHIFT] &
+ XSK_NEXT_PG_CONTIG_MASK);
+ }
+
+ /* skb path */
+ return addr + len > pool->addrs_cnt;
+}
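Worked example for the check above with 4 KiB pages: a descriptor at in-page offset 0xF00 with len 0x200 ends at 0x1100 > 0x1000, so it crosses a page boundary. It is then allowed only if the zero-copy path finds the contig bit set for its page, while the skb path merely rejects descriptors running past pool->addrs_cnt.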
+
+static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
+{
+ return addr & pool->chunk_mask;
+}
+
+static inline u64 xp_unaligned_extract_addr(u64 addr)
+{
+ return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
+}
+
+static inline u64 xp_unaligned_extract_offset(u64 addr)
+{
+ return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
+}
+
+static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
+{
+ return xp_unaligned_extract_addr(addr) +
+ xp_unaligned_extract_offset(addr);
+}
+
+static inline u32 xp_aligned_extract_idx(struct xsk_buff_pool *pool, u64 addr)
+{
+ return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift;
+}
+
+static inline void xp_release(struct xdp_buff_xsk *xskb)
+{
+ if (xskb->pool->unaligned)
+ xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
+}
+
+static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
+{
+ u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;
+
+ offset += xskb->pool->headroom;
+ if (!xskb->pool->unaligned)
+ return xskb->orig_addr + offset;
+ return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
+}
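In unaligned mode the handle carries the in-chunk offset in its upper bits, mirroring the extract helpers earlier in this header. A standalone demo, assuming the 48-bit address/offset split of the XSK_UNALIGNED_BUF_* constants in <linux/if_xdp.h>:

#include <stdint.h>
#include <stdio.h>

#define OFFSET_SHIFT 48
#define ADDR_MASK ((1ULL << OFFSET_SHIFT) - 1)

int main(void)
{
	uint64_t orig_addr = 0x12000, offset = 0x140;
	uint64_t handle = orig_addr + (offset << OFFSET_SHIFT);

	/* The extract helpers undo the packing: low 48 bits are the base
	 * address, the upper bits hold the data offset. */
	printf("addr   %#llx\n", (unsigned long long)(handle & ADDR_MASK));
	printf("offset %#llx\n", (unsigned long long)(handle >> OFFSET_SHIFT));
	return 0;
}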
+
+#endif /* XSK_BUFF_POOL_H_ */
diff --git a/include/pcmcia/cistpl.h b/include/pcmcia/cistpl.h
index 59a011101e0e..749320cc9aba 100644
--- a/include/pcmcia/cistpl.h
+++ b/include/pcmcia/cistpl.h
@@ -161,7 +161,7 @@ typedef struct cistpl_funcid_t {
typedef struct cistpl_funce_t {
u_char type;
- u_char data[0];
+ u_char data[];
} cistpl_funce_t;
/*======================================================================
@@ -255,7 +255,7 @@ typedef struct cistpl_data_serv_t {
u_char escape;
u_char encrypt;
u_char misc_features;
- u_char ccitt_code[0];
+ u_char ccitt_code[];
} cistpl_data_serv_t;
typedef struct cistpl_fax_serv_t {
@@ -265,7 +265,7 @@ typedef struct cistpl_fax_serv_t {
u_char encrypt;
u_char features_0;
u_char features_1;
- u_char ccitt_code[0];
+ u_char ccitt_code[];
} cistpl_fax_serv_t;
typedef struct cistpl_voice_serv_t {
diff --git a/include/pcmcia/soc_common.h b/include/pcmcia/soc_common.h
new file mode 100644
index 000000000000..d4f18f4679df
--- /dev/null
+++ b/include/pcmcia/soc_common.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <pcmcia/ss.h>
+
+struct module;
+struct cpufreq_freqs;
+
+struct soc_pcmcia_regulator {
+ struct regulator *reg;
+ bool on;
+};
+
+struct pcmcia_state {
+ unsigned detect: 1,
+ ready: 1,
+ bvd1: 1,
+ bvd2: 1,
+ wrprot: 1,
+ vs_3v: 1,
+ vs_Xv: 1;
+};
+
+/*
+ * This structure encapsulates per-socket state which we might need to
+ * use when responding to a Card Services query of some kind.
+ */
+struct soc_pcmcia_socket {
+ struct pcmcia_socket socket;
+
+ /*
+ * Info from low level handler
+ */
+ unsigned int nr;
+ struct clk *clk;
+
+ /*
+ * Core PCMCIA state
+ */
+ const struct pcmcia_low_level *ops;
+
+ unsigned int status;
+ socket_state_t cs_state;
+
+ unsigned short spd_io[MAX_IO_WIN];
+ unsigned short spd_mem[MAX_WIN];
+ unsigned short spd_attr[MAX_WIN];
+
+ struct resource res_skt;
+ struct resource res_io;
+ struct resource res_io_io;
+ struct resource res_mem;
+ struct resource res_attr;
+
+ struct {
+ int gpio;
+ struct gpio_desc *desc;
+ unsigned int irq;
+ const char *name;
+ } stat[6];
+#define SOC_STAT_CD 0 /* Card detect */
+#define SOC_STAT_BVD1 1 /* BATDEAD / IOSTSCHG */
+#define SOC_STAT_BVD2 2 /* BATWARN / IOSPKR */
+#define SOC_STAT_RDY 3 /* Ready / Interrupt */
+#define SOC_STAT_VS1 4 /* Voltage sense 1 */
+#define SOC_STAT_VS2 5 /* Voltage sense 2 */
+
+ struct gpio_desc *gpio_reset;
+ struct gpio_desc *gpio_bus_enable;
+ struct soc_pcmcia_regulator vcc;
+ struct soc_pcmcia_regulator vpp;
+
+ unsigned int irq_state;
+
+#ifdef CONFIG_CPU_FREQ
+ struct notifier_block cpufreq_nb;
+#endif
+ struct timer_list poll_timer;
+ struct list_head node;
+ void *driver_data;
+};
+
+
+struct pcmcia_low_level {
+ struct module *owner;
+
+ /* first socket in system */
+ int first;
+ /* nr of sockets */
+ int nr;
+
+ int (*hw_init)(struct soc_pcmcia_socket *);
+ void (*hw_shutdown)(struct soc_pcmcia_socket *);
+
+ void (*socket_state)(struct soc_pcmcia_socket *, struct pcmcia_state *);
+ int (*configure_socket)(struct soc_pcmcia_socket *, const socket_state_t *);
+
+ /*
+ * Enable card status IRQs on (re-)initialisation. This can
+ * be called at initialisation, power management event, or
+ * pcmcia event.
+ */
+ void (*socket_init)(struct soc_pcmcia_socket *);
+
+ /*
+ * Disable card status IRQs and PCMCIA bus on suspend.
+ */
+ void (*socket_suspend)(struct soc_pcmcia_socket *);
+
+ /*
+ * Hardware specific timing routines.
+ * If provided, the get_timing routine overrides the SOC default.
+ */
+ unsigned int (*get_timing)(struct soc_pcmcia_socket *, unsigned int, unsigned int);
+ int (*set_timing)(struct soc_pcmcia_socket *);
+ int (*show_timing)(struct soc_pcmcia_socket *, char *);
+
+#ifdef CONFIG_CPU_FREQ
+ /*
+ * CPUFREQ support.
+ */
+ int (*frequency_change)(struct soc_pcmcia_socket *, unsigned long, struct cpufreq_freqs *);
+#endif
+};
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
index 36c5c5e38c1d..cbd3ddd7c33d 100644
--- a/include/ras/ras_event.h
+++ b/include/ras/ras_event.h
@@ -358,7 +358,6 @@ TRACE_EVENT(aer_event,
EM ( MF_MSG_KERNEL_HIGH_ORDER, "high-order kernel page" ) \
EM ( MF_MSG_SLAB, "kernel slab page" ) \
EM ( MF_MSG_DIFFERENT_COMPOUND, "different compound page after locking" ) \
- EM ( MF_MSG_POISONED_HUGE, "huge page already hardware poisoned" ) \
EM ( MF_MSG_HUGE, "huge page" ) \
EM ( MF_MSG_FREE_HUGE, "free huge page" ) \
EM ( MF_MSG_UNMAP_FAILED, "unmapping failed page" ) \
@@ -372,7 +371,8 @@ TRACE_EVENT(aer_event,
EM ( MF_MSG_CLEAN_LRU, "clean LRU page" ) \
EM ( MF_MSG_TRUNCATED_LRU, "already truncated LRU page" ) \
EM ( MF_MSG_BUDDY, "free buddy page" ) \
- EM ( MF_MSG_BUDDY_2ND, "free buddy page (2nd try)" ) \
+ EM ( MF_MSG_DAX, "dax page" ) \
+ EM ( MF_MSG_UNSPLIT_THP, "unsplit thp" ) \
EMe ( MF_MSG_UNKNOWN, "unknown page" )
/*
diff --git a/include/rdma/ib.h b/include/rdma/ib.h
index fe2fc9e91588..f7c185ff7a11 100644
--- a/include/rdma/ib.h
+++ b/include/rdma/ib.h
@@ -1,36 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2010 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
-#if !defined(_RDMA_IB_H)
+#ifndef _RDMA_IB_H
#define _RDMA_IB_H
#include <linux/types.h>
@@ -102,7 +75,7 @@ struct sockaddr_ib {
*/
static inline bool ib_safe_file_access(struct file *filp)
{
- return filp->f_cred == current_cred() && !uaccess_kernel();
+ return filp->f_cred == current_cred();
}
#endif /* _RDMA_IB_H */
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 2734c895c1bf..d808dc3d239e 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -1,39 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2005 Voltaire Inc. All rights reserved.
* Copyright (c) 2005 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
-#if !defined(IB_ADDR_H)
+#ifndef IB_ADDR_H
#define IB_ADDR_H
+#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h
index 870b5e6c06db..226ae3702d8a 100644
--- a/include/rdma/ib_cache.h
+++ b/include/rdma/ib_cache.h
@@ -1,35 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _IB_CACHE_H
@@ -37,8 +10,9 @@
#include <rdma/ib_verbs.h>
-int rdma_query_gid(struct ib_device *device, u8 port_num, int index,
+int rdma_query_gid(struct ib_device *device, u32 port_num, int index,
union ib_gid *gid);
+void *rdma_read_gid_hw_context(const struct ib_gid_attr *attr);
const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
const union ib_gid *gid,
enum ib_gid_type gid_type,
@@ -46,10 +20,10 @@ const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
const struct ib_gid_attr *rdma_find_gid_by_port(struct ib_device *ib_dev,
const union ib_gid *gid,
enum ib_gid_type gid_type,
- u8 port,
+ u32 port,
struct net_device *ndev);
const struct ib_gid_attr *rdma_find_gid_by_filter(
- struct ib_device *device, const union ib_gid *gid, u8 port_num,
+ struct ib_device *device, const union ib_gid *gid, u32 port_num,
bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *,
void *),
void *context);
@@ -69,7 +43,7 @@ struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr);
* the local software cache.
*/
int ib_get_cached_pkey(struct ib_device *device_handle,
- u8 port_num,
+ u32 port_num,
int index,
u16 *pkey);
@@ -85,7 +59,7 @@ int ib_get_cached_pkey(struct ib_device *device_handle,
* the local software cache.
*/
int ib_find_cached_pkey(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
u16 pkey,
u16 *index);
@@ -101,7 +75,7 @@ int ib_find_cached_pkey(struct ib_device *device,
* the local software cache.
*/
int ib_find_exact_cached_pkey(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
u16 pkey,
u16 *index);
@@ -115,7 +89,7 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
* the local software cache.
*/
int ib_get_cached_lmc(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
u8 *lmc);
/**
@@ -128,13 +102,16 @@ int ib_get_cached_lmc(struct ib_device *device,
* the local software cache.
*/
int ib_get_cached_port_state(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
enum ib_port_state *port_active);
bool rdma_is_zero_gid(const union ib_gid *gid);
const struct ib_gid_attr *rdma_get_gid_attr(struct ib_device *device,
- u8 port_num, int index);
+ u32 port_num, int index);
void rdma_put_gid_attr(const struct ib_gid_attr *attr);
void rdma_hold_gid_attr(const struct ib_gid_attr *attr);
+ssize_t rdma_query_gid_table(struct ib_device *device,
+ struct ib_uverbs_gid_entry *entries,
+ size_t max_entries);
#endif /* _IB_CACHE_H */
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 8ec482e391aa..a2ac62b4a6cf 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -6,14 +6,13 @@
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
*/
+
#ifndef IB_CM_H
#define IB_CM_H
#include <rdma/ib_mad.h>
#include <rdma/ib_sa.h>
-
-/* ib_cm and ib_user_cm modules share /sys/class/infiniband_cm */
-extern struct class cm_class;
+#include <rdma/rdma_cm.h>
enum ib_cm_state {
IB_CM_IDLE,
@@ -115,6 +114,7 @@ struct ib_cm_req_event_param {
unsigned int retry_count:3;
unsigned int rnr_retry_count:3;
unsigned int srq:1;
+ struct rdma_ucm_ece ece;
};
struct ib_cm_rep_event_param {
@@ -129,6 +129,7 @@ struct ib_cm_rep_event_param {
unsigned int flow_control:1;
unsigned int rnr_retry_count:3;
unsigned int srq:1;
+ struct rdma_ucm_ece ece;
};
enum ib_cm_rej_reason {
@@ -164,7 +165,8 @@ enum ib_cm_rej_reason {
IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID = 30,
IB_CM_REJ_INVALID_CLASS_VERSION = 31,
IB_CM_REJ_INVALID_FLOW_LABEL = 32,
- IB_CM_REJ_INVALID_ALT_FLOW_LABEL = 33
+ IB_CM_REJ_INVALID_ALT_FLOW_LABEL = 33,
+ IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED = 35,
};
struct ib_cm_rej_event_param {
@@ -292,7 +294,6 @@ struct ib_cm_id {
void *context;
struct ib_device *device;
__be64 service_id;
- __be64 service_mask;
enum ib_cm_state state; /* internal CM/debug use */
enum ib_cm_lap_state lap_state; /* internal CM/debug use */
__be32 local_id;
@@ -338,13 +339,8 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id);
* and service ID resolution requests. The service ID should be specified
* network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
* assign a service ID to the caller.
- * @service_mask: Mask applied to service ID used to listen across a
- * range of service IDs. If set to 0, the service ID is matched
- * exactly. This parameter is ignored if %service_id is set to
- * IB_CM_ASSIGN_SERVICE_ID.
*/
-int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
- __be64 service_mask);
+int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id);
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
ib_cm_handler cm_handler,
@@ -352,6 +348,8 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
struct ib_cm_req_param {
struct sa_path_rec *primary_path;
+ struct sa_path_rec *primary_path_inbound;
+ struct sa_path_rec *primary_path_outbound;
struct sa_path_rec *alternate_path;
const struct ib_gid_attr *ppath_sgid_attr;
__be64 service_id;
@@ -360,7 +358,6 @@ struct ib_cm_req_param {
u32 starting_psn;
const void *private_data;
u8 private_data_len;
- u8 peer_to_peer;
u8 responder_resources;
u8 initiator_depth;
u8 remote_cm_response_timeout;
@@ -370,6 +367,7 @@ struct ib_cm_req_param {
u8 rnr_retry_count;
u8 max_cm_retries;
u8 srq;
+ struct rdma_ucm_ece ece;
};
/**
@@ -393,6 +391,7 @@ struct ib_cm_rep_param {
u8 flow_control;
u8 rnr_retry_count;
u8 srq;
+ struct rdma_ucm_ece ece;
};
/**
@@ -547,6 +546,7 @@ struct ib_cm_sidr_rep_param {
u8 info_length;
const void *private_data;
u8 private_data_len;
+ struct rdma_ucm_ece ece;
};
/**
diff --git a/include/rdma/ib_fmr_pool.h b/include/rdma/ib_fmr_pool.h
deleted file mode 100644
index f8982e4e9702..000000000000
--- a/include/rdma/ib_fmr_pool.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Corporation. All rights reserved.
- * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#if !defined(IB_FMR_POOL_H)
-#define IB_FMR_POOL_H
-
-#include <rdma/ib_verbs.h>
-
-struct ib_fmr_pool;
-
-/**
- * struct ib_fmr_pool_param - Parameters for creating FMR pool
- * @max_pages_per_fmr:Maximum number of pages per map request.
- * @page_shift: Log2 of sizeof "pages" mapped by this fmr
- * @access:Access flags for FMRs in pool.
- * @pool_size:Number of FMRs to allocate for pool.
- * @dirty_watermark:Flush is triggered when @dirty_watermark dirty
- * FMRs are present.
- * @flush_function:Callback called when unmapped FMRs are flushed and
- * more FMRs are possibly available for mapping
- * @flush_arg:Context passed to user's flush function.
- * @cache:If set, FMRs may be reused after unmapping for identical map
- * requests.
- */
-struct ib_fmr_pool_param {
- int max_pages_per_fmr;
- int page_shift;
- enum ib_access_flags access;
- int pool_size;
- int dirty_watermark;
- void (*flush_function)(struct ib_fmr_pool *pool,
- void *arg);
- void *flush_arg;
- unsigned cache:1;
-};
-
-struct ib_pool_fmr {
- struct ib_fmr *fmr;
- struct ib_fmr_pool *pool;
- struct list_head list;
- struct hlist_node cache_node;
- int ref_count;
- int remap_count;
- u64 io_virtual_address;
- int page_list_len;
- u64 page_list[0];
-};
-
-struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
- struct ib_fmr_pool_param *params);
-
-void ib_destroy_fmr_pool(struct ib_fmr_pool *pool);
-
-int ib_flush_fmr_pool(struct ib_fmr_pool *pool);
-
-struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
- u64 *page_list,
- int list_len,
- u64 io_virtual_address);
-
-void ib_fmr_pool_unmap(struct ib_pool_fmr *fmr);
-
-#endif /* IB_FMR_POOL_H */
diff --git a/include/rdma/ib_hdrs.h b/include/rdma/ib_hdrs.h
index 9a90bd031e8c..8ae07c0ecdf7 100644
--- a/include/rdma/ib_hdrs.h
+++ b/include/rdma/ib_hdrs.h
@@ -1,48 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2016 - 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef IB_HDRS_H
@@ -248,11 +206,6 @@ static inline u8 ib_get_lver(struct ib_header *hdr)
IB_LVER_MASK);
}
-static inline u16 ib_get_len(struct ib_header *hdr)
-{
- return (u16)(be16_to_cpu(hdr->lrh[2]));
-}
-
static inline u32 ib_get_qkey(struct ib_other_headers *ohdr)
{
return be32_to_cpu(ohdr->u.ud.deth[0]);
@@ -279,6 +232,7 @@ static inline u32 ib_get_sqpn(struct ib_other_headers *ohdr)
#define IB_BTH_SE_SHIFT 23
#define IB_BTH_TVER_MASK 0xf
#define IB_BTH_TVER_SHIFT 16
+#define IB_BTH_OPCODE_CNP 0x81
static inline u8 ib_bth_get_pad(struct ib_other_headers *ohdr)
{
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 4e62650e2127..2e3843b761e8 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -1,40 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2004 Infinicon Corporation. All rights reserved.
* Copyright (c) 2004 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004-2006 Voltaire Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
-#if !defined(IB_MAD_H)
+#ifndef IB_MAD_H
#define IB_MAD_H
#include <linux/list.h>
@@ -303,6 +276,7 @@ enum ib_port_capability_mask2_bits {
IB_PORT_SWITCH_PORT_STATE_TABLE_SUP = 1 << 3,
IB_PORT_LINK_WIDTH_2X_SUP = 1 << 4,
IB_PORT_LINK_SPEED_HDR_SUP = 1 << 5,
+ IB_PORT_LINK_SPEED_NDR_SUP = 1 << 10,
};
#define OPA_CLASS_PORT_INFO_PR_SUPPORT BIT(26)
@@ -559,20 +533,6 @@ typedef void (*ib_mad_send_handler)(struct ib_mad_agent *mad_agent,
struct ib_mad_send_wc *mad_send_wc);
/**
- * ib_mad_snoop_handler - Callback handler for snooping sent MADs.
- * @mad_agent: MAD agent that snooped the MAD.
- * @send_buf: send MAD data buffer.
- * @mad_send_wc: Work completion information on the sent MAD. Valid
- * only for snooping that occurs on a send completion.
- *
- * Clients snooping MADs should not modify data referenced by the @send_buf
- * or @mad_send_wc.
- */
-typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent,
- struct ib_mad_send_buf *send_buf,
- struct ib_mad_send_wc *mad_send_wc);
-
-/**
* ib_mad_recv_handler - callback handler for a received MAD.
* @mad_agent: MAD agent requesting the received MAD.
* @send_buf: Send buffer if found, else NULL
@@ -581,8 +541,7 @@ typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent,
* MADs received in response to a send request operation will be handed to
* the user before the send operation completes. All data buffers given
* to registered agents through this routine are owned by the receiving
- * client, except for snooping agents. Clients snooping MADs should not
- * modify the data referenced by @mad_recv_wc.
+ * client.
*/
typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
struct ib_mad_send_buf *send_buf,
@@ -595,7 +554,6 @@ typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
* @mr: Memory region for system memory usable for DMA.
* @recv_handler: Callback handler for a received MAD.
* @send_handler: Callback handler for a sent MAD.
- * @snoop_handler: Callback handler for snooped sent MADs.
* @context: User-specified context associated with this registration.
* @hi_tid: Access layer assigned transaction ID for this client.
* Unsolicited MADs sent by this client will have the upper 32-bits
@@ -612,7 +570,6 @@ struct ib_mad_agent {
struct ib_qp *qp;
ib_mad_recv_handler recv_handler;
ib_mad_send_handler send_handler;
- ib_mad_snoop_handler snoop_handler;
void *context;
u32 hi_tid;
u32 flags;
@@ -712,7 +669,7 @@ struct ib_mad_reg_req {
* @registration_flags: Registration flags to set for this agent
*/
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
enum ib_qp_type qp_type,
struct ib_mad_reg_req *mad_reg_req,
u8 rmpp_version,
@@ -720,36 +677,6 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
ib_mad_recv_handler recv_handler,
void *context,
u32 registration_flags);
-
-enum ib_mad_snoop_flags {
- /*IB_MAD_SNOOP_POSTED_SENDS = 1,*/
- /*IB_MAD_SNOOP_RMPP_SENDS = (1<<1),*/
- IB_MAD_SNOOP_SEND_COMPLETIONS = (1<<2),
- /*IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS = (1<<3),*/
- IB_MAD_SNOOP_RECVS = (1<<4)
- /*IB_MAD_SNOOP_RMPP_RECVS = (1<<5),*/
- /*IB_MAD_SNOOP_REDIRECTED_QPS = (1<<6)*/
-};
-
-/**
- * ib_register_mad_snoop - Register to snoop sent and received MADs.
- * @device: The device to register with.
- * @port_num: The port on the specified device to use.
- * @qp_type: Specifies which QP traffic to snoop. Must be either
- * IB_QPT_SMI or IB_QPT_GSI.
- * @mad_snoop_flags: Specifies information where snooping occurs.
- * @send_handler: The callback routine invoked for a snooped send.
- * @recv_handler: The callback routine invoked for a snooped receive.
- * @context: User specified context associated with the registration.
- */
-struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
- u8 port_num,
- enum ib_qp_type qp_type,
- int mad_snoop_flags,
- ib_mad_snoop_handler snoop_handler,
- ib_mad_recv_handler recv_handler,
- void *context);
-
/**
* ib_unregister_mad_agent - Unregisters a client from using MAD services.
* @mad_agent: Corresponding MAD registration request to deregister.
@@ -792,27 +719,26 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc);
/**
- * ib_cancel_mad - Cancels an outstanding send MAD operation.
- * @mad_agent: Specifies the registration associated with sent MAD.
- * @send_buf: Indicates the MAD to cancel.
- *
- * MADs will be returned to the user through the corresponding
- * ib_mad_send_handler.
- */
-void ib_cancel_mad(struct ib_mad_agent *mad_agent,
- struct ib_mad_send_buf *send_buf);
-
-/**
* ib_modify_mad - Modifies an outstanding send MAD operation.
- * @mad_agent: Specifies the registration associated with sent MAD.
* @send_buf: Indicates the MAD to modify.
* @timeout_ms: New timeout value for sent MAD.
*
* This call will reset the timeout value for a sent MAD to the specified
* value.
*/
-int ib_modify_mad(struct ib_mad_agent *mad_agent,
- struct ib_mad_send_buf *send_buf, u32 timeout_ms);
+int ib_modify_mad(struct ib_mad_send_buf *send_buf, u32 timeout_ms);
+
+/**
+ * ib_cancel_mad - Cancels an outstanding send MAD operation.
+ * @send_buf: Indicates the MAD to cancel.
+ *
+ * MADs will be returned to the user through the corresponding
+ * ib_mad_send_handler.
+ */
+static inline void ib_cancel_mad(struct ib_mad_send_buf *send_buf)
+{
+ ib_modify_mad(send_buf, 0);
+}
/**
* ib_create_send_mad - Allocate and initialize a data buffer and work request
diff --git a/include/rdma/ib_marshall.h b/include/rdma/ib_marshall.h
index 8ebf84ae9ed1..1838869aad28 100644
--- a/include/rdma/ib_marshall.h
+++ b/include/rdma/ib_marshall.h
@@ -1,36 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
-#if !defined(IB_USER_MARSHALL_H)
+#ifndef IB_USER_MARSHALL_H
#define IB_USER_MARSHALL_H
#include <rdma/ib_verbs.h>
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index 7ea1382ad0e5..a9162f25beaf 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef IB_PACK_H
diff --git a/include/rdma/ib_pma.h b/include/rdma/ib_pma.h
index 2f8a65c1fca7..44c618203785 100644
--- a/include/rdma/ib_pma.h
+++ b/include/rdma/ib_pma.h
@@ -1,38 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
* All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
-#if !defined(IB_PMA_H)
+#ifndef IB_PMA_H
#define IB_PMA_H
#include <rdma/ib_mad.h>
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 19520979b84c..e930bec33b31 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -1,35 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Voltaire, Inc. All rights reserved.
* Copyright (c) 2006 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef IB_SA_H
@@ -213,6 +186,7 @@ struct sa_path_rec {
struct sa_path_rec_opa opa;
};
enum sa_path_rec_type rec_type;
+ u32 flags;
};
static inline enum ib_gid_type
@@ -393,20 +367,6 @@ struct ib_sa_mcmember_rec {
#define IB_DEFAULT_SERVICE_LEASE 0xFFFFFFFF
-struct ib_sa_service_rec {
- u64 id;
- union ib_gid gid;
- __be16 pkey;
- /* reserved */
- u32 lease;
- u8 key[16];
- u8 name[64];
- u8 data8[16];
- u16 data16[8];
- u32 data32[4];
- u64 data64[2];
-};
-
#define IB_SA_GUIDINFO_REC_LID IB_SA_COMP_MASK(0)
#define IB_SA_GUIDINFO_REC_BLOCK_NUM IB_SA_COMP_MASK(1)
#define IB_SA_GUIDINFO_REC_RES1 IB_SA_COMP_MASK(2)
@@ -450,23 +410,13 @@ struct ib_sa_query;
void ib_sa_cancel_query(int id, struct ib_sa_query *query);
int ib_sa_path_rec_get(struct ib_sa_client *client, struct ib_device *device,
- u8 port_num, struct sa_path_rec *rec,
+ u32 port_num, struct sa_path_rec *rec,
ib_sa_comp_mask comp_mask, unsigned long timeout_ms,
gfp_t gfp_mask,
void (*callback)(int status, struct sa_path_rec *resp,
- void *context),
+ int num_prs, void *context),
void *context, struct ib_sa_query **query);
-int ib_sa_service_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num, u8 method,
- struct ib_sa_service_rec *rec,
- ib_sa_comp_mask comp_mask, unsigned long timeout_ms,
- gfp_t gfp_mask,
- void (*callback)(int status,
- struct ib_sa_service_rec *resp,
- void *context),
- void *context, struct ib_sa_query **sa_query);
-
struct ib_sa_multicast {
struct ib_sa_mcmember_rec rec;
ib_sa_comp_mask comp_mask;
@@ -504,7 +454,8 @@ struct ib_sa_multicast {
* group, and the user must rejoin the group to continue using it.
*/
struct ib_sa_multicast *ib_sa_join_multicast(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
+ struct ib_device *device,
+ u32 port_num,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
int (*callback)(int status,
@@ -533,20 +484,20 @@ void ib_sa_free_multicast(struct ib_sa_multicast *multicast);
* @mgid: MGID of multicast group.
* @rec: Location to copy SA multicast member record.
*/
-int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
+int ib_sa_get_mcmember_rec(struct ib_device *device, u32 port_num,
union ib_gid *mgid, struct ib_sa_mcmember_rec *rec);
/**
* ib_init_ah_from_mcmember - Initialize address handle attributes based on
* an SA multicast member record.
*/
-int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
+int ib_init_ah_from_mcmember(struct ib_device *device, u32 port_num,
struct ib_sa_mcmember_rec *rec,
struct net_device *ndev,
enum ib_gid_type gid_type,
struct rdma_ah_attr *ah_attr);
-int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
+int ib_init_ah_attr_from_path(struct ib_device *device, u32 port_num,
struct sa_path_rec *rec,
struct rdma_ah_attr *ah_attr,
const struct ib_gid_attr *sgid_attr);
@@ -565,7 +516,7 @@ void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec);
/* Support GuidInfoRecord */
int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
+ struct ib_device *device, u32 port_num,
struct ib_sa_guidinfo_rec *rec,
ib_sa_comp_mask comp_mask, u8 method,
unsigned long timeout_ms, gfp_t gfp_mask,
@@ -574,10 +525,6 @@ int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
void *context),
void *context, struct ib_sa_query **sa_query);
-bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client,
- struct ib_device *device,
- u8 port_num);
-
static inline bool sa_path_is_roce(struct sa_path_rec *rec)
{
return ((rec->rec_type == SA_PATH_REC_TYPE_ROCE_V1) ||
diff --git a/include/rdma/ib_smi.h b/include/rdma/ib_smi.h
index 7be0028f155c..fc16b826b2c1 100644
--- a/include/rdma/ib_smi.h
+++ b/include/rdma/ib_smi.h
@@ -1,40 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2004 Infinicon Corporation. All rights reserved.
* Copyright (c) 2004 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004 Voltaire Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
-#if !defined(IB_SMI_H)
+#ifndef IB_SMI_H
#define IB_SMI_H
#include <rdma/ib_mad.h>
@@ -171,5 +144,15 @@ ib_get_smp_direction(struct ib_smp *smp)
#define IB_NOTICE_TRAP_DR_NOTICE 0x80
#define IB_NOTICE_TRAP_DR_TRUNC 0x40
-
+/**
+ * ib_init_query_mad - Initialize query MAD.
+ * @mad: MAD to initialize.
+ */
+static inline void ib_init_query_mad(struct ib_smp *mad)
+{
+ mad->base_version = IB_MGMT_BASE_VERSION;
+ mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
+ mad->class_version = 1;
+ mad->method = IB_MGMT_METHOD_GET;
+}
#endif /* IB_SMI_H */
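
The new ib_init_query_mad() consolidates MAD-header setup that several drivers previously open-coded. A minimal usage sketch, assuming a caller-provided port_num and the driver's own MAD send path (not shown):

	struct ib_smp *smp = kzalloc(sizeof(*smp), GFP_KERNEL);

	if (smp) {
		ib_init_query_mad(smp);                /* LID-routed SubnGet, class version 1 */
		smp->attr_id = IB_SMP_ATTR_PORT_INFO;  /* query PortInfo */
		smp->attr_mod = cpu_to_be32(port_num); /* attribute modifier selects the port */
		/* ... hand the MAD to the device's send path, then kfree(smp) ... */
	}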
diff --git a/include/rdma/ib_sysfs.h b/include/rdma/ib_sysfs.h
new file mode 100644
index 000000000000..3b77cfd74d9a
--- /dev/null
+++ b/include/rdma/ib_sysfs.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (c) 2021 Mellanox Technologies Ltd. All rights reserved.
+ */
+#ifndef DEF_RDMA_IB_SYSFS_H
+#define DEF_RDMA_IB_SYSFS_H
+
+#include <linux/sysfs.h>
+
+struct ib_device;
+
+struct ib_port_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf);
+ ssize_t (*store)(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, const char *buf,
+ size_t count);
+};
+
+#define IB_PORT_ATTR_RW(_name) \
+ struct ib_port_attribute ib_port_attr_##_name = __ATTR_RW(_name)
+
+#define IB_PORT_ATTR_ADMIN_RW(_name) \
+ struct ib_port_attribute ib_port_attr_##_name = \
+ __ATTR_RW_MODE(_name, 0600)
+
+#define IB_PORT_ATTR_RO(_name) \
+ struct ib_port_attribute ib_port_attr_##_name = __ATTR_RO(_name)
+
+#define IB_PORT_ATTR_WO(_name) \
+ struct ib_port_attribute ib_port_attr_##_name = __ATTR_WO(_name)
+
+struct ib_device *ib_port_sysfs_get_ibdev_kobj(struct kobject *kobj,
+ u32 *port_num);
+
+#endif
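
A hedged sketch of how a driver might expose a read-only per-port attribute through this new header; the attribute name and its output are invented for illustration, and the resulting ib_port_attr_example would be handed to the core via the port_groups pointer added to ib_device_ops later in this patch:

	static ssize_t example_show(struct ib_device *ibdev, u32 port_num,
				    struct ib_port_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "port %u\n", port_num);
	}
	static IB_PORT_ATTR_RO(example);	/* defines ib_port_attr_example */

	static struct attribute *example_port_attrs[] = {
		&ib_port_attr_example.attr,
		NULL,
	};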
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index e3518fd6b95b..92a673cd9b4f 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2007 Cisco Systems. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2020 Intel Corporation. All rights reserved.
*/
#ifndef IB_UMEM_H
@@ -40,45 +14,140 @@
struct ib_ucontext;
struct ib_umem_odp;
+struct dma_buf_attach_ops;
struct ib_umem {
struct ib_device *ibdev;
struct mm_struct *owning_mm;
+ u64 iova;
size_t length;
unsigned long address;
u32 writable : 1;
u32 is_odp : 1;
+ u32 is_dmabuf : 1;
struct work_struct work;
- struct sg_table sg_head;
- int nmap;
- unsigned int sg_nents;
+ struct sg_append_table sgt_append;
};
+struct ib_umem_dmabuf {
+ struct ib_umem umem;
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ struct scatterlist *first_sg;
+ struct scatterlist *last_sg;
+ unsigned long first_sg_offset;
+ unsigned long last_sg_trim;
+ void *private;
+ u8 pinned : 1;
+};
+
+static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
+{
+ return container_of(umem, struct ib_umem_dmabuf, umem);
+}
+
/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
return umem->address & ~PAGE_MASK;
}
+static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
+ unsigned long pgsz)
+{
+ return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
+ (pgsz - 1);
+}
+
+static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
+ unsigned long pgsz)
+{
+ return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
+ ALIGN_DOWN(umem->iova, pgsz))) /
+ pgsz;
+}
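
A worked example of the block arithmetic, with values chosen purely for illustration: iova = 0x1000, length = 0x3000 and pgsz = 0x2000 give ALIGN(0x4000, 0x2000) = 0x4000 and ALIGN_DOWN(0x1000, 0x2000) = 0x0, so (0x4000 - 0x0) / 0x2000 = 2 DMA blocks cover the range.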
+
static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
- return (ALIGN(umem->address + umem->length, PAGE_SIZE) -
- ALIGN_DOWN(umem->address, PAGE_SIZE)) >>
- PAGE_SHIFT;
+ return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}
+static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
+ struct ib_umem *umem,
+ unsigned long pgsz)
+{
+ __rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
+ umem->sgt_append.sgt.nents, pgsz);
+}
+
+/**
+ * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
+ * @umem: umem to iterate over
+ * @biter: block iterator (caller-allocated struct ib_block_iter) holding the
+ * current position
+ * @pgsz: Page size to split the list into
+ *
+ * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
+ * returned DMA blocks will be aligned to pgsz and span the range:
+ * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
+ *
+ * Performs exactly ib_umem_num_dma_blocks() iterations.
+ */
+#define rdma_umem_for_each_dma_block(umem, biter, pgsz) \
+ for (__rdma_umem_block_iter_start(biter, umem, pgsz); \
+ __rdma_block_iter_next(biter);)
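
A short usage sketch for the iterator, assuming a caller-computed pgsz and a hypothetical driver-side address table hw_pas[]:

	struct ib_block_iter biter;
	unsigned int i = 0;

	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
		hw_pas[i++] = rdma_block_iter_dma_address(&biter);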
+
#ifdef CONFIG_INFINIBAND_USER_MEM
struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
-int ib_umem_page_count(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
unsigned long pgsz_bitmap,
unsigned long virt);
+/**
+ * ib_umem_find_best_pgoff - Find best HW page size
+ *
+ * @umem: umem struct
+ * @pgsz_bitmap: bitmap of HW supported page sizes
+ * @pgoff_bitmask: Mask of bits that can be represented with an offset
+ *
+ * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
+ * an IOVA it accepts a bitmask specifying what address bits can be represented
+ * with a page offset.
+ *
+ * For instance, if the HW has multiple page sizes, requires 64-byte alignment,
+ * and can support aligned offsets up to 4032 then pgoff_bitmask would be
+ * "111111000000".
+ *
+ * If the pgoff_bitmask requires either alignment in the low bit or an
+ * unavailable page size for the high bits, this function returns 0.
+ */
+static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
+ unsigned long pgsz_bitmap,
+ u64 pgoff_bitmask)
+{
+ struct scatterlist *sg = umem->sgt_append.sgt.sgl;
+ dma_addr_t dma_addr;
+
+ dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
+ return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
+ dma_addr & pgoff_bitmask);
+}
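
Continuing the example from the comment above, a sketch of a caller whose HW carries the offset in bits 6..11 (64-byte aligned, up to 4032), i.e. a mask of 0xFC0; hw_pgsz_bitmap is an assumed driver capability mask:

	unsigned long pgsz;

	pgsz = ib_umem_find_best_pgoff(umem, hw_pgsz_bitmap, 0xFC0);
	if (!pgsz)
		return -EINVAL;	/* no page size satisfies the offset constraint */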
+
+struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
+ unsigned long offset, size_t size,
+ int fd, int access,
+ const struct dma_buf_attach_ops *ops);
+struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
+ unsigned long offset,
+ size_t size, int fd,
+ int access);
+int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
+void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
+void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
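
A hedged sketch of the pinned dma-buf flow for drivers that cannot fault pages on demand; error handling is trimmed and the MR programming step is assumed:

	struct ib_umem_dmabuf *umem_dmabuf;

	umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, offset, size, fd, access);
	if (IS_ERR(umem_dmabuf))
		return PTR_ERR(umem_dmabuf);

	/* umem_dmabuf->umem.sgt_append now describes the pinned mapping;
	 * program the MR from it, and tear down with ib_umem_release(),
	 * which routes dma-buf umems to ib_umem_dmabuf_release().
	 */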
+
#else /* CONFIG_INFINIBAND_USER_MEM */
#include <linux/err.h>
@@ -87,20 +156,46 @@ static inline struct ib_umem *ib_umem_get(struct ib_device *device,
unsigned long addr, size_t size,
int access)
{
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
-static inline int ib_umem_page_count(struct ib_umem *umem) { return 0; }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
size_t length) {
- return -EINVAL;
+ return -EOPNOTSUPP;
}
-static inline int ib_umem_find_best_pgsz(struct ib_umem *umem,
- unsigned long pgsz_bitmap,
- unsigned long virt) {
- return -EINVAL;
+static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
+ unsigned long pgsz_bitmap,
+ unsigned long virt)
+{
+ return 0;
+}
+static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
+ unsigned long pgsz_bitmap,
+ u64 pgoff_bitmask)
+{
+ return 0;
+}
+static inline
+struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
+ unsigned long offset,
+ size_t size, int fd,
+ int access,
+ const struct dma_buf_attach_ops *ops)
+{
+ return ERR_PTR(-EOPNOTSUPP);
}
+static inline struct ib_umem_dmabuf *
+ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
+ size_t size, int fd, int access)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
+{
+ return -EOPNOTSUPP;
+}
+static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
+static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }
#endif /* CONFIG_INFINIBAND_USER_MEM */
-
#endif /* IB_UMEM_H */
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index 64314ff76612..0844c1d05ac6 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2014 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef IB_UMEM_ODP_H
@@ -41,17 +14,13 @@ struct ib_umem_odp {
struct mmu_interval_notifier notifier;
struct pid *tgid;
+ /* An array of the pfns included in the on-demand paging umem. */
+ unsigned long *pfn_list;
+
/*
- * An array of the pages included in the on-demand paging umem.
- * Indices of pages that are currently not mapped into the device will
- * contain NULL.
- */
- struct page **page_list;
- /*
- * An array of the same size as page_list, with DMA addresses mapped
- * for pages the pages in page_list. The lower two bits designate
- * access permissions. See ODP_READ_ALLOWED_BIT and
- * ODP_WRITE_ALLOWED_BIT.
+ * An array with DMA addresses mapped for pfns in pfn_list.
+ * The lower two bits designate access permissions.
+ * See ODP_READ_ALLOWED_BIT and ODP_WRITE_ALLOWED_BIT.
*/
dma_addr_t *dma_list;
/*
@@ -124,9 +93,8 @@ ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem, unsigned long addr,
const struct mmu_interval_notifier_ops *ops);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);
-int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
- u64 bcnt, u64 access_mask,
- unsigned long current_seq);
+int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 start_offset,
+ u64 bcnt, u64 access_mask, bool fault);
void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
u64 bound);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 1f779fad3a1e..975d6e9efbcb 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1,44 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2004 Infinicon Corporation. All rights reserved.
- * Copyright (c) 2004 Intel Corporation. All rights reserved.
+ * Copyright (c) 2004, 2020 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004 Voltaire Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
-#if !defined(IB_VERBS_H)
+#ifndef IB_VERBS_H
#define IB_VERBS_H
+#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
@@ -75,6 +49,9 @@ struct ib_umem_odp;
struct ib_uqp_object;
struct ib_usrq_object;
struct ib_uwq_object;
+struct rdma_cm_id;
+struct ib_port;
+struct hw_stats_device_data;
extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
@@ -100,7 +77,8 @@ void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define ibdev_dbg(__dev, format, args...) \
dynamic_ibdev_dbg(__dev, format, ##args)
#else
@@ -133,7 +111,8 @@ do { \
#define ibdev_info_ratelimited(ibdev, fmt, ...) \
ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define ibdev_dbg_ratelimited(ibdev, fmt, ...) \
do { \
@@ -162,10 +141,9 @@ union ib_gid {
extern union ib_gid zgid;
enum ib_gid_type {
- /* If link layer is Ethernet, this is RoCE V1 */
- IB_GID_TYPE_IB = 0,
- IB_GID_TYPE_ROCE = 0,
- IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
+ IB_GID_TYPE_IB = IB_UVERBS_GID_TYPE_IB,
+ IB_GID_TYPE_ROCE = IB_UVERBS_GID_TYPE_ROCE_V1,
+ IB_GID_TYPE_ROCE_UDP_ENCAP = IB_UVERBS_GID_TYPE_ROCE_V2,
IB_GID_TYPE_SIZE
};
@@ -176,7 +154,7 @@ struct ib_gid_attr {
union ib_gid gid;
enum ib_gid_type gid_type;
u16 index;
- u8 port_num;
+ u32 port_num;
};
enum {
@@ -204,7 +182,7 @@ rdma_node_get_transport(unsigned int node_type);
enum rdma_network_type {
RDMA_NETWORK_IB,
- RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
+ RDMA_NETWORK_ROCE_V1,
RDMA_NETWORK_IPV4,
RDMA_NETWORK_IPV6
};
@@ -214,9 +192,10 @@ static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type net
if (network_type == RDMA_NETWORK_IPV4 ||
network_type == RDMA_NETWORK_IPV6)
return IB_GID_TYPE_ROCE_UDP_ENCAP;
-
- /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
- return IB_GID_TYPE_IB;
+ else if (network_type == RDMA_NETWORK_ROCE_V1)
+ return IB_GID_TYPE_ROCE;
+ else
+ return IB_GID_TYPE_IB;
}
static inline enum rdma_network_type
@@ -225,6 +204,9 @@ rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
if (attr->gid_type == IB_GID_TYPE_IB)
return RDMA_NETWORK_IB;
+ if (attr->gid_type == IB_GID_TYPE_ROCE)
+ return RDMA_NETWORK_ROCE_V1;
+
if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
return RDMA_NETWORK_IPV4;
else
@@ -238,32 +220,24 @@ enum rdma_link_layer {
};
enum ib_device_cap_flags {
- IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
- IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
- IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
- IB_DEVICE_RAW_MULTI = (1 << 3),
- IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
- IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
- IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
- IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
- IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
- /* Not in use, former INIT_TYPE = (1 << 9),*/
- IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
- IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
- IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
- IB_DEVICE_SRQ_RESIZE = (1 << 13),
- IB_DEVICE_N_NOTIFY_CQ = (1 << 14),
-
- /*
- * This device supports a per-device lkey or stag that can be
- * used without performing a memory registration for the local
- * memory. Note that ULPs should never check this flag, but
- * instead of use the local_dma_lkey flag in the ib_pd structure,
- * which will always contain a usable lkey.
- */
- IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
- /* Reserved, old SEND_W_INV = (1 << 16),*/
- IB_DEVICE_MEM_WINDOW = (1 << 17),
+ IB_DEVICE_RESIZE_MAX_WR = IB_UVERBS_DEVICE_RESIZE_MAX_WR,
+ IB_DEVICE_BAD_PKEY_CNTR = IB_UVERBS_DEVICE_BAD_PKEY_CNTR,
+ IB_DEVICE_BAD_QKEY_CNTR = IB_UVERBS_DEVICE_BAD_QKEY_CNTR,
+ IB_DEVICE_RAW_MULTI = IB_UVERBS_DEVICE_RAW_MULTI,
+ IB_DEVICE_AUTO_PATH_MIG = IB_UVERBS_DEVICE_AUTO_PATH_MIG,
+ IB_DEVICE_CHANGE_PHY_PORT = IB_UVERBS_DEVICE_CHANGE_PHY_PORT,
+ IB_DEVICE_UD_AV_PORT_ENFORCE = IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE,
+ IB_DEVICE_CURR_QP_STATE_MOD = IB_UVERBS_DEVICE_CURR_QP_STATE_MOD,
+ IB_DEVICE_SHUTDOWN_PORT = IB_UVERBS_DEVICE_SHUTDOWN_PORT,
+ /* IB_DEVICE_INIT_TYPE = IB_UVERBS_DEVICE_INIT_TYPE, (not in use) */
+ IB_DEVICE_PORT_ACTIVE_EVENT = IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT,
+ IB_DEVICE_SYS_IMAGE_GUID = IB_UVERBS_DEVICE_SYS_IMAGE_GUID,
+ IB_DEVICE_RC_RNR_NAK_GEN = IB_UVERBS_DEVICE_RC_RNR_NAK_GEN,
+ IB_DEVICE_SRQ_RESIZE = IB_UVERBS_DEVICE_SRQ_RESIZE,
+ IB_DEVICE_N_NOTIFY_CQ = IB_UVERBS_DEVICE_N_NOTIFY_CQ,
+
+ /* Reserved, old SEND_W_INV = 1 << 16,*/
+ IB_DEVICE_MEM_WINDOW = IB_UVERBS_DEVICE_MEM_WINDOW,
/*
* Devices should set IB_DEVICE_UD_IP_SUM if they support
* insertion of UDP and TCP checksum on outgoing UD IPoIB
@@ -271,9 +245,8 @@ enum ib_device_cap_flags {
* incoming messages. Setting this flag implies that the
* IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
*/
- IB_DEVICE_UD_IP_CSUM = (1 << 18),
- IB_DEVICE_UD_TSO = (1 << 19),
- IB_DEVICE_XRC = (1 << 20),
+ IB_DEVICE_UD_IP_CSUM = IB_UVERBS_DEVICE_UD_IP_CSUM,
+ IB_DEVICE_XRC = IB_UVERBS_DEVICE_XRC,
/*
* This device supports the IB "base memory management extension",
@@ -284,31 +257,53 @@ enum ib_device_cap_flags {
* IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
* stag.
*/
- IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
- IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
- IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
- IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
- IB_DEVICE_RC_IP_CSUM = (1 << 25),
+ IB_DEVICE_MEM_MGT_EXTENSIONS = IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS,
+ IB_DEVICE_MEM_WINDOW_TYPE_2A = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A,
+ IB_DEVICE_MEM_WINDOW_TYPE_2B = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B,
+ IB_DEVICE_RC_IP_CSUM = IB_UVERBS_DEVICE_RC_IP_CSUM,
/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
- IB_DEVICE_RAW_IP_CSUM = (1 << 26),
- /*
- * Devices should set IB_DEVICE_CROSS_CHANNEL if they
- * support execution of WQEs that involve synchronization
- * of I/O operations with single completion queue managed
- * by hardware.
- */
- IB_DEVICE_CROSS_CHANNEL = (1 << 27),
- IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
- IB_DEVICE_INTEGRITY_HANDOVER = (1 << 30),
- IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
- IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
- IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
+ IB_DEVICE_RAW_IP_CSUM = IB_UVERBS_DEVICE_RAW_IP_CSUM,
+ IB_DEVICE_MANAGED_FLOW_STEERING =
+ IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING,
/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
- IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
- IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35),
+ IB_DEVICE_RAW_SCATTER_FCS = IB_UVERBS_DEVICE_RAW_SCATTER_FCS,
/* The device supports padding incoming writes to cacheline. */
- IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
- IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37),
+ IB_DEVICE_PCI_WRITE_END_PADDING =
+ IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING,
+};
+
+enum ib_kernel_cap_flags {
+ /*
+ * This device supports a per-device lkey or stag that can be
+ * used without performing a memory registration for the local
+ * memory. Note that ULPs should never check this flag, but
+ * instead use the local_dma_lkey flag in the ib_pd structure,
+ * which will always contain a usable lkey.
+ */
+ IBK_LOCAL_DMA_LKEY = 1 << 0,
+ /* IB_QP_CREATE_INTEGRITY_EN is supported to implement T10-PI */
+ IBK_INTEGRITY_HANDOVER = 1 << 1,
+ /* IB_ACCESS_ON_DEMAND is supported during reg_user_mr() */
+ IBK_ON_DEMAND_PAGING = 1 << 2,
+ /* IB_MR_TYPE_SG_GAPS is supported */
+ IBK_SG_GAPS_REG = 1 << 3,
+ /* Driver supports RDMA_NLDEV_CMD_DELLINK */
+ IBK_ALLOW_USER_UNREG = 1 << 4,
+
+ /* ipoib will use IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK */
+ IBK_BLOCK_MULTICAST_LOOPBACK = 1 << 5,
+ /* ipoib will use IB_QP_CREATE_IPOIB_UD_LSO for its QPs */
+ IBK_UD_TSO = 1 << 6,
+ /* ipoib will use the device ops:
+ * get_vf_config
+ * get_vf_guid
+ * get_vf_stats
+ * set_vf_guid
+ * set_vf_link_state
+ */
+ IBK_VIRTUAL_FUNCTION = 1 << 7,
+ /* ipoib will use IB_QP_CREATE_NETDEV_USE for its QPs */
+ IBK_RDMA_NETDEV_OPA = 1 << 8,
};
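
For instance, a ULP probing for ODP support would now test the new kernel-only mask rather than device_cap_flags (sketch; use_odp is illustrative):

	if (ibdev->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING)
		use_odp = true;	/* IB_ACCESS_ON_DEMAND is valid for reg_user_mr() */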
enum ib_atomic_cap {
@@ -407,6 +402,7 @@ struct ib_device_attr {
int max_qp;
int max_qp_wr;
u64 device_cap_flags;
+ u64 kernel_cap_flags;
int max_send_sge;
int max_recv_sge;
int max_sge_rd;
@@ -430,8 +426,6 @@ struct ib_device_attr {
int max_mcast_qp_attach;
int max_total_mcast_qp_attach;
int max_ah;
- int max_fmr;
- int max_map_per_fmr;
int max_srq;
int max_srq_wr;
int max_srq_sge;
@@ -462,6 +456,11 @@ enum ib_mtu {
IB_MTU_4096 = 5
};
+enum opa_mtu {
+ OPA_MTU_8192 = 6,
+ OPA_MTU_10240 = 7
+};
+
static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
switch (mtu) {
@@ -488,6 +487,28 @@ static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
return IB_MTU_256;
}
+static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
+{
+ switch (mtu) {
+ case OPA_MTU_8192:
+ return 8192;
+ case OPA_MTU_10240:
+ return 10240;
+ default:
+ return ib_mtu_enum_to_int((enum ib_mtu)mtu);
+ }
+}
+
+static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
+{
+ if (mtu >= 10240)
+ return OPA_MTU_10240;
+ else if (mtu >= 8192)
+ return OPA_MTU_8192;
+ else
+ return (enum opa_mtu)ib_mtu_int_to_enum(mtu);
+}
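
A quick worked example of the two helpers; values at or below 4096 fall through to the existing ib_mtu conversions:

	enum opa_mtu m = opa_mtu_int_to_enum(9000);	/* >= 8192, < 10240 -> OPA_MTU_8192 */
	int bytes = opa_mtu_enum_to_int(m);		/* back to 8192 */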
+
enum ib_port_state {
IB_PORT_NOP = 0,
IB_PORT_DOWN = 1,
@@ -534,21 +555,40 @@ enum ib_port_speed {
IB_SPEED_FDR10 = 8,
IB_SPEED_FDR = 16,
IB_SPEED_EDR = 32,
- IB_SPEED_HDR = 64
+ IB_SPEED_HDR = 64,
+ IB_SPEED_NDR = 128,
+};
+
+enum ib_stat_flag {
+ IB_STAT_FLAG_OPTIONAL = 1 << 0,
+};
+
+/**
+ * struct rdma_stat_desc
+ * @name - The name of the counter
+ * @flags - Flags of the counter; For example, IB_STAT_FLAG_OPTIONAL
+ * @priv - Driver private information; Core code should not use
+ */
+struct rdma_stat_desc {
+ const char *name;
+ unsigned int flags;
+ const void *priv;
};
/**
* struct rdma_hw_stats
* @lock - Mutex to protect parallel write access to lifespan and values
- * of counters, which are 64bits and not guaranteeed to be written
+ * of counters, which are 64bits and not guaranteed to be written
* atomicaly on 32bits systems.
* @timestamp - Used by the core code to track when the last update was
* @lifespan - Used by the core code to determine how old the counters
* should be before being updated again. Stored in jiffies, defaults
* to 10 milliseconds, drivers can override the default by specifying
* their own value during their allocation routine.
- * @name - Array of pointers to static names used for the counters in
- * directory.
+ * @descs - Array of pointers to static descriptors used for the counters
+ * in directory.
+ * @is_disabled - A bitmap to indicate each counter is currently disabled
+ * or not.
* @num_counters - How many hardware counters there are. If descs is
* shorter than this number, a kernel oops will result. Driver authors
* are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@descs) < num_counters)
@@ -560,36 +600,19 @@ struct rdma_hw_stats {
struct mutex lock; /* Protect lifespan and values[] */
unsigned long timestamp;
unsigned long lifespan;
- const char * const *names;
+ const struct rdma_stat_desc *descs;
+ unsigned long *is_disabled;
int num_counters;
u64 value[];
};
#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
-/**
- * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
- * for drivers.
- * @names - Array of static const char *
- * @num_counters - How many elements in array
- * @lifespan - How many milliseconds between updates
- */
-static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
- const char * const *names, int num_counters,
- unsigned long lifespan)
-{
- struct rdma_hw_stats *stats;
- stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
- GFP_KERNEL);
- if (!stats)
- return NULL;
- stats->names = names;
- stats->num_counters = num_counters;
- stats->lifespan = msecs_to_jiffies(lifespan);
-
- return stats;
-}
+struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
+ const struct rdma_stat_desc *descs, int num_counters,
+ unsigned long lifespan);
+void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats);
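
A hedged sketch of the new allocation contract; the descriptor table and driver names are invented, and the function matches the alloc_hw_port_stats op added to ib_device_ops later in this patch:

	static const struct rdma_stat_desc drv_port_descs[] = {
		{ .name = "rx_pkts" },
		{ .name = "rx_bytes", .flags = IB_STAT_FLAG_OPTIONAL },
	};

	static struct rdma_hw_stats *drv_alloc_hw_port_stats(struct ib_device *ibdev,
							     u32 port_num)
	{
		return rdma_alloc_hw_stats_struct(drv_port_descs,
						  ARRAY_SIZE(drv_port_descs),
						  RDMA_HW_STATS_DEFAULT_LIFESPAN);
	}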
/* Define bits for the various functionality this port needs to be supported by
* the core.
@@ -651,6 +674,7 @@ struct ib_port_attr {
enum ib_port_state state;
enum ib_mtu max_mtu;
enum ib_mtu active_mtu;
+ u32 phys_mtu;
int gid_tbl_len;
unsigned int ip_gids:1;
/* This is the value from PortInfo CapabilityMask, defined by IBA */
@@ -667,7 +691,7 @@ struct ib_port_attr {
u8 subnet_timeout;
u8 init_type_reply;
u8 active_width;
- u8 active_speed;
+ u16 active_speed;
u8 phys_state;
u16 port_cap_flags2;
};
@@ -729,7 +753,7 @@ struct ib_event {
struct ib_qp *qp;
struct ib_srq *srq;
struct ib_wq *wq;
- u8 port_num;
+ u32 port_num;
} element;
enum ib_event_type event;
};
@@ -880,6 +904,12 @@ struct ib_mr_status {
*/
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
+struct rdma_ah_init_attr {
+ struct rdma_ah_attr *ah_attr;
+ u32 flags;
+ struct net_device *xmit_slave;
+};
+
enum rdma_ah_attr_type {
RDMA_AH_ATTR_TYPE_UNDEFINED,
RDMA_AH_ATTR_TYPE_IB,
@@ -906,7 +936,7 @@ struct rdma_ah_attr {
struct ib_global_route grh;
u8 sl;
u8 static_rate;
- u8 port_num;
+ u32 port_num;
u8 ah_flags;
enum rdma_ah_attr_type type;
union {
@@ -944,13 +974,14 @@ enum ib_wc_status {
const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
enum ib_wc_opcode {
- IB_WC_SEND,
- IB_WC_RDMA_WRITE,
- IB_WC_RDMA_READ,
- IB_WC_COMP_SWAP,
- IB_WC_FETCH_ADD,
- IB_WC_LSO,
- IB_WC_LOCAL_INV,
+ IB_WC_SEND = IB_UVERBS_WC_SEND,
+ IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE,
+ IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ,
+ IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP,
+ IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD,
+ IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW,
+ IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV,
+ IB_WC_LSO = IB_UVERBS_WC_TSO,
IB_WC_REG_MR,
IB_WC_MASKED_COMP_SWAP,
IB_WC_MASKED_FETCH_ADD,
@@ -992,7 +1023,7 @@ struct ib_wc {
u16 pkey_index;
u8 sl;
u8 dlid_path_bits;
- u8 port_num; /* valid only for DR SMPs on switches */
+ u32 port_num; /* valid only for DR SMPs on switches */
u8 smac[ETH_ALEN];
u16 vlan_id;
u8 network_hdr_type;
@@ -1006,9 +1037,9 @@ enum ib_cq_notify_flags {
};
enum ib_srq_type {
- IB_SRQT_BASIC,
- IB_SRQT_XRC,
- IB_SRQT_TM,
+ IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
+ IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
+ IB_SRQT_TM = IB_UVERBS_SRQT_TM,
};
static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
@@ -1077,16 +1108,16 @@ enum ib_qp_type {
IB_QPT_SMI,
IB_QPT_GSI,
- IB_QPT_RC,
- IB_QPT_UC,
- IB_QPT_UD,
+ IB_QPT_RC = IB_UVERBS_QPT_RC,
+ IB_QPT_UC = IB_UVERBS_QPT_UC,
+ IB_QPT_UD = IB_UVERBS_QPT_UD,
IB_QPT_RAW_IPV6,
IB_QPT_RAW_ETHERTYPE,
- IB_QPT_RAW_PACKET = 8,
- IB_QPT_XRC_INI = 9,
- IB_QPT_XRC_TGT,
+ IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
+ IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
+ IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
IB_QPT_MAX,
- IB_QPT_DRIVER = 0xFF,
+ IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
/* Reserve a range for qp types internal to the low level driver.
* These qp types will not be visible at the IB core layer, so the
* IB_QPT_MAX usages should not be affected in the core layer
@@ -1105,17 +1136,21 @@ enum ib_qp_type {
enum ib_qp_create_flags {
IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
- IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
+ IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK =
+ IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
IB_QP_CREATE_MANAGED_SEND = 1 << 3,
IB_QP_CREATE_MANAGED_RECV = 1 << 4,
IB_QP_CREATE_NETIF_QP = 1 << 5,
IB_QP_CREATE_INTEGRITY_EN = 1 << 6,
- /* FREE = 1 << 7, */
- IB_QP_CREATE_SCATTER_FCS = 1 << 8,
- IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
+ IB_QP_CREATE_NETDEV_USE = 1 << 7,
+ IB_QP_CREATE_SCATTER_FCS =
+ IB_UVERBS_QP_CREATE_SCATTER_FCS,
+ IB_QP_CREATE_CVLAN_STRIPPING =
+ IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
IB_QP_CREATE_SOURCE_QPN = 1 << 10,
- IB_QP_CREATE_PCI_WRITE_END_PADDING = 1 << 11,
+ IB_QP_CREATE_PCI_WRITE_END_PADDING =
+ IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,
/* reserve bits 26-31 for low level drivers' internal use */
IB_QP_CREATE_RESERVED_START = 1 << 26,
IB_QP_CREATE_RESERVED_END = 1 << 31,
@@ -1143,7 +1178,7 @@ struct ib_qp_init_attr {
/*
* Only needed for special QP types, or when using the RW API.
*/
- u8 port_num;
+ u32 port_num;
struct ib_rwq_ind_table *rwq_ind_tbl;
u32 source_qpn;
};
@@ -1217,6 +1252,8 @@ enum ib_qp_attr_mask {
IB_QP_RESERVED3 = (1<<23),
IB_QP_RESERVED4 = (1<<24),
IB_QP_RATE_LIMIT = (1<<25),
+
+ IB_QP_ATTR_STANDARD_BITS = GENMASK(20, 0),
};
enum ib_qp_state {
@@ -1260,13 +1297,14 @@ struct ib_qp_attr {
u8 max_rd_atomic;
u8 max_dest_rd_atomic;
u8 min_rnr_timer;
- u8 port_num;
+ u32 port_num;
u8 timeout;
u8 retry_cnt;
u8 rnr_retry;
- u8 alt_port_num;
+ u32 alt_port_num;
u8 alt_timeout;
u32 rate_limit;
+ struct net_device *xmit_slave;
};
enum ib_wr_opcode {
@@ -1278,6 +1316,7 @@ enum ib_wr_opcode {
IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
+ IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW,
IB_WR_LSO = IB_UVERBS_WR_TSO,
IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
@@ -1379,7 +1418,7 @@ struct ib_ud_wr {
u32 remote_qpn;
u32 remote_qkey;
u16 pkey_index; /* valid for GSI only */
- u8 port_num; /* valid for DR SMPs on switch only */
+ u32 port_num; /* valid for DR SMPs on switch only */
};
static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
@@ -1436,12 +1475,6 @@ enum ib_mr_rereg_flags {
IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
};
-struct ib_fmr_attr {
- int max_pages;
- int max_maps;
- u8 page_shift;
-};
-
struct ib_umem;
enum rdma_remove_reason {
@@ -1456,6 +1489,8 @@ enum rdma_remove_reason {
RDMA_REMOVE_DRIVER_REMOVE,
/* uobj is being cleaned-up before being committed */
RDMA_REMOVE_ABORT,
+ /* The driver failed to destroy the uobject and is being disconnected */
+ RDMA_REMOVE_DRIVER_FAILURE,
};
struct ib_rdmacg_object {
@@ -1467,14 +1502,6 @@ struct ib_rdmacg_object {
struct ib_ucontext {
struct ib_device *device;
struct ib_uverbs_file *ufile;
- /*
- * 'closing' can be read by the driver only during a destroy callback,
- * it is set when we are closing the file descriptor and indicates
- * that mm_sem may be locked.
- */
- bool closing;
-
- bool cleanup_retryable;
struct ib_rdmacg_object cg_obj;
/*
@@ -1528,9 +1555,8 @@ struct ib_xrcd {
struct ib_device *device;
atomic_t usecnt; /* count all exposed resources */
struct inode *inode;
-
- struct mutex tgt_qp_mutex;
- struct list_head tgt_qp_list;
+ struct rw_semaphore tgt_qps_rwsem;
+ struct xarray tgt_qps;
};
struct ib_ah {
@@ -1544,10 +1570,12 @@ struct ib_ah {
typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
enum ib_poll_context {
- IB_POLL_DIRECT, /* caller context, no hw completions */
IB_POLL_SOFTIRQ, /* poll from softirq context */
IB_POLL_WORKQUEUE, /* poll from workqueue */
IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
+ IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,
+
+ IB_POLL_DIRECT, /* caller context, no hw completions */
};
struct ib_cq {
@@ -1557,9 +1585,11 @@ struct ib_cq {
void (*event_handler)(struct ib_event *, void *);
void *cq_context;
int cqe;
+ unsigned int cqe_used;
atomic_t usecnt; /* count number of work queues */
enum ib_poll_context poll_ctx;
struct ib_wc *wc;
+ struct list_head pool_entry;
union {
struct irq_poll iop;
struct work_struct work;
@@ -1569,7 +1599,9 @@ struct ib_cq {
/* updated only by trace points */
ktime_t timestamp;
- bool interrupt;
+ u8 interrupt:1;
+ u8 shared:1;
+ unsigned int comp_vector;
/*
* Implementation details of the RDMA core, don't use in drivers:
@@ -1595,26 +1627,35 @@ struct ib_srq {
} xrc;
};
} ext;
+
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct rdma_restrack_entry res;
};
enum ib_raw_packet_caps {
- /* Strip cvlan from incoming packet and report it in the matching work
+ /*
+ * Strip cvlan from incoming packet and report it in the matching work
* completion is supported.
*/
- IB_RAW_PACKET_CAP_CVLAN_STRIPPING = (1 << 0),
- /* Scatter FCS field of an incoming packet to host memory is supported.
+ IB_RAW_PACKET_CAP_CVLAN_STRIPPING =
+ IB_UVERBS_RAW_PACKET_CAP_CVLAN_STRIPPING,
+ /*
+ * Scatter FCS field of an incoming packet to host memory is supported.
*/
- IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1),
+ IB_RAW_PACKET_CAP_SCATTER_FCS = IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS,
/* Checksum offloads are supported (for both send and receive). */
- IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2),
- /* When a packet is received for an RQ with no receive WQEs, the
+ IB_RAW_PACKET_CAP_IP_CSUM = IB_UVERBS_RAW_PACKET_CAP_IP_CSUM,
+ /*
+ * When a packet is received for an RQ with no receive WQEs, the
* packet processing is delayed.
*/
- IB_RAW_PACKET_CAP_DELAY_DROP = (1 << 3),
+ IB_RAW_PACKET_CAP_DELAY_DROP = IB_UVERBS_RAW_PACKET_CAP_DELAY_DROP,
};
enum ib_wq_type {
- IB_WQT_RQ
+ IB_WQT_RQ = IB_UVERBS_WQT_RQ,
};
enum ib_wq_state {
@@ -1637,10 +1678,11 @@ struct ib_wq {
};
enum ib_wq_flags {
- IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0,
- IB_WQ_FLAGS_SCATTER_FCS = 1 << 1,
- IB_WQ_FLAGS_DELAY_DROP = 1 << 2,
- IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
+ IB_WQ_FLAGS_CVLAN_STRIPPING = IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
+ IB_WQ_FLAGS_SCATTER_FCS = IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
+ IB_WQ_FLAGS_DELAY_DROP = IB_UVERBS_WQ_FLAGS_DELAY_DROP,
+ IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
+ IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
};
struct ib_wq_init_attr {
@@ -1692,7 +1734,7 @@ struct ib_qp_security;
struct ib_port_pkey {
enum port_pkey_state state;
u16 pkey_index;
- u8 port_num;
+ u32 port_num;
struct list_head qp_list;
struct list_head to_error_list;
struct ib_qp_security *sec;
@@ -1753,7 +1795,7 @@ struct ib_qp {
enum ib_qp_type qp_type;
struct ib_rwq_ind_table *rwq_ind_tbl;
struct ib_qp_security *qp_sec;
- u8 port;
+ u32 port;
bool integrity_en;
/*
@@ -1804,14 +1846,6 @@ struct ib_mw {
enum ib_mw_type type;
};
-struct ib_fmr {
- struct ib_device *device;
- struct ib_pd *pd;
- struct list_head list;
- u32 lkey;
- u32 rkey;
-};
-
/* Supported steering options */
enum ib_flow_attr_type {
/* steering according to rule specifications */
@@ -1853,17 +1887,6 @@ enum ib_flow_spec_type {
#define IB_FLOW_SPEC_LAYER_MASK 0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 10
-/* Flow steering rule priority is set according to it's domain.
- * Lower domain value means higher priority.
- */
-enum ib_flow_domain {
- IB_FLOW_DOMAIN_USER,
- IB_FLOW_DOMAIN_ETHTOOL,
- IB_FLOW_DOMAIN_RFS,
- IB_FLOW_DOMAIN_NIC,
- IB_FLOW_DOMAIN_NUM /* Must be last */
-};
-
enum ib_flow_flags {
IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
@@ -1876,7 +1899,7 @@ struct ib_flow_eth_filter {
__be16 ether_type;
__be16 vlan_tag;
/* Must be last */
- u8 real_sz[0];
+ u8 real_sz[];
};
struct ib_flow_spec_eth {
@@ -1890,7 +1913,7 @@ struct ib_flow_ib_filter {
__be16 dlid;
__u8 sl;
/* Must be last */
- u8 real_sz[0];
+ u8 real_sz[];
};
struct ib_flow_spec_ib {
@@ -1915,7 +1938,7 @@ struct ib_flow_ipv4_filter {
u8 ttl;
u8 flags;
/* Must be last */
- u8 real_sz[0];
+ u8 real_sz[];
};
struct ib_flow_spec_ipv4 {
@@ -1933,7 +1956,7 @@ struct ib_flow_ipv6_filter {
u8 traffic_class;
u8 hop_limit;
/* Must be last */
- u8 real_sz[0];
+ u8 real_sz[];
};
struct ib_flow_spec_ipv6 {
@@ -1947,7 +1970,7 @@ struct ib_flow_tcp_udp_filter {
__be16 dst_port;
__be16 src_port;
/* Must be last */
- u8 real_sz[0];
+ u8 real_sz[];
};
struct ib_flow_spec_tcp_udp {
@@ -1959,7 +1982,7 @@ struct ib_flow_spec_tcp_udp {
struct ib_flow_tunnel_filter {
__be32 tunnel_id;
- u8 real_sz[0];
+ u8 real_sz[];
};
/* ib_flow_spec_tunnel describes the Vxlan tunnel
@@ -1976,7 +1999,7 @@ struct ib_flow_esp_filter {
__be32 spi;
__be32 seq;
/* Must be last */
- u8 real_sz[0];
+ u8 real_sz[];
};
struct ib_flow_spec_esp {
@@ -1991,7 +2014,7 @@ struct ib_flow_gre_filter {
__be16 protocol;
__be32 key;
/* Must be last */
- u8 real_sz[0];
+ u8 real_sz[];
};
struct ib_flow_spec_gre {
@@ -2004,7 +2027,7 @@ struct ib_flow_spec_gre {
struct ib_flow_mpls_filter {
__be32 tag;
/* Must be last */
- u8 real_sz[0];
+ u8 real_sz[];
};
struct ib_flow_spec_mpls {
@@ -2068,7 +2091,7 @@ struct ib_flow_attr {
u16 priority;
u32 flags;
u8 num_of_specs;
- u8 port;
+ u32 port;
union ib_flow_spec flows[];
};
@@ -2137,7 +2160,6 @@ struct ib_flow_action {
};
struct ib_mad;
-struct ib_grh;
enum ib_process_mad_flags {
IB_MAD_IGNORE_MKEY = 1,
@@ -2173,15 +2195,17 @@ struct ib_port_data {
struct ib_port_immutable immutable;
spinlock_t pkey_list_lock;
+
+ spinlock_t netdev_lock;
+
struct list_head pkey_list;
struct ib_port_cache cache;
- spinlock_t netdev_lock;
struct net_device __rcu *netdev;
struct hlist_node ndev_hash_link;
struct rdma_port_counter port_counter;
- struct rdma_hw_stats *hw_stats;
+ struct ib_port *sysfs;
};
/* rdma netdev type - specifies protocol type */
@@ -2197,7 +2221,8 @@ enum rdma_netdev_t {
struct rdma_netdev {
void *clnt_priv;
struct ib_device *hca;
- u8 port_num;
+ u32 port_num;
+ int mtu;
/*
* cleanup function must be specified.
@@ -2217,6 +2242,8 @@ struct rdma_netdev {
int set_qkey, u32 qkey);
int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
union ib_gid *gid, u16 mlid);
+ /* timeout */
+ void (*tx_timeout)(struct net_device *dev, unsigned int txqueue);
};
struct rdma_netdev_alloc_params {
@@ -2225,13 +2252,14 @@ struct rdma_netdev_alloc_params {
unsigned int rxqs;
void *param;
- int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num,
+ int (*initialize_rdma_netdev)(struct ib_device *device, u32 port_num,
struct net_device *netdev, void *param);
};
struct ib_odp_counters {
atomic64_t faults;
atomic64_t invalidations;
+ atomic64_t prefetch;
};
struct ib_counters {
@@ -2259,8 +2287,13 @@ struct iw_cm_conn_param;
!__same_type(((struct drv_struct *)NULL)->member, \
struct ib_struct)))
-#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \
- ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))
+#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \
+ ((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
+ gfp, false))
+
+#define rdma_zalloc_drv_obj_numa(ib_dev, ib_type) \
+ ((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
+ GFP_KERNEL, true))
#define rdma_zalloc_drv_obj(ib_dev, ib_type) \
rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
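
These allocators are used by the core rather than by drivers directly: with a driver declaring INIT_RDMA_OBJ_SIZE(ib_pd, drv_pd, ibpd) in its ib_device_ops (drv_pd being a hypothetical driver struct embedding ib_pd), the core sizes the allocation to the full driver object, and the new _numa variant additionally requests memory local to the device's NUMA node:

	struct ib_pd *pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
	struct ib_pd *pd_numa = rdma_zalloc_drv_obj_numa(ibdev, ib_pd);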
@@ -2293,6 +2326,14 @@ struct ib_device_ops {
u32 uverbs_abi_ver;
unsigned int uverbs_no_driver_id_binding:1;
+ /*
+ * NOTE: New drivers should not make use of device_group; instead new
+ * device parameter should be exposed via netlink command. This
+ * mechanism exists only for existing drivers.
+ */
+ const struct attribute_group *device_group;
+ const struct attribute_group **port_groups;
+
int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
const struct ib_send_wr **bad_send_wr);
int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
@@ -2302,12 +2343,11 @@ struct ib_device_ops {
int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
- int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt);
int (*post_srq_recv)(struct ib_srq *srq,
const struct ib_recv_wr *recv_wr,
const struct ib_recv_wr **bad_recv_wr);
int (*process_mad)(struct ib_device *device, int process_mad_flags,
- u8 port_num, const struct ib_wc *in_wc,
+ u32 port_num, const struct ib_wc *in_wc,
const struct ib_grh *in_grh,
const struct ib_mad *in_mad, struct ib_mad *out_mad,
size_t *out_mad_size, u16 *out_mad_pkey_index);
@@ -2319,9 +2359,9 @@ struct ib_device_ops {
void (*get_dev_fw_str)(struct ib_device *device, char *str);
const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
int comp_vector);
- int (*query_port)(struct ib_device *device, u8 port_num,
+ int (*query_port)(struct ib_device *device, u32 port_num,
struct ib_port_attr *port_attr);
- int (*modify_port)(struct ib_device *device, u8 port_num,
+ int (*modify_port)(struct ib_device *device, u32 port_num,
int port_modify_mask,
struct ib_port_modify *port_modify);
/**
@@ -2330,10 +2370,10 @@ struct ib_device_ops {
* structure to avoid cache line misses when accessing struct ib_device
* in fast paths.
*/
- int (*get_port_immutable)(struct ib_device *device, u8 port_num,
+ int (*get_port_immutable)(struct ib_device *device, u32 port_num,
struct ib_port_immutable *immutable);
enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
- u8 port_num);
+ u32 port_num);
/**
* When calling get_netdev, the HW vendor's driver should return the
* net device of device @device at port @port_num or NULL if such
@@ -2342,7 +2382,8 @@ struct ib_device_ops {
* that this function returns NULL before the net device has finished
* NETDEV_UNREGISTER state.
*/
- struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num);
+ struct net_device *(*get_netdev)(struct ib_device *device,
+ u32 port_num);
/**
* rdma netdev operation
*
@@ -2350,11 +2391,11 @@ struct ib_device_ops {
* must return -EOPNOTSUPP if it doesn't support the specified type.
*/
struct net_device *(*alloc_rdma_netdev)(
- struct ib_device *device, u8 port_num, enum rdma_netdev_t type,
+ struct ib_device *device, u32 port_num, enum rdma_netdev_t type,
const char *name, unsigned char name_assign_type,
void (*setup)(struct net_device *));
- int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
+ int (*rdma_netdev_get_params)(struct ib_device *device, u32 port_num,
enum rdma_netdev_t type,
struct rdma_netdev_alloc_params *params);
/**
@@ -2362,7 +2403,7 @@ struct ib_device_ops {
* link layer is either IB or iWarp. It is no-op if @port_num port
* is RoCE link layer.
*/
- int (*query_gid)(struct ib_device *device, u8 port_num, int index,
+ int (*query_gid)(struct ib_device *device, u32 port_num, int index,
union ib_gid *gid);
/**
* When calling add_gid, the HW vendor's driver should add the gid
@@ -2387,7 +2428,7 @@ struct ib_device_ops {
* This function is only called when roce_gid_table is used.
*/
int (*del_gid)(const struct ib_gid_attr *attr, void **context);
- int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
+ int (*query_pkey)(struct ib_device *device, u32 port_num, u16 index,
u16 *pkey);
int (*alloc_ucontext)(struct ib_ucontext *context,
struct ib_udata *udata);
@@ -2402,12 +2443,14 @@ struct ib_device_ops {
void (*mmap_free)(struct rdma_user_mmap_entry *entry);
void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
- void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
- int (*create_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata);
+ int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
+ int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
+ struct ib_udata *udata);
+ int (*create_user_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
+ struct ib_udata *udata);
int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
- void (*destroy_ah)(struct ib_ah *ah, u32 flags);
+ int (*destroy_ah)(struct ib_ah *ah, u32 flags);
int (*create_srq)(struct ib_srq *srq,
struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata);
@@ -2415,10 +2458,9 @@ struct ib_device_ops {
enum ib_srq_attr_mask srq_attr_mask,
struct ib_udata *udata);
int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
- void (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
- struct ib_qp *(*create_qp)(struct ib_pd *pd,
- struct ib_qp_init_attr *qp_init_attr,
- struct ib_udata *udata);
+ int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
+ int (*create_qp)(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
+ struct ib_udata *udata);
int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_udata *udata);
int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
@@ -2427,18 +2469,23 @@ struct ib_device_ops {
int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
- void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
+ int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
struct ib_udata *udata);
- int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length,
- u64 virt_addr, int mr_access_flags,
- struct ib_pd *pd, struct ib_udata *udata);
+ struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset,
+ u64 length, u64 virt_addr, int fd,
+ int mr_access_flags,
+ struct ib_udata *udata);
+ struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start,
+ u64 length, u64 virt_addr,
+ int mr_access_flags, struct ib_pd *pd,
+ struct ib_udata *udata);
int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
- u32 max_num_sg, struct ib_udata *udata);
+ u32 max_num_sg);
struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
u32 max_num_data_sg,
u32 max_num_meta_sg);
@@ -2446,58 +2493,49 @@ struct ib_device_ops {
enum ib_uverbs_advise_mr_advice advice, u32 flags,
struct ib_sge *sg_list, u32 num_sge,
struct uverbs_attr_bundle *attrs);
+
+ /*
+ * Kernel users should universally support relaxed ordering (RO), as
+ * they are designed to read data only after observing the CQE and use
+ * the DMA API correctly.
+ *
+ * Some drivers implicitly enable RO if the platform supports it.
+ */
int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
struct ib_mr_status *mr_status);
- struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
- struct ib_udata *udata);
+ int (*alloc_mw)(struct ib_mw *mw, struct ib_udata *udata);
int (*dealloc_mw)(struct ib_mw *mw);
- struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
- int (*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list, int list_len,
- u64 iova);
- int (*unmap_fmr)(struct list_head *fmr_list);
- int (*dealloc_fmr)(struct ib_fmr *fmr);
int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
- struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
- struct ib_udata *udata);
+ int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
struct ib_flow *(*create_flow)(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
- int domain, struct ib_udata *udata);
+ struct ib_udata *udata);
int (*destroy_flow)(struct ib_flow *flow_id);
- struct ib_flow_action *(*create_flow_action_esp)(
- struct ib_device *device,
- const struct ib_flow_action_attrs_esp *attr,
- struct uverbs_attr_bundle *attrs);
int (*destroy_flow_action)(struct ib_flow_action *action);
- int (*modify_flow_action_esp)(
- struct ib_flow_action *action,
- const struct ib_flow_action_attrs_esp *attr,
- struct uverbs_attr_bundle *attrs);
- int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
+ int (*set_vf_link_state)(struct ib_device *device, int vf, u32 port,
int state);
- int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
+ int (*get_vf_config)(struct ib_device *device, int vf, u32 port,
struct ifla_vf_info *ivf);
- int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
+ int (*get_vf_stats)(struct ib_device *device, int vf, u32 port,
struct ifla_vf_stats *stats);
- int (*get_vf_guid)(struct ib_device *device, int vf, u8 port,
+ int (*get_vf_guid)(struct ib_device *device, int vf, u32 port,
struct ifla_vf_guid *node_guid,
struct ifla_vf_guid *port_guid);
- int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
+ int (*set_vf_guid)(struct ib_device *device, int vf, u32 port, u64 guid,
int type);
struct ib_wq *(*create_wq)(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
struct ib_udata *udata);
- void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
+ int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
u32 wq_attr_mask, struct ib_udata *udata);
- struct ib_rwq_ind_table *(*create_rwq_ind_table)(
- struct ib_device *device,
- struct ib_rwq_ind_table_init_attr *init_attr,
- struct ib_udata *udata);
+ int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ struct ib_udata *udata);
int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
struct ib_dm *(*alloc_dm)(struct ib_device *device,
struct ib_ucontext *context,
@@ -2507,8 +2545,8 @@ struct ib_device_ops {
struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
struct ib_dm_mr_attr *attr,
struct uverbs_attr_bundle *attrs);
- struct ib_counters *(*create_counters)(
- struct ib_device *device, struct uverbs_attr_bundle *attrs);
+ int (*create_counters)(struct ib_counters *counters,
+ struct uverbs_attr_bundle *attrs);
int (*destroy_counters)(struct ib_counters *counters);
int (*read_counters)(struct ib_counters *counters,
struct ib_counters_read_attr *counters_read_attr,
@@ -2519,13 +2557,14 @@ struct ib_device_ops {
unsigned int *meta_sg_offset);
/**
- * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
- * driver initialized data. The struct is kfree()'ed by the sysfs
- * core when the device is removed. A lifespan of -1 in the return
- * struct tells the core to set a default lifespan.
+ * alloc_hw_[device,port]_stats - Allocate a struct rdma_hw_stats and
+ * fill in the driver initialized data. The struct is kfree()'ed by
+ * the sysfs core when the device is removed. A lifespan of -1 in the
+ * return struct tells the core to set a default lifespan.
*/
- struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
- u8 port_num);
+ struct rdma_hw_stats *(*alloc_hw_device_stats)(struct ib_device *device);
+ struct rdma_hw_stats *(*alloc_hw_port_stats)(struct ib_device *device,
+ u32 port_num);
/**
* get_hw_stats - Fill in the counter value(s) in the stats struct.
* @index - The index in the value array we wish to have updated, or
@@ -2539,18 +2578,25 @@ struct ib_device_ops {
* one given in index at their option
*/
int (*get_hw_stats)(struct ib_device *device,
- struct rdma_hw_stats *stats, u8 port, int index);
- /*
- * This function is called once for each port when a ib device is
- * registered.
+ struct rdma_hw_stats *stats, u32 port, int index);
+
+ /**
+ * modify_hw_stat - Modify the counter configuration
+	 * @enable: true to enable a counter, false to disable it
+	 * Return codes - 0 on success or an error code otherwise.
*/
- int (*init_port)(struct ib_device *device, u8 port_num,
- struct kobject *port_sysfs);
+ int (*modify_hw_stat)(struct ib_device *device, u32 port,
+ unsigned int counter_index, bool enable);
/**
* Allows rdma drivers to add their own restrack attributes.
*/
- int (*fill_res_entry)(struct sk_buff *msg,
- struct rdma_restrack_entry *entry);
+ int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
+ int (*fill_res_mr_entry_raw)(struct sk_buff *msg, struct ib_mr *ibmr);
+ int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq);
+ int (*fill_res_cq_entry_raw)(struct sk_buff *msg, struct ib_cq *ibcq);
+ int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp);
+ int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp);
+ int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id);
/* Device lifecycle callbacks */
/*
@@ -2605,14 +2651,28 @@ struct ib_device_ops {
* Allows rdma drivers to add their own restrack attributes
* dumped via 'rdma stat' iproute2 command.
*/
- int (*fill_stat_entry)(struct sk_buff *msg,
- struct rdma_restrack_entry *entry);
+ int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
+
+ /* query driver for its ucontext properties */
+ int (*query_ucontext)(struct ib_ucontext *context,
+ struct uverbs_attr_bundle *attrs);
+
+ /*
+ * Provide NUMA node. This API exists for rdmavt/hfi1 only.
+	 * Everyone else relies on the Linux memory management model.
+ */
+ int (*get_numa_node)(struct ib_device *dev);
DECLARE_RDMA_OBJ_SIZE(ib_ah);
+ DECLARE_RDMA_OBJ_SIZE(ib_counters);
DECLARE_RDMA_OBJ_SIZE(ib_cq);
+ DECLARE_RDMA_OBJ_SIZE(ib_mw);
DECLARE_RDMA_OBJ_SIZE(ib_pd);
+ DECLARE_RDMA_OBJ_SIZE(ib_qp);
+ DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table);
DECLARE_RDMA_OBJ_SIZE(ib_srq);
DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
+ DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
};
struct ib_core_device {
@@ -2659,14 +2719,14 @@ struct ib_device {
struct ib_core_device coredev;
};
- /* First group for device attributes,
- * Second group for driver provided attributes (optional).
- * It is NULL terminated array.
+ /* First group is for device attributes,
+ * Second group is for driver provided attributes (optional).
+	 * Third group is for the hw_stats.
+ * It is a NULL terminated array.
*/
- const struct attribute_group *groups[3];
+ const struct attribute_group *groups[4];
u64 uverbs_cmd_mask;
- u64 uverbs_ex_cmd_mask;
char node_desc[IB_DEVICE_NODE_DESC_MAX];
__be64 node_guid;
@@ -2677,16 +2737,19 @@ struct ib_device {
/* CQ adaptive moderation (RDMA DIM) */
u16 use_cq_dim:1;
u8 node_type;
- u8 phys_port_cnt;
+ u32 phys_port_cnt;
struct ib_device_attr attrs;
- struct attribute_group *hw_stats_ag;
- struct rdma_hw_stats *hw_stats;
+ struct hw_stats_device_data *hw_stats_data;
#ifdef CONFIG_CGROUP_RDMA
struct rdmacg_device cg_device;
#endif
u32 index;
+
+ spinlock_t cq_pools_lock;
+ struct list_head cq_pools[IB_POLL_LAST_POOL_TYPE + 1];
+
struct rdma_restrack_root *res;
const struct uapi_definition *driver_def;
@@ -2709,12 +2772,22 @@ struct ib_device {
/* Used by iWarp CM */
char iw_ifname[IFNAMSIZ];
u32 iw_driver_flags;
+ u32 lag_flags;
};
+static inline void *rdma_zalloc_obj(struct ib_device *dev, size_t size,
+ gfp_t gfp, bool is_numa_aware)
+{
+ if (is_numa_aware && dev->ops.get_numa_node)
+ return kzalloc_node(size, gfp, dev->ops.get_numa_node(dev));
+
+ return kzalloc(size, gfp);
+}
+
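A minimal usage sketch, assuming the DECLARE_RDMA_OBJ_SIZE() machinery further down exposes the object size as ops.size_ib_qp (the wrapper name is hypothetical, not part of this header):

static struct ib_qp *example_alloc_qp_obj(struct ib_device *dev)
{
	/* Allocate near the device's NUMA node when the driver implements
	 * get_numa_node; falls back to a plain kzalloc() otherwise. */
	return rdma_zalloc_obj(dev, dev->ops.size_ib_qp, GFP_KERNEL, true);
}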
struct ib_client_nl_info;
struct ib_client {
const char *name;
- void (*add) (struct ib_device *);
+ int (*add)(struct ib_device *ibdev);
void (*remove)(struct ib_device *, void *client_data);
void (*rename)(struct ib_device *dev, void *client_data);
int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
@@ -2738,7 +2811,7 @@ struct ib_client {
* netdev. */
struct net_device *(*get_net_dev_by_params)(
struct ib_device *dev,
- u8 port,
+ u32 port,
u16 pkey,
const union ib_gid *gid,
const struct sockaddr *addr,
@@ -2778,7 +2851,8 @@ void ib_dealloc_device(struct ib_device *device);
void ib_get_device_fw_str(struct ib_device *device, char *str);
-int ib_register_device(struct ib_device *device, const char *name);
+int ib_register_device(struct ib_device *device, const char *name,
+ struct device *dma_device);
void ib_unregister_device(struct ib_device *device);
void ib_unregister_driver(enum rdma_driver_id driver_id);
void ib_unregister_device_and_put(struct ib_device *device);
@@ -2850,6 +2924,15 @@ int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
size_t length, u32 min_pgoff,
u32 max_pgoff);
+static inline int
+rdma_user_mmap_entry_insert_exact(struct ib_ucontext *ucontext,
+ struct rdma_user_mmap_entry *entry,
+ size_t length, u32 pgoff)
+{
+ return rdma_user_mmap_entry_insert_range(ucontext, entry, length, pgoff,
+ pgoff);
+}
+
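A hedged driver-side sketch of the new helper (the function name and the fixed pgoff of 0 are illustrative, not any particular driver's layout):

static int example_insert_db_entry(struct ib_ucontext *uctx,
				   struct rdma_user_mmap_entry *entry)
{
	/* Pin a doorbell mmap entry at a well-known page offset so
	 * userspace can always mmap it at pgoff 0. */
	return rdma_user_mmap_entry_insert_exact(uctx, entry, PAGE_SIZE, 0);
}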
struct rdma_user_mmap_entry *
rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
unsigned long pgoff);
@@ -2896,46 +2979,6 @@ static inline bool ib_is_udata_cleared(struct ib_udata *udata,
}
/**
- * ib_is_destroy_retryable - Check whether the uobject destruction
- * is retryable.
- * @ret: The initial destruction return code
- * @why: remove reason
- * @uobj: The uobject that is destroyed
- *
- * This function is a helper function that IB layer and low-level drivers
- * can use to consider whether the destruction of the given uobject is
- * retry-able.
- * It checks the original return code, if it wasn't success the destruction
- * is retryable according to the ucontext state (i.e. cleanup_retryable) and
- * the remove reason. (i.e. why).
- * Must be called with the object locked for destroy.
- */
-static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
- struct ib_uobject *uobj)
-{
- return ret && (why == RDMA_REMOVE_DESTROY ||
- uobj->context->cleanup_retryable);
-}
-
-/**
- * ib_destroy_usecnt - Called during destruction to check the usecnt
- * @usecnt: The usecnt atomic
- * @why: remove reason
- * @uobj: The uobject that is destroyed
- *
- * Non-zero usecnts will block destruction unless destruction was triggered by
- * a ucontext cleanup.
- */
-static inline int ib_destroy_usecnt(atomic_t *usecnt,
- enum rdma_remove_reason why,
- struct ib_uobject *uobj)
-{
- if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
- return -EBUSY;
- return 0;
-}
-
-/**
* ib_modify_qp_is_ok - Check that the supplied attribute mask
* contains all required attributes and no attributes not allowed for
* the given QP state transition.
@@ -2958,10 +3001,10 @@ void ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(const struct ib_event *event);
int ib_query_port(struct ib_device *device,
- u8 port_num, struct ib_port_attr *port_attr);
+ u32 port_num, struct ib_port_attr *port_attr);
enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
- u8 port_num);
+ u32 port_num);
/**
* rdma_cap_ib_switch - Check if the device is IB switch
@@ -2985,7 +3028,7 @@ static inline bool rdma_cap_ib_switch(const struct ib_device *device)
*
* Return start port number
*/
-static inline u8 rdma_start_port(const struct ib_device *device)
+static inline u32 rdma_start_port(const struct ib_device *device)
{
return rdma_cap_ib_switch(device) ? 0 : 1;
}
@@ -2996,9 +3039,10 @@ static inline u8 rdma_start_port(const struct ib_device *device)
* @iter - The unsigned int to store the port number
*/
#define rdma_for_each_port(device, iter) \
- for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type( \
- unsigned int, iter))); \
- iter <= rdma_end_port(device); (iter)++)
+ for (iter = rdma_start_port(device + \
+ BUILD_BUG_ON_ZERO(!__same_type(u32, \
+ iter))); \
+ iter <= rdma_end_port(device); iter++)
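A minimal sketch of the iteration pattern (the helper name is hypothetical). Note the iterator must be a u32/unsigned int, which the BUILD_BUG_ON_ZERO() above enforces at compile time:

static unsigned int example_count_ib_ports(struct ib_device *dev)
{
	unsigned int port, n = 0;

	/* Visit every valid port, whether the device starts at 0 or 1. */
	rdma_for_each_port(dev, port)
		if (rdma_protocol_ib(dev, port))
			n++;
	return n;
}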
/**
* rdma_end_port - Return the last valid port number for the device
@@ -3008,7 +3052,7 @@ static inline u8 rdma_start_port(const struct ib_device *device)
*
* Return last port number
*/
-static inline u8 rdma_end_port(const struct ib_device *device)
+static inline u32 rdma_end_port(const struct ib_device *device)
{
return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
}
@@ -3021,55 +3065,63 @@ static inline int rdma_is_port_valid(const struct ib_device *device,
}
static inline bool rdma_is_grh_required(const struct ib_device *device,
- u8 port_num)
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_PORT_IB_GRH_REQUIRED;
}
-static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
+static inline bool rdma_protocol_ib(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_PROT_IB;
}
-static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
+static inline bool rdma_protocol_roce(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
(RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
}
-static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
+static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
}
-static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
+static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_PROT_ROCE;
}
-static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
+static inline bool rdma_protocol_iwarp(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_PROT_IWARP;
}
-static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
+static inline bool rdma_ib_or_roce(const struct ib_device *device,
+ u32 port_num)
{
return rdma_protocol_ib(device, port_num) ||
rdma_protocol_roce(device, port_num);
}
-static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
+static inline bool rdma_protocol_raw_packet(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_PROT_RAW_PACKET;
}
-static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
+static inline bool rdma_protocol_usnic(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_PROT_USNIC;
@@ -3087,7 +3139,7 @@ static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_n
*
* Return: true if the port supports sending/receiving of MAD packets.
*/
-static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_ib_mad(const struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_IB_MAD;
@@ -3112,7 +3164,7 @@ static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
*
* Return: true if the port supports OPA MAD packet formats.
*/
-static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_opa_mad(struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_OPA_MAD;
@@ -3138,7 +3190,7 @@ static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
*
* Return: true if the port provides an SMI.
*/
-static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_ib_smi(const struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_IB_SMI;
@@ -3159,7 +3211,7 @@ static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
* Return: true if the port supports an IB CM (this does not guarantee that
* a CM is actually running however).
*/
-static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_ib_cm(const struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_IB_CM;
@@ -3177,7 +3229,7 @@ static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
* Return: true if the port supports an iWARP CM (this does not guarantee that
* a CM is actually running however).
*/
-static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_iw_cm(const struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_IW_CM;
@@ -3198,7 +3250,7 @@ static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
* Administration interface. This does not imply that the SA service is
* running locally.
*/
-static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_ib_sa(const struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_IB_SA;
@@ -3221,7 +3273,8 @@ static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
* overhead of registering/unregistering with the SM and tracking of the
* total number of queue pairs attached to the multicast group.
*/
-static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_ib_mcast(const struct ib_device *device,
+ u32 port_num)
{
return rdma_cap_ib_sa(device, port_num);
}
@@ -3239,7 +3292,7 @@ static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num
* Return: true if the port uses a GID address to identify devices on the
* network.
*/
-static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_af_ib(const struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_AF_IB;
@@ -3261,7 +3314,7 @@ static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
* addition of a Global Route Header built from our Ethernet Address
* Handle into our header list for connectionless packets.
*/
-static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_eth_ah(const struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_ETH_AH;
@@ -3276,7 +3329,7 @@ static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
* Return: true if we are running on an OPA device which supports
* the extended OPA addressing.
*/
-static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_opa_ah(struct ib_device *device, u32 port_num)
{
return (device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
@@ -3294,7 +3347,8 @@ static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
* Return the max MAD size required by the Port. Will return 0 if the port
* does not support MADs
*/
-static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
+static inline size_t rdma_max_mad_size(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.max_mad_size;
}
@@ -3313,7 +3367,7 @@ static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_n
* its GIDs.
*/
static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
- u8 port_num)
+ u32 port_num)
{
return rdma_protocol_roce(device, port_num) &&
device->ops.add_gid && device->ops.del_gid;
@@ -3332,57 +3386,82 @@ static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
}
/**
- * rdma_find_pg_bit - Find page bit given address and HW supported page sizes
+ * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not.
+ * @device: Device
+ * @port_num: 1 based Port number
*
- * @addr: address
- * @pgsz_bitmap: bitmap of HW supported page sizes
+ * Return true if the port is an Intel OPA port, false if not.
*/
-static inline unsigned int rdma_find_pg_bit(unsigned long addr,
- unsigned long pgsz_bitmap)
+static inline bool rdma_core_cap_opa_port(struct ib_device *device,
+ u32 port_num)
{
- unsigned long align;
- unsigned long pgsz;
-
- align = addr & -addr;
+ return (device->port_data[port_num].immutable.core_cap_flags &
+ RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA;
+}
- /* Find page bit such that addr is aligned to the highest supported
- * HW page size
- */
- pgsz = pgsz_bitmap & ~(-align << 1);
- if (!pgsz)
- return __ffs(pgsz_bitmap);
+/**
+ * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value.
+ * @device: Device
+ * @port_num: Port number
+ * @mtu: enum value of MTU
+ *
+ * Return the MTU size supported by the port as an integer value. Will return
+ * -1 if the enum value of mtu is not supported.
+ */
+static inline int rdma_mtu_enum_to_int(struct ib_device *device, u32 port,
+ int mtu)
+{
+ if (rdma_core_cap_opa_port(device, port))
+ return opa_mtu_enum_to_int((enum opa_mtu)mtu);
+ else
+ return ib_mtu_enum_to_int((enum ib_mtu)mtu);
+}
- return __fls(pgsz);
+/**
+ * rdma_mtu_from_attr - Return the mtu of the port from the port attribute.
+ * @device: Device
+ * @port_num: Port number
+ * @attr: port attribute
+ *
+ * Return the MTU size supported by the port as an integer value.
+ */
+static inline int rdma_mtu_from_attr(struct ib_device *device, u32 port,
+ struct ib_port_attr *attr)
+{
+ if (rdma_core_cap_opa_port(device, port))
+ return attr->phys_mtu;
+ else
+ return ib_mtu_enum_to_int(attr->max_mtu);
}
-int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
+int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
int state);
-int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
+int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
struct ifla_vf_info *info);
-int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
+int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
struct ifla_vf_stats *stats);
-int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
struct ifla_vf_guid *node_guid,
struct ifla_vf_guid *port_guid);
-int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
+int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
int type);
int ib_query_pkey(struct ib_device *device,
- u8 port_num, u16 index, u16 *pkey);
+ u32 port_num, u16 index, u16 *pkey);
int ib_modify_device(struct ib_device *device,
int device_modify_mask,
struct ib_device_modify *device_modify);
int ib_modify_port(struct ib_device *device,
- u8 port_num, int port_modify_mask,
+ u32 port_num, int port_modify_mask,
struct ib_port_modify *port_modify);
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
- u8 *port_num, u16 *index);
+ u32 *port_num, u16 *index);
int ib_find_pkey(struct ib_device *device,
- u8 port_num, u16 pkey, u16 *index);
+ u32 port_num, u16 pkey, u16 *index);
enum ib_pd_flags {
/*
@@ -3400,15 +3479,21 @@ enum ib_pd_flags {
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
const char *caller);
+/**
+ * ib_alloc_pd - Allocates an unused protection domain.
+ * @device: The device on which to allocate the protection domain.
+ * @flags: protection domain flags
+ *
+ * A protection domain object provides an association between QPs, shared
+ * receive queues, address handles, memory regions, and memory windows.
+ *
+ * Every PD has a local_dma_lkey which can be used as the lkey value for local
+ * memory operations.
+ */
#define ib_alloc_pd(device, flags) \
__ib_alloc_pd((device), (flags), KBUILD_MODNAME)
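A typical kernel-ULP sketch (names hypothetical): ib_alloc_pd() returns an ERR_PTR() on failure, never NULL, and the resulting pd->local_dma_lkey can be used directly in local SGEs.

static int example_setup_pd(struct ib_device *dev, struct ib_pd **pd_out)
{
	struct ib_pd *pd = ib_alloc_pd(dev, 0);

	if (IS_ERR(pd))
		return PTR_ERR(pd);
	*pd_out = pd;	/* pd->local_dma_lkey is valid from here on */
	return 0;
}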
-/**
- * ib_dealloc_pd_user - Deallocate kernel/user PD
- * @pd: The protection domain
- * @udata: Valid user data or NULL for kernel objects
- */
-void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
+int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
/**
* ib_dealloc_pd - Deallocate kernel PD
@@ -3418,7 +3503,9 @@ void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
*/
static inline void ib_dealloc_pd(struct ib_pd *pd)
{
- ib_dealloc_pd_user(pd, NULL);
+ int ret = ib_dealloc_pd_user(pd, NULL);
+
+ WARN_ONCE(ret, "Destroy of kernel PD shouldn't fail");
}
enum rdma_create_ah_flags {
@@ -3489,7 +3576,7 @@ int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
* attributes which are initialized using ib_init_ah_attr_from_wc().
*
*/
-int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
+int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
const struct ib_wc *wc, const struct ib_grh *grh,
struct rdma_ah_attr *ah_attr);
@@ -3506,7 +3593,7 @@ int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
* in all UD QP post sends.
*/
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
- const struct ib_grh *grh, u8 port_num);
+ const struct ib_grh *grh, u32 port_num);
/**
* rdma_modify_ah - Modifies the address vector associated with an address
@@ -3546,26 +3633,25 @@ int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
*
* NOTE: for user ah use rdma_destroy_ah_user with valid udata!
*/
-static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
+static inline void rdma_destroy_ah(struct ib_ah *ah, u32 flags)
{
- return rdma_destroy_ah_user(ah, flags, NULL);
+ int ret = rdma_destroy_ah_user(ah, flags, NULL);
+
+ WARN_ONCE(ret, "Destroy of kernel AH shouldn't fail");
}
-/**
- * ib_create_srq - Creates a SRQ associated with the specified protection
- * domain.
- * @pd: The protection domain associated with the SRQ.
- * @srq_init_attr: A list of initial attributes required to create the
- * SRQ. If SRQ creation succeeds, then the attributes are updated to
- * the actual capabilities of the created SRQ.
- *
- * srq_attr->max_wr and srq_attr->max_sge are read the determine the
- * requested size of the SRQ, and set to the actual values allocated
- * on return. If ib_create_srq() succeeds, then max_wr and max_sge
- * will always be at least as large as the requested values.
- */
-struct ib_srq *ib_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *srq_init_attr);
+struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_usrq_object *uobject,
+ struct ib_udata *udata);
+static inline struct ib_srq *
+ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr)
+{
+ if (!pd->device->ops.create_srq)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return ib_create_srq_user(pd, srq_init_attr, NULL, NULL);
+}
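A hedged sketch of the fallback pattern this enables (attribute values are illustrative): devices lacking ops.create_srq now fail fast with -EOPNOTSUPP, so a ULP can detect the error and fall back to per-QP receive queues.

static struct ib_srq *example_create_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr attr = {
		.attr = { .max_wr = 32, .max_sge = 1 },
	};

	/* ERR_PTR(-EOPNOTSUPP) when the device has no SRQ support */
	return ib_create_srq(pd, &attr);
}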
/**
* ib_modify_srq - Modifies the attributes for the specified SRQ.
@@ -3605,9 +3691,11 @@ int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
*
* NOTE: for user srq use ib_destroy_srq_user with valid udata!
*/
-static inline int ib_destroy_srq(struct ib_srq *srq)
+static inline void ib_destroy_srq(struct ib_srq *srq)
{
- return ib_destroy_srq_user(srq, NULL);
+ int ret = ib_destroy_srq_user(srq, NULL);
+
+ WARN_ONCE(ret, "Destroy of kernel SRQ shouldn't fail");
}
/**
@@ -3627,34 +3715,21 @@ static inline int ib_post_srq_recv(struct ib_srq *srq,
bad_recv_wr ? : &dummy);
}
+struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
+ struct ib_qp_init_attr *qp_init_attr,
+ const char *caller);
/**
- * ib_create_qp_user - Creates a QP associated with the specified protection
- * domain.
+ * ib_create_qp - Creates a kernel QP associated with the specific protection
+ * domain.
* @pd: The protection domain associated with the QP.
- * @qp_init_attr: A list of initial attributes required to create the
+ * @init_attr: A list of initial attributes required to create the
* QP. If QP creation succeeds, then the attributes are updated to
* the actual capabilities of the created QP.
- * @udata: Valid user data or NULL for kernel objects
- */
-struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
- struct ib_qp_init_attr *qp_init_attr,
- struct ib_udata *udata);
-
-/**
- * ib_create_qp - Creates a kernel QP associated with the specified protection
- * domain.
- * @pd: The protection domain associated with the QP.
- * @qp_init_attr: A list of initial attributes required to create the
- * QP. If QP creation succeeds, then the attributes are updated to
- * the actual capabilities of the created QP.
- * @udata: Valid user data or NULL for kernel objects
- *
- * NOTE: for user qp use ib_create_qp_user with valid udata!
*/
static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *qp_init_attr)
+ struct ib_qp_init_attr *init_attr)
{
- return ib_create_qp_user(pd, qp_init_attr, NULL);
+ return ib_create_qp_kernel(pd, init_attr, KBUILD_MODNAME);
}
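A minimal sketch of kernel QP creation against this interface (the capability numbers are arbitrary, not tuned for any driver):

static struct ib_qp *example_create_rc_qp(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr attr = {
		.qp_type = IB_QPT_RC,
		.send_cq = cq,
		.recv_cq = cq,
		.cap = {
			.max_send_wr  = 16,
			.max_recv_wr  = 16,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
	};

	return ib_create_qp(pd, &attr);	/* ERR_PTR() on failure */
}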
/**
@@ -3778,46 +3853,15 @@ static inline int ib_post_recv(struct ib_qp *qp,
return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
}
-struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
- int nr_cqe, int comp_vector,
- enum ib_poll_context poll_ctx,
- const char *caller, struct ib_udata *udata);
-
-/**
- * ib_alloc_cq_user: Allocate kernel/user CQ
- * @dev: The IB device
- * @private: Private data attached to the CQE
- * @nr_cqe: Number of CQEs in the CQ
- * @comp_vector: Completion vector used for the IRQs
- * @poll_ctx: Context used for polling the CQ
- * @udata: Valid user data or NULL for kernel objects
- */
-static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev,
- void *private, int nr_cqe,
- int comp_vector,
- enum ib_poll_context poll_ctx,
- struct ib_udata *udata)
-{
- return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
- KBUILD_MODNAME, udata);
-}
-
-/**
- * ib_alloc_cq: Allocate kernel CQ
- * @dev: The IB device
- * @private: Private data attached to the CQE
- * @nr_cqe: Number of CQEs in the CQ
- * @comp_vector: Completion vector used for the IRQs
- * @poll_ctx: Context used for polling the CQ
- *
- * NOTE: for user cq use ib_alloc_cq_user with valid udata!
- */
+struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
+ int comp_vector, enum ib_poll_context poll_ctx,
+ const char *caller);
static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
int nr_cqe, int comp_vector,
enum ib_poll_context poll_ctx)
{
- return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
- NULL);
+ return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
+ KBUILD_MODNAME);
}
struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
@@ -3839,24 +3883,7 @@ static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
KBUILD_MODNAME);
}
-/**
- * ib_free_cq_user - Free kernel/user CQ
- * @cq: The CQ to free
- * @udata: Valid user data or NULL for kernel objects
- */
-void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);
-
-/**
- * ib_free_cq - Free kernel CQ
- * @cq: The CQ to free
- *
- * NOTE: for user cq use ib_free_cq_user with valid udata!
- */
-static inline void ib_free_cq(struct ib_cq *cq)
-{
- ib_free_cq_user(cq, NULL);
-}
-
+void ib_free_cq(struct ib_cq *cq);
int ib_process_cq_direct(struct ib_cq *cq, int budget);
/**
@@ -3914,7 +3941,9 @@ int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
*/
static inline void ib_destroy_cq(struct ib_cq *cq)
{
- ib_destroy_cq_user(cq, NULL);
+ int ret = ib_destroy_cq_user(cq, NULL);
+
+ WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
}
/**
@@ -3968,18 +3997,31 @@ static inline int ib_req_notify_cq(struct ib_cq *cq,
return cq->device->ops.req_notify_cq(cq, flags);
}
-/**
- * ib_req_ncomp_notif - Request completion notification when there are
- * at least the specified number of unreaped completions on the CQ.
- * @cq: The CQ to generate an event for.
- * @wc_cnt: The number of unreaped completions that should be on the
- * CQ before an event is generated.
+struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
+ int comp_vector_hint,
+ enum ib_poll_context poll_ctx);
+
+void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);
+
+/*
+ * Drivers that don't need a DMA mapping at the RDMA layer set dma_device to
+ * NULL. This causes the ib_dma* helpers to simply stash the kernel virtual
+ * address in the dma address.
*/
-static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
+static inline bool ib_uses_virt_dma(struct ib_device *dev)
{
- return cq->device->ops.req_ncomp_notif ?
- cq->device->ops.req_ncomp_notif(cq, wc_cnt) :
- -ENOSYS;
+ return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
+}
+
+/*
+ * Check if an IB device's underlying DMA mapping supports P2PDMA transfers.
+ */
+static inline bool ib_dma_pci_p2p_dma_supported(struct ib_device *dev)
+{
+ if (ib_uses_virt_dma(dev))
+ return false;
+
+ return dma_pci_p2pdma_supported(dev->dma_device);
}
/**
@@ -3989,6 +4031,8 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
*/
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
+ if (ib_uses_virt_dma(dev))
+ return 0;
return dma_mapping_error(dev->dma_device, dma_addr);
}
@@ -4003,6 +4047,8 @@ static inline u64 ib_dma_map_single(struct ib_device *dev,
void *cpu_addr, size_t size,
enum dma_data_direction direction)
{
+ if (ib_uses_virt_dma(dev))
+ return (uintptr_t)cpu_addr;
return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}
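A hedged usage sketch (helper name hypothetical): callers use the same mapping and error-check sequence whether or not the device uses virt DMA; with a NULL dma_device the "mapping" is just the kernel virtual address.

static int example_map_buf(struct ib_device *dev, void *buf, size_t len,
			   u64 *dma_addr)
{
	*dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, *dma_addr))
		return -ENOMEM;
	return 0;
}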
@@ -4017,7 +4063,8 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
u64 addr, size_t size,
enum dma_data_direction direction)
{
- dma_unmap_single(dev->dma_device, addr, size, direction);
+ if (!ib_uses_virt_dma(dev))
+ dma_unmap_single(dev->dma_device, addr, size, direction);
}
/**
@@ -4034,6 +4081,8 @@ static inline u64 ib_dma_map_page(struct ib_device *dev,
size_t size,
enum dma_data_direction direction)
{
+ if (ib_uses_virt_dma(dev))
+ return (uintptr_t)(page_address(page) + offset);
return dma_map_page(dev->dma_device, page, offset, size, direction);
}
@@ -4048,7 +4097,63 @@ static inline void ib_dma_unmap_page(struct ib_device *dev,
u64 addr, size_t size,
enum dma_data_direction direction)
{
- dma_unmap_page(dev->dma_device, addr, size, direction);
+ if (!ib_uses_virt_dma(dev))
+ dma_unmap_page(dev->dma_device, addr, size, direction);
+}
+
+int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents);
+static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction,
+ unsigned long dma_attrs)
+{
+ if (ib_uses_virt_dma(dev))
+ return ib_dma_virt_map_sg(dev, sg, nents);
+ return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
+ dma_attrs);
+}
+
+static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction,
+ unsigned long dma_attrs)
+{
+ if (!ib_uses_virt_dma(dev))
+ dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
+ dma_attrs);
+}
+
+/**
+ * ib_dma_map_sgtable_attrs - Map a scatter/gather table to DMA addresses
+ * @dev: The device for which the DMA addresses are to be created
+ * @sgt: The sg_table object describing the buffer
+ * @direction: The direction of the DMA
+ * @dma_attrs: Optional DMA attributes for the map operation
+ */
+static inline int ib_dma_map_sgtable_attrs(struct ib_device *dev,
+ struct sg_table *sgt,
+ enum dma_data_direction direction,
+ unsigned long dma_attrs)
+{
+ int nents;
+
+ if (ib_uses_virt_dma(dev)) {
+ nents = ib_dma_virt_map_sg(dev, sgt->sgl, sgt->orig_nents);
+ if (!nents)
+ return -EIO;
+ sgt->nents = nents;
+ return 0;
+ }
+ return dma_map_sgtable(dev->dma_device, sgt, direction, dma_attrs);
+}
+
+static inline void ib_dma_unmap_sgtable_attrs(struct ib_device *dev,
+ struct sg_table *sgt,
+ enum dma_data_direction direction,
+ unsigned long dma_attrs)
+{
+ if (!ib_uses_virt_dma(dev))
+ dma_unmap_sgtable(dev->dma_device, sgt, direction, dma_attrs);
}
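A minimal map/unmap sketch around an I/O (error handling compressed; names hypothetical):

static int example_dma_sgtable(struct ib_device *dev, struct sg_table *sgt)
{
	int ret = ib_dma_map_sgtable_attrs(dev, sgt, DMA_BIDIRECTIONAL, 0);

	if (ret)
		return ret;
	/* ... post work requests that reference sgt here ... */
	ib_dma_unmap_sgtable_attrs(dev, sgt, DMA_BIDIRECTIONAL, 0);
	return 0;
}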
/**
@@ -4062,7 +4167,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
- return dma_map_sg(dev->dma_device, sg, nents, direction);
+ return ib_dma_map_sg_attrs(dev, sg, nents, direction, 0);
}
/**
@@ -4076,24 +4181,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
- dma_unmap_sg(dev->dma_device, sg, nents, direction);
-}
-
-static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction,
- unsigned long dma_attrs)
-{
- return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
- dma_attrs);
-}
-
-static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction,
- unsigned long dma_attrs)
-{
- dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
+ ib_dma_unmap_sg_attrs(dev, sg, nents, direction, 0);
}
/**
@@ -4104,6 +4192,8 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
*/
static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
{
+ if (ib_uses_virt_dma(dev))
+ return UINT_MAX;
return dma_get_max_seg_size(dev->dma_device);
}
@@ -4119,7 +4209,8 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
size_t size,
enum dma_data_direction dir)
{
- dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+ if (!ib_uses_virt_dma(dev))
+ dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}
/**
@@ -4134,36 +4225,8 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
size_t size,
enum dma_data_direction dir)
{
- dma_sync_single_for_device(dev->dma_device, addr, size, dir);
-}
-
-/**
- * ib_dma_alloc_coherent - Allocate memory and map it for DMA
- * @dev: The device for which the DMA address is requested
- * @size: The size of the region to allocate in bytes
- * @dma_handle: A pointer for returning the DMA address of the region
- * @flag: memory allocator flags
- */
-static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
- size_t size,
- dma_addr_t *dma_handle,
- gfp_t flag)
-{
- return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
-}
-
-/**
- * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
- * @dev: The device for which the DMA addresses were allocated
- * @size: The size of the region
- * @cpu_addr: the address returned by ib_dma_alloc_coherent()
- * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
- */
-static inline void ib_dma_free_coherent(struct ib_device *dev,
- size_t size, void *cpu_addr,
- dma_addr_t dma_handle)
-{
- dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
+ if (!ib_uses_virt_dma(dev))
+ dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}
/* ib_reg_user_mr - register a memory region for virtual addresses from kernel
@@ -4199,14 +4262,8 @@ static inline int ib_dereg_mr(struct ib_mr *mr)
return ib_dereg_mr_user(mr, NULL);
}
-struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
- u32 max_num_sg, struct ib_udata *udata);
-
-static inline struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type, u32 max_num_sg)
-{
- return ib_alloc_mr_user(pd, mr_type, max_num_sg, NULL);
-}
+struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg);
struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
u32 max_num_data_sg,
@@ -4236,45 +4293,6 @@ static inline u32 ib_inc_rkey(u32 rkey)
}
/**
- * ib_alloc_fmr - Allocates a unmapped fast memory region.
- * @pd: The protection domain associated with the unmapped region.
- * @mr_access_flags: Specifies the memory access rights.
- * @fmr_attr: Attributes of the unmapped region.
- *
- * A fast memory region must be mapped before it can be used as part of
- * a work request.
- */
-struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
- int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
-
-/**
- * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
- * @fmr: The fast memory region to associate with the pages.
- * @page_list: An array of physical pages to map to the fast memory region.
- * @list_len: The number of pages in page_list.
- * @iova: The I/O virtual address to use with the mapped region.
- */
-static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
- u64 *page_list, int list_len,
- u64 iova)
-{
- return fmr->device->ops.map_phys_fmr(fmr, page_list, list_len, iova);
-}
-
-/**
- * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
- * @fmr_list: A linked list of fast memory regions to unmap.
- */
-int ib_unmap_fmr(struct list_head *fmr_list);
-
-/**
- * ib_dealloc_fmr - Deallocates a fast memory region.
- * @fmr: The fast memory region to deallocate.
- */
-int ib_dealloc_fmr(struct ib_fmr *fmr);
-
-/**
* ib_attach_mcast - Attaches the specified QP to a multicast group.
* @qp: QP to attach to the multicast group. The QP must be type
* IB_QPT_UD.
@@ -4296,23 +4314,12 @@ int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
*/
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
-/**
- * ib_alloc_xrcd - Allocates an XRC domain.
- * @device: The device on which to allocate the XRC domain.
- * @caller: Module name for kernel consumers
- */
-struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
-#define ib_alloc_xrcd(device) \
- __ib_alloc_xrcd((device), KBUILD_MODNAME)
+struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
+ struct inode *inode, struct ib_udata *udata);
+int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);
-/**
- * ib_dealloc_xrcd - Deallocates an XRC domain.
- * @xrcd: The XRC domain to deallocate.
- * @udata: Valid user data or NULL for kernel object
- */
-int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
-
-static inline int ib_check_mr_access(int flags)
+static inline int ib_check_mr_access(struct ib_device *ib_dev,
+ unsigned int flags)
{
/*
* Local write permission is required if remote write or
@@ -4325,6 +4332,9 @@ static inline int ib_check_mr_access(int flags)
if (flags & ~IB_ACCESS_SUPPORTED)
return -EINVAL;
+ if (flags & IB_ACCESS_ON_DEMAND &&
+ !(ib_dev->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING))
+ return -EINVAL;
return 0;
}
@@ -4380,22 +4390,16 @@ struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
enum rdma_driver_id driver_id);
struct ib_device *ib_device_get_by_name(const char *name,
enum rdma_driver_id driver_id);
-struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
+struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
u16 pkey, const union ib_gid *gid,
const struct sockaddr *addr);
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
unsigned int port);
-struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);
+struct net_device *ib_device_netdev(struct ib_device *dev, u32 port);
struct ib_wq *ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr);
-int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
-int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
- u32 wq_attr_mask);
-struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
- struct ib_rwq_ind_table_init_attr*
- wq_ind_table_init_attr);
-int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
+int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset, unsigned int page_size);
@@ -4423,7 +4427,8 @@ void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);
-int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
+int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed,
+ u8 *width);
static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
{
@@ -4491,12 +4496,12 @@ static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
return false;
}
-static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
+static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u32 port_num)
{
attr->port_num = port_num;
}
-static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
+static inline u32 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
{
return attr->port_num;
}
@@ -4594,7 +4599,7 @@ void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
* @port_num: Port number
*/
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
- u8 port_num)
+ u32 port_num)
{
if (rdma_protocol_roce(dev, port_num))
return RDMA_AH_ATTR_TYPE_ROCE;
@@ -4609,7 +4614,7 @@ static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
/**
* ib_lid_cpu16 - Return lid in 16bit CPU encoding.
- * In the current implementation the only way to get
+ * In the current implementation the only way to
* get the 32bit lid is from other sources for OPA.
* For IB, lids will always be 16bits so cast the
* value accordingly.
@@ -4666,40 +4671,18 @@ struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
-struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
+struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
enum rdma_netdev_t type, const char *name,
unsigned char name_assign_type,
void (*setup)(struct net_device *));
-int rdma_init_netdev(struct ib_device *device, u8 port_num,
+int rdma_init_netdev(struct ib_device *device, u32 port_num,
enum rdma_netdev_t type, const char *name,
unsigned char name_assign_type,
void (*setup)(struct net_device *),
struct net_device *netdev);
/**
- * rdma_set_device_sysfs_group - Set device attributes group to have
- * driver specific sysfs entries at
- * for infiniband class.
- *
- * @device: device pointer for which attributes to be created
- * @group: Pointer to group which should be added when device
- * is registered with sysfs.
- * rdma_set_device_sysfs_group() allows existing drivers to expose one
- * group per device to have sysfs attributes.
- *
- * NOTE: New drivers should not make use of this API; instead new device
- * parameter should be exposed via netlink command. This API and mechanism
- * exist only for existing drivers.
- */
-static inline void
-rdma_set_device_sysfs_group(struct ib_device *dev,
- const struct attribute_group *group)
-{
- dev->groups[1] = group;
-}
-
-/**
* rdma_device_to_ibdev - Get ib_device pointer from device pointer
*
* @device: device pointer for which ib_device pointer to retrieve
@@ -4716,16 +4699,94 @@ static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
}
/**
+ * ibdev_to_node - return the NUMA node for a given ib_device
+ * @dev: device to get the NUMA node for.
+ */
+static inline int ibdev_to_node(struct ib_device *ibdev)
+{
+ struct device *parent = ibdev->dev.parent;
+
+ if (!parent)
+ return NUMA_NO_NODE;
+ return dev_to_node(parent);
+}
+
+/**
* rdma_device_to_drv_device - Helper macro to reach back to driver's
* ib_device holder structure from device pointer.
*
* NOTE: New drivers should not make use of this API; This API is only for
* existing drivers who have exposed sysfs entries using
- * rdma_set_device_sysfs_group().
+ * ops->device_group.
*/
#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \
container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
bool rdma_dev_access_netns(const struct ib_device *device,
const struct net *net);
+
+#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
+#define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
+#define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)
+
+/**
+ * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
+ * on the flow_label
+ *
+ * This function converts the 20 bit flow_label input to a valid 14 bit
+ * RoCE v2 UDP src port value. All RoCE v2 drivers should use this same
+ * convention.
+ */
+static inline u16 rdma_flow_label_to_udp_sport(u32 fl)
+{
+ u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000;
+
+ fl_low ^= fl_high >> 14;
+ return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
+}
+
+/**
+ * rdma_calc_flow_label - generate a RDMA symmetric flow label value based on
+ * local and remote qpn values
+ *
+ * This function folded the multiplication results of two qpns, 24 bit each,
+ * fields, and converts it to a 20 bit results.
+ *
+ * This function will create symmetric flow_label value based on the local
+ * and remote qpn values. this will allow both the requester and responder
+ * to calculate the same flow_label for a given connection.
+ *
+ * This helper function should be used by driver in case the upper layer
+ * provide a zero flow_label value. This is to improve entropy of RDMA
+ * traffic in the network.
+ */
+static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
+{
+ u64 v = (u64)lqpn * rqpn;
+
+ v ^= v >> 20;
+ v ^= v >> 40;
+
+ return (u32)(v & IB_GRH_FLOWLABEL_MASK);
+}
+
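Because multiplication commutes, the fold is symmetric in lqpn/rqpn; a minimal sanity sketch (hypothetical helper):

static void example_flow_label_symmetry(u32 lqpn, u32 rqpn)
{
	/* Both ends derive the same label without any negotiation. */
	WARN_ON(rdma_calc_flow_label(lqpn, rqpn) !=
		rdma_calc_flow_label(rqpn, lqpn));
}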
+/**
+ * rdma_get_udp_sport - Calculate the UDP source port based on the flow
+ *                      label. If the flow label is not defined in the GRH,
+ *                      derive it from the lqpn/rqpn pair.
+ *
+ * @fl: flow label from GRH
+ * @lqpn: local qp number
+ * @rqpn: remote qp number
+ */
+static inline u16 rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
+{
+ if (!fl)
+ fl = rdma_calc_flow_label(lqpn, rqpn);
+
+ return rdma_flow_label_to_udp_sport(fl);
+}
+
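A hedged end-to-end sketch (helper name hypothetical): a driver filling a RoCE v2 header would derive the UDP source port like this, with the result always landing in the valid 0xC000-0xFFFF range.

static u16 example_udp_sport(u32 grh_flow_label, u32 lqpn, u32 rqpn)
{
	/* A zero flow label falls back to the symmetric qpn-based label. */
	return rdma_get_udp_sport(grh_flow_label, lqpn, rqpn);
}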
+const struct ib_port_immutable*
+ib_port_immutable_read(struct ib_device *dev, unsigned int port);
#endif /* IB_VERBS_H */
diff --git a/include/rdma/ibta_vol1_c12.h b/include/rdma/ibta_vol1_c12.h
index 269904425d3f..960c86bec76c 100644
--- a/include/rdma/ibta_vol1_c12.h
+++ b/include/rdma/ibta_vol1_c12.h
@@ -38,6 +38,7 @@
/* Table 106 REQ Message Contents */
#define CM_REQ_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_req_msg, 0, 32)
+#define CM_REQ_VENDOR_ID CM_FIELD32_LOC(struct cm_req_msg, 5, 24)
#define CM_REQ_SERVICE_ID CM_FIELD64_LOC(struct cm_req_msg, 8)
#define CM_REQ_LOCAL_CA_GUID CM_FIELD64_LOC(struct cm_req_msg, 16)
#define CM_REQ_LOCAL_Q_KEY CM_FIELD32_LOC(struct cm_req_msg, 28, 32)
@@ -119,8 +120,11 @@ CM_STRUCT(struct cm_rej_msg, 84 * 8 + 1184);
#define CM_REP_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_rep_msg, 4, 32)
#define CM_REP_LOCAL_Q_KEY CM_FIELD32_LOC(struct cm_rep_msg, 8, 32)
#define CM_REP_LOCAL_QPN CM_FIELD32_LOC(struct cm_rep_msg, 12, 24)
+#define CM_REP_VENDOR_ID_H CM_FIELD8_LOC(struct cm_rep_msg, 15, 8)
#define CM_REP_LOCAL_EE_CONTEXT_NUMBER CM_FIELD32_LOC(struct cm_rep_msg, 16, 24)
+#define CM_REP_VENDOR_ID_M CM_FIELD8_LOC(struct cm_rep_msg, 19, 8)
#define CM_REP_STARTING_PSN CM_FIELD32_LOC(struct cm_rep_msg, 20, 24)
+#define CM_REP_VENDOR_ID_L CM_FIELD8_LOC(struct cm_rep_msg, 23, 8)
#define CM_REP_RESPONDER_RESOURCES CM_FIELD8_LOC(struct cm_rep_msg, 24, 8)
#define CM_REP_INITIATOR_DEPTH CM_FIELD8_LOC(struct cm_rep_msg, 25, 8)
#define CM_REP_TARGET_ACK_DELAY CM_FIELD8_LOC(struct cm_rep_msg, 26, 5)
@@ -201,7 +205,9 @@ CM_STRUCT(struct cm_sidr_req_msg, 16 * 8 + 1728);
#define CM_SIDR_REP_STATUS CM_FIELD8_LOC(struct cm_sidr_rep_msg, 4, 8)
#define CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH \
CM_FIELD8_LOC(struct cm_sidr_rep_msg, 5, 8)
+#define CM_SIDR_REP_VENDOR_ID_H CM_FIELD16_LOC(struct cm_sidr_rep_msg, 6, 16)
#define CM_SIDR_REP_QPN CM_FIELD32_LOC(struct cm_sidr_rep_msg, 8, 24)
+#define CM_SIDR_REP_VENDOR_ID_L CM_FIELD8_LOC(struct cm_sidr_rep_msg, 11, 8)
#define CM_SIDR_REP_SERVICEID CM_FIELD64_LOC(struct cm_sidr_rep_msg, 12)
#define CM_SIDR_REP_Q_KEY CM_FIELD32_LOC(struct cm_sidr_rep_msg, 20, 32)
#define CM_SIDR_REP_ADDITIONAL_INFORMATION \
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
index 5aa8a9c76aa0..03abd30e6c8c 100644
--- a/include/rdma/iw_cm.h
+++ b/include/rdma/iw_cm.h
@@ -1,35 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
+
#ifndef IW_CM_H
#define IW_CM_H
@@ -96,6 +70,7 @@ struct iw_cm_id {
u8 tos;
bool tos_set:1;
bool mapped:1;
+ bool afonly:1;
};
struct iw_cm_conn_param {
diff --git a/include/rdma/iw_portmap.h b/include/rdma/iw_portmap.h
index c89535047c42..6dbc1a235974 100644
--- a/include/rdma/iw_portmap.h
+++ b/include/rdma/iw_portmap.h
@@ -1,35 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2014 Intel Corporation. All rights reserved.
* Copyright (c) 2014 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
+
#ifndef _IW_PORTMAP_H
#define _IW_PORTMAP_H
diff --git a/include/rdma/lag.h b/include/rdma/lag.h
new file mode 100644
index 000000000000..7c06ec9b2eef
--- /dev/null
+++ b/include/rdma/lag.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2020 Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef _RDMA_LAG_H_
+#define _RDMA_LAG_H_
+
+#include <net/lag.h>
+
+struct ib_device;
+struct rdma_ah_attr;
+
+enum rdma_lag_flags {
+ RDMA_LAG_FLAGS_HASH_ALL_SLAVES = 1 << 0
+};
+
+void rdma_lag_put_ah_roce_slave(struct net_device *xmit_slave);
+struct net_device *rdma_lag_get_ah_roce_slave(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr,
+ gfp_t flags);
+
+#endif /* _RDMA_LAG_H_ */
diff --git a/include/rdma/opa_addr.h b/include/rdma/opa_addr.h
index 66d4393d339c..c6bfa2640bac 100644
--- a/include/rdma/opa_addr.h
+++ b/include/rdma/opa_addr.h
@@ -1,48 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2017 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef OPA_ADDR_H
diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h
index bdbfe25d3854..73bcac90a048 100644
--- a/include/rdma/opa_port_info.h
+++ b/include/rdma/opa_port_info.h
@@ -1,36 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright (c) 2014-2017 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2014-2020 Intel Corporation. All rights reserved.
*/
-#if !defined(OPA_PORT_INFO_H)
+#ifndef OPA_PORT_INFO_H
#define OPA_PORT_INFO_H
#include <rdma/opa_smi.h>
@@ -139,14 +112,6 @@
#define OPA_CAP_MASK3_IsVLMarkerSupported (1 << 1)
#define OPA_CAP_MASK3_IsVLrSupported (1 << 0)
-/**
- * new MTU values
- */
-enum {
- OPA_MTU_8192 = 6,
- OPA_MTU_10240 = 7,
-};
-
enum {
OPA_PORT_PHYS_CONF_DISCONNECTED = 0,
OPA_PORT_PHYS_CONF_STANDARD = 1,
diff --git a/include/rdma/opa_smi.h b/include/rdma/opa_smi.h
index c7b2ef12792d..a9f1b5700e98 100644
--- a/include/rdma/opa_smi.h
+++ b/include/rdma/opa_smi.h
@@ -1,36 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2014 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
-#if !defined(OPA_SMI_H)
+#ifndef OPA_SMI_H
#define OPA_SMI_H
#include <rdma/ib_mad.h>
diff --git a/include/rdma/opa_vnic.h b/include/rdma/opa_vnic.h
index 0c07a70bd7f6..f3d5377b217a 100644
--- a/include/rdma/opa_vnic.h
+++ b/include/rdma/opa_vnic.h
@@ -1,52 +1,11 @@
-#ifndef _OPA_VNIC_H
-#define _OPA_VNIC_H
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright(c) 2017 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
+ * Copyright(c) 2017 - 2020 Intel Corporation.
*/
+#ifndef _OPA_VNIC_H
+#define _OPA_VNIC_H
+
/*
* This file contains Intel Omni-Path (OPA) Virtual Network Interface
* Controller (VNIC) specific declarations.
@@ -75,7 +34,7 @@
struct opa_vnic_rdma_netdev {
struct rdma_netdev rn; /* keep this first */
/* followed by device private data */
- char *dev_priv[0];
+ char *dev_priv[];
};
static inline void *opa_vnic_priv(const struct net_device *dev)
@@ -131,8 +90,7 @@ struct opa_vnic_stats {
static inline bool rdma_cap_opa_vnic(struct ib_device *device)
{
- return !!(device->attrs.device_cap_flags &
- IB_DEVICE_RDMA_NETDEV_OPA_VNIC);
+ return !!(device->attrs.kernel_cap_flags & IBK_RDMA_NETDEV_OPA);
}
#endif /* _OPA_VNIC_H */
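The dev_priv change above swaps a GNU zero-length array for a C99 flexible array member, which lets the compiler and fortify checks reason about the trailing storage. A hedged sketch of the allocation pattern such a trailing member implies (function name and the struct_size() sizing are illustrative):

static struct opa_vnic_rdma_netdev *example_alloc_rn(size_t n)
{
	struct opa_vnic_rdma_netdev *rn;

	/* one allocation covers the struct plus n trailing dev_priv slots */
	rn = kzalloc(struct_size(rn, dev_priv, n), GFP_KERNEL);
	return rn;	/* NULL on allocation failure */
}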
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 71f48cfdc24c..cdc7cafab572 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -1,37 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2005 Voltaire Inc. All rights reserved.
* Copyright (c) 2005 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
-#if !defined(RDMA_CM_H)
+#ifndef RDMA_CM_H
#define RDMA_CM_H
#include <linux/socket.h>
@@ -76,10 +49,21 @@ struct rdma_addr {
struct rdma_dev_addr dev_addr;
};
+#define RDMA_PRIMARY_PATH_MAX_REC_NUM 3
struct rdma_route {
struct rdma_addr addr;
struct sa_path_rec *path_rec;
- int num_paths;
+
+ /* Optional path records of primary path */
+ struct sa_path_rec *path_rec_inbound;
+ struct sa_path_rec *path_rec_outbound;
+
+ /*
+ * 0 - Neither primary nor alternate path is available
+ * 1 - Only primary path is available
+ * 2 - Both primary and alternate paths are available
+ */
+ int num_pri_alt_paths;
};
struct rdma_conn_param {
@@ -111,6 +95,7 @@ struct rdma_cm_event {
struct rdma_conn_param conn;
struct rdma_ud_param ud;
} param;
+ struct rdma_ucm_ece ece;
};
struct rdma_cm_id;
@@ -133,14 +118,18 @@ struct rdma_cm_id {
struct rdma_route route;
enum rdma_ucm_port_space ps;
enum ib_qp_type qp_type;
- u8 port_num;
+ u32 port_num;
+ struct work_struct net_work;
};
-struct rdma_cm_id *__rdma_create_id(struct net *net,
- rdma_cm_event_handler event_handler,
- void *context, enum rdma_ucm_port_space ps,
- enum ib_qp_type qp_type,
- const char *caller);
+struct rdma_cm_id *
+__rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler,
+ void *context, enum rdma_ucm_port_space ps,
+ enum ib_qp_type qp_type, const char *caller);
+struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler,
+ void *context,
+ enum rdma_ucm_port_space ps,
+ enum ib_qp_type qp_type);
/**
* rdma_create_id - Create an RDMA identifier.
@@ -158,9 +147,9 @@ struct rdma_cm_id *__rdma_create_id(struct net *net,
* The event handler callback serializes on the id's mutex and is
* allowed to sleep.
*/
-#define rdma_create_id(net, event_handler, context, ps, qp_type) \
- __rdma_create_id((net), (event_handler), (context), (ps), (qp_type), \
- KBUILD_MODNAME)
+#define rdma_create_id(net, event_handler, context, ps, qp_type) \
+ __rdma_create_kernel_id(net, event_handler, context, ps, qp_type, \
+ KBUILD_MODNAME)
/**
* rdma_destroy_id - Destroys an RDMA identifier.
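The rdma_create_id() wrapper now routes kernel callers to __rdma_create_kernel_id(), while user ids come from rdma_create_user_id(). A hedged kernel-side sketch (handler, port space, and QP type chosen purely for illustration):

static int example_cm_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	/* returning non-zero asks the core to destroy the id */
	return 0;
}

static struct rdma_cm_id *example_new_id(void)
{
	return rdma_create_id(&init_net, example_cm_handler, NULL,
			      RDMA_PS_TCP, IB_QPT_RC);	/* ERR_PTR on error */
}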
@@ -250,19 +239,12 @@ void rdma_destroy_qp(struct rdma_cm_id *id);
int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
int *qp_attr_mask);
-/**
- * rdma_connect - Initiate an active connection request.
- * @id: Connection identifier to connect.
- * @conn_param: Connection information used for connected QPs.
- *
- * Users must have resolved a route for the rdma_cm_id to connect with
- * by having called rdma_resolve_route before calling this routine.
- *
- * This call will either connect to a remote QP or obtain remote QP
- * information for unconnected rdma_cm_id's. The actual operation is
- * based on the rdma_cm_id's port space.
- */
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
+int rdma_connect_locked(struct rdma_cm_id *id,
+ struct rdma_conn_param *conn_param);
+
+int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
+ struct rdma_ucm_ece *ece);
/**
* rdma_listen - This function is called by the passive side to
@@ -273,26 +255,12 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
*/
int rdma_listen(struct rdma_cm_id *id, int backlog);
-int __rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
- const char *caller);
+int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
-/**
- * rdma_accept - Called to accept a connection request or response.
- * @id: Connection identifier associated with the request.
- * @conn_param: Information needed to establish the connection. This must be
- * provided if accepting a connection request. If accepting a connection
- * response, this parameter must be NULL.
- *
- * Typically, this routine is only called by the listener to accept a connection
- * request. It must also be called on the active side of a connection if the
- * user is performing their own QP transitions.
- *
- * In the case of error, a reject message is sent to the remote side and the
- * state of the qp associated with the id is modified to error, such that any
- * previously posted receive buffers would be flushed.
- */
-#define rdma_accept(id, conn_param) \
- __rdma_accept((id), (conn_param), KBUILD_MODNAME)
+void rdma_lock_handler(struct rdma_cm_id *id);
+void rdma_unlock_handler(struct rdma_cm_id *id);
+int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
+ struct rdma_ucm_ece *ece);
/**
* rdma_notify - Notifies the RDMA CM of an asynchronous event that has
@@ -313,7 +281,7 @@ int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event);
* rdma_reject - Called to reject a connection request or response.
*/
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
- u8 private_data_len);
+ u8 private_data_len, u8 reason);
/**
* rdma_disconnect - This function disconnects the associated QP and
@@ -375,6 +343,8 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse);
int rdma_set_afonly(struct rdma_cm_id *id, int afonly);
int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout);
+
+int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer);
/**
* rdma_get_service_id - Return the IB service ID for a specified address.
* @id: Communication identifier associated with the address.
@@ -390,14 +360,6 @@ __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr);
const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
int reason);
/**
- * rdma_is_consumer_reject - return true if the consumer rejected the connect
- * request.
- * @id: Communication identifier that received the REJECT event.
- * @reason: Value returned in the REJECT event status field.
- */
-bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason);
-
-/**
* rdma_consumer_reject_data - return the consumer reject private data and
* length, if any.
* @id: Communication identifier that received the REJECT event.
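With the __rdma_accept() indirection gone, callers that accept outside the event handler serialize explicitly. A minimal sketch, assuming the rule that rdma_accept() runs either from the handler callback or under rdma_lock_handler():

static int example_accept(struct rdma_cm_id *id,
			  struct rdma_conn_param *param)
{
	int ret;

	rdma_lock_handler(id);
	ret = rdma_accept(id, param);
	rdma_unlock_handler(id);
	return ret;
}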
diff --git a/include/rdma/rdma_cm_ib.h b/include/rdma/rdma_cm_ib.h
index 6a69d71a21a5..8354e7de7815 100644
--- a/include/rdma/rdma_cm_ib.h
+++ b/include/rdma/rdma_cm_ib.h
@@ -1,36 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2006 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
-#if !defined(RDMA_CM_IB_H)
+#ifndef RDMA_CM_IB_H
#define RDMA_CM_IB_H
#include <rdma/rdma_cm.h>
diff --git a/include/rdma/rdma_counter.h b/include/rdma/rdma_counter.h
index eb99856e8b30..45d5481a7846 100644
--- a/include/rdma/rdma_counter.h
+++ b/include/rdma/rdma_counter.h
@@ -40,26 +40,29 @@ struct rdma_counter {
struct rdma_counter_mode mode;
struct mutex lock;
struct rdma_hw_stats *stats;
- u8 port;
+ u32 port;
};
void rdma_counter_init(struct ib_device *dev);
void rdma_counter_release(struct ib_device *dev);
-int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
- bool on, enum rdma_nl_counter_mask mask);
-int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port);
+int rdma_counter_set_auto_mode(struct ib_device *dev, u32 port,
+ enum rdma_nl_counter_mask mask,
+ struct netlink_ext_ack *extack);
+int rdma_counter_bind_qp_auto(struct ib_qp *qp, u32 port);
int rdma_counter_unbind_qp(struct ib_qp *qp, bool force);
int rdma_counter_query_stats(struct rdma_counter *counter);
-u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index);
-int rdma_counter_bind_qpn(struct ib_device *dev, u8 port,
+u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u32 port, u32 index);
+int rdma_counter_bind_qpn(struct ib_device *dev, u32 port,
u32 qp_num, u32 counter_id);
-int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
+int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u32 port,
u32 qp_num, u32 *counter_id);
-int rdma_counter_unbind_qpn(struct ib_device *dev, u8 port,
+int rdma_counter_unbind_qpn(struct ib_device *dev, u32 port,
u32 qp_num, u32 counter_id);
-int rdma_counter_get_mode(struct ib_device *dev, u8 port,
+int rdma_counter_get_mode(struct ib_device *dev, u32 port,
enum rdma_nl_counter_mode *mode,
enum rdma_nl_counter_mask *mask);
+int rdma_counter_modify(struct ib_device *dev, u32 port,
+ unsigned int index, bool enable);
#endif /* _RDMA_COUNTER_H_ */
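All port arguments are widened from u8 to u32 here, and rdma_counter_modify() is new. A hedged sketch of binding a QP number to a freshly allocated counter, using the signatures shown above (function and variable names illustrative):

static int example_bind_counter(struct ib_device *dev, u32 port, u32 qpn)
{
	u32 counter_id;
	int ret;

	ret = rdma_counter_bind_qpn_alloc(dev, port, qpn, &counter_id);
	if (ret)
		return ret;
	/* counter_id now identifies the auto-allocated counter */
	return 0;
}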
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index ab22759de7ea..c2a79aeee113 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
+
#ifndef _RDMA_NETLINK_H
#define _RDMA_NETLINK_H
-
#include <linux/netlink.h>
#include <uapi/rdma/rdma_netlink.h>
@@ -30,7 +30,7 @@ enum rdma_nl_flags {
* constant as well and the compiler checks they are the same.
*/
#define MODULE_ALIAS_RDMA_NETLINK(_index, _val) \
- static inline void __chk_##_index(void) \
+ static inline void __maybe_unused __chk_##_index(void) \
{ \
BUILD_BUG_ON(_index != _val); \
} \
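__maybe_unused silences -Wunused-function in translation units that never call the generated checker; the BUILD_BUG_ON still fires at compile time if the alias index drifts from the enum value. Hypothetical use (RDMA_NL_FOO and the value 3 are made up):

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_FOO, 3);	/* build fails if RDMA_NL_FOO != 3 */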
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index ac5a9430abb6..c429d6ddb129 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -1,53 +1,11 @@
-#ifndef DEF_RDMA_VT_H
-#define DEF_RDMA_VT_H
-
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2016 - 2019 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef DEF_RDMA_VT_H
+#define DEF_RDMA_VT_H
+
/*
* Structure that low level drivers will populate in order to register with the
* rdmavt layer.
@@ -134,7 +92,7 @@ struct rvt_ibport {
/*
* The pkey table is allocated and maintained by the driver. Drivers
* need to have access to this before registering with rdmav. However
- * rdmavt will need access to it so drivers need to proviee this during
+ * rdmavt will need access to it so drivers need to provide this during
* the attach port API call.
*/
u16 *pkey_table;
@@ -272,7 +230,7 @@ struct rvt_driver_provided {
void (*do_send)(struct rvt_qp *qp);
/*
- * Returns a pointer to the undelying hardware's PCI device. This is
+ * Returns a pointer to the underlying hardware's PCI device. This is
* used to display information as to what hardware is being referenced
* in an output message
*/
@@ -287,7 +245,7 @@ struct rvt_driver_provided {
void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp);
/*
- * Init a struture allocated with qp_priv_alloc(). This should be
+ * Init a structure allocated with qp_priv_alloc(). This should be
* called after all qp fields have been initialized in rdmavt.
*/
int (*qp_priv_init)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
@@ -299,7 +257,7 @@ struct rvt_driver_provided {
void (*qp_priv_free)(struct rvt_dev_info *rdi, struct rvt_qp *qp);
/*
- * Inform the driver the particular qp in quesiton has been reset so
+ * Inform the driver the particular qp in question has been reset so
* that it can clean up anything it needs to.
*/
void (*notify_qp_reset)(struct rvt_qp *qp);
@@ -323,7 +281,7 @@ struct rvt_driver_provided {
void (*stop_send_queue)(struct rvt_qp *qp);
/*
- * Have the drivr drain any in progress operations
+ * Have the driver drain any in progress operations
*/
void (*quiesce_qp)(struct rvt_qp *qp);
@@ -351,16 +309,16 @@ struct rvt_driver_provided {
/*
* Query driver for the state of the port.
*/
- int (*query_port_state)(struct rvt_dev_info *rdi, u8 port_num,
+ int (*query_port_state)(struct rvt_dev_info *rdi, u32 port_num,
struct ib_port_attr *props);
/*
* Tell driver to shutdown a port
*/
- int (*shut_down_port)(struct rvt_dev_info *rdi, u8 port_num);
+ int (*shut_down_port)(struct rvt_dev_info *rdi, u32 port_num);
/* Tell driver to send a trap for changed port capabilities */
- void (*cap_mask_chg)(struct rvt_dev_info *rdi, u8 port_num);
+ void (*cap_mask_chg)(struct rvt_dev_info *rdi, u32 port_num);
/*
* The following functions can be safely ignored completely. Any use of
@@ -380,7 +338,7 @@ struct rvt_driver_provided {
/* Let the driver pick the next queue pair number*/
int (*alloc_qpn)(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port_num);
+ enum ib_qp_type type, u32 port_num);
/* Determine if its safe or allowed to modify the qp */
int (*check_modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
@@ -487,7 +445,7 @@ static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi,
* to work by setting the name manually here.
*/
dev_set_name(&rdi->ibdev.dev, fmt, name, unit);
- strlcpy(rdi->ibdev.name, dev_name(&rdi->ibdev.dev), IB_DEVICE_NAME_MAX);
+ strscpy(rdi->ibdev.name, dev_name(&rdi->ibdev.dev), IB_DEVICE_NAME_MAX);
}
/**
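The strlcpy() to strscpy() conversion above trades strlcpy()'s return-source-length semantics, which can over-read an unterminated source, for a bounded copy that always NUL-terminates. A short sketch of the return convention:

static void example_copy_name(struct rvt_dev_info *rdi)
{
	ssize_t n;

	n = strscpy(rdi->ibdev.name, dev_name(&rdi->ibdev.dev),
		    IB_DEVICE_NAME_MAX);
	if (n == -E2BIG)
		pr_warn("device name truncated\n");	/* still NUL-terminated */
}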
diff --git a/include/rdma/rdmavt_cq.h b/include/rdma/rdmavt_cq.h
index 574eb7278f46..1fe2bb5a63b0 100644
--- a/include/rdma/rdmavt_cq.h
+++ b/include/rdma/rdmavt_cq.h
@@ -1,56 +1,11 @@
-#ifndef DEF_RDMAVT_INCCQ_H
-#define DEF_RDMAVT_INCCQ_H
-
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
* Copyright(c) 2016 - 2018 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef DEF_RDMAVT_INCCQ_H
+#define DEF_RDMAVT_INCCQ_H
+
#include <linux/kthread.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
diff --git a/include/rdma/rdmavt_mr.h b/include/rdma/rdmavt_mr.h
index 72a3856d4057..c3367e9833ef 100644
--- a/include/rdma/rdmavt_mr.h
+++ b/include/rdma/rdmavt_mr.h
@@ -1,53 +1,11 @@
-#ifndef DEF_RDMAVT_INCMR_H
-#define DEF_RDMAVT_INCMR_H
-
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef DEF_RDMAVT_INCMR_H
+#define DEF_RDMAVT_INCMR_H
+
/*
* For Memory Regions. This stuff should probably be moved into rdmavt/mr.h once
* drivers no longer need access to the MR directly.
@@ -85,7 +43,7 @@ struct rvt_mregion {
u8 lkey_published; /* in global table */
struct percpu_ref refcount;
struct completion comp; /* complete when refcount goes to zero */
- struct rvt_segarray *map[0]; /* the segments */
+ struct rvt_segarray *map[]; /* the segments */
};
#define RVT_MAX_LKEY_TABLE_BITS 23
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 0d5c70e2d8ab..2e58d5e6ac0e 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -1,53 +1,11 @@
-#ifndef DEF_RDMAVT_INCQP_H
-#define DEF_RDMAVT_INCQP_H
-
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright(c) 2016 - 2019 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
+ * Copyright(c) 2016 - 2020 Intel Corporation.
*/
+#ifndef DEF_RDMAVT_INCQP_H
+#define DEF_RDMAVT_INCQP_H
+
#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
@@ -69,6 +27,33 @@
#define RVT_R_COMM_EST 0x10
/*
+ * If a packet's QP[23:16] bits match this value, then it is
+ * a PSM packet and the hardware will expect a KDETH header
+ * following the BTH.
+ */
+#define RVT_KDETH_QP_PREFIX 0x80
+#define RVT_KDETH_QP_SUFFIX 0xffff
+#define RVT_KDETH_QP_PREFIX_MASK 0x00ff0000
+#define RVT_KDETH_QP_PREFIX_SHIFT 16
+#define RVT_KDETH_QP_BASE (u32)(RVT_KDETH_QP_PREFIX << \
+ RVT_KDETH_QP_PREFIX_SHIFT)
+#define RVT_KDETH_QP_MAX (u32)(RVT_KDETH_QP_BASE + RVT_KDETH_QP_SUFFIX)
+
+/*
+ * If a packet's LNH == BTH and DEST QPN[23:16] in the BTH match this
+ * prefix value, then it is an AIP packet with a DETH containing the entropy
+ * value in byte 4 following the BTH.
+ */
+#define RVT_AIP_QP_PREFIX 0x81
+#define RVT_AIP_QP_SUFFIX 0xffff
+#define RVT_AIP_QP_PREFIX_MASK 0x00ff0000
+#define RVT_AIP_QP_PREFIX_SHIFT 16
+#define RVT_AIP_QP_BASE (u32)(RVT_AIP_QP_PREFIX << \
+ RVT_AIP_QP_PREFIX_SHIFT)
+#define RVT_AIP_QPN_MAX BIT(RVT_AIP_QP_PREFIX_SHIFT)
+#define RVT_AIP_QP_MAX (u32)(RVT_AIP_QP_BASE + RVT_AIP_QPN_MAX - 1)
+
+/*
* Bit definitions for s_flags.
*
* RVT_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
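To make the new constants concrete: RVT_KDETH_QP_BASE is 0x80 << 16 = 0x800000 and RVT_KDETH_QP_MAX is 0x80ffff. A classifier sketch built only from the defines above (the function name is hypothetical):

static inline bool example_qpn_is_kdeth(u32 qpn)
{
	return ((qpn & RVT_KDETH_QP_PREFIX_MASK) >>
		RVT_KDETH_QP_PREFIX_SHIFT) == RVT_KDETH_QP_PREFIX;
}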
@@ -191,7 +176,7 @@ struct rvt_swqe {
u32 ssn; /* send sequence number */
u32 length; /* total length of data in sg_list */
void *priv; /* driver dependent field */
- struct rvt_sge sg_list[0];
+ struct rvt_sge sg_list[];
};
/**
@@ -278,6 +263,25 @@ struct rvt_rq {
spinlock_t lock ____cacheline_aligned_in_smp;
};
+/**
+ * rvt_get_rq_count - count the number of request work queue entries
+ * in the circular buffer
+ * @rq: data structure for the request queue entry
+ * @head: head index of the circular buffer
+ * @tail: tail index of the circular buffer
+ *
+ * Return - total number of entries in the Receive Queue
+ */
+
+static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
+{
+ u32 count = head - tail;
+
+ if ((s32)count < 0)
+ count += rq->size;
+ return count;
+}
+
/*
* This structure holds the information that the send tasklet needs
* to send a RDMA read response or atomic operation.
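Worked example for rvt_get_rq_count(): with rq->size = 8, head = 2 and tail = 5, head - tail wraps to -3 when viewed as an s32, so the function adds the ring size and reports 5 entries (slots 5, 6, 7, 0 and 1).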
@@ -440,7 +444,7 @@ struct rvt_qp {
/*
* This sge list MUST be last. Do not add anything below here.
*/
- struct rvt_sge r_sg_list[0] /* verified SGEs */
+ struct rvt_sge *r_sg_list /* verified SGEs */
____cacheline_aligned_in_smp;
};
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
index 7682d1bcf789..79d109c47242 100644
--- a/include/rdma/restrack.h
+++ b/include/rdma/restrack.h
@@ -50,6 +50,10 @@ enum rdma_restrack_type {
*/
RDMA_RESTRACK_COUNTER,
/**
+ * @RDMA_RESTRACK_SRQ: Shared receive queue (SRQ)
+ */
+ RDMA_RESTRACK_SRQ,
+ /**
* @RDMA_RESTRACK_MAX: Last entry, used for array declarations
*/
RDMA_RESTRACK_MAX
@@ -68,6 +72,14 @@ struct rdma_restrack_entry {
* As an example for that, see mlx5 QPs with type MLX5_IB_QPT_HW_GSI
*/
bool valid;
+ /**
+ * @no_track: don't add this entry to restrack DB
+ *
+ * This field is used to mark an entry that doesn't need to be added to
+ * internal restrack DB and presented later to the users at the nldev
+ * query stage.
+ */
+ u8 no_track : 1;
/*
* @kref: Protect destroy of the resource
*/
@@ -106,22 +118,11 @@ struct rdma_restrack_entry {
int rdma_restrack_count(struct ib_device *dev,
enum rdma_restrack_type type);
-
-void rdma_restrack_kadd(struct rdma_restrack_entry *res);
-void rdma_restrack_uadd(struct rdma_restrack_entry *res);
-
-/**
- * rdma_restrack_del() - delete object from the reource tracking database
- * @res: resource entry
- * @type: actual type of object to operate
- */
-void rdma_restrack_del(struct rdma_restrack_entry *res);
-
/**
* rdma_is_kernel_res() - check the owner of resource
* @res: resource entry
*/
-static inline bool rdma_is_kernel_res(struct rdma_restrack_entry *res)
+static inline bool rdma_is_kernel_res(const struct rdma_restrack_entry *res)
{
return !res->user;
}
@@ -138,14 +139,6 @@ int __must_check rdma_restrack_get(struct rdma_restrack_entry *res);
*/
int rdma_restrack_put(struct rdma_restrack_entry *res);
-/**
- * rdma_restrack_set_task() - set the task for this resource
- * @res: resource entry
- * @caller: kernel name, the current task will be used if the caller is NULL.
- */
-void rdma_restrack_set_task(struct rdma_restrack_entry *res,
- const char *caller);
-
/*
* Helper functions for rdma drivers when filling out
* nldev driver attributes.
@@ -164,4 +157,20 @@ int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name,
struct rdma_restrack_entry *rdma_restrack_get_byid(struct ib_device *dev,
enum rdma_restrack_type type,
u32 id);
+
+/**
+ * rdma_restrack_no_track() - don't add resource to the DB
+ * @res: resource entry
+ *
+ * Every user of this API should be cross-examined.
+ * Probably you don't need to use this function.
+ */
+static inline void rdma_restrack_no_track(struct rdma_restrack_entry *res)
+{
+ res->no_track = true;
+}
+static inline bool rdma_restrack_is_tracked(struct rdma_restrack_entry *res)
+{
+ return !res->no_track;
+}
#endif /* _RDMA_RESTRACK_H_ */
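A hedged sketch of the opt-out flag in use, assuming a driver-internal resource that should not surface in nldev queries and that the flag is set before the entry would otherwise be committed:

static void example_hide_internal_res(struct rdma_restrack_entry *res)
{
	rdma_restrack_no_track(res);	/* skipped by the restrack DB and nldev */
	WARN_ON(rdma_restrack_is_tracked(res));
}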
diff --git a/include/rdma/rw.h b/include/rdma/rw.h
index 6ad9dc836c10..d606cac48233 100644
--- a/include/rdma/rw.h
+++ b/include/rdma/rw.h
@@ -42,29 +42,29 @@ struct rdma_rw_ctx {
};
};
-int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
u64 remote_addr, u32 rkey, enum dma_data_direction dir);
-void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
- struct scatterlist *sg, u32 sg_cnt,
- enum dma_data_direction dir);
+void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ u32 port_num, struct scatterlist *sg, u32 sg_cnt,
+ enum dma_data_direction dir);
int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
- u8 port_num, struct scatterlist *sg, u32 sg_cnt,
+ u32 port_num, struct scatterlist *sg, u32 sg_cnt,
struct scatterlist *prot_sg, u32 prot_sg_cnt,
struct ib_sig_attrs *sig_attrs, u64 remote_addr, u32 rkey,
enum dma_data_direction dir);
void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
- u8 port_num, struct scatterlist *sg, u32 sg_cnt,
+ u32 port_num, struct scatterlist *sg, u32 sg_cnt,
struct scatterlist *prot_sg, u32 prot_sg_cnt,
enum dma_data_direction dir);
struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
- u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr);
-int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+ u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr);
+int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
struct ib_cqe *cqe, struct ib_send_wr *chain_wr);
-unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num,
+unsigned int rdma_rw_mr_factor(struct ib_device *device, u32 port_num,
unsigned int maxpages);
void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr);
int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr);
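The rdma_rw API above follows an init/post/destroy lifecycle; with the port type widened to u32, a hedged RDMA READ sketch looks roughly like this (error handling trimmed, names hypothetical):

static int example_rdma_read(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
			     u32 port_num, struct scatterlist *sg,
			     u32 sg_cnt, u64 raddr, u32 rkey,
			     struct ib_cqe *cqe)
{
	int ret;

	/* map sg and build the READ work requests; negative errno on failure */
	ret = rdma_rw_ctx_init(ctx, qp, port_num, sg, sg_cnt, 0,
			       raddr, rkey, DMA_FROM_DEVICE);
	if (ret < 0)
		return ret;

	ret = rdma_rw_ctx_post(ctx, qp, port_num, cqe, NULL);
	/* on completion: rdma_rw_ctx_destroy(ctx, qp, port_num, sg, sg_cnt,
	 * DMA_FROM_DEVICE);
	 */
	return ret;
}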
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index 28570ac2b6a0..9d45a5b20316 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _UVERBS_IOCTL_
@@ -51,6 +24,7 @@ enum uverbs_attr_type {
UVERBS_ATTR_TYPE_PTR_OUT,
UVERBS_ATTR_TYPE_IDR,
UVERBS_ATTR_TYPE_FD,
+ UVERBS_ATTR_TYPE_RAW_FD,
UVERBS_ATTR_TYPE_ENUM_IN,
UVERBS_ATTR_TYPE_IDRS_ARRAY,
};
@@ -173,7 +147,7 @@ enum uapi_radix_data {
UVERBS_API_OBJ_KEY_BITS = 5,
UVERBS_API_OBJ_KEY_SHIFT =
UVERBS_API_METHOD_KEY_BITS + UVERBS_API_METHOD_KEY_SHIFT,
- UVERBS_API_OBJ_KEY_NUM_CORE = 24,
+ UVERBS_API_OBJ_KEY_NUM_CORE = 20,
UVERBS_API_OBJ_KEY_NUM_DRIVER =
(1 << UVERBS_API_OBJ_KEY_BITS) - UVERBS_API_OBJ_KEY_NUM_CORE,
UVERBS_API_OBJ_KEY_MASK = GENMASK(31, UVERBS_API_OBJ_KEY_SHIFT),
@@ -420,9 +394,9 @@ struct uapi_definition {
.scope = UAPI_SCOPE_OBJECT, \
.needs_fn_offset = \
offsetof(struct ib_device_ops, ibdev_fn) + \
- BUILD_BUG_ON_ZERO( \
- sizeof(((struct ib_device_ops *)0)->ibdev_fn) != \
- sizeof(void *)), \
+ BUILD_BUG_ON_ZERO(sizeof_field(struct ib_device_ops, \
+ ibdev_fn) != \
+ sizeof(void *)), \
}
/*
@@ -435,9 +409,9 @@ struct uapi_definition {
.scope = UAPI_SCOPE_METHOD, \
.needs_fn_offset = \
offsetof(struct ib_device_ops, ibdev_fn) + \
- BUILD_BUG_ON_ZERO( \
- sizeof(((struct ib_device_ops *)0)->ibdev_fn) != \
- sizeof(void *)), \
+ BUILD_BUG_ON_ZERO(sizeof_field(struct ib_device_ops, \
+ ibdev_fn) != \
+ sizeof(void *)), \
}
/* Call a function to determine if the entire object is supported or not */
@@ -491,8 +465,7 @@ struct uapi_definition {
*/
#define UVERBS_ATTR_STRUCT(_type, _last) \
.zero_trailing = 1, \
- UVERBS_ATTR_SIZE(((uintptr_t)(&((_type *)0)->_last + 1)), \
- sizeof(_type))
+ UVERBS_ATTR_SIZE(offsetofend(_type, _last), sizeof(_type))
/*
* Specifies at least min_len bytes must be passed in, but the amount can be
* larger, up to the protocol maximum size. No check for zeroing is done.
@@ -549,6 +522,11 @@ struct uapi_definition {
.u.obj.access = _access, \
__VA_ARGS__ } })
+#define UVERBS_ATTR_RAW_FD(_attr_id, ...) \
+ (&(const struct uverbs_attr_def){ \
+ .id = (_attr_id), \
+ .attr = { .type = UVERBS_ATTR_TYPE_RAW_FD, __VA_ARGS__ } })
+
#define UVERBS_ATTR_PTR_IN(_attr_id, _type, ...) \
(&(const struct uverbs_attr_def){ \
.id = _attr_id, \
@@ -653,6 +631,7 @@ struct uverbs_attr_bundle {
struct ib_udata ucore;
struct ib_uverbs_file *ufile;
struct ib_ucontext *context;
+ struct ib_uobject *uobject;
DECLARE_BITMAP(attr_present, UVERBS_API_ATTR_BKEY_LEN);
struct uverbs_attr attrs[];
};
@@ -674,12 +653,15 @@ static inline bool uverbs_attr_is_valid(const struct uverbs_attr_bundle *attrs_b
* 'ucontext'.
*
*/
-#define rdma_udata_to_drv_context(udata, drv_dev_struct, member) \
- (udata ? container_of(container_of(udata, struct uverbs_attr_bundle, \
- driver_udata) \
- ->context, \
- drv_dev_struct, member) : \
- (drv_dev_struct *)NULL)
+static inline struct uverbs_attr_bundle *
+rdma_udata_to_uverbs_attr_bundle(struct ib_udata *udata)
+{
+ return container_of(udata, struct uverbs_attr_bundle, driver_udata);
+}
+
+#define rdma_udata_to_drv_context(udata, drv_dev_struct, member) \
+ (udata ? container_of(rdma_udata_to_uverbs_attr_bundle(udata)->context, \
+ drv_dev_struct, member) : (drv_dev_struct *)NULL)
#define IS_UVERBS_COPY_ERR(_ret) ((_ret) && (_ret) != -ENOENT)
@@ -737,6 +719,9 @@ uverbs_attr_get_len(const struct uverbs_attr_bundle *attrs_bundle, u16 idx)
return attr->ptr_attr.len;
}
+void uverbs_finalize_uobj_create(const struct uverbs_attr_bundle *attrs_bundle,
+ u16 idx);
+
/*
* uverbs_attr_ptr_get_array_size() - Get array size pointer by a ptr
* attribute.
@@ -886,9 +871,24 @@ static inline __malloc void *uverbs_zalloc(struct uverbs_attr_bundle *bundle,
{
return _uverbs_alloc(bundle, size, GFP_KERNEL | __GFP_ZERO);
}
-int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
- size_t idx, s64 lower_bound, u64 upper_bound,
- s64 *def_val);
+
+static inline __malloc void *uverbs_kcalloc(struct uverbs_attr_bundle *bundle,
+ size_t n, size_t size)
+{
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(n, size, &bytes)))
+ return ERR_PTR(-EOVERFLOW);
+ return uverbs_zalloc(bundle, bytes);
+}
+
+int _uverbs_get_const_signed(s64 *to,
+ const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, s64 lower_bound, u64 upper_bound,
+ s64 *def_val);
+int _uverbs_get_const_unsigned(u64 *to,
+ const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, u64 upper_bound, u64 *def_val);
int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
size_t idx, const void *from, size_t size);
#else
@@ -932,27 +932,84 @@ uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
{
return -EINVAL;
}
+static inline int
+_uverbs_get_const_signed(s64 *to,
+ const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, s64 lower_bound, u64 upper_bound,
+ s64 *def_val)
+{
+ return -EINVAL;
+}
+static inline int
+_uverbs_get_const_unsigned(u64 *to,
+ const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, u64 upper_bound, u64 *def_val)
+{
+ return -EINVAL;
+}
#endif
-#define uverbs_get_const(_to, _attrs_bundle, _idx) \
+#define uverbs_get_const_signed(_to, _attrs_bundle, _idx) \
({ \
s64 _val; \
- int _ret = _uverbs_get_const(&_val, _attrs_bundle, _idx, \
- type_min(typeof(*_to)), \
- type_max(typeof(*_to)), NULL); \
- (*_to) = _val; \
+ int _ret = \
+ _uverbs_get_const_signed(&_val, _attrs_bundle, _idx, \
+ type_min(typeof(*(_to))), \
+ type_max(typeof(*(_to))), NULL); \
+ (*(_to)) = _val; \
_ret; \
})
-#define uverbs_get_const_default(_to, _attrs_bundle, _idx, _default) \
+#define uverbs_get_const_unsigned(_to, _attrs_bundle, _idx) \
+ ({ \
+ u64 _val; \
+ int _ret = \
+ _uverbs_get_const_unsigned(&_val, _attrs_bundle, _idx, \
+ type_max(typeof(*(_to))), NULL); \
+ (*(_to)) = _val; \
+ _ret; \
+ })
+
+#define uverbs_get_const_default_signed(_to, _attrs_bundle, _idx, _default) \
({ \
s64 _val; \
s64 _def_val = _default; \
int _ret = \
- _uverbs_get_const(&_val, _attrs_bundle, _idx, \
- type_min(typeof(*_to)), \
- type_max(typeof(*_to)), &_def_val); \
- (*_to) = _val; \
+ _uverbs_get_const_signed(&_val, _attrs_bundle, _idx, \
+ type_min(typeof(*(_to))), \
+ type_max(typeof(*(_to))), &_def_val); \
+ (*(_to)) = _val; \
_ret; \
})
+
+#define uverbs_get_const_default_unsigned(_to, _attrs_bundle, _idx, _default) \
+ ({ \
+ u64 _val; \
+ u64 _def_val = _default; \
+ int _ret = \
+ _uverbs_get_const_unsigned(&_val, _attrs_bundle, _idx, \
+ type_max(typeof(*(_to))), &_def_val); \
+ (*(_to)) = _val; \
+ _ret; \
+ })
+
+#define uverbs_get_const(_to, _attrs_bundle, _idx) \
+ (is_signed_type(typeof(*(_to))) ? \
+ uverbs_get_const_signed(_to, _attrs_bundle, _idx) : \
+ uverbs_get_const_unsigned(_to, _attrs_bundle, _idx)) \
+
+#define uverbs_get_const_default(_to, _attrs_bundle, _idx, _default) \
+ (is_signed_type(typeof(*(_to))) ? \
+ uverbs_get_const_default_signed(_to, _attrs_bundle, _idx, \
+ _default) : \
+ uverbs_get_const_default_unsigned(_to, _attrs_bundle, _idx, \
+ _default))
+
+static inline int
+uverbs_get_raw_fd(int *to, const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx)
+{
+ return uverbs_get_const_signed(to, attrs_bundle, idx);
+}
+
#endif
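The signed/unsigned split keeps uverbs_get_const() usable as before: is_signed_type() picks the variant from the destination's type. A hedged handler fragment (the attribute id below is made up for illustration):

/* EXAMPLE_ATTR_FLAGS is a hypothetical attribute id, not from the patch */
enum { EXAMPLE_ATTR_FLAGS = 1 };

static int example_method(struct uverbs_attr_bundle *attrs)
{
	u32 flags;

	/* u32 is unsigned, so this expands to the _unsigned variant */
	return uverbs_get_const_default(&flags, attrs, EXAMPLE_ATTR_FLAGS, 0);
}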
diff --git a/include/rdma/uverbs_named_ioctl.h b/include/rdma/uverbs_named_ioctl.h
index 6ae6cf8e4c2e..ee7873f872c3 100644
--- a/include/rdma/uverbs_named_ioctl.h
+++ b/include/rdma/uverbs_named_ioctl.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _UVERBS_NAMED_IOCTL_
@@ -47,7 +20,7 @@
/* These are static so they do not need to be qualified */
#define UVERBS_METHOD_ATTRS(method_id) _method_attrs_##method_id
-#define UVERBS_OBJECT_METHODS(object_id) _object_methods_##object_id
+#define UVERBS_OBJECT_METHODS(object_id) _UVERBS_NAME(_object_methods_##object_id, __LINE__)
#define DECLARE_UVERBS_NAMED_METHOD(_method_id, ...) \
static const struct uverbs_attr_def *const UVERBS_METHOD_ATTRS( \
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
index 1b28ce1aba07..fe0512116958 100644
--- a/include/rdma/uverbs_std_types.h
+++ b/include/rdma/uverbs_std_types.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _UVERBS_STD_TYPES__
@@ -88,7 +61,7 @@ struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
static inline void uobj_put_destroy(struct ib_uobject *uobj)
{
- rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
}
static inline void uobj_put_read(struct ib_uobject *uobj)
@@ -107,7 +80,21 @@ static inline void uobj_put_write(struct ib_uobject *uobj)
static inline void uobj_alloc_abort(struct ib_uobject *uobj,
struct uverbs_attr_bundle *attrs)
{
- rdma_alloc_abort_uobject(uobj, attrs);
+ rdma_alloc_abort_uobject(uobj, attrs, false);
+}
+
+static inline void uobj_finalize_uobj_create(struct ib_uobject *uobj,
+ struct uverbs_attr_bundle *attrs)
+{
+ /*
+ * Tell the core code that the write() handler has completed
+ * initializing the object and that the core should commit or
+ * abort this object based upon the return code from the write()
+ * method. Similar to what uverbs_finalize_uobj_create() does for
+ * ioctl()
+ */
+ WARN_ON(attrs->uobject);
+ attrs->uobject = uobj;
}
static inline struct ib_uobject *
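A hedged sketch of the write()-path pattern the new helper enables, with the allocation and hardware setup elided:

static int example_create_handler(struct uverbs_attr_bundle *attrs,
				  struct ib_uobject *uobj)
{
	/* ... driver fills uobj->object with the newly created HW object ... */

	uobj_finalize_uobj_create(uobj, attrs);
	return 0;	/* core commits on 0, aborts the uobject on error */
}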
diff --git a/include/rdma/uverbs_types.h b/include/rdma/uverbs_types.h
index f1cbdae67250..ccd11631c167 100644
--- a/include/rdma/uverbs_types.h
+++ b/include/rdma/uverbs_types.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _UVERBS_TYPES_
@@ -98,6 +71,8 @@ struct uverbs_obj_type_class {
enum rdma_remove_reason why,
struct uverbs_attr_bundle *attrs);
void (*remove_handle)(struct ib_uobject *uobj);
+ void (*swap_uobjects)(struct ib_uobject *obj_old,
+ struct ib_uobject *obj_new);
};
struct uverbs_obj_type {
@@ -139,9 +114,13 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
struct uverbs_attr_bundle *attrs);
void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
- struct uverbs_attr_bundle *attrs);
+ struct uverbs_attr_bundle *attrs,
+ bool hw_obj_valid);
void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
struct uverbs_attr_bundle *attrs);
+void rdma_assign_uobject(struct ib_uobject *to_uobj,
+ struct ib_uobject *new_uobj,
+ struct uverbs_attr_bundle *attrs);
/*
* uverbs_uobject_get is called in order to increase the reference count on
@@ -164,8 +143,8 @@ struct uverbs_obj_fd_type {
* because the driver is removed or the FD is closed.
*/
struct uverbs_obj_type type;
- int (*destroy_object)(struct ib_uobject *uobj,
- enum rdma_remove_reason why);
+ void (*destroy_object)(struct ib_uobject *uobj,
+ enum rdma_remove_reason why);
const struct file_operations *fops;
const char *name;
int flags;
diff --git a/include/rv/automata.h b/include/rv/automata.h
new file mode 100644
index 000000000000..eb9e636809a0
--- /dev/null
+++ b/include/rv/automata.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
+ *
+ * Deterministic automata helper functions, to be used with the automata
+ * models in C generated by the dot2k tool.
+ */
+
+/*
+ * DECLARE_AUTOMATA_HELPERS - define a set of helper functions for automata
+ *
+ * Define a set of helper functions for automata. The 'name' argument is used
+ * as a suffix for the functions and data. These functions will handle an
+ * automaton whose state is stored in data type 'type'.
+ */
+#define DECLARE_AUTOMATA_HELPERS(name, type) \
+ \
+/* \
+ * model_get_state_name_##name - return the (string) name of the given state \
+ */ \
+static char *model_get_state_name_##name(enum states_##name state) \
+{ \
+ if ((state < 0) || (state >= state_max_##name)) \
+ return "INVALID"; \
+ \
+ return automaton_##name.state_names[state]; \
+} \
+ \
+/* \
+ * model_get_event_name_##name - return the (string) name of the given event \
+ */ \
+static char *model_get_event_name_##name(enum events_##name event) \
+{ \
+ if ((event < 0) || (event >= event_max_##name)) \
+ return "INVALID"; \
+ \
+ return automaton_##name.event_names[event]; \
+} \
+ \
+/* \
+ * model_get_initial_state_##name - return the automaton's initial state \
+ */ \
+static inline type model_get_initial_state_##name(void) \
+{ \
+ return automaton_##name.initial_state; \
+} \
+ \
+/* \
+ * model_get_next_state_##name - process an automaton event occurrence \
+ * \
+ * Given the current state (curr_state) and the event (event), returns \
+ * the next state, or INVALID_STATE in case of error. \
+ */ \
+static inline type model_get_next_state_##name(enum states_##name curr_state, \
+ enum events_##name event) \
+{ \
+ if ((curr_state < 0) || (curr_state >= state_max_##name)) \
+ return INVALID_STATE; \
+ \
+ if ((event < 0) || (event >= event_max_##name)) \
+ return INVALID_STATE; \
+ \
+ return automaton_##name.function[curr_state][event]; \
+} \
+ \
+/* \
+ * model_is_final_state_##name - check if the given state is a final state \
+ */ \
+static inline bool model_is_final_state_##name(enum states_##name state) \
+{ \
+ if ((state < 0) || (state >= state_max_##name)) \
+ return 0; \
+ \
+ return automaton_##name.final_states[state]; \
+}
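
For orientation, a rough sketch of how this header is meant to be instantiated (names modeled on the kernel's 'wip' sample monitor; the enums and the automaton_##name description come from dot2k-generated code, not from this header):

    /* dot2k-generated model (sketch) */
    enum states_wip { preemptive_wip = 0, non_preemptive_wip, state_max_wip };
    enum events_wip { preempt_disable_wip = 0, preempt_enable_wip,
                      sched_waking_wip, event_max_wip };

    /*
     * automaton_wip carries state_names, event_names, the function[][]
     * transition matrix, initial_state and final_states.
     */
    DECLARE_AUTOMATA_HELPERS(wip, unsigned char)

    /*
     * After expansion, model_get_next_state_wip(curr, event) walks the
     * transition matrix and model_is_final_state_wip() checks acceptance.
     */
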
diff --git a/include/rv/da_monitor.h b/include/rv/da_monitor.h
new file mode 100644
index 000000000000..9eb75683e012
--- /dev/null
+++ b/include/rv/da_monitor.h
@@ -0,0 +1,544 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
+ *
+ * Deterministic automata (DA) monitor functions, to be used together
+ * with automata models in C generated by the dot2k tool.
+ *
+ * The dot2k tool is available at tools/verification/dot2k/
+ *
+ * For further information, see:
+ * Documentation/trace/rv/da_monitor_synthesis.rst
+ */
+
+#include <rv/automata.h>
+#include <linux/rv.h>
+#include <linux/bug.h>
+
+#ifdef CONFIG_RV_REACTORS
+
+#define DECLARE_RV_REACTING_HELPERS(name, type) \
+static char REACT_MSG_##name[1024]; \
+ \
+static inline char *format_react_msg_##name(type curr_state, type event) \
+{ \
+ snprintf(REACT_MSG_##name, 1024, \
+ "rv: monitor %s does not allow event %s on state %s\n", \
+ #name, \
+ model_get_event_name_##name(event), \
+ model_get_state_name_##name(curr_state)); \
+ return REACT_MSG_##name; \
+} \
+ \
+static void cond_react_##name(char *msg) \
+{ \
+ if (rv_##name.react) \
+ rv_##name.react(msg); \
+} \
+ \
+static bool rv_reacting_on_##name(void) \
+{ \
+ return rv_reacting_on(); \
+}
+
+#else /* CONFIG_RV_REACTORS */
+
+#define DECLARE_RV_REACTING_HELPERS(name, type) \
+static inline char *format_react_msg_##name(type curr_state, type event) \
+{ \
+ return NULL; \
+} \
+ \
+static void cond_react_##name(char *msg) \
+{ \
+ return; \
+} \
+ \
+static bool rv_reacting_on_##name(void) \
+{ \
+ return 0; \
+}
+#endif
+
+/*
+ * Generic helpers for all types of deterministic automata monitors.
+ */
+#define DECLARE_DA_MON_GENERIC_HELPERS(name, type) \
+ \
+DECLARE_RV_REACTING_HELPERS(name, type) \
+ \
+/* \
+ * da_monitor_reset_##name - reset a monitor, setting it to the initial state \
+ */ \
+static inline void da_monitor_reset_##name(struct da_monitor *da_mon) \
+{ \
+ da_mon->monitoring = 0; \
+ da_mon->curr_state = model_get_initial_state_##name(); \
+} \
+ \
+/* \
+ * da_monitor_curr_state_##name - return the current state \
+ */ \
+static inline type da_monitor_curr_state_##name(struct da_monitor *da_mon) \
+{ \
+ return da_mon->curr_state; \
+} \
+ \
+/* \
+ * da_monitor_set_state_##name - set the new current state \
+ */ \
+static inline void \
+da_monitor_set_state_##name(struct da_monitor *da_mon, enum states_##name state) \
+{ \
+ da_mon->curr_state = state; \
+} \
+ \
+/* \
+ * da_monitor_start_##name - start monitoring \
+ * \
+ * The monitor will ignore all events until monitoring is set to true. This \
+ * function needs to be called to tell the monitor to start monitoring. \
+ */ \
+static inline void da_monitor_start_##name(struct da_monitor *da_mon) \
+{ \
+ da_mon->curr_state = model_get_initial_state_##name(); \
+ da_mon->monitoring = 1; \
+} \
+ \
+/* \
+ * da_monitoring_##name - returns true if the monitor is processing events \
+ */ \
+static inline bool da_monitoring_##name(struct da_monitor *da_mon) \
+{ \
+ return da_mon->monitoring; \
+} \
+ \
+/* \
+ * da_monitor_enabled_##name - checks if the monitor is enabled \
+ */ \
+static inline bool da_monitor_enabled_##name(void) \
+{ \
+ /* global switch */ \
+ if (unlikely(!rv_monitoring_on())) \
+ return 0; \
+ \
+ /* monitor enabled */ \
+ if (unlikely(!rv_##name.enabled)) \
+ return 0; \
+ \
+ return 1; \
+} \
+ \
+/* \
+ * da_monitor_handling_event_##name - checks if the monitor is ready to handle events \
+ */ \
+static inline bool da_monitor_handling_event_##name(struct da_monitor *da_mon) \
+{ \
+ \
+ if (!da_monitor_enabled_##name()) \
+ return 0; \
+ \
+ /* monitor is actually monitoring */ \
+ if (unlikely(!da_monitoring_##name(da_mon))) \
+ return 0; \
+ \
+ return 1; \
+}
+
+/*
+ * Event handler for implicit monitors. An implicit monitor is one for which
+ * the handler does not need to specify which da_monitor to manipulate:
+ * examples of implicit monitors are the per-cpu and the global ones.
+ */
+#define DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \
+ \
+static inline bool \
+da_event_##name(struct da_monitor *da_mon, enum events_##name event) \
+{ \
+ type curr_state = da_monitor_curr_state_##name(da_mon); \
+ type next_state = model_get_next_state_##name(curr_state, event); \
+ \
+ if (next_state != INVALID_STATE) { \
+ da_monitor_set_state_##name(da_mon, next_state); \
+ \
+ trace_event_##name(model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event), \
+ model_get_state_name_##name(next_state), \
+ model_is_final_state_##name(next_state)); \
+ \
+ return true; \
+ } \
+ \
+ if (rv_reacting_on_##name()) \
+ cond_react_##name(format_react_msg_##name(curr_state, event)); \
+ \
+ trace_error_##name(model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event)); \
+ \
+ return false; \
+} \
+
+/*
+ * Event handler for per_task monitors.
+ */
+#define DECLARE_DA_MON_MODEL_HANDLER_PER_TASK(name, type) \
+ \
+static inline bool da_event_##name(struct da_monitor *da_mon, struct task_struct *tsk, \
+ enum events_##name event) \
+{ \
+ type curr_state = da_monitor_curr_state_##name(da_mon); \
+ type next_state = model_get_next_state_##name(curr_state, event); \
+ \
+ if (next_state != INVALID_STATE) { \
+ da_monitor_set_state_##name(da_mon, next_state); \
+ \
+ trace_event_##name(tsk->pid, \
+ model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event), \
+ model_get_state_name_##name(next_state), \
+ model_is_final_state_##name(next_state)); \
+ \
+ return true; \
+ } \
+ \
+ if (rv_reacting_on_##name()) \
+ cond_react_##name(format_react_msg_##name(curr_state, event)); \
+ \
+ trace_error_##name(tsk->pid, \
+ model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event)); \
+ \
+ return false; \
+}
+
+/*
+ * Functions to define, init and get a global monitor.
+ */
+#define DECLARE_DA_MON_INIT_GLOBAL(name, type) \
+ \
+/* \
+ * global monitor (a single variable) \
+ */ \
+static struct da_monitor da_mon_##name; \
+ \
+/* \
+ * da_get_monitor_##name - return the global monitor address \
+ */ \
+static struct da_monitor *da_get_monitor_##name(void) \
+{ \
+ return &da_mon_##name; \
+} \
+ \
+/* \
+ * da_monitor_reset_all_##name - reset the single monitor \
+ */ \
+static void da_monitor_reset_all_##name(void) \
+{ \
+ da_monitor_reset_##name(da_get_monitor_##name()); \
+} \
+ \
+/* \
+ * da_monitor_init_##name - initialize a monitor \
+ */ \
+static inline int da_monitor_init_##name(void) \
+{ \
+ da_monitor_reset_all_##name(); \
+ return 0; \
+} \
+ \
+/* \
+ * da_monitor_destroy_##name - destroy the monitor \
+ */ \
+static inline void da_monitor_destroy_##name(void) \
+{ \
+ return; \
+}
+
+/*
+ * Functions to define, init and get a per-cpu monitor.
+ */
+#define DECLARE_DA_MON_INIT_PER_CPU(name, type) \
+ \
+/* \
+ * per-cpu monitor variables \
+ */ \
+DEFINE_PER_CPU(struct da_monitor, da_mon_##name); \
+ \
+/* \
+ * da_get_monitor_##name - return the current CPU's monitor address \
+ */ \
+static struct da_monitor *da_get_monitor_##name(void) \
+{ \
+ return this_cpu_ptr(&da_mon_##name); \
+} \
+ \
+/* \
+ * da_monitor_reset_all_##name - reset all CPUs' monitors \
+ */ \
+static void da_monitor_reset_all_##name(void) \
+{ \
+ struct da_monitor *da_mon; \
+ int cpu; \
+ for_each_cpu(cpu, cpu_online_mask) { \
+ da_mon = per_cpu_ptr(&da_mon_##name, cpu); \
+ da_monitor_reset_##name(da_mon); \
+ } \
+} \
+ \
+/* \
+ * da_monitor_init_##name - initialize all CPUs' monitors \
+ */ \
+static inline int da_monitor_init_##name(void) \
+{ \
+ da_monitor_reset_all_##name(); \
+ return 0; \
+} \
+ \
+/* \
+ * da_monitor_destroy_##name - destroy the monitor \
+ */ \
+static inline void da_monitor_destroy_##name(void) \
+{ \
+ return; \
+}
+
+/*
+ * Functions to define, init and get a per-task monitor.
+ */
+#define DECLARE_DA_MON_INIT_PER_TASK(name, type) \
+ \
+/* \
+ * The per-task monitor is stored in a vector in the task struct. This \
+ * variable stores the position in the vector reserved for this monitor. \
+ */ \
+static int task_mon_slot_##name = RV_PER_TASK_MONITOR_INIT; \
+ \
+/* \
+ * da_get_monitor_##name - return the monitor in the allocated slot for tsk \
+ */ \
+static inline struct da_monitor *da_get_monitor_##name(struct task_struct *tsk) \
+{ \
+ return &tsk->rv[task_mon_slot_##name].da_mon; \
+} \
+ \
+static void da_monitor_reset_all_##name(void) \
+{ \
+ struct task_struct *g, *p; \
+ \
+ read_lock(&tasklist_lock); \
+ for_each_process_thread(g, p) \
+ da_monitor_reset_##name(da_get_monitor_##name(p)); \
+ read_unlock(&tasklist_lock); \
+} \
+ \
+/* \
+ * da_monitor_init_##name - initialize the per-task monitor \
+ * \
+ * Try to allocate a slot in the task's vector of monitors. If there \
+ * is an available slot, use it and reset all tasks' monitors. \
+ */ \
+static int da_monitor_init_##name(void) \
+{ \
+ int slot; \
+ \
+ slot = rv_get_task_monitor_slot(); \
+ if (slot < 0 || slot >= RV_PER_TASK_MONITOR_INIT) \
+ return slot; \
+ \
+ task_mon_slot_##name = slot; \
+ \
+ da_monitor_reset_all_##name(); \
+ return 0; \
+} \
+ \
+/* \
+ * da_monitor_destroy_##name - release the allocated slot \
+ */ \
+static inline void da_monitor_destroy_##name(void) \
+{ \
+ if (task_mon_slot_##name == RV_PER_TASK_MONITOR_INIT) { \
+ WARN_ONCE(1, "Disabling a disabled monitor: " #name); \
+ return; \
+ } \
+ rv_put_task_monitor_slot(task_mon_slot_##name); \
+ task_mon_slot_##name = RV_PER_TASK_MONITOR_INIT; \
+ return; \
+}
+
+/*
+ * Handle event for implicit monitor: da_get_monitor_##name() will figure out
+ * the monitor.
+ */
+#define DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type) \
+ \
+static inline void __da_handle_event_##name(struct da_monitor *da_mon, \
+ enum events_##name event) \
+{ \
+ bool retval; \
+ \
+ retval = da_event_##name(da_mon, event); \
+ if (!retval) \
+ da_monitor_reset_##name(da_mon); \
+} \
+ \
+/* \
+ * da_handle_event_##name - handle an event \
+ */ \
+static inline void da_handle_event_##name(enum events_##name event) \
+{ \
+ struct da_monitor *da_mon = da_get_monitor_##name(); \
+ bool retval; \
+ \
+ retval = da_monitor_handling_event_##name(da_mon); \
+ if (!retval) \
+ return; \
+ \
+ __da_handle_event_##name(da_mon, event); \
+} \
+ \
+/* \
+ * da_handle_start_event_##name - start monitoring or handle event \
+ * \
+ * This function is used to notify the monitor that the system is returning \
+ * to the initial state, so the monitor can start monitoring at the next event. \
+ * Thus: \
+ * \
+ * If the monitor already started, handle the event. \
+ * If the monitor did not start yet, start the monitor but skip the event. \
+ */ \
+static inline bool da_handle_start_event_##name(enum events_##name event) \
+{ \
+ struct da_monitor *da_mon; \
+ \
+ if (!da_monitor_enabled_##name()) \
+ return 0; \
+ \
+ da_mon = da_get_monitor_##name(); \
+ \
+ if (unlikely(!da_monitoring_##name(da_mon))) { \
+ da_monitor_start_##name(da_mon); \
+ return 0; \
+ } \
+ \
+ __da_handle_event_##name(da_mon, event); \
+ \
+ return 1; \
+} \
+ \
+/* \
+ * da_handle_start_run_event_##name - start monitoring and handle event \
+ * \
+ * This function is used to notify the monitor that the system is in the \
+ * initial state, so the monitor can start monitoring and handling events. \
+ */ \
+static inline bool da_handle_start_run_event_##name(enum events_##name event) \
+{ \
+ struct da_monitor *da_mon; \
+ \
+ if (!da_monitor_enabled_##name()) \
+ return 0; \
+ \
+ da_mon = da_get_monitor_##name(); \
+ \
+ if (unlikely(!da_monitoring_##name(da_mon))) \
+ da_monitor_start_##name(da_mon); \
+ \
+ __da_handle_event_##name(da_mon, event); \
+ \
+ return 1; \
+}
+
+/*
+ * Handle event for per task.
+ */
+#define DECLARE_DA_MON_MONITOR_HANDLER_PER_TASK(name, type) \
+ \
+static inline void \
+__da_handle_event_##name(struct da_monitor *da_mon, struct task_struct *tsk, \
+ enum events_##name event) \
+{ \
+ bool retval; \
+ \
+ retval = da_event_##name(da_mon, tsk, event); \
+ if (!retval) \
+ da_monitor_reset_##name(da_mon); \
+} \
+ \
+/* \
+ * da_handle_event_##name - handle an event \
+ */ \
+static inline void \
+da_handle_event_##name(struct task_struct *tsk, enum events_##name event) \
+{ \
+ struct da_monitor *da_mon = da_get_monitor_##name(tsk); \
+ bool retval; \
+ \
+ retval = da_monitor_handling_event_##name(da_mon); \
+ if (!retval) \
+ return; \
+ \
+ __da_handle_event_##name(da_mon, tsk, event); \
+} \
+ \
+/* \
+ * da_handle_start_event_##name - start monitoring or handle event \
+ * \
+ * This function is used to notify the monitor that the system is returning \
+ * to the initial state, so the monitor can start monitoring at the next event. \
+ * Thus: \
+ * \
+ * If the monitor already started, handle the event. \
+ * If the monitor did not start yet, start the monitor but skip the event. \
+ */ \
+static inline bool \
+da_handle_start_event_##name(struct task_struct *tsk, enum events_##name event) \
+{ \
+ struct da_monitor *da_mon; \
+ \
+ if (!da_monitor_enabled_##name()) \
+ return 0; \
+ \
+ da_mon = da_get_monitor_##name(tsk); \
+ \
+ if (unlikely(!da_monitoring_##name(da_mon))) { \
+ da_monitor_start_##name(da_mon); \
+ return 0; \
+ } \
+ \
+ __da_handle_event_##name(da_mon, tsk, event); \
+ \
+ return 1; \
+}
+
+/*
+ * Entry point for the global monitor.
+ */
+#define DECLARE_DA_MON_GLOBAL(name, type) \
+ \
+DECLARE_AUTOMATA_HELPERS(name, type) \
+DECLARE_DA_MON_GENERIC_HELPERS(name, type) \
+DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \
+DECLARE_DA_MON_INIT_GLOBAL(name, type) \
+DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type)
+
+/*
+ * Entry point for the per-cpu monitor.
+ */
+#define DECLARE_DA_MON_PER_CPU(name, type) \
+ \
+DECLARE_AUTOMATA_HELPERS(name, type) \
+DECLARE_DA_MON_GENERIC_HELPERS(name, type) \
+DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \
+DECLARE_DA_MON_INIT_PER_CPU(name, type) \
+DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type)
+
+/*
+ * Entry point for the per-task monitor.
+ */
+#define DECLARE_DA_MON_PER_TASK(name, type) \
+ \
+DECLARE_AUTOMATA_HELPERS(name, type) \
+DECLARE_DA_MON_GENERIC_HELPERS(name, type) \
+DECLARE_DA_MON_MODEL_HANDLER_PER_TASK(name, type) \
+DECLARE_DA_MON_INIT_PER_TASK(name, type) \
+DECLARE_DA_MON_MONITOR_HANDLER_PER_TASK(name, type)
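
A hedged usage sketch, again modeled on the kernel's 'wip' monitor: the per-cpu entry point expands all of the helpers above, and tracepoint handlers then feed events into the monitor (handler names are assumed):

    DECLARE_DA_MON_PER_CPU(wip, unsigned char)

    static void handle_preempt_disable(void *data, unsigned long ip,
                                       unsigned long parent_ip)
    {
            /* ordinary event: the monitor must already be running */
            da_handle_event_wip(preempt_disable_wip);
    }

    static void handle_preempt_enable(void *data, unsigned long ip,
                                      unsigned long parent_ip)
    {
            /* returns to the initial state: may (re)start the monitor */
            da_handle_start_event_wip(preempt_enable_wip);
    }
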
diff --git a/include/rv/instrumentation.h b/include/rv/instrumentation.h
new file mode 100644
index 000000000000..d4e7a02ede1a
--- /dev/null
+++ b/include/rv/instrumentation.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
+ *
+ * Helper functions to facilitate the instrumentation of auto-generated
+ * RV monitors created by dot2k.
+ *
+ * The dot2k tool is available at tools/verification/dot2/
+ */
+
+#include <linux/ftrace.h>
+
+/*
+ * rv_attach_trace_probe - check and attach a handler function to a tracepoint
+ */
+#define rv_attach_trace_probe(monitor, tp, rv_handler) \
+ do { \
+ check_trace_callback_type_##tp(rv_handler); \
+ WARN_ONCE(register_trace_##tp(rv_handler, NULL), \
+ "failed attaching " #monitor " " #tp " handler"); \
+ } while (0)
+
+/*
+ * rv_detach_trace_probe - detach a handler function from a tracepoint
+ */
+#define rv_detach_trace_probe(monitor, tp, rv_handler) \
+ do { \
+ unregister_trace_##tp(rv_handler, NULL); \
+ } while (0)
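
A sketch of how a monitor's enable/disable paths would use these wrappers (monitor and handler names are assumed; preempt_disable/preempt_enable are existing tracepoints):

    static int enable_wip(void)
    {
            int retval;

            retval = da_monitor_init_wip();
            if (retval)
                    return retval;

            rv_attach_trace_probe("wip", preempt_disable, handle_preempt_disable);
            rv_attach_trace_probe("wip", preempt_enable, handle_preempt_enable);
            return 0;
    }

    static void disable_wip(void)
    {
            rv_wip.enabled = 0;

            rv_detach_trace_probe("wip", preempt_disable, handle_preempt_disable);
            rv_detach_trace_probe("wip", preempt_enable, handle_preempt_enable);

            da_monitor_destroy_wip();
    }
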
diff --git a/include/scsi/fc/fc_ms.h b/include/scsi/fc/fc_ms.h
index 800d53dc9470..56a5d2b5a624 100644
--- a/include/scsi/fc/fc_ms.h
+++ b/include/scsi/fc/fc_ms.h
@@ -25,6 +25,12 @@
#define FC_FDMI_SUBTYPE 0x10 /* fs_ct_hdr.ct_fs_subtype */
/*
+ * Management server FDMI specifications.
+ */
+#define FDMI_V1 1 /* FDMI version 1 specifications */
+#define FDMI_V2 2 /* FDMI version 2 specifications */
+
+/*
* Management server FDMI Requests.
*/
enum fc_fdmi_req {
@@ -57,6 +63,13 @@ enum fc_fdmi_hba_attr_type {
FC_FDMI_HBA_ATTR_FIRMWAREVERSION = 0x0009,
FC_FDMI_HBA_ATTR_OSNAMEVERSION = 0x000A,
FC_FDMI_HBA_ATTR_MAXCTPAYLOAD = 0x000B,
+ FC_FDMI_HBA_ATTR_NODESYMBLNAME = 0x000C,
+ FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO = 0x000D,
+ FC_FDMI_HBA_ATTR_NUMBEROFPORTS = 0x000E,
+ FC_FDMI_HBA_ATTR_FABRICNAME = 0x000F,
+ FC_FDMI_HBA_ATTR_BIOSVERSION = 0x0010,
+ FC_FDMI_HBA_ATTR_BIOSSTATE = 0x0011,
+ FC_FDMI_HBA_ATTR_VENDORIDENTIFIER = 0x00E0,
};
/*
@@ -65,14 +78,21 @@ enum fc_fdmi_hba_attr_type {
#define FC_FDMI_HBA_ATTR_NODENAME_LEN 8
#define FC_FDMI_HBA_ATTR_MANUFACTURER_LEN 64
#define FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN 64
-#define FC_FDMI_HBA_ATTR_MODEL_LEN 256
-#define FC_FDMI_HBA_ATTR_MODELDESCR_LEN 256
-#define FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN 256
-#define FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN 256
-#define FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN 256
-#define FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN 256
-#define FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN 256
+#define FC_FDMI_HBA_ATTR_MODEL_LEN 64
+#define FC_FDMI_HBA_ATTR_MODELDESCR_LEN 64
+#define FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN 64
+#define FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN 64
+#define FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN 64
+#define FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN 64
+#define FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN 128
#define FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN 4
+#define FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN 64
+#define FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN 4
+#define FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN 4
+#define FC_FDMI_HBA_ATTR_FABRICNAME_LEN 8
+#define FC_FDMI_HBA_ATTR_BIOSVERSION_LEN 64
+#define FC_FDMI_HBA_ATTR_BIOSSTATE_LEN 4
+#define FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN 8
/*
* Port Attribute Type
@@ -84,6 +104,16 @@ enum fc_fdmi_port_attr_type {
FC_FDMI_PORT_ATTR_MAXFRAMESIZE = 0x0004,
FC_FDMI_PORT_ATTR_OSDEVICENAME = 0x0005,
FC_FDMI_PORT_ATTR_HOSTNAME = 0x0006,
+ FC_FDMI_PORT_ATTR_NODENAME = 0x0007,
+ FC_FDMI_PORT_ATTR_PORTNAME = 0x0008,
+ FC_FDMI_PORT_ATTR_SYMBOLICNAME = 0x0009,
+ FC_FDMI_PORT_ATTR_PORTTYPE = 0x000A,
+ FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC = 0x000B,
+ FC_FDMI_PORT_ATTR_FABRICNAME = 0x000C,
+ FC_FDMI_PORT_ATTR_CURRENTFC4TYPE = 0x000D,
+ FC_FDMI_PORT_ATTR_PORTSTATE = 0x101,
+ FC_FDMI_PORT_ATTR_DISCOVEREDPORTS = 0x102,
+ FC_FDMI_PORT_ATTR_PORTID = 0x103,
};
/*
@@ -95,6 +125,17 @@ enum fc_fdmi_port_attr_type {
#define FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN 4
#define FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN 256
#define FC_FDMI_PORT_ATTR_HOSTNAME_LEN 256
+#define FC_FDMI_PORT_ATTR_NODENAME_LEN 8
+#define FC_FDMI_PORT_ATTR_PORTNAME_LEN 8
+#define FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN 256
+#define FC_FDMI_PORT_ATTR_PORTTYPE_LEN 4
+#define FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN 4
+#define FC_FDMI_PORT_ATTR_FABRICNAME_LEN 8
+#define FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN 32
+#define FC_FDMI_PORT_ATTR_PORTSTATE_LEN 4
+#define FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN 4
+#define FC_FDMI_PORT_ATTR_PORTID_LEN 4
+
/*
* HBA Attribute ID
@@ -117,7 +158,7 @@ struct fc_fdmi_port_name {
struct fc_fdmi_attr_entry {
__be16 type;
__be16 len;
- __u8 value[1];
+ __u8 value[];
} __attribute__((__packed__));
/*
@@ -125,7 +166,7 @@ struct fc_fdmi_attr_entry {
*/
struct fs_fdmi_attrs {
__be32 numattrs;
- struct fc_fdmi_attr_entry attr[1];
+ struct fc_fdmi_attr_entry attr[];
} __attribute__((__packed__));
/*
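
With value[1] turned into a flexible array, sizeof(struct fc_fdmi_attr_entry) no longer counts a trailing placeholder byte; consumers still advance through the packed attribute list by each entry's on-wire length. A sketch of such a walker (assumed helper, not part of the patch; the 16-bit len field includes the 4-byte type/len header):

    static void fdmi_walk_attrs(struct fs_fdmi_attrs *attrs)
    {
            struct fc_fdmi_attr_entry *entry = attrs->attr;
            u32 i, numattrs = ntohl(attrs->numattrs);

            for (i = 0; i < numattrs; i++) {
                    u16 len = ntohs(entry->len);

                    /* entry->value holds len - 4 bytes of payload */
                    entry = (struct fc_fdmi_attr_entry *)((char *)entry + len);
            }
    }
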
diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h
deleted file mode 100644
index c6660205d73f..000000000000
--- a/include/scsi/fc_encode.h
+++ /dev/null
@@ -1,727 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright(c) 2008 Intel Corporation. All rights reserved.
- *
- * Maintained at www.Open-FCoE.org
- */
-
-#ifndef _FC_ENCODE_H_
-#define _FC_ENCODE_H_
-#include <asm/unaligned.h>
-#include <linux/utsname.h>
-
-/*
- * F_CTL values for simple requests and responses.
- */
-#define FC_FCTL_REQ (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)
-#define FC_FCTL_RESP (FC_FC_EX_CTX | FC_FC_LAST_SEQ | \
- FC_FC_END_SEQ | FC_FC_SEQ_INIT)
-
-struct fc_ns_rft {
- struct fc_ns_fid fid; /* port ID object */
- struct fc_ns_fts fts; /* FC4-types object */
-};
-
-struct fc_ct_req {
- struct fc_ct_hdr hdr;
- union {
- struct fc_ns_gid_ft gid;
- struct fc_ns_rn_id rn;
- struct fc_ns_rft rft;
- struct fc_ns_rff_id rff;
- struct fc_ns_fid fid;
- struct fc_ns_rsnn snn;
- struct fc_ns_rspn spn;
- struct fc_fdmi_rhba rhba;
- struct fc_fdmi_rpa rpa;
- struct fc_fdmi_dprt dprt;
- struct fc_fdmi_dhba dhba;
- } payload;
-};
-
-static inline void __fc_fill_fc_hdr(struct fc_frame_header *fh,
- enum fc_rctl r_ctl,
- u32 did, u32 sid, enum fc_fh_type type,
- u32 f_ctl, u32 parm_offset)
-{
- WARN_ON(r_ctl == 0);
- fh->fh_r_ctl = r_ctl;
- hton24(fh->fh_d_id, did);
- hton24(fh->fh_s_id, sid);
- fh->fh_type = type;
- hton24(fh->fh_f_ctl, f_ctl);
- fh->fh_cs_ctl = 0;
- fh->fh_df_ctl = 0;
- fh->fh_parm_offset = htonl(parm_offset);
-}
-
-/**
- * fill FC header fields in specified fc_frame
- */
-static inline void fc_fill_fc_hdr(struct fc_frame *fp, enum fc_rctl r_ctl,
- u32 did, u32 sid, enum fc_fh_type type,
- u32 f_ctl, u32 parm_offset)
-{
- struct fc_frame_header *fh;
-
- fh = fc_frame_header_get(fp);
- __fc_fill_fc_hdr(fh, r_ctl, did, sid, type, f_ctl, parm_offset);
-}
-
-/**
- * fc_adisc_fill() - Fill in adisc request frame
- * @lport: local port.
- * @fp: fc frame where payload will be placed.
- */
-static inline void fc_adisc_fill(struct fc_lport *lport, struct fc_frame *fp)
-{
- struct fc_els_adisc *adisc;
-
- adisc = fc_frame_payload_get(fp, sizeof(*adisc));
- memset(adisc, 0, sizeof(*adisc));
- adisc->adisc_cmd = ELS_ADISC;
- put_unaligned_be64(lport->wwpn, &adisc->adisc_wwpn);
- put_unaligned_be64(lport->wwnn, &adisc->adisc_wwnn);
- hton24(adisc->adisc_port_id, lport->port_id);
-}
-
-/**
- * fc_ct_hdr_fill- fills ct header and reset ct payload
- * returns pointer to ct request.
- */
-static inline struct fc_ct_req *fc_ct_hdr_fill(const struct fc_frame *fp,
- unsigned int op, size_t req_size,
- enum fc_ct_fs_type fs_type,
- u8 subtype)
-{
- struct fc_ct_req *ct;
- size_t ct_plen;
-
- ct_plen = sizeof(struct fc_ct_hdr) + req_size;
- ct = fc_frame_payload_get(fp, ct_plen);
- memset(ct, 0, ct_plen);
- ct->hdr.ct_rev = FC_CT_REV;
- ct->hdr.ct_fs_type = fs_type;
- ct->hdr.ct_fs_subtype = subtype;
- ct->hdr.ct_cmd = htons((u16) op);
- return ct;
-}
-
-/**
- * fc_ct_ns_fill() - Fill in a name service request frame
- * @lport: local port.
- * @fc_id: FC_ID of non-destination rport for GPN_ID and similar inquiries.
- * @fp: frame to contain payload.
- * @op: CT opcode.
- * @r_ctl: pointer to FC header R_CTL.
- * @fh_type: pointer to FC-4 type.
- */
-static inline int fc_ct_ns_fill(struct fc_lport *lport,
- u32 fc_id, struct fc_frame *fp,
- unsigned int op, enum fc_rctl *r_ctl,
- enum fc_fh_type *fh_type)
-{
- struct fc_ct_req *ct;
- size_t len;
-
- switch (op) {
- case FC_NS_GPN_FT:
- ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_gid_ft),
- FC_FST_DIR, FC_NS_SUBTYPE);
- ct->payload.gid.fn_fc4_type = FC_TYPE_FCP;
- break;
-
- case FC_NS_GPN_ID:
- ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_fid),
- FC_FST_DIR, FC_NS_SUBTYPE);
- ct->payload.gid.fn_fc4_type = FC_TYPE_FCP;
- hton24(ct->payload.fid.fp_fid, fc_id);
- break;
-
- case FC_NS_RFT_ID:
- ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rft),
- FC_FST_DIR, FC_NS_SUBTYPE);
- hton24(ct->payload.rft.fid.fp_fid, lport->port_id);
- ct->payload.rft.fts = lport->fcts;
- break;
-
- case FC_NS_RFF_ID:
- ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rff_id),
- FC_FST_DIR, FC_NS_SUBTYPE);
- hton24(ct->payload.rff.fr_fid.fp_fid, lport->port_id);
- ct->payload.rff.fr_type = FC_TYPE_FCP;
- if (lport->service_params & FCP_SPPF_INIT_FCN)
- ct->payload.rff.fr_feat = FCP_FEAT_INIT;
- if (lport->service_params & FCP_SPPF_TARG_FCN)
- ct->payload.rff.fr_feat |= FCP_FEAT_TARG;
- break;
-
- case FC_NS_RNN_ID:
- ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rn_id),
- FC_FST_DIR, FC_NS_SUBTYPE);
- hton24(ct->payload.rn.fr_fid.fp_fid, lport->port_id);
- put_unaligned_be64(lport->wwnn, &ct->payload.rn.fr_wwn);
- break;
-
- case FC_NS_RSPN_ID:
- len = strnlen(fc_host_symbolic_name(lport->host), 255);
- ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rspn) + len,
- FC_FST_DIR, FC_NS_SUBTYPE);
- hton24(ct->payload.spn.fr_fid.fp_fid, lport->port_id);
- strncpy(ct->payload.spn.fr_name,
- fc_host_symbolic_name(lport->host), len);
- ct->payload.spn.fr_name_len = len;
- break;
-
- case FC_NS_RSNN_NN:
- len = strnlen(fc_host_symbolic_name(lport->host), 255);
- ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rsnn) + len,
- FC_FST_DIR, FC_NS_SUBTYPE);
- put_unaligned_be64(lport->wwnn, &ct->payload.snn.fr_wwn);
- strncpy(ct->payload.snn.fr_name,
- fc_host_symbolic_name(lport->host), len);
- ct->payload.snn.fr_name_len = len;
- break;
-
- default:
- return -EINVAL;
- }
- *r_ctl = FC_RCTL_DD_UNSOL_CTL;
- *fh_type = FC_TYPE_CT;
- return 0;
-}
-
-/**
- * fc_ct_ms_fill() - Fill in a mgmt service request frame
- * @lport: local port.
- * @fc_id: FC_ID of non-destination rport for GPN_ID and similar inquiries.
- * @fp: frame to contain payload.
- * @op: CT opcode.
- * @r_ctl: pointer to FC header R_CTL.
- * @fh_type: pointer to FC-4 type.
- */
-static inline int fc_ct_ms_fill(struct fc_lport *lport,
- u32 fc_id, struct fc_frame *fp,
- unsigned int op, enum fc_rctl *r_ctl,
- enum fc_fh_type *fh_type)
-{
- struct fc_ct_req *ct;
- size_t len;
- struct fc_fdmi_attr_entry *entry;
- struct fs_fdmi_attrs *hba_attrs;
- int numattrs = 0;
-
- switch (op) {
- case FC_FDMI_RHBA:
- numattrs = 10;
- len = sizeof(struct fc_fdmi_rhba);
- len -= sizeof(struct fc_fdmi_attr_entry);
- len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
- len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
- len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
- len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
- len += FC_FDMI_HBA_ATTR_MODEL_LEN;
- len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN;
- len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN;
- len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN;
- len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
- len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
- len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
- ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
- FC_FDMI_SUBTYPE);
-
- /* HBA Identifier */
- put_unaligned_be64(lport->wwpn, &ct->payload.rhba.hbaid.id);
- /* Number of Ports - always 1 */
- put_unaligned_be32(1, &ct->payload.rhba.port.numport);
- /* Port Name */
- put_unaligned_be64(lport->wwpn,
- &ct->payload.rhba.port.port[0].portname);
-
- /* HBA Attributes */
- put_unaligned_be32(numattrs,
- &ct->payload.rhba.hba_attrs.numattrs);
- hba_attrs = &ct->payload.rhba.hba_attrs;
- entry = (struct fc_fdmi_attr_entry *)hba_attrs->attr;
- /* NodeName*/
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
- put_unaligned_be16(FC_FDMI_HBA_ATTR_NODENAME,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- put_unaligned_be64(lport->wwnn,
- (__be64 *)&entry->value[0]);
-
- /* Manufacturer */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_HBA_ATTR_NODENAME_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
- put_unaligned_be16(FC_FDMI_HBA_ATTR_MANUFACTURER,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- strncpy((char *)&entry->value,
- fc_host_manufacturer(lport->host),
- FC_FDMI_HBA_ATTR_MANUFACTURER_LEN);
-
- /* SerialNumber */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_HBA_ATTR_MANUFACTURER_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
- put_unaligned_be16(FC_FDMI_HBA_ATTR_SERIALNUMBER,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- strncpy((char *)&entry->value,
- fc_host_serial_number(lport->host),
- FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN);
-
- /* Model */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_HBA_ATTR_MODEL_LEN;
- put_unaligned_be16(FC_FDMI_HBA_ATTR_MODEL,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- strncpy((char *)&entry->value,
- fc_host_model(lport->host),
- FC_FDMI_HBA_ATTR_MODEL_LEN);
-
- /* Model Description */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_HBA_ATTR_MODEL_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN;
- put_unaligned_be16(FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- strncpy((char *)&entry->value,
- fc_host_model_description(lport->host),
- FC_FDMI_HBA_ATTR_MODELDESCR_LEN);
-
- /* Hardware Version */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_HBA_ATTR_MODELDESCR_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN;
- put_unaligned_be16(FC_FDMI_HBA_ATTR_HARDWAREVERSION,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- strncpy((char *)&entry->value,
- fc_host_hardware_version(lport->host),
- FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN);
-
- /* Driver Version */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN;
- put_unaligned_be16(FC_FDMI_HBA_ATTR_DRIVERVERSION,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- strncpy((char *)&entry->value,
- fc_host_driver_version(lport->host),
- FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN);
-
- /* OptionROM Version */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
- put_unaligned_be16(FC_FDMI_HBA_ATTR_OPTIONROMVERSION,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- strncpy((char *)&entry->value,
- fc_host_optionrom_version(lport->host),
- FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN);
-
- /* Firmware Version */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
- put_unaligned_be16(FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- strncpy((char *)&entry->value,
- fc_host_firmware_version(lport->host),
- FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN);
-
- /* OS Name and Version */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
- put_unaligned_be16(FC_FDMI_HBA_ATTR_OSNAMEVERSION,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- snprintf((char *)&entry->value,
- FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN,
- "%s v%s",
- init_utsname()->sysname,
- init_utsname()->release);
- break;
- case FC_FDMI_RPA:
- numattrs = 6;
- len = sizeof(struct fc_fdmi_rpa);
- len -= sizeof(struct fc_fdmi_attr_entry);
- len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
- len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
- len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
- len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
- len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
- len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
- len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
- ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
- FC_FDMI_SUBTYPE);
-
- /* Port Name */
- put_unaligned_be64(lport->wwpn,
- &ct->payload.rpa.port.portname);
-
- /* Port Attributes */
- put_unaligned_be32(numattrs,
- &ct->payload.rpa.hba_attrs.numattrs);
-
- hba_attrs = &ct->payload.rpa.hba_attrs;
- entry = (struct fc_fdmi_attr_entry *)hba_attrs->attr;
-
- /* FC4 types */
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
- put_unaligned_be16(FC_FDMI_PORT_ATTR_FC4TYPES,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- memcpy(&entry->value, fc_host_supported_fc4s(lport->host),
- FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
-
- /* Supported Speed */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
- put_unaligned_be16(FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
-
- put_unaligned_be32(fc_host_supported_speeds(lport->host),
- &entry->value);
-
- /* Current Port Speed */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
- put_unaligned_be16(FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- put_unaligned_be32(lport->link_speed,
- &entry->value);
-
- /* Max Frame Size */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
- put_unaligned_be16(FC_FDMI_PORT_ATTR_MAXFRAMESIZE,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- put_unaligned_be32(fc_host_maxframe_size(lport->host),
- &entry->value);
-
- /* OS Device Name */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
- put_unaligned_be16(FC_FDMI_PORT_ATTR_OSDEVICENAME,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- /* Use the sysfs device name */
- strncpy((char *)&entry->value,
- dev_name(&lport->host->shost_gendev),
- strnlen(dev_name(&lport->host->shost_gendev),
- FC_FDMI_PORT_ATTR_HOSTNAME_LEN));
-
- /* Host Name */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
- put_unaligned_be16(FC_FDMI_PORT_ATTR_HOSTNAME,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- if (strlen(fc_host_system_hostname(lport->host)))
- strncpy((char *)&entry->value,
- fc_host_system_hostname(lport->host),
- strnlen(fc_host_system_hostname(lport->host),
- FC_FDMI_PORT_ATTR_HOSTNAME_LEN));
- else
- strncpy((char *)&entry->value,
- init_utsname()->nodename,
- FC_FDMI_PORT_ATTR_HOSTNAME_LEN);
- break;
- case FC_FDMI_DPRT:
- len = sizeof(struct fc_fdmi_dprt);
- ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
- FC_FDMI_SUBTYPE);
- /* Port Name */
- put_unaligned_be64(lport->wwpn,
- &ct->payload.dprt.port.portname);
- break;
- case FC_FDMI_DHBA:
- len = sizeof(struct fc_fdmi_dhba);
- ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
- FC_FDMI_SUBTYPE);
- /* HBA Identifier */
- put_unaligned_be64(lport->wwpn, &ct->payload.dhba.hbaid.id);
- break;
- default:
- return -EINVAL;
- }
- *r_ctl = FC_RCTL_DD_UNSOL_CTL;
- *fh_type = FC_TYPE_CT;
- return 0;
-}
-
-/**
- * fc_ct_fill() - Fill in a common transport service request frame
- * @lport: local port.
- * @fc_id: FC_ID of non-destination rport for GPN_ID and similar inquiries.
- * @fp: frame to contain payload.
- * @op: CT opcode.
- * @r_ctl: pointer to FC header R_CTL.
- * @fh_type: pointer to FC-4 type.
- */
-static inline int fc_ct_fill(struct fc_lport *lport,
- u32 fc_id, struct fc_frame *fp,
- unsigned int op, enum fc_rctl *r_ctl,
- enum fc_fh_type *fh_type, u32 *did)
-{
- int rc = -EINVAL;
-
- switch (fc_id) {
- case FC_FID_MGMT_SERV:
- rc = fc_ct_ms_fill(lport, fc_id, fp, op, r_ctl, fh_type);
- *did = FC_FID_MGMT_SERV;
- break;
- case FC_FID_DIR_SERV:
- default:
- rc = fc_ct_ns_fill(lport, fc_id, fp, op, r_ctl, fh_type);
- *did = FC_FID_DIR_SERV;
- break;
- }
-
- return rc;
-}
-/**
- * fc_plogi_fill - Fill in plogi request frame
- */
-static inline void fc_plogi_fill(struct fc_lport *lport, struct fc_frame *fp,
- unsigned int op)
-{
- struct fc_els_flogi *plogi;
- struct fc_els_csp *csp;
- struct fc_els_cssp *cp;
-
- plogi = fc_frame_payload_get(fp, sizeof(*plogi));
- memset(plogi, 0, sizeof(*plogi));
- plogi->fl_cmd = (u8) op;
- put_unaligned_be64(lport->wwpn, &plogi->fl_wwpn);
- put_unaligned_be64(lport->wwnn, &plogi->fl_wwnn);
-
- csp = &plogi->fl_csp;
- csp->sp_hi_ver = 0x20;
- csp->sp_lo_ver = 0x20;
- csp->sp_bb_cred = htons(10); /* this gets set by gateway */
- csp->sp_bb_data = htons((u16) lport->mfs);
- cp = &plogi->fl_cssp[3 - 1]; /* class 3 parameters */
- cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
- csp->sp_features = htons(FC_SP_FT_CIRO);
- csp->sp_tot_seq = htons(255); /* seq. we accept */
- csp->sp_rel_off = htons(0x1f);
- csp->sp_e_d_tov = htonl(lport->e_d_tov);
-
- cp->cp_rdfs = htons((u16) lport->mfs);
- cp->cp_con_seq = htons(255);
- cp->cp_open_seq = 1;
-}
-
-/**
- * fc_flogi_fill - Fill in a flogi request frame.
- */
-static inline void fc_flogi_fill(struct fc_lport *lport, struct fc_frame *fp)
-{
- struct fc_els_csp *sp;
- struct fc_els_cssp *cp;
- struct fc_els_flogi *flogi;
-
- flogi = fc_frame_payload_get(fp, sizeof(*flogi));
- memset(flogi, 0, sizeof(*flogi));
- flogi->fl_cmd = (u8) ELS_FLOGI;
- put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
- put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
- sp = &flogi->fl_csp;
- sp->sp_hi_ver = 0x20;
- sp->sp_lo_ver = 0x20;
- sp->sp_bb_cred = htons(10); /* this gets set by gateway */
- sp->sp_bb_data = htons((u16) lport->mfs);
- cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
- cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
- if (lport->does_npiv)
- sp->sp_features = htons(FC_SP_FT_NPIV);
-}
-
-/**
- * fc_fdisc_fill - Fill in a fdisc request frame.
- */
-static inline void fc_fdisc_fill(struct fc_lport *lport, struct fc_frame *fp)
-{
- struct fc_els_csp *sp;
- struct fc_els_cssp *cp;
- struct fc_els_flogi *fdisc;
-
- fdisc = fc_frame_payload_get(fp, sizeof(*fdisc));
- memset(fdisc, 0, sizeof(*fdisc));
- fdisc->fl_cmd = (u8) ELS_FDISC;
- put_unaligned_be64(lport->wwpn, &fdisc->fl_wwpn);
- put_unaligned_be64(lport->wwnn, &fdisc->fl_wwnn);
- sp = &fdisc->fl_csp;
- sp->sp_hi_ver = 0x20;
- sp->sp_lo_ver = 0x20;
- sp->sp_bb_cred = htons(10); /* this gets set by gateway */
- sp->sp_bb_data = htons((u16) lport->mfs);
- cp = &fdisc->fl_cssp[3 - 1]; /* class 3 parameters */
- cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
-}
-
-/**
- * fc_logo_fill - Fill in a logo request frame.
- */
-static inline void fc_logo_fill(struct fc_lport *lport, struct fc_frame *fp)
-{
- struct fc_els_logo *logo;
-
- logo = fc_frame_payload_get(fp, sizeof(*logo));
- memset(logo, 0, sizeof(*logo));
- logo->fl_cmd = ELS_LOGO;
- hton24(logo->fl_n_port_id, lport->port_id);
- logo->fl_n_port_wwn = htonll(lport->wwpn);
-}
-
-/**
- * fc_rtv_fill - Fill in RTV (read timeout value) request frame.
- */
-static inline void fc_rtv_fill(struct fc_lport *lport, struct fc_frame *fp)
-{
- struct fc_els_rtv *rtv;
-
- rtv = fc_frame_payload_get(fp, sizeof(*rtv));
- memset(rtv, 0, sizeof(*rtv));
- rtv->rtv_cmd = ELS_RTV;
-}
-
-/**
- * fc_rec_fill - Fill in rec request frame
- */
-static inline void fc_rec_fill(struct fc_lport *lport, struct fc_frame *fp)
-{
- struct fc_els_rec *rec;
- struct fc_exch *ep = fc_seq_exch(fr_seq(fp));
-
- rec = fc_frame_payload_get(fp, sizeof(*rec));
- memset(rec, 0, sizeof(*rec));
- rec->rec_cmd = ELS_REC;
- hton24(rec->rec_s_id, lport->port_id);
- rec->rec_ox_id = htons(ep->oxid);
- rec->rec_rx_id = htons(ep->rxid);
-}
-
-/**
- * fc_prli_fill - Fill in prli request frame
- */
-static inline void fc_prli_fill(struct fc_lport *lport, struct fc_frame *fp)
-{
- struct {
- struct fc_els_prli prli;
- struct fc_els_spp spp;
- } *pp;
-
- pp = fc_frame_payload_get(fp, sizeof(*pp));
- memset(pp, 0, sizeof(*pp));
- pp->prli.prli_cmd = ELS_PRLI;
- pp->prli.prli_spp_len = sizeof(struct fc_els_spp);
- pp->prli.prli_len = htons(sizeof(*pp));
- pp->spp.spp_type = FC_TYPE_FCP;
- pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR;
- pp->spp.spp_params = htonl(lport->service_params);
-}
-
-/**
- * fc_scr_fill - Fill in a scr request frame.
- */
-static inline void fc_scr_fill(struct fc_lport *lport, struct fc_frame *fp)
-{
- struct fc_els_scr *scr;
-
- scr = fc_frame_payload_get(fp, sizeof(*scr));
- memset(scr, 0, sizeof(*scr));
- scr->scr_cmd = ELS_SCR;
- scr->scr_reg_func = ELS_SCRF_FULL;
-}
-
-/**
- * fc_els_fill - Fill in an ELS request frame
- */
-static inline int fc_els_fill(struct fc_lport *lport,
- u32 did,
- struct fc_frame *fp, unsigned int op,
- enum fc_rctl *r_ctl, enum fc_fh_type *fh_type)
-{
- switch (op) {
- case ELS_ADISC:
- fc_adisc_fill(lport, fp);
- break;
-
- case ELS_PLOGI:
- fc_plogi_fill(lport, fp, ELS_PLOGI);
- break;
-
- case ELS_FLOGI:
- fc_flogi_fill(lport, fp);
- break;
-
- case ELS_FDISC:
- fc_fdisc_fill(lport, fp);
- break;
-
- case ELS_LOGO:
- fc_logo_fill(lport, fp);
- break;
-
- case ELS_RTV:
- fc_rtv_fill(lport, fp);
- break;
-
- case ELS_REC:
- fc_rec_fill(lport, fp);
- break;
-
- case ELS_PRLI:
- fc_prli_fill(lport, fp);
- break;
-
- case ELS_SCR:
- fc_scr_fill(lport, fp);
- break;
-
- default:
- return -EINVAL;
- }
-
- *r_ctl = FC_RCTL_ELS_REQ;
- *fh_type = FC_TYPE_ELS;
- return 0;
-}
-#endif /* _FC_ENCODE_H_ */
diff --git a/include/scsi/fc_frame.h b/include/scsi/fc_frame.h
index 41df2ba9dbaa..d544dc5057fc 100644
--- a/include/scsi/fc_frame.h
+++ b/include/scsi/fc_frame.h
@@ -246,4 +246,34 @@ static inline bool fc_frame_is_cmd(const struct fc_frame *fp)
*/
void fc_frame_leak_check(void);
+static inline void __fc_fill_fc_hdr(struct fc_frame_header *fh,
+ enum fc_rctl r_ctl,
+ u32 did, u32 sid, enum fc_fh_type type,
+ u32 f_ctl, u32 parm_offset)
+{
+ WARN_ON(r_ctl == 0);
+ fh->fh_r_ctl = r_ctl;
+ hton24(fh->fh_d_id, did);
+ hton24(fh->fh_s_id, sid);
+ fh->fh_type = type;
+ hton24(fh->fh_f_ctl, f_ctl);
+ fh->fh_cs_ctl = 0;
+ fh->fh_df_ctl = 0;
+ fh->fh_parm_offset = htonl(parm_offset);
+}
+
+/**
+ * fc_fill_fc_hdr() - fill FC header fields in the specified fc_frame
+ */
+static inline void fc_fill_fc_hdr(struct fc_frame *fp, enum fc_rctl r_ctl,
+ u32 did, u32 sid, enum fc_fh_type type,
+ u32 f_ctl, u32 parm_offset)
+{
+ struct fc_frame_header *fh;
+
+ fh = fc_frame_header_get(fp);
+ __fc_fill_fc_hdr(fh, r_ctl, did, sid, type, f_ctl, parm_offset);
+}
+
#endif /* _FC_FRAME_H_ */
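
The two fill helpers move here from the now-removed include/scsi/fc_encode.h. A typical call, sketched from how the deleted code built an unsolicited ELS request header (flag constants come from scsi/fc/fc_fs.h):

    fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did, lport->port_id, FC_TYPE_ELS,
                   FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
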
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index 92b11c7e0b4f..5225a23f2d0e 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -60,6 +60,7 @@ enum iscsi_uevent_e {
ISCSI_UEVENT_LOGOUT_FLASHNODE_SID = UEVENT_BASE + 30,
ISCSI_UEVENT_SET_CHAP = UEVENT_BASE + 31,
ISCSI_UEVENT_GET_HOST_STATS = UEVENT_BASE + 32,
+ ISCSI_UEVENT_DESTROY_SESSION_ASYNC = UEVENT_BASE + 33,
/* up events */
ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1,
@@ -311,7 +312,7 @@ enum iscsi_param_type {
struct iscsi_param_info {
uint32_t len; /* Actual length of the param value */
uint16_t param; /* iscsi param */
- uint8_t value[0]; /* length sized value follows */
+ uint8_t value[]; /* length sized value follows */
} __packed;
struct iscsi_iface_param_info {
@@ -320,7 +321,7 @@ struct iscsi_iface_param_info {
uint16_t param; /* iscsi param value */
uint8_t iface_type; /* IPv4 or IPv6 */
uint8_t param_type; /* iscsi_param_type */
- uint8_t value[0]; /* length sized value follows */
+ uint8_t value[]; /* length sized value follows */
} __packed;
/*
@@ -697,7 +698,7 @@ enum iscsi_flashnode_param {
struct iscsi_flashnode_param_info {
uint32_t len; /* Actual length of the param */
uint16_t param; /* iscsi param value */
- uint8_t value[0]; /* length sized value follows */
+ uint8_t value[]; /* length sized value follows */
} __packed;
enum iscsi_discovery_parent_type {
@@ -815,7 +816,7 @@ struct iscsi_stats {
* up to ISCSI_STATS_CUSTOM_MAX
*/
uint32_t custom_length;
- struct iscsi_stats_custom custom[0]
+ struct iscsi_stats_custom custom[]
__attribute__ ((aligned (sizeof(uint64_t))));
};
@@ -946,7 +947,7 @@ struct iscsi_offload_host_stats {
* up to ISCSI_HOST_STATS_CUSTOM_MAX
*/
uint32_t custom_length;
- struct iscsi_host_stats_custom custom[0]
+ struct iscsi_host_stats_custom custom[]
__aligned(sizeof(uint64_t));
};
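
The value[0] members become C99 flexible arrays, which lets allocations be sized with struct_size() from linux/overflow.h. A minimal sketch (variable names assumed):

    struct iscsi_param_info *info;

    /* one allocation covering the header plus 'len' value bytes */
    info = kzalloc(struct_size(info, value, len), GFP_KERNEL);
    if (!info)
            return -ENOMEM;

    info->len = len;
    memcpy(info->value, data, len);
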
diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
index b71b5c4f418c..7b192d88f186 100644
--- a/include/scsi/iscsi_proto.h
+++ b/include/scsi/iscsi_proto.h
@@ -52,7 +52,7 @@ static inline int iscsi_sna_gte(u32 n1, u32 n2)
}
/*
- * useful common(control and data pathes) macro
+ * useful common (control and data path) macros
*/
#define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2]))
#define hton24(p, v) { \
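
hton24()/ntoh24() pack and unpack 24-bit big-endian fields such as FC_IDs; a quick worked example:

    u8 fid[3];

    hton24(fid, 0x010203);  /* fid[] = { 0x01, 0x02, 0x03 } */
    /* ntoh24(fid) == 0x010203 */
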
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 9b87e1a1c646..6e29e1719db1 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -352,6 +352,15 @@ struct fc_fcp_pkt {
} ____cacheline_aligned_in_smp;
/*
+ * @fsp should be tested and set under the scsi_pkt_queue lock
+ */
+struct libfc_cmd_priv {
+ struct fc_fcp_pkt *fsp;
+ u32 resid_len;
+ u8 status;
+};
+
+/*
* Structure and function definitions for managing Fibre Channel Exchanges
* and Sequences
*
@@ -399,7 +408,7 @@ struct fc_seq {
* @sid: Source FCID
* @did: Destination FCID
* @esb_stat: ESB exchange status
- * @r_a_tov: Resouce allocation time out value (in msecs)
+ * @r_a_tov: Resource allocation time out value (in msecs)
* @seq_id: The next sequence ID to use
* @encaps: encapsulation information for lower-level driver
* @f_ctl: F_CTL flags for the sequence
@@ -668,7 +677,7 @@ enum fc_lport_event {
* @wwnn: World Wide Node Name
* @service_params: Common service parameters
* @e_d_tov: Error detection timeout value
- * @r_a_tov: Resouce allocation timeout value
+ * @r_a_tov: Resource allocation timeout value
* @rnid_gen: RNID information
* @sg_supp: Indicates if scatter gather is supported
* @seq_offload: Indicates if sequence offload is supported
@@ -841,7 +850,7 @@ static inline void fc_lport_free_stats(struct fc_lport *lport)
/**
* lport_priv() - Return the private data from a local port
- * @lport: The local port whose private data is to be retreived
+ * @lport: The local port whose private data is to be retrieved
*/
static inline void *lport_priv(const struct fc_lport *lport)
{
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index 2568cb0627ec..279782156373 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -14,6 +14,7 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
+#include <linux/local_lock.h>
#include <linux/random.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
@@ -249,7 +250,8 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *,
struct fc_frame *);
/* libfcoe funcs */
-u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int);
+u64 fcoe_wwn_from_mac(unsigned char mac[ETH_ALEN], unsigned int scheme,
+ unsigned int port);
int fcoe_libfc_config(struct fc_lport *, struct fcoe_ctlr *,
const struct libfc_function_template *, int init_fcp);
u32 fcoe_fc_crc(struct fc_frame *fp);
@@ -326,6 +328,7 @@ struct fcoe_percpu_s {
struct sk_buff_head fcoe_rx_list;
struct page *crc_eof_page;
int crc_eof_offset;
+ local_lock_t lock;
};
/**
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index c25fb86ffae9..654cc3918c94 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -19,6 +19,7 @@
#include <linux/refcount.h>
#include <scsi/iscsi_proto.h>
#include <scsi/iscsi_if.h>
+#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_transport_iscsi.h>
struct scsi_transport_template;
@@ -52,8 +53,10 @@ enum {
#define ISID_SIZE 6
-/* Connection suspend "bit" */
-#define ISCSI_SUSPEND_BIT 1
+/* Connection flags */
+#define ISCSI_CONN_FLAG_SUSPEND_TX 0
+#define ISCSI_CONN_FLAG_SUSPEND_RX 1
+#define ISCSI_CONN_FLAG_BOUND 2
#define ISCSI_ITT_MASK 0x1fff
#define ISCSI_TOTAL_CMDS_MAX 4096
@@ -142,6 +145,24 @@ static inline void* iscsi_next_hdr(struct iscsi_task *task)
return (void*)task->hdr + task->hdr_len;
}
+static inline bool iscsi_task_is_completed(struct iscsi_task *task)
+{
+ return task->state == ISCSI_TASK_COMPLETED ||
+ task->state == ISCSI_TASK_ABRT_TMF ||
+ task->state == ISCSI_TASK_ABRT_SESS_RECOV;
+}
+
+/* Private data associated with struct scsi_cmnd. */
+struct iscsi_cmd {
+ struct iscsi_task *task;
+ int age;
+};
+
+static inline struct iscsi_cmd *iscsi_cmd(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
/* Connection's states */
enum {
ISCSI_CONN_INITIAL_STAGE,
@@ -184,19 +205,14 @@ struct iscsi_conn {
struct iscsi_task *task; /* xmit task in progress */
/* xmit */
- spinlock_t taskqueuelock; /* protects the next three lists */
+ /* items must be added/deleted under frwd lock */
struct list_head mgmtqueue; /* mgmt (control) xmit queue */
struct list_head cmdqueue; /* data-path cmd queue */
struct list_head requeue; /* tasks needing another run */
struct work_struct xmitwork; /* per-conn. xmit workqueue */
- unsigned long suspend_tx; /* suspend Tx */
- unsigned long suspend_rx; /* suspend Rx */
-
- /* abort */
- wait_queue_head_t ehwait; /* used in eh_abort() */
- struct iscsi_tm tmhdr;
- struct timer_list tmf_timer;
- int tmf_state; /* see TMF_INITIAL, etc.*/
+ /* recv */
+ struct work_struct recvwork;
+ unsigned long flags; /* ISCSI_CONN_FLAGs */
/* negotiated params */
unsigned max_recv_dlength; /* initiator_max_recv_dsl*/
@@ -267,6 +283,12 @@ struct iscsi_session {
* and recv lock.
*/
struct mutex eh_mutex;
+ /* abort */
+ wait_queue_head_t ehwait; /* used in eh_abort() */
+ struct iscsi_tm tmhdr;
+ struct timer_list tmf_timer;
+ int tmf_state; /* see TMF_INITIAL, etc.*/
+ struct iscsi_task *running_aborted_task;
/* iSCSI session-wide sequencing */
uint32_t cmdsn;
@@ -329,7 +351,7 @@ struct iscsi_session {
* cmdsn, queued_cmdsn *
* session resources: *
* - cmdpool kfifo_out , *
- * - mgmtpool, */
+ * - mgmtpool, queues */
spinlock_t back_lock; /* protects cmdsn_exp *
* cmdsn_max, *
* cmdpool kfifo_in */
@@ -361,7 +383,6 @@ struct iscsi_host {
int state;
struct workqueue_struct *workq;
- char workq_name[20];
};
/*
@@ -389,9 +410,11 @@ extern int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev);
extern struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
int dd_data_size,
bool xmit_can_sleep);
-extern void iscsi_host_remove(struct Scsi_Host *shost);
+extern void iscsi_host_remove(struct Scsi_Host *shost, bool is_shutdown);
extern void iscsi_host_free(struct Scsi_Host *shost);
extern int iscsi_target_alloc(struct scsi_target *starget);
+extern int iscsi_host_get_max_scsi_cmds(struct Scsi_Host *shost,
+ uint16_t requested_cmds_max);
/*
* session management
@@ -419,6 +442,7 @@ extern int iscsi_conn_start(struct iscsi_cls_conn *);
extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *,
int);
+extern void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active);
extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
extern void iscsi_session_failure(struct iscsi_session *session,
enum iscsi_err err);
@@ -427,8 +451,10 @@ extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
extern int iscsi_conn_get_addr_param(struct sockaddr_storage *addr,
enum iscsi_param param, char *buf);
extern void iscsi_suspend_tx(struct iscsi_conn *conn);
+extern void iscsi_suspend_rx(struct iscsi_conn *conn);
extern void iscsi_suspend_queue(struct iscsi_conn *conn);
-extern void iscsi_conn_queue_work(struct iscsi_conn *conn);
+extern void iscsi_conn_queue_xmit(struct iscsi_conn *conn);
+extern void iscsi_conn_queue_recv(struct iscsi_conn *conn);
#define iscsi_conn_printk(prefix, _c, fmt, a...) \
iscsi_cls_conn_printk(prefix, ((struct iscsi_conn *)_c)->cls_conn, \
@@ -453,7 +479,7 @@ extern struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *, itt_t);
extern void iscsi_requeue_task(struct iscsi_task *task);
extern void iscsi_put_task(struct iscsi_task *task);
extern void __iscsi_put_task(struct iscsi_task *task);
-extern void __iscsi_get_task(struct iscsi_task *task);
+extern bool iscsi_get_task(struct iscsi_task *task);
extern void iscsi_complete_scsi_task(struct iscsi_task *task,
uint32_t exp_cmdsn, uint32_t max_cmdsn);
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 4e2d61e8fb1e..2dbead74a2af 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -145,7 +145,7 @@ struct sata_device {
struct ata_port *ap;
struct ata_host *ata_host;
- struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */
+ struct smp_rps_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */
u8 fis[ATA_RESP_FIS_SIZE];
};
@@ -356,6 +356,7 @@ enum sas_ha_state {
SAS_HA_DRAINING,
SAS_HA_ATA_EH_ACTIVE,
SAS_HA_FROZEN,
+ SAS_HA_RESUMING,
};
struct sas_ha_struct {
@@ -391,10 +392,6 @@ struct sas_ha_struct {
int strict_wide_ports; /* both sas_addr and attached_sas_addr must match
* their siblings when forming wide ports */
- /* LLDD calls these to notify the class of an event. */
- int (*notify_port_event)(struct asd_sas_phy *, enum port_event);
- int (*notify_phy_event)(struct asd_sas_phy *, enum phy_event);
-
void *lldd_ha; /* not touched by sas class code */
struct list_head eh_done_q; /* complete via scsi_eh_flush_done_q */
@@ -478,10 +475,16 @@ enum service_response {
};
enum exec_status {
- /* The SAM_STAT_.. codes fit in the lower 6 bits, alias some of
- * them here to silence 'case value not in enumerated type' warnings
+ /*
+ * Values 0..0x7f are used to return the SAM_STAT_* codes. To avoid
+ * 'case value not in enumerated type' compiler warnings every value
+ * returned through the exec_status enum needs an alias with the SAS_
+ * prefix here.
*/
- __SAM_STAT_CHECK_CONDITION = SAM_STAT_CHECK_CONDITION,
+ SAS_SAM_STAT_GOOD = SAM_STAT_GOOD,
+ SAS_SAM_STAT_BUSY = SAM_STAT_BUSY,
+ SAS_SAM_STAT_TASK_ABORTED = SAM_STAT_TASK_ABORTED,
+ SAS_SAM_STAT_CHECK_CONDITION = SAM_STAT_CHECK_CONDITION,
SAS_DEV_NO_RESPONSE = 0x80,
SAS_DATA_UNDERRUN,
@@ -489,7 +492,6 @@ enum exec_status {
SAS_INTERRUPTED,
SAS_QUEUE_FULL,
SAS_DEVICE_UNKNOWN,
- SAS_SG_ERR,
SAS_OPEN_REJECT,
SAS_OPEN_TO,
SAS_PROTO_RESPONSE,
@@ -550,6 +552,21 @@ struct sas_ata_task {
u8 stp_affil_pol:1;
u8 device_control_reg_update:1;
+
+ bool force_phy;
+ int force_phy_id;
+};
+
+/* LLDDs rely on these values */
+enum sas_internal_abort {
+ SAS_INTERNAL_ABORT_SINGLE = 0,
+ SAS_INTERNAL_ABORT_DEV = 1,
+};
+
+struct sas_internal_abort_task {
+ enum sas_internal_abort type;
+ unsigned int qid;
+ u16 tag;
};
struct sas_smp_task {
@@ -574,6 +591,11 @@ struct sas_ssp_task {
struct scsi_cmnd *cmd;
};
+struct sas_tmf_task {
+ u8 tmf;
+ u16 tag_of_task_to_be_managed;
+};
+
struct sas_task {
struct domain_device *dev;
@@ -586,6 +608,7 @@ struct sas_task {
struct sas_ata_task ata_task;
struct sas_smp_task smp_task;
struct sas_ssp_task ssp_task;
+ struct sas_internal_abort_task abort_task;
};
struct scatterlist *scatter;
@@ -599,6 +622,7 @@ struct sas_task {
void *lldd_task; /* for use by LLDDs */
void *uldd_task;
struct sas_task_slow *slow_task;
+ struct sas_tmf_task *tmf;
};
struct sas_task_slow {
@@ -614,12 +638,16 @@ struct sas_task_slow {
#define SAS_TASK_STATE_DONE 2
#define SAS_TASK_STATE_ABORTED 4
#define SAS_TASK_NEED_DEV_RESET 8
-#define SAS_TASK_AT_INITIATOR 16
extern struct sas_task *sas_alloc_task(gfp_t flags);
extern struct sas_task *sas_alloc_slow_task(gfp_t flags);
extern void sas_free_task(struct sas_task *task);
+static inline bool sas_is_internal_abort(struct sas_task *task)
+{
+ return task->task_proto == SAS_PROTOCOL_INTERNAL_ABORT;
+}
+
struct sas_domain_function_template {
/* The class calls these to notify the LLDD of an event. */
void (*lldd_port_formed)(struct asd_sas_phy *);
@@ -634,7 +662,6 @@ struct sas_domain_function_template {
/* Task Management Functions. Must be called from process context. */
int (*lldd_abort_task)(struct sas_task *);
int (*lldd_abort_task_set)(struct domain_device *, u8 *lun);
- int (*lldd_clear_aca)(struct domain_device *, u8 *lun);
int (*lldd_clear_task_set)(struct domain_device *, u8 *lun);
int (*lldd_I_T_nexus_reset)(struct domain_device *);
int (*lldd_ata_check_ready)(struct domain_device *);
@@ -642,6 +669,11 @@ struct sas_domain_function_template {
int (*lldd_lu_reset)(struct domain_device *, u8 *lun);
int (*lldd_query_task)(struct sas_task *);
+ /* Special TMF callbacks */
+ void (*lldd_tmf_exec_complete)(struct domain_device *dev);
+ void (*lldd_tmf_aborted)(struct sas_task *task);
+ bool (*lldd_abort_timeout)(struct sas_task *task, void *data);
+
/* Port and Adapter management */
int (*lldd_clear_nexus_port)(struct asd_sas_port *);
int (*lldd_clear_nexus_ha)(struct sas_ha_struct *);
@@ -658,16 +690,23 @@ extern int sas_register_ha(struct sas_ha_struct *);
extern int sas_unregister_ha(struct sas_ha_struct *);
extern void sas_prep_resume_ha(struct sas_ha_struct *sas_ha);
extern void sas_resume_ha(struct sas_ha_struct *sas_ha);
+extern void sas_resume_ha_no_sync(struct sas_ha_struct *sas_ha);
extern void sas_suspend_ha(struct sas_ha_struct *sas_ha);
int sas_set_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates);
int sas_phy_reset(struct sas_phy *phy, int hard_reset);
+int sas_phy_enable(struct sas_phy *phy, int enable);
extern int sas_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
extern int sas_target_alloc(struct scsi_target *);
extern int sas_slave_configure(struct scsi_device *);
extern int sas_change_queue_depth(struct scsi_device *, int new_depth);
extern int sas_bios_param(struct scsi_device *, struct block_device *,
sector_t capacity, int *hsc);
+int sas_execute_internal_abort_single(struct domain_device *device,
+ u16 tag, unsigned int qid,
+ void *data);
+int sas_execute_internal_abort_dev(struct domain_device *device,
+ unsigned int qid, void *data);
extern struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *);
extern struct device_attribute dev_attr_phy_event_threshold;
@@ -680,7 +719,7 @@ int sas_ex_revalidate_domain(struct domain_device *);
void sas_unregister_domain_devices(struct asd_sas_port *port, int gone);
void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *);
-int sas_discover_event(struct asd_sas_port *, enum discover_event ev);
+void sas_discover_event(struct asd_sas_port *, enum discover_event ev);
int sas_discover_sata(struct domain_device *);
int sas_discover_end_dev(struct domain_device *);
@@ -706,4 +745,15 @@ struct sas_phy *sas_get_local_phy(struct domain_device *dev);
int sas_request_addr(struct Scsi_Host *shost, u8 *addr);
+int sas_abort_task_set(struct domain_device *dev, u8 *lun);
+int sas_clear_task_set(struct domain_device *dev, u8 *lun);
+int sas_lu_reset(struct domain_device *dev, u8 *lun);
+int sas_query_task(struct sas_task *task, u16 tag);
+int sas_abort_task(struct sas_task *task, u16 tag);
+
+void sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
+ gfp_t gfp_flags);
+void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
+ gfp_t gfp_flags);
+
#endif /* _SASLIB_H_ */
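A sketch of how an LLD interrupt handler might call the notifier functions above, which replace the removed notify_port_event/notify_phy_event callbacks; the handler name is illustrative and the gfp_t argument reflects the calling context.

#include <scsi/libsas.h>

static void example_phy_up_irq(struct asd_sas_phy *phy)
{
	/* Atomic (IRQ) context, so the event allocation uses GFP_ATOMIC. */
	sas_notify_phy_event(phy, PHYE_OOB_DONE, GFP_ATOMIC);
	sas_notify_port_event(phy, PORTE_BYTES_DMAED, GFP_ATOMIC);
}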
diff --git a/include/scsi/sas.h b/include/scsi/sas.h
index a5d8ae49198c..71b749bed3b0 100644
--- a/include/scsi/sas.h
+++ b/include/scsi/sas.h
@@ -95,6 +95,8 @@ enum sas_protocol {
SAS_PROTOCOL_SSP = 0x08,
SAS_PROTOCOL_ALL = 0x0E,
SAS_PROTOCOL_STP_ALL = SAS_PROTOCOL_STP|SAS_PROTOCOL_SATA,
+ /* these are internal to libsas */
+ SAS_PROTOCOL_INTERNAL_ABORT = 0x10,
};
/* From the spec; local phys only */
@@ -191,6 +193,13 @@ enum sas_gpio_reg_type {
SAS_GPIO_REG_TX_GP = 4,
};
+/* Response frame DATAPRES field */
+enum {
+ SAS_DATAPRES_NO_DATA = 0,
+ SAS_DATAPRES_RESPONSE_DATA = 1,
+ SAS_DATAPRES_SENSE_DATA = 2,
+};
+
struct dev_to_host_fis {
u8 fis_type; /* 0x34 */
u8 flags;
@@ -323,8 +332,10 @@ struct ssp_response_iu {
__be32 sense_data_len;
__be32 response_data_len;
- u8 resp_data[0];
- u8 sense_data[0];
+ union {
+ DECLARE_FLEX_ARRAY(u8, resp_data);
+ DECLARE_FLEX_ARRAY(u8, sense_data);
+ };
} __attribute__ ((packed));
struct ssp_command_iu {
@@ -346,7 +357,7 @@ struct ssp_command_iu {
u8 add_cdb_len:6;
u8 cdb[16];
- u8 add_cdb[0];
+ u8 add_cdb[];
} __attribute__ ((packed));
struct xfer_rdy_iu {
@@ -460,18 +471,6 @@ struct report_phy_sata_resp {
__be32 crc;
} __attribute__ ((packed));
-struct smp_resp {
- u8 frame_type;
- u8 function;
- u8 result;
- u8 reserved;
- union {
- struct report_general_resp rg;
- struct discover_resp disc;
- struct report_phy_sata_resp rps;
- };
-} __attribute__ ((packed));
-
#elif defined(__BIG_ENDIAN_BITFIELD)
struct sas_identify_frame {
/* Byte 0 */
@@ -554,8 +553,10 @@ struct ssp_response_iu {
__be32 sense_data_len;
__be32 response_data_len;
- u8 resp_data[0];
- u8 sense_data[0];
+ union {
+ DECLARE_FLEX_ARRAY(u8, resp_data);
+ DECLARE_FLEX_ARRAY(u8, sense_data);
+ };
} __attribute__ ((packed));
struct ssp_command_iu {
@@ -577,7 +578,7 @@ struct ssp_command_iu {
u8 _r_c:2;
u8 cdb[16];
- u8 add_cdb[0];
+ u8 add_cdb[];
} __attribute__ ((packed));
struct xfer_rdy_iu {
@@ -691,20 +692,32 @@ struct report_phy_sata_resp {
__be32 crc;
} __attribute__ ((packed));
-struct smp_resp {
+#else
+#error "Bitfield order not defined!"
+#endif
+
+struct smp_rg_resp {
u8 frame_type;
u8 function;
u8 result;
u8 reserved;
- union {
- struct report_general_resp rg;
- struct discover_resp disc;
- struct report_phy_sata_resp rps;
- };
+ struct report_general_resp rg;
} __attribute__ ((packed));
-#else
-#error "Bitfield order not defined!"
-#endif
+struct smp_disc_resp {
+ u8 frame_type;
+ u8 function;
+ u8 result;
+ u8 reserved;
+ struct discover_resp disc;
+} __attribute__ ((packed));
+
+struct smp_rps_resp {
+ u8 frame_type;
+ u8 function;
+ u8 result;
+ u8 reserved;
+ struct report_phy_sata_resp rps;
+} __attribute__ ((packed));
#endif /* _SAS_H_ */
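With the catch-all struct smp_resp split into per-function types, a REPORT PHY SATA reply can be handled through struct smp_rps_resp directly; a hedged sketch with an illustrative helper:

#include <scsi/sas.h>

static bool example_rps_accepted(const struct smp_rps_resp *resp)
{
	/* A response frame carries frame_type 0x41; result 0 means accepted. */
	return resp->frame_type == 0x41 && resp->result == 0;
}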
diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h
index 416c9c47d0e7..a1df4f9d57a3 100644
--- a/include/scsi/sas_ata.h
+++ b/include/scsi/sas_ata.h
@@ -25,14 +25,16 @@ int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy);
int sas_ata_init(struct domain_device *dev);
void sas_ata_task_abort(struct sas_task *task);
void sas_ata_strategy_handler(struct Scsi_Host *shost);
-void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
- struct list_head *done_q);
+void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q);
void sas_ata_schedule_reset(struct domain_device *dev);
void sas_ata_wait_eh(struct domain_device *dev);
void sas_probe_sata(struct asd_sas_port *port);
void sas_suspend_sata(struct asd_sas_port *port);
void sas_resume_sata(struct asd_sas_port *port);
void sas_ata_end_eh(struct ata_port *ap);
+int sas_execute_ata_cmd(struct domain_device *device, u8 *fis,
+ int force_phy_id);
+int sas_ata_wait_after_reset(struct domain_device *dev, unsigned long deadline);
#else
@@ -52,8 +54,7 @@ static inline void sas_ata_strategy_handler(struct Scsi_Host *shost)
{
}
-static inline void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
- struct list_head *done_q)
+static inline void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q)
{
}
@@ -85,6 +86,18 @@ static inline int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy
static inline void sas_ata_end_eh(struct ata_port *ap)
{
}
+
+static inline int sas_execute_ata_cmd(struct domain_device *device, u8 *fis,
+ int force_phy_id)
+{
+ return 0;
+}
+
+static inline int sas_ata_wait_after_reset(struct domain_device *dev,
+ unsigned long deadline)
+{
+ return -ETIMEDOUT;
+}
#endif
#endif /* _SAS_ATA_H_ */
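An illustrative reset-path use of the new sas_ata_wait_after_reset() helper; the 500 ms deadline and the surrounding function are assumptions, not libsas policy.

#include <linux/jiffies.h>
#include <scsi/sas_ata.h>

static int example_wait_for_link(struct domain_device *dev)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(500);

	/* Returns 0 once the device responds, -ETIMEDOUT otherwise. */
	return sas_ata_wait_after_reset(dev, deadline);
}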
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 5339baadc082..3e46859774c8 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
+#include <scsi/scsi_status.h>
struct scsi_cmnd;
@@ -30,32 +31,6 @@ enum scsi_timeouts {
*/
#define SCAN_WILD_CARD ~0
-/** scsi_status_is_good - check the status return.
- *
- * @status: the status passed up from the driver (including host and
- * driver components)
- *
- * This returns true for known good conditions that may be treated as
- * command completed normally
- */
-static inline int scsi_status_is_good(int status)
-{
- /*
- * FIXME: bit0 is listed as reserved in SCSI-2, but is
- * significant in SCSI-3. For now, we follow the SCSI-2
- * behaviour and ignore reserved bits.
- */
- status &= 0xfe;
- return ((status == SAM_STAT_GOOD) ||
- (status == SAM_STAT_CONDITION_MET) ||
- /* Next two "intermediate" statuses are obsolete in SAM-4 */
- (status == SAM_STAT_INTERMEDIATE) ||
- (status == SAM_STAT_INTERMEDIATE_CONDITION_MET) ||
- /* FIXME: this is obsolete in SAM-3 */
- (status == SAM_STAT_COMMAND_TERMINATED));
-}
-
-
/*
* standard mode-select header prepended to all mode-select commands
*/
@@ -88,106 +63,46 @@ static inline int scsi_is_wlun(u64 lun)
return (lun & 0xff00) == SCSI_W_LUN_BASE;
}
+/**
+ * scsi_status_is_check_condition - check the status return.
+ *
+ * @status: the status passed up from the driver (including host and
+ * driver components)
+ *
+ * This returns true if the status code is SAM_STAT_CHECK_CONDITION.
+ */
+static inline int scsi_status_is_check_condition(int status)
+{
+ if (status < 0)
+ return false;
+ status &= 0xfe;
+ return status == SAM_STAT_CHECK_CONDITION;
+}
/*
- * MESSAGE CODES
+ * Extended message codes.
*/
-
-#define COMMAND_COMPLETE 0x00
-#define EXTENDED_MESSAGE 0x01
#define EXTENDED_MODIFY_DATA_POINTER 0x00
#define EXTENDED_SDTR 0x01
#define EXTENDED_EXTENDED_IDENTIFY 0x02 /* SCSI-I only */
#define EXTENDED_WDTR 0x03
#define EXTENDED_PPR 0x04
#define EXTENDED_MODIFY_BIDI_DATA_PTR 0x05
-#define SAVE_POINTERS 0x02
-#define RESTORE_POINTERS 0x03
-#define DISCONNECT 0x04
-#define INITIATOR_ERROR 0x05
-#define ABORT_TASK_SET 0x06
-#define MESSAGE_REJECT 0x07
-#define NOP 0x08
-#define MSG_PARITY_ERROR 0x09
-#define LINKED_CMD_COMPLETE 0x0a
-#define LINKED_FLG_CMD_COMPLETE 0x0b
-#define TARGET_RESET 0x0c
-#define ABORT_TASK 0x0d
-#define CLEAR_TASK_SET 0x0e
-#define INITIATE_RECOVERY 0x0f /* SCSI-II only */
-#define RELEASE_RECOVERY 0x10 /* SCSI-II only */
-#define CLEAR_ACA 0x16
-#define LOGICAL_UNIT_RESET 0x17
-#define SIMPLE_QUEUE_TAG 0x20
-#define HEAD_OF_QUEUE_TAG 0x21
-#define ORDERED_QUEUE_TAG 0x22
-#define IGNORE_WIDE_RESIDUE 0x23
-#define ACA 0x24
-#define QAS_REQUEST 0x55
-
-/* Old SCSI2 names, don't use in new code */
-#define BUS_DEVICE_RESET TARGET_RESET
-#define ABORT ABORT_TASK_SET
-
-/*
- * Host byte codes
- */
-
-#define DID_OK 0x00 /* NO error */
-#define DID_NO_CONNECT 0x01 /* Couldn't connect before timeout period */
-#define DID_BUS_BUSY 0x02 /* BUS stayed busy through time out period */
-#define DID_TIME_OUT 0x03 /* TIMED OUT for other reason */
-#define DID_BAD_TARGET 0x04 /* BAD target. */
-#define DID_ABORT 0x05 /* Told to abort for some other reason */
-#define DID_PARITY 0x06 /* Parity error */
-#define DID_ERROR 0x07 /* Internal error */
-#define DID_RESET 0x08 /* Reset by somebody. */
-#define DID_BAD_INTR 0x09 /* Got an interrupt we weren't expecting. */
-#define DID_PASSTHROUGH 0x0a /* Force command past mid-layer */
-#define DID_SOFT_ERROR 0x0b /* The low level driver just wish a retry */
-#define DID_IMM_RETRY 0x0c /* Retry without decrementing retry count */
-#define DID_REQUEUE 0x0d /* Requeue command (no immediate retry) also
- * without decrementing the retry count */
-#define DID_TRANSPORT_DISRUPTED 0x0e /* Transport error disrupted execution
- * and the driver blocked the port to
- * recover the link. Transport class will
- * retry or fail IO */
-#define DID_TRANSPORT_FAILFAST 0x0f /* Transport class fastfailed the io */
-#define DID_TARGET_FAILURE 0x10 /* Permanent target failure, do not retry on
- * other paths */
-#define DID_NEXUS_FAILURE 0x11 /* Permanent nexus failure, retry on other
- * paths might yield different results */
-#define DID_ALLOC_FAILURE 0x12 /* Space allocation on the device failed */
-#define DID_MEDIUM_ERROR 0x13 /* Medium error */
-#define DRIVER_OK 0x00 /* Driver status */
-
-/*
- * These indicate the error that occurred, and what is available.
- */
-
-#define DRIVER_BUSY 0x01
-#define DRIVER_SOFT 0x02
-#define DRIVER_MEDIA 0x03
-#define DRIVER_ERROR 0x04
-
-#define DRIVER_INVALID 0x05
-#define DRIVER_TIMEOUT 0x06
-#define DRIVER_HARD 0x07
-#define DRIVER_SENSE 0x08
/*
* Internal return values.
*/
-
-#define NEEDS_RETRY 0x2001
-#define SUCCESS 0x2002
-#define FAILED 0x2003
-#define QUEUED 0x2004
-#define SOFT_ERROR 0x2005
-#define ADD_TO_MLQUEUE 0x2006
-#define TIMEOUT_ERROR 0x2007
-#define SCSI_RETURN_NOT_HANDLED 0x2008
-#define FAST_IO_FAIL 0x2009
+enum scsi_disposition {
+ NEEDS_RETRY = 0x2001,
+ SUCCESS = 0x2002,
+ FAILED = 0x2003,
+ QUEUED = 0x2004,
+ SOFT_ERROR = 0x2005,
+ ADD_TO_MLQUEUE = 0x2006,
+ TIMEOUT_ERROR = 0x2007,
+ SCSI_RETURN_NOT_HANDLED = 0x2008,
+ FAST_IO_FAIL = 0x2009,
+};
/*
* Midlevel queue return values.
@@ -203,14 +118,10 @@ static inline int scsi_is_wlun(u64 lun)
* These are set by:
*
* status byte = set from target device
- * msg_byte = return status from host adapter itself.
+ * msg_byte (unused)
* host_byte = set by low-level driver to indicate status.
- * driver_byte = set by mid-level.
*/
-#define status_byte(result) (((result) >> 1) & 0x7f)
-#define msg_byte(result) (((result) >> 8) & 0xff)
#define host_byte(result) (((result) >> 16) & 0xff)
-#define driver_byte(result) (((result) >> 24) & 0xff)
#define sense_class(sense) (((sense) >> 4) & 0x7)
#define sense_error(sense) ((sense) & 0xf)
@@ -274,4 +185,35 @@ static inline int scsi_is_wlun(u64 lun)
/* Used to obtain the PCI location of a device */
#define SCSI_IOCTL_GET_PCI 0x5387
+/** scsi_status_is_good - check the status return.
+ *
+ * @status: the status passed up from the driver (including host and
+ * driver components)
+ *
+ * This returns true for known good conditions that may be treated as
+ * command completed normally
+ */
+static inline bool scsi_status_is_good(int status)
+{
+ if (status < 0)
+ return false;
+
+ if (host_byte(status) == DID_NO_CONNECT)
+ return false;
+
+ /*
+ * FIXME: bit0 is listed as reserved in SCSI-2, but is
+ * significant in SCSI-3. For now, we follow the SCSI-2
+ * behaviour and ignore reserved bits.
+ */
+ status &= 0xfe;
+ return ((status == SAM_STAT_GOOD) ||
+ (status == SAM_STAT_CONDITION_MET) ||
+ /* Next two "intermediate" statuses are obsolete in SAM-4 */
+ (status == SAM_STAT_INTERMEDIATE) ||
+ (status == SAM_STAT_INTERMEDIATE_CONDITION_MET) ||
+ /* FIXME: this is obsolete in SAM-3 */
+ (status == SAM_STAT_COMMAND_TERMINATED));
+}
+
#endif /* _SCSI_SCSI_H */
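A sketch of result checking after the msg_byte()/driver_byte() removal: only the SAM status byte and the host byte remain meaningful in a command's result word.

#include <scsi/scsi.h>

static bool example_result_ok(int result)
{
	/* The host byte reports transport/adapter level failures. */
	if (host_byte(result) != DID_OK)
		return false;

	/* The remaining low byte is the SAM status from the target. */
	return scsi_status_is_good(result);
}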
diff --git a/include/scsi/scsi_bsg_iscsi.h b/include/scsi/scsi_bsg_iscsi.h
index fa0c820a1663..9b1f0f424a79 100644
--- a/include/scsi/scsi_bsg_iscsi.h
+++ b/include/scsi/scsi_bsg_iscsi.h
@@ -52,7 +52,7 @@ struct iscsi_bsg_host_vendor {
uint64_t vendor_id;
/* start of vendor command area */
- uint32_t vendor_cmd[0];
+ uint32_t vendor_cmd[];
};
/* Response:
@@ -84,7 +84,7 @@ struct iscsi_bsg_reply {
*/
uint32_t result;
- /* If there was reply_payload, how much was recevied ? */
+	/* If there was reply_payload, how much was received? */
uint32_t reply_payload_rcv_len;
union {
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index a2849bb9cd19..7d3622db38ed 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -10,10 +10,8 @@
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_device.h>
-#include <scsi/scsi_request.h>
struct Scsi_Host;
-struct scsi_driver;
/*
* MAX_COMMAND_SIZE is:
@@ -28,9 +26,6 @@ struct scsi_driver;
* supports without specifying a cmd_len by ULD's
*/
#define MAX_COMMAND_SIZE 16
-#if (MAX_COMMAND_SIZE > BLK_MAX_CDB)
-# error MAX_COMMAND_SIZE can not be bigger than BLK_MAX_CDB
-#endif
struct scsi_data_buffer {
struct sg_table table;
@@ -55,27 +50,32 @@ struct scsi_pointer {
/* for scmd->flags */
#define SCMD_TAGGED (1 << 0)
-#define SCMD_UNCHECKED_ISA_DMA (1 << 1)
-#define SCMD_INITIALIZED (1 << 2)
-#define SCMD_LAST (1 << 3)
+#define SCMD_INITIALIZED (1 << 1)
+#define SCMD_LAST (1 << 2)
/* flags preserved across unprep / reprep */
-#define SCMD_PRESERVED_FLAGS (SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED)
+#define SCMD_PRESERVED_FLAGS (SCMD_INITIALIZED)
/* for scmd->state */
#define SCMD_STATE_COMPLETE 0
#define SCMD_STATE_INFLIGHT 1
+enum scsi_cmnd_submitter {
+ SUBMITTED_BY_BLOCK_LAYER = 0,
+ SUBMITTED_BY_SCSI_ERROR_HANDLER = 1,
+ SUBMITTED_BY_SCSI_RESET_IOCTL = 2,
+} __packed;
+
struct scsi_cmnd {
- struct scsi_request req;
struct scsi_device *device;
- struct list_head list; /* scsi_cmnd participates in queue lists */
- struct list_head eh_entry; /* entry for the host eh_cmd_q */
+ struct list_head eh_entry; /* entry for the host eh_abort_list/eh_cmd_q */
struct delayed_work abort_work;
struct rcu_head rcu;
int eh_eflags; /* Used by error handler */
+ int budget_token;
+
/*
* This is set to jiffies as it was when the command was first
* allocated. It is used to time how long the command has
@@ -89,13 +89,12 @@ struct scsi_cmnd {
unsigned char prot_op;
unsigned char prot_type;
unsigned char prot_flags;
+ enum scsi_cmnd_submitter submitter;
unsigned short cmd_len;
enum dma_data_direction sc_data_direction;
- /* These elements define the operation we are about to perform */
- unsigned char *cmnd;
-
+ unsigned char cmnd[32]; /* SCSI CDB */
/* These elements define the operation we ultimately want to perform */
struct scsi_data_buffer sdb;
@@ -109,25 +108,23 @@ struct scsi_cmnd {
(ie, between disconnect /
reconnects. Probably == sector
size */
-
- struct request *request; /* The command we are
- working on */
-
+ unsigned resid_len; /* residual count */
+ unsigned sense_len;
unsigned char *sense_buffer;
/* obtained by REQUEST SENSE when
* CHECK CONDITION is received on original
* command (auto-sense). Length must be
* SCSI_SENSE_BUFFERSIZE bytes. */
- /* Low-level done function - can be used by low-level driver to point
- * to completion function. Not used by mid/upper level code. */
- void (*scsi_done) (struct scsi_cmnd *);
+ int flags; /* Command flags */
+ unsigned long state; /* Command completion state */
+
+ unsigned int extra_len; /* length of alignment and padding */
/*
- * The following fields can be written to by the host specific code.
- * Everything else should be left alone.
+ * The fields below can be modified by the LLD but the fields above
+ * must not be modified.
*/
- struct scsi_pointer SCp; /* Scratchpad used by some host adapters */
unsigned char *host_scribble; /* The host adapter is allowed to
* call scsi_malloc and get some memory
@@ -138,12 +135,14 @@ struct scsi_cmnd {
* to be at an address < 16Mb). */
int result; /* Status code from lower level driver */
- int flags; /* Command flags */
- unsigned long state; /* Command completion state */
-
- unsigned char tag; /* SCSI-II queued command tag */
};
+/* Variant of blk_mq_rq_from_pdu() that verifies the type of its argument. */
+static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
+{
+ return blk_mq_rq_from_pdu(scmd);
+}
+
/*
* Return the driver private allocation behind the command.
* Only works if cmd_size is set in the host template.
@@ -153,20 +152,17 @@ static inline void *scsi_cmd_priv(struct scsi_cmnd *cmd)
return cmd + 1;
}
-/* make sure not to use it with passthrough commands */
-static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
-{
- return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
-}
+void scsi_done(struct scsi_cmnd *cmd);
+void scsi_done_direct(struct scsi_cmnd *cmd);
-extern void scsi_put_command(struct scsi_cmnd *);
extern void scsi_finish_command(struct scsi_cmnd *cmd);
extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
size_t *offset, size_t *len);
extern void scsi_kunmap_atomic_sg(void *virt);
-extern blk_status_t scsi_init_io(struct scsi_cmnd *cmd);
+blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd);
+void scsi_free_sgtables(struct scsi_cmnd *cmd);
#ifdef CONFIG_SCSI_DMA
extern int scsi_dma_map(struct scsi_cmnd *cmd);
@@ -193,19 +189,19 @@ static inline unsigned scsi_bufflen(struct scsi_cmnd *cmd)
static inline void scsi_set_resid(struct scsi_cmnd *cmd, unsigned int resid)
{
- cmd->req.resid_len = resid;
+ cmd->resid_len = resid;
}
static inline unsigned int scsi_get_resid(struct scsi_cmnd *cmd)
{
- return cmd->req.resid_len;
+ return cmd->resid_len;
}
#define scsi_for_each_sg(cmd, sg, nseg, __i) \
for_each_sg(scsi_sglist(cmd), sg, nseg, __i)
static inline int scsi_sg_copy_from_buffer(struct scsi_cmnd *cmd,
- void *buf, int buflen)
+ const void *buf, int buflen)
{
return sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
buf, buflen);
@@ -218,6 +214,25 @@ static inline int scsi_sg_copy_to_buffer(struct scsi_cmnd *cmd,
buf, buflen);
}
+static inline sector_t scsi_get_sector(struct scsi_cmnd *scmd)
+{
+ return blk_rq_pos(scsi_cmd_to_rq(scmd));
+}
+
+static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
+{
+ unsigned int shift = ilog2(scmd->device->sector_size) - SECTOR_SHIFT;
+
+ return blk_rq_pos(scsi_cmd_to_rq(scmd)) >> shift;
+}
+
+static inline unsigned int scsi_logical_block_count(struct scsi_cmnd *scmd)
+{
+ unsigned int shift = ilog2(scmd->device->sector_size) - SECTOR_SHIFT;
+
+ return blk_rq_bytes(scsi_cmd_to_rq(scmd)) >> shift;
+}
+
/*
* The operations below are hints that tell the controller driver how
* to handle I/Os with DIF or similar types of protection information.
@@ -280,9 +295,11 @@ static inline unsigned char scsi_get_prot_type(struct scsi_cmnd *scmd)
return scmd->prot_type;
}
-static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
+static inline u32 scsi_prot_ref_tag(struct scsi_cmnd *scmd)
{
- return blk_rq_pos(scmd->request);
+ struct request *rq = blk_mq_rq_from_pdu(scmd);
+
+ return t10_pi_ref_tag(rq);
}
static inline unsigned int scsi_prot_interval(struct scsi_cmnd *scmd)
@@ -308,9 +325,14 @@ static inline struct scsi_data_buffer *scsi_prot(struct scsi_cmnd *cmd)
#define scsi_for_each_prot_sg(cmd, sg, nseg, __i) \
for_each_sg(scsi_prot_sglist(cmd), sg, nseg, __i)
-static inline void set_msg_byte(struct scsi_cmnd *cmd, char status)
+static inline void set_status_byte(struct scsi_cmnd *cmd, char status)
{
- cmd->result = (cmd->result & 0xffff00ff) | (status << 8);
+ cmd->result = (cmd->result & 0xffffff00) | status;
+}
+
+static inline u8 get_status_byte(struct scsi_cmnd *cmd)
+{
+ return cmd->result & 0xff;
}
static inline void set_host_byte(struct scsi_cmnd *cmd, char status)
@@ -318,9 +340,36 @@ static inline void set_host_byte(struct scsi_cmnd *cmd, char status)
cmd->result = (cmd->result & 0xff00ffff) | (status << 16);
}
-static inline void set_driver_byte(struct scsi_cmnd *cmd, char status)
+static inline u8 get_host_byte(struct scsi_cmnd *cmd)
{
- cmd->result = (cmd->result & 0x00ffffff) | (status << 24);
+ return (cmd->result >> 16) & 0xff;
+}
+
+/**
+ * scsi_msg_to_host_byte() - translate message byte
+ *
+ * Translate the SCSI parallel message byte to a matching
+ * host byte setting. A message of COMMAND_COMPLETE indicates
+ * a successful command execution; any other message indicates
+ * an error. As the messages themselves only have a meaning
+ * for the SCSI parallel protocol, this function translates
+ * them into a matching host byte value for SCSI EH.
+ */
+static inline void scsi_msg_to_host_byte(struct scsi_cmnd *cmd, u8 msg)
+{
+ switch (msg) {
+ case COMMAND_COMPLETE:
+ break;
+ case ABORT_TASK_SET:
+ set_host_byte(cmd, DID_ABORT);
+ break;
+ case TARGET_RESET:
+ set_host_byte(cmd, DID_RESET);
+ break;
+ default:
+ set_host_byte(cmd, DID_ERROR);
+ break;
+ }
}
static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd)
@@ -334,4 +383,10 @@ static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd)
return xfer_len;
}
+extern void scsi_build_sense(struct scsi_cmnd *scmd, int desc,
+ u8 key, u8 asc, u8 ascq);
+
+struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf,
+ blk_mq_req_flags_t flags);
+
#endif /* _SCSI_SCSI_CMND_H */
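A minimal sketch of the request-derived accessors: with struct scsi_request gone, the request is reached via scsi_cmd_to_rq() and LBA/block counts are derived from the device sector size; the logging helper itself is hypothetical.

#include <linux/printk.h>
#include <scsi/scsi_cmnd.h>

static void example_log_io(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);

	pr_debug("tag %d lba %llu blocks %u\n", rq->tag,
		 (unsigned long long)scsi_get_lba(cmd),
		 scsi_logical_block_count(cmd));
}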
diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h
index 731ac09ed231..5b567b43e1b1 100644
--- a/include/scsi/scsi_common.h
+++ b/include/scsi/scsi_common.h
@@ -25,6 +25,13 @@ scsi_command_size(const unsigned char *cmnd)
scsi_varlen_cdb_length(cmnd) : COMMAND_SIZE(cmnd[0]);
}
+static inline unsigned char
+scsi_command_control(const unsigned char *cmnd)
+{
+ return (cmnd[0] == VARIABLE_LENGTH_CMD) ?
+ cmnd[1] : cmnd[COMMAND_SIZE(cmnd[0]) - 1];
+}
+
/* Returns a human-readable name for the device */
extern const char *scsi_device_type(unsigned type);
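An illustrative use of the new scsi_command_control() helper; per SAM, the NACA flag sits in bit 2 of the CONTROL byte.

#include <scsi/scsi_common.h>

static bool example_naca_set(const unsigned char *cdb)
{
	/* CONTROL is the last CDB byte (byte 1 for variable-length CDBs). */
	return scsi_command_control(cdb) & 0x04;
}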
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index f8312a3e5b42..c36656d8ac6c 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -5,10 +5,12 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <scsi/scsi.h>
#include <linux/atomic.h>
+#include <linux/sbitmap.h>
+struct bsg_device;
struct device;
struct request_queue;
struct scsi_cmnd;
@@ -98,6 +100,10 @@ struct scsi_vpd {
unsigned char data[];
};
+enum scsi_vpd_parameters {
+ SCSI_VPD_HEADER_SIZE = 4,
+};
+
struct scsi_device {
struct Scsi_Host *host;
struct request_queue *request_queue;
@@ -106,11 +112,11 @@ struct scsi_device {
struct list_head siblings; /* list of all devices on this host */
struct list_head same_target_siblings; /* just the devices sharing same target id */
- atomic_t device_busy; /* commands actually active on LLDD */
+ struct sbitmap budget_map;
atomic_t device_blocked; /* Device returned QUEUE_FULL. */
+ atomic_t restarts;
spinlock_t list_lock;
- struct list_head cmd_list; /* queue of in use SCSI Command structures */
struct list_head starved_entry;
unsigned short queue_depth; /* How deep of a queue we want */
unsigned short max_queue_depth; /* max queue depth */
@@ -139,13 +145,15 @@ struct scsi_device {
const char * model; /* ... after scan; point to static string */
const char * rev; /* ... "nullnullnullnull" before scan */
-#define SCSI_VPD_PG_LEN 255
struct scsi_vpd __rcu *vpd_pg0;
struct scsi_vpd __rcu *vpd_pg83;
struct scsi_vpd __rcu *vpd_pg80;
struct scsi_vpd __rcu *vpd_pg89;
- unsigned char current_tag; /* current tag */
- struct scsi_target *sdev_target; /* used only for single_lun */
+ struct scsi_vpd __rcu *vpd_pgb0;
+ struct scsi_vpd __rcu *vpd_pgb1;
+ struct scsi_vpd __rcu *vpd_pgb2;
+
+ struct scsi_target *sdev_target;
blist_flags_t sdev_bflags; /* black/white flags as also found in
* scsi_devinfo.[hc]. For now used only to
@@ -204,6 +212,12 @@ struct scsi_device {
unsigned unmap_limit_for_ws:1; /* Use the UNMAP limit for WRITE SAME */
unsigned rpm_autosuspend:1; /* Enable runtime autosuspend at device
* creation time */
+ unsigned ignore_media_change:1; /* Ignore MEDIA CHANGE on resume */
+ unsigned silence_suspend:1; /* Do not print runtime PM related messages */
+
+ unsigned int queue_stopped; /* request queue is quiesced */
+ bool offline_already; /* Device offline message logged */
+
atomic_t disk_events_disable_depth; /* disable depth for disk events */
DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
@@ -217,6 +231,7 @@ struct scsi_device {
atomic_t iorequest_cnt;
atomic_t iodone_cnt;
atomic_t ioerr_cnt;
+ atomic_t iotmo_cnt;
struct device sdev_gendev,
sdev_dev;
@@ -227,11 +242,18 @@ struct scsi_device {
struct scsi_device_handler *handler;
void *handler_data;
+ size_t dma_drain_len;
+ void *dma_drain_buf;
+
+ unsigned int sg_timeout;
+ unsigned int sg_reserved_size;
+
+ struct bsg_device *bsg_dev;
unsigned char access_state;
struct mutex state_mutex;
enum scsi_device_state sdev_state;
struct task_struct *quiesced_by;
- unsigned long sdev_data[0];
+ unsigned long sdev_data[];
} __attribute__((aligned(sizeof(unsigned long))));
#define to_scsi_device(d) \
@@ -258,13 +280,15 @@ sdev_prefix_printk(const char *, const struct scsi_device *, const char *,
__printf(3, 4) void
scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...);
-#define scmd_dbg(scmd, fmt, a...) \
- do { \
- if ((scmd)->request->rq_disk) \
- sdev_dbg((scmd)->device, "[%s] " fmt, \
- (scmd)->request->rq_disk->disk_name, ##a);\
- else \
- sdev_dbg((scmd)->device, fmt, ##a); \
+#define scmd_dbg(scmd, fmt, a...) \
+ do { \
+ struct request *__rq = scsi_cmd_to_rq((scmd)); \
+ \
+ if (__rq->q->disk) \
+ sdev_dbg((scmd)->device, "[%s] " fmt, \
+ __rq->q->disk->disk_name, ##a); \
+ else \
+ sdev_dbg((scmd)->device, fmt, ##a); \
} while (0)
enum scsi_target_state {
@@ -315,7 +339,7 @@ struct scsi_target {
char scsi_level;
enum scsi_target_state state;
void *hostdata; /* available to low-level driver */
- unsigned long starget_data[0]; /* for the transport */
+ unsigned long starget_data[]; /* for the transport */
/* starget_data must be the last element!!!! */
} __attribute__((aligned(sizeof(unsigned long))));
@@ -400,9 +424,8 @@ extern int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
int retries, struct scsi_mode_data *data,
struct scsi_sense_hdr *);
extern int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
- int modepage, unsigned char *buffer, int len,
- int timeout, int retries,
- struct scsi_mode_data *data,
+ unsigned char *buffer, int len, int timeout,
+ int retries, struct scsi_mode_data *data,
struct scsi_sense_hdr *);
extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout,
int retries, struct scsi_sense_hdr *sshdr);
@@ -435,7 +458,7 @@ extern void scsi_sanitize_inquiry_string(unsigned char *s, int len);
extern int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
unsigned char *sense, struct scsi_sense_hdr *sshdr,
- int timeout, int retries, u64 flags,
+ int timeout, int retries, blk_opf_t flags,
req_flags_t rq_flags, int *resid);
/* Make sure any sense buffer is the correct size. */
#define scsi_execute(sdev, cmd, data_direction, buffer, bufflen, sense, \
@@ -584,6 +607,11 @@ static inline int scsi_device_supports_vpd(struct scsi_device *sdev)
return 0;
}
+static inline int scsi_device_busy(struct scsi_device *sdev)
+{
+ return sbitmap_weight(&sdev->budget_map);
+}
+
#define MODULE_ALIAS_SCSI_DEVICE(type) \
MODULE_ALIAS("scsi:t-" __stringify(type) "*")
#define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x"
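A sketch of the new busy accounting: device_busy is no longer an atomic counter, so in-flight commands are counted by weighing the budget bitmap via scsi_device_busy().

#include <scsi/scsi_device.h>

static bool example_device_idle(struct scsi_device *sdev)
{
	/* Zero weight in the budget map means no commands in flight. */
	return scsi_device_busy(sdev) == 0;
}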
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index 3fdb322d4c4b..5d14adae21c7 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -28,7 +28,8 @@
#define BLIST_LARGELUN ((__force blist_flags_t)(1ULL << 9))
/* override additional length field */
#define BLIST_INQUIRY_36 ((__force blist_flags_t)(1ULL << 10))
-#define __BLIST_UNUSED_11 ((__force blist_flags_t)(1ULL << 11))
+/* ignore MEDIA CHANGE unit attention after resuming from runtime suspend */
+#define BLIST_IGN_MEDIA_CHANGE ((__force blist_flags_t)(1ULL << 11))
/* do not do automatic start on add */
#define BLIST_NOSTARTONADD ((__force blist_flags_t)(1ULL << 12))
#define __BLIST_UNUSED_13 ((__force blist_flags_t)(1ULL << 13))
@@ -73,8 +74,7 @@
#define __BLIST_HIGH_UNUSED (~(__BLIST_LAST_USED | \
(__force blist_flags_t) \
((__force __u64)__BLIST_LAST_USED - 1ULL)))
-#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_11 | \
- __BLIST_UNUSED_13 | \
+#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_13 | \
__BLIST_UNUSED_14 | \
__BLIST_UNUSED_15 | \
__BLIST_UNUSED_16 | \
diff --git a/include/scsi/scsi_dh.h b/include/scsi/scsi_dh.h
index 2852e470a8ed..4df943c1b90b 100644
--- a/include/scsi/scsi_dh.h
+++ b/include/scsi/scsi_dh.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Header file for SCSI device handler infrastruture.
+ * Header file for SCSI device handler infrastructure.
*
* Modified version of patches posted by Mike Christie <michaelc@cs.wisc.edu>
*
@@ -52,7 +52,8 @@ struct scsi_device_handler {
/* Filled by the hardware handler */
struct module *module;
const char *name;
- int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *);
+ enum scsi_disposition (*check_sense)(struct scsi_device *,
+ struct scsi_sense_hdr *);
int (*attach)(struct scsi_device *);
void (*detach)(struct scsi_device *);
int (*activate)(struct scsi_device *, activate_complete, void *);
diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
index 6dffa8555a39..4ce1988b2ba0 100644
--- a/include/scsi/scsi_driver.h
+++ b/include/scsi/scsi_driver.h
@@ -4,11 +4,10 @@
#include <linux/blk_types.h>
#include <linux/device.h>
+#include <scsi/scsi_cmnd.h>
struct module;
struct request;
-struct scsi_cmnd;
-struct scsi_device;
struct scsi_driver {
struct device_driver gendrv;
@@ -31,4 +30,10 @@ extern int scsi_register_interface(struct class_interface *);
#define scsi_unregister_interface(intf) \
class_interface_unregister(intf)
+/* make sure not to use it with passthrough commands */
+static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
+{
+ return to_scsi_driver(cmd->device->sdev_gendev.driver);
+}
+
#endif /* _SCSI_SCSI_DRIVER_H */
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 6bd5ed695a5e..1ae08e81339f 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -17,7 +17,7 @@ extern void scsi_report_device_reset(struct Scsi_Host *, int, int);
extern int scsi_block_when_processing_errors(struct scsi_device *);
extern bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
struct scsi_sense_hdr *sshdr);
-extern int scsi_check_sense(struct scsi_cmnd *);
+extern enum scsi_disposition scsi_check_sense(struct scsi_cmnd *);
static inline bool scsi_sense_is_deferred(const struct scsi_sense_hdr *sshdr)
{
@@ -38,10 +38,8 @@ struct scsi_eh_save {
unsigned underflow;
unsigned char cmd_len;
unsigned char prot_op;
- unsigned char *cmnd;
+ unsigned char cmnd[32];
struct scsi_data_buffer sdb;
- /* new command support */
- unsigned char eh_cmnd[BLK_MAX_CDB];
struct scatterlist sense_sgl;
};
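A hypothetical check_sense-style handler matching the new typed return: enum scsi_disposition replaces the bare int used previously, and the retry policy shown is purely for illustration.

#include <scsi/scsi_common.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_proto.h>

static enum scsi_disposition example_check_sense(struct scsi_device *sdev,
						 struct scsi_sense_hdr *sshdr)
{
	/* Retry unit attentions; defer everything else to the mid-layer. */
	if (sshdr->sense_key == UNIT_ATTENTION)
		return NEEDS_RETRY;
	return SCSI_RETURN_NOT_HANDLED;
}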
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index f577647bf5f2..fcf25f1642a3 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -16,10 +16,8 @@ struct completion;
struct module;
struct scsi_cmnd;
struct scsi_device;
-struct scsi_host_cmd_pool;
struct scsi_target;
struct Scsi_Host;
-struct scsi_host_cmd_pool;
struct scsi_transport_template;
@@ -30,37 +28,15 @@ struct scsi_transport_template;
#define MODE_TARGET 0x02
struct scsi_host_template {
- struct module *module;
- const char *name;
-
/*
- * The info function will return whatever useful information the
- * developer sees fit. If not provided, then the name field will
- * be used instead.
- *
- * Status: OPTIONAL
+	 * Put fields referenced in the I/O submission path together in
+	 * the same cacheline.

*/
- const char *(* info)(struct Scsi_Host *);
/*
- * Ioctl interface
- *
- * Status: OPTIONAL
- */
- int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
- void __user *arg);
-
-
-#ifdef CONFIG_COMPAT
- /*
- * Compat handler. Handle 32bit ABI.
- * When unknown ioctl is passed return -ENOIOCTLCMD.
- *
- * Status: OPTIONAL
+ * Additional per-command data allocated for the driver.
*/
- int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
- void __user *arg);
-#endif
+ unsigned int cmd_size;
/*
* The queuecommand function is used to queue up a scsi
@@ -108,6 +84,41 @@ struct scsi_host_template {
*/
void (*commit_rqs)(struct Scsi_Host *, u16);
+ struct module *module;
+ const char *name;
+
+ /*
+ * The info function will return whatever useful information the
+ * developer sees fit. If not provided, then the name field will
+ * be used instead.
+ *
+ * Status: OPTIONAL
+ */
+ const char *(*info)(struct Scsi_Host *);
+
+ /*
+ * Ioctl interface
+ *
+ * Status: OPTIONAL
+ */
+ int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
+ void __user *arg);
+
+
+#ifdef CONFIG_COMPAT
+ /*
+ * Compat handler. Handle 32bit ABI.
+ * When unknown ioctl is passed return -ENOIOCTLCMD.
+ *
+ * Status: OPTIONAL
+ */
+ int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
+ void __user *arg);
+#endif
+
+ int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
+ int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
+
/*
* This is an error handling strategy routine. You don't need to
* define one of these if you don't want to - there is a default
@@ -265,7 +276,24 @@ struct scsi_host_template {
*
* Status: OPTIONAL
*/
- int (* map_queues)(struct Scsi_Host *shost);
+ void (* map_queues)(struct Scsi_Host *shost);
+
+ /*
+ * SCSI interface of blk_poll - poll for IO completions.
+ * Only applicable if SCSI LLD exposes multiple h/w queues.
+ *
+ * Return value: Number of completed entries found.
+ *
+ * Status: OPTIONAL
+ */
+ int (* mq_poll)(struct Scsi_Host *shost, unsigned int queue_num);
+
+ /*
+ * Check if scatterlists need to be padded for DMA draining.
+ *
+ * Status: OPTIONAL
+ */
+ bool (* dma_need_drain)(struct request *rq);
/*
* This function determines the BIOS parameters for a given
@@ -304,6 +332,12 @@ struct scsi_host_template {
* Status: OPTIONAL
*/
enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
+ /*
+ * Optional routine that allows the transport to decide if a cmd
+	 * is retryable. Return true if the transport is in a state in
+	 * which the cmd should be retried.
+ */
+ bool (*eh_should_retry_cmd)(struct scsi_cmnd *scmd);
/* This is an optional routine that allows transport to initiate
* LLD adapter or firmware reset using sysfs attribute.
@@ -409,11 +443,6 @@ struct scsi_host_template {
unsigned supported_mode:2;
/*
- * True if this host adapter uses unchecked DMA onto an ISA bus.
- */
- unsigned unchecked_isa_dma:1;
-
- /*
* True for emulated SCSI host adapters (e.g. ATAPI).
*/
unsigned emulated:1;
@@ -426,8 +455,8 @@ struct scsi_host_template {
/* True if the controller does not support WRITE SAME */
unsigned no_write_same:1;
- /* True if the low-level driver supports blk-mq only */
- unsigned force_blk_mq:1;
+ /* True if the host uses host-wide tagspace */
+ unsigned host_tagset:1;
/*
* Countdown for host blocking with no commands outstanding.
@@ -444,14 +473,9 @@ struct scsi_host_template {
#define SCSI_DEFAULT_HOST_BLOCKED 7
/*
- * Pointer to the sysfs class properties for this host, NULL terminated.
+ * Pointer to the SCSI host sysfs attribute groups, NULL terminated.
*/
- struct device_attribute **shost_attrs;
-
- /*
- * Pointer to the SCSI device properties for this host, NULL terminated.
- */
- struct device_attribute **sdev_attrs;
+ const struct attribute_group **shost_groups;
/*
* Pointer to the SCSI device attribute groups for this host,
@@ -468,12 +492,6 @@ struct scsi_host_template {
*/
u64 vendor_id;
- /*
- * Additional per-command data allocated for the driver.
- */
- unsigned int cmd_size;
- struct scsi_host_cmd_pool *cmd_pool;
-
/* Delay for runtime autosuspend */
int rpm_autosuspend_delay;
};
@@ -490,7 +508,7 @@ struct scsi_host_template {
unsigned long irq_flags; \
int rc; \
spin_lock_irqsave(shost->host_lock, irq_flags); \
- rc = func_name##_lck (cmd, cmd->scsi_done); \
+ rc = func_name##_lck(cmd); \
spin_unlock_irqrestore(shost->host_lock, irq_flags); \
return rc; \
}
@@ -530,6 +548,7 @@ struct Scsi_Host {
struct mutex scan_mutex;/* serialize scanning activity */
+ struct list_head eh_abort_list;
struct list_head eh_cmd_q;
struct task_struct * ehandler; /* Error recovery thread. */
struct completion * eh_action; /* Wait for specific actions on the
@@ -538,6 +557,8 @@ struct Scsi_Host {
struct scsi_host_template *hostt;
struct scsi_transport_template *transportt;
+ struct kref tagset_refcnt;
+ struct completion tagset_freed;
/* Area to keep a shared tag map */
struct blk_mq_tag_set tag_set;
@@ -588,6 +609,7 @@ struct Scsi_Host {
short unsigned int sg_tablesize;
short unsigned int sg_prot_tablesize;
unsigned int max_sectors;
+ unsigned int opt_sectors;
unsigned int max_segment_size;
unsigned long dma_boundary;
unsigned long virt_boundary_mask;
@@ -596,11 +618,12 @@ struct Scsi_Host {
*
* Note: it is assumed that each hardware queue has a queue depth of
* can_queue. In other words, the total queue depth per host
- * is nr_hw_queues * can_queue.
+	 * is nr_hw_queues * can_queue. However, when host_tagset is set,
+ * the total queue depth is can_queue.
*/
unsigned nr_hw_queues;
+ unsigned nr_maps;
unsigned active_mode:2;
- unsigned unchecked_isa_dma:1;
/*
* Host has requested that no further requests come through for the
@@ -627,7 +650,8 @@ struct Scsi_Host {
/* The controller does not support WRITE SAME */
unsigned no_write_same:1;
- unsigned use_cmd_list:1;
+ /* True if the host uses host-wide tagspace */
+ unsigned host_tagset:1;
/* Host responded with short (<36 bytes) INQUIRY result */
unsigned short_inquiry:1;
@@ -685,7 +709,7 @@ struct Scsi_Host {
* and also because some compilers (m68k) don't automatically force
* alignment to a long boundary.
*/
- unsigned long hostdata[0] /* Used for storage of host specific stuff */
+ unsigned long hostdata[] /* Used for storage of host specific stuff */
__attribute__ ((aligned (sizeof(unsigned long))));
};
@@ -735,6 +759,8 @@ extern int scsi_host_busy(struct Scsi_Host *shost);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned short);
extern const char *scsi_host_state_name(enum scsi_host_state);
+extern void scsi_host_complete_all_commands(struct Scsi_Host *shost,
+ enum scsi_host_status status);
static inline int __must_check scsi_add_host(struct Scsi_Host *host,
struct device *dev)
@@ -759,18 +785,13 @@ static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
extern void scsi_unblock_requests(struct Scsi_Host *);
extern void scsi_block_requests(struct Scsi_Host *);
+extern int scsi_host_block(struct Scsi_Host *shost);
+extern int scsi_host_unblock(struct Scsi_Host *shost, int new_state);
-struct class_container;
+void scsi_host_busy_iter(struct Scsi_Host *,
+ bool (*fn)(struct scsi_cmnd *, void *), void *priv);
-/*
- * These two functions are used to allocate and free a pseudo device
- * which will connect to the host adapter itself rather than any
- * physical device. You must deallocate when you are done with the
- * thing. This physical pseudo-device isn't real and won't be available
- * from any high-level drivers.
- */
-extern void scsi_free_host_dev(struct scsi_device *);
-extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);
+struct class_container;
/*
* DIF defines the exchange of protection information between
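A sketch using the scsi_host_busy_iter() declaration above; the counting callback is illustrative, and returning true keeps the iteration going.

#include <scsi/scsi_host.h>

static bool example_count_fn(struct scsi_cmnd *scmd, void *priv)
{
	unsigned int *count = priv;

	(*count)++;
	return true;	/* continue iterating */
}

static unsigned int example_count_busy(struct Scsi_Host *shost)
{
	unsigned int count = 0;

	scsi_host_busy_iter(shost, example_count_fn, &count);
	return count;
}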
diff --git a/include/scsi/scsi_ioctl.h b/include/scsi/scsi_ioctl.h
index 4fe69d863b5d..beac64e38b87 100644
--- a/include/scsi/scsi_ioctl.h
+++ b/include/scsi/scsi_ioctl.h
@@ -18,7 +18,9 @@
#ifdef __KERNEL__
+struct gendisk;
struct scsi_device;
+struct sg_io_hdr;
/*
* Structures used for scsi_ioctl et al.
@@ -27,7 +29,7 @@ struct scsi_device;
typedef struct scsi_ioctl_command {
unsigned int inlen;
unsigned int outlen;
- unsigned char data[0];
+ unsigned char data[];
} Scsi_Ioctl_Command;
typedef struct scsi_idlun {
@@ -43,8 +45,11 @@ typedef struct scsi_fctargaddress {
int scsi_ioctl_block_when_processing_errors(struct scsi_device *sdev,
int cmd, bool ndelay);
-extern int scsi_ioctl(struct scsi_device *, int, void __user *);
-extern int scsi_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg);
+int scsi_ioctl(struct scsi_device *sdev, fmode_t mode, int cmd,
+ void __user *arg);
+int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
+int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);
+bool scsi_cmd_allowed(unsigned char *cmd, fmode_t mode);
#endif /* __KERNEL__ */
#endif /* _SCSI_IOCTL_H */
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index c36860111932..c03e35fc382c 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -190,39 +190,21 @@ struct scsi_varlen_cdb_hdr {
* SCSI Architecture Model (SAM) Status codes. Taken from SAM-3 draft
* T10/1561-D Revision 4 Draft dated 7th November 2002.
*/
-#define SAM_STAT_GOOD 0x00
-#define SAM_STAT_CHECK_CONDITION 0x02
-#define SAM_STAT_CONDITION_MET 0x04
-#define SAM_STAT_BUSY 0x08
-#define SAM_STAT_INTERMEDIATE 0x10
-#define SAM_STAT_INTERMEDIATE_CONDITION_MET 0x14
-#define SAM_STAT_RESERVATION_CONFLICT 0x18
-#define SAM_STAT_COMMAND_TERMINATED 0x22 /* obsolete in SAM-3 */
-#define SAM_STAT_TASK_SET_FULL 0x28
-#define SAM_STAT_ACA_ACTIVE 0x30
-#define SAM_STAT_TASK_ABORTED 0x40
-
-/*
- * Status codes. These are deprecated as they are shifted 1 bit right
- * from those found in the SCSI standards. This causes confusion for
- * applications that are ported to several OSes. Prefer SAM Status codes
- * above.
- */
-
-#define GOOD 0x00
-#define CHECK_CONDITION 0x01
-#define CONDITION_GOOD 0x02
-#define BUSY 0x04
-#define INTERMEDIATE_GOOD 0x08
-#define INTERMEDIATE_C_GOOD 0x0a
-#define RESERVATION_CONFLICT 0x0c
-#define COMMAND_TERMINATED 0x11
-#define QUEUE_FULL 0x14
-#define ACA_ACTIVE 0x18
-#define TASK_ABORTED 0x20
-
-#define STATUS_MASK 0xfe
+enum sam_status {
+ SAM_STAT_GOOD = 0x00,
+ SAM_STAT_CHECK_CONDITION = 0x02,
+ SAM_STAT_CONDITION_MET = 0x04,
+ SAM_STAT_BUSY = 0x08,
+ SAM_STAT_INTERMEDIATE = 0x10,
+ SAM_STAT_INTERMEDIATE_CONDITION_MET = 0x14,
+ SAM_STAT_RESERVATION_CONFLICT = 0x18,
+ SAM_STAT_COMMAND_TERMINATED = 0x22, /* obsolete in SAM-3 */
+ SAM_STAT_TASK_SET_FULL = 0x28,
+ SAM_STAT_ACA_ACTIVE = 0x30,
+ SAM_STAT_TASK_ABORTED = 0x40,
+};
+#define STATUS_MASK 0xfe
/*
* SENSE KEYS
*/
@@ -325,7 +307,9 @@ enum zbc_zone_type {
ZBC_ZONE_TYPE_CONV = 0x1,
ZBC_ZONE_TYPE_SEQWRITE_REQ = 0x2,
ZBC_ZONE_TYPE_SEQWRITE_PREF = 0x3,
- /* 0x4 to 0xf are reserved */
+ ZBC_ZONE_TYPE_SEQ_OR_BEFORE_REQ = 0x4,
+ ZBC_ZONE_TYPE_GAP = 0x5,
+ /* 0x6 to 0xf are reserved */
};
/* Zone conditions of REPORT ZONES zone descriptors */
@@ -341,4 +325,21 @@ enum zbc_zone_cond {
ZBC_ZONE_COND_OFFLINE = 0xf,
};
+enum zbc_zone_alignment_method {
+ ZBC_CONSTANT_ZONE_LENGTH = 0x1,
+ ZBC_CONSTANT_ZONE_START_OFFSET = 0x8,
+};
+
+/* Version descriptor values for INQUIRY */
+enum scsi_version_descriptor {
+ SCSI_VERSION_DESCRIPTOR_FCP4 = 0x0a40,
+ SCSI_VERSION_DESCRIPTOR_ISCSI = 0x0960,
+ SCSI_VERSION_DESCRIPTOR_SAM5 = 0x00a0,
+ SCSI_VERSION_DESCRIPTOR_SAS3 = 0x0c60,
+ SCSI_VERSION_DESCRIPTOR_SBC3 = 0x04c0,
+ SCSI_VERSION_DESCRIPTOR_SBP3 = 0x0980,
+ SCSI_VERSION_DESCRIPTOR_SPC4 = 0x0460,
+ SCSI_VERSION_DESCRIPTOR_SRP = 0x0940
+};
+
#endif /* _SCSI_PROTO_H_ */
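With SAM status values now a typed enum, a comparison helper can mirror STATUS_MASK to drop the reserved low bit; a minimal sketch:

#include <scsi/scsi_proto.h>

static bool example_is_busy(enum sam_status status)
{
	/* Bit 0 is reserved; mask it off before comparing. */
	return (status & STATUS_MASK) == SAM_STAT_BUSY;
}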
diff --git a/include/scsi/scsi_request.h b/include/scsi/scsi_request.h
deleted file mode 100644
index b06f28c74908..000000000000
--- a/include/scsi/scsi_request.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _SCSI_SCSI_REQUEST_H
-#define _SCSI_SCSI_REQUEST_H
-
-#include <linux/blk-mq.h>
-
-#define BLK_MAX_CDB 16
-
-struct scsi_request {
- unsigned char __cmd[BLK_MAX_CDB];
- unsigned char *cmd;
- unsigned short cmd_len;
- int result;
- unsigned int sense_len;
- unsigned int resid_len; /* residual count */
- int retries;
- void *sense;
-};
-
-static inline struct scsi_request *scsi_req(struct request *rq)
-{
- return blk_mq_rq_to_pdu(rq);
-}
-
-static inline void scsi_req_free_cmd(struct scsi_request *req)
-{
- if (req->cmd != req->__cmd)
- kfree(req->cmd);
-}
-
-void scsi_req_init(struct scsi_request *req);
-
-#endif /* _SCSI_SCSI_REQUEST_H */
diff --git a/include/scsi/scsi_status.h b/include/scsi/scsi_status.h
new file mode 100644
index 000000000000..9cb85262de64
--- /dev/null
+++ b/include/scsi/scsi_status.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _SCSI_SCSI_STATUS_H
+#define _SCSI_SCSI_STATUS_H
+
+#include <linux/types.h>
+#include <scsi/scsi_proto.h>
+
+/* Message codes. */
+enum scsi_msg_byte {
+ COMMAND_COMPLETE = 0x00,
+ EXTENDED_MESSAGE = 0x01,
+ SAVE_POINTERS = 0x02,
+ RESTORE_POINTERS = 0x03,
+ DISCONNECT = 0x04,
+ INITIATOR_ERROR = 0x05,
+ ABORT_TASK_SET = 0x06,
+ MESSAGE_REJECT = 0x07,
+ NOP = 0x08,
+ MSG_PARITY_ERROR = 0x09,
+ LINKED_CMD_COMPLETE = 0x0a,
+ LINKED_FLG_CMD_COMPLETE = 0x0b,
+ TARGET_RESET = 0x0c,
+ ABORT_TASK = 0x0d,
+ CLEAR_TASK_SET = 0x0e,
+ INITIATE_RECOVERY = 0x0f, /* SCSI-II only */
+ RELEASE_RECOVERY = 0x10, /* SCSI-II only */
+ TERMINATE_IO_PROC = 0x11, /* SCSI-II only */
+ CLEAR_ACA = 0x16,
+ LOGICAL_UNIT_RESET = 0x17,
+ SIMPLE_QUEUE_TAG = 0x20,
+ HEAD_OF_QUEUE_TAG = 0x21,
+ ORDERED_QUEUE_TAG = 0x22,
+ IGNORE_WIDE_RESIDUE = 0x23,
+ ACA = 0x24,
+ QAS_REQUEST = 0x55,
+
+ /* Old SCSI2 names, don't use in new code */
+ BUS_DEVICE_RESET = TARGET_RESET,
+ ABORT = ABORT_TASK_SET,
+};
+
+/* Host byte codes. */
+enum scsi_host_status {
+ DID_OK = 0x00, /* NO error */
+ DID_NO_CONNECT = 0x01, /* Couldn't connect before timeout period */
+ DID_BUS_BUSY = 0x02, /* BUS stayed busy through time out period */
+ DID_TIME_OUT = 0x03, /* TIMED OUT for other reason */
+ DID_BAD_TARGET = 0x04, /* BAD target. */
+ DID_ABORT = 0x05, /* Told to abort for some other reason */
+ DID_PARITY = 0x06, /* Parity error */
+ DID_ERROR = 0x07, /* Internal error */
+ DID_RESET = 0x08, /* Reset by somebody. */
+ DID_BAD_INTR = 0x09, /* Got an interrupt we weren't expecting. */
+ DID_PASSTHROUGH = 0x0a, /* Force command past mid-layer */
+	DID_SOFT_ERROR = 0x0b,	/* The low-level driver just wishes a retry */
+ DID_IMM_RETRY = 0x0c, /* Retry without decrementing retry count */
+ DID_REQUEUE = 0x0d, /* Requeue command (no immediate retry) also
+ * without decrementing the retry count */
+ DID_TRANSPORT_DISRUPTED = 0x0e, /* Transport error disrupted execution
+ * and the driver blocked the port to
+ * recover the link. Transport class will
+ * retry or fail IO */
+ DID_TRANSPORT_FAILFAST = 0x0f, /* Transport class fastfailed the io */
+ /*
+ * We used to have DID_TARGET_FAILURE, DID_NEXUS_FAILURE,
+ * DID_ALLOC_FAILURE and DID_MEDIUM_ERROR at 0x10 - 0x13. For compat
+ * with userspace apps that parse the host byte for SG IO, we leave
+ * that block of codes unused and start at 0x14 below.
+ */
+ DID_TRANSPORT_MARGINAL = 0x14, /* Transport marginal errors */
+};
+
+#endif /* _SCSI_SCSI_STATUS_H */
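An illustrative mapping from the new enum scsi_host_status to a retry decision; the policy shown is an assumption for demonstration, not mid-layer behaviour.

#include <scsi/scsi_status.h>

static bool example_host_byte_retryable(enum scsi_host_status hs)
{
	switch (hs) {
	case DID_IMM_RETRY:
	case DID_REQUEUE:
	case DID_SOFT_ERROR:
	case DID_TRANSPORT_DISRUPTED:
		return true;
	default:
		return false;
	}
}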
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
index 6053d46e794e..ea7848e74d25 100644
--- a/include/scsi/scsi_tcq.h
+++ b/include/scsi/scsi_tcq.h
@@ -34,7 +34,7 @@ static inline struct scsi_cmnd *scsi_host_find_tag(struct Scsi_Host *shost,
blk_mq_unique_tag_to_tag(tag));
}
- if (!req)
+ if (!req || !blk_mq_request_started(req))
return NULL;
return blk_mq_rq_to_pdu(req);
}
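A completion-path sketch: since scsi_host_find_tag() now also returns NULL for requests that were never started, one NULL check covers both stale and unstarted tags; the warning text and helper are illustrative.

#include <linux/printk.h>
#include <scsi/scsi_tcq.h>

static struct scsi_cmnd *example_lookup(struct Scsi_Host *shost, int tag)
{
	struct scsi_cmnd *scmd = scsi_host_find_tag(shost, tag);

	if (!scmd)
		pr_warn("stale or unstarted completion for tag %d\n", tag);
	return scmd;
}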
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 7db2dd783834..e80a7c542c88 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -67,6 +67,7 @@ enum fc_port_state {
FC_PORTSTATE_ERROR,
FC_PORTSTATE_LOOPBACK,
FC_PORTSTATE_DELETED,
+ FC_PORTSTATE_MARGINAL,
};
@@ -124,6 +125,7 @@ enum fc_vport_state {
#define FC_PORTSPEED_25GBIT 0x800
#define FC_PORTSPEED_64GBIT 0x1000
#define FC_PORTSPEED_128GBIT 0x2000
+#define FC_PORTSPEED_256GBIT 0x4000
#define FC_PORTSPEED_NOT_NEGOTIATED (1 << 15) /* Speed not established */
/*
@@ -284,6 +286,36 @@ struct fc_rport_identifiers {
u32 roles;
};
+/*
+ * Fabric Performance Impact Notification Statistics
+ */
+struct fc_fpin_stats {
+ /* Delivery */
+ u64 dn;
+ u64 dn_unknown;
+ u64 dn_timeout;
+ u64 dn_unable_to_route;
+ u64 dn_device_specific;
+
+ /* Link Integrity */
+ u64 li;
+ u64 li_failure_unknown;
+ u64 li_link_failure_count;
+ u64 li_loss_of_sync_count;
+ u64 li_loss_of_signals_count;
+ u64 li_prim_seq_err_count;
+ u64 li_invalid_tx_word_count;
+ u64 li_invalid_crc_count;
+ u64 li_device_specific;
+
+ /* Congestion/Peer Congestion */
+ u64 cn;
+ u64 cn_clear;
+ u64 cn_lost_credit;
+ u64 cn_credit_stall;
+ u64 cn_oversubscription;
+ u64 cn_device_specific;
+};
/* Macro for use in defining Remote Port attributes */
#define FC_RPORT_ATTR(_name,_mode,_show,_store) \
@@ -325,6 +357,7 @@ struct fc_rport { /* aka fc_starget_attrs */
/* Dynamic Attributes */
u32 dev_loss_tmo; /* Remote Port loss timeout in seconds. */
+ struct fc_fpin_stats fpin_stats;
/* Private (Transport-managed) Attributes */
u64 node_name;
@@ -436,6 +469,9 @@ struct fc_host_statistics {
u64 fc_seq_not_found; /* seq is not found for exchange */
u64 fc_non_bls_resp; /* a non BLS response frame with
a sequence responder in new exch */
+ /* Host Congestion Signals */
+ u64 cn_sig_warn;
+ u64 cn_sig_alarm;
};
@@ -481,10 +517,11 @@ enum fc_host_event_code {
* managed by the transport w/o driver interaction.
*/
+#define FC_VENDOR_IDENTIFIER 8
#define FC_FC4_LIST_SIZE 32
#define FC_SYMBOLIC_NAME_SIZE 256
#define FC_VERSION_STRING_SIZE 64
-#define FC_SERIAL_NUMBER_SIZE 80
+#define FC_SERIAL_NUMBER_SIZE 64
struct fc_host_attrs {
/* Fixed Attributes */
@@ -496,6 +533,10 @@ struct fc_host_attrs {
u32 supported_speeds;
u32 maxframe_size;
u16 max_npiv_vports;
+ u32 max_ct_payload;
+ u32 num_ports;
+ u32 num_discovered_ports;
+ u32 bootbios_state;
char serial_number[FC_SERIAL_NUMBER_SIZE];
char manufacturer[FC_SERIAL_NUMBER_SIZE];
char model[FC_SYMBOLIC_NAME_SIZE];
@@ -504,6 +545,9 @@ struct fc_host_attrs {
char driver_version[FC_VERSION_STRING_SIZE];
char firmware_version[FC_VERSION_STRING_SIZE];
char optionrom_version[FC_VERSION_STRING_SIZE];
+ char vendor_identifier[FC_VENDOR_IDENTIFIER];
+ char bootbios_version[FC_SYMBOLIC_NAME_SIZE];
+
/* Dynamic Attributes */
u32 port_id;
@@ -515,6 +559,7 @@ struct fc_host_attrs {
char symbolic_name[FC_SYMBOLIC_NAME_SIZE];
char system_hostname[FC_SYMBOLIC_NAME_SIZE];
u32 dev_loss_tmo;
+ struct fc_fpin_stats fpin_stats;
/* Private (Transport-managed) Attributes */
enum fc_tgtid_binding_type tgtid_bind_type;
@@ -536,6 +581,9 @@ struct fc_host_attrs {
/* bsg support */
struct request_queue *rqst_q;
+
+	/* FDMI support version */
+ u8 fdmi_version;
};
#define shost_to_fc_host(x) \
@@ -615,6 +663,18 @@ struct fc_host_attrs {
(((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q)
#define fc_host_dev_loss_tmo(x) \
(((struct fc_host_attrs *)(x)->shost_data)->dev_loss_tmo)
+#define fc_host_max_ct_payload(x) \
+ (((struct fc_host_attrs *)(x)->shost_data)->max_ct_payload)
+#define fc_host_vendor_identifier(x) \
+ (((struct fc_host_attrs *)(x)->shost_data)->vendor_identifier)
+#define fc_host_num_discovered_ports(x) \
+ (((struct fc_host_attrs *)(x)->shost_data)->num_discovered_ports)
+#define fc_host_num_ports(x) \
+ (((struct fc_host_attrs *)(x)->shost_data)->num_ports)
+#define fc_host_bootbios_version(x) \
+ (((struct fc_host_attrs *)(x)->shost_data)->bootbios_version)
+#define fc_host_bootbios_state(x) \
+ (((struct fc_host_attrs *)(x)->shost_data)->bootbios_state)
/* The functions by which the transport class and the driver communicate */
struct fc_function_template {
@@ -706,7 +766,6 @@ struct fc_function_template {
unsigned long disable_target_scan:1;
};
-
/**
* fc_remote_port_chkready - called to validate the remote port state
* prior to initiating io to the port.
@@ -722,6 +781,7 @@ fc_remote_port_chkready(struct fc_rport *rport)
switch (rport->port_state) {
case FC_PORTSTATE_ONLINE:
+ case FC_PORTSTATE_MARGINAL:
if (rport->roles & FC_PORT_ROLE_FCP_TARGET)
result = 0;
else if (rport->flags & FC_RPORT_DEVLOSS_PENDING)
@@ -786,6 +846,7 @@ void fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
enum fc_host_event_code event_code, u32 event_data);
void fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
u32 data_len, char *data_buf, u64 vendor_id);
+struct fc_rport *fc_find_rport_by_wwpn(struct Scsi_Host *shost, u64 wwpn);
void fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
enum fc_host_event_code event_code,
u32 data_len, char *data_buf, u64 vendor_id);
@@ -802,6 +863,7 @@ int fc_vport_terminate(struct fc_vport *vport);
int fc_block_rport(struct fc_rport *rport);
int fc_block_scsi_eh(struct scsi_cmnd *cmnd);
enum blk_eh_timer_return fc_eh_timed_out(struct scsi_cmnd *scmd);
+bool fc_eh_should_retry_cmd(struct scsi_cmnd *scmd);
static inline struct Scsi_Host *fc_bsg_to_shost(struct bsg_job *job)
{
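
With FC_PORTSTATE_MARGINAL now accepted above, fc_remote_port_chkready() reports a usable port for both online and marginal rports. A minimal, hypothetical queuecommand fragment showing the long-standing call pattern (the early-completion path is illustrative, not part of this patch):

/* Hypothetical LLD queuecommand path: gate I/O on the transport-
 * maintained rport state. starget_to_rport() is the standard helper
 * for reaching the fc_rport behind a scsi_target.
 */
static int example_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	int ret;

	ret = fc_remote_port_chkready(rport);
	if (ret) {
		/* Port not usable: complete with the transport result. */
		cmd->result = ret;
		scsi_done(cmd);
		return 0;
	}

	/* ... issue the command to the hardware ... */
	return 0;
}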
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 325ae731d9ad..cab52b0f11d0 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -57,7 +57,7 @@ struct iscsi_bus_flash_conn;
* When not offloading the data path, this is called
* from the scsi work queue without the session lock.
* @xmit_task Requests LLD to transfer cmd task. Returns 0 or the
- * the number of bytes transferred on success, and -Exyz
+ * number of bytes transferred on success, and -Exyz
* value on error. When offloading the data path, this
* is called from queuecommand with the session lock, or
* from the iscsi_conn_send_pdu context with the session
@@ -82,6 +82,7 @@ struct iscsi_transport {
void (*destroy_session) (struct iscsi_cls_session *session);
struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
uint32_t cid);
+ void (*unbind_conn) (struct iscsi_cls_conn *conn, bool is_active);
int (*bind_conn) (struct iscsi_cls_session *session,
struct iscsi_cls_conn *cls_conn,
uint64_t transport_eph, int is_leading);
@@ -161,7 +162,7 @@ struct iscsi_transport {
* transport registration upcalls
*/
extern struct scsi_transport_template *iscsi_register_transport(struct iscsi_transport *tt);
-extern int iscsi_unregister_transport(struct iscsi_transport *tt);
+extern void iscsi_unregister_transport(struct iscsi_transport *tt);
/*
* control plane upcalls
@@ -188,15 +189,35 @@ extern void iscsi_ping_comp_event(uint32_t host_no,
uint32_t status, uint32_t pid,
uint32_t data_size, uint8_t *data);
+/* iscsi class connection state */
+enum iscsi_connection_state {
+ ISCSI_CONN_UP = 0,
+ ISCSI_CONN_DOWN,
+ ISCSI_CONN_FAILED,
+ ISCSI_CONN_BOUND,
+};
+
+#define ISCSI_CLS_CONN_BIT_CLEANUP 1
+
struct iscsi_cls_conn {
struct list_head conn_list; /* item in connlist */
void *dd_data; /* LLD private data */
struct iscsi_transport *transport;
uint32_t cid; /* connection id */
+ /*
+ * This protects the conn startup and binding/unbinding of the ep to
+ * the conn. Unbinding includes ep_disconnect and stop_conn.
+ */
struct mutex ep_mutex;
struct iscsi_endpoint *ep;
+ /* Used when accessing flags and queueing work. */
+ spinlock_t lock;
+ unsigned long flags;
+ struct work_struct cleanup_work;
+
struct device dev; /* sysfs transport/container device */
+ enum iscsi_connection_state state;
};
#define iscsi_dev_to_conn(_dev) \
@@ -225,12 +246,15 @@ struct iscsi_cls_session {
struct work_struct unblock_work;
struct work_struct scan_work;
struct work_struct unbind_work;
+ struct work_struct destroy_work;
/* recovery fields */
int recovery_tmo;
bool recovery_tmo_sysfs_override;
struct delayed_work recovery_work;
+ struct workqueue_struct *workq;
+
unsigned int target_id;
bool ida_used;
@@ -258,7 +282,6 @@ struct iscsi_cls_session {
iscsi_dev_to_session(_stgt->dev.parent)
struct iscsi_cls_host {
- atomic_t nr_scans;
struct mutex mutex;
struct request_queue *bsg_q;
uint32_t port_speed;
@@ -274,7 +297,7 @@ extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
struct iscsi_endpoint {
void *dd_data; /* LLD private data */
struct device dev;
- uint64_t id;
+ int id;
struct iscsi_cls_conn *conn;
};
@@ -419,17 +442,21 @@ extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
struct iscsi_transport *t,
int dd_size,
unsigned int target_id);
+extern void iscsi_force_destroy_session(struct iscsi_cls_session *session);
extern void iscsi_remove_session(struct iscsi_cls_session *session);
extern void iscsi_free_session(struct iscsi_cls_session *session);
-extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
+extern struct iscsi_cls_conn *iscsi_alloc_conn(struct iscsi_cls_session *sess,
int dd_size, uint32_t cid);
-extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
+extern int iscsi_add_conn(struct iscsi_cls_conn *conn);
+extern void iscsi_remove_conn(struct iscsi_cls_conn *conn);
+extern void iscsi_put_conn(struct iscsi_cls_conn *conn);
+extern void iscsi_get_conn(struct iscsi_cls_conn *conn);
extern void iscsi_unblock_session(struct iscsi_cls_session *session);
extern void iscsi_block_session(struct iscsi_cls_session *session);
-extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size);
extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep);
extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle);
+extern void iscsi_put_endpoint(struct iscsi_endpoint *ep);
extern int iscsi_block_scsi_eh(struct scsi_cmnd *cmd);
extern struct iscsi_iface *iscsi_create_iface(struct Scsi_Host *shost,
struct iscsi_transport *t,
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 05ec927a3c72..0e75b9277c8c 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -41,6 +41,7 @@ enum sas_linkrate {
SAS_LINK_RATE_G2 = SAS_LINK_RATE_3_0_GBPS,
SAS_LINK_RATE_6_0_GBPS = 10,
SAS_LINK_RATE_12_0_GBPS = 11,
+ SAS_LINK_RATE_22_5_GBPS = 12,
/* These are virtual to the transport class and may never
* be signalled normally since the standard defined field
* is only 4 bits */
diff --git a/include/scsi/scsicam.h b/include/scsi/scsicam.h
index 57c729254569..08edd603e521 100644
--- a/include/scsi/scsicam.h
+++ b/include/scsi/scsicam.h
@@ -13,8 +13,7 @@
#ifndef SCSICAM_H
#define SCSICAM_H
-extern int scsicam_bios_param (struct block_device *bdev, sector_t capacity, int *ip);
-extern int scsi_partsize(unsigned char *buf, unsigned long capacity,
- unsigned int *cyls, unsigned int *hds, unsigned int *secs);
-extern unsigned char *scsi_bios_ptable(struct block_device *bdev);
+int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip);
+bool scsi_partsize(struct block_device *bdev, sector_t capacity, int geom[3]);
+unsigned char *scsi_bios_ptable(struct block_device *bdev);
#endif /* def SCSICAM_H */
diff --git a/include/scsi/sg.h b/include/scsi/sg.h
index 29c7ad04d2e2..068e35d36557 100644
--- a/include/scsi/sg.h
+++ b/include/scsi/sg.h
@@ -24,15 +24,11 @@
* http://sg.danny.cz/sg [alternatively check the MAINTAINERS file]
* The documentation for the sg version 3 driver can be found at:
* http://sg.danny.cz/sg/p/sg_v3_ho.html
- * Also see: <kernel_source>/Documentation/scsi/scsi-generic.txt
+ * Also see: <kernel_source>/Documentation/scsi/scsi-generic.rst
*
* For utility and test programs see: http://sg.danny.cz/sg/sg3_utils.html
*/
-#ifdef __KERNEL__
-extern int sg_big_buff; /* for sysctl */
-#endif
-
typedef struct sg_iovec /* same structure as used by readv() Linux system */
{ /* call. It defines one scatter-gather element. */
@@ -131,6 +127,39 @@ struct compat_sg_io_hdr {
#define SG_INFO_DIRECT_IO 0x2 /* direct IO requested and performed */
#define SG_INFO_MIXED_IO 0x4 /* part direct, part indirect IO */
+/*
+ * Obsolete DRIVER_SENSE driver byte
+ *
+ * Originally the SCSI midlayer would set the DRIVER_SENSE driver byte when
+ * a sense code was generated and a sense buffer was allocated.
+ * However, as nowadays every scsi command has a sense code allocated this
+ * distinction became moot as one could check the sense buffer directly.
+ * Consequently this byte is not set anymore from the midlayer, but SG will
+ * keep setting this byte to be compatible with previous releases.
+ */
+#define DRIVER_SENSE 0x08
+/* Obsolete driver_byte() declaration */
+#define driver_byte(result) (((result) >> 24) & 0xff)
+
+/*
+ * Original linux SCSI Status codes. They are shifted 1 bit right
+ * from those found in the SCSI standards.
+ */
+
+#define GOOD 0x00
+#define CHECK_CONDITION 0x01
+#define CONDITION_GOOD 0x02
+#define BUSY 0x04
+#define INTERMEDIATE_GOOD 0x08
+#define INTERMEDIATE_C_GOOD 0x0a
+#define RESERVATION_CONFLICT 0x0c
+#define COMMAND_TERMINATED 0x11
+#define QUEUE_FULL 0x14
+#define ACA_ACTIVE 0x18
+#define TASK_ABORTED 0x20
+
+/* Obsolete status_byte() declaration */
+#define status_byte(result) (((result) >> 1) & 0x7f)
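
The compatibility macros above decode the sg-visible result word; note the 1-bit right shift, e.g. SAM CHECK CONDITION is 0x02 on the wire but 0x01 here. A small userspace-style decode, assuming these definitions are in scope (the result value is fabricated for illustration):

#include <stdio.h>
#include <scsi/sg.h>	/* assumed to expose the macros above */

int main(void)
{
	/* driver byte = DRIVER_SENSE, low byte = SAM CHECK CONDITION (0x02) */
	unsigned int result = (DRIVER_SENSE << 24) | 0x02;

	if (status_byte(result) == CHECK_CONDITION &&
	    (driver_byte(result) & DRIVER_SENSE))
		printf("check condition, sense data available\n");
	return 0;
}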
typedef struct sg_scsi_id { /* used by SG_GET_SCSI_ID ioctl() */
int host_no; /* as in "scsi<n>" where 'n' is one of 0, 1, 2 etc */
@@ -145,7 +174,7 @@ typedef struct sg_scsi_id { /* used by SG_GET_SCSI_ID ioctl() */
typedef struct sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */
char req_state; /* 0 -> not used, 1 -> written, 2 -> ready to read */
- char orphan; /* 0 -> normal request, 1 -> from interruped SG_IO */
+ char orphan; /* 0 -> normal request, 1 -> from interrupted SG_IO */
char sg_io_owned; /* 0 -> complete with read(), 1 -> owned by SG_IO */
char problem; /* 0 -> no problem detected, 1 -> error to report */
int pack_id; /* pack_id associated with request */
diff --git a/include/scsi/srp.h b/include/scsi/srp.h
index 9220758d5087..dfe0984b58a9 100644
--- a/include/scsi/srp.h
+++ b/include/scsi/srp.h
@@ -107,10 +107,10 @@ struct srp_direct_buf {
* having the 20-byte structure padded to 24 bytes on 64-bit architectures.
*/
struct srp_indirect_buf {
- struct srp_direct_buf table_desc;
+ struct srp_direct_buf table_desc __packed __aligned(4);
__be32 len;
- struct srp_direct_buf desc_list[0];
-} __attribute__((packed));
+ struct srp_direct_buf desc_list[] __packed __aligned(4);
+};
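
The comment above explains the move from a struct-wide packed attribute to per-member __packed __aligned(4): without it, the 8-byte va inside table_desc would pad the header to 24 bytes on 64-bit targets. A compile-time sketch of the intended 20-byte layout (static_assert as provided by <linux/build_bug.h>; the size follows from srp_direct_buf being the 16-byte {va, len, key} descriptor defined earlier in this header):

/* Sketch: the per-member attributes keep the header at 16 + 4 = 20
 * bytes on all architectures, preserving the wire format.
 */
static_assert(sizeof(struct srp_indirect_buf) == 20,
	      "srp_indirect_buf header must stay 20 bytes");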
/* Immediate data buffer descriptor as defined in SRP2. */
struct srp_imm_buf {
@@ -175,13 +175,13 @@ struct srp_login_rsp {
u8 opcode;
u8 reserved1[3];
__be32 req_lim_delta;
- u64 tag;
+ u64 tag __packed __aligned(4);
__be32 max_it_iu_len;
__be32 max_ti_iu_len;
__be16 buf_fmt;
u8 rsp_flags;
u8 reserved2[25];
-} __attribute__((packed));
+};
struct srp_login_rej {
u8 opcode;
@@ -207,10 +207,6 @@ struct srp_t_logout {
u64 tag;
};
-/*
- * We need the packed attribute because the SRP spec only aligns the
- * 8-byte LUN field to 4 bytes.
- */
struct srp_tsk_mgmt {
u8 opcode;
u8 sol_not;
@@ -225,10 +221,6 @@ struct srp_tsk_mgmt {
u8 reserved5[8];
};
-/*
- * We need the packed attribute because the SRP spec only aligns the
- * 8-byte LUN field to 4 bytes.
- */
struct srp_cmd {
u8 opcode;
u8 sol_not;
@@ -244,7 +236,7 @@ struct srp_cmd {
u8 reserved4;
u8 add_cdb_len;
u8 cdb[16];
- u8 add_data[0];
+ u8 add_data[];
};
enum {
@@ -266,7 +258,7 @@ struct srp_rsp {
u8 sol_not;
u8 reserved1[2];
__be32 req_lim_delta;
- u64 tag;
+ u64 tag __packed __aligned(4);
u8 reserved2[2];
u8 flags;
u8 status;
@@ -274,8 +266,8 @@ struct srp_rsp {
__be32 data_in_res_cnt;
__be32 sense_data_len;
__be32 resp_data_len;
- u8 data[0];
-} __attribute__((packed));
+ u8 data[];
+};
struct srp_cred_req {
u8 opcode;
@@ -301,13 +293,13 @@ struct srp_aer_req {
u8 sol_not;
u8 reserved[2];
__be32 req_lim_delta;
- u64 tag;
+ u64 tag __packed __aligned(4);
u32 reserved2;
struct scsi_lun lun;
__be32 sense_data_len;
u32 reserved3;
- u8 sense_data[0];
-} __attribute__((packed));
+ u8 sense_data[];
+};
struct srp_aer_rsp {
u8 opcode;
diff --git a/include/scsi/viosrp.h b/include/scsi/viosrp.h
index c978133c83e3..6c5559d2b285 100644
--- a/include/scsi/viosrp.h
+++ b/include/scsi/viosrp.h
@@ -70,12 +70,17 @@ enum viosrp_crq_status {
};
struct viosrp_crq {
- u8 valid; /* used by RPA */
- u8 format; /* SCSI vs out-of-band */
- u8 reserved;
- u8 status; /* non-scsi failure? (e.g. DMA failure) */
- __be16 timeout; /* in seconds */
- __be16 IU_length; /* in bytes */
+ union {
+ __be64 high; /* High 64 bits */
+ struct {
+ u8 valid; /* used by RPA */
+ u8 format; /* SCSI vs out-of-band */
+ u8 reserved;
+ u8 status; /* non-scsi failure? (e.g. DMA failure) */
+ __be16 timeout; /* in seconds */
+ __be16 IU_length; /* in bytes */
+ };
+ };
__be64 IU_data_ptr; /* the TCE for transferring data */
};
diff --git a/include/soc/arc/aux.h b/include/soc/arc/aux.h
index e223c4ffa153..9c2eff6140b6 100644
--- a/include/soc/arc/aux.h
+++ b/include/soc/arc/aux.h
@@ -22,7 +22,7 @@ static inline int read_aux_reg(u32 r)
/*
* function helps elide unused variable warning
- * see: http://lists.infradead.org/pipermail/linux-snps-arc/2016-November/001748.html
+ * see: https://lists.infradead.org/pipermail/linux-snps-arc/2016-November/001748.html
*/
static inline void write_aux_reg(u32 r, u32 v)
{
diff --git a/include/soc/arc/timers.h b/include/soc/arc/timers.h
index 7ecde3b159c8..ae99d3e855f1 100644
--- a/include/soc/arc/timers.h
+++ b/include/soc/arc/timers.h
@@ -17,8 +17,8 @@
#define ARC_REG_TIMER1_CNT 0x100 /* timer 1 count */
/* CTRL reg bits */
-#define TIMER_CTRL_IE (1 << 0) /* Interrupt when Count reaches limit */
-#define TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */
+#define ARC_TIMER_CTRL_IE (1 << 0) /* Interrupt when Count reaches limit */
+#define ARC_TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */
#define ARC_TIMERN_MAX 0xFFFFFFFF
diff --git a/include/soc/at91/atmel_tcb.h b/include/soc/at91/atmel_tcb.h
index c3c7200ce151..1d7071dc0bca 100644
--- a/include/soc/at91/atmel_tcb.h
+++ b/include/soc/at91/atmel_tcb.h
@@ -36,9 +36,14 @@ struct clk;
/**
* struct atmel_tcb_config - SoC data for a Timer/Counter Block
* @counter_width: size in bits of a timer counter register
+ * @has_gclk: boolean indicating if a timer counter has a generic clock
+ * @has_qdec: boolean indicating if a timer counter has a quadrature
+ * decoder.
*/
struct atmel_tcb_config {
size_t counter_width;
+ bool has_gclk;
+ bool has_qdec;
};
/**
diff --git a/include/soc/at91/sama7-ddr.h b/include/soc/at91/sama7-ddr.h
new file mode 100644
index 000000000000..6ce3bd22f6c6
--- /dev/null
+++ b/include/soc/at91/sama7-ddr.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Microchip SAMA7 UDDR Controller and DDR3 PHY Controller register offsets
+ * and bit definitions.
+ *
+ * Copyright (C) [2020] Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Claudiu Beznea <claudiu.beznea@microchip.com>
+ */
+
+#ifndef __SAMA7_DDR_H__
+#define __SAMA7_DDR_H__
+
+/* DDR3PHY */
+#define DDR3PHY_PIR (0x04) /* DDR3PHY PHY Initialization Register */
+#define DDR3PHY_PIR_DLLBYP (1 << 17) /* DLL Bypass */
+#define DDR3PHY_PIR_ITMSRST (1 << 4) /* Interface Timing Module Soft Reset */
+#define DDR3PHY_PIR_DLLLOCK (1 << 2) /* DLL Lock */
+#define DDR3PHY_PIR_DLLSRST (1 << 1) /* DLL Soft Reset */
+#define DDR3PHY_PIR_INIT (1 << 0) /* Initialization Trigger */
+
+#define DDR3PHY_PGCR (0x08) /* DDR3PHY PHY General Configuration Register */
+#define DDR3PHY_PGCR_CKDV1 (1 << 13) /* CK# Disable Value */
+#define DDR3PHY_PGCR_CKDV0 (1 << 12) /* CK Disable Value */
+
+#define DDR3PHY_PGSR (0x0C) /* DDR3PHY PHY General Status Register */
+#define DDR3PHY_PGSR_IDONE (1 << 0) /* Initialization Done */
+
+#define DDR3PHY_ACIOCR (0x24) /* DDR3PHY AC I/O Configuration Register */
+#define DDR3PHY_ACIOCR_CSPDD_CS0 (1 << 18) /* CS#[0] Power Down Driver */
+#define DDR3PHY_ACIOCR_CKPDD_CK0 (1 << 8) /* CK[0] Power Down Driver */
+#define DDR3PHY_ACIORC_ACPDD (1 << 3) /* AC Power Down Driver */
+
+#define DDR3PHY_DXCCR (0x28) /* DDR3PHY DATX8 Common Configuration Register */
+#define DDR3PHY_DXCCR_DXPDR (1 << 3) /* Data Power Down Receiver */
+
+#define DDR3PHY_DSGCR (0x2C) /* DDR3PHY DDR System General Configuration Register */
+#define DDR3PHY_DSGCR_ODTPDD_ODT0 (1 << 20) /* ODT[0] Power Down Driver */
+
+#define DDR3PHY_ZQ0SR0 (0x188) /* ZQ status register 0 */
+#define DDR3PHY_ZQ0SR0_PDO_OFF (0) /* Pull-down output impedance select offset */
+#define DDR3PHY_ZQ0SR0_PUO_OFF (5) /* Pull-up output impedance select offset */
+#define DDR3PHY_ZQ0SR0_PDODT_OFF (10) /* Pull-down on-die termination impedance select offset */
+#define DDR3PHY_ZQ0SRO_PUODT_OFF (15) /* Pull-up on-die termination impedance select offset */
+
+#define DDR3PHY_DX0DLLCR (0x1CC) /* DDR3PHY DATX8 DLL Control Register */
+#define DDR3PHY_DX1DLLCR (0x20C) /* DDR3PHY DATX8 DLL Control Register */
+#define DDR3PHY_DXDLLCR_DLLDIS (1 << 31) /* DLL Disable */
+
+/* UDDRC */
+#define UDDRC_STAT (0x04) /* UDDRC Operating Mode Status Register */
+#define UDDRC_STAT_SELFREF_TYPE_DIS (0x0 << 4) /* SDRAM is not in Self-refresh */
+#define UDDRC_STAT_SELFREF_TYPE_PHY (0x1 << 4) /* SDRAM is in Self-refresh, which was caused by PHY Master Request */
+#define UDDRC_STAT_SELFREF_TYPE_SW (0x2 << 4) /* SDRAM is in Self-refresh, which was not caused solely by Automatic Self-refresh control */
+#define UDDRC_STAT_SELFREF_TYPE_AUTO (0x3 << 4) /* SDRAM is in Self-refresh, which was caused by Automatic Self-refresh only */
+#define UDDRC_STAT_SELFREF_TYPE_MSK (0x3 << 4) /* Self-refresh type mask */
+#define UDDRC_STAT_OPMODE_INIT (0x0 << 0) /* Init */
+#define UDDRC_STAT_OPMODE_NORMAL (0x1 << 0) /* Normal */
+#define UDDRC_STAT_OPMODE_PWRDOWN (0x2 << 0) /* Power-down */
+#define UDDRC_STAT_OPMODE_SELF_REFRESH (0x3 << 0) /* Self-refresh */
+#define UDDRC_STAT_OPMODE_MSK (0x7 << 0) /* Operating mode mask */
+
+#define UDDRC_PWRCTL (0x30) /* UDDRC Low Power Control Register */
+#define UDDRC_PWRCTL_SELFREF_EN (1 << 0) /* Automatic self-refresh */
+#define UDDRC_PWRCTL_SELFREF_SW (1 << 5) /* Software self-refresh */
+
+#define UDDRC_DFIMISC (0x1B0) /* UDDRC DFI Miscellaneous Control Register */
+#define UDDRC_DFIMISC_DFI_INIT_COMPLETE_EN (1 << 0) /* PHY initialization complete enable signal */
+
+#define UDDRC_SWCTRL (0x320) /* UDDRC Software Register Programming Control Enable */
+#define UDDRC_SWCTRL_SW_DONE (1 << 0) /* Enable quasi-dynamic register programming outside reset */
+
+#define UDDRC_SWSTAT (0x324) /* UDDRC Software Register Programming Control Status */
+#define UDDRC_SWSTAT_SW_DONE_ACK (1 << 0) /* Register programming done */
+
+#define UDDRC_PSTAT (0x3FC) /* UDDRC Port Status Register */
+#define UDDRC_PSTAT_ALL_PORTS (0x1F001F) /* Read + write outstanding transactions on all ports */
+
+#define UDDRC_PCTRL_0 (0x490) /* UDDRC Port 0 Control Register */
+#define UDDRC_PCTRL_1 (0x540) /* UDDRC Port 1 Control Register */
+#define UDDRC_PCTRL_2 (0x5F0) /* UDDRC Port 2 Control Register */
+#define UDDRC_PCTRL_3 (0x6A0) /* UDDRC Port 3 Control Register */
+#define UDDRC_PCTRL_4 (0x750) /* UDDRC Port 4 Control Register */
+
+#endif /* __SAMA7_DDR_H__ */
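
The PWRCTL/STAT pairing above implies the usual software self-refresh handshake: request it through UDDRC_PWRCTL_SELFREF_SW, then poll UDDRC_STAT until the self-refresh type reads back as software-initiated. A hedged sketch; the register base, readl()/writel() accessors and the polling loop are illustrative assumptions, not code from this patch:

/* Illustrative software self-refresh entry; 'base' is assumed to be
 * the ioremapped UDDRC register block.
 */
static void example_uddrc_enter_selfrefresh(void __iomem *base)
{
	u32 pwrctl, stat;

	pwrctl = readl(base + UDDRC_PWRCTL);
	writel(pwrctl | UDDRC_PWRCTL_SELFREF_SW, base + UDDRC_PWRCTL);

	/* Wait until the controller reports software self-refresh. */
	do {
		stat = readl(base + UDDRC_STAT);
	} while ((stat & UDDRC_STAT_SELFREF_TYPE_MSK) !=
		 UDDRC_STAT_SELFREF_TYPE_SW);
}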
diff --git a/include/soc/at91/sama7-sfrbu.h b/include/soc/at91/sama7-sfrbu.h
new file mode 100644
index 000000000000..76b740810d34
--- /dev/null
+++ b/include/soc/at91/sama7-sfrbu.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Microchip SAMA7 SFRBU register offsets and bit definitions.
+ *
+ * Copyright (C) [2020] Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Claudiu Beznea <claudiu.beznea@microchip.com>
+ */
+
+#ifndef __SAMA7_SFRBU_H__
+#define __SAMA7_SFRBU_H__
+
+#ifdef CONFIG_SOC_SAMA7
+
+#define AT91_SFRBU_PSWBU (0x00) /* SFRBU Power Switch BU Control Register */
+#define AT91_SFRBU_PSWBU_PSWKEY (0x4BD20C << 8) /* Specific value mandatory to allow writing of other register bits */
+#define AT91_SFRBU_PSWBU_STATE (1 << 2) /* Power switch BU state */
+#define AT91_SFRBU_PSWBU_SOFTSWITCH (1 << 1) /* Power switch BU source selection */
+#define AT91_SFRBU_PSWBU_CTRL (1 << 0) /* Power switch BU control */
+
+#define AT91_SFRBU_25LDOCR (0x0C) /* SFRBU 2.5V LDO Control Register */
+#define AT91_SFRBU_25LDOCR_LDOANAKEY (0x3B6E18 << 8) /* Specific value mandatory to allow writing of other register bits. */
+#define AT91_SFRBU_25LDOCR_STATE (1 << 3) /* LDOANA Switch On/Off Control */
+#define AT91_SFRBU_25LDOCR_LP (1 << 2) /* LDOANA Low-Power Mode Control */
+#define AT91_SFRBU_PD_VALUE_MSK (0x3)
+#define AT91_SFRBU_25LDOCR_PD_VALUE(v) ((v) & AT91_SFRBU_PD_VALUE_MSK) /* LDOANA Pull-down value */
+
+#define AT91_FRBU_DDRPWR (0x10) /* SFRBU DDR Power Control Register */
+#define AT91_FRBU_DDRPWR_STATE (1 << 0) /* DDR Power Mode State */
+
+#endif /* CONFIG_SOC_SAMA7 */
+
+#endif /* __SAMA7_SFRBU_H__ */
+
diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h
index 7800e12ee042..811ea668c4a1 100644
--- a/include/soc/bcm2835/raspberrypi-firmware.h
+++ b/include/soc/bcm2835/raspberrypi-firmware.h
@@ -90,7 +90,8 @@ enum rpi_firmware_property_tag {
RPI_FIRMWARE_SET_PERIPH_REG = 0x00038045,
RPI_FIRMWARE_GET_POE_HAT_VAL = 0x00030049,
RPI_FIRMWARE_SET_POE_HAT_VAL = 0x00030050,
-
+ RPI_FIRMWARE_NOTIFY_XHCI_RESET = 0x00030058,
+ RPI_FIRMWARE_NOTIFY_DISPLAY_DONE = 0x00030066,
/* Dispmanx TAGS */
RPI_FIRMWARE_FRAMEBUFFER_ALLOCATE = 0x00040001,
@@ -140,7 +141,10 @@ int rpi_firmware_property(struct rpi_firmware *fw,
u32 tag, void *data, size_t len);
int rpi_firmware_property_list(struct rpi_firmware *fw,
void *data, size_t tag_size);
+void rpi_firmware_put(struct rpi_firmware *fw);
struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node);
+struct rpi_firmware *devm_rpi_firmware_get(struct device *dev,
+ struct device_node *firmware_node);
#else
static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag,
void *data, size_t len)
@@ -154,10 +158,17 @@ static inline int rpi_firmware_property_list(struct rpi_firmware *fw,
return -ENOSYS;
}
+static inline void rpi_firmware_put(struct rpi_firmware *fw) { }
static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
{
return NULL;
}
+
+static inline struct rpi_firmware *devm_rpi_firmware_get(struct device *dev,
+ struct device_node *firmware_node)
+{
+ return NULL;
+}
#endif
#endif /* __SOC_RASPBERRY_FIRMWARE_H__ */
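
A hypothetical consumer probe showing the new device-managed getter; the "firmware" phandle lookup is an illustrative assumption. Because the reference is tied to the device, no explicit rpi_firmware_put() is needed on the error or remove paths:

static int example_probe(struct platform_device *pdev)
{
	struct device_node *fw_node;
	struct rpi_firmware *fw;

	fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
	if (!fw_node)
		return -ENOENT;

	/* devm variant: the reference is released on driver detach */
	fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
	of_node_put(fw_node);
	if (!fw)
		return -EPROBE_DEFER;

	/* ... issue rpi_firmware_property() calls against 'fw' ... */
	return 0;
}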
diff --git a/include/soc/brcmstb/common.h b/include/soc/brcmstb/common.h
deleted file mode 100644
index e4fe76856de9..000000000000
--- a/include/soc/brcmstb/common.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright © 2014 NVIDIA Corporation
- * Copyright © 2015 Broadcom Corporation
- */
-
-#ifndef __SOC_BRCMSTB_COMMON_H__
-#define __SOC_BRCMSTB_COMMON_H__
-
-bool soc_is_brcmstb(void);
-
-#endif /* __SOC_BRCMSTB_COMMON_H__ */
diff --git a/include/soc/canaan/k210-sysctl.h b/include/soc/canaan/k210-sysctl.h
new file mode 100644
index 000000000000..0c2b2c2dabca
--- /dev/null
+++ b/include/soc/canaan/k210-sysctl.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019-20 Sean Anderson <seanga2@gmail.com>
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ */
+#ifndef K210_SYSCTL_H
+#define K210_SYSCTL_H
+
+/*
+ * Kendryte K210 SoC system controller registers offsets.
+ * Taken from Kendryte SDK (kendryte-standalone-sdk).
+ */
+#define K210_SYSCTL_GIT_ID 0x00 /* Git short commit id */
+#define K210_SYSCTL_UART_BAUD 0x04 /* Default UARTHS baud rate */
+#define K210_SYSCTL_PLL0 0x08 /* PLL0 controller */
+#define K210_SYSCTL_PLL1 0x0C /* PLL1 controller */
+#define K210_SYSCTL_PLL2 0x10 /* PLL2 controller */
+#define K210_SYSCTL_PLL_LOCK 0x18 /* PLL lock tester */
+#define K210_SYSCTL_ROM_ERROR 0x1C /* AXI ROM detector */
+#define K210_SYSCTL_SEL0 0x20 /* Clock select controller 0 */
+#define K210_SYSCTL_SEL1 0x24 /* Clock select controller 1 */
+#define K210_SYSCTL_EN_CENT 0x28 /* Central clock enable */
+#define K210_SYSCTL_EN_PERI 0x2C /* Peripheral clock enable */
+#define K210_SYSCTL_SOFT_RESET 0x30 /* Soft reset ctrl */
+#define K210_SYSCTL_PERI_RESET 0x34 /* Peripheral reset controller */
+#define K210_SYSCTL_THR0 0x38 /* Clock threshold controller 0 */
+#define K210_SYSCTL_THR1 0x3C /* Clock threshold controller 1 */
+#define K210_SYSCTL_THR2 0x40 /* Clock threshold controller 2 */
+#define K210_SYSCTL_THR3 0x44 /* Clock threshold controller 3 */
+#define K210_SYSCTL_THR4 0x48 /* Clock threshold controller 4 */
+#define K210_SYSCTL_THR5 0x4C /* Clock threshold controller 5 */
+#define K210_SYSCTL_THR6 0x50 /* Clock threshold controller 6 */
+#define K210_SYSCTL_MISC 0x54 /* Miscellaneous controller */
+#define K210_SYSCTL_PERI 0x58 /* Peripheral controller */
+#define K210_SYSCTL_SPI_SLEEP 0x5C /* SPI sleep controller */
+#define K210_SYSCTL_RESET_STAT 0x60 /* Reset source status */
+#define K210_SYSCTL_DMA_SEL0 0x64 /* DMA handshake selector 0 */
+#define K210_SYSCTL_DMA_SEL1 0x68 /* DMA handshake selector 1 */
+#define K210_SYSCTL_POWER_SEL 0x6C /* IO Power Mode Select controller */
+
+void k210_clk_early_init(void __iomem *regs);
+
+#endif
diff --git a/include/soc/fsl/caam-blob.h b/include/soc/fsl/caam-blob.h
new file mode 100644
index 000000000000..937cac52f36d
--- /dev/null
+++ b/include/soc/fsl/caam-blob.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
+ */
+
+#ifndef __CAAM_BLOB_GEN
+#define __CAAM_BLOB_GEN
+
+#include <linux/types.h>
+#include <linux/errno.h>
+
+#define CAAM_BLOB_KEYMOD_LENGTH 16
+#define CAAM_BLOB_OVERHEAD (32 + 16)
+#define CAAM_BLOB_MAX_LEN 4096
+
+struct caam_blob_priv;
+
+/**
+ * struct caam_blob_info - information for CAAM blobbing
+ * @input: pointer to input buffer (must be DMAable)
+ * @input_len: length of @input buffer in bytes.
+ * @output: pointer to output buffer (must be DMAable)
+ * @output_len: length of @output buffer in bytes.
+ * @key_mod: key modifier
+ * @key_mod_len: length of @key_mod in bytes.
+ * May not exceed %CAAM_BLOB_KEYMOD_LENGTH
+ */
+struct caam_blob_info {
+ void *input;
+ size_t input_len;
+
+ void *output;
+ size_t output_len;
+
+ const void *key_mod;
+ size_t key_mod_len;
+};
+
+/**
+ * caam_blob_gen_init - initialize blob generation
+ * Return: pointer to new &struct caam_blob_priv instance on success
+ * and ``ERR_PTR(-ENODEV)`` if CAAM has no hardware blobbing support
+ * or no job ring could be allocated.
+ */
+struct caam_blob_priv *caam_blob_gen_init(void);
+
+/**
+ * caam_blob_gen_exit - free blob generation resources
+ * @priv: instance returned by caam_blob_gen_init()
+ */
+void caam_blob_gen_exit(struct caam_blob_priv *priv);
+
+/**
+ * caam_process_blob - encapsulate or decapsulate blob
+ * @priv: instance returned by caam_blob_gen_init()
+ * @info: pointer to blobbing info describing key, blob and
+ * key modifier buffers.
+ * @encap: true for encapsulation, false for decapsulation
+ *
+ * Return: %0 and sets ``info->output_len`` on success and a negative
+ * error code otherwise.
+ */
+int caam_process_blob(struct caam_blob_priv *priv,
+ struct caam_blob_info *info, bool encap);
+
+/**
+ * caam_encap_blob - encapsulate blob
+ * @priv: instance returned by caam_blob_gen_init()
+ * @info: pointer to blobbing info describing input key,
+ * output blob and key modifier buffers.
+ *
+ * Return: %0 and sets ``info->output_len`` on success and
+ * a negative error code otherwise.
+ */
+static inline int caam_encap_blob(struct caam_blob_priv *priv,
+ struct caam_blob_info *info)
+{
+ if (info->output_len < info->input_len + CAAM_BLOB_OVERHEAD)
+ return -EINVAL;
+
+ return caam_process_blob(priv, info, true);
+}
+
+/**
+ * caam_decap_blob - decapsulate blob
+ * @priv: instance returned by caam_blob_gen_init()
+ * @info: pointer to blobbing info describing output key,
+ * input blob and key modifier buffers.
+ *
+ * Return: %0 and sets ``info->output_len`` on success and
+ * a negative error code otherwise.
+ */
+static inline int caam_decap_blob(struct caam_blob_priv *priv,
+ struct caam_blob_info *info)
+{
+ if (info->input_len < CAAM_BLOB_OVERHEAD ||
+ info->output_len < info->input_len - CAAM_BLOB_OVERHEAD)
+ return -EINVAL;
+
+ return caam_process_blob(priv, info, false);
+}
+
+#endif
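
A minimal encapsulation sketch against the API above; the buffer sizing follows the CAAM_BLOB_OVERHEAD rule that caam_encap_blob() enforces, and the key modifier string is a placeholder:

/* Illustrative blob sealing. Real callers must pass DMAable buffers,
 * as the kernel-doc above requires.
 */
static int example_seal_key(void *key, size_t key_len,
			    void *blob, size_t blob_len)
{
	struct caam_blob_priv *priv;
	struct caam_blob_info info = {
		.input = key,
		.input_len = key_len,
		.output = blob,
		.output_len = blob_len, /* >= key_len + CAAM_BLOB_OVERHEAD */
		.key_mod = "example-modifier",
		.key_mod_len = 16, /* <= CAAM_BLOB_KEYMOD_LENGTH */
	};
	int ret;

	priv = caam_blob_gen_init();
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	ret = caam_encap_blob(priv, &info);
	caam_blob_gen_exit(priv);
	return ret;
}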
diff --git a/include/soc/fsl/dpaa2-fd.h b/include/soc/fsl/dpaa2-fd.h
index 90ae8d191f1a..bae490cac0aa 100644
--- a/include/soc/fsl/dpaa2-fd.h
+++ b/include/soc/fsl/dpaa2-fd.h
@@ -7,7 +7,8 @@
#ifndef __FSL_DPAA2_FD_H
#define __FSL_DPAA2_FD_H
-#include <linux/kernel.h>
+#include <linux/byteorder/generic.h>
+#include <linux/types.h>
/**
* DOC: DPAA2 FD - Frame Descriptor APIs for DPAA2
diff --git a/include/soc/fsl/dpaa2-io.h b/include/soc/fsl/dpaa2-io.h
index 672cfb58046f..4bf62de2e00e 100644
--- a/include/soc/fsl/dpaa2-io.h
+++ b/include/soc/fsl/dpaa2-io.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright NXP
+ * Copyright 2017-2019 NXP
*
*/
#ifndef __FSL_DPAA2_IO_H
@@ -44,6 +44,7 @@ struct device;
* @regs_cinh: The cache inhibited regs
* @dpio_id: The dpio index
* @qman_version: The qman version
+ * @qman_clk: The qman clock frequency in Hz
*
* Describes the attributes and features of the DPIO object.
*/
@@ -55,6 +56,7 @@ struct dpaa2_io_desc {
void __iomem *regs_cinh;
int dpio_id;
u32 qman_version;
+ u32 qman_clk;
};
struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
@@ -109,6 +111,10 @@ int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, u32 fqid,
const struct dpaa2_fd *fd);
+int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d, u32 fqid,
+ const struct dpaa2_fd *fd, int number_of_frame);
+int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d, u32 *fqid,
+ const struct dpaa2_fd *fd, int number_of_frame);
int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
u16 qdbin, const struct dpaa2_fd *fd);
int dpaa2_io_service_release(struct dpaa2_io *d, u16 bpid,
@@ -125,4 +131,11 @@ int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
u32 *fcnt, u32 *bcnt);
int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid,
u32 *num);
+
+int dpaa2_io_set_irq_coalescing(struct dpaa2_io *d, u32 irq_holdoff);
+void dpaa2_io_get_irq_coalescing(struct dpaa2_io *d, u32 *irq_holdoff);
+void dpaa2_io_set_adaptive_coalescing(struct dpaa2_io *d,
+ int use_adaptive_rx_coalesce);
+int dpaa2_io_get_adaptive_coalescing(struct dpaa2_io *d);
+void dpaa2_io_update_net_dim(struct dpaa2_io *d, __u64 frames, __u64 bytes);
#endif /* __FSL_DPAA2_IO_H */
diff --git a/include/soc/fsl/qe/immap_qe.h b/include/soc/fsl/qe/immap_qe.h
index 7614fee532f1..edd601f53f5d 100644
--- a/include/soc/fsl/qe/immap_qe.h
+++ b/include/soc/fsl/qe/immap_qe.h
@@ -13,7 +13,8 @@
#define _ASM_POWERPC_IMMAP_QE_H
#ifdef __KERNEL__
-#include <linux/kernel.h>
+#include <linux/types.h>
+
#include <asm/io.h>
#define QE_IMMAP_SIZE (1024 * 1024) /* 1MB from 1MB+IMMR */
diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h
index e282ac01ec08..b02e9fe69146 100644
--- a/include/soc/fsl/qe/qe.h
+++ b/include/soc/fsl/qe/qe.h
@@ -27,12 +27,6 @@
#define QE_NUM_OF_BRGS 16
#define QE_NUM_OF_PORTS 1024
-/* Memory partitions
-*/
-#define MEM_PART_SYSTEM 0
-#define MEM_PART_SECONDARY 1
-#define MEM_PART_MURAM 2
-
/* Clocks and BRGs */
enum qe_clock {
QE_CLK_NONE = 0,
@@ -102,8 +96,9 @@ s32 cpm_muram_alloc(unsigned long size, unsigned long align);
void cpm_muram_free(s32 offset);
s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size);
void __iomem *cpm_muram_addr(unsigned long offset);
-unsigned long cpm_muram_offset(void __iomem *addr);
+unsigned long cpm_muram_offset(const void __iomem *addr);
dma_addr_t cpm_muram_dma(void __iomem *addr);
+void cpm_muram_free_addr(const void __iomem *addr);
#else
static inline s32 cpm_muram_alloc(unsigned long size,
unsigned long align)
@@ -126,7 +121,7 @@ static inline void __iomem *cpm_muram_addr(unsigned long offset)
return NULL;
}
-static inline unsigned long cpm_muram_offset(void __iomem *addr)
+static inline unsigned long cpm_muram_offset(const void __iomem *addr)
{
return -ENOSYS;
}
@@ -135,6 +130,9 @@ static inline dma_addr_t cpm_muram_dma(void __iomem *addr)
{
return 0;
}
+static inline void cpm_muram_free_addr(const void __iomem *addr)
+{
+}
#endif /* defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE) */
/* QE PIO */
@@ -239,38 +237,23 @@ static inline int qe_alive_during_sleep(void)
#define qe_muram_addr cpm_muram_addr
#define qe_muram_offset cpm_muram_offset
#define qe_muram_dma cpm_muram_dma
+#define qe_muram_free_addr cpm_muram_free_addr
-#ifdef CONFIG_PPC32
-#define qe_iowrite8(val, addr) out_8(addr, val)
-#define qe_iowrite16be(val, addr) out_be16(addr, val)
-#define qe_iowrite32be(val, addr) out_be32(addr, val)
-#define qe_ioread8(addr) in_8(addr)
-#define qe_ioread16be(addr) in_be16(addr)
-#define qe_ioread32be(addr) in_be32(addr)
-#else
-#define qe_iowrite8(val, addr) iowrite8(val, addr)
-#define qe_iowrite16be(val, addr) iowrite16be(val, addr)
-#define qe_iowrite32be(val, addr) iowrite32be(val, addr)
-#define qe_ioread8(addr) ioread8(addr)
-#define qe_ioread16be(addr) ioread16be(addr)
-#define qe_ioread32be(addr) ioread32be(addr)
-#endif
-
-#define qe_setbits_be32(_addr, _v) qe_iowrite32be(qe_ioread32be(_addr) | (_v), (_addr))
-#define qe_clrbits_be32(_addr, _v) qe_iowrite32be(qe_ioread32be(_addr) & ~(_v), (_addr))
+#define qe_setbits_be32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr))
+#define qe_clrbits_be32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr))
-#define qe_setbits_be16(_addr, _v) qe_iowrite16be(qe_ioread16be(_addr) | (_v), (_addr))
-#define qe_clrbits_be16(_addr, _v) qe_iowrite16be(qe_ioread16be(_addr) & ~(_v), (_addr))
+#define qe_setbits_be16(_addr, _v) iowrite16be(ioread16be(_addr) | (_v), (_addr))
+#define qe_clrbits_be16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr))
-#define qe_setbits_8(_addr, _v) qe_iowrite8(qe_ioread8(_addr) | (_v), (_addr))
-#define qe_clrbits_8(_addr, _v) qe_iowrite8(qe_ioread8(_addr) & ~(_v), (_addr))
+#define qe_setbits_8(_addr, _v) iowrite8(ioread8(_addr) | (_v), (_addr))
+#define qe_clrbits_8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr))
#define qe_clrsetbits_be32(addr, clear, set) \
- qe_iowrite32be((qe_ioread32be(addr) & ~(clear)) | (set), (addr))
+ iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr))
#define qe_clrsetbits_be16(addr, clear, set) \
- qe_iowrite16be((qe_ioread16be(addr) & ~(clear)) | (set), (addr))
+ iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr))
#define qe_clrsetbits_8(addr, clear, set) \
- qe_iowrite8((qe_ioread8(addr) & ~(clear)) | (set), (addr))
+ iowrite8((ioread8(addr) & ~(clear)) | (set), (addr))
/* Structure that defines QE firmware binary files.
*
@@ -307,7 +290,7 @@ struct qe_firmware {
u8 revision; /* The microcode version revision */
u8 padding; /* Reserved, for alignment */
u8 reserved[4]; /* Reserved, for future expansion */
- } __attribute__ ((packed)) microcode[1];
+ } __packed microcode[];
/* All microcode binaries should be located here */
/* CRC32 should be located here, after the microcode binaries */
} __attribute__ ((packed));
diff --git a/include/soc/fsl/qe/qe_tdm.h b/include/soc/fsl/qe/qe_tdm.h
index b6febe225071..43ea830cfe1f 100644
--- a/include/soc/fsl/qe/qe_tdm.h
+++ b/include/soc/fsl/qe/qe_tdm.h
@@ -10,8 +10,8 @@
#ifndef _QE_TDM_H_
#define _QE_TDM_H_
-#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/types.h>
#include <soc/fsl/qe/immap_qe.h>
#include <soc/fsl/qe/qe.h>
@@ -19,6 +19,8 @@
#include <soc/fsl/qe/ucc.h>
#include <soc/fsl/qe/ucc_fast.h>
+struct device_node;
+
/* SI RAM entries */
#define SIR_LAST 0x0001
#define SIR_BYTE 0x0002
diff --git a/include/soc/fsl/qe/ucc_fast.h b/include/soc/fsl/qe/ucc_fast.h
index ba0e838f962a..ad60b87a3c69 100644
--- a/include/soc/fsl/qe/ucc_fast.h
+++ b/include/soc/fsl/qe/ucc_fast.h
@@ -10,7 +10,7 @@
#ifndef __UCC_FAST_H__
#define __UCC_FAST_H__
-#include <linux/kernel.h>
+#include <linux/types.h>
#include <soc/fsl/qe/immap_qe.h>
#include <soc/fsl/qe/qe.h>
@@ -146,7 +146,6 @@ struct ucc_fast_info {
resource_size_t regs;
int irq;
u32 uccm_mask;
- int bd_mem_part;
int brkpt_support;
int grant_support;
int tsa;
@@ -178,10 +177,10 @@ struct ucc_fast_info {
struct ucc_fast_private {
struct ucc_fast_info *uf_info;
struct ucc_fast __iomem *uf_regs; /* a pointer to the UCC regs. */
- u32 __iomem *p_ucce; /* a pointer to the event register in memory. */
- u32 __iomem *p_uccm; /* a pointer to the mask register in memory. */
+ __be32 __iomem *p_ucce; /* a pointer to the event register in memory. */
+ __be32 __iomem *p_uccm; /* a pointer to the mask register in memory. */
#ifdef CONFIG_UGETH_TX_ON_DEMAND
- u16 __iomem *p_utodr; /* pointer to the transmit on demand register */
+ __be16 __iomem *p_utodr;/* pointer to the transmit on demand register */
#endif
int enabled_tx; /* Whether channel is enabled for Tx (ENT) */
int enabled_rx; /* Whether channel is enabled for Rx (ENR) */
diff --git a/include/soc/fsl/qe/ucc_slow.h b/include/soc/fsl/qe/ucc_slow.h
index d187a6be83bc..7548ce8a202d 100644
--- a/include/soc/fsl/qe/ucc_slow.h
+++ b/include/soc/fsl/qe/ucc_slow.h
@@ -11,7 +11,7 @@
#ifndef __UCC_SLOW_H__
#define __UCC_SLOW_H__
-#include <linux/kernel.h>
+#include <linux/types.h>
#include <soc/fsl/qe/immap_qe.h>
#include <soc/fsl/qe/qe.h>
@@ -184,7 +184,7 @@ struct ucc_slow_info {
struct ucc_slow_private {
struct ucc_slow_info *us_info;
struct ucc_slow __iomem *us_regs; /* Ptr to memory map of UCC regs */
- struct ucc_slow_pram *us_pram; /* a pointer to the parameter RAM */
+ struct ucc_slow_pram __iomem *us_pram; /* a pointer to the parameter RAM */
s32 us_pram_offset;
int enabled_tx; /* Whether channel is enabled for Tx (ENT) */
int enabled_rx; /* Whether channel is enabled for Rx (ENR) */
@@ -196,13 +196,12 @@ struct ucc_slow_private {
and length for first BD in a frame */
s32 tx_base_offset; /* first BD in Tx BD table offset (In MURAM) */
s32 rx_base_offset; /* first BD in Rx BD table offset (In MURAM) */
- struct qe_bd *confBd; /* next BD for confirm after Tx */
- struct qe_bd *tx_bd; /* next BD for new Tx request */
- struct qe_bd *rx_bd; /* next BD to collect after Rx */
+ struct qe_bd __iomem *confBd; /* next BD for confirm after Tx */
+ struct qe_bd __iomem *tx_bd; /* next BD for new Tx request */
+ struct qe_bd __iomem *rx_bd; /* next BD to collect after Rx */
void *p_rx_frame; /* accumulating receive frame */
- u16 *p_ucce; /* a pointer to the event register in memory.
- */
- u16 *p_uccm; /* a pointer to the mask register in memory */
+ __be16 __iomem *p_ucce; /* a pointer to the event register in memory */
+ __be16 __iomem *p_uccm; /* a pointer to the mask register in memory */
u16 saved_uccm; /* a saved mask for the RX Interrupt bits */
#ifdef STATISTICS
u32 tx_frames; /* Transmitted frames counters */
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index cfe00e08e85b..0d3d6beb7fdb 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -256,7 +256,7 @@ struct qm_dqrr_entry {
__be32 context_b;
struct qm_fd fd;
u8 __reserved4[32];
-} __packed;
+} __packed __aligned(64);
#define QM_DQRR_VERB_VBIT 0x80
#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */
#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */
@@ -289,7 +289,7 @@ union qm_mr_entry {
__be32 tag;
struct qm_fd fd;
u8 __reserved1[32];
- } __packed ern;
+ } __packed __aligned(64) ern;
struct {
u8 verb;
u8 fqs; /* Frame Queue Status */
@@ -689,7 +689,8 @@ enum qman_cb_dqrr_result {
};
typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dqrr);
+ const struct qm_dqrr_entry *dqrr,
+ bool sched_napi);
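
The callback now receives a sched_napi flag so the portal can tell consumers whether the real work should be deferred to NAPI. A skeletal, hypothetical consumer matching the extended signature:

static enum qman_cb_dqrr_result example_dqrr_cb(struct qman_portal *qm,
						struct qman_fq *fq,
						const struct qm_dqrr_entry *dqrr,
						bool sched_napi)
{
	if (sched_napi) {
		/* e.g. napi_schedule() the per-portal NAPI instance and
		 * stop dequeuing until it has run.
		 */
		return qman_cb_dqrr_stop;
	}

	/* ... consume dqrr->fd inline ... */
	return qman_cb_dqrr_consume;
}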
/*
* This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
@@ -1171,6 +1172,15 @@ int qman_delete_cgr(struct qman_cgr *cgr);
void qman_delete_cgr_safe(struct qman_cgr *cgr);
/**
+ * qman_update_cgr_safe - Modifies a congestion group object from any CPU
+ * @cgr: the 'cgr' object to modify
+ * @opts: state of the CGR settings
+ *
+ * This will select the proper CPU and modify the CGR settings.
+ */
+int qman_update_cgr_safe(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts);
+
+/**
* qman_query_cgr_congested - Queries CGR's congestion status
* @cgr: the 'cgr' object to query
* @result: returns 'cgr's congestion status, 1 (true) if congested
diff --git a/include/soc/imx/cpu.h b/include/soc/imx/cpu.h
new file mode 100644
index 000000000000..0bf610acafd0
--- /dev/null
+++ b/include/soc/imx/cpu.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __IMX_CPU_H__
+#define __IMX_CPU_H__
+
+#define MXC_CPU_MX1 1
+#define MXC_CPU_MX21 21
+#define MXC_CPU_MX25 25
+#define MXC_CPU_MX27 27
+#define MXC_CPU_MX31 31
+#define MXC_CPU_MX35 35
+#define MXC_CPU_MX50 50
+#define MXC_CPU_MX51 51
+#define MXC_CPU_MX53 53
+#define MXC_CPU_IMX6SL 0x60
+#define MXC_CPU_IMX6DL 0x61
+#define MXC_CPU_IMX6SX 0x62
+#define MXC_CPU_IMX6Q 0x63
+#define MXC_CPU_IMX6UL 0x64
+#define MXC_CPU_IMX6ULL 0x65
+/* virtual cpu id for i.mx6ulz */
+#define MXC_CPU_IMX6ULZ 0x6b
+#define MXC_CPU_IMX6SLL 0x67
+#define MXC_CPU_IMX7D 0x72
+#define MXC_CPU_IMX7ULP 0xff
+
+#define MXC_CPU_VFx10 0x010
+#define MXC_CPU_VF500 0x500
+#define MXC_CPU_VF510 (MXC_CPU_VF500 | MXC_CPU_VFx10)
+#define MXC_CPU_VF600 0x600
+#define MXC_CPU_VF610 (MXC_CPU_VF600 | MXC_CPU_VFx10)
+
+#ifndef __ASSEMBLY__
+extern unsigned int __mxc_cpu_type;
+#endif
+
+#endif
diff --git a/include/soc/mediatek/smi.h b/include/soc/mediatek/smi.h
index 5a34b87d89e3..dfd8efca5e60 100644
--- a/include/soc/mediatek/smi.h
+++ b/include/soc/mediatek/smi.h
@@ -9,37 +9,21 @@
#include <linux/bitops.h>
#include <linux/device.h>
-#ifdef CONFIG_MTK_SMI
+#if IS_ENABLED(CONFIG_MTK_SMI)
-#define MTK_LARB_NR_MAX 16
+enum iommu_atf_cmd {
+ IOMMU_ATF_CMD_CONFIG_SMI_LARB, /* For mm master to en/disable iommu */
+ IOMMU_ATF_CMD_MAX,
+};
#define MTK_SMI_MMU_EN(port) BIT(port)
struct mtk_smi_larb_iommu {
struct device *dev;
unsigned int mmu;
+ unsigned char bank[32];
};
-/*
- * mtk_smi_larb_get: Enable the power domain and clocks for this local arbiter.
- * It also initialize some basic setting(like iommu).
- * mtk_smi_larb_put: Disable the power domain and clocks for this local arbiter.
- * Both should be called in non-atomic context.
- *
- * Returns 0 if successful, negative on failure.
- */
-int mtk_smi_larb_get(struct device *larbdev);
-void mtk_smi_larb_put(struct device *larbdev);
-
-#else
-
-static inline int mtk_smi_larb_get(struct device *larbdev)
-{
- return 0;
-}
-
-static inline void mtk_smi_larb_put(struct device *larbdev) { }
-
#endif
#endif
diff --git a/include/soc/microchip/mpfs.h b/include/soc/microchip/mpfs.h
new file mode 100644
index 000000000000..f916dcde457f
--- /dev/null
+++ b/include/soc/microchip/mpfs.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *
+ * Microchip PolarFire SoC (MPFS)
+ *
+ * Copyright (c) 2020 Microchip Corporation. All rights reserved.
+ *
+ * Author: Conor Dooley <conor.dooley@microchip.com>
+ *
+ */
+
+#ifndef __SOC_MPFS_H__
+#define __SOC_MPFS_H__
+
+#include <linux/types.h>
+#include <linux/of_device.h>
+
+struct mpfs_sys_controller;
+
+struct mpfs_mss_msg {
+ u8 cmd_opcode;
+ u16 cmd_data_size;
+ struct mpfs_mss_response *response;
+ u8 *cmd_data;
+ u16 mbox_offset;
+ u16 resp_offset;
+};
+
+struct mpfs_mss_response {
+ u32 resp_status;
+ u32 *resp_msg;
+ u16 resp_size;
+};
+
+#if IS_ENABLED(CONFIG_POLARFIRE_SOC_SYS_CTRL)
+
+int mpfs_blocking_transaction(struct mpfs_sys_controller *mpfs_client, struct mpfs_mss_msg *msg);
+
+struct mpfs_sys_controller *mpfs_sys_controller_get(struct device *dev);
+
+#endif /* if IS_ENABLED(CONFIG_POLARFIRE_SOC_SYS_CTRL) */
+
+#if IS_ENABLED(CONFIG_MCHP_CLK_MPFS)
+
+u32 mpfs_reset_read(struct device *dev);
+
+void mpfs_reset_write(struct device *dev, u32 val);
+
+#endif /* if IS_ENABLED(CONFIG_MCHP_CLK_MPFS) */
+
+#endif /* __SOC_MPFS_H__ */
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index 068f96b1a83e..967ba30ea636 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -11,27 +11,93 @@
#include <linux/regmap.h>
#include <net/dsa.h>
-#define IFH_INJ_BYPASS BIT(31)
-#define IFH_INJ_POP_CNT_DISABLE (3 << 28)
+/* Port Group IDs (PGID) are masks of destination ports.
+ *
+ * For L2 forwarding, the switch performs 3 lookups in the PGID table for each
+ * frame, and forwards the frame to the ports that are present in the logical
+ * AND of all 3 PGIDs.
+ *
+ * These PGID lookups are:
+ * - In one of PGID[0-63]: for the destination masks. There are 2 paths by
+ * which the switch selects a destination PGID:
+ * - The {DMAC, VID} is present in the MAC table. In that case, the
+ * destination PGID is given by the DEST_IDX field of the MAC table entry
+ * that matched.
+ * - The {DMAC, VID} is not present in the MAC table (it is unknown). The
+ * frame is disseminated as being either unicast, multicast or broadcast,
+ * and according to that, the destination PGID is chosen as being the
+ * value contained by ANA_FLOODING_FLD_UNICAST,
+ * ANA_FLOODING_FLD_MULTICAST or ANA_FLOODING_FLD_BROADCAST.
+ * The destination PGID can be a unicast set: the first PGIDs, 0 to
+ * ocelot->num_phys_ports - 1, or a multicast set: the PGIDs from
+ * ocelot->num_phys_ports to 63. By convention, a unicast PGID corresponds to
+ * a physical port and has a single bit set in the destination ports mask:
+ * that corresponding to the port number itself. In contrast, a multicast
+ * PGID will have potentially more than one single bit set in the destination
+ * ports mask.
+ * - In one of PGID[64-79]: for the aggregation mask. The switch classifier
+ * dissects each frame and generates a 4-bit Link Aggregation Code which is
+ * used for this second PGID table lookup. The goal of link aggregation is to
+ * hash multiple flows within the same LAG on to different destination ports.
+ * The first lookup will result in a PGID with all the LAG members present in
+ * the destination ports mask, and the second lookup, by Link Aggregation
+ * Code, will ensure that each flow gets forwarded only to a single port out
+ * of that mask (there are no duplicates).
+ * - In one of PGID[80-90]: for the source mask. The third time, the PGID table
+ * is indexed with the ingress port (plus 80). These PGIDs answer the
+ * question "is port i allowed to forward traffic to port j?" If yes, then
+ * BIT(j) of PGID 80+i will be found set. The third PGID lookup can be used
+ * to enforce the L2 forwarding matrix imposed by e.g. a Linux bridge.
+ */
+
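Putting the three lookups together, the effective destination set is the bitwise AND of the destination, aggregation and source masks. A toy model, pure arithmetic over a hypothetical snapshot of the PGID table, using the PGID_AGGR/PGID_SRC bases defined just below:

/* Forward to the ports present in all three PGIDs, as described in
 * the comment above. 'pgid' indexing: 0-63 destination, 64-79
 * aggregation, 80+ source.
 */
static unsigned long example_fwd_mask(const unsigned long *pgid,
				      int dest_idx, int aggr_code,
				      int src_port)
{
	unsigned long dst = pgid[dest_idx];
	unsigned long aggr = pgid[PGID_AGGR + aggr_code];
	unsigned long src = pgid[PGID_SRC + src_port];

	return dst & aggr & src;
}
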
+/* Reserve some destination PGIDs at the end of the range:
+ * PGID_BLACKHOLE: used for not forwarding the frames
+ * PGID_CPU: used for whitelisting certain MAC addresses, such as the addresses
+ * of the switch port net devices, towards the CPU port module.
+ * PGID_UC: the flooding destinations for unknown unicast traffic.
+ * PGID_MC: the flooding destinations for non-IP multicast traffic.
+ * PGID_MCIPV4: the flooding destinations for IPv4 multicast traffic.
+ * PGID_MCIPV6: the flooding destinations for IPv6 multicast traffic.
+ * PGID_BC: the flooding destinations for broadcast traffic.
+ */
+#define PGID_BLACKHOLE 57
+#define PGID_CPU 58
+#define PGID_UC 59
+#define PGID_MC 60
+#define PGID_MCIPV4 61
+#define PGID_MCIPV6 62
+#define PGID_BC 63
+
+#define for_each_unicast_dest_pgid(ocelot, pgid) \
+ for ((pgid) = 0; \
+ (pgid) < (ocelot)->num_phys_ports; \
+ (pgid)++)
+
+#define for_each_nonreserved_multicast_dest_pgid(ocelot, pgid) \
+ for ((pgid) = (ocelot)->num_phys_ports + 1; \
+ (pgid) < PGID_BLACKHOLE; \
+ (pgid)++)
+
+#define for_each_aggr_pgid(ocelot, pgid) \
+ for ((pgid) = PGID_AGGR; \
+ (pgid) < PGID_SRC; \
+ (pgid)++)
-#define IFH_TAG_TYPE_C 0
-#define IFH_TAG_TYPE_S 1
+/* Aggregation PGIDs, one per Link Aggregation Code */
+#define PGID_AGGR 64
-#define IFH_REW_OP_NOOP 0x0
-#define IFH_REW_OP_DSCP 0x1
-#define IFH_REW_OP_ONE_STEP_PTP 0x2
-#define IFH_REW_OP_TWO_STEP_PTP 0x3
-#define IFH_REW_OP_ORIGIN_PTP 0x5
+/* Source PGIDs, one per physical port */
+#define PGID_SRC 80
-#define OCELOT_TAG_LEN 16
-#define OCELOT_SHORT_PREFIX_LEN 4
-#define OCELOT_LONG_PREFIX_LEN 16
+#define OCELOT_NUM_TC 8
#define OCELOT_SPEED_2500 0
#define OCELOT_SPEED_1000 1
#define OCELOT_SPEED_100 2
#define OCELOT_SPEED_10 3
+#define OCELOT_PTP_PINS_NUM 4
+
#define TARGET_OFFSET 24
#define REG_MASK GENMASK(TARGET_OFFSET - 1, 0)
#define REG(reg, offset) [reg & REG_MASK] = offset
@@ -45,10 +111,14 @@ enum ocelot_target {
QSYS,
REW,
SYS,
+ S0,
+ S1,
S2,
HSIO,
PTP,
+ FDMA,
GCB,
+ DEV_GMII,
TARGET_MAX,
};
@@ -260,13 +330,38 @@ enum ocelot_reg {
SYS_COUNT_RX_64,
SYS_COUNT_RX_65_127,
SYS_COUNT_RX_128_255,
- SYS_COUNT_RX_256_1023,
+ SYS_COUNT_RX_256_511,
+ SYS_COUNT_RX_512_1023,
SYS_COUNT_RX_1024_1526,
SYS_COUNT_RX_1527_MAX,
SYS_COUNT_RX_PAUSE,
SYS_COUNT_RX_CONTROL,
SYS_COUNT_RX_LONGS,
SYS_COUNT_RX_CLASSIFIED_DROPS,
+ SYS_COUNT_RX_RED_PRIO_0,
+ SYS_COUNT_RX_RED_PRIO_1,
+ SYS_COUNT_RX_RED_PRIO_2,
+ SYS_COUNT_RX_RED_PRIO_3,
+ SYS_COUNT_RX_RED_PRIO_4,
+ SYS_COUNT_RX_RED_PRIO_5,
+ SYS_COUNT_RX_RED_PRIO_6,
+ SYS_COUNT_RX_RED_PRIO_7,
+ SYS_COUNT_RX_YELLOW_PRIO_0,
+ SYS_COUNT_RX_YELLOW_PRIO_1,
+ SYS_COUNT_RX_YELLOW_PRIO_2,
+ SYS_COUNT_RX_YELLOW_PRIO_3,
+ SYS_COUNT_RX_YELLOW_PRIO_4,
+ SYS_COUNT_RX_YELLOW_PRIO_5,
+ SYS_COUNT_RX_YELLOW_PRIO_6,
+ SYS_COUNT_RX_YELLOW_PRIO_7,
+ SYS_COUNT_RX_GREEN_PRIO_0,
+ SYS_COUNT_RX_GREEN_PRIO_1,
+ SYS_COUNT_RX_GREEN_PRIO_2,
+ SYS_COUNT_RX_GREEN_PRIO_3,
+ SYS_COUNT_RX_GREEN_PRIO_4,
+ SYS_COUNT_RX_GREEN_PRIO_5,
+ SYS_COUNT_RX_GREEN_PRIO_6,
+ SYS_COUNT_RX_GREEN_PRIO_7,
SYS_COUNT_TX_OCTETS,
SYS_COUNT_TX_UNICAST,
SYS_COUNT_TX_MULTICAST,
@@ -276,11 +371,50 @@ enum ocelot_reg {
SYS_COUNT_TX_PAUSE,
SYS_COUNT_TX_64,
SYS_COUNT_TX_65_127,
- SYS_COUNT_TX_128_511,
+ SYS_COUNT_TX_128_255,
+ SYS_COUNT_TX_256_511,
SYS_COUNT_TX_512_1023,
SYS_COUNT_TX_1024_1526,
SYS_COUNT_TX_1527_MAX,
- SYS_COUNT_TX_AGING,
+ SYS_COUNT_TX_YELLOW_PRIO_0,
+ SYS_COUNT_TX_YELLOW_PRIO_1,
+ SYS_COUNT_TX_YELLOW_PRIO_2,
+ SYS_COUNT_TX_YELLOW_PRIO_3,
+ SYS_COUNT_TX_YELLOW_PRIO_4,
+ SYS_COUNT_TX_YELLOW_PRIO_5,
+ SYS_COUNT_TX_YELLOW_PRIO_6,
+ SYS_COUNT_TX_YELLOW_PRIO_7,
+ SYS_COUNT_TX_GREEN_PRIO_0,
+ SYS_COUNT_TX_GREEN_PRIO_1,
+ SYS_COUNT_TX_GREEN_PRIO_2,
+ SYS_COUNT_TX_GREEN_PRIO_3,
+ SYS_COUNT_TX_GREEN_PRIO_4,
+ SYS_COUNT_TX_GREEN_PRIO_5,
+ SYS_COUNT_TX_GREEN_PRIO_6,
+ SYS_COUNT_TX_GREEN_PRIO_7,
+ SYS_COUNT_TX_AGED,
+ SYS_COUNT_DROP_LOCAL,
+ SYS_COUNT_DROP_TAIL,
+ SYS_COUNT_DROP_YELLOW_PRIO_0,
+ SYS_COUNT_DROP_YELLOW_PRIO_1,
+ SYS_COUNT_DROP_YELLOW_PRIO_2,
+ SYS_COUNT_DROP_YELLOW_PRIO_3,
+ SYS_COUNT_DROP_YELLOW_PRIO_4,
+ SYS_COUNT_DROP_YELLOW_PRIO_5,
+ SYS_COUNT_DROP_YELLOW_PRIO_6,
+ SYS_COUNT_DROP_YELLOW_PRIO_7,
+ SYS_COUNT_DROP_GREEN_PRIO_0,
+ SYS_COUNT_DROP_GREEN_PRIO_1,
+ SYS_COUNT_DROP_GREEN_PRIO_2,
+ SYS_COUNT_DROP_GREEN_PRIO_3,
+ SYS_COUNT_DROP_GREEN_PRIO_4,
+ SYS_COUNT_DROP_GREEN_PRIO_5,
+ SYS_COUNT_DROP_GREEN_PRIO_6,
+ SYS_COUNT_DROP_GREEN_PRIO_7,
+ SYS_COUNT_SF_MATCHING_FRAMES,
+ SYS_COUNT_SF_NOT_PASSING_FRAMES,
+ SYS_COUNT_SF_NOT_PASSING_SDU,
+ SYS_COUNT_SF_RED_FRAMES,
SYS_RESET_CFG,
SYS_SR_ETYPE_CFG,
SYS_VLAN_ETYPE_CFG,
@@ -303,7 +437,6 @@ enum ocelot_reg {
SYS_MMGT_FAST,
SYS_EVENTS_DIF,
SYS_EVENTS_CORE,
- SYS_CNT,
SYS_PTP_STATUS,
SYS_PTP_TXSTAMP,
SYS_PTP_NXT,
@@ -314,21 +447,58 @@ enum ocelot_reg {
SYS_CM_DATA_RD,
SYS_CM_OP,
SYS_CM_DATA,
- S2_CORE_UPDATE_CTRL = S2 << TARGET_OFFSET,
- S2_CORE_MV_CFG,
- S2_CACHE_ENTRY_DAT,
- S2_CACHE_MASK_DAT,
- S2_CACHE_ACTION_DAT,
- S2_CACHE_CNT_DAT,
- S2_CACHE_TG_DAT,
PTP_PIN_CFG = PTP << TARGET_OFFSET,
PTP_PIN_TOD_SEC_MSB,
PTP_PIN_TOD_SEC_LSB,
PTP_PIN_TOD_NSEC,
+ PTP_PIN_WF_HIGH_PERIOD,
+ PTP_PIN_WF_LOW_PERIOD,
PTP_CFG_MISC,
PTP_CLK_CFG_ADJ_CFG,
PTP_CLK_CFG_ADJ_FREQ,
GCB_SOFT_RST = GCB << TARGET_OFFSET,
+ GCB_MIIM_MII_STATUS,
+ GCB_MIIM_MII_CMD,
+ GCB_MIIM_MII_DATA,
+ DEV_CLOCK_CFG = DEV_GMII << TARGET_OFFSET,
+ DEV_PORT_MISC,
+ DEV_EVENTS,
+ DEV_EEE_CFG,
+ DEV_RX_PATH_DELAY,
+ DEV_TX_PATH_DELAY,
+ DEV_PTP_PREDICT_CFG,
+ DEV_MAC_ENA_CFG,
+ DEV_MAC_MODE_CFG,
+ DEV_MAC_MAXLEN_CFG,
+ DEV_MAC_TAGS_CFG,
+ DEV_MAC_ADV_CHK_CFG,
+ DEV_MAC_IFG_CFG,
+ DEV_MAC_HDX_CFG,
+ DEV_MAC_DBG_CFG,
+ DEV_MAC_FC_MAC_LOW_CFG,
+ DEV_MAC_FC_MAC_HIGH_CFG,
+ DEV_MAC_STICKY,
+ PCS1G_CFG,
+ PCS1G_MODE_CFG,
+ PCS1G_SD_CFG,
+ PCS1G_ANEG_CFG,
+ PCS1G_ANEG_NP_CFG,
+ PCS1G_LB_CFG,
+ PCS1G_DBG_CFG,
+ PCS1G_CDET_CFG,
+ PCS1G_ANEG_STATUS,
+ PCS1G_ANEG_NP_STATUS,
+ PCS1G_LINK_STATUS,
+ PCS1G_LINK_DOWN_CNT,
+ PCS1G_STICKY,
+ PCS1G_DEBUG_STATUS,
+ PCS1G_LPI_CFG,
+ PCS1G_LPI_WAKE_ERROR_CNT,
+ PCS1G_LPI_STATUS,
+ PCS1G_TSTPAT_MODE_CFG,
+ PCS1G_TSTPAT_STATUS,
+ DEV_PCS_FX100_CFG,
+ DEV_PCS_FX100_STATUS,
};
enum ocelot_regfield {
@@ -368,30 +538,279 @@ enum ocelot_regfield {
ANA_TABLES_MACACCESS_B_DOM,
ANA_TABLES_MACTINDX_BUCKET,
ANA_TABLES_MACTINDX_M_INDEX,
+ QSYS_SWITCH_PORT_MODE_PORT_ENA,
+ QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG,
+ QSYS_SWITCH_PORT_MODE_YEL_RSRVD,
+ QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE,
+ QSYS_SWITCH_PORT_MODE_TX_PFC_ENA,
+ QSYS_SWITCH_PORT_MODE_TX_PFC_MODE,
QSYS_TIMED_FRAME_ENTRY_TFRM_VLD,
QSYS_TIMED_FRAME_ENTRY_TFRM_FP,
QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO,
QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL,
QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T,
+ SYS_PORT_MODE_DATA_WO_TS,
+ SYS_PORT_MODE_INCL_INJ_HDR,
+ SYS_PORT_MODE_INCL_XTR_HDR,
+ SYS_PORT_MODE_INCL_HDR_ERR,
SYS_RESET_CFG_CORE_ENA,
SYS_RESET_CFG_MEM_ENA,
SYS_RESET_CFG_MEM_INIT,
GCB_SOFT_RST_SWC_RST,
+ GCB_MIIM_MII_STATUS_PENDING,
+ GCB_MIIM_MII_STATUS_BUSY,
+ SYS_PAUSE_CFG_PAUSE_START,
+ SYS_PAUSE_CFG_PAUSE_STOP,
+ SYS_PAUSE_CFG_PAUSE_ENA,
REGFIELD_MAX
};
-enum ocelot_clk_pins {
- ALT_PPS_PIN = 1,
- EXT_CLK_PIN,
- ALT_LDST_PIN,
+enum {
+ /* VCAP_CORE_CFG */
+ VCAP_CORE_UPDATE_CTRL,
+ VCAP_CORE_MV_CFG,
+ /* VCAP_CORE_CACHE */
+ VCAP_CACHE_ENTRY_DAT,
+ VCAP_CACHE_MASK_DAT,
+ VCAP_CACHE_ACTION_DAT,
+ VCAP_CACHE_CNT_DAT,
+ VCAP_CACHE_TG_DAT,
+ /* VCAP_CONST */
+ VCAP_CONST_VCAP_VER,
+ VCAP_CONST_ENTRY_WIDTH,
+ VCAP_CONST_ENTRY_CNT,
+ VCAP_CONST_ENTRY_SWCNT,
+ VCAP_CONST_ENTRY_TG_WIDTH,
+ VCAP_CONST_ACTION_DEF_CNT,
+ VCAP_CONST_ACTION_WIDTH,
+ VCAP_CONST_CNT_WIDTH,
+ VCAP_CONST_CORE_CNT,
+ VCAP_CONST_IF_CNT,
+};
+
+enum ocelot_ptp_pins {
+ PTP_PIN_0,
+ PTP_PIN_1,
+ PTP_PIN_2,
+ PTP_PIN_3,
TOD_ACC_PIN
};
+enum ocelot_stat {
+ OCELOT_STAT_RX_OCTETS,
+ OCELOT_STAT_RX_UNICAST,
+ OCELOT_STAT_RX_MULTICAST,
+ OCELOT_STAT_RX_BROADCAST,
+ OCELOT_STAT_RX_SHORTS,
+ OCELOT_STAT_RX_FRAGMENTS,
+ OCELOT_STAT_RX_JABBERS,
+ OCELOT_STAT_RX_CRC_ALIGN_ERRS,
+ OCELOT_STAT_RX_SYM_ERRS,
+ OCELOT_STAT_RX_64,
+ OCELOT_STAT_RX_65_127,
+ OCELOT_STAT_RX_128_255,
+ OCELOT_STAT_RX_256_511,
+ OCELOT_STAT_RX_512_1023,
+ OCELOT_STAT_RX_1024_1526,
+ OCELOT_STAT_RX_1527_MAX,
+ OCELOT_STAT_RX_PAUSE,
+ OCELOT_STAT_RX_CONTROL,
+ OCELOT_STAT_RX_LONGS,
+ OCELOT_STAT_RX_CLASSIFIED_DROPS,
+ OCELOT_STAT_RX_RED_PRIO_0,
+ OCELOT_STAT_RX_RED_PRIO_1,
+ OCELOT_STAT_RX_RED_PRIO_2,
+ OCELOT_STAT_RX_RED_PRIO_3,
+ OCELOT_STAT_RX_RED_PRIO_4,
+ OCELOT_STAT_RX_RED_PRIO_5,
+ OCELOT_STAT_RX_RED_PRIO_6,
+ OCELOT_STAT_RX_RED_PRIO_7,
+ OCELOT_STAT_RX_YELLOW_PRIO_0,
+ OCELOT_STAT_RX_YELLOW_PRIO_1,
+ OCELOT_STAT_RX_YELLOW_PRIO_2,
+ OCELOT_STAT_RX_YELLOW_PRIO_3,
+ OCELOT_STAT_RX_YELLOW_PRIO_4,
+ OCELOT_STAT_RX_YELLOW_PRIO_5,
+ OCELOT_STAT_RX_YELLOW_PRIO_6,
+ OCELOT_STAT_RX_YELLOW_PRIO_7,
+ OCELOT_STAT_RX_GREEN_PRIO_0,
+ OCELOT_STAT_RX_GREEN_PRIO_1,
+ OCELOT_STAT_RX_GREEN_PRIO_2,
+ OCELOT_STAT_RX_GREEN_PRIO_3,
+ OCELOT_STAT_RX_GREEN_PRIO_4,
+ OCELOT_STAT_RX_GREEN_PRIO_5,
+ OCELOT_STAT_RX_GREEN_PRIO_6,
+ OCELOT_STAT_RX_GREEN_PRIO_7,
+ OCELOT_STAT_TX_OCTETS,
+ OCELOT_STAT_TX_UNICAST,
+ OCELOT_STAT_TX_MULTICAST,
+ OCELOT_STAT_TX_BROADCAST,
+ OCELOT_STAT_TX_COLLISION,
+ OCELOT_STAT_TX_DROPS,
+ OCELOT_STAT_TX_PAUSE,
+ OCELOT_STAT_TX_64,
+ OCELOT_STAT_TX_65_127,
+ OCELOT_STAT_TX_128_255,
+ OCELOT_STAT_TX_256_511,
+ OCELOT_STAT_TX_512_1023,
+ OCELOT_STAT_TX_1024_1526,
+ OCELOT_STAT_TX_1527_MAX,
+ OCELOT_STAT_TX_YELLOW_PRIO_0,
+ OCELOT_STAT_TX_YELLOW_PRIO_1,
+ OCELOT_STAT_TX_YELLOW_PRIO_2,
+ OCELOT_STAT_TX_YELLOW_PRIO_3,
+ OCELOT_STAT_TX_YELLOW_PRIO_4,
+ OCELOT_STAT_TX_YELLOW_PRIO_5,
+ OCELOT_STAT_TX_YELLOW_PRIO_6,
+ OCELOT_STAT_TX_YELLOW_PRIO_7,
+ OCELOT_STAT_TX_GREEN_PRIO_0,
+ OCELOT_STAT_TX_GREEN_PRIO_1,
+ OCELOT_STAT_TX_GREEN_PRIO_2,
+ OCELOT_STAT_TX_GREEN_PRIO_3,
+ OCELOT_STAT_TX_GREEN_PRIO_4,
+ OCELOT_STAT_TX_GREEN_PRIO_5,
+ OCELOT_STAT_TX_GREEN_PRIO_6,
+ OCELOT_STAT_TX_GREEN_PRIO_7,
+ OCELOT_STAT_TX_AGED,
+ OCELOT_STAT_DROP_LOCAL,
+ OCELOT_STAT_DROP_TAIL,
+ OCELOT_STAT_DROP_YELLOW_PRIO_0,
+ OCELOT_STAT_DROP_YELLOW_PRIO_1,
+ OCELOT_STAT_DROP_YELLOW_PRIO_2,
+ OCELOT_STAT_DROP_YELLOW_PRIO_3,
+ OCELOT_STAT_DROP_YELLOW_PRIO_4,
+ OCELOT_STAT_DROP_YELLOW_PRIO_5,
+ OCELOT_STAT_DROP_YELLOW_PRIO_6,
+ OCELOT_STAT_DROP_YELLOW_PRIO_7,
+ OCELOT_STAT_DROP_GREEN_PRIO_0,
+ OCELOT_STAT_DROP_GREEN_PRIO_1,
+ OCELOT_STAT_DROP_GREEN_PRIO_2,
+ OCELOT_STAT_DROP_GREEN_PRIO_3,
+ OCELOT_STAT_DROP_GREEN_PRIO_4,
+ OCELOT_STAT_DROP_GREEN_PRIO_5,
+ OCELOT_STAT_DROP_GREEN_PRIO_6,
+ OCELOT_STAT_DROP_GREEN_PRIO_7,
+ OCELOT_NUM_STATS,
+};
+
struct ocelot_stat_layout {
- u32 offset;
+ u32 reg;
char name[ETH_GSTRING_LEN];
};
+/* 32-bit counter checked for wraparound by ocelot_port_update_stats()
+ * and copied to ocelot->stats.
+ */
+#define OCELOT_STAT(kind) \
+ [OCELOT_STAT_ ## kind] = { .reg = SYS_COUNT_ ## kind }
+/* Same as above, except also exported to ethtool -S. Standard counters should
+ * only be exposed to more specific interfaces rather than by their string name.
+ */
+#define OCELOT_STAT_ETHTOOL(kind, ethtool_name) \
+ [OCELOT_STAT_ ## kind] = { .reg = SYS_COUNT_ ## kind, .name = ethtool_name }
+
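These helpers let a switch driver assemble its ocelot_stat_layout table keyed by enum ocelot_stat. A hedged sketch of the typical array construction; OCELOT_COMMON_STATS, defined next, expands to the shared entries:

/* Sketch: shared counters placed by enum position; SoC-specific
 * entries can be appended with the same OCELOT_STAT()/
 * OCELOT_STAT_ETHTOOL() initializers.
 */
static const struct ocelot_stat_layout example_stats_layout[OCELOT_NUM_STATS] = {
	OCELOT_COMMON_STATS,
};
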
+#define OCELOT_COMMON_STATS \
+ OCELOT_STAT_ETHTOOL(RX_OCTETS, "rx_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_UNICAST, "rx_unicast"), \
+ OCELOT_STAT_ETHTOOL(RX_MULTICAST, "rx_multicast"), \
+ OCELOT_STAT_ETHTOOL(RX_BROADCAST, "rx_broadcast"), \
+ OCELOT_STAT_ETHTOOL(RX_SHORTS, "rx_shorts"), \
+ OCELOT_STAT_ETHTOOL(RX_FRAGMENTS, "rx_fragments"), \
+ OCELOT_STAT_ETHTOOL(RX_JABBERS, "rx_jabbers"), \
+ OCELOT_STAT_ETHTOOL(RX_CRC_ALIGN_ERRS, "rx_crc_align_errs"), \
+ OCELOT_STAT_ETHTOOL(RX_SYM_ERRS, "rx_sym_errs"), \
+ OCELOT_STAT_ETHTOOL(RX_64, "rx_frames_below_65_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_65_127, "rx_frames_65_to_127_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_128_255, "rx_frames_128_to_255_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_256_511, "rx_frames_256_to_511_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_512_1023, "rx_frames_512_to_1023_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_1024_1526, "rx_frames_1024_to_1526_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_1527_MAX, "rx_frames_over_1526_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_PAUSE, "rx_pause"), \
+ OCELOT_STAT_ETHTOOL(RX_CONTROL, "rx_control"), \
+ OCELOT_STAT_ETHTOOL(RX_LONGS, "rx_longs"), \
+ OCELOT_STAT_ETHTOOL(RX_CLASSIFIED_DROPS, "rx_classified_drops"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_0, "rx_red_prio_0"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_1, "rx_red_prio_1"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_2, "rx_red_prio_2"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_3, "rx_red_prio_3"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_4, "rx_red_prio_4"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_5, "rx_red_prio_5"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_6, "rx_red_prio_6"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_7, "rx_red_prio_7"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_0, "rx_yellow_prio_0"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_1, "rx_yellow_prio_1"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_2, "rx_yellow_prio_2"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_3, "rx_yellow_prio_3"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_4, "rx_yellow_prio_4"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_5, "rx_yellow_prio_5"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_6, "rx_yellow_prio_6"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_7, "rx_yellow_prio_7"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_0, "rx_green_prio_0"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_1, "rx_green_prio_1"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_2, "rx_green_prio_2"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_3, "rx_green_prio_3"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_4, "rx_green_prio_4"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_5, "rx_green_prio_5"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_6, "rx_green_prio_6"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_7, "rx_green_prio_7"), \
+ OCELOT_STAT_ETHTOOL(TX_OCTETS, "tx_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_UNICAST, "tx_unicast"), \
+ OCELOT_STAT_ETHTOOL(TX_MULTICAST, "tx_multicast"), \
+ OCELOT_STAT_ETHTOOL(TX_BROADCAST, "tx_broadcast"), \
+ OCELOT_STAT_ETHTOOL(TX_COLLISION, "tx_collision"), \
+ OCELOT_STAT_ETHTOOL(TX_DROPS, "tx_drops"), \
+ OCELOT_STAT_ETHTOOL(TX_PAUSE, "tx_pause"), \
+ OCELOT_STAT_ETHTOOL(TX_64, "tx_frames_below_65_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_65_127, "tx_frames_65_to_127_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_128_255, "tx_frames_128_255_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_256_511, "tx_frames_256_511_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_512_1023, "tx_frames_512_1023_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_1024_1526, "tx_frames_1024_1526_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_1527_MAX, "tx_frames_over_1526_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_0, "tx_yellow_prio_0"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_1, "tx_yellow_prio_1"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_2, "tx_yellow_prio_2"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_3, "tx_yellow_prio_3"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_4, "tx_yellow_prio_4"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_5, "tx_yellow_prio_5"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_6, "tx_yellow_prio_6"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_7, "tx_yellow_prio_7"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_0, "tx_green_prio_0"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_1, "tx_green_prio_1"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_2, "tx_green_prio_2"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_3, "tx_green_prio_3"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_4, "tx_green_prio_4"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_5, "tx_green_prio_5"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_6, "tx_green_prio_6"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_7, "tx_green_prio_7"), \
+ OCELOT_STAT_ETHTOOL(TX_AGED, "tx_aged"), \
+ OCELOT_STAT_ETHTOOL(DROP_LOCAL, "drop_local"), \
+ OCELOT_STAT_ETHTOOL(DROP_TAIL, "drop_tail"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_0, "drop_yellow_prio_0"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_1, "drop_yellow_prio_1"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_2, "drop_yellow_prio_2"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_3, "drop_yellow_prio_3"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_4, "drop_yellow_prio_4"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_5, "drop_yellow_prio_5"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_6, "drop_yellow_prio_6"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_7, "drop_yellow_prio_7"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_0, "drop_green_prio_0"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_1, "drop_green_prio_1"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_2, "drop_green_prio_2"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_3, "drop_green_prio_3"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_4, "drop_green_prio_4"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_5, "drop_green_prio_5"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_6, "drop_green_prio_6"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_7, "drop_green_prio_7")
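+
+/* A minimal usage sketch, assuming a hypothetical driver-local array name
+ * ("vsc_stats_layout"); the designated initializers produced by
+ * OCELOT_COMMON_STATS are indexed by enum ocelot_stat, so the array is sized
+ * by OCELOT_NUM_STATS and assigned to ocelot->stats_layout before
+ * ocelot_init() runs:
+ *
+ *	static const struct ocelot_stat_layout vsc_stats_layout[OCELOT_NUM_STATS] = {
+ *		OCELOT_COMMON_STATS,
+ *	};
+ */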
+
+struct ocelot_stats_region {
+ struct list_head node;
+ u32 base;
+ int count;
+ u32 *buf;
+};
+
enum ocelot_tag_prefix {
OCELOT_TAG_PREFIX_DISABLED = 0,
OCELOT_TAG_PREFIX_NONE,
@@ -402,144 +821,518 @@ enum ocelot_tag_prefix {
struct ocelot;
struct ocelot_ops {
- void (*pcs_init)(struct ocelot *ocelot, int port);
+ struct net_device *(*port_to_netdev)(struct ocelot *ocelot, int port);
+ int (*netdev_to_port)(struct net_device *dev);
int (*reset)(struct ocelot *ocelot);
+ u16 (*wm_enc)(u16 value);
+ u16 (*wm_dec)(u16 value);
+ void (*wm_stat)(u32 val, u32 *inuse, u32 *maxuse);
+ void (*psfp_init)(struct ocelot *ocelot);
+ int (*psfp_filter_add)(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f);
+ int (*psfp_filter_del)(struct ocelot *ocelot, struct flow_cls_offload *f);
+ int (*psfp_stats_get)(struct ocelot *ocelot, struct flow_cls_offload *f,
+ struct flow_stats *stats);
+ void (*cut_through_fwd)(struct ocelot *ocelot);
+ void (*tas_clock_adjust)(struct ocelot *ocelot);
+ void (*update_stats)(struct ocelot *ocelot);
+};
+
+struct ocelot_vcap_policer {
+ struct list_head pol_list;
+ u16 base;
+ u16 max;
+ u16 base2;
+ u16 max2;
+};
+
+struct ocelot_vcap_block {
+ struct list_head rules;
+ int count;
};
+struct ocelot_bridge_vlan {
+ u16 vid;
+ unsigned long portmask;
+ unsigned long untagged;
+ struct list_head list;
+};
+
+enum ocelot_port_tag_config {
+ /* all VLANs are egress-untagged */
+ OCELOT_PORT_TAG_DISABLED = 0,
+ /* all VLANs except the native VLAN and VID 0 are egress-tagged */
+ OCELOT_PORT_TAG_NATIVE = 1,
+ /* all VLANs except VID 0 are egress-tagged */
+ OCELOT_PORT_TAG_TRUNK_NO_VID0 = 2,
+ /* all VLANs are egress-tagged */
+ OCELOT_PORT_TAG_TRUNK = 3,
+};
+
+struct ocelot_psfp_list {
+ struct list_head stream_list;
+ struct list_head sfi_list;
+ struct list_head sgi_list;
+ /* Serialize access to the lists */
+ struct mutex lock;
+};
+
+enum ocelot_sb {
+ OCELOT_SB_BUF,
+ OCELOT_SB_REF,
+ OCELOT_SB_NUM,
+};
+
+enum ocelot_sb_pool {
+ OCELOT_SB_POOL_ING,
+ OCELOT_SB_POOL_EGR,
+ OCELOT_SB_POOL_NUM,
+};
+
+/* MAC table entry types.
+ * ENTRYTYPE_NORMAL is subject to aging.
+ * ENTRYTYPE_LOCKED is not subject to aging.
+ * ENTRYTYPE_MACv4 is not subject to aging and is used for IPv4 multicast.
+ * ENTRYTYPE_MACv6 is not subject to aging and is used for IPv6 multicast.
+ */
+enum macaccess_entry_type {
+ ENTRYTYPE_NORMAL = 0,
+ ENTRYTYPE_LOCKED,
+ ENTRYTYPE_MACv4,
+ ENTRYTYPE_MACv6,
+};
+
+#define OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION BIT(0)
+#define OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP BIT(1)
+
+struct ocelot_lag_fdb {
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
+ struct net_device *bond;
+ struct list_head list;
+};
+
+struct ocelot_mirror {
+ refcount_t refcount;
+ int to;
+};
+
+struct ocelot_port;
+
struct ocelot_port {
struct ocelot *ocelot;
- void __iomem *regs;
+ struct regmap *target;
- /* Ingress default VLAN (pvid) */
- u16 pvid;
+ struct net_device *bond;
+ struct net_device *bridge;
- /* Egress default VLAN (vid) */
- u16 vid;
+ struct ocelot_port *dsa_8021q_cpu;
- u8 ptp_cmd;
+ /* VLAN that untagged frames are classified to, on ingress */
+ const struct ocelot_bridge_vlan *pvid_vlan;
+
+ struct tc_taprio_qopt_offload *taprio;
+
+ phy_interface_t phy_mode;
+
+ unsigned int ptp_skbs_in_flight;
struct sk_buff_head tx_skbs;
+
+ u16 mrp_ring_id;
+
+ u8 ptp_cmd;
u8 ts_id;
- phy_interface_t phy_mode;
+ u8 index;
+
+ u8 stp_state;
+ bool vlan_aware;
+ bool is_dsa_8021q_cpu;
+ bool learn_ena;
+
+ bool lag_tx_active;
+
+ int bridge_num;
+
+ int speed;
};
struct ocelot {
struct device *dev;
+ struct devlink *devlink;
+ struct devlink_port *devlink_ports;
const struct ocelot_ops *ops;
struct regmap *targets[TARGET_MAX];
struct regmap_field *regfields[REGFIELD_MAX];
const u32 *const *map;
const struct ocelot_stat_layout *stats_layout;
- unsigned int num_stats;
-
- int shared_queue_sz;
+ struct list_head stats_regions;
- struct net_device *hw_bridge_dev;
- u16 bridge_mask;
- u16 bridge_fwd_mask;
+ u32 pool_size[OCELOT_SB_NUM][OCELOT_SB_POOL_NUM];
+ int packet_buffer_size;
+ int num_frame_refs;
+ int num_mact_rows;
struct ocelot_port **ports;
u8 base_mac[ETH_ALEN];
- /* Keep track of the vlan port masks */
- u32 vlan_mask[VLAN_N_VID];
+ struct list_head vlans;
+ struct list_head traps;
+ struct list_head lag_fdbs;
+
+ /* Switches like VSC9959 have flooding per traffic class */
+ int num_flooding_pgids;
+ /* In tables like ANA:PORT and the ANA:PGID:PGID mask,
+ * the CPU port module is located after the physical ports
+ * (at the num_phys_ports index).
+ */
u8 num_phys_ports;
- u8 num_cpu_ports;
- u8 cpu;
- u32 *lags;
+ int npi;
+
+ enum ocelot_tag_prefix npi_inj_prefix;
+ enum ocelot_tag_prefix npi_xtr_prefix;
+
+ unsigned long bridges;
struct list_head multicast;
+ struct list_head pgids;
- /* Workqueue to check statistics for overflow with its lock */
- struct mutex stats_lock;
- u64 *stats;
+ struct list_head dummy_rules;
+ struct ocelot_vcap_block block[3];
+ struct ocelot_vcap_policer vcap_pol;
+ struct vcap_props *vcap;
+ struct ocelot_mirror *mirror;
+
+ struct ocelot_psfp_list psfp;
+
+ /* Workqueue to check statistics for overflow */
struct delayed_work stats_work;
struct workqueue_struct *stats_queue;
+ /* Lock for serializing access to the statistics array */
+ spinlock_t stats_lock;
+ u64 *stats;
+
+ /* Lock for serializing indirect access to STAT_VIEW registers */
+ struct mutex stat_view_lock;
+ /* Lock for serializing access to the MAC table */
+ struct mutex mact_lock;
+ /* Lock for serializing forwarding domain changes */
+ struct mutex fwd_domain_lock;
+
+ /* Lock for serializing Time-Aware Shaper changes */
+ struct mutex tas_lock;
+
+ struct workqueue_struct *owq;
u8 ptp:1;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_info;
struct hwtstamp_config hwtstamp_config;
+ unsigned int ptp_skbs_in_flight;
+ /* Protects the 2-step TX timestamp ID logic */
+ spinlock_t ts_id_lock;
/* Protects the PTP interface state */
struct mutex ptp_lock;
/* Protects the PTP clock */
spinlock_t ptp_clock_lock;
+ struct ptp_pin_desc ptp_pins[OCELOT_PTP_PINS_NUM];
- void (*port_pcs_init)(struct ocelot_port *port);
+ struct ocelot_fdma *fdma;
};
-#define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
-#define ocelot_read_gix(ocelot, reg, gi) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi))
-#define ocelot_read_rix(ocelot, reg, ri) __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri))
-#define ocelot_read(ocelot, reg) __ocelot_read_ix(ocelot, reg, 0)
+struct ocelot_policer {
+ u32 rate; /* kilobits per second */
+ u32 burst; /* bytes */
+};
+
+#define ocelot_bulk_read(ocelot, reg, buf, count) \
+ __ocelot_bulk_read_ix(ocelot, reg, 0, buf, count)
+
+#define ocelot_read_ix(ocelot, reg, gi, ri) \
+ __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_read_gix(ocelot, reg, gi) \
+ __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi))
+#define ocelot_read_rix(ocelot, reg, ri) \
+ __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri))
+#define ocelot_read(ocelot, reg) \
+ __ocelot_read_ix(ocelot, reg, 0)
-#define ocelot_write_ix(ocelot, val, reg, gi, ri) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
-#define ocelot_write_gix(ocelot, val, reg, gi) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi))
-#define ocelot_write_rix(ocelot, val, reg, ri) __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri))
+#define ocelot_write_ix(ocelot, val, reg, gi, ri) \
+ __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_write_gix(ocelot, val, reg, gi) \
+ __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi))
+#define ocelot_write_rix(ocelot, val, reg, ri) \
+ __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri))
#define ocelot_write(ocelot, val, reg) __ocelot_write_ix(ocelot, val, reg, 0)
-#define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
-#define ocelot_rmw_gix(ocelot, val, m, reg, gi) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi))
-#define ocelot_rmw_rix(ocelot, val, m, reg, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_RSZ * (ri))
+#define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) \
+ __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_rmw_gix(ocelot, val, m, reg, gi) \
+ __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi))
+#define ocelot_rmw_rix(ocelot, val, m, reg, ri) \
+ __ocelot_rmw_ix(ocelot, val, m, reg, reg##_RSZ * (ri))
#define ocelot_rmw(ocelot, val, m, reg) __ocelot_rmw_ix(ocelot, val, m, reg, 0)
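+
+/* Access sketch, assuming "port" is a valid port index. ANA_PORT_PORT_CFG is
+ * a per-port (grouped) register, so the _gix accessors apply; the
+ * read-modify-write below sets the LEARN_ENA bit and leaves the rest of the
+ * register untouched:
+ *
+ *	u32 val = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, port);
+ *
+ *	ocelot_rmw_gix(ocelot, ANA_PORT_PORT_CFG_LEARN_ENA,
+ *		       ANA_PORT_PORT_CFG_LEARN_ENA,
+ *		       ANA_PORT_PORT_CFG, port);
+ */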
+#define ocelot_field_write(ocelot, reg, val) \
+ regmap_field_write((ocelot)->regfields[(reg)], (val))
+#define ocelot_field_read(ocelot, reg, val) \
+ regmap_field_read((ocelot)->regfields[(reg)], (val))
+#define ocelot_fields_write(ocelot, id, reg, val) \
+ regmap_fields_write((ocelot)->regfields[(reg)], (id), (val))
+#define ocelot_fields_read(ocelot, id, reg, val) \
+ regmap_fields_read((ocelot)->regfields[(reg)], (id), (val))
+
+#define ocelot_target_read_ix(ocelot, target, reg, gi, ri) \
+ __ocelot_target_read_ix(ocelot, target, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_target_read_gix(ocelot, target, reg, gi) \
+ __ocelot_target_read_ix(ocelot, target, reg, reg##_GSZ * (gi))
+#define ocelot_target_read_rix(ocelot, target, reg, ri) \
+ __ocelot_target_read_ix(ocelot, target, reg, reg##_RSZ * (ri))
+#define ocelot_target_read(ocelot, target, reg) \
+ __ocelot_target_read_ix(ocelot, target, reg, 0)
+
+#define ocelot_target_write_ix(ocelot, target, val, reg, gi, ri) \
+ __ocelot_target_write_ix(ocelot, target, val, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_target_write_gix(ocelot, target, val, reg, gi) \
+ __ocelot_target_write_ix(ocelot, target, val, reg, reg##_GSZ * (gi))
+#define ocelot_target_write_rix(ocelot, target, val, reg, ri) \
+ __ocelot_target_write_ix(ocelot, target, val, reg, reg##_RSZ * (ri))
+#define ocelot_target_write(ocelot, target, val, reg) \
+ __ocelot_target_write_ix(ocelot, target, val, reg, 0)
+
/* I/O */
u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
+void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg);
+int __ocelot_bulk_read_ix(struct ocelot *ocelot, u32 reg, u32 offset, void *buf,
+ int count);
u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset);
void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
u32 offset);
+u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
+ u32 reg, u32 offset);
+void __ocelot_target_write_ix(struct ocelot *ocelot, enum ocelot_target target,
+ u32 val, u32 reg, u32 offset);
+
+/* Packet I/O */
+bool ocelot_can_inject(struct ocelot *ocelot, int grp);
+void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
+ u32 rew_op, struct sk_buff *skb);
+void ocelot_ifh_port_set(void *ifh, int port, u32 rew_op, u32 vlan_tag);
+int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **skb);
+void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp);
+void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb,
+ u64 timestamp);
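+
+/* Injection sketch for an ndo_start_xmit-style path, assuming CPU queue
+ * group 0 and a rewriter opcode of 0 (no rewriting):
+ *
+ *	if (!ocelot_can_inject(ocelot, 0))
+ *		return NETDEV_TX_BUSY;
+ *
+ *	ocelot_port_inject_frame(ocelot, port, 0, 0, skb);
+ *	return NETDEV_TX_OK;
+ */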
/* Hardware initialization */
int ocelot_regfields_init(struct ocelot *ocelot,
const struct reg_field *const regfields);
struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res);
-void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
- enum ocelot_tag_prefix injection,
- enum ocelot_tag_prefix extraction);
int ocelot_init(struct ocelot *ocelot);
void ocelot_deinit(struct ocelot *ocelot);
void ocelot_init_port(struct ocelot *ocelot, int port);
+void ocelot_deinit_port(struct ocelot *ocelot, int port);
+
+void ocelot_port_setup_dsa_8021q_cpu(struct ocelot *ocelot, int cpu);
+void ocelot_port_teardown_dsa_8021q_cpu(struct ocelot *ocelot, int cpu);
+void ocelot_port_assign_dsa_8021q_cpu(struct ocelot *ocelot, int port, int cpu);
+void ocelot_port_unassign_dsa_8021q_cpu(struct ocelot *ocelot, int port);
+u32 ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot *ocelot, int port);
/* DSA callbacks */
-void ocelot_port_enable(struct ocelot *ocelot, int port,
- struct phy_device *phy);
-void ocelot_port_disable(struct ocelot *ocelot, int port);
void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data);
void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data);
int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset);
+void ocelot_port_get_stats64(struct ocelot *ocelot, int port,
+ struct rtnl_link_stats64 *stats);
+void ocelot_port_get_pause_stats(struct ocelot *ocelot, int port,
+ struct ethtool_pause_stats *pause_stats);
+void ocelot_port_get_rmon_stats(struct ocelot *ocelot, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges);
+void ocelot_port_get_eth_ctrl_stats(struct ocelot *ocelot, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats);
+void ocelot_port_get_eth_mac_stats(struct ocelot *ocelot, int port,
+ struct ethtool_eth_mac_stats *mac_stats);
+void ocelot_port_get_eth_phy_stats(struct ocelot *ocelot, int port,
+ struct ethtool_eth_phy_stats *phy_stats);
int ocelot_get_ts_info(struct ocelot *ocelot, int port,
struct ethtool_ts_info *info);
void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs);
-void ocelot_adjust_link(struct ocelot *ocelot, int port,
- struct phy_device *phydev);
-void ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
- bool vlan_aware);
+int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, bool enabled,
+ struct netlink_ext_ack *extack);
void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state);
+u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port);
+int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port,
+ struct switchdev_brport_flags val);
+void ocelot_port_bridge_flags(struct ocelot *ocelot, int port,
+ struct switchdev_brport_flags val);
+int ocelot_port_get_default_prio(struct ocelot *ocelot, int port);
+int ocelot_port_set_default_prio(struct ocelot *ocelot, int port, u8 prio);
+int ocelot_port_get_dscp_prio(struct ocelot *ocelot, int port, u8 dscp);
+int ocelot_port_add_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio);
+int ocelot_port_del_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio);
int ocelot_port_bridge_join(struct ocelot *ocelot, int port,
- struct net_device *bridge);
-int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
- struct net_device *bridge);
+ struct net_device *bridge, int bridge_num,
+ struct netlink_ext_ack *extack);
+void ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
+ struct net_device *bridge);
+int ocelot_mact_flush(struct ocelot *ocelot, int port);
int ocelot_fdb_dump(struct ocelot *ocelot, int port,
dsa_fdb_dump_cb_t *cb, void *data);
-int ocelot_fdb_add(struct ocelot *ocelot, int port,
- const unsigned char *addr, u16 vid, bool vlan_aware);
-int ocelot_fdb_del(struct ocelot *ocelot, int port,
- const unsigned char *addr, u16 vid);
+int ocelot_fdb_add(struct ocelot *ocelot, int port, const unsigned char *addr,
+ u16 vid, const struct net_device *bridge);
+int ocelot_fdb_del(struct ocelot *ocelot, int port, const unsigned char *addr,
+ u16 vid, const struct net_device *bridge);
+int ocelot_lag_fdb_add(struct ocelot *ocelot, struct net_device *bond,
+ const unsigned char *addr, u16 vid,
+ const struct net_device *bridge);
+int ocelot_lag_fdb_del(struct ocelot *ocelot, struct net_device *bond,
+ const unsigned char *addr, u16 vid,
+ const struct net_device *bridge);
+int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid,
+ bool untagged, struct netlink_ext_ack *extack);
int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
bool untagged);
int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid);
int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr);
int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr);
-int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
-int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port,
- struct sk_buff *skb);
+int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
+ struct sk_buff *skb,
+ struct sk_buff **clone);
void ocelot_get_txtstamp(struct ocelot *ocelot);
+void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu);
+int ocelot_get_max_mtu(struct ocelot *ocelot, int port);
+int ocelot_port_policer_add(struct ocelot *ocelot, int port,
+ struct ocelot_policer *pol);
+int ocelot_port_policer_del(struct ocelot *ocelot, int port);
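+
+/* Policer sketch with arbitrary example values: 100000 kbps (100 Mbit/s) and
+ * a 10000 byte burst, per the unit comments in struct ocelot_policer:
+ *
+ *	struct ocelot_policer pol = {
+ *		.rate = 100000,
+ *		.burst = 10000,
+ *	};
+ *	int err = ocelot_port_policer_add(ocelot, port, &pol);
+ */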
+int ocelot_port_mirror_add(struct ocelot *ocelot, int from, int to,
+ bool ingress, struct netlink_ext_ack *extack);
+void ocelot_port_mirror_del(struct ocelot *ocelot, int from, bool ingress);
+int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f, bool ingress);
+int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f, bool ingress);
+int ocelot_cls_flower_stats(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f, bool ingress);
+int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ const struct net_device *bridge);
+int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ const struct net_device *bridge);
+int ocelot_port_lag_join(struct ocelot *ocelot, int port,
+ struct net_device *bond,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack);
+void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
+ struct net_device *bond);
+void ocelot_port_lag_change(struct ocelot *ocelot, int port, bool lag_tx_active);
+int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond);
+
+int ocelot_devlink_sb_register(struct ocelot *ocelot);
+void ocelot_devlink_sb_unregister(struct ocelot *ocelot);
+int ocelot_sb_pool_get(struct ocelot *ocelot, unsigned int sb_index,
+ u16 pool_index,
+ struct devlink_sb_pool_info *pool_info);
+int ocelot_sb_pool_set(struct ocelot *ocelot, unsigned int sb_index,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack);
+int ocelot_sb_port_pool_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold);
+int ocelot_sb_port_pool_set(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold, struct netlink_ext_ack *extack);
+int ocelot_sb_tc_pool_bind_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold);
+int ocelot_sb_tc_pool_bind_set(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack);
+int ocelot_sb_occ_snapshot(struct ocelot *ocelot, unsigned int sb_index);
+int ocelot_sb_occ_max_clear(struct ocelot *ocelot, unsigned int sb_index);
+int ocelot_sb_occ_port_pool_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max);
+int ocelot_sb_occ_tc_port_bind_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max);
+
+void ocelot_phylink_mac_link_down(struct ocelot *ocelot, int port,
+ unsigned int link_an_mode,
+ phy_interface_t interface,
+ unsigned long quirks);
+void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
+ struct phy_device *phydev,
+ unsigned int link_an_mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause,
+ unsigned long quirks);
+
+int ocelot_mact_lookup(struct ocelot *ocelot, int *dst_idx,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid, enum macaccess_entry_type *type);
+int ocelot_mact_learn_streamdata(struct ocelot *ocelot, int dst_idx,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type,
+ int sfid, int ssid);
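+
+/* Lookup sketch, assuming "mac" and "vid" are provided by the caller; a zero
+ * return means the entry was found and "idx" holds its MAC table row:
+ *
+ *	enum macaccess_entry_type type;
+ *	int idx;
+ *
+ *	if (!ocelot_mact_lookup(ocelot, &idx, mac, vid, &type) &&
+ *	    type == ENTRYTYPE_LOCKED)
+ *		dev_info(ocelot->dev, "static FDB entry at row %d\n", idx);
+ */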
+
+int ocelot_migrate_mdbs(struct ocelot *ocelot, unsigned long from_mask,
+ unsigned long to_mask);
+
+int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix,
+ struct ocelot_policer *pol);
+int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix);
+
+#if IS_ENABLED(CONFIG_BRIDGE_MRP)
+int ocelot_mrp_add(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_mrp *mrp);
+int ocelot_mrp_del(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_mrp *mrp);
+int ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_ring_role_mrp *mrp);
+int ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_ring_role_mrp *mrp);
+#else
+static inline int ocelot_mrp_add(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_mrp *mrp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ocelot_mrp_del(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_mrp *mrp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_ring_role_mrp *mrp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_ring_role_mrp *mrp)
+{
+ return -EOPNOTSUPP;
+}
+#endif
#endif
diff --git a/include/soc/mscc/ocelot_ana.h b/include/soc/mscc/ocelot_ana.h
index 841c6ec22b64..67e0ae05a5ab 100644
--- a/include/soc/mscc/ocelot_ana.h
+++ b/include/soc/mscc/ocelot_ana.h
@@ -227,6 +227,11 @@
#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(x) ((x) & GENMASK(1, 0))
#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD_M GENMASK(1, 0)
+#define SFIDACCESS_CMD_IDLE 0
+#define SFIDACCESS_CMD_READ 1
+#define SFIDACCESS_CMD_WRITE 2
+#define SFIDACCESS_CMD_INIT 3
+
#define ANA_TABLES_SFIDTIDX_SGID_VALID BIT(26)
#define ANA_TABLES_SFIDTIDX_SGID(x) (((x) << 18) & GENMASK(25, 18))
#define ANA_TABLES_SFIDTIDX_SGID_M GENMASK(25, 18)
@@ -252,10 +257,15 @@
#define ANA_SG_CONFIG_REG_3_LIST_LENGTH_M GENMASK(18, 16)
#define ANA_SG_CONFIG_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(18, 16)) >> 16)
#define ANA_SG_CONFIG_REG_3_GATE_ENABLE BIT(20)
-#define ANA_SG_CONFIG_REG_3_INIT_IPS(x) (((x) << 24) & GENMASK(27, 24))
-#define ANA_SG_CONFIG_REG_3_INIT_IPS_M GENMASK(27, 24)
-#define ANA_SG_CONFIG_REG_3_INIT_IPS_X(x) (((x) & GENMASK(27, 24)) >> 24)
-#define ANA_SG_CONFIG_REG_3_INIT_GATE_STATE BIT(28)
+#define ANA_SG_CONFIG_REG_3_INIT_IPS(x) (((x) << 21) & GENMASK(24, 21))
+#define ANA_SG_CONFIG_REG_3_INIT_IPS_M GENMASK(24, 21)
+#define ANA_SG_CONFIG_REG_3_INIT_IPS_X(x) (((x) & GENMASK(24, 21)) >> 21)
+#define ANA_SG_CONFIG_REG_3_IPV_VALID BIT(24)
+#define ANA_SG_CONFIG_REG_3_IPV_INVALID(x) (((x) << 24) & GENMASK(24, 24))
+#define ANA_SG_CONFIG_REG_3_INIT_IPV(x) (((x) << 21) & GENMASK(23, 21))
+#define ANA_SG_CONFIG_REG_3_INIT_IPV_M GENMASK(23, 21)
+#define ANA_SG_CONFIG_REG_3_INIT_IPV_X(x) (((x) & GENMASK(23, 21)) >> 21)
+#define ANA_SG_CONFIG_REG_3_INIT_GATE_STATE BIT(25)
#define ANA_SG_GCL_GS_CONFIG_RSZ 0x4
diff --git a/include/soc/mscc/ocelot_dev.h b/include/soc/mscc/ocelot_dev.h
index 7c08437061fc..0c6021f02fee 100644
--- a/include/soc/mscc/ocelot_dev.h
+++ b/include/soc/mscc/ocelot_dev.h
@@ -8,8 +8,6 @@
#ifndef _MSCC_OCELOT_DEV_H_
#define _MSCC_OCELOT_DEV_H_
-#define DEV_CLOCK_CFG 0x0
-
#define DEV_CLOCK_CFG_MAC_TX_RST BIT(7)
#define DEV_CLOCK_CFG_MAC_RX_RST BIT(6)
#define DEV_CLOCK_CFG_PCS_TX_RST BIT(5)
@@ -19,18 +17,12 @@
#define DEV_CLOCK_CFG_LINK_SPEED(x) ((x) & GENMASK(1, 0))
#define DEV_CLOCK_CFG_LINK_SPEED_M GENMASK(1, 0)
-#define DEV_PORT_MISC 0x4
-
#define DEV_PORT_MISC_FWD_ERROR_ENA BIT(4)
#define DEV_PORT_MISC_FWD_PAUSE_ENA BIT(3)
#define DEV_PORT_MISC_FWD_CTRL_ENA BIT(2)
#define DEV_PORT_MISC_DEV_LOOP_ENA BIT(1)
#define DEV_PORT_MISC_HDX_FAST_DIS BIT(0)
-#define DEV_EVENTS 0x8
-
-#define DEV_EEE_CFG 0xc
-
#define DEV_EEE_CFG_EEE_ENA BIT(22)
#define DEV_EEE_CFG_EEE_TIMER_AGE(x) (((x) << 15) & GENMASK(21, 15))
#define DEV_EEE_CFG_EEE_TIMER_AGE_M GENMASK(21, 15)
@@ -43,33 +35,19 @@
#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF_X(x) (((x) & GENMASK(7, 1)) >> 1)
#define DEV_EEE_CFG_PORT_LPI BIT(0)
-#define DEV_RX_PATH_DELAY 0x10
-
-#define DEV_TX_PATH_DELAY 0x14
-
-#define DEV_PTP_PREDICT_CFG 0x18
-
#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG(x) (((x) << 4) & GENMASK(11, 4))
#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_M GENMASK(11, 4)
#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_X(x) (((x) & GENMASK(11, 4)) >> 4)
#define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG(x) ((x) & GENMASK(3, 0))
#define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG_M GENMASK(3, 0)
-#define DEV_MAC_ENA_CFG 0x1c
-
#define DEV_MAC_ENA_CFG_RX_ENA BIT(4)
#define DEV_MAC_ENA_CFG_TX_ENA BIT(0)
-#define DEV_MAC_MODE_CFG 0x20
-
#define DEV_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8)
#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4)
#define DEV_MAC_MODE_CFG_FDX_ENA BIT(0)
-#define DEV_MAC_MAXLEN_CFG 0x24
-
-#define DEV_MAC_TAGS_CFG 0x28
-
#define DEV_MAC_TAGS_CFG_TAG_ID(x) (((x) << 16) & GENMASK(31, 16))
#define DEV_MAC_TAGS_CFG_TAG_ID_M GENMASK(31, 16)
#define DEV_MAC_TAGS_CFG_TAG_ID_X(x) (((x) & GENMASK(31, 16)) >> 16)
@@ -77,12 +55,8 @@
#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA BIT(1)
#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
-#define DEV_MAC_ADV_CHK_CFG 0x2c
-
#define DEV_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0)
-#define DEV_MAC_IFG_CFG 0x30
-
#define DEV_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17)
#define DEV_MAC_IFG_CFG_REDUCED_TX_IFG BIT(16)
#define DEV_MAC_IFG_CFG_TX_IFG(x) (((x) << 8) & GENMASK(12, 8))
@@ -94,8 +68,6 @@
#define DEV_MAC_IFG_CFG_RX_IFG1(x) ((x) & GENMASK(3, 0))
#define DEV_MAC_IFG_CFG_RX_IFG1_M GENMASK(3, 0)
-#define DEV_MAC_HDX_CFG 0x34
-
#define DEV_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26)
#define DEV_MAC_HDX_CFG_OB_ENA BIT(25)
#define DEV_MAC_HDX_CFG_WEXC_DIS BIT(24)
@@ -107,17 +79,9 @@
#define DEV_MAC_HDX_CFG_LATE_COL_POS(x) ((x) & GENMASK(6, 0))
#define DEV_MAC_HDX_CFG_LATE_COL_POS_M GENMASK(6, 0)
-#define DEV_MAC_DBG_CFG 0x38
-
#define DEV_MAC_DBG_CFG_TBI_MODE BIT(4)
#define DEV_MAC_DBG_CFG_IFG_CRS_EXT_CHK_ENA BIT(0)
-#define DEV_MAC_FC_MAC_LOW_CFG 0x3c
-
-#define DEV_MAC_FC_MAC_HIGH_CFG 0x40
-
-#define DEV_MAC_STICKY 0x44
-
#define DEV_MAC_STICKY_RX_IPG_SHRINK_STICKY BIT(9)
#define DEV_MAC_STICKY_RX_PREAM_SHRINK_STICKY BIT(8)
#define DEV_MAC_STICKY_RX_CARRIER_EXT_STICKY BIT(7)
@@ -129,25 +93,17 @@
#define DEV_MAC_STICKY_TX_FRM_LEN_OVR_STICKY BIT(1)
#define DEV_MAC_STICKY_TX_ABORT_STICKY BIT(0)
-#define PCS1G_CFG 0x48
-
#define PCS1G_CFG_LINK_STATUS_TYPE BIT(4)
#define PCS1G_CFG_AN_LINK_CTRL_ENA BIT(1)
#define PCS1G_CFG_PCS_ENA BIT(0)
-#define PCS1G_MODE_CFG 0x4c
-
#define PCS1G_MODE_CFG_UNIDIR_MODE_ENA BIT(4)
#define PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0)
-#define PCS1G_SD_CFG 0x50
-
#define PCS1G_SD_CFG_SD_SEL BIT(8)
#define PCS1G_SD_CFG_SD_POL BIT(4)
#define PCS1G_SD_CFG_SD_ENA BIT(0)
-#define PCS1G_ANEG_CFG 0x54
-
#define PCS1G_ANEG_CFG_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16))
#define PCS1G_ANEG_CFG_ADV_ABILITY_M GENMASK(31, 16)
#define PCS1G_ANEG_CFG_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16)
@@ -155,29 +111,19 @@
#define PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT BIT(1)
#define PCS1G_ANEG_CFG_ANEG_ENA BIT(0)
-#define PCS1G_ANEG_NP_CFG 0x58
-
#define PCS1G_ANEG_NP_CFG_NP_TX(x) (((x) << 16) & GENMASK(31, 16))
#define PCS1G_ANEG_NP_CFG_NP_TX_M GENMASK(31, 16)
#define PCS1G_ANEG_NP_CFG_NP_TX_X(x) (((x) & GENMASK(31, 16)) >> 16)
#define PCS1G_ANEG_NP_CFG_NP_LOADED_ONE_SHOT BIT(0)
-#define PCS1G_LB_CFG 0x5c
-
#define PCS1G_LB_CFG_RA_ENA BIT(4)
#define PCS1G_LB_CFG_GMII_PHY_LB_ENA BIT(1)
#define PCS1G_LB_CFG_TBI_HOST_LB_ENA BIT(0)
-#define PCS1G_DBG_CFG 0x60
-
#define PCS1G_DBG_CFG_UDLT BIT(0)
-#define PCS1G_CDET_CFG 0x64
-
#define PCS1G_CDET_CFG_CDET_ENA BIT(0)
-#define PCS1G_ANEG_STATUS 0x68
-
#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16))
#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_M GENMASK(31, 16)
#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16)
@@ -185,10 +131,6 @@
#define PCS1G_ANEG_STATUS_PAGE_RX_STICKY BIT(3)
#define PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0)
-#define PCS1G_ANEG_NP_STATUS 0x6c
-
-#define PCS1G_LINK_STATUS 0x70
-
#define PCS1G_LINK_STATUS_DELAY_VAR(x) (((x) << 12) & GENMASK(15, 12))
#define PCS1G_LINK_STATUS_DELAY_VAR_M GENMASK(15, 12)
#define PCS1G_LINK_STATUS_DELAY_VAR_X(x) (((x) & GENMASK(15, 12)) >> 12)
@@ -196,17 +138,9 @@
#define PCS1G_LINK_STATUS_LINK_STATUS BIT(4)
#define PCS1G_LINK_STATUS_SYNC_STATUS BIT(0)
-#define PCS1G_LINK_DOWN_CNT 0x74
-
-#define PCS1G_STICKY 0x78
-
#define PCS1G_STICKY_LINK_DOWN_STICKY BIT(4)
#define PCS1G_STICKY_OUT_OF_SYNC_STICKY BIT(0)
-#define PCS1G_DEBUG_STATUS 0x7c
-
-#define PCS1G_LPI_CFG 0x80
-
#define PCS1G_LPI_CFG_QSGMII_MS_SEL BIT(20)
#define PCS1G_LPI_CFG_RX_LPI_OUT_DIS BIT(17)
#define PCS1G_LPI_CFG_LPI_TESTMODE BIT(16)
@@ -215,10 +149,6 @@
#define PCS1G_LPI_CFG_LPI_RX_WTIM_X(x) (((x) & GENMASK(5, 4)) >> 4)
#define PCS1G_LPI_CFG_TX_ASSERT_LPIDLE BIT(0)
-#define PCS1G_LPI_WAKE_ERROR_CNT 0x84
-
-#define PCS1G_LPI_STATUS 0x88
-
#define PCS1G_LPI_STATUS_RX_LPI_FAIL BIT(16)
#define PCS1G_LPI_STATUS_RX_LPI_EVENT_STICKY BIT(12)
#define PCS1G_LPI_STATUS_RX_QUIET BIT(9)
@@ -227,18 +157,12 @@
#define PCS1G_LPI_STATUS_TX_QUIET BIT(1)
#define PCS1G_LPI_STATUS_TX_LPI_MODE BIT(0)
-#define PCS1G_TSTPAT_MODE_CFG 0x8c
-
-#define PCS1G_TSTPAT_STATUS 0x90
-
#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT(x) (((x) << 8) & GENMASK(15, 8))
#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_M GENMASK(15, 8)
#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_X(x) (((x) & GENMASK(15, 8)) >> 8)
#define PCS1G_TSTPAT_STATUS_JTP_ERR BIT(4)
#define PCS1G_TSTPAT_STATUS_JTP_LOCK BIT(0)
-#define DEV_PCS_FX100_CFG 0x94
-
#define DEV_PCS_FX100_CFG_SD_SEL BIT(26)
#define DEV_PCS_FX100_CFG_SD_POL BIT(25)
#define DEV_PCS_FX100_CFG_SD_ENA BIT(24)
@@ -259,8 +183,6 @@
#define DEV_PCS_FX100_CFG_FEFGEN_ENA BIT(1)
#define DEV_PCS_FX100_CFG_PCS_ENA BIT(0)
-#define DEV_PCS_FX100_STATUS 0x98
-
#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP(x) (((x) << 8) & GENMASK(11, 8))
#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_M GENMASK(11, 8)
#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_X(x) (((x) & GENMASK(11, 8)) >> 8)
diff --git a/include/soc/mscc/ocelot_ptp.h b/include/soc/mscc/ocelot_ptp.h
new file mode 100644
index 000000000000..f085884b1fa2
--- /dev/null
+++ b/include/soc/mscc/ocelot_ptp.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * License: Dual MIT/GPL
+ * Copyright (c) 2017 Microsemi Corporation
+ * Copyright 2020 NXP
+ */
+
+#ifndef _MSCC_OCELOT_PTP_H_
+#define _MSCC_OCELOT_PTP_H_
+
+#include <linux/ptp_clock_kernel.h>
+#include <soc/mscc/ocelot.h>
+
+#define OCELOT_MAX_PTP_ID 63
+#define OCELOT_PTP_FIFO_SIZE 128
+
+#define PTP_PIN_CFG_RSZ 0x20
+#define PTP_PIN_TOD_SEC_MSB_RSZ PTP_PIN_CFG_RSZ
+#define PTP_PIN_TOD_SEC_LSB_RSZ PTP_PIN_CFG_RSZ
+#define PTP_PIN_TOD_NSEC_RSZ PTP_PIN_CFG_RSZ
+#define PTP_PIN_WF_HIGH_PERIOD_RSZ PTP_PIN_CFG_RSZ
+#define PTP_PIN_WF_LOW_PERIOD_RSZ PTP_PIN_CFG_RSZ
+
+#define PTP_PIN_CFG_DOM BIT(0)
+#define PTP_PIN_CFG_SYNC BIT(2)
+#define PTP_PIN_CFG_ACTION(x) ((x) << 3)
+#define PTP_PIN_CFG_ACTION_MASK PTP_PIN_CFG_ACTION(0x7)
+
+enum {
+ PTP_PIN_ACTION_IDLE = 0,
+ PTP_PIN_ACTION_LOAD,
+ PTP_PIN_ACTION_SAVE,
+ PTP_PIN_ACTION_CLOCK,
+ PTP_PIN_ACTION_DELTA,
+ PTP_PIN_ACTION_NOSYNC,
+ PTP_PIN_ACTION_SYNC,
+};
+
+#define PTP_CFG_MISC_PTP_EN BIT(2)
+
+#define PTP_CFG_CLK_ADJ_CFG_ENA BIT(0)
+#define PTP_CFG_CLK_ADJ_CFG_DIR BIT(1)
+
+#define PTP_CFG_CLK_ADJ_FREQ_NS BIT(30)
+
+int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
+int ocelot_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts);
+int ocelot_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta);
+int ocelot_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm);
+int ocelot_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan);
+int ocelot_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on);
+int ocelot_init_timestamp(struct ocelot *ocelot,
+ const struct ptp_clock_info *info);
+int ocelot_deinit_timestamp(struct ocelot *ocelot);
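+
+/* Registration sketch: a driver typically wires these helpers into a
+ * ptp_clock_info and passes it to ocelot_init_timestamp(). The structure
+ * name, .name string and .max_adj value below are illustrative assumptions:
+ *
+ *	static struct ptp_clock_info ocelot_ptp_caps = {
+ *		.owner		= THIS_MODULE,
+ *		.name		= "ocelot ptp",
+ *		.max_adj	= 0x7fffffff,
+ *		.gettime64	= ocelot_ptp_gettime64,
+ *		.settime64	= ocelot_ptp_settime64,
+ *		.adjtime	= ocelot_ptp_adjtime,
+ *		.adjfine	= ocelot_ptp_adjfine,
+ *		.verify		= ocelot_ptp_verify,
+ *		.enable		= ocelot_ptp_enable,
+ *	};
+ *
+ *	int err = ocelot_init_timestamp(ocelot, &ocelot_ptp_caps);
+ */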
+#endif
diff --git a/include/soc/mscc/ocelot_qsys.h b/include/soc/mscc/ocelot_qsys.h
index d8c63aa761be..9731895be643 100644
--- a/include/soc/mscc/ocelot_qsys.h
+++ b/include/soc/mscc/ocelot_qsys.h
@@ -13,19 +13,6 @@
#define QSYS_PORT_MODE_DEQUEUE_DIS BIT(1)
#define QSYS_PORT_MODE_DEQUEUE_LATE BIT(0)
-#define QSYS_SWITCH_PORT_MODE_RSZ 0x4
-
-#define QSYS_SWITCH_PORT_MODE_PORT_ENA BIT(14)
-#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(x) (((x) << 11) & GENMASK(13, 11))
-#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_M GENMASK(13, 11)
-#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_X(x) (((x) & GENMASK(13, 11)) >> 11)
-#define QSYS_SWITCH_PORT_MODE_YEL_RSRVD BIT(10)
-#define QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE BIT(9)
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA(x) (((x) << 1) & GENMASK(8, 1))
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_M GENMASK(8, 1)
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_X(x) (((x) & GENMASK(8, 1)) >> 1)
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_MODE BIT(0)
-
#define QSYS_STAT_CNT_CFG_TX_GREEN_CNT_MODE BIT(5)
#define QSYS_STAT_CNT_CFG_TX_YELLOW_CNT_MODE BIT(4)
#define QSYS_STAT_CNT_CFG_DROP_GREEN_CNT_MODE BIT(3)
@@ -84,11 +71,8 @@
#define QSYS_RES_STAT_GSZ 0x8
-#define QSYS_RES_STAT_INUSE(x) (((x) << 12) & GENMASK(23, 12))
-#define QSYS_RES_STAT_INUSE_M GENMASK(23, 12)
-#define QSYS_RES_STAT_INUSE_X(x) (((x) & GENMASK(23, 12)) >> 12)
-#define QSYS_RES_STAT_MAXUSE(x) ((x) & GENMASK(11, 0))
-#define QSYS_RES_STAT_MAXUSE_M GENMASK(11, 0)
+#define QSYS_MMGT_EQ_CTRL_FP_FREE_CNT(x) ((x) & GENMASK(15, 0))
+#define QSYS_MMGT_EQ_CTRL_FP_FREE_CNT_M GENMASK(15, 0)
#define QSYS_EVENTS_CORE_EV_FDC(x) (((x) << 2) & GENMASK(4, 2))
#define QSYS_EVENTS_CORE_EV_FDC_M GENMASK(4, 2)
diff --git a/include/soc/mscc/ocelot_sys.h b/include/soc/mscc/ocelot_sys.h
index 16f91e172bcb..79cf40ccdbe6 100644
--- a/include/soc/mscc/ocelot_sys.h
+++ b/include/soc/mscc/ocelot_sys.h
@@ -12,19 +12,6 @@
#define SYS_COUNT_TX_OCTETS_RSZ 0x4
-#define SYS_PORT_MODE_RSZ 0x4
-
-#define SYS_PORT_MODE_DATA_WO_TS(x) (((x) << 5) & GENMASK(6, 5))
-#define SYS_PORT_MODE_DATA_WO_TS_M GENMASK(6, 5)
-#define SYS_PORT_MODE_DATA_WO_TS_X(x) (((x) & GENMASK(6, 5)) >> 5)
-#define SYS_PORT_MODE_INCL_INJ_HDR(x) (((x) << 3) & GENMASK(4, 3))
-#define SYS_PORT_MODE_INCL_INJ_HDR_M GENMASK(4, 3)
-#define SYS_PORT_MODE_INCL_INJ_HDR_X(x) (((x) & GENMASK(4, 3)) >> 3)
-#define SYS_PORT_MODE_INCL_XTR_HDR(x) (((x) << 1) & GENMASK(2, 1))
-#define SYS_PORT_MODE_INCL_XTR_HDR_M GENMASK(2, 1)
-#define SYS_PORT_MODE_INCL_XTR_HDR_X(x) (((x) & GENMASK(2, 1)) >> 1)
-#define SYS_PORT_MODE_INJ_HDR_ERR BIT(0)
-
#define SYS_FRONT_PORT_MODE_RSZ 0x4
#define SYS_FRONT_PORT_MODE_HDX_MODE BIT(0)
@@ -56,16 +43,6 @@
#define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET(x) ((x) & GENMASK(5, 0))
#define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET_M GENMASK(5, 0)
-#define SYS_PAUSE_CFG_RSZ 0x4
-
-#define SYS_PAUSE_CFG_PAUSE_START(x) (((x) << 10) & GENMASK(18, 10))
-#define SYS_PAUSE_CFG_PAUSE_START_M GENMASK(18, 10)
-#define SYS_PAUSE_CFG_PAUSE_START_X(x) (((x) & GENMASK(18, 10)) >> 10)
-#define SYS_PAUSE_CFG_PAUSE_STOP(x) (((x) << 1) & GENMASK(9, 1))
-#define SYS_PAUSE_CFG_PAUSE_STOP_M GENMASK(9, 1)
-#define SYS_PAUSE_CFG_PAUSE_STOP_X(x) (((x) & GENMASK(9, 1)) >> 1)
-#define SYS_PAUSE_CFG_PAUSE_ENA BIT(0)
-
#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START(x) (((x) << 9) & GENMASK(17, 9))
#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_M GENMASK(17, 9)
#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_X(x) (((x) & GENMASK(17, 9)) >> 9)
diff --git a/include/soc/mscc/ocelot_vcap.h b/include/soc/mscc/ocelot_vcap.h
new file mode 100644
index 000000000000..c601a4598b0d
--- /dev/null
+++ b/include/soc/mscc/ocelot_vcap.h
@@ -0,0 +1,731 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
+ * Microsemi Ocelot Switch driver
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#ifndef _OCELOT_VCAP_H_
+#define _OCELOT_VCAP_H_
+
+#include <soc/mscc/ocelot.h>
+
+/* Cookie definitions for private VCAP filters installed by the driver.
+ * Must be unique per VCAP block.
+ */
+#define OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream) ((upstream) << 16 | (port))
+#define OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port) (port)
+#define OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port) (port)
+#define OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port) ((ocelot)->num_phys_ports + (port))
+#define OCELOT_VCAP_IS2_MRP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2)
+#define OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 1)
+#define OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 2)
+#define OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 3)
+#define OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 4)
+#define OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 5)
+
+/* =================================================================
+ * VCAP Common
+ * =================================================================
+ */
+
+enum {
+ VCAP_ES0,
+ VCAP_IS1,
+ VCAP_IS2,
+ __VCAP_COUNT,
+};
+
+#define OCELOT_NUM_VCAP_BLOCKS __VCAP_COUNT
+
+struct vcap_props {
+ u16 tg_width; /* Type-group width (in bits) */
+ u16 sw_count; /* Sub word count */
+ u16 entry_count; /* Entry count */
+ u16 entry_words; /* Number of entry words */
+ u16 entry_width; /* Entry width (in bits) */
+ u16 action_count; /* Action count */
+ u16 action_words; /* Number of action words */
+ u16 action_width; /* Action width (in bits) */
+ u16 action_type_width; /* Action type width (in bits) */
+ struct {
+ u16 width; /* Action type width (in bits) */
+ u16 count; /* Action type sub word count */
+ } action_table[2];
+ u16 counter_words; /* Number of counter words */
+ u16 counter_width; /* Counter width (in bits) */
+
+ enum ocelot_target target;
+
+ const struct vcap_field *keys;
+ const struct vcap_field *actions;
+};
+
+/* VCAP Type-Group values */
+#define VCAP_TG_NONE 0 /* Entry is invalid */
+#define VCAP_TG_FULL 1 /* Full entry */
+#define VCAP_TG_HALF 2 /* Half entry */
+#define VCAP_TG_QUARTER 3 /* Quarter entry */
+
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_CMD(x) (((x) << 22) & GENMASK(24, 22))
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_CMD_M GENMASK(24, 22)
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_CMD_X(x) (((x) & GENMASK(24, 22)) >> 22)
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_ENTRY_DIS BIT(21)
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_ACTION_DIS BIT(20)
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_CNT_DIS BIT(19)
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_ADDR(x) (((x) << 3) & GENMASK(18, 3))
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_ADDR_M GENMASK(18, 3)
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_ADDR_X(x) (((x) & GENMASK(18, 3)) >> 3)
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_SHOT BIT(2)
+#define VCAP_CORE_UPDATE_CTRL_CLEAR_CACHE BIT(1)
+#define VCAP_CORE_UPDATE_CTRL_MV_TRAFFIC_IGN BIT(0)
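+
+/* Naming convention used for multi-bit fields throughout these headers:
+ * FIELD(x) shifts and masks a value into position, FIELD_M is the in-register
+ * mask, and FIELD_X(x) extracts the field again. A round trip with the
+ * UPDATE_CMD field above, assuming "cmd" fits in the field:
+ *
+ *	u32 ctrl = VCAP_CORE_UPDATE_CTRL_UPDATE_CMD(cmd) |
+ *		   VCAP_CORE_UPDATE_CTRL_UPDATE_SHOT;
+ *
+ *	cmd = VCAP_CORE_UPDATE_CTRL_UPDATE_CMD_X(ctrl);
+ */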
+
+#define VCAP_CORE_MV_CFG_MV_NUM_POS(x) (((x) << 16) & GENMASK(31, 16))
+#define VCAP_CORE_MV_CFG_MV_NUM_POS_M GENMASK(31, 16)
+#define VCAP_CORE_MV_CFG_MV_NUM_POS_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define VCAP_CORE_MV_CFG_MV_SIZE(x) ((x) & GENMASK(15, 0))
+#define VCAP_CORE_MV_CFG_MV_SIZE_M GENMASK(15, 0)
+
+#define VCAP_CACHE_ENTRY_DAT_RSZ 0x4
+
+#define VCAP_CACHE_MASK_DAT_RSZ 0x4
+
+#define VCAP_CACHE_ACTION_DAT_RSZ 0x4
+
+#define VCAP_CACHE_CNT_DAT_RSZ 0x4
+
+#define VCAP_STICKY_VCAP_ROW_DELETED_STICKY BIT(0)
+
+#define TCAM_BIST_CTRL_TCAM_BIST BIT(1)
+#define TCAM_BIST_CTRL_TCAM_INIT BIT(0)
+
+#define TCAM_BIST_CFG_TCAM_BIST_SOE_ENA BIT(8)
+#define TCAM_BIST_CFG_TCAM_HCG_DIS BIT(7)
+#define TCAM_BIST_CFG_TCAM_CG_DIS BIT(6)
+#define TCAM_BIST_CFG_TCAM_BIAS(x) ((x) & GENMASK(5, 0))
+#define TCAM_BIST_CFG_TCAM_BIAS_M GENMASK(5, 0)
+
+#define TCAM_BIST_STAT_BIST_RT_ERR BIT(15)
+#define TCAM_BIST_STAT_BIST_PENC_ERR BIT(14)
+#define TCAM_BIST_STAT_BIST_COMP_ERR BIT(13)
+#define TCAM_BIST_STAT_BIST_ADDR_ERR BIT(12)
+#define TCAM_BIST_STAT_BIST_BL1E_ERR BIT(11)
+#define TCAM_BIST_STAT_BIST_BL1_ERR BIT(10)
+#define TCAM_BIST_STAT_BIST_BL0E_ERR BIT(9)
+#define TCAM_BIST_STAT_BIST_BL0_ERR BIT(8)
+#define TCAM_BIST_STAT_BIST_PH1_ERR BIT(7)
+#define TCAM_BIST_STAT_BIST_PH0_ERR BIT(6)
+#define TCAM_BIST_STAT_BIST_PV1_ERR BIT(5)
+#define TCAM_BIST_STAT_BIST_PV0_ERR BIT(4)
+#define TCAM_BIST_STAT_BIST_RUN BIT(3)
+#define TCAM_BIST_STAT_BIST_ERR BIT(2)
+#define TCAM_BIST_STAT_BIST_BUSY BIT(1)
+#define TCAM_BIST_STAT_TCAM_RDY BIT(0)
+
+/* =================================================================
+ * VCAP IS2
+ * =================================================================
+ */
+
+/* IS2 half key types */
+#define IS2_TYPE_ETYPE 0
+#define IS2_TYPE_LLC 1
+#define IS2_TYPE_SNAP 2
+#define IS2_TYPE_ARP 3
+#define IS2_TYPE_IP_UDP_TCP 4
+#define IS2_TYPE_IP_OTHER 5
+#define IS2_TYPE_IPV6 6
+#define IS2_TYPE_OAM 7
+#define IS2_TYPE_SMAC_SIP6 8
+#define IS2_TYPE_ANY 100 /* Pseudo type */
+
+/* IS2 half key type mask for matching any IP */
+#define IS2_TYPE_MASK_IP_ANY 0xe
+
+enum {
+ IS2_ACTION_TYPE_NORMAL,
+ IS2_ACTION_TYPE_SMAC_SIP,
+ IS2_ACTION_TYPE_MAX,
+};
+
+/* IS2 MASK_MODE values */
+#define IS2_ACT_MASK_MODE_NONE 0
+#define IS2_ACT_MASK_MODE_FILTER 1
+#define IS2_ACT_MASK_MODE_POLICY 2
+#define IS2_ACT_MASK_MODE_REDIR 3
+
+/* IS2 REW_OP values */
+#define IS2_ACT_REW_OP_NONE 0
+#define IS2_ACT_REW_OP_PTP_ONE 2
+#define IS2_ACT_REW_OP_PTP_TWO 3
+#define IS2_ACT_REW_OP_SPECIAL 8
+#define IS2_ACT_REW_OP_PTP_ORG 9
+#define IS2_ACT_REW_OP_PTP_ONE_SUB_DELAY_1 (IS2_ACT_REW_OP_PTP_ONE | (1 << 3))
+#define IS2_ACT_REW_OP_PTP_ONE_SUB_DELAY_2 (IS2_ACT_REW_OP_PTP_ONE | (2 << 3))
+#define IS2_ACT_REW_OP_PTP_ONE_ADD_DELAY (IS2_ACT_REW_OP_PTP_ONE | (1 << 5))
+#define IS2_ACT_REW_OP_PTP_ONE_ADD_SUB BIT(7)
+
+#define VCAP_PORT_WIDTH 4
+
+/* IS2 quarter key - SMAC_SIP4 */
+#define IS2_QKO_IGR_PORT 0
+#define IS2_QKL_IGR_PORT VCAP_PORT_WIDTH
+#define IS2_QKO_L2_SMAC (IS2_QKO_IGR_PORT + IS2_QKL_IGR_PORT)
+#define IS2_QKL_L2_SMAC 48
+#define IS2_QKO_L3_IP4_SIP (IS2_QKO_L2_SMAC + IS2_QKL_L2_SMAC)
+#define IS2_QKL_L3_IP4_SIP 32
+
+enum vcap_is2_half_key_field {
+ /* Common */
+ VCAP_IS2_TYPE,
+ VCAP_IS2_HK_FIRST,
+ VCAP_IS2_HK_PAG,
+ VCAP_IS2_HK_RSV1,
+ VCAP_IS2_HK_IGR_PORT_MASK,
+ VCAP_IS2_HK_RSV2,
+ VCAP_IS2_HK_HOST_MATCH,
+ VCAP_IS2_HK_L2_MC,
+ VCAP_IS2_HK_L2_BC,
+ VCAP_IS2_HK_VLAN_TAGGED,
+ VCAP_IS2_HK_VID,
+ VCAP_IS2_HK_DEI,
+ VCAP_IS2_HK_PCP,
+ /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
+ VCAP_IS2_HK_L2_DMAC,
+ VCAP_IS2_HK_L2_SMAC,
+ /* MAC_ETYPE (TYPE=000) */
+ VCAP_IS2_HK_MAC_ETYPE_ETYPE,
+ VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0,
+ VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1,
+ VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2,
+ /* MAC_LLC (TYPE=001) */
+ VCAP_IS2_HK_MAC_LLC_DMAC,
+ VCAP_IS2_HK_MAC_LLC_SMAC,
+ VCAP_IS2_HK_MAC_LLC_L2_LLC,
+ /* MAC_SNAP (TYPE=010) */
+ VCAP_IS2_HK_MAC_SNAP_SMAC,
+ VCAP_IS2_HK_MAC_SNAP_DMAC,
+ VCAP_IS2_HK_MAC_SNAP_L2_SNAP,
+ /* MAC_ARP (TYPE=011) */
+ VCAP_IS2_HK_MAC_ARP_SMAC,
+ VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK,
+ VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK,
+ VCAP_IS2_HK_MAC_ARP_LEN_OK,
+ VCAP_IS2_HK_MAC_ARP_TARGET_MATCH,
+ VCAP_IS2_HK_MAC_ARP_SENDER_MATCH,
+ VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN,
+ VCAP_IS2_HK_MAC_ARP_OPCODE,
+ VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP,
+ VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP,
+ VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP,
+ /* IP4_TCP_UDP / IP4_OTHER common */
+ VCAP_IS2_HK_IP4,
+ VCAP_IS2_HK_L3_FRAGMENT,
+ VCAP_IS2_HK_L3_FRAG_OFS_GT0,
+ VCAP_IS2_HK_L3_OPTIONS,
+ VCAP_IS2_HK_IP4_L3_TTL_GT0,
+ VCAP_IS2_HK_L3_TOS,
+ VCAP_IS2_HK_L3_IP4_DIP,
+ VCAP_IS2_HK_L3_IP4_SIP,
+ VCAP_IS2_HK_DIP_EQ_SIP,
+ /* IP4_TCP_UDP (TYPE=100) */
+ VCAP_IS2_HK_TCP,
+ VCAP_IS2_HK_L4_SPORT,
+ VCAP_IS2_HK_L4_DPORT,
+ VCAP_IS2_HK_L4_RNG,
+ VCAP_IS2_HK_L4_SPORT_EQ_DPORT,
+ VCAP_IS2_HK_L4_SEQUENCE_EQ0,
+ VCAP_IS2_HK_L4_URG,
+ VCAP_IS2_HK_L4_ACK,
+ VCAP_IS2_HK_L4_PSH,
+ VCAP_IS2_HK_L4_RST,
+ VCAP_IS2_HK_L4_SYN,
+ VCAP_IS2_HK_L4_FIN,
+ VCAP_IS2_HK_L4_1588_DOM,
+ VCAP_IS2_HK_L4_1588_VER,
+ /* IP4_OTHER (TYPE=101) */
+ VCAP_IS2_HK_IP4_L3_PROTO,
+ VCAP_IS2_HK_L3_PAYLOAD,
+ /* IP6_STD (TYPE=110) */
+ VCAP_IS2_HK_IP6_L3_TTL_GT0,
+ VCAP_IS2_HK_IP6_L3_PROTO,
+ VCAP_IS2_HK_L3_IP6_SIP,
+ /* OAM (TYPE=111) */
+ VCAP_IS2_HK_OAM_MEL_FLAGS,
+ VCAP_IS2_HK_OAM_VER,
+ VCAP_IS2_HK_OAM_OPCODE,
+ VCAP_IS2_HK_OAM_FLAGS,
+ VCAP_IS2_HK_OAM_MEPID,
+ VCAP_IS2_HK_OAM_CCM_CNTS_EQ0,
+ VCAP_IS2_HK_OAM_IS_Y1731,
+};
+
+struct vcap_field {
+ int offset;
+ int length;
+};
+
+enum vcap_is2_action_field {
+ VCAP_IS2_ACT_HIT_ME_ONCE,
+ VCAP_IS2_ACT_CPU_COPY_ENA,
+ VCAP_IS2_ACT_CPU_QU_NUM,
+ VCAP_IS2_ACT_MASK_MODE,
+ VCAP_IS2_ACT_MIRROR_ENA,
+ VCAP_IS2_ACT_LRN_DIS,
+ VCAP_IS2_ACT_POLICE_ENA,
+ VCAP_IS2_ACT_POLICE_IDX,
+ VCAP_IS2_ACT_POLICE_VCAP_ONLY,
+ VCAP_IS2_ACT_PORT_MASK,
+ VCAP_IS2_ACT_REW_OP,
+ VCAP_IS2_ACT_SMAC_REPLACE_ENA,
+ VCAP_IS2_ACT_RSV,
+ VCAP_IS2_ACT_ACL_ID,
+ VCAP_IS2_ACT_HIT_CNT,
+};
+
+/* =================================================================
+ * VCAP IS1
+ * =================================================================
+ */
+
+/* IS1 half key types */
+#define IS1_TYPE_S1_NORMAL 0
+#define IS1_TYPE_S1_5TUPLE_IP4 1
+
+/* IS1 full key types */
+#define IS1_TYPE_S1_NORMAL_IP6 0
+#define IS1_TYPE_S1_7TUPLE 1
+#define IS2_TYPE_S1_5TUPLE_IP6 2
+
+enum {
+ IS1_ACTION_TYPE_NORMAL,
+ IS1_ACTION_TYPE_MAX,
+};
+
+enum vcap_is1_half_key_field {
+ VCAP_IS1_HK_TYPE,
+ VCAP_IS1_HK_LOOKUP,
+ VCAP_IS1_HK_IGR_PORT_MASK,
+ VCAP_IS1_HK_RSV,
+ VCAP_IS1_HK_OAM_Y1731,
+ VCAP_IS1_HK_L2_MC,
+ VCAP_IS1_HK_L2_BC,
+ VCAP_IS1_HK_IP_MC,
+ VCAP_IS1_HK_VLAN_TAGGED,
+ VCAP_IS1_HK_VLAN_DBL_TAGGED,
+ VCAP_IS1_HK_TPID,
+ VCAP_IS1_HK_VID,
+ VCAP_IS1_HK_DEI,
+ VCAP_IS1_HK_PCP,
+ /* Specific Fields for IS1 Half Key S1_NORMAL */
+ VCAP_IS1_HK_L2_SMAC,
+ VCAP_IS1_HK_ETYPE_LEN,
+ VCAP_IS1_HK_ETYPE,
+ VCAP_IS1_HK_IP_SNAP,
+ VCAP_IS1_HK_IP4,
+ VCAP_IS1_HK_L3_FRAGMENT,
+ VCAP_IS1_HK_L3_FRAG_OFS_GT0,
+ VCAP_IS1_HK_L3_OPTIONS,
+ VCAP_IS1_HK_L3_DSCP,
+ VCAP_IS1_HK_L3_IP4_SIP,
+ VCAP_IS1_HK_TCP_UDP,
+ VCAP_IS1_HK_TCP,
+ VCAP_IS1_HK_L4_SPORT,
+ VCAP_IS1_HK_L4_RNG,
+ /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */
+ VCAP_IS1_HK_IP4_INNER_TPID,
+ VCAP_IS1_HK_IP4_INNER_VID,
+ VCAP_IS1_HK_IP4_INNER_DEI,
+ VCAP_IS1_HK_IP4_INNER_PCP,
+ VCAP_IS1_HK_IP4_IP4,
+ VCAP_IS1_HK_IP4_L3_FRAGMENT,
+ VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0,
+ VCAP_IS1_HK_IP4_L3_OPTIONS,
+ VCAP_IS1_HK_IP4_L3_DSCP,
+ VCAP_IS1_HK_IP4_L3_IP4_DIP,
+ VCAP_IS1_HK_IP4_L3_IP4_SIP,
+ VCAP_IS1_HK_IP4_L3_PROTO,
+ VCAP_IS1_HK_IP4_TCP_UDP,
+ VCAP_IS1_HK_IP4_TCP,
+ VCAP_IS1_HK_IP4_L4_RNG,
+ VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE,
+};
+
+enum vcap_is1_action_field {
+ VCAP_IS1_ACT_DSCP_ENA,
+ VCAP_IS1_ACT_DSCP_VAL,
+ VCAP_IS1_ACT_QOS_ENA,
+ VCAP_IS1_ACT_QOS_VAL,
+ VCAP_IS1_ACT_DP_ENA,
+ VCAP_IS1_ACT_DP_VAL,
+ VCAP_IS1_ACT_PAG_OVERRIDE_MASK,
+ VCAP_IS1_ACT_PAG_VAL,
+ VCAP_IS1_ACT_RSV,
+ VCAP_IS1_ACT_VID_REPLACE_ENA,
+ VCAP_IS1_ACT_VID_ADD_VAL,
+ VCAP_IS1_ACT_FID_SEL,
+ VCAP_IS1_ACT_FID_VAL,
+ VCAP_IS1_ACT_PCP_DEI_ENA,
+ VCAP_IS1_ACT_PCP_VAL,
+ VCAP_IS1_ACT_DEI_VAL,
+ VCAP_IS1_ACT_VLAN_POP_CNT_ENA,
+ VCAP_IS1_ACT_VLAN_POP_CNT,
+ VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA,
+ VCAP_IS1_ACT_HIT_STICKY,
+};
+
+/* =================================================================
+ * VCAP ES0
+ * =================================================================
+ */
+
+enum {
+ ES0_ACTION_TYPE_NORMAL,
+ ES0_ACTION_TYPE_MAX,
+};
+
+enum vcap_es0_key_field {
+ VCAP_ES0_EGR_PORT,
+ VCAP_ES0_IGR_PORT,
+ VCAP_ES0_RSV,
+ VCAP_ES0_L2_MC,
+ VCAP_ES0_L2_BC,
+ VCAP_ES0_VID,
+ VCAP_ES0_DP,
+ VCAP_ES0_PCP,
+};
+
+enum vcap_es0_action_field {
+ VCAP_ES0_ACT_PUSH_OUTER_TAG,
+ VCAP_ES0_ACT_PUSH_INNER_TAG,
+ VCAP_ES0_ACT_TAG_A_TPID_SEL,
+ VCAP_ES0_ACT_TAG_A_VID_SEL,
+ VCAP_ES0_ACT_TAG_A_PCP_SEL,
+ VCAP_ES0_ACT_TAG_A_DEI_SEL,
+ VCAP_ES0_ACT_TAG_B_TPID_SEL,
+ VCAP_ES0_ACT_TAG_B_VID_SEL,
+ VCAP_ES0_ACT_TAG_B_PCP_SEL,
+ VCAP_ES0_ACT_TAG_B_DEI_SEL,
+ VCAP_ES0_ACT_VID_A_VAL,
+ VCAP_ES0_ACT_PCP_A_VAL,
+ VCAP_ES0_ACT_DEI_A_VAL,
+ VCAP_ES0_ACT_VID_B_VAL,
+ VCAP_ES0_ACT_PCP_B_VAL,
+ VCAP_ES0_ACT_DEI_B_VAL,
+ VCAP_ES0_ACT_RSV,
+ VCAP_ES0_ACT_HIT_STICKY,
+};
+
+struct ocelot_ipv4 {
+ u8 addr[4];
+};
+
+enum ocelot_vcap_bit {
+ OCELOT_VCAP_BIT_ANY,
+ OCELOT_VCAP_BIT_0,
+ OCELOT_VCAP_BIT_1
+};
+
+struct ocelot_vcap_u8 {
+ u8 value[1];
+ u8 mask[1];
+};
+
+struct ocelot_vcap_u16 {
+ u8 value[2];
+ u8 mask[2];
+};
+
+struct ocelot_vcap_u24 {
+ u8 value[3];
+ u8 mask[3];
+};
+
+struct ocelot_vcap_u32 {
+ u8 value[4];
+ u8 mask[4];
+};
+
+struct ocelot_vcap_u40 {
+ u8 value[5];
+ u8 mask[5];
+};
+
+struct ocelot_vcap_u48 {
+ u8 value[6];
+ u8 mask[6];
+};
+
+struct ocelot_vcap_u64 {
+ u8 value[8];
+ u8 mask[8];
+};
+
+struct ocelot_vcap_u128 {
+ u8 value[16];
+ u8 mask[16];
+};
+
+struct ocelot_vcap_vid {
+ u16 value;
+ u16 mask;
+};
+
+struct ocelot_vcap_ipv4 {
+ struct ocelot_ipv4 value;
+ struct ocelot_ipv4 mask;
+};
+
+struct ocelot_vcap_udp_tcp {
+ u16 value;
+ u16 mask;
+};
+
+struct ocelot_vcap_port {
+ u8 value;
+ u8 mask;
+};
+
+enum ocelot_vcap_key_type {
+ OCELOT_VCAP_KEY_ANY,
+ OCELOT_VCAP_KEY_ETYPE,
+ OCELOT_VCAP_KEY_LLC,
+ OCELOT_VCAP_KEY_SNAP,
+ OCELOT_VCAP_KEY_ARP,
+ OCELOT_VCAP_KEY_IPV4,
+ OCELOT_VCAP_KEY_IPV6
+};
+
+struct ocelot_vcap_key_vlan {
+ struct ocelot_vcap_vid vid; /* VLAN ID (12 bit) */
+ struct ocelot_vcap_u8 pcp; /* PCP (3 bit) */
+ enum ocelot_vcap_bit dei; /* DEI */
+ enum ocelot_vcap_bit tagged; /* Tagged/untagged frame */
+};
+
+struct ocelot_vcap_key_etype {
+ struct ocelot_vcap_u48 dmac;
+ struct ocelot_vcap_u48 smac;
+ struct ocelot_vcap_u16 etype;
+ struct ocelot_vcap_u16 data; /* MAC data */
+};
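+
+/* Key matching sketch, assuming the usual TCAM convention that a set mask bit
+ * means "this bit must match" and a clear bit means "don't care". Matching
+ * one exact DMAC in a caller-owned ocelot_vcap_key_etype named "key" (the
+ * address is an arbitrary example):
+ *
+ *	static const u8 dmac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e };
+ *
+ *	memcpy(key.dmac.value, dmac, ETH_ALEN);
+ *	memset(key.dmac.mask, 0xff, ETH_ALEN);
+ */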
+
+struct ocelot_vcap_key_llc {
+ struct ocelot_vcap_u48 dmac;
+ struct ocelot_vcap_u48 smac;
+
+ /* LLC header: DSAP at byte 0, SSAP at byte 1, Control at byte 2 */
+ struct ocelot_vcap_u32 llc;
+};
+
+struct ocelot_vcap_key_snap {
+ struct ocelot_vcap_u48 dmac;
+ struct ocelot_vcap_u48 smac;
+
+ /* SNAP header: Organization Code at byte 0, Type at byte 3 */
+ struct ocelot_vcap_u40 snap;
+};
+
+struct ocelot_vcap_key_arp {
+ struct ocelot_vcap_u48 smac;
+ enum ocelot_vcap_bit arp; /* Opcode ARP/RARP */
+ enum ocelot_vcap_bit req; /* Opcode request/reply */
+ enum ocelot_vcap_bit unknown; /* Opcode unknown */
+ enum ocelot_vcap_bit smac_match; /* Sender MAC matches SMAC */
+ enum ocelot_vcap_bit dmac_match; /* Target MAC matches DMAC */
+
+ /* Protocol addr. length 4, hardware length 6 */
+ enum ocelot_vcap_bit length;
+
+ enum ocelot_vcap_bit ip; /* Protocol address type IP */
+ enum ocelot_vcap_bit ethernet; /* Hardware address type Ethernet */
+ struct ocelot_vcap_ipv4 sip; /* Sender IP address */
+ struct ocelot_vcap_ipv4 dip; /* Target IP address */
+};
+
+struct ocelot_vcap_key_ipv4 {
+ enum ocelot_vcap_bit ttl; /* TTL zero */
+ enum ocelot_vcap_bit fragment; /* Fragment */
+ enum ocelot_vcap_bit options; /* Header options */
+ struct ocelot_vcap_u8 ds;
+ struct ocelot_vcap_u8 proto; /* Protocol */
+ struct ocelot_vcap_ipv4 sip; /* Source IP address */
+ struct ocelot_vcap_ipv4 dip; /* Destination IP address */
+ struct ocelot_vcap_u48 data; /* Not UDP/TCP: IP data */
+ struct ocelot_vcap_udp_tcp sport; /* UDP/TCP: Source port */
+ struct ocelot_vcap_udp_tcp dport; /* UDP/TCP: Destination port */
+ enum ocelot_vcap_bit tcp_fin;
+ enum ocelot_vcap_bit tcp_syn;
+ enum ocelot_vcap_bit tcp_rst;
+ enum ocelot_vcap_bit tcp_psh;
+ enum ocelot_vcap_bit tcp_ack;
+ enum ocelot_vcap_bit tcp_urg;
+ enum ocelot_vcap_bit sip_eq_dip; /* SIP equals DIP */
+ enum ocelot_vcap_bit sport_eq_dport; /* SPORT equals DPORT */
+ enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */
+};
+
+struct ocelot_vcap_key_ipv6 {
+ struct ocelot_vcap_u8 proto; /* IPv6 protocol */
+ struct ocelot_vcap_u128 sip; /* IPv6 source (byte 0-7 ignored) */
+ struct ocelot_vcap_u128 dip; /* IPv6 destination (byte 0-7 ignored) */
+ enum ocelot_vcap_bit ttl; /* TTL zero */
+ struct ocelot_vcap_u8 ds;
+ struct ocelot_vcap_u48 data; /* Not UDP/TCP: IP data */
+ struct ocelot_vcap_udp_tcp sport;
+ struct ocelot_vcap_udp_tcp dport;
+ enum ocelot_vcap_bit tcp_fin;
+ enum ocelot_vcap_bit tcp_syn;
+ enum ocelot_vcap_bit tcp_rst;
+ enum ocelot_vcap_bit tcp_psh;
+ enum ocelot_vcap_bit tcp_ack;
+ enum ocelot_vcap_bit tcp_urg;
+ enum ocelot_vcap_bit sip_eq_dip; /* SIP equals DIP */
+ enum ocelot_vcap_bit sport_eq_dport; /* SPORT equals DPORT */
+ enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */
+};
+
+enum ocelot_mask_mode {
+ OCELOT_MASK_MODE_NONE,
+ OCELOT_MASK_MODE_PERMIT_DENY,
+ OCELOT_MASK_MODE_POLICY,
+ OCELOT_MASK_MODE_REDIRECT,
+};
+
+enum ocelot_es0_vid_sel {
+ OCELOT_ES0_VID_PLUS_CLASSIFIED_VID = 0,
+ OCELOT_ES0_VID = 1,
+};
+
+enum ocelot_es0_pcp_sel {
+ OCELOT_CLASSIFIED_PCP = 0,
+ OCELOT_ES0_PCP = 1,
+};
+
+enum ocelot_es0_tag {
+ OCELOT_NO_ES0_TAG,
+ OCELOT_ES0_TAG,
+ OCELOT_FORCE_PORT_TAG,
+ OCELOT_FORCE_UNTAG,
+};
+
+enum ocelot_tag_tpid_sel {
+ OCELOT_TAG_TPID_SEL_8021Q,
+ OCELOT_TAG_TPID_SEL_8021AD,
+};
+
+struct ocelot_vcap_action {
+ union {
+ /* VCAP ES0 */
+ struct {
+ enum ocelot_es0_tag push_outer_tag;
+ enum ocelot_es0_tag push_inner_tag;
+ enum ocelot_tag_tpid_sel tag_a_tpid_sel;
+ int tag_a_vid_sel;
+ int tag_a_pcp_sel;
+ u16 vid_a_val;
+ u8 pcp_a_val;
+ u8 dei_a_val;
+ enum ocelot_tag_tpid_sel tag_b_tpid_sel;
+ int tag_b_vid_sel;
+ int tag_b_pcp_sel;
+ u16 vid_b_val;
+ u8 pcp_b_val;
+ u8 dei_b_val;
+ };
+
+ /* VCAP IS1 */
+ struct {
+ bool vid_replace_ena;
+ u16 vid;
+ bool vlan_pop_cnt_ena;
+ int vlan_pop_cnt;
+ bool pcp_dei_ena;
+ u8 pcp;
+ u8 dei;
+ bool qos_ena;
+ u8 qos_val;
+ u8 pag_override_mask;
+ u8 pag_val;
+ };
+
+ /* VCAP IS2 */
+ struct {
+ bool cpu_copy_ena;
+ u8 cpu_qu_num;
+ enum ocelot_mask_mode mask_mode;
+ unsigned long port_mask;
+ bool police_ena;
+ bool mirror_ena;
+ struct ocelot_policer pol;
+ u32 pol_ix;
+ };
+ };
+};
+
+struct ocelot_vcap_stats {
+ u64 bytes;
+ u64 pkts;
+ u64 used;
+};
+
+enum ocelot_vcap_filter_type {
+ OCELOT_VCAP_FILTER_DUMMY,
+ OCELOT_VCAP_FILTER_PAG,
+ OCELOT_VCAP_FILTER_OFFLOAD,
+ OCELOT_PSFP_FILTER_OFFLOAD,
+};
+
+struct ocelot_vcap_id {
+ unsigned long cookie;
+ bool tc_offload;
+};
+
+struct ocelot_vcap_filter {
+ struct list_head list;
+
+ enum ocelot_vcap_filter_type type;
+ int block_id;
+ int goto_target;
+ int lookup;
+ u8 pag;
+ u16 prio;
+ struct ocelot_vcap_id id;
+
+ struct ocelot_vcap_action action;
+ struct ocelot_vcap_stats stats;
+ /* For VCAP IS1 and IS2 */
+ bool take_ts;
+ bool is_trap;
+ unsigned long ingress_port_mask;
+ /* For VCAP ES0 */
+ struct ocelot_vcap_port ingress_port;
+ /* For VCAP IS2 mirrors and ES0 */
+ struct ocelot_vcap_port egress_port;
+
+ enum ocelot_vcap_bit dmac_mc;
+ enum ocelot_vcap_bit dmac_bc;
+ struct ocelot_vcap_key_vlan vlan;
+
+ enum ocelot_vcap_key_type key_type;
+ union {
+ /* OCELOT_VCAP_KEY_ANY: No specific fields */
+ struct ocelot_vcap_key_etype etype;
+ struct ocelot_vcap_key_llc llc;
+ struct ocelot_vcap_key_snap snap;
+ struct ocelot_vcap_key_arp arp;
+ struct ocelot_vcap_key_ipv4 ipv4;
+ struct ocelot_vcap_key_ipv6 ipv6;
+ } key;
+};
+
+int ocelot_vcap_filter_add(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *rule,
+ struct netlink_ext_ack *extack);
+int ocelot_vcap_filter_del(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *rule);
+int ocelot_vcap_filter_replace(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter);
+struct ocelot_vcap_filter *
+ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block,
+ unsigned long cookie, bool tc_offload);
+
+#endif /* _OCELOT_VCAP_H_ */
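
A minimal usage sketch for the filter API above, assuming the usual
value/mask (ternary) convention for the ocelot_vcap_u* types (a mask
bit of 1 marks a bit that must match) and that VCAP_IS2 is the IS2
block index defined earlier in this header; the cookie, priority and
port numbers are arbitrary placeholders, not values mandated by the
API:

#include <linux/bits.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <soc/mscc/ocelot_vcap.h>

/* Trap frames whose DMAC OUI is 00:01:c1 on ingress port 0 to the CPU. */
static int example_trap_by_oui(struct ocelot *ocelot,
			       struct netlink_ext_ack *extack)
{
	static const u8 oui[3] = { 0x00, 0x01, 0xc1 };
	struct ocelot_vcap_filter *filter;
	int err;

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return -ENOMEM;

	filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
	filter->block_id = VCAP_IS2;		/* assumed IS2 block index */
	filter->lookup = 0;
	filter->prio = 1;
	filter->id.cookie = 0x1234;		/* placeholder cookie */
	filter->id.tc_offload = false;
	filter->ingress_port_mask = BIT(0);

	filter->key_type = OCELOT_VCAP_KEY_ETYPE;
	/* Match the first three DMAC bytes, leave the rest as don't-care */
	memcpy(filter->key.etype.dmac.value, oui, sizeof(oui));
	memset(filter->key.etype.dmac.mask, 0xff, sizeof(oui));

	filter->action.cpu_copy_ena = true;	/* IS2 action fields */
	filter->action.cpu_qu_num = 0;
	filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
	filter->action.port_mask = 0;		/* do not forward in hardware */

	err = ocelot_vcap_filter_add(ocelot, filter, extack);
	if (err)
		kfree(filter);
	return err;
}

ocelot_vcap_filter_del() later takes the same rule pointer, and a rule
installed on behalf of tc would typically be found again by cookie via
ocelot_vcap_block_find_filter_by_id().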
diff --git a/include/soc/mscc/vsc7514_regs.h b/include/soc/mscc/vsc7514_regs.h
new file mode 100644
index 000000000000..ceee26c96959
--- /dev/null
+++ b/include/soc/mscc/vsc7514_regs.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2021 Innovative Advantage Inc.
+ */
+
+#ifndef VSC7514_REGS_H
+#define VSC7514_REGS_H
+
+#include <soc/mscc/ocelot_vcap.h>
+
+extern const u32 vsc7514_ana_regmap[];
+extern const u32 vsc7514_qs_regmap[];
+extern const u32 vsc7514_qsys_regmap[];
+extern const u32 vsc7514_rew_regmap[];
+extern const u32 vsc7514_sys_regmap[];
+extern const u32 vsc7514_vcap_regmap[];
+extern const u32 vsc7514_ptp_regmap[];
+extern const u32 vsc7514_dev_gmii_regmap[];
+
+extern const struct vcap_field vsc7514_vcap_es0_keys[];
+extern const struct vcap_field vsc7514_vcap_es0_actions[];
+extern const struct vcap_field vsc7514_vcap_is1_keys[];
+extern const struct vcap_field vsc7514_vcap_is1_actions[];
+extern const struct vcap_field vsc7514_vcap_is2_keys[];
+extern const struct vcap_field vsc7514_vcap_is2_actions[];
+
+#endif
diff --git a/include/soc/nps/common.h b/include/soc/nps/common.h
deleted file mode 100644
index 9b1d43d671a3..000000000000
--- a/include/soc/nps/common.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef SOC_NPS_COMMON_H
-#define SOC_NPS_COMMON_H
-
-#ifdef CONFIG_SMP
-#define NPS_IPI_IRQ 5
-#endif
-
-#define NPS_HOST_REG_BASE 0xF6000000
-
-#define NPS_MSU_BLKID 0x018
-
-#define CTOP_INST_RSPI_GIC_0_R12 0x3C56117E
-#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST 0x5B60
-#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM 0x00010422
-
-#ifndef __ASSEMBLY__
-
-/* In order to increase compilation test coverage */
-#ifdef CONFIG_ARC
-static inline void nps_ack_gic(void)
-{
- __asm__ __volatile__ (
- " .word %0\n"
- :
- : "i"(CTOP_INST_RSPI_GIC_0_R12)
- : "memory");
-}
-#else
-static inline void nps_ack_gic(void) { }
-#define write_aux_reg(r, v)
-#define read_aux_reg(r) 0
-#endif
-
-/* CPU global ID */
-struct global_id {
- union {
- struct {
-#ifdef CONFIG_EZNPS_MTM_EXT
- u32 __reserved:20, cluster:4, core:4, thread:4;
-#else
- u32 __reserved:24, cluster:4, core:4;
-#endif
- };
- u32 value;
- };
-};
-
-/*
- * Convert logical to physical CPU IDs
- *
- * The conversion swap bits 1 and 2 of cluster id (out of 4 bits)
- * Now quad of logical clusters id's are adjacent physically,
- * and not like the id's physically came with each cluster.
- * Below table is 4x4 mesh of core clusters as it layout on chip.
- * Cluster ids are in format: logical (physical)
- *
- * ----------------- ------------------
- * 3 | 5 (3) 7 (7) | | 13 (11) 15 (15)|
- *
- * 2 | 4 (2) 6 (6) | | 12 (10) 14 (14)|
- * ----------------- ------------------
- * 1 | 1 (1) 3 (5) | | 9 (9) 11 (13)|
- *
- * 0 | 0 (0) 2 (4) | | 8 (8) 10 (12)|
- * ----------------- ------------------
- * 0 1 2 3
- */
-static inline int nps_cluster_logic_to_phys(int cluster)
-{
-#ifdef __arc__
- __asm__ __volatile__(
- " mov r3,%0\n"
- " .short %1\n"
- " .word %2\n"
- " mov %0,r3\n"
- : "+r"(cluster)
- : "i"(CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST),
- "i"(CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM)
- : "r3");
-#endif
-
- return cluster;
-}
-
-#define NPS_CPU_TO_CLUSTER_NUM(cpu) \
- ({ struct global_id gid; gid.value = cpu; \
- nps_cluster_logic_to_phys(gid.cluster); })
-
-struct nps_host_reg_address {
- union {
- struct {
- u32 base:8, cl_x:4, cl_y:4,
- blkid:6, reg:8, __reserved:2;
- };
- u32 value;
- };
-};
-
-struct nps_host_reg_address_non_cl {
- union {
- struct {
- u32 base:7, blkid:11, reg:12, __reserved:2;
- };
- u32 value;
- };
-};
-
-static inline void *nps_host_reg_non_cl(u32 blkid, u32 reg)
-{
- struct nps_host_reg_address_non_cl reg_address;
-
- reg_address.value = NPS_HOST_REG_BASE;
- reg_address.blkid = blkid;
- reg_address.reg = reg;
-
- return (void *)reg_address.value;
-}
-
-static inline void *nps_host_reg(u32 cpu, u32 blkid, u32 reg)
-{
- struct nps_host_reg_address reg_address;
- u32 cl = NPS_CPU_TO_CLUSTER_NUM(cpu);
-
- reg_address.value = NPS_HOST_REG_BASE;
- reg_address.cl_x = (cl >> 2) & 0x3;
- reg_address.cl_y = cl & 0x3;
- reg_address.blkid = blkid;
- reg_address.reg = reg;
-
- return (void *)reg_address.value;
-}
-#endif /* __ASSEMBLY__ */
-
-#endif /* SOC_NPS_COMMON_H */
diff --git a/include/soc/nps/mtm.h b/include/soc/nps/mtm.h
deleted file mode 100644
index d2f5e7e3703e..000000000000
--- a/include/soc/nps/mtm.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef SOC_NPS_MTM_H
-#define SOC_NPS_MTM_H
-
-#define CTOP_INST_HWSCHD_OFF_R3 0x3B6F00BF
-#define CTOP_INST_HWSCHD_RESTORE_R3 0x3E6F70C3
-
-static inline void hw_schd_save(unsigned int *flags)
-{
- __asm__ __volatile__(
- " .word %1\n"
- " st r3,[%0]\n"
- :
- : "r"(flags), "i"(CTOP_INST_HWSCHD_OFF_R3)
- : "r3", "memory");
-}
-
-static inline void hw_schd_restore(unsigned int flags)
-{
- __asm__ __volatile__(
- " mov r3, %0\n"
- " .word %1\n"
- :
- : "r"(flags), "i"(CTOP_INST_HWSCHD_RESTORE_R3)
- : "r3");
-}
-
-#endif /* SOC_NPS_MTM_H */
diff --git a/include/soc/qcom/cmd-db.h b/include/soc/qcom/cmd-db.h
index af9722223925..c8bb56e6852a 100644
--- a/include/soc/qcom/cmd-db.h
+++ b/include/soc/qcom/cmd-db.h
@@ -4,6 +4,7 @@
#ifndef __QCOM_COMMAND_DB_H__
#define __QCOM_COMMAND_DB_H__
+#include <linux/err.h>
enum cmd_db_hw_type {
CMD_DB_HW_INVALID = 0,
diff --git a/include/soc/qcom/kryo-l2-accessors.h b/include/soc/qcom/kryo-l2-accessors.h
new file mode 100644
index 000000000000..673c5344afe3
--- /dev/null
+++ b/include/soc/qcom/kryo-l2-accessors.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __SOC_ARCH_QCOM_KRYO_L2_ACCESSORS_H
+#define __SOC_ARCH_QCOM_KRYO_L2_ACCESSORS_H
+
+void kryo_l2_set_indirect_reg(u64 reg, u64 val);
+u64 kryo_l2_get_indirect_reg(u64 reg);
+
+#endif
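
A hedged sketch of a read-modify-write through the two accessors
declared above; the register index and the bit being set are
hypothetical, since the real indirect register map is SoC-specific:

#include <linux/bits.h>
#include <soc/qcom/kryo-l2-accessors.h>

#define EXAMPLE_L2_REG	0x330	/* hypothetical register index */

static void example_set_l2_bit(void)
{
	u64 val = kryo_l2_get_indirect_reg(EXAMPLE_L2_REG);

	val |= BIT_ULL(0);	/* illustrative control bit */
	kryo_l2_set_indirect_reg(EXAMPLE_L2_REG, val);
}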
diff --git a/include/soc/qcom/qcom-spmi-pmic.h b/include/soc/qcom/qcom-spmi-pmic.h
new file mode 100644
index 000000000000..72398ff44719
--- /dev/null
+++ b/include/soc/qcom/qcom-spmi-pmic.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2022 Linaro. All rights reserved.
+ * Author: Caleb Connolly <caleb.connolly@linaro.org>
+ */
+
+#ifndef __QCOM_SPMI_PMIC_H__
+#define __QCOM_SPMI_PMIC_H__
+
+#include <linux/device.h>
+
+#define COMMON_SUBTYPE 0x00
+#define PM8941_SUBTYPE 0x01
+#define PM8841_SUBTYPE 0x02
+#define PM8019_SUBTYPE 0x03
+#define PM8226_SUBTYPE 0x04
+#define PM8110_SUBTYPE 0x05
+#define PMA8084_SUBTYPE 0x06
+#define PMI8962_SUBTYPE 0x07
+#define PMD9635_SUBTYPE 0x08
+#define PM8994_SUBTYPE 0x09
+#define PMI8994_SUBTYPE 0x0a
+#define PM8916_SUBTYPE 0x0b
+#define PM8004_SUBTYPE 0x0c
+#define PM8909_SUBTYPE 0x0d
+#define PM8028_SUBTYPE 0x0e
+#define PM8901_SUBTYPE 0x0f
+#define PM8950_SUBTYPE 0x10
+#define PMI8950_SUBTYPE 0x11
+#define PM8998_SUBTYPE 0x14
+#define PMI8998_SUBTYPE 0x15
+#define PM8005_SUBTYPE 0x18
+#define PM660L_SUBTYPE 0x1A
+#define PM660_SUBTYPE 0x1B
+#define PM8150_SUBTYPE 0x1E
+#define PM8150L_SUBTYPE 0x1f
+#define PM8150B_SUBTYPE 0x20
+#define PMK8002_SUBTYPE 0x21
+#define PM8009_SUBTYPE 0x24
+#define PM8150C_SUBTYPE 0x26
+#define SMB2351_SUBTYPE 0x29
+
+#define PMI8998_FAB_ID_SMIC 0x11
+#define PMI8998_FAB_ID_GF 0x30
+
+#define PM660_FAB_ID_GF 0x0
+#define PM660_FAB_ID_TSMC 0x2
+#define PM660_FAB_ID_MX 0x3
+
+struct qcom_spmi_pmic {
+ unsigned int type;
+ unsigned int subtype;
+ unsigned int major;
+ unsigned int minor;
+ unsigned int rev2;
+ unsigned int fab_id;
+ const char *name;
+};
+
+const struct qcom_spmi_pmic *qcom_pmic_get(struct device *dev);
+
+#endif /* __QCOM_SPMI_PMIC_H__ */
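
A sketch of the intended consumer pattern, assuming qcom_pmic_get()
follows the kernel's usual ERR_PTR() error convention; the quirk
itself is illustrative:

#include <linux/device.h>
#include <linux/err.h>
#include <soc/qcom/qcom-spmi-pmic.h>

static int example_apply_pmic_quirk(struct device *dev)
{
	const struct qcom_spmi_pmic *pmic = qcom_pmic_get(dev);

	if (IS_ERR(pmic))
		return PTR_ERR(pmic);

	if (pmic->subtype == PMI8998_SUBTYPE &&
	    pmic->fab_id == PMI8998_FAB_ID_GF)
		dev_info(dev, "applying GF-fab quirk for %s\n", pmic->name);

	return 0;
}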
diff --git a/include/soc/qcom/rpmh.h b/include/soc/qcom/rpmh.h
index 619e07c75da9..bdbee1a97d36 100644
--- a/include/soc/qcom/rpmh.h
+++ b/include/soc/qcom/rpmh.h
@@ -20,9 +20,7 @@ int rpmh_write_async(const struct device *dev, enum rpmh_state state,
int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
const struct tcs_cmd *cmd, u32 *n);
-int rpmh_flush(const struct device *dev);
-
-int rpmh_invalidate(const struct device *dev);
+void rpmh_invalidate(const struct device *dev);
#else
@@ -40,11 +38,9 @@ static inline int rpmh_write_batch(const struct device *dev,
const struct tcs_cmd *cmd, u32 *n)
{ return -ENODEV; }
-static inline int rpmh_flush(const struct device *dev)
-{ return -ENODEV; }
-
-static inline int rpmh_invalidate(const struct device *dev)
-{ return -ENODEV; }
+static inline void rpmh_invalidate(const struct device *dev)
+{
+}
#endif /* CONFIG_QCOM_RPMH */
diff --git a/include/soc/qcom/spm.h b/include/soc/qcom/spm.h
new file mode 100644
index 000000000000..4951f9d8b0bd
--- /dev/null
+++ b/include/soc/qcom/spm.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2015, Linaro Ltd.
+ */
+
+#ifndef __SPM_H__
+#define __SPM_H__
+
+#include <linux/cpuidle.h>
+
+#define MAX_PMIC_DATA 2
+#define MAX_SEQ_DATA 64
+
+enum pm_sleep_mode {
+ PM_SLEEP_MODE_STBY,
+ PM_SLEEP_MODE_RET,
+ PM_SLEEP_MODE_SPC,
+ PM_SLEEP_MODE_PC,
+ PM_SLEEP_MODE_NR,
+};
+
+struct spm_reg_data {
+ const u16 *reg_offset;
+ u32 spm_cfg;
+ u32 spm_dly;
+ u32 pmic_dly;
+ u32 pmic_data[MAX_PMIC_DATA];
+ u32 avs_ctl;
+ u32 avs_limit;
+ u8 seq[MAX_SEQ_DATA];
+ u8 start_index[PM_SLEEP_MODE_NR];
+};
+
+struct spm_driver_data {
+ void __iomem *reg_base;
+ const struct spm_reg_data *reg_data;
+};
+
+void spm_set_low_power_mode(struct spm_driver_data *drv,
+ enum pm_sleep_mode mode);
+
+#endif /* __SPM_H__ */
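
A minimal sketch of how a cpuidle enter callback might use the API
above: program the SPM for standalone power collapse, then execute the
architectural WFI. cpu_do_idle() and the surrounding cpuidle wiring
are assumptions here, not part of this header:

#include <soc/qcom/spm.h>

static int example_spc_enter(struct spm_driver_data *drv)
{
	spm_set_low_power_mode(drv, PM_SLEEP_MODE_SPC);
	cpu_do_idle();	/* assumed arch WFI helper (arm) */
	return 0;
}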
diff --git a/include/soc/qcom/tcs.h b/include/soc/qcom/tcs.h
index 7a2a055ba6b0..3acca067c72b 100644
--- a/include/soc/qcom/tcs.h
+++ b/include/soc/qcom/tcs.h
@@ -30,7 +30,13 @@ enum rpmh_state {
*
* @addr: the address of the resource slv_id:18:16 | offset:0:15
* @data: the resource state request
- * @wait: wait for this request to be complete before sending the next
+ * @wait: ensure that this command is complete before returning.
+ * Setting "wait" here only makes sense during rpmh_write_batch() for
+ * active-only transfers; this is because:
+ * rpmh_write() - Always waits.
+ * (DEFINE_RPMH_MSG_ONSTACK will set .wait_for_compl)
+ * rpmh_write_async() - Never waits.
+ * (There's no request completion callback)
*/
struct tcs_cmd {
u32 addr;
@@ -43,6 +49,7 @@ struct tcs_cmd {
*
* @state: state for the request.
* @wait_for_compl: wait until we get a response from the h/w accelerator
+ * (same as setting cmd->wait for all commands in the request)
* @num_cmds: the number of @cmds in this request
* @cmds: an array of tcs_cmds
*/
diff --git a/include/soc/rockchip/pm_domains.h b/include/soc/rockchip/pm_domains.h
new file mode 100644
index 000000000000..7dbd941fc937
--- /dev/null
+++ b/include/soc/rockchip/pm_domains.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2022, The Chromium OS Authors. All rights reserved.
+ */
+
+#ifndef __SOC_ROCKCHIP_PM_DOMAINS_H__
+#define __SOC_ROCKCHIP_PM_DOMAINS_H__
+
+#ifdef CONFIG_ROCKCHIP_PM_DOMAINS
+
+int rockchip_pmu_block(void);
+void rockchip_pmu_unblock(void);
+
+#else /* CONFIG_ROCKCHIP_PM_DOMAINS */
+
+static inline int rockchip_pmu_block(void)
+{
+ return 0;
+}
+
+static inline void rockchip_pmu_unblock(void) { }
+
+#endif /* CONFIG_ROCKCHIP_PM_DOMAINS */
+
+#endif /* __SOC_ROCKCHIP_PM_DOMAINS_H__ */
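
The block/unblock pair above is meant to bracket a critical section
during which power-domain transitions must not happen; a minimal
sketch, with the critical section body left illustrative:

#include <soc/rockchip/pm_domains.h>

static int example_critical_sequence(void)
{
	int ret = rockchip_pmu_block();

	if (ret)
		return ret;

	/* ... touch hardware that must not race a domain transition ... */

	rockchip_pmu_unblock();
	return 0;
}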
diff --git a/include/soc/sifive/sifive_ccache.h b/include/soc/sifive/sifive_ccache.h
new file mode 100644
index 000000000000..4d4ed49388a0
--- /dev/null
+++ b/include/soc/sifive/sifive_ccache.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SiFive Composable Cache Controller header file
+ *
+ */
+
+#ifndef __SOC_SIFIVE_CCACHE_H
+#define __SOC_SIFIVE_CCACHE_H
+
+extern int register_sifive_ccache_error_notifier(struct notifier_block *nb);
+extern int unregister_sifive_ccache_error_notifier(struct notifier_block *nb);
+
+#define SIFIVE_CCACHE_ERR_TYPE_CE 0
+#define SIFIVE_CCACHE_ERR_TYPE_UE 1
+
+#endif /* __SOC_SIFIVE_CCACHE_H */
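
A sketch of consuming the error notifier declared above, using the
standard notifier_block API; the handler body is illustrative:

#include <linux/notifier.h>
#include <linux/printk.h>
#include <soc/sifive/sifive_ccache.h>

static int example_ccache_event(struct notifier_block *nb,
				unsigned long event, void *data)
{
	if (event == SIFIVE_CCACHE_ERR_TYPE_UE)
		pr_err("ccache: uncorrectable error\n");
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_ccache_event,
};

/* register_sifive_ccache_error_notifier(&example_nb); */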
diff --git a/include/soc/sifive/sifive_l2_cache.h b/include/soc/sifive/sifive_l2_cache.h
deleted file mode 100644
index 92ade10ed67e..000000000000
--- a/include/soc/sifive/sifive_l2_cache.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * SiFive L2 Cache Controller header file
- *
- */
-
-#ifndef __SOC_SIFIVE_L2_CACHE_H
-#define __SOC_SIFIVE_L2_CACHE_H
-
-extern int register_sifive_l2_error_notifier(struct notifier_block *nb);
-extern int unregister_sifive_l2_error_notifier(struct notifier_block *nb);
-
-#define SIFIVE_L2_ERR_TYPE_CE 0
-#define SIFIVE_L2_ERR_TYPE_UE 1
-
-#endif /* __SOC_SIFIVE_L2_CACHE_H */
diff --git a/include/soc/tegra/bpmp-abi.h b/include/soc/tegra/bpmp-abi.h
index cac6f610b3fe..53171e324d1c 100644
--- a/include/soc/tegra/bpmp-abi.h
+++ b/include/soc/tegra/bpmp-abi.h
@@ -1,30 +1,40 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
*/
-#ifndef _ABI_BPMP_ABI_H_
-#define _ABI_BPMP_ABI_H_
+#ifndef ABI_BPMP_ABI_H
+#define ABI_BPMP_ABI_H
-#ifdef LK
+#if defined(LK) || defined(BPMP_ABI_HAVE_STDC)
+#include <stddef.h>
#include <stdint.h>
#endif
-#ifndef __ABI_PACKED
-#define __ABI_PACKED __attribute__((packed))
+#ifndef BPMP_ABI_PACKED
+#ifdef __ABI_PACKED
+#define BPMP_ABI_PACKED __ABI_PACKED
+#else
+#define BPMP_ABI_PACKED __attribute__((packed))
+#endif
#endif
#ifdef NO_GCC_EXTENSIONS
-#define EMPTY char empty;
-#define EMPTY_ARRAY 1
+#define BPMP_ABI_EMPTY char empty;
+#define BPMP_ABI_EMPTY_ARRAY 1
#else
-#define EMPTY
-#define EMPTY_ARRAY 0
+#define BPMP_ABI_EMPTY
+#define BPMP_ABI_EMPTY_ARRAY 0
#endif
-#ifndef __UNION_ANON
-#define __UNION_ANON
+#ifndef BPMP_UNION_ANON
+#ifdef __UNION_ANON
+#define BPMP_UNION_ANON __UNION_ANON
+#else
+#define BPMP_UNION_ANON
+#endif
#endif
+
/**
* @file
*/
@@ -73,6 +83,7 @@
struct mrq_request {
/** @brief MRQ number of the request */
uint32_t mrq;
+
/**
* @brief Flags providing follow up directions to the receiver
*
@@ -82,7 +93,7 @@ struct mrq_request {
* | 0 | should be 1 |
*/
uint32_t flags;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup MRQ_Format
@@ -98,18 +109,18 @@ struct mrq_response {
int32_t err;
/** @brief Reserved for future use */
uint32_t flags;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup MRQ_Format
* Minimum needed size for an IPC message buffer
*/
-#define MSG_MIN_SZ 128
+#define MSG_MIN_SZ 128U
/**
* @ingroup MRQ_Format
* Minimum size guaranteed for data in an IPC message buffer
*/
-#define MSG_DATA_MIN_SZ 120
+#define MSG_DATA_MIN_SZ 120U
/**
* @ingroup MRQ_Codes
@@ -118,36 +129,36 @@ struct mrq_response {
* @{
*/
-#define MRQ_PING 0
-#define MRQ_QUERY_TAG 1
-#define MRQ_MODULE_LOAD 4
-#define MRQ_MODULE_UNLOAD 5
-#define MRQ_TRACE_MODIFY 7
-#define MRQ_WRITE_TRACE 8
-#define MRQ_THREADED_PING 9
-#define MRQ_MODULE_MAIL 11
-#define MRQ_DEBUGFS 19
-#define MRQ_RESET 20
-#define MRQ_I2C 21
-#define MRQ_CLK 22
-#define MRQ_QUERY_ABI 23
-#define MRQ_PG_READ_STATE 25
-#define MRQ_PG_UPDATE_STATE 26
-#define MRQ_THERMAL 27
-#define MRQ_CPU_VHINT 28
-#define MRQ_ABI_RATCHET 29
-#define MRQ_EMC_DVFS_LATENCY 31
-#define MRQ_TRACE_ITER 64
-#define MRQ_RINGBUF_CONSOLE 65
-#define MRQ_PG 66
-#define MRQ_CPU_NDIV_LIMITS 67
-#define MRQ_STRAP 68
-#define MRQ_UPHY 69
-#define MRQ_CPU_AUTO_CC3 70
-#define MRQ_QUERY_FW_TAG 71
-#define MRQ_FMON 72
-#define MRQ_EC 73
-#define MRQ_FBVOLT_STATUS 74
+#define MRQ_PING 0U
+#define MRQ_QUERY_TAG 1U
+#define MRQ_MODULE_LOAD 4U
+#define MRQ_MODULE_UNLOAD 5U
+#define MRQ_TRACE_MODIFY 7U
+#define MRQ_WRITE_TRACE 8U
+#define MRQ_THREADED_PING 9U
+#define MRQ_MODULE_MAIL 11U
+#define MRQ_DEBUGFS 19U
+#define MRQ_RESET 20U
+#define MRQ_I2C 21U
+#define MRQ_CLK 22U
+#define MRQ_QUERY_ABI 23U
+#define MRQ_PG_READ_STATE 25U
+#define MRQ_PG_UPDATE_STATE 26U
+#define MRQ_THERMAL 27U
+#define MRQ_CPU_VHINT 28U
+#define MRQ_ABI_RATCHET 29U
+#define MRQ_EMC_DVFS_LATENCY 31U
+#define MRQ_TRACE_ITER 64U
+#define MRQ_RINGBUF_CONSOLE 65U
+#define MRQ_PG 66U
+#define MRQ_CPU_NDIV_LIMITS 67U
+#define MRQ_STRAP 68U
+#define MRQ_UPHY 69U
+#define MRQ_CPU_AUTO_CC3 70U
+#define MRQ_QUERY_FW_TAG 71U
+#define MRQ_FMON 72U
+#define MRQ_EC 73U
+#define MRQ_DEBUG 75U
/** @} */
@@ -156,7 +167,7 @@ struct mrq_response {
* @brief Maximum MRQ code to be sent by CPU software to
* BPMP. Subject to change in future
*/
-#define MAX_CPU_MRQ_ID 74
+#define MAX_CPU_MRQ_ID 75U
/**
* @addtogroup MRQ_Payloads
@@ -223,7 +234,7 @@ struct mrq_response {
struct mrq_ping_request {
/** @brief Arbitrarily chosen value */
uint32_t challenge;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Ping
@@ -237,7 +248,7 @@ struct mrq_ping_request {
struct mrq_ping_response {
/** @brief Response to the MRQ_PING challenge */
uint32_t reply;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup MRQ_Codes
@@ -264,7 +275,7 @@ struct mrq_ping_response {
struct mrq_query_tag_request {
/** @brief Base address to store the firmware tag */
uint32_t addr;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
@@ -291,15 +302,15 @@ struct mrq_query_tag_request {
struct mrq_query_fw_tag_response {
/** @brief Array to store tag information */
uint8_t tag[32];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup MRQ_Codes
* @def MRQ_MODULE_LOAD
* @brief Dynamically load a BPMP code module
*
- * * Platforms: T210, T214, T186
- * @cond (bpmp_t210 || bpmp_t214 || bpmp_t186)
+ * * Platforms: T210, T210B01, T186
+ * @cond (bpmp_t210 || bpmp_t210b01 || bpmp_t186)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_module_load_request
@@ -327,11 +338,11 @@ struct mrq_query_fw_tag_response {
*
*/
struct mrq_module_load_request {
- /** @brief Base address of the code to load. Treated as (void *) */
- uint32_t phys_addr; /* (void *) */
+ /** @brief Base address of the code to load */
+ uint32_t phys_addr;
/** @brief Size in bytes of code to load */
uint32_t size;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Module
@@ -342,7 +353,7 @@ struct mrq_module_load_request {
struct mrq_module_load_response {
/** @brief Handle to the loaded module */
uint32_t base;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @endcond*/
/**
@@ -350,8 +361,8 @@ struct mrq_module_load_response {
* @def MRQ_MODULE_UNLOAD
* @brief Unload a previously loaded code module
*
- * * Platforms: T210, T214, T186
- * @cond (bpmp_t210 || bpmp_t214 || bpmp_t186)
+ * * Platforms: T210, T210B01, T186
+ * @cond (bpmp_t210 || bpmp_t210b01 || bpmp_t186)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_module_unload_request
@@ -370,7 +381,7 @@ struct mrq_module_load_response {
struct mrq_module_unload_request {
/** @brief Handle of the module to unload */
uint32_t base;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @endcond*/
/**
@@ -378,6 +389,8 @@ struct mrq_module_unload_request {
* @def MRQ_TRACE_MODIFY
* @brief Modify the set of enabled trace events
*
+ * @deprecated
+ *
* * Platforms: All
* * Initiators: CCPLEX
* * Targets: BPMP
@@ -400,7 +413,7 @@ struct mrq_trace_modify_request {
uint32_t clr;
/** @brief Bit mask of trace events to enable */
uint32_t set;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Trace
@@ -414,13 +427,15 @@ struct mrq_trace_modify_request {
struct mrq_trace_modify_response {
/** @brief Bit mask of trace event enable states */
uint32_t mask;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup MRQ_Codes
* @def MRQ_WRITE_TRACE
* @brief Write trace data to a buffer
*
+ * @deprecated
+ *
* * Platforms: All
* * Initiators: CCPLEX
* * Targets: BPMP
@@ -454,7 +469,7 @@ struct mrq_write_trace_request {
uint32_t area;
/** @brief Size in bytes of the output buffer */
uint32_t size;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Trace
@@ -471,25 +486,25 @@ struct mrq_write_trace_response {
* drained to the output buffer. Value is 0 otherwise.
*/
uint32_t eof;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct mrq_threaded_ping_request {
uint32_t challenge;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct mrq_threaded_ping_response {
uint32_t reply;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup MRQ_Codes
* @def MRQ_MODULE_MAIL
* @brief Send a message to a loadable module
*
- * * Platforms: T210, T214, T186
- * @cond (bpmp_t210 || bpmp_t214 || bpmp_t186)
+ * * Platforms: T210, T210B01, T186
+ * @cond (bpmp_t210 || bpmp_t210b01 || bpmp_t186)
* * Initiators: Any
* * Targets: BPMP
* * Request Payload: @ref mrq_module_mail_request
@@ -510,8 +525,8 @@ struct mrq_module_mail_request {
* The length of data[ ] is unknown to the BPMP core firmware
* but it is limited to the size of an IPC message.
*/
- uint8_t data[EMPTY_ARRAY];
-} __ABI_PACKED;
+ uint8_t data[BPMP_ABI_EMPTY_ARRAY];
+} BPMP_ABI_PACKED;
/**
* @ingroup Module
@@ -523,8 +538,8 @@ struct mrq_module_mail_response {
* The length of data[ ] is unknown to the BPMP core firmware
* but it is limited to the size of an IPC message.
*/
- uint8_t data[EMPTY_ARRAY];
-} __ABI_PACKED;
+ uint8_t data[BPMP_ABI_EMPTY_ARRAY];
+} BPMP_ABI_PACKED;
/** @endcond */
/**
@@ -532,6 +547,8 @@ struct mrq_module_mail_response {
* @def MRQ_DEBUGFS
* @brief Interact with BPMP's debugfs file nodes
*
+ * @deprecated use MRQ_DEBUG instead.
+ *
* * Platforms: T186, T194
* * Initiators: Any
* * Targets: BPMP
@@ -587,7 +604,7 @@ struct cmd_debugfs_fileop_request {
uint32_t dataaddr;
/** @brief Length in bytes of data buffer */
uint32_t datalen;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Debugfs
@@ -598,7 +615,7 @@ struct cmd_debugfs_dumpdir_request {
uint32_t dataaddr;
/** @brief Length in bytes of data buffer */
uint32_t datalen;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Debugfs
@@ -609,7 +626,7 @@ struct cmd_debugfs_fileop_response {
uint32_t reserved;
/** @brief Number of bytes read from or written to data buffer */
uint32_t nbytes;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Debugfs
@@ -620,7 +637,7 @@ struct cmd_debugfs_dumpdir_response {
uint32_t reserved;
/** @brief Number of bytes read from or written to data buffer */
uint32_t nbytes;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Debugfs
@@ -643,8 +660,8 @@ struct mrq_debugfs_request {
union {
struct cmd_debugfs_fileop_request fop;
struct cmd_debugfs_dumpdir_request dumpdir;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @ingroup Debugfs
@@ -659,8 +676,8 @@ struct mrq_debugfs_response {
struct cmd_debugfs_fileop_response fop;
/** @brief Response data for CMD_DEBUGFS_DUMPDIR command */
struct cmd_debugfs_dumpdir_response dumpdir;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @addtogroup Debugfs
@@ -673,6 +690,190 @@ struct mrq_debugfs_response {
/**
* @ingroup MRQ_Codes
+ * @def MRQ_DEBUG
+ * @brief Interact with BPMP's debugfs file nodes. The message payload
+ * is used for exchanging data. This is functionally equivalent to
+ * @ref MRQ_DEBUGFS, but the data is exchanged differently: when
+ * software running on the CPU reads a debugfs file, the file path and
+ * the data read back are carried in the message payload. Since the
+ * message payload size is limited, a debugfs file transaction may
+ * require multiple frames of data exchanged between the BPMP and the
+ * CPU before the transaction completes.
+ *
+ * * Platforms: T194
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_debug_request
+ * * Response Payload: @ref mrq_debug_response
+ */
+
+/** @ingroup Debugfs */
+enum mrq_debug_commands {
+ /** @brief Open required file for read operation */
+ CMD_DEBUG_OPEN_RO = 0,
+ /** @brief Open required file for write operation */
+ CMD_DEBUG_OPEN_WO = 1,
+ /** @brief Perform read */
+ CMD_DEBUG_READ = 2,
+ /** @brief Perform write */
+ CMD_DEBUG_WRITE = 3,
+ /** @brief Close file */
+ CMD_DEBUG_CLOSE = 4,
+ /** @brief Not a command */
+ CMD_DEBUG_MAX
+};
+
+/**
+ * @ingroup Debugfs
+ * @brief Maximum number of files that can be open at a given time
+ */
+#define DEBUG_MAX_OPEN_FILES 1
+
+/**
+ * @ingroup Debugfs
+ * @brief Maximum size of the null-terminated file name string in bytes.
+ * The value is derived from the memory available in the message
+ * payload when using @ref cmd_debug_fopen_request.
+ * The 4 subtracted bytes correspond to the size of the 32-bit
+ * @ref mrq_debug_commands field in @ref mrq_debug_request:
+ * 120 - 4 (32-bit dbg_cmd) = 116
+ */
+#define DEBUG_FNAME_MAX_SZ (MSG_DATA_MIN_SZ - 4)
+
+/**
+ * @ingroup Debugfs
+ * @brief Parameters for CMD_DEBUG_OPEN command
+ */
+struct cmd_debug_fopen_request {
+ /** @brief File name - Null-terminated string with maximum
+ * length @ref DEBUG_FNAME_MAX_SZ
+ */
+ char name[DEBUG_FNAME_MAX_SZ];
+} BPMP_ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief Response data for CMD_DEBUG_OPEN_RO/WO command
+ */
+struct cmd_debug_fopen_response {
+ /** @brief Identifier for file access */
+ uint32_t fd;
+ /** @brief Data length. File data size for READ command.
+ * Maximum allowed length for WRITE command
+ */
+ uint32_t datalen;
+} BPMP_ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief Parameters for CMD_DEBUG_READ command
+ */
+struct cmd_debug_fread_request {
+ /** @brief File access identifier received in response
+ * to CMD_DEBUG_OPEN_RO request
+ */
+ uint32_t fd;
+} BPMP_ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief Maximum size of read data in bytes.
+ * Value is derived from memory available in message payload while
+ * using @ref cmd_debug_fread_response.
+ */
+#define DEBUG_READ_MAX_SZ (MSG_DATA_MIN_SZ - 4)
+
+/**
+ * @ingroup Debugfs
+ * @brief Response data for CMD_DEBUG_READ command
+ */
+struct cmd_debug_fread_response {
+ /** @brief Size of data provided in this response in bytes */
+ uint32_t readlen;
+ /** @brief File data from seek position */
+ char data[DEBUG_READ_MAX_SZ];
+} BPMP_ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief Maximum size of write data in bytes.
+ * Value is derived from memory available in message payload while
+ * using @ref cmd_debug_fwrite_request.
+ */
+#define DEBUG_WRITE_MAX_SZ (MSG_DATA_MIN_SZ - 12)
+
+/**
+ * @ingroup Debugfs
+ * @brief Parameters for CMD_DEBUG_WRITE command
+ */
+struct cmd_debug_fwrite_request {
+ /** @brief File access identifier received in response
+ * to CMD_DEBUG_OPEN_WO request
+ */
+ uint32_t fd;
+ /** @brief Size of write data in bytes */
+ uint32_t datalen;
+ /** @brief Data to be written */
+ char data[DEBUG_WRITE_MAX_SZ];
+} BPMP_ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief Parameters for CMD_DEBUG_CLOSE command
+ */
+struct cmd_debug_fclose_request {
+ /** @brief File access identifier received in response
+ * to CMD_DEBUG_OPEN_RO/WO request
+ */
+ uint32_t fd;
+} BPMP_ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief Request with #MRQ_DEBUG.
+ *
+ * The sender of an MRQ_DEBUG message uses #cmd to specify a debugfs
+ * command to execute. Legal commands are the values of @ref
+ * mrq_debug_commands. Each command requires a specific additional
+ * payload of data.
+ *
+ * |command |payload|
+ * |-------------------|-------|
+ * |CMD_DEBUG_OPEN_RO |fop |
+ * |CMD_DEBUG_OPEN_WO |fop |
+ * |CMD_DEBUG_READ |frd |
+ * |CMD_DEBUG_WRITE |fwr |
+ * |CMD_DEBUG_CLOSE |fcl |
+ */
+struct mrq_debug_request {
+ /** @brief Sub-command (@ref mrq_debug_commands) */
+ uint32_t cmd;
+ union {
+ /** @brief Request payload for CMD_DEBUG_OPEN_RO/WO command */
+ struct cmd_debug_fopen_request fop;
+ /** @brief Request payload for CMD_DEBUG_READ command */
+ struct cmd_debug_fread_request frd;
+ /** @brief Request payload for CMD_DEBUG_WRITE command */
+ struct cmd_debug_fwrite_request fwr;
+ /** @brief Request payload for CMD_DEBUG_CLOSE command */
+ struct cmd_debug_fclose_request fcl;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ */
+struct mrq_debug_response {
+ union {
+ /** @brief Response data for CMD_DEBUG_OPEN_RO/WO command */
+ struct cmd_debug_fopen_response fop;
+ /** @brief Response data for CMD_DEBUG_READ command */
+ struct cmd_debug_fread_response frd;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
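
A hedged sketch of the multi-frame transaction the MRQ_DEBUG
description above implies: open read-only, pull frames until the
advertised datalen is consumed, then close. bpmp_transfer() is a
hypothetical stand-in for the platform's MRQ transport (a kernel
driver would go through tegra_bpmp_transfer()):

#include <linux/string.h>
#include <soc/tegra/bpmp-abi.h>

/* Hypothetical transport helper; not part of this ABI. */
extern int bpmp_transfer(uint32_t mrq, void *req, void *resp);

static int example_debug_read(const char *path)
{
	struct mrq_debug_request req = { .cmd = CMD_DEBUG_OPEN_RO };
	struct mrq_debug_response resp;
	uint32_t remaining, fd;
	int err;

	strscpy(req.fop.name, path, DEBUG_FNAME_MAX_SZ);
	err = bpmp_transfer(MRQ_DEBUG, &req, &resp);
	if (err)
		return err;

	fd = resp.fop.fd;
	remaining = resp.fop.datalen;

	while (remaining > 0) {
		req.cmd = CMD_DEBUG_READ;
		req.frd.fd = fd;
		err = bpmp_transfer(MRQ_DEBUG, &req, &resp);
		if (err || !resp.frd.readlen)
			break;
		/* consume resp.frd.data[0..resp.frd.readlen) here */
		remaining -= resp.frd.readlen;
	}

	req.cmd = CMD_DEBUG_CLOSE;
	req.fcl.fd = fd;
	bpmp_transfer(MRQ_DEBUG, &req, &resp);
	return err;
}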
+/**
+ * @ingroup MRQ_Codes
* @def MRQ_RESET
* @brief Reset an IP block
*
@@ -687,14 +888,41 @@ struct mrq_debugfs_response {
*/
enum mrq_reset_commands {
- /** @brief Assert module reset */
+ /**
+ * @brief Assert module reset
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_EINVAL if mrq_reset_request::reset_id is invalid @n
+ * -#BPMP_EACCES if mrq master is not an owner of target domain reset @n
+ * -#BPMP_ENOTSUP if target domain h/w state does not allow reset
+ */
CMD_RESET_ASSERT = 1,
- /** @brief Deassert module reset */
+ /**
+ * @brief Deassert module reset
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_EINVAL if mrq_reset_request::reset_id is invalid @n
+ * -#BPMP_EACCES if mrq master is not an owner of target domain reset @n
+ * -#BPMP_ENOTSUP if target domain h/w state does not allow reset
+ */
CMD_RESET_DEASSERT = 2,
- /** @brief Assert and deassert the module reset */
+ /**
+ * @brief Assert and deassert the module reset
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_EINVAL if mrq_reset_request::reset_id is invalid @n
+ * -#BPMP_EACCES if mrq master is not an owner of target domain reset @n
+ * -#BPMP_ENOTSUP if target domain h/w state does not allow reset
+ */
CMD_RESET_MODULE = 3,
- /** @brief Get the highest reset ID */
+ /**
+ * @brief Get the highest reset ID
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_ENODEV if no reset domains are supported (number of IDs is 0)
+ */
CMD_RESET_GET_MAX_ID = 4,
+
/** @brief Not part of ABI and subject to change */
CMD_RESET_MAX,
};
@@ -703,14 +931,14 @@ enum mrq_reset_commands {
* @brief Request with MRQ_RESET
*
* Used by the sender of an #MRQ_RESET message to request BPMP to
- * assert or or deassert a given reset line.
+ * assert or deassert a given reset line.
*/
struct mrq_reset_request {
/** @brief Reset action to perform (@ref mrq_reset_commands) */
uint32_t cmd;
/** @brief ID of the reset to be affected */
uint32_t reset_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Response for MRQ_RESET sub-command CMD_RESET_GET_MAX_ID. When
@@ -720,7 +948,7 @@ struct mrq_reset_request {
struct cmd_reset_get_max_id_response {
/** @brief Max reset id */
uint32_t max_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Response with MRQ_RESET
@@ -739,8 +967,8 @@ struct cmd_reset_get_max_id_response {
struct mrq_reset_response {
union {
struct cmd_reset_get_max_id_response reset_get_max_id;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/** @} */
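
For completeness, a sketch of pulsing a module reset with
CMD_RESET_MODULE; reset_id is a placeholder, and bpmp_transfer() is
the same hypothetical transport helper as in the MRQ_DEBUG sketch
above:

#include <soc/tegra/bpmp-abi.h>

extern int bpmp_transfer(uint32_t mrq, void *req, void *resp);	/* hypothetical */

static int example_reset_module(uint32_t reset_id)
{
	struct mrq_reset_request req = {
		.cmd = CMD_RESET_MODULE,
		.reset_id = reset_id,
	};
	struct mrq_reset_response resp;

	return bpmp_transfer(MRQ_RESET, &req, &resp);
}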
@@ -758,17 +986,17 @@ struct mrq_reset_response {
* @addtogroup I2C
* @{
*/
-#define TEGRA_I2C_IPC_MAX_IN_BUF_SIZE (MSG_DATA_MIN_SZ - 12)
-#define TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE (MSG_DATA_MIN_SZ - 4)
+#define TEGRA_I2C_IPC_MAX_IN_BUF_SIZE (MSG_DATA_MIN_SZ - 12U)
+#define TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE (MSG_DATA_MIN_SZ - 4U)
-#define SERIALI2C_TEN 0x0010
-#define SERIALI2C_RD 0x0001
-#define SERIALI2C_STOP 0x8000
-#define SERIALI2C_NOSTART 0x4000
-#define SERIALI2C_REV_DIR_ADDR 0x2000
-#define SERIALI2C_IGNORE_NAK 0x1000
-#define SERIALI2C_NO_RD_ACK 0x0800
-#define SERIALI2C_RECV_LEN 0x0400
+#define SERIALI2C_TEN 0x0010U
+#define SERIALI2C_RD 0x0001U
+#define SERIALI2C_STOP 0x8000U
+#define SERIALI2C_NOSTART 0x4000U
+#define SERIALI2C_REV_DIR_ADDR 0x2000U
+#define SERIALI2C_IGNORE_NAK 0x1000U
+#define SERIALI2C_NO_RD_ACK 0x0800U
+#define SERIALI2C_RECV_LEN 0x0400U
enum {
CMD_I2C_XFER = 1
@@ -798,7 +1026,7 @@ struct serial_i2c_request {
uint16_t len;
/** @brief For write transactions only, #len bytes of data */
uint8_t data[];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Trigger one or more i2c transactions
@@ -812,7 +1040,7 @@ struct cmd_i2c_xfer_request {
/** @brief Serialized packed instances of @ref serial_i2c_request*/
uint8_t data_buf[TEGRA_I2C_IPC_MAX_IN_BUF_SIZE];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Container for data read from the i2c bus
@@ -826,7 +1054,7 @@ struct cmd_i2c_xfer_response {
uint32_t data_size;
/** @brief I2c read data */
uint8_t data_buf[TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Request with #MRQ_I2C
@@ -836,14 +1064,25 @@ struct mrq_i2c_request {
uint32_t cmd;
/** @brief Parameters of the transfer request */
struct cmd_i2c_xfer_request xfer;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Response to #MRQ_I2C
+ *
+ * mrq_response::err is
+ * 0: Success
+ * -#BPMP_EBADCMD: if mrq_i2c_request::cmd is other than 1
+ * -#BPMP_EINVAL: if cmd_i2c_xfer_request does not contain correctly formatted request
+ * -#BPMP_ENODEV: if cmd_i2c_xfer_request::bus_id is not supported by BPMP
+ * -#BPMP_EACCES: if i2c transaction is not allowed due to firewall rules
+ * -#BPMP_ETIMEDOUT: if i2c transaction times out
+ * -#BPMP_ENXIO: if i2c slave device does not reply with ACK to the transaction
+ * -#BPMP_EAGAIN: if ARB_LOST condition is detected by the i2c controller
+ * -#BPMP_EIO: any other i2c controller error code than NO_ACK or ARB_LOST
*/
struct mrq_i2c_response {
struct cmd_i2c_xfer_response xfer;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @} */
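
A hedged sketch of serializing a register read (a pointer write
followed by a read) into a single MRQ_I2C request. The bus_id and
data_size member names are assumptions based on the layout implied
above, and the device address handling is illustrative:

#include <linux/string.h>
#include <soc/tegra/bpmp-abi.h>

static size_t example_pack_reg_read(struct mrq_i2c_request *req,
				    uint16_t dev_addr, uint8_t reg)
{
	uint8_t *p = req->xfer.data_buf;
	struct serial_i2c_request wr = {
		.addr = dev_addr, .flags = 0, .len = 1,
	};
	struct serial_i2c_request rd = {
		.addr = dev_addr, .flags = SERIALI2C_RD, .len = 4,
	};

	req->cmd = CMD_I2C_XFER;
	req->xfer.bus_id = 0;			/* assumed member name */

	memcpy(p, &wr, sizeof(wr)); p += sizeof(wr);
	*p++ = reg;				/* one payload byte for the write */
	memcpy(p, &rd, sizeof(rd)); p += sizeof(rd);

	req->xfer.data_size = p - req->xfer.data_buf;	/* assumed member name */
	return req->xfer.data_size;
}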
@@ -876,90 +1115,105 @@ enum {
CMD_CLK_MAX,
};
-#define BPMP_CLK_HAS_MUX (1 << 0)
-#define BPMP_CLK_HAS_SET_RATE (1 << 1)
-#define BPMP_CLK_IS_ROOT (1 << 2)
+#define BPMP_CLK_HAS_MUX (1U << 0U)
+#define BPMP_CLK_HAS_SET_RATE (1U << 1U)
+#define BPMP_CLK_IS_ROOT (1U << 2U)
+#define BPMP_CLK_IS_VAR_ROOT (1U << 3U)
-#define MRQ_CLK_NAME_MAXLEN 40
-#define MRQ_CLK_MAX_PARENTS 16
+#define MRQ_CLK_NAME_MAXLEN 40U
+#define MRQ_CLK_MAX_PARENTS 16U
/** @private */
struct cmd_clk_get_rate_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
struct cmd_clk_get_rate_response {
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_clk_set_rate_request {
int32_t unused;
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_clk_set_rate_response {
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_clk_round_rate_request {
int32_t unused;
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_clk_round_rate_response {
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_get_parent_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
struct cmd_clk_get_parent_response {
uint32_t parent_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_clk_set_parent_request {
uint32_t parent_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_clk_set_parent_response {
uint32_t parent_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_is_enabled_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
+/**
+ * @brief Response data to #MRQ_CLK sub-command CMD_CLK_IS_ENABLED
+ */
struct cmd_clk_is_enabled_response {
+ /**
+ * @brief The state of the clock that has been successfully
+ * requested with CMD_CLK_ENABLE or CMD_CLK_DISABLE by the
+ * master invoking the command earlier.
+ *
+ * The state may not reflect the physical state of the clock
+ * if there are some other masters requesting it to be
+ * enabled.
+ *
+ * Value 0 is disabled, all other values indicate enabled.
+ */
int32_t state;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_enable_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_enable_response {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_disable_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_disable_response {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_get_all_info_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
struct cmd_clk_get_all_info_response {
uint32_t flags;
@@ -967,25 +1221,25 @@ struct cmd_clk_get_all_info_response {
uint32_t parents[MRQ_CLK_MAX_PARENTS];
uint8_t num_parents;
uint8_t name[MRQ_CLK_NAME_MAXLEN];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_get_max_clk_id_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
struct cmd_clk_get_max_clk_id_response {
uint32_t max_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_get_fmax_at_vmin_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
struct cmd_clk_get_fmax_at_vmin_response {
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Clocks
@@ -1040,8 +1294,8 @@ struct mrq_clk_request {
struct cmd_clk_get_max_clk_id_request clk_get_max_clk_id;
/** @private */
struct cmd_clk_get_fmax_at_vmin_request clk_get_fmax_at_vmin;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @ingroup Clocks
@@ -1082,8 +1336,8 @@ struct mrq_clk_response {
struct cmd_clk_get_all_info_response clk_get_all_info;
struct cmd_clk_get_max_clk_id_response clk_get_max_clk_id;
struct cmd_clk_get_fmax_at_vmin_response clk_get_fmax_at_vmin;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/** @} */
@@ -1109,7 +1363,7 @@ struct mrq_clk_response {
struct mrq_query_abi_request {
/** @brief MRQ code to query */
uint32_t mrq;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup ABI_info
@@ -1121,7 +1375,7 @@ struct mrq_query_abi_request {
struct mrq_query_abi_response {
/** @brief 0 if queried MRQ is supported. Else, -#BPMP_ENODEV */
int32_t status;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup MRQ_Codes
@@ -1146,7 +1400,7 @@ struct mrq_query_abi_response {
struct mrq_pg_read_state_request {
/** @brief ID of partition */
uint32_t partition_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Powergating
@@ -1161,7 +1415,7 @@ struct mrq_pg_read_state_response {
* * 1 : on
*/
uint32_t logic_state;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @endcond*/
/** @} */
@@ -1211,7 +1465,7 @@ struct mrq_pg_update_state_request {
* @ref logic_state == 0x3)
*/
uint32_t clock_state;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @endcond*/
/**
@@ -1307,25 +1561,38 @@ enum pg_states {
struct cmd_pg_query_abi_request {
/** @ref mrq_pg_cmd */
uint32_t type;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_pg_set_state_request {
/** @ref pg_states */
uint32_t state;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
+/**
+ * @brief Response data to #MRQ_PG sub command #CMD_PG_GET_STATE
+ */
struct cmd_pg_get_state_response {
- /** @ref pg_states */
+ /**
+ * @brief The state of the power partition that has been
+ * successfully requested by the master earlier using #MRQ_PG
+ * command #CMD_PG_SET_STATE.
+ *
+ * The state may not reflect the physical state of the power
+ * partition if there are some other masters requesting it to
+ * be enabled.
+ *
+ * See @ref pg_states for possible values
+ */
uint32_t state;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_pg_get_name_response {
uint8_t name[MRQ_PG_NAME_MAXLEN];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_pg_get_max_id_response {
uint32_t max_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Request with #MRQ_PG
@@ -1350,8 +1617,8 @@ struct mrq_pg_request {
union {
struct cmd_pg_query_abi_request query_abi;
struct cmd_pg_set_state_request set_state;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @brief Response to MRQ_PG
@@ -1373,8 +1640,8 @@ struct mrq_pg_response {
struct cmd_pg_get_state_response get_state;
struct cmd_pg_get_name_response get_name;
struct cmd_pg_get_max_id_response get_max_id;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/** @} */
@@ -1463,6 +1730,20 @@ enum mrq_thermal_host_to_bpmp_cmd {
*/
CMD_THERMAL_GET_NUM_ZONES = 3,
+ /**
+ * @brief Get the thermtrip of the specified zone.
+ *
+ * Host needs to supply request parameters.
+ *
+ * mrq_response::err is
+ * * 0: Valid zone information returned.
+ * * -#BPMP_EINVAL: Invalid request parameters.
+ * * -#BPMP_ENOENT: No driver registered for thermal zone.
+ * * -#BPMP_ERANGE: Thermtrip is invalid or disabled.
+ * * -#BPMP_EFAULT: Problem reading zone information.
+ */
+ CMD_THERMAL_GET_THERMTRIP = 4,
+
/** @brief: number of supported host-to-bpmp commands. May
* increase in future
*/
@@ -1493,7 +1774,7 @@ enum mrq_thermal_bpmp_to_host_cmd {
*/
struct cmd_thermal_query_abi_request {
uint32_t type;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/*
* Host->BPMP request data for request type CMD_THERMAL_GET_TEMP
@@ -1502,7 +1783,7 @@ struct cmd_thermal_query_abi_request {
*/
struct cmd_thermal_get_temp_request {
uint32_t zone;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/*
* BPMP->Host reply data for request CMD_THERMAL_GET_TEMP
@@ -1515,7 +1796,7 @@ struct cmd_thermal_get_temp_request {
*/
struct cmd_thermal_get_temp_response {
int32_t temp;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/*
* Host->BPMP request data for request type CMD_THERMAL_SET_TRIP
@@ -1530,7 +1811,7 @@ struct cmd_thermal_set_trip_request {
int32_t low;
int32_t high;
uint32_t enabled;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/*
* BPMP->Host request data for request type CMD_THERMAL_HOST_TRIP_REACHED
@@ -1539,7 +1820,7 @@ struct cmd_thermal_set_trip_request {
*/
struct cmd_thermal_host_trip_reached_request {
uint32_t zone;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/*
* BPMP->Host reply data for request type CMD_THERMAL_GET_NUM_ZONES
@@ -1549,7 +1830,25 @@ struct cmd_thermal_host_trip_reached_request {
*/
struct cmd_thermal_get_num_zones_response {
uint32_t num;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
+
+/*
+ * Host->BPMP request data for request type CMD_THERMAL_GET_THERMTRIP
+ *
+ * zone: Number of thermal zone.
+ */
+struct cmd_thermal_get_thermtrip_request {
+ uint32_t zone;
+} BPMP_ABI_PACKED;
+
+/*
+ * BPMP->Host reply data for request CMD_THERMAL_GET_THERMTRIP
+ *
+ * thermtrip: HW shutdown temperature in millicelsius.
+ */
+struct cmd_thermal_get_thermtrip_response {
+ int32_t thermtrip;
+} BPMP_ABI_PACKED;
/*
* Host->BPMP request data.
@@ -1565,8 +1864,9 @@ struct mrq_thermal_host_to_bpmp_request {
struct cmd_thermal_query_abi_request query_abi;
struct cmd_thermal_get_temp_request get_temp;
struct cmd_thermal_set_trip_request set_trip;
- } __UNION_ANON;
-} __ABI_PACKED;
+ struct cmd_thermal_get_thermtrip_request get_thermtrip;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/*
* BPMP->Host request data.
@@ -1578,16 +1878,17 @@ struct mrq_thermal_bpmp_to_host_request {
uint32_t type;
union {
struct cmd_thermal_host_trip_reached_request host_trip_reached;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/*
* Data in reply to a Host->BPMP request.
*/
union mrq_thermal_bpmp_to_host_response {
struct cmd_thermal_get_temp_response get_temp;
+ struct cmd_thermal_get_thermtrip_response get_thermtrip;
struct cmd_thermal_get_num_zones_response get_num_zones;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @} */
/**
@@ -1619,7 +1920,7 @@ struct mrq_cpu_vhint_request {
uint32_t addr;
/** @brief ID of the cluster whose data is requested */
uint32_t cluster_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Description of the CPU v/f relation
@@ -1646,7 +1947,7 @@ struct cpu_vhint_data {
uint16_t vindex_div;
/** reserved for future use */
uint16_t reserved[328];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @endcond */
/** @} */
@@ -1735,11 +2036,11 @@ struct mrq_abi_ratchet_response {
* @brief Used by @ref mrq_emc_dvfs_latency_response
*/
struct emc_dvfs_latency {
- /** @brief EMC frequency in kHz */
+ /** @brief EMC DVFS node frequency in kHz */
uint32_t freq;
/** @brief EMC DVFS latency in nanoseconds */
uint32_t latency;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
#define EMC_DVFS_LATENCY_MAX_SIZE 14
/**
@@ -1748,9 +2049,9 @@ struct emc_dvfs_latency {
struct mrq_emc_dvfs_latency_response {
/** @brief The number valid entries in #pairs */
uint32_t num_pairs;
- /** @brief EMC <frequency, latency> information */
+ /** @brief EMC DVFS node <frequency, latency> information */
struct emc_dvfs_latency pairs[EMC_DVFS_LATENCY_MAX_SIZE];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @} */
@@ -1775,7 +2076,7 @@ struct mrq_emc_dvfs_latency_response {
struct mrq_cpu_ndiv_limits_request {
/** @brief Enum cluster_id */
uint32_t cluster_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Response to #MRQ_CPU_NDIV_LIMITS
@@ -1791,7 +2092,7 @@ struct mrq_cpu_ndiv_limits_response {
uint16_t ndiv_max;
/** @brief Minimum allowed NDIV value */
uint16_t ndiv_min;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @} */
/** @endcond */
@@ -1823,7 +2124,7 @@ struct mrq_cpu_ndiv_limits_response {
struct mrq_cpu_auto_cc3_request {
/** @brief Enum cluster_id (logical cluster id, known to CCPLEX s/w) */
uint32_t cluster_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Response to #MRQ_CPU_AUTO_CC3
@@ -1837,7 +2138,7 @@ struct mrq_cpu_auto_cc3_response {
* - bit [0] if "1" auto-CC3 is allowed, if "0" auto-CC3 is not allowed
*/
uint32_t auto_cc3_config;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @} */
/** @endcond */
@@ -1847,6 +2148,8 @@ struct mrq_cpu_auto_cc3_response {
* @def MRQ_TRACE_ITER
* @brief Manage the trace iterator
*
+ * @deprecated
+ *
* * Platforms: All
* * Initiators: CCPLEX
* * Targets: BPMP
@@ -1868,7 +2171,7 @@ enum {
struct mrq_trace_iter_request {
/** @brief TRACE_ITER_INIT or TRACE_ITER_CLEAN */
uint32_t cmd;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @} */
@@ -1944,12 +2247,12 @@ enum mrq_ringbuf_console_host_to_bpmp_cmd {
struct cmd_ringbuf_console_query_abi_req {
/** @brief Command identifier to be queried */
uint32_t cmd;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_ringbuf_console_query_abi_resp {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/**
* @ingroup RingbufConsole
@@ -1960,7 +2263,7 @@ struct cmd_ringbuf_console_read_req {
* @brief Number of bytes requested to be read from the BPMP TX buffer
*/
uint8_t len;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup RingbufConsole
@@ -1971,7 +2274,7 @@ struct cmd_ringbuf_console_read_resp {
uint8_t data[MRQ_RINGBUF_CONSOLE_MAX_READ_LEN];
/** @brief Number of bytes in cmd_ringbuf_console_read_resp::data */
uint8_t len;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup RingbufConsole
@@ -1982,7 +2285,7 @@ struct cmd_ringbuf_console_write_req {
uint8_t data[MRQ_RINGBUF_CONSOLE_MAX_WRITE_LEN];
/** @brief Number of bytes in cmd_ringbuf_console_write_req::data */
uint8_t len;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup RingbufConsole
@@ -1993,12 +2296,12 @@ struct cmd_ringbuf_console_write_resp {
uint32_t space_avail;
/** @brief Number of bytes that were written to the BPMP RX buffer */
uint8_t len;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_ringbuf_console_get_fifo_req {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/**
* @ingroup RingbufConsole
@@ -2013,7 +2316,7 @@ struct cmd_ringbuf_console_get_fifo_resp {
uint64_t bpmp_tx_tail_addr;
/** @brief Length of the BPMP TX buffer */
uint32_t bpmp_tx_buf_len;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup RingbufConsole
@@ -2033,8 +2336,8 @@ struct mrq_ringbuf_console_host_to_bpmp_request {
struct cmd_ringbuf_console_read_req read;
struct cmd_ringbuf_console_write_req write;
struct cmd_ringbuf_console_get_fifo_req get_fifo;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @ingroup RingbufConsole
@@ -2047,7 +2350,7 @@ union mrq_ringbuf_console_bpmp_to_host_response {
struct cmd_ringbuf_console_read_resp read;
struct cmd_ringbuf_console_write_resp write;
struct cmd_ringbuf_console_get_fifo_resp get_fifo;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @} */
/**
@@ -2091,7 +2394,7 @@ struct mrq_strap_request {
uint32_t id;
/** @brief Desired value for strap (if cmd is #STRAP_SET) */
uint32_t value;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @defgroup Strap_Ids Strap Identifiers
@@ -2119,6 +2422,7 @@ enum {
CMD_UPHY_PCIE_LANE_MARGIN_STATUS = 2,
CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT = 3,
CMD_UPHY_PCIE_CONTROLLER_STATE = 4,
+ CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF = 5,
CMD_UPHY_MAX,
};
@@ -2133,23 +2437,28 @@ struct cmd_uphy_margin_control_request {
uint32_t y;
/** @brief Set number of bit blocks for each margin section */
uint32_t nblks;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_uphy_margin_status_response {
/** @brief Number of errors observed */
uint32_t status;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_uphy_ep_controller_pll_init_request {
/** @brief EP controller number, valid: 0, 4, 5 */
uint8_t ep_controller;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_uphy_pcie_controller_state_request {
/** @brief PCIE controller number, valid: 0, 1, 2, 3, 4 */
uint8_t pcie_controller;
uint8_t enable;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
+
+struct cmd_uphy_ep_controller_pll_off_request {
+ /** @brief EP controller number, valid: 0, 4, 5 */
+ uint8_t ep_controller;
+} BPMP_ABI_PACKED;
/**
* @ingroup UPHY
@@ -2165,6 +2474,7 @@ struct cmd_uphy_pcie_controller_state_request {
* |CMD_UPHY_PCIE_LANE_MARGIN_STATUS | |
* |CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT |cmd_uphy_ep_controller_pll_init_request |
* |CMD_UPHY_PCIE_CONTROLLER_STATE |cmd_uphy_pcie_controller_state_request |
+ * |CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF |cmd_uphy_ep_controller_pll_off_request |
*
*/
@@ -2178,8 +2488,9 @@ struct mrq_uphy_request {
struct cmd_uphy_margin_control_request uphy_set_margin_control;
struct cmd_uphy_ep_controller_pll_init_request ep_ctrlr_pll_init;
struct cmd_uphy_pcie_controller_state_request controller_state;
- } __UNION_ANON;
-} __ABI_PACKED;
+ struct cmd_uphy_ep_controller_pll_off_request ep_ctrlr_pll_off;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @ingroup UPHY
@@ -2199,8 +2510,8 @@ struct mrq_uphy_request {
struct mrq_uphy_response {
union {
struct cmd_uphy_margin_status_response uphy_get_margin_status;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/** @} */
/** @endcond */
@@ -2250,31 +2561,31 @@ enum {
struct cmd_fmon_gear_clamp_request {
int32_t unused;
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_fmon_gear_clamp_response {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_fmon_gear_free_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_fmon_gear_free_response {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_fmon_gear_get_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
struct cmd_fmon_gear_get_response {
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup FMON
@@ -2307,8 +2618,8 @@ struct mrq_fmon_request {
struct cmd_fmon_gear_free_request fmon_gear_free;
/** @private */
struct cmd_fmon_gear_get_request fmon_gear_get;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @ingroup FMON
@@ -2332,8 +2643,8 @@ struct mrq_fmon_response {
/** @private */
struct cmd_fmon_gear_free_response fmon_gear_free;
struct cmd_fmon_gear_get_response fmon_gear_get;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/** @} */
/** @endcond */
@@ -2358,13 +2669,27 @@ struct mrq_fmon_response {
*/
enum {
/**
+ * @cond DEPRECATED
* @brief Retrieve specified EC status.
*
* mrq_response::err is 0 if the operation was successful, or @n
* -#BPMP_ENODEV if target EC is not owned by BPMP @n
- * -#BPMP_EACCES if target EC power domain is turned off
+ * -#BPMP_EACCES if target EC power domain is turned off @n
+ * -#BPMP_EBADCMD if subcommand is not supported
+ * @endcond
+ */
+ CMD_EC_STATUS_GET = 1, /* deprecated */
+
+ /**
+ * @brief Retrieve specified EC extended status (includes error
+ * counter and user values).
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_ENODEV if target EC is not owned by BPMP @n
+ * -#BPMP_EACCES if target EC power domain is turned off @n
+ * -#BPMP_EBADCMD if subcommand is not supported
*/
- CMD_EC_STATUS_GET = 1,
+ CMD_EC_STATUS_EX_GET = 2,
CMD_EC_NUM,
};
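Editor's note: for illustration, a client might issue the extended query and detect older firmware by the -BPMP_EBADCMD reply. A minimal sketch, assuming a hypothetical bpmp_transfer() transport helper and the cmd_id member declared just above this hunk:

/* Sketch only: bpmp_transfer() is a hypothetical MRQ transport helper. */
static int ec_status_ex_query(uint32_t ec_hsm_id,
			      struct cmd_ec_status_ex_get_response *resp)
{
	struct mrq_ec_request req = {
		.cmd_id = CMD_EC_STATUS_EX_GET,
		.ec_status_get = { .ec_hsm_id = ec_hsm_id },
	};

	/*
	 * Firmware that predates CMD_EC_STATUS_EX_GET replies with
	 * -BPMP_EBADCMD, which callers can use to fall back to the
	 * deprecated CMD_EC_STATUS_GET.
	 */
	return bpmp_transfer(MRQ_EC, &req, sizeof(req), resp, sizeof(*resp));
}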
@@ -2420,13 +2745,13 @@ enum bpmp_ec_err_type {
/** @brief SW Correctable error
*
- * Error descriptor @ref ec_err_simple_desc.
+ * Error descriptor @ref ec_err_sw_error_desc.
*/
EC_ERR_TYPE_SW_CORRECTABLE = 16,
/** @brief SW Uncorrectable error
*
- * Error descriptor @ref ec_err_simple_desc.
+ * Error descriptor @ref ec_err_sw_error_desc.
*/
EC_ERR_TYPE_SW_UNCORRECTABLE = 17,
@@ -2446,9 +2771,9 @@ enum bpmp_ec_err_type {
/** @brief Group of registers with parity error. */
enum ec_registers_group {
/** @brief Functional registers group */
- EC_ERR_GROUP_FUNC_REG = 0,
+ EC_ERR_GROUP_FUNC_REG = 0U,
/** @brief SCR registers group */
- EC_ERR_GROUP_SCR_REG = 1,
+ EC_ERR_GROUP_SCR_REG = 1U,
};
/**
@@ -2457,11 +2782,11 @@ enum ec_registers_group {
* @{
*/
/** @brief No EC error found flag */
-#define EC_STATUS_FLAG_NO_ERROR 0x0001
+#define EC_STATUS_FLAG_NO_ERROR 0x0001U
/** @brief Last EC error found flag */
-#define EC_STATUS_FLAG_LAST_ERROR 0x0002
+#define EC_STATUS_FLAG_LAST_ERROR 0x0002U
/** @brief EC latent error flag */
-#define EC_STATUS_FLAG_LATENT_ERROR 0x0004
+#define EC_STATUS_FLAG_LATENT_ERROR 0x0004U
/** @} */
/**
@@ -2470,9 +2795,9 @@ enum ec_registers_group {
* @{
*/
/** @brief EC descriptor error resolved flag */
-#define EC_DESC_FLAG_RESOLVED 0x0001
+#define EC_DESC_FLAG_RESOLVED 0x0001U
/** @brief EC descriptor failed to retrieve id flag */
-#define EC_DESC_FLAG_NO_ID 0x0002
+#define EC_DESC_FLAG_NO_ID 0x0002U
/** @} */
/**
@@ -2489,7 +2814,7 @@ struct ec_err_fmon_desc {
uint32_t fmon_faults;
/** @brief FMON faults access error */
int32_t fmon_access_error;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* |error type | vmon_adc_id values |
@@ -2505,7 +2830,7 @@ struct ec_err_vmon_desc {
uint32_t vmon_faults;
/** @brief VMON faults access error */
int32_t vmon_access_error;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* |error type | reg_id values |
@@ -2519,7 +2844,22 @@ struct ec_err_reg_parity_desc {
uint16_t reg_id;
/** @brief Register group @ref ec_registers_group */
uint16_t reg_group;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
+
+/**
+ * |error type | err_source_id values |
+ * |--------------------------------- |--------------------------|
+ * |@ref EC_ERR_TYPE_SW_CORRECTABLE | @ref bpmp_ec_ce_swd_ids |
+ * |@ref EC_ERR_TYPE_SW_UNCORRECTABLE | @ref bpmp_ec_ue_swd_ids |
+ */
+struct ec_err_sw_error_desc {
+ /** @brief Bitmask of @ref bpmp_ec_desc_flags */
+ uint16_t desc_flags;
+ /** @brief Error source id */
+ uint16_t err_source_id;
+	/** @brief SW error data */
+ uint32_t sw_error_data;
+} BPMP_ABI_PACKED;
/**
* |error type | err_source_id values |
@@ -2529,34 +2869,36 @@ struct ec_err_reg_parity_desc {
* |@ref EC_ERR_TYPE_ECC_DED_INTERNAL |@ref bpmp_ec_ipath_ids |
* |@ref EC_ERR_TYPE_COMPARATOR |@ref bpmp_ec_comparator_ids|
* |@ref EC_ERR_TYPE_PARITY_SRAM |@ref bpmp_clock_ids |
- * |@ref EC_ERR_TYPE_SW_CORRECTABLE |@ref bpmp_ec_misc_ids |
- * |@ref EC_ERR_TYPE_SW_UNCORRECTABLE |@ref bpmp_ec_misc_ids |
- * |@ref EC_ERR_TYPE_OTHER_HW_CORRECTABLE |@ref bpmp_ec_misc_ids |
- * |@ref EC_ERR_TYPE_OTHER_HW_UNCORRECTABLE |@ref bpmp_ec_misc_ids |
+ * |@ref EC_ERR_TYPE_OTHER_HW_CORRECTABLE |@ref bpmp_ec_misc_hwd_ids |
+ * |@ref EC_ERR_TYPE_OTHER_HW_UNCORRECTABLE |@ref bpmp_ec_misc_hwd_ids |
*/
struct ec_err_simple_desc {
/** @brief Bitmask of @ref bpmp_ec_desc_flags */
uint16_t desc_flags;
/** @brief Error source id. Id space depends on error type. */
uint16_t err_source_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @brief Union of EC error descriptors */
union ec_err_desc {
struct ec_err_fmon_desc fmon_desc;
struct ec_err_vmon_desc vmon_desc;
struct ec_err_reg_parity_desc reg_parity_desc;
+ struct ec_err_sw_error_desc sw_error_desc;
struct ec_err_simple_desc simple_desc;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_ec_status_get_request {
/** @brief HSM error line number that identifies target EC. */
uint32_t ec_hsm_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** EC status maximum number of descriptors */
-#define EC_ERR_STATUS_DESC_MAX_NUM 4
+#define EC_ERR_STATUS_DESC_MAX_NUM 4U
+/**
+ * @cond DEPRECATED
+ */
struct cmd_ec_status_get_response {
/** @brief Target EC id (the same id received with request). */
uint32_t ec_hsm_id;
@@ -2574,7 +2916,33 @@ struct cmd_ec_status_get_response {
uint32_t error_desc_num;
/** @brief EC error descriptors */
union ec_err_desc error_descs[EC_ERR_STATUS_DESC_MAX_NUM];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
+/** @endcond */
+
+struct cmd_ec_status_ex_get_response {
+ /** @brief Target EC id (the same id received with request). */
+ uint32_t ec_hsm_id;
+ /**
+ * @brief Bitmask of @ref bpmp_ec_status_flags
+ *
+	 * If the NO_ERROR flag is set, the error_* fields should be ignored
+ */
+ uint32_t ec_status_flags;
+ /** @brief Found EC error index. */
+ uint32_t error_idx;
+ /** @brief Found EC error type @ref bpmp_ec_err_type. */
+ uint32_t error_type;
+ /** @brief Found EC mission error counter value */
+ uint32_t error_counter;
+ /** @brief Found EC mission error user value */
+ uint32_t error_uval;
+ /** @brief Reserved entry */
+ uint32_t reserved;
+ /** @brief Number of returned EC error descriptors */
+ uint32_t error_desc_num;
+ /** @brief EC error descriptors */
+ union ec_err_desc error_descs[EC_ERR_STATUS_DESC_MAX_NUM];
+} BPMP_ABI_PACKED;
/**
* @ingroup EC
@@ -2583,9 +2951,15 @@ struct cmd_ec_status_get_response {
* Used by the sender of an #MRQ_EC message to access ECs owned
* by BPMP.
*
+ * @cond DEPRECATED
* |sub-command |payload |
* |----------------------------|-----------------------|
* |@ref CMD_EC_STATUS_GET |ec_status_get |
+ * @endcond
+ *
+ * |sub-command |payload |
+ * |----------------------------|-----------------------|
+ * |@ref CMD_EC_STATUS_EX_GET |ec_status_get |
*
*/
@@ -2595,8 +2969,8 @@ struct mrq_ec_request {
union {
struct cmd_ec_status_get_request ec_status_get;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @ingroup EC
@@ -2605,49 +2979,28 @@ struct mrq_ec_request {
* Each sub-command supported by @ref mrq_ec_request may return
* sub-command-specific data as indicated below.
*
+ * @cond DEPRECATED
* |sub-command |payload |
* |----------------------------|------------------------|
* |@ref CMD_EC_STATUS_GET |ec_status_get |
+ * @endcond
+ *
+ * |sub-command |payload |
+ * |----------------------------|------------------------|
+ * |@ref CMD_EC_STATUS_EX_GET |ec_status_ex_get |
*
*/
struct mrq_ec_response {
union {
+ /**
+ * @cond DEPRECATED
+ */
struct cmd_ec_status_get_response ec_status_get;
- } __UNION_ANON;
-} __ABI_PACKED;
-
-/** @} */
-/** @endcond */
-
-/**
- * @ingroup MRQ_Codes
- * @def MRQ_FBVOLT_STATUS
- * @brief Provides status information about voltage state for fuse burning
- *
- * * Platforms: T194 onwards
- * @cond bpmp_t194
- * * Initiators: CCPLEX
- * * Target: BPMP
- * * Request Payload: None
- * * Response Payload: @ref mrq_fbvolt_status_response
- * @{
- */
-
-/**
- * @ingroup Fbvolt_status
- * @brief Response to #MRQ_FBVOLT_STATUS
- *
- * Value of #ready reflects if core voltages are in a suitable state for buring
- * fuses. A value of 0x1 indicates that core voltages are ready for burning
- * fuses. A value of 0x0 indicates that core voltages are not ready.
- */
-struct mrq_fbvolt_status_response {
- /** @brief Bit [0:0] - ready status, bits [31:1] - reserved */
- uint32_t ready;
- /** @brief Reserved */
- uint32_t unused;
-} __ABI_PACKED;
+ /** @endcond */
+ struct cmd_ec_status_ex_get_response ec_status_ex_get;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/** @} */
/** @endcond */
@@ -2660,6 +3013,8 @@ struct mrq_fbvolt_status_response {
* @{
*/
+/** @brief Operation not permitted */
+#define BPMP_EPERM 1
/** @brief No such file or directory */
#define BPMP_ENOENT 2
/** @brief No MRQ handler */
@@ -2668,12 +3023,16 @@ struct mrq_fbvolt_status_response {
#define BPMP_EIO 5
/** @brief Bad sub-MRQ command */
#define BPMP_EBADCMD 6
+/** @brief Resource temporarily unavailable */
+#define BPMP_EAGAIN 11
/** @brief Not enough memory */
#define BPMP_ENOMEM 12
/** @brief Permission denied */
#define BPMP_EACCES 13
/** @brief Bad address */
#define BPMP_EFAULT 14
+/** @brief Resource busy */
+#define BPMP_EBUSY 16
/** @brief No such device */
#define BPMP_ENODEV 19
/** @brief Argument is a directory */
@@ -2685,10 +3044,18 @@ struct mrq_fbvolt_status_response {
/** @brief Out of range */
#define BPMP_ERANGE 34
/** @brief Function not implemented */
-#define BPMP_ENOSYS 38
+#define BPMP_ENOSYS 38
/** @brief Invalid slot */
#define BPMP_EBADSLT 57
+/** @brief Not supported */
+#define BPMP_ENOTSUP 134
+/** @brief No such device or address */
+#define BPMP_ENXIO 140
/** @} */
+#if defined(BPMP_ABI_CHECKS)
+#include "bpmp_abi_checks.h"
+#endif
+
#endif
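Editor's note: these codes largely mirror the usual errno numbering but are defined independently of the client OS, so callers typically translate mrq_response::err at the boundary. A hedged sketch of such a mapping (the helper name and the chosen errno equivalents are assumptions, not part of the ABI):

/* Illustrative: translate mrq_response::err (0 or -BPMP_E*) to a Linux errno. */
static int bpmp_mrq_err_to_errno(int32_t err)
{
	switch (-err) {
	case 0:			return 0;
	case BPMP_EPERM:	return -EPERM;
	case BPMP_ENOENT:	return -ENOENT;
	case BPMP_EIO:		return -EIO;
	case BPMP_EBADCMD:	return -EINVAL;
	case BPMP_EAGAIN:	return -EAGAIN;
	case BPMP_ENOMEM:	return -ENOMEM;
	case BPMP_EACCES:	return -EACCES;
	case BPMP_EFAULT:	return -EFAULT;
	case BPMP_EBUSY:	return -EBUSY;
	case BPMP_ENODEV:	return -ENODEV;
	case BPMP_ERANGE:	return -ERANGE;
	case BPMP_ENOSYS:	return -ENOSYS;
	case BPMP_EBADSLT:	return -EBADSLT;
	case BPMP_ENOTSUP:	return -EOPNOTSUPP;
	case BPMP_ENXIO:	return -ENXIO;
	default:		return -EINVAL;
	}
}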
diff --git a/include/soc/tegra/common.h b/include/soc/tegra/common.h
index 98027a76ce3d..8ec1ac07fc85 100644
--- a/include/soc/tegra/common.h
+++ b/include/soc/tegra/common.h
@@ -6,6 +6,52 @@
#ifndef __SOC_TEGRA_COMMON_H__
#define __SOC_TEGRA_COMMON_H__
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct device;
+
+/**
+ * Tegra SoC core device OPP table configuration
+ *
+ * @init_state: pre-initialize OPP state of a device
+ */
+struct tegra_core_opp_params {
+ bool init_state;
+};
+
+#ifdef CONFIG_ARCH_TEGRA
bool soc_is_tegra(void);
+int devm_tegra_core_dev_init_opp_table(struct device *dev,
+ struct tegra_core_opp_params *params);
+#else
+static inline bool soc_is_tegra(void)
+{
+ return false;
+}
+
+static inline int
+devm_tegra_core_dev_init_opp_table(struct device *dev,
+ struct tegra_core_opp_params *params)
+{
+ return -ENODEV;
+}
+#endif
+
+static inline int
+devm_tegra_core_dev_init_opp_table_common(struct device *dev)
+{
+ struct tegra_core_opp_params opp_params = {};
+ int err;
+
+ opp_params.init_state = true;
+
+ err = devm_tegra_core_dev_init_opp_table(dev, &opp_params);
+ if (err != -ENODEV)
+ return err;
+
+ return 0;
+}
+
#endif /* __SOC_TEGRA_COMMON_H__ */
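Editor's note: the *_common wrapper above treats -ENODEV (no OPP table described for the device) as success, so a driver probe can call it unconditionally. A hedged usage sketch (driver and function names are illustrative):

/* Illustrative probe fragment for a Tegra core-domain device. */
static int example_probe(struct platform_device *pdev)
{
	int err;

	/* Pre-initializes the OPP state; a missing table is not an error. */
	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
	if (err)
		return err;

	/* ... remaining device setup ... */
	return 0;
}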
diff --git a/include/soc/tegra/cpuidle.h b/include/soc/tegra/cpuidle.h
index 029ba1f4b2cc..5665975015d8 100644
--- a/include/soc/tegra/cpuidle.h
+++ b/include/soc/tegra/cpuidle.h
@@ -6,7 +6,7 @@
#ifndef __SOC_TEGRA_CPUIDLE_H__
#define __SOC_TEGRA_CPUIDLE_H__
-#if defined(CONFIG_ARM) && defined(CONFIG_ARCH_TEGRA) && defined(CONFIG_CPU_IDLE)
+#ifdef CONFIG_ARM_TEGRA_CPUIDLE
void tegra_cpuidle_pcie_irqs_in_use(void);
#else
static inline void tegra_cpuidle_pcie_irqs_in_use(void)
diff --git a/include/soc/tegra/emc.h b/include/soc/tegra/emc.h
deleted file mode 100644
index 05199a97ccf4..000000000000
--- a/include/soc/tegra/emc.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2014 NVIDIA Corporation. All rights reserved.
- */
-
-#ifndef __SOC_TEGRA_EMC_H__
-#define __SOC_TEGRA_EMC_H__
-
-struct tegra_emc;
-
-int tegra_emc_prepare_timing_change(struct tegra_emc *emc,
- unsigned long rate);
-void tegra_emc_complete_timing_change(struct tegra_emc *emc,
- unsigned long rate);
-
-#endif /* __SOC_TEGRA_EMC_H__ */
diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h
index 252ea20fe4c1..977c334136e9 100644
--- a/include/soc/tegra/fuse.h
+++ b/include/soc/tegra/fuse.h
@@ -6,12 +6,17 @@
#ifndef __SOC_TEGRA_FUSE_H__
#define __SOC_TEGRA_FUSE_H__
+#include <linux/types.h>
+
#define TEGRA20 0x20
#define TEGRA30 0x30
#define TEGRA114 0x35
#define TEGRA124 0x40
#define TEGRA132 0x13
#define TEGRA210 0x21
+#define TEGRA186 0x18
+#define TEGRA194 0x19
+#define TEGRA234 0x23
#define TEGRA_FUSE_SKU_CALIB_0 0xf0
#define TEGRA30_FUSE_SATA_CALIB 0x124
@@ -19,9 +24,6 @@
#ifndef __ASSEMBLY__
-u32 tegra_read_chipid(void);
-u8 tegra_get_chip_id(void);
-
enum tegra_revision {
TEGRA_REVISION_UNKNOWN = 0,
TEGRA_REVISION_A01,
@@ -47,11 +49,59 @@ struct tegra_sku_info {
enum tegra_revision revision;
};
+#ifdef CONFIG_ARCH_TEGRA
+extern struct tegra_sku_info tegra_sku_info;
u32 tegra_read_straps(void);
u32 tegra_read_ram_code(void);
int tegra_fuse_readl(unsigned long offset, u32 *value);
+u32 tegra_read_chipid(void);
+u8 tegra_get_chip_id(void);
+u8 tegra_get_platform(void);
+bool tegra_is_silicon(void);
+int tegra194_miscreg_mask_serror(void);
+#else
+static struct tegra_sku_info tegra_sku_info __maybe_unused;
-extern struct tegra_sku_info tegra_sku_info;
+static inline u32 tegra_read_straps(void)
+{
+ return 0;
+}
+
+static inline u32 tegra_read_ram_code(void)
+{
+ return 0;
+}
+
+static inline int tegra_fuse_readl(unsigned long offset, u32 *value)
+{
+ return -ENODEV;
+}
+
+static inline u32 tegra_read_chipid(void)
+{
+ return 0;
+}
+
+static inline u8 tegra_get_chip_id(void)
+{
+ return 0;
+}
+
+static inline u8 tegra_get_platform(void)
+{
+ return 0;
+}
+
+static inline bool tegra_is_silicon(void)
+{
+ return false;
+}
+
+static inline int tegra194_miscreg_mask_serror(void)
+{
+ return false;
+}
+#endif
struct device *tegra_soc_device_register(void);
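Editor's note: with the stubs above, callers no longer need their own CONFIG_ARCH_TEGRA guards; code like the following compiles everywhere and simply takes the fallback path on non-Tegra builds (a hedged sketch):

#include <soc/tegra/fuse.h>

static bool example_is_tegra194(void)
{
	/* The !CONFIG_ARCH_TEGRA stub returns 0, so this is false off Tegra. */
	return tegra_get_chip_id() == TEGRA194;
}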
diff --git a/include/soc/tegra/irq.h b/include/soc/tegra/irq.h
new file mode 100644
index 000000000000..94539551c8c1
--- /dev/null
+++ b/include/soc/tegra/irq.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012, NVIDIA Corporation. All rights reserved.
+ */
+
+#ifndef __SOC_TEGRA_IRQ_H
+#define __SOC_TEGRA_IRQ_H
+
+#include <linux/types.h>
+
+#if defined(CONFIG_ARM) && defined(CONFIG_ARCH_TEGRA)
+bool tegra_pending_sgi(void);
+#else
+static inline bool tegra_pending_sgi(void)
+{
+ return false;
+}
+#endif
+
+#endif /* __SOC_TEGRA_IRQ_H */
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index 1238e35653d1..47ce6d434427 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -6,7 +6,11 @@
#ifndef __SOC_TEGRA_MC_H__
#define __SOC_TEGRA_MC_H__
+#include <linux/bits.h>
+#include <linux/debugfs.h>
#include <linux/err.h>
+#include <linux/interconnect-provider.h>
+#include <linux/irq.h>
#include <linux/reset-controller.h>
#include <linux/types.h>
@@ -14,34 +18,48 @@ struct clk;
struct device;
struct page;
-struct tegra_smmu_enable {
- unsigned int reg;
- unsigned int bit;
-};
-
struct tegra_mc_timing {
unsigned long rate;
u32 *emem_data;
};
-/* latency allowance */
-struct tegra_mc_la {
- unsigned int reg;
- unsigned int shift;
- unsigned int mask;
- unsigned int def;
-};
-
struct tegra_mc_client {
unsigned int id;
const char *name;
- unsigned int swgroup;
+ /*
+ * For Tegra210 and earlier, this is the SWGROUP ID used for IOVA translations in the
+ * Tegra SMMU, whereas on Tegra186 and later this is the ID used to override the ARM SMMU
+ * stream ID used for IOVA translations for the given memory client.
+ */
+ union {
+ unsigned int swgroup;
+ unsigned int sid;
+ };
unsigned int fifo_size;
- struct tegra_smmu_enable smmu;
- struct tegra_mc_la la;
+ struct {
+ /* Tegra SMMU enable (Tegra210 and earlier) */
+ struct {
+ unsigned int reg;
+ unsigned int bit;
+ } smmu;
+
+ /* latency allowance */
+ struct {
+ unsigned int reg;
+ unsigned int shift;
+ unsigned int mask;
+ unsigned int def;
+ } la;
+
+ /* stream ID overrides (Tegra186 and later) */
+ struct {
+ unsigned int override;
+ unsigned int security;
+ } sid;
+ } regs;
};
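Editor's note: to illustrate the reworked layout, a Tegra210-era client table entry fills in swgroup together with regs.smmu and regs.la, while a Tegra186-era entry fills in sid together with regs.sid. The offsets and IDs below are placeholders, not real values:

static const struct tegra_mc_client example_tegra210_client = {
	.id = 0x01,
	.name = "examplew",
	.swgroup = 0x2,				/* Tegra SMMU SWGROUP */
	.regs = {
		.smmu = { .reg = 0x228, .bit = 1 },
		.la = { .reg = 0x2e8, .shift = 0, .mask = 0xff, .def = 0x4e },
	},
};

static const struct tegra_mc_client example_tegra186_client = {
	.id = 0x01,
	.name = "exampler",
	.sid = 0x10,				/* ARM SMMU stream ID override */
	.regs = {
		.sid = { .override = 0x1a8, .security = 0x1ac },
	},
};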
struct tegra_smmu_swgroup {
@@ -141,6 +159,30 @@ struct tegra_mc_reset_ops {
const struct tegra_mc_reset *rst);
};
+#define TEGRA_MC_ICC_TAG_DEFAULT 0
+#define TEGRA_MC_ICC_TAG_ISO BIT(0)
+
+struct tegra_mc_icc_ops {
+ int (*set)(struct icc_node *src, struct icc_node *dst);
+ int (*aggregate)(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
+ struct icc_node_data *(*xlate_extended)(struct of_phandle_args *spec,
+ void *data);
+};
+
+struct tegra_mc_ops {
+ /*
+ * @probe: Callback to set up SoC-specific bits of the memory controller. This is called
+	 * after the basic, common setup that is done by the SoC-agnostic code.
+ */
+ int (*probe)(struct tegra_mc *mc);
+ void (*remove)(struct tegra_mc *mc);
+ int (*suspend)(struct tegra_mc *mc);
+ int (*resume)(struct tegra_mc *mc);
+ irqreturn_t (*handle_irq)(int irq, void *data);
+ int (*probe_device)(struct tegra_mc *mc, struct device *dev);
+};
+
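Editor's note: a SoC backend supplies its hooks through this table; a hedged outline with stub bodies (names are illustrative):

static int example_mc_probe(struct tegra_mc *mc)
{
	/* SoC-specific setup, run after the common SoC-agnostic probe. */
	return 0;
}

static irqreturn_t example_mc_irq(int irq, void *data)
{
	/* Decode and report memory-controller interrupts here. */
	return IRQ_HANDLED;
}

static const struct tegra_mc_ops example_mc_ops = {
	.probe = example_mc_probe,
	.handle_irq = example_mc_irq,
};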
struct tegra_mc_soc {
const struct tegra_mc_client *clients;
unsigned int num_clients;
@@ -151,15 +193,22 @@ struct tegra_mc_soc {
unsigned int num_address_bits;
unsigned int atom_size;
- u8 client_id_mask;
+ u16 client_id_mask;
+ u8 num_channels;
const struct tegra_smmu_soc *smmu;
u32 intmask;
+ u32 ch_intmask;
+ u32 global_intstatus_channel_shift;
+ bool has_addr_hi_reg;
const struct tegra_mc_reset_ops *reset_ops;
const struct tegra_mc_reset *resets;
unsigned int num_resets;
+
+ const struct tegra_mc_icc_ops *icc_ops;
+ const struct tegra_mc_ops *ops;
};
struct tegra_mc {
@@ -167,6 +216,8 @@ struct tegra_mc {
struct tegra_smmu *smmu;
struct gart_device *gart;
void __iomem *regs;
+ void __iomem *bcast_ch_regs;
+ void __iomem **ch_regs;
struct clk *clk;
int irq;
@@ -178,10 +229,33 @@ struct tegra_mc {
struct reset_controller_dev reset;
+ struct icc_provider provider;
+
spinlock_t lock;
+
+ struct {
+ struct dentry *root;
+ } debugfs;
};
int tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate);
unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc);
+#ifdef CONFIG_TEGRA_MC
+struct tegra_mc *devm_tegra_memory_controller_get(struct device *dev);
+int tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev);
+#else
+static inline struct tegra_mc *
+devm_tegra_memory_controller_get(struct device *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int
+tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
#endif /* __SOC_TEGRA_MC_H__ */
diff --git a/include/soc/tegra/pm.h b/include/soc/tegra/pm.h
index 951fcd738d55..ce4d0b1bd0d6 100644
--- a/include/soc/tegra/pm.h
+++ b/include/soc/tegra/pm.h
@@ -6,20 +6,30 @@
#ifndef __SOC_TEGRA_PM_H__
#define __SOC_TEGRA_PM_H__
+#include <linux/errno.h>
+
enum tegra_suspend_mode {
TEGRA_SUSPEND_NONE = 0,
TEGRA_SUSPEND_LP2, /* CPU voltage off */
TEGRA_SUSPEND_LP1, /* CPU voltage off, DRAM self-refresh */
TEGRA_SUSPEND_LP0, /* CPU + core voltage off, DRAM self-refresh */
TEGRA_MAX_SUSPEND_MODE,
+ TEGRA_SUSPEND_NOT_READY,
};
-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM)
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM) && defined(CONFIG_ARCH_TEGRA)
enum tegra_suspend_mode
tegra_pm_validate_suspend_mode(enum tegra_suspend_mode mode);
/* low-level resume entry point */
void tegra_resume(void);
+
+int tegra30_pm_secondary_cpu_suspend(unsigned long arg);
+void tegra_pm_clear_cpu_in_lp2(void);
+void tegra_pm_set_cpu_in_lp2(void);
+int tegra_pm_enter_lp2(void);
+int tegra_pm_park_secondary_cpu(unsigned long cpu);
+void tegra_pm_init_suspend(void);
#else
static inline enum tegra_suspend_mode
tegra_pm_validate_suspend_mode(enum tegra_suspend_mode mode)
@@ -30,6 +40,33 @@ tegra_pm_validate_suspend_mode(enum tegra_suspend_mode mode)
static inline void tegra_resume(void)
{
}
+
+static inline int tegra30_pm_secondary_cpu_suspend(unsigned long arg)
+{
+ return -ENOTSUPP;
+}
+
+static inline void tegra_pm_clear_cpu_in_lp2(void)
+{
+}
+
+static inline void tegra_pm_set_cpu_in_lp2(void)
+{
+}
+
+static inline int tegra_pm_enter_lp2(void)
+{
+ return -ENOTSUPP;
+}
+
+static inline int tegra_pm_park_secondary_cpu(unsigned long cpu)
+{
+ return -ENOTSUPP;
+}
+
+static inline void tegra_pm_init_suspend(void)
+{
+}
#endif /* CONFIG_PM_SLEEP */
#endif /* __SOC_TEGRA_PM_H__ */
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index 57e58faf660b..d186bccd125d 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -113,8 +113,9 @@ enum tegra_io_pad {
TEGRA_IO_PAD_PEX_CLK_BIAS,
TEGRA_IO_PAD_PEX_CLK1,
TEGRA_IO_PAD_PEX_CLK2,
- TEGRA_IO_PAD_PEX_CLK2_BIAS,
TEGRA_IO_PAD_PEX_CLK3,
+ TEGRA_IO_PAD_PEX_CLK_2_BIAS,
+ TEGRA_IO_PAD_PEX_CLK_2,
TEGRA_IO_PAD_PEX_CNTRL,
TEGRA_IO_PAD_PEX_CTL2,
TEGRA_IO_PAD_PEX_L0_RST_N,
@@ -167,10 +168,11 @@ int tegra_io_pad_power_disable(enum tegra_io_pad id);
int tegra_io_rail_power_on(unsigned int id);
int tegra_io_rail_power_off(unsigned int id);
-enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void);
void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode);
void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode);
+bool tegra_pmc_core_domain_state_synced(void);
+
#else
static inline int tegra_powergate_power_on(unsigned int id)
{
@@ -219,19 +221,28 @@ static inline int tegra_io_rail_power_off(unsigned int id)
return -ENOSYS;
}
-static inline enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void)
+static inline void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode)
{
- return TEGRA_SUSPEND_NONE;
}
-static inline void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode)
+static inline void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode)
{
}
-static inline void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode)
+static inline bool tegra_pmc_core_domain_state_synced(void)
{
+ return false;
}
#endif /* CONFIG_SOC_TEGRA_PMC */
+#if defined(CONFIG_SOC_TEGRA_PMC) && defined(CONFIG_PM_SLEEP)
+enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void);
+#else
+static inline enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void)
+{
+ return TEGRA_SUSPEND_NONE;
+}
+#endif
+
#endif /* __SOC_TEGRA_PMC_H__ */
diff --git a/include/soc/tegra/tegra-cbb.h b/include/soc/tegra/tegra-cbb.h
new file mode 100644
index 000000000000..e864c2ebe794
--- /dev/null
+++ b/include/soc/tegra/tegra-cbb.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved
+ */
+
+#ifndef TEGRA_CBB_H
+#define TEGRA_CBB_H
+
+#include <linux/list.h>
+
+struct tegra_cbb_error {
+ const char *code;
+ const char *source;
+ const char *desc;
+};
+
+struct tegra_cbb {
+ struct device *dev;
+ const struct tegra_cbb_ops *ops;
+ struct list_head node;
+};
+
+struct tegra_cbb_ops {
+ int (*debugfs_show)(struct tegra_cbb *cbb, struct seq_file *s, void *v);
+ int (*interrupt_enable)(struct tegra_cbb *cbb);
+ void (*error_enable)(struct tegra_cbb *cbb);
+ void (*fault_enable)(struct tegra_cbb *cbb);
+ void (*stall_enable)(struct tegra_cbb *cbb);
+ void (*error_clear)(struct tegra_cbb *cbb);
+ u32 (*get_status)(struct tegra_cbb *cbb);
+};
+
+int tegra_cbb_get_irq(struct platform_device *pdev, unsigned int *nonsec_irq,
+ unsigned int *sec_irq);
+__printf(2, 3)
+void tegra_cbb_print_err(struct seq_file *file, const char *fmt, ...);
+
+void tegra_cbb_print_cache(struct seq_file *file, u32 cache);
+void tegra_cbb_print_prot(struct seq_file *file, u32 prot);
+int tegra_cbb_register(struct tegra_cbb *cbb);
+
+void tegra_cbb_fault_enable(struct tegra_cbb *cbb);
+void tegra_cbb_stall_enable(struct tegra_cbb *cbb);
+void tegra_cbb_error_clear(struct tegra_cbb *cbb);
+u32 tegra_cbb_get_status(struct tegra_cbb *cbb);
+
+#endif /* TEGRA_CBB_H */
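Editor's note: a fabric driver embeds or allocates a struct tegra_cbb, fills in its ops, and registers the instance. A hedged outline (only get_status is stubbed; a real driver implements the full ops set):

static u32 example_cbb_get_status(struct tegra_cbb *cbb)
{
	return 0; /* a real implementation reads the fabric's error status */
}

static const struct tegra_cbb_ops example_cbb_ops = {
	.get_status = example_cbb_get_status,
};

static int example_cbb_probe(struct platform_device *pdev)
{
	struct tegra_cbb *cbb;

	cbb = devm_kzalloc(&pdev->dev, sizeof(*cbb), GFP_KERNEL);
	if (!cbb)
		return -ENOMEM;

	cbb->dev = &pdev->dev;
	cbb->ops = &example_cbb_ops;

	return tegra_cbb_register(cbb);
}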
diff --git a/include/sound/acp62_chip_offset_byte.h b/include/sound/acp62_chip_offset_byte.h
new file mode 100644
index 000000000000..f03992f81168
--- /dev/null
+++ b/include/sound/acp62_chip_offset_byte.h
@@ -0,0 +1,444 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * AMD ACP 6.2 Register Documentation
+ *
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ */
+
+#ifndef _acp_ip_OFFSET_HEADER
+#define _acp_ip_OFFSET_HEADER
+
+/* Registers from ACP_DMA block */
+#define ACP_DMA_CNTL_0 0x0000000
+#define ACP_DMA_CNTL_1 0x0000004
+#define ACP_DMA_CNTL_2 0x0000008
+#define ACP_DMA_CNTL_3 0x000000C
+#define ACP_DMA_CNTL_4 0x0000010
+#define ACP_DMA_CNTL_5 0x0000014
+#define ACP_DMA_CNTL_6 0x0000018
+#define ACP_DMA_CNTL_7 0x000001C
+#define ACP_DMA_DSCR_STRT_IDX_0 0x0000020
+#define ACP_DMA_DSCR_STRT_IDX_1 0x0000024
+#define ACP_DMA_DSCR_STRT_IDX_2 0x0000028
+#define ACP_DMA_DSCR_STRT_IDX_3 0x000002C
+#define ACP_DMA_DSCR_STRT_IDX_4 0x0000030
+#define ACP_DMA_DSCR_STRT_IDX_5 0x0000034
+#define ACP_DMA_DSCR_STRT_IDX_6 0x0000038
+#define ACP_DMA_DSCR_STRT_IDX_7 0x000003C
+#define ACP_DMA_DSCR_CNT_0 0x0000040
+#define ACP_DMA_DSCR_CNT_1 0x0000044
+#define ACP_DMA_DSCR_CNT_2 0x0000048
+#define ACP_DMA_DSCR_CNT_3 0x000004C
+#define ACP_DMA_DSCR_CNT_4 0x0000050
+#define ACP_DMA_DSCR_CNT_5 0x0000054
+#define ACP_DMA_DSCR_CNT_6 0x0000058
+#define ACP_DMA_DSCR_CNT_7 0x000005C
+#define ACP_DMA_PRIO_0 0x0000060
+#define ACP_DMA_PRIO_1 0x0000064
+#define ACP_DMA_PRIO_2 0x0000068
+#define ACP_DMA_PRIO_3 0x000006C
+#define ACP_DMA_PRIO_4 0x0000070
+#define ACP_DMA_PRIO_5 0x0000074
+#define ACP_DMA_PRIO_6 0x0000078
+#define ACP_DMA_PRIO_7 0x000007C
+#define ACP_DMA_CUR_DSCR_0 0x0000080
+#define ACP_DMA_CUR_DSCR_1 0x0000084
+#define ACP_DMA_CUR_DSCR_2 0x0000088
+#define ACP_DMA_CUR_DSCR_3 0x000008C
+#define ACP_DMA_CUR_DSCR_4 0x0000090
+#define ACP_DMA_CUR_DSCR_5 0x0000094
+#define ACP_DMA_CUR_DSCR_6 0x0000098
+#define ACP_DMA_CUR_DSCR_7 0x000009C
+#define ACP_DMA_CUR_TRANS_CNT_0 0x00000A0
+#define ACP_DMA_CUR_TRANS_CNT_1 0x00000A4
+#define ACP_DMA_CUR_TRANS_CNT_2 0x00000A8
+#define ACP_DMA_CUR_TRANS_CNT_3 0x00000AC
+#define ACP_DMA_CUR_TRANS_CNT_4 0x00000B0
+#define ACP_DMA_CUR_TRANS_CNT_5 0x00000B4
+#define ACP_DMA_CUR_TRANS_CNT_6 0x00000B8
+#define ACP_DMA_CUR_TRANS_CNT_7 0x00000BC
+#define ACP_DMA_ERR_STS_0 0x00000C0
+#define ACP_DMA_ERR_STS_1 0x00000C4
+#define ACP_DMA_ERR_STS_2 0x00000C8
+#define ACP_DMA_ERR_STS_3 0x00000CC
+#define ACP_DMA_ERR_STS_4 0x00000D0
+#define ACP_DMA_ERR_STS_5 0x00000D4
+#define ACP_DMA_ERR_STS_6 0x00000D8
+#define ACP_DMA_ERR_STS_7 0x00000DC
+#define ACP_DMA_DESC_BASE_ADDR 0x00000E0
+#define ACP_DMA_DESC_MAX_NUM_DSCR 0x00000E4
+#define ACP_DMA_CH_STS 0x00000E8
+#define ACP_DMA_CH_GROUP 0x00000EC
+#define ACP_DMA_CH_RST_STS 0x00000F0
+
+/* Registers from ACP_AXI2AXIATU block */
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_1 0x0000C00
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_1 0x0000C04
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_2 0x0000C08
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_2 0x0000C0C
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_3 0x0000C10
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_3 0x0000C14
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_4 0x0000C18
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_4 0x0000C1C
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_5 0x0000C20
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_5 0x0000C24
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_6 0x0000C28
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_6 0x0000C2C
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_7 0x0000C30
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_7 0x0000C34
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_8 0x0000C38
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_8 0x0000C3C
+#define ACPAXI2AXI_ATU_CTRL 0x0000C40
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_9 0x0000C44
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_9 0x0000C48
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_10 0x0000C4C
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_10 0x0000C50
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_11 0x0000C54
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_11 0x0000C58
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_12 0x0000C5C
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_12 0x0000C60
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_13 0x0000C64
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_13 0x0000C68
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_14 0x0000C6C
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_14 0x0000C70
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_15 0x0000C74
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_15 0x0000C78
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_16 0x0000C7C
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_16 0x0000C80
+
+/* Registers from ACP_CLKRST block */
+#define ACP_SOFT_RESET 0x0001000
+#define ACP_CONTROL 0x0001004
+#define ACP_STATUS 0x0001008
+#define ACP_DYNAMIC_CG_MASTER_CONTROL 0x0001010
+#define ACP_ZSC_DSP_CTRL 0x0001014
+#define ACP_ZSC_STS 0x0001018
+#define ACP_PGFSM_CONTROL 0x0001024
+#define ACP_PGFSM_STATUS 0x0001028
+#define ACP_CLKMUX_SEL 0x000102C
+
+/* Registers from ACP_AON block */
+#define ACP_PME_EN 0x0001400
+#define ACP_DEVICE_STATE 0x0001404
+#define AZ_DEVICE_STATE 0x0001408
+#define ACP_PIN_CONFIG 0x0001440
+#define ACP_PAD_PULLUP_CTRL 0x0001444
+#define ACP_PAD_PULLDOWN_CTRL 0x0001448
+#define ACP_PAD_DRIVE_STRENGTH_CTRL 0x000144C
+#define ACP_PAD_SCHMEN_CTRL 0x0001450
+#define ACP_SW_PAD_KEEPER_EN 0x0001454
+#define ACP_SW_WAKE_EN 0x0001458
+#define ACP_I2S_WAKE_EN 0x000145C
+#define ACP_SW1_WAKE_EN 0x0001460
+
+/* Registers from ACP_P1_MISC block */
+#define ACP_EXTERNAL_INTR_ENB 0x0001A00
+#define ACP_EXTERNAL_INTR_CNTL 0x0001A04
+#define ACP_EXTERNAL_INTR_CNTL1 0x0001A08
+#define ACP_EXTERNAL_INTR_STAT 0x0001A0C
+#define ACP_EXTERNAL_INTR_STAT1 0x0001A10
+#define ACP_ERROR_STATUS 0x0001A4C
+#define ACP_P1_SW_I2S_ERROR_REASON 0x0001A50
+#define ACP_P1_SW_POS_TRACK_I2S_TX_CTRL 0x0001A6C
+#define ACP_P1_SW_I2S_TX_DMA_POS 0x0001A70
+#define ACP_P1_SW_POS_TRACK_I2S_RX_CTRL 0x0001A74
+#define ACP_P1_SW_I2S_RX_DMA_POS 0x0001A78
+#define ACP_P1_DMIC_I2S_GPIO_INTR_CTRL 0x0001A7C
+#define ACP_P1_DMIC_I2S_GPIO_INTR_STATUS 0x0001A80
+#define ACP_SCRATCH_REG_BASE_ADDR 0x0001A84
+#define ACP_P1_SW_POS_TRACK_BT_TX_CTRL 0x0001A88
+#define ACP_P1_SW_BT_TX_DMA_POS 0x0001A8C
+#define ACP_P1_SW_POS_TRACK_HS_TX_CTRL 0x0001A90
+#define ACP_P1_SW_HS_TX_DMA_POS 0x0001A94
+#define ACP_P1_SW_POS_TRACK_BT_RX_CTRL 0x0001A98
+#define ACP_P1_SW_BT_RX_DMA_POS 0x0001A9C
+#define ACP_P1_SW_POS_TRACK_HS_RX_CTRL 0x0001AA0
+#define ACP_P1_SW_HS_RX_DMA_POS 0x0001AA4
+
+/* Registers from ACP_AUDIO_BUFFERS block */
+#define ACP_I2S_RX_RINGBUFADDR 0x0002000
+#define ACP_I2S_RX_RINGBUFSIZE 0x0002004
+#define ACP_I2S_RX_LINKPOSITIONCNTR 0x0002008
+#define ACP_I2S_RX_FIFOADDR 0x000200C
+#define ACP_I2S_RX_FIFOSIZE 0x0002010
+#define ACP_I2S_RX_DMA_SIZE 0x0002014
+#define ACP_I2S_RX_LINEARPOSITIONCNTR_HIGH 0x0002018
+#define ACP_I2S_RX_LINEARPOSITIONCNTR_LOW 0x000201C
+#define ACP_I2S_RX_INTR_WATERMARK_SIZE 0x0002020
+#define ACP_I2S_TX_RINGBUFADDR 0x0002024
+#define ACP_I2S_TX_RINGBUFSIZE 0x0002028
+#define ACP_I2S_TX_LINKPOSITIONCNTR 0x000202C
+#define ACP_I2S_TX_FIFOADDR 0x0002030
+#define ACP_I2S_TX_FIFOSIZE 0x0002034
+#define ACP_I2S_TX_DMA_SIZE 0x0002038
+#define ACP_I2S_TX_LINEARPOSITIONCNTR_HIGH 0x000203C
+#define ACP_I2S_TX_LINEARPOSITIONCNTR_LOW 0x0002040
+#define ACP_I2S_TX_INTR_WATERMARK_SIZE 0x0002044
+#define ACP_BT_RX_RINGBUFADDR 0x0002048
+#define ACP_BT_RX_RINGBUFSIZE 0x000204C
+#define ACP_BT_RX_LINKPOSITIONCNTR 0x0002050
+#define ACP_BT_RX_FIFOADDR 0x0002054
+#define ACP_BT_RX_FIFOSIZE 0x0002058
+#define ACP_BT_RX_DMA_SIZE 0x000205C
+#define ACP_BT_RX_LINEARPOSITIONCNTR_HIGH 0x0002060
+#define ACP_BT_RX_LINEARPOSITIONCNTR_LOW 0x0002064
+#define ACP_BT_RX_INTR_WATERMARK_SIZE 0x0002068
+#define ACP_BT_TX_RINGBUFADDR 0x000206C
+#define ACP_BT_TX_RINGBUFSIZE 0x0002070
+#define ACP_BT_TX_LINKPOSITIONCNTR 0x0002074
+#define ACP_BT_TX_FIFOADDR 0x0002078
+#define ACP_BT_TX_FIFOSIZE 0x000207C
+#define ACP_BT_TX_DMA_SIZE 0x0002080
+#define ACP_BT_TX_LINEARPOSITIONCNTR_HIGH 0x0002084
+#define ACP_BT_TX_LINEARPOSITIONCNTR_LOW 0x0002088
+#define ACP_BT_TX_INTR_WATERMARK_SIZE 0x000208C
+#define ACP_HS_RX_RINGBUFADDR 0x0002090
+#define ACP_HS_RX_RINGBUFSIZE 0x0002094
+#define ACP_HS_RX_LINKPOSITIONCNTR 0x0002098
+#define ACP_HS_RX_FIFOADDR 0x000209C
+#define ACP_HS_RX_FIFOSIZE 0x00020A0
+#define ACP_HS_RX_DMA_SIZE 0x00020A4
+#define ACP_HS_RX_LINEARPOSITIONCNTR_HIGH 0x00020A8
+#define ACP_HS_RX_LINEARPOSITIONCNTR_LOW 0x00020AC
+#define ACP_HS_RX_INTR_WATERMARK_SIZE 0x00020B0
+#define ACP_HS_TX_RINGBUFADDR 0x00020B4
+#define ACP_HS_TX_RINGBUFSIZE 0x00020B8
+#define ACP_HS_TX_LINKPOSITIONCNTR 0x00020BC
+#define ACP_HS_TX_FIFOADDR 0x00020C0
+#define ACP_HS_TX_FIFOSIZE 0x00020C4
+#define ACP_HS_TX_DMA_SIZE 0x00020C8
+#define ACP_HS_TX_LINEARPOSITIONCNTR_HIGH 0x00020CC
+#define ACP_HS_TX_LINEARPOSITIONCNTR_LOW 0x00020D0
+#define ACP_HS_TX_INTR_WATERMARK_SIZE 0x00020D4
+
+/* Registers from ACP_I2S_TDM block */
+#define ACP_I2STDM_IER 0x0002400
+#define ACP_I2STDM_IRER 0x0002404
+#define ACP_I2STDM_RXFRMT 0x0002408
+#define ACP_I2STDM_ITER 0x000240C
+#define ACP_I2STDM_TXFRMT 0x0002410
+#define ACP_I2STDM0_MSTRCLKGEN 0x0002414
+#define ACP_I2STDM1_MSTRCLKGEN 0x0002418
+#define ACP_I2STDM2_MSTRCLKGEN 0x000241C
+#define ACP_I2STDM_REFCLKGEN 0x0002420
+
+/* Registers from ACP_BT_TDM block */
+#define ACP_BTTDM_IER 0x0002800
+#define ACP_BTTDM_IRER 0x0002804
+#define ACP_BTTDM_RXFRMT 0x0002808
+#define ACP_BTTDM_ITER 0x000280C
+#define ACP_BTTDM_TXFRMT 0x0002810
+#define ACP_HSTDM_IER 0x0002814
+#define ACP_HSTDM_IRER 0x0002818
+#define ACP_HSTDM_RXFRMT 0x000281C
+#define ACP_HSTDM_ITER 0x0002820
+#define ACP_HSTDM_TXFRMT 0x0002824
+
+/* Registers from ACP_WOV block */
+#define ACP_WOV_PDM_ENABLE 0x0002C04
+#define ACP_WOV_PDM_DMA_ENABLE 0x0002C08
+#define ACP_WOV_RX_RINGBUFADDR 0x0002C0C
+#define ACP_WOV_RX_RINGBUFSIZE 0x0002C10
+#define ACP_WOV_RX_LINKPOSITIONCNTR 0x0002C14
+#define ACP_WOV_RX_LINEARPOSITIONCNTR_HIGH 0x0002C18
+#define ACP_WOV_RX_LINEARPOSITIONCNTR_LOW 0x0002C1C
+#define ACP_WOV_RX_INTR_WATERMARK_SIZE 0x0002C20
+#define ACP_WOV_PDM_FIFO_FLUSH 0x0002C24
+#define ACP_WOV_PDM_NO_OF_CHANNELS 0x0002C28
+#define ACP_WOV_PDM_DECIMATION_FACTOR 0x0002C2C
+#define ACP_WOV_PDM_VAD_CTRL 0x0002C30
+#define ACP_WOV_WAKE 0x0002C54
+#define ACP_WOV_BUFFER_STATUS 0x0002C58
+#define ACP_WOV_MISC_CTRL 0x0002C5C
+#define ACP_WOV_CLK_CTRL 0x0002C60
+#define ACP_PDM_VAD_DYNAMIC_CLK_GATING_EN 0x0002C64
+#define ACP_WOV_ERROR_STATUS_REGISTER 0x0002C68
+#define ACP_PDM_CLKDIV 0x0002C6C
+
+/* Registers from ACP_P1_AUDIO_BUFFERS block */
+#define ACP_P1_I2S_RX_RINGBUFADDR 0x0003A00
+#define ACP_P1_I2S_RX_RINGBUFSIZE 0x0003A04
+#define ACP_P1_I2S_RX_LINKPOSITIONCNTR 0x0003A08
+#define ACP_P1_I2S_RX_FIFOADDR 0x0003A0C
+#define ACP_P1_I2S_RX_FIFOSIZE 0x0003A10
+#define ACP_P1_I2S_RX_DMA_SIZE 0x0003A14
+#define ACP_P1_I2S_RX_LINEARPOSITIONCNTR_HIGH 0x0003A18
+#define ACP_P1_I2S_RX_LINEARPOSITIONCNTR_LOW 0x0003A1C
+#define ACP_P1_I2S_RX_INTR_WATERMARK_SIZE 0x0003A20
+#define ACP_P1_I2S_TX_RINGBUFADDR 0x0003A24
+#define ACP_P1_I2S_TX_RINGBUFSIZE 0x0003A28
+#define ACP_P1_I2S_TX_LINKPOSITIONCNTR 0x0003A2C
+#define ACP_P1_I2S_TX_FIFOADDR 0x0003A30
+#define ACP_P1_I2S_TX_FIFOSIZE 0x0003A34
+#define ACP_P1_I2S_TX_DMA_SIZE 0x0003A38
+#define ACP_P1_I2S_TX_LINEARPOSITIONCNTR_HIGH 0x0003A3C
+#define ACP_P1_I2S_TX_LINEARPOSITIONCNTR_LOW 0x0003A40
+#define ACP_P1_I2S_TX_INTR_WATERMARK_SIZE 0x0003A44
+#define ACP_P1_BT_RX_RINGBUFADDR 0x0003A48
+#define ACP_P1_BT_RX_RINGBUFSIZE 0x0003A4C
+#define ACP_P1_BT_RX_LINKPOSITIONCNTR 0x0003A50
+#define ACP_P1_BT_RX_FIFOADDR 0x0003A54
+#define ACP_P1_BT_RX_FIFOSIZE 0x0003A58
+#define ACP_P1_BT_RX_DMA_SIZE 0x0003A5C
+#define ACP_P1_BT_RX_LINEARPOSITIONCNTR_HIGH 0x0003A60
+#define ACP_P1_BT_RX_LINEARPOSITIONCNTR_LOW 0x0003A64
+#define ACP_P1_BT_RX_INTR_WATERMARK_SIZE 0x0003A68
+#define ACP_P1_BT_TX_RINGBUFADDR 0x0003A6C
+#define ACP_P1_BT_TX_RINGBUFSIZE 0x0003A70
+#define ACP_P1_BT_TX_LINKPOSITIONCNTR 0x0003A74
+#define ACP_P1_BT_TX_FIFOADDR 0x0003A78
+#define ACP_P1_BT_TX_FIFOSIZE 0x0003A7C
+#define ACP_P1_BT_TX_DMA_SIZE 0x0003A80
+#define ACP_P1_BT_TX_LINEARPOSITIONCNTR_HIGH 0x0003A84
+#define ACP_P1_BT_TX_LINEARPOSITIONCNTR_LOW 0x0003A88
+#define ACP_P1_BT_TX_INTR_WATERMARK_SIZE 0x0003A8C
+#define ACP_P1_HS_RX_RINGBUFADDR 0x0003A90
+#define ACP_P1_HS_RX_RINGBUFSIZE 0x0003A94
+#define ACP_P1_HS_RX_LINKPOSITIONCNTR 0x0003A98
+#define ACP_P1_HS_RX_FIFOADDR 0x0003A9C
+#define ACP_P1_HS_RX_FIFOSIZE 0x0003AA0
+#define ACP_P1_HS_RX_DMA_SIZE 0x0003AA4
+#define ACP_P1_HS_RX_LINEARPOSITIONCNTR_HIGH 0x0003AA8
+#define ACP_P1_HS_RX_LINEARPOSITIONCNTR_LOW 0x0003AAC
+#define ACP_P1_HS_RX_INTR_WATERMARK_SIZE 0x0003AB0
+#define ACP_P1_HS_TX_RINGBUFADDR 0x0003AB4
+#define ACP_P1_HS_TX_RINGBUFSIZE 0x0003AB8
+#define ACP_P1_HS_TX_LINKPOSITIONCNTR 0x0003ABC
+#define ACP_P1_HS_TX_FIFOADDR 0x0003AC0
+#define ACP_P1_HS_TX_FIFOSIZE 0x0003AC4
+#define ACP_P1_HS_TX_DMA_SIZE 0x0003AC8
+#define ACP_P1_HS_TX_LINEARPOSITIONCNTR_HIGH 0x0003ACC
+#define ACP_P1_HS_TX_LINEARPOSITIONCNTR_LOW 0x0003AD0
+#define ACP_P1_HS_TX_INTR_WATERMARK_SIZE 0x0003AD4
+
+/* Registers from ACP_SCRATCH block */
+#define ACP_SCRATCH_REG_0 0x0010000
+#define ACP_SCRATCH_REG_1 0x0010004
+#define ACP_SCRATCH_REG_2 0x0010008
+#define ACP_SCRATCH_REG_3 0x001000C
+#define ACP_SCRATCH_REG_4 0x0010010
+#define ACP_SCRATCH_REG_5 0x0010014
+#define ACP_SCRATCH_REG_6 0x0010018
+#define ACP_SCRATCH_REG_7 0x001001C
+#define ACP_SCRATCH_REG_8 0x0010020
+#define ACP_SCRATCH_REG_9 0x0010024
+#define ACP_SCRATCH_REG_10 0x0010028
+#define ACP_SCRATCH_REG_11 0x001002C
+#define ACP_SCRATCH_REG_12 0x0010030
+#define ACP_SCRATCH_REG_13 0x0010034
+#define ACP_SCRATCH_REG_14 0x0010038
+#define ACP_SCRATCH_REG_15 0x001003C
+#define ACP_SCRATCH_REG_16 0x0010040
+#define ACP_SCRATCH_REG_17 0x0010044
+#define ACP_SCRATCH_REG_18 0x0010048
+#define ACP_SCRATCH_REG_19 0x001004C
+#define ACP_SCRATCH_REG_20 0x0010050
+#define ACP_SCRATCH_REG_21 0x0010054
+#define ACP_SCRATCH_REG_22 0x0010058
+#define ACP_SCRATCH_REG_23 0x001005C
+#define ACP_SCRATCH_REG_24 0x0010060
+#define ACP_SCRATCH_REG_25 0x0010064
+#define ACP_SCRATCH_REG_26 0x0010068
+#define ACP_SCRATCH_REG_27 0x001006C
+#define ACP_SCRATCH_REG_28 0x0010070
+#define ACP_SCRATCH_REG_29 0x0010074
+#define ACP_SCRATCH_REG_30 0x0010078
+#define ACP_SCRATCH_REG_31 0x001007C
+#define ACP_SCRATCH_REG_32 0x0010080
+#define ACP_SCRATCH_REG_33 0x0010084
+#define ACP_SCRATCH_REG_34 0x0010088
+#define ACP_SCRATCH_REG_35 0x001008C
+#define ACP_SCRATCH_REG_36 0x0010090
+#define ACP_SCRATCH_REG_37 0x0010094
+#define ACP_SCRATCH_REG_38 0x0010098
+#define ACP_SCRATCH_REG_39 0x001009C
+#define ACP_SCRATCH_REG_40 0x00100A0
+#define ACP_SCRATCH_REG_41 0x00100A4
+#define ACP_SCRATCH_REG_42 0x00100A8
+#define ACP_SCRATCH_REG_43 0x00100AC
+#define ACP_SCRATCH_REG_44 0x00100B0
+#define ACP_SCRATCH_REG_45 0x00100B4
+#define ACP_SCRATCH_REG_46 0x00100B8
+#define ACP_SCRATCH_REG_47 0x00100BC
+#define ACP_SCRATCH_REG_48 0x00100C0
+#define ACP_SCRATCH_REG_49 0x00100C4
+#define ACP_SCRATCH_REG_50 0x00100C8
+#define ACP_SCRATCH_REG_51 0x00100CC
+#define ACP_SCRATCH_REG_52 0x00100D0
+#define ACP_SCRATCH_REG_53 0x00100D4
+#define ACP_SCRATCH_REG_54 0x00100D8
+#define ACP_SCRATCH_REG_55 0x00100DC
+#define ACP_SCRATCH_REG_56 0x00100E0
+#define ACP_SCRATCH_REG_57 0x00100E4
+#define ACP_SCRATCH_REG_58 0x00100E8
+#define ACP_SCRATCH_REG_59 0x00100EC
+#define ACP_SCRATCH_REG_60 0x00100F0
+#define ACP_SCRATCH_REG_61 0x00100F4
+#define ACP_SCRATCH_REG_62 0x00100F8
+#define ACP_SCRATCH_REG_63 0x00100FC
+#define ACP_SCRATCH_REG_64 0x0010100
+#define ACP_SCRATCH_REG_65 0x0010104
+#define ACP_SCRATCH_REG_66 0x0010108
+#define ACP_SCRATCH_REG_67 0x001010C
+#define ACP_SCRATCH_REG_68 0x0010110
+#define ACP_SCRATCH_REG_69 0x0010114
+#define ACP_SCRATCH_REG_70 0x0010118
+#define ACP_SCRATCH_REG_71 0x001011C
+#define ACP_SCRATCH_REG_72 0x0010120
+#define ACP_SCRATCH_REG_73 0x0010124
+#define ACP_SCRATCH_REG_74 0x0010128
+#define ACP_SCRATCH_REG_75 0x001012C
+#define ACP_SCRATCH_REG_76 0x0010130
+#define ACP_SCRATCH_REG_77 0x0010134
+#define ACP_SCRATCH_REG_78 0x0010138
+#define ACP_SCRATCH_REG_79 0x001013C
+#define ACP_SCRATCH_REG_80 0x0010140
+#define ACP_SCRATCH_REG_81 0x0010144
+#define ACP_SCRATCH_REG_82 0x0010148
+#define ACP_SCRATCH_REG_83 0x001014C
+#define ACP_SCRATCH_REG_84 0x0010150
+#define ACP_SCRATCH_REG_85 0x0010154
+#define ACP_SCRATCH_REG_86 0x0010158
+#define ACP_SCRATCH_REG_87 0x001015C
+#define ACP_SCRATCH_REG_88 0x0010160
+#define ACP_SCRATCH_REG_89 0x0010164
+#define ACP_SCRATCH_REG_90 0x0010168
+#define ACP_SCRATCH_REG_91 0x001016C
+#define ACP_SCRATCH_REG_92 0x0010170
+#define ACP_SCRATCH_REG_93 0x0010174
+#define ACP_SCRATCH_REG_94 0x0010178
+#define ACP_SCRATCH_REG_95 0x001017C
+#define ACP_SCRATCH_REG_96 0x0010180
+#define ACP_SCRATCH_REG_97 0x0010184
+#define ACP_SCRATCH_REG_98 0x0010188
+#define ACP_SCRATCH_REG_99 0x001018C
+#define ACP_SCRATCH_REG_100 0x0010190
+#define ACP_SCRATCH_REG_101 0x0010194
+#define ACP_SCRATCH_REG_102 0x0010198
+#define ACP_SCRATCH_REG_103 0x001019C
+#define ACP_SCRATCH_REG_104 0x00101A0
+#define ACP_SCRATCH_REG_105 0x00101A4
+#define ACP_SCRATCH_REG_106 0x00101A8
+#define ACP_SCRATCH_REG_107 0x00101AC
+#define ACP_SCRATCH_REG_108 0x00101B0
+#define ACP_SCRATCH_REG_109 0x00101B4
+#define ACP_SCRATCH_REG_110 0x00101B8
+#define ACP_SCRATCH_REG_111 0x00101BC
+#define ACP_SCRATCH_REG_112 0x00101C0
+#define ACP_SCRATCH_REG_113 0x00101C4
+#define ACP_SCRATCH_REG_114 0x00101C8
+#define ACP_SCRATCH_REG_115 0x00101CC
+#define ACP_SCRATCH_REG_116 0x00101D0
+#define ACP_SCRATCH_REG_117 0x00101D4
+#define ACP_SCRATCH_REG_118 0x00101D8
+#define ACP_SCRATCH_REG_119 0x00101DC
+#define ACP_SCRATCH_REG_120 0x00101E0
+#define ACP_SCRATCH_REG_121 0x00101E4
+#define ACP_SCRATCH_REG_122 0x00101E8
+#define ACP_SCRATCH_REG_123 0x00101EC
+#define ACP_SCRATCH_REG_124 0x00101F0
+#define ACP_SCRATCH_REG_125 0x00101F4
+#define ACP_SCRATCH_REG_126 0x00101F8
+#define ACP_SCRATCH_REG_127 0x00101FC
+#define ACP_SCRATCH_REG_128 0x0010200
+#endif
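Editor's note: the per-channel register banks above follow a fixed 4-byte stride, so drivers normally index from the channel-0 offset instead of referencing each define by name. A hedged sketch (the mapped acp_base pointer is assumed):

/* Illustrative: per-channel ACP DMA registers are 4 bytes apart. */
static void example_acp_dma_set_prio(void __iomem *acp_base,
				     unsigned int ch, u32 prio)
{
	writel(prio, acp_base + ACP_DMA_PRIO_0 + ch * 4);
}

static u32 example_acp_dma_err_status(void __iomem *acp_base, unsigned int ch)
{
	return readl(acp_base + ACP_DMA_ERR_STS_0 + ch * 4);
}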
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index bc88d6f964da..d91289c6f00e 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -23,7 +23,6 @@ struct snd_compr_ops;
* struct snd_compr_runtime: runtime stream description
* @state: stream state
* @ops: pointer to DSP callbacks
- * @dma_buffer_p: runtime dma buffer pointer
* @buffer: pointer to kernel buffer, valid only when not in mmap mode or
* DSP doesn't implement copy
* @buffer_size: size of the above buffer
@@ -34,11 +33,14 @@ struct snd_compr_ops;
* @total_bytes_transferred: cumulative bytes transferred by offload DSP
* @sleep: poll sleep
* @private_data: driver private data pointer
+ * @dma_area: virtual buffer address
+ * @dma_addr: physical buffer address (not accessible from main CPU)
+ * @dma_bytes: size of DMA area
+ * @dma_buffer_p: runtime dma buffer pointer
*/
struct snd_compr_runtime {
snd_pcm_state_t state;
struct snd_compr_ops *ops;
- struct snd_dma_buffer *dma_buffer_p;
void *buffer;
u64 buffer_size;
u32 fragment_size;
@@ -47,6 +49,11 @@ struct snd_compr_runtime {
u64 total_bytes_transferred;
wait_queue_head_t sleep;
void *private_data;
+
+ unsigned char *dma_area;
+ dma_addr_t dma_addr;
+ size_t dma_bytes;
+ struct snd_dma_buffer *dma_buffer_p;
};
/**
@@ -59,7 +66,10 @@ struct snd_compr_runtime {
* @direction: stream direction, playback/recording
* @metadata_set: metadata set flag, true when set
* @next_track: has userspace signal next track transition, true when set
+ * @partial_drain: undergoing partial_drain for stream, true when set
+ * @pause_in_draining: paused during draining state, true when set
* @private_data: pointer to DSP private data
+ * @dma_buffer: allocated buffer if any
*/
struct snd_compr_stream {
const char *name;
@@ -70,7 +80,10 @@ struct snd_compr_stream {
enum snd_compr_direction direction;
bool metadata_set;
bool next_track;
+ bool partial_drain;
+ bool pause_in_draining;
void *private_data;
+ struct snd_dma_buffer dma_buffer;
};
/**
@@ -131,6 +144,7 @@ struct snd_compr_ops {
* @direction: Playback or capture direction
* @lock: device lock
* @device: device id
+ * @use_pause_in_draining: allow pause in draining, true when set
*/
struct snd_compr {
const char *name;
@@ -141,6 +155,7 @@ struct snd_compr {
unsigned int direction;
struct mutex lock;
int device;
+ bool use_pause_in_draining;
#ifdef CONFIG_SND_VERBOSE_PROCFS
/* private: */
char id[64];
@@ -150,11 +165,21 @@ struct snd_compr {
};
/* compress device register APIs */
-int snd_compress_register(struct snd_compr *device);
-int snd_compress_deregister(struct snd_compr *device);
int snd_compress_new(struct snd_card *card, int device,
int type, const char *id, struct snd_compr *compr);
+/**
+ * snd_compr_use_pause_in_draining - Allow pause and resume in draining state
+ * @substream: compress substream to set
+ *
+ * Allow pause and resume in the draining state.
+ * Only drivers whose hardware supports this transition should call this
+ * API (see the sketch below).
+ */
+static inline void snd_compr_use_pause_in_draining(struct snd_compr_stream *substream)
+{
+ substream->device->use_pause_in_draining = true;
+}
+
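Editor's note: a driver whose DSP can genuinely pause and resume while a drain is pending opts in from its open callback; a hedged sketch:

/* Illustrative open callback for hardware that supports the transition. */
static int example_compr_open(struct snd_compr_stream *stream)
{
	snd_compr_use_pause_in_draining(stream);
	return 0;
}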
/* dsp driver callback apis
* For playback: driver should call snd_compress_fragment_elapsed() to let the
* framework know that a fragment has been consumed from the ring buffer
@@ -173,28 +198,47 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
if (snd_BUG_ON(!stream))
return;
- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+ /* for partial_drain case we are back to running state on success */
+ if (stream->partial_drain) {
+ stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
+ stream->partial_drain = false; /* clear this flag as well */
+ } else {
+ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+ }
wake_up(&stream->runtime->sleep);
}
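Editor's note: a driver calls this from its DSP completion path once the final queued data has been rendered; with the change above, the same call also finishes a partial drain, returning the stream to RUNNING rather than SETUP. A hedged sketch:

/* Illustrative DSP-side completion handler. */
static void example_dsp_drain_done(struct snd_compr_stream *stream)
{
	/* The core picks SETUP or RUNNING based on stream->partial_drain. */
	snd_compr_drain_notify(stream);
}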
/**
* snd_compr_set_runtime_buffer - Set the Compress runtime buffer
- * @substream: compress substream to set
+ * @stream: compress stream to set
* @bufp: the buffer information, NULL to clear
*
* Copy the buffer information to runtime buffer when @bufp is non-NULL.
* Otherwise it clears the current buffer information.
*/
-static inline void snd_compr_set_runtime_buffer(
- struct snd_compr_stream *substream,
- struct snd_dma_buffer *bufp)
+static inline void
+snd_compr_set_runtime_buffer(struct snd_compr_stream *stream,
+ struct snd_dma_buffer *bufp)
{
- struct snd_compr_runtime *runtime = substream->runtime;
-
- runtime->dma_buffer_p = bufp;
+ struct snd_compr_runtime *runtime = stream->runtime;
+
+ if (bufp) {
+ runtime->dma_buffer_p = bufp;
+ runtime->dma_area = bufp->area;
+ runtime->dma_addr = bufp->addr;
+ runtime->dma_bytes = bufp->bytes;
+ } else {
+ runtime->dma_buffer_p = NULL;
+ runtime->dma_area = NULL;
+ runtime->dma_addr = 0;
+ runtime->dma_bytes = 0;
+ }
}
+int snd_compr_malloc_pages(struct snd_compr_stream *stream, size_t size);
+int snd_compr_free_pages(struct snd_compr_stream *stream);
+
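Editor's note: these allocation helpers pair naturally with the set_params callback; a hedged sketch (example_dev is an assumed device pointer the driver must supply):

/* Illustrative set_params callback using the new allocation helpers. */
static int example_compr_set_params(struct snd_compr_stream *stream,
				    struct snd_compr_params *params)
{
	/* Describe the buffer's backing device before allocating. */
	stream->dma_buffer.dev.type = SNDRV_DMA_TYPE_DEV;
	stream->dma_buffer.dev.dev = example_dev; /* assumed device pointer */

	return snd_compr_malloc_pages(stream,
				      params->buffer.fragment_size *
				      params->buffer.fragments);
}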
int snd_compr_stop_error(struct snd_compr_stream *stream,
snd_pcm_state_t state);
diff --git a/include/sound/control.h b/include/sound/control.h
index 11feeee31e35..cc3dcc6cfb0f 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -23,8 +23,8 @@ typedef int (snd_kcontrol_tlv_rw_t)(struct snd_kcontrol *kcontrol,
unsigned int __user *tlv);
/* internal flag for skipping validations */
-#ifdef CONFIG_SND_CTL_VALIDATION
-#define SNDRV_CTL_ELEM_ACCESS_SKIP_CHECK (1 << 27)
+#ifdef CONFIG_SND_CTL_DEBUG
+#define SNDRV_CTL_ELEM_ACCESS_SKIP_CHECK (1 << 24)
#define snd_ctl_skip_validation(info) \
((info)->access & SNDRV_CTL_ELEM_ACCESS_SKIP_CHECK)
#else
@@ -32,6 +32,12 @@ typedef int (snd_kcontrol_tlv_rw_t)(struct snd_kcontrol *kcontrol,
#define snd_ctl_skip_validation(info) true
#endif
+/* kernel only - LED bits */
+#define SNDRV_CTL_ELEM_ACCESS_LED_SHIFT 25
+#define SNDRV_CTL_ELEM_ACCESS_LED_MASK (7<<25) /* kernel three bits - LED group */
+#define SNDRV_CTL_ELEM_ACCESS_SPK_LED (1<<25) /* kernel speaker (output) LED flag */
+#define SNDRV_CTL_ELEM_ACCESS_MIC_LED (2<<25) /* kernel microphone (input) LED flag */
+
enum {
SNDRV_CTL_TLV_OP_READ = 0,
SNDRV_CTL_TLV_OP_WRITE = 1,
@@ -42,7 +48,7 @@ struct snd_kcontrol_new {
snd_ctl_elem_iface_t iface; /* interface identifier */
unsigned int device; /* device/client number */
unsigned int subdevice; /* subdevice (substream) number */
- const unsigned char *name; /* ASCII name of item */
+ const char *name; /* ASCII name of item */
unsigned int index; /* index of item */
unsigned int access; /* access rights */
unsigned int count; /* count of same elements */
@@ -75,7 +81,7 @@ struct snd_kcontrol {
unsigned long private_value;
void *private_data;
void (*private_free)(struct snd_kcontrol *kcontrol);
- struct snd_kcontrol_volatile vd[0]; /* volatile data */
+ struct snd_kcontrol_volatile vd[]; /* volatile data */
};
#define snd_kcontrol(n) list_entry(n, struct snd_kcontrol, list)
@@ -103,11 +109,19 @@ struct snd_ctl_file {
int preferred_subdevice[SND_CTL_SUBDEV_ITEMS];
wait_queue_head_t change_sleep;
spinlock_t read_lock;
- struct fasync_struct *fasync;
+ struct snd_fasync *fasync;
int subscribed; /* read interface is activated */
struct list_head events; /* waiting events for read */
};
+struct snd_ctl_layer_ops {
+ struct snd_ctl_layer_ops *next;
+ const char *module_name;
+ void (*lregister)(struct snd_card *card);
+ void (*ldisconnect)(struct snd_card *card);
+ void (*lnotify)(struct snd_card *card, unsigned int mask, struct snd_kcontrol *kctl, unsigned int ioff);
+};
+
#define snd_ctl_file(n) list_entry(n, struct snd_ctl_file, list)
typedef int (*snd_kctl_ioctl_func_t) (struct snd_card * card,
@@ -115,6 +129,7 @@ typedef int (*snd_kctl_ioctl_func_t) (struct snd_card * card,
unsigned int cmd, unsigned long arg);
void snd_ctl_notify(struct snd_card * card, unsigned int mask, struct snd_ctl_elem_id * id);
+void snd_ctl_notify_one(struct snd_card * card, unsigned int mask, struct snd_kcontrol * kctl, unsigned int ioff);
struct snd_kcontrol *snd_ctl_new1(const struct snd_kcontrol_new * kcontrolnew, void * private_data);
void snd_ctl_free_one(struct snd_kcontrol * kcontrol);
@@ -123,8 +138,8 @@ int snd_ctl_remove(struct snd_card * card, struct snd_kcontrol * kcontrol);
int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol, bool add_on_replace);
int snd_ctl_remove_id(struct snd_card * card, struct snd_ctl_elem_id *id);
int snd_ctl_rename_id(struct snd_card * card, struct snd_ctl_elem_id *src_id, struct snd_ctl_elem_id *dst_id);
-int snd_ctl_activate_id(struct snd_card *card, struct snd_ctl_elem_id *id,
- int active);
+void snd_ctl_rename(struct snd_card *card, struct snd_kcontrol *kctl, const char *name);
+int snd_ctl_activate_id(struct snd_card *card, struct snd_ctl_elem_id *id, int active);
struct snd_kcontrol *snd_ctl_find_numid(struct snd_card * card, unsigned int numid);
struct snd_kcontrol *snd_ctl_find_id(struct snd_card * card, struct snd_ctl_elem_id *id);
@@ -140,6 +155,10 @@ int snd_ctl_unregister_ioctl_compat(snd_kctl_ioctl_func_t fcn);
#define snd_ctl_unregister_ioctl_compat(fcn)
#endif
+int snd_ctl_request_layer(const char *module_name);
+void snd_ctl_register_layer(struct snd_ctl_layer_ops *lops);
+void snd_ctl_disconnect_layer(struct snd_ctl_layer_ops *lops);
+
int snd_ctl_get_preferred_subdevice(struct snd_card *card, int type);
static inline unsigned int snd_ctl_get_ioffnum(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
@@ -188,20 +207,21 @@ int snd_ctl_enum_info(struct snd_ctl_elem_info *info, unsigned int channels,
*/
struct snd_kcontrol *snd_ctl_make_virtual_master(char *name,
const unsigned int *tlv);
-int _snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave,
- unsigned int flags);
-/* optional flags for slave */
-#define SND_CTL_SLAVE_NEED_UPDATE (1 << 0)
+int _snd_ctl_add_follower(struct snd_kcontrol *master,
+ struct snd_kcontrol *follower,
+ unsigned int flags);
+/* optional flags for follower */
+#define SND_CTL_FOLLOWER_NEED_UPDATE (1 << 0)
/**
- * snd_ctl_add_slave - Add a virtual slave control
+ * snd_ctl_add_follower - Add a virtual follower control
* @master: vmaster element
- * @slave: slave element to add
+ * @follower: follower element to add
*
- * Add a virtual slave control to the given master element created via
+ * Add a virtual follower control to the given master element created via
* snd_ctl_create_virtual_master() beforehand.
*
- * All slaves must be the same type (returning the same information
+ * All followers must be the same type (returning the same information
* via info callback). The function doesn't check it, so it's your
* responsibility.
*
@@ -213,18 +233,18 @@ int _snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave,
* Return: Zero if successful or a negative error code.
*/
static inline int
-snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave)
+snd_ctl_add_follower(struct snd_kcontrol *master, struct snd_kcontrol *follower)
{
- return _snd_ctl_add_slave(master, slave, 0);
+ return _snd_ctl_add_follower(master, follower, 0);
}
/**
- * snd_ctl_add_slave_uncached - Add a virtual slave control
+ * snd_ctl_add_follower_uncached - Add a virtual follower control
* @master: vmaster element
- * @slave: slave element to add
+ * @follower: follower element to add
*
- * Add a virtual slave control to the given master.
- * Unlike snd_ctl_add_slave(), the element added via this function
+ * Add a virtual follower control to the given master.
+ * Unlike snd_ctl_add_follower(), the element added via this function
* is supposed to have volatile values, and get callback is called
* at each time queried from the master.
*
@@ -235,10 +255,10 @@ snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave)
* Return: Zero if successful or a negative error code.
*/
static inline int
-snd_ctl_add_slave_uncached(struct snd_kcontrol *master,
- struct snd_kcontrol *slave)
+snd_ctl_add_follower_uncached(struct snd_kcontrol *master,
+ struct snd_kcontrol *follower)
{
- return _snd_ctl_add_slave(master, slave, SND_CTL_SLAVE_NEED_UPDATE);
+ return _snd_ctl_add_follower(master, follower, SND_CTL_FOLLOWER_NEED_UPDATE);
}
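Editor's note: typical use after the rename is to create one virtual master and attach each per-path control as a follower; a hedged sketch (control names are illustrative):

static int example_attach_vmaster(struct snd_card *card,
				  struct snd_kcontrol *front,
				  struct snd_kcontrol *headphone)
{
	struct snd_kcontrol *master;
	int err;

	master = snd_ctl_make_virtual_master("Master Playback Volume", NULL);
	if (!master)
		return -ENOMEM;

	err = snd_ctl_add(card, master);
	if (err < 0)
		return err;

	err = snd_ctl_add_follower(master, front);
	if (err < 0)
		return err;

	return snd_ctl_add_follower(master, headphone);
}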
int snd_ctl_add_vmaster_hook(struct snd_kcontrol *kctl,
@@ -246,11 +266,22 @@ int snd_ctl_add_vmaster_hook(struct snd_kcontrol *kctl,
void *private_data);
void snd_ctl_sync_vmaster(struct snd_kcontrol *kctl, bool hook_only);
#define snd_ctl_sync_vmaster_hook(kctl) snd_ctl_sync_vmaster(kctl, true)
-int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
- int (*func)(struct snd_kcontrol *vslave,
- struct snd_kcontrol *slave,
- void *arg),
- void *arg);
+int snd_ctl_apply_vmaster_followers(struct snd_kcontrol *kctl,
+ int (*func)(struct snd_kcontrol *vfollower,
+ struct snd_kcontrol *follower,
+ void *arg),
+ void *arg);
+
+/*
+ * Control LED trigger layer
+ */
+#define SND_CTL_LAYER_MODULE_LED "snd-ctl-led"
+
+#if IS_MODULE(CONFIG_SND_CTL_LED)
+static inline int snd_ctl_led_request(void) { return snd_ctl_request_layer(SND_CTL_LAYER_MODULE_LED); }
+#else
+static inline int snd_ctl_led_request(void) { return 0; }
+#endif
/*
* Helper functions for jack-detection controls
diff --git a/include/sound/core.h b/include/sound/core.h
index ac8b692b69b4..4365c35d038b 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -14,6 +14,7 @@
#include <linux/pm.h> /* pm_message_t */
#include <linux/stringify.h>
#include <linux/printk.h>
+#include <linux/xarray.h>
/* number of supported soundcards */
#ifdef CONFIG_SND_DYNAMIC_MINORS
@@ -100,9 +101,14 @@ struct snd_card {
struct rw_semaphore controls_rwsem; /* controls list lock */
rwlock_t ctl_files_rwlock; /* ctl_files list lock */
int controls_count; /* count of all controls */
- int user_ctl_count; /* count of all user controls */
+ size_t user_ctl_alloc_size; // current memory allocation by user controls.
struct list_head controls; /* all controls for this card */
struct list_head ctl_files; /* active control files */
+#ifdef CONFIG_SND_CTL_FAST_LOOKUP
+ struct xarray ctl_numids; /* hash table for numids */
+ struct xarray ctl_hash; /* hash table for ctl id matching */
+ bool ctl_hash_collision; /* ctl_hash collision seen? */
+#endif
struct snd_info_entry *proc_root; /* root for soundcard specific files */
struct proc_dir_entry *proc_root_link; /* number link to real id */
@@ -117,15 +123,22 @@ struct snd_card {
struct device card_dev; /* cardX object for sysfs */
const struct attribute_group *dev_groups[4]; /* assigned sysfs attr */
bool registered; /* card_dev is registered? */
+ bool managed; /* managed via devres */
+ bool releasing; /* during card free process */
int sync_irq; /* assigned irq, used for PCM sync */
wait_queue_head_t remove_sleep;
size_t total_pcm_alloc_bytes; /* total amount of allocated buffers */
struct mutex memory_mutex; /* protection for the above */
+#ifdef CONFIG_SND_DEBUG
+ struct dentry *debugfs_root; /* debugfs root for card */
+#endif
#ifdef CONFIG_PM
unsigned int power_state; /* power state */
+ atomic_t power_ref;
wait_queue_head_t power_sleep;
+ wait_queue_head_t power_ref_sleep;
#endif
#if IS_ENABLED(CONFIG_SND_MIXER_OSS)
@@ -139,21 +152,61 @@ struct snd_card {
#ifdef CONFIG_PM
static inline unsigned int snd_power_get_state(struct snd_card *card)
{
- return card->power_state;
+ return READ_ONCE(card->power_state);
}
static inline void snd_power_change_state(struct snd_card *card, unsigned int state)
{
- card->power_state = state;
+ WRITE_ONCE(card->power_state, state);
wake_up(&card->power_sleep);
}
+/**
+ * snd_power_ref - Take the reference count for power control
+ * @card: sound card object
+ *
+ * The power_ref reference count of the card is used to block the
+ * snd_power_sync_ref() operation while accesses are in flight. This
+ * function increments the reference count.
+ * The counterpart snd_power_unref() has to be called appropriately later.
+ */
+static inline void snd_power_ref(struct snd_card *card)
+{
+ atomic_inc(&card->power_ref);
+}
+
+/**
+ * snd_power_unref - Release the reference count for power control
+ * @card: sound card object
+ */
+static inline void snd_power_unref(struct snd_card *card)
+{
+ if (atomic_dec_and_test(&card->power_ref))
+ wake_up(&card->power_ref_sleep);
+}
+
+/**
+ * snd_power_sync_ref - wait until the card power_ref is freed
+ * @card: sound card object
+ *
+ * This function blocks until all pending power_ref references have
+ * been released.
+ */
+static inline void snd_power_sync_ref(struct snd_card *card)
+{
+ wait_event(card->power_ref_sleep, !atomic_read(&card->power_ref));
+}
+
/* init.c */
-int snd_power_wait(struct snd_card *card, unsigned int power_state);
+int snd_power_wait(struct snd_card *card);
+int snd_power_ref_and_wait(struct snd_card *card);
#else /* ! CONFIG_PM */
-static inline int snd_power_wait(struct snd_card *card, unsigned int state) { return 0; }
+static inline int snd_power_wait(struct snd_card *card) { return 0; }
+static inline void snd_power_ref(struct snd_card *card) {}
+static inline void snd_power_unref(struct snd_card *card) {}
+static inline int snd_power_ref_and_wait(struct snd_card *card) { return 0; }
+static inline void snd_power_sync_ref(struct snd_card *card) {}
#define snd_power_get_state(card) ({ (void)(card); SNDRV_CTL_POWER_D0; })
#define snd_power_change_state(card, state) do { (void)(card); } while (0)
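
For illustration only (not part of this patch), a guarded access with the new
reference counting might look like this; example_do_io() is hypothetical:

	/* take a power reference and wait until the card is powered up */
	err = snd_power_ref_and_wait(card);
	if (err < 0)
		return err;
	err = example_do_io(card);	/* hypothetical I/O on the card */
	snd_power_unref(card);
	/* a suspend path calls snd_power_sync_ref(card) to wait for users */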
@@ -180,6 +233,9 @@ static inline struct device *snd_card_get_device_link(struct snd_card *card)
extern int snd_major;
extern int snd_ecards_limit;
extern struct class *sound_class;
+#ifdef CONFIG_SND_DEBUG
+extern struct dentry *sound_debugfs_root;
+#endif
void snd_request_card(int card);
@@ -226,11 +282,15 @@ extern int (*snd_mixer_oss_notify_callback)(struct snd_card *card, int cmd);
int snd_card_new(struct device *parent, int idx, const char *xid,
struct module *module, int extra_size,
struct snd_card **card_ret);
+int snd_devm_card_new(struct device *parent, int idx, const char *xid,
+ struct module *module, size_t extra_size,
+ struct snd_card **card_ret);
int snd_card_disconnect(struct snd_card *card);
void snd_card_disconnect_sync(struct snd_card *card);
int snd_card_free(struct snd_card *card);
int snd_card_free_when_closed(struct snd_card *card);
+int snd_card_free_on_error(struct device *dev, int ret);
void snd_card_set_id(struct snd_card *card, const char *id);
int snd_card_register(struct snd_card *card);
int snd_card_info_init(void);
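
For illustration only (not part of this patch), a devres-managed probe might
pair snd_devm_card_new() with snd_card_free_on_error(); example_probe() and
the index/id arguments are assumptions:

	static int example_probe(struct platform_device *pdev)
	{
		struct snd_card *card;
		int err;

		/* the card's lifetime is tied to pdev->dev via devres */
		err = snd_devm_card_new(&pdev->dev, -1, NULL, THIS_MODULE,
					0, &card);
		if (err < 0)
			return err;
		/* ... create mixers and PCMs on the card here ... */
		err = snd_card_register(card);
		/* on error this also releases the devres-managed card */
		return snd_card_free_on_error(&pdev->dev, err);
	}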
@@ -266,6 +326,7 @@ void snd_device_disconnect(struct snd_card *card, void *device_data);
void snd_device_disconnect_all(struct snd_card *card);
void snd_device_free(struct snd_card *card, void *device_data);
void snd_device_free_all(struct snd_card *card);
+int snd_device_get_state(struct snd_card *card, void *device_data);
/* isadma.c */
@@ -275,6 +336,7 @@ void snd_device_free_all(struct snd_card *card);
void snd_dma_program(unsigned long dma, unsigned long addr, unsigned int size, unsigned short mode);
void snd_dma_disable(unsigned long dma);
unsigned int snd_dma_pointer(unsigned long dma, unsigned int size);
+int snd_devm_request_dma(struct device *dev, int dma, const char *name);
#endif
/* misc.c */
@@ -331,7 +393,8 @@ void __snd_printk(unsigned int level, const char *file, int line,
#define snd_BUG() WARN(1, "BUG?\n")
/**
- * Suppress high rates of output when CONFIG_SND_DEBUG is enabled.
+ * snd_printd_ratelimit - Suppress high rates of output when
+ * CONFIG_SND_DEBUG is enabled.
*/
#define snd_printd_ratelimit() printk_ratelimit()
@@ -444,4 +507,12 @@ snd_pci_quirk_lookup_id(u16 vendor, u16 device,
}
#endif
+/* async signal helpers */
+struct snd_fasync;
+
+int snd_fasync_helper(int fd, struct file *file, int on,
+ struct snd_fasync **fasyncp);
+void snd_kill_fasync(struct snd_fasync *fasync, int signal, int poll);
+void snd_fasync_free(struct snd_fasync *fasync);
+
#endif /* __SOUND_CORE_H */
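
For illustration only (not part of this patch), these helpers mirror
fasync_helper()/kill_fasync() but defer the actual kill so the notification
can be issued from atomic context; example_dev and its fasync field are
hypothetical:

	static int example_fasync(int fd, struct file *file, int on)
	{
		struct example_dev *edev = file->private_data;

		return snd_fasync_helper(fd, file, on, &edev->fasync);
	}

	/* notification path, may run in atomic context:	*/
	/*	snd_kill_fasync(edev->fasync, SIGIO, POLL_IN);	*/
	/* teardown:						*/
	/*	snd_fasync_free(edev->fasync);			*/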
diff --git a/include/sound/cs35l41.h b/include/sound/cs35l41.h
new file mode 100644
index 000000000000..9ac5918269a5
--- /dev/null
+++ b/include/sound/cs35l41.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * linux/sound/cs35l41.h -- Platform data for CS35L41
+ *
+ * Copyright (c) 2017-2021 Cirrus Logic Inc.
+ *
+ * Author: David Rhodes <david.rhodes@cirrus.com>
+ */
+
+#ifndef __CS35L41_H
+#define __CS35L41_H
+
+#include <linux/regmap.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+
+#define CS35L41_FIRSTREG 0x00000000
+#define CS35L41_LASTREG 0x03804FE8
+#define CS35L41_DEVID 0x00000000
+#define CS35L41_REVID 0x00000004
+#define CS35L41_FABID 0x00000008
+#define CS35L41_RELID 0x0000000C
+#define CS35L41_OTPID 0x00000010
+#define CS35L41_SFT_RESET 0x00000020
+#define CS35L41_TEST_KEY_CTL 0x00000040
+#define CS35L41_USER_KEY_CTL 0x00000044
+#define CS35L41_OTP_MEM0 0x00000400
+#define CS35L41_OTP_MEM31 0x0000047C
+#define CS35L41_OTP_CTRL0 0x00000500
+#define CS35L41_OTP_CTRL1 0x00000504
+#define CS35L41_OTP_CTRL3 0x00000508
+#define CS35L41_OTP_CTRL4 0x0000050C
+#define CS35L41_OTP_CTRL5 0x00000510
+#define CS35L41_OTP_CTRL6 0x00000514
+#define CS35L41_OTP_CTRL7 0x00000518
+#define CS35L41_OTP_CTRL8 0x0000051C
+#define CS35L41_PWR_CTRL1 0x00002014
+#define CS35L41_PWR_CTRL2 0x00002018
+#define CS35L41_PWR_CTRL3 0x0000201C
+#define CS35L41_CTRL_OVRRIDE 0x00002020
+#define CS35L41_AMP_OUT_MUTE 0x00002024
+#define CS35L41_PROTECT_REL_ERR_IGN 0x00002034
+#define CS35L41_GPIO_PAD_CONTROL 0x0000242C
+#define CS35L41_JTAG_CONTROL 0x00002438
+#define CS35L41_PWRMGT_CTL 0x00002900
+#define CS35L41_WAKESRC_CTL 0x00002904
+#define CS35L41_PWRMGT_STS 0x00002908
+#define CS35L41_PLL_CLK_CTRL 0x00002C04
+#define CS35L41_DSP_CLK_CTRL 0x00002C08
+#define CS35L41_GLOBAL_CLK_CTRL 0x00002C0C
+#define CS35L41_DATA_FS_SEL 0x00002C10
+#define CS35L41_TST_FS_MON0 0x00002D10
+#define CS35L41_MDSYNC_EN 0x00003400
+#define CS35L41_MDSYNC_TX_ID 0x00003408
+#define CS35L41_MDSYNC_PWR_CTRL 0x0000340C
+#define CS35L41_MDSYNC_DATA_TX 0x00003410
+#define CS35L41_MDSYNC_TX_STATUS 0x00003414
+#define CS35L41_MDSYNC_DATA_RX 0x0000341C
+#define CS35L41_MDSYNC_RX_STATUS 0x00003420
+#define CS35L41_MDSYNC_ERR_STATUS 0x00003424
+#define CS35L41_MDSYNC_SYNC_PTE2 0x00003528
+#define CS35L41_MDSYNC_SYNC_PTE3 0x0000352C
+#define CS35L41_MDSYNC_SYNC_MSM_STATUS 0x0000353C
+#define CS35L41_BSTCVRT_VCTRL1 0x00003800
+#define CS35L41_BSTCVRT_VCTRL2 0x00003804
+#define CS35L41_BSTCVRT_PEAK_CUR 0x00003808
+#define CS35L41_BSTCVRT_SFT_RAMP 0x0000380C
+#define CS35L41_BSTCVRT_COEFF 0x00003810
+#define CS35L41_BSTCVRT_SLOPE_LBST 0x00003814
+#define CS35L41_BSTCVRT_SW_FREQ 0x00003818
+#define CS35L41_BSTCVRT_DCM_CTRL 0x0000381C
+#define CS35L41_BSTCVRT_DCM_MODE_FORCE 0x00003820
+#define CS35L41_BSTCVRT_OVERVOLT_CTRL 0x00003830
+#define CS35L41_VI_VOL_POL 0x00004000
+#define CS35L41_VIMON_SPKMON_RESYNC 0x00004100
+#define CS35L41_DTEMP_WARN_THLD 0x00004220
+#define CS35L41_DTEMP_CFG 0x00004224
+#define CS35L41_DTEMP_EN 0x00004308
+#define CS35L41_VPVBST_FS_SEL 0x00004400
+#define CS35L41_SP_ENABLES 0x00004800
+#define CS35L41_SP_RATE_CTRL 0x00004804
+#define CS35L41_SP_FORMAT 0x00004808
+#define CS35L41_SP_HIZ_CTRL 0x0000480C
+#define CS35L41_SP_FRAME_TX_SLOT 0x00004810
+#define CS35L41_SP_FRAME_RX_SLOT 0x00004820
+#define CS35L41_SP_TX_WL 0x00004830
+#define CS35L41_SP_RX_WL 0x00004840
+#define CS35L41_ASP_CONTROL4 0x00004854
+#define CS35L41_DAC_PCM1_SRC 0x00004C00
+#define CS35L41_ASP_TX1_SRC 0x00004C20
+#define CS35L41_ASP_TX2_SRC 0x00004C24
+#define CS35L41_ASP_TX3_SRC 0x00004C28
+#define CS35L41_ASP_TX4_SRC 0x00004C2C
+#define CS35L41_DSP1_RX1_SRC 0x00004C40
+#define CS35L41_DSP1_RX2_SRC 0x00004C44
+#define CS35L41_DSP1_RX3_SRC 0x00004C48
+#define CS35L41_DSP1_RX4_SRC 0x00004C4C
+#define CS35L41_DSP1_RX5_SRC 0x00004C50
+#define CS35L41_DSP1_RX6_SRC 0x00004C54
+#define CS35L41_DSP1_RX7_SRC 0x00004C58
+#define CS35L41_DSP1_RX8_SRC 0x00004C5C
+#define CS35L41_NGATE1_SRC 0x00004C60
+#define CS35L41_NGATE2_SRC 0x00004C64
+#define CS35L41_AMP_DIG_VOL_CTRL 0x00006000
+#define CS35L41_VPBR_CFG 0x00006404
+#define CS35L41_VBBR_CFG 0x00006408
+#define CS35L41_VPBR_STATUS 0x0000640C
+#define CS35L41_VBBR_STATUS 0x00006410
+#define CS35L41_OVERTEMP_CFG 0x00006414
+#define CS35L41_AMP_ERR_VOL 0x00006418
+#define CS35L41_VOL_STATUS_TO_DSP 0x00006450
+#define CS35L41_CLASSH_CFG 0x00006800
+#define CS35L41_WKFET_CFG 0x00006804
+#define CS35L41_NG_CFG 0x00006808
+#define CS35L41_AMP_GAIN_CTRL 0x00006C04
+#define CS35L41_DAC_MSM_CFG 0x00007400
+#define CS35L41_IRQ1_CFG 0x00010000
+#define CS35L41_IRQ1_STATUS 0x00010004
+#define CS35L41_IRQ1_STATUS1 0x00010010
+#define CS35L41_IRQ1_STATUS2 0x00010014
+#define CS35L41_IRQ1_STATUS3 0x00010018
+#define CS35L41_IRQ1_STATUS4 0x0001001C
+#define CS35L41_IRQ1_RAW_STATUS1 0x00010090
+#define CS35L41_IRQ1_RAW_STATUS2 0x00010094
+#define CS35L41_IRQ1_RAW_STATUS3 0x00010098
+#define CS35L41_IRQ1_RAW_STATUS4 0x0001009C
+#define CS35L41_IRQ1_MASK1 0x00010110
+#define CS35L41_IRQ1_MASK2 0x00010114
+#define CS35L41_IRQ1_MASK3 0x00010118
+#define CS35L41_IRQ1_MASK4 0x0001011C
+#define CS35L41_IRQ1_FRC1 0x00010190
+#define CS35L41_IRQ1_FRC2 0x00010194
+#define CS35L41_IRQ1_FRC3 0x00010198
+#define CS35L41_IRQ1_FRC4 0x0001019C
+#define CS35L41_IRQ1_EDGE1 0x00010210
+#define CS35L41_IRQ1_EDGE4 0x0001021C
+#define CS35L41_IRQ1_POL1 0x00010290
+#define CS35L41_IRQ1_POL2 0x00010294
+#define CS35L41_IRQ1_POL3 0x00010298
+#define CS35L41_IRQ1_POL4 0x0001029C
+#define CS35L41_IRQ1_DB3 0x00010318
+#define CS35L41_IRQ2_CFG 0x00010800
+#define CS35L41_IRQ2_STATUS 0x00010804
+#define CS35L41_IRQ2_STATUS1 0x00010810
+#define CS35L41_IRQ2_STATUS2 0x00010814
+#define CS35L41_IRQ2_STATUS3 0x00010818
+#define CS35L41_IRQ2_STATUS4 0x0001081C
+#define CS35L41_IRQ2_RAW_STATUS1 0x00010890
+#define CS35L41_IRQ2_RAW_STATUS2 0x00010894
+#define CS35L41_IRQ2_RAW_STATUS3 0x00010898
+#define CS35L41_IRQ2_RAW_STATUS4 0x0001089C
+#define CS35L41_IRQ2_MASK1 0x00010910
+#define CS35L41_IRQ2_MASK2 0x00010914
+#define CS35L41_IRQ2_MASK3 0x00010918
+#define CS35L41_IRQ2_MASK4 0x0001091C
+#define CS35L41_IRQ2_FRC1 0x00010990
+#define CS35L41_IRQ2_FRC2 0x00010994
+#define CS35L41_IRQ2_FRC3 0x00010998
+#define CS35L41_IRQ2_FRC4 0x0001099C
+#define CS35L41_IRQ2_EDGE1 0x00010A10
+#define CS35L41_IRQ2_EDGE4 0x00010A1C
+#define CS35L41_IRQ2_POL1 0x00010A90
+#define CS35L41_IRQ2_POL2 0x00010A94
+#define CS35L41_IRQ2_POL3 0x00010A98
+#define CS35L41_IRQ2_POL4 0x00010A9C
+#define CS35L41_IRQ2_DB3 0x00010B18
+#define CS35L41_GPIO_STATUS1 0x00011000
+#define CS35L41_GPIO1_CTRL1 0x00011008
+#define CS35L41_GPIO2_CTRL1 0x0001100C
+#define CS35L41_MIXER_NGATE_CFG 0x00012000
+#define CS35L41_MIXER_NGATE_CH1_CFG 0x00012004
+#define CS35L41_MIXER_NGATE_CH2_CFG 0x00012008
+#define CS35L41_DSP_MBOX_1 0x00013000
+#define CS35L41_DSP_MBOX_2 0x00013004
+#define CS35L41_DSP_MBOX_3 0x00013008
+#define CS35L41_DSP_MBOX_4 0x0001300C
+#define CS35L41_DSP_MBOX_5 0x00013010
+#define CS35L41_DSP_MBOX_6 0x00013014
+#define CS35L41_DSP_MBOX_7 0x00013018
+#define CS35L41_DSP_MBOX_8 0x0001301C
+#define CS35L41_DSP_VIRT1_MBOX_1 0x00013020
+#define CS35L41_DSP_VIRT1_MBOX_2 0x00013024
+#define CS35L41_DSP_VIRT1_MBOX_3 0x00013028
+#define CS35L41_DSP_VIRT1_MBOX_4 0x0001302C
+#define CS35L41_DSP_VIRT1_MBOX_5 0x00013030
+#define CS35L41_DSP_VIRT1_MBOX_6 0x00013034
+#define CS35L41_DSP_VIRT1_MBOX_7 0x00013038
+#define CS35L41_DSP_VIRT1_MBOX_8 0x0001303C
+#define CS35L41_DSP_VIRT2_MBOX_1 0x00013040
+#define CS35L41_DSP_VIRT2_MBOX_2 0x00013044
+#define CS35L41_DSP_VIRT2_MBOX_3 0x00013048
+#define CS35L41_DSP_VIRT2_MBOX_4 0x0001304C
+#define CS35L41_DSP_VIRT2_MBOX_5 0x00013050
+#define CS35L41_DSP_VIRT2_MBOX_6 0x00013054
+#define CS35L41_DSP_VIRT2_MBOX_7 0x00013058
+#define CS35L41_DSP_VIRT2_MBOX_8 0x0001305C
+#define CS35L41_CLOCK_DETECT_1 0x00014000
+#define CS35L41_TIMER1_CONTROL 0x00015000
+#define CS35L41_TIMER1_COUNT_PRESET 0x00015004
+#define CS35L41_TIMER1_START_STOP 0x0001500C
+#define CS35L41_TIMER1_STATUS 0x00015010
+#define CS35L41_TIMER1_COUNT_READBACK 0x00015014
+#define CS35L41_TIMER1_DSP_CLK_CFG 0x00015018
+#define CS35L41_TIMER1_DSP_CLK_STATUS 0x0001501C
+#define CS35L41_TIMER2_CONTROL 0x00015100
+#define CS35L41_TIMER2_COUNT_PRESET 0x00015104
+#define CS35L41_TIMER2_START_STOP 0x0001510C
+#define CS35L41_TIMER2_STATUS 0x00015110
+#define CS35L41_TIMER2_COUNT_READBACK 0x00015114
+#define CS35L41_TIMER2_DSP_CLK_CFG 0x00015118
+#define CS35L41_TIMER2_DSP_CLK_STATUS 0x0001511C
+#define CS35L41_DFT_JTAG_CONTROL 0x00016000
+#define CS35L41_DIE_STS1 0x00017040
+#define CS35L41_DIE_STS2 0x00017044
+#define CS35L41_TEMP_CAL1 0x00017048
+#define CS35L41_TEMP_CAL2 0x0001704C
+#define CS35L41_DSP1_XMEM_PACK_0 0x02000000
+#define CS35L41_DSP1_XMEM_PACK_3068 0x02002FF0
+#define CS35L41_DSP1_XMEM_UNPACK32_0 0x02400000
+#define CS35L41_DSP1_XMEM_UNPACK32_2046 0x02401FF8
+#define CS35L41_DSP1_TIMESTAMP_COUNT 0x025C0800
+#define CS35L41_DSP1_SYS_ID 0x025E0000
+#define CS35L41_DSP1_SYS_VERSION 0x025E0004
+#define CS35L41_DSP1_SYS_CORE_ID 0x025E0008
+#define CS35L41_DSP1_SYS_AHB_ADDR 0x025E000C
+#define CS35L41_DSP1_SYS_XSRAM_SIZE 0x025E0010
+#define CS35L41_DSP1_SYS_YSRAM_SIZE 0x025E0018
+#define CS35L41_DSP1_SYS_PSRAM_SIZE 0x025E0020
+#define CS35L41_DSP1_SYS_PM_BOOT_SIZE 0x025E0028
+#define CS35L41_DSP1_SYS_FEATURES 0x025E002C
+#define CS35L41_DSP1_SYS_FIR_FILTERS 0x025E0030
+#define CS35L41_DSP1_SYS_LMS_FILTERS 0x025E0034
+#define CS35L41_DSP1_SYS_XM_BANK_SIZE 0x025E0038
+#define CS35L41_DSP1_SYS_YM_BANK_SIZE 0x025E003C
+#define CS35L41_DSP1_SYS_PM_BANK_SIZE 0x025E0040
+#define CS35L41_DSP1_AHBM_WIN0_CTRL0 0x025E2000
+#define CS35L41_DSP1_AHBM_WIN0_CTRL1 0x025E2004
+#define CS35L41_DSP1_AHBM_WIN1_CTRL0 0x025E2008
+#define CS35L41_DSP1_AHBM_WIN1_CTRL1 0x025E200C
+#define CS35L41_DSP1_AHBM_WIN2_CTRL0 0x025E2010
+#define CS35L41_DSP1_AHBM_WIN2_CTRL1 0x025E2014
+#define CS35L41_DSP1_AHBM_WIN3_CTRL0 0x025E2018
+#define CS35L41_DSP1_AHBM_WIN3_CTRL1 0x025E201C
+#define CS35L41_DSP1_AHBM_WIN4_CTRL0 0x025E2020
+#define CS35L41_DSP1_AHBM_WIN4_CTRL1 0x025E2024
+#define CS35L41_DSP1_AHBM_WIN5_CTRL0 0x025E2028
+#define CS35L41_DSP1_AHBM_WIN5_CTRL1 0x025E202C
+#define CS35L41_DSP1_AHBM_WIN6_CTRL0 0x025E2030
+#define CS35L41_DSP1_AHBM_WIN6_CTRL1 0x025E2034
+#define CS35L41_DSP1_AHBM_WIN7_CTRL0 0x025E2038
+#define CS35L41_DSP1_AHBM_WIN7_CTRL1 0x025E203C
+#define CS35L41_DSP1_AHBM_WIN_DBG_CTRL0 0x025E2040
+#define CS35L41_DSP1_AHBM_WIN_DBG_CTRL1 0x025E2044
+#define CS35L41_DSP1_XMEM_UNPACK24_0 0x02800000
+#define CS35L41_DSP1_XMEM_UNPACK24_4093 0x02803FF4
+#define CS35L41_DSP1_CTRL_BASE 0x02B80000
+#define CS35L41_DSP1_CORE_SOFT_RESET 0x02B80010
+#define CS35L41_DSP1_DEBUG 0x02B80040
+#define CS35L41_DSP1_TIMER_CTRL 0x02B80048
+#define CS35L41_DSP1_STREAM_ARB_CTRL 0x02B80050
+#define CS35L41_DSP1_RX1_RATE 0x02B80080
+#define CS35L41_DSP1_RX2_RATE 0x02B80088
+#define CS35L41_DSP1_RX3_RATE 0x02B80090
+#define CS35L41_DSP1_RX4_RATE 0x02B80098
+#define CS35L41_DSP1_RX5_RATE 0x02B800A0
+#define CS35L41_DSP1_RX6_RATE 0x02B800A8
+#define CS35L41_DSP1_RX7_RATE 0x02B800B0
+#define CS35L41_DSP1_RX8_RATE 0x02B800B8
+#define CS35L41_DSP1_TX1_RATE 0x02B80280
+#define CS35L41_DSP1_TX2_RATE 0x02B80288
+#define CS35L41_DSP1_TX3_RATE 0x02B80290
+#define CS35L41_DSP1_TX4_RATE 0x02B80298
+#define CS35L41_DSP1_TX5_RATE 0x02B802A0
+#define CS35L41_DSP1_TX6_RATE 0x02B802A8
+#define CS35L41_DSP1_TX7_RATE 0x02B802B0
+#define CS35L41_DSP1_TX8_RATE 0x02B802B8
+#define CS35L41_DSP1_NMI_CTRL1 0x02B80480
+#define CS35L41_DSP1_NMI_CTRL2 0x02B80488
+#define CS35L41_DSP1_NMI_CTRL3 0x02B80490
+#define CS35L41_DSP1_NMI_CTRL4 0x02B80498
+#define CS35L41_DSP1_NMI_CTRL5 0x02B804A0
+#define CS35L41_DSP1_NMI_CTRL6 0x02B804A8
+#define CS35L41_DSP1_NMI_CTRL7 0x02B804B0
+#define CS35L41_DSP1_NMI_CTRL8 0x02B804B8
+#define CS35L41_DSP1_RESUME_CTRL 0x02B80500
+#define CS35L41_DSP1_IRQ1_CTRL 0x02B80508
+#define CS35L41_DSP1_IRQ2_CTRL 0x02B80510
+#define CS35L41_DSP1_IRQ3_CTRL 0x02B80518
+#define CS35L41_DSP1_IRQ4_CTRL 0x02B80520
+#define CS35L41_DSP1_IRQ5_CTRL 0x02B80528
+#define CS35L41_DSP1_IRQ6_CTRL 0x02B80530
+#define CS35L41_DSP1_IRQ7_CTRL 0x02B80538
+#define CS35L41_DSP1_IRQ8_CTRL 0x02B80540
+#define CS35L41_DSP1_IRQ9_CTRL 0x02B80548
+#define CS35L41_DSP1_IRQ10_CTRL 0x02B80550
+#define CS35L41_DSP1_IRQ11_CTRL 0x02B80558
+#define CS35L41_DSP1_IRQ12_CTRL 0x02B80560
+#define CS35L41_DSP1_IRQ13_CTRL 0x02B80568
+#define CS35L41_DSP1_IRQ14_CTRL 0x02B80570
+#define CS35L41_DSP1_IRQ15_CTRL 0x02B80578
+#define CS35L41_DSP1_IRQ16_CTRL 0x02B80580
+#define CS35L41_DSP1_IRQ17_CTRL 0x02B80588
+#define CS35L41_DSP1_IRQ18_CTRL 0x02B80590
+#define CS35L41_DSP1_IRQ19_CTRL 0x02B80598
+#define CS35L41_DSP1_IRQ20_CTRL 0x02B805A0
+#define CS35L41_DSP1_IRQ21_CTRL 0x02B805A8
+#define CS35L41_DSP1_IRQ22_CTRL 0x02B805B0
+#define CS35L41_DSP1_IRQ23_CTRL 0x02B805B8
+#define CS35L41_DSP1_SCRATCH1 0x02B805C0
+#define CS35L41_DSP1_SCRATCH2 0x02B805C8
+#define CS35L41_DSP1_SCRATCH3 0x02B805D0
+#define CS35L41_DSP1_SCRATCH4 0x02B805D8
+#define CS35L41_DSP1_CCM_CORE_CTRL 0x02BC1000
+#define CS35L41_DSP1_CCM_CLK_OVERRIDE 0x02BC1008
+#define CS35L41_DSP1_XM_MSTR_EN 0x02BC2000
+#define CS35L41_DSP1_XM_CORE_PRI 0x02BC2008
+#define CS35L41_DSP1_XM_AHB_PACK_PL_PRI 0x02BC2010
+#define CS35L41_DSP1_XM_AHB_UP_PL_PRI 0x02BC2018
+#define CS35L41_DSP1_XM_ACCEL_PL0_PRI 0x02BC2020
+#define CS35L41_DSP1_XM_NPL0_PRI 0x02BC2078
+#define CS35L41_DSP1_YM_MSTR_EN 0x02BC20C0
+#define CS35L41_DSP1_YM_CORE_PRI 0x02BC20C8
+#define CS35L41_DSP1_YM_AHB_PACK_PL_PRI 0x02BC20D0
+#define CS35L41_DSP1_YM_AHB_UP_PL_PRI 0x02BC20D8
+#define CS35L41_DSP1_YM_ACCEL_PL0_PRI 0x02BC20E0
+#define CS35L41_DSP1_YM_NPL0_PRI 0x02BC2138
+#define CS35L41_DSP1_PM_MSTR_EN 0x02BC2180
+#define CS35L41_DSP1_PM_PATCH0_ADDR 0x02BC2188
+#define CS35L41_DSP1_PM_PATCH0_EN 0x02BC218C
+#define CS35L41_DSP1_PM_PATCH0_DATA_LO 0x02BC2190
+#define CS35L41_DSP1_PM_PATCH0_DATA_HI 0x02BC2194
+#define CS35L41_DSP1_PM_PATCH1_ADDR 0x02BC2198
+#define CS35L41_DSP1_PM_PATCH1_EN 0x02BC219C
+#define CS35L41_DSP1_PM_PATCH1_DATA_LO 0x02BC21A0
+#define CS35L41_DSP1_PM_PATCH1_DATA_HI 0x02BC21A4
+#define CS35L41_DSP1_PM_PATCH2_ADDR 0x02BC21A8
+#define CS35L41_DSP1_PM_PATCH2_EN 0x02BC21AC
+#define CS35L41_DSP1_PM_PATCH2_DATA_LO 0x02BC21B0
+#define CS35L41_DSP1_PM_PATCH2_DATA_HI 0x02BC21B4
+#define CS35L41_DSP1_PM_PATCH3_ADDR 0x02BC21B8
+#define CS35L41_DSP1_PM_PATCH3_EN 0x02BC21BC
+#define CS35L41_DSP1_PM_PATCH3_DATA_LO 0x02BC21C0
+#define CS35L41_DSP1_PM_PATCH3_DATA_HI 0x02BC21C4
+#define CS35L41_DSP1_PM_PATCH4_ADDR 0x02BC21C8
+#define CS35L41_DSP1_PM_PATCH4_EN 0x02BC21CC
+#define CS35L41_DSP1_PM_PATCH4_DATA_LO 0x02BC21D0
+#define CS35L41_DSP1_PM_PATCH4_DATA_HI 0x02BC21D4
+#define CS35L41_DSP1_PM_PATCH5_ADDR 0x02BC21D8
+#define CS35L41_DSP1_PM_PATCH5_EN 0x02BC21DC
+#define CS35L41_DSP1_PM_PATCH5_DATA_LO 0x02BC21E0
+#define CS35L41_DSP1_PM_PATCH5_DATA_HI 0x02BC21E4
+#define CS35L41_DSP1_PM_PATCH6_ADDR 0x02BC21E8
+#define CS35L41_DSP1_PM_PATCH6_EN 0x02BC21EC
+#define CS35L41_DSP1_PM_PATCH6_DATA_LO 0x02BC21F0
+#define CS35L41_DSP1_PM_PATCH6_DATA_HI 0x02BC21F4
+#define CS35L41_DSP1_PM_PATCH7_ADDR 0x02BC21F8
+#define CS35L41_DSP1_PM_PATCH7_EN 0x02BC21FC
+#define CS35L41_DSP1_PM_PATCH7_DATA_LO 0x02BC2200
+#define CS35L41_DSP1_PM_PATCH7_DATA_HI 0x02BC2204
+#define CS35L41_DSP1_MPU_XM_ACCESS0 0x02BC3000
+#define CS35L41_DSP1_MPU_YM_ACCESS0 0x02BC3004
+#define CS35L41_DSP1_MPU_WNDW_ACCESS0 0x02BC3008
+#define CS35L41_DSP1_MPU_XREG_ACCESS0 0x02BC300C
+#define CS35L41_DSP1_MPU_YREG_ACCESS0 0x02BC3014
+#define CS35L41_DSP1_MPU_XM_ACCESS1 0x02BC3018
+#define CS35L41_DSP1_MPU_YM_ACCESS1 0x02BC301C
+#define CS35L41_DSP1_MPU_WNDW_ACCESS1 0x02BC3020
+#define CS35L41_DSP1_MPU_XREG_ACCESS1 0x02BC3024
+#define CS35L41_DSP1_MPU_YREG_ACCESS1 0x02BC302C
+#define CS35L41_DSP1_MPU_XM_ACCESS2 0x02BC3030
+#define CS35L41_DSP1_MPU_YM_ACCESS2 0x02BC3034
+#define CS35L41_DSP1_MPU_WNDW_ACCESS2 0x02BC3038
+#define CS35L41_DSP1_MPU_XREG_ACCESS2 0x02BC303C
+#define CS35L41_DSP1_MPU_YREG_ACCESS2 0x02BC3044
+#define CS35L41_DSP1_MPU_XM_ACCESS3 0x02BC3048
+#define CS35L41_DSP1_MPU_YM_ACCESS3 0x02BC304C
+#define CS35L41_DSP1_MPU_WNDW_ACCESS3 0x02BC3050
+#define CS35L41_DSP1_MPU_XREG_ACCESS3 0x02BC3054
+#define CS35L41_DSP1_MPU_YREG_ACCESS3 0x02BC305C
+#define CS35L41_DSP1_MPU_XM_VIO_ADDR 0x02BC3100
+#define CS35L41_DSP1_MPU_XM_VIO_STATUS 0x02BC3104
+#define CS35L41_DSP1_MPU_YM_VIO_ADDR 0x02BC3108
+#define CS35L41_DSP1_MPU_YM_VIO_STATUS 0x02BC310C
+#define CS35L41_DSP1_MPU_PM_VIO_ADDR 0x02BC3110
+#define CS35L41_DSP1_MPU_PM_VIO_STATUS 0x02BC3114
+#define CS35L41_DSP1_MPU_LOCK_CONFIG 0x02BC3140
+#define CS35L41_DSP1_MPU_WDT_RST_CTRL 0x02BC3180
+#define CS35L41_DSP1_STRMARB_MSTR0_CFG0 0x02BC5000
+#define CS35L41_DSP1_STRMARB_MSTR0_CFG1 0x02BC5004
+#define CS35L41_DSP1_STRMARB_MSTR0_CFG2 0x02BC5008
+#define CS35L41_DSP1_STRMARB_MSTR1_CFG0 0x02BC5010
+#define CS35L41_DSP1_STRMARB_MSTR1_CFG1 0x02BC5014
+#define CS35L41_DSP1_STRMARB_MSTR1_CFG2 0x02BC5018
+#define CS35L41_DSP1_STRMARB_MSTR2_CFG0 0x02BC5020
+#define CS35L41_DSP1_STRMARB_MSTR2_CFG1 0x02BC5024
+#define CS35L41_DSP1_STRMARB_MSTR2_CFG2 0x02BC5028
+#define CS35L41_DSP1_STRMARB_MSTR3_CFG0 0x02BC5030
+#define CS35L41_DSP1_STRMARB_MSTR3_CFG1 0x02BC5034
+#define CS35L41_DSP1_STRMARB_MSTR3_CFG2 0x02BC5038
+#define CS35L41_DSP1_STRMARB_MSTR4_CFG0 0x02BC5040
+#define CS35L41_DSP1_STRMARB_MSTR4_CFG1 0x02BC5044
+#define CS35L41_DSP1_STRMARB_MSTR4_CFG2 0x02BC5048
+#define CS35L41_DSP1_STRMARB_MSTR5_CFG0 0x02BC5050
+#define CS35L41_DSP1_STRMARB_MSTR5_CFG1 0x02BC5054
+#define CS35L41_DSP1_STRMARB_MSTR5_CFG2 0x02BC5058
+#define CS35L41_DSP1_STRMARB_MSTR6_CFG0 0x02BC5060
+#define CS35L41_DSP1_STRMARB_MSTR6_CFG1 0x02BC5064
+#define CS35L41_DSP1_STRMARB_MSTR6_CFG2 0x02BC5068
+#define CS35L41_DSP1_STRMARB_MSTR7_CFG0 0x02BC5070
+#define CS35L41_DSP1_STRMARB_MSTR7_CFG1 0x02BC5074
+#define CS35L41_DSP1_STRMARB_MSTR7_CFG2 0x02BC5078
+#define CS35L41_DSP1_STRMARB_TX0_CFG0 0x02BC5200
+#define CS35L41_DSP1_STRMARB_TX0_CFG1 0x02BC5204
+#define CS35L41_DSP1_STRMARB_TX1_CFG0 0x02BC5208
+#define CS35L41_DSP1_STRMARB_TX1_CFG1 0x02BC520C
+#define CS35L41_DSP1_STRMARB_TX2_CFG0 0x02BC5210
+#define CS35L41_DSP1_STRMARB_TX2_CFG1 0x02BC5214
+#define CS35L41_DSP1_STRMARB_TX3_CFG0 0x02BC5218
+#define CS35L41_DSP1_STRMARB_TX3_CFG1 0x02BC521C
+#define CS35L41_DSP1_STRMARB_TX4_CFG0 0x02BC5220
+#define CS35L41_DSP1_STRMARB_TX4_CFG1 0x02BC5224
+#define CS35L41_DSP1_STRMARB_TX5_CFG0 0x02BC5228
+#define CS35L41_DSP1_STRMARB_TX5_CFG1 0x02BC522C
+#define CS35L41_DSP1_STRMARB_TX6_CFG0 0x02BC5230
+#define CS35L41_DSP1_STRMARB_TX6_CFG1 0x02BC5234
+#define CS35L41_DSP1_STRMARB_TX7_CFG0 0x02BC5238
+#define CS35L41_DSP1_STRMARB_TX7_CFG1 0x02BC523C
+#define CS35L41_DSP1_STRMARB_RX0_CFG0 0x02BC5400
+#define CS35L41_DSP1_STRMARB_RX0_CFG1 0x02BC5404
+#define CS35L41_DSP1_STRMARB_RX1_CFG0 0x02BC5408
+#define CS35L41_DSP1_STRMARB_RX1_CFG1 0x02BC540C
+#define CS35L41_DSP1_STRMARB_RX2_CFG0 0x02BC5410
+#define CS35L41_DSP1_STRMARB_RX2_CFG1 0x02BC5414
+#define CS35L41_DSP1_STRMARB_RX3_CFG0 0x02BC5418
+#define CS35L41_DSP1_STRMARB_RX3_CFG1 0x02BC541C
+#define CS35L41_DSP1_STRMARB_RX4_CFG0 0x02BC5420
+#define CS35L41_DSP1_STRMARB_RX4_CFG1 0x02BC5424
+#define CS35L41_DSP1_STRMARB_RX5_CFG0 0x02BC5428
+#define CS35L41_DSP1_STRMARB_RX5_CFG1 0x02BC542C
+#define CS35L41_DSP1_STRMARB_RX6_CFG0 0x02BC5430
+#define CS35L41_DSP1_STRMARB_RX6_CFG1 0x02BC5434
+#define CS35L41_DSP1_STRMARB_RX7_CFG0 0x02BC5438
+#define CS35L41_DSP1_STRMARB_RX7_CFG1 0x02BC543C
+#define CS35L41_DSP1_STRMARB_IRQ0_CFG0 0x02BC5600
+#define CS35L41_DSP1_STRMARB_IRQ0_CFG1 0x02BC5604
+#define CS35L41_DSP1_STRMARB_IRQ0_CFG2 0x02BC5608
+#define CS35L41_DSP1_STRMARB_IRQ1_CFG0 0x02BC5610
+#define CS35L41_DSP1_STRMARB_IRQ1_CFG1 0x02BC5614
+#define CS35L41_DSP1_STRMARB_IRQ1_CFG2 0x02BC5618
+#define CS35L41_DSP1_STRMARB_IRQ2_CFG0 0x02BC5620
+#define CS35L41_DSP1_STRMARB_IRQ2_CFG1 0x02BC5624
+#define CS35L41_DSP1_STRMARB_IRQ2_CFG2 0x02BC5628
+#define CS35L41_DSP1_STRMARB_IRQ3_CFG0 0x02BC5630
+#define CS35L41_DSP1_STRMARB_IRQ3_CFG1 0x02BC5634
+#define CS35L41_DSP1_STRMARB_IRQ3_CFG2 0x02BC5638
+#define CS35L41_DSP1_STRMARB_IRQ4_CFG0 0x02BC5640
+#define CS35L41_DSP1_STRMARB_IRQ4_CFG1 0x02BC5644
+#define CS35L41_DSP1_STRMARB_IRQ4_CFG2 0x02BC5648
+#define CS35L41_DSP1_STRMARB_IRQ5_CFG0 0x02BC5650
+#define CS35L41_DSP1_STRMARB_IRQ5_CFG1 0x02BC5654
+#define CS35L41_DSP1_STRMARB_IRQ5_CFG2 0x02BC5658
+#define CS35L41_DSP1_STRMARB_IRQ6_CFG0 0x02BC5660
+#define CS35L41_DSP1_STRMARB_IRQ6_CFG1 0x02BC5664
+#define CS35L41_DSP1_STRMARB_IRQ6_CFG2 0x02BC5668
+#define CS35L41_DSP1_STRMARB_IRQ7_CFG0 0x02BC5670
+#define CS35L41_DSP1_STRMARB_IRQ7_CFG1 0x02BC5674
+#define CS35L41_DSP1_STRMARB_IRQ7_CFG2 0x02BC5678
+#define CS35L41_DSP1_STRMARB_RESYNC_MSK 0x02BC5A00
+#define CS35L41_DSP1_STRMARB_ERR_STATUS 0x02BC5A08
+#define CS35L41_DSP1_INTPCTL_RES_STATIC 0x02BC6000
+#define CS35L41_DSP1_INTPCTL_RES_DYN 0x02BC6004
+#define CS35L41_DSP1_INTPCTL_NMI_CTRL 0x02BC6008
+#define CS35L41_DSP1_INTPCTL_IRQ_INV 0x02BC6010
+#define CS35L41_DSP1_INTPCTL_IRQ_MODE 0x02BC6014
+#define CS35L41_DSP1_INTPCTL_IRQ_EN 0x02BC6018
+#define CS35L41_DSP1_INTPCTL_IRQ_MSK 0x02BC601C
+#define CS35L41_DSP1_INTPCTL_IRQ_FLUSH 0x02BC6020
+#define CS35L41_DSP1_INTPCTL_IRQ_MSKCLR 0x02BC6024
+#define CS35L41_DSP1_INTPCTL_IRQ_FRC 0x02BC6028
+#define CS35L41_DSP1_INTPCTL_IRQ_MSKSET 0x02BC602C
+#define CS35L41_DSP1_INTPCTL_IRQ_ERR 0x02BC6030
+#define CS35L41_DSP1_INTPCTL_IRQ_PEND 0x02BC6034
+#define CS35L41_DSP1_INTPCTL_IRQ_GEN 0x02BC6038
+#define CS35L41_DSP1_INTPCTL_TESTBITS 0x02BC6040
+#define CS35L41_DSP1_WDT_CONTROL 0x02BC7000
+#define CS35L41_DSP1_WDT_STATUS 0x02BC7008
+#define CS35L41_DSP1_YMEM_PACK_0 0x02C00000
+#define CS35L41_DSP1_YMEM_PACK_1532 0x02C017F0
+#define CS35L41_DSP1_YMEM_UNPACK32_0 0x03000000
+#define CS35L41_DSP1_YMEM_UNPACK32_1022 0x03000FF8
+#define CS35L41_DSP1_YMEM_UNPACK24_0 0x03400000
+#define CS35L41_DSP1_YMEM_UNPACK24_2045 0x03401FF4
+#define CS35L41_DSP1_PMEM_0 0x03800000
+#define CS35L41_DSP1_PMEM_5114 0x03804FE8
+
+/* test registers for emulation bring-up */
+#define CS35L41_PLL_OVR 0x00003018
+#define CS35L41_BST_TEST_DUTY 0x00003900
+#define CS35L41_DIGPWM_IOCTRL 0x0000706C
+
+/* registers populated by OTP */
+#define CS35L41_OTP_TRIM_1 0x0000208c
+#define CS35L41_OTP_TRIM_2 0x00002090
+#define CS35L41_OTP_TRIM_3 0x00003010
+#define CS35L41_OTP_TRIM_4 0x0000300C
+#define CS35L41_OTP_TRIM_5 0x0000394C
+#define CS35L41_OTP_TRIM_6 0x00003950
+#define CS35L41_OTP_TRIM_7 0x00003954
+#define CS35L41_OTP_TRIM_8 0x00003958
+#define CS35L41_OTP_TRIM_9 0x0000395C
+#define CS35L41_OTP_TRIM_10 0x0000416C
+#define CS35L41_OTP_TRIM_11 0x00004160
+#define CS35L41_OTP_TRIM_12 0x00004170
+#define CS35L41_OTP_TRIM_13 0x00004360
+#define CS35L41_OTP_TRIM_14 0x00004448
+#define CS35L41_OTP_TRIM_15 0x0000444C
+#define CS35L41_OTP_TRIM_16 0x00006E30
+#define CS35L41_OTP_TRIM_17 0x00006E34
+#define CS35L41_OTP_TRIM_18 0x00006E38
+#define CS35L41_OTP_TRIM_19 0x00006E3C
+#define CS35L41_OTP_TRIM_20 0x00006E40
+#define CS35L41_OTP_TRIM_21 0x00006E44
+#define CS35L41_OTP_TRIM_22 0x00006E48
+#define CS35L41_OTP_TRIM_23 0x00006E4C
+#define CS35L41_OTP_TRIM_24 0x00006E50
+#define CS35L41_OTP_TRIM_25 0x00006E54
+#define CS35L41_OTP_TRIM_26 0x00006E58
+#define CS35L41_OTP_TRIM_27 0x00006E5C
+#define CS35L41_OTP_TRIM_28 0x00006E60
+#define CS35L41_OTP_TRIM_29 0x00006E64
+#define CS35L41_OTP_TRIM_30 0x00007418
+#define CS35L41_OTP_TRIM_31 0x0000741C
+#define CS35L41_OTP_TRIM_32 0x00007434
+#define CS35L41_OTP_TRIM_33 0x00007068
+#define CS35L41_OTP_TRIM_34 0x0000410C
+#define CS35L41_OTP_TRIM_35 0x0000400C
+#define CS35L41_OTP_TRIM_36 0x00002030
+
+#define CS35L41_MAX_CACHE_REG 36
+#define CS35L41_OTP_SIZE_WORDS 32
+
+#define CS35L41_NUM_SUPPLIES 2
+
+#define CS35L41_SCLK_MSTR_MASK 0x10
+#define CS35L41_SCLK_MSTR_SHIFT 4
+#define CS35L41_LRCLK_MSTR_MASK 0x01
+#define CS35L41_LRCLK_MSTR_SHIFT 0
+#define CS35L41_SCLK_INV_MASK 0x40
+#define CS35L41_SCLK_INV_SHIFT 6
+#define CS35L41_LRCLK_INV_MASK 0x04
+#define CS35L41_LRCLK_INV_SHIFT 2
+#define CS35L41_SCLK_FRC_MASK 0x20
+#define CS35L41_SCLK_FRC_SHIFT 5
+#define CS35L41_LRCLK_FRC_MASK 0x02
+#define CS35L41_LRCLK_FRC_SHIFT 1
+
+#define CS35L41_AMP_GAIN_PCM_MASK 0x3E0
+#define CS35L41_AMP_GAIN_ZC_MASK 0x0400
+#define CS35L41_AMP_GAIN_ZC_SHIFT 10
+
+#define CS35L41_BST_CTL_MASK 0xFF
+#define CS35L41_BST_CTL_SEL_MASK 0x03
+#define CS35L41_BST_CTL_SEL_REG 0x00
+#define CS35L41_BST_CTL_SEL_CLASSH 0x01
+#define CS35L41_BST_IPK_MASK 0x7F
+#define CS35L41_BST_IPK_SHIFT 0
+#define CS35L41_BST_LIM_MASK 0x4
+#define CS35L41_BST_LIM_SHIFT 2
+#define CS35L41_BST_K1_MASK 0x000000FF
+#define CS35L41_BST_K1_SHIFT 0
+#define CS35L41_BST_K2_MASK 0x0000FF00
+#define CS35L41_BST_K2_SHIFT 8
+#define CS35L41_BST_SLOPE_MASK 0x0000FF00
+#define CS35L41_BST_SLOPE_SHIFT 8
+#define CS35L41_BST_LBST_VAL_MASK 0x00000003
+#define CS35L41_BST_LBST_VAL_SHIFT 0
+
+#define CS35L41_TEMP_THLD_MASK 0x03
+#define CS35L41_VMON_IMON_VOL_MASK 0x07FF07FF
+#define CS35L41_PDM_MODE_MASK 0x01
+#define CS35L41_PDM_MODE_SHIFT 0
+
+#define CS35L41_CH_MEM_DEPTH_MASK 0x07
+#define CS35L41_CH_MEM_DEPTH_SHIFT 0
+#define CS35L41_CH_HDRM_CTL_MASK 0x007F0000
+#define CS35L41_CH_HDRM_CTL_SHIFT 16
+#define CS35L41_CH_REL_RATE_MASK 0xFF00
+#define CS35L41_CH_REL_RATE_SHIFT 8
+#define CS35L41_CH_WKFET_DLY_MASK 0x001C
+#define CS35L41_CH_WKFET_DLY_SHIFT 2
+#define CS35L41_CH_WKFET_THLD_MASK 0x0F00
+#define CS35L41_CH_WKFET_THLD_SHIFT 8
+
+#define CS35L41_HW_NG_SEL_MASK 0x3F00
+#define CS35L41_HW_NG_SEL_SHIFT 8
+#define CS35L41_HW_NG_DLY_MASK 0x0070
+#define CS35L41_HW_NG_DLY_SHIFT 4
+#define CS35L41_HW_NG_THLD_MASK 0x0007
+#define CS35L41_HW_NG_THLD_SHIFT 0
+
+#define CS35L41_DSP_NG_ENABLE_MASK 0x00010000
+#define CS35L41_DSP_NG_ENABLE_SHIFT 16
+#define CS35L41_DSP_NG_THLD_MASK 0x7
+#define CS35L41_DSP_NG_THLD_SHIFT 0
+#define CS35L41_DSP_NG_DELAY_MASK 0x0F00
+#define CS35L41_DSP_NG_DELAY_SHIFT 8
+
+#define CS35L41_ASP_FMT_MASK 0x0700
+#define CS35L41_ASP_FMT_SHIFT 8
+#define CS35L41_ASP_DOUT_HIZ_MASK 0x03
+#define CS35L41_ASP_DOUT_HIZ_SHIFT 0
+#define CS35L41_ASP_WIDTH_16 0x10
+#define CS35L41_ASP_WIDTH_24 0x18
+#define CS35L41_ASP_WIDTH_32 0x20
+#define CS35L41_ASP_WIDTH_TX_MASK 0xFF0000
+#define CS35L41_ASP_WIDTH_TX_SHIFT 16
+#define CS35L41_ASP_WIDTH_RX_MASK 0xFF000000
+#define CS35L41_ASP_WIDTH_RX_SHIFT 24
+#define CS35L41_ASP_RX1_SLOT_MASK 0x3F
+#define CS35L41_ASP_RX1_SLOT_SHIFT 0
+#define CS35L41_ASP_RX2_SLOT_MASK 0x3F00
+#define CS35L41_ASP_RX2_SLOT_SHIFT 8
+#define CS35L41_ASP_RX_WL_MASK 0x3F
+#define CS35L41_ASP_TX_WL_MASK 0x3F
+#define CS35L41_ASP_RX_WL_SHIFT 0
+#define CS35L41_ASP_TX_WL_SHIFT 0
+#define CS35L41_ASP_SOURCE_MASK 0x7F
+
+#define CS35L41_INPUT_SRC_ASPRX1 0x08
+#define CS35L41_INPUT_SRC_ASPRX2 0x09
+#define CS35L41_INPUT_SRC_VMON 0x18
+#define CS35L41_INPUT_SRC_IMON 0x19
+#define CS35L41_INPUT_SRC_CLASSH 0x21
+#define CS35L41_INPUT_SRC_VPMON 0x28
+#define CS35L41_INPUT_SRC_VBSTMON 0x29
+#define CS35L41_INPUT_SRC_TEMPMON 0x3A
+#define CS35L41_INPUT_SRC_RSVD 0x3B
+#define CS35L41_INPUT_DSP_TX1 0x32
+#define CS35L41_INPUT_DSP_TX2 0x33
+
+#define CS35L41_WR_PEND_STS_MASK 0x2
+
+#define CS35L41_PLL_CLK_SEL_MASK 0x07
+#define CS35L41_PLL_CLK_SEL_SHIFT 0
+#define CS35L41_PLL_CLK_EN_MASK 0x10
+#define CS35L41_PLL_CLK_EN_SHIFT 4
+#define CS35L41_PLL_OPENLOOP_MASK 0x0800
+#define CS35L41_PLL_OPENLOOP_SHIFT 11
+#define CS35L41_PLLSRC_SCLK 0
+#define CS35L41_PLLSRC_LRCLK 1
+#define CS35L41_PLLSRC_SELF 3
+#define CS35L41_PLLSRC_PDMCLK 4
+#define CS35L41_PLLSRC_MCLK 5
+#define CS35L41_PLLSRC_SWIRE 7
+#define CS35L41_REFCLK_FREQ_MASK 0x7E0
+#define CS35L41_REFCLK_FREQ_SHIFT 5
+
+#define CS35L41_GLOBAL_FS_MASK 0x1F
+#define CS35L41_GLOBAL_FS_SHIFT 0
+
+#define CS35L41_GLOBAL_EN_MASK 0x01
+#define CS35L41_GLOBAL_EN_SHIFT 0
+#define CS35L41_BST_EN_MASK 0x0030
+#define CS35L41_BST_EN_SHIFT 4
+#define CS35L41_BST_DIS_FET_OFF 0x00
+#define CS35L41_BST_EN_DEFAULT 0x2
+#define CS35L41_AMP_EN_SHIFT 0
+#define CS35L41_AMP_EN_MASK 1
+#define CS35L41_VMON_EN_MASK 0x1000
+#define CS35L41_VMON_EN_SHIFT 12
+#define CS35L41_IMON_EN_MASK 0x2000
+#define CS35L41_IMON_EN_SHIFT 13
+
+#define CS35L41_PDN_DONE_MASK 0x00800000
+#define CS35L41_PDN_DONE_SHIFT 23
+#define CS35L41_PUP_DONE_MASK 0x01000000
+#define CS35L41_PUP_DONE_SHIFT 24
+
+#define CS35L36_PUP_DONE_IRQ_UNMASK 0x5F
+#define CS35L36_PUP_DONE_IRQ_MASK 0xBF
+
+#define CS35L41_AMP_SHORT_ERR 0x80000000
+#define CS35L41_BST_SHORT_ERR 0x0100
+#define CS35L41_TEMP_WARN 0x8000
+#define CS35L41_TEMP_ERR 0x00020000
+#define CS35L41_BST_OVP_ERR 0x40
+#define CS35L41_BST_DCM_UVP_ERR 0x80
+#define CS35L41_OTP_BOOT_DONE 0x02
+#define CS35L41_PLL_UNLOCK 0x10
+#define CS35L41_OTP_BOOT_ERR 0x80000000
+
+#define CS35L41_AMP_SHORT_ERR_RLS 0x02
+#define CS35L41_BST_SHORT_ERR_RLS 0x04
+#define CS35L41_BST_OVP_ERR_RLS 0x08
+#define CS35L41_BST_UVP_ERR_RLS 0x10
+#define CS35L41_TEMP_WARN_ERR_RLS 0x20
+#define CS35L41_TEMP_ERR_RLS 0x40
+
+#define CS35L41_AMP_SHORT_ERR_RLS_SHIFT 1
+#define CS35L41_BST_SHORT_ERR_RLS_SHIFT 2
+#define CS35L41_BST_OVP_ERR_RLS_SHIFT 3
+#define CS35L41_BST_UVP_ERR_RLS_SHIFT 4
+#define CS35L41_TEMP_WARN_ERR_RLS_SHIFT 5
+#define CS35L41_TEMP_ERR_RLS_SHIFT 6
+
+#define CS35L41_INT1_MASK_DEFAULT 0x7FFCFE3F
+#define CS35L41_INT1_UNMASK_PUP 0xFEFFFFFF
+#define CS35L41_INT1_UNMASK_PDN 0xFF7FFFFF
+
+#define CS35L41_GPIO_DIR_MASK 0x80000000
+#define CS35L41_GPIO_DIR_SHIFT 31
+#define CS35L41_GPIO1_CTRL_MASK 0x00030000
+#define CS35L41_GPIO1_CTRL_SHIFT 16
+#define CS35L41_GPIO2_CTRL_MASK 0x07000000
+#define CS35L41_GPIO2_CTRL_SHIFT 24
+#define CS35L41_GPIO_LVL_SHIFT 15
+#define CS35L41_GPIO_LVL_MASK BIT(CS35L41_GPIO_LVL_SHIFT)
+#define CS35L41_GPIO_POL_MASK 0x1000
+#define CS35L41_GPIO_POL_SHIFT 12
+
+#define CS35L41_AMP_INV_PCM_SHIFT 14
+#define CS35L41_AMP_INV_PCM_MASK BIT(CS35L41_AMP_INV_PCM_SHIFT)
+#define CS35L41_AMP_PCM_VOL_SHIFT 3
+#define CS35L41_AMP_PCM_VOL_MASK (0x7FF << 3)
+#define CS35L41_AMP_PCM_VOL_MUTE 0x4CF
+
+#define CS35L41_CHIP_ID 0x35a40
+#define CS35L41R_CHIP_ID 0x35b40
+#define CS35L41_MTLREVID_MASK 0x0F
+#define CS35L41_REVID_A0 0xA0
+#define CS35L41_REVID_B0 0xB0
+#define CS35L41_REVID_B2 0xB2
+
+#define CS35L41_HALO_CORE_RESET 0x00000200
+
+#define CS35L41_FS1_WINDOW_MASK 0x000007FF
+#define CS35L41_FS2_WINDOW_MASK 0x00FFF800
+#define CS35L41_FS2_WINDOW_SHIFT 12
+
+#define CS35L41_SPI_MAX_FREQ 4000000
+#define CS35L41_REGSTRIDE 4
+
+enum cs35l41_boost_type {
+ CS35L41_INT_BOOST,
+ CS35L41_EXT_BOOST,
+ CS35L41_EXT_BOOST_NO_VSPK_SWITCH,
+};
+
+enum cs35l41_clk_ids {
+ CS35L41_CLKID_SCLK = 0,
+ CS35L41_CLKID_LRCLK = 1,
+ CS35L41_CLKID_MCLK = 4,
+};
+
+enum cs35l41_gpio1_func {
+ CS35L41_GPIO1_HIZ,
+ CS35L41_GPIO1_GPIO,
+ CS35L41_GPIO1_MDSYNC,
+ CS35L41_GPIO1_MCLK,
+ CS35L41_GPIO1_PDM_CLK,
+ CS35L41_GPIO1_PDM_DATA,
+};
+
+enum cs35l41_gpio2_func {
+ CS35L41_GPIO2_HIZ,
+ CS35L41_GPIO2_GPIO,
+ CS35L41_GPIO2_INT_OPEN_DRAIN,
+ CS35L41_GPIO2_MCLK,
+ CS35L41_GPIO2_INT_PUSH_PULL_LOW,
+ CS35L41_GPIO2_INT_PUSH_PULL_HIGH,
+ CS35L41_GPIO2_PDM_CLK,
+ CS35L41_GPIO2_PDM_DATA,
+};
+
+struct cs35l41_gpio_cfg {
+ bool valid;
+ bool pol_inv;
+ bool out_en;
+ unsigned int func;
+};
+
+struct cs35l41_hw_cfg {
+ bool valid;
+ int bst_ind;
+ int bst_ipk;
+ int bst_cap;
+ int dout_hiz;
+ struct cs35l41_gpio_cfg gpio1;
+ struct cs35l41_gpio_cfg gpio2;
+ unsigned int spk_pos;
+
+ enum cs35l41_boost_type bst_type;
+};
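
For illustration only (not part of this patch), a hypothetical configuration
for an internally boosted amplifier; all numeric values are assumptions (the
units follow the cirrus,boost-* devicetree properties: nH, mA, uF):

	static const struct cs35l41_hw_cfg example_hw_cfg = {
		.valid = true,
		.bst_ind = 1000,	/* boost inductance, nH (assumed) */
		.bst_ipk = 4500,	/* boost peak current, mA (assumed) */
		.bst_cap = 24,		/* boost capacitance, uF (assumed) */
		.dout_hiz = 2,		/* ASP DOUT Hi-Z mode (assumed) */
		.bst_type = CS35L41_INT_BOOST,
	};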
+
+struct cs35l41_otp_packed_element_t {
+ u32 reg;
+ u8 shift;
+ u8 size;
+};
+
+struct cs35l41_otp_map_element_t {
+ u32 id;
+ u32 num_elements;
+ const struct cs35l41_otp_packed_element_t *map;
+ u32 bit_offset;
+ u32 word_offset;
+};
+
+enum cs35l41_cspl_mbox_status {
+ CSPL_MBOX_STS_RUNNING = 0,
+ CSPL_MBOX_STS_PAUSED = 1,
+ CSPL_MBOX_STS_RDY_FOR_REINIT = 2,
+};
+
+enum cs35l41_cspl_mbox_cmd {
+ CSPL_MBOX_CMD_NONE = 0,
+ CSPL_MBOX_CMD_PAUSE = 1,
+ CSPL_MBOX_CMD_RESUME = 2,
+ CSPL_MBOX_CMD_REINIT = 3,
+ CSPL_MBOX_CMD_STOP_PRE_REINIT = 4,
+ CSPL_MBOX_CMD_HIBERNATE = 5,
+ CSPL_MBOX_CMD_OUT_OF_HIBERNATE = 6,
+ CSPL_MBOX_CMD_UNKNOWN_CMD = -1,
+ CSPL_MBOX_CMD_INVALID_SEQUENCE = -2,
+};
+
+/*
+ * IRQs
+ */
+#define CS35L41_IRQ(_irq, _name, _hand) \
+ { \
+ .irq = CS35L41_ ## _irq ## _IRQ,\
+ .name = _name, \
+ .handler = _hand, \
+ }
+
+struct cs35l41_irq {
+ int irq;
+ const char *name;
+ irqreturn_t (*handler)(int irq, void *data);
+};
+
+#define CS35L41_REG_IRQ(_reg, _irq) \
+ [CS35L41_ ## _irq ## _IRQ] = { \
+ .reg_offset = (CS35L41_ ## _reg) - CS35L41_IRQ1_STATUS1,\
+ .mask = CS35L41_ ## _irq ## _MASK \
+ }
+
+/* (0x0000E010) CS35L41_IRQ1_STATUS1 */
+#define CS35L41_BST_OVP_ERR_SHIFT 6
+#define CS35L41_BST_OVP_ERR_MASK BIT(CS35L41_BST_OVP_ERR_SHIFT)
+#define CS35L41_BST_DCM_UVP_ERR_SHIFT 7
+#define CS35L41_BST_DCM_UVP_ERR_MASK BIT(CS35L41_BST_DCM_UVP_ERR_SHIFT)
+#define CS35L41_BST_SHORT_ERR_SHIFT 8
+#define CS35L41_BST_SHORT_ERR_MASK BIT(CS35L41_BST_SHORT_ERR_SHIFT)
+#define CS35L41_TEMP_WARN_SHIFT 15
+#define CS35L41_TEMP_WARN_MASK BIT(CS35L41_TEMP_WARN_SHIFT)
+#define CS35L41_TEMP_ERR_SHIFT 17
+#define CS35L41_TEMP_ERR_MASK BIT(CS35L41_TEMP_ERR_SHIFT)
+#define CS35L41_AMP_SHORT_ERR_SHIFT 31
+#define CS35L41_AMP_SHORT_ERR_MASK BIT(CS35L41_AMP_SHORT_ERR_SHIFT)
+
+enum cs35l41_irq_list {
+ CS35L41_BST_OVP_ERR_IRQ,
+ CS35L41_BST_DCM_UVP_ERR_IRQ,
+ CS35L41_BST_SHORT_ERR_IRQ,
+ CS35L41_TEMP_WARN_IRQ,
+ CS35L41_TEMP_ERR_IRQ,
+ CS35L41_AMP_SHORT_ERR_IRQ,
+
+ CS35L41_NUM_IRQ
+};
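
For illustration only (not part of this patch), the two macros above stamp
out IRQ tables; the handler names below are hypothetical, and struct
regmap_irq comes from the regmap-irq framework:

	static const struct cs35l41_irq example_irqs[] = {
		CS35L41_IRQ(BST_OVP_ERR, "Boost Overvoltage", example_bst_ovp_isr),
		CS35L41_IRQ(TEMP_ERR, "Overtemp Error", example_temp_err_isr),
	};

	static const struct regmap_irq example_reg_irqs[] = {
		CS35L41_REG_IRQ(IRQ1_STATUS1, BST_OVP_ERR),
		CS35L41_REG_IRQ(IRQ1_STATUS1, TEMP_ERR),
	};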
+
+extern struct regmap_config cs35l41_regmap_i2c;
+extern struct regmap_config cs35l41_regmap_spi;
+
+int cs35l41_test_key_unlock(struct device *dev, struct regmap *regmap);
+int cs35l41_test_key_lock(struct device *dev, struct regmap *regmap);
+int cs35l41_otp_unpack(struct device *dev, struct regmap *regmap);
+int cs35l41_register_errata_patch(struct device *dev, struct regmap *reg, unsigned int reg_revid);
+int cs35l41_set_channels(struct device *dev, struct regmap *reg,
+ unsigned int tx_num, unsigned int *tx_slot,
+ unsigned int rx_num, unsigned int *rx_slot);
+int cs35l41_gpio_config(struct regmap *regmap, struct cs35l41_hw_cfg *hw_cfg);
+void cs35l41_configure_cs_dsp(struct device *dev, struct regmap *reg, struct cs_dsp *dsp);
+int cs35l41_set_cspl_mbox_cmd(struct device *dev, struct regmap *regmap,
+ enum cs35l41_cspl_mbox_cmd cmd);
+int cs35l41_write_fs_errata(struct device *dev, struct regmap *regmap);
+int cs35l41_enter_hibernate(struct device *dev, struct regmap *regmap,
+ enum cs35l41_boost_type b_type);
+int cs35l41_exit_hibernate(struct device *dev, struct regmap *regmap);
+int cs35l41_init_boost(struct device *dev, struct regmap *regmap,
+ struct cs35l41_hw_cfg *hw_cfg);
+bool cs35l41_safe_reset(struct regmap *regmap, enum cs35l41_boost_type b_type);
+int cs35l41_global_enable(struct regmap *regmap, enum cs35l41_boost_type b_type, int enable);
+
+#endif /* __CS35L41_H */
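
For illustration only (not part of this patch), a caller might step the CSPL
firmware state machine with cs35l41_set_cspl_mbox_cmd(); dev and regmap come
from the calling driver:

	/* pause DSP audio processing before reconfiguring, then resume */
	ret = cs35l41_set_cspl_mbox_cmd(dev, regmap, CSPL_MBOX_CMD_PAUSE);
	if (ret < 0)
		return ret;
	/* ... update the configuration ... */
	ret = cs35l41_set_cspl_mbox_cmd(dev, regmap, CSPL_MBOX_CMD_RESUME);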
diff --git a/include/sound/cs42l42.h b/include/sound/cs42l42.h
new file mode 100644
index 000000000000..1d1c24fdd0ca
--- /dev/null
+++ b/include/sound/cs42l42.h
@@ -0,0 +1,811 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/sound/cs42l42.h -- Platform data for the CS42L42 ALSA SoC audio driver
+ *
+ * Copyright 2016-2022 Cirrus Logic, Inc.
+ *
+ * Author: James Schulman <james.schulman@cirrus.com>
+ * Author: Brian Austin <brian.austin@cirrus.com>
+ * Author: Michael White <michael.white@cirrus.com>
+ */
+
+#ifndef __CS42L42_H
+#define __CS42L42_H
+
+#define CS42L42_PAGE_REGISTER 0x00 /* Page Select Register */
+#define CS42L42_WIN_START 0x00
+#define CS42L42_WIN_LEN 0x100
+#define CS42L42_RANGE_MIN 0x00
+#define CS42L42_RANGE_MAX 0x7F
+
+#define CS42L42_PAGE_10 0x1000
+#define CS42L42_PAGE_11 0x1100
+#define CS42L42_PAGE_12 0x1200
+#define CS42L42_PAGE_13 0x1300
+#define CS42L42_PAGE_15 0x1500
+#define CS42L42_PAGE_19 0x1900
+#define CS42L42_PAGE_1B 0x1B00
+#define CS42L42_PAGE_1C 0x1C00
+#define CS42L42_PAGE_1D 0x1D00
+#define CS42L42_PAGE_1F 0x1F00
+#define CS42L42_PAGE_20 0x2000
+#define CS42L42_PAGE_21 0x2100
+#define CS42L42_PAGE_23 0x2300
+#define CS42L42_PAGE_24 0x2400
+#define CS42L42_PAGE_25 0x2500
+#define CS42L42_PAGE_26 0x2600
+#define CS42L42_PAGE_28 0x2800
+#define CS42L42_PAGE_29 0x2900
+#define CS42L42_PAGE_2A 0x2A00
+#define CS42L42_PAGE_30 0x3000
+
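
For illustration only (not part of this patch): register addresses in this
header encode the page in the upper byte (page << 8 | offset), which maps
onto a paged regmap window selected through CS42L42_PAGE_REGISTER; the field
values below are a sketch, not taken from the driver:

	static const struct regmap_range_cfg example_page_range = {
		.name		= "Pages",
		.range_min	= 0,
		.range_max	= CS42L42_PAGE_30 + CS42L42_RANGE_MAX,
		.selector_reg	= CS42L42_PAGE_REGISTER,
		.selector_mask	= 0xff,
		.selector_shift	= 0,
		.window_start	= CS42L42_WIN_START,
		.window_len	= CS42L42_WIN_LEN,
	};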
+#define CS42L42_CHIP_ID 0x42A42
+#define CS42L83_CHIP_ID 0x42A83
+
+/* Page 0x10 Global Registers */
+#define CS42L42_DEVID_AB (CS42L42_PAGE_10 + 0x01)
+#define CS42L42_DEVID_CD (CS42L42_PAGE_10 + 0x02)
+#define CS42L42_DEVID_E (CS42L42_PAGE_10 + 0x03)
+#define CS42L42_FABID (CS42L42_PAGE_10 + 0x04)
+#define CS42L42_REVID (CS42L42_PAGE_10 + 0x05)
+#define CS42L42_FRZ_CTL (CS42L42_PAGE_10 + 0x06)
+
+#define CS42L42_SRC_CTL (CS42L42_PAGE_10 + 0x07)
+#define CS42L42_SRC_BYPASS_DAC_SHIFT 1
+#define CS42L42_SRC_BYPASS_DAC_MASK (1 << CS42L42_SRC_BYPASS_DAC_SHIFT)
+
+#define CS42L42_MCLK_STATUS (CS42L42_PAGE_10 + 0x08)
+
+#define CS42L42_MCLK_CTL (CS42L42_PAGE_10 + 0x09)
+#define CS42L42_INTERNAL_FS_SHIFT 1
+#define CS42L42_INTERNAL_FS_MASK (1 << CS42L42_INTERNAL_FS_SHIFT)
+
+#define CS42L42_SFTRAMP_RATE (CS42L42_PAGE_10 + 0x0A)
+#define CS42L42_SLOW_START_ENABLE (CS42L42_PAGE_10 + 0x0B)
+#define CS42L42_SLOW_START_EN_MASK GENMASK(6, 4)
+#define CS42L42_SLOW_START_EN_SHIFT 4
+#define CS42L42_I2C_DEBOUNCE (CS42L42_PAGE_10 + 0x0E)
+#define CS42L42_I2C_STRETCH (CS42L42_PAGE_10 + 0x0F)
+#define CS42L42_I2C_TIMEOUT (CS42L42_PAGE_10 + 0x10)
+
+/* Page 0x11 Power and Headset Detect Registers */
+#define CS42L42_PWR_CTL1 (CS42L42_PAGE_11 + 0x01)
+#define CS42L42_ASP_DAO_PDN_SHIFT 7
+#define CS42L42_ASP_DAO_PDN_MASK (1 << CS42L42_ASP_DAO_PDN_SHIFT)
+#define CS42L42_ASP_DAI_PDN_SHIFT 6
+#define CS42L42_ASP_DAI_PDN_MASK (1 << CS42L42_ASP_DAI_PDN_SHIFT)
+#define CS42L42_MIXER_PDN_SHIFT 5
+#define CS42L42_MIXER_PDN_MASK (1 << CS42L42_MIXER_PDN_SHIFT)
+#define CS42L42_EQ_PDN_SHIFT 4
+#define CS42L42_EQ_PDN_MASK (1 << CS42L42_EQ_PDN_SHIFT)
+#define CS42L42_HP_PDN_SHIFT 3
+#define CS42L42_HP_PDN_MASK (1 << CS42L42_HP_PDN_SHIFT)
+#define CS42L42_ADC_PDN_SHIFT 2
+#define CS42L42_ADC_PDN_MASK (1 << CS42L42_ADC_PDN_SHIFT)
+#define CS42L42_PDN_ALL_SHIFT 0
+#define CS42L42_PDN_ALL_MASK (1 << CS42L42_PDN_ALL_SHIFT)
+
+#define CS42L42_PWR_CTL2 (CS42L42_PAGE_11 + 0x02)
+#define CS42L42_ADC_SRC_PDNB_SHIFT 0
+#define CS42L42_ADC_SRC_PDNB_MASK (1 << CS42L42_ADC_SRC_PDNB_SHIFT)
+#define CS42L42_DAC_SRC_PDNB_SHIFT 1
+#define CS42L42_DAC_SRC_PDNB_MASK (1 << CS42L42_DAC_SRC_PDNB_SHIFT)
+#define CS42L42_ASP_DAI1_PDN_SHIFT 2
+#define CS42L42_ASP_DAI1_PDN_MASK (1 << CS42L42_ASP_DAI1_PDN_SHIFT)
+#define CS42L42_SRC_PDN_OVERRIDE_SHIFT 3
+#define CS42L42_SRC_PDN_OVERRIDE_MASK (1 << CS42L42_SRC_PDN_OVERRIDE_SHIFT)
+#define CS42L42_DISCHARGE_FILT_SHIFT 4
+#define CS42L42_DISCHARGE_FILT_MASK (1 << CS42L42_DISCHARGE_FILT_SHIFT)
+
+#define CS42L42_PWR_CTL3 (CS42L42_PAGE_11 + 0x03)
+#define CS42L42_RING_SENSE_PDNB_SHIFT 1
+#define CS42L42_RING_SENSE_PDNB_MASK (1 << CS42L42_RING_SENSE_PDNB_SHIFT)
+#define CS42L42_VPMON_PDNB_SHIFT 2
+#define CS42L42_VPMON_PDNB_MASK (1 << CS42L42_VPMON_PDNB_SHIFT)
+#define CS42L42_SW_CLK_STP_STAT_SEL_SHIFT 5
+#define CS42L42_SW_CLK_STP_STAT_SEL_MASK (3 << CS42L42_SW_CLK_STP_STAT_SEL_SHIFT)
+
+#define CS42L42_RSENSE_CTL1 (CS42L42_PAGE_11 + 0x04)
+#define CS42L42_RS_TRIM_R_SHIFT 0
+#define CS42L42_RS_TRIM_R_MASK (1 << CS42L42_RS_TRIM_R_SHIFT)
+#define CS42L42_RS_TRIM_T_SHIFT 1
+#define CS42L42_RS_TRIM_T_MASK (1 << CS42L42_RS_TRIM_T_SHIFT)
+#define CS42L42_HPREF_RS_SHIFT 2
+#define CS42L42_HPREF_RS_MASK (1 << CS42L42_HPREF_RS_SHIFT)
+#define CS42L42_HSBIAS_FILT_REF_RS_SHIFT 3
+#define CS42L42_HSBIAS_FILT_REF_RS_MASK (1 << CS42L42_HSBIAS_FILT_REF_RS_SHIFT)
+#define CS42L42_RING_SENSE_PU_HIZ_SHIFT 6
+#define CS42L42_RING_SENSE_PU_HIZ_MASK (1 << CS42L42_RING_SENSE_PU_HIZ_SHIFT)
+
+#define CS42L42_RSENSE_CTL2 (CS42L42_PAGE_11 + 0x05)
+#define CS42L42_TS_RS_GATE_SHIFT 7
+#define CS42L42_TS_RS_GATE_MASK		(1 << CS42L42_TS_RS_GATE_SHIFT)
+
+#define CS42L42_OSC_SWITCH (CS42L42_PAGE_11 + 0x07)
+#define CS42L42_SCLK_PRESENT_SHIFT 0
+#define CS42L42_SCLK_PRESENT_MASK (1 << CS42L42_SCLK_PRESENT_SHIFT)
+
+#define CS42L42_OSC_SWITCH_STATUS (CS42L42_PAGE_11 + 0x09)
+#define CS42L42_OSC_SW_SEL_STAT_SHIFT 0
+#define CS42L42_OSC_SW_SEL_STAT_MASK (3 << CS42L42_OSC_SW_SEL_STAT_SHIFT)
+#define CS42L42_OSC_PDNB_STAT_SHIFT 2
+#define CS42L42_OSC_PDNB_STAT_MASK	(1 << CS42L42_OSC_PDNB_STAT_SHIFT)
+
+#define CS42L42_RSENSE_CTL3 (CS42L42_PAGE_11 + 0x12)
+#define CS42L42_RS_RISE_DBNCE_TIME_SHIFT 0
+#define CS42L42_RS_RISE_DBNCE_TIME_MASK (7 << CS42L42_RS_RISE_DBNCE_TIME_SHIFT)
+#define CS42L42_RS_FALL_DBNCE_TIME_SHIFT 3
+#define CS42L42_RS_FALL_DBNCE_TIME_MASK (7 << CS42L42_RS_FALL_DBNCE_TIME_SHIFT)
+#define CS42L42_RS_PU_EN_SHIFT 6
+#define CS42L42_RS_PU_EN_MASK (1 << CS42L42_RS_PU_EN_SHIFT)
+#define CS42L42_RS_INV_SHIFT 7
+#define CS42L42_RS_INV_MASK (1 << CS42L42_RS_INV_SHIFT)
+
+#define CS42L42_TSENSE_CTL (CS42L42_PAGE_11 + 0x13)
+#define CS42L42_TS_RISE_DBNCE_TIME_SHIFT 0
+#define CS42L42_TS_RISE_DBNCE_TIME_MASK (7 << CS42L42_TS_RISE_DBNCE_TIME_SHIFT)
+#define CS42L42_TS_FALL_DBNCE_TIME_SHIFT 3
+#define CS42L42_TS_FALL_DBNCE_TIME_MASK (7 << CS42L42_TS_FALL_DBNCE_TIME_SHIFT)
+#define CS42L42_TS_INV_SHIFT 7
+#define CS42L42_TS_INV_MASK (1 << CS42L42_TS_INV_SHIFT)
+
+#define CS42L42_TSRS_INT_DISABLE (CS42L42_PAGE_11 + 0x14)
+#define CS42L42_D_RS_PLUG_DBNC_SHIFT 0
+#define CS42L42_D_RS_PLUG_DBNC_MASK (1 << CS42L42_D_RS_PLUG_DBNC_SHIFT)
+#define CS42L42_D_RS_UNPLUG_DBNC_SHIFT 1
+#define CS42L42_D_RS_UNPLUG_DBNC_MASK (1 << CS42L42_D_RS_UNPLUG_DBNC_SHIFT)
+#define CS42L42_D_TS_PLUG_DBNC_SHIFT 2
+#define CS42L42_D_TS_PLUG_DBNC_MASK (1 << CS42L42_D_TS_PLUG_DBNC_SHIFT)
+#define CS42L42_D_TS_UNPLUG_DBNC_SHIFT 3
+#define CS42L42_D_TS_UNPLUG_DBNC_MASK (1 << CS42L42_D_TS_UNPLUG_DBNC_SHIFT)
+
+#define CS42L42_TRSENSE_STATUS (CS42L42_PAGE_11 + 0x15)
+#define CS42L42_RS_PLUG_DBNC_SHIFT 0
+#define CS42L42_RS_PLUG_DBNC_MASK (1 << CS42L42_RS_PLUG_DBNC_SHIFT)
+#define CS42L42_RS_UNPLUG_DBNC_SHIFT 1
+#define CS42L42_RS_UNPLUG_DBNC_MASK (1 << CS42L42_RS_UNPLUG_DBNC_SHIFT)
+#define CS42L42_TS_PLUG_DBNC_SHIFT 2
+#define CS42L42_TS_PLUG_DBNC_MASK (1 << CS42L42_TS_PLUG_DBNC_SHIFT)
+#define CS42L42_TS_UNPLUG_DBNC_SHIFT 3
+#define CS42L42_TS_UNPLUG_DBNC_MASK (1 << CS42L42_TS_UNPLUG_DBNC_SHIFT)
+
+#define CS42L42_HSDET_CTL1 (CS42L42_PAGE_11 + 0x1F)
+#define CS42L42_HSDET_COMP1_LVL_SHIFT 0
+#define CS42L42_HSDET_COMP1_LVL_MASK (15 << CS42L42_HSDET_COMP1_LVL_SHIFT)
+#define CS42L42_HSDET_COMP2_LVL_SHIFT 4
+#define CS42L42_HSDET_COMP2_LVL_MASK (15 << CS42L42_HSDET_COMP2_LVL_SHIFT)
+
+#define CS42L42_HSDET_COMP1_LVL_VAL 12 /* 1.25V Comparator */
+#define CS42L42_HSDET_COMP2_LVL_VAL 2 /* 1.75V Comparator */
+#define CS42L42_HSDET_COMP1_LVL_DEFAULT 7 /* 1V Comparator */
+#define CS42L42_HSDET_COMP2_LVL_DEFAULT 7 /* 2V Comparator */
+
+#define CS42L42_HSDET_CTL2 (CS42L42_PAGE_11 + 0x20)
+#define CS42L42_HSDET_AUTO_TIME_SHIFT 0
+#define CS42L42_HSDET_AUTO_TIME_MASK (3 << CS42L42_HSDET_AUTO_TIME_SHIFT)
+#define CS42L42_HSBIAS_REF_SHIFT 3
+#define CS42L42_HSBIAS_REF_MASK (1 << CS42L42_HSBIAS_REF_SHIFT)
+#define CS42L42_HSDET_SET_SHIFT 4
+#define CS42L42_HSDET_SET_MASK (3 << CS42L42_HSDET_SET_SHIFT)
+#define CS42L42_HSDET_CTRL_SHIFT 6
+#define CS42L42_HSDET_CTRL_MASK (3 << CS42L42_HSDET_CTRL_SHIFT)
+
+#define CS42L42_HS_SWITCH_CTL (CS42L42_PAGE_11 + 0x21)
+#define CS42L42_SW_GNDHS_HS4_SHIFT 0
+#define CS42L42_SW_GNDHS_HS4_MASK (1 << CS42L42_SW_GNDHS_HS4_SHIFT)
+#define CS42L42_SW_GNDHS_HS3_SHIFT 1
+#define CS42L42_SW_GNDHS_HS3_MASK (1 << CS42L42_SW_GNDHS_HS3_SHIFT)
+#define CS42L42_SW_HSB_HS4_SHIFT 2
+#define CS42L42_SW_HSB_HS4_MASK (1 << CS42L42_SW_HSB_HS4_SHIFT)
+#define CS42L42_SW_HSB_HS3_SHIFT 3
+#define CS42L42_SW_HSB_HS3_MASK (1 << CS42L42_SW_HSB_HS3_SHIFT)
+#define CS42L42_SW_HSB_FILT_HS4_SHIFT 4
+#define CS42L42_SW_HSB_FILT_HS4_MASK (1 << CS42L42_SW_HSB_FILT_HS4_SHIFT)
+#define CS42L42_SW_HSB_FILT_HS3_SHIFT 5
+#define CS42L42_SW_HSB_FILT_HS3_MASK (1 << CS42L42_SW_HSB_FILT_HS3_SHIFT)
+#define CS42L42_SW_REF_HS4_SHIFT 6
+#define CS42L42_SW_REF_HS4_MASK (1 << CS42L42_SW_REF_HS4_SHIFT)
+#define CS42L42_SW_REF_HS3_SHIFT 7
+#define CS42L42_SW_REF_HS3_MASK (1 << CS42L42_SW_REF_HS3_SHIFT)
+
+#define CS42L42_HS_DET_STATUS (CS42L42_PAGE_11 + 0x24)
+#define CS42L42_HSDET_TYPE_SHIFT 0
+#define CS42L42_HSDET_TYPE_MASK (3 << CS42L42_HSDET_TYPE_SHIFT)
+#define CS42L42_HSDET_COMP1_OUT_SHIFT 6
+#define CS42L42_HSDET_COMP1_OUT_MASK (1 << CS42L42_HSDET_COMP1_OUT_SHIFT)
+#define CS42L42_HSDET_COMP2_OUT_SHIFT 7
+#define CS42L42_HSDET_COMP2_OUT_MASK (1 << CS42L42_HSDET_COMP2_OUT_SHIFT)
+#define CS42L42_PLUG_CTIA 0
+#define CS42L42_PLUG_OMTP 1
+#define CS42L42_PLUG_HEADPHONE 2
+#define CS42L42_PLUG_INVALID 3
+
+#define CS42L42_HSDET_SW_COMP1 ((0 << CS42L42_SW_GNDHS_HS4_SHIFT) | \
+ (1 << CS42L42_SW_GNDHS_HS3_SHIFT) | \
+ (1 << CS42L42_SW_HSB_HS4_SHIFT) | \
+ (0 << CS42L42_SW_HSB_HS3_SHIFT) | \
+ (0 << CS42L42_SW_HSB_FILT_HS4_SHIFT) | \
+ (1 << CS42L42_SW_HSB_FILT_HS3_SHIFT) | \
+ (0 << CS42L42_SW_REF_HS4_SHIFT) | \
+ (1 << CS42L42_SW_REF_HS3_SHIFT))
+#define CS42L42_HSDET_SW_COMP2 ((1 << CS42L42_SW_GNDHS_HS4_SHIFT) | \
+ (0 << CS42L42_SW_GNDHS_HS3_SHIFT) | \
+ (0 << CS42L42_SW_HSB_HS4_SHIFT) | \
+ (1 << CS42L42_SW_HSB_HS3_SHIFT) | \
+ (1 << CS42L42_SW_HSB_FILT_HS4_SHIFT) | \
+ (0 << CS42L42_SW_HSB_FILT_HS3_SHIFT) | \
+ (1 << CS42L42_SW_REF_HS4_SHIFT) | \
+ (0 << CS42L42_SW_REF_HS3_SHIFT))
+#define CS42L42_HSDET_SW_TYPE1 ((0 << CS42L42_SW_GNDHS_HS4_SHIFT) | \
+ (1 << CS42L42_SW_GNDHS_HS3_SHIFT) | \
+ (1 << CS42L42_SW_HSB_HS4_SHIFT) | \
+ (0 << CS42L42_SW_HSB_HS3_SHIFT) | \
+ (0 << CS42L42_SW_HSB_FILT_HS4_SHIFT) | \
+ (1 << CS42L42_SW_HSB_FILT_HS3_SHIFT) | \
+ (0 << CS42L42_SW_REF_HS4_SHIFT) | \
+ (1 << CS42L42_SW_REF_HS3_SHIFT))
+#define CS42L42_HSDET_SW_TYPE2 ((1 << CS42L42_SW_GNDHS_HS4_SHIFT) | \
+ (0 << CS42L42_SW_GNDHS_HS3_SHIFT) | \
+ (0 << CS42L42_SW_HSB_HS4_SHIFT) | \
+ (1 << CS42L42_SW_HSB_HS3_SHIFT) | \
+ (1 << CS42L42_SW_HSB_FILT_HS4_SHIFT) | \
+ (0 << CS42L42_SW_HSB_FILT_HS3_SHIFT) | \
+ (1 << CS42L42_SW_REF_HS4_SHIFT) | \
+ (0 << CS42L42_SW_REF_HS3_SHIFT))
+#define CS42L42_HSDET_SW_TYPE3 ((1 << CS42L42_SW_GNDHS_HS4_SHIFT) | \
+ (1 << CS42L42_SW_GNDHS_HS3_SHIFT) | \
+ (0 << CS42L42_SW_HSB_HS4_SHIFT) | \
+ (0 << CS42L42_SW_HSB_HS3_SHIFT) | \
+ (1 << CS42L42_SW_HSB_FILT_HS4_SHIFT) | \
+ (1 << CS42L42_SW_HSB_FILT_HS3_SHIFT) | \
+ (1 << CS42L42_SW_REF_HS4_SHIFT) | \
+ (1 << CS42L42_SW_REF_HS3_SHIFT))
+#define CS42L42_HSDET_SW_TYPE4 ((0 << CS42L42_SW_GNDHS_HS4_SHIFT) | \
+ (1 << CS42L42_SW_GNDHS_HS3_SHIFT) | \
+ (1 << CS42L42_SW_HSB_HS4_SHIFT) | \
+ (0 << CS42L42_SW_HSB_HS3_SHIFT) | \
+ (0 << CS42L42_SW_HSB_FILT_HS4_SHIFT) | \
+ (1 << CS42L42_SW_HSB_FILT_HS3_SHIFT) | \
+ (0 << CS42L42_SW_REF_HS4_SHIFT) | \
+ (1 << CS42L42_SW_REF_HS3_SHIFT))
+
+#define CS42L42_HSDET_COMP_TYPE1 1
+#define CS42L42_HSDET_COMP_TYPE2 2
+#define CS42L42_HSDET_COMP_TYPE3 0
+#define CS42L42_HSDET_COMP_TYPE4 3
+
+#define CS42L42_HS_CLAMP_DISABLE (CS42L42_PAGE_11 + 0x29)
+#define CS42L42_HS_CLAMP_DISABLE_SHIFT 0
+#define CS42L42_HS_CLAMP_DISABLE_MASK (1 << CS42L42_HS_CLAMP_DISABLE_SHIFT)
+
+/* Page 0x12 Clocking Registers */
+#define CS42L42_MCLK_SRC_SEL (CS42L42_PAGE_12 + 0x01)
+#define CS42L42_MCLKDIV_SHIFT 1
+#define CS42L42_MCLKDIV_MASK (1 << CS42L42_MCLKDIV_SHIFT)
+#define CS42L42_MCLK_SRC_SEL_SHIFT 0
+#define CS42L42_MCLK_SRC_SEL_MASK (1 << CS42L42_MCLK_SRC_SEL_SHIFT)
+
+#define CS42L42_SPDIF_CLK_CFG (CS42L42_PAGE_12 + 0x02)
+#define CS42L42_FSYNC_PW_LOWER (CS42L42_PAGE_12 + 0x03)
+
+#define CS42L42_FSYNC_PW_UPPER (CS42L42_PAGE_12 + 0x04)
+#define CS42L42_FSYNC_PULSE_WIDTH_SHIFT 0
+#define CS42L42_FSYNC_PULSE_WIDTH_MASK (0xff << \
+ CS42L42_FSYNC_PULSE_WIDTH_SHIFT)
+
+#define CS42L42_FSYNC_P_LOWER (CS42L42_PAGE_12 + 0x05)
+
+#define CS42L42_FSYNC_P_UPPER (CS42L42_PAGE_12 + 0x06)
+#define CS42L42_FSYNC_PERIOD_SHIFT 0
+#define CS42L42_FSYNC_PERIOD_MASK (0xff << CS42L42_FSYNC_PERIOD_SHIFT)
+
+#define CS42L42_ASP_CLK_CFG (CS42L42_PAGE_12 + 0x07)
+#define CS42L42_ASP_SCLK_EN_SHIFT 5
+#define CS42L42_ASP_SCLK_EN_MASK (1 << CS42L42_ASP_SCLK_EN_SHIFT)
+#define CS42L42_ASP_MASTER_MODE 0x01
+#define CS42L42_ASP_SLAVE_MODE 0x00
+#define CS42L42_ASP_MODE_SHIFT 4
+#define CS42L42_ASP_MODE_MASK (1 << CS42L42_ASP_MODE_SHIFT)
+#define CS42L42_ASP_SCPOL_SHIFT 2
+#define CS42L42_ASP_SCPOL_MASK (3 << CS42L42_ASP_SCPOL_SHIFT)
+#define CS42L42_ASP_SCPOL_NOR 3
+#define CS42L42_ASP_LCPOL_SHIFT 0
+#define CS42L42_ASP_LCPOL_MASK (3 << CS42L42_ASP_LCPOL_SHIFT)
+#define CS42L42_ASP_LCPOL_INV 3
+
+#define CS42L42_ASP_FRM_CFG (CS42L42_PAGE_12 + 0x08)
+#define CS42L42_ASP_STP_SHIFT 4
+#define CS42L42_ASP_STP_MASK (1 << CS42L42_ASP_STP_SHIFT)
+#define CS42L42_ASP_5050_SHIFT 3
+#define CS42L42_ASP_5050_MASK (1 << CS42L42_ASP_5050_SHIFT)
+#define CS42L42_ASP_FSD_SHIFT 0
+#define CS42L42_ASP_FSD_MASK (7 << CS42L42_ASP_FSD_SHIFT)
+#define CS42L42_ASP_FSD_0_5 1
+#define CS42L42_ASP_FSD_1_0 2
+#define CS42L42_ASP_FSD_1_5 3
+#define CS42L42_ASP_FSD_2_0 4
+
+#define CS42L42_FS_RATE_EN (CS42L42_PAGE_12 + 0x09)
+#define CS42L42_FS_EN_SHIFT 0
+#define CS42L42_FS_EN_MASK (0xf << CS42L42_FS_EN_SHIFT)
+#define CS42L42_FS_EN_IASRC_96K 0x1
+#define CS42L42_FS_EN_OASRC_96K 0x2
+
+#define CS42L42_IN_ASRC_CLK (CS42L42_PAGE_12 + 0x0A)
+#define CS42L42_CLK_IASRC_SEL_SHIFT 0
+#define CS42L42_CLK_IASRC_SEL_MASK (1 << CS42L42_CLK_IASRC_SEL_SHIFT)
+#define CS42L42_CLK_IASRC_SEL_6 0
+#define CS42L42_CLK_IASRC_SEL_12 1
+
+#define CS42L42_OUT_ASRC_CLK (CS42L42_PAGE_12 + 0x0B)
+#define CS42L42_CLK_OASRC_SEL_SHIFT 0
+#define CS42L42_CLK_OASRC_SEL_MASK (1 << CS42L42_CLK_OASRC_SEL_SHIFT)
+#define CS42L42_CLK_OASRC_SEL_12 1
+
+#define CS42L42_PLL_DIV_CFG1 (CS42L42_PAGE_12 + 0x0C)
+#define CS42L42_SCLK_PREDIV_SHIFT 0
+#define CS42L42_SCLK_PREDIV_MASK (3 << CS42L42_SCLK_PREDIV_SHIFT)
+
+/* Page 0x13 Interrupt Registers */
+/* Interrupts */
+#define CS42L42_ADC_OVFL_STATUS (CS42L42_PAGE_13 + 0x01)
+#define CS42L42_MIXER_STATUS (CS42L42_PAGE_13 + 0x02)
+#define CS42L42_SRC_STATUS (CS42L42_PAGE_13 + 0x03)
+#define CS42L42_ASP_RX_STATUS (CS42L42_PAGE_13 + 0x04)
+#define CS42L42_ASP_TX_STATUS (CS42L42_PAGE_13 + 0x05)
+#define CS42L42_CODEC_STATUS (CS42L42_PAGE_13 + 0x08)
+#define CS42L42_DET_INT_STATUS1 (CS42L42_PAGE_13 + 0x09)
+#define CS42L42_DET_INT_STATUS2 (CS42L42_PAGE_13 + 0x0A)
+#define CS42L42_SRCPL_INT_STATUS (CS42L42_PAGE_13 + 0x0B)
+#define CS42L42_VPMON_STATUS (CS42L42_PAGE_13 + 0x0D)
+#define CS42L42_PLL_LOCK_STATUS (CS42L42_PAGE_13 + 0x0E)
+#define CS42L42_TSRS_PLUG_STATUS (CS42L42_PAGE_13 + 0x0F)
+/* Masks */
+#define CS42L42_ADC_OVFL_INT_MASK (CS42L42_PAGE_13 + 0x16)
+#define CS42L42_ADC_OVFL_SHIFT 0
+#define CS42L42_ADC_OVFL_MASK (1 << CS42L42_ADC_OVFL_SHIFT)
+#define CS42L42_ADC_OVFL_VAL_MASK CS42L42_ADC_OVFL_MASK
+
+#define CS42L42_MIXER_INT_MASK (CS42L42_PAGE_13 + 0x17)
+#define CS42L42_MIX_CHB_OVFL_SHIFT 0
+#define CS42L42_MIX_CHB_OVFL_MASK (1 << CS42L42_MIX_CHB_OVFL_SHIFT)
+#define CS42L42_MIX_CHA_OVFL_SHIFT 1
+#define CS42L42_MIX_CHA_OVFL_MASK (1 << CS42L42_MIX_CHA_OVFL_SHIFT)
+#define CS42L42_EQ_OVFL_SHIFT 2
+#define CS42L42_EQ_OVFL_MASK (1 << CS42L42_EQ_OVFL_SHIFT)
+#define CS42L42_EQ_BIQUAD_OVFL_SHIFT 3
+#define CS42L42_EQ_BIQUAD_OVFL_MASK (1 << CS42L42_EQ_BIQUAD_OVFL_SHIFT)
+#define CS42L42_MIXER_VAL_MASK (CS42L42_MIX_CHB_OVFL_MASK | \
+ CS42L42_MIX_CHA_OVFL_MASK | \
+ CS42L42_EQ_OVFL_MASK | \
+ CS42L42_EQ_BIQUAD_OVFL_MASK)
+
+#define CS42L42_SRC_INT_MASK (CS42L42_PAGE_13 + 0x18)
+#define CS42L42_SRC_ILK_SHIFT 0
+#define CS42L42_SRC_ILK_MASK (1 << CS42L42_SRC_ILK_SHIFT)
+#define CS42L42_SRC_OLK_SHIFT 1
+#define CS42L42_SRC_OLK_MASK (1 << CS42L42_SRC_OLK_SHIFT)
+#define CS42L42_SRC_IUNLK_SHIFT 2
+#define CS42L42_SRC_IUNLK_MASK (1 << CS42L42_SRC_IUNLK_SHIFT)
+#define CS42L42_SRC_OUNLK_SHIFT 3
+#define CS42L42_SRC_OUNLK_MASK (1 << CS42L42_SRC_OUNLK_SHIFT)
+#define CS42L42_SRC_VAL_MASK (CS42L42_SRC_ILK_MASK | \
+ CS42L42_SRC_OLK_MASK | \
+ CS42L42_SRC_IUNLK_MASK | \
+ CS42L42_SRC_OUNLK_MASK)
+
+#define CS42L42_ASP_RX_INT_MASK (CS42L42_PAGE_13 + 0x19)
+#define CS42L42_ASPRX_NOLRCK_SHIFT 0
+#define CS42L42_ASPRX_NOLRCK_MASK (1 << CS42L42_ASPRX_NOLRCK_SHIFT)
+#define CS42L42_ASPRX_EARLY_SHIFT 1
+#define CS42L42_ASPRX_EARLY_MASK (1 << CS42L42_ASPRX_EARLY_SHIFT)
+#define CS42L42_ASPRX_LATE_SHIFT 2
+#define CS42L42_ASPRX_LATE_MASK (1 << CS42L42_ASPRX_LATE_SHIFT)
+#define CS42L42_ASPRX_ERROR_SHIFT 3
+#define CS42L42_ASPRX_ERROR_MASK (1 << CS42L42_ASPRX_ERROR_SHIFT)
+#define CS42L42_ASPRX_OVLD_SHIFT 4
+#define CS42L42_ASPRX_OVLD_MASK (1 << CS42L42_ASPRX_OVLD_SHIFT)
+#define CS42L42_ASP_RX_VAL_MASK (CS42L42_ASPRX_NOLRCK_MASK | \
+ CS42L42_ASPRX_EARLY_MASK | \
+ CS42L42_ASPRX_LATE_MASK | \
+ CS42L42_ASPRX_ERROR_MASK | \
+ CS42L42_ASPRX_OVLD_MASK)
+
+#define CS42L42_ASP_TX_INT_MASK (CS42L42_PAGE_13 + 0x1A)
+#define CS42L42_ASPTX_NOLRCK_SHIFT 0
+#define CS42L42_ASPTX_NOLRCK_MASK (1 << CS42L42_ASPTX_NOLRCK_SHIFT)
+#define CS42L42_ASPTX_EARLY_SHIFT 1
+#define CS42L42_ASPTX_EARLY_MASK (1 << CS42L42_ASPTX_EARLY_SHIFT)
+#define CS42L42_ASPTX_LATE_SHIFT 2
+#define CS42L42_ASPTX_LATE_MASK (1 << CS42L42_ASPTX_LATE_SHIFT)
+#define CS42L42_ASPTX_SMERROR_SHIFT 3
+#define CS42L42_ASPTX_SMERROR_MASK (1 << CS42L42_ASPTX_SMERROR_SHIFT)
+#define CS42L42_ASP_TX_VAL_MASK (CS42L42_ASPTX_NOLRCK_MASK | \
+ CS42L42_ASPTX_EARLY_MASK | \
+ CS42L42_ASPTX_LATE_MASK | \
+ CS42L42_ASPTX_SMERROR_MASK)
+
+#define CS42L42_CODEC_INT_MASK (CS42L42_PAGE_13 + 0x1B)
+#define CS42L42_PDN_DONE_SHIFT 0
+#define CS42L42_PDN_DONE_MASK (1 << CS42L42_PDN_DONE_SHIFT)
+#define CS42L42_HSDET_AUTO_DONE_SHIFT 1
+#define CS42L42_HSDET_AUTO_DONE_MASK (1 << CS42L42_HSDET_AUTO_DONE_SHIFT)
+#define CS42L42_CODEC_VAL_MASK (CS42L42_PDN_DONE_MASK | \
+ CS42L42_HSDET_AUTO_DONE_MASK)
+
+#define CS42L42_SRCPL_INT_MASK (CS42L42_PAGE_13 + 0x1C)
+#define CS42L42_SRCPL_ADC_LK_SHIFT 0
+#define CS42L42_SRCPL_ADC_LK_MASK (1 << CS42L42_SRCPL_ADC_LK_SHIFT)
+#define CS42L42_SRCPL_DAC_LK_SHIFT 2
+#define CS42L42_SRCPL_DAC_LK_MASK (1 << CS42L42_SRCPL_DAC_LK_SHIFT)
+#define CS42L42_SRCPL_ADC_UNLK_SHIFT 5
+#define CS42L42_SRCPL_ADC_UNLK_MASK (1 << CS42L42_SRCPL_ADC_UNLK_SHIFT)
+#define CS42L42_SRCPL_DAC_UNLK_SHIFT 6
+#define CS42L42_SRCPL_DAC_UNLK_MASK (1 << CS42L42_SRCPL_DAC_UNLK_SHIFT)
+#define CS42L42_SRCPL_VAL_MASK (CS42L42_SRCPL_ADC_LK_MASK | \
+ CS42L42_SRCPL_DAC_LK_MASK | \
+ CS42L42_SRCPL_ADC_UNLK_MASK | \
+ CS42L42_SRCPL_DAC_UNLK_MASK)
+
+#define CS42L42_VPMON_INT_MASK (CS42L42_PAGE_13 + 0x1E)
+#define CS42L42_VPMON_SHIFT 0
+#define CS42L42_VPMON_MASK (1 << CS42L42_VPMON_SHIFT)
+#define CS42L42_VPMON_VAL_MASK CS42L42_VPMON_MASK
+
+#define CS42L42_PLL_LOCK_INT_MASK (CS42L42_PAGE_13 + 0x1F)
+#define CS42L42_PLL_LOCK_SHIFT 0
+#define CS42L42_PLL_LOCK_MASK (1 << CS42L42_PLL_LOCK_SHIFT)
+#define CS42L42_PLL_LOCK_VAL_MASK CS42L42_PLL_LOCK_MASK
+
+#define CS42L42_TSRS_PLUG_INT_MASK (CS42L42_PAGE_13 + 0x20)
+#define CS42L42_RS_PLUG_SHIFT 0
+#define CS42L42_RS_PLUG_MASK (1 << CS42L42_RS_PLUG_SHIFT)
+#define CS42L42_RS_UNPLUG_SHIFT 1
+#define CS42L42_RS_UNPLUG_MASK (1 << CS42L42_RS_UNPLUG_SHIFT)
+#define CS42L42_TS_PLUG_SHIFT 2
+#define CS42L42_TS_PLUG_MASK (1 << CS42L42_TS_PLUG_SHIFT)
+#define CS42L42_TS_UNPLUG_SHIFT 3
+#define CS42L42_TS_UNPLUG_MASK (1 << CS42L42_TS_UNPLUG_SHIFT)
+#define CS42L42_TSRS_PLUG_VAL_MASK (CS42L42_RS_PLUG_MASK | \
+ CS42L42_RS_UNPLUG_MASK | \
+ CS42L42_TS_PLUG_MASK | \
+ CS42L42_TS_UNPLUG_MASK)
+#define CS42L42_TS_PLUG 3
+#define CS42L42_TS_UNPLUG 0
+#define CS42L42_TS_TRANS 1
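/*
 * Editorial sketch (helper name invented, not part of this patch): the two
 * TS bits of CS42L42_TSRS_PLUG_STATUS, shifted down by CS42L42_TS_PLUG_SHIFT,
 * decode to the CS42L42_TS_PLUG/CS42L42_TS_UNPLUG/CS42L42_TS_TRANS codes above.
 *
 *	static inline unsigned int cs42l42_ts_state(unsigned int status)
 *	{
 *		return (status & (CS42L42_TS_PLUG_MASK |
 *				  CS42L42_TS_UNPLUG_MASK)) >>
 *		       CS42L42_TS_PLUG_SHIFT;
 *	}
 */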
+
+/*
+ * NOTE: PLL_START must be 0 while both ADC_PDN=1 and HP_PDN=1.
+ * Otherwise it will prevent FILT+ from charging properly.
+ */
+#define CS42L42_PLL_CTL1 (CS42L42_PAGE_15 + 0x01)
+#define CS42L42_PLL_START_SHIFT 0
+#define CS42L42_PLL_START_MASK (1 << CS42L42_PLL_START_SHIFT)
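/*
 * Editorial sketch of honoring the note above (helper name invented;
 * assumes a regmap handle and <linux/regmap.h>): clear PLL_START before
 * both ADC and HP are powered down, so FILT+ can charge.
 *
 *	static inline void cs42l42_pll_stop(struct regmap *regmap)
 *	{
 *		regmap_update_bits(regmap, CS42L42_PLL_CTL1,
 *				   CS42L42_PLL_START_MASK, 0);
 *	}
 */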
+
+#define CS42L42_PLL_DIV_FRAC0 (CS42L42_PAGE_15 + 0x02)
+#define CS42L42_PLL_DIV_FRAC_SHIFT 0
+#define CS42L42_PLL_DIV_FRAC_MASK (0xff << CS42L42_PLL_DIV_FRAC_SHIFT)
+
+#define CS42L42_PLL_DIV_FRAC1 (CS42L42_PAGE_15 + 0x03)
+#define CS42L42_PLL_DIV_FRAC2 (CS42L42_PAGE_15 + 0x04)
+
+#define CS42L42_PLL_DIV_INT (CS42L42_PAGE_15 + 0x05)
+#define CS42L42_PLL_DIV_INT_SHIFT 0
+#define CS42L42_PLL_DIV_INT_MASK (0xff << CS42L42_PLL_DIV_INT_SHIFT)
+
+#define CS42L42_PLL_CTL3 (CS42L42_PAGE_15 + 0x08)
+#define CS42L42_PLL_DIVOUT_SHIFT 0
+#define CS42L42_PLL_DIVOUT_MASK (0xff << CS42L42_PLL_DIVOUT_SHIFT)
+
+#define CS42L42_PLL_CAL_RATIO (CS42L42_PAGE_15 + 0x0A)
+#define CS42L42_PLL_CAL_RATIO_SHIFT 0
+#define CS42L42_PLL_CAL_RATIO_MASK (0xff << CS42L42_PLL_CAL_RATIO_SHIFT)
+
+#define CS42L42_PLL_CTL4 (CS42L42_PAGE_15 + 0x1B)
+#define CS42L42_PLL_MODE_SHIFT 0
+#define CS42L42_PLL_MODE_MASK (3 << CS42L42_PLL_MODE_SHIFT)
+
+/* Page 0x19 HP Load Detect Registers */
+#define CS42L42_LOAD_DET_RCSTAT (CS42L42_PAGE_19 + 0x25)
+#define CS42L42_RLA_STAT_SHIFT 0
+#define CS42L42_RLA_STAT_MASK (3 << CS42L42_RLA_STAT_SHIFT)
+#define CS42L42_RLA_STAT_15_OHM 0
+
+#define CS42L42_LOAD_DET_DONE (CS42L42_PAGE_19 + 0x26)
+#define CS42L42_HPLOAD_DET_DONE_SHIFT 0
+#define CS42L42_HPLOAD_DET_DONE_MASK (1 << CS42L42_HPLOAD_DET_DONE_SHIFT)
+
+#define CS42L42_LOAD_DET_EN (CS42L42_PAGE_19 + 0x27)
+#define CS42L42_HP_LD_EN_SHIFT 0
+#define CS42L42_HP_LD_EN_MASK (1 << CS42L42_HP_LD_EN_SHIFT)
+
+/* Page 0x1B Headset Interface Registers */
+#define CS42L42_HSBIAS_SC_AUTOCTL (CS42L42_PAGE_1B + 0x70)
+#define CS42L42_HSBIAS_SENSE_TRIP_SHIFT 0
+#define CS42L42_HSBIAS_SENSE_TRIP_MASK (7 << CS42L42_HSBIAS_SENSE_TRIP_SHIFT)
+#define CS42L42_TIP_SENSE_EN_SHIFT 5
+#define CS42L42_TIP_SENSE_EN_MASK (1 << CS42L42_TIP_SENSE_EN_SHIFT)
+#define CS42L42_AUTO_HSBIAS_HIZ_SHIFT 6
+#define CS42L42_AUTO_HSBIAS_HIZ_MASK (1 << CS42L42_AUTO_HSBIAS_HIZ_SHIFT)
+#define CS42L42_HSBIAS_SENSE_EN_SHIFT 7
+#define CS42L42_HSBIAS_SENSE_EN_MASK (1 << CS42L42_HSBIAS_SENSE_EN_SHIFT)
+
+#define CS42L42_WAKE_CTL (CS42L42_PAGE_1B + 0x71)
+#define CS42L42_WAKEB_CLEAR_SHIFT 0
+#define CS42L42_WAKEB_CLEAR_MASK (1 << CS42L42_WAKEB_CLEAR_SHIFT)
+#define CS42L42_WAKEB_MODE_SHIFT 5
+#define CS42L42_WAKEB_MODE_MASK (1 << CS42L42_WAKEB_MODE_SHIFT)
+#define CS42L42_M_HP_WAKE_SHIFT 6
+#define CS42L42_M_HP_WAKE_MASK (1 << CS42L42_M_HP_WAKE_SHIFT)
+#define CS42L42_M_MIC_WAKE_SHIFT 7
+#define CS42L42_M_MIC_WAKE_MASK (1 << CS42L42_M_MIC_WAKE_SHIFT)
+
+#define CS42L42_ADC_DISABLE_MUTE (CS42L42_PAGE_1B + 0x72)
+#define CS42L42_ADC_DISABLE_S0_MUTE_SHIFT 7
+#define CS42L42_ADC_DISABLE_S0_MUTE_MASK (1 << CS42L42_ADC_DISABLE_S0_MUTE_SHIFT)
+
+#define CS42L42_TIPSENSE_CTL (CS42L42_PAGE_1B + 0x73)
+#define CS42L42_TIP_SENSE_DEBOUNCE_SHIFT 0
+#define CS42L42_TIP_SENSE_DEBOUNCE_MASK (3 << CS42L42_TIP_SENSE_DEBOUNCE_SHIFT)
+#define CS42L42_TIP_SENSE_INV_SHIFT 5
+#define CS42L42_TIP_SENSE_INV_MASK (1 << CS42L42_TIP_SENSE_INV_SHIFT)
+#define CS42L42_TIP_SENSE_CTRL_SHIFT 6
+#define CS42L42_TIP_SENSE_CTRL_MASK (3 << CS42L42_TIP_SENSE_CTRL_SHIFT)
+
+/*
+ * NOTE: DETECT_MODE must be 0 while both ADC_PDN=1 and HP_PDN=1.
+ * Otherwise it will prevent FILT+ from charging properly.
+ */
+#define CS42L42_MISC_DET_CTL (CS42L42_PAGE_1B + 0x74)
+#define CS42L42_PDN_MIC_LVL_DET_SHIFT 0
+#define CS42L42_PDN_MIC_LVL_DET_MASK (1 << CS42L42_PDN_MIC_LVL_DET_SHIFT)
+#define CS42L42_HSBIAS_CTL_SHIFT 1
+#define CS42L42_HSBIAS_CTL_MASK (3 << CS42L42_HSBIAS_CTL_SHIFT)
+#define CS42L42_DETECT_MODE_SHIFT 3
+#define CS42L42_DETECT_MODE_MASK (3 << CS42L42_DETECT_MODE_SHIFT)
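/*
 * Editorial sketch, analogous to the PLL_START note (helper name invented;
 * regmap handle assumed): force DETECT_MODE to 0 while ADC_PDN=1 and
 * HP_PDN=1.
 *
 *	static inline void cs42l42_clear_detect_mode(struct regmap *regmap)
 *	{
 *		regmap_update_bits(regmap, CS42L42_MISC_DET_CTL,
 *				   CS42L42_DETECT_MODE_MASK, 0);
 *	}
 */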
+
+#define CS42L42_MIC_DET_CTL1 (CS42L42_PAGE_1B + 0x75)
+#define CS42L42_HS_DET_LEVEL_SHIFT 0
+#define CS42L42_HS_DET_LEVEL_MASK (0x3F << CS42L42_HS_DET_LEVEL_SHIFT)
+#define CS42L42_EVENT_STAT_SEL_SHIFT 6
+#define CS42L42_EVENT_STAT_SEL_MASK (1 << CS42L42_EVENT_STAT_SEL_SHIFT)
+#define CS42L42_LATCH_TO_VP_SHIFT 7
+#define CS42L42_LATCH_TO_VP_MASK (1 << CS42L42_LATCH_TO_VP_SHIFT)
+
+#define CS42L42_MIC_DET_CTL2 (CS42L42_PAGE_1B + 0x76)
+#define CS42L42_DEBOUNCE_TIME_SHIFT 5
+#define CS42L42_DEBOUNCE_TIME_MASK (0x07 << CS42L42_DEBOUNCE_TIME_SHIFT)
+
+#define CS42L42_DET_STATUS1 (CS42L42_PAGE_1B + 0x77)
+#define CS42L42_HSBIAS_HIZ_MODE_SHIFT 6
+#define CS42L42_HSBIAS_HIZ_MODE_MASK (1 << CS42L42_HSBIAS_HIZ_MODE_SHIFT)
+#define CS42L42_TIP_SENSE_SHIFT 7
+#define CS42L42_TIP_SENSE_MASK (1 << CS42L42_TIP_SENSE_SHIFT)
+
+#define CS42L42_DET_STATUS2 (CS42L42_PAGE_1B + 0x78)
+#define CS42L42_SHORT_TRUE_SHIFT 0
+#define CS42L42_SHORT_TRUE_MASK (1 << CS42L42_SHORT_TRUE_SHIFT)
+#define CS42L42_HS_TRUE_SHIFT 1
+#define CS42L42_HS_TRUE_MASK (1 << CS42L42_HS_TRUE_SHIFT)
+
+#define CS42L42_DET_INT1_MASK (CS42L42_PAGE_1B + 0x79)
+#define CS42L42_TIP_SENSE_UNPLUG_SHIFT 5
+#define CS42L42_TIP_SENSE_UNPLUG_MASK (1 << CS42L42_TIP_SENSE_UNPLUG_SHIFT)
+#define CS42L42_TIP_SENSE_PLUG_SHIFT 6
+#define CS42L42_TIP_SENSE_PLUG_MASK (1 << CS42L42_TIP_SENSE_PLUG_SHIFT)
+#define CS42L42_HSBIAS_SENSE_SHIFT 7
+#define CS42L42_HSBIAS_SENSE_MASK (1 << CS42L42_HSBIAS_SENSE_SHIFT)
+#define CS42L42_DET_INT_VAL1_MASK (CS42L42_TIP_SENSE_UNPLUG_MASK | \
+ CS42L42_TIP_SENSE_PLUG_MASK | \
+ CS42L42_HSBIAS_SENSE_MASK)
+
+#define CS42L42_DET_INT2_MASK (CS42L42_PAGE_1B + 0x7A)
+#define CS42L42_M_SHORT_DET_SHIFT 0
+#define CS42L42_M_SHORT_DET_MASK (1 << CS42L42_M_SHORT_DET_SHIFT)
+#define CS42L42_M_SHORT_RLS_SHIFT 1
+#define CS42L42_M_SHORT_RLS_MASK (1 << CS42L42_M_SHORT_RLS_SHIFT)
+#define CS42L42_M_HSBIAS_HIZ_SHIFT 2
+#define CS42L42_M_HSBIAS_HIZ_MASK (1 << CS42L42_M_HSBIAS_HIZ_SHIFT)
+#define CS42L42_M_DETECT_FT_SHIFT 6
+#define CS42L42_M_DETECT_FT_MASK (1 << CS42L42_M_DETECT_FT_SHIFT)
+#define CS42L42_M_DETECT_TF_SHIFT 7
+#define CS42L42_M_DETECT_TF_MASK (1 << CS42L42_M_DETECT_TF_SHIFT)
+#define CS42L42_DET_INT_VAL2_MASK (CS42L42_M_SHORT_DET_MASK | \
+ CS42L42_M_SHORT_RLS_MASK | \
+ CS42L42_M_HSBIAS_HIZ_MASK | \
+ CS42L42_M_DETECT_FT_MASK | \
+ CS42L42_M_DETECT_TF_MASK)
+
+/* Page 0x1C Headset Bias Registers */
+#define CS42L42_HS_BIAS_CTL (CS42L42_PAGE_1C + 0x03)
+#define CS42L42_HSBIAS_RAMP_SHIFT 0
+#define CS42L42_HSBIAS_RAMP_MASK (3 << CS42L42_HSBIAS_RAMP_SHIFT)
+#define CS42L42_HSBIAS_PD_SHIFT 4
+#define CS42L42_HSBIAS_PD_MASK (1 << CS42L42_HSBIAS_PD_SHIFT)
+#define CS42L42_HSBIAS_CAPLESS_SHIFT 7
+#define CS42L42_HSBIAS_CAPLESS_MASK (1 << CS42L42_HSBIAS_CAPLESS_SHIFT)
+
+/* Page 0x1D ADC Registers */
+#define CS42L42_ADC_CTL (CS42L42_PAGE_1D + 0x01)
+#define CS42L42_ADC_NOTCH_DIS_SHIFT 5
+#define CS42L42_ADC_FORCE_WEAK_VCM_SHIFT 4
+#define CS42L42_ADC_INV_SHIFT 2
+#define CS42L42_ADC_DIG_BOOST_SHIFT 0
+
+#define CS42L42_ADC_VOLUME (CS42L42_PAGE_1D + 0x03)
+#define CS42L42_ADC_VOL_SHIFT 0
+
+#define CS42L42_ADC_WNF_HPF_CTL (CS42L42_PAGE_1D + 0x04)
+#define CS42L42_ADC_WNF_CF_SHIFT 4
+#define CS42L42_ADC_WNF_EN_SHIFT 3
+#define CS42L42_ADC_HPF_CF_SHIFT 1
+#define CS42L42_ADC_HPF_EN_SHIFT 0
+
+/* Page 0x1F DAC Registers */
+#define CS42L42_DAC_CTL1 (CS42L42_PAGE_1F + 0x01)
+#define CS42L42_DACB_INV_SHIFT 1
+#define CS42L42_DACA_INV_SHIFT 0
+
+#define CS42L42_DAC_CTL2 (CS42L42_PAGE_1F + 0x06)
+#define CS42L42_HPOUT_PULLDOWN_SHIFT 4
+#define CS42L42_HPOUT_PULLDOWN_MASK (15 << CS42L42_HPOUT_PULLDOWN_SHIFT)
+#define CS42L42_HPOUT_LOAD_SHIFT 3
+#define CS42L42_HPOUT_LOAD_MASK (1 << CS42L42_HPOUT_LOAD_SHIFT)
+#define CS42L42_HPOUT_CLAMP_SHIFT 2
+#define CS42L42_HPOUT_CLAMP_MASK (1 << CS42L42_HPOUT_CLAMP_SHIFT)
+#define CS42L42_DAC_HPF_EN_SHIFT 1
+#define CS42L42_DAC_HPF_EN_MASK (1 << CS42L42_DAC_HPF_EN_SHIFT)
+#define CS42L42_DAC_MON_EN_SHIFT 0
+#define CS42L42_DAC_MON_EN_MASK (1 << CS42L42_DAC_MON_EN_SHIFT)
+
+/* Page 0x20 HP CTL Registers */
+#define CS42L42_HP_CTL (CS42L42_PAGE_20 + 0x01)
+#define CS42L42_HP_ANA_BMUTE_SHIFT 3
+#define CS42L42_HP_ANA_BMUTE_MASK (1 << CS42L42_HP_ANA_BMUTE_SHIFT)
+#define CS42L42_HP_ANA_AMUTE_SHIFT 2
+#define CS42L42_HP_ANA_AMUTE_MASK (1 << CS42L42_HP_ANA_AMUTE_SHIFT)
+#define CS42L42_HP_FULL_SCALE_VOL_SHIFT 1
+#define CS42L42_HP_FULL_SCALE_VOL_MASK (1 << CS42L42_HP_FULL_SCALE_VOL_SHIFT)
+
+/* Page 0x21 Class H Registers */
+#define CS42L42_CLASSH_CTL (CS42L42_PAGE_21 + 0x01)
+
+/* Page 0x23 Mixer Volume Registers */
+#define CS42L42_MIXER_CHA_VOL (CS42L42_PAGE_23 + 0x01)
+#define CS42L42_MIXER_ADC_VOL (CS42L42_PAGE_23 + 0x02)
+
+#define CS42L42_MIXER_CHB_VOL (CS42L42_PAGE_23 + 0x03)
+#define CS42L42_MIXER_CH_VOL_SHIFT 0
+#define CS42L42_MIXER_CH_VOL_MASK (0x3f << CS42L42_MIXER_CH_VOL_SHIFT)
+
+/* Page 0x24 EQ Registers */
+#define CS42L42_EQ_COEF_IN0 (CS42L42_PAGE_24 + 0x01)
+#define CS42L42_EQ_COEF_IN1 (CS42L42_PAGE_24 + 0x02)
+#define CS42L42_EQ_COEF_IN2 (CS42L42_PAGE_24 + 0x03)
+#define CS42L42_EQ_COEF_IN3 (CS42L42_PAGE_24 + 0x04)
+#define CS42L42_EQ_COEF_RW (CS42L42_PAGE_24 + 0x06)
+#define CS42L42_EQ_COEF_OUT0 (CS42L42_PAGE_24 + 0x07)
+#define CS42L42_EQ_COEF_OUT1 (CS42L42_PAGE_24 + 0x08)
+#define CS42L42_EQ_COEF_OUT2 (CS42L42_PAGE_24 + 0x09)
+#define CS42L42_EQ_COEF_OUT3 (CS42L42_PAGE_24 + 0x0A)
+#define CS42L42_EQ_INIT_STAT (CS42L42_PAGE_24 + 0x0B)
+#define CS42L42_EQ_START_FILT (CS42L42_PAGE_24 + 0x0C)
+#define CS42L42_EQ_MUTE_CTL (CS42L42_PAGE_24 + 0x0E)
+
+/* Page 0x25 Audio Port Registers */
+#define CS42L42_SP_RX_CH_SEL (CS42L42_PAGE_25 + 0x01)
+#define CS42L42_SP_RX_CHB_SEL_SHIFT 2
+#define CS42L42_SP_RX_CHB_SEL_MASK (3 << CS42L42_SP_RX_CHB_SEL_SHIFT)
+
+#define CS42L42_SP_RX_ISOC_CTL (CS42L42_PAGE_25 + 0x02)
+#define CS42L42_SP_RX_RSYNC_SHIFT 6
+#define CS42L42_SP_RX_RSYNC_MASK (1 << CS42L42_SP_RX_RSYNC_SHIFT)
+#define CS42L42_SP_RX_NSB_POS_SHIFT 3
+#define CS42L42_SP_RX_NSB_POS_MASK (7 << CS42L42_SP_RX_NSB_POS_SHIFT)
+#define CS42L42_SP_RX_NFS_NSBB_SHIFT 2
+#define CS42L42_SP_RX_NFS_NSBB_MASK (1 << CS42L42_SP_RX_NFS_NSBB_SHIFT)
+#define CS42L42_SP_RX_ISOC_MODE_SHIFT 0
+#define CS42L42_SP_RX_ISOC_MODE_MASK (3 << CS42L42_SP_RX_ISOC_MODE_SHIFT)
+
+#define CS42L42_SP_RX_FS (CS42L42_PAGE_25 + 0x03)
+#define CS42L42_SPDIF_CH_SEL			(CS42L42_PAGE_25 + 0x04)
+#define CS42L42_SP_TX_ISOC_CTL (CS42L42_PAGE_25 + 0x05)
+#define CS42L42_SP_TX_FS (CS42L42_PAGE_25 + 0x06)
+#define CS42L42_SPDIF_SW_CTL1 (CS42L42_PAGE_25 + 0x07)
+
+/* Page 0x26 SRC Registers */
+#define CS42L42_SRC_SDIN_FS (CS42L42_PAGE_26 + 0x01)
+#define CS42L42_SRC_SDIN_FS_SHIFT 0
+#define CS42L42_SRC_SDIN_FS_MASK (0x1f << CS42L42_SRC_SDIN_FS_SHIFT)
+
+#define CS42L42_SRC_SDOUT_FS (CS42L42_PAGE_26 + 0x09)
+
+/* Page 0x28 S/PDIF Registers */
+#define CS42L42_SPDIF_CTL1 (CS42L42_PAGE_28 + 0x01)
+#define CS42L42_SPDIF_CTL2 (CS42L42_PAGE_28 + 0x02)
+#define CS42L42_SPDIF_CTL3 (CS42L42_PAGE_28 + 0x03)
+#define CS42L42_SPDIF_CTL4 (CS42L42_PAGE_28 + 0x04)
+
+/* Page 0x29 Serial Port TX Registers */
+#define CS42L42_ASP_TX_SZ_EN (CS42L42_PAGE_29 + 0x01)
+#define CS42L42_ASP_TX_EN_SHIFT 0
+#define CS42L42_ASP_TX_CH_EN (CS42L42_PAGE_29 + 0x02)
+#define CS42L42_ASP_TX0_CH2_SHIFT 1
+#define CS42L42_ASP_TX0_CH1_SHIFT 0
+
+#define CS42L42_ASP_TX_CH_AP_RES (CS42L42_PAGE_29 + 0x03)
+#define CS42L42_ASP_TX_CH1_AP_SHIFT 7
+#define CS42L42_ASP_TX_CH1_AP_MASK (1 << CS42L42_ASP_TX_CH1_AP_SHIFT)
+#define CS42L42_ASP_TX_CH2_AP_SHIFT 6
+#define CS42L42_ASP_TX_CH2_AP_MASK (1 << CS42L42_ASP_TX_CH2_AP_SHIFT)
+#define CS42L42_ASP_TX_CH2_RES_SHIFT 2
+#define CS42L42_ASP_TX_CH2_RES_MASK (3 << CS42L42_ASP_TX_CH2_RES_SHIFT)
+#define CS42L42_ASP_TX_CH1_RES_SHIFT 0
+#define CS42L42_ASP_TX_CH1_RES_MASK (3 << CS42L42_ASP_TX_CH1_RES_SHIFT)
+#define CS42L42_ASP_TX_CH1_BIT_MSB (CS42L42_PAGE_29 + 0x04)
+#define CS42L42_ASP_TX_CH1_BIT_LSB (CS42L42_PAGE_29 + 0x05)
+#define CS42L42_ASP_TX_HIZ_DLY_CFG (CS42L42_PAGE_29 + 0x06)
+#define CS42L42_ASP_TX_CH2_BIT_MSB (CS42L42_PAGE_29 + 0x0A)
+#define CS42L42_ASP_TX_CH2_BIT_LSB (CS42L42_PAGE_29 + 0x0B)
+
+/* Page 0x2A Serial Port RX Registers */
+#define CS42L42_ASP_RX_DAI0_EN (CS42L42_PAGE_2A + 0x01)
+#define CS42L42_ASP_RX0_CH_EN_SHIFT 2
+#define CS42L42_ASP_RX0_CH_EN_MASK (0xf << CS42L42_ASP_RX0_CH_EN_SHIFT)
+#define CS42L42_ASP_RX0_CH1_SHIFT 2
+#define CS42L42_ASP_RX0_CH2_SHIFT 3
+#define CS42L42_ASP_RX0_CH3_SHIFT 4
+#define CS42L42_ASP_RX0_CH4_SHIFT 5
+
+#define CS42L42_ASP_RX_DAI0_CH1_AP_RES (CS42L42_PAGE_2A + 0x02)
+#define CS42L42_ASP_RX_DAI0_CH1_BIT_MSB (CS42L42_PAGE_2A + 0x03)
+#define CS42L42_ASP_RX_DAI0_CH1_BIT_LSB (CS42L42_PAGE_2A + 0x04)
+#define CS42L42_ASP_RX_DAI0_CH2_AP_RES (CS42L42_PAGE_2A + 0x05)
+#define CS42L42_ASP_RX_DAI0_CH2_BIT_MSB (CS42L42_PAGE_2A + 0x06)
+#define CS42L42_ASP_RX_DAI0_CH2_BIT_LSB (CS42L42_PAGE_2A + 0x07)
+#define CS42L42_ASP_RX_DAI0_CH3_AP_RES (CS42L42_PAGE_2A + 0x08)
+#define CS42L42_ASP_RX_DAI0_CH3_BIT_MSB (CS42L42_PAGE_2A + 0x09)
+#define CS42L42_ASP_RX_DAI0_CH3_BIT_LSB (CS42L42_PAGE_2A + 0x0A)
+#define CS42L42_ASP_RX_DAI0_CH4_AP_RES (CS42L42_PAGE_2A + 0x0B)
+#define CS42L42_ASP_RX_DAI0_CH4_BIT_MSB (CS42L42_PAGE_2A + 0x0C)
+#define CS42L42_ASP_RX_DAI0_CH4_BIT_LSB (CS42L42_PAGE_2A + 0x0D)
+#define CS42L42_ASP_RX_DAI1_CH1_AP_RES (CS42L42_PAGE_2A + 0x0E)
+#define CS42L42_ASP_RX_DAI1_CH1_BIT_MSB (CS42L42_PAGE_2A + 0x0F)
+#define CS42L42_ASP_RX_DAI1_CH1_BIT_LSB (CS42L42_PAGE_2A + 0x10)
+#define CS42L42_ASP_RX_DAI1_CH2_AP_RES (CS42L42_PAGE_2A + 0x11)
+#define CS42L42_ASP_RX_DAI1_CH2_BIT_MSB (CS42L42_PAGE_2A + 0x12)
+#define CS42L42_ASP_RX_DAI1_CH2_BIT_LSB (CS42L42_PAGE_2A + 0x13)
+
+#define CS42L42_ASP_RX_CH_AP_SHIFT 6
+#define CS42L42_ASP_RX_CH_AP_MASK (1 << CS42L42_ASP_RX_CH_AP_SHIFT)
+#define CS42L42_ASP_RX_CH_AP_LOW 0
+#define CS42L42_ASP_RX_CH_AP_HI 1
+#define CS42L42_ASP_RX_CH_RES_SHIFT 0
+#define CS42L42_ASP_RX_CH_RES_MASK (3 << CS42L42_ASP_RX_CH_RES_SHIFT)
+#define CS42L42_ASP_RX_CH_RES_32 3
+#define CS42L42_ASP_RX_CH_RES_16 1
+#define CS42L42_ASP_RX_CH_BIT_ST_SHIFT 0
+#define CS42L42_ASP_RX_CH_BIT_ST_MASK (0xff << CS42L42_ASP_RX_CH_BIT_ST_SHIFT)
+
+/* Page 0x30 ID Registers */
+#define CS42L42_SUB_REVID (CS42L42_PAGE_30 + 0x14)
+#define CS42L42_MAX_REGISTER (CS42L42_PAGE_30 + 0x14)
+
+/* Defines for fracturing values spread across multiple registers */
+#define CS42L42_FRAC0_VAL(val) ((val) & 0x0000ff)
+#define CS42L42_FRAC1_VAL(val) (((val) & 0x00ff00) >> 8)
+#define CS42L42_FRAC2_VAL(val) (((val) & 0xff0000) >> 16)
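/*
 * Editorial usage sketch (helper name invented; assumes a regmap handle
 * and <linux/regmap.h>): a 24-bit fractional divider value is split across
 * the three FRAC registers with the CS42L42_FRACn_VAL() macros above.
 *
 *	static inline void cs42l42_write_pll_frac(struct regmap *regmap,
 *						  u32 frac)
 *	{
 *		regmap_write(regmap, CS42L42_PLL_DIV_FRAC0,
 *			     CS42L42_FRAC0_VAL(frac));
 *		regmap_write(regmap, CS42L42_PLL_DIV_FRAC1,
 *			     CS42L42_FRAC1_VAL(frac));
 *		regmap_write(regmap, CS42L42_PLL_DIV_FRAC2,
 *			     CS42L42_FRAC2_VAL(frac));
 *	}
 */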
+
+#define CS42L42_NUM_SUPPLIES 5
+#define CS42L42_BOOT_TIME_US 3000
+#define CS42L42_PLL_DIVOUT_TIME_US 800
+#define CS42L42_CLOCK_SWITCH_DELAY_US 150
+#define CS42L42_PLL_LOCK_POLL_US 250
+#define CS42L42_PLL_LOCK_TIMEOUT_US 1250
+#define CS42L42_HP_ADC_EN_TIME_US 20000
+#define CS42L42_PDN_DONE_POLL_US 1000
+#define CS42L42_PDN_DONE_TIMEOUT_US 200000
+#define CS42L42_PDN_DONE_TIME_MS 100
+#define CS42L42_FILT_DISCHARGE_TIME_MS 46
+
+#endif /* __CS42L42_H */
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index b65220685920..2df54cf02cb3 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -15,6 +15,8 @@
* snd_pcm_substream_to_dma_direction - Get dma_transfer_direction for a PCM
* substream
* @substream: PCM substream
+ *
+ * Return: DMA transfer direction
*/
static inline enum dma_transfer_direction
snd_pcm_substream_to_dma_direction(const struct snd_pcm_substream *substream)
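/*
 * Editorial usage sketch (not part of the patch): the helper typically
 * feeds a dmaengine slave config, e.g.
 *
 *	cfg->direction = snd_pcm_substream_to_dma_direction(substream);
 */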
@@ -60,22 +62,25 @@ struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream)
 * @maxburst: Maximum number of words (note: words, as in units of the
 * src_addr_width member, not bytes) that can be sent to or received from the
* DAI in one burst.
- * @slave_id: Slave requester id for the DMA channel.
* @filter_data: Custom DMA channel filter data, this will usually be used when
* requesting the DMA channel.
* @chan_name: Custom channel name to use when requesting DMA channel.
* @fifo_size: FIFO size of the DAI controller in bytes
* @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now
+ * @peripheral_config: peripheral configuration for programming the
+ * peripheral for the dmaengine transfer
+ * @peripheral_size: peripheral configuration buffer size
*/
struct snd_dmaengine_dai_dma_data {
dma_addr_t addr;
enum dma_slave_buswidth addr_width;
u32 maxburst;
- unsigned int slave_id;
void *filter_data;
const char *chan_name;
unsigned int fifo_size;
unsigned int flags;
+ void *peripheral_config;
+ size_t peripheral_size;
};
void snd_dmaengine_pcm_set_config_from_dai_data(
@@ -161,4 +166,15 @@ int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
#define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm"
+struct dmaengine_pcm {
+ struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1];
+ const struct snd_dmaengine_pcm_config *config;
+ struct snd_soc_component component;
+ unsigned int flags;
+};
+
+static inline struct dmaengine_pcm *soc_component_to_pcm(struct snd_soc_component *p)
+{
+ return container_of(p, struct dmaengine_pcm, component);
+}
#endif
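/*
 * Editorial usage sketch for the soc_component_to_pcm() helper added above
 * (variable names illustrative): recover the dmaengine_pcm wrapper from a
 * component callback and pick the channel for the current stream.
 *
 *	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
 *	struct dma_chan *chan = pcm->chan[substream->stream];
 */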
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index 468e38c54dd3..39787fecc8d9 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -1701,7 +1701,7 @@ struct snd_emu10k1 {
struct snd_dma_buffer silent_page; /* silent page */
struct snd_dma_buffer ptb_pages; /* page table pages */
struct snd_dma_device p16v_dma_dev;
- struct snd_dma_buffer p16v_buffer;
+ struct snd_dma_buffer *p16v_buffer;
struct snd_util_memhdr *memhdr; /* page allocation list */
@@ -1796,14 +1796,12 @@ int snd_emu10k1_create(struct snd_card *card,
unsigned short extout_mask,
long max_cache_bytes,
int enable_ir,
- uint subsystem,
- struct snd_emu10k1 ** remu);
+ uint subsystem);
int snd_emu10k1_pcm(struct snd_emu10k1 *emu, int device);
int snd_emu10k1_pcm_mic(struct snd_emu10k1 *emu, int device);
int snd_emu10k1_pcm_efx(struct snd_emu10k1 *emu, int device);
int snd_p16v_pcm(struct snd_emu10k1 *emu, int device);
-int snd_p16v_free(struct snd_emu10k1 * emu);
int snd_p16v_mixer(struct snd_emu10k1 * emu);
int snd_emu10k1_pcm_multi(struct snd_emu10k1 *emu, int device);
int snd_emu10k1_fx8010_pcm(struct snd_emu10k1 *emu, int device);
diff --git a/include/sound/emu8000.h b/include/sound/emu8000.h
index ad0365d6de5e..072791bbcf5c 100644
--- a/include/sound/emu8000.h
+++ b/include/sound/emu8000.h
@@ -56,9 +56,6 @@ struct snd_emu8000 {
unsigned long port1; /* Port usually base+0 */
unsigned long port2; /* Port usually at base+0x400 */
unsigned long port3; /* Port usually at base+0x800 */
- struct resource *res_port1;
- struct resource *res_port2;
- struct resource *res_port3;
unsigned short last_reg;/* Last register command */
spinlock_t reg_lock;
diff --git a/include/sound/graph_card.h b/include/sound/graph_card.h
new file mode 100644
index 000000000000..4c8b94c77b8e
--- /dev/null
+++ b/include/sound/graph_card.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * ASoC audio graph card support
+ *
+ */
+
+#ifndef __GRAPH_CARD_H
+#define __GRAPH_CARD_H
+
+#include <sound/simple_card_utils.h>
+
+typedef int (*GRAPH2_CUSTOM)(struct asoc_simple_priv *priv,
+ struct device_node *lnk,
+ struct link_info *li);
+
+struct graph2_custom_hooks {
+ int (*hook_pre)(struct asoc_simple_priv *priv);
+ int (*hook_post)(struct asoc_simple_priv *priv);
+ GRAPH2_CUSTOM custom_normal;
+ GRAPH2_CUSTOM custom_dpcm;
+ GRAPH2_CUSTOM custom_c2c;
+};
+
+int audio_graph_parse_of(struct asoc_simple_priv *priv, struct device *dev);
+int audio_graph2_parse_of(struct asoc_simple_priv *priv, struct device *dev,
+ struct graph2_custom_hooks *hooks);
+
+int audio_graph2_link_normal(struct asoc_simple_priv *priv,
+ struct device_node *lnk, struct link_info *li);
+int audio_graph2_link_dpcm(struct asoc_simple_priv *priv,
+ struct device_node *lnk, struct link_info *li);
+int audio_graph2_link_c2c(struct asoc_simple_priv *priv,
+ struct device_node *lnk, struct link_info *li);
+
+#endif /* __GRAPH_CARD_H */
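/*
 * Editorial usage sketch (callback name invented): override only the DPCM
 * links and let the generic graph parser handle the remaining link types.
 *
 *	static struct graph2_custom_hooks hooks = {
 *		.custom_dpcm = my_dpcm_link,
 *	};
 *
 *	ret = audio_graph2_parse_of(priv, dev, &hooks);
 */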
diff --git a/include/sound/gus.h b/include/sound/gus.h
index 410939ecf3a5..cd8da68cab92 100644
--- a/include/sound/gus.h
+++ b/include/sound/gus.h
@@ -613,4 +613,8 @@ int snd_gus_dram_write(struct snd_gus_card *gus, char __user *ptr,
int snd_gus_dram_read(struct snd_gus_card *gus, char __user *ptr,
unsigned int addr, unsigned int size, int rom);
+/* gus_timer.c */
+void snd_gf1_timers_init(struct snd_gus_card *gus);
+void snd_gf1_timers_done(struct snd_gus_card *gus);
+
#endif /* __SOUND_GUS_H */
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index 3ee8036f5436..25ec8c181688 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -8,7 +8,7 @@
#ifndef __SOUND_HDA_CODEC_H
#define __SOUND_HDA_CODEC_H
-#include <linux/kref.h>
+#include <linux/refcount.h>
#include <linux/mod_devicetable.h>
#include <sound/info.h>
#include <sound/control.h>
@@ -59,6 +59,9 @@ struct hda_bus {
unsigned int no_response_fallback:1; /* don't fallback at RIRB error */
unsigned int bus_probing :1; /* during probing process */
unsigned int keep_power:1; /* keep power up for notification */
+ unsigned int jackpoll_in_suspend:1; /* keep jack polling during
+ * runtime suspend
+ */
int primary_dig_out_type; /* primary digital out PCM type */
unsigned int mixer_assigned; /* codec addr for mixer name */
@@ -114,7 +117,6 @@ struct hda_codec_ops {
int (*resume)(struct hda_codec *codec);
int (*check_power_status)(struct hda_codec *codec, hda_nid_t nid);
#endif
- void (*reboot_notify)(struct hda_codec *codec);
void (*stream_pm)(struct hda_codec *codec, hda_nid_t nid, bool on);
};
@@ -167,8 +169,8 @@ struct hda_pcm {
bool own_chmap; /* codec driver provides own channel maps */
/* private: */
struct hda_codec *codec;
- struct kref kref;
struct list_head list;
+ unsigned int disconnected:1;
};
/* codec information */
@@ -188,6 +190,8 @@ struct hda_codec {
/* PCM to create, set by patch_ops.build_pcms callback */
struct list_head pcm_list_head;
+ refcount_t pcm_ref;
+ wait_queue_head_t remove_sleep;
/* codec specific info */
void *spec;
@@ -208,7 +212,7 @@ struct hda_codec {
struct mutex control_mutex;
struct snd_array spdif_out;
unsigned int spdif_in_enable; /* SPDIF input enable? */
- const hda_nid_t *slave_dig_outs; /* optional digital out slave widgets */
+ const hda_nid_t *follower_dig_outs; /* optional digital out follower widgets */
struct snd_array init_pins; /* initial (BIOS) pin configurations */
struct snd_array driver_pins; /* pin configs set by codec parser */
struct snd_array cvt_setups; /* audio convert setups */
@@ -225,8 +229,8 @@ struct hda_codec {
#endif
/* misc flags */
+ unsigned int configured:1; /* codec was configured */
unsigned int in_freeing:1; /* being released */
- unsigned int registered:1; /* codec was registered */
unsigned int display_power_control:1; /* needs display power */
unsigned int spdif_status_reset :1; /* needs to toggle SPDIF for each
* status change
@@ -253,7 +257,7 @@ struct hda_codec {
unsigned int force_pin_prefix:1; /* Add location prefix */
unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
unsigned int relaxed_resume:1; /* don't resume forcibly for jack */
- unsigned int mst_no_extra_pcms:1; /* no backup PCMs for DP-MST */
+ unsigned int forced_resume:1; /* forced resume for jack */
#ifdef CONFIG_PM
unsigned long power_on_acct;
@@ -288,6 +292,8 @@ struct hda_codec {
#define dev_to_hda_codec(_dev) container_of(_dev, struct hda_codec, core.dev)
#define hda_codec_dev(_dev) (&(_dev)->core.dev)
+#define hdac_to_hda_codec(_hdac) container_of(_hdac, struct hda_codec, core)
+
#define list_for_each_codec(c, bus) \
list_for_each_entry(c, &(bus)->core.codec_list, core.list)
#define list_for_each_codec_safe(c, n, bus) \
@@ -299,12 +305,19 @@ struct hda_codec {
/*
* constructors
*/
+__printf(3, 4) struct hda_codec *
+snd_hda_codec_device_init(struct hda_bus *bus, unsigned int codec_addr,
+ const char *fmt, ...);
int snd_hda_codec_new(struct hda_bus *bus, struct snd_card *card,
unsigned int codec_addr, struct hda_codec **codecp);
int snd_hda_codec_device_new(struct hda_bus *bus, struct snd_card *card,
- unsigned int codec_addr, struct hda_codec *codec);
+ unsigned int codec_addr, struct hda_codec *codec,
+ bool snddev_managed);
int snd_hda_codec_configure(struct hda_codec *codec);
int snd_hda_codec_update_widgets(struct hda_codec *codec);
+void snd_hda_codec_register(struct hda_codec *codec);
+void snd_hda_codec_unregister(struct hda_codec *codec);
+void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec);
/*
* low level functions
@@ -339,7 +352,7 @@ snd_hda_get_num_conns(struct hda_codec *codec, hda_nid_t nid)
#define snd_hda_get_raw_connections(codec, nid, list, max_conns) \
snd_hdac_get_connections(&(codec)->core, nid, list, max_conns)
#define snd_hda_get_num_raw_conns(codec, nid) \
- snd_hdac_get_connections(&(codec)->core, nid, NULL, 0);
+ snd_hdac_get_connections(&(codec)->core, nid, NULL, 0)
int snd_hda_get_conn_list(struct hda_codec *codec, hda_nid_t nid,
const hda_nid_t **listp);
@@ -362,13 +375,6 @@ struct hda_verb {
void snd_hda_sequence_write(struct hda_codec *codec,
const struct hda_verb *seq);
-/* unsolicited event */
-static inline void
-snd_hda_queue_unsol_event(struct hda_bus *bus, u32 res, u32 res_ex)
-{
- snd_hdac_bus_queue_event(&bus->core, res, res_ex);
-}
-
/* cached write */
static inline int
snd_hda_codec_write_cache(struct hda_codec *codec, hda_nid_t nid,
@@ -418,9 +424,11 @@ __printf(2, 3)
struct hda_pcm *snd_hda_codec_pcm_new(struct hda_codec *codec,
const char *fmt, ...);
+void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec);
+
static inline void snd_hda_codec_pcm_get(struct hda_pcm *pcm)
{
- kref_get(&pcm->kref);
+ refcount_inc(&pcm->codec->pcm_ref);
}
void snd_hda_codec_pcm_put(struct hda_pcm *pcm);
@@ -488,12 +496,19 @@ int hda_call_check_power_status(struct hda_codec *codec, hda_nid_t nid)
#define snd_hda_power_down(codec) snd_hdac_power_down(&(codec)->core)
#define snd_hda_power_down_pm(codec) snd_hdac_power_down_pm(&(codec)->core)
#ifdef CONFIG_PM
+void snd_hda_codec_set_power_save(struct hda_codec *codec, int delay);
void snd_hda_set_power_save(struct hda_bus *bus, int delay);
void snd_hda_update_power_acct(struct hda_codec *codec);
#else
+static inline void snd_hda_codec_set_power_save(struct hda_codec *codec, int delay) {}
static inline void snd_hda_set_power_save(struct hda_bus *bus, int delay) {}
#endif
+static inline bool hda_codec_need_resume(struct hda_codec *codec)
+{
+ return !codec->relaxed_resume && codec->jacktbl.used;
+}
+
#ifdef CONFIG_SND_HDA_PATCH_LOADER
/*
* patch firmware
diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h
index 057d2a2d0bd0..d37cf43546eb 100644
--- a/include/sound/hda_register.h
+++ b/include/sound/hda_register.h
@@ -119,7 +119,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_REG_VS_EM3U 0x103C
#define AZX_REG_VS_EM4L 0x1040
#define AZX_REG_VS_EM4U 0x1044
-#define AZX_REG_VS_LTRC 0x1048
+#define AZX_REG_VS_LTRP 0x1048
#define AZX_REG_VS_D0I3C 0x104A
#define AZX_REG_VS_PCE 0x104B
#define AZX_REG_VS_L2MAGC 0x1050
@@ -140,8 +140,12 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define BDL_SIZE 4096
#define AZX_MAX_BDL_ENTRIES (BDL_SIZE / 16)
#define AZX_MAX_FRAG 32
-/* max buffer size - no h/w limit, you can increase as you like */
-#define AZX_MAX_BUF_SIZE (1024*1024*1024)
+/*
+ * max buffer size - artificial 4MB limit per stream to avoid big allocations
+ * In theory it could be much bigger, but since the limit applies per stream,
+ * memory on systems with many streams could quickly be exhausted if
+ * userspace requested the maximum buffer size for each of them.
+ */
+#define AZX_MAX_BUF_SIZE (4*1024*1024)
/* RIRB int mask: overrun[2], response[0] */
#define RIRB_INT_RESPONSE 0x01
@@ -256,7 +260,18 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_REG_ML_LCAP 0x00
#define AZX_REG_ML_LCTL 0x04
+
+#define AZX_ML_LCTL_CPA BIT(23)
+#define AZX_ML_LCTL_CPA_SHIFT 23
+#define AZX_ML_LCTL_SPA BIT(16)
+#define AZX_ML_LCTL_SPA_SHIFT 16
+#define AZX_ML_LCTL_SCF GENMASK(3, 0)
+
#define AZX_REG_ML_LOSIDV 0x08
+
+/* bit0 is reserved, with BIT(1) mapping to stream1 */
+#define AZX_ML_LOSIDV_STREAM_MASK 0xFFFE
+
#define AZX_REG_ML_LSDIID 0x0C
#define AZX_REG_ML_LPSOO 0x10
#define AZX_REG_ML_LPSIO 0x12
@@ -264,15 +279,6 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_REG_ML_LOUTPAY 0x20
#define AZX_REG_ML_LINPAY 0x30
-/* bit0 is reserved, with BIT(1) mapping to stream1 */
-#define ML_LOSIDV_STREAM_MASK 0xFFFE
-
-#define ML_LCTL_SCF_MASK 0xF
-#define AZX_MLCTL_SPA (0x1 << 16)
-#define AZX_MLCTL_CPA (0x1 << 23)
-#define AZX_MLCTL_SPA_SHIFT 16
-#define AZX_MLCTL_CPA_SHIFT 23
-
/* registers for DMA Resume Capability Structure */
#define AZX_DRSM_CAP_ID 0x5
#define AZX_REG_DRSM_CTL 0x4
diff --git a/include/sound/hda_verbs.h b/include/sound/hda_verbs.h
index e36b77531c5c..006d358acce2 100644
--- a/include/sound/hda_verbs.h
+++ b/include/sound/hda_verbs.h
@@ -461,7 +461,7 @@ enum {
#define AC_DE_ELDV (1<<1)
#define AC_DE_IA (1<<2)
-/* device device types (0x0-0xf) */
+/* device types (0x0-0xf) */
enum {
AC_JACK_LINE_OUT,
AC_JACK_SPEAKER,
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index d4299e146d95..35778f953a3f 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -9,6 +9,8 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/timecounter.h>
#include <sound/core.h>
@@ -92,6 +94,7 @@ struct hdac_device {
bool lazy_cache:1; /* don't wake up for writes */
bool caps_overwriting:1; /* caps overwrite being in process */
bool cache_coef:1; /* cache COEF read/write too */
+ unsigned int registered:1; /* codec was registered */
};
/* device/driver type used for matching */
@@ -207,8 +210,8 @@ static inline int snd_hdac_power_down_pm(struct hdac_device *codec) { return 0;
static inline int snd_hdac_keep_power_up(struct hdac_device *codec) { return 0; }
static inline void snd_hdac_enter_pm(struct hdac_device *codec) {}
static inline void snd_hdac_leave_pm(struct hdac_device *codec) {}
-static inline bool snd_hdac_is_in_pm(struct hdac_device *codec) { return 0; }
-static inline bool snd_hdac_is_power_on(struct hdac_device *codec) { return 1; }
+static inline bool snd_hdac_is_in_pm(struct hdac_device *codec) { return false; }
+static inline bool snd_hdac_is_power_on(struct hdac_device *codec) { return true; }
#endif
/*
@@ -241,6 +244,8 @@ struct hdac_bus_ops {
/* get a response from the last command */
int (*get_response)(struct hdac_bus *bus, unsigned int addr,
unsigned int *res);
+ /* notify of codec link power-up/down */
+ void (*link_power)(struct hdac_device *hdev, bool enable);
};
/*
@@ -347,6 +352,9 @@ struct hdac_bus {
int bdl_pos_adj; /* BDL position adjustment */
+ /* delay time in us for dma stop */
+ unsigned int dma_stop_delay;
+
/* locks */
spinlock_t reg_lock;
struct mutex cmd_mutex;
@@ -364,26 +372,19 @@ struct hdac_bus {
/* link management */
struct list_head hlink_list;
bool cmd_dma_state;
+
+ /* factor used to derive STRIPE control value */
+ unsigned int sdo_limit;
};
int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev,
const struct hdac_bus_ops *ops);
void snd_hdac_bus_exit(struct hdac_bus *bus);
-int snd_hdac_bus_exec_verb(struct hdac_bus *bus, unsigned int addr,
- unsigned int cmd, unsigned int *res);
int snd_hdac_bus_exec_verb_unlocked(struct hdac_bus *bus, unsigned int addr,
unsigned int cmd, unsigned int *res);
-void snd_hdac_bus_queue_event(struct hdac_bus *bus, u32 res, u32 res_ex);
-static inline void snd_hdac_codec_link_up(struct hdac_device *codec)
-{
- set_bit(codec->addr, &codec->bus->codec_powered);
-}
-
-static inline void snd_hdac_codec_link_down(struct hdac_device *codec)
-{
- clear_bit(codec->addr, &codec->bus->codec_powered);
-}
+void snd_hdac_codec_link_up(struct hdac_device *codec);
+void snd_hdac_codec_link_down(struct hdac_device *codec);
int snd_hdac_bus_send_cmd(struct hdac_bus *bus, unsigned int val);
int snd_hdac_bus_get_response(struct hdac_bus *bus, unsigned int addr,
@@ -397,6 +398,7 @@ void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus);
void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus);
void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus);
int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset);
+void snd_hdac_bus_link_power(struct hdac_device *hdev, bool enable);
void snd_hdac_bus_update_rirb(struct hdac_bus *bus);
int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
@@ -449,6 +451,8 @@ static inline u16 snd_hdac_reg_readw(struct hdac_bus *bus, void __iomem *addr)
#define snd_hdac_reg_writel(bus, addr, val) writel(val, addr)
#define snd_hdac_reg_readl(bus, addr) readl(addr)
+#define snd_hdac_reg_writeq(bus, addr, val) writeq(val, addr)
+#define snd_hdac_reg_readq(bus, addr) readq(addr)
/*
* macros for easy use
@@ -513,6 +517,7 @@ struct hdac_stream {
struct snd_pcm_substream *substream; /* assigned substream,
* set in PCM open
*/
+ struct snd_compr_stream *cstream;
unsigned int format_val; /* format value to be set in the
* controller and the codec
*/
@@ -527,6 +532,7 @@ struct hdac_stream {
bool locked:1;
bool stripe:1; /* apply stripe control */
+ u64 curr_pos;
/* timestamp */
unsigned long start_wallclk; /* start + minimum wallclk */
unsigned long period_wallclk; /* wallclk for period */
@@ -545,6 +551,7 @@ void snd_hdac_stream_init(struct hdac_bus *bus, struct hdac_stream *azx_dev,
int idx, int direction, int tag);
struct hdac_stream *snd_hdac_stream_assign(struct hdac_bus *bus,
struct snd_pcm_substream *substream);
+void snd_hdac_stream_release_locked(struct hdac_stream *azx_dev);
void snd_hdac_stream_release(struct hdac_stream *azx_dev);
struct hdac_stream *snd_hdac_get_stream(struct hdac_bus *bus,
int dir, int stream_tag);
@@ -555,8 +562,9 @@ int snd_hdac_stream_setup_periods(struct hdac_stream *azx_dev);
int snd_hdac_stream_set_params(struct hdac_stream *azx_dev,
unsigned int format_val);
void snd_hdac_stream_start(struct hdac_stream *azx_dev, bool fresh_start);
-void snd_hdac_stream_clear(struct hdac_stream *azx_dev);
void snd_hdac_stream_stop(struct hdac_stream *azx_dev);
+void snd_hdac_stop_streams(struct hdac_bus *bus);
+void snd_hdac_stop_streams_and_chip(struct hdac_bus *bus);
void snd_hdac_stream_reset(struct hdac_stream *azx_dev);
void snd_hdac_stream_sync_trigger(struct hdac_stream *azx_dev, bool set,
unsigned int streams, unsigned int reg);
@@ -583,6 +591,12 @@ int snd_hdac_get_stream_stripe_ctl(struct hdac_bus *bus,
snd_hdac_reg_readw((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
#define snd_hdac_stream_readb(dev, reg) \
snd_hdac_reg_readb((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
+#define snd_hdac_stream_readb_poll(dev, reg, val, cond, delay_us, timeout_us) \
+ read_poll_timeout_atomic(snd_hdac_reg_readb, val, cond, delay_us, timeout_us, \
+ false, (dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
+#define snd_hdac_stream_readl_poll(dev, reg, val, cond, delay_us, timeout_us) \
+ read_poll_timeout_atomic(snd_hdac_reg_readl, val, cond, delay_us, timeout_us, \
+ false, (dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
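/*
 * Editorial usage sketch: the new poll helpers express waits such as a
 * stream-reset handshake (SD_CTL and SD_CTL_STREAM_RESET are existing
 * definitions in hda_register.h):
 *
 *	unsigned char val;
 *
 *	snd_hdac_stream_readb_poll(azx_dev, SD_CTL, val,
 *				   !(val & SD_CTL_STREAM_RESET), 3, 300);
 */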
/* update a register, pass without AZX_REG_ prefix */
#define snd_hdac_stream_updatel(dev, reg, mask, val) \
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index ef88b20c7b0a..83aed26ab143 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -2,6 +2,8 @@
#ifndef __SOUND_HDAUDIO_EXT_H
#define __SOUND_HDAUDIO_EXT_H
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/iopoll.h>
#include <sound/hdaudio.h>
int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev,
@@ -9,9 +11,6 @@ int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev,
const struct hdac_ext_bus_ops *ext_ops);
void snd_hdac_ext_bus_exit(struct hdac_bus *bus);
-int snd_hdac_ext_bus_device_init(struct hdac_bus *bus, int addr,
- struct hdac_device *hdev);
-void snd_hdac_ext_bus_device_exit(struct hdac_device *hdev);
void snd_hdac_ext_bus_device_remove(struct hdac_bus *bus);
#define HDA_CODEC_REV_EXT_ENTRY(_vid, _rev, _name, drv_data) \
@@ -28,6 +27,7 @@ void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *chip,
bool enable, int index);
int snd_hdac_ext_bus_get_ml_capabilities(struct hdac_bus *bus);
+struct hdac_ext_link *snd_hdac_ext_bus_link_at(struct hdac_bus *bus, int addr);
struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_bus *bus,
const char *codec_name);
@@ -51,7 +51,7 @@ enum hdac_ext_stream_type {
* @decoupled: stream host and link is decoupled
* @link_locked: link is locked
* @link_prepared: link is prepared
- * link_substream: link substream
+ * @link_substream: link substream
*/
struct hdac_ext_stream {
struct hdac_stream hstream;
@@ -77,35 +77,33 @@ struct hdac_ext_stream {
#define stream_to_hdac_ext_stream(s) \
container_of(s, struct hdac_ext_stream, hstream)
-void snd_hdac_ext_stream_init(struct hdac_bus *bus,
- struct hdac_ext_stream *stream, int idx,
- int direction, int tag);
int snd_hdac_ext_stream_init_all(struct hdac_bus *bus, int start_idx,
- int num_stream, int dir);
-void snd_hdac_stream_free_all(struct hdac_bus *bus);
+ int num_stream, int dir);
+void snd_hdac_ext_stream_free_all(struct hdac_bus *bus);
void snd_hdac_link_free_all(struct hdac_bus *bus);
struct hdac_ext_stream *snd_hdac_ext_stream_assign(struct hdac_bus *bus,
struct snd_pcm_substream *substream,
int type);
-void snd_hdac_ext_stream_release(struct hdac_ext_stream *azx_dev, int type);
+void snd_hdac_ext_stream_release(struct hdac_ext_stream *hext_stream, int type);
+void snd_hdac_ext_stream_decouple_locked(struct hdac_bus *bus,
+ struct hdac_ext_stream *hext_stream, bool decouple);
void snd_hdac_ext_stream_decouple(struct hdac_bus *bus,
struct hdac_ext_stream *azx_dev, bool decouple);
-void snd_hdac_ext_stop_streams(struct hdac_bus *bus);
int snd_hdac_ext_stream_set_spib(struct hdac_bus *bus,
- struct hdac_ext_stream *stream, u32 value);
+ struct hdac_ext_stream *hext_stream, u32 value);
int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_bus *bus,
- struct hdac_ext_stream *stream);
+ struct hdac_ext_stream *hext_stream);
void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus,
bool enable, int index);
int snd_hdac_ext_stream_set_dpibr(struct hdac_bus *bus,
- struct hdac_ext_stream *stream, u32 value);
-int snd_hdac_ext_stream_set_lpib(struct hdac_ext_stream *stream, u32 value);
+ struct hdac_ext_stream *hext_stream, u32 value);
+int snd_hdac_ext_stream_set_lpib(struct hdac_ext_stream *hext_stream, u32 value);
-void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *hstream);
-void snd_hdac_ext_link_stream_clear(struct hdac_ext_stream *hstream);
-void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *hstream);
-int snd_hdac_ext_link_stream_setup(struct hdac_ext_stream *stream, int fmt);
+void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *hext_stream);
+void snd_hdac_ext_link_stream_clear(struct hdac_ext_stream *hext_stream);
+void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *hext_stream);
+int snd_hdac_ext_link_stream_setup(struct hdac_ext_stream *hext_stream, int fmt);
struct hdac_ext_link {
struct hdac_bus *bus;
@@ -131,6 +129,8 @@ void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
int snd_hdac_ext_bus_link_get(struct hdac_bus *bus, struct hdac_ext_link *link);
int snd_hdac_ext_bus_link_put(struct hdac_bus *bus, struct hdac_ext_link *link);
+void snd_hdac_ext_bus_link_power(struct hdac_device *codec, bool enable);
+
/* update register macro */
#define snd_hdac_updatel(addr, reg, mask, val) \
writel(((readl(addr + reg) & ~(mask)) | (val)), \
@@ -140,6 +140,48 @@ int snd_hdac_ext_bus_link_put(struct hdac_bus *bus, struct hdac_ext_link *link);
writew(((readw(addr + reg) & ~(mask)) | (val)), \
addr + reg)
+#define snd_hdac_adsp_writeb(chip, reg, value) \
+ snd_hdac_reg_writeb(chip, (chip)->dsp_ba + (reg), value)
+#define snd_hdac_adsp_readb(chip, reg) \
+ snd_hdac_reg_readb(chip, (chip)->dsp_ba + (reg))
+#define snd_hdac_adsp_writew(chip, reg, value) \
+ snd_hdac_reg_writew(chip, (chip)->dsp_ba + (reg), value)
+#define snd_hdac_adsp_readw(chip, reg) \
+ snd_hdac_reg_readw(chip, (chip)->dsp_ba + (reg))
+#define snd_hdac_adsp_writel(chip, reg, value) \
+ snd_hdac_reg_writel(chip, (chip)->dsp_ba + (reg), value)
+#define snd_hdac_adsp_readl(chip, reg) \
+ snd_hdac_reg_readl(chip, (chip)->dsp_ba + (reg))
+#define snd_hdac_adsp_writeq(chip, reg, value) \
+ snd_hdac_reg_writeq(chip, (chip)->dsp_ba + (reg), value)
+#define snd_hdac_adsp_readq(chip, reg) \
+ snd_hdac_reg_readq(chip, (chip)->dsp_ba + (reg))
+
+#define snd_hdac_adsp_updateb(chip, reg, mask, val) \
+ snd_hdac_adsp_writeb(chip, reg, \
+ (snd_hdac_adsp_readb(chip, reg) & ~(mask)) | (val))
+#define snd_hdac_adsp_updatew(chip, reg, mask, val) \
+ snd_hdac_adsp_writew(chip, reg, \
+ (snd_hdac_adsp_readw(chip, reg) & ~(mask)) | (val))
+#define snd_hdac_adsp_updatel(chip, reg, mask, val) \
+ snd_hdac_adsp_writel(chip, reg, \
+ (snd_hdac_adsp_readl(chip, reg) & ~(mask)) | (val))
+#define snd_hdac_adsp_updateq(chip, reg, mask, val) \
+ snd_hdac_adsp_writeq(chip, reg, \
+ (snd_hdac_adsp_readq(chip, reg) & ~(mask)) | (val))
+
+#define snd_hdac_adsp_readb_poll(chip, reg, val, cond, delay_us, timeout_us) \
+ readb_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
+ delay_us, timeout_us)
+#define snd_hdac_adsp_readw_poll(chip, reg, val, cond, delay_us, timeout_us) \
+ readw_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
+ delay_us, timeout_us)
+#define snd_hdac_adsp_readl_poll(chip, reg, val, cond, delay_us, timeout_us) \
+ readl_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
+ delay_us, timeout_us)
+#define snd_hdac_adsp_readq_poll(chip, reg, val, cond, delay_us, timeout_us) \
+ readq_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
+ delay_us, timeout_us)
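/*
 * Editorial usage sketch for the new ADSP helpers (register offset and bit
 * names are placeholders, not real definitions): set a control bit, then
 * poll until the DSP acknowledges it.
 *
 *	u32 reg;
 *
 *	snd_hdac_adsp_updatel(chip, ADSP_CTL_OFFSET, CTL_BIT, CTL_BIT);
 *	snd_hdac_adsp_readl_poll(chip, ADSP_CTL_OFFSET, reg,
 *				 reg & CTL_ACK_BIT, 500, 10000);
 */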
struct hdac_ext_device;
diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h
index 83b17682e01c..48ad33aba393 100644
--- a/include/sound/hdmi-codec.h
+++ b/include/sound/hdmi-codec.h
@@ -2,7 +2,7 @@
/*
* hdmi-codec.h - HDMI Codec driver API
*
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com
*
* Author: Jyri Sarha <jsarha@ti.com>
*/
@@ -32,8 +32,13 @@ struct hdmi_codec_daifmt {
} fmt;
unsigned int bit_clk_inv:1;
unsigned int frame_clk_inv:1;
- unsigned int bit_clk_master:1;
- unsigned int frame_clk_master:1;
+ unsigned int bit_clk_provider:1;
+ unsigned int frame_clk_provider:1;
+	/* bit_fmt could be a standard PCM format or an
+	 * IEC958-encoded format. The ALSA IEC958 plugin will pass
+	 * the IEC958_SUBFRAME format to the underlying driver.
+ */
+ snd_pcm_format_t bit_fmt;
};
/*
@@ -60,13 +65,23 @@ struct hdmi_codec_ops {
/*
* Configures HDMI-encoder for audio stream.
- * Mandatory
+ * Having either prepare or hw_params is mandatory.
*/
int (*hw_params)(struct device *dev, void *data,
struct hdmi_codec_daifmt *fmt,
struct hdmi_codec_params *hparms);
/*
+ * Configures HDMI-encoder for audio stream. Can be called
+ * multiple times for each setup.
+ *
+ * Having either prepare or hw_params is mandatory.
+ */
+ int (*prepare)(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms);
+
+ /*
* Shuts down the audio stream.
* Mandatory
*/
@@ -76,7 +91,8 @@ struct hdmi_codec_ops {
* Mute/unmute HDMI audio stream.
* Optional
*/
- int (*digital_mute)(struct device *dev, void *data, bool enable);
+ int (*mute_stream)(struct device *dev, void *data,
+ bool enable, int direction);
/*
* Provides EDID-Like-Data from connected HDMI device.
@@ -99,6 +115,9 @@ struct hdmi_codec_ops {
int (*hook_plugged_cb)(struct device *dev, void *data,
hdmi_codec_plugged_cb fn,
struct device *codec_dev);
+
+ /* bit field */
+ unsigned int no_capture_mute:1;
};
 /* HDMI codec initialization data */
@@ -113,9 +132,6 @@ struct hdmi_codec_pdata {
struct snd_soc_component;
struct snd_soc_jack;
-int hdmi_codec_set_jack_detect(struct snd_soc_component *component,
- struct snd_soc_jack *jack);
-
#define HDMI_CODEC_DRV_NAME "hdmi-audio-codec"
#endif /* __HDMI_CODEC_H__ */
diff --git a/include/sound/intel-dsp-config.h b/include/sound/intel-dsp-config.h
index c36622bee3f8..34c975910574 100644
--- a/include/sound/intel-dsp-config.h
+++ b/include/sound/intel-dsp-config.h
@@ -15,12 +15,14 @@ enum {
SND_INTEL_DSP_DRIVER_LEGACY,
SND_INTEL_DSP_DRIVER_SST,
SND_INTEL_DSP_DRIVER_SOF,
- SND_INTEL_DSP_DRIVER_LAST = SND_INTEL_DSP_DRIVER_SOF
+ SND_INTEL_DSP_DRIVER_AVS,
+ SND_INTEL_DSP_DRIVER_LAST = SND_INTEL_DSP_DRIVER_AVS
};
#if IS_ENABLED(CONFIG_SND_INTEL_DSP_CONFIG)
int snd_intel_dsp_driver_probe(struct pci_dev *pci);
+int snd_intel_acpi_dsp_driver_probe(struct device *dev, const u8 acpi_hid[ACPI_ID_LEN]);
#else
@@ -29,6 +31,12 @@ static inline int snd_intel_dsp_driver_probe(struct pci_dev *pci)
return SND_INTEL_DSP_DRIVER_ANY;
}
+static inline
+int snd_intel_acpi_dsp_driver_probe(struct device *dev, const u8 acpi_hid[ACPI_ID_LEN])
+{
+ return SND_INTEL_DSP_DRIVER_ANY;
+}
+
#endif
#endif
diff --git a/include/sound/intel-nhlt.h b/include/sound/intel-nhlt.h
index f657fd8fc0ad..53470d6a28d6 100644
--- a/include/sound/intel-nhlt.h
+++ b/include/sound/intel-nhlt.h
@@ -10,7 +10,20 @@
#include <linux/acpi.h>
-#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SND_INTEL_NHLT)
+enum nhlt_link_type {
+ NHLT_LINK_HDA = 0,
+ NHLT_LINK_DSP = 1,
+ NHLT_LINK_DMIC = 2,
+ NHLT_LINK_SSP = 3,
+ NHLT_LINK_INVALID
+};
+
+enum nhlt_device_type {
+ NHLT_DEVICE_BT = 0,
+ NHLT_DEVICE_DMIC = 1,
+ NHLT_DEVICE_I2S = 4,
+ NHLT_DEVICE_INVALID
+};
struct wav_fmt {
u16 fmt_tag;
@@ -33,24 +46,9 @@ struct wav_fmt_ext {
u8 sub_fmt[16];
} __packed;
-enum nhlt_link_type {
- NHLT_LINK_HDA = 0,
- NHLT_LINK_DSP = 1,
- NHLT_LINK_DMIC = 2,
- NHLT_LINK_SSP = 3,
- NHLT_LINK_INVALID
-};
-
-enum nhlt_device_type {
- NHLT_DEVICE_BT = 0,
- NHLT_DEVICE_DMIC = 1,
- NHLT_DEVICE_I2S = 4,
- NHLT_DEVICE_INVALID
-};
-
struct nhlt_specific_cfg {
u32 size;
- u8 caps[0];
+ u8 caps[];
} __packed;
struct nhlt_fmt_cfg {
@@ -60,7 +58,7 @@ struct nhlt_fmt_cfg {
struct nhlt_fmt {
u8 fmt_count;
- struct nhlt_fmt_cfg fmt_config[0];
+ struct nhlt_fmt_cfg fmt_config[];
} __packed;
struct nhlt_endpoint {
@@ -80,7 +78,7 @@ struct nhlt_endpoint {
struct nhlt_acpi_table {
struct acpi_table_header header;
u8 endpoint_count;
- struct nhlt_endpoint desc[0];
+ struct nhlt_endpoint desc[];
} __packed;
struct nhlt_resource_desc {
@@ -113,6 +111,11 @@ struct nhlt_vendor_dmic_array_config {
} __packed;
enum {
+ NHLT_CONFIG_TYPE_GENERIC = 0,
+ NHLT_CONFIG_TYPE_MIC_ARRAY = 1
+};
+
+enum {
NHLT_MIC_ARRAY_2CH_SMALL = 0xa,
NHLT_MIC_ARRAY_2CH_BIG = 0xb,
NHLT_MIC_ARRAY_4CH_1ST_GEOM = 0xc,
@@ -121,15 +124,26 @@ enum {
NHLT_MIC_ARRAY_VENDOR_DEFINED = 0xf,
};
+#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SND_INTEL_NHLT)
+
struct nhlt_acpi_table *intel_nhlt_init(struct device *dev);
void intel_nhlt_free(struct nhlt_acpi_table *addr);
int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt);
-#else
+bool intel_nhlt_has_endpoint_type(struct nhlt_acpi_table *nhlt, u8 link_type);
+
+int intel_nhlt_ssp_endpoint_mask(struct nhlt_acpi_table *nhlt, u8 device_type);
-struct nhlt_acpi_table;
+int intel_nhlt_ssp_mclk_mask(struct nhlt_acpi_table *nhlt, int ssp_num);
+
+struct nhlt_specific_cfg *
+intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
+ u32 bus_id, u8 link_type, u8 vbps, u8 bps,
+ u8 num_ch, u32 rate, u8 dir, u8 dev_type);
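/*
 * Editorial usage sketch (all parameter values illustrative): look up the
 * endpoint blob for a 4-channel, 16-bit, 48 kHz DMIC capture stream.
 *
 *	struct nhlt_specific_cfg *cfg;
 *
 *	cfg = intel_nhlt_get_endpoint_blob(dev, nhlt, 0, NHLT_LINK_DMIC,
 *					   16, 16, 4, 48000,
 *					   SNDRV_PCM_STREAM_CAPTURE,
 *					   NHLT_DEVICE_DMIC);
 */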
+
+#else
static inline struct nhlt_acpi_table *intel_nhlt_init(struct device *dev)
{
@@ -145,6 +159,31 @@ static inline int intel_nhlt_get_dmic_geo(struct device *dev,
{
return 0;
}
+
+static inline bool intel_nhlt_has_endpoint_type(struct nhlt_acpi_table *nhlt,
+ u8 link_type)
+{
+ return false;
+}
+
+static inline int intel_nhlt_ssp_endpoint_mask(struct nhlt_acpi_table *nhlt, u8 device_type)
+{
+ return 0;
+}
+
+static inline int intel_nhlt_ssp_mclk_mask(struct nhlt_acpi_table *nhlt, int ssp_num)
+{
+ return 0;
+}
+
+static inline struct nhlt_specific_cfg *
+intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
+ u32 bus_id, u8 link_type, u8 vbps, u8 bps,
+ u8 num_ch, u32 rate, u8 dir, u8 dev_type)
+{
+ return NULL;
+}
+
#endif
#endif
diff --git a/include/sound/jack.h b/include/sound/jack.h
index 9eb2b5ec1ec4..1ed90e2109e9 100644
--- a/include/sound/jack.h
+++ b/include/sound/jack.h
@@ -62,11 +62,13 @@ struct snd_jack {
const char *id;
#ifdef CONFIG_SND_JACK_INPUT_DEV
struct input_dev *input_dev;
+ struct mutex input_dev_lock;
int registered;
int type;
char name[100];
unsigned int key[6]; /* Keep in sync with definitions above */
#endif /* CONFIG_SND_JACK_INPUT_DEV */
+ int hw_status_cache;
void *private_data;
void (*private_free)(struct snd_jack *);
};
diff --git a/include/sound/madera-pdata.h b/include/sound/madera-pdata.h
index e3060f48f108..58398d80c3de 100644
--- a/include/sound/madera-pdata.h
+++ b/include/sound/madera-pdata.h
@@ -9,7 +9,7 @@
#ifndef MADERA_CODEC_PDATA_H
#define MADERA_CODEC_PDATA_H
-#include <linux/kernel.h>
+#include <linux/types.h>
#define MADERA_MAX_INPUT 6
#define MADERA_MAX_MUXED_CHANNELS 4
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 3b47832b1c1f..43d524580bd2 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -9,41 +9,50 @@
#ifndef __SOUND_MEMALLOC_H
#define __SOUND_MEMALLOC_H
+#include <linux/dma-direction.h>
#include <asm/page.h>
struct device;
+struct vm_area_struct;
+struct sg_table;
/*
* buffer device info
*/
struct snd_dma_device {
int type; /* SNDRV_DMA_TYPE_XXX */
+ enum dma_data_direction dir; /* DMA direction */
+ bool need_sync; /* explicit sync needed? */
struct device *dev; /* generic device */
};
-#define snd_dma_continuous_data(x) ((struct device *)(__force unsigned long)(x))
-
-
/*
* buffer types
*/
#define SNDRV_DMA_TYPE_UNKNOWN 0 /* not defined */
#define SNDRV_DMA_TYPE_CONTINUOUS 1 /* continuous no-DMA memory */
#define SNDRV_DMA_TYPE_DEV 2 /* generic device continuous */
-#define SNDRV_DMA_TYPE_DEV_UC 5 /* continuous non-cahced */
-#ifdef CONFIG_SND_DMA_SGBUF
-#define SNDRV_DMA_TYPE_DEV_SG 3 /* generic device SG-buffer */
-#define SNDRV_DMA_TYPE_DEV_UC_SG 6 /* SG non-cached */
-#else
-#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
-#define SNDRV_DMA_TYPE_DEV_UC_SG SNDRV_DMA_TYPE_DEV_UC
-#endif
+#define SNDRV_DMA_TYPE_DEV_WC 5 /* continuous write-combined */
#ifdef CONFIG_GENERIC_ALLOCATOR
#define SNDRV_DMA_TYPE_DEV_IRAM 4 /* generic device iram-buffer */
#else
#define SNDRV_DMA_TYPE_DEV_IRAM SNDRV_DMA_TYPE_DEV
#endif
#define SNDRV_DMA_TYPE_VMALLOC 7 /* vmalloc'ed buffer */
+#define SNDRV_DMA_TYPE_NONCONTIG 8 /* non-coherent SG buffer */
+#define SNDRV_DMA_TYPE_NONCOHERENT 9 /* non-coherent buffer */
+#ifdef CONFIG_SND_DMA_SGBUF
+#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_NONCONTIG
+#define SNDRV_DMA_TYPE_DEV_WC_SG 6 /* SG write-combined */
+#else
+#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
+#define SNDRV_DMA_TYPE_DEV_WC_SG SNDRV_DMA_TYPE_DEV_WC
+#endif
+/* fallback types, don't use those directly */
+#ifdef CONFIG_SND_DMA_SGBUF
+#define SNDRV_DMA_TYPE_DEV_SG_FALLBACK 10
+#define SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK 11
+#endif
/*
* info for buffer allocation
@@ -64,77 +73,53 @@ static inline unsigned int snd_sgbuf_aligned_pages(size_t size)
return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
-#ifdef CONFIG_SND_DMA_SGBUF
-/*
- * Scatter-Gather generic device pages
- */
-void *snd_malloc_sgbuf_pages(struct device *device,
- size_t size, struct snd_dma_buffer *dmab,
- size_t *res_size);
-int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab);
-
-struct snd_sg_page {
- void *buf;
- dma_addr_t addr;
-};
-
-struct snd_sg_buf {
- int size; /* allocated byte size */
- int pages; /* allocated pages */
- int tblsize; /* allocated table size */
- struct snd_sg_page *table; /* address table */
- struct page **page_table; /* page table (for vmap/vunmap) */
- struct device *dev;
-};
+/* allocate/release a buffer */
+int snd_dma_alloc_dir_pages(int type, struct device *dev,
+ enum dma_data_direction dir, size_t size,
+ struct snd_dma_buffer *dmab);
-/*
- * return the physical address at the corresponding offset
- */
-static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
- size_t offset)
+static inline int snd_dma_alloc_pages(int type, struct device *dev,
+ size_t size, struct snd_dma_buffer *dmab)
{
- struct snd_sg_buf *sgbuf = dmab->private_data;
- dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
- addr &= ~((dma_addr_t)PAGE_SIZE - 1);
- return addr + offset % PAGE_SIZE;
+ return snd_dma_alloc_dir_pages(type, dev, DMA_BIDIRECTIONAL, size, dmab);
}
-/*
- * return the virtual address at the corresponding offset
- */
-static inline void *snd_sgbuf_get_ptr(struct snd_dma_buffer *dmab,
- size_t offset)
-{
- struct snd_sg_buf *sgbuf = dmab->private_data;
- return sgbuf->table[offset >> PAGE_SHIFT].buf + offset % PAGE_SIZE;
-}
+int snd_dma_alloc_pages_fallback(int type, struct device *dev, size_t size,
+ struct snd_dma_buffer *dmab);
+void snd_dma_free_pages(struct snd_dma_buffer *dmab);
+int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area);
+
+enum snd_dma_sync_mode { SNDRV_DMA_SYNC_CPU, SNDRV_DMA_SYNC_DEVICE };
+#ifdef CONFIG_HAS_DMA
+void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
+ enum snd_dma_sync_mode mode);
+#else
+static inline void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
+ enum snd_dma_sync_mode mode) {}
+#endif
+dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset);
+struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset);
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
unsigned int ofs, unsigned int size);
-#else
-/* non-SG versions */
-static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
- size_t offset)
+
+/* device-managed memory allocator */
+struct snd_dma_buffer *snd_devm_alloc_dir_pages(struct device *dev, int type,
+ enum dma_data_direction dir,
+ size_t size);
+
+static inline struct snd_dma_buffer *
+snd_devm_alloc_pages(struct device *dev, int type, size_t size)
{
- return dmab->addr + offset;
+ return snd_devm_alloc_dir_pages(dev, type, DMA_BIDIRECTIONAL, size);
}
-static inline void *snd_sgbuf_get_ptr(struct snd_dma_buffer *dmab,
- size_t offset)
+static inline struct sg_table *
+snd_dma_noncontig_sg_table(struct snd_dma_buffer *dmab)
{
- return dmab->area + offset;
+ return dmab->private_data;
}
-#define snd_sgbuf_get_chunk_size(dmab, ofs, size) (size)
-
-#endif /* CONFIG_SND_DMA_SGBUF */
-
-/* allocate/release a buffer */
-int snd_dma_alloc_pages(int type, struct device *dev, size_t size,
- struct snd_dma_buffer *dmab);
-int snd_dma_alloc_pages_fallback(int type, struct device *dev, size_t size,
- struct snd_dma_buffer *dmab);
-void snd_dma_free_pages(struct snd_dma_buffer *dmab);
-
#endif /* __SOUND_MEMALLOC_H */
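For illustration only, a minimal sketch of a non-managed allocation with the reworked API above; example_alloc(), the device pointer, and the 4 KiB size are invented, and error handling is reduced to the essentials:

/* Hypothetical driver fragment: allocate a playback-only DMA buffer
 * with the direction-aware allocator, sync it for device access, free it. */
#include <sound/memalloc.h>

static int example_alloc(struct device *dev)
{
	struct snd_dma_buffer dmab;
	int err;

	/* DMA_TO_DEVICE marks a playback-only buffer; snd_dma_alloc_pages()
	 * above is the DMA_BIDIRECTIONAL shorthand for the same call */
	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, dev,
				      DMA_TO_DEVICE, 4096, &dmab);
	if (err < 0)
		return err;

	/* explicit sync matters only for the non-coherent buffer types;
	 * it is a no-op for coherent ones */
	snd_dma_buffer_sync(&dmab, SNDRV_DMA_SYNC_DEVICE);

	snd_dma_free_pages(&dmab);
	return 0;
}

The device-managed variant snd_devm_alloc_pages() follows the same pattern but ties the buffer's lifetime to the device, so no explicit free is needed.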
diff --git a/include/sound/omap-hdmi-audio.h b/include/sound/omap-hdmi-audio.h
index 16c007b651f4..e5f82044a404 100644
--- a/include/sound/omap-hdmi-audio.h
+++ b/include/sound/omap-hdmi-audio.h
@@ -2,7 +2,7 @@
/*
* hdmi-audio.c -- OMAP4+ DSS HDMI audio support library
*
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com
*
* Author: Jyri Sarha <jsarha@ti.com>
*/
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index f657ff08f317..7b1a022910e8 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -147,6 +147,9 @@ struct snd_pcm_ops {
#define SNDRV_PCM_FMTBIT_S24_BE _SNDRV_PCM_FMTBIT(S24_BE)
#define SNDRV_PCM_FMTBIT_U24_LE _SNDRV_PCM_FMTBIT(U24_LE)
#define SNDRV_PCM_FMTBIT_U24_BE _SNDRV_PCM_FMTBIT(U24_BE)
+// For S32/U32 formats, the 'msbits' hardware parameter is often used to convey
+// the number of valid bits counted from the most significant bit. This covers
+// so-called 'left-justified' or 'right-padded' samples that are narrower than 32 bits.
#define SNDRV_PCM_FMTBIT_S32_LE _SNDRV_PCM_FMTBIT(S32_LE)
#define SNDRV_PCM_FMTBIT_S32_BE _SNDRV_PCM_FMTBIT(S32_BE)
#define SNDRV_PCM_FMTBIT_U32_LE _SNDRV_PCM_FMTBIT(U32_LE)
@@ -229,7 +232,7 @@ typedef int (*snd_pcm_hw_rule_func_t)(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule {
unsigned int cond;
int var;
- int deps[4];
+ int deps[5];
snd_pcm_hw_rule_func_t func;
void *private;
@@ -343,6 +346,8 @@ static inline void snd_pcm_pack_audio_tstamp_report(__u32 *data, __u32 *accuracy
struct snd_pcm_runtime {
/* -- Status -- */
+ snd_pcm_state_t state; /* stream state */
+ snd_pcm_state_t suspended_state; /* suspended stream state */
struct snd_pcm_substream *trigger_master;
struct timespec64 trigger_tstamp; /* trigger timestamp */
bool trigger_tstamp_latched; /* trigger timestamp latched in low-level driver/hardware */
@@ -396,8 +401,10 @@ struct snd_pcm_runtime {
snd_pcm_uframes_t twake; /* do transfer (!poll) wakeup if non-zero */
wait_queue_head_t sleep; /* poll sleep */
wait_queue_head_t tsleep; /* transfer sleep */
- struct fasync_struct *fasync;
+ struct snd_fasync *fasync;
bool stop_operating; /* sync_stop will be called */
+ struct mutex buffer_mutex; /* protects buffer changes */
+ atomic_t buffer_accessing; /* >0: in r/w operation, <0: blocked */
/* -- private section -- */
void *private_data;
@@ -602,7 +609,7 @@ snd_pcm_debug_name(struct snd_pcm_substream *substream, char *buf, size_t size)
* snd_pcm_stream_linked - Check whether the substream is linked with others
* @substream: substream to check
*
- * Returns true if the given substream is being linked with others.
+ * Return: true if the given substream is being linked with others
*/
static inline int snd_pcm_stream_linked(struct snd_pcm_substream *substream)
{
@@ -614,6 +621,7 @@ void snd_pcm_stream_unlock(struct snd_pcm_substream *substream);
void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream);
void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream);
unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream);
+unsigned long _snd_pcm_stream_lock_irqsave_nested(struct snd_pcm_substream *substream);
/**
* snd_pcm_stream_lock_irqsave - Lock the PCM stream
@@ -633,6 +641,20 @@ void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
unsigned long flags);
/**
+ * snd_pcm_stream_lock_irqsave_nested - Single-nested PCM stream locking
+ * @substream: PCM substream
+ * @flags: irq flags
+ *
+ * This locks the PCM stream like snd_pcm_stream_lock_irqsave() but with
+ * the single-depth lockdep subclass.
+ */
+#define snd_pcm_stream_lock_irqsave_nested(substream, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _snd_pcm_stream_lock_irqsave_nested(substream); \
+ } while (0)
+
+/**
* snd_pcm_group_for_each_entry - iterate over the linked substreams
* @s: the iterator
* @substream: the substream
@@ -644,24 +666,45 @@ void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
#define snd_pcm_group_for_each_entry(s, substream) \
list_for_each_entry(s, &substream->group->substreams, link_list)
+#define for_each_pcm_streams(stream) \
+ for (stream = SNDRV_PCM_STREAM_PLAYBACK; \
+ stream <= SNDRV_PCM_STREAM_LAST; \
+ stream++)
+
/**
* snd_pcm_running - Check whether the substream is in a running state
* @substream: substream to check
*
- * Returns true if the given substream is in the state RUNNING, or in the
+ * Return: true if the given substream is in the state RUNNING, or in the
* state DRAINING for playback.
*/
static inline int snd_pcm_running(struct snd_pcm_substream *substream)
{
- return (substream->runtime->status->state == SNDRV_PCM_STATE_RUNNING ||
- (substream->runtime->status->state == SNDRV_PCM_STATE_DRAINING &&
+ return (substream->runtime->state == SNDRV_PCM_STATE_RUNNING ||
+ (substream->runtime->state == SNDRV_PCM_STATE_DRAINING &&
substream->stream == SNDRV_PCM_STREAM_PLAYBACK));
}
/**
+ * __snd_pcm_set_state - Change the current PCM state
+ * @runtime: PCM runtime to set
+ * @state: the state to set
+ *
+ * This must be called within the stream lock.
+ */
+static inline void __snd_pcm_set_state(struct snd_pcm_runtime *runtime,
+ snd_pcm_state_t state)
+{
+ runtime->state = state;
+ runtime->status->state = state; /* copy for mmap */
+}
+
+/**
* bytes_to_samples - Unit conversion of the size from bytes to samples
* @runtime: PCM runtime instance
* @size: size in bytes
+ *
+ * Return: the size in samples
*/
static inline ssize_t bytes_to_samples(struct snd_pcm_runtime *runtime, ssize_t size)
{
@@ -672,6 +715,8 @@ static inline ssize_t bytes_to_samples(struct snd_pcm_runtime *runtime, ssize_t
* bytes_to_frames - Unit conversion of the size from bytes to frames
* @runtime: PCM runtime instance
* @size: size in bytes
+ *
+ * Return: the size in frames
*/
static inline snd_pcm_sframes_t bytes_to_frames(struct snd_pcm_runtime *runtime, ssize_t size)
{
@@ -682,6 +727,8 @@ static inline snd_pcm_sframes_t bytes_to_frames(struct snd_pcm_runtime *runtime,
* samples_to_bytes - Unit conversion of the size from samples to bytes
* @runtime: PCM runtime instance
* @size: size in samples
+ *
+ * Return: the byte size
*/
static inline ssize_t samples_to_bytes(struct snd_pcm_runtime *runtime, ssize_t size)
{
@@ -692,6 +739,8 @@ static inline ssize_t samples_to_bytes(struct snd_pcm_runtime *runtime, ssize_t
* frames_to_bytes - Unit conversion of the size from frames to bytes
* @runtime: PCM runtime instance
* @size: size in frames
+ *
+ * Return: the byte size
*/
static inline ssize_t frames_to_bytes(struct snd_pcm_runtime *runtime, snd_pcm_sframes_t size)
{
@@ -702,6 +751,8 @@ static inline ssize_t frames_to_bytes(struct snd_pcm_runtime *runtime, snd_pcm_s
* frame_aligned - Check whether the byte size is aligned to frames
* @runtime: PCM runtime instance
* @bytes: size in bytes
+ *
+ * Return: true if aligned, or false if not
*/
static inline int frame_aligned(struct snd_pcm_runtime *runtime, ssize_t bytes)
{
@@ -711,6 +762,8 @@ static inline int frame_aligned(struct snd_pcm_runtime *runtime, ssize_t bytes)
/**
* snd_pcm_lib_buffer_bytes - Get the buffer size of the current PCM in bytes
* @substream: PCM substream
+ *
+ * Return: buffer byte size
*/
static inline size_t snd_pcm_lib_buffer_bytes(struct snd_pcm_substream *substream)
{
@@ -721,6 +774,8 @@ static inline size_t snd_pcm_lib_buffer_bytes(struct snd_pcm_substream *substrea
/**
* snd_pcm_lib_period_bytes - Get the period size of the current PCM in bytes
* @substream: PCM substream
+ *
+ * Return: period byte size
*/
static inline size_t snd_pcm_lib_period_bytes(struct snd_pcm_substream *substream)
{
@@ -733,6 +788,8 @@ static inline size_t snd_pcm_lib_period_bytes(struct snd_pcm_substream *substrea
* @runtime: PCM runtime instance
*
* Result is between 0 ... (boundary - 1)
+ *
+ * Return: available frame size
*/
static inline snd_pcm_uframes_t snd_pcm_playback_avail(struct snd_pcm_runtime *runtime)
{
@@ -749,6 +806,8 @@ static inline snd_pcm_uframes_t snd_pcm_playback_avail(struct snd_pcm_runtime *r
* @runtime: PCM runtime instance
*
* Result is between 0 ... (boundary - 1)
+ *
+ * Return: available frame size
*/
static inline snd_pcm_uframes_t snd_pcm_capture_avail(struct snd_pcm_runtime *runtime)
{
@@ -761,6 +820,8 @@ static inline snd_pcm_uframes_t snd_pcm_capture_avail(struct snd_pcm_runtime *ru
/**
* snd_pcm_playback_hw_avail - Get the queued space for playback
* @runtime: PCM runtime instance
+ *
+ * Return: available frame size
*/
static inline snd_pcm_sframes_t snd_pcm_playback_hw_avail(struct snd_pcm_runtime *runtime)
{
@@ -770,6 +831,8 @@ static inline snd_pcm_sframes_t snd_pcm_playback_hw_avail(struct snd_pcm_runtime
/**
* snd_pcm_capture_hw_avail - Get the free space for capture
* @runtime: PCM runtime instance
+ *
+ * Return: available frame size
*/
static inline snd_pcm_sframes_t snd_pcm_capture_hw_avail(struct snd_pcm_runtime *runtime)
{
@@ -909,6 +972,8 @@ static inline const struct snd_interval *hw_param_interval_c(const struct snd_pc
/**
* params_channels - Get the number of channels from the hw params
* @p: hw params
+ *
+ * Return: the number of channels
*/
static inline unsigned int params_channels(const struct snd_pcm_hw_params *p)
{
@@ -918,6 +983,8 @@ static inline unsigned int params_channels(const struct snd_pcm_hw_params *p)
/**
* params_rate - Get the sample rate from the hw params
* @p: hw params
+ *
+ * Return: the sample rate
*/
static inline unsigned int params_rate(const struct snd_pcm_hw_params *p)
{
@@ -927,6 +994,8 @@ static inline unsigned int params_rate(const struct snd_pcm_hw_params *p)
/**
* params_period_size - Get the period size (in frames) from the hw params
* @p: hw params
+ *
+ * Return: the period size in frames
*/
static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p)
{
@@ -936,6 +1005,8 @@ static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p)
/**
* params_periods - Get the number of periods from the hw params
* @p: hw params
+ *
+ * Return: the number of periods
*/
static inline unsigned int params_periods(const struct snd_pcm_hw_params *p)
{
@@ -945,6 +1016,8 @@ static inline unsigned int params_periods(const struct snd_pcm_hw_params *p)
/**
* params_buffer_size - Get the buffer size (in frames) from the hw params
* @p: hw params
+ *
+ * Return: the buffer size in frames
*/
static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p)
{
@@ -954,6 +1027,8 @@ static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p)
/**
* params_buffer_bytes - Get the buffer size (in bytes) from the hw params
* @p: hw params
+ *
+ * Return: the buffer size in bytes
*/
static inline unsigned int params_buffer_bytes(const struct snd_pcm_hw_params *p)
{
@@ -1061,6 +1136,7 @@ void snd_pcm_set_ops(struct snd_pcm * pcm, int direction,
void snd_pcm_set_sync(struct snd_pcm_substream *substream);
int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg);
+void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream);
void snd_pcm_period_elapsed(struct snd_pcm_substream *substream);
snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
void *buf, bool interleaved,
@@ -1122,7 +1198,14 @@ snd_pcm_kernel_readv(struct snd_pcm_substream *substream,
return __snd_pcm_lib_xfer(substream, bufs, false, frames, true);
}
-int snd_pcm_limit_hw_rates(struct snd_pcm_runtime *runtime);
+int snd_pcm_hw_limit_rates(struct snd_pcm_hardware *hw);
+
+static inline int
+snd_pcm_limit_hw_rates(struct snd_pcm_runtime *runtime)
+{
+ return snd_pcm_hw_limit_rates(&runtime->hw);
+}
+
unsigned int snd_pcm_rate_to_rate_bit(unsigned int rate);
unsigned int snd_pcm_rate_bit_to_rate(unsigned int rate_bit);
unsigned int snd_pcm_rate_mask_intersect(unsigned int rates_a,
@@ -1191,11 +1274,52 @@ void snd_pcm_lib_preallocate_pages_for_all(struct snd_pcm *pcm,
int snd_pcm_lib_malloc_pages(struct snd_pcm_substream *substream, size_t size);
int snd_pcm_lib_free_pages(struct snd_pcm_substream *substream);
-void snd_pcm_set_managed_buffer(struct snd_pcm_substream *substream, int type,
- struct device *data, size_t size, size_t max);
-void snd_pcm_set_managed_buffer_all(struct snd_pcm *pcm, int type,
- struct device *data,
- size_t size, size_t max);
+int snd_pcm_set_managed_buffer(struct snd_pcm_substream *substream, int type,
+ struct device *data, size_t size, size_t max);
+int snd_pcm_set_managed_buffer_all(struct snd_pcm *pcm, int type,
+ struct device *data,
+ size_t size, size_t max);
+
+/**
+ * snd_pcm_set_fixed_buffer - Preallocate and set up the fixed size PCM buffer
+ * @substream: the pcm substream instance
+ * @type: DMA type (SNDRV_DMA_TYPE_*)
+ * @data: DMA type dependent data
+ * @size: the requested pre-allocation size in bytes
+ *
+ * This is a variant of snd_pcm_set_managed_buffer(), but it pre-allocates
+ * only a buffer of the given size and, unlike the standard helper, allows
+ * neither re-allocation nor dynamic allocation of a larger buffer.
+ * The function may return -ENOMEM, hence the caller must check the result.
+ *
+ * Return: zero if successful, or a negative error code
+ */
+static inline int __must_check
+snd_pcm_set_fixed_buffer(struct snd_pcm_substream *substream, int type,
+ struct device *data, size_t size)
+{
+ return snd_pcm_set_managed_buffer(substream, type, data, size, 0);
+}
+
+/**
+ * snd_pcm_set_fixed_buffer_all - Preallocate and set up the fixed size PCM buffer
+ * @pcm: the pcm instance
+ * @type: DMA type (SNDRV_DMA_TYPE_*)
+ * @data: DMA type dependent data
+ * @size: the requested pre-allocation size in bytes
+ *
+ * Apply the fixed buffer set-up via snd_pcm_set_fixed_buffer() to all
+ * substreams. If any allocation fails, it returns -ENOMEM, hence the
+ * caller must check the return value.
+ *
+ * Return: zero if successful, or a negative error code
+ */
+static inline int __must_check
+snd_pcm_set_fixed_buffer_all(struct snd_pcm *pcm, int type,
+ struct device *data, size_t size)
+{
+ return snd_pcm_set_managed_buffer_all(pcm, type, data, size, 0);
+}
int _snd_pcm_lib_alloc_vmalloc_buffer(struct snd_pcm_substream *substream,
size_t size, gfp_t gfp_flags);
@@ -1241,18 +1365,12 @@ static inline int snd_pcm_lib_alloc_vmalloc_32_buffer
#define snd_pcm_get_dma_buf(substream) ((substream)->runtime->dma_buffer_p)
-#ifdef CONFIG_SND_DMA_SGBUF
-/*
- * SG-buffer handling
- */
-#define snd_pcm_substream_sgbuf(substream) \
- snd_pcm_get_dma_buf(substream)->private_data
-#endif /* SND_DMA_SGBUF */
-
/**
* snd_pcm_sgbuf_get_addr - Get the DMA address at the corresponding offset
* @substream: PCM substream
* @ofs: byte offset
+ *
+ * Return: DMA address
*/
static inline dma_addr_t
snd_pcm_sgbuf_get_addr(struct snd_pcm_substream *substream, unsigned int ofs)
@@ -1261,22 +1379,13 @@ snd_pcm_sgbuf_get_addr(struct snd_pcm_substream *substream, unsigned int ofs)
}
/**
- * snd_pcm_sgbuf_get_ptr - Get the virtual address at the corresponding offset
- * @substream: PCM substream
- * @ofs: byte offset
- */
-static inline void *
-snd_pcm_sgbuf_get_ptr(struct snd_pcm_substream *substream, unsigned int ofs)
-{
- return snd_sgbuf_get_ptr(snd_pcm_get_dma_buf(substream), ofs);
-}
-
-/**
- * snd_pcm_sgbuf_chunk_size - Compute the max size that fits within the contig.
- * page from the given size
+ * snd_pcm_sgbuf_get_chunk_size - Compute the max size that fits within the
+ * contiguous page from the given size
* @substream: PCM substream
* @ofs: byte offset
* @size: byte size to examine
+ *
+ * Return: chunk size
*/
static inline unsigned int
snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream,
@@ -1342,6 +1451,20 @@ static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max)
const char *snd_pcm_format_name(snd_pcm_format_t format);
/**
+ * snd_pcm_direction_name - Get a string naming the direction of a stream
+ * @direction: Stream's direction, one of SNDRV_PCM_STREAM_XXX
+ *
+ * Return: a string naming the direction of the stream
+ */
+static inline const char *snd_pcm_direction_name(int direction)
+{
+ if (direction == SNDRV_PCM_STREAM_PLAYBACK)
+ return "Playback";
+ else
+ return "Capture";
+}
+
+/**
* snd_pcm_stream_str - Get a string naming the direction of a stream
* @substream: the pcm substream instance
*
@@ -1349,10 +1472,7 @@ const char *snd_pcm_format_name(snd_pcm_format_t format);
*/
static inline const char *snd_pcm_stream_str(struct snd_pcm_substream *substream)
{
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- return "Playback";
- else
- return "Capture";
+ return snd_pcm_direction_name(substream->stream);
}
/*
@@ -1379,6 +1499,8 @@ struct snd_pcm_chmap {
* snd_pcm_chmap_substream - get the PCM substream assigned to the given chmap info
* @info: chmap information
* @idx: the substream number index
+ *
+ * Return: the matched PCM substream, or NULL if not found
*/
static inline struct snd_pcm_substream *
snd_pcm_chmap_substream(struct snd_pcm_chmap *info, unsigned int idx)
@@ -1409,12 +1531,23 @@ int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
/**
* pcm_format_to_bits - Strong-typed conversion of pcm_format to bitwise
* @pcm_format: PCM format
+ *
+ * Return: 64bit mask corresponding to the given PCM format
*/
static inline u64 pcm_format_to_bits(snd_pcm_format_t pcm_format)
{
return 1ULL << (__force int) pcm_format;
}
+/**
+ * pcm_for_each_format - helper to iterate for each format type
+ * @f: the iterator variable in snd_pcm_format_t type
+ */
+#define pcm_for_each_format(f) \
+ for ((f) = SNDRV_PCM_FORMAT_FIRST; \
+ (__force int)(f) <= (__force int)SNDRV_PCM_FORMAT_LAST; \
+ (f) = (__force snd_pcm_format_t)((__force int)(f) + 1))
+
/* printk helpers */
#define pcm_err(pcm, fmt, args...) \
dev_err((pcm)->card->dev, fmt, ##args)
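To make the new pcm.h helpers concrete, a hedged sketch combining the pcm_for_each_format() iterator with the __must_check fixed-buffer setup; example_pcm_setup() and the 64 KiB size are invented, while snd_pcm_format_width() is a pre-existing pcm.h helper not shown in this hunk:

/* Hypothetical setup fragment: derive the 16-bit format mask with the new
 * iterator, then preallocate a fixed buffer for every substream. */
static int example_pcm_setup(struct snd_pcm *pcm, struct device *dev,
			     struct snd_pcm_hardware *hw)
{
	snd_pcm_format_t f;

	hw->formats = 0;
	pcm_for_each_format(f)
		if (snd_pcm_format_width(f) == 16)	/* all 16-bit formats */
			hw->formats |= pcm_format_to_bits(f);

	/* the fixed-size variant is __must_check and may fail with -ENOMEM */
	return snd_pcm_set_fixed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
					    dev, 64 * 1024);
}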
diff --git a/include/sound/pcm_iec958.h b/include/sound/pcm_iec958.h
index 0939aa45e2fe..64e84441cde1 100644
--- a/include/sound/pcm_iec958.h
+++ b/include/sound/pcm_iec958.h
@@ -4,6 +4,14 @@
#include <linux/types.h>
+int snd_pcm_create_iec958_consumer_default(u8 *cs, size_t len);
+
+int snd_pcm_fill_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs,
+ size_t len);
+
+int snd_pcm_fill_iec958_consumer_hw_params(struct snd_pcm_hw_params *params,
+ u8 *cs, size_t len);
+
int snd_pcm_create_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs,
size_t len);
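A sketch of how the new default/fill helpers above might be chained; the surrounding hw_params callback is hypothetical, and only the call order reflects the declared API:

/* Hypothetical hw_params fragment: start from the default consumer
 * channel status, then refine the rate/word-length bits from the
 * negotiated hardware parameters. */
static int example_iec958_hw_params(struct snd_pcm_hw_params *params,
				    u8 *cs, size_t len)
{
	int ret = snd_pcm_create_iec958_consumer_default(cs, len);

	if (ret < 0)
		return ret;
	return snd_pcm_fill_iec958_consumer_hw_params(params, cs, len);
}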
diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h
index 661450a2095b..ba184f49f7e1 100644
--- a/include/sound/pcm_params.h
+++ b/include/sound/pcm_params.h
@@ -23,11 +23,6 @@ int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
#define MASK_OFS(i) ((i) >> 5)
#define MASK_BIT(i) (1U << ((i) & 31))
-static inline size_t snd_mask_sizeof(void)
-{
- return sizeof(struct snd_mask);
-}
-
static inline void snd_mask_none(struct snd_mask *mask)
{
memset(mask, 0, sizeof(*mask));
@@ -133,6 +128,13 @@ static inline int snd_mask_test(const struct snd_mask *mask, unsigned int val)
return mask->bits[MASK_OFS(val)] & MASK_BIT(val);
}
+/* Most drivers need only this one */
+static inline int snd_mask_test_format(const struct snd_mask *mask,
+ snd_pcm_format_t format)
+{
+ return snd_mask_test(mask, (__force unsigned int)format);
+}
+
static inline int snd_mask_single(const struct snd_mask *mask)
{
int i, c = 0;
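Since most drivers only probe the format mask, the strongly-typed wrapper above removes the __force cast from every call site; a sketch, with the helper itself invented for illustration (hw_param_mask_c() is the existing accessor from this header):

/* Hypothetical fragment: check whether S16_LE survived the negotiated
 * format mask without open-coding the __force cast. */
static bool example_has_s16(const struct snd_pcm_hw_params *params)
{
	const struct snd_mask *fmt =
		hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);

	return snd_mask_test_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
}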
diff --git a/include/sound/pxa2xx-lib.h b/include/sound/pxa2xx-lib.h
index 0feaf16e6ac0..0a6f8dabf8c4 100644
--- a/include/sound/pxa2xx-lib.h
+++ b/include/sound/pxa2xx-lib.h
@@ -14,18 +14,12 @@ struct snd_soc_component;
extern int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params);
-extern int pxa2xx_pcm_hw_free(struct snd_pcm_substream *substream);
extern int pxa2xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd);
extern snd_pcm_uframes_t pxa2xx_pcm_pointer(struct snd_pcm_substream *substream);
extern int pxa2xx_pcm_prepare(struct snd_pcm_substream *substream);
extern int pxa2xx_pcm_open(struct snd_pcm_substream *substream);
extern int pxa2xx_pcm_close(struct snd_pcm_substream *substream);
-extern int pxa2xx_pcm_mmap(struct snd_pcm_substream *substream,
- struct vm_area_struct *vma);
-extern int pxa2xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream);
-extern void pxa2xx_pcm_free_dma_buffers(struct snd_pcm *pcm);
-extern void pxa2xx_soc_pcm_free(struct snd_soc_component *component,
- struct snd_pcm *pcm);
+extern int pxa2xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm);
extern int pxa2xx_soc_pcm_new(struct snd_soc_component *component,
struct snd_soc_pcm_runtime *rtd);
extern int pxa2xx_soc_pcm_open(struct snd_soc_component *component,
@@ -35,8 +29,6 @@ extern int pxa2xx_soc_pcm_close(struct snd_soc_component *component,
extern int pxa2xx_soc_pcm_hw_params(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params);
-extern int pxa2xx_soc_pcm_hw_free(struct snd_soc_component *component,
- struct snd_pcm_substream *substream);
extern int pxa2xx_soc_pcm_prepare(struct snd_soc_component *component,
struct snd_pcm_substream *substream);
extern int pxa2xx_soc_pcm_trigger(struct snd_soc_component *component,
@@ -44,9 +36,6 @@ extern int pxa2xx_soc_pcm_trigger(struct snd_soc_component *component,
extern snd_pcm_uframes_t
pxa2xx_soc_pcm_pointer(struct snd_soc_component *component,
struct snd_pcm_substream *substream);
-extern int pxa2xx_soc_pcm_mmap(struct snd_soc_component *component,
- struct snd_pcm_substream *substream,
- struct vm_area_struct *vma);
/* AC97 */
@@ -63,4 +52,8 @@ extern int pxa2xx_ac97_hw_resume(void);
extern int pxa2xx_ac97_hw_probe(struct platform_device *dev);
extern void pxa2xx_ac97_hw_remove(struct platform_device *dev);
+/* modem registers, used by touchscreen driver */
+u32 pxa2xx_ac97_read_modr(void);
+u32 pxa2xx_ac97_read_misr(void);
+
#endif
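The two new modem-register readers let the touchscreen driver observe AC97 status without reaching into the controller directly; a minimal sketch under that assumption, in which the tested bit is purely illustrative:

/* Hypothetical polling fragment built on the new accessors above */
static bool example_modem_ready(void)
{
	u32 modr = pxa2xx_ac97_read_modr();
	u32 misr = pxa2xx_ac97_read_misr();

	/* BIT(2) is a made-up status bit, chosen only for the example */
	return modr != 0 && (misr & BIT(2));
}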
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index a36b7227a15a..e1f59b2940af 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -61,8 +61,8 @@ struct snd_rawmidi_runtime {
size_t avail_min; /* min avail for wakeup */
size_t avail; /* max used buffer for wakeup */
size_t xruns; /* over/underruns counter */
+ int buffer_ref; /* buffer reference count */
/* misc */
- spinlock_t lock;
wait_queue_head_t sleep;
/* event handler (new bytes, input only) */
void (*event)(struct snd_rawmidi_substream *substream);
@@ -80,8 +80,11 @@ struct snd_rawmidi_substream {
bool opened; /* open flag */
bool append; /* append flag (merge more streams) */
bool active_sensing; /* send active sensing when close */
+ unsigned int framing; /* whether to frame input data */
+ unsigned int clock_type; /* clock source to use for input framing */
int use_count; /* use counter (for output) */
size_t bytes;
+ spinlock_t lock;
struct snd_rawmidi *rmidi;
struct snd_rawmidi_str *pstr;
char name[32];
@@ -95,6 +98,7 @@ struct snd_rawmidi_file {
struct snd_rawmidi *rmidi;
struct snd_rawmidi_substream *input;
struct snd_rawmidi_substream *output;
+ unsigned int user_pversion; /* supported protocol version */
};
struct snd_rawmidi_str {
@@ -152,10 +156,6 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count);
int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
unsigned char *buffer, int count);
-int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
- unsigned char *buffer, int count);
-int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream,
- int count);
int snd_rawmidi_proceed(struct snd_rawmidi_substream *substream);
/* main midi functions */
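With the double-underscore transmit variants removed from the header, drivers are expected to stay on the locked API; a sketch of the peek/ack pattern under that assumption, with the UART write stub invented for the example:

/* Stand-in for a device FIFO write; pretend all bytes were accepted */
static int example_uart_write(const unsigned char *buf, int count)
{
	return count;
}

/* Hypothetical output-IRQ fragment: peek pending bytes, push what the
 * hardware accepted, then ack only that many. */
static void example_tx(struct snd_rawmidi_substream *substream)
{
	unsigned char buf[16];
	int count = snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));

	if (count > 0) {
		int sent = example_uart_write(buf, count);

		if (sent > 0)
			snd_rawmidi_transmit_ack(substream, sent);
	}
}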
diff --git a/include/sound/rt1015.h b/include/sound/rt1015.h
new file mode 100644
index 000000000000..70a7538d4c89
--- /dev/null
+++ b/include/sound/rt1015.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/sound/rt1015.h -- Platform data for RT1015
+ *
+ * Copyright 2020 Realtek Microelectronics
+ */
+
+#ifndef __LINUX_SND_RT1015_H
+#define __LINUX_SND_RT1015_H
+
+struct rt1015_platform_data {
+ unsigned int power_up_delay_ms;
+};
+
+#endif
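The new header carries a single knob; a board file would wire it up roughly as below, with the delay value illustrative only:

/* Hypothetical board-file fragment for the RT1015 platform data above */
#include <sound/rt1015.h>

static struct rt1015_platform_data example_rt1015_pdata = {
	.power_up_delay_ms = 50,	/* illustrative delay */
};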
diff --git a/include/sound/rt5645.h b/include/sound/rt5645.h
deleted file mode 100644
index 39a77c7cea36..000000000000
--- a/include/sound/rt5645.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/sound/rt5645.h -- Platform data for RT5645
- *
- * Copyright 2013 Realtek Microelectronics
- */
-
-#ifndef __LINUX_SND_RT5645_H
-#define __LINUX_SND_RT5645_H
-
-struct rt5645_platform_data {
- /* IN2 can optionally be differential */
- bool in2_diff;
-
- unsigned int dmic1_data_pin;
- /* 0 = IN2N; 1 = GPIO5; 2 = GPIO11 */
- unsigned int dmic2_data_pin;
- /* 0 = IN2P; 1 = GPIO6; 2 = GPIO10; 3 = GPIO12 */
-
- unsigned int jd_mode;
- /* Use level triggered irq */
- bool level_trigger_irq;
- /* Invert JD1_1 status polarity */
- bool inv_jd1_1;
-
- /* Value to asign to snd_soc_card.long_name */
- const char *long_name;
-};
-
-#endif
diff --git a/include/sound/rt5670.h b/include/sound/rt5670.h
deleted file mode 100644
index f9024c7a1600..000000000000
--- a/include/sound/rt5670.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/sound/rt5670.h -- Platform data for RT5670
- *
- * Copyright 2014 Realtek Microelectronics
- */
-
-#ifndef __LINUX_SND_RT5670_H
-#define __LINUX_SND_RT5670_H
-
-struct rt5670_platform_data {
- int jd_mode;
- bool in2_diff;
- bool dev_gpio;
-
- bool dmic_en;
- unsigned int dmic1_data_pin;
- /* 0 = GPIO6; 1 = IN2P; 3 = GPIO7*/
- unsigned int dmic2_data_pin;
- /* 0 = GPIO8; 1 = IN3N; */
- unsigned int dmic3_data_pin;
- /* 0 = GPIO9; 1 = GPIO10; 2 = GPIO5*/
-};
-
-#endif
diff --git a/include/sound/rt5682.h b/include/sound/rt5682.h
index bc2c31734df1..3900a07e3935 100644
--- a/include/sound/rt5682.h
+++ b/include/sound/rt5682.h
@@ -24,6 +24,12 @@ enum rt5682_jd_src {
RT5682_JD1,
};
+enum rt5682_dai_clks {
+ RT5682_DAI_WCLK_IDX,
+ RT5682_DAI_BCLK_IDX,
+ RT5682_DAI_NUM_CLKS,
+};
+
struct rt5682_platform_data {
int ldo1_en; /* GPIO for LDO1_EN */
@@ -32,6 +38,11 @@ struct rt5682_platform_data {
enum rt5682_dmic1_clk_pin dmic1_clk_pin;
enum rt5682_jd_src jd_src;
unsigned int btndet_delay;
+ unsigned int dmic_clk_rate;
+ unsigned int dmic_delay;
+ bool dmic_clk_driving_high;
+
+ const char *dai_clk_names[RT5682_DAI_NUM_CLKS];
};
#endif
diff --git a/include/sound/rt5682s.h b/include/sound/rt5682s.h
new file mode 100644
index 000000000000..f18d91308b9a
--- /dev/null
+++ b/include/sound/rt5682s.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/sound/rt5682s.h -- Platform data for RT5682I-VS
+ *
+ * Copyright 2021 Realtek Microelectronics
+ */
+
+#ifndef __LINUX_SND_RT5682S_H
+#define __LINUX_SND_RT5682S_H
+
+enum rt5682s_dmic1_data_pin {
+ RT5682S_DMIC1_DATA_NULL,
+ RT5682S_DMIC1_DATA_GPIO2,
+ RT5682S_DMIC1_DATA_GPIO5,
+};
+
+enum rt5682s_dmic1_clk_pin {
+ RT5682S_DMIC1_CLK_NULL,
+ RT5682S_DMIC1_CLK_GPIO1,
+ RT5682S_DMIC1_CLK_GPIO3,
+};
+
+enum rt5682s_jd_src {
+ RT5682S_JD_NULL,
+ RT5682S_JD1,
+};
+
+enum rt5682s_dai_clks {
+ RT5682S_DAI_WCLK_IDX,
+ RT5682S_DAI_BCLK_IDX,
+ RT5682S_DAI_NUM_CLKS,
+};
+
+struct rt5682s_platform_data {
+
+ int ldo1_en; /* GPIO for LDO1_EN */
+
+ enum rt5682s_dmic1_data_pin dmic1_data_pin;
+ enum rt5682s_dmic1_clk_pin dmic1_clk_pin;
+ enum rt5682s_jd_src jd_src;
+ unsigned int dmic_clk_rate;
+ unsigned int dmic_delay;
+ unsigned int amic_delay;
+ bool dmic_clk_driving_high;
+
+ const char *dai_clk_names[RT5682S_DAI_NUM_CLKS];
+};
+
+#endif
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index bbdd1542d6f1..25e049f44178 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -12,9 +12,15 @@
#include <sound/soc.h>
#define asoc_simple_init_hp(card, sjack, prefix) \
- asoc_simple_init_jack(card, sjack, 1, prefix)
+ asoc_simple_init_jack(card, sjack, 1, prefix, NULL)
#define asoc_simple_init_mic(card, sjack, prefix) \
- asoc_simple_init_jack(card, sjack, 0, prefix)
+ asoc_simple_init_jack(card, sjack, 0, prefix, NULL)
+
+struct asoc_simple_tdm_width_map {
+ u8 sample_bits;
+ u8 slot_count;
+ u16 slot_width;
+};
struct asoc_simple_dai {
const char *name;
@@ -25,11 +31,15 @@ struct asoc_simple_dai {
unsigned int tx_slot_mask;
unsigned int rx_slot_mask;
struct clk *clk;
+ bool clk_fixed;
+ struct asoc_simple_tdm_width_map *tdm_width_map;
+ int n_tdm_widths;
};
struct asoc_simple_data {
u32 convert_rate;
u32 convert_channels;
+ const char *convert_sample_format;
};
struct asoc_simple_jack {
@@ -38,35 +48,89 @@ struct asoc_simple_jack {
struct snd_soc_jack_gpio gpio;
};
+struct prop_nums {
+ int cpus;
+ int codecs;
+ int platforms;
+};
+
struct asoc_simple_priv {
struct snd_soc_card snd_card;
struct simple_dai_props {
struct asoc_simple_dai *cpu_dai;
struct asoc_simple_dai *codec_dai;
- struct snd_soc_dai_link_component cpus; /* single cpu */
- struct snd_soc_dai_link_component codecs; /* single codec */
- struct snd_soc_dai_link_component platforms;
+ struct snd_soc_dai_link_component *cpus;
+ struct snd_soc_dai_link_component *codecs;
+ struct snd_soc_dai_link_component *platforms;
struct asoc_simple_data adata;
struct snd_soc_codec_conf *codec_conf;
+ struct prop_nums num;
unsigned int mclk_fs;
} *dai_props;
struct asoc_simple_jack hp_jack;
struct asoc_simple_jack mic_jack;
struct snd_soc_dai_link *dai_link;
struct asoc_simple_dai *dais;
+ struct snd_soc_dai_link_component *dlcs;
+ struct snd_soc_dai_link_component dummy;
struct snd_soc_codec_conf *codec_conf;
struct gpio_desc *pa_gpio;
+ const struct snd_soc_ops *ops;
+ unsigned int dpcm_selectable:1;
+ unsigned int force_dpcm:1;
};
#define simple_priv_to_card(priv) (&(priv)->snd_card)
#define simple_priv_to_props(priv, i) ((priv)->dai_props + (i))
#define simple_priv_to_dev(priv) (simple_priv_to_card(priv)->dev)
#define simple_priv_to_link(priv, i) (simple_priv_to_card(priv)->dai_link + (i))
+#define simple_props_to_dlc_cpu(props, i) ((props)->cpus + i)
+#define simple_props_to_dlc_codec(props, i) ((props)->codecs + i)
+#define simple_props_to_dlc_platform(props, i) ((props)->platforms + i)
+
+#define simple_props_to_dai_cpu(props, i) ((props)->cpu_dai + i)
+#define simple_props_to_dai_codec(props, i) ((props)->codec_dai + i)
+#define simple_props_to_codec_conf(props, i) ((props)->codec_conf + i)
+
+#define for_each_prop_dlc_cpus(props, i, cpu) \
+ for ((i) = 0; \
+ ((i) < (props)->num.cpus) && \
+ ((cpu) = simple_props_to_dlc_cpu(props, i)); \
+ (i)++)
+#define for_each_prop_dlc_codecs(props, i, codec) \
+ for ((i) = 0; \
+ ((i) < (props)->num.codecs) && \
+ ((codec) = simple_props_to_dlc_codec(props, i)); \
+ (i)++)
+#define for_each_prop_dlc_platforms(props, i, platform) \
+ for ((i) = 0; \
+ ((i) < (props)->num.platforms) && \
+ ((platform) = simple_props_to_dlc_platform(props, i)); \
+ (i)++)
+#define for_each_prop_codec_conf(props, i, conf) \
+ for ((i) = 0; \
+ ((i) < (props)->num.codecs) && \
+ (props)->codec_conf && \
+ ((conf) = simple_props_to_codec_conf(props, i)); \
+ (i)++)
+
+#define for_each_prop_dai_cpu(props, i, cpu) \
+ for ((i) = 0; \
+ ((i) < (props)->num.cpus) && \
+ ((cpu) = simple_props_to_dai_cpu(props, i)); \
+ (i)++)
+#define for_each_prop_dai_codec(props, i, codec) \
+ for ((i) = 0; \
+ ((i) < (props)->num.codecs) && \
+ ((codec) = simple_props_to_dai_codec(props, i)); \
+ (i)++)
+
+#define SNDRV_MAX_LINKS 512
+
struct link_info {
- int dais; /* number of dai */
int link; /* number of link */
- int conf; /* number of codec_conf */
int cpu; /* turn for CPU / Codec */
+ struct prop_nums num[SNDRV_MAX_LINKS];
};
int asoc_simple_parse_daifmt(struct device *dev,
@@ -74,6 +138,9 @@ int asoc_simple_parse_daifmt(struct device *dev,
struct device_node *codec,
char *prefix,
unsigned int *retfmt);
+int asoc_simple_parse_tdm_width_map(struct device *dev, struct device_node *np,
+ struct asoc_simple_dai *dai);
+
__printf(3, 4)
int asoc_simple_set_dailink_name(struct device *dev,
struct snd_soc_dai_link *dai_link,
@@ -81,10 +148,6 @@ int asoc_simple_set_dailink_name(struct device *dev,
int asoc_simple_parse_card_name(struct snd_soc_card *card,
char *prefix);
-#define asoc_simple_parse_clk_cpu(dev, node, dai_link, simple_dai) \
- asoc_simple_parse_clk(dev, node, simple_dai, dai_link->cpus)
-#define asoc_simple_parse_clk_codec(dev, node, dai_link, simple_dai) \
- asoc_simple_parse_clk(dev, node, simple_dai, dai_link->codecs)
int asoc_simple_parse_clk(struct device *dev,
struct device_node *node,
struct asoc_simple_dai *simple_dai,
@@ -97,30 +160,24 @@ int asoc_simple_dai_init(struct snd_soc_pcm_runtime *rtd);
int asoc_simple_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params);
-#define asoc_simple_parse_cpu(node, dai_link, is_single_link) \
- asoc_simple_parse_dai(node, dai_link->cpus, is_single_link)
-#define asoc_simple_parse_codec(node, dai_link) \
- asoc_simple_parse_dai(node, dai_link->codecs, NULL)
-#define asoc_simple_parse_platform(node, dai_link) \
- asoc_simple_parse_dai(node, dai_link->platforms, NULL)
-
#define asoc_simple_parse_tdm(np, dai) \
snd_soc_of_parse_tdm_slot(np, &(dai)->tx_slot_mask, \
&(dai)->rx_slot_mask, \
&(dai)->slots, \
&(dai)->slot_width);
-void asoc_simple_canonicalize_platform(struct snd_soc_dai_link *dai_link);
-void asoc_simple_canonicalize_cpu(struct snd_soc_dai_link *dai_link,
- int is_single_links);
+void asoc_simple_canonicalize_platform(struct snd_soc_dai_link_component *platforms,
+ struct snd_soc_dai_link_component *cpus);
+void asoc_simple_canonicalize_cpu(struct snd_soc_dai_link_component *cpus,
+ int is_single_links);
-int asoc_simple_clean_reference(struct snd_soc_card *card);
+void asoc_simple_clean_reference(struct snd_soc_card *card);
void asoc_simple_convert_fixup(struct asoc_simple_data *data,
struct snd_pcm_hw_params *params);
-void asoc_simple_parse_convert(struct device *dev,
- struct device_node *np, char *prefix,
+void asoc_simple_parse_convert(struct device_node *np, char *prefix,
struct asoc_simple_data *data);
+bool asoc_simple_is_convert_required(const struct asoc_simple_data *data);
int asoc_simple_parse_routing(struct snd_soc_card *card,
char *prefix);
@@ -131,9 +188,13 @@ int asoc_simple_parse_pin_switches(struct snd_soc_card *card,
int asoc_simple_init_jack(struct snd_soc_card *card,
struct asoc_simple_jack *sjack,
- int is_hp, char *prefix);
+ int is_hp, char *prefix, char *pin);
int asoc_simple_init_priv(struct asoc_simple_priv *priv,
struct link_info *li);
+int asoc_simple_remove(struct platform_device *pdev);
+
+int asoc_graph_card_probe(struct snd_soc_card *card);
+int asoc_graph_is_ports0(struct device_node *port);
#ifdef DEBUG
static inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
@@ -149,12 +210,6 @@ static inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
if (dai->name)
dev_dbg(dev, "%s dai name = %s\n",
name, dai->name);
- if (dai->sysclk)
- dev_dbg(dev, "%s sysclk = %d\n",
- name, dai->sysclk);
-
- dev_dbg(dev, "%s direction = %s\n",
- name, dai->clk_direction ? "OUT" : "IN");
if (dai->slots)
dev_dbg(dev, "%s slots = %d\n", name, dai->slots);
@@ -166,6 +221,12 @@ static inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
dev_dbg(dev, "%s rx slot mask = %d\n", name, dai->rx_slot_mask);
if (dai->clk)
dev_dbg(dev, "%s clk %luHz\n", name, clk_get_rate(dai->clk));
+ if (dai->sysclk)
+ dev_dbg(dev, "%s sysclk = %dHz\n",
+ name, dai->sysclk);
+ if (dai->clk || dai->sysclk)
+ dev_dbg(dev, "%s direction = %s\n",
+ name, dai->clk_direction ? "OUT" : "IN");
}
static inline void asoc_simple_debug_info(struct asoc_simple_priv *priv)
@@ -181,29 +242,32 @@ static inline void asoc_simple_debug_info(struct asoc_simple_priv *priv)
for (i = 0; i < card->num_links; i++) {
struct simple_dai_props *props = simple_priv_to_props(priv, i);
struct snd_soc_dai_link *link = simple_priv_to_link(priv, i);
+ struct asoc_simple_dai *dai;
+ struct snd_soc_codec_conf *cnf;
+ int j;
dev_dbg(dev, "DAI%d\n", i);
- asoc_simple_debug_dai(priv, "cpu", props->cpu_dai);
- asoc_simple_debug_dai(priv, "codec", props->codec_dai);
+ dev_dbg(dev, "cpu num = %d\n", link->num_cpus);
+ for_each_prop_dai_cpu(props, j, dai)
+ asoc_simple_debug_dai(priv, "cpu", dai);
+ dev_dbg(dev, "codec num = %d\n", link->num_codecs);
+ for_each_prop_dai_codec(props, j, dai)
+ asoc_simple_debug_dai(priv, "codec", dai);
if (link->name)
dev_dbg(dev, "dai name = %s\n", link->name);
-
- dev_dbg(dev, "dai format = %04x\n", link->dai_fmt);
-
+ if (link->dai_fmt)
+ dev_dbg(dev, "dai format = %04x\n", link->dai_fmt);
if (props->adata.convert_rate)
- dev_dbg(dev, "convert_rate = %d\n",
- props->adata.convert_rate);
+ dev_dbg(dev, "convert_rate = %d\n", props->adata.convert_rate);
if (props->adata.convert_channels)
- dev_dbg(dev, "convert_channels = %d\n",
- props->adata.convert_channels);
- if (props->codec_conf && props->codec_conf->name_prefix)
- dev_dbg(dev, "name prefix = %s\n",
- props->codec_conf->name_prefix);
+ dev_dbg(dev, "convert_channels = %d\n", props->adata.convert_channels);
+ for_each_prop_codec_conf(props, j, cnf)
+ if (cnf->name_prefix)
+ dev_dbg(dev, "name prefix = %s\n", cnf->name_prefix);
if (props->mclk_fs)
- dev_dbg(dev, "mclk-fs = %d\n",
- props->mclk_fs);
+ dev_dbg(dev, "mclk-fs = %d\n", props->mclk_fs);
}
}
#else
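A sketch of how the new per-link iterators above are meant to be used; the walk function is hypothetical, but the accessors are the ones defined in this header:

/* Hypothetical init fragment: walk every CPU DAI of one link's props */
static void example_walk_cpus(struct asoc_simple_priv *priv, int link)
{
	struct simple_dai_props *props = simple_priv_to_props(priv, link);
	struct asoc_simple_dai *dai;
	int i;

	for_each_prop_dai_cpu(props, i, dai)
		dev_dbg(simple_priv_to_dev(priv), "cpu%d: %s\n", i,
			dai->name ? dai->name : "(unnamed)");
}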
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index ab6f75a86611..82a7db23db69 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -14,9 +14,7 @@
* these tables are not constants, some fields can be used for
* pdata or machine ops
*/
-extern struct snd_soc_acpi_mach snd_soc_acpi_intel_haswell_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_broadwell_machines[];
-extern struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_legacy_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_skl_machines[];
@@ -30,12 +28,18 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_ehl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_jsl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_rpl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cfl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_sdw_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_rpl_sdw_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_sdw_machines[];
/*
* generic table used for HDA codec-based platforms, possibly with
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index a217a87cae86..b38fd25c5729 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (C) 2013-15, Intel Corporation. All rights reserved.
*/
@@ -58,11 +58,15 @@ static inline struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
* snd_soc_acpi_mach_params: interface for machine driver configuration
*
* @acpi_ipc_irq_index: used for BYT-CR detection
- * @platform: string used for HDaudio codec support
+ * @platform: string used for HDAudio codec support
* @codec_mask: used for HDAudio support
+ * @dmic_num: number of SoC- or chipset-attached PDM digital microphones
 * @common_hdmi_codec_drv: use common HDAudio HDMI codec driver
- * @link_mask: links enabled on the board
- * @links: array of link _ADR descriptors, null terminated
+ * @link_mask: SoundWire links enabled on the board
+ * @links: array of SoundWire link _ADR descriptors, null terminated
+ * @i2s_link_mask: I2S/TDM links enabled on the board
+ * @num_dai_drivers: number of elements in @dai_drivers
+ * @dai_drivers: pointer to dai_drivers, used e.g. in nocodec mode
*/
struct snd_soc_acpi_mach_params {
u32 acpi_ipc_irq_index;
@@ -72,23 +76,73 @@ struct snd_soc_acpi_mach_params {
bool common_hdmi_codec_drv;
u32 link_mask;
const struct snd_soc_acpi_link_adr *links;
+ u32 i2s_link_mask;
+ u32 num_dai_drivers;
+ struct snd_soc_dai_driver *dai_drivers;
};
/**
- * snd_soc_acpi_link_adr: ACPI-based list of _ADR, with a variable
- * number of devices per link
- *
+ * snd_soc_acpi_endpoint - endpoint descriptor
+ * @num: endpoint number (mandatory, unique per device)
+ * @aggregated: 0 (independent) or 1 (logically grouped)
+ * @group_position: zero-based order (only when @aggregated is 1)
+ * @group_id: platform-unique group identifier (only when @aggregated is 1)
+ */
+struct snd_soc_acpi_endpoint {
+ u8 num;
+ u8 aggregated;
+ u8 group_position;
+ u8 group_id;
+};
+
+/**
+ * snd_soc_acpi_adr_device - descriptor for _ADR-enumerated device
+ * @adr: 64 bit ACPI _ADR value
+ * @num_endpoints: number of endpoints for this device
+ * @endpoints: array of endpoints
+ * @name_prefix: string used for codec controls
+ */
+struct snd_soc_acpi_adr_device {
+ const u64 adr;
+ const u8 num_endpoints;
+ const struct snd_soc_acpi_endpoint *endpoints;
+ const char *name_prefix;
+};
+
+/**
+ * snd_soc_acpi_link_adr - ACPI-based list of _ADR enumerated devices
* @mask: one bit set indicates the link this list applies to
- * @num_adr: ARRAY_SIZE of adr
- * @adr: array of _ADR (represented as u64).
+ * @num_adr: ARRAY_SIZE of devices
+ * @adr_d: array of devices
+ *
+ * The number of devices per link can be more than 1, e.g. in SoundWire
+ * multi-drop configurations.
*/
struct snd_soc_acpi_link_adr {
const u32 mask;
const u32 num_adr;
- const u64 *adr;
+ const struct snd_soc_acpi_adr_device *adr_d;
};
+/*
+ * When set, the topology uses the -ssp<N> suffix, where N is determined
+ * from BIOS or DMI information
+ */
+#define SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER BIT(0)
+
+/*
+ * When more than one SSP is reported in the link mask, use the most
+ * significant one. This choice was found to be valid on platforms with
+ * ES8336 codecs.
+ */
+#define SND_SOC_ACPI_TPLG_INTEL_SSP_MSB BIT(1)
+
+/*
+ * When set, the topology uses the -dmic<N>ch suffix, where N is determined
+ * from BIOS or DMI information
+ */
+#define SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER BIT(2)
+
/**
* snd_soc_acpi_mach: ACPI-based machine descriptor. Most of the fields are
* related to the hardware, except for the firmware and topology file names.
@@ -96,10 +150,13 @@ struct snd_soc_acpi_link_adr {
* all firmware/topology related fields.
*
* @id: ACPI ID (usually the codec's) used to find a matching machine driver.
+ * @comp_ids: list of compatible audio codecs using the same machine driver,
+ * firmware and topology
* @link_mask: describes required board layout, e.g. for SoundWire.
* @links: array of link _ADR descriptors, null terminated.
* @drv_name: machine driver name
* @fw_filename: firmware file name. Used when SOF is not enabled.
+ * @tplg_filename: topology file name. Used when SOF is not enabled.
* @board: board name
* @machine_quirk: pointer to quirk, usually based on DMI information when
* ACPI ID alone is not sufficient, wrong or misleading
@@ -107,23 +164,25 @@ struct snd_soc_acpi_link_adr {
 * audio codecs whose presence is checked with ACPI
* @pdata: intended for platform data or machine specific-ops. This structure
* is not constant since this field may be updated at run-time
- * @sof_fw_filename: Sound Open Firmware file name, if enabled
* @sof_tplg_filename: Sound Open Firmware topology file name, if enabled
+ * @tplg_quirk_mask: quirks to select different topology files dynamically
*/
/* Descriptor for SST ASoC machine driver */
struct snd_soc_acpi_mach {
- const u8 id[ACPI_ID_LEN];
+ u8 id[ACPI_ID_LEN];
+ const struct snd_soc_acpi_codecs *comp_ids;
const u32 link_mask;
const struct snd_soc_acpi_link_adr *links;
const char *drv_name;
const char *fw_filename;
+ const char *tplg_filename;
const char *board;
struct snd_soc_acpi_mach * (*machine_quirk)(void *arg);
const void *quirk_data;
void *pdata;
struct snd_soc_acpi_mach_params mach_params;
- const char *sof_fw_filename;
const char *sof_tplg_filename;
+ const u32 tplg_quirk_mask;
};
#define SND_SOC_ACPI_MAX_CODECS 3
@@ -142,4 +201,10 @@ struct snd_soc_acpi_codecs {
u8 codecs[SND_SOC_ACPI_MAX_CODECS][ACPI_ID_LEN];
};
+static inline bool snd_soc_acpi_sof_parent(struct device *dev)
+{
+ return dev->parent && dev->parent->driver && dev->parent->driver->name &&
+ !strncmp(dev->parent->driver->name, "sof-audio-acpi", strlen("sof-audio-acpi"));
+}
+
#endif
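For context, a hedged sketch of what a machine table entry looks like with the expanded _ADR descriptors above; the 64-bit address, name prefix, and link mask are made-up values:

/* Hypothetical SoundWire descriptor using the expanded types above */
static const struct snd_soc_acpi_endpoint example_endpoint = {
	.num = 0,
	.aggregated = 0,
	.group_position = 0,
	.group_id = 0,
};

static const struct snd_soc_acpi_adr_device example_adr_devs[] = {
	{
		.adr = 0x000010025D068100ULL,	/* made-up _ADR value */
		.num_endpoints = 1,
		.endpoints = &example_endpoint,
		.name_prefix = "example"
	}
};

static const struct snd_soc_acpi_link_adr example_links[] = {
	{
		.mask = BIT(0),			/* link 0 only */
		.num_adr = ARRAY_SIZE(example_adr_devs),
		.adr_d = example_adr_devs,
	},
	{}					/* null terminator */
};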
diff --git a/include/sound/soc-card.h b/include/sound/soc-card.h
new file mode 100644
index 000000000000..9d31a5c0db33
--- /dev/null
+++ b/include/sound/soc-card.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * soc-card.h
+ *
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ */
+#ifndef __SOC_CARD_H
+#define __SOC_CARD_H
+
+enum snd_soc_card_subclass {
+ SND_SOC_CARD_CLASS_INIT = 0,
+ SND_SOC_CARD_CLASS_RUNTIME = 1,
+};
+
+struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
+ const char *name);
+int snd_soc_card_jack_new(struct snd_soc_card *card, const char *id, int type,
+ struct snd_soc_jack *jack);
+int snd_soc_card_jack_new_pins(struct snd_soc_card *card, const char *id,
+ int type, struct snd_soc_jack *jack,
+ struct snd_soc_jack_pin *pins,
+ unsigned int num_pins);
+
+int snd_soc_card_suspend_pre(struct snd_soc_card *card);
+int snd_soc_card_suspend_post(struct snd_soc_card *card);
+int snd_soc_card_resume_pre(struct snd_soc_card *card);
+int snd_soc_card_resume_post(struct snd_soc_card *card);
+
+int snd_soc_card_probe(struct snd_soc_card *card);
+int snd_soc_card_late_probe(struct snd_soc_card *card);
+void snd_soc_card_fixup_controls(struct snd_soc_card *card);
+int snd_soc_card_remove(struct snd_soc_card *card);
+
+int snd_soc_card_set_bias_level(struct snd_soc_card *card,
+ struct snd_soc_dapm_context *dapm,
+ enum snd_soc_bias_level level);
+int snd_soc_card_set_bias_level_post(struct snd_soc_card *card,
+ struct snd_soc_dapm_context *dapm,
+ enum snd_soc_bias_level level);
+
+int snd_soc_card_add_dai_link(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_link);
+void snd_soc_card_remove_dai_link(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_link);
+
+/* device driver data */
+static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card,
+ void *data)
+{
+ card->drvdata = data;
+}
+
+static inline void *snd_soc_card_get_drvdata(struct snd_soc_card *card)
+{
+ return card->drvdata;
+}
+
+static inline
+struct snd_soc_dai *snd_soc_card_get_codec_dai(struct snd_soc_card *card,
+ const char *dai_name)
+{
+ struct snd_soc_pcm_runtime *rtd;
+
+ for_each_card_rtds(card, rtd) {
+ if (!strcmp(asoc_rtd_to_codec(rtd, 0)->name, dai_name))
+ return asoc_rtd_to_codec(rtd, 0);
+ }
+
+ return NULL;
+}
+
+#endif /* __SOC_CARD_H */
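The accessors in this new header compose naturally in a machine driver's late_probe; a sketch, with the DAI name and private struct invented for the example:

/* Hypothetical late_probe fragment using the new soc-card accessors */
struct example_card_data {
	struct snd_soc_dai *codec_dai;
};

static int example_late_probe(struct snd_soc_card *card)
{
	struct example_card_data *data = snd_soc_card_get_drvdata(card);

	/* look up the codec DAI registered under a made-up name */
	data->codec_dai = snd_soc_card_get_codec_dai(card, "example-aif1");
	return data->codec_dai ? 0 : -ENODEV;
}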
diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h
index 154d02fbbfed..c26ffb033777 100644
--- a/include/sound/soc-component.h
+++ b/include/sound/soc-component.h
@@ -2,7 +2,8 @@
*
* soc-component.h
*
- * Copyright (c) 2019 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
*/
#ifndef __SOC_COMPONENT_H
#define __SOC_COMPONENT_H
@@ -25,6 +26,44 @@
order++)
/* component interface */
+struct snd_compress_ops {
+ int (*open)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream);
+ int (*free)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream);
+ int (*set_params)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
+ struct snd_compr_params *params);
+ int (*get_params)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
+ struct snd_codec *params);
+ int (*set_metadata)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
+ struct snd_compr_metadata *metadata);
+ int (*get_metadata)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
+ struct snd_compr_metadata *metadata);
+ int (*trigger)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream, int cmd);
+ int (*pointer)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
+ struct snd_compr_tstamp *tstamp);
+ int (*copy)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream, char __user *buf,
+ size_t count);
+ int (*mmap)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
+ struct vm_area_struct *vma);
+ int (*ack)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream, size_t bytes);
+ int (*get_caps)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
+ struct snd_compr_caps *caps);
+ int (*get_codec_caps)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
+ struct snd_compr_codec_caps *codec);
+};
+
struct snd_soc_component_driver {
const char *name;
@@ -62,7 +101,7 @@ struct snd_soc_component_driver {
/* DT */
int (*of_xlate_dai_name)(struct snd_soc_component *component,
- struct of_phandle_args *args,
+ const struct of_phandle_args *args,
const char **dai_name);
int (*of_xlate_dai_id)(struct snd_soc_component *component,
struct device_node *endpoint);
@@ -107,8 +146,12 @@ struct snd_soc_component_driver {
int (*mmap)(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
struct vm_area_struct *vma);
+ int (*ack)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+ snd_pcm_sframes_t (*delay)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
- const struct snd_compr_ops *compr_ops;
+ const struct snd_compress_ops *compress_ops;
/* probe ordering - for components with runtime dependencies */
int probe_order;
@@ -126,8 +169,17 @@ struct snd_soc_component_driver {
unsigned int idle_bias_on:1;
unsigned int suspend_bias_off:1;
unsigned int use_pmdown_time:1; /* care pmdown_time at stop */
+ /*
+ * Indicates that the component does not care about the endianness of
+ * PCM audio data and that the core will ensure both LE and BE variants
+ * of each used format are present. Typically this is because the
+ * component sits behind a bus that abstracts away the endianness of the
+ * original data, i.e. one for which the transmission endianness is
+ * defined (I2S/SLIMbus/SoundWire), or for which the concept of
+ * endianness doesn't exist (PDM, analogue).
+ */
unsigned int endianness:1;
- unsigned int non_legacy_dai_naming:1;
+ unsigned int legacy_dai_naming:1;
/* this component uses topology and ignore machine driver FEs */
const char *ignore_machine;
@@ -136,6 +188,10 @@ struct snd_soc_component_driver {
struct snd_pcm_hw_params *params);
bool use_dai_pcm_id; /* use DAI link PCM ID as PCM device number */
int be_pcm_base; /* base device ID for all BE PCMs */
+
+#ifdef CONFIG_DEBUG_FS
+ const char *debugfs_prefix;
+#endif
};
struct snd_soc_component {
@@ -178,10 +234,16 @@ struct snd_soc_component {
/* machine specific init */
int (*init)(struct snd_soc_component *component);
-#ifdef CONFIG_DEBUG_FS
+ /* function mark */
+ void *mark_module;
+ struct snd_pcm_substream *mark_open;
+ struct snd_pcm_substream *mark_hw_params;
+ struct snd_pcm_substream *mark_trigger;
+ struct snd_compr_stream *mark_compr_open;
+ void *mark_pm;
+
struct dentry *debugfs_root;
const char *debugfs_prefix;
-#endif
};
#define for_each_component_dais(component, dai)\
@@ -286,10 +348,13 @@ static inline int snd_soc_component_cache_sync(
return regcache_sync(component->regmap);
}
+void snd_soc_component_set_aux(struct snd_soc_component *component,
+ struct snd_soc_aux_dev *aux);
+int snd_soc_component_init(struct snd_soc_component *component);
+int snd_soc_component_is_dummy(struct snd_soc_component *component);
+
/* component IO */
-int snd_soc_component_read(struct snd_soc_component *component,
- unsigned int reg, unsigned int *val);
-unsigned int snd_soc_component_read32(struct snd_soc_component *component,
+unsigned int snd_soc_component_read(struct snd_soc_component *component,
unsigned int reg);
int snd_soc_component_write(struct snd_soc_component *component,
unsigned int reg, unsigned int val);
@@ -304,6 +369,12 @@ int snd_soc_component_test_bits(struct snd_soc_component *component,
unsigned int reg, unsigned int mask,
unsigned int value);
+unsigned int snd_soc_component_read_field(struct snd_soc_component *component,
+ unsigned int reg, unsigned int mask);
+int snd_soc_component_write_field(struct snd_soc_component *component,
+ unsigned int reg, unsigned int mask,
+ unsigned int val);
+
/* component wide operations */
int snd_soc_component_set_sysclk(struct snd_soc_component *component,
int clk_id, int source,
@@ -321,6 +392,7 @@ int snd_soc_component_stream_event(struct snd_soc_component *component,
int snd_soc_component_set_bias_level(struct snd_soc_component *component,
enum snd_soc_bias_level level);
+void snd_soc_component_setup_regmap(struct snd_soc_component *component);
#ifdef CONFIG_REGMAP
void snd_soc_component_init_regmap(struct snd_soc_component *component,
struct regmap *regmap);
@@ -328,17 +400,17 @@ void snd_soc_component_exit_regmap(struct snd_soc_component *component);
#endif
#define snd_soc_component_module_get_when_probe(component)\
- snd_soc_component_module_get(component, 0)
-#define snd_soc_component_module_get_when_open(component) \
- snd_soc_component_module_get(component, 1)
+ snd_soc_component_module_get(component, NULL, 0)
+#define snd_soc_component_module_get_when_open(component, substream) \
+ snd_soc_component_module_get(component, substream, 1)
int snd_soc_component_module_get(struct snd_soc_component *component,
- int upon_open);
+ void *mark, int upon_open);
#define snd_soc_component_module_put_when_remove(component) \
- snd_soc_component_module_put(component, 0)
-#define snd_soc_component_module_put_when_close(component) \
- snd_soc_component_module_put(component, 1)
+ snd_soc_component_module_put(component, NULL, 0, 0)
+#define snd_soc_component_module_put_when_close(component, substream, rollback) \
+ snd_soc_component_module_put(component, substream, 1, rollback)
void snd_soc_component_module_put(struct snd_soc_component *component,
- int upon_open);
+ void *mark, int upon_open, int rollback);
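The extra 'mark' argument lets the core record which substream took the module reference, so an error-path rollback only drops references this substream actually owns. A simplified sketch of the open/close flow, loosely modelled on the core's PCM code:

static int example_component_open(struct snd_soc_component *component,
				  struct snd_pcm_substream *substream)
{
	int ret;

	/* records 'substream' so a later rollback can match it */
	ret = snd_soc_component_module_get_when_open(component, substream);
	if (ret < 0)
		return ret;

	return snd_soc_component_open(component, substream);
}

static void example_component_close(struct snd_soc_component *component,
				    struct snd_pcm_substream *substream,
				    int rollback)
{
	snd_soc_component_close(component, substream, rollback);
	/* on rollback, the put is skipped unless 'substream'
	 * matches the stored mark */
	snd_soc_component_module_put_when_close(component, substream,
						rollback);
}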
static inline void snd_soc_component_set_drvdata(struct snd_soc_component *c,
void *data)
@@ -351,10 +423,10 @@ static inline void *snd_soc_component_get_drvdata(struct snd_soc_component *c)
return dev_get_drvdata(c->dev);
}
-static inline bool snd_soc_component_is_active(
- struct snd_soc_component *component)
+static inline unsigned int
+snd_soc_component_active(struct snd_soc_component *component)
{
- return component->active != 0;
+ return component->active;
}
/* component pin */
@@ -382,17 +454,8 @@ int snd_soc_component_force_enable_pin_unlocked(
int snd_soc_component_open(struct snd_soc_component *component,
struct snd_pcm_substream *substream);
int snd_soc_component_close(struct snd_soc_component *component,
- struct snd_pcm_substream *substream);
-int snd_soc_component_prepare(struct snd_soc_component *component,
- struct snd_pcm_substream *substream);
-int snd_soc_component_hw_params(struct snd_soc_component *component,
- struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params);
-int snd_soc_component_hw_free(struct snd_soc_component *component,
- struct snd_pcm_substream *substream);
-int snd_soc_component_trigger(struct snd_soc_component *component,
- struct snd_pcm_substream *substream,
- int cmd);
+ struct snd_pcm_substream *substream,
+ int rollback);
void snd_soc_component_suspend(struct snd_soc_component *component);
void snd_soc_component_resume(struct snd_soc_component *component);
int snd_soc_component_is_suspended(struct snd_soc_component *component);
@@ -401,8 +464,31 @@ void snd_soc_component_remove(struct snd_soc_component *component);
int snd_soc_component_of_xlate_dai_id(struct snd_soc_component *component,
struct device_node *ep);
int snd_soc_component_of_xlate_dai_name(struct snd_soc_component *component,
- struct of_phandle_args *args,
+ const struct of_phandle_args *args,
const char **dai_name);
+int snd_soc_component_compr_open(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream);
+void snd_soc_component_compr_free(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ int rollback);
+int snd_soc_component_compr_trigger(struct snd_compr_stream *cstream, int cmd);
+int snd_soc_component_compr_set_params(struct snd_compr_stream *cstream,
+ struct snd_compr_params *params);
+int snd_soc_component_compr_get_params(struct snd_compr_stream *cstream,
+ struct snd_codec *params);
+int snd_soc_component_compr_get_caps(struct snd_compr_stream *cstream,
+ struct snd_compr_caps *caps);
+int snd_soc_component_compr_get_codec_caps(struct snd_compr_stream *cstream,
+ struct snd_compr_codec_caps *codec);
+int snd_soc_component_compr_ack(struct snd_compr_stream *cstream, size_t bytes);
+int snd_soc_component_compr_pointer(struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp);
+int snd_soc_component_compr_copy(struct snd_compr_stream *cstream,
+ char __user *buf, size_t count);
+int snd_soc_component_compr_set_metadata(struct snd_compr_stream *cstream,
+ struct snd_compr_metadata *metadata);
+int snd_soc_component_compr_get_metadata(struct snd_compr_stream *cstream,
+ struct snd_compr_metadata *metadata);
int snd_soc_pcm_component_pointer(struct snd_pcm_substream *substream);
int snd_soc_pcm_component_ioctl(struct snd_pcm_substream *substream,
@@ -417,5 +503,19 @@ int snd_soc_pcm_component_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *vma);
int snd_soc_pcm_component_new(struct snd_soc_pcm_runtime *rtd);
void snd_soc_pcm_component_free(struct snd_soc_pcm_runtime *rtd);
+int snd_soc_pcm_component_prepare(struct snd_pcm_substream *substream);
+int snd_soc_pcm_component_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params);
+void snd_soc_pcm_component_hw_free(struct snd_pcm_substream *substream,
+ int rollback);
+int snd_soc_pcm_component_trigger(struct snd_pcm_substream *substream,
+ int cmd, int rollback);
+int snd_soc_pcm_component_pm_runtime_get(struct snd_soc_pcm_runtime *rtd,
+ void *stream);
+void snd_soc_pcm_component_pm_runtime_put(struct snd_soc_pcm_runtime *rtd,
+ void *stream, int rollback);
+int snd_soc_pcm_component_ack(struct snd_pcm_substream *substream);
+void snd_soc_pcm_component_delay(struct snd_pcm_substream *substream,
+ snd_pcm_sframes_t *cpu_delay, snd_pcm_sframes_t *codec_delay);
#endif /* __SOC_COMPONENT_H */
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index eaaeb00e9e84..ea7509672086 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -36,15 +36,42 @@ struct snd_compr_stream;
#define SND_SOC_DAIFMT_MSB SND_SOC_DAIFMT_LEFT_J
#define SND_SOC_DAIFMT_LSB SND_SOC_DAIFMT_RIGHT_J
+/* Describes the possible PCM format */
+/*
+ * use SND_SOC_DAI_FORMAT_xx as each shift.
+ * see
+ * snd_soc_runtime_get_dai_fmt()
+ */
+#define SND_SOC_POSSIBLE_DAIFMT_FORMAT_SHIFT 0
+#define SND_SOC_POSSIBLE_DAIFMT_FORMAT_MASK (0xFFFF << SND_SOC_POSSIBLE_DAIFMT_FORMAT_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_I2S (1 << SND_SOC_DAI_FORMAT_I2S)
+#define SND_SOC_POSSIBLE_DAIFMT_RIGHT_J (1 << SND_SOC_DAI_FORMAT_RIGHT_J)
+#define SND_SOC_POSSIBLE_DAIFMT_LEFT_J (1 << SND_SOC_DAI_FORMAT_LEFT_J)
+#define SND_SOC_POSSIBLE_DAIFMT_DSP_A (1 << SND_SOC_DAI_FORMAT_DSP_A)
+#define SND_SOC_POSSIBLE_DAIFMT_DSP_B (1 << SND_SOC_DAI_FORMAT_DSP_B)
+#define SND_SOC_POSSIBLE_DAIFMT_AC97 (1 << SND_SOC_DAI_FORMAT_AC97)
+#define SND_SOC_POSSIBLE_DAIFMT_PDM (1 << SND_SOC_DAI_FORMAT_PDM)
+
/*
* DAI Clock gating.
*
- * DAI bit clocks can be be gated (disabled) when the DAI is not
+ * DAI bit clocks can be gated (disabled) when the DAI is not
* sending or receiving PCM data in a frame. This can be used to save power.
*/
#define SND_SOC_DAIFMT_CONT (1 << 4) /* continuous clock */
#define SND_SOC_DAIFMT_GATED (0 << 4) /* clock is gated */
+/* Describes the possible PCM format */
+/*
+ * defined in the order GATED -> CONT; GATED will be selected if both are set.
+ * see
+ * snd_soc_runtime_get_dai_fmt()
+ */
+#define SND_SOC_POSSIBLE_DAIFMT_CLOCK_SHIFT 16
+#define SND_SOC_POSSIBLE_DAIFMT_CLOCK_MASK (0xFFFF << SND_SOC_POSSIBLE_DAIFMT_CLOCK_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_GATED (0x1ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_CONT (0x2ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_SHIFT)
+
/*
* DAI hardware signal polarity.
*
@@ -71,22 +98,52 @@ struct snd_compr_stream;
#define SND_SOC_DAIFMT_IB_NF (4 << 8) /* invert BCLK + normal FRM */
#define SND_SOC_DAIFMT_IB_IF (4 << 8) /* invert BCLK + FRM */
+/* Describes the possible PCM format */
+#define SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT 32
+#define SND_SOC_POSSIBLE_DAIFMT_INV_MASK (0xFFFFULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_NB_NF (0x1ULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_NB_IF (0x2ULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_IB_NF (0x4ULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_IB_IF (0x8ULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT)
+
/*
- * DAI hardware clock masters.
+ * DAI hardware clock providers/consumers
*
* This is wrt the codec, the inverse is true for the interface
- * i.e. if the codec is clk and FRM master then the interface is
- * clk and frame slave.
+ * i.e. if the codec is clk and FRM provider then the interface is
+ * clk and frame consumer.
*/
-#define SND_SOC_DAIFMT_CBM_CFM (1 << 12) /* codec clk & FRM master */
-#define SND_SOC_DAIFMT_CBS_CFM (2 << 12) /* codec clk slave & FRM master */
-#define SND_SOC_DAIFMT_CBM_CFS (3 << 12) /* codec clk master & frame slave */
-#define SND_SOC_DAIFMT_CBS_CFS (4 << 12) /* codec clk & FRM slave */
-
-#define SND_SOC_DAIFMT_FORMAT_MASK 0x000f
-#define SND_SOC_DAIFMT_CLOCK_MASK 0x00f0
-#define SND_SOC_DAIFMT_INV_MASK 0x0f00
-#define SND_SOC_DAIFMT_MASTER_MASK 0xf000
+#define SND_SOC_DAIFMT_CBP_CFP (1 << 12) /* codec clk provider & frame provider */
+#define SND_SOC_DAIFMT_CBC_CFP (2 << 12) /* codec clk consumer & frame provider */
+#define SND_SOC_DAIFMT_CBP_CFC (3 << 12) /* codec clk provider & frame consumer */
+#define SND_SOC_DAIFMT_CBC_CFC (4 << 12) /* codec clk consumer & frame consumer */
+
+/* previous definitions kept for backwards-compatibility, do not use in new contributions */
+#define SND_SOC_DAIFMT_CBM_CFM SND_SOC_DAIFMT_CBP_CFP
+#define SND_SOC_DAIFMT_CBS_CFM SND_SOC_DAIFMT_CBC_CFP
+#define SND_SOC_DAIFMT_CBM_CFS SND_SOC_DAIFMT_CBP_CFC
+#define SND_SOC_DAIFMT_CBS_CFS SND_SOC_DAIFMT_CBC_CFC
+
+/* when passed to set_fmt directly, these indicate whether the device is provider or consumer */
+#define SND_SOC_DAIFMT_BP_FP SND_SOC_DAIFMT_CBP_CFP
+#define SND_SOC_DAIFMT_BC_FP SND_SOC_DAIFMT_CBC_CFP
+#define SND_SOC_DAIFMT_BP_FC SND_SOC_DAIFMT_CBP_CFC
+#define SND_SOC_DAIFMT_BC_FC SND_SOC_DAIFMT_CBC_CFC
+
+/* Describes the possible PCM format */
+#define SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT 48
+#define SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_MASK (0xFFFFULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_CBP_CFP (0x1ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_CBC_CFP (0x2ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_CBP_CFC (0x4ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_CBC_CFC (0x8ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT)
+
+#define SND_SOC_DAIFMT_FORMAT_MASK 0x000f
+#define SND_SOC_DAIFMT_CLOCK_MASK 0x00f0
+#define SND_SOC_DAIFMT_INV_MASK 0x0f00
+#define SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK 0xf000
+
+#define SND_SOC_DAIFMT_MASTER_MASK SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK
/*
* Master Clock Directions
@@ -123,6 +180,8 @@ int snd_soc_dai_set_pll(struct snd_soc_dai *dai,
int snd_soc_dai_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio);
/* Digital Audio interface formatting */
+int snd_soc_dai_get_fmt_max_priority(struct snd_soc_pcm_runtime *rtd);
+u64 snd_soc_dai_get_fmt(struct snd_soc_dai *dai, int priority);
int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt);
int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai,
@@ -149,26 +208,68 @@ int snd_soc_dai_hw_params(struct snd_soc_dai *dai,
struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params);
void snd_soc_dai_hw_free(struct snd_soc_dai *dai,
- struct snd_pcm_substream *substream);
+ struct snd_pcm_substream *substream,
+ int rollback);
int snd_soc_dai_startup(struct snd_soc_dai *dai,
struct snd_pcm_substream *substream);
void snd_soc_dai_shutdown(struct snd_soc_dai *dai,
- struct snd_pcm_substream *substream);
-int snd_soc_dai_prepare(struct snd_soc_dai *dai,
- struct snd_pcm_substream *substream);
-int snd_soc_dai_trigger(struct snd_soc_dai *dai,
- struct snd_pcm_substream *substream, int cmd);
-int snd_soc_dai_bespoke_trigger(struct snd_soc_dai *dai,
- struct snd_pcm_substream *substream, int cmd);
-snd_pcm_sframes_t snd_soc_dai_delay(struct snd_soc_dai *dai,
- struct snd_pcm_substream *substream);
+ struct snd_pcm_substream *substream, int rollback);
void snd_soc_dai_suspend(struct snd_soc_dai *dai);
void snd_soc_dai_resume(struct snd_soc_dai *dai);
-int snd_soc_dai_probe(struct snd_soc_dai *dai);
-int snd_soc_dai_remove(struct snd_soc_dai *dai);
int snd_soc_dai_compress_new(struct snd_soc_dai *dai,
struct snd_soc_pcm_runtime *rtd, int num);
bool snd_soc_dai_stream_valid(struct snd_soc_dai *dai, int stream);
+void snd_soc_dai_link_set_capabilities(struct snd_soc_dai_link *dai_link);
+void snd_soc_dai_action(struct snd_soc_dai *dai,
+ int stream, int action);
+static inline void snd_soc_dai_activate(struct snd_soc_dai *dai,
+ int stream)
+{
+ snd_soc_dai_action(dai, stream, 1);
+}
+static inline void snd_soc_dai_deactivate(struct snd_soc_dai *dai,
+ int stream)
+{
+ snd_soc_dai_action(dai, stream, -1);
+}
+int snd_soc_dai_active(struct snd_soc_dai *dai);
+
+int snd_soc_pcm_dai_probe(struct snd_soc_pcm_runtime *rtd, int order);
+int snd_soc_pcm_dai_remove(struct snd_soc_pcm_runtime *rtd, int order);
+int snd_soc_pcm_dai_new(struct snd_soc_pcm_runtime *rtd);
+int snd_soc_pcm_dai_prepare(struct snd_pcm_substream *substream);
+int snd_soc_pcm_dai_trigger(struct snd_pcm_substream *substream, int cmd,
+ int rollback);
+int snd_soc_pcm_dai_bespoke_trigger(struct snd_pcm_substream *substream,
+ int cmd);
+void snd_soc_pcm_dai_delay(struct snd_pcm_substream *substream,
+ snd_pcm_sframes_t *cpu_delay, snd_pcm_sframes_t *codec_delay);
+
+int snd_soc_dai_compr_startup(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream);
+void snd_soc_dai_compr_shutdown(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream,
+ int rollback);
+int snd_soc_dai_compr_trigger(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream, int cmd);
+int snd_soc_dai_compr_set_params(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_params *params);
+int snd_soc_dai_compr_get_params(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream,
+ struct snd_codec *params);
+int snd_soc_dai_compr_ack(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream,
+ size_t bytes);
+int snd_soc_dai_compr_pointer(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp);
+int snd_soc_dai_compr_set_metadata(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_metadata *metadata);
+int snd_soc_dai_compr_get_metadata(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_metadata *metadata);
struct snd_soc_dai_ops {
/*
@@ -200,13 +301,14 @@ struct snd_soc_dai_ops {
unsigned int *rx_num, unsigned int *rx_slot);
int (*set_tristate)(struct snd_soc_dai *dai, int tristate);
- int (*set_sdw_stream)(struct snd_soc_dai *dai,
- void *stream, int direction);
+ int (*set_stream)(struct snd_soc_dai *dai,
+ void *stream, int direction);
+ void *(*get_stream)(struct snd_soc_dai *dai, int direction);
+
/*
* DAI digital mute - optional.
* Called by soc-core to minimise any pops.
*/
- int (*digital_mute)(struct snd_soc_dai *dai, int mute);
int (*mute_stream)(struct snd_soc_dai *dai, int mute, int stream);
/*
@@ -240,6 +342,19 @@ struct snd_soc_dai_ops {
*/
snd_pcm_sframes_t (*delay)(struct snd_pcm_substream *,
struct snd_soc_dai *);
+
+ /*
+ * Format list for auto selection.
+ * More formats are admitted at higher priority levels if the
+ * preferred format could not be selected.
+ * see
+ * snd_soc_dai_get_fmt()
+ */
+ u64 *auto_selectable_formats;
+ int num_auto_selectable_formats;
+
+ /* bit field */
+ unsigned int no_capture_mute:1;
};
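A hedged sketch of a DAI driver advertising formats for automatic negotiation via auto_selectable_formats; the concrete format choices and the two-level priority split are assumptions:

static u64 example_dai_formats[] = {
	/* priority 1: preferred formats */
	SND_SOC_POSSIBLE_DAIFMT_I2S |
	SND_SOC_POSSIBLE_DAIFMT_CBC_CFC,
	/* priority 2: admitted as a fallback */
	SND_SOC_POSSIBLE_DAIFMT_LEFT_J |
	SND_SOC_POSSIBLE_DAIFMT_RIGHT_J,
};

static const struct snd_soc_dai_ops example_dai_ops = {
	/* snd_soc_dai_get_fmt(dai, n) ORs together the first n entries */
	.auto_selectable_formats	= example_dai_formats,
	.num_auto_selectable_formats	= ARRAY_SIZE(example_dai_formats),
};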
struct snd_soc_cdai_ops {
@@ -299,9 +414,9 @@ struct snd_soc_dai_driver {
/* DAI capabilities */
struct snd_soc_pcm_stream capture;
struct snd_soc_pcm_stream playback;
- unsigned int symmetric_rates:1;
+ unsigned int symmetric_rate:1;
unsigned int symmetric_channels:1;
- unsigned int symmetric_samplebits:1;
+ unsigned int symmetric_sample_bits:1;
/* probe ordering - for components with runtime dependencies */
int probe_order;
@@ -322,11 +437,7 @@ struct snd_soc_dai {
struct snd_soc_dai_driver *driver;
/* DAI runtime info */
- unsigned int capture_active; /* stream usage count */
- unsigned int playback_active; /* stream usage count */
- unsigned int probed:1;
-
- unsigned int active;
+ unsigned int stream_active[SNDRV_PCM_STREAM_LAST + 1]; /* usage count */
struct snd_soc_dapm_widget *playback_widget;
struct snd_soc_dapm_widget *capture_widget;
@@ -348,8 +459,32 @@ struct snd_soc_dai {
unsigned int rx_mask;
struct list_head list;
+
+ /* function mark */
+ struct snd_pcm_substream *mark_startup;
+ struct snd_pcm_substream *mark_hw_params;
+ struct snd_pcm_substream *mark_trigger;
+ struct snd_compr_stream *mark_compr_startup;
+
+ /* bit field */
+ unsigned int probed:1;
};
+static inline struct snd_soc_pcm_stream *
+snd_soc_dai_get_pcm_stream(const struct snd_soc_dai *dai, int stream)
+{
+ return (stream == SNDRV_PCM_STREAM_PLAYBACK) ?
+ &dai->driver->playback : &dai->driver->capture;
+}
+
+static inline
+struct snd_soc_dapm_widget *snd_soc_dai_get_widget(
+ struct snd_soc_dai *dai, int stream)
+{
+ return (stream == SNDRV_PCM_STREAM_PLAYBACK) ?
+ dai->playback_widget : dai->capture_widget;
+}
+
static inline void *snd_soc_dai_get_dma_data(const struct snd_soc_dai *dai,
const struct snd_pcm_substream *ss)
{
@@ -386,24 +521,50 @@ static inline void *snd_soc_dai_get_drvdata(struct snd_soc_dai *dai)
}
/**
- * snd_soc_dai_set_sdw_stream() - Configures a DAI for SDW stream operation
+ * snd_soc_dai_set_stream() - Configures a DAI for stream operation
* @dai: DAI
- * @stream: STREAM
+ * @stream: STREAM (opaque structure depending on DAI type)
* @direction: Stream direction (Playback/Capture)
- * SoundWire subsystem doesn't have a notion of direction and we reuse
+ * Some subsystems, such as SoundWire, don't have a notion of direction and we reuse
* the ASoC stream direction to configure sink/source ports.
* Playback maps to source ports and Capture to sink ports.
*
* This should be invoked with NULL to clear the stream set previously.
* Returns 0 on success, a negative error code otherwise.
*/
-static inline int snd_soc_dai_set_sdw_stream(struct snd_soc_dai *dai,
- void *stream, int direction)
+static inline int snd_soc_dai_set_stream(struct snd_soc_dai *dai,
+ void *stream, int direction)
{
- if (dai->driver->ops->set_sdw_stream)
- return dai->driver->ops->set_sdw_stream(dai, stream, direction);
+ if (dai->driver->ops->set_stream)
+ return dai->driver->ops->set_stream(dai, stream, direction);
else
return -ENOTSUPP;
}
+/**
+ * snd_soc_dai_get_stream() - Retrieves stream from DAI
+ * @dai: DAI
+ * @direction: Stream direction (Playback/Capture)
+ *
+ * This routine only retrieves the stream that was previously configured
+ * with snd_soc_dai_set_stream().
+ *
+ * Returns a pointer to the stream or an ERR_PTR value, e.g.
+ * ERR_PTR(-ENOTSUPP) if the callback is not supported.
+ */
+static inline void *snd_soc_dai_get_stream(struct snd_soc_dai *dai,
+ int direction)
+{
+ if (dai->driver->ops->get_stream)
+ return dai->driver->ops->get_stream(dai, direction);
+ else
+ return ERR_PTR(-ENOTSUPP);
+}
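Usage sketch for the set_stream/get_stream pair in a hypothetical SoundWire-style machine driver; example_alloc_stream() and example_free_stream() are invented placeholders:

static void *example_alloc_stream(void);	/* hypothetical */
static void example_free_stream(void *s);	/* hypothetical */

static int example_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);

	return snd_soc_dai_set_stream(cpu_dai, example_alloc_stream(),
				      substream->stream);
}

static int example_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	void *sruntime = snd_soc_dai_get_stream(cpu_dai, substream->stream);

	if (!IS_ERR(sruntime))
		example_free_stream(sruntime);

	/* clear the stream set previously */
	return snd_soc_dai_set_stream(cpu_dai, NULL, substream->stream);
}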
+
+static inline unsigned int
+snd_soc_dai_stream_active(struct snd_soc_dai *dai, int stream)
+{
+ return dai->stream_active[stream];
+}
+
#endif
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 1b6afbc1a4ed..ebb8e7a7fc29 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -16,6 +16,8 @@
#include <sound/asoc.h>
struct device;
+struct snd_soc_pcm_runtime;
+struct soc_enum;
/* widget has no PM register bit */
#define SND_SOC_NOPM -1
@@ -376,6 +378,24 @@ struct snd_soc_dapm_widget_list;
struct snd_soc_dapm_update;
enum snd_soc_dapm_direction;
+/*
+ * Bias levels
+ *
+ * @ON: Bias is fully on for audio playback and capture operations.
+ * @PREPARE: Prepare for audio operations. Called before DAPM switching for
+ * stream start and stop operations.
+ * @STANDBY: Low power standby state when no playback/capture operations are
+ * in progress. NOTE: The transition time between STANDBY and ON
+ * should be as fast as possible and no longer than 10ms.
+ * @OFF: Power Off. No restrictions on transition times.
+ */
+enum snd_soc_bias_level {
+ SND_SOC_BIAS_OFF = 0,
+ SND_SOC_BIAS_STANDBY = 1,
+ SND_SOC_BIAS_PREPARE = 2,
+ SND_SOC_BIAS_ON = 3,
+};
+
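A minimal sketch of a component set_bias_level callback built on this enum; the register work at each level is left as a placeholder for a hypothetical codec:

static int example_set_bias_level(struct snd_soc_component *component,
				  enum snd_soc_bias_level level)
{
	switch (level) {
	case SND_SOC_BIAS_ON:
		/* full power, audio is running */
		break;
	case SND_SOC_BIAS_PREPARE:
		/* ramp up references before streaming starts */
		break;
	case SND_SOC_BIAS_STANDBY:
		/* low power; STANDBY -> ON must stay under 10ms */
		break;
	case SND_SOC_BIAS_OFF:
		/* everything may be powered down */
		break;
	}

	return 0;
}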
int dapm_regulator_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event);
int dapm_clock_event(struct snd_soc_dapm_widget *w,
@@ -409,6 +429,7 @@ struct snd_soc_dapm_widget *snd_soc_dapm_new_control_unlocked(
const struct snd_soc_dapm_widget *widget);
int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
struct snd_soc_dai *dai);
+void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w);
int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card);
void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
@@ -482,6 +503,7 @@ int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
struct snd_soc_dapm_widget_list **list,
bool (*custom_stop_condition)(struct snd_soc_dapm_widget *,
enum snd_soc_dapm_direction));
+void snd_soc_dapm_dai_free_widgets(struct snd_soc_dapm_widget_list **list);
struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_dapm(
struct snd_kcontrol *kcontrol);
@@ -688,9 +710,14 @@ struct snd_soc_dapm_context {
/* A list of widgets associated with an object, typically a snd_kcontrol */
struct snd_soc_dapm_widget_list {
int num_widgets;
- struct snd_soc_dapm_widget *widgets[0];
+ struct snd_soc_dapm_widget *widgets[];
};
+#define for_each_dapm_widgets(list, i, widget) \
+ for ((i) = 0; \
+ (i) < list->num_widgets && (widget = list->widgets[i]); \
+ (i)++)
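An illustrative walk over a widget list using the new iterator, paired with snd_soc_dapm_dai_free_widgets() declared above; the debug print is only an example use:

static void example_dump_widgets(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget_list *list;
	struct snd_soc_dapm_widget *w;
	int i;

	if (snd_soc_dapm_dai_get_connected_widgets(dai, stream, &list,
						   NULL) <= 0)
		return;

	for_each_dapm_widgets(list, i, w)
		dev_dbg(dai->dev, "connected widget: %s\n", w->name);

	snd_soc_dapm_dai_free_widgets(&list);
}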
+
struct snd_soc_dapm_stats {
int power_checks;
int path_checks;
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index b654ebfc8766..5b689c663290 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -101,6 +101,10 @@ struct snd_soc_dpcm_runtime {
enum snd_soc_dpcm_state state;
int trigger_pending; /* trigger cmd + 1 if pending, 0 if not */
+
+ int be_start; /* refcount protected by BE stream pcm lock */
+ int be_pause; /* refcount protected by BE stream pcm lock */
+ bool fe_pause; /* used to track STOP after PAUSE */
};
#define for_each_dpcm_fe(be, stream, _dpcm) \
@@ -132,17 +136,8 @@ int snd_soc_dpcm_be_can_update(struct snd_soc_pcm_runtime *fe,
struct snd_pcm_substream *
snd_soc_dpcm_get_substream(struct snd_soc_pcm_runtime *be, int stream);
-/* get the BE runtime state */
-enum snd_soc_dpcm_state
- snd_soc_dpcm_be_get_state(struct snd_soc_pcm_runtime *be, int stream);
-
-/* set the BE runtime state */
-void snd_soc_dpcm_be_set_state(struct snd_soc_pcm_runtime *be, int stream,
- enum snd_soc_dpcm_state state);
-
-/* internal use only */
-int soc_dpcm_be_digital_mute(struct snd_soc_pcm_runtime *fe, int mute);
-int soc_dpcm_runtime_update(struct snd_soc_card *);
+/* update audio routing between PCMs and any DAI links */
+int snd_soc_dpcm_runtime_update(struct snd_soc_card *card);
#ifdef CONFIG_DEBUG_FS
void soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd);
@@ -154,23 +149,25 @@ static inline void soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd)
int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
int stream, struct snd_soc_dapm_widget_list **list_);
+void dpcm_path_put(struct snd_soc_dapm_widget_list **list);
int dpcm_process_paths(struct snd_soc_pcm_runtime *fe,
int stream, struct snd_soc_dapm_widget_list **list, int new);
int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream);
-int dpcm_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream);
+void dpcm_be_dai_stop(struct snd_soc_pcm_runtime *fe, int stream,
+ int do_hw_free, struct snd_soc_dpcm *last);
void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream);
void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream);
-int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream);
+void dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream);
int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream);
int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream, int cmd);
int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream);
int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
int event);
+bool dpcm_end_walk_at_be(struct snd_soc_dapm_widget *widget, enum snd_soc_dapm_direction dir);
-static inline void dpcm_path_put(struct snd_soc_dapm_widget_list **list)
-{
- kfree(*list);
-}
-
+#define dpcm_be_dai_startup_rollback(fe, stream, last) \
+ dpcm_be_dai_stop(fe, stream, 0, last)
+#define dpcm_be_dai_startup_unwind(fe, stream) dpcm_be_dai_stop(fe, stream, 0, NULL)
+#define dpcm_be_dai_shutdown(fe, stream) dpcm_be_dai_stop(fe, stream, 1, NULL)
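A simplified sketch of how an FE startup error path might use the unwind macro, loosely modelled on the core PCM code; example_fe_open() is a hypothetical stand-in for the FE-side open step:

static int example_fe_open(struct snd_soc_pcm_runtime *fe, int stream); /* hypothetical */

static int example_fe_startup(struct snd_soc_pcm_runtime *fe, int stream)
{
	int ret;

	ret = dpcm_be_dai_startup(fe, stream);
	if (ret < 0)
		return ret;

	ret = example_fe_open(fe, stream);
	if (ret < 0)
		/* stop (without hw_free) every BE started above */
		dpcm_be_dai_startup_unwind(fe, stream);

	return ret;
}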
#endif
diff --git a/include/sound/soc-jack.h b/include/sound/soc-jack.h
new file mode 100644
index 000000000000..a0abb1ee5110
--- /dev/null
+++ b/include/sound/soc-jack.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * soc-jack.h
+ *
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ */
+#ifndef __SOC_JACK_H
+#define __SOC_JACK_H
+
+/**
+ * struct snd_soc_jack_pin - Describes a pin to update based on jack detection
+ *
+ * @pin: name of the pin to update
+ * @mask: bits to check for in reported jack status
+ * @invert: if non-zero then pin is enabled when status is not reported
+ * @list: internal list entry
+ */
+struct snd_soc_jack_pin {
+ struct list_head list;
+ const char *pin;
+ int mask;
+ bool invert;
+};
+
+/**
+ * struct snd_soc_jack_zone - Describes voltage zones of jack detection
+ *
+ * @min_mv: start voltage in mv
+ * @max_mv: end voltage in mv
+ * @jack_type: type of jack that is expected for this voltage
+ * @debounce_time: debounce time for the jack; the codec driver should wait
+ * for this duration before reading the ADC for voltages
+ * @list: internal list entry
+ */
+struct snd_soc_jack_zone {
+ unsigned int min_mv;
+ unsigned int max_mv;
+ unsigned int jack_type;
+ unsigned int debounce_time;
+ struct list_head list;
+};
+
+/**
+ * struct snd_soc_jack_gpio - Describes a gpio pin for jack detection
+ *
+ * @gpio: legacy gpio number
+ * @idx: gpio descriptor index within the function of the GPIO
+ * consumer device
+ * @gpiod_dev: GPIO consumer device
+ * @name: gpio name. Also used as the connection ID for the GPIO consumer
+ * device function name lookup
+ * @report: value to report when jack detected
+ * @invert: report presence in low state
+ * @debounce_time: debounce time in ms
+ * @wake: enable as wake source
+ * @jack_status_check: callback function which overrides the detection
+ * to provide more complex checks (eg, reading an
+ * ADC).
+ */
+struct snd_soc_jack_gpio {
+ unsigned int gpio;
+ unsigned int idx;
+ struct device *gpiod_dev;
+ const char *name;
+ int report;
+ int invert;
+ int debounce_time;
+ bool wake;
+
+ /* private: */
+ struct snd_soc_jack *jack;
+ struct delayed_work work;
+ struct notifier_block pm_notifier;
+ struct gpio_desc *desc;
+
+ void *data;
+ /* public: */
+ int (*jack_status_check)(void *data);
+};
+
+struct snd_soc_jack {
+ struct mutex mutex;
+ struct snd_jack *jack;
+ struct snd_soc_card *card;
+ struct list_head pins;
+ int status;
+ struct blocking_notifier_head notifier;
+ struct list_head jack_zones;
+};
+
+/* Jack reporting */
+void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask);
+int snd_soc_jack_add_pins(struct snd_soc_jack *jack, int count,
+ struct snd_soc_jack_pin *pins);
+void snd_soc_jack_notifier_register(struct snd_soc_jack *jack,
+ struct notifier_block *nb);
+void snd_soc_jack_notifier_unregister(struct snd_soc_jack *jack,
+ struct notifier_block *nb);
+int snd_soc_jack_add_zones(struct snd_soc_jack *jack, int count,
+ struct snd_soc_jack_zone *zones);
+int snd_soc_jack_get_type(struct snd_soc_jack *jack, int micbias_voltage);
+#ifdef CONFIG_GPIOLIB
+int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
+ struct snd_soc_jack_gpio *gpios);
+int snd_soc_jack_add_gpiods(struct device *gpiod_dev,
+ struct snd_soc_jack *jack,
+ int count, struct snd_soc_jack_gpio *gpios);
+void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
+ struct snd_soc_jack_gpio *gpios);
+#else
+static inline int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
+ struct snd_soc_jack_gpio *gpios)
+{
+ return 0;
+}
+
+static inline int snd_soc_jack_add_gpiods(struct device *gpiod_dev,
+ struct snd_soc_jack *jack,
+ int count,
+ struct snd_soc_jack_gpio *gpios)
+{
+ return 0;
+}
+
+static inline void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
+ struct snd_soc_jack_gpio *gpios)
+{
+}
+#endif
+
+#endif /* __SOC_JACK_H */
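A hedged machine-driver sketch tying these pieces together for headphone detection; the pin name, GPIO details and the snd_soc_card_jack_new() helper (declared in soc-card.h) reflect common usage rather than this diff:

static struct snd_soc_jack_pin example_jack_pins[] = {
	{ .pin = "Headphone", .mask = SND_JACK_HEADPHONE },
};

static struct snd_soc_jack_gpio example_jack_gpio = {
	.name		= "hp-detect",	/* hypothetical GPIO name */
	.report		= SND_JACK_HEADPHONE,
	.debounce_time	= 150,
};

static struct snd_soc_jack example_jack;

static int example_card_init(struct snd_soc_pcm_runtime *rtd)
{
	int ret;

	ret = snd_soc_card_jack_new(rtd->card, "Headphone",
				    SND_JACK_HEADPHONE, &example_jack,
				    example_jack_pins,
				    ARRAY_SIZE(example_jack_pins));
	if (ret)
		return ret;

	return snd_soc_jack_add_gpios(&example_jack, 1, &example_jack_gpio);
}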
diff --git a/include/sound/soc-link.h b/include/sound/soc-link.h
new file mode 100644
index 000000000000..9314cde1756b
--- /dev/null
+++ b/include/sound/soc-link.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * soc-link.h
+ *
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ */
+#ifndef __SOC_LINK_H
+#define __SOC_LINK_H
+
+int snd_soc_link_init(struct snd_soc_pcm_runtime *rtd);
+void snd_soc_link_exit(struct snd_soc_pcm_runtime *rtd);
+int snd_soc_link_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params);
+
+int snd_soc_link_startup(struct snd_pcm_substream *substream);
+void snd_soc_link_shutdown(struct snd_pcm_substream *substream,
+ int rollback);
+int snd_soc_link_prepare(struct snd_pcm_substream *substream);
+int snd_soc_link_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params);
+void snd_soc_link_hw_free(struct snd_pcm_substream *substream,
+ int rollback);
+
+int snd_soc_link_trigger(struct snd_pcm_substream *substream, int cmd,
+ int rollback);
+int snd_soc_link_compr_startup(struct snd_compr_stream *cstream);
+void snd_soc_link_compr_shutdown(struct snd_compr_stream *cstream,
+ int rollback);
+int snd_soc_link_compr_set_params(struct snd_compr_stream *cstream);
+
+#endif /* __SOC_LINK_H */
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index 5223896de26f..b4b896f83b94 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -31,9 +31,6 @@ struct snd_soc_dai_driver;
struct snd_soc_dai;
struct snd_soc_dapm_route;
-/* object scan be loaded and unloaded in groups with identfying indexes */
-#define SND_SOC_TPLG_INDEX_ALL 0 /* ID that matches all FW objects */
-
/* dynamic object type */
enum snd_soc_dobj_type {
SND_SOC_DOBJ_NONE = 0, /* object is not dynamic */
@@ -57,7 +54,7 @@ struct snd_soc_dobj_control {
/* dynamic widget object */
struct snd_soc_dobj_widget {
- unsigned int kcontrol_type; /* kcontrol type: mixer, enum, bytes */
+ unsigned int *kcontrol_type; /* kcontrol type: mixer, enum, bytes */
};
/* generic dynamic object - all dynamic objects belong to this struct */
@@ -154,7 +151,7 @@ struct snd_soc_tplg_ops {
struct snd_soc_tplg_hdr *);
/* completion - called at completion of firmware loading */
- void (*complete)(struct snd_soc_component *);
+ int (*complete)(struct snd_soc_component *comp);
/* manifest - optional to inform component of manifest */
int (*manifest)(struct snd_soc_component *, int index,
@@ -181,14 +178,8 @@ static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr)
/* Dynamic Object loading and removal for component drivers */
int snd_soc_tplg_component_load(struct snd_soc_component *comp,
- struct snd_soc_tplg_ops *ops, const struct firmware *fw,
- u32 index);
-int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index);
-
-/* Widget removal - widgets also removed wth component API */
-void snd_soc_tplg_widget_remove(struct snd_soc_dapm_widget *w);
-void snd_soc_tplg_widget_remove_all(struct snd_soc_dapm_context *dapm,
- u32 index);
+ struct snd_soc_tplg_ops *ops, const struct firmware *fw);
+int snd_soc_tplg_component_remove(struct snd_soc_component *comp);
/* Binds event handlers to dynamic widgets */
int snd_soc_tplg_widget_bind_event(struct snd_soc_dapm_widget *w,
@@ -197,8 +188,7 @@ int snd_soc_tplg_widget_bind_event(struct snd_soc_dapm_widget *w,
#else
-static inline int snd_soc_tplg_component_remove(struct snd_soc_component *comp,
- u32 index)
+static inline int snd_soc_tplg_component_remove(struct snd_soc_component *comp)
{
return 0;
}
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 8a2266676b2d..37bbfc8b45cb 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -31,31 +31,31 @@
#define SOC_DOUBLE_VALUE(xreg, shift_left, shift_right, xmax, xinvert, xautodisable) \
((unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .rreg = xreg, .shift = shift_left, \
- .rshift = shift_right, .max = xmax, .platform_max = xmax, \
+ .rshift = shift_right, .max = xmax, \
.invert = xinvert, .autodisable = xautodisable})
#define SOC_DOUBLE_S_VALUE(xreg, shift_left, shift_right, xmin, xmax, xsign_bit, xinvert, xautodisable) \
((unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .rreg = xreg, .shift = shift_left, \
- .rshift = shift_right, .min = xmin, .max = xmax, .platform_max = xmax, \
+ .rshift = shift_right, .min = xmin, .max = xmax, \
.sign_bit = xsign_bit, .invert = xinvert, .autodisable = xautodisable})
#define SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert, xautodisable) \
SOC_DOUBLE_VALUE(xreg, xshift, xshift, xmax, xinvert, xautodisable)
#define SOC_SINGLE_VALUE_EXT(xreg, xmax, xinvert) \
((unsigned long)&(struct soc_mixer_control) \
- {.reg = xreg, .max = xmax, .platform_max = xmax, .invert = xinvert})
+ {.reg = xreg, .max = xmax, .invert = xinvert})
#define SOC_DOUBLE_R_VALUE(xlreg, xrreg, xshift, xmax, xinvert) \
((unsigned long)&(struct soc_mixer_control) \
{.reg = xlreg, .rreg = xrreg, .shift = xshift, .rshift = xshift, \
- .max = xmax, .platform_max = xmax, .invert = xinvert})
+ .max = xmax, .invert = xinvert})
#define SOC_DOUBLE_R_S_VALUE(xlreg, xrreg, xshift, xmin, xmax, xsign_bit, xinvert) \
((unsigned long)&(struct soc_mixer_control) \
{.reg = xlreg, .rreg = xrreg, .shift = xshift, .rshift = xshift, \
- .max = xmax, .min = xmin, .platform_max = xmax, .sign_bit = xsign_bit, \
+ .max = xmax, .min = xmin, .sign_bit = xsign_bit, \
.invert = xinvert})
#define SOC_DOUBLE_R_RANGE_VALUE(xlreg, xrreg, xshift, xmin, xmax, xinvert) \
((unsigned long)&(struct soc_mixer_control) \
{.reg = xlreg, .rreg = xrreg, .shift = xshift, .rshift = xshift, \
- .min = xmin, .max = xmax, .platform_max = xmax, .invert = xinvert})
+ .min = xmin, .max = xmax, .invert = xinvert})
#define SOC_SINGLE(xname, reg, shift, max, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_volsw, .get = snd_soc_get_volsw,\
@@ -68,7 +68,7 @@
.private_value = (unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .rreg = xreg, .shift = xshift, \
.rshift = xshift, .min = xmin, .max = xmax, \
- .platform_max = xmax, .invert = xinvert} }
+ .invert = xinvert} }
#define SOC_SINGLE_TLV(xname, reg, shift, max, invert, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
@@ -99,7 +99,7 @@
.private_value = (unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .rreg = xreg, .shift = xshift, \
.rshift = xshift, .min = xmin, .max = xmax, \
- .platform_max = xmax, .invert = xinvert} }
+ .invert = xinvert} }
#define SOC_DOUBLE(xname, reg, shift_left, shift_right, max, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
.info = snd_soc_info_volsw, .get = snd_soc_get_volsw, \
@@ -136,6 +136,18 @@
.put = snd_soc_put_volsw, \
.private_value = SOC_DOUBLE_VALUE(reg, shift_left, shift_right, \
max, invert, 0) }
+#define SOC_DOUBLE_SX_TLV(xname, xreg, shift_left, shift_right, xmin, xmax, tlv_array) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
+ SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .tlv.p = (tlv_array), \
+ .info = snd_soc_info_volsw_sx, \
+ .get = snd_soc_get_volsw_sx, \
+ .put = snd_soc_put_volsw_sx, \
+ .private_value = (unsigned long)&(struct soc_mixer_control) \
+ {.reg = xreg, .rreg = xreg, \
+ .shift = shift_left, .rshift = shift_right, \
+ .max = xmax, .min = xmin} }
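Sketch of a control built with the new SOC_DOUBLE_SX_TLV macro; the register, shifts, volume codes and dB scale are illustrative assumptions:

#include <sound/tlv.h>

static const DECLARE_TLV_DB_SCALE(example_pcm_tlv, -5150, 50, 0);

static const struct snd_kcontrol_new example_controls[] = {
	/* L in bits [7:0], R in bits [15:8] of a hypothetical register */
	SOC_DOUBLE_SX_TLV("PCM Playback Volume", 0x1c, 0, 8,
			  0x19 /* min code */, 0x7f /* max code */,
			  example_pcm_tlv),
};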
#define SOC_DOUBLE_R_TLV(xname, reg_left, reg_right, xshift, xmax, xinvert, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
@@ -176,6 +188,8 @@
.get = snd_soc_get_volsw, .put = snd_soc_put_volsw, \
.private_value = SOC_DOUBLE_R_S_VALUE(reg_left, reg_right, xshift, \
xmin, xmax, xsign_bit, xinvert) }
+#define SOC_SINGLE_S_TLV(xname, xreg, xshift, xmin, xmax, xsign_bit, xinvert, tlv_array) \
+ SOC_DOUBLE_R_S_TLV(xname, xreg, xreg, xshift, xmin, xmax, xsign_bit, xinvert, tlv_array)
#define SOC_SINGLE_S8_TLV(xname, xreg, xmin, xmax, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
@@ -185,7 +199,7 @@
.put = snd_soc_put_volsw, \
.private_value = (unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .rreg = xreg, \
- .min = xmin, .max = xmax, .platform_max = xmax, \
+ .min = xmin, .max = xmax, \
.sign_bit = 7,} }
#define SOC_DOUBLE_S8_TLV(xname, xreg, xmin, xmax, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
@@ -259,7 +273,7 @@
.private_value = (unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .rreg = xreg, .shift = xshift, \
.rshift = xshift, .min = xmin, .max = xmax, \
- .platform_max = xmax, .invert = xinvert} }
+ .invert = xinvert} }
#define SOC_DOUBLE_EXT_TLV(xname, xreg, shift_left, shift_right, xmax, xinvert,\
xhandler_get, xhandler_put, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
@@ -280,6 +294,23 @@
.get = xhandler_get, .put = xhandler_put, \
.private_value = SOC_DOUBLE_R_VALUE(reg_left, reg_right, xshift, \
xmax, xinvert) }
+#define SOC_DOUBLE_R_S_EXT_TLV(xname, reg_left, reg_right, xshift, xmin, xmax, \
+ xsign_bit, xinvert, xhandler_get, xhandler_put, \
+ tlv_array) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
+ SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .tlv.p = (tlv_array), \
+ .info = snd_soc_info_volsw, \
+ .get = xhandler_get, .put = xhandler_put, \
+ .private_value = SOC_DOUBLE_R_S_VALUE(reg_left, reg_right, xshift, \
+ xmin, xmax, xsign_bit, xinvert) }
+#define SOC_SINGLE_S_EXT_TLV(xname, xreg, xshift, xmin, xmax, \
+ xsign_bit, xinvert, xhandler_get, xhandler_put, \
+ tlv_array) \
+ SOC_DOUBLE_R_S_EXT_TLV(xname, xreg, xreg, xshift, xmin, xmax, \
+ xsign_bit, xinvert, xhandler_get, xhandler_put, \
+ tlv_array)
#define SOC_SINGLE_BOOL_EXT(xname, xdata, xhandler_get, xhandler_put) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_bool_ext, \
@@ -368,24 +399,6 @@
#define SOC_ENUM_SINGLE_VIRT_DECL(name, xtexts) \
const struct soc_enum name = SOC_ENUM_SINGLE_VIRT(ARRAY_SIZE(xtexts), xtexts)
-/*
- * Bias levels
- *
- * @ON: Bias is fully on for audio playback and capture operations.
- * @PREPARE: Prepare for audio operations. Called before DAPM switching for
- * stream start and stop operations.
- * @STANDBY: Low power standby state when no playback/capture operations are
- * in progress. NOTE: The transition time between STANDBY and ON
- * should be as fast as possible and no longer than 10ms.
- * @OFF: Power Off. No restrictions on transition times.
- */
-enum snd_soc_bias_level {
- SND_SOC_BIAS_OFF = 0,
- SND_SOC_BIAS_STANDBY = 1,
- SND_SOC_BIAS_PREPARE = 2,
- SND_SOC_BIAS_ON = 3,
-};
-
struct device_node;
struct snd_jack;
struct snd_soc_card;
@@ -407,20 +420,13 @@ struct snd_soc_jack_pin;
struct snd_soc_jack_gpio;
-typedef int (*hw_write_t)(void *,const char* ,int);
-
enum snd_soc_pcm_subclass {
SND_SOC_PCM_CLASS_PCM = 0,
SND_SOC_PCM_CLASS_BE = 1,
};
-enum snd_soc_card_subclass {
- SND_SOC_CARD_CLASS_INIT = 0,
- SND_SOC_CARD_CLASS_RUNTIME = 1,
-};
-
int snd_soc_register_card(struct snd_soc_card *card);
-int snd_soc_unregister_card(struct snd_soc_card *card);
+void snd_soc_unregister_card(struct snd_soc_card *card);
int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card);
#ifdef CONFIG_PM_SLEEP
int snd_soc_suspend(struct device *dev);
@@ -437,11 +443,12 @@ static inline int snd_soc_resume(struct device *dev)
}
#endif
int snd_soc_poweroff(struct device *dev);
-int snd_soc_add_component(struct device *dev,
- struct snd_soc_component *component,
- const struct snd_soc_component_driver *component_driver,
- struct snd_soc_dai_driver *dai_drv,
- int num_dai);
+int snd_soc_component_initialize(struct snd_soc_component *component,
+ const struct snd_soc_component_driver *driver,
+ struct device *dev);
+int snd_soc_add_component(struct snd_soc_component *component,
+ struct snd_soc_dai_driver *dai_drv,
+ int num_dai);
int snd_soc_register_component(struct device *dev,
const struct snd_soc_component_driver *component_driver,
struct snd_soc_dai_driver *dai_drv, int num_dai);
@@ -449,6 +456,10 @@ int devm_snd_soc_register_component(struct device *dev,
const struct snd_soc_component_driver *component_driver,
struct snd_soc_dai_driver *dai_drv, int num_dai);
void snd_soc_unregister_component(struct device *dev);
+void snd_soc_unregister_component_by_driver(struct device *dev,
+ const struct snd_soc_component_driver *component_driver);
+struct snd_soc_component *snd_soc_lookup_component_nolocked(struct device *dev,
+ const char *driver_name);
struct snd_soc_component *snd_soc_lookup_component(struct device *dev,
const char *driver_name);
@@ -468,8 +479,22 @@ struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
struct snd_soc_dai_link *dai_link);
bool snd_soc_runtime_ignore_pmdown_time(struct snd_soc_pcm_runtime *rtd);
-void snd_soc_runtime_activate(struct snd_soc_pcm_runtime *rtd, int stream);
-void snd_soc_runtime_deactivate(struct snd_soc_pcm_runtime *rtd, int stream);
+
+void snd_soc_runtime_action(struct snd_soc_pcm_runtime *rtd,
+ int stream, int action);
+static inline void snd_soc_runtime_activate(struct snd_soc_pcm_runtime *rtd,
+ int stream)
+{
+ snd_soc_runtime_action(rtd, stream, 1);
+}
+static inline void snd_soc_runtime_deactivate(struct snd_soc_pcm_runtime *rtd,
+ int stream)
+{
+ snd_soc_runtime_action(rtd, stream, -1);
+}
+
+int snd_soc_runtime_calc_hw(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hardware *hw, int stream);
int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
unsigned int dai_fmt);
@@ -489,55 +514,13 @@ int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots);
int snd_soc_params_to_frame_size(struct snd_pcm_hw_params *params);
int snd_soc_calc_bclk(int fs, int sample_size, int channels, int tdm_slots);
int snd_soc_params_to_bclk(struct snd_pcm_hw_params *parms);
+int snd_soc_tdm_params_to_bclk(struct snd_pcm_hw_params *params,
+ int tdm_width, int tdm_slots, int slot_multiple);
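Usage sketch for the new TDM helper in a machine driver's hw_params; the 32-bit slot width and even slot rounding are assumptions:

static int example_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params,
			     struct snd_soc_dai *dai)
{
	/* width=32, slots=0 (take channel count), round slots up to 2 */
	int bclk = snd_soc_tdm_params_to_bclk(params, 32, 0, 2);

	if (bclk < 0)
		return bclk;

	return snd_soc_dai_set_bclk_ratio(dai, bclk / params_rate(params));
}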
/* set runtime hw params */
int snd_soc_set_runtime_hwparams(struct snd_pcm_substream *substream,
const struct snd_pcm_hardware *hw);
-/* Jack reporting */
-int snd_soc_card_jack_new(struct snd_soc_card *card, const char *id, int type,
- struct snd_soc_jack *jack, struct snd_soc_jack_pin *pins,
- unsigned int num_pins);
-
-void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask);
-int snd_soc_jack_add_pins(struct snd_soc_jack *jack, int count,
- struct snd_soc_jack_pin *pins);
-void snd_soc_jack_notifier_register(struct snd_soc_jack *jack,
- struct notifier_block *nb);
-void snd_soc_jack_notifier_unregister(struct snd_soc_jack *jack,
- struct notifier_block *nb);
-int snd_soc_jack_add_zones(struct snd_soc_jack *jack, int count,
- struct snd_soc_jack_zone *zones);
-int snd_soc_jack_get_type(struct snd_soc_jack *jack, int micbias_voltage);
-#ifdef CONFIG_GPIOLIB
-int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
- struct snd_soc_jack_gpio *gpios);
-int snd_soc_jack_add_gpiods(struct device *gpiod_dev,
- struct snd_soc_jack *jack,
- int count, struct snd_soc_jack_gpio *gpios);
-void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
- struct snd_soc_jack_gpio *gpios);
-#else
-static inline int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
- struct snd_soc_jack_gpio *gpios)
-{
- return 0;
-}
-
-static inline int snd_soc_jack_add_gpiods(struct device *gpiod_dev,
- struct snd_soc_jack *jack,
- int count,
- struct snd_soc_jack_gpio *gpios)
-{
- return 0;
-}
-
-static inline void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
- struct snd_soc_jack_gpio *gpios)
-{
-}
-#endif
-
struct snd_ac97 *snd_soc_alloc_ac97_component(struct snd_soc_component *component);
struct snd_ac97 *snd_soc_new_ac97_component(struct snd_soc_component *component,
unsigned int id, unsigned int id_mask);
@@ -568,8 +551,6 @@ static inline int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops)
struct snd_kcontrol *snd_soc_cnew(const struct snd_kcontrol_new *_template,
void *data, const char *long_name,
const char *prefix);
-struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
- const char *name);
int snd_soc_add_component_controls(struct snd_soc_component *component,
const struct snd_kcontrol_new *controls, unsigned int num_controls);
int snd_soc_add_card_controls(struct snd_soc_card *soc_card,
@@ -626,87 +607,6 @@ int snd_soc_get_strobe(struct snd_kcontrol *kcontrol,
int snd_soc_put_strobe(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
-/**
- * struct snd_soc_jack_pin - Describes a pin to update based on jack detection
- *
- * @pin: name of the pin to update
- * @mask: bits to check for in reported jack status
- * @invert: if non-zero then pin is enabled when status is not reported
- * @list: internal list entry
- */
-struct snd_soc_jack_pin {
- struct list_head list;
- const char *pin;
- int mask;
- bool invert;
-};
-
-/**
- * struct snd_soc_jack_zone - Describes voltage zones of jack detection
- *
- * @min_mv: start voltage in mv
- * @max_mv: end voltage in mv
- * @jack_type: type of jack that is expected for this voltage
- * @debounce_time: debounce_time for jack, codec driver should wait for this
- * duration before reading the adc for voltages
- * @list: internal list entry
- */
-struct snd_soc_jack_zone {
- unsigned int min_mv;
- unsigned int max_mv;
- unsigned int jack_type;
- unsigned int debounce_time;
- struct list_head list;
-};
-
-/**
- * struct snd_soc_jack_gpio - Describes a gpio pin for jack detection
- *
- * @gpio: legacy gpio number
- * @idx: gpio descriptor index within the function of the GPIO
- * consumer device
- * @gpiod_dev: GPIO consumer device
- * @name: gpio name. Also as connection ID for the GPIO consumer
- * device function name lookup
- * @report: value to report when jack detected
- * @invert: report presence in low state
- * @debounce_time: debounce time in ms
- * @wake: enable as wake source
- * @jack_status_check: callback function which overrides the detection
- * to provide more complex checks (eg, reading an
- * ADC).
- */
-struct snd_soc_jack_gpio {
- unsigned int gpio;
- unsigned int idx;
- struct device *gpiod_dev;
- const char *name;
- int report;
- int invert;
- int debounce_time;
- bool wake;
-
- /* private: */
- struct snd_soc_jack *jack;
- struct delayed_work work;
- struct notifier_block pm_notifier;
- struct gpio_desc *desc;
-
- void *data;
- /* public: */
- int (*jack_status_check)(void *data);
-};
-
-struct snd_soc_jack {
- struct mutex mutex;
- struct snd_jack *jack;
- struct snd_soc_card *card;
- struct list_head pins;
- int status;
- struct blocking_notifier_head notifier;
- struct list_head jack_zones;
-};
-
/* SoC PCM stream information */
struct snd_soc_pcm_stream {
const char *stream_name;
@@ -794,6 +694,9 @@ struct snd_soc_dai_link {
/* codec/machine specific init - e.g. add machine controls */
int (*init)(struct snd_soc_pcm_runtime *rtd);
+ /* codec/machine specific exit - dual of init() */
+ void (*exit)(struct snd_soc_pcm_runtime *rtd);
+
/* optional hw_params re-writing for BE and FE sync */
int (*be_hw_params_fixup)(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params);
@@ -803,7 +706,7 @@ struct snd_soc_dai_link {
const struct snd_soc_compr_ops *compr_ops;
/* Mark this pcm with non atomic ops */
- bool nonatomic;
+ unsigned int nonatomic:1;
/* For unidirectional dai links */
unsigned int playback_only:1;
@@ -813,9 +716,9 @@ struct snd_soc_dai_link {
unsigned int ignore_suspend:1;
/* Symmetry requirements */
- unsigned int symmetric_rates:1;
+ unsigned int symmetric_rate:1;
unsigned int symmetric_channels:1;
- unsigned int symmetric_samplebits:1;
+ unsigned int symmetric_sample_bits:1;
/* Do not create a PCM for this DAI link (Backend link) */
unsigned int no_pcm:1;
@@ -840,19 +743,48 @@ struct snd_soc_dai_link {
/* Do not create a PCM for this DAI link (Backend link) */
unsigned int ignore:1;
+ /* This flag reorders the stop sequence: when it is set, the DMA
+ * controller stop sequence is invoked first, followed by the
+ * CPU DAI driver stop sequence.
+ */
+ unsigned int stop_dma_first:1;
+
#ifdef CONFIG_SND_SOC_TOPOLOGY
struct snd_soc_dobj dobj; /* For topology */
#endif
};
+
+static inline struct snd_soc_dai_link_component*
+asoc_link_to_cpu(struct snd_soc_dai_link *link, int n) {
+ return &(link)->cpus[n];
+}
+
+static inline struct snd_soc_dai_link_component*
+asoc_link_to_codec(struct snd_soc_dai_link *link, int n) {
+ return &(link)->codecs[n];
+}
+
+static inline struct snd_soc_dai_link_component*
+asoc_link_to_platform(struct snd_soc_dai_link *link, int n) {
+ return &(link)->platforms[n];
+}
+
#define for_each_link_codecs(link, i, codec) \
for ((i) = 0; \
- ((i) < link->num_codecs) && ((codec) = &link->codecs[i]); \
+ ((i) < link->num_codecs) && \
+ ((codec) = asoc_link_to_codec(link, i)); \
(i)++)
#define for_each_link_platforms(link, i, platform) \
for ((i) = 0; \
((i) < link->num_platforms) && \
- ((platform) = &link->platforms[i]); \
+ ((platform) = asoc_link_to_platform(link, i)); \
+ (i)++)
+
+#define for_each_link_cpus(link, i, cpu) \
+ for ((i) = 0; \
+ ((i) < link->num_cpus) && \
+ ((cpu) = asoc_link_to_cpu(link, i)); \
(i)++)
/*
@@ -992,13 +924,9 @@ struct snd_soc_card {
struct mutex pcm_mutex;
enum snd_soc_pcm_subclass pcm_subclass;
- spinlock_t dpcm_lock;
-
- bool instantiated;
- bool topology_shortname_created;
-
int (*probe)(struct snd_soc_card *card);
int (*late_probe)(struct snd_soc_card *card);
+ void (*fixup_controls)(struct snd_soc_card *card);
int (*remove)(struct snd_soc_card *card);
/* the pre and post PM functions are used to do any PM work before and
@@ -1057,7 +985,6 @@ struct snd_soc_card {
int num_of_dapm_widgets;
const struct snd_soc_dapm_route *of_dapm_routes;
int num_of_dapm_routes;
- bool fully_routed;
/* lists of probed devices belonging to this card */
struct list_head component_dev_list;
@@ -1084,6 +1011,14 @@ struct snd_soc_card {
#endif
u32 pop_time;
+ /* bit field */
+ unsigned int instantiated:1;
+ unsigned int topology_shortname_created:1;
+ unsigned int fully_routed:1;
+ unsigned int disable_route_checks:1;
+ unsigned int probed:1;
+ unsigned int component_chaining:1;
+
void *drvdata;
};
#define for_each_card_prelinks(card, i, link) \
@@ -1109,6 +1044,14 @@ struct snd_soc_card {
#define for_each_card_components(card, component) \
list_for_each_entry(component, &(card)->component_dev_list, card_list)
+#define for_each_card_dapms(card, dapm) \
+ list_for_each_entry(dapm, &card->dapm_list, list)
+
+#define for_each_card_widgets(card, w)\
+ list_for_each_entry(w, &card->widgets, list)
+#define for_each_card_widgets_safe(card, w, _w) \
+ list_for_each_entry_safe(w, _w, &card->widgets, list)
+
/* SoC machine DAI configuration, glues a codec and cpu DAI together */
struct snd_soc_pcm_runtime {
struct device *dev;
@@ -1119,18 +1062,23 @@ struct snd_soc_pcm_runtime {
unsigned int params_select; /* currently selected param for dai link */
/* Dynamic PCM BE runtime data */
- struct snd_soc_dpcm_runtime dpcm[2];
+ struct snd_soc_dpcm_runtime dpcm[SNDRV_PCM_STREAM_LAST + 1];
+ struct snd_soc_dapm_widget *c2c_widget[SNDRV_PCM_STREAM_LAST + 1];
long pmdown_time;
/* runtime devices */
struct snd_pcm *pcm;
struct snd_compr *compr;
- struct snd_soc_dai *codec_dai;
- struct snd_soc_dai *cpu_dai;
- struct snd_soc_dai **codec_dais;
- unsigned int num_codecs;
+ /*
+ * dais = cpu_dai + codec_dai
+ * see
+ * soc_new_pcm_runtime()
+ * asoc_rtd_to_cpu()
+ * asoc_rtd_to_codec()
+ */
+ struct snd_soc_dai **dais;
struct delayed_work delayed_work;
void (*close_delayed_work_func)(struct snd_soc_pcm_runtime *rtd);
@@ -1141,23 +1089,42 @@ struct snd_soc_pcm_runtime {
unsigned int num; /* 0-based and monotonic increasing */
struct list_head list; /* rtd list of the soc card */
+ /* function mark */
+ struct snd_pcm_substream *mark_startup;
+ struct snd_pcm_substream *mark_hw_params;
+ struct snd_pcm_substream *mark_trigger;
+ struct snd_compr_stream *mark_compr_startup;
+
/* bit field */
unsigned int pop_wait:1;
unsigned int fe_compr:1; /* for Dynamic PCM */
int num_components;
- struct snd_soc_component *components[0]; /* CPU/Codec/Platform */
+ struct snd_soc_component *components[]; /* CPU/Codec/Platform */
};
+/* see soc_new_pcm_runtime() */
+#define asoc_rtd_to_cpu(rtd, n) (rtd)->dais[n]
+#define asoc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->dai_link->num_cpus]
+#define asoc_substream_to_rtd(substream) \
+ (struct snd_soc_pcm_runtime *)snd_pcm_substream_chip(substream)
+
#define for_each_rtd_components(rtd, i, component) \
- for ((i) = 0; \
+ for ((i) = 0, component = NULL; \
((i) < rtd->num_components) && ((component) = rtd->components[i]);\
(i)++)
-#define for_each_rtd_codec_dai(rtd, i, dai)\
- for ((i) = 0; \
- ((i) < rtd->num_codecs) && ((dai) = rtd->codec_dais[i]); \
+#define for_each_rtd_cpu_dais(rtd, i, dai) \
+ for ((i) = 0; \
+ ((i) < rtd->dai_link->num_cpus) && ((dai) = asoc_rtd_to_cpu(rtd, i)); \
+ (i)++)
+#define for_each_rtd_codec_dais(rtd, i, dai) \
+ for ((i) = 0; \
+ ((i) < rtd->dai_link->num_codecs) && ((dai) = asoc_rtd_to_codec(rtd, i)); \
+ (i)++)
+#define for_each_rtd_dais(rtd, i, dai) \
+ for ((i) = 0; \
+ ((i) < (rtd)->dai_link->num_cpus + (rtd)->dai_link->num_codecs) && \
+ ((dai) = (rtd)->dais[i]); \
(i)++)
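Sketch of iterating every DAI of a runtime with the new helpers; dais[] holds CPUs first, then codecs, so one loop covers both. The clock id and rate are assumptions:

static int example_startup(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *dai;
	int i, ret;

	for_each_rtd_dais(rtd, i, dai) {
		ret = snd_soc_dai_set_sysclk(dai, 0, 12288000,
					     SND_SOC_CLOCK_IN);
		if (ret && ret != -ENOTSUPP)
			return ret;
	}

	return 0;
}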
-#define for_each_rtd_codec_dai_rollback(rtd, i, dai) \
- for (; (--(i) >= 0) && ((dai) = rtd->codec_dais[i]);)
void snd_soc_close_delayed_work(struct snd_soc_pcm_runtime *rtd);
@@ -1213,29 +1180,16 @@ struct soc_enum {
#endif
};
-/* device driver data */
-
-static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card,
- void *data)
-{
- card->drvdata = data;
-}
-
-static inline void *snd_soc_card_get_drvdata(struct snd_soc_card *card)
-{
- return card->drvdata;
-}
-
static inline bool snd_soc_volsw_is_stereo(struct soc_mixer_control *mc)
{
if (mc->reg == mc->rreg && mc->shift == mc->rshift)
- return 0;
+ return false;
/*
* mc->reg == mc->rreg && mc->shift != mc->rshift, or
* mc->reg != mc->rreg means that the control is
* stereo (bits in one register or in two registers)
*/
- return 1;
+ return true;
}
static inline unsigned int snd_soc_enum_val_to_item(struct soc_enum *e,
@@ -1285,6 +1239,7 @@ int snd_soc_of_parse_card_name(struct snd_soc_card *card,
const char *propname);
int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card,
const char *propname);
+int snd_soc_of_parse_pin_switches(struct snd_soc_card *card, const char *prop);
int snd_soc_of_get_slot_mask(struct device_node *np,
const char *prop_name,
unsigned int *mask);
@@ -1309,12 +1264,26 @@ void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
const char *propname);
-unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
- const char *prefix,
- struct device_node **bitclkmaster,
- struct device_node **framemaster);
+int snd_soc_of_parse_aux_devs(struct snd_soc_card *card, const char *propname);
+
+unsigned int snd_soc_daifmt_clock_provider_flipped(unsigned int dai_fmt);
+unsigned int snd_soc_daifmt_clock_provider_from_bitmap(unsigned int bit_frame);
+
+unsigned int snd_soc_daifmt_parse_format(struct device_node *np, const char *prefix);
+unsigned int snd_soc_daifmt_parse_clock_provider_raw(struct device_node *np,
+ const char *prefix,
+ struct device_node **bitclkmaster,
+ struct device_node **framemaster);
+#define snd_soc_daifmt_parse_clock_provider_as_bitmap(np, prefix) \
+ snd_soc_daifmt_parse_clock_provider_raw(np, prefix, NULL, NULL)
+#define snd_soc_daifmt_parse_clock_provider_as_phandle \
+ snd_soc_daifmt_parse_clock_provider_raw
+#define snd_soc_daifmt_parse_clock_provider_as_flag(np, prefix) \
+ snd_soc_daifmt_clock_provider_from_bitmap( \
+ snd_soc_daifmt_parse_clock_provider_as_bitmap(np, prefix))
+
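Sketch of a card driver assembling a complete dai_fmt from the device tree with the split parse helpers; the property prefix is an assumption borrowed from simple-card:

static unsigned int example_parse_daifmt(struct device_node *np)
{
	unsigned int fmt;

	/* format, clock gating and polarity bits */
	fmt = snd_soc_daifmt_parse_format(np, "simple-audio-card,");
	/* plus the clock provider bits (CBx_CFx) */
	fmt |= snd_soc_daifmt_parse_clock_provider_as_flag(np,
						"simple-audio-card,");

	return fmt;	/* suitable for snd_soc_runtime_set_dai_fmt() */
}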
int snd_soc_get_dai_id(struct device_node *ep);
-int snd_soc_get_dai_name(struct of_phandle_args *args,
+int snd_soc_get_dai_name(const struct of_phandle_args *args,
const char **dai_name);
int snd_soc_of_get_dai_name(struct device_node *of_node,
const char **dai_name);
@@ -1322,6 +1291,10 @@ int snd_soc_of_get_dai_link_codecs(struct device *dev,
struct device_node *of_node,
struct snd_soc_dai_link *dai_link);
void snd_soc_of_put_dai_link_codecs(struct snd_soc_dai_link *dai_link);
+int snd_soc_of_get_dai_link_cpus(struct device *dev,
+ struct device_node *of_node,
+ struct snd_soc_dai_link *dai_link);
+void snd_soc_of_put_dai_link_cpus(struct snd_soc_dai_link *dai_link);
int snd_soc_add_pcm_runtime(struct snd_soc_card *card,
struct snd_soc_dai_link *dai_link);
@@ -1331,28 +1304,20 @@ void snd_soc_remove_pcm_runtime(struct snd_soc_card *card,
struct snd_soc_dai *snd_soc_register_dai(struct snd_soc_component *component,
struct snd_soc_dai_driver *dai_drv,
bool legacy_dai_naming);
+struct snd_soc_dai *devm_snd_soc_register_dai(struct device *dev,
+ struct snd_soc_component *component,
+ struct snd_soc_dai_driver *dai_drv,
+ bool legacy_dai_naming);
void snd_soc_unregister_dai(struct snd_soc_dai *dai);
struct snd_soc_dai *snd_soc_find_dai(
const struct snd_soc_dai_link_component *dlc);
+struct snd_soc_dai *snd_soc_find_dai_with_mutex(
+ const struct snd_soc_dai_link_component *dlc);
#include <sound/soc-dai.h>
static inline
-struct snd_soc_dai *snd_soc_card_get_codec_dai(struct snd_soc_card *card,
- const char *dai_name)
-{
- struct snd_soc_pcm_runtime *rtd;
-
- list_for_each_entry(rtd, &card->rtd_list, list) {
- if (!strcmp(rtd->codec_dai->name, dai_name))
- return rtd->codec_dai;
- }
-
- return NULL;
-}
-
-static inline
int snd_soc_fixup_dai_links_platform_name(struct snd_soc_card *card,
const char *platform_name)
{
@@ -1365,13 +1330,17 @@ int snd_soc_fixup_dai_links_platform_name(struct snd_soc_card *card,
/* set platform name for each dailink */
for_each_card_prelinks(card, i, dai_link) {
- name = devm_kstrdup(card->dev, platform_name, GFP_KERNEL);
- if (!name)
- return -ENOMEM;
+ /* only a single platform is supported for now */
+ if (dai_link->num_platforms != 1)
+ return -EINVAL;
if (!dai_link->platforms)
return -EINVAL;
+ name = devm_kstrdup(card->dev, platform_name, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
/* only single platform is supported for now */
dai_link->platforms->name = name;
}
@@ -1397,5 +1366,7 @@ static inline void snd_soc_dapm_mutex_unlock(struct snd_soc_dapm_context *dapm)
}
#include <sound/soc-component.h>
+#include <sound/soc-card.h>
+#include <sound/soc-jack.h>
#endif
diff --git a/include/sound/sof.h b/include/sound/sof.h
index a0cbca021230..341fef19e612 100644
--- a/include/sound/sof.h
+++ b/include/sound/sof.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -16,6 +16,44 @@
#include <sound/soc-acpi.h>
struct snd_sof_dsp_ops;
+struct snd_sof_dev;
+
+/**
+ * enum sof_fw_state - DSP firmware state definitions
+ * @SOF_FW_BOOT_NOT_STARTED: firmware boot is not yet started
+ * @SOF_FW_BOOT_PREPARE: preparing for boot (firmware loading, for example)
+ * @SOF_FW_BOOT_IN_PROGRESS: firmware boot is in progress
+ * @SOF_FW_BOOT_FAILED: firmware boot failed
+ * @SOF_FW_BOOT_READY_FAILED: firmware booted but fw_ready op failed
+ * @SOF_FW_BOOT_READY_OK: firmware booted and fw_ready op passed
+ * @SOF_FW_BOOT_COMPLETE: firmware is booted up and functional
+ * @SOF_FW_CRASHED: firmware crashed after successful boot
+ */
+enum sof_fw_state {
+ SOF_FW_BOOT_NOT_STARTED = 0,
+ SOF_FW_BOOT_PREPARE,
+ SOF_FW_BOOT_IN_PROGRESS,
+ SOF_FW_BOOT_FAILED,
+ SOF_FW_BOOT_READY_FAILED,
+ SOF_FW_BOOT_READY_OK,
+ SOF_FW_BOOT_COMPLETE,
+ SOF_FW_CRASHED,
+};
+
+/* DSP power states */
+enum sof_dsp_power_states {
+ SOF_DSP_PM_D0,
+ SOF_DSP_PM_D1,
+ SOF_DSP_PM_D2,
+ SOF_DSP_PM_D3,
+};
+
+/* Definitions for multiple IPCs */
+enum sof_ipc_type {
+ SOF_IPC,
+ SOF_INTEL_IPC4,
+ SOF_IPC_TYPE_COUNT
+};
/*
* SOF Platform data.
@@ -27,6 +65,9 @@ struct snd_sof_pdata {
struct device *dev;
+ /* number of initial bytes of the firmware image not loaded into DSP memory */
+ size_t fw_offset;
+
/*
* notification callback used if the hardware initialization
* can take time or is handled in a workqueue. This callback
@@ -48,8 +89,11 @@ struct snd_sof_pdata {
/* machine */
struct platform_device *pdev_mach;
const struct snd_soc_acpi_mach *machine;
+ const struct snd_sof_of_mach *of_machine;
void *hw_pdata;
+
+ enum sof_ipc_type ipc_type;
};
/*
@@ -59,21 +103,19 @@ struct snd_sof_pdata {
struct sof_dev_desc {
/* list of machines using this configuration */
struct snd_soc_acpi_mach *machines;
+ struct snd_sof_of_mach *of_machines;
/* alternate list of machines using this configuration */
struct snd_soc_acpi_mach *alt_machines;
+ bool use_acpi_target_states;
+
/* Platform resource indexes in BAR / ACPI resources. */
/* Must set to -1 if not used - add new items to end */
int resindex_lpe_base;
int resindex_pcicfg_base;
int resindex_imr_base;
int irqindex_host_ipc;
- int resindex_dma_base;
-
- /* DMA only valid when resindex_dma_base != -1*/
- int dma_engine;
- int dma_size;
/* IPC timeouts in ms */
int ipc_timeout;
@@ -85,16 +127,23 @@ struct sof_dev_desc {
/* defaults for no codec mode */
const char *nocodec_tplg_filename;
+ /* information on supported IPCs */
+ unsigned int ipc_supported_mask;
+ enum sof_ipc_type ipc_default;
+
/* defaults paths for firmware and topology files */
- const char *default_fw_path;
- const char *default_tplg_path;
+ const char *default_fw_path[SOF_IPC_TYPE_COUNT];
+ const char *default_tplg_path[SOF_IPC_TYPE_COUNT];
/* default firmware name */
- const char *default_fw_filename;
+ const char *default_fw_filename[SOF_IPC_TYPE_COUNT];
- const struct snd_sof_dsp_ops *ops;
+ struct snd_sof_dsp_ops *ops;
+ int (*ops_init)(struct snd_sof_dev *sdev);
+ void (*ops_free)(struct snd_sof_dev *sdev);
};
-int sof_nocodec_setup(struct device *dev,
- const struct snd_sof_dsp_ops *ops);
+int sof_dai_get_mclk(struct snd_soc_pcm_runtime *rtd);
+int sof_dai_get_bclk(struct snd_soc_pcm_runtime *rtd);
+
#endif
diff --git a/include/sound/sof/channel_map.h b/include/sound/sof/channel_map.h
index 21044eb5f377..d363f0ca6979 100644
--- a/include/sound/sof/channel_map.h
+++ b/include/sound/sof/channel_map.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -39,7 +39,7 @@ struct sof_ipc_channel_map {
uint32_t ext_id;
uint32_t ch_mask;
uint32_t reserved;
- int32_t ch_coeffs[0];
+ int32_t ch_coeffs[];
} __packed;
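The [0]-to-[] changes here are part of the tree-wide conversion to C99 flexible array members; message sizing is unchanged and is typically done with struct_size() from <linux/overflow.h>. A minimal sketch, with the coefficient count n as a hypothetical input:

#include <linux/overflow.h>
#include <linux/slab.h>

struct sof_ipc_channel_map *map;
size_t n = 4;	/* hypothetical number of coefficients */

/* header plus n trailing int32_t coefficients, overflow-checked */
map = kzalloc(struct_size(map, ch_coeffs, n), GFP_KERNEL);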
/**
@@ -55,7 +55,7 @@ struct sof_ipc_stream_map {
struct sof_ipc_cmd_hdr hdr;
uint32_t num_ch_map;
uint32_t reserved[3];
- struct sof_ipc_channel_map ch_map[0];
+ struct sof_ipc_channel_map ch_map[];
} __packed;
#endif /* __IPC_CHANNEL_MAP_H__ */
diff --git a/include/sound/sof/control.h b/include/sound/sof/control.h
index 6080ea0facd7..983d374fe511 100644
--- a/include/sound/sof/control.h
+++ b/include/sound/sof/control.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -117,11 +117,11 @@ struct sof_ipc_ctrl_data {
/* control data - add new types if needed */
union {
/* channel values can be used by volume type controls */
- struct sof_ipc_ctrl_value_chan chanv[0];
+ DECLARE_FLEX_ARRAY(struct sof_ipc_ctrl_value_chan, chanv);
/* component values used by routing controls like mux, mixer */
- struct sof_ipc_ctrl_value_comp compv[0];
+ DECLARE_FLEX_ARRAY(struct sof_ipc_ctrl_value_comp, compv);
/* data can be used by binary controls */
- struct sof_abi_hdr data[0];
+ DECLARE_FLEX_ARRAY(struct sof_abi_hdr, data);
};
} __packed;
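DECLARE_FLEX_ARRAY() (from <linux/stddef.h>) is needed because C99 does not allow a bare flexible array member inside a union; the macro wraps it in an anonymous struct so the declaration stays legal while member access is unchanged. A sketch of the unchanged call-site usage, with cdata assumed to point at a received control message:

struct sof_ipc_ctrl_data *cdata = msg;	/* hypothetical message pointer */
unsigned int i;

for (i = 0; i < cdata->num_elems; i++)
	pr_debug("chan %u val %u\n",
		 cdata->chanv[i].channel, cdata->chanv[i].value);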
diff --git a/include/sound/sof/dai-amd.h b/include/sound/sof/dai-amd.h
new file mode 100644
index 000000000000..92f45c180b7c
--- /dev/null
+++ b/include/sound/sof/dai-amd.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2021 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_DAI_AMD_H__
+#define __INCLUDE_SOUND_SOF_DAI_AMD_H__
+
+#include <sound/sof/header.h>
+
+/* ACP Configuration Request - SOF_IPC_DAI_AMD_CONFIG */
+struct sof_ipc_dai_acp_params {
+ struct sof_ipc_hdr hdr;
+
+ uint32_t fsync_rate; /* FSYNC frequency in Hz */
+ uint32_t tdm_slots;
+} __packed;
+
+/* ACPDMIC Configuration Request - SOF_IPC_DAI_AMD_CONFIG */
+struct sof_ipc_dai_acpdmic_params {
+ uint32_t pdm_rate;
+ uint32_t pdm_ch;
+} __packed;
+
+#endif
diff --git a/include/sound/sof/dai-imx.h b/include/sound/sof/dai-imx.h
index ff9088dcc6f2..ca8325353d41 100644
--- a/include/sound/sof/dai-imx.h
+++ b/include/sound/sof/dai-imx.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* Copyright 2019 NXP
*
diff --git a/include/sound/sof/dai-intel.h b/include/sound/sof/dai-intel.h
index 5f1ef5565be6..5b93b7292f5e 100644
--- a/include/sound/sof/dai-intel.h
+++ b/include/sound/sof/dai-intel.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -48,6 +48,15 @@
#define SOF_DAI_INTEL_SSP_CLKCTRL_FS_KA BIT(4)
/* bclk idle */
#define SOF_DAI_INTEL_SSP_CLKCTRL_BCLK_IDLE_HIGH BIT(5)
+/* mclk early start */
+#define SOF_DAI_INTEL_SSP_CLKCTRL_MCLK_ES BIT(6)
+/* bclk early start */
+#define SOF_DAI_INTEL_SSP_CLKCTRL_BCLK_ES BIT(7)
+/* mclk always on */
+#define SOF_DAI_INTEL_SSP_CLKCTRL_MCLK_AON BIT(8)
+
+/* DMIC max. four controllers for eight microphone channels */
+#define SOF_DAI_INTEL_DMIC_NUM_CTRL 4
/* SSP Configuration Request - SOF_IPC_DAI_SSP_CONFIG */
struct sof_ipc_dai_ssp_params {
@@ -85,6 +94,19 @@ struct sof_ipc_dai_ssp_params {
struct sof_ipc_dai_hda_params {
struct sof_ipc_hdr hdr;
uint32_t link_dma_ch;
+ uint32_t rate;
+ uint32_t channels;
+} __packed;
+
+/* ALH Configuration Request - SOF_IPC_DAI_ALH_CONFIG */
+struct sof_ipc_dai_alh_params {
+ struct sof_ipc_hdr hdr;
+ uint32_t stream_id;
+ uint32_t rate;
+ uint32_t channels;
+
+ /* reserved for future use */
+ uint32_t reserved[13];
} __packed;
/* DMIC Configuration Request - SOF_IPC_DAI_DMIC_CONFIG */
@@ -126,7 +148,7 @@ struct sof_ipc_dai_dmic_pdm_ctrl {
* version number used in configuration data is checked vs. version used by
* device driver src/drivers/dmic.c need to match. It is incremented from
 * initial value 1 if updates done to the driver would alter the operation
- * of the microhone.
+ * of the microphone.
*
* Note: The microphone clock (pdmclk_min, pdmclk_max, duty_min, duty_max)
* parameters need to be set as defined in microphone data sheet. E.g. clock
@@ -161,12 +183,13 @@ struct sof_ipc_dai_dmic_params {
uint32_t fifo_fs; /**< FIFO sample rate in Hz (8000..96000) */
uint32_t reserved_1; /**< Reserved */
uint16_t fifo_bits; /**< FIFO word length (16 or 32) */
- uint16_t reserved_2; /**< Reserved */
+ uint16_t fifo_bits_b; /**< Deprecated since firmware ABI 3.0.1 */
uint16_t duty_min; /**< Min. mic clock duty cycle in % (20..80) */
uint16_t duty_max; /**< Max. mic clock duty cycle in % (min..80) */
- uint32_t num_pdm_active; /**< Number of active pdm controllers */
+ uint32_t num_pdm_active; /**< Number of active pdm controllers. */
+ /**< Range is 1..SOF_DAI_INTEL_DMIC_NUM_CTRL */
uint32_t wake_up_time; /**< Time from clock start to data (us) */
uint32_t min_clock_on_time; /**< Min. time that clk is kept on (us) */
@@ -175,17 +198,8 @@ struct sof_ipc_dai_dmic_params {
/* reserved for future use */
uint32_t reserved[5];
- /**< variable number of pdm controller config */
- struct sof_ipc_dai_dmic_pdm_ctrl pdm[0];
-} __packed;
-
-/* ALH Configuration Request - SOF_IPC_DAI_ALH_CONFIG */
-struct sof_ipc_dai_alh_params {
- struct sof_ipc_hdr hdr;
- uint32_t stream_id;
-
- /* reserved for future use */
- uint32_t reserved[15];
+ /**< PDM controllers configuration */
+ struct sof_ipc_dai_dmic_pdm_ctrl pdm[SOF_DAI_INTEL_DMIC_NUM_CTRL];
} __packed;
#endif
diff --git a/include/sound/sof/dai-mediatek.h b/include/sound/sof/dai-mediatek.h
new file mode 100644
index 000000000000..62dd4720558d
--- /dev/null
+++ b/include/sound/sof/dai-mediatek.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2021 Mediatek Corporation. All rights reserved.
+ *
+ * Author: Bo Pan <bo.pan@mediatek.com>
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_DAI_MEDIATEK_H__
+#define __INCLUDE_SOUND_SOF_DAI_MEDIATEK_H__
+
+#include <sound/sof/header.h>
+
+struct sof_ipc_dai_mtk_afe_params {
+ struct sof_ipc_hdr hdr;
+ u32 channels;
+ u32 rate;
+ u32 format;
+ u32 stream_id;
+ u32 reserved[4]; /* reserved for future use */
+} __packed;
+
+#endif
+
diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h
index 2565edd336f1..83fd81c82e4c 100644
--- a/include/sound/sof/dai.h
+++ b/include/sound/sof/dai.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -12,6 +12,8 @@
#include <sound/sof/header.h>
#include <sound/sof/dai-intel.h>
#include <sound/sof/dai-imx.h>
+#include <sound/sof/dai-amd.h>
+#include <sound/sof/dai-mediatek.h>
/*
* DAI Configuration.
@@ -34,15 +36,41 @@
#define SOF_DAI_FMT_IB_NF (3 << 8) /**< invert BCLK + normal FRM */
#define SOF_DAI_FMT_IB_IF (4 << 8) /**< invert BCLK + FRM */
-#define SOF_DAI_FMT_CBM_CFM (0 << 12) /**< codec clk & FRM master */
-#define SOF_DAI_FMT_CBS_CFM (2 << 12) /**< codec clk slave & FRM master */
-#define SOF_DAI_FMT_CBM_CFS (3 << 12) /**< codec clk master & frame slave */
-#define SOF_DAI_FMT_CBS_CFS (4 << 12) /**< codec clk & FRM slave */
+#define SOF_DAI_FMT_CBP_CFP (0 << 12) /**< codec bclk provider & frame provider */
+#define SOF_DAI_FMT_CBC_CFP (2 << 12) /**< codec bclk consumer & frame provider */
+#define SOF_DAI_FMT_CBP_CFC (3 << 12) /**< codec bclk provider & frame consumer */
+#define SOF_DAI_FMT_CBC_CFC (4 << 12) /**< codec bclk consumer & frame consumer */
+
+/* keep old definitions for backwards compatibility */
+#define SOF_DAI_FMT_CBM_CFM SOF_DAI_FMT_CBP_CFP
+#define SOF_DAI_FMT_CBS_CFM SOF_DAI_FMT_CBC_CFP
+#define SOF_DAI_FMT_CBM_CFS SOF_DAI_FMT_CBP_CFC
+#define SOF_DAI_FMT_CBS_CFS SOF_DAI_FMT_CBC_CFC
#define SOF_DAI_FMT_FORMAT_MASK 0x000f
#define SOF_DAI_FMT_CLOCK_MASK 0x00f0
#define SOF_DAI_FMT_INV_MASK 0x0f00
-#define SOF_DAI_FMT_MASTER_MASK 0xf000
+#define SOF_DAI_FMT_CLOCK_PROVIDER_MASK 0xf000
+
+/*
+ * DAI_CONFIG flags. The 4 LSB bits are used for the commands, HW_PARAMS, HW_FREE and PAUSE
+ * representing when the IPC is sent. The 4 MSB bits are used to add quirks along with the above
+ * commands.
+ */
+#define SOF_DAI_CONFIG_FLAGS_CMD_MASK 0xF
+#define SOF_DAI_CONFIG_FLAGS_NONE 0 /**< DAI_CONFIG sent without stage information */
+#define SOF_DAI_CONFIG_FLAGS_HW_PARAMS BIT(0) /**< DAI_CONFIG sent during hw_params stage */
+#define SOF_DAI_CONFIG_FLAGS_HW_FREE BIT(1) /**< DAI_CONFIG sent during hw_free stage */
+/**< DAI_CONFIG sent during pause trigger. Only available ABI 3.20 onwards */
+#define SOF_DAI_CONFIG_FLAGS_PAUSE BIT(2)
+#define SOF_DAI_CONFIG_FLAGS_QUIRK_SHIFT 4
+#define SOF_DAI_CONFIG_FLAGS_QUIRK_MASK (0xF << SOF_DAI_CONFIG_FLAGS_QUIRK_SHIFT)
+/*
+ * This should be used along with the SOF_DAI_CONFIG_FLAGS_HW_PARAMS to indicate that pipeline
+ * stop/pause and DAI DMA stop/pause should happen in two steps. This change is only available
+ * ABI 3.20 onwards.
+ */
+#define SOF_DAI_CONFIG_FLAGS_2_STEP_STOP BIT(0)
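A short sketch of composing these flags: the command lives in the low nibble and quirks must be shifted into the high nibble, so the two-step-stop quirk combines with a hw_params-stage config like this:

uint8_t flags = SOF_DAI_CONFIG_FLAGS_HW_PARAMS |
		(SOF_DAI_CONFIG_FLAGS_2_STEP_STOP <<
		 SOF_DAI_CONFIG_FLAGS_QUIRK_SHIFT);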
/** \brief Types of DAI */
enum sof_ipc_dai_type {
@@ -53,6 +81,11 @@ enum sof_ipc_dai_type {
SOF_DAI_INTEL_ALH, /**< Intel ALH */
SOF_DAI_IMX_SAI, /**< i.MX SAI */
SOF_DAI_IMX_ESAI, /**< i.MX ESAI */
+ SOF_DAI_AMD_BT, /**< AMD ACP BT */
+ SOF_DAI_AMD_SP, /**< AMD ACP SP */
+ SOF_DAI_AMD_DMIC, /**< AMD ACP DMIC */
+ SOF_DAI_AMD_HS, /**< AMD HS */
+ SOF_DAI_MEDIATEK_AFE, /**< Mediatek AFE */
};
/* general purpose DAI configuration */
@@ -63,7 +96,8 @@ struct sof_ipc_dai_config {
/* physical protocol and clocking */
uint16_t format; /**< SOF_DAI_FMT_ */
- uint16_t reserved16; /**< alignment */
+ uint8_t group_id; /**< group ID, 0 means no group (ABI 3.17) */
+ uint8_t flags; /**< SOF_DAI_CONFIG_FLAGS_ (ABI 3.19) */
/* reserved for future use */
uint32_t reserved[8];
@@ -76,7 +110,17 @@ struct sof_ipc_dai_config {
struct sof_ipc_dai_alh_params alh;
struct sof_ipc_dai_esai_params esai;
struct sof_ipc_dai_sai_params sai;
+ struct sof_ipc_dai_acp_params acpbt;
+ struct sof_ipc_dai_acp_params acpsp;
+ struct sof_ipc_dai_acpdmic_params acpdmic;
+ struct sof_ipc_dai_acp_params acphs;
+ struct sof_ipc_dai_mtk_afe_params afe;
};
} __packed;
+struct sof_dai_private_data {
+ struct sof_ipc_comp_dai *comp_dai;
+ struct sof_ipc_dai_config *dai_config;
+};
+
#endif
diff --git a/include/sound/sof/debug.h b/include/sound/sof/debug.h
new file mode 100644
index 000000000000..38693e3fb514
--- /dev/null
+++ b/include/sound/sof/debug.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2020 Intel Corporation. All rights reserved.
+ *
+ * Author: Karol Trzcinski <karolx.trzcinski@linux.intel.com>
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_DEBUG_H__
+#define __INCLUDE_SOUND_SOF_DEBUG_H__
+
+#include <sound/sof/header.h>
+
+/** ABI3.18 */
+enum sof_ipc_dbg_mem_zone {
+ SOF_IPC_MEM_ZONE_SYS = 0, /**< System zone */
+ SOF_IPC_MEM_ZONE_SYS_RUNTIME = 1, /**< System-runtime zone */
+ SOF_IPC_MEM_ZONE_RUNTIME = 2, /**< Runtime zone */
+ SOF_IPC_MEM_ZONE_BUFFER = 3, /**< Buffer zone */
+ SOF_IPC_MEM_ZONE_RUNTIME_SHARED = 4, /**< Runtime shared zone */
+ SOF_IPC_MEM_ZONE_SYS_SHARED = 5, /**< System shared zone */
+};
+
+/** ABI3.18 */
+struct sof_ipc_dbg_mem_usage_elem {
+ uint32_t zone; /**< see sof_ipc_dbg_mem_zone */
+ uint32_t id; /**< heap index within zone */
+ uint32_t used; /**< number of bytes used in zone */
+ uint32_t free; /**< number of bytes free to use within zone */
+ uint32_t reserved; /**< for future use */
+} __packed;
+
+/** ABI3.18 */
+struct sof_ipc_dbg_mem_usage {
+ struct sof_ipc_reply rhdr; /**< generic IPC reply header */
+ uint32_t reserved[4]; /**< reserved for future use */
+ uint32_t num_elems; /**< elems[] counter */
+ struct sof_ipc_dbg_mem_usage_elem elems[]; /**< memory usage information */
+} __packed;
+
+#endif
diff --git a/include/sound/sof/ext_manifest.h b/include/sound/sof/ext_manifest.h
new file mode 100644
index 000000000000..2a7e055584f9
--- /dev/null
+++ b/include/sound/sof/ext_manifest.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2020 Intel Corporation. All rights reserved.
+ */
+
+/*
+ * The extended manifest is a place to store firmware metadata that is known
+ * at compilation time - for example the firmware version or the compiler
+ * used. This information is read on the host side before firmware startup.
+ * This part of the output binary is not signed.
+ */
+
+#ifndef __SOF_FIRMWARE_EXT_MANIFEST_H__
+#define __SOF_FIRMWARE_EXT_MANIFEST_H__
+
+#include <linux/bits.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <sound/sof/info.h>
+
+/* In ASCII `XMan` */
+#define SOF_EXT_MAN_MAGIC_NUMBER 0x6e614d58
+
+/* Build u32 number in format MMmmmppp */
+#define SOF_EXT_MAN_BUILD_VERSION(MAJOR, MINOR, PATH) ((uint32_t)( \
+ ((MAJOR) << 24) | \
+ ((MINOR) << 12) | \
+ (PATH)))
+
+/* check extended manifest version consistency */
+#define SOF_EXT_MAN_VERSION_INCOMPATIBLE(host_ver, cli_ver) ( \
+ ((host_ver) & GENMASK(31, 24)) != \
+ ((cli_ver) & GENMASK(31, 24)))
+
+/* used extended manifest header version */
+#define SOF_EXT_MAN_VERSION SOF_EXT_MAN_BUILD_VERSION(1, 0, 0)
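A worked example of the MMmmmppp layout: major in bits 31..24, minor in bits 23..12, patch level in bits 11..0; the incompatibility check above compares only the major byte:

uint32_t v = SOF_EXT_MAN_BUILD_VERSION(1, 2, 3);
/* (1 << 24) | (2 << 12) | 3 == 0x01002003 */

/* false: the majors match, so a 1.0.0 reader accepts a 1.2.3 manifest */
bool bad = SOF_EXT_MAN_VERSION_INCOMPATIBLE(SOF_EXT_MAN_VERSION, v);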
+
+/* extended manifest header, deleting any field breaks backward compatibility */
+struct sof_ext_man_header {
+ uint32_t magic; /*< identification number, */
+ /*< EXT_MAN_MAGIC_NUMBER */
+ uint32_t full_size; /*< [bytes] full size of ext_man, */
+ /*< (header + content + padding) */
+ uint32_t header_size; /*< [bytes] makes the header extensible; */
+ /*< new fields appended to the ext_man header */
+ /*< do not break backward compatibility */
+ uint32_t header_version; /*< value of EXT_MAN_VERSION, */
+ /*< not related to the following content */
+
+ /* just after this header should be list of ext_man_elem_* elements */
+} __packed;
+
+/* Now define extended manifest elements */
+
+/* Extended manifest elements types */
+enum sof_ext_man_elem_type {
+ SOF_EXT_MAN_ELEM_FW_VERSION = 0,
+ SOF_EXT_MAN_ELEM_WINDOW = 1,
+ SOF_EXT_MAN_ELEM_CC_VERSION = 2,
+ SOF_EXT_MAN_ELEM_DBG_ABI = 4,
+ SOF_EXT_MAN_ELEM_CONFIG_DATA = 5, /**< ABI3.17 */
+ SOF_EXT_MAN_ELEM_PLATFORM_CONFIG_DATA = 6,
+};
+
+/* extended manifest element header */
+struct sof_ext_man_elem_header {
+ uint32_t type; /*< SOF_EXT_MAN_ELEM_ */
+ uint32_t size; /*< in bytes, including header size */
+
+ /* just after this header should be type dependent content */
+} __packed;
+
+/* FW version */
+struct sof_ext_man_fw_version {
+ struct sof_ext_man_elem_header hdr;
+ /* use sof_ipc struct because of code re-use */
+ struct sof_ipc_fw_version version;
+ uint32_t flags;
+} __packed;
+
+/* extended data memory windows for IPC, trace and debug */
+struct sof_ext_man_window {
+ struct sof_ext_man_elem_header hdr;
+ /* use sof_ipc struct because of code re-use */
+ struct sof_ipc_window ipc_window;
+} __packed;
+
+/* Used C compiler description */
+struct sof_ext_man_cc_version {
+ struct sof_ext_man_elem_header hdr;
+ /* use sof_ipc struct because of code re-use */
+ struct sof_ipc_cc_version cc_version;
+} __packed;
+
+struct ext_man_dbg_abi {
+ struct sof_ext_man_elem_header hdr;
+ /* use sof_ipc struct because of code re-use */
+ struct sof_ipc_user_abi_version dbg_abi;
+} __packed;
+
+/* EXT_MAN_ELEM_CONFIG_DATA element identifiers, ABI3.17 */
+enum config_elem_type {
+ SOF_EXT_MAN_CONFIG_EMPTY = 0,
+ SOF_EXT_MAN_CONFIG_IPC_MSG_SIZE = 1,
+ SOF_EXT_MAN_CONFIG_MEMORY_USAGE_SCAN = 2, /**< ABI 3.18 */
+};
+
+struct sof_config_elem {
+ uint32_t token;
+ uint32_t value;
+} __packed;
+
+/* firmware configuration information */
+struct sof_ext_man_config_data {
+ struct sof_ext_man_elem_header hdr;
+
+ struct sof_config_elem elems[];
+} __packed;
+
+#endif /* __SOF_FIRMWARE_EXT_MANIFEST_H__ */
diff --git a/include/sound/sof/ext_manifest4.h b/include/sound/sof/ext_manifest4.h
new file mode 100644
index 000000000000..ec97edcbbfc3
--- /dev/null
+++ b/include/sound/sof/ext_manifest4.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2022 Intel Corporation. All rights reserved.
+ */
+
+/*
+ * The extended manifest is a place to store firmware metadata that is known
+ * at compilation time - for example the firmware version or the compiler
+ * used. This information is read on the host side before firmware startup.
+ * This part of the output binary is not signed.
+ */
+
+#ifndef __SOF_FIRMWARE_EXT_MANIFEST4_H__
+#define __SOF_FIRMWARE_EXT_MANIFEST4_H__
+
+#include <linux/uuid.h>
+
+/* In ASCII $AE1 */
+#define SOF_EXT_MAN4_MAGIC_NUMBER 0x31454124
+
+#define MAX_MODULE_NAME_LEN 8
+#define MAX_FW_BINARY_NAME 8
+#define DEFAULT_HASH_SHA256_LEN 32
+#define SOF_MAN4_FW_HDR_OFFSET 0x2000
+#define SOF_MAN4_FW_HDR_OFFSET_CAVS_1_5 0x284
+
+/*********************************************************************
+ * extended manifest (struct sof_ext_manifest4_hdr)
+ *-------------------
+ * css_manifest hdr
+ *-------------------
+ * offset reserved for future
+ *-------------------
+ * fw_hdr (struct sof_man4_fw_binary_header)
+ *-------------------
+ * module_entry[0] (struct sof_man4_module)
+ *-------------------
+ * module_entry[1]
+ *-------------------
+ * ...
+ *-------------------
+ * module_entry[n]
+ *-------------------
+ * module_config[0] (struct sof_man4_module_config)
+ *-------------------
+ * module_config[1]
+ *-------------------
+ * ...
+ *-------------------
+ * module_config[m]
+ *-------------------
+ * FW content
+ *-------------------
+ *********************************************************************/
+
+struct sof_ext_manifest4_hdr {
+ uint32_t id;
+ uint32_t len; /* length of extension manifest */
+ uint16_t version_major; /* header version */
+ uint16_t version_minor;
+ uint32_t num_module_entries;
+} __packed;
+
+struct sof_man4_fw_binary_header {
+ /* This part must be unchanged to be backward compatible with SPT-LP ROM */
+ uint32_t id;
+ uint32_t len; /* sizeof(sof_man4_fw_binary_header) in bytes */
+ uint8_t name[MAX_FW_BINARY_NAME];
+ uint32_t preload_page_count; /* number of pages of preloaded image */
+ uint32_t fw_image_flags;
+ uint32_t feature_mask;
+ uint16_t major_version; /* Firmware version */
+ uint16_t minor_version;
+ uint16_t hotfix_version;
+ uint16_t build_version;
+ uint32_t num_module_entries;
+
+ /* This part may change to contain any additional data for BaseFw that is skipped by ROM */
+ uint32_t hw_buf_base_addr;
+ uint32_t hw_buf_length;
+ uint32_t load_offset; /* This value is used by ROM */
+} __packed;
+
+struct sof_man4_segment_desc {
+ uint32_t flags;
+ uint32_t v_base_addr;
+ uint32_t file_offset;
+} __packed;
+
+struct sof_man4_module {
+ uint32_t id;
+ uint8_t name[MAX_MODULE_NAME_LEN];
+ guid_t uuid;
+ uint32_t type;
+ uint8_t hash[DEFAULT_HASH_SHA256_LEN];
+ uint32_t entry_point;
+ uint16_t cfg_offset;
+ uint16_t cfg_count;
+ uint32_t affinity_mask;
+ uint16_t instance_max_count;
+ uint16_t instance_stack_size;
+ struct sof_man4_segment_desc segments[3];
+} __packed;
+
+struct sof_man4_module_config {
+ uint32_t par[4]; /* module parameters */
+ uint32_t is_bytes; /* actual size of instance .bss (bytes) */
+ uint32_t cps; /* cycles per second */
+ uint32_t ibs; /* input buffer size (bytes) */
+ uint32_t obs; /* output buffer size (bytes) */
+ uint32_t module_flags; /* flags, reserved for future use */
+ uint32_t cpc; /* cycles per single run */
+ uint32_t obls; /* output block size, reserved for future use */
+} __packed;
+
+#endif /* __SOF_FIRMWARE_EXT_MANIFEST4_H__ */
diff --git a/include/sound/sof/header.h b/include/sound/sof/header.h
index bf3edd9c08b4..b22e925c70e2 100644
--- a/include/sound/sof/header.h
+++ b/include/sound/sof/header.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -31,12 +31,12 @@
/* Global Message - Generic */
#define SOF_GLB_TYPE_SHIFT 28
-#define SOF_GLB_TYPE_MASK (0xf << SOF_GLB_TYPE_SHIFT)
+#define SOF_GLB_TYPE_MASK (0xfUL << SOF_GLB_TYPE_SHIFT)
#define SOF_GLB_TYPE(x) ((x) << SOF_GLB_TYPE_SHIFT)
/* Command Message - Generic */
#define SOF_CMD_TYPE_SHIFT 16
-#define SOF_CMD_TYPE_MASK (0xfff << SOF_CMD_TYPE_SHIFT)
+#define SOF_CMD_TYPE_MASK (0xfffL << SOF_CMD_TYPE_SHIFT)
#define SOF_CMD_TYPE(x) ((x) << SOF_CMD_TYPE_SHIFT)
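For orientation: a complete command word ORs a global type (bits 31..28) with a command type (bits 27..16), and SOF_IPC_MESSAGE_ID() below recovers the component id from bits 15..0. A hedged example using two values defined in this header:

uint32_t cmd = SOF_IPC_GLB_TRACE_MSG | SOF_IPC_TRACE_FILTER_UPDATE;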
/* Global Message Types */
@@ -49,8 +49,10 @@
#define SOF_IPC_FW_READY SOF_GLB_TYPE(0x7U)
#define SOF_IPC_GLB_DAI_MSG SOF_GLB_TYPE(0x8U)
#define SOF_IPC_GLB_TRACE_MSG SOF_GLB_TYPE(0x9U)
-#define SOF_IPC_GLB_GDB_DEBUG SOF_GLB_TYPE(0xAU)
+#define SOF_IPC_GLB_GDB_DEBUG SOF_GLB_TYPE(0xAU)
#define SOF_IPC_GLB_TEST_MSG SOF_GLB_TYPE(0xBU)
+#define SOF_IPC_GLB_PROBE SOF_GLB_TYPE(0xCU)
+#define SOF_IPC_GLB_DEBUG SOF_GLB_TYPE(0xDU)
/*
* DSP Command Message Types
@@ -102,13 +104,28 @@
#define SOF_IPC_STREAM_VORBIS_PARAMS SOF_CMD_TYPE(0x010)
#define SOF_IPC_STREAM_VORBIS_FREE SOF_CMD_TYPE(0x011)
+/* probe */
+#define SOF_IPC_PROBE_INIT SOF_CMD_TYPE(0x001)
+#define SOF_IPC_PROBE_DEINIT SOF_CMD_TYPE(0x002)
+#define SOF_IPC_PROBE_DMA_ADD SOF_CMD_TYPE(0x003)
+#define SOF_IPC_PROBE_DMA_INFO SOF_CMD_TYPE(0x004)
+#define SOF_IPC_PROBE_DMA_REMOVE SOF_CMD_TYPE(0x005)
+#define SOF_IPC_PROBE_POINT_ADD SOF_CMD_TYPE(0x006)
+#define SOF_IPC_PROBE_POINT_INFO SOF_CMD_TYPE(0x007)
+#define SOF_IPC_PROBE_POINT_REMOVE SOF_CMD_TYPE(0x008)
+
/* trace */
#define SOF_IPC_TRACE_DMA_PARAMS SOF_CMD_TYPE(0x001)
#define SOF_IPC_TRACE_DMA_POSITION SOF_CMD_TYPE(0x002)
#define SOF_IPC_TRACE_DMA_PARAMS_EXT SOF_CMD_TYPE(0x003)
+#define SOF_IPC_TRACE_FILTER_UPDATE SOF_CMD_TYPE(0x004) /**< ABI3.17 */
+#define SOF_IPC_TRACE_DMA_FREE SOF_CMD_TYPE(0x005) /**< ABI3.20 */
/* debug */
-#define SOF_IPC_TEST_IPC_FLOOD SOF_CMD_TYPE(0x001)
+#define SOF_IPC_DEBUG_MEM_USAGE SOF_CMD_TYPE(0x001)
+
+/* test */
+#define SOF_IPC_TEST_IPC_FLOOD SOF_CMD_TYPE(0x001)
/* Get message component id */
#define SOF_IPC_MESSAGE_ID(x) ((x) & 0xffff)
diff --git a/include/sound/sof/info.h b/include/sound/sof/info.h
index 1c560144996c..65e86e4e9fd8 100644
--- a/include/sound/sof/info.h
+++ b/include/sound/sof/info.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -25,12 +25,15 @@
#define SOF_IPC_INFO_LOCKS BIT(1)
#define SOF_IPC_INFO_LOCKSV BIT(2)
#define SOF_IPC_INFO_GDB BIT(3)
+#define SOF_IPC_INFO_D3_PERSISTENT BIT(4)
/* extended data types that can be appended onto end of sof_ipc_fw_ready */
enum sof_ipc_ext_data {
- SOF_IPC_EXT_DMA_BUFFER = 0,
- SOF_IPC_EXT_WINDOW,
- SOF_IPC_EXT_CC_INFO,
+ SOF_IPC_EXT_UNUSED = 0,
+ SOF_IPC_EXT_WINDOW = 1,
+ SOF_IPC_EXT_CC_INFO = 2,
+ SOF_IPC_EXT_PROBE_INFO = 3,
+ SOF_IPC_EXT_USER_ABI_INFO = 4,
};
/* FW version - SOF_IPC_GLB_VERSION */
@@ -44,9 +47,11 @@ struct sof_ipc_fw_version {
uint8_t time[10];
uint8_t tag[6];
uint32_t abi_version;
+ /* used to check FW and ldc file compatibility, reproducible value */
+ uint32_t src_hash;
/* reserved for future use */
- uint32_t reserved[4];
+ uint32_t reserved[3];
} __packed;
/* FW ready Message - sent by firmware when boot has completed */
@@ -83,22 +88,6 @@ struct sof_ipc_ext_data_hdr {
uint32_t type; /**< SOF_IPC_EXT_ */
} __packed;
-struct sof_ipc_dma_buffer_elem {
- struct sof_ipc_hdr hdr;
- uint32_t type; /**< SOF_IPC_REGION_ */
- uint32_t id; /**< platform specific - used to map to host memory */
- struct sof_ipc_host_buffer buffer;
-} __packed;
-
-/* extended data DMA buffers for IPC, trace and debug */
-struct sof_ipc_dma_buffer_data {
- struct sof_ipc_ext_data_hdr ext_hdr;
- uint32_t num_buffers;
-
- /* host files in buffer[n].buffer */
- struct sof_ipc_dma_buffer_elem buffer[];
-} __packed;
-
struct sof_ipc_window_elem {
struct sof_ipc_hdr hdr;
uint32_t type; /**< SOF_IPC_REGION_ */
@@ -113,7 +102,7 @@ struct sof_ipc_window_elem {
struct sof_ipc_window {
struct sof_ipc_ext_data_hdr ext_hdr;
uint32_t num_windows;
- struct sof_ipc_window_elem window[];
+ struct sof_ipc_window_elem window[SOF_IPC_MAX_ELEMS];
} __packed;
struct sof_ipc_cc_version {
@@ -125,9 +114,27 @@ struct sof_ipc_cc_version {
/* reserved for future use */
uint32_t reserved[4];
- char name[16]; /* null terminated compiler name */
- char optim[4]; /* null terminated compiler -O flag value */
- char desc[]; /* null terminated compiler description */
+ uint8_t name[16]; /* null terminated compiler name */
+ uint8_t optim[4]; /* null terminated compiler -O flag value */
+ uint8_t desc[32]; /* null terminated compiler description */
} __packed;
+/* extended data: Probe setup */
+struct sof_ipc_probe_support {
+ struct sof_ipc_ext_data_hdr ext_hdr;
+
+ uint32_t probe_points_max;
+ uint32_t injection_dmas_max;
+
+ /* reserved for future use */
+ uint32_t reserved[2];
+} __packed;
+
+/* extended data: user abi version(s) */
+struct sof_ipc_user_abi_version {
+ struct sof_ipc_ext_data_hdr ext_hdr;
+
+ uint32_t abi_dbg_version;
+} __packed;
+
#endif
diff --git a/include/sound/sof/ipc4/header.h b/include/sound/sof/ipc4/header.h
new file mode 100644
index 000000000000..99efe0ef1784
--- /dev/null
+++ b/include/sound/sof/ipc4/header.h
@@ -0,0 +1,473 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2022 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_IPC4_HEADER_H__
+#define __INCLUDE_SOUND_SOF_IPC4_HEADER_H__
+
+#include <linux/types.h>
+#include <uapi/sound/sof/abi.h>
+
+/* maximum message size for mailbox Tx/Rx */
+#define SOF_IPC4_MSG_MAX_SIZE 4096
+
+/** \addtogroup sof_uapi uAPI
+ * SOF uAPI specification.
+ * @{
+ */
+
+/**
+ * struct sof_ipc4_msg - Placeholder of an IPC4 message
+ * @header_u64: IPC4 header as single u64 number
+ * @primary: Primary, mandatory part of the header
+ * @extension: Extended part of the header; if not used, it should be
+ * set to 0
+ * @data_size: Size of data in bytes pointed by @data_ptr
+ * @data_ptr: Pointer to the optional payload of a message
+ */
+struct sof_ipc4_msg {
+ union {
+ u64 header_u64;
+ struct {
+ u32 primary;
+ u32 extension;
+ };
+ };
+
+ size_t data_size;
+ void *data_ptr;
+};
+
+/**
+ * struct sof_ipc4_tuple - Generic type/ID and parameter tuple
+ * @type: type/ID
+ * @size: size of the @value array in bytes
+ * @value: value for the given type
+ */
+struct sof_ipc4_tuple {
+ uint32_t type;
+ uint32_t size;
+ uint32_t value[];
+} __packed;
+
+/*
+ * IPC4 messages have two 32-bit identifiers, made up as follows:
+ *
+ * header - msg type, msg id, msg direction ...
+ * extension - extra params such as msg data size in mailbox
+ *
+ * These are sent at the start of the IPC message in the mailbox. Messages
+ * should not be sent in the doorbell (special exceptions for firmware).
+ */
+
+/*
+ * IPC4 primary header bit allocation for messages
+ * bit 0-23: message type specific
+ * bit 24-28: type: enum sof_ipc4_global_msg if target is SOF_IPC4_FW_GEN_MSG
+ * enum sof_ipc4_module_type if target is SOF_IPC4_MODULE_MSG
+ * bit 29: response - sof_ipc4_msg_dir
+ * bit 30: target - enum sof_ipc4_msg_target
+ * bit 31: reserved, unused
+ */
+
+/* Value of target field - must fit into 1 bit */
+enum sof_ipc4_msg_target {
+ /* Global FW message */
+ SOF_IPC4_FW_GEN_MSG,
+
+ /* Module message */
+ SOF_IPC4_MODULE_MSG
+};
+
+/* Value of type field - must fit into 5 bits */
+enum sof_ipc4_global_msg {
+ SOF_IPC4_GLB_BOOT_CONFIG,
+ SOF_IPC4_GLB_ROM_CONTROL,
+ SOF_IPC4_GLB_IPCGATEWAY_CMD,
+
+ /* 3 .. 12: RESERVED - do not use */
+
+ SOF_IPC4_GLB_PERF_MEASUREMENTS_CMD = 13,
+ SOF_IPC4_GLB_CHAIN_DMA,
+
+ SOF_IPC4_GLB_LOAD_MULTIPLE_MODULES,
+ SOF_IPC4_GLB_UNLOAD_MULTIPLE_MODULES,
+
+ /* pipeline settings */
+ SOF_IPC4_GLB_CREATE_PIPELINE,
+ SOF_IPC4_GLB_DELETE_PIPELINE,
+ SOF_IPC4_GLB_SET_PIPELINE_STATE,
+ SOF_IPC4_GLB_GET_PIPELINE_STATE,
+ SOF_IPC4_GLB_GET_PIPELINE_CONTEXT_SIZE,
+ SOF_IPC4_GLB_SAVE_PIPELINE,
+ SOF_IPC4_GLB_RESTORE_PIPELINE,
+
+ /* Loads library (using Code Load or HD/A Host Output DMA) */
+ SOF_IPC4_GLB_LOAD_LIBRARY,
+
+ /* 25: RESERVED - do not use */
+
+ SOF_IPC4_GLB_INTERNAL_MESSAGE = 26,
+
+ /* Notification (FW to SW driver) */
+ SOF_IPC4_GLB_NOTIFICATION,
+
+ /* 28 .. 31: RESERVED - do not use */
+
+ SOF_IPC4_GLB_TYPE_LAST,
+};
+
+/* Value of response field - must fit into 1 bit */
+enum sof_ipc4_msg_dir {
+ SOF_IPC4_MSG_REQUEST,
+ SOF_IPC4_MSG_REPLY,
+};
+
+enum sof_ipc4_pipeline_state {
+ SOF_IPC4_PIPE_INVALID_STATE,
+ SOF_IPC4_PIPE_UNINITIALIZED,
+ SOF_IPC4_PIPE_RESET,
+ SOF_IPC4_PIPE_PAUSED,
+ SOF_IPC4_PIPE_RUNNING,
+ SOF_IPC4_PIPE_EOS
+};
+
+/* Generic message fields (bit 24-30) */
+
+/* encoded to header's msg_tgt field */
+#define SOF_IPC4_MSG_TARGET_SHIFT 30
+#define SOF_IPC4_MSG_TARGET_MASK BIT(30)
+#define SOF_IPC4_MSG_TARGET(x) ((x) << SOF_IPC4_MSG_TARGET_SHIFT)
+#define SOF_IPC4_MSG_IS_MODULE_MSG(x) ((x) & SOF_IPC4_MSG_TARGET_MASK ? 1 : 0)
+
+/* encoded to header's rsp field */
+#define SOF_IPC4_MSG_DIR_SHIFT 29
+#define SOF_IPC4_MSG_DIR_MASK BIT(29)
+#define SOF_IPC4_MSG_DIR(x) ((x) << SOF_IPC4_MSG_DIR_SHIFT)
+
+/* encoded to header's type field */
+#define SOF_IPC4_MSG_TYPE_SHIFT 24
+#define SOF_IPC4_MSG_TYPE_MASK GENMASK(28, 24)
+#define SOF_IPC4_MSG_TYPE_SET(x) (((x) << SOF_IPC4_MSG_TYPE_SHIFT) & \
+ SOF_IPC4_MSG_TYPE_MASK)
+#define SOF_IPC4_MSG_TYPE_GET(x) (((x) & SOF_IPC4_MSG_TYPE_MASK) >> \
+ SOF_IPC4_MSG_TYPE_SHIFT)
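A minimal sketch of building a primary header word with these accessors, mirroring the bit allocation documented above:

/* global "create pipeline" request */
u32 primary = SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_CREATE_PIPELINE) |
	      SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST) |
	      SOF_IPC4_MSG_TARGET(SOF_IPC4_FW_GEN_MSG);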
+
+/* Global message type specific field definitions */
+
+/* pipeline creation ipc msg */
+#define SOF_IPC4_GLB_PIPE_INSTANCE_SHIFT 16
+#define SOF_IPC4_GLB_PIPE_INSTANCE_MASK GENMASK(23, 16)
+#define SOF_IPC4_GLB_PIPE_INSTANCE_ID(x) ((x) << SOF_IPC4_GLB_PIPE_INSTANCE_SHIFT)
+
+#define SOF_IPC4_GLB_PIPE_PRIORITY_SHIFT 11
+#define SOF_IPC4_GLB_PIPE_PRIORITY_MASK GENMASK(15, 11)
+#define SOF_IPC4_GLB_PIPE_PRIORITY(x) ((x) << SOF_IPC4_GLB_PIPE_PRIORITY_SHIFT)
+
+#define SOF_IPC4_GLB_PIPE_MEM_SIZE_SHIFT 0
+#define SOF_IPC4_GLB_PIPE_MEM_SIZE_MASK GENMASK(10, 0)
+#define SOF_IPC4_GLB_PIPE_MEM_SIZE(x) ((x) << SOF_IPC4_GLB_PIPE_MEM_SIZE_SHIFT)
+
+#define SOF_IPC4_GLB_PIPE_EXT_LP_SHIFT 0
+#define SOF_IPC4_GLB_PIPE_EXT_LP_MASK BIT(0)
+#define SOF_IPC4_GLB_PIPE_EXT_LP(x) ((x) << SOF_IPC4_GLB_PIPE_EXT_LP_SHIFT)
+
+/* pipeline set state ipc msg */
+#define SOF_IPC4_GLB_PIPE_STATE_ID_SHIFT 16
+#define SOF_IPC4_GLB_PIPE_STATE_ID_MASK GENMASK(23, 16)
+#define SOF_IPC4_GLB_PIPE_STATE_ID(x) ((x) << SOF_IPC4_GLB_PIPE_STATE_ID_SHIFT)
+
+#define SOF_IPC4_GLB_PIPE_STATE_SHIFT 0
+#define SOF_IPC4_GLB_PIPE_STATE_MASK GENMASK(15, 0)
+#define SOF_IPC4_GLB_PIPE_STATE(x) ((x) << SOF_IPC4_GLB_PIPE_STATE_SHIFT)
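Continuing the sketch above, the creation-specific fields pack into the message-specific bits 0..23 of the same primary word; pipe_id, prio and mem_pages are hypothetical values:

primary |= SOF_IPC4_GLB_PIPE_INSTANCE_ID(pipe_id) |
	   SOF_IPC4_GLB_PIPE_PRIORITY(prio) |
	   SOF_IPC4_GLB_PIPE_MEM_SIZE(mem_pages);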
+
+enum sof_ipc4_channel_config {
+ /* one channel only. */
+ SOF_IPC4_CHANNEL_CONFIG_MONO,
+ /* L & R. */
+ SOF_IPC4_CHANNEL_CONFIG_STEREO,
+ /* L, R & LFE; PCM only. */
+ SOF_IPC4_CHANNEL_CONFIG_2_POINT_1,
+ /* L, C & R; MP3 & AAC only. */
+ SOF_IPC4_CHANNEL_CONFIG_3_POINT_0,
+ /* L, C, R & LFE; PCM only. */
+ SOF_IPC4_CHANNEL_CONFIG_3_POINT_1,
+ /* L, R, Ls & Rs; PCM only. */
+ SOF_IPC4_CHANNEL_CONFIG_QUATRO,
+ /* L, C, R & Cs; MP3 & AAC only. */
+ SOF_IPC4_CHANNEL_CONFIG_4_POINT_0,
+ /* L, C, R, Ls & Rs. */
+ SOF_IPC4_CHANNEL_CONFIG_5_POINT_0,
+ /* L, C, R, Ls, Rs & LFE. */
+ SOF_IPC4_CHANNEL_CONFIG_5_POINT_1,
+ /* one channel replicated in two. */
+ SOF_IPC4_CHANNEL_CONFIG_DUAL_MONO,
+ /* Stereo (L,R) in 4 slots, 1st stream: [ L, R, -, - ] */
+ SOF_IPC4_CHANNEL_CONFIG_I2S_DUAL_STEREO_0,
+ /* Stereo (L,R) in 4 slots, 2nd stream: [ -, -, L, R ] */
+ SOF_IPC4_CHANNEL_CONFIG_I2S_DUAL_STEREO_1,
+ /* L, C, R, Ls, Rs, LFE, LS & RS */
+ SOF_IPC4_CHANNEL_CONFIG_7_POINT_1,
+};
+
+enum sof_ipc4_interleaved_style {
+ SOF_IPC4_CHANNELS_INTERLEAVED,
+ SOF_IPC4_CHANNELS_NONINTERLEAVED,
+};
+
+enum sof_ipc4_sample_type {
+ SOF_IPC4_MSB_INTEGER, /* integer with Most Significant Byte first */
+ SOF_IPC4_LSB_INTEGER, /* integer with Least Significant Byte first */
+};
+
+struct sof_ipc4_audio_format {
+ uint32_t sampling_frequency;
+ uint32_t bit_depth;
+ uint32_t ch_map;
+ uint32_t ch_cfg; /* sof_ipc4_channel_config */
+ uint32_t interleaving_style;
+ uint32_t fmt_cfg; /* channels_count valid_bit_depth s_type */
+} __packed __aligned(4);
+
+#define SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT_SHIFT 0
+#define SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT_MASK GENMASK(7, 0)
+#define SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(x) \
+ ((x) & SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT_MASK)
+#define SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH_SHIFT 8
+#define SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH_MASK GENMASK(15, 8)
+#define SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(x) \
+ (((x) & SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH_MASK) >> \
+ SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH_SHIFT)
+#define SOF_IPC4_AUDIO_FORMAT_CFG_SAMPLE_TYPE_SHIFT 16
+#define SOF_IPC4_AUDIO_FORMAT_CFG_SAMPLE_TYPE_MASK GENMASK(23, 16)
+#define SOF_IPC4_AUDIO_FORMAT_CFG_SAMPLE_TYPE(x) \
+ (((x) & SOF_IPC4_AUDIO_FORMAT_CFG_SAMPLE_TYPE_MASK) >> \
+ SOF_IPC4_AUDIO_FORMAT_CFG_SAMPLE_TYPE_SHIFT)
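Note the asymmetry in these helpers: CHANNELS_COUNT() only masks and so can pack a value, while V_BIT_DEPTH() and SAMPLE_TYPE() extract (shift right), so packing those fields uses the _SHIFT constants directly. A hedged sketch:

/* 2 channels, 24 valid bits, LSB-first integer samples */
uint32_t fmt_cfg = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(2) |
		   (24 << SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH_SHIFT) |
		   (SOF_IPC4_LSB_INTEGER <<
		    SOF_IPC4_AUDIO_FORMAT_CFG_SAMPLE_TYPE_SHIFT);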
+
+/* Module message type specific field definitions */
+
+enum sof_ipc4_module_type {
+ SOF_IPC4_MOD_INIT_INSTANCE,
+ SOF_IPC4_MOD_CONFIG_GET,
+ SOF_IPC4_MOD_CONFIG_SET,
+ SOF_IPC4_MOD_LARGE_CONFIG_GET,
+ SOF_IPC4_MOD_LARGE_CONFIG_SET,
+ SOF_IPC4_MOD_BIND,
+ SOF_IPC4_MOD_UNBIND,
+ SOF_IPC4_MOD_SET_DX,
+ SOF_IPC4_MOD_SET_D0IX,
+ SOF_IPC4_MOD_ENTER_MODULE_RESTORE,
+ SOF_IPC4_MOD_EXIT_MODULE_RESTORE,
+ SOF_IPC4_MOD_DELETE_INSTANCE,
+
+ SOF_IPC4_MOD_TYPE_LAST,
+};
+
+struct sof_ipc4_base_module_cfg {
+ uint32_t cpc; /* the max count of Cycles Per Chunk processing */
+ uint32_t ibs; /* input Buffer Size (in bytes) */
+ uint32_t obs; /* output Buffer Size (in bytes) */
+ uint32_t is_pages; /* number of physical pages used */
+ struct sof_ipc4_audio_format audio_fmt;
+} __packed __aligned(4);
+
+/* common module ipc msg */
+#define SOF_IPC4_MOD_INSTANCE_SHIFT 16
+#define SOF_IPC4_MOD_INSTANCE_MASK GENMASK(23, 16)
+#define SOF_IPC4_MOD_INSTANCE(x) ((x) << SOF_IPC4_MOD_INSTANCE_SHIFT)
+
+#define SOF_IPC4_MOD_ID_SHIFT 0
+#define SOF_IPC4_MOD_ID_MASK GENMASK(15, 0)
+#define SOF_IPC4_MOD_ID(x) ((x) << SOF_IPC4_MOD_ID_SHIFT)
+
+/* init module ipc msg */
+#define SOF_IPC4_MOD_EXT_PARAM_SIZE_SHIFT 0
+#define SOF_IPC4_MOD_EXT_PARAM_SIZE_MASK GENMASK(15, 0)
+#define SOF_IPC4_MOD_EXT_PARAM_SIZE(x) ((x) << SOF_IPC4_MOD_EXT_PARAM_SIZE_SHIFT)
+
+#define SOF_IPC4_MOD_EXT_PPL_ID_SHIFT 16
+#define SOF_IPC4_MOD_EXT_PPL_ID_MASK GENMASK(23, 16)
+#define SOF_IPC4_MOD_EXT_PPL_ID(x) ((x) << SOF_IPC4_MOD_EXT_PPL_ID_SHIFT)
+
+#define SOF_IPC4_MOD_EXT_CORE_ID_SHIFT 24
+#define SOF_IPC4_MOD_EXT_CORE_ID_MASK GENMASK(27, 24)
+#define SOF_IPC4_MOD_EXT_CORE_ID(x) ((x) << SOF_IPC4_MOD_EXT_CORE_ID_SHIFT)
+
+#define SOF_IPC4_MOD_EXT_DOMAIN_SHIFT 28
+#define SOF_IPC4_MOD_EXT_DOMAIN_MASK BIT(28)
+#define SOF_IPC4_MOD_EXT_DOMAIN(x) ((x) << SOF_IPC4_MOD_EXT_DOMAIN_SHIFT)
+
+/* bind/unbind module ipc msg */
+#define SOF_IPC4_MOD_EXT_DST_MOD_ID_SHIFT 0
+#define SOF_IPC4_MOD_EXT_DST_MOD_ID_MASK GENMASK(15, 0)
+#define SOF_IPC4_MOD_EXT_DST_MOD_ID(x) ((x) << SOF_IPC4_MOD_EXT_DST_MOD_ID_SHIFT)
+
+#define SOF_IPC4_MOD_EXT_DST_MOD_INSTANCE_SHIFT 16
+#define SOF_IPC4_MOD_EXT_DST_MOD_INSTANCE_MASK GENMASK(23, 16)
+#define SOF_IPC4_MOD_EXT_DST_MOD_INSTANCE(x) ((x) << SOF_IPC4_MOD_EXT_DST_MOD_INSTANCE_SHIFT)
+
+#define SOF_IPC4_MOD_EXT_DST_MOD_QUEUE_ID_SHIFT 24
+#define SOF_IPC4_MOD_EXT_DST_MOD_QUEUE_ID_MASK GENMASK(26, 24)
+#define SOF_IPC4_MOD_EXT_DST_MOD_QUEUE_ID(x) ((x) << SOF_IPC4_MOD_EXT_DST_MOD_QUEUE_ID_SHIFT)
+
+#define SOF_IPC4_MOD_EXT_SRC_MOD_QUEUE_ID_SHIFT 27
+#define SOF_IPC4_MOD_EXT_SRC_MOD_QUEUE_ID_MASK GENMASK(29, 27)
+#define SOF_IPC4_MOD_EXT_SRC_MOD_QUEUE_ID(x) ((x) << SOF_IPC4_MOD_EXT_SRC_MOD_QUEUE_ID_SHIFT)
+
+#define MOD_ENABLE_LOG 6
+#define MOD_SYSTEM_TIME 20
+
+/* set module large config */
+#define SOF_IPC4_MOD_EXT_MSG_SIZE_SHIFT 0
+#define SOF_IPC4_MOD_EXT_MSG_SIZE_MASK GENMASK(19, 0)
+#define SOF_IPC4_MOD_EXT_MSG_SIZE(x) ((x) << SOF_IPC4_MOD_EXT_MSG_SIZE_SHIFT)
+
+#define SOF_IPC4_MOD_EXT_MSG_PARAM_ID_SHIFT 20
+#define SOF_IPC4_MOD_EXT_MSG_PARAM_ID_MASK GENMASK(27, 20)
+#define SOF_IPC4_MOD_EXT_MSG_PARAM_ID(x) ((x) << SOF_IPC4_MOD_EXT_MSG_PARAM_ID_SHIFT)
+
+#define SOF_IPC4_MOD_EXT_MSG_LAST_BLOCK_SHIFT 28
+#define SOF_IPC4_MOD_EXT_MSG_LAST_BLOCK_MASK BIT(28)
+#define SOF_IPC4_MOD_EXT_MSG_LAST_BLOCK(x) ((x) << SOF_IPC4_MOD_EXT_MSG_LAST_BLOCK_SHIFT)
+
+#define SOF_IPC4_MOD_EXT_MSG_FIRST_BLOCK_SHIFT 29
+#define SOF_IPC4_MOD_EXT_MSG_FIRST_BLOCK_MASK BIT(29)
+#define SOF_IPC4_MOD_EXT_MSG_FIRST_BLOCK(x) ((x) << SOF_IPC4_MOD_EXT_MSG_FIRST_BLOCK_SHIFT)
+
+/* Init instance messages */
+#define SOF_IPC4_MOD_INIT_BASEFW_MOD_ID 0
+#define SOF_IPC4_MOD_INIT_BASEFW_INSTANCE_ID 0
+
+enum sof_ipc4_base_fw_params {
+ SOF_IPC4_FW_PARAM_ENABLE_LOGS = 6,
+ SOF_IPC4_FW_PARAM_FW_CONFIG,
+ SOF_IPC4_FW_PARAM_HW_CONFIG_GET,
+ SOF_IPC4_FW_PARAM_MODULES_INFO_GET,
+ SOF_IPC4_FW_PARAM_LIBRARIES_INFO_GET = 16,
+ SOF_IPC4_FW_PARAM_SYSTEM_TIME = 20,
+};
+
+enum sof_ipc4_fw_config_params {
+ SOF_IPC4_FW_CFG_FW_VERSION,
+ SOF_IPC4_FW_CFG_MEMORY_RECLAIMED,
+ SOF_IPC4_FW_CFG_SLOW_CLOCK_FREQ_HZ,
+ SOF_IPC4_FW_CFG_FAST_CLOCK_FREQ_HZ,
+ SOF_IPC4_FW_CFG_DMA_BUFFER_CONFIG,
+ SOF_IPC4_FW_CFG_ALH_SUPPORT_LEVEL,
+ SOF_IPC4_FW_CFG_DL_MAILBOX_BYTES,
+ SOF_IPC4_FW_CFG_UL_MAILBOX_BYTES,
+ SOF_IPC4_FW_CFG_TRACE_LOG_BYTES,
+ SOF_IPC4_FW_CFG_MAX_PPL_COUNT,
+ SOF_IPC4_FW_CFG_MAX_ASTATE_COUNT,
+ SOF_IPC4_FW_CFG_MAX_MODULE_PIN_COUNT,
+ SOF_IPC4_FW_CFG_MODULES_COUNT,
+ SOF_IPC4_FW_CFG_MAX_MOD_INST_COUNT,
+ SOF_IPC4_FW_CFG_MAX_LL_TASKS_PER_PRI_COUNT,
+ SOF_IPC4_FW_CFG_LL_PRI_COUNT,
+ SOF_IPC4_FW_CFG_MAX_DP_TASKS_COUNT,
+ SOF_IPC4_FW_CFG_MAX_LIBS_COUNT,
+ SOF_IPC4_FW_CFG_SCHEDULER_CONFIG,
+ SOF_IPC4_FW_CFG_XTAL_FREQ_HZ,
+ SOF_IPC4_FW_CFG_CLOCKS_CONFIG,
+ SOF_IPC4_FW_CFG_RESERVED,
+ SOF_IPC4_FW_CFG_POWER_GATING_POLICY,
+ SOF_IPC4_FW_CFG_ASSERT_MODE,
+};
+
+struct sof_ipc4_fw_version {
+ uint16_t major;
+ uint16_t minor;
+ uint16_t hotfix;
+ uint16_t build;
+} __packed;
+
+/* Payload data for SOF_IPC4_MOD_SET_DX */
+struct sof_ipc4_dx_state_info {
+ /* core(s) to apply the change */
+ uint32_t core_mask;
+ /* core state: 0: put core_id to D3; 1: put core_id to D0 */
+ uint32_t dx_mask;
+} __packed __aligned(4);
+
+/* Reply messages */
+
+/*
+ * IPC4 primary header bit allocation for replies
+ * bit 0-23: status
+ * bit 24-28: type: enum sof_ipc4_global_msg if target is SOF_IPC4_FW_GEN_MSG
+ * enum sof_ipc4_module_type if target is SOF_IPC4_MODULE_MSG
+ * bit 29: response - sof_ipc4_msg_dir
+ * bit 30: target - enum sof_ipc4_msg_target
+ * bit 31: reserved, unused
+ */
+
+#define SOF_IPC4_REPLY_STATUS GENMASK(23, 0)
+
+/* Notification messages */
+
+/*
+ * IPC4 primary header bit allocation for notifications
+ * bit 0-15: notification type specific
+ * bit 16-23: enum sof_ipc4_notification_type
+ * bit 24-28: SOF_IPC4_GLB_NOTIFICATION
+ * bit 29: response - sof_ipc4_msg_dir
+ * bit 30: target - enum sof_ipc4_msg_target
+ * bit 31: reserved, unused
+ */
+
+#define SOF_IPC4_MSG_IS_NOTIFICATION(x) (SOF_IPC4_MSG_TYPE_GET(x) == \
+ SOF_IPC4_GLB_NOTIFICATION)
+
+#define SOF_IPC4_NOTIFICATION_TYPE_SHIFT 16
+#define SOF_IPC4_NOTIFICATION_TYPE_MASK GENMASK(23, 16)
+#define SOF_IPC4_NOTIFICATION_TYPE_GET(x) (((x) & SOF_IPC4_NOTIFICATION_TYPE_MASK) >> \
+ SOF_IPC4_NOTIFICATION_TYPE_SHIFT)
+
+#define SOF_IPC4_LOG_CORE_SHIFT 12
+#define SOF_IPC4_LOG_CORE_MASK GENMASK(15, 12)
+#define SOF_IPC4_LOG_CORE_GET(x) (((x) & SOF_IPC4_LOG_CORE_MASK) >> \
+ SOF_IPC4_LOG_CORE_SHIFT)
+
+/* Value of notification type field - must fit into 8 bits */
+enum sof_ipc4_notification_type {
+ /* Phrase detected (notification from WoV module) */
+ SOF_IPC4_NOTIFY_PHRASE_DETECTED = 4,
+ /* Event from a resource (pipeline or module instance) */
+ SOF_IPC4_NOTIFY_RESOURCE_EVENT,
+ /* Debug log buffer status changed */
+ SOF_IPC4_NOTIFY_LOG_BUFFER_STATUS,
+ /* Timestamp captured at the link */
+ SOF_IPC4_NOTIFY_TIMESTAMP_CAPTURED,
+ /* FW complete initialization */
+ SOF_IPC4_NOTIFY_FW_READY,
+ /* Audio classifier result (ACA) */
+ SOF_IPC4_NOTIFY_FW_AUD_CLASS_RESULT,
+ /* Exception caught by DSP FW */
+ SOF_IPC4_NOTIFY_EXCEPTION_CAUGHT,
+ /* 11 is skipped by the existing cavs firmware */
+ /* Custom module notification */
+ SOF_IPC4_NOTIFY_MODULE_NOTIFICATION = 12,
+ /* 13 is reserved - do not use */
+ /* Probe notify data available */
+ SOF_IPC4_NOTIFY_PROBE_DATA_AVAILABLE = 14,
+ /* AM module notifications */
+ SOF_IPC4_NOTIFY_ASYNC_MSG_SRVC_MESSAGE,
+
+ SOF_IPC4_NOTIFY_TYPE_LAST,
+};
+
+struct sof_ipc4_notify_resource_data {
+ uint32_t resource_type;
+ uint32_t resource_id;
+ uint32_t event_type;
+ uint32_t reserved;
+ uint32_t data[6];
+} __packed __aligned(4);
+
+/** @}*/
+
+#endif
diff --git a/include/sound/sof/pm.h b/include/sound/sof/pm.h
index 3cf2e0f39d94..366aa6ec442b 100644
--- a/include/sound/sof/pm.h
+++ b/include/sound/sof/pm.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/stream.h b/include/sound/sof/stream.h
index 7facefb541b3..9377113f13e4 100644
--- a/include/sound/sof/stream.h
+++ b/include/sound/sof/stream.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -85,9 +85,12 @@ struct sof_ipc_stream_params {
uint32_t host_period_bytes;
uint16_t no_stream_position; /**< 1 means don't send stream position */
-
- uint16_t reserved[3];
+ uint8_t cont_update_posn; /**< 1 means continuous update stream position */
+ uint8_t reserved0;
+ int16_t ext_data_length; /**< 0, means no extended data */
+ uint8_t reserved[2];
uint16_t chmap[SOF_IPC_MAX_CHANNELS]; /**< channel map - SOF_CHMAP_ */
+ uint8_t ext_data[]; /**< extended data */
} __packed;
/* PCM params info - SOF_IPC_STREAM_PCM_PARAMS */
diff --git a/include/sound/sof/topology.h b/include/sound/sof/topology.h
index 8e76178fedf0..88560281d420 100644
--- a/include/sound/sof/topology.h
+++ b/include/sound/sof/topology.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -37,6 +37,8 @@ enum sof_comp_type {
SOF_COMP_SELECTOR, /**< channel selector component */
SOF_COMP_DEMUX,
SOF_COMP_ASRC, /**< Asynchronous sample rate converter */
+ SOF_COMP_DCBLOCK,
+ SOF_COMP_SMART_AMP, /**< smart amplifier component */
/* keep FILEREAD/FILEWRITE as the last ones */
SOF_COMP_FILEREAD = 10000, /**< host test based file IO */
SOF_COMP_FILEWRITE = 10001, /**< host test based file IO */
@@ -53,9 +55,10 @@ struct sof_ipc_comp {
uint32_t id;
enum sof_comp_type type;
uint32_t pipeline_id;
+ uint32_t core;
- /* reserved for future use */
- uint32_t reserved[2];
+ /* extended data length, 0 if no extended data */
+ uint32_t ext_data_length;
} __packed;
/*
@@ -74,11 +77,23 @@ struct sof_ipc_comp {
#define SOF_MEM_CAPS_CACHE (1 << 6) /**< cacheable */
#define SOF_MEM_CAPS_EXEC (1 << 7) /**< executable */
+/*
+ * overrun will cause ring buffer overwrite, instead of XRUN.
+ */
+#define SOF_BUF_OVERRUN_PERMITTED BIT(0)
+
+/*
+ * underrun will cause readback of 0s, instead of XRUN.
+ */
+#define SOF_BUF_UNDERRUN_PERMITTED BIT(1)
+
/* create new component buffer - SOF_IPC_TPLG_BUFFER_NEW */
struct sof_ipc_buffer {
struct sof_ipc_comp comp;
uint32_t size; /**< buffer size in bytes */
uint32_t caps; /**< SOF_MEM_CAPS_ */
+ uint32_t flags; /**< SOF_BUF_ flags defined above */
+ uint32_t reserved; /**< reserved for future use */
} __packed;
/* generic component config data - must always be after struct sof_ipc_comp */
@@ -205,6 +220,8 @@ enum sof_ipc_process_type {
SOF_PROCESS_CHAN_SELECTOR, /**< Channel Selector */
SOF_PROCESS_MUX,
SOF_PROCESS_DEMUX,
+ SOF_PROCESS_DCBLOCK,
+ SOF_PROCESS_SMART_AMP, /**< Smart Amplifier */
};
/* generic "effect", "codec" or proprietary processing component */
@@ -217,7 +234,7 @@ struct sof_ipc_comp_process {
/* reserved for future use */
uint32_t reserved[7];
- unsigned char data[0];
+ uint8_t data[];
} __packed;
/* frees components, buffers and pipelines
diff --git a/include/sound/sof/trace.h b/include/sound/sof/trace.h
index fda6e8f6ead4..25ea99f62d37 100644
--- a/include/sound/sof/trace.h
+++ b/include/sound/sof/trace.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -43,6 +43,34 @@ struct sof_ipc_dma_trace_posn {
uint32_t messages; /* total trace messages */
} __packed;
+/* Values used in sof_ipc_trace_filter_elem: */
+
+/* bits 6..0 */
+#define SOF_IPC_TRACE_FILTER_ELEM_SET_LEVEL 0x01 /**< trace level for selected components */
+#define SOF_IPC_TRACE_FILTER_ELEM_BY_UUID 0x02 /**< filter by uuid key */
+#define SOF_IPC_TRACE_FILTER_ELEM_BY_PIPE 0x03 /**< filter by pipeline */
+#define SOF_IPC_TRACE_FILTER_ELEM_BY_COMP 0x04 /**< filter by component id */
+
+/* bit 7 */
+#define SOF_IPC_TRACE_FILTER_ELEM_FIN 0x80 /**< mark last filter in set */
+
+/* bits 31..8: Unused */
+
+/** part of sof_ipc_trace_filter, ABI3.17 */
+struct sof_ipc_trace_filter_elem {
+ uint32_t key; /**< SOF_IPC_TRACE_FILTER_ELEM_ {LEVEL, UUID, COMP, PIPE} */
+ uint32_t value; /**< element value */
+} __packed;
+
+/** Runtime tracing filtration data - SOF_IPC_TRACE_FILTER_UPDATE, ABI3.17 */
+struct sof_ipc_trace_filter {
+ struct sof_ipc_cmd_hdr hdr; /**< IPC command header */
+ uint32_t elem_cnt; /**< number of entries in elems[] array */
+ uint32_t reserved[8]; /**< reserved for future usage */
+ /** variable size array with new filtering settings */
+ struct sof_ipc_trace_filter_elem elems[];
+} __packed;
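A hedged sketch of a two-element filter set: select a component by id, then set its trace level and mark the element as the last of the set; comp_id and the level value are hypothetical:

uint32_t comp_id = 42;	/* hypothetical component id */

struct sof_ipc_trace_filter_elem elems[] = {
	{ .key = SOF_IPC_TRACE_FILTER_ELEM_BY_COMP,  .value = comp_id },
	{ .key = SOF_IPC_TRACE_FILTER_ELEM_SET_LEVEL |
		 SOF_IPC_TRACE_FILTER_ELEM_FIN,      .value = 4 /* level */ },
};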
+
/*
 * Common debug
*/
@@ -72,7 +100,7 @@ struct sof_ipc_dma_trace_posn {
struct sof_ipc_panic_info {
struct sof_ipc_hdr hdr;
uint32_t code; /* SOF_IPC_PANIC_ */
- char filename[SOF_TRACE_FILENAME_SIZE];
+ uint8_t filename[SOF_TRACE_FILENAME_SIZE];
uint32_t linenum;
} __packed;
diff --git a/include/sound/sof/xtensa.h b/include/sound/sof/xtensa.h
index dd53d36b34e1..87a07e520415 100644
--- a/include/sound/sof/xtensa.h
+++ b/include/sound/sof/xtensa.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/include/sound/timer.h b/include/sound/timer.h
index 23e885d31525..760e132cc0cd 100644
--- a/include/sound/timer.h
+++ b/include/sound/timer.h
@@ -21,13 +21,13 @@
#define SNDRV_TIMER_HW_STOP 0x00000002 /* call stop before start */
#define SNDRV_TIMER_HW_SLAVE 0x00000004 /* only slave timer (variable resolution) */
#define SNDRV_TIMER_HW_FIRST 0x00000008 /* first tick can be incomplete */
-#define SNDRV_TIMER_HW_TASKLET 0x00000010 /* timer is called from tasklet */
+#define SNDRV_TIMER_HW_WORK 0x00000010 /* timer is called from work */
#define SNDRV_TIMER_IFLG_SLAVE 0x00000001
#define SNDRV_TIMER_IFLG_RUNNING 0x00000002
#define SNDRV_TIMER_IFLG_START 0x00000004
#define SNDRV_TIMER_IFLG_AUTO 0x00000008 /* auto restart */
-#define SNDRV_TIMER_IFLG_FAST 0x00000010 /* fast callback (do not use tasklet) */
+#define SNDRV_TIMER_IFLG_FAST 0x00000010 /* fast callback (do not use work) */
#define SNDRV_TIMER_IFLG_CALLBACK 0x00000020 /* timer callback is active */
#define SNDRV_TIMER_IFLG_EXCLUSIVE 0x00000040 /* exclusive owner - no more instances */
#define SNDRV_TIMER_IFLG_EARLY_EVENT 0x00000080 /* write early event to the poll queue */
@@ -74,7 +74,7 @@ struct snd_timer {
struct list_head active_list_head;
struct list_head ack_list_head;
struct list_head sack_list_head; /* slow ack list head */
- struct tasklet_struct task_queue;
+ struct work_struct task_work;
int max_instances; /* upper limit of timer instances */
int num_instances; /* current number of timer instances */
};
@@ -96,7 +96,7 @@ struct snd_timer_instance {
unsigned long ticks; /* auto-load ticks when expired */
unsigned long cticks; /* current ticks */
unsigned long pticks; /* accumulated ticks for callback */
- unsigned long resolution; /* current resolution for tasklet */
+ unsigned long resolution; /* current resolution for work */
unsigned long lost; /* lost ticks */
int slave_class;
unsigned int slave_id;
diff --git a/include/sound/wm8960.h b/include/sound/wm8960.h
index d22e84805025..275fd5b201ce 100644
--- a/include/sound/wm8960.h
+++ b/include/sound/wm8960.h
@@ -16,6 +16,23 @@ struct wm8960_data {
bool capless; /* Headphone outputs configured in capless mode */
bool shared_lrclk; /* DAC and ADC LRCLKs are wired together */
+
+ /*
+ * Setup for headphone detection
+ *
+ * hp_cfg[0]: HPSEL[1:0] of R48 (Additional Control 4)
+ * hp_cfg[1]: {HPSWEN:HPSWPOL} of R24 (Additional Control 2).
+ * hp_cfg[2]: {TOCLKSEL:TOEN} of R23 (Additional Control 1).
+ */
+ u32 hp_cfg[3];
+
+ /*
+ * Setup for GPIO configuration
+ *
+ * gpio_cfg[0]: ALRCGPIO of R9 (Audio interface)
+ * gpio_cfg[1]: {GPIOPOL:GPIOSEL[2:0]} of R48 (Additional Control 4).
+ */
+ u32 gpio_cfg[2];
};
#endif
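
A board file passes these through platform data; a sketch with illustrative register values only (consult the WM8960 datasheet for real R23/R24/R48 settings):

	#include <sound/wm8960.h>

	static struct wm8960_data board_wm8960_pdata = {
		.capless	= false,
		.shared_lrclk	= true,
		/* HPSEL source, headphone switch enable/polarity, timeout clock/enable */
		.hp_cfg		= { 0x3, 0x3, 0x3 },
		/* ADCLRC pin as GPIO, GPIO polarity/function select */
		.gpio_cfg	= { 0x1, 0x3 },
	};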
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index a49d37140a64..94d06ddfd80a 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -26,6 +26,7 @@ struct sock;
#define ISCSI_RX_THREAD_NAME "iscsi_trx"
#define ISCSI_TX_THREAD_NAME "iscsi_ttx"
#define ISCSI_IQN_LEN 224
+#define NA_AUTHENTICATION_INHERITED -1
/* struct iscsi_node_attrib sanity values */
#define NA_DATAOUT_TIMEOUT 3
@@ -143,7 +144,7 @@ enum tiqn_state_table {
TIQN_STATE_SHUTDOWN = 2,
};
-/* struct iscsi_cmd->cmd_flags */
+/* struct iscsit_cmd->cmd_flags */
enum cmd_flags_table {
ICF_GOT_LAST_DATAOUT = 0x00000001,
ICF_GOT_DATACK_SNACK = 0x00000002,
@@ -157,7 +158,7 @@ enum cmd_flags_table {
ICF_SENDTARGETS_SINGLE = 0x00000200,
};
-/* struct iscsi_cmd->i_state */
+/* struct iscsit_cmd->i_state */
enum cmd_i_state_table {
ISTATE_NO_STATE = 0,
ISTATE_NEW_CMD = 1,
@@ -297,20 +298,10 @@ struct iscsi_sess_ops {
struct iscsi_queue_req {
int state;
- struct iscsi_cmd *cmd;
+ struct iscsit_cmd *cmd;
struct list_head qr_list;
};
-struct iscsi_data_count {
- int data_length;
- int sync_and_steering;
- enum data_count_type type;
- u32 iov_count;
- u32 ss_iov_count;
- u32 ss_marker_count;
- struct kvec *iov;
-};
-
struct iscsi_param_list {
bool iser;
struct list_head param_list;
@@ -337,7 +328,7 @@ struct iscsi_ooo_cmdsn {
u32 batch_count;
u32 cmdsn;
u32 exp_cmdsn;
- struct iscsi_cmd *cmd;
+ struct iscsit_cmd *cmd;
struct list_head ooo_list;
} ____cacheline_aligned;
@@ -359,7 +350,7 @@ struct iscsi_r2t {
struct list_head r2t_list;
} ____cacheline_aligned;
-struct iscsi_cmd {
+struct iscsit_cmd {
enum iscsi_timer_flags_table dataout_timer_flags;
/* DataOUT timeout retries */
u8 dataout_timeout_retries;
@@ -415,22 +406,22 @@ struct iscsi_cmd {
u32 outstanding_r2ts;
/* Next R2T Offset when DataSequenceInOrder=Yes */
u32 r2t_offset;
- /* Iovec current and orig count for iscsi_cmd->iov_data */
+ /* Iovec current and orig count for iscsit_cmd->iov_data */
u32 iov_data_count;
u32 orig_iov_data_count;
/* Number of miscellaneous iovecs used for IP stack calls */
u32 iov_misc_count;
- /* Number of struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+ /* Number of struct iscsi_pdu in struct iscsit_cmd->pdu_list */
u32 pdu_count;
- /* Next struct iscsi_pdu to send in struct iscsi_cmd->pdu_list */
+ /* Next struct iscsi_pdu to send in struct iscsit_cmd->pdu_list */
u32 pdu_send_order;
- /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+ /* Current struct iscsi_pdu in struct iscsit_cmd->pdu_list */
u32 pdu_start;
- /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
+ /* Next struct iscsi_seq to send in struct iscsit_cmd->seq_list */
u32 seq_send_order;
- /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
+ /* Number of struct iscsi_seq in struct iscsit_cmd->seq_list */
u32 seq_count;
- /* Current struct iscsi_seq in struct iscsi_cmd->seq_list */
+ /* Current struct iscsi_seq in struct iscsit_cmd->seq_list */
u32 seq_no;
/* Lowest offset in current DataOUT sequence */
u32 seq_start_offset;
@@ -454,12 +445,12 @@ struct iscsi_cmd {
enum dma_data_direction data_direction;
/* iSCSI PDU Header + CRC */
unsigned char pdu[ISCSI_HDR_LEN + ISCSI_CRC_LEN];
- /* Number of times struct iscsi_cmd is present in immediate queue */
+ /* Number of times struct iscsit_cmd is present in immediate queue */
atomic_t immed_queue_count;
atomic_t response_queue_count;
spinlock_t datain_lock;
spinlock_t dataout_timeout_lock;
- /* spinlock for protecting struct iscsi_cmd->i_state */
+ /* spinlock for protecting struct iscsit_cmd->i_state */
spinlock_t istate_lock;
/* spinlock for adding within command recovery entries */
spinlock_t error_lock;
@@ -488,11 +479,11 @@ struct iscsi_cmd {
/* TMR Request when iscsi_opcode == ISCSI_OP_SCSI_TMFUNC */
struct iscsi_tmr_req *tmr_req;
	/* Connection this command is allegiant to */
- struct iscsi_conn *conn;
+ struct iscsit_conn *conn;
/* Pointer to connection recovery entry */
struct iscsi_conn_recovery *cr;
/* Session the command is part of, used for connection recovery */
- struct iscsi_session *sess;
+ struct iscsit_session *sess;
/* list_head for connection list */
struct list_head i_conn_node;
/* The TCM I/O descriptor that is accessed via container_of() */
@@ -513,12 +504,12 @@ struct iscsi_cmd {
struct iscsi_tmr_req {
bool task_reassign:1;
u32 exp_data_sn;
- struct iscsi_cmd *ref_cmd;
+ struct iscsit_cmd *ref_cmd;
struct iscsi_conn_recovery *conn_recovery;
struct se_tmr_req *se_tmr_req;
};
-struct iscsi_conn {
+struct iscsit_conn {
wait_queue_head_t queues_wq;
/* Authentication Successful for this connection */
u8 auth_complete;
@@ -566,10 +557,11 @@ struct iscsi_conn {
struct socket *sock;
void (*orig_data_ready)(struct sock *);
void (*orig_state_change)(struct sock *);
-#define LOGIN_FLAGS_READ_ACTIVE 1
-#define LOGIN_FLAGS_CLOSED 2
-#define LOGIN_FLAGS_READY 4
-#define LOGIN_FLAGS_INITIAL_PDU 8
+#define LOGIN_FLAGS_READY 0
+#define LOGIN_FLAGS_INITIAL_PDU 1
+#define LOGIN_FLAGS_READ_ACTIVE 2
+#define LOGIN_FLAGS_WRITE_ACTIVE 3
+#define LOGIN_FLAGS_CLOSED 4
unsigned long login_flags;
struct delayed_work login_work;
struct iscsi_login *login;
@@ -589,9 +581,10 @@ struct iscsi_conn {
struct ahash_request *conn_tx_hash;
/* Used for scheduling TX and RX connection kthreads */
cpumask_var_t conn_cpumask;
+ cpumask_var_t allowed_cpumask;
unsigned int conn_rx_reset_cpumask:1;
unsigned int conn_tx_reset_cpumask:1;
- /* list_head of struct iscsi_cmd for this connection */
+ /* list_head of struct iscsit_cmd for this connection */
struct list_head conn_cmd_list;
struct list_head immed_queue_list;
struct list_head response_queue_list;
@@ -606,7 +599,7 @@ struct iscsi_conn {
struct iscsi_portal_group *tpg;
struct iscsi_tpg_np *tpg_np;
/* Pointer to parent session */
- struct iscsi_session *sess;
+ struct iscsit_session *sess;
int bitmap_id;
int rx_thread_active;
struct task_struct *rx_thread;
@@ -626,11 +619,11 @@ struct iscsi_conn_recovery {
struct list_head conn_recovery_cmd_list;
spinlock_t conn_recovery_cmd_lock;
struct timer_list time2retain_timer;
- struct iscsi_session *sess;
+ struct iscsit_session *sess;
struct list_head cr_list;
} ____cacheline_aligned;
-struct iscsi_session {
+struct iscsit_session {
u8 initiator_vendor;
u8 isid[6];
enum iscsi_timer_flags_table time2retain_timer_flags;
@@ -676,7 +669,7 @@ struct iscsi_session {
atomic_t session_logout;
atomic_t session_reinstatement;
atomic_t session_stop_active;
- atomic_t sleep_on_sess_wait_comp;
+ atomic_t session_close;
/* connection list */
struct list_head sess_conn_list;
struct list_head cr_active_list;
@@ -718,11 +711,12 @@ struct iscsi_login {
char rsp[ISCSI_HDR_LEN];
char *req_buf;
char *rsp_buf;
- struct iscsi_conn *conn;
+ struct iscsit_conn *conn;
struct iscsi_np *np;
} ____cacheline_aligned;
struct iscsi_node_attrib {
+ s32 authentication;
u32 dataout_timeout;
u32 dataout_timeout_retries;
u32 default_erl;
@@ -766,6 +760,12 @@ struct iscsi_node_acl {
struct iscsi_node_stat_grps node_stat_grps;
};
+static inline struct iscsi_node_acl *
+to_iscsi_nacl(struct se_node_acl *se_nacl)
+{
+ return container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
+}
+
struct iscsi_tpg_attrib {
u32 authentication;
u32 login_timeout;
@@ -847,6 +847,12 @@ struct iscsi_portal_group {
struct list_head tpg_list;
} ____cacheline_aligned;
+static inline struct iscsi_portal_group *
+to_iscsi_tpg(struct se_portal_group *se_tpg)
+{
+ return container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+}
+
struct iscsi_wwn_stat_grps {
struct config_group iscsi_stat_group;
struct config_group iscsi_instance_group;
@@ -887,12 +893,13 @@ struct iscsit_global {
/* Thread Set bitmap pointer */
unsigned long *ts_bitmap;
spinlock_t ts_bitmap_lock;
+ cpumask_var_t allowed_cpumask;
/* Used for iSCSI discovery session authentication */
struct iscsi_node_acl discovery_acl;
struct iscsi_portal_group *discovery_tpg;
};
-static inline u32 session_get_next_ttt(struct iscsi_session *session)
+static inline u32 session_get_next_ttt(struct iscsit_session *session)
{
u32 ttt;
@@ -905,31 +912,10 @@ static inline u32 session_get_next_ttt(struct iscsi_session *session)
return ttt;
}
-extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
+extern struct iscsit_cmd *iscsit_find_cmd_from_itt(struct iscsit_conn *, itt_t);
+
+extern void iscsit_thread_check_cpumask(struct iscsit_conn *conn,
+ struct task_struct *p,
+ int mode);
-static inline void iscsit_thread_check_cpumask(
- struct iscsi_conn *conn,
- struct task_struct *p,
- int mode)
-{
- /*
- * mode == 1 signals iscsi_target_tx_thread() usage.
- * mode == 0 signals iscsi_target_rx_thread() usage.
- */
- if (mode == 1) {
- if (!conn->conn_tx_reset_cpumask)
- return;
- conn->conn_tx_reset_cpumask = 0;
- } else {
- if (!conn->conn_rx_reset_cpumask)
- return;
- conn->conn_rx_reset_cpumask = 0;
- }
- /*
- * Update the CPU mask for this single kthread so that
- * both TX and RX kthreads are scheduled to run on the
- * same CPU.
- */
- set_cpus_allowed_ptr(p, conn->conn_cpumask);
-}
#endif /* ISCSI_TARGET_CORE_H */
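
The new to_iscsi_nacl() and to_iscsi_tpg() helpers replace open-coded container_of() at call sites. A sketch of a typical configfs show routine using one of them, assuming the node_attrib member layout above (function name hypothetical):

	static ssize_t lio_show_node_authentication(struct se_node_acl *se_nacl,
						    char *page)
	{
		struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl);

		/* NA_AUTHENTICATION_INHERITED (-1) means "inherit from the TPG" */
		return snprintf(page, PAGE_SIZE, "%d\n",
				nacl->node_attrib.authentication);
	}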
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index 75bee29fd7dd..42cfe02ea909 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#include "iscsi_target_core.h" /* struct iscsi_cmd */
+#include "iscsi_target_core.h" /* struct iscsit_cmd */
struct sockaddr_storage;
@@ -12,29 +12,29 @@ struct iscsit_transport {
struct module *owner;
struct list_head t_node;
int (*iscsit_setup_np)(struct iscsi_np *, struct sockaddr_storage *);
- int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *);
+ int (*iscsit_accept_np)(struct iscsi_np *, struct iscsit_conn *);
void (*iscsit_free_np)(struct iscsi_np *);
- void (*iscsit_wait_conn)(struct iscsi_conn *);
- void (*iscsit_free_conn)(struct iscsi_conn *);
- int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *);
- int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32);
- int (*iscsit_immediate_queue)(struct iscsi_conn *, struct iscsi_cmd *, int);
- int (*iscsit_response_queue)(struct iscsi_conn *, struct iscsi_cmd *, int);
- int (*iscsit_get_dataout)(struct iscsi_conn *, struct iscsi_cmd *, bool);
- int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *);
- int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *);
- void (*iscsit_aborted_task)(struct iscsi_conn *, struct iscsi_cmd *);
- int (*iscsit_xmit_pdu)(struct iscsi_conn *, struct iscsi_cmd *,
+ void (*iscsit_wait_conn)(struct iscsit_conn *);
+ void (*iscsit_free_conn)(struct iscsit_conn *);
+ int (*iscsit_get_login_rx)(struct iscsit_conn *, struct iscsi_login *);
+ int (*iscsit_put_login_tx)(struct iscsit_conn *, struct iscsi_login *, u32);
+ int (*iscsit_immediate_queue)(struct iscsit_conn *, struct iscsit_cmd *, int);
+ int (*iscsit_response_queue)(struct iscsit_conn *, struct iscsit_cmd *, int);
+ int (*iscsit_get_dataout)(struct iscsit_conn *, struct iscsit_cmd *, bool);
+ int (*iscsit_queue_data_in)(struct iscsit_conn *, struct iscsit_cmd *);
+ int (*iscsit_queue_status)(struct iscsit_conn *, struct iscsit_cmd *);
+ void (*iscsit_aborted_task)(struct iscsit_conn *, struct iscsit_cmd *);
+ int (*iscsit_xmit_pdu)(struct iscsit_conn *, struct iscsit_cmd *,
struct iscsi_datain_req *, const void *, u32);
- void (*iscsit_unmap_cmd)(struct iscsi_conn *, struct iscsi_cmd *);
- void (*iscsit_get_rx_pdu)(struct iscsi_conn *);
- int (*iscsit_validate_params)(struct iscsi_conn *);
- void (*iscsit_get_r2t_ttt)(struct iscsi_conn *, struct iscsi_cmd *,
+ void (*iscsit_unmap_cmd)(struct iscsit_conn *, struct iscsit_cmd *);
+ void (*iscsit_get_rx_pdu)(struct iscsit_conn *);
+ int (*iscsit_validate_params)(struct iscsit_conn *);
+ void (*iscsit_get_r2t_ttt)(struct iscsit_conn *, struct iscsit_cmd *,
struct iscsi_r2t *);
- enum target_prot_op (*iscsit_get_sup_prot_ops)(struct iscsi_conn *);
+ enum target_prot_op (*iscsit_get_sup_prot_ops)(struct iscsit_conn *);
};
-static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd)
+static inline void *iscsit_priv_cmd(struct iscsit_cmd *cmd)
{
return (void *)(cmd + 1);
}
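
iscsit_priv_cmd() hands a transport its per-command private area, which the core allocates immediately after struct iscsit_cmd (sizeof(*cmd) plus the transport's declared private size), so (cmd + 1) is its start. A sketch, with hypothetical transport types:

	struct my_transport_cmd {
		bool in_flight;		/* transport-private per-command state */
	};

	static void my_transport_start(struct iscsit_cmd *cmd)
	{
		/* the private area begins directly after the core command */
		struct my_transport_cmd *pcmd = iscsit_priv_cmd(cmd);

		pcmd->in_flight = true;
	}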
@@ -43,7 +43,7 @@ static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd)
* From iscsi_target_transport.c
*/
-extern int iscsit_register_transport(struct iscsit_transport *);
+extern void iscsit_register_transport(struct iscsit_transport *);
extern void iscsit_unregister_transport(struct iscsit_transport *);
extern struct iscsit_transport *iscsit_get_transport(int);
extern void iscsit_put_transport(struct iscsit_transport *);
@@ -51,100 +51,100 @@ extern void iscsit_put_transport(struct iscsit_transport *);
/*
* From iscsi_target.c
*/
-extern int iscsit_setup_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_setup_scsi_cmd(struct iscsit_conn *, struct iscsit_cmd *,
unsigned char *);
-extern void iscsit_set_unsolicited_dataout(struct iscsi_cmd *);
-extern int iscsit_process_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern void iscsit_set_unsolicited_dataout(struct iscsit_cmd *);
+extern int iscsit_process_scsi_cmd(struct iscsit_conn *, struct iscsit_cmd *,
struct iscsi_scsi_req *);
extern int
-__iscsit_check_dataout_hdr(struct iscsi_conn *, void *,
- struct iscsi_cmd *, u32, bool *);
+__iscsit_check_dataout_hdr(struct iscsit_conn *, void *,
+ struct iscsit_cmd *, u32, bool *);
extern int
-iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
- struct iscsi_cmd **out_cmd);
-extern int iscsit_check_dataout_payload(struct iscsi_cmd *, struct iscsi_data *,
+iscsit_check_dataout_hdr(struct iscsit_conn *conn, void *buf,
+ struct iscsit_cmd **out_cmd);
+extern int iscsit_check_dataout_payload(struct iscsit_cmd *, struct iscsi_data *,
bool);
-extern int iscsit_setup_nop_out(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_setup_nop_out(struct iscsit_conn *, struct iscsit_cmd *,
struct iscsi_nopout *);
-extern int iscsit_process_nop_out(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_process_nop_out(struct iscsit_conn *, struct iscsit_cmd *,
struct iscsi_nopout *);
-extern int iscsit_handle_logout_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_handle_logout_cmd(struct iscsit_conn *, struct iscsit_cmd *,
unsigned char *);
-extern int iscsit_handle_task_mgt_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_handle_task_mgt_cmd(struct iscsit_conn *, struct iscsit_cmd *,
unsigned char *);
-extern int iscsit_setup_text_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_setup_text_cmd(struct iscsit_conn *, struct iscsit_cmd *,
struct iscsi_text *);
-extern int iscsit_process_text_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_process_text_cmd(struct iscsit_conn *, struct iscsit_cmd *,
struct iscsi_text *);
-extern void iscsit_build_rsp_pdu(struct iscsi_cmd *, struct iscsi_conn *,
+extern void iscsit_build_rsp_pdu(struct iscsit_cmd *, struct iscsit_conn *,
bool, struct iscsi_scsi_rsp *);
-extern void iscsit_build_nopin_rsp(struct iscsi_cmd *, struct iscsi_conn *,
+extern void iscsit_build_nopin_rsp(struct iscsit_cmd *, struct iscsit_conn *,
struct iscsi_nopin *, bool);
-extern void iscsit_build_task_mgt_rsp(struct iscsi_cmd *, struct iscsi_conn *,
+extern void iscsit_build_task_mgt_rsp(struct iscsit_cmd *, struct iscsit_conn *,
struct iscsi_tm_rsp *);
-extern int iscsit_build_text_rsp(struct iscsi_cmd *, struct iscsi_conn *,
+extern int iscsit_build_text_rsp(struct iscsit_cmd *, struct iscsit_conn *,
struct iscsi_text_rsp *,
enum iscsit_transport_type);
-extern void iscsit_build_reject(struct iscsi_cmd *, struct iscsi_conn *,
+extern void iscsit_build_reject(struct iscsit_cmd *, struct iscsit_conn *,
struct iscsi_reject *);
-extern int iscsit_build_logout_rsp(struct iscsi_cmd *, struct iscsi_conn *,
+extern int iscsit_build_logout_rsp(struct iscsit_cmd *, struct iscsit_conn *,
struct iscsi_logout_rsp *);
-extern int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
-extern int iscsit_queue_rsp(struct iscsi_conn *, struct iscsi_cmd *);
-extern void iscsit_aborted_task(struct iscsi_conn *, struct iscsi_cmd *);
-extern int iscsit_add_reject(struct iscsi_conn *, u8, unsigned char *);
-extern int iscsit_reject_cmd(struct iscsi_cmd *, u8, unsigned char *);
-extern int iscsit_handle_snack(struct iscsi_conn *, unsigned char *);
-extern void iscsit_build_datain_pdu(struct iscsi_cmd *, struct iscsi_conn *,
+extern int iscsit_logout_post_handler(struct iscsit_cmd *, struct iscsit_conn *);
+extern int iscsit_queue_rsp(struct iscsit_conn *, struct iscsit_cmd *);
+extern void iscsit_aborted_task(struct iscsit_conn *, struct iscsit_cmd *);
+extern int iscsit_add_reject(struct iscsit_conn *, u8, unsigned char *);
+extern int iscsit_reject_cmd(struct iscsit_cmd *, u8, unsigned char *);
+extern int iscsit_handle_snack(struct iscsit_conn *, unsigned char *);
+extern void iscsit_build_datain_pdu(struct iscsit_cmd *, struct iscsit_conn *,
struct iscsi_datain *,
struct iscsi_data_rsp *, bool);
-extern int iscsit_build_r2ts_for_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_build_r2ts_for_cmd(struct iscsit_conn *, struct iscsit_cmd *,
bool);
-extern int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
-extern int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
+extern int iscsit_immediate_queue(struct iscsit_conn *, struct iscsit_cmd *, int);
+extern int iscsit_response_queue(struct iscsit_conn *, struct iscsit_cmd *, int);
/*
* From iscsi_target_device.c
*/
-extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
+extern void iscsit_increment_maxcmdsn(struct iscsit_cmd *, struct iscsit_session *);
/*
* From iscsi_target_erl0.c
*/
-extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
+extern void iscsit_cause_connection_reinstatement(struct iscsit_conn *, int);
/*
* From iscsi_target_erl1.c
*/
-extern void iscsit_stop_dataout_timer(struct iscsi_cmd *);
+extern void iscsit_stop_dataout_timer(struct iscsit_cmd *);
/*
* From iscsi_target_tmr.c
*/
-extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_tmr_post_handler(struct iscsit_cmd *, struct iscsit_conn *);
/*
* From iscsi_target_util.c
*/
-extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int);
-extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern struct iscsit_cmd *iscsit_allocate_cmd(struct iscsit_conn *, int);
+extern int iscsit_sequence_cmd(struct iscsit_conn *, struct iscsit_cmd *,
unsigned char *, __be32);
-extern void iscsit_release_cmd(struct iscsi_cmd *);
-extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
-extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *,
- struct iscsi_conn *, u8);
-extern struct iscsi_cmd *
-iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *conn,
+extern void iscsit_release_cmd(struct iscsit_cmd *);
+extern void iscsit_free_cmd(struct iscsit_cmd *, bool);
+extern void iscsit_add_cmd_to_immediate_queue(struct iscsit_cmd *,
+ struct iscsit_conn *, u8);
+extern struct iscsit_cmd *
+iscsit_find_cmd_from_itt_or_dump(struct iscsit_conn *conn,
itt_t init_task_tag, u32 length);
/*
* From iscsi_target_nego.c
*/
-extern int iscsi_target_check_login_request(struct iscsi_conn *,
+extern int iscsi_target_check_login_request(struct iscsit_conn *,
struct iscsi_login *);
/*
* From iscsi_target_login.c
*/
extern __printf(2, 3) int iscsi_change_param_sprintf(
- struct iscsi_conn *, const char *, ...);
+ struct iscsit_conn *, const char *, ...);
/*
* From iscsi_target_parameters.c
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 51b6f50eabee..a3c193df25b3 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -14,7 +14,7 @@
#define TRANSPORT_FLAG_PASSTHROUGH_ALUA 0x2
#define TRANSPORT_FLAG_PASSTHROUGH_PGR 0x4
-struct request_queue;
+struct block_device;
struct scatterlist;
struct target_backend_ops {
@@ -23,7 +23,8 @@ struct target_backend_ops {
char inquiry_rev[4];
struct module *owner;
- u8 transport_flags;
+ u8 transport_flags_default;
+ u8 transport_flags_changeable;
int (*attach_hba)(struct se_hba *, u32);
void (*detach_hba)(struct se_hba *);
@@ -33,12 +34,17 @@ struct target_backend_ops {
int (*configure_device)(struct se_device *);
void (*destroy_device)(struct se_device *);
void (*free_device)(struct se_device *device);
+ struct se_dev_plug *(*plug_device)(struct se_device *se_dev);
+ void (*unplug_device)(struct se_dev_plug *se_plug);
+ bool (*configure_unmap)(struct se_device *se_dev);
ssize_t (*set_configfs_dev_params)(struct se_device *,
const char *, ssize_t);
ssize_t (*show_configfs_dev_params)(struct se_device *, char *);
sense_reason_t (*parse_cdb)(struct se_cmd *cmd);
+ void (*tmr_notify)(struct se_device *se_dev, enum tcm_tmreq_table,
+ struct list_head *aborted_cmds);
u32 (*get_device_type)(struct se_device *);
sector_t (*get_blocks)(struct se_device *);
sector_t (*get_alignment_offset_lbas)(struct se_device *);
@@ -69,6 +75,8 @@ int transport_backend_register(const struct target_backend_ops *);
void target_backend_unregister(const struct target_backend_ops *);
void target_complete_cmd(struct se_cmd *, u8);
+void target_set_cmd_data_length(struct se_cmd *, int);
+void target_complete_cmd_with_sense(struct se_cmd *, u8, sense_reason_t);
void target_complete_cmd_with_length(struct se_cmd *, u8, int);
void transport_copy_sense_to_cmd(struct se_cmd *, unsigned char *);
@@ -94,6 +102,7 @@ int transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
extern struct configfs_attribute *sbc_attrib_attrs[];
extern struct configfs_attribute *passthrough_attrib_attrs[];
+extern struct configfs_attribute *passthrough_pr_attrib_attrs[];
/* core helpers also used by command snooping in pscsi */
void *transport_kmap_data_sg(struct se_cmd *);
@@ -109,17 +118,11 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
bool target_sense_desc_format(struct se_device *dev);
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
- struct request_queue *q);
+ struct block_device *bdev);
static inline bool target_dev_configured(struct se_device *se_dev)
{
return !!(se_dev->dev_flags & DF_CONFIGURED);
}
-/* Only use get_unaligned_be24() if reading p - 1 is allowed. */
-static inline uint32_t get_unaligned_be24(const uint8_t *const p)
-{
- return get_unaligned_be32(p - 1) & 0xffffffU;
-}
-
#endif /* TARGET_CORE_BACKEND_H */
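
The local get_unaligned_be24() is dropped here, presumably in favor of a generic helper in the unaligned-access headers that reads exactly three bytes rather than peeking at p - 1. A sketch of the usual safe form:

	static inline u32 my_get_unaligned_be24(const u8 *p)
	{
		/* reads only p[0..2]; no access to p - 1 */
		return (u32)p[0] << 16 | (u32)p[1] << 8 | p[2];
	}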
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 1728e883b7b2..8c920456edd9 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -74,8 +74,6 @@
#define DA_EMULATE_MODEL_ALIAS 0
/* Emulation for WriteCache and SYNCHRONIZE_CACHE */
#define DA_EMULATE_WRITE_CACHE 0
-/* Emulation for UNIT ATTENTION Interlock Control */
-#define DA_EMULATE_UA_INTLLCK_CTRL 0
/* Emulation for TASK_ABORTED status (TAS) by default */
#define DA_EMULATE_TAS 1
/* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */
@@ -129,25 +127,25 @@ enum transport_state_table {
/* Used for struct se_cmd->se_cmd_flags */
enum se_cmd_flags_table {
- SCF_SUPPORTED_SAM_OPCODE = 0x00000001,
- SCF_TRANSPORT_TASK_SENSE = 0x00000002,
- SCF_EMULATED_TASK_SENSE = 0x00000004,
- SCF_SCSI_DATA_CDB = 0x00000008,
- SCF_SCSI_TMR_CDB = 0x00000010,
- SCF_FUA = 0x00000080,
- SCF_SE_LUN_CMD = 0x00000100,
- SCF_BIDI = 0x00000400,
- SCF_SENT_CHECK_CONDITION = 0x00000800,
- SCF_OVERFLOW_BIT = 0x00001000,
- SCF_UNDERFLOW_BIT = 0x00002000,
- SCF_ALUA_NON_OPTIMIZED = 0x00008000,
- SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
- SCF_COMPARE_AND_WRITE = 0x00080000,
- SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
- SCF_ACK_KREF = 0x00400000,
- SCF_USE_CPUID = 0x00800000,
- SCF_TASK_ATTR_SET = 0x01000000,
- SCF_TREAT_READ_AS_NORMAL = 0x02000000,
+ SCF_SUPPORTED_SAM_OPCODE = (1 << 0),
+ SCF_TRANSPORT_TASK_SENSE = (1 << 1),
+ SCF_EMULATED_TASK_SENSE = (1 << 2),
+ SCF_SCSI_DATA_CDB = (1 << 3),
+ SCF_SCSI_TMR_CDB = (1 << 4),
+ SCF_FUA = (1 << 5),
+ SCF_SE_LUN_CMD = (1 << 6),
+ SCF_BIDI = (1 << 7),
+ SCF_SENT_CHECK_CONDITION = (1 << 8),
+ SCF_OVERFLOW_BIT = (1 << 9),
+ SCF_UNDERFLOW_BIT = (1 << 10),
+ SCF_ALUA_NON_OPTIMIZED = (1 << 11),
+ SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = (1 << 12),
+ SCF_COMPARE_AND_WRITE = (1 << 13),
+ SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = (1 << 14),
+ SCF_ACK_KREF = (1 << 15),
+ SCF_USE_CPUID = (1 << 16),
+ SCF_TASK_ATTR_SET = (1 << 17),
+ SCF_TREAT_READ_AS_NORMAL = (1 << 18),
};
/*
@@ -173,7 +171,7 @@ enum tcm_sense_reason_table {
TCM_WRITE_PROTECTED = R(0x0c),
TCM_CHECK_CONDITION_ABORT_CMD = R(0x0d),
TCM_CHECK_CONDITION_UNIT_ATTENTION = R(0x0e),
- TCM_CHECK_CONDITION_NOT_READY = R(0x0f),
+
TCM_RESERVATION_CONFLICT = R(0x10),
TCM_ADDRESS_OUT_OF_RANGE = R(0x11),
TCM_OUT_OF_RESOURCES = R(0x12),
@@ -189,6 +187,11 @@ enum tcm_sense_reason_table {
TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE = R(0x1c),
TCM_INSUFFICIENT_REGISTRATION_RESOURCES = R(0x1d),
TCM_LUN_BUSY = R(0x1e),
+ TCM_INVALID_FIELD_IN_COMMAND_IU = R(0x1f),
+ TCM_ALUA_TG_PT_STANDBY = R(0x20),
+ TCM_ALUA_TG_PT_UNAVAILABLE = R(0x21),
+ TCM_ALUA_STATE_TRANSITION = R(0x22),
+ TCM_ALUA_OFFLINE = R(0x23),
#undef R
};
@@ -197,7 +200,6 @@ enum target_sc_flags_table {
TARGET_SCF_ACK_KREF = 0x02,
TARGET_SCF_UNKNOWN_SIZE = 0x04,
TARGET_SCF_USE_CPUID = 0x08,
- TARGET_SCF_LOOKUP_LUN_FROM_TAG = 0x10,
};
/* fabric independent task management function values */
@@ -209,6 +211,7 @@ enum tcm_tmreq_table {
TMR_LUN_RESET = 5,
TMR_TARGET_WARM_RESET = 6,
TMR_TARGET_COLD_RESET = 7,
+ TMR_LUN_RESET_PRO = 0x80,
TMR_UNKNOWN = 0xff,
};
@@ -327,6 +330,7 @@ struct t10_wwn {
char model[INQUIRY_MODEL_LEN + 1];
char revision[INQUIRY_REVISION_LEN + 1];
char unit_serial[INQUIRY_VPD_SERIAL_LEN];
+ u32 company_id;
spinlock_t t10_vpd_lock;
struct se_device *t10_dev;
struct config_group t10_wwn_group;
@@ -433,6 +437,13 @@ enum target_prot_type {
TARGET_DIF_TYPE3_PROT,
};
+/* Emulation for UNIT ATTENTION Interlock Control */
+enum target_ua_intlck_ctrl {
+ TARGET_UA_INTLCK_CTRL_CLEAR = 0,
+ TARGET_UA_INTLCK_CTRL_NO_CLEAR = 1,
+ TARGET_UA_INTLCK_CTRL_ESTABLISH_UA = 2,
+};
+
enum target_core_dif_check {
TARGET_DIF_CHECK_GUARD = 0x1 << 0,
TARGET_DIF_CHECK_APPTAG = 0x1 << 1,
@@ -446,10 +457,10 @@ enum target_core_dif_check {
#define TCM_ACA_TAG 0x24
struct se_cmd {
+	/* Used to fail with specific sense codes */
+ sense_reason_t sense_reason;
/* SAM response code being sent to initiator */
u8 scsi_status;
- u8 scsi_asc;
- u8 scsi_ascq;
u16 scsi_sense_length;
unsigned unknown_data_length:1;
bool state_active:1;
@@ -482,7 +493,7 @@ struct se_cmd {
/* Only used for internal passthrough and legacy TCM fabric modules */
struct se_session *se_sess;
struct se_tmr_req *se_tmr_req;
- struct list_head se_cmd_list;
+ struct llist_node se_cmd_list;
struct completion *free_compl;
struct completion *abrt_compl;
const struct target_core_fabric_ops *se_tfo;
@@ -534,7 +545,11 @@ struct se_cmd {
struct scatterlist *t_prot_sg;
unsigned int t_prot_nents;
sense_reason_t pi_err;
- sector_t bad_sector;
+ u64 sense_info;
+ /*
+ * CPU LIO will execute the cmd on. Defaults to the CPU the cmd is
+ * initialized on. Drivers can override.
+ */
int cpuid;
};
@@ -603,7 +618,7 @@ static inline struct se_node_acl *fabric_stat_to_nacl(struct config_item *item)
}
struct se_session {
- unsigned sess_tearing_down:1;
+ atomic_t stopped;
u64 sess_bin_isid;
enum target_prot_op sup_prot_ops;
enum target_prot_type sess_prot_type;
@@ -613,9 +628,9 @@ struct se_session {
struct percpu_ref cmd_count;
struct list_head sess_list;
struct list_head sess_acl_list;
- struct list_head sess_cmd_list;
spinlock_t sess_cmd_lock;
- wait_queue_head_t cmd_list_wq;
+ wait_queue_head_t cmd_count_wq;
+ struct completion stop_done;
void *sess_cmd_map;
struct sbitmap_queue sess_tag_pool;
};
@@ -650,9 +665,9 @@ struct se_dev_entry {
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
struct kref pr_kref;
struct completion pr_comp;
- struct se_lun_acl __rcu *se_lun_acl;
+ struct se_lun_acl *se_lun_acl;
spinlock_t ua_lock;
- struct se_lun __rcu *se_lun;
+ struct se_lun *se_lun;
#define DEF_PR_REG_ACTIVE 1
unsigned long deve_flags;
struct list_head alua_port_list;
@@ -663,26 +678,26 @@ struct se_dev_entry {
};
struct se_dev_attrib {
- int emulate_model_alias;
- int emulate_dpo;
- int emulate_fua_write;
- int emulate_fua_read;
- int emulate_write_cache;
- int emulate_ua_intlck_ctrl;
- int emulate_tas;
- int emulate_tpu;
- int emulate_tpws;
- int emulate_caw;
- int emulate_3pc;
- int emulate_pr;
+ bool emulate_model_alias;
+ bool emulate_dpo; /* deprecated */
+ bool emulate_fua_write;
+ bool emulate_fua_read; /* deprecated */
+ bool emulate_write_cache;
+ enum target_ua_intlck_ctrl emulate_ua_intlck_ctrl;
+ bool emulate_tas;
+ bool emulate_tpu;
+ bool emulate_tpws;
+ bool emulate_caw;
+ bool emulate_3pc;
+ bool emulate_pr;
enum target_prot_type pi_prot_type;
enum target_prot_type hw_pi_prot_type;
- int pi_prot_verify;
- int enforce_pr_isids;
- int force_pr_aptpl;
- int is_nonrot;
- int emulate_rest_reord;
- int unmap_zeroes_data;
+ bool pi_prot_verify;
+ bool enforce_pr_isids;
+ bool force_pr_aptpl;
+ bool is_nonrot;
+ bool emulate_rest_reord;
+ bool unmap_zeroes_data;
u32 hw_block_size;
u32 block_size;
u32 hw_max_sectors;
@@ -734,7 +749,7 @@ struct se_lun {
/* ALUA target port group linkage */
struct list_head lun_tg_pt_gp_link;
- struct t10_alua_tg_pt_gp *lun_tg_pt_gp;
+ struct t10_alua_tg_pt_gp __rcu *lun_tg_pt_gp;
spinlock_t lun_tg_pt_gp_lock;
struct se_portal_group *lun_tpg;
@@ -755,6 +770,21 @@ struct se_dev_stat_grps {
struct config_group scsi_lu_group;
};
+struct se_cmd_queue {
+ struct llist_head cmd_list;
+ struct work_struct work;
+};
+
+struct se_dev_plug {
+ struct se_device *se_dev;
+};
+
+struct se_device_queue {
+ struct list_head state_list;
+ spinlock_t lock;
+ struct se_cmd_queue sq;
+};
+
struct se_device {
	/* RELATIVE TARGET PORT IDENTIFIER Counter */
u16 dev_rpti_counter;
@@ -767,6 +797,7 @@ struct se_device {
#define DF_USING_UDEV_PATH 0x00000008
#define DF_USING_ALIAS 0x00000010
#define DF_READ_ONLY 0x00000020
+ u8 transport_flags;
/* Physical device queue depth */
u32 queue_depth;
/* Used for SPC-2 reservations enforce of ISIDs */
@@ -781,12 +812,12 @@ struct se_device {
atomic_long_t read_bytes;
atomic_long_t write_bytes;
/* Active commands on this virtual SE device */
- atomic_t simple_cmds;
- atomic_t dev_ordered_sync;
+ atomic_t non_ordered;
+ bool ordered_sync_in_progress;
+ atomic_t delayed_cmd_count;
atomic_t dev_qf_count;
u32 export_count;
spinlock_t delayed_cmd_lock;
- spinlock_t execute_task_lock;
spinlock_t dev_reservation_lock;
unsigned int dev_reservation_flags;
#define DRF_SPC2_RESERVATIONS 0x00000001
@@ -804,8 +835,8 @@ struct se_device {
struct list_head dev_sep_list;
struct list_head dev_tmr_list;
struct work_struct qf_work_queue;
+ struct work_struct delayed_cmd_work;
struct list_head delayed_cmd_list;
- struct list_head state_list;
struct list_head qf_cmd_list;
/* Pointer to associated SE HBA */
struct se_hba *se_hba;
@@ -832,6 +863,8 @@ struct se_device {
/* For se_lun->lun_se_dev RCU read-side critical access */
u32 hba_index;
struct rcu_head rcu_head;
+ int queue_cnt;
+ struct se_device_queue *queues;
};
struct se_hba {
@@ -869,6 +902,7 @@ struct se_portal_group {
* Negative values can be used by fabric drivers for internal use TPGs.
*/
int proto_id;
+ bool enabled;
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
atomic_t tpg_pr_ref_count;
/* Spinlock for adding/removing ACLed Nodes */
@@ -918,11 +952,20 @@ static inline struct se_portal_group *param_to_tpg(struct config_item *item)
tpg_param_group);
}
+enum {
+ /* Use se_cmd's cpuid for completion */
+ SE_COMPL_AFFINITY_CPUID = -1,
+ /* Complete on current CPU */
+ SE_COMPL_AFFINITY_CURR_CPU = -2,
+};
+
struct se_wwn {
struct target_fabric_configfs *wwn_tf;
void *priv;
struct config_group wwn_group;
struct config_group fabric_stat_group;
+ struct config_group param_group;
+ int cmd_compl_affinity;
};
static inline void atomic_inc_mb(atomic_t *v)
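
The new cmd_compl_affinity field on se_wwn, together with the SE_COMPL_AFFINITY_* sentinels, lets a fabric steer where command completion runs: the CPU recorded in se_cmd->cpuid, the current CPU, or an explicitly configured CPU. A sketch of that selection logic (an illustration, not the actual core code):

	static int se_compl_cpu(struct se_cmd *cmd, int cmd_compl_affinity)
	{
		switch (cmd_compl_affinity) {
		case SE_COMPL_AFFINITY_CPUID:
			return cmd->cpuid;		/* CPU the command was initialized on */
		case SE_COMPL_AFFINITY_CURR_CPU:
			return raw_smp_processor_id();	/* complete right here */
		default:
			return cmd_compl_affinity;	/* an explicit CPU number */
		}
	}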
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 063f133e47c2..38f0662476d1 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -89,6 +89,7 @@ struct target_core_fabric_ops {
void (*add_wwn_groups)(struct se_wwn *);
struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
const char *);
+ int (*fabric_enable_tpg)(struct se_portal_group *se_tpg, bool enable);
void (*fabric_drop_tpg)(struct se_portal_group *);
int (*fabric_post_link)(struct se_portal_group *,
struct se_lun *);
@@ -148,17 +149,25 @@ void transport_deregister_session_configfs(struct se_session *);
void transport_deregister_session(struct se_session *);
-void transport_init_se_cmd(struct se_cmd *,
+void __target_init_cmd(struct se_cmd *,
const struct target_core_fabric_ops *,
- struct se_session *, u32, int, int, unsigned char *);
-sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u64);
-sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
-int target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *,
- unsigned char *, unsigned char *, u64, u32, int, int, int,
- struct scatterlist *, u32, struct scatterlist *, u32,
- struct scatterlist *, u32);
-int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
+ struct se_session *, u32, int, int, unsigned char *, u64);
+int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+ unsigned char *sense, u64 unpacked_lun, u32 data_length,
+ int task_attr, int data_dir, int flags);
+int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb,
+ struct scatterlist *sgl, u32 sgl_count,
+ struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
+ struct scatterlist *sgl_prot, u32 sgl_prot_count, gfp_t gfp);
+void target_submit(struct se_cmd *se_cmd);
+sense_reason_t transport_lookup_cmd_lun(struct se_cmd *);
+sense_reason_t target_cmd_init_cdb(struct se_cmd *se_cmd, unsigned char *cdb,
+ gfp_t gfp);
+sense_reason_t target_cmd_parse_cdb(struct se_cmd *);
+void target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
unsigned char *, u64, u32, int, int, int);
+void target_queue_submission(struct se_cmd *se_cmd);
+
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
unsigned char *sense, u64 unpacked_lun,
void *fabric_tmr_ptr, unsigned char tm_type,
@@ -177,7 +186,7 @@ int transport_send_check_condition_and_sense(struct se_cmd *,
int target_send_busy(struct se_cmd *cmd);
int target_get_sess_cmd(struct se_cmd *, bool);
int target_put_sess_cmd(struct se_cmd *);
-void target_sess_cmd_list_set_waiting(struct se_session *);
+void target_stop_session(struct se_session *se_sess);
void target_wait_for_sess_cmds(struct se_session *);
void target_show_cmd(const char *pfx, struct se_cmd *cmd);
@@ -187,7 +196,7 @@ int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
void core_tmr_release_req(struct se_tmr_req *);
int transport_generic_handle_tmr(struct se_cmd *);
void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
-int transport_lookup_tmr_lun(struct se_cmd *, u64);
+int transport_lookup_tmr_lun(struct se_cmd *);
void core_allocate_nexus_loss_ua(struct se_node_acl *acl);
struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
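
The old transport_init_se_cmd()/target_submit_cmd_map_sgls() pair is split into an init/prep/submit sequence. A sketch of how a fabric might chain the new calls, based only on the prototypes above (error-path semantics are assumptions and mostly elided):

	static void my_fabric_queue_cmd(struct se_session *se_sess,
					struct se_cmd *se_cmd, unsigned char *cdb,
					unsigned char *sense, u64 unpacked_lun,
					u32 data_length, int data_dir)
	{
		if (target_init_cmd(se_cmd, se_sess, sense, unpacked_lun,
				    data_length, TCM_SIMPLE_TAG, data_dir,
				    TARGET_SCF_ACK_KREF) < 0)
			return;		/* session is being torn down */

		if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
				       GFP_KERNEL))
			return;		/* prep failed; the cmd has been handled */

		target_submit(se_cmd);
	}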
diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h
index b04c29270973..6a13220d2d27 100644
--- a/include/trace/bpf_probe.h
+++ b/include/trace/bpf_probe.h
@@ -21,6 +21,28 @@
#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+#undef __get_sockaddr
+#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field))
+
+#undef __get_rel_dynamic_array
+#define __get_rel_dynamic_array(field) \
+ ((void *)(&__entry->__rel_loc_##field) + \
+ sizeof(__entry->__rel_loc_##field) + \
+ (__entry->__rel_loc_##field & 0xffff))
+
+#undef __get_rel_dynamic_array_len
+#define __get_rel_dynamic_array_len(field) \
+ ((__entry->__rel_loc_##field >> 16) & 0xffff)
+
+#undef __get_rel_str
+#define __get_rel_str(field) ((char *)__get_rel_dynamic_array(field))
+
+#undef __get_rel_bitmask
+#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)
+
+#undef __get_rel_sockaddr
+#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field))
+
#undef __perf_count
#define __perf_count(c) (c)
@@ -55,8 +77,7 @@
/* tracepoints with more than 12 arguments will hit build error */
#define CAST_TO_U64(...) CONCATENATE(__CAST, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+#define __BPF_DECLARE_TRACE(call, proto, args) \
static notrace void \
__bpf_trace_##call(void *__data, proto) \
{ \
@@ -64,6 +85,10 @@ __bpf_trace_##call(void *__data, proto) \
CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(prog, CAST_TO_U64(args)); \
}
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+ __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args))
+
/*
* This part is compiled out, it is only here as a build time check
* to make sure that if the tracepoint handling changes, the
@@ -75,19 +100,22 @@ static inline void bpf_test_probe_##call(void) \
check_trace_callback_type_##call(__bpf_trace_##template); \
} \
typedef void (*btf_trace_##call)(void *__data, proto); \
-static struct bpf_raw_event_map __used \
- __attribute__((section("__bpf_raw_tp_map"))) \
-__bpf_trace_tp_map_##call = { \
- .tp = &__tracepoint_##call, \
- .bpf_func = (void *)(btf_trace_##call)__bpf_trace_##template, \
- .num_args = COUNT_ARGS(args), \
- .writable_size = size, \
+static union { \
+ struct bpf_raw_event_map event; \
+ btf_trace_##call handler; \
+} __bpf_trace_tp_map_##call __used \
+__section("__bpf_raw_tp_map") = { \
+ .event = { \
+ .tp = &__tracepoint_##call, \
+ .bpf_func = __bpf_trace_##template, \
+ .num_args = COUNT_ARGS(args), \
+ .writable_size = size, \
+ }, \
};
#define FIRST(x, ...) x
-#undef DEFINE_EVENT_WRITABLE
-#define DEFINE_EVENT_WRITABLE(template, call, proto, args, size) \
+#define __CHECK_WRITABLE_BUF_SIZE(call, proto, args, size) \
static inline void bpf_test_buffer_##call(void) \
{ \
/* BUILD_BUG_ON() is ignored if the code is completely eliminated, but \
@@ -96,8 +124,12 @@ static inline void bpf_test_buffer_##call(void) \
*/ \
FIRST(proto); \
(void)BUILD_BUG_ON_ZERO(size != sizeof(*FIRST(args))); \
-} \
-__DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size)
+}
+
+#undef DEFINE_EVENT_WRITABLE
+#define DEFINE_EVENT_WRITABLE(template, call, proto, args, size) \
+ __CHECK_WRITABLE_BUF_SIZE(call, PARAMS(proto), PARAMS(args), size) \
+ __DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size)
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
@@ -107,9 +139,22 @@ __DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size)
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+#undef DECLARE_TRACE
+#define DECLARE_TRACE(call, proto, args) \
+ __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) \
+ __DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), 0)
+
+#undef DECLARE_TRACE_WRITABLE
+#define DECLARE_TRACE_WRITABLE(call, proto, args, size) \
+ __CHECK_WRITABLE_BUF_SIZE(call, PARAMS(proto), PARAMS(args), size) \
+ __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) \
+ __DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), size)
+
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#undef DECLARE_TRACE_WRITABLE
#undef DEFINE_EVENT_WRITABLE
+#undef __CHECK_WRITABLE_BUF_SIZE
#undef __DEFINE_EVENT
#undef FIRST
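
The __rel_loc_##field word consumed by the __get_rel_* macros above packs an offset in the low 16 bits (measured from just past the __rel_loc field itself) and a length in the high 16 bits. A standalone sketch of the decode:

	#include <stdint.h>

	static void *rel_dynamic_array(void *rel_loc_field, uint32_t rel_loc)
	{
		/* skip the 32-bit __rel_loc word, then apply the offset */
		return (char *)rel_loc_field + sizeof(uint32_t) + (rel_loc & 0xffff);
	}

	static uint32_t rel_dynamic_array_len(uint32_t rel_loc)
	{
		return (rel_loc >> 16) & 0xffff;
	}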
diff --git a/include/trace/define_custom_trace.h b/include/trace/define_custom_trace.h
new file mode 100644
index 000000000000..5827a4c92c74
--- /dev/null
+++ b/include/trace/define_custom_trace.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Trace files that want to automate creation of all tracepoints defined
+ * in their file should include this file. The following are macros that the
+ * trace file may define:
+ *
+ * TRACE_SYSTEM defines the system the tracepoint is for
+ *
+ * TRACE_INCLUDE_FILE if the file name is something other than TRACE_SYSTEM.h.
+ * This macro may be defined to tell define_custom_trace.h what file to include.
+ * Note, leave off the ".h".
+ *
+ * TRACE_INCLUDE_PATH if the path is something other than the core kernel
+ * include/trace, then this macro can define the path to use. Note, the path is
+ * relative to define_custom_trace.h, not the file including it. Full path
+ * names must be used for out-of-tree modules.
+ */
+
+#ifdef CREATE_CUSTOM_TRACE_EVENTS
+
+/* Prevent recursion */
+#undef CREATE_CUSTOM_TRACE_EVENTS
+
+#include <linux/stringify.h>
+
+#undef TRACE_CUSTOM_EVENT
+#define TRACE_CUSTOM_EVENT(name, proto, args, tstruct, assign, print)
+
+#undef DEFINE_CUSTOM_EVENT
+#define DEFINE_CUSTOM_EVENT(template, name, proto, args)
+
+#undef TRACE_INCLUDE
+#undef __TRACE_INCLUDE
+
+#ifndef TRACE_INCLUDE_FILE
+# define TRACE_INCLUDE_FILE TRACE_SYSTEM
+# define UNDEF_TRACE_INCLUDE_FILE
+#endif
+
+#ifndef TRACE_INCLUDE_PATH
+# define __TRACE_INCLUDE(system) <trace/events/system.h>
+# define UNDEF_TRACE_INCLUDE_PATH
+#else
+# define __TRACE_INCLUDE(system) __stringify(TRACE_INCLUDE_PATH/system.h)
+#endif
+
+# define TRACE_INCLUDE(system) __TRACE_INCLUDE(system)
+
+/* Let the trace headers be reread */
+#define TRACE_CUSTOM_MULTI_READ
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#ifdef TRACEPOINTS_ENABLED
+#include <trace/trace_custom_events.h>
+#endif
+
+#undef TRACE_CUSTOM_EVENT
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#undef DEFINE_CUSTOM_EVENT
+#undef TRACE_CUSTOM_MULTI_READ
+
+/* Only undef what we defined in this file */
+#ifdef UNDEF_TRACE_INCLUDE_FILE
+# undef TRACE_INCLUDE_FILE
+# undef UNDEF_TRACE_INCLUDE_FILE
+#endif
+
+#ifdef UNDEF_TRACE_INCLUDE_PATH
+# undef TRACE_INCLUDE_PATH
+# undef UNDEF_TRACE_INCLUDE_PATH
+#endif
+
+/* We may be processing more files */
+#define CREATE_CUSTOM_TRACE_EVENTS
+
+#endif /* CREATE_CUSTOM_TRACE_EVENTS */
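
Usage mirrors define_trace.h: a trace header declares its TRACE_CUSTOM_EVENT()s and includes this file at the bottom, and exactly one .c file defines CREATE_CUSTOM_TRACE_EVENTS before including that header. A sketch with hypothetical names (TRACE_SYSTEM and include-guard boilerplate elided):

	/* my_custom.h -- declared once, reread via TRACE_CUSTOM_MULTI_READ */
	TRACE_CUSTOM_EVENT(my_event,
		TP_PROTO(int value),
		TP_ARGS(value),
		TP_STRUCT__entry(__field(int, value)),
		TP_fast_assign(__entry->value = value;),
		TP_printk("value=%d", __entry->value)
	);
	#include <trace/define_custom_trace.h>

	/* my_custom.c -- instantiates the events declared above */
	#define CREATE_CUSTOM_TRACE_EVENTS
	#include "my_custom.h"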
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index bd75f97867b9..00723935dcc7 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -25,7 +25,7 @@
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
- DEFINE_TRACE(name)
+ DEFINE_TRACE(name, PARAMS(proto), PARAMS(args))
#undef TRACE_EVENT_CONDITION
#define TRACE_EVENT_CONDITION(name, proto, args, cond, tstruct, assign, print) \
@@ -39,12 +39,12 @@
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct, \
assign, print, reg, unreg) \
- DEFINE_TRACE_FN(name, reg, unreg)
+ DEFINE_TRACE_FN(name, reg, unreg, PARAMS(proto), PARAMS(args))
#undef TRACE_EVENT_FN_COND
#define TRACE_EVENT_FN_COND(name, proto, args, cond, tstruct, \
assign, print, reg, unreg) \
- DEFINE_TRACE_FN(name, reg, unreg)
+ DEFINE_TRACE_FN(name, reg, unreg, PARAMS(proto), PARAMS(args))
#undef TRACE_EVENT_NOP
#define TRACE_EVENT_NOP(name, proto, args, struct, assign, print)
@@ -54,15 +54,15 @@
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
- DEFINE_TRACE(name)
+ DEFINE_TRACE(name, PARAMS(proto), PARAMS(args))
#undef DEFINE_EVENT_FN
#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg) \
- DEFINE_TRACE_FN(name, reg, unreg)
+ DEFINE_TRACE_FN(name, reg, unreg, PARAMS(proto), PARAMS(args))
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DEFINE_TRACE(name)
+ DEFINE_TRACE(name, PARAMS(proto), PARAMS(args))
#undef DEFINE_EVENT_CONDITION
#define DEFINE_EVENT_CONDITION(template, name, proto, args, cond) \
@@ -70,7 +70,7 @@
#undef DECLARE_TRACE
#define DECLARE_TRACE(name, proto, args) \
- DEFINE_TRACE(name)
+ DEFINE_TRACE(name, PARAMS(proto), PARAMS(args))
#undef TRACE_INCLUDE
#undef __TRACE_INCLUDE
diff --git a/include/trace/events/9p.h b/include/trace/events/9p.h
index 78c5608a1648..4dfa6d7f83ba 100644
--- a/include/trace/events/9p.h
+++ b/include/trace/events/9p.h
@@ -77,6 +77,13 @@
EM( P9_TWSTAT, "P9_TWSTAT" ) \
EMe(P9_RWSTAT, "P9_RWSTAT" )
+
+#define P9_FID_REFTYPE \
+ EM( P9_FID_REF_CREATE, "create " ) \
+ EM( P9_FID_REF_GET, "get " ) \
+ EM( P9_FID_REF_PUT, "put " ) \
+ EMe(P9_FID_REF_DESTROY, "destroy" )
+
/* Define EM() to export the enums to userspace via TRACE_DEFINE_ENUM() */
#undef EM
#undef EMe
@@ -84,6 +91,21 @@
#define EMe(a, b) TRACE_DEFINE_ENUM(a);
P9_MSG_T
+P9_FID_REFTYPE
+
+/* And also use EM/EMe to define helper enums -- once */
+#ifndef __9P_DECLARE_TRACE_ENUMS_ONLY_ONCE
+#define __9P_DECLARE_TRACE_ENUMS_ONLY_ONCE
+#undef EM
+#undef EMe
+#define EM(a, b) a,
+#define EMe(a, b) a
+
+enum p9_fid_reftype {
+ P9_FID_REFTYPE
+} __mode(byte);
+
+#endif
/*
* Now redefine the EM() and EMe() macros to map the enums to the strings
@@ -96,6 +118,8 @@ P9_MSG_T
#define show_9p_op(type) \
__print_symbolic(type, P9_MSG_T)
+#define show_9p_fid_reftype(type) \
+ __print_symbolic(type, P9_FID_REFTYPE)
TRACE_EVENT(9p_client_req,
TP_PROTO(struct p9_client *clnt, int8_t type, int tag),
@@ -168,6 +192,30 @@ TRACE_EVENT(9p_protocol_dump,
__entry->tag, 0, __entry->line, 16, __entry->line + 16)
);
+
+TRACE_EVENT(9p_fid_ref,
+ TP_PROTO(struct p9_fid *fid, __u8 type),
+
+ TP_ARGS(fid, type),
+
+ TP_STRUCT__entry(
+ __field( int, fid )
+ __field( int, refcount )
+ __field( __u8, type )
+ ),
+
+ TP_fast_assign(
+ __entry->fid = fid->fid;
+ __entry->refcount = refcount_read(&fid->count);
+ __entry->type = type;
+ ),
+
+ TP_printk("%s fid %d, refcount %d",
+ show_9p_fid_reftype(__entry->type),
+ __entry->fid, __entry->refcount)
+);
+
+
#endif /* _TRACE_9P_H */
/* This part must be outside protection */
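
Callers wrap fid refcount transitions with the new 9p_fid_ref tracepoint; a sketch of a get-side helper (helper name hypothetical):

	static struct p9_fid *my_fid_get(struct p9_fid *fid)
	{
		refcount_inc(&fid->count);
		trace_9p_fid_ref(fid, P9_FID_REF_GET);
		return fid;
	}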
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index 564ba1b5cf57..e9d412d19dbb 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -33,20 +33,82 @@ enum afs_server_trace {
afs_server_trace_destroy,
afs_server_trace_free,
afs_server_trace_gc,
+ afs_server_trace_get_by_addr,
afs_server_trace_get_by_uuid,
afs_server_trace_get_caps,
afs_server_trace_get_install,
afs_server_trace_get_new_cbi,
+ afs_server_trace_get_probe,
afs_server_trace_give_up_cb,
+ afs_server_trace_purging,
afs_server_trace_put_call,
afs_server_trace_put_cbi,
afs_server_trace_put_find_rsq,
+ afs_server_trace_put_probe,
afs_server_trace_put_slist,
afs_server_trace_put_slist_isort,
afs_server_trace_put_uuid_rsq,
afs_server_trace_update,
};
+
+enum afs_volume_trace {
+ afs_volume_trace_alloc,
+ afs_volume_trace_free,
+ afs_volume_trace_get_alloc_sbi,
+ afs_volume_trace_get_cell_insert,
+ afs_volume_trace_get_new_op,
+ afs_volume_trace_get_query_alias,
+ afs_volume_trace_put_cell_dup,
+ afs_volume_trace_put_cell_root,
+ afs_volume_trace_put_destroy_sbi,
+ afs_volume_trace_put_free_fc,
+ afs_volume_trace_put_put_op,
+ afs_volume_trace_put_query_alias,
+ afs_volume_trace_put_validate_fc,
+ afs_volume_trace_remove,
+};
+
+enum afs_cell_trace {
+ afs_cell_trace_alloc,
+ afs_cell_trace_free,
+ afs_cell_trace_get_queue_dns,
+ afs_cell_trace_get_queue_manage,
+ afs_cell_trace_get_queue_new,
+ afs_cell_trace_get_vol,
+ afs_cell_trace_insert,
+ afs_cell_trace_manage,
+ afs_cell_trace_put_candidate,
+ afs_cell_trace_put_destroy,
+ afs_cell_trace_put_queue_fail,
+ afs_cell_trace_put_queue_work,
+ afs_cell_trace_put_vol,
+ afs_cell_trace_see_source,
+ afs_cell_trace_see_ws,
+ afs_cell_trace_unuse_alias,
+ afs_cell_trace_unuse_check_alias,
+ afs_cell_trace_unuse_delete,
+ afs_cell_trace_unuse_fc,
+ afs_cell_trace_unuse_lookup,
+ afs_cell_trace_unuse_mntpt,
+ afs_cell_trace_unuse_no_pin,
+ afs_cell_trace_unuse_parse,
+ afs_cell_trace_unuse_pin,
+ afs_cell_trace_unuse_probe,
+ afs_cell_trace_unuse_sbi,
+ afs_cell_trace_unuse_ws,
+ afs_cell_trace_use_alias,
+ afs_cell_trace_use_check_alias,
+ afs_cell_trace_use_fc,
+ afs_cell_trace_use_fc_alias,
+ afs_cell_trace_use_lookup,
+ afs_cell_trace_use_mntpt,
+ afs_cell_trace_use_pin,
+ afs_cell_trace_use_probe,
+ afs_cell_trace_use_sbi,
+ afs_cell_trace_wait,
+};
+
enum afs_fs_operation {
afs_FS_FetchData = 130, /* AFS Fetch file data */
afs_FS_FetchACL = 131, /* AFS Fetch file ACL */
@@ -108,9 +170,38 @@ enum afs_vl_operation {
afs_VL_GetEntryByNameU = 527, /* AFS Get Vol Entry By Name operation ID */
afs_VL_GetAddrsU = 533, /* AFS Get FS server addresses */
afs_YFSVL_GetEndpoints = 64002, /* YFS Get FS & Vol server addresses */
+ afs_YFSVL_GetCellName = 64014, /* YFS Get actual cell name */
afs_VL_GetCapabilities = 65537, /* AFS Get VL server capabilities */
};
+enum afs_cm_operation {
+ afs_CB_CallBack = 204, /* AFS break callback promises */
+ afs_CB_InitCallBackState = 205, /* AFS initialise callback state */
+ afs_CB_Probe = 206, /* AFS probe client */
+ afs_CB_GetLock = 207, /* AFS get contents of CM lock table */
+ afs_CB_GetCE = 208, /* AFS get cache file description */
+ afs_CB_GetXStatsVersion = 209, /* AFS get version of extended statistics */
+ afs_CB_GetXStats = 210, /* AFS get contents of extended statistics data */
+ afs_CB_InitCallBackState3 = 213, /* AFS initialise callback state, version 3 */
+ afs_CB_ProbeUuid = 214, /* AFS check the client hasn't rebooted */
+};
+
+enum yfs_cm_operation {
+ yfs_CB_Probe = 206, /* YFS probe client */
+ yfs_CB_GetLock = 207, /* YFS get contents of CM lock table */
+ yfs_CB_XStatsVersion = 209, /* YFS get version of extended statistics */
+ yfs_CB_GetXStats = 210, /* YFS get contents of extended statistics data */
+ yfs_CB_InitCallBackState3 = 213, /* YFS initialise callback state, version 3 */
+ yfs_CB_ProbeUuid = 214, /* YFS check the client hasn't rebooted */
+ yfs_CB_GetServerPrefs = 215,
+ yfs_CB_GetCellServDV = 216,
+ yfs_CB_GetLocalCell = 217,
+ yfs_CB_GetCacheConfig = 218,
+ yfs_CB_GetCellByNum = 65537,
+ yfs_CB_TellMeAboutYourself = 65538, /* get client capabilities */
+ yfs_CB_CallBack = 64204,
+};
+
enum afs_edit_dir_op {
afs_edit_dir_create,
afs_edit_dir_create_error,
@@ -140,6 +231,7 @@ enum afs_eproto_cause {
afs_eproto_bad_status,
afs_eproto_cb_count,
afs_eproto_cb_fid_count,
+ afs_eproto_cellname_len,
afs_eproto_file_type,
afs_eproto_ibulkst_cb_count,
afs_eproto_ibulkst_count,
@@ -167,6 +259,7 @@ enum afs_file_error {
afs_file_error_dir_bad_magic,
afs_file_error_dir_big,
afs_file_error_dir_missing_page,
+ afs_file_error_dir_name_too_long,
afs_file_error_dir_over_end,
afs_file_error_dir_small,
afs_file_error_dir_unmarked_ext,
@@ -213,11 +306,13 @@ enum afs_flock_operation {
enum afs_cb_break_reason {
afs_cb_break_no_break,
+ afs_cb_break_no_promise,
afs_cb_break_for_callback,
afs_cb_break_for_deleted,
afs_cb_break_for_lapsed,
+ afs_cb_break_for_s_reinit,
afs_cb_break_for_unlink,
- afs_cb_break_for_vsbreak,
+ afs_cb_break_for_v_break,
afs_cb_break_for_volume_callback,
afs_cb_break_for_zap,
};
@@ -233,7 +328,7 @@ enum afs_cb_break_reason {
EM(afs_call_trace_get, "GET ") \
EM(afs_call_trace_put, "PUT ") \
EM(afs_call_trace_wake, "WAKE ") \
- E_(afs_call_trace_work, "WORK ")
+ E_(afs_call_trace_work, "QUEUE")
#define afs_server_traces \
EM(afs_server_trace_alloc, "ALLOC ") \
@@ -241,19 +336,77 @@ enum afs_cb_break_reason {
EM(afs_server_trace_destroy, "DESTROY ") \
EM(afs_server_trace_free, "FREE ") \
EM(afs_server_trace_gc, "GC ") \
+ EM(afs_server_trace_get_by_addr, "GET addr ") \
EM(afs_server_trace_get_by_uuid, "GET uuid ") \
EM(afs_server_trace_get_caps, "GET caps ") \
EM(afs_server_trace_get_install, "GET inst ") \
EM(afs_server_trace_get_new_cbi, "GET cbi ") \
+ EM(afs_server_trace_get_probe, "GET probe") \
EM(afs_server_trace_give_up_cb, "giveup-cb") \
+ EM(afs_server_trace_purging, "PURGE ") \
EM(afs_server_trace_put_call, "PUT call ") \
EM(afs_server_trace_put_cbi, "PUT cbi ") \
EM(afs_server_trace_put_find_rsq, "PUT f-rsq") \
+ EM(afs_server_trace_put_probe, "PUT probe") \
EM(afs_server_trace_put_slist, "PUT slist") \
EM(afs_server_trace_put_slist_isort, "PUT isort") \
EM(afs_server_trace_put_uuid_rsq, "PUT u-req") \
E_(afs_server_trace_update, "UPDATE")
+#define afs_volume_traces \
+ EM(afs_volume_trace_alloc, "ALLOC ") \
+ EM(afs_volume_trace_free, "FREE ") \
+ EM(afs_volume_trace_get_alloc_sbi, "GET sbi-alloc ") \
+ EM(afs_volume_trace_get_cell_insert, "GET cell-insrt") \
+ EM(afs_volume_trace_get_new_op, "GET op-new ") \
+ EM(afs_volume_trace_get_query_alias, "GET cell-alias") \
+ EM(afs_volume_trace_put_cell_dup, "PUT cell-dup ") \
+ EM(afs_volume_trace_put_cell_root, "PUT cell-root ") \
+ EM(afs_volume_trace_put_destroy_sbi, "PUT sbi-destry") \
+ EM(afs_volume_trace_put_free_fc, "PUT fc-free ") \
+ EM(afs_volume_trace_put_put_op, "PUT op-put ") \
+ EM(afs_volume_trace_put_query_alias, "PUT cell-alias") \
+ EM(afs_volume_trace_put_validate_fc, "PUT fc-validat") \
+ E_(afs_volume_trace_remove, "REMOVE ")
+
+#define afs_cell_traces \
+ EM(afs_cell_trace_alloc, "ALLOC ") \
+ EM(afs_cell_trace_free, "FREE ") \
+ EM(afs_cell_trace_get_queue_dns, "GET q-dns ") \
+ EM(afs_cell_trace_get_queue_manage, "GET q-mng ") \
+ EM(afs_cell_trace_get_queue_new, "GET q-new ") \
+ EM(afs_cell_trace_get_vol, "GET vol ") \
+ EM(afs_cell_trace_insert, "INSERT ") \
+ EM(afs_cell_trace_manage, "MANAGE ") \
+ EM(afs_cell_trace_put_candidate, "PUT candid") \
+ EM(afs_cell_trace_put_destroy, "PUT destry") \
+ EM(afs_cell_trace_put_queue_work, "PUT q-work") \
+ EM(afs_cell_trace_put_queue_fail, "PUT q-fail") \
+ EM(afs_cell_trace_put_vol, "PUT vol ") \
+ EM(afs_cell_trace_see_source, "SEE source") \
+ EM(afs_cell_trace_see_ws, "SEE ws ") \
+ EM(afs_cell_trace_unuse_alias, "UNU alias ") \
+ EM(afs_cell_trace_unuse_check_alias, "UNU chk-al") \
+ EM(afs_cell_trace_unuse_delete, "UNU delete") \
+ EM(afs_cell_trace_unuse_fc, "UNU fc ") \
+ EM(afs_cell_trace_unuse_lookup, "UNU lookup") \
+ EM(afs_cell_trace_unuse_mntpt, "UNU mntpt ") \
+ EM(afs_cell_trace_unuse_parse, "UNU parse ") \
+ EM(afs_cell_trace_unuse_pin, "UNU pin ") \
+ EM(afs_cell_trace_unuse_probe, "UNU probe ") \
+ EM(afs_cell_trace_unuse_sbi, "UNU sbi ") \
+ EM(afs_cell_trace_unuse_ws, "UNU ws ") \
+ EM(afs_cell_trace_use_alias, "USE alias ") \
+ EM(afs_cell_trace_use_check_alias, "USE chk-al") \
+ EM(afs_cell_trace_use_fc, "USE fc ") \
+ EM(afs_cell_trace_use_fc_alias, "USE fc-al ") \
+ EM(afs_cell_trace_use_lookup, "USE lookup") \
+ EM(afs_cell_trace_use_mntpt, "USE mntpt ") \
+ EM(afs_cell_trace_use_pin, "USE pin ") \
+ EM(afs_cell_trace_use_probe, "USE probe ") \
+ EM(afs_cell_trace_use_sbi, "USE sbi ") \
+ E_(afs_cell_trace_wait, "WAIT ")
+
#define afs_fs_operations \
EM(afs_FS_FetchData, "FS.FetchData") \
EM(afs_FS_FetchStatus, "FS.FetchStatus") \
@@ -310,8 +463,35 @@ enum afs_cb_break_reason {
EM(afs_VL_GetEntryByNameU, "VL.GetEntryByNameU") \
EM(afs_VL_GetAddrsU, "VL.GetAddrsU") \
EM(afs_YFSVL_GetEndpoints, "YFSVL.GetEndpoints") \
+ EM(afs_YFSVL_GetCellName, "YFSVL.GetCellName") \
E_(afs_VL_GetCapabilities, "VL.GetCapabilities")
+#define afs_cm_operations \
+ EM(afs_CB_CallBack, "CB.CallBack") \
+ EM(afs_CB_InitCallBackState, "CB.InitCallBackState") \
+ EM(afs_CB_Probe, "CB.Probe") \
+ EM(afs_CB_GetLock, "CB.GetLock") \
+ EM(afs_CB_GetCE, "CB.GetCE") \
+ EM(afs_CB_GetXStatsVersion, "CB.GetXStatsVersion") \
+ EM(afs_CB_GetXStats, "CB.GetXStats") \
+ EM(afs_CB_InitCallBackState3, "CB.InitCallBackState3") \
+ E_(afs_CB_ProbeUuid, "CB.ProbeUuid")
+
+#define yfs_cm_operations \
+ EM(yfs_CB_Probe, "YFSCB.Probe") \
+ EM(yfs_CB_GetLock, "YFSCB.GetLock") \
+ EM(yfs_CB_XStatsVersion, "YFSCB.XStatsVersion") \
+ EM(yfs_CB_GetXStats, "YFSCB.GetXStats") \
+ EM(yfs_CB_InitCallBackState3, "YFSCB.InitCallBackState3") \
+ EM(yfs_CB_ProbeUuid, "YFSCB.ProbeUuid") \
+ EM(yfs_CB_GetServerPrefs, "YFSCB.GetServerPrefs") \
+ EM(yfs_CB_GetCellServDV, "YFSCB.GetCellServDV") \
+ EM(yfs_CB_GetLocalCell, "YFSCB.GetLocalCell") \
+ EM(yfs_CB_GetCacheConfig, "YFSCB.GetCacheConfig") \
+ EM(yfs_CB_GetCellByNum, "YFSCB.GetCellByNum") \
+ EM(yfs_CB_TellMeAboutYourself, "YFSCB.TellMeAboutYourself") \
+ E_(yfs_CB_CallBack, "YFSCB.CallBack")
+
#define afs_edit_dir_ops \
EM(afs_edit_dir_create, "create") \
EM(afs_edit_dir_create_error, "c_fail") \
@@ -339,6 +519,7 @@ enum afs_cb_break_reason {
EM(afs_eproto_bad_status, "BadStatus") \
EM(afs_eproto_cb_count, "CbCount") \
EM(afs_eproto_cb_fid_count, "CbFidCount") \
+ EM(afs_eproto_cellname_len, "CellNameLen") \
EM(afs_eproto_file_type, "FileTYpe") \
EM(afs_eproto_ibulkst_cb_count, "IBS.CbCount") \
EM(afs_eproto_ibulkst_count, "IBS.FidCount") \
@@ -364,6 +545,7 @@ enum afs_cb_break_reason {
EM(afs_file_error_dir_bad_magic, "DIR_BAD_MAGIC") \
EM(afs_file_error_dir_big, "DIR_BIG") \
EM(afs_file_error_dir_missing_page, "DIR_MISSING_PAGE") \
+ EM(afs_file_error_dir_name_too_long, "DIR_NAME_TOO_LONG") \
EM(afs_file_error_dir_over_end, "DIR_ENT_OVER_END") \
EM(afs_file_error_dir_small, "DIR_SMALL") \
EM(afs_file_error_dir_unmarked_ext, "DIR_UNMARKED_EXT") \
@@ -422,11 +604,13 @@ enum afs_cb_break_reason {
#define afs_cb_break_reasons \
EM(afs_cb_break_no_break, "no-break") \
+ EM(afs_cb_break_no_promise, "no-promise") \
EM(afs_cb_break_for_callback, "break-cb") \
EM(afs_cb_break_for_deleted, "break-del") \
EM(afs_cb_break_for_lapsed, "break-lapsed") \
+ EM(afs_cb_break_for_s_reinit, "s-reinit") \
EM(afs_cb_break_for_unlink, "break-unlink") \
- EM(afs_cb_break_for_vsbreak, "break-vs") \
+ EM(afs_cb_break_for_v_break, "break-v") \
EM(afs_cb_break_for_volume_callback, "break-v-cb") \
E_(afs_cb_break_for_zap, "break-zap")
@@ -440,8 +624,11 @@ enum afs_cb_break_reason {
afs_call_traces;
afs_server_traces;
+afs_cell_traces;
afs_fs_operations;
afs_vl_operations;
+afs_cm_operations;
+yfs_cm_operations;
afs_edit_dir_ops;
afs_edit_dir_reasons;
afs_eproto_causes;
@@ -522,48 +709,49 @@ TRACE_EVENT(afs_cb_call,
TP_STRUCT__entry(
__field(unsigned int, call )
- __field(const char *, name )
__field(u32, op )
+ __field(u16, service_id )
),
TP_fast_assign(
__entry->call = call->debug_id;
- __entry->name = call->type->name;
__entry->op = call->operation_ID;
+ __entry->service_id = call->service_id;
),
- TP_printk("c=%08x %s o=%u",
+ TP_printk("c=%08x %s",
__entry->call,
- __entry->name,
- __entry->op)
+ __entry->service_id == 2501 ?
+ __print_symbolic(__entry->op, yfs_cm_operations) :
+ __print_symbolic(__entry->op, afs_cm_operations))
);
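The literal 2501 in TP_printk() is the RxRPC service ID of the YFS cache-manager service, so the same operation ID is decoded against whichever symbol table matches the service the call arrived on. A sketch of that decode, with hypothetical helper names standing in for the two __print_symbolic() tables:

/* cb_op_name(), yfs_cm_op_name() and afs_cm_op_name() are hypothetical
 * helpers for illustration; the tracepoint does this inline at print
 * time.  2501 mirrors the literal above (the AFS sources also carry a
 * named constant for the YFS cache-manager service). */
static const char *cb_op_name(u16 service_id, u32 op)
{
	if (service_id == 2501)
		return yfs_cm_op_name(op);	/* yfs_cm_operations table */
	return afs_cm_op_name(op);		/* afs_cm_operations table */
}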
TRACE_EVENT(afs_call,
- TP_PROTO(struct afs_call *call, enum afs_call_trace op,
- int usage, int outstanding, const void *where),
+ TP_PROTO(unsigned int call_debug_id, enum afs_call_trace op,
+ int ref, int outstanding, const void *where),
- TP_ARGS(call, op, usage, outstanding, where),
+ TP_ARGS(call_debug_id, op, ref, outstanding, where),
TP_STRUCT__entry(
__field(unsigned int, call )
__field(int, op )
- __field(int, usage )
+ __field(int, ref )
__field(int, outstanding )
__field(const void *, where )
),
TP_fast_assign(
- __entry->call = call->debug_id;
+ __entry->call = call_debug_id;
__entry->op = op;
- __entry->usage = usage;
+ __entry->ref = ref;
__entry->outstanding = outstanding;
__entry->where = where;
),
- TP_printk("c=%08x %s u=%d o=%d sp=%pSR",
+ TP_printk("c=%08x %s r=%d o=%d sp=%pSR",
__entry->call,
__print_symbolic(__entry->op, afs_call_traces),
- __entry->usage,
+ __entry->ref,
__entry->outstanding,
__entry->where)
);
@@ -636,7 +824,7 @@ TRACE_EVENT(afs_make_fs_calli,
TRACE_EVENT(afs_make_fs_call1,
TP_PROTO(struct afs_call *call, const struct afs_fid *fid,
- const char *name),
+ const struct qstr *name),
TP_ARGS(call, fid, name),
@@ -648,8 +836,7 @@ TRACE_EVENT(afs_make_fs_call1,
),
TP_fast_assign(
- int __len = strlen(name);
- __len = min(__len, 23);
+ unsigned int __len = min_t(unsigned int, name->len, 23);
__entry->call = call->debug_id;
__entry->op = call->operation_ID;
if (fid) {
@@ -659,7 +846,7 @@ TRACE_EVENT(afs_make_fs_call1,
__entry->fid.vnode = 0;
__entry->fid.unique = 0;
}
- memcpy(__entry->name, name, __len);
+ memcpy(__entry->name, name->name, __len);
__entry->name[__len] = 0;
),
@@ -674,7 +861,7 @@ TRACE_EVENT(afs_make_fs_call1,
TRACE_EVENT(afs_make_fs_call2,
TP_PROTO(struct afs_call *call, const struct afs_fid *fid,
- const char *name, const char *name2),
+ const struct qstr *name, const struct qstr *name2),
TP_ARGS(call, fid, name, name2),
@@ -687,10 +874,8 @@ TRACE_EVENT(afs_make_fs_call2,
),
TP_fast_assign(
- int __len = strlen(name);
- int __len2 = strlen(name2);
- __len = min(__len, 23);
- __len2 = min(__len2, 23);
+ unsigned int __len = min_t(unsigned int, name->len, 23);
+ unsigned int __len2 = min_t(unsigned int, name2->len, 23);
__entry->call = call->debug_id;
__entry->op = call->operation_ID;
if (fid) {
@@ -700,9 +885,9 @@ TRACE_EVENT(afs_make_fs_call2,
__entry->fid.vnode = 0;
__entry->fid.unique = 0;
}
- memcpy(__entry->name, name, __len);
+ memcpy(__entry->name, name->name, __len);
__entry->name[__len] = 0;
- memcpy(__entry->name2, name2, __len2);
+ memcpy(__entry->name2, name2->name, __len2);
__entry->name2[__len2] = 0;
),
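Switching these events from const char * plus strlen() to struct qstr lets them reuse the length the VFS has already computed and copy at most 23 bytes into the fixed 24-byte trace fields. The truncation, pulled out into a hypothetical helper:

/* Hypothetical helper mirroring the TP_fast_assign() logic above:
 * copy at most 23 bytes of a qstr into a 24-byte trace field and
 * NUL-terminate it. */
static void afs_trace_copy_name(char dst[24], const struct qstr *name)
{
	unsigned int len = min_t(unsigned int, name->len, 23);

	memcpy(dst, name->name, len);
	dst[len] = 0;
}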
@@ -762,65 +947,52 @@ TRACE_EVENT(afs_call_done,
__entry->rx_call)
);
-TRACE_EVENT(afs_send_pages,
- TP_PROTO(struct afs_call *call, struct msghdr *msg,
- pgoff_t first, pgoff_t last, unsigned int offset),
+TRACE_EVENT(afs_send_data,
+ TP_PROTO(struct afs_call *call, struct msghdr *msg),
- TP_ARGS(call, msg, first, last, offset),
+ TP_ARGS(call, msg),
TP_STRUCT__entry(
__field(unsigned int, call )
- __field(pgoff_t, first )
- __field(pgoff_t, last )
- __field(unsigned int, nr )
- __field(unsigned int, bytes )
- __field(unsigned int, offset )
__field(unsigned int, flags )
+ __field(loff_t, offset )
+ __field(loff_t, count )
),
TP_fast_assign(
__entry->call = call->debug_id;
- __entry->first = first;
- __entry->last = last;
- __entry->nr = msg->msg_iter.nr_segs;
- __entry->bytes = msg->msg_iter.count;
- __entry->offset = offset;
__entry->flags = msg->msg_flags;
+ __entry->offset = msg->msg_iter.xarray_start + msg->msg_iter.iov_offset;
+ __entry->count = iov_iter_count(&msg->msg_iter);
),
- TP_printk(" c=%08x %lx-%lx-%lx b=%x o=%x f=%x",
- __entry->call,
- __entry->first, __entry->first + __entry->nr - 1, __entry->last,
- __entry->bytes, __entry->offset,
+ TP_printk(" c=%08x o=%llx n=%llx f=%x",
+ __entry->call, __entry->offset, __entry->count,
__entry->flags)
);
-TRACE_EVENT(afs_sent_pages,
- TP_PROTO(struct afs_call *call, pgoff_t first, pgoff_t last,
- pgoff_t cursor, int ret),
+TRACE_EVENT(afs_sent_data,
+ TP_PROTO(struct afs_call *call, struct msghdr *msg, int ret),
- TP_ARGS(call, first, last, cursor, ret),
+ TP_ARGS(call, msg, ret),
TP_STRUCT__entry(
__field(unsigned int, call )
- __field(pgoff_t, first )
- __field(pgoff_t, last )
- __field(pgoff_t, cursor )
__field(int, ret )
+ __field(loff_t, offset )
+ __field(loff_t, count )
),
TP_fast_assign(
__entry->call = call->debug_id;
- __entry->first = first;
- __entry->last = last;
- __entry->cursor = cursor;
__entry->ret = ret;
+ __entry->offset = msg->msg_iter.xarray_start + msg->msg_iter.iov_offset;
+ __entry->count = iov_iter_count(&msg->msg_iter);
),
- TP_printk(" c=%08x %lx-%lx c=%lx r=%d",
- __entry->call,
- __entry->first, __entry->last,
- __entry->cursor, __entry->ret)
+ TP_printk(" c=%08x o=%llx n=%llx r=%x",
+ __entry->call, __entry->offset, __entry->count,
+ __entry->ret)
);
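Both send-side events now recover the file position from the iterator itself rather than from page indices: for the xarray-backed iterators used here, the absolute offset is the start of the backing range plus how far the iterator has advanced. As a sketch, assuming an ITER_XARRAY msg_iter:

/* Sketch only; assumes msg->msg_iter is ITER_XARRAY, as in the AFS
 * send path traced above. */
static loff_t afs_msg_file_offset(const struct msghdr *msg)
{
	return msg->msg_iter.xarray_start + msg->msg_iter.iov_offset;
}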
TRACE_EVENT(afs_dir_check_failed,
@@ -844,43 +1016,35 @@ TRACE_EVENT(afs_dir_check_failed,
__entry->vnode, __entry->off, __entry->i_size)
);
-/*
- * We use page->private to hold the amount of the page that we've written to,
- * splitting the field into two parts. However, we need to represent a range
- * 0...PAGE_SIZE inclusive, so we can't support 64K pages on a 32-bit system.
- */
-#if PAGE_SIZE > 32768
-#define AFS_PRIV_MAX 0xffffffff
-#define AFS_PRIV_SHIFT 32
-#else
-#define AFS_PRIV_MAX 0xffff
-#define AFS_PRIV_SHIFT 16
-#endif
-
-TRACE_EVENT(afs_page_dirty,
- TP_PROTO(struct afs_vnode *vnode, const char *where,
- pgoff_t page, unsigned long priv),
+TRACE_EVENT(afs_folio_dirty,
+ TP_PROTO(struct afs_vnode *vnode, const char *where, struct folio *folio),
- TP_ARGS(vnode, where, page, priv),
+ TP_ARGS(vnode, where, folio),
TP_STRUCT__entry(
__field(struct afs_vnode *, vnode )
__field(const char *, where )
- __field(pgoff_t, page )
- __field(unsigned long, priv )
+ __field(pgoff_t, index )
+ __field(unsigned long, from )
+ __field(unsigned long, to )
),
TP_fast_assign(
+ unsigned long priv = (unsigned long)folio_get_private(folio);
__entry->vnode = vnode;
__entry->where = where;
- __entry->page = page;
- __entry->priv = priv;
+ __entry->index = folio_index(folio);
+ __entry->from = afs_folio_dirty_from(folio, priv);
+ __entry->to = afs_folio_dirty_to(folio, priv);
+ __entry->to |= (afs_is_folio_dirty_mmapped(priv) ?
+ (1UL << (BITS_PER_LONG - 1)) : 0);
),
- TP_printk("vn=%p %lx %s %lu-%lu",
- __entry->vnode, __entry->page, __entry->where,
- __entry->priv & AFS_PRIV_MAX,
- __entry->priv >> AFS_PRIV_SHIFT)
+ TP_printk("vn=%p %lx %s %lx-%lx%s",
+ __entry->vnode, __entry->index, __entry->where,
+ __entry->from,
+ __entry->to & ~(1UL << (BITS_PER_LONG - 1)),
+ __entry->to & (1UL << (BITS_PER_LONG - 1)) ? " M" : "")
);
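folio_get_private() still carries the packed dirty range; the event decodes it with the afs_folio_dirty_from/to() helpers and then smuggles the mmapped flag into the top bit of "to" so TP_printk() can mask it back off and append the " M" marker. The bit juggling, as a sketch:

/* Sketch of the flag packing done in TP_fast_assign() above.  The
 * decode of folio->private itself lives in the afs_folio_dirty_*()
 * helpers (AFS internals, not shown in this hunk). */
#define AFS_TRACE_MMAPPED_BIT	(1UL << (BITS_PER_LONG - 1))

static unsigned long afs_trace_pack_to(unsigned long to, bool mmapped)
{
	return to | (mmapped ? AFS_TRACE_MMAPPED_BIT : 0);
}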
TRACE_EVENT(afs_call_state,
@@ -988,24 +1152,22 @@ TRACE_EVENT(afs_edit_dir,
);
TRACE_EVENT(afs_protocol_error,
- TP_PROTO(struct afs_call *call, int error, enum afs_eproto_cause cause),
+ TP_PROTO(struct afs_call *call, enum afs_eproto_cause cause),
- TP_ARGS(call, error, cause),
+ TP_ARGS(call, cause),
TP_STRUCT__entry(
__field(unsigned int, call )
- __field(int, error )
__field(enum afs_eproto_cause, cause )
),
TP_fast_assign(
__entry->call = call ? call->debug_id : 0;
- __entry->error = error;
__entry->cause = cause;
),
- TP_printk("c=%08x r=%d %s",
- __entry->call, __entry->error,
+ TP_printk("c=%08x %s",
+ __entry->call,
__print_symbolic(__entry->cause, afs_eproto_causes))
);
@@ -1271,26 +1433,80 @@ TRACE_EVENT(afs_cb_miss,
);
TRACE_EVENT(afs_server,
- TP_PROTO(struct afs_server *server, int usage, enum afs_server_trace reason),
+ TP_PROTO(unsigned int server_debug_id, int ref, int active,
+ enum afs_server_trace reason),
- TP_ARGS(server, usage, reason),
+ TP_ARGS(server_debug_id, ref, active, reason),
TP_STRUCT__entry(
__field(unsigned int, server )
- __field(int, usage )
+ __field(int, ref )
+ __field(int, active )
__field(int, reason )
),
TP_fast_assign(
- __entry->server = server->debug_id;
- __entry->usage = usage;
+ __entry->server = server_debug_id;
+ __entry->ref = ref;
+ __entry->active = active;
__entry->reason = reason;
),
- TP_printk("s=%08x %s u=%d",
+ TP_printk("s=%08x %s u=%d a=%d",
__entry->server,
__print_symbolic(__entry->reason, afs_server_traces),
- __entry->usage)
+ __entry->ref,
+ __entry->active)
+ );
+
+TRACE_EVENT(afs_volume,
+ TP_PROTO(afs_volid_t vid, int ref, enum afs_volume_trace reason),
+
+ TP_ARGS(vid, ref, reason),
+
+ TP_STRUCT__entry(
+ __field(afs_volid_t, vid )
+ __field(int, ref )
+ __field(enum afs_volume_trace, reason )
+ ),
+
+ TP_fast_assign(
+ __entry->vid = vid;
+ __entry->ref = ref;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("V=%llx %s ur=%d",
+ __entry->vid,
+ __print_symbolic(__entry->reason, afs_volume_traces),
+ __entry->ref)
+ );
+
+TRACE_EVENT(afs_cell,
+ TP_PROTO(unsigned int cell_debug_id, int ref, int active,
+ enum afs_cell_trace reason),
+
+ TP_ARGS(cell_debug_id, ref, active, reason),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cell )
+ __field(int, ref )
+ __field(int, active )
+ __field(int, reason )
+ ),
+
+ TP_fast_assign(
+ __entry->cell = cell_debug_id;
+ __entry->ref = ref;
+ __entry->active = active;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("L=%08x %s r=%d a=%d",
+ __entry->cell,
+ __print_symbolic(__entry->reason, afs_cell_traces),
+ __entry->ref,
+ __entry->active)
);
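afs_server, afs_volume and afs_cell now share one shape: a stable debug ID (or volume ID), the refcount after the operation, an active/use count where applicable, and a reason enum. A hypothetical call site matching the TP_PROTO() above (the field names on "cell" are assumptions for illustration):

/* Hypothetical call site; trace_afs_cell() is the stub generated from
 * TRACE_EVENT(afs_cell) above. */
trace_afs_cell(cell->debug_id, refcount_read(&cell->ref),
	       atomic_read(&cell->active), afs_cell_trace_get_vol);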
#endif /* _TRACE_AFS_H */
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index 40c300fe704d..4d8ef71090af 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -7,6 +7,7 @@
#include <linux/ktime.h>
#include <linux/tracepoint.h>
+#include <sound/jack.h>
#define DAPM_DIRECT "(direct)"
#define DAPM_ARROW(dir) (((dir) == SND_SOC_DAPM_DIR_OUT) ? "->" : "<-")
diff --git a/include/trace/events/avc.h b/include/trace/events/avc.h
new file mode 100644
index 000000000000..b55fda2e0773
--- /dev/null
+++ b/include/trace/events/avc.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Authors: Thiébaud Weksteen <tweek@google.com>
+ * Peter Enderborg <Peter.Enderborg@sony.com>
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM avc
+
+#if !defined(_TRACE_SELINUX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SELINUX_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(selinux_audited,
+
+ TP_PROTO(struct selinux_audit_data *sad,
+ char *scontext,
+ char *tcontext,
+ const char *tclass
+ ),
+
+ TP_ARGS(sad, scontext, tcontext, tclass),
+
+ TP_STRUCT__entry(
+ __field(u32, requested)
+ __field(u32, denied)
+ __field(u32, audited)
+ __field(int, result)
+ __string(scontext, scontext)
+ __string(tcontext, tcontext)
+ __string(tclass, tclass)
+ ),
+
+ TP_fast_assign(
+ __entry->requested = sad->requested;
+ __entry->denied = sad->denied;
+ __entry->audited = sad->audited;
+ __entry->result = sad->result;
+ __assign_str(tcontext, tcontext);
+ __assign_str(scontext, scontext);
+ __assign_str(tclass, tclass);
+ ),
+
+ TP_printk("requested=0x%x denied=0x%x audited=0x%x result=%d scontext=%s tcontext=%s tclass=%s",
+ __entry->requested, __entry->denied, __entry->audited, __entry->result,
+ __get_str(scontext), __get_str(tcontext), __get_str(tclass)
+ )
+);
+
+#endif
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
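Because the three contexts are recorded with __string()/__assign_str(), their bytes are copied into the ring-buffer record at trace time. A hypothetical call-site sketch (the real caller lives in the SELinux AVC code):

/* Hypothetical call site; the trace_selinux_audited() stub is
 * generated from the TRACE_EVENT() above.  The string arguments need
 * only outlive the call itself, since __assign_str() copies them. */
trace_selinux_audited(sad, scontext, tcontext, tclass);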
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index 0bddea663b3b..899fdacf57b9 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -28,7 +28,7 @@ DECLARE_EVENT_CLASS(bcache_request,
__entry->sector = bio->bi_iter.bi_sector;
__entry->orig_sector = bio->bi_iter.bi_sector - 16;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
),
TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
@@ -102,7 +102,7 @@ DECLARE_EVENT_CLASS(bcache_bio,
__entry->dev = bio_dev(bio);
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
),
TP_printk("%d,%d %s %llu + %u",
@@ -137,7 +137,7 @@ TRACE_EVENT(bcache_read,
__entry->dev = bio_dev(bio);
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
__entry->cache_hit = hit;
__entry->bypass = bypass;
),
@@ -164,11 +164,11 @@ TRACE_EVENT(bcache_write,
),
TP_fast_assign(
- memcpy(__entry->uuid, c->sb.set_uuid, 16);
+ memcpy(__entry->uuid, c->set_uuid, 16);
__entry->inode = inode;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
__entry->writeback = writeback;
__entry->bypass = bypass;
),
@@ -200,7 +200,7 @@ DECLARE_EVENT_CLASS(cache_set,
),
TP_fast_assign(
- memcpy(__entry->uuid, c->sb.set_uuid, 16);
+ memcpy(__entry->uuid, c->set_uuid, 16);
),
TP_printk("%pU", __entry->uuid)
@@ -238,7 +238,7 @@ TRACE_EVENT(bcache_journal_write,
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
__entry->nr_keys = keys;
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
),
TP_printk("%d,%d %s %llu + %u keys %u",
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 81b43f5bdf23..7f4dfbdf12a6 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -64,7 +64,6 @@ DEFINE_EVENT(block_buffer, block_dirty_buffer,
/**
* block_rq_requeue - place block IO request back on a queue
- * @q: queue holding operation
* @rq: block IO operation request
*
* The block operation request @rq is being placed back into queue
@@ -73,9 +72,9 @@ DEFINE_EVENT(block_buffer, block_dirty_buffer,
*/
TRACE_EVENT(block_rq_requeue,
- TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_PROTO(struct request *rq),
- TP_ARGS(q, rq),
+ TP_ARGS(rq),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -86,11 +85,11 @@ TRACE_EVENT(block_rq_requeue,
),
TP_fast_assign(
- __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
__entry->sector = blk_rq_trace_sector(rq);
__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
- blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
__get_str(cmd)[0] = '\0';
),
@@ -101,21 +100,9 @@ TRACE_EVENT(block_rq_requeue,
__entry->nr_sector, 0)
);
-/**
- * block_rq_complete - block IO operation completed by device driver
- * @rq: block operations request
- * @error: status code
- * @nr_bytes: number of completed bytes
- *
- * The block_rq_complete tracepoint event indicates that some portion
- * of operation request has been completed by the device driver. If
- * the @rq->bio is %NULL, then there is absolutely no additional work to
- * do for the request. If @rq->bio is non-NULL then there is
- * additional work required to complete the request.
- */
-TRACE_EVENT(block_rq_complete,
+DECLARE_EVENT_CLASS(block_rq_completion,
- TP_PROTO(struct request *rq, int error, unsigned int nr_bytes),
+ TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),
TP_ARGS(rq, error, nr_bytes),
@@ -123,18 +110,18 @@ TRACE_EVENT(block_rq_complete,
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( int, error )
__array( char, rwbs, RWBS_LEN )
__dynamic_array( char, cmd, 1 )
),
TP_fast_assign(
- __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
__entry->sector = blk_rq_pos(rq);
__entry->nr_sector = nr_bytes >> 9;
- __entry->error = error;
+ __entry->error = blk_status_to_errno(error);
- blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
__get_str(cmd)[0] = '\0';
),
@@ -145,11 +132,46 @@ TRACE_EVENT(block_rq_complete,
__entry->nr_sector, __entry->error)
);
+/**
+ * block_rq_complete - block IO operation completed by device driver
+ * @rq: block operations request
+ * @error: status code
+ * @nr_bytes: number of completed bytes
+ *
+ * The block_rq_complete tracepoint event indicates that some portion
+ * of operation request has been completed by the device driver. If
+ * the @rq->bio is %NULL, then there is absolutely no additional work to
+ * do for the request. If @rq->bio is non-NULL then there is
+ * additional work required to complete the request.
+ */
+DEFINE_EVENT(block_rq_completion, block_rq_complete,
+
+ TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),
+
+ TP_ARGS(rq, error, nr_bytes)
+);
+
+/**
+ * block_rq_error - block IO operation error reported by device driver
+ * @rq: block operations request
+ * @error: status code
+ * @nr_bytes: number of completed bytes
+ *
+ * The block_rq_error tracepoint event indicates that some portion
+ * of operation request has failed as reported by the device driver.
+ */
+DEFINE_EVENT(block_rq_completion, block_rq_error,
+
+ TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),
+
+ TP_ARGS(rq, error, nr_bytes)
+);
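Folding block_rq_complete into a DECLARE_EVENT_CLASS() costs nothing at the trace level: TRACE_EVENT() is itself defined as a one-instance event class. Roughly, per include/trace/trace_events.h:

#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	DECLARE_EVENT_CLASS(name,				\
			    PARAMS(proto),			\
			    PARAMS(args),			\
			    PARAMS(tstruct),			\
			    PARAMS(assign),			\
			    PARAMS(print));			\
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));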
+
DECLARE_EVENT_CLASS(block_rq,
- TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_PROTO(struct request *rq),
- TP_ARGS(q, rq),
+ TP_ARGS(rq),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -162,12 +184,12 @@ DECLARE_EVENT_CLASS(block_rq,
),
TP_fast_assign(
- __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
__entry->sector = blk_rq_trace_sector(rq);
__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
__entry->bytes = blk_rq_bytes(rq);
- blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
__get_str(cmd)[0] = '\0';
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -181,7 +203,6 @@ DECLARE_EVENT_CLASS(block_rq,
/**
* block_rq_insert - insert block operation request into queue
- * @q: target queue
* @rq: block IO operation request
*
* Called immediately before block operation request @rq is inserted
@@ -191,79 +212,52 @@ DECLARE_EVENT_CLASS(block_rq,
*/
DEFINE_EVENT(block_rq, block_rq_insert,
- TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_PROTO(struct request *rq),
- TP_ARGS(q, rq)
+ TP_ARGS(rq)
);
/**
* block_rq_issue - issue pending block IO request operation to device driver
- * @q: queue holding operation
- * @rq: block IO operation operation request
+ * @rq: block IO operation request
*
* Called when block operation request @rq is sent to a device driver
* for processing.
*/
DEFINE_EVENT(block_rq, block_rq_issue,
- TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_PROTO(struct request *rq),
- TP_ARGS(q, rq)
+ TP_ARGS(rq)
);
/**
- * block_bio_bounce - used bounce buffer when processing block operation
- * @q: queue holding the block operation
- * @bio: block operation
+ * block_rq_merge - merge request with another one in the elevator
+ * @rq: block IO operation request
*
- * A bounce buffer was used to handle the block operation @bio in @q.
- * This occurs when hardware limitations prevent a direct transfer of
- * data between the @bio data memory area and the IO device. Use of a
- * bounce buffer requires extra copying of data and decreases
- * performance.
+ * Called when block operation request @rq is merged to another request
+ * queued in the elevator.
*/
-TRACE_EVENT(block_bio_bounce,
-
- TP_PROTO(struct request_queue *q, struct bio *bio),
-
- TP_ARGS(q, bio),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( sector_t, sector )
- __field( unsigned int, nr_sector )
- __array( char, rwbs, RWBS_LEN )
- __array( char, comm, TASK_COMM_LEN )
- ),
+DEFINE_EVENT(block_rq, block_rq_merge,
- TP_fast_assign(
- __entry->dev = bio_dev(bio);
- __entry->sector = bio->bi_iter.bi_sector;
- __entry->nr_sector = bio_sectors(bio);
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
- memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
- ),
+ TP_PROTO(struct request *rq),
- TP_printk("%d,%d %s %llu + %u [%s]",
- MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
- (unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->comm)
+ TP_ARGS(rq)
);
/**
* block_bio_complete - completed all work on the block operation
* @q: queue holding the block operation
* @bio: block operation completed
- * @error: io error value
*
* This tracepoint indicates there is no further work to do on this
* block IO operation @bio.
*/
TRACE_EVENT(block_bio_complete,
- TP_PROTO(struct request_queue *q, struct bio *bio, int error),
+ TP_PROTO(struct request_queue *q, struct bio *bio),
- TP_ARGS(q, bio, error),
+ TP_ARGS(q, bio),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -277,8 +271,8 @@ TRACE_EVENT(block_bio_complete,
__entry->dev = bio_dev(bio);
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
- __entry->error = error;
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ __entry->error = blk_status_to_errno(bio->bi_status);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
),
TP_printk("%d,%d %s %llu + %u [%d]",
@@ -287,11 +281,11 @@ TRACE_EVENT(block_bio_complete,
__entry->nr_sector, __entry->error)
);
-DECLARE_EVENT_CLASS(block_bio_merge,
+DECLARE_EVENT_CLASS(block_bio,
- TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
+ TP_PROTO(struct bio *bio),
- TP_ARGS(q, rq, bio),
+ TP_ARGS(bio),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -305,7 +299,7 @@ DECLARE_EVENT_CLASS(block_bio_merge,
__entry->dev = bio_dev(bio);
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -316,133 +310,62 @@ DECLARE_EVENT_CLASS(block_bio_merge,
);
/**
+ * block_bio_bounce - used bounce buffer when processing block operation
+ * @bio: block operation
+ *
+ * A bounce buffer was used to handle the block operation @bio.
+ * This occurs when hardware limitations prevent a direct transfer of
+ * data between the @bio data memory area and the IO device. Use of a
+ * bounce buffer requires extra copying of data and decreases
+ * performance.
+ */
+DEFINE_EVENT(block_bio, block_bio_bounce,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
+);
+
+/**
* block_bio_backmerge - merging block operation to the end of an existing operation
- * @q: queue holding operation
- * @rq: request bio is being merged into
* @bio: new block operation to merge
*
- * Merging block request @bio to the end of an existing block request
- * in queue @q.
+ * Merging block request @bio to the end of an existing block request.
*/
-DEFINE_EVENT(block_bio_merge, block_bio_backmerge,
-
- TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
-
- TP_ARGS(q, rq, bio)
+DEFINE_EVENT(block_bio, block_bio_backmerge,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
);
/**
* block_bio_frontmerge - merging block operation to the beginning of an existing operation
- * @q: queue holding operation
- * @rq: request bio is being merged into
* @bio: new block operation to merge
*
- * Merging block IO operation @bio to the beginning of an existing block
- * operation in queue @q.
+ * Merging block IO operation @bio to the beginning of an existing block request.
*/
-DEFINE_EVENT(block_bio_merge, block_bio_frontmerge,
-
- TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
-
- TP_ARGS(q, rq, bio)
+DEFINE_EVENT(block_bio, block_bio_frontmerge,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
);
/**
* block_bio_queue - putting new block IO operation in queue
- * @q: queue holding operation
* @bio: new block operation
*
* About to place the block IO operation @bio into its request queue.
*/
-TRACE_EVENT(block_bio_queue,
-
- TP_PROTO(struct request_queue *q, struct bio *bio),
-
- TP_ARGS(q, bio),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( sector_t, sector )
- __field( unsigned int, nr_sector )
- __array( char, rwbs, RWBS_LEN )
- __array( char, comm, TASK_COMM_LEN )
- ),
-
- TP_fast_assign(
- __entry->dev = bio_dev(bio);
- __entry->sector = bio->bi_iter.bi_sector;
- __entry->nr_sector = bio_sectors(bio);
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
- memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
- ),
-
- TP_printk("%d,%d %s %llu + %u [%s]",
- MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
- (unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->comm)
-);
-
-DECLARE_EVENT_CLASS(block_get_rq,
-
- TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
-
- TP_ARGS(q, bio, rw),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( sector_t, sector )
- __field( unsigned int, nr_sector )
- __array( char, rwbs, RWBS_LEN )
- __array( char, comm, TASK_COMM_LEN )
- ),
-
- TP_fast_assign(
- __entry->dev = bio ? bio_dev(bio) : 0;
- __entry->sector = bio ? bio->bi_iter.bi_sector : 0;
- __entry->nr_sector = bio ? bio_sectors(bio) : 0;
- blk_fill_rwbs(__entry->rwbs,
- bio ? bio->bi_opf : 0, __entry->nr_sector);
- memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
- ),
-
- TP_printk("%d,%d %s %llu + %u [%s]",
- MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
- (unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->comm)
+DEFINE_EVENT(block_bio, block_bio_queue,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
);
/**
* block_getrq - get a free request entry in queue for block IO operations
- * @q: queue for operations
* @bio: pending block IO operation (can be %NULL)
- * @rw: low bit indicates a read (%0) or a write (%1)
*
- * A request struct for queue @q has been allocated to handle the
- * block IO operation @bio.
+ * A request struct has been allocated to handle the block IO operation @bio.
*/
-DEFINE_EVENT(block_get_rq, block_getrq,
-
- TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
-
- TP_ARGS(q, bio, rw)
-);
-
-/**
- * block_sleeprq - waiting to get a free request entry in queue for block IO operation
- * @q: queue for operation
- * @bio: pending block IO operation (can be %NULL)
- * @rw: low bit indicates a read (%0) or a write (%1)
- *
- * In the case where a request struct cannot be provided for queue @q
- * the process needs to wait for an request struct to become
- * available. This tracepoint event is generated each time the
- * process goes to sleep waiting for request struct become available.
- */
-DEFINE_EVENT(block_get_rq, block_sleeprq,
-
- TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
-
- TP_ARGS(q, bio, rw)
+DEFINE_EVENT(block_bio, block_getrq,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
);
/**
@@ -507,21 +430,19 @@ DEFINE_EVENT(block_unplug, block_unplug,
/**
* block_split - split a single bio struct into two bio structs
- * @q: queue containing the bio
* @bio: block operation being split
* @new_sector: The starting sector for the new bio
*
- * The bio request @bio in request queue @q needs to be split into two
- * bio requests. The newly created @bio request starts at
- * @new_sector. This split may be required due to hardware limitation
- * such as operation crossing device boundaries in a RAID system.
+ * The bio request @bio needs to be split into two bio requests. The newly
+ * created @bio request starts at @new_sector. This split may be required due to
+ * hardware limitations such as operation crossing device boundaries in a RAID
+ * system.
*/
TRACE_EVENT(block_split,
- TP_PROTO(struct request_queue *q, struct bio *bio,
- unsigned int new_sector),
+ TP_PROTO(struct bio *bio, unsigned int new_sector),
- TP_ARGS(q, bio, new_sector),
+ TP_ARGS(bio, new_sector),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -535,7 +456,7 @@ TRACE_EVENT(block_split,
__entry->dev = bio_dev(bio);
__entry->sector = bio->bi_iter.bi_sector;
__entry->new_sector = new_sector;
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -548,9 +469,8 @@ TRACE_EVENT(block_split,
/**
* block_bio_remap - map request for a logical device to the raw device
- * @q: queue holding the operation
* @bio: revised operation
- * @dev: device for the operation
+ * @dev: original device for the operation
* @from: original sector for the operation
*
* An operation for a logical device has been mapped to the
@@ -558,10 +478,9 @@ TRACE_EVENT(block_split,
*/
TRACE_EVENT(block_bio_remap,
- TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
- sector_t from),
+ TP_PROTO(struct bio *bio, dev_t dev, sector_t from),
- TP_ARGS(q, bio, dev, from),
+ TP_ARGS(bio, dev, from),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -578,7 +497,7 @@ TRACE_EVENT(block_bio_remap,
__entry->nr_sector = bio_sectors(bio);
__entry->old_dev = dev;
__entry->old_sector = from;
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
),
TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
@@ -591,7 +510,6 @@ TRACE_EVENT(block_bio_remap,
/**
* block_rq_remap - map request for a block operation request
- * @q: queue holding the operation
* @rq: block IO operation request
* @dev: device for the operation
* @from: original sector for the operation
@@ -602,10 +520,9 @@ TRACE_EVENT(block_bio_remap,
*/
TRACE_EVENT(block_rq_remap,
- TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
- sector_t from),
+ TP_PROTO(struct request *rq, dev_t dev, sector_t from),
- TP_ARGS(q, rq, dev, from),
+ TP_ARGS(rq, dev, from),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -618,13 +535,13 @@ TRACE_EVENT(block_rq_remap,
),
TP_fast_assign(
- __entry->dev = disk_devt(rq->rq_disk);
+ __entry->dev = disk_devt(rq->q->disk);
__entry->sector = blk_rq_pos(rq);
__entry->nr_sector = blk_rq_sectors(rq);
__entry->old_dev = dev;
__entry->old_sector = from;
__entry->nr_bios = blk_rq_count_bios(rq);
- blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
),
TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 17088a112ed0..ed50e81174bf 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -24,19 +24,14 @@ struct btrfs_free_cluster;
struct map_lookup;
struct extent_buffer;
struct btrfs_work;
-struct __btrfs_workqueue;
+struct btrfs_workqueue;
struct btrfs_qgroup_extent_record;
struct btrfs_qgroup;
struct extent_io_tree;
struct prelim_ref;
struct btrfs_space_info;
-
-TRACE_DEFINE_ENUM(FLUSH_DELAYED_ITEMS_NR);
-TRACE_DEFINE_ENUM(FLUSH_DELAYED_ITEMS);
-TRACE_DEFINE_ENUM(FLUSH_DELALLOC);
-TRACE_DEFINE_ENUM(FLUSH_DELALLOC_WAIT);
-TRACE_DEFINE_ENUM(ALLOC_CHUNK);
-TRACE_DEFINE_ENUM(COMMIT_TRANS);
+struct btrfs_raid_bio;
+struct raid56_bio_trace_info;
#define show_ref_type(type) \
__print_symbolic(type, \
@@ -60,6 +55,7 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS);
{ BTRFS_TREE_RELOC_OBJECTID, "TREE_RELOC" }, \
{ BTRFS_UUID_TREE_OBJECTID, "UUID_TREE" }, \
{ BTRFS_FREE_SPACE_TREE_OBJECTID, "FREE_SPACE_TREE" }, \
+ { BTRFS_BLOCK_GROUP_TREE_OBJECTID, "BLOCK_GROUP_TREE" },\
{ BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
#define show_root_type(obj) \
@@ -67,28 +63,73 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS);
(obj >= BTRFS_ROOT_TREE_OBJECTID && \
obj <= BTRFS_QUOTA_TREE_OBJECTID)) ? __show_root_type(obj) : "-"
-#define show_fi_type(type) \
- __print_symbolic(type, \
- { BTRFS_FILE_EXTENT_INLINE, "INLINE" }, \
- { BTRFS_FILE_EXTENT_REG, "REG" }, \
- { BTRFS_FILE_EXTENT_PREALLOC, "PREALLOC"})
+#define FLUSH_ACTIONS \
+ EM( BTRFS_RESERVE_NO_FLUSH, "BTRFS_RESERVE_NO_FLUSH") \
+ EM( BTRFS_RESERVE_FLUSH_LIMIT, "BTRFS_RESERVE_FLUSH_LIMIT") \
+ EM( BTRFS_RESERVE_FLUSH_ALL, "BTRFS_RESERVE_FLUSH_ALL") \
+ EMe(BTRFS_RESERVE_FLUSH_ALL_STEAL, "BTRFS_RESERVE_FLUSH_ALL_STEAL")
+
+#define FI_TYPES \
+ EM( BTRFS_FILE_EXTENT_INLINE, "INLINE") \
+ EM( BTRFS_FILE_EXTENT_REG, "REG") \
+ EMe(BTRFS_FILE_EXTENT_PREALLOC, "PREALLOC")
+
+#define QGROUP_RSV_TYPES \
+ EM( BTRFS_QGROUP_RSV_DATA, "DATA") \
+ EM( BTRFS_QGROUP_RSV_META_PERTRANS, "META_PERTRANS") \
+ EMe(BTRFS_QGROUP_RSV_META_PREALLOC, "META_PREALLOC")
+
+#define IO_TREE_OWNER \
+ EM( IO_TREE_FS_PINNED_EXTENTS, "PINNED_EXTENTS") \
+ EM( IO_TREE_FS_EXCLUDED_EXTENTS, "EXCLUDED_EXTENTS") \
+ EM( IO_TREE_BTREE_INODE_IO, "BTREE_INODE_IO") \
+ EM( IO_TREE_INODE_IO, "INODE_IO") \
+ EM( IO_TREE_RELOC_BLOCKS, "RELOC_BLOCKS") \
+ EM( IO_TREE_TRANS_DIRTY_PAGES, "TRANS_DIRTY_PAGES") \
+ EM( IO_TREE_ROOT_DIRTY_LOG_PAGES, "ROOT_DIRTY_LOG_PAGES") \
+ EM( IO_TREE_INODE_FILE_EXTENT, "INODE_FILE_EXTENT") \
+ EM( IO_TREE_LOG_CSUM_RANGE, "LOG_CSUM_RANGE") \
+ EMe(IO_TREE_SELFTEST, "SELFTEST")
+
+#define FLUSH_STATES \
+ EM( FLUSH_DELAYED_ITEMS_NR, "FLUSH_DELAYED_ITEMS_NR") \
+ EM( FLUSH_DELAYED_ITEMS, "FLUSH_DELAYED_ITEMS") \
+ EM( FLUSH_DELALLOC, "FLUSH_DELALLOC") \
+ EM( FLUSH_DELALLOC_WAIT, "FLUSH_DELALLOC_WAIT") \
+ EM( FLUSH_DELALLOC_FULL, "FLUSH_DELALLOC_FULL") \
+ EM( FLUSH_DELAYED_REFS_NR, "FLUSH_DELAYED_REFS_NR") \
+ EM( FLUSH_DELAYED_REFS, "FLUSH_DELAYED_REFS") \
+ EM( ALLOC_CHUNK, "ALLOC_CHUNK") \
+ EM( ALLOC_CHUNK_FORCE, "ALLOC_CHUNK_FORCE") \
+ EM( RUN_DELAYED_IPUTS, "RUN_DELAYED_IPUTS") \
+ EMe(COMMIT_TRANS, "COMMIT_TRANS")
+
+/*
+ * First define the enums in the above macros to be exported to userspace via
+ * TRACE_DEFINE_ENUM().
+ */
+
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+FLUSH_ACTIONS
+FI_TYPES
+QGROUP_RSV_TYPES
+IO_TREE_OWNER
+FLUSH_STATES
+
+/*
+ * Now redefine the EM and EMe macros to map the enums to the strings that will
+ * be printed in the output
+ */
+
+#undef EM
+#undef EMe
+#define EM(a, b) {a, b},
+#define EMe(a, b) {a, b}
-#define show_qgroup_rsv_type(type) \
- __print_symbolic(type, \
- { BTRFS_QGROUP_RSV_DATA, "DATA" }, \
- { BTRFS_QGROUP_RSV_META_PERTRANS, "META_PERTRANS" }, \
- { BTRFS_QGROUP_RSV_META_PREALLOC, "META_PREALLOC" })
-
-#define show_extent_io_tree_owner(owner) \
- __print_symbolic(owner, \
- { IO_TREE_FS_INFO_FREED_EXTENTS0, "FREED_EXTENTS0" }, \
- { IO_TREE_FS_INFO_FREED_EXTENTS1, "FREED_EXTENTS1" }, \
- { IO_TREE_INODE_IO, "INODE_IO" }, \
- { IO_TREE_INODE_IO_FAILURE, "INODE_IO_FAILURE" }, \
- { IO_TREE_RELOC_BLOCKS, "RELOC_BLOCKS" }, \
- { IO_TREE_TRANS_DIRTY_PAGES, "TRANS_DIRTY_PAGES" }, \
- { IO_TREE_ROOT_DIRTY_LOG_PAGES, "ROOT_DIRTY_LOG_PAGES" }, \
- { IO_TREE_SELFTEST, "SELFTEST" })
#define BTRFS_GROUP_FLAGS \
{ BTRFS_BLOCK_GROUP_DATA, "DATA"}, \
@@ -112,7 +153,6 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS);
{ EXTENT_NODATASUM, "NODATASUM"}, \
{ EXTENT_CLEAR_META_RESV, "CLEAR_META_RESV"}, \
{ EXTENT_NEED_WAIT, "NEED_WAIT"}, \
- { EXTENT_DAMAGED, "DAMAGED"}, \
{ EXTENT_NORESERVE, "NORESERVE"}, \
{ EXTENT_QGROUP_RESERVED, "QGROUP_RESERVED"}, \
{ EXTENT_CLEAR_DATA_RESV, "CLEAR_DATA_RESV"}, \
@@ -143,18 +183,18 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS);
TRACE_EVENT(btrfs_transaction_commit,
- TP_PROTO(const struct btrfs_root *root),
+ TP_PROTO(const struct btrfs_fs_info *fs_info),
- TP_ARGS(root),
+ TP_ARGS(fs_info),
TP_STRUCT__entry_btrfs(
__field( u64, generation )
__field( u64, root_objectid )
),
- TP_fast_assign_btrfs(root->fs_info,
- __entry->generation = root->fs_info->generation;
- __entry->root_objectid = root->root_key.objectid;
+ TP_fast_assign_btrfs(fs_info,
+ __entry->generation = fs_info->generation;
+ __entry->root_objectid = BTRFS_ROOT_TREE_OBJECTID;
),
TP_printk_btrfs("root=%llu(%s) gen=%llu",
@@ -378,7 +418,7 @@ DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular,
__entry->disk_isize, __entry->extent_start,
__entry->extent_end, __entry->num_bytes, __entry->ram_bytes,
__entry->disk_bytenr, __entry->disk_num_bytes,
- __entry->extent_offset, show_fi_type(__entry->extent_type),
+ __entry->extent_offset, __print_symbolic(__entry->extent_type, FI_TYPES),
__entry->compression)
);
@@ -419,7 +459,7 @@ DECLARE_EVENT_CLASS(
"extent_type=%s compression=%u",
show_root_type(__entry->root_obj), __entry->ino, __entry->isize,
__entry->disk_isize, __entry->extent_start,
- __entry->extent_end, show_fi_type(__entry->extent_type),
+ __entry->extent_end, __print_symbolic(__entry->extent_type, FI_TYPES),
__entry->compression)
);
@@ -461,20 +501,20 @@ DEFINE_EVENT(
#define show_ordered_flags(flags) \
__print_flags(flags, "|", \
- { (1 << BTRFS_ORDERED_IO_DONE), "IO_DONE" }, \
- { (1 << BTRFS_ORDERED_COMPLETE), "COMPLETE" }, \
+ { (1 << BTRFS_ORDERED_REGULAR), "REGULAR" }, \
{ (1 << BTRFS_ORDERED_NOCOW), "NOCOW" }, \
- { (1 << BTRFS_ORDERED_COMPRESSED), "COMPRESSED" }, \
{ (1 << BTRFS_ORDERED_PREALLOC), "PREALLOC" }, \
+ { (1 << BTRFS_ORDERED_COMPRESSED), "COMPRESSED" }, \
{ (1 << BTRFS_ORDERED_DIRECT), "DIRECT" }, \
+ { (1 << BTRFS_ORDERED_IO_DONE), "IO_DONE" }, \
+ { (1 << BTRFS_ORDERED_COMPLETE), "COMPLETE" }, \
{ (1 << BTRFS_ORDERED_IOERR), "IOERR" }, \
- { (1 << BTRFS_ORDERED_UPDATED_ISIZE), "UPDATED_ISIZE" }, \
{ (1 << BTRFS_ORDERED_TRUNCATED), "TRUNCATED" })
DECLARE_EVENT_CLASS(btrfs__ordered_extent,
- TP_PROTO(const struct inode *inode,
+ TP_PROTO(const struct btrfs_inode *inode,
const struct btrfs_ordered_extent *ordered),
TP_ARGS(inode, ordered),
@@ -493,8 +533,8 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
__field( u64, truncated_len )
),
- TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
- __entry->ino = btrfs_ino(BTRFS_I(inode));
+ TP_fast_assign_btrfs(inode->root->fs_info,
+ __entry->ino = btrfs_ino(inode);
__entry->file_offset = ordered->file_offset;
__entry->start = ordered->disk_bytenr;
__entry->len = ordered->num_bytes;
@@ -503,8 +543,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
__entry->flags = ordered->flags;
__entry->compress_type = ordered->compress_type;
__entry->refs = refcount_read(&ordered->refs);
- __entry->root_objectid =
- BTRFS_I(inode)->root->root_key.objectid;
+ __entry->root_objectid = inode->root->root_key.objectid;
__entry->truncated_len = ordered->truncated_len;
),
@@ -527,7 +566,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_add,
- TP_PROTO(const struct inode *inode,
+ TP_PROTO(const struct btrfs_inode *inode,
const struct btrfs_ordered_extent *ordered),
TP_ARGS(inode, ordered)
@@ -535,7 +574,7 @@ DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_add,
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_remove,
- TP_PROTO(const struct inode *inode,
+ TP_PROTO(const struct btrfs_inode *inode,
const struct btrfs_ordered_extent *ordered),
TP_ARGS(inode, ordered)
@@ -543,7 +582,7 @@ DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_remove,
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_start,
- TP_PROTO(const struct inode *inode,
+ TP_PROTO(const struct btrfs_inode *inode,
const struct btrfs_ordered_extent *ordered),
TP_ARGS(inode, ordered)
@@ -551,12 +590,76 @@ DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_start,
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_put,
- TP_PROTO(const struct inode *inode,
+ TP_PROTO(const struct btrfs_inode *inode,
const struct btrfs_ordered_extent *ordered),
TP_ARGS(inode, ordered)
);
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_range,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_first_range,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_for_logging,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_first,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_split,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_dec_test_pending,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_mark_finished,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
DECLARE_EVENT_CLASS(btrfs__writepage,
TP_PROTO(const struct page *page, const struct inode *inode,
@@ -616,34 +719,30 @@ DEFINE_EVENT(btrfs__writepage, __extent_writepage,
TRACE_EVENT(btrfs_writepage_end_io_hook,
- TP_PROTO(const struct page *page, u64 start, u64 end, int uptodate),
+ TP_PROTO(const struct btrfs_inode *inode, u64 start, u64 end,
+ int uptodate),
- TP_ARGS(page, start, end, uptodate),
+ TP_ARGS(inode, start, end, uptodate),
TP_STRUCT__entry_btrfs(
__field( u64, ino )
- __field( unsigned long, index )
__field( u64, start )
__field( u64, end )
__field( int, uptodate )
__field( u64, root_objectid )
),
- TP_fast_assign_btrfs(btrfs_sb(page->mapping->host->i_sb),
- __entry->ino = btrfs_ino(BTRFS_I(page->mapping->host));
- __entry->index = page->index;
+ TP_fast_assign_btrfs(inode->root->fs_info,
+ __entry->ino = btrfs_ino(inode);
__entry->start = start;
__entry->end = end;
__entry->uptodate = uptodate;
- __entry->root_objectid =
- BTRFS_I(page->mapping->host)->root->root_key.objectid;
+ __entry->root_objectid = inode->root->root_key.objectid;
),
- TP_printk_btrfs("root=%llu(%s) ino=%llu page_index=%lu start=%llu "
- "end=%llu uptodate=%d",
+ TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu end=%llu uptodate=%d",
show_root_type(__entry->root_objectid),
- __entry->ino, __entry->index,
- __entry->start,
+ __entry->ino, __entry->start,
__entry->end, __entry->uptodate)
);
@@ -1041,12 +1140,6 @@ TRACE_EVENT(btrfs_space_reservation,
__entry->bytes)
);
-#define show_flush_action(action) \
- __print_symbolic(action, \
- { BTRFS_RESERVE_NO_FLUSH, "BTRFS_RESERVE_NO_FLUSH"}, \
- { BTRFS_RESERVE_FLUSH_LIMIT, "BTRFS_RESERVE_FLUSH_LIMIT"}, \
- { BTRFS_RESERVE_FLUSH_ALL, "BTRFS_RESERVE_FLUSH_ALL"})
-
TRACE_EVENT(btrfs_trigger_flush,
TP_PROTO(const struct btrfs_fs_info *fs_info, u64 flags, u64 bytes,
@@ -1065,43 +1158,32 @@ TRACE_EVENT(btrfs_trigger_flush,
__entry->flags = flags;
__entry->bytes = bytes;
__entry->flush = flush;
- __assign_str(reason, reason)
+ __assign_str(reason, reason);
),
TP_printk_btrfs("%s: flush=%d(%s) flags=%llu(%s) bytes=%llu",
__get_str(reason), __entry->flush,
- show_flush_action(__entry->flush),
+ __print_symbolic(__entry->flush, FLUSH_ACTIONS),
__entry->flags,
__print_flags((unsigned long)__entry->flags, "|",
BTRFS_GROUP_FLAGS),
__entry->bytes)
);
-#define show_flush_state(state) \
- __print_symbolic(state, \
- { FLUSH_DELAYED_ITEMS_NR, "FLUSH_DELAYED_ITEMS_NR"}, \
- { FLUSH_DELAYED_ITEMS, "FLUSH_DELAYED_ITEMS"}, \
- { FLUSH_DELALLOC, "FLUSH_DELALLOC"}, \
- { FLUSH_DELALLOC_WAIT, "FLUSH_DELALLOC_WAIT"}, \
- { FLUSH_DELAYED_REFS_NR, "FLUSH_DELAYED_REFS_NR"}, \
- { FLUSH_DELAYED_REFS, "FLUSH_ELAYED_REFS"}, \
- { ALLOC_CHUNK, "ALLOC_CHUNK"}, \
- { ALLOC_CHUNK_FORCE, "ALLOC_CHUNK_FORCE"}, \
- { RUN_DELAYED_IPUTS, "RUN_DELAYED_IPUTS"}, \
- { COMMIT_TRANS, "COMMIT_TRANS"})
TRACE_EVENT(btrfs_flush_space,
TP_PROTO(const struct btrfs_fs_info *fs_info, u64 flags, u64 num_bytes,
- int state, int ret),
+ int state, int ret, bool for_preempt),
- TP_ARGS(fs_info, flags, num_bytes, state, ret),
+ TP_ARGS(fs_info, flags, num_bytes, state, ret, for_preempt),
TP_STRUCT__entry_btrfs(
__field( u64, flags )
__field( u64, num_bytes )
__field( int, state )
__field( int, ret )
+ __field( bool, for_preempt )
),
TP_fast_assign_btrfs(fs_info,
@@ -1109,15 +1191,16 @@ TRACE_EVENT(btrfs_flush_space,
__entry->num_bytes = num_bytes;
__entry->state = state;
__entry->ret = ret;
+ __entry->for_preempt = for_preempt;
),
- TP_printk_btrfs("state=%d(%s) flags=%llu(%s) num_bytes=%llu ret=%d",
+ TP_printk_btrfs("state=%d(%s) flags=%llu(%s) num_bytes=%llu ret=%d for_preempt=%d",
__entry->state,
- show_flush_state(__entry->state),
+ __print_symbolic(__entry->state, FLUSH_STATES),
__entry->flags,
__print_flags((unsigned long)__entry->flags, "|",
BTRFS_GROUP_FLAGS),
- __entry->num_bytes, __entry->ret)
+ __entry->num_bytes, __entry->ret, __entry->for_preempt)
);
DECLARE_EVENT_CLASS(btrfs__reserved_extent,
@@ -1158,25 +1241,27 @@ DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_free,
TRACE_EVENT(find_free_extent,
- TP_PROTO(const struct btrfs_fs_info *fs_info, u64 num_bytes,
+ TP_PROTO(const struct btrfs_root *root, u64 num_bytes,
u64 empty_size, u64 data),
- TP_ARGS(fs_info, num_bytes, empty_size, data),
+ TP_ARGS(root, num_bytes, empty_size, data),
TP_STRUCT__entry_btrfs(
+ __field( u64, root_objectid )
__field( u64, num_bytes )
__field( u64, empty_size )
__field( u64, data )
),
- TP_fast_assign_btrfs(fs_info,
+ TP_fast_assign_btrfs(root->fs_info,
+ __entry->root_objectid = root->root_key.objectid;
__entry->num_bytes = num_bytes;
__entry->empty_size = empty_size;
__entry->data = data;
),
TP_printk_btrfs("root=%llu(%s) len=%llu empty_size=%llu flags=%llu(%s)",
- show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
+ show_root_type(__entry->root_objectid),
__entry->num_bytes, __entry->empty_size, __entry->data,
__print_flags((unsigned long)__entry->data, "|",
BTRFS_GROUP_FLAGS))
@@ -1323,13 +1408,13 @@ TRACE_EVENT(alloc_extent_state,
TP_STRUCT__entry(
__field(const struct extent_state *, state)
- __field(gfp_t, mask)
+ __field(unsigned long, mask)
__field(const void*, ip)
),
TP_fast_assign(
__entry->state = state,
- __entry->mask = mask,
+ __entry->mask = (__force unsigned long)mask,
__entry->ip = (const void *)IP
),
@@ -1436,42 +1521,36 @@ DEFINE_EVENT(btrfs__work, btrfs_ordered_sched,
TP_ARGS(work)
);
-DECLARE_EVENT_CLASS(btrfs__workqueue,
+DECLARE_EVENT_CLASS(btrfs_workqueue,
- TP_PROTO(const struct __btrfs_workqueue *wq,
- const char *name, int high),
+ TP_PROTO(const struct btrfs_workqueue *wq, const char *name),
- TP_ARGS(wq, name, high),
+ TP_ARGS(wq, name),
TP_STRUCT__entry_btrfs(
__field( const void *, wq )
__string( name, name )
- __field( int , high )
),
TP_fast_assign_btrfs(btrfs_workqueue_owner(wq),
__entry->wq = wq;
__assign_str(name, name);
- __entry->high = high;
),
- TP_printk_btrfs("name=%s%s wq=%p", __get_str(name),
- __print_flags(__entry->high, "",
- {(WQ_HIGHPRI), "-high"}),
+ TP_printk_btrfs("name=%s wq=%p", __get_str(name),
__entry->wq)
);
-DEFINE_EVENT(btrfs__workqueue, btrfs_workqueue_alloc,
+DEFINE_EVENT(btrfs_workqueue, btrfs_workqueue_alloc,
- TP_PROTO(const struct __btrfs_workqueue *wq,
- const char *name, int high),
+ TP_PROTO(const struct btrfs_workqueue *wq, const char *name),
- TP_ARGS(wq, name, high)
+ TP_ARGS(wq, name)
);
-DECLARE_EVENT_CLASS(btrfs__workqueue_done,
+DECLARE_EVENT_CLASS(btrfs_workqueue_done,
- TP_PROTO(const struct __btrfs_workqueue *wq),
+ TP_PROTO(const struct btrfs_workqueue *wq),
TP_ARGS(wq),
@@ -1486,9 +1565,9 @@ DECLARE_EVENT_CLASS(btrfs__workqueue_done,
TP_printk_btrfs("wq=%p", __entry->wq)
);
-DEFINE_EVENT(btrfs__workqueue_done, btrfs_workqueue_destroy,
+DEFINE_EVENT(btrfs_workqueue_done, btrfs_workqueue_destroy,
- TP_PROTO(const struct __btrfs_workqueue *wq),
+ TP_PROTO(const struct btrfs_workqueue *wq),
TP_ARGS(wq)
);
@@ -1689,7 +1768,7 @@ TRACE_EVENT(qgroup_update_reserve,
),
TP_printk_btrfs("qgid=%llu type=%s cur_reserved=%llu diff=%lld",
- __entry->qgid, show_qgroup_rsv_type(__entry->type),
+ __entry->qgid, __print_symbolic(__entry->type, QGROUP_RSV_TYPES),
__entry->cur_reserved, __entry->diff)
);
@@ -1713,7 +1792,7 @@ TRACE_EVENT(qgroup_meta_reserve,
TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld",
show_root_type(__entry->refroot),
- show_qgroup_rsv_type(__entry->type), __entry->diff)
+ __print_symbolic(__entry->type, QGROUP_RSV_TYPES), __entry->diff)
);
TRACE_EVENT(qgroup_meta_convert,
@@ -1734,8 +1813,8 @@ TRACE_EVENT(qgroup_meta_convert,
TP_printk_btrfs("refroot=%llu(%s) type=%s->%s diff=%lld",
show_root_type(__entry->refroot),
- show_qgroup_rsv_type(BTRFS_QGROUP_RSV_META_PREALLOC),
- show_qgroup_rsv_type(BTRFS_QGROUP_RSV_META_PERTRANS),
+ __print_symbolic(BTRFS_QGROUP_RSV_META_PREALLOC, QGROUP_RSV_TYPES),
+ __print_symbolic(BTRFS_QGROUP_RSV_META_PERTRANS, QGROUP_RSV_TYPES),
__entry->diff)
);
@@ -1761,7 +1840,7 @@ TRACE_EVENT(qgroup_meta_free_all_pertrans,
TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld",
show_root_type(__entry->refroot),
- show_qgroup_rsv_type(__entry->type), __entry->diff)
+ __print_symbolic(__entry->type, QGROUP_RSV_TYPES), __entry->diff)
);
DECLARE_EVENT_CLASS(btrfs__prelim_ref,
@@ -1879,6 +1958,18 @@ DEFINE_EVENT(btrfs__block_group, btrfs_add_unused_block_group,
TP_ARGS(bg_cache)
);
+DEFINE_EVENT(btrfs__block_group, btrfs_add_reclaim_block_group,
+ TP_PROTO(const struct btrfs_block_group *bg_cache),
+
+ TP_ARGS(bg_cache)
+);
+
+DEFINE_EVENT(btrfs__block_group, btrfs_reclaim_block_group,
+ TP_PROTO(const struct btrfs_block_group *bg_cache),
+
+ TP_ARGS(bg_cache)
+);
+
DEFINE_EVENT(btrfs__block_group, btrfs_skip_unused_block_group,
TP_PROTO(const struct btrfs_block_group *bg_cache),
@@ -1919,7 +2010,7 @@ TRACE_EVENT(btrfs_set_extent_bit,
TP_printk_btrfs(
"io_tree=%s ino=%llu root=%llu start=%llu len=%llu set_bits=%s",
- show_extent_io_tree_owner(__entry->owner), __entry->ino,
+ __print_symbolic(__entry->owner, IO_TREE_OWNER), __entry->ino,
__entry->rootid, __entry->start, __entry->len,
__print_flags(__entry->set_bits, "|", EXTENT_FLAGS))
);
@@ -1958,7 +2049,7 @@ TRACE_EVENT(btrfs_clear_extent_bit,
TP_printk_btrfs(
"io_tree=%s ino=%llu root=%llu start=%llu len=%llu clear_bits=%s",
- show_extent_io_tree_owner(__entry->owner), __entry->ino,
+ __print_symbolic(__entry->owner, IO_TREE_OWNER), __entry->ino,
__entry->rootid, __entry->start, __entry->len,
__print_flags(__entry->clear_bits, "|", EXTENT_FLAGS))
);
@@ -1999,12 +2090,115 @@ TRACE_EVENT(btrfs_convert_extent_bit,
TP_printk_btrfs(
"io_tree=%s ino=%llu root=%llu start=%llu len=%llu set_bits=%s clear_bits=%s",
- show_extent_io_tree_owner(__entry->owner), __entry->ino,
+ __print_symbolic(__entry->owner, IO_TREE_OWNER), __entry->ino,
__entry->rootid, __entry->start, __entry->len,
__print_flags(__entry->set_bits , "|", EXTENT_FLAGS),
__print_flags(__entry->clear_bits, "|", EXTENT_FLAGS))
);
+DECLARE_EVENT_CLASS(btrfs_dump_space_info,
+ TP_PROTO(struct btrfs_fs_info *fs_info,
+ const struct btrfs_space_info *sinfo),
+
+ TP_ARGS(fs_info, sinfo),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, flags )
+ __field( u64, total_bytes )
+ __field( u64, bytes_used )
+ __field( u64, bytes_pinned )
+ __field( u64, bytes_reserved )
+ __field( u64, bytes_may_use )
+ __field( u64, bytes_readonly )
+ __field( u64, reclaim_size )
+ __field( int, clamp )
+ __field( u64, global_reserved )
+ __field( u64, trans_reserved )
+ __field( u64, delayed_refs_reserved )
+ __field( u64, delayed_reserved )
+ __field( u64, free_chunk_space )
+ __field( u64, delalloc_bytes )
+ __field( u64, ordered_bytes )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->flags = sinfo->flags;
+ __entry->total_bytes = sinfo->total_bytes;
+ __entry->bytes_used = sinfo->bytes_used;
+ __entry->bytes_pinned = sinfo->bytes_pinned;
+ __entry->bytes_reserved = sinfo->bytes_reserved;
+ __entry->bytes_may_use = sinfo->bytes_may_use;
+ __entry->bytes_readonly = sinfo->bytes_readonly;
+ __entry->reclaim_size = sinfo->reclaim_size;
+ __entry->clamp = sinfo->clamp;
+ __entry->global_reserved = fs_info->global_block_rsv.reserved;
+ __entry->trans_reserved = fs_info->trans_block_rsv.reserved;
+ __entry->delayed_refs_reserved = fs_info->delayed_refs_rsv.reserved;
+ __entry->delayed_reserved = fs_info->delayed_block_rsv.reserved;
+ __entry->free_chunk_space = atomic64_read(&fs_info->free_chunk_space);
+ __entry->delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
+ __entry->ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
+ ),
+
+ TP_printk_btrfs("flags=%s total_bytes=%llu bytes_used=%llu "
+ "bytes_pinned=%llu bytes_reserved=%llu "
+ "bytes_may_use=%llu bytes_readonly=%llu "
+ "reclaim_size=%llu clamp=%d global_reserved=%llu "
+ "trans_reserved=%llu delayed_refs_reserved=%llu "
+ "delayed_reserved=%llu chunk_free_space=%llu "
+ "delalloc_bytes=%llu ordered_bytes=%llu",
+ __print_flags(__entry->flags, "|", BTRFS_GROUP_FLAGS),
+ __entry->total_bytes, __entry->bytes_used,
+ __entry->bytes_pinned, __entry->bytes_reserved,
+ __entry->bytes_may_use, __entry->bytes_readonly,
+ __entry->reclaim_size, __entry->clamp,
+ __entry->global_reserved, __entry->trans_reserved,
+ __entry->delayed_refs_reserved,
+ __entry->delayed_reserved, __entry->free_chunk_space,
+ __entry->delalloc_bytes, __entry->ordered_bytes)
+);
+
+DEFINE_EVENT(btrfs_dump_space_info, btrfs_done_preemptive_reclaim,
+ TP_PROTO(struct btrfs_fs_info *fs_info,
+ const struct btrfs_space_info *sinfo),
+ TP_ARGS(fs_info, sinfo)
+);
+
+DEFINE_EVENT(btrfs_dump_space_info, btrfs_fail_all_tickets,
+ TP_PROTO(struct btrfs_fs_info *fs_info,
+ const struct btrfs_space_info *sinfo),
+ TP_ARGS(fs_info, sinfo)
+);
+
+TRACE_EVENT(btrfs_reserve_ticket,
+ TP_PROTO(const struct btrfs_fs_info *fs_info, u64 flags, u64 bytes,
+ u64 start_ns, int flush, int error),
+
+ TP_ARGS(fs_info, flags, bytes, start_ns, flush, error),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, flags )
+ __field( u64, bytes )
+ __field( u64, start_ns )
+ __field( int, flush )
+ __field( int, error )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->flags = flags;
+ __entry->bytes = bytes;
+ __entry->start_ns = start_ns;
+ __entry->flush = flush;
+ __entry->error = error;
+ ),
+
+ TP_printk_btrfs("flags=%s bytes=%llu start_ns=%llu flush=%s error=%d",
+ __print_flags(__entry->flags, "|", BTRFS_GROUP_FLAGS),
+ __entry->bytes, __entry->start_ns,
+ __print_symbolic(__entry->flush, FLUSH_ACTIONS),
+ __entry->error)
+);
+
DECLARE_EVENT_CLASS(btrfs_sleep_tree_lock,
TP_PROTO(const struct extent_buffer *eb, u64 start_ns),
@@ -2128,6 +2322,98 @@ DEFINE_EVENT(btrfs__space_info_update, update_bytes_pinned,
TP_ARGS(fs_info, sinfo, old, diff)
);
+DECLARE_EVENT_CLASS(btrfs_raid56_bio,
+
+ TP_PROTO(const struct btrfs_raid_bio *rbio,
+ const struct bio *bio,
+ const struct raid56_bio_trace_info *trace_info),
+
+ TP_ARGS(rbio, bio, trace_info),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, full_stripe )
+ __field( u64, physical )
+ __field( u64, devid )
+ __field( u32, offset )
+ __field( u32, len )
+ __field( u8, opf )
+ __field( u8, total_stripes )
+ __field( u8, real_stripes )
+ __field( u8, nr_data )
+ __field( u8, stripe_nr )
+ ),
+
+ TP_fast_assign_btrfs(rbio->bioc->fs_info,
+ __entry->full_stripe = rbio->bioc->raid_map[0];
+ __entry->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+ __entry->len = bio->bi_iter.bi_size;
+ __entry->opf = bio_op(bio);
+ __entry->devid = trace_info->devid;
+ __entry->offset = trace_info->offset;
+ __entry->stripe_nr = trace_info->stripe_nr;
+ __entry->total_stripes = rbio->bioc->num_stripes;
+ __entry->real_stripes = rbio->real_stripes;
+ __entry->nr_data = rbio->nr_data;
+ ),
+ /*
+ * For the type output we print strings like "DATA1" (the first data
+ * stripe), "DATA2" (the second data stripe), "PQ1" (the P stripe),
+ * "PQ2" (the Q stripe) and "REPLACE0" (the replace target device).
+ */
+ TP_printk_btrfs(
+"full_stripe=%llu devid=%lld type=%s%d offset=%d opf=0x%x physical=%llu len=%u",
+ __entry->full_stripe, __entry->devid,
+ (__entry->stripe_nr < __entry->nr_data) ? "DATA" :
+ ((__entry->stripe_nr < __entry->real_stripes) ? "PQ" :
+ "REPLACE"),
+ (__entry->stripe_nr < __entry->nr_data) ?
+ (__entry->stripe_nr + 1) :
+ ((__entry->stripe_nr < __entry->real_stripes) ?
+ (__entry->stripe_nr - __entry->nr_data + 1) : 0),
+ __entry->offset, __entry->opf, __entry->physical, __entry->len)
+);
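The ternary chains in the TP_printk() above implement the mapping described in the comment. A standalone C sketch of the same classification; stripe_label() is a hypothetical helper, not part of this patch:

    static void stripe_label(u8 stripe_nr, u8 nr_data, u8 real_stripes,
                             const char **type, int *index)
    {
            if (stripe_nr < nr_data) {
                    *type = "DATA";               /* DATA1, DATA2, ... */
                    *index = stripe_nr + 1;
            } else if (stripe_nr < real_stripes) {
                    *type = "PQ";                 /* PQ1 = P, PQ2 = Q */
                    *index = stripe_nr - nr_data + 1;
            } else {
                    *type = "REPLACE";            /* REPLACE0 = target dev */
                    *index = 0;
            }
    }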
+
+DEFINE_EVENT(btrfs_raid56_bio, raid56_read_partial,
+ TP_PROTO(const struct btrfs_raid_bio *rbio,
+ const struct bio *bio,
+ const struct raid56_bio_trace_info *trace_info),
+
+ TP_ARGS(rbio, bio, trace_info)
+);
+
+DEFINE_EVENT(btrfs_raid56_bio, raid56_write_stripe,
+ TP_PROTO(const struct btrfs_raid_bio *rbio,
+ const struct bio *bio,
+ const struct raid56_bio_trace_info *trace_info),
+
+ TP_ARGS(rbio, bio, trace_info)
+);
+
+DEFINE_EVENT(btrfs_raid56_bio, raid56_scrub_write_stripe,
+ TP_PROTO(const struct btrfs_raid_bio *rbio,
+ const struct bio *bio,
+ const struct raid56_bio_trace_info *trace_info),
+
+ TP_ARGS(rbio, bio, trace_info)
+);
+
+DEFINE_EVENT(btrfs_raid56_bio, raid56_scrub_read,
+ TP_PROTO(const struct btrfs_raid_bio *rbio,
+ const struct bio *bio,
+ const struct raid56_bio_trace_info *trace_info),
+
+ TP_ARGS(rbio, bio, trace_info)
+);
+
+DEFINE_EVENT(btrfs_raid56_bio, raid56_scrub_read_recover,
+ TP_PROTO(const struct btrfs_raid_bio *rbio,
+ const struct bio *bio,
+ const struct raid56_bio_trace_info *trace_info),
+
+ TP_ARGS(rbio, bio, trace_info)
+);
+
#endif /* _TRACE_BTRFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h
index 5d9de24cb9c0..d8d4d73fe7b6 100644
--- a/include/trace/events/cachefiles.h
+++ b/include/trace/events/cachefiles.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* CacheFiles tracepoints
*
- * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#undef TRACE_SYSTEM
@@ -19,9 +19,86 @@
#define __CACHEFILES_DECLARE_TRACE_ENUMS_ONCE_ONLY
enum cachefiles_obj_ref_trace {
- cachefiles_obj_put_wait_retry = fscache_obj_ref__nr_traces,
- cachefiles_obj_put_wait_timeo,
- cachefiles_obj_ref__nr_traces
+ cachefiles_obj_get_ioreq,
+ cachefiles_obj_new,
+ cachefiles_obj_put_alloc_fail,
+ cachefiles_obj_put_detach,
+ cachefiles_obj_put_ioreq,
+ cachefiles_obj_see_clean_commit,
+ cachefiles_obj_see_clean_delete,
+ cachefiles_obj_see_clean_drop_tmp,
+ cachefiles_obj_see_lookup_cookie,
+ cachefiles_obj_see_lookup_failed,
+ cachefiles_obj_see_withdraw_cookie,
+ cachefiles_obj_see_withdrawal,
+ cachefiles_obj_get_ondemand_fd,
+ cachefiles_obj_put_ondemand_fd,
+};
+
+enum fscache_why_object_killed {
+ FSCACHE_OBJECT_IS_STALE,
+ FSCACHE_OBJECT_IS_WEIRD,
+ FSCACHE_OBJECT_INVALIDATED,
+ FSCACHE_OBJECT_NO_SPACE,
+ FSCACHE_OBJECT_WAS_RETIRED,
+ FSCACHE_OBJECT_WAS_CULLED,
+ FSCACHE_VOLUME_IS_WEIRD,
+};
+
+enum cachefiles_coherency_trace {
+ cachefiles_coherency_check_aux,
+ cachefiles_coherency_check_content,
+ cachefiles_coherency_check_dirty,
+ cachefiles_coherency_check_len,
+ cachefiles_coherency_check_objsize,
+ cachefiles_coherency_check_ok,
+ cachefiles_coherency_check_type,
+ cachefiles_coherency_check_xattr,
+ cachefiles_coherency_set_fail,
+ cachefiles_coherency_set_ok,
+ cachefiles_coherency_vol_check_cmp,
+ cachefiles_coherency_vol_check_ok,
+ cachefiles_coherency_vol_check_resv,
+ cachefiles_coherency_vol_check_xattr,
+ cachefiles_coherency_vol_set_fail,
+ cachefiles_coherency_vol_set_ok,
+};
+
+enum cachefiles_trunc_trace {
+ cachefiles_trunc_dio_adjust,
+ cachefiles_trunc_expand_tmpfile,
+ cachefiles_trunc_shrink,
+};
+
+enum cachefiles_prepare_read_trace {
+ cachefiles_trace_read_after_eof,
+ cachefiles_trace_read_found_hole,
+ cachefiles_trace_read_found_part,
+ cachefiles_trace_read_have_data,
+ cachefiles_trace_read_no_data,
+ cachefiles_trace_read_no_file,
+ cachefiles_trace_read_seek_error,
+ cachefiles_trace_read_seek_nxio,
+};
+
+enum cachefiles_error_trace {
+ cachefiles_trace_fallocate_error,
+ cachefiles_trace_getxattr_error,
+ cachefiles_trace_link_error,
+ cachefiles_trace_lookup_error,
+ cachefiles_trace_mkdir_error,
+ cachefiles_trace_notify_change_error,
+ cachefiles_trace_open_error,
+ cachefiles_trace_read_error,
+ cachefiles_trace_remxattr_error,
+ cachefiles_trace_rename_error,
+ cachefiles_trace_seek_error,
+ cachefiles_trace_setxattr_error,
+ cachefiles_trace_statfs_error,
+ cachefiles_trace_tmpfile_error,
+ cachefiles_trace_trunc_error,
+ cachefiles_trace_unlink_error,
+ cachefiles_trace_write_error,
};
#endif
@@ -31,21 +108,79 @@ enum cachefiles_obj_ref_trace {
*/
#define cachefiles_obj_kill_traces \
EM(FSCACHE_OBJECT_IS_STALE, "stale") \
+ EM(FSCACHE_OBJECT_IS_WEIRD, "weird") \
+ EM(FSCACHE_OBJECT_INVALIDATED, "inval") \
EM(FSCACHE_OBJECT_NO_SPACE, "no_space") \
EM(FSCACHE_OBJECT_WAS_RETIRED, "was_retired") \
- E_(FSCACHE_OBJECT_WAS_CULLED, "was_culled")
+ EM(FSCACHE_OBJECT_WAS_CULLED, "was_culled") \
+ E_(FSCACHE_VOLUME_IS_WEIRD, "volume_weird")
#define cachefiles_obj_ref_traces \
- EM(fscache_obj_get_add_to_deps, "GET add_to_deps") \
- EM(fscache_obj_get_queue, "GET queue") \
- EM(fscache_obj_put_alloc_fail, "PUT alloc_fail") \
- EM(fscache_obj_put_attach_fail, "PUT attach_fail") \
- EM(fscache_obj_put_drop_obj, "PUT drop_obj") \
- EM(fscache_obj_put_enq_dep, "PUT enq_dep") \
- EM(fscache_obj_put_queue, "PUT queue") \
- EM(fscache_obj_put_work, "PUT work") \
- EM(cachefiles_obj_put_wait_retry, "PUT wait_retry") \
- E_(cachefiles_obj_put_wait_timeo, "PUT wait_timeo")
+ EM(cachefiles_obj_get_ioreq, "GET ioreq") \
+ EM(cachefiles_obj_new, "NEW obj") \
+ EM(cachefiles_obj_put_alloc_fail, "PUT alloc_fail") \
+ EM(cachefiles_obj_put_detach, "PUT detach") \
+ EM(cachefiles_obj_put_ioreq, "PUT ioreq") \
+ EM(cachefiles_obj_see_clean_commit, "SEE clean_commit") \
+ EM(cachefiles_obj_see_clean_delete, "SEE clean_delete") \
+ EM(cachefiles_obj_see_clean_drop_tmp, "SEE clean_drop_tmp") \
+ EM(cachefiles_obj_see_lookup_cookie, "SEE lookup_cookie") \
+ EM(cachefiles_obj_see_lookup_failed, "SEE lookup_failed") \
+ EM(cachefiles_obj_see_withdraw_cookie, "SEE withdraw_cookie") \
+ E_(cachefiles_obj_see_withdrawal, "SEE withdrawal")
+
+#define cachefiles_coherency_traces \
+ EM(cachefiles_coherency_check_aux, "BAD aux ") \
+ EM(cachefiles_coherency_check_content, "BAD cont") \
+ EM(cachefiles_coherency_check_dirty, "BAD dirt") \
+ EM(cachefiles_coherency_check_len, "BAD len ") \
+ EM(cachefiles_coherency_check_objsize, "BAD osiz") \
+ EM(cachefiles_coherency_check_ok, "OK ") \
+ EM(cachefiles_coherency_check_type, "BAD type") \
+ EM(cachefiles_coherency_check_xattr, "BAD xatt") \
+ EM(cachefiles_coherency_set_fail, "SET fail") \
+ EM(cachefiles_coherency_set_ok, "SET ok ") \
+ EM(cachefiles_coherency_vol_check_cmp, "VOL BAD cmp ") \
+ EM(cachefiles_coherency_vol_check_ok, "VOL OK ") \
+ EM(cachefiles_coherency_vol_check_resv, "VOL BAD resv") \
+ EM(cachefiles_coherency_vol_check_xattr,"VOL BAD xatt") \
+ EM(cachefiles_coherency_vol_set_fail, "VOL SET fail") \
+ E_(cachefiles_coherency_vol_set_ok, "VOL SET ok ")
+
+#define cachefiles_trunc_traces \
+ EM(cachefiles_trunc_dio_adjust, "DIOADJ") \
+ EM(cachefiles_trunc_expand_tmpfile, "EXPTMP") \
+ E_(cachefiles_trunc_shrink, "SHRINK")
+
+#define cachefiles_prepare_read_traces \
+ EM(cachefiles_trace_read_after_eof, "after-eof ") \
+ EM(cachefiles_trace_read_found_hole, "found-hole") \
+ EM(cachefiles_trace_read_found_part, "found-part") \
+ EM(cachefiles_trace_read_have_data, "have-data ") \
+ EM(cachefiles_trace_read_no_data, "no-data ") \
+ EM(cachefiles_trace_read_no_file, "no-file ") \
+ EM(cachefiles_trace_read_seek_error, "seek-error") \
+ E_(cachefiles_trace_read_seek_nxio, "seek-enxio")
+
+#define cachefiles_error_traces \
+ EM(cachefiles_trace_fallocate_error, "fallocate") \
+ EM(cachefiles_trace_getxattr_error, "getxattr") \
+ EM(cachefiles_trace_link_error, "link") \
+ EM(cachefiles_trace_lookup_error, "lookup") \
+ EM(cachefiles_trace_mkdir_error, "mkdir") \
+ EM(cachefiles_trace_notify_change_error, "notify_change") \
+ EM(cachefiles_trace_open_error, "open") \
+ EM(cachefiles_trace_read_error, "read") \
+ EM(cachefiles_trace_remxattr_error, "remxattr") \
+ EM(cachefiles_trace_rename_error, "rename") \
+ EM(cachefiles_trace_seek_error, "seek") \
+ EM(cachefiles_trace_setxattr_error, "setxattr") \
+ EM(cachefiles_trace_statfs_error, "statfs") \
+ EM(cachefiles_trace_tmpfile_error, "tmpfile") \
+ EM(cachefiles_trace_trunc_error, "trunc") \
+ EM(cachefiles_trace_unlink_error, "unlink") \
+ E_(cachefiles_trace_write_error, "write")
+
/*
 * Export enum symbols to userspace.
@@ -57,6 +192,10 @@ enum cachefiles_obj_ref_trace {
cachefiles_obj_kill_traces;
cachefiles_obj_ref_traces;
+cachefiles_coherency_traces;
+cachefiles_trunc_traces;
+cachefiles_prepare_read_traces;
+cachefiles_error_traces;
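These four statements are the first expansion of the EM()/E_() lists: here the macros emit TRACE_DEFINE_ENUM() so the tracing core learns the enum values, and the header then redefines them (below) as { value, "string" } initializers for __print_symbolic(). A minimal sketch of the idiom, with hypothetical names:

    /* Sketch of the standard two-pass EM()/E_() trace-header idiom. */
    #define my_traces \
            EM(my_first,  "first")  \
            E_(my_second, "second")

    /* Pass 1: export the enum values to the tracing core. */
    #define EM(a, b)        TRACE_DEFINE_ENUM(a);
    #define E_(a, b)        TRACE_DEFINE_ENUM(a);
    my_traces;

    /* Pass 2: the same list becomes a string table. */
    #undef EM
    #undef E_
    #define EM(a, b)        { a, b },
    #define E_(a, b)        { a, b }
    /* used later as: __print_symbolic(__entry->why, my_traces) */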
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -69,250 +208,643 @@ cachefiles_obj_ref_traces;
TRACE_EVENT(cachefiles_ref,
- TP_PROTO(struct cachefiles_object *obj,
- struct fscache_cookie *cookie,
- enum cachefiles_obj_ref_trace why,
- int usage),
+ TP_PROTO(unsigned int object_debug_id,
+ unsigned int cookie_debug_id,
+ int usage,
+ enum cachefiles_obj_ref_trace why),
- TP_ARGS(obj, cookie, why, usage),
+ TP_ARGS(object_debug_id, cookie_debug_id, usage, why),
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(struct cachefiles_object *, obj )
- __field(struct fscache_cookie *, cookie )
+ __field(unsigned int, obj )
+ __field(unsigned int, cookie )
__field(enum cachefiles_obj_ref_trace, why )
__field(int, usage )
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->cookie = cookie;
+ __entry->obj = object_debug_id;
+ __entry->cookie = cookie_debug_id;
__entry->usage = usage;
__entry->why = why;
),
- TP_printk("c=%p o=%p u=%d %s",
+ TP_printk("c=%08x o=%08x u=%d %s",
__entry->cookie, __entry->obj, __entry->usage,
__print_symbolic(__entry->why, cachefiles_obj_ref_traces))
);
TRACE_EVENT(cachefiles_lookup,
TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de,
- struct inode *inode),
+ struct dentry *dir,
+ struct dentry *de),
- TP_ARGS(obj, de, inode),
+ TP_ARGS(obj, dir, de),
TP_STRUCT__entry(
- __field(struct cachefiles_object *, obj )
- __field(struct dentry *, de )
- __field(struct inode *, inode )
+ __field(unsigned int, obj )
+ __field(short, error )
+ __field(unsigned long, dino )
+ __field(unsigned long, ino )
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->de = de;
- __entry->inode = inode;
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->dino = d_backing_inode(dir)->i_ino;
+ __entry->ino = (!IS_ERR(de) && d_backing_inode(de) ?
+ d_backing_inode(de)->i_ino : 0);
+ __entry->error = IS_ERR(de) ? PTR_ERR(de) : 0;
),
- TP_printk("o=%p d=%p i=%p",
- __entry->obj, __entry->de, __entry->inode)
+ TP_printk("o=%08x dB=%lx B=%lx e=%d",
+ __entry->obj, __entry->dino, __entry->ino, __entry->error)
);
TRACE_EVENT(cachefiles_mkdir,
- TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de, int ret),
+ TP_PROTO(struct dentry *dir, struct dentry *subdir),
- TP_ARGS(obj, de, ret),
+ TP_ARGS(dir, subdir),
TP_STRUCT__entry(
- __field(struct cachefiles_object *, obj )
- __field(struct dentry *, de )
- __field(int, ret )
+ __field(unsigned int, dir )
+ __field(unsigned int, subdir )
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->de = de;
- __entry->ret = ret;
+ __entry->dir = d_backing_inode(dir)->i_ino;
+ __entry->subdir = d_backing_inode(subdir)->i_ino;
),
- TP_printk("o=%p d=%p r=%u",
- __entry->obj, __entry->de, __entry->ret)
+ TP_printk("dB=%x sB=%x",
+ __entry->dir,
+ __entry->subdir)
);
-TRACE_EVENT(cachefiles_create,
- TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de, int ret),
+TRACE_EVENT(cachefiles_tmpfile,
+ TP_PROTO(struct cachefiles_object *obj, struct inode *backer),
- TP_ARGS(obj, de, ret),
+ TP_ARGS(obj, backer),
TP_STRUCT__entry(
- __field(struct cachefiles_object *, obj )
- __field(struct dentry *, de )
- __field(int, ret )
+ __field(unsigned int, obj )
+ __field(unsigned int, backer )
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->de = de;
- __entry->ret = ret;
+ __entry->obj = obj->debug_id;
+ __entry->backer = backer->i_ino;
),
- TP_printk("o=%p d=%p r=%u",
- __entry->obj, __entry->de, __entry->ret)
+ TP_printk("o=%08x B=%x",
+ __entry->obj,
+ __entry->backer)
+ );
+
+TRACE_EVENT(cachefiles_link,
+ TP_PROTO(struct cachefiles_object *obj, struct inode *backer),
+
+ TP_ARGS(obj, backer),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, backer )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj->debug_id;
+ __entry->backer = backer->i_ino;
+ ),
+
+ TP_printk("o=%08x B=%x",
+ __entry->obj,
+ __entry->backer)
);
TRACE_EVENT(cachefiles_unlink,
TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de,
+ ino_t ino,
enum fscache_why_object_killed why),
- TP_ARGS(obj, de, why),
+ TP_ARGS(obj, ino, why),
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(struct cachefiles_object *, obj )
- __field(struct dentry *, de )
+ __field(unsigned int, obj )
+ __field(unsigned int, ino )
__field(enum fscache_why_object_killed, why )
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->de = de;
+ __entry->obj = obj ? obj->debug_id : UINT_MAX;
+ __entry->ino = ino;
__entry->why = why;
),
- TP_printk("o=%p d=%p w=%s",
- __entry->obj, __entry->de,
+ TP_printk("o=%08x B=%x w=%s",
+ __entry->obj, __entry->ino,
__print_symbolic(__entry->why, cachefiles_obj_kill_traces))
);
TRACE_EVENT(cachefiles_rename,
TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de,
- struct dentry *to,
+ ino_t ino,
enum fscache_why_object_killed why),
- TP_ARGS(obj, de, to, why),
+ TP_ARGS(obj, ino, why),
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(struct cachefiles_object *, obj )
- __field(struct dentry *, de )
- __field(struct dentry *, to )
+ __field(unsigned int, obj )
+ __field(unsigned int, ino )
__field(enum fscache_why_object_killed, why )
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->de = de;
- __entry->to = to;
+ __entry->obj = obj ? obj->debug_id : UINT_MAX;
+ __entry->ino = ino;
__entry->why = why;
),
- TP_printk("o=%p d=%p t=%p w=%s",
- __entry->obj, __entry->de, __entry->to,
+ TP_printk("o=%08x B=%x w=%s",
+ __entry->obj, __entry->ino,
__print_symbolic(__entry->why, cachefiles_obj_kill_traces))
);
-TRACE_EVENT(cachefiles_mark_active,
+TRACE_EVENT(cachefiles_coherency,
TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de),
+ ino_t ino,
+ enum cachefiles_content content,
+ enum cachefiles_coherency_trace why),
- TP_ARGS(obj, de),
+ TP_ARGS(obj, ino, content, why),
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(struct cachefiles_object *, obj )
- __field(struct dentry *, de )
+ __field(unsigned int, obj )
+ __field(enum cachefiles_coherency_trace, why )
+ __field(enum cachefiles_content, content )
+ __field(u64, ino )
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->de = de;
+ __entry->obj = obj->debug_id;
+ __entry->why = why;
+ __entry->content = content;
+ __entry->ino = ino;
),
- TP_printk("o=%p d=%p",
- __entry->obj, __entry->de)
+ TP_printk("o=%08x %s B=%llx c=%u",
+ __entry->obj,
+ __print_symbolic(__entry->why, cachefiles_coherency_traces),
+ __entry->ino,
+ __entry->content)
);
-TRACE_EVENT(cachefiles_wait_active,
+TRACE_EVENT(cachefiles_vol_coherency,
+ TP_PROTO(struct cachefiles_volume *volume,
+ ino_t ino,
+ enum cachefiles_coherency_trace why),
+
+ TP_ARGS(volume, ino, why),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, vol )
+ __field(enum cachefiles_coherency_trace, why )
+ __field(u64, ino )
+ ),
+
+ TP_fast_assign(
+ __entry->vol = volume->vcookie->debug_id;
+ __entry->why = why;
+ __entry->ino = ino;
+ ),
+
+ TP_printk("V=%08x %s B=%llx",
+ __entry->vol,
+ __print_symbolic(__entry->why, cachefiles_coherency_traces),
+ __entry->ino)
+ );
+
+TRACE_EVENT(cachefiles_prep_read,
+ TP_PROTO(struct netfs_io_subrequest *sreq,
+ enum netfs_io_source source,
+ enum cachefiles_prepare_read_trace why,
+ ino_t cache_inode),
+
+ TP_ARGS(sreq, source, why, cache_inode),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq )
+ __field(unsigned short, index )
+ __field(unsigned short, flags )
+ __field(enum netfs_io_source, source )
+ __field(enum cachefiles_prepare_read_trace, why )
+ __field(size_t, len )
+ __field(loff_t, start )
+ __field(unsigned int, netfs_inode )
+ __field(unsigned int, cache_inode )
+ ),
+
+ TP_fast_assign(
+ __entry->rreq = sreq->rreq->debug_id;
+ __entry->index = sreq->debug_index;
+ __entry->flags = sreq->flags;
+ __entry->source = source;
+ __entry->why = why;
+ __entry->len = sreq->len;
+ __entry->start = sreq->start;
+ __entry->netfs_inode = sreq->rreq->inode->i_ino;
+ __entry->cache_inode = cache_inode;
+ ),
+
+ TP_printk("R=%08x[%u] %s %s f=%02x s=%llx %zx ni=%x B=%x",
+ __entry->rreq, __entry->index,
+ __print_symbolic(__entry->source, netfs_sreq_sources),
+ __print_symbolic(__entry->why, cachefiles_prepare_read_traces),
+ __entry->flags,
+ __entry->start, __entry->len,
+ __entry->netfs_inode, __entry->cache_inode)
+ );
+
+TRACE_EVENT(cachefiles_read,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct inode *backer,
+ loff_t start,
+ size_t len),
+
+ TP_ARGS(obj, backer, start, len),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, backer )
+ __field(size_t, len )
+ __field(loff_t, start )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj->debug_id;
+ __entry->backer = backer->i_ino;
+ __entry->start = start;
+ __entry->len = len;
+ ),
+
+ TP_printk("o=%08x B=%x s=%llx l=%zx",
+ __entry->obj,
+ __entry->backer,
+ __entry->start,
+ __entry->len)
+ );
+
+TRACE_EVENT(cachefiles_write,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct inode *backer,
+ loff_t start,
+ size_t len),
+
+ TP_ARGS(obj, backer, start, len),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, backer )
+ __field(size_t, len )
+ __field(loff_t, start )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj->debug_id;
+ __entry->backer = backer->i_ino;
+ __entry->start = start;
+ __entry->len = len;
+ ),
+
+ TP_printk("o=%08x B=%x s=%llx l=%zx",
+ __entry->obj,
+ __entry->backer,
+ __entry->start,
+ __entry->len)
+ );
+
+TRACE_EVENT(cachefiles_trunc,
+ TP_PROTO(struct cachefiles_object *obj, struct inode *backer,
+ loff_t from, loff_t to, enum cachefiles_trunc_trace why),
+
+ TP_ARGS(obj, backer, from, to, why),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, backer )
+ __field(enum cachefiles_trunc_trace, why )
+ __field(loff_t, from )
+ __field(loff_t, to )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj->debug_id;
+ __entry->backer = backer->i_ino;
+ __entry->from = from;
+ __entry->to = to;
+ __entry->why = why;
+ ),
+
+ TP_printk("o=%08x B=%x %s l=%llx->%llx",
+ __entry->obj,
+ __entry->backer,
+ __print_symbolic(__entry->why, cachefiles_trunc_traces),
+ __entry->from,
+ __entry->to)
+ );
+
+TRACE_EVENT(cachefiles_mark_active,
TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de,
- struct cachefiles_object *xobj),
+ struct inode *inode),
- TP_ARGS(obj, de, xobj),
+ TP_ARGS(obj, inode),
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(struct cachefiles_object *, obj )
- __field(struct dentry *, de )
- __field(struct cachefiles_object *, xobj )
- __field(u16, flags )
- __field(u16, fsc_flags )
+ __field(unsigned int, obj )
+ __field(ino_t, inode )
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->de = de;
- __entry->xobj = xobj;
- __entry->flags = xobj->flags;
- __entry->fsc_flags = xobj->fscache.flags;
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->inode = inode->i_ino;
),
- TP_printk("o=%p d=%p wo=%p wf=%x wff=%x",
- __entry->obj, __entry->de, __entry->xobj,
- __entry->flags, __entry->fsc_flags)
+ TP_printk("o=%08x B=%lx",
+ __entry->obj, __entry->inode)
);
-TRACE_EVENT(cachefiles_mark_inactive,
+TRACE_EVENT(cachefiles_mark_failed,
TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de,
struct inode *inode),
- TP_ARGS(obj, de, inode),
+ TP_ARGS(obj, inode),
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(struct cachefiles_object *, obj )
- __field(struct dentry *, de )
- __field(struct inode *, inode )
+ __field(unsigned int, obj )
+ __field(ino_t, inode )
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->de = de;
- __entry->inode = inode;
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->inode = inode->i_ino;
),
- TP_printk("o=%p d=%p i=%p",
- __entry->obj, __entry->de, __entry->inode)
+ TP_printk("o=%08x B=%lx",
+ __entry->obj, __entry->inode)
);
-TRACE_EVENT(cachefiles_mark_buried,
+TRACE_EVENT(cachefiles_mark_inactive,
TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de,
- enum fscache_why_object_killed why),
+ struct inode *inode),
- TP_ARGS(obj, de, why),
+ TP_ARGS(obj, inode),
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(struct cachefiles_object *, obj )
- __field(struct dentry *, de )
- __field(enum fscache_why_object_killed, why )
+ __field(unsigned int, obj )
+ __field(ino_t, inode )
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->de = de;
- __entry->why = why;
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->inode = inode->i_ino;
),
- TP_printk("o=%p d=%p w=%s",
- __entry->obj, __entry->de,
- __print_symbolic(__entry->why, cachefiles_obj_kill_traces))
+ TP_printk("o=%08x B=%lx",
+ __entry->obj, __entry->inode)
+ );
+
+TRACE_EVENT(cachefiles_vfs_error,
+ TP_PROTO(struct cachefiles_object *obj, struct inode *backer,
+ int error, enum cachefiles_error_trace where),
+
+ TP_ARGS(obj, backer, error, where),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, backer )
+ __field(enum cachefiles_error_trace, where )
+ __field(short, error )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->backer = backer->i_ino;
+ __entry->error = error;
+ __entry->where = where;
+ ),
+
+ TP_printk("o=%08x B=%x %s e=%d",
+ __entry->obj,
+ __entry->backer,
+ __print_symbolic(__entry->where, cachefiles_error_traces),
+ __entry->error)
+ );
+
+TRACE_EVENT(cachefiles_io_error,
+ TP_PROTO(struct cachefiles_object *obj, struct inode *backer,
+ int error, enum cachefiles_error_trace where),
+
+ TP_ARGS(obj, backer, error, where),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, backer )
+ __field(enum cachefiles_error_trace, where )
+ __field(short, error )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->backer = backer->i_ino;
+ __entry->error = error;
+ __entry->where = where;
+ ),
+
+ TP_printk("o=%08x B=%x %s e=%d",
+ __entry->obj,
+ __entry->backer,
+ __print_symbolic(__entry->where, cachefiles_error_traces),
+ __entry->error)
+ );
+
+TRACE_EVENT(cachefiles_ondemand_open,
+ TP_PROTO(struct cachefiles_object *obj, struct cachefiles_msg *msg,
+ struct cachefiles_open *load),
+
+ TP_ARGS(obj, msg, load),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, msg_id )
+ __field(unsigned int, object_id )
+ __field(unsigned int, fd )
+ __field(unsigned int, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->msg_id = msg->msg_id;
+ __entry->object_id = msg->object_id;
+ __entry->fd = load->fd;
+ __entry->flags = load->flags;
+ ),
+
+ TP_printk("o=%08x mid=%x oid=%x fd=%d f=%x",
+ __entry->obj,
+ __entry->msg_id,
+ __entry->object_id,
+ __entry->fd,
+ __entry->flags)
+ );
+
+TRACE_EVENT(cachefiles_ondemand_copen,
+ TP_PROTO(struct cachefiles_object *obj, unsigned int msg_id,
+ long len),
+
+ TP_ARGS(obj, msg_id, len),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, msg_id )
+ __field(long, len )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->msg_id = msg_id;
+ __entry->len = len;
+ ),
+
+ TP_printk("o=%08x mid=%x l=%lx",
+ __entry->obj,
+ __entry->msg_id,
+ __entry->len)
+ );
+
+TRACE_EVENT(cachefiles_ondemand_close,
+ TP_PROTO(struct cachefiles_object *obj, struct cachefiles_msg *msg),
+
+ TP_ARGS(obj, msg),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, msg_id )
+ __field(unsigned int, object_id )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->msg_id = msg->msg_id;
+ __entry->object_id = msg->object_id;
+ ),
+
+ TP_printk("o=%08x mid=%x oid=%x",
+ __entry->obj,
+ __entry->msg_id,
+ __entry->object_id)
+ );
+
+TRACE_EVENT(cachefiles_ondemand_read,
+ TP_PROTO(struct cachefiles_object *obj, struct cachefiles_msg *msg,
+ struct cachefiles_read *load),
+
+ TP_ARGS(obj, msg, load),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, msg_id )
+ __field(unsigned int, object_id )
+ __field(loff_t, start )
+ __field(size_t, len )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->msg_id = msg->msg_id;
+ __entry->object_id = msg->object_id;
+ __entry->start = load->off;
+ __entry->len = load->len;
+ ),
+
+ TP_printk("o=%08x mid=%x oid=%x s=%llx l=%zx",
+ __entry->obj,
+ __entry->msg_id,
+ __entry->object_id,
+ __entry->start,
+ __entry->len)
+ );
+
+TRACE_EVENT(cachefiles_ondemand_cread,
+ TP_PROTO(struct cachefiles_object *obj, unsigned int msg_id),
+
+ TP_ARGS(obj, msg_id),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, msg_id )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->msg_id = msg_id;
+ ),
+
+ TP_printk("o=%08x mid=%x",
+ __entry->obj,
+ __entry->msg_id)
+ );
+
+TRACE_EVENT(cachefiles_ondemand_fd_write,
+ TP_PROTO(struct cachefiles_object *obj, struct inode *backer,
+ loff_t start, size_t len),
+
+ TP_ARGS(obj, backer, start, len),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, backer )
+ __field(loff_t, start )
+ __field(size_t, len )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->backer = backer->i_ino;
+ __entry->start = start;
+ __entry->len = len;
+ ),
+
+ TP_printk("o=%08x iB=%x s=%llx l=%zx",
+ __entry->obj,
+ __entry->backer,
+ __entry->start,
+ __entry->len)
+ );
+
+TRACE_EVENT(cachefiles_ondemand_fd_release,
+ TP_PROTO(struct cachefiles_object *obj, int object_id),
+
+ TP_ARGS(obj, object_id),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, object_id )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->object_id = object_id;
+ ),
+
+ TP_printk("o=%08x oid=%x",
+ __entry->obj,
+ __entry->object_id)
);
#endif /* _TRACE_CACHEFILES_H */
diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h
index 7f42a3de59e6..dd7d7c9efecd 100644
--- a/include/trace/events/cgroup.h
+++ b/include/trace/events/cgroup.h
@@ -59,8 +59,8 @@ DECLARE_EVENT_CLASS(cgroup,
TP_STRUCT__entry(
__field( int, root )
- __field( int, id )
__field( int, level )
+ __field( u64, id )
__string( path, path )
),
@@ -71,7 +71,7 @@ DECLARE_EVENT_CLASS(cgroup,
__assign_str(path, path);
),
- TP_printk("root=%d id=%d level=%d path=%s",
+ TP_printk("root=%d id=%llu level=%d path=%s",
__entry->root, __entry->id, __entry->level, __get_str(path))
);
@@ -126,8 +126,8 @@ DECLARE_EVENT_CLASS(cgroup_migrate,
TP_STRUCT__entry(
__field( int, dst_root )
- __field( int, dst_id )
__field( int, dst_level )
+ __field( u64, dst_id )
__field( int, pid )
__string( dst_path, path )
__string( comm, task->comm )
@@ -142,7 +142,7 @@ DECLARE_EVENT_CLASS(cgroup_migrate,
__assign_str(comm, task->comm);
),
- TP_printk("dst_root=%d dst_id=%d dst_level=%d dst_path=%s pid=%d comm=%s",
+ TP_printk("dst_root=%d dst_id=%llu dst_level=%d dst_path=%s pid=%d comm=%s",
__entry->dst_root, __entry->dst_id, __entry->dst_level,
__get_str(dst_path), __entry->pid, __get_str(comm))
);
@@ -171,8 +171,8 @@ DECLARE_EVENT_CLASS(cgroup_event,
TP_STRUCT__entry(
__field( int, root )
- __field( int, id )
__field( int, level )
+ __field( u64, id )
__string( path, path )
__field( int, val )
),
@@ -185,7 +185,7 @@ DECLARE_EVENT_CLASS(cgroup_event,
__entry->val = val;
),
- TP_printk("root=%d id=%d level=%d path=%s val=%d",
+ TP_printk("root=%d id=%llu level=%d path=%s val=%d",
__entry->root, __entry->id, __entry->level, __get_str(path),
__entry->val)
);
diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h
index cb1aea25c199..e19edc63ee95 100644
--- a/include/trace/events/clk.h
+++ b/include/trace/events/clk.h
@@ -118,6 +118,50 @@ DEFINE_EVENT(clk_rate, clk_set_rate_complete,
TP_ARGS(core, rate)
);
+DEFINE_EVENT(clk_rate, clk_set_min_rate,
+
+ TP_PROTO(struct clk_core *core, unsigned long rate),
+
+ TP_ARGS(core, rate)
+);
+
+DEFINE_EVENT(clk_rate, clk_set_max_rate,
+
+ TP_PROTO(struct clk_core *core, unsigned long rate),
+
+ TP_ARGS(core, rate)
+);
+
+DECLARE_EVENT_CLASS(clk_rate_range,
+
+ TP_PROTO(struct clk_core *core, unsigned long min, unsigned long max),
+
+ TP_ARGS(core, min, max),
+
+ TP_STRUCT__entry(
+ __string( name, core->name )
+ __field(unsigned long, min )
+ __field(unsigned long, max )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, core->name);
+ __entry->min = min;
+ __entry->max = max;
+ ),
+
+ TP_printk("%s min %lu max %lu", __get_str(name),
+ (unsigned long)__entry->min,
+ (unsigned long)__entry->max)
+);
+
+DEFINE_EVENT(clk_rate_range, clk_set_rate_range,
+
+ TP_PROTO(struct clk_core *core, unsigned long min, unsigned long max),
+
+ TP_ARGS(core, min, max)
+);
+
DECLARE_EVENT_CLASS(clk_parent,
TP_PROTO(struct clk_core *core, struct clk_core *parent),
diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h
index 5017a8829270..3d708dae1542 100644
--- a/include/trace/events/cma.h
+++ b/include/trace/events/cma.h
@@ -8,28 +8,31 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
-TRACE_EVENT(cma_alloc,
+DECLARE_EVENT_CLASS(cma_alloc_class,
- TP_PROTO(unsigned long pfn, const struct page *page,
- unsigned int count, unsigned int align),
+ TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
+ unsigned long count, unsigned int align),
- TP_ARGS(pfn, page, count, align),
+ TP_ARGS(name, pfn, page, count, align),
TP_STRUCT__entry(
+ __string(name, name)
__field(unsigned long, pfn)
__field(const struct page *, page)
- __field(unsigned int, count)
+ __field(unsigned long, count)
__field(unsigned int, align)
),
TP_fast_assign(
+ __assign_str(name, name);
__entry->pfn = pfn;
__entry->page = page;
__entry->count = count;
__entry->align = align;
),
- TP_printk("pfn=%lx page=%p count=%u align=%u",
+ TP_printk("name=%s pfn=0x%lx page=%p count=%lu align=%u",
+ __get_str(name),
__entry->pfn,
__entry->page,
__entry->count,
@@ -38,29 +41,72 @@ TRACE_EVENT(cma_alloc,
TRACE_EVENT(cma_release,
- TP_PROTO(unsigned long pfn, const struct page *page,
- unsigned int count),
+ TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
+ unsigned long count),
- TP_ARGS(pfn, page, count),
+ TP_ARGS(name, pfn, page, count),
TP_STRUCT__entry(
+ __string(name, name)
__field(unsigned long, pfn)
__field(const struct page *, page)
- __field(unsigned int, count)
+ __field(unsigned long, count)
),
TP_fast_assign(
+ __assign_str(name, name);
__entry->pfn = pfn;
__entry->page = page;
__entry->count = count;
),
- TP_printk("pfn=%lx page=%p count=%u",
+ TP_printk("name=%s pfn=0x%lx page=%p count=%lu",
+ __get_str(name),
__entry->pfn,
__entry->page,
__entry->count)
);
+TRACE_EVENT(cma_alloc_start,
+
+ TP_PROTO(const char *name, unsigned long count, unsigned int align),
+
+ TP_ARGS(name, count, align),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(unsigned long, count)
+ __field(unsigned int, align)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->count = count;
+ __entry->align = align;
+ ),
+
+ TP_printk("name=%s count=%lu align=%u",
+ __get_str(name),
+ __entry->count,
+ __entry->align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc_finish,
+
+ TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
+ unsigned long count, unsigned int align),
+
+ TP_ARGS(name, pfn, page, count, align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc_busy_retry,
+
+ TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
+ unsigned long count, unsigned int align),
+
+ TP_ARGS(name, pfn, page, count, align)
+);
+
#endif /* _TRACE_CMA_H */
/* This part must be outside protection */
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index e5bf6ee4e814..3313eb83c117 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -67,11 +67,10 @@ DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages,
#ifdef CONFIG_COMPACTION
TRACE_EVENT(mm_compaction_migratepages,
- TP_PROTO(unsigned long nr_all,
- int migrate_rc,
- struct list_head *migratepages),
+ TP_PROTO(struct compact_control *cc,
+ unsigned int nr_succeeded),
- TP_ARGS(nr_all, migrate_rc, migratepages),
+ TP_ARGS(cc, nr_succeeded),
TP_STRUCT__entry(
__field(unsigned long, nr_migrated)
@@ -79,23 +78,8 @@ TRACE_EVENT(mm_compaction_migratepages,
),
TP_fast_assign(
- unsigned long nr_failed = 0;
- struct list_head *page_lru;
-
- /*
- * migrate_pages() returns either a non-negative number
- * with the number of pages that failed migration, or an
- * error code, in which case we need to count the remaining
- * pages manually
- */
- if (migrate_rc >= 0)
- nr_failed = migrate_rc;
- else
- list_for_each(page_lru, migratepages)
- nr_failed++;
-
- __entry->nr_migrated = nr_all - nr_failed;
- __entry->nr_failed = nr_failed;
+ __entry->nr_migrated = nr_succeeded;
+ __entry->nr_failed = cc->nr_migratepages - nr_succeeded;
),
TP_printk("nr_migrated=%lu nr_failed=%lu",
@@ -104,10 +88,10 @@ TRACE_EVENT(mm_compaction_migratepages,
);
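This simplification assumes migrate_pages() now reports its success count through an out-parameter, so the tracepoint no longer re-derives it from the return value or the leftover list. A hedged sketch of the call-site shape implied by the new prototype:

    /* Sketch only: assumed caller-side shape in the compaction code. */
    unsigned int nr_succeeded = 0;

    migrate_pages(&cc->migratepages, compaction_alloc, compaction_free,
                  (unsigned long)cc, cc->mode, MR_COMPACTION, &nr_succeeded);
    trace_mm_compaction_migratepages(cc, nr_succeeded);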
TRACE_EVENT(mm_compaction_begin,
- TP_PROTO(unsigned long zone_start, unsigned long migrate_pfn,
- unsigned long free_pfn, unsigned long zone_end, bool sync),
+ TP_PROTO(struct compact_control *cc, unsigned long zone_start,
+ unsigned long zone_end, bool sync),
- TP_ARGS(zone_start, migrate_pfn, free_pfn, zone_end, sync),
+ TP_ARGS(cc, zone_start, zone_end, sync),
TP_STRUCT__entry(
__field(unsigned long, zone_start)
@@ -119,8 +103,8 @@ TRACE_EVENT(mm_compaction_begin,
TP_fast_assign(
__entry->zone_start = zone_start;
- __entry->migrate_pfn = migrate_pfn;
- __entry->free_pfn = free_pfn;
+ __entry->migrate_pfn = cc->migrate_pfn;
+ __entry->free_pfn = cc->free_pfn;
__entry->zone_end = zone_end;
__entry->sync = sync;
),
@@ -134,11 +118,11 @@ TRACE_EVENT(mm_compaction_begin,
);
TRACE_EVENT(mm_compaction_end,
- TP_PROTO(unsigned long zone_start, unsigned long migrate_pfn,
- unsigned long free_pfn, unsigned long zone_end, bool sync,
+ TP_PROTO(struct compact_control *cc, unsigned long zone_start,
+ unsigned long zone_end, bool sync,
int status),
- TP_ARGS(zone_start, migrate_pfn, free_pfn, zone_end, sync, status),
+ TP_ARGS(cc, zone_start, zone_end, sync, status),
TP_STRUCT__entry(
__field(unsigned long, zone_start)
@@ -151,8 +135,8 @@ TRACE_EVENT(mm_compaction_end,
TP_fast_assign(
__entry->zone_start = zone_start;
- __entry->migrate_pfn = migrate_pfn;
- __entry->free_pfn = free_pfn;
+ __entry->migrate_pfn = cc->migrate_pfn;
+ __entry->free_pfn = cc->free_pfn;
__entry->zone_end = zone_end;
__entry->sync = sync;
__entry->status = status;
@@ -178,13 +162,13 @@ TRACE_EVENT(mm_compaction_try_to_compact_pages,
TP_STRUCT__entry(
__field(int, order)
- __field(gfp_t, gfp_mask)
+ __field(unsigned long, gfp_mask)
__field(int, prio)
),
TP_fast_assign(
__entry->order = order;
- __entry->gfp_mask = gfp_mask;
+ __entry->gfp_mask = (__force unsigned long)gfp_mask;
__entry->prio = prio;
),
@@ -314,40 +298,44 @@ TRACE_EVENT(mm_compaction_kcompactd_sleep,
DECLARE_EVENT_CLASS(kcompactd_wake_template,
- TP_PROTO(int nid, int order, enum zone_type classzone_idx),
+ TP_PROTO(int nid, int order, enum zone_type highest_zoneidx),
- TP_ARGS(nid, order, classzone_idx),
+ TP_ARGS(nid, order, highest_zoneidx),
TP_STRUCT__entry(
__field(int, nid)
__field(int, order)
- __field(enum zone_type, classzone_idx)
+ __field(enum zone_type, highest_zoneidx)
),
TP_fast_assign(
__entry->nid = nid;
__entry->order = order;
- __entry->classzone_idx = classzone_idx;
+ __entry->highest_zoneidx = highest_zoneidx;
),
+ /*
+ * classzone_idx is the previous name of highest_zoneidx. The old
+ * name is kept in the output because the tracepoint format is ABI.
+ */
TP_printk("nid=%d order=%d classzone_idx=%-8s",
__entry->nid,
__entry->order,
- __print_symbolic(__entry->classzone_idx, ZONE_TYPE))
+ __print_symbolic(__entry->highest_zoneidx, ZONE_TYPE))
);
DEFINE_EVENT(kcompactd_wake_template, mm_compaction_wakeup_kcompactd,
- TP_PROTO(int nid, int order, enum zone_type classzone_idx),
+ TP_PROTO(int nid, int order, enum zone_type highest_zoneidx),
- TP_ARGS(nid, order, classzone_idx)
+ TP_ARGS(nid, order, highest_zoneidx)
);
DEFINE_EVENT(kcompactd_wake_template, mm_compaction_kcompactd_wake,
- TP_PROTO(int nid, int order, enum zone_type classzone_idx),
+ TP_PROTO(int nid, int order, enum zone_type highest_zoneidx),
- TP_ARGS(nid, order, classzone_idx)
+ TP_ARGS(nid, order, highest_zoneidx)
);
#endif
diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h
new file mode 100644
index 000000000000..c79f1d4c39af
--- /dev/null
+++ b/include/trace/events/damon.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM damon
+
+#if !defined(_TRACE_DAMON_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DAMON_H
+
+#include <linux/damon.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(damon_aggregated,
+
+ TP_PROTO(struct damon_target *t, unsigned int target_id,
+ struct damon_region *r, unsigned int nr_regions),
+
+ TP_ARGS(t, target_id, r, nr_regions),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, target_id)
+ __field(unsigned int, nr_regions)
+ __field(unsigned long, start)
+ __field(unsigned long, end)
+ __field(unsigned int, nr_accesses)
+ __field(unsigned int, age)
+ ),
+
+ TP_fast_assign(
+ __entry->target_id = target_id;
+ __entry->nr_regions = nr_regions;
+ __entry->start = r->ar.start;
+ __entry->end = r->ar.end;
+ __entry->nr_accesses = r->nr_accesses;
+ __entry->age = r->age;
+ ),
+
+ TP_printk("target_id=%lu nr_regions=%u %lu-%lu: %u %u",
+ __entry->target_id, __entry->nr_regions,
+ __entry->start, __entry->end,
+ __entry->nr_accesses, __entry->age)
+);
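For reference, the two unlabeled trailing integers in this format are the region's nr_accesses and age, so a rendered payload looks like (illustrative values only):

    target_id=42 nr_regions=10 4096-16384: 3 7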
+
+#endif /* _TRACE_DAMON_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/devfreq.h b/include/trace/events/devfreq.h
index cf5b8772175d..7627c620bbda 100644
--- a/include/trace/events/devfreq.h
+++ b/include/trace/events/devfreq.h
@@ -8,6 +8,34 @@
#include <linux/devfreq.h>
#include <linux/tracepoint.h>
+TRACE_EVENT(devfreq_frequency,
+ TP_PROTO(struct devfreq *devfreq, unsigned long freq,
+ unsigned long prev_freq),
+
+ TP_ARGS(devfreq, freq, prev_freq),
+
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name(&devfreq->dev))
+ __field(unsigned long, freq)
+ __field(unsigned long, prev_freq)
+ __field(unsigned long, busy_time)
+ __field(unsigned long, total_time)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name, dev_name(&devfreq->dev));
+ __entry->freq = freq;
+ __entry->prev_freq = prev_freq;
+ __entry->busy_time = devfreq->last_status.busy_time;
+ __entry->total_time = devfreq->last_status.total_time;
+ ),
+
+ TP_printk("dev_name=%-30s freq=%-12lu prev_freq=%-12lu load=%-2lu",
+ __get_str(dev_name), __entry->freq, __entry->prev_freq,
+ __entry->total_time == 0 ? 0 :
+ (100 * __entry->busy_time) / __entry->total_time)
+);
+
TRACE_EVENT(devfreq_monitor,
TP_PROTO(struct devfreq *devfreq),
@@ -29,7 +57,7 @@ TRACE_EVENT(devfreq_monitor,
__assign_str(dev_name, dev_name(&devfreq->dev));
),
- TP_printk("dev_name=%s freq=%lu polling_ms=%u load=%lu",
+ TP_printk("dev_name=%-30s freq=%-12lu polling_ms=%-3u load=%-2lu",
__get_str(dev_name), __entry->freq, __entry->polling_ms,
__entry->total_time == 0 ? 0 :
(100 * __entry->busy_time) / __entry->total_time)
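Both printk formats compute the device load inline as a percentage, guarding against a zero total_time. The same computation as a helper; devfreq_load_pct() is a hypothetical name used only for illustration:

    static unsigned long devfreq_load_pct(unsigned long busy_time,
                                          unsigned long total_time)
    {
            /* Mirrors the TP_printk() expression above. */
            return total_time == 0 ? 0 : (100 * busy_time) / total_time;
    }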
diff --git a/include/trace/events/devlink.h b/include/trace/events/devlink.h
index 6f60a78d9a7e..24969184c534 100644
--- a/include/trace/events/devlink.h
+++ b/include/trace/events/devlink.h
@@ -21,9 +21,9 @@ TRACE_EVENT(devlink_hwmsg,
TP_ARGS(devlink, incoming, type, buf, len),
TP_STRUCT__entry(
- __string(bus_name, devlink->dev->bus->name)
- __string(dev_name, dev_name(devlink->dev))
- __string(driver_name, devlink->dev->driver->name)
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
__field(bool, incoming)
__field(unsigned long, type)
__dynamic_array(u8, buf, len)
@@ -31,9 +31,9 @@ TRACE_EVENT(devlink_hwmsg,
),
TP_fast_assign(
- __assign_str(bus_name, devlink->dev->bus->name);
- __assign_str(dev_name, dev_name(devlink->dev));
- __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
__entry->incoming = incoming;
__entry->type = type;
memcpy(__get_dynamic_array(buf), buf, len);
@@ -55,17 +55,17 @@ TRACE_EVENT(devlink_hwerr,
TP_ARGS(devlink, err, msg),
TP_STRUCT__entry(
- __string(bus_name, devlink->dev->bus->name)
- __string(dev_name, dev_name(devlink->dev))
- __string(driver_name, devlink->dev->driver->name)
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
__field(int, err)
__string(msg, msg)
),
TP_fast_assign(
- __assign_str(bus_name, devlink->dev->bus->name);
- __assign_str(dev_name, dev_name(devlink->dev));
- __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
__entry->err = err;
__assign_str(msg, msg);
),
@@ -85,17 +85,17 @@ TRACE_EVENT(devlink_health_report,
TP_ARGS(devlink, reporter_name, msg),
TP_STRUCT__entry(
- __string(bus_name, devlink->dev->bus->name)
- __string(dev_name, dev_name(devlink->dev))
- __string(driver_name, devlink->dev->driver->name)
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
__string(reporter_name, msg)
__string(msg, msg)
),
TP_fast_assign(
- __assign_str(bus_name, devlink->dev->bus->name);
- __assign_str(dev_name, dev_name(devlink->dev));
- __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
__assign_str(reporter_name, reporter_name);
__assign_str(msg, msg);
),
@@ -116,18 +116,18 @@ TRACE_EVENT(devlink_health_recover_aborted,
TP_ARGS(devlink, reporter_name, health_state, time_since_last_recover),
TP_STRUCT__entry(
- __string(bus_name, devlink->dev->bus->name)
- __string(dev_name, dev_name(devlink->dev))
- __string(driver_name, devlink->dev->driver->name)
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
__string(reporter_name, reporter_name)
__field(bool, health_state)
__field(u64, time_since_last_recover)
),
TP_fast_assign(
- __assign_str(bus_name, devlink->dev->bus->name);
- __assign_str(dev_name, dev_name(devlink->dev));
- __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
__assign_str(reporter_name, reporter_name);
__entry->health_state = health_state;
__entry->time_since_last_recover = time_since_last_recover;
@@ -150,17 +150,17 @@ TRACE_EVENT(devlink_health_reporter_state_update,
TP_ARGS(devlink, reporter_name, new_state),
TP_STRUCT__entry(
- __string(bus_name, devlink->dev->bus->name)
- __string(dev_name, dev_name(devlink->dev))
- __string(driver_name, devlink->dev->driver->name)
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
__string(reporter_name, reporter_name)
__field(u8, new_state)
),
TP_fast_assign(
- __assign_str(bus_name, devlink->dev->bus->name);
- __assign_str(dev_name, dev_name(devlink->dev));
- __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
__assign_str(reporter_name, reporter_name);
__entry->new_state = new_state;
),
@@ -171,6 +171,42 @@ TRACE_EVENT(devlink_health_reporter_state_update,
__entry->new_state)
);
+/*
+ * Tracepoint for devlink packet trap:
+ */
+TRACE_EVENT(devlink_trap_report,
+ TP_PROTO(const struct devlink *devlink, struct sk_buff *skb,
+ const struct devlink_trap_metadata *metadata),
+
+ TP_ARGS(devlink, skb, metadata),
+
+ TP_STRUCT__entry(
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
+ __string(trap_name, metadata->trap_name)
+ __string(trap_group_name, metadata->trap_group_name)
+ __array(char, input_dev_name, IFNAMSIZ)
+ ),
+
+ TP_fast_assign(
+ struct net_device *input_dev = metadata->input_dev;
+
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
+ __assign_str(trap_name, metadata->trap_name);
+ __assign_str(trap_group_name, metadata->trap_group_name);
+ strscpy(__entry->input_dev_name, input_dev ? input_dev->name : "NULL", IFNAMSIZ);
+ ),
+
+ TP_printk("bus_name=%s dev_name=%s driver_name=%s trap_name=%s "
+ "trap_group_name=%s input_dev_name=%s", __get_str(bus_name),
+ __get_str(dev_name), __get_str(driver_name),
+ __get_str(trap_name), __get_str(trap_group_name),
+ __entry->input_dev_name)
+);
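Every site that dereferenced devlink->dev now goes through devlink_to_dev(), suggesting the member is no longer meant to be touched directly. A sketch of what such an accessor amounts to, assuming it simply returns the embedded device pointer:

    /* Sketch: the accessor shape this diff converts to; the real
     * helper is provided by the devlink core.
     */
    static inline struct device *devlink_to_dev(const struct devlink *devlink)
    {
            return devlink->dev;
    }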
+
#endif /* _TRACE_DEVLINK_H */
/* This part must be outside protection */
diff --git a/include/trace/events/dlm.h b/include/trace/events/dlm.h
new file mode 100644
index 000000000000..da0eaae98fa3
--- /dev/null
+++ b/include/trace/events/dlm.h
@@ -0,0 +1,336 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dlm
+
+#if !defined(_TRACE_DLM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DLM_H
+
+#include <linux/dlm.h>
+#include <linux/dlmconstants.h>
+#include <linux/tracepoint.h>
+
+#include "../../../fs/dlm/dlm_internal.h"
+
+#define show_lock_flags(flags) __print_flags(flags, "|", \
+ { DLM_LKF_NOQUEUE, "NOQUEUE" }, \
+ { DLM_LKF_CANCEL, "CANCEL" }, \
+ { DLM_LKF_CONVERT, "CONVERT" }, \
+ { DLM_LKF_VALBLK, "VALBLK" }, \
+ { DLM_LKF_QUECVT, "QUECVT" }, \
+ { DLM_LKF_IVVALBLK, "IVVALBLK" }, \
+ { DLM_LKF_CONVDEADLK, "CONVDEADLK" }, \
+ { DLM_LKF_PERSISTENT, "PERSISTENT" }, \
+ { DLM_LKF_NODLCKWT, "NODLCKWT" }, \
+ { DLM_LKF_NODLCKBLK, "NODLCKBLK" }, \
+ { DLM_LKF_EXPEDITE, "EXPEDITE" }, \
+ { DLM_LKF_NOQUEUEBAST, "NOQUEUEBAST" }, \
+ { DLM_LKF_HEADQUE, "HEADQUE" }, \
+ { DLM_LKF_NOORDER, "NOORDER" }, \
+ { DLM_LKF_ORPHAN, "ORPHAN" }, \
+ { DLM_LKF_ALTPR, "ALTPR" }, \
+ { DLM_LKF_ALTCW, "ALTCW" }, \
+ { DLM_LKF_FORCEUNLOCK, "FORCEUNLOCK" }, \
+ { DLM_LKF_TIMEOUT, "TIMEOUT" })
+
+#define show_lock_mode(mode) __print_symbolic(mode, \
+ { DLM_LOCK_IV, "IV"}, \
+ { DLM_LOCK_NL, "NL"}, \
+ { DLM_LOCK_CR, "CR"}, \
+ { DLM_LOCK_CW, "CW"}, \
+ { DLM_LOCK_PR, "PR"}, \
+ { DLM_LOCK_PW, "PW"}, \
+ { DLM_LOCK_EX, "EX"})
+
+#define show_dlm_sb_flags(flags) __print_flags(flags, "|", \
+ { DLM_SBF_DEMOTED, "DEMOTED" }, \
+ { DLM_SBF_VALNOTVALID, "VALNOTVALID" }, \
+ { DLM_SBF_ALTMODE, "ALTMODE" })
+
+/* note: we begin tracing dlm_lock_start() only if ls and lkb are found */
+TRACE_EVENT(dlm_lock_start,
+
+ TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, const void *name,
+ unsigned int namelen, int mode, __u32 flags),
+
+ TP_ARGS(ls, lkb, name, namelen, mode, flags),
+
+ TP_STRUCT__entry(
+ __field(__u32, ls_id)
+ __field(__u32, lkb_id)
+ __field(int, mode)
+ __field(__u32, flags)
+ __dynamic_array(unsigned char, res_name,
+ lkb->lkb_resource ? lkb->lkb_resource->res_length : namelen)
+ ),
+
+ TP_fast_assign(
+ struct dlm_rsb *r;
+
+ __entry->ls_id = ls->ls_global_id;
+ __entry->lkb_id = lkb->lkb_id;
+ __entry->mode = mode;
+ __entry->flags = flags;
+
+ r = lkb->lkb_resource;
+ if (r)
+ memcpy(__get_dynamic_array(res_name), r->res_name,
+ __get_dynamic_array_len(res_name));
+ else if (name)
+ memcpy(__get_dynamic_array(res_name), name,
+ __get_dynamic_array_len(res_name));
+ ),
+
+ TP_printk("ls_id=%u lkb_id=%x mode=%s flags=%s res_name=%s",
+ __entry->ls_id, __entry->lkb_id,
+ show_lock_mode(__entry->mode),
+ show_lock_flags(__entry->flags),
+ __print_hex_str(__get_dynamic_array(res_name),
+ __get_dynamic_array_len(res_name)))
+
+);
+
+TRACE_EVENT(dlm_lock_end,
+
+ TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, const void *name,
+ unsigned int namelen, int mode, __u32 flags, int error,
+ bool kernel_lock),
+
+ TP_ARGS(ls, lkb, name, namelen, mode, flags, error, kernel_lock),
+
+ TP_STRUCT__entry(
+ __field(__u32, ls_id)
+ __field(__u32, lkb_id)
+ __field(int, mode)
+ __field(__u32, flags)
+ __field(int, error)
+ __dynamic_array(unsigned char, res_name,
+ lkb->lkb_resource ? lkb->lkb_resource->res_length : namelen)
+ ),
+
+ TP_fast_assign(
+ struct dlm_rsb *r;
+
+ __entry->ls_id = ls->ls_global_id;
+ __entry->lkb_id = lkb->lkb_id;
+ __entry->mode = mode;
+ __entry->flags = flags;
+ __entry->error = error;
+
+ r = lkb->lkb_resource;
+ if (r)
+ memcpy(__get_dynamic_array(res_name), r->res_name,
+ __get_dynamic_array_len(res_name));
+ else if (name)
+ memcpy(__get_dynamic_array(res_name), name,
+ __get_dynamic_array_len(res_name));
+
+ if (kernel_lock) {
+ /* dlm_lock() zeroes the return value in these cases. Do the same
+ * here so the traced error matches what the caller sees, without
+ * adding overhead when tracing is disabled.
+ */
+ if (error == -EAGAIN || error == -EDEADLK)
+ __entry->error = 0;
+ }
+ ),
+
+ TP_printk("ls_id=%u lkb_id=%x mode=%s flags=%s error=%d res_name=%s",
+ __entry->ls_id, __entry->lkb_id,
+ show_lock_mode(__entry->mode),
+ show_lock_flags(__entry->flags), __entry->error,
+ __print_hex_str(__get_dynamic_array(res_name),
+ __get_dynamic_array_len(res_name)))
+
+);
+
+TRACE_EVENT(dlm_bast,
+
+ TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, int mode),
+
+ TP_ARGS(ls, lkb, mode),
+
+ TP_STRUCT__entry(
+ __field(__u32, ls_id)
+ __field(__u32, lkb_id)
+ __field(int, mode)
+ __dynamic_array(unsigned char, res_name,
+ lkb->lkb_resource ? lkb->lkb_resource->res_length : 0)
+ ),
+
+ TP_fast_assign(
+ struct dlm_rsb *r;
+
+ __entry->ls_id = ls->ls_global_id;
+ __entry->lkb_id = lkb->lkb_id;
+ __entry->mode = mode;
+
+ r = lkb->lkb_resource;
+ if (r)
+ memcpy(__get_dynamic_array(res_name), r->res_name,
+ __get_dynamic_array_len(res_name));
+ ),
+
+ TP_printk("ls_id=%u lkb_id=%x mode=%s res_name=%s",
+ __entry->ls_id, __entry->lkb_id,
+ show_lock_mode(__entry->mode),
+ __print_hex_str(__get_dynamic_array(res_name),
+ __get_dynamic_array_len(res_name)))
+
+);
+
+TRACE_EVENT(dlm_ast,
+
+ TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb),
+
+ TP_ARGS(ls, lkb),
+
+ TP_STRUCT__entry(
+ __field(__u32, ls_id)
+ __field(__u32, lkb_id)
+ __field(u8, sb_flags)
+ __field(int, sb_status)
+ __dynamic_array(unsigned char, res_name,
+ lkb->lkb_resource ? lkb->lkb_resource->res_length : 0)
+ ),
+
+ TP_fast_assign(
+ struct dlm_rsb *r;
+
+ __entry->ls_id = ls->ls_global_id;
+ __entry->lkb_id = lkb->lkb_id;
+ __entry->sb_flags = lkb->lkb_lksb->sb_flags;
+ __entry->sb_status = lkb->lkb_lksb->sb_status;
+
+ r = lkb->lkb_resource;
+ if (r)
+ memcpy(__get_dynamic_array(res_name), r->res_name,
+ __get_dynamic_array_len(res_name));
+ ),
+
+ TP_printk("ls_id=%u lkb_id=%x sb_flags=%s sb_status=%d res_name=%s",
+ __entry->ls_id, __entry->lkb_id,
+ show_dlm_sb_flags(__entry->sb_flags), __entry->sb_status,
+ __print_hex_str(__get_dynamic_array(res_name),
+ __get_dynamic_array_len(res_name)))
+
+);
+
+/* note: we begin tracing dlm_unlock_start() only if ls and lkb are found */
+TRACE_EVENT(dlm_unlock_start,
+
+ TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, __u32 flags),
+
+ TP_ARGS(ls, lkb, flags),
+
+ TP_STRUCT__entry(
+ __field(__u32, ls_id)
+ __field(__u32, lkb_id)
+ __field(__u32, flags)
+ __dynamic_array(unsigned char, res_name,
+ lkb->lkb_resource ? lkb->lkb_resource->res_length : 0)
+ ),
+
+ TP_fast_assign(
+ struct dlm_rsb *r;
+
+ __entry->ls_id = ls->ls_global_id;
+ __entry->lkb_id = lkb->lkb_id;
+ __entry->flags = flags;
+
+ r = lkb->lkb_resource;
+ if (r)
+ memcpy(__get_dynamic_array(res_name), r->res_name,
+ __get_dynamic_array_len(res_name));
+ ),
+
+ TP_printk("ls_id=%u lkb_id=%x flags=%s res_name=%s",
+ __entry->ls_id, __entry->lkb_id,
+ show_lock_flags(__entry->flags),
+ __print_hex_str(__get_dynamic_array(res_name),
+ __get_dynamic_array_len(res_name)))
+
+);
+
+TRACE_EVENT(dlm_unlock_end,
+
+ TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, __u32 flags,
+ int error),
+
+ TP_ARGS(ls, lkb, flags, error),
+
+ TP_STRUCT__entry(
+ __field(__u32, ls_id)
+ __field(__u32, lkb_id)
+ __field(__u32, flags)
+ __field(int, error)
+ __dynamic_array(unsigned char, res_name,
+ lkb->lkb_resource ? lkb->lkb_resource->res_length : 0)
+ ),
+
+ TP_fast_assign(
+ struct dlm_rsb *r;
+
+ __entry->ls_id = ls->ls_global_id;
+ __entry->lkb_id = lkb->lkb_id;
+ __entry->flags = flags;
+ __entry->error = error;
+
+ r = lkb->lkb_resource;
+ if (r)
+ memcpy(__get_dynamic_array(res_name), r->res_name,
+ __get_dynamic_array_len(res_name));
+ ),
+
+ TP_printk("ls_id=%u lkb_id=%x flags=%s error=%d res_name=%s",
+ __entry->ls_id, __entry->lkb_id,
+ show_lock_flags(__entry->flags), __entry->error,
+ __print_hex_str(__get_dynamic_array(res_name),
+ __get_dynamic_array_len(res_name)))
+
+);
+
+TRACE_EVENT(dlm_send,
+
+ TP_PROTO(int nodeid, int ret),
+
+ TP_ARGS(nodeid, ret),
+
+ TP_STRUCT__entry(
+ __field(int, nodeid)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->nodeid = nodeid;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("nodeid=%d ret=%d", __entry->nodeid, __entry->ret)
+
+);
+
+TRACE_EVENT(dlm_recv,
+
+ TP_PROTO(int nodeid, int ret),
+
+ TP_ARGS(nodeid, ret),
+
+ TP_STRUCT__entry(
+ __field(int, nodeid)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->nodeid = nodeid;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("nodeid=%d ret=%d", __entry->nodeid, __entry->ret)
+
+);
+
+#endif /* if !defined(_TRACE_DLM_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h
index 64e92d56c6a8..3963e79ca7b4 100644
--- a/include/trace/events/dma_fence.h
+++ b/include/trace/events/dma_fence.h
@@ -23,8 +23,8 @@ DECLARE_EVENT_CLASS(dma_fence,
),
TP_fast_assign(
- __assign_str(driver, fence->ops->get_driver_name(fence))
- __assign_str(timeline, fence->ops->get_timeline_name(fence))
+ __assign_str(driver, fence->ops->get_driver_name(fence));
+ __assign_str(timeline, fence->ops->get_timeline_name(fence));
__entry->context = fence->context;
__entry->seqno = fence->seqno;
),
diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h
index 27f5caa6299a..4f4c44ea3a65 100644
--- a/include/trace/events/erofs.h
+++ b/include/trace/events/erofs.h
@@ -24,7 +24,7 @@ struct erofs_map_blocks;
#define show_mflags(flags) __print_flags(flags, "", \
{ EROFS_MAP_MAPPED, "M" }, \
{ EROFS_MAP_META, "I" }, \
- { EROFS_MAP_ZIPPED, "Z" })
+ { EROFS_MAP_ENCODED, "E" })
TRACE_EVENT(erofs_lookup,
@@ -35,33 +35,32 @@ TRACE_EVENT(erofs_lookup,
TP_STRUCT__entry(
__field(dev_t, dev )
__field(erofs_nid_t, nid )
- __field(const char *, name )
+ __string(name, dentry->d_name.name )
__field(unsigned int, flags )
),
TP_fast_assign(
__entry->dev = dir->i_sb->s_dev;
__entry->nid = EROFS_I(dir)->nid;
- __entry->name = dentry->d_name.name;
+ __assign_str(name, dentry->d_name.name);
__entry->flags = flags;
),
TP_printk("dev = (%d,%d), pnid = %llu, name:%s, flags:%x",
show_dev_nid(__entry),
- __entry->name,
+ __get_str(name),
__entry->flags)
);
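
The __string() conversion above is the standard cure for a recurring
tracepoint bug: a plain `const char *` field records only the pointer, and the
dentry name it points at may be gone by the time the ring buffer is read.
__string() reserves space in each record and __assign_str() copies the bytes
at trace time. The idiom, reduced to a sketch (the event name is illustrative):

    TRACE_EVENT(example_lookup,
            TP_PROTO(struct dentry *dentry),
            TP_ARGS(dentry),
            TP_STRUCT__entry(
                    __string(name, dentry->d_name.name)      /* reserve strlen+1 bytes */
            ),
            TP_fast_assign(
                    __assign_str(name, dentry->d_name.name); /* copy into the record */
            ),
            TP_printk("name=%s", __get_str(name))            /* read the copied bytes */
    );
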
TRACE_EVENT(erofs_fill_inode,
- TP_PROTO(struct inode *inode, int isdir),
- TP_ARGS(inode, isdir),
+ TP_PROTO(struct inode *inode),
+ TP_ARGS(inode),
TP_STRUCT__entry(
__field(dev_t, dev )
__field(erofs_nid_t, nid )
__field(erofs_blk_t, blkaddr )
__field(unsigned int, ofs )
- __field(int, isdir )
),
TP_fast_assign(
@@ -69,13 +68,11 @@ TRACE_EVENT(erofs_fill_inode,
__entry->nid = EROFS_I(inode)->nid;
__entry->blkaddr = erofs_blknr(iloc(EROFS_I_SB(inode), __entry->nid));
__entry->ofs = erofs_blkoff(iloc(EROFS_I_SB(inode), __entry->nid));
- __entry->isdir = isdir;
),
- TP_printk("dev = (%d,%d), nid = %llu, blkaddr %u ofs %u, isdir %d",
+ TP_printk("dev = (%d,%d), nid = %llu, blkaddr %u ofs %u",
show_dev_nid(__entry),
- __entry->blkaddr, __entry->ofs,
- __entry->isdir)
+ __entry->blkaddr, __entry->ofs)
);
TRACE_EVENT(erofs_readpage,
@@ -113,10 +110,10 @@ TRACE_EVENT(erofs_readpage,
TRACE_EVENT(erofs_readpages,
- TP_PROTO(struct inode *inode, struct page *page, unsigned int nrpage,
+ TP_PROTO(struct inode *inode, pgoff_t start, unsigned int nrpage,
bool raw),
- TP_ARGS(inode, page, nrpage, raw),
+ TP_ARGS(inode, start, nrpage, raw),
TP_STRUCT__entry(
__field(dev_t, dev )
@@ -129,7 +126,7 @@ TRACE_EVENT(erofs_readpages,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->nid = EROFS_I(inode)->nid;
- __entry->start = page->index;
+ __entry->start = start;
__entry->nrpage = nrpage;
__entry->raw = raw;
),
@@ -169,7 +166,7 @@ DECLARE_EVENT_CLASS(erofs__map_blocks_enter,
__entry->flags ? show_map_flags(__entry->flags) : "NULL")
);
-DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_flatmode_enter,
+DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_enter,
TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
unsigned flags),
@@ -221,7 +218,7 @@ DECLARE_EVENT_CLASS(erofs__map_blocks_exit,
show_mflags(__entry->mflags), __entry->ret)
);
-DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_flatmode_exit,
+DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_exit,
TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
unsigned flags, int ret),
diff --git a/include/trace/events/error_report.h b/include/trace/events/error_report.h
new file mode 100644
index 000000000000..a1922a800e6f
--- /dev/null
+++ b/include/trace/events/error_report.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Declarations for error reporting tracepoints.
+ *
+ * Copyright (C) 2021, Google LLC.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM error_report
+
+#if !defined(_TRACE_ERROR_REPORT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ERROR_REPORT_H
+
+#include <linux/tracepoint.h>
+
+#ifndef __ERROR_REPORT_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __ERROR_REPORT_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+enum error_detector {
+ ERROR_DETECTOR_KFENCE,
+ ERROR_DETECTOR_KASAN,
+ ERROR_DETECTOR_WARN,
+};
+
+#endif /* __ERROR_REPORT_DECLARE_TRACE_ENUMS_ONCE_ONLY */
+
+#define error_detector_list \
+ EM(ERROR_DETECTOR_KFENCE, "kfence") \
+ EM(ERROR_DETECTOR_KASAN, "kasan") \
+ EMe(ERROR_DETECTOR_WARN, "warning")
+/* Always end the list with an EMe. */
+
+#undef EM
+#undef EMe
+
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+error_detector_list
+
+#undef EM
+#undef EMe
+
+#define EM(a, b) { a, b },
+#define EMe(a, b) { a, b }
+
+#define show_error_detector_list(val) \
+ __print_symbolic(val, error_detector_list)
+
+DECLARE_EVENT_CLASS(error_report_template,
+ TP_PROTO(enum error_detector error_detector, unsigned long id),
+ TP_ARGS(error_detector, id),
+ TP_STRUCT__entry(__field(enum error_detector, error_detector)
+ __field(unsigned long, id)),
+ TP_fast_assign(__entry->error_detector = error_detector;
+ __entry->id = id;),
+ TP_printk("[%s] %lx",
+ show_error_detector_list(__entry->error_detector),
+ __entry->id));
+
+/**
+ * error_report_end - called after printing the error report
+ * @error_detector: short string describing the error detection tool
+ * @id: pseudo-unique descriptor identifying the report
+ * (e.g. the memory access address)
+ *
+ * This event occurs right after a debugging tool finishes printing the error
+ * report.
+ */
+DEFINE_EVENT(error_report_template, error_report_end,
+ TP_PROTO(enum error_detector error_detector, unsigned long id),
+ TP_ARGS(error_detector, id));
+
+#endif /* _TRACE_ERROR_REPORT_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
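
Two details of the new header are worth unpacking. The EM()/EMe() pair keeps a
single list in sync with two consumers: the list is expanded once with the
macros defined as TRACE_DEFINE_ENUM() (so tooling can resolve the enum names)
and once as { value, "string" } pairs for __print_symbolic(). And since
error_report_end takes just the detector enum and an id, firing it is a single
call; a hedged sketch of a caller (the report path shown is assumed, not part
of this diff):

    #include <trace/events/error_report.h>

    static void end_report_example(unsigned long addr)
    {
            /* id is "pseudo-unique": here, the faulting address */
            trace_error_report_end(ERROR_DETECTOR_KASAN, addr);
    }
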
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 19c87661eeec..229e8fae66a3 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -35,7 +35,8 @@ struct partial_cluster;
{ EXT4_MB_DELALLOC_RESERVED, "DELALLOC_RESV" }, \
{ EXT4_MB_STREAM_ALLOC, "STREAM_ALLOC" }, \
{ EXT4_MB_USE_ROOT_BLOCKS, "USE_ROOT_BLKS" }, \
- { EXT4_MB_USE_RESERVED, "USE_RESV" })
+ { EXT4_MB_USE_RESERVED, "USE_RESV" }, \
+ { EXT4_MB_STRICT_CHECK, "STRICT_CHECK" })
#define show_map_flags(flags) __print_flags(flags, "|", \
{ EXT4_GET_BLOCKS_CREATE, "CREATE" }, \
@@ -45,8 +46,10 @@ struct partial_cluster;
{ EXT4_GET_BLOCKS_CONVERT, "CONVERT" }, \
{ EXT4_GET_BLOCKS_METADATA_NOFAIL, "METADATA_NOFAIL" }, \
{ EXT4_GET_BLOCKS_NO_NORMALIZE, "NO_NORMALIZE" }, \
- { EXT4_GET_BLOCKS_KEEP_SIZE, "KEEP_SIZE" }, \
- { EXT4_GET_BLOCKS_ZERO, "ZERO" })
+ { EXT4_GET_BLOCKS_CONVERT_UNWRITTEN, "CONVERT_UNWRITTEN" }, \
+ { EXT4_GET_BLOCKS_ZERO, "ZERO" }, \
+ { EXT4_GET_BLOCKS_IO_SUBMIT, "IO_SUBMIT" }, \
+ { EXT4_EX_NOCACHE, "EX_NOCACHE" })
/*
* __print_flags() requires that all enum values be wrapped in the
@@ -92,6 +95,28 @@ TRACE_DEFINE_ENUM(ES_REFERENCED_B);
{ FALLOC_FL_COLLAPSE_RANGE, "COLLAPSE_RANGE"}, \
{ FALLOC_FL_ZERO_RANGE, "ZERO_RANGE"})
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_XATTR);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_CROSS_RENAME);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_JOURNAL_FLAG_CHANGE);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_NOMEM);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_SWAP_BOOT);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_RESIZE);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_RENAME_DIR);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_FALLOC_RANGE);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_INODE_JOURNAL_DATA);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_MAX);
+
+#define show_fc_reason(reason) \
+ __print_symbolic(reason, \
+ { EXT4_FC_REASON_XATTR, "XATTR"}, \
+ { EXT4_FC_REASON_CROSS_RENAME, "CROSS_RENAME"}, \
+ { EXT4_FC_REASON_JOURNAL_FLAG_CHANGE, "JOURNAL_FLAG_CHANGE"}, \
+ { EXT4_FC_REASON_NOMEM, "NO_MEM"}, \
+ { EXT4_FC_REASON_SWAP_BOOT, "SWAP_BOOT"}, \
+ { EXT4_FC_REASON_RESIZE, "RESIZE"}, \
+ { EXT4_FC_REASON_RENAME_DIR, "RENAME_DIR"}, \
+ { EXT4_FC_REASON_FALLOC_RANGE, "FALLOC_RANGE"}, \
+ { EXT4_FC_REASON_INODE_JOURNAL_DATA, "INODE_JOURNAL_DATA"})
TRACE_EVENT(ext4_other_inode_update_time,
TP_PROTO(struct inode *inode, ino_t orig_ino),
@@ -310,17 +335,15 @@ TRACE_EVENT(ext4_begin_ordered_truncate,
DECLARE_EVENT_CLASS(ext4__write_begin,
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int flags),
+ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len),
- TP_ARGS(inode, pos, len, flags),
+ TP_ARGS(inode, pos, len),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( loff_t, pos )
__field( unsigned int, len )
- __field( unsigned int, flags )
),
TP_fast_assign(
@@ -328,29 +351,26 @@ DECLARE_EVENT_CLASS(ext4__write_begin,
__entry->ino = inode->i_ino;
__entry->pos = pos;
__entry->len = len;
- __entry->flags = flags;
),
- TP_printk("dev %d,%d ino %lu pos %lld len %u flags %u",
+ TP_printk("dev %d,%d ino %lu pos %lld len %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- __entry->pos, __entry->len, __entry->flags)
+ __entry->pos, __entry->len)
);
DEFINE_EVENT(ext4__write_begin, ext4_write_begin,
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int flags),
+ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len),
- TP_ARGS(inode, pos, len, flags)
+ TP_ARGS(inode, pos, len)
);
DEFINE_EVENT(ext4__write_begin, ext4_da_write_begin,
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int flags),
+ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len),
- TP_ARGS(inode, pos, len, flags)
+ TP_ARGS(inode, pos, len)
);
DECLARE_EVENT_CLASS(ext4__write_end,
@@ -583,44 +603,44 @@ DEFINE_EVENT(ext4__page_op, ext4_releasepage,
TP_ARGS(page)
);
-DECLARE_EVENT_CLASS(ext4_invalidatepage_op,
- TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
+DECLARE_EVENT_CLASS(ext4_invalidate_folio_op,
+ TP_PROTO(struct folio *folio, size_t offset, size_t length),
- TP_ARGS(page, offset, length),
+ TP_ARGS(folio, offset, length),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( pgoff_t, index )
- __field( unsigned int, offset )
- __field( unsigned int, length )
+ __field( size_t, offset )
+ __field( size_t, length )
),
TP_fast_assign(
- __entry->dev = page->mapping->host->i_sb->s_dev;
- __entry->ino = page->mapping->host->i_ino;
- __entry->index = page->index;
+ __entry->dev = folio->mapping->host->i_sb->s_dev;
+ __entry->ino = folio->mapping->host->i_ino;
+ __entry->index = folio->index;
__entry->offset = offset;
__entry->length = length;
),
- TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u",
+ TP_printk("dev %d,%d ino %lu folio_index %lu offset %zu length %zu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned long) __entry->index,
__entry->offset, __entry->length)
);
-DEFINE_EVENT(ext4_invalidatepage_op, ext4_invalidatepage,
- TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
+DEFINE_EVENT(ext4_invalidate_folio_op, ext4_invalidate_folio,
+ TP_PROTO(struct folio *folio, size_t offset, size_t length),
- TP_ARGS(page, offset, length)
+ TP_ARGS(folio, offset, length)
);
-DEFINE_EVENT(ext4_invalidatepage_op, ext4_journalled_invalidatepage,
- TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
+DEFINE_EVENT(ext4_invalidate_folio_op, ext4_journalled_invalidate_folio,
+ TP_PROTO(struct folio *folio, size_t offset, size_t length),
- TP_ARGS(page, offset, length)
+ TP_ARGS(folio, offset, length)
);
TRACE_EVENT(ext4_discard_blocks,
@@ -743,24 +763,29 @@ TRACE_EVENT(ext4_mb_release_group_pa,
);
TRACE_EVENT(ext4_discard_preallocations,
- TP_PROTO(struct inode *inode),
+ TP_PROTO(struct inode *inode, unsigned int len, unsigned int needed),
- TP_ARGS(inode),
+ TP_ARGS(inode, len, needed),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( unsigned int, len )
+ __field( unsigned int, needed )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
+ __entry->len = len;
+ __entry->needed = needed;
),
- TP_printk("dev %d,%d ino %lu",
+ TP_printk("dev %d,%d ino %lu len: %u needed %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino)
+ (unsigned long) __entry->ino, __entry->len,
+ __entry->needed)
);
TRACE_EVENT(ext4_mb_discard_preallocations,
@@ -1309,13 +1334,6 @@ DEFINE_EVENT(ext4__bitmap_load, ext4_mb_buddy_bitmap_load,
TP_ARGS(sb, group)
);
-DEFINE_EVENT(ext4__bitmap_load, ext4_read_block_bitmap_load,
-
- TP_PROTO(struct super_block *sb, unsigned long group),
-
- TP_ARGS(sb, group)
-);
-
DEFINE_EVENT(ext4__bitmap_load, ext4_load_inode_bitmap,
TP_PROTO(struct super_block *sb, unsigned long group),
@@ -1323,62 +1341,27 @@ DEFINE_EVENT(ext4__bitmap_load, ext4_load_inode_bitmap,
TP_ARGS(sb, group)
);
-TRACE_EVENT(ext4_direct_IO_enter,
- TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw),
+TRACE_EVENT(ext4_read_block_bitmap_load,
+ TP_PROTO(struct super_block *sb, unsigned long group, bool prefetch),
- TP_ARGS(inode, offset, len, rw),
+ TP_ARGS(sb, group, prefetch),
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( ino_t, ino )
- __field( loff_t, pos )
- __field( unsigned long, len )
- __field( int, rw )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->pos = offset;
- __entry->len = len;
- __entry->rw = rw;
- ),
-
- TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->pos, __entry->len, __entry->rw)
-);
-
-TRACE_EVENT(ext4_direct_IO_exit,
- TP_PROTO(struct inode *inode, loff_t offset, unsigned long len,
- int rw, int ret),
-
- TP_ARGS(inode, offset, len, rw, ret),
+ __field( __u32, group )
+ __field( bool, prefetch )
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( loff_t, pos )
- __field( unsigned long, len )
- __field( int, rw )
- __field( int, ret )
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->pos = offset;
- __entry->len = len;
- __entry->rw = rw;
- __entry->ret = ret;
+ __entry->dev = sb->s_dev;
+ __entry->group = group;
+ __entry->prefetch = prefetch;
),
- TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d ret %d",
+ TP_printk("dev %d,%d group %u prefetch %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->pos, __entry->len,
- __entry->rw, __entry->ret)
+ __entry->group, __entry->prefetch)
);
DECLARE_EVENT_CLASS(ext4__fallocate_mode,
@@ -1742,9 +1725,9 @@ TRACE_EVENT(ext4_ext_load_extent,
);
TRACE_EVENT(ext4_load_inode,
- TP_PROTO(struct inode *inode),
+ TP_PROTO(struct super_block *sb, unsigned long ino),
- TP_ARGS(inode),
+ TP_ARGS(sb, ino),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -1752,8 +1735,8 @@ TRACE_EVENT(ext4_load_inode,
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
+ __entry->dev = sb->s_dev;
+ __entry->ino = ino;
),
TP_printk("dev %d,%d ino %ld",
@@ -1927,124 +1910,6 @@ TRACE_EVENT(ext4_get_implied_cluster_alloc_exit,
__entry->len, show_mflags(__entry->flags), __entry->ret)
);
-TRACE_EVENT(ext4_ext_put_in_cache,
- TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len,
- ext4_fsblk_t start),
-
- TP_ARGS(inode, lblk, len, start),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( ext4_lblk_t, lblk )
- __field( unsigned int, len )
- __field( ext4_fsblk_t, start )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->lblk = lblk;
- __entry->len = len;
- __entry->start = start;
- ),
-
- TP_printk("dev %d,%d ino %lu lblk %u len %u start %llu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned) __entry->lblk,
- __entry->len,
- (unsigned long long) __entry->start)
-);
-
-TRACE_EVENT(ext4_ext_in_cache,
- TP_PROTO(struct inode *inode, ext4_lblk_t lblk, int ret),
-
- TP_ARGS(inode, lblk, ret),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( ext4_lblk_t, lblk )
- __field( int, ret )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->lblk = lblk;
- __entry->ret = ret;
- ),
-
- TP_printk("dev %d,%d ino %lu lblk %u ret %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned) __entry->lblk,
- __entry->ret)
-
-);
-
-TRACE_EVENT(ext4_find_delalloc_range,
- TP_PROTO(struct inode *inode, ext4_lblk_t from, ext4_lblk_t to,
- int reverse, int found, ext4_lblk_t found_blk),
-
- TP_ARGS(inode, from, to, reverse, found, found_blk),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( ext4_lblk_t, from )
- __field( ext4_lblk_t, to )
- __field( int, reverse )
- __field( int, found )
- __field( ext4_lblk_t, found_blk )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->from = from;
- __entry->to = to;
- __entry->reverse = reverse;
- __entry->found = found;
- __entry->found_blk = found_blk;
- ),
-
- TP_printk("dev %d,%d ino %lu from %u to %u reverse %d found %d "
- "(blk = %u)",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned) __entry->from, (unsigned) __entry->to,
- __entry->reverse, __entry->found,
- (unsigned) __entry->found_blk)
-);
-
-TRACE_EVENT(ext4_get_reserved_cluster_alloc,
- TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len),
-
- TP_ARGS(inode, lblk, len),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( ext4_lblk_t, lblk )
- __field( unsigned int, len )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->lblk = lblk;
- __entry->len = len;
- ),
-
- TP_printk("dev %d,%d ino %lu lblk %u len %u",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned) __entry->lblk,
- __entry->len)
-);
-
TRACE_EVENT(ext4_ext_show_extent,
TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
unsigned short len),
@@ -2723,6 +2588,353 @@ TRACE_EVENT(ext4_error,
__entry->function, __entry->line)
);
+TRACE_EVENT(ext4_prefetch_bitmaps,
+ TP_PROTO(struct super_block *sb, ext4_group_t group,
+ ext4_group_t next, unsigned int prefetch_ios),
+
+ TP_ARGS(sb, group, next, prefetch_ios),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( __u32, group )
+ __field( __u32, next )
+ __field( __u32, ios )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->group = group;
+ __entry->next = next;
+ __entry->ios = prefetch_ios;
+ ),
+
+ TP_printk("dev %d,%d group %u next %u ios %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->group, __entry->next, __entry->ios)
+);
+
+TRACE_EVENT(ext4_lazy_itable_init,
+ TP_PROTO(struct super_block *sb, ext4_group_t group),
+
+ TP_ARGS(sb, group),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( __u32, group )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->group = group;
+ ),
+
+ TP_printk("dev %d,%d group %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->group)
+);
+
+TRACE_EVENT(ext4_fc_replay_scan,
+ TP_PROTO(struct super_block *sb, int error, int off),
+
+ TP_ARGS(sb, error, off),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, error)
+ __field(int, off)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->error = error;
+ __entry->off = off;
+ ),
+
+ TP_printk("dev %d,%d error %d, off %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->error, __entry->off)
+);
+
+TRACE_EVENT(ext4_fc_replay,
+ TP_PROTO(struct super_block *sb, int tag, int ino, int priv1, int priv2),
+
+ TP_ARGS(sb, tag, ino, priv1, priv2),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, tag)
+ __field(int, ino)
+ __field(int, priv1)
+ __field(int, priv2)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->tag = tag;
+ __entry->ino = ino;
+ __entry->priv1 = priv1;
+ __entry->priv2 = priv2;
+ ),
+
+ TP_printk("dev %d,%d: tag %d, ino %d, data1 %d, data2 %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->tag, __entry->ino, __entry->priv1, __entry->priv2)
+);
+
+TRACE_EVENT(ext4_fc_commit_start,
+ TP_PROTO(struct super_block *sb, tid_t commit_tid),
+
+ TP_ARGS(sb, commit_tid),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(tid_t, tid)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->tid = commit_tid;
+ ),
+
+ TP_printk("dev %d,%d tid %u", MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->tid)
+);
+
+TRACE_EVENT(ext4_fc_commit_stop,
+ TP_PROTO(struct super_block *sb, int nblks, int reason,
+ tid_t commit_tid),
+
+ TP_ARGS(sb, nblks, reason, commit_tid),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, nblks)
+ __field(int, reason)
+ __field(int, num_fc)
+ __field(int, num_fc_ineligible)
+ __field(int, nblks_agg)
+ __field(tid_t, tid)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->nblks = nblks;
+ __entry->reason = reason;
+ __entry->num_fc = EXT4_SB(sb)->s_fc_stats.fc_num_commits;
+ __entry->num_fc_ineligible =
+ EXT4_SB(sb)->s_fc_stats.fc_ineligible_commits;
+ __entry->nblks_agg = EXT4_SB(sb)->s_fc_stats.fc_numblks;
+ __entry->tid = commit_tid;
+ ),
+
+ TP_printk("dev %d,%d nblks %d, reason %d, fc = %d, ineligible = %d, agg_nblks %d, tid %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->nblks, __entry->reason, __entry->num_fc,
+ __entry->num_fc_ineligible, __entry->nblks_agg, __entry->tid)
+);
+
+#define FC_REASON_NAME_STAT(reason) \
+ show_fc_reason(reason), \
+ __entry->fc_ineligible_rc[reason]
+
+TRACE_EVENT(ext4_fc_stats,
+ TP_PROTO(struct super_block *sb),
+
+ TP_ARGS(sb),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __array(unsigned int, fc_ineligible_rc, EXT4_FC_REASON_MAX)
+ __field(unsigned long, fc_commits)
+ __field(unsigned long, fc_ineligible_commits)
+ __field(unsigned long, fc_numblks)
+ ),
+
+ TP_fast_assign(
+ int i;
+
+ __entry->dev = sb->s_dev;
+ for (i = 0; i < EXT4_FC_REASON_MAX; i++) {
+ __entry->fc_ineligible_rc[i] =
+ EXT4_SB(sb)->s_fc_stats.fc_ineligible_reason_count[i];
+ }
+ __entry->fc_commits = EXT4_SB(sb)->s_fc_stats.fc_num_commits;
+ __entry->fc_ineligible_commits =
+ EXT4_SB(sb)->s_fc_stats.fc_ineligible_commits;
+ __entry->fc_numblks = EXT4_SB(sb)->s_fc_stats.fc_numblks;
+ ),
+
+ TP_printk("dev %d,%d fc ineligible reasons:\n"
+ "%s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u "
+ "num_commits:%lu, ineligible: %lu, numblks: %lu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_XATTR),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_CROSS_RENAME),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_JOURNAL_FLAG_CHANGE),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_NOMEM),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_SWAP_BOOT),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_RESIZE),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_RENAME_DIR),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_FALLOC_RANGE),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_INODE_JOURNAL_DATA),
+ __entry->fc_commits, __entry->fc_ineligible_commits,
+ __entry->fc_numblks)
+);
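
FC_REASON_NAME_STAT() keeps the long argument list readable: each invocation
expands to two printf arguments, so the nine "%s:%u" pairs in the format
string are fed by the nine invocations above. Expanded by hand, one entry
becomes (a sketch of the macro expansion, not standalone code):

    /* FC_REASON_NAME_STAT(EXT4_FC_REASON_XATTR) expands to: */
    show_fc_reason(EXT4_FC_REASON_XATTR),                /* the "%s" argument */
    __entry->fc_ineligible_rc[EXT4_FC_REASON_XATTR]      /* the "%u" argument */
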
+
+DECLARE_EVENT_CLASS(ext4_fc_track_dentry,
+
+ TP_PROTO(handle_t *handle, struct inode *inode,
+ struct dentry *dentry, int ret),
+
+ TP_ARGS(handle, inode, dentry, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(tid_t, t_tid)
+ __field(ino_t, i_ino)
+ __field(tid_t, i_sync_tid)
+ __field(int, error)
+ ),
+
+ TP_fast_assign(
+ struct ext4_inode_info *ei = EXT4_I(inode);
+
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->t_tid = handle->h_transaction->t_tid;
+ __entry->i_ino = inode->i_ino;
+ __entry->i_sync_tid = ei->i_sync_tid;
+ __entry->error = ret;
+ ),
+
+ TP_printk("dev %d,%d, t_tid %u, ino %lu, i_sync_tid %u, error %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->t_tid, __entry->i_ino, __entry->i_sync_tid,
+ __entry->error
+ )
+);
+
+#define DEFINE_EVENT_CLASS_DENTRY(__type) \
+DEFINE_EVENT(ext4_fc_track_dentry, ext4_fc_track_##__type, \
+ TP_PROTO(handle_t *handle, struct inode *inode, \
+ struct dentry *dentry, int ret), \
+ TP_ARGS(handle, inode, dentry, ret) \
+)
+
+DEFINE_EVENT_CLASS_DENTRY(create);
+DEFINE_EVENT_CLASS_DENTRY(link);
+DEFINE_EVENT_CLASS_DENTRY(unlink);
+
+TRACE_EVENT(ext4_fc_track_inode,
+ TP_PROTO(handle_t *handle, struct inode *inode, int ret),
+
+ TP_ARGS(handle, inode, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(tid_t, t_tid)
+ __field(ino_t, i_ino)
+ __field(tid_t, i_sync_tid)
+ __field(int, error)
+ ),
+
+ TP_fast_assign(
+ struct ext4_inode_info *ei = EXT4_I(inode);
+
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->t_tid = handle->h_transaction->t_tid;
+ __entry->i_ino = inode->i_ino;
+ __entry->i_sync_tid = ei->i_sync_tid;
+ __entry->error = ret;
+ ),
+
+ TP_printk("dev %d:%d, t_tid %u, inode %lu, i_sync_tid %u, error %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->t_tid, __entry->i_ino, __entry->i_sync_tid,
+ __entry->error)
+ );
+
+TRACE_EVENT(ext4_fc_track_range,
+ TP_PROTO(handle_t *handle, struct inode *inode,
+ long start, long end, int ret),
+
+ TP_ARGS(handle, inode, start, end, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(tid_t, t_tid)
+ __field(ino_t, i_ino)
+ __field(tid_t, i_sync_tid)
+ __field(long, start)
+ __field(long, end)
+ __field(int, error)
+ ),
+
+ TP_fast_assign(
+ struct ext4_inode_info *ei = EXT4_I(inode);
+
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->t_tid = handle->h_transaction->t_tid;
+ __entry->i_ino = inode->i_ino;
+ __entry->i_sync_tid = ei->i_sync_tid;
+ __entry->start = start;
+ __entry->end = end;
+ __entry->error = ret;
+ ),
+
+ TP_printk("dev %d:%d, t_tid %u, inode %lu, i_sync_tid %u, error %d, start %ld, end %ld",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->t_tid, __entry->i_ino, __entry->i_sync_tid,
+ __entry->error, __entry->start, __entry->end)
+ );
+
+TRACE_EVENT(ext4_fc_cleanup,
+ TP_PROTO(journal_t *journal, int full, tid_t tid),
+
+ TP_ARGS(journal, full, tid),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, j_fc_off)
+ __field(int, full)
+ __field(tid_t, tid)
+ ),
+
+ TP_fast_assign(
+ struct super_block *sb = journal->j_private;
+
+ __entry->dev = sb->s_dev;
+ __entry->j_fc_off = journal->j_fc_off;
+ __entry->full = full;
+ __entry->tid = tid;
+ ),
+
+ TP_printk("dev %d,%d, j_fc_off %d, full %d, tid %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->j_fc_off, __entry->full, __entry->tid)
+ );
+
+TRACE_EVENT(ext4_update_sb,
+ TP_PROTO(struct super_block *sb, ext4_fsblk_t fsblk,
+ unsigned int flags),
+
+ TP_ARGS(sb, fsblk, flags),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ext4_fsblk_t, fsblk)
+ __field(unsigned int, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->fsblk = fsblk;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("dev %d,%d fsblk %llu flags %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->fsblk, __entry->flags)
+);
+
#endif /* _TRACE_EXT4_H */
/* This part must be outside protection */
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 67a97838c2a0..c6b372401c27 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -6,6 +6,7 @@
#define _TRACE_F2FS_H
#include <linux/tracepoint.h>
+#include <uapi/linux/f2fs.h>
#define show_dev(dev) MAJOR(dev), MINOR(dev)
#define show_dev_ino(entry) show_dev(entry->dev), (unsigned long)entry->ino
@@ -14,10 +15,6 @@ TRACE_DEFINE_ENUM(NODE);
TRACE_DEFINE_ENUM(DATA);
TRACE_DEFINE_ENUM(META);
TRACE_DEFINE_ENUM(META_FLUSH);
-TRACE_DEFINE_ENUM(INMEM);
-TRACE_DEFINE_ENUM(INMEM_DROP);
-TRACE_DEFINE_ENUM(INMEM_INVALIDATE);
-TRACE_DEFINE_ENUM(INMEM_REVOKE);
TRACE_DEFINE_ENUM(IPU);
TRACE_DEFINE_ENUM(OPU);
TRACE_DEFINE_ENUM(HOT);
@@ -50,6 +47,7 @@ TRACE_DEFINE_ENUM(CP_RECOVERY);
TRACE_DEFINE_ENUM(CP_DISCARD);
TRACE_DEFINE_ENUM(CP_TRIMMED);
TRACE_DEFINE_ENUM(CP_PAUSE);
+TRACE_DEFINE_ENUM(CP_RESIZE);
#define show_block_type(type) \
__print_symbolic(type, \
@@ -57,10 +55,6 @@ TRACE_DEFINE_ENUM(CP_PAUSE);
{ DATA, "DATA" }, \
{ META, "META" }, \
{ META_FLUSH, "META_FLUSH" }, \
- { INMEM, "INMEM" }, \
- { INMEM_DROP, "INMEM_DROP" }, \
- { INMEM_INVALIDATE, "INMEM_INVALIDATE" }, \
- { INMEM_REVOKE, "INMEM_REVOKE" }, \
{ IPU, "IN-PLACE" }, \
{ OPU, "OUT-OF-PLACE" })
@@ -72,7 +66,7 @@ TRACE_DEFINE_ENUM(CP_PAUSE);
#define F2FS_OP_FLAGS (REQ_RAHEAD | REQ_SYNC | REQ_META | REQ_PRIO | \
REQ_PREFLUSH | REQ_FUA)
-#define F2FS_BIO_FLAG_MASK(t) (t & F2FS_OP_FLAGS)
+#define F2FS_BIO_FLAG_MASK(t) (__force u32)((t) & F2FS_OP_FLAGS)
#define show_bio_type(op,op_flags) show_bio_op(op), \
show_bio_op_flags(op_flags)
@@ -81,12 +75,12 @@ TRACE_DEFINE_ENUM(CP_PAUSE);
#define show_bio_op_flags(flags) \
__print_flags(F2FS_BIO_FLAG_MASK(flags), "|", \
- { REQ_RAHEAD, "R" }, \
- { REQ_SYNC, "S" }, \
- { REQ_META, "M" }, \
- { REQ_PRIO, "P" }, \
- { REQ_PREFLUSH, "PF" }, \
- { REQ_FUA, "FUA" })
+ { (__force u32)REQ_RAHEAD, "R" }, \
+ { (__force u32)REQ_SYNC, "S" }, \
+ { (__force u32)REQ_META, "M" }, \
+ { (__force u32)REQ_PRIO, "P" }, \
+ { (__force u32)REQ_PREFLUSH, "PF" }, \
+ { (__force u32)REQ_FUA, "FUA" })
#define show_data_type(type) \
__print_symbolic(type, \
@@ -110,13 +104,15 @@ TRACE_DEFINE_ENUM(CP_PAUSE);
#define show_alloc_mode(type) \
__print_symbolic(type, \
- { LFS, "LFS-mode" }, \
- { SSR, "SSR-mode" })
+ { LFS, "LFS-mode" }, \
+ { SSR, "SSR-mode" }, \
+ { AT_SSR, "AT_SSR-mode" })
#define show_victim_policy(type) \
__print_symbolic(type, \
{ GC_GREEDY, "Greedy" }, \
- { GC_CB, "Cost-Benefit" })
+ { GC_CB, "Cost-Benefit" }, \
+ { GC_AT, "Age-threshold" })
#define show_cpreason(type) \
__print_flags(type, "|", \
@@ -126,13 +122,14 @@ TRACE_DEFINE_ENUM(CP_PAUSE);
{ CP_RECOVERY, "Recovery" }, \
{ CP_DISCARD, "Discard" }, \
{ CP_PAUSE, "Pause" }, \
- { CP_TRIMMED, "Trimmed" })
+ { CP_TRIMMED, "Trimmed" }, \
+ { CP_RESIZE, "Resize" })
#define show_fsync_cpreason(type) \
__print_symbolic(type, \
{ CP_NO_NEEDED, "no needed" }, \
{ CP_NON_REGULAR, "non regular" }, \
- { CP_COMPRESSED, "compreesed" }, \
+ { CP_COMPRESSED, "compressed" }, \
{ CP_HARDLINK, "hardlink" }, \
{ CP_SB_NEED_CP, "sb needs cp" }, \
{ CP_WRONG_PINO, "wrong pino" }, \
@@ -153,7 +150,9 @@ TRACE_DEFINE_ENUM(CP_PAUSE);
#define show_compress_algorithm(type) \
__print_symbolic(type, \
{ COMPRESS_LZO, "LZO" }, \
- { COMPRESS_LZ4, "LZ4" })
+ { COMPRESS_LZ4, "LZ4" }, \
+ { COMPRESS_ZSTD, "ZSTD" }, \
+ { COMPRESS_LZORLE, "LZO-RLE" })
struct f2fs_sb_info;
struct f2fs_io_info;
@@ -533,17 +532,17 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
TRACE_EVENT(f2fs_file_write_iter,
- TP_PROTO(struct inode *inode, unsigned long offset,
- unsigned long length, int ret),
+ TP_PROTO(struct inode *inode, loff_t offset, size_t length,
+ ssize_t ret),
TP_ARGS(inode, offset, length, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
- __field(unsigned long, offset)
- __field(unsigned long, length)
- __field(int, ret)
+ __field(loff_t, offset)
+ __field(size_t, length)
+ __field(ssize_t, ret)
),
TP_fast_assign(
@@ -555,7 +554,7 @@ TRACE_EVENT(f2fs_file_write_iter,
),
TP_printk("dev = (%d,%d), ino = %lu, "
- "offset = %lu, length = %lu, written(err) = %d",
+ "offset = %lld, length = %zu, written(err) = %zd",
show_dev_ino(__entry),
__entry->offset,
__entry->length,
@@ -563,9 +562,10 @@ TRACE_EVENT(f2fs_file_write_iter,
);
TRACE_EVENT(f2fs_map_blocks,
- TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map, int ret),
+ TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map,
+ int create, int flag, int ret),
- TP_ARGS(inode, map, ret),
+ TP_ARGS(inode, map, create, flag, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -576,11 +576,14 @@ TRACE_EVENT(f2fs_map_blocks,
__field(unsigned int, m_flags)
__field(int, m_seg_type)
__field(bool, m_may_create)
+ __field(bool, m_multidev_dio)
+ __field(int, create)
+ __field(int, flag)
__field(int, ret)
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
+ __entry->dev = map->m_bdev->bd_dev;
__entry->ino = inode->i_ino;
__entry->m_lblk = map->m_lblk;
__entry->m_pblk = map->m_pblk;
@@ -588,12 +591,16 @@ TRACE_EVENT(f2fs_map_blocks,
__entry->m_flags = map->m_flags;
__entry->m_seg_type = map->m_seg_type;
__entry->m_may_create = map->m_may_create;
+ __entry->m_multidev_dio = map->m_multidev_dio;
+ __entry->create = create;
+ __entry->flag = flag;
__entry->ret = ret;
),
TP_printk("dev = (%d,%d), ino = %lu, file offset = %llu, "
- "start blkaddr = 0x%llx, len = 0x%llx, flags = %u,"
- "seg_type = %d, may_create = %d, err = %d",
+ "start blkaddr = 0x%llx, len = 0x%llx, flags = %u, "
+ "seg_type = %d, may_create = %d, multidevice = %d, "
+ "create = %d, flag = %d, err = %d",
show_dev_ino(__entry),
(unsigned long long)__entry->m_lblk,
(unsigned long long)__entry->m_pblk,
@@ -601,6 +608,9 @@ TRACE_EVENT(f2fs_map_blocks,
__entry->m_flags,
__entry->m_seg_type,
__entry->m_may_create,
+ __entry->m_multidev_dio,
+ __entry->create,
+ __entry->flag,
__entry->ret)
);
@@ -634,19 +644,22 @@ TRACE_EVENT(f2fs_background_gc,
TRACE_EVENT(f2fs_gc_begin,
- TP_PROTO(struct super_block *sb, bool sync, bool background,
+ TP_PROTO(struct super_block *sb, int gc_type, bool no_bg_gc,
+ unsigned int nr_free_secs,
long long dirty_nodes, long long dirty_dents,
long long dirty_imeta, unsigned int free_sec,
unsigned int free_seg, int reserved_seg,
unsigned int prefree_seg),
- TP_ARGS(sb, sync, background, dirty_nodes, dirty_dents, dirty_imeta,
+ TP_ARGS(sb, gc_type, no_bg_gc, nr_free_secs, dirty_nodes,
+ dirty_dents, dirty_imeta,
free_sec, free_seg, reserved_seg, prefree_seg),
TP_STRUCT__entry(
__field(dev_t, dev)
- __field(bool, sync)
- __field(bool, background)
+ __field(int, gc_type)
+ __field(bool, no_bg_gc)
+ __field(unsigned int, nr_free_secs)
__field(long long, dirty_nodes)
__field(long long, dirty_dents)
__field(long long, dirty_imeta)
@@ -658,8 +671,9 @@ TRACE_EVENT(f2fs_gc_begin,
TP_fast_assign(
__entry->dev = sb->s_dev;
- __entry->sync = sync;
- __entry->background = background;
+ __entry->gc_type = gc_type;
+ __entry->no_bg_gc = no_bg_gc;
+ __entry->nr_free_secs = nr_free_secs;
__entry->dirty_nodes = dirty_nodes;
__entry->dirty_dents = dirty_dents;
__entry->dirty_imeta = dirty_imeta;
@@ -669,12 +683,13 @@ TRACE_EVENT(f2fs_gc_begin,
__entry->prefree_seg = prefree_seg;
),
- TP_printk("dev = (%d,%d), sync = %d, background = %d, nodes = %lld, "
- "dents = %lld, imeta = %lld, free_sec:%u, free_seg:%u, "
+ TP_printk("dev = (%d,%d), gc_type = %s, no_background_GC = %d, nr_free_secs = %u, "
+ "nodes = %lld, dents = %lld, imeta = %lld, free_sec:%u, free_seg:%u, "
"rsv_seg:%d, prefree_seg:%u",
show_dev(__entry->dev),
- __entry->sync,
- __entry->background,
+ show_gc_type(__entry->gc_type),
+ (__entry->gc_type == BG_GC) ? __entry->no_bg_gc : -1,
+ __entry->nr_free_secs,
__entry->dirty_nodes,
__entry->dirty_dents,
__entry->dirty_imeta,
@@ -800,20 +815,20 @@ TRACE_EVENT(f2fs_lookup_start,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
- __field(const char *, name)
+ __string(name, dentry->d_name.name)
__field(unsigned int, flags)
),
TP_fast_assign(
__entry->dev = dir->i_sb->s_dev;
__entry->ino = dir->i_ino;
- __entry->name = dentry->d_name.name;
+ __assign_str(name, dentry->d_name.name);
__entry->flags = flags;
),
TP_printk("dev = (%d,%d), pino = %lu, name:%s, flags:%u",
show_dev_ino(__entry),
- __entry->name,
+ __get_str(name),
__entry->flags)
);
@@ -827,7 +842,7 @@ TRACE_EVENT(f2fs_lookup_end,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
- __field(const char *, name)
+ __string(name, dentry->d_name.name)
__field(nid_t, cino)
__field(int, err)
),
@@ -835,14 +850,14 @@ TRACE_EVENT(f2fs_lookup_end,
TP_fast_assign(
__entry->dev = dir->i_sb->s_dev;
__entry->ino = dir->i_ino;
- __entry->name = dentry->d_name.name;
+ __assign_str(name, dentry->d_name.name);
__entry->cino = ino;
__entry->err = err;
),
TP_printk("dev = (%d,%d), pino = %lu, name:%s, ino:%u, err:%d",
show_dev_ino(__entry),
- __entry->name,
+ __get_str(name),
__entry->cino,
__entry->err)
);
@@ -918,14 +933,14 @@ TRACE_EVENT(f2fs_fallocate,
TRACE_EVENT(f2fs_direct_IO_enter,
- TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw),
+ TP_PROTO(struct inode *inode, struct kiocb *iocb, long len, int rw),
- TP_ARGS(inode, offset, len, rw),
+ TP_ARGS(inode, iocb, len, rw),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
- __field(loff_t, pos)
+ __field(struct kiocb *, iocb)
__field(unsigned long, len)
__field(int, rw)
),
@@ -933,15 +948,17 @@ TRACE_EVENT(f2fs_direct_IO_enter,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->pos = offset;
+ __entry->iocb = iocb;
__entry->len = len;
__entry->rw = rw;
),
- TP_printk("dev = (%d,%d), ino = %lu pos = %lld len = %lu rw = %d",
+ TP_printk("dev = (%d,%d), ino = %lu pos = %lld len = %lu ki_flags = %x ki_ioprio = %x rw = %d",
show_dev_ino(__entry),
- __entry->pos,
+ __entry->iocb->ki_pos,
__entry->len,
+ __entry->iocb->ki_flags,
+ __entry->iocb->ki_ioprio,
__entry->rw)
);
@@ -1019,8 +1036,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
__field(pgoff_t, index)
__field(block_t, old_blkaddr)
__field(block_t, new_blkaddr)
- __field(int, op)
- __field(int, op_flags)
+ __field(enum req_op, op)
+ __field(blk_opf_t, op_flags)
__field(int, temp)
__field(int, type)
),
@@ -1075,8 +1092,8 @@ DECLARE_EVENT_CLASS(f2fs__bio,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(dev_t, target)
- __field(int, op)
- __field(int, op_flags)
+ __field(enum req_op, op)
+ __field(blk_opf_t, op_flags)
__field(int, type)
__field(sector_t, sector)
__field(unsigned int, size)
@@ -1139,17 +1156,15 @@ DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_submit_write_bio,
TRACE_EVENT(f2fs_write_begin,
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int flags),
+ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len),
- TP_ARGS(inode, pos, len, flags),
+ TP_ARGS(inode, pos, len),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
__field(loff_t, pos)
__field(unsigned int, len)
- __field(unsigned int, flags)
),
TP_fast_assign(
@@ -1157,14 +1172,12 @@ TRACE_EVENT(f2fs_write_begin,
__entry->ino = inode->i_ino;
__entry->pos = pos;
__entry->len = len;
- __entry->flags = flags;
),
- TP_printk("dev = (%d,%d), ino = %lu, pos = %llu, len = %u, flags = %u",
+ TP_printk("dev = (%d,%d), ino = %lu, pos = %llu, len = %u",
show_dev_ino(__entry),
(unsigned long long)__entry->pos,
- __entry->len,
- __entry->flags)
+ __entry->len)
);
TRACE_EVENT(f2fs_write_end,
@@ -1269,20 +1282,6 @@ DEFINE_EVENT(f2fs__page, f2fs_vm_page_mkwrite,
TP_ARGS(page, type)
);
-DEFINE_EVENT(f2fs__page, f2fs_register_inmem_page,
-
- TP_PROTO(struct page *page, int type),
-
- TP_ARGS(page, type)
-);
-
-DEFINE_EVENT(f2fs__page, f2fs_commit_inmem_page,
-
- TP_PROTO(struct page *page, int type),
-
- TP_ARGS(page, type)
-);
-
TRACE_EVENT(f2fs_filemap_fault,
TP_PROTO(struct inode *inode, pgoff_t index, unsigned long ret),
@@ -1375,9 +1374,9 @@ TRACE_EVENT(f2fs_writepages,
TRACE_EVENT(f2fs_readpages,
- TP_PROTO(struct inode *inode, struct page *page, unsigned int nrpage),
+ TP_PROTO(struct inode *inode, pgoff_t start, unsigned int nrpage),
- TP_ARGS(inode, page, nrpage),
+ TP_ARGS(inode, start, nrpage),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -1389,7 +1388,7 @@ TRACE_EVENT(f2fs_readpages,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->start = page->index;
+ __entry->start = start;
__entry->nrpage = nrpage;
),
@@ -1579,9 +1578,10 @@ TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
TRACE_EVENT(f2fs_update_extent_tree_range,
TP_PROTO(struct inode *inode, unsigned int pgofs, block_t blkaddr,
- unsigned int len),
+ unsigned int len,
+ unsigned int c_len),
- TP_ARGS(inode, pgofs, blkaddr, len),
+ TP_ARGS(inode, pgofs, blkaddr, len, c_len),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -1589,6 +1589,7 @@ TRACE_EVENT(f2fs_update_extent_tree_range,
__field(unsigned int, pgofs)
__field(u32, blk)
__field(unsigned int, len)
+ __field(unsigned int, c_len)
),
TP_fast_assign(
@@ -1597,14 +1598,17 @@ TRACE_EVENT(f2fs_update_extent_tree_range,
__entry->pgofs = pgofs;
__entry->blk = blkaddr;
__entry->len = len;
+ __entry->c_len = c_len;
),
TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
- "blkaddr = %u, len = %u",
+ "blkaddr = %u, len = %u, "
+ "c_len = %u",
show_dev_ino(__entry),
__entry->pgofs,
__entry->blk,
- __entry->len)
+ __entry->len,
+ __entry->c_len)
);
TRACE_EVENT(f2fs_shrink_extent_tree,
@@ -1811,6 +1815,350 @@ DEFINE_EVENT(f2fs_zip_end, f2fs_decompress_pages_end,
TP_ARGS(inode, cluster_idx, compressed_size, ret)
);
+#ifdef CONFIG_F2FS_IOSTAT
+TRACE_EVENT(f2fs_iostat,
+
+ TP_PROTO(struct f2fs_sb_info *sbi, unsigned long long *iostat),
+
+ TP_ARGS(sbi, iostat),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned long long, app_dio)
+ __field(unsigned long long, app_bio)
+ __field(unsigned long long, app_wio)
+ __field(unsigned long long, app_mio)
+ __field(unsigned long long, app_bcdio)
+ __field(unsigned long long, app_mcdio)
+ __field(unsigned long long, fs_dio)
+ __field(unsigned long long, fs_cdio)
+ __field(unsigned long long, fs_nio)
+ __field(unsigned long long, fs_mio)
+ __field(unsigned long long, fs_gc_dio)
+ __field(unsigned long long, fs_gc_nio)
+ __field(unsigned long long, fs_cp_dio)
+ __field(unsigned long long, fs_cp_nio)
+ __field(unsigned long long, fs_cp_mio)
+ __field(unsigned long long, app_drio)
+ __field(unsigned long long, app_brio)
+ __field(unsigned long long, app_rio)
+ __field(unsigned long long, app_mrio)
+ __field(unsigned long long, app_bcrio)
+ __field(unsigned long long, app_mcrio)
+ __field(unsigned long long, fs_drio)
+ __field(unsigned long long, fs_gdrio)
+ __field(unsigned long long, fs_cdrio)
+ __field(unsigned long long, fs_nrio)
+ __field(unsigned long long, fs_mrio)
+ __field(unsigned long long, fs_discard)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sbi->sb->s_dev;
+ __entry->app_dio = iostat[APP_DIRECT_IO];
+ __entry->app_bio = iostat[APP_BUFFERED_IO];
+ __entry->app_wio = iostat[APP_WRITE_IO];
+ __entry->app_mio = iostat[APP_MAPPED_IO];
+ __entry->app_bcdio = iostat[APP_BUFFERED_CDATA_IO];
+ __entry->app_mcdio = iostat[APP_MAPPED_CDATA_IO];
+ __entry->fs_dio = iostat[FS_DATA_IO];
+ __entry->fs_cdio = iostat[FS_CDATA_IO];
+ __entry->fs_nio = iostat[FS_NODE_IO];
+ __entry->fs_mio = iostat[FS_META_IO];
+ __entry->fs_gc_dio = iostat[FS_GC_DATA_IO];
+ __entry->fs_gc_nio = iostat[FS_GC_NODE_IO];
+ __entry->fs_cp_dio = iostat[FS_CP_DATA_IO];
+ __entry->fs_cp_nio = iostat[FS_CP_NODE_IO];
+ __entry->fs_cp_mio = iostat[FS_CP_META_IO];
+ __entry->app_drio = iostat[APP_DIRECT_READ_IO];
+ __entry->app_brio = iostat[APP_BUFFERED_READ_IO];
+ __entry->app_rio = iostat[APP_READ_IO];
+ __entry->app_mrio = iostat[APP_MAPPED_READ_IO];
+ __entry->app_bcrio = iostat[APP_BUFFERED_CDATA_READ_IO];
+ __entry->app_mcrio = iostat[APP_MAPPED_CDATA_READ_IO];
+ __entry->fs_drio = iostat[FS_DATA_READ_IO];
+ __entry->fs_gdrio = iostat[FS_GDATA_READ_IO];
+ __entry->fs_cdrio = iostat[FS_CDATA_READ_IO];
+ __entry->fs_nrio = iostat[FS_NODE_READ_IO];
+ __entry->fs_mrio = iostat[FS_META_READ_IO];
+ __entry->fs_discard = iostat[FS_DISCARD];
+ ),
+
+ TP_printk("dev = (%d,%d), "
+ "app [write=%llu (direct=%llu, buffered=%llu), mapped=%llu, "
+ "compr(buffered=%llu, mapped=%llu)], "
+ "fs [data=%llu, cdata=%llu, node=%llu, meta=%llu, discard=%llu], "
+ "gc [data=%llu, node=%llu], "
+ "cp [data=%llu, node=%llu, meta=%llu], "
+ "app [read=%llu (direct=%llu, buffered=%llu), mapped=%llu], "
+ "compr(buffered=%llu, mapped=%llu)], "
+ "fs [data=%llu, (gc_data=%llu, cdata=%llu), "
+ "node=%llu, meta=%llu]",
+ show_dev(__entry->dev), __entry->app_wio, __entry->app_dio,
+ __entry->app_bio, __entry->app_mio, __entry->app_bcdio,
+ __entry->app_mcdio, __entry->fs_dio, __entry->fs_cdio,
+ __entry->fs_nio, __entry->fs_mio, __entry->fs_discard,
+ __entry->fs_gc_dio, __entry->fs_gc_nio, __entry->fs_cp_dio,
+ __entry->fs_cp_nio, __entry->fs_cp_mio,
+ __entry->app_rio, __entry->app_drio, __entry->app_brio,
+ __entry->app_mrio, __entry->app_bcrio, __entry->app_mcrio,
+ __entry->fs_drio, __entry->fs_gdrio,
+ __entry->fs_cdrio, __entry->fs_nrio, __entry->fs_mrio)
+);
+
+#ifndef __F2FS_IOSTAT_LATENCY_TYPE
+#define __F2FS_IOSTAT_LATENCY_TYPE
+struct f2fs_iostat_latency {
+ unsigned int peak_lat;
+ unsigned int avg_lat;
+ unsigned int cnt;
+};
+#endif /* __F2FS_IOSTAT_LATENCY_TYPE */
+
+TRACE_EVENT(f2fs_iostat_latency,
+
+ TP_PROTO(struct f2fs_sb_info *sbi, struct f2fs_iostat_latency (*iostat_lat)[NR_PAGE_TYPE]),
+
+ TP_ARGS(sbi, iostat_lat),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned int, d_rd_peak)
+ __field(unsigned int, d_rd_avg)
+ __field(unsigned int, d_rd_cnt)
+ __field(unsigned int, n_rd_peak)
+ __field(unsigned int, n_rd_avg)
+ __field(unsigned int, n_rd_cnt)
+ __field(unsigned int, m_rd_peak)
+ __field(unsigned int, m_rd_avg)
+ __field(unsigned int, m_rd_cnt)
+ __field(unsigned int, d_wr_s_peak)
+ __field(unsigned int, d_wr_s_avg)
+ __field(unsigned int, d_wr_s_cnt)
+ __field(unsigned int, n_wr_s_peak)
+ __field(unsigned int, n_wr_s_avg)
+ __field(unsigned int, n_wr_s_cnt)
+ __field(unsigned int, m_wr_s_peak)
+ __field(unsigned int, m_wr_s_avg)
+ __field(unsigned int, m_wr_s_cnt)
+ __field(unsigned int, d_wr_as_peak)
+ __field(unsigned int, d_wr_as_avg)
+ __field(unsigned int, d_wr_as_cnt)
+ __field(unsigned int, n_wr_as_peak)
+ __field(unsigned int, n_wr_as_avg)
+ __field(unsigned int, n_wr_as_cnt)
+ __field(unsigned int, m_wr_as_peak)
+ __field(unsigned int, m_wr_as_avg)
+ __field(unsigned int, m_wr_as_cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sbi->sb->s_dev;
+ __entry->d_rd_peak = iostat_lat[0][DATA].peak_lat;
+ __entry->d_rd_avg = iostat_lat[0][DATA].avg_lat;
+ __entry->d_rd_cnt = iostat_lat[0][DATA].cnt;
+ __entry->n_rd_peak = iostat_lat[0][NODE].peak_lat;
+ __entry->n_rd_avg = iostat_lat[0][NODE].avg_lat;
+ __entry->n_rd_cnt = iostat_lat[0][NODE].cnt;
+ __entry->m_rd_peak = iostat_lat[0][META].peak_lat;
+ __entry->m_rd_avg = iostat_lat[0][META].avg_lat;
+ __entry->m_rd_cnt = iostat_lat[0][META].cnt;
+ __entry->d_wr_s_peak = iostat_lat[1][DATA].peak_lat;
+ __entry->d_wr_s_avg = iostat_lat[1][DATA].avg_lat;
+ __entry->d_wr_s_cnt = iostat_lat[1][DATA].cnt;
+ __entry->n_wr_s_peak = iostat_lat[1][NODE].peak_lat;
+ __entry->n_wr_s_avg = iostat_lat[1][NODE].avg_lat;
+ __entry->n_wr_s_cnt = iostat_lat[1][NODE].cnt;
+ __entry->m_wr_s_peak = iostat_lat[1][META].peak_lat;
+ __entry->m_wr_s_avg = iostat_lat[1][META].avg_lat;
+ __entry->m_wr_s_cnt = iostat_lat[1][META].cnt;
+ __entry->d_wr_as_peak = iostat_lat[2][DATA].peak_lat;
+ __entry->d_wr_as_avg = iostat_lat[2][DATA].avg_lat;
+ __entry->d_wr_as_cnt = iostat_lat[2][DATA].cnt;
+ __entry->n_wr_as_peak = iostat_lat[2][NODE].peak_lat;
+ __entry->n_wr_as_avg = iostat_lat[2][NODE].avg_lat;
+ __entry->n_wr_as_cnt = iostat_lat[2][NODE].cnt;
+ __entry->m_wr_as_peak = iostat_lat[2][META].peak_lat;
+ __entry->m_wr_as_avg = iostat_lat[2][META].avg_lat;
+ __entry->m_wr_as_cnt = iostat_lat[2][META].cnt;
+ ),
+
+ TP_printk("dev = (%d,%d), "
+ "iotype [peak lat.(ms)/avg lat.(ms)/count], "
+ "rd_data [%u/%u/%u], rd_node [%u/%u/%u], rd_meta [%u/%u/%u], "
+ "wr_sync_data [%u/%u/%u], wr_sync_node [%u/%u/%u], "
+ "wr_sync_meta [%u/%u/%u], wr_async_data [%u/%u/%u], "
+ "wr_async_node [%u/%u/%u], wr_async_meta [%u/%u/%u]",
+ show_dev(__entry->dev),
+ __entry->d_rd_peak, __entry->d_rd_avg, __entry->d_rd_cnt,
+ __entry->n_rd_peak, __entry->n_rd_avg, __entry->n_rd_cnt,
+ __entry->m_rd_peak, __entry->m_rd_avg, __entry->m_rd_cnt,
+ __entry->d_wr_s_peak, __entry->d_wr_s_avg, __entry->d_wr_s_cnt,
+ __entry->n_wr_s_peak, __entry->n_wr_s_avg, __entry->n_wr_s_cnt,
+ __entry->m_wr_s_peak, __entry->m_wr_s_avg, __entry->m_wr_s_cnt,
+ __entry->d_wr_as_peak, __entry->d_wr_as_avg, __entry->d_wr_as_cnt,
+ __entry->n_wr_as_peak, __entry->n_wr_as_avg, __entry->n_wr_as_cnt,
+ __entry->m_wr_as_peak, __entry->m_wr_as_avg, __entry->m_wr_as_cnt)
+);
+#endif
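
The iostat_lat argument is a [3][NR_PAGE_TYPE] matrix whose layout is encoded
in the field names: row 0 holds read latencies, row 1 synchronous writes, and
row 2 asynchronous writes, with DATA/NODE/META as columns (inferred from the
rd/wr_s/wr_as prefixes; the row constants live in f2fs's iostat code, not in
this header). For example:

    /* sketch: peak latency of synchronous data writes */
    unsigned int peak = iostat_lat[1][DATA].peak_lat;
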
+
+TRACE_EVENT(f2fs_bmap,
+
+ TP_PROTO(struct inode *inode, sector_t lblock, sector_t pblock),
+
+ TP_ARGS(inode, lblock, pblock),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(sector_t, lblock)
+ __field(sector_t, pblock)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->lblock = lblock;
+ __entry->pblock = pblock;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, lblock:%lld, pblock:%lld",
+ show_dev_ino(__entry),
+ (unsigned long long)__entry->lblock,
+ (unsigned long long)__entry->pblock)
+);
+
+TRACE_EVENT(f2fs_fiemap,
+
+ TP_PROTO(struct inode *inode, sector_t lblock, sector_t pblock,
+ unsigned long long len, unsigned int flags, int ret),
+
+ TP_ARGS(inode, lblock, pblock, len, flags, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(sector_t, lblock)
+ __field(sector_t, pblock)
+ __field(unsigned long long, len)
+ __field(unsigned int, flags)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->lblock = lblock;
+ __entry->pblock = pblock;
+ __entry->len = len;
+ __entry->flags = flags;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, lblock:%lld, pblock:%lld, "
+ "len:%llu, flags:%u, ret:%d",
+ show_dev_ino(__entry),
+ (unsigned long long)__entry->lblock,
+ (unsigned long long)__entry->pblock,
+ __entry->len,
+ __entry->flags,
+ __entry->ret)
+);
+
+DECLARE_EVENT_CLASS(f2fs__rw_start,
+
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+ pid_t pid, char *pathname, char *command),
+
+ TP_ARGS(inode, offset, bytes, pid, pathname, command),
+
+ TP_STRUCT__entry(
+ __string(pathbuf, pathname)
+ __field(loff_t, offset)
+ __field(int, bytes)
+ __field(loff_t, i_size)
+ __string(cmdline, command)
+ __field(pid_t, pid)
+ __field(ino_t, ino)
+ ),
+
+ TP_fast_assign(
+ /*
+ * Replace the spaces in filenames and cmdlines
+ * because embedded spaces confuse the tooling that
+ * parses the traces.
+ */
+ __assign_str(pathbuf, pathname);
+ (void)strreplace(__get_str(pathbuf), ' ', '_');
+ __entry->offset = offset;
+ __entry->bytes = bytes;
+ __entry->i_size = i_size_read(inode);
+ __assign_str(cmdline, command);
+ (void)strreplace(__get_str(cmdline), ' ', '_');
+ __entry->pid = pid;
+ __entry->ino = inode->i_ino;
+ ),
+
+ TP_printk("entry_name %s, offset %llu, bytes %d, cmdline %s,"
+ " pid %d, i_size %llu, ino %lu",
+ __get_str(pathbuf), __entry->offset, __entry->bytes,
+ __get_str(cmdline), __entry->pid, __entry->i_size,
+ (unsigned long) __entry->ino)
+);
+
+DECLARE_EVENT_CLASS(f2fs__rw_end,
+
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+
+ TP_ARGS(inode, offset, bytes),
+
+ TP_STRUCT__entry(
+ __field(ino_t, ino)
+ __field(loff_t, offset)
+ __field(int, bytes)
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->offset = offset;
+ __entry->bytes = bytes;
+ ),
+
+ TP_printk("ino %lu, offset %llu, bytes %d",
+ (unsigned long) __entry->ino,
+ __entry->offset, __entry->bytes)
+);
+
+DEFINE_EVENT(f2fs__rw_start, f2fs_dataread_start,
+
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+ pid_t pid, char *pathname, char *command),
+
+ TP_ARGS(inode, offset, bytes, pid, pathname, command)
+);
+
+DEFINE_EVENT(f2fs__rw_end, f2fs_dataread_end,
+
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+
+ TP_ARGS(inode, offset, bytes)
+);
+
+DEFINE_EVENT(f2fs__rw_start, f2fs_datawrite_start,
+
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+ pid_t pid, char *pathname, char *command),
+
+ TP_ARGS(inode, offset, bytes, pid, pathname, command)
+);
+
+DEFINE_EVENT(f2fs__rw_end, f2fs_datawrite_end,
+
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+
+ TP_ARGS(inode, offset, bytes)
+);
+
#endif /* _TRACE_F2FS_H */
/* This part must be outside protection */
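
The f2fs__rw_start/f2fs__rw_end pair above shows the
DECLARE_EVENT_CLASS()/DEFINE_EVENT() split: the class defines the record
layout, assignment, and format string once, and each DEFINE_EVENT() stamps out
a named tracepoint sharing them, so the dataread and datawrite events carry no
duplicated format code. A sketch of a read-path caller (resolve_path() is
hypothetical; the real call sites live in fs/f2fs):

    static ssize_t read_with_trace(struct inode *inode, loff_t pos, int bytes)
    {
            trace_f2fs_dataread_start(inode, pos, bytes, current->pid,
                                      resolve_path(inode), current->comm);
            /* ... perform the read ... */
            trace_f2fs_dataread_end(inode, pos, bytes);
            return bytes;
    }
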
diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h
index 6f2a4dc35e37..c2300c407f58 100644
--- a/include/trace/events/fib.h
+++ b/include/trace/events/fib.h
@@ -32,7 +32,7 @@ TRACE_EVENT(fib_table_lookup,
__array( __u8, gw6, 16 )
__field( u16, sport )
__field( u16, dport )
- __dynamic_array(char, name, IFNAMSIZ )
+ __array(char, name, IFNAMSIZ )
),
TP_fast_assign(
@@ -66,7 +66,7 @@ TRACE_EVENT(fib_table_lookup,
}
dev = nhc ? nhc->nhc_dev : NULL;
- __assign_str(name, dev ? dev->name : "-");
+ strlcpy(__entry->name, dev ? dev->name : "-", IFNAMSIZ);
if (nhc) {
if (nhc->nhc_gw_family == AF_INET) {
@@ -95,7 +95,7 @@ TRACE_EVENT(fib_table_lookup,
__entry->tb_id, __entry->oif, __entry->iif, __entry->proto,
__entry->src, __entry->sport, __entry->dst, __entry->dport,
__entry->tos, __entry->scope, __entry->flags,
- __get_str(name), __entry->gw4, __entry->gw6, __entry->err)
+ __entry->name, __entry->gw4, __entry->gw6, __entry->err)
);
#endif /* _TRACE_FIB_H */
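
This change (and the matching fib6 one below) trades a variable-length field
for a fixed one: __dynamic_array() stores an offset/length pair plus only the
bytes actually used, while __array() embeds all IFNAMSIZ bytes in every record
but needs no per-event length bookkeeping. The two idioms side by side
(sketches of the TRACE_EVENT fragments, not standalone code):

    /* variable-length: sized per event, read back with __get_str() */
    __dynamic_array(char, name, IFNAMSIZ)           /* TP_STRUCT__entry */
    __assign_str(name, dev ? dev->name : "-");      /* TP_fast_assign   */

    /* fixed-length: IFNAMSIZ bytes inline, read back as __entry->name */
    __array(char, name, IFNAMSIZ)                   /* TP_STRUCT__entry */
    strlcpy(__entry->name, dev ? dev->name : "-", IFNAMSIZ);
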
diff --git a/include/trace/events/fib6.h b/include/trace/events/fib6.h
index c6abdcc77c12..6e821eb79450 100644
--- a/include/trace/events/fib6.h
+++ b/include/trace/events/fib6.h
@@ -31,7 +31,7 @@ TRACE_EVENT(fib6_table_lookup,
__field( u16, dport )
__field( u8, proto )
__field( u8, rt_type )
- __dynamic_array( char, name, IFNAMSIZ )
+ __array( char, name, IFNAMSIZ )
__array( __u8, gw, 16 )
),
@@ -63,9 +63,9 @@ TRACE_EVENT(fib6_table_lookup,
}
if (res->nh && res->nh->fib_nh_dev) {
- __assign_str(name, res->nh->fib_nh_dev);
+ strlcpy(__entry->name, res->nh->fib_nh_dev->name, IFNAMSIZ);
} else {
- __assign_str(name, "-");
+ strcpy(__entry->name, "-");
}
if (res->f6i == net->ipv6.fib6_null_entry) {
struct in6_addr in6_zero = {};
@@ -83,7 +83,7 @@ TRACE_EVENT(fib6_table_lookup,
__entry->tb_id, __entry->oif, __entry->iif, __entry->proto,
__entry->src, __entry->sport, __entry->dst, __entry->dport,
__entry->tos, __entry->scope, __entry->flags,
- __get_str(name), __entry->gw, __entry->err)
+ __entry->name, __entry->gw, __entry->err)
);
#endif /* _TRACE_FIB6_H */
diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
index c705e4944a50..1646dadd7f37 100644
--- a/include/trace/events/filelock.h
+++ b/include/trace/events/filelock.h
@@ -92,7 +92,7 @@ DECLARE_EVENT_CLASS(filelock_lock,
__entry->ret = ret;
),
- TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_blocker=0x%p fl_owner=0x%p fl_pid=%u fl_flags=%s fl_type=%s fl_start=%lld fl_end=%lld ret=%d",
+ TP_printk("fl=%p dev=0x%x:0x%x ino=0x%lx fl_blocker=%p fl_owner=%p fl_pid=%u fl_flags=%s fl_type=%s fl_start=%lld fl_end=%lld ret=%d",
__entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
__entry->i_ino, __entry->fl_blocker, __entry->fl_owner,
__entry->fl_pid, show_fl_flags(__entry->fl_flags),
@@ -145,7 +145,7 @@ DECLARE_EVENT_CLASS(filelock_lease,
__entry->fl_downgrade_time = fl ? fl->fl_downgrade_time : 0;
),
- TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_blocker=0x%p fl_owner=0x%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu",
+ TP_printk("fl=%p dev=0x%x:0x%x ino=0x%lx fl_blocker=%p fl_owner=%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu",
__entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
__entry->i_ino, __entry->fl_blocker, __entry->fl_owner,
show_fl_flags(__entry->fl_flags),
@@ -195,7 +195,7 @@ TRACE_EVENT(generic_add_lease,
__entry->fl_type = fl->fl_type;
),
- TP_printk("dev=0x%x:0x%x ino=0x%lx wcount=%d rcount=%d icount=%d fl_owner=0x%p fl_flags=%s fl_type=%s",
+ TP_printk("dev=0x%x:0x%x ino=0x%lx wcount=%d rcount=%d icount=%d fl_owner=%p fl_flags=%s fl_type=%s",
MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
__entry->i_ino, __entry->wcount, __entry->rcount,
__entry->icount, __entry->fl_owner,
@@ -228,7 +228,7 @@ TRACE_EVENT(leases_conflict,
__entry->conflict = conflict;
),
- TP_printk("conflict %d: lease=0x%p fl_flags=%s fl_type=%s; breaker=0x%p fl_flags=%s fl_type=%s",
+ TP_printk("conflict %d: lease=%p fl_flags=%s fl_type=%s; breaker=%p fl_flags=%s fl_type=%s",
__entry->conflict,
__entry->lease,
show_fl_flags(__entry->l_fl_flags),
diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h
index 796053e162d2..46c89c1e460c 100644
--- a/include/trace/events/filemap.h
+++ b/include/trace/events/filemap.h
@@ -15,43 +15,45 @@
DECLARE_EVENT_CLASS(mm_filemap_op_page_cache,
- TP_PROTO(struct page *page),
+ TP_PROTO(struct folio *folio),
- TP_ARGS(page),
+ TP_ARGS(folio),
TP_STRUCT__entry(
__field(unsigned long, pfn)
__field(unsigned long, i_ino)
__field(unsigned long, index)
__field(dev_t, s_dev)
+ __field(unsigned char, order)
),
TP_fast_assign(
- __entry->pfn = page_to_pfn(page);
- __entry->i_ino = page->mapping->host->i_ino;
- __entry->index = page->index;
- if (page->mapping->host->i_sb)
- __entry->s_dev = page->mapping->host->i_sb->s_dev;
+ __entry->pfn = folio_pfn(folio);
+ __entry->i_ino = folio->mapping->host->i_ino;
+ __entry->index = folio->index;
+ if (folio->mapping->host->i_sb)
+ __entry->s_dev = folio->mapping->host->i_sb->s_dev;
else
- __entry->s_dev = page->mapping->host->i_rdev;
+ __entry->s_dev = folio->mapping->host->i_rdev;
+ __entry->order = folio_order(folio);
),
- TP_printk("dev %d:%d ino %lx page=%p pfn=%lu ofs=%lu",
+ TP_printk("dev %d:%d ino %lx pfn=0x%lx ofs=%lu order=%u",
MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
__entry->i_ino,
- pfn_to_page(__entry->pfn),
__entry->pfn,
- __entry->index << PAGE_SHIFT)
+ __entry->index << PAGE_SHIFT,
+ __entry->order)
);
DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_delete_from_page_cache,
- TP_PROTO(struct page *page),
- TP_ARGS(page)
+ TP_PROTO(struct folio *folio),
+ TP_ARGS(folio)
);
DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_add_to_page_cache,
- TP_PROTO(struct page *page),
- TP_ARGS(page)
+ TP_PROTO(struct folio *folio),
+ TP_ARGS(folio)
);
TRACE_EVENT(filemap_set_wb_err,
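With the event class converted from struct page to struct folio, one event now covers the whole folio and the new "order" field records its size; callers simply hand in the folio. A sketch of a call site, modeled on how mm/filemap.c uses these events (details illustrative):

#include <trace/events/filemap.h>

static void example_note_add(struct folio *folio)
{
        /* pfn, inode, index and order are all derived from the folio */
        trace_mm_filemap_add_to_page_cache(folio);
}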
diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
new file mode 100644
index 000000000000..738b97f22f36
--- /dev/null
+++ b/include/trace/events/fs.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Display helpers for generic filesystem items
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2020, Oracle and/or its affiliates.
+ */
+
+#include <linux/fs.h>
+
+#define show_fs_dirent_type(x) \
+ __print_symbolic(x, \
+ { DT_UNKNOWN, "UNKNOWN" }, \
+ { DT_FIFO, "FIFO" }, \
+ { DT_CHR, "CHR" }, \
+ { DT_DIR, "DIR" }, \
+ { DT_BLK, "BLK" }, \
+ { DT_REG, "REG" }, \
+ { DT_LNK, "LNK" }, \
+ { DT_SOCK, "SOCK" }, \
+ { DT_WHT, "WHT" })
+
+#define show_fs_fcntl_open_flags(x) \
+ __print_flags(x, "|", \
+ { O_WRONLY, "O_WRONLY" }, \
+ { O_RDWR, "O_RDWR" }, \
+ { O_CREAT, "O_CREAT" }, \
+ { O_EXCL, "O_EXCL" }, \
+ { O_NOCTTY, "O_NOCTTY" }, \
+ { O_TRUNC, "O_TRUNC" }, \
+ { O_APPEND, "O_APPEND" }, \
+ { O_NONBLOCK, "O_NONBLOCK" }, \
+ { O_DSYNC, "O_DSYNC" }, \
+ { O_DIRECT, "O_DIRECT" }, \
+ { O_LARGEFILE, "O_LARGEFILE" }, \
+ { O_DIRECTORY, "O_DIRECTORY" }, \
+ { O_NOFOLLOW, "O_NOFOLLOW" }, \
+ { O_NOATIME, "O_NOATIME" }, \
+ { O_CLOEXEC, "O_CLOEXEC" })
+
+#define __fmode_flag(x) { (__force unsigned long)FMODE_##x, #x }
+#define show_fs_fmode_flags(x) \
+ __print_flags(x, "|", \
+ __fmode_flag(READ), \
+ __fmode_flag(WRITE), \
+ __fmode_flag(EXEC))
+
+#ifdef CONFIG_64BIT
+#define show_fs_fcntl_cmd(x) \
+ __print_symbolic(x, \
+ { F_DUPFD, "DUPFD" }, \
+ { F_GETFD, "GETFD" }, \
+ { F_SETFD, "SETFD" }, \
+ { F_GETFL, "GETFL" }, \
+ { F_SETFL, "SETFL" }, \
+ { F_GETLK, "GETLK" }, \
+ { F_SETLK, "SETLK" }, \
+ { F_SETLKW, "SETLKW" }, \
+ { F_SETOWN, "SETOWN" }, \
+ { F_GETOWN, "GETOWN" }, \
+ { F_SETSIG, "SETSIG" }, \
+ { F_GETSIG, "GETSIG" }, \
+ { F_SETOWN_EX, "SETOWN_EX" }, \
+ { F_GETOWN_EX, "GETOWN_EX" }, \
+ { F_GETOWNER_UIDS, "GETOWNER_UIDS" }, \
+ { F_OFD_GETLK, "OFD_GETLK" }, \
+ { F_OFD_SETLK, "OFD_SETLK" }, \
+ { F_OFD_SETLKW, "OFD_SETLKW" })
+#else /* CONFIG_64BIT */
+#define show_fs_fcntl_cmd(x) \
+ __print_symbolic(x, \
+ { F_DUPFD, "DUPFD" }, \
+ { F_GETFD, "GETFD" }, \
+ { F_SETFD, "SETFD" }, \
+ { F_GETFL, "GETFL" }, \
+ { F_SETFL, "SETFL" }, \
+ { F_GETLK, "GETLK" }, \
+ { F_SETLK, "SETLK" }, \
+ { F_SETLKW, "SETLKW" }, \
+ { F_SETOWN, "SETOWN" }, \
+ { F_GETOWN, "GETOWN" }, \
+ { F_SETSIG, "SETSIG" }, \
+ { F_GETSIG, "GETSIG" }, \
+ { F_GETLK64, "GETLK64" }, \
+ { F_SETLK64, "SETLK64" }, \
+ { F_SETLKW64, "SETLKW64" }, \
+ { F_SETOWN_EX, "SETOWN_EX" }, \
+ { F_GETOWN_EX, "GETOWN_EX" }, \
+ { F_GETOWNER_UIDS, "GETOWNER_UIDS" }, \
+ { F_OFD_GETLK, "OFD_GETLK" }, \
+ { F_OFD_SETLK, "OFD_SETLK" }, \
+ { F_OFD_SETLKW, "OFD_SETLKW" })
+#endif /* CONFIG_64BIT */
+
+#define show_fs_fcntl_lock_type(x) \
+ __print_symbolic(x, \
+ { F_RDLCK, "RDLCK" }, \
+ { F_WRLCK, "WRLCK" }, \
+ { F_UNLCK, "UNLCK" })
+
+#define show_fs_lookup_flags(flags) \
+ __print_flags(flags, "|", \
+ { LOOKUP_FOLLOW, "FOLLOW" }, \
+ { LOOKUP_DIRECTORY, "DIRECTORY" }, \
+ { LOOKUP_AUTOMOUNT, "AUTOMOUNT" }, \
+ { LOOKUP_EMPTY, "EMPTY" }, \
+ { LOOKUP_DOWN, "DOWN" }, \
+ { LOOKUP_MOUNTPOINT, "MOUNTPOINT" }, \
+ { LOOKUP_REVAL, "REVAL" }, \
+ { LOOKUP_RCU, "RCU" }, \
+ { LOOKUP_OPEN, "OPEN" }, \
+ { LOOKUP_CREATE, "CREATE" }, \
+ { LOOKUP_EXCL, "EXCL" }, \
+ { LOOKUP_RENAME_TARGET, "RENAME_TARGET" }, \
+ { LOOKUP_PARENT, "PARENT" }, \
+ { LOOKUP_NO_SYMLINKS, "NO_SYMLINKS" }, \
+ { LOOKUP_NO_MAGICLINKS, "NO_MAGICLINKS" }, \
+ { LOOKUP_NO_XDEV, "NO_XDEV" }, \
+ { LOOKUP_BENEATH, "BENEATH" }, \
+ { LOOKUP_IN_ROOT, "IN_ROOT" }, \
+ { LOOKUP_CACHED, "CACHED" })
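The new header defines no events of its own; it only supplies __print_symbolic()/__print_flags() helpers meant to be included by other trace headers. A hypothetical fragment showing the intended use (event and field names invented):

/* inside some TRACE_EVENT() in a header that includes <trace/events/fs.h> */
TP_printk("type=%s flags=%s",
          show_fs_dirent_type(__entry->d_type),
          show_fs_fcntl_open_flags(__entry->open_flags))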
diff --git a/include/trace/events/fscache.h b/include/trace/events/fscache.h
index d16fe6ed78a2..c078c48a8e6d 100644
--- a/include/trace/events/fscache.h
+++ b/include/trace/events/fscache.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* FS-Cache tracepoints
*
- * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#undef TRACE_SYSTEM
@@ -19,65 +19,84 @@
#ifndef __FSCACHE_DECLARE_TRACE_ENUMS_ONCE_ONLY
#define __FSCACHE_DECLARE_TRACE_ENUMS_ONCE_ONLY
+enum fscache_cache_trace {
+ fscache_cache_collision,
+ fscache_cache_get_acquire,
+ fscache_cache_new_acquire,
+ fscache_cache_put_alloc_volume,
+ fscache_cache_put_cache,
+ fscache_cache_put_prep_failed,
+ fscache_cache_put_relinquish,
+ fscache_cache_put_volume,
+};
+
+enum fscache_volume_trace {
+ fscache_volume_collision,
+ fscache_volume_get_cookie,
+ fscache_volume_get_create_work,
+ fscache_volume_get_hash_collision,
+ fscache_volume_free,
+ fscache_volume_new_acquire,
+ fscache_volume_put_cookie,
+ fscache_volume_put_create_work,
+ fscache_volume_put_hash_collision,
+ fscache_volume_put_relinquish,
+ fscache_volume_see_create_work,
+ fscache_volume_see_hash_wake,
+ fscache_volume_wait_create_work,
+};
+
enum fscache_cookie_trace {
fscache_cookie_collision,
fscache_cookie_discard,
- fscache_cookie_get_acquire_parent,
+ fscache_cookie_failed,
fscache_cookie_get_attach_object,
- fscache_cookie_get_reacquire,
- fscache_cookie_get_register_netfs,
- fscache_cookie_put_acquire_nobufs,
- fscache_cookie_put_dup_netfs,
- fscache_cookie_put_relinquish,
+ fscache_cookie_get_end_access,
+ fscache_cookie_get_hash_collision,
+ fscache_cookie_get_inval_work,
+ fscache_cookie_get_lru,
+ fscache_cookie_get_use_work,
+ fscache_cookie_new_acquire,
+ fscache_cookie_put_hash_collision,
+ fscache_cookie_put_lru,
fscache_cookie_put_object,
- fscache_cookie_put_parent,
-};
-
-enum fscache_page_trace {
- fscache_page_cached,
- fscache_page_inval,
- fscache_page_maybe_release,
- fscache_page_radix_clear_store,
- fscache_page_radix_delete,
- fscache_page_radix_insert,
- fscache_page_radix_pend2store,
- fscache_page_radix_set_pend,
- fscache_page_uncache,
- fscache_page_write,
- fscache_page_write_end,
- fscache_page_write_end_pend,
- fscache_page_write_end_noc,
- fscache_page_write_wait,
- fscache_page_trace__nr
+ fscache_cookie_put_over_queued,
+ fscache_cookie_put_relinquish,
+ fscache_cookie_put_withdrawn,
+ fscache_cookie_put_work,
+ fscache_cookie_see_active,
+ fscache_cookie_see_lru_discard,
+ fscache_cookie_see_lru_do_one,
+ fscache_cookie_see_relinquish,
+ fscache_cookie_see_withdraw,
+ fscache_cookie_see_work,
};
-enum fscache_op_trace {
- fscache_op_cancel,
- fscache_op_cancel_all,
- fscache_op_cancelled,
- fscache_op_completed,
- fscache_op_enqueue_async,
- fscache_op_enqueue_mythread,
- fscache_op_gc,
- fscache_op_init,
- fscache_op_put,
- fscache_op_run,
- fscache_op_signal,
- fscache_op_submit,
- fscache_op_submit_ex,
- fscache_op_work,
- fscache_op_trace__nr
+enum fscache_active_trace {
+ fscache_active_use,
+ fscache_active_use_modify,
+ fscache_active_unuse,
};
-enum fscache_page_op_trace {
- fscache_page_op_alloc_one,
- fscache_page_op_attr_changed,
- fscache_page_op_check_consistency,
- fscache_page_op_invalidate,
- fscache_page_op_retr_multi,
- fscache_page_op_retr_one,
- fscache_page_op_write_one,
- fscache_page_op_trace__nr
+enum fscache_access_trace {
+ fscache_access_acquire_volume,
+ fscache_access_acquire_volume_end,
+ fscache_access_cache_pin,
+ fscache_access_cache_unpin,
+ fscache_access_invalidate_cookie,
+ fscache_access_invalidate_cookie_end,
+ fscache_access_io_end,
+ fscache_access_io_not_live,
+ fscache_access_io_read,
+ fscache_access_io_resize,
+ fscache_access_io_wait,
+ fscache_access_io_write,
+ fscache_access_lookup_cookie,
+ fscache_access_lookup_cookie_end,
+ fscache_access_lookup_cookie_end_failed,
+ fscache_access_relinquish_volume,
+ fscache_access_relinquish_volume_end,
+ fscache_access_unlive,
};
#endif
@@ -85,59 +104,80 @@ enum fscache_page_op_trace {
/*
* Declare tracing information enums and their string mappings for display.
*/
+#define fscache_cache_traces \
+ EM(fscache_cache_collision, "*COLLIDE*") \
+ EM(fscache_cache_get_acquire, "GET acq ") \
+ EM(fscache_cache_new_acquire, "NEW acq ") \
+ EM(fscache_cache_put_alloc_volume, "PUT alvol") \
+ EM(fscache_cache_put_cache, "PUT cache") \
+ EM(fscache_cache_put_prep_failed, "PUT pfail") \
+ EM(fscache_cache_put_relinquish, "PUT relnq") \
+ E_(fscache_cache_put_volume, "PUT vol ")
+
+#define fscache_volume_traces \
+ EM(fscache_volume_collision, "*COLLIDE*") \
+ EM(fscache_volume_get_cookie, "GET cook ") \
+ EM(fscache_volume_get_create_work, "GET creat") \
+ EM(fscache_volume_get_hash_collision, "GET hcoll") \
+ EM(fscache_volume_free, "FREE ") \
+ EM(fscache_volume_new_acquire, "NEW acq ") \
+ EM(fscache_volume_put_cookie, "PUT cook ") \
+ EM(fscache_volume_put_create_work, "PUT creat") \
+ EM(fscache_volume_put_hash_collision, "PUT hcoll") \
+ EM(fscache_volume_put_relinquish, "PUT relnq") \
+ EM(fscache_volume_see_create_work, "SEE creat") \
+ EM(fscache_volume_see_hash_wake, "SEE hwake") \
+ E_(fscache_volume_wait_create_work, "WAIT crea")
+
#define fscache_cookie_traces \
- EM(fscache_cookie_collision, "*COLLISION*") \
- EM(fscache_cookie_discard, "DISCARD") \
- EM(fscache_cookie_get_acquire_parent, "GET prn") \
- EM(fscache_cookie_get_attach_object, "GET obj") \
- EM(fscache_cookie_get_reacquire, "GET raq") \
- EM(fscache_cookie_get_register_netfs, "GET net") \
- EM(fscache_cookie_put_acquire_nobufs, "PUT nbf") \
- EM(fscache_cookie_put_dup_netfs, "PUT dnt") \
- EM(fscache_cookie_put_relinquish, "PUT rlq") \
- EM(fscache_cookie_put_object, "PUT obj") \
- E_(fscache_cookie_put_parent, "PUT prn")
-
-#define fscache_page_traces \
- EM(fscache_page_cached, "Cached ") \
- EM(fscache_page_inval, "InvalPg") \
- EM(fscache_page_maybe_release, "MayRels") \
- EM(fscache_page_uncache, "Uncache") \
- EM(fscache_page_radix_clear_store, "RxCStr ") \
- EM(fscache_page_radix_delete, "RxDel ") \
- EM(fscache_page_radix_insert, "RxIns ") \
- EM(fscache_page_radix_pend2store, "RxP2S ") \
- EM(fscache_page_radix_set_pend, "RxSPend ") \
- EM(fscache_page_write, "WritePg") \
- EM(fscache_page_write_end, "EndPgWr") \
- EM(fscache_page_write_end_pend, "EndPgWP") \
- EM(fscache_page_write_end_noc, "EndPgNC") \
- E_(fscache_page_write_wait, "WtOnWrt")
-
-#define fscache_op_traces \
- EM(fscache_op_cancel, "Cancel1") \
- EM(fscache_op_cancel_all, "CancelA") \
- EM(fscache_op_cancelled, "Canclld") \
- EM(fscache_op_completed, "Complet") \
- EM(fscache_op_enqueue_async, "EnqAsyn") \
- EM(fscache_op_enqueue_mythread, "EnqMyTh") \
- EM(fscache_op_gc, "GC ") \
- EM(fscache_op_init, "Init ") \
- EM(fscache_op_put, "Put ") \
- EM(fscache_op_run, "Run ") \
- EM(fscache_op_signal, "Signal ") \
- EM(fscache_op_submit, "Submit ") \
- EM(fscache_op_submit_ex, "SubmitX") \
- E_(fscache_op_work, "Work ")
-
-#define fscache_page_op_traces \
- EM(fscache_page_op_alloc_one, "Alloc1 ") \
- EM(fscache_page_op_attr_changed, "AttrChg") \
- EM(fscache_page_op_check_consistency, "CheckCn") \
- EM(fscache_page_op_invalidate, "Inval ") \
- EM(fscache_page_op_retr_multi, "RetrMul") \
- EM(fscache_page_op_retr_one, "Retr1 ") \
- E_(fscache_page_op_write_one, "Write1 ")
+ EM(fscache_cookie_collision, "*COLLIDE*") \
+ EM(fscache_cookie_discard, "DISCARD ") \
+ EM(fscache_cookie_failed, "FAILED ") \
+ EM(fscache_cookie_get_attach_object, "GET attch") \
+ EM(fscache_cookie_get_hash_collision, "GET hcoll") \
+ EM(fscache_cookie_get_end_access, "GQ endac") \
+ EM(fscache_cookie_get_inval_work, "GQ inval") \
+ EM(fscache_cookie_get_lru, "GET lru ") \
+ EM(fscache_cookie_get_use_work, "GQ use ") \
+ EM(fscache_cookie_new_acquire, "NEW acq ") \
+ EM(fscache_cookie_put_hash_collision, "PUT hcoll") \
+ EM(fscache_cookie_put_lru, "PUT lru ") \
+ EM(fscache_cookie_put_object, "PUT obj ") \
+ EM(fscache_cookie_put_over_queued, "PQ overq") \
+ EM(fscache_cookie_put_relinquish, "PUT relnq") \
+ EM(fscache_cookie_put_withdrawn, "PUT wthdn") \
+ EM(fscache_cookie_put_work, "PQ work ") \
+ EM(fscache_cookie_see_active, "- activ") \
+ EM(fscache_cookie_see_lru_discard, "- x-lru") \
+ EM(fscache_cookie_see_lru_do_one, "- lrudo") \
+ EM(fscache_cookie_see_relinquish, "- x-rlq") \
+ EM(fscache_cookie_see_withdraw, "- x-wth") \
+ E_(fscache_cookie_see_work, "- work ")
+
+#define fscache_active_traces \
+ EM(fscache_active_use, "USE ") \
+ EM(fscache_active_use_modify, "USE-m ") \
+ E_(fscache_active_unuse, "UNUSE ")
+
+#define fscache_access_traces \
+ EM(fscache_access_acquire_volume, "BEGIN acq_vol") \
+ EM(fscache_access_acquire_volume_end, "END acq_vol") \
+ EM(fscache_access_cache_pin, "PIN cache ") \
+ EM(fscache_access_cache_unpin, "UNPIN cache ") \
+ EM(fscache_access_invalidate_cookie, "BEGIN inval ") \
+ EM(fscache_access_invalidate_cookie_end,"END inval ") \
+ EM(fscache_access_io_end, "END io ") \
+ EM(fscache_access_io_not_live, "END io_notl") \
+ EM(fscache_access_io_read, "BEGIN io_read") \
+ EM(fscache_access_io_resize, "BEGIN io_resz") \
+ EM(fscache_access_io_wait, "WAIT io ") \
+ EM(fscache_access_io_write, "BEGIN io_writ") \
+ EM(fscache_access_lookup_cookie, "BEGIN lookup ") \
+ EM(fscache_access_lookup_cookie_end, "END lookup ") \
+ EM(fscache_access_lookup_cookie_end_failed,"END lookupf") \
+ EM(fscache_access_relinquish_volume, "BEGIN rlq_vol") \
+ EM(fscache_access_relinquish_volume_end,"END rlq_vol") \
+ E_(fscache_access_unlive, "END unlive ")
/*
* Export enum symbols via userspace.
@@ -147,7 +187,10 @@ enum fscache_page_op_trace {
#define EM(a, b) TRACE_DEFINE_ENUM(a);
#define E_(a, b) TRACE_DEFINE_ENUM(a);
+fscache_cache_traces;
+fscache_volume_traces;
fscache_cookie_traces;
+fscache_access_traces;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -159,372 +202,297 @@ fscache_cookie_traces;
#define E_(a, b) { a, b }
-TRACE_EVENT(fscache_cookie,
- TP_PROTO(struct fscache_cookie *cookie,
- enum fscache_cookie_trace where,
- int usage),
+TRACE_EVENT(fscache_cache,
+ TP_PROTO(unsigned int cache_debug_id,
+ int usage,
+ enum fscache_cache_trace where),
- TP_ARGS(cookie, where, usage),
+ TP_ARGS(cache_debug_id, usage, where),
TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(struct fscache_cookie *, parent )
- __field(enum fscache_cookie_trace, where )
+ __field(unsigned int, cache )
__field(int, usage )
- __field(int, n_children )
- __field(int, n_active )
- __field(u8, flags )
+ __field(enum fscache_cache_trace, where )
),
TP_fast_assign(
- __entry->cookie = cookie;
- __entry->parent = cookie->parent;
- __entry->where = where;
+ __entry->cache = cache_debug_id;
__entry->usage = usage;
- __entry->n_children = atomic_read(&cookie->n_children);
- __entry->n_active = atomic_read(&cookie->n_active);
- __entry->flags = cookie->flags;
- ),
-
- TP_printk("%s c=%p u=%d p=%p Nc=%d Na=%d f=%02x",
- __print_symbolic(__entry->where, fscache_cookie_traces),
- __entry->cookie, __entry->usage,
- __entry->parent, __entry->n_children, __entry->n_active,
- __entry->flags)
- );
-
-TRACE_EVENT(fscache_netfs,
- TP_PROTO(struct fscache_netfs *netfs),
-
- TP_ARGS(netfs),
-
- TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __array(char, name, 8 )
- ),
-
- TP_fast_assign(
- __entry->cookie = netfs->primary_index;
- strncpy(__entry->name, netfs->name, 8);
- __entry->name[7] = 0;
- ),
-
- TP_printk("c=%p n=%s",
- __entry->cookie, __entry->name)
- );
-
-TRACE_EVENT(fscache_acquire,
- TP_PROTO(struct fscache_cookie *cookie),
-
- TP_ARGS(cookie),
-
- TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(struct fscache_cookie *, parent )
- __array(char, name, 8 )
- __field(int, p_usage )
- __field(int, p_n_children )
- __field(u8, p_flags )
- ),
-
- TP_fast_assign(
- __entry->cookie = cookie;
- __entry->parent = cookie->parent;
- __entry->p_usage = atomic_read(&cookie->parent->usage);
- __entry->p_n_children = atomic_read(&cookie->parent->n_children);
- __entry->p_flags = cookie->parent->flags;
- memcpy(__entry->name, cookie->def->name, 8);
- __entry->name[7] = 0;
+ __entry->where = where;
),
- TP_printk("c=%p p=%p pu=%d pc=%d pf=%02x n=%s",
- __entry->cookie, __entry->parent, __entry->p_usage,
- __entry->p_n_children, __entry->p_flags, __entry->name)
+ TP_printk("C=%08x %s r=%d",
+ __entry->cache,
+ __print_symbolic(__entry->where, fscache_cache_traces),
+ __entry->usage)
);
-TRACE_EVENT(fscache_relinquish,
- TP_PROTO(struct fscache_cookie *cookie, bool retire),
+TRACE_EVENT(fscache_volume,
+ TP_PROTO(unsigned int volume_debug_id,
+ int usage,
+ enum fscache_volume_trace where),
- TP_ARGS(cookie, retire),
+ TP_ARGS(volume_debug_id, usage, where),
TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(struct fscache_cookie *, parent )
+ __field(unsigned int, volume )
__field(int, usage )
- __field(int, n_children )
- __field(int, n_active )
- __field(u8, flags )
- __field(bool, retire )
+ __field(enum fscache_volume_trace, where )
),
TP_fast_assign(
- __entry->cookie = cookie;
- __entry->parent = cookie->parent;
- __entry->usage = atomic_read(&cookie->usage);
- __entry->n_children = atomic_read(&cookie->n_children);
- __entry->n_active = atomic_read(&cookie->n_active);
- __entry->flags = cookie->flags;
- __entry->retire = retire;
+ __entry->volume = volume_debug_id;
+ __entry->usage = usage;
+ __entry->where = where;
),
- TP_printk("c=%p u=%d p=%p Nc=%d Na=%d f=%02x r=%u",
- __entry->cookie, __entry->usage,
- __entry->parent, __entry->n_children, __entry->n_active,
- __entry->flags, __entry->retire)
+ TP_printk("V=%08x %s u=%d",
+ __entry->volume,
+ __print_symbolic(__entry->where, fscache_volume_traces),
+ __entry->usage)
);
-TRACE_EVENT(fscache_enable,
- TP_PROTO(struct fscache_cookie *cookie),
+TRACE_EVENT(fscache_cookie,
+ TP_PROTO(unsigned int cookie_debug_id,
+ int ref,
+ enum fscache_cookie_trace where),
- TP_ARGS(cookie),
+ TP_ARGS(cookie_debug_id, ref, where),
TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(int, usage )
- __field(int, n_children )
- __field(int, n_active )
- __field(u8, flags )
+ __field(unsigned int, cookie )
+ __field(int, ref )
+ __field(enum fscache_cookie_trace, where )
),
TP_fast_assign(
- __entry->cookie = cookie;
- __entry->usage = atomic_read(&cookie->usage);
- __entry->n_children = atomic_read(&cookie->n_children);
- __entry->n_active = atomic_read(&cookie->n_active);
- __entry->flags = cookie->flags;
+ __entry->cookie = cookie_debug_id;
+ __entry->ref = ref;
+ __entry->where = where;
),
- TP_printk("c=%p u=%d Nc=%d Na=%d f=%02x",
- __entry->cookie, __entry->usage,
- __entry->n_children, __entry->n_active, __entry->flags)
+ TP_printk("c=%08x %s r=%d",
+ __entry->cookie,
+ __print_symbolic(__entry->where, fscache_cookie_traces),
+ __entry->ref)
);
-TRACE_EVENT(fscache_disable,
- TP_PROTO(struct fscache_cookie *cookie),
+TRACE_EVENT(fscache_active,
+ TP_PROTO(unsigned int cookie_debug_id,
+ int ref,
+ int n_active,
+ int n_accesses,
+ enum fscache_active_trace why),
- TP_ARGS(cookie),
+ TP_ARGS(cookie_debug_id, ref, n_active, n_accesses, why),
TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(int, usage )
- __field(int, n_children )
+ __field(unsigned int, cookie )
+ __field(int, ref )
__field(int, n_active )
- __field(u8, flags )
+ __field(int, n_accesses )
+ __field(enum fscache_active_trace, why )
),
TP_fast_assign(
- __entry->cookie = cookie;
- __entry->usage = atomic_read(&cookie->usage);
- __entry->n_children = atomic_read(&cookie->n_children);
- __entry->n_active = atomic_read(&cookie->n_active);
- __entry->flags = cookie->flags;
+ __entry->cookie = cookie_debug_id;
+ __entry->ref = ref;
+ __entry->n_active = n_active;
+ __entry->n_accesses = n_accesses;
+ __entry->why = why;
),
- TP_printk("c=%p u=%d Nc=%d Na=%d f=%02x",
- __entry->cookie, __entry->usage,
- __entry->n_children, __entry->n_active, __entry->flags)
+ TP_printk("c=%08x %s r=%d a=%d c=%d",
+ __entry->cookie,
+ __print_symbolic(__entry->why, fscache_active_traces),
+ __entry->ref,
+ __entry->n_accesses,
+ __entry->n_active)
);
-TRACE_EVENT(fscache_osm,
- TP_PROTO(struct fscache_object *object,
- const struct fscache_state *state,
- bool wait, bool oob, s8 event_num),
+TRACE_EVENT(fscache_access_cache,
+ TP_PROTO(unsigned int cache_debug_id,
+ int ref,
+ int n_accesses,
+ enum fscache_access_trace why),
- TP_ARGS(object, state, wait, oob, event_num),
+ TP_ARGS(cache_debug_id, ref, n_accesses, why),
TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(struct fscache_object *, object )
- __array(char, state, 8 )
- __field(bool, wait )
- __field(bool, oob )
- __field(s8, event_num )
+ __field(unsigned int, cache )
+ __field(int, ref )
+ __field(int, n_accesses )
+ __field(enum fscache_access_trace, why )
),
TP_fast_assign(
- __entry->cookie = object->cookie;
- __entry->object = object;
- __entry->wait = wait;
- __entry->oob = oob;
- __entry->event_num = event_num;
- memcpy(__entry->state, state->short_name, 8);
+ __entry->cache = cache_debug_id;
+ __entry->ref = ref;
+ __entry->n_accesses = n_accesses;
+ __entry->why = why;
),
- TP_printk("c=%p o=%p %s %s%sev=%d",
- __entry->cookie,
- __entry->object,
- __entry->state,
- __print_symbolic(__entry->wait,
- { true, "WAIT" },
- { false, "WORK" }),
- __print_symbolic(__entry->oob,
- { true, " OOB " },
- { false, " " }),
- __entry->event_num)
+ TP_printk("C=%08x %s r=%d a=%d",
+ __entry->cache,
+ __print_symbolic(__entry->why, fscache_access_traces),
+ __entry->ref,
+ __entry->n_accesses)
);
-TRACE_EVENT(fscache_page,
- TP_PROTO(struct fscache_cookie *cookie, struct page *page,
- enum fscache_page_trace why),
+TRACE_EVENT(fscache_access_volume,
+ TP_PROTO(unsigned int volume_debug_id,
+ unsigned int cookie_debug_id,
+ int ref,
+ int n_accesses,
+ enum fscache_access_trace why),
- TP_ARGS(cookie, page, why),
+ TP_ARGS(volume_debug_id, cookie_debug_id, ref, n_accesses, why),
TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(pgoff_t, page )
- __field(enum fscache_page_trace, why )
+ __field(unsigned int, volume )
+ __field(unsigned int, cookie )
+ __field(int, ref )
+ __field(int, n_accesses )
+ __field(enum fscache_access_trace, why )
),
TP_fast_assign(
- __entry->cookie = cookie;
- __entry->page = page->index;
- __entry->why = why;
+ __entry->volume = volume_debug_id;
+ __entry->cookie = cookie_debug_id;
+ __entry->ref = ref;
+ __entry->n_accesses = n_accesses;
+ __entry->why = why;
),
- TP_printk("c=%p %s pg=%lx",
+ TP_printk("V=%08x c=%08x %s r=%d a=%d",
+ __entry->volume,
__entry->cookie,
- __print_symbolic(__entry->why, fscache_page_traces),
- __entry->page)
+ __print_symbolic(__entry->why, fscache_access_traces),
+ __entry->ref,
+ __entry->n_accesses)
);
-TRACE_EVENT(fscache_check_page,
- TP_PROTO(struct fscache_cookie *cookie, struct page *page,
- void *val, int n),
+TRACE_EVENT(fscache_access,
+ TP_PROTO(unsigned int cookie_debug_id,
+ int ref,
+ int n_accesses,
+ enum fscache_access_trace why),
- TP_ARGS(cookie, page, val, n),
+ TP_ARGS(cookie_debug_id, ref, n_accesses, why),
TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(void *, page )
- __field(void *, val )
- __field(int, n )
+ __field(unsigned int, cookie )
+ __field(int, ref )
+ __field(int, n_accesses )
+ __field(enum fscache_access_trace, why )
),
TP_fast_assign(
- __entry->cookie = cookie;
- __entry->page = page;
- __entry->val = val;
- __entry->n = n;
+ __entry->cookie = cookie_debug_id;
+ __entry->ref = ref;
+ __entry->n_accesses = n_accesses;
+ __entry->why = why;
),
- TP_printk("c=%p pg=%p val=%p n=%d",
- __entry->cookie, __entry->page, __entry->val, __entry->n)
+ TP_printk("c=%08x %s r=%d a=%d",
+ __entry->cookie,
+ __print_symbolic(__entry->why, fscache_access_traces),
+ __entry->ref,
+ __entry->n_accesses)
);
-TRACE_EVENT(fscache_wake_cookie,
+TRACE_EVENT(fscache_acquire,
TP_PROTO(struct fscache_cookie *cookie),
TP_ARGS(cookie),
TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- ),
-
- TP_fast_assign(
- __entry->cookie = cookie;
- ),
-
- TP_printk("c=%p", __entry->cookie)
- );
-
-TRACE_EVENT(fscache_op,
- TP_PROTO(struct fscache_cookie *cookie, struct fscache_operation *op,
- enum fscache_op_trace why),
-
- TP_ARGS(cookie, op, why),
-
- TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(struct fscache_operation *, op )
- __field(enum fscache_op_trace, why )
+ __field(unsigned int, cookie )
+ __field(unsigned int, volume )
+ __field(int, v_ref )
+ __field(int, v_n_cookies )
),
TP_fast_assign(
- __entry->cookie = cookie;
- __entry->op = op;
- __entry->why = why;
+ __entry->cookie = cookie->debug_id;
+ __entry->volume = cookie->volume->debug_id;
+ __entry->v_ref = refcount_read(&cookie->volume->ref);
+ __entry->v_n_cookies = atomic_read(&cookie->volume->n_cookies);
),
- TP_printk("c=%p op=%p %s",
- __entry->cookie, __entry->op,
- __print_symbolic(__entry->why, fscache_op_traces))
+ TP_printk("c=%08x V=%08x vr=%d vc=%d",
+ __entry->cookie,
+ __entry->volume, __entry->v_ref, __entry->v_n_cookies)
);
-TRACE_EVENT(fscache_page_op,
- TP_PROTO(struct fscache_cookie *cookie, struct page *page,
- struct fscache_operation *op, enum fscache_page_op_trace what),
+TRACE_EVENT(fscache_relinquish,
+ TP_PROTO(struct fscache_cookie *cookie, bool retire),
- TP_ARGS(cookie, page, op, what),
+ TP_ARGS(cookie, retire),
TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(pgoff_t, page )
- __field(struct fscache_operation *, op )
- __field(enum fscache_page_op_trace, what )
+ __field(unsigned int, cookie )
+ __field(unsigned int, volume )
+ __field(int, ref )
+ __field(int, n_active )
+ __field(u8, flags )
+ __field(bool, retire )
),
TP_fast_assign(
- __entry->cookie = cookie;
- __entry->page = page ? page->index : 0;
- __entry->op = op;
- __entry->what = what;
+ __entry->cookie = cookie->debug_id;
+ __entry->volume = cookie->volume->debug_id;
+ __entry->ref = refcount_read(&cookie->ref);
+ __entry->n_active = atomic_read(&cookie->n_active);
+ __entry->flags = cookie->flags;
+ __entry->retire = retire;
),
- TP_printk("c=%p %s pg=%lx op=%p",
- __entry->cookie,
- __print_symbolic(__entry->what, fscache_page_op_traces),
- __entry->page, __entry->op)
+ TP_printk("c=%08x V=%08x r=%d U=%d f=%02x rt=%u",
+ __entry->cookie, __entry->volume, __entry->ref,
+ __entry->n_active, __entry->flags, __entry->retire)
);
-TRACE_EVENT(fscache_wrote_page,
- TP_PROTO(struct fscache_cookie *cookie, struct page *page,
- struct fscache_operation *op, int ret),
+TRACE_EVENT(fscache_invalidate,
+ TP_PROTO(struct fscache_cookie *cookie, loff_t new_size),
- TP_ARGS(cookie, page, op, ret),
+ TP_ARGS(cookie, new_size),
TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(pgoff_t, page )
- __field(struct fscache_operation *, op )
- __field(int, ret )
+ __field(unsigned int, cookie )
+ __field(loff_t, new_size )
),
TP_fast_assign(
- __entry->cookie = cookie;
- __entry->page = page->index;
- __entry->op = op;
- __entry->ret = ret;
+ __entry->cookie = cookie->debug_id;
+ __entry->new_size = new_size;
),
- TP_printk("c=%p pg=%lx op=%p ret=%d",
- __entry->cookie, __entry->page, __entry->op, __entry->ret)
+ TP_printk("c=%08x sz=%llx",
+ __entry->cookie, __entry->new_size)
);
-TRACE_EVENT(fscache_gang_lookup,
- TP_PROTO(struct fscache_cookie *cookie, struct fscache_operation *op,
- void **results, int n, pgoff_t store_limit),
+TRACE_EVENT(fscache_resize,
+ TP_PROTO(struct fscache_cookie *cookie, loff_t new_size),
- TP_ARGS(cookie, op, results, n, store_limit),
+ TP_ARGS(cookie, new_size),
TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(struct fscache_operation *, op )
- __field(pgoff_t, results0 )
- __field(int, n )
- __field(pgoff_t, store_limit )
+ __field(unsigned int, cookie )
+ __field(loff_t, old_size )
+ __field(loff_t, new_size )
),
TP_fast_assign(
- __entry->cookie = cookie;
- __entry->op = op;
- __entry->results0 = results[0] ? ((struct page *)results[0])->index : (pgoff_t)-1;
- __entry->n = n;
- __entry->store_limit = store_limit;
+ __entry->cookie = cookie->debug_id;
+ __entry->old_size = cookie->object_size;
+ __entry->new_size = new_size;
),
- TP_printk("c=%p op=%p r0=%lx n=%d sl=%lx",
- __entry->cookie, __entry->op, __entry->results0, __entry->n,
- __entry->store_limit)
+ TP_printk("c=%08x os=%08llx sz=%08llx",
+ __entry->cookie,
+ __entry->old_size,
+ __entry->new_size)
);
#endif /* _TRACE_FSCACHE_H */
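The rework replaces raw cookie/volume pointers with small debug IDs plus an explicit refcount snapshot, so a tracepoint no longer needs the object itself. A sketch of a helper in the style of fs/fscache/cookie.c (helper name assumed; the debug_id and ref fields appear in the TP_fast_assign blocks above):

static void example_see_cookie(struct fscache_cookie *cookie,
                               enum fscache_cookie_trace where)
{
        trace_fscache_cookie(cookie->debug_id,
                             refcount_read(&cookie->ref), where);
}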
diff --git a/include/trace/events/fsi.h b/include/trace/events/fsi.h
index 9832cb8e0eb0..c9a72e8432b8 100644
--- a/include/trace/events/fsi.h
+++ b/include/trace/events/fsi.h
@@ -122,6 +122,92 @@ TRACE_EVENT(fsi_master_break,
)
);
+TRACE_EVENT(fsi_slave_init,
+ TP_PROTO(const struct fsi_slave *slave),
+ TP_ARGS(slave),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, master_n_links)
+ __field(int, idx)
+ __field(int, link)
+ __field(int, chip_id)
+ __field(__u32, cfam_id)
+ __field(__u32, size)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = slave->master->idx;
+ __entry->master_n_links = slave->master->n_links;
+ __entry->idx = slave->cdev_idx;
+ __entry->link = slave->link;
+ __entry->chip_id = slave->chip_id;
+ __entry->cfam_id = slave->cfam_id;
+ __entry->size = slave->size;
+ ),
+ TP_printk("fsi%d: idx:%d link:%d/%d cid:%d cfam:%08x %08x",
+ __entry->master_idx,
+ __entry->idx,
+ __entry->link,
+ __entry->master_n_links,
+ __entry->chip_id,
+ __entry->cfam_id,
+ __entry->size
+ )
+);
+
+TRACE_EVENT(fsi_slave_invalid_cfam,
+ TP_PROTO(const struct fsi_master *master, int link, uint32_t cfam_id),
+ TP_ARGS(master, link, cfam_id),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, master_n_links)
+ __field(int, link)
+ __field(__u32, cfam_id)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->idx;
+ __entry->master_n_links = master->n_links;
+ __entry->link = link;
+ __entry->cfam_id = cfam_id;
+ ),
+ TP_printk("fsi%d: cfam:%08x link:%d/%d",
+ __entry->master_idx,
+ __entry->cfam_id,
+ __entry->link,
+ __entry->master_n_links
+ )
+);
+
+TRACE_EVENT(fsi_dev_init,
+ TP_PROTO(const struct fsi_device *dev),
+ TP_ARGS(dev),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, link)
+ __field(int, type)
+ __field(int, unit)
+ __field(int, version)
+ __field(__u32, addr)
+ __field(__u32, size)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = dev->slave->master->idx;
+ __entry->link = dev->slave->link;
+ __entry->type = dev->engine_type;
+ __entry->unit = dev->unit;
+ __entry->version = dev->version;
+ __entry->addr = dev->addr;
+ __entry->size = dev->size;
+ ),
+ TP_printk("fsi%d: slv%d: t:%02x u:%02x v:%02x %08x@%08x",
+ __entry->master_idx,
+ __entry->link,
+ __entry->type,
+ __entry->unit,
+ __entry->version,
+ __entry->size,
+ __entry->addr
+ )
+);
#endif /* _TRACE_FSI_H */
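The three new events hang off the FSI core's scan path. Roughly how the call sites would look (placement assumed; drivers/fsi/fsi-core.c is the natural home):

static void example_scan(struct fsi_slave *slave, struct fsi_device *dev)
{
        trace_fsi_slave_init(slave);    /* slave probed successfully */
        trace_fsi_dev_init(dev);        /* once per engine on that slave */
}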
diff --git a/include/trace/events/fsi_master_aspeed.h b/include/trace/events/fsi_master_aspeed.h
index a355ceacc33f..0fff873775f1 100644
--- a/include/trace/events/fsi_master_aspeed.h
+++ b/include/trace/events/fsi_master_aspeed.h
@@ -72,6 +72,18 @@ TRACE_EVENT(fsi_master_aspeed_opb_error,
)
);
+TRACE_EVENT(fsi_master_aspeed_cfam_reset,
+ TP_PROTO(bool start),
+ TP_ARGS(start),
+ TP_STRUCT__entry(
+ __field(bool, start)
+ ),
+ TP_fast_assign(
+ __entry->start = start;
+ ),
+ TP_printk("%s", __entry->start ? "start" : "end")
+);
+
#endif
#include <trace/define_trace.h>
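The new event brackets a CFAM reset so the trace shows how long the line was held. A sketch of the driver side (GPIO handling assumed for illustration):

#include <linux/gpio/consumer.h>

static void example_cfam_reset(struct gpio_desc *reset_gpio)
{
        trace_fsi_master_aspeed_cfam_reset(true);       /* prints "start" */
        gpiod_set_value(reset_gpio, 1);
        /* ... hold the reset, then release it ... */
        gpiod_set_value(reset_gpio, 0);
        trace_fsi_master_aspeed_cfam_reset(false);      /* prints "end" */
}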
diff --git a/include/trace/events/gpu_mem.h b/include/trace/events/gpu_mem.h
new file mode 100644
index 000000000000..26d871f96e94
--- /dev/null
+++ b/include/trace/events/gpu_mem.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GPU memory trace points
+ *
+ * Copyright (C) 2020 Google, Inc.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gpu_mem
+
+#if !defined(_TRACE_GPU_MEM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_GPU_MEM_H
+
+#include <linux/tracepoint.h>
+
+/*
+ * The gpu_memory_total event indicates that there's an update to either the
+ * global or process total gpu memory counters.
+ *
+ * This event should be emitted whenever the kernel device driver allocates,
+ * frees, imports, or unimports memory in the GPU addressable space.
+ *
+ * @gpu_id: This is the gpu id.
+ *
+ * @pid: 0 for the global total, or a positive pid for a per-process total.
+ *
+ * @size: Size of the allocation in bytes.
+ *
+ */
+TRACE_EVENT(gpu_mem_total,
+
+ TP_PROTO(uint32_t gpu_id, uint32_t pid, uint64_t size),
+
+ TP_ARGS(gpu_id, pid, size),
+
+ TP_STRUCT__entry(
+ __field(uint32_t, gpu_id)
+ __field(uint32_t, pid)
+ __field(uint64_t, size)
+ ),
+
+ TP_fast_assign(
+ __entry->gpu_id = gpu_id;
+ __entry->pid = pid;
+ __entry->size = size;
+ ),
+
+ TP_printk("gpu_id=%u pid=%u size=%llu",
+ __entry->gpu_id,
+ __entry->pid,
+ __entry->size)
+);
+
+#endif /* _TRACE_GPU_MEM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
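Per the comment block, a driver emits this event with pid 0 for the global counter and a positive pid for a per-process one. A hypothetical update hook:

#include <trace/events/gpu_mem.h>

static void example_report_totals(u32 gpu_id, u32 pid,
                                  u64 global_total, u64 proc_total)
{
        trace_gpu_mem_total(gpu_id, 0, global_total);   /* global counter */
        trace_gpu_mem_total(gpu_id, pid, proc_total);   /* per-process */
}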
diff --git a/include/trace/events/habanalabs.h b/include/trace/events/habanalabs.h
new file mode 100644
index 000000000000..f05c5fa668a2
--- /dev/null
+++ b/include/trace/events/habanalabs.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2021 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM habanalabs
+
+#if !defined(_TRACE_HABANALABS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HABANALABS_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(habanalabs_mmu_template,
+ TP_PROTO(struct device *dev, u64 virt_addr, u64 phys_addr, u32 page_size, bool flush_pte),
+
+ TP_ARGS(dev, virt_addr, phys_addr, page_size, flush_pte),
+
+ TP_STRUCT__entry(
+ __string(dname, dev_name(dev))
+ __field(u64, virt_addr)
+ __field(u64, phys_addr)
+ __field(u32, page_size)
+ __field(u8, flush_pte)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dname, dev_name(dev));
+ __entry->virt_addr = virt_addr;
+ __entry->phys_addr = phys_addr;
+ __entry->page_size = page_size;
+ __entry->flush_pte = flush_pte;
+ ),
+
+ TP_printk("%s: vaddr: %#llx, paddr: %#llx, psize: %#x, flush: %s",
+ __get_str(dname),
+ __entry->virt_addr,
+ __entry->phys_addr,
+ __entry->page_size,
+ __entry->flush_pte ? "true" : "false")
+);
+
+DEFINE_EVENT(habanalabs_mmu_template, habanalabs_mmu_map,
+ TP_PROTO(struct device *dev, u64 virt_addr, u64 phys_addr, u32 page_size, bool flush_pte),
+ TP_ARGS(dev, virt_addr, phys_addr, page_size, flush_pte));
+
+DEFINE_EVENT(habanalabs_mmu_template, habanalabs_mmu_unmap,
+ TP_PROTO(struct device *dev, u64 virt_addr, u64 phys_addr, u32 page_size, bool flush_pte),
+ TP_ARGS(dev, virt_addr, phys_addr, page_size, flush_pte));
+
+DECLARE_EVENT_CLASS(habanalabs_dma_alloc_template,
+ TP_PROTO(struct device *dev, u64 cpu_addr, u64 dma_addr, size_t size, const char *caller),
+
+ TP_ARGS(dev, cpu_addr, dma_addr, size, caller),
+
+ TP_STRUCT__entry(
+ __string(dname, dev_name(dev))
+ __field(u64, cpu_addr)
+ __field(u64, dma_addr)
+ __field(u32, size)
+ __field(const char *, caller)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dname, dev_name(dev));
+ __entry->cpu_addr = cpu_addr;
+ __entry->dma_addr = dma_addr;
+ __entry->size = size;
+ __entry->caller = caller;
+ ),
+
+ TP_printk("%s: cpu_addr: %#llx, dma_addr: %#llx, size: %#x, caller: %s",
+ __get_str(dname),
+ __entry->cpu_addr,
+ __entry->dma_addr,
+ __entry->size,
+ __entry->caller)
+);
+
+DEFINE_EVENT(habanalabs_dma_alloc_template, habanalabs_dma_alloc,
+ TP_PROTO(struct device *dev, u64 cpu_addr, u64 dma_addr, size_t size, const char *caller),
+ TP_ARGS(dev, cpu_addr, dma_addr, size, caller));
+
+DEFINE_EVENT(habanalabs_dma_alloc_template, habanalabs_dma_free,
+ TP_PROTO(struct device *dev, u64 cpu_addr, u64 dma_addr, size_t size, const char *caller),
+ TP_ARGS(dev, cpu_addr, dma_addr, size, caller));
+
+#endif /* if !defined(_TRACE_HABANALABS_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
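Each DECLARE_EVENT_CLASS defines the record layout once, and the DEFINE_EVENT lines stamp out map/unmap (and alloc/free) instances that share it, callable as trace_habanalabs_mmu_map() and so on. A sketch of a call site (placement assumed):

static void example_map(struct device *dev, u64 vaddr, u64 paddr,
                        u32 page_size, bool flush_pte)
{
        /* ... program the device MMU ... */
        trace_habanalabs_mmu_map(dev, vaddr, paddr, page_size, flush_pte);
}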
diff --git a/include/trace/events/hswadsp.h b/include/trace/events/hswadsp.h
deleted file mode 100644
index 939d7a09d73f..000000000000
--- a/include/trace/events/hswadsp.h
+++ /dev/null
@@ -1,385 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hswadsp
-
-#if !defined(_TRACE_HSWADSP_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_HSWADSP_H
-
-#include <linux/types.h>
-#include <linux/ktime.h>
-#include <linux/tracepoint.h>
-
-struct sst_hsw;
-struct sst_hsw_stream;
-struct sst_hsw_ipc_stream_free_req;
-struct sst_hsw_ipc_volume_req;
-struct sst_hsw_ipc_stream_alloc_req;
-struct sst_hsw_audio_data_format_ipc;
-struct sst_hsw_ipc_stream_info_reply;
-struct sst_hsw_ipc_device_config_req;
-
-DECLARE_EVENT_CLASS(sst_irq,
-
- TP_PROTO(uint32_t status, uint32_t mask),
-
- TP_ARGS(status, mask),
-
- TP_STRUCT__entry(
- __field( unsigned int, status )
- __field( unsigned int, mask )
- ),
-
- TP_fast_assign(
- __entry->status = status;
- __entry->mask = mask;
- ),
-
- TP_printk("status 0x%8.8x mask 0x%8.8x",
- (unsigned int)__entry->status, (unsigned int)__entry->mask)
-);
-
-DEFINE_EVENT(sst_irq, sst_irq_busy,
-
- TP_PROTO(unsigned int status, unsigned int mask),
-
- TP_ARGS(status, mask)
-
-);
-
-DEFINE_EVENT(sst_irq, sst_irq_done,
-
- TP_PROTO(unsigned int status, unsigned int mask),
-
- TP_ARGS(status, mask)
-
-);
-
-DECLARE_EVENT_CLASS(ipc,
-
- TP_PROTO(const char *name, int val),
-
- TP_ARGS(name, val),
-
- TP_STRUCT__entry(
- __string( name, name )
- __field( unsigned int, val )
- ),
-
- TP_fast_assign(
- __assign_str(name, name);
- __entry->val = val;
- ),
-
- TP_printk("%s 0x%8.8x", __get_str(name), (unsigned int)__entry->val)
-
-);
-
-DEFINE_EVENT(ipc, ipc_request,
-
- TP_PROTO(const char *name, int val),
-
- TP_ARGS(name, val)
-
-);
-
-DEFINE_EVENT(ipc, ipc_reply,
-
- TP_PROTO(const char *name, int val),
-
- TP_ARGS(name, val)
-
-);
-
-DEFINE_EVENT(ipc, ipc_pending_reply,
-
- TP_PROTO(const char *name, int val),
-
- TP_ARGS(name, val)
-
-);
-
-DEFINE_EVENT(ipc, ipc_notification,
-
- TP_PROTO(const char *name, int val),
-
- TP_ARGS(name, val)
-
-);
-
-DEFINE_EVENT(ipc, ipc_error,
-
- TP_PROTO(const char *name, int val),
-
- TP_ARGS(name, val)
-
-);
-
-DECLARE_EVENT_CLASS(stream_position,
-
- TP_PROTO(unsigned int id, unsigned int pos),
-
- TP_ARGS(id, pos),
-
- TP_STRUCT__entry(
- __field( unsigned int, id )
- __field( unsigned int, pos )
- ),
-
- TP_fast_assign(
- __entry->id = id;
- __entry->pos = pos;
- ),
-
- TP_printk("id %d position 0x%x",
- (unsigned int)__entry->id, (unsigned int)__entry->pos)
-);
-
-DEFINE_EVENT(stream_position, stream_read_position,
-
- TP_PROTO(unsigned int id, unsigned int pos),
-
- TP_ARGS(id, pos)
-
-);
-
-DEFINE_EVENT(stream_position, stream_write_position,
-
- TP_PROTO(unsigned int id, unsigned int pos),
-
- TP_ARGS(id, pos)
-
-);
-
-TRACE_EVENT(hsw_stream_buffer,
-
- TP_PROTO(struct sst_hsw_stream *stream),
-
- TP_ARGS(stream),
-
- TP_STRUCT__entry(
- __field( int, id )
- __field( int, pt_addr )
- __field( int, num_pages )
- __field( int, ring_size )
- __field( int, ring_offset )
- __field( int, first_pfn )
- ),
-
- TP_fast_assign(
- __entry->id = stream->host_id;
- __entry->pt_addr = stream->request.ringinfo.ring_pt_address;
- __entry->num_pages = stream->request.ringinfo.num_pages;
- __entry->ring_size = stream->request.ringinfo.ring_size;
- __entry->ring_offset = stream->request.ringinfo.ring_offset;
- __entry->first_pfn = stream->request.ringinfo.ring_first_pfn;
- ),
-
- TP_printk("stream %d ring addr 0x%x pages %d size 0x%x offset 0x%x PFN 0x%x",
- (int) __entry->id, (int)__entry->pt_addr,
- (int)__entry->num_pages, (int)__entry->ring_size,
- (int)__entry->ring_offset, (int)__entry->first_pfn)
-);
-
-TRACE_EVENT(hsw_stream_alloc_reply,
-
- TP_PROTO(struct sst_hsw_stream *stream),
-
- TP_ARGS(stream),
-
- TP_STRUCT__entry(
- __field( int, id )
- __field( int, stream_id )
- __field( int, mixer_id )
- __field( int, peak0 )
- __field( int, peak1 )
- __field( int, vol0 )
- __field( int, vol1 )
- ),
-
- TP_fast_assign(
- __entry->id = stream->host_id;
- __entry->stream_id = stream->reply.stream_hw_id;
- __entry->mixer_id = stream->reply.mixer_hw_id;
- __entry->peak0 = stream->reply.peak_meter_register_address[0];
- __entry->peak1 = stream->reply.peak_meter_register_address[1];
- __entry->vol0 = stream->reply.volume_register_address[0];
- __entry->vol1 = stream->reply.volume_register_address[1];
- ),
-
- TP_printk("stream %d hw id %d mixer %d peak 0x%x:0x%x vol 0x%x,0x%x",
- (int) __entry->id, (int) __entry->stream_id, (int)__entry->mixer_id,
- (int)__entry->peak0, (int)__entry->peak1,
- (int)__entry->vol0, (int)__entry->vol1)
-);
-
-TRACE_EVENT(hsw_mixer_info_reply,
-
- TP_PROTO(struct sst_hsw_ipc_stream_info_reply *reply),
-
- TP_ARGS(reply),
-
- TP_STRUCT__entry(
- __field( int, mixer_id )
- __field( int, peak0 )
- __field( int, peak1 )
- __field( int, vol0 )
- __field( int, vol1 )
- ),
-
- TP_fast_assign(
- __entry->mixer_id = reply->mixer_hw_id;
- __entry->peak0 = reply->peak_meter_register_address[0];
- __entry->peak1 = reply->peak_meter_register_address[1];
- __entry->vol0 = reply->volume_register_address[0];
- __entry->vol1 = reply->volume_register_address[1];
- ),
-
- TP_printk("mixer id %d peak 0x%x:0x%x vol 0x%x,0x%x",
- (int)__entry->mixer_id,
- (int)__entry->peak0, (int)__entry->peak1,
- (int)__entry->vol0, (int)__entry->vol1)
-);
-
-TRACE_EVENT(hsw_stream_data_format,
-
- TP_PROTO(struct sst_hsw_stream *stream,
- struct sst_hsw_audio_data_format_ipc *req),
-
- TP_ARGS(stream, req),
-
- TP_STRUCT__entry(
- __field( uint32_t, id )
- __field( uint32_t, frequency )
- __field( uint32_t, bitdepth )
- __field( uint32_t, map )
- __field( uint32_t, config )
- __field( uint32_t, style )
- __field( uint8_t, ch_num )
- __field( uint8_t, valid_bit )
- ),
-
- TP_fast_assign(
- __entry->id = stream->host_id;
- __entry->frequency = req->frequency;
- __entry->bitdepth = req->bitdepth;
- __entry->map = req->map;
- __entry->config = req->config;
- __entry->style = req->style;
- __entry->ch_num = req->ch_num;
- __entry->valid_bit = req->valid_bit;
- ),
-
- TP_printk("stream %d freq %d depth %d map 0x%x config 0x%x style 0x%x ch %d bits %d",
- (int) __entry->id, (uint32_t)__entry->frequency,
- (uint32_t)__entry->bitdepth, (uint32_t)__entry->map,
- (uint32_t)__entry->config, (uint32_t)__entry->style,
- (uint8_t)__entry->ch_num, (uint8_t)__entry->valid_bit)
-);
-
-TRACE_EVENT(hsw_stream_alloc_request,
-
- TP_PROTO(struct sst_hsw_stream *stream,
- struct sst_hsw_ipc_stream_alloc_req *req),
-
- TP_ARGS(stream, req),
-
- TP_STRUCT__entry(
- __field( uint32_t, id )
- __field( uint8_t, path_id )
- __field( uint8_t, stream_type )
- __field( uint8_t, format_id )
- ),
-
- TP_fast_assign(
- __entry->id = stream->host_id;
- __entry->path_id = req->path_id;
- __entry->stream_type = req->stream_type;
- __entry->format_id = req->format_id;
- ),
-
- TP_printk("stream %d path %d type %d format %d",
- (int) __entry->id, (uint8_t)__entry->path_id,
- (uint8_t)__entry->stream_type, (uint8_t)__entry->format_id)
-);
-
-TRACE_EVENT(hsw_stream_free_req,
-
- TP_PROTO(struct sst_hsw_stream *stream,
- struct sst_hsw_ipc_stream_free_req *req),
-
- TP_ARGS(stream, req),
-
- TP_STRUCT__entry(
- __field( int, id )
- __field( int, stream_id )
- ),
-
- TP_fast_assign(
- __entry->id = stream->host_id;
- __entry->stream_id = req->stream_id;
- ),
-
- TP_printk("stream %d hw id %d",
- (int) __entry->id, (int) __entry->stream_id)
-);
-
-TRACE_EVENT(hsw_volume_req,
-
- TP_PROTO(struct sst_hsw_stream *stream,
- struct sst_hsw_ipc_volume_req *req),
-
- TP_ARGS(stream, req),
-
- TP_STRUCT__entry(
- __field( int, id )
- __field( uint32_t, channel )
- __field( uint32_t, target_volume )
- __field( uint64_t, curve_duration )
- __field( uint32_t, curve_type )
- ),
-
- TP_fast_assign(
- __entry->id = stream->host_id;
- __entry->channel = req->channel;
- __entry->target_volume = req->target_volume;
- __entry->curve_duration = req->curve_duration;
- __entry->curve_type = req->curve_type;
- ),
-
- TP_printk("stream %d chan 0x%x vol %d duration %llu type %d",
- (int) __entry->id, (uint32_t) __entry->channel,
- (uint32_t)__entry->target_volume,
- (uint64_t)__entry->curve_duration,
- (uint32_t)__entry->curve_type)
-);
-
-TRACE_EVENT(hsw_device_config_req,
-
- TP_PROTO(struct sst_hsw_ipc_device_config_req *req),
-
- TP_ARGS(req),
-
- TP_STRUCT__entry(
- __field( uint32_t, ssp )
- __field( uint32_t, clock_freq )
- __field( uint32_t, mode )
- __field( uint16_t, clock_divider )
- ),
-
- TP_fast_assign(
- __entry->ssp = req->ssp_interface;
- __entry->clock_freq = req->clock_frequency;
- __entry->mode = req->mode;
- __entry->clock_divider = req->clock_divider;
- ),
-
- TP_printk("SSP %d Freq %d mode %d div %d",
- (uint32_t)__entry->ssp,
- (uint32_t)__entry->clock_freq, (uint32_t)__entry->mode,
- (uint32_t)__entry->clock_divider)
-);
-
-#endif /* _TRACE_HSWADSP_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index d82a0f4e824d..935af4947917 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -11,8 +11,14 @@
EM( SCAN_FAIL, "failed") \
EM( SCAN_SUCCEED, "succeeded") \
EM( SCAN_PMD_NULL, "pmd_null") \
+ EM( SCAN_PMD_NONE, "pmd_none") \
+ EM( SCAN_PMD_MAPPED, "page_pmd_mapped") \
EM( SCAN_EXCEED_NONE_PTE, "exceed_none_pte") \
+ EM( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") \
+ EM( SCAN_EXCEED_SHARED_PTE, "exceed_shared_pte") \
EM( SCAN_PTE_NON_PRESENT, "pte_non_present") \
+ EM( SCAN_PTE_UFFD_WP, "pte_uffd_wp") \
+ EM( SCAN_PTE_MAPPED_HUGEPAGE, "pte_mapped_hugepage") \
EM( SCAN_PAGE_RO, "no_writable_page") \
EM( SCAN_LACK_REFERENCED_PAGE, "lack_referenced_page") \
EM( SCAN_PAGE_NULL, "page_null") \
@@ -26,11 +32,9 @@
EM( SCAN_VMA_NULL, "vma_null") \
EM( SCAN_VMA_CHECK, "vma_check_failed") \
EM( SCAN_ADDRESS_RANGE, "not_suitable_address_range") \
- EM( SCAN_SWAP_CACHE_PAGE, "page_swap_cache") \
EM( SCAN_DEL_PAGE_LRU, "could_not_delete_page_from_lru")\
EM( SCAN_ALLOC_HUGE_PAGE_FAIL, "alloc_huge_page_failed") \
EM( SCAN_CGROUP_CHARGE_FAIL, "ccgroup_charge_failed") \
- EM( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") \
EM( SCAN_TRUNCATED, "truncated") \
EMe(SCAN_PAGE_HAS_PRIVATE, "page_has_private") \
@@ -165,5 +169,39 @@ TRACE_EVENT(mm_collapse_huge_page_swapin,
__entry->ret)
);
+TRACE_EVENT(mm_khugepaged_scan_file,
+
+ TP_PROTO(struct mm_struct *mm, struct page *page, const char *filename,
+ int present, int swap, int result),
+
+ TP_ARGS(mm, page, filename, present, swap, result),
+
+ TP_STRUCT__entry(
+ __field(struct mm_struct *, mm)
+ __field(unsigned long, pfn)
+ __string(filename, filename)
+ __field(int, present)
+ __field(int, swap)
+ __field(int, result)
+ ),
+
+ TP_fast_assign(
+ __entry->mm = mm;
+ __entry->pfn = page ? page_to_pfn(page) : -1;
+ __assign_str(filename, filename);
+ __entry->present = present;
+ __entry->swap = swap;
+ __entry->result = result;
+ ),
+
+ TP_printk("mm=%p, scan_pfn=0x%lx, filename=%s, present=%d, swap=%d, result=%s",
+ __entry->mm,
+ __entry->pfn,
+ __get_str(filename),
+ __entry->present,
+ __entry->swap,
+ __print_symbolic(__entry->result, SCAN_STATUS))
+);
+
#endif /* __HUGE_MEMORY_H */
#include <trace/define_trace.h>
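The EM()/EMe() entries are expanded twice: once as TRACE_DEFINE_ENUM() so userspace tools can resolve the enum values, then again as {value, "string"} pairs for __print_symbolic(). Paraphrasing the pattern this header uses:

#undef EM
#undef EMe
#define EM(a, b)        TRACE_DEFINE_ENUM(a);
#define EMe(a, b)       TRACE_DEFINE_ENUM(a);

SCAN_STATUS             /* pass 1: export the enum values */

#undef EM
#undef EMe
#define EM(a, b)        {a, b},
#define EMe(a, b)       {a, b}

/* pass 2: SCAN_STATUS now expands to the table handed to
 * __print_symbolic(__entry->result, SCAN_STATUS) */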
diff --git a/include/trace/events/i2c_slave.h b/include/trace/events/i2c_slave.h
new file mode 100644
index 000000000000..811166abbe3a
--- /dev/null
+++ b/include/trace/events/i2c_slave.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * I2C slave tracepoints
+ *
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM i2c_slave
+
+#if !defined(_TRACE_I2C_SLAVE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_I2C_SLAVE_H
+
+#include <linux/i2c.h>
+#include <linux/tracepoint.h>
+
+TRACE_DEFINE_ENUM(I2C_SLAVE_READ_REQUESTED);
+TRACE_DEFINE_ENUM(I2C_SLAVE_WRITE_REQUESTED);
+TRACE_DEFINE_ENUM(I2C_SLAVE_READ_PROCESSED);
+TRACE_DEFINE_ENUM(I2C_SLAVE_WRITE_RECEIVED);
+TRACE_DEFINE_ENUM(I2C_SLAVE_STOP);
+
+#define show_event_type(type) \
+ __print_symbolic(type, \
+ { I2C_SLAVE_READ_REQUESTED, "RD_REQ" }, \
+ { I2C_SLAVE_WRITE_REQUESTED, "WR_REQ" }, \
+ { I2C_SLAVE_READ_PROCESSED, "RD_PRO" }, \
+ { I2C_SLAVE_WRITE_RECEIVED, "WR_RCV" }, \
+ { I2C_SLAVE_STOP, " STOP" })
+
+TRACE_EVENT(i2c_slave,
+ TP_PROTO(const struct i2c_client *client, enum i2c_slave_event event,
+ __u8 *val, int cb_ret),
+ TP_ARGS(client, event, val, cb_ret),
+ TP_STRUCT__entry(
+ __field(int, adapter_nr )
+ __field(int, ret )
+ __field(__u16, addr )
+ __field(__u16, len )
+ __field(enum i2c_slave_event, event )
+ __array(__u8, buf, 1) ),
+
+ TP_fast_assign(
+ __entry->adapter_nr = client->adapter->nr;
+ __entry->addr = client->addr;
+ __entry->event = event;
+ __entry->ret = cb_ret;
+ switch (event) {
+ case I2C_SLAVE_READ_REQUESTED:
+ case I2C_SLAVE_READ_PROCESSED:
+ case I2C_SLAVE_WRITE_RECEIVED:
+ __entry->len = 1;
+ memcpy(__entry->buf, val, __entry->len);
+ break;
+ default:
+ __entry->len = 0;
+ break;
+ }
+ ),
+ TP_printk("i2c-%d a=%03x ret=%d %s [%*phD]",
+ __entry->adapter_nr, __entry->addr, __entry->ret,
+ show_event_type(__entry->event), __entry->len, __entry->buf
+ ));
+
+#endif /* _TRACE_I2C_SLAVE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
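The TP_fast_assign copies the data byte only for the three event types that actually carry one; STOP and WR_REQ log a zero-length buffer. A sketch of a controller driver reporting an event and tracing the backend's verdict (exact trace-call placement assumed):

#include <linux/i2c.h>

static int example_write_received(struct i2c_client *client, u8 *val)
{
        int ret = i2c_slave_event(client, I2C_SLAVE_WRITE_RECEIVED, val);

        trace_i2c_slave(client, I2C_SLAVE_WRITE_RECEIVED, val, ret);
        return ret;
}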
diff --git a/include/trace/events/intel_ifs.h b/include/trace/events/intel_ifs.h
new file mode 100644
index 000000000000..d7353024016c
--- /dev/null
+++ b/include/trace/events/intel_ifs.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM intel_ifs
+
+#if !defined(_TRACE_IFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_IFS_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(ifs_status,
+
+ TP_PROTO(int cpu, union ifs_scan activate, union ifs_status status),
+
+ TP_ARGS(cpu, activate, status),
+
+ TP_STRUCT__entry(
+ __field( u64, status )
+ __field( int, cpu )
+ __field( u8, start )
+ __field( u8, stop )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->start = activate.start;
+ __entry->stop = activate.stop;
+ __entry->status = status.data;
+ ),
+
+ TP_printk("cpu: %d, start: %.2x, stop: %.2x, status: %llx",
+ __entry->cpu,
+ __entry->start,
+ __entry->stop,
+ __entry->status)
+);
+
+#endif /* _TRACE_IFS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
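The event snapshots the requested scan window (start/stop chunk) and the raw status word after a run. A sketch of the reporting side; union ifs_scan and union ifs_status live in the IFS driver's private headers, so their shapes are assumed here:

static void example_report(int cpu, union ifs_scan activate,
                           union ifs_status status)
{
        trace_ifs_status(cpu, activate, status);
}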
diff --git a/include/trace/events/intel_iommu.h b/include/trace/events/intel_iommu.h
deleted file mode 100644
index 112bd06487bf..000000000000
--- a/include/trace/events/intel_iommu.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Intel IOMMU trace support
- *
- * Copyright (C) 2019 Intel Corporation
- *
- * Author: Lu Baolu <baolu.lu@linux.intel.com>
- */
-#ifdef CONFIG_INTEL_IOMMU
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM intel_iommu
-
-#if !defined(_TRACE_INTEL_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_INTEL_IOMMU_H
-
-#include <linux/tracepoint.h>
-#include <linux/intel-iommu.h>
-
-DECLARE_EVENT_CLASS(dma_map,
- TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
- size_t size),
-
- TP_ARGS(dev, dev_addr, phys_addr, size),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name(dev))
- __field(dma_addr_t, dev_addr)
- __field(phys_addr_t, phys_addr)
- __field(size_t, size)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name, dev_name(dev));
- __entry->dev_addr = dev_addr;
- __entry->phys_addr = phys_addr;
- __entry->size = size;
- ),
-
- TP_printk("dev=%s dev_addr=0x%llx phys_addr=0x%llx size=%zu",
- __get_str(dev_name),
- (unsigned long long)__entry->dev_addr,
- (unsigned long long)__entry->phys_addr,
- __entry->size)
-);
-
-DEFINE_EVENT(dma_map, map_single,
- TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
- size_t size),
- TP_ARGS(dev, dev_addr, phys_addr, size)
-);
-
-DEFINE_EVENT(dma_map, bounce_map_single,
- TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
- size_t size),
- TP_ARGS(dev, dev_addr, phys_addr, size)
-);
-
-DECLARE_EVENT_CLASS(dma_unmap,
- TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size),
-
- TP_ARGS(dev, dev_addr, size),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name(dev))
- __field(dma_addr_t, dev_addr)
- __field(size_t, size)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name, dev_name(dev));
- __entry->dev_addr = dev_addr;
- __entry->size = size;
- ),
-
- TP_printk("dev=%s dev_addr=0x%llx size=%zu",
- __get_str(dev_name),
- (unsigned long long)__entry->dev_addr,
- __entry->size)
-);
-
-DEFINE_EVENT(dma_unmap, unmap_single,
- TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size),
- TP_ARGS(dev, dev_addr, size)
-);
-
-DEFINE_EVENT(dma_unmap, unmap_sg,
- TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size),
- TP_ARGS(dev, dev_addr, size)
-);
-
-DEFINE_EVENT(dma_unmap, bounce_unmap_single,
- TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size),
- TP_ARGS(dev, dev_addr, size)
-);
-
-DECLARE_EVENT_CLASS(dma_map_sg,
- TP_PROTO(struct device *dev, int index, int total,
- struct scatterlist *sg),
-
- TP_ARGS(dev, index, total, sg),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name(dev))
- __field(dma_addr_t, dev_addr)
- __field(phys_addr_t, phys_addr)
- __field(size_t, size)
- __field(int, index)
- __field(int, total)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name, dev_name(dev));
- __entry->dev_addr = sg->dma_address;
- __entry->phys_addr = sg_phys(sg);
- __entry->size = sg->dma_length;
- __entry->index = index;
- __entry->total = total;
- ),
-
- TP_printk("dev=%s [%d/%d] dev_addr=0x%llx phys_addr=0x%llx size=%zu",
- __get_str(dev_name), __entry->index, __entry->total,
- (unsigned long long)__entry->dev_addr,
- (unsigned long long)__entry->phys_addr,
- __entry->size)
-);
-
-DEFINE_EVENT(dma_map_sg, map_sg,
- TP_PROTO(struct device *dev, int index, int total,
- struct scatterlist *sg),
- TP_ARGS(dev, index, total, sg)
-);
-
-DEFINE_EVENT(dma_map_sg, bounce_map_sg,
- TP_PROTO(struct device *dev, int index, int total,
- struct scatterlist *sg),
- TP_ARGS(dev, index, total, sg)
-);
-#endif /* _TRACE_INTEL_IOMMU_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
-#endif /* CONFIG_INTEL_IOMMU */
diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
index 27bd9e4f927b..936fd41bf147 100644
--- a/include/trace/events/io_uring.h
+++ b/include/trace/events/io_uring.h
@@ -6,17 +6,20 @@
#define _TRACE_IO_URING_H
#include <linux/tracepoint.h>
+#include <uapi/linux/io_uring.h>
+#include <linux/io_uring_types.h>
+#include <linux/io_uring.h>
struct io_wq_work;
/**
* io_uring_create - called after a new io_uring context was prepared
*
- * @fd: corresponding file descriptor
- * @ctx: pointer to a ring context structure
+ * @fd: corresponding file descriptor
+ * @ctx: pointer to a ring context structure
* @sq_entries: actual SQ size
* @cq_entries: actual CQ size
- * @flags: SQ ring flags, provided to io_uring_setup(2)
+ * @flags: SQ ring flags, provided to io_uring_setup(2)
*
* Allows to trace io_uring creation and provide pointer to a context, that can
* be used later to find correlated events.
@@ -28,38 +31,37 @@ TRACE_EVENT(io_uring_create,
TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),
TP_STRUCT__entry (
- __field( int, fd )
- __field( void *, ctx )
+ __field( int, fd )
+ __field( void *, ctx )
__field( u32, sq_entries )
__field( u32, cq_entries )
__field( u32, flags )
),
TP_fast_assign(
- __entry->fd = fd;
+ __entry->fd = fd;
__entry->ctx = ctx;
__entry->sq_entries = sq_entries;
__entry->cq_entries = cq_entries;
__entry->flags = flags;
),
- TP_printk("ring %p, fd %d sq size %d, cq size %d, flags %d",
+ TP_printk("ring %p, fd %d sq size %d, cq size %d, flags 0x%x",
__entry->ctx, __entry->fd, __entry->sq_entries,
__entry->cq_entries, __entry->flags)
);
/**
- * io_uring_register - called after a buffer/file/eventfd was succesfully
+ * io_uring_register - called after a buffer/file/eventfd was successfully
* registered for a ring
*
- * @ctx: pointer to a ring context structure
- * @opcode: describes which operation to perform
+ * @ctx: pointer to a ring context structure
+ * @opcode: describes which operation to perform
* @nr_user_files: number of registered files
* @nr_user_bufs: number of registered buffers
- * @cq_ev_fd: whether eventfs registered or not
- * @ret: return code
+ * @ret: return code
*
- * Allows to trace fixed files/buffers/eventfds, that could be registered to
+ * Allows to trace fixed files/buffers that could be registered to
* avoid an overhead of getting references to them for every operation. This
* event, together with io_uring_file_get, can provide a full picture of how
* much overhead one can reduce via fixing.
@@ -67,17 +69,16 @@ TRACE_EVENT(io_uring_create,
TRACE_EVENT(io_uring_register,
TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files,
- unsigned nr_bufs, bool eventfd, long ret),
+ unsigned nr_bufs, long ret),
- TP_ARGS(ctx, opcode, nr_files, nr_bufs, eventfd, ret),
+ TP_ARGS(ctx, opcode, nr_files, nr_bufs, ret),
TP_STRUCT__entry (
- __field( void *, ctx )
- __field( unsigned, opcode )
- __field( unsigned, nr_files )
- __field( unsigned, nr_bufs )
- __field( bool, eventfd )
- __field( long, ret )
+ __field( void *, ctx )
+ __field( unsigned, opcode )
+ __field( unsigned, nr_files)
+ __field( unsigned, nr_bufs )
+ __field( long, ret )
),
TP_fast_assign(
@@ -85,20 +86,19 @@ TRACE_EVENT(io_uring_register,
__entry->opcode = opcode;
__entry->nr_files = nr_files;
__entry->nr_bufs = nr_bufs;
- __entry->eventfd = eventfd;
__entry->ret = ret;
),
TP_printk("ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, "
- "eventfd %d, ret %ld",
+ "ret %ld",
__entry->ctx, __entry->opcode, __entry->nr_files,
- __entry->nr_bufs, __entry->eventfd, __entry->ret)
+ __entry->nr_bufs, __entry->ret)
);
/**
* io_uring_file_get - called before getting references to an SQE file
*
- * @ctx: pointer to a ring context structure
+ * @req: pointer to a submitted request
* @fd: SQE file descriptor
*
* Allows to trace out how often an SQE file reference is obtained, which can
@@ -107,99 +107,114 @@ TRACE_EVENT(io_uring_register,
*/
TRACE_EVENT(io_uring_file_get,
- TP_PROTO(void *ctx, int fd),
+ TP_PROTO(struct io_kiocb *req, int fd),
- TP_ARGS(ctx, fd),
+ TP_ARGS(req, fd),
TP_STRUCT__entry (
- __field( void *, ctx )
- __field( int, fd )
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( u64, user_data )
+ __field( int, fd )
),
TP_fast_assign(
- __entry->ctx = ctx;
+ __entry->ctx = req->ctx;
+ __entry->req = req;
+ __entry->user_data = req->cqe.user_data;
__entry->fd = fd;
),
- TP_printk("ring %p, fd %d", __entry->ctx, __entry->fd)
+ TP_printk("ring %p, req %p, user_data 0x%llx, fd %d",
+ __entry->ctx, __entry->req, __entry->user_data, __entry->fd)
);
/**
* io_uring_queue_async_work - called before submitting a new async work
*
- * @ctx: pointer to a ring context structure
- * @hashed: type of workqueue, hashed or normal
* @req: pointer to a submitted request
- * @work: pointer to a submitted io_wq_work
+ * @rw: type of workqueue, hashed or normal
*
* Allows to trace asynchronous work submission.
*/
TRACE_EVENT(io_uring_queue_async_work,
- TP_PROTO(void *ctx, int rw, void * req, struct io_wq_work *work,
- unsigned int flags),
+ TP_PROTO(struct io_kiocb *req, int rw),
- TP_ARGS(ctx, rw, req, work, flags),
+ TP_ARGS(req, rw),
TP_STRUCT__entry (
- __field( void *, ctx )
- __field( int, rw )
- __field( void *, req )
- __field( struct io_wq_work *, work )
- __field( unsigned int, flags )
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( u64, user_data )
+ __field( u8, opcode )
+ __field( unsigned int, flags )
+ __field( struct io_wq_work *, work )
+ __field( int, rw )
+
+ __string( op_str, io_uring_get_opcode(req->opcode) )
),
TP_fast_assign(
- __entry->ctx = ctx;
+ __entry->ctx = req->ctx;
+ __entry->req = req;
+ __entry->user_data = req->cqe.user_data;
+ __entry->flags = req->flags;
+ __entry->opcode = req->opcode;
+ __entry->work = &req->work;
__entry->rw = rw;
- __entry->req = req;
- __entry->work = work;
- __entry->flags = flags;
+
+ __assign_str(op_str, io_uring_get_opcode(req->opcode));
),
- TP_printk("ring %p, request %p, flags %d, %s queue, work %p",
- __entry->ctx, __entry->req, __entry->flags,
- __entry->rw ? "hashed" : "normal", __entry->work)
+ TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%x, %s queue, work %p",
+ __entry->ctx, __entry->req, __entry->user_data,
+ __get_str(op_str),
+ __entry->flags, __entry->rw ? "hashed" : "normal", __entry->work)
);
/**
* io_uring_defer - called when an io_uring request is deferred
*
- * @ctx: pointer to a ring context structure
* @req: pointer to a deferred request
- * @user_data: user data associated with the request
*
* Allows to track deferred requests, to get an insight about what requests are
* not started immediately.
*/
TRACE_EVENT(io_uring_defer,
- TP_PROTO(void *ctx, void *req, unsigned long long user_data),
+ TP_PROTO(struct io_kiocb *req),
- TP_ARGS(ctx, req, user_data),
+ TP_ARGS(req),
TP_STRUCT__entry (
- __field( void *, ctx )
- __field( void *, req )
- __field( unsigned long long, data )
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( unsigned long long, data )
+ __field( u8, opcode )
+
+ __string( op_str, io_uring_get_opcode(req->opcode) )
),
TP_fast_assign(
- __entry->ctx = ctx;
+ __entry->ctx = req->ctx;
__entry->req = req;
- __entry->data = user_data;
+ __entry->data = req->cqe.user_data;
+ __entry->opcode = req->opcode;
+
+ __assign_str(op_str, io_uring_get_opcode(req->opcode));
),
- TP_printk("ring %p, request %p user_data %llu", __entry->ctx,
- __entry->req, __entry->data)
+ TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s",
+ __entry->ctx, __entry->req, __entry->data,
+ __get_str(op_str))
);
/**
* io_uring_link - called before the io_uring request added into link_list of
- * another request
+ * another request
*
- * @ctx: pointer to a ring context structure
- * @req: pointer to a linked request
+ * @req: pointer to a linked request
* @target_req: pointer to a previous request, that would contain @req
*
* Allows to track linked requests, to understand dependencies between requests
@@ -207,18 +222,18 @@ TRACE_EVENT(io_uring_defer,
*/
TRACE_EVENT(io_uring_link,
- TP_PROTO(void *ctx, void *req, void *target_req),
+ TP_PROTO(struct io_kiocb *req, struct io_kiocb *target_req),
- TP_ARGS(ctx, req, target_req),
+ TP_ARGS(req, target_req),
TP_STRUCT__entry (
- __field( void *, ctx )
- __field( void *, req )
+ __field( void *, ctx )
+ __field( void *, req )
__field( void *, target_req )
),
TP_fast_assign(
- __entry->ctx = ctx;
+ __entry->ctx = req->ctx;
__entry->req = req;
__entry->target_req = target_req;
),
@@ -244,12 +259,12 @@ TRACE_EVENT(io_uring_cqring_wait,
TP_ARGS(ctx, min_events),
TP_STRUCT__entry (
- __field( void *, ctx )
+ __field( void *, ctx )
__field( int, min_events )
),
TP_fast_assign(
- __entry->ctx = ctx;
+ __entry->ctx = ctx;
__entry->min_events = min_events;
),
@@ -267,94 +282,406 @@ TRACE_EVENT(io_uring_cqring_wait,
*/
TRACE_EVENT(io_uring_fail_link,
- TP_PROTO(void *req, void *link),
+ TP_PROTO(struct io_kiocb *req, struct io_kiocb *link),
TP_ARGS(req, link),
TP_STRUCT__entry (
- __field( void *, req )
- __field( void *, link )
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( unsigned long long, user_data )
+ __field( u8, opcode )
+ __field( void *, link )
+
+ __string( op_str, io_uring_get_opcode(req->opcode) )
),
TP_fast_assign(
- __entry->req = req;
- __entry->link = link;
+ __entry->ctx = req->ctx;
+ __entry->req = req;
+ __entry->user_data = req->cqe.user_data;
+ __entry->opcode = req->opcode;
+ __entry->link = link;
+
+ __assign_str(op_str, io_uring_get_opcode(req->opcode));
),
- TP_printk("request %p, link %p", __entry->req, __entry->link)
+ TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, link %p",
+ __entry->ctx, __entry->req, __entry->user_data,
+ __get_str(op_str), __entry->link)
);
/**
* io_uring_complete - called when completing an SQE
*
* @ctx: pointer to a ring context structure
+ * @req: pointer to a submitted request
* @user_data: user data associated with the request
* @res: result of the request
+ * @cflags: completion flags
+ * @extra1: extra 64-bit data for CQE32
+ * @extra2: extra 64-bit data for CQE32
*
*/
TRACE_EVENT(io_uring_complete,
- TP_PROTO(void *ctx, u64 user_data, long res),
+ TP_PROTO(void *ctx, void *req, u64 user_data, int res, unsigned cflags,
+ u64 extra1, u64 extra2),
- TP_ARGS(ctx, user_data, res),
+ TP_ARGS(ctx, req, user_data, res, cflags, extra1, extra2),
TP_STRUCT__entry (
__field( void *, ctx )
+ __field( void *, req )
__field( u64, user_data )
- __field( long, res )
+ __field( int, res )
+ __field( unsigned, cflags )
+ __field( u64, extra1 )
+ __field( u64, extra2 )
),
TP_fast_assign(
__entry->ctx = ctx;
+ __entry->req = req;
__entry->user_data = user_data;
__entry->res = res;
+ __entry->cflags = cflags;
+ __entry->extra1 = extra1;
+ __entry->extra2 = extra2;
),
- TP_printk("ring %p, user_data 0x%llx, result %ld",
- __entry->ctx, (unsigned long long)__entry->user_data,
- __entry->res)
+ TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x "
+ "extra1 %llu extra2 %llu ",
+ __entry->ctx, __entry->req,
+ __entry->user_data,
+ __entry->res, __entry->cflags,
+ (unsigned long long) __entry->extra1,
+ (unsigned long long) __entry->extra2)
);
-
/**
* io_uring_submit_sqe - called before submitting one SQE
*
- * @ctx: pointer to a ring context structure
- * @opcode: opcode of request
- * @user_data: user data associated with the request
+ * @req: pointer to a submitted request
 * @force_nonblock: whether a context is blocking or not
- * @sq_thread: true if sq_thread has submitted this SQE
*
* Allows to track SQE submitting, to understand what was the source of it, SQ
* thread or io_uring_enter call.
*/
TRACE_EVENT(io_uring_submit_sqe,
- TP_PROTO(void *ctx, u8 opcode, u64 user_data, bool force_nonblock,
- bool sq_thread),
+ TP_PROTO(struct io_kiocb *req, bool force_nonblock),
- TP_ARGS(ctx, opcode, user_data, force_nonblock, sq_thread),
+ TP_ARGS(req, force_nonblock),
TP_STRUCT__entry (
- __field( void *, ctx )
- __field( u8, opcode )
- __field( u64, user_data )
- __field( bool, force_nonblock )
- __field( bool, sq_thread )
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( unsigned long long, user_data )
+ __field( u8, opcode )
+ __field( u32, flags )
+ __field( bool, force_nonblock )
+ __field( bool, sq_thread )
+
+ __string( op_str, io_uring_get_opcode(req->opcode) )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = req->ctx;
+ __entry->req = req;
+ __entry->user_data = req->cqe.user_data;
+ __entry->opcode = req->opcode;
+ __entry->flags = req->flags;
+ __entry->force_nonblock = force_nonblock;
+ __entry->sq_thread = req->ctx->flags & IORING_SETUP_SQPOLL;
+
+ __assign_str(op_str, io_uring_get_opcode(req->opcode));
+ ),
+
+ TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%x, "
+ "non block %d, sq_thread %d", __entry->ctx, __entry->req,
+ __entry->user_data, __get_str(op_str),
+ __entry->flags, __entry->force_nonblock, __entry->sq_thread)
+);
+
+/*
+ * io_uring_poll_arm - called after arming a poll wait if successful
+ *
+ * @req: pointer to the armed request
+ * @mask: request poll events mask
+ * @events: registered events of interest
+ *
+ * Allows to track what the fds are waiting for and which events are of
+ * interest.
+ */
+TRACE_EVENT(io_uring_poll_arm,
+
+ TP_PROTO(struct io_kiocb *req, int mask, int events),
+
+ TP_ARGS(req, mask, events),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( unsigned long long, user_data )
+ __field( u8, opcode )
+ __field( int, mask )
+ __field( int, events )
+
+ __string( op_str, io_uring_get_opcode(req->opcode) )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = req->ctx;
+ __entry->req = req;
+ __entry->user_data = req->cqe.user_data;
+ __entry->opcode = req->opcode;
+ __entry->mask = mask;
+ __entry->events = events;
+
+ __assign_str(op_str, io_uring_get_opcode(req->opcode));
+ ),
+
+ TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask 0x%x, events 0x%x",
+ __entry->ctx, __entry->req, __entry->user_data,
+ __get_str(op_str),
+ __entry->mask, __entry->events)
+);
+
+/*
+ * io_uring_task_add - called after adding a task
+ *
+ * @req: pointer to request
+ * @mask: request poll events mask
+ *
+ */
+TRACE_EVENT(io_uring_task_add,
+
+ TP_PROTO(struct io_kiocb *req, int mask),
+
+ TP_ARGS(req, mask),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( unsigned long long, user_data )
+ __field( u8, opcode )
+ __field( int, mask )
+
+ __string( op_str, io_uring_get_opcode(req->opcode) )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = req->ctx;
+ __entry->req = req;
+ __entry->user_data = req->cqe.user_data;
+ __entry->opcode = req->opcode;
+ __entry->mask = mask;
+
+ __assign_str(op_str, io_uring_get_opcode(req->opcode));
+ ),
+
+ TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask %x",
+ __entry->ctx, __entry->req, __entry->user_data,
+ __get_str(op_str),
+ __entry->mask)
+);
+
+/*
+ * io_uring_req_failed - called when an SQE is errored during submission
+ *
+ * @sqe: pointer to the io_uring_sqe that failed
+ * @req: pointer to request
+ * @error: error it failed with
+ *
+ * Allows easier diagnosing of malformed requests in production systems.
+ */
+TRACE_EVENT(io_uring_req_failed,
+
+ TP_PROTO(const struct io_uring_sqe *sqe, struct io_kiocb *req, int error),
+
+ TP_ARGS(sqe, req, error),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( unsigned long long, user_data )
+ __field( u8, opcode )
+ __field( u8, flags )
+ __field( u8, ioprio )
+ __field( u64, off )
+ __field( u64, addr )
+ __field( u32, len )
+ __field( u32, op_flags )
+ __field( u16, buf_index )
+ __field( u16, personality )
+ __field( u32, file_index )
+ __field( u64, pad1 )
+ __field( u64, addr3 )
+ __field( int, error )
+
+ __string( op_str, io_uring_get_opcode(sqe->opcode) )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = req->ctx;
+ __entry->req = req;
+ __entry->user_data = sqe->user_data;
+ __entry->opcode = sqe->opcode;
+ __entry->flags = sqe->flags;
+ __entry->ioprio = sqe->ioprio;
+ __entry->off = sqe->off;
+ __entry->addr = sqe->addr;
+ __entry->len = sqe->len;
+ __entry->op_flags = sqe->poll32_events;
+ __entry->buf_index = sqe->buf_index;
+ __entry->personality = sqe->personality;
+ __entry->file_index = sqe->file_index;
+ __entry->pad1 = sqe->__pad2[0];
+ __entry->addr3 = sqe->addr3;
+ __entry->error = error;
+
+ __assign_str(op_str, io_uring_get_opcode(sqe->opcode));
+ ),
+
+ TP_printk("ring %p, req %p, user_data 0x%llx, "
+ "opcode %s, flags 0x%x, prio=%d, off=%llu, addr=%llu, "
+ "len=%u, rw_flags=0x%x, buf_index=%d, "
+ "personality=%d, file_index=%d, pad=0x%llx, addr3=%llx, "
+ "error=%d",
+ __entry->ctx, __entry->req, __entry->user_data,
+ __get_str(op_str),
+ __entry->flags, __entry->ioprio,
+ (unsigned long long)__entry->off,
+ (unsigned long long) __entry->addr, __entry->len,
+ __entry->op_flags,
+ __entry->buf_index, __entry->personality, __entry->file_index,
+ (unsigned long long) __entry->pad1,
+ (unsigned long long) __entry->addr3, __entry->error)
+);
+
+
+/*
+ * io_uring_cqe_overflow - a CQE overflowed
+ *
+ * @ctx: pointer to a ring context structure
+ * @user_data: user data associated with the request
+ * @res: CQE result
+ * @cflags: CQE flags
+ * @ocqe: pointer to the overflow cqe (if available)
+ *
+ */
+TRACE_EVENT(io_uring_cqe_overflow,
+
+ TP_PROTO(void *ctx, unsigned long long user_data, s32 res, u32 cflags,
+ void *ocqe),
+
+ TP_ARGS(ctx, user_data, res, cflags, ocqe),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( unsigned long long, user_data )
+ __field( s32, res )
+ __field( u32, cflags )
+ __field( void *, ocqe )
),
TP_fast_assign(
__entry->ctx = ctx;
- __entry->opcode = opcode;
__entry->user_data = user_data;
- __entry->force_nonblock = force_nonblock;
- __entry->sq_thread = sq_thread;
+ __entry->res = res;
+ __entry->cflags = cflags;
+ __entry->ocqe = ocqe;
+ ),
+
+ TP_printk("ring %p, user_data 0x%llx, res %d, cflags 0x%x, "
+ "overflow_cqe %p",
+ __entry->ctx, __entry->user_data, __entry->res,
+ __entry->cflags, __entry->ocqe)
+);
+
+/*
+ * io_uring_task_work_run - ran task work
+ *
+ * @tctx: pointer to an io_uring_task
+ * @count: how many functions it ran
+ * @loops: how many loops it ran
+ *
+ */
+TRACE_EVENT(io_uring_task_work_run,
+
+ TP_PROTO(void *tctx, unsigned int count, unsigned int loops),
+
+ TP_ARGS(tctx, count, loops),
+
+ TP_STRUCT__entry (
+ __field( void *, tctx )
+ __field( unsigned int, count )
+ __field( unsigned int, loops )
+ ),
+
+ TP_fast_assign(
+ __entry->tctx = tctx;
+ __entry->count = count;
+ __entry->loops = loops;
+ ),
+
+ TP_printk("tctx %p, count %u, loops %u",
+ __entry->tctx, __entry->count, __entry->loops)
+);
+
+TRACE_EVENT(io_uring_short_write,
+
+ TP_PROTO(void *ctx, u64 fpos, u64 wanted, u64 got),
+
+ TP_ARGS(ctx, fpos, wanted, got),
+
+ TP_STRUCT__entry(
+ __field(void *, ctx)
+ __field(u64, fpos)
+ __field(u64, wanted)
+ __field(u64, got)
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->fpos = fpos;
+ __entry->wanted = wanted;
+ __entry->got = got;
+ ),
+
+ TP_printk("ring %p, fpos %lld, wanted %lld, got %lld",
+ __entry->ctx, __entry->fpos,
+ __entry->wanted, __entry->got)
+);
+
+/*
+ * io_uring_local_work_run - ran ring local task work
+ *
+ * @ctx: pointer to an io_ring_ctx
+ * @count: how many functions it ran
+ * @loops: how many loops it ran
+ *
+ */
+TRACE_EVENT(io_uring_local_work_run,
+
+ TP_PROTO(void *ctx, int count, unsigned int loops),
+
+ TP_ARGS(ctx, count, loops),
+
+ TP_STRUCT__entry (
+ __field(void *, ctx )
+ __field(int, count )
+ __field(unsigned int, loops )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->count = count;
+ __entry->loops = loops;
),
- TP_printk("ring %p, op %d, data 0x%llx, non block %d, sq_thread %d",
- __entry->ctx, __entry->opcode,
- (unsigned long long) __entry->user_data,
- __entry->force_nonblock, __entry->sq_thread)
+ TP_printk("ring %p, count %d, loops %u", __entry->ctx, __entry->count, __entry->loops)
);
#endif /* _TRACE_IO_URING_H */
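
Each events header in this patch only declares tracepoints; exactly one
compilation unit per subsystem instantiates them by defining
CREATE_TRACE_POINTS before the include. A minimal sketch of that standard
pattern follows; the caller function is illustrative and not part of this
patch:

        /* in a single .c file of the subsystem */
        #define CREATE_TRACE_POINTS
        #include <trace/events/io_uring.h>

        /* every other user just includes the header and calls the wrapper */
        static void submit_one(struct io_kiocb *req)
        {
                /* the reworked prototype takes the request, not (ctx, opcode, ...) */
                trace_io_uring_submit_sqe(req, true);
        }
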
diff --git a/include/trace/events/iocost.h b/include/trace/events/iocost.h
index 7ecaa65b7106..6d1626e7a4ce 100644
--- a/include/trace/events/iocost.h
+++ b/include/trace/events/iocost.h
@@ -11,7 +11,7 @@ struct ioc_gq;
#include <linux/tracepoint.h>
-TRACE_EVENT(iocost_iocg_activate,
+DECLARE_EVENT_CLASS(iocost_iocg_state,
TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now,
u64 last_period, u64 cur_period, u64 vtime),
@@ -26,7 +26,6 @@ TRACE_EVENT(iocost_iocg_activate,
__field(u64, vrate)
__field(u64, last_period)
__field(u64, cur_period)
- __field(u64, last_vtime)
__field(u64, vtime)
__field(u32, weight)
__field(u32, inuse)
@@ -42,7 +41,6 @@ TRACE_EVENT(iocost_iocg_activate,
__entry->vrate = now->vrate;
__entry->last_period = last_period;
__entry->cur_period = cur_period;
- __entry->last_vtime = iocg->last_vtime;
__entry->vtime = vtime;
__entry->weight = iocg->weight;
__entry->inuse = iocg->inuse;
@@ -51,17 +49,30 @@ TRACE_EVENT(iocost_iocg_activate,
),
TP_printk("[%s:%s] now=%llu:%llu vrate=%llu "
- "period=%llu->%llu vtime=%llu->%llu "
+ "period=%llu->%llu vtime=%llu "
"weight=%u/%u hweight=%llu/%llu",
__get_str(devname), __get_str(cgroup),
__entry->now, __entry->vnow, __entry->vrate,
__entry->last_period, __entry->cur_period,
- __entry->last_vtime, __entry->vtime,
- __entry->inuse, __entry->weight,
+ __entry->vtime, __entry->inuse, __entry->weight,
__entry->hweight_inuse, __entry->hweight_active
)
);
+DEFINE_EVENT(iocost_iocg_state, iocost_iocg_activate,
+ TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now,
+ u64 last_period, u64 cur_period, u64 vtime),
+
+ TP_ARGS(iocg, path, now, last_period, cur_period, vtime)
+);
+
+DEFINE_EVENT(iocost_iocg_state, iocost_iocg_idle,
+ TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now,
+ u64 last_period, u64 cur_period, u64 vtime),
+
+ TP_ARGS(iocg, path, now, last_period, cur_period, vtime)
+);
+
DECLARE_EVENT_CLASS(iocg_inuse_update,
TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now,
@@ -98,7 +109,7 @@ DECLARE_EVENT_CLASS(iocg_inuse_update,
)
);
-DEFINE_EVENT(iocg_inuse_update, iocost_inuse_takeback,
+DEFINE_EVENT(iocg_inuse_update, iocost_inuse_shortage,
TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now,
u32 old_inuse, u32 new_inuse,
@@ -108,7 +119,7 @@ DEFINE_EVENT(iocg_inuse_update, iocost_inuse_takeback,
old_hw_inuse, new_hw_inuse)
);
-DEFINE_EVENT(iocg_inuse_update, iocost_inuse_giveaway,
+DEFINE_EVENT(iocg_inuse_update, iocost_inuse_transfer,
TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now,
u32 old_inuse, u32 new_inuse,
@@ -118,7 +129,7 @@ DEFINE_EVENT(iocg_inuse_update, iocost_inuse_giveaway,
old_hw_inuse, new_hw_inuse)
);
-DEFINE_EVENT(iocg_inuse_update, iocost_inuse_reset,
+DEFINE_EVENT(iocg_inuse_update, iocost_inuse_adjust,
TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now,
u32 old_inuse, u32 new_inuse,
@@ -130,12 +141,10 @@ DEFINE_EVENT(iocg_inuse_update, iocost_inuse_reset,
TRACE_EVENT(iocost_ioc_vrate_adj,
- TP_PROTO(struct ioc *ioc, u64 new_vrate, u32 (*missed_ppm)[2],
- u32 rq_wait_pct, int nr_lagging, int nr_shortages,
- int nr_surpluses),
+ TP_PROTO(struct ioc *ioc, u64 new_vrate, u32 *missed_ppm,
+ u32 rq_wait_pct, int nr_lagging, int nr_shortages),
- TP_ARGS(ioc, new_vrate, missed_ppm, rq_wait_pct, nr_lagging, nr_shortages,
- nr_surpluses),
+ TP_ARGS(ioc, new_vrate, missed_ppm, rq_wait_pct, nr_lagging, nr_shortages),
TP_STRUCT__entry (
__string(devname, ioc_name(ioc))
@@ -147,28 +156,66 @@ TRACE_EVENT(iocost_ioc_vrate_adj,
__field(u32, rq_wait_pct)
__field(int, nr_lagging)
__field(int, nr_shortages)
- __field(int, nr_surpluses)
),
TP_fast_assign(
__assign_str(devname, ioc_name(ioc));
- __entry->old_vrate = atomic64_read(&ioc->vtime_rate);;
+ __entry->old_vrate = atomic64_read(&ioc->vtime_rate);
__entry->new_vrate = new_vrate;
__entry->busy_level = ioc->busy_level;
- __entry->read_missed_ppm = (*missed_ppm)[READ];
- __entry->write_missed_ppm = (*missed_ppm)[WRITE];
+ __entry->read_missed_ppm = missed_ppm[READ];
+ __entry->write_missed_ppm = missed_ppm[WRITE];
__entry->rq_wait_pct = rq_wait_pct;
__entry->nr_lagging = nr_lagging;
__entry->nr_shortages = nr_shortages;
- __entry->nr_surpluses = nr_surpluses;
),
- TP_printk("[%s] vrate=%llu->%llu busy=%d missed_ppm=%u:%u rq_wait_pct=%u lagging=%d shortages=%d surpluses=%d",
+ TP_printk("[%s] vrate=%llu->%llu busy=%d missed_ppm=%u:%u rq_wait_pct=%u lagging=%d shortages=%d",
__get_str(devname), __entry->old_vrate, __entry->new_vrate,
__entry->busy_level,
__entry->read_missed_ppm, __entry->write_missed_ppm,
- __entry->rq_wait_pct, __entry->nr_lagging, __entry->nr_shortages,
- __entry->nr_surpluses
+ __entry->rq_wait_pct, __entry->nr_lagging, __entry->nr_shortages
+ )
+);
+
+TRACE_EVENT(iocost_iocg_forgive_debt,
+
+ TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now,
+ u32 usage_pct, u64 old_debt, u64 new_debt,
+ u64 old_delay, u64 new_delay),
+
+ TP_ARGS(iocg, path, now, usage_pct,
+ old_debt, new_debt, old_delay, new_delay),
+
+ TP_STRUCT__entry (
+ __string(devname, ioc_name(iocg->ioc))
+ __string(cgroup, path)
+ __field(u64, now)
+ __field(u64, vnow)
+ __field(u32, usage_pct)
+ __field(u64, old_debt)
+ __field(u64, new_debt)
+ __field(u64, old_delay)
+ __field(u64, new_delay)
+ ),
+
+ TP_fast_assign(
+ __assign_str(devname, ioc_name(iocg->ioc));
+ __assign_str(cgroup, path);
+ __entry->now = now->now;
+ __entry->vnow = now->vnow;
+ __entry->usage_pct = usage_pct;
+ __entry->old_debt = old_debt;
+ __entry->new_debt = new_debt;
+ __entry->old_delay = old_delay;
+ __entry->new_delay = new_delay;
+ ),
+
+ TP_printk("[%s:%s] now=%llu:%llu usage=%u debt=%llu->%llu delay=%llu->%llu",
+ __get_str(devname), __get_str(cgroup),
+ __entry->now, __entry->vnow, __entry->usage_pct,
+ __entry->old_debt, __entry->new_debt,
+ __entry->old_delay, __entry->new_delay
)
);
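
The iocost_ioc_vrate_adj prototype change above, from u32 (*missed_ppm)[2]
to u32 *missed_ppm, lets a caller's local array decay to the pointer
directly instead of being passed by address. A hedged sketch of a call site
under the new signature, with hypothetical sample values:

        u32 missed_ppm[2];

        missed_ppm[READ]  = 120;        /* hypothetical ppm of missed reads */
        missed_ppm[WRITE] = 340;        /* hypothetical ppm of missed writes */
        trace_iocost_ioc_vrate_adj(ioc, new_vrate, missed_ppm,
                                   rq_wait_pct, nr_lagging, nr_shortages);
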
diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h
index 72b4582322ff..29096fe12623 100644
--- a/include/trace/events/iommu.h
+++ b/include/trace/events/iommu.h
@@ -101,8 +101,9 @@ TRACE_EVENT(map,
__entry->size = size;
),
- TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu",
- __entry->iova, __entry->paddr, __entry->size
+ TP_printk("IOMMU: iova=0x%016llx - 0x%016llx paddr=0x%016llx size=%zu",
+ __entry->iova, __entry->iova + __entry->size, __entry->paddr,
+ __entry->size
)
);
@@ -124,8 +125,9 @@ TRACE_EVENT(unmap,
__entry->unmapped_size = unmapped_size;
),
- TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu",
- __entry->iova, __entry->size, __entry->unmapped_size
+ TP_printk("IOMMU: iova=0x%016llx - 0x%016llx size=%zu unmapped_size=%zu",
+ __entry->iova, __entry->iova + __entry->size,
+ __entry->size, __entry->unmapped_size
)
);
diff --git a/include/trace/events/iscsi.h b/include/trace/events/iscsi.h
index 87408faf6e4e..8ff2a3ca5d75 100644
--- a/include/trace/events/iscsi.h
+++ b/include/trace/events/iscsi.h
@@ -26,12 +26,12 @@ DECLARE_EVENT_CLASS(iscsi_log_msg,
TP_STRUCT__entry(
__string(dname, dev_name(dev) )
- __dynamic_array(char, msg, ISCSI_MSG_MAX )
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(dname, dev_name(dev));
- vsnprintf(__get_str(msg), ISCSI_MSG_MAX, vaf->fmt, *vaf->va);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s",__get_str(dname), __get_str(msg)
diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
index d16a32867f3a..99f783c384bb 100644
--- a/include/trace/events/jbd2.h
+++ b/include/trace/events/jbd2.h
@@ -355,22 +355,22 @@ TRACE_EVENT(jbd2_update_log_tail,
TRACE_EVENT(jbd2_write_superblock,
- TP_PROTO(journal_t *journal, int write_op),
+ TP_PROTO(journal_t *journal, blk_opf_t write_flags),
- TP_ARGS(journal, write_op),
+ TP_ARGS(journal, write_flags),
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( int, write_op )
+ __field( blk_opf_t, write_flags )
),
TP_fast_assign(
__entry->dev = journal->j_fs_dev->bd_dev;
- __entry->write_op = write_op;
+ __entry->write_flags = write_flags;
),
- TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev),
- MINOR(__entry->dev), __entry->write_op)
+ TP_printk("dev %d,%d write_flags %x", MAJOR(__entry->dev),
+ MINOR(__entry->dev), (__force u32)__entry->write_flags)
);
TRACE_EVENT(jbd2_lock_buffer_stall,
@@ -394,6 +394,107 @@ TRACE_EVENT(jbd2_lock_buffer_stall,
__entry->stall_ms)
);
+DECLARE_EVENT_CLASS(jbd2_journal_shrink,
+
+ TP_PROTO(journal_t *journal, unsigned long nr_to_scan,
+ unsigned long count),
+
+ TP_ARGS(journal, nr_to_scan, count),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned long, nr_to_scan)
+ __field(unsigned long, count)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = journal->j_fs_dev->bd_dev;
+ __entry->nr_to_scan = nr_to_scan;
+ __entry->count = count;
+ ),
+
+ TP_printk("dev %d,%d nr_to_scan %lu count %lu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->nr_to_scan, __entry->count)
+);
+
+DEFINE_EVENT(jbd2_journal_shrink, jbd2_shrink_count,
+
+ TP_PROTO(journal_t *journal, unsigned long nr_to_scan, unsigned long count),
+
+ TP_ARGS(journal, nr_to_scan, count)
+);
+
+DEFINE_EVENT(jbd2_journal_shrink, jbd2_shrink_scan_enter,
+
+ TP_PROTO(journal_t *journal, unsigned long nr_to_scan, unsigned long count),
+
+ TP_ARGS(journal, nr_to_scan, count)
+);
+
+TRACE_EVENT(jbd2_shrink_scan_exit,
+
+ TP_PROTO(journal_t *journal, unsigned long nr_to_scan,
+ unsigned long nr_shrunk, unsigned long count),
+
+ TP_ARGS(journal, nr_to_scan, nr_shrunk, count),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned long, nr_to_scan)
+ __field(unsigned long, nr_shrunk)
+ __field(unsigned long, count)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = journal->j_fs_dev->bd_dev;
+ __entry->nr_to_scan = nr_to_scan;
+ __entry->nr_shrunk = nr_shrunk;
+ __entry->count = count;
+ ),
+
+ TP_printk("dev %d,%d nr_to_scan %lu nr_shrunk %lu count %lu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->nr_to_scan, __entry->nr_shrunk,
+ __entry->count)
+);
+
+TRACE_EVENT(jbd2_shrink_checkpoint_list,
+
+ TP_PROTO(journal_t *journal, tid_t first_tid, tid_t tid, tid_t last_tid,
+ unsigned long nr_freed, unsigned long nr_scanned,
+ tid_t next_tid),
+
+ TP_ARGS(journal, first_tid, tid, last_tid, nr_freed,
+ nr_scanned, next_tid),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(tid_t, first_tid)
+ __field(tid_t, tid)
+ __field(tid_t, last_tid)
+ __field(unsigned long, nr_freed)
+ __field(unsigned long, nr_scanned)
+ __field(tid_t, next_tid)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = journal->j_fs_dev->bd_dev;
+ __entry->first_tid = first_tid;
+ __entry->tid = tid;
+ __entry->last_tid = last_tid;
+ __entry->nr_freed = nr_freed;
+ __entry->nr_scanned = nr_scanned;
+ __entry->next_tid = next_tid;
+ ),
+
+ TP_printk("dev %d,%d shrink transaction %u-%u(%u) freed %lu "
+ "scanned %lu next transaction %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->first_tid, __entry->tid, __entry->last_tid,
+ __entry->nr_freed, __entry->nr_scanned, __entry->next_tid)
+);
+
#endif /* _TRACE_JBD2_H */
/* This part must be outside protection */
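
The jbd2_shrink_scan_enter/jbd2_shrink_scan_exit pair above is intended to
bracket the journal shrinker's scan callback. A hedged sketch of how such a
callback might emit them; the function body is illustrative, not the actual
fs/jbd2 code:

        static unsigned long example_scan(journal_t *journal,
                                          unsigned long nr_to_scan,
                                          unsigned long count)
        {
                unsigned long nr_shrunk = 0;

                trace_jbd2_shrink_scan_enter(journal, nr_to_scan, count);
                /* ... free checkpointed buffers, updating nr_shrunk ... */
                trace_jbd2_shrink_scan_exit(journal, nr_to_scan, nr_shrunk, count);
                return nr_shrunk;
        }
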
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index f65b1f6db22d..243073cfc29d 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -9,57 +9,49 @@
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>
-DECLARE_EVENT_CLASS(kmem_alloc,
+TRACE_EVENT(kmem_cache_alloc,
TP_PROTO(unsigned long call_site,
const void *ptr,
- size_t bytes_req,
- size_t bytes_alloc,
- gfp_t gfp_flags),
+ struct kmem_cache *s,
+ gfp_t gfp_flags,
+ int node),
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
+ TP_ARGS(call_site, ptr, s, gfp_flags, node),
TP_STRUCT__entry(
__field( unsigned long, call_site )
__field( const void *, ptr )
__field( size_t, bytes_req )
__field( size_t, bytes_alloc )
- __field( gfp_t, gfp_flags )
+ __field( unsigned long, gfp_flags )
+ __field( int, node )
+ __field( bool, accounted )
),
TP_fast_assign(
__entry->call_site = call_site;
__entry->ptr = ptr;
- __entry->bytes_req = bytes_req;
- __entry->bytes_alloc = bytes_alloc;
- __entry->gfp_flags = gfp_flags;
+ __entry->bytes_req = s->object_size;
+ __entry->bytes_alloc = s->size;
+ __entry->gfp_flags = (__force unsigned long)gfp_flags;
+ __entry->node = node;
+ __entry->accounted = IS_ENABLED(CONFIG_MEMCG_KMEM) ?
+ ((gfp_flags & __GFP_ACCOUNT) ||
+ (s->flags & SLAB_ACCOUNT)) : false;
),
- TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
+ TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
(void *)__entry->call_site,
__entry->ptr,
__entry->bytes_req,
__entry->bytes_alloc,
- show_gfp_flags(__entry->gfp_flags))
-);
-
-DEFINE_EVENT(kmem_alloc, kmalloc,
-
- TP_PROTO(unsigned long call_site, const void *ptr,
- size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
-
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
-);
-
-DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
-
- TP_PROTO(unsigned long call_site, const void *ptr,
- size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
-
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
+ show_gfp_flags(__entry->gfp_flags),
+ __entry->node,
+ __entry->accounted ? "true" : "false")
);
-DECLARE_EVENT_CLASS(kmem_alloc_node,
+TRACE_EVENT(kmalloc,
TP_PROTO(unsigned long call_site,
const void *ptr,
@@ -75,7 +67,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
__field( const void *, ptr )
__field( size_t, bytes_req )
__field( size_t, bytes_alloc )
- __field( gfp_t, gfp_flags )
+ __field( unsigned long, gfp_flags )
__field( int, node )
),
@@ -84,38 +76,22 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
__entry->ptr = ptr;
__entry->bytes_req = bytes_req;
__entry->bytes_alloc = bytes_alloc;
- __entry->gfp_flags = gfp_flags;
+ __entry->gfp_flags = (__force unsigned long)gfp_flags;
__entry->node = node;
),
- TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
+ TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
(void *)__entry->call_site,
__entry->ptr,
__entry->bytes_req,
__entry->bytes_alloc,
show_gfp_flags(__entry->gfp_flags),
- __entry->node)
-);
-
-DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
-
- TP_PROTO(unsigned long call_site, const void *ptr,
- size_t bytes_req, size_t bytes_alloc,
- gfp_t gfp_flags, int node),
-
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
-);
-
-DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
-
- TP_PROTO(unsigned long call_site, const void *ptr,
- size_t bytes_req, size_t bytes_alloc,
- gfp_t gfp_flags, int node),
-
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
+ __entry->node,
+ (IS_ENABLED(CONFIG_MEMCG_KMEM) &&
+ (__entry->gfp_flags & (__force unsigned long)__GFP_ACCOUNT)) ? "true" : "false")
);
-DECLARE_EVENT_CLASS(kmem_free,
+TRACE_EVENT(kfree,
TP_PROTO(unsigned long call_site, const void *ptr),
@@ -135,18 +111,26 @@ DECLARE_EVENT_CLASS(kmem_free,
(void *)__entry->call_site, __entry->ptr)
);
-DEFINE_EVENT(kmem_free, kfree,
+TRACE_EVENT(kmem_cache_free,
- TP_PROTO(unsigned long call_site, const void *ptr),
+ TP_PROTO(unsigned long call_site, const void *ptr, const struct kmem_cache *s),
- TP_ARGS(call_site, ptr)
-);
+ TP_ARGS(call_site, ptr, s),
-DEFINE_EVENT(kmem_free, kmem_cache_free,
+ TP_STRUCT__entry(
+ __field( unsigned long, call_site )
+ __field( const void *, ptr )
+ __string( name, s->name )
+ ),
- TP_PROTO(unsigned long call_site, const void *ptr),
+ TP_fast_assign(
+ __entry->call_site = call_site;
+ __entry->ptr = ptr;
+ __assign_str(name, s->name);
+ ),
- TP_ARGS(call_site, ptr)
+ TP_printk("call_site=%pS ptr=%p name=%s",
+ (void *)__entry->call_site, __entry->ptr, __get_str(name))
);
TRACE_EVENT(mm_page_free,
@@ -165,7 +149,7 @@ TRACE_EVENT(mm_page_free,
__entry->order = order;
),
- TP_printk("page=%p pfn=%lu order=%d",
+ TP_printk("page=%p pfn=0x%lx order=%d",
pfn_to_page(__entry->pfn),
__entry->pfn,
__entry->order)
@@ -185,7 +169,7 @@ TRACE_EVENT(mm_page_free_batched,
__entry->pfn = page_to_pfn(page);
),
- TP_printk("page=%p pfn=%lu order=0",
+ TP_printk("page=%p pfn=0x%lx order=0",
pfn_to_page(__entry->pfn),
__entry->pfn)
);
@@ -200,18 +184,18 @@ TRACE_EVENT(mm_page_alloc,
TP_STRUCT__entry(
__field( unsigned long, pfn )
__field( unsigned int, order )
- __field( gfp_t, gfp_flags )
+ __field( unsigned long, gfp_flags )
__field( int, migratetype )
),
TP_fast_assign(
__entry->pfn = page ? page_to_pfn(page) : -1UL;
__entry->order = order;
- __entry->gfp_flags = gfp_flags;
+ __entry->gfp_flags = (__force unsigned long)gfp_flags;
__entry->migratetype = migratetype;
),
- TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
+ TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d gfp_flags=%s",
__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
__entry->pfn != -1UL ? __entry->pfn : 0,
__entry->order,
@@ -221,35 +205,39 @@ TRACE_EVENT(mm_page_alloc,
DECLARE_EVENT_CLASS(mm_page,
- TP_PROTO(struct page *page, unsigned int order, int migratetype),
+ TP_PROTO(struct page *page, unsigned int order, int migratetype,
+ int percpu_refill),
- TP_ARGS(page, order, migratetype),
+ TP_ARGS(page, order, migratetype, percpu_refill),
TP_STRUCT__entry(
__field( unsigned long, pfn )
__field( unsigned int, order )
__field( int, migratetype )
+ __field( int, percpu_refill )
),
TP_fast_assign(
__entry->pfn = page ? page_to_pfn(page) : -1UL;
__entry->order = order;
__entry->migratetype = migratetype;
+ __entry->percpu_refill = percpu_refill;
),
- TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
+ TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d percpu_refill=%d",
__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
__entry->pfn != -1UL ? __entry->pfn : 0,
__entry->order,
__entry->migratetype,
- __entry->order == 0)
+ __entry->percpu_refill)
);
DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
- TP_PROTO(struct page *page, unsigned int order, int migratetype),
+ TP_PROTO(struct page *page, unsigned int order, int migratetype,
+ int percpu_refill),
- TP_ARGS(page, order, migratetype)
+ TP_ARGS(page, order, migratetype, percpu_refill)
);
TRACE_EVENT(mm_page_pcpu_drain,
@@ -270,7 +258,7 @@ TRACE_EVENT(mm_page_pcpu_drain,
__entry->migratetype = migratetype;
),
- TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
+ TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d",
pfn_to_page(__entry->pfn), __entry->pfn,
__entry->order, __entry->migratetype)
);
@@ -304,7 +292,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
get_pageblock_migratetype(page));
),
- TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
+ TP_printk("page=%p pfn=0x%lx alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
pfn_to_page(__entry->pfn),
__entry->pfn,
__entry->alloc_order,
@@ -335,6 +323,26 @@ static unsigned int __maybe_unused mm_ptr_to_hash(const void *ptr)
#define __PTR_TO_HASHVAL
#endif
+#define TRACE_MM_PAGES \
+ EM(MM_FILEPAGES) \
+ EM(MM_ANONPAGES) \
+ EM(MM_SWAPENTS) \
+ EMe(MM_SHMEMPAGES)
+
+#undef EM
+#undef EMe
+
+#define EM(a) TRACE_DEFINE_ENUM(a);
+#define EMe(a) TRACE_DEFINE_ENUM(a);
+
+TRACE_MM_PAGES
+
+#undef EM
+#undef EMe
+
+#define EM(a) { a, #a },
+#define EMe(a) { a, #a }
+
TRACE_EVENT(rss_stat,
TP_PROTO(struct mm_struct *mm,
@@ -357,10 +365,10 @@ TRACE_EVENT(rss_stat,
__entry->size = (count << PAGE_SHIFT);
),
- TP_printk("mm_id=%u curr=%d member=%d size=%ldB",
+ TP_printk("mm_id=%u curr=%d type=%s size=%ldB",
__entry->mm_id,
__entry->curr,
- __entry->member,
+ __print_symbolic(__entry->member, TRACE_MM_PAGES),
__entry->size)
);
#endif /* _TRACE_KMEM_H */
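
The EM()/EMe() dance added to the kmem header is the usual two-pass trick:
the first definition registers each enum value with the tracing core so
user-space tooling can resolve it, and the redefinition turns the same list
into the symbol table consumed by __print_symbolic(). Roughly, it expands
to the following sketch:

        /* pass 1: make the enum values known to user-space tooling */
        TRACE_DEFINE_ENUM(MM_FILEPAGES);
        TRACE_DEFINE_ENUM(MM_ANONPAGES);
        TRACE_DEFINE_ENUM(MM_SWAPENTS);
        TRACE_DEFINE_ENUM(MM_SHMEMPAGES);

        /* pass 2: TRACE_MM_PAGES becomes a { value, "name" } table */
        __print_symbolic(member,
                { MM_FILEPAGES,  "MM_FILEPAGES" },
                { MM_ANONPAGES,  "MM_ANONPAGES" },
                { MM_SWAPENTS,   "MM_SWAPENTS" },
                { MM_SHMEMPAGES, "MM_SHMEMPAGES" })
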
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 2c735a3e6613..3bd31ea23fee 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -17,7 +17,7 @@
ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL), \
ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH), ERSN(EPR),\
ERSN(SYSTEM_EVENT), ERSN(S390_STSI), ERSN(IOAPIC_EOI), \
- ERSN(HYPERV)
+ ERSN(HYPERV), ERSN(ARM_NISV), ERSN(X86_RDMSR), ERSN(X86_WRMSR)
TRACE_EVENT(kvm_userspace_exit,
TP_PROTO(__u32 reason, int errno),
@@ -255,30 +255,6 @@ TRACE_EVENT(kvm_fpu,
TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
);
-TRACE_EVENT(kvm_age_page,
- TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref),
- TP_ARGS(gfn, level, slot, ref),
-
- TP_STRUCT__entry(
- __field( u64, hva )
- __field( u64, gfn )
- __field( u8, level )
- __field( u8, referenced )
- ),
-
- TP_fast_assign(
- __entry->gfn = gfn;
- __entry->level = level;
- __entry->hva = ((gfn - slot->base_gfn) <<
- PAGE_SHIFT) + slot->userspace_addr;
- __entry->referenced = ref;
- ),
-
- TP_printk("hva %llx gfn %llx level %u %s",
- __entry->hva, __entry->gfn, __entry->level,
- __entry->referenced ? "YOUNG" : "OLD")
-);
-
#ifdef CONFIG_KVM_ASYNC_PF
DECLARE_EVENT_CLASS(kvm_async_get_page_class,
@@ -306,7 +282,7 @@ DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
TP_ARGS(gva, gfn)
);
-DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
+DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_repeated_fault,
TP_PROTO(u64 gva, u64 gfn),
@@ -399,6 +375,135 @@ TRACE_EVENT(kvm_halt_poll_ns,
#define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \
trace_kvm_halt_poll_ns(false, vcpu_id, new, old)
+TRACE_EVENT(kvm_dirty_ring_push,
+ TP_PROTO(struct kvm_dirty_ring *ring, u32 slot, u64 offset),
+ TP_ARGS(ring, slot, offset),
+
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(u32, dirty_index)
+ __field(u32, reset_index)
+ __field(u32, slot)
+ __field(u64, offset)
+ ),
+
+ TP_fast_assign(
+ __entry->index = ring->index;
+ __entry->dirty_index = ring->dirty_index;
+ __entry->reset_index = ring->reset_index;
+ __entry->slot = slot;
+ __entry->offset = offset;
+ ),
+
+ TP_printk("ring %d: dirty 0x%x reset 0x%x "
+ "slot %u offset 0x%llx (used %u)",
+ __entry->index, __entry->dirty_index,
+ __entry->reset_index, __entry->slot, __entry->offset,
+ __entry->dirty_index - __entry->reset_index)
+);
+
+TRACE_EVENT(kvm_dirty_ring_reset,
+ TP_PROTO(struct kvm_dirty_ring *ring),
+ TP_ARGS(ring),
+
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(u32, dirty_index)
+ __field(u32, reset_index)
+ ),
+
+ TP_fast_assign(
+ __entry->index = ring->index;
+ __entry->dirty_index = ring->dirty_index;
+ __entry->reset_index = ring->reset_index;
+ ),
+
+ TP_printk("ring %d: dirty 0x%x reset 0x%x (used %u)",
+ __entry->index, __entry->dirty_index, __entry->reset_index,
+ __entry->dirty_index - __entry->reset_index)
+);
+
+TRACE_EVENT(kvm_dirty_ring_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ ),
+
+ TP_printk("vcpu %d", __entry->vcpu_id)
+);
+
+TRACE_EVENT(kvm_unmap_hva_range,
+ TP_PROTO(unsigned long start, unsigned long end),
+ TP_ARGS(start, end),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, start )
+ __field( unsigned long, end )
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->end = end;
+ ),
+
+ TP_printk("mmu notifier unmap range: %#016lx -- %#016lx",
+ __entry->start, __entry->end)
+);
+
+TRACE_EVENT(kvm_set_spte_hva,
+ TP_PROTO(unsigned long hva),
+ TP_ARGS(hva),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, hva )
+ ),
+
+ TP_fast_assign(
+ __entry->hva = hva;
+ ),
+
+ TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva)
+);
+
+TRACE_EVENT(kvm_age_hva,
+ TP_PROTO(unsigned long start, unsigned long end),
+ TP_ARGS(start, end),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, start )
+ __field( unsigned long, end )
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->end = end;
+ ),
+
+ TP_printk("mmu notifier age hva: %#016lx -- %#016lx",
+ __entry->start, __entry->end)
+);
+
+TRACE_EVENT(kvm_test_age_hva,
+ TP_PROTO(unsigned long hva),
+ TP_ARGS(hva),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, hva )
+ ),
+
+ TP_fast_assign(
+ __entry->hva = hva;
+ ),
+
+ TP_printk("mmu notifier test age hva: %#016lx", __entry->hva)
+);
+
#endif /* _TRACE_KVM_MAIN_H */
/* This part must be outside protection */
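
The "(used %u)" value in the dirty-ring events above is computed as
dirty_index - reset_index; because both indices are u32, the subtraction
remains correct even after the counters wrap. A tiny sketch with
hypothetical values:

        u32 dirty_index = 5;            /* has wrapped past 2^32 */
        u32 reset_index = 0xfffffffbU;  /* hypothetical older index */
        u32 used = dirty_index - reset_index;   /* == 10, despite the wrap */
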
diff --git a/include/trace/events/kyber.h b/include/trace/events/kyber.h
index c0e7d24ca256..bf7533f171ff 100644
--- a/include/trace/events/kyber.h
+++ b/include/trace/events/kyber.h
@@ -13,11 +13,11 @@
TRACE_EVENT(kyber_latency,
- TP_PROTO(struct request_queue *q, const char *domain, const char *type,
+ TP_PROTO(dev_t dev, const char *domain, const char *type,
unsigned int percentile, unsigned int numerator,
unsigned int denominator, unsigned int samples),
- TP_ARGS(q, domain, type, percentile, numerator, denominator, samples),
+ TP_ARGS(dev, domain, type, percentile, numerator, denominator, samples),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -30,7 +30,7 @@ TRACE_EVENT(kyber_latency,
),
TP_fast_assign(
- __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
+ __entry->dev = dev;
strlcpy(__entry->domain, domain, sizeof(__entry->domain));
strlcpy(__entry->type, type, sizeof(__entry->type));
__entry->percentile = percentile;
@@ -47,10 +47,9 @@ TRACE_EVENT(kyber_latency,
TRACE_EVENT(kyber_adjust,
- TP_PROTO(struct request_queue *q, const char *domain,
- unsigned int depth),
+ TP_PROTO(dev_t dev, const char *domain, unsigned int depth),
- TP_ARGS(q, domain, depth),
+ TP_ARGS(dev, domain, depth),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -59,7 +58,7 @@ TRACE_EVENT(kyber_adjust,
),
TP_fast_assign(
- __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
+ __entry->dev = dev;
strlcpy(__entry->domain, domain, sizeof(__entry->domain));
__entry->depth = depth;
),
@@ -71,9 +70,9 @@ TRACE_EVENT(kyber_adjust,
TRACE_EVENT(kyber_throttled,
- TP_PROTO(struct request_queue *q, const char *domain),
+ TP_PROTO(dev_t dev, const char *domain),
- TP_ARGS(q, domain),
+ TP_ARGS(dev, domain),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -81,7 +80,7 @@ TRACE_EVENT(kyber_throttled,
),
TP_fast_assign(
- __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
+ __entry->dev = dev;
strlcpy(__entry->domain, domain, sizeof(__entry->domain));
),
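
With the kyber events now taking a dev_t, callers resolve the device number
themselves instead of the tracepoint reaching through q->kobj. A hedged
sketch of a call site; the variable names are illustrative:

        /* given a struct gendisk *disk associated with the queue */
        trace_kyber_adjust(disk_devt(disk), "read", new_depth);
        trace_kyber_throttled(disk_devt(disk), "write");
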
diff --git a/include/trace/events/libata.h b/include/trace/events/libata.h
index ab69434e2329..6025dd8ba4aa 100644
--- a/include/trace/events/libata.h
+++ b/include/trace/events/libata.h
@@ -132,9 +132,37 @@
ata_protocol_name(ATAPI_PROT_PIO), \
ata_protocol_name(ATAPI_PROT_DMA))
+#define ata_class_name(class) { class, #class }
+#define show_class_name(val) \
+ __print_symbolic(val, \
+ ata_class_name(ATA_DEV_UNKNOWN), \
+ ata_class_name(ATA_DEV_ATA), \
+ ata_class_name(ATA_DEV_ATA_UNSUP), \
+ ata_class_name(ATA_DEV_ATAPI), \
+ ata_class_name(ATA_DEV_ATAPI_UNSUP), \
+ ata_class_name(ATA_DEV_PMP), \
+ ata_class_name(ATA_DEV_PMP_UNSUP), \
+ ata_class_name(ATA_DEV_SEMB), \
+ ata_class_name(ATA_DEV_SEMB_UNSUP), \
+ ata_class_name(ATA_DEV_ZAC), \
+ ata_class_name(ATA_DEV_ZAC_UNSUP), \
+ ata_class_name(ATA_DEV_NONE))
+
+#define ata_sff_hsm_state_name(state) { state, #state }
+#define show_sff_hsm_state_name(val) \
+ __print_symbolic(val, \
+ ata_sff_hsm_state_name(HSM_ST_IDLE), \
+ ata_sff_hsm_state_name(HSM_ST_FIRST), \
+ ata_sff_hsm_state_name(HSM_ST), \
+ ata_sff_hsm_state_name(HSM_ST_LAST), \
+ ata_sff_hsm_state_name(HSM_ST_ERR))
+
const char *libata_trace_parse_status(struct trace_seq*, unsigned char);
#define __parse_status(s) libata_trace_parse_status(p, s)
+const char *libata_trace_parse_host_stat(struct trace_seq *, unsigned char);
+#define __parse_host_stat(s) libata_trace_parse_host_stat(p, s)
+
const char *libata_trace_parse_eh_action(struct trace_seq *, unsigned int);
#define __parse_eh_action(a) libata_trace_parse_eh_action(p, a)
@@ -144,11 +172,14 @@ const char *libata_trace_parse_eh_err_mask(struct trace_seq *, unsigned int);
const char *libata_trace_parse_qc_flags(struct trace_seq *, unsigned int);
#define __parse_qc_flags(f) libata_trace_parse_qc_flags(p, f)
+const char *libata_trace_parse_tf_flags(struct trace_seq *, unsigned int);
+#define __parse_tf_flags(f) libata_trace_parse_tf_flags(p, f)
+
const char *libata_trace_parse_subcmd(struct trace_seq *, unsigned char,
unsigned char, unsigned char);
#define __parse_subcmd(c,f,h) libata_trace_parse_subcmd(p, c, f, h)
-TRACE_EVENT(ata_qc_issue,
+DECLARE_EVENT_CLASS(ata_qc_issue_template,
TP_PROTO(struct ata_queued_cmd *qc),
@@ -207,6 +238,14 @@ TRACE_EVENT(ata_qc_issue,
__entry->dev)
);
+DEFINE_EVENT(ata_qc_issue_template, ata_qc_prep,
+ TP_PROTO(struct ata_queued_cmd *qc),
+ TP_ARGS(qc));
+
+DEFINE_EVENT(ata_qc_issue_template, ata_qc_issue,
+ TP_PROTO(struct ata_queued_cmd *qc),
+ TP_ARGS(qc));
+
DECLARE_EVENT_CLASS(ata_qc_complete_template,
TP_PROTO(struct ata_queued_cmd *qc),
@@ -249,6 +288,7 @@ DECLARE_EVENT_CLASS(ata_qc_complete_template,
__entry->hob_feature = qc->result_tf.hob_feature;
__entry->nsect = qc->result_tf.nsect;
__entry->hob_nsect = qc->result_tf.hob_nsect;
+ __entry->flags = qc->flags;
),
TP_printk("ata_port=%u ata_dev=%u tag=%d flags=%s status=%s " \
@@ -275,6 +315,128 @@ DEFINE_EVENT(ata_qc_complete_template, ata_qc_complete_done,
TP_PROTO(struct ata_queued_cmd *qc),
TP_ARGS(qc));
+TRACE_EVENT(ata_tf_load,
+
+ TP_PROTO(struct ata_port *ap, const struct ata_taskfile *tf),
+
+ TP_ARGS(ap, tf),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned char, cmd )
+ __field( unsigned char, dev )
+ __field( unsigned char, lbal )
+ __field( unsigned char, lbam )
+ __field( unsigned char, lbah )
+ __field( unsigned char, nsect )
+ __field( unsigned char, feature )
+ __field( unsigned char, hob_lbal )
+ __field( unsigned char, hob_lbam )
+ __field( unsigned char, hob_lbah )
+ __field( unsigned char, hob_nsect )
+ __field( unsigned char, hob_feature )
+ __field( unsigned char, proto )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = ap->print_id;
+ __entry->proto = tf->protocol;
+ __entry->cmd = tf->command;
+ __entry->dev = tf->device;
+ __entry->lbal = tf->lbal;
+ __entry->lbam = tf->lbam;
+ __entry->lbah = tf->lbah;
+ __entry->hob_lbal = tf->hob_lbal;
+ __entry->hob_lbam = tf->hob_lbam;
+ __entry->hob_lbah = tf->hob_lbah;
+ __entry->feature = tf->feature;
+ __entry->hob_feature = tf->hob_feature;
+ __entry->nsect = tf->nsect;
+ __entry->hob_nsect = tf->hob_nsect;
+ ),
+
+ TP_printk("ata_port=%u proto=%s cmd=%s%s " \
+ " tf=(%02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x)",
+ __entry->ata_port,
+ show_protocol_name(__entry->proto),
+ show_opcode_name(__entry->cmd),
+ __parse_subcmd(__entry->cmd, __entry->feature, __entry->hob_nsect),
+ __entry->cmd, __entry->feature, __entry->nsect,
+ __entry->lbal, __entry->lbam, __entry->lbah,
+ __entry->hob_feature, __entry->hob_nsect,
+ __entry->hob_lbal, __entry->hob_lbam, __entry->hob_lbah,
+ __entry->dev)
+);
+
+DECLARE_EVENT_CLASS(ata_exec_command_template,
+
+ TP_PROTO(struct ata_port *ap, const struct ata_taskfile *tf, unsigned int tag),
+
+ TP_ARGS(ap, tf, tag),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned int, tag )
+ __field( unsigned char, cmd )
+ __field( unsigned char, feature )
+ __field( unsigned char, hob_nsect )
+ __field( unsigned char, proto )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = ap->print_id;
+ __entry->tag = tag;
+ __entry->proto = tf->protocol;
+ __entry->cmd = tf->command;
+ __entry->feature = tf->feature;
+ __entry->hob_nsect = tf->hob_nsect;
+ ),
+
+ TP_printk("ata_port=%u tag=%d proto=%s cmd=%s%s",
+ __entry->ata_port, __entry->tag,
+ show_protocol_name(__entry->proto),
+ show_opcode_name(__entry->cmd),
+ __parse_subcmd(__entry->cmd, __entry->feature, __entry->hob_nsect))
+);
+
+DEFINE_EVENT(ata_exec_command_template, ata_exec_command,
+ TP_PROTO(struct ata_port *ap, const struct ata_taskfile *tf, unsigned int tag),
+ TP_ARGS(ap, tf, tag));
+
+DEFINE_EVENT(ata_exec_command_template, ata_bmdma_setup,
+ TP_PROTO(struct ata_port *ap, const struct ata_taskfile *tf, unsigned int tag),
+ TP_ARGS(ap, tf, tag));
+
+DEFINE_EVENT(ata_exec_command_template, ata_bmdma_start,
+ TP_PROTO(struct ata_port *ap, const struct ata_taskfile *tf, unsigned int tag),
+ TP_ARGS(ap, tf, tag));
+
+DEFINE_EVENT(ata_exec_command_template, ata_bmdma_stop,
+ TP_PROTO(struct ata_port *ap, const struct ata_taskfile *tf, unsigned int tag),
+ TP_ARGS(ap, tf, tag));
+
+TRACE_EVENT(ata_bmdma_status,
+
+ TP_PROTO(struct ata_port *ap, unsigned int host_stat),
+
+ TP_ARGS(ap, host_stat),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned int, tag )
+ __field( unsigned char, host_stat )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = ap->print_id;
+ __entry->host_stat = host_stat;
+ ),
+
+ TP_printk("ata_port=%u host_stat=%s",
+ __entry->ata_port,
+ __parse_host_stat(__entry->host_stat))
+);
+
TRACE_EVENT(ata_eh_link_autopsy,
TP_PROTO(struct ata_device *dev, unsigned int eh_action, unsigned int eh_err_mask),
@@ -329,6 +491,259 @@ TRACE_EVENT(ata_eh_link_autopsy_qc,
__parse_eh_err_mask(__entry->eh_err_mask))
);
+DECLARE_EVENT_CLASS(ata_eh_action_template,
+
+ TP_PROTO(struct ata_link *link, unsigned int devno, unsigned int eh_action),
+
+ TP_ARGS(link, devno, eh_action),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned int, ata_dev )
+ __field( unsigned int, eh_action )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = link->ap->print_id;
+ __entry->ata_dev = link->pmp + devno;
+ __entry->eh_action = eh_action;
+ ),
+
+ TP_printk("ata_port=%u ata_dev=%u eh_action=%s",
+ __entry->ata_port, __entry->ata_dev,
+ __parse_eh_action(__entry->eh_action))
+);
+
+DEFINE_EVENT(ata_eh_action_template, ata_eh_about_to_do,
+ TP_PROTO(struct ata_link *link, unsigned int devno, unsigned int eh_action),
+ TP_ARGS(link, devno, eh_action));
+
+DEFINE_EVENT(ata_eh_action_template, ata_eh_done,
+ TP_PROTO(struct ata_link *link, unsigned int devno, unsigned int eh_action),
+ TP_ARGS(link, devno, eh_action));
+
+DECLARE_EVENT_CLASS(ata_link_reset_begin_template,
+
+ TP_PROTO(struct ata_link *link, unsigned int *class, unsigned long deadline),
+
+ TP_ARGS(link, class, deadline),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __array( unsigned int, class, 2 )
+ __field( unsigned long, deadline )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = link->ap->print_id;
+		memcpy(__entry->class, class, 2 * sizeof(unsigned int));
+ __entry->deadline = deadline;
+ ),
+
+ TP_printk("ata_port=%u deadline=%lu classes=[%s,%s]",
+ __entry->ata_port, __entry->deadline,
+ show_class_name(__entry->class[0]),
+ show_class_name(__entry->class[1]))
+);
+
+DEFINE_EVENT(ata_link_reset_begin_template, ata_link_hardreset_begin,
+ TP_PROTO(struct ata_link *link, unsigned int *class, unsigned long deadline),
+ TP_ARGS(link, class, deadline));
+
+DEFINE_EVENT(ata_link_reset_begin_template, ata_slave_hardreset_begin,
+ TP_PROTO(struct ata_link *link, unsigned int *class, unsigned long deadline),
+ TP_ARGS(link, class, deadline));
+
+DEFINE_EVENT(ata_link_reset_begin_template, ata_link_softreset_begin,
+ TP_PROTO(struct ata_link *link, unsigned int *class, unsigned long deadline),
+ TP_ARGS(link, class, deadline));
+
+DECLARE_EVENT_CLASS(ata_link_reset_end_template,
+
+ TP_PROTO(struct ata_link *link, unsigned int *class, int rc),
+
+ TP_ARGS(link, class, rc),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __array( unsigned int, class, 2 )
+ __field( int, rc )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = link->ap->print_id;
+		memcpy(__entry->class, class, 2 * sizeof(unsigned int));
+ __entry->rc = rc;
+ ),
+
+ TP_printk("ata_port=%u rc=%d class=[%s,%s]",
+ __entry->ata_port, __entry->rc,
+ show_class_name(__entry->class[0]),
+ show_class_name(__entry->class[1]))
+);
+
+DEFINE_EVENT(ata_link_reset_end_template, ata_link_hardreset_end,
+ TP_PROTO(struct ata_link *link, unsigned int *class, int rc),
+ TP_ARGS(link, class, rc));
+
+DEFINE_EVENT(ata_link_reset_end_template, ata_slave_hardreset_end,
+ TP_PROTO(struct ata_link *link, unsigned int *class, int rc),
+ TP_ARGS(link, class, rc));
+
+DEFINE_EVENT(ata_link_reset_end_template, ata_link_softreset_end,
+ TP_PROTO(struct ata_link *link, unsigned int *class, int rc),
+ TP_ARGS(link, class, rc));
+
+DEFINE_EVENT(ata_link_reset_end_template, ata_link_postreset,
+ TP_PROTO(struct ata_link *link, unsigned int *class, int rc),
+ TP_ARGS(link, class, rc));
+
+DEFINE_EVENT(ata_link_reset_end_template, ata_slave_postreset,
+ TP_PROTO(struct ata_link *link, unsigned int *class, int rc),
+ TP_ARGS(link, class, rc));
+
+DECLARE_EVENT_CLASS(ata_port_eh_begin_template,
+
+ TP_PROTO(struct ata_port *ap),
+
+ TP_ARGS(ap),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = ap->print_id;
+ ),
+
+ TP_printk("ata_port=%u", __entry->ata_port)
+);
+
+DEFINE_EVENT(ata_port_eh_begin_template, ata_std_sched_eh,
+ TP_PROTO(struct ata_port *ap),
+ TP_ARGS(ap));
+
+DEFINE_EVENT(ata_port_eh_begin_template, ata_port_freeze,
+ TP_PROTO(struct ata_port *ap),
+ TP_ARGS(ap));
+
+DEFINE_EVENT(ata_port_eh_begin_template, ata_port_thaw,
+ TP_PROTO(struct ata_port *ap),
+ TP_ARGS(ap));
+
+DECLARE_EVENT_CLASS(ata_sff_hsm_template,
+
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned char status),
+
+ TP_ARGS(qc, status),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned int, ata_dev )
+ __field( unsigned int, tag )
+ __field( unsigned int, qc_flags )
+ __field( unsigned int, protocol )
+ __field( unsigned int, hsm_state )
+ __field( unsigned char, dev_state )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = qc->ap->print_id;
+ __entry->ata_dev = qc->dev->link->pmp + qc->dev->devno;
+ __entry->tag = qc->tag;
+ __entry->qc_flags = qc->flags;
+ __entry->protocol = qc->tf.protocol;
+ __entry->hsm_state = qc->ap->hsm_task_state;
+ __entry->dev_state = status;
+ ),
+
+ TP_printk("ata_port=%u ata_dev=%u tag=%d proto=%s flags=%s task_state=%s dev_stat=0x%X",
+ __entry->ata_port, __entry->ata_dev, __entry->tag,
+ show_protocol_name(__entry->protocol),
+ __parse_qc_flags(__entry->qc_flags),
+ show_sff_hsm_state_name(__entry->hsm_state),
+ __entry->dev_state)
+);
+
+DEFINE_EVENT(ata_sff_hsm_template, ata_sff_hsm_state,
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned char state),
+ TP_ARGS(qc, state));
+
+DEFINE_EVENT(ata_sff_hsm_template, ata_sff_hsm_command_complete,
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned char state),
+ TP_ARGS(qc, state));
+
+DEFINE_EVENT(ata_sff_hsm_template, ata_sff_port_intr,
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned char state),
+ TP_ARGS(qc, state));
+
+DECLARE_EVENT_CLASS(ata_transfer_data_template,
+
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned int offset, unsigned int count),
+
+ TP_ARGS(qc, offset, count),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned int, ata_dev )
+ __field( unsigned int, tag )
+ __field( unsigned int, flags )
+ __field( unsigned int, offset )
+ __field( unsigned int, bytes )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = qc->ap->print_id;
+ __entry->ata_dev = qc->dev->link->pmp + qc->dev->devno;
+ __entry->tag = qc->tag;
+ __entry->flags = qc->tf.flags;
+ __entry->offset = offset;
+ __entry->bytes = count;
+ ),
+
+ TP_printk("ata_port=%u ata_dev=%u tag=%d flags=%s offset=%u bytes=%u",
+ __entry->ata_port, __entry->ata_dev, __entry->tag,
+ __parse_tf_flags(__entry->flags),
+ __entry->offset, __entry->bytes)
+);
+
+DEFINE_EVENT(ata_transfer_data_template, ata_sff_pio_transfer_data,
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned int offset, unsigned int count),
+ TP_ARGS(qc, offset, count));
+
+DEFINE_EVENT(ata_transfer_data_template, atapi_pio_transfer_data,
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned int offset, unsigned int count),
+ TP_ARGS(qc, offset, count));
+
+DEFINE_EVENT(ata_transfer_data_template, atapi_send_cdb,
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned int offset, unsigned int count),
+ TP_ARGS(qc, offset, count));
+
+DECLARE_EVENT_CLASS(ata_sff_template,
+
+ TP_PROTO(struct ata_port *ap),
+
+ TP_ARGS(ap),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned char, hsm_state )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = ap->print_id;
+ __entry->hsm_state = ap->hsm_task_state;
+ ),
+
+ TP_printk("ata_port=%u task_state=%s",
+ __entry->ata_port,
+ show_sff_hsm_state_name(__entry->hsm_state))
+);
+
+DEFINE_EVENT(ata_sff_template, ata_sff_flush_pio_task,
+ TP_PROTO(struct ata_port *ap),
+ TP_ARGS(ap));
+
#endif /* _TRACE_LIBATA_H */
/* This part must be outside protection */
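
Note: each DEFINE_EVENT() above generates a trace_<name>() helper taking the
TP_PROTO arguments. A minimal sketch of emitting one of them, assuming a
hypothetical wrapper (the real call sites are in drivers/ata/libata-eh.c):

	#include <trace/events/libata.h>

	/* example_freeze() is invented for illustration; the tracepoint
	 * itself comes from DEFINE_EVENT(ata_port_eh_begin_template,
	 * ata_port_freeze, ...) above. */
	static void example_freeze(struct ata_port *ap)
	{
		trace_ata_port_freeze(ap);	/* records ap->print_id */
	}
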
diff --git a/include/trace/events/lock.h b/include/trace/events/lock.h
index d7512129a324..9ebd081e057e 100644
--- a/include/trace/events/lock.h
+++ b/include/trace/events/lock.h
@@ -5,11 +5,22 @@
#if !defined(_TRACE_LOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_LOCK_H
-#include <linux/lockdep.h>
+#include <linux/sched.h>
#include <linux/tracepoint.h>
+/* flags for lock:contention_begin */
+#define LCB_F_SPIN (1U << 0)
+#define LCB_F_READ (1U << 1)
+#define LCB_F_WRITE (1U << 2)
+#define LCB_F_RT (1U << 3)
+#define LCB_F_PERCPU (1U << 4)
+#define LCB_F_MUTEX (1U << 5)
+
+
#ifdef CONFIG_LOCKDEP
+#include <linux/lockdep.h>
+
TRACE_EVENT(lock_acquire,
TP_PROTO(struct lockdep_map *lock, unsigned int subclass,
@@ -78,8 +89,54 @@ DEFINE_EVENT(lock, lock_acquired,
TP_ARGS(lock, ip)
);
-#endif
-#endif
+#endif /* CONFIG_LOCK_STAT */
+#endif /* CONFIG_LOCKDEP */
+
+TRACE_EVENT(contention_begin,
+
+ TP_PROTO(void *lock, unsigned int flags),
+
+ TP_ARGS(lock, flags),
+
+ TP_STRUCT__entry(
+ __field(void *, lock_addr)
+ __field(unsigned int, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->lock_addr = lock;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("%p (flags=%s)", __entry->lock_addr,
+ __print_flags(__entry->flags, "|",
+ { LCB_F_SPIN, "SPIN" },
+ { LCB_F_READ, "READ" },
+ { LCB_F_WRITE, "WRITE" },
+ { LCB_F_RT, "RT" },
+ { LCB_F_PERCPU, "PERCPU" },
+ { LCB_F_MUTEX, "MUTEX" }
+ ))
+);
+
+TRACE_EVENT(contention_end,
+
+ TP_PROTO(void *lock, int ret),
+
+ TP_ARGS(lock, ret),
+
+ TP_STRUCT__entry(
+ __field(void *, lock_addr)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->lock_addr = lock;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("%p (ret=%d)", __entry->lock_addr, __entry->ret)
+);
#endif /* _TRACE_LOCK_H */
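
Note: the two new events bracket a contended acquisition: contention_begin
carries LCB_F_* flags describing the primitive, contention_end the outcome.
A hedged sketch of the pairing (example_wait_for_mutex() is hypothetical;
the real call sites live in kernel/locking/):

	#include <trace/events/lock.h>

	static int example_lock_slowpath(struct mutex *lock)
	{
		int ret;

		trace_contention_begin(lock, LCB_F_MUTEX);
		ret = example_wait_for_mutex(lock);	/* hypothetical wait */
		trace_contention_end(lock, ret);	/* 0 on success */
		return ret;
	}
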
diff --git a/include/trace/events/maple_tree.h b/include/trace/events/maple_tree.h
new file mode 100644
index 000000000000..2be403bdc2bd
--- /dev/null
+++ b/include/trace/events/maple_tree.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM maple_tree
+
+#if !defined(_TRACE_MM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MM_H
+
+
+#include <linux/tracepoint.h>
+
+struct ma_state;
+
+TRACE_EVENT(ma_op,
+
+ TP_PROTO(const char *fn, struct ma_state *mas),
+
+ TP_ARGS(fn, mas),
+
+ TP_STRUCT__entry(
+ __field(const char *, fn)
+ __field(unsigned long, min)
+ __field(unsigned long, max)
+ __field(unsigned long, index)
+ __field(unsigned long, last)
+ __field(void *, node)
+ ),
+
+ TP_fast_assign(
+ __entry->fn = fn;
+ __entry->min = mas->min;
+ __entry->max = mas->max;
+ __entry->index = mas->index;
+ __entry->last = mas->last;
+ __entry->node = mas->node;
+ ),
+
+ TP_printk("%s\tNode: %p (%lu %lu) range: %lu-%lu",
+ __entry->fn,
+ (void *) __entry->node,
+ (unsigned long) __entry->min,
+ (unsigned long) __entry->max,
+ (unsigned long) __entry->index,
+ (unsigned long) __entry->last
+ )
+)
+TRACE_EVENT(ma_read,
+
+ TP_PROTO(const char *fn, struct ma_state *mas),
+
+ TP_ARGS(fn, mas),
+
+ TP_STRUCT__entry(
+ __field(const char *, fn)
+ __field(unsigned long, min)
+ __field(unsigned long, max)
+ __field(unsigned long, index)
+ __field(unsigned long, last)
+ __field(void *, node)
+ ),
+
+ TP_fast_assign(
+ __entry->fn = fn;
+ __entry->min = mas->min;
+ __entry->max = mas->max;
+ __entry->index = mas->index;
+ __entry->last = mas->last;
+ __entry->node = mas->node;
+ ),
+
+ TP_printk("%s\tNode: %p (%lu %lu) range: %lu-%lu",
+ __entry->fn,
+ (void *) __entry->node,
+ (unsigned long) __entry->min,
+ (unsigned long) __entry->max,
+ (unsigned long) __entry->index,
+ (unsigned long) __entry->last
+ )
+)
+
+TRACE_EVENT(ma_write,
+
+ TP_PROTO(const char *fn, struct ma_state *mas, unsigned long piv,
+ void *val),
+
+ TP_ARGS(fn, mas, piv, val),
+
+ TP_STRUCT__entry(
+ __field(const char *, fn)
+ __field(unsigned long, min)
+ __field(unsigned long, max)
+ __field(unsigned long, index)
+ __field(unsigned long, last)
+ __field(unsigned long, piv)
+ __field(void *, val)
+ __field(void *, node)
+ ),
+
+ TP_fast_assign(
+ __entry->fn = fn;
+ __entry->min = mas->min;
+ __entry->max = mas->max;
+ __entry->index = mas->index;
+ __entry->last = mas->last;
+ __entry->piv = piv;
+ __entry->val = val;
+ __entry->node = mas->node;
+ ),
+
+ TP_printk("%s\tNode %p (%lu %lu) range:%lu-%lu piv (%lu) val %p",
+ __entry->fn,
+ (void *) __entry->node,
+ (unsigned long) __entry->min,
+ (unsigned long) __entry->max,
+ (unsigned long) __entry->index,
+ (unsigned long) __entry->last,
+ (unsigned long) __entry->piv,
+ (void *) __entry->val
+ )
+)
+#endif /* _TRACE_MM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
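
Note: all three maple_tree events take the calling function's name plus the
ma_state, so the node pointer and index-last range are captured at the call
site. A minimal sketch, assuming a hypothetical caller in lib/maple_tree.c:

	#include <trace/events/maple_tree.h>

	static void example_traced_read(struct ma_state *mas)
	{
		trace_ma_read(__func__, mas);	/* logs node, min/max, range */
		/* ... perform the actual tree walk ... */
	}
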
diff --git a/include/trace/events/mctp.h b/include/trace/events/mctp.h
new file mode 100644
index 000000000000..165cf25f77a7
--- /dev/null
+++ b/include/trace/events/mctp.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mctp
+
+#if !defined(_TRACE_MCTP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MCTP_H
+
+#include <linux/tracepoint.h>
+
+#ifndef __TRACE_MCTP_ENUMS
+#define __TRACE_MCTP_ENUMS
+enum {
+ MCTP_TRACE_KEY_TIMEOUT,
+ MCTP_TRACE_KEY_REPLIED,
+ MCTP_TRACE_KEY_INVALIDATED,
+ MCTP_TRACE_KEY_CLOSED,
+ MCTP_TRACE_KEY_DROPPED,
+};
+#endif /* __TRACE_MCTP_ENUMS */
+
+TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_TIMEOUT);
+TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_REPLIED);
+TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_INVALIDATED);
+TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_CLOSED);
+TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_DROPPED);
+
+TRACE_EVENT(mctp_key_acquire,
+ TP_PROTO(const struct mctp_sk_key *key),
+ TP_ARGS(key),
+ TP_STRUCT__entry(
+ __field(__u8, paddr)
+ __field(__u8, laddr)
+ __field(__u8, tag)
+ ),
+ TP_fast_assign(
+ __entry->paddr = key->peer_addr;
+ __entry->laddr = key->local_addr;
+ __entry->tag = key->tag;
+ ),
+ TP_printk("local %d, peer %d, tag %1x",
+ __entry->laddr,
+ __entry->paddr,
+ __entry->tag
+ )
+);
+
+TRACE_EVENT(mctp_key_release,
+ TP_PROTO(const struct mctp_sk_key *key, int reason),
+ TP_ARGS(key, reason),
+ TP_STRUCT__entry(
+ __field(__u8, paddr)
+ __field(__u8, laddr)
+ __field(__u8, tag)
+ __field(int, reason)
+ ),
+ TP_fast_assign(
+ __entry->paddr = key->peer_addr;
+ __entry->laddr = key->local_addr;
+ __entry->tag = key->tag;
+ __entry->reason = reason;
+ ),
+ TP_printk("local %d, peer %d, tag %1x %s",
+ __entry->laddr,
+ __entry->paddr,
+ __entry->tag,
+ __print_symbolic(__entry->reason,
+ { MCTP_TRACE_KEY_TIMEOUT, "timeout" },
+ { MCTP_TRACE_KEY_REPLIED, "replied" },
+ { MCTP_TRACE_KEY_INVALIDATED, "invalidated" },
+ { MCTP_TRACE_KEY_CLOSED, "closed" },
+ { MCTP_TRACE_KEY_DROPPED, "dropped" })
+ )
+);
+
+#endif
+
+#include <trace/define_trace.h>
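
Note: mctp_key_release takes one of the MCTP_TRACE_KEY_* reasons above,
which __print_symbolic() renders as text. An illustrative sketch
(example_key_timeout() is invented; real callers are in net/mctp/route.c):

	#include <trace/events/mctp.h>

	static void example_key_timeout(struct mctp_sk_key *key)
	{
		trace_mctp_key_release(key, MCTP_TRACE_KEY_TIMEOUT);
		/* ... unhash the key and drop its reference ... */
	}
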
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index 705b33d1e395..061b5128f335 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -20,7 +20,9 @@
EM( MR_SYSCALL, "syscall_or_cpuset") \
EM( MR_MEMPOLICY_MBIND, "mempolicy_mbind") \
EM( MR_NUMA_MISPLACED, "numa_misplaced") \
- EMe(MR_CONTIG_RANGE, "contig_range")
+ EM( MR_CONTIG_RANGE, "contig_range") \
+ EM( MR_LONGTERM_PIN, "longterm_pin") \
+ EMe(MR_DEMOTION, "demotion")
/*
* First define the enums in the above macros to be exported to userspace
@@ -46,13 +48,18 @@ MIGRATE_REASON
TRACE_EVENT(mm_migrate_pages,
TP_PROTO(unsigned long succeeded, unsigned long failed,
- enum migrate_mode mode, int reason),
+ unsigned long thp_succeeded, unsigned long thp_failed,
+ unsigned long thp_split, enum migrate_mode mode, int reason),
- TP_ARGS(succeeded, failed, mode, reason),
+ TP_ARGS(succeeded, failed, thp_succeeded, thp_failed,
+ thp_split, mode, reason),
TP_STRUCT__entry(
__field( unsigned long, succeeded)
__field( unsigned long, failed)
+ __field( unsigned long, thp_succeeded)
+ __field( unsigned long, thp_failed)
+ __field( unsigned long, thp_split)
__field( enum migrate_mode, mode)
__field( int, reason)
),
@@ -60,16 +67,75 @@ TRACE_EVENT(mm_migrate_pages,
TP_fast_assign(
__entry->succeeded = succeeded;
__entry->failed = failed;
+ __entry->thp_succeeded = thp_succeeded;
+ __entry->thp_failed = thp_failed;
+ __entry->thp_split = thp_split;
__entry->mode = mode;
__entry->reason = reason;
),
- TP_printk("nr_succeeded=%lu nr_failed=%lu mode=%s reason=%s",
+ TP_printk("nr_succeeded=%lu nr_failed=%lu nr_thp_succeeded=%lu nr_thp_failed=%lu nr_thp_split=%lu mode=%s reason=%s",
__entry->succeeded,
__entry->failed,
+ __entry->thp_succeeded,
+ __entry->thp_failed,
+ __entry->thp_split,
__print_symbolic(__entry->mode, MIGRATE_MODE),
__print_symbolic(__entry->reason, MIGRATE_REASON))
);
+
+TRACE_EVENT(mm_migrate_pages_start,
+
+ TP_PROTO(enum migrate_mode mode, int reason),
+
+ TP_ARGS(mode, reason),
+
+ TP_STRUCT__entry(
+ __field(enum migrate_mode, mode)
+ __field(int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->mode = mode;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("mode=%s reason=%s",
+ __print_symbolic(__entry->mode, MIGRATE_MODE),
+ __print_symbolic(__entry->reason, MIGRATE_REASON))
+);
+
+DECLARE_EVENT_CLASS(migration_pte,
+
+ TP_PROTO(unsigned long addr, unsigned long pte, int order),
+
+ TP_ARGS(addr, pte, order),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, addr)
+ __field(unsigned long, pte)
+ __field(int, order)
+ ),
+
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->pte = pte;
+ __entry->order = order;
+ ),
+
+ TP_printk("addr=%lx, pte=%lx order=%d", __entry->addr, __entry->pte, __entry->order)
+);
+
+DEFINE_EVENT(migration_pte, set_migration_pte,
+ TP_PROTO(unsigned long addr, unsigned long pte, int order),
+ TP_ARGS(addr, pte, order)
+);
+
+DEFINE_EVENT(migration_pte, remove_migration_pte,
+ TP_PROTO(unsigned long addr, unsigned long pte, int order),
+ TP_ARGS(addr, pte, order)
+);
+
#endif /* _TRACE_MIGRATE_H */
/* This part must be outside protection */
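
Note: mm_migrate_pages_start fires before a migration pass and
mm_migrate_pages reports the totals afterwards, now including the THP
counters added above. A hedged sketch of the bracket (counter names are
illustrative; the real caller is migrate_pages() in mm/migrate.c):

	#include <trace/events/migrate.h>

	static void example_migrate_pass(enum migrate_mode mode, int reason)
	{
		unsigned long ok = 0, fail = 0;
		unsigned long thp_ok = 0, thp_fail = 0, thp_split = 0;

		trace_mm_migrate_pages_start(mode, reason);
		/* ... migrate pages, updating the counters ... */
		trace_mm_migrate_pages(ok, fail, thp_ok, thp_fail,
				       thp_split, mode, reason);
	}
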
diff --git a/include/trace/events/mmap.h b/include/trace/events/mmap.h
new file mode 100644
index 000000000000..216de5f03621
--- /dev/null
+++ b/include/trace/events/mmap.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mmap
+
+#if !defined(_TRACE_MMAP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MMAP_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(vm_unmapped_area,
+
+ TP_PROTO(unsigned long addr, struct vm_unmapped_area_info *info),
+
+ TP_ARGS(addr, info),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, addr)
+ __field(unsigned long, total_vm)
+ __field(unsigned long, flags)
+ __field(unsigned long, length)
+ __field(unsigned long, low_limit)
+ __field(unsigned long, high_limit)
+ __field(unsigned long, align_mask)
+ __field(unsigned long, align_offset)
+ ),
+
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->total_vm = current->mm->total_vm;
+ __entry->flags = info->flags;
+ __entry->length = info->length;
+ __entry->low_limit = info->low_limit;
+ __entry->high_limit = info->high_limit;
+ __entry->align_mask = info->align_mask;
+ __entry->align_offset = info->align_offset;
+ ),
+
+ TP_printk("addr=0x%lx err=%ld total_vm=0x%lx flags=0x%lx len=0x%lx lo=0x%lx hi=0x%lx mask=0x%lx ofs=0x%lx\n",
+ IS_ERR_VALUE(__entry->addr) ? 0 : __entry->addr,
+ IS_ERR_VALUE(__entry->addr) ? __entry->addr : 0,
+ __entry->total_vm, __entry->flags, __entry->length,
+ __entry->low_limit, __entry->high_limit, __entry->align_mask,
+ __entry->align_offset)
+);
+
+TRACE_EVENT(vma_mas_szero,
+ TP_PROTO(struct maple_tree *mt, unsigned long start,
+ unsigned long end),
+
+ TP_ARGS(mt, start, end),
+
+ TP_STRUCT__entry(
+ __field(struct maple_tree *, mt)
+ __field(unsigned long, start)
+ __field(unsigned long, end)
+ ),
+
+ TP_fast_assign(
+ __entry->mt = mt;
+ __entry->start = start;
+ __entry->end = end;
+ ),
+
+ TP_printk("mt_mod %p, (NULL), SNULL, %lu, %lu,",
+ __entry->mt,
+ (unsigned long) __entry->start,
+ (unsigned long) __entry->end
+ )
+);
+
+TRACE_EVENT(vma_store,
+ TP_PROTO(struct maple_tree *mt, struct vm_area_struct *vma),
+
+ TP_ARGS(mt, vma),
+
+ TP_STRUCT__entry(
+ __field(struct maple_tree *, mt)
+ __field(struct vm_area_struct *, vma)
+ __field(unsigned long, vm_start)
+ __field(unsigned long, vm_end)
+ ),
+
+ TP_fast_assign(
+ __entry->mt = mt;
+ __entry->vma = vma;
+ __entry->vm_start = vma->vm_start;
+ __entry->vm_end = vma->vm_end - 1;
+ ),
+
+ TP_printk("mt_mod %p, (%p), STORE, %lu, %lu,",
+ __entry->mt, __entry->vma,
+ (unsigned long) __entry->vm_start,
+ (unsigned long) __entry->vm_end
+ )
+);
+
+
+TRACE_EVENT(exit_mmap,
+ TP_PROTO(struct mm_struct *mm),
+
+ TP_ARGS(mm),
+
+ TP_STRUCT__entry(
+ __field(struct mm_struct *, mm)
+ __field(struct maple_tree *, mt)
+ ),
+
+ TP_fast_assign(
+ __entry->mm = mm;
+ __entry->mt = &mm->mm_mt;
+ ),
+
+ TP_printk("mt_mod %p, DESTROY\n",
+ __entry->mt
+ )
+);
+
+#endif
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
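
Note: vm_unmapped_area's TP_printk splits a single return value into
addr=/err= with IS_ERR_VALUE(), so one event covers both outcomes. An
illustrative call site (in the tree the tracepoint is emitted from
mm/mmap.c itself; the wrapper here is invented):

	#include <trace/events/mmap.h>

	static unsigned long example_get_area(struct vm_unmapped_area_info *info)
	{
		unsigned long addr = vm_unmapped_area(info);	/* addr or -errno */

		trace_vm_unmapped_area(addr, info);
		return addr;
	}
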
diff --git a/include/trace/events/mmap_lock.h b/include/trace/events/mmap_lock.h
new file mode 100644
index 000000000000..14db8044c1ff
--- /dev/null
+++ b/include/trace/events/mmap_lock.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mmap_lock
+
+#if !defined(_TRACE_MMAP_LOCK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MMAP_LOCK_H
+
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+
+struct mm_struct;
+
+extern int trace_mmap_lock_reg(void);
+extern void trace_mmap_lock_unreg(void);
+
+DECLARE_EVENT_CLASS(mmap_lock,
+
+ TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write),
+
+ TP_ARGS(mm, memcg_path, write),
+
+ TP_STRUCT__entry(
+ __field(struct mm_struct *, mm)
+ __string(memcg_path, memcg_path)
+ __field(bool, write)
+ ),
+
+ TP_fast_assign(
+ __entry->mm = mm;
+ __assign_str(memcg_path, memcg_path);
+ __entry->write = write;
+ ),
+
+ TP_printk(
+ "mm=%p memcg_path=%s write=%s",
+ __entry->mm,
+ __get_str(memcg_path),
+ __entry->write ? "true" : "false"
+ )
+);
+
+#define DEFINE_MMAP_LOCK_EVENT(name) \
+ DEFINE_EVENT_FN(mmap_lock, name, \
+ TP_PROTO(struct mm_struct *mm, const char *memcg_path, \
+ bool write), \
+ TP_ARGS(mm, memcg_path, write), \
+ trace_mmap_lock_reg, trace_mmap_lock_unreg)
+
+DEFINE_MMAP_LOCK_EVENT(mmap_lock_start_locking);
+DEFINE_MMAP_LOCK_EVENT(mmap_lock_released);
+
+TRACE_EVENT_FN(mmap_lock_acquire_returned,
+
+ TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write,
+ bool success),
+
+ TP_ARGS(mm, memcg_path, write, success),
+
+ TP_STRUCT__entry(
+ __field(struct mm_struct *, mm)
+ __string(memcg_path, memcg_path)
+ __field(bool, write)
+ __field(bool, success)
+ ),
+
+ TP_fast_assign(
+ __entry->mm = mm;
+ __assign_str(memcg_path, memcg_path);
+ __entry->write = write;
+ __entry->success = success;
+ ),
+
+ TP_printk(
+ "mm=%p memcg_path=%s write=%s success=%s",
+ __entry->mm,
+ __get_str(memcg_path),
+ __entry->write ? "true" : "false",
+ __entry->success ? "true" : "false"
+ ),
+
+ trace_mmap_lock_reg, trace_mmap_lock_unreg
+);
+
+#endif /* _TRACE_MMAP_LOCK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
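
Note: unlike plain DEFINE_EVENT(), the _FN variants attach
trace_mmap_lock_reg()/trace_mmap_lock_unreg() callbacks that run when the
first probe is enabled and the last one removed, letting mm/mmap_lock.c set
up its memcg-path buffers lazily. A sketch of a call site, assuming a
hypothetical example_memcg_path() helper:

	#include <linux/mmap_lock.h>
	#include <trace/events/mmap_lock.h>

	static void example_lock_for_write(struct mm_struct *mm)
	{
		trace_mmap_lock_start_locking(mm, example_memcg_path(mm), true);
		down_write(&mm->mmap_lock);
		trace_mmap_lock_acquire_returned(mm, example_memcg_path(mm),
						 true, true);	/* write, ok */
	}
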
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index a1675d43777e..e87cb2b80ed3 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -13,46 +13,58 @@
* Thus most bits set go first.
*/
-#define __def_gfpflag_names \
- {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
- {(unsigned long)GFP_TRANSHUGE_LIGHT, "GFP_TRANSHUGE_LIGHT"}, \
- {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"},\
- {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
- {(unsigned long)GFP_USER, "GFP_USER"}, \
- {(unsigned long)GFP_KERNEL_ACCOUNT, "GFP_KERNEL_ACCOUNT"}, \
- {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \
- {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \
- {(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \
- {(unsigned long)GFP_NOIO, "GFP_NOIO"}, \
- {(unsigned long)GFP_NOWAIT, "GFP_NOWAIT"}, \
- {(unsigned long)GFP_DMA, "GFP_DMA"}, \
- {(unsigned long)__GFP_HIGHMEM, "__GFP_HIGHMEM"}, \
- {(unsigned long)GFP_DMA32, "GFP_DMA32"}, \
- {(unsigned long)__GFP_HIGH, "__GFP_HIGH"}, \
- {(unsigned long)__GFP_ATOMIC, "__GFP_ATOMIC"}, \
- {(unsigned long)__GFP_IO, "__GFP_IO"}, \
- {(unsigned long)__GFP_FS, "__GFP_FS"}, \
- {(unsigned long)__GFP_NOWARN, "__GFP_NOWARN"}, \
- {(unsigned long)__GFP_RETRY_MAYFAIL, "__GFP_RETRY_MAYFAIL"}, \
- {(unsigned long)__GFP_NOFAIL, "__GFP_NOFAIL"}, \
- {(unsigned long)__GFP_NORETRY, "__GFP_NORETRY"}, \
- {(unsigned long)__GFP_COMP, "__GFP_COMP"}, \
- {(unsigned long)__GFP_ZERO, "__GFP_ZERO"}, \
- {(unsigned long)__GFP_NOMEMALLOC, "__GFP_NOMEMALLOC"}, \
- {(unsigned long)__GFP_MEMALLOC, "__GFP_MEMALLOC"}, \
- {(unsigned long)__GFP_HARDWALL, "__GFP_HARDWALL"}, \
- {(unsigned long)__GFP_THISNODE, "__GFP_THISNODE"}, \
- {(unsigned long)__GFP_RECLAIMABLE, "__GFP_RECLAIMABLE"}, \
- {(unsigned long)__GFP_MOVABLE, "__GFP_MOVABLE"}, \
- {(unsigned long)__GFP_ACCOUNT, "__GFP_ACCOUNT"}, \
- {(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \
- {(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \
- {(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\
- {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"}\
+#define gfpflag_string(flag) {(__force unsigned long)flag, #flag}
+
+#define __def_gfpflag_names \
+ gfpflag_string(GFP_TRANSHUGE), \
+ gfpflag_string(GFP_TRANSHUGE_LIGHT), \
+ gfpflag_string(GFP_HIGHUSER_MOVABLE), \
+ gfpflag_string(GFP_HIGHUSER), \
+ gfpflag_string(GFP_USER), \
+ gfpflag_string(GFP_KERNEL_ACCOUNT), \
+ gfpflag_string(GFP_KERNEL), \
+ gfpflag_string(GFP_NOFS), \
+ gfpflag_string(GFP_ATOMIC), \
+ gfpflag_string(GFP_NOIO), \
+ gfpflag_string(GFP_NOWAIT), \
+ gfpflag_string(GFP_DMA), \
+ gfpflag_string(__GFP_HIGHMEM), \
+ gfpflag_string(GFP_DMA32), \
+ gfpflag_string(__GFP_HIGH), \
+ gfpflag_string(__GFP_ATOMIC), \
+ gfpflag_string(__GFP_IO), \
+ gfpflag_string(__GFP_FS), \
+ gfpflag_string(__GFP_NOWARN), \
+ gfpflag_string(__GFP_RETRY_MAYFAIL), \
+ gfpflag_string(__GFP_NOFAIL), \
+ gfpflag_string(__GFP_NORETRY), \
+ gfpflag_string(__GFP_COMP), \
+ gfpflag_string(__GFP_ZERO), \
+ gfpflag_string(__GFP_NOMEMALLOC), \
+ gfpflag_string(__GFP_MEMALLOC), \
+ gfpflag_string(__GFP_HARDWALL), \
+ gfpflag_string(__GFP_THISNODE), \
+ gfpflag_string(__GFP_RECLAIMABLE), \
+ gfpflag_string(__GFP_MOVABLE), \
+ gfpflag_string(__GFP_ACCOUNT), \
+ gfpflag_string(__GFP_WRITE), \
+ gfpflag_string(__GFP_RECLAIM), \
+ gfpflag_string(__GFP_DIRECT_RECLAIM), \
+ gfpflag_string(__GFP_KSWAPD_RECLAIM), \
+ gfpflag_string(__GFP_ZEROTAGS)
+
+#ifdef CONFIG_KASAN_HW_TAGS
+#define __def_gfpflag_names_kasan , \
+ gfpflag_string(__GFP_SKIP_ZERO), \
+ gfpflag_string(__GFP_SKIP_KASAN_POISON), \
+ gfpflag_string(__GFP_SKIP_KASAN_UNPOISON)
+#else
+#define __def_gfpflag_names_kasan
+#endif
#define show_gfp_flags(flags) \
(flags) ? __print_flags(flags, "|", \
- __def_gfpflag_names \
+ __def_gfpflag_names __def_gfpflag_names_kasan \
) : "none"
#ifdef CONFIG_MMU
@@ -73,12 +85,24 @@
#define IF_HAVE_PG_HWPOISON(flag,string)
#endif
-#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
+#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
#define IF_HAVE_PG_IDLE(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_IDLE(flag,string)
#endif
+#ifdef CONFIG_64BIT
+#define IF_HAVE_PG_ARCH_2(flag,string) ,{1UL << flag, string}
+#else
+#define IF_HAVE_PG_ARCH_2(flag,string)
+#endif
+
+#ifdef CONFIG_KASAN_HW_TAGS
+#define IF_HAVE_PG_SKIP_KASAN_POISON(flag,string) ,{1UL << flag, string}
+#else
+#define IF_HAVE_PG_SKIP_KASAN_POISON(flag,string)
+#endif
+
#define __def_pageflag_names \
{1UL << PG_locked, "locked" }, \
{1UL << PG_waiters, "waiters" }, \
@@ -105,7 +129,9 @@ IF_HAVE_PG_MLOCK(PG_mlocked, "mlocked" ) \
IF_HAVE_PG_UNCACHED(PG_uncached, "uncached" ) \
IF_HAVE_PG_HWPOISON(PG_hwpoison, "hwpoison" ) \
IF_HAVE_PG_IDLE(PG_young, "young" ) \
-IF_HAVE_PG_IDLE(PG_idle, "idle" )
+IF_HAVE_PG_IDLE(PG_idle, "idle" ) \
+IF_HAVE_PG_ARCH_2(PG_arch_2, "arch_2" ) \
+IF_HAVE_PG_SKIP_KASAN_POISON(PG_skip_kasan_poison, "skip_kasan_poison")
#define show_page_flags(flags) \
(flags) ? __print_flags(flags, "|", \
@@ -130,6 +156,12 @@ IF_HAVE_PG_IDLE(PG_idle, "idle" )
#define IF_HAVE_VM_SOFTDIRTY(flag,name)
#endif
+#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
+# define IF_HAVE_UFFD_MINOR(flag, name) {flag, name},
+#else
+# define IF_HAVE_UFFD_MINOR(flag, name)
+#endif
+
#define __def_vmaflag_names \
{VM_READ, "read" }, \
{VM_WRITE, "write" }, \
@@ -141,8 +173,8 @@ IF_HAVE_PG_IDLE(PG_idle, "idle" )
{VM_MAYSHARE, "mayshare" }, \
{VM_GROWSDOWN, "growsdown" }, \
{VM_UFFD_MISSING, "uffd_missing" }, \
+IF_HAVE_UFFD_MINOR(VM_UFFD_MINOR, "uffd_minor" ) \
{VM_PFNMAP, "pfnmap" }, \
- {VM_DENYWRITE, "denywrite" }, \
{VM_UFFD_WP, "uffd_wp" }, \
{VM_LOCKED, "locked" }, \
{VM_IO, "io" }, \
@@ -154,6 +186,7 @@ IF_HAVE_PG_IDLE(PG_idle, "idle" )
{VM_ACCOUNT, "account" }, \
{VM_NORESERVE, "noreserve" }, \
{VM_HUGETLB, "hugetlb" }, \
+ {VM_SYNC, "sync" }, \
__VM_ARCH_SPECIFIC_1 , \
{VM_WIPEONFORK, "wipeonfork" }, \
{VM_DONTDUMP, "dontdump" }, \
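
Note: the new gfpflag_string() helper derives each entry's string from the
symbol itself via the preprocessor's # operator, so names can no longer
drift out of sync with the flags. The trick in miniature, as ordinary
userspace C (all names invented for the demo):

	#include <stdio.h>

	#define MY_GFP_KERNEL	(1UL << 0)
	#define flag_string(flag) { (unsigned long)(flag), #flag }

	struct print_flags { unsigned long mask; const char *name; };

	int main(void)
	{
		struct print_flags tbl[] = { flag_string(MY_GFP_KERNEL) };

		/* prints: 0x1 -> MY_GFP_KERNEL */
		printf("0x%lx -> %s\n", tbl[0].mask, tbl[0].name);
		return 0;
	}
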
diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h
new file mode 100644
index 000000000000..563e48617374
--- /dev/null
+++ b/include/trace/events/mptcp.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mptcp
+
+#if !defined(_TRACE_MPTCP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MPTCP_H
+
+#include <linux/tracepoint.h>
+
+#define show_mapping_status(status) \
+ __print_symbolic(status, \
+ { 0, "MAPPING_OK" }, \
+ { 1, "MAPPING_INVALID" }, \
+ { 2, "MAPPING_EMPTY" }, \
+ { 3, "MAPPING_DATA_FIN" }, \
+ { 4, "MAPPING_DUMMY" })
+
+TRACE_EVENT(mptcp_subflow_get_send,
+
+ TP_PROTO(struct mptcp_subflow_context *subflow),
+
+ TP_ARGS(subflow),
+
+ TP_STRUCT__entry(
+ __field(bool, active)
+ __field(bool, free)
+ __field(u32, snd_wnd)
+ __field(u32, pace)
+ __field(u8, backup)
+ __field(u64, ratio)
+ ),
+
+ TP_fast_assign(
+ struct sock *ssk;
+
+ __entry->active = mptcp_subflow_active(subflow);
+ __entry->backup = subflow->backup;
+
+ if (subflow->tcp_sock && sk_fullsock(subflow->tcp_sock))
+ __entry->free = sk_stream_memory_free(subflow->tcp_sock);
+ else
+ __entry->free = 0;
+
+ ssk = mptcp_subflow_tcp_sock(subflow);
+ if (ssk && sk_fullsock(ssk)) {
+ __entry->snd_wnd = tcp_sk(ssk)->snd_wnd;
+ __entry->pace = ssk->sk_pacing_rate;
+ } else {
+ __entry->snd_wnd = 0;
+ __entry->pace = 0;
+ }
+
+ if (ssk && sk_fullsock(ssk) && __entry->pace)
+ __entry->ratio = div_u64((u64)ssk->sk_wmem_queued << 32, __entry->pace);
+ else
+ __entry->ratio = 0;
+ ),
+
+ TP_printk("active=%d free=%d snd_wnd=%u pace=%u backup=%u ratio=%llu",
+ __entry->active, __entry->free,
+ __entry->snd_wnd, __entry->pace,
+ __entry->backup, __entry->ratio)
+);
+
+DECLARE_EVENT_CLASS(mptcp_dump_mpext,
+
+ TP_PROTO(struct mptcp_ext *mpext),
+
+ TP_ARGS(mpext),
+
+ TP_STRUCT__entry(
+ __field(u64, data_ack)
+ __field(u64, data_seq)
+ __field(u32, subflow_seq)
+ __field(u16, data_len)
+ __field(u16, csum)
+ __field(u8, use_map)
+ __field(u8, dsn64)
+ __field(u8, data_fin)
+ __field(u8, use_ack)
+ __field(u8, ack64)
+ __field(u8, mpc_map)
+ __field(u8, frozen)
+ __field(u8, reset_transient)
+ __field(u8, reset_reason)
+ __field(u8, csum_reqd)
+ __field(u8, infinite_map)
+ ),
+
+ TP_fast_assign(
+ __entry->data_ack = mpext->ack64 ? mpext->data_ack : mpext->data_ack32;
+ __entry->data_seq = mpext->data_seq;
+ __entry->subflow_seq = mpext->subflow_seq;
+ __entry->data_len = mpext->data_len;
+ __entry->csum = (__force u16)mpext->csum;
+ __entry->use_map = mpext->use_map;
+ __entry->dsn64 = mpext->dsn64;
+ __entry->data_fin = mpext->data_fin;
+ __entry->use_ack = mpext->use_ack;
+ __entry->ack64 = mpext->ack64;
+ __entry->mpc_map = mpext->mpc_map;
+ __entry->frozen = mpext->frozen;
+ __entry->reset_transient = mpext->reset_transient;
+ __entry->reset_reason = mpext->reset_reason;
+ __entry->csum_reqd = mpext->csum_reqd;
+ __entry->infinite_map = mpext->infinite_map;
+ ),
+
+ TP_printk("data_ack=%llu data_seq=%llu subflow_seq=%u data_len=%u csum=%x use_map=%u dsn64=%u data_fin=%u use_ack=%u ack64=%u mpc_map=%u frozen=%u reset_transient=%u reset_reason=%u csum_reqd=%u infinite_map=%u",
+ __entry->data_ack, __entry->data_seq,
+ __entry->subflow_seq, __entry->data_len,
+ __entry->csum, __entry->use_map,
+ __entry->dsn64, __entry->data_fin,
+ __entry->use_ack, __entry->ack64,
+ __entry->mpc_map, __entry->frozen,
+ __entry->reset_transient, __entry->reset_reason,
+ __entry->csum_reqd, __entry->infinite_map)
+);
+
+DEFINE_EVENT(mptcp_dump_mpext, mptcp_sendmsg_frag,
+ TP_PROTO(struct mptcp_ext *mpext),
+ TP_ARGS(mpext));
+
+DEFINE_EVENT(mptcp_dump_mpext, get_mapping_status,
+ TP_PROTO(struct mptcp_ext *mpext),
+ TP_ARGS(mpext));
+
+TRACE_EVENT(ack_update_msk,
+
+ TP_PROTO(u64 data_ack, u64 old_snd_una,
+ u64 new_snd_una, u64 new_wnd_end,
+ u64 msk_wnd_end),
+
+ TP_ARGS(data_ack, old_snd_una,
+ new_snd_una, new_wnd_end,
+ msk_wnd_end),
+
+ TP_STRUCT__entry(
+ __field(u64, data_ack)
+ __field(u64, old_snd_una)
+ __field(u64, new_snd_una)
+ __field(u64, new_wnd_end)
+ __field(u64, msk_wnd_end)
+ ),
+
+ TP_fast_assign(
+ __entry->data_ack = data_ack;
+ __entry->old_snd_una = old_snd_una;
+ __entry->new_snd_una = new_snd_una;
+ __entry->new_wnd_end = new_wnd_end;
+ __entry->msk_wnd_end = msk_wnd_end;
+ ),
+
+ TP_printk("data_ack=%llu old_snd_una=%llu new_snd_una=%llu new_wnd_end=%llu msk_wnd_end=%llu",
+ __entry->data_ack, __entry->old_snd_una,
+ __entry->new_snd_una, __entry->new_wnd_end,
+ __entry->msk_wnd_end)
+);
+
+TRACE_EVENT(subflow_check_data_avail,
+
+ TP_PROTO(__u8 status, struct sk_buff *skb),
+
+ TP_ARGS(status, skb),
+
+ TP_STRUCT__entry(
+ __field(u8, status)
+ __field(const void *, skb)
+ ),
+
+ TP_fast_assign(
+ __entry->status = status;
+ __entry->skb = skb;
+ ),
+
+ TP_printk("mapping_status=%s, skb=%p",
+ show_mapping_status(__entry->status),
+ __entry->skb)
+);
+
+#endif /* _TRACE_MPTCP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
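
Note: mptcp_subflow_get_send snapshots the scheduler's view of one subflow
(activity, free send space, window, pacing ratio) at selection time. An
illustrative call (the real site is the subflow-picking loop in
net/mptcp/protocol.c; the wrapper is invented):

	#include <trace/events/mptcp.h>

	static void example_consider_subflow(struct mptcp_subflow_context *subflow)
	{
		trace_mptcp_subflow_get_send(subflow);
		/* ... score this subflow against the others ... */
	}
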
diff --git a/include/trace/events/neigh.h b/include/trace/events/neigh.h
index 62bb17516713..5eaa1fa99171 100644
--- a/include/trace/events/neigh.h
+++ b/include/trace/events/neigh.h
@@ -30,7 +30,7 @@ TRACE_EVENT(neigh_create,
TP_STRUCT__entry(
__field(u32, family)
- __dynamic_array(char, dev, IFNAMSIZ )
+ __string(dev, dev ? dev->name : "NULL")
__field(int, entries)
__field(u8, created)
__field(u8, gc_exempt)
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index 2399073c3afc..da611a7aaf97 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -260,13 +260,6 @@ DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_entry,
TP_ARGS(skb)
);
-DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_ni_entry,
-
- TP_PROTO(const struct sk_buff *skb),
-
- TP_ARGS(skb)
-);
-
DECLARE_EVENT_CLASS(net_dev_rx_exit_template,
TP_PROTO(int ret),
@@ -312,13 +305,6 @@ DEFINE_EVENT(net_dev_rx_exit_template, netif_rx_exit,
TP_ARGS(ret)
);
-DEFINE_EVENT(net_dev_rx_exit_template, netif_rx_ni_exit,
-
- TP_PROTO(int ret),
-
- TP_ARGS(ret)
-);
-
DEFINE_EVENT(net_dev_rx_exit_template, netif_receive_skb_list_exit,
TP_PROTO(int ret),
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
new file mode 100644
index 000000000000..beec534cbaab
--- /dev/null
+++ b/include/trace/events/netfs.h
@@ -0,0 +1,322 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Network filesystem support module tracepoints
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM netfs
+
+#if !defined(_TRACE_NETFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NETFS_H
+
+#include <linux/tracepoint.h>
+
+/*
+ * Define enums for tracing information.
+ */
+#define netfs_read_traces \
+ EM(netfs_read_trace_expanded, "EXPANDED ") \
+ EM(netfs_read_trace_readahead, "READAHEAD") \
+ EM(netfs_read_trace_readpage, "READPAGE ") \
+ E_(netfs_read_trace_write_begin, "WRITEBEGN")
+
+#define netfs_rreq_origins \
+ EM(NETFS_READAHEAD, "RA") \
+ EM(NETFS_READPAGE, "RP") \
+ E_(NETFS_READ_FOR_WRITE, "RW")
+
+#define netfs_rreq_traces \
+ EM(netfs_rreq_trace_assess, "ASSESS ") \
+ EM(netfs_rreq_trace_copy, "COPY ") \
+ EM(netfs_rreq_trace_done, "DONE ") \
+ EM(netfs_rreq_trace_free, "FREE ") \
+ EM(netfs_rreq_trace_resubmit, "RESUBMT") \
+ EM(netfs_rreq_trace_unlock, "UNLOCK ") \
+ E_(netfs_rreq_trace_unmark, "UNMARK ")
+
+#define netfs_sreq_sources \
+ EM(NETFS_FILL_WITH_ZEROES, "ZERO") \
+ EM(NETFS_DOWNLOAD_FROM_SERVER, "DOWN") \
+ EM(NETFS_READ_FROM_CACHE, "READ") \
+ E_(NETFS_INVALID_READ, "INVL") \
+
+#define netfs_sreq_traces \
+ EM(netfs_sreq_trace_download_instead, "RDOWN") \
+ EM(netfs_sreq_trace_free, "FREE ") \
+ EM(netfs_sreq_trace_prepare, "PREP ") \
+ EM(netfs_sreq_trace_resubmit_short, "SHORT") \
+ EM(netfs_sreq_trace_submit, "SUBMT") \
+ EM(netfs_sreq_trace_terminated, "TERM ") \
+ EM(netfs_sreq_trace_write, "WRITE") \
+ EM(netfs_sreq_trace_write_skip, "SKIP ") \
+ E_(netfs_sreq_trace_write_term, "WTERM")
+
+#define netfs_failures \
+ EM(netfs_fail_check_write_begin, "check-write-begin") \
+ EM(netfs_fail_copy_to_cache, "copy-to-cache") \
+ EM(netfs_fail_read, "read") \
+ EM(netfs_fail_short_read, "short-read") \
+ E_(netfs_fail_prepare_write, "prep-write")
+
+#define netfs_rreq_ref_traces \
+ EM(netfs_rreq_trace_get_hold, "GET HOLD ") \
+ EM(netfs_rreq_trace_get_subreq, "GET SUBREQ ") \
+ EM(netfs_rreq_trace_put_complete, "PUT COMPLT ") \
+ EM(netfs_rreq_trace_put_discard, "PUT DISCARD") \
+ EM(netfs_rreq_trace_put_failed, "PUT FAILED ") \
+ EM(netfs_rreq_trace_put_hold, "PUT HOLD ") \
+ EM(netfs_rreq_trace_put_subreq, "PUT SUBREQ ") \
+ EM(netfs_rreq_trace_put_zero_len, "PUT ZEROLEN") \
+ E_(netfs_rreq_trace_new, "NEW ")
+
+#define netfs_sreq_ref_traces \
+ EM(netfs_sreq_trace_get_copy_to_cache, "GET COPY2C ") \
+ EM(netfs_sreq_trace_get_resubmit, "GET RESUBMIT") \
+ EM(netfs_sreq_trace_get_short_read, "GET SHORTRD") \
+ EM(netfs_sreq_trace_new, "NEW ") \
+ EM(netfs_sreq_trace_put_clear, "PUT CLEAR ") \
+ EM(netfs_sreq_trace_put_failed, "PUT FAILED ") \
+ EM(netfs_sreq_trace_put_merged, "PUT MERGED ") \
+ EM(netfs_sreq_trace_put_no_copy, "PUT NO COPY") \
+ E_(netfs_sreq_trace_put_terminated, "PUT TERM ")
+
+#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+#undef EM
+#undef E_
+#define EM(a, b) a,
+#define E_(a, b) a
+
+enum netfs_read_trace { netfs_read_traces } __mode(byte);
+enum netfs_rreq_trace { netfs_rreq_traces } __mode(byte);
+enum netfs_sreq_trace { netfs_sreq_traces } __mode(byte);
+enum netfs_failure { netfs_failures } __mode(byte);
+enum netfs_rreq_ref_trace { netfs_rreq_ref_traces } __mode(byte);
+enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte);
+
+#endif
+
+/*
+ * Export enum symbols via userspace.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define E_(a, b) TRACE_DEFINE_ENUM(a);
+
+netfs_read_traces;
+netfs_rreq_origins;
+netfs_rreq_traces;
+netfs_sreq_sources;
+netfs_sreq_traces;
+netfs_failures;
+netfs_rreq_ref_traces;
+netfs_sreq_ref_traces;
+
+/*
+ * Now redefine the EM() and E_() macros to map the enums to the strings that
+ * will be printed in the output.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) { a, b },
+#define E_(a, b) { a, b }
+
+TRACE_EVENT(netfs_read,
+ TP_PROTO(struct netfs_io_request *rreq,
+ loff_t start, size_t len,
+ enum netfs_read_trace what),
+
+ TP_ARGS(rreq, start, len, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq )
+ __field(unsigned int, cookie )
+ __field(loff_t, start )
+ __field(size_t, len )
+ __field(enum netfs_read_trace, what )
+ __field(unsigned int, netfs_inode )
+ ),
+
+ TP_fast_assign(
+ __entry->rreq = rreq->debug_id;
+ __entry->cookie = rreq->cache_resources.debug_id;
+ __entry->start = start;
+ __entry->len = len;
+ __entry->what = what;
+ __entry->netfs_inode = rreq->inode->i_ino;
+ ),
+
+ TP_printk("R=%08x %s c=%08x ni=%x s=%llx %zx",
+ __entry->rreq,
+ __print_symbolic(__entry->what, netfs_read_traces),
+ __entry->cookie,
+ __entry->netfs_inode,
+ __entry->start, __entry->len)
+ );
+
+TRACE_EVENT(netfs_rreq,
+ TP_PROTO(struct netfs_io_request *rreq,
+ enum netfs_rreq_trace what),
+
+ TP_ARGS(rreq, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq )
+ __field(unsigned int, flags )
+ __field(enum netfs_io_origin, origin )
+ __field(enum netfs_rreq_trace, what )
+ ),
+
+ TP_fast_assign(
+ __entry->rreq = rreq->debug_id;
+ __entry->flags = rreq->flags;
+ __entry->origin = rreq->origin;
+ __entry->what = what;
+ ),
+
+ TP_printk("R=%08x %s %s f=%02x",
+ __entry->rreq,
+ __print_symbolic(__entry->origin, netfs_rreq_origins),
+ __print_symbolic(__entry->what, netfs_rreq_traces),
+ __entry->flags)
+ );
+
+TRACE_EVENT(netfs_sreq,
+ TP_PROTO(struct netfs_io_subrequest *sreq,
+ enum netfs_sreq_trace what),
+
+ TP_ARGS(sreq, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq )
+ __field(unsigned short, index )
+ __field(short, error )
+ __field(unsigned short, flags )
+ __field(enum netfs_io_source, source )
+ __field(enum netfs_sreq_trace, what )
+ __field(size_t, len )
+ __field(size_t, transferred )
+ __field(loff_t, start )
+ ),
+
+ TP_fast_assign(
+ __entry->rreq = sreq->rreq->debug_id;
+ __entry->index = sreq->debug_index;
+ __entry->error = sreq->error;
+ __entry->flags = sreq->flags;
+ __entry->source = sreq->source;
+ __entry->what = what;
+ __entry->len = sreq->len;
+ __entry->transferred = sreq->transferred;
+ __entry->start = sreq->start;
+ ),
+
+ TP_printk("R=%08x[%u] %s %s f=%02x s=%llx %zx/%zx e=%d",
+ __entry->rreq, __entry->index,
+ __print_symbolic(__entry->source, netfs_sreq_sources),
+ __print_symbolic(__entry->what, netfs_sreq_traces),
+ __entry->flags,
+ __entry->start, __entry->transferred, __entry->len,
+ __entry->error)
+ );
+
+TRACE_EVENT(netfs_failure,
+ TP_PROTO(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *sreq,
+ int error, enum netfs_failure what),
+
+ TP_ARGS(rreq, sreq, error, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq )
+ __field(short, index )
+ __field(short, error )
+ __field(unsigned short, flags )
+ __field(enum netfs_io_source, source )
+ __field(enum netfs_failure, what )
+ __field(size_t, len )
+ __field(size_t, transferred )
+ __field(loff_t, start )
+ ),
+
+ TP_fast_assign(
+ __entry->rreq = rreq->debug_id;
+ __entry->index = sreq ? sreq->debug_index : -1;
+ __entry->error = error;
+ __entry->flags = sreq ? sreq->flags : 0;
+ __entry->source = sreq ? sreq->source : NETFS_INVALID_READ;
+ __entry->what = what;
+ __entry->len = sreq ? sreq->len : rreq->len;
+ __entry->transferred = sreq ? sreq->transferred : 0;
+ __entry->start = sreq ? sreq->start : 0;
+ ),
+
+ TP_printk("R=%08x[%d] %s f=%02x s=%llx %zx/%zx %s e=%d",
+ __entry->rreq, __entry->index,
+ __print_symbolic(__entry->source, netfs_sreq_sources),
+ __entry->flags,
+ __entry->start, __entry->transferred, __entry->len,
+ __print_symbolic(__entry->what, netfs_failures),
+ __entry->error)
+ );
+
+TRACE_EVENT(netfs_rreq_ref,
+ TP_PROTO(unsigned int rreq_debug_id, int ref,
+ enum netfs_rreq_ref_trace what),
+
+ TP_ARGS(rreq_debug_id, ref, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq )
+ __field(int, ref )
+ __field(enum netfs_rreq_ref_trace, what )
+ ),
+
+ TP_fast_assign(
+ __entry->rreq = rreq_debug_id;
+ __entry->ref = ref;
+ __entry->what = what;
+ ),
+
+ TP_printk("R=%08x %s r=%u",
+ __entry->rreq,
+ __print_symbolic(__entry->what, netfs_rreq_ref_traces),
+ __entry->ref)
+ );
+
+TRACE_EVENT(netfs_sreq_ref,
+ TP_PROTO(unsigned int rreq_debug_id, unsigned int subreq_debug_index,
+ int ref, enum netfs_sreq_ref_trace what),
+
+ TP_ARGS(rreq_debug_id, subreq_debug_index, ref, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq )
+ __field(unsigned int, subreq )
+ __field(int, ref )
+ __field(enum netfs_sreq_ref_trace, what )
+ ),
+
+ TP_fast_assign(
+ __entry->rreq = rreq_debug_id;
+ __entry->subreq = subreq_debug_index;
+ __entry->ref = ref;
+ __entry->what = what;
+ ),
+
+ TP_printk("R=%08x[%x] %s r=%u",
+ __entry->rreq,
+ __entry->subreq,
+ __print_symbolic(__entry->what, netfs_sreq_ref_traces),
+ __entry->ref)
+ );
+
+#undef EM
+#undef E_
+#endif /* _TRACE_NETFS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
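
Note: the EM()/E_() lists above are expanded three times: as C enums, as
TRACE_DEFINE_ENUM() registrations, and as __print_symbolic() string tables.
The multi-expansion idiom in miniature, as plain runnable C (all names
invented for the demo):

	#include <stdio.h>

	#define my_traces \
		EM(my_trace_read,  "READ ") \
		E_(my_trace_write, "WRITE")

	#define EM(a, b) a,
	#define E_(a, b) a
	enum my_trace { my_traces };
	#undef EM
	#undef E_

	#define EM(a, b) { a, b },
	#define E_(a, b) { a, b }
	static const struct { int val; const char *name; } my_names[] = {
		my_traces
	};

	int main(void)
	{
		printf("%s\n", my_names[my_trace_write].name); /* WRITE */
		return 0;
	}
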
diff --git a/include/trace/events/netlink.h b/include/trace/events/netlink.h
new file mode 100644
index 000000000000..3b7be3b386a4
--- /dev/null
+++ b/include/trace/events/netlink.h
@@ -0,0 +1,29 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM netlink
+
+#if !defined(_TRACE_NETLINK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NETLINK_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(netlink_extack,
+
+ TP_PROTO(const char *msg),
+
+ TP_ARGS(msg),
+
+ TP_STRUCT__entry(
+ __string( msg, msg )
+ ),
+
+ TP_fast_assign(
+ __assign_str(msg, msg);
+ ),
+
+ TP_printk("msg=%s", __get_str(msg))
+);
+
+#endif /* _TRACE_NETLINK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
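
Note: netlink_extack records the extended-ACK message string itself via
__string()/__assign_str(). An illustrative guard-and-trace sketch
(example_record_extack() is invented; the real call happens where the
extack message is attached):

	#include <linux/netlink.h>
	#include <trace/events/netlink.h>

	static void example_record_extack(const struct netlink_ext_ack *extack)
	{
		if (extack && extack->_msg)
			trace_netlink_extack(extack->_msg);
	}
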
diff --git a/include/trace/events/nfs.h b/include/trace/events/nfs.h
new file mode 100644
index 000000000000..09ffdbb04134
--- /dev/null
+++ b/include/trace/events/nfs.h
@@ -0,0 +1,375 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Display helpers for NFS protocol elements
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2020, Oracle and/or its affiliates.
+ */
+
+#include <linux/nfs.h>
+#include <linux/nfs4.h>
+#include <uapi/linux/nfs.h>
+
+TRACE_DEFINE_ENUM(NFS_OK);
+TRACE_DEFINE_ENUM(NFSERR_PERM);
+TRACE_DEFINE_ENUM(NFSERR_NOENT);
+TRACE_DEFINE_ENUM(NFSERR_IO);
+TRACE_DEFINE_ENUM(NFSERR_NXIO);
+TRACE_DEFINE_ENUM(NFSERR_EAGAIN);
+TRACE_DEFINE_ENUM(NFSERR_ACCES);
+TRACE_DEFINE_ENUM(NFSERR_EXIST);
+TRACE_DEFINE_ENUM(NFSERR_XDEV);
+TRACE_DEFINE_ENUM(NFSERR_NODEV);
+TRACE_DEFINE_ENUM(NFSERR_NOTDIR);
+TRACE_DEFINE_ENUM(NFSERR_ISDIR);
+TRACE_DEFINE_ENUM(NFSERR_INVAL);
+TRACE_DEFINE_ENUM(NFSERR_FBIG);
+TRACE_DEFINE_ENUM(NFSERR_NOSPC);
+TRACE_DEFINE_ENUM(NFSERR_ROFS);
+TRACE_DEFINE_ENUM(NFSERR_MLINK);
+TRACE_DEFINE_ENUM(NFSERR_OPNOTSUPP);
+TRACE_DEFINE_ENUM(NFSERR_NAMETOOLONG);
+TRACE_DEFINE_ENUM(NFSERR_NOTEMPTY);
+TRACE_DEFINE_ENUM(NFSERR_DQUOT);
+TRACE_DEFINE_ENUM(NFSERR_STALE);
+TRACE_DEFINE_ENUM(NFSERR_REMOTE);
+TRACE_DEFINE_ENUM(NFSERR_WFLUSH);
+TRACE_DEFINE_ENUM(NFSERR_BADHANDLE);
+TRACE_DEFINE_ENUM(NFSERR_NOT_SYNC);
+TRACE_DEFINE_ENUM(NFSERR_BAD_COOKIE);
+TRACE_DEFINE_ENUM(NFSERR_NOTSUPP);
+TRACE_DEFINE_ENUM(NFSERR_TOOSMALL);
+TRACE_DEFINE_ENUM(NFSERR_SERVERFAULT);
+TRACE_DEFINE_ENUM(NFSERR_BADTYPE);
+TRACE_DEFINE_ENUM(NFSERR_JUKEBOX);
+
+#define show_nfs_status(x) \
+ __print_symbolic(x, \
+ { NFS_OK, "OK" }, \
+ { NFSERR_PERM, "PERM" }, \
+ { NFSERR_NOENT, "NOENT" }, \
+ { NFSERR_IO, "IO" }, \
+ { NFSERR_NXIO, "NXIO" }, \
+ { ECHILD, "CHILD" }, \
+ { NFSERR_EAGAIN, "AGAIN" }, \
+ { NFSERR_ACCES, "ACCES" }, \
+ { NFSERR_EXIST, "EXIST" }, \
+ { NFSERR_XDEV, "XDEV" }, \
+ { NFSERR_NODEV, "NODEV" }, \
+ { NFSERR_NOTDIR, "NOTDIR" }, \
+ { NFSERR_ISDIR, "ISDIR" }, \
+ { NFSERR_INVAL, "INVAL" }, \
+ { NFSERR_FBIG, "FBIG" }, \
+ { NFSERR_NOSPC, "NOSPC" }, \
+ { NFSERR_ROFS, "ROFS" }, \
+ { NFSERR_MLINK, "MLINK" }, \
+ { NFSERR_OPNOTSUPP, "OPNOTSUPP" }, \
+ { NFSERR_NAMETOOLONG, "NAMETOOLONG" }, \
+ { NFSERR_NOTEMPTY, "NOTEMPTY" }, \
+ { NFSERR_DQUOT, "DQUOT" }, \
+ { NFSERR_STALE, "STALE" }, \
+ { NFSERR_REMOTE, "REMOTE" }, \
+ { NFSERR_WFLUSH, "WFLUSH" }, \
+ { NFSERR_BADHANDLE, "BADHANDLE" }, \
+ { NFSERR_NOT_SYNC, "NOTSYNC" }, \
+ { NFSERR_BAD_COOKIE, "BADCOOKIE" }, \
+ { NFSERR_NOTSUPP, "NOTSUPP" }, \
+ { NFSERR_TOOSMALL, "TOOSMALL" }, \
+ { NFSERR_SERVERFAULT, "REMOTEIO" }, \
+ { NFSERR_BADTYPE, "BADTYPE" }, \
+ { NFSERR_JUKEBOX, "JUKEBOX" })
+
+TRACE_DEFINE_ENUM(NFS_UNSTABLE);
+TRACE_DEFINE_ENUM(NFS_DATA_SYNC);
+TRACE_DEFINE_ENUM(NFS_FILE_SYNC);
+
+#define show_nfs_stable_how(x) \
+ __print_symbolic(x, \
+ { NFS_UNSTABLE, "UNSTABLE" }, \
+ { NFS_DATA_SYNC, "DATA_SYNC" }, \
+ { NFS_FILE_SYNC, "FILE_SYNC" })
+
+TRACE_DEFINE_ENUM(NFS4_OK);
+TRACE_DEFINE_ENUM(NFS4ERR_ACCESS);
+TRACE_DEFINE_ENUM(NFS4ERR_ATTRNOTSUPP);
+TRACE_DEFINE_ENUM(NFS4ERR_ADMIN_REVOKED);
+TRACE_DEFINE_ENUM(NFS4ERR_BACK_CHAN_BUSY);
+TRACE_DEFINE_ENUM(NFS4ERR_BADCHAR);
+TRACE_DEFINE_ENUM(NFS4ERR_BADHANDLE);
+TRACE_DEFINE_ENUM(NFS4ERR_BADIOMODE);
+TRACE_DEFINE_ENUM(NFS4ERR_BADLAYOUT);
+TRACE_DEFINE_ENUM(NFS4ERR_BADLABEL);
+TRACE_DEFINE_ENUM(NFS4ERR_BADNAME);
+TRACE_DEFINE_ENUM(NFS4ERR_BADOWNER);
+TRACE_DEFINE_ENUM(NFS4ERR_BADSESSION);
+TRACE_DEFINE_ENUM(NFS4ERR_BADSLOT);
+TRACE_DEFINE_ENUM(NFS4ERR_BADTYPE);
+TRACE_DEFINE_ENUM(NFS4ERR_BADXDR);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_COOKIE);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_HIGH_SLOT);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_RANGE);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_SEQID);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_SESSION_DIGEST);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_STATEID);
+TRACE_DEFINE_ENUM(NFS4ERR_CB_PATH_DOWN);
+TRACE_DEFINE_ENUM(NFS4ERR_CLID_INUSE);
+TRACE_DEFINE_ENUM(NFS4ERR_CLIENTID_BUSY);
+TRACE_DEFINE_ENUM(NFS4ERR_COMPLETE_ALREADY);
+TRACE_DEFINE_ENUM(NFS4ERR_CONN_NOT_BOUND_TO_SESSION);
+TRACE_DEFINE_ENUM(NFS4ERR_DEADLOCK);
+TRACE_DEFINE_ENUM(NFS4ERR_DEADSESSION);
+TRACE_DEFINE_ENUM(NFS4ERR_DELAY);
+TRACE_DEFINE_ENUM(NFS4ERR_DELEG_ALREADY_WANTED);
+TRACE_DEFINE_ENUM(NFS4ERR_DELEG_REVOKED);
+TRACE_DEFINE_ENUM(NFS4ERR_DENIED);
+TRACE_DEFINE_ENUM(NFS4ERR_DIRDELEG_UNAVAIL);
+TRACE_DEFINE_ENUM(NFS4ERR_DQUOT);
+TRACE_DEFINE_ENUM(NFS4ERR_ENCR_ALG_UNSUPP);
+TRACE_DEFINE_ENUM(NFS4ERR_EXIST);
+TRACE_DEFINE_ENUM(NFS4ERR_EXPIRED);
+TRACE_DEFINE_ENUM(NFS4ERR_FBIG);
+TRACE_DEFINE_ENUM(NFS4ERR_FHEXPIRED);
+TRACE_DEFINE_ENUM(NFS4ERR_FILE_OPEN);
+TRACE_DEFINE_ENUM(NFS4ERR_GRACE);
+TRACE_DEFINE_ENUM(NFS4ERR_HASH_ALG_UNSUPP);
+TRACE_DEFINE_ENUM(NFS4ERR_INVAL);
+TRACE_DEFINE_ENUM(NFS4ERR_IO);
+TRACE_DEFINE_ENUM(NFS4ERR_ISDIR);
+TRACE_DEFINE_ENUM(NFS4ERR_LAYOUTTRYLATER);
+TRACE_DEFINE_ENUM(NFS4ERR_LAYOUTUNAVAILABLE);
+TRACE_DEFINE_ENUM(NFS4ERR_LEASE_MOVED);
+TRACE_DEFINE_ENUM(NFS4ERR_LOCKED);
+TRACE_DEFINE_ENUM(NFS4ERR_LOCKS_HELD);
+TRACE_DEFINE_ENUM(NFS4ERR_LOCK_RANGE);
+TRACE_DEFINE_ENUM(NFS4ERR_MINOR_VERS_MISMATCH);
+TRACE_DEFINE_ENUM(NFS4ERR_MLINK);
+TRACE_DEFINE_ENUM(NFS4ERR_MOVED);
+TRACE_DEFINE_ENUM(NFS4ERR_NAMETOOLONG);
+TRACE_DEFINE_ENUM(NFS4ERR_NOENT);
+TRACE_DEFINE_ENUM(NFS4ERR_NOFILEHANDLE);
+TRACE_DEFINE_ENUM(NFS4ERR_NOMATCHING_LAYOUT);
+TRACE_DEFINE_ENUM(NFS4ERR_NOSPC);
+TRACE_DEFINE_ENUM(NFS4ERR_NOTDIR);
+TRACE_DEFINE_ENUM(NFS4ERR_NOTEMPTY);
+TRACE_DEFINE_ENUM(NFS4ERR_NOTSUPP);
+TRACE_DEFINE_ENUM(NFS4ERR_NOT_ONLY_OP);
+TRACE_DEFINE_ENUM(NFS4ERR_NOT_SAME);
+TRACE_DEFINE_ENUM(NFS4ERR_NO_GRACE);
+TRACE_DEFINE_ENUM(NFS4ERR_NXIO);
+TRACE_DEFINE_ENUM(NFS4ERR_OLD_STATEID);
+TRACE_DEFINE_ENUM(NFS4ERR_OPENMODE);
+TRACE_DEFINE_ENUM(NFS4ERR_OP_ILLEGAL);
+TRACE_DEFINE_ENUM(NFS4ERR_OP_NOT_IN_SESSION);
+TRACE_DEFINE_ENUM(NFS4ERR_PERM);
+TRACE_DEFINE_ENUM(NFS4ERR_PNFS_IO_HOLE);
+TRACE_DEFINE_ENUM(NFS4ERR_PNFS_NO_LAYOUT);
+TRACE_DEFINE_ENUM(NFS4ERR_RECALLCONFLICT);
+TRACE_DEFINE_ENUM(NFS4ERR_RECLAIM_BAD);
+TRACE_DEFINE_ENUM(NFS4ERR_RECLAIM_CONFLICT);
+TRACE_DEFINE_ENUM(NFS4ERR_REJECT_DELEG);
+TRACE_DEFINE_ENUM(NFS4ERR_REP_TOO_BIG);
+TRACE_DEFINE_ENUM(NFS4ERR_REP_TOO_BIG_TO_CACHE);
+TRACE_DEFINE_ENUM(NFS4ERR_REQ_TOO_BIG);
+TRACE_DEFINE_ENUM(NFS4ERR_RESOURCE);
+TRACE_DEFINE_ENUM(NFS4ERR_RESTOREFH);
+TRACE_DEFINE_ENUM(NFS4ERR_RETRY_UNCACHED_REP);
+TRACE_DEFINE_ENUM(NFS4ERR_RETURNCONFLICT);
+TRACE_DEFINE_ENUM(NFS4ERR_ROFS);
+TRACE_DEFINE_ENUM(NFS4ERR_SAME);
+TRACE_DEFINE_ENUM(NFS4ERR_SHARE_DENIED);
+TRACE_DEFINE_ENUM(NFS4ERR_SEQUENCE_POS);
+TRACE_DEFINE_ENUM(NFS4ERR_SEQ_FALSE_RETRY);
+TRACE_DEFINE_ENUM(NFS4ERR_SEQ_MISORDERED);
+TRACE_DEFINE_ENUM(NFS4ERR_SERVERFAULT);
+TRACE_DEFINE_ENUM(NFS4ERR_STALE);
+TRACE_DEFINE_ENUM(NFS4ERR_STALE_CLIENTID);
+TRACE_DEFINE_ENUM(NFS4ERR_STALE_STATEID);
+TRACE_DEFINE_ENUM(NFS4ERR_SYMLINK);
+TRACE_DEFINE_ENUM(NFS4ERR_TOOSMALL);
+TRACE_DEFINE_ENUM(NFS4ERR_TOO_MANY_OPS);
+TRACE_DEFINE_ENUM(NFS4ERR_UNKNOWN_LAYOUTTYPE);
+TRACE_DEFINE_ENUM(NFS4ERR_UNSAFE_COMPOUND);
+TRACE_DEFINE_ENUM(NFS4ERR_WRONGSEC);
+TRACE_DEFINE_ENUM(NFS4ERR_WRONG_CRED);
+TRACE_DEFINE_ENUM(NFS4ERR_WRONG_TYPE);
+TRACE_DEFINE_ENUM(NFS4ERR_XDEV);
+
+TRACE_DEFINE_ENUM(NFS4ERR_RESET_TO_MDS);
+TRACE_DEFINE_ENUM(NFS4ERR_RESET_TO_PNFS);
+
+#define show_nfs4_status(x) \
+ __print_symbolic(x, \
+ { NFS4_OK, "OK" }, \
+ { EPERM, "EPERM" }, \
+ { ENOENT, "ENOENT" }, \
+ { EIO, "EIO" }, \
+ { ENXIO, "ENXIO" }, \
+ { EACCES, "EACCES" }, \
+ { EEXIST, "EEXIST" }, \
+ { EXDEV, "EXDEV" }, \
+ { ENOTDIR, "ENOTDIR" }, \
+ { EISDIR, "EISDIR" }, \
+ { EFBIG, "EFBIG" }, \
+ { ENOSPC, "ENOSPC" }, \
+ { EROFS, "EROFS" }, \
+ { EMLINK, "EMLINK" }, \
+ { ENAMETOOLONG, "ENAMETOOLONG" }, \
+ { ENOTEMPTY, "ENOTEMPTY" }, \
+ { EDQUOT, "EDQUOT" }, \
+ { ESTALE, "ESTALE" }, \
+ { EBADHANDLE, "EBADHANDLE" }, \
+ { EBADCOOKIE, "EBADCOOKIE" }, \
+ { ENOTSUPP, "ENOTSUPP" }, \
+ { ETOOSMALL, "ETOOSMALL" }, \
+ { EREMOTEIO, "EREMOTEIO" }, \
+ { EBADTYPE, "EBADTYPE" }, \
+ { EAGAIN, "EAGAIN" }, \
+ { ELOOP, "ELOOP" }, \
+ { EOPNOTSUPP, "EOPNOTSUPP" }, \
+ { EDEADLK, "EDEADLK" }, \
+ { ENOMEM, "ENOMEM" }, \
+ { EKEYEXPIRED, "EKEYEXPIRED" }, \
+ { ETIMEDOUT, "ETIMEDOUT" }, \
+ { ERESTARTSYS, "ERESTARTSYS" }, \
+ { ECONNREFUSED, "ECONNREFUSED" }, \
+ { ECONNRESET, "ECONNRESET" }, \
+ { ENETUNREACH, "ENETUNREACH" }, \
+ { EHOSTUNREACH, "EHOSTUNREACH" }, \
+ { EHOSTDOWN, "EHOSTDOWN" }, \
+ { EPIPE, "EPIPE" }, \
+ { EPFNOSUPPORT, "EPFNOSUPPORT" }, \
+ { EPROTONOSUPPORT, "EPROTONOSUPPORT" }, \
+ { NFS4ERR_ACCESS, "ACCESS" }, \
+ { NFS4ERR_ATTRNOTSUPP, "ATTRNOTSUPP" }, \
+ { NFS4ERR_ADMIN_REVOKED, "ADMIN_REVOKED" }, \
+ { NFS4ERR_BACK_CHAN_BUSY, "BACK_CHAN_BUSY" }, \
+ { NFS4ERR_BADCHAR, "BADCHAR" }, \
+ { NFS4ERR_BADHANDLE, "BADHANDLE" }, \
+ { NFS4ERR_BADIOMODE, "BADIOMODE" }, \
+ { NFS4ERR_BADLAYOUT, "BADLAYOUT" }, \
+ { NFS4ERR_BADLABEL, "BADLABEL" }, \
+ { NFS4ERR_BADNAME, "BADNAME" }, \
+ { NFS4ERR_BADOWNER, "BADOWNER" }, \
+ { NFS4ERR_BADSESSION, "BADSESSION" }, \
+ { NFS4ERR_BADSLOT, "BADSLOT" }, \
+ { NFS4ERR_BADTYPE, "BADTYPE" }, \
+ { NFS4ERR_BADXDR, "BADXDR" }, \
+ { NFS4ERR_BAD_COOKIE, "BAD_COOKIE" }, \
+ { NFS4ERR_BAD_HIGH_SLOT, "BAD_HIGH_SLOT" }, \
+ { NFS4ERR_BAD_RANGE, "BAD_RANGE" }, \
+ { NFS4ERR_BAD_SEQID, "BAD_SEQID" }, \
+ { NFS4ERR_BAD_SESSION_DIGEST, "BAD_SESSION_DIGEST" }, \
+ { NFS4ERR_BAD_STATEID, "BAD_STATEID" }, \
+ { NFS4ERR_CB_PATH_DOWN, "CB_PATH_DOWN" }, \
+ { NFS4ERR_CLID_INUSE, "CLID_INUSE" }, \
+ { NFS4ERR_CLIENTID_BUSY, "CLIENTID_BUSY" }, \
+ { NFS4ERR_COMPLETE_ALREADY, "COMPLETE_ALREADY" }, \
+ { NFS4ERR_CONN_NOT_BOUND_TO_SESSION, "CONN_NOT_BOUND_TO_SESSION" }, \
+ { NFS4ERR_DEADLOCK, "DEADLOCK" }, \
+ { NFS4ERR_DEADSESSION, "DEAD_SESSION" }, \
+ { NFS4ERR_DELAY, "DELAY" }, \
+ { NFS4ERR_DELEG_ALREADY_WANTED, "DELEG_ALREADY_WANTED" }, \
+ { NFS4ERR_DELEG_REVOKED, "DELEG_REVOKED" }, \
+ { NFS4ERR_DENIED, "DENIED" }, \
+ { NFS4ERR_DIRDELEG_UNAVAIL, "DIRDELEG_UNAVAIL" }, \
+ { NFS4ERR_DQUOT, "DQUOT" }, \
+ { NFS4ERR_ENCR_ALG_UNSUPP, "ENCR_ALG_UNSUPP" }, \
+ { NFS4ERR_EXIST, "EXIST" }, \
+ { NFS4ERR_EXPIRED, "EXPIRED" }, \
+ { NFS4ERR_FBIG, "FBIG" }, \
+ { NFS4ERR_FHEXPIRED, "FHEXPIRED" }, \
+ { NFS4ERR_FILE_OPEN, "FILE_OPEN" }, \
+ { NFS4ERR_GRACE, "GRACE" }, \
+ { NFS4ERR_HASH_ALG_UNSUPP, "HASH_ALG_UNSUPP" }, \
+ { NFS4ERR_INVAL, "INVAL" }, \
+ { NFS4ERR_IO, "IO" }, \
+ { NFS4ERR_ISDIR, "ISDIR" }, \
+ { NFS4ERR_LAYOUTTRYLATER, "LAYOUTTRYLATER" }, \
+ { NFS4ERR_LAYOUTUNAVAILABLE, "LAYOUTUNAVAILABLE" }, \
+ { NFS4ERR_LEASE_MOVED, "LEASE_MOVED" }, \
+ { NFS4ERR_LOCKED, "LOCKED" }, \
+ { NFS4ERR_LOCKS_HELD, "LOCKS_HELD" }, \
+ { NFS4ERR_LOCK_RANGE, "LOCK_RANGE" }, \
+ { NFS4ERR_MINOR_VERS_MISMATCH, "MINOR_VERS_MISMATCH" }, \
+ { NFS4ERR_MLINK, "MLINK" }, \
+ { NFS4ERR_MOVED, "MOVED" }, \
+ { NFS4ERR_NAMETOOLONG, "NAMETOOLONG" }, \
+ { NFS4ERR_NOENT, "NOENT" }, \
+ { NFS4ERR_NOFILEHANDLE, "NOFILEHANDLE" }, \
+ { NFS4ERR_NOMATCHING_LAYOUT, "NOMATCHING_LAYOUT" }, \
+ { NFS4ERR_NOSPC, "NOSPC" }, \
+ { NFS4ERR_NOTDIR, "NOTDIR" }, \
+ { NFS4ERR_NOTEMPTY, "NOTEMPTY" }, \
+ { NFS4ERR_NOTSUPP, "NOTSUPP" }, \
+ { NFS4ERR_NOT_ONLY_OP, "NOT_ONLY_OP" }, \
+ { NFS4ERR_NOT_SAME, "NOT_SAME" }, \
+ { NFS4ERR_NO_GRACE, "NO_GRACE" }, \
+ { NFS4ERR_NXIO, "NXIO" }, \
+ { NFS4ERR_OLD_STATEID, "OLD_STATEID" }, \
+ { NFS4ERR_OPENMODE, "OPENMODE" }, \
+ { NFS4ERR_OP_ILLEGAL, "OP_ILLEGAL" }, \
+ { NFS4ERR_OP_NOT_IN_SESSION, "OP_NOT_IN_SESSION" }, \
+ { NFS4ERR_PERM, "PERM" }, \
+ { NFS4ERR_PNFS_IO_HOLE, "PNFS_IO_HOLE" }, \
+ { NFS4ERR_PNFS_NO_LAYOUT, "PNFS_NO_LAYOUT" }, \
+ { NFS4ERR_RECALLCONFLICT, "RECALLCONFLICT" }, \
+ { NFS4ERR_RECLAIM_BAD, "RECLAIM_BAD" }, \
+ { NFS4ERR_RECLAIM_CONFLICT, "RECLAIM_CONFLICT" }, \
+ { NFS4ERR_REJECT_DELEG, "REJECT_DELEG" }, \
+ { NFS4ERR_REP_TOO_BIG, "REP_TOO_BIG" }, \
+ { NFS4ERR_REP_TOO_BIG_TO_CACHE, "REP_TOO_BIG_TO_CACHE" }, \
+ { NFS4ERR_REQ_TOO_BIG, "REQ_TOO_BIG" }, \
+ { NFS4ERR_RESOURCE, "RESOURCE" }, \
+ { NFS4ERR_RESTOREFH, "RESTOREFH" }, \
+ { NFS4ERR_RETRY_UNCACHED_REP, "RETRY_UNCACHED_REP" }, \
+ { NFS4ERR_RETURNCONFLICT, "RETURNCONFLICT" }, \
+ { NFS4ERR_ROFS, "ROFS" }, \
+ { NFS4ERR_SAME, "SAME" }, \
+ { NFS4ERR_SHARE_DENIED, "SHARE_DENIED" }, \
+ { NFS4ERR_SEQUENCE_POS, "SEQUENCE_POS" }, \
+ { NFS4ERR_SEQ_FALSE_RETRY, "SEQ_FALSE_RETRY" }, \
+ { NFS4ERR_SEQ_MISORDERED, "SEQ_MISORDERED" }, \
+ { NFS4ERR_SERVERFAULT, "SERVERFAULT" }, \
+ { NFS4ERR_STALE, "STALE" }, \
+ { NFS4ERR_STALE_CLIENTID, "STALE_CLIENTID" }, \
+ { NFS4ERR_STALE_STATEID, "STALE_STATEID" }, \
+ { NFS4ERR_SYMLINK, "SYMLINK" }, \
+ { NFS4ERR_TOOSMALL, "TOOSMALL" }, \
+ { NFS4ERR_TOO_MANY_OPS, "TOO_MANY_OPS" }, \
+ { NFS4ERR_UNKNOWN_LAYOUTTYPE, "UNKNOWN_LAYOUTTYPE" }, \
+ { NFS4ERR_UNSAFE_COMPOUND, "UNSAFE_COMPOUND" }, \
+ { NFS4ERR_WRONGSEC, "WRONGSEC" }, \
+ { NFS4ERR_WRONG_CRED, "WRONG_CRED" }, \
+ { NFS4ERR_WRONG_TYPE, "WRONG_TYPE" }, \
+ { NFS4ERR_XDEV, "XDEV" }, \
+ /* ***** Internal to Linux NFS client ***** */ \
+ { NFS4ERR_RESET_TO_MDS, "RESET_TO_MDS" }, \
+ { NFS4ERR_RESET_TO_PNFS, "RESET_TO_PNFS" })
+
+#define show_nfs4_verifier(x) \
+ __print_hex_str(x, NFS4_VERIFIER_SIZE)
+
+TRACE_DEFINE_ENUM(IOMODE_READ);
+TRACE_DEFINE_ENUM(IOMODE_RW);
+TRACE_DEFINE_ENUM(IOMODE_ANY);
+
+#define show_pnfs_layout_iomode(x) \
+ __print_symbolic(x, \
+ { IOMODE_READ, "READ" }, \
+ { IOMODE_RW, "RW" }, \
+ { IOMODE_ANY, "ANY" })
+
+#define show_nfs4_seq4_status(x) \
+ __print_flags(x, "|", \
+ { SEQ4_STATUS_CB_PATH_DOWN, "CB_PATH_DOWN" }, \
+ { SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING, "CB_GSS_CONTEXTS_EXPIRING" }, \
+ { SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRED, "CB_GSS_CONTEXTS_EXPIRED" }, \
+ { SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED, "EXPIRED_ALL_STATE_REVOKED" }, \
+ { SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED, "EXPIRED_SOME_STATE_REVOKED" }, \
+ { SEQ4_STATUS_ADMIN_STATE_REVOKED, "ADMIN_STATE_REVOKED" }, \
+ { SEQ4_STATUS_RECALLABLE_STATE_REVOKED, "RECALLABLE_STATE_REVOKED" }, \
+ { SEQ4_STATUS_LEASE_MOVED, "LEASE_MOVED" }, \
+ { SEQ4_STATUS_RESTART_RECLAIM_NEEDED, "RESTART_RECLAIM_NEEDED" }, \
+ { SEQ4_STATUS_CB_PATH_DOWN_SESSION, "CB_PATH_DOWN_SESSION" }, \
+ { SEQ4_STATUS_BACKCHANNEL_FAULT, "BACKCHANNEL_FAULT" })
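
Note: these helpers are consumed inside the TP_printk() of NFS client
events (fs/nfs/nfstrace.h and fs/nfs/nfs4trace.h); the TRACE_DEFINE_ENUM()
calls only export the numeric values so userspace tooling can resolve the
__print_symbolic() tables. A fragment-level sketch of the usual pattern,
not a complete event:

	/* inside some TRACE_EVENT()'s definition: */
	TP_printk("error=%ld (%s)", -(long)__entry->error,
		  show_nfs4_status(__entry->error))
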
diff --git a/include/trace/events/nilfs2.h b/include/trace/events/nilfs2.h
index 84ee31fc04cc..8efc6236f57c 100644
--- a/include/trace/events/nilfs2.h
+++ b/include/trace/events/nilfs2.h
@@ -192,7 +192,7 @@ TRACE_EVENT(nilfs2_mdt_submit_block,
TP_PROTO(struct inode *inode,
unsigned long ino,
unsigned long blkoff,
- int mode),
+ enum req_op mode),
TP_ARGS(inode, ino, blkoff, mode),
@@ -200,7 +200,7 @@ TRACE_EVENT(nilfs2_mdt_submit_block,
__field(struct inode *, inode)
__field(unsigned long, ino)
__field(unsigned long, blkoff)
- __field(int, mode)
+ __field(enum req_op, mode)
),
TP_fast_assign(
diff --git a/include/trace/events/osnoise.h b/include/trace/events/osnoise.h
new file mode 100644
index 000000000000..82f741ec0f57
--- /dev/null
+++ b/include/trace/events/osnoise.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM osnoise
+
+#if !defined(_OSNOISE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _OSNOISE_TRACE_H
+
+#include <linux/tracepoint.h>
+TRACE_EVENT(thread_noise,
+
+ TP_PROTO(struct task_struct *t, u64 start, u64 duration),
+
+ TP_ARGS(t, start, duration),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN)
+ __field( u64, start )
+ __field( u64, duration)
+ __field( pid_t, pid )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
+ __entry->pid = t->pid;
+ __entry->start = start;
+ __entry->duration = duration;
+ ),
+
+ TP_printk("%8s:%d start %llu.%09u duration %llu ns",
+ __entry->comm,
+ __entry->pid,
+ __print_ns_to_secs(__entry->start),
+ __print_ns_without_secs(__entry->start),
+ __entry->duration)
+);
+
+TRACE_EVENT(softirq_noise,
+
+ TP_PROTO(int vector, u64 start, u64 duration),
+
+ TP_ARGS(vector, start, duration),
+
+ TP_STRUCT__entry(
+ __field( u64, start )
+ __field( u64, duration)
+ __field( int, vector )
+ ),
+
+ TP_fast_assign(
+ __entry->vector = vector;
+ __entry->start = start;
+ __entry->duration = duration;
+ ),
+
+ TP_printk("%8s:%d start %llu.%09u duration %llu ns",
+ show_softirq_name(__entry->vector),
+ __entry->vector,
+ __print_ns_to_secs(__entry->start),
+ __print_ns_without_secs(__entry->start),
+ __entry->duration)
+);
+
+TRACE_EVENT(irq_noise,
+
+ TP_PROTO(int vector, const char *desc, u64 start, u64 duration),
+
+ TP_ARGS(vector, desc, start, duration),
+
+ TP_STRUCT__entry(
+ __field( u64, start )
+ __field( u64, duration)
+ __string( desc, desc )
+ __field( int, vector )
+
+ ),
+
+ TP_fast_assign(
+ __assign_str(desc, desc);
+ __entry->vector = vector;
+ __entry->start = start;
+ __entry->duration = duration;
+ ),
+
+ TP_printk("%s:%d start %llu.%09u duration %llu ns",
+ __get_str(desc),
+ __entry->vector,
+ __print_ns_to_secs(__entry->start),
+ __print_ns_without_secs(__entry->start),
+ __entry->duration)
+);
+
+TRACE_EVENT(nmi_noise,
+
+ TP_PROTO(u64 start, u64 duration),
+
+ TP_ARGS(start, duration),
+
+ TP_STRUCT__entry(
+ __field( u64, start )
+ __field( u64, duration)
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->duration = duration;
+ ),
+
+ TP_printk("start %llu.%09u duration %llu ns",
+ __print_ns_to_secs(__entry->start),
+ __print_ns_without_secs(__entry->start),
+ __entry->duration)
+);
+
+TRACE_EVENT(sample_threshold,
+
+ TP_PROTO(u64 start, u64 duration, u64 interference),
+
+ TP_ARGS(start, duration, interference),
+
+ TP_STRUCT__entry(
+ __field( u64, start )
+ __field( u64, duration)
+ __field( u64, interference)
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->duration = duration;
+ __entry->interference = interference;
+ ),
+
+ TP_printk("start %llu.%09u duration %llu ns interference %llu",
+ __print_ns_to_secs(__entry->start),
+ __print_ns_without_secs(__entry->start),
+ __entry->duration,
+ __entry->interference)
+);
+
+#endif /* _TRACE_OSNOISE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
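
Note: each osnoise event records when an interference source started and
how long it ran; TP_printk splits the timestamp into seconds and
nanoseconds with the __print_ns_*() helpers. An illustrative emitter
(example_note_thread_noise() is invented; the real ones live in
kernel/trace/trace_osnoise.c):

	#include <trace/events/osnoise.h>

	static void example_note_thread_noise(struct task_struct *t,
					      u64 start, u64 now)
	{
		trace_thread_noise(t, start, now - start);	/* ns */
	}
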
diff --git a/include/trace/events/page_pool.h b/include/trace/events/page_pool.h
index ad0aa7f31675..ca534501158b 100644
--- a/include/trace/events/page_pool.h
+++ b/include/trace/events/page_pool.h
@@ -60,7 +60,7 @@ TRACE_EVENT(page_pool_state_release,
__entry->pfn = page_to_pfn(page);
),
- TP_printk("page_pool=%p page=%p pfn=%lu release=%u",
+ TP_printk("page_pool=%p page=%p pfn=0x%lx release=%u",
__entry->pool, __entry->page, __entry->pfn, __entry->release)
);
@@ -85,7 +85,7 @@ TRACE_EVENT(page_pool_state_hold,
__entry->pfn = page_to_pfn(page);
),
- TP_printk("page_pool=%p page=%p pfn=%lu hold=%u",
+ TP_printk("page_pool=%p page=%p pfn=0x%lx hold=%u",
__entry->pool, __entry->page, __entry->pfn, __entry->hold)
);
diff --git a/include/trace/events/page_ref.h b/include/trace/events/page_ref.h
index 5d2ea93956ce..8a99c1cd417b 100644
--- a/include/trace/events/page_ref.h
+++ b/include/trace/events/page_ref.h
@@ -38,7 +38,7 @@ DECLARE_EVENT_CLASS(page_ref_mod_template,
TP_printk("pfn=0x%lx flags=%s count=%d mapcount=%d mapping=%p mt=%d val=%d",
__entry->pfn,
- show_page_flags(__entry->flags & ((1UL << NR_PAGEFLAGS) - 1)),
+ show_page_flags(__entry->flags & PAGEFLAGS_MASK),
__entry->count,
__entry->mapcount, __entry->mapping, __entry->mt,
__entry->val)
@@ -88,7 +88,7 @@ DECLARE_EVENT_CLASS(page_ref_mod_and_test_template,
TP_printk("pfn=0x%lx flags=%s count=%d mapcount=%d mapping=%p mt=%d val=%d ret=%d",
__entry->pfn,
- show_page_flags(__entry->flags & ((1UL << NR_PAGEFLAGS) - 1)),
+ show_page_flags(__entry->flags & PAGEFLAGS_MASK),
__entry->count,
__entry->mapcount, __entry->mapping, __entry->mt,
__entry->val, __entry->ret)
diff --git a/include/trace/events/pagemap.h b/include/trace/events/pagemap.h
index 8fd1babae761..171524d3526d 100644
--- a/include/trace/events/pagemap.h
+++ b/include/trace/events/pagemap.h
@@ -16,41 +16,38 @@
#define PAGEMAP_MAPPEDDISK 0x0020u
#define PAGEMAP_BUFFERS 0x0040u
-#define trace_pagemap_flags(page) ( \
- (PageAnon(page) ? PAGEMAP_ANONYMOUS : PAGEMAP_FILE) | \
- (page_mapped(page) ? PAGEMAP_MAPPED : 0) | \
- (PageSwapCache(page) ? PAGEMAP_SWAPCACHE : 0) | \
- (PageSwapBacked(page) ? PAGEMAP_SWAPBACKED : 0) | \
- (PageMappedToDisk(page) ? PAGEMAP_MAPPEDDISK : 0) | \
- (page_has_private(page) ? PAGEMAP_BUFFERS : 0) \
+#define trace_pagemap_flags(folio) ( \
+ (folio_test_anon(folio) ? PAGEMAP_ANONYMOUS : PAGEMAP_FILE) | \
+ (folio_mapped(folio) ? PAGEMAP_MAPPED : 0) | \
+ (folio_test_swapcache(folio) ? PAGEMAP_SWAPCACHE : 0) | \
+ (folio_test_swapbacked(folio) ? PAGEMAP_SWAPBACKED : 0) | \
+ (folio_test_mappedtodisk(folio) ? PAGEMAP_MAPPEDDISK : 0) | \
+ (folio_test_private(folio) ? PAGEMAP_BUFFERS : 0) \
)
TRACE_EVENT(mm_lru_insertion,
- TP_PROTO(
- struct page *page,
- int lru
- ),
+ TP_PROTO(struct folio *folio),
- TP_ARGS(page, lru),
+ TP_ARGS(folio),
TP_STRUCT__entry(
- __field(struct page *, page )
+ __field(struct folio *, folio )
__field(unsigned long, pfn )
- __field(int, lru )
+ __field(enum lru_list, lru )
__field(unsigned long, flags )
),
TP_fast_assign(
- __entry->page = page;
- __entry->pfn = page_to_pfn(page);
- __entry->lru = lru;
- __entry->flags = trace_pagemap_flags(page);
+ __entry->folio = folio;
+ __entry->pfn = folio_pfn(folio);
+ __entry->lru = folio_lru_list(folio);
+ __entry->flags = trace_pagemap_flags(folio);
),
/* Flag format is based on page-types.c formatting for pagemap */
- TP_printk("page=%p pfn=%lu lru=%d flags=%s%s%s%s%s%s",
- __entry->page,
+ TP_printk("folio=%p pfn=0x%lx lru=%d flags=%s%s%s%s%s%s",
+ __entry->folio,
__entry->pfn,
__entry->lru,
__entry->flags & PAGEMAP_MAPPED ? "M" : " ",
@@ -63,23 +60,21 @@ TRACE_EVENT(mm_lru_insertion,
TRACE_EVENT(mm_lru_activate,
- TP_PROTO(struct page *page),
+ TP_PROTO(struct folio *folio),
- TP_ARGS(page),
+ TP_ARGS(folio),
TP_STRUCT__entry(
- __field(struct page *, page )
+ __field(struct folio *, folio )
__field(unsigned long, pfn )
),
TP_fast_assign(
- __entry->page = page;
- __entry->pfn = page_to_pfn(page);
+ __entry->folio = folio;
+ __entry->pfn = folio_pfn(folio);
),
- /* Flag format is based on page-types.c formatting for pagemap */
- TP_printk("page=%p pfn=%lu", __entry->page, __entry->pfn)
-
+ TP_printk("folio=%p pfn=0x%lx", __entry->folio, __entry->pfn)
);
#endif /* _TRACE_PAGEMAP_H */
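
The page-to-folio conversion above also simplifies call sites, since the event now derives pfn, lru and flags itself. A hedged before/after sketch; the "before" caller is reconstructed from the old prototype and is not shown in this hunk:

    /* Before: the caller passed the page plus an LRU list it computed. */
    trace_mm_lru_insertion(page, page_lru(page));

    /* After: the folio alone suffices; folio_pfn(), folio_lru_list() and
     * trace_pagemap_flags() all run inside TP_fast_assign(). */
    trace_mm_lru_insertion(folio);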
diff --git a/include/trace/events/percpu.h b/include/trace/events/percpu.h
index df112a64f6c9..5b8211ca8950 100644
--- a/include/trace/events/percpu.h
+++ b/include/trace/events/percpu.h
@@ -6,15 +6,20 @@
#define _TRACE_PERCPU_H
#include <linux/tracepoint.h>
+#include <trace/events/mmflags.h>
TRACE_EVENT(percpu_alloc_percpu,
- TP_PROTO(bool reserved, bool is_atomic, size_t size,
- size_t align, void *base_addr, int off, void __percpu *ptr),
+ TP_PROTO(unsigned long call_site,
+ bool reserved, bool is_atomic, size_t size,
+ size_t align, void *base_addr, int off,
+ void __percpu *ptr, size_t bytes_alloc, gfp_t gfp_flags),
- TP_ARGS(reserved, is_atomic, size, align, base_addr, off, ptr),
+ TP_ARGS(call_site, reserved, is_atomic, size, align, base_addr, off,
+ ptr, bytes_alloc, gfp_flags),
TP_STRUCT__entry(
+ __field( unsigned long, call_site )
__field( bool, reserved )
__field( bool, is_atomic )
__field( size_t, size )
@@ -22,9 +27,11 @@ TRACE_EVENT(percpu_alloc_percpu,
__field( void *, base_addr )
__field( int, off )
__field( void __percpu *, ptr )
+ __field( size_t, bytes_alloc )
+ __field( unsigned long, gfp_flags )
),
-
TP_fast_assign(
+ __entry->call_site = call_site;
__entry->reserved = reserved;
__entry->is_atomic = is_atomic;
__entry->size = size;
@@ -32,12 +39,16 @@ TRACE_EVENT(percpu_alloc_percpu,
__entry->base_addr = base_addr;
__entry->off = off;
__entry->ptr = ptr;
+ __entry->bytes_alloc = bytes_alloc;
+ __entry->gfp_flags = (__force unsigned long)gfp_flags;
),
- TP_printk("reserved=%d is_atomic=%d size=%zu align=%zu base_addr=%p off=%d ptr=%p",
+ TP_printk("call_site=%pS reserved=%d is_atomic=%d size=%zu align=%zu base_addr=%p off=%d ptr=%p bytes_alloc=%zu gfp_flags=%s",
+ (void *)__entry->call_site,
__entry->reserved, __entry->is_atomic,
__entry->size, __entry->align,
- __entry->base_addr, __entry->off, __entry->ptr)
+ __entry->base_addr, __entry->off, __entry->ptr,
+ __entry->bytes_alloc, show_gfp_flags(__entry->gfp_flags))
);
TRACE_EVENT(percpu_free_percpu,
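
A sketch of the matching call-site update in the percpu allocator; the exact expressions for bytes_alloc and base_addr are assumptions here:

    /* pcpu_alloc() now reports its caller, the full footprint of the
     * allocation and the gfp mask alongside the original fields. */
    trace_percpu_alloc_percpu(_RET_IP_, reserved, is_atomic, size, align,
    			      chunk->base_addr, off, ptr,
    			      bytes_alloc, gfp);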
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 7457e238e1b7..77f14f7a11d4 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -40,6 +40,28 @@ DEFINE_EVENT(cpu, cpu_idle,
TP_ARGS(state, cpu_id)
);
+TRACE_EVENT(cpu_idle_miss,
+
+ TP_PROTO(unsigned int cpu_id, unsigned int state, bool below),
+
+ TP_ARGS(cpu_id, state, below),
+
+ TP_STRUCT__entry(
+ __field(u32, cpu_id)
+ __field(u32, state)
+ __field(bool, below)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_id = cpu_id;
+ __entry->state = state;
+ __entry->below = below;
+ ),
+
+ TP_printk("cpu_id=%lu state=%lu type=%s", (unsigned long)__entry->cpu_id,
+ (unsigned long)__entry->state, (__entry->below)?"below":"above")
+);
+
TRACE_EVENT(powernv_throttle,
TP_PROTO(int chip_id, const char *reason, int pmax),
@@ -359,75 +381,50 @@ DEFINE_EVENT(power_domain, power_domain_target,
);
/*
- * The pm qos events are used for pm qos update
+ * CPU latency QoS events used for global CPU latency QoS list updates
*/
-DECLARE_EVENT_CLASS(pm_qos_request,
+DECLARE_EVENT_CLASS(cpu_latency_qos_request,
- TP_PROTO(int pm_qos_class, s32 value),
+ TP_PROTO(s32 value),
- TP_ARGS(pm_qos_class, value),
+ TP_ARGS(value),
TP_STRUCT__entry(
- __field( int, pm_qos_class )
__field( s32, value )
),
TP_fast_assign(
- __entry->pm_qos_class = pm_qos_class;
__entry->value = value;
),
- TP_printk("pm_qos_class=%s value=%d",
- __print_symbolic(__entry->pm_qos_class,
- { PM_QOS_CPU_DMA_LATENCY, "CPU_DMA_LATENCY" }),
+ TP_printk("CPU_DMA_LATENCY value=%d",
__entry->value)
);
-DEFINE_EVENT(pm_qos_request, pm_qos_add_request,
-
- TP_PROTO(int pm_qos_class, s32 value),
-
- TP_ARGS(pm_qos_class, value)
-);
-
-DEFINE_EVENT(pm_qos_request, pm_qos_update_request,
+DEFINE_EVENT(cpu_latency_qos_request, pm_qos_add_request,
- TP_PROTO(int pm_qos_class, s32 value),
+ TP_PROTO(s32 value),
- TP_ARGS(pm_qos_class, value)
+ TP_ARGS(value)
);
-DEFINE_EVENT(pm_qos_request, pm_qos_remove_request,
+DEFINE_EVENT(cpu_latency_qos_request, pm_qos_update_request,
- TP_PROTO(int pm_qos_class, s32 value),
+ TP_PROTO(s32 value),
- TP_ARGS(pm_qos_class, value)
+ TP_ARGS(value)
);
-TRACE_EVENT(pm_qos_update_request_timeout,
-
- TP_PROTO(int pm_qos_class, s32 value, unsigned long timeout_us),
-
- TP_ARGS(pm_qos_class, value, timeout_us),
-
- TP_STRUCT__entry(
- __field( int, pm_qos_class )
- __field( s32, value )
- __field( unsigned long, timeout_us )
- ),
+DEFINE_EVENT(cpu_latency_qos_request, pm_qos_remove_request,
- TP_fast_assign(
- __entry->pm_qos_class = pm_qos_class;
- __entry->value = value;
- __entry->timeout_us = timeout_us;
- ),
+ TP_PROTO(s32 value),
- TP_printk("pm_qos_class=%s value=%d, timeout_us=%ld",
- __print_symbolic(__entry->pm_qos_class,
- { PM_QOS_CPU_DMA_LATENCY, "CPU_DMA_LATENCY" }),
- __entry->value, __entry->timeout_us)
+ TP_ARGS(value)
);
+/*
+ * General PM QoS events used for updates of PM QoS request lists
+ */
DECLARE_EVENT_CLASS(pm_qos_update,
TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value),
@@ -525,6 +522,35 @@ DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_remove_request,
TP_ARGS(name, type, new_value)
);
+
+TRACE_EVENT(guest_halt_poll_ns,
+
+ TP_PROTO(bool grow, unsigned int new, unsigned int old),
+
+ TP_ARGS(grow, new, old),
+
+ TP_STRUCT__entry(
+ __field(bool, grow)
+ __field(unsigned int, new)
+ __field(unsigned int, old)
+ ),
+
+ TP_fast_assign(
+ __entry->grow = grow;
+ __entry->new = new;
+ __entry->old = old;
+ ),
+
+ TP_printk("halt_poll_ns %u (%s %u)",
+ __entry->new,
+ __entry->grow ? "grow" : "shrink",
+ __entry->old)
+);
+
+#define trace_guest_halt_poll_ns_grow(new, old) \
+ trace_guest_halt_poll_ns(true, new, old)
+#define trace_guest_halt_poll_ns_shrink(new, old) \
+ trace_guest_halt_poll_ns(false, new, old)
#endif /* _TRACE_POWER_H */
/* This part must be outside protection */
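
The grow/shrink wrapper macros above keep call sites self-describing. A hedged usage sketch from a guest halt-polling governor (variable names assumed):

    if (poll_ns > old_poll_ns)
    	trace_guest_halt_poll_ns_grow(poll_ns, old_poll_ns);
    else
    	trace_guest_halt_poll_ns_shrink(poll_ns, old_poll_ns);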
diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h
index 0d1a9ebf55ba..a3995925cb05 100644
--- a/include/trace/events/qdisc.h
+++ b/include/trace/events/qdisc.h
@@ -8,6 +8,8 @@
#include <linux/netdevice.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
+#include <linux/pkt_sched.h>
+#include <net/sch_generic.h>
TRACE_EVENT(qdisc_dequeue,
@@ -44,6 +46,107 @@ TRACE_EVENT(qdisc_dequeue,
__entry->txq_state, __entry->packets, __entry->skbaddr )
);
+TRACE_EVENT(qdisc_enqueue,
+
+ TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq, struct sk_buff *skb),
+
+ TP_ARGS(qdisc, txq, skb),
+
+ TP_STRUCT__entry(
+ __field(struct Qdisc *, qdisc)
+ __field(const struct netdev_queue *, txq)
+ __field(void *, skbaddr)
+ __field(int, ifindex)
+ __field(u32, handle)
+ __field(u32, parent)
+ ),
+
+ TP_fast_assign(
+ __entry->qdisc = qdisc;
+ __entry->txq = txq;
+ __entry->skbaddr = skb;
+ __entry->ifindex = txq->dev ? txq->dev->ifindex : 0;
+ __entry->handle = qdisc->handle;
+ __entry->parent = qdisc->parent;
+ ),
+
+ TP_printk("enqueue ifindex=%d qdisc handle=0x%X parent=0x%X skbaddr=%p",
+ __entry->ifindex, __entry->handle, __entry->parent, __entry->skbaddr)
+);
+
+TRACE_EVENT(qdisc_reset,
+
+ TP_PROTO(struct Qdisc *q),
+
+ TP_ARGS(q),
+
+ TP_STRUCT__entry(
+ __string( dev, qdisc_dev(q) )
+ __string( kind, q->ops->id )
+ __field( u32, parent )
+ __field( u32, handle )
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev, qdisc_dev(q));
+ __assign_str(kind, q->ops->id);
+ __entry->parent = q->parent;
+ __entry->handle = q->handle;
+ ),
+
+ TP_printk("dev=%s kind=%s parent=%x:%x handle=%x:%x", __get_str(dev),
+ __get_str(kind), TC_H_MAJ(__entry->parent) >> 16, TC_H_MIN(__entry->parent),
+ TC_H_MAJ(__entry->handle) >> 16, TC_H_MIN(__entry->handle))
+);
+
+TRACE_EVENT(qdisc_destroy,
+
+ TP_PROTO(struct Qdisc *q),
+
+ TP_ARGS(q),
+
+ TP_STRUCT__entry(
+ __string( dev, qdisc_dev(q) )
+ __string( kind, q->ops->id )
+ __field( u32, parent )
+ __field( u32, handle )
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev, qdisc_dev(q));
+ __assign_str(kind, q->ops->id);
+ __entry->parent = q->parent;
+ __entry->handle = q->handle;
+ ),
+
+ TP_printk("dev=%s kind=%s parent=%x:%x handle=%x:%x", __get_str(dev),
+ __get_str(kind), TC_H_MAJ(__entry->parent) >> 16, TC_H_MIN(__entry->parent),
+ TC_H_MAJ(__entry->handle) >> 16, TC_H_MIN(__entry->handle))
+);
+
+TRACE_EVENT(qdisc_create,
+
+ TP_PROTO(const struct Qdisc_ops *ops, struct net_device *dev, u32 parent),
+
+ TP_ARGS(ops, dev, parent),
+
+ TP_STRUCT__entry(
+ __string( dev, dev->name )
+ __string( kind, ops->id )
+ __field( u32, parent )
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev, dev->name);
+ __assign_str(kind, ops->id);
+ __entry->parent = parent;
+ ),
+
+ TP_printk("dev=%s kind=%s parent=%x:%x",
+ __get_str(dev), __get_str(kind),
+ TC_H_MAJ(__entry->parent) >> 16, TC_H_MIN(__entry->parent))
+);
+
#endif /* _TRACE_QDISC_H */
/* This part must be outside protection */
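
For context, a sketch of where qdisc_enqueue would plausibly fire in the transmit path, after the qdisc accepts the skb, mirroring the existing qdisc_dequeue hook; the surrounding code is an assumption:

    rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
    if (rc == NET_XMIT_SUCCESS)
    	trace_qdisc_enqueue(q, txq, skb);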
diff --git a/include/trace/events/qla.h b/include/trace/events/qla.h
new file mode 100644
index 000000000000..e7fd55e7dc3d
--- /dev/null
+++ b/include/trace/events/qla.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if !defined(_TRACE_QLA_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_QLA_H_
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM qla
+
+#define QLA_MSG_MAX 256
+
+#pragma GCC diagnostic push
+#ifndef __clang__
+#pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
+#endif
+
+DECLARE_EVENT_CLASS(qla_log_event,
+ TP_PROTO(const char *buf,
+ struct va_format *vaf),
+
+ TP_ARGS(buf, vaf),
+
+ TP_STRUCT__entry(
+ __string(buf, buf)
+ __vstring(msg, vaf->fmt, vaf->va)
+ ),
+ TP_fast_assign(
+ __assign_str(buf, buf);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
+ ),
+
+ TP_printk("%s %s", __get_str(buf), __get_str(msg))
+);
+
+#pragma GCC diagnostic pop
+
+DEFINE_EVENT(qla_log_event, ql_dbg_log,
+ TP_PROTO(const char *buf, struct va_format *vaf),
+ TP_ARGS(buf, vaf)
+);
+
+#endif /* _TRACE_QLA_H_ */
+
+#define TRACE_INCLUDE_FILE qla
+
+#include <trace/define_trace.h>
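
The __vstring()/__assign_vstr() pair in qla_log_event captures a fully formatted message without a fixed-size intermediate buffer. A hedged sketch of the emitting side (the prefix/fmt naming is assumed):

    /* Wrap a printf-style message in struct va_format and hand it to
     * the tracepoint; __assign_vstr() formats it into the ring buffer. */
    struct va_format vaf;
    va_list va;

    va_start(va, fmt);
    vaf.fmt = fmt;
    vaf.va = &va;
    trace_ql_dbg_log(prefix, &vaf);
    va_end(va);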
diff --git a/include/trace/events/qrtr.h b/include/trace/events/qrtr.h
new file mode 100644
index 000000000000..b1de14c3bb93
--- /dev/null
+++ b/include/trace/events/qrtr.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM qrtr
+
+#if !defined(_TRACE_QRTR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_QRTR_H
+
+#include <linux/qrtr.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(qrtr_ns_service_announce_new,
+
+ TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port),
+
+ TP_ARGS(service, instance, node, port),
+
+ TP_STRUCT__entry(
+ __field(__le32, service)
+ __field(__le32, instance)
+ __field(__le32, node)
+ __field(__le32, port)
+ ),
+
+ TP_fast_assign(
+ __entry->service = service;
+ __entry->instance = instance;
+ __entry->node = node;
+ __entry->port = port;
+ ),
+
+ TP_printk("advertising new server [%d:%x]@[%d:%d]",
+ __entry->service, __entry->instance, __entry->node,
+ __entry->port
+ )
+);
+
+TRACE_EVENT(qrtr_ns_service_announce_del,
+
+ TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port),
+
+ TP_ARGS(service, instance, node, port),
+
+ TP_STRUCT__entry(
+ __field(__le32, service)
+ __field(__le32, instance)
+ __field(__le32, node)
+ __field(__le32, port)
+ ),
+
+ TP_fast_assign(
+ __entry->service = service;
+ __entry->instance = instance;
+ __entry->node = node;
+ __entry->port = port;
+ ),
+
+ TP_printk("advertising removal of server [%d:%x]@[%d:%d]",
+ __entry->service, __entry->instance, __entry->node,
+ __entry->port
+ )
+);
+
+TRACE_EVENT(qrtr_ns_server_add,
+
+ TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port),
+
+ TP_ARGS(service, instance, node, port),
+
+ TP_STRUCT__entry(
+ __field(__le32, service)
+ __field(__le32, instance)
+ __field(__le32, node)
+ __field(__le32, port)
+ ),
+
+ TP_fast_assign(
+ __entry->service = service;
+ __entry->instance = instance;
+ __entry->node = node;
+ __entry->port = port;
+ ),
+
+ TP_printk("add server [%d:%x]@[%d:%d]",
+ __entry->service, __entry->instance, __entry->node,
+ __entry->port
+ )
+);
+
+TRACE_EVENT(qrtr_ns_message,
+
+ TP_PROTO(const char * const ctrl_pkt_str, __u32 sq_node, __u32 sq_port),
+
+ TP_ARGS(ctrl_pkt_str, sq_node, sq_port),
+
+ TP_STRUCT__entry(
+ __string(ctrl_pkt_str, ctrl_pkt_str)
+ __field(__u32, sq_node)
+ __field(__u32, sq_port)
+ ),
+
+ TP_fast_assign(
+ __assign_str(ctrl_pkt_str, ctrl_pkt_str);
+ __entry->sq_node = sq_node;
+ __entry->sq_port = sq_port;
+ ),
+
+ TP_printk("%s from %d:%d",
+ __get_str(ctrl_pkt_str), __entry->sq_node, __entry->sq_port
+ )
+);
+
+#endif /* _TRACE_QRTR_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
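
These qrtr events store the wire-endian __le32 fields verbatim. A minimal emission sketch from the name service (the struct layout is an assumption):

    /* Fields come straight off the wire; no byte swapping before tracing. */
    trace_qrtr_ns_server_add(srv->service, srv->instance,
    			     srv->node, srv->port);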
diff --git a/include/trace/events/random.h b/include/trace/events/random.h
deleted file mode 100644
index 32c10a515e2d..000000000000
--- a/include/trace/events/random.h
+++ /dev/null
@@ -1,313 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM random
-
-#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_RANDOM_H
-
-#include <linux/writeback.h>
-#include <linux/tracepoint.h>
-
-TRACE_EVENT(add_device_randomness,
- TP_PROTO(int bytes, unsigned long IP),
-
- TP_ARGS(bytes, IP),
-
- TP_STRUCT__entry(
- __field( int, bytes )
- __field(unsigned long, IP )
- ),
-
- TP_fast_assign(
- __entry->bytes = bytes;
- __entry->IP = IP;
- ),
-
- TP_printk("bytes %d caller %pS",
- __entry->bytes, (void *)__entry->IP)
-);
-
-DECLARE_EVENT_CLASS(random__mix_pool_bytes,
- TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
-
- TP_ARGS(pool_name, bytes, IP),
-
- TP_STRUCT__entry(
- __field( const char *, pool_name )
- __field( int, bytes )
- __field(unsigned long, IP )
- ),
-
- TP_fast_assign(
- __entry->pool_name = pool_name;
- __entry->bytes = bytes;
- __entry->IP = IP;
- ),
-
- TP_printk("%s pool: bytes %d caller %pS",
- __entry->pool_name, __entry->bytes, (void *)__entry->IP)
-);
-
-DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
- TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
-
- TP_ARGS(pool_name, bytes, IP)
-);
-
-DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
- TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
-
- TP_ARGS(pool_name, bytes, IP)
-);
-
-TRACE_EVENT(credit_entropy_bits,
- TP_PROTO(const char *pool_name, int bits, int entropy_count,
- unsigned long IP),
-
- TP_ARGS(pool_name, bits, entropy_count, IP),
-
- TP_STRUCT__entry(
- __field( const char *, pool_name )
- __field( int, bits )
- __field( int, entropy_count )
- __field(unsigned long, IP )
- ),
-
- TP_fast_assign(
- __entry->pool_name = pool_name;
- __entry->bits = bits;
- __entry->entropy_count = entropy_count;
- __entry->IP = IP;
- ),
-
- TP_printk("%s pool: bits %d entropy_count %d caller %pS",
- __entry->pool_name, __entry->bits,
- __entry->entropy_count, (void *)__entry->IP)
-);
-
-TRACE_EVENT(push_to_pool,
- TP_PROTO(const char *pool_name, int pool_bits, int input_bits),
-
- TP_ARGS(pool_name, pool_bits, input_bits),
-
- TP_STRUCT__entry(
- __field( const char *, pool_name )
- __field( int, pool_bits )
- __field( int, input_bits )
- ),
-
- TP_fast_assign(
- __entry->pool_name = pool_name;
- __entry->pool_bits = pool_bits;
- __entry->input_bits = input_bits;
- ),
-
- TP_printk("%s: pool_bits %d input_pool_bits %d",
- __entry->pool_name, __entry->pool_bits,
- __entry->input_bits)
-);
-
-TRACE_EVENT(debit_entropy,
- TP_PROTO(const char *pool_name, int debit_bits),
-
- TP_ARGS(pool_name, debit_bits),
-
- TP_STRUCT__entry(
- __field( const char *, pool_name )
- __field( int, debit_bits )
- ),
-
- TP_fast_assign(
- __entry->pool_name = pool_name;
- __entry->debit_bits = debit_bits;
- ),
-
- TP_printk("%s: debit_bits %d", __entry->pool_name,
- __entry->debit_bits)
-);
-
-TRACE_EVENT(add_input_randomness,
- TP_PROTO(int input_bits),
-
- TP_ARGS(input_bits),
-
- TP_STRUCT__entry(
- __field( int, input_bits )
- ),
-
- TP_fast_assign(
- __entry->input_bits = input_bits;
- ),
-
- TP_printk("input_pool_bits %d", __entry->input_bits)
-);
-
-TRACE_EVENT(add_disk_randomness,
- TP_PROTO(dev_t dev, int input_bits),
-
- TP_ARGS(dev, input_bits),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( int, input_bits )
- ),
-
- TP_fast_assign(
- __entry->dev = dev;
- __entry->input_bits = input_bits;
- ),
-
- TP_printk("dev %d,%d input_pool_bits %d", MAJOR(__entry->dev),
- MINOR(__entry->dev), __entry->input_bits)
-);
-
-TRACE_EVENT(xfer_secondary_pool,
- TP_PROTO(const char *pool_name, int xfer_bits, int request_bits,
- int pool_entropy, int input_entropy),
-
- TP_ARGS(pool_name, xfer_bits, request_bits, pool_entropy,
- input_entropy),
-
- TP_STRUCT__entry(
- __field( const char *, pool_name )
- __field( int, xfer_bits )
- __field( int, request_bits )
- __field( int, pool_entropy )
- __field( int, input_entropy )
- ),
-
- TP_fast_assign(
- __entry->pool_name = pool_name;
- __entry->xfer_bits = xfer_bits;
- __entry->request_bits = request_bits;
- __entry->pool_entropy = pool_entropy;
- __entry->input_entropy = input_entropy;
- ),
-
- TP_printk("pool %s xfer_bits %d request_bits %d pool_entropy %d "
- "input_entropy %d", __entry->pool_name, __entry->xfer_bits,
- __entry->request_bits, __entry->pool_entropy,
- __entry->input_entropy)
-);
-
-DECLARE_EVENT_CLASS(random__get_random_bytes,
- TP_PROTO(int nbytes, unsigned long IP),
-
- TP_ARGS(nbytes, IP),
-
- TP_STRUCT__entry(
- __field( int, nbytes )
- __field(unsigned long, IP )
- ),
-
- TP_fast_assign(
- __entry->nbytes = nbytes;
- __entry->IP = IP;
- ),
-
- TP_printk("nbytes %d caller %pS", __entry->nbytes, (void *)__entry->IP)
-);
-
-DEFINE_EVENT(random__get_random_bytes, get_random_bytes,
- TP_PROTO(int nbytes, unsigned long IP),
-
- TP_ARGS(nbytes, IP)
-);
-
-DEFINE_EVENT(random__get_random_bytes, get_random_bytes_arch,
- TP_PROTO(int nbytes, unsigned long IP),
-
- TP_ARGS(nbytes, IP)
-);
-
-DECLARE_EVENT_CLASS(random__extract_entropy,
- TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
- unsigned long IP),
-
- TP_ARGS(pool_name, nbytes, entropy_count, IP),
-
- TP_STRUCT__entry(
- __field( const char *, pool_name )
- __field( int, nbytes )
- __field( int, entropy_count )
- __field(unsigned long, IP )
- ),
-
- TP_fast_assign(
- __entry->pool_name = pool_name;
- __entry->nbytes = nbytes;
- __entry->entropy_count = entropy_count;
- __entry->IP = IP;
- ),
-
- TP_printk("%s pool: nbytes %d entropy_count %d caller %pS",
- __entry->pool_name, __entry->nbytes, __entry->entropy_count,
- (void *)__entry->IP)
-);
-
-
-DEFINE_EVENT(random__extract_entropy, extract_entropy,
- TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
- unsigned long IP),
-
- TP_ARGS(pool_name, nbytes, entropy_count, IP)
-);
-
-DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
- TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
- unsigned long IP),
-
- TP_ARGS(pool_name, nbytes, entropy_count, IP)
-);
-
-TRACE_EVENT(random_read,
- TP_PROTO(int got_bits, int need_bits, int pool_left, int input_left),
-
- TP_ARGS(got_bits, need_bits, pool_left, input_left),
-
- TP_STRUCT__entry(
- __field( int, got_bits )
- __field( int, need_bits )
- __field( int, pool_left )
- __field( int, input_left )
- ),
-
- TP_fast_assign(
- __entry->got_bits = got_bits;
- __entry->need_bits = need_bits;
- __entry->pool_left = pool_left;
- __entry->input_left = input_left;
- ),
-
- TP_printk("got_bits %d still_needed_bits %d "
- "blocking_pool_entropy_left %d input_entropy_left %d",
- __entry->got_bits, __entry->got_bits, __entry->pool_left,
- __entry->input_left)
-);
-
-TRACE_EVENT(urandom_read,
- TP_PROTO(int got_bits, int pool_left, int input_left),
-
- TP_ARGS(got_bits, pool_left, input_left),
-
- TP_STRUCT__entry(
- __field( int, got_bits )
- __field( int, pool_left )
- __field( int, input_left )
- ),
-
- TP_fast_assign(
- __entry->got_bits = got_bits;
- __entry->pool_left = pool_left;
- __entry->input_left = input_left;
- ),
-
- TP_printk("got_bits %d nonblocking_pool_entropy_left %d "
- "input_entropy_left %d", __entry->got_bits,
- __entry->pool_left, __entry->input_left)
-);
-
-#endif /* _TRACE_RANDOM_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 5e49b06e8104..90b2fb0292cb 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -48,7 +48,7 @@ TRACE_EVENT(rcu_utilization,
* RCU flavor, the grace-period number, and a string identifying the
* grace-period-related event as follows:
*
- * "AccReadyCB": CPU acclerates new callbacks to RCU_NEXT_READY_TAIL.
+ * "AccReadyCB": CPU accelerates new callbacks to RCU_NEXT_READY_TAIL.
* "AccWaitCB": CPU accelerates new callbacks to RCU_WAIT_TAIL.
* "newreq": Request a new grace period.
* "start": Start a grace period.
@@ -74,17 +74,17 @@ TRACE_EVENT_RCU(rcu_grace_period,
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gp_seq)
+ __field(long, gp_seq)
__field(const char *, gpevent)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gp_seq = gp_seq;
+ __entry->gp_seq = (long)gp_seq;
__entry->gpevent = gpevent;
),
- TP_printk("%s %lu %s",
+ TP_printk("%s %ld %s",
__entry->rcuname, __entry->gp_seq, __entry->gpevent)
);
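
The unsigned-long-to-long conversion running through this file exists because grace-period sequence numbers are initialized just below the wrap point, so unsigned formatting is unreadable at boot. An illustrative comparison (values assumed):

    unsigned long gp_seq = -1200UL;	/* early-boot style value */

    pr_info("%lu\n", gp_seq);		/* 18446744073709550416 */
    pr_info("%ld\n", (long)gp_seq);	/* -1200, much easier to read */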
@@ -114,8 +114,8 @@ TRACE_EVENT_RCU(rcu_future_grace_period,
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gp_seq)
- __field(unsigned long, gp_seq_req)
+ __field(long, gp_seq)
+ __field(long, gp_seq_req)
__field(u8, level)
__field(int, grplo)
__field(int, grphi)
@@ -124,16 +124,16 @@ TRACE_EVENT_RCU(rcu_future_grace_period,
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gp_seq = gp_seq;
- __entry->gp_seq_req = gp_seq_req;
+ __entry->gp_seq = (long)gp_seq;
+ __entry->gp_seq_req = (long)gp_seq_req;
__entry->level = level;
__entry->grplo = grplo;
__entry->grphi = grphi;
__entry->gpevent = gpevent;
),
- TP_printk("%s %lu %lu %u %d %d %s",
- __entry->rcuname, __entry->gp_seq, __entry->gp_seq_req, __entry->level,
+ TP_printk("%s %ld %ld %u %d %d %s",
+ __entry->rcuname, (long)__entry->gp_seq, (long)__entry->gp_seq_req, __entry->level,
__entry->grplo, __entry->grphi, __entry->gpevent)
);
@@ -153,7 +153,7 @@ TRACE_EVENT_RCU(rcu_grace_period_init,
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gp_seq)
+ __field(long, gp_seq)
__field(u8, level)
__field(int, grplo)
__field(int, grphi)
@@ -162,14 +162,14 @@ TRACE_EVENT_RCU(rcu_grace_period_init,
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gp_seq = gp_seq;
+ __entry->gp_seq = (long)gp_seq;
__entry->level = level;
__entry->grplo = grplo;
__entry->grphi = grphi;
__entry->qsmask = qsmask;
),
- TP_printk("%s %lu %u %d %d %lx",
+ TP_printk("%s %ld %u %d %d %lx",
__entry->rcuname, __entry->gp_seq, __entry->level,
__entry->grplo, __entry->grphi, __entry->qsmask)
);
@@ -197,17 +197,17 @@ TRACE_EVENT_RCU(rcu_exp_grace_period,
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpseq)
+ __field(long, gpseq)
__field(const char *, gpevent)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpseq = gpseq;
+ __entry->gpseq = (long)gpseq;
__entry->gpevent = gpevent;
),
- TP_printk("%s %lu %s",
+ TP_printk("%s %ld %s",
__entry->rcuname, __entry->gpseq, __entry->gpevent)
);
@@ -278,6 +278,7 @@ TRACE_EVENT_RCU(rcu_exp_funnel_lock,
* "WakeNot": Don't wake rcuo kthread.
* "WakeNotPoll": Don't wake rcuo kthread because it is polling.
* "WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge.
+ * "WakeBypassIsDeferred": Wake rcuo kthread later, bypass list is contended.
* "WokeEmpty": rcuo CB kthread woke to find empty list.
*/
TRACE_EVENT_RCU(rcu_nocb_wake,
@@ -316,17 +317,17 @@ TRACE_EVENT_RCU(rcu_preempt_task,
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gp_seq)
+ __field(long, gp_seq)
__field(int, pid)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gp_seq = gp_seq;
+ __entry->gp_seq = (long)gp_seq;
__entry->pid = pid;
),
- TP_printk("%s %lu %d",
+ TP_printk("%s %ld %d",
__entry->rcuname, __entry->gp_seq, __entry->pid)
);
@@ -343,17 +344,17 @@ TRACE_EVENT_RCU(rcu_unlock_preempted_task,
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gp_seq)
+ __field(long, gp_seq)
__field(int, pid)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gp_seq = gp_seq;
+ __entry->gp_seq = (long)gp_seq;
__entry->pid = pid;
),
- TP_printk("%s %lu %d", __entry->rcuname, __entry->gp_seq, __entry->pid)
+ TP_printk("%s %ld %d", __entry->rcuname, __entry->gp_seq, __entry->pid)
);
/*
@@ -374,7 +375,7 @@ TRACE_EVENT_RCU(rcu_quiescent_state_report,
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gp_seq)
+ __field(long, gp_seq)
__field(unsigned long, mask)
__field(unsigned long, qsmask)
__field(u8, level)
@@ -385,7 +386,7 @@ TRACE_EVENT_RCU(rcu_quiescent_state_report,
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gp_seq = gp_seq;
+ __entry->gp_seq = (long)gp_seq;
__entry->mask = mask;
__entry->qsmask = qsmask;
__entry->level = level;
@@ -394,7 +395,7 @@ TRACE_EVENT_RCU(rcu_quiescent_state_report,
__entry->gp_tasks = gp_tasks;
),
- TP_printk("%s %lu %lx>%lx %u %d %d %u",
+ TP_printk("%s %ld %lx>%lx %u %d %d %u",
__entry->rcuname, __entry->gp_seq,
__entry->mask, __entry->qsmask, __entry->level,
__entry->grplo, __entry->grphi, __entry->gp_tasks)
@@ -415,31 +416,60 @@ TRACE_EVENT_RCU(rcu_fqs,
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gp_seq)
+ __field(long, gp_seq)
__field(int, cpu)
__field(const char *, qsevent)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gp_seq = gp_seq;
+ __entry->gp_seq = (long)gp_seq;
__entry->cpu = cpu;
__entry->qsevent = qsevent;
),
- TP_printk("%s %lu %d %s",
+ TP_printk("%s %ld %d %s",
__entry->rcuname, __entry->gp_seq,
__entry->cpu, __entry->qsevent)
);
+/*
+ * Tracepoint for RCU stall events. Takes a string identifying the RCU flavor
+ * and a string identifying which function detected the RCU stall as follows:
+ *
+ * "StallDetected": Scheduler-tick detects other CPU's stalls.
+ * "SelfDetected": Scheduler-tick detects a current CPU's stall.
+ * "ExpeditedStall": Expedited grace period detects stalls.
+ */
+TRACE_EVENT(rcu_stall_warning,
+
+ TP_PROTO(const char *rcuname, const char *msg),
+
+ TP_ARGS(rcuname, msg),
+
+ TP_STRUCT__entry(
+ __field(const char *, rcuname)
+ __field(const char *, msg)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->msg = msg;
+ ),
+
+ TP_printk("%s %s",
+ __entry->rcuname, __entry->msg)
+);
+
#endif /* #if defined(CONFIG_TREE_RCU) */
/*
- * Tracepoint for dyntick-idle entry/exit events. These take a string
- * as argument: "Start" for entering dyntick-idle mode, "Startirq" for
- * entering it from irq/NMI, "End" for leaving it, "Endirq" for leaving it
- * to irq/NMI, "--=" for events moving towards idle, and "++=" for events
- * moving away from idle.
+ * Tracepoint for dyntick-idle entry/exit events. These take two strings
+ * as arguments:
+ * polarity: "Start", "End", or "StillNonIdle", for entering, exiting, or
+ * remaining in dyntick-idle mode, respectively.
+ * context: "USER", "IDLE", or "IRQ".
+ * NMIs nested in IRQs are inferred from dynticks_nesting > 1 in IRQ context.
*
* These events also take a pair of numbers, which indicate the nesting
* depth before and after the event of interest, and a third number that is
@@ -504,15 +534,41 @@ TRACE_EVENT_RCU(rcu_callback,
__entry->qlen)
);
+TRACE_EVENT_RCU(rcu_segcb_stats,
+
+ TP_PROTO(struct rcu_segcblist *rs, const char *ctx),
+
+ TP_ARGS(rs, ctx),
+
+ TP_STRUCT__entry(
+ __field(const char *, ctx)
+ __array(unsigned long, gp_seq, RCU_CBLIST_NSEGS)
+ __array(long, seglen, RCU_CBLIST_NSEGS)
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ memcpy(__entry->seglen, rs->seglen, RCU_CBLIST_NSEGS * sizeof(long));
+ memcpy(__entry->gp_seq, rs->gp_seq, RCU_CBLIST_NSEGS * sizeof(unsigned long));
+	),
+
+ TP_printk("%s seglen: (DONE=%ld, WAIT=%ld, NEXT_READY=%ld, NEXT=%ld) "
+ "gp_seq: (DONE=%lu, WAIT=%lu, NEXT_READY=%lu, NEXT=%lu)", __entry->ctx,
+ __entry->seglen[0], __entry->seglen[1], __entry->seglen[2], __entry->seglen[3],
+ __entry->gp_seq[0], __entry->gp_seq[1], __entry->gp_seq[2], __entry->gp_seq[3])
+);
+
/*
* Tracepoint for the registration of a single RCU callback of the special
- * kfree() form. The first argument is the RCU type, the second argument
+ * kvfree() form. The first argument is the RCU type, the second argument
* is a pointer to the RCU callback, the third argument is the offset
* of the callback within the enclosing RCU-protected data structure,
* the fourth argument is the number of lazy callbacks queued, and the
* fifth argument is the total number of callbacks queued.
*/
-TRACE_EVENT_RCU(rcu_kfree_callback,
+TRACE_EVENT_RCU(rcu_kvfree_callback,
TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
long qlen),
@@ -596,12 +652,12 @@ TRACE_EVENT_RCU(rcu_invoke_callback,
/*
* Tracepoint for the invocation of a single RCU callback of the special
- * kfree() form. The first argument is the RCU flavor, the second
+ * kvfree() form. The first argument is the RCU flavor, the second
* argument is a pointer to the RCU callback, and the third argument
* is the offset of the callback within the enclosing RCU-protected
* data structure.
*/
-TRACE_EVENT_RCU(rcu_invoke_kfree_callback,
+TRACE_EVENT_RCU(rcu_invoke_kvfree_callback,
TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset),
@@ -624,6 +680,34 @@ TRACE_EVENT_RCU(rcu_invoke_kfree_callback,
);
/*
+ * Tracepoint for the invocation of a single RCU callback of the special
+ * kfree_bulk() form. The first argument is the RCU flavor, the second
+ * argument is the number of elements in the array being freed, and the
+ * third is the address of the array holding the nr_records entries.
+ */
+TRACE_EVENT_RCU(rcu_invoke_kfree_bulk_callback,
+
+ TP_PROTO(const char *rcuname, unsigned long nr_records, void **p),
+
+ TP_ARGS(rcuname, nr_records, p),
+
+ TP_STRUCT__entry(
+ __field(const char *, rcuname)
+ __field(unsigned long, nr_records)
+ __field(void **, p)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->nr_records = nr_records;
+ __entry->p = p;
+ ),
+
+ TP_printk("%s bulk=0x%p nr_records=%lu",
+ __entry->rcuname, __entry->p, __entry->nr_records)
+);
+
+/*
* Tracepoint for exiting rcu_do_batch after RCU callbacks have been
* invoked. The first argument is the name of the RCU flavor,
* the second argument is number of callbacks actually invoked,
@@ -710,15 +794,15 @@ TRACE_EVENT_RCU(rcu_torture_read,
* Tracepoint for rcu_barrier() execution. The string "s" describes
* the rcu_barrier phase:
* "Begin": rcu_barrier() started.
+ * "CB": An rcu_barrier_callback() invoked a callback, not the last.
* "EarlyExit": rcu_barrier() piggybacked, thus early exit.
* "Inc1": rcu_barrier() piggyback check counter incremented.
- * "OnlineQ": rcu_barrier() found online CPU with callbacks.
- * "OnlineNQ": rcu_barrier() found online CPU, no callbacks.
+ * "Inc2": rcu_barrier() piggyback check counter incremented.
* "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
* "IRQNQ": An rcu_barrier_callback() callback found no callbacks.
- * "CB": An rcu_barrier_callback() invoked a callback, not the last.
* "LastCB": An rcu_barrier_callback() invoked the last callback.
- * "Inc2": rcu_barrier() piggyback check counter incremented.
+ * "NQ": rcu_barrier() found a CPU with no callbacks.
+ * "OnlineQ": rcu_barrier() found online CPU with callbacks.
* The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
* is the count of remaining callbacks, and "done" is the piggybacking count.
*/
diff --git a/include/trace/events/rdma.h b/include/trace/events/rdma.h
index aa19afc73a4e..81bb454fc288 100644
--- a/include/trace/events/rdma.h
+++ b/include/trace/events/rdma.h
@@ -6,7 +6,6 @@
/*
* enum ib_event_type, from include/rdma/ib_verbs.h
*/
-
#define IB_EVENT_LIST \
ib_event(CQ_ERR) \
ib_event(QP_FATAL) \
@@ -91,6 +90,46 @@ IB_WC_STATUS_LIST
__print_symbolic(x, IB_WC_STATUS_LIST)
/*
+ * enum ib_cm_event_type, from include/rdma/ib_cm.h
+ */
+#define IB_CM_EVENT_LIST \
+ ib_cm_event(REQ_ERROR) \
+ ib_cm_event(REQ_RECEIVED) \
+ ib_cm_event(REP_ERROR) \
+ ib_cm_event(REP_RECEIVED) \
+ ib_cm_event(RTU_RECEIVED) \
+ ib_cm_event(USER_ESTABLISHED) \
+ ib_cm_event(DREQ_ERROR) \
+ ib_cm_event(DREQ_RECEIVED) \
+ ib_cm_event(DREP_RECEIVED) \
+ ib_cm_event(TIMEWAIT_EXIT) \
+ ib_cm_event(MRA_RECEIVED) \
+ ib_cm_event(REJ_RECEIVED) \
+ ib_cm_event(LAP_ERROR) \
+ ib_cm_event(LAP_RECEIVED) \
+ ib_cm_event(APR_RECEIVED) \
+ ib_cm_event(SIDR_REQ_ERROR) \
+ ib_cm_event(SIDR_REQ_RECEIVED) \
+ ib_cm_event_end(SIDR_REP_RECEIVED)
+
+#undef ib_cm_event
+#undef ib_cm_event_end
+
+#define ib_cm_event(x) TRACE_DEFINE_ENUM(IB_CM_##x);
+#define ib_cm_event_end(x) TRACE_DEFINE_ENUM(IB_CM_##x);
+
+IB_CM_EVENT_LIST
+
+#undef ib_cm_event
+#undef ib_cm_event_end
+
+#define ib_cm_event(x) { IB_CM_##x, #x },
+#define ib_cm_event_end(x) { IB_CM_##x, #x }
+
+#define rdma_show_ib_cm_event(x) \
+ __print_symbolic(x, IB_CM_EVENT_LIST)
+
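
The list macro above is expanded twice on purpose: once so each enum value is registered for user-space decoding, and once to build the symbolic name table. Abridged, the two passes produce roughly:

    TRACE_DEFINE_ENUM(IB_CM_REQ_ERROR);
    /* ... one per list entry ... */

    #define rdma_show_ib_cm_event(x)				\
    	__print_symbolic(x,					\
    		{ IB_CM_REQ_ERROR, "REQ_ERROR" },		\
    		/* ... */					\
    		{ IB_CM_SIDR_REP_RECEIVED, "SIDR_REP_RECEIVED" })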
+/*
* enum rdma_cm_event_type, from include/rdma/rdma_cm.h
*/
#define RDMA_CM_EVENT_LIST \
diff --git a/include/trace/events/regulator.h b/include/trace/events/regulator.h
index b70583c32c08..72b3ba93b0a5 100644
--- a/include/trace/events/regulator.h
+++ b/include/trace/events/regulator.h
@@ -70,6 +70,38 @@ DEFINE_EVENT(regulator_basic, regulator_disable_complete,
);
+DEFINE_EVENT(regulator_basic, regulator_bypass_enable,
+
+ TP_PROTO(const char *name),
+
+ TP_ARGS(name)
+
+);
+
+DEFINE_EVENT(regulator_basic, regulator_bypass_enable_complete,
+
+ TP_PROTO(const char *name),
+
+ TP_ARGS(name)
+
+);
+
+DEFINE_EVENT(regulator_basic, regulator_bypass_disable,
+
+ TP_PROTO(const char *name),
+
+ TP_ARGS(name)
+
+);
+
+DEFINE_EVENT(regulator_basic, regulator_bypass_disable_complete,
+
+ TP_PROTO(const char *name),
+
+ TP_ARGS(name)
+
+);
+
/*
* Events that take a range of numerical values, mostly for voltages
* and so on.
diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h
index 9827f535f032..c9048f3e471b 100644
--- a/include/trace/events/rpcgss.h
+++ b/include/trace/events/rpcgss.h
@@ -8,15 +8,27 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rpcgss
-#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
+#if !defined(_TRACE_RPCGSS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RPCGSS_H
#include <linux/tracepoint.h>
+#include <trace/events/sunrpc_base.h>
+
/**
** GSS-API related trace events
**/
+TRACE_DEFINE_ENUM(RPC_GSS_SVC_NONE);
+TRACE_DEFINE_ENUM(RPC_GSS_SVC_INTEGRITY);
+TRACE_DEFINE_ENUM(RPC_GSS_SVC_PRIVACY);
+
+#define show_gss_service(x) \
+ __print_symbolic(x, \
+ { RPC_GSS_SVC_NONE, "none" }, \
+ { RPC_GSS_SVC_INTEGRITY, "integrity" }, \
+ { RPC_GSS_SVC_PRIVACY, "privacy" })
+
TRACE_DEFINE_ENUM(GSS_S_BAD_MECH);
TRACE_DEFINE_ENUM(GSS_S_BAD_NAME);
TRACE_DEFINE_ENUM(GSS_S_BAD_NAMETYPE);
@@ -89,7 +101,7 @@ DECLARE_EVENT_CLASS(rpcgss_gssapi_event,
__entry->maj_stat = maj_stat;
),
- TP_printk("task:%u@%u maj_stat=%s",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " maj_stat=%s",
__entry->task_id, __entry->client_id,
__entry->maj_stat == 0 ?
"GSS_S_COMPLETE" : show_gss_status(__entry->maj_stat))
@@ -126,34 +138,180 @@ DEFINE_GSSAPI_EVENT(verify_mic);
DEFINE_GSSAPI_EVENT(wrap);
DEFINE_GSSAPI_EVENT(unwrap);
-TRACE_EVENT(rpcgss_accept_upcall,
+DECLARE_EVENT_CLASS(rpcgss_ctx_class,
TP_PROTO(
- __be32 xid,
+ const struct gss_cred *gc
+ ),
+
+ TP_ARGS(gc),
+
+ TP_STRUCT__entry(
+ __field(const void *, cred)
+ __field(unsigned long, service)
+ __string(principal, gc->gc_principal)
+ ),
+
+ TP_fast_assign(
+ __entry->cred = gc;
+ __entry->service = gc->gc_service;
+ __assign_str(principal, gc->gc_principal);
+ ),
+
+ TP_printk("cred=%p service=%s principal='%s'",
+ __entry->cred, show_gss_service(__entry->service),
+ __get_str(principal))
+);
+
+#define DEFINE_CTX_EVENT(name) \
+ DEFINE_EVENT(rpcgss_ctx_class, rpcgss_ctx_##name, \
+ TP_PROTO( \
+ const struct gss_cred *gc \
+ ), \
+ TP_ARGS(gc))
+
+DEFINE_CTX_EVENT(init);
+DEFINE_CTX_EVENT(destroy);
+
+DECLARE_EVENT_CLASS(rpcgss_svc_gssapi_class,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ u32 maj_stat
+ ),
+
+ TP_ARGS(rqstp, maj_stat),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(u32, maj_stat)
+ __string(addr, rqstp->rq_xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = __be32_to_cpu(rqstp->rq_xid);
+ __entry->maj_stat = maj_stat;
+ __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s xid=0x%08x maj_stat=%s",
+ __get_str(addr), __entry->xid,
+ __entry->maj_stat == 0 ?
+ "GSS_S_COMPLETE" : show_gss_status(__entry->maj_stat))
+);
+
+#define DEFINE_SVC_GSSAPI_EVENT(name) \
+ DEFINE_EVENT(rpcgss_svc_gssapi_class, rpcgss_svc_##name, \
+ TP_PROTO( \
+ const struct svc_rqst *rqstp, \
+ u32 maj_stat \
+ ), \
+ TP_ARGS(rqstp, maj_stat))
+
+DEFINE_SVC_GSSAPI_EVENT(unwrap);
+DEFINE_SVC_GSSAPI_EVENT(mic);
+
+TRACE_EVENT(rpcgss_svc_unwrap_failed,
+ TP_PROTO(
+ const struct svc_rqst *rqstp
+ ),
+
+ TP_ARGS(rqstp),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __string(addr, rqstp->rq_xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s xid=0x%08x", __get_str(addr), __entry->xid)
+);
+
+TRACE_EVENT(rpcgss_svc_seqno_bad,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ u32 expected,
+ u32 received
+ ),
+
+ TP_ARGS(rqstp, expected, received),
+
+ TP_STRUCT__entry(
+ __field(u32, expected)
+ __field(u32, received)
+ __field(u32, xid)
+ __string(addr, rqstp->rq_xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->expected = expected;
+ __entry->received = received;
+ __entry->xid = __be32_to_cpu(rqstp->rq_xid);
+ __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s xid=0x%08x expected seqno %u, received seqno %u",
+ __get_str(addr), __entry->xid,
+ __entry->expected, __entry->received)
+);
+
+TRACE_EVENT(rpcgss_svc_accept_upcall,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
u32 major_status,
u32 minor_status
),
- TP_ARGS(xid, major_status, minor_status),
+ TP_ARGS(rqstp, major_status, minor_status),
TP_STRUCT__entry(
- __field(u32, xid)
__field(u32, minor_status)
__field(unsigned long, major_status)
+ __field(u32, xid)
+ __string(addr, rqstp->rq_xprt->xpt_remotebuf)
),
TP_fast_assign(
- __entry->xid = be32_to_cpu(xid);
__entry->minor_status = minor_status;
__entry->major_status = major_status;
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf);
),
- TP_printk("xid=0x%08x major_status=%s (0x%08lx) minor_status=%u",
- __entry->xid, __entry->major_status == 0 ? "GSS_S_COMPLETE" :
- show_gss_status(__entry->major_status),
+ TP_printk("addr=%s xid=0x%08x major_status=%s (0x%08lx) minor_status=%u",
+ __get_str(addr), __entry->xid,
+ (__entry->major_status == 0) ? "GSS_S_COMPLETE" :
+ show_gss_status(__entry->major_status),
__entry->major_status, __entry->minor_status
)
);
+TRACE_EVENT(rpcgss_svc_authenticate,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ const struct rpc_gss_wire_cred *gc
+ ),
+
+ TP_ARGS(rqstp, gc),
+
+ TP_STRUCT__entry(
+ __field(u32, seqno)
+ __field(u32, xid)
+ __string(addr, rqstp->rq_xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __entry->seqno = gc->gc_seq;
+ __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s xid=0x%08x seqno=%u", __get_str(addr),
+ __entry->xid, __entry->seqno)
+);
+
/**
** GSS auth unwrap failures
@@ -176,7 +334,8 @@ TRACE_EVENT(rpcgss_unwrap_failed,
__entry->client_id = task->tk_client->cl_clid;
),
- TP_printk("task:%u@%u", __entry->task_id, __entry->client_id)
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER,
+ __entry->task_id, __entry->client_id)
);
TRACE_EVENT(rpcgss_bad_seqno,
@@ -202,7 +361,8 @@ TRACE_EVENT(rpcgss_bad_seqno,
__entry->received = received;
),
- TP_printk("task:%u@%u expected seqno %u, received seqno %u",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " expected seqno %u, received seqno %u",
__entry->task_id, __entry->client_id,
__entry->expected, __entry->received)
);
@@ -230,7 +390,7 @@ TRACE_EVENT(rpcgss_seqno,
__entry->seqno = rqst->rq_seqno;
),
- TP_printk("task:%u@%u xid=0x%08x seqno=%u",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x seqno=%u",
__entry->task_id, __entry->client_id,
__entry->xid, __entry->seqno)
);
@@ -262,12 +422,109 @@ TRACE_EVENT(rpcgss_need_reencode,
__entry->ret = ret;
),
- TP_printk("task:%u@%u xid=0x%08x rq_seqno=%u seq_xmit=%u reencode %sneeded",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " xid=0x%08x rq_seqno=%u seq_xmit=%u reencode %sneeded",
__entry->task_id, __entry->client_id,
__entry->xid, __entry->seqno, __entry->seq_xmit,
__entry->ret ? "" : "un")
);
+TRACE_EVENT(rpcgss_update_slack,
+ TP_PROTO(
+ const struct rpc_task *task,
+ const struct rpc_auth *auth
+ ),
+
+ TP_ARGS(task, auth),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ __field(const void *, auth)
+ __field(unsigned int, rslack)
+ __field(unsigned int, ralign)
+ __field(unsigned int, verfsize)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
+ __entry->auth = auth;
+ __entry->rslack = auth->au_rslack;
+ __entry->ralign = auth->au_ralign;
+ __entry->verfsize = auth->au_verfsize;
+ ),
+
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " xid=0x%08x auth=%p rslack=%u ralign=%u verfsize=%u\n",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->auth, __entry->rslack, __entry->ralign,
+ __entry->verfsize)
+);
+
+DECLARE_EVENT_CLASS(rpcgss_svc_seqno_class,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ u32 seqno
+ ),
+
+ TP_ARGS(rqstp, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("xid=0x%08x seqno=%u",
+ __entry->xid, __entry->seqno)
+);
+
+#define DEFINE_SVC_SEQNO_EVENT(name) \
+ DEFINE_EVENT(rpcgss_svc_seqno_class, rpcgss_svc_seqno_##name, \
+ TP_PROTO( \
+ const struct svc_rqst *rqstp, \
+ u32 seqno \
+ ), \
+ TP_ARGS(rqstp, seqno))
+
+DEFINE_SVC_SEQNO_EVENT(large);
+DEFINE_SVC_SEQNO_EVENT(seen);
+
+TRACE_EVENT(rpcgss_svc_seqno_low,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ u32 seqno,
+ u32 min,
+ u32 max
+ ),
+
+ TP_ARGS(rqstp, seqno, min, max),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(u32, seqno)
+ __field(u32, min)
+ __field(u32, max)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __entry->seqno = seqno;
+ __entry->min = min;
+ __entry->max = max;
+ ),
+
+ TP_printk("xid=0x%08x seqno=%u window=[%u..%u]",
+ __entry->xid, __entry->seqno, __entry->min, __entry->max)
+);
+
/**
** gssd upcall related trace events
**/
@@ -284,7 +541,7 @@ TRACE_EVENT(rpcgss_upcall_msg,
),
TP_fast_assign(
- __assign_str(msg, buf)
+ __assign_str(msg, buf);
),
TP_printk("msg='%s'", __get_str(msg))
@@ -314,6 +571,7 @@ TRACE_EVENT(rpcgss_upcall_result,
TRACE_EVENT(rpcgss_context,
TP_PROTO(
+ u32 window_size,
unsigned long expiry,
unsigned long now,
unsigned int timeout,
@@ -321,12 +579,13 @@ TRACE_EVENT(rpcgss_context,
const u8 *data
),
- TP_ARGS(expiry, now, timeout, len, data),
+ TP_ARGS(window_size, expiry, now, timeout, len, data),
TP_STRUCT__entry(
__field(unsigned long, expiry)
__field(unsigned long, now)
__field(unsigned int, timeout)
+ __field(u32, window_size)
__field(int, len)
__string(acceptor, data)
),
@@ -335,13 +594,14 @@ TRACE_EVENT(rpcgss_context,
__entry->expiry = expiry;
__entry->now = now;
__entry->timeout = timeout;
+ __entry->window_size = window_size;
__entry->len = len;
strncpy(__get_str(acceptor), data, len);
),
- TP_printk("gc_expiry=%lu now=%lu timeout=%u acceptor=%.*s",
- __entry->expiry, __entry->now, __entry->timeout,
- __entry->len, __get_str(acceptor))
+ TP_printk("win_size=%u expiry=%lu now=%lu timeout=%u acceptor=%.*s",
+ __entry->window_size, __entry->expiry, __entry->now,
+ __entry->timeout, __entry->len, __get_str(acceptor))
);
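
Several formats in this file switch from a literal "task:%u@%u" to SUNRPC_TRACE_TASK_SPECIFIER from the newly included sunrpc_base.h; the point is one shared, zero-padded task format across the sunrpc trace families, presumably along the lines of:

    /* Assumed shape of the shared macro in trace/events/sunrpc_base.h. */
    #define SUNRPC_TRACE_TASK_SPECIFIER	"task:%08u@%08u"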
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index c0e4c93324f5..fcd3b3f1020a 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -11,14 +11,282 @@
#define _TRACE_RPCRDMA_H
#include <linux/scatterlist.h>
+#include <linux/sunrpc/rpc_rdma_cid.h>
#include <linux/tracepoint.h>
+#include <rdma/ib_cm.h>
+
#include <trace/events/rdma.h>
+#include <trace/events/sunrpc_base.h>
/**
** Event classes
**/
-DECLARE_EVENT_CLASS(xprtrdma_reply_event,
+DECLARE_EVENT_CLASS(rpcrdma_completion_class,
+ TP_PROTO(
+ const struct ib_wc *wc,
+ const struct rpc_rdma_cid *cid
+ ),
+
+ TP_ARGS(wc, cid),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(unsigned long, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ __entry->status = wc->status;
+ if (wc->status)
+ __entry->vendor_err = wc->vendor_err;
+ else
+ __entry->vendor_err = 0;
+ ),
+
+ TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
+ __entry->cq_id, __entry->completion_id,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+#define DEFINE_COMPLETION_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_completion_class, name, \
+ TP_PROTO( \
+ const struct ib_wc *wc, \
+ const struct rpc_rdma_cid *cid \
+ ), \
+ TP_ARGS(wc, cid))
+
+DECLARE_EVENT_CLASS(rpcrdma_send_completion_class,
+ TP_PROTO(
+ const struct ib_wc *wc,
+ const struct rpc_rdma_cid *cid
+ ),
+
+ TP_ARGS(wc, cid),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ ),
+
+ TP_printk("cq.id=%u cid=%d",
+ __entry->cq_id, __entry->completion_id
+ )
+);
+
+#define DEFINE_SEND_COMPLETION_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_send_completion_class, name, \
+ TP_PROTO( \
+ const struct ib_wc *wc, \
+ const struct rpc_rdma_cid *cid \
+ ), \
+ TP_ARGS(wc, cid))
+
+DECLARE_EVENT_CLASS(rpcrdma_send_flush_class,
+ TP_PROTO(
+ const struct ib_wc *wc,
+ const struct rpc_rdma_cid *cid
+ ),
+
+ TP_ARGS(wc, cid),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(unsigned long, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ __entry->status = wc->status;
+ __entry->vendor_err = wc->vendor_err;
+ ),
+
+ TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
+ __entry->cq_id, __entry->completion_id,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+#define DEFINE_SEND_FLUSH_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_send_flush_class, name, \
+ TP_PROTO( \
+ const struct ib_wc *wc, \
+ const struct rpc_rdma_cid *cid \
+ ), \
+ TP_ARGS(wc, cid))
+
+DECLARE_EVENT_CLASS(rpcrdma_mr_completion_class,
+ TP_PROTO(
+ const struct ib_wc *wc,
+ const struct rpc_rdma_cid *cid
+ ),
+
+ TP_ARGS(wc, cid),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(unsigned long, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ __entry->status = wc->status;
+ if (wc->status)
+ __entry->vendor_err = wc->vendor_err;
+ else
+ __entry->vendor_err = 0;
+ ),
+
+ TP_printk("cq.id=%u mr.id=%d status=%s (%lu/0x%x)",
+ __entry->cq_id, __entry->completion_id,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+#define DEFINE_MR_COMPLETION_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_mr_completion_class, name, \
+ TP_PROTO( \
+ const struct ib_wc *wc, \
+ const struct rpc_rdma_cid *cid \
+ ), \
+ TP_ARGS(wc, cid))
+
+DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class,
+ TP_PROTO(
+ const struct ib_wc *wc,
+ const struct rpc_rdma_cid *cid
+ ),
+
+ TP_ARGS(wc, cid),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(u32, received)
+ __field(unsigned long, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ __entry->status = wc->status;
+ if (wc->status) {
+ __entry->received = 0;
+ __entry->vendor_err = wc->vendor_err;
+ } else {
+ __entry->received = wc->byte_len;
+ __entry->vendor_err = 0;
+ }
+ ),
+
+ TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x) received=%u",
+ __entry->cq_id, __entry->completion_id,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err,
+ __entry->received
+ )
+);
+
+#define DEFINE_RECEIVE_COMPLETION_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_receive_completion_class, name, \
+ TP_PROTO( \
+ const struct ib_wc *wc, \
+ const struct rpc_rdma_cid *cid \
+ ), \
+ TP_ARGS(wc, cid))
+
+DECLARE_EVENT_CLASS(rpcrdma_receive_success_class,
+ TP_PROTO(
+ const struct ib_wc *wc,
+ const struct rpc_rdma_cid *cid
+ ),
+
+ TP_ARGS(wc, cid),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(u32, received)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ __entry->received = wc->byte_len;
+ ),
+
+ TP_printk("cq.id=%u cid=%d received=%u",
+ __entry->cq_id, __entry->completion_id,
+ __entry->received
+ )
+);
+
+#define DEFINE_RECEIVE_SUCCESS_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_receive_success_class, name, \
+ TP_PROTO( \
+ const struct ib_wc *wc, \
+ const struct rpc_rdma_cid *cid \
+ ), \
+ TP_ARGS(wc, cid))
+
+DECLARE_EVENT_CLASS(rpcrdma_receive_flush_class,
+ TP_PROTO(
+ const struct ib_wc *wc,
+ const struct rpc_rdma_cid *cid
+ ),
+
+ TP_ARGS(wc, cid),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(unsigned long, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ __entry->status = wc->status;
+ __entry->vendor_err = wc->vendor_err;
+ ),
+
+ TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
+ __entry->cq_id, __entry->completion_id,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+#define DEFINE_RECEIVE_FLUSH_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_receive_flush_class, name, \
+ TP_PROTO( \
+ const struct ib_wc *wc, \
+ const struct rpc_rdma_cid *cid \
+ ), \
+ TP_ARGS(wc, cid))
+
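
Each completion class above pairs an ib_wc with the rpc_rdma_cid that was stashed when the work request was posted. A hedged sketch of a consumer; the event instance and field names here are assumed:

    /* A CQ handler recovers its object from the cqe and traces the
     * completion against the queue/completion IDs recorded at post time. */
    static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
    {
    	struct rpcrdma_mr *mr =
    		container_of(wc->wr_cqe, struct rpcrdma_mr, mr_cqe);

    	trace_xprtrdma_wc_fastreg(wc, &mr->mr_cid);
    }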
+DECLARE_EVENT_CLASS(xprtrdma_reply_class,
TP_PROTO(
const struct rpcrdma_rep *rep
),
@@ -26,29 +294,30 @@ DECLARE_EVENT_CLASS(xprtrdma_reply_event,
TP_ARGS(rep),
TP_STRUCT__entry(
- __field(const void *, rep)
- __field(const void *, r_xprt)
__field(u32, xid)
__field(u32, version)
__field(u32, proc)
+ __string(addr, rpcrdma_addrstr(rep->rr_rxprt))
+ __string(port, rpcrdma_portstr(rep->rr_rxprt))
),
TP_fast_assign(
- __entry->rep = rep;
- __entry->r_xprt = rep->rr_rxprt;
__entry->xid = be32_to_cpu(rep->rr_xid);
__entry->version = be32_to_cpu(rep->rr_vers);
__entry->proc = be32_to_cpu(rep->rr_proc);
+ __assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt));
+ __assign_str(port, rpcrdma_portstr(rep->rr_rxprt));
),
- TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
- __entry->r_xprt, __entry->xid, __entry->rep,
- __entry->version, __entry->proc
+ TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
+ __get_str(addr), __get_str(port),
+ __entry->xid, __entry->version, __entry->proc
)
);
#define DEFINE_REPLY_EVENT(name) \
- DEFINE_EVENT(xprtrdma_reply_event, name, \
+ DEFINE_EVENT(xprtrdma_reply_class, \
+ xprtrdma_reply_##name##_err, \
TP_PROTO( \
const struct rpcrdma_rep *rep \
), \
@@ -62,19 +331,17 @@ DECLARE_EVENT_CLASS(xprtrdma_rxprt,
TP_ARGS(r_xprt),
TP_STRUCT__entry(
- __field(const void *, r_xprt)
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
- __entry->r_xprt = r_xprt;
__assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("peer=[%s]:%s r_xprt=%p",
- __get_str(addr), __get_str(port), __entry->r_xprt
+ TP_printk("peer=[%s]:%s",
+ __get_str(addr), __get_str(port)
)
);
@@ -94,7 +361,6 @@ DECLARE_EVENT_CLASS(xprtrdma_connect_class,
TP_ARGS(r_xprt, rc),
TP_STRUCT__entry(
- __field(const void *, r_xprt)
__field(int, rc)
__field(int, connect_status)
__string(addr, rpcrdma_addrstr(r_xprt))
@@ -102,15 +368,14 @@ DECLARE_EVENT_CLASS(xprtrdma_connect_class,
),
TP_fast_assign(
- __entry->r_xprt = r_xprt;
__entry->rc = rc;
- __entry->connect_status = r_xprt->rx_ep.rep_connected;
+ __entry->connect_status = r_xprt->rx_ep->re_connect_status;
__assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connect status=%d",
- __get_str(addr), __get_str(port), __entry->r_xprt,
+ TP_printk("peer=[%s]:%s rc=%d connection status=%d",
+ __get_str(addr), __get_str(port),
__entry->rc, __entry->connect_status
)
);
@@ -155,7 +420,8 @@ DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
__entry->nsegs = nsegs;
),
- TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " pos=%u %u@0x%016llx:0x%08x (%s)",
__entry->task_id, __entry->client_id,
__entry->pos, __entry->length,
(unsigned long long)__entry->offset, __entry->handle,
@@ -202,7 +468,8 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
__entry->nsegs = nsegs;
),
- TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " %u@0x%016llx:0x%08x (%s)",
__entry->task_id, __entry->client_id,
__entry->length, (unsigned long long)__entry->offset,
__entry->handle,
@@ -219,41 +486,6 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
), \
TP_ARGS(task, mr, nsegs))
-DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
- TP_PROTO(
- const struct ib_wc *wc,
- const struct rpcrdma_frwr *frwr
- ),
-
- TP_ARGS(wc, frwr),
-
- TP_STRUCT__entry(
- __field(const void *, mr)
- __field(unsigned int, status)
- __field(unsigned int, vendor_err)
- ),
-
- TP_fast_assign(
- __entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
- __entry->status = wc->status;
- __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
- ),
-
- TP_printk(
- "mr=%p: %s (%u/0x%x)",
- __entry->mr, rdma_show_wc_status(__entry->status),
- __entry->status, __entry->vendor_err
- )
-);
-
-#define DEFINE_FRWR_DONE_EVENT(name) \
- DEFINE_EVENT(xprtrdma_frwr_done, name, \
- TP_PROTO( \
- const struct ib_wc *wc, \
- const struct rpcrdma_frwr *frwr \
- ), \
- TP_ARGS(wc, frwr))
-
TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
@@ -266,7 +498,7 @@ TRACE_DEFINE_ENUM(DMA_NONE);
{ DMA_FROM_DEVICE, "FROM_DEVICE" }, \
{ DMA_NONE, "NONE" })
-DECLARE_EVENT_CLASS(xprtrdma_mr,
+DECLARE_EVENT_CLASS(xprtrdma_mr_class,
TP_PROTO(
const struct rpcrdma_mr *mr
),
@@ -274,7 +506,10 @@ DECLARE_EVENT_CLASS(xprtrdma_mr,
TP_ARGS(mr),
TP_STRUCT__entry(
- __field(const void *, mr)
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, mr_id)
+ __field(int, nents)
__field(u32, handle)
__field(u32, length)
__field(u64, offset)
@@ -282,126 +517,151 @@ DECLARE_EVENT_CLASS(xprtrdma_mr,
),
TP_fast_assign(
- __entry->mr = mr;
+ const struct rpcrdma_req *req = mr->mr_req;
+
+ if (req) {
+ const struct rpc_task *task = req->rl_slot.rq_task;
+
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ } else {
+ __entry->task_id = 0;
+ __entry->client_id = -1;
+ }
+ __entry->mr_id = mr->mr_ibmr->res.id;
+ __entry->nents = mr->mr_nents;
__entry->handle = mr->mr_handle;
__entry->length = mr->mr_length;
__entry->offset = mr->mr_offset;
__entry->dir = mr->mr_dir;
),
- TP_printk("mr=%p %u@0x%016llx:0x%08x (%s)",
- __entry->mr, __entry->length,
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
+ __entry->task_id, __entry->client_id,
+ __entry->mr_id, __entry->nents, __entry->length,
(unsigned long long)__entry->offset, __entry->handle,
xprtrdma_show_direction(__entry->dir)
)
);
-#define DEFINE_MR_EVENT(name) \
- DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \
- TP_PROTO( \
- const struct rpcrdma_mr *mr \
- ), \
+#define DEFINE_MR_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_mr_class, \
+ xprtrdma_mr_##name, \
+ TP_PROTO( \
+ const struct rpcrdma_mr *mr \
+ ), \
TP_ARGS(mr))
-DECLARE_EVENT_CLASS(xprtrdma_cb_event,
+DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class,
TP_PROTO(
- const struct rpc_rqst *rqst
+ const struct rpcrdma_mr *mr
),
- TP_ARGS(rqst),
+ TP_ARGS(mr),
TP_STRUCT__entry(
- __field(const void *, rqst)
- __field(const void *, rep)
- __field(const void *, req)
- __field(u32, xid)
+ __field(u32, mr_id)
+ __field(int, nents)
+ __field(u32, handle)
+ __field(u32, length)
+ __field(u64, offset)
+ __field(u32, dir)
),
TP_fast_assign(
- __entry->rqst = rqst;
- __entry->req = rpcr_to_rdmar(rqst);
- __entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
- __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->mr_id = mr->mr_ibmr->res.id;
+ __entry->nents = mr->mr_nents;
+ __entry->handle = mr->mr_handle;
+ __entry->length = mr->mr_length;
+ __entry->offset = mr->mr_offset;
+ __entry->dir = mr->mr_dir;
),
- TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
- __entry->xid, __entry->rqst, __entry->req, __entry->rep
+ TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
+ __entry->mr_id, __entry->nents, __entry->length,
+ (unsigned long long)__entry->offset, __entry->handle,
+ xprtrdma_show_direction(__entry->dir)
)
);
-#define DEFINE_CB_EVENT(name) \
- DEFINE_EVENT(xprtrdma_cb_event, name, \
+#define DEFINE_ANON_MR_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_anonymous_mr_class, \
+ xprtrdma_mr_##name, \
TP_PROTO( \
- const struct rpc_rqst *rqst \
+ const struct rpcrdma_mr *mr \
), \
- TP_ARGS(rqst))
-
-/**
- ** Connection events
- **/
+ TP_ARGS(mr))
-TRACE_EVENT(xprtrdma_cm_event,
+DECLARE_EVENT_CLASS(xprtrdma_callback_class,
TP_PROTO(
const struct rpcrdma_xprt *r_xprt,
- struct rdma_cm_event *event
+ const struct rpc_rqst *rqst
),
- TP_ARGS(r_xprt, event),
+ TP_ARGS(r_xprt, rqst),
TP_STRUCT__entry(
- __field(const void *, r_xprt)
- __field(unsigned int, event)
- __field(int, status)
+ __field(u32, xid)
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
- __entry->r_xprt = r_xprt;
- __entry->event = event->event;
- __entry->status = event->status;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
__assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)",
- __get_str(addr), __get_str(port),
- __entry->r_xprt, rdma_show_cm_event(__entry->event),
- __entry->event, __entry->status
+ TP_printk("peer=[%s]:%s xid=0x%08x",
+ __get_str(addr), __get_str(port), __entry->xid
)
);
+#define DEFINE_CALLBACK_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_callback_class, \
+ xprtrdma_cb_##name, \
+ TP_PROTO( \
+ const struct rpcrdma_xprt *r_xprt, \
+ const struct rpc_rqst *rqst \
+ ), \
+ TP_ARGS(r_xprt, rqst))
+
+/**
+ ** Connection events
+ **/
+
TRACE_EVENT(xprtrdma_inline_thresh,
TP_PROTO(
- const struct rpcrdma_xprt *r_xprt
+ const struct rpcrdma_ep *ep
),
- TP_ARGS(r_xprt),
+ TP_ARGS(ep),
TP_STRUCT__entry(
- __field(const void *, r_xprt)
__field(unsigned int, inline_send)
__field(unsigned int, inline_recv)
__field(unsigned int, max_send)
__field(unsigned int, max_recv)
- __string(addr, rpcrdma_addrstr(r_xprt))
- __string(port, rpcrdma_portstr(r_xprt))
+ __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+ __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
),
TP_fast_assign(
- const struct rpcrdma_ep *ep = &r_xprt->rx_ep;
+ const struct rdma_cm_id *id = ep->re_id;
- __entry->r_xprt = r_xprt;
- __entry->inline_send = ep->rep_inline_send;
- __entry->inline_recv = ep->rep_inline_recv;
- __entry->max_send = ep->rep_max_inline_send;
- __entry->max_recv = ep->rep_max_inline_recv;
- __assign_str(addr, rpcrdma_addrstr(r_xprt));
- __assign_str(port, rpcrdma_portstr(r_xprt));
+ __entry->inline_send = ep->re_inline_send;
+ __entry->inline_recv = ep->re_inline_recv;
+ __entry->max_send = ep->re_max_inline_send;
+ __entry->max_recv = ep->re_max_inline_recv;
+ memcpy(__entry->srcaddr, &id->route.addr.src_addr,
+ sizeof(struct sockaddr_in6));
+ memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
),
- TP_printk("peer=[%s]:%s r_xprt=%p neg send/recv=%u/%u, calc send/recv=%u/%u",
- __get_str(addr), __get_str(port), __entry->r_xprt,
+ TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
+ __entry->srcaddr, __entry->dstaddr,
__entry->inline_send, __entry->inline_recv,
__entry->max_send, __entry->max_recv
)
@@ -410,13 +670,7 @@ TRACE_EVENT(xprtrdma_inline_thresh,
DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect);
-DEFINE_RXPRT_EVENT(xprtrdma_create);
-DEFINE_RXPRT_EVENT(xprtrdma_op_destroy);
-DEFINE_RXPRT_EVENT(xprtrdma_remove);
-DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
-DEFINE_RXPRT_EVENT(xprtrdma_op_close);
-DEFINE_RXPRT_EVENT(xprtrdma_op_setport);
TRACE_EVENT(xprtrdma_op_connect,
TP_PROTO(
@@ -427,22 +681,19 @@ TRACE_EVENT(xprtrdma_op_connect,
TP_ARGS(r_xprt, delay),
TP_STRUCT__entry(
- __field(const void *, r_xprt)
__field(unsigned long, delay)
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
- __entry->r_xprt = r_xprt;
__entry->delay = delay;
__assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("peer=[%s]:%s r_xprt=%p delay=%lu",
- __get_str(addr), __get_str(port), __entry->r_xprt,
- __entry->delay
+ TP_printk("peer=[%s]:%s delay=%lu",
+ __get_str(addr), __get_str(port), __entry->delay
)
);
@@ -457,7 +708,6 @@ TRACE_EVENT(xprtrdma_op_set_cto,
TP_ARGS(r_xprt, connect, reconnect),
TP_STRUCT__entry(
- __field(const void *, r_xprt)
__field(unsigned long, connect)
__field(unsigned long, reconnect)
__string(addr, rpcrdma_addrstr(r_xprt))
@@ -465,50 +715,18 @@ TRACE_EVENT(xprtrdma_op_set_cto,
),
TP_fast_assign(
- __entry->r_xprt = r_xprt;
__entry->connect = connect;
__entry->reconnect = reconnect;
__assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu",
- __get_str(addr), __get_str(port), __entry->r_xprt,
+ TP_printk("peer=[%s]:%s connect=%lu reconnect=%lu",
+ __get_str(addr), __get_str(port),
__entry->connect / HZ, __entry->reconnect / HZ
)
);
-TRACE_EVENT(xprtrdma_qp_event,
- TP_PROTO(
- const struct rpcrdma_xprt *r_xprt,
- const struct ib_event *event
- ),
-
- TP_ARGS(r_xprt, event),
-
- TP_STRUCT__entry(
- __field(const void *, r_xprt)
- __field(unsigned int, event)
- __string(name, event->device->name)
- __string(addr, rpcrdma_addrstr(r_xprt))
- __string(port, rpcrdma_portstr(r_xprt))
- ),
-
- TP_fast_assign(
- __entry->r_xprt = r_xprt;
- __entry->event = event->event;
- __assign_str(name, event->device->name);
- __assign_str(addr, rpcrdma_addrstr(r_xprt));
- __assign_str(port, rpcrdma_portstr(r_xprt));
- ),
-
- TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)",
- __get_str(addr), __get_str(port), __entry->r_xprt,
- __get_str(name), rdma_show_ib_event(__entry->event),
- __entry->event
- )
-);
-
/**
** Call events
**/
@@ -522,86 +740,56 @@ TRACE_EVENT(xprtrdma_createmrs,
TP_ARGS(r_xprt, count),
TP_STRUCT__entry(
- __field(const void *, r_xprt)
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
__field(unsigned int, count)
),
TP_fast_assign(
- __entry->r_xprt = r_xprt;
__entry->count = count;
__assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs",
- __get_str(addr), __get_str(port), __entry->r_xprt,
- __entry->count
- )
-);
-
-TRACE_EVENT(xprtrdma_mr_get,
- TP_PROTO(
- const struct rpcrdma_req *req
- ),
-
- TP_ARGS(req),
-
- TP_STRUCT__entry(
- __field(const void *, req)
- __field(unsigned int, task_id)
- __field(unsigned int, client_id)
- __field(u32, xid)
- ),
-
- TP_fast_assign(
- const struct rpc_rqst *rqst = &req->rl_slot;
-
- __entry->req = req;
- __entry->task_id = rqst->rq_task->tk_pid;
- __entry->client_id = rqst->rq_task->tk_client->cl_clid;
- __entry->xid = be32_to_cpu(rqst->rq_xid);
- ),
-
- TP_printk("task:%u@%u xid=0x%08x req=%p",
- __entry->task_id, __entry->client_id, __entry->xid,
- __entry->req
+ TP_printk("peer=[%s]:%s created %u MRs",
+ __get_str(addr), __get_str(port), __entry->count
)
);
-TRACE_EVENT(xprtrdma_nomrs,
+TRACE_EVENT(xprtrdma_nomrs_err,
TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
const struct rpcrdma_req *req
),
- TP_ARGS(req),
+ TP_ARGS(r_xprt, req),
TP_STRUCT__entry(
- __field(const void *, req)
__field(unsigned int, task_id)
__field(unsigned int, client_id)
- __field(u32, xid)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
const struct rpc_rqst *rqst = &req->rl_slot;
- __entry->req = req;
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client->cl_clid;
- __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("task:%u@%u xid=0x%08x req=%p",
- __entry->task_id, __entry->client_id, __entry->xid,
- __entry->req
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " peer=[%s]:%s",
+ __entry->task_id, __entry->client_id,
+ __get_str(addr), __get_str(port)
)
);
DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
DEFINE_WRCH_EVENT(reply);
+DEFINE_WRCH_EVENT(wp);
TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
@@ -656,7 +844,8 @@ TRACE_EVENT(xprtrdma_marshal,
__entry->wtype = wtype;
),
- TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " xid=0x%08x hdr=%u xdr=%u/%u/%u %s/%s",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->hdrlen,
__entry->headlen, __entry->pagelen, __entry->taillen,
@@ -686,7 +875,7 @@ TRACE_EVENT(xprtrdma_marshal_failed,
__entry->ret = ret;
),
- TP_printk("task:%u@%u xid=0x%08x: ret=%d",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x ret=%d",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->ret
)
@@ -713,7 +902,7 @@ TRACE_EVENT(xprtrdma_prepsend_failed,
__entry->ret = ret;
),
- TP_printk("task:%u@%u xid=0x%08x: ret=%d",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x ret=%d",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->ret
)
@@ -721,41 +910,71 @@ TRACE_EVENT(xprtrdma_prepsend_failed,
TRACE_EVENT(xprtrdma_post_send,
TP_PROTO(
- const struct rpcrdma_req *req,
- int status
+ const struct rpcrdma_req *req
),
- TP_ARGS(req, status),
+ TP_ARGS(req),
TP_STRUCT__entry(
- __field(const void *, req)
- __field(const void *, sc)
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(int, num_sge)
__field(int, signaled)
- __field(int, status)
),
TP_fast_assign(
const struct rpc_rqst *rqst = &req->rl_slot;
+ const struct rpcrdma_sendctx *sc = req->rl_sendctx;
+ __entry->cq_id = sc->sc_cid.ci_queue_id;
+ __entry->completion_id = sc->sc_cid.ci_completion_id;
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client ?
rqst->rq_task->tk_client->cl_clid : -1;
- __entry->req = req;
- __entry->sc = req->rl_sendctx;
__entry->num_sge = req->rl_wr.num_sge;
__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
- __entry->status = status;
),
- TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %sstatus=%d",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " cq.id=%u cid=%d (%d SGE%s) %s",
__entry->task_id, __entry->client_id,
- __entry->req, __entry->sc, __entry->num_sge,
- (__entry->num_sge == 1 ? "" : "s"),
- (__entry->signaled ? "signaled " : ""),
- __entry->status
+ __entry->cq_id, __entry->completion_id,
+ __entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
+ (__entry->signaled ? "signaled" : "")
+ )
+);
+
+TRACE_EVENT(xprtrdma_post_send_err,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ const struct rpcrdma_req *req,
+ int rc
+ ),
+
+ TP_ARGS(r_xprt, req, rc),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(int, rc)
+ ),
+
+ TP_fast_assign(
+ const struct rpc_rqst *rqst = &req->rl_slot;
+ const struct rpcrdma_ep *ep = r_xprt->rx_ep;
+
+ __entry->cq_id = ep ? ep->re_attr.recv_cq->res.id : 0;
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client ?
+ rqst->rq_task->tk_client->cl_clid : -1;
+ __entry->rc = rc;
+ ),
+
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " cq.id=%u rc=%d",
+ __entry->task_id, __entry->client_id,
+ __entry->cq_id, __entry->rc
)
);
@@ -767,149 +986,120 @@ TRACE_EVENT(xprtrdma_post_recv,
TP_ARGS(rep),
TP_STRUCT__entry(
- __field(const void *, rep)
+ __field(u32, cq_id)
+ __field(int, completion_id)
),
TP_fast_assign(
- __entry->rep = rep;
+ __entry->cq_id = rep->rr_cid.ci_queue_id;
+ __entry->completion_id = rep->rr_cid.ci_completion_id;
),
- TP_printk("rep=%p",
- __entry->rep
+ TP_printk("cq.id=%d cid=%d",
+ __entry->cq_id, __entry->completion_id
)
);
TRACE_EVENT(xprtrdma_post_recvs,
TP_PROTO(
const struct rpcrdma_xprt *r_xprt,
- unsigned int count,
- int status
+ unsigned int count
),
- TP_ARGS(r_xprt, count, status),
+ TP_ARGS(r_xprt, count),
TP_STRUCT__entry(
- __field(const void *, r_xprt)
+ __field(u32, cq_id)
__field(unsigned int, count)
- __field(int, status)
__field(int, posted)
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
- __entry->r_xprt = r_xprt;
+ const struct rpcrdma_ep *ep = r_xprt->rx_ep;
+
+ __entry->cq_id = ep->re_attr.recv_cq->res.id;
__entry->count = count;
- __entry->status = status;
- __entry->posted = r_xprt->rx_ep.rep_receive_count;
+ __entry->posted = ep->re_receive_count;
__assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
- __get_str(addr), __get_str(port), __entry->r_xprt,
- __entry->count, __entry->posted, __entry->status
+ TP_printk("peer=[%s]:%s cq.id=%d %u new recvs, %d active",
+ __get_str(addr), __get_str(port), __entry->cq_id,
+ __entry->count, __entry->posted
)
);
-TRACE_EVENT(xprtrdma_post_linv,
+TRACE_EVENT(xprtrdma_post_recvs_err,
TP_PROTO(
- const struct rpcrdma_req *req,
+ const struct rpcrdma_xprt *r_xprt,
int status
),
- TP_ARGS(req, status),
+ TP_ARGS(r_xprt, status),
TP_STRUCT__entry(
- __field(const void *, req)
+ __field(u32, cq_id)
__field(int, status)
- __field(u32, xid)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
- __entry->req = req;
+ const struct rpcrdma_ep *ep = r_xprt->rx_ep;
+
+ __entry->cq_id = ep->re_attr.recv_cq->res.id;
__entry->status = status;
- __entry->xid = be32_to_cpu(req->rl_slot.rq_xid);
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("req=%p xid=0x%08x status=%d",
- __entry->req, __entry->xid, __entry->status
+ TP_printk("peer=[%s]:%s cq.id=%d rc=%d",
+ __get_str(addr), __get_str(port), __entry->cq_id,
+ __entry->status
)
);
-/**
- ** Completion events
- **/
-
-TRACE_EVENT(xprtrdma_wc_send,
+TRACE_EVENT(xprtrdma_post_linv_err,
TP_PROTO(
- const struct rpcrdma_sendctx *sc,
- const struct ib_wc *wc
+ const struct rpcrdma_req *req,
+ int status
),
- TP_ARGS(sc, wc),
+ TP_ARGS(req, status),
TP_STRUCT__entry(
- __field(const void *, req)
- __field(const void *, sc)
- __field(unsigned int, unmap_count)
- __field(unsigned int, status)
- __field(unsigned int, vendor_err)
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(int, status)
),
TP_fast_assign(
- __entry->req = sc->sc_req;
- __entry->sc = sc;
- __entry->unmap_count = sc->sc_unmap_count;
- __entry->status = wc->status;
- __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
+ const struct rpc_task *task = req->rl_slot.rq_task;
+
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->status = status;
),
- TP_printk("req=%p sc=%p unmapped=%u: %s (%u/0x%x)",
- __entry->req, __entry->sc, __entry->unmap_count,
- rdma_show_wc_status(__entry->status),
- __entry->status, __entry->vendor_err
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " status=%d",
+ __entry->task_id, __entry->client_id, __entry->status
)
);
-TRACE_EVENT(xprtrdma_wc_receive,
- TP_PROTO(
- const struct ib_wc *wc
- ),
-
- TP_ARGS(wc),
-
- TP_STRUCT__entry(
- __field(const void *, rep)
- __field(u32, byte_len)
- __field(unsigned int, status)
- __field(u32, vendor_err)
- ),
-
- TP_fast_assign(
- __entry->rep = container_of(wc->wr_cqe, struct rpcrdma_rep,
- rr_cqe);
- __entry->status = wc->status;
- if (wc->status) {
- __entry->byte_len = 0;
- __entry->vendor_err = wc->vendor_err;
- } else {
- __entry->byte_len = wc->byte_len;
- __entry->vendor_err = 0;
- }
- ),
+/**
+ ** Completion events
+ **/
- TP_printk("rep=%p %u bytes: %s (%u/0x%x)",
- __entry->rep, __entry->byte_len,
- rdma_show_wc_status(__entry->status),
- __entry->status, __entry->vendor_err
- )
-);
+DEFINE_RECEIVE_COMPLETION_EVENT(xprtrdma_wc_receive);
-DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
-DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
-DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
-DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_done);
+DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
+DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_fastreg);
+DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li);
+DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_wake);
+DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_done);
TRACE_EVENT(xprtrdma_frwr_alloc,
TP_PROTO(
@@ -920,17 +1110,17 @@ TRACE_EVENT(xprtrdma_frwr_alloc,
TP_ARGS(mr, rc),
TP_STRUCT__entry(
- __field(const void *, mr)
+ __field(u32, mr_id)
__field(int, rc)
),
TP_fast_assign(
- __entry->mr = mr;
- __entry->rc = rc;
+ __entry->mr_id = mr->mr_ibmr->res.id;
+ __entry->rc = rc;
),
- TP_printk("mr=%p: rc=%d",
- __entry->mr, __entry->rc
+ TP_printk("mr.id=%u: rc=%d",
+ __entry->mr_id, __entry->rc
)
);
@@ -943,7 +1133,8 @@ TRACE_EVENT(xprtrdma_frwr_dereg,
TP_ARGS(mr, rc),
TP_STRUCT__entry(
- __field(const void *, mr)
+ __field(u32, mr_id)
+ __field(int, nents)
__field(u32, handle)
__field(u32, length)
__field(u64, offset)
@@ -952,7 +1143,8 @@ TRACE_EVENT(xprtrdma_frwr_dereg,
),
TP_fast_assign(
- __entry->mr = mr;
+ __entry->mr_id = mr->mr_ibmr->res.id;
+ __entry->nents = mr->mr_nents;
__entry->handle = mr->mr_handle;
__entry->length = mr->mr_length;
__entry->offset = mr->mr_offset;
@@ -960,8 +1152,8 @@ TRACE_EVENT(xprtrdma_frwr_dereg,
__entry->rc = rc;
),
- TP_printk("mr=%p %u@0x%016llx:0x%08x (%s): rc=%d",
- __entry->mr, __entry->length,
+ TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
+ __entry->mr_id, __entry->nents, __entry->length,
(unsigned long long)__entry->offset, __entry->handle,
xprtrdma_show_direction(__entry->dir),
__entry->rc
@@ -977,21 +1169,21 @@ TRACE_EVENT(xprtrdma_frwr_sgerr,
TP_ARGS(mr, sg_nents),
TP_STRUCT__entry(
- __field(const void *, mr)
+ __field(u32, mr_id)
__field(u64, addr)
__field(u32, dir)
__field(int, nents)
),
TP_fast_assign(
- __entry->mr = mr;
+ __entry->mr_id = mr->mr_ibmr->res.id;
__entry->addr = mr->mr_sg->dma_address;
__entry->dir = mr->mr_dir;
__entry->nents = sg_nents;
),
- TP_printk("mr=%p dma addr=0x%llx (%s) sg_nents=%d",
- __entry->mr, __entry->addr,
+ TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
+ __entry->mr_id, __entry->addr,
xprtrdma_show_direction(__entry->dir),
__entry->nents
)
@@ -1006,7 +1198,7 @@ TRACE_EVENT(xprtrdma_frwr_maperr,
TP_ARGS(mr, num_mapped),
TP_STRUCT__entry(
- __field(const void *, mr)
+ __field(u32, mr_id)
__field(u64, addr)
__field(u32, dir)
__field(int, num_mapped)
@@ -1014,25 +1206,26 @@ TRACE_EVENT(xprtrdma_frwr_maperr,
),
TP_fast_assign(
- __entry->mr = mr;
+ __entry->mr_id = mr->mr_ibmr->res.id;
__entry->addr = mr->mr_sg->dma_address;
__entry->dir = mr->mr_dir;
__entry->num_mapped = num_mapped;
__entry->nents = mr->mr_nents;
),
- TP_printk("mr=%p dma addr=0x%llx (%s) nents=%d of %d",
- __entry->mr, __entry->addr,
+ TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
+ __entry->mr_id, __entry->addr,
xprtrdma_show_direction(__entry->dir),
__entry->num_mapped, __entry->nents
)
);
+DEFINE_MR_EVENT(fastreg);
DEFINE_MR_EVENT(localinv);
+DEFINE_MR_EVENT(reminv);
DEFINE_MR_EVENT(map);
-DEFINE_MR_EVENT(unmap);
-DEFINE_MR_EVENT(remoteinv);
-DEFINE_MR_EVENT(recycle);
+
+DEFINE_ANON_MR_EVENT(unmap);
TRACE_EVENT(xprtrdma_dma_maperr,
TP_PROTO(
@@ -1060,17 +1253,14 @@ TRACE_EVENT(xprtrdma_reply,
TP_PROTO(
const struct rpc_task *task,
const struct rpcrdma_rep *rep,
- const struct rpcrdma_req *req,
unsigned int credits
),
- TP_ARGS(task, rep, req, credits),
+ TP_ARGS(task, rep, credits),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
- __field(const void *, rep)
- __field(const void *, req)
__field(u32, xid)
__field(unsigned int, credits)
),
@@ -1078,49 +1268,102 @@ TRACE_EVENT(xprtrdma_reply,
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
- __entry->rep = rep;
- __entry->req = req;
__entry->xid = be32_to_cpu(rep->rr_xid);
__entry->credits = credits;
),
- TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x credits=%u",
__entry->task_id, __entry->client_id, __entry->xid,
- __entry->credits, __entry->rep, __entry->req
+ __entry->credits
)
);
-TRACE_EVENT(xprtrdma_defer_cmp,
+DEFINE_REPLY_EVENT(vers);
+DEFINE_REPLY_EVENT(rqst);
+DEFINE_REPLY_EVENT(short);
+DEFINE_REPLY_EVENT(hdr);
+
+TRACE_EVENT(xprtrdma_err_vers,
TP_PROTO(
- const struct rpcrdma_rep *rep
+ const struct rpc_rqst *rqst,
+ __be32 *min,
+ __be32 *max
),
- TP_ARGS(rep),
+ TP_ARGS(rqst, min, max),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
- __field(const void *, rep)
__field(u32, xid)
+ __field(u32, min)
+ __field(u32, max)
),
TP_fast_assign(
- __entry->task_id = rep->rr_rqst->rq_task->tk_pid;
- __entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
- __entry->rep = rep;
- __entry->xid = be32_to_cpu(rep->rr_xid);
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->min = be32_to_cpup(min);
+ __entry->max = be32_to_cpup(max);
),
- TP_printk("task:%u@%u xid=0x%08x rep=%p",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x versions=[%u, %u]",
__entry->task_id, __entry->client_id, __entry->xid,
- __entry->rep
+ __entry->min, __entry->max
)
);
-DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
-DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
-DEFINE_REPLY_EVENT(xprtrdma_reply_short);
-DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
+TRACE_EVENT(xprtrdma_err_chunk,
+ TP_PROTO(
+ const struct rpc_rqst *rqst
+ ),
+
+ TP_ARGS(rqst),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ ),
+
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x",
+ __entry->task_id, __entry->client_id, __entry->xid
+ )
+);
+
+TRACE_EVENT(xprtrdma_err_unrecognized,
+ TP_PROTO(
+ const struct rpc_rqst *rqst,
+ __be32 *procedure
+ ),
+
+ TP_ARGS(rqst, procedure),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ __field(u32, procedure)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->procedure = be32_to_cpup(procedure);
+ ),
+
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x procedure=%u",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->procedure
+ )
+);
TRACE_EVENT(xprtrdma_fixup,
TP_PROTO(
@@ -1148,7 +1391,7 @@ TRACE_EVENT(xprtrdma_fixup,
__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
),
- TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " fixup=%lu xdr=%zu/%u/%zu",
__entry->task_id, __entry->client_id, __entry->fixup,
__entry->headlen, __entry->pagelen, __entry->taillen
)
@@ -1181,65 +1424,25 @@ TRACE_EVENT(xprtrdma_decode_seg,
)
);
-/**
- ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
- **/
-
-TRACE_EVENT(xprtrdma_op_allocate,
+TRACE_EVENT(xprtrdma_mrs_zap,
TP_PROTO(
- const struct rpc_task *task,
- const struct rpcrdma_req *req
+ const struct rpc_task *task
),
- TP_ARGS(task, req),
+ TP_ARGS(task),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
- __field(const void *, req)
- __field(size_t, callsize)
- __field(size_t, rcvsize)
),
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
- __entry->req = req;
- __entry->callsize = task->tk_rqstp->rq_callsize;
- __entry->rcvsize = task->tk_rqstp->rq_rcvsize;
),
- TP_printk("task:%u@%u req=%p (%zu, %zu)",
- __entry->task_id, __entry->client_id,
- __entry->req, __entry->callsize, __entry->rcvsize
- )
-);
-
-TRACE_EVENT(xprtrdma_op_free,
- TP_PROTO(
- const struct rpc_task *task,
- const struct rpcrdma_req *req
- ),
-
- TP_ARGS(task, req),
-
- TP_STRUCT__entry(
- __field(unsigned int, task_id)
- __field(unsigned int, client_id)
- __field(const void *, req)
- __field(const void *, rep)
- ),
-
- TP_fast_assign(
- __entry->task_id = task->tk_pid;
- __entry->client_id = task->tk_client->cl_clid;
- __entry->req = req;
- __entry->rep = req->rl_reply;
- ),
-
- TP_printk("task:%u@%u req=%p rep=%p",
- __entry->task_id, __entry->client_id,
- __entry->req, __entry->rep
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER,
+ __entry->task_id, __entry->client_id
)
);
@@ -1256,92 +1459,65 @@ TRACE_EVENT(xprtrdma_cb_setup,
TP_ARGS(r_xprt, reqs),
TP_STRUCT__entry(
- __field(const void *, r_xprt)
__field(unsigned int, reqs)
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
- __entry->r_xprt = r_xprt;
__entry->reqs = reqs;
__assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
- __get_str(addr), __get_str(port),
- __entry->r_xprt, __entry->reqs
+ TP_printk("peer=[%s]:%s %u reqs",
+ __get_str(addr), __get_str(port), __entry->reqs
)
);
-DEFINE_CB_EVENT(xprtrdma_cb_call);
-DEFINE_CB_EVENT(xprtrdma_cb_reply);
-
-TRACE_EVENT(xprtrdma_leaked_rep,
- TP_PROTO(
- const struct rpc_rqst *rqst,
- const struct rpcrdma_rep *rep
- ),
-
- TP_ARGS(rqst, rep),
-
- TP_STRUCT__entry(
- __field(unsigned int, task_id)
- __field(unsigned int, client_id)
- __field(u32, xid)
- __field(const void *, rep)
- ),
-
- TP_fast_assign(
- __entry->task_id = rqst->rq_task->tk_pid;
- __entry->client_id = rqst->rq_task->tk_client->cl_clid;
- __entry->xid = be32_to_cpu(rqst->rq_xid);
- __entry->rep = rep;
- ),
-
- TP_printk("task:%u@%u xid=0x%08x rep=%p",
- __entry->task_id, __entry->client_id, __entry->xid,
- __entry->rep
- )
-);
+DEFINE_CALLBACK_EVENT(call);
+DEFINE_CALLBACK_EVENT(reply);
/**
** Server-side RPC/RDMA events
**/
-DECLARE_EVENT_CLASS(svcrdma_xprt_event,
+DECLARE_EVENT_CLASS(svcrdma_accept_class,
TP_PROTO(
- const struct svc_xprt *xprt
+ const struct svcxprt_rdma *rdma,
+ long status
),
- TP_ARGS(xprt),
+ TP_ARGS(rdma, status),
TP_STRUCT__entry(
- __field(const void *, xprt)
- __string(addr, xprt->xpt_remotebuf)
+ __field(long, status)
+ __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
- __entry->xprt = xprt;
- __assign_str(addr, xprt->xpt_remotebuf);
+ __entry->status = status;
+ __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("xprt=%p addr=%s",
- __entry->xprt, __get_str(addr)
+ TP_printk("addr=%s status=%ld",
+ __get_str(addr), __entry->status
)
);
-#define DEFINE_XPRT_EVENT(name) \
- DEFINE_EVENT(svcrdma_xprt_event, svcrdma_xprt_##name, \
- TP_PROTO( \
- const struct svc_xprt *xprt \
- ), \
- TP_ARGS(xprt))
+#define DEFINE_ACCEPT_EVENT(name) \
+ DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
+ TP_PROTO( \
+ const struct svcxprt_rdma *rdma, \
+ long status \
+ ), \
+ TP_ARGS(rdma, status))
-DEFINE_XPRT_EVENT(accept);
-DEFINE_XPRT_EVENT(fail);
-DEFINE_XPRT_EVENT(free);
+DEFINE_ACCEPT_EVENT(pd);
+DEFINE_ACCEPT_EVENT(qp);
+DEFINE_ACCEPT_EVENT(fabric);
+DEFINE_ACCEPT_EVENT(initdepth);
+DEFINE_ACCEPT_EVENT(accept);
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
@@ -1359,13 +1535,16 @@ TRACE_DEFINE_ENUM(RDMA_ERROR);
TRACE_EVENT(svcrdma_decode_rqst,
TP_PROTO(
+ const struct svc_rdma_recv_ctxt *ctxt,
__be32 *p,
unsigned int hdrlen
),
- TP_ARGS(p, hdrlen),
+ TP_ARGS(ctxt, p, hdrlen),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(u32, xid)
__field(u32, vers)
__field(u32, proc)
@@ -1374,6 +1553,8 @@ TRACE_EVENT(svcrdma_decode_rqst,
),
TP_fast_assign(
+ __entry->cq_id = ctxt->rc_cid.ci_queue_id;
+ __entry->completion_id = ctxt->rc_cid.ci_completion_id;
__entry->xid = be32_to_cpup(p++);
__entry->vers = be32_to_cpup(p++);
__entry->credits = be32_to_cpup(p++);
@@ -1381,37 +1562,48 @@ TRACE_EVENT(svcrdma_decode_rqst,
__entry->hdrlen = hdrlen;
),
- TP_printk("xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
+ TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
+ __entry->cq_id, __entry->completion_id,
__entry->xid, __entry->vers, __entry->credits,
show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);
-TRACE_EVENT(svcrdma_decode_short,
+TRACE_EVENT(svcrdma_decode_short_err,
TP_PROTO(
+ const struct svc_rdma_recv_ctxt *ctxt,
unsigned int hdrlen
),
- TP_ARGS(hdrlen),
+ TP_ARGS(ctxt, hdrlen),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(unsigned int, hdrlen)
),
TP_fast_assign(
+ __entry->cq_id = ctxt->rc_cid.ci_queue_id;
+ __entry->completion_id = ctxt->rc_cid.ci_completion_id;
__entry->hdrlen = hdrlen;
),
- TP_printk("hdrlen=%u", __entry->hdrlen)
+ TP_printk("cq.id=%u cid=%d hdrlen=%u",
+ __entry->cq_id, __entry->completion_id,
+ __entry->hdrlen)
);
DECLARE_EVENT_CLASS(svcrdma_badreq_event,
TP_PROTO(
+ const struct svc_rdma_recv_ctxt *ctxt,
__be32 *p
),
- TP_ARGS(p),
+ TP_ARGS(ctxt, p),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(u32, xid)
__field(u32, vers)
__field(u32, proc)
@@ -1419,118 +1611,139 @@ DECLARE_EVENT_CLASS(svcrdma_badreq_event,
),
TP_fast_assign(
+ __entry->cq_id = ctxt->rc_cid.ci_queue_id;
+ __entry->completion_id = ctxt->rc_cid.ci_completion_id;
__entry->xid = be32_to_cpup(p++);
__entry->vers = be32_to_cpup(p++);
__entry->credits = be32_to_cpup(p++);
__entry->proc = be32_to_cpup(p);
),
- TP_printk("xid=0x%08x vers=%u credits=%u proc=%u",
+ TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
+ __entry->cq_id, __entry->completion_id,
__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);
#define DEFINE_BADREQ_EVENT(name) \
- DEFINE_EVENT(svcrdma_badreq_event, svcrdma_decode_##name,\
+ DEFINE_EVENT(svcrdma_badreq_event, \
+ svcrdma_decode_##name##_err, \
TP_PROTO( \
+ const struct svc_rdma_recv_ctxt *ctxt, \
__be32 *p \
), \
- TP_ARGS(p))
+ TP_ARGS(ctxt, p))
DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);
-DECLARE_EVENT_CLASS(svcrdma_segment_event,
+TRACE_EVENT(svcrdma_encode_wseg,
TP_PROTO(
+ const struct svc_rdma_send_ctxt *ctxt,
+ u32 segno,
u32 handle,
u32 length,
u64 offset
),
- TP_ARGS(handle, length, offset),
+ TP_ARGS(ctxt, segno, handle, length, offset),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(u32, segno)
__field(u32, handle)
__field(u32, length)
__field(u64, offset)
),
TP_fast_assign(
+ __entry->cq_id = ctxt->sc_cid.ci_queue_id;
+ __entry->completion_id = ctxt->sc_cid.ci_completion_id;
+ __entry->segno = segno;
__entry->handle = handle;
__entry->length = length;
__entry->offset = offset;
),
- TP_printk("%u@0x%016llx:0x%08x",
- __entry->length, (unsigned long long)__entry->offset,
- __entry->handle
+ TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
+ __entry->cq_id, __entry->completion_id,
+ __entry->segno, __entry->length,
+ (unsigned long long)__entry->offset, __entry->handle
)
);
-#define DEFINE_SEGMENT_EVENT(name) \
- DEFINE_EVENT(svcrdma_segment_event, svcrdma_encode_##name,\
- TP_PROTO( \
- u32 handle, \
- u32 length, \
- u64 offset \
- ), \
- TP_ARGS(handle, length, offset))
-
-DEFINE_SEGMENT_EVENT(rseg);
-DEFINE_SEGMENT_EVENT(wseg);
-
-DECLARE_EVENT_CLASS(svcrdma_chunk_event,
+TRACE_EVENT(svcrdma_decode_rseg,
TP_PROTO(
- u32 length
+ const struct rpc_rdma_cid *cid,
+ const struct svc_rdma_chunk *chunk,
+ const struct svc_rdma_segment *segment
),
- TP_ARGS(length),
+ TP_ARGS(cid, chunk, segment),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(u32, segno)
+ __field(u32, position)
+ __field(u32, handle)
__field(u32, length)
+ __field(u64, offset)
),
TP_fast_assign(
- __entry->length = length;
- ),
-
- TP_printk("length=%u",
- __entry->length
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ __entry->segno = chunk->ch_segcount;
+ __entry->position = chunk->ch_position;
+ __entry->handle = segment->rs_handle;
+ __entry->length = segment->rs_length;
+ __entry->offset = segment->rs_offset;
+ ),
+
+ TP_printk("cq_id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
+ __entry->cq_id, __entry->completion_id,
+ __entry->segno, __entry->position, __entry->length,
+ (unsigned long long)__entry->offset, __entry->handle
)
);
-#define DEFINE_CHUNK_EVENT(name) \
- DEFINE_EVENT(svcrdma_chunk_event, svcrdma_encode_##name,\
- TP_PROTO( \
- u32 length \
- ), \
- TP_ARGS(length))
-
-DEFINE_CHUNK_EVENT(pzr);
-DEFINE_CHUNK_EVENT(write);
-DEFINE_CHUNK_EVENT(reply);
-
-TRACE_EVENT(svcrdma_encode_read,
+TRACE_EVENT(svcrdma_decode_wseg,
TP_PROTO(
- u32 length,
- u32 position
+ const struct rpc_rdma_cid *cid,
+ const struct svc_rdma_chunk *chunk,
+ u32 segno
),
- TP_ARGS(length, position),
+ TP_ARGS(cid, chunk, segno),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(u32, segno)
+ __field(u32, handle)
__field(u32, length)
- __field(u32, position)
+ __field(u64, offset)
),
TP_fast_assign(
- __entry->length = length;
- __entry->position = position;
+ const struct svc_rdma_segment *segment =
+ &chunk->ch_segments[segno];
+
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ __entry->segno = segno;
+ __entry->handle = segment->rs_handle;
+ __entry->length = segment->rs_length;
+ __entry->offset = segment->rs_offset;
),
- TP_printk("length=%u position=%u",
- __entry->length, __entry->position
+ TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
+ __entry->cq_id, __entry->completion_id,
+ __entry->segno, __entry->length,
+ (unsigned long long)__entry->offset, __entry->handle
)
);
@@ -1607,240 +1820,335 @@ DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
TP_ARGS(rdma, dma_addr, length))
DEFINE_SVC_DMA_EVENT(dma_map_page);
+DEFINE_SVC_DMA_EVENT(dma_map_err);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);
-TRACE_EVENT(svcrdma_dma_map_rwctx,
+TRACE_EVENT(svcrdma_dma_map_rw_err,
TP_PROTO(
const struct svcxprt_rdma *rdma,
+ unsigned int nents,
int status
),
- TP_ARGS(rdma, status),
+ TP_ARGS(rdma, nents, status),
TP_STRUCT__entry(
__field(int, status)
+ __field(unsigned int, nents)
__string(device, rdma->sc_cm_id->device->name)
__string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
__entry->status = status;
+ __entry->nents = nents;
__assign_str(device, rdma->sc_cm_id->device->name);
__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s status=%d",
- __get_str(addr), __get_str(device), __entry->status
+ TP_printk("addr=%s device=%s nents=%u status=%d",
+ __get_str(addr), __get_str(device), __entry->nents,
+ __entry->status
)
);
-TRACE_EVENT(svcrdma_send_failed,
+TRACE_EVENT(svcrdma_no_rwctx_err,
TP_PROTO(
+ const struct svcxprt_rdma *rdma,
+ unsigned int num_sges
+ ),
+
+ TP_ARGS(rdma, num_sges),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, num_sges)
+ __string(device, rdma->sc_cm_id->device->name)
+ __string(addr, rdma->sc_xprt.xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->num_sges = num_sges;
+ __assign_str(device, rdma->sc_cm_id->device->name);
+ __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s device=%s num_sges=%d",
+ __get_str(addr), __get_str(device), __entry->num_sges
+ )
+);
+
+TRACE_EVENT(svcrdma_page_overrun_err,
+ TP_PROTO(
+ const struct svcxprt_rdma *rdma,
const struct svc_rqst *rqst,
- int status
+ unsigned int pageno
),
- TP_ARGS(rqst, status),
+ TP_ARGS(rdma, rqst, pageno),
TP_STRUCT__entry(
- __field(int, status)
+ __field(unsigned int, pageno)
__field(u32, xid)
- __field(const void *, xprt)
- __string(addr, rqst->rq_xprt->xpt_remotebuf)
+ __string(device, rdma->sc_cm_id->device->name)
+ __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
- __entry->status = status;
+ __entry->pageno = pageno;
__entry->xid = __be32_to_cpu(rqst->rq_xid);
- __entry->xprt = rqst->rq_xprt;
- __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
+ __assign_str(device, rdma->sc_cm_id->device->name);
+ __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("xprt=%p addr=%s xid=0x%08x status=%d",
- __entry->xprt, __get_str(addr),
- __entry->xid, __entry->status
+ TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
+ __get_str(device), __entry->xid, __entry->pageno
)
);
-DECLARE_EVENT_CLASS(svcrdma_sendcomp_event,
+TRACE_EVENT(svcrdma_small_wrch_err,
TP_PROTO(
- const struct ib_wc *wc
+ const struct svcxprt_rdma *rdma,
+ unsigned int remaining,
+ unsigned int seg_no,
+ unsigned int num_segs
),
- TP_ARGS(wc),
+ TP_ARGS(rdma, remaining, seg_no, num_segs),
TP_STRUCT__entry(
- __field(const void *, cqe)
- __field(unsigned int, status)
- __field(unsigned int, vendor_err)
+ __field(unsigned int, remaining)
+ __field(unsigned int, seg_no)
+ __field(unsigned int, num_segs)
+ __string(device, rdma->sc_cm_id->device->name)
+ __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
- __entry->cqe = wc->wr_cqe;
- __entry->status = wc->status;
- if (wc->status)
- __entry->vendor_err = wc->vendor_err;
- else
- __entry->vendor_err = 0;
+ __entry->remaining = remaining;
+ __entry->seg_no = seg_no;
+ __entry->num_segs = num_segs;
+ __assign_str(device, rdma->sc_cm_id->device->name);
+ __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("cqe=%p status=%s (%u/0x%x)",
- __entry->cqe, rdma_show_wc_status(__entry->status),
- __entry->status, __entry->vendor_err
+ TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
+ __get_str(addr), __get_str(device), __entry->remaining,
+ __entry->seg_no, __entry->num_segs
)
);
-#define DEFINE_SENDCOMP_EVENT(name) \
- DEFINE_EVENT(svcrdma_sendcomp_event, svcrdma_wc_##name, \
- TP_PROTO( \
- const struct ib_wc *wc \
- ), \
- TP_ARGS(wc))
+TRACE_EVENT(svcrdma_send_pullup,
+ TP_PROTO(
+ const struct svc_rdma_send_ctxt *ctxt,
+ unsigned int msglen
+ ),
-TRACE_EVENT(svcrdma_post_send,
+ TP_ARGS(ctxt, msglen),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(unsigned int, hdrlen)
+ __field(unsigned int, msglen)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = ctxt->sc_cid.ci_queue_id;
+ __entry->completion_id = ctxt->sc_cid.ci_completion_id;
+ __entry->hdrlen = ctxt->sc_hdrbuf.len;
+ __entry->msglen = msglen;
+ ),
+
+ TP_printk("cq_id=%u cid=%d hdr=%u msg=%u (total %u)",
+ __entry->cq_id, __entry->completion_id,
+ __entry->hdrlen, __entry->msglen,
+ __entry->hdrlen + __entry->msglen)
+);
+
+TRACE_EVENT(svcrdma_send_err,
TP_PROTO(
- const struct ib_send_wr *wr,
+ const struct svc_rqst *rqst,
int status
),
- TP_ARGS(wr, status),
+ TP_ARGS(rqst, status),
+
+ TP_STRUCT__entry(
+ __field(int, status)
+ __field(u32, xid)
+ __string(addr, rqst->rq_xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->status = status;
+ __entry->xid = __be32_to_cpu(rqst->rq_xid);
+ __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
+ __entry->xid, __entry->status
+ )
+);
+
+TRACE_EVENT(svcrdma_post_send,
+ TP_PROTO(
+ const struct svc_rdma_send_ctxt *ctxt
+ ),
+
+ TP_ARGS(ctxt),
TP_STRUCT__entry(
- __field(const void *, cqe)
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(unsigned int, num_sge)
__field(u32, inv_rkey)
- __field(int, status)
),
TP_fast_assign(
- __entry->cqe = wr->wr_cqe;
+ const struct ib_send_wr *wr = &ctxt->sc_send_wr;
+
+ __entry->cq_id = ctxt->sc_cid.ci_queue_id;
+ __entry->completion_id = ctxt->sc_cid.ci_completion_id;
__entry->num_sge = wr->num_sge;
__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
wr->ex.invalidate_rkey : 0;
- __entry->status = status;
),
- TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x status=%d",
- __entry->cqe, __entry->num_sge,
- __entry->inv_rkey, __entry->status
+ TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
+ __entry->cq_id, __entry->completion_id,
+ __entry->num_sge, __entry->inv_rkey
)
);
-DEFINE_SENDCOMP_EVENT(send);
+DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_send);
+DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_flush);
+DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_err);
TRACE_EVENT(svcrdma_post_recv,
TP_PROTO(
- const struct ib_recv_wr *wr,
- int status
+ const struct svc_rdma_recv_ctxt *ctxt
),
- TP_ARGS(wr, status),
+ TP_ARGS(ctxt),
TP_STRUCT__entry(
- __field(const void *, cqe)
- __field(int, status)
+ __field(u32, cq_id)
+ __field(int, completion_id)
),
TP_fast_assign(
- __entry->cqe = wr->wr_cqe;
- __entry->status = status;
+ __entry->cq_id = ctxt->rc_cid.ci_queue_id;
+ __entry->completion_id = ctxt->rc_cid.ci_completion_id;
),
- TP_printk("cqe=%p status=%d",
- __entry->cqe, __entry->status
+ TP_printk("cq.id=%d cid=%d",
+ __entry->cq_id, __entry->completion_id
)
);
-TRACE_EVENT(svcrdma_wc_receive,
+DEFINE_RECEIVE_SUCCESS_EVENT(svcrdma_wc_recv);
+DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_flush);
+DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_err);
+
+TRACE_EVENT(svcrdma_rq_post_err,
TP_PROTO(
- const struct ib_wc *wc
+ const struct svcxprt_rdma *rdma,
+ int status
),
- TP_ARGS(wc),
+ TP_ARGS(rdma, status),
TP_STRUCT__entry(
- __field(const void *, cqe)
- __field(u32, byte_len)
- __field(unsigned int, status)
- __field(u32, vendor_err)
+ __field(int, status)
+ __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
- __entry->cqe = wc->wr_cqe;
- __entry->status = wc->status;
- if (wc->status) {
- __entry->byte_len = 0;
- __entry->vendor_err = wc->vendor_err;
- } else {
- __entry->byte_len = wc->byte_len;
- __entry->vendor_err = 0;
- }
+ __entry->status = status;
+ __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("cqe=%p byte_len=%u status=%s (%u/0x%x)",
- __entry->cqe, __entry->byte_len,
- rdma_show_wc_status(__entry->status),
- __entry->status, __entry->vendor_err
+ TP_printk("addr=%s status=%d",
+ __get_str(addr), __entry->status
)
);
-TRACE_EVENT(svcrdma_post_rw,
+DECLARE_EVENT_CLASS(svcrdma_post_chunk_class,
TP_PROTO(
- const void *cqe,
- int sqecount,
- int status
+ const struct rpc_rdma_cid *cid,
+ int sqecount
),
- TP_ARGS(cqe, sqecount, status),
+ TP_ARGS(cid, sqecount),
TP_STRUCT__entry(
- __field(const void *, cqe)
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(int, sqecount)
- __field(int, status)
),
TP_fast_assign(
- __entry->cqe = cqe;
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->sqecount = sqecount;
- __entry->status = status;
),
- TP_printk("cqe=%p sqecount=%d status=%d",
- __entry->cqe, __entry->sqecount, __entry->status
+ TP_printk("cq.id=%u cid=%d sqecount=%d",
+ __entry->cq_id, __entry->completion_id,
+ __entry->sqecount
)
);
-DEFINE_SENDCOMP_EVENT(read);
-DEFINE_SENDCOMP_EVENT(write);
+#define DEFINE_POST_CHUNK_EVENT(name) \
+ DEFINE_EVENT(svcrdma_post_chunk_class, \
+ svcrdma_post_##name##_chunk, \
+ TP_PROTO( \
+ const struct rpc_rdma_cid *cid, \
+ int sqecount \
+ ), \
+ TP_ARGS(cid, sqecount))
+
+DEFINE_POST_CHUNK_EVENT(read);
+DEFINE_POST_CHUNK_EVENT(write);
+DEFINE_POST_CHUNK_EVENT(reply);
-TRACE_EVENT(svcrdma_cm_event,
+TRACE_EVENT(svcrdma_wc_read,
TP_PROTO(
- const struct rdma_cm_event *event,
- const struct sockaddr *sap
+ const struct ib_wc *wc,
+ const struct rpc_rdma_cid *cid,
+ unsigned int totalbytes,
+ const ktime_t posttime
),
- TP_ARGS(event, sap),
+ TP_ARGS(wc, cid, totalbytes, posttime),
TP_STRUCT__entry(
- __field(unsigned int, event)
- __field(int, status)
- __array(__u8, addr, INET6_ADDRSTRLEN + 10)
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(s64, read_latency)
+ __field(unsigned int, totalbytes)
),
TP_fast_assign(
- __entry->event = event->event;
- __entry->status = event->status;
- snprintf(__entry->addr, sizeof(__entry->addr) - 1,
- "%pISpc", sap);
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ __entry->totalbytes = totalbytes;
+ __entry->read_latency = ktime_us_delta(ktime_get(), posttime);
),
- TP_printk("addr=%s event=%s (%u/%d)",
- __entry->addr,
- rdma_show_cm_event(__entry->event),
- __entry->event, __entry->status
+ TP_printk("cq.id=%u cid=%d totalbytes=%u latency-us=%lld",
+ __entry->cq_id, __entry->completion_id,
+ __entry->totalbytes, __entry->read_latency
)
);
+DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_flush);
+DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_err);
+
+DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_write);
+DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_flush);
+DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_err);
+
TRACE_EVENT(svcrdma_qp_error,
TP_PROTO(
const struct ib_event *event,
@@ -1902,6 +2210,34 @@ DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);
+TRACE_EVENT(svcrdma_sq_post_err,
+ TP_PROTO(
+ const struct svcxprt_rdma *rdma,
+ int status
+ ),
+
+ TP_ARGS(rdma, status),
+
+ TP_STRUCT__entry(
+ __field(int, avail)
+ __field(int, depth)
+ __field(int, status)
+ __string(addr, rdma->sc_xprt.xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->avail = atomic_read(&rdma->sc_sq_avail);
+ __entry->depth = rdma->sc_sq_depth;
+ __entry->status = status;
+ __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
+ __get_str(addr), __entry->avail, __entry->depth,
+ __entry->status
+ )
+);
+
#endif /* _TRACE_RPCRDMA_H */
#include <trace/define_trace.h>
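
A pattern worth noting across the rpcrdma.h hunks above: every event that used to print raw kernel pointers (rep=%p, sc=%p, cqe=%p) now records a stable (cq.id, cid) pair from struct rpc_rdma_cid, so completions can be correlated across events without exposing kernel addresses. A minimal sketch of how a caller might populate that pair, using only fields this patch itself references (the helper name and the counter argument are illustrative, not part of the diff):

/* Sketch only: fill in the completion ID recorded by the tracepoints.
 * The CQ's restrack ID (cq->res.id) is stable for the connection;
 * the completion ID is a simple per-CQ counter.
 */
static void example_cid_init(struct ib_cq *cq, atomic_t *counter,
			     struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = cq->res.id;			/* printed as cq.id= */
	cid->ci_completion_id = atomic_inc_return(counter);	/* printed as cid= */
}
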
diff --git a/include/trace/events/rv.h b/include/trace/events/rv.h
new file mode 100644
index 000000000000..56592da9301c
--- /dev/null
+++ b/include/trace/events/rv.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rv
+
+#if !defined(_TRACE_RV_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RV_H
+
+#include <linux/rv.h>
+#include <linux/tracepoint.h>
+
+#ifdef CONFIG_DA_MON_EVENTS_IMPLICIT
+DECLARE_EVENT_CLASS(event_da_monitor,
+
+ TP_PROTO(char *state, char *event, char *next_state, bool final_state),
+
+ TP_ARGS(state, event, next_state, final_state),
+
+ TP_STRUCT__entry(
+ __array( char, state, MAX_DA_NAME_LEN )
+ __array( char, event, MAX_DA_NAME_LEN )
+ __array( char, next_state, MAX_DA_NAME_LEN )
+ __field( bool, final_state )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->state, state, MAX_DA_NAME_LEN);
+ memcpy(__entry->event, event, MAX_DA_NAME_LEN);
+ memcpy(__entry->next_state, next_state, MAX_DA_NAME_LEN);
+ __entry->final_state = final_state;
+ ),
+
+ TP_printk("%s x %s -> %s %s",
+ __entry->state,
+ __entry->event,
+ __entry->next_state,
+ __entry->final_state ? "(final)" : "")
+);
+
+DECLARE_EVENT_CLASS(error_da_monitor,
+
+ TP_PROTO(char *state, char *event),
+
+ TP_ARGS(state, event),
+
+ TP_STRUCT__entry(
+ __array( char, state, MAX_DA_NAME_LEN )
+ __array( char, event, MAX_DA_NAME_LEN )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->state, state, MAX_DA_NAME_LEN);
+ memcpy(__entry->event, event, MAX_DA_NAME_LEN);
+ ),
+
+ TP_printk("event %s not expected in the state %s",
+ __entry->event,
+ __entry->state)
+);
+
+#ifdef CONFIG_RV_MON_WIP
+DEFINE_EVENT(event_da_monitor, event_wip,
+ TP_PROTO(char *state, char *event, char *next_state, bool final_state),
+ TP_ARGS(state, event, next_state, final_state));
+
+DEFINE_EVENT(error_da_monitor, error_wip,
+ TP_PROTO(char *state, char *event),
+ TP_ARGS(state, event));
+#endif /* CONFIG_RV_MON_WIP */
+#endif /* CONFIG_DA_MON_EVENTS_IMPLICIT */
+
+#ifdef CONFIG_DA_MON_EVENTS_ID
+DECLARE_EVENT_CLASS(event_da_monitor_id,
+
+ TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state),
+
+ TP_ARGS(id, state, event, next_state, final_state),
+
+ TP_STRUCT__entry(
+ __field( int, id )
+ __array( char, state, MAX_DA_NAME_LEN )
+ __array( char, event, MAX_DA_NAME_LEN )
+ __array( char, next_state, MAX_DA_NAME_LEN )
+ __field( bool, final_state )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->state, state, MAX_DA_NAME_LEN);
+ memcpy(__entry->event, event, MAX_DA_NAME_LEN);
+ memcpy(__entry->next_state, next_state, MAX_DA_NAME_LEN);
+ __entry->id = id;
+ __entry->final_state = final_state;
+ ),
+
+ TP_printk("%d: %s x %s -> %s %s",
+ __entry->id,
+ __entry->state,
+ __entry->event,
+ __entry->next_state,
+ __entry->final_state ? "(final)" : "")
+);
+
+DECLARE_EVENT_CLASS(error_da_monitor_id,
+
+ TP_PROTO(int id, char *state, char *event),
+
+ TP_ARGS(id, state, event),
+
+ TP_STRUCT__entry(
+ __field( int, id )
+ __array( char, state, MAX_DA_NAME_LEN )
+ __array( char, event, MAX_DA_NAME_LEN )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->state, state, MAX_DA_NAME_LEN);
+ memcpy(__entry->event, event, MAX_DA_NAME_LEN);
+ __entry->id = id;
+ ),
+
+ TP_printk("%d: event %s not expected in the state %s",
+ __entry->id,
+ __entry->event,
+ __entry->state)
+);
+
+#ifdef CONFIG_RV_MON_WWNR
+/* id is the pid of the task */
+DEFINE_EVENT(event_da_monitor_id, event_wwnr,
+ TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state),
+ TP_ARGS(id, state, event, next_state, final_state));
+
+DEFINE_EVENT(error_da_monitor_id, error_wwnr,
+ TP_PROTO(int id, char *state, char *event),
+ TP_ARGS(id, state, event));
+#endif /* CONFIG_RV_MON_WWNR */
+
+#endif /* CONFIG_DA_MON_EVENTS_ID */
+#endif /* _TRACE_RV_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#include <trace/define_trace.h>
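
Each DEFINE_EVENT() above generates a trace_event_wip()/trace_error_wip() callable (and the _wwnr equivalents) with the TP_PROTO signature shown. A minimal sketch of the calling convention, under the assumption of a hypothetical wrapper (only the two trace_*() calls come from this header); note that TP_fast_assign() copies MAX_DA_NAME_LEN bytes, so callers must pass buffers at least that long:

/* Hypothetical monitor step: emit a normal transition event, or an
 * error event when the current state does not accept the event.
 * All strings must point to buffers of MAX_DA_NAME_LEN bytes or more.
 */
static void wip_monitor_step(bool accepted, char *state, char *event,
			     char *next_state, bool is_final)
{
	if (accepted)
		trace_event_wip(state, event, next_state, is_final);
	else
		trace_error_wip(state, event);
}
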
diff --git a/include/trace/events/rwmmio.h b/include/trace/events/rwmmio.h
new file mode 100644
index 000000000000..de41159216c1
--- /dev/null
+++ b/include/trace/events/rwmmio.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rwmmio
+
+#if !defined(_TRACE_RWMMIO_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RWMMIO_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(rwmmio_rw_template,
+
+ TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr),
+
+ TP_ARGS(caller, val, width, addr),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, caller)
+ __field(unsigned long, addr)
+ __field(u64, val)
+ __field(u8, width)
+ ),
+
+ TP_fast_assign(
+ __entry->caller = caller;
+ __entry->val = val;
+ __entry->addr = (unsigned long)addr;
+ __entry->width = width;
+ ),
+
+ TP_printk("%pS width=%d val=%#llx addr=%#lx",
+ (void *)__entry->caller, __entry->width,
+ __entry->val, __entry->addr)
+);
+
+DEFINE_EVENT(rwmmio_rw_template, rwmmio_write,
+ TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr),
+ TP_ARGS(caller, val, width, addr)
+);
+
+DEFINE_EVENT(rwmmio_rw_template, rwmmio_post_write,
+ TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr),
+ TP_ARGS(caller, val, width, addr)
+);
+
+TRACE_EVENT(rwmmio_read,
+
+ TP_PROTO(unsigned long caller, u8 width, const volatile void __iomem *addr),
+
+ TP_ARGS(caller, width, addr),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, caller)
+ __field(unsigned long, addr)
+ __field(u8, width)
+ ),
+
+ TP_fast_assign(
+ __entry->caller = caller;
+ __entry->addr = (unsigned long)addr;
+ __entry->width = width;
+ ),
+
+ TP_printk("%pS width=%d addr=%#lx",
+ (void *)__entry->caller, __entry->width, __entry->addr)
+);
+
+TRACE_EVENT(rwmmio_post_read,
+
+ TP_PROTO(unsigned long caller, u64 val, u8 width, const volatile void __iomem *addr),
+
+ TP_ARGS(caller, val, width, addr),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, caller)
+ __field(unsigned long, addr)
+ __field(u64, val)
+ __field(u8, width)
+ ),
+
+ TP_fast_assign(
+ __entry->caller = caller;
+ __entry->val = val;
+ __entry->addr = (unsigned long)addr;
+ __entry->width = width;
+ ),
+
+ TP_printk("%pS width=%d val=%#llx addr=%#lx",
+ (void *)__entry->caller, __entry->width,
+ __entry->val, __entry->addr)
+);
+
+#endif /* _TRACE_RWMMIO_H */
+
+#include <trace/define_trace.h>
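
The rwmmio events are designed to bracket an MMIO access: rwmmio_write/rwmmio_read fire before the access and the post_* variants after it completes, so ordering-sensitive driver bugs show up in the trace. A sketch of the intended call pattern, assuming an instrumented accessor (the wrapper itself is illustrative; the trace_rwmmio_*() calls and their prototypes come from this header, and _THIS_IP_ is the kernel's existing caller-address macro):

/* Illustrative instrumented 32-bit MMIO write: one event before the
 * access and one after it completes, both tagged with the caller.
 */
static inline void traced_writel(u32 val, volatile void __iomem *addr)
{
	trace_rwmmio_write(_THIS_IP_, val, 8 * sizeof(val), addr);
	writel(val, addr);
	trace_rwmmio_post_write(_THIS_IP_, val, 8 * sizeof(val), addr);
}
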
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 191fe447f990..d20bf4aa0204 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -14,214 +14,6 @@
#include <linux/errqueue.h>
/*
- * Define enums for tracing information.
- *
- * These should all be kept sorted, making it easier to match the string
- * mapping tables further on.
- */
-#ifndef __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY
-#define __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY
-
-enum rxrpc_skb_trace {
- rxrpc_skb_cleaned,
- rxrpc_skb_freed,
- rxrpc_skb_got,
- rxrpc_skb_lost,
- rxrpc_skb_new,
- rxrpc_skb_purged,
- rxrpc_skb_received,
- rxrpc_skb_rotated,
- rxrpc_skb_seen,
- rxrpc_skb_unshared,
- rxrpc_skb_unshared_nomem,
-};
-
-enum rxrpc_local_trace {
- rxrpc_local_got,
- rxrpc_local_new,
- rxrpc_local_processing,
- rxrpc_local_put,
- rxrpc_local_queued,
-};
-
-enum rxrpc_peer_trace {
- rxrpc_peer_got,
- rxrpc_peer_new,
- rxrpc_peer_processing,
- rxrpc_peer_put,
-};
-
-enum rxrpc_conn_trace {
- rxrpc_conn_got,
- rxrpc_conn_new_client,
- rxrpc_conn_new_service,
- rxrpc_conn_put_client,
- rxrpc_conn_put_service,
- rxrpc_conn_queued,
- rxrpc_conn_reap_service,
- rxrpc_conn_seen,
-};
-
-enum rxrpc_client_trace {
- rxrpc_client_activate_chans,
- rxrpc_client_alloc,
- rxrpc_client_chan_activate,
- rxrpc_client_chan_disconnect,
- rxrpc_client_chan_pass,
- rxrpc_client_chan_unstarted,
- rxrpc_client_chan_wait_failed,
- rxrpc_client_cleanup,
- rxrpc_client_count,
- rxrpc_client_discard,
- rxrpc_client_duplicate,
- rxrpc_client_exposed,
- rxrpc_client_replace,
- rxrpc_client_to_active,
- rxrpc_client_to_culled,
- rxrpc_client_to_idle,
- rxrpc_client_to_inactive,
- rxrpc_client_to_upgrade,
- rxrpc_client_to_waiting,
- rxrpc_client_uncount,
-};
-
-enum rxrpc_call_trace {
- rxrpc_call_connected,
- rxrpc_call_error,
- rxrpc_call_got,
- rxrpc_call_got_kernel,
- rxrpc_call_got_userid,
- rxrpc_call_new_client,
- rxrpc_call_new_service,
- rxrpc_call_put,
- rxrpc_call_put_kernel,
- rxrpc_call_put_noqueue,
- rxrpc_call_put_userid,
- rxrpc_call_queued,
- rxrpc_call_queued_ref,
- rxrpc_call_release,
- rxrpc_call_seen,
-};
-
-enum rxrpc_transmit_trace {
- rxrpc_transmit_await_reply,
- rxrpc_transmit_end,
- rxrpc_transmit_queue,
- rxrpc_transmit_queue_last,
- rxrpc_transmit_rotate,
- rxrpc_transmit_rotate_last,
- rxrpc_transmit_wait,
-};
-
-enum rxrpc_receive_trace {
- rxrpc_receive_end,
- rxrpc_receive_front,
- rxrpc_receive_incoming,
- rxrpc_receive_queue,
- rxrpc_receive_queue_last,
- rxrpc_receive_rotate,
-};
-
-enum rxrpc_recvmsg_trace {
- rxrpc_recvmsg_cont,
- rxrpc_recvmsg_data_return,
- rxrpc_recvmsg_dequeue,
- rxrpc_recvmsg_enter,
- rxrpc_recvmsg_full,
- rxrpc_recvmsg_hole,
- rxrpc_recvmsg_next,
- rxrpc_recvmsg_requeue,
- rxrpc_recvmsg_return,
- rxrpc_recvmsg_terminal,
- rxrpc_recvmsg_to_be_accepted,
- rxrpc_recvmsg_wait,
-};
-
-enum rxrpc_rtt_tx_trace {
- rxrpc_rtt_tx_data,
- rxrpc_rtt_tx_ping,
-};
-
-enum rxrpc_rtt_rx_trace {
- rxrpc_rtt_rx_ping_response,
- rxrpc_rtt_rx_requested_ack,
-};
-
-enum rxrpc_timer_trace {
- rxrpc_timer_begin,
- rxrpc_timer_exp_ack,
- rxrpc_timer_exp_hard,
- rxrpc_timer_exp_idle,
- rxrpc_timer_exp_keepalive,
- rxrpc_timer_exp_lost_ack,
- rxrpc_timer_exp_normal,
- rxrpc_timer_exp_ping,
- rxrpc_timer_exp_resend,
- rxrpc_timer_expired,
- rxrpc_timer_init_for_reply,
- rxrpc_timer_init_for_send_reply,
- rxrpc_timer_restart,
- rxrpc_timer_set_for_ack,
- rxrpc_timer_set_for_hard,
- rxrpc_timer_set_for_idle,
- rxrpc_timer_set_for_keepalive,
- rxrpc_timer_set_for_lost_ack,
- rxrpc_timer_set_for_normal,
- rxrpc_timer_set_for_ping,
- rxrpc_timer_set_for_resend,
- rxrpc_timer_set_for_send,
-};
-
-enum rxrpc_propose_ack_trace {
- rxrpc_propose_ack_client_tx_end,
- rxrpc_propose_ack_input_data,
- rxrpc_propose_ack_ping_for_check_life,
- rxrpc_propose_ack_ping_for_keepalive,
- rxrpc_propose_ack_ping_for_lost_ack,
- rxrpc_propose_ack_ping_for_lost_reply,
- rxrpc_propose_ack_ping_for_params,
- rxrpc_propose_ack_processing_op,
- rxrpc_propose_ack_respond_to_ack,
- rxrpc_propose_ack_respond_to_ping,
- rxrpc_propose_ack_retry_tx,
- rxrpc_propose_ack_rotate_rx,
- rxrpc_propose_ack_terminal_ack,
-};
-
-enum rxrpc_propose_ack_outcome {
- rxrpc_propose_ack_subsume,
- rxrpc_propose_ack_update,
- rxrpc_propose_ack_use,
-};
-
-enum rxrpc_congest_change {
- rxrpc_cong_begin_retransmission,
- rxrpc_cong_cleared_nacks,
- rxrpc_cong_new_low_nack,
- rxrpc_cong_no_change,
- rxrpc_cong_progress,
- rxrpc_cong_retransmit_again,
- rxrpc_cong_rtt_window_end,
- rxrpc_cong_saw_nack,
-};
-
-enum rxrpc_tx_point {
- rxrpc_tx_point_call_abort,
- rxrpc_tx_point_call_ack,
- rxrpc_tx_point_call_data_frag,
- rxrpc_tx_point_call_data_nofrag,
- rxrpc_tx_point_call_final_resend,
- rxrpc_tx_point_conn_abort,
- rxrpc_tx_point_rxkad_challenge,
- rxrpc_tx_point_rxkad_response,
- rxrpc_tx_point_reject,
- rxrpc_tx_point_version_keepalive,
- rxrpc_tx_point_version_reply,
-};
-
-#endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */
-
-/*
* Declare tracing information enums and their string mappings for display.
*/
#define rxrpc_skb_traces \
@@ -266,41 +58,29 @@ enum rxrpc_tx_point {
EM(rxrpc_client_chan_activate, "ChActv") \
EM(rxrpc_client_chan_disconnect, "ChDisc") \
EM(rxrpc_client_chan_pass, "ChPass") \
- EM(rxrpc_client_chan_unstarted, "ChUnst") \
EM(rxrpc_client_chan_wait_failed, "ChWtFl") \
EM(rxrpc_client_cleanup, "Clean ") \
- EM(rxrpc_client_count, "Count ") \
EM(rxrpc_client_discard, "Discar") \
EM(rxrpc_client_duplicate, "Duplic") \
EM(rxrpc_client_exposed, "Expose") \
EM(rxrpc_client_replace, "Replac") \
EM(rxrpc_client_to_active, "->Actv") \
- EM(rxrpc_client_to_culled, "->Cull") \
- EM(rxrpc_client_to_idle, "->Idle") \
- EM(rxrpc_client_to_inactive, "->Inac") \
- EM(rxrpc_client_to_upgrade, "->Upgd") \
- EM(rxrpc_client_to_waiting, "->Wait") \
- E_(rxrpc_client_uncount, "Uncoun")
-
-#define rxrpc_conn_cache_states \
- EM(RXRPC_CONN_CLIENT_INACTIVE, "Inac") \
- EM(RXRPC_CONN_CLIENT_WAITING, "Wait") \
- EM(RXRPC_CONN_CLIENT_ACTIVE, "Actv") \
- EM(RXRPC_CONN_CLIENT_UPGRADE, "Upgd") \
- EM(RXRPC_CONN_CLIENT_CULLED, "Cull") \
- E_(RXRPC_CONN_CLIENT_IDLE, "Idle") \
+ E_(rxrpc_client_to_idle, "->Idle")
#define rxrpc_call_traces \
EM(rxrpc_call_connected, "CON") \
EM(rxrpc_call_error, "*E*") \
EM(rxrpc_call_got, "GOT") \
EM(rxrpc_call_got_kernel, "Gke") \
+ EM(rxrpc_call_got_timer, "GTM") \
EM(rxrpc_call_got_userid, "Gus") \
EM(rxrpc_call_new_client, "NWc") \
EM(rxrpc_call_new_service, "NWs") \
EM(rxrpc_call_put, "PUT") \
EM(rxrpc_call_put_kernel, "Pke") \
- EM(rxrpc_call_put_noqueue, "PNQ") \
+ EM(rxrpc_call_put_noqueue, "PnQ") \
+ EM(rxrpc_call_put_notimer, "PnT") \
+ EM(rxrpc_call_put_timer, "PTM") \
EM(rxrpc_call_put_userid, "Pus") \
EM(rxrpc_call_queued, "QUE") \
EM(rxrpc_call_queued_ref, "QUR") \
@@ -339,10 +119,15 @@ enum rxrpc_tx_point {
E_(rxrpc_recvmsg_wait, "WAIT")
#define rxrpc_rtt_tx_traces \
+ EM(rxrpc_rtt_tx_cancel, "CNCE") \
EM(rxrpc_rtt_tx_data, "DATA") \
+ EM(rxrpc_rtt_tx_no_slot, "FULL") \
E_(rxrpc_rtt_tx_ping, "PING")
#define rxrpc_rtt_rx_traces \
+ EM(rxrpc_rtt_rx_cancel, "CNCL") \
+ EM(rxrpc_rtt_rx_obsolete, "OBSL") \
+ EM(rxrpc_rtt_rx_lost, "LOST") \
EM(rxrpc_rtt_rx_ping_response, "PONG") \
E_(rxrpc_rtt_rx_requested_ack, "RACK")
@@ -400,7 +185,7 @@ enum rxrpc_tx_point {
EM(rxrpc_cong_begin_retransmission, " Retrans") \
EM(rxrpc_cong_cleared_nacks, " Cleared") \
EM(rxrpc_cong_new_low_nack, " NewLowN") \
- EM(rxrpc_cong_no_change, "") \
+ EM(rxrpc_cong_no_change, " -") \
EM(rxrpc_cong_progress, " Progres") \
EM(rxrpc_cong_retransmit_again, " ReTxAgn") \
EM(rxrpc_cong_rtt_window_end, " RttWinE") \
@@ -458,6 +243,36 @@ enum rxrpc_tx_point {
E_(rxrpc_tx_point_version_reply, "VerReply")
/*
+ * Generate enums for tracing information.
+ */
+#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+#undef EM
+#undef E_
+#define EM(a, b) a,
+#define E_(a, b) a
+
+enum rxrpc_call_trace { rxrpc_call_traces } __mode(byte);
+enum rxrpc_client_trace { rxrpc_client_traces } __mode(byte);
+enum rxrpc_congest_change { rxrpc_congest_changes } __mode(byte);
+enum rxrpc_conn_trace { rxrpc_conn_traces } __mode(byte);
+enum rxrpc_local_trace { rxrpc_local_traces } __mode(byte);
+enum rxrpc_peer_trace { rxrpc_peer_traces } __mode(byte);
+enum rxrpc_propose_ack_outcome { rxrpc_propose_ack_outcomes } __mode(byte);
+enum rxrpc_propose_ack_trace { rxrpc_propose_ack_traces } __mode(byte);
+enum rxrpc_receive_trace { rxrpc_receive_traces } __mode(byte);
+enum rxrpc_recvmsg_trace { rxrpc_recvmsg_traces } __mode(byte);
+enum rxrpc_rtt_rx_trace { rxrpc_rtt_rx_traces } __mode(byte);
+enum rxrpc_rtt_tx_trace { rxrpc_rtt_tx_traces } __mode(byte);
+enum rxrpc_skb_trace { rxrpc_skb_traces } __mode(byte);
+enum rxrpc_timer_trace { rxrpc_timer_traces } __mode(byte);
+enum rxrpc_transmit_trace { rxrpc_transmit_traces } __mode(byte);
+enum rxrpc_tx_point { rxrpc_tx_points } __mode(byte);
+
+#endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */
+
+/*
 * Export enum symbols to userspace.
*/
#undef EM
@@ -465,21 +280,21 @@ enum rxrpc_tx_point {
#define EM(a, b) TRACE_DEFINE_ENUM(a);
#define E_(a, b) TRACE_DEFINE_ENUM(a);
-rxrpc_skb_traces;
-rxrpc_local_traces;
-rxrpc_conn_traces;
-rxrpc_client_traces;
rxrpc_call_traces;
-rxrpc_transmit_traces;
+rxrpc_client_traces;
+rxrpc_congest_changes;
+rxrpc_congest_modes;
+rxrpc_conn_traces;
+rxrpc_local_traces;
+rxrpc_propose_ack_outcomes;
+rxrpc_propose_ack_traces;
rxrpc_receive_traces;
rxrpc_recvmsg_traces;
-rxrpc_rtt_tx_traces;
rxrpc_rtt_rx_traces;
+rxrpc_rtt_tx_traces;
+rxrpc_skb_traces;
rxrpc_timer_traces;
-rxrpc_propose_ack_traces;
-rxrpc_propose_ack_outcomes;
-rxrpc_congest_modes;
-rxrpc_congest_changes;
+rxrpc_transmit_traces;
rxrpc_tx_points;
/*
@@ -584,23 +399,20 @@ TRACE_EVENT(rxrpc_client,
__field(int, channel )
__field(int, usage )
__field(enum rxrpc_client_trace, op )
- __field(enum rxrpc_conn_cache_state, cs )
),
TP_fast_assign(
- __entry->conn = conn->debug_id;
+ __entry->conn = conn ? conn->debug_id : 0;
__entry->channel = channel;
- __entry->usage = atomic_read(&conn->usage);
+ __entry->usage = conn ? refcount_read(&conn->ref) : -2;
__entry->op = op;
- __entry->cid = conn->proto.cid;
- __entry->cs = conn->cache_state;
+ __entry->cid = conn ? conn->proto.cid : 0;
),
- TP_printk("C=%08x h=%2d %s %s i=%08x u=%d",
+ TP_printk("C=%08x h=%2d %s i=%08x u=%d",
__entry->conn,
__entry->channel,
__print_symbolic(__entry->op, rxrpc_client_traces),
- __print_symbolic(__entry->cs, rxrpc_conn_cache_states),
__entry->cid,
__entry->usage)
);
@@ -1087,63 +899,67 @@ TRACE_EVENT(rxrpc_recvmsg,
TRACE_EVENT(rxrpc_rtt_tx,
TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_tx_trace why,
- rxrpc_serial_t send_serial),
+ int slot, rxrpc_serial_t send_serial),
- TP_ARGS(call, why, send_serial),
+ TP_ARGS(call, why, slot, send_serial),
TP_STRUCT__entry(
__field(unsigned int, call )
__field(enum rxrpc_rtt_tx_trace, why )
+ __field(int, slot )
__field(rxrpc_serial_t, send_serial )
),
TP_fast_assign(
__entry->call = call->debug_id;
__entry->why = why;
+ __entry->slot = slot;
__entry->send_serial = send_serial;
),
- TP_printk("c=%08x %s sr=%08x",
+ TP_printk("c=%08x [%d] %s sr=%08x",
__entry->call,
+ __entry->slot,
__print_symbolic(__entry->why, rxrpc_rtt_tx_traces),
__entry->send_serial)
);
TRACE_EVENT(rxrpc_rtt_rx,
TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
+ int slot,
rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
- s64 rtt, u8 nr, s64 avg),
+ u32 rtt, u32 rto),
- TP_ARGS(call, why, send_serial, resp_serial, rtt, nr, avg),
+ TP_ARGS(call, why, slot, send_serial, resp_serial, rtt, rto),
TP_STRUCT__entry(
__field(unsigned int, call )
__field(enum rxrpc_rtt_rx_trace, why )
- __field(u8, nr )
+ __field(int, slot )
__field(rxrpc_serial_t, send_serial )
__field(rxrpc_serial_t, resp_serial )
- __field(s64, rtt )
- __field(u64, avg )
+ __field(u32, rtt )
+ __field(u32, rto )
),
TP_fast_assign(
__entry->call = call->debug_id;
__entry->why = why;
+ __entry->slot = slot;
__entry->send_serial = send_serial;
__entry->resp_serial = resp_serial;
__entry->rtt = rtt;
- __entry->nr = nr;
- __entry->avg = avg;
+ __entry->rto = rto;
),
- TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld",
+ TP_printk("c=%08x [%d] %s sr=%08x rr=%08x rtt=%u rto=%u",
__entry->call,
+ __entry->slot,
__print_symbolic(__entry->why, rxrpc_rtt_rx_traces),
__entry->send_serial,
__entry->resp_serial,
__entry->rtt,
- __entry->nr,
- __entry->avg)
+ __entry->rto)
);
TRACE_EVENT(rxrpc_timer,
@@ -1514,7 +1330,7 @@ TRACE_EVENT(rxrpc_call_reset,
__entry->call_serial = call->rx_serial;
__entry->conn_serial = call->conn->hi_serial;
__entry->tx_seq = call->tx_hard_ack;
- __entry->rx_seq = call->ackr_seen;
+ __entry->rx_seq = call->rx_hard_ack;
),
TP_printk("c=%08x %08x:%08x r=%08x/%08x tx=%08x rx=%08x",
@@ -1544,6 +1360,43 @@ TRACE_EVENT(rxrpc_notify_socket,
__entry->serial)
);
+TRACE_EVENT(rxrpc_rx_discard_ack,
+ TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial,
+ rxrpc_seq_t first_soft_ack, rxrpc_seq_t call_ackr_first,
+ rxrpc_seq_t prev_pkt, rxrpc_seq_t call_ackr_prev),
+
+ TP_ARGS(debug_id, serial, first_soft_ack, call_ackr_first,
+ prev_pkt, call_ackr_prev),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, debug_id )
+ __field(rxrpc_serial_t, serial )
+ __field(rxrpc_seq_t, first_soft_ack)
+ __field(rxrpc_seq_t, call_ackr_first)
+ __field(rxrpc_seq_t, prev_pkt)
+ __field(rxrpc_seq_t, call_ackr_prev)
+ ),
+
+ TP_fast_assign(
+ __entry->debug_id = debug_id;
+ __entry->serial = serial;
+ __entry->first_soft_ack = first_soft_ack;
+ __entry->call_ackr_first = call_ackr_first;
+ __entry->prev_pkt = prev_pkt;
+ __entry->call_ackr_prev = call_ackr_prev;
+ ),
+
+ TP_printk("c=%08x r=%08x %08x<%08x %08x<%08x",
+ __entry->debug_id,
+ __entry->serial,
+ __entry->first_soft_ack,
+ __entry->call_ackr_first,
+ __entry->prev_pkt,
+ __entry->call_ackr_prev)
+ );
+
+#undef EM
+#undef E_
#endif /* _TRACE_RXRPC_H */
/* This part must be outside protection */
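
The net effect of these rxrpc hunks is that each trace table becomes the single source of truth: the same EM()/E_() list expands once into enum values, once into TRACE_DEFINE_ENUM() exports, and once into { value, string } pairs for __print_symbolic(), so the hand-maintained enum blocks deleted above can no longer drift out of sync with the string tables. A condensed sketch of the three-way expansion, using a made-up two-entry table:

    #define demo_traces \
            EM(demo_first,  "1st") \
            E_(demo_second, "2nd")

    /* expansion 1: enum values */
    #undef EM
    #undef E_
    #define EM(a, b) a,
    #define E_(a, b) a
    enum demo_trace { demo_traces } __mode(byte);

    /* expansion 2: make the symbols visible to userspace tools */
    #undef EM
    #undef E_
    #define EM(a, b) TRACE_DEFINE_ENUM(a);
    #define E_(a, b) TRACE_DEFINE_ENUM(a);
    demo_traces;

    /* expansion 3: string table, usable as
     * __print_symbolic(val, demo_traces) */
    #undef EM
    #undef E_
    #define EM(a, b) { a, b },
    #define E_(a, b) { a, b }
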
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 420e80e56e55..fbb99a61f714 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -5,6 +5,7 @@
#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H
+#include <linux/kthread.h>
#include <linux/sched/numa_balancing.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>
@@ -51,6 +52,89 @@ TRACE_EVENT(sched_kthread_stop_ret,
TP_printk("ret=%d", __entry->ret)
);
+/**
+ * sched_kthread_work_queue_work - called when a work item gets queued
+ * @worker: pointer to the kthread_worker
+ * @work: pointer to struct kthread_work
+ *
+ * This event occurs when a work item is queued immediately, or once a
+ * delayed work item is actually queued (i.e., once its delay has
+ * elapsed).
+ */
+TRACE_EVENT(sched_kthread_work_queue_work,
+
+ TP_PROTO(struct kthread_worker *worker,
+ struct kthread_work *work),
+
+ TP_ARGS(worker, work),
+
+ TP_STRUCT__entry(
+ __field( void *, work )
+ __field( void *, function)
+ __field( void *, worker)
+ ),
+
+ TP_fast_assign(
+ __entry->work = work;
+ __entry->function = work->func;
+ __entry->worker = worker;
+ ),
+
+ TP_printk("work struct=%p function=%ps worker=%p",
+ __entry->work, __entry->function, __entry->worker)
+);
+
+/**
+ * sched_kthread_work_execute_start - called immediately before the work callback
+ * @work: pointer to struct kthread_work
+ *
+ * Allows tracking of kthread work execution.
+ */
+TRACE_EVENT(sched_kthread_work_execute_start,
+
+ TP_PROTO(struct kthread_work *work),
+
+ TP_ARGS(work),
+
+ TP_STRUCT__entry(
+ __field( void *, work )
+ __field( void *, function)
+ ),
+
+ TP_fast_assign(
+ __entry->work = work;
+ __entry->function = work->func;
+ ),
+
+ TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
+);
+
+/**
+ * sched_kthread_work_execute_end - called immediately after the work callback
+ * @work: pointer to struct kthread_work
+ * @function: pointer to the worker function
+ *
+ * Allows tracking of kthread work execution.
+ */
+TRACE_EVENT(sched_kthread_work_execute_end,
+
+ TP_PROTO(struct kthread_work *work, kthread_work_func_t function),
+
+ TP_ARGS(work, function),
+
+ TP_STRUCT__entry(
+ __field( void *, work )
+ __field( void *, function)
+ ),
+
+ TP_fast_assign(
+ __entry->work = work;
+ __entry->function = function;
+ ),
+
+ TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
+);
+
/*
* Tracepoint for waking up a task:
*/
@@ -64,7 +148,6 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
- __field( int, success )
__field( int, target_cpu )
),
@@ -72,7 +155,6 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio; /* XXX SCHED_DEADLINE */
- __entry->success = 1; /* rudiment, kill when possible */
__entry->target_cpu = task_cpu(p);
),
@@ -90,8 +172,8 @@ DEFINE_EVENT(sched_wakeup_template, sched_waking,
TP_ARGS(p));
/*
- * Tracepoint called when the task is actually woken; p->state == TASK_RUNNNG.
- * It it not always called from the waking context.
+ * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
+ * It is not always called from the waking context.
*/
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
TP_PROTO(struct task_struct *p),
@@ -105,7 +187,9 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
TP_ARGS(p));
#ifdef CREATE_TRACE_POINTS
-static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
+static inline long __trace_sched_switch_state(bool preempt,
+ unsigned int prev_state,
+ struct task_struct *p)
{
unsigned int state;
@@ -126,7 +210,7 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
* it for left shift operation to get the correct task->state
* mapping.
*/
- state = task_state_index(p);
+ state = __task_state_index(prev_state, p->exit_state);
return state ? (1 << (state - 1)) : state;
}
@@ -139,9 +223,10 @@ TRACE_EVENT(sched_switch,
TP_PROTO(bool preempt,
struct task_struct *prev,
- struct task_struct *next),
+ struct task_struct *next,
+ unsigned int prev_state),
- TP_ARGS(preempt, prev, next),
+ TP_ARGS(preempt, prev, next, prev_state),
TP_STRUCT__entry(
__array( char, prev_comm, TASK_COMM_LEN )
@@ -157,7 +242,7 @@ TRACE_EVENT(sched_switch,
memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
__entry->prev_pid = prev->pid;
__entry->prev_prio = prev->prio;
- __entry->prev_state = __trace_sched_switch_state(preempt, prev);
+ __entry->prev_state = __trace_sched_switch_state(preempt, prev_state, prev);
memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
__entry->next_pid = next->pid;
__entry->next_prio = next->prio;
@@ -282,7 +367,7 @@ TRACE_EVENT(sched_process_wait,
);
/*
- * Tracepoint for do_fork:
+ * Tracepoint for kernel_clone:
*/
TRACE_EVENT(sched_process_fork,
@@ -487,7 +572,11 @@ TRACE_EVENT(sched_process_hang,
);
#endif /* CONFIG_DETECT_HUNG_TASK */
-DECLARE_EVENT_CLASS(sched_move_task_template,
+/*
+ * Tracks migration of tasks from one runqueue to another. Can be used to
+ * detect if automatic NUMA balancing is bouncing between nodes.
+ */
+TRACE_EVENT(sched_move_numa,
TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
@@ -519,23 +608,7 @@ DECLARE_EVENT_CLASS(sched_move_task_template,
__entry->dst_cpu, __entry->dst_nid)
);
-/*
- * Tracks migration of tasks from one runqueue to another. Can be used to
- * detect if automatic NUMA balancing is bouncing between nodes
- */
-DEFINE_EVENT(sched_move_task_template, sched_move_numa,
- TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
-
- TP_ARGS(tsk, src_cpu, dst_cpu)
-);
-
-DEFINE_EVENT(sched_move_task_template, sched_stick_numa,
- TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
-
- TP_ARGS(tsk, src_cpu, dst_cpu)
-);
-
-TRACE_EVENT(sched_swap_numa,
+DECLARE_EVENT_CLASS(sched_numa_pair_template,
TP_PROTO(struct task_struct *src_tsk, int src_cpu,
struct task_struct *dst_tsk, int dst_cpu),
@@ -561,11 +634,11 @@ TRACE_EVENT(sched_swap_numa,
__entry->src_ngid = task_numa_group_id(src_tsk);
__entry->src_cpu = src_cpu;
__entry->src_nid = cpu_to_node(src_cpu);
- __entry->dst_pid = task_pid_nr(dst_tsk);
- __entry->dst_tgid = task_tgid_nr(dst_tsk);
- __entry->dst_ngid = task_numa_group_id(dst_tsk);
+ __entry->dst_pid = dst_tsk ? task_pid_nr(dst_tsk) : 0;
+ __entry->dst_tgid = dst_tsk ? task_tgid_nr(dst_tsk) : 0;
+ __entry->dst_ngid = dst_tsk ? task_numa_group_id(dst_tsk) : 0;
__entry->dst_cpu = dst_cpu;
- __entry->dst_nid = cpu_to_node(dst_cpu);
+ __entry->dst_nid = dst_cpu >= 0 ? cpu_to_node(dst_cpu) : -1;
),
TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
@@ -575,6 +648,23 @@ TRACE_EVENT(sched_swap_numa,
__entry->dst_cpu, __entry->dst_nid)
);
+DEFINE_EVENT(sched_numa_pair_template, sched_stick_numa,
+
+ TP_PROTO(struct task_struct *src_tsk, int src_cpu,
+ struct task_struct *dst_tsk, int dst_cpu),
+
+ TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
+);
+
+DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,
+
+ TP_PROTO(struct task_struct *src_tsk, int src_cpu,
+ struct task_struct *dst_tsk, int dst_cpu),
+
+ TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
+);
+
+
/*
* Tracepoint for waking a polling cpu without an IPI.
*/
@@ -613,6 +703,10 @@ DECLARE_TRACE(pelt_dl_tp,
TP_PROTO(struct rq *rq),
TP_ARGS(rq));
+DECLARE_TRACE(pelt_thermal_tp,
+ TP_PROTO(struct rq *rq),
+ TP_ARGS(rq));
+
DECLARE_TRACE(pelt_irq_tp,
TP_PROTO(struct rq *rq),
TP_ARGS(rq));
@@ -621,10 +715,26 @@ DECLARE_TRACE(pelt_se_tp,
TP_PROTO(struct sched_entity *se),
TP_ARGS(se));
+DECLARE_TRACE(sched_cpu_capacity_tp,
+ TP_PROTO(struct rq *rq),
+ TP_ARGS(rq));
+
DECLARE_TRACE(sched_overutilized_tp,
TP_PROTO(struct root_domain *rd, bool overutilized),
TP_ARGS(rd, overutilized));
+DECLARE_TRACE(sched_util_est_cfs_tp,
+ TP_PROTO(struct cfs_rq *cfs_rq),
+ TP_ARGS(cfs_rq));
+
+DECLARE_TRACE(sched_util_est_se_tp,
+ TP_PROTO(struct sched_entity *se),
+ TP_ARGS(se));
+
+DECLARE_TRACE(sched_update_nr_running_tp,
+ TP_PROTO(struct rq *rq, int change),
+ TP_ARGS(rq, change));
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
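
Two changes in this file deserve a gloss. First, sched_switch now receives prev_state as an explicit argument because prev->__state can be modified concurrently (e.g. by a wakeup) between the scheduling decision and the tracepoint firing, so the caller passes in the value it actually acted on. Second, __trace_sched_switch_state() converts the compacted state index back into the one-bit-per-state encoding userspace decoders expect. A sketch of that mapping, with an illustrative function name:

    static long state_index_to_mask(unsigned int idx)
    {
            /* index 0 (TASK_RUNNING) stays 0; index N becomes bit N-1,
             * so 1 -> 0x1 (S), 2 -> 0x2 (D), 3 -> 0x4 (T), ... */
            return idx ? (1L << (idx - 1)) : 0;
    }
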
diff --git a/include/trace/events/scmi.h b/include/trace/events/scmi.h
index f076c430d243..f160d68f961d 100644
--- a/include/trace/events/scmi.h
+++ b/include/trace/events/scmi.h
@@ -7,6 +7,31 @@
#include <linux/tracepoint.h>
+TRACE_EVENT(scmi_fc_call,
+ TP_PROTO(u8 protocol_id, u8 msg_id, u32 res_id, u32 val1, u32 val2),
+ TP_ARGS(protocol_id, msg_id, res_id, val1, val2),
+
+ TP_STRUCT__entry(
+ __field(u8, protocol_id)
+ __field(u8, msg_id)
+ __field(u32, res_id)
+ __field(u32, val1)
+ __field(u32, val2)
+ ),
+
+ TP_fast_assign(
+ __entry->protocol_id = protocol_id;
+ __entry->msg_id = msg_id;
+ __entry->res_id = res_id;
+ __entry->val1 = val1;
+ __entry->val2 = val2;
+ ),
+
+ TP_printk("pt=%02X msg_id=%02X res_id:%u vals=%u:%u",
+ __entry->protocol_id, __entry->msg_id,
+ __entry->res_id, __entry->val1, __entry->val2)
+);
+
TRACE_EVENT(scmi_xfer_begin,
TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
bool poll),
@@ -28,14 +53,42 @@ TRACE_EVENT(scmi_xfer_begin,
__entry->poll = poll;
),
- TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u poll=%u",
- __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
- __entry->seq, __entry->poll)
+ TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X poll=%u",
+ __entry->protocol_id, __entry->msg_id, __entry->seq,
+ __entry->transfer_id, __entry->poll)
+);
+
+TRACE_EVENT(scmi_xfer_response_wait,
+ TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
+ u32 timeout, bool poll),
+ TP_ARGS(transfer_id, msg_id, protocol_id, seq, timeout, poll),
+
+ TP_STRUCT__entry(
+ __field(int, transfer_id)
+ __field(u8, msg_id)
+ __field(u8, protocol_id)
+ __field(u16, seq)
+ __field(u32, timeout)
+ __field(bool, poll)
+ ),
+
+ TP_fast_assign(
+ __entry->transfer_id = transfer_id;
+ __entry->msg_id = msg_id;
+ __entry->protocol_id = protocol_id;
+ __entry->seq = seq;
+ __entry->timeout = timeout;
+ __entry->poll = poll;
+ ),
+
+ TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X tmo_ms=%u poll=%u",
+ __entry->protocol_id, __entry->msg_id, __entry->seq,
+ __entry->transfer_id, __entry->timeout, __entry->poll)
);
TRACE_EVENT(scmi_xfer_end,
TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
- u32 status),
+ int status),
TP_ARGS(transfer_id, msg_id, protocol_id, seq, status),
TP_STRUCT__entry(
@@ -43,7 +96,7 @@ TRACE_EVENT(scmi_xfer_end,
__field(u8, msg_id)
__field(u8, protocol_id)
__field(u16, seq)
- __field(u32, status)
+ __field(int, status)
),
TP_fast_assign(
@@ -54,9 +107,9 @@ TRACE_EVENT(scmi_xfer_end,
__entry->status = status;
),
- TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u status=%u",
- __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
- __entry->seq, __entry->status)
+ TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X s=%d",
+ __entry->protocol_id, __entry->msg_id, __entry->seq,
+ __entry->transfer_id, __entry->status)
);
TRACE_EVENT(scmi_rx_done,
@@ -80,9 +133,40 @@ TRACE_EVENT(scmi_rx_done,
__entry->msg_type = msg_type;
),
- TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u msg_type=%u",
- __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
- __entry->seq, __entry->msg_type)
+ TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X msg_type=%u",
+ __entry->protocol_id, __entry->msg_id, __entry->seq,
+ __entry->transfer_id, __entry->msg_type)
+);
+
+TRACE_EVENT(scmi_msg_dump,
+ TP_PROTO(u8 protocol_id, u8 msg_id, unsigned char *tag, u16 seq,
+ int status, void *buf, size_t len),
+ TP_ARGS(protocol_id, msg_id, tag, seq, status, buf, len),
+
+ TP_STRUCT__entry(
+ __field(u8, protocol_id)
+ __field(u8, msg_id)
+ __array(char, tag, 5)
+ __field(u16, seq)
+ __field(int, status)
+ __field(size_t, len)
+ __dynamic_array(unsigned char, cmd, len)
+ ),
+
+ TP_fast_assign(
+ __entry->protocol_id = protocol_id;
+ __entry->msg_id = msg_id;
+ strscpy(__entry->tag, tag, 5);
+ __entry->seq = seq;
+ __entry->status = status;
+ __entry->len = len;
+ memcpy(__get_dynamic_array(cmd), buf, __entry->len);
+ ),
+
+ TP_printk("pt=%02X t=%s msg_id=%02X seq=%04X s=%d pyld=%s",
+ __entry->protocol_id, __entry->tag, __entry->msg_id,
+ __entry->seq, __entry->status,
+ __print_hex_str(__get_dynamic_array(cmd), __entry->len))
);
#endif /* _TRACE_SCMI_H */
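
The new scmi_msg_dump event is the most involved of these additions: it sizes a __dynamic_array() to the payload length at record time, snapshots the buffer with memcpy(), and renders it with __print_hex_str(). A sketch of a call site, assuming made-up payload and tag values:

    static void demo_dump(u8 protocol_id, u8 msg_id, u16 seq)
    {
            u8 payload[4] = { 0x00, 0x01, 0x02, 0x03 };

            /* the tag must fit the 5-byte array, NUL included */
            trace_scmi_msg_dump(protocol_id, msg_id,
                                (unsigned char *)"CMND", seq,
                                0 /* status */, payload, sizeof(payload));
    }
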
diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h
index f624969a4f14..a2c7befd451a 100644
--- a/include/trace/events/scsi.h
+++ b/include/trace/events/scsi.h
@@ -124,50 +124,6 @@
scsi_hostbyte_name(DID_TRANSPORT_DISRUPTED), \
scsi_hostbyte_name(DID_TRANSPORT_FAILFAST))
-#define scsi_driverbyte_name(result) { result, #result }
-#define show_driverbyte_name(val) \
- __print_symbolic(val, \
- scsi_driverbyte_name(DRIVER_OK), \
- scsi_driverbyte_name(DRIVER_BUSY), \
- scsi_driverbyte_name(DRIVER_SOFT), \
- scsi_driverbyte_name(DRIVER_MEDIA), \
- scsi_driverbyte_name(DRIVER_ERROR), \
- scsi_driverbyte_name(DRIVER_INVALID), \
- scsi_driverbyte_name(DRIVER_TIMEOUT), \
- scsi_driverbyte_name(DRIVER_HARD), \
- scsi_driverbyte_name(DRIVER_SENSE))
-
-#define scsi_msgbyte_name(result) { result, #result }
-#define show_msgbyte_name(val) \
- __print_symbolic(val, \
- scsi_msgbyte_name(COMMAND_COMPLETE), \
- scsi_msgbyte_name(EXTENDED_MESSAGE), \
- scsi_msgbyte_name(SAVE_POINTERS), \
- scsi_msgbyte_name(RESTORE_POINTERS), \
- scsi_msgbyte_name(DISCONNECT), \
- scsi_msgbyte_name(INITIATOR_ERROR), \
- scsi_msgbyte_name(ABORT_TASK_SET), \
- scsi_msgbyte_name(MESSAGE_REJECT), \
- scsi_msgbyte_name(NOP), \
- scsi_msgbyte_name(MSG_PARITY_ERROR), \
- scsi_msgbyte_name(LINKED_CMD_COMPLETE), \
- scsi_msgbyte_name(LINKED_FLG_CMD_COMPLETE), \
- scsi_msgbyte_name(TARGET_RESET), \
- scsi_msgbyte_name(ABORT_TASK), \
- scsi_msgbyte_name(CLEAR_TASK_SET), \
- scsi_msgbyte_name(INITIATE_RECOVERY), \
- scsi_msgbyte_name(RELEASE_RECOVERY), \
- scsi_msgbyte_name(CLEAR_ACA), \
- scsi_msgbyte_name(LOGICAL_UNIT_RESET), \
- scsi_msgbyte_name(SIMPLE_QUEUE_TAG), \
- scsi_msgbyte_name(HEAD_OF_QUEUE_TAG), \
- scsi_msgbyte_name(ORDERED_QUEUE_TAG), \
- scsi_msgbyte_name(IGNORE_WIDE_RESIDUE), \
- scsi_msgbyte_name(ACA), \
- scsi_msgbyte_name(QAS_REQUEST), \
- scsi_msgbyte_name(BUS_DEVICE_RESET), \
- scsi_msgbyte_name(ABORT))
-
#define scsi_statusbyte_name(result) { result, #result }
#define show_statusbyte_name(val) \
__print_symbolic(val, \
@@ -210,6 +166,8 @@ TRACE_EVENT(scsi_dispatch_cmd_start,
__field( unsigned int, lun )
__field( unsigned int, opcode )
__field( unsigned int, cmd_len )
+ __field( int, driver_tag)
+ __field( int, scheduler_tag)
__field( unsigned int, data_sglen )
__field( unsigned int, prot_sglen )
__field( unsigned char, prot_op )
@@ -223,6 +181,8 @@ TRACE_EVENT(scsi_dispatch_cmd_start,
__entry->lun = cmd->device->lun;
__entry->opcode = cmd->cmnd[0];
__entry->cmd_len = cmd->cmd_len;
+ __entry->driver_tag = scsi_cmd_to_rq(cmd)->tag;
+ __entry->scheduler_tag = scsi_cmd_to_rq(cmd)->internal_tag;
__entry->data_sglen = scsi_sg_count(cmd);
__entry->prot_sglen = scsi_prot_sg_count(cmd);
__entry->prot_op = scsi_get_prot_op(cmd);
@@ -230,11 +190,11 @@ TRACE_EVENT(scsi_dispatch_cmd_start,
),
TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
- " prot_op=%s cmnd=(%s %s raw=%s)",
+ " prot_op=%s driver_tag=%d scheduler_tag=%d cmnd=(%s %s raw=%s)",
__entry->host_no, __entry->channel, __entry->id,
__entry->lun, __entry->data_sglen, __entry->prot_sglen,
- show_prot_op_name(__entry->prot_op),
- show_opcode_name(__entry->opcode),
+ show_prot_op_name(__entry->prot_op), __entry->driver_tag,
+ __entry->scheduler_tag, show_opcode_name(__entry->opcode),
__parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len))
);
@@ -253,6 +213,8 @@ TRACE_EVENT(scsi_dispatch_cmd_error,
__field( int, rtn )
__field( unsigned int, opcode )
__field( unsigned int, cmd_len )
+ __field( int, driver_tag)
+ __field( int, scheduler_tag)
__field( unsigned int, data_sglen )
__field( unsigned int, prot_sglen )
__field( unsigned char, prot_op )
@@ -267,6 +229,8 @@ TRACE_EVENT(scsi_dispatch_cmd_error,
__entry->rtn = rtn;
__entry->opcode = cmd->cmnd[0];
__entry->cmd_len = cmd->cmd_len;
+ __entry->driver_tag = scsi_cmd_to_rq(cmd)->tag;
+ __entry->scheduler_tag = scsi_cmd_to_rq(cmd)->internal_tag;
__entry->data_sglen = scsi_sg_count(cmd);
__entry->prot_sglen = scsi_prot_sg_count(cmd);
__entry->prot_op = scsi_get_prot_op(cmd);
@@ -274,11 +238,12 @@ TRACE_EVENT(scsi_dispatch_cmd_error,
),
TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
- " prot_op=%s cmnd=(%s %s raw=%s) rtn=%d",
+ " prot_op=%s driver_tag=%d scheduler_tag=%d cmnd=(%s %s raw=%s)" \
+ " rtn=%d",
__entry->host_no, __entry->channel, __entry->id,
__entry->lun, __entry->data_sglen, __entry->prot_sglen,
- show_prot_op_name(__entry->prot_op),
- show_opcode_name(__entry->opcode),
+ show_prot_op_name(__entry->prot_op), __entry->driver_tag,
+ __entry->scheduler_tag, show_opcode_name(__entry->opcode),
__parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
__entry->rtn)
@@ -298,6 +263,8 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
__field( int, result )
__field( unsigned int, opcode )
__field( unsigned int, cmd_len )
+ __field( int, driver_tag)
+ __field( int, scheduler_tag)
__field( unsigned int, data_sglen )
__field( unsigned int, prot_sglen )
__field( unsigned char, prot_op )
@@ -312,24 +279,26 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
__entry->result = cmd->result;
__entry->opcode = cmd->cmnd[0];
__entry->cmd_len = cmd->cmd_len;
+ __entry->driver_tag = scsi_cmd_to_rq(cmd)->tag;
+ __entry->scheduler_tag = scsi_cmd_to_rq(cmd)->internal_tag;
__entry->data_sglen = scsi_sg_count(cmd);
__entry->prot_sglen = scsi_prot_sg_count(cmd);
__entry->prot_op = scsi_get_prot_op(cmd);
memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);
),
- TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u " \
- "prot_sgl=%u prot_op=%s cmnd=(%s %s raw=%s) result=(driver=" \
- "%s host=%s message=%s status=%s)",
+ TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u " \
+ "prot_op=%s driver_tag=%d scheduler_tag=%d cmnd=(%s %s raw=%s) " \
+ "result=(driver=%s host=%s message=%s status=%s)",
__entry->host_no, __entry->channel, __entry->id,
__entry->lun, __entry->data_sglen, __entry->prot_sglen,
- show_prot_op_name(__entry->prot_op),
- show_opcode_name(__entry->opcode),
+ show_prot_op_name(__entry->prot_op), __entry->driver_tag,
+ __entry->scheduler_tag, show_opcode_name(__entry->opcode),
__parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
- show_driverbyte_name(((__entry->result) >> 24) & 0xff),
+ "DRIVER_OK",
show_hostbyte_name(((__entry->result) >> 16) & 0xff),
- show_msgbyte_name(((__entry->result) >> 8) & 0xff),
+ "COMMAND_COMPLETE",
show_statusbyte_name(__entry->result & 0xff))
);
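
The fixed "DRIVER_OK" and "COMMAND_COMPLETE" strings above look odd until you recall the layout of the 32-bit result word: the SCSI status byte sits in bits 0-7, the legacy message byte in 8-15, the host byte in 16-23, and the legacy driver byte in 24-31. The driver and message bytes are always zero in current kernels, so the trace output hardwires their only possible names. A sketch of the slicing (helper name is illustrative):

    static void demo_decode(u32 result)
    {
            u8 status = result & 0xff;        /* SCSI status byte     */
            u8 msg    = (result >> 8) & 0xff; /* legacy, always 0 now */
            u8 host   = (result >> 16) & 0xff;/* transport outcome    */
            u8 driver = (result >> 24) & 0xff;/* legacy, always 0 now */

            pr_debug("status=%#x msg=%#x host=%#x driver=%#x\n",
                     status, msg, host, driver);
    }
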
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index 9e92f22eb086..50a974f7dfb4 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -9,31 +9,48 @@
#include <linux/netdevice.h>
#include <linux/tracepoint.h>
+#undef FN
+#define FN(reason) TRACE_DEFINE_ENUM(SKB_DROP_REASON_##reason);
+DEFINE_DROP_REASON(FN, FN)
+
+#undef FN
+#undef FNe
+#define FN(reason) { SKB_DROP_REASON_##reason, #reason },
+#define FNe(reason) { SKB_DROP_REASON_##reason, #reason }
+
/*
* Tracepoint for free an sk_buff:
*/
TRACE_EVENT(kfree_skb,
- TP_PROTO(struct sk_buff *skb, void *location),
+ TP_PROTO(struct sk_buff *skb, void *location,
+ enum skb_drop_reason reason),
- TP_ARGS(skb, location),
+ TP_ARGS(skb, location, reason),
TP_STRUCT__entry(
- __field( void *, skbaddr )
- __field( void *, location )
- __field( unsigned short, protocol )
+ __field(void *, skbaddr)
+ __field(void *, location)
+ __field(unsigned short, protocol)
+ __field(enum skb_drop_reason, reason)
),
TP_fast_assign(
__entry->skbaddr = skb;
__entry->location = location;
__entry->protocol = ntohs(skb->protocol);
+ __entry->reason = reason;
),
- TP_printk("skbaddr=%p protocol=%u location=%p",
- __entry->skbaddr, __entry->protocol, __entry->location)
+ TP_printk("skbaddr=%p protocol=%u location=%p reason: %s",
+ __entry->skbaddr, __entry->protocol, __entry->location,
+ __print_symbolic(__entry->reason,
+ DEFINE_DROP_REASON(FN, FNe)))
);
+#undef FN
+#undef FNe
+
TRACE_EVENT(consume_skb,
TP_PROTO(struct sk_buff *skb),
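
DEFINE_DROP_REASON() is the same single-table trick in another guise: the reason list is walked once with FN expanding to TRACE_DEFINE_ENUM(), and again with FN/FNe expanding to { value, "name" } pairs, where FNe covers the final entry so the brace list carries no trailing comma. With a hypothetical two-reason table, the two passes would produce roughly:

    TRACE_DEFINE_ENUM(SKB_DROP_REASON_NOT_SPECIFIED);
    TRACE_DEFINE_ENUM(SKB_DROP_REASON_NO_SOCKET);

    __print_symbolic(reason,
                     { SKB_DROP_REASON_NOT_SPECIFIED, "NOT_SPECIFIED" },
                     { SKB_DROP_REASON_NO_SOCKET,     "NO_SOCKET" })
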
diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
index a966d4b5ab37..777ee6cbe933 100644
--- a/include/trace/events/sock.h
+++ b/include/trace/events/sock.h
@@ -98,7 +98,7 @@ TRACE_EVENT(sock_exceed_buf_limit,
TP_STRUCT__entry(
__array(char, name, 32)
- __field(long *, sysctl_mem)
+ __array(long, sysctl_mem, 3)
__field(long, allocated)
__field(int, sysctl_rmem)
__field(int, rmem_alloc)
@@ -110,7 +110,9 @@ TRACE_EVENT(sock_exceed_buf_limit,
TP_fast_assign(
strncpy(__entry->name, prot->name, 32);
- __entry->sysctl_mem = prot->sysctl_mem;
+ __entry->sysctl_mem[0] = READ_ONCE(prot->sysctl_mem[0]);
+ __entry->sysctl_mem[1] = READ_ONCE(prot->sysctl_mem[1]);
+ __entry->sysctl_mem[2] = READ_ONCE(prot->sysctl_mem[2]);
__entry->allocated = allocated;
__entry->sysctl_rmem = sk_get_rmem0(sk, prot);
__entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
@@ -201,6 +203,66 @@ TRACE_EVENT(inet_sock_set_state,
show_tcp_state_name(__entry->newstate))
);
+TRACE_EVENT(inet_sk_error_report,
+
+ TP_PROTO(const struct sock *sk),
+
+ TP_ARGS(sk),
+
+ TP_STRUCT__entry(
+ __field(int, error)
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u16, family)
+ __field(__u16, protocol)
+ __array(__u8, saddr, 4)
+ __array(__u8, daddr, 4)
+ __array(__u8, saddr_v6, 16)
+ __array(__u8, daddr_v6, 16)
+ ),
+
+ TP_fast_assign(
+ struct inet_sock *inet = inet_sk(sk);
+ struct in6_addr *pin6;
+ __be32 *p32;
+
+ __entry->error = sk->sk_err;
+ __entry->family = sk->sk_family;
+ __entry->protocol = sk->sk_protocol;
+ __entry->sport = ntohs(inet->inet_sport);
+ __entry->dport = ntohs(inet->inet_dport);
+
+ p32 = (__be32 *) __entry->saddr;
+ *p32 = inet->inet_saddr;
+
+ p32 = (__be32 *) __entry->daddr;
+ *p32 = inet->inet_daddr;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6) {
+ pin6 = (struct in6_addr *)__entry->saddr_v6;
+ *pin6 = sk->sk_v6_rcv_saddr;
+ pin6 = (struct in6_addr *)__entry->daddr_v6;
+ *pin6 = sk->sk_v6_daddr;
+ } else
+#endif
+ {
+ pin6 = (struct in6_addr *)__entry->saddr_v6;
+ ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
+ pin6 = (struct in6_addr *)__entry->daddr_v6;
+ ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
+ }
+ ),
+
+ TP_printk("family=%s protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c error=%d",
+ show_family_name(__entry->family),
+ show_inet_protocol_name(__entry->protocol),
+ __entry->sport, __entry->dport,
+ __entry->saddr, __entry->daddr,
+ __entry->saddr_v6, __entry->daddr_v6,
+ __entry->error)
+);
+
#endif /* _TRACE_SOCK_H */
/* This part must be outside protection */
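
The sock_exceed_buf_limit change is a correctness fix worth spelling out: the old event stored the sysctl_mem pointer itself, which is useless (and potentially stale) by the time the ring buffer is read, so the event now copies the three limits by value at record time. A sketch of the snapshot pattern, with an illustrative helper name:

    static void snapshot_mem_limits(const struct proto *prot, long snap[3])
    {
            /* copy by value; READ_ONCE() guards against torn or
             * refetched loads while the limits are being rewritten */
            snap[0] = READ_ONCE(prot->sysctl_mem[0]);       /* min      */
            snap[1] = READ_ONCE(prot->sysctl_mem[1]);       /* pressure */
            snap[2] = READ_ONCE(prot->sysctl_mem[2]);       /* max      */
    }
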
diff --git a/include/trace/events/sof.h b/include/trace/events/sof.h
new file mode 100644
index 000000000000..21c2a1efb9f6
--- /dev/null
+++ b/include/trace/events/sof.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2022 Intel Corporation. All rights reserved.
+ *
+ * Author: Noah Klayman <noah.klayman@intel.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sof
+
+#if !defined(_TRACE_SOF_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SOF_H
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+#include <sound/sof/stream.h>
+#include "../../../sound/soc/sof/sof-audio.h"
+
+DECLARE_EVENT_CLASS(sof_widget_template,
+ TP_PROTO(struct snd_sof_widget *swidget),
+ TP_ARGS(swidget),
+ TP_STRUCT__entry(
+ __string(name, swidget->widget->name)
+ __field(int, use_count)
+ ),
+ TP_fast_assign(
+ __assign_str(name, swidget->widget->name);
+ __entry->use_count = swidget->use_count;
+ ),
+ TP_printk("name=%s use_count=%d", __get_str(name), __entry->use_count)
+);
+
+DEFINE_EVENT(sof_widget_template, sof_widget_setup,
+ TP_PROTO(struct snd_sof_widget *swidget),
+ TP_ARGS(swidget)
+);
+
+DEFINE_EVENT(sof_widget_template, sof_widget_free,
+ TP_PROTO(struct snd_sof_widget *swidget),
+ TP_ARGS(swidget)
+);
+
+TRACE_EVENT(sof_ipc3_period_elapsed_position,
+ TP_PROTO(struct snd_sof_dev *sdev, struct sof_ipc_stream_posn *posn),
+ TP_ARGS(sdev, posn),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __field(u64, host_posn)
+ __field(u64, dai_posn)
+ __field(u64, wallclock)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __entry->host_posn = posn->host_posn;
+ __entry->dai_posn = posn->dai_posn;
+ __entry->wallclock = posn->wallclock;
+ ),
+ TP_printk("device_name=%s host_posn=%#llx dai_posn=%#llx wallclock=%#llx",
+ __get_str(device_name), __entry->host_posn, __entry->dai_posn,
+ __entry->wallclock)
+);
+
+TRACE_EVENT(sof_pcm_pointer_position,
+ TP_PROTO(struct snd_sof_dev *sdev,
+ struct snd_sof_pcm *spcm,
+ struct snd_pcm_substream *substream,
+ snd_pcm_uframes_t dma_posn,
+ snd_pcm_uframes_t dai_posn
+ ),
+ TP_ARGS(sdev, spcm, substream, dma_posn, dai_posn),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __field(u32, pcm_id)
+ __field(int, stream)
+ __field(unsigned long, dma_posn)
+ __field(unsigned long, dai_posn)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __entry->pcm_id = le32_to_cpu(spcm->pcm.pcm_id);
+ __entry->stream = substream->stream;
+ __entry->dma_posn = dma_posn;
+ __entry->dai_posn = dai_posn;
+ ),
+ TP_printk("device_name=%s pcm_id=%d stream=%d dma_posn=%lu dai_posn=%lu",
+ __get_str(device_name), __entry->pcm_id, __entry->stream,
+ __entry->dma_posn, __entry->dai_posn)
+);
+
+TRACE_EVENT(sof_stream_position_ipc_rx,
+ TP_PROTO(struct device *dev),
+ TP_ARGS(dev),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(dev))
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(dev));
+ ),
+ TP_printk("device_name=%s", __get_str(device_name))
+);
+
+TRACE_EVENT(sof_ipc4_fw_config,
+ TP_PROTO(struct snd_sof_dev *sdev, char *key, u32 value),
+ TP_ARGS(sdev, key, value),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __string(key, key)
+ __field(u32, value)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __assign_str(key, key);
+ __entry->value = value;
+ ),
+ TP_printk("device_name=%s key=%s value=%d",
+ __get_str(device_name), __get_str(key), __entry->value)
+);
+
+#endif /* _TRACE_SOF_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
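
Every event in this new file captures strings through the __string()/__assign_str() pair, which reserves variable-length space in the ring buffer and copies the text at event time, rather than recording a pointer that may dangle by the time the buffer is read. The skeleton of the pattern, with a made-up event name:

    TRACE_EVENT(demo_named,
            TP_PROTO(struct device *dev),
            TP_ARGS(dev),
            TP_STRUCT__entry(
                    __string(name, dev_name(dev))   /* reserve space */
            ),
            TP_fast_assign(
                    __assign_str(name, dev_name(dev)); /* copy now */
            ),
            TP_printk("name=%s", __get_str(name))
    );
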
diff --git a/include/trace/events/sof_intel.h b/include/trace/events/sof_intel.h
new file mode 100644
index 000000000000..2a77f9d26c0b
--- /dev/null
+++ b/include/trace/events/sof_intel.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2022 Intel Corporation. All rights reserved.
+ *
+ * Author: Noah Klayman <noah.klayman@intel.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sof_intel
+
+#if !defined(_TRACE_SOF_INTEL_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SOF_INTEL_H
+#include <linux/tracepoint.h>
+#include <sound/hdaudio.h>
+#include "../../../sound/soc/sof/sof-audio.h"
+
+TRACE_EVENT(sof_intel_hda_irq,
+ TP_PROTO(struct snd_sof_dev *sdev, char *source),
+ TP_ARGS(sdev, source),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __string(source, source)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __assign_str(source, source);
+ ),
+ TP_printk("device_name=%s source=%s",
+ __get_str(device_name), __get_str(source))
+);
+
+DECLARE_EVENT_CLASS(sof_intel_ipc_firmware_template,
+	TP_PROTO(struct snd_sof_dev *sdev, u32 msg, u32 msg_ext),
+	TP_ARGS(sdev, msg, msg_ext),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __field(u32, msg)
+ __field(u32, msg_ext)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __entry->msg = msg;
+ __entry->msg_ext = msg_ext;
+ ),
+ TP_printk("device_name=%s msg=%#x msg_ext=%#x",
+ __get_str(device_name), __entry->msg, __entry->msg_ext)
+);
+
+DEFINE_EVENT(sof_intel_ipc_firmware_template, sof_intel_ipc_firmware_response,
+ TP_PROTO(struct snd_sof_dev *sdev, u32 msg, u32 msg_ext),
+ TP_ARGS(sdev, msg, msg_ext)
+);
+
+DEFINE_EVENT(sof_intel_ipc_firmware_template, sof_intel_ipc_firmware_initiated,
+ TP_PROTO(struct snd_sof_dev *sdev, u32 msg, u32 msg_ext),
+ TP_ARGS(sdev, msg, msg_ext)
+);
+
+TRACE_EVENT(sof_intel_D0I3C_updated,
+ TP_PROTO(struct snd_sof_dev *sdev, u8 reg),
+ TP_ARGS(sdev, reg),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __field(u8, reg)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __entry->reg = reg;
+ ),
+ TP_printk("device_name=%s register=%#x",
+ __get_str(device_name), __entry->reg)
+);
+
+TRACE_EVENT(sof_intel_hda_irq_ipc_check,
+ TP_PROTO(struct snd_sof_dev *sdev, u32 irq_status),
+ TP_ARGS(sdev, irq_status),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __field(u32, irq_status)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __entry->irq_status = irq_status;
+ ),
+ TP_printk("device_name=%s irq_status=%#x",
+ __get_str(device_name), __entry->irq_status)
+);
+
+TRACE_EVENT(sof_intel_hda_dsp_pcm,
+ TP_PROTO(struct snd_sof_dev *sdev,
+ struct hdac_stream *hstream,
+ struct snd_pcm_substream *substream,
+ snd_pcm_uframes_t pos
+ ),
+ TP_ARGS(sdev, hstream, substream, pos),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __field(u32, hstream_index)
+ __field(u32, substream)
+ __field(unsigned long, pos)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __entry->hstream_index = hstream->index;
+ __entry->substream = substream->stream;
+ __entry->pos = pos;
+ ),
+ TP_printk("device_name=%s hstream_index=%d substream=%d pos=%lu",
+ __get_str(device_name), __entry->hstream_index,
+ __entry->substream, __entry->pos)
+);
+
+TRACE_EVENT(sof_intel_hda_dsp_stream_status,
+ TP_PROTO(struct device *dev, struct hdac_stream *s, u32 status),
+ TP_ARGS(dev, s, status),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(dev))
+ __field(u32, stream)
+ __field(u32, status)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(dev));
+ __entry->stream = s->index;
+ __entry->status = status;
+ ),
+ TP_printk("device_name=%s stream=%d status=%#x",
+ __get_str(device_name), __entry->stream, __entry->status)
+);
+
+TRACE_EVENT(sof_intel_hda_dsp_check_stream_irq,
+ TP_PROTO(struct snd_sof_dev *sdev, u32 status),
+ TP_ARGS(sdev, status),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __field(u32, status)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __entry->status = status;
+ ),
+ TP_printk("device_name=%s status=%#x",
+ __get_str(device_name), __entry->status)
+);
+
+#endif /* _TRACE_SOF_INTEL_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/spi.h b/include/trace/events/spi.h
index 0dd9171d2ad8..c0d9844befd7 100644
--- a/include/trace/events/spi.h
+++ b/include/trace/events/spi.h
@@ -42,6 +42,63 @@ DEFINE_EVENT(spi_controller, spi_controller_busy,
);
+TRACE_EVENT(spi_setup,
+ TP_PROTO(struct spi_device *spi, int status),
+ TP_ARGS(spi, status),
+
+ TP_STRUCT__entry(
+ __field(int, bus_num)
+ __field(int, chip_select)
+ __field(unsigned long, mode)
+ __field(unsigned int, bits_per_word)
+ __field(unsigned int, max_speed_hz)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __entry->bus_num = spi->controller->bus_num;
+ __entry->chip_select = spi->chip_select;
+ __entry->mode = spi->mode;
+ __entry->bits_per_word = spi->bits_per_word;
+ __entry->max_speed_hz = spi->max_speed_hz;
+ __entry->status = status;
+ ),
+
+ TP_printk("spi%d.%d setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d",
+ __entry->bus_num, __entry->chip_select,
+ (__entry->mode & SPI_MODE_X_MASK),
+ (__entry->mode & SPI_CS_HIGH) ? "cs_high, " : "",
+ (__entry->mode & SPI_LSB_FIRST) ? "lsb, " : "",
+ (__entry->mode & SPI_3WIRE) ? "3wire, " : "",
+ (__entry->mode & SPI_LOOP) ? "loopback, " : "",
+ __entry->bits_per_word, __entry->max_speed_hz,
+ __entry->status)
+);
+
+TRACE_EVENT(spi_set_cs,
+ TP_PROTO(struct spi_device *spi, bool enable),
+ TP_ARGS(spi, enable),
+
+ TP_STRUCT__entry(
+ __field(int, bus_num)
+ __field(int, chip_select)
+ __field(unsigned long, mode)
+ __field(bool, enable)
+ ),
+
+ TP_fast_assign(
+ __entry->bus_num = spi->controller->bus_num;
+ __entry->chip_select = spi->chip_select;
+ __entry->mode = spi->mode;
+ __entry->enable = enable;
+ ),
+
+ TP_printk("spi%d.%d %s%s",
+ __entry->bus_num, __entry->chip_select,
+ __entry->enable ? "activate" : "deactivate",
+ (__entry->mode & SPI_CS_HIGH) ? ", cs_high" : "")
+);
+
DECLARE_EVENT_CLASS(spi_message,
TP_PROTO(struct spi_message *msg),
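
spi_setup's TP_printk decodes the mode word inline: the low bits (SPI_MODE_X_MASK) select the CPOL/CPHA mode 0-3, and each remaining flag contributes a text fragment. A sketch of the decoding for one illustrative value (requires <linux/spi/spi.h>):

    static void demo_mode_line(void)
    {
            unsigned long mode = SPI_MODE_3 | SPI_CS_HIGH; /* illustrative */

            /* prints "mode 3, cs_high" for the value above */
            pr_info("mode %lu%s\n", mode & SPI_MODE_X_MASK,
                    (mode & SPI_CS_HIGH) ? ", cs_high" : "");
    }
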
diff --git a/include/trace/events/spmi.h b/include/trace/events/spmi.h
index 8b60efe18ba6..a6819fd85cdf 100644
--- a/include/trace/events/spmi.h
+++ b/include/trace/events/spmi.h
@@ -21,15 +21,15 @@ TRACE_EVENT(spmi_write_begin,
__field ( u8, sid )
__field ( u16, addr )
__field ( u8, len )
- __dynamic_array ( u8, buf, len + 1 )
+ __dynamic_array ( u8, buf, len )
),
TP_fast_assign(
__entry->opcode = opcode;
__entry->sid = sid;
__entry->addr = addr;
- __entry->len = len + 1;
- memcpy(__get_dynamic_array(buf), buf, len + 1);
+ __entry->len = len;
+ memcpy(__get_dynamic_array(buf), buf, len);
),
TP_printk("opc=%d sid=%02d addr=0x%04x len=%d buf=0x[%*phD]",
@@ -92,7 +92,7 @@ TRACE_EVENT(spmi_read_end,
__field ( u16, addr )
__field ( int, ret )
__field ( u8, len )
- __dynamic_array ( u8, buf, len + 1 )
+ __dynamic_array ( u8, buf, len )
),
TP_fast_assign(
@@ -100,8 +100,8 @@ TRACE_EVENT(spmi_read_end,
__entry->sid = sid;
__entry->addr = addr;
__entry->ret = ret;
- __entry->len = len + 1;
- memcpy(__get_dynamic_array(buf), buf, len + 1);
+ __entry->len = len;
+ memcpy(__get_dynamic_array(buf), buf, len);
),
TP_printk("opc=%d sid=%02d addr=0x%04x ret=%d len=%02d buf=0x[%*phD]",
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index ee993575d2fa..f48f2ab9d238 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -14,6 +14,211 @@
#include <linux/net.h>
#include <linux/tracepoint.h>
+#include <trace/events/sunrpc_base.h>
+
+TRACE_DEFINE_ENUM(SOCK_STREAM);
+TRACE_DEFINE_ENUM(SOCK_DGRAM);
+TRACE_DEFINE_ENUM(SOCK_RAW);
+TRACE_DEFINE_ENUM(SOCK_RDM);
+TRACE_DEFINE_ENUM(SOCK_SEQPACKET);
+TRACE_DEFINE_ENUM(SOCK_DCCP);
+TRACE_DEFINE_ENUM(SOCK_PACKET);
+
+#define show_socket_type(type) \
+ __print_symbolic(type, \
+ { SOCK_STREAM, "STREAM" }, \
+ { SOCK_DGRAM, "DGRAM" }, \
+ { SOCK_RAW, "RAW" }, \
+ { SOCK_RDM, "RDM" }, \
+ { SOCK_SEQPACKET, "SEQPACKET" }, \
+ { SOCK_DCCP, "DCCP" }, \
+ { SOCK_PACKET, "PACKET" })
+
+/* This list is known to be incomplete; add new enums as needed. */
+TRACE_DEFINE_ENUM(AF_UNSPEC);
+TRACE_DEFINE_ENUM(AF_UNIX);
+TRACE_DEFINE_ENUM(AF_LOCAL);
+TRACE_DEFINE_ENUM(AF_INET);
+TRACE_DEFINE_ENUM(AF_INET6);
+
+#define rpc_show_address_family(family) \
+ __print_symbolic(family, \
+ { AF_UNSPEC, "AF_UNSPEC" }, \
+ { AF_UNIX, "AF_UNIX" }, \
+ { AF_LOCAL, "AF_LOCAL" }, \
+ { AF_INET, "AF_INET" }, \
+ { AF_INET6, "AF_INET6" })
+
+DECLARE_EVENT_CLASS(rpc_xdr_buf_class,
+ TP_PROTO(
+ const struct rpc_task *task,
+ const struct xdr_buf *xdr
+ ),
+
+ TP_ARGS(task, xdr),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, head_base)
+ __field(size_t, head_len)
+ __field(const void *, tail_base)
+ __field(size_t, tail_len)
+ __field(unsigned int, page_base)
+ __field(unsigned int, page_len)
+ __field(unsigned int, msg_len)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client ?
+ task->tk_client->cl_clid : -1;
+ __entry->head_base = xdr->head[0].iov_base;
+ __entry->head_len = xdr->head[0].iov_len;
+ __entry->tail_base = xdr->tail[0].iov_base;
+ __entry->tail_len = xdr->tail[0].iov_len;
+ __entry->page_base = xdr->page_base;
+ __entry->page_len = xdr->page_len;
+ __entry->msg_len = xdr->len;
+ ),
+
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " head=[%p,%zu] page=%u(%u) tail=[%p,%zu] len=%u",
+ __entry->task_id, __entry->client_id,
+ __entry->head_base, __entry->head_len,
+ __entry->page_len, __entry->page_base,
+ __entry->tail_base, __entry->tail_len,
+ __entry->msg_len
+ )
+);
+
+#define DEFINE_RPCXDRBUF_EVENT(name) \
+ DEFINE_EVENT(rpc_xdr_buf_class, \
+ rpc_xdr_##name, \
+ TP_PROTO( \
+ const struct rpc_task *task, \
+ const struct xdr_buf *xdr \
+ ), \
+ TP_ARGS(task, xdr))
+
+DEFINE_RPCXDRBUF_EVENT(sendto);
+DEFINE_RPCXDRBUF_EVENT(recvfrom);
+DEFINE_RPCXDRBUF_EVENT(reply_pages);
+
+
+DECLARE_EVENT_CLASS(rpc_clnt_class,
+ TP_PROTO(
+ const struct rpc_clnt *clnt
+ ),
+
+ TP_ARGS(clnt),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, client_id)
+ ),
+
+ TP_fast_assign(
+ __entry->client_id = clnt->cl_clid;
+ ),
+
+ TP_printk("client=" SUNRPC_TRACE_CLID_SPECIFIER, __entry->client_id)
+);
+
+#define DEFINE_RPC_CLNT_EVENT(name) \
+ DEFINE_EVENT(rpc_clnt_class, \
+ rpc_clnt_##name, \
+ TP_PROTO( \
+ const struct rpc_clnt *clnt \
+ ), \
+ TP_ARGS(clnt))
+
+DEFINE_RPC_CLNT_EVENT(free);
+DEFINE_RPC_CLNT_EVENT(killall);
+DEFINE_RPC_CLNT_EVENT(shutdown);
+DEFINE_RPC_CLNT_EVENT(release);
+DEFINE_RPC_CLNT_EVENT(replace_xprt);
+DEFINE_RPC_CLNT_EVENT(replace_xprt_err);
+
+TRACE_EVENT(rpc_clnt_new,
+ TP_PROTO(
+ const struct rpc_clnt *clnt,
+ const struct rpc_xprt *xprt,
+ const char *program,
+ const char *server
+ ),
+
+ TP_ARGS(clnt, xprt, program, server),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, client_id)
+ __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR])
+ __string(port, xprt->address_strings[RPC_DISPLAY_PORT])
+ __string(program, program)
+ __string(server, server)
+ ),
+
+ TP_fast_assign(
+ __entry->client_id = clnt->cl_clid;
+ __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
+ __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
+ __assign_str(program, program);
+ __assign_str(server, server);
+ ),
+
+ TP_printk("client=" SUNRPC_TRACE_CLID_SPECIFIER
+ " peer=[%s]:%s program=%s server=%s",
+ __entry->client_id, __get_str(addr), __get_str(port),
+ __get_str(program), __get_str(server))
+);
+
+TRACE_EVENT(rpc_clnt_new_err,
+ TP_PROTO(
+ const char *program,
+ const char *server,
+ int error
+ ),
+
+ TP_ARGS(program, server, error),
+
+ TP_STRUCT__entry(
+ __field(int, error)
+ __string(program, program)
+ __string(server, server)
+ ),
+
+ TP_fast_assign(
+ __entry->error = error;
+ __assign_str(program, program);
+ __assign_str(server, server);
+ ),
+
+ TP_printk("program=%s server=%s error=%d",
+ __get_str(program), __get_str(server), __entry->error)
+);
+
+TRACE_EVENT(rpc_clnt_clone_err,
+ TP_PROTO(
+ const struct rpc_clnt *clnt,
+ int error
+ ),
+
+ TP_ARGS(clnt, error),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, client_id)
+ __field(int, error)
+ ),
+
+ TP_fast_assign(
+ __entry->client_id = clnt->cl_clid;
+ __entry->error = error;
+ ),
+
+ TP_printk("client=" SUNRPC_TRACE_CLID_SPECIFIER " error=%d",
+ __entry->client_id, __entry->error)
+);
+
+
TRACE_DEFINE_ENUM(RPC_AUTH_OK);
TRACE_DEFINE_ENUM(RPC_AUTH_BADCRED);
TRACE_DEFINE_ENUM(RPC_AUTH_REJECTEDCRED);
@@ -52,7 +257,7 @@ DECLARE_EVENT_CLASS(rpc_task_status,
__entry->status = task->tk_status;
),
- TP_printk("task:%u@%u status=%d",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " status=%d",
__entry->task_id, __entry->client_id,
__entry->status)
);
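
Every hand-rolled "task:%u@%u" format in this file is being replaced by SUNRPC_TRACE_TASK_SPECIFIER from the newly included sunrpc_base.h. Assuming the definition there, the gain is a fixed-width form so task and client IDs align across all sunrpc trace output:

    /* Sketch of the shared specifier; see sunrpc_base.h for the
     * authoritative definition. */
    #define SUNRPC_TRACE_TASK_SPECIFIER     "task:%08x@%08x"
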
@@ -64,8 +269,10 @@ DECLARE_EVENT_CLASS(rpc_task_status,
TP_ARGS(task))
DEFINE_RPC_STATUS_EVENT(call);
-DEFINE_RPC_STATUS_EVENT(bind);
DEFINE_RPC_STATUS_EVENT(connect);
+DEFINE_RPC_STATUS_EVENT(timeout);
+DEFINE_RPC_STATUS_EVENT(retry_refresh);
+DEFINE_RPC_STATUS_EVENT(refresh);
TRACE_EVENT(rpc_request,
TP_PROTO(const struct rpc_task *task),
@@ -86,50 +293,33 @@ TRACE_EVENT(rpc_request,
__entry->client_id = task->tk_client->cl_clid;
__entry->version = task->tk_client->cl_vers;
__entry->async = RPC_IS_ASYNC(task);
- __assign_str(progname, task->tk_client->cl_program->name)
- __assign_str(procname, rpc_proc_name(task))
+ __assign_str(progname, task->tk_client->cl_program->name);
+ __assign_str(procname, rpc_proc_name(task));
),
- TP_printk("task:%u@%u %sv%d %s (%ssync)",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " %sv%d %s (%ssync)",
__entry->task_id, __entry->client_id,
__get_str(progname), __entry->version,
__get_str(procname), __entry->async ? "a": ""
)
);
-TRACE_DEFINE_ENUM(RPC_TASK_ASYNC);
-TRACE_DEFINE_ENUM(RPC_TASK_SWAPPER);
-TRACE_DEFINE_ENUM(RPC_CALL_MAJORSEEN);
-TRACE_DEFINE_ENUM(RPC_TASK_ROOTCREDS);
-TRACE_DEFINE_ENUM(RPC_TASK_DYNAMIC);
-TRACE_DEFINE_ENUM(RPC_TASK_SOFT);
-TRACE_DEFINE_ENUM(RPC_TASK_SOFTCONN);
-TRACE_DEFINE_ENUM(RPC_TASK_SENT);
-TRACE_DEFINE_ENUM(RPC_TASK_TIMEOUT);
-TRACE_DEFINE_ENUM(RPC_TASK_NOCONNECT);
-TRACE_DEFINE_ENUM(RPC_TASK_NO_RETRANS_TIMEOUT);
-
#define rpc_show_task_flags(flags) \
__print_flags(flags, "|", \
{ RPC_TASK_ASYNC, "ASYNC" }, \
{ RPC_TASK_SWAPPER, "SWAPPER" }, \
+ { RPC_TASK_MOVEABLE, "MOVEABLE" }, \
+ { RPC_TASK_NULLCREDS, "NULLCREDS" }, \
{ RPC_CALL_MAJORSEEN, "MAJORSEEN" }, \
- { RPC_TASK_ROOTCREDS, "ROOTCREDS" }, \
{ RPC_TASK_DYNAMIC, "DYNAMIC" }, \
+ { RPC_TASK_NO_ROUND_ROBIN, "NO_ROUND_ROBIN" }, \
{ RPC_TASK_SOFT, "SOFT" }, \
{ RPC_TASK_SOFTCONN, "SOFTCONN" }, \
{ RPC_TASK_SENT, "SENT" }, \
{ RPC_TASK_TIMEOUT, "TIMEOUT" }, \
{ RPC_TASK_NOCONNECT, "NOCONNECT" }, \
- { RPC_TASK_NO_RETRANS_TIMEOUT, "NORTO" })
-
-TRACE_DEFINE_ENUM(RPC_TASK_RUNNING);
-TRACE_DEFINE_ENUM(RPC_TASK_QUEUED);
-TRACE_DEFINE_ENUM(RPC_TASK_ACTIVE);
-TRACE_DEFINE_ENUM(RPC_TASK_NEED_XMIT);
-TRACE_DEFINE_ENUM(RPC_TASK_NEED_RECV);
-TRACE_DEFINE_ENUM(RPC_TASK_MSG_PIN_WAIT);
-TRACE_DEFINE_ENUM(RPC_TASK_SIGNALLED);
+ { RPC_TASK_NO_RETRANS_TIMEOUT, "NORTO" }, \
+ { RPC_TASK_CRED_NOREF, "CRED_NOREF" })
#define rpc_show_runstate(flags) \
__print_flags(flags, "|", \
@@ -166,7 +356,8 @@ DECLARE_EVENT_CLASS(rpc_task_running,
__entry->flags = task->tk_flags;
),
- TP_printk("task:%u@%d flags=%s runstate=%s status=%d action=%ps",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " flags=%s runstate=%s status=%d action=%ps",
__entry->task_id, __entry->client_id,
rpc_show_task_flags(__entry->flags),
rpc_show_runstate(__entry->runstate),
@@ -184,9 +375,13 @@ DECLARE_EVENT_CLASS(rpc_task_running,
DEFINE_RPC_RUNNING_EVENT(begin);
DEFINE_RPC_RUNNING_EVENT(run_action);
+DEFINE_RPC_RUNNING_EVENT(sync_sleep);
+DEFINE_RPC_RUNNING_EVENT(sync_wake);
DEFINE_RPC_RUNNING_EVENT(complete);
+DEFINE_RPC_RUNNING_EVENT(timeout);
DEFINE_RPC_RUNNING_EVENT(signalled);
DEFINE_RPC_RUNNING_EVENT(end);
+DEFINE_RPC_RUNNING_EVENT(call_done);
DECLARE_EVENT_CLASS(rpc_task_queued,
@@ -215,7 +410,8 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
__assign_str(q_name, rpc_qname(q));
),
- TP_printk("task:%u@%d flags=%s runstate=%s status=%d timeout=%lu queue=%s",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " flags=%s runstate=%s status=%d timeout=%lu queue=%s",
__entry->task_id, __entry->client_id,
rpc_show_task_flags(__entry->flags),
rpc_show_runstate(__entry->runstate),
@@ -251,7 +447,7 @@ DECLARE_EVENT_CLASS(rpc_failure,
__entry->client_id = task->tk_client->cl_clid;
),
- TP_printk("task:%u@%u",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER,
__entry->task_id, __entry->client_id)
);
@@ -287,13 +483,14 @@ DECLARE_EVENT_CLASS(rpc_reply_event,
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
- __assign_str(progname, task->tk_client->cl_program->name)
+ __assign_str(progname, task->tk_client->cl_program->name);
__entry->version = task->tk_client->cl_vers;
- __assign_str(procname, rpc_proc_name(task))
- __assign_str(servername, task->tk_xprt->servername)
+ __assign_str(procname, rpc_proc_name(task));
+ __assign_str(servername, task->tk_xprt->servername);
),
- TP_printk("task:%u@%d server=%s xid=0x%08x %sv%d %s",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " server=%s xid=0x%08x %sv%d %s",
__entry->task_id, __entry->client_id, __get_str(servername),
__entry->xid, __get_str(progname), __entry->version,
__get_str(procname))
@@ -316,6 +513,79 @@ DEFINE_RPC_REPLY_EVENT(stale_creds);
DEFINE_RPC_REPLY_EVENT(bad_creds);
DEFINE_RPC_REPLY_EVENT(auth_tooweak);
+#define DEFINE_RPCB_ERROR_EVENT(name) \
+ DEFINE_EVENT(rpc_reply_event, rpcb_##name##_err, \
+ TP_PROTO( \
+ const struct rpc_task *task \
+ ), \
+ TP_ARGS(task))
+
+DEFINE_RPCB_ERROR_EVENT(prog_unavail);
+DEFINE_RPCB_ERROR_EVENT(timeout);
+DEFINE_RPCB_ERROR_EVENT(bind_version);
+DEFINE_RPCB_ERROR_EVENT(unreachable);
+DEFINE_RPCB_ERROR_EVENT(unrecognized);
+
+TRACE_EVENT(rpc_buf_alloc,
+ TP_PROTO(
+ const struct rpc_task *task,
+ int status
+ ),
+
+ TP_ARGS(task, status),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(size_t, callsize)
+ __field(size_t, recvsize)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->callsize = task->tk_rqstp->rq_callsize;
+ __entry->recvsize = task->tk_rqstp->rq_rcvsize;
+ __entry->status = status;
+ ),
+
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " callsize=%zu recvsize=%zu status=%d",
+ __entry->task_id, __entry->client_id,
+ __entry->callsize, __entry->recvsize, __entry->status
+ )
+);
+
+TRACE_EVENT(rpc_call_rpcerror,
+ TP_PROTO(
+ const struct rpc_task *task,
+ int tk_status,
+ int rpc_status
+ ),
+
+ TP_ARGS(task, tk_status, rpc_status),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(int, tk_status)
+ __field(int, rpc_status)
+ ),
+
+ TP_fast_assign(
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->task_id = task->tk_pid;
+ __entry->tk_status = tk_status;
+ __entry->rpc_status = rpc_status;
+ ),
+
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " tk_status=%d rpc_status=%d",
+ __entry->task_id, __entry->client_id,
+ __entry->tk_status, __entry->rpc_status)
+);
+
TRACE_EVENT(rpc_stats_latency,
TP_PROTO(
@@ -344,14 +614,15 @@ TRACE_EVENT(rpc_stats_latency,
__entry->task_id = task->tk_pid;
__entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
__entry->version = task->tk_client->cl_vers;
- __assign_str(progname, task->tk_client->cl_program->name)
- __assign_str(procname, rpc_proc_name(task))
+ __assign_str(progname, task->tk_client->cl_program->name);
+ __assign_str(procname, rpc_proc_name(task));
__entry->backlog = ktime_to_us(backlog);
__entry->rtt = ktime_to_us(rtt);
__entry->execute = ktime_to_us(execute);
),
- TP_printk("task:%u@%d xid=0x%08x %sv%d %s backlog=%lu rtt=%lu execute=%lu",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " xid=0x%08x %sv%d %s backlog=%lu rtt=%lu execute=%lu",
__entry->task_id, __entry->client_id, __entry->xid,
__get_str(progname), __entry->version, __get_str(procname),
__entry->backlog, __entry->rtt, __entry->execute)
@@ -378,10 +649,10 @@ TRACE_EVENT(rpc_xdr_overflow,
__field(size_t, tail_len)
__field(unsigned int, page_len)
__field(unsigned int, len)
- __string(progname,
- xdr->rqst->rq_task->tk_client->cl_program->name)
- __string(procedure,
- xdr->rqst->rq_task->tk_msg.rpc_proc->p_name)
+ __string(progname, xdr->rqst ?
+ xdr->rqst->rq_task->tk_client->cl_program->name : "unknown")
+ __string(procedure, xdr->rqst ?
+ xdr->rqst->rq_task->tk_msg.rpc_proc->p_name : "unknown")
),
TP_fast_assign(
@@ -391,15 +662,15 @@ TRACE_EVENT(rpc_xdr_overflow,
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__assign_str(progname,
- task->tk_client->cl_program->name)
+ task->tk_client->cl_program->name);
__entry->version = task->tk_client->cl_vers;
- __assign_str(procedure, task->tk_msg.rpc_proc->p_name)
+ __assign_str(procedure, task->tk_msg.rpc_proc->p_name);
} else {
- __entry->task_id = 0;
- __entry->client_id = 0;
- __assign_str(progname, "unknown")
+ __entry->task_id = -1;
+ __entry->client_id = -1;
+ __assign_str(progname, "unknown");
__entry->version = 0;
- __assign_str(procedure, "unknown")
+ __assign_str(procedure, "unknown");
}
__entry->requested = requested;
__entry->end = xdr->end;
@@ -412,8 +683,8 @@ TRACE_EVENT(rpc_xdr_overflow,
__entry->len = xdr->buf->len;
),
- TP_printk(
- "task:%u@%u %sv%d %s requested=%zu p=%p end=%p xdr=[%p,%zu]/%u/[%p,%zu]/%u\n",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " %sv%d %s requested=%zu p=%p end=%p xdr=[%p,%zu]/%u/[%p,%zu]/%u\n",
__entry->task_id, __entry->client_id,
__get_str(progname), __entry->version, __get_str(procedure),
__entry->requested, __entry->p, __entry->end,
@@ -457,9 +728,9 @@ TRACE_EVENT(rpc_xdr_alignment,
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__assign_str(progname,
- task->tk_client->cl_program->name)
+ task->tk_client->cl_program->name);
__entry->version = task->tk_client->cl_vers;
- __assign_str(procedure, task->tk_msg.rpc_proc->p_name)
+ __assign_str(procedure, task->tk_msg.rpc_proc->p_name);
__entry->offset = offset;
__entry->copied = copied;
@@ -471,8 +742,8 @@ TRACE_EVENT(rpc_xdr_alignment,
__entry->len = xdr->buf->len;
),
- TP_printk(
- "task:%u@%u %sv%d %s offset=%zu copied=%u xdr=[%p,%zu]/%u/[%p,%zu]/%u\n",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " %sv%d %s offset=%zu copied=%u xdr=[%p,%zu]/%u/[%p,%zu]/%u\n",
__entry->task_id, __entry->client_id,
__get_str(progname), __entry->version, __get_str(procedure),
__entry->offset, __entry->copied,
@@ -483,43 +754,6 @@ TRACE_EVENT(rpc_xdr_alignment,
)
);
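/*
 * Note the sentinel change in rpc_xdr_overflow above: the "no task"
 * placeholder moves from 0, which is indistinguishable from a real id,
 * to -1. Stored in an unsigned int and printed through the new "%08x"
 * specifiers it renders unmistakably:
 *
 *	__entry->task_id = -1;	becomes "task:ffffffff@ffffffff"
 */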
-TRACE_EVENT(rpc_reply_pages,
- TP_PROTO(
- const struct rpc_rqst *req
- ),
-
- TP_ARGS(req),
-
- TP_STRUCT__entry(
- __field(unsigned int, task_id)
- __field(unsigned int, client_id)
- __field(const void *, head_base)
- __field(size_t, head_len)
- __field(const void *, tail_base)
- __field(size_t, tail_len)
- __field(unsigned int, page_len)
- ),
-
- TP_fast_assign(
- __entry->task_id = req->rq_task->tk_pid;
- __entry->client_id = req->rq_task->tk_client->cl_clid;
-
- __entry->head_base = req->rq_rcv_buf.head[0].iov_base;
- __entry->head_len = req->rq_rcv_buf.head[0].iov_len;
- __entry->page_len = req->rq_rcv_buf.page_len;
- __entry->tail_base = req->rq_rcv_buf.tail[0].iov_base;
- __entry->tail_len = req->rq_rcv_buf.tail[0].iov_len;
- ),
-
- TP_printk(
- "task:%u@%u xdr=[%p,%zu]/%u/[%p,%zu]\n",
- __entry->task_id, __entry->client_id,
- __entry->head_base, __entry->head_len,
- __entry->page_len,
- __entry->tail_base, __entry->tail_len
- )
-);
-
/*
* First define the enums in the below macros to be exported to userspace
* via TRACE_DEFINE_ENUM().
@@ -532,9 +766,9 @@ TRACE_EVENT(rpc_reply_pages,
#define RPC_SHOW_SOCKET \
EM( SS_FREE, "FREE" ) \
EM( SS_UNCONNECTED, "UNCONNECTED" ) \
- EM( SS_CONNECTING, "CONNECTING," ) \
- EM( SS_CONNECTED, "CONNECTED," ) \
- EMe(SS_DISCONNECTING, "DISCONNECTING" )
+ EM( SS_CONNECTING, "CONNECTING" ) \
+ EM( SS_CONNECTED, "CONNECTED" ) \
+ EMe( SS_DISCONNECTING, "DISCONNECTING" )
#define rpc_show_socket_state(state) \
__print_symbolic(state, RPC_SHOW_SOCKET)
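/*
 * RPC_SHOW_SOCKET uses the two-pass EM()/EMe() idiom described by the
 * comments above: the list is written once and expanded twice. The
 * surrounding header (not fully shown in this hunk) follows the usual
 * pattern:
 *
 *	#undef EM
 *	#undef EMe
 *	#define EM(a, b)	TRACE_DEFINE_ENUM(a);
 *	#define EMe(a, b)	TRACE_DEFINE_ENUM(a);
 *
 *	RPC_SHOW_SOCKET		(exports the enum values to userspace)
 *
 *	#undef EM
 *	#undef EMe
 *	#define EM(a, b)	{ a, b },
 *	#define EMe(a, b)	{ a, b }
 *
 * after which the very same list doubles as the __print_symbolic()
 * table.
 */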
@@ -559,6 +793,9 @@ RPC_SHOW_SOCKET
RPC_SHOW_SOCK
+
+#include <trace/events/net_probe_common.h>
+
/*
* Now redefine the EM() and EMe() macros to map the enums to the strings
* that will be printed in the output.
@@ -581,27 +818,32 @@ DECLARE_EVENT_CLASS(xs_socket_event,
__field(unsigned int, socket_state)
__field(unsigned int, sock_state)
__field(unsigned long long, ino)
- __string(dstaddr,
- xprt->address_strings[RPC_DISPLAY_ADDR])
- __string(dstport,
- xprt->address_strings[RPC_DISPLAY_PORT])
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
),
TP_fast_assign(
struct inode *inode = SOCK_INODE(socket);
+ const struct sock *sk = socket->sk;
+ const struct inet_sock *inet = inet_sk(sk);
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+
+ TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
__entry->socket_state = socket->state;
__entry->sock_state = socket->sk->sk_state;
__entry->ino = (unsigned long long)inode->i_ino;
- __assign_str(dstaddr,
- xprt->address_strings[RPC_DISPLAY_ADDR]);
- __assign_str(dstport,
- xprt->address_strings[RPC_DISPLAY_PORT]);
),
TP_printk(
- "socket:[%llu] dstaddr=%s/%s "
+ "socket:[%llu] srcaddr=%pISpc dstaddr=%pISpc "
"state=%u (%s) sk_state=%u (%s)",
- __entry->ino, __get_str(dstaddr), __get_str(dstport),
+ __entry->ino,
+ __entry->saddr,
+ __entry->daddr,
__entry->socket_state,
rpc_show_socket_state(__entry->socket_state),
__entry->sock_state,
@@ -631,29 +873,33 @@ DECLARE_EVENT_CLASS(xs_socket_event_done,
__field(unsigned int, socket_state)
__field(unsigned int, sock_state)
__field(unsigned long long, ino)
- __string(dstaddr,
- xprt->address_strings[RPC_DISPLAY_ADDR])
- __string(dstport,
- xprt->address_strings[RPC_DISPLAY_PORT])
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
),
TP_fast_assign(
struct inode *inode = SOCK_INODE(socket);
+ const struct sock *sk = socket->sk;
+ const struct inet_sock *inet = inet_sk(sk);
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+
+ TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
__entry->socket_state = socket->state;
__entry->sock_state = socket->sk->sk_state;
__entry->ino = (unsigned long long)inode->i_ino;
__entry->error = error;
- __assign_str(dstaddr,
- xprt->address_strings[RPC_DISPLAY_ADDR]);
- __assign_str(dstport,
- xprt->address_strings[RPC_DISPLAY_PORT]);
),
TP_printk(
- "error=%d socket:[%llu] dstaddr=%s/%s "
+ "error=%d socket:[%llu] srcaddr=%pISpc dstaddr=%pISpc "
"state=%u (%s) sk_state=%u (%s)",
__entry->error,
- __entry->ino, __get_str(dstaddr), __get_str(dstport),
+ __entry->ino,
+ __entry->saddr,
+ __entry->daddr,
__entry->socket_state,
rpc_show_socket_state(__entry->socket_state),
__entry->sock_state,
@@ -676,6 +922,90 @@ DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_reset_connection);
DEFINE_RPC_SOCKET_EVENT(rpc_socket_close);
DEFINE_RPC_SOCKET_EVENT(rpc_socket_shutdown);
+TRACE_EVENT(rpc_socket_nospace,
+ TP_PROTO(
+ const struct rpc_rqst *rqst,
+ const struct sock_xprt *transport
+ ),
+
+ TP_ARGS(rqst, transport),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(unsigned int, total)
+ __field(unsigned int, remaining)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->total = rqst->rq_slen;
+ __entry->remaining = rqst->rq_slen - transport->xmit.offset;
+ ),
+
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " total=%u remaining=%u",
+ __entry->task_id, __entry->client_id,
+ __entry->total, __entry->remaining
+ )
+);
+
+#define rpc_show_xprt_state(x) \
+ __print_flags(x, "|", \
+ { BIT(XPRT_LOCKED), "LOCKED" }, \
+ { BIT(XPRT_CONNECTED), "CONNECTED" }, \
+ { BIT(XPRT_CONNECTING), "CONNECTING" }, \
+ { BIT(XPRT_CLOSE_WAIT), "CLOSE_WAIT" }, \
+ { BIT(XPRT_BOUND), "BOUND" }, \
+ { BIT(XPRT_BINDING), "BINDING" }, \
+ { BIT(XPRT_CLOSING), "CLOSING" }, \
+ { BIT(XPRT_OFFLINE), "OFFLINE" }, \
+ { BIT(XPRT_REMOVE), "REMOVE" }, \
+ { BIT(XPRT_CONGESTED), "CONGESTED" }, \
+ { BIT(XPRT_CWND_WAIT), "CWND_WAIT" }, \
+ { BIT(XPRT_WRITE_SPACE), "WRITE_SPACE" }, \
+ { BIT(XPRT_SND_IS_COOKIE), "SND_IS_COOKIE" })
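/*
 * xprt->state is a bitmap manipulated with test_bit()/set_bit(), which
 * take bit numbers, while __print_flags() matches masks, so each table
 * entry wraps the bit number in BIT() (BIT(n) is 1UL << n):
 *
 *	{ BIT(XPRT_LOCKED), "LOCKED" } == { 1UL << XPRT_LOCKED, "LOCKED" }
 */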
+
+DECLARE_EVENT_CLASS(rpc_xprt_lifetime_class,
+ TP_PROTO(
+ const struct rpc_xprt *xprt
+ ),
+
+ TP_ARGS(xprt),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, state)
+ __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR])
+ __string(port, xprt->address_strings[RPC_DISPLAY_PORT])
+ ),
+
+ TP_fast_assign(
+ __entry->state = xprt->state;
+ __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
+ __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
+ ),
+
+ TP_printk("peer=[%s]:%s state=%s",
+ __get_str(addr), __get_str(port),
+ rpc_show_xprt_state(__entry->state))
+);
+
+#define DEFINE_RPC_XPRT_LIFETIME_EVENT(name) \
+ DEFINE_EVENT(rpc_xprt_lifetime_class, \
+ xprt_##name, \
+ TP_PROTO( \
+ const struct rpc_xprt *xprt \
+ ), \
+ TP_ARGS(xprt))
+
+DEFINE_RPC_XPRT_LIFETIME_EVENT(create);
+DEFINE_RPC_XPRT_LIFETIME_EVENT(connect);
+DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_auto);
+DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_done);
+DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_force);
+DEFINE_RPC_XPRT_LIFETIME_EVENT(destroy);
+
DECLARE_EVENT_CLASS(rpc_xprt_event,
TP_PROTO(
const struct rpc_xprt *xprt,
@@ -714,7 +1044,6 @@ DECLARE_EVENT_CLASS(rpc_xprt_event,
DEFINE_RPC_XPRT_EVENT(timer);
DEFINE_RPC_XPRT_EVENT(lookup_rqst);
-DEFINE_RPC_XPRT_EVENT(complete_rqst);
TRACE_EVENT(xprt_transmit,
TP_PROTO(
@@ -741,41 +1070,52 @@ TRACE_EVENT(xprt_transmit,
__entry->status = status;
),
- TP_printk(
- "task:%u@%u xid=0x%08x seqno=%u status=%d",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " xid=0x%08x seqno=%u status=%d",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->seqno, __entry->status)
);
-TRACE_EVENT(xprt_enq_xmit,
+TRACE_EVENT(xprt_retransmit,
TP_PROTO(
- const struct rpc_task *task,
- int stage
+ const struct rpc_rqst *rqst
),
- TP_ARGS(task, stage),
+ TP_ARGS(rqst),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
- __field(u32, seqno)
- __field(int, stage)
+ __field(int, ntrans)
+ __field(int, version)
+ __field(unsigned long, timeout)
+ __string(progname,
+ rqst->rq_task->tk_client->cl_program->name)
+ __string(procname, rpc_proc_name(rqst->rq_task))
),
TP_fast_assign(
+ struct rpc_task *task = rqst->rq_task;
+
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client ?
task->tk_client->cl_clid : -1;
- __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
- __entry->seqno = task->tk_rqstp->rq_seqno;
- __entry->stage = stage;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->ntrans = rqst->rq_ntrans;
+ __entry->timeout = task->tk_timeout;
+ __assign_str(progname,
+ task->tk_client->cl_program->name);
+ __entry->version = task->tk_client->cl_vers;
+ __assign_str(procname, rpc_proc_name(task));
),
- TP_printk(
- "task:%u@%u xid=0x%08x seqno=%u stage=%d",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " xid=0x%08x %sv%d %s ntrans=%d timeout=%lu",
__entry->task_id, __entry->client_id, __entry->xid,
- __entry->seqno, __entry->stage)
+ __get_str(progname), __entry->version, __get_str(procname),
+ __entry->ntrans, __entry->timeout
+ )
);
TRACE_EVENT(xprt_ping,
@@ -821,11 +1161,15 @@ DECLARE_EVENT_CLASS(xprt_writelock_event,
__entry->task_id = -1;
__entry->client_id = -1;
}
- __entry->snd_task_id = xprt->snd_task ?
- xprt->snd_task->tk_pid : -1;
+ if (xprt->snd_task &&
+ !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
+ __entry->snd_task_id = xprt->snd_task->tk_pid;
+ else
+ __entry->snd_task_id = -1;
),
- TP_printk("task:%u@%u snd_task:%u",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " snd_task:" SUNRPC_TRACE_PID_SPECIFIER,
__entry->task_id, __entry->client_id,
__entry->snd_task_id)
);
@@ -866,14 +1210,20 @@ DECLARE_EVENT_CLASS(xprt_cong_event,
__entry->task_id = -1;
__entry->client_id = -1;
}
- __entry->snd_task_id = xprt->snd_task ?
- xprt->snd_task->tk_pid : -1;
+ if (xprt->snd_task &&
+ !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
+ __entry->snd_task_id = xprt->snd_task->tk_pid;
+ else
+ __entry->snd_task_id = -1;
+
__entry->cong = xprt->cong;
__entry->cwnd = xprt->cwnd;
__entry->wait = test_bit(XPRT_CWND_WAIT, &xprt->state);
),
- TP_printk("task:%u@%u snd_task:%u cong=%lu cwnd=%lu%s",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " snd_task:" SUNRPC_TRACE_PID_SPECIFIER
+ " cong=%lu cwnd=%lu%s",
__entry->task_id, __entry->client_id,
__entry->snd_task_id, __entry->cong, __entry->cwnd,
__entry->wait ? " (wait)" : "")
@@ -892,6 +1242,50 @@ DEFINE_CONG_EVENT(release_cong);
DEFINE_CONG_EVENT(get_cong);
DEFINE_CONG_EVENT(put_cong);
+TRACE_EVENT(xprt_reserve,
+ TP_PROTO(
+ const struct rpc_rqst *rqst
+ ),
+
+ TP_ARGS(rqst),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ ),
+
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x",
+ __entry->task_id, __entry->client_id, __entry->xid
+ )
+);
+
+TRACE_EVENT(xs_data_ready,
+ TP_PROTO(
+ const struct rpc_xprt *xprt
+ ),
+
+ TP_ARGS(xprt),
+
+ TP_STRUCT__entry(
+ __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR])
+ __string(port, xprt->address_strings[RPC_DISPLAY_PORT])
+ ),
+
+ TP_fast_assign(
+ __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
+ __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
+ ),
+
+ TP_printk("peer=[%s]:%s", __get_str(addr), __get_str(port))
+);
+
TRACE_EVENT(xs_stream_read_data,
TP_PROTO(struct rpc_xprt *xprt, ssize_t err, size_t total),
@@ -947,40 +1341,295 @@ TRACE_EVENT(xs_stream_read_request,
__entry->copied, __entry->reclen, __entry->offset)
);
-#define show_rqstp_flags(flags) \
- __print_flags(flags, "|", \
- { (1UL << RQ_SECURE), "RQ_SECURE"}, \
- { (1UL << RQ_LOCAL), "RQ_LOCAL"}, \
- { (1UL << RQ_USEDEFERRAL), "RQ_USEDEFERRAL"}, \
- { (1UL << RQ_DROPME), "RQ_DROPME"}, \
- { (1UL << RQ_SPLICE_OK), "RQ_SPLICE_OK"}, \
- { (1UL << RQ_VICTIM), "RQ_VICTIM"}, \
- { (1UL << RQ_BUSY), "RQ_BUSY"})
+TRACE_EVENT(rpcb_getport,
+ TP_PROTO(
+ const struct rpc_clnt *clnt,
+ const struct rpc_task *task,
+ unsigned int bind_version
+ ),
+
+ TP_ARGS(clnt, task, bind_version),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(unsigned int, program)
+ __field(unsigned int, version)
+ __field(int, protocol)
+ __field(unsigned int, bind_version)
+ __string(servername, task->tk_xprt->servername)
+ ),
-TRACE_EVENT(svc_recv,
- TP_PROTO(struct svc_rqst *rqst, int len),
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = clnt->cl_clid;
+ __entry->program = clnt->cl_prog;
+ __entry->version = clnt->cl_vers;
+ __entry->protocol = task->tk_xprt->prot;
+ __entry->bind_version = bind_version;
+ __assign_str(servername, task->tk_xprt->servername);
+ ),
- TP_ARGS(rqst, len),
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " server=%s program=%u version=%u protocol=%d bind_version=%u",
+ __entry->task_id, __entry->client_id, __get_str(servername),
+ __entry->program, __entry->version, __entry->protocol,
+ __entry->bind_version
+ )
+);
+
+TRACE_EVENT(rpcb_setport,
+ TP_PROTO(
+ const struct rpc_task *task,
+ int status,
+ unsigned short port
+ ),
+
+ TP_ARGS(task, status, port),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(int, status)
+ __field(unsigned short, port)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->status = status;
+ __entry->port = port;
+ ),
+
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " status=%d port=%u",
+ __entry->task_id, __entry->client_id,
+ __entry->status, __entry->port
+ )
+);
+
+TRACE_EVENT(pmap_register,
+ TP_PROTO(
+ u32 program,
+ u32 version,
+ int protocol,
+ unsigned short port
+ ),
+
+ TP_ARGS(program, version, protocol, port),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, program)
+ __field(unsigned int, version)
+ __field(int, protocol)
+ __field(unsigned int, port)
+ ),
+
+ TP_fast_assign(
+ __entry->program = program;
+ __entry->version = version;
+ __entry->protocol = protocol;
+ __entry->port = port;
+ ),
+
+ TP_printk("program=%u version=%u protocol=%d port=%u",
+ __entry->program, __entry->version,
+ __entry->protocol, __entry->port
+ )
+);
+
+TRACE_EVENT(rpcb_register,
+ TP_PROTO(
+ u32 program,
+ u32 version,
+ const char *addr,
+ const char *netid
+ ),
+
+ TP_ARGS(program, version, addr, netid),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, program)
+ __field(unsigned int, version)
+ __string(addr, addr)
+ __string(netid, netid)
+ ),
+
+ TP_fast_assign(
+ __entry->program = program;
+ __entry->version = version;
+ __assign_str(addr, addr);
+ __assign_str(netid, netid);
+ ),
+
+ TP_printk("program=%u version=%u addr=%s netid=%s",
+ __entry->program, __entry->version,
+ __get_str(addr), __get_str(netid)
+ )
+);
+
+TRACE_EVENT(rpcb_unregister,
+ TP_PROTO(
+ u32 program,
+ u32 version,
+ const char *netid
+ ),
+
+ TP_ARGS(program, version, netid),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, program)
+ __field(unsigned int, version)
+ __string(netid, netid)
+ ),
+
+ TP_fast_assign(
+ __entry->program = program;
+ __entry->version = version;
+ __assign_str(netid, netid);
+ ),
+
+ TP_printk("program=%u version=%u netid=%s",
+ __entry->program, __entry->version, __get_str(netid)
+ )
+);
+
+/* Record an xdr_buf containing a fully-formed RPC message */
+DECLARE_EVENT_CLASS(svc_xdr_msg_class,
+ TP_PROTO(
+ const struct xdr_buf *xdr
+ ),
+
+ TP_ARGS(xdr),
TP_STRUCT__entry(
__field(u32, xid)
- __field(int, len)
- __field(unsigned long, flags)
- __string(addr, rqst->rq_xprt->xpt_remotebuf)
+ __field(const void *, head_base)
+ __field(size_t, head_len)
+ __field(const void *, tail_base)
+ __field(size_t, tail_len)
+ __field(unsigned int, page_len)
+ __field(unsigned int, msg_len)
),
TP_fast_assign(
- __entry->xid = be32_to_cpu(rqst->rq_xid);
- __entry->len = len;
- __entry->flags = rqst->rq_flags;
- __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
+ __be32 *p = (__be32 *)xdr->head[0].iov_base;
+
+ __entry->xid = be32_to_cpu(*p);
+ __entry->head_base = p;
+ __entry->head_len = xdr->head[0].iov_len;
+ __entry->tail_base = xdr->tail[0].iov_base;
+ __entry->tail_len = xdr->tail[0].iov_len;
+ __entry->page_len = xdr->page_len;
+ __entry->msg_len = xdr->len;
),
- TP_printk("addr=%s xid=0x%08x len=%d flags=%s",
- __get_str(addr), __entry->xid, __entry->len,
- show_rqstp_flags(__entry->flags))
+ TP_printk("xid=0x%08x head=[%p,%zu] page=%u tail=[%p,%zu] len=%u",
+ __entry->xid,
+ __entry->head_base, __entry->head_len, __entry->page_len,
+ __entry->tail_base, __entry->tail_len, __entry->msg_len
+ )
);
+#define DEFINE_SVCXDRMSG_EVENT(name) \
+ DEFINE_EVENT(svc_xdr_msg_class, \
+ svc_xdr_##name, \
+ TP_PROTO( \
+ const struct xdr_buf *xdr \
+ ), \
+ TP_ARGS(xdr))
+
+DEFINE_SVCXDRMSG_EVENT(recvfrom);
+
+/* Record an xdr_buf containing arbitrary data, tagged with an XID */
+DECLARE_EVENT_CLASS(svc_xdr_buf_class,
+ TP_PROTO(
+ __be32 xid,
+ const struct xdr_buf *xdr
+ ),
+
+ TP_ARGS(xid, xdr),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(const void *, head_base)
+ __field(size_t, head_len)
+ __field(const void *, tail_base)
+ __field(size_t, tail_len)
+ __field(unsigned int, page_base)
+ __field(unsigned int, page_len)
+ __field(unsigned int, msg_len)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(xid);
+ __entry->head_base = xdr->head[0].iov_base;
+ __entry->head_len = xdr->head[0].iov_len;
+ __entry->tail_base = xdr->tail[0].iov_base;
+ __entry->tail_len = xdr->tail[0].iov_len;
+ __entry->page_base = xdr->page_base;
+ __entry->page_len = xdr->page_len;
+ __entry->msg_len = xdr->len;
+ ),
+
+ TP_printk("xid=0x%08x head=[%p,%zu] page=%u(%u) tail=[%p,%zu] len=%u",
+ __entry->xid,
+ __entry->head_base, __entry->head_len,
+ __entry->page_len, __entry->page_base,
+ __entry->tail_base, __entry->tail_len,
+ __entry->msg_len
+ )
+);
+
+#define DEFINE_SVCXDRBUF_EVENT(name) \
+ DEFINE_EVENT(svc_xdr_buf_class, \
+ svc_xdr_##name, \
+ TP_PROTO( \
+ __be32 xid, \
+ const struct xdr_buf *xdr \
+ ), \
+ TP_ARGS(xid, xdr))
+
+DEFINE_SVCXDRBUF_EVENT(sendto);
+
+/*
+ * from include/linux/sunrpc/svc.h
+ */
+#define SVC_RQST_FLAG_LIST \
+ svc_rqst_flag(SECURE) \
+ svc_rqst_flag(LOCAL) \
+ svc_rqst_flag(USEDEFERRAL) \
+ svc_rqst_flag(DROPME) \
+ svc_rqst_flag(SPLICE_OK) \
+ svc_rqst_flag(VICTIM) \
+ svc_rqst_flag(BUSY) \
+ svc_rqst_flag_end(DATA)
+
+#undef svc_rqst_flag
+#undef svc_rqst_flag_end
+#define svc_rqst_flag(x) TRACE_DEFINE_ENUM(RQ_##x);
+#define svc_rqst_flag_end(x) TRACE_DEFINE_ENUM(RQ_##x);
+
+SVC_RQST_FLAG_LIST
+
+#undef svc_rqst_flag
+#undef svc_rqst_flag_end
+#define svc_rqst_flag(x) { BIT(RQ_##x), #x },
+#define svc_rqst_flag_end(x) { BIT(RQ_##x), #x }
+
+#define show_rqstp_flags(flags) \
+ __print_flags(flags, "|", SVC_RQST_FLAG_LIST)
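/*
 * SVC_RQST_FLAG_LIST is an X-macro: the list is written once and
 * expanded twice under different definitions of svc_rqst_flag(). The
 * first pass becomes
 *
 *	TRACE_DEFINE_ENUM(RQ_SECURE);
 *	...
 *	TRACE_DEFINE_ENUM(RQ_DATA);
 *
 * and the second builds the __print_flags() table
 *
 *	{ BIT(RQ_SECURE), "SECURE" }, ... { BIT(RQ_DATA), "DATA" }
 *
 * The separate svc_rqst_flag_end() exists only so that the final entry
 * is emitted without a trailing comma.
 */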
+
+TRACE_DEFINE_ENUM(SVC_GARBAGE);
+TRACE_DEFINE_ENUM(SVC_SYSERR);
+TRACE_DEFINE_ENUM(SVC_VALID);
+TRACE_DEFINE_ENUM(SVC_NEGATIVE);
+TRACE_DEFINE_ENUM(SVC_OK);
+TRACE_DEFINE_ENUM(SVC_DROP);
+TRACE_DEFINE_ENUM(SVC_CLOSE);
+TRACE_DEFINE_ENUM(SVC_DENIED);
+TRACE_DEFINE_ENUM(SVC_PENDING);
+TRACE_DEFINE_ENUM(SVC_COMPLETE);
+
#define svc_show_status(status) \
__print_symbolic(status, \
{ SVC_GARBAGE, "SVC_GARBAGE" }, \
@@ -994,26 +1643,53 @@ TRACE_EVENT(svc_recv,
{ SVC_PENDING, "SVC_PENDING" }, \
{ SVC_COMPLETE, "SVC_COMPLETE" })
+#define SVC_RQST_ENDPOINT_FIELDS(r) \
+ __sockaddr(server, (r)->rq_xprt->xpt_locallen) \
+ __sockaddr(client, (r)->rq_xprt->xpt_remotelen) \
+ __field(unsigned int, netns_ino) \
+ __field(u32, xid)
+
+#define SVC_RQST_ENDPOINT_ASSIGNMENTS(r) \
+ do { \
+ struct svc_xprt *xprt = (r)->rq_xprt; \
+ __assign_sockaddr(server, &xprt->xpt_local, \
+ xprt->xpt_locallen); \
+ __assign_sockaddr(client, &xprt->xpt_remote, \
+ xprt->xpt_remotelen); \
+ __entry->netns_ino = xprt->xpt_net->ns.inum; \
+ __entry->xid = be32_to_cpu((r)->rq_xid); \
+ } while (0)
+
+#define SVC_RQST_ENDPOINT_FORMAT \
+ "xid=0x%08x server=%pISpc client=%pISpc"
+
+#define SVC_RQST_ENDPOINT_VARARGS \
+ __entry->xid, __get_sockaddr(server), __get_sockaddr(client)
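/*
 * The SVC_RQST_ENDPOINT_* macros are designed to be dropped into the
 * matching TRACE_EVENT() sections so that every svc_rqst-based event
 * records and prints the same endpoint tuple. A minimal sketch of a
 * consumer (svc_example is a hypothetical name; real users such as
 * svc_authenticate below append their own fields):
 *
 *	TRACE_EVENT(svc_example,
 *		TP_PROTO(const struct svc_rqst *rqst),
 *		TP_ARGS(rqst),
 *		TP_STRUCT__entry(SVC_RQST_ENDPOINT_FIELDS(rqst)),
 *		TP_fast_assign(SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst);),
 *		TP_printk(SVC_RQST_ENDPOINT_FORMAT,
 *			  SVC_RQST_ENDPOINT_VARARGS)
 *	);
 */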
+
TRACE_EVENT(svc_authenticate,
- TP_PROTO(const struct svc_rqst *rqst, int auth_res, __be32 auth_stat),
+ TP_PROTO(const struct svc_rqst *rqst, int auth_res),
- TP_ARGS(rqst, auth_res, auth_stat),
+ TP_ARGS(rqst, auth_res),
TP_STRUCT__entry(
- __field(u32, xid)
+ SVC_RQST_ENDPOINT_FIELDS(rqst)
+
__field(unsigned long, svc_status)
__field(unsigned long, auth_stat)
),
TP_fast_assign(
- __entry->xid = be32_to_cpu(rqst->rq_xid);
+ SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst);
+
__entry->svc_status = auth_res;
- __entry->auth_stat = be32_to_cpu(auth_stat);
+ __entry->auth_stat = be32_to_cpu(rqst->rq_auth_stat);
),
- TP_printk("xid=0x%08x auth_res=%s auth_stat=%s",
- __entry->xid, svc_show_status(__entry->svc_status),
- rpc_show_auth_stat(__entry->auth_stat))
+ TP_printk(SVC_RQST_ENDPOINT_FORMAT
+ " auth_res=%s auth_stat=%s",
+ SVC_RQST_ENDPOINT_VARARGS,
+ svc_show_status(__entry->svc_status),
+ rpc_show_auth_stat(__entry->auth_stat))
);
TRACE_EVENT(svc_process,
@@ -1026,6 +1702,7 @@ TRACE_EVENT(svc_process,
__field(u32, vers)
__field(u32, proc)
__string(service, name)
+ __string(procedure, svc_proc_name(rqst))
__string(addr, rqst->rq_xprt ?
rqst->rq_xprt->xpt_remotebuf : "(null)")
),
@@ -1035,17 +1712,19 @@ TRACE_EVENT(svc_process,
__entry->vers = rqst->rq_vers;
__entry->proc = rqst->rq_proc;
__assign_str(service, name);
+ __assign_str(procedure, svc_proc_name(rqst));
__assign_str(addr, rqst->rq_xprt ?
rqst->rq_xprt->xpt_remotebuf : "(null)");
),
- TP_printk("addr=%s xid=0x%08x service=%s vers=%u proc=%u",
+ TP_printk("addr=%s xid=0x%08x service=%s vers=%u proc=%s",
__get_str(addr), __entry->xid,
- __get_str(service), __entry->vers, __entry->proc)
+ __get_str(service), __entry->vers,
+ __get_str(procedure)
+ )
);
DECLARE_EVENT_CLASS(svc_rqst_event,
-
TP_PROTO(
const struct svc_rqst *rqst
),
@@ -1053,20 +1732,20 @@ DECLARE_EVENT_CLASS(svc_rqst_event,
TP_ARGS(rqst),
TP_STRUCT__entry(
- __field(u32, xid)
+ SVC_RQST_ENDPOINT_FIELDS(rqst)
+
__field(unsigned long, flags)
- __string(addr, rqst->rq_xprt->xpt_remotebuf)
),
TP_fast_assign(
- __entry->xid = be32_to_cpu(rqst->rq_xid);
+ SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst);
+
__entry->flags = rqst->rq_flags;
- __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
),
- TP_printk("addr=%s xid=0x%08x flags=%s",
- __get_str(addr), __entry->xid,
- show_rqstp_flags(__entry->flags))
+ TP_printk(SVC_RQST_ENDPOINT_FORMAT " flags=%s",
+ SVC_RQST_ENDPOINT_VARARGS,
+ show_rqstp_flags(__entry->flags))
);
#define DEFINE_SVC_RQST_EVENT(name) \
DEFINE_EVENT(svc_rqst_event, svc_##name, \
@@ -1079,34 +1758,63 @@ DEFINE_SVC_RQST_EVENT(defer);
DEFINE_SVC_RQST_EVENT(drop);
DECLARE_EVENT_CLASS(svc_rqst_status,
-
- TP_PROTO(struct svc_rqst *rqst, int status),
+ TP_PROTO(
+ const struct svc_rqst *rqst,
+ int status
+ ),
TP_ARGS(rqst, status),
TP_STRUCT__entry(
- __field(u32, xid)
+ SVC_RQST_ENDPOINT_FIELDS(rqst)
+
__field(int, status)
__field(unsigned long, flags)
- __string(addr, rqst->rq_xprt->xpt_remotebuf)
),
TP_fast_assign(
- __entry->xid = be32_to_cpu(rqst->rq_xid);
+ SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst);
+
__entry->status = status;
__entry->flags = rqst->rq_flags;
- __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
),
- TP_printk("addr=%s xid=0x%08x status=%d flags=%s",
- __get_str(addr), __entry->xid,
- __entry->status, show_rqstp_flags(__entry->flags))
+ TP_printk(SVC_RQST_ENDPOINT_FORMAT " status=%d flags=%s",
+ SVC_RQST_ENDPOINT_VARARGS,
+ __entry->status, show_rqstp_flags(__entry->flags))
);
DEFINE_EVENT(svc_rqst_status, svc_send,
- TP_PROTO(struct svc_rqst *rqst, int status),
+ TP_PROTO(const struct svc_rqst *rqst, int status),
TP_ARGS(rqst, status));
+TRACE_EVENT(svc_stats_latency,
+ TP_PROTO(
+ const struct svc_rqst *rqst
+ ),
+
+ TP_ARGS(rqst),
+
+ TP_STRUCT__entry(
+ SVC_RQST_ENDPOINT_FIELDS(rqst)
+
+ __field(unsigned long, execute)
+ __string(procedure, svc_proc_name(rqst))
+ ),
+
+ TP_fast_assign(
+ SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst);
+
+ __entry->execute = ktime_to_us(ktime_sub(ktime_get(),
+ rqst->rq_stime));
+ __assign_str(procedure, svc_proc_name(rqst));
+ ),
+
+ TP_printk(SVC_RQST_ENDPOINT_FORMAT " proc=%s execute-us=%lu",
+ SVC_RQST_ENDPOINT_VARARGS,
+ __get_str(procedure), __entry->execute)
+);
+
#define show_svc_xprt_flags(flags) \
__print_flags(flags, "|", \
{ (1UL << XPT_BUSY), "XPT_BUSY"}, \
@@ -1124,80 +1832,163 @@ DEFINE_EVENT(svc_rqst_status, svc_send,
{ (1UL << XPT_KILL_TEMP), "XPT_KILL_TEMP"}, \
{ (1UL << XPT_CONG_CTRL), "XPT_CONG_CTRL"})
-TRACE_EVENT(svc_xprt_do_enqueue,
- TP_PROTO(struct svc_xprt *xprt, struct svc_rqst *rqst),
+TRACE_EVENT(svc_xprt_create_err,
+ TP_PROTO(
+ const char *program,
+ const char *protocol,
+ struct sockaddr *sap,
+ size_t salen,
+ const struct svc_xprt *xprt
+ ),
+
+ TP_ARGS(program, protocol, sap, salen, xprt),
+
+ TP_STRUCT__entry(
+ __field(long, error)
+ __string(program, program)
+ __string(protocol, protocol)
+ __sockaddr(addr, salen)
+ ),
+
+ TP_fast_assign(
+ __entry->error = PTR_ERR(xprt);
+ __assign_str(program, program);
+ __assign_str(protocol, protocol);
+ __assign_sockaddr(addr, sap, salen);
+ ),
+
+ TP_printk("addr=%pISpc program=%s protocol=%s error=%ld",
+ __get_sockaddr(addr), __get_str(program), __get_str(protocol),
+ __entry->error)
+);
+
+#define SVC_XPRT_ENDPOINT_FIELDS(x) \
+ __sockaddr(server, (x)->xpt_locallen) \
+ __sockaddr(client, (x)->xpt_remotelen) \
+ __field(unsigned long, flags) \
+ __field(unsigned int, netns_ino)
+
+#define SVC_XPRT_ENDPOINT_ASSIGNMENTS(x) \
+ do { \
+ __assign_sockaddr(server, &(x)->xpt_local, \
+ (x)->xpt_locallen); \
+ __assign_sockaddr(client, &(x)->xpt_remote, \
+ (x)->xpt_remotelen); \
+ __entry->flags = (x)->xpt_flags; \
+ __entry->netns_ino = (x)->xpt_net->ns.inum; \
+ } while (0)
+
+#define SVC_XPRT_ENDPOINT_FORMAT \
+ "server=%pISpc client=%pISpc flags=%s"
+
+#define SVC_XPRT_ENDPOINT_VARARGS \
+ __get_sockaddr(server), __get_sockaddr(client), \
+ show_svc_xprt_flags(__entry->flags)
+
+TRACE_EVENT(svc_xprt_enqueue,
+ TP_PROTO(
+ const struct svc_xprt *xprt,
+ const struct svc_rqst *rqst
+ ),
TP_ARGS(xprt, rqst),
TP_STRUCT__entry(
- __field(struct svc_xprt *, xprt)
+ SVC_XPRT_ENDPOINT_FIELDS(xprt)
+
__field(int, pid)
- __field(unsigned long, flags)
- __string(addr, xprt->xpt_remotebuf)
),
TP_fast_assign(
- __entry->xprt = xprt;
+ SVC_XPRT_ENDPOINT_ASSIGNMENTS(xprt);
+
		__entry->pid = rqst ? rqst->rq_task->pid : 0;
- __entry->flags = xprt->xpt_flags;
- __assign_str(addr, xprt->xpt_remotebuf);
),
- TP_printk("xprt=%p addr=%s pid=%d flags=%s",
- __entry->xprt, __get_str(addr),
- __entry->pid, show_svc_xprt_flags(__entry->flags))
+ TP_printk(SVC_XPRT_ENDPOINT_FORMAT " pid=%d",
+ SVC_XPRT_ENDPOINT_VARARGS, __entry->pid)
+);
+
+TRACE_EVENT(svc_xprt_dequeue,
+ TP_PROTO(
+ const struct svc_rqst *rqst
+ ),
+
+ TP_ARGS(rqst),
+
+ TP_STRUCT__entry(
+ SVC_XPRT_ENDPOINT_FIELDS(rqst->rq_xprt)
+
+ __field(unsigned long, wakeup)
+ ),
+
+ TP_fast_assign(
+ SVC_XPRT_ENDPOINT_ASSIGNMENTS(rqst->rq_xprt);
+
+ __entry->wakeup = ktime_to_us(ktime_sub(ktime_get(),
+ rqst->rq_qtime));
+ ),
+
+ TP_printk(SVC_XPRT_ENDPOINT_FORMAT " wakeup-us=%lu",
+ SVC_XPRT_ENDPOINT_VARARGS, __entry->wakeup)
);
DECLARE_EVENT_CLASS(svc_xprt_event,
- TP_PROTO(struct svc_xprt *xprt),
+ TP_PROTO(
+ const struct svc_xprt *xprt
+ ),
TP_ARGS(xprt),
TP_STRUCT__entry(
- __field(struct svc_xprt *, xprt)
- __field(unsigned long, flags)
- __string(addr, xprt->xpt_remotebuf)
+ SVC_XPRT_ENDPOINT_FIELDS(xprt)
),
TP_fast_assign(
- __entry->xprt = xprt;
- __entry->flags = xprt->xpt_flags;
- __assign_str(addr, xprt->xpt_remotebuf);
+ SVC_XPRT_ENDPOINT_ASSIGNMENTS(xprt);
),
- TP_printk("xprt=%p addr=%s flags=%s",
- __entry->xprt, __get_str(addr),
- show_svc_xprt_flags(__entry->flags))
+ TP_printk(SVC_XPRT_ENDPOINT_FORMAT, SVC_XPRT_ENDPOINT_VARARGS)
);
-DEFINE_EVENT(svc_xprt_event, svc_xprt_no_write_space,
- TP_PROTO(struct svc_xprt *xprt),
- TP_ARGS(xprt));
+#define DEFINE_SVC_XPRT_EVENT(name) \
+ DEFINE_EVENT(svc_xprt_event, svc_xprt_##name, \
+ TP_PROTO( \
+ const struct svc_xprt *xprt \
+ ), \
+ TP_ARGS(xprt))
-TRACE_EVENT(svc_xprt_dequeue,
- TP_PROTO(struct svc_rqst *rqst),
+DEFINE_SVC_XPRT_EVENT(no_write_space);
+DEFINE_SVC_XPRT_EVENT(close);
+DEFINE_SVC_XPRT_EVENT(detach);
+DEFINE_SVC_XPRT_EVENT(free);
- TP_ARGS(rqst),
+TRACE_EVENT(svc_xprt_accept,
+ TP_PROTO(
+ const struct svc_xprt *xprt,
+ const char *service
+ ),
+
+ TP_ARGS(xprt, service),
TP_STRUCT__entry(
- __field(struct svc_xprt *, xprt)
- __field(unsigned long, flags)
- __field(unsigned long, wakeup)
- __string(addr, rqst->rq_xprt->xpt_remotebuf)
+ SVC_XPRT_ENDPOINT_FIELDS(xprt)
+
+ __string(protocol, xprt->xpt_class->xcl_name)
+ __string(service, service)
),
TP_fast_assign(
- __entry->xprt = rqst->rq_xprt;
- __entry->flags = rqst->rq_xprt->xpt_flags;
- __entry->wakeup = ktime_to_us(ktime_sub(ktime_get(),
- rqst->rq_qtime));
- __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
+ SVC_XPRT_ENDPOINT_ASSIGNMENTS(xprt);
+
+ __assign_str(protocol, xprt->xpt_class->xcl_name);
+ __assign_str(service, service);
),
- TP_printk("xprt=%p addr=%s flags=%s wakeup-us=%lu",
- __entry->xprt, __get_str(addr),
- show_svc_xprt_flags(__entry->flags),
- __entry->wakeup)
+ TP_printk(SVC_XPRT_ENDPOINT_FORMAT " protocol=%s service=%s",
+ SVC_XPRT_ENDPOINT_VARARGS,
+ __get_str(protocol), __get_str(service)
+ )
);
TRACE_EVENT(svc_wake_up,
@@ -1216,81 +2007,363 @@ TRACE_EVENT(svc_wake_up,
TP_printk("pid=%d", __entry->pid)
);
-TRACE_EVENT(svc_handle_xprt,
- TP_PROTO(struct svc_xprt *xprt, int len),
+TRACE_EVENT(svc_alloc_arg_err,
+ TP_PROTO(
+ unsigned int requested,
+ unsigned int allocated
+ ),
- TP_ARGS(xprt, len),
+ TP_ARGS(requested, allocated),
TP_STRUCT__entry(
- __field(struct svc_xprt *, xprt)
- __field(int, len)
+ __field(unsigned int, requested)
+ __field(unsigned int, allocated)
+ ),
+
+ TP_fast_assign(
+ __entry->requested = requested;
+ __entry->allocated = allocated;
+ ),
+
+ TP_printk("requested=%u allocated=%u",
+ __entry->requested, __entry->allocated)
+);
+
+DECLARE_EVENT_CLASS(svc_deferred_event,
+ TP_PROTO(
+ const struct svc_deferred_req *dr
+ ),
+
+ TP_ARGS(dr),
+
+ TP_STRUCT__entry(
+ __field(const void *, dr)
+ __field(u32, xid)
+ __sockaddr(addr, dr->addrlen)
+ ),
+
+ TP_fast_assign(
+ __entry->dr = dr;
+ __entry->xid = be32_to_cpu(*(__be32 *)dr->args);
+ __assign_sockaddr(addr, &dr->addr, dr->addrlen);
+ ),
+
+ TP_printk("addr=%pISpc dr=%p xid=0x%08x", __get_sockaddr(addr),
+ __entry->dr, __entry->xid)
+);
+
+#define DEFINE_SVC_DEFERRED_EVENT(name) \
+ DEFINE_EVENT(svc_deferred_event, svc_defer_##name, \
+ TP_PROTO( \
+ const struct svc_deferred_req *dr \
+ ), \
+ TP_ARGS(dr))
+
+DEFINE_SVC_DEFERRED_EVENT(drop);
+DEFINE_SVC_DEFERRED_EVENT(queue);
+DEFINE_SVC_DEFERRED_EVENT(recv);
+
+TRACE_EVENT(svcsock_new_socket,
+ TP_PROTO(
+ const struct socket *socket
+ ),
+
+ TP_ARGS(socket),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, type)
+ __field(unsigned long, family)
+ __field(bool, listener)
+ ),
+
+ TP_fast_assign(
+ __entry->type = socket->type;
+ __entry->family = socket->sk->sk_family;
+ __entry->listener = (socket->sk->sk_state == TCP_LISTEN);
+ ),
+
+ TP_printk("type=%s family=%s%s",
+ show_socket_type(__entry->type),
+ rpc_show_address_family(__entry->family),
+ __entry->listener ? " (listener)" : ""
+ )
+);
+
+TRACE_EVENT(svcsock_marker,
+ TP_PROTO(
+ const struct svc_xprt *xprt,
+ __be32 marker
+ ),
+
+ TP_ARGS(xprt, marker),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, length)
+ __field(bool, last)
+ __string(addr, xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->length = be32_to_cpu(marker) & RPC_FRAGMENT_SIZE_MASK;
+ __entry->last = be32_to_cpu(marker) & RPC_LAST_STREAM_FRAGMENT;
+ __assign_str(addr, xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s length=%u%s", __get_str(addr),
+ __entry->length, __entry->last ? " (last)" : "")
+);
+
+DECLARE_EVENT_CLASS(svcsock_class,
+ TP_PROTO(
+ const struct svc_xprt *xprt,
+ ssize_t result
+ ),
+
+ TP_ARGS(xprt, result),
+
+ TP_STRUCT__entry(
+ __field(ssize_t, result)
__field(unsigned long, flags)
__string(addr, xprt->xpt_remotebuf)
),
TP_fast_assign(
- __entry->xprt = xprt;
- __entry->len = len;
+ __entry->result = result;
__entry->flags = xprt->xpt_flags;
__assign_str(addr, xprt->xpt_remotebuf);
),
- TP_printk("xprt=%p addr=%s len=%d flags=%s",
- __entry->xprt, __get_str(addr),
- __entry->len, show_svc_xprt_flags(__entry->flags))
+ TP_printk("addr=%s result=%zd flags=%s", __get_str(addr),
+ __entry->result, show_svc_xprt_flags(__entry->flags)
+ )
);
-TRACE_EVENT(svc_stats_latency,
- TP_PROTO(const struct svc_rqst *rqst),
+#define DEFINE_SVCSOCK_EVENT(name) \
+ DEFINE_EVENT(svcsock_class, svcsock_##name, \
+ TP_PROTO( \
+ const struct svc_xprt *xprt, \
+ ssize_t result \
+ ), \
+ TP_ARGS(xprt, result))
+
+DEFINE_SVCSOCK_EVENT(udp_send);
+DEFINE_SVCSOCK_EVENT(udp_recv);
+DEFINE_SVCSOCK_EVENT(udp_recv_err);
+DEFINE_SVCSOCK_EVENT(tcp_send);
+DEFINE_SVCSOCK_EVENT(tcp_recv);
+DEFINE_SVCSOCK_EVENT(tcp_recv_eagain);
+DEFINE_SVCSOCK_EVENT(tcp_recv_err);
+DEFINE_SVCSOCK_EVENT(data_ready);
+DEFINE_SVCSOCK_EVENT(write_space);
+
+TRACE_EVENT(svcsock_tcp_recv_short,
+ TP_PROTO(
+ const struct svc_xprt *xprt,
+ u32 expected,
+ u32 received
+ ),
- TP_ARGS(rqst),
+ TP_ARGS(xprt, expected, received),
TP_STRUCT__entry(
- __field(u32, xid)
- __field(unsigned long, execute)
- __string(addr, rqst->rq_xprt->xpt_remotebuf)
+ __field(u32, expected)
+ __field(u32, received)
+ __field(unsigned long, flags)
+ __string(addr, xprt->xpt_remotebuf)
),
TP_fast_assign(
- __entry->xid = be32_to_cpu(rqst->rq_xid);
- __entry->execute = ktime_to_us(ktime_sub(ktime_get(),
- rqst->rq_stime));
- __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
+ __entry->expected = expected;
+ __entry->received = received;
+ __entry->flags = xprt->xpt_flags;
+ __assign_str(addr, xprt->xpt_remotebuf);
),
- TP_printk("addr=%s xid=0x%08x execute-us=%lu",
- __get_str(addr), __entry->xid, __entry->execute)
+ TP_printk("addr=%s flags=%s expected=%u received=%u",
+ __get_str(addr), show_svc_xprt_flags(__entry->flags),
+ __entry->expected, __entry->received
+ )
);
-DECLARE_EVENT_CLASS(svc_deferred_event,
+TRACE_EVENT(svcsock_tcp_state,
TP_PROTO(
- const struct svc_deferred_req *dr
+ const struct svc_xprt *xprt,
+ const struct socket *socket
),
- TP_ARGS(dr),
+ TP_ARGS(xprt, socket),
TP_STRUCT__entry(
- __field(u32, xid)
- __string(addr, dr->xprt->xpt_remotebuf)
+ __field(unsigned long, socket_state)
+ __field(unsigned long, sock_state)
+ __field(unsigned long, flags)
+ __string(addr, xprt->xpt_remotebuf)
),
TP_fast_assign(
- __entry->xid = be32_to_cpu(*(__be32 *)(dr->args +
- (dr->xprt_hlen>>2)));
- __assign_str(addr, dr->xprt->xpt_remotebuf);
+ __entry->socket_state = socket->state;
+ __entry->sock_state = socket->sk->sk_state;
+ __entry->flags = xprt->xpt_flags;
+ __assign_str(addr, xprt->xpt_remotebuf);
),
- TP_printk("addr=%s xid=0x%08x", __get_str(addr), __entry->xid)
+ TP_printk("addr=%s state=%s sk_state=%s flags=%s", __get_str(addr),
+ rpc_show_socket_state(__entry->socket_state),
+ rpc_show_sock_state(__entry->sock_state),
+ show_svc_xprt_flags(__entry->flags)
+ )
);
-#define DEFINE_SVC_DEFERRED_EVENT(name) \
- DEFINE_EVENT(svc_deferred_event, svc_##name##_deferred, \
+
+DECLARE_EVENT_CLASS(svcsock_accept_class,
+ TP_PROTO(
+ const struct svc_xprt *xprt,
+ const char *service,
+ long status
+ ),
+
+ TP_ARGS(xprt, service, status),
+
+ TP_STRUCT__entry(
+ __field(long, status)
+ __string(service, service)
+ __field(unsigned int, netns_ino)
+ ),
+
+ TP_fast_assign(
+ __entry->status = status;
+ __assign_str(service, service);
+ __entry->netns_ino = xprt->xpt_net->ns.inum;
+ ),
+
+ TP_printk("addr=listener service=%s status=%ld",
+ __get_str(service), __entry->status
+ )
+);
+
+#define DEFINE_ACCEPT_EVENT(name) \
+ DEFINE_EVENT(svcsock_accept_class, svcsock_##name##_err, \
TP_PROTO( \
- const struct svc_deferred_req *dr \
+ const struct svc_xprt *xprt, \
+ const char *service, \
+ long status \
), \
- TP_ARGS(dr))
+ TP_ARGS(xprt, service, status))
-DEFINE_SVC_DEFERRED_EVENT(drop);
-DEFINE_SVC_DEFERRED_EVENT(revisit);
+DEFINE_ACCEPT_EVENT(accept);
+DEFINE_ACCEPT_EVENT(getpeername);
+
+DECLARE_EVENT_CLASS(cache_event,
+ TP_PROTO(
+ const struct cache_detail *cd,
+ const struct cache_head *h
+ ),
+
+ TP_ARGS(cd, h),
+
+ TP_STRUCT__entry(
+ __field(const struct cache_head *, h)
+ __string(name, cd->name)
+ ),
+
+ TP_fast_assign(
+ __entry->h = h;
+ __assign_str(name, cd->name);
+ ),
+
+ TP_printk("cache=%s entry=%p", __get_str(name), __entry->h)
+);
+#define DEFINE_CACHE_EVENT(name) \
+ DEFINE_EVENT(cache_event, name, \
+ TP_PROTO( \
+ const struct cache_detail *cd, \
+ const struct cache_head *h \
+ ), \
+ TP_ARGS(cd, h))
+DEFINE_CACHE_EVENT(cache_entry_expired);
+DEFINE_CACHE_EVENT(cache_entry_upcall);
+DEFINE_CACHE_EVENT(cache_entry_update);
+DEFINE_CACHE_EVENT(cache_entry_make_negative);
+DEFINE_CACHE_EVENT(cache_entry_no_listener);
+
+DECLARE_EVENT_CLASS(register_class,
+ TP_PROTO(
+ const char *program,
+ const u32 version,
+ const int family,
+ const unsigned short protocol,
+ const unsigned short port,
+ int error
+ ),
+
+ TP_ARGS(program, version, family, protocol, port, error),
+
+ TP_STRUCT__entry(
+ __field(u32, version)
+ __field(unsigned long, family)
+ __field(unsigned short, protocol)
+ __field(unsigned short, port)
+ __field(int, error)
+ __string(program, program)
+ ),
+
+ TP_fast_assign(
+ __entry->version = version;
+ __entry->family = family;
+ __entry->protocol = protocol;
+ __entry->port = port;
+ __entry->error = error;
+ __assign_str(program, program);
+ ),
+
+ TP_printk("program=%sv%u proto=%s port=%u family=%s error=%d",
+ __get_str(program), __entry->version,
+ __entry->protocol == IPPROTO_UDP ? "udp" : "tcp",
+ __entry->port, rpc_show_address_family(__entry->family),
+ __entry->error
+ )
+);
+
+#define DEFINE_REGISTER_EVENT(name) \
+ DEFINE_EVENT(register_class, svc_##name, \
+ TP_PROTO( \
+ const char *program, \
+ const u32 version, \
+ const int family, \
+ const unsigned short protocol, \
+ const unsigned short port, \
+ int error \
+ ), \
+ TP_ARGS(program, version, family, protocol, \
+ port, error))
+
+DEFINE_REGISTER_EVENT(register);
+DEFINE_REGISTER_EVENT(noregister);
+
+TRACE_EVENT(svc_unregister,
+ TP_PROTO(
+ const char *program,
+ const u32 version,
+ int error
+ ),
+
+ TP_ARGS(program, version, error),
+
+ TP_STRUCT__entry(
+ __field(u32, version)
+ __field(int, error)
+ __string(program, program)
+ ),
+
+ TP_fast_assign(
+ __entry->version = version;
+ __entry->error = error;
+ __assign_str(program, program);
+ ),
+
+ TP_printk("program=%sv%u error=%d",
+ __get_str(program), __entry->version, __entry->error
+ )
+);
#endif /* _TRACE_SUNRPC_H */
diff --git a/include/trace/events/sunrpc_base.h b/include/trace/events/sunrpc_base.h
new file mode 100644
index 000000000000..588557d07ea8
--- /dev/null
+++ b/include/trace/events/sunrpc_base.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 Oracle and/or its affiliates.
+ *
+ * Common types and format specifiers for sunrpc.
+ */
+
+#if !defined(_TRACE_SUNRPC_BASE_H)
+#define _TRACE_SUNRPC_BASE_H
+
+#include <linux/tracepoint.h>
+
+#define SUNRPC_TRACE_PID_SPECIFIER "%08x"
+#define SUNRPC_TRACE_CLID_SPECIFIER "%08x"
+#define SUNRPC_TRACE_TASK_SPECIFIER \
+ "task:" SUNRPC_TRACE_PID_SPECIFIER "@" SUNRPC_TRACE_CLID_SPECIFIER
+
+#endif /* _TRACE_SUNRPC_BASE_H */
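/*
 * A standalone userspace sketch of the same idiom (hypothetical names,
 * plain printf() instead of TP_printk()), showing that these SPECIFIER
 * macros are ordinary string literals the preprocessor glues together:
 *
 *	#include <stdio.h>
 *
 *	#define PID_SPEC	"%08x"
 *	#define CLID_SPEC	"%08x"
 *	#define TASK_SPEC	"task:" PID_SPEC "@" CLID_SPEC
 *
 *	int main(void)
 *	{
 *		printf(TASK_SPEC "\n", 0x1au, 0x3u);
 *		return 0;
 *	}
 *
 * which prints "task:0000001a@00000003".
 */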
diff --git a/include/trace/events/swiotlb.h b/include/trace/events/swiotlb.h
index 705be43b71ab..da05c9ebd224 100644
--- a/include/trace/events/swiotlb.h
+++ b/include/trace/events/swiotlb.h
@@ -8,20 +8,15 @@
#include <linux/tracepoint.h>
TRACE_EVENT(swiotlb_bounced,
-
- TP_PROTO(struct device *dev,
- dma_addr_t dev_addr,
- size_t size,
- enum swiotlb_force swiotlb_force),
-
- TP_ARGS(dev, dev_addr, size, swiotlb_force),
+ TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size),
+ TP_ARGS(dev, dev_addr, size),
TP_STRUCT__entry(
- __string( dev_name, dev_name(dev) )
- __field( u64, dma_mask )
- __field( dma_addr_t, dev_addr )
- __field( size_t, size )
- __field( enum swiotlb_force, swiotlb_force )
+ __string(dev_name, dev_name(dev))
+ __field(u64, dma_mask)
+ __field(dma_addr_t, dev_addr)
+ __field(size_t, size)
+ __field(bool, force)
),
TP_fast_assign(
@@ -29,19 +24,15 @@ TRACE_EVENT(swiotlb_bounced,
__entry->dma_mask = (dev->dma_mask ? *dev->dma_mask : 0);
__entry->dev_addr = dev_addr;
__entry->size = size;
- __entry->swiotlb_force = swiotlb_force;
+ __entry->force = is_swiotlb_force_bounce(dev);
),
- TP_printk("dev_name: %s dma_mask=%llx dev_addr=%llx "
- "size=%zu %s",
+ TP_printk("dev_name: %s dma_mask=%llx dev_addr=%llx size=%zu %s",
__get_str(dev_name),
__entry->dma_mask,
(unsigned long long)__entry->dev_addr,
__entry->size,
- __print_symbolic(__entry->swiotlb_force,
- { SWIOTLB_NORMAL, "NORMAL" },
- { SWIOTLB_FORCE, "FORCE" },
- { SWIOTLB_NO_FORCE, "NO_FORCE" }))
+ __entry->force ? "FORCE" : "NORMAL")
);
#endif /* _TRACE_SWIOTLB_H */
diff --git a/include/trace/events/target.h b/include/trace/events/target.h
index 914a872dd343..67fad2677ed5 100644
--- a/include/trace/events/target.h
+++ b/include/trace/events/target.h
@@ -137,30 +137,32 @@ TRACE_EVENT(target_sequencer_start,
TP_STRUCT__entry(
__field( unsigned int, unpacked_lun )
+ __field( unsigned long long, tag )
__field( unsigned int, opcode )
__field( unsigned int, data_length )
__field( unsigned int, task_attribute )
+ __field( unsigned char, control )
__array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE )
__string( initiator, cmd->se_sess->se_node_acl->initiatorname )
),
TP_fast_assign(
__entry->unpacked_lun = cmd->orig_fe_lun;
+ __entry->tag = cmd->tag;
__entry->opcode = cmd->t_task_cdb[0];
__entry->data_length = cmd->data_length;
__entry->task_attribute = cmd->sam_task_attr;
+ __entry->control = scsi_command_control(cmd->t_task_cdb);
memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE);
__assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname);
),
- TP_printk("%s -> LUN %03u %s data_length %6u CDB %s (TA:%s C:%02x)",
+ TP_printk("%s -> LUN %03u tag %#llx %s data_length %6u CDB %s (TA:%s C:%02x)",
__get_str(initiator), __entry->unpacked_lun,
- show_opcode_name(__entry->opcode),
+ __entry->tag, show_opcode_name(__entry->opcode),
__entry->data_length, __print_hex(__entry->cdb, 16),
show_task_attribute_name(__entry->task_attribute),
- scsi_command_size(__entry->cdb) <= 16 ?
- __entry->cdb[scsi_command_size(__entry->cdb) - 1] :
- __entry->cdb[1]
+ __entry->control
)
);
@@ -172,9 +174,11 @@ TRACE_EVENT(target_cmd_complete,
TP_STRUCT__entry(
__field( unsigned int, unpacked_lun )
+ __field( unsigned long long, tag )
__field( unsigned int, opcode )
__field( unsigned int, data_length )
__field( unsigned int, task_attribute )
+ __field( unsigned char, control )
__field( unsigned char, scsi_status )
__field( unsigned char, sense_length )
__array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE )
@@ -184,9 +188,11 @@ TRACE_EVENT(target_cmd_complete,
TP_fast_assign(
__entry->unpacked_lun = cmd->orig_fe_lun;
+ __entry->tag = cmd->tag;
__entry->opcode = cmd->t_task_cdb[0];
__entry->data_length = cmd->data_length;
__entry->task_attribute = cmd->sam_task_attr;
+ __entry->control = scsi_command_control(cmd->t_task_cdb);
__entry->scsi_status = cmd->scsi_status;
__entry->sense_length = cmd->scsi_status == SAM_STAT_CHECK_CONDITION ?
min(18, ((u8 *) cmd->sense_buffer)[SPC_ADD_SENSE_LEN_OFFSET] + 8) : 0;
@@ -195,17 +201,16 @@ TRACE_EVENT(target_cmd_complete,
__assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname);
),
- TP_printk("%s <- LUN %03u status %s (sense len %d%s%s) %s data_length %6u CDB %s (TA:%s C:%02x)",
+ TP_printk("%s <- LUN %03u tag %#llx status %s (sense len %d%s%s) %s data_length %6u CDB %s (TA:%s C:%02x)",
__get_str(initiator), __entry->unpacked_lun,
+ __entry->tag,
show_scsi_status_name(__entry->scsi_status),
__entry->sense_length, __entry->sense_length ? " / " : "",
__print_hex(__entry->sense_data, __entry->sense_length),
show_opcode_name(__entry->opcode),
__entry->data_length, __print_hex(__entry->cdb, 16),
show_task_attribute_name(__entry->task_attribute),
- scsi_command_size(__entry->cdb) <= 16 ?
- __entry->cdb[scsi_command_size(__entry->cdb) - 1] :
- __entry->cdb[1]
+ __entry->control
)
);
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index cf97f6339acb..901b440238d5 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -59,6 +59,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
__field(int, state)
__field(__u16, sport)
__field(__u16, dport)
+ __field(__u16, family)
__array(__u8, saddr, 4)
__array(__u8, daddr, 4)
__array(__u8, saddr_v6, 16)
@@ -75,6 +76,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
__entry->sport = ntohs(inet->inet_sport);
__entry->dport = ntohs(inet->inet_dport);
+ __entry->family = sk->sk_family;
p32 = (__be32 *) __entry->saddr;
*p32 = inet->inet_saddr;
@@ -86,7 +88,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
),
- TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s",
+ TP_printk("family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s",
+ show_family_name(__entry->family),
__entry->sport, __entry->dport, __entry->saddr, __entry->daddr,
__entry->saddr_v6, __entry->daddr_v6,
show_tcp_state_name(__entry->state))
@@ -125,6 +128,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk,
__field(const void *, skaddr)
__field(__u16, sport)
__field(__u16, dport)
+ __field(__u16, family)
__array(__u8, saddr, 4)
__array(__u8, daddr, 4)
__array(__u8, saddr_v6, 16)
@@ -140,6 +144,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk,
__entry->sport = ntohs(inet->inet_sport);
__entry->dport = ntohs(inet->inet_dport);
+ __entry->family = sk->sk_family;
p32 = (__be32 *) __entry->saddr;
*p32 = inet->inet_saddr;
@@ -153,7 +158,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk,
__entry->sock_cookie = sock_gen_cookie(sk);
),
- TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c sock_cookie=%llx",
+ TP_printk("family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c sock_cookie=%llx",
+ show_family_name(__entry->family),
__entry->sport, __entry->dport,
__entry->saddr, __entry->daddr,
__entry->saddr_v6, __entry->daddr_v6,
@@ -192,6 +198,7 @@ TRACE_EVENT(tcp_retransmit_synack,
__field(const void *, req)
__field(__u16, sport)
__field(__u16, dport)
+ __field(__u16, family)
__array(__u8, saddr, 4)
__array(__u8, daddr, 4)
__array(__u8, saddr_v6, 16)
@@ -207,6 +214,7 @@ TRACE_EVENT(tcp_retransmit_synack,
__entry->sport = ireq->ir_num;
__entry->dport = ntohs(ireq->ir_rmt_port);
+ __entry->family = sk->sk_family;
p32 = (__be32 *) __entry->saddr;
*p32 = ireq->ir_loc_addr;
@@ -218,7 +226,8 @@ TRACE_EVENT(tcp_retransmit_synack,
ireq->ir_v6_loc_addr, ireq->ir_v6_rmt_addr);
),
- TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
+ TP_printk("family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
+ show_family_name(__entry->family),
__entry->sport, __entry->dport,
__entry->saddr, __entry->daddr,
__entry->saddr_v6, __entry->daddr_v6)
@@ -238,6 +247,7 @@ TRACE_EVENT(tcp_probe,
__array(__u8, daddr, sizeof(struct sockaddr_in6))
__field(__u16, sport)
__field(__u16, dport)
+ __field(__u16, family)
__field(__u32, mark)
__field(__u16, data_len)
__field(__u32, snd_nxt)
@@ -264,11 +274,12 @@ TRACE_EVENT(tcp_probe,
__entry->sport = ntohs(inet->inet_sport);
__entry->dport = ntohs(inet->inet_dport);
__entry->mark = skb->mark;
+ __entry->family = sk->sk_family;
__entry->data_len = skb->len - __tcp_hdrlen(th);
__entry->snd_nxt = tp->snd_nxt;
__entry->snd_una = tp->snd_una;
- __entry->snd_cwnd = tp->snd_cwnd;
+ __entry->snd_cwnd = tcp_snd_cwnd(tp);
__entry->snd_wnd = tp->snd_wnd;
__entry->rcv_wnd = tp->rcv_wnd;
__entry->ssthresh = tcp_current_ssthresh(sk);
@@ -276,13 +287,135 @@ TRACE_EVENT(tcp_probe,
__entry->sock_cookie = sock_gen_cookie(sk);
),
- TP_printk("src=%pISpc dest=%pISpc mark=%#x data_len=%d snd_nxt=%#x snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u rcv_wnd=%u sock_cookie=%llx",
+ TP_printk("family=%s src=%pISpc dest=%pISpc mark=%#x data_len=%d snd_nxt=%#x snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u rcv_wnd=%u sock_cookie=%llx",
+ show_family_name(__entry->family),
__entry->saddr, __entry->daddr, __entry->mark,
__entry->data_len, __entry->snd_nxt, __entry->snd_una,
__entry->snd_cwnd, __entry->ssthresh, __entry->snd_wnd,
__entry->srtt, __entry->rcv_wnd, __entry->sock_cookie)
);
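/*
 * tcp_snd_cwnd() in the assignment above replaces the direct
 * tp->snd_cwnd read; it is the tree-wide accessor added so that writes
 * to the field can be sanity-checked in one place. On the read side it
 * is presumably just
 *
 *	static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
 *	{
 *		return tp->snd_cwnd;
 *	}
 *
 * so the recorded value is unchanged.
 */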
+#define TP_STORE_ADDR_PORTS_SKB_V4(__entry, skb) \
+ do { \
+ const struct tcphdr *th = (const struct tcphdr *)skb->data; \
+ struct sockaddr_in *v4 = (void *)__entry->saddr; \
+ \
+ v4->sin_family = AF_INET; \
+ v4->sin_port = th->source; \
+ v4->sin_addr.s_addr = ip_hdr(skb)->saddr; \
+ v4 = (void *)__entry->daddr; \
+ v4->sin_family = AF_INET; \
+ v4->sin_port = th->dest; \
+ v4->sin_addr.s_addr = ip_hdr(skb)->daddr; \
+ } while (0)
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+#define TP_STORE_ADDR_PORTS_SKB(__entry, skb) \
+ do { \
+ const struct iphdr *iph = ip_hdr(skb); \
+ \
+ if (iph->version == 6) { \
+ const struct tcphdr *th = (const struct tcphdr *)skb->data; \
+ struct sockaddr_in6 *v6 = (void *)__entry->saddr; \
+ \
+ v6->sin6_family = AF_INET6; \
+ v6->sin6_port = th->source; \
+ v6->sin6_addr = ipv6_hdr(skb)->saddr; \
+ v6 = (void *)__entry->daddr; \
+ v6->sin6_family = AF_INET6; \
+ v6->sin6_port = th->dest; \
+ v6->sin6_addr = ipv6_hdr(skb)->daddr; \
+ } else \
+ TP_STORE_ADDR_PORTS_SKB_V4(__entry, skb); \
+ } while (0)
+
+#else
+
+#define TP_STORE_ADDR_PORTS_SKB(__entry, skb) \
+ TP_STORE_ADDR_PORTS_SKB_V4(__entry, skb)
+
+#endif
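/*
 * TP_STORE_ADDR_PORTS_SKB() fills two sockaddr_in6-sized byte arrays
 * straight from the packet headers, overlaying a sockaddr_in or
 * sockaddr_in6 depending on the IP version nibble (which occupies the
 * same bits in struct iphdr and struct ipv6hdr, so the check is safe
 * before the family is known). Callers zero the arrays first and then
 * hand them to %pISpc, which prints either family with its port, as
 * tcp_event_skb does below:
 *
 *	memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
 *	memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
 *	TP_STORE_ADDR_PORTS_SKB(__entry, skb);
 *	...
 *	TP_printk("src=%pISpc dest=%pISpc", __entry->saddr, __entry->daddr)
 */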
+
+/*
+ * tcp event with only skb
+ */
+DECLARE_EVENT_CLASS(tcp_event_skb,
+
+ TP_PROTO(const struct sk_buff *skb),
+
+ TP_ARGS(skb),
+
+ TP_STRUCT__entry(
+ __field(const void *, skbaddr)
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __entry->skbaddr = skb;
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+
+ TP_STORE_ADDR_PORTS_SKB(__entry, skb);
+ ),
+
+ TP_printk("src=%pISpc dest=%pISpc", __entry->saddr, __entry->daddr)
+);
+
+DEFINE_EVENT(tcp_event_skb, tcp_bad_csum,
+
+ TP_PROTO(const struct sk_buff *skb),
+
+ TP_ARGS(skb)
+);
+
+TRACE_EVENT(tcp_cong_state_set,
+
+ TP_PROTO(struct sock *sk, const u8 ca_state),
+
+ TP_ARGS(sk, ca_state),
+
+ TP_STRUCT__entry(
+ __field(const void *, skaddr)
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __array(__u8, saddr, 4)
+ __array(__u8, daddr, 4)
+ __array(__u8, saddr_v6, 16)
+ __array(__u8, daddr_v6, 16)
+ __field(__u8, cong_state)
+ ),
+
+ TP_fast_assign(
+ struct inet_sock *inet = inet_sk(sk);
+ __be32 *p32;
+
+ __entry->skaddr = sk;
+
+ __entry->sport = ntohs(inet->inet_sport);
+ __entry->dport = ntohs(inet->inet_dport);
+
+ p32 = (__be32 *) __entry->saddr;
+ *p32 = inet->inet_saddr;
+
+ p32 = (__be32 *) __entry->daddr;
+ *p32 = inet->inet_daddr;
+
+ TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+ sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
+
+ __entry->cong_state = ca_state;
+ ),
+
+ TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c cong_state=%u",
+ __entry->sport, __entry->dport,
+ __entry->saddr, __entry->daddr,
+ __entry->saddr_v6, __entry->daddr_v6,
+ __entry->cong_state)
+);
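
TP_STORE_ADDRS() is shared via include/trace/events/net_probe_common.h and fills both the 4-byte and 16-byte address slots so either format specifier can be printed unconditionally. A hedged sketch of the idea for the IPv4 case, where the v6 slot holds the v4-mapped form; demo_entry and store_addrs() are hypothetical names:

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Simplified trace entry: both IPv4 and IPv6 slots are always present. */
struct demo_entry {
	unsigned char saddr[4];
	unsigned char saddr_v6[16];
};

/* Record an IPv4 source both as-is and as an IPv4-mapped IPv6 address,
 * roughly what TP_STORE_ADDRS() arranges for AF_INET sockets. */
static void store_addrs(struct demo_entry *e, unsigned int saddr_be)
{
	memcpy(e->saddr, &saddr_be, 4);
	memset(e->saddr_v6, 0, 16);
	e->saddr_v6[10] = 0xff;			/* ::ffff:a.b.c.d */
	e->saddr_v6[11] = 0xff;
	memcpy(e->saddr_v6 + 12, &saddr_be, 4);
}

int main(void)
{
	struct demo_entry e;
	char buf[INET6_ADDRSTRLEN];

	store_addrs(&e, htonl(0x7f000001));	/* 127.0.0.1 */
	inet_ntop(AF_INET6, e.saddr_v6, buf, sizeof(buf));
	printf("saddr=%u.%u.%u.%u saddrv6=%s\n",
	       e.saddr[0], e.saddr[1], e.saddr[2], e.saddr[3], buf);
	return 0;
}
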
+
#endif /* _TRACE_TCP_H */
/* This part must be outside protection */
diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h
index 135e5421f003..e58bf3072f32 100644
--- a/include/trace/events/thermal.h
+++ b/include/trace/events/thermal.h
@@ -92,34 +92,22 @@ TRACE_EVENT(thermal_zone_trip,
);
#ifdef CONFIG_CPU_THERMAL
-TRACE_EVENT(thermal_power_cpu_get_power,
- TP_PROTO(const struct cpumask *cpus, unsigned long freq, u32 *load,
- size_t load_len, u32 dynamic_power),
+TRACE_EVENT(thermal_power_cpu_get_power_simple,
+ TP_PROTO(int cpu, u32 power),
- TP_ARGS(cpus, freq, load, load_len, dynamic_power),
+ TP_ARGS(cpu, power),
TP_STRUCT__entry(
- __bitmask(cpumask, num_possible_cpus())
- __field(unsigned long, freq )
- __dynamic_array(u32, load, load_len)
- __field(size_t, load_len )
- __field(u32, dynamic_power )
+ __field(int, cpu)
+ __field(u32, power)
),
TP_fast_assign(
- __assign_bitmask(cpumask, cpumask_bits(cpus),
- num_possible_cpus());
- __entry->freq = freq;
- memcpy(__get_dynamic_array(load), load,
- load_len * sizeof(*load));
- __entry->load_len = load_len;
- __entry->dynamic_power = dynamic_power;
+ __entry->cpu = cpu;
+ __entry->power = power;
),
- TP_printk("cpus=%s freq=%lu load={%s} dynamic_power=%d",
- __get_bitmask(cpumask), __entry->freq,
- __print_array(__get_dynamic_array(load), __entry->load_len, 4),
- __entry->dynamic_power)
+ TP_printk("cpu=%d power=%u", __entry->cpu, __entry->power)
);
TRACE_EVENT(thermal_power_cpu_limit,
@@ -153,31 +141,30 @@ TRACE_EVENT(thermal_power_cpu_limit,
TRACE_EVENT(thermal_power_devfreq_get_power,
TP_PROTO(struct thermal_cooling_device *cdev,
struct devfreq_dev_status *status, unsigned long freq,
- u32 dynamic_power, u32 static_power, u32 power),
+ u32 power),
- TP_ARGS(cdev, status, freq, dynamic_power, static_power, power),
+ TP_ARGS(cdev, status, freq, power),
TP_STRUCT__entry(
__string(type, cdev->type )
__field(unsigned long, freq )
- __field(u32, load )
- __field(u32, dynamic_power )
- __field(u32, static_power )
+ __field(u32, busy_time)
+ __field(u32, total_time)
__field(u32, power)
),
TP_fast_assign(
__assign_str(type, cdev->type);
__entry->freq = freq;
- __entry->load = (100 * status->busy_time) / status->total_time;
- __entry->dynamic_power = dynamic_power;
- __entry->static_power = static_power;
+ __entry->busy_time = status->busy_time;
+ __entry->total_time = status->total_time;
__entry->power = power;
),
- TP_printk("type=%s freq=%lu load=%u dynamic_power=%u static_power=%u power=%u",
+ TP_printk("type=%s freq=%lu load=%u power=%u",
__get_str(type), __entry->freq,
- __entry->load, __entry->dynamic_power, __entry->static_power,
+ __entry->total_time == 0 ? 0 :
+ (100 * __entry->busy_time) / __entry->total_time,
__entry->power)
);
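
The change records raw busy_time/total_time and defers the percentage to print time, so a zero total_time no longer risks a divide-by-zero at TP_fast_assign() time. The same arithmetic as a standalone helper:

#include <stdio.h>

/* Derive the devfreq load percentage at print time, guarding the
 * total_time == 0 case just as the TP_printk() above does. */
static unsigned int devfreq_load_pct(unsigned int busy_time,
				     unsigned int total_time)
{
	return total_time == 0 ? 0 : (100 * busy_time) / total_time;
}

int main(void)
{
	printf("load=%u%%\n", devfreq_load_pct(25, 100));
	printf("load=%u%%\n", devfreq_load_pct(10, 0));	/* guarded */
	return 0;
}
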
diff --git a/include/trace/events/thermal_pressure.h b/include/trace/events/thermal_pressure.h
new file mode 100644
index 000000000000..b68680201360
--- /dev/null
+++ b/include/trace/events/thermal_pressure.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM thermal_pressure
+
+#if !defined(_TRACE_THERMAL_PRESSURE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_THERMAL_PRESSURE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(thermal_pressure_update,
+ TP_PROTO(int cpu, unsigned long thermal_pressure),
+ TP_ARGS(cpu, thermal_pressure),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, thermal_pressure)
+ __field(int, cpu)
+ ),
+
+ TP_fast_assign(
+ __entry->thermal_pressure = thermal_pressure;
+ __entry->cpu = cpu;
+ ),
+
+ TP_printk("cpu=%d thermal_pressure=%lu", __entry->cpu, __entry->thermal_pressure)
+);
+#endif /* _TRACE_THERMAL_PRESSURE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/thp.h b/include/trace/events/thp.h
index d7fbbe551841..202b3e3e67ff 100644
--- a/include/trace/events/thp.h
+++ b/include/trace/events/thp.h
@@ -8,24 +8,6 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
-TRACE_EVENT(hugepage_invalidate,
-
- TP_PROTO(unsigned long addr, unsigned long pte),
- TP_ARGS(addr, pte),
- TP_STRUCT__entry(
- __field(unsigned long, addr)
- __field(unsigned long, pte)
- ),
-
- TP_fast_assign(
- __entry->addr = addr;
- __entry->pte = pte;
- ),
-
- TP_printk("hugepage invalidate at addr 0x%lx and pte = 0x%lx",
- __entry->addr, __entry->pte)
-);
-
TRACE_EVENT(hugepage_set_pmd,
TP_PROTO(unsigned long addr, unsigned long pmd),
@@ -65,24 +47,34 @@ TRACE_EVENT(hugepage_update,
TP_printk("hugepage update at addr 0x%lx and pte = 0x%lx clr = 0x%lx, set = 0x%lx", __entry->addr, __entry->pte, __entry->clr, __entry->set)
);
-TRACE_EVENT(hugepage_splitting,
- TP_PROTO(unsigned long addr, unsigned long pte),
- TP_ARGS(addr, pte),
- TP_STRUCT__entry(
- __field(unsigned long, addr)
- __field(unsigned long, pte)
- ),
+DECLARE_EVENT_CLASS(migration_pmd,
- TP_fast_assign(
- __entry->addr = addr;
- __entry->pte = pte;
- ),
+ TP_PROTO(unsigned long addr, unsigned long pmd),
+
+ TP_ARGS(addr, pmd),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, addr)
+ __field(unsigned long, pmd)
+ ),
- TP_printk("hugepage splitting at addr 0x%lx and pte = 0x%lx",
- __entry->addr, __entry->pte)
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->pmd = pmd;
+ ),
+ TP_printk("addr=%lx, pmd=%lx", __entry->addr, __entry->pmd)
);
+DEFINE_EVENT(migration_pmd, set_migration_pmd,
+ TP_PROTO(unsigned long addr, unsigned long pmd),
+ TP_ARGS(addr, pmd)
+);
+
+DEFINE_EVENT(migration_pmd, remove_migration_pmd,
+ TP_PROTO(unsigned long addr, unsigned long pmd),
+ TP_ARGS(addr, pmd)
+);
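
DECLARE_EVENT_CLASS()/DEFINE_EVENT() factor one record layout and format string across several events, which is why the two migration tracepoints above add almost no code. A rough plain-C analogue of the pattern (all names hypothetical):

#include <stdio.h>

/* One shared "class": layout plus formatter. */
struct migration_pmd_entry {
	unsigned long addr;
	unsigned long pmd;
};

static void migration_pmd_print(const char *event,
				const struct migration_pmd_entry *e)
{
	printf("%s: addr=%lx, pmd=%lx\n", event, e->addr, e->pmd);
}

/* Two "events" reusing the class, like set/remove_migration_pmd. */
static void trace_set_migration_pmd(unsigned long addr, unsigned long pmd)
{
	struct migration_pmd_entry e = { addr, pmd };
	migration_pmd_print("set_migration_pmd", &e);
}

static void trace_remove_migration_pmd(unsigned long addr, unsigned long pmd)
{
	struct migration_pmd_entry e = { addr, pmd };
	migration_pmd_print("remove_migration_pmd", &e);
}

int main(void)
{
	trace_set_migration_pmd(0x1000, 0x80000000);
	trace_remove_migration_pmd(0x1000, 0x80000000);
	return 0;
}
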
#endif /* _TRACE_THP_H */
/* This part must be outside protection */
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 19abb6c3eb73..2e713a7d9aa3 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -48,6 +48,7 @@ DEFINE_EVENT(timer_class, timer_init,
* timer_start - called when the timer is started
* @timer: pointer to struct timer_list
 * @expires: the timer's expiry time
+ * @flags: the timer's flags
*/
TRACE_EVENT(timer_start,
@@ -84,6 +85,7 @@ TRACE_EVENT(timer_start,
/**
* timer_expire_entry - called immediately before the timer callback
* @timer: pointer to struct timer_list
+ * @baseclk: value of timer_base::clk when timer expires
*
 * Allows the timer latency to be determined.
*/
@@ -119,7 +121,7 @@ TRACE_EVENT(timer_expire_entry,
* When used in combination with the timer_expire_entry tracepoint we can
* determine the runtime of the timer callback function.
*
- * NOTE: Do NOT derefernce timer in TP_fast_assign. The pointer might
+ * NOTE: Do NOT dereference timer in TP_fast_assign. The pointer might
* be invalid. We solely track the pointer.
*/
DEFINE_EVENT(timer_class, timer_expire_exit,
@@ -190,7 +192,8 @@ TRACE_EVENT(hrtimer_init,
/**
* hrtimer_start - called when the hrtimer is started
- * @hrtimer: pointer to struct hrtimer
+ * @hrtimer: pointer to struct hrtimer
+ * @mode: the hrtimer's mode
*/
TRACE_EVENT(hrtimer_start,
diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h
index 5f300739240d..599739ee7b20 100644
--- a/include/trace/events/ufs.h
+++ b/include/trace/events/ufs.h
@@ -11,31 +11,60 @@
#include <linux/tracepoint.h>
-#define UFS_LINK_STATES \
- EM(UIC_LINK_OFF_STATE) \
- EM(UIC_LINK_ACTIVE_STATE) \
- EMe(UIC_LINK_HIBERN8_STATE)
-
-#define UFS_PWR_MODES \
- EM(UFS_ACTIVE_PWR_MODE) \
- EM(UFS_SLEEP_PWR_MODE) \
- EMe(UFS_POWERDOWN_PWR_MODE)
-
-#define UFSCHD_CLK_GATING_STATES \
- EM(CLKS_OFF) \
- EM(CLKS_ON) \
- EM(REQ_CLKS_OFF) \
- EMe(REQ_CLKS_ON)
+#define str_opcode(opcode) \
+ __print_symbolic(opcode, \
+ { WRITE_16, "WRITE_16" }, \
+ { WRITE_10, "WRITE_10" }, \
+ { READ_16, "READ_16" }, \
+ { READ_10, "READ_10" }, \
+ { SYNCHRONIZE_CACHE, "SYNC" }, \
+ { UNMAP, "UNMAP" })
+
+#define UFS_LINK_STATES \
+ EM(UIC_LINK_OFF_STATE, "UIC_LINK_OFF_STATE") \
+ EM(UIC_LINK_ACTIVE_STATE, "UIC_LINK_ACTIVE_STATE") \
+ EMe(UIC_LINK_HIBERN8_STATE, "UIC_LINK_HIBERN8_STATE")
+
+#define UFS_PWR_MODES \
+ EM(UFS_ACTIVE_PWR_MODE, "UFS_ACTIVE_PWR_MODE") \
+ EM(UFS_SLEEP_PWR_MODE, "UFS_SLEEP_PWR_MODE") \
+ EM(UFS_POWERDOWN_PWR_MODE, "UFS_POWERDOWN_PWR_MODE") \
+ EMe(UFS_DEEPSLEEP_PWR_MODE, "UFS_DEEPSLEEP_PWR_MODE")
+
+#define UFSCHD_CLK_GATING_STATES \
+ EM(CLKS_OFF, "CLKS_OFF") \
+ EM(CLKS_ON, "CLKS_ON") \
+ EM(REQ_CLKS_OFF, "REQ_CLKS_OFF") \
+ EMe(REQ_CLKS_ON, "REQ_CLKS_ON")
+
+#define UFS_CMD_TRACE_STRINGS \
+ EM(UFS_CMD_SEND, "send_req") \
+ EM(UFS_CMD_COMP, "complete_rsp") \
+ EM(UFS_DEV_COMP, "dev_complete") \
+ EM(UFS_QUERY_SEND, "query_send") \
+ EM(UFS_QUERY_COMP, "query_complete") \
+ EM(UFS_QUERY_ERR, "query_complete_err") \
+ EM(UFS_TM_SEND, "tm_send") \
+ EM(UFS_TM_COMP, "tm_complete") \
+ EMe(UFS_TM_ERR, "tm_complete_err")
+
+#define UFS_CMD_TRACE_TSF_TYPES \
+ EM(UFS_TSF_CDB, "CDB") \
+ EM(UFS_TSF_OSF, "OSF") \
+ EM(UFS_TSF_TM_INPUT, "TM_INPUT") \
+ EMe(UFS_TSF_TM_OUTPUT, "TM_OUTPUT")
/* Enums need to be exported to userspace so user tools can parse them */
#undef EM
#undef EMe
-#define EM(a) TRACE_DEFINE_ENUM(a);
-#define EMe(a) TRACE_DEFINE_ENUM(a);
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
UFS_LINK_STATES;
UFS_PWR_MODES;
UFSCHD_CLK_GATING_STATES;
+UFS_CMD_TRACE_STRINGS
+UFS_CMD_TRACE_TSF_TYPES
/*
* Now redefine the EM() and EMe() macros to map the enums to the strings
@@ -43,8 +72,13 @@ UFSCHD_CLK_GATING_STATES;
*/
#undef EM
#undef EMe
-#define EM(a) { a, #a },
-#define EMe(a) { a, #a }
+#define EM(a, b) {a, b},
+#define EMe(a, b) {a, b}
+
+#define show_ufs_cmd_trace_str(str_t) \
+ __print_symbolic(str_t, UFS_CMD_TRACE_STRINGS)
+#define show_ufs_cmd_trace_tsf(tsf) \
+ __print_symbolic(tsf, UFS_CMD_TRACE_TSF_TYPES)
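
The EM()/EMe() pair is a two-pass X-macro: the same list first expands to TRACE_DEFINE_ENUM() registrations, then is re-expanded into the value/string pairs __print_symbolic() needs. A self-contained sketch of the technique using the clock-gating states:

#include <stdio.h>

enum { CLKS_OFF, CLKS_ON, REQ_CLKS_OFF, REQ_CLKS_ON };

#define CLK_GATING_STATES			\
	EM(CLKS_OFF, "CLKS_OFF")		\
	EM(CLKS_ON, "CLKS_ON")			\
	EM(REQ_CLKS_OFF, "REQ_CLKS_OFF")	\
	EMe(REQ_CLKS_ON, "REQ_CLKS_ON")

/* First pass: one output per entry (here, a registration printout). */
#define EM(a, b)  printf("register %s = %d\n", #a, a);
#define EMe(a, b) printf("register %s = %d\n", #a, a);

static void register_states(void) { CLK_GATING_STATES }

/* Second pass: the same list becomes a value/name table. */
#undef EM
#undef EMe
#define EM(a, b)  { a, b },
#define EMe(a, b) { a, b }

static const struct { int val; const char *name; } state_names[] = {
	CLK_GATING_STATES
};

int main(void)
{
	register_states();
	printf("state 2 is %s\n", state_names[2].name);
	return 0;
}
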
TRACE_EVENT(ufshcd_clk_gating,
@@ -212,70 +246,150 @@ DEFINE_EVENT(ufshcd_template, ufshcd_init,
int dev_state, int link_state),
TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+DEFINE_EVENT(ufshcd_template, ufshcd_wl_suspend,
+ TP_PROTO(const char *dev_name, int err, s64 usecs,
+ int dev_state, int link_state),
+ TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_wl_resume,
+ TP_PROTO(const char *dev_name, int err, s64 usecs,
+ int dev_state, int link_state),
+ TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_suspend,
+ TP_PROTO(const char *dev_name, int err, s64 usecs,
+ int dev_state, int link_state),
+ TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_resume,
+ TP_PROTO(const char *dev_name, int err, s64 usecs,
+ int dev_state, int link_state),
+ TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
TRACE_EVENT(ufshcd_command,
- TP_PROTO(const char *dev_name, const char *str, unsigned int tag,
- u32 doorbell, int transfer_len, u32 intr, u64 lba,
- u8 opcode),
+ TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t,
+ unsigned int tag, u32 doorbell, int transfer_len, u32 intr,
+ u64 lba, u8 opcode, u8 group_id),
- TP_ARGS(dev_name, str, tag, doorbell, transfer_len, intr, lba, opcode),
+ TP_ARGS(dev_name, str_t, tag, doorbell, transfer_len,
+ intr, lba, opcode, group_id),
TP_STRUCT__entry(
__string(dev_name, dev_name)
- __string(str, str)
+ __field(enum ufs_trace_str_t, str_t)
__field(unsigned int, tag)
__field(u32, doorbell)
__field(int, transfer_len)
__field(u32, intr)
__field(u64, lba)
__field(u8, opcode)
+ __field(u8, group_id)
),
TP_fast_assign(
__assign_str(dev_name, dev_name);
- __assign_str(str, str);
+ __entry->str_t = str_t;
__entry->tag = tag;
__entry->doorbell = doorbell;
__entry->transfer_len = transfer_len;
__entry->intr = intr;
__entry->lba = lba;
__entry->opcode = opcode;
+ __entry->group_id = group_id;
),
TP_printk(
- "%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x",
- __get_str(str), __get_str(dev_name), __entry->tag,
- __entry->doorbell, __entry->transfer_len,
- __entry->intr, __entry->lba, (u32)__entry->opcode
+ "%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x (%s), group_id: 0x%x",
+ show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
+ __entry->tag, __entry->doorbell, __entry->transfer_len,
+ __entry->intr, __entry->lba, (u32)__entry->opcode,
+ str_opcode(__entry->opcode), (u32)__entry->group_id
+ )
+);
+
+TRACE_EVENT(ufshcd_uic_command,
+ TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, u32 cmd,
+ u32 arg1, u32 arg2, u32 arg3),
+
+ TP_ARGS(dev_name, str_t, cmd, arg1, arg2, arg3),
+
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name)
+ __field(enum ufs_trace_str_t, str_t)
+ __field(u32, cmd)
+ __field(u32, arg1)
+ __field(u32, arg2)
+ __field(u32, arg3)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name, dev_name);
+ __entry->str_t = str_t;
+ __entry->cmd = cmd;
+ __entry->arg1 = arg1;
+ __entry->arg2 = arg2;
+ __entry->arg3 = arg3;
+ ),
+
+ TP_printk(
+ "%s: %s: cmd: 0x%x, arg1: 0x%x, arg2: 0x%x, arg3: 0x%x",
+ show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
+ __entry->cmd, __entry->arg1, __entry->arg2, __entry->arg3
)
);
TRACE_EVENT(ufshcd_upiu,
- TP_PROTO(const char *dev_name, const char *str, void *hdr, void *tsf),
+ TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, void *hdr,
+ void *tsf, enum ufs_trace_tsf_t tsf_t),
- TP_ARGS(dev_name, str, hdr, tsf),
+ TP_ARGS(dev_name, str_t, hdr, tsf, tsf_t),
TP_STRUCT__entry(
__string(dev_name, dev_name)
- __string(str, str)
+ __field(enum ufs_trace_str_t, str_t)
__array(unsigned char, hdr, 12)
__array(unsigned char, tsf, 16)
+ __field(enum ufs_trace_tsf_t, tsf_t)
),
TP_fast_assign(
__assign_str(dev_name, dev_name);
- __assign_str(str, str);
+ __entry->str_t = str_t;
memcpy(__entry->hdr, hdr, sizeof(__entry->hdr));
memcpy(__entry->tsf, tsf, sizeof(__entry->tsf));
+ __entry->tsf_t = tsf_t;
),
TP_printk(
- "%s: %s: HDR:%s, CDB:%s",
- __get_str(str), __get_str(dev_name),
+ "%s: %s: HDR:%s, %s:%s",
+ show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
__print_hex(__entry->hdr, sizeof(__entry->hdr)),
+ show_ufs_cmd_trace_tsf(__entry->tsf_t),
__print_hex(__entry->tsf, sizeof(__entry->tsf))
)
);
+TRACE_EVENT(ufshcd_exception_event,
+
+ TP_PROTO(const char *dev_name, u16 status),
+
+ TP_ARGS(dev_name, status),
+
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name)
+ __field(u16, status)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name, dev_name);
+ __entry->status = status;
+ ),
+
+ TP_printk("%s: status 0x%x",
+ __get_str(dev_name), __entry->status
+ )
+);
+
#endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */
/* This part must be outside protection */
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index a5ab2973e8dc..d2123dd960d5 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -27,6 +27,20 @@
{RECLAIM_WB_ASYNC, "RECLAIM_WB_ASYNC"} \
) : "RECLAIM_WB_NONE"
+#define _VMSCAN_THROTTLE_WRITEBACK (1 << VMSCAN_THROTTLE_WRITEBACK)
+#define _VMSCAN_THROTTLE_ISOLATED (1 << VMSCAN_THROTTLE_ISOLATED)
+#define _VMSCAN_THROTTLE_NOPROGRESS (1 << VMSCAN_THROTTLE_NOPROGRESS)
+#define _VMSCAN_THROTTLE_CONGESTED (1 << VMSCAN_THROTTLE_CONGESTED)
+
+#define show_throttle_flags(flags) \
+ (flags) ? __print_flags(flags, "|", \
+ {_VMSCAN_THROTTLE_WRITEBACK, "VMSCAN_THROTTLE_WRITEBACK"}, \
+ {_VMSCAN_THROTTLE_ISOLATED, "VMSCAN_THROTTLE_ISOLATED"}, \
+ {_VMSCAN_THROTTLE_NOPROGRESS, "VMSCAN_THROTTLE_NOPROGRESS"}, \
+ {_VMSCAN_THROTTLE_CONGESTED, "VMSCAN_THROTTLE_CONGESTED"} \
+ ) : "VMSCAN_THROTTLE_NONE"
+
+
#define trace_reclaim_flags(file) ( \
(file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
(RECLAIM_WB_ASYNC) \
@@ -82,14 +96,14 @@ TRACE_EVENT(mm_vmscan_wakeup_kswapd,
__field( int, nid )
__field( int, zid )
__field( int, order )
- __field( gfp_t, gfp_flags )
+ __field( unsigned long, gfp_flags )
),
TP_fast_assign(
__entry->nid = nid;
__entry->zid = zid;
__entry->order = order;
- __entry->gfp_flags = gfp_flags;
+ __entry->gfp_flags = (__force unsigned long)gfp_flags;
),
TP_printk("nid=%d order=%d gfp_flags=%s",
@@ -106,12 +120,12 @@ DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template,
TP_STRUCT__entry(
__field( int, order )
- __field( gfp_t, gfp_flags )
+ __field( unsigned long, gfp_flags )
),
TP_fast_assign(
__entry->order = order;
- __entry->gfp_flags = gfp_flags;
+ __entry->gfp_flags = (__force unsigned long)gfp_flags;
),
TP_printk("order=%d gfp_flags=%s",
@@ -196,7 +210,7 @@ TRACE_EVENT(mm_shrink_slab_start,
__field(void *, shrink)
__field(int, nid)
__field(long, nr_objects_to_shrink)
- __field(gfp_t, gfp_flags)
+ __field(unsigned long, gfp_flags)
__field(unsigned long, cache_items)
__field(unsigned long long, delta)
__field(unsigned long, total_scan)
@@ -208,7 +222,7 @@ TRACE_EVENT(mm_shrink_slab_start,
__entry->shrink = shr->scan_objects;
__entry->nid = sc->nid;
__entry->nr_objects_to_shrink = nr_objects_to_shrink;
- __entry->gfp_flags = sc->gfp_mask;
+ __entry->gfp_flags = (__force unsigned long)sc->gfp_mask;
__entry->cache_items = cache_items;
__entry->delta = delta;
__entry->total_scan = total_scan;
@@ -265,7 +279,7 @@ TRACE_EVENT(mm_shrink_slab_end,
);
TRACE_EVENT(mm_vmscan_lru_isolate,
- TP_PROTO(int classzone_idx,
+ TP_PROTO(int highest_zoneidx,
int order,
unsigned long nr_requested,
unsigned long nr_scanned,
@@ -274,33 +288,37 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
isolate_mode_t isolate_mode,
int lru),
- TP_ARGS(classzone_idx, order, nr_requested, nr_scanned, nr_skipped, nr_taken, isolate_mode, lru),
+ TP_ARGS(highest_zoneidx, order, nr_requested, nr_scanned, nr_skipped, nr_taken, isolate_mode, lru),
TP_STRUCT__entry(
- __field(int, classzone_idx)
+ __field(int, highest_zoneidx)
__field(int, order)
__field(unsigned long, nr_requested)
__field(unsigned long, nr_scanned)
__field(unsigned long, nr_skipped)
__field(unsigned long, nr_taken)
- __field(isolate_mode_t, isolate_mode)
+ __field(unsigned int, isolate_mode)
__field(int, lru)
),
TP_fast_assign(
- __entry->classzone_idx = classzone_idx;
+ __entry->highest_zoneidx = highest_zoneidx;
__entry->order = order;
__entry->nr_requested = nr_requested;
__entry->nr_scanned = nr_scanned;
__entry->nr_skipped = nr_skipped;
__entry->nr_taken = nr_taken;
- __entry->isolate_mode = isolate_mode;
+ __entry->isolate_mode = (__force unsigned int)isolate_mode;
__entry->lru = lru;
),
+ /*
+ * classzone is the previous name of highest_zoneidx. It is kept
+ * unchanged in the output because of the tracepoint ABI requirement.
+ */
TP_printk("isolate_mode=%d classzone=%d order=%d nr_requested=%lu nr_scanned=%lu nr_skipped=%lu nr_taken=%lu lru=%s",
__entry->isolate_mode,
- __entry->classzone_idx,
+ __entry->highest_zoneidx,
__entry->order,
__entry->nr_requested,
__entry->nr_scanned,
@@ -309,11 +327,11 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
__print_symbolic(__entry->lru, LRU_NAMES))
);
-TRACE_EVENT(mm_vmscan_writepage,
+TRACE_EVENT(mm_vmscan_write_folio,
- TP_PROTO(struct page *page),
+ TP_PROTO(struct folio *folio),
- TP_ARGS(page),
+ TP_ARGS(folio),
TP_STRUCT__entry(
__field(unsigned long, pfn)
@@ -321,12 +339,12 @@ TRACE_EVENT(mm_vmscan_writepage,
),
TP_fast_assign(
- __entry->pfn = page_to_pfn(page);
+ __entry->pfn = folio_pfn(folio);
__entry->reclaim_flags = trace_reclaim_flags(
- page_is_file_cache(page));
+ folio_is_file_lru(folio));
),
- TP_printk("page=%p pfn=%lu flags=%s",
+ TP_printk("page=%p pfn=0x%lx flags=%s",
pfn_to_page(__entry->pfn),
__entry->pfn,
show_reclaim_flags(__entry->reclaim_flags))
@@ -419,47 +437,6 @@ TRACE_EVENT(mm_vmscan_lru_shrink_active,
show_reclaim_flags(__entry->reclaim_flags))
);
-TRACE_EVENT(mm_vmscan_inactive_list_is_low,
-
- TP_PROTO(int nid, int reclaim_idx,
- unsigned long total_inactive, unsigned long inactive,
- unsigned long total_active, unsigned long active,
- unsigned long ratio, int file),
-
- TP_ARGS(nid, reclaim_idx, total_inactive, inactive, total_active, active, ratio, file),
-
- TP_STRUCT__entry(
- __field(int, nid)
- __field(int, reclaim_idx)
- __field(unsigned long, total_inactive)
- __field(unsigned long, inactive)
- __field(unsigned long, total_active)
- __field(unsigned long, active)
- __field(unsigned long, ratio)
- __field(int, reclaim_flags)
- ),
-
- TP_fast_assign(
- __entry->nid = nid;
- __entry->reclaim_idx = reclaim_idx;
- __entry->total_inactive = total_inactive;
- __entry->inactive = inactive;
- __entry->total_active = total_active;
- __entry->active = active;
- __entry->ratio = ratio;
- __entry->reclaim_flags = trace_reclaim_flags(file) &
- RECLAIM_WB_LRU;
- ),
-
- TP_printk("nid=%d reclaim_idx=%d total_inactive=%ld inactive=%ld total_active=%ld active=%ld ratio=%ld flags=%s",
- __entry->nid,
- __entry->reclaim_idx,
- __entry->total_inactive, __entry->inactive,
- __entry->total_active, __entry->active,
- __entry->ratio,
- show_reclaim_flags(__entry->reclaim_flags))
-);
-
TRACE_EVENT(mm_vmscan_node_reclaim_begin,
TP_PROTO(int nid, int order, gfp_t gfp_flags),
@@ -469,13 +446,13 @@ TRACE_EVENT(mm_vmscan_node_reclaim_begin,
TP_STRUCT__entry(
__field(int, nid)
__field(int, order)
- __field(gfp_t, gfp_flags)
+ __field(unsigned long, gfp_flags)
),
TP_fast_assign(
__entry->nid = nid;
__entry->order = order;
- __entry->gfp_flags = gfp_flags;
+ __entry->gfp_flags = (__force unsigned long)gfp_flags;
),
TP_printk("nid=%d order=%d gfp_flags=%s",
@@ -491,6 +468,32 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_node_reclaim_end,
TP_ARGS(nr_reclaimed)
);
+TRACE_EVENT(mm_vmscan_throttled,
+
+ TP_PROTO(int nid, int usec_timeout, int usec_delayed, int reason),
+
+ TP_ARGS(nid, usec_timeout, usec_delayed, reason),
+
+ TP_STRUCT__entry(
+ __field(int, nid)
+ __field(int, usec_timeout)
+ __field(int, usec_delayed)
+ __field(int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->nid = nid;
+ __entry->usec_timeout = usec_timeout;
+ __entry->usec_delayed = usec_delayed;
+ __entry->reason = 1U << reason;
+ ),
+
+ TP_printk("nid=%d usec_timeout=%d usec_delayed=%d reason=%s",
+ __entry->nid,
+ __entry->usec_timeout,
+ __entry->usec_delayed,
+ show_throttle_flags(__entry->reason))
+);
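
TP_fast_assign() stores 1U << reason so the single enum value lands in the same bit space that show_throttle_flags() decodes. A small round-trip sketch, assuming the reason enum takes the values 0..3 used by the _VMSCAN_THROTTLE_* shifts above:

#include <stdio.h>

/* Mirrors enum vmscan_throttle_state (assumed values 0..3). */
enum { THROTTLE_WRITEBACK, THROTTLE_ISOLATED, THROTTLE_NOPROGRESS,
       THROTTLE_CONGESTED };

static const char *const names[] = {
	"VMSCAN_THROTTLE_WRITEBACK", "VMSCAN_THROTTLE_ISOLATED",
	"VMSCAN_THROTTLE_NOPROGRESS", "VMSCAN_THROTTLE_CONGESTED",
};

int main(void)
{
	unsigned int reason = THROTTLE_NOPROGRESS;
	unsigned int flags = 1U << reason;	/* what TP_fast_assign stores */
	int i;

	for (i = 0; i < 4; i++)		/* what show_throttle_flags prints */
		if (flags & (1U << i))
			printf("reason=%s\n", names[i]);
	return 0;
}
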
#endif /* _TRACE_VMSCAN_H */
/* This part must be outside protection */
diff --git a/include/trace/events/vsock_virtio_transport_common.h b/include/trace/events/vsock_virtio_transport_common.h
index 6782213778be..d0b3f0ea9ba1 100644
--- a/include/trace/events/vsock_virtio_transport_common.h
+++ b/include/trace/events/vsock_virtio_transport_common.h
@@ -9,9 +9,12 @@
#include <linux/tracepoint.h>
TRACE_DEFINE_ENUM(VIRTIO_VSOCK_TYPE_STREAM);
+TRACE_DEFINE_ENUM(VIRTIO_VSOCK_TYPE_SEQPACKET);
#define show_type(val) \
- __print_symbolic(val, { VIRTIO_VSOCK_TYPE_STREAM, "STREAM" })
+ __print_symbolic(val, \
+ { VIRTIO_VSOCK_TYPE_STREAM, "STREAM" }, \
+ { VIRTIO_VSOCK_TYPE_SEQPACKET, "SEQPACKET" })
TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_INVALID);
TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_REQUEST);
diff --git a/include/trace/events/watchdog.h b/include/trace/events/watchdog.h
new file mode 100644
index 000000000000..beb9bb3424c8
--- /dev/null
+++ b/include/trace/events/watchdog.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM watchdog
+
+#if !defined(_TRACE_WATCHDOG_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_WATCHDOG_H
+
+#include <linux/watchdog.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(watchdog_template,
+
+ TP_PROTO(struct watchdog_device *wdd, int err),
+
+ TP_ARGS(wdd, err),
+
+ TP_STRUCT__entry(
+ __field(int, id)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->id = wdd->id;
+ __entry->err = err;
+ ),
+
+ TP_printk("watchdog%d err=%d", __entry->id, __entry->err)
+);
+
+DEFINE_EVENT(watchdog_template, watchdog_start,
+ TP_PROTO(struct watchdog_device *wdd, int err),
+ TP_ARGS(wdd, err));
+
+DEFINE_EVENT(watchdog_template, watchdog_ping,
+ TP_PROTO(struct watchdog_device *wdd, int err),
+ TP_ARGS(wdd, err));
+
+DEFINE_EVENT(watchdog_template, watchdog_stop,
+ TP_PROTO(struct watchdog_device *wdd, int err),
+ TP_ARGS(wdd, err));
+
+TRACE_EVENT(watchdog_set_timeout,
+
+ TP_PROTO(struct watchdog_device *wdd, unsigned int timeout, int err),
+
+ TP_ARGS(wdd, timeout, err),
+
+ TP_STRUCT__entry(
+ __field(int, id)
+ __field(unsigned int, timeout)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->id = wdd->id;
+ __entry->timeout = timeout;
+ __entry->err = err;
+ ),
+
+ TP_printk("watchdog%d timeout=%u err=%d", __entry->id, __entry->timeout, __entry->err)
+);
+
+#endif /* !defined(_TRACE_WATCHDOG_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/wbt.h b/include/trace/events/wbt.h
index 37342a13c9cb..9c66e59d859c 100644
--- a/include/trace/events/wbt.h
+++ b/include/trace/events/wbt.h
@@ -33,7 +33,7 @@ TRACE_EVENT(wbt_stat,
),
TP_fast_assign(
- strlcpy(__entry->name, dev_name(bdi->dev),
+ strlcpy(__entry->name, bdi_dev_name(bdi),
ARRAY_SIZE(__entry->name));
__entry->rmean = stat[0].mean;
__entry->rmin = stat[0].min;
@@ -46,7 +46,7 @@ TRACE_EVENT(wbt_stat,
),
TP_printk("%s: rmean=%llu, rmin=%llu, rmax=%llu, rsamples=%llu, "
- "wmean=%llu, wmin=%llu, wmax=%llu, wsamples=%llu\n",
+ "wmean=%llu, wmin=%llu, wmax=%llu, wsamples=%llu",
__entry->name, __entry->rmean, __entry->rmin, __entry->rmax,
__entry->rnr_samples, __entry->wmean, __entry->wmin,
__entry->wmax, __entry->wnr_samples)
@@ -68,12 +68,12 @@ TRACE_EVENT(wbt_lat,
),
TP_fast_assign(
- strlcpy(__entry->name, dev_name(bdi->dev),
+ strlcpy(__entry->name, bdi_dev_name(bdi),
ARRAY_SIZE(__entry->name));
__entry->lat = div_u64(lat, 1000);
),
- TP_printk("%s: latency %lluus\n", __entry->name,
+ TP_printk("%s: latency %lluus", __entry->name,
(unsigned long long) __entry->lat)
);
@@ -105,7 +105,7 @@ TRACE_EVENT(wbt_step,
),
TP_fast_assign(
- strlcpy(__entry->name, dev_name(bdi->dev),
+ strlcpy(__entry->name, bdi_dev_name(bdi),
ARRAY_SIZE(__entry->name));
__entry->msg = msg;
__entry->step = step;
@@ -115,7 +115,7 @@ TRACE_EVENT(wbt_step,
__entry->max = max;
),
- TP_printk("%s: %s: step=%d, window=%luus, background=%u, normal=%u, max=%u\n",
+ TP_printk("%s: %s: step=%d, window=%luus, background=%u, normal=%u, max=%u",
__entry->name, __entry->msg, __entry->step, __entry->window,
__entry->bg, __entry->normal, __entry->max)
);
@@ -141,14 +141,14 @@ TRACE_EVENT(wbt_timer,
),
TP_fast_assign(
- strlcpy(__entry->name, dev_name(bdi->dev),
+ strlcpy(__entry->name, bdi_dev_name(bdi),
ARRAY_SIZE(__entry->name));
__entry->status = status;
__entry->step = step;
__entry->inflight = inflight;
),
- TP_printk("%s: status=%u, step=%d, inflight=%u\n", __entry->name,
+ TP_printk("%s: status=%u, step=%d, inflight=%u", __entry->name,
__entry->status, __entry->step, __entry->inflight)
);
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
index 9b8ae961acc5..262d52021c23 100644
--- a/include/trace/events/workqueue.h
+++ b/include/trace/events/workqueue.h
@@ -22,7 +22,7 @@ struct pool_workqueue;
*/
TRACE_EVENT(workqueue_queue_work,
- TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq,
+ TP_PROTO(int req_cpu, struct pool_workqueue *pwq,
struct work_struct *work),
TP_ARGS(req_cpu, pwq, work),
@@ -30,21 +30,21 @@ TRACE_EVENT(workqueue_queue_work,
TP_STRUCT__entry(
__field( void *, work )
__field( void *, function)
- __field( void *, workqueue)
- __field( unsigned int, req_cpu )
- __field( unsigned int, cpu )
+ __string( workqueue, pwq->wq->name)
+ __field( int, req_cpu )
+ __field( int, cpu )
),
TP_fast_assign(
__entry->work = work;
__entry->function = work->func;
- __entry->workqueue = pwq->wq;
+ __assign_str(workqueue, pwq->wq->name);
__entry->req_cpu = req_cpu;
__entry->cpu = pwq->pool->cpu;
),
- TP_printk("work struct=%p function=%ps workqueue=%p req_cpu=%u cpu=%u",
- __entry->work, __entry->function, __entry->workqueue,
+ TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%d cpu=%d",
+ __entry->work, __entry->function, __get_str(workqueue),
__entry->req_cpu, __entry->cpu)
);
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index d94def25e4dc..86b2a82da546 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -20,7 +20,6 @@
{I_CLEAR, "I_CLEAR"}, \
{I_SYNC, "I_SYNC"}, \
{I_DIRTY_TIME, "I_DIRTY_TIME"}, \
- {I_DIRTY_TIME_EXPIRED, "I_DIRTY_TIME_EXPIRED"}, \
{I_REFERENCED, "I_REFERENCED"} \
)
@@ -36,9 +35,9 @@
EM( WB_REASON_SYNC, "sync") \
EM( WB_REASON_PERIODIC, "periodic") \
EM( WB_REASON_LAPTOP_TIMER, "laptop_timer") \
- EM( WB_REASON_FREE_MORE_MEM, "free_more_memory") \
EM( WB_REASON_FS_FREE_SPACE, "fs_free_space") \
- EMe(WB_REASON_FORKER_THREAD, "forker_thread")
+ EM( WB_REASON_FORKER_THREAD, "forker_thread") \
+ EMe(WB_REASON_FOREIGN_FLUSH, "foreign_flush")
WB_WORK_REASON
@@ -53,11 +52,11 @@ WB_WORK_REASON
struct wb_writeback_work;
-DECLARE_EVENT_CLASS(writeback_page_template,
+DECLARE_EVENT_CLASS(writeback_folio_template,
- TP_PROTO(struct page *page, struct address_space *mapping),
+ TP_PROTO(struct folio *folio, struct address_space *mapping),
- TP_ARGS(page, mapping),
+ TP_ARGS(folio, mapping),
TP_STRUCT__entry (
__array(char, name, 32)
@@ -70,7 +69,7 @@ DECLARE_EVENT_CLASS(writeback_page_template,
bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
NULL), 32);
__entry->ino = mapping ? mapping->host->i_ino : 0;
- __entry->index = page->index;
+ __entry->index = folio->index;
),
TP_printk("bdi %s: ino=%lu index=%lu",
@@ -80,18 +79,18 @@ DECLARE_EVENT_CLASS(writeback_page_template,
)
);
-DEFINE_EVENT(writeback_page_template, writeback_dirty_page,
+DEFINE_EVENT(writeback_folio_template, writeback_dirty_folio,
- TP_PROTO(struct page *page, struct address_space *mapping),
+ TP_PROTO(struct folio *folio, struct address_space *mapping),
- TP_ARGS(page, mapping)
+ TP_ARGS(folio, mapping)
);
-DEFINE_EVENT(writeback_page_template, wait_on_page_writeback,
+DEFINE_EVENT(writeback_folio_template, folio_wait_writeback,
- TP_PROTO(struct page *page, struct address_space *mapping),
+ TP_PROTO(struct folio *folio, struct address_space *mapping),
- TP_ARGS(page, mapping)
+ TP_ARGS(folio, mapping)
);
DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
@@ -192,7 +191,7 @@ TRACE_EVENT(inode_foreign_history,
),
TP_fast_assign(
- strncpy(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
+ strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
__entry->ino = inode->i_ino;
__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
__entry->history = history;
@@ -221,7 +220,7 @@ TRACE_EVENT(inode_switch_wbs,
),
TP_fast_assign(
- strncpy(__entry->name, bdi_dev_name(old_wb->bdi), 32);
+ strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
__entry->ino = inode->i_ino;
__entry->old_cgroup_ino = __trace_wb_assign_cgroup(old_wb);
__entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb);
@@ -237,9 +236,9 @@ TRACE_EVENT(inode_switch_wbs,
TRACE_EVENT(track_foreign_dirty,
- TP_PROTO(struct page *page, struct bdi_writeback *wb),
+ TP_PROTO(struct folio *folio, struct bdi_writeback *wb),
- TP_ARGS(page, wb),
+ TP_ARGS(folio, wb),
TP_STRUCT__entry(
__array(char, name, 32)
@@ -251,15 +250,15 @@ TRACE_EVENT(track_foreign_dirty,
),
TP_fast_assign(
- struct address_space *mapping = page_mapping(page);
+ struct address_space *mapping = folio_mapping(folio);
struct inode *inode = mapping ? mapping->host : NULL;
- strncpy(__entry->name, bdi_dev_name(wb->bdi), 32);
+ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
__entry->bdi_id = wb->bdi->id;
__entry->ino = inode ? inode->i_ino : 0;
__entry->memcg_id = wb->memcg_css->id;
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
- __entry->page_cgroup_ino = cgroup_ino(page->mem_cgroup->css.cgroup);
+ __entry->page_cgroup_ino = cgroup_ino(folio_memcg(folio)->css.cgroup);
),
TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
@@ -287,7 +286,7 @@ TRACE_EVENT(flush_foreign,
),
TP_fast_assign(
- strncpy(__entry->name, bdi_dev_name(wb->bdi), 32);
+ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
__entry->frn_bdi_id = frn_bdi_id;
__entry->frn_memcg_id = frn_memcg_id;
@@ -499,8 +498,9 @@ DEFINE_WBC_EVENT(wbc_writepage);
TRACE_EVENT(writeback_queue_io,
TP_PROTO(struct bdi_writeback *wb,
struct wb_writeback_work *work,
+ unsigned long dirtied_before,
int moved),
- TP_ARGS(wb, work, moved),
+ TP_ARGS(wb, work, dirtied_before, moved),
TP_STRUCT__entry(
__array(char, name, 32)
__field(unsigned long, older)
@@ -510,19 +510,17 @@ TRACE_EVENT(writeback_queue_io,
__field(ino_t, cgroup_ino)
),
TP_fast_assign(
- unsigned long *older_than_this = work->older_than_this;
strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
- __entry->older = older_than_this ? *older_than_this : 0;
- __entry->age = older_than_this ?
- (jiffies - *older_than_this) * 1000 / HZ : -1;
+ __entry->older = dirtied_before;
+ __entry->age = (jiffies - dirtied_before) * 1000 / HZ;
__entry->moved = moved;
__entry->reason = work->reason;
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu",
__entry->name,
- __entry->older, /* older_than_this in jiffies */
- __entry->age, /* older_than_this in relative milliseconds */
+ __entry->older, /* dirtied_before in jiffies */
+ __entry->age, /* dirtied_before in relative milliseconds */
__entry->moved,
__print_symbolic(__entry->reason, WB_WORK_REASON),
(unsigned long)__entry->cgroup_ino
@@ -542,7 +540,6 @@ TRACE_EVENT(global_dirty_state,
TP_STRUCT__entry(
__field(unsigned long, nr_dirty)
__field(unsigned long, nr_writeback)
- __field(unsigned long, nr_unstable)
__field(unsigned long, background_thresh)
__field(unsigned long, dirty_thresh)
__field(unsigned long, dirty_limit)
@@ -553,7 +550,6 @@ TRACE_EVENT(global_dirty_state,
TP_fast_assign(
__entry->nr_dirty = global_node_page_state(NR_FILE_DIRTY);
__entry->nr_writeback = global_node_page_state(NR_WRITEBACK);
- __entry->nr_unstable = global_node_page_state(NR_UNSTABLE_NFS);
__entry->nr_dirtied = global_node_page_state(NR_DIRTIED);
__entry->nr_written = global_node_page_state(NR_WRITTEN);
__entry->background_thresh = background_thresh;
@@ -561,12 +557,11 @@ TRACE_EVENT(global_dirty_state,
__entry->dirty_limit = global_wb_domain.dirty_limit;
),
- TP_printk("dirty=%lu writeback=%lu unstable=%lu "
+ TP_printk("dirty=%lu writeback=%lu "
"bg_thresh=%lu thresh=%lu limit=%lu "
"dirtied=%lu written=%lu",
__entry->nr_dirty,
__entry->nr_writeback,
- __entry->nr_unstable,
__entry->background_thresh,
__entry->dirty_thresh,
__entry->dirty_limit,
@@ -740,41 +735,6 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
)
);
-DECLARE_EVENT_CLASS(writeback_congest_waited_template,
-
- TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
-
- TP_ARGS(usec_timeout, usec_delayed),
-
- TP_STRUCT__entry(
- __field( unsigned int, usec_timeout )
- __field( unsigned int, usec_delayed )
- ),
-
- TP_fast_assign(
- __entry->usec_timeout = usec_timeout;
- __entry->usec_delayed = usec_delayed;
- ),
-
- TP_printk("usec_timeout=%u usec_delayed=%u",
- __entry->usec_timeout,
- __entry->usec_delayed)
-);
-
-DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,
-
- TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
-
- TP_ARGS(usec_timeout, usec_delayed)
-);
-
-DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,
-
- TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
-
- TP_ARGS(usec_timeout, usec_delayed)
-);
-
DECLARE_EVENT_CLASS(writeback_single_inode_template,
TP_PROTO(struct inode *inode,
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index b95d65e8c628..c40fc97f9417 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -86,19 +86,15 @@ struct _bpf_dtab_netdev {
};
#endif /* __DEVMAP_OBJ_TYPE */
-#define devmap_ifindex(tgt, map) \
- (((map->map_type == BPF_MAP_TYPE_DEVMAP || \
- map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)) ? \
- ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex : 0)
-
DECLARE_EVENT_CLASS(xdp_redirect_template,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
const void *tgt, int err,
- const struct bpf_map *map, u32 index),
+ enum bpf_map_type map_type,
+ u32 map_id, u32 index),
- TP_ARGS(dev, xdp, tgt, err, map, index),
+ TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index),
TP_STRUCT__entry(
__field(int, prog_id)
@@ -111,14 +107,26 @@ DECLARE_EVENT_CLASS(xdp_redirect_template,
),
TP_fast_assign(
+ u32 ifindex = 0, map_index = index;
+
+ if (map_type == BPF_MAP_TYPE_DEVMAP || map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
+ /* Leave to_ifindex as 0 when doing a broadcast redirect,
+ * as tgt will be NULL.
+ */
+ if (tgt)
+ ifindex = ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex;
+ } else if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
+ ifindex = index;
+ map_index = 0;
+ }
+
__entry->prog_id = xdp->aux->id;
__entry->act = XDP_REDIRECT;
__entry->ifindex = dev->ifindex;
__entry->err = err;
- __entry->to_ifindex = map ? devmap_ifindex(tgt, map) :
- index;
- __entry->map_id = map ? map->id : 0;
- __entry->map_index = map ? index : 0;
+ __entry->to_ifindex = ifindex;
+ __entry->map_id = map_id;
+ __entry->map_index = map_index;
),
TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
@@ -133,53 +141,57 @@ DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
const void *tgt, int err,
- const struct bpf_map *map, u32 index),
- TP_ARGS(dev, xdp, tgt, err, map, index)
+ enum bpf_map_type map_type,
+ u32 map_id, u32 index),
+ TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
const void *tgt, int err,
- const struct bpf_map *map, u32 index),
- TP_ARGS(dev, xdp, tgt, err, map, index)
+ enum bpf_map_type map_type,
+ u32 map_id, u32 index),
+ TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);
-#define _trace_xdp_redirect(dev, xdp, to) \
- trace_xdp_redirect(dev, xdp, NULL, 0, NULL, to);
+#define _trace_xdp_redirect(dev, xdp, to) \
+ trace_xdp_redirect(dev, xdp, NULL, 0, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)
-#define _trace_xdp_redirect_err(dev, xdp, to, err) \
- trace_xdp_redirect_err(dev, xdp, NULL, err, NULL, to);
+#define _trace_xdp_redirect_err(dev, xdp, to, err) \
+ trace_xdp_redirect_err(dev, xdp, NULL, err, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)
-#define _trace_xdp_redirect_map(dev, xdp, to, map, index) \
- trace_xdp_redirect(dev, xdp, to, 0, map, index);
+#define _trace_xdp_redirect_map(dev, xdp, to, map_type, map_id, index) \
+ trace_xdp_redirect(dev, xdp, to, 0, map_type, map_id, index)
-#define _trace_xdp_redirect_map_err(dev, xdp, to, map, index, err) \
- trace_xdp_redirect_err(dev, xdp, to, err, map, index);
+#define _trace_xdp_redirect_map_err(dev, xdp, to, map_type, map_id, index, err) \
+ trace_xdp_redirect_err(dev, xdp, to, err, map_type, map_id, index)
/* not used anymore, but kept around so as not to break old programs */
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
const void *tgt, int err,
- const struct bpf_map *map, u32 index),
- TP_ARGS(dev, xdp, tgt, err, map, index)
+ enum bpf_map_type map_type,
+ u32 map_id, u32 index),
+ TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
const void *tgt, int err,
- const struct bpf_map *map, u32 index),
- TP_ARGS(dev, xdp, tgt, err, map, index)
+ enum bpf_map_type map_type,
+ u32 map_id, u32 index),
+ TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);
TRACE_EVENT(xdp_cpumap_kthread,
TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
- int sched),
+ int sched, struct xdp_cpumap_stats *xdp_stats),
- TP_ARGS(map_id, processed, drops, sched),
+ TP_ARGS(map_id, processed, drops, sched, xdp_stats),
TP_STRUCT__entry(
__field(int, map_id)
@@ -188,6 +200,9 @@ TRACE_EVENT(xdp_cpumap_kthread,
__field(unsigned int, drops)
__field(unsigned int, processed)
__field(int, sched)
+ __field(unsigned int, xdp_pass)
+ __field(unsigned int, xdp_drop)
+ __field(unsigned int, xdp_redirect)
),
TP_fast_assign(
@@ -197,16 +212,21 @@ TRACE_EVENT(xdp_cpumap_kthread,
__entry->drops = drops;
__entry->processed = processed;
__entry->sched = sched;
+ __entry->xdp_pass = xdp_stats->pass;
+ __entry->xdp_drop = xdp_stats->drop;
+ __entry->xdp_redirect = xdp_stats->redirect;
),
TP_printk("kthread"
" cpu=%d map_id=%d action=%s"
" processed=%u drops=%u"
- " sched=%d",
+ " sched=%d"
+ " xdp_pass=%u xdp_drop=%u xdp_redirect=%u",
__entry->cpu, __entry->map_id,
__print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
__entry->processed, __entry->drops,
- __entry->sched)
+ __entry->sched,
+ __entry->xdp_pass, __entry->xdp_drop, __entry->xdp_redirect)
);
TRACE_EVENT(xdp_cpumap_enqueue,
@@ -287,7 +307,7 @@ TRACE_EVENT(xdp_devmap_xmit,
FN(PAGE_SHARED) \
FN(PAGE_ORDER0) \
FN(PAGE_POOL) \
- FN(ZERO_COPY)
+ FN(XSK_BUFF_POOL)
#define __MEM_TYPE_TP_FN(x) \
TRACE_DEFINE_ENUM(MEM_TYPE_##x);
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index a5ccfa67bc5c..44a3f565264d 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -153,26 +153,6 @@ DECLARE_EVENT_CLASS(xen_mmu__set_pte,
DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte);
-TRACE_EVENT(xen_mmu_set_pte_at,
- TP_PROTO(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval),
- TP_ARGS(mm, addr, ptep, pteval),
- TP_STRUCT__entry(
- __field(struct mm_struct *, mm)
- __field(unsigned long, addr)
- __field(pte_t *, ptep)
- __field(pteval_t, pteval)
- ),
- TP_fast_assign(__entry->mm = mm;
- __entry->addr = addr;
- __entry->ptep = ptep;
- __entry->pteval = pteval.pte),
- TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
- __entry->mm, __entry->addr, __entry->ptep,
- (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
- (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
- );
-
TRACE_DEFINE_SIZEOF(pmdval_t);
TRACE_EVENT(xen_mmu_set_pmd,
@@ -366,7 +346,7 @@ TRACE_EVENT(xen_mmu_flush_tlb_one_user,
TP_printk("addr %lx", __entry->addr)
);
-TRACE_EVENT(xen_mmu_flush_tlb_others,
+TRACE_EVENT(xen_mmu_flush_tlb_multi,
TP_PROTO(const struct cpumask *cpus, struct mm_struct *mm,
unsigned long addr, unsigned long end),
TP_ARGS(cpus, mm, addr, end),
diff --git a/include/trace/perf.h b/include/trace/perf.h
index dbc6c74defc3..5800d13146c3 100644
--- a/include/trace/perf.h
+++ b/include/trace/perf.h
@@ -21,6 +21,29 @@
#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+#undef __get_sockaddr
+#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field))
+
+#undef __get_rel_dynamic_array
+#define __get_rel_dynamic_array(field) \
+ ((void *)__entry + \
+ offsetof(typeof(*__entry), __rel_loc_##field) + \
+ sizeof(__entry->__rel_loc_##field) + \
+ (__entry->__rel_loc_##field & 0xffff))
+
+#undef __get_rel_dynamic_array_len
+#define __get_rel_dynamic_array_len(field) \
+ ((__entry->__rel_loc_##field >> 16) & 0xffff)
+
+#undef __get_rel_str
+#define __get_rel_str(field) ((char *)__get_rel_dynamic_array(field))
+
+#undef __get_rel_bitmask
+#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)
+
+#undef __get_rel_sockaddr
+#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field))
+
#undef __perf_count
#define __perf_count(c) (__count = (c))
diff --git a/include/trace/stages/init.h b/include/trace/stages/init.h
new file mode 100644
index 000000000000..000bcfc8dd2e
--- /dev/null
+++ b/include/trace/stages/init.h
@@ -0,0 +1,37 @@
+
+#define __app__(x, y) str__##x##y
+#define __app(x, y) __app__(x, y)
+
+#define TRACE_SYSTEM_STRING __app(TRACE_SYSTEM_VAR,__trace_system_name)
+
+#define TRACE_MAKE_SYSTEM_STR() \
+ static const char TRACE_SYSTEM_STRING[] = \
+ __stringify(TRACE_SYSTEM)
+
+TRACE_MAKE_SYSTEM_STR();
+
+#undef TRACE_DEFINE_ENUM
+#define TRACE_DEFINE_ENUM(a) \
+ static struct trace_eval_map __used __initdata \
+ __##TRACE_SYSTEM##_##a = \
+ { \
+ .system = TRACE_SYSTEM_STRING, \
+ .eval_string = #a, \
+ .eval_value = a \
+ }; \
+ static struct trace_eval_map __used \
+ __section("_ftrace_eval_map") \
+ *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a
+
+#undef TRACE_DEFINE_SIZEOF
+#define TRACE_DEFINE_SIZEOF(a) \
+ static struct trace_eval_map __used __initdata \
+ __##TRACE_SYSTEM##_##a = \
+ { \
+ .system = TRACE_SYSTEM_STRING, \
+ .eval_string = "sizeof(" #a ")", \
+ .eval_value = sizeof(a) \
+ }; \
+ static struct trace_eval_map __used \
+ __section("_ftrace_eval_map") \
+ *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a
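
Each TRACE_DEFINE_ENUM() expansion above drops a {system, name, value} record into the _ftrace_eval_map section, from which the tracer rewrites enum names in print formats at boot. A userspace-buildable sketch of the record generation, minus the __section() placement; the "ufs" system name and the two states are just sample inputs:

#include <stdio.h>

struct trace_eval_map {
	const char *system;
	const char *eval_string;
	unsigned long eval_value;
};

enum { UIC_LINK_OFF_STATE = 1, UIC_LINK_ACTIVE_STATE = 2 };

/* Same shape as the TRACE_DEFINE_ENUM() expansion above, without the
 * __section("_ftrace_eval_map") placement. */
#define TRACE_DEFINE_ENUM(a) \
	{ .system = "ufs", .eval_string = #a, .eval_value = a }

static const struct trace_eval_map maps[] = {
	TRACE_DEFINE_ENUM(UIC_LINK_OFF_STATE),
	TRACE_DEFINE_ENUM(UIC_LINK_ACTIVE_STATE),
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(maps) / sizeof(maps[0]); i++)
		printf("%s: %s = %lu\n", maps[i].system,
		       maps[i].eval_string, maps[i].eval_value);
	return 0;
}
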
diff --git a/include/trace/stages/stage1_struct_define.h b/include/trace/stages/stage1_struct_define.h
new file mode 100644
index 000000000000..1b7bab60434c
--- /dev/null
+++ b/include/trace/stages/stage1_struct_define.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Stage 1 definitions for creating trace events */
+
+#undef __field
+#define __field(type, item) type item;
+
+#undef __field_ext
+#define __field_ext(type, item, filter_type) type item;
+
+#undef __field_struct
+#define __field_struct(type, item) type item;
+
+#undef __field_struct_ext
+#define __field_struct_ext(type, item, filter_type) type item;
+
+#undef __array
+#define __array(type, item, len) type item[len];
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) u32 __data_loc_##item;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef __string_len
+#define __string_len(item, src, len) __dynamic_array(char, item, -1)
+
+#undef __vstring
+#define __vstring(item, fmt, ap) __dynamic_array(char, item, -1)
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)
+
+#undef __sockaddr
+#define __sockaddr(field, len) __dynamic_array(u8, field, len)
+
+#undef __rel_dynamic_array
+#define __rel_dynamic_array(type, item, len) u32 __rel_loc_##item;
+
+#undef __rel_string
+#define __rel_string(item, src) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_string_len
+#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_bitmask
+#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_sockaddr
+#define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len)
+
+#undef TP_STRUCT__entry
+#define TP_STRUCT__entry(args...) args
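
Stage 1 turns a TP_STRUCT__entry() body into plain struct members, with every dynamic item collapsing to a single u32 location word. A hand-expanded sketch for a hypothetical event with one fixed field and one string; the real struct is additionally prefixed with struct trace_entry, omitted here:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical event:
 *	__field(int, cpu)  __string(comm, src)
 * After stage 1, TP_STRUCT__entry() has produced roughly: */
struct trace_event_raw_demo {
	int cpu;			/* __field(int, cpu) */
	unsigned int __data_loc_comm;	/* __string() -> u32 location word */
	char __data[];			/* variable-size payload follows */
};

int main(void)
{
	printf("fixed part is %zu bytes; the string lives in __data\n",
	       offsetof(struct trace_event_raw_demo, __data));
	return 0;
}
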
diff --git a/include/trace/stages/stage2_data_offsets.h b/include/trace/stages/stage2_data_offsets.h
new file mode 100644
index 000000000000..1b7a8f764fdd
--- /dev/null
+++ b/include/trace/stages/stage2_data_offsets.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Stage 2 definitions for creating trace events */
+
+#undef TRACE_DEFINE_ENUM
+#define TRACE_DEFINE_ENUM(a)
+
+#undef TRACE_DEFINE_SIZEOF
+#define TRACE_DEFINE_SIZEOF(a)
+
+#undef __field
+#define __field(type, item)
+
+#undef __field_ext
+#define __field_ext(type, item, filter_type)
+
+#undef __field_struct
+#define __field_struct(type, item)
+
+#undef __field_struct_ext
+#define __field_struct_ext(type, item, filter_type)
+
+#undef __array
+#define __array(type, item, len)
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) u32 item;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef __string_len
+#define __string_len(item, src, len) __dynamic_array(char, item, -1)
+
+#undef __vstring
+#define __vstring(item, fmt, ap) __dynamic_array(char, item, -1)
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
+#undef __sockaddr
+#define __sockaddr(field, len) __dynamic_array(u8, field, len)
+
+#undef __rel_dynamic_array
+#define __rel_dynamic_array(type, item, len) u32 item;
+
+#undef __rel_string
+#define __rel_string(item, src) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_string_len
+#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_bitmask
+#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1)
+
+#undef __rel_sockaddr
+#define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len)
diff --git a/include/trace/stages/stage3_trace_output.h b/include/trace/stages/stage3_trace_output.h
new file mode 100644
index 000000000000..e3b183e9d18e
--- /dev/null
+++ b/include/trace/stages/stage3_trace_output.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Stage 3 definitions for creating trace events */
+
+#undef __entry
+#define __entry field
+
+#undef TP_printk
+#define TP_printk(fmt, args...) fmt "\n", args
+
+#undef __get_dynamic_array
+#define __get_dynamic_array(field) \
+ ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
+
+#undef __get_dynamic_array_len
+#define __get_dynamic_array_len(field) \
+ ((__entry->__data_loc_##field >> 16) & 0xffff)
+
+#undef __get_str
+#define __get_str(field) ((char *)__get_dynamic_array(field))
+
+#undef __get_rel_dynamic_array
+#define __get_rel_dynamic_array(field) \
+ ((void *)__entry + \
+ offsetof(typeof(*__entry), __rel_loc_##field) + \
+ sizeof(__entry->__rel_loc_##field) + \
+ (__entry->__rel_loc_##field & 0xffff))
+
+#undef __get_rel_dynamic_array_len
+#define __get_rel_dynamic_array_len(field) \
+ ((__entry->__rel_loc_##field >> 16) & 0xffff)
+
+#undef __get_rel_str
+#define __get_rel_str(field) ((char *)__get_rel_dynamic_array(field))
+
+#undef __get_bitmask
+#define __get_bitmask(field) \
+ ({ \
+ void *__bitmask = __get_dynamic_array(field); \
+ unsigned int __bitmask_size; \
+ __bitmask_size = __get_dynamic_array_len(field); \
+ trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
+ })
+
+#undef __get_rel_bitmask
+#define __get_rel_bitmask(field) \
+ ({ \
+ void *__bitmask = __get_rel_dynamic_array(field); \
+ unsigned int __bitmask_size; \
+ __bitmask_size = __get_rel_dynamic_array_len(field); \
+ trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
+ })
+
+#undef __get_sockaddr
+#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field))
+
+#undef __get_rel_sockaddr
+#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field))
+
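
Both location words pack a 16-bit length above a 16-bit offset; __data_loc offsets are relative to the start of the record, while __rel_loc offsets are relative to the byte just after the location word itself. A decode sketch over a hand-built record using the __data_loc convention (demo_entry is a hypothetical layout):

#include <stdio.h>
#include <string.h>

struct demo_entry {
	unsigned int __data_loc_name;	/* len << 16 | offset-from-entry */
	char __data[32];
};

int main(void)
{
	struct demo_entry e;
	const char *s = "kworker/0:1";
	unsigned short len = strlen(s) + 1;
	unsigned short off = sizeof(e.__data_loc_name);	/* __data offset */

	e.__data_loc_name = (unsigned int)len << 16 | off;
	memcpy((char *)&e + off, s, len);

	/* __get_str() / __get_dynamic_array_len() equivalents: */
	printf("name=%s len=%u\n",
	       (char *)&e + (e.__data_loc_name & 0xffff),
	       (e.__data_loc_name >> 16) & 0xffff);
	return 0;
}
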
+#undef __print_flags
+#define __print_flags(flag, delim, flag_array...) \
+ ({ \
+ static const struct trace_print_flags __flags[] = \
+ { flag_array, { -1, NULL }}; \
+ trace_print_flags_seq(p, delim, flag, __flags); \
+ })
+
+#undef __print_symbolic
+#define __print_symbolic(value, symbol_array...) \
+ ({ \
+ static const struct trace_print_flags symbols[] = \
+ { symbol_array, { -1, NULL }}; \
+ trace_print_symbols_seq(p, value, symbols); \
+ })
+
+#undef __print_flags_u64
+#undef __print_symbolic_u64
+#if BITS_PER_LONG == 32
+#define __print_flags_u64(flag, delim, flag_array...) \
+ ({ \
+ static const struct trace_print_flags_u64 __flags[] = \
+ { flag_array, { -1, NULL } }; \
+ trace_print_flags_seq_u64(p, delim, flag, __flags); \
+ })
+
+#define __print_symbolic_u64(value, symbol_array...) \
+ ({ \
+ static const struct trace_print_flags_u64 symbols[] = \
+ { symbol_array, { -1, NULL } }; \
+ trace_print_symbols_seq_u64(p, value, symbols); \
+ })
+#else
+#define __print_flags_u64(flag, delim, flag_array...) \
+ __print_flags(flag, delim, flag_array)
+
+#define __print_symbolic_u64(value, symbol_array...) \
+ __print_symbolic(value, symbol_array)
+#endif
+
+#undef __print_hex
+#define __print_hex(buf, buf_len) \
+ trace_print_hex_seq(p, buf, buf_len, false)
+
+#undef __print_hex_str
+#define __print_hex_str(buf, buf_len) \
+ trace_print_hex_seq(p, buf, buf_len, true)
+
+#undef __print_array
+#define __print_array(array, count, el_size) \
+ ({ \
+ BUILD_BUG_ON(el_size != 1 && el_size != 2 && \
+ el_size != 4 && el_size != 8); \
+ trace_print_array_seq(p, array, count, el_size); \
+ })
+
+#undef __print_hex_dump
+#define __print_hex_dump(prefix_str, prefix_type, \
+ rowsize, groupsize, buf, len, ascii) \
+ trace_print_hex_dump_seq(p, prefix_str, prefix_type, \
+ rowsize, groupsize, buf, len, ascii)
+
+#undef __print_ns_to_secs
+#define __print_ns_to_secs(value) \
+ ({ \
+ u64 ____val = (u64)(value); \
+ do_div(____val, NSEC_PER_SEC); \
+ ____val; \
+ })
+
+#undef __print_ns_without_secs
+#define __print_ns_without_secs(value) \
+ ({ \
+ u64 ____val = (u64)(value); \
+ (u32) do_div(____val, NSEC_PER_SEC); \
+ })
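
The two helpers split one nanosecond timestamp: do_div() divides ____val in place and returns the remainder, so __print_ns_to_secs() yields whole seconds and __print_ns_without_secs() the sub-second part. The same split in plain userspace arithmetic:

#include <stdio.h>
#include <inttypes.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t ts = 5123456789ULL;		/* 5.123456789 s */
	uint64_t secs = ts / NSEC_PER_SEC;	/* __print_ns_to_secs() */
	uint32_t rem = ts % NSEC_PER_SEC;	/* __print_ns_without_secs() */

	printf("%" PRIu64 ".%09u\n", secs, rem);
	return 0;
}
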
diff --git a/include/trace/stages/stage4_event_fields.h b/include/trace/stages/stage4_event_fields.h
new file mode 100644
index 000000000000..a8fb25f39a99
--- /dev/null
+++ b/include/trace/stages/stage4_event_fields.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Stage 4 definitions for creating trace events */
+
+#define ALIGN_STRUCTFIELD(type) ((int)(__alignof__(struct {type b;})))
+
+#undef __field_ext
+#define __field_ext(_type, _item, _filter_type) { \
+ .type = #_type, .name = #_item, \
+ .size = sizeof(_type), .align = ALIGN_STRUCTFIELD(_type), \
+ .is_signed = is_signed_type(_type), .filter_type = _filter_type },
+
+#undef __field_struct_ext
+#define __field_struct_ext(_type, _item, _filter_type) { \
+ .type = #_type, .name = #_item, \
+ .size = sizeof(_type), .align = ALIGN_STRUCTFIELD(_type), \
+ 0, .filter_type = _filter_type },
+
+#undef __field
+#define __field(type, item) __field_ext(type, item, FILTER_OTHER)
+
+#undef __field_struct
+#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER)
+
+#undef __array
+#define __array(_type, _item, _len) { \
+ .type = #_type"["__stringify(_len)"]", .name = #_item, \
+ .size = sizeof(_type[_len]), .align = ALIGN_STRUCTFIELD(_type), \
+ .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
+
+#undef __dynamic_array
+#define __dynamic_array(_type, _item, _len) { \
+ .type = "__data_loc " #_type "[]", .name = #_item, \
+ .size = 4, .align = 4, \
+ .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef __string_len
+#define __string_len(item, src, len) __dynamic_array(char, item, -1)
+
+#undef __vstring
+#define __vstring(item, fmt, ap) __dynamic_array(char, item, -1)
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
+#undef __sockaddr
+#define __sockaddr(field, len) __dynamic_array(u8, field, len)
+
+#undef __rel_dynamic_array
+#define __rel_dynamic_array(_type, _item, _len) { \
+ .type = "__rel_loc " #_type "[]", .name = #_item, \
+ .size = 4, .align = 4, \
+ .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
+
+#undef __rel_string
+#define __rel_string(item, src) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_string_len
+#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_bitmask
+#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1)
+
+#undef __rel_sockaddr
+#define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len)
diff --git a/include/trace/stages/stage5_get_offsets.h b/include/trace/stages/stage5_get_offsets.h
new file mode 100644
index 000000000000..fba4c24ed9e6
--- /dev/null
+++ b/include/trace/stages/stage5_get_offsets.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Stage 5 definitions for creating trace events */
+
+/*
+ * remember the offset of each array from the beginning of the event.
+ */
+
+#undef __entry
+#define __entry entry
+
+#undef __field
+#define __field(type, item)
+
+#undef __field_ext
+#define __field_ext(type, item, filter_type)
+
+#undef __field_struct
+#define __field_struct(type, item)
+
+#undef __field_struct_ext
+#define __field_struct_ext(type, item, filter_type)
+
+#undef __array
+#define __array(type, item, len)
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) \
+ __item_length = (len) * sizeof(type); \
+ __data_offsets->item = __data_size + \
+ offsetof(typeof(*entry), __data); \
+ __data_offsets->item |= __item_length << 16; \
+ __data_size += __item_length;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, \
+ strlen((src) ? (const char *)(src) : "(null)") + 1)
+
+#undef __string_len
+#define __string_len(item, src, len) __dynamic_array(char, item, (len) + 1)
+
+#undef __vstring
+#define __vstring(item, fmt, ap) __dynamic_array(char, item, \
+ __trace_event_vstr_len(fmt, ap))
+
+#undef __rel_dynamic_array
+#define __rel_dynamic_array(type, item, len) \
+ __item_length = (len) * sizeof(type); \
+ __data_offsets->item = __data_size + \
+ offsetof(typeof(*entry), __data) - \
+ offsetof(typeof(*entry), __rel_loc_##item) - \
+ sizeof(u32); \
+ __data_offsets->item |= __item_length << 16; \
+ __data_size += __item_length;
+
+#undef __rel_string
+#define __rel_string(item, src) __rel_dynamic_array(char, item, \
+ strlen((src) ? (const char *)(src) : "(null)") + 1)
+
+#undef __rel_string_len
+#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, (len) + 1)
+/*
+ * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
+ * num_possible_cpus().
+ */
+#define __bitmask_size_in_bytes_raw(nr_bits) \
+ (((nr_bits) + 7) / 8)
+
+#define __bitmask_size_in_longs(nr_bits) \
+ ((__bitmask_size_in_bytes_raw(nr_bits) + \
+ ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))
+
+/*
+ * __bitmask_size_in_bytes is the number of bytes needed to hold
+ * num_possible_cpus() padded out to the nearest long. This is what
+ * is saved in the buffer, just to be consistent.
+ */
+#define __bitmask_size_in_bytes(nr_bits) \
+ (__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))
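A quick worked example of the three helpers, assuming BITS_PER_LONG == 64 and nr_bits == 36:

	__bitmask_size_in_bytes_raw(36) = (36 + 7) / 8 = 5
	__bitmask_size_in_longs(36)     = (5 + 7) / 8  = 1
	__bitmask_size_in_bytes(36)     = 1 * (64 / 8) = 8

so the buffer stores 8 bytes even though only 5 are strictly needed.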
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, \
+ __bitmask_size_in_longs(nr_bits))
+
+#undef __rel_bitmask
+#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, \
+ __bitmask_size_in_longs(nr_bits))
+
+#undef __sockaddr
+#define __sockaddr(field, len) __dynamic_array(u8, field, len)
+
+#undef __rel_sockaddr
+#define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len)
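Both location words built here are a single u32 with the byte offset in the low 16 bits and the length in the high 16 (for __data_loc the offset is measured from the start of the entry, for __rel_loc from just past the location field itself). A minimal decoding sketch:

	/* decode a __data_loc / __rel_loc word written by stage 5 */
	static inline void decode_loc(u32 loc, u16 *offset, u16 *len)
	{
		*offset = loc & 0xffff;
		*len = (loc >> 16) & 0xffff;
	}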
diff --git a/include/trace/stages/stage6_event_callback.h b/include/trace/stages/stage6_event_callback.h
new file mode 100644
index 000000000000..3c554a585320
--- /dev/null
+++ b/include/trace/stages/stage6_event_callback.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Stage 6 definitions for creating trace events */
+
+#undef __entry
+#define __entry entry
+
+#undef __field
+#define __field(type, item)
+
+#undef __field_struct
+#define __field_struct(type, item)
+
+#undef __array
+#define __array(type, item, len)
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) \
+ __entry->__data_loc_##item = __data_offsets.item;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef __string_len
+#define __string_len(item, src, len) __dynamic_array(char, item, -1)
+
+#undef __vstring
+#define __vstring(item, fmt, ap) __dynamic_array(char, item, -1)
+
+#undef __assign_str
+#define __assign_str(dst, src) \
+ strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
+
+#undef __assign_str_len
+#define __assign_str_len(dst, src, len) \
+ do { \
+ memcpy(__get_str(dst), (src), (len)); \
+ __get_str(dst)[len] = '\0'; \
+ } while (0)
+
+#undef __assign_vstr
+#define __assign_vstr(dst, fmt, va) \
+ do { \
+ va_list __cp_va; \
+ va_copy(__cp_va, *(va)); \
+ vsnprintf(__get_str(dst), TRACE_EVENT_STR_MAX, fmt, __cp_va); \
+ va_end(__cp_va); \
+ } while (0)
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
+#undef __get_bitmask
+#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+
+#undef __assign_bitmask
+#define __assign_bitmask(dst, src, nr_bits) \
+ memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
+
+#undef __sockaddr
+#define __sockaddr(field, len) __dynamic_array(u8, field, len)
+
+#undef __get_sockaddr
+#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field))
+
+#undef __assign_sockaddr
+#define __assign_sockaddr(dest, src, len) \
+ memcpy(__get_dynamic_array(dest), src, len)
+
+#undef __rel_dynamic_array
+#define __rel_dynamic_array(type, item, len) \
+ __entry->__rel_loc_##item = __data_offsets.item;
+
+#undef __rel_string
+#define __rel_string(item, src) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_string_len
+#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1)
+
+#undef __assign_rel_str
+#define __assign_rel_str(dst, src) \
+ strcpy(__get_rel_str(dst), (src) ? (const char *)(src) : "(null)");
+
+#undef __assign_rel_str_len
+#define __assign_rel_str_len(dst, src, len) \
+ do { \
+ memcpy(__get_rel_str(dst), (src), (len)); \
+ __get_rel_str(dst)[len] = '\0'; \
+ } while (0)
+
+#undef __rel_bitmask
+#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1)
+
+#undef __get_rel_bitmask
+#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)
+
+#undef __assign_rel_bitmask
+#define __assign_rel_bitmask(dst, src, nr_bits) \
+ memcpy(__get_rel_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
+
+#undef __rel_sockaddr
+#define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len)
+
+#undef __get_rel_sockaddr
+#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field))
+
+#undef __assign_rel_sockaddr
+#define __assign_rel_sockaddr(dest, src, len) \
+ memcpy(__get_rel_dynamic_array(dest), src, len)
+
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef __perf_count
+#define __perf_count(c) (c)
+
+#undef __perf_task
+#define __perf_task(t) (t)
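Putting stages 5 and 6 together, the event author only writes the assignment half; a hedged sketch of how the helpers pair up inside TP_fast_assign (the event and its `task` argument are hypothetical):

	TP_fast_assign(
		__entry->pid = task->pid;		/* plain __field */
		__assign_str(comm, task->comm);	/* fills the __string slot sized in stage 5 */
	)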
diff --git a/include/trace/stages/stage7_class_define.h b/include/trace/stages/stage7_class_define.h
new file mode 100644
index 000000000000..8a7ec24c246d
--- /dev/null
+++ b/include/trace/stages/stage7_class_define.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Stage 7 definitions for creating trace events */
+
+#undef __entry
+#define __entry REC
+
+#undef __print_flags
+#undef __print_symbolic
+#undef __print_hex
+#undef __print_hex_str
+#undef __get_dynamic_array
+#undef __get_dynamic_array_len
+#undef __get_str
+#undef __get_bitmask
+#undef __get_sockaddr
+#undef __get_rel_dynamic_array
+#undef __get_rel_dynamic_array_len
+#undef __get_rel_str
+#undef __get_rel_bitmask
+#undef __get_rel_sockaddr
+#undef __print_array
+#undef __print_hex_dump
+
+/*
+ * The code below is not executed in the kernel. It is only what is
+ * displayed in the print format for userspace to parse.
+ */
+#undef __print_ns_to_secs
+#define __print_ns_to_secs(val) (val) / 1000000000UL
+
+#undef __print_ns_without_secs
+#define __print_ns_without_secs(val) (val) % 1000000000UL
+
+#undef TP_printk
+#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
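Because this stage only builds the format string exported to userspace, a TP_printk such as the following (hypothetical event) ends up as a quoted format plus stringified arguments, with __entry rewritten to REC:

	TP_printk("pid=%d comm=%s", __entry->pid, __get_str(comm))
	/* expands to the print_fmt string: */
	/*   "\"pid=%d comm=%s\", REC->pid, __get_str(comm)" */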
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index dc8ac27d27c1..8e193f3a33b3 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -37,10 +37,10 @@ struct syscall_metadata {
#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
static inline void syscall_tracepoint_update(struct task_struct *p)
{
- if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
- set_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
+ if (test_syscall_work(SYSCALL_TRACEPOINT))
+ set_task_syscall_work(p, SYSCALL_TRACEPOINT);
else
- clear_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
+ clear_task_syscall_work(p, SYSCALL_TRACEPOINT);
}
#else
static inline void syscall_tracepoint_update(struct task_struct *p)
diff --git a/include/trace/trace_custom_events.h b/include/trace/trace_custom_events.h
new file mode 100644
index 000000000000..6e492dba96bf
--- /dev/null
+++ b/include/trace/trace_custom_events.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This is similar to the trace_events.h file, but is used only to
+ * create custom trace events to be attached to existing tracepoints.
+ * Whereas the TRACE_EVENT() macro (from trace_events.h) creates both
+ * the trace event and the tracepoint it attaches the event to,
+ * TRACE_CUSTOM_EVENT() creates only a custom version of an existing
+ * trace event (created by TRACE_EVENT() or DEFINE_EVENT()), which will
+ * be placed in the "custom" system.
+ */
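A minimal usage sketch follows; the TP_PROTO must match the existing tracepoint's prototype exactly, and the fields shown here are hypothetical and abbreviated:

	TRACE_CUSTOM_EVENT(sched_switch,
		TP_PROTO(bool preempt, struct task_struct *prev, struct task_struct *next),
		TP_ARGS(preempt, prev, next),
		TP_STRUCT__entry(
			__field(pid_t, prev_pid)
			__field(pid_t, next_pid)
		),
		TP_fast_assign(
			__entry->prev_pid = prev->pid;
			__entry->next_pid = next->pid;
		),
		TP_printk("prev=%d next=%d", __entry->prev_pid, __entry->next_pid)
	);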
+
+#include <linux/trace_events.h>
+
+/* All custom events are placed in the custom group */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM custom
+
+#ifndef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR TRACE_SYSTEM
+#endif
+
+/* The init stage creates the system string and enum mappings */
+
+#include "stages/init.h"
+
+#undef TRACE_CUSTOM_EVENT
+#define TRACE_CUSTOM_EVENT(name, proto, args, tstruct, assign, print) \
+ DECLARE_CUSTOM_EVENT_CLASS(name, \
+ PARAMS(proto), \
+ PARAMS(args), \
+ PARAMS(tstruct), \
+ PARAMS(assign), \
+ PARAMS(print)); \
+ DEFINE_CUSTOM_EVENT(name, name, PARAMS(proto), PARAMS(args));
+
+/* Stage 1 creates the structure of the recorded event layout */
+
+#include "stages/stage1_struct_define.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
+ struct trace_custom_event_raw_##name { \
+ struct trace_entry ent; \
+ tstruct \
+ char __data[]; \
+ }; \
+ \
+ static struct trace_event_class custom_event_class_##name;
+
+#undef DEFINE_CUSTOM_EVENT
+#define DEFINE_CUSTOM_EVENT(template, name, proto, args) \
+ static struct trace_event_call __used \
+ __attribute__((__aligned__(4))) custom_event_##name
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Stage 2 creates the custom class */
+
+#include "stages/stage2_data_offsets.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+ struct trace_custom_event_data_offsets_##call { \
+ tstruct; \
+ };
+
+#undef DEFINE_CUSTOM_EVENT
+#define DEFINE_CUSTOM_EVENT(template, name, proto, args)
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Stage 3 creates the way to print the custom event */
+
+#include "stages/stage3_trace_output.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+static notrace enum print_line_t \
+trace_custom_raw_output_##call(struct trace_iterator *iter, int flags, \
+ struct trace_event *trace_event) \
+{ \
+ struct trace_seq *s = &iter->seq; \
+ struct trace_seq __maybe_unused *p = &iter->tmp_seq; \
+ struct trace_custom_event_raw_##call *field; \
+ int ret; \
+ \
+ field = (typeof(field))iter->ent; \
+ \
+ ret = trace_raw_output_prep(iter, trace_event); \
+ if (ret != TRACE_TYPE_HANDLED) \
+ return ret; \
+ \
+ trace_event_printf(iter, print); \
+ \
+ return trace_handle_return(s); \
+} \
+static struct trace_event_functions trace_custom_event_type_funcs_##call = { \
+ .trace = trace_custom_raw_output_##call, \
+};
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Stage 4 creates the offset layout for the fields */
+
+#include "stages/stage4_event_fields.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, func, print) \
+static struct trace_event_fields trace_custom_event_fields_##call[] = { \
+ tstruct \
+ {} };
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Stage 5 creates the helper function for dynamic fields */
+
+#include "stages/stage5_get_offsets.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+static inline notrace int trace_custom_event_get_offsets_##call( \
+ struct trace_custom_event_data_offsets_##call *__data_offsets, proto) \
+{ \
+ int __data_size = 0; \
+ int __maybe_unused __item_length; \
+ struct trace_custom_event_raw_##call __maybe_unused *entry; \
+ \
+ tstruct; \
+ \
+ return __data_size; \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Stage 6 creates the probe function that records the event */
+
+#include "stages/stage6_event_callback.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+ \
+static notrace void \
+trace_custom_event_raw_event_##call(void *__data, proto) \
+{ \
+ struct trace_event_file *trace_file = __data; \
+ struct trace_custom_event_data_offsets_##call __maybe_unused __data_offsets; \
+ struct trace_event_buffer fbuffer; \
+ struct trace_custom_event_raw_##call *entry; \
+ int __data_size; \
+ \
+ if (trace_trigger_soft_disabled(trace_file)) \
+ return; \
+ \
+ __data_size = trace_custom_event_get_offsets_##call(&__data_offsets, args); \
+ \
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file, \
+ sizeof(*entry) + __data_size); \
+ \
+ if (!entry) \
+ return; \
+ \
+ tstruct \
+ \
+ { assign; } \
+ \
+ trace_event_buffer_commit(&fbuffer); \
+}
+/*
+ * ftrace_test_custom_probe is compiled out; it exists only as a build-time
+ * check to make sure that if the tracepoint handling changes, the ftrace
+ * probe will fail to compile unless it too is updated.
+ */
+
+#undef DEFINE_CUSTOM_EVENT
+#define DEFINE_CUSTOM_EVENT(template, call, proto, args) \
+static inline void ftrace_test_custom_probe_##call(void) \
+{ \
+ check_trace_callback_type_##call(trace_custom_event_raw_event_##template); \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Stage 7 creates the actual class and event structure for the custom event */
+
+#include "stages/stage7_class_define.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+static char custom_print_fmt_##call[] = print; \
+static struct trace_event_class __used __refdata custom_event_class_##call = { \
+ .system = TRACE_SYSTEM_STRING, \
+ .fields_array = trace_custom_event_fields_##call, \
+ .fields = LIST_HEAD_INIT(custom_event_class_##call.fields),\
+ .raw_init = trace_event_raw_init, \
+ .probe = trace_custom_event_raw_event_##call, \
+ .reg = trace_event_reg, \
+};
+
+#undef DEFINE_CUSTOM_EVENT
+#define DEFINE_CUSTOM_EVENT(template, call, proto, args) \
+ \
+static struct trace_event_call __used custom_event_##call = { \
+ .name = #call, \
+ .class = &custom_event_class_##template, \
+ .event.funcs = &trace_custom_event_type_funcs_##template, \
+ .print_fmt = custom_print_fmt_##template, \
+ .flags = TRACE_EVENT_FL_CUSTOM, \
+}; \
+static inline int trace_custom_event_##call##_update(struct tracepoint *tp) \
+{ \
+ if (tp->name && strcmp(tp->name, #call) == 0) { \
+ custom_event_##call.tp = tp; \
+ custom_event_##call.flags = TRACE_EVENT_FL_TRACEPOINT; \
+ return 1; \
+ } \
+ return 0; \
+} \
+static struct trace_event_call __used \
+__section("_ftrace_events") *__custom_event_##call = &custom_event_##call
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index 502c7be50b8d..c2f9cabf154d 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -24,42 +24,7 @@
#define TRACE_SYSTEM_VAR TRACE_SYSTEM
#endif
-#define __app__(x, y) str__##x##y
-#define __app(x, y) __app__(x, y)
-
-#define TRACE_SYSTEM_STRING __app(TRACE_SYSTEM_VAR,__trace_system_name)
-
-#define TRACE_MAKE_SYSTEM_STR() \
- static const char TRACE_SYSTEM_STRING[] = \
- __stringify(TRACE_SYSTEM)
-
-TRACE_MAKE_SYSTEM_STR();
-
-#undef TRACE_DEFINE_ENUM
-#define TRACE_DEFINE_ENUM(a) \
- static struct trace_eval_map __used __initdata \
- __##TRACE_SYSTEM##_##a = \
- { \
- .system = TRACE_SYSTEM_STRING, \
- .eval_string = #a, \
- .eval_value = a \
- }; \
- static struct trace_eval_map __used \
- __attribute__((section("_ftrace_eval_map"))) \
- *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a
-
-#undef TRACE_DEFINE_SIZEOF
-#define TRACE_DEFINE_SIZEOF(a) \
- static struct trace_eval_map __used __initdata \
- __##TRACE_SYSTEM##_##a = \
- { \
- .system = TRACE_SYSTEM_STRING, \
- .eval_string = "sizeof(" #a ")", \
- .eval_value = sizeof(a) \
- }; \
- static struct trace_eval_map __used \
- __attribute__((section("_ftrace_eval_map"))) \
- *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a
+#include "stages/init.h"
/*
* DECLARE_EVENT_CLASS can be used to add a generic function
@@ -80,40 +45,14 @@ TRACE_MAKE_SYSTEM_STR();
PARAMS(print)); \
DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
-
-#undef __field
-#define __field(type, item) type item;
-
-#undef __field_ext
-#define __field_ext(type, item, filter_type) type item;
-
-#undef __field_struct
-#define __field_struct(type, item) type item;
-
-#undef __field_struct_ext
-#define __field_struct_ext(type, item, filter_type) type item;
-
-#undef __array
-#define __array(type, item, len) type item[len];
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len) u32 __data_loc_##item;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __bitmask
-#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)
-
-#undef TP_STRUCT__entry
-#define TP_STRUCT__entry(args...) args
+#include "stages/stage1_struct_define.h"
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
struct trace_event_raw_##name { \
struct trace_entry ent; \
tstruct \
- char __data[0]; \
+ char __data[]; \
}; \
\
static struct trace_event_class event_class_##name;
@@ -170,35 +109,7 @@ TRACE_MAKE_SYSTEM_STR();
* The size of an array is also encoded, in the higher 16 bits of <item>.
*/
-#undef TRACE_DEFINE_ENUM
-#define TRACE_DEFINE_ENUM(a)
-
-#undef TRACE_DEFINE_SIZEOF
-#define TRACE_DEFINE_SIZEOF(a)
-
-#undef __field
-#define __field(type, item)
-
-#undef __field_ext
-#define __field_ext(type, item, filter_type)
-
-#undef __field_struct
-#define __field_struct(type, item)
-
-#undef __field_struct_ext
-#define __field_struct_ext(type, item, filter_type)
-
-#undef __array
-#define __array(type, item, len)
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len) u32 item;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __bitmask
-#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+#include "stages/stage2_data_offsets.h"
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -210,8 +121,7 @@ TRACE_MAKE_SYSTEM_STR();
#define DEFINE_EVENT(template, name, proto, args)
#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(event, flag)
@@ -232,9 +142,11 @@ TRACE_MAKE_SYSTEM_STR();
* {
* struct trace_seq *s = &iter->seq;
* struct trace_event_raw_<call> *field; <-- defined in stage 1
- * struct trace_entry *entry;
* struct trace_seq *p = &iter->tmp_seq;
- * int ret;
+ *
+ * -------(for event)-------
+ *
+ * struct trace_entry *entry;
*
* entry = iter->ent;
*
@@ -246,107 +158,30 @@ TRACE_MAKE_SYSTEM_STR();
* field = (typeof(field))entry;
*
* trace_seq_init(p);
- * ret = trace_seq_printf(s, "%s: ", <call>);
- * if (ret)
- * ret = trace_seq_printf(s, <TP_printk> "\n");
- * if (!ret)
- * return TRACE_TYPE_PARTIAL_LINE;
+ * return trace_output_call(iter, <call>, <TP_printk> "\n");
*
- * return TRACE_TYPE_HANDLED;
- * }
+ * ------(or, for event class)------
+ *
+ * int ret;
+ *
+ * field = (typeof(field))iter->ent;
+ *
+ * ret = trace_raw_output_prep(iter, trace_event);
+ * if (ret != TRACE_TYPE_HANDLED)
+ * return ret;
+ *
+ * trace_event_printf(iter, <TP_printk> "\n");
+ *
+ * return trace_handle_return(s);
+ * -------
+ * }
*
* This is the method used to print the raw event to the trace
* output format. Note, this is not needed if the data is read
* in binary.
*/
-#undef __entry
-#define __entry field
-
-#undef TP_printk
-#define TP_printk(fmt, args...) fmt "\n", args
-
-#undef __get_dynamic_array
-#define __get_dynamic_array(field) \
- ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
-
-#undef __get_dynamic_array_len
-#define __get_dynamic_array_len(field) \
- ((__entry->__data_loc_##field >> 16) & 0xffff)
-
-#undef __get_str
-#define __get_str(field) ((char *)__get_dynamic_array(field))
-
-#undef __get_bitmask
-#define __get_bitmask(field) \
- ({ \
- void *__bitmask = __get_dynamic_array(field); \
- unsigned int __bitmask_size; \
- __bitmask_size = __get_dynamic_array_len(field); \
- trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
- })
-
-#undef __print_flags
-#define __print_flags(flag, delim, flag_array...) \
- ({ \
- static const struct trace_print_flags __flags[] = \
- { flag_array, { -1, NULL }}; \
- trace_print_flags_seq(p, delim, flag, __flags); \
- })
-
-#undef __print_symbolic
-#define __print_symbolic(value, symbol_array...) \
- ({ \
- static const struct trace_print_flags symbols[] = \
- { symbol_array, { -1, NULL }}; \
- trace_print_symbols_seq(p, value, symbols); \
- })
-
-#undef __print_flags_u64
-#undef __print_symbolic_u64
-#if BITS_PER_LONG == 32
-#define __print_flags_u64(flag, delim, flag_array...) \
- ({ \
- static const struct trace_print_flags_u64 __flags[] = \
- { flag_array, { -1, NULL } }; \
- trace_print_flags_seq_u64(p, delim, flag, __flags); \
- })
-
-#define __print_symbolic_u64(value, symbol_array...) \
- ({ \
- static const struct trace_print_flags_u64 symbols[] = \
- { symbol_array, { -1, NULL } }; \
- trace_print_symbols_seq_u64(p, value, symbols); \
- })
-#else
-#define __print_flags_u64(flag, delim, flag_array...) \
- __print_flags(flag, delim, flag_array)
-
-#define __print_symbolic_u64(value, symbol_array...) \
- __print_symbolic(value, symbol_array)
-#endif
-
-#undef __print_hex
-#define __print_hex(buf, buf_len) \
- trace_print_hex_seq(p, buf, buf_len, false)
-
-#undef __print_hex_str
-#define __print_hex_str(buf, buf_len) \
- trace_print_hex_seq(p, buf, buf_len, true)
-
-#undef __print_array
-#define __print_array(array, count, el_size) \
- ({ \
- BUILD_BUG_ON(el_size != 1 && el_size != 2 && \
- el_size != 4 && el_size != 8); \
- trace_print_array_seq(p, array, count, el_size); \
- })
-
-#undef __print_hex_dump
-#define __print_hex_dump(prefix_str, prefix_type, \
- rowsize, groupsize, buf, len, ascii) \
- trace_print_hex_dump_seq(p, prefix_str, prefix_type, \
- rowsize, groupsize, buf, len, ascii)
+#include "stages/stage3_trace_output.h"
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -365,7 +200,7 @@ trace_raw_output_##call(struct trace_iterator *iter, int flags, \
if (ret != TRACE_TYPE_HANDLED) \
return ret; \
\
- trace_seq_printf(s, print); \
+ trace_event_printf(iter, print); \
\
return trace_handle_return(s); \
} \
@@ -401,41 +236,7 @@ static struct trace_event_functions trace_event_type_funcs_##call = { \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#undef __field_ext
-#define __field_ext(_type, _item, _filter_type) { \
- .type = #_type, .name = #_item, \
- .size = sizeof(_type), .align = __alignof__(_type), \
- .is_signed = is_signed_type(_type), .filter_type = _filter_type },
-
-#undef __field_struct_ext
-#define __field_struct_ext(_type, _item, _filter_type) { \
- .type = #_type, .name = #_item, \
- .size = sizeof(_type), .align = __alignof__(_type), \
- 0, .filter_type = _filter_type },
-
-#undef __field
-#define __field(type, item) __field_ext(type, item, FILTER_OTHER)
-
-#undef __field_struct
-#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER)
-
-#undef __array
-#define __array(_type, _item, _len) { \
- .type = #_type"["__stringify(_len)"]", .name = #_item, \
- .size = sizeof(_type[_len]), .align = __alignof__(_type), \
- .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
-
-#undef __dynamic_array
-#define __dynamic_array(_type, _item, _len) { \
- .type = "__data_loc " #_type "[]", .name = #_item, \
- .size = 4, .align = 4, \
- .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __bitmask
-#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+#include "stages/stage4_event_fields.h"
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
@@ -443,71 +244,12 @@ static struct trace_event_fields trace_event_fields_##call[] = { \
tstruct \
{} };
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args)
-
#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-/*
- * remember the offset of each array from the beginning of the event.
- */
-
-#undef __entry
-#define __entry entry
-
-#undef __field
-#define __field(type, item)
-
-#undef __field_ext
-#define __field_ext(type, item, filter_type)
-
-#undef __field_struct
-#define __field_struct(type, item)
-
-#undef __field_struct_ext
-#define __field_struct_ext(type, item, filter_type)
-
-#undef __array
-#define __array(type, item, len)
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len) \
- __item_length = (len) * sizeof(type); \
- __data_offsets->item = __data_size + \
- offsetof(typeof(*entry), __data); \
- __data_offsets->item |= __item_length << 16; \
- __data_size += __item_length;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, \
- strlen((src) ? (const char *)(src) : "(null)") + 1)
-
-/*
- * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
- * num_possible_cpus().
- */
-#define __bitmask_size_in_bytes_raw(nr_bits) \
- (((nr_bits) + 7) / 8)
-
-#define __bitmask_size_in_longs(nr_bits) \
- ((__bitmask_size_in_bytes_raw(nr_bits) + \
- ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))
-
-/*
- * __bitmask_size_in_bytes is the number of bytes needed to hold
- * num_possible_cpus() padded out to the nearest long. This is what
- * is saved in the buffer, just to be consistent.
- */
-#define __bitmask_size_in_bytes(nr_bits) \
- (__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))
-
-#undef __bitmask
-#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, \
- __bitmask_size_in_longs(nr_bits))
+#include "stages/stage5_get_offsets.h"
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -523,13 +265,6 @@ static inline notrace int trace_event_get_offsets_##call( \
return __data_size; \
}
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args)
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
-
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
@@ -619,7 +354,7 @@ static inline notrace int trace_event_get_offsets_##call( \
 * // it's only safe to use pointers when doing linker tricks to
* // create an array.
* static struct trace_event_call __used
- * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
+ * __section("_ftrace_events") *__event_<call> = &event_<call>;
*
*/
@@ -637,47 +372,7 @@ static inline notrace int trace_event_get_offsets_##call( \
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */
-#undef __entry
-#define __entry entry
-
-#undef __field
-#define __field(type, item)
-
-#undef __field_struct
-#define __field_struct(type, item)
-
-#undef __array
-#define __array(type, item, len)
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len) \
- __entry->__data_loc_##item = __data_offsets.item;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __assign_str
-#define __assign_str(dst, src) \
- strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
-
-#undef __bitmask
-#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
-
-#undef __get_bitmask
-#define __get_bitmask(field) (char *)__get_dynamic_array(field)
-
-#undef __assign_bitmask
-#define __assign_bitmask(dst, src, nr_bits) \
- memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
-
-#undef TP_fast_assign
-#define TP_fast_assign(args...) args
-
-#undef __perf_count
-#define __perf_count(c) (c)
-
-#undef __perf_task
-#define __perf_task(t) (t)
+#include "stages/stage6_event_callback.h"
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -721,27 +416,9 @@ static inline void ftrace_test_probe_##call(void) \
check_trace_callback_type_##call(trace_event_raw_event_##template); \
}
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
-
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#undef __entry
-#define __entry REC
-
-#undef __print_flags
-#undef __print_symbolic
-#undef __print_hex
-#undef __print_hex_str
-#undef __get_dynamic_array
-#undef __get_dynamic_array_len
-#undef __get_str
-#undef __get_bitmask
-#undef __print_array
-#undef __print_hex_dump
-
-#undef TP_printk
-#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
+#include "stages/stage7_class_define.h"
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -770,7 +447,7 @@ static struct trace_event_call __used event_##call = { \
.flags = TRACE_EVENT_FL_TRACEPOINT, \
}; \
static struct trace_event_call __used \
-__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
+__section("_ftrace_events") *__event_##call = &event_##call
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
@@ -787,6 +464,6 @@ static struct trace_event_call __used event_##call = { \
.flags = TRACE_EVENT_FL_TRACEPOINT, \
}; \
static struct trace_event_call __used \
-__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
+__section("_ftrace_events") *__event_##call = &event_##call
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
diff --git a/include/uapi/asm-generic/fcntl.h b/include/uapi/asm-generic/fcntl.h
index 9dc0bf0c5a6e..1ecdb911add8 100644
--- a/include/uapi/asm-generic/fcntl.h
+++ b/include/uapi/asm-generic/fcntl.h
@@ -116,13 +116,13 @@
#define F_GETSIG 11 /* for sockets. */
#endif
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG == 32 || defined(__KERNEL__)
#ifndef F_GETLK64
#define F_GETLK64 12 /* using 'struct flock64' */
#define F_SETLK64 13
#define F_SETLKW64 14
#endif
-#endif
+#endif /* __BITS_PER_LONG == 32 || defined(__KERNEL__) */
#ifndef F_SETOWN_EX
#define F_SETOWN_EX 15
@@ -181,6 +181,10 @@ struct f_owner_ex {
blocking */
#define LOCK_UN 8 /* remove lock */
+/*
+ * LOCK_MAND support has been removed from the kernel. We leave the symbols
+ * here to not break legacy builds, but these should not be used in new code.
+ */
#define LOCK_MAND 32 /* This is a mandatory flock ... */
#define LOCK_READ 64 /* which allows concurrent read operations */
#define LOCK_WRITE 128 /* which allows concurrent write operations */
@@ -189,24 +193,19 @@ struct f_owner_ex {
#define F_LINUX_SPECIFIC_BASE 1024
#ifndef HAVE_ARCH_STRUCT_FLOCK
-#ifndef __ARCH_FLOCK_PAD
-#define __ARCH_FLOCK_PAD
-#endif
-
struct flock {
short l_type;
short l_whence;
__kernel_off_t l_start;
__kernel_off_t l_len;
__kernel_pid_t l_pid;
- __ARCH_FLOCK_PAD
-};
+#ifdef __ARCH_FLOCK_EXTRA_SYSID
+ __ARCH_FLOCK_EXTRA_SYSID
#endif
-
-#ifndef HAVE_ARCH_STRUCT_FLOCK64
-#ifndef __ARCH_FLOCK64_PAD
-#define __ARCH_FLOCK64_PAD
+#ifdef __ARCH_FLOCK_PAD
+ __ARCH_FLOCK_PAD
#endif
+};
struct flock64 {
short l_type;
@@ -214,8 +213,10 @@ struct flock64 {
__kernel_loff_t l_start;
__kernel_loff_t l_len;
__kernel_pid_t l_pid;
+#ifdef __ARCH_FLOCK64_PAD
__ARCH_FLOCK64_PAD
-};
#endif
+};
+#endif /* HAVE_ARCH_STRUCT_FLOCK */
#endif /* _ASM_GENERIC_FCNTL_H */
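For orientation, a standard-usage sketch of how struct flock is consumed through fcntl(2); nothing here depends on the padding rework above:

	#include <fcntl.h>

	/* try to take an exclusive lock on the whole file; 0 on success */
	static int lock_whole_file(int fd)
	{
		struct flock fl = {
			.l_type   = F_WRLCK,	/* exclusive write lock */
			.l_whence = SEEK_SET,
			.l_start  = 0,
			.l_len    = 0,		/* 0 means "to end of file" */
		};

		return fcntl(fd, F_SETLK, &fl);
	}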
diff --git a/include/uapi/asm-generic/hugetlb_encode.h b/include/uapi/asm-generic/hugetlb_encode.h
index b0f8e87235bd..de687009bfe5 100644
--- a/include/uapi/asm-generic/hugetlb_encode.h
+++ b/include/uapi/asm-generic/hugetlb_encode.h
@@ -20,17 +20,18 @@
#define HUGETLB_FLAG_ENCODE_SHIFT 26
#define HUGETLB_FLAG_ENCODE_MASK 0x3f
-#define HUGETLB_FLAG_ENCODE_64KB (16 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_512KB (19 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_1MB (20 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_2MB (21 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_8MB (23 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_16MB (24 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_32MB (25 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_256MB (28 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_512MB (29 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_1GB (30 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_2GB (31 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_16GB (34 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_16KB (14U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_64KB (16U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_512KB (19U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_1MB (20U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_2MB (21U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_8MB (23U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_16MB (24U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_32MB (25U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_256MB (28U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_512MB (29U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_1GB (30U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_2GB (31U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_16GB (34U << HUGETLB_FLAG_ENCODE_SHIFT)
#endif /* _ASM_GENERIC_HUGETLB_ENCODE_H_ */
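The encoded value is simply log2 of the page size placed in the upper flag bits (2MB = 2^21, hence 21U << 26). A hedged sketch of consuming one of these via mmap(2), where MAP_HUGE_2MB in <linux/mman.h> is defined as HUGETLB_FLAG_ENCODE_2MB:

	#include <sys/mman.h>
	#include <linux/mman.h>

	/* map one 2MB huge page; assumes 2MB hugepages are configured */
	static void *map_one_2mb_page(void)
	{
		return mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB,
			    -1, 0);
	}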
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index f94f65d429be..6ce1f1ceb432 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -72,6 +72,13 @@
#define MADV_COLD 20 /* deactivate these pages */
#define MADV_PAGEOUT 21 /* reclaim these pages */
+#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */
+#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */
+
+#define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */
+
+#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */
+
/* compatibility flags */
#define MAP_FILE 0
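A hedged sketch of the new advice values in use (assumes libc headers new enough to define MADV_POPULATE_READ):

	#include <sys/mman.h>

	/* populate page tables up front so later reads don't fault */
	static int prefault_readable(void *addr, size_t len)
	{
		return madvise(addr, len, MADV_POPULATE_READ);
	}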
diff --git a/include/uapi/asm-generic/poll.h b/include/uapi/asm-generic/poll.h
index 41b509f410bf..f9c520ce4bf4 100644
--- a/include/uapi/asm-generic/poll.h
+++ b/include/uapi/asm-generic/poll.h
@@ -29,7 +29,7 @@
#define POLLRDHUP 0x2000
#endif
-#define POLLFREE (__force __poll_t)0x4000 /* currently only for epoll */
+#define POLLFREE (__force __poll_t)0x4000
#define POLL_BUSY_LOOP (__force __poll_t)0x8000
diff --git a/include/uapi/asm-generic/sembuf.h b/include/uapi/asm-generic/sembuf.h
index 0e709bd3d730..f54e48fc91ae 100644
--- a/include/uapi/asm-generic/sembuf.h
+++ b/include/uapi/asm-generic/sembuf.h
@@ -6,9 +6,9 @@
#include <asm/ipcbuf.h>
/*
- * The semid64_ds structure for x86 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
+ * The semid64_ds structure for most architectures (though it came from x86_32
+ * originally). Note extra padding because this structure is passed back and
+ * forth between kernel and user space.
*
* semid64_ds was originally meant to be architecture specific, but
* everyone just ended up making identical copies without specific
diff --git a/include/uapi/asm-generic/shmbuf.h b/include/uapi/asm-generic/shmbuf.h
index 2bab955e0fed..2979b6dd2c56 100644
--- a/include/uapi/asm-generic/shmbuf.h
+++ b/include/uapi/asm-generic/shmbuf.h
@@ -3,6 +3,8 @@
#define __ASM_GENERIC_SHMBUF_H
#include <asm/bitsperlong.h>
+#include <asm/ipcbuf.h>
+#include <asm/posix_types.h>
/*
* The shmid64_ds structure for x86 architecture.
@@ -24,7 +26,7 @@
struct shmid64_ds {
struct ipc64_perm shm_perm; /* operation perms */
- size_t shm_segsz; /* size of segment (bytes) */
+ __kernel_size_t shm_segsz; /* size of segment (bytes) */
#if __BITS_PER_LONG == 64
long shm_atime; /* last attach time */
long shm_dtime; /* last detach time */
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index cb3d6c267181..ffbe4cec9f32 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -29,6 +29,11 @@ typedef union sigval {
#define __ARCH_SI_ATTRIBUTES
#endif
+/*
+ * Be careful when extending this union. On 32-bit architectures
+ * siginfo_t is 32-bit aligned, which means that a 64-bit field or any
+ * other field that would increase the alignment of siginfo_t will
+ * break the ABI.
+ */
union __sifields {
/* kill() */
struct {
@@ -63,9 +68,6 @@ union __sifields {
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
struct {
void __user *_addr; /* faulting insn/memory ref. */
-#ifdef __ARCH_SI_TRAPNO
- int _trapno; /* TRAP # which caused the signal */
-#endif
#ifdef __ia64__
int _imm; /* immediate value for "break" */
unsigned int _flags; /* see ia64 si_flags */
@@ -75,6 +77,8 @@ union __sifields {
#define __ADDR_BND_PKEY_PAD (__alignof__(void *) < sizeof(short) ? \
sizeof(short) : __alignof__(void *))
union {
+ /* used on alpha and sparc */
+ int _trapno; /* TRAP # which caused the signal */
/*
* used when si_code=BUS_MCEERR_AR or
* used when si_code=BUS_MCEERR_AO
@@ -91,6 +95,12 @@ union __sifields {
char _dummy_pkey[__ADDR_BND_PKEY_PAD];
__u32 _pkey;
} _addr_pkey;
+ /* used when si_code=TRAP_PERF */
+ struct {
+ unsigned long _data;
+ __u32 _type;
+ __u32 _flags;
+ } _perf;
};
} _sigfault;
@@ -148,13 +158,14 @@ typedef struct siginfo {
#define si_int _sifields._rt._sigval.sival_int
#define si_ptr _sifields._rt._sigval.sival_ptr
#define si_addr _sifields._sigfault._addr
-#ifdef __ARCH_SI_TRAPNO
#define si_trapno _sifields._sigfault._trapno
-#endif
#define si_addr_lsb _sifields._sigfault._addr_lsb
#define si_lower _sifields._sigfault._addr_bnd._lower
#define si_upper _sifields._sigfault._addr_bnd._upper
#define si_pkey _sifields._sigfault._addr_pkey._pkey
+#define si_perf_data _sifields._sigfault._perf._data
+#define si_perf_type _sifields._sigfault._perf._type
+#define si_perf_flags _sifields._sigfault._perf._flags
#define si_band _sifields._sigpoll._band
#define si_fd _sifields._sigpoll._fd
#define si_call_addr _sifields._sigsys._call_addr
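The si_perf_* fields are filled when a perf event opened with attr.sigtrap = 1 delivers its SIGTRAP. A hedged consumer sketch, installed with sigaction() and SA_SIGINFO (fprintf is for illustration only and is not async-signal-safe):

	#include <signal.h>
	#include <stdio.h>

	static void perf_sigtrap_handler(int sig, siginfo_t *info, void *ucontext)
	{
		if (info->si_code == TRAP_PERF)
			/* si_perf_data carries the attr.sig_data value */
			fprintf(stderr, "perf sigtrap: data=%lu type=%u\n",
				(unsigned long)info->si_perf_data, info->si_perf_type);
	}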
@@ -229,7 +240,9 @@ typedef struct siginfo {
#define SEGV_ACCADI 5 /* ADI not enabled for mapped object */
#define SEGV_ADIDERR 6 /* Disrupting MCD error */
#define SEGV_ADIPERR 7 /* Precise MCD exception */
-#define NSIGSEGV 7
+#define SEGV_MTEAERR 8 /* Asynchronous ARM MTE error */
+#define SEGV_MTESERR 9 /* Synchronous ARM MTE exception */
+#define NSIGSEGV 9
/*
* SIGBUS si_codes
@@ -251,7 +264,8 @@ typedef struct siginfo {
#define TRAP_BRANCH 3 /* process taken branch trap */
#define TRAP_HWBKPT 4 /* hardware breakpoint/watchpoint */
#define TRAP_UNK 5 /* undiagnosed trap */
-#define NSIGTRAP 5
+#define TRAP_PERF 6 /* perf event with sigtrap=1 */
+#define NSIGTRAP 6
/*
* There is an additional set of SIGTRAP si_codes used by ptrace
@@ -259,6 +273,11 @@ typedef struct siginfo {
*/
/*
+ * Flags for si_perf_flags if SIGTRAP si_code is TRAP_PERF.
+ */
+#define TRAP_PERF_FLAG_ASYNC (1u << 0)
+
+/*
* SIGCHLD si_codes
*/
#define CLD_EXITED 1 /* child has exited */
@@ -284,7 +303,8 @@ typedef struct siginfo {
* SIGSYS si_codes
*/
#define SYS_SECCOMP 1 /* seccomp triggered */
-#define NSIGSYS 1
+#define SYS_USER_DISPATCH 2 /* syscall user dispatch triggered */
+#define NSIGSYS 2
/*
* SIGEMT si_codes
diff --git a/include/uapi/asm-generic/signal-defs.h b/include/uapi/asm-generic/signal-defs.h
index e9304c95ceea..7572f2f46ee8 100644
--- a/include/uapi/asm-generic/signal-defs.h
+++ b/include/uapi/asm-generic/signal-defs.h
@@ -4,6 +4,70 @@
#include <linux/compiler.h>
+/*
+ * SA_FLAGS values:
+ *
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_SIGINFO delivers the signal with SIGINFO structs.
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_RESTART flag to get restarting signals (which were the default long ago).
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_UNSUPPORTED is a flag bit that will never be supported. Kernels from
+ * before the introduction of SA_UNSUPPORTED did not clear unknown bits from
+ * sa_flags when read using the oldact argument to sigaction and rt_sigaction,
+ * so this bit allows flag bit support to be detected from userspace while
+ * allowing an old kernel to be distinguished from a kernel that supports every
+ * flag bit.
+ * SA_EXPOSE_TAGBITS exposes an architecture-defined set of tag bits in
+ * siginfo.si_addr.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#ifndef SA_NOCLDSTOP
+#define SA_NOCLDSTOP 0x00000001
+#endif
+#ifndef SA_NOCLDWAIT
+#define SA_NOCLDWAIT 0x00000002
+#endif
+#ifndef SA_SIGINFO
+#define SA_SIGINFO 0x00000004
+#endif
+/* 0x00000008 used on alpha, mips, parisc */
+/* 0x00000010 used on alpha, parisc */
+/* 0x00000020 used on alpha, parisc, sparc */
+/* 0x00000040 used on alpha, parisc */
+/* 0x00000080 used on parisc */
+/* 0x00000100 used on sparc */
+/* 0x00000200 used on sparc */
+#define SA_UNSUPPORTED 0x00000400
+#define SA_EXPOSE_TAGBITS 0x00000800
+/* 0x00010000 used on mips */
+/* 0x00800000 used for internal SA_IMMUTABLE */
+/* 0x01000000 used on x86 */
+/* 0x02000000 used on x86 */
+/*
+ * New architectures should not define the obsolete
+ * SA_RESTORER 0x04000000
+ */
+#ifndef SA_ONSTACK
+#define SA_ONSTACK 0x08000000
+#endif
+#ifndef SA_RESTART
+#define SA_RESTART 0x10000000
+#endif
+#ifndef SA_NODEFER
+#define SA_NODEFER 0x40000000
+#endif
+#ifndef SA_RESETHAND
+#define SA_RESETHAND 0x80000000
+#endif
+
+#define SA_NOMASK SA_NODEFER
+#define SA_ONESHOT SA_RESETHAND
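The SA_UNSUPPORTED comment above describes a probing protocol; a hedged sketch of detecting kernel support for a flag (SA_EXPOSE_TAGBITS here), relying on flag-aware kernels clearing unknown bits on read while old kernels echo them back:

	#include <signal.h>
	#include <stdbool.h>
	#include <string.h>

	/* true if the kernel understands SA_EXPOSE_TAGBITS; sketch only,
	 * a real caller would save and restore the original disposition */
	static bool kernel_supports_expose_tagbits(void)
	{
		struct sigaction act, old;

		memset(&act, 0, sizeof(act));
		act.sa_handler = SIG_DFL;
		act.sa_flags = SA_UNSUPPORTED | SA_EXPOSE_TAGBITS;
		sigaction(SIGSEGV, &act, NULL);
		sigaction(SIGSEGV, NULL, &old);

		/* SA_UNSUPPORTED never survives readback on a flag-aware kernel */
		return !(old.sa_flags & SA_UNSUPPORTED) &&
		       (old.sa_flags & SA_EXPOSE_TAGBITS);
	}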
+
#ifndef SIG_BLOCK
#define SIG_BLOCK 0 /* for blocking signals */
#endif
diff --git a/include/uapi/asm-generic/signal.h b/include/uapi/asm-generic/signal.h
index 5c716a952cbe..0eb69dc8e572 100644
--- a/include/uapi/asm-generic/signal.h
+++ b/include/uapi/asm-generic/signal.h
@@ -52,35 +52,6 @@
#define SIGRTMAX _NSIG
#endif
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001
-#define SA_NOCLDWAIT 0x00000002
-#define SA_SIGINFO 0x00000004
-#define SA_ONSTACK 0x08000000
-#define SA_RESTART 0x10000000
-#define SA_NODEFER 0x40000000
-#define SA_RESETHAND 0x80000000
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
-/*
- * New architectures should not define the obsolete
- * SA_RESTORER 0x04000000
- */
-
#if !defined MINSIGSTKSZ || !defined SIGSTKSZ
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
@@ -114,7 +85,7 @@ struct sigaction {
typedef struct sigaltstack {
void __user *ss_sp;
int ss_flags;
- size_t ss_size;
+ __kernel_size_t ss_size;
} stack_t;
#endif /* __ASSEMBLY__ */
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 77f7c1638eb1..638230899e98 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -119,6 +119,19 @@
#define SO_DETACH_REUSEPORT_BPF 68
+#define SO_PREFER_BUSY_POLL 69
+#define SO_BUSY_POLL_BUDGET 70
+
+#define SO_NETNS_COOKIE 71
+
+#define SO_BUF_LOCK 72
+
+#define SO_RESERVE_MEM 73
+
+#define SO_TXREHASH 74
+
+#define SO_RCVMARK 75
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
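A hedged sketch of one of the new options in use: SO_NETNS_COOKIE is read-only and yields the 64-bit cookie of the socket's network namespace:

	#include <sys/socket.h>
	#include <stdint.h>

	/* fetch the netns cookie for a socket; 0 on success */
	static int get_netns_cookie(int fd, uint64_t *cookie)
	{
		socklen_t len = sizeof(*cookie);

		return getsockopt(fd, SOL_SOCKET, SO_NETNS_COOKIE, cookie, &len);
	}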
diff --git a/include/uapi/asm-generic/termbits-common.h b/include/uapi/asm-generic/termbits-common.h
new file mode 100644
index 000000000000..4a6a79f28b21
--- /dev/null
+++ b/include/uapi/asm-generic/termbits-common.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_GENERIC_TERMBITS_COMMON_H
+#define __ASM_GENERIC_TERMBITS_COMMON_H
+
+typedef unsigned char cc_t;
+typedef unsigned int speed_t;
+
+/* c_iflag bits */
+#define IGNBRK 0x001 /* Ignore break condition */
+#define BRKINT 0x002 /* Signal interrupt on break */
+#define IGNPAR 0x004 /* Ignore characters with parity errors */
+#define PARMRK 0x008 /* Mark parity and framing errors */
+#define INPCK 0x010 /* Enable input parity check */
+#define ISTRIP 0x020 /* Strip 8th bit off characters */
+#define INLCR 0x040 /* Map NL to CR on input */
+#define IGNCR 0x080 /* Ignore CR */
+#define ICRNL 0x100 /* Map CR to NL on input */
+#define IXANY 0x800 /* Any character will restart after stop */
+
+/* c_oflag bits */
+#define OPOST 0x01 /* Perform output processing */
+#define OCRNL 0x08
+#define ONOCR 0x10
+#define ONLRET 0x20
+#define OFILL 0x40
+#define OFDEL 0x80
+
+/* c_cflag bit meaning */
+/* Common CBAUD rates */
+#define B0 0x00000000 /* hang up */
+#define B50 0x00000001
+#define B75 0x00000002
+#define B110 0x00000003
+#define B134 0x00000004
+#define B150 0x00000005
+#define B200 0x00000006
+#define B300 0x00000007
+#define B600 0x00000008
+#define B1200 0x00000009
+#define B1800 0x0000000a
+#define B2400 0x0000000b
+#define B4800 0x0000000c
+#define B9600 0x0000000d
+#define B19200 0x0000000e
+#define B38400 0x0000000f
+#define EXTA B19200
+#define EXTB B38400
+
+#define ADDRB 0x20000000 /* address bit */
+#define CMSPAR 0x40000000 /* mark or space (stick) parity */
+#define CRTSCTS 0x80000000 /* flow control */
+
+#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+
+/* tcflow() ACTION argument and TCXONC use these */
+#define TCOOFF 0 /* Suspend output */
+#define TCOON 1 /* Restart suspended output */
+#define TCIOFF 2 /* Send a STOP character */
+#define TCION 3 /* Send a START character */
+
+/* tcflush() QUEUE_SELECTOR argument and TCFLSH use these */
+#define TCIFLUSH 0 /* Discard data received but not yet read */
+#define TCOFLUSH 1 /* Discard data written but not yet sent */
+#define TCIOFLUSH 2 /* Discard all pending data */
+
+#endif /* __ASM_GENERIC_TERMBITS_COMMON_H */
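For reference, the flow/flush constants above pair with tcflow(3)/tcflush(3); a standard-usage sketch:

	#include <termios.h>

	/* discard data received but not read, and written but not sent */
	static int discard_pending(int fd)
	{
		return tcflush(fd, TCIOFLUSH);
	}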
diff --git a/include/uapi/asm-generic/termbits.h b/include/uapi/asm-generic/termbits.h
index 2fbaf9ae89dd..890ef29053e2 100644
--- a/include/uapi/asm-generic/termbits.h
+++ b/include/uapi/asm-generic/termbits.h
@@ -2,10 +2,8 @@
#ifndef __ASM_GENERIC_TERMBITS_H
#define __ASM_GENERIC_TERMBITS_H
-#include <linux/posix_types.h>
+#include <asm-generic/termbits-common.h>
-typedef unsigned char cc_t;
-typedef unsigned int speed_t;
typedef unsigned int tcflag_t;
#define NCCS 19
@@ -41,156 +39,107 @@ struct ktermios {
};
/* c_cc characters */
-#define VINTR 0
-#define VQUIT 1
-#define VERASE 2
-#define VKILL 3
-#define VEOF 4
-#define VTIME 5
-#define VMIN 6
-#define VSWTC 7
-#define VSTART 8
-#define VSTOP 9
-#define VSUSP 10
-#define VEOL 11
-#define VREPRINT 12
-#define VDISCARD 13
-#define VWERASE 14
-#define VLNEXT 15
-#define VEOL2 16
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VTIME 5
+#define VMIN 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VEOL 11
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VEOL2 16
/* c_iflag bits */
-#define IGNBRK 0000001
-#define BRKINT 0000002
-#define IGNPAR 0000004
-#define PARMRK 0000010
-#define INPCK 0000020
-#define ISTRIP 0000040
-#define INLCR 0000100
-#define IGNCR 0000200
-#define ICRNL 0000400
-#define IUCLC 0001000
-#define IXON 0002000
-#define IXANY 0004000
-#define IXOFF 0010000
-#define IMAXBEL 0020000
-#define IUTF8 0040000
+#define IUCLC 0x0200
+#define IXON 0x0400
+#define IXOFF 0x1000
+#define IMAXBEL 0x2000
+#define IUTF8 0x4000
/* c_oflag bits */
-#define OPOST 0000001
-#define OLCUC 0000002
-#define ONLCR 0000004
-#define OCRNL 0000010
-#define ONOCR 0000020
-#define ONLRET 0000040
-#define OFILL 0000100
-#define OFDEL 0000200
-#define NLDLY 0000400
-#define NL0 0000000
-#define NL1 0000400
-#define CRDLY 0003000
-#define CR0 0000000
-#define CR1 0001000
-#define CR2 0002000
-#define CR3 0003000
-#define TABDLY 0014000
-#define TAB0 0000000
-#define TAB1 0004000
-#define TAB2 0010000
-#define TAB3 0014000
-#define XTABS 0014000
-#define BSDLY 0020000
-#define BS0 0000000
-#define BS1 0020000
-#define VTDLY 0040000
-#define VT0 0000000
-#define VT1 0040000
-#define FFDLY 0100000
-#define FF0 0000000
-#define FF1 0100000
+#define OLCUC 0x00002
+#define ONLCR 0x00004
+#define NLDLY 0x00100
+#define NL0 0x00000
+#define NL1 0x00100
+#define CRDLY 0x00600
+#define CR0 0x00000
+#define CR1 0x00200
+#define CR2 0x00400
+#define CR3 0x00600
+#define TABDLY 0x01800
+#define TAB0 0x00000
+#define TAB1 0x00800
+#define TAB2 0x01000
+#define TAB3 0x01800
+#define XTABS 0x01800
+#define BSDLY 0x02000
+#define BS0 0x00000
+#define BS1 0x02000
+#define VTDLY 0x04000
+#define VT0 0x00000
+#define VT1 0x04000
+#define FFDLY 0x08000
+#define FF0 0x00000
+#define FF1 0x08000
/* c_cflag bit meaning */
-#define CBAUD 0010017
-#define B0 0000000 /* hang up */
-#define B50 0000001
-#define B75 0000002
-#define B110 0000003
-#define B134 0000004
-#define B150 0000005
-#define B200 0000006
-#define B300 0000007
-#define B600 0000010
-#define B1200 0000011
-#define B1800 0000012
-#define B2400 0000013
-#define B4800 0000014
-#define B9600 0000015
-#define B19200 0000016
-#define B38400 0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CSIZE 0000060
-#define CS5 0000000
-#define CS6 0000020
-#define CS7 0000040
-#define CS8 0000060
-#define CSTOPB 0000100
-#define CREAD 0000200
-#define PARENB 0000400
-#define PARODD 0001000
-#define HUPCL 0002000
-#define CLOCAL 0004000
-#define CBAUDEX 0010000
-#define BOTHER 0010000
-#define B57600 0010001
-#define B115200 0010002
-#define B230400 0010003
-#define B460800 0010004
-#define B500000 0010005
-#define B576000 0010006
-#define B921600 0010007
-#define B1000000 0010010
-#define B1152000 0010011
-#define B1500000 0010012
-#define B2000000 0010013
-#define B2500000 0010014
-#define B3000000 0010015
-#define B3500000 0010016
-#define B4000000 0010017
-#define CIBAUD 002003600000 /* input baud rate */
-#define CMSPAR 010000000000 /* mark or space (stick) parity */
-#define CRTSCTS 020000000000 /* flow control */
-
-#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+#define CBAUD 0x0000100f
+#define CSIZE 0x00000030
+#define CS5 0x00000000
+#define CS6 0x00000010
+#define CS7 0x00000020
+#define CS8 0x00000030
+#define CSTOPB 0x00000040
+#define CREAD 0x00000080
+#define PARENB 0x00000100
+#define PARODD 0x00000200
+#define HUPCL 0x00000400
+#define CLOCAL 0x00000800
+#define CBAUDEX 0x00001000
+#define BOTHER 0x00001000
+#define B57600 0x00001001
+#define B115200 0x00001002
+#define B230400 0x00001003
+#define B460800 0x00001004
+#define B500000 0x00001005
+#define B576000 0x00001006
+#define B921600 0x00001007
+#define B1000000 0x00001008
+#define B1152000 0x00001009
+#define B1500000 0x0000100a
+#define B2000000 0x0000100b
+#define B2500000 0x0000100c
+#define B3000000 0x0000100d
+#define B3500000 0x0000100e
+#define B4000000 0x0000100f
+#define CIBAUD 0x100f0000 /* input baud rate */
/* c_lflag bits */
-#define ISIG 0000001
-#define ICANON 0000002
-#define XCASE 0000004
-#define ECHO 0000010
-#define ECHOE 0000020
-#define ECHOK 0000040
-#define ECHONL 0000100
-#define NOFLSH 0000200
-#define TOSTOP 0000400
-#define ECHOCTL 0001000
-#define ECHOPRT 0002000
-#define ECHOKE 0004000
-#define FLUSHO 0010000
-#define PENDIN 0040000
-#define IEXTEN 0100000
-#define EXTPROC 0200000
-
-/* tcflow() and TCXONC use these */
-#define TCOOFF 0
-#define TCOON 1
-#define TCIOFF 2
-#define TCION 3
-
-/* tcflush() and TCFLSH use these */
-#define TCIFLUSH 0
-#define TCOFLUSH 1
-#define TCIOFLUSH 2
+#define ISIG 0x00001
+#define ICANON 0x00002
+#define XCASE 0x00004
+#define ECHO 0x00008
+#define ECHOE 0x00010
+#define ECHOK 0x00020
+#define ECHONL 0x00040
+#define NOFLSH 0x00080
+#define TOSTOP 0x00100
+#define ECHOCTL 0x00200
+#define ECHOPRT 0x00400
+#define ECHOKE 0x00800
+#define FLUSHO 0x01000
+#define PENDIN 0x04000
+#define IEXTEN 0x08000
+#define EXTPROC 0x10000
/* tcsetattr uses these */
#define TCSANOW 0
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 3a3201e4618e..45fa180cc56a 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -140,7 +140,7 @@ __SYSCALL(__NR_renameat, sys_renameat)
#define __NR_umount2 39
__SYSCALL(__NR_umount2, sys_umount)
#define __NR_mount 40
-__SC_COMP(__NR_mount, sys_mount, compat_sys_mount)
+__SYSCALL(__NR_mount, sys_mount)
#define __NR_pivot_root 41
__SYSCALL(__NR_pivot_root, sys_pivot_root)
@@ -207,9 +207,9 @@ __SYSCALL(__NR_read, sys_read)
#define __NR_write 64
__SYSCALL(__NR_write, sys_write)
#define __NR_readv 65
-__SC_COMP(__NR_readv, sys_readv, compat_sys_readv)
+__SC_COMP(__NR_readv, sys_readv, sys_readv)
#define __NR_writev 66
-__SC_COMP(__NR_writev, sys_writev, compat_sys_writev)
+__SC_COMP(__NR_writev, sys_writev, sys_writev)
#define __NR_pread64 67
__SC_COMP(__NR_pread64, sys_pread64, compat_sys_pread64)
#define __NR_pwrite64 68
@@ -237,7 +237,7 @@ __SC_COMP(__NR_signalfd4, sys_signalfd4, compat_sys_signalfd4)
/* fs/splice.c */
#define __NR_vmsplice 75
-__SC_COMP(__NR_vmsplice, sys_vmsplice, compat_sys_vmsplice)
+__SYSCALL(__NR_vmsplice, sys_vmsplice)
#define __NR_splice 76
__SYSCALL(__NR_splice, sys_splice)
#define __NR_tee 77
@@ -383,7 +383,7 @@ __SYSCALL(__NR_syslog, sys_syslog)
/* kernel/ptrace.c */
#define __NR_ptrace 117
-__SYSCALL(__NR_ptrace, sys_ptrace)
+__SC_COMP(__NR_ptrace, sys_ptrace, compat_sys_ptrace)
/* kernel/sched/core.c */
#define __NR_sched_setparam 118
@@ -517,7 +517,7 @@ __SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday)
__SC_3264(__NR_adjtimex, sys_adjtimex_time32, sys_adjtimex)
#endif
-/* kernel/timer.c */
+/* kernel/sys.c */
#define __NR_getpid 172
__SYSCALL(__NR_getpid, sys_getpid)
#define __NR_getppid 173
@@ -606,9 +606,9 @@ __SYSCALL(__NR_sendto, sys_sendto)
#define __NR_recvfrom 207
__SC_COMP(__NR_recvfrom, sys_recvfrom, compat_sys_recvfrom)
#define __NR_setsockopt 208
-__SC_COMP(__NR_setsockopt, sys_setsockopt, compat_sys_setsockopt)
+__SC_COMP(__NR_setsockopt, sys_setsockopt, sys_setsockopt)
#define __NR_getsockopt 209
-__SC_COMP(__NR_getsockopt, sys_getsockopt, compat_sys_getsockopt)
+__SC_COMP(__NR_getsockopt, sys_getsockopt, sys_getsockopt)
#define __NR_shutdown 210
__SYSCALL(__NR_shutdown, sys_shutdown)
#define __NR_sendmsg 211
@@ -673,15 +673,15 @@ __SYSCALL(__NR_madvise, sys_madvise)
#define __NR_remap_file_pages 234
__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
#define __NR_mbind 235
-__SC_COMP(__NR_mbind, sys_mbind, compat_sys_mbind)
+__SYSCALL(__NR_mbind, sys_mbind)
#define __NR_get_mempolicy 236
-__SC_COMP(__NR_get_mempolicy, sys_get_mempolicy, compat_sys_get_mempolicy)
+__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy)
#define __NR_set_mempolicy 237
-__SC_COMP(__NR_set_mempolicy, sys_set_mempolicy, compat_sys_set_mempolicy)
+__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy)
#define __NR_migrate_pages 238
-__SC_COMP(__NR_migrate_pages, sys_migrate_pages, compat_sys_migrate_pages)
+__SYSCALL(__NR_migrate_pages, sys_migrate_pages)
#define __NR_move_pages 239
-__SC_COMP(__NR_move_pages, sys_move_pages, compat_sys_move_pages)
+__SYSCALL(__NR_move_pages, sys_move_pages)
#endif
#define __NR_rt_tgsigqueueinfo 240
@@ -727,11 +727,9 @@ __SYSCALL(__NR_setns, sys_setns)
#define __NR_sendmmsg 269
__SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg)
#define __NR_process_vm_readv 270
-__SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \
- compat_sys_process_vm_readv)
+__SYSCALL(__NR_process_vm_readv, sys_process_vm_readv)
#define __NR_process_vm_writev 271
-__SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
- compat_sys_process_vm_writev)
+__SYSCALL(__NR_process_vm_writev, sys_process_vm_writev)
#define __NR_kcmp 272
__SYSCALL(__NR_kcmp, sys_kcmp)
#define __NR_finit_module 273
@@ -781,7 +779,7 @@ __SYSCALL(__NR_rseq, sys_rseq)
#define __NR_kexec_file_load 294
__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
/* 295 through 402 are unassigned to sync up with generic numbers, don't use */
-#if __BITS_PER_LONG == 32
+#if defined(__SYSCALL_COMPAT) || __BITS_PER_LONG == 32
#define __NR_clock_gettime64 403
__SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
#define __NR_clock_settime64 404
@@ -850,14 +848,46 @@ __SYSCALL(__NR_pidfd_open, sys_pidfd_open)
#define __NR_clone3 435
__SYSCALL(__NR_clone3, sys_clone3)
#endif
+#define __NR_close_range 436
+__SYSCALL(__NR_close_range, sys_close_range)
#define __NR_openat2 437
__SYSCALL(__NR_openat2, sys_openat2)
#define __NR_pidfd_getfd 438
__SYSCALL(__NR_pidfd_getfd, sys_pidfd_getfd)
+#define __NR_faccessat2 439
+__SYSCALL(__NR_faccessat2, sys_faccessat2)
+#define __NR_process_madvise 440
+__SYSCALL(__NR_process_madvise, sys_process_madvise)
+#define __NR_epoll_pwait2 441
+__SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
+#define __NR_mount_setattr 442
+__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
+#define __NR_quotactl_fd 443
+__SYSCALL(__NR_quotactl_fd, sys_quotactl_fd)
+
+#define __NR_landlock_create_ruleset 444
+__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
+#define __NR_landlock_add_rule 445
+__SYSCALL(__NR_landlock_add_rule, sys_landlock_add_rule)
+#define __NR_landlock_restrict_self 446
+__SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self)
+
+#ifdef __ARCH_WANT_MEMFD_SECRET
+#define __NR_memfd_secret 447
+__SYSCALL(__NR_memfd_secret, sys_memfd_secret)
+#endif
+#define __NR_process_mrelease 448
+__SYSCALL(__NR_process_mrelease, sys_process_mrelease)
+
+#define __NR_futex_waitv 449
+__SYSCALL(__NR_futex_waitv, sys_futex_waitv)
+
+#define __NR_set_mempolicy_home_node 450
+__SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
#undef __NR_syscalls
-#define __NR_syscalls 439
+#define __NR_syscalls 451
/*
* 32 bit systems traditionally used different
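As context for the table changes above: each architecture consumes this header by redefining __SYSCALL before including it. A minimal sketch, patterned on the arm64-style two-pass include (syscall_fn_t and the surrounding kernel plumbing are schematic, not part of this header):

    /* Pass 1: declare every entry point named in the table. */
    #undef __SYSCALL
    #define __SYSCALL(nr, sym)  asmlinkage long sym(const struct pt_regs *);
    #include <asm/unistd.h>

    typedef long (*syscall_fn_t)(const struct pt_regs *);

    /* Pass 2: emit one table slot per __NR_* value; unassigned
     * numbers fall back to sys_ni_syscall via the range initializer. */
    #undef __SYSCALL
    #define __SYSCALL(nr, sym)  [nr] = (syscall_fn_t)sym,

    const syscall_fn_t sys_call_table[__NR_syscalls] = {
        [0 ... __NR_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
    #include <asm/unistd.h>
    };

Bumping __NR_syscalls to 451 above therefore grows every such table automatically; new slots stay at sys_ni_syscall until each architecture wires up the entries.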
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index ac3879829bb5..0d93ec132ebb 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -80,7 +80,7 @@ extern "C" {
*
* %AMDGPU_GEM_DOMAIN_GTT GPU accessible system memory, mapped into the
* GPU's virtual address space via gart. Gart memory linearizes non-contiguous
- * pages of system memory, allows GPU access system memory in a linezrized
+ * pages of system memory, allows GPU access system memory in a linearized
* fashion.
*
* %AMDGPU_GEM_DOMAIN_VRAM Local video memory. For APUs, it is memory
@@ -116,8 +116,6 @@ extern "C" {
#define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2)
/* Flag that the memory should be in VRAM and cleared */
#define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3)
-/* Flag that create shadow bo(GTT) while allocating vram bo */
-#define AMDGPU_GEM_CREATE_SHADOW (1 << 4)
/* Flag that allocating the BO should use linear VRAM */
#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5)
/* Flag that BO is always valid in this VM */
@@ -125,13 +123,27 @@ extern "C" {
/* Flag that BO sharing will be explicitly synchronized */
#define AMDGPU_GEM_CREATE_EXPLICIT_SYNC (1 << 7)
/* Flag that indicates allocating MQD gart on GFX9, where the mtype
- * for the second page onward should be set to NC.
+ * for the second page onward should be set to NC. It should never
+ * be used by user space applications.
*/
-#define AMDGPU_GEM_CREATE_MQD_GFX9 (1 << 8)
+#define AMDGPU_GEM_CREATE_CP_MQD_GFX9 (1 << 8)
/* Flag that BO may contain sensitive data that must be wiped before
* releasing the memory
*/
#define AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE (1 << 9)
+/* Flag that BO will be encrypted and that the TMZ bit should be
+ * set in the PTEs when mapping this buffer via GPUVM or
+ * accessing it with various hw blocks
+ */
+#define AMDGPU_GEM_CREATE_ENCRYPTED (1 << 10)
+/* Flag that BO will be used only in preemptible context, which does
+ * not require GTT memory accounting
+ */
+#define AMDGPU_GEM_CREATE_PREEMPTIBLE (1 << 11)
+/* Flag that BO can be discarded under memory pressure without keeping the
+ * content.
+ */
+#define AMDGPU_GEM_CREATE_DISCARDABLE (1 << 12)
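A hedged usage sketch for the new create flags (not from this patch; assumes an open amdgpu render-node fd, includes and error handling elided):

    union drm_amdgpu_gem_create gem = {0};

    gem.in.bo_size      = 1 << 20;   /* 1 MiB */
    gem.in.alignment    = 4096;
    gem.in.domains      = AMDGPU_GEM_DOMAIN_VRAM;
    gem.in.domain_flags = AMDGPU_GEM_CREATE_ENCRYPTED |   /* TMZ-protected */
                          AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;

    if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &gem) == 0)
        handle = gem.out.handle;     /* GEM handle of the new BO */

The new bits simply OR into the existing domain_flags field; handle is a caller-side variable shown for illustration.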
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
@@ -198,6 +210,8 @@ union drm_amdgpu_bo_list {
#define AMDGPU_CTX_OP_FREE_CTX 2
#define AMDGPU_CTX_OP_QUERY_STATE 3
#define AMDGPU_CTX_OP_QUERY_STATE2 4
+#define AMDGPU_CTX_OP_GET_STABLE_PSTATE 5
+#define AMDGPU_CTX_OP_SET_STABLE_PSTATE 6
/* GPU reset status */
#define AMDGPU_CTX_NO_RESET 0
@@ -230,10 +244,18 @@ union drm_amdgpu_bo_list {
#define AMDGPU_CTX_PRIORITY_HIGH 512
#define AMDGPU_CTX_PRIORITY_VERY_HIGH 1023
+/* select a stable profiling pstate for perfmon tools */
+#define AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK 0xf
+#define AMDGPU_CTX_STABLE_PSTATE_NONE 0
+#define AMDGPU_CTX_STABLE_PSTATE_STANDARD 1
+#define AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK 2
+#define AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK 3
+#define AMDGPU_CTX_STABLE_PSTATE_PEAK 4
+
struct drm_amdgpu_ctx_in {
/** AMDGPU_CTX_OP_* */
__u32 op;
- /** For future use, no flags defined so far */
+ /** Flags */
__u32 flags;
__u32 ctx_id;
/** AMDGPU_CTX_PRIORITY_* */
@@ -254,6 +276,11 @@ union drm_amdgpu_ctx_out {
/** Reset status since the last call of the ioctl. */
__u32 reset_status;
} state;
+
+ struct {
+ __u32 flags;
+ __u32 _pad;
+ } pstate;
};
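A sketch of the new stable-pstate round trip (illustrative only; fd and ctx_id are assumed to exist, error checks elided):

    union drm_amdgpu_ctx args;

    /* Pin the context's clocks to the peak profile for profiling. */
    memset(&args, 0, sizeof(args));
    args.in.op     = AMDGPU_CTX_OP_SET_STABLE_PSTATE;
    args.in.ctx_id = ctx_id;
    args.in.flags  = AMDGPU_CTX_STABLE_PSTATE_PEAK;
    ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &args);

    /* Read the current setting back. */
    memset(&args, 0, sizeof(args));
    args.in.op     = AMDGPU_CTX_OP_GET_STABLE_PSTATE;
    args.in.ctx_id = ctx_id;
    ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &args);
    /* args.out.pstate.flags holds an AMDGPU_CTX_STABLE_PSTATE_* value. */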
union drm_amdgpu_ctx {
@@ -345,6 +372,10 @@ struct drm_amdgpu_gem_userptr {
#define AMDGPU_TILING_DCC_PITCH_MAX_MASK 0x3FFF
#define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT 43
#define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK 0x1
+#define AMDGPU_TILING_DCC_INDEPENDENT_128B_SHIFT 44
+#define AMDGPU_TILING_DCC_INDEPENDENT_128B_MASK 0x1
+#define AMDGPU_TILING_SCANOUT_SHIFT 63
+#define AMDGPU_TILING_SCANOUT_MASK 0x1
/* Set/Get helpers for tiling flags. */
#define AMDGPU_TILING_SET(field, value) \
@@ -492,16 +523,18 @@ struct drm_amdgpu_gem_op {
#define AMDGPU_VM_MTYPE_MASK (0xf << 5)
/* Default MTYPE. Pre-AI must use this. Recommended for newer ASICs. */
#define AMDGPU_VM_MTYPE_DEFAULT (0 << 5)
-/* Use NC MTYPE instead of default MTYPE */
+/* Use Non Coherent MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_NC (1 << 5)
-/* Use WC MTYPE instead of default MTYPE */
+/* Use Write Combine MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_WC (2 << 5)
-/* Use CC MTYPE instead of default MTYPE */
+/* Use Cache Coherent MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_CC (3 << 5)
-/* Use UC MTYPE instead of default MTYPE */
+/* Use UnCached MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_UC (4 << 5)
-/* Use RW MTYPE instead of default MTYPE */
+/* Use Read Write MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_RW (5 << 5)
+/* don't allocate MALL */
+#define AMDGPU_VM_PAGE_NOALLOC (1 << 9)
struct drm_amdgpu_gem_va {
/** GEM object handle */
@@ -526,6 +559,10 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_HW_IP_VCE 4
#define AMDGPU_HW_IP_UVD_ENC 5
#define AMDGPU_HW_IP_VCN_DEC 6
+/*
+ * From VCN4, AMDGPU_HW_IP_VCN_ENC is re-used to support
+ * both encoding and decoding jobs.
+ */
#define AMDGPU_HW_IP_VCN_ENC 7
#define AMDGPU_HW_IP_VCN_JPEG 8
#define AMDGPU_HW_IP_NUM 9
@@ -554,7 +591,7 @@ struct drm_amdgpu_cs_in {
/** Handle of resource list associated with CS */
__u32 bo_list_handle;
__u32 num_chunks;
- __u32 _pad;
+ __u32 flags;
/** this points to __u64 * which point to cs chunks */
__u64 chunks;
};
@@ -588,6 +625,14 @@ union drm_amdgpu_cs {
*/
#define AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID (1 << 4)
+/* Flag the IB as secure (TMZ)
+ */
+#define AMDGPU_IB_FLAGS_SECURE (1 << 5)
+
+/* Tell KMD to flush and invalidate caches
+ */
+#define AMDGPU_IB_FLAG_EMIT_MEM_SYNC (1 << 6)
+
struct drm_amdgpu_cs_chunk_ib {
__u32 _pad;
/** AMDGPU_IB_FLAG_* */
@@ -649,12 +694,13 @@ struct drm_amdgpu_cs_chunk_data {
};
};
-/**
+/*
* Query h/w info: Flag that this is an integrated (a.k.a. fusion) GPU
*
*/
#define AMDGPU_IDS_FLAGS_FUSION 0x1
#define AMDGPU_IDS_FLAGS_PREEMPTION 0x2
+#define AMDGPU_IDS_FLAGS_TMZ 0x4
/* indicate if acceleration can be working */
#define AMDGPU_INFO_ACCEL_WORKING 0x00
@@ -705,6 +751,20 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_FW_TA 0x13
/* Subquery id: Query DMCUB firmware version */
#define AMDGPU_INFO_FW_DMCUB 0x14
+ /* Subquery id: Query TOC firmware version */
+ #define AMDGPU_INFO_FW_TOC 0x15
+ /* Subquery id: Query CAP firmware version */
+ #define AMDGPU_INFO_FW_CAP 0x16
+ /* Subquery id: Query GFX RLCP firmware version */
+ #define AMDGPU_INFO_FW_GFX_RLCP 0x17
+ /* Subquery id: Query GFX RLCV firmware version */
+ #define AMDGPU_INFO_FW_GFX_RLCV 0x18
+ /* Subquery id: Query MES_KIQ firmware version */
+ #define AMDGPU_INFO_FW_MES_KIQ 0x19
+ /* Subquery id: Query MES firmware version */
+ #define AMDGPU_INFO_FW_MES 0x1a
+ /* Subquery id: Query IMU firmware version */
+ #define AMDGPU_INFO_FW_IMU 0x1b
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
@@ -734,6 +794,8 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_VBIOS_SIZE 0x1
/* Subquery id: Query vbios image */
#define AMDGPU_INFO_VBIOS_IMAGE 0x2
+ /* Subquery id: Query vbios info */
+ #define AMDGPU_INFO_VBIOS_INFO 0x3
/* Query UVD handles */
#define AMDGPU_INFO_NUM_HANDLES 0x1C
/* Query sensor related information */
@@ -761,7 +823,6 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
/* query ras mask of enabled features*/
#define AMDGPU_INFO_RAS_ENABLED_FEATURES 0x20
-
/* RAS MASK: UMC (VRAM) */
#define AMDGPU_INFO_RAS_ENABLED_UMC (1 << 0)
/* RAS MASK: SDMA */
@@ -790,6 +851,12 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_RAS_ENABLED_MP1 (1 << 12)
/* RAS MASK: FUSE */
#define AMDGPU_INFO_RAS_ENABLED_FUSE (1 << 13)
+/* query video encode/decode caps */
+#define AMDGPU_INFO_VIDEO_CAPS 0x21
+ /* Subquery id: Decode */
+ #define AMDGPU_INFO_VIDEO_CAPS_DECODE 0
+ /* Subquery id: Encode */
+ #define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
@@ -857,6 +924,10 @@ struct drm_amdgpu_info {
struct {
__u32 type;
} sensor_info;
+
+ struct {
+ __u32 type;
+ } video_cap;
};
};
@@ -917,6 +988,15 @@ struct drm_amdgpu_info_firmware {
__u32 feature;
};
+struct drm_amdgpu_info_vbios {
+ __u8 name[64];
+ __u8 vbios_pn[64];
+ __u32 version;
+ __u32 pad;
+ __u8 vbios_ver_str[32];
+ __u8 date[32];
+};
+
#define AMDGPU_VRAM_TYPE_UNKNOWN 0
#define AMDGPU_VRAM_TYPE_GDDR1 1
#define AMDGPU_VRAM_TYPE_DDR2 2
@@ -927,6 +1007,9 @@ struct drm_amdgpu_info_firmware {
#define AMDGPU_VRAM_TYPE_DDR3 7
#define AMDGPU_VRAM_TYPE_DDR4 8
#define AMDGPU_VRAM_TYPE_GDDR6 9
+#define AMDGPU_VRAM_TYPE_DDR5 10
+#define AMDGPU_VRAM_TYPE_LPDDR4 11
+#define AMDGPU_VRAM_TYPE_LPDDR5 12
struct drm_amdgpu_info_device {
/** PCI Device ID */
@@ -1024,7 +1107,8 @@ struct drm_amdgpu_info_hw_ip {
__u32 ib_size_alignment;
/** Bitmask of available rings. Bit 0 means ring 0, etc. */
__u32 available_rings;
- __u32 _pad;
+ /** version info: bits 23:16 major, 15:8 minor, 7:0 revision */
+ __u32 ip_discovery_version;
};
struct drm_amdgpu_info_num_handles {
@@ -1052,6 +1136,30 @@ struct drm_amdgpu_info_vce_clock_table {
__u32 pad;
};
+/* query video encode/decode caps */
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2 0
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4 1
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1 2
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC 3
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC 4
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG 5
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9 6
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1 7
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT 8
+
+struct drm_amdgpu_info_video_codec_info {
+ __u32 valid;
+ __u32 max_width;
+ __u32 max_height;
+ __u32 max_pixels_per_frame;
+ __u32 max_level;
+ __u32 pad;
+};
+
+struct drm_amdgpu_info_video_caps {
+ struct drm_amdgpu_info_video_codec_info codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT];
+};
+
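A sketch of querying the new video caps (illustrative; fd assumed, includes elided; the return_pointer/return_size convention is the existing drm_amdgpu_info one):

    struct drm_amdgpu_info_video_caps caps = {0};
    struct drm_amdgpu_info request = {0};

    request.return_pointer = (__u64)(uintptr_t)&caps;
    request.return_size    = sizeof(caps);
    request.query          = AMDGPU_INFO_VIDEO_CAPS;
    request.video_cap.type = AMDGPU_INFO_VIDEO_CAPS_DECODE;

    if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request) == 0 &&
        caps.codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC].valid)
        /* HEVC decode available up to max_width x max_height. */;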
/*
* Supported GPU families
*/
@@ -1064,6 +1172,12 @@ struct drm_amdgpu_info_vce_clock_table {
#define AMDGPU_FAMILY_AI 141 /* Vega10 */
#define AMDGPU_FAMILY_RV 142 /* Raven */
#define AMDGPU_FAMILY_NV 143 /* Navi10 */
+#define AMDGPU_FAMILY_VGH 144 /* Van Gogh */
+#define AMDGPU_FAMILY_GC_11_0_0 145 /* GC 11.0.0 */
+#define AMDGPU_FAMILY_YC 146 /* Yellow Carp */
+#define AMDGPU_FAMILY_GC_11_0_1 148 /* GC 11.0.1 */
+#define AMDGPU_FAMILY_GC_10_3_6 149 /* GC 10.3.6 */
+#define AMDGPU_FAMILY_GC_10_3_7 151 /* GC 10.3.7 */
#if defined(__cplusplus)
}
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 868bf7996c0f..642808520d92 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -1,11 +1,10 @@
-/**
- * \file drm.h
+/*
* Header for the Direct Rendering Manager
*
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
*
- * \par Acknowledgments:
- * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
+ * Acknowledgments:
+ * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic cmpxchg.
*/
/*
@@ -85,7 +84,7 @@ typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;
-/**
+/*
* Cliprect.
*
* \warning: If you change this structure, make sure you change
@@ -101,7 +100,7 @@ struct drm_clip_rect {
unsigned short y2;
};
-/**
+/*
* Drawable information.
*/
struct drm_drawable_info {
@@ -109,7 +108,7 @@ struct drm_drawable_info {
struct drm_clip_rect *rects;
};
-/**
+/*
* Texture region,
*/
struct drm_tex_region {
@@ -120,7 +119,7 @@ struct drm_tex_region {
unsigned int age;
};
-/**
+/*
* Hardware lock.
*
* The lock structure is a simple cache-line aligned integer. To avoid
@@ -132,7 +131,7 @@ struct drm_hw_lock {
char padding[60]; /**< Pad to cache line */
};
-/**
+/*
* DRM_IOCTL_VERSION ioctl argument type.
*
* \sa drmGetVersion().
@@ -149,7 +148,7 @@ struct drm_version {
char __user *desc; /**< User-space buffer to hold desc */
};
-/**
+/*
* DRM_IOCTL_GET_UNIQUE ioctl argument type.
*
* \sa drmGetBusid() and drmSetBusId().
@@ -168,7 +167,7 @@ struct drm_block {
int unused;
};
-/**
+/*
* DRM_IOCTL_CONTROL ioctl argument type.
*
* \sa drmCtlInstHandler() and drmCtlUninstHandler().
@@ -183,7 +182,7 @@ struct drm_control {
int irq;
};
-/**
+/*
* Type of memory to map.
*/
enum drm_map_type {
@@ -195,7 +194,7 @@ enum drm_map_type {
_DRM_CONSISTENT = 5 /**< Consistent memory for PCI DMA */
};
-/**
+/*
* Memory mapping flags.
*/
enum drm_map_flags {
@@ -214,7 +213,7 @@ struct drm_ctx_priv_map {
void *handle; /**< Handle of map */
};
-/**
+/*
* DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
* argument type.
*
@@ -231,7 +230,7 @@ struct drm_map {
/* Private data */
};
-/**
+/*
* DRM_IOCTL_GET_CLIENT ioctl argument type.
*/
struct drm_client {
@@ -263,7 +262,7 @@ enum drm_stat_type {
/* Add to the *END* of the list */
};
-/**
+/*
* DRM_IOCTL_GET_STATS ioctl argument type.
*/
struct drm_stats {
@@ -274,7 +273,7 @@ struct drm_stats {
} data[15];
};
-/**
+/*
* Hardware locking flags.
*/
enum drm_lock_flags {
@@ -289,7 +288,7 @@ enum drm_lock_flags {
_DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
};
-/**
+/*
* DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
*
* \sa drmGetLock() and drmUnlock().
@@ -299,7 +298,7 @@ struct drm_lock {
enum drm_lock_flags flags;
};
-/**
+/*
* DMA flags
*
* \warning
@@ -328,7 +327,7 @@ enum drm_dma_flags {
_DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
};
-/**
+/*
* DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
*
* \sa drmAddBufs().
@@ -351,7 +350,7 @@ struct drm_buf_desc {
*/
};
-/**
+/*
* DRM_IOCTL_INFO_BUFS ioctl argument type.
*/
struct drm_buf_info {
@@ -359,7 +358,7 @@ struct drm_buf_info {
struct drm_buf_desc __user *list;
};
-/**
+/*
* DRM_IOCTL_FREE_BUFS ioctl argument type.
*/
struct drm_buf_free {
@@ -367,7 +366,7 @@ struct drm_buf_free {
int __user *list;
};
-/**
+/*
* Buffer information
*
* \sa drm_buf_map.
@@ -379,7 +378,7 @@ struct drm_buf_pub {
void __user *address; /**< Address of buffer */
};
-/**
+/*
* DRM_IOCTL_MAP_BUFS ioctl argument type.
*/
struct drm_buf_map {
@@ -392,7 +391,7 @@ struct drm_buf_map {
struct drm_buf_pub __user *list; /**< Buffer information */
};
-/**
+/*
* DRM_IOCTL_DMA ioctl argument type.
*
* Indices here refer to the offset into the buffer list in drm_buf_get.
@@ -417,7 +416,7 @@ enum drm_ctx_flags {
_DRM_CONTEXT_2DONLY = 0x02
};
-/**
+/*
* DRM_IOCTL_ADD_CTX ioctl argument type.
*
* \sa drmCreateContext() and drmDestroyContext().
@@ -427,7 +426,7 @@ struct drm_ctx {
enum drm_ctx_flags flags;
};
-/**
+/*
* DRM_IOCTL_RES_CTX ioctl argument type.
*/
struct drm_ctx_res {
@@ -435,14 +434,14 @@ struct drm_ctx_res {
struct drm_ctx __user *contexts;
};
-/**
+/*
* DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
*/
struct drm_draw {
drm_drawable_t handle;
};
-/**
+/*
* DRM_IOCTL_UPDATE_DRAW ioctl argument type.
*/
typedef enum {
@@ -456,14 +455,14 @@ struct drm_update_draw {
unsigned long long data;
};
-/**
+/*
* DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
*/
struct drm_auth {
drm_magic_t magic;
};
-/**
+/*
* DRM_IOCTL_IRQ_BUSID ioctl argument type.
*
* \sa drmGetInterruptFromBusID().
@@ -505,7 +504,7 @@ struct drm_wait_vblank_reply {
long tval_usec;
};
-/**
+/*
* DRM_IOCTL_WAIT_VBLANK ioctl argument type.
*
* \sa drmWaitVBlank().
@@ -518,7 +517,7 @@ union drm_wait_vblank {
#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2
-/**
+/*
* DRM_IOCTL_MODESET_CTL ioctl argument type
*
* \sa drmModesetCtl().
@@ -528,7 +527,7 @@ struct drm_modeset_ctl {
__u32 cmd;
};
-/**
+/*
* DRM_IOCTL_AGP_ENABLE ioctl argument type.
*
* \sa drmAgpEnable().
@@ -537,7 +536,7 @@ struct drm_agp_mode {
unsigned long mode; /**< AGP mode */
};
-/**
+/*
* DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
*
* \sa drmAgpAlloc() and drmAgpFree().
@@ -549,7 +548,7 @@ struct drm_agp_buffer {
unsigned long physical; /**< Physical used by i810 */
};
-/**
+/*
* DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
*
* \sa drmAgpBind() and drmAgpUnbind().
@@ -559,7 +558,7 @@ struct drm_agp_binding {
unsigned long offset; /**< In bytes -- will round to page boundary */
};
-/**
+/*
* DRM_IOCTL_AGP_INFO ioctl argument type.
*
* \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
@@ -580,7 +579,7 @@ struct drm_agp_info {
unsigned short id_device;
};
-/**
+/*
* DRM_IOCTL_SG_ALLOC ioctl argument type.
*/
struct drm_scatter_gather {
@@ -588,7 +587,7 @@ struct drm_scatter_gather {
unsigned long handle; /**< Used for mapping / unmapping */
};
-/**
+/*
* DRM_IOCTL_SET_VERSION ioctl argument type.
*/
struct drm_set_version {
@@ -598,14 +597,14 @@ struct drm_set_version {
int drm_dd_minor;
};
-/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
+/* DRM_IOCTL_GEM_CLOSE ioctl argument type */
struct drm_gem_close {
/** Handle of the object to be closed. */
__u32 handle;
__u32 pad;
};
-/** DRM_IOCTL_GEM_FLINK ioctl argument type */
+/* DRM_IOCTL_GEM_FLINK ioctl argument type */
struct drm_gem_flink {
/** Handle for the object being named */
__u32 handle;
@@ -614,7 +613,7 @@ struct drm_gem_flink {
__u32 name;
};
-/** DRM_IOCTL_GEM_OPEN ioctl argument type */
+/* DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
/** Name of object being opened */
__u32 name;
@@ -626,33 +625,150 @@ struct drm_gem_open {
__u64 size;
};
+/**
+ * DRM_CAP_DUMB_BUFFER
+ *
+ * If set to 1, the driver supports creating dumb buffers via the
+ * &DRM_IOCTL_MODE_CREATE_DUMB ioctl.
+ */
#define DRM_CAP_DUMB_BUFFER 0x1
+/**
+ * DRM_CAP_VBLANK_HIGH_CRTC
+ *
+ * If set to 1, the kernel supports specifying a :ref:`CRTC index<crtc_index>`
+ * in the high bits of &drm_wait_vblank_request.type.
+ *
+ * Starting from kernel version 2.6.39, this capability is always set to 1.
+ */
#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
+/**
+ * DRM_CAP_DUMB_PREFERRED_DEPTH
+ *
+ * The preferred bit depth for dumb buffers.
+ *
+ * The bit depth is the number of bits used to indicate the color of a single
+ * pixel excluding any padding. This is different from the number of bits per
+ * pixel. For instance, XRGB8888 has a bit depth of 24 but has 32 bits per
+ * pixel.
+ *
+ * Note that this preference only applies to dumb buffers; it's irrelevant for
+ * other types of buffers.
+ */
#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
+/**
+ * DRM_CAP_DUMB_PREFER_SHADOW
+ *
+ * If set to 1, the driver prefers userspace to render to a shadow buffer
+ * instead of directly rendering to a dumb buffer. For best speed, userspace
+ * should do streaming ordered memory copies into the dumb buffer and never
+ * read from it.
+ *
+ * Note that this preference only applies to dumb buffers; it's irrelevant for
+ * other types of buffers.
+ */
#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
+/**
+ * DRM_CAP_PRIME
+ *
+ * Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
+ * and &DRM_PRIME_CAP_EXPORT.
+ *
+ * PRIME buffers are exposed as dma-buf file descriptors. See
+ * Documentation/gpu/drm-mm.rst, section "PRIME Buffer Sharing".
+ */
#define DRM_CAP_PRIME 0x5
+/**
+ * DRM_PRIME_CAP_IMPORT
+ *
+ * If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
+ * buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
+ */
#define DRM_PRIME_CAP_IMPORT 0x1
+/**
+ * DRM_PRIME_CAP_EXPORT
+ *
+ * If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
+ * buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
+ */
#define DRM_PRIME_CAP_EXPORT 0x2
+/**
+ * DRM_CAP_TIMESTAMP_MONOTONIC
+ *
+ * If set to 0, the kernel will report timestamps with ``CLOCK_REALTIME`` in
+ * struct drm_event_vblank. If set to 1, the kernel will report timestamps with
+ * ``CLOCK_MONOTONIC``. See ``clock_gettime(2)`` for the definition of these
+ * clocks.
+ *
+ * Starting from kernel version 2.6.39, the default value for this capability
+ * is 1. Starting from kernel version 4.15, this capability is always set to 1.
+ */
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
+/**
+ * DRM_CAP_ASYNC_PAGE_FLIP
+ *
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC.
+ */
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
-/*
- * The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid widthxheight
- * combination for the hardware cursor. The intention is that a hardware
- * agnostic userspace can query a cursor plane size to use.
+/**
+ * DRM_CAP_CURSOR_WIDTH
+ *
+ * The ``CURSOR_WIDTH`` and ``CURSOR_HEIGHT`` capabilities return a valid
+ * width x height combination for the hardware cursor. The intention is that a
+ * hardware agnostic userspace can query a cursor plane size to use.
*
* Note that the cross-driver contract is to merely return a valid size;
* drivers are free to attach another meaning on top, e.g. i915 returns the
* maximum plane size.
*/
#define DRM_CAP_CURSOR_WIDTH 0x8
+/**
+ * DRM_CAP_CURSOR_HEIGHT
+ *
+ * See &DRM_CAP_CURSOR_WIDTH.
+ */
#define DRM_CAP_CURSOR_HEIGHT 0x9
+/**
+ * DRM_CAP_ADDFB2_MODIFIERS
+ *
+ * If set to 1, the driver supports supplying modifiers in the
+ * &DRM_IOCTL_MODE_ADDFB2 ioctl.
+ */
#define DRM_CAP_ADDFB2_MODIFIERS 0x10
+/**
+ * DRM_CAP_PAGE_FLIP_TARGET
+ *
+ * If set to 1, the driver supports the &DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE and
+ * &DRM_MODE_PAGE_FLIP_TARGET_RELATIVE flags in
+ * &drm_mode_crtc_page_flip_target.flags for the &DRM_IOCTL_MODE_PAGE_FLIP
+ * ioctl.
+ */
#define DRM_CAP_PAGE_FLIP_TARGET 0x11
+/**
+ * DRM_CAP_CRTC_IN_VBLANK_EVENT
+ *
+ * If set to 1, the kernel supports reporting the CRTC ID in
+ * &drm_event_vblank.crtc_id for the &DRM_EVENT_VBLANK and
+ * &DRM_EVENT_FLIP_COMPLETE events.
+ *
+ * Starting from kernel version 4.12, this capability is always set to 1.
+ */
#define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12
+/**
+ * DRM_CAP_SYNCOBJ
+ *
+ * If set to 1, the driver supports sync objects. See
+ * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ */
#define DRM_CAP_SYNCOBJ 0x13
+/**
+ * DRM_CAP_SYNCOBJ_TIMELINE
+ *
+ * If set to 1, the driver supports timeline operations on sync objects. See
+ * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ */
#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
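Taken together, the capability documentation above maps to a simple probe pattern; a minimal sketch (fd assumed, error handling elided):

    struct drm_get_cap cap = { .capability = DRM_CAP_DUMB_BUFFER };

    if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0 && cap.value == 1)
        /* DRM_IOCTL_MODE_CREATE_DUMB is available. */;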
-/** DRM_IOCTL_GET_CAP ioctl argument type */
+/* DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
__u64 capability;
__u64 value;
@@ -661,9 +777,12 @@ struct drm_get_cap {
/**
* DRM_CLIENT_CAP_STEREO_3D
*
- * if set to 1, the DRM core will expose the stereo 3D capabilities of the
+ * If set to 1, the DRM core will expose the stereo 3D capabilities of the
* monitor by advertising the supported 3D layouts in the flags of struct
- * drm_mode_modeinfo.
+ * drm_mode_modeinfo. See ``DRM_MODE_FLAG_3D_*``.
+ *
+ * This capability is always supported for all drivers starting from kernel
+ * version 3.13.
*/
#define DRM_CLIENT_CAP_STEREO_3D 1
@@ -672,13 +791,25 @@ struct drm_get_cap {
*
* If set to 1, the DRM core will expose all planes (overlay, primary, and
* cursor) to userspace.
+ *
+ * This capability has been introduced in kernel version 3.15. Starting from
+ * kernel version 3.17, this capability is always supported for all drivers.
*/
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
/**
* DRM_CLIENT_CAP_ATOMIC
*
- * If set to 1, the DRM core will expose atomic properties to userspace
+ * If set to 1, the DRM core will expose atomic properties to userspace. This
+ * implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and
+ * &DRM_CLIENT_CAP_ASPECT_RATIO.
+ *
+ * If the driver doesn't support atomic mode-setting, enabling this capability
+ * will fail with -EOPNOTSUPP.
+ *
+ * This capability has been introduced in kernel version 4.0. Starting from
+ * kernel version 4.2, this capability is always supported for atomic-capable
+ * drivers.
*/
#define DRM_CLIENT_CAP_ATOMIC 3
@@ -686,6 +817,10 @@ struct drm_get_cap {
* DRM_CLIENT_CAP_ASPECT_RATIO
*
* If set to 1, the DRM core will provide aspect ratio information in modes.
+ * See ``DRM_MODE_FLAG_PIC_AR_*``.
+ *
+ * This capability is always supported for all drivers starting from kernel
+ * version 4.18.
*/
#define DRM_CLIENT_CAP_ASPECT_RATIO 4
@@ -693,12 +828,15 @@ struct drm_get_cap {
* DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
*
* If set to 1, the DRM core will expose special connectors to be used for
- * writing back to memory the scene setup in the commit. Depends on client
- * also supporting DRM_CLIENT_CAP_ATOMIC
+ * writing back to memory the scene setup in the commit. The client must enable
+ * &DRM_CLIENT_CAP_ATOMIC first.
+ *
+ * This capability is always supported for atomic-capable drivers starting from
+ * kernel version 4.19.
*/
#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
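Client caps are set rather than queried; a minimal sketch of opting in to atomic (fd assumed), which per the notes above implies UNIVERSAL_PLANES and ASPECT_RATIO:

    struct drm_set_client_cap ccap = {
        .capability = DRM_CLIENT_CAP_ATOMIC,
        .value      = 1,
    };

    if (ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &ccap) != 0)
        /* e.g. -EOPNOTSUPP: fall back to legacy modesetting. */;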
-/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
+/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
__u64 value;
@@ -912,6 +1050,16 @@ extern "C" {
#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob)
#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
+/**
+ * DRM_IOCTL_MODE_RMFB - Remove a framebuffer.
+ *
+ * This removes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
+ * argument is a framebuffer object ID.
+ *
+ * Warning: removing a framebuffer currently in-use on an enabled plane will
+ * disable that plane. The CRTC the plane is linked to may also be disabled
+ * (depending on driver capabilities).
+ */
#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
@@ -949,6 +1097,26 @@ extern "C" {
#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
/**
+ * DRM_IOCTL_MODE_GETFB2 - Get framebuffer metadata.
+ *
+ * This queries metadata about a framebuffer. User-space fills
+ * &drm_mode_fb_cmd2.fb_id as the input, and the kernel fills the rest of the
+ * struct as the output.
+ *
+ * If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles
+ * will be filled with GEM buffer handles. Planes are valid until one has a
+ * zero handle -- this can be used to compute the number of planes.
+ *
+ * Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid
+ * until one has a zero &drm_mode_fb_cmd2.pitches.
+ *
+ * If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set
+ * in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the
+ * modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier.
+ */
+#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
+
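A sketch of the zero-handle termination rule described above (fd and fb_id assumed; caller is DRM master or has CAP_SYS_ADMIN):

    struct drm_mode_fb_cmd2 fb = { .fb_id = fb_id };
    int planes = 0;

    if (ioctl(fd, DRM_IOCTL_MODE_GETFB2, &fb) == 0)
        while (planes < 4 && fb.handles[planes])
            planes++;   /* number of populated planes */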
+/*
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
* Generic IOCTLS restart at 0xA0.
@@ -959,7 +1127,7 @@ extern "C" {
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
-/**
+/*
* Header for events written back to userspace on the drm fd. The
* type defines the type of event, the length specifies the total
* length of the event (including the header), and user_data is
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 8bc0b31597d8..868d6909b718 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -58,6 +58,30 @@ extern "C" {
* may preserve meaning - such as number of planes - from the fourcc code,
* whereas others may not.
*
+ * Modifiers must uniquely encode buffer layout. In other words, a buffer must
+ * match only a single modifier. A modifier must not be a subset of layouts of
+ * another modifier. For instance, it's incorrect to encode pitch alignment in
+ * a modifier: a buffer may match a 64-pixel aligned modifier and a 32-pixel
+ * aligned modifier. That said, modifiers can have implicit minimal
+ * requirements.
+ *
+ * For modifiers where the combination of fourcc code and modifier can alias,
+ * a canonical pair needs to be defined and used by all drivers. Preferred
+ * combinations are also encouraged where all combinations might lead to
+ * confusion and unnecessarily reduced interoperability. An example for the
+ * latter is AFBC, where the ABGR layouts are preferred over ARGB layouts.
+ *
+ * There are two kinds of modifier users:
+ *
+ * - Kernel and user-space drivers: for drivers it's important that modifiers
+ * don't alias; otherwise two drivers might support the same format but use
+ * different aliases, preventing them from sharing buffers in an efficient
+ * format.
+ * - Higher-level programs interfacing with KMS/GBM/EGL/Vulkan/etc: these users
+ * see modifiers as opaque tokens they can check for equality and intersect.
+ * These users mustn't need to reason about the modifier value
+ * (i.e. they are not expected to extract information out of the modifier).
+ *
* Vendors should document their modifier usage in as much detail as
* possible, to ensure maximum compatibility across devices, drivers and
* applications.
@@ -75,12 +99,42 @@ extern "C" {
#define DRM_FORMAT_INVALID 0
/* color index */
+#define DRM_FORMAT_C1 fourcc_code('C', '1', ' ', ' ') /* [7:0] C0:C1:C2:C3:C4:C5:C6:C7 1:1:1:1:1:1:1:1 eight pixels/byte */
+#define DRM_FORMAT_C2 fourcc_code('C', '2', ' ', ' ') /* [7:0] C0:C1:C2:C3 2:2:2:2 four pixels/byte */
+#define DRM_FORMAT_C4 fourcc_code('C', '4', ' ', ' ') /* [7:0] C0:C1 4:4 two pixels/byte */
#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
-/* 8 bpp Red */
+/* 1 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D1 fourcc_code('D', '1', ' ', ' ') /* [7:0] D0:D1:D2:D3:D4:D5:D6:D7 1:1:1:1:1:1:1:1 eight pixels/byte */
+
+/* 2 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D2 fourcc_code('D', '2', ' ', ' ') /* [7:0] D0:D1:D2:D3 2:2:2:2 four pixels/byte */
+
+/* 4 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D4 fourcc_code('D', '4', ' ', ' ') /* [7:0] D0:D1 4:4 two pixels/byte */
+
+/* 8 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D8 fourcc_code('D', '8', ' ', ' ') /* [7:0] D */
+
+/* 1 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R1 fourcc_code('R', '1', ' ', ' ') /* [7:0] R0:R1:R2:R3:R4:R5:R6:R7 1:1:1:1:1:1:1:1 eight pixels/byte */
+
+/* 2 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R2 fourcc_code('R', '2', ' ', ' ') /* [7:0] R0:R1:R2:R3 2:2:2:2 four pixels/byte */
+
+/* 4 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R4 fourcc_code('R', '4', ' ', ' ') /* [7:0] R0:R1 4:4 two pixels/byte */
+
+/* 8 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
-/* 16 bpp Red */
+/* 10 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R10 fourcc_code('R', '1', '0', ' ') /* [15:0] x:R 6:10 little endian */
+
+/* 12 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R12 fourcc_code('R', '1', '2', ' ') /* [15:0] x:R 4:12 little endian */
+
+/* 16 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R16 fourcc_code('R', '1', '6', ' ') /* [15:0] R little endian */
/* 16 bpp RG */
@@ -144,6 +198,13 @@ extern "C" {
#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
+/* 64 bpp RGB */
+#define DRM_FORMAT_XRGB16161616 fourcc_code('X', 'R', '4', '8') /* [63:0] x:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_XBGR16161616 fourcc_code('X', 'B', '4', '8') /* [63:0] x:B:G:R 16:16:16:16 little endian */
+
+#define DRM_FORMAT_ARGB16161616 fourcc_code('A', 'R', '4', '8') /* [63:0] A:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_ABGR16161616 fourcc_code('A', 'B', '4', '8') /* [63:0] A:B:G:R 16:16:16:16 little endian */
+
/*
* Floating point 64bpp RGB
* IEEE 754-2008 binary16 half-precision float
@@ -155,6 +216,12 @@ extern "C" {
#define DRM_FORMAT_ARGB16161616F fourcc_code('A', 'R', '4', 'H') /* [63:0] A:R:G:B 16:16:16:16 little endian */
#define DRM_FORMAT_ABGR16161616F fourcc_code('A', 'B', '4', 'H') /* [63:0] A:B:G:R 16:16:16:16 little endian */
+/*
+ * RGBA format with 10-bit components packed in 64-bit per pixel, with 6 bits
+ * of unused padding per component:
+ */
+#define DRM_FORMAT_AXBXGXRX106106106106 fourcc_code('A', 'B', '1', '0') /* [63:0] A:x:B:x:G:x:R:x 10:6:10:6:10:6:10:6 little endian */
+
/* packed YCbCr */
#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
@@ -162,7 +229,9 @@ extern "C" {
#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
+#define DRM_FORMAT_AVUY8888 fourcc_code('A', 'V', 'U', 'Y') /* [31:0] A:Cr:Cb:Y 8:8:8:8 little endian */
#define DRM_FORMAT_XYUV8888 fourcc_code('X', 'Y', 'U', 'V') /* [31:0] X:Y:Cb:Cr 8:8:8:8 little endian */
+#define DRM_FORMAT_XVUY8888 fourcc_code('X', 'V', 'U', 'Y') /* [31:0] X:Cr:Cb:Y 8:8:8:8 little endian */
#define DRM_FORMAT_VUY888 fourcc_code('V', 'U', '2', '4') /* [23:0] Cr:Cb:Y 8:8:8 little endian */
#define DRM_FORMAT_VUY101010 fourcc_code('V', 'U', '3', '0') /* Y followed by U then V, 10:10:10. Non-linear modifier only */
@@ -236,6 +305,12 @@ extern "C" {
#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
+/*
+ * 2 plane YCbCr
+ * index 0 = Y plane, [39:0] Y3:Y2:Y1:Y0 little endian
+ * index 1 = Cr:Cb plane, [39:0] Cr1:Cb1:Cr0:Cb0 little endian
+ */
+#define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') /* 2x2 subsampled Cr:Cb plane */
/*
* 2 plane YCbCr MSB aligned
@@ -265,6 +340,29 @@ extern "C" {
*/
#define DRM_FORMAT_P016 fourcc_code('P', '0', '1', '6') /* 2x2 subsampled Cr:Cb plane 16 bits per channel */
+/* 2 plane YCbCr420.
+ * 3 10 bit components and 2 padding bits packed into 4 bytes.
+ * index 0 = Y plane, [31:0] x:Y2:Y1:Y0 2:10:10:10 little endian
+ * index 1 = Cr:Cb plane, [63:0] x:Cr2:Cb2:Cr1:x:Cb1:Cr0:Cb0 [2:10:10:10:2:10:10:10] little endian
+ */
+#define DRM_FORMAT_P030 fourcc_code('P', '0', '3', '0') /* 2x2 subsampled Cr:Cb plane 10 bits per channel packed */
+
+/* 3 plane non-subsampled (444) YCbCr
+ * 16 bits per component, but only 10 bits are used and 6 bits are padded
+ * index 0: Y plane, [15:0] Y:x [10:6] little endian
+ * index 1: Cb plane, [15:0] Cb:x [10:6] little endian
+ * index 2: Cr plane, [15:0] Cr:x [10:6] little endian
+ */
+#define DRM_FORMAT_Q410 fourcc_code('Q', '4', '1', '0')
+
+/* 3 plane non-subsampled (444) YCrCb
+ * 16 bits per component, but only 10 bits are used and 6 bits are padded
+ * index 0: Y plane, [15:0] Y:x [10:6] little endian
+ * index 1: Cr plane, [15:0] Cr:x [10:6] little endian
+ * index 2: Cb plane, [15:0] Cb:x [10:6] little endian
+ */
+#define DRM_FORMAT_Q401 fourcc_code('Q', '4', '0', '1')
+
/*
* 3 plane YCbCr
* index 0: Y plane, [7:0] Y
@@ -298,7 +396,6 @@ extern "C" {
*/
/* Vendor Ids: */
-#define DRM_FORMAT_MOD_NONE 0
#define DRM_FORMAT_MOD_VENDOR_NONE 0
#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
@@ -309,11 +406,18 @@ extern "C" {
#define DRM_FORMAT_MOD_VENDOR_BROADCOM 0x07
#define DRM_FORMAT_MOD_VENDOR_ARM 0x08
#define DRM_FORMAT_MOD_VENDOR_ALLWINNER 0x09
+#define DRM_FORMAT_MOD_VENDOR_AMLOGIC 0x0a
/* add more to the end as needed */
#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
+#define fourcc_mod_get_vendor(modifier) \
+ (((modifier) >> 56) & 0xff)
+
+#define fourcc_mod_is_vendor(modifier, vendor) \
+ (fourcc_mod_get_vendor(modifier) == DRM_FORMAT_MOD_VENDOR_## vendor)
+
#define fourcc_mod_code(vendor, val) \
((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | ((val) & 0x00ffffffffffffffULL))
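The new helpers make vendor dispatch explicit; a small illustrative use (drm_fourcc_canonicalize_nvidia_format_mod is defined further down in this header):

    if (fourcc_mod_is_vendor(modifier, NVIDIA))
        modifier = drm_fourcc_canonicalize_nvidia_format_mod(modifier);
    else if (fourcc_mod_get_vendor(modifier) == DRM_FORMAT_MOD_VENDOR_AMLOGIC)
        /* decode with the AMLOGIC layout/option masks below */;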
@@ -323,8 +427,33 @@ extern "C" {
* When adding a new token please document the layout with a code comment,
* similar to the fourcc codes above. drm_fourcc.h is considered the
* authoritative source for all of these.
+ *
+ * Generic modifier names:
+ *
+ * DRM_FORMAT_MOD_GENERIC_* definitions are used to provide vendor-neutral names
+ * for layouts which are common across multiple vendors. To preserve
+ * compatibility, in cases where a vendor-specific definition already exists and
+ * a generic name for it is desired, the common name is a purely symbolic alias
+ * and must use the same numerical value as the original definition.
+ *
+ * Note that generic names should only be used for modifiers which describe
+ * generic layouts (such as pixel re-ordering), which may have
+ * independently-developed support across multiple vendors.
+ *
+ * In future cases where a generic layout is identified before merging with a
+ * vendor-specific modifier, a new 'GENERIC' vendor or modifier using vendor
+ * 'NONE' could be considered. This should only be for obvious, exceptional
+ * cases to avoid polluting the 'GENERIC' namespace with modifiers which only
+ * apply to a single vendor.
+ *
+ * Generic names should not be used for cases where multiple hardware vendors
+ * have implementations of the same standardised compression scheme (such as
+ * AFBC). In those cases, all implementations should use the same format
+ * modifier(s), reflecting the vendor of the standard.
*/
+#define DRM_FORMAT_MOD_GENERIC_16_16_TILE DRM_FORMAT_MOD_SAMSUNG_16_16_TILE
+
/*
* Invalid Modifier
*
@@ -344,6 +473,16 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0)
+/*
+ * Deprecated: use DRM_FORMAT_MOD_LINEAR instead
+ *
+ * The "none" format modifier doesn't actually mean that the modifier is
+ * implicit, instead it means that the layout is linear. Whether modifiers are
+ * used is out-of-band information carried in an API-specific way (e.g. in a
+ * flag for drm_mode_fb_cmd2).
+ */
+#define DRM_FORMAT_MOD_NONE 0
+
/* Intel framebuffer modifiers */
/*
@@ -354,9 +493,12 @@ extern "C" {
* a platform-dependent stride. On top of that the memory can apply
* platform-depending swizzling of some higher address bits into bit6.
*
- * This format is highly platforms specific and not useful for cross-driver
- * sharing. It exists since on a given platform it does uniquely identify the
- * layout in a simple way for i915-specific userspace.
+ * Note that this layout is only accurate on intel gen 8+ or valleyview chipsets.
+ * On earlier platforms the layout is highly platform-specific and not useful for
+ * cross-driver sharing. It exists since on a given platform it does uniquely
+ * identify the layout in a simple way for i915-specific userspace, which
+ * facilitated conversion of userspace to modifiers. Additionally the exact
+ * format on some really old platforms is not known.
*/
#define I915_FORMAT_MOD_X_TILED fourcc_mod_code(INTEL, 1)
@@ -369,9 +511,12 @@ extern "C" {
* memory can apply platform-depending swizzling of some higher address bits
* into bit6.
*
- * This format is highly platforms specific and not useful for cross-driver
- * sharing. It exists since on a given platform it does uniquely identify the
- * layout in a simple way for i915-specific userspace.
+ * Note that this layout is only accurate on intel gen 8+ or valleyview chipsets.
+ * On earlier platforms the layout is highly platform-specific and not useful for
+ * cross-driver sharing. It exists since on a given platform it does uniquely
+ * identify the layout in a simple way for i915-specific userspace, which
+ * facilitated conversion of userspace to modifiers. Additionally the exact
+ * format on some really old platforms is not known.
*/
#define I915_FORMAT_MOD_Y_TILED fourcc_mod_code(INTEL, 2)
@@ -435,6 +580,72 @@ extern "C" {
#define I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS fourcc_mod_code(INTEL, 7)
/*
+ * Intel Color Control Surface with Clear Color (CCS) for Gen-12 render
+ * compression.
+ *
+ * The main surface is Y-tiled and is at plane index 0 whereas CCS is linear
+ * and at index 1. The clear color is stored at index 2, and the pitch should
+ * be 64-byte aligned. The clear color structure is 256 bits. The first 128 bits
+ * represent Raw Clear Color Red, Green, Blue and Alpha, each represented
+ * by 32 bits. The raw clear color is consumed by the 3d engine and generates
+ * the converted clear color of size 64 bits. The first 32 bits store the Lower
+ * Converted Clear Color value and the next 32 bits store the Higher Converted
+ * Clear Color value when applicable. The Converted Clear Color values are
+ * consumed by the DE. The last 64 bits are used to store Color Discard Enable
+ * and Depth Clear Value Valid which are ignored by the DE. A CCS cache line
+ * corresponds to an area of 4x1 tiles in the main surface. The main surface
+ * pitch is required to be a multiple of 4 tile widths.
+ */
+#define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC fourcc_mod_code(INTEL, 8)
+
+/*
+ * Intel Tile 4 layout
+ *
+ * This is a tiled layout using 4KB tiles in a row-major layout. It has the same
+ * shape as Tile Y at two granularities: 4KB (128B x 32) and 64B (16B x 4). It
+ * only differs from Tile Y at the 256B granularity in between. At this
+ * granularity, Tile Y has a shape of 16B x 32 rows, but this tiling has a shape
+ * of 64B x 8 rows.
+ */
+#define I915_FORMAT_MOD_4_TILED fourcc_mod_code(INTEL, 9)
+
+/*
+ * Intel color control surfaces (CCS) for DG2 render compression.
+ *
+ * The main surface is Tile 4 and at plane index 0. The CCS data is stored
+ * outside of the GEM object in a reserved memory area dedicated for the
+ * storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The
+ * main surface pitch is required to be a multiple of four Tile 4 widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS fourcc_mod_code(INTEL, 10)
+
+/*
+ * Intel color control surfaces (CCS) for DG2 media compression.
+ *
+ * The main surface is Tile 4 and at plane index 0. For semi-planar formats
+ * like NV12, the Y and UV planes are Tile 4 and are located at plane indices
+ * 0 and 1, respectively. The CCS for all planes are stored outside of the
+ * GEM object in a reserved memory area dedicated for the storage of the
+ * CCS data for all RC/RC_CC/MC compressible GEM objects. The main surface
+ * pitch is required to be a multiple of four Tile 4 widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_DG2_MC_CCS fourcc_mod_code(INTEL, 11)
+
+/*
+ * Intel Color Control Surface with Clear Color (CCS) for DG2 render compression.
+ *
+ * The main surface is Tile 4 and at plane index 0. The CCS data is stored
+ * outside of the GEM object in a reserved memory area dedicated for the
+ * storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The
+ * main surface pitch is required to be a multiple of four Tile 4 widths. The
+ * clear color is stored at plane index 1 and the pitch should be 64-byte
+ * aligned. The format of the 256 bits of clear color data matches the one used
+ * for the I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC modifier, see its description
+ * for details.
+ */
+#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC fourcc_mod_code(INTEL, 12)
+
+/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*
* Macroblocks are laid in a Z-shape, and each pixel data is following the
@@ -471,6 +682,28 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_QCOM_COMPRESSED fourcc_mod_code(QCOM, 1)
+/*
+ * Qualcomm Tiled Format
+ *
+ * Similar to DRM_FORMAT_MOD_QCOM_COMPRESSED but not compressed.
+ * Implementation may be platform and base-format specific.
+ *
+ * Each macrotile consists of m x n (mostly 4 x 4) tiles.
+ * Pixel data pitch/stride is aligned with macrotile width.
+ * Pixel data height is aligned with macrotile height.
+ * The entire pixel data buffer is aligned to 4K bytes.
+ */
+#define DRM_FORMAT_MOD_QCOM_TILED3 fourcc_mod_code(QCOM, 3)
+
+/*
+ * Qualcomm Alternate Tiled Format
+ *
+ * Alternate tiled format typically only used within GMEM.
+ * Implementation may be platform and base-format specific.
+ */
+#define DRM_FORMAT_MOD_QCOM_TILED2 fourcc_mod_code(QCOM, 2)
+
+
/* Vivante framebuffer modifiers */
/*
@@ -521,7 +754,113 @@ extern "C" {
#define DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED fourcc_mod_code(NVIDIA, 1)
/*
- * 16Bx2 Block Linear layout, used by desktop GPUs, and Tegra K1 and later
+ * Generalized Block Linear layout, used by desktop GPUs starting with NV50/G80,
+ * and Tegra GPUs starting with Tegra K1.
+ *
+ * Pixels are arranged in Groups of Bytes (GOBs). GOB size and layout vary
+ * based on the architecture generation. GOBs themselves are then arranged in
+ * 3D blocks, with the block dimensions (in terms of GOBs) always being a power
+ * of two, and hence expressible as their log2 equivalent (E.g., "2" represents
+ * a block depth or height of "4").
+ *
+ * Chapter 20 "Pixel Memory Formats" of the Tegra X1 TRM describes this format
+ * in full detail.
+ *
+ * Macro
+ * Bits Param Description
+ * ---- ----- -----------------------------------------------------------------
+ *
+ * 3:0 h log2(height) of each block, in GOBs. Placed here for
+ * compatibility with the existing
+ * DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK()-based modifiers.
+ *
+ * 4:4 - Must be 1, to indicate block-linear layout. Necessary for
+ * compatibility with the existing
+ * DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK()-based modifiers.
+ *
+ * 8:5 - Reserved (To support 3D-surfaces with variable log2(depth) block
+ * size). Must be zero.
+ *
+ * Note there is no log2(width) parameter. Some portions of the
+ * hardware support a block width of two GOBs, but it is impractical
+ * to use due to lack of support elsewhere, and has no known
+ * benefits.
+ *
+ * 11:9 - Reserved (To support 2D-array textures with variable array stride
+ * in blocks, specified via log2(tile width in blocks)). Must be
+ * zero.
+ *
+ * 19:12 k Page Kind. This value directly maps to a field in the page
+ * tables of all GPUs >= NV50. It affects the exact layout of bits
+ * in memory and can be derived from the tuple
+ *
+ * (format, GPU model, compression type, samples per pixel)
+ *
+ * Where compression type is defined below. If GPU model were
+ * implied by the format modifier, format, or memory buffer, page
+ * kind would not need to be included in the modifier itself, but
+ * since the modifier should define the layout of the associated
+ * memory buffer independent from any device or other context, it
+ * must be included here.
+ *
+ * 21:20 g GOB Height and Page Kind Generation. The height of a GOB changed
+ * starting with Fermi GPUs. Additionally, the mapping between page
+ * kind and bit layout has changed at various points.
+ *
+ * 0 = Gob Height 8, Fermi - Volta, Tegra K1+ Page Kind mapping
+ * 1 = Gob Height 4, G80 - GT2XX Page Kind mapping
+ * 2 = Gob Height 8, Turing+ Page Kind mapping
+ * 3 = Reserved for future use.
+ *
+ * 22:22 s Sector layout. On Tegra GPUs prior to Xavier, there is a further
+ * bit remapping step that occurs at an even lower level than the
+ * page kind and block linear swizzles. This causes the layout of
+ * surfaces mapped in those SoCs' GPUs to be incompatible with the
+ * equivalent mapping on other GPUs in the same system.
+ *
+ * 0 = Tegra K1 - Tegra Parker/TX2 Layout.
+ * 1 = Desktop GPU and Tegra Xavier+ Layout
+ *
+ * 25:23 c Lossless Framebuffer Compression type.
+ *
+ * 0 = none
+ * 1 = ROP/3D, layout 1, exact compression format implied by Page
+ * Kind field
+ * 2 = ROP/3D, layout 2, exact compression format implied by Page
+ * Kind field
+ * 3 = CDE horizontal
+ * 4 = CDE vertical
+ * 5 = Reserved for future use
+ * 6 = Reserved for future use
+ * 7 = Reserved for future use
+ *
+ * 55:26 - Reserved for future use. Must be zero.
+ */
+#define DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(c, s, g, k, h) \
+ fourcc_mod_code(NVIDIA, (0x10 | \
+ ((h) & 0xf) | \
+ (((k) & 0xff) << 12) | \
+ (((g) & 0x3) << 20) | \
+ (((s) & 0x1) << 22) | \
+ (((c) & 0x7) << 23)))
+
+/* To grandfather in prior block linear format modifiers to the above layout,
+ * the page kind "0", which corresponds to "pitch/linear" and hence is unusable
+ * with block-linear layouts, is remapped within drivers to the value 0xfe,
+ * which corresponds to the "generic" kind used for simple single-sample
+ * uncompressed color formats on Fermi - Volta GPUs.
+ */
+static inline __u64
+drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
+{
+ if (!(modifier & 0x10) || (modifier & (0xff << 12)))
+ return modifier;
+ else
+ return modifier | (0xfe << 12);
+}
+
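Hypothetical decode helpers for the parameter table above (not part of the header; field positions follow the bit layout documented for DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D):

    static inline unsigned int nv_mod_height_log2(__u64 m)
    {
        return m & 0xf;          /* bits 3:0, log2(block height in GOBs) */
    }

    static inline unsigned int nv_mod_page_kind(__u64 m)
    {
        return (m >> 12) & 0xff; /* bits 19:12 */
    }

    static inline unsigned int nv_mod_compression(__u64 m)
    {
        return (m >> 23) & 0x7;  /* bits 25:23 */
    }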
+/*
+ * 16Bx2 Block Linear layout, used by Tegra K1 and later
*
* Pixels are arranged in 64x8 Groups Of Bytes (GOBs). GOBs are then stacked
* vertically by a power of 2 (1 to 32 GOBs) to form a block.
@@ -542,20 +881,20 @@ extern "C" {
* in full detail.
*/
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(v) \
- fourcc_mod_code(NVIDIA, 0x10 | ((v) & 0xf))
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 0, 0, 0, (v))
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_ONE_GOB \
- fourcc_mod_code(NVIDIA, 0x10)
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_TWO_GOB \
- fourcc_mod_code(NVIDIA, 0x11)
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_FOUR_GOB \
- fourcc_mod_code(NVIDIA, 0x12)
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_EIGHT_GOB \
- fourcc_mod_code(NVIDIA, 0x13)
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_SIXTEEN_GOB \
- fourcc_mod_code(NVIDIA, 0x14)
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_THIRTYTWO_GOB \
- fourcc_mod_code(NVIDIA, 0x15)
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5)
/*
* Some Broadcom modifiers take parameters, for example the number of
@@ -617,6 +956,10 @@ extern "C" {
* and UV. Some SAND-using hardware stores UV in a separate tiled
* image from Y to reduce the column height, which is not supported
* with these modifiers.
+ *
+ * The DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT modifier is also
+ * supported for DRM_FORMAT_P030 where the columns remain as 128 bytes
+ * wide, but as this is a 10 bpp format that translates to 96 pixels.
*/
#define DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(v) \
@@ -675,9 +1018,9 @@ extern "C" {
/*
* The top 4 bits (out of the 56 bits allotted for specifying vendor-specific
- * modifiers) denote the category for modifiers. Currently we have only two
- * categories of modifiers ie AFBC and MISC. We can have a maximum of sixteen
- * different categories.
+ * modifiers) denote the category for modifiers. Currently we have three
+ * categories of modifiers, i.e. AFBC, MISC and AFRC. We can have a maximum of
+ * sixteen different categories.
*/
#define DRM_FORMAT_MOD_ARM_CODE(__type, __val) \
fourcc_mod_code(ARM, ((__u64)(__type) << 52) | ((__val) & 0x000fffffffffffffULL))
@@ -780,6 +1123,121 @@ extern "C" {
*/
#define AFBC_FORMAT_MOD_BCH (1ULL << 11)
+/* AFBC uncompressed storage mode
+ *
+ * Indicates that the buffer is using AFBC uncompressed storage mode.
+ * In this mode all superblock payloads in the buffer use the uncompressed
+ * storage mode, which is usually only used for data which cannot be compressed.
+ * The buffer layout is the same as for AFBC buffers without USM set, this only
+ * affects the storage mode of the individual superblocks. Note that even a
+ * buffer without USM set may use uncompressed storage mode for some or all
+ * superblocks, USM just guarantees it for all.
+ */
+#define AFBC_FORMAT_MOD_USM (1ULL << 12)
+
+/*
+ * Arm Fixed-Rate Compression (AFRC) modifiers
+ *
+ * AFRC is a proprietary fixed-rate image compression protocol and format,
+ * designed to provide guaranteed bandwidth and memory footprint
+ * reductions in graphics and media use-cases.
+ *
+ * AFRC buffers consist of one or more planes, with the same components
+ * and meaning as an uncompressed buffer using the same pixel format.
+ *
+ * Within each plane, the pixel/luma/chroma values are grouped into
+ * "coding unit" blocks which are individually compressed to a
+ * fixed size (in bytes). All coding units within a given plane of a buffer
+ * store the same number of values, and have the same compressed size.
+ *
+ * The coding unit size is configurable, allowing different rates of compression.
+ *
+ * The start of each AFRC buffer plane must be aligned to an alignment granule which
+ * depends on the coding unit size.
+ *
+ * Coding Unit Size Plane Alignment
+ * ---------------- ---------------
+ * 16 bytes 1024 bytes
+ * 24 bytes 512 bytes
+ * 32 bytes 2048 bytes
+ *
+ * Coding units are grouped into paging tiles. AFRC buffer dimensions must be aligned
+ * to a multiple of the paging tile dimensions.
+ * The dimensions of each paging tile depend on whether the buffer is optimised for
+ * scanline (SCAN layout) or rotated (ROT layout) access.
+ *
+ * Layout Paging Tile Width Paging Tile Height
+ * ------ ----------------- ------------------
+ * SCAN 16 coding units 4 coding units
+ * ROT 8 coding units 8 coding units
+ *
+ * The dimensions of each coding unit depend on the number of components
+ * in the compressed plane and whether the buffer is optimised for
+ * scanline (SCAN layout) or rotated (ROT layout) access.
+ *
+ * Number of Components in Plane Layout Coding Unit Width Coding Unit Height
+ * ----------------------------- --------- ----------------- ------------------
+ * 1 SCAN 16 samples 4 samples
+ * Example: 16x4 luma samples in a 'Y' plane
+ * 16x4 chroma 'V' values, in the 'V' plane of a fully-planar YUV buffer
+ * ----------------------------- --------- ----------------- ------------------
+ * 1 ROT 8 samples 8 samples
+ * Example: 8x8 luma samples in a 'Y' plane
+ * 8x8 chroma 'V' values, in the 'V' plane of a fully-planar YUV buffer
+ * ----------------------------- --------- ----------------- ------------------
+ * 2 DONT CARE 8 samples 4 samples
+ * Example: 8x4 chroma pairs in the 'UV' plane of a semi-planar YUV buffer
+ * ----------------------------- --------- ----------------- ------------------
+ * 3 DONT CARE 4 samples 4 samples
+ * Example: 4x4 pixels in an RGB buffer without alpha
+ * ----------------------------- --------- ----------------- ------------------
+ * 4 DONT CARE 4 samples 4 samples
+ * Example: 4x4 pixels in an RGB buffer with alpha
+ */
+
+#define DRM_FORMAT_MOD_ARM_TYPE_AFRC 0x02
+
+#define DRM_FORMAT_MOD_ARM_AFRC(__afrc_mode) \
+ DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_AFRC, __afrc_mode)
+
+/*
+ * AFRC coding unit size modifier.
+ *
+ * Indicates the number of bytes used to store each compressed coding unit for
+ * one or more planes in an AFRC encoded buffer. The coding unit size for chrominance
+ * is the same for both Cb and Cr, which may be stored in separate planes.
+ *
+ * AFRC_FORMAT_MOD_CU_SIZE_P0 indicates the number of bytes used to store
+ * each compressed coding unit in the first plane of the buffer. For RGBA buffers
+ * this is the only plane, while for semi-planar and fully-planar YUV buffers,
+ * this corresponds to the luma plane.
+ *
+ * AFRC_FORMAT_MOD_CU_SIZE_P12 indicates the number of bytes used to store
+ * each compressed coding unit in the second and third planes in the buffer.
+ * For semi-planar and fully-planar YUV buffers, this corresponds to the chroma plane(s).
+ *
+ * For single-plane buffers, AFRC_FORMAT_MOD_CU_SIZE_P0 must be specified
+ * and AFRC_FORMAT_MOD_CU_SIZE_P12 must be zero.
+ * For semi-planar and fully-planar buffers, both AFRC_FORMAT_MOD_CU_SIZE_P0 and
+ * AFRC_FORMAT_MOD_CU_SIZE_P12 must be specified.
+ */
+#define AFRC_FORMAT_MOD_CU_SIZE_MASK 0xf
+#define AFRC_FORMAT_MOD_CU_SIZE_16 (1ULL)
+#define AFRC_FORMAT_MOD_CU_SIZE_24 (2ULL)
+#define AFRC_FORMAT_MOD_CU_SIZE_32 (3ULL)
+
+#define AFRC_FORMAT_MOD_CU_SIZE_P0(__afrc_cu_size) (__afrc_cu_size)
+#define AFRC_FORMAT_MOD_CU_SIZE_P12(__afrc_cu_size) ((__afrc_cu_size) << 4)
+
+/*
+ * AFRC scanline memory layout.
+ *
+ * Indicates that an AFRC encoded buffer uses the scanline-optimised
+ * layout; otherwise, it uses the rotation-optimised layout.
+ * The memory layout is the same for all planes.
+ */
+#define AFRC_FORMAT_MOD_LAYOUT_SCAN (1ULL << 8)
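+
+/*
+ * For illustration, a SCAN-layout AFRC modifier for a semi-planar YUV
+ * buffer, with 16-byte coding units in all planes, could be composed as:
+ *
+ * .. code-block:: C
+ *
+ *    __u64 modifier =
+ *        DRM_FORMAT_MOD_ARM_AFRC(AFRC_FORMAT_MOD_CU_SIZE_P0(AFRC_FORMAT_MOD_CU_SIZE_16) |
+ *                                AFRC_FORMAT_MOD_CU_SIZE_P12(AFRC_FORMAT_MOD_CU_SIZE_16) |
+ *                                AFRC_FORMAT_MOD_LAYOUT_SCAN);
+ */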
+
/*
* Arm 16x16 Block U-Interleaved modifier
*
@@ -804,6 +1262,222 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_ALLWINNER_TILED fourcc_mod_code(ALLWINNER, 1)
+/*
+ * Amlogic Video Framebuffer Compression modifiers
+ *
+ * Amlogic uses a proprietary lossless image compression protocol and format
+ * for their hardware video codec accelerators, either video decoders or
+ * video input encoders.
+ *
+ * It considerably reduces memory bandwidth while writing and reading
+ * frames in memory.
+ *
+ * The underlying storage is considered to be 3 components, 8-bit or 10-bit
+ * per component, YCbCr 4:2:0, single plane:
+ * - DRM_FORMAT_YUV420_8BIT
+ * - DRM_FORMAT_YUV420_10BIT
+ *
+ * The first 8 bits of the mode define the layout, then the following 8 bits
+ * define the options changing the layout.
+ *
+ * Not all combinations are valid, and different SoCs may support different
+ * combinations of layout and options.
+ */
+#define __fourcc_mod_amlogic_layout_mask 0xff
+#define __fourcc_mod_amlogic_options_shift 8
+#define __fourcc_mod_amlogic_options_mask 0xff
+
+#define DRM_FORMAT_MOD_AMLOGIC_FBC(__layout, __options) \
+ fourcc_mod_code(AMLOGIC, \
+ ((__layout) & __fourcc_mod_amlogic_layout_mask) | \
+ (((__options) & __fourcc_mod_amlogic_options_mask) \
+ << __fourcc_mod_amlogic_options_shift))
+
+/* Amlogic FBC Layouts */
+
+/*
+ * Amlogic FBC Basic Layout
+ *
+ * The basic layout is composed of:
+ * - body content organized in 64x32 superblocks, with 4096 bytes per
+ *   superblock in the default mode.
+ * - a 32-byte header block per 128x64 block
+ *
+ * This layout is transferable between Amlogic SoCs supporting this modifier.
+ */
+#define AMLOGIC_FBC_LAYOUT_BASIC (1ULL)
+
+/*
+ * Amlogic FBC Scatter Memory layout
+ *
+ * Indicates the header contains IOMMU references to the compressed
+ * frames content to optimize memory access and layout.
+ *
+ * In this mode, only the header memory address is needed, thus the
+ * content memory organization is tied to the current producer
+ * execution and can neither be saved/dumped nor transferred between
+ * Amlogic SoCs supporting this modifier.
+ *
+ * Due to the nature of the layout, these buffers are not expected to
+ * be accessible to user-space clients, but only to the hardware
+ * producers and consumers.
+ *
+ * The user-space clients should expect a failure while trying to mmap
+ * the DMA-BUF handle returned by the producer.
+ */
+#define AMLOGIC_FBC_LAYOUT_SCATTER (2ULL)
+
+/* Amlogic FBC Layout Options Bit Mask */
+
+/*
+ * Amlogic FBC Memory Saving mode
+ *
+ * Indicates the storage is packed when the pixel size is a multiple of the
+ * word boundary, i.e. 8-bit buffers should be stored in this mode to save
+ * allocation memory.
+ *
+ * This mode reduces body layout to 3072 bytes per 64x32 superblock with
+ * the basic layout and 3200 bytes per 64x32 superblock combined with
+ * the scatter layout.
+ */
+#define AMLOGIC_FBC_OPTION_MEM_SAVING (1ULL << 0)
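+
+/*
+ * For illustration, the basic layout combined with the memory saving
+ * option could be expressed as:
+ *
+ * .. code-block:: C
+ *
+ *    __u64 modifier = DRM_FORMAT_MOD_AMLOGIC_FBC(AMLOGIC_FBC_LAYOUT_BASIC,
+ *                                                AMLOGIC_FBC_OPTION_MEM_SAVING);
+ */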
+
+/*
+ * AMD modifiers
+ *
+ * Memory layout:
+ *
+ * without DCC:
+ * - main surface
+ *
+ * with DCC & without DCC_RETILE:
+ * - main surface in plane 0
+ * - DCC surface in plane 1 (RB-aligned, pipe-aligned if DCC_PIPE_ALIGN is set)
+ *
+ * with DCC & DCC_RETILE:
+ * - main surface in plane 0
+ * - displayable DCC surface in plane 1 (not RB-aligned & not pipe-aligned)
+ * - pipe-aligned DCC surface in plane 2 (RB-aligned & pipe-aligned)
+ *
+ * For multi-plane formats the above surfaces get merged into one plane for
+ * each format plane, based on the required alignment only.
+ *
+ * Bits Parameter Notes
+ * ----- ------------------------ ---------------------------------------------
+ *
+ * 7:0 TILE_VERSION Values are AMD_FMT_MOD_TILE_VER_*
+ * 12:8 TILE Values are AMD_FMT_MOD_TILE_<version>_*
+ * 13 DCC
+ * 14 DCC_RETILE
+ * 15 DCC_PIPE_ALIGN
+ * 16 DCC_INDEPENDENT_64B
+ * 17 DCC_INDEPENDENT_128B
+ * 19:18 DCC_MAX_COMPRESSED_BLOCK Values are AMD_FMT_MOD_DCC_BLOCK_*
+ * 20 DCC_CONSTANT_ENCODE
+ * 23:21 PIPE_XOR_BITS Only for some chips
+ * 26:24 BANK_XOR_BITS Only for some chips
+ * 29:27 PACKERS Only for some chips
+ * 32:30 RB Only for some chips
+ * 35:33 PIPE Only for some chips
+ * 55:36 - Reserved for future use, must be zero
+ */
+#define AMD_FMT_MOD fourcc_mod_code(AMD, 0)
+
+#define IS_AMD_FMT_MOD(val) (((val) >> 56) == DRM_FORMAT_MOD_VENDOR_AMD)
+
+/* Reserve 0 for GFX8 and older */
+#define AMD_FMT_MOD_TILE_VER_GFX9 1
+#define AMD_FMT_MOD_TILE_VER_GFX10 2
+#define AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS 3
+#define AMD_FMT_MOD_TILE_VER_GFX11 4
+
+/*
+ * 64K_S is the same for GFX9/GFX10/GFX10_RBPLUS and hence has GFX9 as canonical
+ * version.
+ */
+#define AMD_FMT_MOD_TILE_GFX9_64K_S 9
+
+/*
+ * 64K_D for non-32 bpp is the same for GFX9/GFX10/GFX10_RBPLUS and hence has
+ * GFX9 as canonical version.
+ */
+#define AMD_FMT_MOD_TILE_GFX9_64K_D 10
+#define AMD_FMT_MOD_TILE_GFX9_64K_S_X 25
+#define AMD_FMT_MOD_TILE_GFX9_64K_D_X 26
+#define AMD_FMT_MOD_TILE_GFX9_64K_R_X 27
+#define AMD_FMT_MOD_TILE_GFX11_256K_R_X 31
+
+#define AMD_FMT_MOD_DCC_BLOCK_64B 0
+#define AMD_FMT_MOD_DCC_BLOCK_128B 1
+#define AMD_FMT_MOD_DCC_BLOCK_256B 2
+
+#define AMD_FMT_MOD_TILE_VERSION_SHIFT 0
+#define AMD_FMT_MOD_TILE_VERSION_MASK 0xFF
+#define AMD_FMT_MOD_TILE_SHIFT 8
+#define AMD_FMT_MOD_TILE_MASK 0x1F
+
+/* Whether DCC compression is enabled. */
+#define AMD_FMT_MOD_DCC_SHIFT 13
+#define AMD_FMT_MOD_DCC_MASK 0x1
+
+/*
+ * Whether to include two DCC surfaces, one which is rb & pipe aligned, and
+ * one which is not aligned.
+ */
+#define AMD_FMT_MOD_DCC_RETILE_SHIFT 14
+#define AMD_FMT_MOD_DCC_RETILE_MASK 0x1
+
+/* Only set if DCC_RETILE = false */
+#define AMD_FMT_MOD_DCC_PIPE_ALIGN_SHIFT 15
+#define AMD_FMT_MOD_DCC_PIPE_ALIGN_MASK 0x1
+
+#define AMD_FMT_MOD_DCC_INDEPENDENT_64B_SHIFT 16
+#define AMD_FMT_MOD_DCC_INDEPENDENT_64B_MASK 0x1
+#define AMD_FMT_MOD_DCC_INDEPENDENT_128B_SHIFT 17
+#define AMD_FMT_MOD_DCC_INDEPENDENT_128B_MASK 0x1
+#define AMD_FMT_MOD_DCC_MAX_COMPRESSED_BLOCK_SHIFT 18
+#define AMD_FMT_MOD_DCC_MAX_COMPRESSED_BLOCK_MASK 0x3
+
+/*
+ * DCC supports embedding some clear colors directly in the DCC surface.
+ * However, on older GPUs the rendering HW ignores the embedded clear color
+ * and prefers the driver provided color. This necessitates doing a fastclear
+ * eliminate operation before a process transfers control.
+ *
+ * If this bit is set, the fastclear eliminate is not needed for these
+ * embeddable colors.
+ */
+#define AMD_FMT_MOD_DCC_CONSTANT_ENCODE_SHIFT 20
+#define AMD_FMT_MOD_DCC_CONSTANT_ENCODE_MASK 0x1
+
+/*
+ * The below fields account for per-GPU differences. These are only
+ * relevant for GFX9 and later and if the tile field is *_X/_T.
+ *
+ * PIPE_XOR_BITS = always needed
+ * BANK_XOR_BITS = only for TILE_VER_GFX9
+ * PACKERS = only for TILE_VER_GFX10_RBPLUS
+ * RB = only for TILE_VER_GFX9 & DCC
+ * PIPE = only for TILE_VER_GFX9 & DCC & (DCC_RETILE | DCC_PIPE_ALIGN)
+ */
+#define AMD_FMT_MOD_PIPE_XOR_BITS_SHIFT 21
+#define AMD_FMT_MOD_PIPE_XOR_BITS_MASK 0x7
+#define AMD_FMT_MOD_BANK_XOR_BITS_SHIFT 24
+#define AMD_FMT_MOD_BANK_XOR_BITS_MASK 0x7
+#define AMD_FMT_MOD_PACKERS_SHIFT 27
+#define AMD_FMT_MOD_PACKERS_MASK 0x7
+#define AMD_FMT_MOD_RB_SHIFT 30
+#define AMD_FMT_MOD_RB_MASK 0x7
+#define AMD_FMT_MOD_PIPE_SHIFT 33
+#define AMD_FMT_MOD_PIPE_MASK 0x7
+
+#define AMD_FMT_MOD_SET(field, value) \
+ ((__u64)(value) << AMD_FMT_MOD_##field##_SHIFT)
+#define AMD_FMT_MOD_GET(field, value) \
+ (((value) >> AMD_FMT_MOD_##field##_SHIFT) & AMD_FMT_MOD_##field##_MASK)
+#define AMD_FMT_MOD_CLEAR(field) \
+ (~((__u64)AMD_FMT_MOD_##field##_MASK << AMD_FMT_MOD_##field##_SHIFT))
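+
+/*
+ * For illustration, a GFX9 64K_S modifier without DCC could be built and
+ * inspected with the helpers above:
+ *
+ * .. code-block:: C
+ *
+ *    __u64 mod = AMD_FMT_MOD |
+ *                AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
+ *                AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S);
+ *
+ *    if (IS_AMD_FMT_MOD(mod) && !AMD_FMT_MOD_GET(DCC, mod))
+ *        ; // plain GFX9 64K_S surface, no DCC planes expected
+ */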
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 735c8cfdaaa1..fa953309d9ce 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -218,6 +218,27 @@ extern "C" {
#define DRM_MODE_CONTENT_PROTECTION_DESIRED 1
#define DRM_MODE_CONTENT_PROTECTION_ENABLED 2
+/**
+ * struct drm_mode_modeinfo - Display mode information.
+ * @clock: pixel clock in kHz
+ * @hdisplay: horizontal display size
+ * @hsync_start: horizontal sync start
+ * @hsync_end: horizontal sync end
+ * @htotal: horizontal total size
+ * @hskew: horizontal skew
+ * @vdisplay: vertical display size
+ * @vsync_start: vertical sync start
+ * @vsync_end: vertical sync end
+ * @vtotal: vertical total size
+ * @vscan: vertical scan
+ * @vrefresh: approximate vertical refresh rate in Hz
+ * @flags: bitmask of misc. flags, see DRM_MODE_FLAG_* defines
+ * @type: bitmask of type flags, see DRM_MODE_TYPE_* defines
+ * @name: string describing the mode resolution
+ *
+ * This is the user-space API display mode information structure. For the
+ * kernel version see struct drm_display_mode.
+ */
struct drm_mode_modeinfo {
__u32 clock;
__u16 hdisplay;
@@ -291,16 +312,48 @@ struct drm_mode_set_plane {
__u32 src_w;
};
+/**
+ * struct drm_mode_get_plane - Get plane metadata.
+ *
+ * Userspace can perform a GETPLANE ioctl to retrieve information about a
+ * plane.
+ *
+ * To retrieve the number of formats supported, set @count_format_types to zero
+ * and call the ioctl. @count_format_types will be updated with the value.
+ *
+ * To retrieve these formats, allocate an array with the memory needed to store
+ * @count_format_types formats. Point @format_type_ptr to this array and call
+ * the ioctl again (with @count_format_types still set to the value returned in
+ * the first ioctl call).
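+ *
+ * A minimal sketch of this two-call pattern, assuming an open DRM file
+ * descriptor ``fd``, a known ``plane_id`` and the libdrm drmIoctl()
+ * wrapper, with error handling omitted:
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_mode_get_plane get = { .plane_id = plane_id };
+ *     __u32 *formats;
+ *
+ *     drmIoctl(fd, DRM_IOCTL_MODE_GETPLANE, &get);
+ *     formats = calloc(get.count_format_types, sizeof(*formats));
+ *     get.format_type_ptr = (__u64)(uintptr_t)formats;
+ *     drmIoctl(fd, DRM_IOCTL_MODE_GETPLANE, &get);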
+ */
struct drm_mode_get_plane {
+ /**
+ * @plane_id: Object ID of the plane whose information should be
+ * retrieved. Set by caller.
+ */
__u32 plane_id;
+ /** @crtc_id: Object ID of the current CRTC. */
__u32 crtc_id;
+ /** @fb_id: Object ID of the current fb. */
__u32 fb_id;
+ /**
+ * @possible_crtcs: Bitmask of CRTCs compatible with the plane. CRTCs
+ * are created and they receive an index, which corresponds to their
+ * position in the bitmask. Bit N corresponds to
+ * :ref:`CRTC index<crtc_index>` N.
+ */
__u32 possible_crtcs;
+ /** @gamma_size: Never used. */
__u32 gamma_size;
+ /** @count_format_types: Number of formats. */
__u32 count_format_types;
+ /**
+ * @format_type_ptr: Pointer to ``__u32`` array of formats that are
+ * supported by the plane. These formats do not require modifiers.
+ */
__u64 format_type_ptr;
};
@@ -332,14 +385,19 @@ struct drm_mode_get_encoder {
/* This is for connectors with multiple signal types. */
/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
enum drm_mode_subconnector {
- DRM_MODE_SUBCONNECTOR_Automatic = 0,
- DRM_MODE_SUBCONNECTOR_Unknown = 0,
- DRM_MODE_SUBCONNECTOR_DVID = 3,
- DRM_MODE_SUBCONNECTOR_DVIA = 4,
- DRM_MODE_SUBCONNECTOR_Composite = 5,
- DRM_MODE_SUBCONNECTOR_SVIDEO = 6,
- DRM_MODE_SUBCONNECTOR_Component = 8,
- DRM_MODE_SUBCONNECTOR_SCART = 9,
+ DRM_MODE_SUBCONNECTOR_Automatic = 0, /* DVI-I, TV */
+ DRM_MODE_SUBCONNECTOR_Unknown = 0, /* DVI-I, TV, DP */
+ DRM_MODE_SUBCONNECTOR_VGA = 1, /* DP */
+ DRM_MODE_SUBCONNECTOR_DVID = 3, /* DVI-I DP */
+ DRM_MODE_SUBCONNECTOR_DVIA = 4, /* DVI-I */
+ DRM_MODE_SUBCONNECTOR_Composite = 5, /* TV */
+ DRM_MODE_SUBCONNECTOR_SVIDEO = 6, /* TV */
+ DRM_MODE_SUBCONNECTOR_Component = 8, /* TV */
+ DRM_MODE_SUBCONNECTOR_SCART = 9, /* TV */
+ DRM_MODE_SUBCONNECTOR_DisplayPort = 10, /* DP */
+ DRM_MODE_SUBCONNECTOR_HDMIA = 11, /* DP */
+ DRM_MODE_SUBCONNECTOR_Native = 15, /* DP */
+ DRM_MODE_SUBCONNECTOR_Wireless = 18, /* DP */
};
#define DRM_MODE_CONNECTOR_Unknown 0
@@ -362,28 +420,95 @@ enum drm_mode_subconnector {
#define DRM_MODE_CONNECTOR_DPI 17
#define DRM_MODE_CONNECTOR_WRITEBACK 18
#define DRM_MODE_CONNECTOR_SPI 19
+#define DRM_MODE_CONNECTOR_USB 20
+/**
+ * struct drm_mode_get_connector - Get connector metadata.
+ *
+ * User-space can perform a GETCONNECTOR ioctl to retrieve information about a
+ * connector. User-space is expected to retrieve encoders, modes and properties
+ * by performing this ioctl at least twice: the first time to retrieve the
+ * number of elements, the second time to retrieve the elements themselves.
+ *
+ * To retrieve the number of elements, set @count_props and @count_encoders to
+ * zero, set @count_modes to 1, and set @modes_ptr to a temporary struct
+ * drm_mode_modeinfo element.
+ *
+ * To retrieve the elements, allocate arrays for @encoders_ptr, @modes_ptr,
+ * @props_ptr and @prop_values_ptr, then set @count_modes, @count_props and
+ * @count_encoders to their capacity.
+ *
+ * Performing the ioctl only twice may be racy: the number of elements may have
+ * changed with a hotplug event in-between the two ioctls. User-space is
+ * expected to retry the last ioctl until the number of elements stabilizes.
+ * The kernel won't fill any array which doesn't have the expected length.
+ *
+ * **Force-probing a connector**
+ *
+ * If the @count_modes field is set to zero and the DRM client is the current
+ * DRM master, the kernel will perform a forced probe on the connector to
+ * refresh the connector status, modes and EDID. A forced-probe can be slow,
+ * might cause flickering and the ioctl will block.
+ *
+ * User-space needs to force-probe connectors to ensure their metadata is
+ * up-to-date at startup and after receiving a hot-plug event. User-space
+ * may perform a forced-probe when the user explicitly requests it. User-space
+ * shouldn't perform a forced-probe in other situations.
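+ *
+ * A hedged sketch of the retry loop, assuming ``fd``, ``connector_id`` and
+ * the libdrm drmIoctl() wrapper; array (re)allocation and error handling
+ * are elided:
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_mode_get_connector conn = { .connector_id = connector_id };
+ *     struct drm_mode_modeinfo mode;
+ *     __u32 prev_modes;
+ *
+ *     conn.count_modes = 1; // non-zero, so no implicit force-probe
+ *     conn.modes_ptr = (__u64)(uintptr_t)&mode;
+ *     drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn);
+ *     do {
+ *         prev_modes = conn.count_modes;
+ *         // (re)allocate the four arrays to the returned counts and
+ *         // point modes_ptr, props_ptr, prop_values_ptr and
+ *         // encoders_ptr at them, then re-query
+ *         drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn);
+ *     } while (conn.count_modes != prev_modes); // check all counts in practice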
+ */
struct drm_mode_get_connector {
-
+ /** @encoders_ptr: Pointer to ``__u32`` array of object IDs. */
__u64 encoders_ptr;
+ /** @modes_ptr: Pointer to struct drm_mode_modeinfo array. */
__u64 modes_ptr;
+ /** @props_ptr: Pointer to ``__u32`` array of property IDs. */
__u64 props_ptr;
+ /** @prop_values_ptr: Pointer to ``__u64`` array of property values. */
__u64 prop_values_ptr;
+ /** @count_modes: Number of modes. */
__u32 count_modes;
+ /** @count_props: Number of properties. */
__u32 count_props;
+ /** @count_encoders: Number of encoders. */
__u32 count_encoders;
- __u32 encoder_id; /**< Current Encoder */
- __u32 connector_id; /**< Id */
+ /** @encoder_id: Object ID of the current encoder. */
+ __u32 encoder_id;
+ /** @connector_id: Object ID of the connector. */
+ __u32 connector_id;
+ /**
+ * @connector_type: Type of the connector.
+ *
+ * See DRM_MODE_CONNECTOR_* defines.
+ */
__u32 connector_type;
+ /**
+ * @connector_type_id: Type-specific connector number.
+ *
+ * This is not an object ID. This is a per-type connector number. Each
+ * (type, type_id) combination is unique across all connectors of a DRM
+ * device.
+ */
__u32 connector_type_id;
+ /**
+ * @connection: Status of the connector.
+ *
+ * See enum drm_connector_status.
+ */
__u32 connection;
- __u32 mm_width; /**< width in millimeters */
- __u32 mm_height; /**< height in millimeters */
+ /** @mm_width: Width of the connected sink in millimeters. */
+ __u32 mm_width;
+ /** @mm_height: Height of the connected sink in millimeters. */
+ __u32 mm_height;
+ /**
+ * @subpixel: Subpixel order of the connected sink.
+ *
+ * See enum subpixel_order.
+ */
__u32 subpixel;
+ /** @pad: Padding, must be zero. */
__u32 pad;
};
@@ -416,22 +541,74 @@ struct drm_mode_get_connector {
*/
#define DRM_MODE_PROP_ATOMIC 0x80000000
+/**
+ * struct drm_mode_property_enum - Description for an enum/bitfield entry.
+ * @value: numeric value for this enum entry.
+ * @name: symbolic name for this enum entry.
+ *
+ * See struct drm_property_enum for details.
+ */
struct drm_mode_property_enum {
__u64 value;
char name[DRM_PROP_NAME_LEN];
};
+/**
+ * struct drm_mode_get_property - Get property metadata.
+ *
+ * User-space can perform a GETPROPERTY ioctl to retrieve information about a
+ * property. The same property may be attached to multiple objects, see
+ * "Modeset Base Object Abstraction".
+ *
+ * The meaning of the @values_ptr field changes depending on the property type.
+ * See &drm_property.flags for more details.
+ *
+ * The @enum_blob_ptr and @count_enum_blobs fields are only meaningful when the
+ * property has the type &DRM_MODE_PROP_ENUM or &DRM_MODE_PROP_BITMASK. For
+ * backwards compatibility, the kernel will always set @count_enum_blobs to
+ * zero when the property has the type &DRM_MODE_PROP_BLOB. User-space must
+ * ignore these two fields if the property has a different type.
+ *
+ * User-space is expected to retrieve values and enums by performing this ioctl
+ * at least twice: the first time to retrieve the number of elements, the
+ * second time to retrieve the elements themselves.
+ *
+ * To retrieve the number of elements, set @count_values and @count_enum_blobs
+ * to zero, then call the ioctl. @count_values will be updated with the number
+ * of elements. If the property has the type &DRM_MODE_PROP_ENUM or
+ * &DRM_MODE_PROP_BITMASK, @count_enum_blobs will be updated as well.
+ *
+ * To retrieve the elements themselves, allocate an array for @values_ptr and
+ * set @count_values to its capacity. If the property has the type
+ * &DRM_MODE_PROP_ENUM or &DRM_MODE_PROP_BITMASK, allocate an array for
+ * @enum_blob_ptr and set @count_enum_blobs to its capacity. Calling the ioctl
+ * again will fill the arrays.
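+ *
+ * A minimal sketch for an enum property, assuming ``fd``, ``prop_id`` and
+ * the libdrm drmIoctl() wrapper, with error handling omitted:
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_mode_get_property get = { .prop_id = prop_id };
+ *
+ *     drmIoctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &get);
+ *     get.values_ptr = (__u64)(uintptr_t)
+ *         calloc(get.count_values, sizeof(__u64));
+ *     get.enum_blob_ptr = (__u64)(uintptr_t)
+ *         calloc(get.count_enum_blobs, sizeof(struct drm_mode_property_enum));
+ *     drmIoctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &get);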
+ */
struct drm_mode_get_property {
- __u64 values_ptr; /* values and blob lengths */
- __u64 enum_blob_ptr; /* enum and blob id ptrs */
+ /** @values_ptr: Pointer to a ``__u64`` array. */
+ __u64 values_ptr;
+ /** @enum_blob_ptr: Pointer to a struct drm_mode_property_enum array. */
+ __u64 enum_blob_ptr;
+ /**
+ * @prop_id: Object ID of the property which should be retrieved. Set
+ * by the caller.
+ */
__u32 prop_id;
+ /**
+ * @flags: ``DRM_MODE_PROP_*`` bitfield. See &drm_property.flags for
+ * a definition of the flags.
+ */
__u32 flags;
+ /**
+ * @name: Symbolic property name. User-space should use this field to
+ * recognize properties.
+ */
char name[DRM_PROP_NAME_LEN];
+ /** @count_values: Number of elements in @values_ptr. */
__u32 count_values;
- /* This is only used to count enum values, not blobs. The _blobs is
- * simply because of a historical reason, i.e. backwards compat. */
+ /** @count_enum_blobs: Number of elements in @enum_blob_ptr. */
__u32 count_enum_blobs;
};
@@ -486,41 +663,73 @@ struct drm_mode_fb_cmd {
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifier[] */
+/**
+ * struct drm_mode_fb_cmd2 - Frame-buffer metadata.
+ *
+ * This struct holds frame-buffer metadata. There are two ways to use it:
+ *
+ * - User-space can fill this struct and perform a &DRM_IOCTL_MODE_ADDFB2
+ * ioctl to register a new frame-buffer. The new frame-buffer object ID will
+ * be set by the kernel in @fb_id.
+ * - User-space can set @fb_id and perform a &DRM_IOCTL_MODE_GETFB2 ioctl to
+ * fetch metadata about an existing frame-buffer.
+ *
+ * In case of planar formats, this struct allows up to 4 buffer objects with
+ * offsets and pitches per plane. The pitch and offset order are dictated by
+ * the format FourCC as defined by ``drm_fourcc.h``, e.g. NV12 is described as:
+ *
+ * YUV 4:2:0 image with a plane of 8-bit Y samples followed by an
+ * interleaved U/V plane containing 8-bit 2x2 subsampled colour difference
+ * samples.
+ *
+ * So it would consist of a Y plane at ``offsets[0]`` and a UV plane at
+ * ``offsets[1]``.
+ *
+ * To accommodate tiled, compressed, etc formats, a modifier can be specified.
+ * For more information see the "Format Modifiers" section. Note that even
+ * though it looks like we have a modifier per-plane, we in fact do not. The
+ * modifier for each plane must be identical. Thus all combinations of
+ * different data layouts for multi-plane formats must be enumerated as
+ * separate modifiers.
+ *
+ * All of the entries in @handles, @pitches, @offsets and @modifier must be
+ * zero when unused. Warning, for @offsets and @modifier zero can't be used to
+ * figure out whether the entry is used or not since it's a valid value (a zero
+ * offset is common, and a zero modifier is &DRM_FORMAT_MOD_LINEAR).
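+ *
+ * As a hedged sketch, assuming ``fd``, ``width``, ``height``, a GEM
+ * ``handle`` backing both planes and the libdrm drmIoctl() wrapper,
+ * registering a linear 8-bit NV12 frame-buffer could look like:
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_mode_fb_cmd2 cmd = {
+ *         .width = width,
+ *         .height = height,
+ *         .pixel_format = DRM_FORMAT_NV12,
+ *         .handles = { handle, handle },
+ *         .pitches = { width, width },
+ *         .offsets = { 0, width * height },
+ *     };
+ *
+ *     drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &cmd);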
+ */
struct drm_mode_fb_cmd2 {
+ /** @fb_id: Object ID of the frame-buffer. */
__u32 fb_id;
+ /** @width: Width of the frame-buffer. */
__u32 width;
+ /** @height: Height of the frame-buffer. */
__u32 height;
- __u32 pixel_format; /* fourcc code from drm_fourcc.h */
- __u32 flags; /* see above flags */
+ /**
+ * @pixel_format: FourCC format code, see ``DRM_FORMAT_*`` constants in
+ * ``drm_fourcc.h``.
+ */
+ __u32 pixel_format;
+ /**
+ * @flags: Frame-buffer flags (see &DRM_MODE_FB_INTERLACED and
+ * &DRM_MODE_FB_MODIFIERS).
+ */
+ __u32 flags;
- /*
- * In case of planar formats, this ioctl allows up to 4
- * buffer objects with offsets and pitches per plane.
- * The pitch and offset order is dictated by the fourcc,
- * e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
- *
- * YUV 4:2:0 image with a plane of 8 bit Y samples
- * followed by an interleaved U/V plane containing
- * 8 bit 2x2 subsampled colour difference samples.
- *
- * So it would consist of Y as offsets[0] and UV as
- * offsets[1]. Note that offsets[0] will generally
- * be 0 (but this is not required).
- *
- * To accommodate tiled, compressed, etc formats, a
- * modifier can be specified. The default value of zero
- * indicates "native" format as specified by the fourcc.
- * Vendor specific modifier token. Note that even though
- * it looks like we have a modifier per-plane, we in fact
- * do not. The modifier for each plane must be identical.
- * Thus all combinations of different data layouts for
- * multi plane formats must be enumerated as separate
- * modifiers.
+ /**
+ * @handles: GEM buffer handle, one per plane. Set to 0 if the plane is
+ * unused. The same handle can be used for multiple planes.
*/
__u32 handles[4];
- __u32 pitches[4]; /* pitch for each plane */
- __u32 offsets[4]; /* offset of each plane */
- __u64 modifier[4]; /* ie, tiling, compress */
+ /** @pitches: Pitch (aka. stride) in bytes, one per plane. */
+ __u32 pitches[4];
+ /** @offsets: Offset into the buffer in bytes, one per plane. */
+ __u32 offsets[4];
+ /**
+ * @modifier: Format modifier, one per plane. See ``DRM_FORMAT_MOD_*``
+ * constants in ``drm_fourcc.h``. All planes must use the same
+ * modifier. Ignored unless &DRM_MODE_FB_MODIFIERS is set in @flags.
+ */
+ __u64 modifier[4];
};
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
@@ -899,26 +1108,31 @@ struct drm_format_modifier {
};
/**
- * struct drm_mode_create_blob - Create New block property
- * @data: Pointer to data to copy.
- * @length: Length of data to copy.
- * @blob_id: new property ID.
+ * struct drm_mode_create_blob - Create New blob property
+ *
* Create a new 'blob' data property, copying length bytes from data pointer,
* and returning new blob ID.
*/
struct drm_mode_create_blob {
- /** Pointer to data to copy. */
+ /** @data: Pointer to data to copy. */
__u64 data;
- /** Length of data to copy. */
+ /** @length: Length of data to copy. */
__u32 length;
- /** Return: new property ID. */
+ /** @blob_id: Return: new property ID. */
__u32 blob_id;
};
/**
* struct drm_mode_destroy_blob - Destroy user blob
* @blob_id: blob_id to destroy
+ *
* Destroy a user-created blob property.
+ *
+ * User-space can release blobs as soon as they do not need to refer to them by
+ * their blob object ID. For instance, if you are using a MODE_ID blob in an
+ * atomic commit and you will not make another commit re-using the same ID, you
+ * can destroy the blob as soon as the commit has been issued, without waiting
+ * for it to complete.
*/
struct drm_mode_destroy_blob {
__u32 blob_id;
@@ -926,36 +1140,36 @@ struct drm_mode_destroy_blob {
/**
* struct drm_mode_create_lease - Create lease
- * @object_ids: Pointer to array of object ids.
- * @object_count: Number of object ids.
- * @flags: flags for new FD.
- * @lessee_id: unique identifier for lessee.
- * @fd: file descriptor to new drm_master file.
+ *
* Lease mode resources, creating another drm_master.
+ *
+ * The @object_ids array must reference at least one CRTC, one connector and
+ * one plane if &DRM_CLIENT_CAP_UNIVERSAL_PLANES is enabled. Alternatively,
+ * the lease can be completely empty.
*/
struct drm_mode_create_lease {
- /** Pointer to array of object ids (__u32) */
+ /** @object_ids: Pointer to array of object ids (__u32) */
__u64 object_ids;
- /** Number of object ids */
+ /** @object_count: Number of object ids */
__u32 object_count;
- /** flags for new FD (O_CLOEXEC, etc) */
+ /** @flags: flags for new FD (O_CLOEXEC, etc) */
__u32 flags;
- /** Return: unique identifier for lessee. */
+ /** @lessee_id: Return: unique identifier for lessee. */
__u32 lessee_id;
- /** Return: file descriptor to new drm_master file */
+ /** @fd: Return: file descriptor to new drm_master file */
__u32 fd;
};
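+
+/*
+ * A hedged usage sketch, assuming the libdrm drmIoctl() wrapper and
+ * placeholder object IDs:
+ *
+ * .. code-block:: C
+ *
+ *    __u32 objects[] = { crtc_id, connector_id, plane_id };
+ *    struct drm_mode_create_lease lease = {
+ *        .object_ids = (__u64)(uintptr_t)objects,
+ *        .object_count = 3,
+ *        .flags = O_CLOEXEC,
+ *    };
+ *
+ *    drmIoctl(fd, DRM_IOCTL_MODE_CREATE_LEASE, &lease);
+ *    // on success, lease.fd is the new drm_master file descriptor
+ */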
/**
* struct drm_mode_list_lessees - List lessees
- * @count_lessees: Number of lessees.
- * @pad: pad.
- * @lessees_ptr: Pointer to lessess.
- * List lesses from a drm_master
+ *
+ * List lessees from a drm_master.
*/
struct drm_mode_list_lessees {
- /** Number of lessees.
+ /**
+ * @count_lessees: Number of lessees.
+ *
* On input, provides length of the array.
* On output, provides total number. No
* more than the input number will be written
@@ -963,23 +1177,26 @@ struct drm_mode_list_lessees {
* the size and then the data.
*/
__u32 count_lessees;
+ /** @pad: Padding. */
__u32 pad;
- /** Pointer to lessees.
- * pointer to __u64 array of lessee ids
+ /**
+ * @lessees_ptr: Pointer to lessees.
+ *
+ * Pointer to __u64 array of lessee ids
*/
__u64 lessees_ptr;
};
/**
* struct drm_mode_get_lease - Get Lease
- * @count_objects: Number of leased objects.
- * @pad: pad.
- * @objects_ptr: Pointer to objects.
- * Get leased objects
+ *
+ * Get leased objects.
*/
struct drm_mode_get_lease {
- /** Number of leased objects.
+ /**
+ * @count_objects: Number of leased objects.
+ *
* On input, provides length of the array.
* On output, provides total number. No
* more than the input number will be written
@@ -987,22 +1204,22 @@ struct drm_mode_get_lease {
* the size and then the data.
*/
__u32 count_objects;
+ /** @pad: Padding. */
__u32 pad;
- /** Pointer to objects.
- * pointer to __u32 array of object ids
+ /**
+ * @objects_ptr: Pointer to objects.
+ *
+ * Pointer to __u32 array of object ids.
*/
__u64 objects_ptr;
};
/**
* struct drm_mode_revoke_lease - Revoke lease
- * @lessee_id: Unique ID of lessee.
- * Revoke lease
*/
struct drm_mode_revoke_lease {
- /** Unique ID of lessee
- */
+ /** @lessee_id: Unique ID of lessee */
__u32 lessee_id;
};
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
index 09d0df8b71c5..af024d90453d 100644
--- a/include/uapi/drm/etnaviv_drm.h
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -74,6 +74,9 @@ struct drm_etnaviv_timespec {
#define ETNAVIV_PARAM_GPU_NUM_CONSTANTS 0x19
#define ETNAVIV_PARAM_GPU_NUM_VARYINGS 0x1a
#define ETNAVIV_PARAM_SOFTPIN_START_ADDR 0x1b
+#define ETNAVIV_PARAM_GPU_PRODUCT_ID 0x1c
+#define ETNAVIV_PARAM_GPU_CUSTOMER_ID 0x1d
+#define ETNAVIV_PARAM_GPU_ECO_ID 0x1e
#define ETNA_MAX_PIPES 4
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 829c0a48577f..520ad2691a99 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -55,15 +55,15 @@ extern "C" {
* cause the related events to not be seen.
*
* I915_RESET_UEVENT - Event is generated just before an attempt to reset the
- * the GPU. The value supplied with the event is always 1. NOTE: Disable
+ * GPU. The value supplied with the event is always 1. NOTE: Disable
* reset via module parameter will cause this event to not be seen.
*/
#define I915_L3_PARITY_UEVENT "L3_PARITY_ERROR"
#define I915_ERROR_UEVENT "ERROR"
#define I915_RESET_UEVENT "RESET"
-/*
- * i915_user_extension: Base class for defining a chain of extensions
+/**
+ * struct i915_user_extension - Base class for defining a chain of extensions
*
* Many interfaces need to grow over time. In most cases we can simply
* extend the struct and have userspace pass in more data. Another option,
@@ -76,12 +76,58 @@ extern "C" {
* increasing complexity, and for large parts of that interface to be
* entirely optional. The downside is more pointer chasing; chasing across
* the __user boundary with pointers encapsulated inside u64.
+ *
+ * Example chaining:
+ *
+ * .. code-block:: C
+ *
+ * struct i915_user_extension ext3 {
+ * .next_extension = 0, // end
+ * .name = ...,
+ * };
+ * struct i915_user_extension ext2 {
+ * .next_extension = (uintptr_t)&ext3,
+ * .name = ...,
+ * };
+ * struct i915_user_extension ext1 {
+ * .next_extension = (uintptr_t)&ext2,
+ * .name = ...,
+ * };
+ *
+ * Typically the struct i915_user_extension would be embedded in some uAPI
+ * struct, and in this case we would feed it the head of the chain (i.e. ext1),
+ * which would then apply all of the above extensions.
+ *
*/
struct i915_user_extension {
+ /**
+ * @next_extension:
+ *
+ * Pointer to the next struct i915_user_extension, or zero if the end.
+ */
__u64 next_extension;
+ /**
+ * @name: Name of the extension.
+ *
+ * Note that the name here is just some integer.
+ *
+ * Also note that the name space for this is not global for the whole
+ * driver, but rather its scope/meaning is limited to the specific piece
+ * of uAPI which has embedded the struct i915_user_extension.
+ */
__u32 name;
- __u32 flags; /* All undefined bits must be zero. */
- __u32 rsvd[4]; /* Reserved for future use; must be zero. */
+ /**
+ * @flags: MBZ
+ *
+ * All undefined bits must be zero.
+ */
+ __u32 flags;
+ /**
+ * @rsvd: MBZ
+ *
+ * Reserved for future use; must be zero.
+ */
+ __u32 rsvd[4];
};
/*
@@ -108,25 +154,77 @@ enum i915_mocs_table_index {
I915_MOCS_CACHED,
};
-/*
+/**
+ * enum drm_i915_gem_engine_class - uapi engine type enumeration
+ *
* Different engines serve different roles, and there may be more than one
- * engine serving each role. enum drm_i915_gem_engine_class provides a
- * classification of the role of the engine, which may be used when requesting
- * operations to be performed on a certain subset of engines, or for providing
- * information about that group.
+ * engine serving each role. This enum provides a classification of the role
+ * of the engine, which may be used when requesting operations to be performed
+ * on a certain subset of engines, or for providing information about that
+ * group.
*/
enum drm_i915_gem_engine_class {
+ /**
+ * @I915_ENGINE_CLASS_RENDER:
+ *
+ * Render engines support instructions used for 3D, Compute (GPGPU),
+ * and programmable media workloads. These instructions fetch data and
+ * dispatch individual work items to threads that operate in parallel.
+ * The threads run small programs (called "kernels" or "shaders") on
+ * the GPU's execution units (EUs).
+ */
I915_ENGINE_CLASS_RENDER = 0,
+
+ /**
+ * @I915_ENGINE_CLASS_COPY:
+ *
+ * Copy engines (also referred to as "blitters") support instructions
+ * that move blocks of data from one location in memory to another,
+ * or that fill a specified location of memory with fixed data.
+ * Copy engines can perform pre-defined logical or bitwise operations
+ * on the source, destination, or pattern data.
+ */
I915_ENGINE_CLASS_COPY = 1,
+
+ /**
+ * @I915_ENGINE_CLASS_VIDEO:
+ *
+ * Video engines (also referred to as "bit stream decode" (BSD) or
+ * "vdbox") support instructions that perform fixed-function media
+ * decode and encode.
+ */
I915_ENGINE_CLASS_VIDEO = 2,
+
+ /**
+ * @I915_ENGINE_CLASS_VIDEO_ENHANCE:
+ *
+ * Video enhancement engines (also referred to as "vebox") support
+ * instructions related to image enhancement.
+ */
I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
- /* should be kept compact */
+ /**
+ * @I915_ENGINE_CLASS_COMPUTE:
+ *
+ * Compute engines support a subset of the instructions available
+ * on render engines: compute engines support Compute (GPGPU) and
+ * programmable media workloads, but do not support the 3D pipeline.
+ */
+ I915_ENGINE_CLASS_COMPUTE = 4,
+ /* Values in this enum should be kept compact. */
+
+ /**
+ * @I915_ENGINE_CLASS_INVALID:
+ *
+ * Placeholder value to represent an invalid engine class assignment.
+ */
I915_ENGINE_CLASS_INVALID = -1
};
-/*
+/**
+ * struct i915_engine_class_instance - Engine class/instance identifier
+ *
* There may be more than one engine fulfilling any role within the system.
* Each engine of a class is given a unique instance number and therefore
* any engine can be specified by its class:instance tuplet. APIs that allow
@@ -134,10 +232,21 @@ enum drm_i915_gem_engine_class {
* for this identification.
*/
struct i915_engine_class_instance {
- __u16 engine_class; /* see enum drm_i915_gem_engine_class */
- __u16 engine_instance;
+ /**
+ * @engine_class:
+ *
+ * Engine class from enum drm_i915_gem_engine_class
+ */
+ __u16 engine_class;
#define I915_ENGINE_CLASS_INVALID_NONE -1
#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
+
+ /**
+ * @engine_instance:
+ *
+ * Engine instance.
+ */
+ __u16 engine_instance;
};
/**
@@ -177,8 +286,9 @@ enum drm_i915_pmu_engine_sample {
#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)
#define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2)
#define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3)
+#define I915_PMU_SOFTWARE_GT_AWAKE_TIME __I915_PMU_OTHER(4)
-#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
+#define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY
/* Each region is a minimum of 16k, and there are at most 255 of them.
*/
@@ -359,6 +469,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_QUERY 0x39
#define DRM_I915_GEM_VM_CREATE 0x3a
#define DRM_I915_GEM_VM_DESTROY 0x3b
+#define DRM_I915_GEM_CREATE_EXT 0x3c
/* Must be kept compact -- no holes */
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
@@ -391,6 +502,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_CREATE_EXT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE_EXT, struct drm_i915_gem_create_ext)
#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
@@ -523,6 +635,15 @@ typedef struct drm_i915_irq_wait {
#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
#define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3)
#define I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4)
+/*
+ * Indicates the 2k user priority levels are statically mapped into 3 buckets as
+ * follows:
+ *
+ * -1k to -1 Low priority
+ * 0 Normal priority
+ * 1 to 1k Highest priority
+ */
+#define I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP (1ul << 5)
#define I915_PARAM_HUC_STATUS 42
@@ -619,16 +740,38 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_PERF_REVISION 54
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
+ * timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See
+ * I915_EXEC_USE_EXTENSIONS.
+ */
+#define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55
+
+/* Query if the kernel supports the I915_USERPTR_PROBE flag. */
+#define I915_PARAM_HAS_USERPTR_PROBE 56
+
/* Must be kept compact -- no holes and well documented */
-typedef struct drm_i915_getparam {
+/**
+ * struct drm_i915_getparam - Driver parameter query structure.
+ */
+struct drm_i915_getparam {
+ /** @param: Driver parameter to query. */
__s32 param;
- /*
+
+ /**
+ * @value: Address of memory where queried value should be put.
+ *
* WARNING: Using pointers instead of fixed-size u64 means we need to write
* compat32 code. Don't repeat this mistake.
*/
int __user *value;
-} drm_i915_getparam_t;
+};
+
+/**
+ * typedef drm_i915_getparam_t - Driver parameter query structure.
+ * See struct drm_i915_getparam.
+ */
+typedef struct drm_i915_getparam drm_i915_getparam_t;
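+
+/*
+ * A hedged usage sketch, assuming an open DRM file descriptor ``fd`` and
+ * the libdrm drmIoctl() wrapper:
+ *
+ * .. code-block:: C
+ *
+ *    int value = 0;
+ *    struct drm_i915_getparam gp = {
+ *        .param = I915_PARAM_HAS_USERPTR_PROBE,
+ *        .value = &value,
+ *    };
+ *
+ *    drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ */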
/* Ioctl to set kernel params:
*/
@@ -794,45 +937,113 @@ struct drm_i915_gem_mmap_gtt {
__u64 offset;
};
+/**
+ * struct drm_i915_gem_mmap_offset - Retrieve an offset so we can mmap this buffer object.
+ *
+ * This struct is passed as argument to the `DRM_IOCTL_I915_GEM_MMAP_OFFSET` ioctl,
+ * and is used to retrieve the fake offset to mmap an object specified by &handle.
+ *
+ * The legacy way of using `DRM_IOCTL_I915_GEM_MMAP` is removed on gen12+.
+ * `DRM_IOCTL_I915_GEM_MMAP_GTT` is an older supported alias to this struct, but it
+ * behaves as if &extensions were set to 0 and &flags to `I915_MMAP_OFFSET_GTT`.
+ */
struct drm_i915_gem_mmap_offset {
- /** Handle for the object being mapped. */
+ /** @handle: Handle for the object being mapped. */
__u32 handle;
+ /** @pad: Must be zero */
__u32 pad;
/**
- * Fake offset to use for subsequent mmap call
+ * @offset: The fake offset to use for subsequent mmap call
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 offset;
/**
- * Flags for extended behaviour.
+ * @flags: Flags for extended behaviour.
+ *
+ * It is mandatory that one of the `MMAP_OFFSET` types
+ * should be included:
+ *
+ * - `I915_MMAP_OFFSET_GTT`: Use mmap with the object bound to GTT. (Write-Combined)
+ * - `I915_MMAP_OFFSET_WC`: Use Write-Combined caching.
+ * - `I915_MMAP_OFFSET_WB`: Use Write-Back caching.
+ * - `I915_MMAP_OFFSET_FIXED`: Use object placement to determine caching.
*
- * It is mandatory that one of the MMAP_OFFSET types
- * (GTT, WC, WB, UC, etc) should be included.
+ * On devices with local memory `I915_MMAP_OFFSET_FIXED` is the only valid
+ * type. On devices without local memory, this caching mode is invalid.
+ *
+ * When specifying `I915_MMAP_OFFSET_FIXED`, WC or WB will be used as the
+ * caching mode, depending on the object placement on creation. WB will be
+ * used when the object can only exist in system memory, WC otherwise.
*/
__u64 flags;
-#define I915_MMAP_OFFSET_GTT 0
-#define I915_MMAP_OFFSET_WC 1
-#define I915_MMAP_OFFSET_WB 2
-#define I915_MMAP_OFFSET_UC 3
- /*
- * Zero-terminated chain of extensions.
+#define I915_MMAP_OFFSET_GTT 0
+#define I915_MMAP_OFFSET_WC 1
+#define I915_MMAP_OFFSET_WB 2
+#define I915_MMAP_OFFSET_UC 3
+#define I915_MMAP_OFFSET_FIXED 4
+
+ /**
+ * @extensions: Zero-terminated chain of extensions.
*
* No current extensions defined; mbz.
*/
__u64 extensions;
};
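+
+/*
+ * A hedged usage sketch, assuming ``fd``, an object ``handle`` of ``size``
+ * bytes and the libdrm drmIoctl() wrapper, with error handling omitted:
+ *
+ * .. code-block:: C
+ *
+ *    struct drm_i915_gem_mmap_offset arg = {
+ *        .handle = handle,
+ *        .flags = I915_MMAP_OFFSET_WC,
+ *    };
+ *    void *ptr;
+ *
+ *    drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg);
+ *    ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *               fd, arg.offset);
+ */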
+/**
+ * struct drm_i915_gem_set_domain - Adjust the object's write or read domain, in
+ * preparation for accessing the pages via some CPU domain.
+ *
+ * Specifying a new write or read domain will flush the object out of the
+ * previous domain (if required) before updating the object's domain
+ * tracking with the new domain.
+ *
+ * Note this might involve waiting for the object first if it is still active on
+ * the GPU.
+ *
+ * Supported values for @read_domains and @write_domain:
+ *
+ * - I915_GEM_DOMAIN_WC: Uncached write-combined domain
+ * - I915_GEM_DOMAIN_CPU: CPU cache domain
+ * - I915_GEM_DOMAIN_GTT: Mappable aperture domain
+ *
+ * All other domains are rejected.
+ *
+ * Note that for discrete, starting from DG1, this is no longer supported, and
+ * is instead rejected. On such platforms the CPU domain is effectively static,
+ * where we also only support a single &drm_i915_gem_mmap_offset cache mode,
+ * which can't be set explicitly and instead depends on the object placements,
+ * as per the below.
+ *
+ * Implicit caching rules, starting from DG1:
+ *
+ * - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
+ * contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
+ * mapped as write-combined only.
+ *
+ * - Everything else is always allocated and mapped as write-back, with the
+ * guarantee that everything is also coherent with the GPU.
+ *
+ * Note that this is likely to change in the future again, where we might need
+ * more flexibility on future devices, so making this all explicit as part of a
+ * new &drm_i915_gem_create_ext extension is probable.
+ */
struct drm_i915_gem_set_domain {
- /** Handle for the object */
+ /** @handle: Handle for the object. */
__u32 handle;
- /** New read domains */
+ /** @read_domains: New read domains. */
__u32 read_domains;
- /** New write domain */
+ /**
+ * @write_domain: New write domain.
+ *
+ * Note that having something in the write domain implies it's in the
+ * read domain, and only that read domain.
+ */
__u32 write_domain;
};
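+
+/*
+ * A hedged usage sketch preparing an object for CPU reads, assuming
+ * ``fd``, a GEM ``handle`` and the libdrm drmIoctl() wrapper:
+ *
+ * .. code-block:: C
+ *
+ *    struct drm_i915_gem_set_domain arg = {
+ *        .handle = handle,
+ *        .read_domains = I915_GEM_DOMAIN_CPU,
+ *        .write_domain = 0,
+ *    };
+ *
+ *    drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &arg);
+ */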
@@ -936,6 +1147,7 @@ struct drm_i915_gem_exec_object {
__u64 offset;
};
+/* DRM_IOCTL_I915_GEM_EXECBUFFER was removed in Linux 5.13 */
struct drm_i915_gem_execbuffer {
/**
* List of buffers to be validated with their relocations to be
@@ -982,10 +1194,16 @@ struct drm_i915_gem_exec_object2 {
/**
* When the EXEC_OBJECT_PINNED flag is specified this is populated by
* the user with the GTT offset at which this object will be pinned.
+ *
* When the I915_EXEC_NO_RELOC flag is specified this must contain the
* presumed_offset of the object.
+ *
* During execbuffer2 the kernel populates it with the value of the
* current GTT offset of the object, for future presumed_offset writes.
+ *
+ * See struct drm_i915_gem_create_ext for the rules when dealing with
+ * alignment restrictions with I915_MEMORY_CLASS_DEVICE, on devices with
+ * minimum page sizes, like DG2.
*/
__u64 offset;
@@ -1034,38 +1252,119 @@ struct drm_i915_gem_exec_object2 {
__u64 rsvd2;
};
+/**
+ * struct drm_i915_gem_exec_fence - An input or output fence for the execbuf
+ * ioctl.
+ *
+ * The request will wait for input fence to signal before submission.
+ *
+ * The returned output fence will be signaled after the completion of the
+ * request.
+ */
struct drm_i915_gem_exec_fence {
- /**
- * User's handle for a drm_syncobj to wait on or signal.
- */
+ /** @handle: User's handle for a drm_syncobj to wait on or signal. */
__u32 handle;
+ /**
+ * @flags: Supported flags are:
+ *
+ * I915_EXEC_FENCE_WAIT:
+ * Wait for the input fence before request submission.
+ *
+ * I915_EXEC_FENCE_SIGNAL:
+ * Return request completion fence as output
+ */
+ __u32 flags;
#define I915_EXEC_FENCE_WAIT (1<<0)
#define I915_EXEC_FENCE_SIGNAL (1<<1)
#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
- __u32 flags;
};
-struct drm_i915_gem_execbuffer2 {
+/**
+ * struct drm_i915_gem_execbuffer_ext_timeline_fences - Timeline fences
+ * for execbuf ioctl.
+ *
+ * This structure describes an array of drm_syncobj and associated points for
+ * timeline variants of drm_syncobj. It is invalid to append this structure to
+ * the execbuf if I915_EXEC_FENCE_ARRAY is set.
+ */
+struct drm_i915_gem_execbuffer_ext_timeline_fences {
+#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
+ /** @base: Extension link. See struct i915_user_extension. */
+ struct i915_user_extension base;
+
+ /**
+ * @fence_count: Number of elements in the @handles_ptr & @value_ptr
+ * arrays.
+ */
+ __u64 fence_count;
+
/**
- * List of gem_exec_object2 structs
+ * @handles_ptr: Pointer to an array of struct drm_i915_gem_exec_fence
+ * of length @fence_count.
*/
+ __u64 handles_ptr;
+
+ /**
+ * @values_ptr: Pointer to an array of u64 values of length
+ * @fence_count.
+ * Values must be 0 for a binary drm_syncobj. A value of 0 for a
+ * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
+ * binary one.
+ */
+ __u64 values_ptr;
+};
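+
+/*
+ * A hedged setup sketch; the syncobj handles wait_handle/signal_handle and
+ * the timeline point value are placeholders:
+ *
+ * .. code-block:: C
+ *
+ *    struct drm_i915_gem_exec_fence fences[] = {
+ *        { .handle = wait_handle, .flags = I915_EXEC_FENCE_WAIT },
+ *        { .handle = signal_handle, .flags = I915_EXEC_FENCE_SIGNAL },
+ *    };
+ *    __u64 points[] = { 3, 0 }; // 0: treat the second syncobj as binary
+ *    struct drm_i915_gem_execbuffer_ext_timeline_fences ext = {
+ *        .base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
+ *        .fence_count = 2,
+ *        .handles_ptr = (__u64)(uintptr_t)fences,
+ *        .values_ptr = (__u64)(uintptr_t)points,
+ *    };
+ */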
+
+/**
+ * struct drm_i915_gem_execbuffer2 - Structure for DRM_I915_GEM_EXECBUFFER2
+ * ioctl.
+ */
+struct drm_i915_gem_execbuffer2 {
+ /** @buffers_ptr: Pointer to a list of gem_exec_object2 structs */
__u64 buffers_ptr;
+
+ /** @buffer_count: Number of elements in @buffers_ptr array */
__u32 buffer_count;
- /** Offset in the batchbuffer to start execution from. */
+ /**
+ * @batch_start_offset: Offset in the batchbuffer to start execution
+ * from.
+ */
__u32 batch_start_offset;
- /** Bytes used in batchbuffer from batch_start_offset */
+
+ /**
+ * @batch_len: Length in bytes of the batch buffer, starting from the
+ * @batch_start_offset. If 0, length is assumed to be the batch buffer
+ * object size.
+ */
__u32 batch_len;
+
+ /** @DR1: deprecated */
__u32 DR1;
+
+ /** @DR4: deprecated */
__u32 DR4;
+
+ /** @num_cliprects: See @cliprects_ptr */
__u32 num_cliprects;
+
/**
- * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
- * is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a
- * struct drm_i915_gem_exec_fence *fences.
+ * @cliprects_ptr: Kernel clipping was a DRI1 misfeature.
+ *
+ * It is invalid to use this field if I915_EXEC_FENCE_ARRAY or
+ * I915_EXEC_USE_EXTENSIONS flags are not set.
+ *
+ * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array
+ * of &drm_i915_gem_exec_fence and @num_cliprects is the length of the
+ * array.
+ *
+ * If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a
+ * single &i915_user_extension and num_cliprects is 0.
*/
__u64 cliprects_ptr;
+
+ /** @flags: Execbuf flags */
+ __u64 flags;
#define I915_EXEC_RING_MASK (0x3f)
#define I915_EXEC_DEFAULT (0<<0)
#define I915_EXEC_RENDER (1<<0)
@@ -1083,10 +1382,6 @@ struct drm_i915_gem_execbuffer2 {
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
- __u64 flags;
- __u64 rsvd1; /* now used for context info */
- __u64 rsvd2;
-};
/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET (1<<8)
@@ -1181,7 +1476,30 @@ struct drm_i915_gem_execbuffer2 {
*/
#define I915_EXEC_FENCE_SUBMIT (1 << 20)
-#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SUBMIT << 1))
+/*
+ * Setting I915_EXEC_USE_EXTENSIONS implies that
+ * drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to a linked
+ * list of i915_user_extension. Each i915_user_extension node is the base of a
+ * larger structure. The supported structures are listed in the
+ * drm_i915_gem_execbuffer_ext enum.
+ */
+#define I915_EXEC_USE_EXTENSIONS (1 << 21)
+#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1))
+
+ /** @rsvd1: Context id */
+ __u64 rsvd1;
+
+ /**
+ * @rsvd2: in and out sync_file file descriptors.
+ *
+ * When I915_EXEC_FENCE_IN or I915_EXEC_FENCE_SUBMIT flag is set, the
+ * lower 32 bits of this field will have the in sync_file fd (input).
+ *
+ * When I915_EXEC_FENCE_OUT flag is set, the upper 32 bits of this
+ * field will have the out sync_file fd (output).
+ */
+ __u64 rsvd2;
+};
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1245,12 +1563,11 @@ struct drm_i915_gem_busy {
* reading from the object simultaneously.
*
* The value of each engine class is the same as specified in the
- * I915_CONTEXT_SET_ENGINES parameter and via perf, i.e.
+ * I915_CONTEXT_PARAM_ENGINES context parameter and via perf, i.e.
* I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
- * reported as active itself. Some hardware may have parallel
- * execution engines, e.g. multiple media engines, which are
- * mapped to the same class identifier and so are not separately
- * reported for busyness.
+ * Some hardware may have parallel execution engines, e.g. multiple
+ * media engines, which are mapped to the same class identifier and so
+ * are not separately reported for busyness.
*
* Caveat emptor:
* Only the boolean result of this query is reliable; that is whether
@@ -1261,49 +1578,91 @@ struct drm_i915_gem_busy {
};
/**
- * I915_CACHING_NONE
- *
- * GPU access is not coherent with cpu caches. Default for machines without an
- * LLC.
- */
-#define I915_CACHING_NONE 0
-/**
- * I915_CACHING_CACHED
- *
- * GPU access is coherent with cpu caches and furthermore the data is cached in
- * last-level caches shared between cpu cores and the gpu GT. Default on
- * machines with HAS_LLC.
- */
-#define I915_CACHING_CACHED 1
-/**
- * I915_CACHING_DISPLAY
- *
- * Special GPU caching mode which is coherent with the scanout engines.
- * Transparently falls back to I915_CACHING_NONE on platforms where no special
- * cache mode (like write-through or gfdt flushing) is available. The kernel
- * automatically sets this mode when using a buffer as a scanout target.
- * Userspace can manually set this mode to avoid a costly stall and clflush in
- * the hotpath of drawing the first frame.
+ * struct drm_i915_gem_caching - Set or get the caching for given object
+ * handle.
+ *
+ * Allow userspace to control the GTT caching bits for a given object when the
+ * object is later mapped through the ppGTT (or GGTT on older platforms lacking
+ * ppGTT support, or if the object is used for scanout). Note that this might
+ * require unbinding the object from the GTT first, if its current caching value
+ * doesn't match.
+ *
+ * Note that this all changes on discrete platforms: starting from DG1, the
+ * set/get caching is no longer supported and is now rejected. Instead the CPU
+ * caching attributes (WB vs WC) become an immutable creation-time property
+ * for the object, along with the GTT caching level. For now we don't expose any
+ * new uAPI for this; instead on DG1 this is all implicit, although this largely
+ * shouldn't matter since DG1 is coherent by default (without any way of
+ * controlling it).
+ *
+ * Implicit caching rules, starting from DG1:
+ *
+ * - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
+ * contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
+ * mapped as write-combined only.
+ *
+ * - Everything else is always allocated and mapped as write-back, with the
+ * guarantee that everything is also coherent with the GPU.
+ *
+ * Note that this is likely to change in the future again, where we might need
+ * more flexibility on future devices, so making this all explicit as part of a
+ * new &drm_i915_gem_create_ext extension is probable.
+ *
+ * Side note: Part of the reason for this is that changing the at-allocation-time CPU
+ * caching attributes for the pages might be required (and is expensive) if we
+ * need to then CPU map the pages later with different caching attributes. This
+ * inconsistent caching behaviour, while supported on x86, is not universally
+ * supported on other architectures. So for simplicity we opt for setting
+ * everything at creation time, whilst also making it immutable, on discrete
+ * platforms.
*/
-#define I915_CACHING_DISPLAY 2
-
struct drm_i915_gem_caching {
/**
- * Handle of the buffer to set/get the caching level of. */
+ * @handle: Handle of the buffer to set/get the caching level.
+ */
__u32 handle;
/**
- * Cacheing level to apply or return value
+ * @caching: The GTT caching level to apply or possible return value.
+ *
+ * The supported @caching values:
+ *
+ * I915_CACHING_NONE:
+ *
+ * GPU access is not coherent with CPU caches. Default for machines
+ * without an LLC. This means manual flushing might be needed, if we
+ * want GPU access to be coherent.
+ *
+ * I915_CACHING_CACHED:
+ *
+ * GPU access is coherent with CPU caches and furthermore the data is
+ * cached in last-level caches shared between CPU cores and the GPU GT.
+ *
+ * I915_CACHING_DISPLAY:
*
- * bits0-15 are for generic caching control (i.e. the above defined
- * values). bits16-31 are reserved for platform-specific variations
- * (e.g. l3$ caching on gen7). */
+ * Special GPU caching mode which is coherent with the scanout engines.
+ * Transparently falls back to I915_CACHING_NONE on platforms where no
+ * special cache mode (like write-through or gfdt flushing) is
+ * available. The kernel automatically sets this mode when using a
+ * buffer as a scanout target. Userspace can manually set this mode to
+ * avoid a costly stall and clflush in the hotpath of drawing the first
+ * frame.
+ */
+#define I915_CACHING_NONE 0
+#define I915_CACHING_CACHED 1
+#define I915_CACHING_DISPLAY 2
__u32 caching;
};
#define I915_TILING_NONE 0
#define I915_TILING_X 1
#define I915_TILING_Y 2
+/*
+ * Do not add new tiling types here. The I915_TILING_* values are for
+ * de-tiling fence registers that no longer exist on modern platforms. Although
+ * the hardware may support new types of tiling in general (e.g., Tile4), we
+ * do not need to add them to the uapi that is specific to now-defunct ioctls.
+ */
#define I915_TILING_LAST I915_TILING_Y
#define I915_BIT_6_SWIZZLE_NONE 0
@@ -1521,21 +1880,64 @@ struct drm_i915_gem_context_create {
__u32 pad;
};
+/**
+ * struct drm_i915_gem_context_create_ext - Structure for creating contexts.
+ */
struct drm_i915_gem_context_create_ext {
- __u32 ctx_id; /* output: id of new context*/
+ /** @ctx_id: Id of the created context (output) */
+ __u32 ctx_id;
+
+ /**
+ * @flags: Supported flags are:
+ *
+ * I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS:
+ *
+ * Extensions may be appended to this structure and the driver must check
+ * for them. See @extensions.
+ *
+ * I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE
+ *
+ * The created context will have a single timeline.
+ */
__u32 flags;
#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0)
#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1u << 1)
#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
(-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
+
+ /**
+ * @extensions: Zero-terminated chain of extensions.
+ *
+ * I915_CONTEXT_CREATE_EXT_SETPARAM:
+ * Context parameter to set or query during context creation.
+ * See struct drm_i915_gem_context_create_ext_setparam.
+ *
+ * I915_CONTEXT_CREATE_EXT_CLONE:
+ * This extension has been removed. On the off chance someone somewhere
+ * has attempted to use it, never re-use this extension number.
+ */
__u64 extensions;
+#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
+#define I915_CONTEXT_CREATE_EXT_CLONE 1
};
+/**
+ * struct drm_i915_gem_context_param - Context parameter to set or query.
+ */
struct drm_i915_gem_context_param {
+ /** @ctx_id: Context id */
__u32 ctx_id;
+
+ /** @size: Size of the parameter @value */
__u32 size;
+
+ /** @param: Parameter to set or query */
__u64 param;
#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
+/* I915_CONTEXT_PARAM_NO_ZEROMAP has been removed. On the off chance
+ * someone somewhere has attempted to use it, never re-use this context
+ * param number.
+ */
#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
@@ -1602,6 +2004,7 @@ struct drm_i915_gem_context_param {
* Extensions:
* i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
* i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
+ * i915_context_engines_parallel_submit (I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT)
*/
#define I915_CONTEXT_PARAM_ENGINES 0xa
@@ -1619,12 +2022,67 @@ struct drm_i915_gem_context_param {
* By default, new contexts allow persistence.
*/
#define I915_CONTEXT_PARAM_PERSISTENCE 0xb
+
+/* This API has been removed. On the off chance someone somewhere has
+ * attempted to use it, never re-use this context param number.
+ */
+#define I915_CONTEXT_PARAM_RINGSIZE 0xc
+
+/*
+ * I915_CONTEXT_PARAM_PROTECTED_CONTENT:
+ *
+ * Mark that the context makes use of protected content, which will result
+ * in the context being invalidated when the protected content session is.
+ * Given that the protected content session is killed on suspend, the device
+ * is kept awake for the lifetime of a protected context, so the user should
+ * make sure to dispose of such contexts once done.
+ * This flag can only be set at context creation time and, when set to true,
+ * must be preceded by an explicit setting of I915_CONTEXT_PARAM_RECOVERABLE
+ * to false. This flag can't be set to true in conjunction with setting the
+ * I915_CONTEXT_PARAM_BANNABLE flag to false. Creation example:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_context_create_ext_setparam p_protected = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
+ * .value = 1,
+ * }
+ * };
+ * struct drm_i915_gem_context_create_ext_setparam p_norecover = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * .next_extension = to_user_pointer(&p_protected),
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_RECOVERABLE,
+ * .value = 0,
+ * }
+ * };
+ * struct drm_i915_gem_context_create_ext create = {
+ * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ * .extensions = to_user_pointer(&p_norecover),
+ * };
+ *
+ * ctx_id = gem_context_create_ext(drm_fd, &create);
+ *
+ * In addition to the normal failure cases, setting this flag during context
+ * creation can result in the following errors:
+ *
+ * -ENODEV: feature not available
+ * -EPERM: trying to mark a recoverable or not bannable context as protected
+ */
+#define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd
/* Must be kept compact -- no holes and well documented */
+ /** @value: Context parameter value to be set or queried */
__u64 value;
};
-/**
+/*
* Context SSEU programming
*
* It may be necessary for either functional or performance reason to configure
@@ -1683,6 +2141,69 @@ struct drm_i915_gem_context_param_sseu {
__u32 rsvd;
};
+/**
+ * DOC: Virtual Engine uAPI
+ *
+ * Virtual engine is a concept where userspace is able to configure a set of
+ * physical engines, submit a batch buffer, and let the driver execute it on any
+ * engine from the set as it sees fit.
+ *
+ * This is primarily useful on parts which have multiple instances of the same
+ * class of engine, like for example GT3+ Skylake parts with their two VCS
+ * engines.
+ *
+ * For instance userspace can enumerate all engines of a certain class using the
+ * previously described `Engine Discovery uAPI`_. After that userspace can
+ * create a GEM context with a placeholder slot for the virtual engine (using
+ * `I915_ENGINE_CLASS_INVALID` and `I915_ENGINE_CLASS_INVALID_NONE` for class
+ * and instance respectively) and finally using the
+ * `I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE` extension place a virtual engine in
+ * the same reserved slot.
+ *
+ * Example of creating a virtual engine and submitting a batch buffer to it:
+ *
+ * .. code-block:: C
+ *
+ * I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(virtual, 2) = {
+ * .base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE,
+ * .engine_index = 0, // Place this virtual engine into engine map slot 0
+ * .num_siblings = 2,
+ * .engines = { { I915_ENGINE_CLASS_VIDEO, 0 },
+ * { I915_ENGINE_CLASS_VIDEO, 1 }, },
+ * };
+ * I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
+ * .engines = { { I915_ENGINE_CLASS_INVALID,
+ * I915_ENGINE_CLASS_INVALID_NONE } },
+ * .extensions = to_user_pointer(&virtual), // Chains after load_balance extension
+ * };
+ * struct drm_i915_gem_context_create_ext_setparam p_engines = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_ENGINES,
+ * .value = to_user_pointer(&engines),
+ * .size = sizeof(engines),
+ * },
+ * };
+ * struct drm_i915_gem_context_create_ext create = {
+ * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ * .extensions = to_user_pointer(&p_engines),
+ * };
+ *
+ * ctx_id = gem_context_create_ext(drm_fd, &create);
+ *
+ * // Now we have created a GEM context with its engine map containing a
+ * // single virtual engine. Submissions to this slot can go either to
+ * // vcs0 or vcs1, depending on the load balancing algorithm used inside
+ * // the driver. The load balancing is dynamic from one batch buffer to
+ * // another and transparent to userspace.
+ *
+ * ...
+ * execbuf.rsvd1 = ctx_id;
+ * execbuf.flags = 0; // Submits to index 0 which is the virtual engine
+ * gem_execbuf(drm_fd, &execbuf);
+ */
+
/*
* i915_context_engines_load_balance:
*
@@ -1708,7 +2229,7 @@ struct i915_context_engines_load_balance {
__u64 mbz64; /* reserved for future use; must be zero */
- struct i915_engine_class_instance engines[0];
+ struct i915_engine_class_instance engines[];
} __attribute__((packed));
#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
@@ -1746,7 +2267,7 @@ struct i915_context_engines_bond {
__u64 flags; /* all undefined flags must be zero */
__u64 mbz64[4]; /* reserved for future use; must be zero */
- struct i915_engine_class_instance engines[0];
+ struct i915_engine_class_instance engines[];
} __attribute__((packed));
#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
@@ -1759,10 +2280,195 @@ struct i915_context_engines_bond {
struct i915_engine_class_instance engines[N__]; \
} __attribute__((packed)) name__
+/**
+ * struct i915_context_engines_parallel_submit - Configure engine for
+ * parallel submission.
+ *
+ * Setup a slot in the context engine map to allow multiple BBs to be submitted
+ * in a single execbuf IOCTL. Those BBs will then be scheduled to run on the GPU
+ * in parallel. Multiple hardware contexts are created internally in the i915 to
+ * run these BBs. Once a slot is configured for N BBs only N BBs can be
+ * submitted in each execbuf IOCTL and this is implicit behavior, e.g. the user
+ * doesn't tell the execbuf IOCTL there are N BBs; the execbuf IOCTL knows how
+ * many BBs there are based on the slot's configuration. The N BBs are the last
+ * N buffer objects or first N if I915_EXEC_BATCH_FIRST is set.
+ *
+ * The default placement behavior is to create implicit bonds between each
+ * context if each context maps to more than 1 physical engine (e.g. context is
+ * a virtual engine). Also we only allow contexts of the same engine class, and
+ * contexts must be in logically contiguous order. Examples of the placement
+ * behavior are described below. Lastly, the default is to not allow BBs to be
+ * preempted mid-batch. Rather insert coordinated preemption points on all
+ * hardware contexts between each set of BBs. Flags could be added in the future
+ * to change both of these default behaviors.
+ *
+ * Returns -EINVAL if hardware context placement configuration is invalid or if
+ * the placement configuration isn't supported on the platform / submission
+ * interface.
+ * Returns -ENODEV if extension isn't supported on the platform / submission
+ * interface.
+ *
+ * .. code-block:: none
+ *
+ * Example syntax:
+ * CS[X] = generic engine of same class, logical instance X
+ * INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
+ *
+ * Example 1 pseudo code:
+ * set_engines(INVALID)
+ * set_parallel(engine_index=0, width=2, num_siblings=1,
+ * engines=CS[0],CS[1])
+ *
+ * Results in the following valid placement:
+ * CS[0], CS[1]
+ *
+ * Example 2 pseudo code:
+ * set_engines(INVALID)
+ * set_parallel(engine_index=0, width=2, num_siblings=2,
+ * engines=CS[0],CS[2],CS[1],CS[3])
+ *
+ * Results in the following valid placements:
+ * CS[0], CS[1]
+ * CS[2], CS[3]
+ *
+ * This can be thought of as two virtual engines, each containing two
+ * engines thereby making a 2D array. However, there are bonds tying the
+ * entries together and placing restrictions on how they can be scheduled.
+ * Specifically, the scheduler can choose only vertical columns from the 2D
+ * array. That is, CS[0] is bonded to CS[1] and CS[2] to CS[3]. So if the
+ * scheduler wants to submit to CS[0], it must also choose CS[1] and vice
+ * versa. Likewise, choosing CS[2] requires also using CS[3].
+ * VE[0] = CS[0], CS[2]
+ * VE[1] = CS[1], CS[3]
+ *
+ * Example 3 pseudo code:
+ * set_engines(INVALID)
+ * set_parallel(engine_index=0, width=2, num_siblings=2,
+ * engines=CS[0],CS[1],CS[1],CS[3])
+ *
+ * Results in the following valid and invalid placements:
+ * CS[0], CS[1]
+ * CS[1], CS[3] - Not logically contiguous, return -EINVAL
+ */
+struct i915_context_engines_parallel_submit {
+ /**
+ * @base: base user extension.
+ */
+ struct i915_user_extension base;
+
+ /**
+ * @engine_index: slot for parallel engine
+ */
+ __u16 engine_index;
+
+ /**
+ * @width: number of contexts per parallel engine or in other words the
+ * number of batches in each submission
+ */
+ __u16 width;
+
+ /**
+ * @num_siblings: number of siblings per context or in other words the
+ * number of possible placements for each submission
+ */
+ __u16 num_siblings;
+
+ /**
+ * @mbz16: reserved for future use; must be zero
+ */
+ __u16 mbz16;
+
+ /**
+ * @flags: all undefined flags must be zero; no flags are currently defined
+ */
+ __u64 flags;
+
+ /**
+ * @mbz64: reserved for future use; must be zero
+ */
+ __u64 mbz64[3];
+
+ /**
+ * @engines: 2-d array of engine instances to configure parallel engine
+ *
+ * length = width (i) * num_siblings (j)
+ * index = j + i * num_siblings
+ */
+ struct i915_engine_class_instance engines[];
+
+} __packed;
+
+#define I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(name__, N__) struct { \
+ struct i915_user_extension base; \
+ __u16 engine_index; \
+ __u16 width; \
+ __u16 num_siblings; \
+ __u16 mbz16; \
+ __u64 flags; \
+ __u64 mbz64[3]; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
+/**
+ * DOC: Context Engine Map uAPI
+ *
+ * Context engine map is a new way of addressing engines when submitting batch-
+ * buffers, replacing the existing way of using identifiers like `I915_EXEC_BLT`
+ * inside the flags field of `struct drm_i915_gem_execbuffer2`.
+ *
+ * To use it, created GEM contexts need to be configured with a list of engines
+ * the user is intending to submit to. This is accomplished using the
+ * `I915_CONTEXT_PARAM_ENGINES` parameter and `struct
+ * i915_context_param_engines`.
+ *
+ * For such contexts the `I915_EXEC_RING_MASK` field becomes an index into the
+ * configured map.
+ *
+ * Example of creating such context and submitting against it:
+ *
+ * .. code-block:: C
+ *
+ * I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
+ * .engines = { { I915_ENGINE_CLASS_RENDER, 0 },
+ * { I915_ENGINE_CLASS_COPY, 0 } }
+ * };
+ * struct drm_i915_gem_context_create_ext_setparam p_engines = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_ENGINES,
+ * .value = to_user_pointer(&engines),
+ * .size = sizeof(engines),
+ * },
+ * };
+ * struct drm_i915_gem_context_create_ext create = {
+ * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ * .extensions = to_user_pointer(&p_engines),
+ * };
+ *
+ * ctx_id = gem_context_create_ext(drm_fd, &create);
+ *
+ * // We have now created a GEM context with two engines in the map:
+ * // Index 0 points to rcs0 while index 1 points to bcs0. Other engines
+ * // will not be accessible from this context.
+ *
+ * ...
+ * execbuf.rsvd1 = ctx_id;
+ * execbuf.flags = 0; // Submits to index 0, which is rcs0 for this context
+ * gem_execbuf(drm_fd, &execbuf);
+ *
+ * ...
+ * execbuf.rsvd1 = ctx_id;
+ * execbuf.flags = 1; // Submits to index 1, which is bcs0 for this context
+ * gem_execbuf(drm_fd, &execbuf);
+ */
+
struct i915_context_param_engines {
__u64 extensions; /* linked chain of extension blocks, 0 terminates */
#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
+#define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */
struct i915_engine_class_instance engines[0];
} __attribute__((packed));
@@ -1771,25 +2477,19 @@ struct i915_context_param_engines {
struct i915_engine_class_instance engines[N__]; \
} __attribute__((packed)) name__
+/**
+ * struct drm_i915_gem_context_create_ext_setparam - Context parameter
+ * to set or query during context creation.
+ */
struct drm_i915_gem_context_create_ext_setparam {
-#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
+ /** @base: Extension link. See struct i915_user_extension. */
struct i915_user_extension base;
- struct drm_i915_gem_context_param param;
-};
-struct drm_i915_gem_context_create_ext_clone {
-#define I915_CONTEXT_CREATE_EXT_CLONE 1
- struct i915_user_extension base;
- __u32 clone_id;
- __u32 flags;
-#define I915_CONTEXT_CLONE_ENGINES (1u << 0)
-#define I915_CONTEXT_CLONE_FLAGS (1u << 1)
-#define I915_CONTEXT_CLONE_SCHEDATTR (1u << 2)
-#define I915_CONTEXT_CLONE_SSEU (1u << 3)
-#define I915_CONTEXT_CLONE_TIMELINE (1u << 4)
-#define I915_CONTEXT_CLONE_VM (1u << 5)
-#define I915_CONTEXT_CLONE_UNKNOWN -(I915_CONTEXT_CLONE_VM << 1)
- __u64 rsvd;
+ /**
+ * @param: Context parameter to set or query.
+ * See struct drm_i915_gem_context_param.
+ */
+ struct drm_i915_gem_context_param param;
};
struct drm_i915_gem_context_destroy {
@@ -1797,7 +2497,9 @@ struct drm_i915_gem_context_destroy {
__u32 pad;
};
-/*
+/**
+ * struct drm_i915_gem_vm_control - Structure to create or destroy VM.
+ *
* DRM_I915_GEM_VM_CREATE -
*
* Create a new virtual memory address space (ppGTT) for use within a context
@@ -1807,20 +2509,23 @@ struct drm_i915_gem_context_destroy {
* The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
* returned in the outparam @id.
*
- * No flags are defined, with all bits reserved and must be zero.
- *
 * An extension chain may be provided, starting with @extensions, and terminated
* by the @next_extension being 0. Currently, no extensions are defined.
*
* DRM_I915_GEM_VM_DESTROY -
*
- * Destroys a previously created VM id, specified in @id.
+ * Destroys a previously created VM id, specified in @vm_id.
*
* No extensions or flags are allowed currently, and so must be zero.
*/
struct drm_i915_gem_vm_control {
+ /** @extensions: Zero-terminated chain of extensions. */
__u64 extensions;
+
+ /** @flags: reserved for future usage, currently MBZ */
__u32 flags;
+
+ /** @vm_id: Id of the VM created or to be destroyed */
__u32 vm_id;
};
@@ -1862,14 +2567,69 @@ struct drm_i915_reset_stats {
__u32 pad;
};
+/**
+ * struct drm_i915_gem_userptr - Create GEM object from user allocated memory.
+ *
+ * Userptr objects have several restrictions on what ioctls can be used with the
+ * object handle.
+ */
struct drm_i915_gem_userptr {
+ /**
+ * @user_ptr: The pointer to the allocated memory.
+ *
+ * Needs to be aligned to PAGE_SIZE.
+ */
__u64 user_ptr;
+
+ /**
+ * @user_size:
+ *
+ * The size in bytes for the allocated memory. This will also become the
+ * object size.
+ *
+ * Needs to be aligned to PAGE_SIZE, and must be at least PAGE_SIZE in
+ * size.
+ */
__u64 user_size;
+
+ /**
+ * @flags:
+ *
+ * Supported flags:
+ *
+ * I915_USERPTR_READ_ONLY:
+ *
+ * Mark the object as readonly, this also means GPU access can only be
+ * readonly. This is only supported on HW which supports readonly access
+ * through the GTT. If the HW can't support readonly access, an error is
+ * returned.
+ *
+ * I915_USERPTR_PROBE:
+ *
+ * Probe the provided @user_ptr range and validate that the @user_ptr is
+ * indeed pointing to normal memory and that the range is also valid.
+ * For example if some garbage address is given to the kernel, then this
+ * should complain.
+ *
+ * Returns -EFAULT if the probe failed.
+ *
+ * Note that this doesn't populate the backing pages, and also doesn't
+ * guarantee that the object will remain valid when the object is
+ * eventually used.
+ *
+ * The kernel supports this feature if I915_PARAM_HAS_USERPTR_PROBE
+ * returns a non-zero value.
+ *
+ * I915_USERPTR_UNSYNCHRONIZED:
+ *
+ * NOT USED. Setting this flag will result in an error.
+ */
__u32 flags;
#define I915_USERPTR_READ_ONLY 0x1
+#define I915_USERPTR_PROBE 0x2
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
/**
- * Returned handle for the object.
+ * @handle: Returned handle for the object.
*
* Object handles are nonzero.
*/
@@ -1913,7 +2673,7 @@ enum drm_i915_perf_property_id {
/**
* The value specifies which set of OA unit metrics should be
- * be configured, defining the contents of any OA unit reports.
+ * configured, defining the contents of any OA unit reports.
*
* This property is available in perf revision 1.
*/
@@ -1948,6 +2708,30 @@ enum drm_i915_perf_property_id {
*/
DRM_I915_PERF_PROP_HOLD_PREEMPTION,
+ /**
+ * Specifying this pins all contexts to the specified SSEU power
+ * configuration for the duration of the recording.
+ *
+ * This parameter's value is a pointer to a struct
+ * drm_i915_gem_context_param_sseu.
+ *
+ * This property is available in perf revision 4.
+ */
+ DRM_I915_PERF_PROP_GLOBAL_SSEU,
+
+ /**
+ * This optional parameter specifies the timer interval in nanoseconds
+ * at which the i915 driver will check the OA buffer for available data.
+ * Minimum allowed value is 100 microseconds. A default value is used by
+ * the driver if this parameter is not specified. Note that larger timer
+ * values will reduce CPU consumption during OA perf captures. However,
+ * excessively large values would potentially result in OA buffer
+ * overwrites as captures reach the end of the OA buffer.
+ *
+ * This property is available in perf revision 5.
+ */
+ DRM_I915_PERF_PROP_POLL_OA_PERIOD,
+
DRM_I915_PERF_PROP_MAX /* non-ABI */
};
@@ -1967,7 +2751,7 @@ struct drm_i915_perf_open_param {
__u64 properties_ptr;
};
-/**
+/*
* Enable data capture for a stream that was either opened in a disabled state
* via I915_PERF_FLAG_DISABLED or was later disabled via
* I915_PERF_IOCTL_DISABLE.
@@ -1981,7 +2765,7 @@ struct drm_i915_perf_open_param {
*/
#define I915_PERF_IOCTL_ENABLE _IO('i', 0x0)
-/**
+/*
* Disable data capture for a stream.
*
* It is an error to try and read a stream that is disabled.
@@ -1990,7 +2774,7 @@ struct drm_i915_perf_open_param {
*/
#define I915_PERF_IOCTL_DISABLE _IO('i', 0x1)
-/**
+/*
* Change metrics_set captured by a stream.
*
* If the stream is bound to a specific context, the configuration change
@@ -2003,7 +2787,7 @@ struct drm_i915_perf_open_param {
*/
#define I915_PERF_IOCTL_CONFIG _IO('i', 0x2)
-/**
+/*
* Common to all i915 perf records
*/
struct drm_i915_perf_record_header {
@@ -2052,162 +2836,382 @@ enum drm_i915_perf_record_type {
};
/**
+ * struct drm_i915_perf_oa_config
+ *
* Structure to upload perf dynamic configuration into the kernel.
*/
struct drm_i915_perf_oa_config {
- /** String formatted like "%08x-%04x-%04x-%04x-%012x" */
+ /**
+ * @uuid:
+ *
+ * String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x"
+ */
char uuid[36];
+ /**
+ * @n_mux_regs:
+ *
+ * Number of mux regs in &mux_regs_ptr.
+ */
__u32 n_mux_regs;
+
+ /**
+ * @n_boolean_regs:
+ *
+ * Number of boolean regs in &boolean_regs_ptr.
+ */
__u32 n_boolean_regs;
+
+ /**
+ * @n_flex_regs:
+ *
+ * Number of flex regs in &flex_regs_ptr.
+ */
__u32 n_flex_regs;
- /*
- * These fields are pointers to tuples of u32 values (register address,
- * value). For example the expected length of the buffer pointed by
- * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
+ /**
+ * @mux_regs_ptr:
+ *
+ * Pointer to tuples of u32 values (register address, value) for mux
+ * registers. Expected length of buffer is (2 * sizeof(u32) *
+ * &n_mux_regs).
*/
__u64 mux_regs_ptr;
+
+ /**
+ * @boolean_regs_ptr:
+ *
+ * Pointer to tuples of u32 values (register address, value) for boolean
+ * registers. Expected length of buffer is (2 * sizeof(u32) *
+ * &n_boolean_regs).
+ */
__u64 boolean_regs_ptr;
+
+ /**
+ * @flex_regs_ptr:
+ *
+ * Pointer to tuples of u32 values (register address, value) for flex
+ * registers. Expected length of buffer is (2 * sizeof(u32) *
+ * &n_flex_regs).
+ */
__u64 flex_regs_ptr;
};
+/**
+ * struct drm_i915_query_item - An individual query for the kernel to process.
+ *
+ * The behaviour is determined by the @query_id. Note that exactly what
+ * @data_ptr is also depends on the specific @query_id.
+ */
struct drm_i915_query_item {
+ /**
+ * @query_id:
+ *
+ * The id for this query. Currently accepted query IDs are:
+ * - %DRM_I915_QUERY_TOPOLOGY_INFO (see struct drm_i915_query_topology_info)
+ * - %DRM_I915_QUERY_ENGINE_INFO (see struct drm_i915_engine_info)
+ * - %DRM_I915_QUERY_PERF_CONFIG (see struct drm_i915_query_perf_config)
+ * - %DRM_I915_QUERY_MEMORY_REGIONS (see struct drm_i915_query_memory_regions)
+ * - %DRM_I915_QUERY_HWCONFIG_BLOB (see `GuC HWCONFIG blob uAPI`)
+ * - %DRM_I915_QUERY_GEOMETRY_SUBSLICES (see struct drm_i915_query_topology_info)
+ */
__u64 query_id;
-#define DRM_I915_QUERY_TOPOLOGY_INFO 1
-#define DRM_I915_QUERY_ENGINE_INFO 2
-#define DRM_I915_QUERY_PERF_CONFIG 3
+#define DRM_I915_QUERY_TOPOLOGY_INFO 1
+#define DRM_I915_QUERY_ENGINE_INFO 2
+#define DRM_I915_QUERY_PERF_CONFIG 3
+#define DRM_I915_QUERY_MEMORY_REGIONS 4
+#define DRM_I915_QUERY_HWCONFIG_BLOB 5
+#define DRM_I915_QUERY_GEOMETRY_SUBSLICES 6
/* Must be kept compact -- no holes and well documented */
- /*
+ /**
+ * @length:
+ *
* When set to zero by userspace, this is filled with the size of the
- * data to be written at the data_ptr pointer. The kernel sets this
+ * data to be written at the @data_ptr pointer. The kernel sets this
* value to a negative value to signal an error on a particular query
* item.
*/
__s32 length;
- /*
- * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
+ /**
+ * @flags:
+ *
+ * When &query_id == %DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
+ *
+ * When &query_id == %DRM_I915_QUERY_PERF_CONFIG, must be one of the
+ * following:
*
- * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the
- * following :
- * - DRM_I915_QUERY_PERF_CONFIG_LIST
- * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
- * - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID
+ * - %DRM_I915_QUERY_PERF_CONFIG_LIST
+ * - %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
+ * - %DRM_I915_QUERY_PERF_CONFIG_FOR_UUID
+ *
+ * When &query_id == %DRM_I915_QUERY_GEOMETRY_SUBSLICES must contain
+ * a struct i915_engine_class_instance that references a render engine.
*/
__u32 flags;
#define DRM_I915_QUERY_PERF_CONFIG_LIST 1
#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID 3
- /*
- * Data will be written at the location pointed by data_ptr when the
- * value of length matches the length of the data to be written by the
+ /**
+ * @data_ptr:
+ *
+ * Data will be written at the location pointed by @data_ptr when the
+ * value of @length matches the length of the data to be written by the
* kernel.
*/
__u64 data_ptr;
};
+/**
+ * struct drm_i915_query - Supply an array of struct drm_i915_query_item for the
+ * kernel to fill out.
+ *
+ * Note that this is generally a two step process for each struct
+ * drm_i915_query_item in the array:
+ *
+ * 1. Call the DRM_IOCTL_I915_QUERY, giving it our array of struct
+ * drm_i915_query_item, with &drm_i915_query_item.length set to zero. The
+ * kernel will then fill in the size, in bytes, which tells userspace how
+ * much memory it needs to allocate for the blob (say, for an array of
+ * properties).
+ *
+ * 2. Next we call DRM_IOCTL_I915_QUERY again, this time with the
+ * &drm_i915_query_item.data_ptr equal to our newly allocated blob. Note that
+ * the &drm_i915_query_item.length should still be the same as what the
+ * kernel previously set. At this point the kernel can fill in the blob.
+ *
+ * Note that for some query items it can make sense for userspace to just pass
+ * in a buffer/blob equal to or larger than the required size. In this case only
+ * a single ioctl call is needed. For some smaller query items this can work
+ * quite well.
+ *
+ */
struct drm_i915_query {
+ /** @num_items: The number of elements in the @items_ptr array */
__u32 num_items;
- /*
- * Unused for now. Must be cleared to zero.
+ /**
+ * @flags: Unused for now. Must be cleared to zero.
*/
__u32 flags;
- /*
- * This points to an array of num_items drm_i915_query_item structures.
+ /**
+ * @items_ptr:
+ *
+ * Pointer to an array of struct drm_i915_query_item. The number of
+ * array elements is @num_items.
*/
__u64 items_ptr;
};
-/*
- * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO :
- *
- * data: contains the 3 pieces of information :
- *
- * - the slice mask with one bit per slice telling whether a slice is
- * available. The availability of slice X can be queried with the following
- * formula :
- *
- * (data[X / 8] >> (X % 8)) & 1
- *
- * - the subslice mask for each slice with one bit per subslice telling
- * whether a subslice is available. Gen12 has dual-subslices, which are
- * similar to two gen11 subslices. For gen12, this array represents dual-
- * subslices. The availability of subslice Y in slice X can be queried
- * with the following formula :
- *
- * (data[subslice_offset +
- * X * subslice_stride +
- * Y / 8] >> (Y % 8)) & 1
- *
- * - the EU mask for each subslice in each slice with one bit per EU telling
- * whether an EU is available. The availability of EU Z in subslice Y in
- * slice X can be queried with the following formula :
+/**
+ * struct drm_i915_query_topology_info
*
- * (data[eu_offset +
- * (X * max_subslices + Y) * eu_stride +
- * Z / 8] >> (Z % 8)) & 1
+ * Describes slice/subslice/EU information queried by
+ * %DRM_I915_QUERY_TOPOLOGY_INFO
*/
struct drm_i915_query_topology_info {
- /*
+ /**
+ * @flags:
+ *
* Unused for now. Must be cleared to zero.
*/
__u16 flags;
+ /**
+ * @max_slices:
+ *
+ * The number of bits used to express the slice mask.
+ */
__u16 max_slices;
+
+ /**
+ * @max_subslices:
+ *
+ * The number of bits used to express the subslice mask.
+ */
__u16 max_subslices;
+
+ /**
+ * @max_eus_per_subslice:
+ *
+ * The number of bits in the EU mask that correspond to a single
+ * subslice's EUs.
+ */
__u16 max_eus_per_subslice;
- /*
+ /**
+ * @subslice_offset:
+ *
* Offset in data[] at which the subslice masks are stored.
*/
__u16 subslice_offset;
- /*
+ /**
+ * @subslice_stride:
+ *
* Stride at which each of the subslice masks for each slice are
* stored.
*/
__u16 subslice_stride;
- /*
+ /**
+ * @eu_offset:
+ *
* Offset in data[] at which the EU masks are stored.
*/
__u16 eu_offset;
- /*
+ /**
+ * @eu_stride:
+ *
* Stride at which each of the EU masks for each subslice are stored.
*/
__u16 eu_stride;
+ /**
+ * @data:
+ *
+ * Contains 3 pieces of information:
+ *
+ * - The slice mask with one bit per slice telling whether a slice is
+ * available. The availability of slice X can be queried with the
+ * following formula:
+ *
+ * .. code:: c
+ *
+ * (data[X / 8] >> (X % 8)) & 1
+ *
+ * Starting with Xe_HP platforms, Intel hardware no longer has
+ * traditional slices so i915 will always report a single slice
+ * (hardcoded slicemask = 0x1) which contains all of the platform's
+ * subslices. I.e., the mask here does not reflect any of the newer
+ * hardware concepts such as "gslices" or "cslices" since userspace
+ * is capable of inferring those from the subslice mask.
+ *
+ * - The subslice mask for each slice with one bit per subslice telling
+ * whether a subslice is available. Starting with Gen12 we use the
+ * term "subslice" to refer to what the hardware documentation
+ * describes as a "dual-subslice". The availability of subslice Y
+ * in slice X can be queried with the following formula:
+ *
+ * .. code:: c
+ *
+ * (data[subslice_offset + X * subslice_stride + Y / 8] >> (Y % 8)) & 1
+ *
+ * - The EU mask for each subslice in each slice, with one bit per EU
+ * telling whether an EU is available. The availability of EU Z in
+ * subslice Y in slice X can be queried with the following formula:
+ *
+ * .. code:: c
+ *
+ * (data[eu_offset +
+ * (X * max_subslices + Y) * eu_stride +
+ * Z / 8
+ * ] >> (Z % 8)) & 1
+ */
__u8 data[];
};
/**
+ * DOC: Engine Discovery uAPI
+ *
+ * Engine discovery uAPI is a way of enumerating physical engines present in a
+ * GPU associated with an open i915 DRM file descriptor. This supersedes the old
+ * way of using `DRM_IOCTL_I915_GETPARAM` and engine identifiers like
+ * `I915_PARAM_HAS_BLT`.
+ *
+ * The need for this interface came starting with Icelake and newer GPUs, which
+ * started to establish a pattern of having multiple engines of the same class,
+ * where not all instances were always completely functionally equivalent.
+ *
+ * Entry point for this uapi is `DRM_IOCTL_I915_QUERY` with the
+ * `DRM_I915_QUERY_ENGINE_INFO` as the queried item id.
+ *
+ * Example for getting the list of engines:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_query_engine_info *info;
+ * struct drm_i915_query_item item = {
+ * .query_id = DRM_I915_QUERY_ENGINE_INFO,
+ * };
+ * struct drm_i915_query query = {
+ * .num_items = 1,
+ * .items_ptr = (uintptr_t)&item,
+ * };
+ * int err, i;
+ *
+ * // First query the size of the blob we need, this needs to be large
+ * // enough to hold our array of engines. The kernel will fill out the
+ * // item.length for us, which is the number of bytes we need.
+ * //
+ * // Alternatively a large buffer can be allocated straight away enabling
+ * // querying in one pass, in which case item.length should contain the
+ * // length of the provided buffer.
+ * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ * if (err) ...
+ *
+ * info = calloc(1, item.length);
+ * // Now that we allocated the required number of bytes, we call the ioctl
+ * // again, this time with the data_ptr pointing to our newly allocated
+ * // blob, which the kernel can then populate with info on all engines.
+ * item.data_ptr = (uintptr_t)info;
+ *
+ * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ * if (err) ...
+ *
+ * // We can now access each engine in the array
+ * for (i = 0; i < info->num_engines; i++) {
+ * struct drm_i915_engine_info einfo = info->engines[i];
+ * u16 class = einfo.engine.class;
+ * u16 instance = einfo.engine.instance;
+ * ....
+ * }
+ *
+ * free(info);
+ *
+ * Each of the enumerated engines, apart from being defined by its class and
+ * instance (see `struct i915_engine_class_instance`), also can have flags and
+ * capabilities defined as documented in i915_drm.h.
+ *
+ * For instance video engines which support HEVC encoding will have the
+ * `I915_VIDEO_CLASS_CAPABILITY_HEVC` capability bit set.
+ *
+ * Engine discovery only fully comes into its own when combined with the new way
+ * of addressing engines when submitting batch buffers using contexts with
+ * engine maps configured.
+ */
+
+/**
* struct drm_i915_engine_info
*
 * Describes one engine and its capabilities as known to the driver.
*/
struct drm_i915_engine_info {
- /** Engine class and instance. */
+ /** @engine: Engine class and instance. */
struct i915_engine_class_instance engine;
- /** Reserved field. */
+ /** @rsvd0: Reserved field. */
__u32 rsvd0;
- /** Engine flags. */
+ /** @flags: Engine flags. */
__u64 flags;
+#define I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE (1 << 0)
- /** Capabilities of this engine. */
+ /** @capabilities: Capabilities of this engine. */
__u64 capabilities;
#define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0)
#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1)
- /** Reserved fields. */
- __u64 rsvd1[4];
+ /** @logical_instance: Logical instance of engine */
+ __u16 logical_instance;
+
+ /** @rsvd1: Reserved fields. */
+ __u16 rsvd1[3];
+ /** @rsvd2: Reserved fields. */
+ __u64 rsvd2[3];
};
/**
@@ -2217,66 +3221,502 @@ struct drm_i915_engine_info {
* an array of struct drm_i915_engine_info structures.
*/
struct drm_i915_query_engine_info {
- /** Number of struct drm_i915_engine_info structs following. */
+ /** @num_engines: Number of struct drm_i915_engine_info structs following. */
__u32 num_engines;
- /** MBZ */
+ /** @rsvd: MBZ */
__u32 rsvd[3];
- /** Marker for drm_i915_engine_info structures. */
+ /** @engines: Marker for drm_i915_engine_info structures. */
struct drm_i915_engine_info engines[];
};
-/*
- * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG.
+/**
+ * struct drm_i915_query_perf_config
+ *
+ * Data written by the kernel with query %DRM_I915_QUERY_PERF_CONFIG and
+ * %DRM_I915_QUERY_GEOMETRY_SUBSLICES.
*/
struct drm_i915_query_perf_config {
union {
- /*
- * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets
- * this fields to the number of configurations available.
+ /**
+ * @n_configs:
+ *
+ * When &drm_i915_query_item.flags ==
+ * %DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets this field to
+ * the number of configurations available.
*/
__u64 n_configs;
- /*
- * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID,
- * i915 will use the value in this field as configuration
- * identifier to decide what data to write into config_ptr.
+ /**
+ * @config:
+ *
+ * When &drm_i915_query_item.flags ==
+ * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will use the
+ * value in this field as configuration identifier to decide
+ * what data to write into config_ptr.
*/
__u64 config;
- /*
- * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
- * i915 will use the value in this field as configuration
- * identifier to decide what data to write into config_ptr.
+ /**
+ * @uuid:
+ *
+ * When &drm_i915_query_item.flags ==
+ * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID, i915 will use the
+ * value in this field as configuration identifier to decide
+ * what data to write into config_ptr.
*
* String formatted like "%08x-%04x-%04x-%04x-%012x"
*/
char uuid[36];
};
- /*
+ /**
+ * @flags:
+ *
* Unused for now. Must be cleared to zero.
*/
__u32 flags;
- /*
- * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will
- * write an array of __u64 of configuration identifiers.
+ /**
+ * @data:
*
- * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA, i915 will
- * write a struct drm_i915_perf_oa_config. If the following fields of
- * drm_i915_perf_oa_config are set not set to 0, i915 will write into
- * the associated pointers the values of submitted when the
+ * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_LIST,
+ * i915 will write an array of __u64 of configuration identifiers.
+ *
+ * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_DATA,
+ * i915 will write a struct drm_i915_perf_oa_config. If the following
+ * fields of struct drm_i915_perf_oa_config are not set to 0, i915 will
+ * write into the associated pointers the values submitted when the
 * configuration was created:
*
- * - n_mux_regs
- * - n_boolean_regs
- * - n_flex_regs
+ * - &drm_i915_perf_oa_config.n_mux_regs
+ * - &drm_i915_perf_oa_config.n_boolean_regs
+ * - &drm_i915_perf_oa_config.n_flex_regs
*/
__u8 data[];
};
+/**
+ * enum drm_i915_gem_memory_class - Supported memory classes
+ */
+enum drm_i915_gem_memory_class {
+ /** @I915_MEMORY_CLASS_SYSTEM: System memory */
+ I915_MEMORY_CLASS_SYSTEM = 0,
+ /** @I915_MEMORY_CLASS_DEVICE: Device local-memory */
+ I915_MEMORY_CLASS_DEVICE,
+};
+
+/**
+ * struct drm_i915_gem_memory_class_instance - Identify particular memory region
+ */
+struct drm_i915_gem_memory_class_instance {
+ /** @memory_class: See enum drm_i915_gem_memory_class */
+ __u16 memory_class;
+
+ /** @memory_instance: Which instance */
+ __u16 memory_instance;
+};
+
+/**
+ * struct drm_i915_memory_region_info - Describes one region as known to the
+ * driver.
+ *
+ * Note this is using both struct drm_i915_query_item and struct drm_i915_query.
+ * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS
+ * at &drm_i915_query_item.query_id.
+ */
+struct drm_i915_memory_region_info {
+ /** @region: The class:instance pair encoding */
+ struct drm_i915_gem_memory_class_instance region;
+
+ /** @rsvd0: MBZ */
+ __u32 rsvd0;
+
+ /**
+ * @probed_size: Memory probed by the driver
+ *
+ * Note that it should not be possible to ever encounter a zero value
+ * here, and no current region type will ever return -1, although for
+ * future region types this might be a possibility. The same applies to
+ * the other size fields.
+ */
+ __u64 probed_size;
+
+ /**
+ * @unallocated_size: Estimate of memory remaining
+ *
+ * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable accounting.
+ * Without this (or if this is an older kernel) the value here will
+ * always equal the @probed_size. Note this is only currently tracked
+ * for I915_MEMORY_CLASS_DEVICE regions (for other types the value here
+ * will always equal the @probed_size).
+ */
+ __u64 unallocated_size;
+
+ union {
+ /** @rsvd1: MBZ */
+ __u64 rsvd1[8];
+ struct {
+ /**
+ * @probed_cpu_visible_size: Memory probed by the driver
+ * that is CPU accessible.
+ *
+ * This will always be <= @probed_size, and the
+ * remainder (if there is any) will not be CPU
+ * accessible.
+ *
+ * On systems without small BAR, the @probed_size will
+ * always equal the @probed_cpu_visible_size, since all
+ * of it will be CPU accessible.
+ *
+ * Note this is only tracked for
+ * I915_MEMORY_CLASS_DEVICE regions (for other types the
+ * value here will always equal the @probed_size).
+ *
+ * Note that if the value returned here is zero, then
+ * this must be an old kernel which lacks the relevant
+ * small-bar uAPI support (including
+ * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS), but on
+ * such systems we should never actually end up with a
+ * small BAR configuration, assuming we are able to load
+ * the kernel module. Hence it should be safe to treat
+ * this the same as when @probed_cpu_visible_size ==
+ * @probed_size.
+ */
+ __u64 probed_cpu_visible_size;
+
+ /**
+ * @unallocated_cpu_visible_size: Estimate of CPU
+ * visible memory remaining.
+ *
+ * Note this is only tracked for
+ * I915_MEMORY_CLASS_DEVICE regions (for other types the
+ * value here will always equal the
+ * @probed_cpu_visible_size).
+ *
+ * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
+ * accounting. Without this the value here will always
+ * equal the @probed_cpu_visible_size.
+ *
+ * If this is an older kernel the value here will be
+ * zero, see also @probed_cpu_visible_size.
+ */
+ __u64 unallocated_cpu_visible_size;
+ };
+ };
+};
+
+/**
+ * struct drm_i915_query_memory_regions
+ *
+ * The region info query enumerates all regions known to the driver by filling
+ * in an array of struct drm_i915_memory_region_info structures.
+ *
+ * Example for getting the list of supported regions:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_query_memory_regions *info;
+ * struct drm_i915_query_item item = {
+ * .query_id = DRM_I915_QUERY_MEMORY_REGIONS,
+ * };
+ * struct drm_i915_query query = {
+ * .num_items = 1,
+ * .items_ptr = (uintptr_t)&item,
+ * };
+ * int err, i;
+ *
+ * // First query the size of the blob we need, this needs to be large
+ * // enough to hold our array of regions. The kernel will fill out the
+ * // item.length for us, which is the number of bytes we need.
+ * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ * if (err) ...
+ *
+ * info = calloc(1, item.length);
+ * // Now that we allocated the required number of bytes, we call the ioctl
+ * // again, this time with the data_ptr pointing to our newly allocated
+ * // blob, which the kernel can then populate with all the region info.
+ * item.data_ptr = (uintptr_t)info;
+ *
+ * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ * if (err) ...
+ *
+ * // We can now access each region in the array
+ * for (i = 0; i < info->num_regions; i++) {
+ * struct drm_i915_memory_region_info mr = info->regions[i];
+ * u16 class = mr.region.class;
+ * u16 instance = mr.region.instance;
+ *
+ * ....
+ * }
+ *
+ * free(info);
+ */
+struct drm_i915_query_memory_regions {
+ /** @num_regions: Number of supported regions */
+ __u32 num_regions;
+
+ /** @rsvd: MBZ */
+ __u32 rsvd[3];
+
+ /** @regions: Info about each supported region */
+ struct drm_i915_memory_region_info regions[];
+};
+
+/**
+ * DOC: GuC HWCONFIG blob uAPI
+ *
+ * The GuC produces a blob with information about the current device.
+ * i915 reads this blob from GuC and makes it available via this uAPI.
+ *
+ * The format and meaning of the blob content are documented in the
+ * Programmer's Reference Manual.
+ */
+
+/**
+ * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added
+ * extension support using struct i915_user_extension.
+ *
+ * Note that new buffer flags should be added here, at least for the stuff that
+ * is immutable. Previously we would have two ioctls, one to create the object
+ * with gem_create, and another to apply various parameters, however this
+ * creates some ambiguity for the params which are considered immutable. Also in
+ * general we're phasing out the various SET/GET ioctls.
+ */
+struct drm_i915_gem_create_ext {
+ /**
+ * @size: Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ *
+ * DG2 64K min page size implications:
+ *
+ * On discrete platforms, starting from DG2, we have to contend with GTT
+ * page size restrictions when dealing with I915_MEMORY_CLASS_DEVICE
+ * objects. Specifically the hardware only supports 64K or larger GTT
+ * page sizes for such memory. The kernel will already ensure that all
+ * I915_MEMORY_CLASS_DEVICE memory is allocated using 64K or larger page
+ * sizes underneath.
+ *
+ * Note that the returned size here will always reflect any required
+ * rounding up done by the kernel, i.e. 4K will now become 64K on devices
+ * such as DG2. The kernel will always select the largest minimum
+ * page-size for the set of possible placements as the value to use when
+ * rounding up the @size.
+ *
+ * Special DG2 GTT address alignment requirement:
+ *
+ * The GTT alignment will also need to be at least 2M for such objects.
+ *
+ * Note that due to how the hardware implements 64K GTT page support, we
+ * have some further complications:
+ *
+ * 1) The entire PDE (which covers a 2MB virtual address range), must
+ * contain only 64K PTEs, i.e. mixing 4K and 64K PTEs in the same
+ * PDE is forbidden by the hardware.
+ *
+ * 2) We still need to support 4K PTEs for I915_MEMORY_CLASS_SYSTEM
+ * objects.
+ *
+ * To keep things simple for userland, we mandate that any GTT mappings
+ * must be aligned to and rounded up to 2MB. The kernel will internally
+ * pad them out to the next 2MB boundary. As this only wastes virtual
+ * address space and avoids userland having to copy any needlessly
+ * complicated PDE sharing scheme (coloring) and only affects DG2, this
+ * is deemed to be a good compromise.
+ */
+ __u64 size;
+
+ /**
+ * @handle: Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+
+ /**
+ * @flags: Optional flags.
+ *
+ * Supported values:
+ *
+ * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the kernel that
+ * the object will need to be accessed via the CPU.
+ *
+ * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and only
+ * strictly required on configurations where some subset of the device
+ * memory is directly visible/mappable through the CPU (which we also
+ * call small BAR), like on some DG2+ systems. Note that this is quite
+ * undesirable, but due to various factors like the client CPU, BIOS etc
+ * it's something we can expect to see in the wild. See
+ * &drm_i915_memory_region_info.probed_cpu_visible_size for how to
+ * determine if this system applies.
+ *
+ * Note that one of the placements MUST be I915_MEMORY_CLASS_SYSTEM, to
+ * ensure the kernel can always spill the allocation to system memory,
+ * if the object can't be allocated in the mappable part of
+ * I915_MEMORY_CLASS_DEVICE.
+ *
+ * Also note that since the kernel only supports flat-CCS on objects
+ * that can *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore
+ * don't support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
+ * flat-CCS.
+ *
+ * Without this hint, the kernel will assume that non-mappable
+ * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note that the
+ * kernel can still migrate the object to the mappable part, as a last
+ * resort, if userspace ever CPU faults this object, but this might be
+ * expensive, and so ideally should be avoided.
+ *
+ * On older kernels which lack the relevant small-bar uAPI support (see
+ * also &drm_i915_memory_region_info.probed_cpu_visible_size),
+ * usage of the flag will result in an error, but it should NEVER be
+ * possible to end up with a small BAR configuration, assuming we can
+ * also successfully load the i915 kernel module. In such cases the
+ * entire I915_MEMORY_CLASS_DEVICE region will be CPU accessible, and as
+ * such there are zero restrictions on where the object can be placed.
+ */
+#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
+ __u32 flags;
+
+ /**
+ * @extensions: The chain of extensions to apply to this object.
+ *
+ * This will be useful in the future when we need to support several
+ * different extensions, and we need to apply more than one when
+ * creating the object. See struct i915_user_extension.
+ *
+ * If we don't supply any extensions then we get the same old gem_create
+ * behaviour.
+ *
+ * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
+ * struct drm_i915_gem_create_ext_memory_regions.
+ *
+ * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
+ * struct drm_i915_gem_create_ext_protected_content.
+ */
+#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
+#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
+ __u64 extensions;
+};
+
+/**
+ * struct drm_i915_gem_create_ext_memory_regions - The
+ * I915_GEM_CREATE_EXT_MEMORY_REGIONS extension.
+ *
+ * Set the object with the desired set of placements/regions in priority
+ * order. Each entry must be unique and supported by the device.
+ *
+ * This is provided as an array of struct drm_i915_gem_memory_class_instance, or
+ * an equivalent layout of class:instance pair encodings. See struct
+ * drm_i915_query_memory_regions and DRM_I915_QUERY_MEMORY_REGIONS for how to
+ * query the supported regions for a device.
+ *
+ * As an example, on discrete devices, if we wish to set the placement as
+ * device local-memory we can do something like:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_memory_class_instance region_lmem = {
+ * .memory_class = I915_MEMORY_CLASS_DEVICE,
+ * .memory_instance = 0,
+ * };
+ * struct drm_i915_gem_create_ext_memory_regions regions = {
+ * .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
+ * .regions = (uintptr_t)&region_lmem,
+ * .num_regions = 1,
+ * };
+ * struct drm_i915_gem_create_ext create_ext = {
+ * .size = 16 * PAGE_SIZE,
+ * .extensions = (uintptr_t)&regions,
+ * };
+ *
+ * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
+ * if (err) ...
+ *
+ * At which point we get the object handle in &drm_i915_gem_create_ext.handle,
+ * along with the final object size in &drm_i915_gem_create_ext.size, which
+ * should account for any rounding up, if required.
+ *
+ * Note that userspace has no means of knowing the current backing region
+ * for objects where @num_regions is larger than one. The kernel will only
+ * ensure that the priority order of the @regions array is honoured, either
+ * when initially placing the object, or when moving memory around due to
+ * memory pressure.
+ *
+ * On Flat-CCS capable HW, compression is supported for objects residing in
+ * I915_MEMORY_CLASS_DEVICE. If a compressed object also lists another
+ * memory class in @regions and is migrated (by i915, due to memory
+ * constraints) to a non-I915_MEMORY_CLASS_DEVICE region, i915 would need to
+ * decompress the content, but it doesn't have the required information to
+ * decompress userspace compressed objects.
+ *
+ * Hence i915 supports Flat-CCS only on objects which can reside solely in
+ * I915_MEMORY_CLASS_DEVICE regions.
+ */
+struct drm_i915_gem_create_ext_memory_regions {
+ /** @base: Extension link. See struct i915_user_extension. */
+ struct i915_user_extension base;
+
+ /** @pad: MBZ */
+ __u32 pad;
+ /** @num_regions: Number of elements in the @regions array. */
+ __u32 num_regions;
+ /**
+ * @regions: The regions/placements array.
+ *
+ * An array of struct drm_i915_gem_memory_class_instance.
+ */
+ __u64 regions;
+};
+
+/**
+ * struct drm_i915_gem_create_ext_protected_content - The
+ * I915_OBJECT_PARAM_PROTECTED_CONTENT extension.
+ *
+ * If this extension is provided, buffer contents are expected to be protected
+ * by PXP encryption and require decryption for scan out and processing. This
+ * is only possible on platforms that have PXP enabled, on all other scenarios
+ * using this extension will cause the ioctl to fail and return -ENODEV. The
+ * flags parameter is reserved for future expansion and must currently be set
+ * to zero.
+ *
+ * The buffer contents are considered invalid after a PXP session teardown.
+ *
+ * The encryption is guaranteed to be processed correctly only if the object
+ * is submitted with a context created using the
+ * I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. This will also enable extra checks
+ * at submission time on the validity of the objects involved.
+ *
+ * Below is an example on how to create a protected object:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_create_ext_protected_content protected_ext = {
+ * .base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
+ * .flags = 0,
+ * };
+ * struct drm_i915_gem_create_ext create_ext = {
+ * .size = PAGE_SIZE,
+ * .extensions = (uintptr_t)&protected_ext,
+ * };
+ *
+ * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
+ * if (err) ...
+ */
+struct drm_i915_gem_create_ext_protected_content {
+ /** @base: Extension link. See struct i915_user_extension. */
+ struct i915_user_extension base;
+ /** @flags: reserved for future usage, currently MBZ */
+ __u32 flags;
+};
+
+/* ID of the protected content session managed by i915 when PXP is active */
+#define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/lima_drm.h b/include/uapi/drm/lima_drm.h
index 95a00fb867e6..1ec58d652a5a 100644
--- a/include/uapi/drm/lima_drm.h
+++ b/include/uapi/drm/lima_drm.h
@@ -32,12 +32,19 @@ struct drm_lima_get_param {
__u64 value; /* out, parameter value */
};
+/*
+ * A heap buffer dynamically grows its backing memory when a GP task fails
+ * due to lack of heap memory. The size field of a heap buffer is an upper
+ * bound on the backing memory and can be set to a fairly large value.
+ */
+#define LIMA_BO_FLAG_HEAP (1 << 0)
+
/**
 * create a buffer for use by the GPU
*/
struct drm_lima_gem_create {
__u32 size; /* in, buffer size */
- __u32 flags; /* in, currently no flags, must be zero */
+ __u32 flags; /* in, buffer flags */
__u32 handle; /* out, GEM buffer handle */
__u32 pad; /* pad, must be zero */
};
diff --git a/include/uapi/drm/mga_drm.h b/include/uapi/drm/mga_drm.h
index 8c4337548ab5..bb31567e66c0 100644
--- a/include/uapi/drm/mga_drm.h
+++ b/include/uapi/drm/mga_drm.h
@@ -279,20 +279,22 @@ typedef struct drm_mga_init {
unsigned long sarea_priv_offset;
- int chipset;
- int sgram;
+ __struct_group(/* no tag */, always32bit, /* no attrs */,
+ int chipset;
+ int sgram;
- unsigned int maccess;
+ unsigned int maccess;
- unsigned int fb_cpp;
- unsigned int front_offset, front_pitch;
- unsigned int back_offset, back_pitch;
+ unsigned int fb_cpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
- unsigned int depth_cpp;
- unsigned int depth_offset, depth_pitch;
+ unsigned int depth_cpp;
+ unsigned int depth_offset, depth_pitch;
- unsigned int texture_offset[MGA_NR_TEX_HEAPS];
- unsigned int texture_size[MGA_NR_TEX_HEAPS];
+ unsigned int texture_offset[MGA_NR_TEX_HEAPS];
+ unsigned int texture_size[MGA_NR_TEX_HEAPS];
+ );
unsigned long fb_offset;
unsigned long mmio_offset;
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 0b85ed6a3710..3c7b097c4e3d 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -67,20 +67,40 @@ struct drm_msm_timespec {
__s64 tv_nsec; /* nanoseconds */
};
-#define MSM_PARAM_GPU_ID 0x01
-#define MSM_PARAM_GMEM_SIZE 0x02
-#define MSM_PARAM_CHIP_ID 0x03
-#define MSM_PARAM_MAX_FREQ 0x04
-#define MSM_PARAM_TIMESTAMP 0x05
-#define MSM_PARAM_GMEM_BASE 0x06
-#define MSM_PARAM_NR_RINGS 0x07
-#define MSM_PARAM_PP_PGTABLE 0x08 /* => 1 for per-process pagetables, else 0 */
-#define MSM_PARAM_FAULTS 0x09
+/* Below "RO" indicates a read-only param, "WO" indicates write-only, and
+ * "RW" indicates a param that can be both read (GET_PARAM) and written
+ * (SET_PARAM)
+ */
+#define MSM_PARAM_GPU_ID 0x01 /* RO */
+#define MSM_PARAM_GMEM_SIZE 0x02 /* RO */
+#define MSM_PARAM_CHIP_ID 0x03 /* RO */
+#define MSM_PARAM_MAX_FREQ 0x04 /* RO */
+#define MSM_PARAM_TIMESTAMP 0x05 /* RO */
+#define MSM_PARAM_GMEM_BASE 0x06 /* RO */
+#define MSM_PARAM_PRIORITIES 0x07 /* RO: The # of priority levels */
+#define MSM_PARAM_PP_PGTABLE 0x08 /* RO: Deprecated, always returns zero */
+#define MSM_PARAM_FAULTS 0x09 /* RO */
+#define MSM_PARAM_SUSPENDS 0x0a /* RO */
+#define MSM_PARAM_SYSPROF 0x0b /* WO: 1 preserves perfcntrs, 2 also disables suspend */
+#define MSM_PARAM_COMM 0x0c /* WO: override for task->comm */
+#define MSM_PARAM_CMDLINE 0x0d /* WO: override for task cmdline */
+#define MSM_PARAM_VA_START 0x0e /* RO: start of valid GPU iova range */
+#define MSM_PARAM_VA_SIZE 0x0f /* RO: size of valid GPU iova range (bytes) */
+
+/* For backwards compat. The original support for preemption was based on
+ * a single ring per priority level so # of priority levels equals the #
+ * of rings. With drm/scheduler providing additional levels of priority,
+ * the number of priorities is greater than the # of rings. The param is
+ * renamed to better reflect this.
+ */
+#define MSM_PARAM_NR_RINGS MSM_PARAM_PRIORITIES
struct drm_msm_param {
__u32 pipe; /* in, MSM_PIPE_x */
__u32 param; /* in, MSM_PARAM_x */
__u64 value; /* out (get_param) or in (set_param) */
+ __u32 len; /* zero for non-pointer params */
+ __u32 pad; /* must be zero */
};
/*
@@ -93,13 +113,12 @@ struct drm_msm_param {
/* cache modes */
#define MSM_BO_CACHED 0x00010000
#define MSM_BO_WC 0x00020000
-#define MSM_BO_UNCACHED 0x00040000
+#define MSM_BO_UNCACHED 0x00040000 /* deprecated, use MSM_BO_WC */
+#define MSM_BO_CACHED_COHERENT 0x00080000
#define MSM_BO_FLAGS (MSM_BO_SCANOUT | \
MSM_BO_GPU_READONLY | \
- MSM_BO_CACHED | \
- MSM_BO_WC | \
- MSM_BO_UNCACHED)
+ MSM_BO_CACHE_MASK)
struct drm_msm_gem_new {
__u64 size; /* in */
@@ -118,6 +137,7 @@ struct drm_msm_gem_new {
#define MSM_INFO_GET_IOVA 0x01 /* get iova, returned by value */
#define MSM_INFO_SET_NAME 0x02 /* set the debug name (by pointer) */
#define MSM_INFO_GET_NAME 0x03 /* get debug name, returned by pointer */
+#define MSM_INFO_SET_IOVA 0x04 /* set the iova, passed by value */
struct drm_msm_gem_info {
__u32 handle; /* in */
@@ -217,26 +237,50 @@ struct drm_msm_gem_submit_bo {
#define MSM_SUBMIT_FENCE_FD_IN 0x40000000 /* enable input fence_fd */
#define MSM_SUBMIT_FENCE_FD_OUT 0x20000000 /* enable output fence_fd */
#define MSM_SUBMIT_SUDO 0x10000000 /* run submitted cmds from RB */
+#define MSM_SUBMIT_SYNCOBJ_IN 0x08000000 /* enable input syncobj */
+#define MSM_SUBMIT_SYNCOBJ_OUT 0x04000000 /* enable output syncobj */
+#define MSM_SUBMIT_FENCE_SN_IN 0x02000000 /* userspace passes in seqno fence */
#define MSM_SUBMIT_FLAGS ( \
MSM_SUBMIT_NO_IMPLICIT | \
MSM_SUBMIT_FENCE_FD_IN | \
MSM_SUBMIT_FENCE_FD_OUT | \
MSM_SUBMIT_SUDO | \
+ MSM_SUBMIT_SYNCOBJ_IN | \
+ MSM_SUBMIT_SYNCOBJ_OUT | \
+ MSM_SUBMIT_FENCE_SN_IN | \
+ 0)
+
+#define MSM_SUBMIT_SYNCOBJ_RESET 0x00000001 /* Reset syncobj after wait. */
+#define MSM_SUBMIT_SYNCOBJ_FLAGS ( \
+ MSM_SUBMIT_SYNCOBJ_RESET | \
0)
+struct drm_msm_gem_submit_syncobj {
+ __u32 handle; /* in, syncobj handle. */
+ __u32 flags; /* in, from MSM_SUBMIT_SYNCOBJ_FLAGS */
+ __u64 point; /* in, timepoint for timeline syncobjs. */
+};
+
/* Each cmdstream submit consists of a table of buffers involved, and
* one or more cmdstream buffers. This allows for conditional execution
* (context-restore), and IB buffers needed for per tile/bin draw cmds.
*/
struct drm_msm_gem_submit {
__u32 flags; /* MSM_PIPE_x | MSM_SUBMIT_x */
- __u32 fence; /* out */
+ __u32 fence; /* out (or in with MSM_SUBMIT_FENCE_SN_IN flag) */
__u32 nr_bos; /* in, number of submit_bo's */
__u32 nr_cmds; /* in, number of submit_cmd's */
__u64 bos; /* in, ptr to array of submit_bo's */
__u64 cmds; /* in, ptr to array of submit_cmd's */
__s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
- __u32 queueid; /* in, submitqueue id */
+ __u32 queueid; /* in, submitqueue id */
+ __u64 in_syncobjs; /* in, ptr to array of drm_msm_gem_submit_syncobj */
+ __u64 out_syncobjs; /* in, ptr to array of drm_msm_gem_submit_syncobj */
+	__u32 nr_in_syncobjs;  /* in, number of entries in in_syncobjs */
+	__u32 nr_out_syncobjs; /* in, number of entries in out_syncobjs */
+	__u32 syncobj_stride;  /* in, stride of syncobj arrays */
+	__u32 pad;             /* in, reserved for future use, always 0 */
+
};
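
A minimal userspace sketch of how the new syncobj fields fit together, assuming an already-open msm render node, prepared BO/cmd arrays, and existing syncobj handles; error handling is elided and the function name is illustrative:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>

int submit_with_syncobjs(int fd, __u32 queueid,
                         struct drm_msm_gem_submit_bo *bos, __u32 nr_bos,
                         struct drm_msm_gem_submit_cmd *cmds, __u32 nr_cmds,
                         __u32 wait_handle, __u32 signal_handle)
{
	struct drm_msm_gem_submit_syncobj in = {
		.handle = wait_handle,
		.flags  = MSM_SUBMIT_SYNCOBJ_RESET, /* reset syncobj after the wait */
	};
	struct drm_msm_gem_submit_syncobj out = { .handle = signal_handle };
	struct drm_msm_gem_submit req = {
		.flags   = MSM_PIPE_3D0 | MSM_SUBMIT_SYNCOBJ_IN | MSM_SUBMIT_SYNCOBJ_OUT,
		.queueid = queueid,
		.nr_bos  = nr_bos,
		.bos     = (uintptr_t)bos,
		.nr_cmds = nr_cmds,
		.cmds    = (uintptr_t)cmds,
		.in_syncobjs     = (uintptr_t)&in,
		.out_syncobjs    = (uintptr_t)&out,
		.nr_in_syncobjs  = 1,
		.nr_out_syncobjs = 1,
		/* stride lets the kernel cope with older/newer syncobj structs */
		.syncobj_stride  = sizeof(struct drm_msm_gem_submit_syncobj),
	};

	return ioctl(fd, DRM_IOCTL_MSM_GEM_SUBMIT, &req);
}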
/* The normal way to synchronize with the GPU is just to CPU_PREP on
@@ -282,6 +326,10 @@ struct drm_msm_gem_madvise {
#define MSM_SUBMITQUEUE_FLAGS (0)
+/*
+ * The submitqueue priority should be between 0 and MSM_PARAM_PRIORITIES-1,
+ * where a lower numeric value means higher priority.
+ */
struct drm_msm_submitqueue {
__u32 flags; /* in, MSM_SUBMITQUEUE_x */
__u32 prio; /* in, Priority level */
@@ -299,9 +347,7 @@ struct drm_msm_submitqueue_query {
};
#define DRM_MSM_GET_PARAM 0x00
-/* placeholder:
#define DRM_MSM_SET_PARAM 0x01
- */
#define DRM_MSM_GEM_NEW 0x02
#define DRM_MSM_GEM_INFO 0x03
#define DRM_MSM_GEM_CPU_PREP 0x04
@@ -317,6 +363,7 @@ struct drm_msm_submitqueue_query {
#define DRM_MSM_SUBMITQUEUE_QUERY 0x0C
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
+#define DRM_IOCTL_MSM_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SET_PARAM, struct drm_msm_param)
#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
#define DRM_IOCTL_MSM_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_INFO, struct drm_msm_gem_info)
#define DRM_IOCTL_MSM_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_PREP, struct drm_msm_gem_cpu_prep)
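
A hedged sketch of the now-unblocked GET_PARAM/SET_PARAM pair; for by-value params the new len field stays zero, and MSM_PARAM_PRIORITIES is assumed to be defined earlier in this header:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>

static int msm_get_priorities(int fd, uint64_t *nr_priorities)
{
	struct drm_msm_param req = {
		.pipe  = MSM_PIPE_3D0,
		.param = MSM_PARAM_PRIORITIES,
		/* .len stays 0: this is a by-value, not a pointer, param */
	};
	int ret = ioctl(fd, DRM_IOCTL_MSM_GET_PARAM, &req);

	if (ret == 0)
		*nr_priorities = req.value;
	return ret;
}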
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index ec19db1eead8..9f231d40a146 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -84,14 +84,14 @@ struct drm_panfrost_wait_bo {
__s64 timeout_ns; /* absolute */
};
+/* Valid flags to pass to drm_panfrost_create_bo */
#define PANFROST_BO_NOEXEC 1
#define PANFROST_BO_HEAP 2
/**
* struct drm_panfrost_create_bo - ioctl argument for creating Panfrost BOs.
*
- * There are currently no values for the flags argument, but it may be
- * used in a future extension.
+ * The flags argument is a bit mask of PANFROST_BO_* flags.
*/
struct drm_panfrost_create_bo {
__u32 size;
@@ -171,6 +171,7 @@ enum drm_panfrost_param {
DRM_PANFROST_PARAM_JS_FEATURES15,
DRM_PANFROST_PARAM_NR_CORE_GROUPS,
DRM_PANFROST_PARAM_THREAD_TLS_ALLOC,
+ DRM_PANFROST_PARAM_AFBC_FEATURES,
};
struct drm_panfrost_get_param {
@@ -223,6 +224,57 @@ struct drm_panfrost_madvise {
__u32 retained; /* out, whether backing store still exists */
};
+/* Definitions for coredump decoding in user space */
+#define PANFROSTDUMP_MAJOR 1
+#define PANFROSTDUMP_MINOR 0
+
+#define PANFROSTDUMP_MAGIC 0x464E4150 /* PANF */
+
+#define PANFROSTDUMP_BUF_REG 0
+#define PANFROSTDUMP_BUF_BOMAP (PANFROSTDUMP_BUF_REG + 1)
+#define PANFROSTDUMP_BUF_BO (PANFROSTDUMP_BUF_BOMAP + 1)
+#define PANFROSTDUMP_BUF_TRAILER (PANFROSTDUMP_BUF_BO + 1)
+
+/*
+ * This structure is the native endianness of the dumping machine, tools can
+ * detect the endianness by looking at the value in 'magic'.
+ */
+struct panfrost_dump_object_header {
+ __u32 magic;
+ __u32 type;
+ __u32 file_size;
+ __u32 file_offset;
+
+ union {
+ struct {
+ __u64 jc;
+ __u32 gpu_id;
+ __u32 major;
+ __u32 minor;
+ __u64 nbos;
+ } reghdr;
+
+ struct {
+ __u32 valid;
+ __u64 iova;
+ __u32 data[2];
+ } bomap;
+
+ /*
+ * Force same size in case we want to expand the header
+ * with new fields and also keep it 512-byte aligned
+ */
+
+ __u32 sizer[496];
+ };
+};
+
+/* Registers object, an array of these */
+struct panfrost_dump_registers {
+ __u32 reg;
+ __u32 value;
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h
index c4df3c3668b3..94cfc306d50a 100644
--- a/include/uapi/drm/tegra_drm.h
+++ b/include/uapi/drm/tegra_drm.h
@@ -1,24 +1,5 @@
-/*
- * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
+/* SPDX-License-Identifier: MIT */
+/* Copyright (c) 2012-2020 NVIDIA Corporation */
#ifndef _UAPI_TEGRA_DRM_H_
#define _UAPI_TEGRA_DRM_H_
@@ -29,6 +10,8 @@
extern "C" {
#endif
+/* Tegra DRM legacy UAPI. Only enabled with STAGING */
+
#define DRM_TEGRA_GEM_CREATE_TILED (1 << 0)
#define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)
@@ -649,8 +632,8 @@ struct drm_tegra_gem_get_flags {
#define DRM_TEGRA_SYNCPT_READ 0x02
#define DRM_TEGRA_SYNCPT_INCR 0x03
#define DRM_TEGRA_SYNCPT_WAIT 0x04
-#define DRM_TEGRA_OPEN_CHANNEL 0x05
-#define DRM_TEGRA_CLOSE_CHANNEL 0x06
+#define DRM_TEGRA_OPEN_CHANNEL 0x05
+#define DRM_TEGRA_CLOSE_CHANNEL 0x06
#define DRM_TEGRA_GET_SYNCPT 0x07
#define DRM_TEGRA_SUBMIT 0x08
#define DRM_TEGRA_GET_SYNCPT_BASE 0x09
@@ -674,6 +657,402 @@ struct drm_tegra_gem_get_flags {
#define DRM_IOCTL_TEGRA_GEM_SET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_SET_FLAGS, struct drm_tegra_gem_set_flags)
#define DRM_IOCTL_TEGRA_GEM_GET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_GET_FLAGS, struct drm_tegra_gem_get_flags)
+/* New Tegra DRM UAPI */
+
+/*
+ * Reported by the driver in the `capabilities` field.
+ *
+ * DRM_TEGRA_CHANNEL_CAP_CACHE_COHERENT: If set, the engine is cache coherent
+ * with regard to the system memory.
+ */
+#define DRM_TEGRA_CHANNEL_CAP_CACHE_COHERENT (1 << 0)
+
+struct drm_tegra_channel_open {
+ /**
+ * @host1x_class: [in]
+ *
+ * Host1x class of the engine that will be programmed using this
+ * channel.
+ */
+ __u32 host1x_class;
+
+ /**
+ * @flags: [in]
+ *
+ * Flags.
+ */
+ __u32 flags;
+
+ /**
+ * @context: [out]
+ *
+ * Opaque identifier corresponding to the opened channel.
+ */
+ __u32 context;
+
+ /**
+ * @version: [out]
+ *
+ * Version of the engine hardware. This can be used by userspace
+ * to determine how the engine needs to be programmed.
+ */
+ __u32 version;
+
+ /**
+ * @capabilities: [out]
+ *
+ * Flags describing the hardware capabilities.
+ */
+ __u32 capabilities;
+ __u32 padding;
+};
+
+struct drm_tegra_channel_close {
+ /**
+ * @context: [in]
+ *
+ * Identifier of the channel to close.
+ */
+ __u32 context;
+ __u32 padding;
+};
+
+/*
+ * Mapping flags that can be used to influence how the mapping is created.
+ *
+ * DRM_TEGRA_CHANNEL_MAP_READ: create mapping that allows HW read access
+ * DRM_TEGRA_CHANNEL_MAP_WRITE: create mapping that allows HW write access
+ */
+#define DRM_TEGRA_CHANNEL_MAP_READ (1 << 0)
+#define DRM_TEGRA_CHANNEL_MAP_WRITE (1 << 1)
+#define DRM_TEGRA_CHANNEL_MAP_READ_WRITE (DRM_TEGRA_CHANNEL_MAP_READ | \
+ DRM_TEGRA_CHANNEL_MAP_WRITE)
+
+struct drm_tegra_channel_map {
+ /**
+ * @context: [in]
+ *
+ * Identifier of the channel to make the memory available to.
+ */
+ __u32 context;
+
+ /**
+ * @handle: [in]
+ *
+ * GEM handle of the memory to map.
+ */
+ __u32 handle;
+
+ /**
+ * @flags: [in]
+ *
+ * Flags.
+ */
+ __u32 flags;
+
+ /**
+ * @mapping: [out]
+ *
+ * Identifier corresponding to the mapping, to be used for
+ * relocations or unmapping later.
+ */
+ __u32 mapping;
+};
+
+struct drm_tegra_channel_unmap {
+ /**
+ * @context: [in]
+ *
+ * Channel identifier of the channel to unmap memory from.
+ */
+ __u32 context;
+
+ /**
+ * @mapping: [in]
+ *
+ * Mapping identifier of the memory mapping to unmap.
+ */
+ __u32 mapping;
+};
+
+/* Submission */
+
+/**
+ * Specify that bit 39 of the patched-in address should be set to switch
+ * swizzling between Tegra and non-Tegra sector layout on systems that store
+ * surfaces in system memory in non-Tegra sector layout.
+ */
+#define DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT (1 << 0)
+
+struct drm_tegra_submit_buf {
+ /**
+ * @mapping: [in]
+ *
+ * Identifier of the mapping to use in the submission.
+ */
+ __u32 mapping;
+
+ /**
+ * @flags: [in]
+ *
+ * Flags.
+ */
+ __u32 flags;
+
+ /**
+ * Information for relocation patching.
+ */
+ struct {
+ /**
+ * @target_offset: [in]
+ *
+ * Offset from the start of the mapping of the data whose
+ * address is to be patched into the gather.
+ */
+ __u64 target_offset;
+
+ /**
+ * @gather_offset_words: [in]
+ *
+ * Offset in words from the start of the gather data to
+ * where the address should be patched into.
+ */
+ __u32 gather_offset_words;
+
+ /**
+ * @shift: [in]
+ *
+ * Number of bits the address should be shifted right before
+ * patching in.
+ */
+ __u32 shift;
+ } reloc;
+};
+
+/**
+ * Execute `words` words of Host1x opcodes specified in the `gather_data_ptr`
+ * buffer. Each GATHER_UPTR command uses successive words from the buffer.
+ */
+#define DRM_TEGRA_SUBMIT_CMD_GATHER_UPTR 0
+/**
+ * Wait for a syncpoint to reach a value before continuing with further
+ * commands.
+ */
+#define DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT 1
+/**
+ * Wait for a syncpoint to reach a value before continuing with further
+ * commands. The threshold is calculated relative to the start of the job.
+ */
+#define DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT_RELATIVE 2
+
+struct drm_tegra_submit_cmd_gather_uptr {
+ __u32 words;
+ __u32 reserved[3];
+};
+
+struct drm_tegra_submit_cmd_wait_syncpt {
+ __u32 id;
+ __u32 value;
+ __u32 reserved[2];
+};
+
+struct drm_tegra_submit_cmd {
+ /**
+ * @type: [in]
+ *
+ * Command type to execute. One of the DRM_TEGRA_SUBMIT_CMD*
+ * defines.
+ */
+ __u32 type;
+
+ /**
+ * @flags: [in]
+ *
+ * Flags.
+ */
+ __u32 flags;
+
+ union {
+ struct drm_tegra_submit_cmd_gather_uptr gather_uptr;
+ struct drm_tegra_submit_cmd_wait_syncpt wait_syncpt;
+ __u32 reserved[4];
+ };
+};
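
A short sketch of building the cmds array for a job (one syncpoint wait followed by one gather) using only the structures defined above; the argument names are illustrative:

#include <drm/tegra_drm.h>

static void build_cmds(struct drm_tegra_submit_cmd cmds[2],
		       __u32 wait_id, __u32 wait_value, __u32 gather_words)
{
	cmds[0] = (struct drm_tegra_submit_cmd) {
		.type = DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT,
		.wait_syncpt = { .id = wait_id, .value = wait_value },
	};
	cmds[1] = (struct drm_tegra_submit_cmd) {
		.type = DRM_TEGRA_SUBMIT_CMD_GATHER_UPTR,
		/* consumes the next gather_words words of gather_data_ptr */
		.gather_uptr = { .words = gather_words },
	};
}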
+
+struct drm_tegra_submit_syncpt {
+ /**
+ * @id: [in]
+ *
+ * ID of the syncpoint that the job will increment.
+ */
+ __u32 id;
+
+ /**
+ * @flags: [in]
+ *
+ * Flags.
+ */
+ __u32 flags;
+
+ /**
+ * @increments: [in]
+ *
+ * Number of times the job will increment this syncpoint.
+ */
+ __u32 increments;
+
+ /**
+ * @value: [out]
+ *
+ * Value the syncpoint will have once the job has completed all
+ * its specified syncpoint increments.
+ *
+ * Note that the kernel may increment the syncpoint before or after
+ * the job. These increments are not reflected in this field.
+ *
+ * If the job hangs or times out, not all of the increments may
+ * get executed.
+ */
+ __u32 value;
+};
+
+struct drm_tegra_channel_submit {
+ /**
+ * @context: [in]
+ *
+ * Identifier of the channel to submit this job to.
+ */
+ __u32 context;
+
+ /**
+ * @num_bufs: [in]
+ *
+ * Number of elements in the `bufs_ptr` array.
+ */
+ __u32 num_bufs;
+
+ /**
+ * @num_cmds: [in]
+ *
+ * Number of elements in the `cmds_ptr` array.
+ */
+ __u32 num_cmds;
+
+ /**
+ * @gather_data_words: [in]
+ *
+ * Number of 32-bit words in the `gather_data_ptr` array.
+ */
+ __u32 gather_data_words;
+
+ /**
+ * @bufs_ptr: [in]
+ *
+ * Pointer to an array of drm_tegra_submit_buf structures.
+ */
+ __u64 bufs_ptr;
+
+ /**
+ * @cmds_ptr: [in]
+ *
+ * Pointer to an array of drm_tegra_submit_cmd structures.
+ */
+ __u64 cmds_ptr;
+
+ /**
+ * @gather_data_ptr: [in]
+ *
+ * Pointer to an array of Host1x opcodes to be used by GATHER_UPTR
+ * commands.
+ */
+ __u64 gather_data_ptr;
+
+ /**
+ * @syncobj_in: [in]
+ *
+ * Handle for DRM syncobj that will be waited before submission.
+ * Ignored if zero.
+ */
+ __u32 syncobj_in;
+
+ /**
+ * @syncobj_out: [in]
+ *
+ * Handle for DRM syncobj that will have its fence replaced with
+ * the job's completion fence. Ignored if zero.
+ */
+ __u32 syncobj_out;
+
+ /**
+ * @syncpt: [in,out]
+ *
+ * Information about the syncpoint the job will increment.
+ */
+ struct drm_tegra_submit_syncpt syncpt;
+};
+
+struct drm_tegra_syncpoint_allocate {
+ /**
+ * @id: [out]
+ *
+ * ID of allocated syncpoint.
+ */
+ __u32 id;
+ __u32 padding;
+};
+
+struct drm_tegra_syncpoint_free {
+ /**
+ * @id: [in]
+ *
+ * ID of syncpoint to free.
+ */
+ __u32 id;
+ __u32 padding;
+};
+
+struct drm_tegra_syncpoint_wait {
+ /**
+ * @timeout_ns: [in]
+ *
+ * Absolute timestamp at which the wait will time out.
+ */
+ __s64 timeout_ns;
+
+ /**
+ * @id: [in]
+ *
+ * ID of syncpoint to wait on.
+ */
+ __u32 id;
+
+ /**
+ * @threshold: [in]
+ *
+ * Threshold to wait for.
+ */
+ __u32 threshold;
+
+ /**
+ * @value: [out]
+ *
+ * Value of the syncpoint upon wait completion.
+ */
+ __u32 value;
+
+ __u32 padding;
+};
+
+#define DRM_IOCTL_TEGRA_CHANNEL_OPEN DRM_IOWR(DRM_COMMAND_BASE + 0x10, struct drm_tegra_channel_open)
+#define DRM_IOCTL_TEGRA_CHANNEL_CLOSE DRM_IOWR(DRM_COMMAND_BASE + 0x11, struct drm_tegra_channel_close)
+#define DRM_IOCTL_TEGRA_CHANNEL_MAP DRM_IOWR(DRM_COMMAND_BASE + 0x12, struct drm_tegra_channel_map)
+#define DRM_IOCTL_TEGRA_CHANNEL_UNMAP DRM_IOWR(DRM_COMMAND_BASE + 0x13, struct drm_tegra_channel_unmap)
+#define DRM_IOCTL_TEGRA_CHANNEL_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + 0x14, struct drm_tegra_channel_submit)
+
+#define DRM_IOCTL_TEGRA_SYNCPOINT_ALLOCATE DRM_IOWR(DRM_COMMAND_BASE + 0x20, struct drm_tegra_syncpoint_allocate)
+#define DRM_IOCTL_TEGRA_SYNCPOINT_FREE DRM_IOWR(DRM_COMMAND_BASE + 0x21, struct drm_tegra_syncpoint_free)
+#define DRM_IOCTL_TEGRA_SYNCPOINT_WAIT DRM_IOWR(DRM_COMMAND_BASE + 0x22, struct drm_tegra_syncpoint_wait)
+
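Tying the new UAPI together, a minimal end-to-end sketch: open a channel, allocate a job syncpoint, submit, then wait. The host1x class, gather contents, and GEM setup are placeholders, and error paths leak resources for brevity:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/tegra_drm.h>

int run_job(int fd, __u32 host1x_class,
	    __u64 gather_ptr, __u32 gather_words,
	    struct drm_tegra_submit_cmd *cmds, __u32 num_cmds)
{
	struct drm_tegra_channel_open open_args = {
		.host1x_class = host1x_class,
	};
	struct drm_tegra_syncpoint_allocate sp = {0};
	struct drm_tegra_channel_submit submit = {0};
	struct drm_tegra_syncpoint_wait wait = {0};

	if (ioctl(fd, DRM_IOCTL_TEGRA_CHANNEL_OPEN, &open_args))
		return -1;
	if (ioctl(fd, DRM_IOCTL_TEGRA_SYNCPOINT_ALLOCATE, &sp))
		return -1;

	submit.context = open_args.context;
	submit.num_cmds = num_cmds;
	submit.cmds_ptr = (uintptr_t)cmds;
	submit.gather_data_ptr = gather_ptr;
	submit.gather_data_words = gather_words;
	submit.syncpt.id = sp.id;
	submit.syncpt.increments = 1; /* the job increments it once at the end */
	if (ioctl(fd, DRM_IOCTL_TEGRA_CHANNEL_SUBMIT, &submit))
		return -1;

	wait.id = sp.id;
	wait.threshold = submit.syncpt.value; /* value once the job is done */
	wait.timeout_ns = INT64_MAX;          /* absolute timestamp */
	return ioctl(fd, DRM_IOCTL_TEGRA_SYNCPOINT_WAIT, &wait);
}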
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h
index 1ce746e228d9..3dfc0af8756a 100644
--- a/include/uapi/drm/v3d_drm.h
+++ b/include/uapi/drm/v3d_drm.h
@@ -38,6 +38,9 @@ extern "C" {
#define DRM_V3D_GET_BO_OFFSET 0x05
#define DRM_V3D_SUBMIT_TFU 0x06
#define DRM_V3D_SUBMIT_CSD 0x07
+#define DRM_V3D_PERFMON_CREATE 0x08
+#define DRM_V3D_PERFMON_DESTROY 0x09
+#define DRM_V3D_PERFMON_GET_VALUES 0x0a
#define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
#define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
@@ -47,8 +50,75 @@ extern "C" {
#define DRM_IOCTL_V3D_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset)
#define DRM_IOCTL_V3D_SUBMIT_TFU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_TFU, struct drm_v3d_submit_tfu)
#define DRM_IOCTL_V3D_SUBMIT_CSD DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CSD, struct drm_v3d_submit_csd)
+#define DRM_IOCTL_V3D_PERFMON_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_CREATE, \
+ struct drm_v3d_perfmon_create)
+#define DRM_IOCTL_V3D_PERFMON_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_DESTROY, \
+ struct drm_v3d_perfmon_destroy)
+#define DRM_IOCTL_V3D_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_GET_VALUES, \
+ struct drm_v3d_perfmon_get_values)
#define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01
+#define DRM_V3D_SUBMIT_EXTENSION 0x02
+
+/* struct drm_v3d_extension - ioctl extensions
+ *
+ * Linked list of generic extensions, where the id identifies which struct
+ * is pointed to by ext_data. Therefore, DRM_V3D_EXT_ID_* is used in id to
+ * identify the extension type.
+ */
+struct drm_v3d_extension {
+ __u64 next;
+ __u32 id;
+#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01
+ __u32 flags; /* mbz */
+};
+
+/* struct drm_v3d_sem - wait/signal semaphore
+ *
+ * For a binary semaphore, only the syncobj handle is used; the flags and
+ * point fields are ignored. The point field is defined for the timeline
+ * syncobj feature.
+ */
+struct drm_v3d_sem {
+ __u32 handle; /* syncobj */
+ /* rsv below, for future uses */
+ __u32 flags;
+ __u64 point; /* for timeline sem support */
+ __u64 mbz[2]; /* must be zero, rsv */
+};
+
+/* Enum for each of the V3D queues. */
+enum v3d_queue {
+ V3D_BIN,
+ V3D_RENDER,
+ V3D_TFU,
+ V3D_CSD,
+ V3D_CACHE_CLEAN,
+};
+
+/**
+ * struct drm_v3d_multi_sync - ioctl extension to add support for multiple
+ * syncobjs on command submission.
+ *
+ * When an extension with id DRM_V3D_EXT_ID_MULTI_SYNC is defined, it points
+ * to this extension to define wait and signal dependencies, instead of the
+ * single in/out sync entries on submitting commands. The wait_stage field
+ * determines the stage at which wait dependencies are set.
+ */
+struct drm_v3d_multi_sync {
+ struct drm_v3d_extension base;
+ /* Array of wait and signal semaphores */
+ __u64 in_syncs;
+ __u64 out_syncs;
+
+ /* Number of entries */
+ __u32 in_sync_count;
+ __u32 out_sync_count;
+
+ /* set the stage (v3d_queue) to sync */
+ __u32 wait_stage;
+
+ __u32 pad; /* mbz */
+};
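
A sketch of attaching this extension to a CL submit; the semaphore arrays and syncobj handles are assumed to exist already, so only the extension plumbing is shown:

#include <stdint.h>
#include <drm/v3d_drm.h>

static void attach_multisync(struct drm_v3d_submit_cl *submit,
			     struct drm_v3d_multi_sync *ms,
			     struct drm_v3d_sem *in, __u32 n_in,
			     struct drm_v3d_sem *out, __u32 n_out)
{
	*ms = (struct drm_v3d_multi_sync) {
		.base = {
			.id = DRM_V3D_EXT_ID_MULTI_SYNC,
			/* .next = 0: last (and only) extension in the chain */
		},
		.in_syncs = (uintptr_t)in,
		.out_syncs = (uintptr_t)out,
		.in_sync_count = n_in,
		.out_sync_count = n_out,
		.wait_stage = V3D_BIN, /* wait before the binning stage */
	};

	submit->flags |= DRM_V3D_SUBMIT_EXTENSION;
	submit->extensions = (uintptr_t)ms;
}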
/**
* struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D
@@ -126,7 +196,16 @@ struct drm_v3d_submit_cl {
/* Number of BO handles passed in (size is that times 4). */
__u32 bo_handle_count;
+ /* DRM_V3D_SUBMIT_* properties */
__u32 flags;
+
+ /* ID of the perfmon to attach to this job. 0 means no perfmon. */
+ __u32 perfmon_id;
+
+ __u32 pad;
+
+ /* Pointer to an array of ioctl extensions */
+ __u64 extensions;
};
/**
@@ -195,6 +274,8 @@ enum drm_v3d_param {
DRM_V3D_PARAM_SUPPORTS_TFU,
DRM_V3D_PARAM_SUPPORTS_CSD,
DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH,
+ DRM_V3D_PARAM_SUPPORTS_PERFMON,
+ DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT,
};
struct drm_v3d_get_param {
@@ -233,6 +314,11 @@ struct drm_v3d_submit_tfu {
__u32 in_sync;
/* Sync object to signal when the TFU job is done. */
__u32 out_sync;
+
+ __u32 flags;
+
+ /* Pointer to an array of ioctl extensions */
+ __u64 extensions;
};
/* Submits a compute shader for dispatch. This job will block on any
@@ -258,6 +344,134 @@ struct drm_v3d_submit_csd {
__u32 in_sync;
/* Sync object to signal when the CSD job is done. */
__u32 out_sync;
+
+ /* ID of the perfmon to attach to this job. 0 means no perfmon. */
+ __u32 perfmon_id;
+
+ /* Pointer to an array of ioctl extensions */
+ __u64 extensions;
+
+ __u32 flags;
+
+ __u32 pad;
+};
+
+enum {
+ V3D_PERFCNT_FEP_VALID_PRIMTS_NO_PIXELS,
+ V3D_PERFCNT_FEP_VALID_PRIMS,
+ V3D_PERFCNT_FEP_EZ_NFCLIP_QUADS,
+ V3D_PERFCNT_FEP_VALID_QUADS,
+ V3D_PERFCNT_TLB_QUADS_STENCIL_FAIL,
+ V3D_PERFCNT_TLB_QUADS_STENCILZ_FAIL,
+ V3D_PERFCNT_TLB_QUADS_STENCILZ_PASS,
+ V3D_PERFCNT_TLB_QUADS_ZERO_COV,
+ V3D_PERFCNT_TLB_QUADS_NONZERO_COV,
+ V3D_PERFCNT_TLB_QUADS_WRITTEN,
+ V3D_PERFCNT_PTB_PRIM_VIEWPOINT_DISCARD,
+ V3D_PERFCNT_PTB_PRIM_CLIP,
+ V3D_PERFCNT_PTB_PRIM_REV,
+ V3D_PERFCNT_QPU_IDLE_CYCLES,
+ V3D_PERFCNT_QPU_ACTIVE_CYCLES_VERTEX_COORD_USER,
+ V3D_PERFCNT_QPU_ACTIVE_CYCLES_FRAG,
+ V3D_PERFCNT_QPU_CYCLES_VALID_INSTR,
+ V3D_PERFCNT_QPU_CYCLES_TMU_STALL,
+ V3D_PERFCNT_QPU_CYCLES_SCOREBOARD_STALL,
+ V3D_PERFCNT_QPU_CYCLES_VARYINGS_STALL,
+ V3D_PERFCNT_QPU_IC_HIT,
+ V3D_PERFCNT_QPU_IC_MISS,
+ V3D_PERFCNT_QPU_UC_HIT,
+ V3D_PERFCNT_QPU_UC_MISS,
+ V3D_PERFCNT_TMU_TCACHE_ACCESS,
+ V3D_PERFCNT_TMU_TCACHE_MISS,
+ V3D_PERFCNT_VPM_VDW_STALL,
+ V3D_PERFCNT_VPM_VCD_STALL,
+ V3D_PERFCNT_BIN_ACTIVE,
+ V3D_PERFCNT_RDR_ACTIVE,
+ V3D_PERFCNT_L2T_HITS,
+ V3D_PERFCNT_L2T_MISSES,
+ V3D_PERFCNT_CYCLE_COUNT,
+ V3D_PERFCNT_QPU_CYCLES_STALLED_VERTEX_COORD_USER,
+ V3D_PERFCNT_QPU_CYCLES_STALLED_FRAGMENT,
+ V3D_PERFCNT_PTB_PRIMS_BINNED,
+ V3D_PERFCNT_AXI_WRITES_WATCH_0,
+ V3D_PERFCNT_AXI_READS_WATCH_0,
+ V3D_PERFCNT_AXI_WRITE_STALLS_WATCH_0,
+ V3D_PERFCNT_AXI_READ_STALLS_WATCH_0,
+ V3D_PERFCNT_AXI_WRITE_BYTES_WATCH_0,
+ V3D_PERFCNT_AXI_READ_BYTES_WATCH_0,
+ V3D_PERFCNT_AXI_WRITES_WATCH_1,
+ V3D_PERFCNT_AXI_READS_WATCH_1,
+ V3D_PERFCNT_AXI_WRITE_STALLS_WATCH_1,
+ V3D_PERFCNT_AXI_READ_STALLS_WATCH_1,
+ V3D_PERFCNT_AXI_WRITE_BYTES_WATCH_1,
+ V3D_PERFCNT_AXI_READ_BYTES_WATCH_1,
+ V3D_PERFCNT_TLB_PARTIAL_QUADS,
+ V3D_PERFCNT_TMU_CONFIG_ACCESSES,
+ V3D_PERFCNT_L2T_NO_ID_STALL,
+ V3D_PERFCNT_L2T_COM_QUE_STALL,
+ V3D_PERFCNT_L2T_TMU_WRITES,
+ V3D_PERFCNT_TMU_ACTIVE_CYCLES,
+ V3D_PERFCNT_TMU_STALLED_CYCLES,
+ V3D_PERFCNT_CLE_ACTIVE,
+ V3D_PERFCNT_L2T_TMU_READS,
+ V3D_PERFCNT_L2T_CLE_READS,
+ V3D_PERFCNT_L2T_VCD_READS,
+ V3D_PERFCNT_L2T_TMUCFG_READS,
+ V3D_PERFCNT_L2T_SLC0_READS,
+ V3D_PERFCNT_L2T_SLC1_READS,
+ V3D_PERFCNT_L2T_SLC2_READS,
+ V3D_PERFCNT_L2T_TMU_W_MISSES,
+ V3D_PERFCNT_L2T_TMU_R_MISSES,
+ V3D_PERFCNT_L2T_CLE_MISSES,
+ V3D_PERFCNT_L2T_VCD_MISSES,
+ V3D_PERFCNT_L2T_TMUCFG_MISSES,
+ V3D_PERFCNT_L2T_SLC0_MISSES,
+ V3D_PERFCNT_L2T_SLC1_MISSES,
+ V3D_PERFCNT_L2T_SLC2_MISSES,
+ V3D_PERFCNT_CORE_MEM_WRITES,
+ V3D_PERFCNT_L2T_MEM_WRITES,
+ V3D_PERFCNT_PTB_MEM_WRITES,
+ V3D_PERFCNT_TLB_MEM_WRITES,
+ V3D_PERFCNT_CORE_MEM_READS,
+ V3D_PERFCNT_L2T_MEM_READS,
+ V3D_PERFCNT_PTB_MEM_READS,
+ V3D_PERFCNT_PSE_MEM_READS,
+ V3D_PERFCNT_TLB_MEM_READS,
+ V3D_PERFCNT_GMP_MEM_READS,
+ V3D_PERFCNT_PTB_W_MEM_WORDS,
+ V3D_PERFCNT_TLB_W_MEM_WORDS,
+ V3D_PERFCNT_PSE_R_MEM_WORDS,
+ V3D_PERFCNT_TLB_R_MEM_WORDS,
+ V3D_PERFCNT_TMU_MRU_HITS,
+ V3D_PERFCNT_COMPUTE_ACTIVE,
+ V3D_PERFCNT_NUM,
+};
+
+#define DRM_V3D_MAX_PERF_COUNTERS 32
+
+struct drm_v3d_perfmon_create {
+ __u32 id;
+ __u32 ncounters;
+ __u8 counters[DRM_V3D_MAX_PERF_COUNTERS];
+};
+
+struct drm_v3d_perfmon_destroy {
+ __u32 id;
+};
+
+/*
+ * Returns the values of the performance counters tracked by this
+ * perfmon (as an array of ncounters u64 values).
+ *
+ * No implicit synchronization is performed, so the user has to
+ * guarantee that any jobs using this perfmon have already been
+ * completed (probably by blocking on the seqno returned by the
+ * last exec that used the perfmon).
+ */
+struct drm_v3d_perfmon_get_values {
+ __u32 id;
+ __u32 pad;
+ __u64 values_ptr;
};
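
A hedged sketch of the perfmon lifecycle: create, attach to jobs via perfmon_id, read, destroy. As the comment above notes, the caller must make sure the jobs have completed before reading; that wait is elided here:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/v3d_drm.h>

int read_cycle_count(int fd, __u64 *cycles)
{
	struct drm_v3d_perfmon_create create = {
		.ncounters = 1,
		.counters = { V3D_PERFCNT_CYCLE_COUNT },
	};
	__u64 values[1];
	struct drm_v3d_perfmon_get_values get = {0};
	struct drm_v3d_perfmon_destroy destroy = {0};
	int ret;

	if (ioctl(fd, DRM_IOCTL_V3D_PERFMON_CREATE, &create))
		return -1;

	/* ... submit jobs with .perfmon_id = create.id and wait for them ... */

	get.id = create.id;
	get.values_ptr = (uintptr_t)values;
	ret = ioctl(fd, DRM_IOCTL_V3D_PERFMON_GET_VALUES, &get);
	if (ret == 0)
		*cycles = values[0];

	destroy.id = create.id;
	ioctl(fd, DRM_IOCTL_V3D_PERFMON_DESTROY, &destroy);
	return ret;
}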
#if defined(__cplusplus)
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index f06a789f34cd..0512fde5e697 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -46,12 +46,16 @@ extern "C" {
#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
+#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
+#define DRM_VIRTGPU_CONTEXT_INIT 0x0b
#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
+#define VIRTGPU_EXECBUF_RING_IDX 0x04
#define VIRTGPU_EXECBUF_FLAGS (\
VIRTGPU_EXECBUF_FENCE_FD_IN |\
VIRTGPU_EXECBUF_FENCE_FD_OUT |\
+ VIRTGPU_EXECBUF_RING_IDX |\
0)
struct drm_virtgpu_map {
@@ -67,10 +71,17 @@ struct drm_virtgpu_execbuffer {
__u64 bo_handles;
__u32 num_bo_handles;
__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
+ __u32 ring_idx; /* command ring index (see VIRTGPU_EXECBUF_RING_IDX) */
+ __u32 pad;
};
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
+#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
+#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
+#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
+#define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */
+#define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */
struct drm_virtgpu_getparam {
__u64 param;
@@ -100,7 +111,7 @@ struct drm_virtgpu_resource_info {
__u32 bo_handle;
__u32 res_handle;
__u32 size;
- __u32 stride;
+ __u32 blob_mem;
};
struct drm_virtgpu_3d_box {
@@ -117,6 +128,8 @@ struct drm_virtgpu_3d_transfer_to_host {
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
+ __u32 stride;
+ __u32 layer_stride;
};
struct drm_virtgpu_3d_transfer_from_host {
@@ -124,6 +137,8 @@ struct drm_virtgpu_3d_transfer_from_host {
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
+ __u32 stride;
+ __u32 layer_stride;
};
#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
@@ -140,6 +155,54 @@ struct drm_virtgpu_get_caps {
__u32 pad;
};
+struct drm_virtgpu_resource_create_blob {
+#define VIRTGPU_BLOB_MEM_GUEST 0x0001
+#define VIRTGPU_BLOB_MEM_HOST3D 0x0002
+#define VIRTGPU_BLOB_MEM_HOST3D_GUEST 0x0003
+
+#define VIRTGPU_BLOB_FLAG_USE_MAPPABLE 0x0001
+#define VIRTGPU_BLOB_FLAG_USE_SHAREABLE 0x0002
+#define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
+ /* zero is invalid blob_mem */
+ __u32 blob_mem;
+ __u32 blob_flags;
+ __u32 bo_handle;
+ __u32 res_handle;
+ __u64 size;
+
+ /*
+	 * used for 3D contexts with VIRTGPU_BLOB_MEM_HOST3D_GUEST and
+	 * VIRTGPU_BLOB_MEM_HOST3D; otherwise, must be zero.
+ */
+ __u32 pad;
+ __u32 cmd_size;
+ __u64 cmd;
+ __u64 blob_id;
+};
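
A minimal sketch of creating a guest-memory blob, assuming VIRTGPU_PARAM_RESOURCE_BLOB has already been checked; for VIRTGPU_BLOB_MEM_GUEST no command buffer is needed, so cmd_size and cmd stay zero:

#include <sys/ioctl.h>
#include <drm/virtgpu_drm.h>

int create_guest_blob(int fd, __u64 size, __u32 *bo_handle)
{
	struct drm_virtgpu_resource_create_blob args = {
		.blob_mem   = VIRTGPU_BLOB_MEM_GUEST,
		.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE,
		.size       = size,
	};
	int ret = ioctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &args);

	if (ret == 0)
		*bo_handle = args.bo_handle;
	return ret;
}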
+
+#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID 0x0001
+#define VIRTGPU_CONTEXT_PARAM_NUM_RINGS 0x0002
+#define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003
+struct drm_virtgpu_context_set_param {
+ __u64 param;
+ __u64 value;
+};
+
+struct drm_virtgpu_context_init {
+ __u32 num_params;
+ __u32 pad;
+
+ /* pointer to drm_virtgpu_context_set_param array */
+ __u64 ctx_set_params;
+};
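
A sketch of initializing a context with an explicit capset and two rings; the capset id value is a placeholder, and real code would pick one advertised via VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/virtgpu_drm.h>

int init_context(int fd)
{
	struct drm_virtgpu_context_set_param params[] = {
		{ .param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID, .value = 1 },
		{ .param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS, .value = 2 },
	};
	struct drm_virtgpu_context_init args = {
		.num_params = 2,
		.ctx_set_params = (uintptr_t)params,
	};

	return ioctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &args);
}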
+
+/*
+ * Event code that's given when VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is in
+ * effect. The event size is sizeof(drm_event), since there is no additional
+ * payload.
+ */
+#define VIRTGPU_EVENT_FENCE_SIGNALED 0x90000000
+
#define DRM_IOCTL_VIRTGPU_MAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
@@ -175,6 +238,14 @@ struct drm_virtgpu_get_caps {
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
struct drm_virtgpu_get_caps)
+#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB, \
+ struct drm_virtgpu_resource_create_blob)
+
+#define DRM_IOCTL_VIRTGPU_CONTEXT_INIT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_CONTEXT_INIT, \
+ struct drm_virtgpu_context_init)
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index fcb741e3068f..26549c86a91f 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2022 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -72,6 +72,9 @@ extern "C" {
#define DRM_VMW_GB_SURFACE_CREATE_EXT 27
#define DRM_VMW_GB_SURFACE_REF_EXT 28
#define DRM_VMW_MSG 29
+#define DRM_VMW_MKSSTAT_RESET 30
+#define DRM_VMW_MKSSTAT_ADD 31
+#define DRM_VMW_MKSSTAT_REMOVE 32
/*************************************************************************/
/**
@@ -86,6 +89,15 @@ extern "C" {
*
* DRM_VMW_PARAM_SM4_1
* SM4_1 support is enabled.
+ *
+ * DRM_VMW_PARAM_SM5
+ * SM5 support is enabled.
+ *
+ * DRM_VMW_PARAM_GL43
+ * SM5.1+GL4.3 support is enabled.
+ *
+ * DRM_VMW_PARAM_DEVICE_ID
+ * PCI ID of the underlying SVGA device.
*/
#define DRM_VMW_PARAM_NUM_STREAMS 0
@@ -103,6 +115,9 @@ extern "C" {
#define DRM_VMW_PARAM_DX 12
#define DRM_VMW_PARAM_HW_CAPS2 13
#define DRM_VMW_PARAM_SM4_1 14
+#define DRM_VMW_PARAM_SM5 15
+#define DRM_VMW_PARAM_GL43 16
+#define DRM_VMW_PARAM_DEVICE_ID 17
/**
* enum drm_vmw_handle_type - handle type for ref ioctls
@@ -1133,7 +1148,7 @@ struct drm_vmw_handle_close_arg {
* svga3d surface flags split into 2, upper half and lower half.
*/
enum drm_vmw_surface_version {
- drm_vmw_gb_surface_v1
+ drm_vmw_gb_surface_v1,
};
/**
@@ -1144,6 +1159,7 @@ enum drm_vmw_surface_version {
* @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags.
* @multisample_pattern: Multisampling pattern when msaa is supported.
* @quality_level: Precision settings for each sample.
+ * @buffer_byte_stride: Buffer byte stride.
* @must_be_zero: Reserved for future usage.
*
* Input argument to the DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl.
@@ -1152,10 +1168,11 @@ enum drm_vmw_surface_version {
struct drm_vmw_gb_surface_create_ext_req {
struct drm_vmw_gb_surface_create_req base;
enum drm_vmw_surface_version version;
- uint32_t svga3d_flags_upper_32_bits;
- SVGA3dMSPattern multisample_pattern;
- SVGA3dMSQualityLevel quality_level;
- uint64_t must_be_zero;
+ __u32 svga3d_flags_upper_32_bits;
+ __u32 multisample_pattern;
+ __u32 quality_level;
+ __u32 buffer_byte_stride;
+ __u32 must_be_zero;
};
/**
@@ -1230,6 +1247,44 @@ struct drm_vmw_msg_arg {
__u32 receive_len;
};
+/**
+ * struct drm_vmw_mksstat_add_arg
+ *
+ * @stat: Pointer to user-space stat-counters array, page-aligned.
+ * @info: Pointer to user-space counter-infos array, page-aligned.
+ * @strs: Pointer to user-space stat strings, page-aligned.
+ * @stat_len: Length in bytes of stat-counters array.
+ * @info_len: Length in bytes of counter-infos array.
+ * @strs_len: Length in bytes of the stat strings, terminators included.
+ * @description: Pointer to instance descriptor string; will be truncated
+ * to MKS_GUEST_STAT_INSTANCE_DESC_LENGTH chars.
+ * @id: Output identifier of the produced record; -1 if error.
+ *
+ * Argument to the DRM_VMW_MKSSTAT_ADD ioctl.
+ */
+struct drm_vmw_mksstat_add_arg {
+ __u64 stat;
+ __u64 info;
+ __u64 strs;
+ __u64 stat_len;
+ __u64 info_len;
+ __u64 strs_len;
+ __u64 description;
+ __u64 id;
+};
+
+/**
+ * struct drm_vmw_mksstat_remove_arg
+ *
+ * @id: Identifier of the record being disposed, originally obtained through
+ * DRM_VMW_MKSSTAT_ADD ioctl.
+ *
+ * Argument to the DRM_VMW_MKSSTAT_REMOVE ioctl.
+ */
+struct drm_vmw_mksstat_remove_arg {
+ __u64 id;
+};
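
This header only defines the command indices, so the ioctl number is composed the usual DRM way in the sketch below; treat that composition as an assumption rather than part of the header:

#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/vmwgfx_drm.h>

/* Composed here; the UAPI header itself does not provide this macro. */
#define DRM_IOCTL_VMW_MKSSTAT_REMOVE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_REMOVE, \
		struct drm_vmw_mksstat_remove_arg)

int mksstat_remove(int fd, __u64 id)
{
	struct drm_vmw_mksstat_remove_arg arg = { .id = id };

	return ioctl(fd, DRM_IOCTL_VMW_MKSSTAT_REMOVE, &arg);
}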
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/linux/acct.h b/include/uapi/linux/acct.h
index 985b89068591..0e591152aa8a 100644
--- a/include/uapi/linux/acct.h
+++ b/include/uapi/linux/acct.h
@@ -103,12 +103,13 @@ struct acct_v3
/*
* accounting flags
*/
- /* bit set when the process ... */
+ /* bit set when the process/task ... */
#define AFORK 0x01 /* ... executed fork, but did not exec */
#define ASU 0x02 /* ... used super-user privileges */
#define ACOMPAT 0x04 /* ... used compatibility mode (VAX only not used) */
#define ACORE 0x08 /* ... dumped core */
#define AXSIG 0x10 /* ... was killed by a signal */
+#define AGROUP 0x20 /* ... was the last task of the process (task group) */
#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
#define ACCT_BYTEORDER 0x80 /* accounting file is big endian */
diff --git a/include/uapi/linux/acrn.h b/include/uapi/linux/acrn.h
new file mode 100644
index 000000000000..ccf47ed92500
--- /dev/null
+++ b/include/uapi/linux/acrn.h
@@ -0,0 +1,650 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Userspace interface for /dev/acrn_hsm - ACRN Hypervisor Service Module
+ *
+ * This file can be used by applications that need to communicate with the HSM
+ * via the ioctl interface.
+ *
+ * Copyright (C) 2021 Intel Corporation. All rights reserved.
+ */
+
+#ifndef _UAPI_ACRN_H
+#define _UAPI_ACRN_H
+
+#include <linux/types.h>
+#include <linux/uuid.h>
+
+#define ACRN_IO_REQUEST_MAX 16
+
+#define ACRN_IOREQ_STATE_PENDING 0
+#define ACRN_IOREQ_STATE_COMPLETE 1
+#define ACRN_IOREQ_STATE_PROCESSING 2
+#define ACRN_IOREQ_STATE_FREE 3
+
+#define ACRN_IOREQ_TYPE_PORTIO 0
+#define ACRN_IOREQ_TYPE_MMIO 1
+#define ACRN_IOREQ_TYPE_PCICFG 2
+
+#define ACRN_IOREQ_DIR_READ 0
+#define ACRN_IOREQ_DIR_WRITE 1
+
+/**
+ * struct acrn_mmio_request - Info of a MMIO I/O request
+ * @direction: Access direction of this request (ACRN_IOREQ_DIR_*)
+ * @reserved: Reserved for alignment and should be 0
+ * @address: Access address of this MMIO I/O request
+ * @size: Access size of this MMIO I/O request
+ * @value: Read/write value of this MMIO I/O request
+ */
+struct acrn_mmio_request {
+ __u32 direction;
+ __u32 reserved;
+ __u64 address;
+ __u64 size;
+ __u64 value;
+};
+
+/**
+ * struct acrn_pio_request - Info of a PIO I/O request
+ * @direction: Access direction of this request (ACRN_IOREQ_DIR_*)
+ * @reserved: Reserved for alignment and should be 0
+ * @address: Access address of this PIO I/O request
+ * @size: Access size of this PIO I/O request
+ * @value: Read/write value of this PIO I/O request
+ */
+struct acrn_pio_request {
+ __u32 direction;
+ __u32 reserved;
+ __u64 address;
+ __u64 size;
+ __u32 value;
+};
+
+/**
+ * struct acrn_pci_request - Info of a PCI I/O request
+ * @direction: Access direction of this request (ACRN_IOREQ_DIR_*)
+ * @reserved: Reserved for alignment and should be 0
+ * @size: Access size of this PCI I/O request
+ * @value: Read/write value of this PIO I/O request
+ * @bus: PCI bus value of this PCI I/O request
+ * @dev: PCI device value of this PCI I/O request
+ * @func: PCI function value of this PCI I/O request
+ * @reg: PCI config space offset of this PCI I/O request
+ *
+ * Must keep the same header layout as &struct acrn_pio_request.
+ */
+struct acrn_pci_request {
+ __u32 direction;
+ __u32 reserved[3];
+ __u64 size;
+ __u32 value;
+ __u32 bus;
+ __u32 dev;
+ __u32 func;
+ __u32 reg;
+};
+
+/**
+ * struct acrn_io_request - 256-byte ACRN I/O request
+ * @type: Type of this request (ACRN_IOREQ_TYPE_*).
+ * @completion_polling: Polling flag. Hypervisor will poll completion of the
+ * I/O request if this flag is set.
+ * @reserved0: Reserved fields.
+ * @reqs: Union of different types of request. Byte offset: 64.
+ * @reqs.pio_request: PIO request data of the I/O request.
+ * @reqs.pci_request: PCI configuration space request data of the I/O request.
+ * @reqs.mmio_request: MMIO request data of the I/O request.
+ * @reqs.data: Raw data of the I/O request.
+ * @reserved1: Reserved fields.
+ * @kernel_handled: Flag indicating this request needs to be handled in the kernel.
+ * @processed: The status of this request (ACRN_IOREQ_STATE_*).
+ *
+ * The state transitions of ACRN I/O request:
+ *
+ * FREE -> PENDING -> PROCESSING -> COMPLETE -> FREE -> ...
+ *
+ * An I/O request in COMPLETE or FREE state is owned by the hypervisor. HSM and
+ * ACRN userspace are in charge of processing the others.
+ *
+ * On the basis of the states illustrated above, a typical lifecycle of an
+ * ACRN I/O request looks like:
+ *
+ * Flow (assume the initial state is FREE)
+ * |
+ * | Service VM vCPU 0 Service VM vCPU x User vCPU y
+ * |
+ * | hypervisor:
+ * | fills in type, addr, etc.
+ * | pauses the User VM vCPU y
+ * | sets the state to PENDING (a)
+ * | fires an upcall to Service VM
+ * |
+ * | HSM:
+ * | scans for PENDING requests
+ * | sets the states to PROCESSING (b)
+ * | assigns the requests to clients (c)
+ * V
+ * | client:
+ * | scans for the assigned requests
+ * | handles the requests (d)
+ * | HSM:
+ * | sets states to COMPLETE
+ * | notifies the hypervisor
+ * |
+ * | hypervisor:
+ * | resumes User VM vCPU y (e)
+ * |
+ * | hypervisor:
+ * | post handling (f)
+ * V sets states to FREE
+ *
+ * Note that procedures (a) to (f) in the illustration above must be
+ * processed strictly in order. One vCPU cannot trigger another I/O
+ * emulation request before completing the previous one.
+ *
+ * Atomics and barriers are required when the HSM and the hypervisor access
+ * the state of &struct acrn_io_request.
+ *
+ */
+struct acrn_io_request {
+ __u32 type;
+ __u32 completion_polling;
+ __u32 reserved0[14];
+ union {
+ struct acrn_pio_request pio_request;
+ struct acrn_pci_request pci_request;
+ struct acrn_mmio_request mmio_request;
+ __u64 data[8];
+ } reqs;
+ __u32 reserved1;
+ __u32 kernel_handled;
+ __u32 processed;
+} __attribute__((aligned(256)));
+
+struct acrn_io_request_buffer {
+ union {
+ struct acrn_io_request req_slot[ACRN_IO_REQUEST_MAX];
+ __u8 reserved[4096];
+ };
+};
+
+/**
+ * struct acrn_ioreq_notify - The structure of ioreq completion notification
+ * @vmid: User VM ID
+ * @reserved: Reserved and should be 0
+ * @vcpu: vCPU ID
+ */
+struct acrn_ioreq_notify {
+ __u16 vmid;
+ __u16 reserved;
+ __u32 vcpu;
+};
+
+/**
+ * struct acrn_vm_creation - Info to create a User VM
+ * @vmid: User VM ID returned from the hypervisor
+ * @reserved0: Reserved and must be 0
+ * @vcpu_num: Number of vCPU in the VM. Return from hypervisor.
+ * @reserved1: Reserved and must be 0
+ * @uuid: UUID of the VM. Pass to hypervisor directly.
+ * @vm_flag: Flag of the VM creating. Pass to hypervisor directly.
+ * @ioreq_buf: Service VM GPA of I/O request buffer. Pass to
+ * hypervisor directly.
+ * @cpu_affinity: CPU affinity of the VM. Pass to hypervisor directly.
+ * It's a bitmap which indicates CPUs used by the VM.
+ */
+struct acrn_vm_creation {
+ __u16 vmid;
+ __u16 reserved0;
+ __u16 vcpu_num;
+ __u16 reserved1;
+ guid_t uuid;
+ __u64 vm_flag;
+ __u64 ioreq_buf;
+ __u64 cpu_affinity;
+};
+
+/**
+ * struct acrn_gp_regs - General registers of a User VM
+ * @rax: Value of register RAX
+ * @rcx: Value of register RCX
+ * @rdx: Value of register RDX
+ * @rbx: Value of register RBX
+ * @rsp: Value of register RSP
+ * @rbp: Value of register RBP
+ * @rsi: Value of register RSI
+ * @rdi: Value of register RDI
+ * @r8: Value of register R8
+ * @r9: Value of register R9
+ * @r10: Value of register R10
+ * @r11: Value of register R11
+ * @r12: Value of register R12
+ * @r13: Value of register R13
+ * @r14: Value of register R14
+ * @r15: Value of register R15
+ */
+struct acrn_gp_regs {
+ __le64 rax;
+ __le64 rcx;
+ __le64 rdx;
+ __le64 rbx;
+ __le64 rsp;
+ __le64 rbp;
+ __le64 rsi;
+ __le64 rdi;
+ __le64 r8;
+ __le64 r9;
+ __le64 r10;
+ __le64 r11;
+ __le64 r12;
+ __le64 r13;
+ __le64 r14;
+ __le64 r15;
+};
+
+/**
+ * struct acrn_descriptor_ptr - Segment descriptor table of a User VM.
+ * @limit: Limit field.
+ * @base: Base field.
+ * @reserved: Reserved and must be 0.
+ */
+struct acrn_descriptor_ptr {
+ __le16 limit;
+ __le64 base;
+ __le16 reserved[3];
+} __attribute__ ((__packed__));
+
+/**
+ * struct acrn_regs - Registers structure of a User VM
+ * @gprs: General registers
+ * @gdt: Global Descriptor Table
+ * @idt: Interrupt Descriptor Table
+ * @rip: Value of register RIP
+ * @cs_base: Base of code segment selector
+ * @cr0: Value of register CR0
+ * @cr4: Value of register CR4
+ * @cr3: Value of register CR3
+ * @ia32_efer: Value of IA32_EFER MSR
+ * @rflags: Value of register RFLAGS
+ * @reserved_64: Reserved and must be 0
+ * @cs_ar: Attribute field of code segment selector
+ * @cs_limit: Limit field of code segment selector
+ * @reserved_32: Reserved and must be 0
+ * @cs_sel: Value of code segment selector
+ * @ss_sel: Value of stack segment selector
+ * @ds_sel: Value of data segment selector
+ * @es_sel: Value of extra segment selector
+ * @fs_sel: Value of FS selector
+ * @gs_sel: Value of GS selector
+ * @ldt_sel: Value of LDT descriptor selector
+ * @tr_sel: Value of TSS descriptor selector
+ */
+struct acrn_regs {
+ struct acrn_gp_regs gprs;
+ struct acrn_descriptor_ptr gdt;
+ struct acrn_descriptor_ptr idt;
+
+ __le64 rip;
+ __le64 cs_base;
+ __le64 cr0;
+ __le64 cr4;
+ __le64 cr3;
+ __le64 ia32_efer;
+ __le64 rflags;
+ __le64 reserved_64[4];
+
+ __le32 cs_ar;
+ __le32 cs_limit;
+ __le32 reserved_32[3];
+
+ __le16 cs_sel;
+ __le16 ss_sel;
+ __le16 ds_sel;
+ __le16 es_sel;
+ __le16 fs_sel;
+ __le16 gs_sel;
+ __le16 ldt_sel;
+ __le16 tr_sel;
+};
+
+/**
+ * struct acrn_vcpu_regs - Info of vCPU registers state
+ * @vcpu_id: vCPU ID
+ * @reserved: Reserved and must be 0
+ * @vcpu_regs: vCPU registers state
+ *
+ * This structure will be passed to hypervisor directly.
+ */
+struct acrn_vcpu_regs {
+ __u16 vcpu_id;
+ __u16 reserved[3];
+ struct acrn_regs vcpu_regs;
+};
+
+#define ACRN_MEM_ACCESS_RIGHT_MASK 0x00000007U
+#define ACRN_MEM_ACCESS_READ 0x00000001U
+#define ACRN_MEM_ACCESS_WRITE 0x00000002U
+#define ACRN_MEM_ACCESS_EXEC 0x00000004U
+#define ACRN_MEM_ACCESS_RWX (ACRN_MEM_ACCESS_READ | \
+ ACRN_MEM_ACCESS_WRITE | \
+ ACRN_MEM_ACCESS_EXEC)
+
+#define ACRN_MEM_TYPE_MASK 0x000007C0U
+#define ACRN_MEM_TYPE_WB 0x00000040U
+#define ACRN_MEM_TYPE_WT 0x00000080U
+#define ACRN_MEM_TYPE_UC 0x00000100U
+#define ACRN_MEM_TYPE_WC 0x00000200U
+#define ACRN_MEM_TYPE_WP 0x00000400U
+
+/* Memory mapping types */
+#define ACRN_MEMMAP_RAM 0
+#define ACRN_MEMMAP_MMIO 1
+
+/**
+ * struct acrn_vm_memmap - An EPT memory mapping for a User VM.
+ * @type: Type of the memory mapping (ACRN_MEMMAP_*).
+ * Pass to hypervisor directly.
+ * @attr: Attribute of the memory mapping.
+ * Pass to hypervisor directly.
+ * @user_vm_pa: Physical address of User VM.
+ * Pass to hypervisor directly.
+ * @service_vm_pa: Physical address of Service VM.
+ * Pass to hypervisor directly.
+ * @vma_base: VMA address of Service VM. Pass to hypervisor directly.
+ * @len: Length of the memory mapping.
+ * Pass to hypervisor directly.
+ */
+struct acrn_vm_memmap {
+ __u32 type;
+ __u32 attr;
+ __u64 user_vm_pa;
+ union {
+ __u64 service_vm_pa;
+ __u64 vma_base;
+ };
+ __u64 len;
+};
+
+/* Type of interrupt of a passthrough device */
+#define ACRN_PTDEV_IRQ_INTX 0
+#define ACRN_PTDEV_IRQ_MSI 1
+#define ACRN_PTDEV_IRQ_MSIX 2
+/**
+ * struct acrn_ptdev_irq - Interrupt data of a passthrough device.
+ * @type: Type (ACRN_PTDEV_IRQ_*)
+ * @virt_bdf: Virtual Bus/Device/Function
+ * @phys_bdf: Physical Bus/Device/Function
+ * @intx: Info of interrupt
+ * @intx.virt_pin: Virtual IOAPIC pin
+ * @intx.phys_pin: Physical IOAPIC pin
+ * @intx.is_pic_pin: Is PIC pin or not
+ *
+ * This structure will be passed to hypervisor directly.
+ */
+struct acrn_ptdev_irq {
+ __u32 type;
+ __u16 virt_bdf;
+ __u16 phys_bdf;
+
+ struct {
+ __u32 virt_pin;
+ __u32 phys_pin;
+ __u32 is_pic_pin;
+ } intx;
+};
+
+/* Type of PCI device assignment */
+#define ACRN_PTDEV_QUIRK_ASSIGN (1U << 0)
+
+#define ACRN_MMIODEV_RES_NUM 3
+#define ACRN_PCI_NUM_BARS 6
+/**
+ * struct acrn_pcidev - Info for assigning or de-assigning a PCI device
+ * @type: Type of the assignment
+ * @virt_bdf: Virtual Bus/Device/Function
+ * @phys_bdf: Physical Bus/Device/Function
+ * @intr_line: PCI interrupt line
+ * @intr_pin: PCI interrupt pin
+ * @bar: PCI BARs.
+ *
+ * This structure will be passed to hypervisor directly.
+ */
+struct acrn_pcidev {
+ __u32 type;
+ __u16 virt_bdf;
+ __u16 phys_bdf;
+ __u8 intr_line;
+ __u8 intr_pin;
+ __u32 bar[ACRN_PCI_NUM_BARS];
+};
+
+/**
+ * struct acrn_mmiodev - Info for assigning or de-assigning a MMIO device
+ * @name: Name of the MMIO device.
+ * @res[].user_vm_pa: Physical address of User VM of the MMIO region
+ * for the MMIO device.
+ * @res[].service_vm_pa: Physical address of Service VM of the MMIO
+ * region for the MMIO device.
+ * @res[].size: Size of the MMIO region for the MMIO device.
+ * @res[].mem_type: Memory type of the MMIO region for the MMIO
+ * device.
+ *
+ * This structure will be passed to hypervisor directly.
+ */
+struct acrn_mmiodev {
+ __u8 name[8];
+ struct {
+ __u64 user_vm_pa;
+ __u64 service_vm_pa;
+ __u64 size;
+ __u64 mem_type;
+ } res[ACRN_MMIODEV_RES_NUM];
+};
+
+/**
+ * struct acrn_vdev - Info for creating or destroying a virtual device
+ * @id: Union of identifier of the virtual device
+ * @id.value: Raw data of the identifier
+ * @id.fields.vendor: Vendor id of the virtual PCI device
+ * @id.fields.device: Device id of the virtual PCI device
+ * @id.fields.legacy_id: ID of the virtual device if not a PCI device
+ * @slot: Virtual Bus/Device/Function of the virtual
+ * device
+ * @io_base: IO resource base address of the virtual device
+ * @io_size: IO resource size of the virtual device
+ * @args: Arguments for the virtual device creation
+ *
+ * The created virtual device can be a PCI device or a legacy device (e.g.
+ * a virtual UART controller) and it is emulated by the hypervisor. This
+ * structure will be passed to hypervisor directly.
+ */
+struct acrn_vdev {
+ /*
+ * the identifier of the device, the low 32 bits represent the vendor
+ * id and device id of PCI device and the high 32 bits represent the
+ * device number of the legacy device
+ */
+ union {
+ __u64 value;
+ struct {
+ __le16 vendor;
+ __le16 device;
+ __le32 legacy_id;
+ } fields;
+ } id;
+
+ __u64 slot;
+ __u32 io_addr[ACRN_PCI_NUM_BARS];
+ __u32 io_size[ACRN_PCI_NUM_BARS];
+ __u8 args[128];
+};
+
+/**
+ * struct acrn_msi_entry - Info for injecting a MSI interrupt to a VM
+ * @msi_addr: MSI addr[19:12] with dest vCPU ID
+ * @msi_data: MSI data[7:0] with vector
+ */
+struct acrn_msi_entry {
+ __u64 msi_addr;
+ __u64 msi_data;
+};
+
+struct acrn_acpi_generic_address {
+ __u8 space_id;
+ __u8 bit_width;
+ __u8 bit_offset;
+ __u8 access_size;
+ __u64 address;
+} __attribute__ ((__packed__));
+
+/**
+ * struct acrn_cstate_data - A C state package defined in ACPI
+ * @cx_reg: Register of the C state object
+ * @type: Type of the C state object
+ * @latency: The worst-case latency to enter and exit this C state
+ * @power: The average power consumption when in this C state
+ */
+struct acrn_cstate_data {
+ struct acrn_acpi_generic_address cx_reg;
+ __u8 type;
+ __u32 latency;
+ __u64 power;
+};
+
+/**
+ * struct acrn_pstate_data - A P state package defined in ACPI
+ * @core_frequency: CPU frequency (in MHz).
+ * @power: Power dissipation (in milliwatts).
+ * @transition_latency: The worst-case latency in microseconds that CPU is
+ * unavailable during a transition from any P state to
+ * this P state.
+ * @bus_master_latency: The worst-case latency in microseconds that Bus Masters
+ * are prevented from accessing memory during a transition
+ * from any P state to this P state.
+ * @control: The value to be written to Performance Control Register
+ * @status: Transition status.
+ */
+struct acrn_pstate_data {
+ __u64 core_frequency;
+ __u64 power;
+ __u64 transition_latency;
+ __u64 bus_master_latency;
+ __u64 control;
+ __u64 status;
+};
+
+#define PMCMD_TYPE_MASK 0x000000ff
+enum acrn_pm_cmd_type {
+ ACRN_PMCMD_GET_PX_CNT,
+ ACRN_PMCMD_GET_PX_DATA,
+ ACRN_PMCMD_GET_CX_CNT,
+ ACRN_PMCMD_GET_CX_DATA,
+};
+
+#define ACRN_IOEVENTFD_FLAG_PIO 0x01
+#define ACRN_IOEVENTFD_FLAG_DATAMATCH 0x02
+#define ACRN_IOEVENTFD_FLAG_DEASSIGN 0x04
+/**
+ * struct acrn_ioeventfd - Data to operate a &struct hsm_ioeventfd
+ * @fd: The fd of eventfd associated with a hsm_ioeventfd
+ * @flags: Logical-OR of ACRN_IOEVENTFD_FLAG_*
+ * @addr: The start address of IO range of ioeventfd
+ * @len: The length of IO range of ioeventfd
+ * @reserved: Reserved and should be 0
+ * @data: Data for data matching
+ *
+ * Without flag ACRN_IOEVENTFD_FLAG_DEASSIGN, ioctl ACRN_IOCTL_IOEVENTFD
+ * creates a &struct hsm_ioeventfd with properties originated from &struct
+ * acrn_ioeventfd. With flag ACRN_IOEVENTFD_FLAG_DEASSIGN, ioctl
+ * ACRN_IOCTL_IOEVENTFD destroys the &struct hsm_ioeventfd matching the fd.
+ */
+struct acrn_ioeventfd {
+ __u32 fd;
+ __u32 flags;
+ __u64 addr;
+ __u32 len;
+ __u32 reserved;
+ __u64 data;
+};
+
+#define ACRN_IRQFD_FLAG_DEASSIGN 0x01
+/**
+ * struct acrn_irqfd - Data to operate a &struct hsm_irqfd
+ * @fd: The fd of eventfd associated with a hsm_irqfd
+ * @flags: Logical-OR of ACRN_IRQFD_FLAG_*
+ * @msi: Info of MSI associated with the irqfd
+ */
+struct acrn_irqfd {
+ __s32 fd;
+ __u32 flags;
+ struct acrn_msi_entry msi;
+};
+
+/* The ioctl type, documented in ioctl-number.rst */
+#define ACRN_IOCTL_TYPE 0xA2
+
+/*
+ * Common IOCTL IDs definition for ACRN userspace
+ */
+#define ACRN_IOCTL_CREATE_VM \
+ _IOWR(ACRN_IOCTL_TYPE, 0x10, struct acrn_vm_creation)
+#define ACRN_IOCTL_DESTROY_VM \
+ _IO(ACRN_IOCTL_TYPE, 0x11)
+#define ACRN_IOCTL_START_VM \
+ _IO(ACRN_IOCTL_TYPE, 0x12)
+#define ACRN_IOCTL_PAUSE_VM \
+ _IO(ACRN_IOCTL_TYPE, 0x13)
+#define ACRN_IOCTL_RESET_VM \
+ _IO(ACRN_IOCTL_TYPE, 0x15)
+#define ACRN_IOCTL_SET_VCPU_REGS \
+ _IOW(ACRN_IOCTL_TYPE, 0x16, struct acrn_vcpu_regs)
+
+#define ACRN_IOCTL_INJECT_MSI \
+ _IOW(ACRN_IOCTL_TYPE, 0x23, struct acrn_msi_entry)
+#define ACRN_IOCTL_VM_INTR_MONITOR \
+ _IOW(ACRN_IOCTL_TYPE, 0x24, unsigned long)
+#define ACRN_IOCTL_SET_IRQLINE \
+ _IOW(ACRN_IOCTL_TYPE, 0x25, __u64)
+
+#define ACRN_IOCTL_NOTIFY_REQUEST_FINISH \
+ _IOW(ACRN_IOCTL_TYPE, 0x31, struct acrn_ioreq_notify)
+#define ACRN_IOCTL_CREATE_IOREQ_CLIENT \
+ _IO(ACRN_IOCTL_TYPE, 0x32)
+#define ACRN_IOCTL_ATTACH_IOREQ_CLIENT \
+ _IO(ACRN_IOCTL_TYPE, 0x33)
+#define ACRN_IOCTL_DESTROY_IOREQ_CLIENT \
+ _IO(ACRN_IOCTL_TYPE, 0x34)
+#define ACRN_IOCTL_CLEAR_VM_IOREQ \
+ _IO(ACRN_IOCTL_TYPE, 0x35)
+
+#define ACRN_IOCTL_SET_MEMSEG \
+ _IOW(ACRN_IOCTL_TYPE, 0x41, struct acrn_vm_memmap)
+#define ACRN_IOCTL_UNSET_MEMSEG \
+ _IOW(ACRN_IOCTL_TYPE, 0x42, struct acrn_vm_memmap)
+
+#define ACRN_IOCTL_SET_PTDEV_INTR \
+ _IOW(ACRN_IOCTL_TYPE, 0x53, struct acrn_ptdev_irq)
+#define ACRN_IOCTL_RESET_PTDEV_INTR \
+ _IOW(ACRN_IOCTL_TYPE, 0x54, struct acrn_ptdev_irq)
+#define ACRN_IOCTL_ASSIGN_PCIDEV \
+ _IOW(ACRN_IOCTL_TYPE, 0x55, struct acrn_pcidev)
+#define ACRN_IOCTL_DEASSIGN_PCIDEV \
+ _IOW(ACRN_IOCTL_TYPE, 0x56, struct acrn_pcidev)
+#define ACRN_IOCTL_ASSIGN_MMIODEV \
+ _IOW(ACRN_IOCTL_TYPE, 0x57, struct acrn_mmiodev)
+#define ACRN_IOCTL_DEASSIGN_MMIODEV \
+ _IOW(ACRN_IOCTL_TYPE, 0x58, struct acrn_mmiodev)
+#define ACRN_IOCTL_CREATE_VDEV \
+ _IOW(ACRN_IOCTL_TYPE, 0x59, struct acrn_vdev)
+#define ACRN_IOCTL_DESTROY_VDEV \
+ _IOW(ACRN_IOCTL_TYPE, 0x5A, struct acrn_vdev)
+
+#define ACRN_IOCTL_PM_GET_CPU_STATE \
+ _IOWR(ACRN_IOCTL_TYPE, 0x60, __u64)
+
+#define ACRN_IOCTL_IOEVENTFD \
+ _IOW(ACRN_IOCTL_TYPE, 0x70, struct acrn_ioeventfd)
+#define ACRN_IOCTL_IRQFD \
+ _IOW(ACRN_IOCTL_TYPE, 0x71, struct acrn_irqfd)
+
+#endif /* _UAPI_ACRN_H */
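
A hedged sketch of the VM bring-up sequence this header implies — create, set BSP registers, start. The register contents and ioreq buffer are the caller's responsibility, the real flow also maps guest memory with ACRN_IOCTL_SET_MEMSEG, and error paths leak the fd for brevity:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/acrn.h>

int start_user_vm(struct acrn_vm_creation *create,
		  struct acrn_regs *bsp_regs)
{
	/* each open of the HSM node backs one VM instance */
	int fd = open("/dev/acrn_hsm", O_RDWR);
	struct acrn_vcpu_regs vcpu = { .vcpu_id = 0 };

	if (fd < 0)
		return -1;
	if (ioctl(fd, ACRN_IOCTL_CREATE_VM, create))
		return -1;

	vcpu.vcpu_regs = *bsp_regs;
	if (ioctl(fd, ACRN_IOCTL_SET_VCPU_REGS, &vcpu))
		return -1;

	return ioctl(fd, ACRN_IOCTL_START_VM);
}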
diff --git a/include/uapi/linux/agpgart.h b/include/uapi/linux/agpgart.h
index f5251045181a..9cc3448c0b5b 100644
--- a/include/uapi/linux/agpgart.h
+++ b/include/uapi/linux/agpgart.h
@@ -52,7 +52,6 @@
#ifndef __KERNEL__
#include <linux/types.h>
-#include <stdlib.h>
struct agp_version {
__u16 major;
@@ -64,10 +63,10 @@ typedef struct _agp_info {
__u32 bridge_id; /* bridge vendor/device */
__u32 agp_mode; /* mode info of bridge */
unsigned long aper_base;/* base of aperture */
- size_t aper_size; /* size of aperture */
- size_t pg_total; /* max pages (swap + system) */
- size_t pg_system; /* max pages (system) */
- size_t pg_used; /* current pages used */
+ __kernel_size_t aper_size; /* size of aperture */
+ __kernel_size_t pg_total; /* max pages (swap + system) */
+ __kernel_size_t pg_system; /* max pages (system) */
+ __kernel_size_t pg_used; /* current pages used */
} agp_info;
typedef struct _agp_setup {
diff --git a/include/uapi/linux/amt.h b/include/uapi/linux/amt.h
new file mode 100644
index 000000000000..2dccff417195
--- /dev/null
+++ b/include/uapi/linux/amt.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2021 Taehee Yoo <ap420073@gmail.com>
+ */
+#ifndef _UAPI_AMT_H_
+#define _UAPI_AMT_H_
+
+enum ifla_amt_mode {
+	/* The AMT interface works in Gateway mode.
+	 * Gateway mode encapsulates IGMP/MLD traffic and decapsulates
+	 * multicast traffic.
+ */
+ AMT_MODE_GATEWAY = 0,
+	/* The AMT interface works in Relay mode.
+	 * Relay mode encapsulates multicast traffic and decapsulates
+	 * IGMP/MLD traffic.
+ */
+ AMT_MODE_RELAY,
+ __AMT_MODE_MAX,
+};
+
+#define AMT_MODE_MAX (__AMT_MODE_MAX - 1)
+
+enum {
+ IFLA_AMT_UNSPEC,
+	/* This attribute specifies the mode, either Gateway or Relay. */
+ IFLA_AMT_MODE,
+	/* This attribute specifies the Relay port.
+	 * If the AMT interface is created in Gateway mode, this attribute
+	 * is used to specify the relay (remote) port.
+	 * If the AMT interface is created in Relay mode, this attribute is
+	 * used as the local port.
+	 */
+ IFLA_AMT_RELAY_PORT,
+	/* This attribute specifies the Gateway port.
+	 * If the AMT interface is created in Gateway mode, this attribute
+	 * is used as the local port.
+	 * If the AMT interface is created in Relay mode, this attribute is
+	 * not used.
+	 */
+ IFLA_AMT_GATEWAY_PORT,
+	/* This attribute specifies the physical device. */
+ IFLA_AMT_LINK,
+	/* This attribute specifies the local IP address. */
+ IFLA_AMT_LOCAL_IP,
+	/* This attribute specifies the Relay IP address, so it is not
+	 * used by the Relay.
+	 */
+ IFLA_AMT_REMOTE_IP,
+	/* This attribute specifies the Discovery IP address.
+	 * When the Gateway starts, it sends a discovery message to find
+	 * the Relay's IP address, so this is not used by the Relay.
+	 */
+ IFLA_AMT_DISCOVERY_IP,
+	/* This attribute specifies the maximum number of tunnels. */
+ IFLA_AMT_MAX_TUNNELS,
+ __IFLA_AMT_MAX,
+};
+
+#define IFLA_AMT_MAX (__IFLA_AMT_MAX - 1)
+
+#endif /* _UAPI_AMT_H_ */
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 2832134e5397..e72e4de8f452 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -217,6 +217,40 @@ struct binder_node_info_for_ref {
__u32 reserved3;
};
+struct binder_freeze_info {
+ __u32 pid;
+ __u32 enable;
+ __u32 timeout_ms;
+};
+
+struct binder_frozen_status_info {
+ __u32 pid;
+
+ /* process received sync transactions since last frozen
+ * bit 0: received sync transaction after being frozen
+ * bit 1: new pending sync transaction during freezing
+ */
+ __u32 sync_recv;
+
+ /* process received async transactions since last frozen */
+ __u32 async_recv;
+};
+
+/* struct binder_extended_error - extended error information
+ * @id: identifier for the failed operation
+ * @command: command as defined by binder_driver_return_protocol
+ * @param: parameter holding a negative errno value
+ *
+ * Used with BINDER_GET_EXTENDED_ERROR. This extends the error information
+ * returned by the driver upon a failed operation. Userspace can pull this
+ * data to properly handle specific error scenarios.
+ */
+struct binder_extended_error {
+ __u32 id;
+ __u32 command;
+ __s32 param;
+};
+
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
@@ -227,6 +261,10 @@ struct binder_node_info_for_ref {
#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref)
#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object)
+#define BINDER_FREEZE _IOW('b', 14, struct binder_freeze_info)
+#define BINDER_GET_FROZEN_INFO _IOWR('b', 15, struct binder_frozen_status_info)
+#define BINDER_ENABLE_ONEWAY_SPAM_DETECTION _IOW('b', 16, __u32)
+#define BINDER_GET_EXTENDED_ERROR _IOWR('b', 17, struct binder_extended_error)
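
A sketch of the freeze flow using the new ioctls; binder_fd is an open binder device node and the timeout value is illustrative:

#include <sys/ioctl.h>
#include <linux/android/binder.h>

int freeze_process(int binder_fd, __u32 pid)
{
	struct binder_freeze_info freeze = {
		.pid = pid,
		.enable = 1,
		.timeout_ms = 100, /* wait up to 100ms for pending txns */
	};
	struct binder_frozen_status_info status = { .pid = pid };

	if (ioctl(binder_fd, BINDER_FREEZE, &freeze))
		return -1;
	if (ioctl(binder_fd, BINDER_GET_FROZEN_INFO, &status))
		return -1;

	/* bit 0: a sync txn arrived while frozen; caller may want to thaw */
	return status.sync_recv & 1;
}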
/*
* NOTE: Two special error codes you should check for when calling
@@ -248,6 +286,8 @@ enum transaction_flags {
TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
+ TF_CLEAR_BUF = 0x20, /* clear buffer on txn complete */
+ TF_UPDATE_TXN = 0x40, /* update the outdated pending async txn */
};
struct binder_transaction_data {
@@ -265,8 +305,8 @@ struct binder_transaction_data {
/* General information about the transaction. */
__u32 flags;
- pid_t sender_pid;
- uid_t sender_euid;
+ __kernel_pid_t sender_pid;
+ __kernel_uid32_t sender_euid;
binder_size_t data_size; /* number of bytes of data */
binder_size_t offsets_size; /* number of bytes of offsets */
@@ -404,9 +444,22 @@ enum binder_driver_return_protocol {
BR_FAILED_REPLY = _IO('r', 17),
/*
- * The the last transaction (either a bcTRANSACTION or
+ * The last transaction (either a bcTRANSACTION or
* a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
*/
+
+ BR_FROZEN_REPLY = _IO('r', 18),
+ /*
+ * The target of the last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) is frozen. No parameters.
+ */
+
+ BR_ONEWAY_SPAM_SUSPECT = _IO('r', 19),
+ /*
+ * Current process sent too many oneway calls to target, and the last
+ * asynchronous transaction makes the allocated async buffer size exceed
+ * detection threshold. No parameters.
+ */
};
enum binder_driver_command_protocol {
diff --git a/include/uapi/linux/atmioc.h b/include/uapi/linux/atmioc.h
index cd7655e40c77..a9030bcc8d56 100644
--- a/include/uapi/linux/atmioc.h
+++ b/include/uapi/linux/atmioc.h
@@ -5,7 +5,7 @@
/*
- * See http://icawww1.epfl.ch/linux-atm/magic.html for the complete list of
+ * See https://icawww1.epfl.ch/linux-atm/magic.html for the complete list of
* "magic" ioctl numbers.
*/
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index a534d71e689a..d676ed2b246e 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -48,7 +48,7 @@
* 2500 - 2999 future user space (maybe integrity labels and related events)
*
* Messages from 1000-1199 are bi-directional. 1200-1299 & 2100 - 2999 are
- * exclusively user space. 1300-2099 is kernel --> user space
+ * exclusively user space. 1300-2099 is kernel --> user space
* communication.
*/
#define AUDIT_GET 1000 /* Get status */
@@ -78,7 +78,7 @@
#define AUDIT_LAST_USER_MSG 1199
#define AUDIT_FIRST_USER_MSG2 2100 /* More user space messages */
#define AUDIT_LAST_USER_MSG2 2999
-
+
#define AUDIT_DAEMON_START 1200 /* Daemon startup record */
#define AUDIT_DAEMON_END 1201 /* Daemon normal stop record */
#define AUDIT_DAEMON_ABORT 1202 /* Daemon error stop record */
@@ -117,6 +117,11 @@
#define AUDIT_TIME_INJOFFSET 1332 /* Timekeeping offset injected */
#define AUDIT_TIME_ADJNTPVAL 1333 /* NTP value adjustment */
#define AUDIT_BPF 1334 /* BPF subsystem */
+#define AUDIT_EVENT_LISTENER 1335 /* Task joined multicast read socket */
+#define AUDIT_URINGOP 1336 /* io_uring operation */
+#define AUDIT_OPENAT2 1337 /* Record showing openat2 open_how args */
+#define AUDIT_DM_CTRL 1338 /* Device Mapper target control */
+#define AUDIT_DM_EVENT 1339 /* Device Mapper events */
#define AUDIT_AVC 1400 /* SE Linux avc denial or grant */
#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */
@@ -165,8 +170,9 @@
#define AUDIT_FILTER_EXCLUDE 0x05 /* Apply rule before record creation */
#define AUDIT_FILTER_TYPE AUDIT_FILTER_EXCLUDE /* obsolete misleading naming */
#define AUDIT_FILTER_FS 0x06 /* Apply rule at __audit_inode_child */
+#define AUDIT_FILTER_URING_EXIT 0x07 /* Apply rule at io_uring op exit */
-#define AUDIT_NR_FILTERS 7
+#define AUDIT_NR_FILTERS 8
#define AUDIT_FILTER_PREPEND 0x10 /* Prepend to front of list */
@@ -181,7 +187,7 @@
#define AUDIT_MAX_KEY_LEN 256
#define AUDIT_BITMASK_SIZE 64
#define AUDIT_WORD(nr) ((__u32)((nr)/32))
-#define AUDIT_BIT(nr) (1 << ((nr) - AUDIT_WORD(nr)*32))
+#define AUDIT_BIT(nr) (1U << ((nr) - AUDIT_WORD(nr)*32))
#define AUDIT_SYSCALL_CLASSES 16
#define AUDIT_CLASS_DIR_WRITE 0
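
The `1U` in the reworked AUDIT_BIT is load-bearing: syscall numbers whose bit index reaches 31 would otherwise left-shift a signed 1 into the sign bit, which is undefined behavior in C. A small illustration of how a syscall number maps into the per-rule mask array (the helper is hypothetical, not part of the header):

/* Illustration: syscall nr 63 lands in mask word 1, bit 31. With a plain
 * signed 1 the shift by 31 would be UB; with 1U it is well defined.
 */
#include <stdio.h>
#include <linux/audit.h>

static void set_syscall_bit(__u32 mask[AUDIT_BITMASK_SIZE], int nr)
{
	mask[AUDIT_WORD(nr)] |= AUDIT_BIT(nr);	/* hypothetical helper */
}

int main(void)
{
	__u32 mask[AUDIT_BITMASK_SIZE] = { 0 };

	set_syscall_bit(mask, 63);
	printf("word=%u mask=0x%08x\n", AUDIT_WORD(63), mask[AUDIT_WORD(63)]);
	return 0;
}
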
@@ -332,14 +338,15 @@ enum {
};
/* Status symbols */
- /* Mask values */
-#define AUDIT_STATUS_ENABLED 0x0001
-#define AUDIT_STATUS_FAILURE 0x0002
-#define AUDIT_STATUS_PID 0x0004
+ /* Mask values */
+#define AUDIT_STATUS_ENABLED 0x0001
+#define AUDIT_STATUS_FAILURE 0x0002
+#define AUDIT_STATUS_PID 0x0004
#define AUDIT_STATUS_RATE_LIMIT 0x0008
-#define AUDIT_STATUS_BACKLOG_LIMIT 0x0010
-#define AUDIT_STATUS_BACKLOG_WAIT_TIME 0x0020
-#define AUDIT_STATUS_LOST 0x0040
+#define AUDIT_STATUS_BACKLOG_LIMIT 0x0010
+#define AUDIT_STATUS_BACKLOG_WAIT_TIME 0x0020
+#define AUDIT_STATUS_LOST 0x0040
+#define AUDIT_STATUS_BACKLOG_WAIT_TIME_ACTUAL 0x0080
#define AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT 0x00000001
#define AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME 0x00000002
@@ -432,6 +439,8 @@ enum {
#define AUDIT_ARCH_UNICORE (EM_UNICORE|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_XTENSA (EM_XTENSA)
+#define AUDIT_ARCH_LOONGARCH32 (EM_LOONGARCH|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_LOONGARCH64 (EM_LOONGARCH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_PERM_EXEC 1
#define AUDIT_PERM_WRITE 2
@@ -466,6 +475,9 @@ struct audit_status {
__u32 feature_bitmap; /* bitmap of kernel audit features */
};
__u32 backlog_wait_time;/* message queue wait timeout */
+ __u32 backlog_wait_time_actual;/* time spent waiting while
+ * message limit exceeded
+ */
};
struct audit_features {
@@ -504,7 +516,7 @@ struct audit_rule_data {
__u32 values[AUDIT_MAX_FIELDS];
__u32 fieldflags[AUDIT_MAX_FIELDS];
__u32 buflen; /* total length of string fields */
- char buf[0]; /* string fields buffer */
+ char buf[]; /* string fields buffer */
};
#endif /* _UAPI_LINUX_AUDIT_H_ */
diff --git a/include/uapi/linux/auto_dev-ioctl.h b/include/uapi/linux/auto_dev-ioctl.h
index 374742651c30..62e625356dc8 100644
--- a/include/uapi/linux/auto_dev-ioctl.h
+++ b/include/uapi/linux/auto_dev-ioctl.h
@@ -82,7 +82,7 @@ struct args_ismountpoint {
/*
* All the ioctls use this structure.
* When sending a path size must account for the total length
- * of the chunk of memory otherwise is is the size of the
+ * of the chunk of memory otherwise it is the size of the
* structure.
*/
diff --git a/include/uapi/linux/auxvec.h b/include/uapi/linux/auxvec.h
index abe5f2b6581b..c7e502bf5a6f 100644
--- a/include/uapi/linux/auxvec.h
+++ b/include/uapi/linux/auxvec.h
@@ -33,5 +33,8 @@
#define AT_EXECFN 31 /* filename of program */
+#ifndef AT_MINSIGSTKSZ
+#define AT_MINSIGSTKSZ 51 /* minimal stack size for signal delivery */
+#endif
#endif /* _UAPI_LINUX_AUXVEC_H */
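
AT_MINSIGSTKSZ lets the kernel report, per boot, the minimum signal-frame size the hardware actually needs, which can exceed the legacy MINSIGSTKSZ constant on machines with large vector state. A sketch of using it to size an alternate signal stack, assuming glibc's getauxval() (which returns 0 when the entry is absent):

/* Hedged sketch: size an alternate signal stack from AT_MINSIGSTKSZ,
 * falling back to the legacy MINSIGSTKSZ when the kernel does not supply
 * the auxv entry.
 */
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <sys/auxv.h>

#ifndef AT_MINSIGSTKSZ
#define AT_MINSIGSTKSZ 51
#endif

int main(void)
{
	unsigned long min = getauxval(AT_MINSIGSTKSZ);
	size_t sz = min ? (size_t)min : MINSIGSTKSZ;

	stack_t ss = {
		.ss_sp = malloc(sz),
		.ss_size = sz,
		.ss_flags = 0,
	};
	if (!ss.ss_sp || sigaltstack(&ss, NULL) < 0) {
		perror("sigaltstack");
		return 1;
	}
	printf("alt stack of %zu bytes installed\n", sz);
	return 0;
}
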
diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h
index 0ae34c85ef9e..ea4692c339ce 100644
--- a/include/uapi/linux/batadv_packet.h
+++ b/include/uapi/linux/batadv_packet.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) */
-/* Copyright (C) 2007-2020 B.A.T.M.A.N. contributors:
+/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*/
@@ -72,8 +72,8 @@ enum batadv_subtype {
/**
* enum batadv_iv_flags - flags used in B.A.T.M.A.N. IV OGM packets
- * @BATADV_NOT_BEST_NEXT_HOP: flag is set when ogm packet is forwarded and was
- * previously received from someone else than the best neighbor.
+ * @BATADV_NOT_BEST_NEXT_HOP: flag is set when the ogm packet is forwarded and
+ * was previously received from someone other than the best neighbor.
* @BATADV_PRIMARIES_FIRST_HOP: flag unused.
* @BATADV_DIRECTLINK: flag is for the first hop or if rebroadcasted from a
* one hop neighbor on the interface where it was originally received.
@@ -195,8 +195,8 @@ struct batadv_bla_claim_dst {
/**
* struct batadv_ogm_packet - ogm (routing protocol) packet
* @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @flags: contains routing relevant flags - see enum batadv_iv_flags
* @seqno: sequence identification
* @orig: address of the source node
@@ -247,7 +247,7 @@ struct batadv_ogm2_packet {
/**
* struct batadv_elp_packet - elp (neighbor discovery) packet
* @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
* @orig: originator mac address
* @seqno: sequence number
* @elp_interval: currently used ELP sending interval in ms
@@ -265,15 +265,15 @@ struct batadv_elp_packet {
/**
* struct batadv_icmp_header - common members among all the ICMP packets
* @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @msg_type: ICMP packet type
* @dst: address of the destination node
* @orig: address of the source node
* @uid: local ICMP socket identifier
* @align: not used - useful for alignment purposes only
*
- * This structure is used for ICMP packets parsing only and it is never sent
+ * This structure is used for ICMP packet parsing only and it is never sent
* over the wire. The alignment field at the end is there to ensure that
* members are padded the same way as they are in real packets.
*/
@@ -291,8 +291,8 @@ struct batadv_icmp_header {
/**
* struct batadv_icmp_packet - ICMP packet
* @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @msg_type: ICMP packet type
* @dst: address of the destination node
* @orig: address of the source node
@@ -315,8 +315,8 @@ struct batadv_icmp_packet {
/**
* struct batadv_icmp_tp_packet - ICMP TP Meter packet
* @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @msg_type: ICMP packet type
* @dst: address of the destination node
* @orig: address of the source node
@@ -358,8 +358,8 @@ enum batadv_icmp_tp_subtype {
/**
* struct batadv_icmp_packet_rr - ICMP RouteRecord packet
* @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @msg_type: ICMP packet type
* @dst: address of the destination node
* @orig: address of the source node
@@ -397,8 +397,8 @@ struct batadv_icmp_packet_rr {
/**
* struct batadv_unicast_packet - unicast packet for network payload
* @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @ttvn: translation table version number
* @dest: originator destination of the unicast packet
*/
@@ -433,8 +433,8 @@ struct batadv_unicast_4addr_packet {
/**
* struct batadv_frag_packet - fragmented packet
* @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @dest: final destination used when routing fragments
* @orig: originator of the fragment used when merging the packet
* @no: fragment number within this sequence
@@ -467,8 +467,8 @@ struct batadv_frag_packet {
/**
* struct batadv_bcast_packet - broadcast packet for network payload
* @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @reserved: reserved byte for alignment
* @seqno: sequence identification
* @orig: originator of the broadcast packet
@@ -488,10 +488,10 @@ struct batadv_bcast_packet {
/**
* struct batadv_coded_packet - network coded packet
* @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @first_source: original source of first included packet
- * @first_orig_dest: original destinal of first included packet
+ * @first_orig_dest: original destination of first included packet
* @first_crc: checksum of first included packet
* @first_ttvn: tt-version number of first included packet
* @second_ttl: ttl of second packet
@@ -523,8 +523,8 @@ struct batadv_coded_packet {
/**
* struct batadv_unicast_tvlv_packet - generic unicast packet with tvlv payload
* @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @reserved: reserved field (for packet alignment)
* @src: address of the source
* @dst: address of the destination
diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h
index 617c180ff0c8..35dc016c9bb4 100644
--- a/include/uapi/linux/batman_adv.h
+++ b/include/uapi/linux/batman_adv.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: MIT */
-/* Copyright (C) 2016-2020 B.A.T.M.A.N. contributors:
+/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Matthias Schiffer
*/
@@ -69,7 +69,7 @@ enum batadv_tt_client_flags {
/**
* @BATADV_TT_CLIENT_TEMP: this global client has been detected to be
- * part of the network but no nnode has already announced it
+ * part of the network but no node has announced it yet
*/
BATADV_TT_CLIENT_TEMP = (1 << 11),
};
@@ -131,7 +131,7 @@ enum batadv_gw_modes {
/** @BATADV_GW_MODE_CLIENT: send DHCP requests to gw servers */
BATADV_GW_MODE_CLIENT,
- /** @BATADV_GW_MODE_SERVER: announce itself as gatway server */
+ /** @BATADV_GW_MODE_SERVER: announce itself as gateway server */
BATADV_GW_MODE_SERVER,
};
@@ -427,7 +427,8 @@ enum batadv_nl_attrs {
/**
* @BATADV_ATTR_HOP_PENALTY: defines the penalty which will be applied
- * to an originator message's tq-field on every hop.
+ * to an originator message's tq-field on every hop and/or per
+ * hard interface
*/
BATADV_ATTR_HOP_PENALTY,
@@ -674,4 +675,30 @@ enum batadv_tp_meter_reason {
BATADV_TP_REASON_TOO_MANY = 133,
};
+/**
+ * enum batadv_ifla_attrs - batman-adv ifla nested attributes
+ */
+enum batadv_ifla_attrs {
+ /**
+ * @IFLA_BATADV_UNSPEC: unspecified attribute which is not parsed by
+ * rtnetlink
+ */
+ IFLA_BATADV_UNSPEC,
+
+ /**
+ * @IFLA_BATADV_ALGO_NAME: routing algorithm (name) which should be
+ * used by the newly registered batadv net_device.
+ */
+ IFLA_BATADV_ALGO_NAME,
+
+ /* add attributes above here, update the policy in soft-interface.c */
+
+ /**
+ * @__IFLA_BATADV_MAX: internal use
+ */
+ __IFLA_BATADV_MAX,
+};
+
+#define IFLA_BATADV_MAX (__IFLA_BATADV_MAX - 1)
+
#endif /* _UAPI_LINUX_BATMAN_ADV_H_ */
diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
deleted file mode 100644
index 9a1965c6c3d0..000000000000
--- a/include/uapi/linux/bcache.h
+++ /dev/null
@@ -1,429 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _LINUX_BCACHE_H
-#define _LINUX_BCACHE_H
-
-/*
- * Bcache on disk data structures
- */
-
-#include <linux/types.h>
-
-#define BITMASK(name, type, field, offset, size) \
-static inline __u64 name(const type *k) \
-{ return (k->field >> offset) & ~(~0ULL << size); } \
- \
-static inline void SET_##name(type *k, __u64 v) \
-{ \
- k->field &= ~(~(~0ULL << size) << offset); \
- k->field |= (v & ~(~0ULL << size)) << offset; \
-}
-
-/* Btree keys - all units are in sectors */
-
-struct bkey {
- __u64 high;
- __u64 low;
- __u64 ptr[];
-};
-
-#define KEY_FIELD(name, field, offset, size) \
- BITMASK(name, struct bkey, field, offset, size)
-
-#define PTR_FIELD(name, offset, size) \
-static inline __u64 name(const struct bkey *k, unsigned int i) \
-{ return (k->ptr[i] >> offset) & ~(~0ULL << size); } \
- \
-static inline void SET_##name(struct bkey *k, unsigned int i, __u64 v) \
-{ \
- k->ptr[i] &= ~(~(~0ULL << size) << offset); \
- k->ptr[i] |= (v & ~(~0ULL << size)) << offset; \
-}
-
-#define KEY_SIZE_BITS 16
-#define KEY_MAX_U64S 8
-
-KEY_FIELD(KEY_PTRS, high, 60, 3)
-KEY_FIELD(HEADER_SIZE, high, 58, 2)
-KEY_FIELD(KEY_CSUM, high, 56, 2)
-KEY_FIELD(KEY_PINNED, high, 55, 1)
-KEY_FIELD(KEY_DIRTY, high, 36, 1)
-
-KEY_FIELD(KEY_SIZE, high, 20, KEY_SIZE_BITS)
-KEY_FIELD(KEY_INODE, high, 0, 20)
-
-/* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */
-
-static inline __u64 KEY_OFFSET(const struct bkey *k)
-{
- return k->low;
-}
-
-static inline void SET_KEY_OFFSET(struct bkey *k, __u64 v)
-{
- k->low = v;
-}
-
-/*
- * The high bit being set is a relic from when we used it to do binary
- * searches - it told you where a key started. It's not used anymore,
- * and can probably be safely dropped.
- */
-#define KEY(inode, offset, size) \
-((struct bkey) { \
- .high = (1ULL << 63) | ((__u64) (size) << 20) | (inode), \
- .low = (offset) \
-})
-
-#define ZERO_KEY KEY(0, 0, 0)
-
-#define MAX_KEY_INODE (~(~0 << 20))
-#define MAX_KEY_OFFSET (~0ULL >> 1)
-#define MAX_KEY KEY(MAX_KEY_INODE, MAX_KEY_OFFSET, 0)
-
-#define KEY_START(k) (KEY_OFFSET(k) - KEY_SIZE(k))
-#define START_KEY(k) KEY(KEY_INODE(k), KEY_START(k), 0)
-
-#define PTR_DEV_BITS 12
-
-PTR_FIELD(PTR_DEV, 51, PTR_DEV_BITS)
-PTR_FIELD(PTR_OFFSET, 8, 43)
-PTR_FIELD(PTR_GEN, 0, 8)
-
-#define PTR_CHECK_DEV ((1 << PTR_DEV_BITS) - 1)
-
-#define MAKE_PTR(gen, offset, dev) \
- ((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
-
-/* Bkey utility code */
-
-static inline unsigned long bkey_u64s(const struct bkey *k)
-{
- return (sizeof(struct bkey) / sizeof(__u64)) + KEY_PTRS(k);
-}
-
-static inline unsigned long bkey_bytes(const struct bkey *k)
-{
- return bkey_u64s(k) * sizeof(__u64);
-}
-
-#define bkey_copy(_dest, _src) memcpy(_dest, _src, bkey_bytes(_src))
-
-static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
-{
- SET_KEY_INODE(dest, KEY_INODE(src));
- SET_KEY_OFFSET(dest, KEY_OFFSET(src));
-}
-
-static inline struct bkey *bkey_next(const struct bkey *k)
-{
- __u64 *d = (void *) k;
-
- return (struct bkey *) (d + bkey_u64s(k));
-}
-
-static inline struct bkey *bkey_idx(const struct bkey *k, unsigned int nr_keys)
-{
- __u64 *d = (void *) k;
-
- return (struct bkey *) (d + nr_keys);
-}
-/* Enough for a key with 6 pointers */
-#define BKEY_PAD 8
-
-#define BKEY_PADDED(key) \
- union { struct bkey key; __u64 key ## _pad[BKEY_PAD]; }
-
-/* Superblock */
-
-/* Version 0: Cache device
- * Version 1: Backing device
- * Version 2: Seed pointer into btree node checksum
- * Version 3: Cache device with new UUID format
- * Version 4: Backing device with data offset
- */
-#define BCACHE_SB_VERSION_CDEV 0
-#define BCACHE_SB_VERSION_BDEV 1
-#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3
-#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
-#define BCACHE_SB_MAX_VERSION 4
-
-#define SB_SECTOR 8
-#define SB_OFFSET (SB_SECTOR << SECTOR_SHIFT)
-#define SB_SIZE 4096
-#define SB_LABEL_SIZE 32
-#define SB_JOURNAL_BUCKETS 256U
-/* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */
-#define MAX_CACHES_PER_SET 8
-
-#define BDEV_DATA_START_DEFAULT 16 /* sectors */
-
-struct cache_sb_disk {
- __le64 csum;
- __le64 offset; /* sector where this sb was written */
- __le64 version;
-
- __u8 magic[16];
-
- __u8 uuid[16];
- union {
- __u8 set_uuid[16];
- __le64 set_magic;
- };
- __u8 label[SB_LABEL_SIZE];
-
- __le64 flags;
- __le64 seq;
- __le64 pad[8];
-
- union {
- struct {
- /* Cache devices */
- __le64 nbuckets; /* device size */
-
- __le16 block_size; /* sectors */
- __le16 bucket_size; /* sectors */
-
- __le16 nr_in_set;
- __le16 nr_this_dev;
- };
- struct {
- /* Backing devices */
- __le64 data_offset;
-
- /*
- * block_size from the cache device section is still used by
- * backing devices, so don't add anything here until we fix
- * things to not need it for backing devices anymore
- */
- };
- };
-
- __le32 last_mount; /* time overflow in y2106 */
-
- __le16 first_bucket;
- union {
- __le16 njournal_buckets;
- __le16 keys;
- };
- __le64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
-};
-
-struct cache_sb {
- __u64 csum;
- __u64 offset; /* sector where this sb was written */
- __u64 version;
-
- __u8 magic[16];
-
- __u8 uuid[16];
- union {
- __u8 set_uuid[16];
- __u64 set_magic;
- };
- __u8 label[SB_LABEL_SIZE];
-
- __u64 flags;
- __u64 seq;
- __u64 pad[8];
-
- union {
- struct {
- /* Cache devices */
- __u64 nbuckets; /* device size */
-
- __u16 block_size; /* sectors */
- __u16 bucket_size; /* sectors */
-
- __u16 nr_in_set;
- __u16 nr_this_dev;
- };
- struct {
- /* Backing devices */
- __u64 data_offset;
-
- /*
- * block_size from the cache device section is still used by
- * backing devices, so don't add anything here until we fix
- * things to not need it for backing devices anymore
- */
- };
- };
-
- __u32 last_mount; /* time overflow in y2106 */
-
- __u16 first_bucket;
- union {
- __u16 njournal_buckets;
- __u16 keys;
- };
- __u64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
-};
-
-static inline _Bool SB_IS_BDEV(const struct cache_sb *sb)
-{
- return sb->version == BCACHE_SB_VERSION_BDEV
- || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
-}
-
-BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1);
-BITMASK(CACHE_DISCARD, struct cache_sb, flags, 1, 1);
-BITMASK(CACHE_REPLACEMENT, struct cache_sb, flags, 2, 3);
-#define CACHE_REPLACEMENT_LRU 0U
-#define CACHE_REPLACEMENT_FIFO 1U
-#define CACHE_REPLACEMENT_RANDOM 2U
-
-BITMASK(BDEV_CACHE_MODE, struct cache_sb, flags, 0, 4);
-#define CACHE_MODE_WRITETHROUGH 0U
-#define CACHE_MODE_WRITEBACK 1U
-#define CACHE_MODE_WRITEAROUND 2U
-#define CACHE_MODE_NONE 3U
-BITMASK(BDEV_STATE, struct cache_sb, flags, 61, 2);
-#define BDEV_STATE_NONE 0U
-#define BDEV_STATE_CLEAN 1U
-#define BDEV_STATE_DIRTY 2U
-#define BDEV_STATE_STALE 3U
-
-/*
- * Magic numbers
- *
- * The various other data structures have their own magic numbers, which are
- * xored with the first part of the cache set's UUID
- */
-
-#define JSET_MAGIC 0x245235c1a3625032ULL
-#define PSET_MAGIC 0x6750e15f87337f91ULL
-#define BSET_MAGIC 0x90135c78b99e07f5ULL
-
-static inline __u64 jset_magic(struct cache_sb *sb)
-{
- return sb->set_magic ^ JSET_MAGIC;
-}
-
-static inline __u64 pset_magic(struct cache_sb *sb)
-{
- return sb->set_magic ^ PSET_MAGIC;
-}
-
-static inline __u64 bset_magic(struct cache_sb *sb)
-{
- return sb->set_magic ^ BSET_MAGIC;
-}
-
-/*
- * Journal
- *
- * On disk format for a journal entry:
- * seq is monotonically increasing; every journal entry has its own unique
- * sequence number.
- *
- * last_seq is the oldest journal entry that still has keys the btree hasn't
- * flushed to disk yet.
- *
- * version is for on disk format changes.
- */
-
-#define BCACHE_JSET_VERSION_UUIDv1 1
-#define BCACHE_JSET_VERSION_UUID 1 /* Always latest UUID format */
-#define BCACHE_JSET_VERSION 1
-
-struct jset {
- __u64 csum;
- __u64 magic;
- __u64 seq;
- __u32 version;
- __u32 keys;
-
- __u64 last_seq;
-
- BKEY_PADDED(uuid_bucket);
- BKEY_PADDED(btree_root);
- __u16 btree_level;
- __u16 pad[3];
-
- __u64 prio_bucket[MAX_CACHES_PER_SET];
-
- union {
- struct bkey start[0];
- __u64 d[0];
- };
-};
-
-/* Bucket prios/gens */
-
-struct prio_set {
- __u64 csum;
- __u64 magic;
- __u64 seq;
- __u32 version;
- __u32 pad;
-
- __u64 next_bucket;
-
- struct bucket_disk {
- __u16 prio;
- __u8 gen;
- } __attribute((packed)) data[];
-};
-
-/* UUIDS - per backing device/flash only volume metadata */
-
-struct uuid_entry {
- union {
- struct {
- __u8 uuid[16];
- __u8 label[32];
- __u32 first_reg; /* time overflow in y2106 */
- __u32 last_reg;
- __u32 invalidated;
-
- __u32 flags;
- /* Size of flash only volumes */
- __u64 sectors;
- };
-
- __u8 pad[128];
- };
-};
-
-BITMASK(UUID_FLASH_ONLY, struct uuid_entry, flags, 0, 1);
-
-/* Btree nodes */
-
-/* Version 1: Seed pointer into btree node checksum
- */
-#define BCACHE_BSET_CSUM 1
-#define BCACHE_BSET_VERSION 1
-
-/*
- * Btree nodes
- *
- * On disk a btree node is a list/log of these; within each set the keys are
- * sorted
- */
-struct bset {
- __u64 csum;
- __u64 magic;
- __u64 seq;
- __u32 version;
- __u32 keys;
-
- union {
- struct bkey start[0];
- __u64 d[0];
- };
-};
-
-/* OBSOLETE */
-
-/* UUIDS - per backing device/flash only volume metadata */
-
-struct uuid_entry_v0 {
- __u8 uuid[16];
- __u8 label[32];
- __u32 first_reg;
- __u32 last_reg;
- __u32 invalidated;
- __u32 pad;
-};
-
-#endif /* _LINUX_BCACHE_H */
diff --git a/include/uapi/linux/binfmts.h b/include/uapi/linux/binfmts.h
index 689025d9c185..c6f9450efc12 100644
--- a/include/uapi/linux/binfmts.h
+++ b/include/uapi/linux/binfmts.h
@@ -18,4 +18,8 @@ struct pt_regs;
/* sizeof(linux_binprm->buf) */
#define BINPRM_BUF_SIZE 256
+/* preserve argv0 for the interpreter */
+#define AT_FLAGS_PRESERVE_ARGV0_BIT 0
+#define AT_FLAGS_PRESERVE_ARGV0 (1 << AT_FLAGS_PRESERVE_ARGV0_BIT)
+
#endif /* _UAPI_LINUX_BINFMTS_H */
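
The new bit is delivered to binfmt_misc interpreters through the AT_FLAGS auxiliary-vector entry, so an interpreter can tell whether the kernel preserved the original argv[0]. A minimal check, assuming glibc's getauxval():

/* Hedged sketch: a binfmt_misc interpreter checking whether argv[0] was
 * preserved (the 'P' flag in the binfmt_misc registration).
 */
#include <stdio.h>
#include <sys/auxv.h>
#include <linux/binfmts.h>

int main(int argc, char **argv)
{
	unsigned long flags = getauxval(AT_FLAGS);

	(void)argc;
	if (flags & AT_FLAGS_PRESERVE_ARGV0)
		printf("argv[0] preserved by binfmt_misc: %s\n", argv[0]);
	else
		printf("argv[0] rewritten to the interpreted file\n");
	return 0;
}
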
diff --git a/include/uapi/linux/blkpg.h b/include/uapi/linux/blkpg.h
index ac6474e4f29d..d0a64ee97c6d 100644
--- a/include/uapi/linux/blkpg.h
+++ b/include/uapi/linux/blkpg.h
@@ -2,29 +2,6 @@
#ifndef _UAPI__LINUX_BLKPG_H
#define _UAPI__LINUX_BLKPG_H
-/*
- * Partition table and disk geometry handling
- *
- * A single ioctl with lots of subfunctions:
- *
- * Device number stuff:
- * get_whole_disk() (given the device number of a partition,
- * find the device number of the encompassing disk)
- * get_all_partitions() (given the device number of a disk, return the
- * device numbers of all its known partitions)
- *
- * Partition stuff:
- * add_partition()
- * delete_partition()
- * test_partition_in_use() (also for test_disk_in_use)
- *
- * Geometry stuff:
- * get_geometry()
- * set_geometry()
- * get_bios_drivedata()
- *
- * For today, only the partition stuff - aeb, 990515
- */
#include <linux/compiler.h>
#include <linux/ioctl.h>
@@ -52,9 +29,8 @@ struct blkpg_partition {
long long start; /* starting offset in bytes */
long long length; /* length in bytes */
int pno; /* partition number */
- char devname[BLKPG_DEVNAMELTH]; /* partition name, like sda5 or c0d1p2,
- to be used in kernel messages */
- char volname[BLKPG_VOLNAMELTH]; /* volume label */
+ char devname[BLKPG_DEVNAMELTH]; /* unused / ignored */
+ char volname[BLKPG_VOLNAMELTH]; /* unused / ignored */
};
#endif /* _UAPI__LINUX_BLKPG_H */
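
With devname and volname now ignored, only the numeric fields of blkpg_partition matter to the kernel. A hedged sketch of adding a partition through the BLKPG ioctl; struct blkpg_ioctl_arg and BLKPG_ADD_PARTITION come from the part of this header not shown in the hunk, and the device path is a placeholder:

/* Hedged sketch: add partition 1 covering bytes [1 MiB, 129 MiB). The
 * devname/volname fields stay zeroed since the kernel ignores them.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkpg.h>

int main(void)
{
	int fd = open("/dev/sdX", O_RDWR | O_CLOEXEC);	/* placeholder device */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	struct blkpg_partition part;
	memset(&part, 0, sizeof(part));
	part.pno    = 1;
	part.start  = 1LL << 20;	/* byte offset */
	part.length = 128LL << 20;	/* byte length */

	struct blkpg_ioctl_arg arg = {
		.op      = BLKPG_ADD_PARTITION,
		.datalen = sizeof(part),
		.data    = &part,
	};
	if (ioctl(fd, BLKPG, &arg) < 0)
		perror("BLKPG");

	close(fd);
	return 0;
}
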
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
index 0cdef67135f0..b80fcc9ea525 100644
--- a/include/uapi/linux/blkzoned.h
+++ b/include/uapi/linux/blkzoned.h
@@ -74,6 +74,15 @@ enum blk_zone_cond {
};
/**
+ * enum blk_zone_report_flags - Feature flags of reported zone descriptors.
+ *
+ * @BLK_ZONE_REP_CAPACITY: Zone descriptor has capacity field.
+ */
+enum blk_zone_report_flags {
+ BLK_ZONE_REP_CAPACITY = (1 << 0),
+};
+
+/**
* struct blk_zone - Zone descriptor for BLKREPORTZONE ioctl.
*
* @start: Zone start in 512 B sector units
@@ -84,12 +93,15 @@ enum blk_zone_cond {
* @non_seq: Flag indicating that the zone is using non-sequential resources
* (for host-aware zoned block devices only).
* @reset: Flag indicating that a zone reset is recommended.
- * @reserved: Padding to 64 B to match the ZBC/ZAC defined zone descriptor size.
+ * @resv: Padding for 8B alignment.
+ * @capacity: Zone usable capacity in 512 B sector units
+ * @reserved: Padding to 64 B to match the ZBC, ZAC and ZNS defined zone
+ * descriptor size.
*
- * start, len and wp use the regular 512 B sector unit, regardless of the
- * device logical block size. The overall structure size is 64 B to match the
- * ZBC/ZAC defined zone descriptor and allow support for future additional
- * zone information.
+ * start, len, capacity and wp use the regular 512 B sector unit, regardless
+ * of the device logical block size. The overall structure size is 64 B to
+ * match the ZBC, ZAC and ZNS defined zone descriptor and allow support for
+ * future additional zone information.
*/
struct blk_zone {
__u64 start; /* Zone start sector */
@@ -99,7 +111,9 @@ struct blk_zone {
__u8 cond; /* Zone condition */
__u8 non_seq; /* Non-sequential write resources active */
__u8 reset; /* Reset write pointer recommended */
- __u8 reserved[36];
+ __u8 resv[4];
+ __u64 capacity; /* Zone capacity in number of sectors */
+ __u8 reserved[24];
};
/**
@@ -107,7 +121,7 @@ struct blk_zone {
*
* @sector: starting sector of report
* @nr_zones: IN maximum / OUT actual
- * @reserved: padding to 16 byte alignment
+ * @flags: one or more flags as defined by enum blk_zone_report_flags.
* @zones: Space to hold @nr_zones @zones entries on reply.
*
* The array of at most @nr_zones must follow this structure in memory.
@@ -115,8 +129,8 @@ struct blk_zone {
struct blk_zone_report {
__u64 sector;
__u32 nr_zones;
- __u8 reserved[4];
- struct blk_zone zones[0];
+ __u32 flags;
+ struct blk_zone zones[];
};
/**
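
Because the old reserved bytes became a flags word, callers should test BLK_ZONE_REP_CAPACITY before reading the new capacity field. A hedged sketch of issuing BLKREPORTZONE (the ioctl number is defined later in this header) with a small caller-allocated zone array and a placeholder device path:

/* Hedged sketch: report the first few zones of a zoned block device and
 * print capacity only when the kernel sets BLK_ZONE_REP_CAPACITY.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

#define NR_ZONES 4

int main(void)
{
	int fd = open("/dev/nvme0n1", O_RDONLY);	/* placeholder device */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	size_t sz = sizeof(struct blk_zone_report) +
		    NR_ZONES * sizeof(struct blk_zone);
	struct blk_zone_report *rep = calloc(1, sz);
	if (!rep)
		return 1;
	rep->sector = 0;
	rep->nr_zones = NR_ZONES;

	if (ioctl(fd, BLKREPORTZONE, rep) < 0) {
		perror("BLKREPORTZONE");
		return 1;
	}

	for (__u32 i = 0; i < rep->nr_zones; i++) {
		struct blk_zone *z = &rep->zones[i];

		printf("zone @%llu len=%llu", (unsigned long long)z->start,
		       (unsigned long long)z->len);
		if (rep->flags & BLK_ZONE_REP_CAPACITY)
			printf(" cap=%llu", (unsigned long long)z->capacity);
		printf("\n");
	}
	free(rep);
	close(fd);
	return 0;
}
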
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 22f235260a3a..51b9aa640ad2 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -19,7 +19,8 @@
/* ld/ldx fields */
#define BPF_DW 0x18 /* double word (64-bit) */
-#define BPF_XADD 0xc0 /* exclusive add */
+#define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */
+#define BPF_XADD 0xc0 /* exclusive add - legacy name */
/* alu/jmp fields */
#define BPF_MOV 0xb0 /* mov reg to reg */
@@ -43,6 +44,11 @@
#define BPF_CALL 0x80 /* function call */
#define BPF_EXIT 0x90 /* function return */
+/* atomic op type fields (stored in immediate) */
+#define BPF_FETCH 0x01 /* not an opcode on its own, used to build others */
+#define BPF_XCHG (0xe0 | BPF_FETCH) /* atomic exchange */
+#define BPF_CMPXCHG (0xf0 | BPF_FETCH) /* atomic compare-and-write */
+
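
Under BPF_ATOMIC the opcode byte only says "atomic memory op"; the particular operation travels in insn->imm, so the legacy BPF_XADD encoding becomes BPF_ADD in the immediate and the fetching variants are built by OR-ing in BPF_FETCH. A hedged sketch of hand-encoding the new forms (struct bpf_insn and the BPF_STX/BPF_ADD/BPF_DW constants are defined elsewhere in these headers):

/* Hedged sketch: hand-encode BPF_ATOMIC instructions. The operation is
 * carried in .imm; the opcode byte is just BPF_STX|BPF_ATOMIC|size.
 */
#include <linux/bpf.h>

/* *(u64 *)(dst + off) += src  -- the legacy BPF_XADD semantics */
struct bpf_insn atomic_add64(__u8 dst, __u8 src, __s16 off)
{
	return (struct bpf_insn) {
		.code    = BPF_STX | BPF_ATOMIC | BPF_DW,
		.dst_reg = dst,
		.src_reg = src,
		.off     = off,
		.imm     = BPF_ADD,	/* no BPF_FETCH: old value discarded */
	};
}

/* src = atomic_xchg(dst + off, src) */
struct bpf_insn atomic_xchg64(__u8 dst, __u8 src, __s16 off)
{
	return (struct bpf_insn) {
		.code    = BPF_STX | BPF_ATOMIC | BPF_DW,
		.dst_reg = dst,
		.src_reg = src,
		.off     = off,
		.imm     = BPF_XCHG,	/* 0xe0 | BPF_FETCH */
	};
}
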
/* Register numbers */
enum {
BPF_REG_0 = 0,
@@ -78,10 +84,784 @@ struct bpf_lpm_trie_key {
struct bpf_cgroup_storage_key {
__u64 cgroup_inode_id; /* cgroup inode id */
- __u32 attach_type; /* program attach type */
+ __u32 attach_type; /* program attach type (enum bpf_attach_type) */
+};
+
+enum bpf_cgroup_iter_order {
+ BPF_CGROUP_ITER_ORDER_UNSPEC = 0,
+ BPF_CGROUP_ITER_SELF_ONLY, /* process only a single object. */
+ BPF_CGROUP_ITER_DESCENDANTS_PRE, /* walk descendants in pre-order. */
+ BPF_CGROUP_ITER_DESCENDANTS_POST, /* walk descendants in post-order. */
+ BPF_CGROUP_ITER_ANCESTORS_UP, /* walk ancestors upward. */
+};
+
+union bpf_iter_link_info {
+ struct {
+ __u32 map_fd;
+ } map;
+ struct {
+ enum bpf_cgroup_iter_order order;
+
+ /* At most one of cgroup_fd and cgroup_id can be non-zero. If
+ * both are zero, the walk starts from the default cgroup v2
+ * root. For walking v1 hierarchy, one should always explicitly
+ * specify cgroup_fd.
+ */
+ __u32 cgroup_fd;
+ __u64 cgroup_id;
+ } cgroup;
+ /* Parameters of task iterators. */
+ struct {
+ __u32 tid;
+ __u32 pid;
+ __u32 pid_fd;
+ } task;
};
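
For a cgroup iterator, this union selects both the starting point and the walk order. One hedged way to fill it before the LINK_CREATE step (the link_create attributes that carry a pointer to it are omitted here):

/* Hedged sketch: iterator parameters for a pre-order walk of the
 * descendants of a cgroup directory fd. At most one of cgroup_fd and
 * cgroup_id may be non-zero.
 */
#include <string.h>
#include <linux/bpf.h>

union bpf_iter_link_info cgroup_iter_info(int cgroup_fd)
{
	union bpf_iter_link_info info;

	memset(&info, 0, sizeof(info));
	info.cgroup.order = BPF_CGROUP_ITER_DESCENDANTS_PRE;
	info.cgroup.cgroup_fd = cgroup_fd;	/* cgroup_id left 0 */
	return info;
}
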
-/* BPF syscall commands, see bpf(2) man-page for details. */
+/* BPF syscall commands, see bpf(2) man-page for more details. */
+/**
+ * DOC: eBPF Syscall Preamble
+ *
+ * The operation to be performed by the **bpf**\ () system call is determined
+ * by the *cmd* argument. Each operation takes an accompanying argument,
+ * provided via *attr*, which is a pointer to a union of type *bpf_attr* (see
+ * below). The size argument is the size of the union pointed to by *attr*.
+ */
+/**
+ * DOC: eBPF Syscall Commands
+ *
+ * BPF_MAP_CREATE
+ * Description
+ * Create a map and return a file descriptor that refers to the
+ * map. The close-on-exec file descriptor flag (see **fcntl**\ (2))
+ * is automatically enabled for the new file descriptor.
+ *
+ * Applying **close**\ (2) to the file descriptor returned by
+ * **BPF_MAP_CREATE** will delete the map (but see NOTES).
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_MAP_LOOKUP_ELEM
+ * Description
+ * Look up an element with a given *key* in the map referred to
+ * by the file descriptor *map_fd*.
+ *
+ * The *flags* argument may be specified as one of the
+ * following:
+ *
+ * **BPF_F_LOCK**
+ * Look up the value of a spin-locked map without
+ * returning the lock. This must be specified if the
+ * elements contain a spinlock.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_MAP_UPDATE_ELEM
+ * Description
+ * Create or update an element (key/value pair) in a specified map.
+ *
+ * The *flags* argument should be specified as one of the
+ * following:
+ *
+ * **BPF_ANY**
+ * Create a new element or update an existing element.
+ * **BPF_NOEXIST**
+ * Create a new element only if it did not exist.
+ * **BPF_EXIST**
+ * Update an existing element.
+ * **BPF_F_LOCK**
+ * Update a spin_lock-ed map element.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**,
+ * **E2BIG**, **EEXIST**, or **ENOENT**.
+ *
+ * **E2BIG**
+ * The number of elements in the map reached the
+ * *max_entries* limit specified at map creation time.
+ * **EEXIST**
+ * If *flags* specifies **BPF_NOEXIST** and the element
+ * with *key* already exists in the map.
+ * **ENOENT**
+ * If *flags* specifies **BPF_EXIST** and the element with
+ * *key* does not exist in the map.
+ *
+ * BPF_MAP_DELETE_ELEM
+ * Description
+ * Look up and delete an element by key in a specified map.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_MAP_GET_NEXT_KEY
+ * Description
+ * Look up an element by key in a specified map and return the key
+ * of the next element. Can be used to iterate over all elements
+ * in the map.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * The following cases can be used to iterate over all elements of
+ * the map:
+ *
+ * * If *key* is not found, the operation returns zero and sets
+ * the *next_key* pointer to the key of the first element.
+ * * If *key* is found, the operation returns zero and sets the
+ * *next_key* pointer to the key of the next element.
+ * * If *key* is the last element, returns -1 and *errno* is set
+ * to **ENOENT**.
+ *
+ * May set *errno* to **ENOMEM**, **EFAULT**, **EPERM**, or
+ * **EINVAL** on error.
+ *
+ * BPF_PROG_LOAD
+ * Description
+ * Verify and load an eBPF program, returning a new file
+ * descriptor associated with the program.
+ *
+ * Applying **close**\ (2) to the file descriptor returned by
+ * **BPF_PROG_LOAD** will unload the eBPF program (but see NOTES).
+ *
+ * The close-on-exec file descriptor flag (see **fcntl**\ (2)) is
+ * automatically enabled for the new file descriptor.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_OBJ_PIN
+ * Description
+ * Pin an eBPF program or map referred by the specified *bpf_fd*
+ * to the provided *pathname* on the filesystem.
+ *
+ * The *pathname* argument must not contain a dot (".").
+ *
+ * On success, *pathname* retains a reference to the eBPF object,
+ * preventing deallocation of the object when the original
+ * *bpf_fd* is closed. This allows the eBPF object to live beyond
+ * **close**\ (\ *bpf_fd*\ ), and hence the lifetime of the parent
+ * process.
+ *
+ * Applying **unlink**\ (2) or similar calls to the *pathname*
+ * unpins the object from the filesystem, removing the reference.
+ * If no other file descriptors or filesystem nodes refer to the
+ * same object, it will be deallocated (see NOTES).
+ *
+ * The filesystem type for the parent directory of *pathname* must
+ * be **BPF_FS_MAGIC**.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_OBJ_GET
+ * Description
+ * Open a file descriptor for the eBPF object pinned to the
+ * specified *pathname*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_PROG_ATTACH
+ * Description
+ * Attach an eBPF program to a *target_fd* at the specified
+ * *attach_type* hook.
+ *
+ * The *attach_type* specifies the eBPF attachment point to
+ * attach the program to, and must be one of *bpf_attach_type*
+ * (see below).
+ *
+ * The *attach_bpf_fd* must be a valid file descriptor for a
+ * loaded eBPF program of a cgroup, flow dissector, LIRC, sockmap
+ * or sock_ops type corresponding to the specified *attach_type*.
+ *
+ * The *target_fd* must be a valid file descriptor for a kernel
+ * object which depends on the attach type of *attach_bpf_fd*:
+ *
+ * **BPF_PROG_TYPE_CGROUP_DEVICE**,
+ * **BPF_PROG_TYPE_CGROUP_SKB**,
+ * **BPF_PROG_TYPE_CGROUP_SOCK**,
+ * **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
+ * **BPF_PROG_TYPE_CGROUP_SOCKOPT**,
+ * **BPF_PROG_TYPE_CGROUP_SYSCTL**,
+ * **BPF_PROG_TYPE_SOCK_OPS**
+ *
+ * Control Group v2 hierarchy with the eBPF controller
+ * enabled. Requires the kernel to be compiled with
+ * **CONFIG_CGROUP_BPF**.
+ *
+ * **BPF_PROG_TYPE_FLOW_DISSECTOR**
+ *
+ * Network namespace (eg /proc/self/ns/net).
+ *
+ * **BPF_PROG_TYPE_LIRC_MODE2**
+ *
+ * LIRC device path (eg /dev/lircN). Requires the kernel
+ * to be compiled with **CONFIG_BPF_LIRC_MODE2**.
+ *
+ * **BPF_PROG_TYPE_SK_SKB**,
+ * **BPF_PROG_TYPE_SK_MSG**
+ *
+ * eBPF map of socket type (eg **BPF_MAP_TYPE_SOCKHASH**).
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_PROG_DETACH
+ * Description
+ * Detach the eBPF program associated with the *target_fd* at the
+ * hook specified by *attach_type*. The program must have been
+ * previously attached using **BPF_PROG_ATTACH**.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_PROG_TEST_RUN
+ * Description
+ * Run the eBPF program associated with the *prog_fd* a *repeat*
+ * number of times against a provided program context *ctx_in* and
+ * data *data_in*, and return the modified program context
+ * *ctx_out*, *data_out* (for example, packet data), result of the
+ * execution *retval*, and *duration* of the test run.
+ *
+ * The sizes of the buffers provided as input and output
+ * parameters *ctx_in*, *ctx_out*, *data_in*, and *data_out* must
+ * be provided in the corresponding variables *ctx_size_in*,
+ * *ctx_size_out*, *data_size_in*, and/or *data_size_out*. If any
+ * of these parameters are not provided (ie set to NULL), the
+ * corresponding size field must be zero.
+ *
+ * Some program types have particular requirements:
+ *
+ * **BPF_PROG_TYPE_SK_LOOKUP**
+ * *data_in* and *data_out* must be NULL.
+ *
+ * **BPF_PROG_TYPE_RAW_TRACEPOINT**,
+ * **BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE**
+ *
+ * *ctx_out*, *data_in* and *data_out* must be NULL.
+ * *repeat* must be zero.
+ *
+ * BPF_PROG_RUN is an alias for BPF_PROG_TEST_RUN.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * **ENOSPC**
+ * Either *data_size_out* or *ctx_size_out* is too small.
+ * **ENOTSUPP**
+ * This command is not supported by the program type of
+ * the program referred to by *prog_fd*.
+ *
+ * BPF_PROG_GET_NEXT_ID
+ * Description
+ * Fetch the next eBPF program currently loaded into the kernel.
+ *
+ * Looks for the eBPF program with an id greater than *start_id*
+ * and updates *next_id* on success. If no other eBPF programs
+ * remain with ids higher than *start_id*, returns -1 and sets
+ * *errno* to **ENOENT**.
+ *
+ * Return
+ * Returns zero on success. On error, or when no id remains, -1
+ * is returned and *errno* is set appropriately.
+ *
+ * BPF_MAP_GET_NEXT_ID
+ * Description
+ * Fetch the next eBPF map currently loaded into the kernel.
+ *
+ * Looks for the eBPF map with an id greater than *start_id*
+ * and updates *next_id* on success. If no other eBPF maps
+ * remain with ids higher than *start_id*, returns -1 and sets
+ * *errno* to **ENOENT**.
+ *
+ * Return
+ * Returns zero on success. On error, or when no id remains, -1
+ * is returned and *errno* is set appropriately.
+ *
+ * BPF_PROG_GET_FD_BY_ID
+ * Description
+ * Open a file descriptor for the eBPF program corresponding to
+ * *prog_id*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_MAP_GET_FD_BY_ID
+ * Description
+ * Open a file descriptor for the eBPF map corresponding to
+ * *map_id*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_OBJ_GET_INFO_BY_FD
+ * Description
+ * Obtain information about the eBPF object corresponding to
+ * *bpf_fd*.
+ *
+ * Populates up to *info_len* bytes of *info*, which will be in
+ * one of the following formats depending on the eBPF object type
+ * of *bpf_fd*:
+ *
+ * * **struct bpf_prog_info**
+ * * **struct bpf_map_info**
+ * * **struct bpf_btf_info**
+ * * **struct bpf_link_info**
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_PROG_QUERY
+ * Description
+ * Obtain information about eBPF programs associated with the
+ * specified *attach_type* hook.
+ *
+ * The *target_fd* must be a valid file descriptor for a kernel
+ * object which depends on the attach type of *attach_bpf_fd*:
+ *
+ * **BPF_PROG_TYPE_CGROUP_DEVICE**,
+ * **BPF_PROG_TYPE_CGROUP_SKB**,
+ * **BPF_PROG_TYPE_CGROUP_SOCK**,
+ * **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
+ * **BPF_PROG_TYPE_CGROUP_SOCKOPT**,
+ * **BPF_PROG_TYPE_CGROUP_SYSCTL**,
+ * **BPF_PROG_TYPE_SOCK_OPS**
+ *
+ * Control Group v2 hierarchy with the eBPF controller
+ * enabled. Requires the kernel to be compiled with
+ * **CONFIG_CGROUP_BPF**.
+ *
+ * **BPF_PROG_TYPE_FLOW_DISSECTOR**
+ *
+ * Network namespace (eg /proc/self/ns/net).
+ *
+ * **BPF_PROG_TYPE_LIRC_MODE2**
+ *
+ * LIRC device path (eg /dev/lircN). Requires the kernel
+ * to be compiled with **CONFIG_BPF_LIRC_MODE2**.
+ *
+ * **BPF_PROG_QUERY** always fetches the number of programs
+ * attached and the *attach_flags* which were used to attach those
+ * programs. Additionally, if *prog_ids* is nonzero and the number
+ * of attached programs is less than *prog_cnt*, populates
+ * *prog_ids* with the eBPF program ids of the programs attached
+ * at *target_fd*.
+ *
+ * The following flags may alter the result:
+ *
+ * **BPF_F_QUERY_EFFECTIVE**
+ * Only return information regarding programs which are
+ * currently effective at the specified *target_fd*.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_RAW_TRACEPOINT_OPEN
+ * Description
+ * Attach an eBPF program to a tracepoint *name* to access kernel
+ * internal arguments of the tracepoint in their raw form.
+ *
+ * The *prog_fd* must be a valid file descriptor associated with
+ * a loaded eBPF program of type **BPF_PROG_TYPE_RAW_TRACEPOINT**.
+ *
+ * No ABI guarantees are made about the content of tracepoint
+ * arguments exposed to the corresponding eBPF program.
+ *
+ * Applying **close**\ (2) to the file descriptor returned by
+ * **BPF_RAW_TRACEPOINT_OPEN** will delete the raw tracepoint (but see
+ * NOTES).
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_BTF_LOAD
+ * Description
+ * Verify and load BPF Type Format (BTF) metadata into the kernel,
+ * returning a new file descriptor associated with the metadata.
+ * BTF is described in more detail at
+ * https://www.kernel.org/doc/html/latest/bpf/btf.html.
+ *
+ * The *btf* parameter must point to valid memory providing
+ * *btf_size* bytes of BTF binary metadata.
+ *
+ * The returned file descriptor can be passed to other **bpf**\ ()
+ * subcommands such as **BPF_PROG_LOAD** or **BPF_MAP_CREATE** to
+ * associate the BTF with those objects.
+ *
+ * Similar to **BPF_PROG_LOAD**, **BPF_BTF_LOAD** has optional
+ * parameters to specify a *btf_log_buf*, *btf_log_size* and
+ * *btf_log_level* which allow the kernel to return freeform log
+ * output regarding the BTF verification process.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_BTF_GET_FD_BY_ID
+ * Description
+ * Open a file descriptor for the BPF Type Format (BTF)
+ * corresponding to *btf_id*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_TASK_FD_QUERY
+ * Description
+ * Obtain information about eBPF programs associated with the
+ * target process identified by *pid* and *fd*.
+ *
+ * If the *pid* and *fd* are associated with a tracepoint, kprobe
+ * or uprobe perf event, then the *prog_id* and *fd_type* will
+ * be populated with the eBPF program id and file descriptor type
+ * of type **bpf_task_fd_type**. If associated with a kprobe or
+ * uprobe, the *probe_offset* and *probe_addr* will also be
+ * populated. Optionally, if *buf* is provided, then up to
+ * *buf_len* bytes of *buf* will be populated with the name of
+ * the tracepoint, kprobe or uprobe.
+ *
+ * The resulting *prog_id* may be introspected in deeper detail
+ * using **BPF_PROG_GET_FD_BY_ID** and **BPF_OBJ_GET_INFO_BY_FD**.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_MAP_LOOKUP_AND_DELETE_ELEM
+ * Description
+ * Look up an element with the given *key* in the map referred to
+ * by the file descriptor *fd*, and if found, delete the element.
+ *
+ * For **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map
+ * types, the *flags* argument needs to be set to 0, but for other
+ * map types, it may be specified as:
+ *
+ * **BPF_F_LOCK**
+ * Look up and delete the value of a spin-locked map
+ * without returning the lock. This must be specified if
+ * the elements contain a spinlock.
+ *
+ * The **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map types
+ * implement this command as a "pop" operation, deleting the top
+ * element rather than one corresponding to *key*.
+ * The *key* and *key_len* parameters should be zeroed when
+ * issuing this operation for these map types.
+ *
+ * This command is only valid for the following map types:
+ * * **BPF_MAP_TYPE_QUEUE**
+ * * **BPF_MAP_TYPE_STACK**
+ * * **BPF_MAP_TYPE_HASH**
+ * * **BPF_MAP_TYPE_PERCPU_HASH**
+ * * **BPF_MAP_TYPE_LRU_HASH**
+ * * **BPF_MAP_TYPE_LRU_PERCPU_HASH**
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_MAP_FREEZE
+ * Description
+ * Freeze the permissions of the specified map.
+ *
+ * Write permissions may be frozen by passing zero *flags*.
+ * Upon success, no future syscall invocations may alter the
+ * map state of *map_fd*. Write operations from eBPF programs
+ * are still possible for a frozen map.
+ *
+ * Not supported for maps of type **BPF_MAP_TYPE_STRUCT_OPS**.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_BTF_GET_NEXT_ID
+ * Description
+ * Fetch the next BPF Type Format (BTF) object currently loaded
+ * into the kernel.
+ *
+ * Looks for the BTF object with an id greater than *start_id*
+ * and updates *next_id* on success. If no other BTF objects
+ * remain with ids higher than *start_id*, returns -1 and sets
+ * *errno* to **ENOENT**.
+ *
+ * Return
+ * Returns zero on success. On error, or when no id remains, -1
+ * is returned and *errno* is set appropriately.
+ *
+ * BPF_MAP_LOOKUP_BATCH
+ * Description
+ * Iterate and fetch multiple elements in a map.
+ *
+ * Two opaque values are used to manage batch operations,
+ * *in_batch* and *out_batch*. Initially, *in_batch* must be set
+ * to NULL to begin the batched operation. After each subsequent
+ * **BPF_MAP_LOOKUP_BATCH**, the caller should pass the resultant
+ * *out_batch* as the *in_batch* for the next operation to
+ * continue iteration from the current point.
+ *
+ * The *keys* and *values* are output parameters which must point
+ * to memory large enough to hold *count* items based on the key
+ * and value size of the map *map_fd*. The *keys* buffer must be
+ * of *key_size* * *count*. The *values* buffer must be of
+ * *value_size* * *count*.
+ *
+ * The *elem_flags* argument may be specified as one of the
+ * following:
+ *
+ * **BPF_F_LOCK**
+ * Look up the value of a spin-locked map without
+ * returning the lock. This must be specified if the
+ * elements contain a spinlock.
+ *
+ * On success, *count* elements from the map are copied into the
+ * user buffer, with the keys copied into *keys* and the values
+ * copied into the corresponding indices in *values*.
+ *
+ * If an error is returned and *errno* is not **EFAULT**, *count*
+ * is set to the number of successfully processed elements.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * May set *errno* to **ENOSPC** to indicate that *keys* or
+ * *values* is too small to dump an entire bucket during
+ * iteration of a hash-based map type.
+ *
+ * BPF_MAP_LOOKUP_AND_DELETE_BATCH
+ * Description
+ * Iterate and delete all elements in a map.
+ *
+ * This operation has the same behavior as
+ * **BPF_MAP_LOOKUP_BATCH** with two exceptions:
+ *
+ * * Every element that is successfully returned is also deleted
+ * from the map. This is at least *count* elements. Note that
+ * *count* is both an input and an output parameter.
+ * * Upon returning with *errno* set to **EFAULT**, up to
+ * *count* elements may be deleted without returning the keys
+ * and values of the deleted elements.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_MAP_UPDATE_BATCH
+ * Description
+ * Update multiple elements in a map by *key*.
+ *
+ * The *keys* and *values* are input parameters which must point
+ * to memory large enough to hold *count* items based on the key
+ * and value size of the map *map_fd*. The *keys* buffer must be
+ * of *key_size* * *count*. The *values* buffer must be of
+ * *value_size* * *count*.
+ *
+ * Each element specified in *keys* is sequentially updated to the
+ * value in the corresponding index in *values*. The *in_batch*
+ * and *out_batch* parameters are ignored and should be zeroed.
+ *
+ * The *elem_flags* argument should be specified as one of the
+ * following:
+ *
+ * **BPF_ANY**
+ * Create new elements or update existing elements.
+ * **BPF_NOEXIST**
+ * Create new elements only if they do not exist.
+ * **BPF_EXIST**
+ * Update existing elements.
+ * **BPF_F_LOCK**
+ * Update spin_lock-ed map elements. This must be
+ * specified if the map value contains a spinlock.
+ *
+ * On success, *count* elements from the map are updated.
+ *
+ * If an error is returned and *errno* is not **EFAULT**, *count*
+ * is set to the number of successfully processed elements.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**, or
+ * **E2BIG**. **E2BIG** indicates that the number of elements in
+ * the map reached the *max_entries* limit specified at map
+ * creation time.
+ *
+ * May set *errno* to one of the following error codes under
+ * specific circumstances:
+ *
+ * **EEXIST**
+ * If *flags* specifies **BPF_NOEXIST** and the element
+ * with *key* already exists in the map.
+ * **ENOENT**
+ * If *flags* specifies **BPF_EXIST** and the element with
+ * *key* does not exist in the map.
+ *
+ * BPF_MAP_DELETE_BATCH
+ * Description
+ * Delete multiple elements in a map by *key*.
+ *
+ * The *keys* parameter is an input parameter which must point
+ * to memory large enough to hold *count* items based on the key
+ * size of the map *map_fd*, that is, *key_size* * *count*.
+ *
+ * Each element specified in *keys* is sequentially deleted. The
+ * *in_batch*, *out_batch*, and *values* parameters are ignored
+ * and should be zeroed.
+ *
+ * The *elem_flags* argument may be specified as one of the
+ * following:
+ *
+ * **BPF_F_LOCK**
+ * Look up the value of a spin-locked map without
+ * returning the lock. This must be specified if the
+ * elements contain a spinlock.
+ *
+ * On success, *count* elements from the map are deleted.
+ *
+ * If an error is returned and *errno* is not **EFAULT**, *count*
+ * is set to the number of successfully processed elements. If
+ * *errno* is **EFAULT**, up to *count* elements may have been
+ * deleted.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_LINK_CREATE
+ * Description
+ * Attach an eBPF program to a *target_fd* at the specified
+ * *attach_type* hook and return a file descriptor handle for
+ * managing the link.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_LINK_UPDATE
+ * Description
+ * Update the eBPF program in the specified *link_fd* to
+ * *new_prog_fd*.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_LINK_GET_FD_BY_ID
+ * Description
+ * Open a file descriptor for the eBPF Link corresponding to
+ * *link_id*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_LINK_GET_NEXT_ID
+ * Description
+ * Fetch the next eBPF link currently loaded into the kernel.
+ *
+ * Looks for the eBPF link with an id greater than *start_id*
+ * and updates *next_id* on success. If no other eBPF links
+ * remain with ids higher than *start_id*, returns -1 and sets
+ * *errno* to **ENOENT**.
+ *
+ * Return
+ * Returns zero on success. On error, or when no id remains, -1
+ * is returned and *errno* is set appropriately.
+ *
+ * BPF_ENABLE_STATS
+ * Description
+ * Enable eBPF runtime statistics gathering.
+ *
+ * Runtime statistics gathering for the eBPF runtime is disabled
+ * by default to minimize the corresponding performance overhead.
+ * This command enables statistics globally.
+ *
+ * Multiple programs may independently enable statistics.
+ * After gathering the desired statistics, eBPF runtime statistics
+ * may be disabled again by calling **close**\ (2) for the file
+ * descriptor returned by this function. Statistics will only be
+ * disabled system-wide when all outstanding file descriptors
+ * returned by prior calls for this subcommand are closed.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_ITER_CREATE
+ * Description
+ * Create an iterator on top of the specified *link_fd* (as
+ * previously created using **BPF_LINK_CREATE**) and return a
+ * file descriptor that can be used to trigger the iteration.
+ *
+ * If the resulting file descriptor is pinned to the filesystem
+ * using **BPF_OBJ_PIN**, then subsequent **read**\ (2) syscalls
+ * for that path will trigger the iterator to read kernel state
+ * using the eBPF program attached to *link_fd*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_LINK_DETACH
+ * Description
+ * Forcefully detach the specified *link_fd* from its
+ * corresponding attachment point.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_PROG_BIND_MAP
+ * Description
+ * Bind a map to the lifetime of an eBPF program.
+ *
+ * The map identified by *map_fd* is bound to the program
+ * identified by *prog_fd* and only released when *prog_fd* is
+ * released. This may be used in cases where metadata should be
+ * associated with a program which otherwise does not contain any
+ * references to the map (for example, embedded in the eBPF
+ * program instructions).
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * NOTES
+ * eBPF objects (maps and programs) can be shared between processes.
+ *
+ * * After **fork**\ (2), the child inherits file descriptors
+ * referring to the same eBPF objects.
+ * * File descriptors referring to eBPF objects can be transferred over
+ * **unix**\ (7) domain sockets.
+ * * File descriptors referring to eBPF objects can be duplicated in the
+ * usual way, using **dup**\ (2) and similar calls.
+ * * File descriptors referring to eBPF objects can be pinned to the
+ * filesystem using the **BPF_OBJ_PIN** command of **bpf**\ (2).
+ *
+ * An eBPF object is deallocated only after all file descriptors referring
+ * to the object have been closed and no references remain pinned to the
+ * filesystem or attached (for example, bound to a program or device).
+ */
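
Since glibc ships no bpf() wrapper, the commands documented above are issued through syscall(2) with a zeroed union bpf_attr, defined further down in this header. A hedged end-to-end sketch of BPF_MAP_CREATE, BPF_MAP_UPDATE_ELEM and BPF_MAP_LOOKUP_ELEM:

/* Hedged sketch of the calling convention documented above: create an
 * array map, write one element, read it back. Error handling is minimal.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static long sys_bpf(int cmd, union bpf_attr *attr)
{
	return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
}

int main(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_ARRAY;
	attr.key_size    = sizeof(__u32);
	attr.value_size  = sizeof(__u64);
	attr.max_entries = 1;

	int map_fd = sys_bpf(BPF_MAP_CREATE, &attr);
	if (map_fd < 0) {
		perror("BPF_MAP_CREATE");
		return 1;
	}

	__u32 key = 0;
	__u64 val = 42, out = 0;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key    = (__u64)(unsigned long)&key;
	attr.value  = (__u64)(unsigned long)&val;
	attr.flags  = BPF_ANY;
	if (sys_bpf(BPF_MAP_UPDATE_ELEM, &attr) < 0) {
		perror("BPF_MAP_UPDATE_ELEM");
		return 1;
	}

	attr.value = (__u64)(unsigned long)&out;
	if (sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr) < 0) {
		perror("BPF_MAP_LOOKUP_ELEM");
		return 1;
	}

	printf("value = %llu\n", (unsigned long long)out);
	close(map_fd);
	return 0;
}
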
enum bpf_cmd {
BPF_MAP_CREATE,
BPF_MAP_LOOKUP_ELEM,
@@ -94,6 +874,7 @@ enum bpf_cmd {
BPF_PROG_ATTACH,
BPF_PROG_DETACH,
BPF_PROG_TEST_RUN,
+ BPF_PROG_RUN = BPF_PROG_TEST_RUN,
BPF_PROG_GET_NEXT_ID,
BPF_MAP_GET_NEXT_ID,
BPF_PROG_GET_FD_BY_ID,
@@ -111,6 +892,14 @@ enum bpf_cmd {
BPF_MAP_LOOKUP_AND_DELETE_BATCH,
BPF_MAP_UPDATE_BATCH,
BPF_MAP_DELETE_BATCH,
+ BPF_LINK_CREATE,
+ BPF_LINK_UPDATE,
+ BPF_LINK_GET_FD_BY_ID,
+ BPF_LINK_GET_NEXT_ID,
+ BPF_ENABLE_STATS,
+ BPF_ITER_CREATE,
+ BPF_LINK_DETACH,
+ BPF_PROG_BIND_MAP,
};
enum bpf_map_type {
@@ -141,6 +930,11 @@ enum bpf_map_type {
BPF_MAP_TYPE_SK_STORAGE,
BPF_MAP_TYPE_DEVMAP_HASH,
BPF_MAP_TYPE_STRUCT_OPS,
+ BPF_MAP_TYPE_RINGBUF,
+ BPF_MAP_TYPE_INODE_STORAGE,
+ BPF_MAP_TYPE_TASK_STORAGE,
+ BPF_MAP_TYPE_BLOOM_FILTER,
+ BPF_MAP_TYPE_USER_RINGBUF,
};
/* Note that tracing related programs such as
@@ -181,6 +975,9 @@ enum bpf_prog_type {
BPF_PROG_TYPE_TRACING,
BPF_PROG_TYPE_STRUCT_OPS,
BPF_PROG_TYPE_EXT,
+ BPF_PROG_TYPE_LSM,
+ BPF_PROG_TYPE_SK_LOOKUP,
+ BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
};
enum bpf_attach_type {
@@ -210,11 +1007,44 @@ enum bpf_attach_type {
BPF_TRACE_RAW_TP,
BPF_TRACE_FENTRY,
BPF_TRACE_FEXIT,
+ BPF_MODIFY_RETURN,
+ BPF_LSM_MAC,
+ BPF_TRACE_ITER,
+ BPF_CGROUP_INET4_GETPEERNAME,
+ BPF_CGROUP_INET6_GETPEERNAME,
+ BPF_CGROUP_INET4_GETSOCKNAME,
+ BPF_CGROUP_INET6_GETSOCKNAME,
+ BPF_XDP_DEVMAP,
+ BPF_CGROUP_INET_SOCK_RELEASE,
+ BPF_XDP_CPUMAP,
+ BPF_SK_LOOKUP,
+ BPF_XDP,
+ BPF_SK_SKB_VERDICT,
+ BPF_SK_REUSEPORT_SELECT,
+ BPF_SK_REUSEPORT_SELECT_OR_MIGRATE,
+ BPF_PERF_EVENT,
+ BPF_TRACE_KPROBE_MULTI,
+ BPF_LSM_CGROUP,
__MAX_BPF_ATTACH_TYPE
};
#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
+enum bpf_link_type {
+ BPF_LINK_TYPE_UNSPEC = 0,
+ BPF_LINK_TYPE_RAW_TRACEPOINT = 1,
+ BPF_LINK_TYPE_TRACING = 2,
+ BPF_LINK_TYPE_CGROUP = 3,
+ BPF_LINK_TYPE_ITER = 4,
+ BPF_LINK_TYPE_NETNS = 5,
+ BPF_LINK_TYPE_XDP = 6,
+ BPF_LINK_TYPE_PERF_EVENT = 7,
+ BPF_LINK_TYPE_KPROBE_MULTI = 8,
+ BPF_LINK_TYPE_STRUCT_OPS = 9,
+
+ MAX_BPF_LINK_TYPE,
+};
+
/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
*
* NONE(default): No further bpf programs allowed in the subtree.
@@ -305,73 +1135,147 @@ enum bpf_attach_type {
/* The verifier internal test flag. Behavior is undefined */
#define BPF_F_TEST_STATE_FREQ (1U << 3)
+/* If BPF_F_SLEEPABLE is used in BPF_PROG_LOAD command, the verifier will
+ * restrict map and helper usage for such programs. Sleepable BPF programs can
+ * only be attached to hooks where kernel execution context allows sleeping.
+ * Such programs are allowed to use helpers that may sleep, such as
+ * bpf_copy_from_user().
+ */
+#define BPF_F_SLEEPABLE (1U << 4)
+
+/* If BPF_F_XDP_HAS_FRAGS is used in BPF_PROG_LOAD command, the loaded program
+ * fully supports xdp frags.
+ */
+#define BPF_F_XDP_HAS_FRAGS (1U << 5)
+
+/* link_create.kprobe_multi.flags used in LINK_CREATE command for
+ * BPF_TRACE_KPROBE_MULTI attach type to create return probe.
+ */
+#define BPF_F_KPROBE_MULTI_RETURN (1U << 0)
+
/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
- * two extensions:
- *
- * insn[0].src_reg: BPF_PSEUDO_MAP_FD BPF_PSEUDO_MAP_VALUE
- * insn[0].imm: map fd map fd
- * insn[1].imm: 0 offset into value
- * insn[0].off: 0 0
- * insn[1].off: 0 0
- * ldimm64 rewrite: address of map address of map[0]+offset
- * verifier type: CONST_PTR_TO_MAP PTR_TO_MAP_VALUE
+ * the following extensions:
+ *
+ * insn[0].src_reg: BPF_PSEUDO_MAP_[FD|IDX]
+ * insn[0].imm: map fd or fd_idx
+ * insn[1].imm: 0
+ * insn[0].off: 0
+ * insn[1].off: 0
+ * ldimm64 rewrite: address of map
+ * verifier type: CONST_PTR_TO_MAP
*/
#define BPF_PSEUDO_MAP_FD 1
-#define BPF_PSEUDO_MAP_VALUE 2
+#define BPF_PSEUDO_MAP_IDX 5
+
+/* insn[0].src_reg: BPF_PSEUDO_MAP_[IDX_]VALUE
+ * insn[0].imm: map fd or fd_idx
+ * insn[1].imm: offset into value
+ * insn[0].off: 0
+ * insn[1].off: 0
+ * ldimm64 rewrite: address of map[0]+offset
+ * verifier type: PTR_TO_MAP_VALUE
+ */
+#define BPF_PSEUDO_MAP_VALUE 2
+#define BPF_PSEUDO_MAP_IDX_VALUE 6
+
+/* insn[0].src_reg: BPF_PSEUDO_BTF_ID
+ * insn[0].imm: kernel btf id of VAR
+ * insn[1].imm: 0
+ * insn[0].off: 0
+ * insn[1].off: 0
+ * ldimm64 rewrite: address of the kernel variable
+ * verifier type: PTR_TO_BTF_ID or PTR_TO_MEM, depending on whether the var
+ * is struct/union.
+ */
+#define BPF_PSEUDO_BTF_ID 3
+/* insn[0].src_reg: BPF_PSEUDO_FUNC
+ * insn[0].imm: insn offset to the func
+ * insn[1].imm: 0
+ * insn[0].off: 0
+ * insn[1].off: 0
+ * ldimm64 rewrite: address of the function
+ * verifier type: PTR_TO_FUNC.
+ */
+#define BPF_PSEUDO_FUNC 4
/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
* offset to another bpf function
*/
#define BPF_PSEUDO_CALL 1
+/* when bpf_call->src_reg == BPF_PSEUDO_KFUNC_CALL,
+ * bpf_call->imm == btf_id of a BTF_KIND_FUNC in the running kernel
+ */
+#define BPF_PSEUDO_KFUNC_CALL 2
/* flags for BPF_MAP_UPDATE_ELEM command */
-#define BPF_ANY 0 /* create new element or update existing */
-#define BPF_NOEXIST 1 /* create new element if it didn't exist */
-#define BPF_EXIST 2 /* update existing element */
-#define BPF_F_LOCK 4 /* spin_lock-ed map_lookup/map_update */
+enum {
+ BPF_ANY = 0, /* create new element or update existing */
+ BPF_NOEXIST = 1, /* create new element if it didn't exist */
+ BPF_EXIST = 2, /* update existing element */
+ BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */
+};
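/* For example, creating an entry and falling back to an in-place update
 * when it already exists could look as follows (a sketch; map_fd comes
 * from a prior BPF_MAP_CREATE, and <errno.h> is needed in addition to
 * the includes above):
 *
 *	union bpf_attr attr;
 *	int key = 1;
 *	long value = 42;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.map_fd = map_fd;
 *	attr.key = (__u64)(unsigned long)&key;
 *	attr.value = (__u64)(unsigned long)&value;
 *	attr.flags = BPF_NOEXIST;	// create only; EEXIST if present
 *
 *	if (syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)) &&
 *	    errno == EEXIST) {
 *		attr.flags = BPF_EXIST;	// entry exists, update it instead
 *		syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 *	}
 */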
/* flags for BPF_MAP_CREATE command */
-#define BPF_F_NO_PREALLOC (1U << 0)
+enum {
+ BPF_F_NO_PREALLOC = (1U << 0),
/* Instead of having one common LRU list in the
* BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
* which can scale and perform better.
* Note, the LRU nodes (including free nodes) cannot be moved
* across different LRU lists.
*/
-#define BPF_F_NO_COMMON_LRU (1U << 1)
+ BPF_F_NO_COMMON_LRU = (1U << 1),
/* Specify numa node during map creation */
-#define BPF_F_NUMA_NODE (1U << 2)
-
-#define BPF_OBJ_NAME_LEN 16U
+ BPF_F_NUMA_NODE = (1U << 2),
/* Flags for accessing BPF object from syscall side. */
-#define BPF_F_RDONLY (1U << 3)
-#define BPF_F_WRONLY (1U << 4)
+ BPF_F_RDONLY = (1U << 3),
+ BPF_F_WRONLY = (1U << 4),
/* Flag for stack_map, store build_id+offset instead of pointer */
-#define BPF_F_STACK_BUILD_ID (1U << 5)
+ BPF_F_STACK_BUILD_ID = (1U << 5),
/* Zero-initialize hash function seed. This should only be used for testing. */
-#define BPF_F_ZERO_SEED (1U << 6)
+ BPF_F_ZERO_SEED = (1U << 6),
/* Flags for accessing BPF object from program side. */
-#define BPF_F_RDONLY_PROG (1U << 7)
-#define BPF_F_WRONLY_PROG (1U << 8)
+ BPF_F_RDONLY_PROG = (1U << 7),
+ BPF_F_WRONLY_PROG = (1U << 8),
/* Clone map from listener for newly accepted socket */
-#define BPF_F_CLONE (1U << 9)
+ BPF_F_CLONE = (1U << 9),
/* Enable memory-mapping BPF map */
-#define BPF_F_MMAPABLE (1U << 10)
+ BPF_F_MMAPABLE = (1U << 10),
+
+/* Share perf_event among processes */
+ BPF_F_PRESERVE_ELEMS = (1U << 11),
+
+/* Create a map that is suitable to be an inner map with dynamic max entries */
+ BPF_F_INNER_MAP = (1U << 12),
+};
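/* As an illustration of BPF_F_MMAPABLE, an array map created with this
 * flag can be mapped into user space, so that its values are accessed
 * without further syscalls (a sketch; error handling omitted, and
 * <sys/mman.h> is needed in addition to the includes above):
 *
 *	union bpf_attr attr;
 *	long *vals;
 *	int map_fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.map_type = BPF_MAP_TYPE_ARRAY;	// mmap needs an array map
 *	attr.key_size = sizeof(int);
 *	attr.value_size = sizeof(long);
 *	attr.max_entries = 64;
 *	attr.map_flags = BPF_F_MMAPABLE;
 *	map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 *	vals = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, map_fd, 0);
 *	vals[0] = 1;	// visible to the BPF program without a syscall
 */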
/* Flags for BPF_PROG_QUERY. */
/* Query effective (directly attached + inherited from ancestor cgroups)
* programs that will be executed for events within a cgroup.
- * attach_flags with this flag are returned only for directly attached programs.
+ * attach_flags with this flag are always returned as 0.
*/
#define BPF_F_QUERY_EFFECTIVE (1U << 0)
+/* Flags for BPF_PROG_TEST_RUN */
+
+/* If set, run the test on the cpu specified by bpf_attr.test.cpu */
+#define BPF_F_TEST_RUN_ON_CPU (1U << 0)
+/* If set, XDP frames will be transmitted after processing */
+#define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1)
+
+/* type for BPF_ENABLE_STATS */
+enum bpf_stats_type {
+ /* enable run_time_ns and run_cnt */
+ BPF_STATS_RUN_TIME = 0,
+};
+
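/* A sketch of turning run-time statistics on from user space; the stats
 * stay enabled until this fd, and every other fd obtained the same way,
 * is closed:
 *
 *	union bpf_attr attr;
 *	int stats_fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.enable_stats.type = BPF_STATS_RUN_TIME;
 *	stats_fd = syscall(__NR_bpf, BPF_ENABLE_STATS, &attr, sizeof(attr));
 *	// run_time_ns and run_cnt now accumulate in bpf_prog_info
 *	close(stats_fd);
 */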
enum bpf_stack_build_id_status {
/* user space need an empty entry to identify end of a trace */
BPF_STACK_BUILD_ID_EMPTY = 0,
@@ -391,6 +1295,8 @@ struct bpf_stack_build_id {
};
};
+#define BPF_OBJ_NAME_LEN 16U
+
union bpf_attr {
struct { /* anonymous struct used by BPF_MAP_CREATE command */
__u32 map_type; /* one of enum bpf_map_type */
@@ -413,6 +1319,13 @@ union bpf_attr {
* struct stored as the
* map value
*/
+ /* Any per-map-type extra fields
+ *
+ * BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the
+ * number of hash functions (if 0, the bloom filter will default
+ * to using 5 hash functions).
+ */
+ __u64 map_extra;
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -467,7 +1380,16 @@ union bpf_attr {
__aligned_u64 line_info; /* line info */
__u32 line_info_cnt; /* number of bpf_line_info records */
__u32 attach_btf_id; /* in-kernel BTF type id to attach to */
- __u32 attach_prog_fd; /* 0 to attach to vmlinux */
+ union {
+ /* valid prog_fd to attach to bpf prog */
+ __u32 attach_prog_fd;
+ /* or valid module BTF object fd or 0 to attach to vmlinux */
+ __u32 attach_btf_obj_fd;
+ };
+ __u32 core_relo_cnt; /* number of bpf_core_relo */
+ __aligned_u64 fd_array; /* array of FDs */
+ __aligned_u64 core_relos;
+ __u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -506,6 +1428,9 @@ union bpf_attr {
*/
__aligned_u64 ctx_in;
__aligned_u64 ctx_out;
+ __u32 flags;
+ __u32 cpu;
+ __u32 batch_size;
} test;
struct { /* anonymous struct used by BPF_*_GET_*_ID */
@@ -514,6 +1439,7 @@ union bpf_attr {
__u32 prog_id;
__u32 map_id;
__u32 btf_id;
+ __u32 link_id;
};
__u32 next_id;
__u32 open_flags;
@@ -532,9 +1458,13 @@ union bpf_attr {
__u32 attach_flags;
__aligned_u64 prog_ids;
__u32 prog_cnt;
+ /* output: per-program attach_flags.
+ * not allowed to be set during effective query.
+ */
+ __aligned_u64 prog_attach_flags;
} query;
- struct {
+ struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
__u64 name;
__u32 prog_fd;
} raw_tracepoint;
@@ -562,6 +1492,76 @@ union bpf_attr {
__u64 probe_offset; /* output: probe_offset */
__u64 probe_addr; /* output: probe_addr */
} task_fd_query;
+
+ struct { /* struct used by BPF_LINK_CREATE command */
+ __u32 prog_fd; /* eBPF program to attach */
+ union {
+ __u32 target_fd; /* object to attach to */
+ __u32 target_ifindex; /* target ifindex */
+ };
+ __u32 attach_type; /* attach type */
+ __u32 flags; /* extra flags */
+ union {
+ __u32 target_btf_id; /* btf_id of target to attach to */
+ struct {
+ __aligned_u64 iter_info; /* extra bpf_iter_link_info */
+ __u32 iter_info_len; /* iter_info length */
+ };
+ struct {
+ /* black box user-provided value passed through
+ * to the BPF program at execution time and
+ * accessible through bpf_get_attach_cookie() BPF helper
+ */
+ __u64 bpf_cookie;
+ } perf_event;
+ struct {
+ __u32 flags;
+ __u32 cnt;
+ __aligned_u64 syms;
+ __aligned_u64 addrs;
+ __aligned_u64 cookies;
+ } kprobe_multi;
+ struct {
+ /* this is overlaid with the target_btf_id above. */
+ __u32 target_btf_id;
+ /* black box user-provided value passed through
+ * to the BPF program at execution time and
+ * accessible through bpf_get_attach_cookie() BPF helper
+ */
+ __u64 cookie;
+ } tracing;
+ };
+ } link_create;
+
+ struct { /* struct used by BPF_LINK_UPDATE command */
+ __u32 link_fd; /* link fd */
+ /* new program fd to update link with */
+ __u32 new_prog_fd;
+ __u32 flags; /* extra flags */
+ /* expected link's program fd; is specified only if
+ * BPF_F_REPLACE flag is set in flags */
+ __u32 old_prog_fd;
+ } link_update;
+
+ struct {
+ __u32 link_fd;
+ } link_detach;
+
+ struct { /* struct used by BPF_ENABLE_STATS command */
+ __u32 type;
+ } enable_stats;
+
+ struct { /* struct used by BPF_ITER_CREATE command */
+ __u32 link_fd;
+ __u32 flags;
+ } iter_create;
+
+ struct { /* struct used by BPF_PROG_BIND_MAP command */
+ __u32 prog_fd;
+ __u32 map_fd;
+ __u32 flags; /* extra flags */
+ } prog_bind_map;
+
} __attribute__((aligned(8)));
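/* A sketch of BPF_LINK_CREATE, attaching an already loaded cgroup program
 * to an open cgroup directory fd (prog_fd and cgroup_fd are assumed to be
 * set up by the caller; the attach type is just an example):
 *
 *	union bpf_attr attr;
 *	int link_fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.link_create.prog_fd = prog_fd;
 *	attr.link_create.target_fd = cgroup_fd;
 *	attr.link_create.attach_type = BPF_CGROUP_INET_INGRESS;
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 *	// the attachment lives as long as link_fd, unless the link is
 *	// pinned with BPF_OBJ_PIN
 */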
/* The description below is an attempt at providing documentation to eBPF
@@ -569,7 +1569,7 @@ union bpf_attr {
* parsed and used to produce a manual page. The workflow is the following,
* and requires the rst2man utility:
*
- * $ ./scripts/bpf_helpers_doc.py \
+ * $ ./scripts/bpf_doc.py \
* --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst
* $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7
* $ man /tmp/bpf-helpers.7
@@ -588,7 +1588,7 @@ union bpf_attr {
* Map value associated to *key*, or **NULL** if no entry was
* found.
*
- * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
+ * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
* Description
* Add or update the value of the entry associated to *key* in
* *map* with *value*. *flags* is one of:
@@ -606,29 +1606,31 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_map_delete_elem(struct bpf_map *map, const void *key)
+ * long bpf_map_delete_elem(struct bpf_map *map, const void *key)
* Description
* Delete entry with *key* from *map*.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
* Description
* For tracing programs, safely attempt to read *size* bytes from
* kernel space address *unsafe_ptr* and store the data in *dst*.
*
- * Generally, use bpf_probe_read_user() or bpf_probe_read_kernel()
- * instead.
+ * Generally, use **bpf_probe_read_user**\ () or
+ * **bpf_probe_read_kernel**\ () instead.
* Return
* 0 on success, or a negative error in case of failure.
*
* u64 bpf_ktime_get_ns(void)
* Description
* Return the time elapsed since system boot, in nanoseconds.
+ * Does not include time the system was suspended.
+ * See: **clock_gettime**\ (**CLOCK_MONOTONIC**)
* Return
* Current *ktime*.
*
- * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
+ * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
* Description
* This helper is a "printk()-like" facility for debugging. It
* prints a message defined by format *fmt* (of size *fmt_size*)
@@ -678,7 +1680,7 @@ union bpf_attr {
*
* Also, note that **bpf_trace_printk**\ () is slow, and should
* only be used for debugging purposes. For this reason, a notice
- * bloc (spanning several lines) is printed to kernel logs and
+ * block (spanning several lines) is printed to kernel logs and
* states that the helper should not be used "for production use"
* the first time this helper is used (or more precisely, when
* **trace_printk**\ () buffers are allocated). For passing values
@@ -702,13 +1704,13 @@ union bpf_attr {
* u32 bpf_get_smp_processor_id(void)
* Description
* Get the SMP (symmetric multiprocessing) processor id. Note that
- * all programs run with preemption disabled, which means that the
+ * all programs run with migration disabled, which means that the
 * SMP processor id is stable throughout the execution of the
 * program.
* Return
* The SMP id of the processor running the program.
*
- * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
+ * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
* Description
* Store *len* bytes from address *from* into the packet
* associated to *skb*, at *offset*. *flags* are a combination of
@@ -725,7 +1727,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
+ * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
* Description
* Recompute the layer 3 (e.g. IP) checksum for the packet
* associated to *skb*. Computation is incremental, so the helper
@@ -750,7 +1752,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
+ * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
* Description
* Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the
* packet associated to *skb*. Computation is incremental, so the
@@ -782,7 +1784,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
+ * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
* Description
* This special helper is used to trigger a "tail call", or in
* other words, to jump into another eBPF program. The same stack
@@ -809,11 +1811,11 @@ union bpf_attr {
* if the maximum number of tail calls has been reached for this
* chain of programs. This limit is defined in the kernel by the
* macro **MAX_TAIL_CALL_CNT** (not accessible to user space),
- * which is currently set to 32.
+ * which is currently set to 33.
* Return
* 0 on success, or a negative error in case of failure.
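 *
 *		As an illustration, a program might hand packets off to
 *		whatever program sits in slot 0 of a program array (a
 *		sketch using libbpf's map-definition macros; *jmp_table*,
 *		the section name and the return code are illustrative):
 *
 *			struct {
 *				__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *				__uint(max_entries, 8);
 *				__uint(key_size, sizeof(__u32));
 *				__uint(value_size, sizeof(__u32));
 *			} jmp_table SEC(".maps");
 *
 *			int classifier(struct __sk_buff *skb)
 *			{
 *				bpf_tail_call(skb, &jmp_table, 0);
 *
 *				// only reached if slot 0 is empty or the
 *				// tail call fails
 *				return TC_ACT_OK;
 *			}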
*
- * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
+ * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
* Description
* Clone and redirect the packet associated to *skb* to another
* net device of index *ifindex*. Both ingress and egress
@@ -838,6 +1840,8 @@ union bpf_attr {
* 0 on success, or a negative error in case of failure.
*
* u64 bpf_get_current_pid_tgid(void)
+ * Description
+ * Get the current pid and tgid.
* Return
* A 64-bit integer containing the current tgid and pid, and
* created as such:
@@ -845,11 +1849,13 @@ union bpf_attr {
* *current_task*\ **->pid**.
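 *
 *		For example, the two halves can be split out as follows
 *		(a sketch):
 *
 *			__u64 id = bpf_get_current_pid_tgid();
 *			__u32 tgid = id >> 32;	// thread group (process) id
 *			__u32 pid = (__u32)id;	// thread id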
*
* u64 bpf_get_current_uid_gid(void)
+ * Description
+ * Get the current uid and gid.
* Return
* A 64-bit integer containing the current GID and UID, and
* created as such: *current_gid* **<< 32 \|** *current_uid*.
*
- * int bpf_get_current_comm(void *buf, u32 size_of_buf)
+ * long bpf_get_current_comm(void *buf, u32 size_of_buf)
* Description
* Copy the **comm** attribute of the current task into *buf* of
* *size_of_buf*. The **comm** attribute contains the name of
@@ -886,7 +1892,7 @@ union bpf_attr {
* Return
* The classid, or 0 for the default unconfigured classid.
*
- * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
+ * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
* Description
* Push a *vlan_tci* (VLAN tag control information) of protocol
* *vlan_proto* to the packet associated to *skb*, then update
@@ -902,7 +1908,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_vlan_pop(struct sk_buff *skb)
+ * long bpf_skb_vlan_pop(struct sk_buff *skb)
* Description
* Pop a VLAN header from the packet associated to *skb*.
*
@@ -914,7 +1920,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
+ * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
* Description
* Get tunnel metadata. This helper takes a pointer *key* to an
* empty **struct bpf_tunnel_key** of **size**, that will be
@@ -944,14 +1950,14 @@ union bpf_attr {
*
* int ret;
* struct bpf_tunnel_key key = {};
- *
+ *
* ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
* if (ret < 0)
* return TC_ACT_SHOT; // drop packet
- *
+ *
* if (key.remote_ipv4 != 0x0a000001)
* return TC_ACT_SHOT; // drop packet
- *
+ *
* return TC_ACT_OK; // accept packet
*
* This interface can also be used with all encapsulation devices
@@ -965,7 +1971,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
+ * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
* Description
* Populate tunnel metadata for packet associated to *skb.* The
* tunnel metadata is set to the contents of *key*, of *size*. The
@@ -1031,7 +2037,7 @@ union bpf_attr {
* The value of the perf event counter read from the map, or a
* negative error code in case of failure.
*
- * int bpf_redirect(u32 ifindex, u64 flags)
+ * long bpf_redirect(u32 ifindex, u64 flags)
* Description
* Redirect the packet to another net device of index *ifindex*.
* This helper is somewhat similar to **bpf_clone_redirect**\
@@ -1058,7 +2064,7 @@ union bpf_attr {
* Description
 * Retrieve the realm of the route, that is to say the
* **tclassid** field of the destination for the *skb*. The
- * indentifier retrieved is a user-provided tag, similar to the
+ * identifier retrieved is a user-provided tag, similar to the
* one used with the net_cls cgroup (see description for
* **bpf_get_cgroup_classid**\ () helper), but here this tag is
* held by a route (a destination entry), not by a task.
@@ -1078,7 +2084,7 @@ union bpf_attr {
* The realm of the route for the packet associated to *skb*, or 0
* if none was found.
*
- * int bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
* Description
* Write raw *data* blob into a special BPF perf event held by
* *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
@@ -1123,7 +2129,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
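 *
 *		A minimal sketch, assuming *events* is a
 *		**BPF_MAP_TYPE_PERF_EVENT_ARRAY** map and *ctx* is the
 *		program's context (the event layout is illustrative):
 *
 *			struct event { __u64 ts; __u32 pid; } e = {};
 *
 *			e.ts = bpf_ktime_get_ns();
 *			e.pid = bpf_get_current_pid_tgid() >> 32;
 *			bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *					      &e, sizeof(e));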
*
- * int bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
+ * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
* Description
* This helper was provided as an easy way to load data from a
* packet. It can be used to load *len* bytes from *offset* from
@@ -1140,7 +2146,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
+ * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
* Description
* Walk a user or a kernel stack and return its id. To achieve
* this, the helper needs *ctx*, which is a pointer to the context
@@ -1209,7 +2215,7 @@ union bpf_attr {
* The checksum result, or a negative error code in case of
* failure.
*
- * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
+ * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
* Description
* Retrieve tunnel options metadata for the packet associated to
* *skb*, and store the raw tunnel option data to the buffer *opt*
@@ -1227,7 +2233,7 @@ union bpf_attr {
* Return
* The size of the option data retrieved.
*
- * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
+ * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
* Description
* Set tunnel options metadata for the packet associated to *skb*
* to the option data contained in the raw buffer *opt* of *size*.
@@ -1237,7 +2243,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
+ * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
* Description
* Change the protocol of the *skb* to *proto*. Currently
* supported are transition from IPv4 to IPv6, and from IPv6 to
@@ -1264,7 +2270,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_change_type(struct sk_buff *skb, u32 type)
+ * long bpf_skb_change_type(struct sk_buff *skb, u32 type)
* Description
* Change the packet type for the packet associated to *skb*. This
* comes down to setting *skb*\ **->pkt_type** to *type*, except
@@ -1291,7 +2297,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
+ * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
* Description
* Check whether *skb* is a descendant of the cgroup2 held by
* *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
@@ -1319,10 +2325,12 @@ union bpf_attr {
* The 32-bit hash.
*
* u64 bpf_get_current_task(void)
+ * Description
+ * Get the current task.
* Return
* A pointer to the current task struct.
*
- * int bpf_probe_write_user(void *dst, const void *src, u32 len)
+ * long bpf_probe_write_user(void *dst, const void *src, u32 len)
* Description
* Attempt in a safe way to write *len* bytes from the buffer
* *src* to *dst* in memory. It only works for threads that are in
@@ -1341,7 +2349,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
+ * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
* Description
 * Check whether the probe is being run in the context of a given
* subset of the cgroup2 hierarchy. The cgroup2 to test is held by
@@ -1349,11 +2357,11 @@ union bpf_attr {
* Return
* The return value depends on the result of the test, and can be:
*
- * * 0, if the *skb* task belongs to the cgroup2.
- * * 1, if the *skb* task does not belong to the cgroup2.
+ * * 1, if current task belongs to the cgroup2.
+ * * 0, if current task does not belong to the cgroup2.
* * A negative error code, if an error occurred.
*
- * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
+ * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
* Description
* Resize (trim or grow) the packet associated to *skb* to the
* new *len*. The *flags* are reserved for future usage, and must
@@ -1377,12 +2385,13 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_pull_data(struct sk_buff *skb, u32 len)
+ * long bpf_skb_pull_data(struct sk_buff *skb, u32 len)
* Description
 * Pull in non-linear data in case the *skb* is non-linear and not
 * all *len* bytes are part of the linear section. Make *len* bytes
* from *skb* readable and writable. If a zero value is passed for
- * *len*, then the whole length of the *skb* is pulled.
+ * *len*, then all bytes in the linear part of *skb* will be made
+ * readable and writable.
*
* This helper is only needed for reading and writing with direct
* packet access.
@@ -1432,8 +2441,10 @@ union bpf_attr {
* indicate that the hash is outdated and to trigger a
* recalculation the next time the kernel tries to access this
* hash or when the **bpf_get_hash_recalc**\ () helper is called.
+ * Return
+ * void.
*
- * int bpf_get_numa_node_id(void)
+ * long bpf_get_numa_node_id(void)
* Description
* Return the id of the current NUMA node. The primary use case
* for this helper is the selection of sockets for the local NUMA
@@ -1444,7 +2455,7 @@ union bpf_attr {
* Return
* The id of current NUMA node.
*
- * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
+ * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
* Description
* Grows headroom of packet associated to *skb* and adjusts the
* offset of the MAC header accordingly, adding *len* bytes of
@@ -1465,7 +2476,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
+ * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
* Description
* Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that
* it is possible to use a negative value for *delta*. This helper
@@ -1480,14 +2491,14 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
* Description
* Copy a NUL terminated string from an unsafe kernel address
- * *unsafe_ptr* to *dst*. See bpf_probe_read_kernel_str() for
+ * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for
* more details.
*
- * Generally, use bpf_probe_read_user_str() or bpf_probe_read_kernel_str()
- * instead.
+ * Generally, use **bpf_probe_read_user_str**\ () or
+ * **bpf_probe_read_kernel_str**\ () instead.
* Return
* On success, the strictly positive length of the string,
* including the trailing NUL character. On error, a negative
@@ -1503,24 +2514,34 @@ union bpf_attr {
* networking traffic statistics as it provides a global socket
* identifier that can be assumed unique.
* Return
- * A 8-byte long non-decreasing number on success, or 0 if the
- * socket field is missing inside *skb*.
+ * An 8-byte long unique number on success, or 0 if the socket
+ * field is missing inside *skb*.
*
* u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
* Description
* Equivalent to bpf_get_socket_cookie() helper that accepts
* *skb*, but gets socket from **struct bpf_sock_addr** context.
* Return
- * A 8-byte long non-decreasing number.
+ * A 8-byte long unique number.
*
* u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
* Description
- * Equivalent to bpf_get_socket_cookie() helper that accepts
+ * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts
* *skb*, but gets socket from **struct bpf_sock_ops** context.
* Return
- * A 8-byte long non-decreasing number.
+ * An 8-byte long unique number.
+ *
+ * u64 bpf_get_socket_cookie(struct sock *sk)
+ * Description
+ * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts
+ * *sk*, but gets socket from a BTF **struct sock**. This helper
+ * also works for sleepable programs.
+ * Return
+ * An 8-byte long unique number, or 0 if *sk* is NULL.
*
* u32 bpf_get_socket_uid(struct sk_buff *skb)
+ * Description
+ * Get the owner UID of the socket associated to *skb*.
* Return
* The owner UID of the socket associated to *skb*. If the socket
* is **NULL**, or if it is not a full socket (i.e. if it is a
@@ -1528,14 +2549,14 @@ union bpf_attr {
* is returned (note that **overflowuid** might also be the actual
* UID value for the socket).
*
- * u32 bpf_set_hash(struct sk_buff *skb, u32 hash)
+ * long bpf_set_hash(struct sk_buff *skb, u32 hash)
* Description
* Set the full hash for *skb* (set the field *skb*\ **->hash**)
* to value *hash*.
* Return
* 0
*
- * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen)
+ * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
* Description
* Emulate a call to **setsockopt()** on the socket associated to
* *bpf_socket*, which must be a full socket. The *level* at
@@ -1543,32 +2564,50 @@ union bpf_attr {
* must be specified, see **setsockopt(2)** for more information.
* The option value of length *optlen* is pointed by *optval*.
*
+ * *bpf_socket* should be one of the following:
+ *
+ * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
+ * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
+ * and **BPF_CGROUP_INET6_CONNECT**.
+ *
* This helper actually implements a subset of **setsockopt()**.
* It supports the following *level*\ s:
*
* * **SOL_SOCKET**, which supports the following *optname*\ s:
* **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
- * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**.
+ * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**,
+ * **SO_BINDTODEVICE**, **SO_KEEPALIVE**.
* * **IPPROTO_TCP**, which supports the following *optname*\ s:
* **TCP_CONGESTION**, **TCP_BPF_IW**,
- * **TCP_BPF_SNDCWND_CLAMP**.
+ * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**,
+ * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**,
+ * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**.
* * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
* * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
+ * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
* Description
* Grow or shrink the room for data in the packet associated to
* *skb* by *len_diff*, and according to the selected *mode*.
*
+ * By default, the helper will reset any offloaded checksum
+ * indicator of the skb to CHECKSUM_NONE. This can be avoided
+ * by the following flag:
+ *
+ * * **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded
+ * checksum data of the skb to CHECKSUM_NONE.
+ *
* There are two supported modes at this time:
*
* * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
- * (room space is added or removed below the layer 2 header).
+ * (room space is added or removed between the layer 2 and
+ * layer 3 headers).
*
* * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
- * (room space is added or removed below the layer 3 header).
+ * (room space is added or removed between the layer 3 and
+ * layer 4 headers).
*
* The following flags are supported at this time:
*
@@ -1588,6 +2627,10 @@ union bpf_attr {
* Use with ENCAP_L3/L4 flags to further specify the tunnel
* type; *len* is the length of the inner MAC header.
*
+ * * **BPF_F_ADJ_ROOM_ENCAP_L2_ETH**:
+ * Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the
+ * L2 type as Ethernet.
+ *
* A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
@@ -1596,7 +2639,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags)
+ * long bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags)
* Description
* Redirect the packet to the endpoint referenced by *map* at
* index *key*. Depending on its type, this *map* can contain
@@ -1607,17 +2650,21 @@ union bpf_attr {
*
* The lower two bits of *flags* are used as the return code if
* the map lookup fails. This is so that the return value can be
- * one of the XDP program return codes up to XDP_TX, as chosen by
- * the caller. Any higher bits in the *flags* argument must be
- * unset.
+ * one of the XDP program return codes up to **XDP_TX**, as chosen
+ * by the caller. The higher bits of *flags* can be set to
+ * **BPF_F_BROADCAST** or **BPF_F_EXCLUDE_INGRESS** as defined below.
*
- * See also bpf_redirect(), which only supports redirecting to an
- * ifindex, but doesn't require a map to do so.
+ * With **BPF_F_BROADCAST** the packet will be broadcast to all
+ * the interfaces in the map, and with **BPF_F_EXCLUDE_INGRESS**
+ * the ingress interface will be excluded from the broadcast.
+ *
+ * See also **bpf_redirect**\ (), which only supports redirecting
+ * to an ifindex, but doesn't require a map to do so.
* Return
* **XDP_REDIRECT** on success, or the value of the two lower bits
- * of the **flags* argument on error.
+ * of the *flags* argument on error.
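 *
 *		For example, assuming *tx_ports* is a **BPF_MAP_TYPE_DEVMAP**
 *		holding the egress devices, an XDP program could flood the
 *		packet to every device except the one it arrived on (the
 *		*key* argument is ignored when broadcasting; a sketch):
 *
 *			return bpf_redirect_map(&tx_ports, 0,
 *						BPF_F_BROADCAST |
 *						BPF_F_EXCLUDE_INGRESS);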
*
- * int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
+ * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
* Description
* Redirect the packet to the socket referenced by *map* (of type
* **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
@@ -1628,7 +2675,7 @@ union bpf_attr {
* Return
* **SK_PASS** on success, or **SK_DROP** on error.
*
- * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
+ * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
* Description
* Add an entry to, or update a *map* referencing sockets. The
* *skops* is used as a new value for the entry associated to
@@ -1647,7 +2694,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
+ * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
* Description
* Adjust the address pointed by *xdp_md*\ **->data_meta** by
* *delta* (which can be positive or negative). Note that this
@@ -1676,7 +2723,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
+ * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
* Description
* Read the value of a perf event counter, and store it into *buf*
* of size *buf_size*. This helper relies on a *map* of type
@@ -1720,13 +2767,13 @@ union bpf_attr {
* the time running for event since last normalization. The
* enabled and running times are accumulated since the perf event
* open. To achieve scaling factor between two invocations of an
- * eBPF program, users can can use CPU id as the key (which is
+ * eBPF program, users can use CPU id as the key (which is
* typical for perf array usage model) to remember the previous
* value and do the calculation inside the eBPF program.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
+ * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
* Description
 * For an eBPF program attached to a perf event, retrieve the
* value of the event counter associated to *ctx* and store it in
@@ -1737,7 +2784,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen)
+ * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
* Description
* Emulate a call to **getsockopt()** on the socket associated to
* *bpf_socket*, which must be a full socket. The *level* at
@@ -1746,6 +2793,12 @@ union bpf_attr {
* The retrieved value is stored in the structure pointed by
* *opval* and of length *optlen*.
*
+ * *bpf_socket* should be one of the following:
+ *
+ * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
+ * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
+ * and **BPF_CGROUP_INET6_CONNECT**.
+ *
* This helper actually implements a subset of **getsockopt()**.
* It supports the following *level*\ s:
*
@@ -1756,14 +2809,14 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_override_return(struct pt_regs *regs, u64 rc)
+ * long bpf_override_return(struct pt_regs *regs, u64 rc)
* Description
* Used for error injection, this helper uses kprobes to override
* the return value of the probed function, and to set it to *rc*.
* The first argument is the context *regs* on which the kprobe
* works.
*
- * This helper works by setting setting the PC (program counter)
+ * This helper works by setting the PC (program counter)
* to an override function which is run in place of the original
* probed function. This means the probed function is not run at
* all. The replacement function just returns with the required
@@ -1781,7 +2834,7 @@ union bpf_attr {
* Return
* 0
*
- * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
+ * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
* Description
* Attempt to set the value of the **bpf_sock_ops_cb_flags** field
* for the full TCP socket associated to *bpf_sock_ops* to
@@ -1825,7 +2878,7 @@ union bpf_attr {
* be set is returned (which comes down to 0 if all bits were set
* as required).
*
- * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
+ * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
* Description
* This helper is used in programs implementing policies at the
* socket level. If the message *msg* is allowed to pass (i.e. if
@@ -1839,7 +2892,7 @@ union bpf_attr {
* Return
* **SK_PASS** on success, or **SK_DROP** on error.
*
- * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
+ * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
* Description
* For socket policies, apply the verdict of the eBPF program to
* the next *bytes* (number of bytes) of message *msg*.
@@ -1873,7 +2926,7 @@ union bpf_attr {
* Return
* 0
*
- * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
+ * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
* Description
* For socket policies, prevent the execution of the verdict eBPF
* program for message *msg* until *bytes* (byte number) have been
@@ -1891,7 +2944,7 @@ union bpf_attr {
* Return
* 0
*
- * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
+ * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
* Description
* For socket policies, pull in non-linear data from user space
* for *msg* and set pointers *msg*\ **->data** and *msg*\
@@ -1922,7 +2975,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
+ * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
* Description
* Bind the socket associated to *ctx* to the address pointed by
* *addr*, of length *addr_len*. This allows for making outgoing
@@ -1932,18 +2985,19 @@ union bpf_attr {
*
* This helper works for IPv4 and IPv6, TCP and UDP sockets. The
* domain (*addr*\ **->sa_family**) must be **AF_INET** (or
- * **AF_INET6**). Looking for a free port to bind to can be
- * expensive, therefore binding to port is not permitted by the
- * helper: *addr*\ **->sin_port** (or **sin6_port**, respectively)
- * must be set to zero.
+ * **AF_INET6**). It's advised to pass zero port (**sin_port**
+ * or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like
+ * behavior and lets the kernel efficiently pick an unused
+ * port, as long as the 4-tuple is unique. Passing a non-zero
+ * port might lead to degraded performance.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
+ * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
* Description
* Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is
- * only possible to shrink the packet as of this writing,
- * therefore *delta* must be a negative integer.
+ * possible to both shrink and grow the packet tail.
+ * Shrinking is done via *delta* being a negative integer.
*
* A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
@@ -1953,7 +3007,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
+ * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
* Description
* Retrieve the XFRM state (IP transform framework, see also
* **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*.
@@ -1969,7 +3023,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
+ * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
* Description
* Return a user or a kernel stack in bpf program provided buffer.
* To achieve this, the helper needs *ctx*, which is a pointer
@@ -1985,8 +3039,18 @@ union bpf_attr {
* **BPF_F_USER_STACK**
* Collect a user space stack instead of a kernel stack.
* **BPF_F_USER_BUILD_ID**
- * Collect buildid+offset instead of ips for user stack,
- * only valid if **BPF_F_USER_STACK** is also specified.
+ * Collect (build_id, file_offset) instead of ips for user
+ * stack, only valid if **BPF_F_USER_STACK** is also
+ * specified.
+ *
+ * *file_offset* is an offset relative to the beginning
+ * of the executable or shared object file backing the vma
+ * which the *ip* falls in. It is *not* an offset relative
+ * to that object's base address. Accordingly, it must be
+ * adjusted by adding (sh_addr - sh_offset), where
+ * sh_{addr,offset} correspond to the executable section
+ * containing *file_offset* in the object, for comparisons
+ * to symbols' st_value to be valid.
*
* **bpf_get_stack**\ () can collect up to
* **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
@@ -1999,10 +3063,10 @@ union bpf_attr {
*
* # sysctl kernel.perf_event_max_stack=<new value>
* Return
- * A non-negative value equal to or less than *size* on success,
- * or a negative error in case of failure.
+ * The non-negative copied *buf* length equal to or less than
+ * *size* on success, or a negative error in case of failure.
*
- * int bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
+ * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
* Description
* This helper is similar to **bpf_skb_load_bytes**\ () in that
* it provides an easy way to load *len* bytes from *offset*
@@ -2024,7 +3088,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags)
+ * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags)
* Description
* Do FIB lookup in kernel tables using parameters in *params*.
* If lookup is successful and result shows packet is to be
@@ -2055,7 +3119,10 @@ union bpf_attr {
* * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
* packet is not forwarded or needs assist from full stack
*
- * int bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
+ * If lookup fails with BPF_FIB_LKUP_RET_FRAG_NEEDED, then the MTU
+ * was exceeded and output params->mtu_result contains the MTU.
+ *
+ * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
* Description
* Add an entry to, or update a sockhash *map* referencing sockets.
* The *skops* is used as a new value for the entry associated to
@@ -2074,7 +3141,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
+ * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
* Description
* This helper is used in programs implementing policies at the
* socket level. If the message *msg* is allowed to pass (i.e. if
@@ -2088,11 +3155,11 @@ union bpf_attr {
* Return
* **SK_PASS** on success, or **SK_DROP** on error.
*
- * int bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
+ * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
* Description
* This helper is used in programs implementing policies at the
* skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
- * if the verdeict eBPF program returns **SK_PASS**), redirect it
+ * if the verdict eBPF program returns **SK_PASS**), redirect it
* to the socket referenced by *map* (of type
* **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
* egress interfaces can be used for redirection. The
@@ -2102,7 +3169,7 @@ union bpf_attr {
* Return
* **SK_PASS** on success, or **SK_DROP** on error.
*
- * int bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
+ * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
* Description
* Encapsulate the packet associated to *skb* within a Layer 3
* protocol header. This header is provided in the buffer at
@@ -2139,7 +3206,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len)
+ * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len)
* Description
* Store *len* bytes from address *from* into the packet
* associated to *skb*, at *offset*. Only the flags, tag and TLVs
@@ -2154,7 +3221,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta)
+ * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta)
* Description
* Adjust the size allocated to TLVs in the outermost IPv6
* Segment Routing Header contained in the packet associated to
@@ -2170,7 +3237,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len)
+ * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len)
* Description
* Apply an IPv6 Segment Routing action of type *action* to the
* packet associated to *skb*. Each action takes a parameter
@@ -2199,7 +3266,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_rc_repeat(void *ctx)
+ * long bpf_rc_repeat(void *ctx)
* Description
* This helper is used in programs implementing IR decoding, to
* report a successfully decoded repeat key message. This delays
@@ -2218,7 +3285,7 @@ union bpf_attr {
* Return
* 0
*
- * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
+ * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
* Description
* This helper is used in programs implementing IR decoding, to
* report a successfully decoded key press with *scancode*,
@@ -2229,7 +3296,7 @@ union bpf_attr {
* **bpf_rc_keydown**\ () again with the same values, or calling
* **bpf_rc_repeat**\ ().
*
- * Some protocols include a toggle bit, in case the button was
+ * Some protocols include a toggle bit, in case the button was
* released and pressed again between consecutive scancodes.
*
* The *ctx* should point to the lirc sample as passed into
@@ -2261,6 +3328,9 @@ union bpf_attr {
* The id is returned or 0 in case the id could not be retrieved.
*
* u64 bpf_get_current_cgroup_id(void)
+ * Description
+ * Get the current cgroup id based on the cgroup within which
+ * the current task is running.
* Return
* A 64-bit integer containing the current cgroup id based
* on the cgroup within which the current task is running.
@@ -2278,15 +3348,15 @@ union bpf_attr {
* running simultaneously.
*
 * The user is responsible for any needed synchronization.
- * For example, by using the **BPF_STX_XADD** instruction to alter
+ * For example, by using the **BPF_ATOMIC** instructions to alter
* the shared data.
* Return
* A pointer to the local storage area.
*
- * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
+ * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
* Description
* Select a **SO_REUSEPORT** socket from a
- * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*.
+ * **BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*.
 * It checks that the selected socket matches the incoming
 * request in the socket buffer.
* Return
@@ -2328,7 +3398,7 @@ union bpf_attr {
* Look for an IPv6 socket.
*
* If the *netns* is a negative signed 32-bit integer, then the
- * socket lookup table in the netns associated with the *ctx* will
+ * socket lookup table in the netns associated with the *ctx*
* will be used. For the TC hooks, this is the netns of the device
* in the skb. For socket hooks, this is the netns of the socket.
* If *netns* is any other signed 32-bit value greater than or
@@ -2365,7 +3435,7 @@ union bpf_attr {
* Look for an IPv6 socket.
*
* If the *netns* is a negative signed 32-bit integer, then the
- * socket lookup table in the netns associated with the *ctx* will
+ * socket lookup table in the netns associated with the *ctx*
* will be used. For the TC hooks, this is the netns of the device
* in the skb. For socket hooks, this is the netns of the socket.
* If *netns* is any other signed 32-bit value greater than or
@@ -2384,7 +3454,7 @@ union bpf_attr {
* result is from *reuse*\ **->socks**\ [] using the hash of the
* tuple.
*
- * int bpf_sk_release(struct bpf_sock *sock)
+ * long bpf_sk_release(void *sock)
* Description
* Release the reference held by *sock*. *sock* must be a
* non-**NULL** pointer that was returned from
@@ -2392,7 +3462,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
+ * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
* Description
 * Push an element *value* into *map*. *flags* is one of:
*
@@ -2402,19 +3472,19 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_map_pop_elem(struct bpf_map *map, void *value)
+ * long bpf_map_pop_elem(struct bpf_map *map, void *value)
* Description
* Pop an element from *map*.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_map_peek_elem(struct bpf_map *map, void *value)
+ * long bpf_map_peek_elem(struct bpf_map *map, void *value)
* Description
* Get an element from *map* without removing it.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
+ * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
* Description
* For socket policies, insert *len* bytes into *msg* at offset
* *start*.
@@ -2430,7 +3500,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
+ * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
* Description
* Will remove *len* bytes from a *msg* starting at byte *start*.
* This may result in **ENOMEM** errors under certain situations if
@@ -2442,7 +3512,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
+ * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
* Description
* This helper is used in programs implementing IR decoding, to
* report a successfully decoded pointer movement.
@@ -2456,7 +3526,7 @@ union bpf_attr {
* Return
* 0
*
- * int bpf_spin_lock(struct bpf_spin_lock *lock)
+ * long bpf_spin_lock(struct bpf_spin_lock *lock)
* Description
* Acquire a spinlock represented by the pointer *lock*, which is
* stored as part of a value of a map. Taking the lock allows to
@@ -2504,7 +3574,7 @@ union bpf_attr {
* Return
* 0
*
- * int bpf_spin_unlock(struct bpf_spin_lock *lock)
+ * long bpf_spin_unlock(struct bpf_spin_lock *lock)
* Description
* Release the *lock* previously locked by a call to
* **bpf_spin_lock**\ (\ *lock*\ ).
@@ -2527,7 +3597,7 @@ union bpf_attr {
* A **struct bpf_tcp_sock** pointer on success, or **NULL** in
* case of failure.
*
- * int bpf_skb_ecn_set_ce(struct sk_buff *skb)
+ * long bpf_skb_ecn_set_ce(struct sk_buff *skb)
* Description
* Set ECN (Explicit Congestion Notification) field of IP header
* to **CE** (Congestion Encountered) if current value is **ECT**
@@ -2564,23 +3634,23 @@ union bpf_attr {
* result is from *reuse*\ **->socks**\ [] using the hash of the
* tuple.
*
- * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
+ * long bpf_tcp_check_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
* Description
* Check whether *iph* and *th* contain a valid SYN cookie ACK for
* the listening socket in *sk*.
*
* *iph* points to the start of the IPv4 or IPv6 header, while
* *iph_len* contains **sizeof**\ (**struct iphdr**) or
- * **sizeof**\ (**struct ip6hdr**).
+ * **sizeof**\ (**struct ipv6hdr**).
*
* *th* points to the start of the TCP header, while *th_len*
- * contains **sizeof**\ (**struct tcphdr**).
- *
+ * contains the length of the TCP header (at least
+ * **sizeof**\ (**struct tcphdr**)).
* Return
* 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
* error otherwise.
*
- * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
+ * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
* Description
 * Get the name of the sysctl in /proc/sys/ and copy it into the
 * buffer *buf* of size *buf_len* provided by the program.
@@ -2596,7 +3666,7 @@ union bpf_attr {
* **-E2BIG** if the buffer wasn't big enough (*buf* will contain
* truncated name in this case).
*
- * int bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
+ * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
* Description
* Get current value of sysctl as it is presented in /proc/sys
* (incl. newline, etc), and copy it as a string into provided
@@ -2615,7 +3685,7 @@ union bpf_attr {
* **-EINVAL** if current value was unavailable, e.g. because
* sysctl is uninitialized and read returns -EIO for it.
*
- * int bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
+ * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
* Description
* Get new value being written by user space to sysctl (before
* the actual write happens) and copy it as a string into
@@ -2632,7 +3702,7 @@ union bpf_attr {
*
* **-EINVAL** if sysctl is being read.
*
- * int bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len)
+ * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len)
* Description
* Override new value being written by user space to sysctl with
* value provided by program in buffer *buf* of size *buf_len*.
@@ -2649,7 +3719,7 @@ union bpf_attr {
*
* **-EINVAL** if sysctl is being read.
*
- * int bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res)
+ * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res)
* Description
* Convert the initial part of the string from buffer *buf* of
* size *buf_len* to a long integer according to the given base
@@ -2673,7 +3743,7 @@ union bpf_attr {
*
* **-ERANGE** if resulting value was out of range.
*
- * int bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res)
+ * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res)
* Description
* Convert the initial part of the string from buffer *buf* of
* size *buf_len* to an unsigned long integer according to the
@@ -2696,7 +3766,7 @@ union bpf_attr {
*
* **-ERANGE** if resulting value was out of range.
*
- * void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags)
+ * void *bpf_sk_storage_get(struct bpf_map *map, void *sk, void *value, u64 flags)
* Description
* Get a bpf-local-storage from a *sk*.
*
@@ -2712,6 +3782,9 @@ union bpf_attr {
* "type". The bpf-local-storage "type" (i.e. the *map*) is
* searched against all bpf-local-storages residing at *sk*.
*
+ * *sk* is a kernel **struct sock** pointer for LSM program.
+ * *sk* is a **struct bpf_sock** pointer for other program types.
+ *
* An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be
* used such that a new bpf-local-storage will be
* created if one does not exist. *value* can be used
@@ -2724,15 +3797,16 @@ union bpf_attr {
* **NULL** if not found or there was an error in adding
* a new bpf-local-storage.
*
- * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
+ * long bpf_sk_storage_delete(struct bpf_map *map, void *sk)
* Description
* Delete a bpf-local-storage from a *sk*.
* Return
* 0 on success.
*
* **-ENOENT** if the bpf-local-storage cannot be found.
+ * **-EINVAL** if sk is not a fullsock (e.g. a request_sock).
*
- * int bpf_send_signal(u32 sig)
+ * long bpf_send_signal(u32 sig)
* Description
* Send signal *sig* to the process of the current task.
* The signal may be delivered to any of this process's threads.
@@ -2747,18 +3821,18 @@ union bpf_attr {
*
* **-EAGAIN** if bpf program can try again.
*
- * s64 bpf_tcp_gen_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
+ * s64 bpf_tcp_gen_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
* Description
* Try to issue a SYN cookie for the packet with corresponding
* IP/TCP headers, *iph* and *th*, on the listening socket in *sk*.
*
* *iph* points to the start of the IPv4 or IPv6 header, while
* *iph_len* contains **sizeof**\ (**struct iphdr**) or
- * **sizeof**\ (**struct ip6hdr**).
+ * **sizeof**\ (**struct ipv6hdr**).
*
* *th* points to the start of the TCP header, while *th_len*
- * contains the length of the TCP header.
- *
+ * contains the length of the TCP header with options (at least
+ * **sizeof**\ (**struct tcphdr**)).
* Return
 *		On success, lower 32 bits hold the generated SYN cookie,
 *		followed by 16 bits which hold the MSS value for that cookie,
@@ -2774,7 +3848,7 @@ union bpf_attr {
*
* **-EPROTONOSUPPORT** IP packet version is not 4 or 6
*
- * int bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
* Description
* Write raw *data* blob into a special BPF perf event held by
* *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
@@ -2798,21 +3872,21 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
* Description
* Safely attempt to read *size* bytes from user space address
* *unsafe_ptr* and store the data in *dst*.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
* Description
* Safely attempt to read *size* bytes from kernel space address
* *unsafe_ptr* and store the data in *dst*.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
* Description
* Copy a NUL terminated string from an unsafe user address
* *unsafe_ptr* to *dst*. The *size* should include the
@@ -2821,10 +3895,10 @@ union bpf_attr {
* string length is larger than *size*, just *size*-1 bytes are
* copied and the last byte is set to NUL.
*
- * On success, the length of the copied string is returned. This
- * makes this helper useful in tracing programs for reading
- * strings, and more importantly to get its length at runtime. See
- * the following snippet:
+ * On success, returns the number of bytes that were written,
+ * including the terminal NUL. This makes this helper useful in
+ * tracing programs for reading strings, and more importantly to
+ * get its length at runtime. See the following snippet:
*
* ::
*
@@ -2841,7 +3915,7 @@ union bpf_attr {
* // size, after checking its boundaries.
* }
*
- * In comparison, using **bpf_probe_read_user()** helper here
+ * In comparison, using **bpf_probe_read_user**\ () helper here
* instead to read the string would require to estimate the length
* at compile time, and would often result in copying more memory
* than necessary.
@@ -2852,26 +3926,26 @@ union bpf_attr {
* **->mm->env_start**: using this helper and the return value,
* one can quickly iterate at the right offset of the memory area.
* Return
- * On success, the strictly positive length of the string,
+ * On success, the strictly positive length of the output string,
* including the trailing NUL character. On error, a negative
* value.
*
- * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
* Description
* Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr*
- * to *dst*. Same semantics as with bpf_probe_read_user_str() apply.
+ * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply.
* Return
- * On success, the strictly positive length of the string, including
+ * On success, the strictly positive length of the string, including
* the trailing NUL character. On error, a negative value.
*
- * int bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
+ * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
* Description
- * Send out a tcp-ack. *tp* is the in-kernel struct tcp_sock.
+ * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**.
* *rcv_nxt* is the ack_seq to be sent out.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_send_signal_thread(u32 sig)
+ * long bpf_send_signal_thread(u32 sig)
* Description
* Send signal *sig* to the thread corresponding to the current task.
* Return
@@ -2890,6 +3964,1477 @@ union bpf_attr {
* Obtain the 64bit jiffies
* Return
* The 64 bit jiffies
+ *
+ * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
+ * Description
+ * For an eBPF program attached to a perf event, retrieve the
+ * branch records (**struct perf_branch_entry**) associated to *ctx*
+ * and store it in the buffer pointed by *buf* up to size
+ * *size* bytes.
+ * Return
+ * On success, number of bytes written to *buf*. On error, a
+ * negative value.
+ *
+ * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to
+ * instead return the number of bytes required to store all the
+ * branch entries. If this flag is set, *buf* may be NULL.
+ *
+ *		**-EINVAL** if arguments are invalid or **size** is not a multiple
+ * of **sizeof**\ (**struct perf_branch_entry**\ ).
+ *
+ * **-ENOENT** if architecture does not support branch records.
+ *
+ * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size)
+ * Description
+ *		Obtain the values of *pid* and *tgid* of the current task as
+ *		seen from the current *namespace* and return them in *nsdata*.
+ * Return
+ * 0 on success, or one of the following in case of failure:
+ *
+ *		**-EINVAL** if the supplied *dev* and *ino* don't match the dev_t
+ *		and inode number of the current task's nsfs, or if the conversion
+ *		of *dev* to dev_t lost high bits.
+ *
+ *		**-ENOENT** if the pidns does not exist for the current task.
+ *
+ * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * Description
+ * Write raw *data* blob into a special BPF perf event held by
+ * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
+ * event must have the following attributes: **PERF_SAMPLE_RAW**
+ * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
+ * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
+ *
+ * The *flags* are used to indicate the index in *map* for which
+ * the value must be put, masked with **BPF_F_INDEX_MASK**.
+ * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
+ * to indicate that the index of the current CPU core should be
+ * used.
+ *
+ * The value to write, of *size*, is passed through eBPF stack and
+ * pointed by *data*.
+ *
+ *		*ctx* is a pointer to the in-kernel **struct xdp_buff**.
+ *
+ *		This helper is similar to **bpf_perf_event_output**\ () but
+ * restricted to raw_tracepoint bpf programs.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * u64 bpf_get_netns_cookie(void *ctx)
+ * Description
+ * Retrieve the cookie (generated by the kernel) of the network
+ * namespace the input *ctx* is associated with. The network
+ * namespace cookie remains stable for its lifetime and provides
+ * a global identifier that can be assumed unique. If *ctx* is
+ * NULL, then the helper returns the cookie for the initial
+ * network namespace. The cookie itself is very similar to that
+ * of **bpf_get_socket_cookie**\ () helper, but for network
+ * namespaces instead of sockets.
+ * Return
+ *		An 8-byte long opaque number.
+ *
+ * u64 bpf_get_current_ancestor_cgroup_id(int ancestor_level)
+ * Description
+ * Return id of cgroup v2 that is ancestor of the cgroup associated
+ * with the current task at the *ancestor_level*. The root cgroup
+ * is at *ancestor_level* zero and each step down the hierarchy
+ * increments the level. If *ancestor_level* == level of cgroup
+ * associated with the current task, then return value will be the
+ * same as that of **bpf_get_current_cgroup_id**\ ().
+ *
+ * The helper is useful to implement policies based on cgroups
+ * that are upper in hierarchy than immediate cgroup associated
+ * with the current task.
+ *
+ * The format of returned id and helper limitations are same as in
+ * **bpf_get_current_cgroup_id**\ ().
+ * Return
+ * The id is returned or 0 in case the id could not be retrieved.
+ *
+ * long bpf_sk_assign(struct sk_buff *skb, void *sk, u64 flags)
+ * Description
+ * Helper is overloaded depending on BPF program type. This
+ * description applies to **BPF_PROG_TYPE_SCHED_CLS** and
+ * **BPF_PROG_TYPE_SCHED_ACT** programs.
+ *
+ * Assign the *sk* to the *skb*. When combined with appropriate
+ * routing configuration to receive the packet towards the socket,
+ *		this will cause *skb* to be delivered to the specified socket.
+ * Subsequent redirection of *skb* via **bpf_redirect**\ (),
+ * **bpf_clone_redirect**\ () or other methods outside of BPF may
+ * interfere with successful delivery to the socket.
+ *
+ * This operation is only valid from TC ingress path.
+ *
+ * The *flags* argument must be zero.
+ * Return
+ * 0 on success, or a negative error in case of failure:
+ *
+ * **-EINVAL** if specified *flags* are not supported.
+ *
+ * **-ENOENT** if the socket is unavailable for assignment.
+ *
+ * **-ENETUNREACH** if the socket is unreachable (wrong netns).
+ *
+ * **-EOPNOTSUPP** if the operation is not supported, for example
+ * a call from outside of TC ingress.
+ *
+ * **-ESOCKTNOSUPPORT** if the socket type is not supported
+ * (reuseport).
+ *
+ * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags)
+ * Description
+ * Helper is overloaded depending on BPF program type. This
+ * description applies to **BPF_PROG_TYPE_SK_LOOKUP** programs.
+ *
+ * Select the *sk* as a result of a socket lookup.
+ *
+ *		For the operation to succeed, the passed socket must be
+ *		compatible with the packet description provided by the *ctx*
+ *		object.
+ *
+ *		The L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must
+ *		be an exact match, while the IP family (**AF_INET** or
+ *		**AF_INET6**) must be compatible, that is, IPv6 sockets
+ *		that are not v6-only can be selected for IPv4 packets.
+ *
+ * Only TCP listeners and UDP unconnected sockets can be
+ * selected. *sk* can also be NULL to reset any previous
+ * selection.
+ *
+ *		The *flags* argument can be a combination of the following values:
+ *
+ * * **BPF_SK_LOOKUP_F_REPLACE** to override the previous
+ * socket selection, potentially done by a BPF program
+ * that ran before us.
+ *
+ * * **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip
+ * load-balancing within reuseport group for the socket
+ * being selected.
+ *
+ * On success *ctx->sk* will point to the selected socket.
+ *
+ * Return
+ * 0 on success, or a negative errno in case of failure.
+ *
+ * * **-EAFNOSUPPORT** if socket family (*sk->family*) is
+ * not compatible with packet family (*ctx->family*).
+ *
+ * * **-EEXIST** if socket has been already selected,
+ * potentially by another program, and
+ * **BPF_SK_LOOKUP_F_REPLACE** flag was not specified.
+ *
+ * * **-EINVAL** if unsupported flags were specified.
+ *
+ * * **-EPROTOTYPE** if socket L4 protocol
+ * (*sk->protocol*) doesn't match packet protocol
+ * (*ctx->protocol*).
+ *
+ * * **-ESOCKTNOSUPPORT** if socket is not in allowed
+ * state (TCP listening or UDP unconnected).
+ *
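+ *		A minimal selection sketch (names like *redir_map* and
+ *		*select_sock* are illustrative, assuming a
+ *		**BPF_MAP_TYPE_SOCKMAP** that holds the target socket at
+ *		key 0):
+ *
+ *		::
+ *
+ *			SEC("sk_lookup")
+ *			int select_sock(struct bpf_sk_lookup *ctx)
+ *			{
+ *				struct bpf_sock *sk;
+ *				__u32 key = 0;
+ *				long err;
+ *
+ *				sk = bpf_map_lookup_elem(&redir_map, &key);
+ *				if (!sk)
+ *					return SK_PASS;
+ *				err = bpf_sk_assign(ctx, sk, 0);
+ *				bpf_sk_release(sk);
+ *				return err ? SK_DROP : SK_PASS;
+ *			}
+ *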
+ * u64 bpf_ktime_get_boot_ns(void)
+ * Description
+ * Return the time elapsed since system boot, in nanoseconds.
+ * Does include the time the system was suspended.
+ * See: **clock_gettime**\ (**CLOCK_BOOTTIME**)
+ * Return
+ * Current *ktime*.
+ *
+ * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len)
+ * Description
+ * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print
+ * out the format string.
+ * The *m* represents the seq_file. The *fmt* and *fmt_size* are for
+ * the format string itself. The *data* and *data_len* are format string
+ * arguments. The *data* are a **u64** array and corresponding format string
+ * values are stored in the array. For strings and pointers where pointees
+ * are accessed, only the pointer values are stored in the *data* array.
+ * The *data_len* is the size of *data* in bytes - must be a multiple of 8.
+ *
+ *		Formats **%s** and **%p{i,I}{4,6}** require reading kernel memory.
+ * Reading kernel memory may fail due to either invalid address or
+ * valid address but requiring a major memory fault. If reading kernel memory
+ * fails, the string for **%s** will be an empty string, and the ip
+ *		address for **%p{i,I}{4,6}** will be 0. Not returning an error
+ *		to the bpf program is consistent with what **bpf_trace_printk**\ ()
+ *		does for now.
+ * Return
+ * 0 on success, or a negative error in case of failure:
+ *
+ *		**-EBUSY** if the per-CPU memory copy buffer is busy; the bpf
+ *		program can try again by returning 1.
+ *
+ * **-EINVAL** if arguments are invalid, or if *fmt* is invalid/unsupported.
+ *
+ * **-E2BIG** if *fmt* contains too many format specifiers.
+ *
+ * **-EOVERFLOW** if an overflow happened: The same object will be tried again.
+ *
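+ *		As a sketch, an iterator program could use this helper via
+ *		libbpf's **BPF_SEQ_PRINTF** convenience macro, which packs
+ *		its arguments into the *data* array (names are illustrative):
+ *
+ *		::
+ *
+ *			SEC("iter/task")
+ *			int dump_task(struct bpf_iter__task *ctx)
+ *			{
+ *				struct task_struct *task = ctx->task;
+ *
+ *				if (!task)
+ *					return 0;
+ *				// packs pid and comm into the data array
+ *				BPF_SEQ_PRINTF(ctx->meta->seq, "%d %s\n",
+ *					       task->pid, task->comm);
+ *				return 0;
+ *			}
+ *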
+ * long bpf_seq_write(struct seq_file *m, const void *data, u32 len)
+ * Description
+ * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data.
+ * The *m* represents the seq_file. The *data* and *len* represent the
+ * data to write in bytes.
+ * Return
+ * 0 on success, or a negative error in case of failure:
+ *
+ * **-EOVERFLOW** if an overflow happened: The same object will be tried again.
+ *
+ * u64 bpf_sk_cgroup_id(void *sk)
+ * Description
+ * Return the cgroup v2 id of the socket *sk*.
+ *
+ * *sk* must be a non-**NULL** pointer to a socket, e.g. one
+ * returned from **bpf_sk_lookup_xxx**\ (),
+ * **bpf_sk_fullsock**\ (), etc. The format of returned id is
+ * same as in **bpf_skb_cgroup_id**\ ().
+ *
+ * This helper is available only if the kernel was compiled with
+ * the **CONFIG_SOCK_CGROUP_DATA** configuration option.
+ * Return
+ * The id is returned or 0 in case the id could not be retrieved.
+ *
+ * u64 bpf_sk_ancestor_cgroup_id(void *sk, int ancestor_level)
+ * Description
+ * Return id of cgroup v2 that is ancestor of cgroup associated
+ * with the *sk* at the *ancestor_level*. The root cgroup is at
+ * *ancestor_level* zero and each step down the hierarchy
+ * increments the level. If *ancestor_level* == level of cgroup
+ * associated with *sk*, then return value will be same as that
+ * of **bpf_sk_cgroup_id**\ ().
+ *
+ * The helper is useful to implement policies based on cgroups
+ * that are upper in hierarchy than immediate cgroup associated
+ * with *sk*.
+ *
+ * The format of returned id and helper limitations are same as in
+ * **bpf_sk_cgroup_id**\ ().
+ * Return
+ * The id is returned or 0 in case the id could not be retrieved.
+ *
+ * long bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
+ * Description
+ * Copy *size* bytes from *data* into a ring buffer *ringbuf*.
+ * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ * of new data availability is sent.
+ * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ * of new data availability is sent unconditionally.
+ * If **0** is specified in *flags*, an adaptive notification
+ * of new data availability is sent.
+ *
+ * An adaptive notification is a notification sent whenever the user-space
+ * process has caught up and consumed all available payloads. In case the user-space
+ *		process is still processing a previous payload, no notification is needed
+ * as it will process the newly added payload automatically.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags)
+ * Description
+ * Reserve *size* bytes of payload in a ring buffer *ringbuf*.
+ * *flags* must be 0.
+ * Return
+ *		Valid pointer with *size* bytes of memory available; **NULL**
+ *		otherwise.
+ *
+ * void bpf_ringbuf_submit(void *data, u64 flags)
+ * Description
+ * Submit reserved ring buffer sample, pointed to by *data*.
+ * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ * of new data availability is sent.
+ * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ * of new data availability is sent unconditionally.
+ * If **0** is specified in *flags*, an adaptive notification
+ * of new data availability is sent.
+ *
+ *		See **bpf_ringbuf_output**\ () for the definition of adaptive notification.
+ * Return
+ * Nothing. Always succeeds.
+ *
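+ *		A minimal reserve/submit sketch (the map *rb*, the *event*
+ *		layout and *log_exec* are illustrative, not part of the API):
+ *
+ *		::
+ *
+ *			struct {
+ *				__uint(type, BPF_MAP_TYPE_RINGBUF);
+ *				__uint(max_entries, 4096);
+ *			} rb SEC(".maps");
+ *
+ *			struct event { __u32 pid; };
+ *
+ *			SEC("tracepoint/sched/sched_process_exec")
+ *			int log_exec(void *ctx)
+ *			{
+ *				struct event *e;
+ *
+ *				e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
+ *				if (!e)	// reservation failed
+ *					return 0;
+ *				e->pid = bpf_get_current_pid_tgid() >> 32;
+ *				bpf_ringbuf_submit(e, 0); // adaptive wakeup
+ *				return 0;
+ *			}
+ *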
+ * void bpf_ringbuf_discard(void *data, u64 flags)
+ * Description
+ * Discard reserved ring buffer sample, pointed to by *data*.
+ * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ * of new data availability is sent.
+ * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ * of new data availability is sent unconditionally.
+ * If **0** is specified in *flags*, an adaptive notification
+ * of new data availability is sent.
+ *
+ *		See **bpf_ringbuf_output**\ () for the definition of adaptive notification.
+ * Return
+ * Nothing. Always succeeds.
+ *
+ * u64 bpf_ringbuf_query(void *ringbuf, u64 flags)
+ * Description
+ *		Query various characteristics of the provided ring buffer. What
+ *		exactly is queried is determined by *flags*:
+ *
+ * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
+ * * **BPF_RB_RING_SIZE**: The size of ring buffer.
+ * * **BPF_RB_CONS_POS**: Consumer position (can wrap around).
+ * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
+ *
+ * Data returned is just a momentary snapshot of actual values
+ * and could be inaccurate, so this facility should be used to
+ * power heuristics and for reporting, not to make 100% correct
+ *		calculations.
+ * Return
+ * Requested value, or 0, if *flags* are not recognized.
+ *
+ * long bpf_csum_level(struct sk_buff *skb, u64 level)
+ * Description
+ *		Change the skb's checksum level by one layer up or down, or
+ * reset it entirely to none in order to have the stack perform
+ * checksum validation. The level is applicable to the following
+ * protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of
+ * | ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP |
+ * through **bpf_skb_adjust_room**\ () helper with passing in
+ * **BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag would require one call
+ * to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since
+ * the UDP header is removed. Similarly, an encap of the latter
+ * into the former could be accompanied by a helper call to
+ * **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the
+ * skb is still intended to be processed in higher layers of the
+ * stack instead of just egressing at tc.
+ *
+ * There are three supported level settings at this time:
+ *
+ * * **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs
+ * with CHECKSUM_UNNECESSARY.
+ * * **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs
+ * with CHECKSUM_UNNECESSARY.
+ * * **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and
+ * sets CHECKSUM_NONE to force checksum validation by the stack.
+ * * **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current
+ * skb->csum_level.
+ * Return
+ * 0 on success, or a negative error in case of failure. In the
+ * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level
+ * is returned or the error code -EACCES in case the skb is not
+ * subject to CHECKSUM_UNNECESSARY.
+ *
+ * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or **NULL** otherwise.
+ *
+ * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *tcp_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or **NULL** otherwise.
+ *
+ * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or **NULL** otherwise.
+ *
+ * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or **NULL** otherwise.
+ *
+ * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *udp6_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or **NULL** otherwise.
+ *
+ * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
+ * Description
+ * Return a user or a kernel stack in bpf program provided buffer.
+ * To achieve this, the helper needs *task*, which is a valid
+ * pointer to **struct task_struct**. To store the stacktrace, the
+ * bpf program provides *buf* with a nonnegative *size*.
+ *
+ * The last argument, *flags*, holds the number of stack frames to
+ * skip (from 0 to 255), masked with
+ * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
+ * the following flags:
+ *
+ * **BPF_F_USER_STACK**
+ * Collect a user space stack instead of a kernel stack.
+ * **BPF_F_USER_BUILD_ID**
+ * Collect buildid+offset instead of ips for user stack,
+ * only valid if **BPF_F_USER_STACK** is also specified.
+ *
+ * **bpf_get_task_stack**\ () can collect up to
+ * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
+ *		to a sufficiently large buffer size. Note that
+ * this limit can be controlled with the **sysctl** program, and
+ * that it should be manually increased in order to profile long
+ * user stacks (such as stacks for Java programs). To do so, use:
+ *
+ * ::
+ *
+ * # sysctl kernel.perf_event_max_stack=<new value>
+ * Return
+ * The non-negative copied *buf* length equal to or less than
+ * *size* on success, or a negative error in case of failure.
+ *
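+ *		A sketch in a task iterator program (names are illustrative;
+ *		**BPF_SEQ_PRINTF** is libbpf's wrapper around
+ *		**bpf_seq_printf**\ ()):
+ *
+ *		::
+ *
+ *			SEC("iter/task")
+ *			int dump_depth(struct bpf_iter__task *ctx)
+ *			{
+ *				struct task_struct *task = ctx->task;
+ *				__u64 ips[32];
+ *				long nbytes;
+ *
+ *				if (!task)
+ *					return 0;
+ *				// kernel stack, no frames skipped
+ *				nbytes = bpf_get_task_stack(task, ips,
+ *							    sizeof(ips), 0);
+ *				if (nbytes > 0)
+ *					BPF_SEQ_PRINTF(ctx->meta->seq,
+ *						       "depth %ld\n",
+ *						       nbytes / 8);
+ *				return 0;
+ *			}
+ *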
+ * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags)
+ * Description
+ * Load header option. Support reading a particular TCP header
+ * option for bpf program (**BPF_PROG_TYPE_SOCK_OPS**).
+ *
+ * If *flags* is 0, it will search the option from the
+ * *skops*\ **->skb_data**. The comment in **struct bpf_sock_ops**
+ * has details on what skb_data contains under different
+ * *skops*\ **->op**.
+ *
+ * The first byte of the *searchby_res* specifies the
+ * kind that it wants to search.
+ *
+ *		If the searching kind is an experimental kind
+ *		(i.e. 253 or 254 according to RFC6994), the "magic",
+ *		which is either 2 bytes or 4 bytes, must also be
+ *		specified. Its size is given in the 2nd byte, the
+ *		"kind-length" of a TCP header option; as for a normal
+ *		TCP header option, the "kind-length" also includes the
+ *		first 2 bytes, "kind" and "kind-length" itself.
+ *
+ * For example, to search experimental kind 254 with
+ * 2 byte magic 0xeB9F, the searchby_res should be
+ * [ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ].
+ *
+ * To search for the standard window scale option (3),
+ * the *searchby_res* should be [ 3, 0, 0, .... 0 ].
+ *		Note, kind-length must be 0 for a regular option.
+ *
+ *		Searching for No-Op (0) and End-of-Option-List (1) is
+ * not supported.
+ *
+ *		*len* must be at least 2 bytes, which is the minimal size
+ * of a header option.
+ *
+ * Supported flags:
+ *
+ * * **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the
+ * saved_syn packet or the just-received syn packet.
+ *
+ * Return
+ * > 0 when found, the header option is copied to *searchby_res*.
+ * The return value is the total length copied. On failure, a
+ * negative error code is returned:
+ *
+ * **-EINVAL** if a parameter is invalid.
+ *
+ * **-ENOMSG** if the option is not found.
+ *
+ * **-ENOENT** if no syn packet is available when
+ * **BPF_LOAD_HDR_OPT_TCP_SYN** is used.
+ *
+ * **-ENOSPC** if there is not enough space. Only *len* number of
+ * bytes are copied.
+ *
+ * **-EFAULT** on failure to parse the header options in the
+ * packet.
+ *
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
+ *
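+ *		For instance, a **BPF_PROG_TYPE_SOCK_OPS** program could
+ *		search for the window scale option like this (a sketch;
+ *		*find_wscale* is illustrative and libbpf's bpf_printk is
+ *		used only for demonstration):
+ *
+ *		::
+ *
+ *			SEC("sockops")
+ *			int find_wscale(struct bpf_sock_ops *skops)
+ *			{
+ *				// kind 3, kind-length 0 for a regular option
+ *				__u8 opt[4] = { 3, 0, };
+ *				int ret;
+ *
+ *				ret = bpf_load_hdr_opt(skops, opt,
+ *						       sizeof(opt), 0);
+ *				if (ret > 0)	// opt[2] holds the shift count
+ *					bpf_printk("wscale %d", opt[2]);
+ *				return 1;
+ *			}
+ *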
+ * long bpf_store_hdr_opt(struct bpf_sock_ops *skops, const void *from, u32 len, u64 flags)
+ * Description
+ * Store header option. The data will be copied
+ * from buffer *from* with length *len* to the TCP header.
+ *
+ * The buffer *from* should have the whole option that
+ * includes the kind, kind-length, and the actual
+ * option data. The *len* must be at least kind-length
+ *		long. The kind-length does not have to be 4-byte
+ *		aligned; the kernel will take care of the padding
+ *		and of setting a 4-byte-aligned value in th->doff.
+ *
+ *		This helper will check for a duplicated option
+ *		by searching for the same option in the outgoing skb.
+ *
+ * This helper can only be called during
+ * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
+ *
+ * Return
+ * 0 on success, or negative error in case of failure:
+ *
+ *		**-EINVAL** if a parameter is invalid.
+ *
+ * **-ENOSPC** if there is not enough space in the header.
+ *		Nothing has been written.
+ *
+ * **-EEXIST** if the option already exists.
+ *
+ * **-EFAULT** on failure to parse the existing header options.
+ *
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
+ *
+ * long bpf_reserve_hdr_opt(struct bpf_sock_ops *skops, u32 len, u64 flags)
+ * Description
+ * Reserve *len* bytes for the bpf header option. The
+ * space will be used by **bpf_store_hdr_opt**\ () later in
+ * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
+ *
+ * If **bpf_reserve_hdr_opt**\ () is called multiple times,
+ * the total number of bytes will be reserved.
+ *
+ * This helper can only be called during
+ * **BPF_SOCK_OPS_HDR_OPT_LEN_CB**.
+ *
+ * Return
+ * 0 on success, or negative error in case of failure:
+ *
+ * **-EINVAL** if a parameter is invalid.
+ *
+ * **-ENOSPC** if there is not enough space in the header.
+ *
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
+ *
+ * void *bpf_inode_storage_get(struct bpf_map *map, void *inode, void *value, u64 flags)
+ * Description
+ * Get a bpf_local_storage from an *inode*.
+ *
+ * Logically, it could be thought of as getting the value from
+ * a *map* with *inode* as the **key**. From this
+ * perspective, the usage is not much different from
+ * **bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except this
+ * helper enforces the key must be an inode and the map must also
+ * be a **BPF_MAP_TYPE_INODE_STORAGE**.
+ *
+ * Underneath, the value is stored locally at *inode* instead of
+ * the *map*. The *map* is used as the bpf-local-storage
+ * "type". The bpf-local-storage "type" (i.e. the *map*) is
+ * searched against all bpf_local_storage residing at *inode*.
+ *
+ * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
+ * used such that a new bpf_local_storage will be
+ * created if one does not exist. *value* can be used
+ * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
+ * the initial value of a bpf_local_storage. If *value* is
+ * **NULL**, the new bpf_local_storage will be zero initialized.
+ * Return
+ * A bpf_local_storage pointer is returned on success.
+ *
+ * **NULL** if not found or there was an error in adding
+ * a new bpf_local_storage.
+ *
+ * int bpf_inode_storage_delete(struct bpf_map *map, void *inode)
+ * Description
+ * Delete a bpf_local_storage from an *inode*.
+ * Return
+ * 0 on success.
+ *
+ * **-ENOENT** if the bpf_local_storage cannot be found.
+ *
+ * long bpf_d_path(struct path *path, char *buf, u32 sz)
+ * Description
+ * Return full path for given **struct path** object, which
+ * needs to be the kernel BTF *path* object. The path is
+ * returned in the provided buffer *buf* of size *sz* and
+ * is zero terminated.
+ *
+ * Return
+ * On success, the strictly positive length of the string,
+ * including the trailing NUL character. On error, a negative
+ * value.
+ *
+ * long bpf_copy_from_user(void *dst, u32 size, const void *user_ptr)
+ * Description
+ * Read *size* bytes from user space address *user_ptr* and store
+ * the data in *dst*. This is a wrapper of **copy_from_user**\ ().
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * long bpf_snprintf_btf(char *str, u32 str_size, struct btf_ptr *ptr, u32 btf_ptr_size, u64 flags)
+ * Description
+ * Use BTF to store a string representation of *ptr*->ptr in *str*,
+ * using *ptr*->type_id. This value should specify the type
+ * that *ptr*->ptr points to. LLVM __builtin_btf_type_id(type, 1)
+ * can be used to look up vmlinux BTF type ids. Traversing the
+ * data structure using BTF, the type information and values are
+ * stored in the first *str_size* - 1 bytes of *str*. Safe copy of
+ * the pointer data is carried out to avoid kernel crashes during
+ * operation. Smaller types can use string space on the stack;
+ *		larger types can use map data to store the string
+ * representation.
+ *
+ * The string can be subsequently shared with userspace via
+ * bpf_perf_event_output() or ring buffer interfaces.
+ * bpf_trace_printk() is to be avoided as it places too small
+ * a limit on string size to be useful.
+ *
+ * *flags* is a combination of
+ *
+ * **BTF_F_COMPACT**
+ * no formatting around type information
+ * **BTF_F_NONAME**
+ * no struct/union member names/types
+ * **BTF_F_PTR_RAW**
+ * show raw (unobfuscated) pointer values;
+ * equivalent to printk specifier %px.
+ * **BTF_F_ZERO**
+ * show zero-valued struct/union members; they
+ * are not displayed by default
+ *
+ * Return
+ * The number of bytes that were written (or would have been
+ * written if output had to be truncated due to string size),
+ * or a negative error in cases of failure.
+ *
+ * long bpf_seq_printf_btf(struct seq_file *m, struct btf_ptr *ptr, u32 ptr_size, u64 flags)
+ * Description
+ * Use BTF to write to seq_write a string representation of
+ * *ptr*->ptr, using *ptr*->type_id as per bpf_snprintf_btf().
+ * *flags* are identical to those used for bpf_snprintf_btf.
+ * Return
+ * 0 on success or a negative error in case of failure.
+ *
+ * u64 bpf_skb_cgroup_classid(struct sk_buff *skb)
+ * Description
+ * See **bpf_get_cgroup_classid**\ () for the main description.
+ * This helper differs from **bpf_get_cgroup_classid**\ () in that
+ * the cgroup v1 net_cls class is retrieved only from the *skb*'s
+ * associated socket instead of the current process.
+ * Return
+ * The id is returned or 0 in case the id could not be retrieved.
+ *
+ * long bpf_redirect_neigh(u32 ifindex, struct bpf_redir_neigh *params, int plen, u64 flags)
+ * Description
+ * Redirect the packet to another net device of index *ifindex*
+ * and fill in L2 addresses from neighboring subsystem. This helper
+ * is somewhat similar to **bpf_redirect**\ (), except that it
+ * populates L2 addresses as well, meaning, internally, the helper
+ * relies on the neighbor lookup for the L2 address of the nexthop.
+ *
+ * The helper will perform a FIB lookup based on the skb's
+ * networking header to get the address of the next hop, unless
+ * this is supplied by the caller in the *params* argument. The
+ * *plen* argument indicates the len of *params* and should be set
+ * to 0 if *params* is NULL.
+ *
+ * The *flags* argument is reserved and must be 0. The helper is
+ * currently only supported for tc BPF program types, and enabled
+ * for IPv4 and IPv6 protocols.
+ * Return
+ * The helper returns **TC_ACT_REDIRECT** on success or
+ * **TC_ACT_SHOT** on error.
+ *
+ * void *bpf_per_cpu_ptr(const void *percpu_ptr, u32 cpu)
+ * Description
+ * Take a pointer to a percpu ksym, *percpu_ptr*, and return a
+ * pointer to the percpu kernel variable on *cpu*. A ksym is an
+ * extern variable decorated with '__ksym'. For ksym, there is a
+ * global var (either static or global) defined of the same name
+ * in the kernel. The ksym is percpu if the global var is percpu.
+ * The returned pointer points to the global percpu var on *cpu*.
+ *
+ * bpf_per_cpu_ptr() has the same semantic as per_cpu_ptr() in the
+ * kernel, except that bpf_per_cpu_ptr() may return NULL. This
+ * happens if *cpu* is larger than nr_cpu_ids. The caller of
+ * bpf_per_cpu_ptr() must check the returned value.
+ * Return
+ * A pointer pointing to the kernel percpu variable on *cpu*, or
+ * NULL, if *cpu* is invalid.
+ *
+ * void *bpf_this_cpu_ptr(const void *percpu_ptr)
+ * Description
+ * Take a pointer to a percpu ksym, *percpu_ptr*, and return a
+ * pointer to the percpu kernel variable on this cpu. See the
+ * description of 'ksym' in **bpf_per_cpu_ptr**\ ().
+ *
+ * bpf_this_cpu_ptr() has the same semantic as this_cpu_ptr() in
+ * the kernel. Different from **bpf_per_cpu_ptr**\ (), it would
+ * never return NULL.
+ * Return
+ * A pointer pointing to the kernel percpu variable on this cpu.
+ *
+ * long bpf_redirect_peer(u32 ifindex, u64 flags)
+ * Description
+ * Redirect the packet to another net device of index *ifindex*.
+ * This helper is somewhat similar to **bpf_redirect**\ (), except
+ * that the redirection happens to the *ifindex*' peer device and
+ * the netns switch takes place from ingress to ingress without
+ * going through the CPU's backlog queue.
+ *
+ * The *flags* argument is reserved and must be 0. The helper is
+ * currently only supported for tc BPF program types at the ingress
+ * hook and for veth device types. The peer device must reside in a
+ * different network namespace.
+ * Return
+ * The helper returns **TC_ACT_REDIRECT** on success or
+ * **TC_ACT_SHOT** on error.
+ *
+ * void *bpf_task_storage_get(struct bpf_map *map, struct task_struct *task, void *value, u64 flags)
+ * Description
+ * Get a bpf_local_storage from the *task*.
+ *
+ * Logically, it could be thought of as getting the value from
+ * a *map* with *task* as the **key**. From this
+ * perspective, the usage is not much different from
+ * **bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except this
+ * helper enforces the key must be a task_struct and the map must also
+ * be a **BPF_MAP_TYPE_TASK_STORAGE**.
+ *
+ * Underneath, the value is stored locally at *task* instead of
+ * the *map*. The *map* is used as the bpf-local-storage
+ * "type". The bpf-local-storage "type" (i.e. the *map*) is
+ * searched against all bpf_local_storage residing at *task*.
+ *
+ * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
+ * used such that a new bpf_local_storage will be
+ * created if one does not exist. *value* can be used
+ * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
+ * the initial value of a bpf_local_storage. If *value* is
+ * **NULL**, the new bpf_local_storage will be zero initialized.
+ * Return
+ * A bpf_local_storage pointer is returned on success.
+ *
+ * **NULL** if not found or there was an error in adding
+ * a new bpf_local_storage.
+ *
+ * long bpf_task_storage_delete(struct bpf_map *map, struct task_struct *task)
+ * Description
+ * Delete a bpf_local_storage from a *task*.
+ * Return
+ * 0 on success.
+ *
+ * **-ENOENT** if the bpf_local_storage cannot be found.
+ *
+ * struct task_struct *bpf_get_current_task_btf(void)
+ * Description
+ * Return a BTF pointer to the "current" task.
+ * This pointer can also be used in helpers that accept an
+ * *ARG_PTR_TO_BTF_ID* of type *task_struct*.
+ * Return
+ * Pointer to the current task.
+ *
+ * long bpf_bprm_opts_set(struct linux_binprm *bprm, u64 flags)
+ * Description
+ * Set or clear certain options on *bprm*:
+ *
+ * **BPF_F_BPRM_SECUREEXEC** Set the secureexec bit
+ * which sets the **AT_SECURE** auxv for glibc. The bit
+ * is cleared if the flag is not specified.
+ * Return
+ * **-EINVAL** if invalid *flags* are passed, zero otherwise.
+ *
+ * u64 bpf_ktime_get_coarse_ns(void)
+ * Description
+ * Return a coarse-grained version of the time elapsed since
+ * system boot, in nanoseconds. Does not include time the system
+ * was suspended.
+ *
+ * See: **clock_gettime**\ (**CLOCK_MONOTONIC_COARSE**)
+ * Return
+ * Current *ktime*.
+ *
+ * long bpf_ima_inode_hash(struct inode *inode, void *dst, u32 size)
+ * Description
+ * Returns the stored IMA hash of the *inode* (if it's available).
+ * If the hash is larger than *size*, then only *size*
+ *		bytes will be copied to *dst*.
+ * Return
+ * The **hash_algo** is returned on success,
+ *		**-EOPNOTSUPP** if IMA is disabled or **-EINVAL** if
+ * invalid arguments are passed.
+ *
+ * struct socket *bpf_sock_from_file(struct file *file)
+ * Description
+ * If the given file represents a socket, returns the associated
+ * socket.
+ * Return
+ * A pointer to a struct socket on success or NULL if the file is
+ * not a socket.
+ *
+ * long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags)
+ * Description
+ * Check packet size against exceeding MTU of net device (based
+ * on *ifindex*). This helper will likely be used in combination
+ * with helpers that adjust/change the packet size.
+ *
+ * The argument *len_diff* can be used for querying with a planned
+ *		size change. This allows checking the MTU prior to changing the
+ *		packet ctx. Providing a *len_diff* adjustment that is larger
+ *		than the actual packet size (resulting in a negative packet
+ *		size) will in
+ * principle not exceed the MTU, which is why it is not considered
+ * a failure. Other BPF helpers are needed for performing the
+ * planned size change; therefore the responsibility for catching
+ * a negative packet size belongs in those helpers.
+ *
+ * Specifying *ifindex* zero means the MTU check is performed
+ *		against the current net device. This is practical if the helper
+ *		isn't used prior to a redirect.
+ *
+ *		On input, *mtu_len* must be a valid pointer, else the verifier
+ *		will reject the BPF program. If the value *mtu_len* is
+ *		initialized to zero then the ctx packet size is used. When the
+ *		value *mtu_len* is provided as input, it specifies the L3 length
+ *		that the MTU check is done against. Remember XDP and TC lengths
+ *		operate at L2, but this value is L3 as it correlates to MTU and
+ *		IP-header tot_len values, which are L3 (similar behavior to
+ *		**bpf_fib_lookup**\ ()).
+ *
+ * The Linux kernel route table can configure MTUs on a more
+ * specific per route level, which is not provided by this helper.
+ * For route level MTU checks use the **bpf_fib_lookup**\ ()
+ * helper.
+ *
+ * *ctx* is either **struct xdp_md** for XDP programs or
+ * **struct sk_buff** for tc cls_act programs.
+ *
+ * The *flags* argument can be a combination of one or more of the
+ * following values:
+ *
+ * **BPF_MTU_CHK_SEGS**
+ *			This flag only works for *ctx* **struct sk_buff**.
+ *			If the packet context contains extra packet segment
+ *			buffers (often known as GSO skb), then the MTU is
+ *			harder to check at this point, because in the transmit
+ *			path it is possible for the skb packet to get
+ *			re-segmented (depending on net device features). This
+ *			could still be an MTU violation, so this flag enables
+ *			performing the MTU check against segments, with a
+ *			different violation return code to tell it apart. The
+ *			check cannot use *len_diff*.
+ *
+ *		On return, the *mtu_len* pointer contains the MTU value of the
+ *		net device. Remember the net device's configured MTU is the L3
+ *		size, which is what is returned here, while XDP and TC lengths
+ *		operate at L2. The helper takes this into account for you, but
+ *		remember it when using the MTU value in your BPF code.
+ *
+ * Return
+ * * 0 on success, and populate MTU value in *mtu_len* pointer.
+ *
+ * * < 0 if any input argument is invalid (*mtu_len* not updated)
+ *
+ * MTU violations return positive values, but also populate MTU
+ * value in *mtu_len* pointer, as this can be needed for
+ *		implementing PMTU handling:
+ *
+ * * **BPF_MTU_CHK_RET_FRAG_NEEDED**
+ * * **BPF_MTU_CHK_RET_SEGS_TOOBIG**
+ *
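+ *		A sketch of an XDP MTU guard before a planned encapsulation
+ *		(the 64-byte growth and *mtu_guard* are illustrative):
+ *
+ *		::
+ *
+ *			SEC("xdp")
+ *			int mtu_guard(struct xdp_md *ctx)
+ *			{
+ *				__u32 mtu_len = 0; // 0: use ctx packet size
+ *
+ *				// non-zero: violation or invalid input
+ *				if (bpf_check_mtu(ctx, 0, &mtu_len, 64, 0))
+ *					return XDP_DROP;
+ *				return XDP_PASS;
+ *			}
+ *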
+ * long bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn, void *callback_ctx, u64 flags)
+ * Description
+ * For each element in **map**, call **callback_fn** function with
+ * **map**, **callback_ctx** and other map-specific parameters.
+ * The **callback_fn** should be a static function and
+ * the **callback_ctx** should be a pointer to the stack.
+ * The **flags** is used to control certain aspects of the helper.
+ * Currently, the **flags** must be 0.
+ *
+ * The following are a list of supported map types and their
+ * respective expected callback signatures:
+ *
+ * BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_PERCPU_HASH,
+ * BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH,
+ * BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PERCPU_ARRAY
+ *
+ * long (\*callback_fn)(struct bpf_map \*map, const void \*key, void \*value, void \*ctx);
+ *
+ * For per_cpu maps, the map_value is the value on the cpu where the
+ * bpf_prog is running.
+ *
+ *		If **callback_fn** returns 0, the helper will continue to the next
+ *		element. If the return value is 1, the helper will skip the rest
+ *		of the elements and return. Other return values are not used now.
+ *
+ * Return
+ * The number of traversed map elements for success, **-EINVAL** for
+ * invalid **flags**.
+ *
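+ *		A counting sketch (assuming a **BPF_MAP_TYPE_HASH** named
+ *		*my_hash*; the callback must be a static function):
+ *
+ *		::
+ *
+ *			static long count_elem(struct bpf_map *map,
+ *					       const void *key, void *value,
+ *					       void *ctx)
+ *			{
+ *				(*(__u32 *)ctx)++;
+ *				return 0; // 0: continue to the next element
+ *			}
+ *
+ *			SEC("tc")
+ *			int count_all(struct __sk_buff *skb)
+ *			{
+ *				__u32 n = 0;
+ *
+ *				bpf_for_each_map_elem(&my_hash, count_elem,
+ *						      &n, 0);
+ *				return 0; // TC_ACT_OK
+ *			}
+ *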
+ * long bpf_snprintf(char *str, u32 str_size, const char *fmt, u64 *data, u32 data_len)
+ * Description
+ * Outputs a string into the **str** buffer of size **str_size**
+ * based on a format string stored in a read-only map pointed by
+ * **fmt**.
+ *
+ * Each format specifier in **fmt** corresponds to one u64 element
+ * in the **data** array. For strings and pointers where pointees
+ * are accessed, only the pointer values are stored in the *data*
+ * array. The *data_len* is the size of *data* in bytes - must be
+ * a multiple of 8.
+ *
+ *		Formats **%s** and **%p{i,I}{4,6}** require reading kernel
+ * memory. Reading kernel memory may fail due to either invalid
+ * address or valid address but requiring a major memory fault. If
+ * reading kernel memory fails, the string for **%s** will be an
+ * empty string, and the ip address for **%p{i,I}{4,6}** will be 0.
+ *		Not returning an error to the bpf program is consistent with what
+ * **bpf_trace_printk**\ () does for now.
+ *
+ * Return
+ * The strictly positive length of the formatted string, including
+ * the trailing zero character. If the return value is greater than
+ * **str_size**, **str** contains a truncated string, guaranteed to
+ * be zero-terminated except when **str_size** is 0.
+ *
+ * Or **-EBUSY** if the per-CPU memory copy buffer is busy.
+ *
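+ *		A formatting sketch; the static const *fmt* ends up in a
+ *		read-only map, as required (names are illustrative):
+ *
+ *		::
+ *
+ *			SEC("tp/syscalls/sys_enter_execve")
+ *			int fmt_example(void *ctx)
+ *			{
+ *				static const char fmt[] = "pid: %d\n";
+ *				__u64 args[] = {
+ *					bpf_get_current_pid_tgid() >> 32,
+ *				};
+ *				char out[16];
+ *
+ *				// sizeof(args) is 8, a multiple of 8
+ *				bpf_snprintf(out, sizeof(out), fmt,
+ *					     args, sizeof(args));
+ *				return 0;
+ *			}
+ *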
+ * long bpf_sys_bpf(u32 cmd, void *attr, u32 attr_size)
+ * Description
+ * Execute bpf syscall with given arguments.
+ * Return
+ * A syscall result.
+ *
+ * long bpf_btf_find_by_name_kind(char *name, int name_sz, u32 kind, int flags)
+ * Description
+ * Find BTF type with given name and kind in vmlinux BTF or in module's BTFs.
+ * Return
+ * Returns btf_id and btf_obj_fd in lower and upper 32 bits.
+ *
+ * long bpf_sys_close(u32 fd)
+ * Description
+ * Execute close syscall for given FD.
+ * Return
+ * A syscall result.
+ *
+ * long bpf_timer_init(struct bpf_timer *timer, struct bpf_map *map, u64 flags)
+ * Description
+ * Initialize the timer.
+ * First 4 bits of *flags* specify clockid.
+ * Only CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_BOOTTIME are allowed.
+ * All other bits of *flags* are reserved.
+ * The verifier will reject the program if *timer* is not from
+ * the same *map*.
+ * Return
+ * 0 on success.
+ * **-EBUSY** if *timer* is already initialized.
+ * **-EINVAL** if invalid *flags* are passed.
+ * **-EPERM** if *timer* is in a map that doesn't have any user references.
+ * The user space should either hold a file descriptor to a map with timers
+ * or pin such map in bpffs. When map is unpinned or file descriptor is
+ * closed all timers in the map will be cancelled and freed.
+ *
+ * long bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn)
+ * Description
+ * Configure the timer to call *callback_fn* static function.
+ * Return
+ * 0 on success.
+ * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
+ * **-EPERM** if *timer* is in a map that doesn't have any user references.
+ * The user space should either hold a file descriptor to a map with timers
+ * or pin such map in bpffs. When map is unpinned or file descriptor is
+ * closed all timers in the map will be cancelled and freed.
+ *
+ * long bpf_timer_start(struct bpf_timer *timer, u64 nsecs, u64 flags)
+ * Description
+ * Set timer expiration N nanoseconds from the current time. The
+ * configured callback will be invoked in soft irq context on some cpu
+ * and will not repeat unless another bpf_timer_start() is made.
+ *		In such a case, the next invocation can migrate to a different cpu.
+ *		Since struct bpf_timer is a field inside a map element, the map
+ * owns the timer. The bpf_timer_set_callback() will increment refcnt
+ * of BPF program to make sure that callback_fn code stays valid.
+ * When user space reference to a map reaches zero all timers
+ * in a map are cancelled and corresponding program's refcnts are
+ * decremented. This is done to make sure that Ctrl-C of a user
+ * process doesn't leave any timers running. If map is pinned in
+ * bpffs the callback_fn can re-arm itself indefinitely.
+ * bpf_map_update/delete_elem() helpers and user space sys_bpf commands
+ * cancel and free the timer in the given map element.
+ * The map can contain timers that invoke callback_fn-s from different
+ * programs. The same callback_fn can serve different timers from
+ * different maps if key/value layout matches across maps.
+ * Every bpf_timer_set_callback() can have different callback_fn.
+ *
+ * Return
+ * 0 on success.
+ * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier
+ * or invalid *flags* are passed.
+ *
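+ *		Putting the three timer helpers together (a sketch, assuming
+ *		an array map *tmap* whose value embeds the timer; *map_val*
+ *		and *timer_cb* are illustrative):
+ *
+ *		::
+ *
+ *			struct map_val {
+ *				struct bpf_timer t;
+ *			};
+ *
+ *			static int timer_cb(void *map, int *key,
+ *					    struct map_val *val)
+ *			{
+ *				// runs in soft irq context, may re-arm
+ *				return 0;
+ *			}
+ *
+ *			// with *val* looked up from *tmap*:
+ *			// CLOCK_MONOTONIC goes in the low 4 bits of flags
+ *			bpf_timer_init(&val->t, &tmap, CLOCK_MONOTONIC);
+ *			bpf_timer_set_callback(&val->t, timer_cb);
+ *			bpf_timer_start(&val->t, 1000000000 /* 1s */, 0);
+ *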
+ * long bpf_timer_cancel(struct bpf_timer *timer)
+ * Description
+ * Cancel the timer and wait for callback_fn to finish if it was running.
+ * Return
+ * 0 if the timer was not active.
+ * 1 if the timer was active.
+ * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
+ * **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its
+ * own timer which would have led to a deadlock otherwise.
+ *
+ * u64 bpf_get_func_ip(void *ctx)
+ * Description
+ * Get address of the traced function (for tracing and kprobe programs).
+ * Return
+ * Address of the traced function.
+ * 0 for kprobes placed within the function (not at the entry).
+ *
+ * u64 bpf_get_attach_cookie(void *ctx)
+ * Description
+ * Get bpf_cookie value provided (optionally) during the program
+ * attachment. It might be different for each individual
+ * attachment, even if BPF program itself is the same.
+ * Expects BPF program context *ctx* as a first argument.
+ *
+ * Supported for the following program types:
+ * - kprobe/uprobe;
+ * - tracepoint;
+ * - perf_event.
+ * Return
+ * Value specified by user at BPF link creation/attachment time
+ * or 0, if it was not specified.
+ *
+ * long bpf_task_pt_regs(struct task_struct *task)
+ * Description
+ * Get the struct pt_regs associated with **task**.
+ * Return
+ * A pointer to struct pt_regs.
+ *
+ * long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags)
+ * Description
+ * Get branch trace from hardware engines like Intel LBR. The
+ * hardware engine is stopped shortly after the helper is
+ *		called. Therefore, the user needs to filter branch entries
+ * based on the actual use case. To capture branch trace
+ * before the trigger point of the BPF program, the helper
+ * should be called at the beginning of the BPF program.
+ *
+ * The data is stored as struct perf_branch_entry into output
+ * buffer *entries*. *size* is the size of *entries* in bytes.
+ * *flags* is reserved for now and must be zero.
+ *
+ * Return
+ *		On success, number of bytes written to *entries*. On error, a
+ * negative value.
+ *
+ * **-EINVAL** if *flags* is not zero.
+ *
+ * **-ENOENT** if architecture does not support branch records.
+ *
+ * long bpf_trace_vprintk(const char *fmt, u32 fmt_size, const void *data, u32 data_len)
+ * Description
+ * Behaves like **bpf_trace_printk**\ () helper, but takes an array of u64
+ * to format and can handle more format args as a result.
+ *
+ * Arguments are to be used as in **bpf_seq_printf**\ () helper.
+ * Return
+ * The number of bytes written to the buffer, or a negative error
+ * in case of failure.
+ *
+ * struct unix_sock *bpf_skc_to_unix_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *unix_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or **NULL** otherwise.
+ *
+ * long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res)
+ * Description
+ * Get the address of a kernel symbol, returned in *res*. *res* is
+ * set to 0 if the symbol is not found.
+ * Return
+ * On success, zero. On error, a negative value.
+ *
+ * **-EINVAL** if *flags* is not zero.
+ *
+ * **-EINVAL** if string *name* is not the same size as *name_sz*.
+ *
+ * **-ENOENT** if symbol is not found.
+ *
+ * **-EPERM** if caller does not have permission to obtain kernel address.
+ *
+ * long bpf_find_vma(struct task_struct *task, u64 addr, void *callback_fn, void *callback_ctx, u64 flags)
+ * Description
+ *		Find the vma of *task* that contains *addr* and call the
+ *		*callback_fn* function with *task*, *vma*, and *callback_ctx*.
+ * The *callback_fn* should be a static function and
+ * the *callback_ctx* should be a pointer to the stack.
+ * The *flags* is used to control certain aspects of the helper.
+ * Currently, the *flags* must be 0.
+ *
+ * The expected callback signature is
+ *
+ * long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx);
+ *
+ * Return
+ * 0 on success.
+ * **-ENOENT** if *task->mm* is NULL, or no vma contains *addr*.
+ *		**-EBUSY** if the try-lock of mmap_lock failed.
+ * **-EINVAL** for invalid **flags**.
+ *
+ * long bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx, u64 flags)
+ * Description
+ * For **nr_loops**, call **callback_fn** function
+ * with **callback_ctx** as the context parameter.
+ * The **callback_fn** should be a static function and
+ * the **callback_ctx** should be a pointer to the stack.
+ * The **flags** is used to control certain aspects of the helper.
+ *		Currently, the **flags** must be 0, and **nr_loops** is
+ * limited to 1 << 23 (~8 million) loops.
+ *
+ * long (\*callback_fn)(u32 index, void \*ctx);
+ *
+ * where **index** is the current index in the loop. The index
+ * is zero-indexed.
+ *
+ * If **callback_fn** returns 0, the helper will continue to the next
+ * loop. If return value is 1, the helper will skip the rest of
+ * the loops and return. Other return values are not used now,
+ * and will be rejected by the verifier.
+ *
+ * Return
+ * The number of loops performed, **-EINVAL** for invalid **flags**,
+ * **-E2BIG** if **nr_loops** exceeds the maximum number of loops.
+ *
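+ *		A summing sketch (the callback must be a static function;
+ *		*sum_cb* is an illustrative name):
+ *
+ *		::
+ *
+ *			static long sum_cb(__u32 index, void *ctx)
+ *			{
+ *				*(__u64 *)ctx += index;
+ *				return 0; // continue looping
+ *			}
+ *
+ *			// later, in the program body:
+ *			__u64 sum = 0;
+ *
+ *			bpf_loop(100, sum_cb, &sum, 0); // 0+1+...+99 = 4950
+ *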
+ * long bpf_strncmp(const char *s1, u32 s1_sz, const char *s2)
+ * Description
+ * Do strncmp() between **s1** and **s2**. **s1** doesn't need
+ * to be null-terminated and **s1_sz** is the maximum storage
+ * size of **s1**. **s2** must be a read-only string.
+ * Return
+ * An integer less than, equal to, or greater than zero
+ * if the first **s1_sz** bytes of **s1** is found to be
+ * less than, to match, or be greater than **s2**.
+ *
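+ *		For example, comparing the current task comm against a
+ *		constant; the second string must be read-only:
+ *
+ *		::
+ *
+ *			char comm[16];
+ *
+ *			bpf_get_current_comm(comm, sizeof(comm));
+ *			if (!bpf_strncmp(comm, sizeof(comm), "systemd"))
+ *				return 0; // comm matches
+ *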
+ * long bpf_get_func_arg(void *ctx, u32 n, u64 *value)
+ * Description
+ * Get **n**-th argument register (zero based) of the traced function (for tracing programs)
+ * returned in **value**.
+ *
+ * Return
+ * 0 on success.
+ * **-EINVAL** if n >= argument register count of traced function.
+ *
+ * long bpf_get_func_ret(void *ctx, u64 *value)
+ * Description
+ * Get return value of the traced function (for tracing programs)
+ * in **value**.
+ *
+ * Return
+ * 0 on success.
+ * **-EOPNOTSUPP** for tracing programs other than BPF_TRACE_FEXIT or BPF_MODIFY_RETURN.
+ *
+ * long bpf_get_func_arg_cnt(void *ctx)
+ * Description
+ *		Get the number of registers of the traced function (for tracing
+ *		programs) in which the function arguments are stored.
+ *
+ * Return
+ * The number of argument registers of the traced function.
+ *
+ * int bpf_get_retval(void)
+ * Description
+ * Get the BPF program's return value that will be returned to the upper layers.
+ *
+ * This helper is currently supported by cgroup programs and only by the hooks
+ * where BPF program's return value is returned to the userspace via errno.
+ * Return
+ * The BPF program's return value.
+ *
+ * int bpf_set_retval(int retval)
+ * Description
+ * Set the BPF program's return value that will be returned to the upper layers.
+ *
+ * This helper is currently supported by cgroup programs and only by the hooks
+ * where BPF program's return value is returned to the userspace via errno.
+ *
+ * Note that there is the following corner case where the program exports an error
+ * via bpf_set_retval but signals success via 'return 1':
+ *
+ * bpf_set_retval(-EPERM);
+ * return 1;
+ *
+ *		In this case, the BPF program's return value will use the helper's
+ *		-EPERM. This still holds true for cgroup/bind{4,6}, which supports
+ *		an extra 'return 3' success case.
+ *
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * u64 bpf_xdp_get_buff_len(struct xdp_buff *xdp_md)
+ * Description
+ *		Get the total size of a given xdp buff (linear and paged area).
+ * Return
+ * The total size of a given xdp buffer.
+ *
+ * long bpf_xdp_load_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len)
+ * Description
+ *		This helper is provided as an easy way to load data from an
+ *		xdp buffer. It can be used to load *len* bytes from *offset* from
+ * the frame associated to *xdp_md*, into the buffer pointed by
+ * *buf*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * long bpf_xdp_store_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len)
+ * Description
+ * Store *len* bytes from buffer *buf* into the frame
+ * associated to *xdp_md*, at *offset*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * long bpf_copy_from_user_task(void *dst, u32 size, const void *user_ptr, struct task_struct *tsk, u64 flags)
+ * Description
+ * Read *size* bytes from user space address *user_ptr* in *tsk*'s
+ *		address space, and store the data in *dst*. *flags* is not
+ * used yet and is provided for future extensibility. This helper
+ * can only be used by sleepable programs.
+ * Return
+ * 0 on success, or a negative error in case of failure. On error
+ * *dst* buffer is zeroed out.
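+ *
+ * A hedged sketch from a sleepable program; where *tsk* and
+ * *user_ptr* come from is program-specific:
+ *
+ * char buf[64];
+ *
+ * if (bpf_copy_from_user_task(buf, sizeof(buf), user_ptr, tsk, 0))
+ * return 0;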
+ *
+ * long bpf_skb_set_tstamp(struct sk_buff *skb, u64 tstamp, u32 tstamp_type)
+ * Description
+ * Change the __sk_buff->tstamp_type to *tstamp_type*
+ * and set *tstamp* to the __sk_buff->tstamp together.
+ *
+ * If there is no need to change the __sk_buff->tstamp_type,
+ * the tstamp value can be directly written to __sk_buff->tstamp
+ * instead.
+ *
+ * BPF_SKB_TSTAMP_DELIVERY_MONO is the only tstamp that
+ * will be kept during bpf_redirect_*(). A non zero
+ * *tstamp* must be used with the BPF_SKB_TSTAMP_DELIVERY_MONO
+ * *tstamp_type*.
+ *
+ * A BPF_SKB_TSTAMP_UNSPEC *tstamp_type* can only be used
+ * with a zero *tstamp*.
+ *
+ * Only IPv4 and IPv6 skb->protocol are supported.
+ *
+ * This function is most useful when the program needs to set a
+ * mono delivery time in __sk_buff->tstamp and then
+ * bpf_redirect_*() to the egress of an iface. For example,
+ * changing the (rcv) timestamp in __sk_buff->tstamp at
+ * ingress to a mono delivery time and then bpf_redirect_*()
+ * to sch_fq@phy-dev.
+ * Return
+ * 0 on success.
+ * **-EINVAL** for invalid input.
+ * **-EOPNOTSUPP** for unsupported protocol.
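+ *
+ * Illustrative use before an egress redirect (BPF_SKB_TSTAMP_*
+ * values are defined later in this header; *ifindex* is
+ * program-specific):
+ *
+ * bpf_skb_set_tstamp(skb, bpf_ktime_get_ns(),
+ * BPF_SKB_TSTAMP_DELIVERY_MONO);
+ * return bpf_redirect(ifindex, 0);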
+ *
+ * long bpf_ima_file_hash(struct file *file, void *dst, u32 size)
+ * Description
+ * Returns a calculated IMA hash of the *file*.
+ * If the hash is larger than *size*, then only *size*
+ * bytes will be copied to *dst*.
+ * Return
+ * The **hash_algo** is returned on success,
+ * **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if
+ * invalid arguments are passed.
+ *
+ * void *bpf_kptr_xchg(void *map_value, void *ptr)
+ * Description
+ * Exchange kptr at pointer *map_value* with *ptr*, and return the
+ * old value. *ptr* can be NULL, otherwise it must be a referenced
+ * pointer which will be released when this helper is called.
+ * Return
+ * The old value of kptr (which can be NULL). The returned pointer,
+ * if not NULL, is a reference which must be released using its
+ * corresponding release function, or moved into a BPF map before
+ * program exit.
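+ *
+ * A hedged sketch of the usual exchange-and-release pattern
+ * (the release function depends on the kptr's type and is
+ * illustrative here):
+ *
+ * old = bpf_kptr_xchg(&map_value->ptr, new);
+ * if (old)
+ * release(old);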
+ *
+ * void *bpf_map_lookup_percpu_elem(struct bpf_map *map, const void *key, u32 cpu)
+ * Description
+ * Perform a lookup in *percpu map* for an entry associated to
+ * *key* on *cpu*.
+ * Return
+ * Map value associated to *key* on *cpu*, or **NULL** if no entry
+ * was found or *cpu* is invalid.
+ *
+ * struct mptcp_sock *bpf_skc_to_mptcp_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *mptcp_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or **NULL** otherwise.
+ *
+ * long bpf_dynptr_from_mem(void *data, u32 size, u64 flags, struct bpf_dynptr *ptr)
+ * Description
+ * Get a dynptr to local memory *data*.
+ *
+ * *data* must be a ptr to a map value.
+ * The maximum *size* supported is DYNPTR_MAX_SIZE.
+ * *flags* is currently unused.
+ * Return
+ * 0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE,
+ * -EINVAL if flags is not 0.
+ *
+ * long bpf_ringbuf_reserve_dynptr(void *ringbuf, u32 size, u64 flags, struct bpf_dynptr *ptr)
+ * Description
+ * Reserve *size* bytes of payload in a ring buffer *ringbuf*
+ * through the dynptr interface. *flags* must be 0.
+ *
+ * Please note that a corresponding bpf_ringbuf_submit_dynptr or
+ * bpf_ringbuf_discard_dynptr must be called on *ptr*, even if the
+ * reservation fails. This is enforced by the verifier.
+ * Return
+ * 0 on success, or a negative error in case of failure.
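+ *
+ * Since a submit or discard is required even on failure, a
+ * typical (illustrative) pattern is:
+ *
+ * struct bpf_dynptr ptr;
+ *
+ * if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) {
+ * bpf_ringbuf_discard_dynptr(&ptr, 0);
+ * return 0;
+ * }
+ * ... fill via bpf_dynptr_write() ...
+ * bpf_ringbuf_submit_dynptr(&ptr, 0);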
+ *
+ * void bpf_ringbuf_submit_dynptr(struct bpf_dynptr *ptr, u64 flags)
+ * Description
+ * Submit reserved ring buffer sample, pointed to by *ptr*,
+ * through the dynptr interface. This is a no-op if the dynptr is
+ * invalid/null.
+ *
+ * For more information on *flags*, please see
+ * 'bpf_ringbuf_submit'.
+ * Return
+ * Nothing. Always succeeds.
+ *
+ * void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, u64 flags)
+ * Description
+ * Discard reserved ring buffer sample through the dynptr
+ * interface. This is a no-op if the dynptr is invalid/null.
+ *
+ * For more information on *flags*, please see
+ * 'bpf_ringbuf_discard'.
+ * Return
+ * Nothing. Always succeeds.
+ *
+ * long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset, u64 flags)
+ * Description
+ * Read *len* bytes from *src* into *dst*, starting from *offset*
+ * into *src*.
+ * *flags* is currently unused.
+ * Return
+ * 0 on success, -E2BIG if *offset* + *len* exceeds the length
+ * of *src*'s data, -EINVAL if *src* is an invalid dynptr or if
+ * *flags* is not 0.
+ *
+ * long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags)
+ * Description
+ * Write *len* bytes from *src* into *dst*, starting from *offset*
+ * into *dst*.
+ * *flags* is currently unused.
+ * Return
+ * 0 on success, -E2BIG if *offset* + *len* exceeds the length
+ * of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst*
+ * is a read-only dynptr or if *flags* is not 0.
+ *
+ * void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len)
+ * Description
+ * Get a pointer to the underlying dynptr data.
+ *
+ * *len* must be a statically known value. The returned data slice
+ * is invalidated whenever the dynptr is invalidated.
+ * Return
+ * Pointer to the underlying dynptr data, NULL if the dynptr is
+ * read-only, if the dynptr is invalid, or if the offset and length
+ * are out of bounds.
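+ *
+ * Illustrative direct access (the length must be a constant):
+ *
+ * __u8 \*data = bpf_dynptr_data(&ptr, 0, 8);
+ *
+ * if (!data)
+ * return 0;
+ * data[0] = 0xff;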
+ *
+ * s64 bpf_tcp_raw_gen_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th, u32 th_len)
+ * Description
+ * Try to issue a SYN cookie for the packet with corresponding
+ * IPv4/TCP headers, *iph* and *th*, without depending on a
+ * listening socket.
+ *
+ * *iph* points to the IPv4 header.
+ *
+ * *th* points to the start of the TCP header, while *th_len*
+ * contains the length of the TCP header (at least
+ * **sizeof**\ (**struct tcphdr**)).
+ * Return
+ * On success, the lower 32 bits hold the generated SYN cookie,
+ * followed by 16 bits which hold the MSS value for that cookie,
+ * and the top 16 bits are unused.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EINVAL** if *th_len* is invalid.
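+ *
+ * For illustration, the cookie and MSS can be unpacked from a
+ * successful return value as:
+ *
+ * __u32 cookie = (__u32)ret;
+ * __u16 mss = (__u16)(ret >> 32);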
+ *
+ * s64 bpf_tcp_raw_gen_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th, u32 th_len)
+ * Description
+ * Try to issue a SYN cookie for the packet with corresponding
+ * IPv6/TCP headers, *iph* and *th*, without depending on a
+ * listening socket.
+ *
+ * *iph* points to the IPv6 header.
+ *
+ * *th* points to the start of the TCP header, while *th_len*
+ * contains the length of the TCP header (at least
+ * **sizeof**\ (**struct tcphdr**)).
+ * Return
+ * On success, the lower 32 bits hold the generated SYN cookie,
+ * followed by 16 bits which hold the MSS value for that cookie,
+ * and the top 16 bits are unused.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EINVAL** if *th_len* is invalid.
+ *
+ * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
+ *
+ * long bpf_tcp_raw_check_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th)
+ * Description
+ * Check whether *iph* and *th* contain a valid SYN cookie ACK
+ * without depending on a listening socket.
+ *
+ * *iph* points to the IPv4 header.
+ *
+ * *th* points to the TCP header.
+ * Return
+ * 0 if *iph* and *th* are a valid SYN cookie ACK.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EACCES** if the SYN cookie is not valid.
+ *
+ * long bpf_tcp_raw_check_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th)
+ * Description
+ * Check whether *iph* and *th* contain a valid SYN cookie ACK
+ * without depending on a listening socket.
+ *
+ * *iph* points to the IPv6 header.
+ *
+ * *th* points to the TCP header.
+ * Return
+ * 0 if *iph* and *th* are a valid SYN cookie ACK.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EACCES** if the SYN cookie is not valid.
+ *
+ * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
+ *
+ * u64 bpf_ktime_get_tai_ns(void)
+ * Description
+ * A nonsettable system-wide clock derived from wall-clock time but
+ * ignoring leap seconds. This clock does not experience
+ * discontinuities or backwards jumps caused by NTP inserting leap
+ * seconds as CLOCK_REALTIME does.
+ *
+ * See: **clock_gettime**\ (**CLOCK_TAI**)
+ * Return
+ * Current *ktime*.
+ *
+ * long bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn, void *ctx, u64 flags)
+ * Description
+ * Drain samples from the specified user ring buffer, and invoke
+ * the provided callback for each such sample:
+ *
+ * long (\*callback_fn)(struct bpf_dynptr \*dynptr, void \*ctx);
+ *
+ * If **callback_fn** returns 0, the helper will continue to try
+ * and drain the next sample, up to a maximum of
+ * BPF_MAX_USER_RINGBUF_SAMPLES samples. If the return value is 1,
+ * the helper will skip the rest of the samples and return. Other
+ * return values are not used now, and will be rejected by the
+ * verifier.
+ * Return
+ * The number of drained samples if no error was encountered while
+ * draining samples, or 0 if no samples were present in the ring
+ * buffer. If a user-space producer was epoll-waiting on this map,
+ * and at least one sample was drained, it will receive an event
+ * notifying it of available space in the ring
+ * buffer. If the BPF_RB_NO_WAKEUP flag is passed to this
+ * function, no wakeup notification will be sent. If the
+ * BPF_RB_FORCE_WAKEUP flag is passed, a wakeup notification will
+ * be sent even if no sample was drained.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EBUSY** if the ring buffer is contended, and another calling
+ * context was concurrently draining the ring buffer.
+ *
+ * **-EINVAL** if user-space is not properly tracking the ring
+ * buffer due to the producer position not being aligned to 8
+ * bytes, a sample not being aligned to 8 bytes, or the producer
+ * position not matching the advertised length of a sample.
+ *
+ * **-E2BIG** if user-space has tried to publish a sample which is
+ * larger than the size of the ring buffer, or which cannot fit
+ * within a struct bpf_dynptr.
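+ *
+ * A hedged sketch of a conforming callback (the sample layout
+ * is illustrative); returning 1 skips the remaining samples
+ * while returning 0 continues draining:
+ *
+ * static long handle_sample(struct bpf_dynptr \*dynptr, void \*ctx)
+ * {
+ * struct my_sample s;
+ *
+ * if (bpf_dynptr_read(&s, sizeof(s), dynptr, 0, 0))
+ * return 1;
+ * return 0;
+ * }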
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -3010,7 +5555,99 @@ union bpf_attr {
FN(probe_read_kernel_str), \
FN(tcp_send_ack), \
FN(send_signal_thread), \
- FN(jiffies64),
+ FN(jiffies64), \
+ FN(read_branch_records), \
+ FN(get_ns_current_pid_tgid), \
+ FN(xdp_output), \
+ FN(get_netns_cookie), \
+ FN(get_current_ancestor_cgroup_id), \
+ FN(sk_assign), \
+ FN(ktime_get_boot_ns), \
+ FN(seq_printf), \
+ FN(seq_write), \
+ FN(sk_cgroup_id), \
+ FN(sk_ancestor_cgroup_id), \
+ FN(ringbuf_output), \
+ FN(ringbuf_reserve), \
+ FN(ringbuf_submit), \
+ FN(ringbuf_discard), \
+ FN(ringbuf_query), \
+ FN(csum_level), \
+ FN(skc_to_tcp6_sock), \
+ FN(skc_to_tcp_sock), \
+ FN(skc_to_tcp_timewait_sock), \
+ FN(skc_to_tcp_request_sock), \
+ FN(skc_to_udp6_sock), \
+ FN(get_task_stack), \
+ FN(load_hdr_opt), \
+ FN(store_hdr_opt), \
+ FN(reserve_hdr_opt), \
+ FN(inode_storage_get), \
+ FN(inode_storage_delete), \
+ FN(d_path), \
+ FN(copy_from_user), \
+ FN(snprintf_btf), \
+ FN(seq_printf_btf), \
+ FN(skb_cgroup_classid), \
+ FN(redirect_neigh), \
+ FN(per_cpu_ptr), \
+ FN(this_cpu_ptr), \
+ FN(redirect_peer), \
+ FN(task_storage_get), \
+ FN(task_storage_delete), \
+ FN(get_current_task_btf), \
+ FN(bprm_opts_set), \
+ FN(ktime_get_coarse_ns), \
+ FN(ima_inode_hash), \
+ FN(sock_from_file), \
+ FN(check_mtu), \
+ FN(for_each_map_elem), \
+ FN(snprintf), \
+ FN(sys_bpf), \
+ FN(btf_find_by_name_kind), \
+ FN(sys_close), \
+ FN(timer_init), \
+ FN(timer_set_callback), \
+ FN(timer_start), \
+ FN(timer_cancel), \
+ FN(get_func_ip), \
+ FN(get_attach_cookie), \
+ FN(task_pt_regs), \
+ FN(get_branch_snapshot), \
+ FN(trace_vprintk), \
+ FN(skc_to_unix_sock), \
+ FN(kallsyms_lookup_name), \
+ FN(find_vma), \
+ FN(loop), \
+ FN(strncmp), \
+ FN(get_func_arg), \
+ FN(get_func_ret), \
+ FN(get_func_arg_cnt), \
+ FN(get_retval), \
+ FN(set_retval), \
+ FN(xdp_get_buff_len), \
+ FN(xdp_load_bytes), \
+ FN(xdp_store_bytes), \
+ FN(copy_from_user_task), \
+ FN(skb_set_tstamp), \
+ FN(ima_file_hash), \
+ FN(kptr_xchg), \
+ FN(map_lookup_percpu_elem), \
+ FN(skc_to_mptcp_sock), \
+ FN(dynptr_from_mem), \
+ FN(ringbuf_reserve_dynptr), \
+ FN(ringbuf_submit_dynptr), \
+ FN(ringbuf_discard_dynptr), \
+ FN(dynptr_read), \
+ FN(dynptr_write), \
+ FN(dynptr_data), \
+ FN(tcp_raw_gen_syncookie_ipv4), \
+ FN(tcp_raw_gen_syncookie_ipv6), \
+ FN(tcp_raw_check_syncookie_ipv4), \
+ FN(tcp_raw_check_syncookie_ipv6), \
+ FN(ktime_get_tai_ns), \
+ FN(user_ringbuf_drain), \
+ /* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -3025,69 +5662,148 @@ enum bpf_func_id {
/* All flags used by eBPF helper functions, placed here. */
/* BPF_FUNC_skb_store_bytes flags. */
-#define BPF_F_RECOMPUTE_CSUM (1ULL << 0)
-#define BPF_F_INVALIDATE_HASH (1ULL << 1)
+enum {
+ BPF_F_RECOMPUTE_CSUM = (1ULL << 0),
+ BPF_F_INVALIDATE_HASH = (1ULL << 1),
+};
/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
* First 4 bits are for passing the header field size.
*/
-#define BPF_F_HDR_FIELD_MASK 0xfULL
+enum {
+ BPF_F_HDR_FIELD_MASK = 0xfULL,
+};
/* BPF_FUNC_l4_csum_replace flags. */
-#define BPF_F_PSEUDO_HDR (1ULL << 4)
-#define BPF_F_MARK_MANGLED_0 (1ULL << 5)
-#define BPF_F_MARK_ENFORCE (1ULL << 6)
+enum {
+ BPF_F_PSEUDO_HDR = (1ULL << 4),
+ BPF_F_MARK_MANGLED_0 = (1ULL << 5),
+ BPF_F_MARK_ENFORCE = (1ULL << 6),
+};
/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
-#define BPF_F_INGRESS (1ULL << 0)
+enum {
+ BPF_F_INGRESS = (1ULL << 0),
+};
/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
-#define BPF_F_TUNINFO_IPV6 (1ULL << 0)
+enum {
+ BPF_F_TUNINFO_IPV6 = (1ULL << 0),
+};
/* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */
-#define BPF_F_SKIP_FIELD_MASK 0xffULL
-#define BPF_F_USER_STACK (1ULL << 8)
+enum {
+ BPF_F_SKIP_FIELD_MASK = 0xffULL,
+ BPF_F_USER_STACK = (1ULL << 8),
/* flags used by BPF_FUNC_get_stackid only. */
-#define BPF_F_FAST_STACK_CMP (1ULL << 9)
-#define BPF_F_REUSE_STACKID (1ULL << 10)
+ BPF_F_FAST_STACK_CMP = (1ULL << 9),
+ BPF_F_REUSE_STACKID = (1ULL << 10),
/* flags used by BPF_FUNC_get_stack only. */
-#define BPF_F_USER_BUILD_ID (1ULL << 11)
+ BPF_F_USER_BUILD_ID = (1ULL << 11),
+};
/* BPF_FUNC_skb_set_tunnel_key flags. */
-#define BPF_F_ZERO_CSUM_TX (1ULL << 1)
-#define BPF_F_DONT_FRAGMENT (1ULL << 2)
-#define BPF_F_SEQ_NUMBER (1ULL << 3)
+enum {
+ BPF_F_ZERO_CSUM_TX = (1ULL << 1),
+ BPF_F_DONT_FRAGMENT = (1ULL << 2),
+ BPF_F_SEQ_NUMBER = (1ULL << 3),
+};
+
+/* BPF_FUNC_skb_get_tunnel_key flags. */
+enum {
+ BPF_F_TUNINFO_FLAGS = (1ULL << 4),
+};
/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
* BPF_FUNC_perf_event_read_value flags.
*/
-#define BPF_F_INDEX_MASK 0xffffffffULL
-#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK
+enum {
+ BPF_F_INDEX_MASK = 0xffffffffULL,
+ BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK,
/* BPF_FUNC_perf_event_output for sk_buff input context. */
-#define BPF_F_CTXLEN_MASK (0xfffffULL << 32)
+ BPF_F_CTXLEN_MASK = (0xfffffULL << 32),
+};
/* Current network namespace */
-#define BPF_F_CURRENT_NETNS (-1L)
+enum {
+ BPF_F_CURRENT_NETNS = (-1L),
+};
+
+/* BPF_FUNC_csum_level level values. */
+enum {
+ BPF_CSUM_LEVEL_QUERY,
+ BPF_CSUM_LEVEL_INC,
+ BPF_CSUM_LEVEL_DEC,
+ BPF_CSUM_LEVEL_RESET,
+};
/* BPF_FUNC_skb_adjust_room flags. */
-#define BPF_F_ADJ_ROOM_FIXED_GSO (1ULL << 0)
+enum {
+ BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0),
+ BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = (1ULL << 1),
+ BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2),
+ BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3),
+ BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4),
+ BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5),
+ BPF_F_ADJ_ROOM_ENCAP_L2_ETH = (1ULL << 6),
+};
-#define BPF_ADJ_ROOM_ENCAP_L2_MASK 0xff
-#define BPF_ADJ_ROOM_ENCAP_L2_SHIFT 56
+enum {
+ BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff,
+ BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56,
+};
-#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 (1ULL << 1)
-#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 (1ULL << 2)
-#define BPF_F_ADJ_ROOM_ENCAP_L4_GRE (1ULL << 3)
-#define BPF_F_ADJ_ROOM_ENCAP_L4_UDP (1ULL << 4)
#define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & \
BPF_ADJ_ROOM_ENCAP_L2_MASK) \
<< BPF_ADJ_ROOM_ENCAP_L2_SHIFT)
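+
+/* Illustrative (non-normative) composition of the encap flags,
+ * growing room for an outer IPv4/GRE header with a 14-byte L2
+ * header:
+ *
+ * __u64 flags = BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 |
+ * BPF_F_ADJ_ROOM_ENCAP_L4_GRE |
+ * BPF_F_ADJ_ROOM_ENCAP_L2(14);
+ */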
/* BPF_FUNC_sysctl_get_name flags. */
-#define BPF_F_SYSCTL_BASE_NAME (1ULL << 0)
+enum {
+ BPF_F_SYSCTL_BASE_NAME = (1ULL << 0),
+};
+
+/* BPF_FUNC_<kernel_obj>_storage_get flags */
+enum {
+ BPF_LOCAL_STORAGE_GET_F_CREATE = (1ULL << 0),
+ /* BPF_SK_STORAGE_GET_F_CREATE is only kept for backward compatibility
+ * and BPF_LOCAL_STORAGE_GET_F_CREATE must be used instead.
+ */
+ BPF_SK_STORAGE_GET_F_CREATE = BPF_LOCAL_STORAGE_GET_F_CREATE,
+};
+
+/* BPF_FUNC_read_branch_records flags. */
+enum {
+ BPF_F_GET_BRANCH_RECORDS_SIZE = (1ULL << 0),
+};
+
+/* BPF_FUNC_bpf_ringbuf_commit, BPF_FUNC_bpf_ringbuf_discard, and
+ * BPF_FUNC_bpf_ringbuf_output flags.
+ */
+enum {
+ BPF_RB_NO_WAKEUP = (1ULL << 0),
+ BPF_RB_FORCE_WAKEUP = (1ULL << 1),
+};
+
+/* BPF_FUNC_bpf_ringbuf_query flags */
+enum {
+ BPF_RB_AVAIL_DATA = 0,
+ BPF_RB_RING_SIZE = 1,
+ BPF_RB_CONS_POS = 2,
+ BPF_RB_PROD_POS = 3,
+};
+
+/* BPF ring buffer constants */
+enum {
+ BPF_RINGBUF_BUSY_BIT = (1U << 31),
+ BPF_RINGBUF_DISCARD_BIT = (1U << 30),
+ BPF_RINGBUF_HDR_SZ = 8,
+};
-/* BPF_FUNC_sk_storage_get flags */
-#define BPF_SK_STORAGE_GET_F_CREATE (1ULL << 0)
+/* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. */
+enum {
+ BPF_SK_LOOKUP_F_REPLACE = (1ULL << 0),
+ BPF_SK_LOOKUP_F_NO_REUSEPORT = (1ULL << 1),
+};
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
@@ -3108,12 +5824,32 @@ enum bpf_lwt_encap_mode {
BPF_LWT_ENCAP_IP,
};
+/* Flags for bpf_bprm_opts_set helper */
+enum {
+ BPF_F_BPRM_SECUREEXEC = (1ULL << 0),
+};
+
+/* Flags for bpf_redirect_map helper */
+enum {
+ BPF_F_BROADCAST = (1ULL << 3),
+ BPF_F_EXCLUDE_INGRESS = (1ULL << 4),
+};
+
#define __bpf_md_ptr(type, name) \
union { \
type name; \
__u64 :64; \
} __attribute__((aligned(8)))
+enum {
+ BPF_SKB_TSTAMP_UNSPEC,
+ BPF_SKB_TSTAMP_DELIVERY_MONO, /* tstamp has mono delivery time */
+ /* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle,
+ * the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC
+ * and try to deduce it by ingress, egress or skb->sk->sk_clockid.
+ */
+};
+
/* user accessible mirror of in-kernel sk_buff.
* new fields can only be added to the end of this structure
*/
@@ -3153,6 +5889,10 @@ struct __sk_buff {
__u32 wire_len;
__u32 gso_segs;
__bpf_md_ptr(struct bpf_sock *, sk);
+ __u32 gso_size;
+ __u8 tstamp_type;
+ __u32 :24; /* Padding, future use. */
+ __u64 hwtstamp;
};
struct bpf_tunnel_key {
@@ -3163,8 +5903,15 @@ struct bpf_tunnel_key {
};
__u8 tunnel_tos;
__u8 tunnel_ttl;
- __u16 tunnel_ext; /* Padding, future use. */
+ union {
+ __u16 tunnel_ext; /* compat */
+ __be16 tunnel_flags;
+ };
__u32 tunnel_label;
+ union {
+ __u32 local_ipv4;
+ __u32 local_ipv6[4];
+ };
};
/* user accessible mirror of in-kernel xfrm_state.
@@ -3203,6 +5950,11 @@ enum bpf_ret_code {
* represented by BPF_REDIRECT above).
*/
BPF_LWT_REROUTE = 128,
+ /* BPF_FLOW_DISSECTOR_CONTINUE: used by BPF_PROG_TYPE_FLOW_DISSECTOR
+ * to indicate that no custom dissection was performed, and
+ * fallback to standard dissector is requested.
+ */
+ BPF_FLOW_DISSECTOR_CONTINUE = 129,
};
struct bpf_sock {
@@ -3216,10 +5968,12 @@ struct bpf_sock {
__u32 src_ip4;
__u32 src_ip6[4];
__u32 src_port; /* host byte order */
- __u32 dst_port; /* network byte order */
+ __be16 dst_port; /* network byte order */
+ __u16 :16; /* zero padding */
__u32 dst_ip4;
__u32 dst_ip6[4];
__u32 state;
+ __s32 rx_queue_mapping;
};
struct bpf_tcp_sock {
@@ -3313,6 +6067,34 @@ struct xdp_md {
/* Below access go through struct xdp_rxq_info */
__u32 ingress_ifindex; /* rxq->dev->ifindex */
__u32 rx_queue_index; /* rxq->queue_index */
+
+ __u32 egress_ifindex; /* txq->dev->ifindex */
+};
+
+/* DEVMAP map-value layout
+ *
+ * The struct data-layout of map-value is a configuration interface.
+ * New members can only be added to the end of this structure.
+ */
+struct bpf_devmap_val {
+ __u32 ifindex; /* device index */
+ union {
+ int fd; /* prog fd on map write */
+ __u32 id; /* prog id on map read */
+ } bpf_prog;
+};
+
+/* CPUMAP map-value layout
+ *
+ * The struct data-layout of map-value is a configuration interface.
+ * New members can only be added to the end of this structure.
+ */
+struct bpf_cpumap_val {
+ __u32 qsize; /* queue size to remote target CPU */
+ union {
+ int fd; /* prog fd on map write */
+ __u32 id; /* prog id on map read */
+ } bpf_prog;
};
enum sk_action {
@@ -3335,6 +6117,8 @@ struct sk_msg_md {
__u32 remote_port; /* Stored in network byte order */
__u32 local_port; /* stored in host byte order */
__u32 size; /* Total size of sk_msg */
+
+ __bpf_md_ptr(struct bpf_sock *, sk); /* current socket */
};
struct sk_reuseport_md {
@@ -3360,6 +6144,20 @@ struct sk_reuseport_md {
__u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
__u32 bind_inany; /* Is sock bound to an INANY address? */
__u32 hash; /* A hash of the packet 4 tuples */
+ /* When reuse->migrating_sk is NULL, it is selecting a sk for the
+ * new incoming connection request (e.g. selecting a listen sk for
+ * the received SYN in the TCP case). reuse->sk is one of the sk
+ * in the reuseport group. The bpf prog can use reuse->sk to learn
+ * the local listening ip/port without looking into the skb.
+ *
+ * When reuse->migrating_sk is not NULL, reuse->sk is closed and
+ * reuse->migrating_sk is the socket that needs to be migrated
+ * to another listening socket. migrating_sk could be a fullsock
+ * sk that is fully established or a reqsk that is in-the-middle
+ * of 3-way handshake.
+ */
+ __bpf_md_ptr(struct bpf_sock *, sk);
+ __bpf_md_ptr(struct bpf_sock *, migrating_sk);
};
#define BPF_TAG_SIZE 8
@@ -3400,6 +6198,10 @@ struct bpf_prog_info {
__aligned_u64 prog_tags;
__u64 run_time_ns;
__u64 run_cnt;
+ __u64 recursion_misses;
+ __u32 verified_insns;
+ __u32 attach_btf_obj_id;
+ __u32 attach_btf_id;
} __attribute__((aligned(8)));
struct bpf_map_info {
@@ -3417,17 +6219,74 @@ struct bpf_map_info {
__u32 btf_id;
__u32 btf_key_type_id;
__u32 btf_value_type_id;
+ __u32 :32; /* alignment pad */
+ __u64 map_extra;
} __attribute__((aligned(8)));
struct bpf_btf_info {
__aligned_u64 btf;
__u32 btf_size;
__u32 id;
+ __aligned_u64 name;
+ __u32 name_len;
+ __u32 kernel_btf;
+} __attribute__((aligned(8)));
+
+struct bpf_link_info {
+ __u32 type;
+ __u32 id;
+ __u32 prog_id;
+ union {
+ struct {
+ __aligned_u64 tp_name; /* in/out: tp_name buffer ptr */
+ __u32 tp_name_len; /* in/out: tp_name buffer len */
+ } raw_tracepoint;
+ struct {
+ __u32 attach_type;
+ __u32 target_obj_id; /* prog_id for PROG_EXT, otherwise btf object id */
+ __u32 target_btf_id; /* BTF type id inside the object */
+ } tracing;
+ struct {
+ __u64 cgroup_id;
+ __u32 attach_type;
+ } cgroup;
+ struct {
+ __aligned_u64 target_name; /* in/out: target_name buffer ptr */
+ __u32 target_name_len; /* in/out: target_name buffer len */
+
+ /* If the iter specific field is 32 bits, it can be put
+ * in the first or second union. Otherwise it should be
+ * put in the second union.
+ */
+ union {
+ struct {
+ __u32 map_id;
+ } map;
+ };
+ union {
+ struct {
+ __u64 cgroup_id;
+ __u32 order;
+ } cgroup;
+ struct {
+ __u32 tid;
+ __u32 pid;
+ } task;
+ };
+ } iter;
+ struct {
+ __u32 netns_ino;
+ __u32 attach_type;
+ } netns;
+ struct {
+ __u32 ifindex;
+ } xdp;
+ };
} __attribute__((aligned(8)));
/* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
* by user and intended to be used by socket (e.g. to bind to, depends on
- * attach attach type).
+ * attach type).
*/
struct bpf_sock_addr {
__u32 user_family; /* Allows 4-byte read, but no write. */
@@ -3437,7 +6296,7 @@ struct bpf_sock_addr {
__u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write.
* Stored in network byte order.
*/
- __u32 user_port; /* Allows 4-byte read and write.
+ __u32 user_port; /* Allows 1,2,4-byte read and 4-byte write.
* Stored in network byte order
*/
__u32 family; /* Allows 4-byte read, but no write */
@@ -3502,16 +6361,90 @@ struct bpf_sock_ops {
__u64 bytes_received;
__u64 bytes_acked;
__bpf_md_ptr(struct bpf_sock *, sk);
+ /* [skb_data, skb_data_end) covers the whole TCP header.
+ *
+ * BPF_SOCK_OPS_PARSE_HDR_OPT_CB: The packet received
+ * BPF_SOCK_OPS_HDR_OPT_LEN_CB: Not useful because the
+ * header has not been written.
+ * BPF_SOCK_OPS_WRITE_HDR_OPT_CB: The header and options have
+ * been written so far.
+ * BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: The SYNACK that concludes
+ * the 3WHS.
+ * BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: The ACK that concludes
+ * the 3WHS.
+ *
+ * bpf_load_hdr_opt() can also be used to read a particular option.
+ */
+ __bpf_md_ptr(void *, skb_data);
+ __bpf_md_ptr(void *, skb_data_end);
+ __u32 skb_len; /* The total length of a packet.
+ * It includes the header, options,
+ * and payload.
+ */
+ __u32 skb_tcp_flags; /* tcp_flags of the header. It provides
+ * an easy way to check for tcp_flags
+ * without parsing skb_data.
+ *
+ * In particular, the skb_tcp_flags
+ * will still be available in
+ * BPF_SOCK_OPS_HDR_OPT_LEN even though
+ * the outgoing header has not
+ * been written yet.
+ */
};
/* Definitions for bpf_sock_ops_cb_flags */
-#define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0)
-#define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1)
-#define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2)
-#define BPF_SOCK_OPS_RTT_CB_FLAG (1<<3)
-#define BPF_SOCK_OPS_ALL_CB_FLAGS 0xF /* Mask of all currently
- * supported cb flags
- */
+enum {
+ BPF_SOCK_OPS_RTO_CB_FLAG = (1<<0),
+ BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1),
+ BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2),
+ BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3),
+ /* Call bpf for all received TCP headers. The bpf prog will be
+ * called under sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB
+ *
+ * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB
+ * for the header option related helpers that will be useful
+ * to the bpf programs.
+ *
+ * It could be used at the client/active side (i.e. connect() side)
+ * when the server told it that the server was in syncookie
+ * mode and required the active side to resend the bpf-written
+ * options. The active side can keep writing the bpf-options until
+ * it received a valid packet from the server side to confirm
+ * the earlier packet (and options) has been received. The later
+ * example patch is using it like this at the active side when the
+ * server is in syncookie mode.
+ *
+ * The bpf prog will usually turn this off in the common cases.
+ */
+ BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = (1<<4),
+ /* Call bpf when kernel has received a header option that
+ * the kernel cannot handle. The bpf prog will be called under
+ * sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB.
+ *
+ * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB
+ * for the header option related helpers that will be useful
+ * to the bpf programs.
+ */
+ BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = (1<<5),
+ /* Call bpf when the kernel is writing header options for the
+ * outgoing packet. The bpf prog will first be called
+ * to reserve space in a skb under
+ * sock_ops->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB. Then
+ * the bpf prog will be called to write the header option(s)
+ * under sock_ops->op == BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ *
+ * Please refer to the comment in BPF_SOCK_OPS_HDR_OPT_LEN_CB
+ * and BPF_SOCK_OPS_WRITE_HDR_OPT_CB for the header option
+ * related helpers that will be useful to the bpf programs.
+ *
+ * The kernel gets its chance to reserve space and write
+ * options first before the BPF program does.
+ */
+ BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = (1<<6),
+/* Mask of all currently supported cb flags */
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7F,
+};
/* List of known BPF sock_ops operators.
* New entries can only be added at the end
@@ -3566,6 +6499,63 @@ enum {
*/
BPF_SOCK_OPS_RTT_CB, /* Called on every RTT.
*/
+ BPF_SOCK_OPS_PARSE_HDR_OPT_CB, /* Parse the header option.
+ * It will be called to handle
+ * the packets received at
+ * an already established
+ * connection.
+ *
+ * sock_ops->skb_data:
+ * Referring to the received skb.
+ * It covers the TCP header only.
+ *
+ * bpf_load_hdr_opt() can also
+ * be used to search for a
+ * particular option.
+ */
+ BPF_SOCK_OPS_HDR_OPT_LEN_CB, /* Reserve space for writing the
+ * header option later in
+ * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ * Arg1: bool want_cookie. (in
+ * writing SYNACK only)
+ *
+ * sock_ops->skb_data:
+ * Not available because no header has
+ * been written yet.
+ *
+ * sock_ops->skb_tcp_flags:
+ * The tcp_flags of the
+ * outgoing skb. (e.g. SYN, ACK, FIN).
+ *
+ * bpf_reserve_hdr_opt() should
+ * be used to reserve space.
+ */
+ BPF_SOCK_OPS_WRITE_HDR_OPT_CB, /* Write the header options
+ * Arg1: bool want_cookie. (in
+ * writing SYNACK only)
+ *
+ * sock_ops->skb_data:
+ * Referring to the outgoing skb.
+ * It covers the TCP header
+ * that has already been written
+ * by the kernel and the
+ * earlier bpf-progs.
+ *
+ * sock_ops->skb_tcp_flags:
+ * The tcp_flags of the outgoing
+ * skb. (e.g. SYN, ACK, FIN).
+ *
+ * bpf_store_hdr_opt() should
+ * be used to write the
+ * option.
+ *
+ * bpf_load_hdr_opt() can also
+ * be used to search for a
+ * particular option that
+ * has already been written
+ * by the kernel or the
+ * earlier bpf-progs.
+ */
};
/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
@@ -3590,8 +6580,67 @@ enum {
BPF_TCP_MAX_STATES /* Leave at the end! */
};
-#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */
-#define TCP_BPF_SNDCWND_CLAMP 1002 /* Set sndcwnd_clamp */
+enum {
+ TCP_BPF_IW = 1001, /* Set TCP initial congestion window */
+ TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */
+ TCP_BPF_DELACK_MAX = 1003, /* Max delay ack in usecs */
+ TCP_BPF_RTO_MIN = 1004, /* Min RTO in usecs */
+ /* Copy the SYN pkt to optval
+ *
+ * BPF_PROG_TYPE_SOCK_OPS only. It is similar to the
+ * bpf_getsockopt(TCP_SAVED_SYN) but it does not limit
+ * to only getting from the saved_syn. It can either get the
+ * syn packet from:
+ *
+ * 1. the just-received SYN packet (only available when writing the
+ * SYNACK). It will be useful when it is not necessary to
+ * save the SYN packet for latter use. It is also the only way
+ * to get the SYN during syncookie mode because the syn
+ * packet cannot be saved during syncookie.
+ *
+ * OR
+ *
+ * 2. the earlier saved syn which was done by
+ * bpf_setsockopt(TCP_SAVE_SYN).
+ *
+ * The bpf_getsockopt(TCP_BPF_SYN*) option will hide where the
+ * SYN packet is obtained.
+ *
+ * If the bpf-prog does not need the IP[46] header, the
+ * bpf-prog can avoid parsing the IP header by using
+ * TCP_BPF_SYN. Otherwise, the bpf-prog can get both
+ * IP[46] and TCP header by using TCP_BPF_SYN_IP.
+ *
+ * >0: Total number of bytes copied
+ * -ENOSPC: Not enough space in optval. Only optlen bytes are
+ * copied.
+ * -ENOENT: The SYN skb is not available now and the earlier SYN pkt
+ * was not saved by setsockopt(TCP_SAVE_SYN).
+ */
+ TCP_BPF_SYN = 1005, /* Copy the TCP header */
+ TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */
+ TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */
+};
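+
+/* A hedged usage sketch from a sock_ops program (the buffer size
+ * is arbitrary; error handling omitted):
+ *
+ * char syn[128];
+ * int ret = bpf_getsockopt(skops, IPPROTO_TCP, TCP_BPF_SYN_IP,
+ * syn, sizeof(syn));
+ */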
+
+enum {
+ BPF_LOAD_HDR_OPT_TCP_SYN = (1ULL << 0),
+};
+
+/* args[0] value during BPF_SOCK_OPS_HDR_OPT_LEN_CB and
+ * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ */
+enum {
+ BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, /* Kernel is finding the
+ * total option spaces
+ * required for an established
+ * sk in order to calculate the
+ * MSS. No skb is actually
+ * sent.
+ */
+ BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, /* Kernel is in syncookie mode
+ * when sending a SYN.
+ */
+};
struct bpf_perf_event_value {
__u64 counter;
@@ -3599,12 +6648,16 @@ struct bpf_perf_event_value {
__u64 running;
};
-#define BPF_DEVCG_ACC_MKNOD (1ULL << 0)
-#define BPF_DEVCG_ACC_READ (1ULL << 1)
-#define BPF_DEVCG_ACC_WRITE (1ULL << 2)
+enum {
+ BPF_DEVCG_ACC_MKNOD = (1ULL << 0),
+ BPF_DEVCG_ACC_READ = (1ULL << 1),
+ BPF_DEVCG_ACC_WRITE = (1ULL << 2),
+};
-#define BPF_DEVCG_DEV_BLOCK (1ULL << 0)
-#define BPF_DEVCG_DEV_CHAR (1ULL << 1)
+enum {
+ BPF_DEVCG_DEV_BLOCK = (1ULL << 0),
+ BPF_DEVCG_DEV_CHAR = (1ULL << 1),
+};
struct bpf_cgroup_dev_ctx {
/* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
@@ -3620,8 +6673,10 @@ struct bpf_raw_tracepoint_args {
/* DIRECT: Skip the FIB rules and go to FIB table associated with device
* OUTPUT: Do lookup from egress perspective; default is ingress
*/
-#define BPF_FIB_LOOKUP_DIRECT (1U << 0)
-#define BPF_FIB_LOOKUP_OUTPUT (1U << 1)
+enum {
+ BPF_FIB_LOOKUP_DIRECT = (1U << 0),
+ BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
+};
enum {
BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */
@@ -3646,9 +6701,13 @@ struct bpf_fib_lookup {
__be16 sport;
__be16 dport;
- /* total length of packet from network header - used for MTU check */
- __u16 tot_len;
+ union { /* used for MTU check */
+ /* input to lookup */
+ __u16 tot_len; /* L3 length from network hdr (iph->tot_len) */
+ /* output: MTU value */
+ __u16 mtu_result;
+ };
/* input: L3 device index for lookup
* output: device index from FIB lookup
*/
@@ -3684,6 +6743,27 @@ struct bpf_fib_lookup {
__u8 dmac[6]; /* ETH_ALEN */
};
+struct bpf_redir_neigh {
+ /* network family for lookup (AF_INET, AF_INET6) */
+ __u32 nh_family;
+ /* network address of nexthop; skips fib lookup to find gateway */
+ union {
+ __be32 ipv4_nh;
+ __u32 ipv6_nh[4]; /* in6_addr; network order */
+ };
+};
+
+/* bpf_check_mtu flags */
+enum bpf_check_mtu_flags {
+ BPF_MTU_CHK_SEGS = (1U << 0),
+};
+
+enum bpf_check_mtu_ret {
+ BPF_MTU_CHK_RET_SUCCESS, /* check and lookup successful */
+ BPF_MTU_CHK_RET_FRAG_NEEDED, /* fragmentation required to fwd */
+ BPF_MTU_CHK_RET_SEGS_TOOBIG, /* GSO re-segmentation needed to fwd */
+};
+
enum bpf_task_fd_type {
BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */
BPF_FD_TYPE_TRACEPOINT, /* tp name */
@@ -3693,9 +6773,11 @@ enum bpf_task_fd_type {
BPF_FD_TYPE_URETPROBE, /* filename + offset */
};
-#define BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG (1U << 0)
-#define BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL (1U << 1)
-#define BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP (1U << 2)
+enum {
+ BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = (1U << 0),
+ BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = (1U << 1),
+ BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = (1U << 2),
+};
struct bpf_flow_keys {
__u16 nhoff;
@@ -3741,6 +6823,16 @@ struct bpf_spin_lock {
__u32 val;
};
+struct bpf_timer {
+ __u64 :64;
+ __u64 :64;
+} __attribute__((aligned(8)));
+
+struct bpf_dynptr {
+ __u64 :64;
+ __u64 :64;
+} __attribute__((aligned(8)));
+
struct bpf_sysctl {
__u32 write; /* Sysctl is being read (= 0) or written (= 1).
* Allows 1,2,4-byte read, but no write.
@@ -3761,4 +6853,133 @@ struct bpf_sockopt {
__s32 retval;
};
+struct bpf_pidns_info {
+ __u32 pid;
+ __u32 tgid;
+};
+
+/* User accessible data for SK_LOOKUP programs. Add new fields at the end. */
+struct bpf_sk_lookup {
+ union {
+ __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
+ __u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */
+ };
+
+ __u32 family; /* Protocol family (AF_INET, AF_INET6) */
+ __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
+ __u32 remote_ip4; /* Network byte order */
+ __u32 remote_ip6[4]; /* Network byte order */
+ __be16 remote_port; /* Network byte order */
+ __u16 :16; /* Zero padding */
+ __u32 local_ip4; /* Network byte order */
+ __u32 local_ip6[4]; /* Network byte order */
+ __u32 local_port; /* Host byte order */
+ __u32 ingress_ifindex; /* The arriving interface. Determined by inet_iif. */
+};
+
+/*
+ * struct btf_ptr is used for typed pointer representation; the
+ * type id is used to render the pointer data as the appropriate type
+ * via the bpf_snprintf_btf() helper described above. A flags field -
+ * potentially to specify additional details about the BTF pointer
+ * (rather than its mode of display) - is included for future use.
+ * Display flags - BTF_F_* - are passed to bpf_snprintf_btf separately.
+ */
+struct btf_ptr {
+ void *ptr;
+ __u32 type_id;
+ __u32 flags; /* BTF ptr flags; unused at present. */
+};
+
+/*
+ * Flags to control bpf_snprintf_btf() behaviour.
+ * - BTF_F_COMPACT: no formatting around type information
+ * - BTF_F_NONAME: no struct/union member names/types
+ * - BTF_F_PTR_RAW: show raw (unobfuscated) pointer values;
+ * equivalent to %px.
+ * - BTF_F_ZERO: show zero-valued struct/union members; they
+ * are not displayed by default
+ */
+enum {
+ BTF_F_COMPACT = (1ULL << 0),
+ BTF_F_NONAME = (1ULL << 1),
+ BTF_F_PTR_RAW = (1ULL << 2),
+ BTF_F_ZERO = (1ULL << 3),
+};
+
+/* bpf_core_relo_kind encodes which aspect of captured field/type/enum value
+ * has to be adjusted by relocations. It is emitted by llvm and passed to
+ * libbpf and later to the kernel.
+ */
+enum bpf_core_relo_kind {
+ BPF_CORE_FIELD_BYTE_OFFSET = 0, /* field byte offset */
+ BPF_CORE_FIELD_BYTE_SIZE = 1, /* field size in bytes */
+ BPF_CORE_FIELD_EXISTS = 2, /* field existence in target kernel */
+ BPF_CORE_FIELD_SIGNED = 3, /* field signedness (0 - unsigned, 1 - signed) */
+ BPF_CORE_FIELD_LSHIFT_U64 = 4, /* bitfield-specific left bitshift */
+ BPF_CORE_FIELD_RSHIFT_U64 = 5, /* bitfield-specific right bitshift */
+ BPF_CORE_TYPE_ID_LOCAL = 6, /* type ID in local BPF object */
+ BPF_CORE_TYPE_ID_TARGET = 7, /* type ID in target kernel */
+ BPF_CORE_TYPE_EXISTS = 8, /* type existence in target kernel */
+ BPF_CORE_TYPE_SIZE = 9, /* type size in bytes */
+ BPF_CORE_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */
+ BPF_CORE_ENUMVAL_VALUE = 11, /* enum value integer value */
+ BPF_CORE_TYPE_MATCHES = 12, /* type match in target kernel */
+};
+
+/*
+ * "struct bpf_core_relo" is used to pass relocation data form LLVM to libbpf
+ * and from libbpf to the kernel.
+ *
+ * CO-RE relocation captures the following data:
+ * - insn_off - instruction offset (in bytes) within a BPF program that needs
+ * its insn->imm field to be relocated with actual field info;
+ * - type_id - BTF type ID of the "root" (containing) entity of a relocatable
+ * type or field;
+ * - access_str_off - offset into corresponding .BTF string section. String
+ * interpretation depends on specific relocation kind:
+ * - for field-based relocations, string encodes an accessed field using
+ * a sequence of field and array indices, separated by colon (:). It's
+ * conceptually very close to LLVM's getelementptr ([0]) instruction's
+ * arguments for identifying offset to a field.
+ * - for type-based relocations, the string is expected to be just "0";
+ * - for enum value-based relocations, string contains an index of enum
+ * value within its enum type;
+ * - kind - one of enum bpf_core_relo_kind;
+ *
+ * Example:
+ * struct sample {
+ * int a;
+ * struct {
+ * int b[10];
+ * };
+ * };
+ *
+ * struct sample *s = ...;
+ * int *x = &s->a; // encoded as "0:0" (a is field #0)
+ * int *y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1,
+ * // b is field #0 inside anon struct, accessing elem #5)
+ * int *z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
+ *
+ * type_id for all relocs in this example will capture BTF type id of
+ * `struct sample`.
+ *
+ * Such relocation is emitted when using __builtin_preserve_access_index()
+ * Clang built-in, passing expression that captures field address, e.g.:
+ *
+ * bpf_probe_read(&dst, sizeof(dst),
+ * __builtin_preserve_access_index(&src->a.b.c));
+ *
+ * In this case Clang will emit field relocation recording necessary data to
+ * be able to find offset of embedded `a.b.c` field within `src` struct.
+ *
+ * [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction
+ */
+struct bpf_core_relo {
+ __u32 insn_off;
+ __u32 type_id;
+ __u32 access_str_off;
+ enum bpf_core_relo_kind kind;
+};
+
#endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
index 5a667107ad2c..ec1798b6d3ff 100644
--- a/include/uapi/linux/btf.h
+++ b/include/uapi/linux/btf.h
@@ -33,17 +33,17 @@ struct btf_type {
/* "info" bits arrangement
* bits 0-15: vlen (e.g. # of struct's members)
* bits 16-23: unused
- * bits 24-27: kind (e.g. int, ptr, array...etc)
- * bits 28-30: unused
+ * bits 24-28: kind (e.g. int, ptr, array...etc)
+ * bits 29-30: unused
* bit 31: kind_flag, currently used by
- * struct, union and fwd
+ * struct, union, enum, fwd and enum64
*/
__u32 info;
- /* "size" is used by INT, ENUM, STRUCT, UNION and DATASEC.
+ /* "size" is used by INT, ENUM, STRUCT, UNION, DATASEC and ENUM64.
* "size" tells the size of the type it is describing.
*
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
- * FUNC, FUNC_PROTO and VAR.
+ * FUNC, FUNC_PROTO, VAR, DECL_TAG and TYPE_TAG.
* "type" is a type_id referring to another type.
*/
union {
@@ -52,28 +52,35 @@ struct btf_type {
};
};
-#define BTF_INFO_KIND(info) (((info) >> 24) & 0x0f)
+#define BTF_INFO_KIND(info) (((info) >> 24) & 0x1f)
#define BTF_INFO_VLEN(info) ((info) & 0xffff)
#define BTF_INFO_KFLAG(info) ((info) >> 31)
-#define BTF_KIND_UNKN 0 /* Unknown */
-#define BTF_KIND_INT 1 /* Integer */
-#define BTF_KIND_PTR 2 /* Pointer */
-#define BTF_KIND_ARRAY 3 /* Array */
-#define BTF_KIND_STRUCT 4 /* Struct */
-#define BTF_KIND_UNION 5 /* Union */
-#define BTF_KIND_ENUM 6 /* Enumeration */
-#define BTF_KIND_FWD 7 /* Forward */
-#define BTF_KIND_TYPEDEF 8 /* Typedef */
-#define BTF_KIND_VOLATILE 9 /* Volatile */
-#define BTF_KIND_CONST 10 /* Const */
-#define BTF_KIND_RESTRICT 11 /* Restrict */
-#define BTF_KIND_FUNC 12 /* Function */
-#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */
-#define BTF_KIND_VAR 14 /* Variable */
-#define BTF_KIND_DATASEC 15 /* Section */
-#define BTF_KIND_MAX BTF_KIND_DATASEC
-#define NR_BTF_KINDS (BTF_KIND_MAX + 1)
+enum {
+ BTF_KIND_UNKN = 0, /* Unknown */
+ BTF_KIND_INT = 1, /* Integer */
+ BTF_KIND_PTR = 2, /* Pointer */
+ BTF_KIND_ARRAY = 3, /* Array */
+ BTF_KIND_STRUCT = 4, /* Struct */
+ BTF_KIND_UNION = 5, /* Union */
+ BTF_KIND_ENUM = 6, /* Enumeration up to 32-bit values */
+ BTF_KIND_FWD = 7, /* Forward */
+ BTF_KIND_TYPEDEF = 8, /* Typedef */
+ BTF_KIND_VOLATILE = 9, /* Volatile */
+ BTF_KIND_CONST = 10, /* Const */
+ BTF_KIND_RESTRICT = 11, /* Restrict */
+ BTF_KIND_FUNC = 12, /* Function */
+ BTF_KIND_FUNC_PROTO = 13, /* Function Proto */
+ BTF_KIND_VAR = 14, /* Variable */
+ BTF_KIND_DATASEC = 15, /* Section */
+ BTF_KIND_FLOAT = 16, /* Floating point */
+ BTF_KIND_DECL_TAG = 17, /* Decl Tag */
+ BTF_KIND_TYPE_TAG = 18, /* Type Tag */
+ BTF_KIND_ENUM64 = 19, /* Enumeration up to 64-bit values */
+
+ NR_BTF_KINDS,
+ BTF_KIND_MAX = NR_BTF_KINDS - 1,
+};
/* For some specific BTF_KIND, "struct btf_type" is immediately
* followed by extra data.
@@ -169,4 +176,25 @@ struct btf_var_secinfo {
__u32 size;
};
+/* BTF_KIND_DECL_TAG is followed by a single "struct btf_decl_tag" to describe
+ * additional information related to the tag applied location.
+ * If component_idx == -1, the tag is applied to a struct, union,
+ * variable or function. Otherwise, it is applied to a struct/union
+ * member or a func argument, and component_idx indicates which member
+ * or argument (0 ... vlen-1).
+ */
+struct btf_decl_tag {
+ __s32 component_idx;
+};
+
+/* BTF_KIND_ENUM64 is followed by multiple "struct btf_enum64".
+ * The exact number of btf_enum64 is stored in the vlen (of the
+ * info in "struct btf_type").
+ */
+struct btf_enum64 {
+ __u32 name_off;
+ __u32 val_lo32;
+ __u32 val_hi32;
+};
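+
+/* For illustration, the full 64-bit enumerator value can be
+ * reconstructed from a struct btf_enum64 *e as:
+ *
+ * __u64 val = ((__u64)e->val_hi32 << 32) | e->val_lo32;
+ */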
+
#endif /* _UAPI__LINUX_BTF_H__ */
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 7a8bc8b920f5..5655e89b962b 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -36,17 +36,22 @@ struct btrfs_ioctl_vol_args {
#define BTRFS_DEVICE_PATH_NAME_MAX 1024
#define BTRFS_SUBVOL_NAME_MAX 4039
-#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
+#ifndef __KERNEL__
+/* Deprecated since 5.7 */
+# define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
+#endif
#define BTRFS_SUBVOL_RDONLY (1ULL << 1)
#define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2)
#define BTRFS_DEVICE_SPEC_BY_ID (1ULL << 3)
+#define BTRFS_SUBVOL_SPEC_BY_ID (1ULL << 4)
+
#define BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED \
- (BTRFS_SUBVOL_CREATE_ASYNC | \
- BTRFS_SUBVOL_RDONLY | \
+ (BTRFS_SUBVOL_RDONLY | \
BTRFS_SUBVOL_QGROUP_INHERIT | \
- BTRFS_DEVICE_SPEC_BY_ID)
+ BTRFS_DEVICE_SPEC_BY_ID | \
+ BTRFS_SUBVOL_SPEC_BY_ID)
#define BTRFS_FSID_SIZE 16
#define BTRFS_UUID_SIZE 16
@@ -88,7 +93,7 @@ struct btrfs_qgroup_inherit {
__u64 num_ref_copies;
__u64 num_excl_copies;
struct btrfs_qgroup_limit lim;
- __u64 qgroups[0];
+ __u64 qgroups[];
};
struct btrfs_ioctl_qgroup_limit_args {
@@ -97,16 +102,29 @@ struct btrfs_ioctl_qgroup_limit_args {
};
/*
- * flags for subvolumes
+ * Arguments for specification of subvolumes or devices, supporting by-name or
+ * by-id and flags
*
- * Used by:
- * struct btrfs_ioctl_vol_args_v2.flags
+ * The set of supported flags depends on the ioctl
*
* BTRFS_SUBVOL_RDONLY is also provided/consumed by the following ioctls:
* - BTRFS_IOC_SUBVOL_GETFLAGS
* - BTRFS_IOC_SUBVOL_SETFLAGS
*/
+/* Supported flags for BTRFS_IOC_RM_DEV_V2 */
+#define BTRFS_DEVICE_REMOVE_ARGS_MASK \
+ (BTRFS_DEVICE_SPEC_BY_ID)
+
+/* Supported flags for BTRFS_IOC_SNAP_CREATE_V2 and BTRFS_IOC_SUBVOL_CREATE_V2 */
+#define BTRFS_SUBVOL_CREATE_ARGS_MASK \
+ (BTRFS_SUBVOL_RDONLY | \
+ BTRFS_SUBVOL_QGROUP_INHERIT)
+
+/* Supported flags for BTRFS_IOC_SNAP_DESTROY_V2 */
+#define BTRFS_SUBVOL_DELETE_ARGS_MASK \
+ (BTRFS_SUBVOL_SPEC_BY_ID)
+
struct btrfs_ioctl_vol_args_v2 {
__s64 fd;
__u64 transid;
@@ -121,6 +139,7 @@ struct btrfs_ioctl_vol_args_v2 {
union {
char name[BTRFS_SUBVOL_NAME_MAX + 1];
__u64 devid;
+ __u64 subvolid;
};
};
@@ -135,7 +154,7 @@ struct btrfs_scrub_progress {
__u64 tree_bytes_scrubbed; /* # of tree bytes scrubbed */
__u64 read_errors; /* # of read errors encountered (EIO) */
__u64 csum_errors; /* # of failed csum checks */
- __u64 verify_errors; /* # of occurences, where the metadata
+ __u64 verify_errors; /* # of occurrences, where the metadata
* of a tree block did not match the
* expected values, like generation or
* logical */
@@ -155,7 +174,7 @@ struct btrfs_scrub_progress {
__u64 last_physical; /* last physical address scrubbed. In
* case a scrub was aborted, this can
* be used to restart the scrub */
- __u64 unverified_errors; /* # of occurences where a read for a
+ __u64 unverified_errors; /* # of occurrences where a read for a
* full (64k) bio failed, but the re-
* check succeeded for each 4k piece.
* Intermittent error. */
@@ -224,6 +243,18 @@ struct btrfs_ioctl_dev_info_args {
__u8 path[BTRFS_DEVICE_PATH_NAME_MAX]; /* out */
};
+/*
+ * Retrieve information about the filesystem
+ */
+
+/* Request information about checksum type and size */
+#define BTRFS_FS_INFO_FLAG_CSUM_INFO (1 << 0)
+
+/* Request information about filesystem generation */
+#define BTRFS_FS_INFO_FLAG_GENERATION (1 << 1)
+/* Request information about filesystem metadata UUID */
+#define BTRFS_FS_INFO_FLAG_METADATA_UUID (1 << 2)
+
struct btrfs_ioctl_fs_info_args {
__u64 max_id; /* out */
__u64 num_devices; /* out */
@@ -231,8 +262,13 @@ struct btrfs_ioctl_fs_info_args {
__u32 nodesize; /* out */
__u32 sectorsize; /* out */
__u32 clone_alignment; /* out */
- __u32 reserved32;
- __u64 reserved[122]; /* pad to 1k */
+ /* See BTRFS_FS_INFO_FLAG_* */
+ __u16 csum_type; /* out */
+ __u16 csum_size; /* out */
+ __u64 flags; /* in/out */
+ __u64 generation; /* out */
+ __u8 metadata_uuid[BTRFS_FSID_SIZE]; /* out */
+ __u8 reserved[944]; /* pad to 1k */
};
/*
@@ -252,6 +288,13 @@ struct btrfs_ioctl_fs_info_args {
* first mount when booting older kernel versions.
*/
#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID (1ULL << 1)
+#define BTRFS_FEATURE_COMPAT_RO_VERITY (1ULL << 2)
+
+/*
+ * Put all block group items into a dedicated block group tree, greatly
+ * reducing mount time for large filesystem due to better locality.
+ */
+#define BTRFS_FEATURE_COMPAT_RO_BLOCK_GROUP_TREE (1ULL << 3)
#define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF (1ULL << 0)
#define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1)
@@ -271,6 +314,8 @@ struct btrfs_ioctl_fs_info_args {
#define BTRFS_FEATURE_INCOMPAT_NO_HOLES (1ULL << 9)
#define BTRFS_FEATURE_INCOMPAT_METADATA_UUID (1ULL << 10)
#define BTRFS_FEATURE_INCOMPAT_RAID1C34 (1ULL << 11)
+#define BTRFS_FEATURE_INCOMPAT_ZONED (1ULL << 12)
+#define BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2 (1ULL << 13)
struct btrfs_ioctl_feature_flags {
__u64 compat_flags;
@@ -522,7 +567,7 @@ struct btrfs_ioctl_search_args_v2 {
__u64 buf_size; /* in - size of buffer
* out - on EOVERFLOW: needed size
* to store item */
- __u64 buf[0]; /* out - found items */
+ __u64 buf[]; /* out - found items */
};
struct btrfs_ioctl_clone_range_args {
@@ -593,7 +638,7 @@ struct btrfs_ioctl_same_args {
__u16 dest_count; /* in - total elements in info array */
__u16 reserved1;
__u32 reserved2;
- struct btrfs_ioctl_same_extent_info info[0];
+ struct btrfs_ioctl_same_extent_info info[];
};
struct btrfs_ioctl_space_info {
@@ -605,7 +650,7 @@ struct btrfs_ioctl_space_info {
struct btrfs_ioctl_space_args {
__u64 space_slots;
__u64 total_spaces;
- struct btrfs_ioctl_space_info spaces[0];
+ struct btrfs_ioctl_space_info spaces[];
};
struct btrfs_data_container {
@@ -613,7 +658,7 @@ struct btrfs_data_container {
__u32 bytes_missing; /* out -- additional bytes needed for result */
__u32 elem_cnt; /* out */
__u32 elem_missed; /* out */
- __u64 val[0]; /* out */
+ __u64 val[]; /* out */
};
struct btrfs_ioctl_ino_path_args {
@@ -733,10 +778,24 @@ struct btrfs_ioctl_received_subvol_args {
*/
#define BTRFS_SEND_FLAG_OMIT_END_CMD 0x4
+/*
+ * Read the protocol version in the structure
+ */
+#define BTRFS_SEND_FLAG_VERSION 0x8
+
+/*
+ * Send compressed data using the ENCODED_WRITE command instead of decompressing
+ * the data and sending it with the WRITE command. This requires protocol
+ * version >= 2.
+ */
+#define BTRFS_SEND_FLAG_COMPRESSED 0x10
+
#define BTRFS_SEND_FLAG_MASK \
(BTRFS_SEND_FLAG_NO_FILE_DATA | \
BTRFS_SEND_FLAG_OMIT_STREAM_HEADER | \
- BTRFS_SEND_FLAG_OMIT_END_CMD)
+ BTRFS_SEND_FLAG_OMIT_END_CMD | \
+ BTRFS_SEND_FLAG_VERSION | \
+ BTRFS_SEND_FLAG_COMPRESSED)
struct btrfs_ioctl_send_args {
__s64 send_fd; /* in */
@@ -744,7 +803,8 @@ struct btrfs_ioctl_send_args {
__u64 __user *clone_sources; /* in */
__u64 parent_root; /* in */
__u64 flags; /* in */
- __u64 reserved[4]; /* in */
+ __u32 version; /* in */
+ __u8 reserved[28]; /* in */
};
/*
@@ -823,6 +883,134 @@ struct btrfs_ioctl_get_subvol_rootref_args {
__u8 align[7];
};
+/*
+ * Data and metadata for an encoded read or write.
+ *
+ * Encoded I/O bypasses any encoding automatically done by the filesystem (e.g.,
+ * compression). This can be used to read the compressed contents of a file or
+ * write pre-compressed data directly to a file.
+ *
+ * BTRFS_IOC_ENCODED_READ and BTRFS_IOC_ENCODED_WRITE are essentially
+ * preadv/pwritev with additional metadata about how the data is encoded and the
+ * size of the unencoded data.
+ *
+ * BTRFS_IOC_ENCODED_READ fills the given iovecs with the encoded data, fills
+ * the metadata fields, and returns the size of the encoded data. It reads one
+ * extent per call. It can also read data which is not encoded.
+ *
+ * BTRFS_IOC_ENCODED_WRITE uses the metadata fields, writes the encoded data
+ * from the iovecs, and returns the size of the encoded data. Note that the
+ * encoded data is not validated when it is written; if it is not valid (e.g.,
+ * it cannot be decompressed), then a subsequent read may return an error.
+ *
+ * Since the filesystem page cache contains decoded data, encoded I/O bypasses
+ * the page cache. Encoded I/O requires CAP_SYS_ADMIN.
+ */
+struct btrfs_ioctl_encoded_io_args {
+ /* Input parameters for both reads and writes. */
+
+ /*
+ * iovecs containing encoded data.
+ *
+ * For reads, if the size of the encoded data is larger than the sum of
+ * iov[n].iov_len for 0 <= n < iovcnt, then the ioctl fails with
+ * ENOBUFS.
+ *
+ * For writes, the size of the encoded data is the sum of iov[n].iov_len
+ * for 0 <= n < iovcnt. This must be less than 128 KiB (this limit may
+ * increase in the future). This must also be less than or equal to
+ * unencoded_len.
+ */
+ const struct iovec __user *iov;
+ /* Number of iovecs. */
+ unsigned long iovcnt;
+ /*
+ * Offset in file.
+ *
+ * For writes, must be aligned to the sector size of the filesystem.
+ */
+ __s64 offset;
+ /* Currently must be zero. */
+ __u64 flags;
+
+ /*
+ * For reads, the following members are output parameters that will
+ * contain the returned metadata for the encoded data.
+ * For writes, the following members must be set to the metadata for the
+ * encoded data.
+ */
+
+ /*
+ * Length of the data in the file.
+ *
+ * Must be less than or equal to unencoded_len - unencoded_offset. For
+ * writes, must be aligned to the sector size of the filesystem unless
+ * the data ends at or beyond the current end of the file.
+ */
+ __u64 len;
+ /*
+ * Length of the unencoded (i.e., decrypted and decompressed) data.
+ *
+ * For writes, must be no more than 128 KiB (this limit may increase in
+ * the future). If the unencoded data is actually longer than
+ * unencoded_len, then it is truncated; if it is shorter, then it is
+ * extended with zeroes.
+ */
+ __u64 unencoded_len;
+ /*
+ * Offset from the first byte of the unencoded data to the first byte of
+ * logical data in the file.
+ *
+ * Must be less than unencoded_len.
+ */
+ __u64 unencoded_offset;
+ /*
+ * BTRFS_ENCODED_IO_COMPRESSION_* type.
+ *
+ * For writes, must not be BTRFS_ENCODED_IO_COMPRESSION_NONE.
+ */
+ __u32 compression;
+ /* Currently always BTRFS_ENCODED_IO_ENCRYPTION_NONE. */
+ __u32 encryption;
+ /*
+ * Reserved for future expansion.
+ *
+ * For reads, always returned as zero. Users should check for non-zero
+ * bytes. If there are any, then the kernel has a newer version of this
+ * structure with additional information that the user definition is
+ * missing.
+ *
+ * For writes, must be zeroed.
+ */
+ __u8 reserved[64];
+};
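+
+/* A hedged userspace sketch of an encoded read (setup of fd and
+ * buf, and error handling, are omitted):
+ *
+ * struct iovec iov = { .iov_base = buf, .iov_len = buf_len };
+ * struct btrfs_ioctl_encoded_io_args args = {
+ * .iov = &iov, .iovcnt = 1, .offset = 0,
+ * };
+ * ssize_t encoded_len = ioctl(fd, BTRFS_IOC_ENCODED_READ, &args);
+ */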
+
+/* Data is not compressed. */
+#define BTRFS_ENCODED_IO_COMPRESSION_NONE 0
+/* Data is compressed as a single zlib stream. */
+#define BTRFS_ENCODED_IO_COMPRESSION_ZLIB 1
+/*
+ * Data is compressed as a single zstd frame with the windowLog compression
+ * parameter set to no more than 17.
+ */
+#define BTRFS_ENCODED_IO_COMPRESSION_ZSTD 2
+/*
+ * Data is compressed sector by sector (using the sector size indicated by the
+ * name of the constant) with LZO1X and wrapped in the format documented in
+ * fs/btrfs/lzo.c. For writes, the compression sector size must match the
+ * filesystem sector size.
+ */
+#define BTRFS_ENCODED_IO_COMPRESSION_LZO_4K 3
+#define BTRFS_ENCODED_IO_COMPRESSION_LZO_8K 4
+#define BTRFS_ENCODED_IO_COMPRESSION_LZO_16K 5
+#define BTRFS_ENCODED_IO_COMPRESSION_LZO_32K 6
+#define BTRFS_ENCODED_IO_COMPRESSION_LZO_64K 7
+#define BTRFS_ENCODED_IO_COMPRESSION_TYPES 8
+
+/* Data is not encrypted. */
+#define BTRFS_ENCODED_IO_ENCRYPTION_NONE 0
+#define BTRFS_ENCODED_IO_ENCRYPTION_TYPES 1
+
/* Error codes as returned by the kernel */
enum btrfs_err_code {
BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET = 1,
@@ -949,5 +1137,11 @@ enum btrfs_err_code {
struct btrfs_ioctl_get_subvol_rootref_args)
#define BTRFS_IOC_INO_LOOKUP_USER _IOWR(BTRFS_IOCTL_MAGIC, 62, \
struct btrfs_ioctl_ino_lookup_user_args)
+#define BTRFS_IOC_SNAP_DESTROY_V2 _IOW(BTRFS_IOCTL_MAGIC, 63, \
+ struct btrfs_ioctl_vol_args_v2)
+#define BTRFS_IOC_ENCODED_READ _IOR(BTRFS_IOCTL_MAGIC, 64, \
+ struct btrfs_ioctl_encoded_io_args)
+#define BTRFS_IOC_ENCODED_WRITE _IOW(BTRFS_IOCTL_MAGIC, 64, \
+ struct btrfs_ioctl_encoded_io_args)
#endif /* _UAPI_LINUX_BTRFS_H */
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index 8e322e2c7e78..1f7a38ec6ac3 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -4,6 +4,11 @@
#include <linux/btrfs.h>
#include <linux/types.h>
+#ifdef __KERNEL__
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
/*
* This header contains the structure definitions and constants used
@@ -48,13 +53,16 @@
/* tracks free space in block groups. */
#define BTRFS_FREE_SPACE_TREE_OBJECTID 10ULL
+/* Holds the block group items for extent tree v2. */
+#define BTRFS_BLOCK_GROUP_TREE_OBJECTID 11ULL
+
/* device stats in the device tree */
#define BTRFS_DEV_STATS_OBJECTID 0ULL
/* for storing balance parameters in the root tree */
#define BTRFS_BALANCE_OBJECTID -4ULL
-/* orhpan objectid for tracking unlinked/truncated files */
+/* orphan objectid for tracking unlinked/truncated files */
#define BTRFS_ORPHAN_OBJECTID -5ULL
/* does write ahead logging to speed up fsyncs */
@@ -113,12 +121,37 @@
#define BTRFS_INODE_REF_KEY 12
#define BTRFS_INODE_EXTREF_KEY 13
#define BTRFS_XATTR_ITEM_KEY 24
+
+/*
+ * fs verity items are stored under two different key types on disk.
+ * The descriptor items:
+ * [ inode objectid, BTRFS_VERITY_DESC_ITEM_KEY, offset ]
+ *
+ * At offset 0, we store a btrfs_verity_descriptor_item which tracks the size
+ * of the descriptor item and some extra data for encryption.
+ * Starting at offset 1, these hold the generic fs verity descriptor. The
+ * latter are opaque to btrfs, we just read and write them as a blob for the
+ * higher level verity code. The most common descriptor size is 256 bytes.
+ *
+ * The merkle tree items:
+ * [ inode objectid, BTRFS_VERITY_MERKLE_ITEM_KEY, offset ]
+ *
+ * These also start at offset 0, and correspond to the merkle tree bytes. When
+ * fsverity asks for page 0 of the merkle tree, we pull up one page starting at
+ * offset 0 for this key type. These are also opaque to btrfs, we're blindly
+ * storing whatever fsverity sends down.
+ */
+#define BTRFS_VERITY_DESC_ITEM_KEY 36
+#define BTRFS_VERITY_MERKLE_ITEM_KEY 37
+
#define BTRFS_ORPHAN_ITEM_KEY 48
/* reserve 2-15 close to the inode for later flexibility */
/*
* dir items are the name -> inode pointers in a directory. There is one
- * for every name in a directory.
+ * for every name in a directory. BTRFS_DIR_LOG_ITEM_KEY is no longer used
+ * but it's still defined here for documentation purposes and to help avoid
+ * having its numerical value reused in the future.
*/
#define BTRFS_DIR_LOG_ITEM_KEY 60
#define BTRFS_DIR_LOG_INDEX_KEY 72
@@ -270,7 +303,7 @@
#define BTRFS_PERSISTENT_ITEM_KEY 249
/*
- * Persistantly stores the device replace state in the device tree.
+ * Persistently stores the device replace state in the device tree.
* The key is built like this: (0, BTRFS_DEV_REPLACE_KEY, 0).
*/
#define BTRFS_DEV_REPLACE_KEY 250
@@ -294,7 +327,8 @@
*/
#define BTRFS_STRING_ITEM_KEY 253
-
+/* Maximum metadata block size (nodesize) */
+#define BTRFS_MAX_METADATA_BLOCKSIZE 65536
/* 32 bytes in various csum fields */
#define BTRFS_CSUM_SIZE 32
@@ -519,15 +553,6 @@ struct btrfs_extent_inline_ref {
__le64 offset;
} __attribute__ ((__packed__));
-/* old style backrefs item */
-struct btrfs_extent_ref_v0 {
- __le64 root;
- __le64 generation;
- __le64 objectid;
- __le32 count;
-} __attribute__ ((__packed__));
-
-
/* dev extents record free space on individual devices. The owner
* field points back to the chunk allocation mapping tree that allocated
* the extent. The chunk tree uuid field is a way to double check the owner
@@ -550,7 +575,7 @@ struct btrfs_inode_extref {
__le64 parent_objectid;
__le64 index;
__le16 name_len;
- __u8 name[0];
+ __u8 name[];
/* name goes here */
} __attribute__ ((__packed__));
@@ -654,6 +679,15 @@ struct btrfs_root_item {
} __attribute__ ((__packed__));
/*
+ * Btrfs root item used to be smaller than current size. The old format ends
+ * at where member generation_v2 is.
+ */
+static inline __u32 btrfs_legacy_root_item_size(void)
+{
+ return offsetof(struct btrfs_root_item, generation_v2);
+}
+
+/*
* this is used for both forward and backward root refs
*/
struct btrfs_root_ref {
@@ -846,19 +880,6 @@ struct btrfs_dev_replace_item {
#define BTRFS_BLOCK_GROUP_RESERVED (BTRFS_AVAIL_ALLOC_BIT_SINGLE | \
BTRFS_SPACE_INFO_GLOBAL_RSV)
-enum btrfs_raid_types {
- BTRFS_RAID_RAID10,
- BTRFS_RAID_RAID1,
- BTRFS_RAID_DUP,
- BTRFS_RAID_RAID0,
- BTRFS_RAID_SINGLE,
- BTRFS_RAID_RAID5,
- BTRFS_RAID_RAID6,
- BTRFS_RAID_RAID1C3,
- BTRFS_RAID_RAID1C4,
- BTRFS_NR_RAID_TYPES
-};
-
#define BTRFS_BLOCK_GROUP_TYPE_MASK (BTRFS_BLOCK_GROUP_DATA | \
BTRFS_BLOCK_GROUP_SYSTEM | \
BTRFS_BLOCK_GROUP_METADATA)
@@ -922,9 +943,9 @@ struct btrfs_free_space_info {
#define BTRFS_FREE_SPACE_USING_BITMAPS (1ULL << 0)
#define BTRFS_QGROUP_LEVEL_SHIFT 48
-static inline __u64 btrfs_qgroup_level(__u64 qgroupid)
+static inline __u16 btrfs_qgroup_level(__u64 qgroupid)
{
- return qgroupid >> BTRFS_QGROUP_LEVEL_SHIFT;
+ return (__u16)(qgroupid >> BTRFS_QGROUP_LEVEL_SHIFT);
}
/*
@@ -944,6 +965,10 @@ static inline __u64 btrfs_qgroup_level(__u64 qgroupid)
*/
#define BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT (1ULL << 2)
+#define BTRFS_QGROUP_STATUS_FLAGS_MASK (BTRFS_QGROUP_STATUS_FLAG_ON | \
+ BTRFS_QGROUP_STATUS_FLAG_RESCAN | \
+ BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)
+
#define BTRFS_QGROUP_STATUS_VERSION 1
struct btrfs_qgroup_status_item {
@@ -985,4 +1010,16 @@ struct btrfs_qgroup_limit_item {
__le64 rsv_excl;
} __attribute__ ((__packed__));
+struct btrfs_verity_descriptor_item {
+ /* Size of the verity descriptor in bytes */
+ __le64 size;
+ /*
+ * When we implement support for fscrypt, we will need to encrypt the
+ * Merkle tree for encrypted verity files. These 128 bits are for the
+ * eventual storage of an fscrypt initialization vector.
+ */
+ __le64 reserved[2];
+ __u8 encryption;
+} __attribute__ ((__packed__));
+
#endif /* _BTRFS_CTREE_H_ */
diff --git a/include/uapi/linux/byteorder/big_endian.h b/include/uapi/linux/byteorder/big_endian.h
index 2199adc6a6c2..80aa5c41a763 100644
--- a/include/uapi/linux/byteorder/big_endian.h
+++ b/include/uapi/linux/byteorder/big_endian.h
@@ -9,6 +9,7 @@
#define __BIG_ENDIAN_BITFIELD
#endif
+#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/swab.h>
diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
index 601c904fd5cd..cd98982e7523 100644
--- a/include/uapi/linux/byteorder/little_endian.h
+++ b/include/uapi/linux/byteorder/little_endian.h
@@ -9,6 +9,7 @@
#define __LITTLE_ENDIAN_BITFIELD
#endif
+#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/swab.h>
diff --git a/include/uapi/linux/cachefiles.h b/include/uapi/linux/cachefiles.h
new file mode 100644
index 000000000000..78caa73e5343
--- /dev/null
+++ b/include/uapi/linux/cachefiles.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _LINUX_CACHEFILES_H
+#define _LINUX_CACHEFILES_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * Fscache ensures that the maximum length of the cookie key is 255. The volume
+ * key is controlled by the netfs and is generally no bigger than 255.
+ */
+#define CACHEFILES_MSG_MAX_SIZE 1024
+
+enum cachefiles_opcode {
+ CACHEFILES_OP_OPEN,
+ CACHEFILES_OP_CLOSE,
+ CACHEFILES_OP_READ,
+};
+
+/*
+ * Message Header
+ *
+ * @msg_id a unique ID identifying this message
+ * @opcode	message type, CACHEFILES_OP_*
+ * @len message length, including message header and following data
+ * @object_id a unique ID identifying a cache file
+ * @data message type specific payload
+ */
+struct cachefiles_msg {
+ __u32 msg_id;
+ __u32 opcode;
+ __u32 len;
+ __u32 object_id;
+ __u8 data[];
+};
+
+/*
+ * @data contains the volume_key followed directly by the cookie_key. volume_key
+ * is a NUL-terminated string; @volume_key_size indicates the size of the volume
+ * key in bytes. cookie_key is binary data, which is netfs specific;
+ * @cookie_key_size indicates the size of the cookie key in bytes.
+ *
+ * @fd identifies an anon_fd referring to the cache file.
+ */
+struct cachefiles_open {
+ __u32 volume_key_size;
+ __u32 cookie_key_size;
+ __u32 fd;
+ __u32 flags;
+ __u8 data[];
+};
+
+/*
+ * @off indicates the starting offset of the requested file range
+ * @len indicates the length of the requested file range
+ */
+struct cachefiles_read {
+ __u64 off;
+ __u64 len;
+};
+
+/*
+ * Reply for READ request
+ * @arg for this ioctl is the @msg_id field of the READ request.
+ */
+#define CACHEFILES_IOC_READ_COMPLETE _IOW(0x98, 1, int)
+
+#endif
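
To make the message flow above concrete, here is a minimal sketch of how a cache daemon might decode one request read from the cachefiles device fd; the function name and buffer handling are illustrative, and the anon fd used to complete a READ is the one handed out by the earlier OPEN request.

#include <unistd.h>
#include <linux/cachefiles.h>

/* Sketch: parse one request from the cachefiles device fd. */
static void handle_one(int devfd)
{
	char buf[CACHEFILES_MSG_MAX_SIZE];
	struct cachefiles_msg *msg = (struct cachefiles_msg *)buf;

	if (read(devfd, buf, sizeof(buf)) < (ssize_t)sizeof(*msg))
		return;

	switch (msg->opcode) {
	case CACHEFILES_OP_OPEN: {
		struct cachefiles_open *load = (void *)msg->data;
		/* NUL-terminated volume_key precedes the binary cookie_key */
		const char *volume_key = (const char *)load->data;
		const __u8 *cookie_key = load->data + load->volume_key_size;
		/* ... set up the backing file behind load->fd ... */
		(void)volume_key; (void)cookie_key;
		break;
	}
	case CACHEFILES_OP_READ: {
		struct cachefiles_read *req = (void *)msg->data;
		/* Fetch [req->off, req->off + req->len), then complete on
		 * the anon fd:
		 * ioctl(anon_fd, CACHEFILES_IOC_READ_COMPLETE, msg->msg_id);
		 */
		(void)req;
		break;
	}
	}
}
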
diff --git a/include/uapi/linux/caif/caif_socket.h b/include/uapi/linux/caif/caif_socket.h
index 10ec1d1cf68e..d9970bbaa156 100644
--- a/include/uapi/linux/caif/caif_socket.h
+++ b/include/uapi/linux/caif/caif_socket.h
@@ -169,7 +169,7 @@ struct sockaddr_caif {
* @CAIFSO_LINK_SELECT: Selector used if multiple CAIF Link layers are
* available. Either a high bandwidth
* link can be selected (CAIF_LINK_HIGH_BANDW) or
- * or a low latency link (CAIF_LINK_LOW_LATENCY).
+ * a low latency link (CAIF_LINK_LOW_LATENCY).
* This option is of type __u32.
* Alternatively SO_BINDTODEVICE can be used.
*
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index 6a6d2c7655ff..dd645ea72306 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -48,6 +48,7 @@
#include <linux/types.h>
#include <linux/socket.h>
+#include <linux/stddef.h> /* for offsetof */
/* controller area network (CAN) kernel definitions */
@@ -60,6 +61,7 @@
#define CAN_SFF_MASK 0x000007FFU /* standard frame format (SFF) */
#define CAN_EFF_MASK 0x1FFFFFFFU /* extended frame format (EFF) */
#define CAN_ERR_MASK 0x1FFFFFFFU /* omit EFF, RTR, ERR flags */
+#define CANXL_PRIO_MASK CAN_SFF_MASK /* 11 bit priority mask */
/*
* Controller Area Network Identifier structure
@@ -73,6 +75,7 @@ typedef __u32 canid_t;
#define CAN_SFF_ID_BITS 11
#define CAN_EFF_ID_BITS 29
+#define CANXL_PRIO_BITS CAN_SFF_ID_BITS
/*
* Controller Area Network Error Message Frame Mask structure
@@ -84,37 +87,57 @@ typedef __u32 can_err_mask_t;
/* CAN payload length and DLC definitions according to ISO 11898-1 */
#define CAN_MAX_DLC 8
+#define CAN_MAX_RAW_DLC 15
#define CAN_MAX_DLEN 8
/* CAN FD payload length and DLC definitions according to ISO 11898-7 */
#define CANFD_MAX_DLC 15
#define CANFD_MAX_DLEN 64
+/*
+ * CAN XL payload length and DLC definitions according to ISO 11898-1
+ * CAN XL DLC ranges from 0 .. 2047 => data length from 1 .. 2048 byte
+ */
+#define CANXL_MIN_DLC 0
+#define CANXL_MAX_DLC 2047
+#define CANXL_MAX_DLC_MASK 0x07FF
+#define CANXL_MIN_DLEN 1
+#define CANXL_MAX_DLEN 2048
+
/**
- * struct can_frame - basic CAN frame structure
- * @can_id: CAN ID of the frame and CAN_*_FLAG flags, see canid_t definition
- * @can_dlc: frame payload length in byte (0 .. 8) aka data length code
- * N.B. the DLC field from ISO 11898-1 Chapter 8.4.2.3 has a 1:1
- * mapping of the 'data length code' to the real payload length
- * @__pad: padding
- * @__res0: reserved / padding
- * @__res1: reserved / padding
- * @data: CAN frame payload (up to 8 byte)
+ * struct can_frame - Classical CAN frame structure (aka CAN 2.0B)
+ * @can_id: CAN ID of the frame and CAN_*_FLAG flags, see canid_t definition
+ * @len: CAN frame payload length in byte (0 .. 8)
+ * @can_dlc: deprecated name for CAN frame payload length in byte (0 .. 8)
+ * @__pad: padding
+ * @__res0: reserved / padding
+ * @len8_dlc: optional DLC value (9 .. 15) at 8 byte payload length
+ * len8_dlc contains values from 9 .. 15 when the payload length is
+ * 8 bytes but the DLC value (see ISO 11898-1) is greater than 8.
+ * The CAN_CTRLMODE_CC_LEN8_DLC flag has to be enabled in the CAN driver.
+ * @data: CAN frame payload (up to 8 byte)
*/
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
- __u8 can_dlc; /* frame payload length in byte (0 .. CAN_MAX_DLEN) */
- __u8 __pad; /* padding */
- __u8 __res0; /* reserved / padding */
- __u8 __res1; /* reserved / padding */
- __u8 data[CAN_MAX_DLEN] __attribute__((aligned(8)));
+ union {
+ /* CAN frame payload length in byte (0 .. CAN_MAX_DLEN)
+ * was previously named can_dlc so we need to carry that
+ * name for legacy support
+ */
+ __u8 len;
+ __u8 can_dlc; /* deprecated */
+ } __attribute__((packed)); /* disable padding added in some ABIs */
+ __u8 __pad; /* padding */
+ __u8 __res0; /* reserved / padding */
+ __u8 len8_dlc; /* optional DLC for 8 byte payload length (9 .. 15) */
+ __u8 data[CAN_MAX_DLEN] __attribute__((aligned(8)));
};
/*
* defined bits for canfd_frame.flags
*
- * The use of struct canfd_frame implies the Extended Data Length (EDL) bit to
- * be set in the CAN frame bitstream on the wire. The EDL bit switch turns
+ * The use of struct canfd_frame implies the FD Frame (FDF) bit to
+ * be set in the CAN frame bitstream on the wire. The FDF bit switch turns
* the CAN controller's bitstream processor into the CAN FD mode which creates
* two new options within the CAN FD frame specification:
*
@@ -125,9 +148,18 @@ struct can_frame {
* controller only the CANFD_BRS bit is relevant for real CAN controllers when
* building a CAN FD frame for transmission. Setting the CANFD_ESI bit can make
* sense for virtual CAN interfaces to test applications with echoed frames.
+ *
+ * The struct can_frame and struct canfd_frame intentionally share the same
+ * layout to be able to write CAN frame content into a CAN FD frame structure.
+ * When this is done the former differentiation via CAN_MTU / CANFD_MTU gets
+ * lost. CANFD_FDF allows programmers to mark CAN FD frames in the case of
+ * using struct canfd_frame for mixed CAN / CAN FD content (dual use).
+ * Since the introduction of CAN XL the CANFD_FDF flag is set in all CAN FD
+ * frame structures provided by the CAN subsystem of the Linux kernel.
*/
#define CANFD_BRS 0x01 /* bit rate switch (second bitrate for payload data) */
#define CANFD_ESI 0x02 /* error state indicator of the transmitting node */
+#define CANFD_FDF 0x04 /* mark CAN FD for dual use of struct canfd_frame */
/**
* struct canfd_frame - CAN flexible data rate frame structure
@@ -147,8 +179,46 @@ struct canfd_frame {
__u8 data[CANFD_MAX_DLEN] __attribute__((aligned(8)));
};
+/*
+ * defined bits for canxl_frame.flags
+ *
+ * The canxl_frame.flags element contains two bits CANXL_XLF and CANXL_SEC
+ * and shares the relative position of the struct can[fd]_frame.len element.
+ * The CANXL_XLF bit ALWAYS needs to be set to indicate a valid CAN XL frame.
+ * As a side effect setting this bit intentionally breaks the length checks
+ * for Classical CAN and CAN FD frames.
+ *
+ * Undefined bits in canxl_frame.flags are reserved and shall be set to zero.
+ */
+#define CANXL_XLF 0x80 /* mandatory CAN XL frame flag (must always be set!) */
+#define CANXL_SEC 0x01 /* Simple Extended Content (security/segmentation) */
+
+/**
+ * struct canxl_frame - CAN with e'X'tended frame 'L'ength frame structure
+ * @prio: 11 bit arbitration priority with zero'ed CAN_*_FLAG flags
+ * @flags: additional flags for CAN XL
+ * @sdt: SDU (service data unit) type
+ * @len: frame payload length in byte (CANXL_MIN_DLEN .. CANXL_MAX_DLEN)
+ * @af: acceptance field
+ * @data: CAN XL frame payload (CANXL_MIN_DLEN .. CANXL_MAX_DLEN byte)
+ *
+ * @prio shares the same position as @can_id from struct can[fd]_frame.
+ */
+struct canxl_frame {
+ canid_t prio; /* 11 bit priority for arbitration (canid_t) */
+ __u8 flags; /* additional flags for CAN XL */
+ __u8 sdt; /* SDU (service data unit) type */
+ __u16 len; /* frame payload length in byte */
+ __u32 af; /* acceptance field */
+ __u8 data[CANXL_MAX_DLEN];
+};
+
#define CAN_MTU (sizeof(struct can_frame))
#define CANFD_MTU (sizeof(struct canfd_frame))
+#define CANXL_MTU (sizeof(struct canxl_frame))
+#define CANXL_HDR_SIZE (offsetof(struct canxl_frame, data))
+#define CANXL_MIN_MTU (CANXL_HDR_SIZE + 64)
+#define CANXL_MAX_MTU CANXL_MTU
/* particular protocols of the protocol family PF_CAN */
#define CAN_RAW 1 /* RAW sockets */
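
As a sketch of how the new structure is meant to be filled, the following populates a CAN XL frame according to the rules above; the priority and SDU type values are arbitrary examples.

#include <string.h>
#include <linux/can.h>

/* Sketch: fill a CAN XL frame. CANXL_XLF must always be set; on a raw
 * socket only the used part of the frame, CANXL_HDR_SIZE + cf->len
 * bytes, is transferred (see the CAN_RAW_XL_FRAMES option below). */
static void build_xl_frame(struct canxl_frame *cf,
			   const void *payload, __u16 len)
{
	memset(cf, 0, sizeof(*cf));
	cf->prio = 0x242;	/* example 11 bit arbitration priority */
	cf->flags = CANXL_XLF;	/* mandatory CAN XL marker */
	cf->sdt = 0;		/* example SDU type */
	cf->len = len;		/* CANXL_MIN_DLEN .. CANXL_MAX_DLEN */
	memcpy(cf->data, payload, len);
}
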
diff --git a/include/uapi/linux/can/bcm.h b/include/uapi/linux/can/bcm.h
index dd2b925b09ac..f1e45f533a72 100644
--- a/include/uapi/linux/can/bcm.h
+++ b/include/uapi/linux/can/bcm.h
@@ -71,7 +71,7 @@ struct bcm_msg_head {
struct bcm_timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
- struct can_frame frames[0];
+ struct can_frame frames[];
};
enum {
diff --git a/include/uapi/linux/can/error.h b/include/uapi/linux/can/error.h
index 34633283de64..acc1ac393d2a 100644
--- a/include/uapi/linux/can/error.h
+++ b/include/uapi/linux/can/error.h
@@ -57,6 +57,8 @@
#define CAN_ERR_BUSOFF 0x00000040U /* bus off */
#define CAN_ERR_BUSERROR 0x00000080U /* bus error (may flood!) */
#define CAN_ERR_RESTARTED 0x00000100U /* controller restarted */
+#define CAN_ERR_CNT 0x00000200U /* TX error counter / data[6] */
+ /* RX error counter / data[7] */
/* arbitration lost in bit ... / data[0] */
#define CAN_ERR_LOSTARB_UNSPEC 0x00 /* unspecified */
@@ -120,6 +122,22 @@
#define CAN_ERR_TRX_CANL_SHORT_TO_GND 0x70 /* 0111 0000 */
#define CAN_ERR_TRX_CANL_SHORT_TO_CANH 0x80 /* 1000 0000 */
-/* controller specific additional information / data[5..7] */
+/* data[5] is reserved (do not use) */
+
+/* TX error counter / data[6] */
+/* RX error counter / data[7] */
+
+/* CAN state thresholds
+ *
+ * Error counter Error state
+ * -----------------------------------
+ * 0 - 95 Error-active
+ * 96 - 127 Error-warning
+ * 128 - 255 Error-passive
+ * 256 and greater Bus-off
+ */
+#define CAN_ERROR_WARNING_THRESHOLD 96
+#define CAN_ERROR_PASSIVE_THRESHOLD 128
+#define CAN_BUS_OFF_THRESHOLD 256
#endif /* _UAPI_CAN_ERROR_H */
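
Since the new CAN_ERR_CNT flag advertises valid counters in data[6]/data[7], a receiver can derive the controller state from the thresholds above. A minimal sketch, with the helper name chosen for illustration:

#include <linux/can.h>
#include <linux/can/error.h>

/* Sketch: map an error counter value onto the CAN state thresholds. */
static const char *can_state_from_counter(unsigned int cnt)
{
	if (cnt >= CAN_BUS_OFF_THRESHOLD)
		return "bus-off";
	if (cnt >= CAN_ERROR_PASSIVE_THRESHOLD)
		return "error-passive";
	if (cnt >= CAN_ERROR_WARNING_THRESHOLD)
		return "error-warning";
	return "error-active";
}

/* For a received error frame cf, check the flag first:
 *	if (cf->can_id & CAN_ERR_CNT)
 *		state = can_state_from_counter(cf->data[6]);  // TX counter
 */
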
diff --git a/include/uapi/linux/can/gw.h b/include/uapi/linux/can/gw.h
index c2190bbe21d8..e4f0957554f3 100644
--- a/include/uapi/linux/can/gw.h
+++ b/include/uapi/linux/can/gw.h
@@ -98,8 +98,8 @@ enum {
/* CAN frame elements that are affected by curr. 3 CAN frame modifications */
#define CGW_MOD_ID 0x01
-#define CGW_MOD_DLC 0x02 /* contains the data length in bytes */
-#define CGW_MOD_LEN CGW_MOD_DLC /* CAN FD length representation */
+#define CGW_MOD_DLC 0x02 /* Classical CAN data length code */
+#define CGW_MOD_LEN CGW_MOD_DLC /* CAN FD (plain) data length */
#define CGW_MOD_DATA 0x04
#define CGW_MOD_FLAGS 0x08 /* CAN FD flags */
diff --git a/include/uapi/linux/can/isotp.h b/include/uapi/linux/can/isotp.h
new file mode 100644
index 000000000000..439c982f7e81
--- /dev/null
+++ b/include/uapi/linux/can/isotp.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * linux/can/isotp.h
+ *
+ * Definitions for isotp CAN sockets (ISO 15765-2:2016)
+ *
+ * Copyright (c) 2020 Volkswagen Group Electronic Research
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef _UAPI_CAN_ISOTP_H
+#define _UAPI_CAN_ISOTP_H
+
+#include <linux/types.h>
+#include <linux/can.h>
+
+#define SOL_CAN_ISOTP (SOL_CAN_BASE + CAN_ISOTP)
+
+/* for socket options affecting the socket (not the global system) */
+
+#define CAN_ISOTP_OPTS 1 /* pass struct can_isotp_options */
+
+#define CAN_ISOTP_RECV_FC 2 /* pass struct can_isotp_fc_options */
+
+/* sockopts to force stmin timer values for protocol regression tests */
+
+#define CAN_ISOTP_TX_STMIN 3 /* pass __u32 value in nano secs */
+ /* use this time instead of value */
+ /* provided in FC from the receiver */
+
+#define CAN_ISOTP_RX_STMIN 4 /* pass __u32 value in nano secs */
+ /* ignore received CF frames which */
+ /* timestamps differ less than val */
+
+#define CAN_ISOTP_LL_OPTS 5 /* pass struct can_isotp_ll_options */
+
+struct can_isotp_options {
+
+ __u32 flags; /* set flags for isotp behaviour. */
+ /* __u32 value : flags see below */
+
+ __u32 frame_txtime; /* frame transmission time (N_As/N_Ar) */
+ /* __u32 value : time in nano secs */
+
+ __u8 ext_address; /* set address for extended addressing */
+ /* __u8 value : extended address */
+
+ __u8 txpad_content; /* set content of padding byte (tx) */
+ /* __u8 value : content on tx path */
+
+ __u8 rxpad_content; /* set content of padding byte (rx) */
+ /* __u8 value : content on rx path */
+
+ __u8 rx_ext_address; /* set address for extended addressing */
+ /* __u8 value : extended address (rx) */
+};
+
+struct can_isotp_fc_options {
+
+ __u8 bs; /* blocksize provided in FC frame */
+ /* __u8 value : blocksize. 0 = off */
+
+ __u8 stmin; /* separation time provided in FC frame */
+ /* __u8 value : */
+ /* 0x00 - 0x7F : 0 - 127 ms */
+ /* 0x80 - 0xF0 : reserved */
+ /* 0xF1 - 0xF9 : 100 us - 900 us */
+ /* 0xFA - 0xFF : reserved */
+
+ __u8 wftmax; /* max. number of wait frame transmiss. */
+ /* __u8 value : 0 = omit FC N_PDU WT */
+};
+
+struct can_isotp_ll_options {
+
+ __u8 mtu; /* generated & accepted CAN frame type */
+ /* __u8 value : */
+ /* CAN_MTU (16) -> standard CAN 2.0 */
+ /* CANFD_MTU (72) -> CAN FD frame */
+
+ __u8 tx_dl; /* tx link layer data length in bytes */
+ /* (configured maximum payload length) */
+ /* __u8 value : 8,12,16,20,24,32,48,64 */
+ /* => rx path supports all LL_DL values */
+
+ __u8 tx_flags; /* set into struct canfd_frame.flags */
+ /* at frame creation: e.g. CANFD_BRS */
+ /* Obsolete when the BRS flag is fixed */
+ /* by the CAN netdriver configuration */
+};
+
+/* flags for isotp behaviour */
+
+#define CAN_ISOTP_LISTEN_MODE 0x0001 /* listen only (do not send FC) */
+#define CAN_ISOTP_EXTEND_ADDR 0x0002 /* enable extended addressing */
+#define CAN_ISOTP_TX_PADDING 0x0004 /* enable CAN frame padding tx path */
+#define CAN_ISOTP_RX_PADDING 0x0008 /* enable CAN frame padding rx path */
+#define CAN_ISOTP_CHK_PAD_LEN 0x0010 /* check received CAN frame padding */
+#define CAN_ISOTP_CHK_PAD_DATA 0x0020 /* check received CAN frame padding */
+#define CAN_ISOTP_HALF_DUPLEX 0x0040 /* half duplex error state handling */
+#define CAN_ISOTP_FORCE_TXSTMIN 0x0080 /* ignore stmin from received FC */
+#define CAN_ISOTP_FORCE_RXSTMIN 0x0100 /* ignore CFs depending on rx stmin */
+#define CAN_ISOTP_RX_EXT_ADDR 0x0200 /* different rx extended addressing */
+#define CAN_ISOTP_WAIT_TX_DONE 0x0400 /* wait for tx completion */
+#define CAN_ISOTP_SF_BROADCAST 0x0800 /* 1-to-N functional addressing */
+#define CAN_ISOTP_CF_BROADCAST 0x1000 /* 1-to-N transmission w/o FC */
+
+/* protocol machine default values */
+
+#define CAN_ISOTP_DEFAULT_FLAGS 0
+#define CAN_ISOTP_DEFAULT_EXT_ADDRESS 0x00
+#define CAN_ISOTP_DEFAULT_PAD_CONTENT 0xCC /* prevent bit-stuffing */
+#define CAN_ISOTP_DEFAULT_FRAME_TXTIME 50000 /* 50 microseconds */
+#define CAN_ISOTP_DEFAULT_RECV_BS 0
+#define CAN_ISOTP_DEFAULT_RECV_STMIN 0x00
+#define CAN_ISOTP_DEFAULT_RECV_WFTMAX 0
+
+/*
+ * Remark on CAN_ISOTP_DEFAULT_RECV_* values:
+ *
+ * We can strongly assume that the Linux kernel implementation of
+ * CAN_ISOTP is capable of running with BS=0, STmin=0 and WFTmax=0.
+ * But as we want to be able to behave like a commonly available ECU,
+ * these default settings can be changed via sockopts.
+ * For that reason the STmin value is intentionally _not_ checked for
+ * consistency and copied directly into the flow control (FC) frame.
+ */
+
+/* link layer default values => make use of Classical CAN frames */
+
+#define CAN_ISOTP_DEFAULT_LL_MTU CAN_MTU
+#define CAN_ISOTP_DEFAULT_LL_TX_DL CAN_MAX_DLEN
+#define CAN_ISOTP_DEFAULT_LL_TX_FLAGS 0
+
+/*
+ * CAN_ISOTP_DEFAULT_FRAME_TXTIME has become a non-zero value because
+ * running without an N_As value only makes sense for isotp
+ * implementation tests. As user space applications usually do not set
+ * the frame_txtime element of struct can_isotp_options, the new
+ * in-kernel default would very likely be overwritten with zero when
+ * the sockopt() CAN_ISOTP_OPTS is invoked.
+ * To make sure that an N_As value of zero is only set intentionally,
+ * the value '0' is now interpreted as 'do not change the current value'.
+ * When a frame_txtime of zero is required for testing purposes, the
+ * CAN_ISOTP_FRAME_TXTIME_ZERO u32 value has to be set in frame_txtime.
+ */
+#define CAN_ISOTP_FRAME_TXTIME_ZERO 0xFFFFFFFF
+
+#endif /* !_UAPI_CAN_ISOTP_H */
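
Putting the socket options above together, here is a minimal sketch of opening an ISO-TP socket with padding enabled; the interface name and CAN IDs are examples only, and error reporting is simplified.

#include <string.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/can.h>
#include <linux/can/isotp.h>

/* Sketch: ISO-TP socket with tx/rx padding. Options must be applied
 * before bind(). */
static int open_isotp(void)
{
	struct can_isotp_options opts;
	struct sockaddr_can addr;
	int s = socket(PF_CAN, SOCK_DGRAM, CAN_ISOTP);

	if (s < 0)
		return -1;

	memset(&opts, 0, sizeof(opts));
	opts.flags = CAN_ISOTP_TX_PADDING | CAN_ISOTP_RX_PADDING;
	opts.txpad_content = CAN_ISOTP_DEFAULT_PAD_CONTENT;	/* 0xCC */
	if (setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_OPTS,
		       &opts, sizeof(opts)) < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.can_family = AF_CAN;
	addr.can_ifindex = if_nametoindex("can0");	/* example */
	addr.can_addr.tp.tx_id = 0x7e0;			/* example IDs */
	addr.can_addr.tp.rx_id = 0x7e8;
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return -1;
	return s;
}
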
diff --git a/include/uapi/linux/can/j1939.h b/include/uapi/linux/can/j1939.h
index df6e821075c1..38936460f668 100644
--- a/include/uapi/linux/can/j1939.h
+++ b/include/uapi/linux/can/j1939.h
@@ -78,11 +78,20 @@ enum {
enum {
J1939_NLA_PAD,
J1939_NLA_BYTES_ACKED,
+ J1939_NLA_TOTAL_SIZE,
+ J1939_NLA_PGN,
+ J1939_NLA_SRC_NAME,
+ J1939_NLA_DEST_NAME,
+ J1939_NLA_SRC_ADDR,
+ J1939_NLA_DEST_ADDR,
};
enum {
J1939_EE_INFO_NONE,
J1939_EE_INFO_TX_ABORT,
+ J1939_EE_INFO_RX_RTS,
+ J1939_EE_INFO_RX_DPO,
+ J1939_EE_INFO_RX_ABORT,
};
struct j1939_filter {
diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h
index 6f598b73839e..02ec32d69474 100644
--- a/include/uapi/linux/can/netlink.h
+++ b/include/uapi/linux/can/netlink.h
@@ -100,6 +100,9 @@ struct can_ctrlmode {
#define CAN_CTRLMODE_FD 0x20 /* CAN FD mode */
#define CAN_CTRLMODE_PRESUME_ACK 0x40 /* Ignore missing CAN ACKs */
#define CAN_CTRLMODE_FD_NON_ISO 0x80 /* CAN FD in non-ISO mode */
+#define CAN_CTRLMODE_CC_LEN8_DLC 0x100 /* Classic CAN DLC option */
+#define CAN_CTRLMODE_TDC_AUTO 0x200 /* CAN transceiver automatically calculates TDCV */
+#define CAN_CTRLMODE_TDC_MANUAL 0x400 /* TDCV is manually set up by user */
/*
* CAN device statistics
@@ -133,10 +136,48 @@ enum {
IFLA_CAN_BITRATE_CONST,
IFLA_CAN_DATA_BITRATE_CONST,
IFLA_CAN_BITRATE_MAX,
- __IFLA_CAN_MAX
+ IFLA_CAN_TDC,
+ IFLA_CAN_CTRLMODE_EXT,
+
+ /* add new constants above here */
+ __IFLA_CAN_MAX,
+ IFLA_CAN_MAX = __IFLA_CAN_MAX - 1
};
-#define IFLA_CAN_MAX (__IFLA_CAN_MAX - 1)
+/*
+ * CAN FD Transmitter Delay Compensation (TDC)
+ *
+ * Please refer to struct can_tdc_const and can_tdc in
+ * include/linux/can/bittiming.h for further details.
+ */
+enum {
+ IFLA_CAN_TDC_UNSPEC,
+ IFLA_CAN_TDC_TDCV_MIN, /* u32 */
+ IFLA_CAN_TDC_TDCV_MAX, /* u32 */
+ IFLA_CAN_TDC_TDCO_MIN, /* u32 */
+ IFLA_CAN_TDC_TDCO_MAX, /* u32 */
+ IFLA_CAN_TDC_TDCF_MIN, /* u32 */
+ IFLA_CAN_TDC_TDCF_MAX, /* u32 */
+ IFLA_CAN_TDC_TDCV, /* u32 */
+ IFLA_CAN_TDC_TDCO, /* u32 */
+ IFLA_CAN_TDC_TDCF, /* u32 */
+
+ /* add new constants above here */
+ __IFLA_CAN_TDC,
+ IFLA_CAN_TDC_MAX = __IFLA_CAN_TDC - 1
+};
+
+/*
+ * IFLA_CAN_CTRLMODE_EXT nest: controller mode extended parameters
+ */
+enum {
+ IFLA_CAN_CTRLMODE_UNSPEC,
+ IFLA_CAN_CTRLMODE_SUPPORTED, /* u32 */
+
+ /* add new constants above here */
+ __IFLA_CAN_CTRLMODE,
+ IFLA_CAN_CTRLMODE_MAX = __IFLA_CAN_CTRLMODE - 1
+};
/* u16 termination range: 1..65535 Ohms */
#define CAN_TERMINATION_DISABLED 0
diff --git a/include/uapi/linux/can/raw.h b/include/uapi/linux/can/raw.h
index 6a11d308eb5c..ff12f525c37c 100644
--- a/include/uapi/linux/can/raw.h
+++ b/include/uapi/linux/can/raw.h
@@ -49,6 +49,9 @@
#include <linux/can.h>
#define SOL_CAN_RAW (SOL_CAN_BASE + CAN_RAW)
+enum {
+ SCM_CAN_RAW_ERRQUEUE = 1,
+};
/* for socket options affecting the socket (not the global system) */
@@ -59,6 +62,7 @@ enum {
CAN_RAW_RECV_OWN_MSGS, /* receive my own msgs (default:off) */
CAN_RAW_FD_FRAMES, /* allow CAN FD frames (default:off) */
CAN_RAW_JOIN_FILTERS, /* all filters must match to trigger */
+ CAN_RAW_XL_FRAMES, /* allow CAN XL frames (default:off) */
};
#endif /* !_UAPI_CAN_RAW_H */
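
Combining the new CAN_RAW_XL_FRAMES option with the CAN XL definitions from linux/can.h, a minimal send path could look like the sketch below; "can0" and the error handling are illustrative.

#include <unistd.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/can.h>
#include <linux/can/raw.h>

/* Sketch: enable CAN XL on a raw socket and send one frame. Only the
 * used part of the frame, CANXL_HDR_SIZE + cf->len bytes, is written. */
static int send_xl(const struct canxl_frame *cf)
{
	int on = 1;
	struct sockaddr_can addr = { .can_family = AF_CAN };
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	if (s < 0)
		return -1;
	if (setsockopt(s, SOL_CAN_RAW, CAN_RAW_XL_FRAMES,
		       &on, sizeof(on)) < 0)
		return -1;

	addr.can_ifindex = if_nametoindex("can0");	/* example */
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return -1;

	return write(s, cf, CANXL_HDR_SIZE + cf->len) < 0 ? -1 : 0;
}
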
diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h
index 272dc69fa080..3d61a0ae055d 100644
--- a/include/uapi/linux/capability.h
+++ b/include/uapi/linux/capability.h
@@ -243,7 +243,6 @@ struct vfs_ns_cap_data {
/* Allow examination and configuration of disk quotas */
/* Allow setting the domainname */
/* Allow setting the hostname */
-/* Allow calling bdflush() */
/* Allow mount() and umount(), setting up new smb connection */
/* Allow some autofs root ioctls */
/* Allow nfsservctl */
@@ -274,6 +273,7 @@ struct vfs_ns_cap_data {
arbitrary SCSI commands */
/* Allow setting encryption key on loopback filesystem */
/* Allow setting zone reclaim policy */
+/* Allow everything under CAP_BPF and CAP_PERFMON for backward compatibility */
#define CAP_SYS_ADMIN 21
@@ -287,6 +287,8 @@ struct vfs_ns_cap_data {
processes and setting the scheduling algorithm used by another
process. */
/* Allow setting cpu affinity on other processes */
+/* Allow setting realtime ioprio class */
+/* Allow setting ioprio class on other processes */
#define CAP_SYS_NICE 23
@@ -332,6 +334,9 @@ struct vfs_ns_cap_data {
#define CAP_AUDIT_CONTROL 30
+/* Set or remove capabilities on files.
+ Map uid=0 into a child user namespace. */
+
#define CAP_SETFCAP 31
/* Override MAC access.
@@ -367,8 +372,52 @@ struct vfs_ns_cap_data {
#define CAP_AUDIT_READ 37
+/*
+ * Allow system performance and observability privileged operations
+ * using perf_events, i915_perf and other kernel subsystems
+ */
+
+#define CAP_PERFMON 38
+
+/*
+ * CAP_BPF allows the following BPF operations:
+ * - Creating all types of BPF maps
+ * - Advanced verifier features
+ * - Indirect variable access
+ * - Bounded loops
+ * - BPF to BPF function calls
+ * - Scalar precision tracking
+ * - Larger complexity limits
+ * - Dead code elimination
+ * - And potentially other features
+ * - Loading BPF Type Format (BTF) data
+ * - Retrieve xlated and JITed code of BPF programs
+ * - Use bpf_spin_lock() helper
+ *
+ * CAP_PERFMON relaxes the verifier checks further:
+ * - BPF progs can use pointer-to-integer conversions
+ * - speculation attack hardening measures are bypassed
+ * - bpf_probe_read to read arbitrary kernel memory is allowed
+ * - bpf_trace_printk to print kernel memory is allowed
+ *
+ * CAP_SYS_ADMIN is required to use bpf_probe_write_user.
+ *
+ * CAP_SYS_ADMIN is required to iterate system wide loaded
+ * programs, maps, links, BTFs and convert their IDs to file descriptors.
+ *
+ * CAP_PERFMON and CAP_BPF are required to load tracing programs.
+ * CAP_NET_ADMIN and CAP_BPF are required to load networking programs.
+ */
+#define CAP_BPF 39
+
+
+/* Allow checkpoint/restore related operations */
+/* Allow PID selection during clone3() */
+/* Allow writing to ns_last_pid */
+
+#define CAP_CHECKPOINT_RESTORE 40
-#define CAP_LAST_CAP CAP_AUDIT_READ
+#define CAP_LAST_CAP CAP_CHECKPOINT_RESTORE
#define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP)
@@ -377,7 +426,7 @@ struct vfs_ns_cap_data {
*/
#define CAP_TO_INDEX(x) ((x) >> 5) /* 1 << 5 == bits in __u32 */
-#define CAP_TO_MASK(x) (1 << ((x) & 31)) /* mask for indexed __u32 */
+#define CAP_TO_MASK(x) (1U << ((x) & 31)) /* mask for indexed __u32 */
#endif /* _UAPI_LINUX_CAPABILITY_H */
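
CAP_TO_INDEX and CAP_TO_MASK split a capability number into a __u32 word offset and a bit within that word. A small sketch of how a parser of __u32-based capability sets might use them; the helper name is illustrative.

#include <linux/capability.h>

/* Sketch: test a capability bit in a capability set stored as an
 * array of __u32 words. */
static int cap_isset(const __u32 *caps, int cap)
{
	if (!cap_valid(cap))
		return 0;
	return (caps[CAP_TO_INDEX(cap)] & CAP_TO_MASK(cap)) != 0;
}

/* e.g. CAP_BPF (39): CAP_TO_INDEX -> word 1, CAP_TO_MASK -> bit 7. */
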
diff --git a/include/uapi/linux/ccs.h b/include/uapi/linux/ccs.h
new file mode 100644
index 000000000000..2896d3bfdb5e
--- /dev/null
+++ b/include/uapi/linux/ccs.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/* Copyright (C) 2020 Intel Corporation */
+
+#ifndef __UAPI_CCS_H__
+#define __UAPI_CCS_H__
+
+#include <linux/v4l2-controls.h>
+
+#define V4L2_CID_CCS_ANALOGUE_GAIN_M0 (V4L2_CID_USER_CCS_BASE + 1)
+#define V4L2_CID_CCS_ANALOGUE_GAIN_C0 (V4L2_CID_USER_CCS_BASE + 2)
+#define V4L2_CID_CCS_ANALOGUE_GAIN_M1 (V4L2_CID_USER_CCS_BASE + 3)
+#define V4L2_CID_CCS_ANALOGUE_GAIN_C1 (V4L2_CID_USER_CCS_BASE + 4)
+#define V4L2_CID_CCS_ANALOGUE_LINEAR_GAIN (V4L2_CID_USER_CCS_BASE + 5)
+#define V4L2_CID_CCS_ANALOGUE_EXPONENTIAL_GAIN (V4L2_CID_USER_CCS_BASE + 6)
+#define V4L2_CID_CCS_SHADING_CORRECTION (V4L2_CID_USER_CCS_BASE + 8)
+#define V4L2_CID_CCS_LUMINANCE_CORRECTION_LEVEL (V4L2_CID_USER_CCS_BASE + 9)
+
+#endif
diff --git a/include/uapi/linux/cdrom.h b/include/uapi/linux/cdrom.h
index 2817230148fd..011e594e4a0d 100644
--- a/include/uapi/linux/cdrom.h
+++ b/include/uapi/linux/cdrom.h
@@ -103,7 +103,7 @@
#define CDROMREADALL 0x5318 /* read all 2646 bytes */
/*
- * These ioctls are (now) only in ide-cd.c for controlling
+ * These ioctls were only in (now removed) ide-cd.c for controlling
* drive spindown time. They should be implemented in the
* Uniform driver, via generic packet commands, GPCMD_MODE_SELECT_10,
* GPCMD_MODE_SENSE_10 and the GPMODE_POWER_PAGE...
@@ -147,6 +147,8 @@
#define CDROM_NEXT_WRITABLE 0x5394 /* get next writable block */
#define CDROM_LAST_WRITTEN 0x5395 /* get last block written on disc */
+#define CDROM_TIMED_MEDIA_CHANGE 0x5396 /* get the timestamp of the last media change */
+
/*******************************************************
* CDROM IOCTL structures
*******************************************************/
@@ -289,8 +291,28 @@ struct cdrom_generic_command
unsigned char data_direction;
int quiet;
int timeout;
- void __user *reserved[1]; /* unused, actually */
-};
+ union {
+ void __user *reserved[1]; /* unused, actually */
+ void __user *unused;
+ };
+};
+
+/* This struct is used by CDROM_TIMED_MEDIA_CHANGE */
+struct cdrom_timed_media_change_info {
+ __s64 last_media_change; /* Timestamp of the last detected media
+ * change in ms. May be set by caller,
+ * updated upon successful return of
+ * ioctl.
+ */
+ __u64 media_flags; /* Flags returned by ioctl to indicate
+ * media status.
+ */
+};
+#define MEDIA_CHANGED_FLAG 0x1 /* Last detected media change was more
+ * recent than last_media_change set by
+ * caller.
+ */
+/* other bits of media_flags available for future use */
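
A sketch of how a caller might use the new ioctl; the wrapper is illustrative and assumes fd refers to an opened CD-ROM device node.

#include <sys/ioctl.h>
#include <linux/cdrom.h>

/* Sketch: ask whether the media changed since 'since_ms', a timestamp
 * previously returned in last_media_change. */
static int media_changed_since(int fd, __s64 since_ms)
{
	struct cdrom_timed_media_change_info info = {
		.last_media_change = since_ms,
	};

	if (ioctl(fd, CDROM_TIMED_MEDIA_CHANGE, &info) < 0)
		return -1;
	/* info.last_media_change now holds the last detected change */
	return !!(info.media_flags & MEDIA_CHANGED_FLAG);
}
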
/*
* A CD-ROM physical sector size is 2048, 2052, 2056, 2324, 2332, 2336,
diff --git a/include/uapi/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h
index 37590027b604..d58fa1cdcb08 100644
--- a/include/uapi/linux/cec-funcs.h
+++ b/include/uapi/linux/cec-funcs.h
@@ -1568,6 +1568,20 @@ static inline void cec_ops_request_short_audio_descriptor(const struct cec_msg *
}
}
+static inline void cec_msg_set_audio_volume_level(struct cec_msg *msg,
+ __u8 audio_volume_level)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_SET_AUDIO_VOLUME_LEVEL;
+ msg->msg[2] = audio_volume_level;
+}
+
+static inline void cec_ops_set_audio_volume_level(const struct cec_msg *msg,
+ __u8 *audio_volume_level)
+{
+ *audio_volume_level = msg->msg[2];
+}
+
/* Audio Rate Control Feature */
static inline void cec_msg_set_audio_rate(struct cec_msg *msg,
@@ -1665,7 +1679,7 @@ static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
if (*audio_out_compensated == 3 && msg->len >= 7)
*audio_out_delay = msg->msg[6];
else
- *audio_out_delay = 0;
+ *audio_out_delay = 1;
}
static inline void cec_msg_request_current_latency(struct cec_msg *msg,
diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
index 7d1a06c52469..b8e071abaea5 100644
--- a/include/uapi/linux/cec.h
+++ b/include/uapi/linux/cec.h
@@ -142,6 +142,26 @@ static inline void cec_msg_set_reply_to(struct cec_msg *msg,
msg->reply = msg->timeout = 0;
}
+/**
+ * cec_msg_recv_is_tx_result - return true if this message contains the
+ * result of an earlier non-blocking transmit
+ * @msg: the message structure from CEC_RECEIVE
+ */
+static inline int cec_msg_recv_is_tx_result(const struct cec_msg *msg)
+{
+ return msg->sequence && msg->tx_status && !msg->rx_status;
+}
+
+/**
+ * cec_msg_recv_is_rx_result - return true if this message contains the
+ * reply of an earlier non-blocking transmit
+ * @msg: the message structure from CEC_RECEIVE
+ */
+static inline int cec_msg_recv_is_rx_result(const struct cec_msg *msg)
+{
+ return msg->sequence && !msg->tx_status && msg->rx_status;
+}
+
/* cec_msg flags field */
#define CEC_MSG_FL_REPLY_TO_FOLLOWERS (1 << 0)
#define CEC_MSG_FL_RAW (1 << 1)
@@ -396,6 +416,7 @@ struct cec_drm_connector_info {
* associated with the CEC adapter.
* @type: connector type (if any)
* @drm: drm connector info
+ * @raw: array to pad the union
*/
struct cec_connector_info {
__u32 type;
@@ -453,7 +474,7 @@ struct cec_event_lost_msgs {
* struct cec_event - CEC event structure
* @ts: the timestamp of when the event was sent.
* @event: the event.
- * array.
+ * @flags: event flags.
* @state_change: the event payload for CEC_EVENT_STATE_CHANGE.
* @lost_msgs: the event payload for CEC_EVENT_LOST_MSGS.
* @raw: array to pad the union.
@@ -641,7 +662,7 @@ struct cec_event {
#define CEC_OP_REC_SEQ_WEDNESDAY 0x08
#define CEC_OP_REC_SEQ_THURSDAY 0x10
#define CEC_OP_REC_SEQ_FRIDAY 0x20
-#define CEC_OP_REC_SEQ_SATERDAY 0x40
+#define CEC_OP_REC_SEQ_SATURDAY 0x40
#define CEC_OP_REC_SEQ_ONCE_ONLY 0x00
#define CEC_MSG_CLEAR_DIGITAL_TIMER 0x99
@@ -747,6 +768,7 @@ struct cec_event {
#define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_RATE 0x08
#define CEC_OP_FEAT_DEV_SINK_HAS_ARC_TX 0x04
#define CEC_OP_FEAT_DEV_SOURCE_HAS_ARC_RX 0x02
+#define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_VOLUME_LEVEL 0x01
#define CEC_MSG_GIVE_FEATURES 0xa5 /* HDMI 2.0 */
@@ -1038,6 +1060,7 @@ struct cec_event {
#define CEC_OP_AUD_FMT_ID_CEA861 0
#define CEC_OP_AUD_FMT_ID_CEA861_CXT 1
+#define CEC_MSG_SET_AUDIO_VOLUME_LEVEL 0x73
/* Audio Rate Control Feature */
#define CEC_MSG_SET_AUDIO_RATE 0x9a
diff --git a/include/uapi/linux/cfm_bridge.h b/include/uapi/linux/cfm_bridge.h
new file mode 100644
index 000000000000..3c1cbd1db2f5
--- /dev/null
+++ b/include/uapi/linux/cfm_bridge.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+
+#ifndef _UAPI_LINUX_CFM_BRIDGE_H_
+#define _UAPI_LINUX_CFM_BRIDGE_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#define ETHER_HEADER_LENGTH (6+6+4+2)
+#define CFM_MAID_LENGTH 48
+#define CFM_CCM_PDU_LENGTH 75
+#define CFM_PORT_STATUS_TLV_LENGTH 4
+#define CFM_IF_STATUS_TLV_LENGTH 4
+#define CFM_IF_STATUS_TLV_TYPE 4
+#define CFM_PORT_STATUS_TLV_TYPE 2
+#define CFM_ENDE_TLV_TYPE 0
+#define CFM_CCM_MAX_FRAME_LENGTH (ETHER_HEADER_LENGTH+\
+ CFM_CCM_PDU_LENGTH+\
+ CFM_PORT_STATUS_TLV_LENGTH+\
+ CFM_IF_STATUS_TLV_LENGTH)
+#define CFM_FRAME_PRIO 7
+#define CFM_CCM_TLV_OFFSET 70
+#define CFM_CCM_PDU_MAID_OFFSET 10
+#define CFM_CCM_PDU_MEPID_OFFSET 8
+#define CFM_CCM_PDU_SEQNR_OFFSET 4
+#define CFM_CCM_PDU_TLV_OFFSET 74
+#define CFM_CCM_ITU_RESERVED_SIZE 16
+
+struct br_cfm_common_hdr {
+ __u8 mdlevel_version;
+ __u8 opcode;
+ __u8 flags;
+ __u8 tlv_offset;
+};
+
+enum br_cfm_opcodes {
+ BR_CFM_OPCODE_CCM = 0x1,
+};
+
+/* MEP domain */
+enum br_cfm_domain {
+ BR_CFM_PORT,
+ BR_CFM_VLAN,
+};
+
+/* MEP direction */
+enum br_cfm_mep_direction {
+ BR_CFM_MEP_DIRECTION_DOWN,
+ BR_CFM_MEP_DIRECTION_UP,
+};
+
+/* CCM interval supported. */
+enum br_cfm_ccm_interval {
+ BR_CFM_CCM_INTERVAL_NONE,
+ BR_CFM_CCM_INTERVAL_3_3_MS,
+ BR_CFM_CCM_INTERVAL_10_MS,
+ BR_CFM_CCM_INTERVAL_100_MS,
+ BR_CFM_CCM_INTERVAL_1_SEC,
+ BR_CFM_CCM_INTERVAL_10_SEC,
+ BR_CFM_CCM_INTERVAL_1_MIN,
+ BR_CFM_CCM_INTERVAL_10_MIN,
+};
+
+#endif
diff --git a/include/uapi/linux/cifs/cifs_mount.h b/include/uapi/linux/cifs/cifs_mount.h
index 69829205fdb5..8e87d27b0951 100644
--- a/include/uapi/linux/cifs/cifs_mount.h
+++ b/include/uapi/linux/cifs/cifs_mount.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */
/*
- * include/uapi/linux/cifs/cifs_mount.h
*
* Author(s): Scott Lovenberg (scott.lovenberg@gmail.com)
*
diff --git a/include/uapi/linux/cifs/cifs_netlink.h b/include/uapi/linux/cifs/cifs_netlink.h
new file mode 100644
index 000000000000..da3107582f49
--- /dev/null
+++ b/include/uapi/linux/cifs/cifs_netlink.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */
+/*
+ * Netlink routines for CIFS
+ *
+ * Copyright (c) 2020 Samuel Cabrero <scabrero@suse.de>
+ */
+
+
+#ifndef _UAPILINUX_CIFS_NETLINK_H
+#define _UAPILINUX_CIFS_NETLINK_H
+
+#define CIFS_GENL_NAME "cifs"
+#define CIFS_GENL_VERSION 0x1
+
+#define CIFS_GENL_MCGRP_SWN_NAME "cifs_mcgrp_swn"
+
+enum cifs_genl_multicast_groups {
+ CIFS_GENL_MCGRP_SWN,
+};
+
+enum cifs_genl_attributes {
+ CIFS_GENL_ATTR_UNSPEC,
+ CIFS_GENL_ATTR_SWN_REGISTRATION_ID,
+ CIFS_GENL_ATTR_SWN_NET_NAME,
+ CIFS_GENL_ATTR_SWN_SHARE_NAME,
+ CIFS_GENL_ATTR_SWN_IP,
+ CIFS_GENL_ATTR_SWN_NET_NAME_NOTIFY,
+ CIFS_GENL_ATTR_SWN_SHARE_NAME_NOTIFY,
+ CIFS_GENL_ATTR_SWN_IP_NOTIFY,
+ CIFS_GENL_ATTR_SWN_KRB_AUTH,
+ CIFS_GENL_ATTR_SWN_USER_NAME,
+ CIFS_GENL_ATTR_SWN_PASSWORD,
+ CIFS_GENL_ATTR_SWN_DOMAIN_NAME,
+ CIFS_GENL_ATTR_SWN_NOTIFICATION_TYPE,
+ CIFS_GENL_ATTR_SWN_RESOURCE_STATE,
+ CIFS_GENL_ATTR_SWN_RESOURCE_NAME,
+ __CIFS_GENL_ATTR_MAX,
+};
+#define CIFS_GENL_ATTR_MAX (__CIFS_GENL_ATTR_MAX - 1)
+
+enum cifs_genl_commands {
+ CIFS_GENL_CMD_UNSPEC,
+ CIFS_GENL_CMD_SWN_REGISTER,
+ CIFS_GENL_CMD_SWN_UNREGISTER,
+ CIFS_GENL_CMD_SWN_NOTIFY,
+ __CIFS_GENL_CMD_MAX
+};
+#define CIFS_GENL_CMD_MAX (__CIFS_GENL_CMD_MAX - 1)
+
+enum cifs_swn_notification_type {
+ CIFS_SWN_NOTIFICATION_RESOURCE_CHANGE = 0x01,
+ CIFS_SWN_NOTIFICATION_CLIENT_MOVE = 0x02,
+ CIFS_SWN_NOTIFICATION_SHARE_MOVE = 0x03,
+ CIFS_SWN_NOTIFICATION_IP_CHANGE = 0x04,
+};
+
+enum cifs_swn_resource_state {
+ CIFS_SWN_RESOURCE_STATE_UNKNOWN = 0x00,
+ CIFS_SWN_RESOURCE_STATE_AVAILABLE = 0x01,
+ CIFS_SWN_RESOURCE_STATE_UNAVAILABLE = 0xFF
+};
+
+#endif /* _UAPILINUX_CIFS_NETLINK_H */
diff --git a/include/uapi/linux/close_range.h b/include/uapi/linux/close_range.h
new file mode 100644
index 000000000000..2d804281554c
--- /dev/null
+++ b/include/uapi/linux/close_range.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_CLOSE_RANGE_H
+#define _UAPI_LINUX_CLOSE_RANGE_H
+
+/* Unshare the file descriptor table before closing file descriptors. */
+#define CLOSE_RANGE_UNSHARE (1U << 1)
+
+/* Set the FD_CLOEXEC bit instead of closing the file descriptor. */
+#define CLOSE_RANGE_CLOEXEC (1U << 2)
+
+#endif /* _UAPI_LINUX_CLOSE_RANGE_H */
+
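A sketch of using the CLOSE_RANGE_CLOEXEC flag via the raw syscall; the libc may not provide a close_range() wrapper everywhere, and SYS_close_range is assumed to be defined by the toolchain headers.

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/close_range.h>

/* Sketch: mark every fd >= first close-on-exec instead of closing it,
 * so the fds stay usable until the next execve(). */
static long cloexec_from(unsigned int first)
{
	return syscall(SYS_close_range, first, ~0U, CLOSE_RANGE_CLOEXEC);
}
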
diff --git a/drivers/staging/comedi/comedi.h b/include/uapi/linux/comedi.h
index 09a940066c0e..7314e5ee0a1e 100644
--- a/drivers/staging/comedi/comedi.h
+++ b/include/uapi/linux/comedi.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: LGPL-2.0+ */
+/* SPDX-License-Identifier: LGPL-2.0+ WITH Linux-syscall-note */
/*
* comedi.h
* header file for COMEDI user API
@@ -680,7 +680,7 @@ struct comedi_rangeinfo {
* value of 1 volt.
*
* The only defined flag value is %RF_EXTERNAL (%0x100), indicating that the
- * the range needs to be multiplied by an external reference.
+ * range needs to be multiplied by an external reference.
*/
struct comedi_krange {
int min;
@@ -970,7 +970,7 @@ enum i8254_mode {
* major reasons exist why this caused major confusion for users:
* 1) The register values are _NOT_ in user documentation, but rather in
* arcane locations, such as a few register programming manuals that are
- * increasingly hard to find and the NI MHDDK (comments in in example code).
+ * increasingly hard to find and the NI MHDDK (comments in example code).
* There is no one place to find the various valid values of the registers.
* 2) The register values are _NOT_ completely consistent. There is no way to
* gain any sense of intuition of which values, or even enums one should use
diff --git a/include/uapi/linux/connector.h b/include/uapi/linux/connector.h
index 3738936149a2..5ae131c3f145 100644
--- a/include/uapi/linux/connector.h
+++ b/include/uapi/linux/connector.h
@@ -75,7 +75,7 @@ struct cn_msg {
__u16 len; /* Length of the following data */
__u16 flags;
- __u8 data[0];
+ __u8 data[];
};
#endif /* _UAPI__CONNECTOR_H */
diff --git a/include/uapi/linux/const.h b/include/uapi/linux/const.h
index 5ed721ad5b19..af2a44c08683 100644
--- a/include/uapi/linux/const.h
+++ b/include/uapi/linux/const.h
@@ -28,4 +28,9 @@
#define _BITUL(x) (_UL(1) << (x))
#define _BITULL(x) (_ULL(1) << (x))
+#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
+
+#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+
#endif /* _UAPI_LINUX_CONST_H */
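
A short worked example of the new helpers, assuming a 64-byte granule:

#include <linux/const.h>

/* __ALIGN_KERNEL rounds up to a power-of-two boundary and
 * __KERNEL_DIV_ROUND_UP divides rounding up, e.g. for a 100 byte
 * object and 64 byte cache lines: */
#define LINE 64
unsigned long aligned = __ALIGN_KERNEL(100UL, LINE);	  /* -> 128 */
unsigned long lines = __KERNEL_DIV_ROUND_UP(100UL, LINE); /* ->   2 */
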
diff --git a/include/uapi/linux/coresight-stm.h b/include/uapi/linux/coresight-stm.h
index aac550a52f80..7ff3709c01b8 100644
--- a/include/uapi/linux/coresight-stm.h
+++ b/include/uapi/linux/coresight-stm.h
@@ -2,8 +2,11 @@
#ifndef __UAPI_CORESIGHT_STM_H_
#define __UAPI_CORESIGHT_STM_H_
-#define STM_FLAG_TIMESTAMPED BIT(3)
-#define STM_FLAG_GUARANTEED BIT(7)
+#include <linux/const.h>
+
+#define STM_FLAG_TIMESTAMPED _BITUL(3)
+#define STM_FLAG_MARKED _BITUL(4)
+#define STM_FLAG_GUARANTEED _BITUL(7)
/*
* The CoreSight STM supports guaranteed and invariant timing
diff --git a/include/uapi/linux/counter.h b/include/uapi/linux/counter.h
new file mode 100644
index 000000000000..8ab12d731e3b
--- /dev/null
+++ b/include/uapi/linux/counter.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Userspace ABI for Counter character devices
+ * Copyright (C) 2020 William Breathitt Gray
+ */
+#ifndef _UAPI_COUNTER_H_
+#define _UAPI_COUNTER_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/* Component type definitions */
+enum counter_component_type {
+ COUNTER_COMPONENT_NONE,
+ COUNTER_COMPONENT_SIGNAL,
+ COUNTER_COMPONENT_COUNT,
+ COUNTER_COMPONENT_FUNCTION,
+ COUNTER_COMPONENT_SYNAPSE_ACTION,
+ COUNTER_COMPONENT_EXTENSION,
+};
+
+/* Component scope definitions */
+enum counter_scope {
+ COUNTER_SCOPE_DEVICE,
+ COUNTER_SCOPE_SIGNAL,
+ COUNTER_SCOPE_COUNT,
+};
+
+/**
+ * struct counter_component - Counter component identification
+ * @type: component type (one of enum counter_component_type)
+ * @scope: component scope (one of enum counter_scope)
+ * @parent: parent ID (matching the ID suffix of the respective parent sysfs
+ * path as described by the ABI documentation file
+ * Documentation/ABI/testing/sysfs-bus-counter)
+ * @id: component ID (matching the ID provided by the respective *_component_id
+ * sysfs attribute of the desired component)
+ *
+ * For example, if the Count 2 ceiling extension of Counter device 4 is desired,
+ * set type equal to COUNTER_COMPONENT_EXTENSION, scope equal to
+ * COUNTER_SCOPE_COUNT, parent equal to 2, and id equal to the value provided by
+ * the respective /sys/bus/counter/devices/counter4/count2/ceiling_component_id
+ * sysfs attribute.
+ */
+struct counter_component {
+ __u8 type;
+ __u8 scope;
+ __u8 parent;
+ __u8 id;
+};
+
+/* Event type definitions */
+enum counter_event_type {
+ /* Count value increased past ceiling */
+ COUNTER_EVENT_OVERFLOW,
+ /* Count value decreased past floor */
+ COUNTER_EVENT_UNDERFLOW,
+ /* Count value increased past ceiling, or decreased past floor */
+ COUNTER_EVENT_OVERFLOW_UNDERFLOW,
+ /* Count value reached threshold */
+ COUNTER_EVENT_THRESHOLD,
+ /* Index signal detected */
+ COUNTER_EVENT_INDEX,
+ /* State of counter is changed */
+ COUNTER_EVENT_CHANGE_OF_STATE,
+ /* Count value captured */
+ COUNTER_EVENT_CAPTURE,
+};
+
+/**
+ * struct counter_watch - Counter component watch configuration
+ * @component: component to watch when event triggers
+ * @event: event that triggers (one of enum counter_event_type)
+ * @channel: event channel (typically 0 unless the device supports concurrent
+ * events of the same type)
+ */
+struct counter_watch {
+ struct counter_component component;
+ __u8 event;
+ __u8 channel;
+};
+
+/*
+ * Queues a Counter watch for the specified event.
+ *
+ * The queued watches will not be applied until COUNTER_ENABLE_EVENTS_IOCTL is
+ * called.
+ */
+#define COUNTER_ADD_WATCH_IOCTL _IOW(0x3E, 0x00, struct counter_watch)
+/*
+ * Enables monitoring the events specified by the Counter watches that were
+ * queued by COUNTER_ADD_WATCH_IOCTL.
+ *
+ * If events are already enabled, the new set of watches replaces the old one.
+ * Calling this ioctl also has the effect of clearing the queue of watches added
+ * by COUNTER_ADD_WATCH_IOCTL.
+ */
+#define COUNTER_ENABLE_EVENTS_IOCTL _IO(0x3E, 0x01)
+/*
+ * Stops monitoring the previously enabled events.
+ */
+#define COUNTER_DISABLE_EVENTS_IOCTL _IO(0x3E, 0x02)
+
+/**
+ * struct counter_event - Counter event data
+ * @timestamp: best estimate of time of event occurrence, in nanoseconds
+ * @value: component value
+ * @watch: component watch configuration
+ * @status: return status (system error number)
+ */
+struct counter_event {
+ __aligned_u64 timestamp;
+ __aligned_u64 value;
+ struct counter_watch watch;
+ __u8 status;
+};
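
The watch/event pieces above combine into a simple flow: queue watches, enable events, then read() fixed-size counter_event records from the character device. A sketch, with the device path and component choice as examples:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/counter.h>

/* Sketch: watch Count 0 for overflow events on /dev/counter0. */
static int watch_overflow(void)
{
	struct counter_watch watch = {
		.component = {
			.type = COUNTER_COMPONENT_COUNT,
			.scope = COUNTER_SCOPE_COUNT,
			.parent = 0,	/* count0 */
		},
		.event = COUNTER_EVENT_OVERFLOW,
		.channel = 0,
	};
	struct counter_event event;
	int fd = open("/dev/counter0", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, COUNTER_ADD_WATCH_IOCTL, &watch) < 0 ||
	    ioctl(fd, COUNTER_ENABLE_EVENTS_IOCTL) < 0)
		return -1;

	/* Each read() delivers whole counter_event records. */
	if (read(fd, &event, sizeof(event)) != sizeof(event))
		return -1;
	return event.status;	/* 0 on success; event.value is the count */
}
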
+
+/* Count direction values */
+enum counter_count_direction {
+ COUNTER_COUNT_DIRECTION_FORWARD,
+ COUNTER_COUNT_DIRECTION_BACKWARD,
+};
+
+/* Count mode values */
+enum counter_count_mode {
+ COUNTER_COUNT_MODE_NORMAL,
+ COUNTER_COUNT_MODE_RANGE_LIMIT,
+ COUNTER_COUNT_MODE_NON_RECYCLE,
+ COUNTER_COUNT_MODE_MODULO_N,
+};
+
+/* Count function values */
+enum counter_function {
+ COUNTER_FUNCTION_INCREASE,
+ COUNTER_FUNCTION_DECREASE,
+ COUNTER_FUNCTION_PULSE_DIRECTION,
+ COUNTER_FUNCTION_QUADRATURE_X1_A,
+ COUNTER_FUNCTION_QUADRATURE_X1_B,
+ COUNTER_FUNCTION_QUADRATURE_X2_A,
+ COUNTER_FUNCTION_QUADRATURE_X2_B,
+ COUNTER_FUNCTION_QUADRATURE_X4,
+};
+
+/* Signal values */
+enum counter_signal_level {
+ COUNTER_SIGNAL_LEVEL_LOW,
+ COUNTER_SIGNAL_LEVEL_HIGH,
+};
+
+/* Action mode values */
+enum counter_synapse_action {
+ COUNTER_SYNAPSE_ACTION_NONE,
+ COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+ COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
+};
+
+/* Signal polarity values */
+enum counter_signal_polarity {
+ COUNTER_SIGNAL_POLARITY_POSITIVE,
+ COUNTER_SIGNAL_POLARITY_NEGATIVE,
+};
+
+#endif /* _UAPI_COUNTER_H_ */
diff --git a/include/uapi/linux/cxl_mem.h b/include/uapi/linux/cxl_mem.h
new file mode 100644
index 000000000000..c71021a2a9ed
--- /dev/null
+++ b/include/uapi/linux/cxl_mem.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * CXL IOCTLs for Memory Devices
+ */
+
+#ifndef _UAPI_CXL_MEM_H_
+#define _UAPI_CXL_MEM_H_
+
+#include <linux/types.h>
+
+/**
+ * DOC: UAPI
+ *
+ * Not all of the commands that the driver supports are always available for
+ * use by userspace. Userspace must check the results of the QUERY command to
+ * determine the live set of commands.
+ */
+
+#define CXL_MEM_QUERY_COMMANDS _IOR(0xCE, 1, struct cxl_mem_query_commands)
+#define CXL_MEM_SEND_COMMAND _IOWR(0xCE, 2, struct cxl_send_command)
+
+#define CXL_CMDS \
+ ___C(INVALID, "Invalid Command"), \
+ ___C(IDENTIFY, "Identify Command"), \
+ ___C(RAW, "Raw device command"), \
+ ___C(GET_SUPPORTED_LOGS, "Get Supported Logs"), \
+ ___C(GET_FW_INFO, "Get FW Info"), \
+ ___C(GET_PARTITION_INFO, "Get Partition Information"), \
+ ___C(GET_LSA, "Get Label Storage Area"), \
+ ___C(GET_HEALTH_INFO, "Get Health Info"), \
+ ___C(GET_LOG, "Get Log"), \
+ ___C(SET_PARTITION_INFO, "Set Partition Information"), \
+ ___C(SET_LSA, "Set Label Storage Area"), \
+ ___C(GET_ALERT_CONFIG, "Get Alert Configuration"), \
+ ___C(SET_ALERT_CONFIG, "Set Alert Configuration"), \
+ ___C(GET_SHUTDOWN_STATE, "Get Shutdown State"), \
+ ___C(SET_SHUTDOWN_STATE, "Set Shutdown State"), \
+ ___C(GET_POISON, "Get Poison List"), \
+ ___C(INJECT_POISON, "Inject Poison"), \
+ ___C(CLEAR_POISON, "Clear Poison"), \
+ ___C(GET_SCAN_MEDIA_CAPS, "Get Scan Media Capabilities"), \
+ ___C(SCAN_MEDIA, "Scan Media"), \
+ ___C(GET_SCAN_MEDIA, "Get Scan Media Results"), \
+ ___C(MAX, "invalid / last command")
+
+#define ___C(a, b) CXL_MEM_COMMAND_ID_##a
+enum { CXL_CMDS };
+
+#undef ___C
+#define ___C(a, b) { b }
+static const struct {
+ const char *name;
+} cxl_command_names[] __attribute__((__unused__)) = { CXL_CMDS };
+
+/*
+ * Here's how this actually breaks out:
+ * cxl_command_names[] = {
+ * [CXL_MEM_COMMAND_ID_INVALID] = { "Invalid Command" },
+ * [CXL_MEM_COMMAND_ID_IDENTIFY] = { "Identify Command" },
+ * ...
+ * [CXL_MEM_COMMAND_ID_MAX] = { "invalid / last command" },
+ * };
+ */
+
+#undef ___C
+
+/**
+ * struct cxl_command_info - Command information returned from a query.
+ * @id: ID number for the command.
+ * @flags: Flags that specify command behavior.
+ * @size_in: Expected input size, or ~0 if variable length.
+ * @size_out: Expected output size, or ~0 if variable length.
+ *
+ * Represents a single command that is supported by both the driver and the
+ * hardware. This is returned as part of an array from the query ioctl. The
+ * following would be a command that takes a variable length input and returns 0
+ * bytes of output.
+ *
+ * - @id = 10
+ * - @flags = 0
+ * - @size_in = ~0
+ * - @size_out = 0
+ *
+ * See struct cxl_mem_query_commands.
+ */
+struct cxl_command_info {
+ __u32 id;
+
+ __u32 flags;
+#define CXL_MEM_COMMAND_FLAG_MASK GENMASK(0, 0)
+
+ __u32 size_in;
+ __u32 size_out;
+};
+
+/**
+ * struct cxl_mem_query_commands - Query supported commands.
+ * @n_commands: In/out parameter. When @n_commands is > 0, the driver will
+ * return min(num_supported_commands, n_commands). When @n_commands
+ * is 0, the driver will return the total number of supported commands.
+ * @rsvd: Reserved for future use.
+ * @commands: Output array of supported commands. This array must be allocated
+ * by userspace to hold at least min(num_supported_commands, @n_commands)
+ * entries.
+ *
+ * Allow userspace to query the available commands supported by both the driver
+ * and the hardware. Commands that aren't supported by either the driver or the
+ * hardware are not returned in the query.
+ *
+ * Examples:
+ *
+ * - { .n_commands = 0 } // Get number of supported commands
+ * - { .n_commands = 15, .commands = buf } // Return first 15 (or less)
+ * supported commands
+ *
+ * See struct cxl_command_info.
+ */
+struct cxl_mem_query_commands {
+ /*
+ * Input: Number of commands to return (space allocated by user)
+ * Output: Number of commands supported by the driver/hardware
+ *
+ * If n_commands is 0, kernel will only return number of commands and
+ * not try to populate commands[], thus allowing userspace to know how
+ * much space to allocate
+ */
+ __u32 n_commands;
+ __u32 rsvd;
+
+ struct cxl_command_info __user commands[]; /* out: supported commands */
+};
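
The kernel-doc above describes a two-call pattern; a sketch of it in userspace follows. The wrapper name is illustrative and the fd is assumed to refer to an opened CXL memory device node.

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/cxl_mem.h>

/* Sketch: probe the command count, then fetch the command array. */
static struct cxl_mem_query_commands *query_commands(int fd)
{
	struct cxl_mem_query_commands probe = { .n_commands = 0 };
	struct cxl_mem_query_commands *q;

	/* First call: learn how many commands are supported. */
	if (ioctl(fd, CXL_MEM_QUERY_COMMANDS, &probe) < 0)
		return NULL;

	q = calloc(1, sizeof(*q) +
		      probe.n_commands * sizeof(struct cxl_command_info));
	if (!q)
		return NULL;
	q->n_commands = probe.n_commands;

	/* Second call: populate the flexible commands[] array. */
	if (ioctl(fd, CXL_MEM_QUERY_COMMANDS, q) < 0) {
		free(q);
		return NULL;
	}
	return q;
}
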
+
+/**
+ * struct cxl_send_command - Send a command to a memory device.
+ * @id: The command to send to the memory device. This must be one of the
+ * commands returned by the query command.
+ * @flags: Flags for the command (input).
+ * @raw: Special fields for raw commands
+ * @raw.opcode: Opcode passed to hardware when using the RAW command.
+ * @raw.rsvd: Must be zero.
+ * @rsvd: Must be zero.
+ * @retval: Return value from the memory device (output).
+ * @in: Parameters associated with input payload.
+ * @in.size: Size of the payload to provide to the device (input).
+ * @in.rsvd: Must be zero.
+ * @in.payload: Pointer to memory for payload input, payload is little endian.
+ * @out: Parameters associated with output payload.
+ * @out.size: Size of the payload received from the device (input/output). This
+ * field is filled in by userspace to let the driver know how much
+ * space was allocated for output. It is populated by the driver to
+ * let userspace know how large the output payload actually was.
+ * @out.rsvd: Must be zero.
+ * @out.payload: Pointer to memory for payload output, payload is little endian.
+ *
+ * Mechanism for userspace to send a command to the hardware for processing. The
+ * driver will do basic validation on the command sizes. In some cases even the
+ * payload may be introspected. Userspace is required to allocate a large
+ * enough buffer for size_out, which can be variable length in certain
+ * situations.
+ */
+struct cxl_send_command {
+ __u32 id;
+ __u32 flags;
+ union {
+ struct {
+ __u16 opcode;
+ __u16 rsvd;
+ } raw;
+ __u32 rsvd;
+ };
+ __u32 retval;
+
+ struct {
+ __u32 size;
+ __u32 rsvd;
+ __u64 payload;
+ } in;
+
+ struct {
+ __u32 size;
+ __u32 rsvd;
+ __u64 payload;
+ } out;
+};
+
+#endif
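
Tying the two ioctls together, here is a sketch of issuing IDENTIFY through CXL_MEM_SEND_COMMAND; the wrapper and buffer management are illustrative.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/cxl_mem.h>

/* Sketch: run IDENTIFY with a caller-supplied output buffer. The
 * driver rewrites out.size to the payload length actually returned. */
static int cxl_identify(int fd, void *out_buf, __u32 out_len)
{
	struct cxl_send_command cmd = {
		.id = CXL_MEM_COMMAND_ID_IDENTIFY,
		.out.size = out_len,
		.out.payload = (__u64)(uintptr_t)out_buf,
	};

	if (ioctl(fd, CXL_MEM_SEND_COMMAND, &cmd) < 0)
		return -1;
	return cmd.retval;	/* device return code, 0 on success */
}
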
diff --git a/include/uapi/linux/cyclades.h b/include/uapi/linux/cyclades.h
index fc0add2194a9..6225c5aebe06 100644
--- a/include/uapi/linux/cyclades.h
+++ b/include/uapi/linux/cyclades.h
@@ -1,109 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/* $Revision: 3.0 $$Date: 1998/11/02 14:20:59 $
- * linux/include/linux/cyclades.h
- *
- * This file was initially written by
- * Randolph Bentson <bentson@grieg.seaslug.org> and is maintained by
- * Ivan Passos <ivan@cyclades.com>.
- *
- * This file contains the general definitions for the cyclades.c driver
- *$Log: cyclades.h,v $
- *Revision 3.1 2002/01/29 11:36:16 henrique
- *added throttle field on struct cyclades_port to indicate whether the
- *port is throttled or not
- *
- *Revision 3.1 2000/04/19 18:52:52 ivan
- *converted address fields to unsigned long and added fields for physical
- *addresses on cyclades_card structure;
- *
- *Revision 3.0 1998/11/02 14:20:59 ivan
- *added nports field on cyclades_card structure;
- *
- *Revision 2.5 1998/08/03 16:57:01 ivan
- *added cyclades_idle_stats structure;
- *
- *Revision 2.4 1998/06/01 12:09:53 ivan
- *removed closing_wait2 from cyclades_port structure;
- *
- *Revision 2.3 1998/03/16 18:01:12 ivan
- *changes in the cyclades_port structure to get it closer to the
- *standard serial port structure;
- *added constants for new ioctls;
- *
- *Revision 2.2 1998/02/17 16:50:00 ivan
- *changes in the cyclades_port structure (addition of shutdown_wait and
- *chip_rev variables);
- *added constants for new ioctls and for CD1400 rev. numbers.
- *
- *Revision 2.1 1997/10/24 16:03:00 ivan
- *added rflow (which allows enabling the CD1400 special flow control
- *feature) and rtsdtr_inv (which allows DTR/RTS pin inversion) to
- *cyclades_port structure;
- *added Alpha support
- *
- *Revision 2.0 1997/06/30 10:30:00 ivan
- *added some new doorbell command constants related to IOCTLW and
- *UART error signaling
- *
- *Revision 1.8 1997/06/03 15:30:00 ivan
- *added constant ZFIRM_HLT
- *added constant CyPCI_Ze_win ( = 2 * Cy_PCI_Zwin)
- *
- *Revision 1.7 1997/03/26 10:30:00 daniel
- *new entries at the end of cyclades_port struct to reallocate
- *variables illegally allocated within card memory.
- *
- *Revision 1.6 1996/09/09 18:35:30 bentson
- *fold in changes for Cyclom-Z -- including structures for
- *communicating with board as well modest changes to original
- *structures to support new features.
- *
- *Revision 1.5 1995/11/13 21:13:31 bentson
- *changes suggested by Michael Chastain <mec@duracef.shout.net>
- *to support use of this file in non-kernel applications
- *
- *
- */
#ifndef _UAPI_LINUX_CYCLADES_H
#define _UAPI_LINUX_CYCLADES_H
-#include <linux/types.h>
+#warning "Support for features provided by this header has been removed"
+#warning "Please consider updating your code"
struct cyclades_monitor {
- unsigned long int_count;
- unsigned long char_count;
- unsigned long char_max;
- unsigned long char_last;
-};
-
-/*
- * These stats all reflect activity since the device was last initialized.
- * (i.e., since the port was opened with no other processes already having it
- * open)
- */
-struct cyclades_idle_stats {
- __kernel_old_time_t in_use; /* Time device has been in use (secs) */
- __kernel_old_time_t recv_idle; /* Time since last char received (secs) */
- __kernel_old_time_t xmit_idle; /* Time since last char transmitted (secs) */
- unsigned long recv_bytes; /* Bytes received */
- unsigned long xmit_bytes; /* Bytes transmitted */
- unsigned long overruns; /* Input overruns */
- unsigned long frame_errs; /* Input framing errors */
- unsigned long parity_errs; /* Input parity errors */
-};
-
-#define CYCLADES_MAGIC 0x4359
-
-#define CYGETMON 0x435901
-#define CYGETTHRESH 0x435902
-#define CYSETTHRESH 0x435903
-#define CYGETDEFTHRESH 0x435904
-#define CYSETDEFTHRESH 0x435905
-#define CYGETTIMEOUT 0x435906
-#define CYSETTIMEOUT 0x435907
-#define CYGETDEFTIMEOUT 0x435908
-#define CYSETDEFTIMEOUT 0x435909
+ unsigned long int_count;
+ unsigned long char_count;
+ unsigned long char_max;
+ unsigned long char_last;
+};
+
+#define CYGETMON 0x435901
+#define CYGETTHRESH 0x435902
+#define CYSETTHRESH 0x435903
+#define CYGETDEFTHRESH 0x435904
+#define CYSETDEFTHRESH 0x435905
+#define CYGETTIMEOUT 0x435906
+#define CYSETTIMEOUT 0x435907
+#define CYGETDEFTIMEOUT 0x435908
+#define CYSETDEFTIMEOUT 0x435909
#define CYSETRFLOW 0x43590a
#define CYGETRFLOW 0x43590b
#define CYSETRTSDTR_INV 0x43590c
@@ -111,384 +29,7 @@ struct cyclades_idle_stats {
#define CYZSETPOLLCYCLE 0x43590e
#define CYZGETPOLLCYCLE 0x43590f
#define CYGETCD1400VER 0x435910
-#define CYSETWAIT 0x435912
-#define CYGETWAIT 0x435913
-
-/*************** CYCLOM-Z ADDITIONS ***************/
-
-#define CZIOC ('M' << 8)
-#define CZ_NBOARDS (CZIOC|0xfa)
-#define CZ_BOOT_START (CZIOC|0xfb)
-#define CZ_BOOT_DATA (CZIOC|0xfc)
-#define CZ_BOOT_END (CZIOC|0xfd)
-#define CZ_TEST (CZIOC|0xfe)
-
-#define CZ_DEF_POLL (HZ/25)
-
-#define MAX_BOARD 4 /* Max number of boards */
-#define MAX_DEV 256 /* Max number of ports total */
-#define CYZ_MAX_SPEED 921600
-
-#define CYZ_FIFO_SIZE 16
-
-#define CYZ_BOOT_NWORDS 0x100
-struct CYZ_BOOT_CTRL {
- unsigned short nboard;
- int status[MAX_BOARD];
- int nchannel[MAX_BOARD];
- int fw_rev[MAX_BOARD];
- unsigned long offset;
- unsigned long data[CYZ_BOOT_NWORDS];
-};
-
-
-#ifndef DP_WINDOW_SIZE
-/*
- * Memory Window Sizes
- */
-
-#define DP_WINDOW_SIZE (0x00080000) /* window size 512 Kb */
-#define ZE_DP_WINDOW_SIZE (0x00100000) /* window size 1 Mb (Ze and
- 8Zo V.2 */
-#define CTRL_WINDOW_SIZE (0x00000080) /* runtime regs 128 bytes */
-
-/*
- * CUSTOM_REG - Cyclom-Z/PCI Custom Registers Set. The driver
- * normally will access only interested on the fpga_id, fpga_version,
- * start_cpu and stop_cpu.
- */
-
-struct CUSTOM_REG {
- __u32 fpga_id; /* FPGA Identification Register */
- __u32 fpga_version; /* FPGA Version Number Register */
- __u32 cpu_start; /* CPU start Register (write) */
- __u32 cpu_stop; /* CPU stop Register (write) */
- __u32 misc_reg; /* Miscellaneous Register */
- __u32 idt_mode; /* IDT mode Register */
- __u32 uart_irq_status; /* UART IRQ status Register */
- __u32 clear_timer0_irq; /* Clear timer interrupt Register */
- __u32 clear_timer1_irq; /* Clear timer interrupt Register */
- __u32 clear_timer2_irq; /* Clear timer interrupt Register */
- __u32 test_register; /* Test Register */
- __u32 test_count; /* Test Count Register */
- __u32 timer_select; /* Timer select register */
- __u32 pr_uart_irq_status; /* Prioritized UART IRQ stat Reg */
- __u32 ram_wait_state; /* RAM wait-state Register */
- __u32 uart_wait_state; /* UART wait-state Register */
- __u32 timer_wait_state; /* timer wait-state Register */
- __u32 ack_wait_state; /* ACK wait State Register */
-};
-
-/*
- * RUNTIME_9060 - PLX PCI9060ES local configuration and shared runtime
- * registers. This structure can be used to access the 9060 registers
- * (memory mapped).
- */
-
-struct RUNTIME_9060 {
- __u32 loc_addr_range; /* 00h - Local Address Range */
- __u32 loc_addr_base; /* 04h - Local Address Base */
- __u32 loc_arbitr; /* 08h - Local Arbitration */
- __u32 endian_descr; /* 0Ch - Big/Little Endian Descriptor */
- __u32 loc_rom_range; /* 10h - Local ROM Range */
- __u32 loc_rom_base; /* 14h - Local ROM Base */
- __u32 loc_bus_descr; /* 18h - Local Bus descriptor */
- __u32 loc_range_mst; /* 1Ch - Local Range for Master to PCI */
- __u32 loc_base_mst; /* 20h - Local Base for Master PCI */
- __u32 loc_range_io; /* 24h - Local Range for Master IO */
- __u32 pci_base_mst; /* 28h - PCI Base for Master PCI */
- __u32 pci_conf_io; /* 2Ch - PCI configuration for Master IO */
- __u32 filler1; /* 30h */
- __u32 filler2; /* 34h */
- __u32 filler3; /* 38h */
- __u32 filler4; /* 3Ch */
- __u32 mail_box_0; /* 40h - Mail Box 0 */
- __u32 mail_box_1; /* 44h - Mail Box 1 */
- __u32 mail_box_2; /* 48h - Mail Box 2 */
- __u32 mail_box_3; /* 4Ch - Mail Box 3 */
- __u32 filler5; /* 50h */
- __u32 filler6; /* 54h */
- __u32 filler7; /* 58h */
- __u32 filler8; /* 5Ch */
- __u32 pci_doorbell; /* 60h - PCI to Local Doorbell */
- __u32 loc_doorbell; /* 64h - Local to PCI Doorbell */
- __u32 intr_ctrl_stat; /* 68h - Interrupt Control/Status */
- __u32 init_ctrl; /* 6Ch - EEPROM control, Init Control, etc */
-};
-
-/* Values for the Local Base Address re-map register */
-
-#define WIN_RAM 0x00000001L /* set the sliding window to RAM */
-#define WIN_CREG 0x14000001L /* set the window to custom Registers */
-
-/* Values timer select registers */
-
-#define TIMER_BY_1M 0x00 /* clock divided by 1M */
-#define TIMER_BY_256K 0x01 /* clock divided by 256k */
-#define TIMER_BY_128K 0x02 /* clock divided by 128k */
-#define TIMER_BY_32K 0x03 /* clock divided by 32k */
-
-/****************** ****************** *******************/
-#endif
-
-#ifndef ZFIRM_ID
-/* #include "zfwint.h" */
-/****************** ****************** *******************/
-/*
- * This file contains the definitions for interfacing with the
- * Cyclom-Z ZFIRM Firmware.
- */
-
-/* General Constant definitions */
-
-#define MAX_CHAN 64 /* max number of channels per board */
-
-/* firmware id structure (set after boot) */
-
-#define ID_ADDRESS 0x00000180L /* signature/pointer address */
-#define ZFIRM_ID 0x5557465AL /* ZFIRM/U signature */
-#define ZFIRM_HLT 0x59505B5CL /* ZFIRM needs external power supply */
-#define ZFIRM_RST 0x56040674L /* RST signal (due to FW reset) */
-
-#define ZF_TINACT_DEF 1000 /* default inactivity timeout
- (1000 ms) */
-#define ZF_TINACT ZF_TINACT_DEF
-
-struct FIRM_ID {
- __u32 signature; /* ZFIRM/U signature */
- __u32 zfwctrl_addr; /* pointer to ZFW_CTRL structure */
-};
-
-/* Op. System id */
-
-#define C_OS_LINUX 0x00000030 /* generic Linux system */
-
-/* channel op_mode */
-
-#define C_CH_DISABLE 0x00000000 /* channel is disabled */
-#define C_CH_TXENABLE 0x00000001 /* channel Tx enabled */
-#define C_CH_RXENABLE 0x00000002 /* channel Rx enabled */
-#define C_CH_ENABLE 0x00000003 /* channel Tx/Rx enabled */
-#define C_CH_LOOPBACK 0x00000004 /* Loopback mode */
-
-/* comm_parity - parity */
-
-#define C_PR_NONE 0x00000000 /* None */
-#define C_PR_ODD 0x00000001 /* Odd */
-#define C_PR_EVEN 0x00000002 /* Even */
-#define C_PR_MARK 0x00000004 /* Mark */
-#define C_PR_SPACE 0x00000008 /* Space */
-#define C_PR_PARITY 0x000000ff
-
-#define C_PR_DISCARD 0x00000100 /* discard char with frame/par error */
-#define C_PR_IGNORE 0x00000200 /* ignore frame/par error */
-
-/* comm_data_l - data length and stop bits */
-
-#define C_DL_CS5 0x00000001
-#define C_DL_CS6 0x00000002
-#define C_DL_CS7 0x00000004
-#define C_DL_CS8 0x00000008
-#define C_DL_CS 0x0000000f
-#define C_DL_1STOP 0x00000010
-#define C_DL_15STOP 0x00000020
-#define C_DL_2STOP 0x00000040
-#define C_DL_STOP 0x000000f0
-
-/* interrupt enabling/status */
-
-#define C_IN_DISABLE 0x00000000 /* zero, disable interrupts */
-#define C_IN_TXBEMPTY 0x00000001 /* tx buffer empty */
-#define C_IN_TXLOWWM 0x00000002 /* tx buffer below LWM */
-#define C_IN_RXHIWM 0x00000010 /* rx buffer above HWM */
-#define C_IN_RXNNDT 0x00000020 /* rx no new data timeout */
-#define C_IN_MDCD 0x00000100 /* modem DCD change */
-#define C_IN_MDSR 0x00000200 /* modem DSR change */
-#define C_IN_MRI 0x00000400 /* modem RI change */
-#define C_IN_MCTS 0x00000800 /* modem CTS change */
-#define C_IN_RXBRK 0x00001000 /* Break received */
-#define C_IN_PR_ERROR 0x00002000 /* parity error */
-#define C_IN_FR_ERROR 0x00004000 /* frame error */
-#define C_IN_OVR_ERROR 0x00008000 /* overrun error */
-#define C_IN_RXOFL 0x00010000 /* RX buffer overflow */
-#define C_IN_IOCTLW 0x00020000 /* I/O control w/ wait */
-#define C_IN_MRTS 0x00040000 /* modem RTS drop */
-#define C_IN_ICHAR 0x00080000
-
-/* flow control */
-
-#define C_FL_OXX 0x00000001 /* output Xon/Xoff flow control */
-#define C_FL_IXX 0x00000002 /* output Xon/Xoff flow control */
-#define C_FL_OIXANY 0x00000004 /* output Xon/Xoff (any xon) */
-#define C_FL_SWFLOW 0x0000000f
-
-/* flow status */
-
-#define C_FS_TXIDLE 0x00000000 /* no Tx data in the buffer or UART */
-#define C_FS_SENDING 0x00000001 /* UART is sending data */
-#define C_FS_SWFLOW 0x00000002 /* Tx is stopped by received Xoff */
-
-/* rs_control/rs_status RS-232 signals */
-
-#define C_RS_PARAM 0x80000000 /* Indicates presence of parameter in
- IOCTLM command */
-#define C_RS_RTS 0x00000001 /* RTS */
-#define C_RS_DTR 0x00000004 /* DTR */
-#define C_RS_DCD 0x00000100 /* CD */
-#define C_RS_DSR 0x00000200 /* DSR */
-#define C_RS_RI 0x00000400 /* RI */
-#define C_RS_CTS 0x00000800 /* CTS */
-
-/* commands Host <-> Board */
-
-#define C_CM_RESET 0x01 /* reset/flush buffers */
-#define C_CM_IOCTL 0x02 /* re-read CH_CTRL */
-#define C_CM_IOCTLW 0x03 /* re-read CH_CTRL, intr when done */
-#define C_CM_IOCTLM 0x04 /* RS-232 outputs change */
-#define C_CM_SENDXOFF 0x10 /* send Xoff */
-#define C_CM_SENDXON 0x11 /* send Xon */
-#define C_CM_CLFLOW 0x12 /* Clear flow control (resume) */
-#define C_CM_SENDBRK 0x41 /* send break */
-#define C_CM_INTBACK 0x42 /* Interrupt back */
-#define C_CM_SET_BREAK 0x43 /* Tx break on */
-#define C_CM_CLR_BREAK 0x44 /* Tx break off */
-#define C_CM_CMD_DONE 0x45 /* Previous command done */
-#define C_CM_INTBACK2 0x46 /* Alternate Interrupt back */
-#define C_CM_TINACT 0x51 /* set inactivity detection */
-#define C_CM_IRQ_ENBL 0x52 /* enable generation of interrupts */
-#define C_CM_IRQ_DSBL 0x53 /* disable generation of interrupts */
-#define C_CM_ACK_ENBL 0x54 /* enable acknowledged interrupt mode */
-#define C_CM_ACK_DSBL 0x55 /* disable acknowledged intr mode */
-#define C_CM_FLUSH_RX 0x56 /* flushes Rx buffer */
-#define C_CM_FLUSH_TX 0x57 /* flushes Tx buffer */
-#define C_CM_Q_ENABLE 0x58 /* enables queue access from the
- driver */
-#define C_CM_Q_DISABLE 0x59 /* disables queue access from the
- driver */
-
-#define C_CM_TXBEMPTY 0x60 /* Tx buffer is empty */
-#define C_CM_TXLOWWM 0x61 /* Tx buffer low water mark */
-#define C_CM_RXHIWM 0x62 /* Rx buffer high water mark */
-#define C_CM_RXNNDT 0x63 /* rx no new data timeout */
-#define C_CM_TXFEMPTY 0x64
-#define C_CM_ICHAR 0x65
-#define C_CM_MDCD 0x70 /* modem DCD change */
-#define C_CM_MDSR 0x71 /* modem DSR change */
-#define C_CM_MRI 0x72 /* modem RI change */
-#define C_CM_MCTS 0x73 /* modem CTS change */
-#define C_CM_MRTS 0x74 /* modem RTS drop */
-#define C_CM_RXBRK 0x84 /* Break received */
-#define C_CM_PR_ERROR 0x85 /* Parity error */
-#define C_CM_FR_ERROR 0x86 /* Frame error */
-#define C_CM_OVR_ERROR 0x87 /* Overrun error */
-#define C_CM_RXOFL 0x88 /* RX buffer overflow */
-#define C_CM_CMDERROR 0x90 /* command error */
-#define C_CM_FATAL 0x91 /* fatal error */
-#define C_CM_HW_RESET 0x92 /* reset board */
-
-/*
- * CH_CTRL - This per port structure contains all parameters
- * that control an specific port. It can be seen as the
- * configuration registers of a "super-serial-controller".
- */
-
-struct CH_CTRL {
- __u32 op_mode; /* operation mode */
- __u32 intr_enable; /* interrupt masking */
- __u32 sw_flow; /* SW flow control */
- __u32 flow_status; /* output flow status */
- __u32 comm_baud; /* baud rate - numerically specified */
- __u32 comm_parity; /* parity */
- __u32 comm_data_l; /* data length/stop */
- __u32 comm_flags; /* other flags */
- __u32 hw_flow; /* HW flow control */
- __u32 rs_control; /* RS-232 outputs */
- __u32 rs_status; /* RS-232 inputs */
- __u32 flow_xon; /* xon char */
- __u32 flow_xoff; /* xoff char */
- __u32 hw_overflow; /* hw overflow counter */
- __u32 sw_overflow; /* sw overflow counter */
- __u32 comm_error; /* frame/parity error counter */
- __u32 ichar;
- __u32 filler[7];
-};
-
-
-/*
- * BUF_CTRL - This per channel structure contains
- * all Tx and Rx buffer control for a given channel.
- */
-
-struct BUF_CTRL {
- __u32 flag_dma; /* buffers are in Host memory */
- __u32 tx_bufaddr; /* address of the tx buffer */
- __u32 tx_bufsize; /* tx buffer size */
- __u32 tx_threshold; /* tx low water mark */
- __u32 tx_get; /* tail index tx buf */
- __u32 tx_put; /* head index tx buf */
- __u32 rx_bufaddr; /* address of the rx buffer */
- __u32 rx_bufsize; /* rx buffer size */
- __u32 rx_threshold; /* rx high water mark */
- __u32 rx_get; /* tail index rx buf */
- __u32 rx_put; /* head index rx buf */
- __u32 filler[5]; /* filler to align structures */
-};
-
-/*
- * BOARD_CTRL - This per board structure contains all global
- * control fields related to the board.
- */
-
-struct BOARD_CTRL {
-
- /* static info provided by the on-board CPU */
- __u32 n_channel; /* number of channels */
- __u32 fw_version; /* firmware version */
-
- /* static info provided by the driver */
- __u32 op_system; /* op_system id */
- __u32 dr_version; /* driver version */
-
- /* board control area */
- __u32 inactivity; /* inactivity control */
-
- /* host to FW commands */
- __u32 hcmd_channel; /* channel number */
- __u32 hcmd_param; /* pointer to parameters */
-
- /* FW to Host commands */
- __u32 fwcmd_channel; /* channel number */
- __u32 fwcmd_param; /* pointer to parameters */
- __u32 zf_int_queue_addr; /* offset for INT_QUEUE structure */
-
- /* filler so the structures are aligned */
- __u32 filler[6];
-};
-
-/* Host Interrupt Queue */
-
-#define QUEUE_SIZE (10*MAX_CHAN)
-
-struct INT_QUEUE {
- unsigned char intr_code[QUEUE_SIZE];
- unsigned long channel[QUEUE_SIZE];
- unsigned long param[QUEUE_SIZE];
- unsigned long put;
- unsigned long get;
-};
-
-/*
- * ZFW_CTRL - This is the data structure that includes all other
- * data structures used by the Firmware.
- */
-
-struct ZFW_CTRL {
- struct BOARD_CTRL board_ctrl;
- struct CH_CTRL ch_ctrl[MAX_CHAN];
- struct BUF_CTRL buf_ctrl[MAX_CHAN];
-};
-
-/****************** ****************** *******************/
-#endif
+#define CYSETWAIT 0x435912
+#define CYGETWAIT 0x435913
#endif /* _UAPI_LINUX_CYCLADES_H */
diff --git a/include/uapi/linux/cycx_cfm.h b/include/uapi/linux/cycx_cfm.h
index 51f541942ff9..91778c8024b1 100644
--- a/include/uapi/linux/cycx_cfm.h
+++ b/include/uapi/linux/cycx_cfm.h
@@ -91,7 +91,7 @@ struct cycx_firmware {
unsigned short reserved[6];
char descr[CFM_DESCR_LEN];
struct cycx_fw_info info;
- unsigned char image[0];
+ unsigned char image[];
};
struct cycx_fw_header {
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index ae37fd4d194a..2f24b53a87a5 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -13,6 +13,8 @@
#ifndef _UAPI_LINUX_DEVLINK_H_
#define _UAPI_LINUX_DEVLINK_H_
+#include <linux/const.h>
+
#define DEVLINK_GENL_NAME "devlink"
#define DEVLINK_GENL_VERSION 0x1
#define DEVLINK_GENL_MCGRP_CONFIG_NAME "config"
@@ -117,6 +119,26 @@ enum devlink_command {
DEVLINK_CMD_TRAP_GROUP_NEW,
DEVLINK_CMD_TRAP_GROUP_DEL,
+ DEVLINK_CMD_TRAP_POLICER_GET, /* can dump */
+ DEVLINK_CMD_TRAP_POLICER_SET,
+ DEVLINK_CMD_TRAP_POLICER_NEW,
+ DEVLINK_CMD_TRAP_POLICER_DEL,
+
+ DEVLINK_CMD_HEALTH_REPORTER_TEST,
+
+ DEVLINK_CMD_RATE_GET, /* can dump */
+ DEVLINK_CMD_RATE_SET,
+ DEVLINK_CMD_RATE_NEW,
+ DEVLINK_CMD_RATE_DEL,
+
+ DEVLINK_CMD_LINECARD_GET, /* can dump */
+ DEVLINK_CMD_LINECARD_SET,
+ DEVLINK_CMD_LINECARD_NEW,
+ DEVLINK_CMD_LINECARD_DEL,
+
+ DEVLINK_CMD_SELFTESTS_GET, /* can dump */
+ DEVLINK_CMD_SELFTESTS_RUN,
+
/* add new commands above here */
__DEVLINK_CMD_MAX,
DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1
@@ -187,6 +209,19 @@ enum devlink_port_flavour {
* for the PCI VF. It is an internal
* port that faces the PCI VF.
*/
+ DEVLINK_PORT_FLAVOUR_VIRTUAL, /* Any virtual port facing the user. */
+ DEVLINK_PORT_FLAVOUR_UNUSED, /* Port which exists in the switch, but
+ * is not used in any way.
+ */
+ DEVLINK_PORT_FLAVOUR_PCI_SF, /* Represents eswitch port
+ * for the PCI SF. It is an internal
+ * port that faces the PCI SF.
+ */
+};
+
+enum devlink_rate_type {
+ DEVLINK_RATE_TYPE_LEAF,
+ DEVLINK_RATE_TYPE_NODE,
};
enum devlink_param_cmode {
@@ -216,20 +251,70 @@ enum devlink_param_reset_dev_on_drv_probe_value {
enum {
DEVLINK_ATTR_STATS_RX_PACKETS, /* u64 */
DEVLINK_ATTR_STATS_RX_BYTES, /* u64 */
+ DEVLINK_ATTR_STATS_RX_DROPPED, /* u64 */
__DEVLINK_ATTR_STATS_MAX,
DEVLINK_ATTR_STATS_MAX = __DEVLINK_ATTR_STATS_MAX - 1
};
+/* Specify what sections of a flash component can be overwritten when
+ * performing an update. Overwriting of firmware binary sections is always
+ * implicitly assumed to be allowed.
+ *
+ * Each section must be documented in
+ * Documentation/networking/devlink/devlink-flash.rst
+ */
+enum {
+ DEVLINK_FLASH_OVERWRITE_SETTINGS_BIT,
+ DEVLINK_FLASH_OVERWRITE_IDENTIFIERS_BIT,
+
+ __DEVLINK_FLASH_OVERWRITE_MAX_BIT,
+ DEVLINK_FLASH_OVERWRITE_MAX_BIT = __DEVLINK_FLASH_OVERWRITE_MAX_BIT - 1
+};
+
+#define DEVLINK_FLASH_OVERWRITE_SETTINGS _BITUL(DEVLINK_FLASH_OVERWRITE_SETTINGS_BIT)
+#define DEVLINK_FLASH_OVERWRITE_IDENTIFIERS _BITUL(DEVLINK_FLASH_OVERWRITE_IDENTIFIERS_BIT)
+
+#define DEVLINK_SUPPORTED_FLASH_OVERWRITE_SECTIONS \
+ (_BITUL(__DEVLINK_FLASH_OVERWRITE_MAX_BIT) - 1)
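For illustration, a mask allowing both documented sections to be overwritten;
it would be carried to the kernel in the DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK
bitfield32 attribute added further down in this header:

	#include <linux/devlink.h>
	#include <linux/types.h>

	static const __u32 overwrite_mask = DEVLINK_FLASH_OVERWRITE_SETTINGS |
					    DEVLINK_FLASH_OVERWRITE_IDENTIFIERS;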
+
+enum devlink_attr_selftest_id {
+ DEVLINK_ATTR_SELFTEST_ID_UNSPEC,
+ DEVLINK_ATTR_SELFTEST_ID_FLASH, /* flag */
+
+ __DEVLINK_ATTR_SELFTEST_ID_MAX,
+ DEVLINK_ATTR_SELFTEST_ID_MAX = __DEVLINK_ATTR_SELFTEST_ID_MAX - 1
+};
+
+enum devlink_selftest_status {
+ DEVLINK_SELFTEST_STATUS_SKIP,
+ DEVLINK_SELFTEST_STATUS_PASS,
+ DEVLINK_SELFTEST_STATUS_FAIL
+};
+
+enum devlink_attr_selftest_result {
+ DEVLINK_ATTR_SELFTEST_RESULT_UNSPEC,
+ DEVLINK_ATTR_SELFTEST_RESULT, /* nested */
+ DEVLINK_ATTR_SELFTEST_RESULT_ID, /* u32, enum devlink_attr_selftest_id */
+ DEVLINK_ATTR_SELFTEST_RESULT_STATUS, /* u8, enum devlink_selftest_status */
+
+ __DEVLINK_ATTR_SELFTEST_RESULT_MAX,
+ DEVLINK_ATTR_SELFTEST_RESULT_MAX = __DEVLINK_ATTR_SELFTEST_RESULT_MAX - 1
+};
+
/**
* enum devlink_trap_action - Packet trap action.
* @DEVLINK_TRAP_ACTION_DROP: Packet is dropped by the device and a copy is not
* sent to the CPU.
* @DEVLINK_TRAP_ACTION_TRAP: The sole copy of the packet is sent to the CPU.
+ * @DEVLINK_TRAP_ACTION_MIRROR: Packet is forwarded by the device and a copy is
+ * sent to the CPU.
*/
enum devlink_trap_action {
DEVLINK_TRAP_ACTION_DROP,
DEVLINK_TRAP_ACTION_TRAP,
+ DEVLINK_TRAP_ACTION_MIRROR,
};
/**
@@ -243,15 +328,59 @@ enum devlink_trap_action {
* control plane for resolution. Trapped packets
* are processed by devlink and injected to
* the kernel's Rx path.
+ * @DEVLINK_TRAP_TYPE_CONTROL: Packet was trapped because it is required for
+ * the correct functioning of the control plane.
+ * For example, an ARP request packet. Trapped
+ * packets are injected to the kernel's Rx path,
+ * but not reported to drop monitor.
*/
enum devlink_trap_type {
DEVLINK_TRAP_TYPE_DROP,
DEVLINK_TRAP_TYPE_EXCEPTION,
+ DEVLINK_TRAP_TYPE_CONTROL,
};
enum {
/* Trap can report input port as metadata */
DEVLINK_ATTR_TRAP_METADATA_TYPE_IN_PORT,
+ /* Trap can report flow action cookie as metadata */
+ DEVLINK_ATTR_TRAP_METADATA_TYPE_FA_COOKIE,
+};
+
+enum devlink_reload_action {
+ DEVLINK_RELOAD_ACTION_UNSPEC,
+ DEVLINK_RELOAD_ACTION_DRIVER_REINIT, /* Driver entities re-instantiation */
+ DEVLINK_RELOAD_ACTION_FW_ACTIVATE, /* FW activate */
+
+ /* Add new reload actions above */
+ __DEVLINK_RELOAD_ACTION_MAX,
+ DEVLINK_RELOAD_ACTION_MAX = __DEVLINK_RELOAD_ACTION_MAX - 1
+};
+
+enum devlink_reload_limit {
+ DEVLINK_RELOAD_LIMIT_UNSPEC, /* unspecified, no constraints */
+ DEVLINK_RELOAD_LIMIT_NO_RESET, /* No reset allowed, no down time allowed,
+ * no link flap and no configuration is lost.
+ */
+
+ /* Add new reload limit above */
+ __DEVLINK_RELOAD_LIMIT_MAX,
+ DEVLINK_RELOAD_LIMIT_MAX = __DEVLINK_RELOAD_LIMIT_MAX - 1
+};
+
+#define DEVLINK_RELOAD_LIMITS_VALID_MASK (_BITUL(__DEVLINK_RELOAD_LIMIT_MAX) - 1)
+
+enum devlink_linecard_state {
+ DEVLINK_LINECARD_STATE_UNSPEC,
+ DEVLINK_LINECARD_STATE_UNPROVISIONED,
+ DEVLINK_LINECARD_STATE_UNPROVISIONING,
+ DEVLINK_LINECARD_STATE_PROVISIONING,
+ DEVLINK_LINECARD_STATE_PROVISIONING_FAILED,
+ DEVLINK_LINECARD_STATE_PROVISIONED,
+ DEVLINK_LINECARD_STATE_ACTIVE,
+
+ __DEVLINK_LINECARD_STATE_MAX,
+ DEVLINK_LINECARD_STATE_MAX = __DEVLINK_LINECARD_STATE_MAX - 1
};
enum devlink_attr {
@@ -426,6 +555,58 @@ enum devlink_attr {
DEVLINK_ATTR_NETNS_FD, /* u32 */
DEVLINK_ATTR_NETNS_PID, /* u32 */
DEVLINK_ATTR_NETNS_ID, /* u32 */
+
+ DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP, /* u8 */
+
+ DEVLINK_ATTR_TRAP_POLICER_ID, /* u32 */
+ DEVLINK_ATTR_TRAP_POLICER_RATE, /* u64 */
+ DEVLINK_ATTR_TRAP_POLICER_BURST, /* u64 */
+
+ DEVLINK_ATTR_PORT_FUNCTION, /* nested */
+
+ DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER, /* string */
+
+ DEVLINK_ATTR_PORT_LANES, /* u32 */
+ DEVLINK_ATTR_PORT_SPLITTABLE, /* u8 */
+
+ DEVLINK_ATTR_PORT_EXTERNAL, /* u8 */
+ DEVLINK_ATTR_PORT_CONTROLLER_NUMBER, /* u32 */
+
+ DEVLINK_ATTR_FLASH_UPDATE_STATUS_TIMEOUT, /* u64 */
+ DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK, /* bitfield32 */
+
+ DEVLINK_ATTR_RELOAD_ACTION, /* u8 */
+ DEVLINK_ATTR_RELOAD_ACTIONS_PERFORMED, /* bitfield32 */
+ DEVLINK_ATTR_RELOAD_LIMITS, /* bitfield32 */
+
+ DEVLINK_ATTR_DEV_STATS, /* nested */
+ DEVLINK_ATTR_RELOAD_STATS, /* nested */
+ DEVLINK_ATTR_RELOAD_STATS_ENTRY, /* nested */
+ DEVLINK_ATTR_RELOAD_STATS_LIMIT, /* u8 */
+ DEVLINK_ATTR_RELOAD_STATS_VALUE, /* u32 */
+ DEVLINK_ATTR_REMOTE_RELOAD_STATS, /* nested */
+ DEVLINK_ATTR_RELOAD_ACTION_INFO, /* nested */
+ DEVLINK_ATTR_RELOAD_ACTION_STATS, /* nested */
+
+ DEVLINK_ATTR_PORT_PCI_SF_NUMBER, /* u32 */
+
+ DEVLINK_ATTR_RATE_TYPE, /* u16 */
+ DEVLINK_ATTR_RATE_TX_SHARE, /* u64 */
+ DEVLINK_ATTR_RATE_TX_MAX, /* u64 */
+ DEVLINK_ATTR_RATE_NODE_NAME, /* string */
+ DEVLINK_ATTR_RATE_PARENT_NODE_NAME, /* string */
+
+ DEVLINK_ATTR_REGION_MAX_SNAPSHOTS, /* u32 */
+
+ DEVLINK_ATTR_LINECARD_INDEX, /* u32 */
+ DEVLINK_ATTR_LINECARD_STATE, /* u8 */
+ DEVLINK_ATTR_LINECARD_TYPE, /* string */
+ DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES, /* nested */
+
+ DEVLINK_ATTR_NESTED_DEVLINK, /* nested */
+
+ DEVLINK_ATTR_SELFTESTS, /* nested */
+
/* add new attributes above here, update the policy in devlink.c */
__DEVLINK_ATTR_MAX,
@@ -472,4 +653,32 @@ enum devlink_resource_unit {
DEVLINK_RESOURCE_UNIT_ENTRY,
};
+enum devlink_port_function_attr {
+ DEVLINK_PORT_FUNCTION_ATTR_UNSPEC,
+ DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR, /* binary */
+ DEVLINK_PORT_FN_ATTR_STATE, /* u8 */
+ DEVLINK_PORT_FN_ATTR_OPSTATE, /* u8 */
+
+ __DEVLINK_PORT_FUNCTION_ATTR_MAX,
+ DEVLINK_PORT_FUNCTION_ATTR_MAX = __DEVLINK_PORT_FUNCTION_ATTR_MAX - 1
+};
+
+enum devlink_port_fn_state {
+ DEVLINK_PORT_FN_STATE_INACTIVE,
+ DEVLINK_PORT_FN_STATE_ACTIVE,
+};
+
+/**
+ * enum devlink_port_fn_opstate - indicates operational state of the function
+ * @DEVLINK_PORT_FN_OPSTATE_ATTACHED: Driver is attached to the function.
+ * For a graceful teardown of the function, the user should deactivate it
+ * and then wait for the operational state to turn DETACHED.
+ * @DEVLINK_PORT_FN_OPSTATE_DETACHED: Driver is detached from the function.
+ * It is safe to delete the port.
+ */
+enum devlink_port_fn_opstate {
+ DEVLINK_PORT_FN_OPSTATE_DETACHED,
+ DEVLINK_PORT_FN_OPSTATE_ATTACHED,
+};
+
#endif /* _UAPI_LINUX_DEVLINK_H_ */
diff --git a/include/uapi/linux/dlm.h b/include/uapi/linux/dlm.h
index 0d2eca287567..1923f4f3b05e 100644
--- a/include/uapi/linux/dlm.h
+++ b/include/uapi/linux/dlm.h
@@ -69,7 +69,6 @@ struct dlm_lksb {
/* dlm_new_lockspace() flags */
#define DLM_LSFL_TIMEWARN 0x00000002
-#define DLM_LSFL_FS 0x00000004
#define DLM_LSFL_NEWEXCL 0x00000008
diff --git a/include/uapi/linux/dlm_device.h b/include/uapi/linux/dlm_device.h
index f880d2831160..e83954c69fff 100644
--- a/include/uapi/linux/dlm_device.h
+++ b/include/uapi/linux/dlm_device.h
@@ -45,13 +45,13 @@ struct dlm_lock_params {
void __user *bastaddr;
struct dlm_lksb __user *lksb;
char lvb[DLM_USER_LVB_LEN];
- char name[0];
+ char name[];
};
struct dlm_lspace_params {
__u32 flags;
__u32 minor;
- char name[0];
+ char name[];
};
struct dlm_purge_params {
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index 6622912c2342..7edf335778ba 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -182,7 +182,7 @@ struct dm_target_spec {
struct dm_target_deps {
__u32 count; /* Array size */
__u32 padding; /* unused */
- __u64 dev[0]; /* out */
+ __u64 dev[]; /* out */
};
/*
@@ -192,9 +192,23 @@ struct dm_name_list {
__u64 dev;
__u32 next; /* offset to the next record from
the _start_ of this */
- char name[0];
+ char name[];
+
+ /*
+	 * The following members can be accessed by taking a pointer that
+	 * points immediately after the terminating zero character in "name"
+	 * and aligning this pointer to the next 8-byte boundary.
+	 * The uuid is present if the flag DM_NAME_LIST_FLAG_HAS_UUID is set.
+ *
+ * __u32 event_nr;
+ * __u32 flags;
+ * char uuid[0];
+ */
};
+#define DM_NAME_LIST_FLAG_HAS_UUID 1
+#define DM_NAME_LIST_FLAG_DOESNT_HAVE_UUID 2
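A sketch of the pointer arithmetic described in the comment above; nl points
at an entry returned by the device-list ioctl, and the helper name is
illustrative:

	#include <linux/dm-ioctl.h>
	#include <stdint.h>
	#include <string.h>

	static const char *dm_name_list_uuid(const struct dm_name_list *nl)
	{
		/* Step past the terminating NUL of "name" ... */
		const char *p = nl->name + strlen(nl->name) + 1;
		/* ... and round up to the next 8-byte boundary. */
		uintptr_t a = ((uintptr_t)p + 7) & ~(uintptr_t)7;
		const __u32 *event_nr = (const __u32 *)a;
		const __u32 *flags = event_nr + 1;

		if (*flags & DM_NAME_LIST_FLAG_HAS_UUID)
			return (const char *)(flags + 1); /* uuid[] follows */
		return NULL;
	}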
+
/*
* Used to retrieve the target versions
*/
@@ -202,7 +216,7 @@ struct dm_target_versions {
__u32 next;
__u32 version[3];
- char name[0];
+ char name[];
};
/*
@@ -211,7 +225,7 @@ struct dm_target_versions {
struct dm_target_msg {
__u64 sector; /* Device sector */
- char message[0];
+ char message[];
};
/*
@@ -272,9 +286,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 42
+#define DM_VERSION_MINOR 47
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2020-02-27)"
+#define DM_VERSION_EXTRA "-ioctl (2022-07-28)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
@@ -362,4 +376,10 @@ enum {
*/
#define DM_INTERNAL_SUSPEND_FLAG (1 << 18) /* Out */
+/*
+ * If set, the ioctl returns, in the input buffer passed by userspace, the raw
+ * table information that would be measured by the IMA subsystem on a device
+ * state change.
+ */
+#define DM_IMA_MEASUREMENT_FLAG (1 << 19) /* In */
+
#endif /* _LINUX_DM_IOCTL_H */
diff --git a/include/uapi/linux/dm-log-userspace.h b/include/uapi/linux/dm-log-userspace.h
index 5c47a8603376..23dad9565e46 100644
--- a/include/uapi/linux/dm-log-userspace.h
+++ b/include/uapi/linux/dm-log-userspace.h
@@ -426,7 +426,7 @@ struct dm_ulog_request {
__u32 request_type; /* DM_ULOG_* defined above */
__u32 data_size; /* How much data (not including this struct) */
- char data[0];
+ char data[];
};
#endif /* __DM_LOG_USERSPACE_H__ */
diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h
index dbc7092e04b5..5a6fda66d9ad 100644
--- a/include/uapi/linux/dma-buf.h
+++ b/include/uapi/linux/dma-buf.h
@@ -22,8 +22,56 @@
#include <linux/types.h>
-/* begin/end dma-buf functions used for userspace mmap. */
+/**
+ * struct dma_buf_sync - Synchronize with CPU access.
+ *
+ * When a DMA buffer is accessed from the CPU via mmap, it is not always
+ * possible to guarantee coherency between the CPU-visible map and underlying
+ * memory. To manage coherency, DMA_BUF_IOCTL_SYNC must be used to bracket
+ * any CPU access to give the kernel the chance to shuffle memory around if
+ * needed.
+ *
+ * Prior to accessing the map, the client must call DMA_BUF_IOCTL_SYNC
+ * with DMA_BUF_SYNC_START and the appropriate read/write flags. Once the
+ * access is complete, the client should call DMA_BUF_IOCTL_SYNC with
+ * DMA_BUF_SYNC_END and the same read/write flags.
+ *
+ * The synchronization provided via DMA_BUF_IOCTL_SYNC only provides cache
+ * coherency. It does not prevent other processes or devices from
+ * accessing the memory at the same time. If synchronization with a GPU or
+ * other device driver is required, it is the client's responsibility to
+ * wait for the buffer to be ready for reading or writing before calling this
+ * ioctl with DMA_BUF_SYNC_START. Likewise, the client must ensure that
+ * follow-up work is not submitted to the GPU or other device driver until
+ * after this ioctl has been called with DMA_BUF_SYNC_END.
+ *
+ * If the driver or API with which the client is interacting uses implicit
+ * synchronization, waiting for prior work to complete can be done via
+ * poll() on the DMA buffer file descriptor. If the driver or API requires
+ * explicit synchronization, the client may have to wait on a sync_file or
+ * other synchronization primitive outside the scope of the DMA buffer API.
+ */
struct dma_buf_sync {
+ /**
+ * @flags: Set of access flags
+ *
+ * DMA_BUF_SYNC_START:
+ * Indicates the start of a map access session.
+ *
+ * DMA_BUF_SYNC_END:
+ * Indicates the end of a map access session.
+ *
+ * DMA_BUF_SYNC_READ:
+ * Indicates that the mapped DMA buffer will be read by the
+ * client via the CPU map.
+ *
+ * DMA_BUF_SYNC_WRITE:
+ * Indicates that the mapped DMA buffer will be written by the
+ * client via the CPU map.
+ *
+ * DMA_BUF_SYNC_RW:
+ * An alias for DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE.
+ */
__u64 flags;
};
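A minimal sketch of the bracketing rule above, using the DMA_BUF_SYNC_* flags
defined in this header; fd is a dma-buf file descriptor and map a CPU mapping
of it obtained with mmap() (both illustrative):

	#include <linux/dma-buf.h>
	#include <stddef.h>
	#include <sys/ioctl.h>

	static int read_mapped_buffer(int fd, const void *map, size_t len)
	{
		struct dma_buf_sync sync = {
			.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ,
		};

		if (ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync) < 0)
			return -1;
		/* ... read from map[0..len - 1] ... */
		sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ;
		return ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
	}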
@@ -37,8 +85,98 @@ struct dma_buf_sync {
#define DMA_BUF_NAME_LEN 32
+/**
+ * struct dma_buf_export_sync_file - Get a sync_file from a dma-buf
+ *
+ * Userspace can perform a DMA_BUF_IOCTL_EXPORT_SYNC_FILE to retrieve the
+ * current set of fences on a dma-buf file descriptor as a sync_file. CPU
+ * waits via poll() or other driver-specific mechanisms typically wait on
+ * whatever fences are on the dma-buf at the time the wait begins. This
+ * is similar except that it takes a snapshot of the current fences on the
+ * dma-buf for waiting later instead of waiting immediately. This is
+ * useful for modern graphics APIs such as Vulkan which assume an explicit
+ * synchronization model but still need to inter-operate with dma-buf.
+ *
+ * The intended usage pattern is the following:
+ *
+ * 1. Export a sync_file with flags corresponding to the expected GPU usage
+ * via DMA_BUF_IOCTL_EXPORT_SYNC_FILE.
+ *
+ * 2. Submit rendering work which uses the dma-buf. The work should wait on
+ * the exported sync file before rendering and produce another sync_file
+ * when complete.
+ *
+ * 3. Import the rendering-complete sync_file into the dma-buf with flags
+ * corresponding to the GPU usage via DMA_BUF_IOCTL_IMPORT_SYNC_FILE.
+ *
+ * Unlike doing implicit synchronization via a GPU kernel driver's exec ioctl,
+ * the above is not a single atomic operation. If userspace wants to ensure
+ * ordering via these fences, it is the responsibility of userspace to use
+ * locks or other mechanisms to ensure that no other context adds fences or
+ * submits work between steps 1 and 3 above.
+ */
+struct dma_buf_export_sync_file {
+ /**
+ * @flags: Read/write flags
+ *
+ * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both.
+ *
+ * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set,
+ * the returned sync file waits on any writers of the dma-buf to
+ * complete. Waiting on the returned sync file is equivalent to
+ * poll() with POLLIN.
+ *
+ * If DMA_BUF_SYNC_WRITE is set, the returned sync file waits on
+ * any users of the dma-buf (read or write) to complete. Waiting
+ * on the returned sync file is equivalent to poll() with POLLOUT.
+ * If both DMA_BUF_SYNC_WRITE and DMA_BUF_SYNC_READ are set, this
+ * is equivalent to just DMA_BUF_SYNC_WRITE.
+ */
+ __u32 flags;
+ /** @fd: Returned sync file descriptor */
+ __s32 fd;
+};
+
+/**
+ * struct dma_buf_import_sync_file - Insert a sync_file into a dma-buf
+ *
+ * Userspace can perform a DMA_BUF_IOCTL_IMPORT_SYNC_FILE to insert a
+ * sync_file into a dma-buf for the purposes of implicit synchronization
+ * with other dma-buf consumers. This allows clients using explicitly
+ * synchronized APIs such as Vulkan to inter-op with dma-buf consumers
+ * which expect implicit synchronization such as OpenGL or most media
+ * drivers/video.
+ */
+struct dma_buf_import_sync_file {
+ /**
+ * @flags: Read/write flags
+ *
+ * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both.
+ *
+ * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set,
+ * this inserts the sync_file as a read-only fence. Any subsequent
+ * implicitly synchronized writes to this dma-buf will wait on this
+ * fence but reads will not.
+ *
+ * If DMA_BUF_SYNC_WRITE is set, this inserts the sync_file as a
+ * write fence. All subsequent implicitly synchronized access to
+ * this dma-buf will wait on this fence.
+ */
+ __u32 flags;
+ /** @fd: Sync file descriptor */
+ __s32 fd;
+};
+
#define DMA_BUF_BASE 'b'
#define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)
+
+/* The 32/64-bitness of this uapi was botched in Android; there is no
+ * difference between them in the actual uapi, they are just different ioctl
+ * numbers.
+ */
#define DMA_BUF_SET_NAME _IOW(DMA_BUF_BASE, 1, const char *)
+#define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, __u32)
+#define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, __u64)
+#define DMA_BUF_IOCTL_EXPORT_SYNC_FILE _IOWR(DMA_BUF_BASE, 2, struct dma_buf_export_sync_file)
+#define DMA_BUF_IOCTL_IMPORT_SYNC_FILE _IOW(DMA_BUF_BASE, 3, struct dma_buf_import_sync_file)
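A sketch of the three-step explicit-sync round trip described above;
render_done_fd stands in for a sync_file produced by the GPU driver and is
illustrative:

	#include <linux/dma-buf.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	static int explicit_sync_round_trip(int dmabuf_fd, int render_done_fd)
	{
		struct dma_buf_export_sync_file exp = {
			.flags = DMA_BUF_SYNC_WRITE,
		};
		struct dma_buf_import_sync_file imp;

		/* 1. Snapshot the fences our write must wait behind. */
		if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &exp) < 0)
			return -1;
		/* 2. Submit GPU work that waits on exp.fd and signals
		 *    render_done_fd, then drop our reference. */
		close(exp.fd);
		/* 3. Publish the completion fence for implicit-sync users. */
		imp.flags = DMA_BUF_SYNC_WRITE;
		imp.fd = render_done_fd;
		return ioctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &imp);
	}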
#endif
diff --git a/include/uapi/linux/dn.h b/include/uapi/linux/dn.h
deleted file mode 100644
index 36ca71bd8bbe..000000000000
--- a/include/uapi/linux/dn.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _LINUX_DN_H
-#define _LINUX_DN_H
-
-#include <linux/ioctl.h>
-#include <linux/types.h>
-#include <linux/if_ether.h>
-
-/*
-
- DECnet Data Structures and Constants
-
-*/
-
-/*
- * DNPROTO_NSP can't be the same as SOL_SOCKET,
- * so increment each by one (compared to ULTRIX)
- */
-#define DNPROTO_NSP 2 /* NSP protocol number */
-#define DNPROTO_ROU 3 /* Routing protocol number */
-#define DNPROTO_NML 4 /* Net mgt protocol number */
-#define DNPROTO_EVL 5 /* Evl protocol number (usr) */
-#define DNPROTO_EVR 6 /* Evl protocol number (evl) */
-#define DNPROTO_NSPT 7 /* NSP trace protocol number */
-
-
-#define DN_ADDL 2
-#define DN_MAXADDL 2 /* ULTRIX headers have 20 here, but pathworks has 2 */
-#define DN_MAXOPTL 16
-#define DN_MAXOBJL 16
-#define DN_MAXACCL 40
-#define DN_MAXALIASL 128
-#define DN_MAXNODEL 256
-#define DNBUFSIZE 65023
-
-/*
- * SET/GET Socket options - must match the DSO_ numbers below
- */
-#define SO_CONDATA 1
-#define SO_CONACCESS 2
-#define SO_PROXYUSR 3
-#define SO_LINKINFO 7
-
-#define DSO_CONDATA 1 /* Set/Get connect data */
-#define DSO_DISDATA 10 /* Set/Get disconnect data */
-#define DSO_CONACCESS 2 /* Set/Get connect access data */
-#define DSO_ACCEPTMODE 4 /* Set/Get accept mode */
-#define DSO_CONACCEPT 5 /* Accept deferred connection */
-#define DSO_CONREJECT 6 /* Reject deferred connection */
-#define DSO_LINKINFO 7 /* Set/Get link information */
-#define DSO_STREAM 8 /* Set socket type to stream */
-#define DSO_SEQPACKET 9 /* Set socket type to sequenced packet */
-#define DSO_MAXWINDOW 11 /* Maximum window size allowed */
-#define DSO_NODELAY 12 /* Turn off nagle */
-#define DSO_CORK 13 /* Wait for more data! */
-#define DSO_SERVICES 14 /* NSP Services field */
-#define DSO_INFO 15 /* NSP Info field */
-#define DSO_MAX 15 /* Maximum option number */
-
-
-/* LINK States */
-#define LL_INACTIVE 0
-#define LL_CONNECTING 1
-#define LL_RUNNING 2
-#define LL_DISCONNECTING 3
-
-#define ACC_IMMED 0
-#define ACC_DEFER 1
-
-#define SDF_WILD 1 /* Wild card object */
-#define SDF_PROXY 2 /* Addr eligible for proxy */
-#define SDF_UICPROXY 4 /* Use uic-based proxy */
-
-/* Structures */
-
-
-struct dn_naddr {
- __le16 a_len;
- __u8 a_addr[DN_MAXADDL]; /* Two bytes little endian */
-};
-
-struct sockaddr_dn {
- __u16 sdn_family;
- __u8 sdn_flags;
- __u8 sdn_objnum;
- __le16 sdn_objnamel;
- __u8 sdn_objname[DN_MAXOBJL];
- struct dn_naddr sdn_add;
-};
-#define sdn_nodeaddrl sdn_add.a_len /* Node address length */
-#define sdn_nodeaddr sdn_add.a_addr /* Node address */
-
-
-
-/*
- * DECnet set/get DSO_CONDATA, DSO_DISDATA (optional data) structure
- */
-struct optdata_dn {
- __le16 opt_status; /* Extended status return */
-#define opt_sts opt_status
- __le16 opt_optl; /* Length of user data */
- __u8 opt_data[16]; /* User data */
-};
-
-struct accessdata_dn {
- __u8 acc_accl;
- __u8 acc_acc[DN_MAXACCL];
- __u8 acc_passl;
- __u8 acc_pass[DN_MAXACCL];
- __u8 acc_userl;
- __u8 acc_user[DN_MAXACCL];
-};
-
-/*
- * DECnet logical link information structure
- */
-struct linkinfo_dn {
- __u16 idn_segsize; /* Segment size for link */
- __u8 idn_linkstate; /* Logical link state */
-};
-
-/*
- * Ethernet address format (for DECnet)
- */
-union etheraddress {
- __u8 dne_addr[ETH_ALEN]; /* Full ethernet address */
- struct {
- __u8 dne_hiord[4]; /* DECnet HIORD prefix */
- __u8 dne_nodeaddr[2]; /* DECnet node address */
- } dne_remote;
-};
-
-
-/*
- * DECnet physical socket address format
- */
-struct dn_addr {
- __le16 dna_family; /* AF_DECnet */
- union etheraddress dna_netaddr; /* DECnet ethernet address */
-};
-
-#define DECNET_IOCTL_BASE 0x89 /* PROTOPRIVATE range */
-
-#define SIOCSNETADDR _IOW(DECNET_IOCTL_BASE, 0xe0, struct dn_naddr)
-#define SIOCGNETADDR _IOR(DECNET_IOCTL_BASE, 0xe1, struct dn_naddr)
-#define OSIOCSNETADDR _IOW(DECNET_IOCTL_BASE, 0xe0, int)
-#define OSIOCGNETADDR _IOR(DECNET_IOCTL_BASE, 0xe1, int)
-
-#endif /* _LINUX_DN_H */
diff --git a/include/uapi/linux/dqblk_xfs.h b/include/uapi/linux/dqblk_xfs.h
index 03d890b80ebc..8cda3e62e0e7 100644
--- a/include/uapi/linux/dqblk_xfs.h
+++ b/include/uapi/linux/dqblk_xfs.h
@@ -61,12 +61,16 @@ typedef struct fs_disk_quota {
__u64 d_ino_softlimit;/* preferred inode limit */
__u64 d_bcount; /* # disk blocks owned by the user */
__u64 d_icount; /* # inodes owned by the user */
- __s32 d_itimer; /* zero if within inode limits */
- /* if not, we refuse service */
+ __s32 d_itimer; /* Zero if within inode limits. If
+ * not, we refuse service at this time
+ * (in seconds since Unix epoch) */
__s32 d_btimer; /* similar to above; for disk blocks */
__u16 d_iwarns; /* # warnings issued wrt num inodes */
__u16 d_bwarns; /* # warnings issued wrt disk blocks */
- __s32 d_padding2; /* padding2 - for future use */
+ __s8 d_itimer_hi; /* upper 8 bits of timer values */
+ __s8 d_btimer_hi;
+ __s8 d_rtbtimer_hi;
+ __s8 d_padding2; /* padding2 - for future use */
__u64 d_rtb_hardlimit;/* absolute limit on realtime blks */
__u64 d_rtb_softlimit;/* preferred limit on RT disk blks */
__u64 d_rtbcount; /* # realtime blocks owned */
@@ -122,6 +126,12 @@ typedef struct fs_disk_quota {
#define FS_DQ_ACCT_MASK (FS_DQ_BCOUNT | FS_DQ_ICOUNT | FS_DQ_RTBCOUNT)
/*
+ * Quota expiration timestamps are 40-bit signed integers, with the upper 8
+ * bits encoded in the _hi fields.
+ */
+#define FS_DQ_BIGTIME (1<<15)
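A sketch of reassembling one of the split timers per the comment above,
assuming the caller has checked FS_DQ_BIGTIME in d_fieldmask; the helper name
is illustrative:

	#include <linux/dqblk_xfs.h>
	#include <stdint.h>

	static int64_t dqblk_itimer(const struct fs_disk_quota *d)
	{
		/* Low 32 bits are unsigned; the sign lives in the _hi byte. */
		return ((int64_t)d->d_itimer_hi << 32) | (uint32_t)d->d_itimer;
	}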
+
+/*
* Various flags related to quotactl(2).
*/
#define FS_QUOTA_UDQ_ACCT (1<<0) /* user quota accounting */
@@ -209,7 +219,10 @@ struct fs_quota_statv {
__s32 qs_rtbtimelimit;/* limit for rt blks timer */
__u16 qs_bwarnlimit; /* limit for num warnings */
__u16 qs_iwarnlimit; /* limit for num warnings */
- __u64 qs_pad2[8]; /* for future proofing */
+ __u16 qs_rtbwarnlimit;/* limit for rt blks warnings */
+ __u16 qs_pad3;
+ __u32 qs_pad4;
+ __u64 qs_pad2[7]; /* for future proofing */
};
#endif /* _LINUX_DQBLK_XFS_H */
diff --git a/include/uapi/linux/dw100.h b/include/uapi/linux/dw100.h
new file mode 100644
index 000000000000..3356496edd6b
--- /dev/null
+++ b/include/uapi/linux/dw100.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/* Copyright 2022 NXP */
+
+#ifndef __UAPI_DW100_H__
+#define __UAPI_DW100_H__
+
+#include <linux/v4l2-controls.h>
+
+/*
+ * Check Documentation/userspace-api/media/drivers/dw100.rst for control details.
+ */
+#define V4L2_CID_DW100_DEWARPING_16x16_VERTEX_MAP (V4L2_CID_USER_DW100_BASE + 1)
+
+#endif
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index f47e853546fa..ef38c2bc5ab7 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -51,6 +51,7 @@
#define EM_RISCV 243 /* RISC-V */
#define EM_BPF 247 /* Linux BPF - in-kernel virtual machine */
#define EM_CSKY 252 /* C-SKY */
+#define EM_LOONGARCH 258 /* LoongArch */
#define EM_FRV 0x5441 /* Fujitsu FR-V */
/*
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index 34c02e4290fe..c7b056af9ef0 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -35,9 +35,14 @@ typedef __s64 Elf64_Sxword;
#define PT_HIOS 0x6fffffff /* OS-specific */
#define PT_LOPROC 0x70000000
#define PT_HIPROC 0x7fffffff
-#define PT_GNU_EH_FRAME 0x6474e550
-
+#define PT_GNU_EH_FRAME (PT_LOOS + 0x474e550)
#define PT_GNU_STACK (PT_LOOS + 0x474e551)
+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
+#define PT_GNU_PROPERTY (PT_LOOS + 0x474e553)
+
+/* ARM MTE memory tag segment type */
+#define PT_AARCH64_MEMTAG_MTE (PT_LOPROC + 0x2)
/*
* Extended Numbering
@@ -52,7 +57,7 @@ typedef __s64 Elf64_Sxword;
*
* - Oracle: Linker and Libraries.
* Part No: 817–1984–19, August 2011.
- * http://docs.oracle.com/cd/E18752_01/pdf/817-1984.pdf
+ * https://docs.oracle.com/cd/E18752_01/pdf/817-1984.pdf
*
* - System V ABI AMD64 Architecture Processor Supplement
* Draft Version 0.99.4,
@@ -129,7 +134,7 @@ typedef __s64 Elf64_Sxword;
#define STT_TLS 6
#define ELF_ST_BIND(x) ((x) >> 4)
-#define ELF_ST_TYPE(x) (((unsigned int) x) & 0xf)
+#define ELF_ST_TYPE(x) ((x) & 0xf)
#define ELF32_ST_BIND(x) ELF_ST_BIND(x)
#define ELF32_ST_TYPE(x) ELF_ST_TYPE(x)
#define ELF64_ST_BIND(x) ELF_ST_BIND(x)
@@ -367,6 +372,7 @@ typedef struct elf64_shdr {
* Notes used in ET_CORE. Architectures export some of the arch register sets
* using the corresponding note types via the PTRACE_GETREGSET and
* PTRACE_SETREGSET requests.
+ * The note name for all these is "LINUX".
*/
#define NT_PRSTATUS 1
#define NT_PRFPREG 2
@@ -414,6 +420,7 @@ typedef struct elf64_shdr {
#define NT_S390_GS_CB 0x30b /* s390 guarded storage registers */
#define NT_S390_GS_BC 0x30c /* s390 guarded storage broadcast control block */
#define NT_S390_RI_CB 0x30d /* s390 runtime instrumentation */
+#define NT_S390_PV_CPU_DATA 0x30e /* s390 protvirt cpu dump data */
#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */
#define NT_ARM_TLS 0x401 /* ARM TLS register */
#define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */
@@ -423,11 +430,23 @@ typedef struct elf64_shdr {
#define NT_ARM_PAC_MASK 0x406 /* ARM pointer authentication code masks */
#define NT_ARM_PACA_KEYS 0x407 /* ARM pointer authentication address keys */
#define NT_ARM_PACG_KEYS 0x408 /* ARM pointer authentication generic key */
+#define NT_ARM_TAGGED_ADDR_CTRL 0x409 /* arm64 tagged address control (prctl()) */
+#define NT_ARM_PAC_ENABLED_KEYS 0x40a /* arm64 ptr auth enabled keys (prctl()) */
+#define NT_ARM_SSVE 0x40b /* ARM Streaming SVE registers */
+#define NT_ARM_ZA 0x40c /* ARM SME ZA registers */
#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */
#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */
#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
#define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */
#define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */
+#define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers */
+#define NT_LOONGARCH_CSR 0xa01 /* LoongArch control and status registers */
+#define NT_LOONGARCH_LSX 0xa02 /* LoongArch Loongson SIMD Extension registers */
+#define NT_LOONGARCH_LASX 0xa03 /* LoongArch Loongson Advanced SIMD Extension registers */
+#define NT_LOONGARCH_LBT 0xa04 /* LoongArch Loongson Binary Translation registers */
+
+/* Note types with note name "GNU" */
+#define NT_GNU_PROPERTY_TYPE_0 5
/* Note header in a PT_NOTE section */
typedef struct elf32_note {
@@ -443,4 +462,10 @@ typedef struct elf64_note {
Elf64_Word n_type; /* Content type */
} Elf64_Nhdr;
+/* .note.gnu.property types for EM_AARCH64: */
+#define GNU_PROPERTY_AARCH64_FEATURE_1_AND 0xc0000000
+
+/* Bits for GNU_PROPERTY_AARCH64_FEATURE_1_BTI */
+#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI (1U << 0)
+
#endif /* _UAPI_LINUX_ELF_H */
diff --git a/include/uapi/linux/elfcore.h b/include/uapi/linux/elfcore.h
deleted file mode 100644
index baf03562306d..000000000000
--- a/include/uapi/linux/elfcore.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _UAPI_LINUX_ELFCORE_H
-#define _UAPI_LINUX_ELFCORE_H
-
-#include <linux/types.h>
-#include <linux/signal.h>
-#include <linux/time.h>
-#include <linux/ptrace.h>
-#include <linux/elf.h>
-#include <linux/fs.h>
-
-struct elf_siginfo
-{
- int si_signo; /* signal number */
- int si_code; /* extra code */
- int si_errno; /* errno */
-};
-
-
-#ifndef __KERNEL__
-typedef elf_greg_t greg_t;
-typedef elf_gregset_t gregset_t;
-typedef elf_fpregset_t fpregset_t;
-typedef elf_fpxregset_t fpxregset_t;
-#define NGREG ELF_NGREG
-#endif
-
-/*
- * Definitions to generate Intel SVR4-like core files.
- * These mostly have the same names as the SVR4 types with "elf_"
- * tacked on the front to prevent clashes with linux definitions,
- * and the typedef forms have been avoided. This is mostly like
- * the SVR4 structure, but more Linuxy, with things that Linux does
- * not support and which gdb doesn't really use excluded.
- * Fields present but not used are marked with "XXX".
- */
-struct elf_prstatus
-{
-#if 0
- long pr_flags; /* XXX Process flags */
- short pr_why; /* XXX Reason for process halt */
- short pr_what; /* XXX More detailed reason */
-#endif
- struct elf_siginfo pr_info; /* Info associated with signal */
- short pr_cursig; /* Current signal */
- unsigned long pr_sigpend; /* Set of pending signals */
- unsigned long pr_sighold; /* Set of held signals */
-#if 0
- struct sigaltstack pr_altstack; /* Alternate stack info */
- struct sigaction pr_action; /* Signal action for current sig */
-#endif
- pid_t pr_pid;
- pid_t pr_ppid;
- pid_t pr_pgrp;
- pid_t pr_sid;
- struct __kernel_old_timeval pr_utime; /* User time */
- struct __kernel_old_timeval pr_stime; /* System time */
- struct __kernel_old_timeval pr_cutime; /* Cumulative user time */
- struct __kernel_old_timeval pr_cstime; /* Cumulative system time */
-#if 0
- long pr_instr; /* Current instruction */
-#endif
- elf_gregset_t pr_reg; /* GP registers */
-#ifdef CONFIG_BINFMT_ELF_FDPIC
- /* When using FDPIC, the loadmap addresses need to be communicated
- * to GDB in order for GDB to do the necessary relocations. The
- * fields (below) used to communicate this information are placed
- * immediately after ``pr_reg'', so that the loadmap addresses may
- * be viewed as part of the register set if so desired.
- */
- unsigned long pr_exec_fdpic_loadmap;
- unsigned long pr_interp_fdpic_loadmap;
-#endif
- int pr_fpvalid; /* True if math co-processor being used. */
-};
-
-#define ELF_PRARGSZ (80) /* Number of chars for args */
-
-struct elf_prpsinfo
-{
- char pr_state; /* numeric process state */
- char pr_sname; /* char for pr_state */
- char pr_zomb; /* zombie */
- char pr_nice; /* nice val */
- unsigned long pr_flag; /* flags */
- __kernel_uid_t pr_uid;
- __kernel_gid_t pr_gid;
- pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
- /* Lots missing */
- char pr_fname[16]; /* filename of executable */
- char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
-};
-
-#ifndef __KERNEL__
-typedef struct elf_prstatus prstatus_t;
-typedef struct elf_prpsinfo prpsinfo_t;
-#define PRARGSZ ELF_PRARGSZ
-#endif
-
-
-#endif /* _UAPI_LINUX_ELFCORE_H */
diff --git a/include/uapi/linux/errqueue.h b/include/uapi/linux/errqueue.h
index 0cca19670fd2..3c70e8ac14b8 100644
--- a/include/uapi/linux/errqueue.h
+++ b/include/uapi/linux/errqueue.h
@@ -5,6 +5,13 @@
#include <linux/types.h>
#include <linux/time_types.h>
+/* RFC 4884: return offset to extension struct + validation */
+struct sock_ee_data_rfc4884 {
+ __u16 len;
+ __u8 flags;
+ __u8 reserved;
+};
+
struct sock_extended_err {
__u32 ee_errno;
__u8 ee_origin;
@@ -12,7 +19,10 @@ struct sock_extended_err {
__u8 ee_code;
__u8 ee_pad;
__u32 ee_info;
- __u32 ee_data;
+ union {
+ __u32 ee_data;
+ struct sock_ee_data_rfc4884 ee_rfc4884;
+ };
};
#define SO_EE_ORIGIN_NONE 0
@@ -31,12 +41,14 @@ struct sock_extended_err {
#define SO_EE_CODE_TXTIME_INVALID_PARAM 1
#define SO_EE_CODE_TXTIME_MISSED 2
+#define SO_EE_RFC4884_FLAG_INVALID 1
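A sketch of consulting the new union member after receiving an ICMP error via
MSG_ERRQUEUE; the reading of len as the offset to the extension struct follows
the comment on sock_ee_data_rfc4884 above:

	#include <linux/errqueue.h>

	static int ee_rfc4884_offset(const struct sock_extended_err *ee)
	{
		if (!ee->ee_rfc4884.len ||
		    (ee->ee_rfc4884.flags & SO_EE_RFC4884_FLAG_INVALID))
			return -1; /* absent, or failed validation */
		return ee->ee_rfc4884.len; /* offset to the extension struct */
	}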
+
/**
* struct scm_timestamping - timestamps exposed through cmsg
*
* The timestamping interfaces SO_TIMESTAMPING, MSG_TSTAMP_*
* communicate network timestamps by passing this struct in a cmsg with
- * recvmsg(). See Documentation/networking/timestamping.txt for details.
+ * recvmsg(). See Documentation/networking/timestamping.rst for details.
* User space sees a timespec definition that matches either
* __kernel_timespec or __kernel_old_timespec, in the kernel we
* require two structure definitions to provide both.
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 4295ebfa2f91..dc2aa3d75b39 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -14,7 +14,7 @@
#ifndef _UAPI_LINUX_ETHTOOL_H
#define _UAPI_LINUX_ETHTOOL_H
-#include <linux/kernel.h>
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/if_ether.h>
@@ -26,6 +26,14 @@
* have the same layout for 32-bit and 64-bit userland.
*/
+/* Note on reserved space.
+ * Reserved fields must not be accessed directly by user space because
+ * they may be replaced by a different field in the future. They must
+ * be initialized to zero before making the request, e.g. via memset
+ * of the entire structure or implicitly by not being set in a structure
+ * initializer.
+ */
+
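A minimal sketch of the rule above, using struct ethtool_drvinfo (declared
later in this header) as an illustrative request:

	#include <linux/ethtool.h>
	#include <string.h>

	static void prepare_drvinfo(struct ethtool_drvinfo *info)
	{
		memset(info, 0, sizeof(*info)); /* zeroes reserved fields too */
		info->cmd = ETHTOOL_GDRVINFO;
	}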
/**
* struct ethtool_cmd - DEPRECATED, link control and status
* This structure is DEPRECATED, please use struct ethtool_link_settings.
@@ -67,6 +75,7 @@
* and other link features that the link partner advertised
* through autonegotiation; 0 if unknown or not applicable.
* Read-only.
+ * @reserved: Reserved for future use; see the note on reserved space.
*
* The link speed in Mbps is split between @speed and @speed_hi. Use
* the ethtool_cmd_speed() and ethtool_cmd_speed_set() functions to
@@ -155,6 +164,7 @@ static inline __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep)
* @bus_info: Device bus address. This should match the dev_name()
* string for the underlying bus device, if there is one. May be
* an empty string.
+ * @reserved2: Reserved for future use; see the note on reserved space.
* @n_priv_flags: Number of flags valid for %ETHTOOL_GPFLAGS and
* %ETHTOOL_SPFLAGS commands; also the number of strings in the
* %ETH_SS_PRIV_FLAGS set
@@ -221,9 +231,10 @@ enum tunable_id {
ETHTOOL_RX_COPYBREAK,
ETHTOOL_TX_COPYBREAK,
ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */
+ ETHTOOL_TX_COPYBREAK_BUF_SIZE,
/*
* Add your fresh new tunable attribute above and remember to update
- * tunable_strings[] in net/core/ethtool.c
+ * tunable_strings[] in net/ethtool/common.c
*/
__ETHTOOL_TUNABLE_COUNT,
};
@@ -246,7 +257,7 @@ struct ethtool_tunable {
__u32 id;
__u32 type_id;
__u32 len;
- void *data[0];
+ void *data[];
};
#define DOWNSHIFT_DEV_DEFAULT_COUNT 0xff
@@ -287,7 +298,7 @@ enum phy_tunable_id {
ETHTOOL_PHY_EDPD,
/*
* Add your fresh new phy tunable attribute above and remember to update
- * phy_tunable_strings[] in net/core/ethtool.c
+ * phy_tunable_strings[] in net/ethtool/common.c
*/
__ETHTOOL_PHY_TUNABLE_COUNT,
};
@@ -311,7 +322,7 @@ struct ethtool_regs {
__u32 cmd;
__u32 version;
__u32 len;
- __u8 data[0];
+ __u8 data[];
};
/**
@@ -337,7 +348,7 @@ struct ethtool_eeprom {
__u32 magic;
__u32 offset;
__u32 len;
- __u8 data[0];
+ __u8 data[];
};
/**
@@ -356,6 +367,7 @@ struct ethtool_eeprom {
* @tx_lpi_timer: Time in microseconds the interface delays prior to asserting
* its tx lpi (after reaching 'idle' state). Effective only when eee
* was negotiated and tx_lpi_enabled was set.
+ * @reserved: Reserved for future use; see the note on reserved space.
*/
struct ethtool_eee {
__u32 cmd;
@@ -374,6 +386,7 @@ struct ethtool_eee {
* @cmd: %ETHTOOL_GMODULEINFO
* @type: Standard the module information conforms to %ETH_MODULE_SFF_xxxx
* @eeprom_len: Length of the eeprom
+ * @reserved: Reserved for future use; see the note on reserved space.
*
* This structure is used to return the information to
* properly size memory for a subsequent call to %ETHTOOL_GMODULEEEPROM.
@@ -579,6 +592,70 @@ struct ethtool_pauseparam {
__u32 tx_pause;
};
+/* Link extended state */
+enum ethtool_link_ext_state {
+ ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
+ ETHTOOL_LINK_EXT_STATE_NO_CABLE,
+ ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+ ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE,
+ ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE,
+ ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED,
+ ETHTOOL_LINK_EXT_STATE_OVERHEAT,
+ ETHTOOL_LINK_EXT_STATE_MODULE,
+};
+
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_AUTONEG. */
+enum ethtool_link_ext_substate_autoneg {
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED = 1,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD,
+};
+
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE.
+ */
+enum ethtool_link_ext_substate_link_training {
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED = 1,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT,
+};
+
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH.
+ */
+enum ethtool_link_ext_substate_link_logical_mismatch {
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK = 1,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED,
+};
+
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY.
+ */
+enum ethtool_link_ext_substate_bad_signal_integrity {
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS = 1,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_REFERENCE_CLOCK_LOST,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_ALOS,
+};
+
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE. */
+enum ethtool_link_ext_substate_cable_issue {
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE = 1,
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE,
+};
+
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_MODULE. */
+enum ethtool_link_ext_substate_module {
+ ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY = 1,
+};
+
#define ETH_GSTRING_LEN 32
/**
@@ -591,11 +668,23 @@ struct ethtool_pauseparam {
* now deprecated
* @ETH_SS_FEATURES: Device feature names
 * @ETH_SS_RSS_HASH_FUNCS: RSS hash function names
+ * @ETH_SS_TUNABLES: tunable names
* @ETH_SS_PHY_STATS: Statistic names, for use with %ETHTOOL_GPHYSTATS
* @ETH_SS_PHY_TUNABLES: PHY tunable names
* @ETH_SS_LINK_MODES: link mode names
* @ETH_SS_MSG_CLASSES: debug message class names
* @ETH_SS_WOL_MODES: wake-on-lan modes
+ * @ETH_SS_SOF_TIMESTAMPING: SOF_TIMESTAMPING_* flags
+ * @ETH_SS_TS_TX_TYPES: timestamping Tx types
+ * @ETH_SS_TS_RX_FILTERS: timestamping Rx filters
+ * @ETH_SS_UDP_TUNNEL_TYPES: UDP tunnel types
+ * @ETH_SS_STATS_STD: standardized stats
+ * @ETH_SS_STATS_ETH_PHY: names of IEEE 802.3 PHY statistics
+ * @ETH_SS_STATS_ETH_MAC: names of IEEE 802.3 MAC statistics
+ * @ETH_SS_STATS_ETH_CTRL: names of IEEE 802.3 MAC Control statistics
+ * @ETH_SS_STATS_RMON: names of RMON statistics
+ *
+ * @ETH_SS_COUNT: number of defined string sets
*/
enum ethtool_stringset {
ETH_SS_TEST = 0,
@@ -610,12 +699,89 @@ enum ethtool_stringset {
ETH_SS_LINK_MODES,
ETH_SS_MSG_CLASSES,
ETH_SS_WOL_MODES,
+ ETH_SS_SOF_TIMESTAMPING,
+ ETH_SS_TS_TX_TYPES,
+ ETH_SS_TS_RX_FILTERS,
+ ETH_SS_UDP_TUNNEL_TYPES,
+ ETH_SS_STATS_STD,
+ ETH_SS_STATS_ETH_PHY,
+ ETH_SS_STATS_ETH_MAC,
+ ETH_SS_STATS_ETH_CTRL,
+ ETH_SS_STATS_RMON,
/* add new constants above here */
ETH_SS_COUNT
};
/**
+ * enum ethtool_module_power_mode_policy - plug-in module power mode policy
+ * @ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH: Module is always in high power mode.
+ * @ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO: Module is transitioned by the host
+ * to high power mode when the first port using it is put administratively
+ * up and to low power mode when the last port using it is put
+ * administratively down.
+ */
+enum ethtool_module_power_mode_policy {
+ ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH = 1,
+ ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO,
+};
+
+/**
+ * enum ethtool_module_power_mode - plug-in module power mode
+ * @ETHTOOL_MODULE_POWER_MODE_LOW: Module is in low power mode.
+ * @ETHTOOL_MODULE_POWER_MODE_HIGH: Module is in high power mode.
+ */
+enum ethtool_module_power_mode {
+ ETHTOOL_MODULE_POWER_MODE_LOW = 1,
+ ETHTOOL_MODULE_POWER_MODE_HIGH,
+};
+
+/**
+ * enum ethtool_podl_pse_admin_state - operational state of the PoDL PSE
+ * functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState
+ * @ETHTOOL_PODL_PSE_ADMIN_STATE_UNKNOWN: state of the PoDL PSE functions is
+ * unknown
+ * @ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED: PoDL PSE functions are disabled
+ * @ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED: PoDL PSE functions are enabled
+ */
+enum ethtool_podl_pse_admin_state {
+ ETHTOOL_PODL_PSE_ADMIN_STATE_UNKNOWN = 1,
+ ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED,
+ ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED,
+};
+
+/**
+ * enum ethtool_podl_pse_pw_d_status - power detection status of the PoDL PSE.
+ * IEEE 802.3-2018 30.15.1.1.3 aPoDLPSEPowerDetectionStatus:
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_UNKNOWN: PoDL PSE power detection status is
+ * unknown
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED: "The enumeration “disabled” is
+ * asserted true when the PoDL PSE state diagram variable mr_pse_enable is
+ * false"
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_SEARCHING: "The enumeration “searching” is
+ * asserted true when either of the PSE state diagram variables
+ * pi_detecting or pi_classifying is true."
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING: "The enumeration “deliveringPower”
+ * is asserted true when the PoDL PSE state diagram variable pi_powered is
+ * true."
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_SLEEP: "The enumeration “sleep” is asserted
+ * true when the PoDL PSE state diagram variable pi_sleeping is true."
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_IDLE: "The enumeration “idle” is asserted true
+ * when the logical combination of the PoDL PSE state diagram variables
+ * pi_prebiased*!pi_sleeping is true."
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_ERROR: "The enumeration “error” is asserted
+ * true when the PoDL PSE state diagram variable overload_held is true."
+ */
+enum ethtool_podl_pse_pw_d_status {
+ ETHTOOL_PODL_PSE_PW_D_STATUS_UNKNOWN = 1,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_SEARCHING,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_SLEEP,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_IDLE,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_ERROR,
+};
+
+/**
* struct ethtool_gstrings - string set for data tagging
* @cmd: Command number = %ETHTOOL_GSTRINGS
* @string_set: String set ID; one of &enum ethtool_stringset
@@ -631,12 +797,13 @@ struct ethtool_gstrings {
__u32 cmd;
__u32 string_set;
__u32 len;
- __u8 data[0];
+ __u8 data[];
};
/**
* struct ethtool_sset_info - string set information
* @cmd: Command number = %ETHTOOL_GSSET_INFO
+ * @reserved: Reserved for future use; see the note on reserved space.
* @sset_mask: On entry, a bitmask of string sets to query, with bits
* numbered according to &enum ethtool_stringset. On return, a
* bitmask of those string sets queried that are supported.
@@ -655,7 +822,7 @@ struct ethtool_sset_info {
__u32 cmd;
__u32 reserved;
__u64 sset_mask;
- __u32 data[0];
+ __u32 data[];
};
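A sketch of the string-set query implied by this layout: ask how many strings a set has via %ETHTOOL_GSSET_INFO, then size the %ETHTOOL_GSTRINGS buffer from the answer. The fd and ifr plumbing is assumed to be set up as in the earlier tunable sketch:

#include <stdlib.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int get_sset_count(int fd, struct ifreq *ifr, unsigned int set)
{
	struct ethtool_sset_info *sset;
	int n = -1;

	/* Header plus room for one __u32 length in the flexible data[]. */
	sset = calloc(1, sizeof(*sset) + sizeof(__u32));
	if (!sset)
		return -1;
	sset->cmd = ETHTOOL_GSSET_INFO;
	sset->sset_mask = 1ULL << set;
	ifr->ifr_data = (void *)sset;
	/* On return the mask keeps only the supported sets we asked about. */
	if (!ioctl(fd, SIOCETHTOOL, ifr) && (sset->sset_mask & (1ULL << set)))
		n = (int)sset->data[0];
	free(sset);
	return n;
}

For example, get_sset_count(fd, &ifr, ETH_SS_PRIV_FLAGS) returns the number of private flag strings.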
/**
@@ -681,6 +848,7 @@ enum ethtool_test_flags {
* @flags: A bitmask of flags from &enum ethtool_test_flags. Some
* flags may be set by the user on entry; others may be set by
* the driver on return.
+ * @reserved: Reserved for future use; see the note on reserved space.
* @len: On return, the number of test results
* @data: Array of test results
*
@@ -694,7 +862,7 @@ struct ethtool_test {
__u32 flags;
__u32 reserved;
__u32 len;
- __u64 data[0];
+ __u64 data[];
};
/**
@@ -711,7 +879,7 @@ struct ethtool_test {
struct ethtool_stats {
__u32 cmd;
__u32 n_stats;
- __u64 data[0];
+ __u64 data[];
};
/**
@@ -728,7 +896,7 @@ struct ethtool_stats {
struct ethtool_perm_addr {
__u32 cmd;
__u32 size;
- __u8 data[0];
+ __u8 data[];
};
/* boolean flags controlling per-interface behavior characteristics.
@@ -881,6 +1049,7 @@ union ethtool_flow_union {
* @vlan_etype: VLAN EtherType
* @vlan_tci: VLAN tag control information
* @data: user defined data
+ * @padding: Reserved for future use; see the note on reserved space.
*
* Note, @vlan_etype, @vlan_tci, and @data are only valid if %FLOW_EXT
* is set in &struct ethtool_rx_flow_spec @flow_type.
@@ -1036,7 +1205,7 @@ struct ethtool_rxnfc {
struct ethtool_rxfh_indir {
__u32 cmd;
__u32 size;
- __u32 ring_index[0];
+ __u32 ring_index[];
};
/**
@@ -1056,7 +1225,8 @@ struct ethtool_rxfh_indir {
* hardware hash key.
* @hfunc: Defines the current RSS hash function used by HW (or to be set to).
* Valid values are one of the %ETH_RSS_HASH_*.
- * @rsvd: Reserved for future extensions.
+ * @rsvd8: Reserved for future use; see the note on reserved space.
+ * @rsvd32: Reserved for future use; see the note on reserved space.
* @rss_config: RX ring/queue index for each hash value i.e., indirection table
* of @indir_size __u32 elements, followed by hash key of @key_size
* bytes.
@@ -1076,7 +1246,7 @@ struct ethtool_rxfh {
__u8 hfunc;
__u8 rsvd8[3];
__u32 rsvd32;
- __u32 rss_config[0];
+ __u32 rss_config[];
};
#define ETH_RXFH_CONTEXT_ALLOC 0xffffffff
#define ETH_RXFH_INDIR_NO_CHANGE 0xffffffff
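The variable-length @rss_config tail makes %ETHTOOL_GRSSH naturally a two-call operation: first read @indir_size and @key_size with both left at zero, then allocate and fetch. A sketch under the same ioctl-plumbing assumptions as the earlier examples:

#include <stdlib.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static struct ethtool_rxfh *get_rss_config(int fd, struct ifreq *ifr)
{
	struct ethtool_rxfh probe = { .cmd = ETHTOOL_GRSSH };
	struct ethtool_rxfh *rxfh;

	/* First call: with sizes left at 0 the kernel reports them back. */
	ifr->ifr_data = (void *)&probe;
	if (ioctl(fd, SIOCETHTOOL, ifr))
		return NULL;

	/* rss_config[] = indir_size __u32 entries, then key_size key bytes. */
	rxfh = calloc(1, sizeof(*rxfh) + probe.indir_size * sizeof(__u32) +
			 probe.key_size);
	if (!rxfh)
		return NULL;
	rxfh->cmd = ETHTOOL_GRSSH;
	rxfh->indir_size = probe.indir_size;
	rxfh->key_size = probe.key_size;
	ifr->ifr_data = (void *)rxfh;
	if (ioctl(fd, SIOCETHTOOL, ifr)) {
		free(rxfh);
		return NULL;
	}
	/* The hash key starts at &rxfh->rss_config[rxfh->indir_size]. */
	return rxfh;
}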
@@ -1161,7 +1331,7 @@ struct ethtool_dump {
__u32 version;
__u32 flag;
__u32 len;
- __u8 data[0];
+ __u8 data[];
};
#define ETH_FW_DUMP_DISABLE 0
@@ -1193,7 +1363,7 @@ struct ethtool_get_features_block {
struct ethtool_gfeatures {
__u32 cmd;
__u32 size;
- struct ethtool_get_features_block features[0];
+ struct ethtool_get_features_block features[];
};
/**
@@ -1215,7 +1385,7 @@ struct ethtool_set_features_block {
struct ethtool_sfeatures {
__u32 cmd;
__u32 size;
- struct ethtool_set_features_block features[0];
+ struct ethtool_set_features_block features[];
};
/**
@@ -1224,7 +1394,9 @@ struct ethtool_sfeatures {
* @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags
* @phc_index: device index of the associated PHC, or -1 if there is none
* @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values
+ * @tx_reserved: Reserved for future use; see the note on reserved space.
* @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values
+ * @rx_reserved: Reserved for future use; see the note on reserved space.
*
* The bits in the 'tx_types' and 'rx_filters' fields correspond to
* the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values,
@@ -1298,15 +1470,33 @@ struct ethtool_per_queue_op {
};
/**
- * struct ethtool_fecparam - Ethernet forward error correction(fec) parameters
+ * struct ethtool_fecparam - Ethernet Forward Error Correction parameters
* @cmd: Command number = %ETHTOOL_GFECPARAM or %ETHTOOL_SFECPARAM
- * @active_fec: FEC mode which is active on porte
- * @fec: Bitmask of supported/configured FEC modes
- * @rsvd: Reserved for future extensions. i.e FEC bypass feature.
+ * @active_fec: FEC mode which is active on the port, single bit set, GET only.
+ * @fec: Bitmask of configured FEC modes.
+ * @reserved: Reserved for future extensions, ignore on GET, write 0 for SET.
*
- * Drivers should reject a non-zero setting of @autoneg when
- * autoneogotiation is disabled (or not supported) for the link.
+ * Note that @reserved was never validated on input and ethtool user space
+ * left it uninitialized when calling SET. Hence going forward it can only be
+ * used to return a value to userspace with GET.
+ *
+ * FEC modes supported by the device can be read via %ETHTOOL_GLINKSETTINGS.
+ * FEC settings are configured by link autonegotiation whenever it's enabled.
+ * With autoneg on, %ETHTOOL_GFECPARAM can be used to read the current mode.
 *
+ * When autoneg is disabled, %ETHTOOL_SFECPARAM controls the FEC settings.
+ * It is recommended that drivers only accept a single bit set in @fec.
+ * When multiple bits are set in @fec, drivers may pick a mode in an
+ * implementation-dependent way. Drivers should reject mixing
+ * %ETHTOOL_FEC_AUTO_BIT with other FEC modes, because it is unclear whether
+ * the other modes constrain AUTO or are independent choices.
+ * Drivers must reject SET requests if they support none of the requested modes.
+ *
+ * If the device does not support FEC, drivers may use %ETHTOOL_FEC_NONE instead
+ * of returning %EOPNOTSUPP from %ETHTOOL_GFECPARAM.
+ *
+ * See &enum ethtool_fec_config_bits for the definition of valid bits for both
+ * @fec and @active_fec.
*/
struct ethtool_fecparam {
__u32 cmd;
@@ -1318,11 +1508,16 @@ struct ethtool_fecparam {
/**
* enum ethtool_fec_config_bits - flags definition of ethtool_fec_configuration
- * @ETHTOOL_FEC_NONE: FEC mode configuration is not supported
- * @ETHTOOL_FEC_AUTO: Default/Best FEC mode provided by driver
- * @ETHTOOL_FEC_OFF: No FEC Mode
- * @ETHTOOL_FEC_RS: Reed-Solomon Forward Error Detection mode
- * @ETHTOOL_FEC_BASER: Base-R/Reed-Solomon Forward Error Detection mode
+ * @ETHTOOL_FEC_NONE_BIT: FEC mode configuration is not supported. Should not
+ * be used together with other bits. GET only.
+ * @ETHTOOL_FEC_AUTO_BIT: Select default/best FEC mode automatically, usually
+ * based link mode and SFP parameters read from module's
+ * EEPROM. This bit does _not_ mean autonegotiation.
+ * @ETHTOOL_FEC_OFF_BIT: No FEC Mode
+ * @ETHTOOL_FEC_RS_BIT: Reed-Solomon FEC Mode
+ * @ETHTOOL_FEC_BASER_BIT: Base-R/Reed-Solomon FEC Mode
+ * @ETHTOOL_FEC_LLRS_BIT: Low Latency Reed Solomon FEC Mode (25G/50G Ethernet
+ * Consortium)
*/
enum ethtool_fec_config_bits {
ETHTOOL_FEC_NONE_BIT,
@@ -1330,6 +1525,7 @@ enum ethtool_fec_config_bits {
ETHTOOL_FEC_OFF_BIT,
ETHTOOL_FEC_RS_BIT,
ETHTOOL_FEC_BASER_BIT,
+ ETHTOOL_FEC_LLRS_BIT,
};
#define ETHTOOL_FEC_NONE (1 << ETHTOOL_FEC_NONE_BIT)
@@ -1337,6 +1533,7 @@ enum ethtool_fec_config_bits {
#define ETHTOOL_FEC_OFF (1 << ETHTOOL_FEC_OFF_BIT)
#define ETHTOOL_FEC_RS (1 << ETHTOOL_FEC_RS_BIT)
#define ETHTOOL_FEC_BASER (1 << ETHTOOL_FEC_BASER_BIT)
+#define ETHTOOL_FEC_LLRS (1 << ETHTOOL_FEC_LLRS_BIT)
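A sketch of the GET-then-SET flow described above, forcing RS FEC with a single bit set in @fec and zeroing @reserved as documented. The fd and ifr are assumed to be prepared as in the earlier sketches:

#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int force_rs_fec(int fd, struct ifreq *ifr)
{
	struct ethtool_fecparam fec = { .cmd = ETHTOOL_GFECPARAM };

	ifr->ifr_data = (void *)&fec;
	if (ioctl(fd, SIOCETHTOOL, ifr))
		return -1;
	/* fec.active_fec now holds the single currently-active mode bit. */

	fec.cmd = ETHTOOL_SFECPARAM;
	fec.fec = ETHTOOL_FEC_RS;	/* one mode bit; don't mix with _AUTO */
	fec.reserved = 0;		/* write 0 on SET, as documented above */
	return ioctl(fd, SIOCETHTOOL, ifr);
}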
/* CMDs currently supported */
#define ETHTOOL_GSET 0x00000001 /* DEPRECATED, Get settings.
@@ -1521,7 +1718,25 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT = 71,
ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT = 72,
ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT = 73,
-
+ ETHTOOL_LINK_MODE_FEC_LLRS_BIT = 74,
+ ETHTOOL_LINK_MODE_100000baseKR_Full_BIT = 75,
+ ETHTOOL_LINK_MODE_100000baseSR_Full_BIT = 76,
+ ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT = 77,
+ ETHTOOL_LINK_MODE_100000baseCR_Full_BIT = 78,
+ ETHTOOL_LINK_MODE_100000baseDR_Full_BIT = 79,
+ ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT = 80,
+ ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT = 81,
+ ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT = 82,
+ ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT = 83,
+ ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT = 84,
+ ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT = 85,
+ ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT = 86,
+ ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT = 87,
+ ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT = 88,
+ ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89,
+ ETHTOOL_LINK_MODE_100baseFX_Half_BIT = 90,
+ ETHTOOL_LINK_MODE_100baseFX_Full_BIT = 91,
+ ETHTOOL_LINK_MODE_10baseT1L_Full_BIT = 92,
/* must be last entry */
__ETHTOOL_LINK_MODE_MASK_NBITS
};
@@ -1658,6 +1873,32 @@ static inline int ethtool_validate_duplex(__u8 duplex)
return 0;
}
+#define MASTER_SLAVE_CFG_UNSUPPORTED 0
+#define MASTER_SLAVE_CFG_UNKNOWN 1
+#define MASTER_SLAVE_CFG_MASTER_PREFERRED 2
+#define MASTER_SLAVE_CFG_SLAVE_PREFERRED 3
+#define MASTER_SLAVE_CFG_MASTER_FORCE 4
+#define MASTER_SLAVE_CFG_SLAVE_FORCE 5
+#define MASTER_SLAVE_STATE_UNSUPPORTED 0
+#define MASTER_SLAVE_STATE_UNKNOWN 1
+#define MASTER_SLAVE_STATE_MASTER 2
+#define MASTER_SLAVE_STATE_SLAVE 3
+#define MASTER_SLAVE_STATE_ERR 4
+
+/* These are used to throttle the rate of data on the phy interface when the
+ * native speed of the interface is higher than the link speed. These should
+ * not be used for phy interfaces which natively support multiple speeds (e.g.
+ * MII or SGMII).
+ */
+/* No rate matching performed. */
+#define RATE_MATCH_NONE 0
+/* The phy sends pause frames to throttle the MAC. */
+#define RATE_MATCH_PAUSE 1
+/* The phy asserts CRS to prevent the MAC from transmitting. */
+#define RATE_MATCH_CRS 2
+/* The MAC is programmed with a sufficiently-large IPG. */
+#define RATE_MATCH_OPEN_LOOP 3
+
/* Which connector port. */
#define PORT_TP 0x00
#define PORT_AUI 0x01
@@ -1849,6 +2090,11 @@ enum ethtool_reset_flags {
* autonegotiation; 0 if unknown or not applicable. Read-only.
* @transceiver: Used to distinguish different possible PHY types,
* reported consistently by PHYLIB. Read-only.
+ * @master_slave_cfg: Master/slave port mode.
+ * @master_slave_state: Master/slave port state.
+ * @rate_matching: Rate adaptation performed by the PHY.
+ * @reserved: Reserved for future use; see the note on reserved space.
+ * @link_mode_masks: Variable length bitmaps.
*
* If autonegotiation is disabled, the speed and @duplex represent the
* fixed link mode and are writable if the driver supports multiple
@@ -1896,9 +2142,11 @@ struct ethtool_link_settings {
__u8 eth_tp_mdix_ctrl;
__s8 link_mode_masks_nwords;
__u8 transceiver;
- __u8 reserved1[3];
+ __u8 master_slave_cfg;
+ __u8 master_slave_state;
+ __u8 rate_matching;
__u32 reserved[7];
- __u32 link_mode_masks[0];
+ __u32 link_mode_masks[];
/* layout of link_mode_masks fields:
* __u32 map_supported[link_mode_masks_nwords];
* __u32 map_advertising[link_mode_masks_nwords];
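The stacked-bitmap layout above reduces to simple pointer arithmetic in user space. A minimal sketch, assuming the usual %ETHTOOL_GLINKSETTINGS handshake has already completed so @link_mode_masks_nwords is positive:

#include <stdbool.h>
#include <linux/ethtool.h>

/* map: 0 = supported, 1 = advertising, 2 = lp_advertising */
static bool link_mode_test(const struct ethtool_link_settings *ls,
			   unsigned int map, unsigned int bit)
{
	const __u32 *words = ls->link_mode_masks +
			     map * (unsigned int)ls->link_mode_masks_nwords;

	return words[bit / 32] & (1U << (bit % 32));
}

/* e.g. link_mode_test(ls, 0, ETHTOOL_LINK_MODE_10000baseT_Full_BIT) */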
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index 7e0b460f872c..bb57084ac524 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -2,7 +2,7 @@
/*
* include/uapi/linux/ethtool_netlink.h - netlink interface for ethtool
*
- * See Documentation/networking/ethtool-netlink.txt in kernel source tree for
+ * See Documentation/networking/ethtool-netlink.rst in kernel source tree for
 * documentation of the interface.
*/
@@ -24,6 +24,33 @@ enum {
ETHTOOL_MSG_DEBUG_SET,
ETHTOOL_MSG_WOL_GET,
ETHTOOL_MSG_WOL_SET,
+ ETHTOOL_MSG_FEATURES_GET,
+ ETHTOOL_MSG_FEATURES_SET,
+ ETHTOOL_MSG_PRIVFLAGS_GET,
+ ETHTOOL_MSG_PRIVFLAGS_SET,
+ ETHTOOL_MSG_RINGS_GET,
+ ETHTOOL_MSG_RINGS_SET,
+ ETHTOOL_MSG_CHANNELS_GET,
+ ETHTOOL_MSG_CHANNELS_SET,
+ ETHTOOL_MSG_COALESCE_GET,
+ ETHTOOL_MSG_COALESCE_SET,
+ ETHTOOL_MSG_PAUSE_GET,
+ ETHTOOL_MSG_PAUSE_SET,
+ ETHTOOL_MSG_EEE_GET,
+ ETHTOOL_MSG_EEE_SET,
+ ETHTOOL_MSG_TSINFO_GET,
+ ETHTOOL_MSG_CABLE_TEST_ACT,
+ ETHTOOL_MSG_CABLE_TEST_TDR_ACT,
+ ETHTOOL_MSG_TUNNEL_INFO_GET,
+ ETHTOOL_MSG_FEC_GET,
+ ETHTOOL_MSG_FEC_SET,
+ ETHTOOL_MSG_MODULE_EEPROM_GET,
+ ETHTOOL_MSG_STATS_GET,
+ ETHTOOL_MSG_PHC_VCLOCKS_GET,
+ ETHTOOL_MSG_MODULE_GET,
+ ETHTOOL_MSG_MODULE_SET,
+ ETHTOOL_MSG_PSE_GET,
+ ETHTOOL_MSG_PSE_SET,
/* add new constants above here */
__ETHTOOL_MSG_USER_CNT,
@@ -43,6 +70,33 @@ enum {
ETHTOOL_MSG_DEBUG_NTF,
ETHTOOL_MSG_WOL_GET_REPLY,
ETHTOOL_MSG_WOL_NTF,
+ ETHTOOL_MSG_FEATURES_GET_REPLY,
+ ETHTOOL_MSG_FEATURES_SET_REPLY,
+ ETHTOOL_MSG_FEATURES_NTF,
+ ETHTOOL_MSG_PRIVFLAGS_GET_REPLY,
+ ETHTOOL_MSG_PRIVFLAGS_NTF,
+ ETHTOOL_MSG_RINGS_GET_REPLY,
+ ETHTOOL_MSG_RINGS_NTF,
+ ETHTOOL_MSG_CHANNELS_GET_REPLY,
+ ETHTOOL_MSG_CHANNELS_NTF,
+ ETHTOOL_MSG_COALESCE_GET_REPLY,
+ ETHTOOL_MSG_COALESCE_NTF,
+ ETHTOOL_MSG_PAUSE_GET_REPLY,
+ ETHTOOL_MSG_PAUSE_NTF,
+ ETHTOOL_MSG_EEE_GET_REPLY,
+ ETHTOOL_MSG_EEE_NTF,
+ ETHTOOL_MSG_TSINFO_GET_REPLY,
+ ETHTOOL_MSG_CABLE_TEST_NTF,
+ ETHTOOL_MSG_CABLE_TEST_TDR_NTF,
+ ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY,
+ ETHTOOL_MSG_FEC_GET_REPLY,
+ ETHTOOL_MSG_FEC_NTF,
+ ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY,
+ ETHTOOL_MSG_STATS_GET_REPLY,
+ ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY,
+ ETHTOOL_MSG_MODULE_GET_REPLY,
+ ETHTOOL_MSG_MODULE_NTF,
+ ETHTOOL_MSG_PSE_GET_REPLY,
/* add new constants above here */
__ETHTOOL_MSG_KERNEL_CNT,
@@ -55,9 +109,12 @@ enum {
#define ETHTOOL_FLAG_COMPACT_BITSETS (1 << 0)
/* provide optional reply for SET or ACT requests */
#define ETHTOOL_FLAG_OMIT_REPLY (1 << 1)
+/* request statistics, if supported by the driver */
+#define ETHTOOL_FLAG_STATS (1 << 2)
#define ETHTOOL_FLAG_ALL (ETHTOOL_FLAG_COMPACT_BITSETS | \
- ETHTOOL_FLAG_OMIT_REPLY)
+ ETHTOOL_FLAG_OMIT_REPLY | \
+ ETHTOOL_FLAG_STATS)
enum {
ETHTOOL_A_HEADER_UNSPEC,
@@ -185,6 +242,10 @@ enum {
ETHTOOL_A_LINKMODES_PEER, /* bitset */
ETHTOOL_A_LINKMODES_SPEED, /* u32 */
ETHTOOL_A_LINKMODES_DUPLEX, /* u8 */
+ ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG, /* u8 */
+ ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE, /* u8 */
+ ETHTOOL_A_LINKMODES_LANES, /* u32 */
+ ETHTOOL_A_LINKMODES_RATE_MATCHING, /* u8 */
/* add new constants above here */
__ETHTOOL_A_LINKMODES_CNT,
@@ -197,6 +258,10 @@ enum {
ETHTOOL_A_LINKSTATE_UNSPEC,
ETHTOOL_A_LINKSTATE_HEADER, /* nest - _A_HEADER_* */
ETHTOOL_A_LINKSTATE_LINK, /* u8 */
+ ETHTOOL_A_LINKSTATE_SQI, /* u32 */
+ ETHTOOL_A_LINKSTATE_SQI_MAX, /* u32 */
+ ETHTOOL_A_LINKSTATE_EXT_STATE, /* u8 */
+ ETHTOOL_A_LINKSTATE_EXT_SUBSTATE, /* u8 */
/* add new constants above here */
__ETHTOOL_A_LINKSTATE_CNT,
@@ -228,6 +293,592 @@ enum {
ETHTOOL_A_WOL_MAX = __ETHTOOL_A_WOL_CNT - 1
};
+/* FEATURES */
+
+enum {
+ ETHTOOL_A_FEATURES_UNSPEC,
+ ETHTOOL_A_FEATURES_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_FEATURES_HW, /* bitset */
+ ETHTOOL_A_FEATURES_WANTED, /* bitset */
+ ETHTOOL_A_FEATURES_ACTIVE, /* bitset */
+ ETHTOOL_A_FEATURES_NOCHANGE, /* bitset */
+
+ /* add new constants above here */
+ __ETHTOOL_A_FEATURES_CNT,
+ ETHTOOL_A_FEATURES_MAX = __ETHTOOL_A_FEATURES_CNT - 1
+};
+
+/* PRIVFLAGS */
+
+enum {
+ ETHTOOL_A_PRIVFLAGS_UNSPEC,
+ ETHTOOL_A_PRIVFLAGS_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_PRIVFLAGS_FLAGS, /* bitset */
+
+ /* add new constants above here */
+ __ETHTOOL_A_PRIVFLAGS_CNT,
+ ETHTOOL_A_PRIVFLAGS_MAX = __ETHTOOL_A_PRIVFLAGS_CNT - 1
+};
+
+/* RINGS */
+
+enum {
+ ETHTOOL_TCP_DATA_SPLIT_UNKNOWN = 0,
+ ETHTOOL_TCP_DATA_SPLIT_DISABLED,
+ ETHTOOL_TCP_DATA_SPLIT_ENABLED,
+};
+
+enum {
+ ETHTOOL_A_RINGS_UNSPEC,
+ ETHTOOL_A_RINGS_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_RINGS_RX_MAX, /* u32 */
+ ETHTOOL_A_RINGS_RX_MINI_MAX, /* u32 */
+ ETHTOOL_A_RINGS_RX_JUMBO_MAX, /* u32 */
+ ETHTOOL_A_RINGS_TX_MAX, /* u32 */
+ ETHTOOL_A_RINGS_RX, /* u32 */
+ ETHTOOL_A_RINGS_RX_MINI, /* u32 */
+ ETHTOOL_A_RINGS_RX_JUMBO, /* u32 */
+ ETHTOOL_A_RINGS_TX, /* u32 */
+ ETHTOOL_A_RINGS_RX_BUF_LEN, /* u32 */
+ ETHTOOL_A_RINGS_TCP_DATA_SPLIT, /* u8 */
+ ETHTOOL_A_RINGS_CQE_SIZE, /* u32 */
+ ETHTOOL_A_RINGS_TX_PUSH, /* u8 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_RINGS_CNT,
+ ETHTOOL_A_RINGS_MAX = (__ETHTOOL_A_RINGS_CNT - 1)
+};
+
+/* CHANNELS */
+
+enum {
+ ETHTOOL_A_CHANNELS_UNSPEC,
+ ETHTOOL_A_CHANNELS_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_CHANNELS_RX_MAX, /* u32 */
+ ETHTOOL_A_CHANNELS_TX_MAX, /* u32 */
+ ETHTOOL_A_CHANNELS_OTHER_MAX, /* u32 */
+ ETHTOOL_A_CHANNELS_COMBINED_MAX, /* u32 */
+ ETHTOOL_A_CHANNELS_RX_COUNT, /* u32 */
+ ETHTOOL_A_CHANNELS_TX_COUNT, /* u32 */
+ ETHTOOL_A_CHANNELS_OTHER_COUNT, /* u32 */
+ ETHTOOL_A_CHANNELS_COMBINED_COUNT, /* u32 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_CHANNELS_CNT,
+ ETHTOOL_A_CHANNELS_MAX = (__ETHTOOL_A_CHANNELS_CNT - 1)
+};
+
+/* COALESCE */
+
+enum {
+ ETHTOOL_A_COALESCE_UNSPEC,
+ ETHTOOL_A_COALESCE_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_COALESCE_RX_USECS, /* u32 */
+ ETHTOOL_A_COALESCE_RX_MAX_FRAMES, /* u32 */
+ ETHTOOL_A_COALESCE_RX_USECS_IRQ, /* u32 */
+ ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ, /* u32 */
+ ETHTOOL_A_COALESCE_TX_USECS, /* u32 */
+ ETHTOOL_A_COALESCE_TX_MAX_FRAMES, /* u32 */
+ ETHTOOL_A_COALESCE_TX_USECS_IRQ, /* u32 */
+ ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ, /* u32 */
+ ETHTOOL_A_COALESCE_STATS_BLOCK_USECS, /* u32 */
+ ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX, /* u8 */
+ ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX, /* u8 */
+ ETHTOOL_A_COALESCE_PKT_RATE_LOW, /* u32 */
+ ETHTOOL_A_COALESCE_RX_USECS_LOW, /* u32 */
+ ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW, /* u32 */
+ ETHTOOL_A_COALESCE_TX_USECS_LOW, /* u32 */
+ ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW, /* u32 */
+ ETHTOOL_A_COALESCE_PKT_RATE_HIGH, /* u32 */
+ ETHTOOL_A_COALESCE_RX_USECS_HIGH, /* u32 */
+ ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH, /* u32 */
+ ETHTOOL_A_COALESCE_TX_USECS_HIGH, /* u32 */
+ ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH, /* u32 */
+ ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL, /* u32 */
+ ETHTOOL_A_COALESCE_USE_CQE_MODE_TX, /* u8 */
+ ETHTOOL_A_COALESCE_USE_CQE_MODE_RX, /* u8 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_COALESCE_CNT,
+ ETHTOOL_A_COALESCE_MAX = (__ETHTOOL_A_COALESCE_CNT - 1)
+};
+
+/* PAUSE */
+
+enum {
+ ETHTOOL_A_PAUSE_UNSPEC,
+ ETHTOOL_A_PAUSE_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_PAUSE_AUTONEG, /* u8 */
+ ETHTOOL_A_PAUSE_RX, /* u8 */
+ ETHTOOL_A_PAUSE_TX, /* u8 */
+ ETHTOOL_A_PAUSE_STATS, /* nest - _PAUSE_STAT_* */
+
+ /* add new constants above here */
+ __ETHTOOL_A_PAUSE_CNT,
+ ETHTOOL_A_PAUSE_MAX = (__ETHTOOL_A_PAUSE_CNT - 1)
+};
+
+enum {
+ ETHTOOL_A_PAUSE_STAT_UNSPEC,
+ ETHTOOL_A_PAUSE_STAT_PAD,
+
+ ETHTOOL_A_PAUSE_STAT_TX_FRAMES,
+ ETHTOOL_A_PAUSE_STAT_RX_FRAMES,
+
+ /* add new constants above here
+ * adjust ETHTOOL_PAUSE_STAT_CNT if adding non-stats!
+ */
+ __ETHTOOL_A_PAUSE_STAT_CNT,
+ ETHTOOL_A_PAUSE_STAT_MAX = (__ETHTOOL_A_PAUSE_STAT_CNT - 1)
+};
+
+/* EEE */
+
+enum {
+ ETHTOOL_A_EEE_UNSPEC,
+ ETHTOOL_A_EEE_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_EEE_MODES_OURS, /* bitset */
+ ETHTOOL_A_EEE_MODES_PEER, /* bitset */
+ ETHTOOL_A_EEE_ACTIVE, /* u8 */
+ ETHTOOL_A_EEE_ENABLED, /* u8 */
+ ETHTOOL_A_EEE_TX_LPI_ENABLED, /* u8 */
+ ETHTOOL_A_EEE_TX_LPI_TIMER, /* u32 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_EEE_CNT,
+ ETHTOOL_A_EEE_MAX = (__ETHTOOL_A_EEE_CNT - 1)
+};
+
+/* TSINFO */
+
+enum {
+ ETHTOOL_A_TSINFO_UNSPEC,
+ ETHTOOL_A_TSINFO_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_TSINFO_TIMESTAMPING, /* bitset */
+ ETHTOOL_A_TSINFO_TX_TYPES, /* bitset */
+ ETHTOOL_A_TSINFO_RX_FILTERS, /* bitset */
+ ETHTOOL_A_TSINFO_PHC_INDEX, /* u32 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_TSINFO_CNT,
+ ETHTOOL_A_TSINFO_MAX = (__ETHTOOL_A_TSINFO_CNT - 1)
+};
+
+/* PHC VCLOCKS */
+
+enum {
+ ETHTOOL_A_PHC_VCLOCKS_UNSPEC,
+ ETHTOOL_A_PHC_VCLOCKS_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_PHC_VCLOCKS_NUM, /* u32 */
+ ETHTOOL_A_PHC_VCLOCKS_INDEX, /* array, s32 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_PHC_VCLOCKS_CNT,
+ ETHTOOL_A_PHC_VCLOCKS_MAX = (__ETHTOOL_A_PHC_VCLOCKS_CNT - 1)
+};
+
+/* CABLE TEST */
+
+enum {
+ ETHTOOL_A_CABLE_TEST_UNSPEC,
+ ETHTOOL_A_CABLE_TEST_HEADER, /* nest - _A_HEADER_* */
+
+ /* add new constants above here */
+ __ETHTOOL_A_CABLE_TEST_CNT,
+ ETHTOOL_A_CABLE_TEST_MAX = __ETHTOOL_A_CABLE_TEST_CNT - 1
+};
+
+/* CABLE TEST NOTIFY */
+enum {
+ ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC,
+ ETHTOOL_A_CABLE_RESULT_CODE_OK,
+ ETHTOOL_A_CABLE_RESULT_CODE_OPEN,
+ ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT,
+ ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT,
+};
+
+enum {
+ ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_PAIR_B,
+ ETHTOOL_A_CABLE_PAIR_C,
+ ETHTOOL_A_CABLE_PAIR_D,
+};
+
+enum {
+ ETHTOOL_A_CABLE_RESULT_UNSPEC,
+ ETHTOOL_A_CABLE_RESULT_PAIR, /* u8 ETHTOOL_A_CABLE_PAIR_ */
+ ETHTOOL_A_CABLE_RESULT_CODE, /* u8 ETHTOOL_A_CABLE_RESULT_CODE_ */
+
+ __ETHTOOL_A_CABLE_RESULT_CNT,
+ ETHTOOL_A_CABLE_RESULT_MAX = (__ETHTOOL_A_CABLE_RESULT_CNT - 1)
+};
+
+enum {
+ ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC,
+ ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR, /* u8 ETHTOOL_A_CABLE_PAIR_ */
+ ETHTOOL_A_CABLE_FAULT_LENGTH_CM, /* u32 */
+
+ __ETHTOOL_A_CABLE_FAULT_LENGTH_CNT,
+ ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = (__ETHTOOL_A_CABLE_FAULT_LENGTH_CNT - 1)
+};
+
+enum {
+ ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC,
+ ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED,
+ ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED
+};
+
+enum {
+ ETHTOOL_A_CABLE_NEST_UNSPEC,
+ ETHTOOL_A_CABLE_NEST_RESULT, /* nest - ETHTOOL_A_CABLE_RESULT_ */
+ ETHTOOL_A_CABLE_NEST_FAULT_LENGTH, /* nest - ETHTOOL_A_CABLE_FAULT_LENGTH_ */
+ __ETHTOOL_A_CABLE_NEST_CNT,
+ ETHTOOL_A_CABLE_NEST_MAX = (__ETHTOOL_A_CABLE_NEST_CNT - 1)
+};
+
+enum {
+ ETHTOOL_A_CABLE_TEST_NTF_UNSPEC,
+ ETHTOOL_A_CABLE_TEST_NTF_HEADER, /* nest - ETHTOOL_A_HEADER_* */
+ ETHTOOL_A_CABLE_TEST_NTF_STATUS, /* u8 - _STARTED/_COMPLETED */
+ ETHTOOL_A_CABLE_TEST_NTF_NEST, /* nest - of results: */
+
+ __ETHTOOL_A_CABLE_TEST_NTF_CNT,
+ ETHTOOL_A_CABLE_TEST_NTF_MAX = (__ETHTOOL_A_CABLE_TEST_NTF_CNT - 1)
+};
+
+/* CABLE TEST TDR */
+
+enum {
+ ETHTOOL_A_CABLE_TEST_TDR_CFG_UNSPEC,
+ ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST, /* u32 */
+ ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST, /* u32 */
+ ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP, /* u32 */
+ ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR, /* u8 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_CABLE_TEST_TDR_CFG_CNT,
+ ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX = __ETHTOOL_A_CABLE_TEST_TDR_CFG_CNT - 1
+};
+
+enum {
+ ETHTOOL_A_CABLE_TEST_TDR_UNSPEC,
+ ETHTOOL_A_CABLE_TEST_TDR_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_CABLE_TEST_TDR_CFG, /* nest - *_TDR_CFG_* */
+
+ /* add new constants above here */
+ __ETHTOOL_A_CABLE_TEST_TDR_CNT,
+ ETHTOOL_A_CABLE_TEST_TDR_MAX = __ETHTOOL_A_CABLE_TEST_TDR_CNT - 1
+};
+
+/* CABLE TEST TDR NOTIFY */
+
+enum {
+ ETHTOOL_A_CABLE_AMPLITUDE_UNSPEC,
+ ETHTOOL_A_CABLE_AMPLITUDE_PAIR, /* u8 */
+ ETHTOOL_A_CABLE_AMPLITUDE_mV, /* s16 */
+
+ __ETHTOOL_A_CABLE_AMPLITUDE_CNT,
+ ETHTOOL_A_CABLE_AMPLITUDE_MAX = (__ETHTOOL_A_CABLE_AMPLITUDE_CNT - 1)
+};
+
+enum {
+ ETHTOOL_A_CABLE_PULSE_UNSPEC,
+ ETHTOOL_A_CABLE_PULSE_mV, /* s16 */
+
+ __ETHTOOL_A_CABLE_PULSE_CNT,
+ ETHTOOL_A_CABLE_PULSE_MAX = (__ETHTOOL_A_CABLE_PULSE_CNT - 1)
+};
+
+enum {
+ ETHTOOL_A_CABLE_STEP_UNSPEC,
+ ETHTOOL_A_CABLE_STEP_FIRST_DISTANCE, /* u32 */
+ ETHTOOL_A_CABLE_STEP_LAST_DISTANCE, /* u32 */
+ ETHTOOL_A_CABLE_STEP_STEP_DISTANCE, /* u32 */
+
+ __ETHTOOL_A_CABLE_STEP_CNT,
+ ETHTOOL_A_CABLE_STEP_MAX = (__ETHTOOL_A_CABLE_STEP_CNT - 1)
+};
+
+enum {
+ ETHTOOL_A_CABLE_TDR_NEST_UNSPEC,
+ ETHTOOL_A_CABLE_TDR_NEST_STEP, /* nest - ETHTOOL_A_CABLE_STEP */
+ ETHTOOL_A_CABLE_TDR_NEST_AMPLITUDE, /* nest - ETHTOOL_A_CABLE_AMPLITUDE */
+ ETHTOOL_A_CABLE_TDR_NEST_PULSE, /* nest - ETHTOOL_A_CABLE_PULSE */
+
+ __ETHTOOL_A_CABLE_TDR_NEST_CNT,
+ ETHTOOL_A_CABLE_TDR_NEST_MAX = (__ETHTOOL_A_CABLE_TDR_NEST_CNT - 1)
+};
+
+enum {
+ ETHTOOL_A_CABLE_TEST_TDR_NTF_UNSPEC,
+ ETHTOOL_A_CABLE_TEST_TDR_NTF_HEADER, /* nest - ETHTOOL_A_HEADER_* */
+ ETHTOOL_A_CABLE_TEST_TDR_NTF_STATUS, /* u8 - _STARTED/_COMPLETED */
+ ETHTOOL_A_CABLE_TEST_TDR_NTF_NEST, /* nest - of results: */
+
+ /* add new constants above here */
+ __ETHTOOL_A_CABLE_TEST_TDR_NTF_CNT,
+ ETHTOOL_A_CABLE_TEST_TDR_NTF_MAX = __ETHTOOL_A_CABLE_TEST_TDR_NTF_CNT - 1
+};
+
+/* TUNNEL INFO */
+
+enum {
+ ETHTOOL_UDP_TUNNEL_TYPE_VXLAN,
+ ETHTOOL_UDP_TUNNEL_TYPE_GENEVE,
+ ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE,
+
+ __ETHTOOL_UDP_TUNNEL_TYPE_CNT
+};
+
+enum {
+ ETHTOOL_A_TUNNEL_UDP_ENTRY_UNSPEC,
+
+ ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT, /* be16 */
+ ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE, /* u32 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_TUNNEL_UDP_ENTRY_CNT,
+ ETHTOOL_A_TUNNEL_UDP_ENTRY_MAX = (__ETHTOOL_A_TUNNEL_UDP_ENTRY_CNT - 1)
+};
+
+enum {
+ ETHTOOL_A_TUNNEL_UDP_TABLE_UNSPEC,
+
+ ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, /* u32 */
+ ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES, /* bitset */
+ ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY, /* nest - _UDP_ENTRY_* */
+
+ /* add new constants above here */
+ __ETHTOOL_A_TUNNEL_UDP_TABLE_CNT,
+ ETHTOOL_A_TUNNEL_UDP_TABLE_MAX = (__ETHTOOL_A_TUNNEL_UDP_TABLE_CNT - 1)
+};
+
+enum {
+ ETHTOOL_A_TUNNEL_UDP_UNSPEC,
+
+ ETHTOOL_A_TUNNEL_UDP_TABLE, /* nest - _UDP_TABLE_* */
+
+ /* add new constants above here */
+ __ETHTOOL_A_TUNNEL_UDP_CNT,
+ ETHTOOL_A_TUNNEL_UDP_MAX = (__ETHTOOL_A_TUNNEL_UDP_CNT - 1)
+};
+
+enum {
+ ETHTOOL_A_TUNNEL_INFO_UNSPEC,
+ ETHTOOL_A_TUNNEL_INFO_HEADER, /* nest - _A_HEADER_* */
+
+ ETHTOOL_A_TUNNEL_INFO_UDP_PORTS, /* nest - _UDP_TABLE */
+
+ /* add new constants above here */
+ __ETHTOOL_A_TUNNEL_INFO_CNT,
+ ETHTOOL_A_TUNNEL_INFO_MAX = (__ETHTOOL_A_TUNNEL_INFO_CNT - 1)
+};
+
+/* FEC */
+
+enum {
+ ETHTOOL_A_FEC_UNSPEC,
+ ETHTOOL_A_FEC_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_FEC_MODES, /* bitset */
+ ETHTOOL_A_FEC_AUTO, /* u8 */
+ ETHTOOL_A_FEC_ACTIVE, /* u32 */
+ ETHTOOL_A_FEC_STATS, /* nest - _A_FEC_STAT */
+
+ __ETHTOOL_A_FEC_CNT,
+ ETHTOOL_A_FEC_MAX = (__ETHTOOL_A_FEC_CNT - 1)
+};
+
+enum {
+ ETHTOOL_A_FEC_STAT_UNSPEC,
+ ETHTOOL_A_FEC_STAT_PAD,
+
+ ETHTOOL_A_FEC_STAT_CORRECTED, /* array, u64 */
+ ETHTOOL_A_FEC_STAT_UNCORR, /* array, u64 */
+ ETHTOOL_A_FEC_STAT_CORR_BITS, /* array, u64 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_FEC_STAT_CNT,
+ ETHTOOL_A_FEC_STAT_MAX = (__ETHTOOL_A_FEC_STAT_CNT - 1)
+};
+
+/* MODULE EEPROM */
+
+enum {
+ ETHTOOL_A_MODULE_EEPROM_UNSPEC,
+ ETHTOOL_A_MODULE_EEPROM_HEADER, /* nest - _A_HEADER_* */
+
+ ETHTOOL_A_MODULE_EEPROM_OFFSET, /* u32 */
+ ETHTOOL_A_MODULE_EEPROM_LENGTH, /* u32 */
+ ETHTOOL_A_MODULE_EEPROM_PAGE, /* u8 */
+ ETHTOOL_A_MODULE_EEPROM_BANK, /* u8 */
+ ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS, /* u8 */
+ ETHTOOL_A_MODULE_EEPROM_DATA, /* binary */
+
+ __ETHTOOL_A_MODULE_EEPROM_CNT,
+ ETHTOOL_A_MODULE_EEPROM_MAX = (__ETHTOOL_A_MODULE_EEPROM_CNT - 1)
+};
+
+/* STATS */
+
+enum {
+ ETHTOOL_A_STATS_UNSPEC,
+ ETHTOOL_A_STATS_PAD,
+ ETHTOOL_A_STATS_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_STATS_GROUPS, /* bitset */
+
+ ETHTOOL_A_STATS_GRP, /* nest - _A_STATS_GRP_* */
+
+ /* add new constants above here */
+ __ETHTOOL_A_STATS_CNT,
+ ETHTOOL_A_STATS_MAX = (__ETHTOOL_A_STATS_CNT - 1)
+};
+
+enum {
+ ETHTOOL_STATS_ETH_PHY,
+ ETHTOOL_STATS_ETH_MAC,
+ ETHTOOL_STATS_ETH_CTRL,
+ ETHTOOL_STATS_RMON,
+
+ /* add new constants above here */
+ __ETHTOOL_STATS_CNT
+};
+
+enum {
+ ETHTOOL_A_STATS_GRP_UNSPEC,
+ ETHTOOL_A_STATS_GRP_PAD,
+
+ ETHTOOL_A_STATS_GRP_ID, /* u32 */
+ ETHTOOL_A_STATS_GRP_SS_ID, /* u32 */
+
+ ETHTOOL_A_STATS_GRP_STAT, /* nest */
+
+ ETHTOOL_A_STATS_GRP_HIST_RX, /* nest */
+ ETHTOOL_A_STATS_GRP_HIST_TX, /* nest */
+
+ ETHTOOL_A_STATS_GRP_HIST_BKT_LOW, /* u32 */
+ ETHTOOL_A_STATS_GRP_HIST_BKT_HI, /* u32 */
+ ETHTOOL_A_STATS_GRP_HIST_VAL, /* u64 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_STATS_GRP_CNT,
+ ETHTOOL_A_STATS_GRP_MAX = (__ETHTOOL_A_STATS_GRP_CNT - 1)
+};
+
+enum {
+ /* 30.3.2.1.5 aSymbolErrorDuringCarrier */
+ ETHTOOL_A_STATS_ETH_PHY_5_SYM_ERR,
+
+ /* add new constants above here */
+ __ETHTOOL_A_STATS_ETH_PHY_CNT,
+ ETHTOOL_A_STATS_ETH_PHY_MAX = (__ETHTOOL_A_STATS_ETH_PHY_CNT - 1)
+};
+
+enum {
+ /* 30.3.1.1.2 aFramesTransmittedOK */
+ ETHTOOL_A_STATS_ETH_MAC_2_TX_PKT,
+ /* 30.3.1.1.3 aSingleCollisionFrames */
+ ETHTOOL_A_STATS_ETH_MAC_3_SINGLE_COL,
+ /* 30.3.1.1.4 aMultipleCollisionFrames */
+ ETHTOOL_A_STATS_ETH_MAC_4_MULTI_COL,
+ /* 30.3.1.1.5 aFramesReceivedOK */
+ ETHTOOL_A_STATS_ETH_MAC_5_RX_PKT,
+ /* 30.3.1.1.6 aFrameCheckSequenceErrors */
+ ETHTOOL_A_STATS_ETH_MAC_6_FCS_ERR,
+ /* 30.3.1.1.7 aAlignmentErrors */
+ ETHTOOL_A_STATS_ETH_MAC_7_ALIGN_ERR,
+ /* 30.3.1.1.8 aOctetsTransmittedOK */
+ ETHTOOL_A_STATS_ETH_MAC_8_TX_BYTES,
+ /* 30.3.1.1.9 aFramesWithDeferredXmissions */
+ ETHTOOL_A_STATS_ETH_MAC_9_TX_DEFER,
+ /* 30.3.1.1.10 aLateCollisions */
+ ETHTOOL_A_STATS_ETH_MAC_10_LATE_COL,
+ /* 30.3.1.1.11 aFramesAbortedDueToXSColls */
+ ETHTOOL_A_STATS_ETH_MAC_11_XS_COL,
+ /* 30.3.1.1.12 aFramesLostDueToIntMACXmitError */
+ ETHTOOL_A_STATS_ETH_MAC_12_TX_INT_ERR,
+ /* 30.3.1.1.13 aCarrierSenseErrors */
+ ETHTOOL_A_STATS_ETH_MAC_13_CS_ERR,
+ /* 30.3.1.1.14 aOctetsReceivedOK */
+ ETHTOOL_A_STATS_ETH_MAC_14_RX_BYTES,
+ /* 30.3.1.1.15 aFramesLostDueToIntMACRcvError */
+ ETHTOOL_A_STATS_ETH_MAC_15_RX_INT_ERR,
+
+ /* 30.3.1.1.18 aMulticastFramesXmittedOK */
+ ETHTOOL_A_STATS_ETH_MAC_18_TX_MCAST,
+ /* 30.3.1.1.19 aBroadcastFramesXmittedOK */
+ ETHTOOL_A_STATS_ETH_MAC_19_TX_BCAST,
+ /* 30.3.1.1.20 aFramesWithExcessiveDeferral */
+ ETHTOOL_A_STATS_ETH_MAC_20_XS_DEFER,
+ /* 30.3.1.1.21 aMulticastFramesReceivedOK */
+ ETHTOOL_A_STATS_ETH_MAC_21_RX_MCAST,
+ /* 30.3.1.1.22 aBroadcastFramesReceivedOK */
+ ETHTOOL_A_STATS_ETH_MAC_22_RX_BCAST,
+ /* 30.3.1.1.23 aInRangeLengthErrors */
+ ETHTOOL_A_STATS_ETH_MAC_23_IR_LEN_ERR,
+ /* 30.3.1.1.24 aOutOfRangeLengthField */
+ ETHTOOL_A_STATS_ETH_MAC_24_OOR_LEN,
+ /* 30.3.1.1.25 aFrameTooLongErrors */
+ ETHTOOL_A_STATS_ETH_MAC_25_TOO_LONG_ERR,
+
+ /* add new constants above here */
+ __ETHTOOL_A_STATS_ETH_MAC_CNT,
+ ETHTOOL_A_STATS_ETH_MAC_MAX = (__ETHTOOL_A_STATS_ETH_MAC_CNT - 1)
+};
+
+enum {
+ /* 30.3.3.3 aMACControlFramesTransmitted */
+ ETHTOOL_A_STATS_ETH_CTRL_3_TX,
+ /* 30.3.3.4 aMACControlFramesReceived */
+ ETHTOOL_A_STATS_ETH_CTRL_4_RX,
+ /* 30.3.3.5 aUnsupportedOpcodesReceived */
+ ETHTOOL_A_STATS_ETH_CTRL_5_RX_UNSUP,
+
+ /* add new constants above here */
+ __ETHTOOL_A_STATS_ETH_CTRL_CNT,
+ ETHTOOL_A_STATS_ETH_CTRL_MAX = (__ETHTOOL_A_STATS_ETH_CTRL_CNT - 1)
+};
+
+enum {
+ /* etherStatsUndersizePkts */
+ ETHTOOL_A_STATS_RMON_UNDERSIZE,
+ /* etherStatsOversizePkts */
+ ETHTOOL_A_STATS_RMON_OVERSIZE,
+ /* etherStatsFragments */
+ ETHTOOL_A_STATS_RMON_FRAG,
+ /* etherStatsJabbers */
+ ETHTOOL_A_STATS_RMON_JABBER,
+
+ /* add new constants above here */
+ __ETHTOOL_A_STATS_RMON_CNT,
+ ETHTOOL_A_STATS_RMON_MAX = (__ETHTOOL_A_STATS_RMON_CNT - 1)
+};
+
+/* MODULE */
+
+enum {
+ ETHTOOL_A_MODULE_UNSPEC,
+ ETHTOOL_A_MODULE_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_MODULE_POWER_MODE_POLICY, /* u8 */
+ ETHTOOL_A_MODULE_POWER_MODE, /* u8 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_MODULE_CNT,
+ ETHTOOL_A_MODULE_MAX = (__ETHTOOL_A_MODULE_CNT - 1)
+};
+
+/* Power Sourcing Equipment */
+enum {
+ ETHTOOL_A_PSE_UNSPEC,
+ ETHTOOL_A_PSE_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_PODL_PSE_ADMIN_STATE, /* u32 */
+ ETHTOOL_A_PODL_PSE_ADMIN_CONTROL, /* u32 */
+ ETHTOOL_A_PODL_PSE_PW_D_STATUS, /* u32 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_PSE_CNT,
+ ETHTOOL_A_PSE_MAX = (__ETHTOOL_A_PSE_CNT - 1)
+};
+
/* generic netlink info */
#define ETHTOOL_GENL_NAME "ethtool"
#define ETHTOOL_GENL_VERSION 1
diff --git a/include/uapi/linux/f2fs.h b/include/uapi/linux/f2fs.h
new file mode 100644
index 000000000000..3121d127d5aa
--- /dev/null
+++ b/include/uapi/linux/f2fs.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef _UAPI_LINUX_F2FS_H
+#define _UAPI_LINUX_F2FS_H
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * f2fs-specific ioctl commands
+ */
+#define F2FS_IOCTL_MAGIC 0xf5
+#define F2FS_IOC_START_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 1)
+#define F2FS_IOC_COMMIT_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 2)
+#define F2FS_IOC_START_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 3)
+#define F2FS_IOC_RELEASE_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 4)
+#define F2FS_IOC_ABORT_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 5)
+#define F2FS_IOC_GARBAGE_COLLECT _IOW(F2FS_IOCTL_MAGIC, 6, __u32)
+#define F2FS_IOC_WRITE_CHECKPOINT _IO(F2FS_IOCTL_MAGIC, 7)
+#define F2FS_IOC_DEFRAGMENT _IOWR(F2FS_IOCTL_MAGIC, 8, \
+ struct f2fs_defragment)
+#define F2FS_IOC_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \
+ struct f2fs_move_range)
+#define F2FS_IOC_FLUSH_DEVICE _IOW(F2FS_IOCTL_MAGIC, 10, \
+ struct f2fs_flush_device)
+#define F2FS_IOC_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11, \
+ struct f2fs_gc_range)
+#define F2FS_IOC_GET_FEATURES _IOR(F2FS_IOCTL_MAGIC, 12, __u32)
+#define F2FS_IOC_SET_PIN_FILE _IOW(F2FS_IOCTL_MAGIC, 13, __u32)
+#define F2FS_IOC_GET_PIN_FILE _IOR(F2FS_IOCTL_MAGIC, 14, __u32)
+#define F2FS_IOC_PRECACHE_EXTENTS _IO(F2FS_IOCTL_MAGIC, 15)
+#define F2FS_IOC_RESIZE_FS _IOW(F2FS_IOCTL_MAGIC, 16, __u64)
+#define F2FS_IOC_GET_COMPRESS_BLOCKS _IOR(F2FS_IOCTL_MAGIC, 17, __u64)
+#define F2FS_IOC_RELEASE_COMPRESS_BLOCKS \
+ _IOR(F2FS_IOCTL_MAGIC, 18, __u64)
+#define F2FS_IOC_RESERVE_COMPRESS_BLOCKS \
+ _IOR(F2FS_IOCTL_MAGIC, 19, __u64)
+#define F2FS_IOC_SEC_TRIM_FILE _IOW(F2FS_IOCTL_MAGIC, 20, \
+ struct f2fs_sectrim_range)
+#define F2FS_IOC_GET_COMPRESS_OPTION _IOR(F2FS_IOCTL_MAGIC, 21, \
+ struct f2fs_comp_option)
+#define F2FS_IOC_SET_COMPRESS_OPTION _IOW(F2FS_IOCTL_MAGIC, 22, \
+ struct f2fs_comp_option)
+#define F2FS_IOC_DECOMPRESS_FILE _IO(F2FS_IOCTL_MAGIC, 23)
+#define F2FS_IOC_COMPRESS_FILE _IO(F2FS_IOCTL_MAGIC, 24)
+
+/*
+ * Should be the same as XFS_IOC_GOINGDOWN.
+ * Flags for the going-down operation used by F2FS_IOC_SHUTDOWN.
+ */
+#define F2FS_IOC_SHUTDOWN _IOR('X', 125, __u32) /* Shutdown */
+#define F2FS_GOING_DOWN_FULLSYNC 0x0 /* going down with full sync */
+#define F2FS_GOING_DOWN_METASYNC 0x1 /* going down with metadata */
+#define F2FS_GOING_DOWN_NOSYNC 0x2 /* going down */
+#define F2FS_GOING_DOWN_METAFLUSH 0x3 /* going down with meta flush */
+#define F2FS_GOING_DOWN_NEED_FSCK 0x4 /* going down to trigger fsck */
+
+/*
+ * Flags used by F2FS_IOC_SEC_TRIM_FILE
+ */
+#define F2FS_TRIM_FILE_DISCARD 0x1 /* send discard command */
+#define F2FS_TRIM_FILE_ZEROOUT 0x2 /* zero out */
+#define F2FS_TRIM_FILE_MASK 0x3
+
+struct f2fs_gc_range {
+ __u32 sync;
+ __u64 start;
+ __u64 len;
+};
+
+struct f2fs_defragment {
+ __u64 start;
+ __u64 len;
+};
+
+struct f2fs_move_range {
+ __u32 dst_fd; /* destination fd */
+ __u64 pos_in; /* start position in src_fd */
+ __u64 pos_out; /* start position in dst_fd */
+ __u64 len; /* size to move */
+};
+
+struct f2fs_flush_device {
+ __u32 dev_num; /* device number to flush */
+ __u32 segments; /* # of segments to flush */
+};
+
+struct f2fs_sectrim_range {
+ __u64 start;
+ __u64 len;
+ __u64 flags;
+};
+
+struct f2fs_comp_option {
+ __u8 algorithm;
+ __u8 log_cluster_size;
+};
+
+#endif /* _UAPI_LINUX_F2FS_H */
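A sketch of driving one of these ioctls from user space; the path, range, and flag choice are illustrative only (the kernel may additionally require block-aligned ranges):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/f2fs.h>

int main(void)
{
	/* Hypothetical file on an f2fs mount. */
	int fd = open("/mnt/f2fs/data.bin", O_WRONLY);
	struct f2fs_sectrim_range range = {
		.start = 0,
		.len = 1 << 20,			/* first 1 MiB, example value */
		.flags = F2FS_TRIM_FILE_ZEROOUT,
	};

	if (fd < 0 || ioctl(fd, F2FS_IOC_SEC_TRIM_FILE, &range))
		perror("F2FS_IOC_SEC_TRIM_FILE");
	if (fd >= 0)
		close(fd);
	return 0;
}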
diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h
index b9effa6f8503..436258214bb0 100644
--- a/include/uapi/linux/fanotify.h
+++ b/include/uapi/linux/fanotify.h
@@ -20,14 +20,17 @@
#define FAN_OPEN_EXEC 0x00001000 /* File was opened for exec */
#define FAN_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
+#define FAN_FS_ERROR 0x00008000 /* Filesystem error */
#define FAN_OPEN_PERM 0x00010000 /* File open in perm check */
#define FAN_ACCESS_PERM 0x00020000 /* File accessed in perm check */
#define FAN_OPEN_EXEC_PERM 0x00040000 /* File open/exec in perm check */
-#define FAN_ONDIR 0x40000000 /* event occurred against dir */
+#define FAN_EVENT_ON_CHILD 0x08000000 /* Interested in child events */
-#define FAN_EVENT_ON_CHILD 0x08000000 /* interested in child events */
+#define FAN_RENAME 0x10000000 /* File was renamed */
+
+#define FAN_ONDIR 0x40000000 /* Event occurred against dir */
/* helper events */
#define FAN_CLOSE (FAN_CLOSE_WRITE | FAN_CLOSE_NOWRITE) /* close */
@@ -51,8 +54,18 @@
#define FAN_ENABLE_AUDIT 0x00000040
/* Flags to determine fanotify event format */
+#define FAN_REPORT_PIDFD 0x00000080 /* Report pidfd for event->pid */
#define FAN_REPORT_TID 0x00000100 /* event->pid is thread id */
#define FAN_REPORT_FID 0x00000200 /* Report unique file id */
+#define FAN_REPORT_DIR_FID 0x00000400 /* Report unique directory id */
+#define FAN_REPORT_NAME 0x00000800 /* Report events with name */
+#define FAN_REPORT_TARGET_FID 0x00001000 /* Report dirent target id */
+
+/* Convenience macro - FAN_REPORT_NAME requires FAN_REPORT_DIR_FID */
+#define FAN_REPORT_DFID_NAME (FAN_REPORT_DIR_FID | FAN_REPORT_NAME)
+/* Convenience macro - FAN_REPORT_TARGET_FID requires all other FID flags */
+#define FAN_REPORT_DFID_NAME_TARGET (FAN_REPORT_DFID_NAME | \
+ FAN_REPORT_FID | FAN_REPORT_TARGET_FID)
/* Deprecated - do not use this in programs and do not add new flags here! */
#define FAN_ALL_INIT_FLAGS (FAN_CLOEXEC | FAN_NONBLOCK | \
@@ -69,12 +82,21 @@
#define FAN_MARK_IGNORED_SURV_MODIFY 0x00000040
#define FAN_MARK_FLUSH 0x00000080
/* FAN_MARK_FILESYSTEM is 0x00000100 */
+#define FAN_MARK_EVICTABLE 0x00000200
+/* This bit is mutually exclusive with FAN_MARK_IGNORED_MASK bit */
+#define FAN_MARK_IGNORE 0x00000400
/* These are NOT bitwise flags. Both bits can be used together. */
#define FAN_MARK_INODE 0x00000000
#define FAN_MARK_MOUNT 0x00000010
#define FAN_MARK_FILESYSTEM 0x00000100
+/*
+ * Convenience macro - FAN_MARK_IGNORE requires FAN_MARK_IGNORED_SURV_MODIFY
+ * for non-inode mark types.
+ */
+#define FAN_MARK_IGNORE_SURV (FAN_MARK_IGNORE | FAN_MARK_IGNORED_SURV_MODIFY)
+
/* Deprecated - do not use this in programs and do not add new flags here! */
#define FAN_ALL_MARK_FLAGS (FAN_MARK_ADD |\
FAN_MARK_REMOVE |\
@@ -116,6 +138,16 @@ struct fanotify_event_metadata {
};
#define FAN_EVENT_INFO_TYPE_FID 1
+#define FAN_EVENT_INFO_TYPE_DFID_NAME 2
+#define FAN_EVENT_INFO_TYPE_DFID 3
+#define FAN_EVENT_INFO_TYPE_PIDFD 4
+#define FAN_EVENT_INFO_TYPE_ERROR 5
+
+/* Special info types for FAN_RENAME */
+#define FAN_EVENT_INFO_TYPE_OLD_DFID_NAME 10
+/* Reserved for FAN_EVENT_INFO_TYPE_OLD_DFID 11 */
+#define FAN_EVENT_INFO_TYPE_NEW_DFID_NAME 12
+/* Reserved for FAN_EVENT_INFO_TYPE_NEW_DFID 13 */
/* Variable length info record following event metadata */
struct fanotify_event_info_header {
@@ -124,7 +156,13 @@ struct fanotify_event_info_header {
__u16 len;
};
-/* Unique file identifier info record */
+/*
+ * Unique file identifier info record.
+ * This structure is used for records of types FAN_EVENT_INFO_TYPE_FID,
+ * FAN_EVENT_INFO_TYPE_DFID and FAN_EVENT_INFO_TYPE_DFID_NAME.
+ * For FAN_EVENT_INFO_TYPE_DFID_NAME there is additionally a null terminated
+ * name immediately after the file handle.
+ */
struct fanotify_event_info_fid {
struct fanotify_event_info_header hdr;
__kernel_fsid_t fsid;
@@ -132,7 +170,22 @@ struct fanotify_event_info_fid {
* Following is an opaque struct file_handle that can be passed as
* an argument to open_by_handle_at(2).
*/
- unsigned char handle[0];
+ unsigned char handle[];
+};
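A sketch of reading the FID record described above for a FAN_EVENT_INFO_TYPE_DFID_NAME record, where a NUL-terminated name follows the opaque, variable-size file handle. The surrounding fanotify event-read loop is omitted:

#define _GNU_SOURCE		/* for struct file_handle */
#include <fcntl.h>
#include <stdio.h>
#include <sys/fanotify.h>

static void print_dfid_name(const struct fanotify_event_info_fid *fid)
{
	const struct file_handle *fh = (const void *)fid->handle;

	if (fid->hdr.info_type == FAN_EVENT_INFO_TYPE_DFID_NAME) {
		/* The name sits right after the variable-size handle. */
		const char *name = (const char *)fh->f_handle +
				   fh->handle_bytes;
		printf("name: %s\n", name);
	}
}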
+
+/*
+ * This structure is used for info records of type FAN_EVENT_INFO_TYPE_PIDFD.
+ * It holds a pidfd for the pid that was responsible for generating an event.
+ */
+struct fanotify_event_info_pidfd {
+ struct fanotify_event_info_header hdr;
+ __s32 pidfd;
+};
+
+struct fanotify_event_info_error {
+ struct fanotify_event_info_header hdr;
+ __s32 error;
+ __u32 error_count;
};
struct fanotify_response {
@@ -147,6 +200,8 @@ struct fanotify_response {
/* No fd set in event */
#define FAN_NOFD -1
+#define FAN_NOPIDFD FAN_NOFD
+#define FAN_EPIDFD -2
/* Helper functions to deal with fanotify_event_metadata buffers */
#define FAN_EVENT_METADATA_LEN (sizeof(struct fanotify_event_metadata))
diff --git a/include/uapi/linux/fb.h b/include/uapi/linux/fb.h
index b6aac7ee1f67..3a49913d006c 100644
--- a/include/uapi/linux/fb.h
+++ b/include/uapi/linux/fb.h
@@ -182,7 +182,7 @@ struct fb_fix_screeninfo {
*
* For pseudocolor: offset and length should be the same for all color
* components. Offset specifies the position of the least significant bit
- * of the pallette index in a pixel value. Length indicates the number
+ * of the palette index in a pixel value. Length indicates the number
* of available palette entries (i.e. # of entries = 1 << length).
*/
struct fb_bitfield {
@@ -205,6 +205,7 @@ struct fb_bitfield {
#define FB_ACTIVATE_ALL 64 /* change all VCs on this fb */
#define FB_ACTIVATE_FORCE 128 /* force apply even when no change*/
#define FB_ACTIVATE_INV_MODE 256 /* invalidate videomode */
+#define FB_ACTIVATE_KD_TEXT 512 /* for KDSET vt ioctl */
#define FB_ACCELF_TEXT 1 /* (OBSOLETE) see fb_info.flags and vc_mode */
diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h
index ca88b7bce553..2f86b2ad6d7e 100644
--- a/include/uapi/linux/fcntl.h
+++ b/include/uapi/linux/fcntl.h
@@ -84,10 +84,20 @@
#define DN_ATTRIB 0x00000020 /* File changed attributes */
#define DN_MULTISHOT 0x80000000 /* Don't remove notifier */
+/*
+ * The constants AT_REMOVEDIR and AT_EACCESS have the same value. AT_EACCESS is
+ * meaningful only to faccessat, while AT_REMOVEDIR is meaningful only to
+ * unlinkat. The two functions do completely different things, so the flags
+ * can be allowed to overlap. For example, passing AT_REMOVEDIR to faccessat
+ * would be undefined behavior, and treating it as equivalent to AT_EACCESS is
+ * one valid interpretation of that undefined behavior.
+ */
#define AT_FDCWD -100 /* Special value used to indicate
openat should use the current
working directory. */
#define AT_SYMLINK_NOFOLLOW 0x100 /* Do not follow symbolic links. */
+#define AT_EACCESS 0x200 /* Test access permitted for
+ effective IDs, not real IDs. */
#define AT_REMOVEDIR 0x200 /* Remove directory instead of
unlinking file. */
#define AT_SYMLINK_FOLLOW 0x400 /* Follow symbolic links. */
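A sketch showing why the overlap is harmless in practice: each flag only ever reaches the syscall it is defined for. The file names are hypothetical:

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Same numeric value (0x200), entirely different meanings: */
	faccessat(AT_FDCWD, "run.sh", X_OK, AT_EACCESS); /* effective IDs */
	unlinkat(AT_FDCWD, "tmpdir", AT_REMOVEDIR);      /* rmdir semantics */
	return 0;
}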
diff --git a/include/uapi/linux/fd.h b/include/uapi/linux/fd.h
index 90fb94712c41..7022e3413dbc 100644
--- a/include/uapi/linux/fd.h
+++ b/include/uapi/linux/fd.h
@@ -49,11 +49,11 @@ struct floppy_struct {
#define FDCLRPRM _IO(2, 0x41)
/* clear user-defined parameters */
-#define FDSETPRM _IOW(2, 0x42, struct floppy_struct)
+#define FDSETPRM _IOW(2, 0x42, struct floppy_struct)
#define FDSETMEDIAPRM FDSETPRM
/* set user-defined parameters for current media */
-#define FDDEFPRM _IOW(2, 0x43, struct floppy_struct)
+#define FDDEFPRM _IOW(2, 0x43, struct floppy_struct)
#define FDGETPRM _IOR(2, 0x04, struct floppy_struct)
#define FDDEFMEDIAPRM FDDEFPRM
#define FDGETMEDIAPRM FDGETPRM
@@ -65,7 +65,7 @@ struct floppy_struct {
/* issue/don't issue kernel messages on media type change */
-/*
+/*
* Formatting (obsolete)
*/
#define FD_FILL_BYTE 0xF6 /* format fill byte. */
@@ -126,13 +126,13 @@ typedef char floppy_drive_name[16];
*/
struct floppy_drive_params {
signed char cmos; /* CMOS type */
-
- /* Spec2 is (HLD<<1 | ND), where HLD is head load time (1=2ms, 2=4 ms
+
+ /* Spec2 is (HLD<<1 | ND), where HLD is head load time (1=2ms, 2=4 ms
* etc) and ND is set means no DMA. Hardcoded to 6 (HLD=6ms, use DMA).
*/
unsigned long max_dtr; /* Step rate, usec */
unsigned long hlt; /* Head load/settle time, msec */
- unsigned long hut; /* Head unload time (remnant of
+ unsigned long hut; /* Head unload time (remnant of
* 8" drives) */
unsigned long srt; /* Step rate, usec */
@@ -145,12 +145,12 @@ struct floppy_drive_params {
unsigned char rps; /* rotations per second */
unsigned char tracks; /* maximum number of tracks */
unsigned long timeout; /* timeout for interrupt requests */
-
- unsigned char interleave_sect; /* if there are more sectors, use
+
+ unsigned char interleave_sect; /* if there are more sectors, use
* interleave */
-
+
struct floppy_max_errors max_errors;
-
+
char flags; /* various flags, including ftd_msg */
/*
* Announce successful media type detection and media information loss after
@@ -162,7 +162,7 @@ struct floppy_drive_params {
#define FD_BROKEN_DCL 0x20
#define FD_DEBUG 0x02
#define FD_SILENT_DCL_CLEAR 0x4
-#define FD_INVERTED_DCL 0x80 /* must be 0x80, because of hardware
+#define FD_INVERTED_DCL 0x80 /* must be 0x80, because of hardware
considerations */
char read_track; /* use readtrack during probing? */
@@ -172,9 +172,12 @@ struct floppy_drive_params {
* used in succession to try to read the disk. If the FDC cannot lock onto
* the disk, the next format is tried. This uses the variable 'probing'.
*/
- short autodetect[8]; /* autodetected formats */
-
- int checkfreq; /* how often should the drive be checked for disk
+
+#define FD_AUTODETECT_SIZE 8
+
+ short autodetect[FD_AUTODETECT_SIZE]; /* autodetected formats */
+
+ int checkfreq; /* how often should the drive be checked for disk
* changes */
int native_format; /* native format of this drive */
};
@@ -222,13 +225,13 @@ struct floppy_drive_struct {
* decremented after each probe.
*/
int keep_data;
-
+
/* Prevent "aliased" accesses. */
int fd_ref;
int fd_device;
- unsigned long last_checked; /* when was the drive last checked for a disk
+ unsigned long last_checked; /* when was the drive last checked for a disk
* change? */
-
+
char *dmabuf;
int bufblocks;
};
@@ -252,7 +255,7 @@ enum reset_mode {
/*
* FDC state
*/
-struct floppy_fdc_state {
+struct floppy_fdc_state {
int spec1; /* spec1 value last used */
int spec2; /* spec2 value last used */
int dtr;
@@ -299,16 +302,16 @@ struct floppy_write_errors {
* to the user process are not counted.
*/
- unsigned int write_errors; /* number of physical write errors
+ unsigned int write_errors; /* number of physical write errors
* encountered */
-
+
/* position of first and last write errors */
unsigned long first_error_sector;
int first_error_generation;
unsigned long last_error_sector;
int last_error_generation;
-
- unsigned int badness; /* highest retry count for a read or write
+
+ unsigned int badness; /* highest retry count for a read or write
* operation */
};
@@ -332,7 +335,7 @@ struct floppy_raw_cmd {
#define FD_RAW_DISK_CHANGE 4 /* out: disk change flag was set */
#define FD_RAW_INTR 8 /* wait for an interrupt */
#define FD_RAW_SPIN 0x10 /* spin up the disk for this command */
-#define FD_RAW_NO_MOTOR_AFTER 0x20 /* switch the motor off after command
+#define FD_RAW_NO_MOTOR_AFTER 0x20 /* switch the motor off after command
* completion */
#define FD_RAW_NEED_DISK 0x40 /* this command needs a disk to be present */
#define FD_RAW_NEED_SEEK 0x80 /* this command uses an implied seek (soft) */
@@ -350,17 +353,32 @@ struct floppy_raw_cmd {
void __user *data;
char *kernel_data; /* location of data buffer in the kernel */
- struct floppy_raw_cmd *next; /* used for chaining of raw cmd's
+ struct floppy_raw_cmd *next; /* used for chaining of raw cmd's
* within the kernel */
long length; /* in: length of dma transfer. out: remaining bytes */
long phys_length; /* physical length, if different from dma length */
int buffer_length; /* length of allocated buffer */
unsigned char rate;
+
+#define FD_RAW_CMD_SIZE 16
+#define FD_RAW_REPLY_SIZE 16
+#define FD_RAW_CMD_FULLSIZE (FD_RAW_CMD_SIZE + 1 + FD_RAW_REPLY_SIZE)
+
+ /* The command may take up the space initially intended for the reply
+ * and the reply count. Needed for long 82078 commands such as RESTORE,
+ * which takes 17 command bytes.
+ */
+
unsigned char cmd_count;
- unsigned char cmd[16];
- unsigned char reply_count;
- unsigned char reply[16];
+ union {
+ struct {
+ unsigned char cmd[FD_RAW_CMD_SIZE];
+ unsigned char reply_count;
+ unsigned char reply[FD_RAW_REPLY_SIZE];
+ };
+ unsigned char fullcmd[FD_RAW_CMD_FULLSIZE];
+ };
int track;
int resultcode;
diff --git a/include/uapi/linux/fdreg.h b/include/uapi/linux/fdreg.h
index 5e2981d5c523..10d33632939d 100644
--- a/include/uapi/linux/fdreg.h
+++ b/include/uapi/linux/fdreg.h
@@ -7,26 +7,28 @@
* Handbook", Sanches and Canton.
*/
-#ifdef FDPATCHES
-#define FD_IOPORT fdc_state[fdc].address
-#else
-/* It would be a lot saner just to force fdc_state[fdc].address to always
- be set ! FIXME */
-#define FD_IOPORT 0x3f0
-#endif
-
-/* Fd controller regs. S&C, about page 340 */
-#define FD_STATUS (4 + FD_IOPORT )
-#define FD_DATA (5 + FD_IOPORT )
+/* 82077's auxiliary status registers A & B (R) */
+#define FD_SRA 0
+#define FD_SRB 1
/* Digital Output Register */
-#define FD_DOR (2 + FD_IOPORT )
+#define FD_DOR 2
+
+/* 82077's tape drive register (R/W) */
+#define FD_TDR 3
+
+/* 82077's data rate select register (W) */
+#define FD_DSR 4
+
+/* Fd controller regs. S&C, about page 340 */
+#define FD_STATUS 4
+#define FD_DATA 5
/* Digital Input Register (read) */
-#define FD_DIR (7 + FD_IOPORT )
+#define FD_DIR 7
/* Diskette Control Register (write)*/
-#define FD_DCR (7 + FD_IOPORT )
+#define FD_DCR 7
/* Bits of main status register */
#define STATUS_BUSYMASK 0x0F /* drive busy mask */
diff --git a/include/uapi/linux/fiemap.h b/include/uapi/linux/fiemap.h
index 8c0bc24d5d95..24ca0c00cae3 100644
--- a/include/uapi/linux/fiemap.h
+++ b/include/uapi/linux/fiemap.h
@@ -9,8 +9,8 @@
* Andreas Dilger <adilger@sun.com>
*/
-#ifndef _LINUX_FIEMAP_H
-#define _LINUX_FIEMAP_H
+#ifndef _UAPI_LINUX_FIEMAP_H
+#define _UAPI_LINUX_FIEMAP_H
#include <linux/types.h>
@@ -34,7 +34,7 @@ struct fiemap {
__u32 fm_mapped_extents;/* number of extents that were mapped (out) */
__u32 fm_extent_count; /* size of fm_extents array (in) */
__u32 fm_reserved;
- struct fiemap_extent fm_extents[0]; /* array of mapped extents (out) */
+ struct fiemap_extent fm_extents[]; /* array of mapped extents (out) */
};
#define FIEMAP_MAX_OFFSET (~0ULL)
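A sketch of using the flexible fm_extents[] array with FS_IOC_FIEMAP from linux/fs.h; the extent capacity of 32 is an arbitrary example value:

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

static void dump_extents(int fd)
{
	unsigned int i, n = 32;		/* arbitrary example capacity */
	struct fiemap *fm = calloc(1, sizeof(*fm) +
				      n * sizeof(struct fiemap_extent));

	if (!fm)
		return;
	fm->fm_start = 0;
	fm->fm_length = FIEMAP_MAX_OFFSET;	/* map the whole file */
	fm->fm_extent_count = n;
	if (!ioctl(fd, FS_IOC_FIEMAP, fm))
		for (i = 0; i < fm->fm_mapped_extents; i++)
			printf("%llu +%llu\n",
			       (unsigned long long)fm->fm_extents[i].fe_logical,
			       (unsigned long long)fm->fm_extents[i].fe_length);
	free(fm);
}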
@@ -67,4 +67,4 @@ struct fiemap {
#define FIEMAP_EXTENT_SHARED 0x00002000 /* Space shared with other
* files. */
-#endif /* _LINUX_FIEMAP_H */
+#endif /* _UAPI_LINUX_FIEMAP_H */
diff --git a/include/uapi/linux/firewire-cdev.h b/include/uapi/linux/firewire-cdev.h
index 1acd2b179aef..92be3ea3c6e0 100644
--- a/include/uapi/linux/firewire-cdev.h
+++ b/include/uapi/linux/firewire-cdev.h
@@ -118,7 +118,7 @@ struct fw_cdev_event_response {
__u32 type;
__u32 rcode;
__u32 length;
- __u32 data[0];
+ __u32 data[];
};
/**
@@ -142,7 +142,7 @@ struct fw_cdev_event_request {
__u64 offset;
__u32 handle;
__u32 length;
- __u32 data[0];
+ __u32 data[];
};
/**
@@ -205,7 +205,7 @@ struct fw_cdev_event_request2 {
__u32 generation;
__u32 handle;
__u32 length;
- __u32 data[0];
+ __u32 data[];
};
/**
@@ -265,7 +265,7 @@ struct fw_cdev_event_iso_interrupt {
__u32 type;
__u32 cycle;
__u32 header_length;
- __u32 header[0];
+ __u32 header[];
};
/**
@@ -308,7 +308,7 @@ struct fw_cdev_event_iso_interrupt_mc {
/**
* struct fw_cdev_event_iso_resource - Iso resources were allocated or freed
* @closure: See &fw_cdev_event_common;
- * set by %FW_CDEV_IOC_(DE)ALLOCATE_ISO_RESOURCE(_ONCE) ioctl
+ * set by ``FW_CDEV_IOC_(DE)ALLOCATE_ISO_RESOURCE(_ONCE)`` ioctl
* @type: %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or
* %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED
* @handle: Reference by which an allocated resource can be deallocated
@@ -355,7 +355,7 @@ struct fw_cdev_event_phy_packet {
__u32 type;
__u32 rcode;
__u32 length;
- __u32 data[0];
+ __u32 data[];
};
/**
@@ -803,7 +803,7 @@ struct fw_cdev_set_iso_channels {
*/
struct fw_cdev_iso_packet {
__u32 control;
- __u32 header[0];
+ __u32 header[];
};
/**
@@ -844,7 +844,7 @@ struct fw_cdev_queue_iso {
* struct fw_cdev_start_iso - Start an isochronous transmission or reception
* @cycle: Cycle in which to start I/O. If @cycle is greater than or
* equal to 0, the I/O will start on that cycle.
- * @sync: Determines the value to wait for for receive packets that have
+ * @sync: Determines the value to wait for in receive packets that have
* the %FW_CDEV_ISO_SYNC bit set
* @tags: Tag filter bit mask. Only valid for isochronous reception.
* Determines the tag values for which packets will be accepted.
diff --git a/include/uapi/linux/fpga-dfl.h b/include/uapi/linux/fpga-dfl.h
index ec70a0746e59..1621b077bf21 100644
--- a/include/uapi/linux/fpga-dfl.h
+++ b/include/uapi/linux/fpga-dfl.h
@@ -151,6 +151,65 @@ struct dfl_fpga_port_dma_unmap {
#define DFL_FPGA_PORT_DMA_UNMAP _IO(DFL_FPGA_MAGIC, DFL_PORT_BASE + 4)
+/**
+ * struct dfl_fpga_irq_set - the argument for DFL_FPGA_XXX_SET_IRQ ioctl.
+ *
+ * @start: Index of the first irq.
+ * @count: The number of eventfd handlers.
+ * @evtfds: Eventfd handlers.
+ */
+struct dfl_fpga_irq_set {
+ __u32 start;
+ __u32 count;
+ __s32 evtfds[];
+};
+
+/**
+ * DFL_FPGA_PORT_ERR_GET_IRQ_NUM - _IOR(DFL_FPGA_MAGIC, DFL_PORT_BASE + 5,
+ * __u32 num_irqs)
+ *
+ * Get the number of irqs supported by the fpga port error reporting private
+ * feature. Currently hardware supports up to 1 irq.
+ * Return: 0 on success, -errno on failure.
+ */
+#define DFL_FPGA_PORT_ERR_GET_IRQ_NUM _IOR(DFL_FPGA_MAGIC, \
+ DFL_PORT_BASE + 5, __u32)
+
+/**
+ * DFL_FPGA_PORT_ERR_SET_IRQ - _IOW(DFL_FPGA_MAGIC, DFL_PORT_BASE + 6,
+ * struct dfl_fpga_irq_set)
+ *
+ * Set fpga port error reporting interrupt trigger if evtfds[n] is valid.
+ * Unset related interrupt trigger if evtfds[n] is a negative value.
+ * Return: 0 on success, -errno on failure.
+ */
+#define DFL_FPGA_PORT_ERR_SET_IRQ _IOW(DFL_FPGA_MAGIC, \
+ DFL_PORT_BASE + 6, \
+ struct dfl_fpga_irq_set)
+
+/**
+ * DFL_FPGA_PORT_UINT_GET_IRQ_NUM - _IOR(DFL_FPGA_MAGIC, DFL_PORT_BASE + 7,
+ * __u32 num_irqs)
+ *
+ * Get the number of irqs supported by the fpga AFU interrupt private
+ * feature.
+ * Return: 0 on success, -errno on failure.
+ */
+#define DFL_FPGA_PORT_UINT_GET_IRQ_NUM _IOR(DFL_FPGA_MAGIC, \
+ DFL_PORT_BASE + 7, __u32)
+
+/**
+ * DFL_FPGA_PORT_UINT_SET_IRQ - _IOW(DFL_FPGA_MAGIC, DFL_PORT_BASE + 8,
+ * struct dfl_fpga_irq_set)
+ *
+ * Set fpga AFU interrupt trigger if evtfds[n] is valid.
+ * Unset related interrupt trigger if evtfds[n] is a negative value.
+ * Return: 0 on success, -errno on failure.
+ */
+#define DFL_FPGA_PORT_UINT_SET_IRQ _IOW(DFL_FPGA_MAGIC, \
+ DFL_PORT_BASE + 8, \
+ struct dfl_fpga_irq_set)
+
/* IOCTLs for FME file descriptor */
/**
@@ -194,4 +253,27 @@ struct dfl_fpga_fme_port_pr {
*/
#define DFL_FPGA_FME_PORT_ASSIGN _IOW(DFL_FPGA_MAGIC, DFL_FME_BASE + 2, int)
+/**
+ * DFL_FPGA_FME_ERR_GET_IRQ_NUM - _IOR(DFL_FPGA_MAGIC, DFL_FME_BASE + 3,
+ * __u32 num_irqs)
+ *
+ * Get the number of irqs supported by the fpga fme error reporting private
+ * feature. Currently hardware supports up to 1 irq.
+ * Return: 0 on success, -errno on failure.
+ */
+#define DFL_FPGA_FME_ERR_GET_IRQ_NUM _IOR(DFL_FPGA_MAGIC, \
+ DFL_FME_BASE + 3, __u32)
+
+/**
+ * DFL_FPGA_FME_ERR_SET_IRQ - _IOW(DFL_FPGA_MAGIC, DFL_FME_BASE + 4,
+ * struct dfl_fpga_irq_set)
+ *
+ * Set fpga fme error reporting interrupt trigger if evtfds[n] is valid.
+ * Unset related interrupt trigger if evtfds[n] is a negative value.
+ * Return: 0 on success, -errno on failure.
+ */
+#define DFL_FPGA_FME_ERR_SET_IRQ _IOW(DFL_FPGA_MAGIC, \
+ DFL_FME_BASE + 4, \
+ struct dfl_fpga_irq_set)
+
#endif /* _UAPI_LINUX_FPGA_DFL_H */
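
A hedged usage sketch for the new *_SET_IRQ ioctls (not from this patch):
struct dfl_fpga_irq_set ends in a flexible evtfds[] array, so the
allocation must include one __s32 slot per eventfd. The port fd is assumed
to be an already opened DFL port device:

  #include <stdlib.h>
  #include <sys/eventfd.h>
  #include <sys/ioctl.h>
  #include <linux/fpga-dfl.h>

  /* Arm the port error irq with one eventfd; returns the eventfd or -1. */
  int arm_port_err_irq(int port_fd)
  {
      struct dfl_fpga_irq_set *irqs;
      __u32 num_irqs;
      int evfd, ret;

      if (ioctl(port_fd, DFL_FPGA_PORT_ERR_GET_IRQ_NUM, &num_irqs) ||
          num_irqs < 1)
          return -1;

      evfd = eventfd(0, 0);
      if (evfd < 0)
          return -1;

      /* flexible evtfds[]: size the allocation for one entry */
      irqs = calloc(1, sizeof(*irqs) + sizeof(__s32));
      if (!irqs)
          return -1;
      irqs->start = 0;
      irqs->count = 1;
      irqs->evtfds[0] = evfd;

      ret = ioctl(port_fd, DFL_FPGA_PORT_ERR_SET_IRQ, irqs);
      free(irqs);
      return ret ? -1 : evfd;
  }
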
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 379a612f8f1d..b7b56871029c 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -90,7 +90,7 @@ struct file_dedupe_range {
__u16 dest_count; /* in - total elements in info array */
__u16 reserved1; /* must be zero */
__u32 reserved2; /* must be zero */
- struct file_dedupe_range_info info[0];
+ struct file_dedupe_range_info info[];
};
/* And dynamically-tunable limits and defaults: */
@@ -184,8 +184,9 @@ struct fsxattr {
#define BLKSECDISCARD _IO(0x12,125)
#define BLKROTATIONAL _IO(0x12,126)
#define BLKZEROOUT _IO(0x12,127)
+#define BLKGETDISKSEQ _IOR(0x12,128,__u64)
/*
- * A jump here: 130-131 are reserved for zoned block devices
+ * A jump here: 130-136 are reserved for zoned block devices
* (see uapi/linux/blkzoned.h)
*/
@@ -262,6 +263,7 @@ struct fsxattr {
#define FS_EA_INODE_FL 0x00200000 /* Inode used for large EA */
#define FS_EOFBLOCKS_FL 0x00400000 /* Reserved for ext4 */
#define FS_NOCOW_FL 0x00800000 /* Do not cow file */
+#define FS_DAX_FL 0x02000000 /* Inode is DAX */
#define FS_INLINE_DATA_FL 0x10000000 /* Reserved for ext4 */
#define FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */
#define FS_CASEFOLD_FL 0x40000000 /* Folder is case insensitive */
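
For context, a minimal sketch of the new BLKGETDISKSEQ ioctl (not part of
the patch; the device path is illustrative). The disk sequence number is
returned by value into a __u64:

  #include <stdio.h>
  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/fs.h>

  int main(void)
  {
      __u64 seq;
      int fd = open("/dev/sda", O_RDONLY);    /* illustrative device */

      if (fd < 0)
          return 1;
      if (ioctl(fd, BLKGETDISKSEQ, &seq) == 0)
          printf("diskseq: %llu\n", (unsigned long long)seq);
      close(fd);
      return 0;
  }
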
diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h
index 0d8a6f47711c..a756b29afcc2 100644
--- a/include/uapi/linux/fscrypt.h
+++ b/include/uapi/linux/fscrypt.h
@@ -19,7 +19,7 @@
#define FSCRYPT_POLICY_FLAGS_PAD_MASK 0x03
#define FSCRYPT_POLICY_FLAG_DIRECT_KEY 0x04
#define FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 0x08
-#define FSCRYPT_POLICY_FLAGS_VALID 0x0F
+#define FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32 0x10
/* Encryption algorithms */
#define FSCRYPT_MODE_AES_256_XTS 1
@@ -27,7 +27,8 @@
#define FSCRYPT_MODE_AES_128_CBC 5
#define FSCRYPT_MODE_AES_128_CTS 6
#define FSCRYPT_MODE_ADIANTUM 9
-#define __FSCRYPT_MODE_MAX 9
+#define FSCRYPT_MODE_AES_256_HCTR2 10
+/* If adding a mode number > 10, update FSCRYPT_MODE_MAX in fscrypt_private.h */
/*
* Legacy policy version; ad-hoc KDF and no key verification.
@@ -44,7 +45,6 @@ struct fscrypt_policy_v1 {
__u8 flags;
__u8 master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE];
};
-#define fscrypt_policy fscrypt_policy_v1
/*
* Process-subscribed "logon" key description prefix and payload format.
@@ -155,19 +155,21 @@ struct fscrypt_get_key_status_arg {
__u32 __out_reserved[13];
};
-#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy)
+#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy_v1)
#define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16])
-#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy)
+#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy_v1)
#define FS_IOC_GET_ENCRYPTION_POLICY_EX _IOWR('f', 22, __u8[9]) /* size + version */
#define FS_IOC_ADD_ENCRYPTION_KEY _IOWR('f', 23, struct fscrypt_add_key_arg)
#define FS_IOC_REMOVE_ENCRYPTION_KEY _IOWR('f', 24, struct fscrypt_remove_key_arg)
#define FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS _IOWR('f', 25, struct fscrypt_remove_key_arg)
#define FS_IOC_GET_ENCRYPTION_KEY_STATUS _IOWR('f', 26, struct fscrypt_get_key_status_arg)
+#define FS_IOC_GET_ENCRYPTION_NONCE _IOR('f', 27, __u8[16])
/**********************************************************************/
/* old names; don't add anything new here! */
#ifndef __KERNEL__
+#define fscrypt_policy fscrypt_policy_v1
#define FS_KEY_DESCRIPTOR_SIZE FSCRYPT_KEY_DESCRIPTOR_SIZE
#define FS_POLICY_FLAGS_PAD_4 FSCRYPT_POLICY_FLAGS_PAD_4
#define FS_POLICY_FLAGS_PAD_8 FSCRYPT_POLICY_FLAGS_PAD_8
@@ -175,7 +177,7 @@ struct fscrypt_get_key_status_arg {
#define FS_POLICY_FLAGS_PAD_32 FSCRYPT_POLICY_FLAGS_PAD_32
#define FS_POLICY_FLAGS_PAD_MASK FSCRYPT_POLICY_FLAGS_PAD_MASK
#define FS_POLICY_FLAG_DIRECT_KEY FSCRYPT_POLICY_FLAG_DIRECT_KEY
-#define FS_POLICY_FLAGS_VALID FSCRYPT_POLICY_FLAGS_VALID
+#define FS_POLICY_FLAGS_VALID 0x07 /* contains old flags only */
#define FS_ENCRYPTION_MODE_INVALID 0 /* never used */
#define FS_ENCRYPTION_MODE_AES_256_XTS FSCRYPT_MODE_AES_256_XTS
#define FS_ENCRYPTION_MODE_AES_256_GCM 2 /* never used */
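
A minimal sketch of the new FS_IOC_GET_ENCRYPTION_NONCE ioctl (not from
this patch): it fills a 16-byte buffer with the file's encryption nonce,
which is mainly useful for testing:

  #include <stdio.h>
  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/fscrypt.h>

  int main(int argc, char **argv)
  {
      __u8 nonce[16];
      int fd, i;

      if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
          return 1;
      if (ioctl(fd, FS_IOC_GET_ENCRYPTION_NONCE, nonce) == 0) {
          for (i = 0; i < 16; i++)
              printf("%02x", nonce[i]);
          putchar('\n');
      }
      close(fd);
      return 0;
  }
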
diff --git a/include/uapi/linux/fsi.h b/include/uapi/linux/fsi.h
index da577ecd90e7..b2f1977378c7 100644
--- a/include/uapi/linux/fsi.h
+++ b/include/uapi/linux/fsi.h
@@ -55,4 +55,18 @@ struct scom_access {
#define FSI_SCOM_WRITE _IOWR('s', 0x02, struct scom_access)
#define FSI_SCOM_RESET _IOW('s', 0x03, __u32)
+/*
+ * /dev/sbefifo* ioctl interface
+ */
+
+/**
+ * FSI_SBEFIFO_READ_TIMEOUT sets the read timeout for response from SBE.
+ *
+ * The read timeout is specified in seconds. The minimum value of read
+ * timeout is 10 seconds (default) and the maximum value of read timeout is
+ * 120 seconds. A read timeout of 0 resets the value to the default
+ * (10 seconds).
+ */
+#define FSI_SBEFIFO_READ_TIMEOUT_SECONDS _IOW('s', 0x00, __u32)
+
#endif /* _UAPI_LINUX_FSI_H */
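
A hedged sketch of the new sbefifo timeout ioctl (not from this patch;
the device path is illustrative). The value must be 0 (reset to default)
or lie within the documented 10..120 second range:

  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/fsi.h>

  int set_sbefifo_timeout(void)
  {
      __u32 timeout = 30;    /* 0 resets, otherwise 10..120 seconds */
      int fd = open("/dev/sbefifo1", O_RDWR);

      if (fd < 0)
          return -1;
      if (ioctl(fd, FSI_SBEFIFO_READ_TIMEOUT_SECONDS, &timeout)) {
          close(fd);
          return -1;
      }
      close(fd);
      return 0;
  }
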
diff --git a/include/uapi/linux/fsl_mc.h b/include/uapi/linux/fsl_mc.h
new file mode 100644
index 000000000000..e57451570033
--- /dev/null
+++ b/include/uapi/linux/fsl_mc.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Management Complex (MC) userspace public interface
+ *
+ * Copyright 2021 NXP
+ *
+ */
+#ifndef _UAPI_FSL_MC_H_
+#define _UAPI_FSL_MC_H_
+
+#include <linux/types.h>
+
+#define MC_CMD_NUM_OF_PARAMS 7
+
+/**
+ * struct fsl_mc_command - Management Complex (MC) command structure
+ * @header: MC command header
+ * @params: MC command parameters
+ *
+ * Used by FSL_MC_SEND_MC_COMMAND
+ */
+struct fsl_mc_command {
+ __le64 header;
+ __le64 params[MC_CMD_NUM_OF_PARAMS];
+};
+
+#define FSL_MC_SEND_CMD_IOCTL_TYPE 'R'
+#define FSL_MC_SEND_CMD_IOCTL_SEQ 0xE0
+
+#define FSL_MC_SEND_MC_COMMAND \
+ _IOWR(FSL_MC_SEND_CMD_IOCTL_TYPE, FSL_MC_SEND_CMD_IOCTL_SEQ, \
+ struct fsl_mc_command)
+
+#endif /* _UAPI_FSL_MC_H_ */
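
For illustration (not from the patch), sending one raw command through the
new interface; the header value here is a placeholder, not a real MC
command encoding:

  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/fsl_mc.h>

  int send_mc_command(int mc_fd, __le64 header)
  {
      struct fsl_mc_command cmd;

      memset(&cmd, 0, sizeof(cmd));
      cmd.header = header;

      /* _IOWR: on success the same struct carries the response back */
      return ioctl(mc_fd, FSL_MC_SEND_MC_COMMAND, &cmd);
  }
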
diff --git a/include/uapi/linux/fsmap.h b/include/uapi/linux/fsmap.h
index 91fd519a3f7d..c690d17f1d07 100644
--- a/include/uapi/linux/fsmap.h
+++ b/include/uapi/linux/fsmap.h
@@ -69,7 +69,7 @@ struct fsmap_head {
};
/* Size of an fsmap_head with room for nr records. */
-static inline size_t
+static inline __kernel_size_t
fsmap_sizeof(
unsigned int nr)
{
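
A short sketch of where this helper is used (not from the patch):
fsmap_sizeof() sizes the head plus its trailing fmh_recs[] records, and
returning __kernel_size_t avoids pulling userspace's size_t into a uapi
header. An allocation helper under those assumptions:

  #include <stdlib.h>
  #include <linux/fsmap.h>

  /* Allocate a head with room for nr records, as fsmap_sizeof() computes. */
  struct fsmap_head *alloc_fsmap(unsigned int nr)
  {
      struct fsmap_head *head = calloc(1, fsmap_sizeof(nr));

      if (head)
          head->fmh_count = nr;    /* records the kernel may fill */
      return head;
  }
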
diff --git a/include/uapi/linux/fsverity.h b/include/uapi/linux/fsverity.h
index da0daf6c193b..15384e22e331 100644
--- a/include/uapi/linux/fsverity.h
+++ b/include/uapi/linux/fsverity.h
@@ -34,7 +34,70 @@ struct fsverity_digest {
__u8 digest[];
};
+/*
+ * Struct containing a file's Merkle tree properties. The fs-verity file digest
+ * is the hash of this struct. A userspace program needs this struct only if it
+ * needs to compute fs-verity file digests itself, e.g. in order to sign files.
+ * It isn't needed just to enable fs-verity on a file.
+ *
+ * Note: when computing the file digest, 'sig_size' and 'signature' must be left
+ * zero and empty, respectively. These fields are present only because some
+ * filesystems reuse this struct as part of their on-disk format.
+ */
+struct fsverity_descriptor {
+ __u8 version; /* must be 1 */
+ __u8 hash_algorithm; /* Merkle tree hash algorithm */
+ __u8 log_blocksize; /* log2 of size of data and tree blocks */
+ __u8 salt_size; /* size of salt in bytes; 0 if none */
+#ifdef __KERNEL__
+ __le32 sig_size;
+#else
+ __le32 __reserved_0x04; /* must be 0 */
+#endif
+ __le64 data_size; /* size of file the Merkle tree is built over */
+ __u8 root_hash[64]; /* Merkle tree root hash */
+ __u8 salt[32]; /* salt prepended to each hashed block */
+ __u8 __reserved[144]; /* must be 0's */
+#ifdef __KERNEL__
+ __u8 signature[];
+#endif
+};
+
+/*
+ * Format in which fs-verity file digests are signed in built-in signatures.
+ * This is the same as 'struct fsverity_digest', except here some magic bytes
+ * are prepended to provide some context about what is being signed in case the
+ * same key is used for non-fsverity purposes, and here the fields have fixed
+ * endianness.
+ *
+ * This struct is specific to the built-in signature verification support, which
+ * is optional. fs-verity users may also verify signatures in userspace, in
+ * which case userspace is responsible for deciding on what bytes are signed.
+ * This struct may still be used, but it doesn't have to be. For example,
+ * userspace could instead use a string like "sha256:$digest_as_hex_string".
+ */
+struct fsverity_formatted_digest {
+ char magic[8]; /* must be "FSVerity" */
+ __le16 digest_algorithm;
+ __le16 digest_size;
+ __u8 digest[];
+};
+
+#define FS_VERITY_METADATA_TYPE_MERKLE_TREE 1
+#define FS_VERITY_METADATA_TYPE_DESCRIPTOR 2
+#define FS_VERITY_METADATA_TYPE_SIGNATURE 3
+
+struct fsverity_read_metadata_arg {
+ __u64 metadata_type;
+ __u64 offset;
+ __u64 length;
+ __u64 buf_ptr;
+ __u64 __reserved;
+};
+
#define FS_IOC_ENABLE_VERITY _IOW('f', 133, struct fsverity_enable_arg)
#define FS_IOC_MEASURE_VERITY _IOWR('f', 134, struct fsverity_digest)
+#define FS_IOC_READ_VERITY_METADATA \
+ _IOWR('f', 135, struct fsverity_read_metadata_arg)
#endif /* _UAPI_LINUX_FSVERITY_H */
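
A minimal sketch of the new metadata ioctl (not part of this patch),
reading the 256-byte descriptor defined above from a file that already has
fs-verity enabled; the ioctl returns the number of bytes read:

  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <linux/types.h>
  #include <linux/fsverity.h>

  int read_verity_descriptor(int fd)
  {
      char buf[256];    /* descriptor size without the appended signature */
      struct fsverity_read_metadata_arg arg = {
          .metadata_type = FS_VERITY_METADATA_TYPE_DESCRIPTOR,
          .offset = 0,
          .length = sizeof(buf),
          .buf_ptr = (__u64)(unsigned long)buf,
      };
      int ret = ioctl(fd, FS_IOC_READ_VERITY_METADATA, &arg);

      if (ret < 0)
          return -1;
      printf("read %d descriptor bytes\n", ret);
      return 0;
  }
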
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 373cada89815..76ee8f9e024a 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -172,6 +172,31 @@
* - add FUSE_WRITE_KILL_PRIV flag
* - add FUSE_SETUPMAPPING and FUSE_REMOVEMAPPING
* - add map_alignment to fuse_init_out, add FUSE_MAP_ALIGNMENT flag
+ *
+ * 7.32
+ * - add flags to fuse_attr, add FUSE_ATTR_SUBMOUNT, add FUSE_SUBMOUNTS
+ *
+ * 7.33
+ * - add FUSE_HANDLE_KILLPRIV_V2, FUSE_WRITE_KILL_SUIDGID, FATTR_KILL_SUIDGID
+ * - add FUSE_OPEN_KILL_SUIDGID
+ * - extend fuse_setxattr_in, add FUSE_SETXATTR_EXT
+ * - add FUSE_SETXATTR_ACL_KILL_SGID
+ *
+ * 7.34
+ * - add FUSE_SYNCFS
+ *
+ * 7.35
+ * - add FOPEN_NOFLUSH
+ *
+ * 7.36
+ * - extend fuse_init_in with reserved fields, add FUSE_INIT_EXT init flag
+ * - add flags2 to fuse_init_in and fuse_init_out
+ * - add FUSE_SECURITY_CTX init flag
+ * - add security context to create, mkdir, symlink, and mknod requests
+ * - add FUSE_HAS_INODE_DAX, FUSE_ATTR_DAX
+ *
+ * 7.37
+ * - add FUSE_TMPFILE
*/
#ifndef _LINUX_FUSE_H
@@ -207,7 +232,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 31
+#define FUSE_KERNEL_MINOR_VERSION 37
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -231,7 +256,7 @@ struct fuse_attr {
uint32_t gid;
uint32_t rdev;
uint32_t blksize;
- uint32_t padding;
+ uint32_t flags;
};
struct fuse_kstatfs {
@@ -268,6 +293,7 @@ struct fuse_file_lock {
#define FATTR_MTIME_NOW (1 << 8)
#define FATTR_LOCKOWNER (1 << 9)
#define FATTR_CTIME (1 << 10)
+#define FATTR_KILL_SUIDGID (1 << 11)
/**
* Flags returned by the OPEN request
@@ -277,12 +303,14 @@ struct fuse_file_lock {
* FOPEN_NONSEEKABLE: the file is not seekable
* FOPEN_CACHE_DIR: allow caching this directory
* FOPEN_STREAM: the file is stream-like (no file position at all)
+ * FOPEN_NOFLUSH: don't flush data cache on close (unless FUSE_WRITEBACK_CACHE)
*/
#define FOPEN_DIRECT_IO (1 << 0)
#define FOPEN_KEEP_CACHE (1 << 1)
#define FOPEN_NONSEEKABLE (1 << 2)
#define FOPEN_CACHE_DIR (1 << 3)
#define FOPEN_STREAM (1 << 4)
+#define FOPEN_NOFLUSH (1 << 5)
/**
* INIT request/reply flags
@@ -313,7 +341,21 @@ struct fuse_file_lock {
* FUSE_CACHE_SYMLINKS: cache READLINK responses
* FUSE_NO_OPENDIR_SUPPORT: kernel supports zero-message opendir
* FUSE_EXPLICIT_INVAL_DATA: only invalidate cached pages on explicit request
- * FUSE_MAP_ALIGNMENT: map_alignment field is valid
+ * FUSE_MAP_ALIGNMENT: init_out.map_alignment contains log2(byte alignment) for
+ * foffset and moffset fields in struct
+ * fuse_setupmapping_out and fuse_removemapping_one.
+ * FUSE_SUBMOUNTS: kernel supports auto-mounting directory submounts
+ * FUSE_HANDLE_KILLPRIV_V2: fs kills suid/sgid/cap on write/chown/trunc.
+ * Upon write/truncate suid/sgid is only killed if caller
+ * does not have CAP_FSETID. Additionally upon
+ * write/truncate sgid is killed only if file has group
+ * execute permission. (Same as Linux VFS behavior).
+ * FUSE_SETXATTR_EXT: Server supports extended struct fuse_setxattr_in
+ * FUSE_INIT_EXT: extended fuse_init_in request
+ * FUSE_INIT_RESERVED: reserved, do not use
+ * FUSE_SECURITY_CTX: add security context to create, mkdir, symlink, and
+ * mknod
+ * FUSE_HAS_INODE_DAX: use per inode DAX
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -342,6 +384,14 @@ struct fuse_file_lock {
#define FUSE_NO_OPENDIR_SUPPORT (1 << 24)
#define FUSE_EXPLICIT_INVAL_DATA (1 << 25)
#define FUSE_MAP_ALIGNMENT (1 << 26)
+#define FUSE_SUBMOUNTS (1 << 27)
+#define FUSE_HANDLE_KILLPRIV_V2 (1 << 28)
+#define FUSE_SETXATTR_EXT (1 << 29)
+#define FUSE_INIT_EXT (1 << 30)
+#define FUSE_INIT_RESERVED (1 << 31)
+/* bits 32..63 get shifted down 32 bits into the flags2 field */
+#define FUSE_SECURITY_CTX (1ULL << 32)
+#define FUSE_HAS_INODE_DAX (1ULL << 33)
/**
* CUSE INIT request/reply flags
@@ -371,11 +421,14 @@ struct fuse_file_lock {
*
* FUSE_WRITE_CACHE: delayed write from page cache, file handle is guessed
* FUSE_WRITE_LOCKOWNER: lock_owner field is valid
- * FUSE_WRITE_KILL_PRIV: kill suid and sgid bits
+ * FUSE_WRITE_KILL_SUIDGID: kill suid and sgid bits
*/
#define FUSE_WRITE_CACHE (1 << 0)
#define FUSE_WRITE_LOCKOWNER (1 << 1)
-#define FUSE_WRITE_KILL_PRIV (1 << 2)
+#define FUSE_WRITE_KILL_SUIDGID (1 << 2)
+
+/* Obsolete alias; this flag implies killing suid/sgid only. */
+#define FUSE_WRITE_KILL_PRIV FUSE_WRITE_KILL_SUIDGID
/**
* Read flags
@@ -417,6 +470,27 @@ struct fuse_file_lock {
*/
#define FUSE_FSYNC_FDATASYNC (1 << 0)
+/**
+ * fuse_attr flags
+ *
+ * FUSE_ATTR_SUBMOUNT: Object is a submount root
+ * FUSE_ATTR_DAX: Enable DAX for this file in per inode DAX mode
+ */
+#define FUSE_ATTR_SUBMOUNT (1 << 0)
+#define FUSE_ATTR_DAX (1 << 1)
+
+/**
+ * Open flags
+ * FUSE_OPEN_KILL_SUIDGID: Kill suid and sgid if executable
+ */
+#define FUSE_OPEN_KILL_SUIDGID (1 << 0)
+
+/**
+ * setxattr flags
+ * FUSE_SETXATTR_ACL_KILL_SGID: Clear SGID when system.posix_acl_access is set
+ */
+#define FUSE_SETXATTR_ACL_KILL_SGID (1 << 0)
+
enum fuse_opcode {
FUSE_LOOKUP = 1,
FUSE_FORGET = 2, /* no reply */
@@ -465,6 +539,8 @@ enum fuse_opcode {
FUSE_COPY_FILE_RANGE = 47,
FUSE_SETUPMAPPING = 48,
FUSE_REMOVEMAPPING = 49,
+ FUSE_SYNCFS = 50,
+ FUSE_TMPFILE = 51,
/* CUSE specific operations */
CUSE_INIT = 4096,
@@ -578,14 +654,14 @@ struct fuse_setattr_in {
struct fuse_open_in {
uint32_t flags;
- uint32_t unused;
+ uint32_t open_flags; /* FUSE_OPEN_... */
};
struct fuse_create_in {
uint32_t flags;
uint32_t mode;
uint32_t umask;
- uint32_t padding;
+ uint32_t open_flags; /* FUSE_OPEN_... */
};
struct fuse_open_out {
@@ -647,9 +723,13 @@ struct fuse_fsync_in {
uint32_t padding;
};
+#define FUSE_COMPAT_SETXATTR_IN_SIZE 8
+
struct fuse_setxattr_in {
uint32_t size;
uint32_t flags;
+ uint32_t setxattr_flags;
+ uint32_t padding;
};
struct fuse_getxattr_in {
@@ -684,6 +764,8 @@ struct fuse_init_in {
uint32_t minor;
uint32_t max_readahead;
uint32_t flags;
+ uint32_t flags2;
+ uint32_t unused[11];
};
#define FUSE_COMPAT_INIT_OUT_SIZE 8
@@ -700,7 +782,8 @@ struct fuse_init_out {
uint32_t time_gran;
uint16_t max_pages;
uint16_t map_alignment;
- uint32_t unused[8];
+ uint32_t flags2;
+ uint32_t unused[7];
};
#define CUSE_INIT_INFO_MAX 4096
@@ -808,9 +891,12 @@ struct fuse_dirent {
char name[];
};
-#define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name)
-#define FUSE_DIRENT_ALIGN(x) \
+/* Align variable length records to 64bit boundary */
+#define FUSE_REC_ALIGN(x) \
(((x) + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1))
+
+#define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name)
+#define FUSE_DIRENT_ALIGN(x) FUSE_REC_ALIGN(x)
#define FUSE_DIRENT_SIZE(d) \
FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen)
@@ -869,7 +955,8 @@ struct fuse_notify_retrieve_in {
};
/* Device ioctls: */
-#define FUSE_DEV_IOC_CLONE _IOR(229, 0, uint32_t)
+#define FUSE_DEV_IOC_MAGIC 229
+#define FUSE_DEV_IOC_CLONE _IOR(FUSE_DEV_IOC_MAGIC, 0, uint32_t)
struct fuse_lseek_in {
uint64_t fh;
@@ -892,4 +979,60 @@ struct fuse_copy_file_range_in {
uint64_t flags;
};
+#define FUSE_SETUPMAPPING_FLAG_WRITE (1ull << 0)
+#define FUSE_SETUPMAPPING_FLAG_READ (1ull << 1)
+struct fuse_setupmapping_in {
+ /* An already open handle */
+ uint64_t fh;
+ /* Offset into the file to start the mapping */
+ uint64_t foffset;
+ /* Length of mapping required */
+ uint64_t len;
+ /* Flags, FUSE_SETUPMAPPING_FLAG_* */
+ uint64_t flags;
+ /* Offset in Memory Window */
+ uint64_t moffset;
+};
+
+struct fuse_removemapping_in {
+ /* number of fuse_removemapping_one follows */
+ uint32_t count;
+};
+
+struct fuse_removemapping_one {
+	/* Offset into the dax window to start the unmapping */
+ uint64_t moffset;
+ /* Length of mapping required */
+ uint64_t len;
+};
+
+#define FUSE_REMOVEMAPPING_MAX_ENTRY \
+ (PAGE_SIZE / sizeof(struct fuse_removemapping_one))
+
+struct fuse_syncfs_in {
+ uint64_t padding;
+};
+
+/*
+ * For each security context, send a fuse_secctx with the size of the
+ * security context. fuse_secctx is followed by the security context name,
+ * which in turn is followed by the actual context label:
+ * fuse_secctx, name, context
+ */
+struct fuse_secctx {
+ uint32_t size;
+ uint32_t padding;
+};
+
+/*
+ * Contains information about how many fuse_secctx structures are being
+ * sent and the total size of all security contexts (including the
+ * size of fuse_secctx_header).
+ */
+struct fuse_secctx_header {
+ uint32_t size;
+ uint32_t nr_secctx;
+};
+
#endif /* _LINUX_FUSE_H */
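
One small usage sketch for the re-expressed clone ioctl (not from this
patch; this mirrors what libfuse does): a worker thread opens its own
/dev/fuse fd and attaches it to an existing session:

  #include <stdint.h>
  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/fuse.h>

  /* Give a worker thread its own /dev/fuse fd bound to an existing session. */
  int clone_fuse_dev(uint32_t session_fd)
  {
      int clone_fd = open("/dev/fuse", O_RDWR | O_CLOEXEC);

      if (clone_fd < 0)
          return -1;
      if (ioctl(clone_fd, FUSE_DEV_IOC_CLONE, &session_fd) < 0) {
          close(clone_fd);
          return -1;
      }
      return clone_fd;
  }
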
diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h
index a89eb0accd5e..71a5df8d2689 100644
--- a/include/uapi/linux/futex.h
+++ b/include/uapi/linux/futex.h
@@ -21,6 +21,7 @@
#define FUTEX_WAKE_BITSET 10
#define FUTEX_WAIT_REQUEUE_PI 11
#define FUTEX_CMP_REQUEUE_PI 12
+#define FUTEX_LOCK_PI2 13
#define FUTEX_PRIVATE_FLAG 128
#define FUTEX_CLOCK_REALTIME 256
@@ -32,6 +33,7 @@
#define FUTEX_CMP_REQUEUE_PRIVATE (FUTEX_CMP_REQUEUE | FUTEX_PRIVATE_FLAG)
#define FUTEX_WAKE_OP_PRIVATE (FUTEX_WAKE_OP | FUTEX_PRIVATE_FLAG)
#define FUTEX_LOCK_PI_PRIVATE (FUTEX_LOCK_PI | FUTEX_PRIVATE_FLAG)
+#define FUTEX_LOCK_PI2_PRIVATE (FUTEX_LOCK_PI2 | FUTEX_PRIVATE_FLAG)
#define FUTEX_UNLOCK_PI_PRIVATE (FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG)
#define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG)
#define FUTEX_WAIT_BITSET_PRIVATE (FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG)
@@ -42,6 +44,31 @@
FUTEX_PRIVATE_FLAG)
/*
+ * Flags to specify the bit length of the futex word for futex2 syscalls.
+ * Currently, only 32 is supported.
+ */
+#define FUTEX_32 2
+
+/*
+ * Max numbers of elements in a futex_waitv array
+ */
+#define FUTEX_WAITV_MAX 128
+
+/**
+ * struct futex_waitv - A waiter for vectorized wait
+ * @val: Expected value at uaddr
+ * @uaddr: User address to wait on
+ * @flags: Flags for this waiter
+ * @__reserved: Reserved member to preserve data alignment. Should be 0.
+ */
+struct futex_waitv {
+ __u64 val;
+ __u64 uaddr;
+ __u32 flags;
+ __u32 __reserved;
+};
+
+/*
* Support for robust futexes: the kernel cleans up held futexes at
* thread exit time.
*/
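
A hedged sketch of how struct futex_waitv is consumed (not part of this
patch): the futex_waitv() syscall merged alongside these definitions takes
the waiter array, its length, flags, an optional timeout and a clockid.
__NR_futex_waitv is assumed to be provided by the installed headers:

  #include <stdint.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <linux/futex.h>

  static uint32_t futex_word;

  /* Block until futex_word is no longer 0, or a wakeup arrives. */
  int wait_one(void)
  {
      struct futex_waitv waiter = {
          .val = 0,
          .uaddr = (uintptr_t)&futex_word,
          .flags = FUTEX_32,
          .__reserved = 0,
      };

      /* nr_futexes = 1, flags = 0, no timeout (clockid then ignored) */
      return syscall(__NR_futex_waitv, &waiter, 1, 0, NULL, 0);
  }
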
diff --git a/include/uapi/linux/genetlink.h b/include/uapi/linux/genetlink.h
index 877f7fa95466..ddba3ca01e39 100644
--- a/include/uapi/linux/genetlink.h
+++ b/include/uapi/linux/genetlink.h
@@ -48,6 +48,7 @@ enum {
CTRL_CMD_NEWMCAST_GRP,
CTRL_CMD_DELMCAST_GRP,
CTRL_CMD_GETMCAST_GRP, /* unused */
+ CTRL_CMD_GETPOLICY,
__CTRL_CMD_MAX,
};
@@ -62,6 +63,9 @@ enum {
CTRL_ATTR_MAXATTR,
CTRL_ATTR_OPS,
CTRL_ATTR_MCAST_GROUPS,
+ CTRL_ATTR_POLICY,
+ CTRL_ATTR_OP_POLICY,
+ CTRL_ATTR_OP,
__CTRL_ATTR_MAX,
};
@@ -85,5 +89,15 @@ enum {
#define CTRL_ATTR_MCAST_GRP_MAX (__CTRL_ATTR_MCAST_GRP_MAX - 1)
+enum {
+ CTRL_ATTR_POLICY_UNSPEC,
+ CTRL_ATTR_POLICY_DO,
+ CTRL_ATTR_POLICY_DUMP,
+
+ __CTRL_ATTR_POLICY_DUMP_MAX,
+ CTRL_ATTR_POLICY_DUMP_MAX = __CTRL_ATTR_POLICY_DUMP_MAX - 1
+};
+
+#define CTRL_ATTR_POLICY_MAX (__CTRL_ATTR_POLICY_DUMP_MAX - 1)
#endif /* _UAPI__LINUX_GENERIC_NETLINK_H */
diff --git a/include/uapi/linux/gfs2_ondisk.h b/include/uapi/linux/gfs2_ondisk.h
index 2dc10a034de1..6ec4291bcc7a 100644
--- a/include/uapi/linux/gfs2_ondisk.h
+++ b/include/uapi/linux/gfs2_ondisk.h
@@ -47,7 +47,7 @@
#define GFS2_FORMAT_DE 1200
#define GFS2_FORMAT_QU 1500
/* These are part of the superblock */
-#define GFS2_FORMAT_FS 1801
+#define GFS2_FORMAT_FS 1802
#define GFS2_FORMAT_MULTI 1900
/*
@@ -171,6 +171,12 @@ struct gfs2_rindex {
#define GFS2_RGF_NOALLOC 0x00000008
#define GFS2_RGF_TRIMMED 0x00000010
+struct gfs2_inode_lvb {
+ __be32 ri_magic;
+ __be32 __pad;
+ __be64 ri_generation_deleted;
+};
+
struct gfs2_rgrp_lvb {
__be32 rl_magic;
__be32 rl_flags;
@@ -383,8 +389,9 @@ struct gfs2_leaf {
#define GFS2_EATYPE_USR 1
#define GFS2_EATYPE_SYS 2
#define GFS2_EATYPE_SECURITY 3
+#define GFS2_EATYPE_TRUSTED 4
-#define GFS2_EATYPE_LAST 3
+#define GFS2_EATYPE_LAST 4
#define GFS2_EATYPE_VALID(x) ((x) <= GFS2_EATYPE_LAST)
#define GFS2_EAFLAG_LAST 0x01 /* last ea in block */
diff --git a/include/uapi/linux/gpio.h b/include/uapi/linux/gpio.h
index 799cf823d493..cb9966d49a16 100644
--- a/include/uapi/linux/gpio.h
+++ b/include/uapi/linux/gpio.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* <linux/gpio.h> - userspace ABI for the GPIO character devices
*
@@ -11,22 +11,302 @@
#ifndef _UAPI_GPIO_H_
#define _UAPI_GPIO_H_
+#include <linux/const.h>
#include <linux/ioctl.h>
#include <linux/types.h>
+/*
+ * The maximum size of name and label arrays.
+ *
+ * Must be a multiple of 8 to ensure 32/64-bit alignment of structs.
+ */
+#define GPIO_MAX_NAME_SIZE 32
+
/**
* struct gpiochip_info - Information about a certain GPIO chip
* @name: the Linux kernel name of this GPIO chip
* @label: a functional name for this GPIO chip, such as a product
- * number, may be NULL
+ * number, may be empty (i.e. label[0] == '\0')
* @lines: number of GPIO lines on this chip
*/
struct gpiochip_info {
- char name[32];
- char label[32];
+ char name[GPIO_MAX_NAME_SIZE];
+ char label[GPIO_MAX_NAME_SIZE];
__u32 lines;
};
+/*
+ * Maximum number of requested lines.
+ *
+ * Must be no greater than 64, as bitmaps are restricted here to 64-bits
+ * for simplicity, and a multiple of 2 to ensure 32/64-bit alignment of
+ * structs.
+ */
+#define GPIO_V2_LINES_MAX 64
+
+/*
+ * The maximum number of configuration attributes associated with a line
+ * request.
+ */
+#define GPIO_V2_LINE_NUM_ATTRS_MAX 10
+
+/**
+ * enum gpio_v2_line_flag - &struct gpio_v2_line_attribute.flags values
+ * @GPIO_V2_LINE_FLAG_USED: line is not available for request
+ * @GPIO_V2_LINE_FLAG_ACTIVE_LOW: line active state is physical low
+ * @GPIO_V2_LINE_FLAG_INPUT: line is an input
+ * @GPIO_V2_LINE_FLAG_OUTPUT: line is an output
+ * @GPIO_V2_LINE_FLAG_EDGE_RISING: line detects rising (inactive to active)
+ * edges
+ * @GPIO_V2_LINE_FLAG_EDGE_FALLING: line detects falling (active to
+ * inactive) edges
+ * @GPIO_V2_LINE_FLAG_OPEN_DRAIN: line is an open drain output
+ * @GPIO_V2_LINE_FLAG_OPEN_SOURCE: line is an open source output
+ * @GPIO_V2_LINE_FLAG_BIAS_PULL_UP: line has pull-up bias enabled
+ * @GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN: line has pull-down bias enabled
+ * @GPIO_V2_LINE_FLAG_BIAS_DISABLED: line has bias disabled
+ * @GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME: line events contain REALTIME timestamps
+ * @GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE: line events contain timestamps from
+ * hardware timestamp engine
+ */
+enum gpio_v2_line_flag {
+ GPIO_V2_LINE_FLAG_USED = _BITULL(0),
+ GPIO_V2_LINE_FLAG_ACTIVE_LOW = _BITULL(1),
+ GPIO_V2_LINE_FLAG_INPUT = _BITULL(2),
+ GPIO_V2_LINE_FLAG_OUTPUT = _BITULL(3),
+ GPIO_V2_LINE_FLAG_EDGE_RISING = _BITULL(4),
+ GPIO_V2_LINE_FLAG_EDGE_FALLING = _BITULL(5),
+ GPIO_V2_LINE_FLAG_OPEN_DRAIN = _BITULL(6),
+ GPIO_V2_LINE_FLAG_OPEN_SOURCE = _BITULL(7),
+ GPIO_V2_LINE_FLAG_BIAS_PULL_UP = _BITULL(8),
+ GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN = _BITULL(9),
+ GPIO_V2_LINE_FLAG_BIAS_DISABLED = _BITULL(10),
+ GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME = _BITULL(11),
+ GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE = _BITULL(12),
+};
+
+/**
+ * struct gpio_v2_line_values - Values of GPIO lines
+ * @bits: a bitmap containing the value of the lines, set to 1 for active
+ * and 0 for inactive.
+ * @mask: a bitmap identifying the lines to get or set, with each bit
+ * number corresponding to the index into &struct
+ * gpio_v2_line_request.offsets.
+ */
+struct gpio_v2_line_values {
+ __aligned_u64 bits;
+ __aligned_u64 mask;
+};
+
+/**
+ * enum gpio_v2_line_attr_id - &struct gpio_v2_line_attribute.id values
+ * identifying which field of the attribute union is in use.
+ * @GPIO_V2_LINE_ATTR_ID_FLAGS: flags field is in use
+ * @GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES: values field is in use
+ * @GPIO_V2_LINE_ATTR_ID_DEBOUNCE: debounce_period_us field is in use
+ */
+enum gpio_v2_line_attr_id {
+ GPIO_V2_LINE_ATTR_ID_FLAGS = 1,
+ GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES = 2,
+ GPIO_V2_LINE_ATTR_ID_DEBOUNCE = 3,
+};
+
+/**
+ * struct gpio_v2_line_attribute - a configurable attribute of a line
+ * @id: attribute identifier with value from &enum gpio_v2_line_attr_id
+ * @padding: reserved for future use and must be zero filled
+ * @flags: if id is %GPIO_V2_LINE_ATTR_ID_FLAGS, the flags for the GPIO
+ * line, with values from &enum gpio_v2_line_flag, such as
+ * %GPIO_V2_LINE_FLAG_ACTIVE_LOW, %GPIO_V2_LINE_FLAG_OUTPUT etc, added
+ * together. This overrides the default flags contained in the &struct
+ * gpio_v2_line_config for the associated line.
+ * @values: if id is %GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES, a bitmap
+ * containing the values to which the lines will be set, with each bit
+ * number corresponding to the index into &struct
+ * gpio_v2_line_request.offsets.
+ * @debounce_period_us: if id is %GPIO_V2_LINE_ATTR_ID_DEBOUNCE, the
+ * desired debounce period, in microseconds
+ */
+struct gpio_v2_line_attribute {
+ __u32 id;
+ __u32 padding;
+ union {
+ __aligned_u64 flags;
+ __aligned_u64 values;
+ __u32 debounce_period_us;
+ };
+};
+
+/**
+ * struct gpio_v2_line_config_attribute - a configuration attribute
+ * associated with one or more of the requested lines.
+ * @attr: the configurable attribute
+ * @mask: a bitmap identifying the lines to which the attribute applies,
+ * with each bit number corresponding to the index into &struct
+ * gpio_v2_line_request.offsets.
+ */
+struct gpio_v2_line_config_attribute {
+ struct gpio_v2_line_attribute attr;
+ __aligned_u64 mask;
+};
+
+/**
+ * struct gpio_v2_line_config - Configuration for GPIO lines
+ * @flags: flags for the GPIO lines, with values from &enum
+ * gpio_v2_line_flag, such as %GPIO_V2_LINE_FLAG_ACTIVE_LOW,
+ * %GPIO_V2_LINE_FLAG_OUTPUT etc, added together. This is the default for
+ * all requested lines but may be overridden for particular lines using
+ * @attrs.
+ * @num_attrs: the number of attributes in @attrs
+ * @padding: reserved for future use and must be zero filled
+ * @attrs: the configuration attributes associated with the requested
+ * lines. Any attribute should only be associated with a particular line
+ * once. If an attribute is associated with a line multiple times then the
+ * first occurrence (i.e. lowest index) has precedence.
+ */
+struct gpio_v2_line_config {
+ __aligned_u64 flags;
+ __u32 num_attrs;
+ /* Pad to fill implicit padding and reserve space for future use. */
+ __u32 padding[5];
+ struct gpio_v2_line_config_attribute attrs[GPIO_V2_LINE_NUM_ATTRS_MAX];
+};
+
+/**
+ * struct gpio_v2_line_request - Information about a request for GPIO lines
+ * @offsets: an array of desired lines, specified by offset index for the
+ * associated GPIO chip
+ * @consumer: a desired consumer label for the selected GPIO lines such as
+ * "my-bitbanged-relay"
+ * @config: requested configuration for the lines.
+ * @num_lines: number of lines requested in this request, i.e. the number
+ * of valid fields in the %GPIO_V2_LINES_MAX sized arrays, set to 1 to
+ * request a single line
+ * @event_buffer_size: a suggested minimum number of line events that the
+ * kernel should buffer. This is only relevant if edge detection is
+ * enabled in the configuration. Note that this is only a suggested value
+ * and the kernel may allocate a larger buffer or cap the size of the
+ * buffer. If this field is zero then the buffer size defaults to a minimum
+ * of @num_lines * 16.
+ * @padding: reserved for future use and must be zero filled
+ * @fd: if successful this field will contain a valid anonymous file handle
+ * after a %GPIO_GET_LINE_IOCTL operation, zero or negative value means
+ * error
+ */
+struct gpio_v2_line_request {
+ __u32 offsets[GPIO_V2_LINES_MAX];
+ char consumer[GPIO_MAX_NAME_SIZE];
+ struct gpio_v2_line_config config;
+ __u32 num_lines;
+ __u32 event_buffer_size;
+ /* Pad to fill implicit padding and reserve space for future use. */
+ __u32 padding[5];
+ __s32 fd;
+};
+
+/**
+ * struct gpio_v2_line_info - Information about a certain GPIO line
+ * @name: the name of this GPIO line, such as the output pin of the line on
+ * the chip, a rail or a pin header name on a board, as specified by the
+ * GPIO chip, may be empty (i.e. name[0] == '\0')
+ * @consumer: a functional name for the consumer of this GPIO line as set
+ * by whatever is using it, will be empty if there is no current user but
+ * may also be empty if the consumer doesn't set this up
+ * @offset: the local offset on this GPIO chip, fill this in when
+ * requesting the line information from the kernel
+ * @num_attrs: the number of attributes in @attrs
+ * @flags: flags for this GPIO line, with values from &enum
+ * gpio_v2_line_flag, such as %GPIO_V2_LINE_FLAG_ACTIVE_LOW,
+ * %GPIO_V2_LINE_FLAG_OUTPUT etc, added together.
+ * @attrs: the configuration attributes associated with the line
+ * @padding: reserved for future use
+ */
+struct gpio_v2_line_info {
+ char name[GPIO_MAX_NAME_SIZE];
+ char consumer[GPIO_MAX_NAME_SIZE];
+ __u32 offset;
+ __u32 num_attrs;
+ __aligned_u64 flags;
+ struct gpio_v2_line_attribute attrs[GPIO_V2_LINE_NUM_ATTRS_MAX];
+ /* Space reserved for future use. */
+ __u32 padding[4];
+};
+
+/**
+ * enum gpio_v2_line_changed_type - &struct gpio_v2_line_changed.event_type
+ * values
+ * @GPIO_V2_LINE_CHANGED_REQUESTED: line has been requested
+ * @GPIO_V2_LINE_CHANGED_RELEASED: line has been released
+ * @GPIO_V2_LINE_CHANGED_CONFIG: line has been reconfigured
+ */
+enum gpio_v2_line_changed_type {
+ GPIO_V2_LINE_CHANGED_REQUESTED = 1,
+ GPIO_V2_LINE_CHANGED_RELEASED = 2,
+ GPIO_V2_LINE_CHANGED_CONFIG = 3,
+};
+
+/**
+ * struct gpio_v2_line_info_changed - Information about a change in status
+ * of a GPIO line
+ * @info: updated line information
+ * @timestamp_ns: estimate of time of status change occurrence, in nanoseconds
+ * @event_type: the type of change with a value from &enum
+ * gpio_v2_line_changed_type
+ * @padding: reserved for future use
+ */
+struct gpio_v2_line_info_changed {
+ struct gpio_v2_line_info info;
+ __aligned_u64 timestamp_ns;
+ __u32 event_type;
+ /* Pad struct to 64-bit boundary and reserve space for future use. */
+ __u32 padding[5];
+};
+
+/**
+ * enum gpio_v2_line_event_id - &struct gpio_v2_line_event.id values
+ * @GPIO_V2_LINE_EVENT_RISING_EDGE: event triggered by a rising edge
+ * @GPIO_V2_LINE_EVENT_FALLING_EDGE: event triggered by a falling edge
+ */
+enum gpio_v2_line_event_id {
+ GPIO_V2_LINE_EVENT_RISING_EDGE = 1,
+ GPIO_V2_LINE_EVENT_FALLING_EDGE = 2,
+};
+
+/**
+ * struct gpio_v2_line_event - The actual event being pushed to userspace
+ * @timestamp_ns: best estimate of time of event occurrence, in nanoseconds.
+ * @id: event identifier with value from &enum gpio_v2_line_event_id
+ * @offset: the offset of the line that triggered the event
+ * @seqno: the sequence number for this event in the sequence of events for
+ * all the lines in this line request
+ * @line_seqno: the sequence number for this event in the sequence of
+ * events on this particular line
+ * @padding: reserved for future use
+ *
+ * By default the @timestamp_ns is read from %CLOCK_MONOTONIC and is
+ * intended to allow the accurate measurement of the time between events.
+ * It does not provide the wall-clock time.
+ *
+ * If the %GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME flag is set then the
+ * @timestamp_ns is read from %CLOCK_REALTIME.
+ */
+struct gpio_v2_line_event {
+ __aligned_u64 timestamp_ns;
+ __u32 id;
+ __u32 offset;
+ __u32 seqno;
+ __u32 line_seqno;
+ /* Space reserved for future use. */
+ __u32 padding[6];
+};
+
+/*
+ * ABI v1
+ *
+ * This version of the ABI is deprecated.
+ * Use the latest version of the ABI, defined above, instead.
+ */
+
/* Informational flags */
#define GPIOLINE_FLAG_KERNEL (1UL << 0) /* Line used by the kernel */
#define GPIOLINE_FLAG_IS_OUT (1UL << 1)
@@ -44,21 +324,56 @@ struct gpiochip_info {
* @flags: various flags for this line
* @name: the name of this GPIO line, such as the output pin of the line on the
* chip, a rail or a pin header name on a board, as specified by the gpio
- * chip, may be NULL
+ * chip, may be empty (i.e. name[0] == '\0')
* @consumer: a functional name for the consumer of this GPIO line as set by
- * whatever is using it, will be NULL if there is no current user but may
- * also be NULL if the consumer doesn't set this up
+ * whatever is using it, will be empty if there is no current user but may
+ * also be empty if the consumer doesn't set this up
+ *
+ * Note: This struct is part of ABI v1 and is deprecated.
+ * Use &struct gpio_v2_line_info instead.
*/
struct gpioline_info {
__u32 line_offset;
__u32 flags;
- char name[32];
- char consumer[32];
+ char name[GPIO_MAX_NAME_SIZE];
+ char consumer[GPIO_MAX_NAME_SIZE];
};
/* Maximum number of requested handles */
#define GPIOHANDLES_MAX 64
+/* Possible line status change events */
+enum {
+ GPIOLINE_CHANGED_REQUESTED = 1,
+ GPIOLINE_CHANGED_RELEASED,
+ GPIOLINE_CHANGED_CONFIG,
+};
+
+/**
+ * struct gpioline_info_changed - Information about a change in status
+ * of a GPIO line
+ * @info: updated line information
+ * @timestamp: estimate of time of status change occurrence, in nanoseconds
+ * @event_type: one of %GPIOLINE_CHANGED_REQUESTED,
+ * %GPIOLINE_CHANGED_RELEASED and %GPIOLINE_CHANGED_CONFIG
+ * @padding: reserved for future use
+ *
+ * The &struct gpioline_info embedded here has 32-bit alignment on its own,
+ * but it works fine with 64-bit alignment too. With its 72 byte size, we can
+ * guarantee there are no implicit holes between it and subsequent members.
+ * The 20-byte padding at the end makes sure we don't add any implicit padding
+ * at the end of the structure on 64-bit architectures.
+ *
+ * Note: This struct is part of ABI v1 and is deprecated.
+ * Use &struct gpio_v2_line_info_changed instead.
+ */
+struct gpioline_info_changed {
+ struct gpioline_info info;
+ __u64 timestamp;
+ __u32 event_type;
+ __u32 padding[5]; /* for future use */
+};
+
/* Linerequest flags */
#define GPIOHANDLE_REQUEST_INPUT (1UL << 0)
#define GPIOHANDLE_REQUEST_OUTPUT (1UL << 1)
@@ -74,13 +389,13 @@ struct gpioline_info {
* @lineoffsets: an array of desired lines, specified by offset index for the
* associated GPIO device
* @flags: desired flags for the desired GPIO lines, such as
- * GPIOHANDLE_REQUEST_OUTPUT, GPIOHANDLE_REQUEST_ACTIVE_LOW etc, OR:ed
+ * %GPIOHANDLE_REQUEST_OUTPUT, %GPIOHANDLE_REQUEST_ACTIVE_LOW etc, added
* together. Note that even if multiple lines are requested, the same flags
* must be applicable to all of them, if you want lines with individual
* flags set, request them one by one. It is possible to select
* a batch of input or output lines, but they must all have the same
* characteristics, i.e. all inputs or all outputs, all active low etc
- * @default_values: if the GPIOHANDLE_REQUEST_OUTPUT is set for a requested
+ * @default_values: if the %GPIOHANDLE_REQUEST_OUTPUT is set for a requested
* line, this specifies the default output value, should be 0 (low) or
* 1 (high), anything else than 0 or 1 will be interpreted as 1 (high)
* @consumer_label: a desired consumer label for the selected GPIO line(s)
@@ -88,14 +403,17 @@ struct gpioline_info {
* @lines: number of lines requested in this request, i.e. the number of
* valid fields in the above arrays, set to 1 to request a single line
* @fd: if successful this field will contain a valid anonymous file handle
- * after a GPIO_GET_LINEHANDLE_IOCTL operation, zero or negative value
+ * after a %GPIO_GET_LINEHANDLE_IOCTL operation, zero or negative value
* means error
+ *
+ * Note: This struct is part of ABI v1 and is deprecated.
+ * Use &struct gpio_v2_line_request instead.
*/
struct gpiohandle_request {
__u32 lineoffsets[GPIOHANDLES_MAX];
__u32 flags;
__u8 default_values[GPIOHANDLES_MAX];
- char consumer_label[32];
+ char consumer_label[GPIO_MAX_NAME_SIZE];
__u32 lines;
int fd;
};
@@ -103,12 +421,15 @@ struct gpiohandle_request {
/**
* struct gpiohandle_config - Configuration for a GPIO handle request
* @flags: updated flags for the requested GPIO lines, such as
- * GPIOHANDLE_REQUEST_OUTPUT, GPIOHANDLE_REQUEST_ACTIVE_LOW etc, OR:ed
+ * %GPIOHANDLE_REQUEST_OUTPUT, %GPIOHANDLE_REQUEST_ACTIVE_LOW etc, added
* together
- * @default_values: if the GPIOHANDLE_REQUEST_OUTPUT is set in flags,
+ * @default_values: if the %GPIOHANDLE_REQUEST_OUTPUT is set in flags,
* this specifies the default output value, should be 0 (low) or
* 1 (high), anything else than 0 or 1 will be interpreted as 1 (high)
* @padding: reserved for future use and should be zero filled
+ *
+ * Note: This struct is part of ABI v1 and is deprecated.
+ * Use &struct gpio_v2_line_config instead.
*/
struct gpiohandle_config {
__u32 flags;
@@ -116,21 +437,19 @@ struct gpiohandle_config {
__u32 padding[4]; /* padding for future use */
};
-#define GPIOHANDLE_SET_CONFIG_IOCTL _IOWR(0xB4, 0x0a, struct gpiohandle_config)
-
/**
* struct gpiohandle_data - Information of values on a GPIO handle
* @values: when getting the state of lines this contains the current
* state of a line, when setting the state of lines these should contain
* the desired target state
+ *
+ * Note: This struct is part of ABI v1 and is deprecated.
+ * Use &struct gpio_v2_line_values instead.
*/
struct gpiohandle_data {
__u8 values[GPIOHANDLES_MAX];
};
-#define GPIOHANDLE_GET_LINE_VALUES_IOCTL _IOWR(0xB4, 0x08, struct gpiohandle_data)
-#define GPIOHANDLE_SET_LINE_VALUES_IOCTL _IOWR(0xB4, 0x09, struct gpiohandle_data)
-
/* Eventrequest flags */
#define GPIOEVENT_REQUEST_RISING_EDGE (1UL << 0)
#define GPIOEVENT_REQUEST_FALLING_EDGE (1UL << 1)
@@ -141,24 +460,27 @@ struct gpiohandle_data {
* @lineoffset: the desired line to subscribe to events from, specified by
* offset index for the associated GPIO device
* @handleflags: desired handle flags for the desired GPIO line, such as
- * GPIOHANDLE_REQUEST_ACTIVE_LOW or GPIOHANDLE_REQUEST_OPEN_DRAIN
+ * %GPIOHANDLE_REQUEST_ACTIVE_LOW or %GPIOHANDLE_REQUEST_OPEN_DRAIN
* @eventflags: desired flags for the desired GPIO event line, such as
- * GPIOEVENT_REQUEST_RISING_EDGE or GPIOEVENT_REQUEST_FALLING_EDGE
+ * %GPIOEVENT_REQUEST_RISING_EDGE or %GPIOEVENT_REQUEST_FALLING_EDGE
* @consumer_label: a desired consumer label for the selected GPIO line(s)
* such as "my-listener"
* @fd: if successful this field will contain a valid anonymous file handle
- * after a GPIO_GET_LINEEVENT_IOCTL operation, zero or negative value
+ * after a %GPIO_GET_LINEEVENT_IOCTL operation, zero or negative value
* means error
+ *
+ * Note: This struct is part of ABI v1 and is deprecated.
+ * Use &struct gpio_v2_line_request instead.
*/
struct gpioevent_request {
__u32 lineoffset;
__u32 handleflags;
__u32 eventflags;
- char consumer_label[32];
+ char consumer_label[GPIO_MAX_NAME_SIZE];
int fd;
};
-/**
+/*
* GPIO event types
*/
#define GPIOEVENT_EVENT_RISING_EDGE 0x01
@@ -168,15 +490,42 @@ struct gpioevent_request {
* struct gpioevent_data - The actual event being pushed to userspace
* @timestamp: best estimate of time of event occurrence, in nanoseconds
* @id: event identifier
+ *
+ * Note: This struct is part of ABI v1 and is deprecated.
+ * Use &struct gpio_v2_line_event instead.
*/
struct gpioevent_data {
__u64 timestamp;
__u32 id;
};
+/*
+ * v1 and v2 ioctl()s
+ */
#define GPIO_GET_CHIPINFO_IOCTL _IOR(0xB4, 0x01, struct gpiochip_info)
+#define GPIO_GET_LINEINFO_UNWATCH_IOCTL _IOWR(0xB4, 0x0C, __u32)
+
+/*
+ * v2 ioctl()s
+ */
+#define GPIO_V2_GET_LINEINFO_IOCTL _IOWR(0xB4, 0x05, struct gpio_v2_line_info)
+#define GPIO_V2_GET_LINEINFO_WATCH_IOCTL _IOWR(0xB4, 0x06, struct gpio_v2_line_info)
+#define GPIO_V2_GET_LINE_IOCTL _IOWR(0xB4, 0x07, struct gpio_v2_line_request)
+#define GPIO_V2_LINE_SET_CONFIG_IOCTL _IOWR(0xB4, 0x0D, struct gpio_v2_line_config)
+#define GPIO_V2_LINE_GET_VALUES_IOCTL _IOWR(0xB4, 0x0E, struct gpio_v2_line_values)
+#define GPIO_V2_LINE_SET_VALUES_IOCTL _IOWR(0xB4, 0x0F, struct gpio_v2_line_values)
+
+/*
+ * v1 ioctl()s
+ *
+ * These ioctl()s are deprecated. Use the v2 equivalent instead.
+ */
#define GPIO_GET_LINEINFO_IOCTL _IOWR(0xB4, 0x02, struct gpioline_info)
#define GPIO_GET_LINEHANDLE_IOCTL _IOWR(0xB4, 0x03, struct gpiohandle_request)
#define GPIO_GET_LINEEVENT_IOCTL _IOWR(0xB4, 0x04, struct gpioevent_request)
+#define GPIOHANDLE_GET_LINE_VALUES_IOCTL _IOWR(0xB4, 0x08, struct gpiohandle_data)
+#define GPIOHANDLE_SET_LINE_VALUES_IOCTL _IOWR(0xB4, 0x09, struct gpiohandle_data)
+#define GPIOHANDLE_SET_CONFIG_IOCTL _IOWR(0xB4, 0x0A, struct gpiohandle_config)
+#define GPIO_GET_LINEINFO_WATCH_IOCTL _IOWR(0xB4, 0x0B, struct gpioline_info)
#endif /* _UAPI_GPIO_H_ */
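
A compact end-to-end sketch of the v2 ABI introduced above (not from this
patch; chip path and offset are illustrative): request one input line,
then read it back as bit 0 of the values bitmap:

  #include <string.h>
  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/gpio.h>

  int read_line(void)
  {
      struct gpio_v2_line_request req;
      struct gpio_v2_line_values vals;
      int chip_fd, ret;

      chip_fd = open("/dev/gpiochip0", O_RDWR);
      if (chip_fd < 0)
          return -1;

      memset(&req, 0, sizeof(req));
      req.offsets[0] = 17;    /* illustrative line offset */
      req.num_lines = 1;
      req.config.flags = GPIO_V2_LINE_FLAG_INPUT;
      strcpy(req.consumer, "demo");

      ret = ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req);
      close(chip_fd);
      if (ret)
          return -1;

      vals.mask = 1;    /* bit 0 corresponds to offsets[0] */
      vals.bits = 0;
      if (ioctl(req.fd, GPIO_V2_LINE_GET_VALUES_IOCTL, &vals))
          return -1;
      return vals.bits & 1;
  }
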
diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h
index c7d66755d212..2f61298a7b77 100644
--- a/include/uapi/linux/gtp.h
+++ b/include/uapi/linux/gtp.h
@@ -2,10 +2,13 @@
#ifndef _UAPI_LINUX_GTP_H_
#define _UAPI_LINUX_GTP_H_
+#define GTP_GENL_MCGRP_NAME "gtp"
+
enum gtp_genl_cmds {
GTP_CMD_NEWPDP,
GTP_CMD_DELPDP,
GTP_CMD_GETPDP,
+ GTP_CMD_ECHOREQ,
GTP_CMD_MAX,
};
diff --git a/include/uapi/linux/hid.h b/include/uapi/linux/hid.h
index b34492a87a8a..a4dcb34386e3 100644
--- a/include/uapi/linux/hid.h
+++ b/include/uapi/linux/hid.h
@@ -43,15 +43,29 @@
#define USB_INTERFACE_PROTOCOL_MOUSE 2
/*
+ * HID report types --- Ouch! HID spec says 1 2 3!
+ */
+
+enum hid_report_type {
+ HID_INPUT_REPORT = 0,
+ HID_OUTPUT_REPORT = 1,
+ HID_FEATURE_REPORT = 2,
+
+ HID_REPORT_TYPES,
+};
+
+/*
* HID class requests
*/
-#define HID_REQ_GET_REPORT 0x01
-#define HID_REQ_GET_IDLE 0x02
-#define HID_REQ_GET_PROTOCOL 0x03
-#define HID_REQ_SET_REPORT 0x09
-#define HID_REQ_SET_IDLE 0x0A
-#define HID_REQ_SET_PROTOCOL 0x0B
+enum hid_class_request {
+ HID_REQ_GET_REPORT = 0x01,
+ HID_REQ_GET_IDLE = 0x02,
+ HID_REQ_GET_PROTOCOL = 0x03,
+ HID_REQ_SET_REPORT = 0x09,
+ HID_REQ_SET_IDLE = 0x0A,
+ HID_REQ_SET_PROTOCOL = 0x0B,
+};
/*
* HID class descriptor types
diff --git a/include/uapi/linux/hidraw.h b/include/uapi/linux/hidraw.h
index 4913539e5bcc..33ebad81720a 100644
--- a/include/uapi/linux/hidraw.h
+++ b/include/uapi/linux/hidraw.h
@@ -40,6 +40,12 @@ struct hidraw_devinfo {
#define HIDIOCSFEATURE(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x06, len)
#define HIDIOCGFEATURE(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x07, len)
#define HIDIOCGRAWUNIQ(len) _IOC(_IOC_READ, 'H', 0x08, len)
+/* The first byte of SINPUT and GINPUT is the report number */
+#define HIDIOCSINPUT(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x09, len)
+#define HIDIOCGINPUT(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x0A, len)
+/* The first byte of SOUTPUT and GOUTPUT is the report number */
+#define HIDIOCSOUTPUT(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x0B, len)
+#define HIDIOCGOUTPUT(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x0C, len)
#define HIDRAW_FIRST_MINOR 0
#define HIDRAW_MAX_DEVICES 64
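
For illustration (not part of the patch): like the existing FEATURE
ioctls, the new INPUT/OUTPUT pairs put the report number in byte 0 of the
buffer. Report number and buffer length are device specific and merely
illustrative here:

  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <linux/hidraw.h>

  int get_input_report(int hidraw_fd)
  {
      unsigned char buf[64];

      buf[0] = 0x01;    /* first byte selects the report number */
      if (ioctl(hidraw_fd, HIDIOCGINPUT(sizeof(buf)), buf) < 0)
          return -1;
      printf("first data byte: 0x%02x\n", buf[1]);
      return 0;
  }
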
diff --git a/include/uapi/linux/hsr_netlink.h b/include/uapi/linux/hsr_netlink.h
index c218ef9c35dd..d540ea9bbef4 100644
--- a/include/uapi/linux/hsr_netlink.h
+++ b/include/uapi/linux/hsr_netlink.h
@@ -17,7 +17,7 @@
/* Generic Netlink HSR family definition
*/
-/* attributes */
+/* attributes for HSR or PRP node */
enum {
HSR_A_UNSPEC,
HSR_A_NODE_ADDR,
diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h
index 991b2b7ada7a..aaa502a7bff4 100644
--- a/include/uapi/linux/hyperv.h
+++ b/include/uapi/linux/hyperv.h
@@ -26,7 +26,7 @@
#ifndef _UAPI_HYPERV_H
#define _UAPI_HYPERV_H
-#include <linux/uuid.h>
+#include <linux/types.h>
/*
* Framework version for util services.
@@ -90,6 +90,17 @@ struct hv_vss_check_dm_info {
__u32 flags;
} __attribute__((packed));
+/*
+ * struct hv_vss_msg encodes the fields that the Linux VSS
+ * driver accesses. However, FREEZE messages from Hyper-V contain
+ * additional LUN information that Linux doesn't use and are not
+ * represented in struct hv_vss_msg. A received FREEZE message may
+ * be as large as 6,260 bytes, so the driver must allocate at least
+ * that much space, not sizeof(struct hv_vss_msg). Other messages
+ * such as AUTO_RECOVER may be as large as 12,500 bytes. However,
+ * because the Linux VSS driver responds that it doesn't support
+ * auto-recovery, it should not receive such messages.
+ */
struct hv_vss_msg {
union {
struct hv_vss_hdr vss_hdr;
@@ -119,8 +130,8 @@ enum hv_fcopy_op {
struct hv_fcopy_hdr {
__u32 operation;
- uuid_le service_id0; /* currently unused */
- uuid_le service_id1; /* currently unused */
+ __u8 service_id0[16]; /* currently unused */
+ __u8 service_id1[16]; /* currently unused */
} __attribute__((packed));
#define OVER_WRITE 0x1
@@ -219,7 +230,7 @@ struct hv_do_fcopy {
* kernel and user-level daemon communicate using a connector channel.
*
* The user mode component first registers with the
- * the kernel component. Subsequently, the kernel component requests, data
+ * kernel component. Subsequently, the kernel component requests data
* for the specified keys. In response to this message the user mode component
* fills in the value corresponding to the specified key. We overload the
* sequence field in the cn_msg header to define our KVP message types.
diff --git a/include/uapi/linux/i2c-dev.h b/include/uapi/linux/i2c-dev.h
index 85f8047afcf2..1c4cec4ddd84 100644
--- a/include/uapi/linux/i2c-dev.h
+++ b/include/uapi/linux/i2c-dev.h
@@ -1,25 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
- i2c-dev.h - i2c-bus driver, char device interface
-
- Copyright (C) 1995-97 Simon G. Vogl
- Copyright (C) 1998-99 Frodo Looijaard <frodol@dds.nl>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- MA 02110-1301 USA.
-*/
+ * i2c-dev.h - I2C bus char device interface
+ *
+ * Copyright (C) 1995-97 Simon G. Vogl
+ * Copyright (C) 1998-99 Frodo Looijaard <frodol@dds.nl>
+ */
#ifndef _UAPI_LINUX_I2C_DEV_H
#define _UAPI_LINUX_I2C_DEV_H
diff --git a/include/uapi/linux/i2c.h b/include/uapi/linux/i2c.h
index f71a1751cacf..92326ebde350 100644
--- a/include/uapi/linux/i2c.h
+++ b/include/uapi/linux/i2c.h
@@ -1,29 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-/* ------------------------------------------------------------------------- */
-/* */
-/* i2c.h - definitions for the i2c-bus interface */
-/* */
-/* ------------------------------------------------------------------------- */
-/* Copyright (C) 1995-2000 Simon G. Vogl
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- MA 02110-1301 USA. */
-/* ------------------------------------------------------------------------- */
-
-/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
- Frodo Looijaard <frodol@dds.nl> */
+/*
+ * i2c.h - definitions for the I2C bus interface
+ *
+ * Copyright (C) 1995-2000 Simon G. Vogl
+ * With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
+ * Frodo Looijaard <frodol@dds.nl>
+ */
#ifndef _UAPI_LINUX_I2C_H
#define _UAPI_LINUX_I2C_H
@@ -32,18 +14,41 @@
/**
* struct i2c_msg - an I2C transaction segment beginning with START
- * @addr: Slave address, either seven or ten bits. When this is a ten
- * bit address, I2C_M_TEN must be set in @flags and the adapter
- * must support I2C_FUNC_10BIT_ADDR.
- * @flags: I2C_M_RD is handled by all adapters. No other flags may be
- * provided unless the adapter exported the relevant I2C_FUNC_*
- * flags through i2c_check_functionality().
- * @len: Number of data bytes in @buf being read from or written to the
- * I2C slave address. For read transactions where I2C_M_RECV_LEN
- * is set, the caller guarantees that this buffer can hold up to
- * 32 bytes in addition to the initial length byte sent by the
- * slave (plus, if used, the SMBus PEC); and this value will be
- * incremented by the number of block data bytes received.
+ *
+ * @addr: Slave address, either 7 or 10 bits. When this is a 10 bit address,
+ * %I2C_M_TEN must be set in @flags and the adapter must support
+ * %I2C_FUNC_10BIT_ADDR.
+ *
+ * @flags:
+ * Supported by all adapters:
+ * %I2C_M_RD: read data (from slave to master). Guaranteed to be 0x0001!
+ *
+ * Optional:
+ * %I2C_M_DMA_SAFE: the buffer of this message is DMA safe. Only makes sense
+ * in kernelspace, because userspace buffers are copied anyway
+ *
+ * Only if I2C_FUNC_10BIT_ADDR is set:
+ * %I2C_M_TEN: this is a 10 bit chip address
+ *
+ * Only if I2C_FUNC_SMBUS_READ_BLOCK_DATA is set:
+ * %I2C_M_RECV_LEN: message length will be first received byte
+ *
+ * Only if I2C_FUNC_NOSTART is set:
+ * %I2C_M_NOSTART: skip repeated start sequence
+ *
+ * Only if I2C_FUNC_PROTOCOL_MANGLING is set:
+ * %I2C_M_NO_RD_ACK: in a read message, master ACK/NACK bit is skipped
+ * %I2C_M_IGNORE_NAK: treat NACK from client as ACK
+ * %I2C_M_REV_DIR_ADDR: toggles the Rd/Wr bit
+ * %I2C_M_STOP: force a STOP condition after the message
+ *
+ * @len: Number of data bytes in @buf being read from or written to the I2C
+ * slave address. For read transactions where %I2C_M_RECV_LEN is set, the
+ * caller guarantees that this buffer can hold up to %I2C_SMBUS_BLOCK_MAX
+ * bytes in addition to the initial length byte sent by the slave (plus,
+ * if used, the SMBus PEC); and this value will be incremented by the number
+ * of block data bytes received.
+ *
* @buf: The buffer into which data is read, or from which it's written.
*
* An i2c_msg is the low level representation of one segment of an I2C
@@ -60,40 +65,36 @@
* group, it is followed by a STOP. Otherwise it is followed by the next
* @i2c_msg transaction segment, beginning with a (repeated) START.
*
- * Alternatively, when the adapter supports I2C_FUNC_PROTOCOL_MANGLING then
+ * Alternatively, when the adapter supports %I2C_FUNC_PROTOCOL_MANGLING then
* passing certain @flags may have changed those standard protocol behaviors.
* Those flags are only for use with broken/nonconforming slaves, and with
- * adapters which are known to support the specific mangling options they
- * need (one or more of IGNORE_NAK, NO_RD_ACK, NOSTART, and REV_DIR_ADDR).
+ * adapters which are known to support the specific mangling options they need.
*/
struct i2c_msg {
- __u16 addr; /* slave address */
+ __u16 addr;
__u16 flags;
-#define I2C_M_RD 0x0001 /* read data, from slave to master */
- /* I2C_M_RD is guaranteed to be 0x0001! */
-#define I2C_M_TEN 0x0010 /* this is a ten bit chip address */
-#define I2C_M_DMA_SAFE 0x0200 /* the buffer of this message is DMA safe */
- /* makes only sense in kernelspace */
- /* userspace buffers are copied anyway */
-#define I2C_M_RECV_LEN 0x0400 /* length will be first received byte */
-#define I2C_M_NO_RD_ACK 0x0800 /* if I2C_FUNC_PROTOCOL_MANGLING */
-#define I2C_M_IGNORE_NAK 0x1000 /* if I2C_FUNC_PROTOCOL_MANGLING */
-#define I2C_M_REV_DIR_ADDR 0x2000 /* if I2C_FUNC_PROTOCOL_MANGLING */
-#define I2C_M_NOSTART 0x4000 /* if I2C_FUNC_NOSTART */
-#define I2C_M_STOP 0x8000 /* if I2C_FUNC_PROTOCOL_MANGLING */
- __u16 len; /* msg length */
- __u8 *buf; /* pointer to msg data */
+#define I2C_M_RD 0x0001 /* guaranteed to be 0x0001! */
+#define I2C_M_TEN 0x0010 /* use only if I2C_FUNC_10BIT_ADDR */
+#define I2C_M_DMA_SAFE 0x0200 /* use only in kernel space */
+#define I2C_M_RECV_LEN 0x0400 /* use only if I2C_FUNC_SMBUS_READ_BLOCK_DATA */
+#define I2C_M_NO_RD_ACK 0x0800 /* use only if I2C_FUNC_PROTOCOL_MANGLING */
+#define I2C_M_IGNORE_NAK 0x1000 /* use only if I2C_FUNC_PROTOCOL_MANGLING */
+#define I2C_M_REV_DIR_ADDR 0x2000 /* use only if I2C_FUNC_PROTOCOL_MANGLING */
+#define I2C_M_NOSTART 0x4000 /* use only if I2C_FUNC_NOSTART */
+#define I2C_M_STOP 0x8000 /* use only if I2C_FUNC_PROTOCOL_MANGLING */
+ __u16 len;
+ __u8 *buf;
};
/* To determine what functionality is present */
#define I2C_FUNC_I2C 0x00000001
-#define I2C_FUNC_10BIT_ADDR 0x00000002
-#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_IGNORE_NAK etc. */
+#define I2C_FUNC_10BIT_ADDR 0x00000002 /* required for I2C_M_TEN */
+#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* required for I2C_M_IGNORE_NAK etc. */
#define I2C_FUNC_SMBUS_PEC 0x00000008
-#define I2C_FUNC_NOSTART 0x00000010 /* I2C_M_NOSTART */
+#define I2C_FUNC_NOSTART 0x00000010 /* required for I2C_M_NOSTART */
#define I2C_FUNC_SLAVE 0x00000020
-#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 */
+#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 or later */
#define I2C_FUNC_SMBUS_QUICK 0x00010000
#define I2C_FUNC_SMBUS_READ_BYTE 0x00020000
#define I2C_FUNC_SMBUS_WRITE_BYTE 0x00040000
@@ -102,11 +103,11 @@ struct i2c_msg {
#define I2C_FUNC_SMBUS_READ_WORD_DATA 0x00200000
#define I2C_FUNC_SMBUS_WRITE_WORD_DATA 0x00400000
#define I2C_FUNC_SMBUS_PROC_CALL 0x00800000
-#define I2C_FUNC_SMBUS_READ_BLOCK_DATA 0x01000000
+#define I2C_FUNC_SMBUS_READ_BLOCK_DATA 0x01000000 /* required for I2C_M_RECV_LEN */
#define I2C_FUNC_SMBUS_WRITE_BLOCK_DATA 0x02000000
#define I2C_FUNC_SMBUS_READ_I2C_BLOCK 0x04000000 /* I2C-like block xfer */
#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK 0x08000000 /* w/ 1-byte reg. addr. */
-#define I2C_FUNC_SMBUS_HOST_NOTIFY 0x10000000
+#define I2C_FUNC_SMBUS_HOST_NOTIFY 0x10000000 /* SMBus 2.0 or later */
#define I2C_FUNC_SMBUS_BYTE (I2C_FUNC_SMBUS_READ_BYTE | \
I2C_FUNC_SMBUS_WRITE_BYTE)
@@ -128,6 +129,11 @@ struct i2c_msg {
I2C_FUNC_SMBUS_I2C_BLOCK | \
I2C_FUNC_SMBUS_PEC)
+/* if I2C_M_RECV_LEN is also supported */
+#define I2C_FUNC_SMBUS_EMUL_ALL (I2C_FUNC_SMBUS_EMUL | \
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA | \
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL)
+
/*
* Data for SMBus Messages
*/
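As a concrete illustration of the @addr, @flags and @len semantics documented
above, here is a minimal userspace sketch of a write-then-read transfer
through the i2c-dev interface. The device path and the 0x50 slave address are
placeholders; error handling is reduced to the bare minimum.

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

/* Read one register: a plain write of the register address, then a
 * repeated-START read of one byte (I2C_M_RD set on the second message).
 */
static int read_reg(const char *dev, uint8_t reg, uint8_t *val)
{
	int fd = open(dev, O_RDWR);
	if (fd < 0)
		return -1;

	struct i2c_msg msgs[2] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = val },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

	int ret = ioctl(fd, I2C_RDWR, &xfer);
	close(fd);
	return ret < 0 ? -1 : 0;
}

Because both messages travel in one I2C_RDWR call, the adapter emits a
repeated START between them rather than a STOP, exactly as described for
@i2c_msg transaction segments above.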
diff --git a/include/uapi/linux/icmp.h b/include/uapi/linux/icmp.h
index 5589eeb791ca..163c0998aec9 100644
--- a/include/uapi/linux/icmp.h
+++ b/include/uapi/linux/icmp.h
@@ -19,6 +19,9 @@
#define _UAPI_LINUX_ICMP_H
#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/if.h>
+#include <linux/in6.h>
#define ICMP_ECHOREPLY 0 /* Echo Reply */
#define ICMP_DEST_UNREACH 3 /* Destination Unreachable */
@@ -65,6 +68,23 @@
#define ICMP_EXC_TTL 0 /* TTL count exceeded */
#define ICMP_EXC_FRAGTIME 1 /* Fragment Reass time exceeded */
+/* Codes for EXT_ECHO (PROBE) */
+#define ICMP_EXT_ECHO 42
+#define ICMP_EXT_ECHOREPLY 43
+#define ICMP_EXT_CODE_MAL_QUERY 1 /* Malformed Query */
+#define ICMP_EXT_CODE_NO_IF 2 /* No such Interface */
+#define ICMP_EXT_CODE_NO_TABLE_ENT 3 /* No such Table Entry */
+#define ICMP_EXT_CODE_MULT_IFS 4 /* Multiple Interfaces Satisfy Query */
+
+/* Constants for EXT_ECHO (PROBE) */
+#define ICMP_EXT_ECHOREPLY_ACTIVE	(1 << 2) /* active bit in reply message */
+#define ICMP_EXT_ECHOREPLY_IPV4		(1 << 1) /* ipv4 bit in reply message */
+#define ICMP_EXT_ECHOREPLY_IPV6 1 /* ipv6 bit in reply message */
+#define ICMP_EXT_ECHO_CTYPE_NAME 1
+#define ICMP_EXT_ECHO_CTYPE_INDEX 2
+#define ICMP_EXT_ECHO_CTYPE_ADDR 3
+#define ICMP_AFI_IP 1 /* Address Family Identifier for ipv4 */
+#define ICMP_AFI_IP6 2 /* Address Family Identifier for ipv6 */
struct icmphdr {
__u8 type;
@@ -95,5 +115,48 @@ struct icmp_filter {
__u32 data;
};
+/* RFC 4884 extension struct: one per message */
+struct icmp_ext_hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 reserved1:4,
+ version:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 version:4,
+ reserved1:4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __u8 reserved2;
+ __sum16 checksum;
+};
+
+/* RFC 4884 extension object header: one for each object */
+struct icmp_extobj_hdr {
+ __be16 length;
+ __u8 class_num;
+ __u8 class_type;
+};
+/* RFC 8335: 2.1 Header for c-type 3 payload */
+struct icmp_ext_echo_ctype3_hdr {
+ __be16 afi;
+ __u8 addrlen;
+ __u8 reserved;
+};
+
+/* RFC 8335: 2.1 Interface Identification Object */
+struct icmp_ext_echo_iio {
+ struct icmp_extobj_hdr extobj_hdr;
+ union {
+ char name[IFNAMSIZ];
+ __be32 ifindex;
+ struct {
+ struct icmp_ext_echo_ctype3_hdr ctype3_hdr;
+ union {
+ __be32 ipv4_addr;
+ struct in6_addr ipv6_addr;
+ } ip_addr;
+ } addr;
+ } ident;
+};
#endif /* _UAPI_LINUX_ICMP_H */
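A minimal sketch of how the RFC 8335 structures above compose into an
Interface Identification Object that names the probed interface. The
class-num value 3 is the Interface Identification Object class from RFC 8335;
padding the name field out to IFNAMSIZ (and counting it in the object length)
is an assumption modelled on common probe implementations.

#include <arpa/inet.h>
#include <string.h>
#include <linux/icmp.h>
#include <linux/if.h>

static void fill_iio_by_name(struct icmp_ext_echo_iio *iio, const char *name)
{
	memset(iio, 0, sizeof(*iio));
	iio->extobj_hdr.class_num = 3;	/* Interface Identification Object */
	iio->extobj_hdr.class_type = ICMP_EXT_ECHO_CTYPE_NAME;
	strncpy(iio->ident.name, name, IFNAMSIZ - 1);
	/* object length covers the header plus the (padded) name payload */
	iio->extobj_hdr.length = htons(sizeof(iio->extobj_hdr) + IFNAMSIZ);
}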
diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h
index 2622b5a3e616..ecaece3af38d 100644
--- a/include/uapi/linux/icmpv6.h
+++ b/include/uapi/linux/icmpv6.h
@@ -68,6 +68,7 @@ struct icmp6hdr {
#define icmp6_mtu icmp6_dataun.un_data32[0]
#define icmp6_unused icmp6_dataun.un_data32[0]
#define icmp6_maxdelay icmp6_dataun.un_data16[0]
+#define icmp6_datagram_len icmp6_dataun.un_data8[0]
#define icmp6_router icmp6_dataun.u_nd_advt.router
#define icmp6_solicited icmp6_dataun.u_nd_advt.solicited
#define icmp6_override icmp6_dataun.u_nd_advt.override
@@ -137,7 +138,11 @@ struct icmp6hdr {
#define ICMPV6_HDR_FIELD 0
#define ICMPV6_UNK_NEXTHDR 1
#define ICMPV6_UNK_OPTION 2
+#define ICMPV6_HDR_INCOMP 3
+/* Codes for EXT_ECHO (PROBE) */
+#define ICMPV6_EXT_ECHO_REQUEST 160
+#define ICMPV6_EXT_ECHO_REPLY 161
/*
* constants for (set|get)sockopt
*/
diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
index 849ef1515d04..2b9e7feba3f3 100644
--- a/include/uapi/linux/idxd.h
+++ b/include/uapi/linux/idxd.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: LGPL-2.1 WITH Linux-syscall-note */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _USR_IDXD_H_
#define _USR_IDXD_H_
@@ -9,6 +9,32 @@
#include <stdint.h>
#endif
+/* Driver command error status */
+enum idxd_scmd_stat {
+ IDXD_SCMD_DEV_ENABLED = 0x80000010,
+ IDXD_SCMD_DEV_NOT_ENABLED = 0x80000020,
+ IDXD_SCMD_WQ_ENABLED = 0x80000021,
+ IDXD_SCMD_DEV_DMA_ERR = 0x80020000,
+ IDXD_SCMD_WQ_NO_GRP = 0x80030000,
+ IDXD_SCMD_WQ_NO_NAME = 0x80040000,
+ IDXD_SCMD_WQ_NO_SVM = 0x80050000,
+ IDXD_SCMD_WQ_NO_THRESH = 0x80060000,
+ IDXD_SCMD_WQ_PORTAL_ERR = 0x80070000,
+ IDXD_SCMD_WQ_RES_ALLOC_ERR = 0x80080000,
+ IDXD_SCMD_PERCPU_ERR = 0x80090000,
+ IDXD_SCMD_DMA_CHAN_ERR = 0x800a0000,
+ IDXD_SCMD_CDEV_ERR = 0x800b0000,
+ IDXD_SCMD_WQ_NO_SWQ_SUPPORT = 0x800c0000,
+ IDXD_SCMD_WQ_NONE_CONFIGURED = 0x800d0000,
+ IDXD_SCMD_WQ_NO_SIZE = 0x800e0000,
+ IDXD_SCMD_WQ_NO_PRIV = 0x800f0000,
+ IDXD_SCMD_WQ_IRQ_ERR = 0x80100000,
+ IDXD_SCMD_WQ_USER_NO_IOMMU = 0x80110000,
+};
+
+#define IDXD_SCMD_SOFTERR_MASK 0x80000000
+#define IDXD_SCMD_SOFTERR_SHIFT 16
+
/* Descriptor flags */
#define IDXD_OP_FLAG_FENCE 0x0001
#define IDXD_OP_FLAG_BOF 0x0002
@@ -26,6 +52,14 @@
#define IDXD_OP_FLAG_DRDBK 0x4000
#define IDXD_OP_FLAG_DSTS 0x8000
+/* IAX */
+#define IDXD_OP_FLAG_RD_SRC2_AECS 0x010000
+#define IDXD_OP_FLAG_RD_SRC2_2ND 0x020000
+#define IDXD_OP_FLAG_WR_SRC2_AECS_COMP 0x040000
+#define IDXD_OP_FLAG_WR_SRC2_AECS_OVFL 0x080000
+#define IDXD_OP_FLAG_SRC2_STS 0x100000
+#define IDXD_OP_FLAG_CRC_RFC3720 0x200000
+
/* Opcode */
enum dsa_opcode {
DSA_OPCODE_NOOP = 0,
@@ -47,6 +81,26 @@ enum dsa_opcode {
DSA_OPCODE_CFLUSH = 0x20,
};
+enum iax_opcode {
+ IAX_OPCODE_NOOP = 0,
+ IAX_OPCODE_DRAIN = 2,
+ IAX_OPCODE_MEMMOVE,
+ IAX_OPCODE_DECOMPRESS = 0x42,
+ IAX_OPCODE_COMPRESS,
+ IAX_OPCODE_CRC64,
+ IAX_OPCODE_ZERO_DECOMP_32 = 0x48,
+ IAX_OPCODE_ZERO_DECOMP_16,
+ IAX_OPCODE_ZERO_COMP_32 = 0x4c,
+ IAX_OPCODE_ZERO_COMP_16,
+ IAX_OPCODE_SCAN = 0x50,
+ IAX_OPCODE_SET_MEMBER,
+ IAX_OPCODE_EXTRACT,
+ IAX_OPCODE_SELECT,
+ IAX_OPCODE_RLE_BURST,
+ IAX_OPCODE_FIND_UNIQUE,
+ IAX_OPCODE_EXPAND,
+};
+
/* Completion record status */
enum dsa_completion_status {
DSA_COMP_NONE = 0,
@@ -80,24 +134,40 @@ enum dsa_completion_status {
DSA_COMP_TRANSLATION_FAIL,
};
+enum iax_completion_status {
+ IAX_COMP_NONE = 0,
+ IAX_COMP_SUCCESS,
+ IAX_COMP_PAGE_FAULT_IR = 0x04,
+ IAX_COMP_ANALYTICS_ERROR = 0x0a,
+ IAX_COMP_OUTBUF_OVERFLOW,
+ IAX_COMP_BAD_OPCODE = 0x10,
+ IAX_COMP_INVALID_FLAGS,
+ IAX_COMP_NOZERO_RESERVE,
+ IAX_COMP_INVALID_SIZE,
+ IAX_COMP_OVERLAP_BUFFERS = 0x16,
+ IAX_COMP_INT_HANDLE_INVAL = 0x19,
+ IAX_COMP_CRA_XLAT,
+ IAX_COMP_CRA_ALIGN,
+ IAX_COMP_ADDR_ALIGN,
+ IAX_COMP_PRIV_BAD,
+ IAX_COMP_TRAFFIC_CLASS_CONF,
+ IAX_COMP_PFAULT_RDBA,
+ IAX_COMP_HW_ERR1,
+ IAX_COMP_HW_ERR_DRB,
+ IAX_COMP_TRANSLATION_FAIL,
+ IAX_COMP_PRS_TIMEOUT,
+ IAX_COMP_WATCHDOG,
+ IAX_COMP_INVALID_COMP_FLAG = 0x30,
+ IAX_COMP_INVALID_FILTER_FLAG,
+ IAX_COMP_INVALID_INPUT_SIZE,
+ IAX_COMP_INVALID_NUM_ELEMS,
+ IAX_COMP_INVALID_SRC1_WIDTH,
+ IAX_COMP_INVALID_INVERT_OUT,
+};
+
#define DSA_COMP_STATUS_MASK 0x7f
#define DSA_COMP_STATUS_WRITE 0x80
-struct dsa_batch_desc {
- uint32_t pasid:20;
- uint32_t rsvd:11;
- uint32_t priv:1;
- uint32_t flags:24;
- uint32_t opcode:8;
- uint64_t completion_addr;
- uint64_t desc_list_addr;
- uint64_t rsvd1;
- uint32_t desc_count;
- uint16_t interrupt_handle;
- uint16_t rsvd2;
- uint8_t rsvd3[24];
-} __attribute__((packed));
-
struct dsa_hw_desc {
uint32_t pasid:20;
uint32_t rsvd:11;
@@ -109,6 +179,7 @@ struct dsa_hw_desc {
uint64_t src_addr;
uint64_t rdback_addr;
uint64_t pattern;
+ uint64_t desc_list_addr;
};
union {
uint64_t dst_addr;
@@ -116,14 +187,20 @@ struct dsa_hw_desc {
uint64_t src2_addr;
uint64_t comp_pattern;
};
- uint32_t xfer_size;
+ union {
+ uint32_t xfer_size;
+ uint32_t desc_count;
+ };
uint16_t int_handle;
uint16_t rsvd1;
union {
uint8_t expected_res;
+ /* create delta record */
struct {
uint64_t delta_addr;
uint32_t max_delta_size;
+ uint32_t delt_rsvd;
+ uint8_t expected_res_mask;
};
uint32_t delta_rec_size;
uint64_t dest2;
@@ -171,6 +248,28 @@ struct dsa_hw_desc {
};
} __attribute__((packed));
+struct iax_hw_desc {
+ uint32_t pasid:20;
+ uint32_t rsvd:11;
+ uint32_t priv:1;
+ uint32_t flags:24;
+ uint32_t opcode:8;
+ uint64_t completion_addr;
+ uint64_t src1_addr;
+ uint64_t dst_addr;
+ uint32_t src1_size;
+ uint16_t int_handle;
+ union {
+ uint16_t compr_flags;
+ uint16_t decompr_flags;
+ };
+ uint64_t src2_addr;
+ uint32_t max_dst_size;
+ uint32_t src2_size;
+ uint32_t filter_flags;
+ uint32_t num_inputs;
+} __attribute__((packed));
+
struct dsa_raw_desc {
uint64_t field[8];
} __attribute__((packed));
@@ -189,8 +288,14 @@ struct dsa_completion_record {
uint32_t bytes_completed;
uint64_t fault_addr;
union {
- uint16_t delta_rec_size;
- uint16_t crc_val;
+ /* common record */
+ struct {
+ uint32_t invalid_flags:24;
+ uint32_t rsvd2:8;
+ };
+
+ uint32_t delta_rec_size;
+ uint32_t crc_val;
/* DIF check & strip */
struct {
@@ -225,4 +330,27 @@ struct dsa_raw_completion_record {
uint64_t field[4];
} __attribute__((packed));
+struct iax_completion_record {
+ volatile uint8_t status;
+ uint8_t error_code;
+ uint16_t rsvd;
+ uint32_t bytes_completed;
+ uint64_t fault_addr;
+ uint32_t invalid_flags;
+ uint32_t rsvd2;
+ uint32_t output_size;
+ uint8_t output_bits;
+ uint8_t rsvd3;
+ uint16_t xor_csum;
+ uint32_t crc;
+ uint32_t min;
+ uint32_t max;
+ uint32_t sum;
+ uint64_t rsvd4[2];
+} __attribute__((packed));
+
+struct iax_raw_completion_record {
+ uint64_t field[8];
+} __attribute__((packed));
+
#endif
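A hedged sketch of decoding the enum idxd_scmd_stat codes above, e.g. a value
read back from the driver. Bit 31 (IDXD_SCMD_SOFTERR_MASK) marks a
software/driver error; the sketch assumes the bits above
IDXD_SCMD_SOFTERR_SHIFT carry the error class, noting that a few codes (such
as IDXD_SCMD_DEV_ENABLED) keep their distinguishing bits below the shift.

#include <stdint.h>
#include <stdio.h>
#include <linux/idxd.h>

static void report_scmd_status(uint32_t st)
{
	if (!(st & IDXD_SCMD_SOFTERR_MASK)) {
		printf("hardware/command status: %#x\n", st);
		return;
	}
	/* assumption: class in bits 16..30, detail bits in the low half */
	printf("driver error class %#x, low bits %#x\n",
	       (st & ~IDXD_SCMD_SOFTERR_MASK) >> IDXD_SCMD_SOFTERR_SHIFT,
	       st & 0xffff);
}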
diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h
index be714cd8c826..797ba2c1562a 100644
--- a/include/uapi/linux/if.h
+++ b/include/uapi/linux/if.h
@@ -178,6 +178,7 @@ enum {
enum {
IF_LINK_MODE_DEFAULT,
IF_LINK_MODE_DORMANT, /* limit upward transition to dormant */
+ IF_LINK_MODE_TESTING, /* limit upward transition to testing */
};
/*
diff --git a/include/uapi/linux/if_addr.h b/include/uapi/linux/if_addr.h
index dfcf3ce0097f..1c392dd95a5e 100644
--- a/include/uapi/linux/if_addr.h
+++ b/include/uapi/linux/if_addr.h
@@ -33,8 +33,9 @@ enum {
IFA_CACHEINFO,
IFA_MULTICAST,
IFA_FLAGS,
- IFA_RT_PRIORITY, /* u32, priority/metric for prefix route */
+ IFA_RT_PRIORITY, /* u32, priority/metric for prefix route */
IFA_TARGET_NETNSID,
+ IFA_PROTO, /* u8, address protocol */
__IFA_MAX,
};
@@ -69,4 +70,10 @@ struct ifa_cacheinfo {
#define IFA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifaddrmsg))
#endif
+/* ifa_proto */
+#define IFAPROT_UNSPEC 0
+#define IFAPROT_KERNEL_LO 1 /* loopback */
+#define IFAPROT_KERNEL_RA	2	/* set by kernel from router advertisement */
+#define IFAPROT_KERNEL_LL 3 /* link-local set by kernel */
+
#endif
diff --git a/include/uapi/linux/if_alg.h b/include/uapi/linux/if_alg.h
index bc2bcdec377b..578b18aab821 100644
--- a/include/uapi/linux/if_alg.h
+++ b/include/uapi/linux/if_alg.h
@@ -24,9 +24,25 @@ struct sockaddr_alg {
__u8 salg_name[64];
};
+/*
+ * Linux v4.12 and later removed the 64-byte limit on salg_name[]; it's now an
+ * arbitrary-length field. We had to keep the original struct above for source
+ * compatibility with existing userspace programs, though. Use the new struct
+ * below if support for very long algorithm names is needed. To do this,
+ * allocate 'sizeof(struct sockaddr_alg_new) + strlen(algname) + 1' bytes, and
+ * copy algname (including the null terminator) into salg_name.
+ */
+struct sockaddr_alg_new {
+ __u16 salg_family;
+ __u8 salg_type[14];
+ __u32 salg_feat;
+ __u32 salg_mask;
+ __u8 salg_name[];
+};
+
struct af_alg_iv {
__u32 ivlen;
- __u8 iv[0];
+ __u8 iv[];
};
/* Socket options */
@@ -35,6 +51,7 @@ struct af_alg_iv {
#define ALG_SET_OP 3
#define ALG_SET_AEAD_ASSOCLEN 4
#define ALG_SET_AEAD_AUTHSIZE 5
+#define ALG_SET_DRBG_ENTROPY 6
/* Operations */
#define ALG_OP_DECRYPT 0
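The allocation recipe in the comment above, spelled out as a sketch: bind an
AF_ALG socket using struct sockaddr_alg_new with an arbitrarily long
algorithm name.

#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/if_alg.h>

static int alg_bind(const char *type, const char *name)
{
	int fd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (fd < 0)
		return -1;

	/* sizeof(struct sockaddr_alg_new) + strlen(algname) + 1, as above */
	size_t len = sizeof(struct sockaddr_alg_new) + strlen(name) + 1;
	struct sockaddr_alg_new *sa = calloc(1, len);
	if (!sa)
		goto err;
	sa->salg_family = AF_ALG;
	strncpy((char *)sa->salg_type, type, sizeof(sa->salg_type) - 1);
	strcpy((char *)sa->salg_name, name);	/* copies the NUL terminator too */

	if (bind(fd, (struct sockaddr *)sa, len) < 0) {
		free(sa);
		goto err;
	}
	free(sa);
	return fd;
err:
	close(fd);
	return -1;
}

For instance, alg_bind("hash", "sha256") returns a transform socket ready for
accept() and hashing.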
diff --git a/include/uapi/linux/if_arcnet.h b/include/uapi/linux/if_arcnet.h
index 683878036d76..b122cfac7128 100644
--- a/include/uapi/linux/if_arcnet.h
+++ b/include/uapi/linux/if_arcnet.h
@@ -60,7 +60,7 @@ struct arc_rfc1201 {
__u8 proto; /* protocol ID field - varies */
__u8 split_flag; /* for use with split packets */
__be16 sequence; /* sequence number */
- __u8 payload[0]; /* space remaining in packet (504 bytes)*/
+ __u8 payload[]; /* space remaining in packet (504 bytes)*/
};
#define RFC1201_HDR_SIZE 4
@@ -69,7 +69,7 @@ struct arc_rfc1201 {
*/
struct arc_rfc1051 {
__u8 proto; /* ARC_P_RFC1051_ARP/RFC1051_IP */
- __u8 payload[0]; /* 507 bytes */
+ __u8 payload[]; /* 507 bytes */
};
#define RFC1051_HDR_SIZE 1
@@ -80,7 +80,7 @@ struct arc_rfc1051 {
struct arc_eth_encap {
__u8 proto; /* Always ARC_P_ETHER */
struct ethhdr eth; /* standard ethernet header (yuck!) */
- __u8 payload[0]; /* 493 bytes */
+ __u8 payload[]; /* 493 bytes */
};
#define ETH_ENCAP_HDR_SIZE 14
diff --git a/include/uapi/linux/if_arp.h b/include/uapi/linux/if_arp.h
index c3cc5a9e5eaf..4783af9fe520 100644
--- a/include/uapi/linux/if_arp.h
+++ b/include/uapi/linux/if_arp.h
@@ -54,6 +54,7 @@
#define ARPHRD_X25 271 /* CCITT X.25 */
#define ARPHRD_HWX25 272 /* Boards with X.25 in firmware */
#define ARPHRD_CAN 280 /* Controller Area Network */
+#define ARPHRD_MCTP 290
#define ARPHRD_PPP 512
#define ARPHRD_CISCO 513 /* Cisco HDLC */
#define ARPHRD_HDLC ARPHRD_CISCO
diff --git a/include/uapi/linux/if_bonding.h b/include/uapi/linux/if_bonding.h
index 45f3750aa861..d174914a837d 100644
--- a/include/uapi/linux/if_bonding.h
+++ b/include/uapi/linux/if_bonding.h
@@ -94,6 +94,7 @@
#define BOND_XMIT_POLICY_LAYER23 2 /* layer 2+3 (IP ^ MAC) */
#define BOND_XMIT_POLICY_ENCAP23 3 /* encapsulated layer 2+3 */
#define BOND_XMIT_POLICY_ENCAP34 4 /* encapsulated layer 3+4 */
+#define BOND_XMIT_POLICY_VLAN_SRCMAC 5 /* vlan + source MAC */
/* 802.3ad port state definitions (43.4.2.2 in the 802.3ad standard) */
#define LACP_STATE_LACP_ACTIVITY 0x1
@@ -152,14 +153,3 @@ enum {
#define BOND_3AD_STAT_MAX (__BOND_3AD_STAT_MAX - 1)
#endif /* _LINUX_IF_BONDING_H */
-
-/*
- * Local variables:
- * version-control: t
- * kept-new-versions: 5
- * c-indent-level: 8
- * c-basic-offset: 8
- * tab-width: 8
- * End:
- */
-
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 42f7ca38ad80..a86a7e7b811f 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -120,6 +120,9 @@ enum {
IFLA_BRIDGE_MODE,
IFLA_BRIDGE_VLAN_INFO,
IFLA_BRIDGE_VLAN_TUNNEL_INFO,
+ IFLA_BRIDGE_MRP,
+ IFLA_BRIDGE_CFM,
+ IFLA_BRIDGE_MST,
__IFLA_BRIDGE_MAX,
};
#define IFLA_BRIDGE_MAX (__IFLA_BRIDGE_MAX - 1)
@@ -157,6 +160,315 @@ struct bridge_vlan_xstats {
__u32 pad2;
};
+enum {
+ IFLA_BRIDGE_MRP_UNSPEC,
+ IFLA_BRIDGE_MRP_INSTANCE,
+ IFLA_BRIDGE_MRP_PORT_STATE,
+ IFLA_BRIDGE_MRP_PORT_ROLE,
+ IFLA_BRIDGE_MRP_RING_STATE,
+ IFLA_BRIDGE_MRP_RING_ROLE,
+ IFLA_BRIDGE_MRP_START_TEST,
+ IFLA_BRIDGE_MRP_INFO,
+ IFLA_BRIDGE_MRP_IN_ROLE,
+ IFLA_BRIDGE_MRP_IN_STATE,
+ IFLA_BRIDGE_MRP_START_IN_TEST,
+ __IFLA_BRIDGE_MRP_MAX,
+};
+
+#define IFLA_BRIDGE_MRP_MAX (__IFLA_BRIDGE_MRP_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_MRP_INSTANCE_UNSPEC,
+ IFLA_BRIDGE_MRP_INSTANCE_RING_ID,
+ IFLA_BRIDGE_MRP_INSTANCE_P_IFINDEX,
+ IFLA_BRIDGE_MRP_INSTANCE_S_IFINDEX,
+ IFLA_BRIDGE_MRP_INSTANCE_PRIO,
+ __IFLA_BRIDGE_MRP_INSTANCE_MAX,
+};
+
+#define IFLA_BRIDGE_MRP_INSTANCE_MAX (__IFLA_BRIDGE_MRP_INSTANCE_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_MRP_PORT_STATE_UNSPEC,
+ IFLA_BRIDGE_MRP_PORT_STATE_STATE,
+ __IFLA_BRIDGE_MRP_PORT_STATE_MAX,
+};
+
+#define IFLA_BRIDGE_MRP_PORT_STATE_MAX (__IFLA_BRIDGE_MRP_PORT_STATE_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_MRP_PORT_ROLE_UNSPEC,
+ IFLA_BRIDGE_MRP_PORT_ROLE_ROLE,
+ __IFLA_BRIDGE_MRP_PORT_ROLE_MAX,
+};
+
+#define IFLA_BRIDGE_MRP_PORT_ROLE_MAX (__IFLA_BRIDGE_MRP_PORT_ROLE_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_MRP_RING_STATE_UNSPEC,
+ IFLA_BRIDGE_MRP_RING_STATE_RING_ID,
+ IFLA_BRIDGE_MRP_RING_STATE_STATE,
+ __IFLA_BRIDGE_MRP_RING_STATE_MAX,
+};
+
+#define IFLA_BRIDGE_MRP_RING_STATE_MAX (__IFLA_BRIDGE_MRP_RING_STATE_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_MRP_RING_ROLE_UNSPEC,
+ IFLA_BRIDGE_MRP_RING_ROLE_RING_ID,
+ IFLA_BRIDGE_MRP_RING_ROLE_ROLE,
+ __IFLA_BRIDGE_MRP_RING_ROLE_MAX,
+};
+
+#define IFLA_BRIDGE_MRP_RING_ROLE_MAX (__IFLA_BRIDGE_MRP_RING_ROLE_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_MRP_START_TEST_UNSPEC,
+ IFLA_BRIDGE_MRP_START_TEST_RING_ID,
+ IFLA_BRIDGE_MRP_START_TEST_INTERVAL,
+ IFLA_BRIDGE_MRP_START_TEST_MAX_MISS,
+ IFLA_BRIDGE_MRP_START_TEST_PERIOD,
+ IFLA_BRIDGE_MRP_START_TEST_MONITOR,
+ __IFLA_BRIDGE_MRP_START_TEST_MAX,
+};
+
+#define IFLA_BRIDGE_MRP_START_TEST_MAX (__IFLA_BRIDGE_MRP_START_TEST_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_MRP_INFO_UNSPEC,
+ IFLA_BRIDGE_MRP_INFO_RING_ID,
+ IFLA_BRIDGE_MRP_INFO_P_IFINDEX,
+ IFLA_BRIDGE_MRP_INFO_S_IFINDEX,
+ IFLA_BRIDGE_MRP_INFO_PRIO,
+ IFLA_BRIDGE_MRP_INFO_RING_STATE,
+ IFLA_BRIDGE_MRP_INFO_RING_ROLE,
+ IFLA_BRIDGE_MRP_INFO_TEST_INTERVAL,
+ IFLA_BRIDGE_MRP_INFO_TEST_MAX_MISS,
+ IFLA_BRIDGE_MRP_INFO_TEST_MONITOR,
+ IFLA_BRIDGE_MRP_INFO_I_IFINDEX,
+ IFLA_BRIDGE_MRP_INFO_IN_STATE,
+ IFLA_BRIDGE_MRP_INFO_IN_ROLE,
+ IFLA_BRIDGE_MRP_INFO_IN_TEST_INTERVAL,
+ IFLA_BRIDGE_MRP_INFO_IN_TEST_MAX_MISS,
+ __IFLA_BRIDGE_MRP_INFO_MAX,
+};
+
+#define IFLA_BRIDGE_MRP_INFO_MAX (__IFLA_BRIDGE_MRP_INFO_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_MRP_IN_STATE_UNSPEC,
+ IFLA_BRIDGE_MRP_IN_STATE_IN_ID,
+ IFLA_BRIDGE_MRP_IN_STATE_STATE,
+ __IFLA_BRIDGE_MRP_IN_STATE_MAX,
+};
+
+#define IFLA_BRIDGE_MRP_IN_STATE_MAX (__IFLA_BRIDGE_MRP_IN_STATE_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_MRP_IN_ROLE_UNSPEC,
+ IFLA_BRIDGE_MRP_IN_ROLE_RING_ID,
+ IFLA_BRIDGE_MRP_IN_ROLE_IN_ID,
+ IFLA_BRIDGE_MRP_IN_ROLE_ROLE,
+ IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX,
+ __IFLA_BRIDGE_MRP_IN_ROLE_MAX,
+};
+
+#define IFLA_BRIDGE_MRP_IN_ROLE_MAX (__IFLA_BRIDGE_MRP_IN_ROLE_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_MRP_START_IN_TEST_UNSPEC,
+ IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID,
+ IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL,
+ IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS,
+ IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD,
+ __IFLA_BRIDGE_MRP_START_IN_TEST_MAX,
+};
+
+#define IFLA_BRIDGE_MRP_START_IN_TEST_MAX (__IFLA_BRIDGE_MRP_START_IN_TEST_MAX - 1)
+
+struct br_mrp_instance {
+ __u32 ring_id;
+ __u32 p_ifindex;
+ __u32 s_ifindex;
+ __u16 prio;
+};
+
+struct br_mrp_ring_state {
+ __u32 ring_id;
+ __u32 ring_state;
+};
+
+struct br_mrp_ring_role {
+ __u32 ring_id;
+ __u32 ring_role;
+};
+
+struct br_mrp_start_test {
+ __u32 ring_id;
+ __u32 interval;
+ __u32 max_miss;
+ __u32 period;
+ __u32 monitor;
+};
+
+struct br_mrp_in_state {
+ __u32 in_state;
+ __u16 in_id;
+};
+
+struct br_mrp_in_role {
+ __u32 ring_id;
+ __u32 in_role;
+ __u32 i_ifindex;
+ __u16 in_id;
+};
+
+struct br_mrp_start_in_test {
+ __u32 interval;
+ __u32 max_miss;
+ __u32 period;
+ __u16 in_id;
+};
+
+enum {
+ IFLA_BRIDGE_CFM_UNSPEC,
+ IFLA_BRIDGE_CFM_MEP_CREATE,
+ IFLA_BRIDGE_CFM_MEP_DELETE,
+ IFLA_BRIDGE_CFM_MEP_CONFIG,
+ IFLA_BRIDGE_CFM_CC_CONFIG,
+ IFLA_BRIDGE_CFM_CC_PEER_MEP_ADD,
+ IFLA_BRIDGE_CFM_CC_PEER_MEP_REMOVE,
+ IFLA_BRIDGE_CFM_CC_RDI,
+ IFLA_BRIDGE_CFM_CC_CCM_TX,
+ IFLA_BRIDGE_CFM_MEP_CREATE_INFO,
+ IFLA_BRIDGE_CFM_MEP_CONFIG_INFO,
+ IFLA_BRIDGE_CFM_CC_CONFIG_INFO,
+ IFLA_BRIDGE_CFM_CC_RDI_INFO,
+ IFLA_BRIDGE_CFM_CC_CCM_TX_INFO,
+ IFLA_BRIDGE_CFM_CC_PEER_MEP_INFO,
+ IFLA_BRIDGE_CFM_MEP_STATUS_INFO,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_INFO,
+ __IFLA_BRIDGE_CFM_MAX,
+};
+
+#define IFLA_BRIDGE_CFM_MAX (__IFLA_BRIDGE_CFM_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_CFM_MEP_CREATE_UNSPEC,
+ IFLA_BRIDGE_CFM_MEP_CREATE_INSTANCE,
+ IFLA_BRIDGE_CFM_MEP_CREATE_DOMAIN,
+ IFLA_BRIDGE_CFM_MEP_CREATE_DIRECTION,
+ IFLA_BRIDGE_CFM_MEP_CREATE_IFINDEX,
+ __IFLA_BRIDGE_CFM_MEP_CREATE_MAX,
+};
+
+#define IFLA_BRIDGE_CFM_MEP_CREATE_MAX (__IFLA_BRIDGE_CFM_MEP_CREATE_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_CFM_MEP_DELETE_UNSPEC,
+ IFLA_BRIDGE_CFM_MEP_DELETE_INSTANCE,
+ __IFLA_BRIDGE_CFM_MEP_DELETE_MAX,
+};
+
+#define IFLA_BRIDGE_CFM_MEP_DELETE_MAX (__IFLA_BRIDGE_CFM_MEP_DELETE_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_CFM_MEP_CONFIG_UNSPEC,
+ IFLA_BRIDGE_CFM_MEP_CONFIG_INSTANCE,
+ IFLA_BRIDGE_CFM_MEP_CONFIG_UNICAST_MAC,
+ IFLA_BRIDGE_CFM_MEP_CONFIG_MDLEVEL,
+ IFLA_BRIDGE_CFM_MEP_CONFIG_MEPID,
+ __IFLA_BRIDGE_CFM_MEP_CONFIG_MAX,
+};
+
+#define IFLA_BRIDGE_CFM_MEP_CONFIG_MAX (__IFLA_BRIDGE_CFM_MEP_CONFIG_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_CFM_CC_CONFIG_UNSPEC,
+ IFLA_BRIDGE_CFM_CC_CONFIG_INSTANCE,
+ IFLA_BRIDGE_CFM_CC_CONFIG_ENABLE,
+ IFLA_BRIDGE_CFM_CC_CONFIG_EXP_INTERVAL,
+ IFLA_BRIDGE_CFM_CC_CONFIG_EXP_MAID,
+ __IFLA_BRIDGE_CFM_CC_CONFIG_MAX,
+};
+
+#define IFLA_BRIDGE_CFM_CC_CONFIG_MAX (__IFLA_BRIDGE_CFM_CC_CONFIG_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_CFM_CC_PEER_MEP_UNSPEC,
+ IFLA_BRIDGE_CFM_CC_PEER_MEP_INSTANCE,
+ IFLA_BRIDGE_CFM_CC_PEER_MEPID,
+ __IFLA_BRIDGE_CFM_CC_PEER_MEP_MAX,
+};
+
+#define IFLA_BRIDGE_CFM_CC_PEER_MEP_MAX (__IFLA_BRIDGE_CFM_CC_PEER_MEP_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_CFM_CC_RDI_UNSPEC,
+ IFLA_BRIDGE_CFM_CC_RDI_INSTANCE,
+ IFLA_BRIDGE_CFM_CC_RDI_RDI,
+ __IFLA_BRIDGE_CFM_CC_RDI_MAX,
+};
+
+#define IFLA_BRIDGE_CFM_CC_RDI_MAX (__IFLA_BRIDGE_CFM_CC_RDI_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_CFM_CC_CCM_TX_UNSPEC,
+ IFLA_BRIDGE_CFM_CC_CCM_TX_INSTANCE,
+ IFLA_BRIDGE_CFM_CC_CCM_TX_DMAC,
+ IFLA_BRIDGE_CFM_CC_CCM_TX_SEQ_NO_UPDATE,
+ IFLA_BRIDGE_CFM_CC_CCM_TX_PERIOD,
+ IFLA_BRIDGE_CFM_CC_CCM_TX_IF_TLV,
+ IFLA_BRIDGE_CFM_CC_CCM_TX_IF_TLV_VALUE,
+ IFLA_BRIDGE_CFM_CC_CCM_TX_PORT_TLV,
+ IFLA_BRIDGE_CFM_CC_CCM_TX_PORT_TLV_VALUE,
+ __IFLA_BRIDGE_CFM_CC_CCM_TX_MAX,
+};
+
+#define IFLA_BRIDGE_CFM_CC_CCM_TX_MAX (__IFLA_BRIDGE_CFM_CC_CCM_TX_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_CFM_MEP_STATUS_UNSPEC,
+ IFLA_BRIDGE_CFM_MEP_STATUS_INSTANCE,
+ IFLA_BRIDGE_CFM_MEP_STATUS_OPCODE_UNEXP_SEEN,
+ IFLA_BRIDGE_CFM_MEP_STATUS_VERSION_UNEXP_SEEN,
+ IFLA_BRIDGE_CFM_MEP_STATUS_RX_LEVEL_LOW_SEEN,
+ __IFLA_BRIDGE_CFM_MEP_STATUS_MAX,
+};
+
+#define IFLA_BRIDGE_CFM_MEP_STATUS_MAX (__IFLA_BRIDGE_CFM_MEP_STATUS_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_UNSPEC,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_INSTANCE,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_PEER_MEPID,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_CCM_DEFECT,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_RDI,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_PORT_TLV_VALUE,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_IF_TLV_VALUE,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_SEEN,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_TLV_SEEN,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_SEQ_UNEXP_SEEN,
+ __IFLA_BRIDGE_CFM_CC_PEER_STATUS_MAX,
+};
+
+#define IFLA_BRIDGE_CFM_CC_PEER_STATUS_MAX (__IFLA_BRIDGE_CFM_CC_PEER_STATUS_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_MST_UNSPEC,
+ IFLA_BRIDGE_MST_ENTRY,
+ __IFLA_BRIDGE_MST_MAX,
+};
+#define IFLA_BRIDGE_MST_MAX (__IFLA_BRIDGE_MST_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_MST_ENTRY_UNSPEC,
+ IFLA_BRIDGE_MST_ENTRY_MSTI,
+ IFLA_BRIDGE_MST_ENTRY_STATE,
+ __IFLA_BRIDGE_MST_ENTRY_MAX,
+};
+#define IFLA_BRIDGE_MST_ENTRY_MAX (__IFLA_BRIDGE_MST_ENTRY_MAX - 1)
+
struct bridge_stp_xstats {
__u64 transition_blk;
__u64 transition_fwd;
@@ -174,15 +486,31 @@ struct br_vlan_msg {
__u32 ifindex;
};
+enum {
+ BRIDGE_VLANDB_DUMP_UNSPEC,
+ BRIDGE_VLANDB_DUMP_FLAGS,
+ __BRIDGE_VLANDB_DUMP_MAX,
+};
+#define BRIDGE_VLANDB_DUMP_MAX (__BRIDGE_VLANDB_DUMP_MAX - 1)
+
+/* flags used in BRIDGE_VLANDB_DUMP_FLAGS attribute to affect dumps */
+#define BRIDGE_VLANDB_DUMPF_STATS (1 << 0) /* Include stats in the dump */
+#define BRIDGE_VLANDB_DUMPF_GLOBAL (1 << 1) /* Dump global vlan options only */
+
/* Bridge vlan RTM attributes
* [BRIDGE_VLANDB_ENTRY] = {
* [BRIDGE_VLANDB_ENTRY_INFO]
* ...
* }
+ * [BRIDGE_VLANDB_GLOBAL_OPTIONS] = {
+ * [BRIDGE_VLANDB_GOPTS_ID]
+ * ...
+ * }
*/
enum {
BRIDGE_VLANDB_UNSPEC,
BRIDGE_VLANDB_ENTRY,
+ BRIDGE_VLANDB_GLOBAL_OPTIONS,
__BRIDGE_VLANDB_MAX,
};
#define BRIDGE_VLANDB_MAX (__BRIDGE_VLANDB_MAX - 1)
@@ -192,10 +520,71 @@ enum {
BRIDGE_VLANDB_ENTRY_INFO,
BRIDGE_VLANDB_ENTRY_RANGE,
BRIDGE_VLANDB_ENTRY_STATE,
+ BRIDGE_VLANDB_ENTRY_TUNNEL_INFO,
+ BRIDGE_VLANDB_ENTRY_STATS,
+ BRIDGE_VLANDB_ENTRY_MCAST_ROUTER,
__BRIDGE_VLANDB_ENTRY_MAX,
};
#define BRIDGE_VLANDB_ENTRY_MAX (__BRIDGE_VLANDB_ENTRY_MAX - 1)
+/* [BRIDGE_VLANDB_ENTRY] = {
+ * [BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = {
+ * [BRIDGE_VLANDB_TINFO_ID]
+ * ...
+ * }
+ * }
+ */
+enum {
+ BRIDGE_VLANDB_TINFO_UNSPEC,
+ BRIDGE_VLANDB_TINFO_ID,
+ BRIDGE_VLANDB_TINFO_CMD,
+ __BRIDGE_VLANDB_TINFO_MAX,
+};
+#define BRIDGE_VLANDB_TINFO_MAX (__BRIDGE_VLANDB_TINFO_MAX - 1)
+
+/* [BRIDGE_VLANDB_ENTRY] = {
+ * [BRIDGE_VLANDB_ENTRY_STATS] = {
+ * [BRIDGE_VLANDB_STATS_RX_BYTES]
+ * ...
+ * }
+ * ...
+ * }
+ */
+enum {
+ BRIDGE_VLANDB_STATS_UNSPEC,
+ BRIDGE_VLANDB_STATS_RX_BYTES,
+ BRIDGE_VLANDB_STATS_RX_PACKETS,
+ BRIDGE_VLANDB_STATS_TX_BYTES,
+ BRIDGE_VLANDB_STATS_TX_PACKETS,
+ BRIDGE_VLANDB_STATS_PAD,
+ __BRIDGE_VLANDB_STATS_MAX,
+};
+#define BRIDGE_VLANDB_STATS_MAX (__BRIDGE_VLANDB_STATS_MAX - 1)
+
+enum {
+ BRIDGE_VLANDB_GOPTS_UNSPEC,
+ BRIDGE_VLANDB_GOPTS_ID,
+ BRIDGE_VLANDB_GOPTS_RANGE,
+ BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING,
+ BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION,
+ BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION,
+ BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT,
+ BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT,
+ BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL,
+ BRIDGE_VLANDB_GOPTS_PAD,
+ BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL,
+ BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL,
+ BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL,
+ BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL,
+ BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL,
+ BRIDGE_VLANDB_GOPTS_MCAST_QUERIER,
+ BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS,
+ BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_STATE,
+ BRIDGE_VLANDB_GOPTS_MSTI,
+ __BRIDGE_VLANDB_GOPTS_MAX
+};
+#define BRIDGE_VLANDB_GOPTS_MAX (__BRIDGE_VLANDB_GOPTS_MAX - 1)
+
/* Bridge multicast database attributes
* [MDBA_MDB] = {
* [MDBA_MDB_ENTRY] = {
@@ -238,10 +627,33 @@ enum {
enum {
MDBA_MDB_EATTR_UNSPEC,
MDBA_MDB_EATTR_TIMER,
+ MDBA_MDB_EATTR_SRC_LIST,
+ MDBA_MDB_EATTR_GROUP_MODE,
+ MDBA_MDB_EATTR_SOURCE,
+ MDBA_MDB_EATTR_RTPROT,
__MDBA_MDB_EATTR_MAX
};
#define MDBA_MDB_EATTR_MAX (__MDBA_MDB_EATTR_MAX - 1)
+/* per mdb entry source */
+enum {
+ MDBA_MDB_SRCLIST_UNSPEC,
+ MDBA_MDB_SRCLIST_ENTRY,
+ __MDBA_MDB_SRCLIST_MAX
+};
+#define MDBA_MDB_SRCLIST_MAX (__MDBA_MDB_SRCLIST_MAX - 1)
+
+/* per mdb entry per source attributes
+ * these are embedded in MDBA_MDB_SRCLIST_ENTRY
+ */
+enum {
+ MDBA_MDB_SRCATTR_UNSPEC,
+ MDBA_MDB_SRCATTR_ADDRESS,
+ MDBA_MDB_SRCATTR_TIMER,
+ __MDBA_MDB_SRCATTR_MAX
+};
+#define MDBA_MDB_SRCATTR_MAX (__MDBA_MDB_SRCATTR_MAX - 1)
+
/* multicast router types */
enum {
MDB_RTR_TYPE_DISABLED,
@@ -262,6 +674,9 @@ enum {
MDBA_ROUTER_PATTR_UNSPEC,
MDBA_ROUTER_PATTR_TIMER,
MDBA_ROUTER_PATTR_TYPE,
+ MDBA_ROUTER_PATTR_INET_TIMER,
+ MDBA_ROUTER_PATTR_INET6_TIMER,
+ MDBA_ROUTER_PATTR_VID,
__MDBA_ROUTER_PATTR_MAX
};
#define MDBA_ROUTER_PATTR_MAX (__MDBA_ROUTER_PATTR_MAX - 1)
@@ -278,12 +693,15 @@ struct br_mdb_entry {
__u8 state;
#define MDB_FLAGS_OFFLOAD (1 << 0)
#define MDB_FLAGS_FAST_LEAVE (1 << 1)
+#define MDB_FLAGS_STAR_EXCL (1 << 2)
+#define MDB_FLAGS_BLOCKED (1 << 3)
__u8 flags;
__u16 vid;
struct {
union {
__be32 ip4;
struct in6_addr ip6;
+ unsigned char mac_addr[ETH_ALEN];
} u;
__be16 proto;
} addr;
@@ -292,10 +710,23 @@ struct br_mdb_entry {
enum {
MDBA_SET_ENTRY_UNSPEC,
MDBA_SET_ENTRY,
+ MDBA_SET_ENTRY_ATTRS,
__MDBA_SET_ENTRY_MAX,
};
#define MDBA_SET_ENTRY_MAX (__MDBA_SET_ENTRY_MAX - 1)
+/* [MDBA_SET_ENTRY_ATTRS] = {
+ * [MDBE_ATTR_xxx]
+ * ...
+ * }
+ */
+enum {
+ MDBE_ATTR_UNSPEC,
+ MDBE_ATTR_SOURCE,
+ __MDBE_ATTR_MAX,
+};
+#define MDBE_ATTR_MAX (__MDBE_ATTR_MAX - 1)
+
/* Embedded inside LINK_XSTATS_TYPE_BRIDGE */
enum {
BRIDGE_XSTATS_UNSPEC,
@@ -337,12 +768,15 @@ struct br_mcast_stats {
/* bridge boolean options
* BR_BOOLOPT_NO_LL_LEARN - disable learning from link-local packets
+ * BR_BOOLOPT_MCAST_VLAN_SNOOPING - control vlan multicast snooping
*
* IMPORTANT: if adding a new option do not forget to handle
* it in br_boolopt_toggle/get and bridge sysfs
*/
enum br_boolopt_id {
BR_BOOLOPT_NO_LL_LEARN,
+ BR_BOOLOPT_MCAST_VLAN_SNOOPING,
+ BR_BOOLOPT_MST_ENABLE,
BR_BOOLOPT_MAX
};
@@ -355,4 +789,17 @@ struct br_boolopt_multi {
__u32 optval;
__u32 optmask;
};
+
+enum {
+ BRIDGE_QUERIER_UNSPEC,
+ BRIDGE_QUERIER_IP_ADDRESS,
+ BRIDGE_QUERIER_IP_PORT,
+ BRIDGE_QUERIER_IP_OTHER_TIMER,
+ BRIDGE_QUERIER_PAD,
+ BRIDGE_QUERIER_IPV6_ADDRESS,
+ BRIDGE_QUERIER_IPV6_PORT,
+ BRIDGE_QUERIER_IPV6_OTHER_TIMER,
+ __BRIDGE_QUERIER_MAX
+};
+#define BRIDGE_QUERIER_MAX (__BRIDGE_QUERIER_MAX - 1)
#endif /* _UAPI_LINUX_IF_BRIDGE_H */
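A small sketch of how struct br_boolopt_multi is meant to be filled: optmask
selects which boolean options to change and optval carries their new values
at the same bit positions, so options outside the mask keep their state.

#include <linux/if_bridge.h>

/* Enable per-vlan multicast snooping; every other boolean option is left
 * untouched because its bit is clear in optmask.
 */
static struct br_boolopt_multi mcast_vlan_snooping_on(void)
{
	struct br_boolopt_multi bm = {
		.optmask = 1 << BR_BOOLOPT_MCAST_VLAN_SNOOPING,
		.optval  = 1 << BR_BOOLOPT_MCAST_VLAN_SNOOPING,
	};
	return bm;
}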
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index f6ceb2e63d1e..69e0457eb200 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -86,18 +86,23 @@
* over Ethernet
*/
#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
+#define ETH_P_PROFINET 0x8892 /* PROFINET */
+#define ETH_P_REALTEK 0x8899 /* Multiple proprietary protocols */
#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */
+#define ETH_P_ETHERCAT 0x88A4 /* EtherCAT */
#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */
#define ETH_P_802_EX1 0x88B5 /* 802.1 Local Experimental 1. */
#define ETH_P_PREAUTH 0x88C7 /* 802.11 Preauthentication */
#define ETH_P_TIPC 0x88CA /* TIPC */
#define ETH_P_LLDP 0x88CC /* Link Layer Discovery Protocol */
+#define ETH_P_MRP 0x88E3 /* Media Redundancy Protocol */
#define ETH_P_MACSEC 0x88E5 /* 802.1ae MACsec */
#define ETH_P_8021AH 0x88E7 /* 802.1ah Backbone Service Tag */
#define ETH_P_MVRP 0x88F5 /* 802.1Q MVRP */
#define ETH_P_1588 0x88F7 /* IEEE 1588 Timesync */
#define ETH_P_NCSI 0x88F8 /* NCSI protocol */
#define ETH_P_PRP 0x88FB /* IEC 62439-3 PRP/HSRv0 */
+#define ETH_P_CFM 0x8902 /* Connectivity Fault Management */
#define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */
#define ETH_P_IBOE 0x8915 /* Infiniband over Ethernet */
#define ETH_P_TDLS 0x890D /* TDLS */
@@ -111,10 +116,11 @@
#define ETH_P_QINQ3 0x9300 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_DSA_8021Q 0xDADB /* Fake VLAN Header for DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
+#define ETH_P_DSA_A5PSW 0xE001 /* A5PSW Tag Value [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_IFE 0xED3E /* ForCES inter-FE LFB type */
#define ETH_P_AF_IUCV 0xFBFB /* IBM af_iucv [ NOT AN OFFICIALLY REGISTERED ID ] */
-#define ETH_P_802_3_MIN 0x0600 /* If the value in the ethernet type is less than this value
+#define ETH_P_802_3_MIN	0x0600		/* If the value in the ethernet type is at least this value
* then the frame is Ethernet II. Else it is 802.3 */
/*
@@ -132,6 +138,7 @@
#define ETH_P_LOCALTALK 0x0009 /* Localtalk pseudo type */
#define ETH_P_CAN 0x000C /* CAN: Controller Area Network */
#define ETH_P_CANFD 0x000D /* CANFD: CAN flexible data rate*/
+#define ETH_P_CANXL 0x000E /* CANXL: eXtended frame Length */
#define ETH_P_PPPTALK 0x0010 /* Dummy type for Atalk over PPP*/
#define ETH_P_TR_802_2 0x0011 /* 802.2 frames */
#define ETH_P_MOBITEX 0x0015 /* Mobitex (kaz@cafe.net) */
@@ -149,6 +156,9 @@
#define ETH_P_MAP 0x00F9 /* Qualcomm multiplexing and
* aggregation protocol
*/
+#define ETH_P_MCTP 0x00FA /* Management component transport
+ * protocol packets
+ */
/*
* This is an Ethernet frame header.
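The ETH_P_802_3_MIN rule reads naturally as a one-line check; this standalone
helper is illustrative and not taken from the kernel sources.

#include <stdbool.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>

static bool is_ethernet_ii(const struct ethhdr *eth)
{
	/* EtherType values of at least 0x0600 mean Ethernet II; smaller
	 * values are 802.3 length fields
	 */
	return ntohs(eth->h_proto) >= ETH_P_802_3_MIN;
}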
diff --git a/include/uapi/linux/if_fddi.h b/include/uapi/linux/if_fddi.h
index 7239aa9c0766..8df2d9934bcd 100644
--- a/include/uapi/linux/if_fddi.h
+++ b/include/uapi/linux/if_fddi.h
@@ -9,7 +9,7 @@
* Version: @(#)if_fddi.h 1.0.3 Oct 6 2018
*
* Author: Lawrence V. Stefani, <stefani@yahoo.com>
- * Maintainer: Maciej W. Rozycki, <macro@linux-mips.org>
+ * Maintainer: Maciej W. Rozycki, <macro@orcam.me.uk>
*
* if_fddi.h is based on previous if_ether.h and if_tr.h work by
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
diff --git a/include/uapi/linux/if_frad.h b/include/uapi/linux/if_frad.h
deleted file mode 100644
index 3c6ee85f6262..000000000000
--- a/include/uapi/linux/if_frad.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-/*
- * DLCI/FRAD Definitions for Frame Relay Access Devices. DLCI devices are
- * created for each DLCI associated with a FRAD. The FRAD driver
- * is not truly a network device, but the lower level device
- * handler. This allows other FRAD manufacturers to use the DLCI
- * code, including its RFC1490 encapsulation alongside the current
- * implementation for the Sangoma cards.
- *
- * Version: @(#)if_ifrad.h 0.15 31 Mar 96
- *
- * Author: Mike McLagan <mike.mclagan@linux.org>
- *
- * Changes:
- * 0.15 Mike McLagan changed structure defs (packed)
- * re-arranged flags
- * added DLCI_RET vars
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _UAPI_FRAD_H_
-#define _UAPI_FRAD_H_
-
-#include <linux/if.h>
-
-/* Structures and constants associated with the DLCI device driver */
-
-struct dlci_add
-{
- char devname[IFNAMSIZ];
- short dlci;
-};
-
-#define DLCI_GET_CONF (SIOCDEVPRIVATE + 2)
-#define DLCI_SET_CONF (SIOCDEVPRIVATE + 3)
-
-/*
- * These are related to the Sangoma SDLA and should remain in order.
- * Code within the SDLA module is based on the specifics of this
- * structure. Change at your own peril.
- */
-struct dlci_conf {
- short flags;
- short CIR_fwd;
- short Bc_fwd;
- short Be_fwd;
- short CIR_bwd;
- short Bc_bwd;
- short Be_bwd;
-
-/* these are part of the status read */
- short Tc_fwd;
- short Tc_bwd;
- short Tf_max;
- short Tb_max;
-
-/* add any new fields here above is a mirror of sdla_dlci_conf */
-};
-
-#define DLCI_GET_SLAVE (SIOCDEVPRIVATE + 4)
-
-/* configuration flags for DLCI */
-#define DLCI_IGNORE_CIR_OUT 0x0001
-#define DLCI_ACCOUNT_CIR_IN 0x0002
-#define DLCI_BUFFER_IF 0x0008
-
-#define DLCI_VALID_FLAGS 0x000B
-
-/* defines for the actual Frame Relay hardware */
-#define FRAD_GET_CONF (SIOCDEVPRIVATE)
-#define FRAD_SET_CONF (SIOCDEVPRIVATE + 1)
-
-#define FRAD_LAST_IOCTL FRAD_SET_CONF
-
-/*
- * Based on the setup for the Sangoma SDLA. If changes are
- * necessary to this structure, a routine will need to be
- * added to that module to copy fields.
- */
-struct frad_conf
-{
- short station;
- short flags;
- short kbaud;
- short clocking;
- short mtu;
- short T391;
- short T392;
- short N391;
- short N392;
- short N393;
- short CIR_fwd;
- short Bc_fwd;
- short Be_fwd;
- short CIR_bwd;
- short Bc_bwd;
- short Be_bwd;
-
-/* Add new fields here, above is a mirror of the sdla_conf */
-
-};
-
-#define FRAD_STATION_CPE 0x0000
-#define FRAD_STATION_NODE 0x0001
-
-#define FRAD_TX_IGNORE_CIR 0x0001
-#define FRAD_RX_ACCOUNT_CIR 0x0002
-#define FRAD_DROP_ABORTED 0x0004
-#define FRAD_BUFFERIF 0x0008
-#define FRAD_STATS 0x0010
-#define FRAD_MCI 0x0100
-#define FRAD_AUTODLCI 0x8000
-#define FRAD_VALID_FLAGS 0x811F
-
-#define FRAD_CLOCK_INT 0x0001
-#define FRAD_CLOCK_EXT 0x0000
-
-
-#endif /* _UAPI_FRAD_H_ */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 024af2d1d0af..5e7a1041df3a 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -7,24 +7,23 @@
/* This struct should be in sync with struct rtnl_link_stats64 */
struct rtnl_link_stats {
- __u32 rx_packets; /* total packets received */
- __u32 tx_packets; /* total packets transmitted */
- __u32 rx_bytes; /* total bytes received */
- __u32 tx_bytes; /* total bytes transmitted */
- __u32 rx_errors; /* bad packets received */
- __u32 tx_errors; /* packet transmit problems */
- __u32 rx_dropped; /* no space in linux buffers */
- __u32 tx_dropped; /* no space available in linux */
- __u32 multicast; /* multicast packets received */
+ __u32 rx_packets;
+ __u32 tx_packets;
+ __u32 rx_bytes;
+ __u32 tx_bytes;
+ __u32 rx_errors;
+ __u32 tx_errors;
+ __u32 rx_dropped;
+ __u32 tx_dropped;
+ __u32 multicast;
__u32 collisions;
-
/* detailed rx_errors: */
__u32 rx_length_errors;
- __u32 rx_over_errors; /* receiver ring buff overflow */
- __u32 rx_crc_errors; /* recved pkt with crc error */
- __u32 rx_frame_errors; /* recv'd frame alignment error */
- __u32 rx_fifo_errors; /* recv'r fifo overrun */
- __u32 rx_missed_errors; /* receiver missed packet */
+ __u32 rx_over_errors;
+ __u32 rx_crc_errors;
+ __u32 rx_frame_errors;
+ __u32 rx_fifo_errors;
+ __u32 rx_missed_errors;
/* detailed tx_errors */
__u32 tx_aborted_errors;
@@ -37,29 +36,204 @@ struct rtnl_link_stats {
__u32 rx_compressed;
__u32 tx_compressed;
- __u32 rx_nohandler; /* dropped, no handler found */
+ __u32 rx_nohandler;
};
-/* The main device statistics structure */
+/**
+ * struct rtnl_link_stats64 - The main device statistics structure.
+ *
+ * @rx_packets: Number of good packets received by the interface.
+ * For hardware interfaces this counts all good packets received from the
+ * device by the host, including packets which the host had to drop at
+ * various stages of processing (even in the driver).
+ *
+ * @tx_packets: Number of packets successfully transmitted.
+ * For hardware interfaces this counts packets which the host was able to
+ * successfully hand over to the device. This does not necessarily mean the
+ * packets were transmitted out of the device, only that the device
+ * acknowledged copying them out of host memory.
+ *
+ * @rx_bytes: Number of good received bytes, corresponding to @rx_packets.
+ *
+ * For IEEE 802.3 devices this should count the length of Ethernet frames
+ * excluding the FCS.
+ *
+ * @tx_bytes: Number of good transmitted bytes, corresponding to @tx_packets.
+ *
+ * For IEEE 802.3 devices this should count the length of Ethernet frames
+ * excluding the FCS.
+ *
+ * @rx_errors: Total number of bad packets received on this network device.
+ * This counter must include events counted by @rx_length_errors,
+ * @rx_crc_errors, @rx_frame_errors and other errors not otherwise
+ * counted.
+ *
+ * @tx_errors: Total number of transmit problems.
+ * This counter must include events counted by @tx_aborted_errors,
+ * @tx_carrier_errors, @tx_fifo_errors, @tx_heartbeat_errors,
+ * @tx_window_errors and other errors not otherwise counted.
+ *
+ * @rx_dropped: Number of packets received but not processed,
+ * e.g. due to lack of resources or unsupported protocol.
+ * For hardware interfaces this counter may include packets discarded
+ * due to L2 address filtering but should not include packets dropped
+ * by the device due to buffer exhaustion which are counted separately in
+ * @rx_missed_errors (since procfs folds those two counters together).
+ *
+ * @tx_dropped: Number of packets dropped on their way to transmission,
+ * e.g. due to lack of resources.
+ *
+ * @multicast: Multicast packets received.
+ * For hardware interfaces this statistic is commonly calculated
+ * at the device level (unlike @rx_packets) and therefore may include
+ * packets which did not reach the host.
+ *
+ * For IEEE 802.3 devices this counter may be equivalent to:
+ *
+ * - 30.3.1.1.21 aMulticastFramesReceivedOK
+ *
+ * @collisions: Number of collisions during packet transmissions.
+ *
+ * @rx_length_errors: Number of packets dropped due to invalid length.
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter should be equivalent to a sum
+ * of the following attributes:
+ *
+ * - 30.3.1.1.23 aInRangeLengthErrors
+ * - 30.3.1.1.24 aOutOfRangeLengthField
+ * - 30.3.1.1.25 aFrameTooLongErrors
+ *
+ * @rx_over_errors: Receiver FIFO overflow event counter.
+ *
+ * Historically the count of overflow events. Such events may be
+ * reported in the receive descriptors or via interrupts, and may
+ * not correspond one-to-one with dropped packets.
+ *
+ * The recommended interpretation for high speed interfaces is the
+ * number of packets dropped because they did not fit into buffers
+ * provided by the host, e.g. packets larger than the MTU, or because
+ * the next buffer in the ring was not available for a scatter transfer.
+ *
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ * This statistic was historically used interchangeably with
+ * @rx_fifo_errors.
+ *
+ * This statistic corresponds to hardware events and is not commonly used
+ * on software devices.
+ *
+ * @rx_crc_errors: Number of packets received with a CRC error.
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter must be equivalent to:
+ *
+ * - 30.3.1.1.6 aFrameCheckSequenceErrors
+ *
+ * @rx_frame_errors: Receiver frame alignment errors.
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter should be equivalent to:
+ *
+ * - 30.3.1.1.7 aAlignmentErrors
+ *
+ * @rx_fifo_errors: Receiver FIFO error counter.
+ *
+ * Historically the count of overflow events. Those events may be
+ * reported in the receive descriptors or via interrupts, and may
+ * not correspond one-to-one with dropped packets.
+ *
+ * This statistic was used interchangeably with @rx_over_errors.
+ * Not recommended for use in drivers for high speed interfaces.
+ *
+ * This statistic is used on software devices, e.g. to count software
+ * packet queue overflow (CAN) or sequencing errors (GRE).
+ *
+ * @rx_missed_errors: Count of packets missed by the host.
+ * Folded into the "drop" counter in `/proc/net/dev`.
+ *
+ * Counts the number of packets dropped by the device due to lack
+ * of buffer space. This usually indicates that the host interface
+ * is slower than the network interface, or that the host is not
+ * keeping up with the receive packet rate.
+ *
+ * This statistic corresponds to hardware events and is not used
+ * on software devices.
+ *
+ * @tx_aborted_errors:
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ * For IEEE 802.3 devices capable of half-duplex operation this counter
+ * must be equivalent to:
+ *
+ * - 30.3.1.1.11 aFramesAbortedDueToXSColls
+ *
+ * High speed interfaces may use this counter as a general device
+ * discard counter.
+ *
+ * @tx_carrier_errors: Number of frame transmission errors due to loss
+ * of carrier during transmission.
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter must be equivalent to:
+ *
+ * - 30.3.1.1.13 aCarrierSenseErrors
+ *
+ * @tx_fifo_errors: Number of frame transmission errors due to device
+ * FIFO underrun / underflow. This condition occurs when the device
+ * begins transmission of a frame but is unable to deliver the
+ * entire frame to the transmitter in time for transmission.
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * @tx_heartbeat_errors: Number of Heartbeat / SQE Test errors for
+ * old half-duplex Ethernet.
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices possibly equivalent to:
+ *
+ * - 30.3.2.1.4 aSQETestErrors
+ *
+ * @tx_window_errors: Number of frame transmission errors due
+ * to late collisions (for Ethernet - after the first 64B of transmission).
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter must be equivalent to:
+ *
+ * - 30.3.1.1.10 aLateCollisions
+ *
+ * @rx_compressed: Number of correctly received compressed packets.
+ * This counter is only meaningful for interfaces which support
+ * packet compression (e.g. CSLIP, PPP).
+ *
+ * @tx_compressed: Number of transmitted compressed packets.
+ * This counter is only meaningful for interfaces which support
+ * packet compression (e.g. CSLIP, PPP).
+ *
+ * @rx_nohandler: Number of packets received on the interface
+ * but dropped by the networking stack because the device is
+ * not designated to receive packets (e.g. backup link in a bond).
+ *
+ * @rx_otherhost_dropped: Number of packets dropped due to mismatch
+ * in destination MAC address.
+ */
struct rtnl_link_stats64 {
- __u64 rx_packets; /* total packets received */
- __u64 tx_packets; /* total packets transmitted */
- __u64 rx_bytes; /* total bytes received */
- __u64 tx_bytes; /* total bytes transmitted */
- __u64 rx_errors; /* bad packets received */
- __u64 tx_errors; /* packet transmit problems */
- __u64 rx_dropped; /* no space in linux buffers */
- __u64 tx_dropped; /* no space available in linux */
- __u64 multicast; /* multicast packets received */
+ __u64 rx_packets;
+ __u64 tx_packets;
+ __u64 rx_bytes;
+ __u64 tx_bytes;
+ __u64 rx_errors;
+ __u64 tx_errors;
+ __u64 rx_dropped;
+ __u64 tx_dropped;
+ __u64 multicast;
__u64 collisions;
/* detailed rx_errors: */
__u64 rx_length_errors;
- __u64 rx_over_errors; /* receiver ring buff overflow */
- __u64 rx_crc_errors; /* recved pkt with crc error */
- __u64 rx_frame_errors; /* recv'd frame alignment error */
- __u64 rx_fifo_errors; /* recv'r fifo overrun */
- __u64 rx_missed_errors; /* receiver missed packet */
+ __u64 rx_over_errors;
+ __u64 rx_crc_errors;
+ __u64 rx_frame_errors;
+ __u64 rx_fifo_errors;
+ __u64 rx_missed_errors;
/* detailed tx_errors */
__u64 tx_aborted_errors;
@@ -71,8 +245,24 @@ struct rtnl_link_stats64 {
/* for cslip etc */
__u64 rx_compressed;
__u64 tx_compressed;
+ __u64 rx_nohandler;
- __u64 rx_nohandler; /* dropped, no handler found */
+ __u64 rx_otherhost_dropped;
+};
+
+/* Subset of link stats useful for in-HW collection. The meaning of the
+ * fields is the same as for struct rtnl_link_stats64.
+ */
+struct rtnl_hw_stats64 {
+ __u64 rx_packets;
+ __u64 tx_packets;
+ __u64 rx_bytes;
+ __u64 tx_bytes;
+ __u64 rx_errors;
+ __u64 tx_errors;
+ __u64 rx_dropped;
+ __u64 tx_dropped;
+ __u64 multicast;
};
/* The struct should be in sync with struct ifmap */
@@ -170,12 +360,33 @@ enum {
IFLA_PROP_LIST,
IFLA_ALT_IFNAME, /* Alternative ifname */
IFLA_PERM_ADDRESS,
+ IFLA_PROTO_DOWN_REASON,
+
+ /* device (sysfs) name as parent, used instead
+ * of IFLA_LINK where there's no parent netdev
+ */
+ IFLA_PARENT_DEV_NAME,
+ IFLA_PARENT_DEV_BUS_NAME,
+ IFLA_GRO_MAX_SIZE,
+ IFLA_TSO_MAX_SIZE,
+ IFLA_TSO_MAX_SEGS,
+	IFLA_ALLMULTI,		/* Allmulti count: > 0 means it acts as ALLMULTI */
+
__IFLA_MAX
};
#define IFLA_MAX (__IFLA_MAX - 1)
+enum {
+ IFLA_PROTO_DOWN_REASON_UNSPEC,
+ IFLA_PROTO_DOWN_REASON_MASK, /* u32, mask for reason bits */
+ IFLA_PROTO_DOWN_REASON_VALUE, /* u32, reason bit value */
+
+ __IFLA_PROTO_DOWN_REASON_CNT,
+ IFLA_PROTO_DOWN_REASON_MAX = __IFLA_PROTO_DOWN_REASON_CNT - 1
+};
+
/* backwards compatibility for userspace */
#ifndef __KERNEL__
#define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg))))
@@ -230,6 +441,7 @@ enum {
IFLA_INET6_ICMP6STATS, /* statistics (icmpv6) */
IFLA_INET6_TOKEN, /* device token */
IFLA_INET6_ADDR_GEN_MODE, /* implicit address generator mode */
+ IFLA_INET6_RA_MTU, /* mtu carried in the RA message */
__IFLA_INET6_MAX
};
@@ -292,6 +504,7 @@ enum {
IFLA_BR_MCAST_MLD_VERSION,
IFLA_BR_VLAN_STATS_PER_PORT,
IFLA_BR_MULTI_BOOLOPT,
+ IFLA_BR_MCAST_QUERIER_STATE,
__IFLA_BR_MAX,
};
@@ -343,6 +556,11 @@ enum {
IFLA_BRPORT_NEIGH_SUPPRESS,
IFLA_BRPORT_ISOLATED,
IFLA_BRPORT_BACKUP_PORT,
+ IFLA_BRPORT_MRP_RING_OPEN,
+ IFLA_BRPORT_MRP_IN_OPEN,
+ IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT,
+ IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
+ IFLA_BRPORT_LOCKED,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -407,6 +625,8 @@ enum {
IFLA_MACVLAN_MACADDR,
IFLA_MACVLAN_MACADDR_DATA,
IFLA_MACVLAN_MACADDR_COUNT,
+ IFLA_MACVLAN_BC_QUEUE_LEN,
+ IFLA_MACVLAN_BC_QUEUE_LEN_USED,
__IFLA_MACVLAN_MAX,
};
@@ -428,6 +648,7 @@ enum macvlan_macaddr_mode {
};
#define MACVLAN_FLAG_NOPROMISC 1
+#define MACVLAN_FLAG_NODST 2 /* skip dst macvlan if matching src macvlan */
/* VRF section */
enum {
@@ -463,6 +684,7 @@ enum {
IFLA_MACSEC_REPLAY_PROTECT,
IFLA_MACSEC_VALIDATION,
IFLA_MACSEC_PAD,
+ IFLA_MACSEC_OFFLOAD,
__IFLA_MACSEC_MAX,
};
@@ -473,6 +695,7 @@ enum {
IFLA_XFRM_UNSPEC,
IFLA_XFRM_LINK,
IFLA_XFRM_IF_ID,
+ IFLA_XFRM_COLLECT_METADATA,
__IFLA_XFRM_MAX
};
@@ -489,6 +712,7 @@ enum macsec_validation_type {
enum macsec_offload {
MACSEC_OFFLOAD_OFF = 0,
MACSEC_OFFLOAD_PHY = 1,
+ MACSEC_OFFLOAD_MAC = 2,
__MACSEC_OFFLOAD_END,
MACSEC_OFFLOAD_MAX = __MACSEC_OFFLOAD_END - 1,
};
@@ -513,7 +737,55 @@ enum ipvlan_mode {
#define IPVLAN_F_PRIVATE 0x01
#define IPVLAN_F_VEPA 0x02
+/* Tunnel RTM header */
+struct tunnel_msg {
+ __u8 family;
+ __u8 flags;
+ __u16 reserved2;
+ __u32 ifindex;
+};
+
/* VXLAN section */
+
+/* include statistics in the dump */
+#define TUNNEL_MSG_FLAG_STATS 0x01
+
+#define TUNNEL_MSG_VALID_USER_FLAGS TUNNEL_MSG_FLAG_STATS
+
+/* Embedded inside VXLAN_VNIFILTER_ENTRY_STATS */
+enum {
+ VNIFILTER_ENTRY_STATS_UNSPEC,
+ VNIFILTER_ENTRY_STATS_RX_BYTES,
+ VNIFILTER_ENTRY_STATS_RX_PKTS,
+ VNIFILTER_ENTRY_STATS_RX_DROPS,
+ VNIFILTER_ENTRY_STATS_RX_ERRORS,
+ VNIFILTER_ENTRY_STATS_TX_BYTES,
+ VNIFILTER_ENTRY_STATS_TX_PKTS,
+ VNIFILTER_ENTRY_STATS_TX_DROPS,
+ VNIFILTER_ENTRY_STATS_TX_ERRORS,
+ VNIFILTER_ENTRY_STATS_PAD,
+ __VNIFILTER_ENTRY_STATS_MAX
+};
+#define VNIFILTER_ENTRY_STATS_MAX (__VNIFILTER_ENTRY_STATS_MAX - 1)
+
+enum {
+ VXLAN_VNIFILTER_ENTRY_UNSPEC,
+ VXLAN_VNIFILTER_ENTRY_START,
+ VXLAN_VNIFILTER_ENTRY_END,
+ VXLAN_VNIFILTER_ENTRY_GROUP,
+ VXLAN_VNIFILTER_ENTRY_GROUP6,
+ VXLAN_VNIFILTER_ENTRY_STATS,
+ __VXLAN_VNIFILTER_ENTRY_MAX
+};
+#define VXLAN_VNIFILTER_ENTRY_MAX (__VXLAN_VNIFILTER_ENTRY_MAX - 1)
+
+enum {
+ VXLAN_VNIFILTER_UNSPEC,
+ VXLAN_VNIFILTER_ENTRY,
+ __VXLAN_VNIFILTER_MAX
+};
+#define VXLAN_VNIFILTER_MAX (__VXLAN_VNIFILTER_MAX - 1)
+
enum {
IFLA_VXLAN_UNSPEC,
IFLA_VXLAN_ID,
@@ -545,6 +817,7 @@ enum {
IFLA_VXLAN_GPE,
IFLA_VXLAN_TTL_INHERIT,
IFLA_VXLAN_DF,
+ IFLA_VXLAN_VNIFILTER, /* only applicable with COLLECT_METADATA mode */
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
@@ -578,6 +851,7 @@ enum {
IFLA_GENEVE_LABEL,
IFLA_GENEVE_TTL_INHERIT,
IFLA_GENEVE_DF,
+ IFLA_GENEVE_INNER_PROTO_INHERIT,
__IFLA_GENEVE_MAX
};
#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
@@ -590,6 +864,18 @@ enum ifla_geneve_df {
GENEVE_DF_MAX = __GENEVE_DF_END - 1,
};
+/* Bareudp section */
+enum {
+ IFLA_BAREUDP_UNSPEC,
+ IFLA_BAREUDP_PORT,
+ IFLA_BAREUDP_ETHERTYPE,
+ IFLA_BAREUDP_SRCPORT_MIN,
+ IFLA_BAREUDP_MULTIPROTO_MODE,
+ __IFLA_BAREUDP_MAX
+};
+
+#define IFLA_BAREUDP_MAX (__IFLA_BAREUDP_MAX - 1)
+
/* PPP section */
enum {
IFLA_PPP_UNSPEC,
@@ -611,6 +897,8 @@ enum {
IFLA_GTP_FD1,
IFLA_GTP_PDP_HASHSIZE,
IFLA_GTP_ROLE,
+ IFLA_GTP_CREATE_SOCKETS,
+ IFLA_GTP_RESTART_COUNT,
__IFLA_GTP_MAX,
};
#define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1)
@@ -647,6 +935,9 @@ enum {
IFLA_BOND_AD_ACTOR_SYSTEM,
IFLA_BOND_TLB_DYNAMIC_LB,
IFLA_BOND_PEER_NOTIF_DELAY,
+ IFLA_BOND_AD_LACP_ACTIVE,
+ IFLA_BOND_MISSED_MAX,
+ IFLA_BOND_NS_IP6_TARGET,
__IFLA_BOND_MAX,
};
@@ -674,6 +965,7 @@ enum {
IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE,
IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE,
+ IFLA_BOND_SLAVE_PRIO,
__IFLA_BOND_SLAVE_MAX,
};
@@ -891,7 +1183,14 @@ enum {
#define IFLA_IPOIB_MAX (__IFLA_IPOIB_MAX - 1)
-/* HSR section */
+/* HSR/PRP section; both use the same interface */
+
+/* Different redundancy protocols for hsr device */
+enum {
+ HSR_PROTOCOL_HSR,
+ HSR_PROTOCOL_PRP,
+ HSR_PROTOCOL_MAX,
+};
enum {
IFLA_HSR_UNSPEC,
@@ -901,6 +1200,9 @@ enum {
IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */
IFLA_HSR_SEQ_NR,
IFLA_HSR_VERSION, /* HSR version */
+	IFLA_HSR_PROTOCOL,	/* Indicates a protocol other than
+				 * HSR, e.g. PRP.
+				 */
__IFLA_HSR_MAX,
};
@@ -933,6 +1235,17 @@ enum {
#define IFLA_STATS_FILTER_BIT(ATTR) (1 << (ATTR - 1))
+enum {
+ IFLA_STATS_GETSET_UNSPEC,
+ IFLA_STATS_GET_FILTERS, /* Nest of IFLA_STATS_LINK_xxx, each a u32 with
+ * a filter mask for the corresponding group.
+ */
+ IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS, /* 0 or 1 as u8 */
+ __IFLA_STATS_GETSET_MAX,
+};
+
+#define IFLA_STATS_GETSET_MAX (__IFLA_STATS_GETSET_MAX - 1)
+
/* These are embedded into IFLA_STATS_LINK_XSTATS:
* [IFLA_STATS_LINK_XSTATS]
* -> [LINK_XSTATS_TYPE_xxx]
@@ -950,21 +1263,33 @@ enum {
enum {
IFLA_OFFLOAD_XSTATS_UNSPEC,
IFLA_OFFLOAD_XSTATS_CPU_HIT, /* struct rtnl_link_stats64 */
+ IFLA_OFFLOAD_XSTATS_HW_S_INFO, /* HW stats info. A nest */
+ IFLA_OFFLOAD_XSTATS_L3_STATS, /* struct rtnl_hw_stats64 */
__IFLA_OFFLOAD_XSTATS_MAX
};
#define IFLA_OFFLOAD_XSTATS_MAX (__IFLA_OFFLOAD_XSTATS_MAX - 1)
+enum {
+ IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC,
+ IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, /* u8 */
+ IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, /* u8 */
+ __IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX,
+};
+#define IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX \
+ (__IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX - 1)
+
/* XDP section */
#define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0)
#define XDP_FLAGS_SKB_MODE (1U << 1)
#define XDP_FLAGS_DRV_MODE (1U << 2)
#define XDP_FLAGS_HW_MODE (1U << 3)
+#define XDP_FLAGS_REPLACE (1U << 4)
#define XDP_FLAGS_MODES (XDP_FLAGS_SKB_MODE | \
XDP_FLAGS_DRV_MODE | \
XDP_FLAGS_HW_MODE)
#define XDP_FLAGS_MASK (XDP_FLAGS_UPDATE_IF_NOEXIST | \
- XDP_FLAGS_MODES)
+ XDP_FLAGS_MODES | XDP_FLAGS_REPLACE)
/* These are stored into IFLA_XDP_ATTACHED on dump. */
enum {
@@ -984,6 +1309,7 @@ enum {
IFLA_XDP_DRV_PROG_ID,
IFLA_XDP_SKB_PROG_ID,
IFLA_XDP_HW_PROG_ID,
+ IFLA_XDP_EXPECTED_FD,
__IFLA_XDP_MAX,
};
@@ -1023,6 +1349,8 @@ enum {
#define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1)
#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2)
#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3)
+#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 4)
+#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 (1U << 5)
enum {
IFLA_RMNET_UNSPEC,
@@ -1038,4 +1366,24 @@ struct ifla_rmnet_flags {
__u32 mask;
};
+/* MCTP section */
+
+enum {
+ IFLA_MCTP_UNSPEC,
+ IFLA_MCTP_NET,
+ __IFLA_MCTP_MAX,
+};
+
+#define IFLA_MCTP_MAX (__IFLA_MCTP_MAX - 1)
+
+/* DSA section */
+
+enum {
+ IFLA_DSA_UNSPEC,
+ IFLA_DSA_MASTER,
+ __IFLA_DSA_MAX,
+};
+
+#define IFLA_DSA_MAX (__IFLA_DSA_MAX - 1)
+
#endif /* _UAPI_LINUX_IF_LINK_H */
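For reference, the counters documented for rtnl_link_stats64 above are the
same ones glibc's getifaddrs() exposes for AF_PACKET entries, where ifa_data
points at a struct rtnl_link_stats (the 32-bit layout; the 64-bit variant
travels over rtnetlink as IFLA_STATS64). A minimal dump:

#include <ifaddrs.h>
#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_link.h>

int main(void)
{
	struct ifaddrs *ifaddr, *ifa;

	if (getifaddrs(&ifaddr) < 0)
		return 1;
	for (ifa = ifaddr; ifa; ifa = ifa->ifa_next) {
		if (!ifa->ifa_addr || ifa->ifa_addr->sa_family != AF_PACKET ||
		    !ifa->ifa_data)
			continue;	/* only packet-family entries carry stats */
		struct rtnl_link_stats *st = ifa->ifa_data;
		printf("%s: rx %u pkts (%u dropped), tx %u pkts\n",
		       ifa->ifa_name, st->rx_packets, st->rx_dropped,
		       st->tx_packets);
	}
	freeifaddrs(ifaddr);
	return 0;
}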
diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h
index 1d63c43c38cc..d5b6d1f37353 100644
--- a/include/uapi/linux/if_macsec.h
+++ b/include/uapi/linux/if_macsec.h
@@ -22,9 +22,13 @@
#define MACSEC_KEYID_LEN 16
-/* cipher IDs as per IEEE802.1AEbn-2011 */
+#define MACSEC_SALT_LEN 12
+
+/* cipher IDs as per IEEE802.1AE-2018 (Table 14-1) */
#define MACSEC_CIPHER_ID_GCM_AES_128 0x0080C20001000001ULL
#define MACSEC_CIPHER_ID_GCM_AES_256 0x0080C20001000002ULL
+#define MACSEC_CIPHER_ID_GCM_AES_XPN_128 0x0080C20001000003ULL
+#define MACSEC_CIPHER_ID_GCM_AES_XPN_256 0x0080C20001000004ULL
/* deprecated cipher ID for GCM-AES-128 */
#define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL
@@ -88,11 +92,13 @@ enum macsec_sa_attrs {
MACSEC_SA_ATTR_UNSPEC,
MACSEC_SA_ATTR_AN, /* config/dump, u8 0..3 */
MACSEC_SA_ATTR_ACTIVE, /* config/dump, u8 0..1 */
- MACSEC_SA_ATTR_PN, /* config/dump, u32 */
+ MACSEC_SA_ATTR_PN, /* config/dump, u32/u64 (u64 if XPN) */
MACSEC_SA_ATTR_KEY, /* config, data */
MACSEC_SA_ATTR_KEYID, /* config/dump, 128-bit */
MACSEC_SA_ATTR_STATS, /* dump, nested, macsec_sa_stats_attr */
MACSEC_SA_ATTR_PAD,
+ MACSEC_SA_ATTR_SSCI, /* config/dump, u32 - XPN only */
+ MACSEC_SA_ATTR_SALT, /* config, 96-bit - XPN only */
__MACSEC_SA_ATTR_END,
NUM_MACSEC_SA_ATTR = __MACSEC_SA_ATTR_END,
MACSEC_SA_ATTR_MAX = __MACSEC_SA_ATTR_END - 1,
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index 3d884d68eb30..c07caf7b40db 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -2,6 +2,7 @@
#ifndef __LINUX_IF_PACKET_H
#define __LINUX_IF_PACKET_H
+#include <asm/byteorder.h>
#include <linux/types.h>
struct sockaddr_pkt {
@@ -296,6 +297,17 @@ struct packet_mreq {
unsigned char mr_address[8];
};
+struct fanout_args {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u16 id;
+ __u16 type_flags;
+#else
+ __u16 type_flags;
+ __u16 id;
+#endif
+ __u32 max_num_members;
+};
+
#define PACKET_MR_MULTICAST 0
#define PACKET_MR_PROMISC 1
#define PACKET_MR_ALLMULTI 2
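
struct fanout_args extends the PACKET_FANOUT socket option: a 4-byte optlen
keeps the legacy id/type_flags semantics, while the 8-byte form lets the
first member of the group set max_num_members (0 keeps the historical
default of 256). A hedged sketch:

/* Sketch: join fanout group `group_id` on an AF_PACKET socket with an
 * enlarged member cap; only the first joiner's cap takes effect. */
#include <sys/socket.h>
#include <linux/if_packet.h>

static int fanout_join(int pkt_fd, __u16 group_id)
{
        struct fanout_args args = {
                .id              = group_id,
                .type_flags      = PACKET_FANOUT_HASH,
                .max_num_members = 1024,
        };

        return setsockopt(pkt_fd, SOL_PACKET, PACKET_FANOUT,
                          &args, sizeof(args));
}
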
diff --git a/include/uapi/linux/if_pppol2tp.h b/include/uapi/linux/if_pppol2tp.h
index 060b4d1f3129..a91044328bc9 100644
--- a/include/uapi/linux/if_pppol2tp.h
+++ b/include/uapi/linux/if_pppol2tp.h
@@ -75,7 +75,7 @@ struct pppol2tpv3in6_addr {
};
/* Socket options:
- * DEBUG - bitmask of debug message categories
+ * DEBUG - bitmask of debug message categories (not used)
* SENDSEQ - 0 => don't send packets with sequence numbers
* 1 => send packets with sequence numbers
* RECVSEQ - 0 => receive packet sequence numbers are optional
diff --git a/include/uapi/linux/if_pppox.h b/include/uapi/linux/if_pppox.h
index e7a693c28f16..9abd80dcc46f 100644
--- a/include/uapi/linux/if_pppox.h
+++ b/include/uapi/linux/if_pppox.h
@@ -122,7 +122,7 @@ struct sockaddr_pppol2tpv3in6 {
struct pppoe_tag {
__be16 tag_type;
__be16 tag_len;
- char tag_data[0];
+ char tag_data[];
} __attribute__ ((packed));
/* Tag identifiers */
@@ -150,7 +150,7 @@ struct pppoe_hdr {
__u8 code;
__be16 sid;
__be16 length;
- struct pppoe_tag tag[0];
+ struct pppoe_tag tag[];
} __packed;
/* Length of entire PPPoE + PPP header */
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index 454ae31b93c7..b6d7b868f290 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -67,6 +67,8 @@
#define IFF_TAP 0x0002
#define IFF_NAPI 0x0010
#define IFF_NAPI_FRAGS 0x0020
+/* Used in TUNSETIFF to bring up tun/tap without carrier */
+#define IFF_NO_CARRIER 0x0040
#define IFF_NO_PI 0x1000
/* This flag has no real effect */
#define IFF_ONE_QUEUE 0x2000
@@ -108,7 +110,7 @@ struct tun_pi {
struct tun_filter {
__u16 flags; /* TUN_FLT_ flags see above */
__u16 count; /* Number of addresses */
- __u8 addr[0][ETH_ALEN];
+ __u8 addr[][ETH_ALEN];
};
#endif /* _UAPI__IF_TUN_H */
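
IFF_NO_CARRIER lets the creator bring up a tun/tap device with carrier
initially off, so consumers do not route over the link until TUNSETCARRIER
enables it. A sketch of the TUNSETIFF call (device name and error handling
are illustrative):

/* Sketch: create a tap device that starts without carrier. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int tap_open_no_carrier(const char *name)
{
        struct ifreq ifr;
        int fd = open("/dev/net/tun", O_RDWR);

        if (fd < 0)
                return -1;
        memset(&ifr, 0, sizeof(ifr));
        ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NO_CARRIER;
        strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
        if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
                close(fd);
                return -1;
        }
        return fd;      /* later: ioctl(fd, TUNSETCARRIER, &(int){1}) */
}
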
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index 7d9105533c7b..102119628ff5 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -176,8 +176,10 @@ enum {
#define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000)
#define TUNNEL_NOCACHE __cpu_to_be16(0x2000)
#define TUNNEL_ERSPAN_OPT __cpu_to_be16(0x4000)
+#define TUNNEL_GTP_OPT __cpu_to_be16(0x8000)
#define TUNNEL_OPTIONS_PRESENT \
- (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT)
+ (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT | \
+ TUNNEL_GTP_OPT)
#endif /* _UAPI_IF_TUNNEL_H_ */
diff --git a/include/uapi/linux/if_x25.h b/include/uapi/linux/if_x25.h
index 5d962448345f..3a5938e38370 100644
--- a/include/uapi/linux/if_x25.h
+++ b/include/uapi/linux/if_x25.h
@@ -18,7 +18,7 @@
#include <linux/types.h>
-/* Documentation/networking/x25-iface.txt */
+/* Documentation/networking/x25-iface.rst */
#define X25_IFACE_DATA 0x00
#define X25_IFACE_CONNECT 0x01
#define X25_IFACE_DISCONNECT 0x02
diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h
index be328c59389d..a78a8096f4ce 100644
--- a/include/uapi/linux/if_xdp.h
+++ b/include/uapi/linux/if_xdp.h
@@ -73,9 +73,12 @@ struct xdp_umem_reg {
};
struct xdp_statistics {
- __u64 rx_dropped; /* Dropped for reasons other than invalid desc */
+ __u64 rx_dropped; /* Dropped for other reasons */
__u64 rx_invalid_descs; /* Dropped due to invalid descriptor */
__u64 tx_invalid_descs; /* Dropped due to invalid descriptor */
+ __u64 rx_ring_full; /* Dropped due to rx ring being full */
+ __u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */
+ __u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */
};
struct xdp_options {
diff --git a/include/uapi/linux/igmp.h b/include/uapi/linux/igmp.h
index 90c28bc466c6..5930f2437cd1 100644
--- a/include/uapi/linux/igmp.h
+++ b/include/uapi/linux/igmp.h
@@ -48,7 +48,7 @@ struct igmpv3_grec {
__u8 grec_auxwords;
__be16 grec_nsrcs;
__be32 grec_mca;
- __be32 grec_src[0];
+ __be32 grec_src[];
};
struct igmpv3_report {
@@ -57,7 +57,7 @@ struct igmpv3_report {
__sum16 csum;
__be16 resv2;
__be16 ngrec;
- struct igmpv3_grec grec[0];
+ struct igmpv3_grec grec[];
};
struct igmpv3_query {
@@ -78,7 +78,7 @@ struct igmpv3_query {
#endif
__u8 qqic;
__be16 nsrcs;
- __be32 srcs[0];
+ __be32 srcs[];
};
#define IGMP_HOST_MEMBERSHIP_QUERY 0x11 /* From RFC1112 */
diff --git a/include/uapi/linux/iio/buffer.h b/include/uapi/linux/iio/buffer.h
new file mode 100644
index 000000000000..13939032b3f6
--- /dev/null
+++ b/include/uapi/linux/iio/buffer.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* industrial I/O buffer definitions needed both in and out of kernel
+ */
+
+#ifndef _UAPI_IIO_BUFFER_H_
+#define _UAPI_IIO_BUFFER_H_
+
+#define IIO_BUFFER_GET_FD_IOCTL _IOWR('i', 0x91, int)
+
+#endif /* _UAPI_IIO_BUFFER_H_ */
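
The ioctl is issued on the IIO character device with an int that carries the
buffer index in and, on success, is overwritten with a new anonymous fd
dedicated to that buffer. A sketch, assuming the buffer fd stays valid on
its own after the chardev is closed (which matches the anon-fd design):

/* Sketch: fetch a per-buffer fd for buffer `index` of an IIO device. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/iio/buffer.h>

static int iio_buffer_fd(const char *chardev, int index)
{
        int arg = index;        /* in: buffer index, out: new fd */
        int fd = open(chardev, O_RDONLY);

        if (fd < 0)
                return -1;
        if (ioctl(fd, IIO_BUFFER_GET_FD_IOCTL, &arg) < 0)
                arg = -1;
        close(fd);
        return arg;
}
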
diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h
index fdd81affca4b..c79f2f046a0b 100644
--- a/include/uapi/linux/iio/types.h
+++ b/include/uapi/linux/iio/types.h
@@ -94,6 +94,13 @@ enum iio_modifier {
IIO_MOD_PM10,
IIO_MOD_ETHANOL,
IIO_MOD_H2,
+ IIO_MOD_O2,
+ IIO_MOD_LINEAR_X,
+ IIO_MOD_LINEAR_Y,
+ IIO_MOD_LINEAR_Z,
+ IIO_MOD_PITCH,
+ IIO_MOD_YAW,
+ IIO_MOD_ROLL,
};
enum iio_event_type {
@@ -103,6 +110,8 @@ enum iio_event_type {
IIO_EV_TYPE_THRESH_ADAPTIVE,
IIO_EV_TYPE_MAG_ADAPTIVE,
IIO_EV_TYPE_CHANGE,
+ IIO_EV_TYPE_MAG_REFERENCED,
+ IIO_EV_TYPE_GESTURE,
};
enum iio_event_direction {
@@ -110,7 +119,8 @@ enum iio_event_direction {
IIO_EV_DIR_RISING,
IIO_EV_DIR_FALLING,
IIO_EV_DIR_NONE,
+ IIO_EV_DIR_SINGLETAP,
+ IIO_EV_DIR_DOUBLETAP,
};
#endif /* _UAPI_IIO_TYPES_H_ */
-
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 8533bf07450f..07a4cb149305 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -20,6 +20,7 @@
#define _UAPI_LINUX_IN_H
#include <linux/types.h>
+#include <linux/stddef.h>
#include <linux/libc-compat.h>
#include <linux/socket.h>
@@ -68,6 +69,8 @@ enum {
#define IPPROTO_PIM IPPROTO_PIM
IPPROTO_COMP = 108, /* Compression Header Protocol */
#define IPPROTO_COMP IPPROTO_COMP
+ IPPROTO_L2TP = 115, /* Layer 2 Tunnelling Protocol */
+#define IPPROTO_L2TP IPPROTO_L2TP
IPPROTO_SCTP = 132, /* Stream Control Transport Protocol */
#define IPPROTO_SCTP IPPROTO_SCTP
IPPROTO_UDPLITE = 136, /* UDP-Lite (RFC 3828) */
@@ -123,6 +126,7 @@ struct in_addr {
#define IP_CHECKSUM 23
#define IP_BIND_ADDRESS_NO_PORT 24
#define IP_RECVFRAGSIZE 25
+#define IP_RECVERR_RFC4884 26
/* IP_MTU_DISCOVER values */
#define IP_PMTUDISC_DONT 0 /* Never send DF frames */
@@ -134,7 +138,7 @@ struct in_addr {
* this socket to prevent accepting spoofed ones.
*/
#define IP_PMTUDISC_INTERFACE 4
-/* weaker version of IP_PMTUDISC_INTERFACE, which allos packets to get
+/* weaker version of IP_PMTUDISC_INTERFACE, which allows packets to get
* fragmented if they exceed the interface mtu
*/
#define IP_PMTUDISC_OMIT 5
@@ -191,7 +195,10 @@ struct ip_msfilter {
__be32 imsf_interface;
__u32 imsf_fmode;
__u32 imsf_numsrc;
- __be32 imsf_slist[1];
+ union {
+ __be32 imsf_slist[1];
+ __DECLARE_FLEX_ARRAY(__be32, imsf_slist_flex);
+ };
};
#define IP_MSFILTER_SIZE(numsrc) \
@@ -210,11 +217,22 @@ struct group_source_req {
};
struct group_filter {
- __u32 gf_interface; /* interface index */
- struct __kernel_sockaddr_storage gf_group; /* multicast address */
- __u32 gf_fmode; /* filter mode */
- __u32 gf_numsrc; /* number of sources */
- struct __kernel_sockaddr_storage gf_slist[1]; /* interface index */
+ union {
+ struct {
+ __u32 gf_interface_aux; /* interface index */
+ struct __kernel_sockaddr_storage gf_group_aux; /* multicast address */
+ __u32 gf_fmode_aux; /* filter mode */
+ __u32 gf_numsrc_aux; /* number of sources */
+ struct __kernel_sockaddr_storage gf_slist[1]; /* interface index */
+ };
+ struct {
+ __u32 gf_interface; /* interface index */
+ struct __kernel_sockaddr_storage gf_group; /* multicast address */
+ __u32 gf_fmode; /* filter mode */
+ __u32 gf_numsrc; /* number of sources */
+ struct __kernel_sockaddr_storage gf_slist_flex[]; /* interface index */
+ };
+ };
};
#define GROUP_FILTER_SIZE(numsrc) \
@@ -288,6 +306,9 @@ struct sockaddr_in {
/* Address indicating an error return. */
#define INADDR_NONE ((unsigned long int) 0xffffffff)
+/* Dummy address for src of ICMP replies if no real address is set (RFC7600). */
+#define INADDR_DUMMY ((unsigned long int) 0xc0000008)
+
/* Network number for local host loopback. */
#define IN_LOOPBACKNET 127
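
The unions above preserve the legacy one-element arrays for ABI
compatibility while adding proper flexible-array members. A sketch of
sizing a filter with the new spelling (the helper is illustrative;
GROUP_FILTER_SIZE() already accounts for the trailing array):

/* Sketch: allocate a group_filter for `nsrcs` sources and address the
 * source list through the flexible-array member. */
#include <stdlib.h>
#include <linux/in.h>

static struct group_filter *filter_alloc(__u32 ifindex, __u32 nsrcs)
{
        struct group_filter *gf = calloc(1, GROUP_FILTER_SIZE(nsrcs));

        if (!gf)
                return NULL;
        gf->gf_interface = ifindex;
        gf->gf_fmode = MCAST_INCLUDE;
        gf->gf_numsrc = nsrcs;
        /* fill gf->gf_slist_flex[0 .. nsrcs-1], then pass to MCAST_MSFILTER */
        return gf;
}
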
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index 9f2273a08356..c4c53a9ab959 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -145,6 +145,7 @@ struct in6_flowlabel_req {
#define IPV6_TLV_PADN 1
#define IPV6_TLV_ROUTERALERT 5
#define IPV6_TLV_CALIPSO 7 /* RFC 5570 */
+#define IPV6_TLV_IOAM 49 /* TEMPORARY IANA allocation for IOAM */
#define IPV6_TLV_JUMBO 194
#define IPV6_TLV_HAO 201 /* home address option */
@@ -179,6 +180,7 @@ struct in6_flowlabel_req {
#define IPV6_LEAVE_ANYCAST 28
#define IPV6_MULTICAST_ALL 29
#define IPV6_ROUTER_ALERT_ISOLATE 30
+#define IPV6_RECVERR_RFC4884 31
/* IPV6_MTU_DISCOVER values */
#define IPV6_PMTUDISC_DONT 0
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index a1ff345b3f33..50655de04c9b 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -64,9 +64,12 @@ struct inet_diag_req_raw {
enum {
INET_DIAG_REQ_NONE,
INET_DIAG_REQ_BYTECODE,
+ INET_DIAG_REQ_SK_BPF_STORAGES,
+ INET_DIAG_REQ_PROTOCOL,
+ __INET_DIAG_REQ_MAX,
};
-#define INET_DIAG_REQ_MAX INET_DIAG_REQ_BYTECODE
+#define INET_DIAG_REQ_MAX (__INET_DIAG_REQ_MAX - 1)
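
INET_DIAG_REQ_PROTOCOL exists because sdiag_protocol is only a __u8, while
newer protocol numbers such as IPPROTO_MPTCP (262) no longer fit in a byte;
the attribute carries the protocol in a full 32-bit value. A minimal sketch
of a dump request with the attribute appended (the message struct is
hand-rolled here purely for illustration):

/* Sketch: sock_diag dump request carrying INET_DIAG_REQ_PROTOCOL. */
#include <string.h>
#include <netinet/in.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/inet_diag.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262
#endif

struct diag_req {
        struct nlmsghdr         nlh;
        struct inet_diag_req_v2 req;
        struct nlattr           proto_nla;
        __u32                   proto;          /* attribute payload */
};

static void diag_req_init(struct diag_req *m)
{
        memset(m, 0, sizeof(*m));
        m->nlh.nlmsg_len = sizeof(*m);
        m->nlh.nlmsg_type = SOCK_DIAG_BY_FAMILY;
        m->nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
        m->req.sdiag_family = AF_INET;
        m->req.idiag_states = ~0U;
        m->proto_nla.nla_len = NLA_HDRLEN + sizeof(m->proto);
        m->proto_nla.nla_type = INET_DIAG_REQ_PROTOCOL;
        m->proto = IPPROTO_MPTCP;
}
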
/* Bytecode is a sequence of 4-byte commands followed by variable arguments.
* All the commands identified by "code" are conditional jumps forward:
@@ -94,13 +97,14 @@ enum {
INET_DIAG_BC_MARK_COND,
INET_DIAG_BC_S_EQ,
INET_DIAG_BC_D_EQ,
+ INET_DIAG_BC_CGROUP_COND, /* u64 cgroup v2 ID */
};
struct inet_diag_hostcond {
__u8 family;
__u8 prefix_len;
int port;
- __be32 addr[0];
+ __be32 addr[];
};
struct inet_diag_markcond {
@@ -154,6 +158,9 @@ enum {
INET_DIAG_CLASS_ID, /* request as INET_DIAG_TCLASS */
INET_DIAG_MD5SIG,
INET_DIAG_ULP_INFO,
+ INET_DIAG_SK_BPF_STORAGES,
+ INET_DIAG_CGROUP_ID,
+ INET_DIAG_SOCKOPT,
__INET_DIAG_MAX,
};
@@ -163,6 +170,7 @@ enum {
INET_ULP_INFO_UNSPEC,
INET_ULP_INFO_NAME,
INET_ULP_INFO_TLS,
+ INET_ULP_INFO_MPTCP,
__INET_ULP_INFO_MAX,
};
#define INET_ULP_INFO_MAX (__INET_ULP_INFO_MAX - 1)
@@ -176,6 +184,23 @@ struct inet_diag_meminfo {
__u32 idiag_tmem;
};
+/* INET_DIAG_SOCKOPT */
+
+struct inet_diag_sockopt {
+ __u8 recverr:1,
+ is_icsk:1,
+ freebind:1,
+ hdrincl:1,
+ mc_loop:1,
+ transparent:1,
+ mc_all:1,
+ nodefrag:1;
+ __u8 bind_address_no_port:1,
+ recverr_rfc4884:1,
+ defer_connect:1,
+ unused:5;
+};
+
/* INET_DIAG_VEGASINFO */
struct tcpvegas_info {
diff --git a/include/uapi/linux/inotify.h b/include/uapi/linux/inotify.h
index 884b4846b630..b3e165853d5b 100644
--- a/include/uapi/linux/inotify.h
+++ b/include/uapi/linux/inotify.h
@@ -23,7 +23,7 @@ struct inotify_event {
__u32 mask; /* watch mask */
__u32 cookie; /* cookie to synchronize two events */
__u32 len; /* length (including nulls) of name */
- char name[0]; /* stub for possible name */
+ char name[]; /* stub for possible name */
};
/* the following are legal, implemented events that user-space can watch for */
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 0f1db1cccc3f..7ad931a32970 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Input event codes
*
@@ -278,7 +278,8 @@
#define KEY_PAUSECD 201
#define KEY_PROG3 202
#define KEY_PROG4 203
-#define KEY_DASHBOARD 204 /* AL Dashboard */
+#define KEY_ALL_APPLICATIONS 204 /* AC Desktop Show All Applications */
+#define KEY_DASHBOARD KEY_ALL_APPLICATIONS
#define KEY_SUSPEND 205
#define KEY_CLOSE 206 /* AC Close */
#define KEY_PLAY 207
@@ -515,6 +516,9 @@
#define KEY_10CHANNELSUP 0x1b8 /* 10 channels up (10+) */
#define KEY_10CHANNELSDOWN 0x1b9 /* 10 channels down (10-) */
#define KEY_IMAGES 0x1ba /* AL Image Browser */
+#define KEY_NOTIFICATION_CENTER 0x1bc /* Show/hide the notification center */
+#define KEY_PICKUP_PHONE 0x1bd /* Answer incoming call */
+#define KEY_HANGUP_PHONE 0x1be /* Decline incoming call */
#define KEY_DEL_EOL 0x1c0
#define KEY_DEL_EOS 0x1c1
@@ -542,6 +546,7 @@
#define KEY_FN_F 0x1e2
#define KEY_FN_S 0x1e3
#define KEY_FN_B 0x1e4
+#define KEY_FN_RIGHT_SHIFT 0x1e5
#define KEY_BRL_DOT1 0x1f1
#define KEY_BRL_DOT2 0x1f2
@@ -607,6 +612,8 @@
#define KEY_VOICECOMMAND 0x246 /* Listening Voice Command */
#define KEY_ASSISTANT 0x247 /* AL Context-aware desktop assistant */
#define KEY_KBD_LAYOUT_NEXT 0x248 /* AC Next Keyboard Layout Select */
+#define KEY_EMOJI_PICKER 0x249 /* Show/hide emoji picker (HUTRR101) */
+#define KEY_DICTATE 0x24a /* Start or Stop Voice Dictation Session (HUTRR99) */
#define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */
#define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */
@@ -652,6 +659,30 @@
/* Electronic privacy screen control */
#define KEY_PRIVACY_SCREEN_TOGGLE 0x279
+/* Select an area of screen to be copied */
+#define KEY_SELECTIVE_SCREENSHOT 0x27a
+
+/* Move the focus to the next or previous user controllable element within a UI container */
+#define KEY_NEXT_ELEMENT 0x27b
+#define KEY_PREVIOUS_ELEMENT 0x27c
+
+/* Toggle Autopilot engagement */
+#define KEY_AUTOPILOT_ENGAGE_TOGGLE 0x27d
+
+/* Shortcut Keys */
+#define KEY_MARK_WAYPOINT 0x27e
+#define KEY_SOS 0x27f
+#define KEY_NAV_CHART 0x280
+#define KEY_FISHING_CHART 0x281
+#define KEY_SINGLE_RANGE_RADAR 0x282
+#define KEY_DUAL_RANGE_RADAR 0x283
+#define KEY_RADAR_OVERLAY 0x284
+#define KEY_TRADITIONAL_SONAR 0x285
+#define KEY_CLEARVU_SONAR 0x286
+#define KEY_SIDEVU_SONAR 0x287
+#define KEY_NAV_INFO 0x288
+#define KEY_BRIGHTNESS_MENU 0x289
+
/*
* Some keyboards have keys which do not have a defined meaning, these keys
* are intended to be programmed / bound to macros by the user. For most
@@ -831,6 +862,7 @@
#define ABS_TOOL_WIDTH 0x1c
#define ABS_VOLUME 0x20
+#define ABS_PROFILE 0x21
#define ABS_MISC 0x28
@@ -885,7 +917,8 @@
#define SW_LINEIN_INSERT 0x0d /* set = inserted */
#define SW_MUTE_DEVICE 0x0e /* set = device disabled */
#define SW_PEN_INSERTED 0x0f /* set = pen inserted */
-#define SW_MAX 0x0f
+#define SW_MACHINE_COVER 0x10 /* set = cover closed */
+#define SW_MAX 0x10
#define SW_CNT (SW_MAX+1)
/*
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index 9a61c28ed3ae..2557eb7b0561 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -78,13 +78,16 @@ struct input_id {
* Note that input core does not clamp reported values to the
* [minimum, maximum] limits, such task is left to userspace.
*
- * The default resolution for main axes (ABS_X, ABS_Y, ABS_Z)
- * is reported in units per millimeter (units/mm), resolution
- * for rotational axes (ABS_RX, ABS_RY, ABS_RZ) is reported
- * in units per radian.
+ * The default resolution for main axes (ABS_X, ABS_Y, ABS_Z,
+ * ABS_MT_POSITION_X, ABS_MT_POSITION_Y) is reported in units
+ * per millimeter (units/mm), resolution for rotational axes
+ * (ABS_RX, ABS_RY, ABS_RZ) is reported in units per radian.
+ * The resolution for the size axes (ABS_MT_TOUCH_MAJOR,
+ * ABS_MT_TOUCH_MINOR, ABS_MT_WIDTH_MAJOR, ABS_MT_WIDTH_MINOR)
+ * is reported in units per millimeter (units/mm).
* When INPUT_PROP_ACCELEROMETER is set the resolution changes.
* The main axes (ABS_X, ABS_Y, ABS_Z) are then reported in
- * in units per g (units/g) and in units per degree per second
+ * units per g (units/g) and in units per degree per second
* (units/deg/s) for rotational axes (ABS_RX, ABS_RY, ABS_RZ).
*/
struct input_absinfo {
@@ -271,6 +274,7 @@ struct input_mask {
#define BUS_RMI 0x1D
#define BUS_CEC 0x1E
#define BUS_INTEL_ISHTP 0x1F
+#define BUS_AMD_SFH 0x20
/*
* MT_TOOL types
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 3f7961c1c243..2df3225b562f 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
/*
* Header file for the io_uring interface.
*
@@ -10,6 +10,11 @@
#include <linux/fs.h>
#include <linux/types.h>
+#include <linux/time_types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
/*
* IO submission data structure (Submission Queue Entry)
@@ -22,13 +27,21 @@ struct io_uring_sqe {
union {
__u64 off; /* offset into file */
__u64 addr2;
+ struct {
+ __u32 cmd_op;
+ __u32 __pad1;
+ };
+ };
+ union {
+ __u64 addr; /* pointer to buffer or iovecs */
+ __u64 splice_off_in;
};
- __u64 addr; /* pointer to buffer or iovecs */
__u32 len; /* buffer size or number of iovecs */
union {
__kernel_rwf_t rw_flags;
__u32 fsync_flags;
- __u16 poll_events;
+ __u16 poll_events; /* compatibility */
+ __u32 poll32_events; /* word-reversed for BE */
__u32 sync_range_flags;
__u32 msg_flags;
__u32 timeout_flags;
@@ -37,25 +50,62 @@ struct io_uring_sqe {
__u32 open_flags;
__u32 statx_flags;
__u32 fadvise_advice;
+ __u32 splice_flags;
+ __u32 rename_flags;
+ __u32 unlink_flags;
+ __u32 hardlink_flags;
+ __u32 xattr_flags;
+ __u32 msg_ring_flags;
+ __u32 uring_cmd_flags;
};
__u64 user_data; /* data to be passed back at completion time */
+ /* pack this to avoid bogus arm OABI complaints */
+ union {
+ /* index into fixed buffers, if used */
+ __u16 buf_index;
+ /* for grouped buffer selection */
+ __u16 buf_group;
+ } __attribute__((packed));
+ /* personality to use, if used */
+ __u16 personality;
union {
+ __s32 splice_fd_in;
+ __u32 file_index;
struct {
- /* index into fixed buffers, if used */
- __u16 buf_index;
- /* personality to use, if used */
- __u16 personality;
+ __u16 addr_len;
+ __u16 __pad3[1];
};
- __u64 __pad2[3];
+ };
+ union {
+ struct {
+ __u64 addr3;
+ __u64 __pad2[1];
+ };
+ /*
+ * If the ring is initialized with IORING_SETUP_SQE128, then
+ * this field is used for 80 bytes of arbitrary command data
+ */
+ __u8 cmd[0];
};
};
+/*
+ * If sqe->file_index is set to this for opcodes that instantiate a new
+ * direct descriptor (like openat/openat2/accept), then io_uring will allocate
+ * an available direct descriptor instead of having the application pass one
+ * in. The picked direct descriptor will be returned in cqe->res, or -ENFILE
+ * if the space is full.
+ */
+#define IORING_FILE_INDEX_ALLOC (~0U)
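
A sketch of an SQE that accepts into a kernel-allocated slot of a
previously registered file table, using raw SQE fields rather than
liburing; the chosen slot index is returned in cqe->res as noted above:

/* Sketch: accept into an allocated direct-descriptor slot. Requires a
 * registered file table on the ring. */
#include <string.h>
#include <linux/io_uring.h>

static void prep_accept_direct_alloc(struct io_uring_sqe *sqe, int listen_fd)
{
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_ACCEPT;
        sqe->fd = listen_fd;
        sqe->file_index = IORING_FILE_INDEX_ALLOC;      /* kernel picks slot */
}
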
+
enum {
IOSQE_FIXED_FILE_BIT,
IOSQE_IO_DRAIN_BIT,
IOSQE_IO_LINK_BIT,
IOSQE_IO_HARDLINK_BIT,
IOSQE_ASYNC_BIT,
+ IOSQE_BUFFER_SELECT_BIT,
+ IOSQE_CQE_SKIP_SUCCESS_BIT,
};
/*
@@ -71,6 +121,10 @@ enum {
#define IOSQE_IO_HARDLINK (1U << IOSQE_IO_HARDLINK_BIT)
/* always go async */
#define IOSQE_ASYNC (1U << IOSQE_ASYNC_BIT)
+/* select buffer from sqe->buf_group */
+#define IOSQE_BUFFER_SELECT (1U << IOSQE_BUFFER_SELECT_BIT)
+/* don't post CQE if request succeeded */
+#define IOSQE_CQE_SKIP_SUCCESS (1U << IOSQE_CQE_SKIP_SUCCESS_BIT)
/*
* io_uring_setup() flags
@@ -81,8 +135,37 @@ enum {
#define IORING_SETUP_CQSIZE (1U << 3) /* app defines CQ size */
#define IORING_SETUP_CLAMP (1U << 4) /* clamp SQ/CQ ring sizes */
#define IORING_SETUP_ATTACH_WQ (1U << 5) /* attach to existing wq */
+#define IORING_SETUP_R_DISABLED (1U << 6) /* start with ring disabled */
+#define IORING_SETUP_SUBMIT_ALL (1U << 7) /* continue submit on error */
+/*
+ * Cooperative task running. When requests complete, they often require
+ * forcing the submitter to transition to the kernel to complete. If this
+ * flag is set, work will be done when the task transitions anyway, rather
+ * than forcing an inter-processor interrupt reschedule. This avoids interrupting
+ * a task running in userspace, and saves an IPI.
+ */
+#define IORING_SETUP_COOP_TASKRUN (1U << 8)
+/*
+ * If COOP_TASKRUN is set, get notified if task work is available for
+ * running and a kernel transition would be needed to run it. This sets
+ * IORING_SQ_TASKRUN in the sq ring flags. Not valid with IORING_SETUP_SQPOLL.
+ */
+#define IORING_SETUP_TASKRUN_FLAG (1U << 9)
+#define IORING_SETUP_SQE128 (1U << 10) /* SQEs are 128 byte */
+#define IORING_SETUP_CQE32 (1U << 11) /* CQEs are 32 byte */
+/*
+ * Only one task is allowed to submit requests
+ */
+#define IORING_SETUP_SINGLE_ISSUER (1U << 12)
-enum {
+/*
+ * Defer running task work to get events.
+ * Rather than running bits of task work whenever the task transitions
+ * try to do it just before it is needed.
+ */
+#define IORING_SETUP_DEFER_TASKRUN (1U << 13)
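
A sketch of requesting cooperative task running at setup time, falling back
when the running kernel predates these flags and rejects them with -EINVAL:

/* Sketch: create a ring with cooperative task running if available. */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int ring_setup(unsigned entries, struct io_uring_params *p)
{
        int fd;

        memset(p, 0, sizeof(*p));
        p->flags = IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG;
        fd = syscall(__NR_io_uring_setup, entries, p);
        if (fd < 0) {
                memset(p, 0, sizeof(*p));       /* retry without the flags */
                fd = syscall(__NR_io_uring_setup, entries, p);
        }
        return fd;
}
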
+
+enum io_uring_op {
IORING_OP_NOP,
IORING_OP_READV,
IORING_OP_WRITEV,
@@ -113,12 +196,39 @@ enum {
IORING_OP_RECV,
IORING_OP_OPENAT2,
IORING_OP_EPOLL_CTL,
+ IORING_OP_SPLICE,
+ IORING_OP_PROVIDE_BUFFERS,
+ IORING_OP_REMOVE_BUFFERS,
+ IORING_OP_TEE,
+ IORING_OP_SHUTDOWN,
+ IORING_OP_RENAMEAT,
+ IORING_OP_UNLINKAT,
+ IORING_OP_MKDIRAT,
+ IORING_OP_SYMLINKAT,
+ IORING_OP_LINKAT,
+ IORING_OP_MSG_RING,
+ IORING_OP_FSETXATTR,
+ IORING_OP_SETXATTR,
+ IORING_OP_FGETXATTR,
+ IORING_OP_GETXATTR,
+ IORING_OP_SOCKET,
+ IORING_OP_URING_CMD,
+ IORING_OP_SEND_ZC,
+ IORING_OP_SENDMSG_ZC,
/* this goes last, obviously */
IORING_OP_LAST,
};
/*
+ * sqe->uring_cmd_flags
+ * IORING_URING_CMD_FIXED use registered buffer; pass this flag
+ * along with setting sqe->buf_index.
+ */
+#define IORING_URING_CMD_FIXED (1U << 0)
+
+
+/*
* sqe->fsync_flags
*/
#define IORING_FSYNC_DATASYNC (1U << 0)
@@ -126,7 +236,91 @@ enum {
/*
* sqe->timeout_flags
*/
-#define IORING_TIMEOUT_ABS (1U << 0)
+#define IORING_TIMEOUT_ABS (1U << 0)
+#define IORING_TIMEOUT_UPDATE (1U << 1)
+#define IORING_TIMEOUT_BOOTTIME (1U << 2)
+#define IORING_TIMEOUT_REALTIME (1U << 3)
+#define IORING_LINK_TIMEOUT_UPDATE (1U << 4)
+#define IORING_TIMEOUT_ETIME_SUCCESS (1U << 5)
+#define IORING_TIMEOUT_CLOCK_MASK (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME)
+#define IORING_TIMEOUT_UPDATE_MASK (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE)
+/*
+ * sqe->splice_flags
+ * extends splice(2) flags
+ */
+#define SPLICE_F_FD_IN_FIXED (1U << 31) /* the last bit of __u32 */
+
+/*
+ * POLL_ADD flags. Note that since sqe->poll_events is the flag space, the
+ * command flags for POLL_ADD are stored in sqe->len.
+ *
+ * IORING_POLL_ADD_MULTI Multishot poll. Sets IORING_CQE_F_MORE if
+ * the poll handler will continue to report
+ * CQEs on behalf of the same SQE.
+ *
+ * IORING_POLL_UPDATE Update existing poll request, matching
+ * sqe->addr as the old user_data field.
+ *
+ * IORING_POLL_LEVEL Level triggered poll.
+ */
+#define IORING_POLL_ADD_MULTI (1U << 0)
+#define IORING_POLL_UPDATE_EVENTS (1U << 1)
+#define IORING_POLL_UPDATE_USER_DATA (1U << 2)
+#define IORING_POLL_ADD_LEVEL (1U << 3)
+
+/*
+ * ASYNC_CANCEL flags.
+ *
+ * IORING_ASYNC_CANCEL_ALL Cancel all requests that match the given key
+ * IORING_ASYNC_CANCEL_FD Key off 'fd' for cancelation rather than the
+ * request 'user_data'
+ * IORING_ASYNC_CANCEL_ANY Match any request
+ * IORING_ASYNC_CANCEL_FD_FIXED 'fd' passed in is a fixed descriptor
+ */
+#define IORING_ASYNC_CANCEL_ALL (1U << 0)
+#define IORING_ASYNC_CANCEL_FD (1U << 1)
+#define IORING_ASYNC_CANCEL_ANY (1U << 2)
+#define IORING_ASYNC_CANCEL_FD_FIXED (1U << 3)
+
+/*
+ * send/sendmsg and recv/recvmsg flags (sqe->ioprio)
+ *
+ * IORING_RECVSEND_POLL_FIRST If set, instead of first attempting to send
+ * or receive and arm poll if that yields an
+ * -EAGAIN result, arm poll upfront and skip
+ * the initial transfer attempt.
+ *
+ * IORING_RECV_MULTISHOT Multishot recv. Sets IORING_CQE_F_MORE if
+ * the handler will continue to report
+ * CQEs on behalf of the same SQE.
+ *
+ * IORING_RECVSEND_FIXED_BUF Use registered buffers, the index is stored in
+ * the buf_index field.
+ */
+#define IORING_RECVSEND_POLL_FIRST (1U << 0)
+#define IORING_RECV_MULTISHOT (1U << 1)
+#define IORING_RECVSEND_FIXED_BUF (1U << 2)
+
+/*
+ * accept flags stored in sqe->ioprio
+ */
+#define IORING_ACCEPT_MULTISHOT (1U << 0)
+
+/*
+ * IORING_OP_MSG_RING command types, stored in sqe->addr
+ */
+enum {
+ IORING_MSG_DATA, /* pass sqe->len as 'res' and off as user_data */
+ IORING_MSG_SEND_FD, /* send a registered fd to another ring */
+};
+
+/*
+ * IORING_OP_MSG_RING flags (sqe->msg_ring_flags)
+ *
+ * IORING_MSG_RING_CQE_SKIP Don't post a CQE to the target ring. Not
+ * applicable for IORING_MSG_DATA, obviously.
+ */
+#define IORING_MSG_RING_CQE_SKIP (1U << 0)
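
A sketch of an IORING_MSG_DATA message: per the command-type comment above,
the target ring receives a CQE whose res and user_data are taken from this
SQE's len and off fields:

/* Sketch: post a data-only CQE into another ring. */
#include <string.h>
#include <linux/io_uring.h>

static void prep_msg_ring(struct io_uring_sqe *sqe, int target_ring_fd,
                          __u32 data, __u64 user_data)
{
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_MSG_RING;
        sqe->fd = target_ring_fd;
        sqe->addr = IORING_MSG_DATA;    /* command type */
        sqe->len = data;                /* becomes cqe->res in the target */
        sqe->off = user_data;           /* becomes cqe->user_data */
}
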
/*
* IO completion data structure (Completion Queue Entry)
@@ -135,6 +329,30 @@ struct io_uring_cqe {
__u64 user_data; /* sqe->data submission passed back */
__s32 res; /* result code for this event */
__u32 flags;
+
+ /*
+ * If the ring is initialized with IORING_SETUP_CQE32, then this field
+ * contains 16-bytes of padding, doubling the size of the CQE.
+ */
+ __u64 big_cqe[];
+};
+
+/*
+ * cqe->flags
+ *
+ * IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID
+ * IORING_CQE_F_MORE If set, parent SQE will generate more CQE entries
+ * IORING_CQE_F_SOCK_NONEMPTY If set, more data to read after socket recv
+ * IORING_CQE_F_NOTIF		Set for notification CQEs. Can be used to
+ *				distinguish them from sends.
+ */
+#define IORING_CQE_F_BUFFER (1U << 0)
+#define IORING_CQE_F_MORE (1U << 1)
+#define IORING_CQE_F_SOCK_NONEMPTY (1U << 2)
+#define IORING_CQE_F_NOTIF (1U << 3)
+
+enum {
+ IORING_CQE_BUFFER_SHIFT = 16,
};
/*
@@ -163,6 +381,8 @@ struct io_sqring_offsets {
* sq_ring->flags
*/
#define IORING_SQ_NEED_WAKEUP (1U << 0) /* needs io_uring_enter wakeup */
+#define IORING_SQ_CQ_OVERFLOW (1U << 1) /* CQ ring is overflown */
+#define IORING_SQ_TASKRUN (1U << 2) /* task should enter the kernel */
struct io_cqring_offsets {
__u32 head;
@@ -171,14 +391,26 @@ struct io_cqring_offsets {
__u32 ring_entries;
__u32 overflow;
__u32 cqes;
- __u64 resv[2];
+ __u32 flags;
+ __u32 resv1;
+ __u64 resv2;
};
/*
+ * cq_ring->flags
+ */
+
+/* disable eventfd notifications */
+#define IORING_CQ_EVENTFD_DISABLED (1U << 0)
+
+/*
* io_uring_enter(2) flags
*/
-#define IORING_ENTER_GETEVENTS (1U << 0)
-#define IORING_ENTER_SQ_WAKEUP (1U << 1)
+#define IORING_ENTER_GETEVENTS (1U << 0)
+#define IORING_ENTER_SQ_WAKEUP (1U << 1)
+#define IORING_ENTER_SQ_WAIT (1U << 2)
+#define IORING_ENTER_EXT_ARG (1U << 3)
+#define IORING_ENTER_REGISTERED_RING (1U << 4)
/*
* Passed in for io_uring_setup(2). Copied back with updated info on success
@@ -204,28 +436,122 @@ struct io_uring_params {
#define IORING_FEAT_SUBMIT_STABLE (1U << 2)
#define IORING_FEAT_RW_CUR_POS (1U << 3)
#define IORING_FEAT_CUR_PERSONALITY (1U << 4)
+#define IORING_FEAT_FAST_POLL (1U << 5)
+#define IORING_FEAT_POLL_32BITS (1U << 6)
+#define IORING_FEAT_SQPOLL_NONFIXED (1U << 7)
+#define IORING_FEAT_EXT_ARG (1U << 8)
+#define IORING_FEAT_NATIVE_WORKERS (1U << 9)
+#define IORING_FEAT_RSRC_TAGS (1U << 10)
+#define IORING_FEAT_CQE_SKIP (1U << 11)
+#define IORING_FEAT_LINKED_FILE (1U << 12)
/*
* io_uring_register(2) opcodes and arguments
*/
-#define IORING_REGISTER_BUFFERS 0
-#define IORING_UNREGISTER_BUFFERS 1
-#define IORING_REGISTER_FILES 2
-#define IORING_UNREGISTER_FILES 3
-#define IORING_REGISTER_EVENTFD 4
-#define IORING_UNREGISTER_EVENTFD 5
-#define IORING_REGISTER_FILES_UPDATE 6
-#define IORING_REGISTER_EVENTFD_ASYNC 7
-#define IORING_REGISTER_PROBE 8
-#define IORING_REGISTER_PERSONALITY 9
-#define IORING_UNREGISTER_PERSONALITY 10
+enum {
+ IORING_REGISTER_BUFFERS = 0,
+ IORING_UNREGISTER_BUFFERS = 1,
+ IORING_REGISTER_FILES = 2,
+ IORING_UNREGISTER_FILES = 3,
+ IORING_REGISTER_EVENTFD = 4,
+ IORING_UNREGISTER_EVENTFD = 5,
+ IORING_REGISTER_FILES_UPDATE = 6,
+ IORING_REGISTER_EVENTFD_ASYNC = 7,
+ IORING_REGISTER_PROBE = 8,
+ IORING_REGISTER_PERSONALITY = 9,
+ IORING_UNREGISTER_PERSONALITY = 10,
+ IORING_REGISTER_RESTRICTIONS = 11,
+ IORING_REGISTER_ENABLE_RINGS = 12,
+
+ /* extended with tagging */
+ IORING_REGISTER_FILES2 = 13,
+ IORING_REGISTER_FILES_UPDATE2 = 14,
+ IORING_REGISTER_BUFFERS2 = 15,
+ IORING_REGISTER_BUFFERS_UPDATE = 16,
+ /* set/clear io-wq thread affinities */
+ IORING_REGISTER_IOWQ_AFF = 17,
+ IORING_UNREGISTER_IOWQ_AFF = 18,
+
+ /* set/get max number of io-wq workers */
+ IORING_REGISTER_IOWQ_MAX_WORKERS = 19,
+
+ /* register/unregister io_uring fd with the ring */
+ IORING_REGISTER_RING_FDS = 20,
+ IORING_UNREGISTER_RING_FDS = 21,
+
+ /* register ring based provide buffer group */
+ IORING_REGISTER_PBUF_RING = 22,
+ IORING_UNREGISTER_PBUF_RING = 23,
+
+ /* sync cancelation API */
+ IORING_REGISTER_SYNC_CANCEL = 24,
+
+ /* register a range of fixed file slots for automatic slot allocation */
+ IORING_REGISTER_FILE_ALLOC_RANGE = 25,
+
+ /* this goes last */
+ IORING_REGISTER_LAST
+};
+
+/* io-wq worker categories */
+enum {
+ IO_WQ_BOUND,
+ IO_WQ_UNBOUND,
+};
+
+/* deprecated, see struct io_uring_rsrc_update */
struct io_uring_files_update {
__u32 offset;
__u32 resv;
__aligned_u64 /* __s32 * */ fds;
};
+/*
+ * Register a fully sparse file space, rather than pass in an array of all
+ * -1 file descriptors.
+ */
+#define IORING_RSRC_REGISTER_SPARSE (1U << 0)
+
+struct io_uring_rsrc_register {
+ __u32 nr;
+ __u32 flags;
+ __u64 resv2;
+ __aligned_u64 data;
+ __aligned_u64 tags;
+};
+
+struct io_uring_rsrc_update {
+ __u32 offset;
+ __u32 resv;
+ __aligned_u64 data;
+};
+
+struct io_uring_rsrc_update2 {
+ __u32 offset;
+ __u32 resv;
+ __aligned_u64 data;
+ __aligned_u64 tags;
+ __u32 nr;
+ __u32 resv2;
+};
+
+struct io_uring_notification_slot {
+ __u64 tag;
+ __u64 resv[3];
+};
+
+struct io_uring_notification_register {
+ __u32 nr_slots;
+ __u32 resv;
+ __u64 resv2;
+ __u64 data;
+ __u64 resv3;
+};
+
+/* Skip updating fd indexes set to this value in the fd table */
+#define IORING_REGISTER_FILES_SKIP (-2)
+
#define IO_URING_OP_SUPPORTED (1U << 0)
struct io_uring_probe_op {
@@ -240,7 +566,108 @@ struct io_uring_probe {
__u8 ops_len; /* length of ops[] array below */
__u16 resv;
__u32 resv2[3];
- struct io_uring_probe_op ops[0];
+ struct io_uring_probe_op ops[];
+};
+
+struct io_uring_restriction {
+ __u16 opcode;
+ union {
+ __u8 register_op; /* IORING_RESTRICTION_REGISTER_OP */
+ __u8 sqe_op; /* IORING_RESTRICTION_SQE_OP */
+ __u8 sqe_flags; /* IORING_RESTRICTION_SQE_FLAGS_* */
+ };
+ __u8 resv;
+ __u32 resv2[3];
+};
+
+struct io_uring_buf {
+ __u64 addr;
+ __u32 len;
+ __u16 bid;
+ __u16 resv;
};
+struct io_uring_buf_ring {
+ union {
+ /*
+ * To avoid spilling into more pages than we need to, the
+ * ring tail is overlaid with the io_uring_buf->resv field.
+ */
+ struct {
+ __u64 resv1;
+ __u32 resv2;
+ __u16 resv3;
+ __u16 tail;
+ };
+ struct io_uring_buf bufs[0];
+ };
+};
+
+/* argument for IORING_(UN)REGISTER_PBUF_RING */
+struct io_uring_buf_reg {
+ __u64 ring_addr;
+ __u32 ring_entries;
+ __u16 bgid;
+ __u16 pad;
+ __u64 resv[3];
+};
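
A sketch of registering a provided-buffer ring: map ring_entries *
sizeof(struct io_uring_buf) bytes (ring_entries must be a power of two),
point ring_addr at it, and register; individual buffers are then published
through bufs[] and the overlaid tail described above:

/* Sketch: set up and register a provided-buffer ring for group `bgid`. */
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static struct io_uring_buf_ring *pbuf_ring_setup(int ring_fd, __u16 bgid,
                                                 __u32 entries)
{
        struct io_uring_buf_reg reg = { .ring_entries = entries, .bgid = bgid };
        size_t size = entries * sizeof(struct io_uring_buf);
        struct io_uring_buf_ring *br;

        br = mmap(NULL, size, PROT_READ | PROT_WRITE,
                  MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        if (br == MAP_FAILED)
                return NULL;
        reg.ring_addr = (unsigned long)br;
        if (syscall(__NR_io_uring_register, ring_fd,
                    IORING_REGISTER_PBUF_RING, &reg, 1) < 0) {
                munmap(br, size);
                return NULL;
        }
        br->tail = 0;   /* anonymous pages start zeroed; explicit for clarity */
        return br;
}
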
+
+/*
+ * io_uring_restriction->opcode values
+ */
+enum {
+ /* Allow an io_uring_register(2) opcode */
+ IORING_RESTRICTION_REGISTER_OP = 0,
+
+ /* Allow an sqe opcode */
+ IORING_RESTRICTION_SQE_OP = 1,
+
+ /* Allow sqe flags */
+ IORING_RESTRICTION_SQE_FLAGS_ALLOWED = 2,
+
+ /* Require sqe flags (these flags must be set on each submission) */
+ IORING_RESTRICTION_SQE_FLAGS_REQUIRED = 3,
+
+ IORING_RESTRICTION_LAST
+};
+
+struct io_uring_getevents_arg {
+ __u64 sigmask;
+ __u32 sigmask_sz;
+ __u32 pad;
+ __u64 ts;
+};
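
With IORING_ENTER_EXT_ARG, the final two io_uring_enter(2) arguments become
a pointer to this struct and its size, which allows waiting with a timeout
without queueing a separate timeout SQE. A sketch, assuming the kernel
advertises IORING_FEAT_EXT_ARG:

/* Sketch: wait for one CQE, bounded by `ts` (NULL waits indefinitely). */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>
#include <linux/time_types.h>

static int wait_one_cqe(int ring_fd, struct __kernel_timespec *ts)
{
        struct io_uring_getevents_arg arg = {
                .ts = (__u64)(unsigned long)ts, /* 0 means no timeout */
        };

        return syscall(__NR_io_uring_enter, ring_fd, 0, 1,
                       IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
                       &arg, sizeof(arg));
}
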
+
+/*
+ * Argument for IORING_REGISTER_SYNC_CANCEL
+ */
+struct io_uring_sync_cancel_reg {
+ __u64 addr;
+ __s32 fd;
+ __u32 flags;
+ struct __kernel_timespec timeout;
+ __u64 pad[4];
+};
+
+/*
+ * Argument for IORING_REGISTER_FILE_ALLOC_RANGE
+ * The range is specified as [off, off + len)
+ */
+struct io_uring_file_index_range {
+ __u32 off;
+ __u32 len;
+ __u64 resv;
+};
+
+struct io_uring_recvmsg_out {
+ __u32 namelen;
+ __u32 controllen;
+ __u32 payloadlen;
+ __u32 flags;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
#endif
diff --git a/include/uapi/linux/ioam6.h b/include/uapi/linux/ioam6.h
new file mode 100644
index 000000000000..ac4de376f0ce
--- /dev/null
+++ b/include/uapi/linux/ioam6.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * IPv6 IOAM implementation
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+
+#ifndef _UAPI_LINUX_IOAM6_H
+#define _UAPI_LINUX_IOAM6_H
+
+#include <asm/byteorder.h>
+#include <linux/types.h>
+
+#define IOAM6_U16_UNAVAILABLE U16_MAX
+#define IOAM6_U32_UNAVAILABLE U32_MAX
+#define IOAM6_U64_UNAVAILABLE U64_MAX
+
+#define IOAM6_DEFAULT_ID (IOAM6_U32_UNAVAILABLE >> 8)
+#define IOAM6_DEFAULT_ID_WIDE (IOAM6_U64_UNAVAILABLE >> 8)
+#define IOAM6_DEFAULT_IF_ID IOAM6_U16_UNAVAILABLE
+#define IOAM6_DEFAULT_IF_ID_WIDE IOAM6_U32_UNAVAILABLE
+
+/*
+ * IPv6 IOAM Option Header
+ */
+struct ioam6_hdr {
+ __u8 opt_type;
+ __u8 opt_len;
+ __u8 :8; /* reserved */
+#define IOAM6_TYPE_PREALLOC 0
+ __u8 type;
+} __attribute__((packed));
+
+/*
+ * IOAM Trace Header
+ */
+struct ioam6_trace_hdr {
+ __be16 namespace_id;
+
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+
+ __u8 :1, /* unused */
+ :1, /* unused */
+ overflow:1,
+ nodelen:5;
+
+ __u8 remlen:7,
+ :1; /* unused */
+
+ union {
+ __be32 type_be32;
+
+ struct {
+ __u32 bit7:1,
+ bit6:1,
+ bit5:1,
+ bit4:1,
+ bit3:1,
+ bit2:1,
+ bit1:1,
+ bit0:1,
+ bit15:1, /* unused */
+ bit14:1, /* unused */
+ bit13:1, /* unused */
+ bit12:1, /* unused */
+ bit11:1,
+ bit10:1,
+ bit9:1,
+ bit8:1,
+ bit23:1, /* reserved */
+ bit22:1,
+ bit21:1, /* unused */
+ bit20:1, /* unused */
+ bit19:1, /* unused */
+ bit18:1, /* unused */
+ bit17:1, /* unused */
+ bit16:1, /* unused */
+ :8; /* reserved */
+ } type;
+ };
+
+#elif defined(__BIG_ENDIAN_BITFIELD)
+
+ __u8 nodelen:5,
+ overflow:1,
+ :1, /* unused */
+ :1; /* unused */
+
+ __u8 :1, /* unused */
+ remlen:7;
+
+ union {
+ __be32 type_be32;
+
+ struct {
+ __u32 bit0:1,
+ bit1:1,
+ bit2:1,
+ bit3:1,
+ bit4:1,
+ bit5:1,
+ bit6:1,
+ bit7:1,
+ bit8:1,
+ bit9:1,
+ bit10:1,
+ bit11:1,
+ bit12:1, /* unused */
+ bit13:1, /* unused */
+ bit14:1, /* unused */
+ bit15:1, /* unused */
+ bit16:1, /* unused */
+ bit17:1, /* unused */
+ bit18:1, /* unused */
+ bit19:1, /* unused */
+ bit20:1, /* unused */
+ bit21:1, /* unused */
+ bit22:1,
+ bit23:1, /* reserved */
+ :8; /* reserved */
+ } type;
+ };
+
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+
+#define IOAM6_TRACE_DATA_SIZE_MAX 244
+ __u8 data[0];
+} __attribute__((packed));
+
+#endif /* _UAPI_LINUX_IOAM6_H */
diff --git a/include/uapi/linux/ioam6_genl.h b/include/uapi/linux/ioam6_genl.h
new file mode 100644
index 000000000000..ca4b22833754
--- /dev/null
+++ b/include/uapi/linux/ioam6_genl.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * IPv6 IOAM Generic Netlink API
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+
+#ifndef _UAPI_LINUX_IOAM6_GENL_H
+#define _UAPI_LINUX_IOAM6_GENL_H
+
+#define IOAM6_GENL_NAME "IOAM6"
+#define IOAM6_GENL_VERSION 0x1
+
+enum {
+ IOAM6_ATTR_UNSPEC,
+
+ IOAM6_ATTR_NS_ID, /* u16 */
+ IOAM6_ATTR_NS_DATA, /* u32 */
+ IOAM6_ATTR_NS_DATA_WIDE,/* u64 */
+
+#define IOAM6_MAX_SCHEMA_DATA_LEN (255 * 4)
+ IOAM6_ATTR_SC_ID, /* u32 */
+ IOAM6_ATTR_SC_DATA, /* Binary */
+ IOAM6_ATTR_SC_NONE, /* Flag */
+
+ IOAM6_ATTR_PAD,
+
+ __IOAM6_ATTR_MAX,
+};
+
+#define IOAM6_ATTR_MAX (__IOAM6_ATTR_MAX - 1)
+
+enum {
+ IOAM6_CMD_UNSPEC,
+
+ IOAM6_CMD_ADD_NAMESPACE,
+ IOAM6_CMD_DEL_NAMESPACE,
+ IOAM6_CMD_DUMP_NAMESPACES,
+
+ IOAM6_CMD_ADD_SCHEMA,
+ IOAM6_CMD_DEL_SCHEMA,
+ IOAM6_CMD_DUMP_SCHEMAS,
+
+ IOAM6_CMD_NS_SET_SCHEMA,
+
+ __IOAM6_CMD_MAX,
+};
+
+#define IOAM6_CMD_MAX (__IOAM6_CMD_MAX - 1)
+
+#endif /* _UAPI_LINUX_IOAM6_GENL_H */
diff --git a/include/uapi/linux/ioam6_iptunnel.h b/include/uapi/linux/ioam6_iptunnel.h
new file mode 100644
index 000000000000..38f6a8fdfd34
--- /dev/null
+++ b/include/uapi/linux/ioam6_iptunnel.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * IPv6 IOAM Lightweight Tunnel API
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+
+#ifndef _UAPI_LINUX_IOAM6_IPTUNNEL_H
+#define _UAPI_LINUX_IOAM6_IPTUNNEL_H
+
+/* Encap modes:
+ * - inline: direct insertion
+ * - encap: ip6ip6 encapsulation
+ * - auto: inline for local packets, encap for in-transit packets
+ */
+enum {
+ __IOAM6_IPTUNNEL_MODE_MIN,
+
+ IOAM6_IPTUNNEL_MODE_INLINE,
+ IOAM6_IPTUNNEL_MODE_ENCAP,
+ IOAM6_IPTUNNEL_MODE_AUTO,
+
+ __IOAM6_IPTUNNEL_MODE_MAX,
+};
+
+#define IOAM6_IPTUNNEL_MODE_MIN (__IOAM6_IPTUNNEL_MODE_MIN + 1)
+#define IOAM6_IPTUNNEL_MODE_MAX (__IOAM6_IPTUNNEL_MODE_MAX - 1)
+
+enum {
+ IOAM6_IPTUNNEL_UNSPEC,
+
+ /* Encap mode */
+ IOAM6_IPTUNNEL_MODE, /* u8 */
+
+ /* Tunnel dst address.
+ * For encap,auto modes.
+ */
+ IOAM6_IPTUNNEL_DST, /* struct in6_addr */
+
+ /* IOAM Trace Header */
+ IOAM6_IPTUNNEL_TRACE, /* struct ioam6_trace_hdr */
+
+ /* Insertion frequency:
+ * "k over n" packets (0 < k <= n)
+ * [0.0001% ... 100%], e.g. k=1, n=10000 inserts into 0.01% of packets
+ */
+#define IOAM6_IPTUNNEL_FREQ_MIN 1
+#define IOAM6_IPTUNNEL_FREQ_MAX 1000000
+ IOAM6_IPTUNNEL_FREQ_K, /* u32 */
+ IOAM6_IPTUNNEL_FREQ_N, /* u32 */
+
+ __IOAM6_IPTUNNEL_MAX,
+};
+
+#define IOAM6_IPTUNNEL_MAX (__IOAM6_IPTUNNEL_MAX - 1)
+
+#endif /* _UAPI_LINUX_IOAM6_IPTUNNEL_H */
diff --git a/include/uapi/linux/iommu.h b/include/uapi/linux/iommu.h
index 4ad3496e5c43..65d8b0234f69 100644
--- a/include/uapi/linux/iommu.h
+++ b/include/uapi/linux/iommu.h
@@ -81,7 +81,10 @@ struct iommu_fault_unrecoverable {
/**
* struct iommu_fault_page_request - Page Request data
* @flags: encodes whether the corresponding fields are valid and whether this
- * is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values)
+ * is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values).
+ * When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response
+ * must have the same PASID value as the page request. When it is clear,
+ * the page response should not have a PASID.
* @pasid: Process Address Space ID
* @grpid: Page Request Group Index
* @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
@@ -92,6 +95,7 @@ struct iommu_fault_page_request {
#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID (1 << 0)
#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE (1 << 1)
#define IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA (1 << 2)
+#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID (1 << 3)
__u32 flags;
__u32 pasid;
__u32 grpid;
@@ -135,6 +139,7 @@ enum iommu_page_response_code {
/**
* struct iommu_page_response - Generic page response information
+ * @argsz: User filled size of this data
* @version: API version of this structure
* @flags: encodes whether the corresponding fields are valid
* (IOMMU_FAULT_PAGE_RESPONSE_* values)
@@ -143,6 +148,7 @@ enum iommu_page_response_code {
* @code: response code from &enum iommu_page_response_code
*/
struct iommu_page_response {
+ __u32 argsz;
#define IOMMU_PAGE_RESP_VERSION_1 1
__u32 version;
#define IOMMU_PAGE_RESP_PASID_VALID (1 << 0)
@@ -152,173 +158,4 @@ struct iommu_page_response {
__u32 code;
};
-/* defines the granularity of the invalidation */
-enum iommu_inv_granularity {
- IOMMU_INV_GRANU_DOMAIN, /* domain-selective invalidation */
- IOMMU_INV_GRANU_PASID, /* PASID-selective invalidation */
- IOMMU_INV_GRANU_ADDR, /* page-selective invalidation */
- IOMMU_INV_GRANU_NR, /* number of invalidation granularities */
-};
-
-/**
- * struct iommu_inv_addr_info - Address Selective Invalidation Structure
- *
- * @flags: indicates the granularity of the address-selective invalidation
- * - If the PASID bit is set, the @pasid field is populated and the invalidation
- * relates to cache entries tagged with this PASID and matching the address
- * range.
- * - If ARCHID bit is set, @archid is populated and the invalidation relates
- * to cache entries tagged with this architecture specific ID and matching
- * the address range.
- * - Both PASID and ARCHID can be set as they may tag different caches.
- * - If neither PASID or ARCHID is set, global addr invalidation applies.
- * - The LEAF flag indicates whether only the leaf PTE caching needs to be
- * invalidated and other paging structure caches can be preserved.
- * @pasid: process address space ID
- * @archid: architecture-specific ID
- * @addr: first stage/level input address
- * @granule_size: page/block size of the mapping in bytes
- * @nb_granules: number of contiguous granules to be invalidated
- */
-struct iommu_inv_addr_info {
-#define IOMMU_INV_ADDR_FLAGS_PASID (1 << 0)
-#define IOMMU_INV_ADDR_FLAGS_ARCHID (1 << 1)
-#define IOMMU_INV_ADDR_FLAGS_LEAF (1 << 2)
- __u32 flags;
- __u32 archid;
- __u64 pasid;
- __u64 addr;
- __u64 granule_size;
- __u64 nb_granules;
-};
-
-/**
- * struct iommu_inv_pasid_info - PASID Selective Invalidation Structure
- *
- * @flags: indicates the granularity of the PASID-selective invalidation
- * - If the PASID bit is set, the @pasid field is populated and the invalidation
- * relates to cache entries tagged with this PASID and matching the address
- * range.
- * - If the ARCHID bit is set, the @archid is populated and the invalidation
- * relates to cache entries tagged with this architecture specific ID and
- * matching the address range.
- * - Both PASID and ARCHID can be set as they may tag different caches.
- * - At least one of PASID or ARCHID must be set.
- * @pasid: process address space ID
- * @archid: architecture-specific ID
- */
-struct iommu_inv_pasid_info {
-#define IOMMU_INV_PASID_FLAGS_PASID (1 << 0)
-#define IOMMU_INV_PASID_FLAGS_ARCHID (1 << 1)
- __u32 flags;
- __u32 archid;
- __u64 pasid;
-};
-
-/**
- * struct iommu_cache_invalidate_info - First level/stage invalidation
- * information
- * @version: API version of this structure
- * @cache: bitfield that allows to select which caches to invalidate
- * @granularity: defines the lowest granularity used for the invalidation:
- * domain > PASID > addr
- * @padding: reserved for future use (should be zero)
- * @pasid_info: invalidation data when @granularity is %IOMMU_INV_GRANU_PASID
- * @addr_info: invalidation data when @granularity is %IOMMU_INV_GRANU_ADDR
- *
- * Not all the combinations of cache/granularity are valid:
- *
- * +--------------+---------------+---------------+---------------+
- * | type / | DEV_IOTLB | IOTLB | PASID |
- * | granularity | | | cache |
- * +==============+===============+===============+===============+
- * | DOMAIN | N/A | Y | Y |
- * +--------------+---------------+---------------+---------------+
- * | PASID | Y | Y | Y |
- * +--------------+---------------+---------------+---------------+
- * | ADDR | Y | Y | N/A |
- * +--------------+---------------+---------------+---------------+
- *
- * Invalidations by %IOMMU_INV_GRANU_DOMAIN don't take any argument other than
- * @version and @cache.
- *
- * If multiple cache types are invalidated simultaneously, they all
- * must support the used granularity.
- */
-struct iommu_cache_invalidate_info {
-#define IOMMU_CACHE_INVALIDATE_INFO_VERSION_1 1
- __u32 version;
-/* IOMMU paging structure cache */
-#define IOMMU_CACHE_INV_TYPE_IOTLB (1 << 0) /* IOMMU IOTLB */
-#define IOMMU_CACHE_INV_TYPE_DEV_IOTLB (1 << 1) /* Device IOTLB */
-#define IOMMU_CACHE_INV_TYPE_PASID (1 << 2) /* PASID cache */
-#define IOMMU_CACHE_INV_TYPE_NR (3)
- __u8 cache;
- __u8 granularity;
- __u8 padding[2];
- union {
- struct iommu_inv_pasid_info pasid_info;
- struct iommu_inv_addr_info addr_info;
- };
-};
-
-/**
- * struct iommu_gpasid_bind_data_vtd - Intel VT-d specific data on device and guest
- * SVA binding.
- *
- * @flags: VT-d PASID table entry attributes
- * @pat: Page attribute table data to compute effective memory type
- * @emt: Extended memory type
- *
- * Only guest vIOMMU selectable and effective options are passed down to
- * the host IOMMU.
- */
-struct iommu_gpasid_bind_data_vtd {
-#define IOMMU_SVA_VTD_GPASID_SRE (1 << 0) /* supervisor request */
-#define IOMMU_SVA_VTD_GPASID_EAFE (1 << 1) /* extended access enable */
-#define IOMMU_SVA_VTD_GPASID_PCD (1 << 2) /* page-level cache disable */
-#define IOMMU_SVA_VTD_GPASID_PWT (1 << 3) /* page-level write through */
-#define IOMMU_SVA_VTD_GPASID_EMTE (1 << 4) /* extended mem type enable */
-#define IOMMU_SVA_VTD_GPASID_CD (1 << 5) /* PASID-level cache disable */
- __u64 flags;
- __u32 pat;
- __u32 emt;
-};
-
-/**
- * struct iommu_gpasid_bind_data - Information about device and guest PASID binding
- * @version: Version of this data structure
- * @format: PASID table entry format
- * @flags: Additional information on guest bind request
- * @gpgd: Guest page directory base of the guest mm to bind
- * @hpasid: Process address space ID used for the guest mm in host IOMMU
- * @gpasid: Process address space ID used for the guest mm in guest IOMMU
- * @addr_width: Guest virtual address width
- * @padding: Reserved for future use (should be zero)
- * @vtd: Intel VT-d specific data
- *
- * Guest to host PASID mapping can be an identity or non-identity, where guest
- * has its own PASID space. For non-identify mapping, guest to host PASID lookup
- * is needed when VM programs guest PASID into an assigned device. VMM may
- * trap such PASID programming then request host IOMMU driver to convert guest
- * PASID to host PASID based on this bind data.
- */
-struct iommu_gpasid_bind_data {
-#define IOMMU_GPASID_BIND_VERSION_1 1
- __u32 version;
-#define IOMMU_PASID_FORMAT_INTEL_VTD 1
- __u32 format;
-#define IOMMU_SVA_GPASID_VAL (1 << 0) /* guest PASID valid */
- __u64 flags;
- __u64 gpgd;
- __u64 hpasid;
- __u64 gpasid;
- __u32 addr_width;
- __u8 padding[12];
- /* Vendor specific data */
- union {
- struct iommu_gpasid_bind_data_vtd vtd;
- };
-};
-
#endif /* _UAPI_IOMMU_H */
diff --git a/include/uapi/linux/ioprio.h b/include/uapi/linux/ioprio.h
new file mode 100644
index 000000000000..f70f2596a6bf
--- /dev/null
+++ b/include/uapi/linux/ioprio.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_IOPRIO_H
+#define _UAPI_LINUX_IOPRIO_H
+
+/*
+ * Gives us 8 prio classes with 13-bits of data for each class
+ */
+#define IOPRIO_CLASS_SHIFT 13
+#define IOPRIO_CLASS_MASK 0x07
+#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1)
+
+#define IOPRIO_PRIO_CLASS(ioprio) \
+ (((ioprio) >> IOPRIO_CLASS_SHIFT) & IOPRIO_CLASS_MASK)
+#define IOPRIO_PRIO_DATA(ioprio) ((ioprio) & IOPRIO_PRIO_MASK)
+#define IOPRIO_PRIO_VALUE(class, data) \
+ ((((class) & IOPRIO_CLASS_MASK) << IOPRIO_CLASS_SHIFT) | \
+ ((data) & IOPRIO_PRIO_MASK))
+
+/*
+ * These are the io priority groups as implemented by the BFQ and mq-deadline
+ * schedulers. RT is the realtime class, it always gets premium service. For
+ * ATA disks supporting NCQ IO priority, RT class IOs will be processed using
+ * high priority NCQ commands. BE is the best-effort scheduling class, the
+ * default for any process. IDLE is the idle scheduling class, it is only
+ * served when no one else is using the disk.
+ */
+enum {
+ IOPRIO_CLASS_NONE,
+ IOPRIO_CLASS_RT,
+ IOPRIO_CLASS_BE,
+ IOPRIO_CLASS_IDLE,
+};
+
+/*
+ * The RT and BE priority classes both support up to 8 priority levels.
+ */
+#define IOPRIO_NR_LEVELS 8
+#define IOPRIO_BE_NR IOPRIO_NR_LEVELS
+
+enum {
+ IOPRIO_WHO_PROCESS = 1,
+ IOPRIO_WHO_PGRP,
+ IOPRIO_WHO_USER,
+};
+
+/*
+ * Fallback BE priority level.
+ */
+#define IOPRIO_NORM 4
+#define IOPRIO_BE_NORM IOPRIO_NORM
+
+#endif /* _UAPI_LINUX_IOPRIO_H */
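
A worked sketch of the macros above: IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 2)
yields (1 << 13) | 2 = 8194, which ioprio_set(2) accepts; there is no glibc
wrapper, so the call goes through syscall(2):

/* Sketch: put the calling process in the realtime I/O class, level 2. */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/ioprio.h>

static int set_rt_ioprio(void)
{
        int prio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 2);

        /* who == 0 means the calling thread for IOPRIO_WHO_PROCESS */
        return syscall(__NR_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio);
}
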
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index e42d13b55cf3..961ec16a26b8 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -112,13 +112,13 @@ struct ip_auth_hdr {
__be16 reserved;
__be32 spi;
__be32 seq_no; /* Sequence number */
- __u8 auth_data[0]; /* Variable len but >=4. Mind the 64 bit alignment! */
+ __u8 auth_data[]; /* Variable len but >=4. Mind the 64 bit alignment! */
};
struct ip_esp_hdr {
__be32 spi;
__be32 seq_no; /* Sequence number */
- __u8 enc_data[0]; /* Variable len but >=8. Mind the 64 bit alignment! */
+ __u8 enc_data[]; /* Variable len but >=8. Mind the 64 bit alignment! */
};
struct ip_comp_hdr {
@@ -169,6 +169,7 @@ enum
IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST,
IPV4_DEVCONF_DROP_GRATUITOUS_ARP,
IPV4_DEVCONF_BC_FORWARDING,
+ IPV4_DEVCONF_ARP_EVICT_NOCARRIER,
__IPV4_DEVCONF_MAX
};
diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h
index 4102ddcb4e14..1ed234e7f251 100644
--- a/include/uapi/linux/ip_vs.h
+++ b/include/uapi/linux/ip_vs.h
@@ -254,7 +254,7 @@ struct ip_vs_get_dests {
unsigned int num_dests;
/* the real servers */
- struct ip_vs_dest_entry entrytable[0];
+ struct ip_vs_dest_entry entrytable[];
};
@@ -264,7 +264,7 @@ struct ip_vs_get_services {
unsigned int num_services;
/* service table */
- struct ip_vs_service_entry entrytable[0];
+ struct ip_vs_service_entry entrytable[];
};
diff --git a/include/uapi/linux/ipmi.h b/include/uapi/linux/ipmi.h
index 32d148309b16..966c3070959b 100644
--- a/include/uapi/linux/ipmi.h
+++ b/include/uapi/linux/ipmi.h
@@ -81,6 +81,20 @@ struct ipmi_ipmb_addr {
};
/*
+ * Used for messages received directly from an IPMB that have not gone
+ * through an MC. This is for systems that sit right on an IPMB so
+ * they can receive commands and respond to them.
+ */
+#define IPMI_IPMB_DIRECT_ADDR_TYPE 0x81
+struct ipmi_ipmb_direct_addr {
+ int addr_type;
+ short channel;
+ unsigned char slave_addr;
+ unsigned char rs_lun;
+ unsigned char rq_lun;
+};
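
A sketch of filling the new address type for a directly attached IPMB, for
use as the addr/addr_len of a struct ipmi_req; the LUN values here are
illustrative:

/* Sketch: build a direct IPMB address for an outgoing request. */
#include <string.h>
#include <linux/ipmi.h>

static void ipmb_direct_addr(struct ipmi_ipmb_direct_addr *daddr,
                             unsigned char dest_slave)
{
        memset(daddr, 0, sizeof(*daddr));
        daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
        daddr->channel = 0;
        daddr->slave_addr = dest_slave; /* peer's I2C slave address */
        daddr->rs_lun = 0;
        daddr->rq_lun = 2;
}
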
+
+/*
* A LAN Address. This is an address to/from a LAN interface bridged
* by the BMC, not an address actually out on the LAN.
*
@@ -158,7 +172,7 @@ struct kernel_ipmi_msg {
* is used for the receive in-kernel interface and in the receive
* IOCTL.
*
- * The "IPMI_RESPONSE_RESPNOSE_TYPE" is a little strange sounding, but
+ * The "IPMI_RESPONSE_RESPONSE_TYPE" is a little strange sounding, but
* it allows you to get the message results when you send a response
* message.
*/
diff --git a/include/uapi/linux/ipmi_msgdefs.h b/include/uapi/linux/ipmi_msgdefs.h
index c2b23a9fdf3d..0934af3b8037 100644
--- a/include/uapi/linux/ipmi_msgdefs.h
+++ b/include/uapi/linux/ipmi_msgdefs.h
@@ -69,6 +69,8 @@
#define IPMI_ERR_MSG_TRUNCATED 0xc6
#define IPMI_REQ_LEN_INVALID_ERR 0xc7
#define IPMI_REQ_LEN_EXCEEDED_ERR 0xc8
+#define IPMI_DEVICE_IN_FW_UPDATE_ERR 0xd1
+#define IPMI_DEVICE_IN_INIT_ERR 0xd2
#define IPMI_NOT_IN_MY_STATE_ERR 0xd5 /* IPMI 2.0 */
#define IPMI_LOST_ARBITRATION_ERR 0x81
#define IPMI_BUS_ERR 0x82
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 9c0f4a92bcff..03cdbe798fe3 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -40,6 +40,7 @@ struct in6_ifreq {
#define IPV6_SRCRT_STRICT 0x01 /* Deprecated; will be removed */
#define IPV6_SRCRT_TYPE_0 0 /* Deprecated; will be removed */
#define IPV6_SRCRT_TYPE_2 2 /* IPv6 type 2 Routing Header */
+#define IPV6_SRCRT_TYPE_3 3 /* RPL Segment Routing with IPv6 */
#define IPV6_SRCRT_TYPE_4 4 /* Segment Routing with IPv6 */
/*
@@ -187,6 +188,13 @@ enum {
DEVCONF_DISABLE_POLICY,
DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN,
DEVCONF_NDISC_TCLASS,
+ DEVCONF_RPL_SEG_ENABLED,
+ DEVCONF_RA_DEFRTR_METRIC,
+ DEVCONF_IOAM6_ENABLED,
+ DEVCONF_IOAM6_ID,
+ DEVCONF_IOAM6_ID_WIDE,
+ DEVCONF_NDISC_EVICT_NOCARRIER,
+ DEVCONF_ACCEPT_UNTRACKED_NA,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/ipx.h b/include/uapi/linux/ipx.h
deleted file mode 100644
index 3168137adae8..000000000000
--- a/include/uapi/linux/ipx.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _IPX_H_
-#define _IPX_H_
-#include <linux/libc-compat.h> /* for compatibility with glibc netipx/ipx.h */
-#include <linux/types.h>
-#include <linux/sockios.h>
-#include <linux/socket.h>
-#define IPX_NODE_LEN 6
-#define IPX_MTU 576
-
-#if __UAPI_DEF_SOCKADDR_IPX
-struct sockaddr_ipx {
- __kernel_sa_family_t sipx_family;
- __be16 sipx_port;
- __be32 sipx_network;
- unsigned char sipx_node[IPX_NODE_LEN];
- __u8 sipx_type;
- unsigned char sipx_zero; /* 16 byte fill */
-};
-#endif /* __UAPI_DEF_SOCKADDR_IPX */
-
-/*
- * So we can fit the extra info for SIOCSIFADDR into the address nicely
- */
-#define sipx_special sipx_port
-#define sipx_action sipx_zero
-#define IPX_DLTITF 0
-#define IPX_CRTITF 1
-
-#if __UAPI_DEF_IPX_ROUTE_DEFINITION
-struct ipx_route_definition {
- __be32 ipx_network;
- __be32 ipx_router_network;
- unsigned char ipx_router_node[IPX_NODE_LEN];
-};
-#endif /* __UAPI_DEF_IPX_ROUTE_DEFINITION */
-
-#if __UAPI_DEF_IPX_INTERFACE_DEFINITION
-struct ipx_interface_definition {
- __be32 ipx_network;
- unsigned char ipx_device[16];
- unsigned char ipx_dlink_type;
-#define IPX_FRAME_NONE 0
-#define IPX_FRAME_SNAP 1
-#define IPX_FRAME_8022 2
-#define IPX_FRAME_ETHERII 3
-#define IPX_FRAME_8023 4
-#define IPX_FRAME_TR_8022 5 /* obsolete */
- unsigned char ipx_special;
-#define IPX_SPECIAL_NONE 0
-#define IPX_PRIMARY 1
-#define IPX_INTERNAL 2
- unsigned char ipx_node[IPX_NODE_LEN];
-};
-#endif /* __UAPI_DEF_IPX_INTERFACE_DEFINITION */
-
-#if __UAPI_DEF_IPX_CONFIG_DATA
-struct ipx_config_data {
- unsigned char ipxcfg_auto_select_primary;
- unsigned char ipxcfg_auto_create_interfaces;
-};
-#endif /* __UAPI_DEF_IPX_CONFIG_DATA */
-
-/*
- * OLD Route Definition for backward compatibility.
- */
-
-#if __UAPI_DEF_IPX_ROUTE_DEF
-struct ipx_route_def {
- __be32 ipx_network;
- __be32 ipx_router_network;
-#define IPX_ROUTE_NO_ROUTER 0
- unsigned char ipx_router_node[IPX_NODE_LEN];
- unsigned char ipx_device[16];
- unsigned short ipx_flags;
-#define IPX_RT_SNAP 8
-#define IPX_RT_8022 4
-#define IPX_RT_BLUEBOOK 2
-#define IPX_RT_ROUTED 1
-};
-#endif /* __UAPI_DEF_IPX_ROUTE_DEF */
-
-#define SIOCAIPXITFCRT (SIOCPROTOPRIVATE)
-#define SIOCAIPXPRISLT (SIOCPROTOPRIVATE + 1)
-#define SIOCIPXCFGDATA (SIOCPROTOPRIVATE + 2)
-#define SIOCIPXNCPCONN (SIOCPROTOPRIVATE + 3)
-#endif /* _IPX_H_ */
diff --git a/include/uapi/linux/iso_fs.h b/include/uapi/linux/iso_fs.h
index a2555176f6d1..758178f5b52d 100644
--- a/include/uapi/linux/iso_fs.h
+++ b/include/uapi/linux/iso_fs.h
@@ -137,7 +137,7 @@ struct iso_path_table{
__u8 name_len[2]; /* 721 */
__u8 extent[4]; /* 731 */
__u8 parent[2]; /* 721 */
- char name[0];
+ char name[];
} __attribute__((packed));
/* high sierra is identical to iso, except that the date is only 6 bytes, and
@@ -154,7 +154,7 @@ struct iso_directory_record {
__u8 interleave [ISODCL (28, 28)]; /* 711 */
__u8 volume_sequence_number [ISODCL (29, 32)]; /* 723 */
__u8 name_len [ISODCL (33, 33)]; /* 711 */
- char name [0];
+ char name [];
} __attribute__((packed));
#define ISOFS_BLOCK_BITS 11
diff --git a/include/uapi/linux/isst_if.h b/include/uapi/linux/isst_if.h
index 0a52b7b093d3..ba078f8e9add 100644
--- a/include/uapi/linux/isst_if.h
+++ b/include/uapi/linux/isst_if.h
@@ -69,7 +69,7 @@ struct isst_if_cpu_maps {
* @logical_cpu: Logical CPU number to get target PCI device.
* @reg: PUNIT register offset
* @value: For write operation value to write and for
- * for read placeholder read value
+ * read placeholder read value
*
* Structure to specify read/write data to PUNIT registers.
*/
diff --git a/include/uapi/linux/jffs2.h b/include/uapi/linux/jffs2.h
index 784ba0b9690a..637ee4a793cf 100644
--- a/include/uapi/linux/jffs2.h
+++ b/include/uapi/linux/jffs2.h
@@ -123,7 +123,7 @@ struct jffs2_raw_dirent
__u8 unused[2];
jint32_t node_crc;
jint32_t name_crc;
- __u8 name[0];
+ __u8 name[];
};
/* The JFFS2 raw inode structure: Used for storage on physical media. */
@@ -155,7 +155,7 @@ struct jffs2_raw_inode
jint16_t flags; /* See JFFS2_INO_FLAG_* */
jint32_t data_crc; /* CRC for the (compressed) data. */
jint32_t node_crc; /* CRC for the raw inode (excluding data) */
- __u8 data[0];
+ __u8 data[];
};
struct jffs2_raw_xattr {
@@ -170,7 +170,7 @@ struct jffs2_raw_xattr {
jint16_t value_len;
jint32_t data_crc;
jint32_t node_crc;
- __u8 data[0];
+ __u8 data[];
} __attribute__((packed));
struct jffs2_raw_xref
@@ -196,7 +196,7 @@ struct jffs2_raw_summary
jint32_t padded; /* sum of the size of padding nodes */
jint32_t sum_crc; /* summary information crc */
jint32_t node_crc; /* node crc */
- jint32_t sum[0]; /* inode summary info */
+ jint32_t sum[]; /* inode summary info */
};
union jffs2_node_union
diff --git a/include/uapi/linux/kcov.h b/include/uapi/linux/kcov.h
index 1d0350e44ae3..ed95dba9fa37 100644
--- a/include/uapi/linux/kcov.h
+++ b/include/uapi/linux/kcov.h
@@ -13,7 +13,7 @@ struct kcov_remote_arg {
__u32 area_size; /* Length of coverage buffer in words */
__u32 num_handles; /* Size of handles array */
__aligned_u64 common_handle;
- __aligned_u64 handles[0];
+ __aligned_u64 handles[];
};
#define KCOV_REMOTE_MAX_HANDLES 0x100
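
With handles[] now a proper flexible array, userspace sizes struct kcov_remote_arg at allocation time. A minimal sketch of enabling remote coverage for one handle, assuming the usual KCOV setup (open /sys/kernel/debug/kcov, KCOV_INIT_TRACE, mmap the buffer) has already been done; error handling is elided:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kcov.h>

/* Sketch: enable remote KCOV collection for a single handle. */
static int kcov_remote_enable_one(int kcov_fd, uint64_t handle,
				  unsigned long words)
{
	struct kcov_remote_arg *arg;
	int ret;

	/* The flexible array makes the allocation size explicit. */
	arg = calloc(1, sizeof(*arg) + sizeof(arg->handles[0]));
	if (!arg)
		return -1;
	arg->trace_mode = KCOV_TRACE_PC;
	arg->area_size = words;		/* coverage buffer length, in words */
	arg->num_handles = 1;
	arg->handles[0] = handle;
	ret = ioctl(kcov_fd, KCOV_REMOTE_ENABLE, arg);
	free(arg);
	return ret;
}
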
diff --git a/include/uapi/linux/kd.h b/include/uapi/linux/kd.h
index 4616b31f84da..ee929ece4112 100644
--- a/include/uapi/linux/kd.h
+++ b/include/uapi/linux/kd.h
@@ -173,7 +173,7 @@ struct console_font {
#define KD_FONT_OP_SET 0 /* Set font */
#define KD_FONT_OP_GET 1 /* Get font */
#define KD_FONT_OP_SET_DEFAULT 2 /* Set font to default, data points to name / NULL */
-#define KD_FONT_OP_COPY 3 /* Copy from another console */
+#define KD_FONT_OP_COPY 3 /* Obsolete, do not use */
#define KD_FONT_FLAG_DONT_RECALC 1 /* Don't recalculate hw charcell size [compat] */
diff --git a/include/uapi/linux/kernel.h b/include/uapi/linux/kernel.h
index 0ff8f7477847..fadf2db71fe8 100644
--- a/include/uapi/linux/kernel.h
+++ b/include/uapi/linux/kernel.h
@@ -3,13 +3,6 @@
#define _UAPI_LINUX_KERNEL_H
#include <linux/sysinfo.h>
-
-/*
- * 'kernel.h' contains some often-used function prototypes etc
- */
-#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
-#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
-
-#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#include <linux/const.h>
#endif /* _UAPI_LINUX_KERNEL_H */
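
The alignment helpers are unchanged by the move to <linux/const.h>; only their home changes. A quick worked instance of the mask arithmetic, assuming headers recent enough to contain the move:

#include <stdio.h>
#include <linux/const.h>

int main(void)
{
	/* __ALIGN_KERNEL(13, 8): mask = 7, (13 + 7) & ~7 = 16 */
	printf("%lu\n", (unsigned long)__ALIGN_KERNEL(13UL, 8UL));
	/* __KERNEL_DIV_ROUND_UP(13, 8): (13 + 8 - 1) / 8 = 2 */
	printf("%lu\n", (unsigned long)__KERNEL_DIV_ROUND_UP(13UL, 8UL));
	return 0;
}
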
diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h
index 05669c87a0af..981016e05cfa 100644
--- a/include/uapi/linux/kexec.h
+++ b/include/uapi/linux/kexec.h
@@ -42,6 +42,8 @@
#define KEXEC_ARCH_MIPS_LE (10 << 16)
#define KEXEC_ARCH_MIPS ( 8 << 16)
#define KEXEC_ARCH_AARCH64 (183 << 16)
+#define KEXEC_ARCH_RISCV (243 << 16)
+#define KEXEC_ARCH_LOONGARCH (258 << 16)
/* The artificial cap on the number of segments passed to kexec_load. */
#define KEXEC_SEGMENT_MAX 16
@@ -53,9 +55,9 @@
*/
struct kexec_segment {
const void *buf;
- size_t bufsz;
+ __kernel_size_t bufsz;
const void *mem;
- size_t memsz;
+ __kernel_size_t memsz;
};
#endif /* __KERNEL__ */
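
Switching bufsz/memsz to __kernel_size_t pins the ABI type independently of whatever size_t the C library defines. A hedged sketch of handing one segment to kexec_load(2); the entry point and buffers are placeholders, and CAP_SYS_BOOT is required:

#include <sys/syscall.h>
#include <unistd.h>
#include <linux/kexec.h>

/* Sketch: load a single segment; addresses here are hypothetical. */
static long load_one_segment(const void *buf, __kernel_size_t bufsz,
			     const void *mem, __kernel_size_t memsz,
			     unsigned long entry)
{
	struct kexec_segment seg = {
		.buf   = buf,
		.bufsz = bufsz,
		.mem   = mem,	/* physical load address, page-aligned */
		.memsz = memsz,	/* page-aligned, >= bufsz */
	};

	return syscall(SYS_kexec_load, entry, 1UL, &seg, KEXEC_ARCH_DEFAULT);
}
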
diff --git a/include/uapi/linux/keyboard.h b/include/uapi/linux/keyboard.h
index 4846716e7c5c..36d230cedf12 100644
--- a/include/uapi/linux/keyboard.h
+++ b/include/uapi/linux/keyboard.h
@@ -27,7 +27,6 @@
#define MAX_NR_FUNC 256 /* max nr of strings assigned to keys */
#define KT_LATIN 0 /* we depend on this being zero */
-#define KT_LETTER 11 /* symbol that can be acted upon by CapsLock */
#define KT_FN 1
#define KT_SPEC 2
#define KT_PAD 3
@@ -38,6 +37,7 @@
#define KT_META 8
#define KT_ASCII 9
#define KT_LOCK 10
+#define KT_LETTER 11 /* symbol that can be acted upon by CapsLock */
#define KT_SLOCK 12
#define KT_DEAD2 13
#define KT_BRL 14
diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
index ed3d5893830d..4c8884eea808 100644
--- a/include/uapi/linux/keyctl.h
+++ b/include/uapi/linux/keyctl.h
@@ -69,6 +69,7 @@
#define KEYCTL_RESTRICT_KEYRING 29 /* Restrict keys allowed to link to a keyring */
#define KEYCTL_MOVE 30 /* Move keys between keyrings */
#define KEYCTL_CAPABILITIES 31 /* Find capabilities of keyrings subsystem */
+#define KEYCTL_WATCH_KEY 32 /* Watch a key or ring of keys for changes */
/* keyctl structures */
struct keyctl_dh_params {
@@ -130,5 +131,6 @@ struct keyctl_pkey_params {
#define KEYCTL_CAPS0_MOVE 0x80 /* KEYCTL_MOVE supported */
#define KEYCTL_CAPS1_NS_KEYRING_NAME 0x01 /* Keyring names are per-user_namespace */
#define KEYCTL_CAPS1_NS_KEY_TAG 0x02 /* Key indexing can include a namespace tag */
+#define KEYCTL_CAPS1_NOTIFICATIONS 0x04 /* Keys generate watchable notifications */
#endif /* _LINUX_KEYCTL_H */
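
The new capability bit is discoverable at run time through KEYCTL_CAPABILITIES, which fills a byte array of capability bitmasks. A small sketch using the raw syscall so that no libkeyutils dependency is assumed:

#include <sys/syscall.h>
#include <unistd.h>
#include <linux/keyctl.h>

/* Sketch: report whether keys can generate watchable notifications. */
static int key_notifications_supported(void)
{
	unsigned char caps[2] = { 0, 0 };
	long n;

	n = syscall(SYS_keyctl, KEYCTL_CAPABILITIES, caps, sizeof(caps));
	if (n < 0)
		return 0;	/* older kernel without KEYCTL_CAPABILITIES */
	return n >= 2 && (caps[1] & KEYCTL_CAPS1_NOTIFICATIONS);
}
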
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 20917c59f39c..42b60198b6c5 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -26,8 +26,20 @@
#include <drm/drm.h>
#include <linux/ioctl.h>
+/*
+ * - 1.1 - initial version
+ * - 1.3 - Add SMI events support
+ * - 1.4 - Indicate new SRAM EDC bit in device properties
+ * - 1.5 - Add SVM API
+ * - 1.6 - Query clear flags in SVM get_attr API
+ * - 1.7 - Checkpoint Restore (CRIU) API
+ * - 1.8 - CRIU - Support for SDMA transfers with GTT BOs
+ * - 1.9 - Add available memory ioctl
+ * - 1.10 - Add SMI profiler event log
+ * - 1.11 - Add unified memory for ctx save/restore area
+ */
#define KFD_IOCTL_MAJOR_VERSION 1
-#define KFD_IOCTL_MINOR_VERSION 1
+#define KFD_IOCTL_MINOR_VERSION 11
struct kfd_ioctl_get_version_args {
__u32 major_version; /* from KFD */
@@ -91,6 +103,12 @@ struct kfd_ioctl_get_queue_wave_state_args {
__u32 pad;
};
+struct kfd_ioctl_get_available_memory_args {
+ __u64 available; /* from KFD */
+ __u32 gpu_id; /* to KFD */
+ __u32 pad;
+};
+
/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
@@ -187,6 +205,8 @@ struct kfd_ioctl_dbg_wave_control_args {
__u32 buf_size_in_bytes; /*including gpu_id and buf_size */
};
+#define KFD_INVALID_FD 0xffffffff
+
/* Matching HSA_EVENTTYPE */
#define KFD_IOC_EVENT_SIGNAL 0
#define KFD_IOC_EVENT_NODECHANGE 1
@@ -251,7 +271,7 @@ struct kfd_memory_exception_failure {
__u32 imprecise; /* Can't determine the exact fault address */
};
-/* memory exception data*/
+/* memory exception data */
struct kfd_hsa_memory_exception_data {
struct kfd_memory_exception_failure failure;
__u64 va;
@@ -347,6 +367,7 @@ struct kfd_ioctl_acquire_vm_args {
#define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28)
#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27)
#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT (1 << 26)
+#define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED (1 << 25)
/* Allocate memory for later SVM (shared virtual memory) mapping.
*
@@ -410,6 +431,20 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
__u32 n_success; /* to/from KFD */
};
+/* Allocate GWS for specific queue
+ *
+ * @queue_id: queue's id that GWS is allocated for
+ * @num_gws: how many GWS to allocate
+ * @first_gws: index of the first GWS allocated.
+ * only support contiguous GWS allocation
+ */
+struct kfd_ioctl_alloc_queue_gws_args {
+ __u32 queue_id; /* to KFD */
+ __u32 num_gws; /* to KFD */
+ __u32 first_gws; /* from KFD */
+ __u32 pad;
+};
+
struct kfd_ioctl_get_dmabuf_info_args {
__u64 size; /* from KFD */
__u64 metadata_ptr; /* to KFD */
@@ -428,6 +463,138 @@ struct kfd_ioctl_import_dmabuf_args {
__u32 dmabuf_fd; /* to KFD */
};
+/*
+ * KFD SMI (System Management Interface) events
+ */
+enum kfd_smi_event {
+ KFD_SMI_EVENT_NONE = 0, /* not used */
+ KFD_SMI_EVENT_VMFAULT = 1, /* events start counting at 1 */
+ KFD_SMI_EVENT_THERMAL_THROTTLE = 2,
+ KFD_SMI_EVENT_GPU_PRE_RESET = 3,
+ KFD_SMI_EVENT_GPU_POST_RESET = 4,
+ KFD_SMI_EVENT_MIGRATE_START = 5,
+ KFD_SMI_EVENT_MIGRATE_END = 6,
+ KFD_SMI_EVENT_PAGE_FAULT_START = 7,
+ KFD_SMI_EVENT_PAGE_FAULT_END = 8,
+ KFD_SMI_EVENT_QUEUE_EVICTION = 9,
+ KFD_SMI_EVENT_QUEUE_RESTORE = 10,
+ KFD_SMI_EVENT_UNMAP_FROM_GPU = 11,
+
+ /*
+ * Max event number, used as a flag bit to get events from all
+ * processes. This requires superuser permission; otherwise no
+ * events from any process can be received. Without this flag,
+ * only events from the same process are received.
+ */
+ KFD_SMI_EVENT_ALL_PROCESS = 64
+};
+
+enum KFD_MIGRATE_TRIGGERS {
+ KFD_MIGRATE_TRIGGER_PREFETCH,
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
+ KFD_MIGRATE_TRIGGER_TTM_EVICTION
+};
+
+enum KFD_QUEUE_EVICTION_TRIGGERS {
+ KFD_QUEUE_EVICTION_TRIGGER_SVM,
+ KFD_QUEUE_EVICTION_TRIGGER_USERPTR,
+ KFD_QUEUE_EVICTION_TRIGGER_TTM,
+ KFD_QUEUE_EVICTION_TRIGGER_SUSPEND,
+ KFD_QUEUE_EVICTION_CRIU_CHECKPOINT,
+ KFD_QUEUE_EVICTION_CRIU_RESTORE
+};
+
+enum KFD_SVM_UNMAP_TRIGGERS {
+ KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY,
+ KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE,
+ KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU
+};
+
+#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))
+#define KFD_SMI_EVENT_MSG_SIZE 96
+
+struct kfd_ioctl_smi_events_args {
+ __u32 gpuid; /* to KFD */
+ __u32 anon_fd; /* from KFD */
+};
+
+/**************************************************************************************************
+ * CRIU IOCTLs (Checkpoint Restore In Userspace)
+ *
+ * When checkpointing a process, the userspace application will perform:
+ * 1. PROCESS_INFO op to determine current process information. This pauses execution and evicts
+ * all the queues.
+ * 2. CHECKPOINT op to checkpoint process contents (BOs, queues, events, svm-ranges)
+ * 3. UNPAUSE op to un-evict all the queues
+ *
+ * When restoring a process, the CRIU userspace application will perform:
+ *
+ * 1. RESTORE op to restore process contents
+ * 2. RESUME op to start the process
+ *
+ * Note: Queues are forced into an evicted state after a successful PROCESS_INFO. The user
+ * application needs to perform an UNPAUSE operation after calling PROCESS_INFO.
+ */
+
+enum kfd_criu_op {
+ KFD_CRIU_OP_PROCESS_INFO,
+ KFD_CRIU_OP_CHECKPOINT,
+ KFD_CRIU_OP_UNPAUSE,
+ KFD_CRIU_OP_RESTORE,
+ KFD_CRIU_OP_RESUME,
+};
+
+/**
+ * kfd_ioctl_criu_args - Arguments to perform a CRIU operation
+ * @devices: [in/out] User pointer to memory location for devices information.
+ * This is an array of type kfd_criu_device_bucket.
+ * @bos: [in/out] User pointer to memory location for BOs information
+ * This is an array of type kfd_criu_bo_bucket.
+ * @priv_data: [in/out] User pointer to memory location for private data
+ * @priv_data_size: [in/out] Size of priv_data in bytes
+ * @num_devices: [in/out] Number of GPUs used by process. Size of @devices array.
+ * @num_bos: [in/out] Number of BOs used by process. Size of @bos array.
+ * @num_objects: [in/out] Number of objects used by process. Objects are opaque to
+ * user application.
+ * @pid: [in/out] PID of the process being checkpointed
+ * @op: [in] Type of operation (kfd_criu_op)
+ *
+ * Return: 0 on success, -errno on failure
+ */
+struct kfd_ioctl_criu_args {
+ __u64 devices; /* Used during ops: CHECKPOINT, RESTORE */
+ __u64 bos; /* Used during ops: CHECKPOINT, RESTORE */
+ __u64 priv_data; /* Used during ops: CHECKPOINT, RESTORE */
+ __u64 priv_data_size; /* Used during ops: PROCESS_INFO, RESTORE */
+ __u32 num_devices; /* Used during ops: PROCESS_INFO, RESTORE */
+ __u32 num_bos; /* Used during ops: PROCESS_INFO, RESTORE */
+ __u32 num_objects; /* Used during ops: PROCESS_INFO, RESTORE */
+ __u32 pid; /* Used during ops: PROCESS_INFO, RESUME */
+ __u32 op;
+};
+
+struct kfd_criu_device_bucket {
+ __u32 user_gpu_id;
+ __u32 actual_gpu_id;
+ __u32 drm_fd;
+ __u32 pad;
+};
+
+struct kfd_criu_bo_bucket {
+ __u64 addr;
+ __u64 size;
+ __u64 offset;
+ __u64 restored_offset; /* During restore, updated offset for BO */
+ __u32 gpu_id; /* This is the user_gpu_id */
+ __u32 alloc_flags;
+ __u32 dmabuf_fd;
+ __u32 pad;
+};
+
+/* CRIU IOCTLs - END */
+/**************************************************************************************************/
+
/* Register offset inside the remapped mmio page
*/
enum kfd_mmio_remap {
@@ -435,6 +602,170 @@ enum kfd_mmio_remap {
KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4,
};
+/* Guarantee host access to memory */
+#define KFD_IOCTL_SVM_FLAG_HOST_ACCESS 0x00000001
+/* Fine grained coherency between all devices with access */
+#define KFD_IOCTL_SVM_FLAG_COHERENT 0x00000002
+/* Use any GPU in same hive as preferred device */
+#define KFD_IOCTL_SVM_FLAG_HIVE_LOCAL 0x00000004
+/* GPUs only read, allows replication */
+#define KFD_IOCTL_SVM_FLAG_GPU_RO 0x00000008
+/* Allow execution on GPU */
+#define KFD_IOCTL_SVM_FLAG_GPU_EXEC 0x00000010
+/* GPUs mostly read, may allow similar optimizations as RO, but writes fault */
+#define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020
+/* Keep GPU memory mapping always valid as if XNACK is disabled */
+#define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040
+
+/**
+ * kfd_ioctl_svm_op - SVM ioctl operations
+ *
+ * @KFD_IOCTL_SVM_OP_SET_ATTR: Modify one or more attributes
+ * @KFD_IOCTL_SVM_OP_GET_ATTR: Query one or more attributes
+ */
+enum kfd_ioctl_svm_op {
+ KFD_IOCTL_SVM_OP_SET_ATTR,
+ KFD_IOCTL_SVM_OP_GET_ATTR
+};
+
+/** kfd_ioctl_svm_location - Enum for preferred and prefetch locations
+ *
+ * GPU IDs are used to specify GPUs as preferred and prefetch locations.
+ * The definitions below are used for system memory or for leaving the preferred
+ * location unspecified.
+ */
+enum kfd_ioctl_svm_location {
+ KFD_IOCTL_SVM_LOCATION_SYSMEM = 0,
+ KFD_IOCTL_SVM_LOCATION_UNDEFINED = 0xffffffff
+};
+
+/**
+ * kfd_ioctl_svm_attr_type - SVM attribute types
+ *
+ * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: gpuid of the preferred location, 0 for
+ * system memory
+ * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: gpuid of the prefetch location, 0 for
+ * system memory. Setting this triggers an
+ * immediate prefetch (migration).
+ * @KFD_IOCTL_SVM_ATTR_ACCESS:
+ * @KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
+ * @KFD_IOCTL_SVM_ATTR_NO_ACCESS: specify memory access for the gpuid given
+ * by the attribute value
+ * @KFD_IOCTL_SVM_ATTR_SET_FLAGS: bitmask of flags to set (see
+ * KFD_IOCTL_SVM_FLAG_...)
+ * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS: bitmask of flags to clear
+ * @KFD_IOCTL_SVM_ATTR_GRANULARITY: migration granularity
+ * (log2 num pages)
+ */
+enum kfd_ioctl_svm_attr_type {
+ KFD_IOCTL_SVM_ATTR_PREFERRED_LOC,
+ KFD_IOCTL_SVM_ATTR_PREFETCH_LOC,
+ KFD_IOCTL_SVM_ATTR_ACCESS,
+ KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE,
+ KFD_IOCTL_SVM_ATTR_NO_ACCESS,
+ KFD_IOCTL_SVM_ATTR_SET_FLAGS,
+ KFD_IOCTL_SVM_ATTR_CLR_FLAGS,
+ KFD_IOCTL_SVM_ATTR_GRANULARITY
+};
+
+/**
+ * kfd_ioctl_svm_attribute - Attributes as pairs of type and value
+ *
+ * The meaning of the @value depends on the attribute type.
+ *
+ * @type: attribute type (see enum @kfd_ioctl_svm_attr_type)
+ * @value: attribute value
+ */
+struct kfd_ioctl_svm_attribute {
+ __u32 type;
+ __u32 value;
+};
+
+/**
+ * kfd_ioctl_svm_args - Arguments for SVM ioctl
+ *
+ * @op specifies the operation to perform (see enum
+ * @kfd_ioctl_svm_op). @start_addr and @size are common for all
+ * operations.
+ *
+ * A variable number of attributes can be given in @attrs.
+ * @nattr specifies the number of attributes. New attributes can be
+ * added in the future without breaking the ABI. If unknown attributes
+ * are given, the function returns -EINVAL.
+ *
+ * @KFD_IOCTL_SVM_OP_SET_ATTR sets attributes for a virtual address
+ * range. It may overlap existing virtual address ranges. If it does,
+ * the existing ranges will be split such that the attribute changes
+ * only apply to the specified address range.
+ *
+ * @KFD_IOCTL_SVM_OP_GET_ATTR returns the intersection of attributes
+ * over all memory in the given range and returns the result as the
+ * attribute value. If different pages have different preferred or
+ * prefetch locations, 0xffffffff will be returned for
+ * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC or
+ * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC respectively. For
+ * @KFD_IOCTL_SVM_ATTR_SET_FLAGS, the flags of all pages will be
+ * aggregated by bitwise AND; that means a flag will be set in the
+ * output if it is set for all pages in the range. For
+ * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS, the flags of all pages will be
+ * aggregated by bitwise NOR; that means a flag will be set in the
+ * output if it is clear for all pages in the range.
+ * The minimum migration granularity throughout the range will be
+ * returned for @KFD_IOCTL_SVM_ATTR_GRANULARITY.
+ *
+ * Querying of accessibility attributes works by initializing the
+ * attribute type to @KFD_IOCTL_SVM_ATTR_ACCESS and the value to the
+ * GPUID being queried. Multiple attributes can be given to allow
+ * querying multiple GPUIDs. The ioctl function overwrites the
+ * attribute type to indicate the access for the specified GPU.
+ */
+struct kfd_ioctl_svm_args {
+ __u64 start_addr;
+ __u64 size;
+ __u32 op;
+ __u32 nattr;
+ /* Variable length array of attributes */
+ struct kfd_ioctl_svm_attribute attrs[];
+};
+
+/**
+ * kfd_ioctl_set_xnack_mode_args - Arguments for set_xnack_mode
+ *
+ * @xnack_enabled: [in/out] Whether to enable XNACK mode for this process
+ *
+ * @xnack_enabled indicates whether recoverable page faults should be
+ * enabled for the current process. 0 means disabled, positive means
+ * enabled, negative means leave unchanged. If enabled, virtual address
+ * translations on GFXv9 and later AMD GPUs can return XNACK and retry
+ * the access until a valid PTE is available. This is used to implement
+ * device page faults.
+ *
+ * On output, @xnack_enabled returns the (new) current mode (0 or
+ * positive). Therefore, a negative input value can be used to query
+ * the current mode without changing it.
+ *
+ * The XNACK mode fundamentally changes the way SVM managed memory works
+ * in the driver, with subtle effects on application performance and
+ * functionality.
+ *
+ * Enabling XNACK mode requires shader programs to be compiled
+ * differently. Furthermore, not all GPUs support changing the mode
+ * per-process. Therefore changing the mode is only allowed while no
+ * user mode queues exist in the process. This ensures that no shader
+ * code is running that may have been compiled for the wrong mode. GPUs
+ * that cannot change to the requested mode will prevent the mode
+ * switch. All GPUs used by the process must be in the
+ * same XNACK mode.
+ *
+ * GFXv8 or older GPUs do not support 48 bit virtual addresses or SVM.
+ * Therefore those GPUs are not considered for the XNACK mode switch.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+struct kfd_ioctl_set_xnack_mode_args {
+ __s32 xnack_enabled;
+};
+
#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
@@ -477,16 +808,16 @@ enum kfd_mmio_remap {
#define AMDKFD_IOC_WAIT_EVENTS \
AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)
-#define AMDKFD_IOC_DBG_REGISTER \
+#define AMDKFD_IOC_DBG_REGISTER_DEPRECATED \
AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)
-#define AMDKFD_IOC_DBG_UNREGISTER \
+#define AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED \
AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)
-#define AMDKFD_IOC_DBG_ADDRESS_WATCH \
+#define AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED \
AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)
-#define AMDKFD_IOC_DBG_WAVE_CONTROL \
+#define AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED \
AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)
#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA \
@@ -529,7 +860,24 @@ enum kfd_mmio_remap {
#define AMDKFD_IOC_IMPORT_DMABUF \
AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)
+#define AMDKFD_IOC_ALLOC_QUEUE_GWS \
+ AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)
+
+#define AMDKFD_IOC_SMI_EVENTS \
+ AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args)
+
+#define AMDKFD_IOC_SVM AMDKFD_IOWR(0x20, struct kfd_ioctl_svm_args)
+
+#define AMDKFD_IOC_SET_XNACK_MODE \
+ AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args)
+
+#define AMDKFD_IOC_CRIU_OP \
+ AMDKFD_IOWR(0x22, struct kfd_ioctl_criu_args)
+
+#define AMDKFD_IOC_AVAILABLE_MEMORY \
+ AMDKFD_IOWR(0x23, struct kfd_ioctl_get_available_memory_args)
+
#define AMDKFD_COMMAND_START 0x01
-#define AMDKFD_COMMAND_END 0x1E
+#define AMDKFD_COMMAND_END 0x24
#endif
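
Because attrs[] is a flexible array, the SVM ioctl argument is sized per call. A hedged sketch of setting a preferred location through an open /dev/kfd descriptor; the gpuid is a placeholder that real code must take from the KFD topology:

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

/* Sketch: set the preferred location of one SVM range. The gpuid is
 * hypothetical (0 means system memory); error handling is elided.
 */
static int svm_set_preferred_loc(int kfd_fd, __u64 start, __u64 size,
				 __u32 gpuid)
{
	struct kfd_ioctl_svm_args *args;
	int ret;

	args = calloc(1, sizeof(*args) + sizeof(args->attrs[0]));
	if (!args)
		return -1;
	args->start_addr = start;		/* page-aligned */
	args->size = size;			/* page-aligned */
	args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
	args->nattr = 1;
	args->attrs[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
	args->attrs[0].value = gpuid;
	ret = ioctl(kfd_fd, AMDKFD_IOC_SVM, args);
	free(args);
	return ret;
}
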
diff --git a/include/uapi/linux/kfd_sysfs.h b/include/uapi/linux/kfd_sysfs.h
new file mode 100644
index 000000000000..3e330f368917
--- /dev/null
+++ b/include/uapi/linux/kfd_sysfs.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef KFD_SYSFS_H_INCLUDED
+#define KFD_SYSFS_H_INCLUDED
+
+/* Capability bits in node properties */
+#define HSA_CAP_HOT_PLUGGABLE 0x00000001
+#define HSA_CAP_ATS_PRESENT 0x00000002
+#define HSA_CAP_SHARED_WITH_GRAPHICS 0x00000004
+#define HSA_CAP_QUEUE_SIZE_POW2 0x00000008
+#define HSA_CAP_QUEUE_SIZE_32BIT 0x00000010
+#define HSA_CAP_QUEUE_IDLE_EVENT 0x00000020
+#define HSA_CAP_VA_LIMIT 0x00000040
+#define HSA_CAP_WATCH_POINTS_SUPPORTED 0x00000080
+#define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK 0x00000f00
+#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8
+#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK 0x00003000
+#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT 12
+
+#define HSA_CAP_DOORBELL_TYPE_PRE_1_0 0x0
+#define HSA_CAP_DOORBELL_TYPE_1_0 0x1
+#define HSA_CAP_DOORBELL_TYPE_2_0 0x2
+#define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000
+
+/* Old buggy user mode depends on this being 0 */
+#define HSA_CAP_RESERVED_WAS_SRAM_EDCSUPPORTED 0x00080000
+
+#define HSA_CAP_MEM_EDCSUPPORTED 0x00100000
+#define HSA_CAP_RASEVENTNOTIFY 0x00200000
+#define HSA_CAP_ASIC_REVISION_MASK 0x03c00000
+#define HSA_CAP_ASIC_REVISION_SHIFT 22
+#define HSA_CAP_SRAM_EDCSUPPORTED 0x04000000
+#define HSA_CAP_SVMAPI_SUPPORTED 0x08000000
+#define HSA_CAP_FLAGS_COHERENTHOSTACCESS 0x10000000
+#define HSA_CAP_RESERVED 0xe00f8000
+
+/* Heap types in memory properties */
+#define HSA_MEM_HEAP_TYPE_SYSTEM 0
+#define HSA_MEM_HEAP_TYPE_FB_PUBLIC 1
+#define HSA_MEM_HEAP_TYPE_FB_PRIVATE 2
+#define HSA_MEM_HEAP_TYPE_GPU_GDS 3
+#define HSA_MEM_HEAP_TYPE_GPU_LDS 4
+#define HSA_MEM_HEAP_TYPE_GPU_SCRATCH 5
+
+/* Flag bits in memory properties */
+#define HSA_MEM_FLAGS_HOT_PLUGGABLE 0x00000001
+#define HSA_MEM_FLAGS_NON_VOLATILE 0x00000002
+#define HSA_MEM_FLAGS_RESERVED 0xfffffffc
+
+/* Cache types in cache properties */
+#define HSA_CACHE_TYPE_DATA 0x00000001
+#define HSA_CACHE_TYPE_INSTRUCTION 0x00000002
+#define HSA_CACHE_TYPE_CPU 0x00000004
+#define HSA_CACHE_TYPE_HSACU 0x00000008
+#define HSA_CACHE_TYPE_RESERVED 0xfffffff0
+
+/* Link types in IO link properties (matches CRAT link types) */
+#define HSA_IOLINK_TYPE_UNDEFINED 0
+#define HSA_IOLINK_TYPE_HYPERTRANSPORT 1
+#define HSA_IOLINK_TYPE_PCIEXPRESS 2
+#define HSA_IOLINK_TYPE_AMBA 3
+#define HSA_IOLINK_TYPE_MIPI 4
+#define HSA_IOLINK_TYPE_QPI_1_1 5
+#define HSA_IOLINK_TYPE_RESERVED1 6
+#define HSA_IOLINK_TYPE_RESERVED2 7
+#define HSA_IOLINK_TYPE_RAPID_IO 8
+#define HSA_IOLINK_TYPE_INFINIBAND 9
+#define HSA_IOLINK_TYPE_RESERVED3 10
+#define HSA_IOLINK_TYPE_XGMI 11
+#define HSA_IOLINK_TYPE_XGOP 12
+#define HSA_IOLINK_TYPE_GZ 13
+#define HSA_IOLINK_TYPE_ETHERNET_RDMA 14
+#define HSA_IOLINK_TYPE_RDMA_OTHER 15
+#define HSA_IOLINK_TYPE_OTHER 16
+
+/* Flag bits in IO link properties (matches CRAT flags, excluding the
+ * bi-directional flag, which is not officially part of the CRAT spec, and
+ * only used internally in KFD)
+ */
+#define HSA_IOLINK_FLAGS_ENABLED (1 << 0)
+#define HSA_IOLINK_FLAGS_NON_COHERENT (1 << 1)
+#define HSA_IOLINK_FLAGS_NO_ATOMICS_32_BIT (1 << 2)
+#define HSA_IOLINK_FLAGS_NO_ATOMICS_64_BIT (1 << 3)
+#define HSA_IOLINK_FLAGS_NO_PEER_TO_PEER_DMA (1 << 4)
+#define HSA_IOLINK_FLAGS_RESERVED 0xffffffe0
+
+#endif
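
These HSA_CAP_* bits decode the capability property that KFD exports through its topology sysfs. A sketch under the assumption that node properties live at /sys/class/kfd/kfd/topology/nodes/<N>/properties as "name value" lines:

#include <stdio.h>
#include <string.h>
#include <linux/kfd_sysfs.h>

/* Sketch: parse one topology node's "capability" property and decode
 * a couple of HSA_CAP_* bits. The sysfs path is an assumption. */
static void dump_node_caps(int node)
{
	char path[128], name[64];
	unsigned long long val, caps = 0;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/class/kfd/kfd/topology/nodes/%d/properties", node);
	f = fopen(path, "r");
	if (!f)
		return;
	while (fscanf(f, "%63s %llu", name, &val) == 2)
		if (!strcmp(name, "capability"))
			caps = val;
	fclose(f);

	printf("SVM API:  %s\n",
	       caps & HSA_CAP_SVMAPI_SUPPORTED ? "yes" : "no");
	printf("ASIC rev: %llu\n",
	       (caps & HSA_CAP_ASIC_REVISION_MASK) >> HSA_CAP_ASIC_REVISION_SHIFT);
}
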
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 4b95f9a31a2f..0d5d4419139a 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -8,6 +8,7 @@
* Note: you must update KVM_API_VERSION if you change this interface.
*/
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/ioctl.h>
@@ -116,7 +117,7 @@ struct kvm_irq_level {
* ACPI gsi notion of irq.
* For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47..
* For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23..
- * For ARM: See Documentation/virt/kvm/api.txt
+ * For ARM: See Documentation/virt/kvm/api.rst
*/
union {
__u32 irq;
@@ -188,10 +189,13 @@ struct kvm_s390_cmma_log {
struct kvm_hyperv_exit {
#define KVM_EXIT_HYPERV_SYNIC 1
#define KVM_EXIT_HYPERV_HCALL 2
+#define KVM_EXIT_HYPERV_SYNDBG 3
__u32 type;
+ __u32 pad1;
union {
struct {
__u32 msr;
+ __u32 pad2;
__u64 control;
__u64 evt_page;
__u64 msg_page;
@@ -201,6 +205,29 @@ struct kvm_hyperv_exit {
__u64 result;
__u64 params[2];
} hcall;
+ struct {
+ __u32 msr;
+ __u32 pad2;
+ __u64 control;
+ __u64 status;
+ __u64 send_page;
+ __u64 recv_page;
+ __u64 pending_page;
+ } syndbg;
+ } u;
+};
+
+struct kvm_xen_exit {
+#define KVM_EXIT_XEN_HCALL 1
+ __u32 type;
+ union {
+ struct {
+ __u32 longmode;
+ __u32 cpl;
+ __u64 input;
+ __u64 result;
+ __u64 params[6];
+ } hcall;
} u;
};
@@ -236,6 +263,15 @@ struct kvm_hyperv_exit {
#define KVM_EXIT_IOAPIC_EOI 26
#define KVM_EXIT_HYPERV 27
#define KVM_EXIT_ARM_NISV 28
+#define KVM_EXIT_X86_RDMSR 29
+#define KVM_EXIT_X86_WRMSR 30
+#define KVM_EXIT_DIRTY_RING_FULL 31
+#define KVM_EXIT_AP_RESET_HOLD 32
+#define KVM_EXIT_X86_BUS_LOCK 33
+#define KVM_EXIT_XEN 34
+#define KVM_EXIT_RISCV_SBI 35
+#define KVM_EXIT_RISCV_CSR 36
+#define KVM_EXIT_NOTIFY 37
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -247,6 +283,9 @@ struct kvm_hyperv_exit {
/* Encounter unexpected vm-exit reason */
#define KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON 4
+/* Flags that describe what fields in emulation_failure hold valid data. */
+#define KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES (1ULL << 0)
+
/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
struct kvm_run {
/* in */
@@ -277,6 +316,7 @@ struct kvm_run {
/* KVM_EXIT_FAIL_ENTRY */
struct {
__u64 hardware_entry_failure_reason;
+ __u32 cpu;
} fail_entry;
/* KVM_EXIT_EXCEPTION */
struct {
@@ -349,6 +389,35 @@ struct kvm_run {
__u32 ndata;
__u64 data[16];
} internal;
+ /*
+ * KVM_INTERNAL_ERROR_EMULATION
+ *
+ * "struct emulation_failure" is an overlay of "struct internal"
+ * that is used for the KVM_INTERNAL_ERROR_EMULATION sub-type of
+ * KVM_EXIT_INTERNAL_ERROR. Note, unlike other internal error
+ * sub-types, this struct is ABI! It also needs to be backwards
+ * compatible with "struct internal". Take special care that
+ * "ndata" is correct, that new fields are enumerated in "flags",
+ * and that each flag enumerates fields that are 64-bit aligned
+ * and sized (so that ndata+internal.data[] is valid/accurate).
+ *
+ * Space beyond the defined fields may be used to store arbitrary
+ * debug information relating to the emulation failure. It is
+ * accounted for in "ndata" but the format is unspecified and is
+ * not represented in "flags". Any such information is *not* ABI!
+ */
+ struct {
+ __u32 suberror;
+ __u32 ndata;
+ __u64 flags;
+ union {
+ struct {
+ __u8 insn_size;
+ __u8 insn_bytes[15];
+ };
+ };
+ /* Arbitrary debug data may follow. */
+ } emulation_failure;
/* KVM_EXIT_OSI */
struct {
__u64 gprs[32];
@@ -377,8 +446,17 @@ struct kvm_run {
#define KVM_SYSTEM_EVENT_SHUTDOWN 1
#define KVM_SYSTEM_EVENT_RESET 2
#define KVM_SYSTEM_EVENT_CRASH 3
+#define KVM_SYSTEM_EVENT_WAKEUP 4
+#define KVM_SYSTEM_EVENT_SUSPEND 5
+#define KVM_SYSTEM_EVENT_SEV_TERM 6
__u32 type;
- __u64 flags;
+ __u32 ndata;
+ union {
+#ifndef __KERNEL__
+ __u64 flags;
+#endif
+ __u64 data[16];
+ };
} system_event;
/* KVM_EXIT_S390_STSI */
struct {
@@ -400,6 +478,38 @@ struct kvm_run {
__u64 esr_iss;
__u64 fault_ipa;
} arm_nisv;
+ /* KVM_EXIT_X86_RDMSR / KVM_EXIT_X86_WRMSR */
+ struct {
+ __u8 error; /* user -> kernel */
+ __u8 pad[7];
+#define KVM_MSR_EXIT_REASON_INVAL (1 << 0)
+#define KVM_MSR_EXIT_REASON_UNKNOWN (1 << 1)
+#define KVM_MSR_EXIT_REASON_FILTER (1 << 2)
+ __u32 reason; /* kernel -> user */
+ __u32 index; /* kernel -> user */
+ __u64 data; /* kernel <-> user */
+ } msr;
+ /* KVM_EXIT_XEN */
+ struct kvm_xen_exit xen;
+ /* KVM_EXIT_RISCV_SBI */
+ struct {
+ unsigned long extension_id;
+ unsigned long function_id;
+ unsigned long args[6];
+ unsigned long ret[2];
+ } riscv_sbi;
+ /* KVM_EXIT_RISCV_CSR */
+ struct {
+ unsigned long csr_num;
+ unsigned long new_value;
+ unsigned long write_mask;
+ unsigned long ret_value;
+ } riscv_csr;
+ /* KVM_EXIT_NOTIFY */
+ struct {
+#define KVM_NOTIFY_CONTEXT_INVALID (1 << 0)
+ __u32 flags;
+ } notify;
/* Fix the size of the union. */
char padding[256];
};
@@ -446,7 +556,7 @@ struct kvm_coalesced_mmio {
struct kvm_coalesced_mmio_ring {
__u32 first, last;
- struct kvm_coalesced_mmio coalesced_mmio[0];
+ struct kvm_coalesced_mmio coalesced_mmio[];
};
#define KVM_COALESCED_MMIO_MAX \
@@ -474,15 +584,26 @@ struct kvm_s390_mem_op {
__u32 size; /* amount of bytes */
__u32 op; /* type of operation */
__u64 buf; /* buffer in userspace */
- __u8 ar; /* the access register number */
- __u8 reserved[31]; /* should be set to 0 */
+ union {
+ struct {
+ __u8 ar; /* the access register number */
+ __u8 key; /* access key, ignored if flag unset */
+ };
+ __u32 sida_offset; /* offset into the sida */
+ __u8 reserved[32]; /* ignored */
+ };
};
/* types for kvm_s390_mem_op->op */
#define KVM_S390_MEMOP_LOGICAL_READ 0
#define KVM_S390_MEMOP_LOGICAL_WRITE 1
+#define KVM_S390_MEMOP_SIDA_READ 2
+#define KVM_S390_MEMOP_SIDA_WRITE 3
+#define KVM_S390_MEMOP_ABSOLUTE_READ 4
+#define KVM_S390_MEMOP_ABSOLUTE_WRITE 5
/* flags for kvm_s390_mem_op->flags */
#define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0)
#define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1)
+#define KVM_S390_MEMOP_F_SKEY_PROTECTION (1ULL << 2)
/* for KVM_INTERRUPT */
struct kvm_interrupt {
@@ -514,7 +635,7 @@ struct kvm_clear_dirty_log {
/* for KVM_SET_SIGNAL_MASK */
struct kvm_signal_mask {
__u32 len;
- __u8 sigset[0];
+ __u8 sigset[];
};
/* for KVM_TPR_ACCESS_REPORTING */
@@ -541,6 +662,8 @@ struct kvm_vapic_addr {
#define KVM_MP_STATE_CHECK_STOP 6
#define KVM_MP_STATE_OPERATING 7
#define KVM_MP_STATE_LOAD 8
+#define KVM_MP_STATE_AP_RESET_HOLD 9
+#define KVM_MP_STATE_SUSPENDED 10
struct kvm_mp_state {
__u32 mp_state;
@@ -772,9 +895,10 @@ struct kvm_ppc_resize_hpt {
#define KVM_VM_PPC_HV 1
#define KVM_VM_PPC_PR 2
-/* on MIPS, 0 forces trap & emulate, 1 forces VZ ASE */
-#define KVM_VM_MIPS_TE 0
+/* on MIPS, 0 indicates auto, 1 forces VZ ASE, 2 forces trap & emulate */
+#define KVM_VM_MIPS_AUTO 0
#define KVM_VM_MIPS_VZ 1
+#define KVM_VM_MIPS_TE 2
#define KVM_S390_SIE_PAGE_OFFSET 1
@@ -1010,6 +1134,50 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_ARM_NISV_TO_USER 177
#define KVM_CAP_ARM_INJECT_EXT_DABT 178
#define KVM_CAP_S390_VCPU_RESETS 179
+#define KVM_CAP_S390_PROTECTED 180
+#define KVM_CAP_PPC_SECURE_GUEST 181
+#define KVM_CAP_HALT_POLL 182
+#define KVM_CAP_ASYNC_PF_INT 183
+#define KVM_CAP_LAST_CPU 184
+#define KVM_CAP_SMALLER_MAXPHYADDR 185
+#define KVM_CAP_S390_DIAG318 186
+#define KVM_CAP_STEAL_TIME 187
+#define KVM_CAP_X86_USER_SPACE_MSR 188
+#define KVM_CAP_X86_MSR_FILTER 189
+#define KVM_CAP_ENFORCE_PV_FEATURE_CPUID 190
+#define KVM_CAP_SYS_HYPERV_CPUID 191
+#define KVM_CAP_DIRTY_LOG_RING 192
+#define KVM_CAP_X86_BUS_LOCK_EXIT 193
+#define KVM_CAP_PPC_DAWR1 194
+#define KVM_CAP_SET_GUEST_DEBUG2 195
+#define KVM_CAP_SGX_ATTRIBUTE 196
+#define KVM_CAP_VM_COPY_ENC_CONTEXT_FROM 197
+#define KVM_CAP_PTP_KVM 198
+#define KVM_CAP_HYPERV_ENFORCE_CPUID 199
+#define KVM_CAP_SREGS2 200
+#define KVM_CAP_EXIT_HYPERCALL 201
+#define KVM_CAP_PPC_RPT_INVALIDATE 202
+#define KVM_CAP_BINARY_STATS_FD 203
+#define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204
+#define KVM_CAP_ARM_MTE 205
+#define KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM 206
+#define KVM_CAP_VM_GPA_BITS 207
+#define KVM_CAP_XSAVE2 208
+#define KVM_CAP_SYS_ATTRIBUTES 209
+#define KVM_CAP_PPC_AIL_MODE_3 210
+#define KVM_CAP_S390_MEM_OP_EXTENSION 211
+#define KVM_CAP_PMU_CAPABILITY 212
+#define KVM_CAP_DISABLE_QUIRKS2 213
+#define KVM_CAP_VM_TSC_CONTROL 214
+#define KVM_CAP_SYSTEM_EVENT_DATA 215
+#define KVM_CAP_ARM_SYSTEM_SUSPEND 216
+#define KVM_CAP_S390_PROTECTED_DUMP 217
+#define KVM_CAP_X86_TRIPLE_FAULT_EVENT 218
+#define KVM_CAP_X86_NOTIFY_VMEXIT 219
+#define KVM_CAP_VM_DISABLE_NX_HUGE_PAGES 220
+#define KVM_CAP_S390_ZPCI_OP 221
+#define KVM_CAP_S390_CPU_TOPOLOGY 222
+#define KVM_CAP_DIRTY_LOG_RING_ACQ_REL 223
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1041,11 +1209,20 @@ struct kvm_irq_routing_hv_sint {
__u32 sint;
};
+struct kvm_irq_routing_xen_evtchn {
+ __u32 port;
+ __u32 vcpu;
+ __u32 priority;
+};
+
+#define KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL ((__u32)(-1))
+
/* gsi routing entry types */
#define KVM_IRQ_ROUTING_IRQCHIP 1
#define KVM_IRQ_ROUTING_MSI 2
#define KVM_IRQ_ROUTING_S390_ADAPTER 3
#define KVM_IRQ_ROUTING_HV_SINT 4
+#define KVM_IRQ_ROUTING_XEN_EVTCHN 5
struct kvm_irq_routing_entry {
__u32 gsi;
@@ -1057,6 +1234,7 @@ struct kvm_irq_routing_entry {
struct kvm_irq_routing_msi msi;
struct kvm_irq_routing_s390_adapter adapter;
struct kvm_irq_routing_hv_sint hv_sint;
+ struct kvm_irq_routing_xen_evtchn xen_evtchn;
__u32 pad[8];
} u;
};
@@ -1064,7 +1242,7 @@ struct kvm_irq_routing_entry {
struct kvm_irq_routing {
__u32 nr;
__u32 flags;
- struct kvm_irq_routing_entry entries[0];
+ struct kvm_irq_routing_entry entries[];
};
#endif
@@ -1083,6 +1261,13 @@ struct kvm_x86_mce {
#endif
#ifdef KVM_CAP_XEN_HVM
+#define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0)
+#define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1)
+#define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2)
+#define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3)
+#define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4)
+#define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5)
+
struct kvm_xen_hvm_config {
__u32 flags;
__u32 msr;
@@ -1100,7 +1285,7 @@ struct kvm_xen_hvm_config {
*
* KVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies
* the irqfd to operate in resampling mode for level triggered interrupt
- * emulation. See Documentation/virt/kvm/api.txt.
+ * emulation. See Documentation/virt/kvm/api.rst.
*/
#define KVM_IRQFD_FLAG_RESAMPLE (1 << 1)
@@ -1116,11 +1301,16 @@ struct kvm_irqfd {
/* Do not use 1, KVM_CHECK_EXTENSION returned it before we had flags. */
#define KVM_CLOCK_TSC_STABLE 2
+#define KVM_CLOCK_REALTIME (1 << 2)
+#define KVM_CLOCK_HOST_TSC (1 << 3)
struct kvm_clock_data {
__u64 clock;
__u32 flags;
- __u32 pad[9];
+ __u32 pad0;
+ __u64 realtime;
+ __u64 host_tsc;
+ __u32 pad[4];
};
/* For KVM_CAP_SW_TLB */
@@ -1172,7 +1362,7 @@ struct kvm_dirty_tlb {
struct kvm_reg_list {
__u64 n; /* number of regs */
- __u64 reg[0];
+ __u64 reg[];
};
struct kvm_one_reg {
@@ -1315,7 +1505,8 @@ struct kvm_s390_ucas_mapping {
#define KVM_SET_PIT2 _IOW(KVMIO, 0xa0, struct kvm_pit_state2)
/* Available with KVM_CAP_PPC_GET_PVINFO */
#define KVM_PPC_GET_PVINFO _IOW(KVMIO, 0xa1, struct kvm_ppc_pvinfo)
-/* Available with KVM_CAP_TSC_CONTROL */
+/* Available with KVM_CAP_TSC_CONTROL for a vCPU, or with
+ * KVM_CAP_VM_TSC_CONTROL to set defaults for a VM */
#define KVM_SET_TSC_KHZ _IO(KVMIO, 0xa2)
#define KVM_GET_TSC_KHZ _IO(KVMIO, 0xa3)
/* Available with KVM_CAP_PCI_2_3 */
@@ -1350,6 +1541,7 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_PMU_EVENT_FILTER */
#define KVM_SET_PMU_EVENT_FILTER _IOW(KVMIO, 0xb2, struct kvm_pmu_event_filter)
#define KVM_PPC_SVM_OFF _IO(KVMIO, 0xb3)
+#define KVM_ARM_MTE_COPY_TAGS _IOR(KVMIO, 0xb4, struct kvm_arm_copy_mte_tags)
/* ioctl for vm fd */
#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)
@@ -1468,7 +1660,7 @@ struct kvm_enc_region {
/* Available with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT_2 */
#define KVM_CLEAR_DIRTY_LOG _IOWR(KVMIO, 0xc0, struct kvm_clear_dirty_log)
-/* Available with KVM_CAP_HYPERV_CPUID */
+/* Available with KVM_CAP_HYPERV_CPUID (vcpu) / KVM_CAP_SYS_HYPERV_CPUID (system) */
#define KVM_GET_SUPPORTED_HV_CPUID _IOWR(KVMIO, 0xc1, struct kvm_cpuid2)
/* Available with KVM_CAP_ARM_SVE */
@@ -1478,6 +1670,193 @@ struct kvm_enc_region {
#define KVM_S390_NORMAL_RESET _IO(KVMIO, 0xc3)
#define KVM_S390_CLEAR_RESET _IO(KVMIO, 0xc4)
+struct kvm_s390_pv_sec_parm {
+ __u64 origin;
+ __u64 length;
+};
+
+struct kvm_s390_pv_unp {
+ __u64 addr;
+ __u64 size;
+ __u64 tweak;
+};
+
+enum pv_cmd_dmp_id {
+ KVM_PV_DUMP_INIT,
+ KVM_PV_DUMP_CONFIG_STOR_STATE,
+ KVM_PV_DUMP_COMPLETE,
+ KVM_PV_DUMP_CPU,
+};
+
+struct kvm_s390_pv_dmp {
+ __u64 subcmd;
+ __u64 buff_addr;
+ __u64 buff_len;
+ __u64 gaddr; /* For dump storage state */
+ __u64 reserved[4];
+};
+
+enum pv_cmd_info_id {
+ KVM_PV_INFO_VM,
+ KVM_PV_INFO_DUMP,
+};
+
+struct kvm_s390_pv_info_dump {
+ __u64 dump_cpu_buffer_len;
+ __u64 dump_config_mem_buffer_per_1m;
+ __u64 dump_config_finalize_len;
+};
+
+struct kvm_s390_pv_info_vm {
+ __u64 inst_calls_list[4];
+ __u64 max_cpus;
+ __u64 max_guests;
+ __u64 max_guest_addr;
+ __u64 feature_indication;
+};
+
+struct kvm_s390_pv_info_header {
+ __u32 id;
+ __u32 len_max;
+ __u32 len_written;
+ __u32 reserved;
+};
+
+struct kvm_s390_pv_info {
+ struct kvm_s390_pv_info_header header;
+ union {
+ struct kvm_s390_pv_info_dump dump;
+ struct kvm_s390_pv_info_vm vm;
+ };
+};
+
+enum pv_cmd_id {
+ KVM_PV_ENABLE,
+ KVM_PV_DISABLE,
+ KVM_PV_SET_SEC_PARMS,
+ KVM_PV_UNPACK,
+ KVM_PV_VERIFY,
+ KVM_PV_PREP_RESET,
+ KVM_PV_UNSHARE_ALL,
+ KVM_PV_INFO,
+ KVM_PV_DUMP,
+};
+
+struct kvm_pv_cmd {
+ __u32 cmd; /* Command to be executed */
+ __u16 rc; /* Ultravisor return code */
+ __u16 rrc; /* Ultravisor return reason code */
+ __u64 data; /* Data or address */
+ __u32 flags; /* flags for future extensions. Must be 0 for now */
+ __u32 reserved[3];
+};
+
+/* Available with KVM_CAP_S390_PROTECTED */
+#define KVM_S390_PV_COMMAND _IOWR(KVMIO, 0xc5, struct kvm_pv_cmd)
+
+/* Available with KVM_CAP_X86_MSR_FILTER */
+#define KVM_X86_SET_MSR_FILTER _IOW(KVMIO, 0xc6, struct kvm_msr_filter)
+
+/* Available with KVM_CAP_DIRTY_LOG_RING */
+#define KVM_RESET_DIRTY_RINGS _IO(KVMIO, 0xc7)
+
+/* Per-VM Xen attributes */
+#define KVM_XEN_HVM_GET_ATTR _IOWR(KVMIO, 0xc8, struct kvm_xen_hvm_attr)
+#define KVM_XEN_HVM_SET_ATTR _IOW(KVMIO, 0xc9, struct kvm_xen_hvm_attr)
+
+struct kvm_xen_hvm_attr {
+ __u16 type;
+ __u16 pad[3];
+ union {
+ __u8 long_mode;
+ __u8 vector;
+ struct {
+ __u64 gfn;
+ } shared_info;
+ struct {
+ __u32 send_port;
+ __u32 type; /* EVTCHNSTAT_ipi / EVTCHNSTAT_interdomain */
+ __u32 flags;
+#define KVM_XEN_EVTCHN_DEASSIGN (1 << 0)
+#define KVM_XEN_EVTCHN_UPDATE (1 << 1)
+#define KVM_XEN_EVTCHN_RESET (1 << 2)
+ /*
+ * Events sent by the guest are either looped back to
+ * the guest itself (potentially on a different port#)
+ * or signalled via an eventfd.
+ */
+ union {
+ struct {
+ __u32 port;
+ __u32 vcpu;
+ __u32 priority;
+ } port;
+ struct {
+ __u32 port; /* Zero for eventfd */
+ __s32 fd;
+ } eventfd;
+ __u32 padding[4];
+ } deliver;
+ } evtchn;
+ __u32 xen_version;
+ __u64 pad[8];
+ } u;
+};
+
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
+#define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0
+#define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1
+#define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR 0x2
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
+#define KVM_XEN_ATTR_TYPE_EVTCHN 0x3
+#define KVM_XEN_ATTR_TYPE_XEN_VERSION 0x4
+
+/* Per-vCPU Xen attributes */
+#define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr)
+#define KVM_XEN_VCPU_SET_ATTR _IOW(KVMIO, 0xcb, struct kvm_xen_vcpu_attr)
+
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
+#define KVM_XEN_HVM_EVTCHN_SEND _IOW(KVMIO, 0xd0, struct kvm_irq_routing_xen_evtchn)
+
+#define KVM_GET_SREGS2 _IOR(KVMIO, 0xcc, struct kvm_sregs2)
+#define KVM_SET_SREGS2 _IOW(KVMIO, 0xcd, struct kvm_sregs2)
+
+struct kvm_xen_vcpu_attr {
+ __u16 type;
+ __u16 pad[3];
+ union {
+ __u64 gpa;
+ __u64 pad[8];
+ struct {
+ __u64 state;
+ __u64 state_entry_time;
+ __u64 time_running;
+ __u64 time_runnable;
+ __u64 time_blocked;
+ __u64 time_offline;
+ } runstate;
+ __u32 vcpu_id;
+ struct {
+ __u32 port;
+ __u32 priority;
+ __u64 expires_ns;
+ } timer;
+ __u8 vector;
+ } u;
+};
+
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
+#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO 0x0
+#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO 0x1
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR 0x2
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT 0x3
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA 0x4
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST 0x5
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
+#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID 0x6
+#define KVM_XEN_VCPU_ATTR_TYPE_TIMER 0x7
+#define KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR 0x8
+
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
/* Guest initialization commands */
@@ -1506,6 +1885,10 @@ enum sev_cmd_id {
KVM_SEV_DBG_ENCRYPT,
/* Guest certificates commands */
KVM_SEV_CERT_EXPORT,
+ /* Attestation report */
+ KVM_SEV_GET_ATTESTATION_REPORT,
+ /* Guest Migration Extension */
+ KVM_SEV_SEND_CANCEL,
KVM_SEV_NR_MAX,
};
@@ -1558,6 +1941,51 @@ struct kvm_sev_dbg {
__u32 len;
};
+struct kvm_sev_attestation_report {
+ __u8 mnonce[16];
+ __u64 uaddr;
+ __u32 len;
+};
+
+struct kvm_sev_send_start {
+ __u32 policy;
+ __u64 pdh_cert_uaddr;
+ __u32 pdh_cert_len;
+ __u64 plat_certs_uaddr;
+ __u32 plat_certs_len;
+ __u64 amd_certs_uaddr;
+ __u32 amd_certs_len;
+ __u64 session_uaddr;
+ __u32 session_len;
+};
+
+struct kvm_sev_send_update_data {
+ __u64 hdr_uaddr;
+ __u32 hdr_len;
+ __u64 guest_uaddr;
+ __u32 guest_len;
+ __u64 trans_uaddr;
+ __u32 trans_len;
+};
+
+struct kvm_sev_receive_start {
+ __u32 handle;
+ __u32 policy;
+ __u64 pdh_uaddr;
+ __u32 pdh_len;
+ __u64 session_uaddr;
+ __u32 session_len;
+};
+
+struct kvm_sev_receive_update_data {
+ __u64 hdr_uaddr;
+ __u32 hdr_len;
+ __u64 guest_uaddr;
+ __u32 guest_len;
+ __u64 trans_uaddr;
+ __u32 trans_len;
+};
+
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
#define KVM_DEV_ASSIGN_MASK_INTX (1 << 2)
@@ -1628,4 +2056,176 @@ struct kvm_hyperv_eventfd {
#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff
#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0)
+#define KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE (1 << 0)
+#define KVM_DIRTY_LOG_INITIALLY_SET (1 << 1)
+
+/*
+ * Arch needs to define the macro after implementing the dirty ring
+ * feature. KVM_DIRTY_LOG_PAGE_OFFSET should be defined as the
+ * starting page offset of the dirty ring structures.
+ */
+#ifndef KVM_DIRTY_LOG_PAGE_OFFSET
+#define KVM_DIRTY_LOG_PAGE_OFFSET 0
+#endif
+
+/*
+ * KVM dirty GFN flags, defined as:
+ *
+ * |---------------+---------------+--------------|
+ * | bit 1 (reset) | bit 0 (dirty) | Status |
+ * |---------------+---------------+--------------|
+ * | 0 | 0 | Invalid GFN |
+ * | 0 | 1 | Dirty GFN |
+ * | 1 | X | GFN to reset |
+ * |---------------+---------------+--------------|
+ *
+ * Lifecycle of a dirty GFN goes like:
+ *
+ * dirtied harvested reset
+ * 00 -----------> 01 -------------> 1X -------+
+ * ^ |
+ * | |
+ * +------------------------------------------+
+ *
+ * The userspace program is only responsible for the 01->1X state
+ * conversion after harvesting an entry. Also, it must not skip any
+ * dirty bits, so that dirty bits are always harvested in sequence.
+ */
+#define KVM_DIRTY_GFN_F_DIRTY _BITUL(0)
+#define KVM_DIRTY_GFN_F_RESET _BITUL(1)
+#define KVM_DIRTY_GFN_F_MASK 0x3
+
+/*
+ * KVM dirty rings should be mapped at KVM_DIRTY_LOG_PAGE_OFFSET of
+ * per-vcpu mmaped regions as an array of struct kvm_dirty_gfn. The
+ * size of the gfn buffer is decided by the first argument when
+ * enabling KVM_CAP_DIRTY_LOG_RING.
+ */
+struct kvm_dirty_gfn {
+ __u32 flags;
+ __u32 slot;
+ __u64 offset;
+};
+
+#define KVM_BUS_LOCK_DETECTION_OFF (1 << 0)
+#define KVM_BUS_LOCK_DETECTION_EXIT (1 << 1)
+
+#define KVM_PMU_CAP_DISABLE (1 << 0)
+
+/**
+ * struct kvm_stats_header - Header of per vm/vcpu binary statistics data.
+ * @flags: Some extra information for header, always 0 for now.
+ * @name_size: The size in bytes of the memory which contains statistics
+ * name string including trailing '\0'. The memory is allocated
+ * at the end of the statistics descriptor.
+ * @num_desc: The number of statistics the vm or vcpu has.
+ * @id_offset: The offset of the vm/vcpu stats' id string in the file pointed
+ * to by the vm/vcpu stats fd.
+ * @desc_offset: The offset of the vm/vcpu stats' descriptor block in the file
+ * pointed to by the vm/vcpu stats fd.
+ * @data_offset: The offset of the vm/vcpu stats' data block in the file
+ * pointed to by the vm/vcpu stats fd.
+ *
+ * This is the header userspace needs to read from stats fd before any other
+ * readings. It is used by userspace to discover all the information about the
+ * vm/vcpu's binary statistics.
+ * Userspace reads this header from the start of the vm/vcpu's stats fd.
+ */
+struct kvm_stats_header {
+ __u32 flags;
+ __u32 name_size;
+ __u32 num_desc;
+ __u32 id_offset;
+ __u32 desc_offset;
+ __u32 data_offset;
+};
+
+#define KVM_STATS_TYPE_SHIFT 0
+#define KVM_STATS_TYPE_MASK (0xF << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_CUMULATIVE (0x0 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_INSTANT (0x1 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_PEAK (0x2 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_LINEAR_HIST (0x3 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_LOG_HIST (0x4 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_MAX KVM_STATS_TYPE_LOG_HIST
+
+#define KVM_STATS_UNIT_SHIFT 4
+#define KVM_STATS_UNIT_MASK (0xF << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_NONE (0x0 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_BYTES (0x1 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_SECONDS (0x2 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_CYCLES (0x3 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_BOOLEAN (0x4 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_BOOLEAN
+
+#define KVM_STATS_BASE_SHIFT 8
+#define KVM_STATS_BASE_MASK (0xF << KVM_STATS_BASE_SHIFT)
+#define KVM_STATS_BASE_POW10 (0x0 << KVM_STATS_BASE_SHIFT)
+#define KVM_STATS_BASE_POW2 (0x1 << KVM_STATS_BASE_SHIFT)
+#define KVM_STATS_BASE_MAX KVM_STATS_BASE_POW2
+
+/**
+ * struct kvm_stats_desc - Descriptor of a KVM statistic.
+ * @flags: Annotations of the stats, like type, unit, etc.
+ * @exponent: Used together with @flags to determine the unit.
+ * @size: The number of data items for this stat.
+ * Every data item is of type __u64.
+ * @offset: The offset of the stats to the start of stat structure in
+ * structure kvm or kvm_vcpu.
+ * @bucket_size: A parameter value used for histogram stats. It is only used
+ * for linear histogram stats, specifying the size of the bucket.
+ * @name: The name string for the stats. Its size is indicated by the
+ * &kvm_stats_header->name_size.
+ */
+struct kvm_stats_desc {
+ __u32 flags;
+ __s16 exponent;
+ __u16 size;
+ __u32 offset;
+ __u32 bucket_size;
+ char name[];
+};
+
+#define KVM_GET_STATS_FD _IO(KVMIO, 0xce)
+
+/* Available with KVM_CAP_XSAVE2 */
+#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave)
+
+/* Available with KVM_CAP_S390_PROTECTED_DUMP */
+#define KVM_S390_PV_CPU_COMMAND _IOWR(KVMIO, 0xd0, struct kvm_pv_cmd)
+
+/* Available with KVM_CAP_X86_NOTIFY_VMEXIT */
+#define KVM_X86_NOTIFY_VMEXIT_ENABLED (1ULL << 0)
+#define KVM_X86_NOTIFY_VMEXIT_USER (1ULL << 1)
+
+/* Available with KVM_CAP_S390_ZPCI_OP */
+#define KVM_S390_ZPCI_OP _IOW(KVMIO, 0xd1, struct kvm_s390_zpci_op)
+
+struct kvm_s390_zpci_op {
+ /* in */
+ __u32 fh; /* target device */
+ __u8 op; /* operation to perform */
+ __u8 pad[3];
+ union {
+ /* for KVM_S390_ZPCIOP_REG_AEN */
+ struct {
+ __u64 ibv; /* Guest addr of interrupt bit vector */
+ __u64 sb; /* Guest addr of summary bit */
+ __u32 flags;
+ __u32 noi; /* Number of interrupts */
+ __u8 isc; /* Guest interrupt subclass */
+ __u8 sbo; /* Offset of guest summary bit vector */
+ __u16 pad;
+ } reg_aen;
+ __u64 reserved[8];
+ } u;
+};
+
+/* types for kvm_s390_zpci_op->op */
+#define KVM_S390_ZPCIOP_REG_AEN 0
+#define KVM_S390_ZPCIOP_DEREG_AEN 1
+
+/* flags for kvm_s390_zpci_op->u.reg_aen.flags */
+#define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0)
+
#endif /* __LINUX_KVM_H */
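
The dirty-GFN lifecycle documented above translates into a simple harvest loop in userspace. A hedged sketch, assuming the vCPU's ring was mmapped at KVM_DIRTY_LOG_PAGE_OFFSET and that size is the ring length in entries; error handling is elided:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: harvest dirty GFNs from one vCPU's ring, following the
 * 00 -> 01 -> 1X lifecycle. "fetch" is the caller's cursor and is
 * returned updated so no dirty bit is ever skipped.
 */
static uint32_t harvest_dirty_ring(struct kvm_dirty_gfn *ring,
				   uint32_t fetch, uint32_t size,
				   int vm_fd)
{
	for (;;) {
		struct kvm_dirty_gfn *e = &ring[fetch % size];
		uint32_t flags = __atomic_load_n(&e->flags, __ATOMIC_ACQUIRE);

		if (!(flags & KVM_DIRTY_GFN_F_DIRTY))
			break;	/* no more harvestable entries */
		/* ... record e->slot / e->offset here ... */
		__atomic_store_n(&e->flags, flags | KVM_DIRTY_GFN_F_RESET,
				 __ATOMIC_RELEASE);
		fetch++;
	}
	/* Ask KVM to re-protect all harvested pages. */
	ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
	return fetch;
}
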
diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h
index 8b86609849b9..960c7e93d1a9 100644
--- a/include/uapi/linux/kvm_para.h
+++ b/include/uapi/linux/kvm_para.h
@@ -29,6 +29,7 @@
#define KVM_HC_CLOCK_PAIRING 9
#define KVM_HC_SEND_IPI 10
#define KVM_HC_SCHED_YIELD 11
+#define KVM_HC_MAP_GPA_RANGE 12
/*
* hypercalls use architecture specific
diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
index 61158f5a1a5b..7d81c3e1ec29 100644
--- a/include/uapi/linux/l2tp.h
+++ b/include/uapi/linux/l2tp.h
@@ -13,8 +13,6 @@
#include <linux/in.h>
#include <linux/in6.h>
-#define IPPROTO_L2TP 115
-
/**
* struct sockaddr_l2tpip - the sockaddr structure for L2TP-over-IP sockets
* @l2tp_family: address family number AF_L2TPIP.
@@ -108,7 +106,7 @@ enum {
L2TP_ATTR_VLAN_ID, /* u16 (not used) */
L2TP_ATTR_COOKIE, /* 0, 4 or 8 bytes */
L2TP_ATTR_PEER_COOKIE, /* 0, 4 or 8 bytes */
- L2TP_ATTR_DEBUG, /* u32, enum l2tp_debug_flags */
+ L2TP_ATTR_DEBUG, /* u32, enum l2tp_debug_flags (not used) */
L2TP_ATTR_RECV_SEQ, /* u8 */
L2TP_ATTR_SEND_SEQ, /* u8 */
L2TP_ATTR_LNS_MODE, /* u8 */
@@ -144,6 +142,8 @@ enum {
L2TP_ATTR_RX_OOS_PACKETS, /* u64 */
L2TP_ATTR_RX_ERRORS, /* u64 */
L2TP_ATTR_STATS_PAD,
+ L2TP_ATTR_RX_COOKIE_DISCARDS, /* u64 */
+ L2TP_ATTR_RX_INVALID, /* u64 */
__L2TP_ATTR_STATS_MAX,
};
@@ -177,7 +177,9 @@ enum l2tp_seqmode {
};
/**
- * enum l2tp_debug_flags - debug message categories for L2TP tunnels/sessions
+ * enum l2tp_debug_flags - debug message categories for L2TP tunnels/sessions.
+ *
+ * Unused.
*
* @L2TP_MSG_DEBUG: verbose debug (if compiled in)
* @L2TP_MSG_CONTROL: userspace - kernel interface
diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h
new file mode 100644
index 000000000000..9c4bcc37a455
--- /dev/null
+++ b/include/uapi/linux/landlock.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Landlock - User space API
+ *
+ * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#ifndef _UAPI_LINUX_LANDLOCK_H
+#define _UAPI_LINUX_LANDLOCK_H
+
+#include <linux/types.h>
+
+/**
+ * struct landlock_ruleset_attr - Ruleset definition
+ *
+ * Argument of sys_landlock_create_ruleset(). This structure can grow in
+ * future versions.
+ */
+struct landlock_ruleset_attr {
+ /**
+ * @handled_access_fs: Bitmask of actions (cf. `Filesystem flags`_)
+ * that is handled by this ruleset and should then be forbidden if no
+ * rule explicitly allows them: it is a deny-by-default list that should
+ * contain as many Landlock access rights as possible. Indeed, all
+ * Landlock filesystem access rights that are not part of
+ * handled_access_fs are allowed. This is needed for backward
+ * compatibility reasons. One exception is the
+ * %LANDLOCK_ACCESS_FS_REFER access right, which is always implicitly
+ * handled, but must still be explicitly handled to add new rules with
+ * this access right.
+ */
+ __u64 handled_access_fs;
+};
+
+/*
+ * sys_landlock_create_ruleset() flags:
+ *
+ * - %LANDLOCK_CREATE_RULESET_VERSION: Get the highest supported Landlock ABI
+ * version.
+ */
+/* clang-format off */
+#define LANDLOCK_CREATE_RULESET_VERSION (1U << 0)
+/* clang-format on */
+
+/**
+ * enum landlock_rule_type - Landlock rule type
+ *
+ * Argument of sys_landlock_add_rule().
+ */
+enum landlock_rule_type {
+ /**
+ * @LANDLOCK_RULE_PATH_BENEATH: Type of a &struct
+ * landlock_path_beneath_attr .
+ */
+ LANDLOCK_RULE_PATH_BENEATH = 1,
+};
+
+/**
+ * struct landlock_path_beneath_attr - Path hierarchy definition
+ *
+ * Argument of sys_landlock_add_rule().
+ */
+struct landlock_path_beneath_attr {
+ /**
+ * @allowed_access: Bitmask of allowed actions for this file hierarchy
+ * (cf. `Filesystem flags`_).
+ */
+ __u64 allowed_access;
+ /**
+ * @parent_fd: File descriptor, preferably opened with ``O_PATH``,
+ * which identifies the parent directory of a file hierarchy, or just a
+ * file.
+ */
+ __s32 parent_fd;
+ /*
+ * This struct is packed to avoid trailing reserved members.
+ * Cf. security/landlock/syscalls.c:build_check_abi()
+ */
+} __attribute__((packed));
+
+/**
+ * DOC: fs_access
+ *
+ * A set of actions on kernel objects may be defined by an attribute (e.g.
+ * &struct landlock_path_beneath_attr) including a bitmask of access.
+ *
+ * Filesystem flags
+ * ~~~~~~~~~~~~~~~~
+ *
+ * These flags make it possible to restrict a sandboxed process to a set of
+ * actions on files and directories. Files or directories opened before the
+ * sandboxing are not subject to these restrictions.
+ *
+ * A file can only receive these access rights:
+ *
+ * - %LANDLOCK_ACCESS_FS_EXECUTE: Execute a file.
+ * - %LANDLOCK_ACCESS_FS_WRITE_FILE: Open a file with write access.
+ * - %LANDLOCK_ACCESS_FS_READ_FILE: Open a file with read access.
+ *
+ * A directory can receive access rights related to files or directories. The
+ * following access right is applied to the directory itself, and the
+ * directories beneath it:
+ *
+ * - %LANDLOCK_ACCESS_FS_READ_DIR: Open a directory or list its content.
+ *
+ * However, the following access rights only apply to the content of a
+ * directory, not the directory itself:
+ *
+ * - %LANDLOCK_ACCESS_FS_REMOVE_DIR: Remove an empty directory or rename one.
+ * - %LANDLOCK_ACCESS_FS_REMOVE_FILE: Unlink (or rename) a file.
+ * - %LANDLOCK_ACCESS_FS_MAKE_CHAR: Create (or rename or link) a character
+ * device.
+ * - %LANDLOCK_ACCESS_FS_MAKE_DIR: Create (or rename) a directory.
+ * - %LANDLOCK_ACCESS_FS_MAKE_REG: Create (or rename or link) a regular file.
+ * - %LANDLOCK_ACCESS_FS_MAKE_SOCK: Create (or rename or link) a UNIX domain
+ * socket.
+ * - %LANDLOCK_ACCESS_FS_MAKE_FIFO: Create (or rename or link) a named pipe.
+ * - %LANDLOCK_ACCESS_FS_MAKE_BLOCK: Create (or rename or link) a block device.
+ * - %LANDLOCK_ACCESS_FS_MAKE_SYM: Create (or rename or link) a symbolic link.
+ * - %LANDLOCK_ACCESS_FS_REFER: Link or rename a file from or to a different
+ * directory (i.e. reparent a file hierarchy). This access right is
+ * available since the second version of the Landlock ABI. This is also the
+ * only access right which is always considered handled by any ruleset in
+ * such a way that reparenting a file hierarchy is always denied by default.
+ * To avoid privilege escalation, it is not enough to add a rule with this
+ * access right. When linking or renaming a file, the destination directory
+ * hierarchy must also always have the same or a superset of restrictions of
+ * the source hierarchy. If it is not the case, or if the domain doesn't
+ * handle this access right, such actions are denied by default with errno
+ * set to ``EXDEV``. Linking also requires a ``LANDLOCK_ACCESS_FS_MAKE_*``
+ * access right on the destination directory, and renaming also requires a
+ * ``LANDLOCK_ACCESS_FS_REMOVE_*`` access right on the source's (file or
+ * directory) parent. Otherwise, such actions are denied with errno set to
+ * ``EACCES``. The ``EACCES`` errno prevails over ``EXDEV`` to let user space
+ * efficiently deal with an unrecoverable error.
+ *
+ * .. warning::
+ *
+ * It is currently not possible to restrict some file-related actions
+ * accessible through these syscall families: :manpage:`chdir(2)`,
+ * :manpage:`truncate(2)`, :manpage:`stat(2)`, :manpage:`flock(2)`,
+ * :manpage:`chmod(2)`, :manpage:`chown(2)`, :manpage:`setxattr(2)`,
+ * :manpage:`utime(2)`, :manpage:`ioctl(2)`, :manpage:`fcntl(2)`,
+ * :manpage:`access(2)`.
+ * Future Landlock evolutions will make it possible to restrict them.
+ */
+/* clang-format off */
+#define LANDLOCK_ACCESS_FS_EXECUTE (1ULL << 0)
+#define LANDLOCK_ACCESS_FS_WRITE_FILE (1ULL << 1)
+#define LANDLOCK_ACCESS_FS_READ_FILE (1ULL << 2)
+#define LANDLOCK_ACCESS_FS_READ_DIR (1ULL << 3)
+#define LANDLOCK_ACCESS_FS_REMOVE_DIR (1ULL << 4)
+#define LANDLOCK_ACCESS_FS_REMOVE_FILE (1ULL << 5)
+#define LANDLOCK_ACCESS_FS_MAKE_CHAR (1ULL << 6)
+#define LANDLOCK_ACCESS_FS_MAKE_DIR (1ULL << 7)
+#define LANDLOCK_ACCESS_FS_MAKE_REG (1ULL << 8)
+#define LANDLOCK_ACCESS_FS_MAKE_SOCK (1ULL << 9)
+#define LANDLOCK_ACCESS_FS_MAKE_FIFO (1ULL << 10)
+#define LANDLOCK_ACCESS_FS_MAKE_BLOCK (1ULL << 11)
+#define LANDLOCK_ACCESS_FS_MAKE_SYM (1ULL << 12)
+#define LANDLOCK_ACCESS_FS_REFER (1ULL << 13)
+/* clang-format on */
+
+#endif /* _UAPI_LINUX_LANDLOCK_H */
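
The header above is consumed through three dedicated syscalls. A minimal sketch of the usual flow (create ruleset, add one rule, drop privileges, restrict self), assuming a libc that defines the SYS_landlock_* numbers; the /usr path is just an example:

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/landlock.h>

/* Sketch: allow read-only access beneath /usr; write access is handled
 * by the ruleset and therefore denied everywhere. */
static int sandbox_readonly_usr(void)
{
	struct landlock_ruleset_attr ruleset_attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE |
				     LANDLOCK_ACCESS_FS_READ_DIR |
				     LANDLOCK_ACCESS_FS_WRITE_FILE,
	};
	struct landlock_path_beneath_attr path_beneath = {
		.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_READ_DIR,
	};
	int ruleset_fd, err;

	ruleset_fd = (int)syscall(SYS_landlock_create_ruleset,
				  &ruleset_attr, sizeof(ruleset_attr), 0);
	if (ruleset_fd < 0)
		return -1;
	path_beneath.parent_fd = open("/usr", O_PATH | O_CLOEXEC);
	err = syscall(SYS_landlock_add_rule, ruleset_fd,
		      LANDLOCK_RULE_PATH_BENEATH, &path_beneath, 0);
	close(path_beneath.parent_fd);
	/* no_new_privs is required to restrict without CAP_SYS_ADMIN. */
	if (!err)
		err = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
		      syscall(SYS_landlock_restrict_self, ruleset_fd, 0);
	close(ruleset_fd);
	return err ? -1 : 0;
}
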
diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
deleted file mode 100644
index f9a1be7fc696..000000000000
--- a/include/uapi/linux/lightnvm.h
+++ /dev/null
@@ -1,225 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2015 CNEX Labs. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- */
-
-#ifndef _UAPI_LINUX_LIGHTNVM_H
-#define _UAPI_LINUX_LIGHTNVM_H
-
-#ifdef __KERNEL__
-#include <linux/kernel.h>
-#include <linux/ioctl.h>
-#else /* __KERNEL__ */
-#include <stdio.h>
-#include <sys/ioctl.h>
-#define DISK_NAME_LEN 32
-#endif /* __KERNEL__ */
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-#define NVM_TTYPE_NAME_MAX 48
-#define NVM_TTYPE_MAX 63
-#define NVM_MMTYPE_LEN 8
-
-#define NVM_CTRL_FILE "/dev/lightnvm/control"
-
-struct nvm_ioctl_info_tgt {
- __u32 version[3];
- __u32 reserved;
- char tgtname[NVM_TTYPE_NAME_MAX];
-};
-
-struct nvm_ioctl_info {
- __u32 version[3]; /* in/out - major, minor, patch */
- __u16 tgtsize; /* number of targets */
- __u16 reserved16; /* pad to 4K page */
- __u32 reserved[12];
- struct nvm_ioctl_info_tgt tgts[NVM_TTYPE_MAX];
-};
-
-enum {
- NVM_DEVICE_ACTIVE = 1 << 0,
-};
-
-struct nvm_ioctl_device_info {
- char devname[DISK_NAME_LEN];
- char bmname[NVM_TTYPE_NAME_MAX];
- __u32 bmversion[3];
- __u32 flags;
- __u32 reserved[8];
-};
-
-struct nvm_ioctl_get_devices {
- __u32 nr_devices;
- __u32 reserved[31];
- struct nvm_ioctl_device_info info[31];
-};
-
-struct nvm_ioctl_create_simple {
- __u32 lun_begin;
- __u32 lun_end;
-};
-
-struct nvm_ioctl_create_extended {
- __u16 lun_begin;
- __u16 lun_end;
- __u16 op;
- __u16 rsv;
-};
-
-enum {
- NVM_CONFIG_TYPE_SIMPLE = 0,
- NVM_CONFIG_TYPE_EXTENDED = 1,
-};
-
-struct nvm_ioctl_create_conf {
- __u32 type;
- union {
- struct nvm_ioctl_create_simple s;
- struct nvm_ioctl_create_extended e;
- };
-};
-
-enum {
- NVM_TARGET_FACTORY = 1 << 0, /* Init target in factory mode */
-};
-
-struct nvm_ioctl_create {
- char dev[DISK_NAME_LEN]; /* open-channel SSD device */
- char tgttype[NVM_TTYPE_NAME_MAX]; /* target type name */
- char tgtname[DISK_NAME_LEN]; /* dev to expose target as */
-
- __u32 flags;
-
- struct nvm_ioctl_create_conf conf;
-};
-
-struct nvm_ioctl_remove {
- char tgtname[DISK_NAME_LEN];
-
- __u32 flags;
-};
-
-struct nvm_ioctl_dev_init {
- char dev[DISK_NAME_LEN]; /* open-channel SSD device */
- char mmtype[NVM_MMTYPE_LEN]; /* register to media manager */
-
- __u32 flags;
-};
-
-enum {
- NVM_FACTORY_ERASE_ONLY_USER = 1 << 0, /* erase only blocks used as
- * host blks or grown blks */
- NVM_FACTORY_RESET_HOST_BLKS = 1 << 1, /* remove host blk marks */
- NVM_FACTORY_RESET_GRWN_BBLKS = 1 << 2, /* remove grown blk marks */
- NVM_FACTORY_NR_BITS = 1 << 3, /* stops here */
-};
-
-struct nvm_ioctl_dev_factory {
- char dev[DISK_NAME_LEN];
-
- __u32 flags;
-};
-
-struct nvm_user_vio {
- __u8 opcode;
- __u8 flags;
- __u16 control;
- __u16 nppas;
- __u16 rsvd;
- __u64 metadata;
- __u64 addr;
- __u64 ppa_list;
- __u32 metadata_len;
- __u32 data_len;
- __u64 status;
- __u32 result;
- __u32 rsvd3[3];
-};
-
-struct nvm_passthru_vio {
- __u8 opcode;
- __u8 flags;
- __u8 rsvd[2];
- __u32 nsid;
- __u32 cdw2;
- __u32 cdw3;
- __u64 metadata;
- __u64 addr;
- __u32 metadata_len;
- __u32 data_len;
- __u64 ppa_list;
- __u16 nppas;
- __u16 control;
- __u32 cdw13;
- __u32 cdw14;
- __u32 cdw15;
- __u64 status;
- __u32 result;
- __u32 timeout_ms;
-};
-
-/* The ioctl type, 'L', 0x20 - 0x2F documented in ioctl-number.txt */
-enum {
- /* top level cmds */
- NVM_INFO_CMD = 0x20,
- NVM_GET_DEVICES_CMD,
-
- /* device level cmds */
- NVM_DEV_CREATE_CMD,
- NVM_DEV_REMOVE_CMD,
-
- /* Init a device to support LightNVM media managers */
- NVM_DEV_INIT_CMD,
-
- /* Factory reset device */
- NVM_DEV_FACTORY_CMD,
-
- /* Vector user I/O */
- NVM_DEV_VIO_ADMIN_CMD = 0x41,
- NVM_DEV_VIO_CMD = 0x42,
- NVM_DEV_VIO_USER_CMD = 0x43,
-};
-
-#define NVM_IOCTL 'L' /* 0x4c */
-
-#define NVM_INFO _IOWR(NVM_IOCTL, NVM_INFO_CMD, \
- struct nvm_ioctl_info)
-#define NVM_GET_DEVICES _IOR(NVM_IOCTL, NVM_GET_DEVICES_CMD, \
- struct nvm_ioctl_get_devices)
-#define NVM_DEV_CREATE _IOW(NVM_IOCTL, NVM_DEV_CREATE_CMD, \
- struct nvm_ioctl_create)
-#define NVM_DEV_REMOVE _IOW(NVM_IOCTL, NVM_DEV_REMOVE_CMD, \
- struct nvm_ioctl_remove)
-#define NVM_DEV_INIT _IOW(NVM_IOCTL, NVM_DEV_INIT_CMD, \
- struct nvm_ioctl_dev_init)
-#define NVM_DEV_FACTORY _IOW(NVM_IOCTL, NVM_DEV_FACTORY_CMD, \
- struct nvm_ioctl_dev_factory)
-
-#define NVME_NVM_IOCTL_IO_VIO _IOWR(NVM_IOCTL, NVM_DEV_VIO_USER_CMD, \
- struct nvm_passthru_vio)
-#define NVME_NVM_IOCTL_ADMIN_VIO _IOWR(NVM_IOCTL, NVM_DEV_VIO_ADMIN_CMD,\
- struct nvm_passthru_vio)
-#define NVME_NVM_IOCTL_SUBMIT_VIO _IOWR(NVM_IOCTL, NVM_DEV_VIO_CMD,\
- struct nvm_user_vio)
-
-#define NVM_VERSION_MAJOR 1
-#define NVM_VERSION_MINOR 0
-#define NVM_VERSION_PATCHLEVEL 0
-
-#endif
diff --git a/include/uapi/linux/lirc.h b/include/uapi/linux/lirc.h
index f99d9dcae667..8d7ca7c6af42 100644
--- a/include/uapi/linux/lirc.h
+++ b/include/uapi/linux/lirc.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* lirc.h - linux infrared remote control header file
- * last modified 2010/07/13 by Jarod Wilson
*/
#ifndef _LINUX_LIRC_H
@@ -17,14 +16,16 @@
#define LIRC_MODE2_PULSE 0x01000000
#define LIRC_MODE2_FREQUENCY 0x02000000
#define LIRC_MODE2_TIMEOUT 0x03000000
+#define LIRC_MODE2_OVERFLOW 0x04000000
#define LIRC_VALUE_MASK 0x00FFFFFF
#define LIRC_MODE2_MASK 0xFF000000
-#define LIRC_SPACE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_SPACE)
-#define LIRC_PULSE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_PULSE)
-#define LIRC_FREQUENCY(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_FREQUENCY)
-#define LIRC_TIMEOUT(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_TIMEOUT)
+#define LIRC_SPACE(val) (((val) & LIRC_VALUE_MASK) | LIRC_MODE2_SPACE)
+#define LIRC_PULSE(val) (((val) & LIRC_VALUE_MASK) | LIRC_MODE2_PULSE)
+#define LIRC_FREQUENCY(val) (((val) & LIRC_VALUE_MASK) | LIRC_MODE2_FREQUENCY)
+#define LIRC_TIMEOUT(val) (((val) & LIRC_VALUE_MASK) | LIRC_MODE2_TIMEOUT)
+#define LIRC_OVERFLOW(val) (((val) & LIRC_VALUE_MASK) | LIRC_MODE2_OVERFLOW)
#define LIRC_VALUE(val) ((val)&LIRC_VALUE_MASK)
#define LIRC_MODE2(val) ((val)&LIRC_MODE2_MASK)
@@ -33,6 +34,7 @@
#define LIRC_IS_PULSE(val) (LIRC_MODE2(val) == LIRC_MODE2_PULSE)
#define LIRC_IS_FREQUENCY(val) (LIRC_MODE2(val) == LIRC_MODE2_FREQUENCY)
#define LIRC_IS_TIMEOUT(val) (LIRC_MODE2(val) == LIRC_MODE2_TIMEOUT)
+#define LIRC_IS_OVERFLOW(val) (LIRC_MODE2(val) == LIRC_MODE2_OVERFLOW)
/* used heavily by lirc userspace */
#define lirc_t int
@@ -71,13 +73,10 @@
#define LIRC_CAN_REC_MASK LIRC_MODE2REC(LIRC_CAN_SEND_MASK)
#define LIRC_CAN_SET_REC_CARRIER (LIRC_CAN_SET_SEND_CARRIER << 16)
-#define LIRC_CAN_SET_REC_DUTY_CYCLE (LIRC_CAN_SET_SEND_DUTY_CYCLE << 16)
-#define LIRC_CAN_SET_REC_DUTY_CYCLE_RANGE 0x40000000
#define LIRC_CAN_SET_REC_CARRIER_RANGE 0x80000000
#define LIRC_CAN_GET_REC_RESOLUTION 0x20000000
#define LIRC_CAN_SET_REC_TIMEOUT 0x10000000
-#define LIRC_CAN_SET_REC_FILTER 0x08000000
#define LIRC_CAN_MEASURE_CARRIER 0x02000000
#define LIRC_CAN_USE_WIDEBAND_RECEIVER 0x04000000
@@ -85,7 +84,12 @@
#define LIRC_CAN_SEND(x) ((x)&LIRC_CAN_SEND_MASK)
#define LIRC_CAN_REC(x) ((x)&LIRC_CAN_REC_MASK)
-#define LIRC_CAN_NOTIFY_DECODE 0x01000000
+/*
+ * Unused features. These features were never implemented, in tree or
+ * out of tree. These definitions are here so as not to break the lircd build.
+ */
+#define LIRC_CAN_SET_REC_FILTER 0
+#define LIRC_CAN_NOTIFY_DECODE 0
/*** IOCTL commands for lirc driver ***/
@@ -139,7 +143,7 @@
*/
#define LIRC_GET_REC_TIMEOUT _IOR('i', 0x00000024, __u32)
-/*
+/**
* struct lirc_scancode - decoded scancode with protocol for use with
* LIRC_MODE_SCANCODE
*
@@ -196,6 +200,7 @@ struct lirc_scancode {
* @RC_PROTO_RCMM24: RC-MM protocol 24 bits
* @RC_PROTO_RCMM32: RC-MM protocol 32 bits
* @RC_PROTO_XBOX_DVD: Xbox DVD Movie Playback Kit protocol
+ * @RC_PROTO_MAX: Maximum value of enum rc_proto
*/
enum rc_proto {
RC_PROTO_UNKNOWN = 0,
@@ -226,6 +231,7 @@ enum rc_proto {
RC_PROTO_RCMM24 = 25,
RC_PROTO_RCMM32 = 26,
RC_PROTO_XBOX_DVD = 27,
+ RC_PROTO_MAX = RC_PROTO_XBOX_DVD,
};
#endif
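
A short sketch of decoding the packed mode2 words with the macros above: the top byte selects the record type and the low 24 bits carry the value. The sample would normally come from read() on a lirc chardev; here it is just a parameter.

    #include <linux/lirc.h>
    #include <stdio.h>

    static void print_mode2(unsigned int sample)
    {
            if (LIRC_IS_PULSE(sample))
                    printf("pulse %u\n", LIRC_VALUE(sample));
            else if (LIRC_IS_SPACE(sample))
                    printf("space %u\n", LIRC_VALUE(sample));
            else if (LIRC_IS_TIMEOUT(sample))
                    printf("timeout %u\n", LIRC_VALUE(sample));
            else if (LIRC_IS_OVERFLOW(sample))
                    printf("overflow\n");   /* value carries no duration */
    }
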
diff --git a/include/uapi/linux/loadpin.h b/include/uapi/linux/loadpin.h
new file mode 100644
index 000000000000..daa6dbb8bb02
--- /dev/null
+++ b/include/uapi/linux/loadpin.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2022, Google LLC
+ */
+
+#ifndef _UAPI_LINUX_LOOP_LOADPIN_H
+#define _UAPI_LINUX_LOOP_LOADPIN_H
+
+#define LOADPIN_IOC_MAGIC 'L'
+
+/**
+ * LOADPIN_IOC_SET_TRUSTED_VERITY_DIGESTS - Set up the root digests of verity devices
+ * that loadpin should trust.
+ *
+ * Takes a file descriptor from which to read the root digests of trusted verity devices. The file
+ * is expected to contain a list of digests in ASCII format, with one line per digest. The ioctl
+ * must be issued on the securityfs attribute 'loadpin/dm-verity' (which can typically be found
+ * under /sys/kernel/security/loadpin/dm-verity).
+ */
+#define LOADPIN_IOC_SET_TRUSTED_VERITY_DIGESTS _IOW(LOADPIN_IOC_MAGIC, 0x00, unsigned int)
+
+#endif /* _UAPI_LINUX_LOOP_LOADPIN_H */
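
A hedged usage sketch of the ioctl just defined; the digest-file path is illustrative, and the securityfs attribute is the one named above:

    #include <fcntl.h>
    #include <linux/loadpin.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int loadpin_trust_digests(const char *digests_path)
    {
            int attr_fd, digests_fd, ret;

            digests_fd = open(digests_path, O_RDONLY | O_CLOEXEC);
            if (digests_fd < 0)
                    return -1;
            attr_fd = open("/sys/kernel/security/loadpin/dm-verity",
                           O_WRONLY | O_CLOEXEC);
            if (attr_fd < 0) {
                    close(digests_fd);
                    return -1;
            }
            /* The ioctl argument is the fd to read ASCII digests from. */
            ret = ioctl(attr_fd, LOADPIN_IOC_SET_TRUSTED_VERITY_DIGESTS,
                        digests_fd);
            close(attr_fd);
            close(digests_fd);
            return ret;
    }
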
diff --git a/include/uapi/linux/loop.h b/include/uapi/linux/loop.h
index 080a8df134ef..6f63527dd2ed 100644
--- a/include/uapi/linux/loop.h
+++ b/include/uapi/linux/loop.h
@@ -1,11 +1,6 @@
/* SPDX-License-Identifier: GPL-1.0+ WITH Linux-syscall-note */
/*
- * include/linux/loop.h
- *
- * Written by Theodore Ts'o, 3/29/93.
- *
- * Copyright 1993 by Theodore Ts'o. Redistribution of this file is
- * permitted under the GNU General Public License.
+ * Copyright 1993 by Theodore Ts'o.
*/
#ifndef _UAPI_LINUX_LOOP_H
#define _UAPI_LINUX_LOOP_H
@@ -25,6 +20,16 @@ enum {
LO_FLAGS_DIRECT_IO = 16,
};
+/* LO_FLAGS that can be set using LOOP_SET_STATUS(64) */
+#define LOOP_SET_STATUS_SETTABLE_FLAGS (LO_FLAGS_AUTOCLEAR | LO_FLAGS_PARTSCAN)
+
+/* LO_FLAGS that can be cleared using LOOP_SET_STATUS(64) */
+#define LOOP_SET_STATUS_CLEARABLE_FLAGS (LO_FLAGS_AUTOCLEAR)
+
+/* LO_FLAGS that can be set using LOOP_CONFIGURE */
+#define LOOP_CONFIGURE_SETTABLE_FLAGS (LO_FLAGS_READ_ONLY | LO_FLAGS_AUTOCLEAR \
+ | LO_FLAGS_PARTSCAN | LO_FLAGS_DIRECT_IO)
+
#include <asm/posix_types.h> /* for __kernel_old_dev_t */
#include <linux/types.h> /* for __u64 */
@@ -35,9 +40,9 @@ struct loop_info {
unsigned long lo_inode; /* ioctl r/o */
__kernel_old_dev_t lo_rdevice; /* ioctl r/o */
int lo_offset;
- int lo_encrypt_type;
+ int lo_encrypt_type; /* obsolete, ignored */
int lo_encrypt_key_size; /* ioctl w/o */
- int lo_flags; /* ioctl r/o */
+ int lo_flags;
char lo_name[LO_NAME_SIZE];
unsigned char lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
unsigned long lo_init[2];
@@ -51,15 +56,31 @@ struct loop_info64 {
__u64 lo_offset;
__u64 lo_sizelimit;/* bytes, 0 == max available */
__u32 lo_number; /* ioctl r/o */
- __u32 lo_encrypt_type;
+ __u32 lo_encrypt_type; /* obsolete, ignored */
__u32 lo_encrypt_key_size; /* ioctl w/o */
- __u32 lo_flags; /* ioctl r/o */
+ __u32 lo_flags;
__u8 lo_file_name[LO_NAME_SIZE];
__u8 lo_crypt_name[LO_NAME_SIZE];
__u8 lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
__u64 lo_init[2];
};
+/**
+ * struct loop_config - Complete configuration for a loop device.
+ * @fd: fd of the file to be used as a backing file for the loop device.
+ * @block_size: block size to use; ignored if 0.
+ * @info: struct loop_info64 to configure the loop device with.
+ *
+ * This structure is used with the LOOP_CONFIGURE ioctl, and can be used to
+ * atomically set up and configure all loop device parameters at once.
+ */
+struct loop_config {
+ __u32 fd;
+ __u32 block_size;
+ struct loop_info64 info;
+ __u64 __reserved[8];
+};
+
/*
* Loop filter types
*/
@@ -90,6 +111,7 @@ struct loop_info64 {
#define LOOP_SET_CAPACITY 0x4C07
#define LOOP_SET_DIRECT_IO 0x4C08
#define LOOP_SET_BLOCK_SIZE 0x4C09
+#define LOOP_CONFIGURE 0x4C0A
/* /dev/loop-control interface */
#define LOOP_CTL_ADD 0x4C80
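
A sketch of the atomic setup LOOP_CONFIGURE enables, replacing the older LOOP_SET_FD followed by LOOP_SET_STATUS64 two-step; the device and backing-file paths are illustrative:

    #include <fcntl.h>
    #include <linux/loop.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int loop_configure_example(void)
    {
            struct loop_config config;
            int loop_fd, backing_fd;

            loop_fd = open("/dev/loop0", O_RDWR | O_CLOEXEC);
            backing_fd = open("/tmp/backing.img", O_RDWR | O_CLOEXEC);
            if (loop_fd < 0 || backing_fd < 0)
                    return -1;

            memset(&config, 0, sizeof(config));
            config.fd = backing_fd;
            config.block_size = 4096;       /* ignored if 0 */
            config.info.lo_flags = LO_FLAGS_AUTOCLEAR | LO_FLAGS_PARTSCAN;

            /* One ioctl attaches the file and applies the status. */
            return ioctl(loop_fd, LOOP_CONFIGURE, &config);
    }
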
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
index f6035f737193..229655ef792f 100644
--- a/include/uapi/linux/lwtunnel.h
+++ b/include/uapi/linux/lwtunnel.h
@@ -13,6 +13,9 @@ enum lwtunnel_encap_types {
LWTUNNEL_ENCAP_SEG6,
LWTUNNEL_ENCAP_BPF,
LWTUNNEL_ENCAP_SEG6_LOCAL,
+ LWTUNNEL_ENCAP_RPL,
+ LWTUNNEL_ENCAP_IOAM6,
+ LWTUNNEL_ENCAP_XFRM,
__LWTUNNEL_ENCAP_MAX,
};
@@ -109,4 +112,13 @@ enum {
#define LWT_BPF_MAX_HEADROOM 256
+enum {
+ LWT_XFRM_UNSPEC,
+ LWT_XFRM_IF_ID,
+ LWT_XFRM_LINK,
+ __LWT_XFRM_MAX,
+};
+
+#define LWT_XFRM_MAX (__LWT_XFRM_MAX - 1)
+
#endif /* _UAPI_LWTUNNEL_H_ */
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index d78064007b17..6325d1d0e90f 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -6,6 +6,7 @@
#define AFFS_SUPER_MAGIC 0xadff
#define AFS_SUPER_MAGIC 0x5346414F
#define AUTOFS_SUPER_MAGIC 0x0187
+#define CEPH_SUPER_MAGIC 0x00c36400
#define CODA_SUPER_MAGIC 0x73757245
#define CRAMFS_MAGIC 0x28cd3d45 /* some random number */
 #define CRAMFS_MAGIC_WEND 0x453dcd28 /* magic number with the wrong endianness */
@@ -35,6 +36,7 @@
#define EFIVARFS_MAGIC 0xde5e81e4
#define HOSTFS_SUPER_MAGIC 0x00c0ffee
#define OVERLAYFS_SUPER_MAGIC 0x794c7630
+#define FUSE_SUPER_MAGIC 0x65735546
#define MINIX_SUPER_MAGIC 0x137F /* minix v1 fs, 14 char names */
#define MINIX_SUPER_MAGIC2 0x138F /* minix v1 fs, 30 char names */
@@ -43,6 +45,7 @@
#define MINIX3_SUPER_MAGIC 0x4d5a /* minix v3 fs, 60 char names */
#define MSDOS_SUPER_MAGIC 0x4d44 /* MD */
+#define EXFAT_SUPER_MAGIC 0x2011BAB0
#define NCP_SUPER_MAGIC 0x564c /* Guess, what 0x564c is :-) */
#define NFS_SUPER_MAGIC 0x6969
#define OCFS2_SUPER_MAGIC 0x7461636f
@@ -51,6 +54,7 @@
#define QNX6_SUPER_MAGIC 0x68191122 /* qnx6 fs detection */
#define AFS_FS_MAGIC 0x6B414653
+
#define REISERFS_SUPER_MAGIC 0x52654973 /* used by gcc */
/* used by file system utilities that
look at the superblock, etc. */
@@ -59,6 +63,9 @@
#define REISER2FS_JR_SUPER_MAGIC_STRING "ReIsEr3Fs"
#define SMB_SUPER_MAGIC 0x517B
+#define CIFS_SUPER_MAGIC 0xFF534D42 /* the first four bytes of SMB PDUs */
+#define SMB2_SUPER_MAGIC 0xFE534D42
+
#define CGROUP_SUPER_MAGIC 0x27e0eb
#define CGROUP2_SUPER_MAGIC 0x63677270
@@ -91,10 +98,8 @@
/* Since UDF 2.01 is ISO 13346 based... */
#define UDF_SUPER_MAGIC 0x15013346
-#define BALLOON_KVM_MAGIC 0x13661366
-#define ZSMALLOC_MAGIC 0x58295829
#define DMA_BUF_MAGIC 0x444d4142 /* "DMAB" */
-#define Z3FOLD_MAGIC 0x33
-#define PPC_CMM_MAGIC 0xc7571590
+#define DEVMEM_MAGIC 0x454d444d /* "DMEM" */
+#define SECRETMEM_MAGIC 0x5345434d /* "SECM" */
#endif /* __LINUX_MAGIC_H__ */
diff --git a/include/uapi/linux/major.h b/include/uapi/linux/major.h
index 7e5fa8e15c43..4e5f2b3a3d54 100644
--- a/include/uapi/linux/major.h
+++ b/include/uapi/linux/major.h
@@ -34,8 +34,6 @@
#define GOLDSTAR_CDROM_MAJOR 16
#define OPTICS_CDROM_MAJOR 17
#define SANYO_CDROM_MAJOR 18
-#define CYCLADES_MAJOR 19
-#define CYCLADESAUX_MAJOR 20
#define MITSUMI_X_CDROM_MAJOR 20
#define MFM_ACORN_MAJOR 21 /* ARM Linux /dev/mfm */
#define SCSI_GENERIC_MAJOR 21
diff --git a/include/uapi/linux/map_to_14segment.h b/include/uapi/linux/map_to_14segment.h
new file mode 100644
index 000000000000..0346ef76543b
--- /dev/null
+++ b/include/uapi/linux/map_to_14segment.h
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2021 Glider bv
+ *
+ * Based on include/uapi/linux/map_to_7segment.h:
+
+ * Copyright (c) 2005 Henk Vergonet <Henk.Vergonet@gmail.com>
+ */
+
+#ifndef MAP_TO_14SEGMENT_H
+#define MAP_TO_14SEGMENT_H
+
+/* This file provides translation primitives and tables for the conversion
+ * of (ASCII) characters to a 14-segments notation.
+ *
+ * The 14 segment's wikipedia notation below is used as standard.
+ * See: https://en.wikipedia.org/wiki/Fourteen-segment_display
+ *
+ * Notation: +---a---+
+ * |\ | /|
+ * f h i j b
+ * | \|/ |
+ * +-g1+-g2+
+ * | /|\ |
+ * e k l m c
+ * |/ | \|
+ * +---d---+
+ *
+ * Usage:
+ *
+ * Register a map variable, and fill it with a character set:
+ * static SEG14_DEFAULT_MAP(map_seg14);
+ *
+ *
+ * Then use for conversion:
+ * seg14 = map_to_seg14(&map_seg14, some_char);
+ * ...
+ *
+ * In device drivers it is recommended, if required, to make the char map
+ * accessible via the sysfs interface using the following scheme:
+ *
+ * static ssize_t map_seg14_show(struct device *dev,
+ * struct device_attribute *attr, char *buf)
+ * {
+ * memcpy(buf, &map_seg14, sizeof(map_seg14));
+ * return sizeof(map_seg14);
+ * }
+ * static ssize_t map_seg14_store(struct device *dev,
+ * struct device_attribute *attr,
+ * const char *buf, size_t cnt)
+ * {
+ * if (cnt != sizeof(map_seg14))
+ * return -EINVAL;
+ * memcpy(&map_seg14, buf, cnt);
+ * return cnt;
+ * }
+ * static DEVICE_ATTR_RW(map_seg14);
+ */
+#include <linux/errno.h>
+#include <linux/types.h>
+
+#include <asm/byteorder.h>
+
+#define BIT_SEG14_A 0
+#define BIT_SEG14_B 1
+#define BIT_SEG14_C 2
+#define BIT_SEG14_D 3
+#define BIT_SEG14_E 4
+#define BIT_SEG14_F 5
+#define BIT_SEG14_G1 6
+#define BIT_SEG14_G2 7
+#define BIT_SEG14_H 8
+#define BIT_SEG14_I 9
+#define BIT_SEG14_J 10
+#define BIT_SEG14_K 11
+#define BIT_SEG14_L 12
+#define BIT_SEG14_M 13
+#define BIT_SEG14_RESERVED1 14
+#define BIT_SEG14_RESERVED2 15
+
+struct seg14_conversion_map {
+ __be16 table[128];
+};
+
+static __inline__ int map_to_seg14(struct seg14_conversion_map *map, int c)
+{
+ if (c < 0 || c >= sizeof(map->table) / sizeof(map->table[0]))
+ return -EINVAL;
+
+ return __be16_to_cpu(map->table[c]);
+}
+
+#define SEG14_CONVERSION_MAP(_name, _map) \
+ struct seg14_conversion_map _name = { .table = { _map } }
+
+/*
+ * It is recommended to use a facility that allows user space to redefine
+ * custom character sets for LCD devices. Please use a sysfs interface
+ * as described above.
+ */
+#define MAP_TO_SEG14_SYSFS_FILE "map_seg14"
+
+/*******************************************************************************
+ * ASCII conversion table
+ ******************************************************************************/
+
+#define _SEG14(sym, a, b, c, d, e, f, g1, g2, h, j, k, l, m, n) \
+ __cpu_to_be16( a << BIT_SEG14_A | b << BIT_SEG14_B | \
+ c << BIT_SEG14_C | d << BIT_SEG14_D | \
+ e << BIT_SEG14_E | f << BIT_SEG14_F | \
+ g1 << BIT_SEG14_G1 | g2 << BIT_SEG14_G2 | \
+ h << BIT_SEG14_H | j << BIT_SEG14_I | \
+ k << BIT_SEG14_J | l << BIT_SEG14_K | \
+ m << BIT_SEG14_L | n << BIT_SEG14_M )
+
+#define _MAP_0_32_ASCII_SEG14_NON_PRINTABLE \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+
+#define _MAP_33_47_ASCII_SEG14_SYMBOL \
+ _SEG14('!', 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('"', 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0), \
+ _SEG14('#', 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0), \
+ _SEG14('$', 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0), \
+ _SEG14('%', 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0), \
+ _SEG14('&', 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1), \
+ _SEG14('\'',0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0), \
+ _SEG14('(', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1), \
+ _SEG14(')', 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0), \
+ _SEG14('*', 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1), \
+ _SEG14('+', 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0), \
+ _SEG14(',', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0), \
+ _SEG14('-', 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('.', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1), \
+ _SEG14('/', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0),
+
+#define _MAP_48_57_ASCII_SEG14_NUMERIC \
+ _SEG14('0', 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0), \
+ _SEG14('1', 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0), \
+ _SEG14('2', 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('3', 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('4', 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('5', 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1), \
+ _SEG14('6', 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('7', 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0), \
+ _SEG14('8', 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('9', 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0),
+
+#define _MAP_58_64_ASCII_SEG14_SYMBOL \
+ _SEG14(':', 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0), \
+ _SEG14(';', 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0), \
+ _SEG14('<', 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1), \
+ _SEG14('=', 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('>', 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0), \
+ _SEG14('?', 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0), \
+ _SEG14('@', 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0),
+
+#define _MAP_65_90_ASCII_SEG14_ALPHA_UPPER \
+ _SEG14('A', 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('B', 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0), \
+ _SEG14('C', 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('D', 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0), \
+ _SEG14('E', 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('F', 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('G', 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('H', 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('I', 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0), \
+ _SEG14('J', 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('K', 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1), \
+ _SEG14('L', 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('M', 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0), \
+ _SEG14('N', 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1), \
+ _SEG14('O', 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('P', 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('Q', 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1), \
+ _SEG14('R', 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1), \
+ _SEG14('S', 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('T', 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0), \
+ _SEG14('U', 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('V', 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0), \
+ _SEG14('W', 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1), \
+ _SEG14('X', 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1), \
+ _SEG14('Y', 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0), \
+ _SEG14('Z', 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0),
+
+#define _MAP_91_96_ASCII_SEG14_SYMBOL \
+ _SEG14('[', 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('\\',0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1), \
+ _SEG14(']', 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('^', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1), \
+ _SEG14('_', 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('`', 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0),
+
+#define _MAP_97_122_ASCII_SEG14_ALPHA_LOWER \
+ _SEG14('a', 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0), \
+ _SEG14('b', 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1), \
+ _SEG14('c', 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('d', 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0), \
+ _SEG14('e', 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0), \
+ _SEG14('f', 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0), \
+ _SEG14('g', 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0), \
+ _SEG14('h', 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0), \
+ _SEG14('i', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0), \
+ _SEG14('j', 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0), \
+ _SEG14('k', 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1), \
+ _SEG14('l', 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('m', 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0), \
+ _SEG14('n', 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0), \
+ _SEG14('o', 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('p', 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0), \
+ _SEG14('q', 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0), \
+ _SEG14('r', 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('s', 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1), \
+ _SEG14('t', 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('u', 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('v', 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0), \
+ _SEG14('w', 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1), \
+ _SEG14('x', 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1), \
+ _SEG14('y', 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0), \
+ _SEG14('z', 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0),
+
+#define _MAP_123_126_ASCII_SEG14_SYMBOL \
+ _SEG14('{', 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0), \
+ _SEG14('|', 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0), \
+ _SEG14('}', 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1), \
+ _SEG14('~', 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0),
+
+/* Maps */
+#define MAP_ASCII14SEG_ALPHANUM \
+ _MAP_0_32_ASCII_SEG14_NON_PRINTABLE \
+ _MAP_33_47_ASCII_SEG14_SYMBOL \
+ _MAP_48_57_ASCII_SEG14_NUMERIC \
+ _MAP_58_64_ASCII_SEG14_SYMBOL \
+ _MAP_65_90_ASCII_SEG14_ALPHA_UPPER \
+ _MAP_91_96_ASCII_SEG14_SYMBOL \
+ _MAP_97_122_ASCII_SEG14_ALPHA_LOWER \
+ _MAP_123_126_ASCII_SEG14_SYMBOL
+
+#define SEG14_DEFAULT_MAP(_name) \
+ SEG14_CONVERSION_MAP(_name, MAP_ASCII14SEG_ALPHANUM)
+
+#endif /* MAP_TO_14SEGMENT_H */
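
A minimal sketch of the usage pattern described in the header comment, runnable from user space once the uapi header is installed:

    #include <stdio.h>
    #include <linux/map_to_14segment.h>

    static SEG14_DEFAULT_MAP(map_seg14);

    int main(void)
    {
            const char *s = "HELLO";

            /* map_to_seg14() returns the 14-bit pattern, or -EINVAL
             * for characters outside the 128-entry table. */
            for (; *s; s++)
                    printf("'%c' -> 0x%04x\n", *s,
                           map_to_seg14(&map_seg14, *s));
            return 0;
    }
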
diff --git a/include/uapi/linux/map_to_7segment.h b/include/uapi/linux/map_to_7segment.h
index f9ed18134b83..04c8b55812e7 100644
--- a/include/uapi/linux/map_to_7segment.h
+++ b/include/uapi/linux/map_to_7segment.h
@@ -1,20 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* Copyright (c) 2005 Henk Vergonet <Henk.Vergonet@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef MAP_TO_7SEGMENT_H
@@ -24,7 +10,7 @@
* of (ASCII) characters to a 7-segments notation.
*
* The 7 segment's wikipedia notation below is used as standard.
- * See: http://en.wikipedia.org/wiki/Seven_segment_display
+ * See: https://en.wikipedia.org/wiki/Seven_segment_display
*
* Notation: +-a-+
* f b
@@ -45,17 +31,22 @@
* In device drivers it is recommended, if required, to make the char map
* accessible via the sysfs interface using the following scheme:
*
- * static ssize_t show_map(struct device *dev, char *buf) {
+ * static ssize_t map_seg7_show(struct device *dev,
+ * struct device_attribute *attr, char *buf)
+ * {
* memcpy(buf, &map_seg7, sizeof(map_seg7));
* return sizeof(map_seg7);
* }
- * static ssize_t store_map(struct device *dev, const char *buf, size_t cnt) {
+ * static ssize_t map_seg7_store(struct device *dev,
+ * struct device_attribute *attr, const char *buf,
+ * size_t cnt)
+ * {
* if(cnt != sizeof(map_seg7))
* return -EINVAL;
* memcpy(&map_seg7, buf, cnt);
* return cnt;
* }
- * static DEVICE_ATTR(map_seg7, PERMS_RW, show_map, store_map);
+ * static DEVICE_ATTR_RW(map_seg7);
*
* History:
* 2005-05-31 RFC linux-kernel@vger.kernel.org
diff --git a/include/uapi/linux/mctp.h b/include/uapi/linux/mctp.h
new file mode 100644
index 000000000000..154ab56651f1
--- /dev/null
+++ b/include/uapi/linux/mctp.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Management Component Transport Protocol (MCTP)
+ *
+ * Copyright (c) 2021 Code Construct
+ * Copyright (c) 2021 Google
+ */
+
+#ifndef __UAPI_MCTP_H
+#define __UAPI_MCTP_H
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/netdevice.h>
+
+typedef __u8 mctp_eid_t;
+
+struct mctp_addr {
+ mctp_eid_t s_addr;
+};
+
+struct sockaddr_mctp {
+ __kernel_sa_family_t smctp_family;
+ __u16 __smctp_pad0;
+ unsigned int smctp_network;
+ struct mctp_addr smctp_addr;
+ __u8 smctp_type;
+ __u8 smctp_tag;
+ __u8 __smctp_pad1;
+};
+
+struct sockaddr_mctp_ext {
+ struct sockaddr_mctp smctp_base;
+ int smctp_ifindex;
+ __u8 smctp_halen;
+ __u8 __smctp_pad0[3];
+ __u8 smctp_haddr[MAX_ADDR_LEN];
+};
+
+#define MCTP_NET_ANY 0x0
+
+#define MCTP_ADDR_NULL 0x00
+#define MCTP_ADDR_ANY 0xff
+
+#define MCTP_TAG_MASK 0x07
+#define MCTP_TAG_OWNER 0x08
+#define MCTP_TAG_PREALLOC 0x10
+
+#define MCTP_OPT_ADDR_EXT 1
+
+#define SIOCMCTPALLOCTAG (SIOCPROTOPRIVATE + 0)
+#define SIOCMCTPDROPTAG (SIOCPROTOPRIVATE + 1)
+
+struct mctp_ioc_tag_ctl {
+ mctp_eid_t peer_addr;
+
+	/* For SIOCMCTPALLOCTAG: must be passed as zero; the kernel will
+	 * populate it with the allocated tag value. The returned tag value will
+ * always have TO and PREALLOC set.
+ *
+ * For SIOCMCTPDROPTAG: userspace provides tag value to drop, from
+ * a prior SIOCMCTPALLOCTAG call (and so must have TO and PREALLOC set).
+ */
+ __u8 tag;
+ __u16 flags;
+};
+
+#endif /* __UAPI_MCTP_H */
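
A hedged sketch of a tagged MCTP send using the address structure above; the peer EID, message type, and payload are illustrative, and AF_MCTP comes from <linux/socket.h> on kernels with MCTP support (a fallback definition is included here as an assumption):

    #include <linux/mctp.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef AF_MCTP
    #define AF_MCTP 45      /* assumption for older libc headers */
    #endif

    int mctp_send_example(void)
    {
            struct sockaddr_mctp addr;
            const char req[] = { 0x01 };    /* payload, illustrative */
            int sd;

            sd = socket(AF_MCTP, SOCK_DGRAM, 0);
            if (sd < 0)
                    return -1;

            memset(&addr, 0, sizeof(addr));
            addr.smctp_family = AF_MCTP;
            addr.smctp_network = MCTP_NET_ANY;
            addr.smctp_addr.s_addr = 8;             /* peer EID, illustrative */
            addr.smctp_type = 1;                    /* message type, illustrative */
            addr.smctp_tag = MCTP_TAG_OWNER;        /* kernel allocates the tag */

            if (sendto(sd, req, sizeof(req), 0,
                       (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                    close(sd);
                    return -1;
            }
            close(sd);
            return 0;
    }
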
diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h
index 4bcb41c71b8c..75b7257a51e1 100644
--- a/include/uapi/linux/mdio.h
+++ b/include/uapi/linux/mdio.h
@@ -53,18 +53,33 @@
#define MDIO_AN_EEE_LPABLE 61 /* EEE link partner ability */
#define MDIO_AN_EEE_ADV2 62 /* EEE advertisement 2 */
#define MDIO_AN_EEE_LPABLE2 63 /* EEE link partner ability 2 */
+#define MDIO_AN_CTRL2 64 /* AN THP bypass request control */
/* Media-dependent registers. */
#define MDIO_PMA_10GBT_SWAPPOL 130 /* 10GBASE-T pair swap & polarity */
#define MDIO_PMA_10GBT_TXPWR 131 /* 10GBASE-T TX power control */
#define MDIO_PMA_10GBT_SNR 133 /* 10GBASE-T SNR margin, lane A.
* Lanes B-D are numbered 134-136. */
+#define MDIO_PMA_10GBR_FSRT_CSR 147 /* 10GBASE-R fast retrain status and control */
#define MDIO_PMA_10GBR_FECABLE 170 /* 10GBASE-R FEC ability */
#define MDIO_PCS_10GBX_STAT1 24 /* 10GBASE-X PCS status 1 */
#define MDIO_PCS_10GBRT_STAT1 32 /* 10GBASE-R/-T PCS status 1 */
#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */
#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */
#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */
+#define MDIO_B10L_PMA_CTRL 2294 /* 10BASE-T1L PMA control */
+#define MDIO_PMA_10T1L_STAT 2295 /* 10BASE-T1L PMA status */
+#define MDIO_PCS_10T1L_CTRL 2278 /* 10BASE-T1L PCS control */
+#define MDIO_PMA_PMD_BT1 18 /* BASE-T1 PMA/PMD extended ability */
+#define MDIO_AN_T1_CTRL 512 /* BASE-T1 AN control */
+#define MDIO_AN_T1_STAT 513 /* BASE-T1 AN status */
+#define MDIO_AN_T1_ADV_L 514 /* BASE-T1 AN advertisement register [15:0] */
+#define MDIO_AN_T1_ADV_M 515 /* BASE-T1 AN advertisement register [31:16] */
+#define MDIO_AN_T1_ADV_H 516 /* BASE-T1 AN advertisement register [47:32] */
+#define MDIO_AN_T1_LP_L 517 /* BASE-T1 AN LP Base Page ability register [15:0] */
+#define MDIO_AN_T1_LP_M 518 /* BASE-T1 AN LP Base Page ability register [31:16] */
+#define MDIO_AN_T1_LP_H 519 /* BASE-T1 AN LP Base Page ability register [47:32] */
+#define MDIO_PMA_PMD_BT1_CTRL 2100 /* BASE-T1 PMA/PMD control register */
/* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. */
#define MDIO_PMA_LASI_RXCTRL 0x9000 /* RX_ALARM control */
@@ -120,6 +135,8 @@
#define MDIO_PMA_SPEED_100 0x0020 /* 100M capable */
#define MDIO_PMA_SPEED_10 0x0040 /* 10M capable */
#define MDIO_PCS_SPEED_10P2B 0x0002 /* 10PASS-TS/2BASE-TL capable */
+#define MDIO_PCS_SPEED_2_5G 0x0040 /* 2.5G capable */
+#define MDIO_PCS_SPEED_5G 0x0080 /* 5G capable */
/* Device present registers. */
#define MDIO_DEVS_PRESENT(devad) (1 << (devad))
@@ -155,6 +172,7 @@
#define MDIO_PMA_CTRL2_10BT 0x000f /* 10BASE-T type */
#define MDIO_PMA_CTRL2_2_5GBT 0x0030 /* 2.5GBaseT type */
#define MDIO_PMA_CTRL2_5GBT 0x0031 /* 5GBaseT type */
+#define MDIO_PMA_CTRL2_BASET1 0x003D /* BASE-T1 type */
#define MDIO_PCS_CTRL2_TYPE 0x0003 /* PCS type selection */
#define MDIO_PCS_CTRL2_10GBR 0x0000 /* 10GBASE-R type */
#define MDIO_PCS_CTRL2_10GBX 0x0001 /* 10GBASE-X type */
@@ -208,6 +226,7 @@
#define MDIO_PMA_EXTABLE_1000BKX 0x0040 /* 1000BASE-KX ability */
#define MDIO_PMA_EXTABLE_100BTX 0x0080 /* 100BASE-TX ability */
#define MDIO_PMA_EXTABLE_10BT 0x0100 /* 10BASE-T ability */
+#define MDIO_PMA_EXTABLE_BT1 0x0800 /* BASE-T1 ability */
#define MDIO_PMA_EXTABLE_NBT 0x4000 /* 2.5/5GBASE-T ability */
/* PHY XGXS lane state register. */
@@ -237,6 +256,9 @@
#define MDIO_PMA_10GBR_FECABLE_ABLE 0x0001 /* FEC ability */
#define MDIO_PMA_10GBR_FECABLE_ERRABLE 0x0002 /* FEC error indic. ability */
+/* PMA 10GBASE-R Fast Retrain status and control register. */
+#define MDIO_PMA_10GBR_FSRT_ENABLE 0x0001 /* Fast retrain enable */
+
/* PCS 10GBASE-R/-T status register 1. */
#define MDIO_PCS_10GBRT_STAT1_BLKLK 0x0001 /* Block lock attained */
@@ -245,6 +267,7 @@
#define MDIO_PCS_10GBRT_STAT2_BER 0x3f00
/* AN 10GBASE-T control register. */
+#define MDIO_AN_10GBT_CTRL_ADVFSRT2_5G 0x0020 /* Advertise 2.5GBASE-T fast retrain */
#define MDIO_AN_10GBT_CTRL_ADV2_5G 0x0080 /* Advertise 2.5GBASE-T */
#define MDIO_AN_10GBT_CTRL_ADV5G 0x0100 /* Advertise 5GBASE-T */
#define MDIO_AN_10GBT_CTRL_ADV10G 0x1000 /* Advertise 10GBASE-T */
@@ -260,6 +283,66 @@
#define MDIO_AN_10GBT_STAT_MS 0x4000 /* Master/slave config */
#define MDIO_AN_10GBT_STAT_MSFLT 0x8000 /* Master/slave config fault */
+/* 10BASE-T1L PMA control */
+#define MDIO_PMA_10T1L_CTRL_LB_EN 0x0001 /* Enable loopback mode */
+#define MDIO_PMA_10T1L_CTRL_EEE_EN 0x0400 /* Enable EEE mode */
+#define MDIO_PMA_10T1L_CTRL_LOW_POWER 0x0800 /* Low-power mode */
+#define MDIO_PMA_10T1L_CTRL_2V4_EN 0x1000 /* Enable 2.4 Vpp operating mode */
+#define MDIO_PMA_10T1L_CTRL_TX_DIS 0x4000 /* Transmit disable */
+#define MDIO_PMA_10T1L_CTRL_PMA_RST 0x8000 /* PMA reset */
+
+/* 10BASE-T1L PMA status register. */
+#define MDIO_PMA_10T1L_STAT_LINK 0x0001 /* PMA receive link up */
+#define MDIO_PMA_10T1L_STAT_FAULT 0x0002 /* Fault condition detected */
+#define MDIO_PMA_10T1L_STAT_POLARITY 0x0004 /* Receive polarity is reversed */
+#define MDIO_PMA_10T1L_STAT_RECV_FAULT 0x0200 /* Able to detect fault on receive path */
+#define MDIO_PMA_10T1L_STAT_EEE 0x0400 /* PHY has EEE ability */
+#define MDIO_PMA_10T1L_STAT_LOW_POWER 0x0800 /* PMA has low-power ability */
+#define MDIO_PMA_10T1L_STAT_2V4_ABLE 0x1000 /* PHY has 2.4 Vpp operating mode ability */
+#define MDIO_PMA_10T1L_STAT_LB_ABLE 0x2000 /* PHY has loopback ability */
+
+/* 10BASE-T1L PCS control register. */
+#define MDIO_PCS_10T1L_CTRL_LB 0x4000 /* Enable PCS level loopback mode */
+#define MDIO_PCS_10T1L_CTRL_RESET 0x8000 /* PCS reset */
+
+/* BASE-T1 PMA/PMD extended ability register. */
+#define MDIO_PMA_PMD_BT1_B10L_ABLE 0x0004 /* 10BASE-T1L Ability */
+
+/* BASE-T1 auto-negotiation advertisement register [15:0] */
+#define MDIO_AN_T1_ADV_L_PAUSE_CAP ADVERTISE_PAUSE_CAP
+#define MDIO_AN_T1_ADV_L_PAUSE_ASYM ADVERTISE_PAUSE_ASYM
+#define MDIO_AN_T1_ADV_L_FORCE_MS 0x1000 /* Force Master/slave Configuration */
+#define MDIO_AN_T1_ADV_L_REMOTE_FAULT ADVERTISE_RFAULT
+#define MDIO_AN_T1_ADV_L_ACK ADVERTISE_LPACK
+#define MDIO_AN_T1_ADV_L_NEXT_PAGE_REQ ADVERTISE_NPAGE
+
+/* BASE-T1 auto-negotiation advertisement register [31:16] */
+#define MDIO_AN_T1_ADV_M_B10L 0x4000 /* device is compatible with 10BASE-T1L */
+#define MDIO_AN_T1_ADV_M_MST 0x0010 /* advertise master preference */
+
+/* BASE-T1 auto-negotiation advertisement register [47:32] */
+#define MDIO_AN_T1_ADV_H_10L_TX_HI_REQ 0x1000 /* 10BASE-T1L High Level Transmit Request */
+#define MDIO_AN_T1_ADV_H_10L_TX_HI 0x2000 /* 10BASE-T1L High Level Transmit Ability */
+
+/* BASE-T1 AN LP Base Page ability register [15:0] */
+#define MDIO_AN_T1_LP_L_PAUSE_CAP LPA_PAUSE_CAP
+#define MDIO_AN_T1_LP_L_PAUSE_ASYM LPA_PAUSE_ASYM
+#define MDIO_AN_T1_LP_L_FORCE_MS 0x1000 /* LP Force Master/slave Configuration */
+#define MDIO_AN_T1_LP_L_REMOTE_FAULT LPA_RFAULT
+#define MDIO_AN_T1_LP_L_ACK LPA_LPACK
+#define MDIO_AN_T1_LP_L_NEXT_PAGE_REQ LPA_NPAGE
+
+/* BASE-T1 AN LP Base Page ability register [31:16] */
+#define MDIO_AN_T1_LP_M_MST 0x0010 /* LP master preference */
+#define MDIO_AN_T1_LP_M_B10L 0x4000 /* LP is compatible with 10BASE-T1L */
+
+/* BASE-T1 AN LP Base Page ability register [47:32] */
+#define MDIO_AN_T1_LP_H_10L_TX_HI_REQ 0x1000 /* 10BASE-T1L High Level LP Transmit Request */
+#define MDIO_AN_T1_LP_H_10L_TX_HI 0x2000 /* 10BASE-T1L High Level LP Transmit Ability */
+
+/* BASE-T1 PMA/PMD control register */
+#define MDIO_PMA_PMD_BT1_CTRL_CFG_MST 0x4000 /* MASTER-SLAVE config value */
+
/* EEE Supported/Advertisement/LP Advertisement registers.
*
* EEE capability Register (3.20), Advertisement (7.60) and
@@ -287,6 +370,9 @@
#define MDIO_EEE_2_5GT 0x0001 /* 2.5GT EEE cap */
#define MDIO_EEE_5GT 0x0002 /* 5GT EEE cap */
+/* AN MultiGBASE-T AN control 2 */
+#define MDIO_AN_THP_BP2_5GT 0x0008 /* 2.5GT THP bypass request */
+
/* 2.5G/5G Extended abilities register. */
#define MDIO_PMA_NG_EXTABLE_2_5GBT 0x0001 /* 2.5GBASET ability */
#define MDIO_PMA_NG_EXTABLE_5GBT 0x0002 /* 5GBASET ability */
@@ -324,4 +410,30 @@ static inline __u16 mdio_phy_id_c45(int prtad, int devad)
return MDIO_PHY_ID_C45 | (prtad << 5) | devad;
}
+/* UsxgmiiChannelInfo[15:0] for USXGMII in-band auto-negotiation. */
+#define MDIO_USXGMII_EEE_CLK_STP 0x0080 /* EEE clock stop supported */
+#define MDIO_USXGMII_EEE 0x0100 /* EEE supported */
+#define MDIO_USXGMII_SPD_MASK 0x0e00 /* USXGMII speed mask */
+#define MDIO_USXGMII_FULL_DUPLEX 0x1000 /* USXGMII full duplex */
+#define MDIO_USXGMII_DPX_SPD_MASK 0x1e00 /* USXGMII duplex and speed bits */
+#define MDIO_USXGMII_10 0x0000 /* 10Mbps */
+#define MDIO_USXGMII_10HALF 0x0000 /* 10Mbps half-duplex */
+#define MDIO_USXGMII_10FULL 0x1000 /* 10Mbps full-duplex */
+#define MDIO_USXGMII_100 0x0200 /* 100Mbps */
+#define MDIO_USXGMII_100HALF 0x0200 /* 100Mbps half-duplex */
+#define MDIO_USXGMII_100FULL 0x1200 /* 100Mbps full-duplex */
+#define MDIO_USXGMII_1000 0x0400 /* 1000Mbps */
+#define MDIO_USXGMII_1000HALF 0x0400 /* 1000Mbps half-duplex */
+#define MDIO_USXGMII_1000FULL 0x1400 /* 1000Mbps full-duplex */
+#define MDIO_USXGMII_10G 0x0600 /* 10Gbps */
+#define MDIO_USXGMII_10GHALF 0x0600 /* 10Gbps half-duplex */
+#define MDIO_USXGMII_10GFULL 0x1600 /* 10Gbps full-duplex */
+#define MDIO_USXGMII_2500 0x0800 /* 2500Mbps */
+#define MDIO_USXGMII_2500HALF 0x0800 /* 2500Mbps half-duplex */
+#define MDIO_USXGMII_2500FULL 0x1800 /* 2500Mbps full-duplex */
+#define MDIO_USXGMII_5000 0x0a00 /* 5000Mbps */
+#define MDIO_USXGMII_5000HALF 0x0a00 /* 5000Mbps half-duplex */
+#define MDIO_USXGMII_5000FULL 0x1a00 /* 5000Mbps full-duplex */
+#define MDIO_USXGMII_LINK 0x8000 /* PHY link with copper-side partner */
+
#endif /* _UAPI__LINUX_MDIO_H__ */
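
A sketch of decoding the USXGMII in-band word with the definitions above; the lpa value would come from the link partner's ability register and is taken as a parameter here:

    #include <linux/mdio.h>
    #include <stdio.h>

    static void usxgmii_decode(unsigned int lpa)
    {
            if (!(lpa & MDIO_USXGMII_LINK)) {
                    printf("link down\n");
                    return;
            }
            printf("%s-duplex, ",
                   lpa & MDIO_USXGMII_FULL_DUPLEX ? "full" : "half");
            switch (lpa & MDIO_USXGMII_SPD_MASK) {
            case MDIO_USXGMII_10:
                    printf("10 Mbps\n");
                    break;
            case MDIO_USXGMII_100:
                    printf("100 Mbps\n");
                    break;
            case MDIO_USXGMII_1000:
                    printf("1000 Mbps\n");
                    break;
            case MDIO_USXGMII_2500:
                    printf("2.5 Gbps\n");
                    break;
            case MDIO_USXGMII_5000:
                    printf("5 Gbps\n");
                    break;
            case MDIO_USXGMII_10G:
                    printf("10 Gbps\n");
                    break;
            }
    }
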
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index 16c1fa2d89a4..ec3323dbb927 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -34,7 +34,7 @@
#define MEDIA_BUS_FMT_FIXED 0x0001
-/* RGB - next is 0x101d */
+/* RGB - next is 0x1022 */
#define MEDIA_BUS_FMT_RGB444_1X12 0x1016
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002
@@ -56,15 +56,20 @@
#define MEDIA_BUS_FMT_RGB888_2X12_BE 0x100b
#define MEDIA_BUS_FMT_RGB888_2X12_LE 0x100c
#define MEDIA_BUS_FMT_RGB888_3X8 0x101c
+#define MEDIA_BUS_FMT_RGB888_3X8_DELTA 0x101d
#define MEDIA_BUS_FMT_RGB888_1X7X4_SPWG 0x1011
#define MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA 0x1012
+#define MEDIA_BUS_FMT_RGB666_1X30_CPADLO 0x101e
+#define MEDIA_BUS_FMT_RGB888_1X30_CPADLO 0x101f
#define MEDIA_BUS_FMT_ARGB8888_1X32 0x100d
#define MEDIA_BUS_FMT_RGB888_1X32_PADHI 0x100f
#define MEDIA_BUS_FMT_RGB101010_1X30 0x1018
+#define MEDIA_BUS_FMT_RGB666_1X36_CPADLO 0x1020
+#define MEDIA_BUS_FMT_RGB888_1X36_CPADLO 0x1021
#define MEDIA_BUS_FMT_RGB121212_1X36 0x1019
#define MEDIA_BUS_FMT_RGB161616_1X48 0x101a
-/* YUV (including grey) - next is 0x202d */
+/* YUV (including grey) - next is 0x202e */
#define MEDIA_BUS_FMT_Y8_1X8 0x2001
#define MEDIA_BUS_FMT_UV8_1X8 0x2015
#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002
@@ -86,6 +91,7 @@
#define MEDIA_BUS_FMT_VYUY12_2X12 0x201d
#define MEDIA_BUS_FMT_YUYV12_2X12 0x201e
#define MEDIA_BUS_FMT_YVYU12_2X12 0x201f
+#define MEDIA_BUS_FMT_Y14_1X14 0x202d
#define MEDIA_BUS_FMT_UYVY8_1X16 0x200f
#define MEDIA_BUS_FMT_VYUY8_1X16 0x2010
#define MEDIA_BUS_FMT_YUYV8_1X16 0x2011
@@ -155,4 +161,12 @@
/* HSV - next is 0x6002 */
#define MEDIA_BUS_FMT_AHSV8888_1X32 0x6001
+/*
+ * This format should be used when the same driver handles
+ * both sides of the link and the bus format is a fixed
+ * metadata format that is not configurable from userspace.
+ * Width and height will be set to 0 for this format.
+ */
+#define MEDIA_BUS_FMT_METADATA_FIXED 0x7001
+
#endif /* __LINUX_MEDIA_BUS_FORMAT_H */
diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h
index 383ac7b7d8f0..3ddadaea849f 100644
--- a/include/uapi/linux/media.h
+++ b/include/uapi/linux/media.h
@@ -20,9 +20,6 @@
#ifndef __LINUX_MEDIA_H
#define __LINUX_MEDIA_H
-#ifndef __KERNEL__
-#include <stdint.h>
-#endif
#include <linux/ioctl.h>
#include <linux/types.h>
@@ -127,6 +124,7 @@ struct media_device_info {
#define MEDIA_ENT_F_PROC_VIDEO_STATISTICS (MEDIA_ENT_F_BASE + 0x4006)
#define MEDIA_ENT_F_PROC_VIDEO_ENCODER (MEDIA_ENT_F_BASE + 0x4007)
#define MEDIA_ENT_F_PROC_VIDEO_DECODER (MEDIA_ENT_F_BASE + 0x4008)
+#define MEDIA_ENT_F_PROC_VIDEO_ISP (MEDIA_ENT_F_BASE + 0x4009)
/*
* Switch and bridge entity functions
@@ -225,6 +223,7 @@ struct media_pad_desc {
#define MEDIA_LNK_FL_LINK_TYPE (0xf << 28)
# define MEDIA_LNK_FL_DATA_LINK (0 << 28)
# define MEDIA_LNK_FL_INTERFACE_LINK (1 << 28)
+# define MEDIA_LNK_FL_ANCILLARY_LINK (2 << 28)
struct media_link_desc {
struct media_pad_desc source;
diff --git a/include/uapi/linux/mei.h b/include/uapi/linux/mei.h
index c6aec86cc5de..4f3638489d01 100644
--- a/include/uapi/linux/mei.h
+++ b/include/uapi/linux/mei.h
@@ -66,4 +66,53 @@ struct mei_connect_client_data {
*/
#define IOCTL_MEI_NOTIFY_GET _IOR('H', 0x03, __u32)
+/**
+ * struct mei_connect_client_vtag - mei client information struct with vtag
+ *
+ * @in_client_uuid: UUID of client to connect
+ * @vtag: virtual tag
+ * @reserved: reserved for future use
+ */
+struct mei_connect_client_vtag {
+ uuid_le in_client_uuid;
+ __u8 vtag;
+ __u8 reserved[3];
+};
+
+/**
+ * struct mei_connect_client_data_vtag - IOCTL connect data union
+ *
+ * @connect: input connect data
+ * @out_client_properties: output client data
+ */
+struct mei_connect_client_data_vtag {
+ union {
+ struct mei_connect_client_vtag connect;
+ struct mei_client out_client_properties;
+ };
+};
+
+/**
+ * DOC:
+ * This IOCTL is used to associate the current file descriptor with a
+ * FW client (given by UUID) and a virtual tag (vtag).
+ * The IOCTL opens a communication channel between a host client and
+ * a FW client on a tagged channel. From this point on, every read
+ * and write will communicate with the associated FW client
+ * on the tagged channel.
+ * Upon close() the communication is terminated.
+ *
+ * The IOCTL argument is a struct with a union that contains
+ * the input parameter and the output parameter for this IOCTL.
+ *
+ * The input parameters are the UUID of the FW client and a vtag [0,255].
+ * The output parameter is the properties of the FW client
+ * (FW protocol version and max message size).
+ *
+ * Clients that do not support tagged connections
+ * will respond with -EOPNOTSUPP.
+ */
+#define IOCTL_MEI_CONNECT_CLIENT_VTAG \
+ _IOWR('H', 0x04, struct mei_connect_client_data_vtag)
+
#endif /* _LINUX_MEI_H */
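
A hedged sketch of a tagged connect following the flow documented above; the device node and the caller-supplied UUID are illustrative:

    #include <fcntl.h>
    #include <linux/mei.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int mei_connect_vtag(const uuid_le *client_uuid, __u8 vtag)
    {
            struct mei_connect_client_data_vtag data;
            int fd;

            fd = open("/dev/mei0", O_RDWR | O_CLOEXEC);
            if (fd < 0)
                    return -1;

            /* Fill the union's input member before the ioctl ... */
            memset(&data, 0, sizeof(data));
            data.connect.in_client_uuid = *client_uuid;
            data.connect.vtag = vtag;

            if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT_VTAG, &data) < 0) {
                    close(fd);      /* -EOPNOTSUPP if tags are unsupported */
                    return -1;
            }
            /* ... then data.out_client_properties holds the FW client
             * properties (protocol version and max message size). */
            return fd;
    }
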
diff --git a/include/uapi/linux/membarrier.h b/include/uapi/linux/membarrier.h
index 5891d7614c8c..737605897f36 100644
--- a/include/uapi/linux/membarrier.h
+++ b/include/uapi/linux/membarrier.h
@@ -114,6 +114,26 @@
* If this command is not implemented by an
* architecture, -EINVAL is returned.
* Returns 0 on success.
+ * @MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ:
+ * Ensure that, upon return from the
+ * system call, all of the caller's running
+ * thread siblings have any currently running rseq
+ * critical sections restarted if @flags
+ * parameter is 0; if @flags parameter is
+ * MEMBARRIER_CMD_FLAG_CPU,
+ * then this operation is performed only
+ * on CPU indicated by @cpu_id. If this command is
+ * not implemented by an architecture, -EINVAL
+ * is returned. A process needs to register its
+ * intent to use the private expedited rseq
+ * command prior to using it, otherwise
+ * this command returns -EPERM.
+ * @MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ:
+ * Register the process intent to use
+ * MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ.
+ * If this command is not implemented by an
+ * architecture, -EINVAL is returned.
+ * Returns 0 on success.
* @MEMBARRIER_CMD_SHARED:
* Alias to MEMBARRIER_CMD_GLOBAL. Provided for
* header backward compatibility.
@@ -131,9 +151,15 @@ enum membarrier_cmd {
MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4),
MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE = (1 << 5),
MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE = (1 << 6),
+ MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ = (1 << 7),
+ MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ = (1 << 8),
/* Alias for header backward compatibility. */
MEMBARRIER_CMD_SHARED = MEMBARRIER_CMD_GLOBAL,
};
+enum membarrier_cmd_flag {
+ MEMBARRIER_CMD_FLAG_CPU = (1 << 0),
+};
+
#endif /* _UAPI_LINUX_MEMBARRIER_H */
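
A sketch of the register-then-use sequence for the rseq commands above; glibc has no membarrier wrapper, so the raw syscall is used, with the cpu_id argument only consulted when MEMBARRIER_CMD_FLAG_CPU is set:

    #include <linux/membarrier.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int membarrier(int cmd, unsigned int flags, int cpu_id)
    {
            return syscall(__NR_membarrier, cmd, flags, cpu_id);
    }

    int restart_rseq_on_cpu(int cpu)
    {
            /* Registration is required first, or the command fails
             * with -EPERM. */
            if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ,
                           0, 0))
                    return -1;
            return membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ,
                              MEMBARRIER_CMD_FLAG_CPU, cpu);
    }
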
diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h
index 3354774af61e..046d0ccba4cd 100644
--- a/include/uapi/linux/mempolicy.h
+++ b/include/uapi/linux/mempolicy.h
@@ -22,18 +22,21 @@ enum {
MPOL_BIND,
MPOL_INTERLEAVE,
MPOL_LOCAL,
+ MPOL_PREFERRED_MANY,
MPOL_MAX, /* always last member of enum */
};
/* Flags for set_mempolicy */
#define MPOL_F_STATIC_NODES (1 << 15)
#define MPOL_F_RELATIVE_NODES (1 << 14)
+#define MPOL_F_NUMA_BALANCING (1 << 13) /* Optimize with NUMA balancing if possible */
/*
* MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
* either set_mempolicy() or mbind().
*/
-#define MPOL_MODE_FLAGS (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)
+#define MPOL_MODE_FLAGS \
+ (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES | MPOL_F_NUMA_BALANCING)
/* Flags for get_mempolicy */
#define MPOL_F_NODE (1<<0) /* return next IL mode instead of node mask */
@@ -58,9 +61,15 @@ enum {
* are never OR'ed into the mode in mempolicy API arguments.
*/
#define MPOL_F_SHARED (1 << 0) /* identify shared policies */
-#define MPOL_F_LOCAL (1 << 1) /* preferred local allocation */
#define MPOL_F_MOF (1 << 3) /* this policy wants migrate on fault */
#define MPOL_F_MORON (1 << 4) /* Migrate On protnone Reference On Node */
+/*
+ * These bit locations are exposed in the vm.zone_reclaim_mode sysctl
+ * ABI. New bits are OK, but existing bits can never change.
+ */
+#define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */
+#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
+#define RECLAIM_UNMAP (1<<2) /* Unmap pages during reclaim */
#endif /* _UAPI_LINUX_MEMPOLICY_H */
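
A sketch of OR'ing MPOL_F_NUMA_BALANCING into the mode argument of set_mempolicy(); the nodemask and the raw-syscall invocation are illustrative assumptions:

    #include <linux/mempolicy.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int bind_with_balancing(void)
    {
            unsigned long nodemask = 0x3;   /* nodes 0 and 1, illustrative */

            /* The optional mode flag rides in the high bits of mode. */
            return syscall(SYS_set_mempolicy,
                           MPOL_BIND | MPOL_F_NUMA_BALANCING,
                           &nodemask, sizeof(nodemask) * 8 + 1);
    }
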
diff --git a/include/uapi/linux/mic_common.h b/include/uapi/linux/mic_common.h
deleted file mode 100644
index 504e523f702c..000000000000
--- a/include/uapi/linux/mic_common.h
+++ /dev/null
@@ -1,235 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Intel MIC driver.
- *
- */
-#ifndef __MIC_COMMON_H_
-#define __MIC_COMMON_H_
-
-#include <linux/virtio_ring.h>
-
-#define __mic_align(a, x) (((a) + (x) - 1) & ~((x) - 1))
-
-/**
- * struct mic_device_desc: Virtio device information shared between the
- * virtio driver and userspace backend
- *
- * @type: Device type: console/network/disk etc. Type 0/-1 terminates.
- * @num_vq: Number of virtqueues.
- * @feature_len: Number of bytes of feature bits. Multiply by 2: one for
- host features and one for guest acknowledgements.
- * @config_len: Number of bytes of the config array after virtqueues.
- * @status: A status byte, written by the Guest.
- * @config: Start of the following variable length config.
- */
-struct mic_device_desc {
- __s8 type;
- __u8 num_vq;
- __u8 feature_len;
- __u8 config_len;
- __u8 status;
- __le64 config[0];
-} __attribute__ ((aligned(8)));
-
-/**
- * struct mic_device_ctrl: Per virtio device information in the device page
- * used internally by the host and card side drivers.
- *
- * @vdev: Used for storing MIC vdev information by the guest.
- * @config_change: Set to 1 by host when a config change is requested.
- * @vdev_reset: Set to 1 by guest to indicate virtio device has been reset.
- * @guest_ack: Set to 1 by guest to ack a command.
- * @host_ack: Set to 1 by host to ack a command.
- * @used_address_updated: Set to 1 by guest when the used address should be
- * updated.
- * @c2h_vdev_db: The doorbell number to be used by guest. Set by host.
- * @h2c_vdev_db: The doorbell number to be used by host. Set by guest.
- */
-struct mic_device_ctrl {
- __le64 vdev;
- __u8 config_change;
- __u8 vdev_reset;
- __u8 guest_ack;
- __u8 host_ack;
- __u8 used_address_updated;
- __s8 c2h_vdev_db;
- __s8 h2c_vdev_db;
-} __attribute__ ((aligned(8)));
-
-/**
- * struct mic_bootparam: Virtio device independent information in device page
- *
- * @magic: A magic value used by the card to ensure it can see the host
- * @h2c_config_db: Host to Card Virtio config doorbell set by card
- * @node_id: Unique id of the node
- * @h2c_scif_db - Host to card SCIF doorbell set by card
- * @c2h_scif_db - Card to host SCIF doorbell set by host
- * @scif_host_dma_addr - SCIF host queue pair DMA address
- * @scif_card_dma_addr - SCIF card queue pair DMA address
- */
-struct mic_bootparam {
- __le32 magic;
- __s8 h2c_config_db;
- __u8 node_id;
- __u8 h2c_scif_db;
- __u8 c2h_scif_db;
- __u64 scif_host_dma_addr;
- __u64 scif_card_dma_addr;
-} __attribute__ ((aligned(8)));
-
-/**
- * struct mic_device_page: High level representation of the device page
- *
- * @bootparam: The bootparam structure is used for sharing information and
- * status updates between MIC host and card drivers.
- * @desc: Array of MIC virtio device descriptors.
- */
-struct mic_device_page {
- struct mic_bootparam bootparam;
- struct mic_device_desc desc[0];
-};
-/**
- * struct mic_vqconfig: This is how we expect the device configuration field
- * for a virtqueue to be laid out in config space.
- *
- * @address: Guest/MIC physical address of the virtio ring
- * (avail and desc rings)
- * @used_address: Guest/MIC physical address of the used ring
- * @num: The number of entries in the virtio_ring
- */
-struct mic_vqconfig {
- __le64 address;
- __le64 used_address;
- __le16 num;
-} __attribute__ ((aligned(8)));
-
-/*
- * The alignment to use between consumer and producer parts of vring.
- * This is pagesize for historical reasons.
- */
-#define MIC_VIRTIO_RING_ALIGN 4096
-
-#define MIC_MAX_VRINGS 4
-#define MIC_VRING_ENTRIES 128
-
-/*
- * Max vring entries (power of 2) to ensure desc and avail rings
- * fit in a single page
- */
-#define MIC_MAX_VRING_ENTRIES 128
-
-/**
- * Max size of the desc block in bytes: includes:
- * - struct mic_device_desc
- * - struct mic_vqconfig (num_vq of these)
- * - host and guest features
- * - virtio device config space
- */
-#define MIC_MAX_DESC_BLK_SIZE 256
-
-/**
- * struct _mic_vring_info - Host vring info exposed to userspace backend
- * for the avail index and magic for the card.
- *
- * @avail_idx: host avail idx
- * @magic: A magic debug cookie.
- */
-struct _mic_vring_info {
- __u16 avail_idx;
- __le32 magic;
-};
-
-/**
- * struct mic_vring - Vring information.
- *
- * @vr: The virtio ring.
- * @info: Host vring information exposed to the userspace backend for the
- * avail index and magic for the card.
- * @va: The va for the buffer allocated for vr and info.
- * @len: The length of the buffer required for allocating vr and info.
- */
-struct mic_vring {
- struct vring vr;
- struct _mic_vring_info *info;
- void *va;
- int len;
-};
-
-#define mic_aligned_desc_size(d) __mic_align(mic_desc_size(d), 8)
-
-#ifndef INTEL_MIC_CARD
-static inline unsigned mic_desc_size(const struct mic_device_desc *desc)
-{
- return sizeof(*desc) + desc->num_vq * sizeof(struct mic_vqconfig)
- + desc->feature_len * 2 + desc->config_len;
-}
-
-static inline struct mic_vqconfig *
-mic_vq_config(const struct mic_device_desc *desc)
-{
- return (struct mic_vqconfig *)(desc + 1);
-}
-
-static inline __u8 *mic_vq_features(const struct mic_device_desc *desc)
-{
- return (__u8 *)(mic_vq_config(desc) + desc->num_vq);
-}
-
-static inline __u8 *mic_vq_configspace(const struct mic_device_desc *desc)
-{
- return mic_vq_features(desc) + desc->feature_len * 2;
-}
-static inline unsigned mic_total_desc_size(struct mic_device_desc *desc)
-{
- return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
-}
-#endif
-
-/* Device page size */
-#define MIC_DP_SIZE 4096
-
-#define MIC_MAGIC 0xc0ffee00
-
-/**
- * enum mic_states - MIC states.
- */
-enum mic_states {
- MIC_READY = 0,
- MIC_BOOTING,
- MIC_ONLINE,
- MIC_SHUTTING_DOWN,
- MIC_RESETTING,
- MIC_RESET_FAILED,
- MIC_LAST
-};
-
-/**
- * enum mic_status - MIC status reported by card after
- * a host or card initiated shutdown or a card crash.
- */
-enum mic_status {
- MIC_NOP = 0,
- MIC_CRASHED,
- MIC_HALTED,
- MIC_POWER_OFF,
- MIC_RESTART,
- MIC_STATUS_LAST
-};
-
-#endif
diff --git a/include/uapi/linux/mic_ioctl.h b/include/uapi/linux/mic_ioctl.h
deleted file mode 100644
index 687b9cd9d3e2..000000000000
--- a/include/uapi/linux/mic_ioctl.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Intel MIC Host driver.
- *
- */
-#ifndef _MIC_IOCTL_H_
-#define _MIC_IOCTL_H_
-
-#include <linux/types.h>
-
-/*
- * mic_copy - MIC virtio descriptor copy.
- *
- * @iov: An array of IOVEC structures containing user space buffers.
- * @iovcnt: Number of IOVEC structures in iov.
- * @vr_idx: The vring index.
- * @update_used: A non zero value results in used index being updated.
- * @out_len: The aggregate of the total length written to or read from
- * the virtio device.
- */
-struct mic_copy_desc {
-#ifdef __KERNEL__
- struct iovec __user *iov;
-#else
- struct iovec *iov;
-#endif
- __u32 iovcnt;
- __u8 vr_idx;
- __u8 update_used;
- __u32 out_len;
-};
-
-/*
- * Add a new virtio device
- * The (struct mic_device_desc *) pointer points to a device page entry
- * for the virtio device consisting of:
- * - struct mic_device_desc
- * - struct mic_vqconfig (num_vq of these)
- * - host and guest features
- * - virtio device config space
- * The total size referenced by the pointer should equal the size returned
- * by desc_size() in mic_common.h
- */
-#define MIC_VIRTIO_ADD_DEVICE _IOWR('s', 1, struct mic_device_desc *)
-
-/*
- * Copy the number of entries in the iovec and update the used index
- * if requested by the user.
- */
-#define MIC_VIRTIO_COPY_DESC _IOWR('s', 2, struct mic_copy_desc *)
-
-/*
- * Notify virtio device of a config change
- * The (__u8 *) pointer points to config space values for the device
- * as they should be written into the device page. The total size
- * referenced by the pointer should equal the config_len field of struct
- * mic_device_desc.
- */
-#define MIC_VIRTIO_CONFIG_CHANGE _IOWR('s', 5, __u8 *)
-
-#endif
diff --git a/include/uapi/linux/mii.h b/include/uapi/linux/mii.h
index 0b9c3beda345..39f7c44baf53 100644
--- a/include/uapi/linux/mii.h
+++ b/include/uapi/linux/mii.h
@@ -134,11 +134,16 @@
/* MAC and PHY tx_config_Reg[15:0] for SGMII in-band auto-negotiation.*/
#define ADVERTISE_SGMII 0x0001 /* MAC can do SGMII */
#define LPA_SGMII 0x0001 /* PHY can do SGMII */
+#define LPA_SGMII_SPD_MASK 0x0c00 /* SGMII speed mask */
+#define LPA_SGMII_FULL_DUPLEX 0x1000 /* SGMII full duplex */
#define LPA_SGMII_DPX_SPD_MASK 0x1C00 /* SGMII duplex and speed bits */
+#define LPA_SGMII_10 0x0000 /* 10Mbps */
#define LPA_SGMII_10HALF 0x0000 /* Can do 10mbps half-duplex */
#define LPA_SGMII_10FULL 0x1000 /* Can do 10mbps full-duplex */
+#define LPA_SGMII_100 0x0400 /* 100Mbps */
#define LPA_SGMII_100HALF 0x0400 /* Can do 100mbps half-duplex */
#define LPA_SGMII_100FULL 0x1400 /* Can do 100mbps full-duplex */
+#define LPA_SGMII_1000 0x0800 /* 1000Mbps */
#define LPA_SGMII_1000HALF 0x0800 /* Can do 1000mbps half-duplex */
#define LPA_SGMII_1000FULL 0x1800 /* Can do 1000mbps full-duplex */
#define LPA_SGMII_LINK 0x8000 /* PHY link with copper-side partner */
@@ -146,11 +151,13 @@
/* 1000BASE-T Control register */
#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */
+#define CTL1000_PREFER_MASTER 0x0400 /* prefer to operate as master */
#define CTL1000_AS_MASTER 0x0800
#define CTL1000_ENABLE_MASTER 0x1000
/* 1000BASE-T Status register */
#define LPA_1000MSFAIL 0x8000 /* Master/Slave resolution failure */
+#define LPA_1000MSRES 0x4000 /* Master/Slave resolution status */
#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */
#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */
#define LPA_1000FULL 0x0800 /* Link partner 1000BASE-T full duplex */
diff --git a/include/uapi/linux/minix_fs.h b/include/uapi/linux/minix_fs.h
index 95dbcb17eacd..8d9ca8b2c357 100644
--- a/include/uapi/linux/minix_fs.h
+++ b/include/uapi/linux/minix_fs.h
@@ -97,11 +97,11 @@ struct minix3_super_block {
struct minix_dir_entry {
__u16 inode;
- char name[0];
+ char name[];
};
struct minix3_dir_entry {
__u32 inode;
- char name[0];
+ char name[];
};
#endif
diff --git a/include/uapi/linux/misc/bcm_vk.h b/include/uapi/linux/misc/bcm_vk.h
new file mode 100644
index 000000000000..ec28e0bd46a9
--- /dev/null
+++ b/include/uapi/linux/misc/bcm_vk.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright 2018-2020 Broadcom.
+ */
+
+#ifndef __UAPI_LINUX_MISC_BCM_VK_H
+#define __UAPI_LINUX_MISC_BCM_VK_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define BCM_VK_MAX_FILENAME 64
+
+struct vk_image {
+ __u32 type; /* Type of image */
+#define VK_IMAGE_TYPE_BOOT1 1 /* 1st stage (load to SRAM) */
+#define VK_IMAGE_TYPE_BOOT2 2 /* 2nd stage (load to DDR) */
+ __u8 filename[BCM_VK_MAX_FILENAME]; /* Filename of image */
+};
+
+struct vk_reset {
+ __u32 arg1;
+ __u32 arg2;
+};
+
+#define VK_MAGIC 0x5e
+
+/* Load image to Valkyrie */
+#define VK_IOCTL_LOAD_IMAGE _IOW(VK_MAGIC, 0x2, struct vk_image)
+
+/* Send Reset to Valkyrie */
+#define VK_IOCTL_RESET _IOW(VK_MAGIC, 0x4, struct vk_reset)
+
+/*
+ * Firmware Status accessed directly via BAR space
+ */
+#define VK_BAR_FWSTS 0x41c
+#define VK_BAR_COP_FWSTS 0x428
+/* VK_FWSTS definitions */
+#define VK_FWSTS_RELOCATION_ENTRY (1UL << 0)
+#define VK_FWSTS_RELOCATION_EXIT (1UL << 1)
+#define VK_FWSTS_INIT_START (1UL << 2)
+#define VK_FWSTS_ARCH_INIT_DONE (1UL << 3)
+#define VK_FWSTS_PRE_KNL1_INIT_DONE (1UL << 4)
+#define VK_FWSTS_PRE_KNL2_INIT_DONE (1UL << 5)
+#define VK_FWSTS_POST_KNL_INIT_DONE (1UL << 6)
+#define VK_FWSTS_INIT_DONE (1UL << 7)
+#define VK_FWSTS_APP_INIT_START (1UL << 8)
+#define VK_FWSTS_APP_INIT_DONE (1UL << 9)
+#define VK_FWSTS_MASK 0xffffffff
+#define VK_FWSTS_READY (VK_FWSTS_INIT_START | \
+ VK_FWSTS_ARCH_INIT_DONE | \
+ VK_FWSTS_PRE_KNL1_INIT_DONE | \
+ VK_FWSTS_PRE_KNL2_INIT_DONE | \
+ VK_FWSTS_POST_KNL_INIT_DONE | \
+ VK_FWSTS_INIT_DONE | \
+ VK_FWSTS_APP_INIT_START | \
+ VK_FWSTS_APP_INIT_DONE)
+/* Deinit */
+#define VK_FWSTS_APP_DEINIT_START (1UL << 23)
+#define VK_FWSTS_APP_DEINIT_DONE (1UL << 24)
+#define VK_FWSTS_DRV_DEINIT_START (1UL << 25)
+#define VK_FWSTS_DRV_DEINIT_DONE (1UL << 26)
+#define VK_FWSTS_RESET_DONE (1UL << 27)
+#define VK_FWSTS_DEINIT_TRIGGERED (VK_FWSTS_APP_DEINIT_START | \
+ VK_FWSTS_APP_DEINIT_DONE | \
+ VK_FWSTS_DRV_DEINIT_START | \
+ VK_FWSTS_DRV_DEINIT_DONE)
+/* Last nibble for reboot reason */
+#define VK_FWSTS_RESET_REASON_SHIFT 28
+#define VK_FWSTS_RESET_REASON_MASK (0xf << VK_FWSTS_RESET_REASON_SHIFT)
+#define VK_FWSTS_RESET_SYS_PWRUP (0x0 << VK_FWSTS_RESET_REASON_SHIFT)
+#define VK_FWSTS_RESET_MBOX_DB (0x1 << VK_FWSTS_RESET_REASON_SHIFT)
+#define VK_FWSTS_RESET_M7_WDOG (0x2 << VK_FWSTS_RESET_REASON_SHIFT)
+#define VK_FWSTS_RESET_TEMP (0x3 << VK_FWSTS_RESET_REASON_SHIFT)
+#define VK_FWSTS_RESET_PCI_FLR (0x4 << VK_FWSTS_RESET_REASON_SHIFT)
+#define VK_FWSTS_RESET_PCI_HOT (0x5 << VK_FWSTS_RESET_REASON_SHIFT)
+#define VK_FWSTS_RESET_PCI_WARM (0x6 << VK_FWSTS_RESET_REASON_SHIFT)
+#define VK_FWSTS_RESET_PCI_COLD (0x7 << VK_FWSTS_RESET_REASON_SHIFT)
+#define VK_FWSTS_RESET_L1 (0x8 << VK_FWSTS_RESET_REASON_SHIFT)
+#define VK_FWSTS_RESET_L0 (0x9 << VK_FWSTS_RESET_REASON_SHIFT)
+#define VK_FWSTS_RESET_UNKNOWN (0xf << VK_FWSTS_RESET_REASON_SHIFT)
+
+#endif /* __UAPI_LINUX_MISC_BCM_VK_H */
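
A short sketch of how the new firmware-status word is meant to be consumed,
assuming the word has already been read from BAR offset VK_BAR_FWSTS; the
value used here is fabricated for illustration.

/* Sketch: check readiness and extract the reboot-reason nibble from a
 * Valkyrie firmware status word. 'fwsts' is a made-up example; a real
 * reader would take it from BAR offset VK_BAR_FWSTS.
 */
#include <stdio.h>
#include <linux/misc/bcm_vk.h>

int main(void)
{
	unsigned int fwsts = VK_FWSTS_RESET_MBOX_DB | VK_FWSTS_INIT_DONE;

	printf("fw %sready, reset reason nibble %#x\n",
	       (fwsts & VK_FWSTS_READY) == VK_FWSTS_READY ? "" : "not ",
	       (fwsts & VK_FWSTS_RESET_REASON_MASK) >>
			VK_FWSTS_RESET_REASON_SHIFT);
	return 0;
}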
diff --git a/include/uapi/linux/mman.h b/include/uapi/linux/mman.h
index fc1a64c3447b..f55bc680b5b0 100644
--- a/include/uapi/linux/mman.h
+++ b/include/uapi/linux/mman.h
@@ -5,8 +5,9 @@
#include <asm/mman.h>
#include <asm-generic/hugetlb_encode.h>
-#define MREMAP_MAYMOVE 1
-#define MREMAP_FIXED 2
+#define MREMAP_MAYMOVE 1
+#define MREMAP_FIXED 2
+#define MREMAP_DONTUNMAP 4
#define OVERCOMMIT_GUESS 0
#define OVERCOMMIT_ALWAYS 1
@@ -26,6 +27,7 @@
#define MAP_HUGE_SHIFT HUGETLB_FLAG_ENCODE_SHIFT
#define MAP_HUGE_MASK HUGETLB_FLAG_ENCODE_MASK
+#define MAP_HUGE_16KB HUGETLB_FLAG_ENCODE_16KB
#define MAP_HUGE_64KB HUGETLB_FLAG_ENCODE_64KB
#define MAP_HUGE_512KB HUGETLB_FLAG_ENCODE_512KB
#define MAP_HUGE_1MB HUGETLB_FLAG_ENCODE_1MB
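
MREMAP_DONTUNMAP moves the underlying pages to the new range but leaves the
old range mapped (it reads back as zero-filled anonymous memory afterwards),
which is what userfaultfd-based live-migration schemes rely on. A minimal
sketch, assuming a libc new enough to expose the flag from <sys/mman.h>
(otherwise it comes from <linux/mman.h>); the flag only applies to private
anonymous mappings.

/* Sketch: move an anonymous mapping with MREMAP_DONTUNMAP so the old
 * range stays mapped. Error handling is deliberately minimal.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	void *src = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (src == MAP_FAILED)
		return 1;
	memset(src, 0xab, len);

	void *dst = mremap(src, len, len,
			   MREMAP_MAYMOVE | MREMAP_DONTUNMAP);
	if (dst == MAP_FAILED) {
		perror("mremap");
		return 1;
	}
	/* contents moved to dst; src is still mapped but zero-filled */
	printf("src byte %#x, dst byte %#x\n",
	       ((unsigned char *)src)[0], ((unsigned char *)dst)[0]);
	return 0;
}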
diff --git a/include/uapi/linux/mmc/ioctl.h b/include/uapi/linux/mmc/ioctl.h
index 00c08120f3ba..e7401ade6822 100644
--- a/include/uapi/linux/mmc/ioctl.h
+++ b/include/uapi/linux/mmc/ioctl.h
@@ -3,6 +3,7 @@
#define LINUX_MMC_IOCTL_H
#include <linux/types.h>
+#include <linux/major.h>
struct mmc_ioc_cmd {
/*
@@ -57,7 +58,7 @@ struct mmc_ioc_cmd {
*/
struct mmc_ioc_multi_cmd {
__u64 num_of_cmds;
- struct mmc_ioc_cmd cmds[0];
+ struct mmc_ioc_cmd cmds[];
};
#define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd)
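
The cmds[0] -> cmds[] change above is the C99 flexible-array-member idiom; the
size arithmetic callers do is unchanged. A sketch of the allocation pattern,
with the MMC_IOC_MULTI_CMD ioctl as the eventual consumer and the actual
command setup omitted:

/* Sketch: size an allocation for a struct ending in a flexible array.
 * sizeof(*mc) excludes cmds[], so the array space is added explicitly.
 */
#include <stdlib.h>
#include <linux/mmc/ioctl.h>

struct mmc_ioc_multi_cmd *alloc_multi_cmd(__u64 n)
{
	struct mmc_ioc_multi_cmd *mc;

	mc = calloc(1, sizeof(*mc) + n * sizeof(mc->cmds[0]));
	if (mc)
		mc->num_of_cmds = n;
	return mc;	/* fill cmds[], then ioctl(fd, MMC_IOC_MULTI_CMD, mc) */
}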
diff --git a/include/uapi/linux/module.h b/include/uapi/linux/module.h
index 50d98ec5e866..03a33ffffcba 100644
--- a/include/uapi/linux/module.h
+++ b/include/uapi/linux/module.h
@@ -5,5 +5,6 @@
/* Flags for sys_finit_module: */
#define MODULE_INIT_IGNORE_MODVERSIONS 1
#define MODULE_INIT_IGNORE_VERMAGIC 2
+#define MODULE_INIT_COMPRESSED_FILE 4
#endif /* _UAPI_LINUX_MODULE_H */
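
MODULE_INIT_COMPRESSED_FILE tells finit_module(2) that the file descriptor
refers to a compressed module image the kernel should decompress itself. A
sketch via the raw syscall; the module path is a placeholder, and the running
kernel must have been built with in-kernel module decompression.

/* Sketch: load a compressed module, letting the kernel decompress it. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/module.h>

int main(void)
{
	int fd = open("/lib/modules/example.ko.xz", O_RDONLY | O_CLOEXEC);

	if (fd < 0)
		return 1;
	if (syscall(SYS_finit_module, fd, "", MODULE_INIT_COMPRESSED_FILE))
		perror("finit_module");
	close(fd);
	return 0;
}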
diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h
index 96a0240f23fe..4d93967f8aea 100644
--- a/include/uapi/linux/mount.h
+++ b/include/uapi/linux/mount.h
@@ -1,6 +1,8 @@
#ifndef _UAPI_LINUX_MOUNT_H
#define _UAPI_LINUX_MOUNT_H
+#include <linux/types.h>
+
/*
* These are the fs-independent mount-flags: up to 32 flags are supported
*
@@ -16,6 +18,7 @@
#define MS_REMOUNT 32 /* Alter flags of a mounted FS */
#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */
#define MS_DIRSYNC 128 /* Directory modifications are synchronous */
+#define MS_NOSYMFOLLOW 256 /* Do not follow symlinks */
#define MS_NOATIME 1024 /* Do not update access times. */
#define MS_NODIRATIME 2048 /* Do not update directory access times */
#define MS_BIND 4096
@@ -70,7 +73,8 @@
#define MOVE_MOUNT_T_SYMLINKS 0x00000010 /* Follow symlinks on to path */
#define MOVE_MOUNT_T_AUTOMOUNTS 0x00000020 /* Follow automounts on to path */
#define MOVE_MOUNT_T_EMPTY_PATH 0x00000040 /* Empty to path permitted */
-#define MOVE_MOUNT__MASK 0x00000077
+#define MOVE_MOUNT_SET_GROUP 0x00000100 /* Set sharing group instead */
+#define MOVE_MOUNT__MASK 0x00000177
/*
* fsopen() flags.
@@ -116,5 +120,20 @@ enum fsconfig_command {
#define MOUNT_ATTR_NOATIME 0x00000010 /* - Do not update access times. */
#define MOUNT_ATTR_STRICTATIME 0x00000020 /* - Always perform atime updates */
#define MOUNT_ATTR_NODIRATIME 0x00000080 /* Do not update directory access times */
+#define MOUNT_ATTR_IDMAP 0x00100000 /* Idmap mount to @userns_fd in struct mount_attr. */
+#define MOUNT_ATTR_NOSYMFOLLOW 0x00200000 /* Do not follow symlinks */
+
+/*
+ * mount_setattr()
+ */
+struct mount_attr {
+ __u64 attr_set;
+ __u64 attr_clr;
+ __u64 propagation;
+ __u64 userns_fd;
+};
+
+/* List of all mount_attr versions. */
+#define MOUNT_ATTR_SIZE_VER0 32 /* sizeof first published struct */
#endif /* _UAPI_LINUX_MOUNT_H */
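
struct mount_attr is the argument block for the new mount_setattr(2) syscall:
attr_set and attr_clr carry MOUNT_ATTR_* bits to set and clear, and userns_fd
is consumed only together with MOUNT_ATTR_IDMAP. A sketch that applies
MOUNT_ATTR_NOSYMFOLLOW to a whole subtree; the target path is a placeholder,
the raw syscall is used because older libcs have no wrapper, and
SYS_mount_setattr assumes kernel 5.12+ headers.

/* Sketch: recursively forbid symlink traversal on a mounted subtree. */
#define _GNU_SOURCE
#include <fcntl.h>		/* AT_FDCWD */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mount.h>

#ifndef AT_RECURSIVE
#define AT_RECURSIVE 0x8000	/* from <linux/fcntl.h> */
#endif

int main(void)
{
	struct mount_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.attr_set = MOUNT_ATTR_NOSYMFOLLOW;

	if (syscall(SYS_mount_setattr, AT_FDCWD, "/mnt/data",
		    AT_RECURSIVE, &attr, sizeof(attr)))
		perror("mount_setattr");
	return 0;
}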
diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h
new file mode 100644
index 000000000000..dfe19bf13f4c
--- /dev/null
+++ b/include/uapi/linux/mptcp.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _UAPI_MPTCP_H
+#define _UAPI_MPTCP_H
+
+#ifndef __KERNEL__
+#include <netinet/in.h> /* for sockaddr_in and sockaddr_in6 */
+#include <sys/socket.h> /* for struct sockaddr */
+#endif
+
+#include <linux/const.h>
+#include <linux/types.h>
+#include <linux/in.h> /* for sockaddr_in */
+#include <linux/in6.h> /* for sockaddr_in6 */
+#include <linux/socket.h> /* for sockaddr_storage and sa_family */
+
+#define MPTCP_SUBFLOW_FLAG_MCAP_REM _BITUL(0)
+#define MPTCP_SUBFLOW_FLAG_MCAP_LOC _BITUL(1)
+#define MPTCP_SUBFLOW_FLAG_JOIN_REM _BITUL(2)
+#define MPTCP_SUBFLOW_FLAG_JOIN_LOC _BITUL(3)
+#define MPTCP_SUBFLOW_FLAG_BKUP_REM _BITUL(4)
+#define MPTCP_SUBFLOW_FLAG_BKUP_LOC _BITUL(5)
+#define MPTCP_SUBFLOW_FLAG_FULLY_ESTABLISHED _BITUL(6)
+#define MPTCP_SUBFLOW_FLAG_CONNECTED _BITUL(7)
+#define MPTCP_SUBFLOW_FLAG_MAPVALID _BITUL(8)
+
+enum {
+ MPTCP_SUBFLOW_ATTR_UNSPEC,
+ MPTCP_SUBFLOW_ATTR_TOKEN_REM,
+ MPTCP_SUBFLOW_ATTR_TOKEN_LOC,
+ MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ,
+ MPTCP_SUBFLOW_ATTR_MAP_SEQ,
+ MPTCP_SUBFLOW_ATTR_MAP_SFSEQ,
+ MPTCP_SUBFLOW_ATTR_SSN_OFFSET,
+ MPTCP_SUBFLOW_ATTR_MAP_DATALEN,
+ MPTCP_SUBFLOW_ATTR_FLAGS,
+ MPTCP_SUBFLOW_ATTR_ID_REM,
+ MPTCP_SUBFLOW_ATTR_ID_LOC,
+ MPTCP_SUBFLOW_ATTR_PAD,
+ __MPTCP_SUBFLOW_ATTR_MAX
+};
+
+#define MPTCP_SUBFLOW_ATTR_MAX (__MPTCP_SUBFLOW_ATTR_MAX - 1)
+
+/* netlink interface */
+#define MPTCP_PM_NAME "mptcp_pm"
+#define MPTCP_PM_CMD_GRP_NAME "mptcp_pm_cmds"
+#define MPTCP_PM_EV_GRP_NAME "mptcp_pm_events"
+#define MPTCP_PM_VER 0x1
+
+/*
+ * ATTR types defined for MPTCP
+ */
+enum {
+ MPTCP_PM_ATTR_UNSPEC,
+
+ MPTCP_PM_ATTR_ADDR, /* nested address */
+ MPTCP_PM_ATTR_RCV_ADD_ADDRS, /* u32 */
+ MPTCP_PM_ATTR_SUBFLOWS, /* u32 */
+ MPTCP_PM_ATTR_TOKEN, /* u32 */
+ MPTCP_PM_ATTR_LOC_ID, /* u8 */
+ MPTCP_PM_ATTR_ADDR_REMOTE, /* nested address */
+
+ __MPTCP_PM_ATTR_MAX
+};
+
+#define MPTCP_PM_ATTR_MAX (__MPTCP_PM_ATTR_MAX - 1)
+
+enum {
+ MPTCP_PM_ADDR_ATTR_UNSPEC,
+
+ MPTCP_PM_ADDR_ATTR_FAMILY, /* u16 */
+ MPTCP_PM_ADDR_ATTR_ID, /* u8 */
+ MPTCP_PM_ADDR_ATTR_ADDR4, /* struct in_addr */
+ MPTCP_PM_ADDR_ATTR_ADDR6, /* struct in6_addr */
+ MPTCP_PM_ADDR_ATTR_PORT, /* u16 */
+ MPTCP_PM_ADDR_ATTR_FLAGS, /* u32 */
+ MPTCP_PM_ADDR_ATTR_IF_IDX, /* s32 */
+
+ __MPTCP_PM_ADDR_ATTR_MAX
+};
+
+#define MPTCP_PM_ADDR_ATTR_MAX (__MPTCP_PM_ADDR_ATTR_MAX - 1)
+
+#define MPTCP_PM_ADDR_FLAG_SIGNAL (1 << 0)
+#define MPTCP_PM_ADDR_FLAG_SUBFLOW (1 << 1)
+#define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2)
+#define MPTCP_PM_ADDR_FLAG_FULLMESH (1 << 3)
+#define MPTCP_PM_ADDR_FLAG_IMPLICIT (1 << 4)
+
+enum {
+ MPTCP_PM_CMD_UNSPEC,
+
+ MPTCP_PM_CMD_ADD_ADDR,
+ MPTCP_PM_CMD_DEL_ADDR,
+ MPTCP_PM_CMD_GET_ADDR,
+ MPTCP_PM_CMD_FLUSH_ADDRS,
+ MPTCP_PM_CMD_SET_LIMITS,
+ MPTCP_PM_CMD_GET_LIMITS,
+ MPTCP_PM_CMD_SET_FLAGS,
+ MPTCP_PM_CMD_ANNOUNCE,
+ MPTCP_PM_CMD_REMOVE,
+ MPTCP_PM_CMD_SUBFLOW_CREATE,
+ MPTCP_PM_CMD_SUBFLOW_DESTROY,
+
+ __MPTCP_PM_CMD_AFTER_LAST
+};
+
+#define MPTCP_INFO_FLAG_FALLBACK _BITUL(0)
+#define MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED _BITUL(1)
+
+struct mptcp_info {
+ __u8 mptcpi_subflows;
+ __u8 mptcpi_add_addr_signal;
+ __u8 mptcpi_add_addr_accepted;
+ __u8 mptcpi_subflows_max;
+ __u8 mptcpi_add_addr_signal_max;
+ __u8 mptcpi_add_addr_accepted_max;
+ __u32 mptcpi_flags;
+ __u32 mptcpi_token;
+ __u64 mptcpi_write_seq;
+ __u64 mptcpi_snd_una;
+ __u64 mptcpi_rcv_nxt;
+ __u8 mptcpi_local_addr_used;
+ __u8 mptcpi_local_addr_max;
+ __u8 mptcpi_csum_enabled;
+};
+
+/*
+ * MPTCP_EVENT_CREATED: token, family, saddr4 | saddr6, daddr4 | daddr6,
+ * sport, dport
+ * A new MPTCP connection has been created. This is a good time to allocate
+ * memory and send ADD_ADDR if needed. Depending on the traffic pattern,
+ * it can take a long time until the MPTCP_EVENT_ESTABLISHED is sent.
+ *
+ * MPTCP_EVENT_ESTABLISHED: token, family, saddr4 | saddr6, daddr4 | daddr6,
+ * sport, dport
+ * An MPTCP connection is established (new subflows can be created).
+ *
+ * MPTCP_EVENT_CLOSED: token
+ * An MPTCP connection has stopped.
+ *
+ * MPTCP_EVENT_ANNOUNCED: token, rem_id, family, daddr4 | daddr6 [, dport]
+ * A new address has been announced by the peer.
+ *
+ * MPTCP_EVENT_REMOVED: token, rem_id
+ * An address has been lost by the peer.
+ *
+ * MPTCP_EVENT_SUB_ESTABLISHED: token, family, loc_id, rem_id,
+ * saddr4 | saddr6, daddr4 | daddr6, sport,
+ * dport, backup, if_idx [, error]
+ * A new subflow has been established. 'error' should not be set.
+ *
+ * MPTCP_EVENT_SUB_CLOSED: token, family, loc_id, rem_id, saddr4 | saddr6,
+ * daddr4 | daddr6, sport, dport, backup, if_idx
+ * [, error]
+ * A subflow has been closed. An error (copy of sk_err) could be set if an
+ * error has been detected for this subflow.
+ *
+ * MPTCP_EVENT_SUB_PRIORITY: token, family, loc_id, rem_id, saddr4 | saddr6,
+ * daddr4 | daddr6, sport, dport, backup, if_idx
+ * [, error]
+ * The priority of a subflow has changed. 'error' should not be set.
+ */
+enum mptcp_event_type {
+ MPTCP_EVENT_UNSPEC = 0,
+ MPTCP_EVENT_CREATED = 1,
+ MPTCP_EVENT_ESTABLISHED = 2,
+ MPTCP_EVENT_CLOSED = 3,
+
+ MPTCP_EVENT_ANNOUNCED = 6,
+ MPTCP_EVENT_REMOVED = 7,
+
+ MPTCP_EVENT_SUB_ESTABLISHED = 10,
+ MPTCP_EVENT_SUB_CLOSED = 11,
+
+ MPTCP_EVENT_SUB_PRIORITY = 13,
+};
+
+enum mptcp_event_attr {
+ MPTCP_ATTR_UNSPEC = 0,
+
+ MPTCP_ATTR_TOKEN, /* u32 */
+ MPTCP_ATTR_FAMILY, /* u16 */
+ MPTCP_ATTR_LOC_ID, /* u8 */
+ MPTCP_ATTR_REM_ID, /* u8 */
+ MPTCP_ATTR_SADDR4, /* be32 */
+ MPTCP_ATTR_SADDR6, /* struct in6_addr */
+ MPTCP_ATTR_DADDR4, /* be32 */
+ MPTCP_ATTR_DADDR6, /* struct in6_addr */
+ MPTCP_ATTR_SPORT, /* be16 */
+ MPTCP_ATTR_DPORT, /* be16 */
+ MPTCP_ATTR_BACKUP, /* u8 */
+ MPTCP_ATTR_ERROR, /* u8 */
+ MPTCP_ATTR_FLAGS, /* u16 */
+ MPTCP_ATTR_TIMEOUT, /* u32 */
+ MPTCP_ATTR_IF_IDX, /* s32 */
+ MPTCP_ATTR_RESET_REASON,/* u32 */
+ MPTCP_ATTR_RESET_FLAGS, /* u32 */
+ MPTCP_ATTR_SERVER_SIDE, /* u8 */
+
+ __MPTCP_ATTR_AFTER_LAST
+};
+
+#define MPTCP_ATTR_MAX (__MPTCP_ATTR_AFTER_LAST - 1)
+
+/* MPTCP Reset reason codes, rfc8684 */
+#define MPTCP_RST_EUNSPEC 0
+#define MPTCP_RST_EMPTCP 1
+#define MPTCP_RST_ERESOURCE 2
+#define MPTCP_RST_EPROHIBIT 3
+#define MPTCP_RST_EWQ2BIG 4
+#define MPTCP_RST_EBADPERF 5
+#define MPTCP_RST_EMIDDLEBOX 6
+
+struct mptcp_subflow_data {
+ __u32 size_subflow_data; /* size of this structure in userspace */
+ __u32 num_subflows; /* must be 0, set by kernel */
+ __u32 size_kernel; /* must be 0, set by kernel */
+ __u32 size_user; /* size of one element in data[] */
+} __attribute__((aligned(8)));
+
+struct mptcp_subflow_addrs {
+ union {
+ __kernel_sa_family_t sa_family;
+ struct sockaddr sa_local;
+ struct sockaddr_in sin_local;
+ struct sockaddr_in6 sin6_local;
+ struct __kernel_sockaddr_storage ss_local;
+ };
+ union {
+ struct sockaddr sa_remote;
+ struct sockaddr_in sin_remote;
+ struct sockaddr_in6 sin6_remote;
+ struct __kernel_sockaddr_storage ss_remote;
+ };
+};
+
+/* MPTCP socket options */
+#define MPTCP_INFO 1
+#define MPTCP_TCPINFO 2
+#define MPTCP_SUBFLOW_ADDRS 3
+
+#endif /* _UAPI_MPTCP_H */
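
struct mptcp_info above is retrieved with getsockopt() on an MPTCP socket. A
sketch; the IPPROTO_MPTCP and SOL_MPTCP fallback values match the kernel UAPI
but are restated locally in case the toolchain headers predate MPTCP, and a
freshly created socket simply yields zeroed counters.

/* Sketch: query MPTCP connection state via the MPTCP_INFO sockopt. */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mptcp.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262
#endif
#ifndef SOL_MPTCP
#define SOL_MPTCP 284
#endif

int main(void)
{
	struct mptcp_info info;
	socklen_t len = sizeof(info);
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);

	if (fd < 0 || getsockopt(fd, SOL_MPTCP, MPTCP_INFO, &info, &len)) {
		perror("mptcp");
		return 1;
	}
	printf("subflows: %u, token: %#x\n",
	       info.mptcpi_subflows, info.mptcpi_token);
	return 0;
}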
diff --git a/include/uapi/linux/mroute.h b/include/uapi/linux/mroute.h
index 11c8c1fc1124..1a42f5f9b31b 100644
--- a/include/uapi/linux/mroute.h
+++ b/include/uapi/linux/mroute.h
@@ -113,8 +113,8 @@ struct igmpmsg {
__u32 unused1,unused2;
unsigned char im_msgtype; /* What is this */
unsigned char im_mbz; /* Must be zero */
- unsigned char im_vif; /* Interface (this ought to be a vifi_t!) */
- unsigned char unused3;
+ unsigned char im_vif; /* Low 8 bits of Interface */
+ unsigned char im_vif_hi; /* High 8 bits of Interface */
struct in_addr im_src,im_dst;
};
@@ -169,6 +169,7 @@ enum {
IPMRA_CREPORT_SRC_ADDR,
IPMRA_CREPORT_DST_ADDR,
IPMRA_CREPORT_PKT,
+ IPMRA_CREPORT_TABLE,
__IPMRA_CREPORT_MAX
};
#define IPMRA_CREPORT_MAX (__IPMRA_CREPORT_MAX - 1)
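
With im_vif now carrying only the low 8 bits of the interface index and
im_vif_hi the high 8, readers of IGMPMSG notifications reassemble the full
VIF number. A one-function sketch:

#include <linux/mroute.h>

/* Rebuild the 16-bit VIF index from the split igmpmsg fields. */
static unsigned int igmpmsg_vif(const struct igmpmsg *im)
{
	return im->im_vif | (im->im_vif_hi << 8);
}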
diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h
index c36177a86516..1d90c21a6251 100644
--- a/include/uapi/linux/mroute6.h
+++ b/include/uapi/linux/mroute6.h
@@ -2,7 +2,7 @@
#ifndef _UAPI__LINUX_MROUTE6_H
#define _UAPI__LINUX_MROUTE6_H
-#include <linux/kernel.h>
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/in6.h> /* For struct sockaddr_in6. */
@@ -134,6 +134,7 @@ struct mrt6msg {
#define MRT6MSG_NOCACHE 1
#define MRT6MSG_WRONGMIF 2
#define MRT6MSG_WHOLEPKT 3 /* used for user level encap */
+#define MRT6MSG_WRMIFWHOLE 4 /* For PIM Register and assert processing */
__u8 im6_mbz; /* must be zero */
__u8 im6_msgtype; /* what type of message */
__u16 im6_mif; /* mif rec'd on */
diff --git a/include/uapi/linux/mrp_bridge.h b/include/uapi/linux/mrp_bridge.h
new file mode 100644
index 000000000000..bd4424de56ff
--- /dev/null
+++ b/include/uapi/linux/mrp_bridge.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+
+#ifndef _UAPI_LINUX_MRP_BRIDGE_H_
+#define _UAPI_LINUX_MRP_BRIDGE_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#define MRP_MAX_FRAME_LENGTH 200
+#define MRP_DEFAULT_PRIO 0x8000
+#define MRP_DOMAIN_UUID_LENGTH 16
+#define MRP_VERSION 1
+#define MRP_FRAME_PRIO 7
+#define MRP_OUI_LENGTH 3
+#define MRP_MANUFACTURE_DATA_LENGTH 2
+
+enum br_mrp_ring_role_type {
+ BR_MRP_RING_ROLE_DISABLED,
+ BR_MRP_RING_ROLE_MRC,
+ BR_MRP_RING_ROLE_MRM,
+ BR_MRP_RING_ROLE_MRA,
+};
+
+enum br_mrp_in_role_type {
+ BR_MRP_IN_ROLE_DISABLED,
+ BR_MRP_IN_ROLE_MIC,
+ BR_MRP_IN_ROLE_MIM,
+};
+
+enum br_mrp_ring_state_type {
+ BR_MRP_RING_STATE_OPEN,
+ BR_MRP_RING_STATE_CLOSED,
+};
+
+enum br_mrp_in_state_type {
+ BR_MRP_IN_STATE_OPEN,
+ BR_MRP_IN_STATE_CLOSED,
+};
+
+enum br_mrp_port_state_type {
+ BR_MRP_PORT_STATE_DISABLED,
+ BR_MRP_PORT_STATE_BLOCKED,
+ BR_MRP_PORT_STATE_FORWARDING,
+ BR_MRP_PORT_STATE_NOT_CONNECTED,
+};
+
+enum br_mrp_port_role_type {
+ BR_MRP_PORT_ROLE_PRIMARY,
+ BR_MRP_PORT_ROLE_SECONDARY,
+ BR_MRP_PORT_ROLE_INTER,
+};
+
+enum br_mrp_tlv_header_type {
+ BR_MRP_TLV_HEADER_END = 0x0,
+ BR_MRP_TLV_HEADER_COMMON = 0x1,
+ BR_MRP_TLV_HEADER_RING_TEST = 0x2,
+ BR_MRP_TLV_HEADER_RING_TOPO = 0x3,
+ BR_MRP_TLV_HEADER_RING_LINK_DOWN = 0x4,
+ BR_MRP_TLV_HEADER_RING_LINK_UP = 0x5,
+ BR_MRP_TLV_HEADER_IN_TEST = 0x6,
+ BR_MRP_TLV_HEADER_IN_TOPO = 0x7,
+ BR_MRP_TLV_HEADER_IN_LINK_DOWN = 0x8,
+ BR_MRP_TLV_HEADER_IN_LINK_UP = 0x9,
+ BR_MRP_TLV_HEADER_IN_LINK_STATUS = 0xa,
+ BR_MRP_TLV_HEADER_OPTION = 0x7f,
+};
+
+enum br_mrp_sub_tlv_header_type {
+ BR_MRP_SUB_TLV_HEADER_TEST_MGR_NACK = 0x1,
+ BR_MRP_SUB_TLV_HEADER_TEST_PROPAGATE = 0x2,
+ BR_MRP_SUB_TLV_HEADER_TEST_AUTO_MGR = 0x3,
+};
+
+#endif
diff --git a/include/uapi/linux/n_r3964.h b/include/uapi/linux/n_r3964.h
deleted file mode 100644
index 6bbd18520f30..000000000000
--- a/include/uapi/linux/n_r3964.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/* SPDX-License-Identifier: GPL-1.0+ WITH Linux-syscall-note */
-/* r3964 linediscipline for linux
- *
- * -----------------------------------------------------------
- * Copyright by
- * Philips Automation Projects
- * Kassel (Germany)
- * -----------------------------------------------------------
- * This software may be used and distributed according to the terms of
- * the GNU General Public License, incorporated herein by reference.
- *
- * Author:
- * L. Haag
- *
- * $Log: r3964.h,v $
- * Revision 1.4 2005/12/21 19:54:24 Kurt Huwig <kurt huwig de>
- * Fixed HZ usage on 2.6 kernels
- * Removed unnecessary include
- *
- * Revision 1.3 2001/03/18 13:02:24 dwmw2
- * Fix timer usage, use spinlocks properly.
- *
- * Revision 1.2 2001/03/18 12:53:15 dwmw2
- * Merge changes in 2.4.2
- *
- * Revision 1.1.1.1 1998/10/13 16:43:14 dwmw2
- * This'll screw the version control
- *
- * Revision 1.6 1998/09/30 00:40:38 dwmw2
- * Updated to use kernel's N_R3964 if available
- *
- * Revision 1.4 1998/04/02 20:29:44 lhaag
- * select, blocking, ...
- *
- * Revision 1.3 1998/02/12 18:58:43 root
- * fixed some memory leaks
- * calculation of checksum characters
- *
- * Revision 1.2 1998/02/07 13:03:17 root
- * ioctl read_telegram
- *
- * Revision 1.1 1998/02/06 19:19:43 root
- * Initial revision
- *
- *
- */
-
-#ifndef _UAPI__LINUX_N_R3964_H__
-#define _UAPI__LINUX_N_R3964_H__
-
-/* line disciplines for r3964 protocol */
-
-
-/*
- * Ioctl-commands
- */
-
-#define R3964_ENABLE_SIGNALS 0x5301
-#define R3964_SETPRIORITY 0x5302
-#define R3964_USE_BCC 0x5303
-#define R3964_READ_TELEGRAM 0x5304
-
-/* Options for R3964_SETPRIORITY */
-#define R3964_MASTER 0
-#define R3964_SLAVE 1
-
-/* Options for R3964_ENABLE_SIGNALS */
-#define R3964_SIG_ACK 0x0001
-#define R3964_SIG_DATA 0x0002
-#define R3964_SIG_ALL 0x000f
-#define R3964_SIG_NONE 0x0000
-#define R3964_USE_SIGIO 0x1000
-
-/*
- * r3964 operation states:
- */
-
-/* types for msg_id: */
-enum {R3964_MSG_ACK=1, R3964_MSG_DATA };
-
-#define R3964_MAX_MSG_COUNT 32
-
-/* error codes for client messages */
-#define R3964_OK 0 /* no error. */
-#define R3964_TX_FAIL -1 /* transmission error, block NOT sent */
-#define R3964_OVERFLOW -2 /* msg queue overflow */
-
-/* the client gets this struct when calling read(fd,...): */
-struct r3964_client_message {
- int msg_id;
- int arg;
- int error_code;
-};
-
-#define R3964_MTU 256
-
-
-
-#endif /* _UAPI__LINUX_N_R3964_H__ */
diff --git a/include/uapi/linux/nbd-netlink.h b/include/uapi/linux/nbd-netlink.h
index c5d0ef7aa7d5..2d0b90964227 100644
--- a/include/uapi/linux/nbd-netlink.h
+++ b/include/uapi/linux/nbd-netlink.h
@@ -35,6 +35,7 @@ enum {
NBD_ATTR_SOCKETS,
NBD_ATTR_DEAD_CONN_TIMEOUT,
NBD_ATTR_DEVICE_LIST,
+ NBD_ATTR_BACKEND_IDENTIFIER,
__NBD_ATTR_MAX,
};
#define NBD_ATTR_MAX (__NBD_ATTR_MAX - 1)
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index de5d90212409..73516e263627 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -30,25 +30,25 @@ struct nd_cmd_get_config_data_hdr {
__u32 in_offset;
__u32 in_length;
__u32 status;
- __u8 out_buf[0];
+ __u8 out_buf[];
} __packed;
struct nd_cmd_set_config_hdr {
__u32 in_offset;
__u32 in_length;
- __u8 in_buf[0];
+ __u8 in_buf[];
} __packed;
struct nd_cmd_vendor_hdr {
__u32 opcode;
__u32 in_length;
- __u8 in_buf[0];
+ __u8 in_buf[];
} __packed;
struct nd_cmd_vendor_tail {
__u32 status;
__u32 out_length;
- __u8 out_buf[0];
+ __u8 out_buf[];
} __packed;
struct nd_cmd_ars_cap {
@@ -86,7 +86,7 @@ struct nd_cmd_ars_status {
__u32 reserved;
__u64 err_address;
__u64 length;
- } __packed records[0];
+ } __packed records[];
} __packed;
struct nd_cmd_clear_error {
@@ -189,7 +189,6 @@ static inline const char *nvdimm_cmd_name(unsigned cmd)
#define ND_DEVICE_REGION_BLK 3 /* nd_region: (parent of BLK namespaces) */
#define ND_DEVICE_NAMESPACE_IO 4 /* legacy persistent memory */
#define ND_DEVICE_NAMESPACE_PMEM 5 /* PMEM namespace (may alias with BLK) */
-#define ND_DEVICE_NAMESPACE_BLK 6 /* BLK namespace (may alias with PMEM) */
#define ND_DEVICE_DAX_PMEM 7 /* Device DAX interface to pmem */
enum nd_driver_flags {
@@ -198,7 +197,6 @@ enum nd_driver_flags {
ND_DRIVER_REGION_BLK = 1 << ND_DEVICE_REGION_BLK,
ND_DRIVER_NAMESPACE_IO = 1 << ND_DEVICE_NAMESPACE_IO,
ND_DRIVER_NAMESPACE_PMEM = 1 << ND_DEVICE_NAMESPACE_PMEM,
- ND_DRIVER_NAMESPACE_BLK = 1 << ND_DEVICE_NAMESPACE_BLK,
ND_DRIVER_DAX_PMEM = 1 << ND_DEVICE_DAX_PMEM,
};
@@ -244,6 +242,12 @@ struct nd_cmd_pkg {
#define NVDIMM_FAMILY_HPE2 2
#define NVDIMM_FAMILY_MSFT 3
#define NVDIMM_FAMILY_HYPERV 4
+#define NVDIMM_FAMILY_PAPR 5
+#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_PAPR
+
+#define NVDIMM_BUS_FAMILY_NFIT 0
+#define NVDIMM_BUS_FAMILY_INTEL 1
+#define NVDIMM_BUS_FAMILY_MAX NVDIMM_BUS_FAMILY_INTEL
#define ND_IOCTL_CALL _IOWR(ND_IOCTL, ND_CMD_CALL,\
struct nd_cmd_pkg)
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index cd144e3099a3..a998bf761635 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -29,6 +29,11 @@ enum {
NDA_LINK_NETNSID,
NDA_SRC_VNI,
NDA_PROTOCOL, /* Originator of entry */
+ NDA_NH_ID,
+ NDA_FDB_EXT_ATTRS,
+ NDA_FLAGS_EXT,
+ NDA_NDM_STATE_MASK,
+ NDA_NDM_FLAGS_MASK,
__NDA_MAX
};
@@ -38,14 +43,16 @@ enum {
* Neighbor Cache Entry Flags
*/
-#define NTF_USE 0x01
-#define NTF_SELF 0x02
-#define NTF_MASTER 0x04
-#define NTF_PROXY 0x08 /* == ATF_PUBL */
-#define NTF_EXT_LEARNED 0x10
-#define NTF_OFFLOADED 0x20
-#define NTF_STICKY 0x40
-#define NTF_ROUTER 0x80
+#define NTF_USE (1 << 0)
+#define NTF_SELF (1 << 1)
+#define NTF_MASTER (1 << 2)
+#define NTF_PROXY (1 << 3) /* == ATF_PUBL */
+#define NTF_EXT_LEARNED (1 << 4)
+#define NTF_OFFLOADED (1 << 5)
+#define NTF_STICKY (1 << 6)
+#define NTF_ROUTER (1 << 7)
+/* Extended flags under NDA_FLAGS_EXT: */
+#define NTF_EXT_MANAGED (1 << 0)
/*
* Neighbor Cache Entry States.
@@ -63,9 +70,22 @@ enum {
#define NUD_PERMANENT 0x80
#define NUD_NONE 0x00
-/* NUD_NOARP & NUD_PERMANENT are pseudostates, they never change
- and make no address resolution or NUD.
- NUD_PERMANENT also cannot be deleted by garbage collectors.
+/* NUD_NOARP & NUD_PERMANENT are pseudostates, they never change and make no
+ * address resolution or NUD.
+ *
+ * NUD_PERMANENT also cannot be deleted by garbage collectors. This holds true
+ * for dynamic entries with NTF_EXT_LEARNED flag as well. However, upon carrier
+ * down event, NUD_PERMANENT entries are not flushed whereas NTF_EXT_LEARNED
+ * flagged entries explicitly are (which is also consistent with the routing
+ * subsystem).
+ *
+ * When NTF_EXT_LEARNED is set for a bridge fdb entry the different cache entry
+ * states don't make sense and thus are ignored. Such entries don't age and
+ * can roam.
+ *
+ * NTF_EXT_MANAGED flagged neighbor entries are managed by the kernel on behalf
+ * of a user space control plane, and automatically refreshed so that (if
+ * possible) they remain in NUD_REACHABLE state.
*/
struct nda_cacheinfo {
@@ -134,6 +154,7 @@ enum {
NDTPA_QUEUE_LENBYTES, /* u32 */
NDTPA_MCAST_REPROBES, /* u32 */
NDTPA_PAD,
+ NDTPA_INTERVAL_PROBE_TIME_MS, /* u64, msecs */
__NDTPA_MAX
};
#define NDTPA_MAX (__NDTPA_MAX - 1)
@@ -171,4 +192,27 @@ enum {
};
#define NDTA_MAX (__NDTA_MAX - 1)
+ /* FDB activity notification bits used in NFEA_ACTIVITY_NOTIFY:
+ * - FDB_NOTIFY_BIT - notify on activity/expire for any entry
+ * - FDB_NOTIFY_INACTIVE_BIT - mark as inactive to avoid multiple notifications
+ */
+enum {
+ FDB_NOTIFY_BIT = (1 << 0),
+ FDB_NOTIFY_INACTIVE_BIT = (1 << 1)
+};
+
+/* embedded into NDA_FDB_EXT_ATTRS:
+ * [NDA_FDB_EXT_ATTRS] = {
+ * [NFEA_ACTIVITY_NOTIFY]
+ * ...
+ * }
+ */
+enum {
+ NFEA_UNSPEC,
+ NFEA_ACTIVITY_NOTIFY,
+ NFEA_DONT_REFRESH,
+ __NFEA_MAX
+};
+#define NFEA_MAX (__NFEA_MAX - 1)
+
#endif
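
NTF_EXT_MANAGED lives in the new NDA_FLAGS_EXT attribute rather than in
ndm_flags, so a netlink listener has to walk the attributes to see it. A
fragment using the standard rtattr macros; receiving the RTM_NEWNEIGH
message from a NETLINK_ROUTE socket is assumed and not shown.

/* Sketch: test whether a neighbour entry is kernel-managed. */
#include <stdbool.h>
#include <string.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/neighbour.h>

static bool neigh_is_ext_managed(struct nlmsghdr *nlh)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	int len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*ndm));
	struct rtattr *rta = (struct rtattr *)
		((char *)ndm + NLMSG_ALIGN(sizeof(*ndm)));

	for (; RTA_OK(rta, len); rta = RTA_NEXT(rta, len)) {
		if (rta->rta_type == NDA_FLAGS_EXT) {
			__u32 ext;

			memcpy(&ext, RTA_DATA(rta), sizeof(ext));
			return ext & NTF_EXT_MANAGED;
		}
	}
	return false;
}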
diff --git a/include/uapi/linux/net_dropmon.h b/include/uapi/linux/net_dropmon.h
index 8bf79a9eb234..84f622a66a7a 100644
--- a/include/uapi/linux/net_dropmon.h
+++ b/include/uapi/linux/net_dropmon.h
@@ -29,12 +29,12 @@ struct net_dm_config_entry {
struct net_dm_config_msg {
__u32 entries;
- struct net_dm_config_entry options[0];
+ struct net_dm_config_entry options[];
};
struct net_dm_alert_msg {
__u32 entries;
- struct net_dm_drop_point points[0];
+ struct net_dm_drop_point points[];
};
struct net_dm_user_msg {
@@ -92,6 +92,8 @@ enum net_dm_attr {
NET_DM_ATTR_HW_TRAP_COUNT, /* u32 */
NET_DM_ATTR_SW_DROPS, /* flag */
NET_DM_ATTR_HW_DROPS, /* flag */
+ NET_DM_ATTR_FLOW_ACTION_COOKIE, /* binary */
+ NET_DM_ATTR_REASON, /* string */
__NET_DM_ATTR_MAX,
NET_DM_ATTR_MAX = __NET_DM_ATTR_MAX - 1
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index f96e650d0af9..55501e5e7ac8 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -13,7 +13,7 @@
#include <linux/types.h>
#include <linux/socket.h> /* for SO_TIMESTAMPING */
-/* SO_TIMESTAMPING gets an integer bit field comprised of these values */
+/* SO_TIMESTAMPING flags */
enum {
SOF_TIMESTAMPING_TX_HARDWARE = (1<<0),
SOF_TIMESTAMPING_TX_SOFTWARE = (1<<1),
@@ -30,8 +30,9 @@ enum {
SOF_TIMESTAMPING_OPT_STATS = (1<<12),
SOF_TIMESTAMPING_OPT_PKTINFO = (1<<13),
SOF_TIMESTAMPING_OPT_TX_SWHW = (1<<14),
+ SOF_TIMESTAMPING_BIND_PHC = (1 << 15),
- SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_TX_SWHW,
+ SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_BIND_PHC,
SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) |
SOF_TIMESTAMPING_LAST
};
@@ -47,9 +48,21 @@ enum {
SOF_TIMESTAMPING_TX_ACK)
/**
+ * struct so_timestamping - SO_TIMESTAMPING parameter
+ *
+ * @flags: SO_TIMESTAMPING flags
+ * @bind_phc: Index of the PTP virtual clock bound to the socket. Only
+ * valid when the SOF_TIMESTAMPING_BIND_PHC flag is set.
+ */
+struct so_timestamping {
+ int flags;
+ int bind_phc;
+};
+
+/**
* struct hwtstamp_config - %SIOCGHWTSTAMP and %SIOCSHWTSTAMP parameter
*
- * @flags: no flags defined right now, must be zero for %SIOCSHWTSTAMP
+ * @flags: one of HWTSTAMP_FLAG_*
* @tx_type: one of HWTSTAMP_TX_*
* @rx_filter: one of HWTSTAMP_FILTER_*
*
@@ -65,6 +78,21 @@ struct hwtstamp_config {
int rx_filter;
};
+/* possible values for hwtstamp_config->flags */
+enum hwtstamp_flags {
+ /*
+ * With this flag, the user can get the PHC index of the bond's
+ * active interface. Note that this PHC index is not stable: on
+ * failover the bond's active interface changes, and the PHC index
+ * changes with it.
+ */
+ HWTSTAMP_FLAG_BONDED_PHC_INDEX = (1<<0),
+#define HWTSTAMP_FLAG_BONDED_PHC_INDEX HWTSTAMP_FLAG_BONDED_PHC_INDEX
+
+ HWTSTAMP_FLAG_LAST = HWTSTAMP_FLAG_BONDED_PHC_INDEX,
+ HWTSTAMP_FLAG_MASK = (HWTSTAMP_FLAG_LAST - 1) | HWTSTAMP_FLAG_LAST
+};
+
/* possible values for hwtstamp_config->tx_type */
enum hwtstamp_tx_types {
/*
@@ -98,6 +126,9 @@ enum hwtstamp_tx_types {
* receive a time stamp via the socket error queue.
*/
HWTSTAMP_TX_ONESTEP_P2P,
+
+ /* add new constants above here */
+ __HWTSTAMP_TX_CNT
};
/* possible values for hwtstamp_config->rx_filter */
@@ -140,6 +171,9 @@ enum hwtstamp_rx_filters {
/* NTP, UDP, all versions and packet modes */
HWTSTAMP_FILTER_NTP_ALL,
+
+ /* add new constants above here */
+ __HWTSTAMP_FILTER_CNT
};
/* SCM_TIMESTAMPING_PKTINFO control message */
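
With SOF_TIMESTAMPING_BIND_PHC set, SO_TIMESTAMPING takes the new
struct so_timestamping instead of a bare flags integer, and bind_phc selects
the PTP virtual clock. A sketch; the PHC index used is a placeholder for a
virtual clock created under the interface's physical clock.

/* Sketch: bind hardware RX timestamping to one PTP virtual clock. */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct so_timestamping ts = {
		.flags = SOF_TIMESTAMPING_RX_HARDWARE |
			 SOF_TIMESTAMPING_RAW_HARDWARE |
			 SOF_TIMESTAMPING_BIND_PHC,
		.bind_phc = 1,
	};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
				 &ts, sizeof(ts)))
		perror("SO_TIMESTAMPING");
	return 0;
}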
diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h
index ca9e63d6e0e4..5a79ccb76701 100644
--- a/include/uapi/linux/netfilter.h
+++ b/include/uapi/linux/netfilter.h
@@ -45,11 +45,13 @@ enum nf_inet_hooks {
NF_INET_FORWARD,
NF_INET_LOCAL_OUT,
NF_INET_POST_ROUTING,
- NF_INET_NUMHOOKS
+ NF_INET_NUMHOOKS,
+ NF_INET_INGRESS = NF_INET_NUMHOOKS,
};
enum nf_dev_hooks {
NF_NETDEV_INGRESS,
+ NF_NETDEV_EGRESS,
NF_NETDEV_NUMHOOKS
};
@@ -61,7 +63,9 @@ enum {
NFPROTO_NETDEV = 5,
NFPROTO_BRIDGE = 7,
NFPROTO_IPV6 = 10,
+#ifndef __KERNEL__ /* no longer supported by kernel */
NFPROTO_DECNET = 12,
+#endif
NFPROTO_NUMPROTO,
};
diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h
index 11a72a938eb1..79e5d68b87af 100644
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -3,10 +3,6 @@
* Patrick Schaaf <bof@bof.de>
* Martin Josefsson <gandalf@wlug.westbo.se>
* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _UAPI_IP_SET_H
#define _UAPI_IP_SET_H
@@ -92,11 +88,11 @@ enum {
/* Reserve empty slots */
IPSET_ATTR_CADT_MAX = 16,
/* Create-only specific attributes */
- IPSET_ATTR_GC,
+ IPSET_ATTR_INITVAL, /* was unused IPSET_ATTR_GC */
IPSET_ATTR_HASHSIZE,
IPSET_ATTR_MAXELEM,
IPSET_ATTR_NETMASK,
- IPSET_ATTR_PROBES,
+ IPSET_ATTR_BUCKETSIZE, /* was unused IPSET_ATTR_PROBES */
IPSET_ATTR_RESIZE,
IPSET_ATTR_SIZE,
/* Kernel-only */
@@ -214,6 +210,8 @@ enum ipset_cadt_flags {
enum ipset_create_flags {
IPSET_CREATE_FLAG_BIT_FORCEADD = 0,
IPSET_CREATE_FLAG_FORCEADD = (1 << IPSET_CREATE_FLAG_BIT_FORCEADD),
+ IPSET_CREATE_FLAG_BIT_BUCKETSIZE = 1,
+ IPSET_CREATE_FLAG_BUCKETSIZE = (1 << IPSET_CREATE_FLAG_BIT_BUCKETSIZE),
IPSET_CREATE_FLAG_BIT_MAX = 7,
};
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index b6f0bb1dc799..26071021e986 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -106,7 +106,7 @@ enum ip_conntrack_status {
IPS_NAT_CLASH = IPS_UNTRACKED,
#endif
- /* Conntrack got a helper explicitly attached via CT target. */
+ /* Conntrack got a helper explicitly attached (ruleset, ctnetlink). */
IPS_HELPER_BIT = 13,
IPS_HELPER = (1 << IPS_HELPER_BIT),
@@ -114,15 +114,19 @@ enum ip_conntrack_status {
IPS_OFFLOAD_BIT = 14,
IPS_OFFLOAD = (1 << IPS_OFFLOAD_BIT),
+ /* Conntrack has been offloaded to hardware. */
+ IPS_HW_OFFLOAD_BIT = 15,
+ IPS_HW_OFFLOAD = (1 << IPS_HW_OFFLOAD_BIT),
+
/* Be careful here, modifying these bits can make things messy,
* so don't let users modify them directly.
*/
IPS_UNCHANGEABLE_MASK = (IPS_NAT_DONE_MASK | IPS_NAT_MASK |
IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING |
IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_UNTRACKED |
- IPS_OFFLOAD),
+ IPS_OFFLOAD | IPS_HW_OFFLOAD),
- __IPS_MAX_BIT = 15,
+ __IPS_MAX_BIT = 16,
};
/* Connection tracking event types */
diff --git a/include/uapi/linux/netfilter/nf_nat.h b/include/uapi/linux/netfilter/nf_nat.h
index 4a95c0db14d4..a64586e77b24 100644
--- a/include/uapi/linux/netfilter/nf_nat.h
+++ b/include/uapi/linux/netfilter/nf_nat.h
@@ -11,6 +11,7 @@
#define NF_NAT_RANGE_PERSISTENT (1 << 3)
#define NF_NAT_RANGE_PROTO_RANDOM_FULLY (1 << 4)
#define NF_NAT_RANGE_PROTO_OFFSET (1 << 5)
+#define NF_NAT_RANGE_NETMAP (1 << 6)
#define NF_NAT_RANGE_PROTO_RANDOM_ALL \
(NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY)
@@ -18,7 +19,8 @@
#define NF_NAT_RANGE_MASK \
(NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \
NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \
- NF_NAT_RANGE_PROTO_RANDOM_FULLY | NF_NAT_RANGE_PROTO_OFFSET)
+ NF_NAT_RANGE_PROTO_RANDOM_FULLY | NF_NAT_RANGE_PROTO_OFFSET | \
+ NF_NAT_RANGE_NETMAP)
struct nf_nat_ipv4_range {
unsigned int flags;
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 065218a20bb7..466fd3f4447c 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -133,7 +133,7 @@ enum nf_tables_msg_types {
* @NFTA_LIST_ELEM: list element (NLA_NESTED)
*/
enum nft_list_attributes {
- NFTA_LIST_UNPEC,
+ NFTA_LIST_UNSPEC,
NFTA_LIST_ELEM,
__NFTA_LIST_MAX
};
@@ -164,7 +164,10 @@ enum nft_hook_attributes {
*/
enum nft_table_flags {
NFT_TABLE_F_DORMANT = 0x1,
+ NFT_TABLE_F_OWNER = 0x2,
};
+#define NFT_TABLE_F_MASK (NFT_TABLE_F_DORMANT | \
+ NFT_TABLE_F_OWNER)
/**
* enum nft_table_attributes - nf_tables table netlink attributes
@@ -172,6 +175,8 @@ enum nft_table_flags {
* @NFTA_TABLE_NAME: name of the table (NLA_STRING)
* @NFTA_TABLE_FLAGS: bitmask of enum nft_table_flags (NLA_U32)
* @NFTA_TABLE_USE: number of chains in this table (NLA_U32)
+ * @NFTA_TABLE_USERDATA: user data (NLA_BINARY)
+ * @NFTA_TABLE_OWNER: owner of this table through netlink portID (NLA_U32)
*/
enum nft_table_attributes {
NFTA_TABLE_UNSPEC,
@@ -180,10 +185,21 @@ enum nft_table_attributes {
NFTA_TABLE_USE,
NFTA_TABLE_HANDLE,
NFTA_TABLE_PAD,
+ NFTA_TABLE_USERDATA,
+ NFTA_TABLE_OWNER,
__NFTA_TABLE_MAX
};
#define NFTA_TABLE_MAX (__NFTA_TABLE_MAX - 1)
+enum nft_chain_flags {
+ NFT_CHAIN_BASE = (1 << 0),
+ NFT_CHAIN_HW_OFFLOAD = (1 << 1),
+ NFT_CHAIN_BINDING = (1 << 2),
+};
+#define NFT_CHAIN_FLAGS (NFT_CHAIN_BASE | \
+ NFT_CHAIN_HW_OFFLOAD | \
+ NFT_CHAIN_BINDING)
+
/**
* enum nft_chain_attributes - nf_tables chain netlink attributes
*
@@ -196,6 +212,8 @@ enum nft_table_attributes {
* @NFTA_CHAIN_TYPE: type name of the string (NLA_NUL_STRING)
* @NFTA_CHAIN_COUNTERS: counter specification of the chain (NLA_NESTED: nft_counter_attributes)
* @NFTA_CHAIN_FLAGS: chain flags
+ * @NFTA_CHAIN_ID: uniquely identifies a chain in a transaction (NLA_U32)
+ * @NFTA_CHAIN_USERDATA: user data (NLA_BINARY)
*/
enum nft_chain_attributes {
NFTA_CHAIN_UNSPEC,
@@ -209,6 +227,8 @@ enum nft_chain_attributes {
NFTA_CHAIN_COUNTERS,
NFTA_CHAIN_PAD,
NFTA_CHAIN_FLAGS,
+ NFTA_CHAIN_ID,
+ NFTA_CHAIN_USERDATA,
__NFTA_CHAIN_MAX
};
#define NFTA_CHAIN_MAX (__NFTA_CHAIN_MAX - 1)
@@ -238,6 +258,7 @@ enum nft_rule_attributes {
NFTA_RULE_PAD,
NFTA_RULE_ID,
NFTA_RULE_POSITION_ID,
+ NFTA_RULE_CHAIN_ID,
__NFTA_RULE_MAX
};
#define NFTA_RULE_MAX (__NFTA_RULE_MAX - 1)
@@ -276,6 +297,8 @@ enum nft_rule_compat_attributes {
* @NFT_SET_TIMEOUT: set uses timeouts
* @NFT_SET_EVAL: set can be updated from the evaluation path
* @NFT_SET_OBJECT: set contains stateful objects
+ * @NFT_SET_CONCAT: set contains a concatenation
+ * @NFT_SET_EXPR: set contains expressions
*/
enum nft_set_flags {
NFT_SET_ANONYMOUS = 0x1,
@@ -285,6 +308,8 @@ enum nft_set_flags {
NFT_SET_TIMEOUT = 0x10,
NFT_SET_EVAL = 0x20,
NFT_SET_OBJECT = 0x40,
+ NFT_SET_CONCAT = 0x80,
+ NFT_SET_EXPR = 0x100,
};
/**
@@ -342,6 +367,8 @@ enum nft_set_field_attributes {
* @NFTA_SET_USERDATA: user data (NLA_BINARY)
* @NFTA_SET_OBJ_TYPE: stateful object type (NLA_U32: NFT_OBJECT_*)
* @NFTA_SET_HANDLE: set handle (NLA_U64)
+ * @NFTA_SET_EXPR: set expression (NLA_NESTED: nft_expr_attributes)
+ * @NFTA_SET_EXPRESSIONS: list of expressions (NLA_NESTED: nft_list_attributes)
*/
enum nft_set_attributes {
NFTA_SET_UNSPEC,
@@ -361,6 +388,8 @@ enum nft_set_attributes {
NFTA_SET_PAD,
NFTA_SET_OBJ_TYPE,
NFTA_SET_HANDLE,
+ NFTA_SET_EXPR,
+ NFTA_SET_EXPRESSIONS,
__NFTA_SET_MAX
};
#define NFTA_SET_MAX (__NFTA_SET_MAX - 1)
@@ -369,9 +398,11 @@ enum nft_set_attributes {
* enum nft_set_elem_flags - nf_tables set element flags
*
* @NFT_SET_ELEM_INTERVAL_END: element ends the previous interval
+ * @NFT_SET_ELEM_CATCHALL: special catch-all element
*/
enum nft_set_elem_flags {
NFT_SET_ELEM_INTERVAL_END = 0x1,
+ NFT_SET_ELEM_CATCHALL = 0x2,
};
/**
@@ -386,6 +417,7 @@ enum nft_set_elem_flags {
* @NFTA_SET_ELEM_EXPR: expression (NLA_NESTED: nft_expr_attributes)
* @NFTA_SET_ELEM_OBJREF: stateful object reference (NLA_STRING)
* @NFTA_SET_ELEM_KEY_END: closing key value (NLA_NESTED: nft_data)
+ * @NFTA_SET_ELEM_EXPRESSIONS: list of expressions (NLA_NESTED: nft_list_attributes)
*/
enum nft_set_elem_attributes {
NFTA_SET_ELEM_UNSPEC,
@@ -399,6 +431,7 @@ enum nft_set_elem_attributes {
NFTA_SET_ELEM_PAD,
NFTA_SET_ELEM_OBJREF,
NFTA_SET_ELEM_KEY_END,
+ NFTA_SET_ELEM_EXPRESSIONS,
__NFTA_SET_ELEM_MAX
};
#define NFTA_SET_ELEM_MAX (__NFTA_SET_ELEM_MAX - 1)
@@ -464,11 +497,13 @@ enum nft_data_attributes {
*
* @NFTA_VERDICT_CODE: nf_tables verdict (NLA_U32: enum nft_verdicts)
* @NFTA_VERDICT_CHAIN: jump target chain name (NLA_STRING)
+ * @NFTA_VERDICT_CHAIN_ID: jump target chain ID (NLA_U32)
*/
enum nft_verdict_attributes {
NFTA_VERDICT_UNSPEC,
NFTA_VERDICT_CODE,
NFTA_VERDICT_CHAIN,
+ NFTA_VERDICT_CHAIN_ID,
__NFTA_VERDICT_MAX
};
#define NFTA_VERDICT_MAX (__NFTA_VERDICT_MAX - 1)
@@ -680,6 +715,7 @@ enum nft_dynset_ops {
enum nft_dynset_flags {
NFT_DYNSET_F_INV = (1 << 0),
+ NFT_DYNSET_F_EXPR = (1 << 1),
};
/**
@@ -693,6 +729,7 @@ enum nft_dynset_flags {
* @NFTA_DYNSET_TIMEOUT: timeout value for the new element (NLA_U64)
* @NFTA_DYNSET_EXPR: expression (NLA_NESTED: nft_expr_attributes)
* @NFTA_DYNSET_FLAGS: flags (NLA_U32)
+ * @NFTA_DYNSET_EXPRESSIONS: list of expressions (NLA_NESTED: nft_list_attributes)
*/
enum nft_dynset_attributes {
NFTA_DYNSET_UNSPEC,
@@ -705,6 +742,7 @@ enum nft_dynset_attributes {
NFTA_DYNSET_EXPR,
NFTA_DYNSET_PAD,
NFTA_DYNSET_FLAGS,
+ NFTA_DYNSET_EXPRESSIONS,
__NFTA_DYNSET_MAX,
};
#define NFTA_DYNSET_MAX (__NFTA_DYNSET_MAX - 1)
@@ -715,11 +753,13 @@ enum nft_dynset_attributes {
* @NFT_PAYLOAD_LL_HEADER: link layer header
* @NFT_PAYLOAD_NETWORK_HEADER: network header
* @NFT_PAYLOAD_TRANSPORT_HEADER: transport header
+ * @NFT_PAYLOAD_INNER_HEADER: inner header / payload
*/
enum nft_payload_bases {
NFT_PAYLOAD_LL_HEADER,
NFT_PAYLOAD_NETWORK_HEADER,
NFT_PAYLOAD_TRANSPORT_HEADER,
+ NFT_PAYLOAD_INNER_HEADER,
};
/**
@@ -727,10 +767,12 @@ enum nft_payload_bases {
*
* @NFT_PAYLOAD_CSUM_NONE: no checksumming
* @NFT_PAYLOAD_CSUM_INET: internet checksum (RFC 791)
+ * @NFT_PAYLOAD_CSUM_SCTP: CRC-32c, for use in SCTP header (RFC 3309)
*/
enum nft_payload_csum_types {
NFT_PAYLOAD_CSUM_NONE,
NFT_PAYLOAD_CSUM_INET,
+ NFT_PAYLOAD_CSUM_SCTP,
};
enum nft_payload_csum_flags {
@@ -773,11 +815,13 @@ enum nft_exthdr_flags {
* @NFT_EXTHDR_OP_IPV6: match against ipv6 extension headers
* @NFT_EXTHDR_OP_TCP: match against tcp options
* @NFT_EXTHDR_OP_IPV4: match against ipv4 options
+ * @NFT_EXTHDR_OP_SCTP: match against sctp chunks
*/
enum nft_exthdr_op {
NFT_EXTHDR_OP_IPV6,
NFT_EXTHDR_OP_TCPOPT,
NFT_EXTHDR_OP_IPV4,
+ NFT_EXTHDR_OP_SCTP,
__NFT_EXTHDR_OP_MAX
};
#define NFT_EXTHDR_OP_MAX (__NFT_EXTHDR_OP_MAX - 1)
@@ -854,7 +898,8 @@ enum nft_meta_keys {
NFT_META_OIF,
NFT_META_IIFNAME,
NFT_META_OIFNAME,
- NFT_META_IIFTYPE,
+ NFT_META_IFTYPE,
+#define NFT_META_IIFTYPE NFT_META_IFTYPE
NFT_META_OIFTYPE,
NFT_META_SKUID,
NFT_META_SKGID,
@@ -881,6 +926,7 @@ enum nft_meta_keys {
NFT_META_TIME_HOUR,
NFT_META_SDIF,
NFT_META_SDIFNAME,
+ __NFT_META_IIFTYPE,
};
/**
@@ -976,11 +1022,13 @@ enum nft_rt_attributes {
*
* @NFTA_SOCKET_KEY: socket key to match
* @NFTA_SOCKET_DREG: destination register
+ * @NFTA_SOCKET_LEVEL: cgroup v2 ancestor level (only for NFT_SOCKET_CGROUPV2)
*/
enum nft_socket_attributes {
NFTA_SOCKET_UNSPEC,
NFTA_SOCKET_KEY,
NFTA_SOCKET_DREG,
+ NFTA_SOCKET_LEVEL,
__NFTA_SOCKET_MAX
};
#define NFTA_SOCKET_MAX (__NFTA_SOCKET_MAX - 1)
@@ -990,10 +1038,14 @@ enum nft_socket_attributes {
*
* @NFT_SOCKET_TRANSPARENT: Value of the IP(V6)_TRANSPARENT socket option
* @NFT_SOCKET_MARK: Value of the socket mark
+ * @NFT_SOCKET_WILDCARD: Whether the socket is zero-bound (e.g. 0.0.0.0 or ::0)
+ * @NFT_SOCKET_CGROUPV2: Match on cgroups version 2
*/
enum nft_socket_keys {
NFT_SOCKET_TRANSPARENT,
NFT_SOCKET_MARK,
+ NFT_SOCKET_WILDCARD,
+ NFT_SOCKET_CGROUPV2,
__NFT_SOCKET_MAX
};
#define NFT_SOCKET_MAX (__NFT_SOCKET_MAX - 1)
@@ -1148,6 +1200,21 @@ enum nft_counter_attributes {
#define NFTA_COUNTER_MAX (__NFTA_COUNTER_MAX - 1)
/**
+ * enum nft_last_attributes - nf_tables last expression netlink attributes
+ *
+ * @NFTA_LAST_SET: last update has been set, zero means never updated (NLA_U32)
+ * @NFTA_LAST_MSECS: milliseconds since last update (NLA_U64)
+ */
+enum nft_last_attributes {
+ NFTA_LAST_UNSPEC,
+ NFTA_LAST_SET,
+ NFTA_LAST_MSECS,
+ NFTA_LAST_PAD,
+ __NFTA_LAST_MAX
+};
+#define NFTA_LAST_MAX (__NFTA_LAST_MAX - 1)
+
+/**
* enum nft_log_attributes - nf_tables log expression netlink attributes
*
* @NFTA_LOG_GROUP: netlink group to send messages to (NLA_U32)
@@ -1537,6 +1604,7 @@ enum nft_ct_expectation_attributes {
* @NFTA_OBJ_DATA: stateful object data (NLA_NESTED)
* @NFTA_OBJ_USE: number of references to this expression (NLA_U32)
* @NFTA_OBJ_HANDLE: object handle (NLA_U64)
+ * @NFTA_OBJ_USERDATA: user data (NLA_BINARY)
*/
enum nft_object_attributes {
NFTA_OBJ_UNSPEC,
@@ -1547,11 +1615,25 @@ enum nft_object_attributes {
NFTA_OBJ_USE,
NFTA_OBJ_HANDLE,
NFTA_OBJ_PAD,
+ NFTA_OBJ_USERDATA,
__NFTA_OBJ_MAX
};
#define NFTA_OBJ_MAX (__NFTA_OBJ_MAX - 1)
/**
+ * enum nft_flowtable_flags - nf_tables flowtable flags
+ *
+ * @NFT_FLOWTABLE_HW_OFFLOAD: flowtable hardware offload is enabled
+ * @NFT_FLOWTABLE_COUNTER: enable flow counters
+ */
+enum nft_flowtable_flags {
+ NFT_FLOWTABLE_HW_OFFLOAD = 0x1,
+ NFT_FLOWTABLE_COUNTER = 0x2,
+ NFT_FLOWTABLE_MASK = (NFT_FLOWTABLE_HW_OFFLOAD |
+ NFT_FLOWTABLE_COUNTER)
+};
+
+/**
* enum nft_flowtable_attributes - nf_tables flow table netlink attributes
*
* @NFTA_FLOWTABLE_TABLE: name of the table containing the expression (NLA_STRING)
@@ -1770,6 +1852,7 @@ enum nft_tunnel_opts_attributes {
NFTA_TUNNEL_KEY_OPTS_UNSPEC,
NFTA_TUNNEL_KEY_OPTS_VXLAN,
NFTA_TUNNEL_KEY_OPTS_ERSPAN,
+ NFTA_TUNNEL_KEY_OPTS_GENEVE,
__NFTA_TUNNEL_KEY_OPTS_MAX
};
#define NFTA_TUNNEL_KEY_OPTS_MAX (__NFTA_TUNNEL_KEY_OPTS_MAX - 1)
@@ -1791,6 +1874,15 @@ enum nft_tunnel_opts_erspan_attributes {
};
#define NFTA_TUNNEL_KEY_ERSPAN_MAX (__NFTA_TUNNEL_KEY_ERSPAN_MAX - 1)
+enum nft_tunnel_opts_geneve_attributes {
+ NFTA_TUNNEL_KEY_GENEVE_UNSPEC,
+ NFTA_TUNNEL_KEY_GENEVE_CLASS,
+ NFTA_TUNNEL_KEY_GENEVE_TYPE,
+ NFTA_TUNNEL_KEY_GENEVE_DATA,
+ __NFTA_TUNNEL_KEY_GENEVE_MAX
+};
+#define NFTA_TUNNEL_KEY_GENEVE_MAX (__NFTA_TUNNEL_KEY_GENEVE_MAX - 1)
+
enum nft_tunnel_flags {
NFT_TUNNEL_F_ZERO_CSUM_TX = (1 << 0),
NFT_TUNNEL_F_DONT_FRAGMENT = (1 << 1),
diff --git a/include/uapi/linux/netfilter/nfnetlink.h b/include/uapi/linux/netfilter/nfnetlink.h
index 5bc960f220b3..6cd58cd2a6f0 100644
--- a/include/uapi/linux/netfilter/nfnetlink.h
+++ b/include/uapi/linux/netfilter/nfnetlink.h
@@ -60,7 +60,8 @@ struct nfgenmsg {
#define NFNL_SUBSYS_CTHELPER 9
#define NFNL_SUBSYS_NFTABLES 10
#define NFNL_SUBSYS_NFT_COMPAT 11
-#define NFNL_SUBSYS_COUNT 12
+#define NFNL_SUBSYS_HOOK 12
+#define NFNL_SUBSYS_COUNT 13
/* Reserved control nfnetlink messages */
#define NFNL_MSG_BATCH_BEGIN NLMSG_MIN_TYPE
diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
index 1d41810d17e2..c2ac7269acf7 100644
--- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
@@ -55,6 +55,8 @@ enum ctattr_type {
CTA_LABELS,
CTA_LABELS_MASK,
CTA_SYNPROXY,
+ CTA_FILTER,
+ CTA_STATUS_MASK,
__CTA_MAX
};
#define CTA_MAX (__CTA_MAX - 1)
@@ -246,7 +248,7 @@ enum ctattr_stats_cpu {
CTA_STATS_FOUND,
CTA_STATS_NEW, /* no longer used */
CTA_STATS_INVALID,
- CTA_STATS_IGNORE,
+ CTA_STATS_IGNORE, /* no longer used */
CTA_STATS_DELETE, /* no longer used */
CTA_STATS_DELETE_LIST, /* no longer used */
CTA_STATS_INSERT,
@@ -255,6 +257,8 @@ enum ctattr_stats_cpu {
CTA_STATS_EARLY_DROP,
CTA_STATS_ERROR,
CTA_STATS_SEARCH_RESTART,
+ CTA_STATS_CLASH_RESOLVE,
+ CTA_STATS_CHAIN_TOOLONG,
__CTA_STATS_MAX,
};
#define CTA_STATS_MAX (__CTA_STATS_MAX - 1)
@@ -276,4 +280,12 @@ enum ctattr_expect_stats {
};
#define CTA_STATS_EXP_MAX (__CTA_STATS_EXP_MAX - 1)
+enum ctattr_filter {
+ CTA_FILTER_UNSPEC,
+ CTA_FILTER_ORIG_FLAGS,
+ CTA_FILTER_REPLY_FLAGS,
+ __CTA_FILTER_MAX
+};
+#define CTA_FILTER_MAX (__CTA_FILTER_MAX - 1)
+
#endif /* _IPCONNTRACK_NETLINK_H */
diff --git a/include/uapi/linux/netfilter/nfnetlink_cthelper.h b/include/uapi/linux/netfilter/nfnetlink_cthelper.h
index a13137afc429..70af02092d16 100644
--- a/include/uapi/linux/netfilter/nfnetlink_cthelper.h
+++ b/include/uapi/linux/netfilter/nfnetlink_cthelper.h
@@ -5,7 +5,7 @@
#define NFCT_HELPER_STATUS_DISABLED 0
#define NFCT_HELPER_STATUS_ENABLED 1
-enum nfnl_acct_msg_types {
+enum nfnl_cthelper_msg_types {
NFNL_MSG_CTHELPER_NEW,
NFNL_MSG_CTHELPER_GET,
NFNL_MSG_CTHELPER_DEL,
diff --git a/include/uapi/linux/netfilter/nfnetlink_hook.h b/include/uapi/linux/netfilter/nfnetlink_hook.h
new file mode 100644
index 000000000000..bbcd285b22e1
--- /dev/null
+++ b/include/uapi/linux/netfilter/nfnetlink_hook.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _NFNL_HOOK_H_
+#define _NFNL_HOOK_H_
+
+enum nfnl_hook_msg_types {
+ NFNL_MSG_HOOK_GET,
+ NFNL_MSG_HOOK_MAX,
+};
+
+/**
+ * enum nfnl_hook_attributes - netfilter hook netlink attributes
+ *
+ * @NFNLA_HOOK_HOOKNUM: netfilter hook number (NLA_U32)
+ * @NFNLA_HOOK_PRIORITY: netfilter hook priority (NLA_U32)
+ * @NFNLA_HOOK_DEV: netdevice name (NLA_STRING)
+ * @NFNLA_HOOK_FUNCTION_NAME: hook function name (NLA_STRING)
+ * @NFNLA_HOOK_MODULE_NAME: kernel module that registered this hook (NLA_STRING)
+ * @NFNLA_HOOK_CHAIN_INFO: basechain hook metadata (NLA_NESTED)
+ */
+enum nfnl_hook_attributes {
+ NFNLA_HOOK_UNSPEC,
+ NFNLA_HOOK_HOOKNUM,
+ NFNLA_HOOK_PRIORITY,
+ NFNLA_HOOK_DEV,
+ NFNLA_HOOK_FUNCTION_NAME,
+ NFNLA_HOOK_MODULE_NAME,
+ NFNLA_HOOK_CHAIN_INFO,
+ __NFNLA_HOOK_MAX
+};
+#define NFNLA_HOOK_MAX (__NFNLA_HOOK_MAX - 1)
+
+/**
+ * enum nfnl_hook_chain_info_attributes - chain description
+ *
+ * @NFNLA_HOOK_INFO_DESC: nft chain and table name (enum nft_table_attributes) (NLA_NESTED)
+ * @NFNLA_HOOK_INFO_TYPE: chain type (enum nfnl_hook_chaintype) (NLA_U32)
+ */
+enum nfnl_hook_chain_info_attributes {
+ NFNLA_HOOK_INFO_UNSPEC,
+ NFNLA_HOOK_INFO_DESC,
+ NFNLA_HOOK_INFO_TYPE,
+ __NFNLA_HOOK_INFO_MAX,
+};
+#define NFNLA_HOOK_INFO_MAX (__NFNLA_HOOK_INFO_MAX - 1)
+
+enum nfnl_hook_chain_desc_attributes {
+ NFNLA_CHAIN_UNSPEC,
+ NFNLA_CHAIN_TABLE,
+ NFNLA_CHAIN_FAMILY,
+ NFNLA_CHAIN_NAME,
+ __NFNLA_CHAIN_MAX,
+};
+#define NFNLA_CHAIN_MAX (__NFNLA_CHAIN_MAX - 1)
+
+/**
+ * enum nfnl_hook_chaintype - chain type
+ *
+ * @NFNL_HOOK_TYPE_NFTABLES: nf_tables base chain
+ */
+enum nfnl_hook_chaintype {
+ NFNL_HOOK_TYPE_NFTABLES = 0x1,
+};
+
+#endif /* _NFNL_HOOK_H_ */
diff --git a/include/uapi/linux/netfilter/nfnetlink_log.h b/include/uapi/linux/netfilter/nfnetlink_log.h
index 45c8d3b027e0..0af9c113d665 100644
--- a/include/uapi/linux/netfilter/nfnetlink_log.h
+++ b/include/uapi/linux/netfilter/nfnetlink_log.h
@@ -61,7 +61,7 @@ enum nfulnl_attr_type {
NFULA_HWTYPE, /* hardware type */
NFULA_HWHEADER, /* hardware header */
NFULA_HWLEN, /* hardware header length */
- NFULA_CT, /* nf_conntrack_netlink.h */
+ NFULA_CT, /* nfnetlink_conntrack.h */
NFULA_CT_INFO, /* enum ip_conntrack_info */
NFULA_VLAN, /* nested attribute: packet vlan info */
NFULA_L2HDR, /* full L2 header */
diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h
index bcb2cb5d40b9..ef7c97f21a15 100644
--- a/include/uapi/linux/netfilter/nfnetlink_queue.h
+++ b/include/uapi/linux/netfilter/nfnetlink_queue.h
@@ -51,16 +51,17 @@ enum nfqnl_attr_type {
NFQA_IFINDEX_PHYSOUTDEV, /* __u32 ifindex */
NFQA_HWADDR, /* nfqnl_msg_packet_hw */
NFQA_PAYLOAD, /* opaque data payload */
- NFQA_CT, /* nf_conntrack_netlink.h */
+ NFQA_CT, /* nfnetlink_conntrack.h */
NFQA_CT_INFO, /* enum ip_conntrack_info */
NFQA_CAP_LEN, /* __u32 length of captured packet */
NFQA_SKB_INFO, /* __u32 skb meta information */
- NFQA_EXP, /* nf_conntrack_netlink.h */
+ NFQA_EXP, /* nfnetlink_conntrack.h */
NFQA_UID, /* __u32 sk uid */
NFQA_GID, /* __u32 sk gid */
NFQA_SECCTX, /* security context string */
NFQA_VLAN, /* nested attribute: packet vlan info */
NFQA_L2HDR, /* full L2 header */
+ NFQA_PRIORITY, /* skb->priority */
__NFQA_MAX
};
diff --git a/include/uapi/linux/netfilter/x_tables.h b/include/uapi/linux/netfilter/x_tables.h
index a8283f7dbc51..796af83a963a 100644
--- a/include/uapi/linux/netfilter/x_tables.h
+++ b/include/uapi/linux/netfilter/x_tables.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_X_TABLES_H
#define _UAPI_X_TABLES_H
-#include <linux/kernel.h>
+#include <linux/const.h>
#include <linux/types.h>
#define XT_FUNCTION_MAXNAMELEN 30
@@ -28,7 +28,7 @@ struct xt_entry_match {
__u16 match_size;
} u;
- unsigned char data[0];
+ unsigned char data[];
};
struct xt_entry_target {
@@ -119,7 +119,7 @@ struct xt_counters_info {
unsigned int num_counters;
/* The counters (actually `number' of these). */
- struct xt_counters counters[0];
+ struct xt_counters counters[];
};
#define XT_INV_PROTO 0x40 /* Invert the sense of PROTO. */
diff --git a/include/uapi/linux/netfilter/xt_AUDIT.h b/include/uapi/linux/netfilter/xt_AUDIT.h
index 1b314e2f84ac..56a3f6092e0c 100644
--- a/include/uapi/linux/netfilter/xt_AUDIT.h
+++ b/include/uapi/linux/netfilter/xt_AUDIT.h
@@ -4,10 +4,6 @@
*
* (C) 2010-2011 Thomas Graf <tgraf@redhat.com>
* (C) 2010-2011 Red Hat, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _XT_AUDIT_TARGET_H
diff --git a/include/uapi/linux/netfilter/xt_IDLETIMER.h b/include/uapi/linux/netfilter/xt_IDLETIMER.h
index 3c586a19baea..7bfb31a66fc9 100644
--- a/include/uapi/linux/netfilter/xt_IDLETIMER.h
+++ b/include/uapi/linux/netfilter/xt_IDLETIMER.h
@@ -1,7 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
- * linux/include/linux/netfilter/xt_IDLETIMER.h
- *
* Header file for Xtables timer target module.
*
* Copyright (C) 2004, 2010 Nokia Corporation
@@ -11,20 +9,6 @@
* by Luciano Coelho <luciano.coelho@nokia.com>
*
* Contact: Luciano Coelho <luciano.coelho@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef _XT_IDLETIMER_H
@@ -33,6 +17,7 @@
#include <linux/types.h>
#define MAX_IDLETIMER_LABEL_SIZE 28
+#define XT_IDLETIMER_ALARM 0x01
struct idletimer_tg_info {
__u32 timeout;
@@ -43,4 +28,15 @@ struct idletimer_tg_info {
struct idletimer_tg *timer __attribute__((aligned(8)));
};
+struct idletimer_tg_info_v1 {
+ __u32 timeout;
+
+ char label[MAX_IDLETIMER_LABEL_SIZE];
+
+ __u8 send_nl_msg; /* unused: for compatibility with Android */
+ __u8 timer_type;
+
+ /* for kernel module internal use only */
+ struct idletimer_tg *timer __attribute__((aligned(8)));
+};
#endif
diff --git a/include/uapi/linux/netfilter/xt_SECMARK.h b/include/uapi/linux/netfilter/xt_SECMARK.h
index 1f2a708413f5..beb2cadba8a9 100644
--- a/include/uapi/linux/netfilter/xt_SECMARK.h
+++ b/include/uapi/linux/netfilter/xt_SECMARK.h
@@ -20,4 +20,10 @@ struct xt_secmark_target_info {
char secctx[SECMARK_SECCTX_MAX];
};
+struct xt_secmark_target_info_v1 {
+ __u8 mode;
+ char secctx[SECMARK_SECCTX_MAX];
+ __u32 secid;
+};
+
#endif /*_XT_SECMARK_H_target */
diff --git a/include/uapi/linux/netfilter/xt_connmark.h b/include/uapi/linux/netfilter/xt_connmark.h
index 1aa5c955ee1e..41b578ccd03b 100644
--- a/include/uapi/linux/netfilter/xt_connmark.h
+++ b/include/uapi/linux/netfilter/xt_connmark.h
@@ -1,18 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/* Copyright (C) 2002,2004 MARA Systems AB <https://www.marasystems.com>
+ * by Henrik Nordstrom <hno@marasystems.com>
+ */
+
#ifndef _XT_CONNMARK_H
#define _XT_CONNMARK_H
#include <linux/types.h>
-/* Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
- * by Henrik Nordstrom <hno@marasystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
enum {
XT_CONNMARK_SET = 0,
XT_CONNMARK_SAVE,
diff --git a/include/uapi/linux/netfilter/xt_osf.h b/include/uapi/linux/netfilter/xt_osf.h
index 6e466236ca4b..f1f097896bdf 100644
--- a/include/uapi/linux/netfilter/xt_osf.h
+++ b/include/uapi/linux/netfilter/xt_osf.h
@@ -1,20 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* Copyright (c) 2003+ Evgeniy Polyakov <johnpol@2ka.mxt.ru>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _XT_OSF_H
diff --git a/include/uapi/linux/netfilter_arp/arp_tables.h b/include/uapi/linux/netfilter_arp/arp_tables.h
index bbf5af2b67a8..a6ac2463f787 100644
--- a/include/uapi/linux/netfilter_arp/arp_tables.h
+++ b/include/uapi/linux/netfilter_arp/arp_tables.h
@@ -109,7 +109,7 @@ struct arpt_entry
struct xt_counters counters;
/* The matches (if any), then the target. */
- unsigned char elems[0];
+ unsigned char elems[];
};
/*
@@ -181,7 +181,7 @@ struct arpt_replace {
struct xt_counters __user *counters;
/* The entries (hang off end: not really an array). */
- struct arpt_entry entries[0];
+ struct arpt_entry entries[];
};
/* The argument to ARPT_SO_GET_ENTRIES. */
@@ -193,7 +193,7 @@ struct arpt_get_entries {
unsigned int size;
/* The entries. */
- struct arpt_entry entrytable[0];
+ struct arpt_entry entrytable[];
};
/* Helper functions */
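
The [0]-to-[] changes swap the old GNU zero-length arrays for C99 flexible
array members without altering the binary layout. A sketch of how such a
blob is walked in userspace (hypothetical helper; blob comes from
getsockopt() with ARPT_SO_GET_ENTRIES):

#include <linux/netfilter_arp/arp_tables.h>

/* next_offset chains the variable-length entries; elems[] holds the
 * matches followed by the target inside each entry. */
static void walk_entries(const struct arpt_get_entries *blob)
{
	unsigned int off = 0;

	while (off < blob->size) {
		const struct arpt_entry *e = (const struct arpt_entry *)
			((const char *)blob->entrytable + off);
		/* inspect e->elems here */
		off += e->next_offset;
	}
}
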
diff --git a/include/uapi/linux/netfilter_bridge/ebt_among.h b/include/uapi/linux/netfilter_bridge/ebt_among.h
index 9acf757bc1f7..73b26a280c4f 100644
--- a/include/uapi/linux/netfilter_bridge/ebt_among.h
+++ b/include/uapi/linux/netfilter_bridge/ebt_among.h
@@ -40,7 +40,7 @@ struct ebt_mac_wormhash_tuple {
struct ebt_mac_wormhash {
int table[257];
int poolsize;
- struct ebt_mac_wormhash_tuple pool[0];
+ struct ebt_mac_wormhash_tuple pool[];
};
#define ebt_mac_wormhash_size(x) ((x) ? sizeof(struct ebt_mac_wormhash) \
diff --git a/include/uapi/linux/netfilter_decnet.h b/include/uapi/linux/netfilter_decnet.h
deleted file mode 100644
index 3c77f54560f2..000000000000
--- a/include/uapi/linux/netfilter_decnet.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __LINUX_DECNET_NETFILTER_H
-#define __LINUX_DECNET_NETFILTER_H
-
-/* DECnet-specific defines for netfilter.
- * This file (C) Steve Whitehouse 1999 derived from the
- * ipv4 netfilter header file which is
- * (C)1998 Rusty Russell -- This code is GPL.
- */
-
-#include <linux/netfilter.h>
-
-/* only for userspace compatibility */
-#ifndef __KERNEL__
-
-#include <limits.h> /* for INT_MIN, INT_MAX */
-
-/* kernel define is in netfilter_defs.h */
-#define NF_DN_NUMHOOKS 7
-#endif /* ! __KERNEL__ */
-
-/* DECnet Hooks */
-/* After promisc drops, checksum checks. */
-#define NF_DN_PRE_ROUTING 0
-/* If the packet is destined for this box. */
-#define NF_DN_LOCAL_IN 1
-/* If the packet is destined for another interface. */
-#define NF_DN_FORWARD 2
-/* Packets coming from a local process. */
-#define NF_DN_LOCAL_OUT 3
-/* Packets about to hit the wire. */
-#define NF_DN_POST_ROUTING 4
-/* Input Hello Packets */
-#define NF_DN_HELLO 5
-/* Input Routing Packets */
-#define NF_DN_ROUTE 6
-
-enum nf_dn_hook_priorities {
- NF_DN_PRI_FIRST = INT_MIN,
- NF_DN_PRI_CONNTRACK = -200,
- NF_DN_PRI_MANGLE = -150,
- NF_DN_PRI_NAT_DST = -100,
- NF_DN_PRI_FILTER = 0,
- NF_DN_PRI_NAT_SRC = 100,
- NF_DN_PRI_DNRTMSG = 200,
- NF_DN_PRI_LAST = INT_MAX,
-};
-
-struct nf_dn_rtmsg {
- int nfdn_ifindex;
-};
-
-#define NFDN_RTMSG(r) ((unsigned char *)(r) + NLMSG_ALIGN(sizeof(struct nf_dn_rtmsg)))
-
-#ifndef __KERNEL__
-/* backwards compatibility for userspace */
-#define DNRMG_L1_GROUP 0x01
-#define DNRMG_L2_GROUP 0x02
-#endif
-
-enum {
- DNRNG_NLGRP_NONE,
-#define DNRNG_NLGRP_NONE DNRNG_NLGRP_NONE
- DNRNG_NLGRP_L1,
-#define DNRNG_NLGRP_L1 DNRNG_NLGRP_L1
- DNRNG_NLGRP_L2,
-#define DNRNG_NLGRP_L2 DNRNG_NLGRP_L2
- __DNRNG_NLGRP_MAX
-};
-#define DNRNG_NLGRP_MAX (__DNRNG_NLGRP_MAX - 1)
-
-#endif /*__LINUX_DECNET_NETFILTER_H*/
diff --git a/include/uapi/linux/netfilter_ipv4/ip_tables.h b/include/uapi/linux/netfilter_ipv4/ip_tables.h
index 50c7fee625ae..1485df28b239 100644
--- a/include/uapi/linux/netfilter_ipv4/ip_tables.h
+++ b/include/uapi/linux/netfilter_ipv4/ip_tables.h
@@ -121,7 +121,7 @@ struct ipt_entry {
struct xt_counters counters;
/* The matches (if any), then the target. */
- unsigned char elems[0];
+ unsigned char elems[];
};
/*
@@ -203,7 +203,7 @@ struct ipt_replace {
struct xt_counters __user *counters;
/* The entries (hang off end: not really an array). */
- struct ipt_entry entries[0];
+ struct ipt_entry entries[];
};
/* The argument to IPT_SO_GET_ENTRIES. */
@@ -215,7 +215,7 @@ struct ipt_get_entries {
unsigned int size;
/* The entries. */
- struct ipt_entry entrytable[0];
+ struct ipt_entry entrytable[];
};
/* Helper functions */
diff --git a/include/uapi/linux/netfilter_ipv6/ip6_tables.h b/include/uapi/linux/netfilter_ipv6/ip6_tables.h
index d9e364f96a5c..766e8e0bcc68 100644
--- a/include/uapi/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/uapi/linux/netfilter_ipv6/ip6_tables.h
@@ -243,7 +243,7 @@ struct ip6t_replace {
struct xt_counters __user *counters;
/* The entries (hang off end: not really an array). */
- struct ip6t_entry entries[0];
+ struct ip6t_entry entries[];
};
/* The argument to IP6T_SO_GET_ENTRIES. */
@@ -255,7 +255,7 @@ struct ip6t_get_entries {
unsigned int size;
/* The entries. */
- struct ip6t_entry entrytable[0];
+ struct ip6t_entry entrytable[];
};
/* Helper functions */
diff --git a/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h b/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h
index 23e91a9c2583..0b7b16dbdec2 100644
--- a/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h
+++ b/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h
@@ -17,4 +17,4 @@ struct ip6t_log_info {
char prefix[30];
};
-#endif /*_IPT_LOG_H*/
+#endif /* _IP6T_LOG_H */
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 0a4d73317759..e2ae82e3f9f7 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -2,7 +2,7 @@
#ifndef _UAPI__LINUX_NETLINK_H
#define _UAPI__LINUX_NETLINK_H
-#include <linux/kernel.h>
+#include <linux/const.h>
#include <linux/socket.h> /* for __kernel_sa_family_t */
#include <linux/types.h>
@@ -20,7 +20,7 @@
#define NETLINK_CONNECTOR 11
#define NETLINK_NETFILTER 12 /* netfilter subsystem */
#define NETLINK_IP6_FW 13
-#define NETLINK_DNRTMSG 14 /* DECnet routing messages */
+#define NETLINK_DNRTMSG 14 /* DECnet routing messages (obsolete) */
#define NETLINK_KOBJECT_UEVENT 15 /* Kernel messages to userspace */
#define NETLINK_GENERIC 16
/* leave room for NETLINK_DM (DM Events) */
@@ -41,12 +41,20 @@ struct sockaddr_nl {
__u32 nl_groups; /* multicast groups mask */
};
+/**
+ * struct nlmsghdr - fixed format metadata header of Netlink messages
+ * @nlmsg_len: Length of message including header
+ * @nlmsg_type: Message content type
+ * @nlmsg_flags: Additional flags
+ * @nlmsg_seq: Sequence number
+ * @nlmsg_pid: Sending process port ID
+ */
struct nlmsghdr {
- __u32 nlmsg_len; /* Length of message including header */
- __u16 nlmsg_type; /* Message content */
- __u16 nlmsg_flags; /* Additional flags */
- __u32 nlmsg_seq; /* Sequence number */
- __u32 nlmsg_pid; /* Sending process port ID */
+ __u32 nlmsg_len;
+ __u16 nlmsg_type;
+ __u16 nlmsg_flags;
+ __u32 nlmsg_seq;
+ __u32 nlmsg_pid;
};
/* Flags values */
@@ -54,7 +62,7 @@ struct nlmsghdr {
#define NLM_F_REQUEST 0x01 /* It is request message. */
#define NLM_F_MULTI 0x02 /* Multipart message, terminated by NLMSG_DONE */
#define NLM_F_ACK 0x04 /* Reply with ack, with zero or error code */
-#define NLM_F_ECHO 0x08 /* Echo this request */
+#define NLM_F_ECHO 0x08 /* Receive resulting notifications */
#define NLM_F_DUMP_INTR 0x10 /* Dump was inconsistent due to sequence change */
#define NLM_F_DUMP_FILTERED 0x20 /* Dump was filtered as requested */
@@ -72,6 +80,7 @@ struct nlmsghdr {
/* Modifiers to DELETE request */
#define NLM_F_NONREC 0x100 /* Do not delete recursively */
+#define NLM_F_BULK 0x200 /* Delete multiple objects */
/* Flags for ACK message */
#define NLM_F_CAPPED 0x100 /* request was capped */
@@ -91,9 +100,10 @@ struct nlmsghdr {
#define NLMSG_HDRLEN ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr)))
#define NLMSG_LENGTH(len) ((len) + NLMSG_HDRLEN)
#define NLMSG_SPACE(len) NLMSG_ALIGN(NLMSG_LENGTH(len))
-#define NLMSG_DATA(nlh) ((void*)(((char*)nlh) + NLMSG_LENGTH(0)))
+#define NLMSG_DATA(nlh) ((void *)(((char *)nlh) + NLMSG_HDRLEN))
#define NLMSG_NEXT(nlh,len) ((len) -= NLMSG_ALIGN((nlh)->nlmsg_len), \
- (struct nlmsghdr*)(((char*)(nlh)) + NLMSG_ALIGN((nlh)->nlmsg_len)))
+ (struct nlmsghdr *)(((char *)(nlh)) + \
+ NLMSG_ALIGN((nlh)->nlmsg_len)))
#define NLMSG_OK(nlh,len) ((len) >= (int)sizeof(struct nlmsghdr) && \
(nlh)->nlmsg_len >= sizeof(struct nlmsghdr) && \
(nlh)->nlmsg_len <= (len))
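
The reworked macros keep their historical semantics (NLMSG_HDRLEN equals
NLMSG_LENGTH(0)). They are meant to be combined into the canonical receive
loop, roughly (hypothetical helper; buf and len come from recv() on a
netlink socket):

#include <linux/netlink.h>

static void handle(struct nlmsghdr *nlh);	/* payload at NLMSG_DATA(nlh) */

static void parse(char *buf, int len)
{
	struct nlmsghdr *nlh;

	for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
	     nlh = NLMSG_NEXT(nlh, len)) {
		if (nlh->nlmsg_type == NLMSG_DONE)
			break;
		handle(nlh);
	}
}
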
@@ -129,6 +139,11 @@ struct nlmsgerr {
* @NLMSGERR_ATTR_COOKIE: arbitrary subsystem specific cookie to
* be used - in the success case - to identify a created
* object or operation or similar (binary)
+ * @NLMSGERR_ATTR_POLICY: policy for a rejected attribute
+ * @NLMSGERR_ATTR_MISS_TYPE: type of a missing required attribute,
+ * %NLMSGERR_ATTR_MISS_NEST will not be present if the attribute was
+ * missing at the message level
+ * @NLMSGERR_ATTR_MISS_NEST: offset of the nest where attribute was missing
* @__NLMSGERR_ATTR_MAX: number of attributes
* @NLMSGERR_ATTR_MAX: highest attribute number
*/
@@ -137,6 +152,9 @@ enum nlmsgerr_attrs {
NLMSGERR_ATTR_MSG,
NLMSGERR_ATTR_OFFS,
NLMSGERR_ATTR_COOKIE,
+ NLMSGERR_ATTR_POLICY,
+ NLMSGERR_ATTR_MISS_TYPE,
+ NLMSGERR_ATTR_MISS_NEST,
__NLMSGERR_ATTR_MAX,
NLMSGERR_ATTR_MAX = __NLMSGERR_ATTR_MAX - 1
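
For orientation, a sketch of where these TLVs sit inside an extended ACK,
following the convention used by existing parsers such as libmnl and
iproute2 (hypothetical helper; NLM_F_ACK_TLVS is the ACK-flags companion
of NLM_F_CAPPED in this header):

#include <linux/netlink.h>

/* Unless the kernel capped the error payload (NLM_F_CAPPED), the
 * offending request is echoed after struct nlmsgerr and must be
 * skipped before the TLVs (NLMSGERR_ATTR_MSG, NLMSGERR_ATTR_MISS_TYPE,
 * ...) begin. */
static struct nlattr *ext_ack_attrs(struct nlmsghdr *nlh, int *rem)
{
	struct nlmsgerr *err = NLMSG_DATA(nlh);
	unsigned int hlen = sizeof(*err);

	if (!(nlh->nlmsg_flags & NLM_F_ACK_TLVS))
		return NULL;
	if (!(nlh->nlmsg_flags & NLM_F_CAPPED))
		hlen += err->msg.nlmsg_len - NLMSG_HDRLEN;
	*rem = nlh->nlmsg_len - NLMSG_HDRLEN - hlen;
	return (struct nlattr *)((char *)err + hlen);
}
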
@@ -249,4 +267,112 @@ struct nla_bitfield32 {
__u32 selector;
};
+/*
+ * policy descriptions - it's specific to each family how this is used.
+ * Normally, it should be retrieved via a dump inside another attribute
+ * specifying where it applies.
+ */
+
+/**
+ * enum netlink_attribute_type - type of an attribute
+ * @NL_ATTR_TYPE_INVALID: unused
+ * @NL_ATTR_TYPE_FLAG: flag attribute (present/not present)
+ * @NL_ATTR_TYPE_U8: 8-bit unsigned attribute
+ * @NL_ATTR_TYPE_U16: 16-bit unsigned attribute
+ * @NL_ATTR_TYPE_U32: 32-bit unsigned attribute
+ * @NL_ATTR_TYPE_U64: 64-bit unsigned attribute
+ * @NL_ATTR_TYPE_S8: 8-bit signed attribute
+ * @NL_ATTR_TYPE_S16: 16-bit signed attribute
+ * @NL_ATTR_TYPE_S32: 32-bit signed attribute
+ * @NL_ATTR_TYPE_S64: 64-bit signed attribute
+ * @NL_ATTR_TYPE_BINARY: binary data, min/max length may be specified
+ * @NL_ATTR_TYPE_STRING: string, min/max length may be specified
+ * @NL_ATTR_TYPE_NUL_STRING: NUL-terminated string,
+ * min/max length may be specified
+ * @NL_ATTR_TYPE_NESTED: nested, i.e. the content of this attribute
+ * consists of sub-attributes. The nested policy and maxtype
+ * inside may be specified.
+ * @NL_ATTR_TYPE_NESTED_ARRAY: nested array, i.e. the content of this
+ * attribute contains sub-attributes whose type is irrelevant
+ * (just used to separate the array entries) and each such array
+ * entry has attributes again; the policy for those inner ones
+ * and the corresponding maxtype may be specified.
+ * @NL_ATTR_TYPE_BITFIELD32: &struct nla_bitfield32 attribute
+ */
+enum netlink_attribute_type {
+ NL_ATTR_TYPE_INVALID,
+
+ NL_ATTR_TYPE_FLAG,
+
+ NL_ATTR_TYPE_U8,
+ NL_ATTR_TYPE_U16,
+ NL_ATTR_TYPE_U32,
+ NL_ATTR_TYPE_U64,
+
+ NL_ATTR_TYPE_S8,
+ NL_ATTR_TYPE_S16,
+ NL_ATTR_TYPE_S32,
+ NL_ATTR_TYPE_S64,
+
+ NL_ATTR_TYPE_BINARY,
+ NL_ATTR_TYPE_STRING,
+ NL_ATTR_TYPE_NUL_STRING,
+
+ NL_ATTR_TYPE_NESTED,
+ NL_ATTR_TYPE_NESTED_ARRAY,
+
+ NL_ATTR_TYPE_BITFIELD32,
+};
+
+/**
+ * enum netlink_policy_type_attr - policy type attributes
+ * @NL_POLICY_TYPE_ATTR_UNSPEC: unused
+ * @NL_POLICY_TYPE_ATTR_TYPE: type of the attribute,
+ * &enum netlink_attribute_type (U32)
+ * @NL_POLICY_TYPE_ATTR_MIN_VALUE_S: minimum value for signed
+ * integers (S64)
+ * @NL_POLICY_TYPE_ATTR_MAX_VALUE_S: maximum value for signed
+ * integers (S64)
+ * @NL_POLICY_TYPE_ATTR_MIN_VALUE_U: minimum value for unsigned
+ * integers (U64)
+ * @NL_POLICY_TYPE_ATTR_MAX_VALUE_U: maximum value for unsigned
+ * integers (U64)
+ * @NL_POLICY_TYPE_ATTR_MIN_LENGTH: minimum length for binary
+ * attributes, no minimum if not given (U32)
+ * @NL_POLICY_TYPE_ATTR_MAX_LENGTH: maximum length for binary
+ * attributes, no maximum if not given (U32)
+ * @NL_POLICY_TYPE_ATTR_POLICY_IDX: sub policy for nested and
+ * nested array types (U32)
+ * @NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE: maximum sub policy
+ * attribute for nested and nested array types, this can
+ * in theory be < the size of the policy pointed to by
+ * the index, if limited inside the nesting (U32)
+ * @NL_POLICY_TYPE_ATTR_BITFIELD32_MASK: valid mask for the
+ * bitfield32 type (U32)
+ * @NL_POLICY_TYPE_ATTR_MASK: mask of valid bits for unsigned integers (U64)
+ * @NL_POLICY_TYPE_ATTR_PAD: pad attribute for 64-bit alignment
+ *
+ * @__NL_POLICY_TYPE_ATTR_MAX: number of attributes
+ * @NL_POLICY_TYPE_ATTR_MAX: highest attribute number
+ */
+enum netlink_policy_type_attr {
+ NL_POLICY_TYPE_ATTR_UNSPEC,
+ NL_POLICY_TYPE_ATTR_TYPE,
+ NL_POLICY_TYPE_ATTR_MIN_VALUE_S,
+ NL_POLICY_TYPE_ATTR_MAX_VALUE_S,
+ NL_POLICY_TYPE_ATTR_MIN_VALUE_U,
+ NL_POLICY_TYPE_ATTR_MAX_VALUE_U,
+ NL_POLICY_TYPE_ATTR_MIN_LENGTH,
+ NL_POLICY_TYPE_ATTR_MAX_LENGTH,
+ NL_POLICY_TYPE_ATTR_POLICY_IDX,
+ NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE,
+ NL_POLICY_TYPE_ATTR_BITFIELD32_MASK,
+ NL_POLICY_TYPE_ATTR_PAD,
+ NL_POLICY_TYPE_ATTR_MASK,
+
+ /* keep last */
+ __NL_POLICY_TYPE_ATTR_MAX,
+ NL_POLICY_TYPE_ATTR_MAX = __NL_POLICY_TYPE_ATTR_MAX - 1
+};
+
#endif /* _UAPI__LINUX_NETLINK_H */
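
The nla_bitfield32 type referenced by NL_ATTR_TYPE_BITFIELD32 pairs a value
with a selector so that only named bits are touched. A minimal sketch:

#include <linux/netlink.h>

/* selector names the bits being modified, value gives their new state;
 * bits outside the selector keep their old values across a round-trip. */
static struct nla_bitfield32 change_flag(__u32 flag, int enable)
{
	struct nla_bitfield32 bf = {
		.value    = enable ? flag : 0,
		.selector = flag,
	};
	return bf;
}
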
diff --git a/include/uapi/linux/nexthop.h b/include/uapi/linux/nexthop.h
index 7b61867e9848..d8ffa8c9ca78 100644
--- a/include/uapi/linux/nexthop.h
+++ b/include/uapi/linux/nexthop.h
@@ -21,7 +21,10 @@ struct nexthop_grp {
};
enum {
- NEXTHOP_GRP_TYPE_MPATH, /* default type if not specified */
+ NEXTHOP_GRP_TYPE_MPATH, /* hash-threshold nexthop group
+ * default type if not specified
+ */
+ NEXTHOP_GRP_TYPE_RES, /* resilient nexthop group */
__NEXTHOP_GRP_TYPE_MAX,
};
@@ -49,8 +52,53 @@ enum {
NHA_GROUPS, /* flag; only return nexthop groups in dump */
NHA_MASTER, /* u32; only return nexthops with given master dev */
+ NHA_FDB, /* flag; nexthop belongs to a bridge fdb */
+ /* if NHA_FDB is added, OIF, BLACKHOLE, ENCAP cannot be set */
+
+ /* nested; resilient nexthop group attributes */
+ NHA_RES_GROUP,
+ /* nested; nexthop bucket attributes */
+ NHA_RES_BUCKET,
+
__NHA_MAX,
};
#define NHA_MAX (__NHA_MAX - 1)
+
+enum {
+ NHA_RES_GROUP_UNSPEC,
+ /* Pad attribute for 64-bit alignment. */
+ NHA_RES_GROUP_PAD = NHA_RES_GROUP_UNSPEC,
+
+ /* u16; number of nexthop buckets in a resilient nexthop group */
+ NHA_RES_GROUP_BUCKETS,
+ /* clock_t as u32; nexthop bucket idle timer (per-group) */
+ NHA_RES_GROUP_IDLE_TIMER,
+ /* clock_t as u32; nexthop unbalanced timer */
+ NHA_RES_GROUP_UNBALANCED_TIMER,
+ /* clock_t as u64; nexthop unbalanced time */
+ NHA_RES_GROUP_UNBALANCED_TIME,
+
+ __NHA_RES_GROUP_MAX,
+};
+
+#define NHA_RES_GROUP_MAX (__NHA_RES_GROUP_MAX - 1)
+
+enum {
+ NHA_RES_BUCKET_UNSPEC,
+ /* Pad attribute for 64-bit alignment. */
+ NHA_RES_BUCKET_PAD = NHA_RES_BUCKET_UNSPEC,
+
+ /* u16; nexthop bucket index */
+ NHA_RES_BUCKET_INDEX,
+ /* clock_t as u64; nexthop bucket idle time */
+ NHA_RES_BUCKET_IDLE_TIME,
+ /* u32; nexthop id assigned to the nexthop bucket */
+ NHA_RES_BUCKET_NH_ID,
+
+ __NHA_RES_BUCKET_MAX,
+};
+
+#define NHA_RES_BUCKET_MAX (__NHA_RES_BUCKET_MAX - 1)
+
#endif
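
The timer attributes above are carried as clock_t, i.e. USER_HZ ticks. A
sketch of the conversion userspace would do before storing a value in
NHA_RES_GROUP_IDLE_TIMER or NHA_RES_GROUP_UNBALANCED_TIMER (assuming the
usual rtnetlink clock_t convention):

#include <unistd.h>
#include <linux/types.h>

/* Scale seconds to USER_HZ ticks, as reported by sysconf(_SC_CLK_TCK). */
static __u32 nh_timer_ticks(unsigned int seconds)
{
	return seconds * (__u32)sysconf(_SC_CLK_TCK);
}
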
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
index f6e3c8c9c744..4fa4e979e948 100644
--- a/include/uapi/linux/nfc.h
+++ b/include/uapi/linux/nfc.h
@@ -263,7 +263,7 @@ enum nfc_sdp_attr {
#define NFC_SE_ENABLED 0x1
struct sockaddr_nfc {
- sa_family_t sa_family;
+ __kernel_sa_family_t sa_family;
__u32 dev_idx;
__u32 target_idx;
__u32 nfc_protocol;
@@ -271,14 +271,14 @@ struct sockaddr_nfc {
#define NFC_LLCP_MAX_SERVICE_NAME 63
struct sockaddr_nfc_llcp {
- sa_family_t sa_family;
+ __kernel_sa_family_t sa_family;
__u32 dev_idx;
__u32 target_idx;
__u32 nfc_protocol;
__u8 dsap; /* Destination SAP, if known */
__u8 ssap; /* Source SAP to be bound to */
char service_name[NFC_LLCP_MAX_SERVICE_NAME]; /* Service name URI */;
- size_t service_name_len;
+ __kernel_size_t service_name_len;
};
/* NFC socket protocols */
diff --git a/include/uapi/linux/nfs3.h b/include/uapi/linux/nfs3.h
index 37e4b34e6b43..c22ab77713bd 100644
--- a/include/uapi/linux/nfs3.h
+++ b/include/uapi/linux/nfs3.h
@@ -63,6 +63,12 @@ enum nfs3_ftype {
NF3BAD = 8
};
+enum nfs3_time_how {
+ DONT_CHANGE = 0,
+ SET_TO_SERVER_TIME = 1,
+ SET_TO_CLIENT_TIME = 2,
+};
+
struct nfs3_fh {
unsigned short size;
unsigned char data[NFS3_FHSIZE];
diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h
index 8572930cf5b0..1d2043708bf1 100644
--- a/include/uapi/linux/nfs4.h
+++ b/include/uapi/linux/nfs4.h
@@ -33,6 +33,9 @@
#define NFS4_ACCESS_EXTEND 0x0008
#define NFS4_ACCESS_DELETE 0x0010
#define NFS4_ACCESS_EXECUTE 0x0020
+#define NFS4_ACCESS_XAREAD 0x0040
+#define NFS4_ACCESS_XAWRITE 0x0080
+#define NFS4_ACCESS_XALIST 0x0100
#define NFS4_FH_PERSISTENT 0x0000
#define NFS4_FH_NOEXPIRE_WITH_OPEN 0x0001
@@ -42,6 +45,7 @@
#define NFS4_OPEN_RESULT_CONFIRM 0x0002
#define NFS4_OPEN_RESULT_LOCKTYPE_POSIX 0x0004
+#define NFS4_OPEN_RESULT_PRESERVE_UNLINKED 0x0008
#define NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK 0x0020
#define NFS4_SHARE_ACCESS_MASK 0x000F
@@ -136,6 +140,8 @@
#define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A 0x40000000
#define EXCHGID4_FLAG_CONFIRMED_R 0x80000000
+
+#define EXCHGID4_FLAG_SUPP_FENCE_OPS 0x00000004
/*
* Since the validity of these bits depends on whether
* they're set in the argument or response, have separate
@@ -143,6 +149,7 @@
*/
#define EXCHGID4_FLAG_MASK_A 0x40070103
#define EXCHGID4_FLAG_MASK_R 0x80070103
+#define EXCHGID4_2_FLAG_MASK_R 0x80070107
#define SEQ4_STATUS_CB_PATH_DOWN 0x00000001
#define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING 0x00000002
@@ -172,9 +179,3 @@
#define NFS4_MAX_BACK_CHANNEL_OPS 2
#endif /* _UAPI_LINUX_NFS4_H */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/include/uapi/linux/nfs_fs.h b/include/uapi/linux/nfs_fs.h
index 7bcc8cd6831d..ae0de165c014 100644
--- a/include/uapi/linux/nfs_fs.h
+++ b/include/uapi/linux/nfs_fs.h
@@ -52,10 +52,11 @@
#define NFSDBG_CALLBACK 0x0100
#define NFSDBG_CLIENT 0x0200
#define NFSDBG_MOUNT 0x0400
-#define NFSDBG_FSCACHE 0x0800
+#define NFSDBG_FSCACHE 0x0800 /* unused */
#define NFSDBG_PNFS 0x1000
#define NFSDBG_PNFS_LD 0x2000
#define NFSDBG_STATE 0x4000
+#define NFSDBG_XATTRCACHE 0x8000
#define NFSDBG_ALL 0xFFFF
diff --git a/include/uapi/linux/nfsacl.h b/include/uapi/linux/nfsacl.h
index ca9a8501ff30..2c2ad204d3b0 100644
--- a/include/uapi/linux/nfsacl.h
+++ b/include/uapi/linux/nfsacl.h
@@ -9,11 +9,13 @@
#define NFS_ACL_PROGRAM 100227
+#define ACLPROC2_NULL 0
#define ACLPROC2_GETACL 1
#define ACLPROC2_SETACL 2
#define ACLPROC2_GETATTR 3
#define ACLPROC2_ACCESS 4
+#define ACLPROC3_NULL 0
#define ACLPROC3_GETACL 1
#define ACLPROC3_SETACL 2
diff --git a/include/uapi/linux/nfsd/nfsfh.h b/include/uapi/linux/nfsd/nfsfh.h
deleted file mode 100644
index ff0ca88b1c8f..000000000000
--- a/include/uapi/linux/nfsd/nfsfh.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * This file describes the layout of the file handles as passed
- * over the wire.
- *
- * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
- */
-
-#ifndef _UAPI_LINUX_NFSD_FH_H
-#define _UAPI_LINUX_NFSD_FH_H
-
-#include <linux/types.h>
-#include <linux/nfs.h>
-#include <linux/nfs2.h>
-#include <linux/nfs3.h>
-#include <linux/nfs4.h>
-
-/*
- * This is the old "dentry style" Linux NFSv2 file handle.
- *
- * The xino and xdev fields are currently used to transport the
- * ino/dev of the exported inode.
- */
-struct nfs_fhbase_old {
- __u32 fb_dcookie; /* dentry cookie - always 0xfeebbaca */
- __u32 fb_ino; /* our inode number */
- __u32 fb_dirino; /* dir inode number, 0 for directories */
- __u32 fb_dev; /* our device */
- __u32 fb_xdev;
- __u32 fb_xino;
- __u32 fb_generation;
-};
-
-/*
- * This is the new flexible, extensible style NFSv2/v3/v4 file handle.
- * by Neil Brown <neilb@cse.unsw.edu.au> - March 2000
- *
- * The file handle starts with a sequence of four-byte words.
- * The first word contains a version number (1) and three descriptor bytes
- * that tell how the remaining 3 variable length fields should be handled.
- * These three bytes are auth_type, fsid_type and fileid_type.
- *
- * All four-byte values are in host-byte-order.
- *
- * The auth_type field is deprecated and must be set to 0.
- *
- * The fsid_type identifies how the filesystem (or export point) is
- * encoded.
- * Current values:
- * 0 - 4 byte device id (ms-2-bytes major, ls-2-bytes minor), 4byte inode number
- * NOTE: we cannot use the kdev_t device id value, because kdev_t.h
- * says we mustn't. We must break it up and reassemble.
- * 1 - 4 byte user specified identifier
- * 2 - 4 byte major, 4 byte minor, 4 byte inode number - DEPRECATED
- * 3 - 4 byte device id, encoded for user-space, 4 byte inode number
- * 4 - 4 byte inode number and 4 byte uuid
- * 5 - 8 byte uuid
- * 6 - 16 byte uuid
- * 7 - 8 byte inode number and 16 byte uuid
- *
- * The fileid_type identified how the file within the filesystem is encoded.
- * The values for this field are filesystem specific, exccept that
- * filesystems must not use the values '0' or '0xff'. 'See enum fid_type'
- * in include/linux/exportfs.h for currently registered values.
- */
-struct nfs_fhbase_new {
- __u8 fb_version; /* == 1, even => nfs_fhbase_old */
- __u8 fb_auth_type;
- __u8 fb_fsid_type;
- __u8 fb_fileid_type;
- __u32 fb_auth[1];
-/* __u32 fb_fsid[0]; floating */
-/* __u32 fb_fileid[0]; floating */
-};
-
-struct knfsd_fh {
- unsigned int fh_size; /* significant for NFSv3.
- * Points to the current size while building
- * a new file handle
- */
- union {
- struct nfs_fhbase_old fh_old;
- __u32 fh_pad[NFS4_FHSIZE/4];
- struct nfs_fhbase_new fh_new;
- } fh_base;
-};
-
-#define ofh_dcookie fh_base.fh_old.fb_dcookie
-#define ofh_ino fh_base.fh_old.fb_ino
-#define ofh_dirino fh_base.fh_old.fb_dirino
-#define ofh_dev fh_base.fh_old.fb_dev
-#define ofh_xdev fh_base.fh_old.fb_xdev
-#define ofh_xino fh_base.fh_old.fb_xino
-#define ofh_generation fh_base.fh_old.fb_generation
-
-#define fh_version fh_base.fh_new.fb_version
-#define fh_fsid_type fh_base.fh_new.fb_fsid_type
-#define fh_auth_type fh_base.fh_new.fb_auth_type
-#define fh_fileid_type fh_base.fh_new.fb_fileid_type
-#define fh_fsid fh_base.fh_new.fb_auth
-
-/* Do not use, provided for userspace compatiblity. */
-#define fh_auth fh_base.fh_new.fb_auth
-
-#endif /* _UAPI_LINUX_NFSD_FH_H */
diff --git a/include/uapi/linux/nitro_enclaves.h b/include/uapi/linux/nitro_enclaves.h
new file mode 100644
index 000000000000..e808f5ba124d
--- /dev/null
+++ b/include/uapi/linux/nitro_enclaves.h
@@ -0,0 +1,359 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright 2020-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ */
+
+#ifndef _UAPI_LINUX_NITRO_ENCLAVES_H_
+#define _UAPI_LINUX_NITRO_ENCLAVES_H_
+
+#include <linux/types.h>
+
+/**
+ * DOC: Nitro Enclaves (NE) Kernel Driver Interface
+ */
+
+/**
+ * NE_CREATE_VM - The command is used to create a slot that is associated with
+ * an enclave VM.
+ * The generated unique slot id is an output parameter.
+ * The ioctl can be invoked on the /dev/nitro_enclaves fd, before
+ * setting any resources, such as memory and vCPUs, for an
+ * enclave. Memory and vCPUs are set for the slot mapped to an enclave.
+ * A NE CPU pool has to be set before calling this function. The
+ * pool can be set after the NE driver load, using
+ * /sys/module/nitro_enclaves/parameters/ne_cpus.
+ * Its format is detailed in the cpu-lists section:
+ * https://www.kernel.org/doc/html/latest/admin-guide/kernel-parameters.html
+ * CPU 0 and its siblings have to remain available for the
+ * primary / parent VM, so they cannot be set for enclaves. Full
+ * CPU core(s), from the same NUMA node, need(s) to be included
+ * in the CPU pool.
+ *
+ * Context: Process context.
+ * Return:
+ * * Enclave file descriptor - Enclave file descriptor used with
+ * ioctl calls to set vCPUs and memory
+ * regions, then start the enclave.
+ * * -1 - There was a failure in the ioctl logic.
+ * On failure, errno is set to:
+ * * EFAULT - copy_to_user() failure.
+ * * ENOMEM - Memory allocation failure for internal
+ * bookkeeping variables.
+ * * NE_ERR_NO_CPUS_AVAIL_IN_POOL - No NE CPU pool set / no CPUs available
+ * in the pool.
+ * * Error codes from get_unused_fd_flags() and anon_inode_getfile().
+ * * Error codes from the NE PCI device request.
+ */
+#define NE_CREATE_VM _IOR(0xAE, 0x20, __u64)
+
+/**
+ * NE_ADD_VCPU - The command is used to set a vCPU for an enclave. The vCPU can
+ * be auto-chosen from the NE CPU pool or it can be set by the
+ * caller, with the note that it needs to be available in the NE
+ * CPU pool. Full CPU core(s), from the same NUMA node, need(s) to
+ * be associated with an enclave.
+ * The vCPU id is an input / output parameter. If its value is 0,
+ * then a CPU is chosen from the enclave CPU pool and returned via
+ * this parameter.
+ * The ioctl can be invoked on the enclave fd, before an enclave
+ * is started.
+ *
+ * Context: Process context.
+ * Return:
+ * * 0 - Logic successfully completed.
+ * * -1 - There was a failure in the ioctl logic.
+ * On failure, errno is set to:
+ * * EFAULT - copy_from_user() / copy_to_user() failure.
+ * * ENOMEM - Memory allocation failure for internal
+ * bookkeeping variables.
+ * * EIO - Current task mm is not the same as the one
+ * that created the enclave.
+ * * NE_ERR_NO_CPUS_AVAIL_IN_POOL - No CPUs available in the NE CPU pool.
+ * * NE_ERR_VCPU_ALREADY_USED - The provided vCPU is already used.
+ * * NE_ERR_VCPU_NOT_IN_CPU_POOL - The provided vCPU is not available in the
+ * NE CPU pool.
+ * * NE_ERR_VCPU_INVALID_CPU_CORE - The core id of the provided vCPU is invalid
+ * or out of range.
+ * * NE_ERR_NOT_IN_INIT_STATE - The enclave is not in init state
+ * (init = before being started).
+ * * NE_ERR_INVALID_VCPU - The provided vCPU is not in the available
+ * CPUs range.
+ * * Error codes from the NE PCI device request.
+ */
+#define NE_ADD_VCPU _IOWR(0xAE, 0x21, __u32)
+
+/**
+ * NE_GET_IMAGE_LOAD_INFO - The command is used to get information needed for
+ * in-memory enclave image loading e.g. offset in
+ * enclave memory to start placing the enclave image.
+ * The image load info is an input / output parameter.
+ * It includes info provided by the caller - flags -
+ * and returns the offset in enclave memory where to
+ * start placing the enclave image.
+ * The ioctl can be invoked on the enclave fd, before
+ * an enclave is started.
+ *
+ * Context: Process context.
+ * Return:
+ * * 0 - Logic successfully completed.
+ * * -1 - There was a failure in the ioctl logic.
+ * On failure, errno is set to:
+ * * EFAULT - copy_from_user() / copy_to_user() failure.
+ * * NE_ERR_NOT_IN_INIT_STATE - The enclave is not in init state (init =
+ * before being started).
+ * * NE_ERR_INVALID_FLAG_VALUE - The value of the provided flag is invalid.
+ */
+#define NE_GET_IMAGE_LOAD_INFO _IOWR(0xAE, 0x22, struct ne_image_load_info)
+
+/**
+ * NE_SET_USER_MEMORY_REGION - The command is used to set a memory region for an
+ * enclave, given the allocated memory from the
+ * userspace. Enclave memory needs to be from the
+ * same NUMA node as the enclave CPUs.
+ * The user memory region is an input parameter. It
+ * includes info provided by the caller - flags,
+ * memory size and userspace address.
+ * The ioctl can be invoked on the enclave fd,
+ * before an enclave is started.
+ *
+ * Context: Process context.
+ * Return:
+ * * 0 - Logic successfully completed.
+ * * -1 - There was a failure in the ioctl logic.
+ * On failure, errno is set to:
+ * * EFAULT - copy_from_user() failure.
+ * * EINVAL - Invalid physical memory region(s) e.g.
+ * unaligned address.
+ * * EIO - Current task mm is not the same as
+ * the one that created the enclave.
+ * * ENOMEM - Memory allocation failure for internal
+ * bookkeeping variables.
+ * * NE_ERR_NOT_IN_INIT_STATE - The enclave is not in init state
+ * (init = before being started).
+ * * NE_ERR_INVALID_MEM_REGION_SIZE - The memory size of the region is not
+ * a multiple of 2 MiB.
+ * * NE_ERR_INVALID_MEM_REGION_ADDR - Invalid user space address given.
+ * * NE_ERR_UNALIGNED_MEM_REGION_ADDR - Unaligned user space address given.
+ * * NE_ERR_MEM_REGION_ALREADY_USED - The memory region is already used.
+ * * NE_ERR_MEM_NOT_HUGE_PAGE - The memory region is not backed by
+ * huge pages.
+ * * NE_ERR_MEM_DIFFERENT_NUMA_NODE - The memory region is not from the same
+ * NUMA node as the CPUs.
+ * * NE_ERR_MEM_MAX_REGIONS - The number of memory regions set for
+ * the enclave reached maximum.
+ * * NE_ERR_INVALID_PAGE_SIZE - The memory region is not backed by
+ * pages whose size is a multiple of 2 MiB.
+ * * NE_ERR_INVALID_FLAG_VALUE - The value of the provided flag is invalid.
+ * * Error codes from get_user_pages().
+ * * Error codes from the NE PCI device request.
+ */
+#define NE_SET_USER_MEMORY_REGION _IOW(0xAE, 0x23, struct ne_user_memory_region)
+
+/**
+ * NE_START_ENCLAVE - The command is used to trigger enclave start after the
+ * enclave resources, such as memory and CPU, have been set.
+ * The enclave start info is an input / output parameter. It
+ * includes info provided by the caller - enclave cid and
+ * flags - and returns the cid (if input cid is 0).
+ * The ioctl can be invoked on the enclave fd, after an
+ * enclave slot is created and resources, such as memory and
+ * vCPUs are set for an enclave.
+ *
+ * Context: Process context.
+ * Return:
+ * * 0 - Logic successfully completed.
+ * * -1 - There was a failure in the ioctl logic.
+ * On failure, errno is set to:
+ * * EFAULT - copy_from_user() / copy_to_user() failure.
+ * * NE_ERR_NOT_IN_INIT_STATE - The enclave is not in init state
+ * (init = before being started).
+ * * NE_ERR_NO_MEM_REGIONS_ADDED - No memory regions are set.
+ * * NE_ERR_NO_VCPUS_ADDED - No vCPUs are set.
+ * * NE_ERR_FULL_CORES_NOT_USED - Full core(s) not set for the enclave.
+ * * NE_ERR_ENCLAVE_MEM_MIN_SIZE - Enclave memory is less than minimum
+ * memory size (64 MiB).
+ * * NE_ERR_INVALID_FLAG_VALUE - The value of the provided flag is invalid.
+ * * NE_ERR_INVALID_ENCLAVE_CID - The provided enclave CID is invalid.
+ * * Error codes from the NE PCI device request.
+ */
+#define NE_START_ENCLAVE _IOWR(0xAE, 0x24, struct ne_enclave_start_info)
+
+/**
+ * DOC: NE specific error codes
+ */
+
+/**
+ * NE_ERR_VCPU_ALREADY_USED - The provided vCPU is already used.
+ */
+#define NE_ERR_VCPU_ALREADY_USED (256)
+/**
+ * NE_ERR_VCPU_NOT_IN_CPU_POOL - The provided vCPU is not available in the
+ * NE CPU pool.
+ */
+#define NE_ERR_VCPU_NOT_IN_CPU_POOL (257)
+/**
+ * NE_ERR_VCPU_INVALID_CPU_CORE - The core id of the provided vCPU is invalid
+ * or out of range of the NE CPU pool.
+ */
+#define NE_ERR_VCPU_INVALID_CPU_CORE (258)
+/**
+ * NE_ERR_INVALID_MEM_REGION_SIZE - The user space memory region size is not
+ * a multiple of 2 MiB.
+ */
+#define NE_ERR_INVALID_MEM_REGION_SIZE (259)
+/**
+ * NE_ERR_INVALID_MEM_REGION_ADDR - The user space memory region address range
+ * is invalid.
+ */
+#define NE_ERR_INVALID_MEM_REGION_ADDR (260)
+/**
+ * NE_ERR_UNALIGNED_MEM_REGION_ADDR - The user space memory region address is
+ * not aligned.
+ */
+#define NE_ERR_UNALIGNED_MEM_REGION_ADDR (261)
+/**
+ * NE_ERR_MEM_REGION_ALREADY_USED - The user space memory region is already used.
+ */
+#define NE_ERR_MEM_REGION_ALREADY_USED (262)
+/**
+ * NE_ERR_MEM_NOT_HUGE_PAGE - The user space memory region is not backed by
+ * contiguous physical huge page(s).
+ */
+#define NE_ERR_MEM_NOT_HUGE_PAGE (263)
+/**
+ * NE_ERR_MEM_DIFFERENT_NUMA_NODE - The user space memory region is backed by
+ * pages from different NUMA nodes than the CPUs.
+ */
+#define NE_ERR_MEM_DIFFERENT_NUMA_NODE (264)
+/**
 * NE_ERR_MEM_MAX_REGIONS - The supported maximum number of memory regions
 * per enclave has been reached.
+ */
+#define NE_ERR_MEM_MAX_REGIONS (265)
+/**
+ * NE_ERR_NO_MEM_REGIONS_ADDED - The command to start an enclave is triggered
+ * and no memory regions are added.
+ */
+#define NE_ERR_NO_MEM_REGIONS_ADDED (266)
+/**
+ * NE_ERR_NO_VCPUS_ADDED - The command to start an enclave is triggered and no
+ * vCPUs are added.
+ */
+#define NE_ERR_NO_VCPUS_ADDED (267)
+/**
+ * NE_ERR_ENCLAVE_MEM_MIN_SIZE - The enclave memory size is lower than the
+ * minimum supported.
+ */
+#define NE_ERR_ENCLAVE_MEM_MIN_SIZE (268)
+/**
+ * NE_ERR_FULL_CORES_NOT_USED - The command to start an enclave is triggered and
+ * full CPU cores are not set.
+ */
+#define NE_ERR_FULL_CORES_NOT_USED (269)
+/**
+ * NE_ERR_NOT_IN_INIT_STATE - The enclave is not in init state when setting
+ * resources or triggering start.
+ */
+#define NE_ERR_NOT_IN_INIT_STATE (270)
+/**
+ * NE_ERR_INVALID_VCPU - The provided vCPU is out of range of the available CPUs.
+ */
+#define NE_ERR_INVALID_VCPU (271)
+/**
+ * NE_ERR_NO_CPUS_AVAIL_IN_POOL - The command to create an enclave is triggered
+ * and no CPUs are available in the pool.
+ */
+#define NE_ERR_NO_CPUS_AVAIL_IN_POOL (272)
+/**
+ * NE_ERR_INVALID_PAGE_SIZE - The user space memory region is not backed by pages
+ * whose size is a multiple of 2 MiB.
+ */
+#define NE_ERR_INVALID_PAGE_SIZE (273)
+/**
+ * NE_ERR_INVALID_FLAG_VALUE - The provided flag value is invalid.
+ */
+#define NE_ERR_INVALID_FLAG_VALUE (274)
+/**
+ * NE_ERR_INVALID_ENCLAVE_CID - The provided enclave CID is invalid, either
+ * being a well-known value or the CID of the
+ * parent / primary VM.
+ */
+#define NE_ERR_INVALID_ENCLAVE_CID (275)
+
+/**
+ * DOC: Image load info flags
+ */
+
+/**
+ * NE_EIF_IMAGE - Enclave Image Format (EIF)
+ */
+#define NE_EIF_IMAGE (0x01)
+
+#define NE_IMAGE_LOAD_MAX_FLAG_VAL (0x02)
+
+/**
+ * struct ne_image_load_info - Info necessary for in-memory enclave image
+ * loading (in / out).
+ * @flags: Flags to determine the enclave image type
+ * (e.g. Enclave Image Format - EIF) (in).
+ * @memory_offset: Offset in enclave memory where to start placing the
+ * enclave image (out).
+ */
+struct ne_image_load_info {
+ __u64 flags;
+ __u64 memory_offset;
+};
+
+/**
+ * DOC: User memory region flags
+ */
+
+/**
+ * NE_DEFAULT_MEMORY_REGION - Memory region for enclave general usage.
+ */
+#define NE_DEFAULT_MEMORY_REGION (0x00)
+
+#define NE_MEMORY_REGION_MAX_FLAG_VAL (0x01)
+
+/**
+ * struct ne_user_memory_region - Memory region to be set for an enclave (in).
+ * @flags: Flags to determine the usage for the memory region (in).
+ * @memory_size: The size, in bytes, of the memory region to be set for
+ * an enclave (in).
+ * @userspace_addr: The start address of the userspace allocated memory of
+ * the memory region to set for an enclave (in).
+ */
+struct ne_user_memory_region {
+ __u64 flags;
+ __u64 memory_size;
+ __u64 userspace_addr;
+};
+
+/**
+ * DOC: Enclave start info flags
+ */
+
+/**
+ * NE_ENCLAVE_PRODUCTION_MODE - Start enclave in production mode.
+ */
+#define NE_ENCLAVE_PRODUCTION_MODE (0x00)
+/**
+ * NE_ENCLAVE_DEBUG_MODE - Start enclave in debug mode.
+ */
+#define NE_ENCLAVE_DEBUG_MODE (0x01)
+
+#define NE_ENCLAVE_START_MAX_FLAG_VAL (0x02)
+
+/**
+ * struct ne_enclave_start_info - Setup info necessary for enclave start (in / out).
+ * @flags: Flags for the enclave to start with (e.g. debug mode) (in).
+ * @enclave_cid: Context ID (CID) for the enclave vsock device. If 0 as
+ * input, the CID is autogenerated by the hypervisor and
+ * returned back as output by the driver (in / out).
+ */
+struct ne_enclave_start_info {
+ __u64 flags;
+ __u64 enclave_cid;
+};
+
+#endif /* _UAPI_LINUX_NITRO_ENCLAVES_H_ */
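
Tying the four ioctls together, a heavily trimmed sketch of the documented
call order (error handling, the image load step via NE_GET_IMAGE_LOAD_INFO
and the hugepage-backed allocation behind huge_mem are all elided; the
helper name is hypothetical):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/nitro_enclaves.h>

static int start_enclave_sketch(void *huge_mem, __u64 mem_size)
{
	__u64 slot_uid = 0;
	__u32 vcpu_id = 0;	/* 0 = auto-chosen from the NE CPU pool */
	int ne_fd = open("/dev/nitro_enclaves", O_RDWR | O_CLOEXEC);
	int enclave_fd = ioctl(ne_fd, NE_CREATE_VM, &slot_uid);
	struct ne_user_memory_region region = {
		.flags		= NE_DEFAULT_MEMORY_REGION,
		.memory_size	= mem_size,	/* multiple of 2 MiB */
		.userspace_addr	= (__u64)(unsigned long)huge_mem,
	};
	struct ne_enclave_start_info start = {
		.flags		= NE_ENCLAVE_DEBUG_MODE,
		.enclave_cid	= 0,	/* 0 = hypervisor assigns the CID */
	};

	ioctl(enclave_fd, NE_ADD_VCPU, &vcpu_id);
	ioctl(enclave_fd, NE_SET_USER_MEMORY_REGION, &region);
	ioctl(enclave_fd, NE_START_ENCLAVE, &start);
	return enclave_fd;
}
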
diff --git a/include/uapi/linux/nl80211-vnd-intel.h b/include/uapi/linux/nl80211-vnd-intel.h
new file mode 100644
index 000000000000..4ed7d0b24512
--- /dev/null
+++ b/include/uapi/linux/nl80211-vnd-intel.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __VENDOR_CMD_INTEL_H__
+#define __VENDOR_CMD_INTEL_H__
+
+#define INTEL_OUI 0x001735
+
+/**
+ * enum iwl_mvm_vendor_cmd - supported vendor commands
+ * @IWL_MVM_VENDOR_CMD_GET_CSME_CONN_INFO: reports CSME connection info.
+ * @IWL_MVM_VENDOR_CMD_HOST_GET_OWNERSHIP: asks for ownership on the device.
+ * This is useful when the CSME firmware owns the device and the kernel
+ * wants to use it. In case the CSME firmware has no connection active the
+ * kernel will manage on its own to get ownership of the device.
+ * When the CSME firmware has an active connection, the user space
+ * involvement is required. The kernel will assert the RFKILL signal with
+ * the "device not owned" reason so that nobody can touch the device. Then
+ * the user space can run the following flow to be able to get connected
+ * to the very same AP the CSME firmware is currently connected to:
+ *
+ * 1) The user space (NetworkManager) boots and sees that the device is
+ * in RFKILL because the host doesn't own the device
+ * 2) The user space asks the kernel what AP the CSME firmware is
+ * connected to (with %IWL_MVM_VENDOR_CMD_GET_CSME_CONN_INFO)
+ * 3) The user space checks if it has a profile that matches the reply
+ * from the CSME firmware
+ * 4) The user space installs a network to the wpa_supplicant with a
+ * specific BSSID and a specific frequency
+ * 5) The user space prevents any type of full scan
+ * 6) The user space asks iwlmei to request ownership on the device (with
+ * this command)
+ * 7) iwlmei requests ownership from the CSME firmware
+ * 8) The CSME firmware grants ownership
+ * 9) iwlmei tells iwlwifi to lift the RFKILL
+ * 10) RFKILL OFF is reported to user space
+ * 11) The host boots the device, loads the firmware, and connects to a
+ * specific BSSID without scanning, bringing up IP connectivity as fast
+ * as it can
+ * 12) The host reports to the CSME firmware that there is a connection
+ * 13) The TCP connection is preserved and the host has connectivity
+ *
+ * @IWL_MVM_VENDOR_CMD_ROAMING_FORBIDDEN_EVENT: notifies if roaming is allowed.
+ * It contains a &IWL_MVM_VENDOR_ATTR_ROAMING_FORBIDDEN and a
+ * &IWL_MVM_VENDOR_ATTR_VIF_ADDR attributes.
+ */
+
+enum iwl_mvm_vendor_cmd {
+ IWL_MVM_VENDOR_CMD_GET_CSME_CONN_INFO = 0x2d,
+ IWL_MVM_VENDOR_CMD_HOST_GET_OWNERSHIP = 0x30,
+ IWL_MVM_VENDOR_CMD_ROAMING_FORBIDDEN_EVENT = 0x32,
+};
+
+enum iwl_vendor_auth_akm_mode {
+ IWL_VENDOR_AUTH_OPEN,
+ IWL_VENDOR_AUTH_RSNA = 0x6,
+ IWL_VENDOR_AUTH_RSNA_PSK,
+ IWL_VENDOR_AUTH_SAE = 0x9,
+ IWL_VENDOR_AUTH_MAX,
+};
+
+/**
+ * enum iwl_mvm_vendor_attr - attributes used in vendor commands
+ * @__IWL_MVM_VENDOR_ATTR_INVALID: attribute 0 is invalid
+ * @IWL_MVM_VENDOR_ATTR_VIF_ADDR: interface MAC address
+ * @IWL_MVM_VENDOR_ATTR_ADDR: MAC address
+ * @IWL_MVM_VENDOR_ATTR_SSID: SSID (binary attribute, 0..32 octets)
+ * @IWL_MVM_VENDOR_ATTR_STA_CIPHER: the cipher to use for the station with the
+ * mac address specified in &IWL_MVM_VENDOR_ATTR_ADDR.
+ * @IWL_MVM_VENDOR_ATTR_ROAMING_FORBIDDEN: u8 attribute. Indicates whether
+ * roaming is forbidden or not. Value 1 means roaming is forbidden,
+ * 0 means roaming is allowed.
+ * @IWL_MVM_VENDOR_ATTR_AUTH_MODE: u32 attribute. Authentication mode type
+ * as specified in &enum iwl_vendor_auth_akm_mode.
+ * @IWL_MVM_VENDOR_ATTR_CHANNEL_NUM: u8 attribute. Contains channel number.
+ * @IWL_MVM_VENDOR_ATTR_BAND: u8 attribute.
+ * 0 for the 2.4 GHz band, 1 for the 5.2 GHz band and 2 for the 6 GHz band.
+ * @IWL_MVM_VENDOR_ATTR_COLLOC_CHANNEL: u32 attribute. Channel number of
+ * collocated AP. Relevant for 6GHz AP info.
+ * @IWL_MVM_VENDOR_ATTR_COLLOC_ADDR: MAC address of a collocated AP.
+ * Relevant for 6GHz AP info.
+ *
+ * @NUM_IWL_MVM_VENDOR_ATTR: number of vendor attributes
+ * @MAX_IWL_MVM_VENDOR_ATTR: highest vendor attribute number
+ *
+ */
+enum iwl_mvm_vendor_attr {
+ __IWL_MVM_VENDOR_ATTR_INVALID = 0x00,
+ IWL_MVM_VENDOR_ATTR_VIF_ADDR = 0x02,
+ IWL_MVM_VENDOR_ATTR_ADDR = 0x0a,
+ IWL_MVM_VENDOR_ATTR_SSID = 0x3d,
+ IWL_MVM_VENDOR_ATTR_STA_CIPHER = 0x51,
+ IWL_MVM_VENDOR_ATTR_ROAMING_FORBIDDEN = 0x64,
+ IWL_MVM_VENDOR_ATTR_AUTH_MODE = 0x65,
+ IWL_MVM_VENDOR_ATTR_CHANNEL_NUM = 0x66,
+ IWL_MVM_VENDOR_ATTR_BAND = 0x69,
+ IWL_MVM_VENDOR_ATTR_COLLOC_CHANNEL = 0x70,
+ IWL_MVM_VENDOR_ATTR_COLLOC_ADDR = 0x71,
+
+ NUM_IWL_MVM_VENDOR_ATTR,
+ MAX_IWL_MVM_VENDOR_ATTR = NUM_IWL_MVM_VENDOR_ATTR - 1,
+};
+
+#endif /* __VENDOR_CMD_INTEL_H__ */
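
Step 6 of the flow above is an ordinary nl80211 vendor command. A sketch
using libnl-genl (assumed library; the socket is expected to be allocated
with nl_socket_alloc() and connected with genl_connect() beforehand):

#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>
#include <linux/nl80211-vnd-intel.h>

/* Ask iwlmei for device ownership on behalf of the host. */
static int request_ownership(struct nl_sock *sk, int ifindex)
{
	int fam = genl_ctrl_resolve(sk, "nl80211");
	struct nl_msg *msg = nlmsg_alloc();

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
		    NL80211_CMD_VENDOR, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	nla_put_u32(msg, NL80211_ATTR_VENDOR_ID, INTEL_OUI);
	nla_put_u32(msg, NL80211_ATTR_VENDOR_SUBCMD,
		    IWL_MVM_VENDOR_CMD_HOST_GET_OWNERSHIP);
	return nl_send_auto(sk, msg);
}
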
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 5eab191607f8..c32e7616a366 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -11,7 +11,7 @@
* Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com>
* Copyright 2008 Colin McCabe <colin@cozybit.com>
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -183,18 +183,27 @@
*
* By setting @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK flag drivers
* can indicate they support offloading EAPOL handshakes for WPA/WPA2
- * preshared key authentication. In %NL80211_CMD_CONNECT the preshared
- * key should be specified using %NL80211_ATTR_PMK. Drivers supporting
- * this offload may reject the %NL80211_CMD_CONNECT when no preshared
- * key material is provided, for example when that driver does not
- * support setting the temporal keys through %CMD_NEW_KEY.
+ * preshared key authentication in station mode. In %NL80211_CMD_CONNECT
+ * the preshared key should be specified using %NL80211_ATTR_PMK. Drivers
+ * supporting this offload may reject the %NL80211_CMD_CONNECT when no
+ * preshared key material is provided, for example when that driver does
+ * not support setting the temporal keys through %NL80211_CMD_NEW_KEY.
*
* Similarly @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X flag can be
* set by drivers indicating offload support of the PTK/GTK EAPOL
- * handshakes during 802.1X authentication. In order to use the offload
- * the %NL80211_CMD_CONNECT should have %NL80211_ATTR_WANT_1X_4WAY_HS
- * attribute flag. Drivers supporting this offload may reject the
- * %NL80211_CMD_CONNECT when the attribute flag is not present.
+ * handshakes during 802.1X authentication in station mode. In order to
+ * use the offload the %NL80211_CMD_CONNECT should have
+ * %NL80211_ATTR_WANT_1X_4WAY_HS attribute flag. Drivers supporting this
+ * offload may reject the %NL80211_CMD_CONNECT when the attribute flag is
+ * not present.
+ *
+ * By setting @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK flag drivers
+ * can indicate they support offloading EAPOL handshakes for WPA/WPA2
+ * preshared key authentication in AP mode. In %NL80211_CMD_START_AP
+ * the preshared key should be specified using %NL80211_ATTR_PMK. Drivers
+ * supporting this offload may reject the %NL80211_CMD_START_AP when no
+ * preshared key material is provided, for example when that driver does
+ * not support setting the temporal keys through %NL80211_CMD_NEW_KEY.
*
* For 802.1X the PMK or PMK-R0 are set by providing %NL80211_ATTR_PMK
* using %NL80211_CMD_SET_PMK. For offloaded FT support also
@@ -243,9 +252,13 @@
* DOC: SAE authentication offload
*
* By setting @NL80211_EXT_FEATURE_SAE_OFFLOAD flag drivers can indicate they
- * support offloading SAE authentication for WPA3-Personal networks. In
- * %NL80211_CMD_CONNECT the password for SAE should be specified using
- * %NL80211_ATTR_SAE_PASSWORD.
+ * support offloading SAE authentication for WPA3-Personal networks in station
+ * mode. Similarly @NL80211_EXT_FEATURE_SAE_OFFLOAD_AP flag can be set by
+ * drivers indicating the offload support in AP mode.
+ *
+ * The password for SAE should be specified using %NL80211_ATTR_SAE_PASSWORD in
+ * %NL80211_CMD_CONNECT and %NL80211_CMD_START_AP for station and AP mode
+ * respectively.
*/
/**
@@ -265,6 +278,63 @@
*/
/**
+ * DOC: TID configuration
+ *
+ * TID config support can be checked in the %NL80211_ATTR_TID_CONFIG
+ * attribute given in wiphy capabilities.
+ *
+ * The necessary configuration parameters are mentioned in
+ * &enum nl80211_tid_config_attr and it will be passed to the
+ * %NL80211_CMD_SET_TID_CONFIG command in %NL80211_ATTR_TID_CONFIG.
+ *
+ * If the configuration needs to be applied for a specific peer, then the MAC
+ * address of the peer needs to be passed in %NL80211_ATTR_MAC, otherwise the
+ * configuration will be applied for all the connected peers in the vif except
+ * any peers that have peer specific configuration for the TID by default; if
+ * the %NL80211_TID_CONFIG_ATTR_OVERRIDE flag is set, peer specific values
+ * will be overwritten.
+ *
+ * All this configuration is valid only for the STA's current connection,
+ * i.e. the configuration will be reset to default when the STA connects back
+ * after disconnection/roaming, and this configuration will be cleared when
+ * the interface goes down.
+ */
+
+/**
+ * DOC: FILS shared key crypto offload
+ *
+ * This feature is applicable to drivers running in AP mode.
+ *
+ * FILS shared key crypto offload can be advertised by drivers by setting
+ * @NL80211_EXT_FEATURE_FILS_CRYPTO_OFFLOAD flag. The drivers that support
+ * FILS shared key crypto offload should be able to encrypt and decrypt
+ * association frames for FILS shared key authentication as per IEEE 802.11ai.
+ * With this capability, for FILS key derivation, drivers depend on userspace.
+ *
+ * After FILS key derivation, userspace shares the FILS AAD details with the
+ * driver and the driver stores the same to use in decryption of association
+ * request and in encryption of association response. The below parameters
+ * should be given to the driver in %NL80211_CMD_SET_FILS_AAD.
+ * %NL80211_ATTR_MAC - STA MAC address, used for storing FILS AAD per STA
+ * %NL80211_ATTR_FILS_KEK - Used for encryption or decryption
+ * %NL80211_ATTR_FILS_NONCES - Used for encryption or decryption
+ * (STA Nonce 16 bytes followed by AP Nonce 16 bytes)
+ *
+ * Once the association is done, the driver cleans the FILS AAD data.
+ */
+
+/**
+ * DOC: Multi-Link Operation
+ *
+ * In Multi-Link Operation, a connection between two MLDs utilizes multiple
+ * links. To use this in nl80211, various commands and responses now need
+ * to or will include the new %NL80211_ATTR_MLO_LINKS attribute.
+ * Additionally, various commands that need to operate on a specific link
+ * now need to be given the %NL80211_ATTR_MLO_LINK_ID attribute, e.g. to
+ * use %NL80211_CMD_START_AP or similar functions.
+ */
+
+/**
* enum nl80211_commands - supported nl80211 commands
*
* @NL80211_CMD_UNSPEC: unspecified command to catch errors
@@ -273,13 +343,14 @@
* to get a list of all present wiphys.
* @NL80211_CMD_SET_WIPHY: set wiphy parameters, needs %NL80211_ATTR_WIPHY or
* %NL80211_ATTR_IFINDEX; can be used to set %NL80211_ATTR_WIPHY_NAME,
- * %NL80211_ATTR_WIPHY_TXQ_PARAMS, %NL80211_ATTR_WIPHY_FREQ (and the
- * attributes determining the channel width; this is used for setting
- * monitor mode channel), %NL80211_ATTR_WIPHY_RETRY_SHORT,
- * %NL80211_ATTR_WIPHY_RETRY_LONG, %NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
- * and/or %NL80211_ATTR_WIPHY_RTS_THRESHOLD.
- * However, for setting the channel, see %NL80211_CMD_SET_CHANNEL
- * instead, the support here is for backward compatibility only.
+ * %NL80211_ATTR_WIPHY_TXQ_PARAMS, %NL80211_ATTR_WIPHY_FREQ,
+ * %NL80211_ATTR_WIPHY_FREQ_OFFSET (and the attributes determining the
+ * channel width; this is used for setting monitor mode channel),
+ * %NL80211_ATTR_WIPHY_RETRY_SHORT, %NL80211_ATTR_WIPHY_RETRY_LONG,
+ * %NL80211_ATTR_WIPHY_FRAG_THRESHOLD, and/or
+ * %NL80211_ATTR_WIPHY_RTS_THRESHOLD. However, for setting the channel,
+ * see %NL80211_CMD_SET_CHANNEL instead, the support here is for backward
+ * compatibility only.
* @NL80211_CMD_NEW_WIPHY: Newly created wiphy, response to get request
* or rename notification. Has attributes %NL80211_ATTR_WIPHY and
* %NL80211_ATTR_WIPHY_NAME.
@@ -300,17 +371,28 @@
* @NL80211_CMD_DEL_INTERFACE: Virtual interface was deleted, has attributes
* %NL80211_ATTR_IFINDEX and %NL80211_ATTR_WIPHY. Can also be sent from
* userspace to request deletion of a virtual interface, then requires
- * attribute %NL80211_ATTR_IFINDEX.
+ * attribute %NL80211_ATTR_IFINDEX. If multiple BSSID advertisements are
+ * enabled using %NL80211_ATTR_MBSSID_CONFIG, %NL80211_ATTR_MBSSID_ELEMS,
+ * and if this command is used for the transmitting interface, then all
+ * the non-transmitting interfaces are deleted as well.
*
* @NL80211_CMD_GET_KEY: Get sequence counter information for a key specified
- * by %NL80211_ATTR_KEY_IDX and/or %NL80211_ATTR_MAC.
+ * by %NL80211_ATTR_KEY_IDX and/or %NL80211_ATTR_MAC. %NL80211_ATTR_MAC
+ * represents peer's MLD address for MLO pairwise key. For MLO group key,
+ * the link is identified by %NL80211_ATTR_MLO_LINK_ID.
* @NL80211_CMD_SET_KEY: Set key attributes %NL80211_ATTR_KEY_DEFAULT,
* %NL80211_ATTR_KEY_DEFAULT_MGMT, or %NL80211_ATTR_KEY_THRESHOLD.
+ * For MLO connection, the link to set default key is identified by
+ * %NL80211_ATTR_MLO_LINK_ID.
* @NL80211_CMD_NEW_KEY: add a key with given %NL80211_ATTR_KEY_DATA,
* %NL80211_ATTR_KEY_IDX, %NL80211_ATTR_MAC, %NL80211_ATTR_KEY_CIPHER,
- * and %NL80211_ATTR_KEY_SEQ attributes.
+ * and %NL80211_ATTR_KEY_SEQ attributes. %NL80211_ATTR_MAC represents
+ * peer's MLD address for MLO pairwise key. The link to add MLO
+ * group key is identified by %NL80211_ATTR_MLO_LINK_ID.
* @NL80211_CMD_DEL_KEY: delete a key identified by %NL80211_ATTR_KEY_IDX
- * or %NL80211_ATTR_MAC.
+ * or %NL80211_ATTR_MAC. %NL80211_ATTR_MAC represents peer's MLD address
+ * for MLO pairwise key. The link to delete group key is identified by
+ * %NL80211_ATTR_MLO_LINK_ID.
*
* @NL80211_CMD_GET_BEACON: (not used)
* @NL80211_CMD_SET_BEACON: change the beacon on an access point interface
@@ -328,7 +410,8 @@
* %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_INACTIVITY_TIMEOUT,
* %NL80211_ATTR_ACL_POLICY and %NL80211_ATTR_MAC_ADDRS.
* The channel to use can be set on the interface or be given using the
- * %NL80211_ATTR_WIPHY_FREQ and the attributes determining channel width.
+ * %NL80211_ATTR_WIPHY_FREQ and %NL80211_ATTR_WIPHY_FREQ_OFFSET, and the
+ * attributes determining channel width.
* @NL80211_CMD_NEW_BEACON: old alias for %NL80211_CMD_START_AP
* @NL80211_CMD_STOP_AP: Stop AP operation on the given interface
* @NL80211_CMD_DEL_BEACON: old alias for %NL80211_CMD_STOP_AP
@@ -338,7 +421,7 @@
* @NL80211_CMD_SET_STATION: Set station attributes for station identified by
* %NL80211_ATTR_MAC on the interface identified by %NL80211_ATTR_IFINDEX.
* @NL80211_CMD_NEW_STATION: Add a station with given attributes to the
- * the interface identified by %NL80211_ATTR_IFINDEX.
+ * interface identified by %NL80211_ATTR_IFINDEX.
* @NL80211_CMD_DEL_STATION: Remove a station identified by %NL80211_ATTR_MAC
* or, if no MAC address given, all stations, on the interface identified
* by %NL80211_ATTR_IFINDEX. %NL80211_ATTR_MGMT_SUBTYPE and
@@ -358,7 +441,7 @@
* @NL80211_CMD_DEL_MPATH: Delete a mesh path to the destination given by
* %NL80211_ATTR_MAC.
* @NL80211_CMD_NEW_PATH: Add a mesh path with given attributes to the
- * the interface identified by %NL80211_ATTR_IFINDEX.
+ * interface identified by %NL80211_ATTR_IFINDEX.
* @NL80211_CMD_DEL_PATH: Remove a mesh path identified by %NL80211_ATTR_MAC
* or, if no MAC address given, all mesh paths, on the interface identified
* by %NL80211_ATTR_IFINDEX.
@@ -513,11 +596,12 @@
* interface. %NL80211_ATTR_MAC is used to specify PeerSTAAddress (and
* BSSID in case of station mode). %NL80211_ATTR_SSID is used to specify
* the SSID (mainly for association, but is included in authentication
- * request, too, to help BSS selection. %NL80211_ATTR_WIPHY_FREQ is used
- * to specify the frequence of the channel in MHz. %NL80211_ATTR_AUTH_TYPE
- * is used to specify the authentication type. %NL80211_ATTR_IE is used to
- * define IEs (VendorSpecificInfo, but also including RSN IE and FT IEs)
- * to be added to the frame.
+ * request, too, to help BSS selection. %NL80211_ATTR_WIPHY_FREQ +
+ * %NL80211_ATTR_WIPHY_FREQ_OFFSET is used to specify the frequency of the
+ * channel in MHz. %NL80211_ATTR_AUTH_TYPE is used to specify the
+ * authentication type. %NL80211_ATTR_IE is used to define IEs
+ * (VendorSpecificInfo, but also including RSN IE and FT IEs) to be added
+ * to the frame.
* When used as an event, this reports reception of an Authentication
* frame in station and IBSS modes when the local MLME processed the
* frame, i.e., it was for the local STA and was received in correct
@@ -572,8 +656,9 @@
* requests to connect to a specified network but without separating
* auth and assoc steps. For this, you need to specify the SSID in a
* %NL80211_ATTR_SSID attribute, and can optionally specify the association
- * IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_USE_MFP,
- * %NL80211_ATTR_MAC, %NL80211_ATTR_WIPHY_FREQ, %NL80211_ATTR_CONTROL_PORT,
+ * IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE,
+ * %NL80211_ATTR_USE_MFP, %NL80211_ATTR_MAC, %NL80211_ATTR_WIPHY_FREQ,
+ * %NL80211_ATTR_WIPHY_FREQ_OFFSET, %NL80211_ATTR_CONTROL_PORT,
* %NL80211_ATTR_CONTROL_PORT_ETHERTYPE,
* %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT,
* %NL80211_ATTR_CONTROL_PORT_OVER_NL80211, %NL80211_ATTR_MAC_HINT, and
@@ -611,14 +696,13 @@
* authentication/association or not receiving a response from the AP.
* Non-zero %NL80211_ATTR_STATUS_CODE value is indicated in that case as
* well to remain backwards compatible.
- * When establishing a security association, drivers that support 4 way
- * handshake offload should send %NL80211_CMD_PORT_AUTHORIZED event when
- * the 4 way handshake is completed successfully.
* @NL80211_CMD_ROAM: Notification indicating the card/driver roamed by itself.
- * When a security association was established with the new AP (e.g. if
- * the FT protocol was used for roaming or the driver completed the 4 way
- * handshake), this event should be followed by an
+ * When a security association was established on an 802.1X network using
+ * fast transition, this event should be followed by an
* %NL80211_CMD_PORT_AUTHORIZED event.
+ * Following a %NL80211_CMD_ROAM event userspace can issue
+ * %NL80211_CMD_GET_SCAN in order to obtain the scan information for the
+ * new BSS the card/driver roamed to.
* @NL80211_CMD_DISCONNECT: drop a given connection; also used to notify
* userspace that a connection was dropped by the AP or due to other
* reasons, for this the %NL80211_ATTR_DISCONNECTED_BY_AP and
@@ -664,6 +748,10 @@
* four bytes for vendor frames including the OUI. The registration
* cannot be dropped, but is removed automatically when the netlink
* socket is closed. Multiple registrations can be made.
+ * The %NL80211_ATTR_RECEIVE_MULTICAST flag attribute can be given if
+ * %NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS is available, in which
+ * case the registration can also be modified to include/exclude the
+ * flag, rather than requiring unregistration to change it.
* @NL80211_CMD_REGISTER_ACTION: Alias for @NL80211_CMD_REGISTER_FRAME for
* backward compatibility
* @NL80211_CMD_FRAME: Management frame TX request and RX notification. This
@@ -684,6 +772,13 @@
* %NL80211_ATTR_CSA_C_OFFSETS_TX is an array of offsets to CSA
* counters which will be updated to the current value. This attribute
* is used during CSA period.
+ * For TX on an MLD, the frequency can be omitted and the link ID be
+ * specified, or if transmitting to a known peer MLD (with MLD addresses
+ * in the frame) both can be omitted and the link will be selected by
+ * lower layers.
+ * For RX notification, %NL80211_ATTR_RX_HW_TIMESTAMP may be included to
+ * indicate the frame RX timestamp and %NL80211_ATTR_TX_HW_TIMESTAMP may
+ * be included to indicate the ack TX timestamp.
* @NL80211_CMD_FRAME_WAIT_CANCEL: When an off-channel TX was requested, this
* command may be used with the corresponding cookie to cancel the wait
* time if it is known that it is no longer necessary. This command is
@@ -694,7 +789,9 @@
* transmitted with %NL80211_CMD_FRAME. %NL80211_ATTR_COOKIE identifies
* the TX command and %NL80211_ATTR_FRAME includes the contents of the
* frame. %NL80211_ATTR_ACK flag is included if the recipient acknowledged
- * the frame.
+ * the frame. %NL80211_ATTR_TX_HW_TIMESTAMP may be included to indicate the
+ * TX timestamp and %NL80211_ATTR_RX_HW_TIMESTAMP may be included to
+ * indicate the ack RX timestamp.
* @NL80211_CMD_ACTION_TX_STATUS: Alias for @NL80211_CMD_FRAME_TX_STATUS for
* backward compatibility.
*
@@ -717,7 +814,8 @@
* of any other interfaces, and other interfaces will again take
* precedence when they are used.
*
- * @NL80211_CMD_SET_WDS_PEER: Set the MAC address of the peer on a WDS interface.
+ * @NL80211_CMD_SET_WDS_PEER: Set the MAC address of the peer on a WDS interface
+ * (no longer supported).
*
* @NL80211_CMD_SET_MULTICAST_TO_UNICAST: Configure if this AP should perform
* multicast to unicast conversion. When enabled, all multicast packets
@@ -763,7 +861,7 @@
* various triggers. These triggers can be configured through this
* command with the %NL80211_ATTR_WOWLAN_TRIGGERS attribute. For
* more background information, see
- * http://wireless.kernel.org/en/users/Documentation/WoWLAN.
+ * https://wireless.wiki.kernel.org/en/users/Documentation/WoWLAN.
* The @NL80211_CMD_SET_WOWLAN command can also be used as a notification
* from the driver reporting the wakeup reason. In this case, the
* @NL80211_ATTR_WOWLAN_TRIGGERS attribute will contain the reason
@@ -903,7 +1001,7 @@
* @NL80211_CMD_SET_COALESCE: Configure coalesce rules or clear existing rules.
*
* @NL80211_CMD_CHANNEL_SWITCH: Perform a channel switch by announcing the
- * the new channel information (Channel Switch Announcement - CSA)
+ * new channel information (Channel Switch Announcement - CSA)
* in the beacon for some time (as defined in the
* %NL80211_ATTR_CH_SWITCH_COUNT parameter) and then change to the
* new channel. Userspace provides the new channel information (using
@@ -1027,19 +1125,23 @@
* @NL80211_CMD_DEL_PMK: For offloaded 4-Way handshake, delete the previously
* configured PMK for the authenticator address identified by
* %NL80211_ATTR_MAC.
- * @NL80211_CMD_PORT_AUTHORIZED: An event that indicates that the 4 way
- * handshake was completed successfully by the driver. The BSSID is
- * specified with %NL80211_ATTR_MAC. Drivers that support 4 way handshake
- * offload should send this event after indicating 802.11 association with
- * %NL80211_CMD_CONNECT or %NL80211_CMD_ROAM. If the 4 way handshake failed
- * %NL80211_CMD_DISCONNECT should be indicated instead.
- *
+ * @NL80211_CMD_PORT_AUTHORIZED: An event that indicates an 802.1X FT roam was
+ * completed successfully. Drivers that support 4 way handshake offload
+ * should send this event after indicating 802.1X FT association with
+ * %NL80211_CMD_ROAM. If the 4 way handshake failed %NL80211_CMD_DISCONNECT
+ * should be indicated instead.
* @NL80211_CMD_CONTROL_PORT_FRAME: Control Port (e.g. PAE) frame TX request
* and RX notification. This command is used both as a request to transmit
* a control port frame and as a notification that a control port frame
* has been received. %NL80211_ATTR_FRAME is used to specify the
* frame contents. The frame is the raw EAPoL data, without ethernet or
* 802.11 headers.
+ * For an MLD transmitter, the %NL80211_ATTR_MLO_LINK_ID may be given and
+ * its effect will depend on the destination: If the destination is known
+ * to be an MLD, this will be used as a hint to select the link to transmit
+ * the frame on. If the destination is not an MLD, this will select both
+ * the link to transmit on and the source address will be set to the link
+ * address of that link.
* When used as an event indication %NL80211_ATTR_CONTROL_PORT_ETHERTYPE,
* %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT and %NL80211_ATTR_MAC are added
* indicating the protocol type of the received frame; whether the frame
@@ -1082,7 +1184,7 @@
* randomization may be enabled and configured by specifying the
* %NL80211_ATTR_MAC and %NL80211_ATTR_MAC_MASK attributes.
* If a timeout is requested, use the %NL80211_ATTR_TIMEOUT attribute.
- * A u64 cookie for further %NL80211_ATTR_COOKIE use is is returned in
+ * A u64 cookie for further %NL80211_ATTR_COOKIE use is returned in
* the netlink extended ack message.
*
* To cancel a measurement, close the socket that requested it.
@@ -1125,6 +1227,60 @@
* peer MAC address and %NL80211_ATTR_FRAME is used to specify the frame
* content. The frame is ethernet data.
*
+ * @NL80211_CMD_SET_TID_CONFIG: Data frame TID specific configuration
+ * is passed using %NL80211_ATTR_TID_CONFIG attribute.
+ *
+ * @NL80211_CMD_UNPROT_BEACON: Unprotected or incorrectly protected Beacon
+ * frame. This event is used to indicate that a received Beacon frame was
+ * dropped because it did not include a valid MME MIC while beacon
+ * protection was enabled (BIGTK configured in station mode).
+ *
+ * @NL80211_CMD_CONTROL_PORT_FRAME_TX_STATUS: Report TX status of a control
+ * port frame transmitted with %NL80211_CMD_CONTROL_PORT_FRAME.
+ * %NL80211_ATTR_COOKIE identifies the TX command and %NL80211_ATTR_FRAME
+ * includes the contents of the frame. %NL80211_ATTR_ACK flag is included
+ * if the recipient acknowledged the frame.
+ *
+ * @NL80211_CMD_SET_SAR_SPECS: SAR power limitation configuration is
+ * passed using %NL80211_ATTR_SAR_SPEC. %NL80211_ATTR_WIPHY is used to
+ * specify the wiphy index to be applied to.
+ *
+ * @NL80211_CMD_OBSS_COLOR_COLLISION: This notification is sent out whenever
+ * mac80211/drv detects a bss color collision.
+ *
+ * @NL80211_CMD_COLOR_CHANGE_REQUEST: This command is used to indicate that
+ * userspace wants to change the BSS color.
+ *
+ * @NL80211_CMD_COLOR_CHANGE_STARTED: Notify userland, that a color change has
+ * started
+ *
+ * @NL80211_CMD_COLOR_CHANGE_ABORTED: Notify userland, that the color change has
+ * been aborted
+ *
+ * @NL80211_CMD_COLOR_CHANGE_COMPLETED: Notify userland that the color change
+ * has completed
+ *
+ * @NL80211_CMD_SET_FILS_AAD: Set FILS AAD data to the driver using -
+ * &NL80211_ATTR_MAC - for STA MAC address
+ * &NL80211_ATTR_FILS_KEK - for KEK
+ * &NL80211_ATTR_FILS_NONCES - for FILS Nonces
+ * (STA Nonce 16 bytes followed by AP Nonce 16 bytes)
+ *
+ * @NL80211_CMD_ASSOC_COMEBACK: notification about an association
+ * temporary rejection with comeback. The event includes %NL80211_ATTR_MAC
+ * to describe the BSSID address of the AP and %NL80211_ATTR_TIMEOUT to
+ * specify the timeout value.
+ *
+ * @NL80211_CMD_ADD_LINK: Add a new link to an interface. The
+ * %NL80211_ATTR_MLO_LINK_ID attribute is used for the new link.
+ * @NL80211_CMD_REMOVE_LINK: Remove a link from an interface. This may come
+ * without %NL80211_ATTR_MLO_LINK_ID as an easy way to remove all links
+ * in preparation for e.g. roaming to a regular (non-MLO) AP.
+ *
+ * @NL80211_CMD_ADD_LINK_STA: Add a link to an MLD station
+ * @NL80211_CMD_MODIFY_LINK_STA: Modify a link of an MLD station
+ * @NL80211_CMD_REMOVE_LINK_STA: Remove a link of an MLD station
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
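As a concrete illustration of the registration flow described for %NL80211_CMD_REGISTER_FRAME above, here is a minimal userspace sketch. It is hypothetical rather than part of this header: it assumes libnl-3 genl, an already-resolved nl80211 family id, and the helper name and error handling are this example's own.

#include <errno.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

/* Illustrative only: register to receive Action frames (type/subtype 0xd0),
 * optionally requesting multicast RX per NL80211_ATTR_RECEIVE_MULTICAST. */
static int register_action_frames(struct nl_sock *sk, int family,
                                  int ifindex, int want_mcast)
{
        static const unsigned char match[] = { 0x04 }; /* Public Action */
        struct nl_msg *msg = nlmsg_alloc();

        if (!msg)
                return -ENOMEM;
        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                    NL80211_CMD_REGISTER_FRAME, 0);
        nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
        nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE, 0xd0);
        nla_put(msg, NL80211_ATTR_FRAME_MATCH, sizeof(match), match);
        if (want_mcast) /* only if MULTICAST_REGISTRATIONS is advertised */
                nla_put_flag(msg, NL80211_ATTR_RECEIVE_MULTICAST);
        return nl_send_auto(sk, msg); /* real code would also free msg */
}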
@@ -1349,6 +1505,33 @@ enum nl80211_commands {
NL80211_CMD_PROBE_MESH_LINK,
+ NL80211_CMD_SET_TID_CONFIG,
+
+ NL80211_CMD_UNPROT_BEACON,
+
+ NL80211_CMD_CONTROL_PORT_FRAME_TX_STATUS,
+
+ NL80211_CMD_SET_SAR_SPECS,
+
+ NL80211_CMD_OBSS_COLOR_COLLISION,
+
+ NL80211_CMD_COLOR_CHANGE_REQUEST,
+
+ NL80211_CMD_COLOR_CHANGE_STARTED,
+ NL80211_CMD_COLOR_CHANGE_ABORTED,
+ NL80211_CMD_COLOR_CHANGE_COMPLETED,
+
+ NL80211_CMD_SET_FILS_AAD,
+
+ NL80211_CMD_ASSOC_COMEBACK,
+
+ NL80211_CMD_ADD_LINK,
+ NL80211_CMD_REMOVE_LINK,
+
+ NL80211_CMD_ADD_LINK_STA,
+ NL80211_CMD_MODIFY_LINK_STA,
+ NL80211_CMD_REMOVE_LINK_STA,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -1394,7 +1577,8 @@ enum nl80211_commands {
* of &enum nl80211_chan_width, describing the channel width. See the
* documentation of the enum for more information.
* @NL80211_ATTR_CENTER_FREQ1: Center frequency of the first part of the
- * channel, used for anything but 20 MHz bandwidth
+ * channel, used for anything but 20 MHz bandwidth. In S1G this is the
+ * operating channel center frequency.
* @NL80211_ATTR_CENTER_FREQ2: Center frequency of the second part of the
* channel, used only for 80+80 MHz bandwidth
* @NL80211_ATTR_WIPHY_CHANNEL_TYPE: included with NL80211_ATTR_WIPHY_FREQ
@@ -1459,7 +1643,7 @@ enum nl80211_commands {
* rates as defined by IEEE 802.11 7.3.2.2 but without the length
* restriction (at most %NL80211_MAX_SUPP_RATES).
* @NL80211_ATTR_STA_VLAN: interface index of VLAN interface to move station
- * to, or the AP interface the station was originally added to to.
+ * to, or the AP interface the station was originally added to.
* @NL80211_ATTR_STA_INFO: information about a station, part of station info
* given for %NL80211_CMD_GET_STATION, nested attribute containing
* info as possible, see &enum nl80211_sta_info.
@@ -1604,7 +1788,8 @@ enum nl80211_commands {
* flag is included, then control port frames are sent over NL80211 instead
* using %CMD_CONTROL_PORT_FRAME. If control port routing over NL80211 is
* to be used then userspace must also use the %NL80211_ATTR_SOCKET_OWNER
- * flag.
+ * flag. When used with %NL80211_ATTR_CONTROL_PORT_NO_PREAUTH, pre-auth
+ * frames are not forwarded over the control port.
*
* @NL80211_ATTR_TESTDATA: Testmode data blob, passed through to the driver.
* We recommend using nested, driver-specific attributes within this.
@@ -1690,8 +1875,9 @@ enum nl80211_commands {
* specify just a single bitrate, which is to be used for the beacon.
* The driver must also specify support for this with the extended
* features NL80211_EXT_FEATURE_BEACON_RATE_LEGACY,
- * NL80211_EXT_FEATURE_BEACON_RATE_HT and
- * NL80211_EXT_FEATURE_BEACON_RATE_VHT.
+ * NL80211_EXT_FEATURE_BEACON_RATE_HT,
+ * NL80211_EXT_FEATURE_BEACON_RATE_VHT and
+ * NL80211_EXT_FEATURE_BEACON_RATE_HE.
*
* @NL80211_ATTR_FRAME_MATCH: A binary attribute which typically must contain
* at least one byte, currently used with @NL80211_CMD_REGISTER_FRAME.
@@ -1895,8 +2081,15 @@ enum nl80211_commands {
* @NL80211_ATTR_PROBE_RESP: Probe Response template data. Contains the entire
* probe-response frame. The DA field in the 802.11 header is zero-ed out,
* to be filled by the FW.
- * @NL80211_ATTR_DISABLE_HT: Force HT capable interfaces to disable
- * this feature. Currently, only supported in mac80211 drivers.
+ * @NL80211_ATTR_DISABLE_HT: Force HT capable interfaces to disable
+ * this feature during association. This is a flag attribute.
+ * Currently only supported in mac80211 drivers.
+ * @NL80211_ATTR_DISABLE_VHT: Force VHT capable interfaces to disable
+ * this feature during association. This is a flag attribute.
+ * Currently only supported in mac80211 drivers.
+ * @NL80211_ATTR_DISABLE_HE: Force HE capable interfaces to disable
+ * this feature during association. This is a flag attribute.
+ * Currently only supported in mac80211 drivers.
* @NL80211_ATTR_HT_CAPABILITY_MASK: Specify which bits of the
* ATTR_HT_CAPABILITY to which attention should be paid.
* Currently, only mac80211 NICs support this feature.
@@ -2017,13 +2210,14 @@ enum nl80211_commands {
* until the channel switch event.
* @NL80211_ATTR_CH_SWITCH_BLOCK_TX: flag attribute specifying that transmission
* must be blocked on the current channel (before the channel switch
- * operation).
+ * operation). Also included in the channel switch started event if quiet
+ * was requested by the AP.
* @NL80211_ATTR_CSA_IES: Nested set of attributes containing the IE information
* for the time while performing a channel switch.
- * @NL80211_ATTR_CSA_C_OFF_BEACON: An array of offsets (u16) to the channel
- * switch counters in the beacons tail (%NL80211_ATTR_BEACON_TAIL).
- * @NL80211_ATTR_CSA_C_OFF_PRESP: An array of offsets (u16) to the channel
- * switch counters in the probe response (%NL80211_ATTR_PROBE_RESP).
+ * @NL80211_ATTR_CNTDWN_OFFS_BEACON: An array of offsets (u16) to the channel
+ * switch or color change counters in the beacons tail (%NL80211_ATTR_BEACON_TAIL).
+ * @NL80211_ATTR_CNTDWN_OFFS_PRESP: An array of offsets (u16) to the channel
+ * switch or color change counters in the probe response (%NL80211_ATTR_PROBE_RESP).
*
* @NL80211_ATTR_RXMGMT_FLAGS: flags for nl80211_send_mgmt(), u32.
* As specified in the &enum nl80211_rxmgmt_flags.
@@ -2031,7 +2225,7 @@ enum nl80211_commands {
* @NL80211_ATTR_STA_SUPPORTED_CHANNELS: array of supported channels.
*
* @NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES: array of supported
- * supported operating classes.
+ * operating classes.
*
* @NL80211_ATTR_HANDLE_DFS: A flag indicating whether user space
* controls DFS operation in IBSS mode. If the flag is included in
@@ -2197,8 +2391,10 @@ enum nl80211_commands {
*
* @NL80211_ATTR_IFTYPE_EXT_CAPA: Nested attribute of the following attributes:
* %NL80211_ATTR_IFTYPE, %NL80211_ATTR_EXT_CAPA,
- * %NL80211_ATTR_EXT_CAPA_MASK, to specify the extended capabilities per
- * interface type.
+ * %NL80211_ATTR_EXT_CAPA_MASK, to specify the extended capabilities and
+ * other interface-type specific capabilities per interface type. For MLO,
+ * %NL80211_ATTR_EML_CAPABILITY and %NL80211_ATTR_MLD_CAPA_AND_OPS are
+ * present.
*
* @NL80211_ATTR_MU_MIMO_GROUP_DATA: array of 24 bytes that defines a MU-MIMO
* groupID for monitor mode.
@@ -2309,10 +2505,11 @@ enum nl80211_commands {
*
* @NL80211_ATTR_PMK: attribute for passing PMK key material. Used with
* %NL80211_CMD_SET_PMKSA for the PMKSA identified by %NL80211_ATTR_PMKID.
- * For %NL80211_CMD_CONNECT it is used to provide PSK for offloading 4-way
- * handshake for WPA/WPA2-PSK networks. For 802.1X authentication it is
- * used with %NL80211_CMD_SET_PMK. For offloaded FT support this attribute
- * specifies the PMK-R0 if NL80211_ATTR_PMKR0_NAME is included as well.
+ * For %NL80211_CMD_CONNECT and %NL80211_CMD_START_AP it is used to provide
+ * PSK for offloading 4-way handshake for WPA/WPA2-PSK networks. For 802.1X
+ * authentication it is used with %NL80211_CMD_SET_PMK. For offloaded FT
+ * support this attribute specifies the PMK-R0 if NL80211_ATTR_PMKR0_NAME
+ * is included as well.
*
* @NL80211_ATTR_SCHED_SCAN_MULTI: flag attribute which user-space shall use to
* indicate that it supports multiple active scheduled scan requests.
@@ -2333,7 +2530,9 @@ enum nl80211_commands {
* space supports external authentication. This attribute shall be used
* with %NL80211_CMD_CONNECT and %NL80211_CMD_START_AP request. The driver
* may offload authentication processing to user space if this capability
- * is indicated in the respective requests from the user space.
+ * is indicated in the respective requests from the user space. (This flag
+ * attribute is deprecated for %NL80211_CMD_START_AP; use
+ * %NL80211_ATTR_AP_SETTINGS_FLAGS)
*
* @NL80211_ATTR_NSS: Station's New/updated RX_NSS value notified using this
* u8 attribute. This is used with %NL80211_CMD_STA_OPMODE_CHANGED.
@@ -2342,7 +2541,7 @@ enum nl80211_commands {
* nl80211_txq_stats)
* @NL80211_ATTR_TXQ_LIMIT: Total packet limit for the TXQ queues for this phy.
* The smaller of this and the memory limit is enforced.
- * @NL80211_ATTR_TXQ_MEMORY_LIMIT: Total memory memory limit (in bytes) for the
+ * @NL80211_ATTR_TXQ_MEMORY_LIMIT: Total memory limit (in bytes) for the
* TXQ queues for this phy. The smaller of this and the packet limit is
* enforced.
* @NL80211_ATTR_TXQ_QUANTUM: TXQ scheduler quantum (bytes). Number of bytes
@@ -2400,6 +2599,156 @@ enum nl80211_commands {
* @NL80211_ATTR_VLAN_ID: VLAN ID (1..4094) for the station and VLAN group key
* (u16).
*
+ * @NL80211_ATTR_HE_BSS_COLOR: nested attribute for BSS Color Settings.
+ *
+ * @NL80211_ATTR_IFTYPE_AKM_SUITES: nested array attribute, with each entry
+ * using attributes from &enum nl80211_iftype_akm_attributes. This
+ * attribute is sent in a response to %NL80211_CMD_GET_WIPHY indicating
+ * supported AKM suites capability per interface. AKMs advertised in
+ * %NL80211_ATTR_AKM_SUITES are default capabilities if AKM suites not
+ * advertised for a specific interface type.
+ *
+ * @NL80211_ATTR_TID_CONFIG: TID specific configuration in a
+ * nested attribute with &enum nl80211_tid_config_attr sub-attributes;
+ * on output (in wiphy attributes) it contains only the feature sub-
+ * attributes.
+ *
+ * @NL80211_ATTR_CONTROL_PORT_NO_PREAUTH: disable preauth frame rx on control
+ * port in order to forward/receive them as ordinary data frames.
+ *
+ * @NL80211_ATTR_PMK_LIFETIME: Maximum lifetime for PMKSA in seconds (u32,
+ * dot11RSNAConfigPMKLifetime; 0 is not a valid value).
+ * An optional parameter configured through %NL80211_CMD_SET_PMKSA.
+ * Drivers that trigger roaming need to know the lifetime of the
+ * configured PMKSA for triggering the full vs. PMKSA caching based
+ * authentication. This timeout helps authentication methods like SAE,
+ * where PMK gets updated only by going through a full (new SAE)
+ * authentication instead of getting updated during an association for EAP
+ * authentication. If no new full authentication occurs within the PMK
+ * lifetime, a disassociation is forced at its end. (A worked arithmetic
+ * sketch follows the attribute enum below.)
+ *
+ * @NL80211_ATTR_PMK_REAUTH_THRESHOLD: Reauthentication threshold time, in
+ * terms of percentage of %NL80211_ATTR_PMK_LIFETIME
+ * (u8, dot11RSNAConfigPMKReauthThreshold, 1..100). This is an optional
+ * parameter configured through %NL80211_CMD_SET_PMKSA. Requests the
+ * driver to trigger a full authentication roam (without PMKSA caching)
+ * after the reauthentication threshold time, but before the PMK lifetime
+ * has expired.
+ *
+ * Authentication methods like SAE need to be able to generate a new PMKSA
+ * entry without having to force a disconnection after the PMK timeout. If
+ * no roaming occurs between the reauth threshold and PMK expiration,
+ * disassociation is still forced.
+ * @NL80211_ATTR_RECEIVE_MULTICAST: multicast flag for the
+ * %NL80211_CMD_REGISTER_FRAME command, see the description there.
+ * @NL80211_ATTR_WIPHY_FREQ_OFFSET: offset of the associated
+ * %NL80211_ATTR_WIPHY_FREQ in positive KHz. Only valid when supplied with
+ * an %NL80211_ATTR_WIPHY_FREQ.
+ * @NL80211_ATTR_CENTER_FREQ1_OFFSET: Center frequency offset in KHz for the
+ * first channel segment specified in %NL80211_ATTR_CENTER_FREQ1.
+ * @NL80211_ATTR_SCAN_FREQ_KHZ: nested attribute with KHz frequencies
+ *
+ * @NL80211_ATTR_HE_6GHZ_CAPABILITY: HE 6 GHz Band Capability element (from
+ * association request when used with NL80211_CMD_NEW_STATION).
+ *
+ * @NL80211_ATTR_FILS_DISCOVERY: Optional parameter to configure FILS
+ * discovery. It is a nested attribute, see
+ * &enum nl80211_fils_discovery_attributes.
+ *
+ * @NL80211_ATTR_UNSOL_BCAST_PROBE_RESP: Optional parameter to configure
+ * unsolicited broadcast probe response. It is a nested attribute, see
+ * &enum nl80211_unsol_bcast_probe_resp_attributes.
+ *
+ * @NL80211_ATTR_S1G_CAPABILITY: S1G Capability information element (from
+ * association request when used with NL80211_CMD_NEW_STATION)
+ * @NL80211_ATTR_S1G_CAPABILITY_MASK: S1G Capability Information element
+ * override mask. Used with NL80211_ATTR_S1G_CAPABILITY in
+ * NL80211_CMD_ASSOCIATE or NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_SAE_PWE: Indicates the mechanism(s) allowed for SAE PWE
+ * derivation in WPA3-Personal networks which are using SAE authentication.
+ * This is a u8 attribute that encapsulates one of the values from
+ * &enum nl80211_sae_pwe_mechanism.
+ *
+ * @NL80211_ATTR_SAR_SPEC: SAR power limitation specification when
+ * used with %NL80211_CMD_SET_SAR_SPECS. The message contains fields
+ * of %nl80211_sar_attrs which specify the SAR type and related
+ * SAR specs. The SAR specs contain an array of %nl80211_sar_specs_attrs.
+ *
+ * @NL80211_ATTR_RECONNECT_REQUESTED: flag attribute, used with deauth and
+ * disassoc events to indicate that an immediate reconnect to the AP
+ * is desired.
+ *
+ * @NL80211_ATTR_OBSS_COLOR_BITMAP: bitmap of the u64 BSS colors for the
+ * %NL80211_CMD_OBSS_COLOR_COLLISION event.
+ *
+ * @NL80211_ATTR_COLOR_CHANGE_COUNT: u8 attribute specifying the number of TBTTs
+ * until the color switch event.
+ * @NL80211_ATTR_COLOR_CHANGE_COLOR: u8 attribute specifying the color that we are
+ * switching to.
+ * @NL80211_ATTR_COLOR_CHANGE_ELEMS: Nested set of attributes containing the IE
+ * information for the time while performing a color switch.
+ *
+ * @NL80211_ATTR_MBSSID_CONFIG: Nested attribute for multiple BSSID
+ * advertisements (MBSSID) parameters in AP mode.
+ * Kernel uses this attribute to indicate the driver's support for MBSSID
+ * and enhanced multi-BSSID advertisements (EMA AP) to userspace.
+ * Userspace should use this attribute to configure per-interface MBSSID
+ * parameters.
+ * See &enum nl80211_mbssid_config_attributes for details.
+ *
+ * @NL80211_ATTR_MBSSID_ELEMS: Nested parameter to pass multiple BSSID elements.
+ * Mandatory parameter for the transmitting interface to enable MBSSID.
+ * Optional for the non-transmitting interfaces.
+ *
+ * @NL80211_ATTR_RADAR_BACKGROUND: Configure dedicated offchannel chain
+ * available for radar/CAC detection on some hw. This chain can't be used
+ * to transmit or receive frames and it is bound to a running wdev.
+ * Background radar/CAC detection avoids the CAC downtime of switching
+ * to a different channel during CAC detection on the selected
+ * radar channel.
+ *
+ * @NL80211_ATTR_AP_SETTINGS_FLAGS: u32 attribute contains ap settings flags,
+ * enumerated in &enum nl80211_ap_settings_flags. This attribute shall be
+ * used with %NL80211_CMD_START_AP request.
+ *
+ * @NL80211_ATTR_EHT_CAPABILITY: EHT Capability information element (from
+ * association request when used with NL80211_CMD_NEW_STATION). Can be set
+ * only if %NL80211_STA_FLAG_WME is set.
+ *
+ * @NL80211_ATTR_MLO_LINK_ID: A (u8) link ID for use with MLO, to be used with
+ * various commands that need a link ID to operate.
+ * @NL80211_ATTR_MLO_LINKS: A nested array of links, each containing some
+ * per-link information and a link ID.
+ * @NL80211_ATTR_MLD_ADDR: An MLD address, used with various commands such as
+ * authenticate/associate.
+ *
+ * @NL80211_ATTR_MLO_SUPPORT: Flag attribute to indicate user space supports MLO
+ * connection. Used with %NL80211_CMD_CONNECT. If this attribute is not
+ * included in %NL80211_CMD_CONNECT, drivers must not perform an MLO
+ * connection.
+ *
+ * @NL80211_ATTR_MAX_NUM_AKM_SUITES: U16 attribute. Indicates maximum number of
+ * AKM suites allowed for %NL80211_CMD_CONNECT, %NL80211_CMD_ASSOCIATE and
+ * %NL80211_CMD_START_AP in %NL80211_CMD_GET_WIPHY response. If this
+ * attribute is not present userspace shall consider maximum number of AKM
+ * suites allowed as %NL80211_MAX_NR_AKM_SUITES which is the legacy maximum
+ * number prior to the introduction of this attribute.
+ *
+ * @NL80211_ATTR_EML_CAPABILITY: EML Capability information (u16)
+ * @NL80211_ATTR_MLD_CAPA_AND_OPS: MLD Capabilities and Operations (u16)
+ *
+ * @NL80211_ATTR_TX_HW_TIMESTAMP: Hardware timestamp for TX operation in
+ * nanoseconds (u64). This is the device clock timestamp so it will
+ * probably reset when the device is stopped or the firmware is reset.
+ * When used with %NL80211_CMD_FRAME_TX_STATUS, indicates the frame TX
+ * timestamp. When used with %NL80211_CMD_FRAME RX notification, indicates
+ * the ack TX timestamp.
+ * @NL80211_ATTR_RX_HW_TIMESTAMP: Hardware timestamp for RX operation in
+ * nanoseconds (u64). This is the device clock timestamp so it will
+ * probably reset when the device is stopped or the firmware is reset.
+ * When used with %NL80211_CMD_FRAME_TX_STATUS, indicates the ack RX
+ * timestamp. When used with %NL80211_CMD_FRAME RX notification, indicates
+ * the incoming frame RX timestamp.
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2706,8 +3055,8 @@ enum nl80211_attrs {
NL80211_ATTR_CH_SWITCH_COUNT,
NL80211_ATTR_CH_SWITCH_BLOCK_TX,
NL80211_ATTR_CSA_IES,
- NL80211_ATTR_CSA_C_OFF_BEACON,
- NL80211_ATTR_CSA_C_OFF_PRESP,
+ NL80211_ATTR_CNTDWN_OFFS_BEACON,
+ NL80211_ATTR_CNTDWN_OFFS_PRESP,
NL80211_ATTR_RXMGMT_FLAGS,
@@ -2864,6 +3213,70 @@ enum nl80211_attrs {
NL80211_ATTR_VLAN_ID,
+ NL80211_ATTR_HE_BSS_COLOR,
+
+ NL80211_ATTR_IFTYPE_AKM_SUITES,
+
+ NL80211_ATTR_TID_CONFIG,
+
+ NL80211_ATTR_CONTROL_PORT_NO_PREAUTH,
+
+ NL80211_ATTR_PMK_LIFETIME,
+ NL80211_ATTR_PMK_REAUTH_THRESHOLD,
+
+ NL80211_ATTR_RECEIVE_MULTICAST,
+ NL80211_ATTR_WIPHY_FREQ_OFFSET,
+ NL80211_ATTR_CENTER_FREQ1_OFFSET,
+ NL80211_ATTR_SCAN_FREQ_KHZ,
+
+ NL80211_ATTR_HE_6GHZ_CAPABILITY,
+
+ NL80211_ATTR_FILS_DISCOVERY,
+
+ NL80211_ATTR_UNSOL_BCAST_PROBE_RESP,
+
+ NL80211_ATTR_S1G_CAPABILITY,
+ NL80211_ATTR_S1G_CAPABILITY_MASK,
+
+ NL80211_ATTR_SAE_PWE,
+
+ NL80211_ATTR_RECONNECT_REQUESTED,
+
+ NL80211_ATTR_SAR_SPEC,
+
+ NL80211_ATTR_DISABLE_HE,
+
+ NL80211_ATTR_OBSS_COLOR_BITMAP,
+
+ NL80211_ATTR_COLOR_CHANGE_COUNT,
+ NL80211_ATTR_COLOR_CHANGE_COLOR,
+ NL80211_ATTR_COLOR_CHANGE_ELEMS,
+
+ NL80211_ATTR_MBSSID_CONFIG,
+ NL80211_ATTR_MBSSID_ELEMS,
+
+ NL80211_ATTR_RADAR_BACKGROUND,
+
+ NL80211_ATTR_AP_SETTINGS_FLAGS,
+
+ NL80211_ATTR_EHT_CAPABILITY,
+
+ NL80211_ATTR_DISABLE_EHT,
+
+ NL80211_ATTR_MLO_LINKS,
+ NL80211_ATTR_MLO_LINK_ID,
+ NL80211_ATTR_MLD_ADDR,
+
+ NL80211_ATTR_MLO_SUPPORT,
+
+ NL80211_ATTR_MAX_NUM_AKM_SUITES,
+
+ NL80211_ATTR_EML_CAPABILITY,
+ NL80211_ATTR_MLD_CAPA_AND_OPS,
+
+ NL80211_ATTR_TX_HW_TIMESTAMP,
+ NL80211_ATTR_RX_HW_TIMESTAMP,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
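As noted in the %NL80211_ATTR_PMK_LIFETIME documentation above, here is the promised worked example of the reauthentication-threshold arithmetic. The helper is hypothetical and only illustrates the semantics of the two attributes:

#include <stdint.h>

/* Hypothetical: seconds after PMKSA creation at which a driver should
 * attempt a full (non-PMKSA-cached) reauthentication roam. */
static uint32_t pmk_reauth_time(uint32_t lifetime_secs, uint8_t threshold_pct)
{
        /* threshold_pct is 1..100, a percentage of the PMK lifetime */
        return (uint32_t)(((uint64_t)lifetime_secs * threshold_pct) / 100);
}
/* e.g. a 43200 s (12 h) lifetime with a 75% threshold -> reauth at 32400 s */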
@@ -2876,6 +3289,8 @@ enum nl80211_attrs {
#define NL80211_ATTR_MESH_PARAMS NL80211_ATTR_MESH_CONFIG
#define NL80211_ATTR_IFACE_SOCKET_OWNER NL80211_ATTR_SOCKET_OWNER
#define NL80211_ATTR_SAE_DATA NL80211_ATTR_AUTH_DATA
+#define NL80211_ATTR_CSA_C_OFF_BEACON NL80211_ATTR_CNTDWN_OFFS_BEACON
+#define NL80211_ATTR_CSA_C_OFF_PRESP NL80211_ATTR_CNTDWN_OFFS_PRESP
/*
* Allow user space programs to use #ifdef on new attributes by defining them
@@ -2916,7 +3331,14 @@ enum nl80211_attrs {
#define NL80211_HE_MIN_CAPABILITY_LEN 16
#define NL80211_HE_MAX_CAPABILITY_LEN 54
#define NL80211_MAX_NR_CIPHER_SUITES 5
+
+/*
+ * NL80211_MAX_NR_AKM_SUITES is obsolete when %NL80211_ATTR_MAX_NUM_AKM_SUITES
+ * is present in the %NL80211_CMD_GET_WIPHY response.
+ */
#define NL80211_MAX_NR_AKM_SUITES 2
+#define NL80211_EHT_MIN_CAPABILITY_LEN 13
+#define NL80211_EHT_MAX_CAPABILITY_LEN 51
#define NL80211_MIN_REMAIN_ON_CHANNEL_TIME 10
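Given the obsolescence note above, a %NL80211_CMD_GET_WIPHY consumer would fall back to the legacy constant when the new attribute is absent. A minimal sketch, assuming tb[] is a libnl-3 nla_parse() result; the helper name is this example's own:

#include <netlink/attr.h>
#include <linux/nl80211.h>

/* Illustrative: maximum AKM suites for CONNECT/ASSOCIATE/START_AP */
static unsigned int max_akm_suites(struct nlattr *tb[])
{
        if (tb[NL80211_ATTR_MAX_NUM_AKM_SUITES])
                return nla_get_u16(tb[NL80211_ATTR_MAX_NUM_AKM_SUITES]);
        return NL80211_MAX_NR_AKM_SUITES; /* legacy maximum */
}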
@@ -2944,7 +3366,7 @@ enum nl80211_attrs {
* and therefore can't be created in the normal ways, use the
* %NL80211_CMD_START_P2P_DEVICE and %NL80211_CMD_STOP_P2P_DEVICE
* commands to create and destroy one
- * @NL80211_IF_TYPE_OCB: Outside Context of a BSS
+ * @NL80211_IFTYPE_OCB: Outside Context of a BSS
* This mode corresponds to the MIB variable dot11OCBActivated=true
* @NL80211_IFTYPE_NAN: NAN device interface type (not a netdev)
* @NL80211_IFTYPE_MAX: highest interface type number currently defined
@@ -3054,6 +3476,18 @@ enum nl80211_he_gi {
};
/**
+ * enum nl80211_he_ltf - HE long training field
+ * @NL80211_RATE_INFO_HE_1xLTF: 3.2 usec
+ * @NL80211_RATE_INFO_HE_2xLTF: 6.4 usec
+ * @NL80211_RATE_INFO_HE_4xLTF: 12.8 usec
+ */
+enum nl80211_he_ltf {
+ NL80211_RATE_INFO_HE_1XLTF,
+ NL80211_RATE_INFO_HE_2XLTF,
+ NL80211_RATE_INFO_HE_4XLTF,
+};
+
+/**
* enum nl80211_he_ru_alloc - HE RU allocation values
* @NL80211_RATE_INFO_HE_RU_ALLOC_26: 26-tone RU allocation
* @NL80211_RATE_INFO_HE_RU_ALLOC_52: 52-tone RU allocation
@@ -3074,6 +3508,56 @@ enum nl80211_he_ru_alloc {
};
/**
+ * enum nl80211_eht_gi - EHT guard interval
+ * @NL80211_RATE_INFO_EHT_GI_0_8: 0.8 usec
+ * @NL80211_RATE_INFO_EHT_GI_1_6: 1.6 usec
+ * @NL80211_RATE_INFO_EHT_GI_3_2: 3.2 usec
+ */
+enum nl80211_eht_gi {
+ NL80211_RATE_INFO_EHT_GI_0_8,
+ NL80211_RATE_INFO_EHT_GI_1_6,
+ NL80211_RATE_INFO_EHT_GI_3_2,
+};
+
+/**
+ * enum nl80211_eht_ru_alloc - EHT RU allocation values
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_26: 26-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_52: 52-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_52P26: 52+26-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_106: 106-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_106P26: 106+26 tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_242: 242-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_484: 484-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_484P242: 484+242 tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_996: 996-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_996P484: 996+484 tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242: 996+484+242 tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_2x996: 2x996-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484: 2x996+484 tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_3x996: 3x996-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484: 3x996+484 tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_4x996: 4x996-tone RU allocation
+ */
+enum nl80211_eht_ru_alloc {
+ NL80211_RATE_INFO_EHT_RU_ALLOC_26,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_52,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_52P26,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_106,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_106P26,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_242,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_484,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_484P242,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_996,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_996P484,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_2x996,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_3x996,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_4x996,
+};
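For consumers that want the subcarrier count behind these values, a hypothetical mapping helper that simply transcribes the kernel-doc above:

#include <linux/nl80211.h>

static int eht_ru_alloc_tones(enum nl80211_eht_ru_alloc ru)
{
        switch (ru) {
        case NL80211_RATE_INFO_EHT_RU_ALLOC_26:          return 26;
        case NL80211_RATE_INFO_EHT_RU_ALLOC_52:          return 52;
        case NL80211_RATE_INFO_EHT_RU_ALLOC_52P26:       return 52 + 26;
        case NL80211_RATE_INFO_EHT_RU_ALLOC_106:         return 106;
        case NL80211_RATE_INFO_EHT_RU_ALLOC_106P26:      return 106 + 26;
        case NL80211_RATE_INFO_EHT_RU_ALLOC_242:         return 242;
        case NL80211_RATE_INFO_EHT_RU_ALLOC_484:         return 484;
        case NL80211_RATE_INFO_EHT_RU_ALLOC_484P242:     return 484 + 242;
        case NL80211_RATE_INFO_EHT_RU_ALLOC_996:         return 996;
        case NL80211_RATE_INFO_EHT_RU_ALLOC_996P484:     return 996 + 484;
        case NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242: return 996 + 484 + 242;
        case NL80211_RATE_INFO_EHT_RU_ALLOC_2x996:       return 2 * 996;
        case NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484:   return 2 * 996 + 484;
        case NL80211_RATE_INFO_EHT_RU_ALLOC_3x996:       return 3 * 996;
        case NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484:   return 3 * 996 + 484;
        case NL80211_RATE_INFO_EHT_RU_ALLOC_4x996:       return 4 * 996;
        }
        return -1;      /* unknown/unsupported value */
}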
+
+/**
* enum nl80211_rate_info - bitrate information
*
* These attribute types are used with %NL80211_STA_INFO_TXRATE
@@ -3112,6 +3596,13 @@ enum nl80211_he_ru_alloc {
* @NL80211_RATE_INFO_HE_DCM: HE DCM value (u8, 0/1)
* @NL80211_RATE_INFO_RU_ALLOC: HE RU allocation, if not present then
* non-OFDMA was used (u8, see &enum nl80211_he_ru_alloc)
+ * @NL80211_RATE_INFO_320_MHZ_WIDTH: 320 MHz bitrate
+ * @NL80211_RATE_INFO_EHT_MCS: EHT MCS index (u8, 0-15)
+ * @NL80211_RATE_INFO_EHT_NSS: EHT NSS value (u8, 1-8)
+ * @NL80211_RATE_INFO_EHT_GI: EHT guard interval identifier
+ * (u8, see &enum nl80211_eht_gi)
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC: EHT RU allocation, if not present then
+ * non-OFDMA was used (u8, see &enum nl80211_eht_ru_alloc)
* @__NL80211_RATE_INFO_AFTER_LAST: internal use
*/
enum nl80211_rate_info {
@@ -3133,6 +3624,11 @@ enum nl80211_rate_info {
NL80211_RATE_INFO_HE_GI,
NL80211_RATE_INFO_HE_DCM,
NL80211_RATE_INFO_HE_RU_ALLOC,
+ NL80211_RATE_INFO_320_MHZ_WIDTH,
+ NL80211_RATE_INFO_EHT_MCS,
+ NL80211_RATE_INFO_EHT_NSS,
+ NL80211_RATE_INFO_EHT_GI,
+ NL80211_RATE_INFO_EHT_RU_ALLOC,
/* keep last */
__NL80211_RATE_INFO_AFTER_LAST,
@@ -3247,6 +3743,8 @@ enum nl80211_sta_bss_param {
* @NL80211_STA_INFO_AIRTIME_LINK_METRIC: airtime link metric for mesh station
* @NL80211_STA_INFO_ASSOC_AT_BOOTTIME: Timestamp (CLOCK_BOOTTIME, nanoseconds)
* of STA's association
+ * @NL80211_STA_INFO_CONNECTED_TO_AS: set to true if STA has a path to an
+ * authentication server (u8, 0 or 1)
* @__NL80211_STA_INFO_AFTER_LAST: internal
* @NL80211_STA_INFO_MAX: highest possible station info attribute
*/
@@ -3294,6 +3792,7 @@ enum nl80211_sta_info {
NL80211_STA_INFO_AIRTIME_WEIGHT,
NL80211_STA_INFO_AIRTIME_LINK_METRIC,
NL80211_STA_INFO_ASSOC_AT_BOOTTIME,
+ NL80211_STA_INFO_CONNECTED_TO_AS,
/* keep last */
__NL80211_STA_INFO_AFTER_LAST,
@@ -3440,9 +3939,20 @@ enum nl80211_mpath_info {
* capabilities IE
* @NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE: HE PPE thresholds information as
* defined in HE capabilities IE
- * @NL80211_BAND_IFTYPE_ATTR_MAX: highest band HE capability attribute currently
- * defined
+ * @NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA: HE 6GHz band capabilities (__le16),
+ * given for all 6 GHz band channels
+ * @NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS: vendor element capabilities that are
+ * advertised on this band/for this iftype (binary)
+ * @NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC: EHT MAC capabilities as in EHT
+ * capabilities element
+ * @NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PHY: EHT PHY capabilities as in EHT
+ * capabilities element
+ * @NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MCS_SET: EHT supported NSS/MCS as in EHT
+ * capabilities element
+ * @NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PPE: EHT PPE thresholds information as
+ * defined in EHT capabilities element
* @__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST: internal use
+ * @NL80211_BAND_IFTYPE_ATTR_MAX: highest band attribute currently defined
*/
enum nl80211_band_iftype_attr {
__NL80211_BAND_IFTYPE_ATTR_INVALID,
@@ -3452,6 +3962,12 @@ enum nl80211_band_iftype_attr {
NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY,
NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET,
NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE,
+ NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA,
+ NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS,
+ NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC,
+ NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PHY,
+ NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MCS_SET,
+ NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PPE,
/* keep last */
__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST,
@@ -3583,6 +4099,23 @@ enum nl80211_wmm_rule {
* @NL80211_FREQUENCY_ATTR_WMM: this channel has wmm limitations.
* This is a nested attribute that contains the wmm limitation per AC.
* (see &enum nl80211_wmm_rule)
+ * @NL80211_FREQUENCY_ATTR_NO_HE: HE operation is not allowed on this channel
+ * in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_OFFSET: frequency offset in KHz
+ * @NL80211_FREQUENCY_ATTR_1MHZ: 1 MHz operation is allowed
+ * on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_2MHZ: 2 MHz operation is allowed
+ * on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_4MHZ: 4 MHz operation is allowed
+ * on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_8MHZ: 8 MHz operation is allowed
+ * on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_16MHZ: 16 MHz operation is allowed
+ * on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_NO_320MHZ: any 320 MHz channel using this channel
+ * as the primary or any of the secondary channels isn't possible
+ * @NL80211_FREQUENCY_ATTR_NO_EHT: EHT operation is not allowed on this channel
+ * in current regulatory domain.
* @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
* currently defined
* @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
@@ -3612,6 +4145,15 @@ enum nl80211_frequency_attr {
NL80211_FREQUENCY_ATTR_NO_20MHZ,
NL80211_FREQUENCY_ATTR_NO_10MHZ,
NL80211_FREQUENCY_ATTR_WMM,
+ NL80211_FREQUENCY_ATTR_NO_HE,
+ NL80211_FREQUENCY_ATTR_OFFSET,
+ NL80211_FREQUENCY_ATTR_1MHZ,
+ NL80211_FREQUENCY_ATTR_2MHZ,
+ NL80211_FREQUENCY_ATTR_4MHZ,
+ NL80211_FREQUENCY_ATTR_8MHZ,
+ NL80211_FREQUENCY_ATTR_16MHZ,
+ NL80211_FREQUENCY_ATTR_NO_320MHZ,
+ NL80211_FREQUENCY_ATTR_NO_EHT,
/* keep last */
__NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@ -3809,6 +4351,8 @@ enum nl80211_sched_scan_match_attr {
* @NL80211_RRF_NO_HT40PLUS: channels can't be used in HT40+ operation
* @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed
* @NL80211_RRF_NO_160MHZ: 160MHz operation not allowed
+ * @NL80211_RRF_NO_HE: HE operation not allowed
+ * @NL80211_RRF_NO_320MHZ: 320MHz operation not allowed
*/
enum nl80211_reg_rule_flags {
NL80211_RRF_NO_OFDM = 1<<0,
@@ -3826,6 +4370,8 @@ enum nl80211_reg_rule_flags {
NL80211_RRF_NO_HT40PLUS = 1<<14,
NL80211_RRF_NO_80MHZ = 1<<15,
NL80211_RRF_NO_160MHZ = 1<<16,
+ NL80211_RRF_NO_HE = 1<<17,
+ NL80211_RRF_NO_320MHZ = 1<<18,
};
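These are bits in a u32 flags word, so userspace checks them by masking; an illustrative (hypothetical) test for the two new bits, conservatively requiring HE for a 320 MHz EHT channel:

#include <stdbool.h>
#include <stdint.h>
#include <linux/nl80211.h>

/* Illustrative: can a 320 MHz EHT channel fit under this regulatory rule? */
static bool rule_allows_eht_320mhz(uint32_t rrf_flags)
{
        return !(rrf_flags & (NL80211_RRF_NO_320MHZ | NL80211_RRF_NO_HE));
}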
#define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR
@@ -3903,6 +4449,7 @@ enum nl80211_user_reg_hint_type {
* receiving frames destined to the local BSS
* @NL80211_SURVEY_INFO_MAX: highest survey info attribute number
* currently defined
+ * @NL80211_SURVEY_INFO_FREQUENCY_OFFSET: center frequency offset in KHz
* @__NL80211_SURVEY_INFO_AFTER_LAST: internal use
*/
enum nl80211_survey_info {
@@ -3918,6 +4465,7 @@ enum nl80211_survey_info {
NL80211_SURVEY_INFO_TIME_SCAN,
NL80211_SURVEY_INFO_PAD,
NL80211_SURVEY_INFO_TIME_BSS_RX,
+ NL80211_SURVEY_INFO_FREQUENCY_OFFSET,
/* keep last */
__NL80211_SURVEY_INFO_AFTER_LAST,
@@ -4103,6 +4651,16 @@ enum nl80211_mesh_power_mode {
* field. If left unset then the mesh formation field will only
* advertise such if there is an active root mesh path.
*
+ * @NL80211_MESHCONF_NOLEARN: Try to avoid multi-hop path discovery (e.g.
+ * PREQ/PREP for HWMP) if the destination is a direct neighbor. Note that
+ * this might not be the optimal decision as a multi-hop route might be
+ * better. So if using this setting you will likely also want to disable
+ * dot11MeshForwarding and use another mesh routing protocol on top.
+ *
+ * @NL80211_MESHCONF_CONNECTED_TO_AS: If set to true then this mesh STA
+ * will advertise that it is connected to an authentication server
+ * in the mesh formation field.
+ *
* @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
*/
enum nl80211_meshconf_params {
@@ -4136,6 +4694,8 @@ enum nl80211_meshconf_params {
NL80211_MESHCONF_AWAKE_WINDOW,
NL80211_MESHCONF_PLINK_TIMEOUT,
NL80211_MESHCONF_CONNECTED_TO_GATE,
+ NL80211_MESHCONF_NOLEARN,
+ NL80211_MESHCONF_CONNECTED_TO_AS,
/* keep last */
__NL80211_MESHCONF_ATTR_AFTER_LAST,
@@ -4304,6 +4864,13 @@ enum nl80211_key_mode {
* attribute must be provided as well
* @NL80211_CHAN_WIDTH_5: 5 MHz OFDM channel
* @NL80211_CHAN_WIDTH_10: 10 MHz OFDM channel
+ * @NL80211_CHAN_WIDTH_1: 1 MHz OFDM channel
+ * @NL80211_CHAN_WIDTH_2: 2 MHz OFDM channel
+ * @NL80211_CHAN_WIDTH_4: 4 MHz OFDM channel
+ * @NL80211_CHAN_WIDTH_8: 8 MHz OFDM channel
+ * @NL80211_CHAN_WIDTH_16: 16 MHz OFDM channel
+ * @NL80211_CHAN_WIDTH_320: 320 MHz channel, the %NL80211_ATTR_CENTER_FREQ1
+ * attribute must be provided as well
*/
enum nl80211_chan_width {
NL80211_CHAN_WIDTH_20_NOHT,
@@ -4314,6 +4881,12 @@ enum nl80211_chan_width {
NL80211_CHAN_WIDTH_160,
NL80211_CHAN_WIDTH_5,
NL80211_CHAN_WIDTH_10,
+ NL80211_CHAN_WIDTH_1,
+ NL80211_CHAN_WIDTH_2,
+ NL80211_CHAN_WIDTH_4,
+ NL80211_CHAN_WIDTH_8,
+ NL80211_CHAN_WIDTH_16,
+ NL80211_CHAN_WIDTH_320,
};
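A hypothetical helper mapping these constants to nominal bandwidth in MHz; reporting 80+80 as its 160 MHz total is a choice of this sketch, not something the header mandates:

#include <linux/nl80211.h>

static int chan_width_mhz(enum nl80211_chan_width w)
{
        switch (w) {
        case NL80211_CHAN_WIDTH_1:        return 1;
        case NL80211_CHAN_WIDTH_2:        return 2;
        case NL80211_CHAN_WIDTH_4:        return 4;
        case NL80211_CHAN_WIDTH_8:        return 8;
        case NL80211_CHAN_WIDTH_16:       return 16;
        case NL80211_CHAN_WIDTH_5:        return 5;
        case NL80211_CHAN_WIDTH_10:       return 10;
        case NL80211_CHAN_WIDTH_20_NOHT:
        case NL80211_CHAN_WIDTH_20:       return 20;
        case NL80211_CHAN_WIDTH_40:       return 40;
        case NL80211_CHAN_WIDTH_80:       return 80;
        case NL80211_CHAN_WIDTH_80P80:    /* two 80 MHz segments */
        case NL80211_CHAN_WIDTH_160:      return 160;
        case NL80211_CHAN_WIDTH_320:      return 320;
        }
        return -1;
}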
/**
@@ -4324,11 +4897,15 @@ enum nl80211_chan_width {
* @NL80211_BSS_CHAN_WIDTH_20: control channel is 20 MHz wide or compatible
* @NL80211_BSS_CHAN_WIDTH_10: control channel is 10 MHz wide
* @NL80211_BSS_CHAN_WIDTH_5: control channel is 5 MHz wide
+ * @NL80211_BSS_CHAN_WIDTH_1: control channel is 1 MHz wide
+ * @NL80211_BSS_CHAN_WIDTH_2: control channel is 2 MHz wide
*/
enum nl80211_bss_scan_width {
NL80211_BSS_CHAN_WIDTH_20,
NL80211_BSS_CHAN_WIDTH_10,
NL80211_BSS_CHAN_WIDTH_5,
+ NL80211_BSS_CHAN_WIDTH_1,
+ NL80211_BSS_CHAN_WIDTH_2,
};
/**
@@ -4380,6 +4957,9 @@ enum nl80211_bss_scan_width {
* @NL80211_BSS_CHAIN_SIGNAL: per-chain signal strength of last BSS update.
* Contains a nested array of signal strength attributes (u8, dBm),
* using the nesting index as the antenna number.
+ * @NL80211_BSS_FREQUENCY_OFFSET: frequency offset in KHz
+ * @NL80211_BSS_MLO_LINK_ID: MLO link ID of the BSS (u8).
+ * @NL80211_BSS_MLD_ADDR: MLD address of this BSS if connected to it.
* @__NL80211_BSS_AFTER_LAST: internal
* @NL80211_BSS_MAX: highest BSS attribute
*/
@@ -4404,6 +4984,9 @@ enum nl80211_bss {
NL80211_BSS_PARENT_TSF,
NL80211_BSS_PARENT_BSSID,
NL80211_BSS_CHAIN_SIGNAL,
+ NL80211_BSS_FREQUENCY_OFFSET,
+ NL80211_BSS_MLO_LINK_ID,
+ NL80211_BSS_MLD_ADDR,
/* keep last */
__NL80211_BSS_AFTER_LAST,
@@ -4532,6 +5115,7 @@ enum nl80211_key_default_types {
* See &enum nl80211_key_default_types.
* @NL80211_KEY_MODE: the mode from enum nl80211_key_mode.
* Defaults to @NL80211_KEY_RX_TX.
+ * @NL80211_KEY_DEFAULT_BEACON: flag indicating default Beacon frame key
*
* @__NL80211_KEY_AFTER_LAST: internal
* @NL80211_KEY_MAX: highest key attribute
@@ -4547,6 +5131,7 @@ enum nl80211_key_attributes {
NL80211_KEY_TYPE,
NL80211_KEY_DEFAULT_TYPES,
NL80211_KEY_MODE,
+ NL80211_KEY_DEFAULT_BEACON,
/* keep last */
__NL80211_KEY_AFTER_LAST,
@@ -4565,6 +5150,10 @@ enum nl80211_key_attributes {
* @NL80211_TXRATE_VHT: VHT rates allowed for TX rate selection,
* see &struct nl80211_txrate_vht
* @NL80211_TXRATE_GI: configure GI, see &enum nl80211_txrate_gi
+ * @NL80211_TXRATE_HE: HE rates allowed for TX rate selection,
+ * see &struct nl80211_txrate_he
+ * @NL80211_TXRATE_HE_GI: configure HE GI, 0.8us, 1.6us and 3.2us.
+ * @NL80211_TXRATE_HE_LTF: configure HE LTF, 1XLTF, 2XLTF and 4XLTF.
* @__NL80211_TXRATE_AFTER_LAST: internal
* @NL80211_TXRATE_MAX: highest TX rate attribute
*/
@@ -4574,6 +5163,9 @@ enum nl80211_tx_rate_attributes {
NL80211_TXRATE_HT,
NL80211_TXRATE_VHT,
NL80211_TXRATE_GI,
+ NL80211_TXRATE_HE,
+ NL80211_TXRATE_HE_GI,
+ NL80211_TXRATE_HE_LTF,
/* keep last */
__NL80211_TXRATE_AFTER_LAST,
@@ -4591,6 +5183,15 @@ struct nl80211_txrate_vht {
__u16 mcs[NL80211_VHT_NSS_MAX];
};
+#define NL80211_HE_NSS_MAX 8
+/**
+ * struct nl80211_txrate_he - HE MCS/NSS txrate bitmap
+ * @mcs: MCS bitmap table for each NSS (array index 0 for 1 stream, etc.)
+ */
+struct nl80211_txrate_he {
+ __u16 mcs[NL80211_HE_NSS_MAX];
+};
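A brief usage sketch for this struct (illustrative; the per-band nesting under %NL80211_ATTR_TX_RATES is omitted and the helper name is invented): restrict HE TX to MCS 0-7 on two spatial streams.

#include <string.h>

/* Illustrative: allow only HE MCS 0-7 on NSS 1 and 2, then attach with
 * nla_put(msg, NL80211_TXRATE_HE, sizeof(he), &he); */
static void fill_he_mcs0_7_2ss(struct nl80211_txrate_he *he)
{
        memset(he, 0, sizeof(*he));
        he->mcs[0] = 0x00ff;    /* NSS 1: MCS 0-7 */
        he->mcs[1] = 0x00ff;    /* NSS 2: MCS 0-7 */
}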
+
enum nl80211_txrate_gi {
NL80211_TXRATE_DEFAULT_GI,
NL80211_TXRATE_FORCE_SGI,
@@ -4603,6 +5204,8 @@ enum nl80211_txrate_gi {
* @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz)
* @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 69.12 GHz)
* @NL80211_BAND_6GHZ: around 6 GHz band (5.9 - 7.2 GHz)
+ * @NL80211_BAND_S1GHZ: around 900MHz, supported by S1G PHYs
+ * @NL80211_BAND_LC: light communication band (placeholder)
* @NUM_NL80211_BANDS: number of bands, avoid using this in userspace
* since newer kernel versions may support more bands
*/
@@ -4611,6 +5214,8 @@ enum nl80211_band {
NL80211_BAND_5GHZ,
NL80211_BAND_60GHZ,
NL80211_BAND_6GHZ,
+ NL80211_BAND_S1GHZ,
+ NL80211_BAND_LC,
NUM_NL80211_BANDS,
};
@@ -4703,6 +5308,92 @@ enum nl80211_tx_power_setting {
};
/**
+ * enum nl80211_tid_config - TID config state
+ * @NL80211_TID_CONFIG_ENABLE: Enable config for the TID
+ * @NL80211_TID_CONFIG_DISABLE: Disable config for the TID
+ */
+enum nl80211_tid_config {
+ NL80211_TID_CONFIG_ENABLE,
+ NL80211_TID_CONFIG_DISABLE,
+};
+
+/**
+ * enum nl80211_tx_rate_setting - TX rate configuration type
+ * @NL80211_TX_RATE_AUTOMATIC: automatically determine TX rate
+ * @NL80211_TX_RATE_LIMITED: limit the TX rate by the TX rate parameter
+ * @NL80211_TX_RATE_FIXED: fix TX rate to the TX rate parameter
+ */
+enum nl80211_tx_rate_setting {
+ NL80211_TX_RATE_AUTOMATIC,
+ NL80211_TX_RATE_LIMITED,
+ NL80211_TX_RATE_FIXED,
+};
+
+/**
+ * enum nl80211_tid_config_attr - TID specific configuration.
+ * @NL80211_TID_CONFIG_ATTR_PAD: pad attribute for 64-bit values
+ * @NL80211_TID_CONFIG_ATTR_VIF_SUPP: a bitmap (u64) of attributes supported
+ * for per-vif configuration; doesn't list the ones that are generic
+ * (%NL80211_TID_CONFIG_ATTR_TIDS, %NL80211_TID_CONFIG_ATTR_OVERRIDE).
+ * @NL80211_TID_CONFIG_ATTR_PEER_SUPP: same as the previous per-vif one, but
+ * per peer instead.
+ * @NL80211_TID_CONFIG_ATTR_OVERRIDE: flag attribute; if set, indicates
+ * that the new configuration overrides all previous peer
+ * configurations, otherwise previous peer specific configurations
+ * should be left untouched.
+ * @NL80211_TID_CONFIG_ATTR_TIDS: a bitmask value of TIDs (bit 0 to 7)
+ * Its type is u16.
+ * @NL80211_TID_CONFIG_ATTR_NOACK: Configure ack policy for the TIDs
+ * specified in %NL80211_TID_CONFIG_ATTR_TIDS, see &enum nl80211_tid_config.
+ * Its type is u8.
+ * @NL80211_TID_CONFIG_ATTR_RETRY_SHORT: Number of retries used with data frame
+ * transmission, user-space sets this configuration in
+ * &NL80211_CMD_SET_TID_CONFIG. It is u8 type, min value is 1 and
+ * the max value is advertised by the driver in this attribute on
+ * output in wiphy capabilities.
+ * @NL80211_TID_CONFIG_ATTR_RETRY_LONG: Number of retries used with data frame
+ * transmission, user-space sets this configuration in
+ * &NL80211_CMD_SET_TID_CONFIG. Its type is u8, min value is 1 and
+ * the max value is advertised by the driver in this attribute on
+ * output in wiphy capabilities.
+ * @NL80211_TID_CONFIG_ATTR_AMPDU_CTRL: Enable/Disable MPDU aggregation
+ * for the TIDs specified in %NL80211_TID_CONFIG_ATTR_TIDS.
+ * Its type is u8, using the values from &nl80211_tid_config.
+ * @NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL: Enable/Disable RTS_CTS for the TIDs
+ * specified in %NL80211_TID_CONFIG_ATTR_TIDS. It is u8 type, using
+ * the values from &nl80211_tid_config.
+ * @NL80211_TID_CONFIG_ATTR_AMSDU_CTRL: Enable/Disable MSDU aggregation
+ * for the TIDs specified in %NL80211_TID_CONFIG_ATTR_TIDS.
+ * Its type is u8, using the values from &nl80211_tid_config.
+ * @NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE: This attribute notifies the
+ * driver of the type of TX rate that should be used for the TIDs
+ * specified in %NL80211_TID_CONFIG_ATTR_TIDS, using the values from
+ * &nl80211_tx_rate_setting.
+ * @NL80211_TID_CONFIG_ATTR_TX_RATE: Data frame TX rate mask to be applied
+ * with the parameters passed through %NL80211_ATTR_TX_RATES. The
+ * configuration is applied to the data frames for the TIDs on that
+ * connected station.
+ */
+enum nl80211_tid_config_attr {
+ __NL80211_TID_CONFIG_ATTR_INVALID,
+ NL80211_TID_CONFIG_ATTR_PAD,
+ NL80211_TID_CONFIG_ATTR_VIF_SUPP,
+ NL80211_TID_CONFIG_ATTR_PEER_SUPP,
+ NL80211_TID_CONFIG_ATTR_OVERRIDE,
+ NL80211_TID_CONFIG_ATTR_TIDS,
+ NL80211_TID_CONFIG_ATTR_NOACK,
+ NL80211_TID_CONFIG_ATTR_RETRY_SHORT,
+ NL80211_TID_CONFIG_ATTR_RETRY_LONG,
+ NL80211_TID_CONFIG_ATTR_AMPDU_CTRL,
+ NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL,
+ NL80211_TID_CONFIG_ATTR_AMSDU_CTRL,
+ NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE,
+ NL80211_TID_CONFIG_ATTR_TX_RATE,
+
+ /* keep last */
+ __NL80211_TID_CONFIG_ATTR_AFTER_LAST,
+ NL80211_TID_CONFIG_ATTR_MAX = __NL80211_TID_CONFIG_ATTR_AFTER_LAST - 1
+};
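Tying the above together, a hedged libnl-3 sketch sending %NL80211_CMD_SET_TID_CONFIG to enable the no-ack policy for TIDs 0 and 3; the two-level nesting (an array of entries under %NL80211_ATTR_TID_CONFIG) and the helper name are assumptions of this example:

#include <errno.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

static int set_tid_noack(struct nl_sock *sk, int family, int ifindex)
{
        struct nl_msg *msg = nlmsg_alloc();
        struct nlattr *cfgs, *entry;

        if (!msg)
                return -ENOMEM;
        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                    NL80211_CMD_SET_TID_CONFIG, 0);
        nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
        cfgs = nla_nest_start(msg, NL80211_ATTR_TID_CONFIG);
        entry = nla_nest_start(msg, 1);         /* first config entry */
        nla_put_u16(msg, NL80211_TID_CONFIG_ATTR_TIDS, (1 << 0) | (1 << 3));
        nla_put_u8(msg, NL80211_TID_CONFIG_ATTR_NOACK,
                   NL80211_TID_CONFIG_ENABLE);
        nla_nest_end(msg, entry);
        nla_nest_end(msg, cfgs);
        return nl_send_auto(sk, msg);   /* real code would also free msg */
}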
+
+/**
* enum nl80211_packet_pattern_attr - packet pattern attribute
* @__NL80211_PKTPAT_INVALID: invalid number for nested attribute
* @NL80211_PKTPAT_PATTERN: the pattern, values where the mask has
@@ -5091,7 +5782,7 @@ enum nl80211_iface_limit_attrs {
* => allows 8 of AP/GO that can have BI gcd >= min gcd
*
* numbers = [ #{STA} <= 2 ], channels = 2, max = 2
- * => allows two STAs on different channels
+ * => allows two STAs on the same or on different channels
*
* numbers = [ #{STA} <= 1, #{P2P-client,P2P-GO} <= 3 ], max = 4
* => allows a STA plus three P2P interfaces
@@ -5136,7 +5827,7 @@ enum nl80211_if_combination_attrs {
* @NL80211_PLINK_ESTAB: mesh peer link is established
* @NL80211_PLINK_HOLDING: mesh peer link is being closed or cancelled
* @NL80211_PLINK_BLOCKED: all frames transmitted from this mesh
- * plink are discarded
+ * plink are discarded, except for authentication frames
* @NUM_NL80211_PLINK_STATES: number of peer link states
* @MAX_NL80211_PLINK_STATES: highest numerical value of plink states
*/
@@ -5173,6 +5864,8 @@ enum plink_actions {
#define NL80211_KCK_LEN 16
#define NL80211_KEK_LEN 16
+#define NL80211_KCK_EXT_LEN 24
+#define NL80211_KEK_EXT_LEN 32
#define NL80211_REPLAY_CTR_LEN 8
/**
@@ -5181,6 +5874,7 @@ enum plink_actions {
* @NL80211_REKEY_DATA_KEK: key encryption key (binary)
* @NL80211_REKEY_DATA_KCK: key confirmation key (binary)
* @NL80211_REKEY_DATA_REPLAY_CTR: replay counter (binary)
+ * @NL80211_REKEY_DATA_AKM: AKM data (OUI, suite type)
* @NUM_NL80211_REKEY_DATA: number of rekey attributes (internal)
* @MAX_NL80211_REKEY_DATA: highest rekey attribute (internal)
*/
@@ -5189,6 +5883,7 @@ enum nl80211_rekey_data {
NL80211_REKEY_DATA_KEK,
NL80211_REKEY_DATA_KCK,
NL80211_REKEY_DATA_REPLAY_CTR,
+ NL80211_REKEY_DATA_AKM,
/* keep last */
NUM_NL80211_REKEY_DATA,
@@ -5269,13 +5964,15 @@ enum nl80211_tdls_operation {
NL80211_TDLS_DISABLE_LINK,
};
-/*
+/**
* enum nl80211_ap_sme_features - device-integrated AP features
- * Reserved for future use, no bits are defined in
- * NL80211_ATTR_DEVICE_AP_SME yet.
+ * @NL80211_AP_SME_SA_QUERY_OFFLOAD: SA Query procedures offloaded to driver
+ * when user space indicates support for SA Query procedures offload during
+ * "start ap" with %NL80211_AP_SETTINGS_SA_QUERY_OFFLOAD_SUPPORT.
+ */
enum nl80211_ap_sme_features {
+ NL80211_AP_SME_SA_QUERY_OFFLOAD = 1 << 0,
};
- */
/**
* enum nl80211_feature_flags - device/driver features
@@ -5286,7 +5983,7 @@ enum nl80211_ap_sme_features {
* @NL80211_FEATURE_INACTIVITY_TIMER: This driver takes care of freeing up
* the connected inactive stations in AP mode.
* @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested
- * to work properly to suppport receiving regulatory hints from
+ * to work properly to support receiving regulatory hints from
* cellular base stations.
* @NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL: (no longer available, only
* here to reserve the value for API/ABI compatibility)
@@ -5409,7 +6106,7 @@ enum nl80211_feature_flags {
* enum nl80211_ext_feature_index - bit index of extended features.
* @NL80211_EXT_FEATURE_VHT_IBSS: This driver supports IBSS with VHT datarates.
* @NL80211_EXT_FEATURE_RRM: This driver supports RRM. When featured, user can
- * can request to use RRM (see %NL80211_ATTR_USE_RRM) with
+ * request to use RRM (see %NL80211_ATTR_USE_RRM) with
* %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests, which will set
* the ASSOC_REQ_USE_RRM flag in the association request even if
* NL80211_FEATURE_QUIET is not advertized.
@@ -5521,6 +6218,79 @@ enum nl80211_feature_flags {
* feature, which prevents bufferbloat by using the expected transmission
* time to limit the amount of data buffered in the hardware.
*
+ * @NL80211_EXT_FEATURE_BEACON_PROTECTION: The driver supports Beacon protection
+ * and can receive key configuration for BIGTK using key indexes 6 and 7.
+ * @NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT: The driver supports Beacon
+ * protection as a client only and cannot transmit protected beacons.
+ *
+ * @NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH: The driver can disable the
+ * forwarding of preauth frames over the control port. They are then
+ * handled as ordinary data frames.
+ *
+ * @NL80211_EXT_FEATURE_PROTECTED_TWT: Driver supports protected TWT frames
+ *
+ * @NL80211_EXT_FEATURE_DEL_IBSS_STA: The driver supports removing stations
+ * in IBSS mode, essentially by dropping their state.
+ *
+ * @NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS: management frame registrations
+ * are possible for multicast frames and those will be reported properly.
+ *
+ * @NL80211_EXT_FEATURE_SCAN_FREQ_KHZ: This driver supports receiving and
+ * reporting scan request with %NL80211_ATTR_SCAN_FREQ_KHZ. In order to
+ * report %NL80211_ATTR_SCAN_FREQ_KHZ, %NL80211_SCAN_FLAG_FREQ_KHZ must be
+ * included in the scan request.
+ *
+ * @NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS: The driver
+ * can report tx status for control port over nl80211 tx operations.
+ *
+ * @NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION: Driver supports Operating
+ * Channel Validation (OCV) when using driver's SME for RSNA handshakes.
+ *
+ * @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK: Device wants to do 4-way
+ * handshake with PSK in AP mode (PSK is passed as part of the start AP
+ * command).
+ *
+ * @NL80211_EXT_FEATURE_SAE_OFFLOAD_AP: Device wants to do SAE authentication
+ * in AP mode (SAE password is passed as part of the start AP command).
+ *
+ * @NL80211_EXT_FEATURE_FILS_DISCOVERY: Driver/device supports FILS discovery
+ * frames transmission
+ *
+ * @NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP: Driver/device supports
+ * unsolicited broadcast probe response transmission
+ *
+ * @NL80211_EXT_FEATURE_BEACON_RATE_HE: Driver supports beacon rate
+ * configuration (AP/mesh) with HE rates.
+ *
+ * @NL80211_EXT_FEATURE_SECURE_LTF: Device supports secure LTF measurement
+ * exchange protocol.
+ *
+ * @NL80211_EXT_FEATURE_SECURE_RTT: Device supports secure RTT measurement
+ * exchange protocol.
+ *
+ * @NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE: Device supports management
+ * frame protection for all management frames exchanged during the
+ * negotiation and range measurement procedure.
+ *
+ * @NL80211_EXT_FEATURE_BSS_COLOR: The driver supports BSS color collision
+ * detection and change announcements.
+ *
+ * @NL80211_EXT_FEATURE_FILS_CRYPTO_OFFLOAD: Driver running in AP mode supports
+ * FILS encryption and decryption for (Re)Association Request and Response
+ * frames. Userspace has to share FILS AAD details to the driver by using
+ * @NL80211_CMD_SET_FILS_AAD.
+ *
+ * @NL80211_EXT_FEATURE_RADAR_BACKGROUND: Device supports background radar/CAC
+ * detection.
+ *
+ * @NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE: Device can perform a MAC address
+ * change without having to bring the underlying network device down
+ * first. For example, in station mode this can be used to vary the
+ * origin MAC address prior to a connection to a new AP for privacy
+ * or other reasons. Note that certain driver specific restrictions
+ * might apply, e.g. no scans in progress, no offchannel operations
+ * in progress, and no active connections.
+ *
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -5568,6 +6338,27 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_SAE_OFFLOAD,
NL80211_EXT_FEATURE_VLAN_OFFLOAD,
NL80211_EXT_FEATURE_AQL,
+ NL80211_EXT_FEATURE_BEACON_PROTECTION,
+ NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH,
+ NL80211_EXT_FEATURE_PROTECTED_TWT,
+ NL80211_EXT_FEATURE_DEL_IBSS_STA,
+ NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS,
+ NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT,
+ NL80211_EXT_FEATURE_SCAN_FREQ_KHZ,
+ NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS,
+ NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION,
+ NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK,
+ NL80211_EXT_FEATURE_SAE_OFFLOAD_AP,
+ NL80211_EXT_FEATURE_FILS_DISCOVERY,
+ NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP,
+ NL80211_EXT_FEATURE_BEACON_RATE_HE,
+ NL80211_EXT_FEATURE_SECURE_LTF,
+ NL80211_EXT_FEATURE_SECURE_RTT,
+ NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE,
+ NL80211_EXT_FEATURE_BSS_COLOR,
+ NL80211_EXT_FEATURE_FILS_CRYPTO_OFFLOAD,
+ NL80211_EXT_FEATURE_RADAR_BACKGROUND,
+ NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -5679,6 +6470,11 @@ enum nl80211_timeout_reason {
* @NL80211_SCAN_FLAG_MIN_PREQ_CONTENT: minimize probe request content to
* only have supported rates and no additional capabilities (unless
* added by userspace explicitly.)
+ * @NL80211_SCAN_FLAG_FREQ_KHZ: report scan results with
+ * %NL80211_ATTR_SCAN_FREQ_KHZ. This also means
+ * %NL80211_ATTR_SCAN_FREQUENCIES will not be included.
+ * @NL80211_SCAN_FLAG_COLOCATED_6GHZ: scan for colocated APs reported by
+ * 2.4/5 GHz APs
*/
enum nl80211_scan_flags {
NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0,
@@ -5694,6 +6490,8 @@ enum nl80211_scan_flags {
NL80211_SCAN_FLAG_HIGH_ACCURACY = 1<<10,
NL80211_SCAN_FLAG_RANDOM_SN = 1<<11,
NL80211_SCAN_FLAG_MIN_PREQ_CONTENT = 1<<12,
+ NL80211_SCAN_FLAG_FREQ_KHZ = 1<<13,
+ NL80211_SCAN_FLAG_COLOCATED_6GHZ = 1<<14,
};
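Since these flags travel as a u32 in %NL80211_ATTR_SCAN_FLAGS, asking for kHz-resolution results is just one more bit in a trigger-scan request; a minimal, illustrative fragment (msg is assumed to be an %NL80211_CMD_TRIGGER_SCAN message under construction):

#include <netlink/attr.h>
#include <linux/nl80211.h>

/* Illustrative: valid only when NL80211_EXT_FEATURE_SCAN_FREQ_KHZ is set */
static void request_khz_scan_results(struct nl_msg *msg)
{
        nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS,
                    NL80211_SCAN_FLAG_FLUSH | NL80211_SCAN_FLAG_FREQ_KHZ);
}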
/**
@@ -5781,7 +6579,7 @@ enum nl80211_dfs_state {
};
/**
- * enum enum nl80211_protocol_features - nl80211 protocol features
+ * enum nl80211_protocol_features - nl80211 protocol features
* @NL80211_PROTOCOL_FEATURE_SPLIT_WIPHY_DUMP: nl80211 supports splitting
* wiphy dumps (if requested by the application with the attribute
* %NL80211_ATTR_SPLIT_WIPHY_DUMP. Also supported is filtering the
@@ -5858,11 +6656,13 @@ struct nl80211_vendor_cmd_info {
* @NL80211_TDLS_PEER_HT: TDLS peer is HT capable.
* @NL80211_TDLS_PEER_VHT: TDLS peer is VHT capable.
* @NL80211_TDLS_PEER_WMM: TDLS peer is WMM capable.
+ * @NL80211_TDLS_PEER_HE: TDLS peer is HE capable.
*/
enum nl80211_tdls_peer_capability {
NL80211_TDLS_PEER_HT = 1<<0,
NL80211_TDLS_PEER_VHT = 1<<1,
NL80211_TDLS_PEER_WMM = 1<<2,
+ NL80211_TDLS_PEER_HE = 1<<3,
};
/**
@@ -6190,12 +6990,14 @@ enum nl80211_ftm_responder_stats {
* @NL80211_PREAMBLE_HT: HT preamble
* @NL80211_PREAMBLE_VHT: VHT preamble
* @NL80211_PREAMBLE_DMG: DMG preamble
+ * @NL80211_PREAMBLE_HE: HE preamble
*/
enum nl80211_preamble {
NL80211_PREAMBLE_LEGACY,
NL80211_PREAMBLE_HT,
NL80211_PREAMBLE_VHT,
NL80211_PREAMBLE_DMG,
+ NL80211_PREAMBLE_HE,
};
/**
@@ -6388,6 +7190,10 @@ enum nl80211_peer_measurement_attrs {
* is valid)
* @NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST: u32 attribute indicating
* the maximum FTMs per burst (if not present anything is valid)
+ * @NL80211_PMSR_FTM_CAPA_ATTR_TRIGGER_BASED: flag attribute indicating if
+ * trigger based ranging measurement is supported
+ * @NL80211_PMSR_FTM_CAPA_ATTR_NON_TRIGGER_BASED: flag attribute indicating
+ * if non trigger based ranging measurement is supported
*
* @NUM_NL80211_PMSR_FTM_CAPA_ATTR: internal
* @NL80211_PMSR_FTM_CAPA_ATTR_MAX: highest attribute number
@@ -6403,6 +7209,8 @@ enum nl80211_peer_measurement_ftm_capa {
NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS,
NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT,
NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST,
+ NL80211_PMSR_FTM_CAPA_ATTR_TRIGGER_BASED,
+ NL80211_PMSR_FTM_CAPA_ATTR_NON_TRIGGER_BASED,
/* keep last */
NUM_NL80211_PMSR_FTM_CAPA_ATTR,
@@ -6432,6 +7240,26 @@ enum nl80211_peer_measurement_ftm_capa {
* @NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI: request LCI data (flag)
* @NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC: request civic location data
* (flag)
+ * @NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED: request trigger based ranging
+ * measurement (flag).
+ * This attribute and %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED are
+ * mutually exclusive.
+ * if neither %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED nor
+ * %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED is set, EDCA based
+ * ranging will be used.
+ * @NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED: request non trigger based
+ * ranging measurement (flag)
+ * This attribute and %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED are
+ * mutually exclusive.
+ * if neither %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED nor
+ * %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED is set, EDCA based
+ * ranging will be used.
+ * @NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK: negotiate for LMR feedback. Only
+ * valid if either %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED or
+ * %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED is set.
+ * @NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR: optional. The BSS color of the
+ * responder. Only valid if %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED
+ * or %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED is set.
*
* @NUM_NL80211_PMSR_FTM_REQ_ATTR: internal
* @NL80211_PMSR_FTM_REQ_ATTR_MAX: highest attribute number
@@ -6448,6 +7276,10 @@ enum nl80211_peer_measurement_ftm_req {
NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES,
NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI,
NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC,
+ NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED,
+ NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED,
+ NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK,
+ NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR,
/* keep last */
NUM_NL80211_PMSR_FTM_REQ_ATTR,
@@ -6572,6 +7404,13 @@ enum nl80211_peer_measurement_ftm_resp {
*
* @NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET: the OBSS PD minimum tx power offset.
* @NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET: the OBSS PD maximum tx power offset.
+ * @NL80211_HE_OBSS_PD_ATTR_NON_SRG_MAX_OFFSET: the non-SRG OBSS PD maximum
+ * tx power offset.
+ * @NL80211_HE_OBSS_PD_ATTR_BSS_COLOR_BITMAP: bitmap that indicates the BSS color
+ * values used by members of the SRG.
+ * @NL80211_HE_OBSS_PD_ATTR_PARTIAL_BSSID_BITMAP: bitmap that indicates the partial
+ * BSSID values used by members of the SRG.
+ * @NL80211_HE_OBSS_PD_ATTR_SR_CTRL: The SR Control field of SRP element.
*
* @__NL80211_HE_OBSS_PD_ATTR_LAST: Internal
* @NL80211_HE_OBSS_PD_ATTR_MAX: highest OBSS PD attribute.
@@ -6581,11 +7420,304 @@ enum nl80211_obss_pd_attributes {
NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET,
NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET,
+ NL80211_HE_OBSS_PD_ATTR_NON_SRG_MAX_OFFSET,
+ NL80211_HE_OBSS_PD_ATTR_BSS_COLOR_BITMAP,
+ NL80211_HE_OBSS_PD_ATTR_PARTIAL_BSSID_BITMAP,
+ NL80211_HE_OBSS_PD_ATTR_SR_CTRL,
/* keep last */
__NL80211_HE_OBSS_PD_ATTR_LAST,
NL80211_HE_OBSS_PD_ATTR_MAX = __NL80211_HE_OBSS_PD_ATTR_LAST - 1,
};
+/**
+ * enum nl80211_bss_color_attributes - BSS Color attributes
+ * @__NL80211_HE_BSS_COLOR_ATTR_INVALID: Invalid
+ *
+ * @NL80211_HE_BSS_COLOR_ATTR_COLOR: the current BSS Color.
+ * @NL80211_HE_BSS_COLOR_ATTR_DISABLED: is BSS coloring disabled.
+ * @NL80211_HE_BSS_COLOR_ATTR_PARTIAL: the AID equation to be used.
+ *
+ * @__NL80211_HE_BSS_COLOR_ATTR_LAST: Internal
+ * @NL80211_HE_BSS_COLOR_ATTR_MAX: highest BSS Color attribute.
+ */
+enum nl80211_bss_color_attributes {
+ __NL80211_HE_BSS_COLOR_ATTR_INVALID,
+
+ NL80211_HE_BSS_COLOR_ATTR_COLOR,
+ NL80211_HE_BSS_COLOR_ATTR_DISABLED,
+ NL80211_HE_BSS_COLOR_ATTR_PARTIAL,
+
+ /* keep last */
+ __NL80211_HE_BSS_COLOR_ATTR_LAST,
+ NL80211_HE_BSS_COLOR_ATTR_MAX = __NL80211_HE_BSS_COLOR_ATTR_LAST - 1,
+};
+
+/**
+ * enum nl80211_iftype_akm_attributes - interface type AKM attributes
+ * @__NL80211_IFTYPE_AKM_ATTR_INVALID: Invalid
+ *
+ * @NL80211_IFTYPE_AKM_ATTR_IFTYPES: nested attribute containing a flag
+ * attribute for each interface type that supports AKM suites specified in
+ * %NL80211_IFTYPE_AKM_ATTR_SUITES
+ * @NL80211_IFTYPE_AKM_ATTR_SUITES: an array of u32. Used to indicate supported
+ * AKM suites for the specified interface types.
+ *
+ * @__NL80211_IFTYPE_AKM_ATTR_LAST: Internal
+ * @NL80211_IFTYPE_AKM_ATTR_MAX: highest interface type AKM attribute.
+ */
+enum nl80211_iftype_akm_attributes {
+ __NL80211_IFTYPE_AKM_ATTR_INVALID,
+
+ NL80211_IFTYPE_AKM_ATTR_IFTYPES,
+ NL80211_IFTYPE_AKM_ATTR_SUITES,
+
+ /* keep last */
+ __NL80211_IFTYPE_AKM_ATTR_LAST,
+ NL80211_IFTYPE_AKM_ATTR_MAX = __NL80211_IFTYPE_AKM_ATTR_LAST - 1,
+};
+
+/**
+ * enum nl80211_fils_discovery_attributes - FILS discovery configuration
+ * from IEEE Std 802.11ai-2016, Annex C.3 MIB detail.
+ *
+ * @__NL80211_FILS_DISCOVERY_ATTR_INVALID: Invalid
+ *
+ * @NL80211_FILS_DISCOVERY_ATTR_INT_MIN: Minimum packet interval (u32, TU).
+ * Allowed range: 0..10000 (TU = Time Unit)
+ * @NL80211_FILS_DISCOVERY_ATTR_INT_MAX: Maximum packet interval (u32, TU).
+ * Allowed range: 0..10000 (TU = Time Unit)
+ * @NL80211_FILS_DISCOVERY_ATTR_TMPL: Template data for FILS discovery action
+ * frame including the headers.
+ *
+ * @__NL80211_FILS_DISCOVERY_ATTR_LAST: Internal
+ * @NL80211_FILS_DISCOVERY_ATTR_MAX: highest attribute
+ */
+enum nl80211_fils_discovery_attributes {
+ __NL80211_FILS_DISCOVERY_ATTR_INVALID,
+
+ NL80211_FILS_DISCOVERY_ATTR_INT_MIN,
+ NL80211_FILS_DISCOVERY_ATTR_INT_MAX,
+ NL80211_FILS_DISCOVERY_ATTR_TMPL,
+
+ /* keep last */
+ __NL80211_FILS_DISCOVERY_ATTR_LAST,
+ NL80211_FILS_DISCOVERY_ATTR_MAX = __NL80211_FILS_DISCOVERY_ATTR_LAST - 1
+};
+
+/*
+ * FILS discovery template minimum length with action frame headers and
+ * mandatory fields.
+ */
+#define NL80211_FILS_DISCOVERY_TMPL_MIN_LEN 42
+
+/**
+ * enum nl80211_unsol_bcast_probe_resp_attributes - Unsolicited broadcast probe
+ * response configuration. Applicable only in 6GHz.
+ *
+ * @__NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INVALID: Invalid
+ *
+ * @NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT: Maximum packet interval (u32, TU).
+ * Allowed range: 0..20 (TU = Time Unit). IEEE P802.11ax/D6.0
+ * 26.17.2.3.2 (AP behavior for fast passive scanning).
+ * @NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL: Unsolicited broadcast probe response
+ * frame template (binary).
+ *
+ * @__NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_LAST: Internal
+ * @NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX: highest attribute
+ */
+enum nl80211_unsol_bcast_probe_resp_attributes {
+ __NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INVALID,
+
+ NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT,
+ NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL,
+
+ /* keep last */
+ __NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_LAST,
+ NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX =
+ __NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_LAST - 1
+};
+
+/**
+ * enum nl80211_sae_pwe_mechanism - The mechanism(s) allowed for SAE PWE
+ * derivation. Applicable only when WPA3-Personal SAE authentication is
+ * used.
+ *
+ * @NL80211_SAE_PWE_UNSPECIFIED: not specified, used internally to indicate
+ * that the attribute is not present from userspace.
+ * @NL80211_SAE_PWE_HUNT_AND_PECK: hunting-and-pecking loop only
+ * @NL80211_SAE_PWE_HASH_TO_ELEMENT: hash-to-element only
+ * @NL80211_SAE_PWE_BOTH: both hunting-and-pecking loop and hash-to-element
+ * can be used.
+ */
+enum nl80211_sae_pwe_mechanism {
+ NL80211_SAE_PWE_UNSPECIFIED,
+ NL80211_SAE_PWE_HUNT_AND_PECK,
+ NL80211_SAE_PWE_HASH_TO_ELEMENT,
+ NL80211_SAE_PWE_BOTH,
+};
+
+/**
+ * enum nl80211_sar_type - type of SAR specs
+ *
+ * @NL80211_SAR_TYPE_POWER: power limitation specified in 0.25dBm unit
+ *
+ */
+enum nl80211_sar_type {
+ NL80211_SAR_TYPE_POWER,
+
+ /* add new type here */
+
+ /* Keep last */
+ NUM_NL80211_SAR_TYPE,
+};
+
+/**
+ * enum nl80211_sar_attrs - Attributes for SAR spec
+ *
+ * @NL80211_SAR_ATTR_TYPE: the SAR type as defined in &enum nl80211_sar_type.
+ *
+ * @NL80211_SAR_ATTR_SPECS: Nested array of SAR power
+ * limit specifications. Each specification contains a set
+ * of %nl80211_sar_specs_attrs.
+ *
+ * For SET operation, it contains array of %NL80211_SAR_ATTR_SPECS_POWER
+ * and %NL80211_SAR_ATTR_SPECS_RANGE_INDEX.
+ *
+ * For sar_capa dump, it contains array of
+ * %NL80211_SAR_ATTR_SPECS_START_FREQ
+ * and %NL80211_SAR_ATTR_SPECS_END_FREQ.
+ *
+ * @__NL80211_SAR_ATTR_LAST: Internal
+ * @NL80211_SAR_ATTR_MAX: highest sar attribute
+ *
+ * These attributes are used with %NL80211_CMD_SET_SAR_SPEC
+ */
+enum nl80211_sar_attrs {
+ __NL80211_SAR_ATTR_INVALID,
+
+ NL80211_SAR_ATTR_TYPE,
+ NL80211_SAR_ATTR_SPECS,
+
+ __NL80211_SAR_ATTR_LAST,
+ NL80211_SAR_ATTR_MAX = __NL80211_SAR_ATTR_LAST - 1,
+};
+
+/**
+ * enum nl80211_sar_specs_attrs - Attributes for SAR power limit specs
+ *
+ * @NL80211_SAR_ATTR_SPECS_POWER: Required (s32) value to specify the actual
+ * power limit value in units of 0.25 dBm if type is
+ * NL80211_SAR_TYPE_POWER (i.e., a value of 44 represents 11 dBm).
+ * 0 means userspace doesn't impose a SAR limitation on this range.
+ *
+ * @NL80211_SAR_ATTR_SPECS_RANGE_INDEX: Required (u32) value to specify the
+ * index into the exported freq range table; the associated power
+ * limitation is applied to this range.
+ *
+ * Userspace isn't required to set all the ranges advertised by the WLAN
+ * driver and may skip certain ranges. Skipped ranges have no SAR
+ * limitation, which is equivalent to setting %NL80211_SAR_ATTR_SPECS_POWER
+ * to an arbitrarily high value: any value above the regulatory limit
+ * simply removes the SAR power limitation. At least one range must be
+ * set, and duplicate ranges are not allowed in one SET operation.
+ *
+ * Every SET operation overwrites previous SET operation.
+ *
+ * @NL80211_SAR_ATTR_SPECS_START_FREQ: Required (u32) value to specify the start
+ * frequency of this range edge when registering SAR capability to wiphy.
+ * It's not a channel center frequency. The unit is kHz.
+ *
+ * @NL80211_SAR_ATTR_SPECS_END_FREQ: Required (u32) value to specify the end
+ * frequency of this range edge when registering SAR capability to wiphy.
+ * It's not a channel center frequency. The unit is kHz.
+ *
+ * @__NL80211_SAR_ATTR_SPECS_LAST: Internal
+ * @NL80211_SAR_ATTR_SPECS_MAX: highest sar specs attribute
+ */
+enum nl80211_sar_specs_attrs {
+ __NL80211_SAR_ATTR_SPECS_INVALID,
+
+ NL80211_SAR_ATTR_SPECS_POWER,
+ NL80211_SAR_ATTR_SPECS_RANGE_INDEX,
+ NL80211_SAR_ATTR_SPECS_START_FREQ,
+ NL80211_SAR_ATTR_SPECS_END_FREQ,
+
+ __NL80211_SAR_ATTR_SPECS_LAST,
+ NL80211_SAR_ATTR_SPECS_MAX = __NL80211_SAR_ATTR_SPECS_LAST - 1,
+};
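
Since %NL80211_SAR_ATTR_SPECS_POWER is encoded in quarter-dBm steps, a small conversion helper avoids off-by-4x mistakes. A hypothetical illustration, not part of the UAPI:

/* Hypothetical helper: encode a dBm limit for
 * NL80211_SAR_ATTR_SPECS_POWER (e.g. 11 dBm -> 44).
 */
static inline __s32 nl80211_sar_encode_dbm(int dbm)
{
	return (__s32)dbm * 4;
}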
+
+/**
+ * enum nl80211_mbssid_config_attributes - multiple BSSID (MBSSID) and enhanced
+ * multi-BSSID advertisements (EMA) in AP mode.
+ * The kernel uses some of these attributes to advertise the driver's
+ * support for MBSSID and EMA.
+ * The remaining attributes are used by userspace to configure these
+ * features.
+ *
+ * @__NL80211_MBSSID_CONFIG_ATTR_INVALID: Invalid
+ *
+ * @NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES: Used by the kernel to advertise
+ * the maximum number of MBSSID interfaces supported by the driver.
+ * Driver should indicate MBSSID support by setting
+ * wiphy->mbssid_max_interfaces to a value greater than or equal to 2.
+ *
+ * @NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY: Used by the kernel
+ * to advertise the maximum profile periodicity supported by the driver
+ * if EMA is enabled. Driver should indicate EMA support to the userspace
+ * by setting wiphy->ema_max_profile_periodicity to
+ * a non-zero value.
+ *
+ * @NL80211_MBSSID_CONFIG_ATTR_INDEX: Mandatory parameter to pass the index of
+ * this BSS (u8) in the multiple BSSID set.
+ * Value must be set to 0 for the transmitting interface and non-zero for
+ * all non-transmitting interfaces. The userspace will be responsible
+ * for using unique indices for the interfaces.
+ * Range: 0 to wiphy->mbssid_max_interfaces-1.
+ *
+ * @NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX: Mandatory parameter for
+ * a non-transmitted profile which provides the interface index (u32) of
+ * the transmitted profile. The value must match one of the interface
+ * indices advertised by the kernel. Optional if the interface being set
+ * up is the transmitting one; if provided, the value must match that
+ * interface's own index.
+ *
+ * @NL80211_MBSSID_CONFIG_ATTR_EMA: Flag used to enable EMA AP feature.
+ * Setting this flag is permitted only if the driver advertises EMA support
+ * by setting wiphy->ema_max_profile_periodicity to non-zero.
+ *
+ * @__NL80211_MBSSID_CONFIG_ATTR_LAST: Internal
+ * @NL80211_MBSSID_CONFIG_ATTR_MAX: highest attribute
+ */
+enum nl80211_mbssid_config_attributes {
+ __NL80211_MBSSID_CONFIG_ATTR_INVALID,
+
+ NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES,
+ NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY,
+ NL80211_MBSSID_CONFIG_ATTR_INDEX,
+ NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX,
+ NL80211_MBSSID_CONFIG_ATTR_EMA,
+
+ /* keep last */
+ __NL80211_MBSSID_CONFIG_ATTR_LAST,
+ NL80211_MBSSID_CONFIG_ATTR_MAX = __NL80211_MBSSID_CONFIG_ATTR_LAST - 1,
+};
+
+/**
+ * enum nl80211_ap_settings_flags - AP settings flags
+ *
+ * @NL80211_AP_SETTINGS_EXTERNAL_AUTH_SUPPORT: AP supports external
+ * authentication.
+ * @NL80211_AP_SETTINGS_SA_QUERY_OFFLOAD_SUPPORT: Userspace supports SA Query
+ * procedures offload to driver. If driver advertises
+ * %NL80211_AP_SME_SA_QUERY_OFFLOAD in AP SME features, userspace shall
+ * ignore SA Query procedures and validations when this flag is set by
+ * userspace.
+ */
+enum nl80211_ap_settings_flags {
+ NL80211_AP_SETTINGS_EXTERNAL_AUTH_SUPPORT = 1 << 0,
+ NL80211_AP_SETTINGS_SA_QUERY_OFFLOAD_SUPPORT = 1 << 1,
+};
#endif /* __LINUX_NL80211_H */
diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h
index d99b5a772698..2f76cba67166 100644
--- a/include/uapi/linux/nvme_ioctl.h
+++ b/include/uapi/linux/nvme_ioctl.h
@@ -55,7 +55,10 @@ struct nvme_passthru_cmd64 {
__u64 metadata;
__u64 addr;
__u32 metadata_len;
- __u32 data_len;
+ union {
+ __u32 data_len; /* for non-vectored io */
+ __u32 vec_cnt; /* for vectored io */
+ };
__u32 cdw10;
__u32 cdw11;
__u32 cdw12;
@@ -67,6 +70,28 @@ struct nvme_passthru_cmd64 {
__u64 result;
};
+/* same as struct nvme_passthru_cmd64, minus the 8-byte result field */
+struct nvme_uring_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 rsvd1;
+ __u32 nsid;
+ __u32 cdw2;
+ __u32 cdw3;
+ __u64 metadata;
+ __u64 addr;
+ __u32 metadata_len;
+ __u32 data_len;
+ __u32 cdw10;
+ __u32 cdw11;
+ __u32 cdw12;
+ __u32 cdw13;
+ __u32 cdw14;
+ __u32 cdw15;
+ __u32 timeout_ms;
+ __u32 rsvd2;
+};
+
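
The "minus the result field" relationship can be pinned down at compile time; a sketch assuming C11 static_assert and the UAPI header above:

#include <assert.h>
#include <linux/nvme_ioctl.h>

static_assert(sizeof(struct nvme_uring_cmd) ==
	      sizeof(struct nvme_passthru_cmd64) - sizeof(__u64),
	      "nvme_uring_cmd is nvme_passthru_cmd64 without the result field");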
#define nvme_admin_cmd nvme_passthru_cmd
#define NVME_IOCTL_ID _IO('N', 0x40)
@@ -78,5 +103,12 @@ struct nvme_passthru_cmd64 {
#define NVME_IOCTL_RESCAN _IO('N', 0x46)
#define NVME_IOCTL_ADMIN64_CMD _IOWR('N', 0x47, struct nvme_passthru_cmd64)
#define NVME_IOCTL_IO64_CMD _IOWR('N', 0x48, struct nvme_passthru_cmd64)
+#define NVME_IOCTL_IO64_CMD_VEC _IOWR('N', 0x49, struct nvme_passthru_cmd64)
+
+/* io_uring async commands: */
+#define NVME_URING_CMD_IO _IOWR('N', 0x80, struct nvme_uring_cmd)
+#define NVME_URING_CMD_IO_VEC _IOWR('N', 0x81, struct nvme_uring_cmd)
+#define NVME_URING_CMD_ADMIN _IOWR('N', 0x82, struct nvme_uring_cmd)
+#define NVME_URING_CMD_ADMIN_VEC _IOWR('N', 0x83, struct nvme_uring_cmd)
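
For the vectored ioctl, addr carries a pointer to an iovec array and vec_cnt replaces data_len, per the union above. A minimal userspace sketch; the namespace ID, NVMe read opcode and the omitted LBA CDWs are assumptions, and error handling is elided:

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <linux/nvme_ioctl.h>

static int nvme_read_vec(int fd, struct iovec *iov, int nr_iov)
{
	struct nvme_passthru_cmd64 cmd = {
		.opcode	 = 0x02,		/* NVMe I/O read */
		.nsid	 = 1,			/* assumed namespace */
		.addr	 = (uintptr_t)iov,	/* iovec array, not a flat buffer */
		.vec_cnt = nr_iov,		/* number of iovecs, not bytes */
	};

	return ioctl(fd, NVME_IOCTL_IO64_CMD_VEC, &cmd);
}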
#endif /* _UAPI_LINUX_NVME_IOCTL_H */
diff --git a/include/uapi/linux/omap3isp.h b/include/uapi/linux/omap3isp.h
index 87b55755f4ff..d9db7ad43890 100644
--- a/include/uapi/linux/omap3isp.h
+++ b/include/uapi/linux/omap3isp.h
@@ -162,6 +162,7 @@ struct omap3isp_h3a_aewb_config {
* struct omap3isp_stat_data - Statistic data sent to or received from user
* @ts: Timestamp of returned framestats.
* @buf: Pointer to pass to user.
+ * @buf_size: Size of buffer.
* @frame_number: Frame number of requested stats.
* @cur_frame: Current frame number being processed.
* @config_counter: Number of the configuration associated with the data.
@@ -176,10 +177,12 @@ struct omap3isp_stat_data {
struct timeval ts;
#endif
void __user *buf;
- __u32 buf_size;
- __u16 frame_number;
- __u16 cur_frame;
- __u16 config_counter;
+ __struct_group(/* no tag */, frame, /* no attrs */,
+ __u32 buf_size;
+ __u16 frame_number;
+ __u16 cur_frame;
+ __u16 config_counter;
+ );
};
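
The __struct_group() wrapper mirrors the four per-frame fields under a 'frame' tag without changing the layout, so bounds-checked helpers can treat them as one object. A minimal sketch of the kind of copy this enables (function name hypothetical):

#include <linux/string.h>

static void stat_copy_frame(struct omap3isp_stat_data *dst,
			    const struct omap3isp_stat_data *src)
{
	/* Copies buf_size, frame_number, cur_frame and config_counter
	 * as a single bounded object via the 'frame' group.
	 */
	memcpy(&dst->frame, &src->frame, sizeof(dst->frame));
}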
#ifdef __KERNEL__
@@ -189,10 +192,12 @@ struct omap3isp_stat_data_time32 {
__s32 tv_usec;
} ts;
__u32 buf;
- __u32 buf_size;
- __u16 frame_number;
- __u16 cur_frame;
- __u16 config_counter;
+ __struct_group(/* no tag */, frame, /* no attrs */,
+ __u32 buf_size;
+ __u16 frame_number;
+ __u16 cur_frame;
+ __u16 config_counter;
+ );
};
#endif
diff --git a/include/uapi/linux/openat2.h b/include/uapi/linux/openat2.h
index 58b1eb711360..a5feb7604948 100644
--- a/include/uapi/linux/openat2.h
+++ b/include/uapi/linux/openat2.h
@@ -35,5 +35,9 @@ struct open_how {
#define RESOLVE_IN_ROOT 0x10 /* Make all jumps to "/" and ".."
be scoped inside the dirfd
(similar to chroot(2)). */
+#define RESOLVE_CACHED 0x20 /* Only complete if resolution can be
+ completed through cached lookup. May
+ return -EAGAIN if that's not
+ possible. */
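
A typical pattern is to attempt the cached-only lookup first and retry without the flag on -EAGAIN. A minimal userspace sketch, assuming a libc that defines SYS_openat2:

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/openat2.h>

static int open_prefer_cached(const char *path)
{
	struct open_how how = {
		.flags	 = O_RDONLY,
		.resolve = RESOLVE_CACHED,
	};
	int fd = syscall(SYS_openat2, AT_FDCWD, path, &how, sizeof(how));

	if (fd < 0 && errno == EAGAIN) {
		/* Fall back to a full, possibly blocking resolution. */
		how.resolve &= ~RESOLVE_CACHED;
		fd = syscall(SYS_openat2, AT_FDCWD, path, &how, sizeof(how));
	}
	return fd;
}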
#endif /* _UAPI_LINUX_OPENAT2_H */
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index ae2bff14e7e1..94066f87e9ee 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -70,10 +70,14 @@ enum ovs_datapath_cmd {
* set on the datapath port (for OVS_ACTION_ATTR_MISS). Only valid on
* %OVS_DP_CMD_NEW requests. A value of zero indicates that upcalls should
* not be sent.
+ * @OVS_DP_ATTR_PER_CPU_PIDS: Per-cpu array of PIDs for upcalls when
+ * OVS_DP_F_DISPATCH_UPCALL_PER_CPU feature is set.
* @OVS_DP_ATTR_STATS: Statistics about packets that have passed through the
* datapath. Always present in notifications.
* @OVS_DP_ATTR_MEGAFLOW_STATS: Statistics about mega flow masks usage for the
* datapath. Always present in notifications.
+ * @OVS_DP_ATTR_IFINDEX: Interface index for a new datapath netdev. Only
+ * valid for %OVS_DP_CMD_NEW requests.
*
* These attributes follow the &struct ovs_header within the Generic Netlink
* payload for %OVS_DP_* commands.
@@ -86,6 +90,11 @@ enum ovs_datapath_attr {
OVS_DP_ATTR_MEGAFLOW_STATS, /* struct ovs_dp_megaflow_stats */
OVS_DP_ATTR_USER_FEATURES, /* OVS_DP_F_* */
OVS_DP_ATTR_PAD,
+ OVS_DP_ATTR_MASKS_CACHE_SIZE,
+ OVS_DP_ATTR_PER_CPU_PIDS, /* Netlink PIDS to receive upcalls in
+ * per-cpu dispatch mode
+ */
+ OVS_DP_ATTR_IFINDEX,
__OVS_DP_ATTR_MAX
};
@@ -102,8 +111,8 @@ struct ovs_dp_megaflow_stats {
__u64 n_mask_hit; /* Number of masks used for flow lookups. */
__u32 n_masks; /* Number of masks for the datapath. */
	__u32 pad0;		/* Pad for future expansion. */
+	__u64 n_cache_hit;	/* Number of cache matches for flow lookups. */
	__u64 pad1;		/* Pad for future expansion. */
- __u64 pad2; /* Pad for future expension. */
};
struct ovs_vport_stats {
@@ -126,6 +135,9 @@ struct ovs_vport_stats {
/* Allow tc offload recirc sharing */
#define OVS_DP_F_TC_RECIRC_SHARING (1 << 2)
+/* Allow per-cpu dispatch of upcalls */
+#define OVS_DP_F_DISPATCH_UPCALL_PER_CPU (1 << 3)
+
/* Fixed logical ports. */
#define OVSP_LOCAL ((__u32)0)
@@ -343,9 +355,20 @@ enum ovs_key_attr {
OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6, /* struct ovs_key_ct_tuple_ipv6 */
OVS_KEY_ATTR_NSH, /* Nested set of ovs_nsh_key_* */
-#ifdef __KERNEL__
- OVS_KEY_ATTR_TUNNEL_INFO, /* struct ip_tunnel_info */
-#endif
+ /* User space decided to squat on types 29 and 30. They are defined
+ * below, but should not be sent to the kernel.
+ *
+ * WARNING: No new types should be added unless they are defined
+ * for both kernel and user space (no 'ifdef's). It's hard
+ * to keep compatibility otherwise.
+ */
+ OVS_KEY_ATTR_PACKET_TYPE, /* be32 packet type */
+ OVS_KEY_ATTR_ND_EXTENSIONS, /* IPv6 Neighbor Discovery extensions */
+
+ OVS_KEY_ATTR_TUNNEL_INFO, /* struct ip_tunnel_info.
+ * For in-kernel use only.
+ */
+ OVS_KEY_ATTR_IPV6_EXTHDRS, /* struct ovs_key_ipv6_exthdr */
__OVS_KEY_ATTR_MAX
};
@@ -421,6 +444,11 @@ struct ovs_key_ipv6 {
__u8 ipv6_frag; /* One of OVS_FRAG_TYPE_*. */
};
+/* separate structure to support backward compatibility with older user space */
+struct ovs_key_ipv6_exthdrs {
+ __u16 hdrs;
+};
+
struct ovs_key_tcp {
__be16 tcp_src;
__be16 tcp_dst;
@@ -958,6 +986,7 @@ enum ovs_action_attr {
OVS_ACTION_ATTR_CLONE, /* Nested OVS_CLONE_ATTR_*. */
OVS_ACTION_ATTR_CHECK_PKT_LEN, /* Nested OVS_CHECK_PKT_LEN_ATTR_*. */
OVS_ACTION_ATTR_ADD_MPLS, /* struct ovs_action_add_mpls. */
+ OVS_ACTION_ATTR_DEC_TTL, /* Nested OVS_DEC_TTL_ATTR_*. */
__OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted
* from userspace. */
@@ -1050,4 +1079,12 @@ struct ovs_zone_limit {
__u32 count;
};
+enum ovs_dec_ttl_attr {
+ OVS_DEC_TTL_ATTR_UNSPEC,
+ OVS_DEC_TTL_ATTR_ACTION, /* Nested struct nlattr */
+ __OVS_DEC_TTL_ATTR_MAX
+};
+
+#define OVS_DEC_TTL_ATTR_MAX (__OVS_DEC_TTL_ATTR_MAX - 1)
+
#endif /* _LINUX_OPENVSWITCH_H */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 5437690483cd..57b8e2ffb1dd 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -76,6 +76,7 @@
#define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */
#define PCI_LATENCY_TIMER 0x0d /* 8 bits */
#define PCI_HEADER_TYPE 0x0e /* 8 bits */
+#define PCI_HEADER_TYPE_MASK 0x7f
#define PCI_HEADER_TYPE_NORMAL 0
#define PCI_HEADER_TYPE_BRIDGE 1
#define PCI_HEADER_TYPE_CARDBUS 2
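
PCI_HEADER_TYPE_MASK strips the multi-function bit (bit 7) before the layout type is compared. A kernel-side sketch, with the helper name hypothetical:

static bool pdev_is_bridge_layout(struct pci_dev *pdev)
{
	u8 hdr_type;

	pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr_type);
	return (hdr_type & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_BRIDGE;
}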
@@ -246,7 +247,7 @@
#define PCI_PM_CAP_PME_D0 0x0800 /* PME# from D0 */
#define PCI_PM_CAP_PME_D1 0x1000 /* PME# from D1 */
#define PCI_PM_CAP_PME_D2 0x2000 /* PME# from D2 */
-#define PCI_PM_CAP_PME_D3 0x4000 /* PME# from D3 (hot) */
+#define PCI_PM_CAP_PME_D3hot 0x4000 /* PME# from D3 (hot) */
#define PCI_PM_CAP_PME_D3cold 0x8000 /* PME# from D3 (cold) */
#define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */
#define PCI_PM_CTRL 4 /* PM control and status register */
@@ -300,23 +301,23 @@
#define PCI_SID_ESR_FIC 0x20 /* First In Chassis Flag */
#define PCI_SID_CHASSIS_NR 3 /* Chassis Number */
-/* Message Signalled Interrupt registers */
+/* Message Signaled Interrupt registers */
-#define PCI_MSI_FLAGS 2 /* Message Control */
+#define PCI_MSI_FLAGS 0x02 /* Message Control */
#define PCI_MSI_FLAGS_ENABLE 0x0001 /* MSI feature enabled */
#define PCI_MSI_FLAGS_QMASK 0x000e /* Maximum queue size available */
#define PCI_MSI_FLAGS_QSIZE 0x0070 /* Message queue size configured */
#define PCI_MSI_FLAGS_64BIT 0x0080 /* 64-bit addresses allowed */
#define PCI_MSI_FLAGS_MASKBIT 0x0100 /* Per-vector masking capable */
#define PCI_MSI_RFU 3 /* Rest of capability flags */
-#define PCI_MSI_ADDRESS_LO 4 /* Lower 32 bits */
-#define PCI_MSI_ADDRESS_HI 8 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
-#define PCI_MSI_DATA_32 8 /* 16 bits of data for 32-bit devices */
-#define PCI_MSI_MASK_32 12 /* Mask bits register for 32-bit devices */
-#define PCI_MSI_PENDING_32 16 /* Pending intrs for 32-bit devices */
-#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */
-#define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */
-#define PCI_MSI_PENDING_64 20 /* Pending intrs for 64-bit devices */
+#define PCI_MSI_ADDRESS_LO 0x04 /* Lower 32 bits */
+#define PCI_MSI_ADDRESS_HI 0x08 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
+#define PCI_MSI_DATA_32 0x08 /* 16 bits of data for 32-bit devices */
+#define PCI_MSI_MASK_32 0x0c /* Mask bits register for 32-bit devices */
+#define PCI_MSI_PENDING_32 0x10 /* Pending intrs for 32-bit devices */
+#define PCI_MSI_DATA_64 0x0c /* 16 bits of data for 64-bit devices */
+#define PCI_MSI_MASK_64 0x10 /* Mask bits register for 64-bit devices */
+#define PCI_MSI_PENDING_64 0x14 /* Pending intrs for 64-bit devices */
/* MSI-X registers (in MSI-X capability) */
#define PCI_MSIX_FLAGS 2 /* Message Control */
@@ -334,10 +335,10 @@
/* MSI-X Table entry format (in memory mapped by a BAR) */
#define PCI_MSIX_ENTRY_SIZE 16
-#define PCI_MSIX_ENTRY_LOWER_ADDR 0 /* Message Address */
-#define PCI_MSIX_ENTRY_UPPER_ADDR 4 /* Message Upper Address */
-#define PCI_MSIX_ENTRY_DATA 8 /* Message Data */
-#define PCI_MSIX_ENTRY_VECTOR_CTRL 12 /* Vector Control */
+#define PCI_MSIX_ENTRY_LOWER_ADDR 0x0 /* Message Address */
+#define PCI_MSIX_ENTRY_UPPER_ADDR 0x4 /* Message Upper Address */
+#define PCI_MSIX_ENTRY_DATA 0x8 /* Message Data */
+#define PCI_MSIX_ENTRY_VECTOR_CTRL 0xc /* Vector Control */
#define PCI_MSIX_ENTRY_CTRL_MASKBIT 0x00000001
/* CompactPCI Hotswap Register */
@@ -469,7 +470,7 @@
/* PCI Express capability registers */
-#define PCI_EXP_FLAGS 2 /* Capabilities register */
+#define PCI_EXP_FLAGS 0x02 /* Capabilities register */
#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */
#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */
#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */
@@ -483,7 +484,7 @@
#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
-#define PCI_EXP_DEVCAP 4 /* Device capabilities */
+#define PCI_EXP_DEVCAP 0x04 /* Device capabilities */
#define PCI_EXP_DEVCAP_PAYLOAD 0x00000007 /* Max_Payload_Size */
#define PCI_EXP_DEVCAP_PHANTOM 0x00000018 /* Phantom functions */
#define PCI_EXP_DEVCAP_EXT_TAG 0x00000020 /* Extended tags */
@@ -496,13 +497,19 @@
#define PCI_EXP_DEVCAP_PWR_VAL 0x03fc0000 /* Slot Power Limit Value */
#define PCI_EXP_DEVCAP_PWR_SCL 0x0c000000 /* Slot Power Limit Scale */
#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */
-#define PCI_EXP_DEVCTL 8 /* Device Control */
+#define PCI_EXP_DEVCTL 0x08 /* Device Control */
#define PCI_EXP_DEVCTL_CERE 0x0001 /* Correctable Error Reporting En. */
#define PCI_EXP_DEVCTL_NFERE 0x0002 /* Non-Fatal Error Reporting Enable */
#define PCI_EXP_DEVCTL_FERE 0x0004 /* Fatal Error Reporting Enable */
#define PCI_EXP_DEVCTL_URRE 0x0008 /* Unsupported Request Reporting En. */
#define PCI_EXP_DEVCTL_RELAX_EN 0x0010 /* Enable relaxed ordering */
#define PCI_EXP_DEVCTL_PAYLOAD 0x00e0 /* Max_Payload_Size */
+#define PCI_EXP_DEVCTL_PAYLOAD_128B 0x0000 /* 128 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_256B 0x0020 /* 256 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_512B 0x0040 /* 512 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_1024B 0x0060 /* 1024 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_2048B 0x0080 /* 2048 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_4096B 0x00a0 /* 4096 Bytes */
#define PCI_EXP_DEVCTL_EXT_TAG 0x0100 /* Extended Tag Field Enable */
#define PCI_EXP_DEVCTL_PHANTOM 0x0200 /* Phantom Functions Enable */
#define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */
@@ -515,7 +522,7 @@
#define PCI_EXP_DEVCTL_READRQ_2048B 0x4000 /* 2048 Bytes */
#define PCI_EXP_DEVCTL_READRQ_4096B 0x5000 /* 4096 Bytes */
#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */
-#define PCI_EXP_DEVSTA 10 /* Device Status */
+#define PCI_EXP_DEVSTA 0x0a /* Device Status */
#define PCI_EXP_DEVSTA_CED 0x0001 /* Correctable Error Detected */
#define PCI_EXP_DEVSTA_NFED 0x0002 /* Non-Fatal Error Detected */
#define PCI_EXP_DEVSTA_FED 0x0004 /* Fatal Error Detected */
@@ -523,15 +530,18 @@
#define PCI_EXP_DEVSTA_AUXPD 0x0010 /* AUX Power Detected */
#define PCI_EXP_DEVSTA_TRPND 0x0020 /* Transactions Pending */
#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V1 12 /* v1 endpoints without link end here */
-#define PCI_EXP_LNKCAP 12 /* Link Capabilities */
+#define PCI_EXP_LNKCAP 0x0c /* Link Capabilities */
#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */
#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */
#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */
#define PCI_EXP_LNKCAP_SLS_32_0GB 0x00000005 /* LNKCAP2 SLS Vector bit 4 */
+#define PCI_EXP_LNKCAP_SLS_64_0GB 0x00000006 /* LNKCAP2 SLS Vector bit 5 */
#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */
#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
+#define PCI_EXP_LNKCAP_ASPM_L0S 0x00000400 /* ASPM L0s Support */
+#define PCI_EXP_LNKCAP_ASPM_L1 0x00000800 /* ASPM L1 Support */
#define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */
#define PCI_EXP_LNKCAP_L1EL 0x00038000 /* L1 Exit Latency */
#define PCI_EXP_LNKCAP_CLKPM 0x00040000 /* Clock Power Management */
@@ -539,7 +549,7 @@
#define PCI_EXP_LNKCAP_DLLLARC 0x00100000 /* Data Link Layer Link Active Reporting Capable */
#define PCI_EXP_LNKCAP_LBNC 0x00200000 /* Link Bandwidth Notification Capability */
#define PCI_EXP_LNKCAP_PN 0xff000000 /* Port Number */
-#define PCI_EXP_LNKCTL 16 /* Link Control */
+#define PCI_EXP_LNKCTL 0x10 /* Link Control */
#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */
#define PCI_EXP_LNKCTL_ASPM_L0S 0x0001 /* L0s Enable */
#define PCI_EXP_LNKCTL_ASPM_L1 0x0002 /* L1 Enable */
@@ -552,13 +562,14 @@
#define PCI_EXP_LNKCTL_HAWD 0x0200 /* Hardware Autonomous Width Disable */
#define PCI_EXP_LNKCTL_LBMIE 0x0400 /* Link Bandwidth Management Interrupt Enable */
#define PCI_EXP_LNKCTL_LABIE 0x0800 /* Link Autonomous Bandwidth Interrupt Enable */
-#define PCI_EXP_LNKSTA 18 /* Link Status */
+#define PCI_EXP_LNKSTA 0x12 /* Link Status */
#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */
#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 /* Current Link Speed 2.5GT/s */
#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 /* Current Link Speed 5.0GT/s */
#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 /* Current Link Speed 8.0GT/s */
#define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */
#define PCI_EXP_LNKSTA_CLS_32_0GB 0x0005 /* Current Link Speed 32.0GT/s */
+#define PCI_EXP_LNKSTA_CLS_64_0GB 0x0006 /* Current Link Speed 64.0GT/s */
#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */
#define PCI_EXP_LNKSTA_NLW_X1 0x0010 /* Current Link Width x1 */
#define PCI_EXP_LNKSTA_NLW_X2 0x0020 /* Current Link Width x2 */
@@ -571,7 +582,7 @@
#define PCI_EXP_LNKSTA_LBMS 0x4000 /* Link Bandwidth Management Status */
#define PCI_EXP_LNKSTA_LABS 0x8000 /* Link Autonomous Bandwidth Status */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V1 20 /* v1 endpoints with link end here */
-#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
+#define PCI_EXP_SLTCAP 0x14 /* Slot Capabilities */
#define PCI_EXP_SLTCAP_ABP 0x00000001 /* Attention Button Present */
#define PCI_EXP_SLTCAP_PCP 0x00000002 /* Power Controller Present */
#define PCI_EXP_SLTCAP_MRLSP 0x00000004 /* MRL Sensor Present */
@@ -584,7 +595,7 @@
#define PCI_EXP_SLTCAP_EIP 0x00020000 /* Electromechanical Interlock Present */
#define PCI_EXP_SLTCAP_NCCS 0x00040000 /* No Command Completed Support */
#define PCI_EXP_SLTCAP_PSN 0xfff80000 /* Physical Slot Number */
-#define PCI_EXP_SLTCTL 24 /* Slot Control */
+#define PCI_EXP_SLTCTL 0x18 /* Slot Control */
#define PCI_EXP_SLTCTL_ABPE 0x0001 /* Attention Button Pressed Enable */
#define PCI_EXP_SLTCTL_PFDE 0x0002 /* Power Fault Detected Enable */
#define PCI_EXP_SLTCTL_MRLSCE 0x0004 /* MRL Sensor Changed Enable */
@@ -605,7 +616,9 @@
#define PCI_EXP_SLTCTL_PWR_OFF 0x0400 /* Power Off */
#define PCI_EXP_SLTCTL_EIC 0x0800 /* Electromechanical Interlock Control */
#define PCI_EXP_SLTCTL_DLLSCE 0x1000 /* Data Link Layer State Changed Enable */
-#define PCI_EXP_SLTSTA 26 /* Slot Status */
+#define PCI_EXP_SLTCTL_ASPL_DISABLE 0x2000 /* Auto Slot Power Limit Disable */
+#define PCI_EXP_SLTCTL_IBPD_DISABLE 0x4000 /* In-band PD disable */
+#define PCI_EXP_SLTSTA 0x1a /* Slot Status */
#define PCI_EXP_SLTSTA_ABP 0x0001 /* Attention Button Pressed */
#define PCI_EXP_SLTSTA_PFD 0x0002 /* Power Fault Detected */
#define PCI_EXP_SLTSTA_MRLSC 0x0004 /* MRL Sensor Changed */
@@ -615,15 +628,15 @@
#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */
#define PCI_EXP_SLTSTA_EIS 0x0080 /* Electromechanical Interlock Status */
#define PCI_EXP_SLTSTA_DLLSC 0x0100 /* Data Link Layer State Changed */
-#define PCI_EXP_RTCTL 28 /* Root Control */
+#define PCI_EXP_RTCTL 0x1c /* Root Control */
#define PCI_EXP_RTCTL_SECEE 0x0001 /* System Error on Correctable Error */
#define PCI_EXP_RTCTL_SENFEE 0x0002 /* System Error on Non-Fatal Error */
#define PCI_EXP_RTCTL_SEFEE 0x0004 /* System Error on Fatal Error */
#define PCI_EXP_RTCTL_PMEIE 0x0008 /* PME Interrupt Enable */
#define PCI_EXP_RTCTL_CRSSVE 0x0010 /* CRS Software Visibility Enable */
-#define PCI_EXP_RTCAP 30 /* Root Capabilities */
+#define PCI_EXP_RTCAP 0x1e /* Root Capabilities */
#define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */
-#define PCI_EXP_RTSTA 32 /* Root Status */
+#define PCI_EXP_RTSTA 0x20 /* Root Status */
#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */
#define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */
/*
@@ -634,7 +647,7 @@
* Use pcie_capability_read_word() and similar interfaces to use them
* safely.
*/
-#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
+#define PCI_EXP_DEVCAP2 0x24 /* Device Capabilities 2 */
#define PCI_EXP_DEVCAP2_COMP_TMOUT_DIS 0x00000010 /* Completion Timeout Disable supported */
#define PCI_EXP_DEVCAP2_ARI 0x00000020 /* Alternative Routing-ID */
#define PCI_EXP_DEVCAP2_ATOMIC_ROUTE 0x00000040 /* Atomic Op routing */
@@ -646,7 +659,7 @@
#define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */
#define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */
#define PCI_EXP_DEVCAP2_EE_PREFIX 0x00200000 /* End-End TLP Prefix */
-#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
+#define PCI_EXP_DEVCTL2 0x28 /* Device Control 2 */
#define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */
#define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS 0x0010 /* Completion Timeout Disable */
#define PCI_EXP_DEVCTL2_ARI 0x0020 /* Alternative Routing-ID */
@@ -658,30 +671,33 @@
#define PCI_EXP_DEVCTL2_OBFF_MSGA_EN 0x2000 /* Enable OBFF Message type A */
#define PCI_EXP_DEVCTL2_OBFF_MSGB_EN 0x4000 /* Enable OBFF Message type B */
#define PCI_EXP_DEVCTL2_OBFF_WAKE_EN 0x6000 /* OBFF using WAKE# signaling */
-#define PCI_EXP_DEVSTA2 42 /* Device Status 2 */
-#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V2 44 /* v2 endpoints without link end here */
-#define PCI_EXP_LNKCAP2 44 /* Link Capabilities 2 */
+#define PCI_EXP_DEVSTA2 0x2a /* Device Status 2 */
+#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V2 0x2c /* end of v2 EPs w/o link */
+#define PCI_EXP_LNKCAP2 0x2c /* Link Capabilities 2 */
#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */
#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */
#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */
#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */
#define PCI_EXP_LNKCAP2_SLS_32_0GB 0x00000020 /* Supported Speed 32GT/s */
+#define PCI_EXP_LNKCAP2_SLS_64_0GB 0x00000040 /* Supported Speed 64GT/s */
#define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* Crosslink supported */
-#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
+#define PCI_EXP_LNKCTL2 0x30 /* Link Control 2 */
#define PCI_EXP_LNKCTL2_TLS 0x000f
#define PCI_EXP_LNKCTL2_TLS_2_5GT 0x0001 /* Supported Speed 2.5GT/s */
#define PCI_EXP_LNKCTL2_TLS_5_0GT 0x0002 /* Supported Speed 5GT/s */
#define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */
#define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */
#define PCI_EXP_LNKCTL2_TLS_32_0GT 0x0005 /* Supported Speed 32GT/s */
+#define PCI_EXP_LNKCTL2_TLS_64_0GT 0x0006 /* Supported Speed 64GT/s */
#define PCI_EXP_LNKCTL2_ENTER_COMP 0x0010 /* Enter Compliance */
#define PCI_EXP_LNKCTL2_TX_MARGIN 0x0380 /* Transmit Margin */
#define PCI_EXP_LNKCTL2_HASD 0x0020 /* HW Autonomous Speed Disable */
-#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
-#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */
-#define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */
-#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */
-#define PCI_EXP_SLTSTA2 58 /* Slot Status 2 */
+#define PCI_EXP_LNKSTA2 0x32 /* Link Status 2 */
+#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x32 /* end of v2 EPs w/ link */
+#define PCI_EXP_SLTCAP2 0x34 /* Slot Capabilities 2 */
+#define PCI_EXP_SLTCAP2_IBPD 0x00000001 /* In-band PD Disable Supported */
+#define PCI_EXP_SLTCTL2 0x38 /* Slot Control 2 */
+#define PCI_EXP_SLTSTA2 0x3a /* Slot Status 2 */
/* Extended Capabilities (PCI-X 2.0 and Express) */
#define PCI_EXT_CAP_ID(header) (header & 0x0000ffff)
@@ -718,15 +734,17 @@
#define PCI_EXT_CAP_ID_DPC 0x1D /* Downstream Port Containment */
#define PCI_EXT_CAP_ID_L1SS 0x1E /* L1 PM Substates */
#define PCI_EXT_CAP_ID_PTM 0x1F /* Precision Time Measurement */
+#define PCI_EXT_CAP_ID_DVSEC 0x23 /* Designated Vendor-Specific */
#define PCI_EXT_CAP_ID_DLF 0x25 /* Data Link Feature */
#define PCI_EXT_CAP_ID_PL_16GT 0x26 /* Physical Layer 16.0 GT/s */
-#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PL_16GT
+#define PCI_EXT_CAP_ID_DOE 0x2E /* Data Object Exchange */
+#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_DOE
#define PCI_EXT_CAP_DSN_SIZEOF 12
#define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40
/* Advanced Error Reporting */
-#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
+#define PCI_ERR_UNCOR_STATUS 0x04 /* Uncorrectable Error Status */
#define PCI_ERR_UNC_UND 0x00000001 /* Undefined */
#define PCI_ERR_UNC_DLP 0x00000010 /* Data Link Protocol */
#define PCI_ERR_UNC_SURPDN 0x00000020 /* Surprise Down */
@@ -744,11 +762,11 @@
#define PCI_ERR_UNC_MCBTLP 0x00800000 /* MC blocked TLP */
#define PCI_ERR_UNC_ATOMEG 0x01000000 /* Atomic egress blocked */
#define PCI_ERR_UNC_TLPPRE 0x02000000 /* TLP prefix blocked */
-#define PCI_ERR_UNCOR_MASK 8 /* Uncorrectable Error Mask */
+#define PCI_ERR_UNCOR_MASK 0x08 /* Uncorrectable Error Mask */
/* Same bits as above */
-#define PCI_ERR_UNCOR_SEVER 12 /* Uncorrectable Error Severity */
+#define PCI_ERR_UNCOR_SEVER 0x0c /* Uncorrectable Error Severity */
/* Same bits as above */
-#define PCI_ERR_COR_STATUS 16 /* Correctable Error Status */
+#define PCI_ERR_COR_STATUS 0x10 /* Correctable Error Status */
#define PCI_ERR_COR_RCVR 0x00000001 /* Receiver Error Status */
#define PCI_ERR_COR_BAD_TLP 0x00000040 /* Bad TLP Status */
#define PCI_ERR_COR_BAD_DLLP 0x00000080 /* Bad DLLP Status */
@@ -757,20 +775,20 @@
#define PCI_ERR_COR_ADV_NFAT 0x00002000 /* Advisory Non-Fatal */
#define PCI_ERR_COR_INTERNAL 0x00004000 /* Corrected Internal */
#define PCI_ERR_COR_LOG_OVER 0x00008000 /* Header Log Overflow */
-#define PCI_ERR_COR_MASK 20 /* Correctable Error Mask */
+#define PCI_ERR_COR_MASK 0x14 /* Correctable Error Mask */
/* Same bits as above */
-#define PCI_ERR_CAP 24 /* Advanced Error Capabilities */
-#define PCI_ERR_CAP_FEP(x) ((x) & 31) /* First Error Pointer */
+#define PCI_ERR_CAP 0x18 /* Advanced Error Capabilities & Ctrl*/
+#define PCI_ERR_CAP_FEP(x) ((x) & 0x1f) /* First Error Pointer */
#define PCI_ERR_CAP_ECRC_GENC 0x00000020 /* ECRC Generation Capable */
#define PCI_ERR_CAP_ECRC_GENE 0x00000040 /* ECRC Generation Enable */
#define PCI_ERR_CAP_ECRC_CHKC 0x00000080 /* ECRC Check Capable */
#define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */
-#define PCI_ERR_HEADER_LOG 28 /* Header Log Register (16 bytes) */
-#define PCI_ERR_ROOT_COMMAND 44 /* Root Error Command */
+#define PCI_ERR_HEADER_LOG 0x1c /* Header Log Register (16 bytes) */
+#define PCI_ERR_ROOT_COMMAND 0x2c /* Root Error Command */
#define PCI_ERR_ROOT_CMD_COR_EN 0x00000001 /* Correctable Err Reporting Enable */
#define PCI_ERR_ROOT_CMD_NONFATAL_EN 0x00000002 /* Non-Fatal Err Reporting Enable */
#define PCI_ERR_ROOT_CMD_FATAL_EN 0x00000004 /* Fatal Err Reporting Enable */
-#define PCI_ERR_ROOT_STATUS 48
+#define PCI_ERR_ROOT_STATUS 0x30
#define PCI_ERR_ROOT_COR_RCV 0x00000001 /* ERR_COR Received */
#define PCI_ERR_ROOT_MULTI_COR_RCV 0x00000002 /* Multiple ERR_COR */
#define PCI_ERR_ROOT_UNCOR_RCV 0x00000004 /* ERR_FATAL/NONFATAL */
@@ -779,52 +797,59 @@
#define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */
#define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */
#define PCI_ERR_ROOT_AER_IRQ 0xf8000000 /* Advanced Error Interrupt Message Number */
-#define PCI_ERR_ROOT_ERR_SRC 52 /* Error Source Identification */
+#define PCI_ERR_ROOT_ERR_SRC 0x34 /* Error Source Identification */
/* Virtual Channel */
-#define PCI_VC_PORT_CAP1 4
+#define PCI_VC_PORT_CAP1 0x04
#define PCI_VC_CAP1_EVCC 0x00000007 /* extended VC count */
#define PCI_VC_CAP1_LPEVCC 0x00000070 /* low prio extended VC count */
#define PCI_VC_CAP1_ARB_SIZE 0x00000c00
-#define PCI_VC_PORT_CAP2 8
+#define PCI_VC_PORT_CAP2 0x08
#define PCI_VC_CAP2_32_PHASE 0x00000002
#define PCI_VC_CAP2_64_PHASE 0x00000004
#define PCI_VC_CAP2_128_PHASE 0x00000008
#define PCI_VC_CAP2_ARB_OFF 0xff000000
-#define PCI_VC_PORT_CTRL 12
+#define PCI_VC_PORT_CTRL 0x0c
#define PCI_VC_PORT_CTRL_LOAD_TABLE 0x00000001
-#define PCI_VC_PORT_STATUS 14
+#define PCI_VC_PORT_STATUS 0x0e
#define PCI_VC_PORT_STATUS_TABLE 0x00000001
-#define PCI_VC_RES_CAP 16
+#define PCI_VC_RES_CAP 0x10
#define PCI_VC_RES_CAP_32_PHASE 0x00000002
#define PCI_VC_RES_CAP_64_PHASE 0x00000004
#define PCI_VC_RES_CAP_128_PHASE 0x00000008
#define PCI_VC_RES_CAP_128_PHASE_TB 0x00000010
#define PCI_VC_RES_CAP_256_PHASE 0x00000020
#define PCI_VC_RES_CAP_ARB_OFF 0xff000000
-#define PCI_VC_RES_CTRL 20
+#define PCI_VC_RES_CTRL 0x14
#define PCI_VC_RES_CTRL_LOAD_TABLE 0x00010000
#define PCI_VC_RES_CTRL_ARB_SELECT 0x000e0000
#define PCI_VC_RES_CTRL_ID 0x07000000
#define PCI_VC_RES_CTRL_ENABLE 0x80000000
-#define PCI_VC_RES_STATUS 26
+#define PCI_VC_RES_STATUS 0x1a
#define PCI_VC_RES_STATUS_TABLE 0x00000001
#define PCI_VC_RES_STATUS_NEGO 0x00000002
#define PCI_CAP_VC_BASE_SIZEOF 0x10
-#define PCI_CAP_VC_PER_VC_SIZEOF 0x0C
+#define PCI_CAP_VC_PER_VC_SIZEOF 0x0c
/* Power Budgeting */
-#define PCI_PWR_DSR 4 /* Data Select Register */
-#define PCI_PWR_DATA 8 /* Data Register */
+#define PCI_PWR_DSR 0x04 /* Data Select Register */
+#define PCI_PWR_DATA 0x08 /* Data Register */
#define PCI_PWR_DATA_BASE(x) ((x) & 0xff) /* Base Power */
#define PCI_PWR_DATA_SCALE(x) (((x) >> 8) & 3) /* Data Scale */
#define PCI_PWR_DATA_PM_SUB(x) (((x) >> 10) & 7) /* PM Sub State */
#define PCI_PWR_DATA_PM_STATE(x) (((x) >> 13) & 3) /* PM State */
#define PCI_PWR_DATA_TYPE(x) (((x) >> 15) & 7) /* Type */
#define PCI_PWR_DATA_RAIL(x) (((x) >> 18) & 7) /* Power Rail */
-#define PCI_PWR_CAP 12 /* Capability */
+#define PCI_PWR_CAP 0x0c /* Capability */
#define PCI_PWR_CAP_BUDGET(x) ((x) & 1) /* Included in system budget */
-#define PCI_EXT_CAP_PWR_SIZEOF 16
+#define PCI_EXT_CAP_PWR_SIZEOF 0x10
+
+/* Root Complex Event Collector Endpoint Association */
+#define PCI_RCEC_RCIEP_BITMAP 4 /* Associated Bitmap for RCiEPs */
+#define PCI_RCEC_BUSN 8 /* RCEC Associated Bus Numbers */
+#define PCI_RCEC_BUSN_REG_VER 0x02 /* Least version with BUSN present */
+#define PCI_RCEC_BUSN_NEXT(x) (((x) >> 8) & 0xff)
+#define PCI_RCEC_BUSN_LAST(x) (((x) >> 16) & 0xff)
/* Vendor-Specific (VSEC, PCI_EXT_CAP_ID_VNDR) */
#define PCI_VNDR_HEADER 4 /* Vendor-Specific Header */
@@ -941,7 +966,7 @@
#define PCI_SRIOV_VFM_MI 0x1 /* Dormant.MigrateIn */
#define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */
#define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */
-#define PCI_EXT_CAP_SRIOV_SIZEOF 64
+#define PCI_EXT_CAP_SRIOV_SIZEOF 0x40
#define PCI_LTR_MAX_SNOOP_LAT 0x4
#define PCI_LTR_MAX_NOSNOOP_LAT 0x6
@@ -994,12 +1019,12 @@
#define PCI_TPH_LOC_NONE 0x000 /* no location */
#define PCI_TPH_LOC_CAP 0x200 /* in capability */
#define PCI_TPH_LOC_MSIX 0x400 /* in MSI-X */
-#define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* st table mask */
-#define PCI_TPH_CAP_ST_SHIFT 16 /* st table shift */
-#define PCI_TPH_BASE_SIZEOF 12 /* size with no st table */
+#define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* ST table mask */
+#define PCI_TPH_CAP_ST_SHIFT 16 /* ST table shift */
+#define PCI_TPH_BASE_SIZEOF 0xc /* size with no ST table */
/* Downstream Port Containment */
-#define PCI_EXP_DPC_CAP 4 /* DPC Capability */
+#define PCI_EXP_DPC_CAP 0x04 /* DPC Capability */
#define PCI_EXP_DPC_IRQ 0x001F /* Interrupt Message Number */
#define PCI_EXP_DPC_CAP_RP_EXT 0x0020 /* Root Port Extensions */
#define PCI_EXP_DPC_CAP_POISONED_TLP 0x0040 /* Poisoned TLP Egress Blocking Supported */
@@ -1007,19 +1032,19 @@
#define PCI_EXP_DPC_RP_PIO_LOG_SIZE 0x0F00 /* RP PIO Log Size */
#define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */
-#define PCI_EXP_DPC_CTL 6 /* DPC control */
+#define PCI_EXP_DPC_CTL 0x06 /* DPC control */
#define PCI_EXP_DPC_CTL_EN_FATAL 0x0001 /* Enable trigger on ERR_FATAL message */
#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x0002 /* Enable trigger on ERR_NONFATAL message */
#define PCI_EXP_DPC_CTL_INT_EN 0x0008 /* DPC Interrupt Enable */
-#define PCI_EXP_DPC_STATUS 8 /* DPC Status */
+#define PCI_EXP_DPC_STATUS 0x08 /* DPC Status */
#define PCI_EXP_DPC_STATUS_TRIGGER 0x0001 /* Trigger Status */
#define PCI_EXP_DPC_STATUS_TRIGGER_RSN 0x0006 /* Trigger Reason */
#define PCI_EXP_DPC_STATUS_INTERRUPT 0x0008 /* Interrupt Status */
#define PCI_EXP_DPC_RP_BUSY 0x0010 /* Root Port Busy */
#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT 0x0060 /* Trig Reason Extension */
-#define PCI_EXP_DPC_SOURCE_ID 10 /* DPC Source Identifier */
+#define PCI_EXP_DPC_SOURCE_ID 0x0A /* DPC Source Identifier */
#define PCI_EXP_DPC_RP_PIO_STATUS 0x0C /* RP PIO Status */
#define PCI_EXP_DPC_RP_PIO_MASK 0x10 /* RP PIO Mask */
@@ -1054,12 +1079,21 @@
#define PCI_L1SS_CTL1_PCIPM_L1_1 0x00000002 /* PCI-PM L1.1 Enable */
#define PCI_L1SS_CTL1_ASPM_L1_2 0x00000004 /* ASPM L1.2 Enable */
#define PCI_L1SS_CTL1_ASPM_L1_1 0x00000008 /* ASPM L1.1 Enable */
+#define PCI_L1SS_CTL1_L1_2_MASK 0x00000005
#define PCI_L1SS_CTL1_L1SS_MASK 0x0000000f
#define PCI_L1SS_CTL1_CM_RESTORE_TIME 0x0000ff00 /* Common_Mode_Restore_Time */
#define PCI_L1SS_CTL1_LTR_L12_TH_VALUE 0x03ff0000 /* LTR_L1.2_THRESHOLD_Value */
#define PCI_L1SS_CTL1_LTR_L12_TH_SCALE 0xe0000000 /* LTR_L1.2_THRESHOLD_Scale */
#define PCI_L1SS_CTL2 0x0c /* Control 2 Register */
+/* Designated Vendor-Specific (DVSEC, PCI_EXT_CAP_ID_DVSEC) */
+#define PCI_DVSEC_HEADER1 0x4 /* Designated Vendor-Specific Header1 */
+#define PCI_DVSEC_HEADER1_VID(x) ((x) & 0xffff)
+#define PCI_DVSEC_HEADER1_REV(x) (((x) >> 16) & 0xf)
+#define PCI_DVSEC_HEADER1_LEN(x) (((x) >> 20) & 0xfff)
+#define PCI_DVSEC_HEADER2 0x8 /* Designated Vendor-Specific Header2 */
+#define PCI_DVSEC_HEADER2_ID(x) ((x) & 0xffff)
+
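Decoding a DVSEC instance is a matter of applying these accessors to the two header dwords. A sketch, assuming hdr1/hdr2 were read with pci_read_config_dword() at pos + PCI_DVSEC_HEADER1/2:

static void dvsec_decode(u32 hdr1, u32 hdr2)
{
	u16 vid	     = PCI_DVSEC_HEADER1_VID(hdr1);	/* owning vendor */
	u8  rev	     = PCI_DVSEC_HEADER1_REV(hdr1);
	u16 len	     = PCI_DVSEC_HEADER1_LEN(hdr1);	/* total length, bytes */
	u16 dvsec_id = PCI_DVSEC_HEADER2_ID(hdr2);	/* vendor-defined ID */

	/* dispatch on (vid, dvsec_id) here */
}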
/* Data Link Feature */
#define PCI_DLF_CAP 0x04 /* Capabilities Register */
#define PCI_DLF_EXCHANGE_ENABLE 0x80000000 /* Data Link Feature Exchange Enable */
@@ -1070,4 +1104,30 @@
#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK 0x000000F0
#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT 4
+/* Data Object Exchange */
+#define PCI_DOE_CAP 0x04 /* DOE Capabilities Register */
+#define PCI_DOE_CAP_INT_SUP 0x00000001 /* Interrupt Support */
+#define PCI_DOE_CAP_INT_MSG_NUM 0x00000ffe /* Interrupt Message Number */
+#define PCI_DOE_CTRL 0x08 /* DOE Control Register */
+#define PCI_DOE_CTRL_ABORT 0x00000001 /* DOE Abort */
+#define PCI_DOE_CTRL_INT_EN 0x00000002 /* DOE Interrupt Enable */
+#define PCI_DOE_CTRL_GO 0x80000000 /* DOE Go */
+#define PCI_DOE_STATUS 0x0c /* DOE Status Register */
+#define PCI_DOE_STATUS_BUSY 0x00000001 /* DOE Busy */
+#define PCI_DOE_STATUS_INT_STATUS 0x00000002 /* DOE Interrupt Status */
+#define PCI_DOE_STATUS_ERROR 0x00000004 /* DOE Error */
+#define PCI_DOE_STATUS_DATA_OBJECT_READY 0x80000000 /* Data Object Ready */
+#define PCI_DOE_WRITE 0x10 /* DOE Write Data Mailbox Register */
+#define PCI_DOE_READ 0x14 /* DOE Read Data Mailbox Register */
+
+/* DOE Data Object - note not actually registers */
+#define PCI_DOE_DATA_OBJECT_HEADER_1_VID 0x0000ffff
+#define PCI_DOE_DATA_OBJECT_HEADER_1_TYPE 0x00ff0000
+#define PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH 0x0003ffff
+
+#define PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX 0x000000ff
+#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID 0x0000ffff
+#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL 0x00ff0000
+#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX 0xff000000
+
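The header masks are plain bitfields over the first two dwords of a data object; a kernel-side sketch using FIELD_GET() from <linux/bitfield.h>:

#include <linux/bitfield.h>

static void doe_decode_header(u32 hdr1, u32 hdr2,
			      u16 *vid, u8 *type, u32 *len_dw)
{
	*vid	= FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, hdr1);
	*type	= FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, hdr1);
	*len_dw	= FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH, hdr2);
}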
#endif /* LINUX_PCI_REGS_H */
diff --git a/include/uapi/linux/pcitest.h b/include/uapi/linux/pcitest.h
index cbf422e56696..f9c1af8d141b 100644
--- a/include/uapi/linux/pcitest.h
+++ b/include/uapi/linux/pcitest.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/**
+/*
* pcitest.h - PCI test uapi defines
*
* Copyright (C) 2017 Texas Instruments
@@ -19,5 +19,13 @@
#define PCITEST_MSIX _IOW('P', 0x7, int)
#define PCITEST_SET_IRQTYPE _IOW('P', 0x8, int)
#define PCITEST_GET_IRQTYPE _IO('P', 0x9)
+#define PCITEST_CLEAR_IRQ _IO('P', 0x10)
+
+#define PCITEST_FLAGS_USE_DMA 0x00000001
+
+struct pci_endpoint_test_xfer_param {
+ unsigned long size;
+ unsigned char flags;
+};
#endif /* __UAPI_LINUX_PCITEST_H */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 377d794d3105..ccb7f5dad59b 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -38,6 +38,21 @@ enum perf_type_id {
};
/*
+ * attr.config layout for type PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE
+ * PERF_TYPE_HARDWARE: 0xEEEEEEEE000000AA
+ * AA: hardware event ID
+ * EEEEEEEE: PMU type ID
+ * PERF_TYPE_HW_CACHE: 0xEEEEEEEE00DDCCBB
+ * BB: hardware cache ID
+ * CC: hardware cache op ID
+ * DD: hardware cache op result ID
+ * EEEEEEEE: PMU type ID
+ * If the PMU type ID is 0, PERF_TYPE_RAW will be applied.
+ */
+#define PERF_PMU_TYPE_SHIFT 32
+#define PERF_HW_EVENT_MASK 0xffffffff
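
Composing an extended config value follows directly from the layout above; a sketch where pmu_type is an assumed value read from /sys/bus/event_source/devices/<pmu>/type:

	__u64 config = ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT) |
		       (PERF_COUNT_HW_CPU_CYCLES & PERF_HW_EVENT_MASK);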
+
+/*
* Generalized performance event event_id types, used by the
* attr.event_id parameter of the sys_perf_event_open()
* syscall:
@@ -112,6 +127,7 @@ enum perf_sw_ids {
PERF_COUNT_SW_EMULATION_FAULTS = 8,
PERF_COUNT_SW_DUMMY = 9,
PERF_COUNT_SW_BPF_OUTPUT = 10,
+ PERF_COUNT_SW_CGROUP_SWITCHES = 11,
PERF_COUNT_SW_MAX, /* non-ABI */
};
@@ -142,12 +158,15 @@ enum perf_event_sample_format {
PERF_SAMPLE_REGS_INTR = 1U << 18,
PERF_SAMPLE_PHYS_ADDR = 1U << 19,
PERF_SAMPLE_AUX = 1U << 20,
+ PERF_SAMPLE_CGROUP = 1U << 21,
+ PERF_SAMPLE_DATA_PAGE_SIZE = 1U << 22,
+ PERF_SAMPLE_CODE_PAGE_SIZE = 1U << 23,
+ PERF_SAMPLE_WEIGHT_STRUCT = 1U << 24,
- PERF_SAMPLE_MAX = 1U << 21, /* non-ABI */
-
- __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */
+ PERF_SAMPLE_MAX = 1U << 25, /* non-ABI */
};
+#define PERF_SAMPLE_WEIGHT_TYPE (PERF_SAMPLE_WEIGHT | PERF_SAMPLE_WEIGHT_STRUCT)
/*
* values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
*
@@ -181,6 +200,10 @@ enum perf_branch_sample_type_shift {
PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, /* save branch type */
+ PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 17, /* save low level index of raw branch records */
+
+ PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, /* save privilege mode */
+
PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
};
@@ -208,6 +231,10 @@ enum perf_branch_sample_type {
PERF_SAMPLE_BRANCH_TYPE_SAVE =
1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,
+ PERF_SAMPLE_BRANCH_HW_INDEX = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT,
+
+ PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT,
+
PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
@@ -226,9 +253,50 @@ enum {
PERF_BR_SYSRET = 8, /* syscall return */
PERF_BR_COND_CALL = 9, /* conditional function call */
PERF_BR_COND_RET = 10, /* conditional function return */
+ PERF_BR_ERET = 11, /* exception return */
+ PERF_BR_IRQ = 12, /* irq */
+ PERF_BR_SERROR = 13, /* system error */
+ PERF_BR_NO_TX = 14, /* not in transaction */
+ PERF_BR_EXTEND_ABI = 15, /* extend ABI */
PERF_BR_MAX,
};
+/*
+ * Common branch speculation outcome classification
+ */
+enum {
+ PERF_BR_SPEC_NA = 0, /* Not available */
+ PERF_BR_SPEC_WRONG_PATH = 1, /* Speculative but on wrong path */
+ PERF_BR_NON_SPEC_CORRECT_PATH = 2, /* Non-speculative but on correct path */
+ PERF_BR_SPEC_CORRECT_PATH = 3, /* Speculative and on correct path */
+ PERF_BR_SPEC_MAX,
+};
+
+enum {
+ PERF_BR_NEW_FAULT_ALGN = 0, /* Alignment fault */
+ PERF_BR_NEW_FAULT_DATA = 1, /* Data fault */
+ PERF_BR_NEW_FAULT_INST = 2, /* Inst fault */
+ PERF_BR_NEW_ARCH_1 = 3, /* Architecture specific */
+ PERF_BR_NEW_ARCH_2 = 4, /* Architecture specific */
+ PERF_BR_NEW_ARCH_3 = 5, /* Architecture specific */
+ PERF_BR_NEW_ARCH_4 = 6, /* Architecture specific */
+ PERF_BR_NEW_ARCH_5 = 7, /* Architecture specific */
+ PERF_BR_NEW_MAX,
+};
+
+enum {
+ PERF_BR_PRIV_UNKNOWN = 0,
+ PERF_BR_PRIV_USER = 1,
+ PERF_BR_PRIV_KERNEL = 2,
+ PERF_BR_PRIV_HV = 3,
+};
+
+#define PERF_BR_ARM64_FIQ PERF_BR_NEW_ARCH_1
+#define PERF_BR_ARM64_DEBUG_HALT PERF_BR_NEW_ARCH_2
+#define PERF_BR_ARM64_DEBUG_EXIT PERF_BR_NEW_ARCH_3
+#define PERF_BR_ARM64_DEBUG_INST PERF_BR_NEW_ARCH_4
+#define PERF_BR_ARM64_DEBUG_DATA PERF_BR_NEW_ARCH_5
+
#define PERF_SAMPLE_BRANCH_PLM_ALL \
(PERF_SAMPLE_BRANCH_USER|\
PERF_SAMPLE_BRANCH_KERNEL|\
@@ -274,6 +342,7 @@ enum {
* { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
* { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
* { u64 id; } && PERF_FORMAT_ID
+ * { u64 lost; } && PERF_FORMAT_LOST
* } && !PERF_FORMAT_GROUP
*
* { u64 nr;
@@ -281,6 +350,7 @@ enum {
* { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
* { u64 value;
* { u64 id; } && PERF_FORMAT_ID
+ * { u64 lost; } && PERF_FORMAT_LOST
* } cntr[nr];
* } && PERF_FORMAT_GROUP
* };
@@ -290,8 +360,9 @@ enum perf_event_read_format {
PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
PERF_FORMAT_ID = 1U << 2,
PERF_FORMAT_GROUP = 1U << 3,
+ PERF_FORMAT_LOST = 1U << 4,
- PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
+ PERF_FORMAT_MAX = 1U << 5, /* non-ABI */
};
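
For the !PERF_FORMAT_GROUP case, the documented layout with all optional fields enabled flattens to a struct like the following sketch:

/* read(2) buffer when PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID | PERF_FORMAT_LOST
 * are set and PERF_FORMAT_GROUP is not.
 */
struct perf_read_value {
	__u64 value;
	__u64 time_enabled;
	__u64 time_running;
	__u64 id;
	__u64 lost;
};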
#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
@@ -302,6 +373,7 @@ enum perf_event_read_format {
#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */
#define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */
#define PERF_ATTR_SIZE_VER6 120 /* add: aux_sample_size */
+#define PERF_ATTR_SIZE_VER7 128 /* add: sig_data */
/*
* Hardware event_id to monitor via a performance monitoring event:
@@ -377,7 +449,13 @@ struct perf_event_attr {
ksymbol : 1, /* include ksymbol events */
bpf_event : 1, /* include bpf events */
aux_output : 1, /* generate AUX records instead of events */
- __reserved_1 : 32;
+ cgroup : 1, /* include cgroup events */
+ text_poke : 1, /* include text poke events */
+ build_id : 1, /* use build id in mmap2 events */
+ inherit_thread : 1, /* children only inherit if cloned with CLONE_THREAD */
+ remove_on_exec : 1, /* event is removed from task on exec */
+ sigtrap : 1, /* send synchronous SIGTRAP on event */
+ __reserved_1 : 26;
union {
__u32 wakeup_events; /* wakeup every n events */
@@ -429,6 +507,14 @@ struct perf_event_attr {
__u16 __reserved_2;
__u32 aux_sample_size;
__u32 __reserved_3;
+
+ /*
+ * User provided data if sigtrap=1, passed back to user via
+ * siginfo_t::si_perf_data, e.g. to permit user to identify the event.
+ * Note, siginfo_t::si_perf_data is long-sized, and sig_data will be
+ * truncated accordingly on 32 bit architectures.
+ */
+ __u64 sig_data;
};
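
On the receiving side the sig_data cookie shows up in the signal frame. A userspace sketch, assuming a libc recent enough to expose si_perf_data and a hypothetical handle_event() dispatcher:

#include <signal.h>

extern void handle_event(unsigned long which);	/* hypothetical */

static void perf_sigtrap_handler(int sig, siginfo_t *info, void *uctx)
{
	/* attr.sig_data, truncated to long on 32-bit architectures */
	unsigned long which = info->si_perf_data;

	handle_event(which);
}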
/*
@@ -449,7 +535,7 @@ struct perf_event_query_bpf {
/*
* User provided buffer to store program ids
*/
- __u32 ids[0];
+ __u32 ids[];
};
/*
@@ -526,9 +612,10 @@ struct perf_event_mmap_page {
cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */
cap_user_rdpmc : 1, /* The RDPMC instruction can be used to read counts */
- cap_user_time : 1, /* The time_* fields are used */
+ cap_user_time : 1, /* The time_{shift,mult,offset} fields are used */
cap_user_time_zero : 1, /* The time_zero field is used */
- cap_____res : 59;
+ cap_user_time_short : 1, /* the time_{cycle,mask} fields are used */
+ cap_____res : 58;
};
};
@@ -587,13 +674,29 @@ struct perf_event_mmap_page {
* ((rem * time_mult) >> time_shift);
*/
__u64 time_zero;
+
__u32 size; /* Header size up to __reserved[] fields. */
+ __u32 __reserved_1;
+
+ /*
+ * If cap_usr_time_short, the hardware clock is less than 64bit wide
+ * and we must compute the 'cyc' value, as used by cap_usr_time, as:
+ *
+ * cyc = time_cycles + ((cyc - time_cycles) & time_mask)
+ *
+ * NOTE: this form is explicitly chosen such that cap_usr_time_short
+ * is a correction on top of cap_usr_time, and code that doesn't
+ * know about cap_usr_time_short still works under the assumption
+ * the counter doesn't wrap.
+ */
+ __u64 time_cycles;
+ __u64 time_mask;
/*
* Hole for extension of the self monitor capabilities
*/
- __u8 __reserved[118*8+4]; /* align to 1k. */
+ __u8 __reserved[116*8]; /* align to 1k. */
/*
* Control data for the mmap() data buffer.
@@ -633,6 +736,22 @@ struct perf_event_mmap_page {
__u64 aux_size;
};
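
The cap_user_time_short correction described above amounts to one line of fix-up on the raw counter value; a sketch:

static __u64 perf_extend_cyc(const struct perf_event_mmap_page *pc, __u64 cyc)
{
	if (pc->cap_user_time_short)
		cyc = pc->time_cycles +
		      ((cyc - pc->time_cycles) & pc->time_mask);
	return cyc;	/* safe to feed into the time_offset/mult/shift math */
}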
+/*
+ * The current state of perf_event_header::misc bits usage:
+ * ('|' used bit, '-' unused bit)
+ *
+ * 012 CDEF
+ * |||---------||||
+ *
+ * Where:
+ * 0-2 CPUMODE_MASK
+ *
+ * C PROC_MAP_PARSE_TIMEOUT
+ * D MMAP_DATA / COMM_EXEC / FORK_EXEC / SWITCH_OUT
+ * E MMAP_BUILD_ID / EXACT_IP / SCHED_OUT_PREEMPT
+ * F (reserved)
+ */
+
#define PERF_RECORD_MISC_CPUMODE_MASK (7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
#define PERF_RECORD_MISC_KERNEL (1 << 0)
@@ -664,6 +783,7 @@ struct perf_event_mmap_page {
*
* PERF_RECORD_MISC_EXACT_IP - PERF_RECORD_SAMPLE of precise events
* PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
+ * PERF_RECORD_MISC_MMAP_BUILD_ID - PERF_RECORD_MMAP2 event
*
*
* PERF_RECORD_MISC_EXACT_IP:
@@ -673,9 +793,13 @@ struct perf_event_mmap_page {
*
* PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
* Indicates that the thread was preempted in TASK_RUNNING state.
+ *
+ * PERF_RECORD_MISC_MMAP_BUILD_ID:
+ * Indicates that the mmap2 event carries build id data.
*/
#define PERF_RECORD_MISC_EXACT_IP (1 << 14)
#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14)
+#define PERF_RECORD_MISC_MMAP_BUILD_ID (1 << 14)
/*
* Reserve the last bit to indicate some extended misc field
*/
@@ -853,7 +977,9 @@ enum perf_event_type {
* char data[size];}&& PERF_SAMPLE_RAW
*
* { u64 nr;
- * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
+ * { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX
+ * { u64 from, to, flags } lbr[nr];
+ * } && PERF_SAMPLE_BRANCH_STACK
*
* { u64 abi; # enum perf_sample_regs_abi
* u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
@@ -862,7 +988,24 @@ enum perf_event_type {
* char data[size];
* u64 dyn_size; } && PERF_SAMPLE_STACK_USER
*
- * { u64 weight; } && PERF_SAMPLE_WEIGHT
+ * { union perf_sample_weight
+ * {
+ * u64 full; && PERF_SAMPLE_WEIGHT
+ * #if defined(__LITTLE_ENDIAN_BITFIELD)
+ * struct {
+ * u32 var1_dw;
+ * u16 var2_w;
+ * u16 var3_w;
+ * } && PERF_SAMPLE_WEIGHT_STRUCT
+ * #elif defined(__BIG_ENDIAN_BITFIELD)
+ * struct {
+ * u16 var3_w;
+ * u16 var2_w;
+ * u32 var1_dw;
+ * } && PERF_SAMPLE_WEIGHT_STRUCT
+ * #endif
+ * }
+ * }
* { u64 data_src; } && PERF_SAMPLE_DATA_SRC
* { u64 transaction; } && PERF_SAMPLE_TRANSACTION
* { u64 abi; # enum perf_sample_regs_abi
@@ -870,6 +1013,8 @@ enum perf_event_type {
* { u64 phys_addr;} && PERF_SAMPLE_PHYS_ADDR
* { u64 size;
* char data[size]; } && PERF_SAMPLE_AUX
+ * { u64 data_page_size;} && PERF_SAMPLE_DATA_PAGE_SIZE
+ * { u64 code_page_size;} && PERF_SAMPLE_CODE_PAGE_SIZE
* };
*/
PERF_RECORD_SAMPLE = 9,
@@ -885,10 +1030,20 @@ enum perf_event_type {
* u64 addr;
* u64 len;
* u64 pgoff;
- * u32 maj;
- * u32 min;
- * u64 ino;
- * u64 ino_generation;
+ * union {
+ * struct {
+ * u32 maj;
+ * u32 min;
+ * u64 ino;
+ * u64 ino_generation;
+ * };
+ * struct {
+ * u8 build_id_size;
+ * u8 __reserved_1;
+ * u16 __reserved_2;
+ * u8 build_id[20];
+ * };
+ * };
* u32 prot, flags;
* char filename[];
* struct sample_id sample_id;
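A sketch of consuming the union above when parsing PERF_RECORD_MMAP2; the struct here only mirrors the documented wire layout for illustration and is not a type from this header:

    #include <linux/perf_event.h>
    #include <stdio.h>

    /* Illustrative mirror of the id fields documented above. */
    struct mmap2_ids {
        union {
            struct { __u32 maj, min; __u64 ino, ino_generation; };
            struct { __u8 build_id_size, rsvd1; __u16 rsvd2;
                     __u8 build_id[20]; };
        };
    };

    static void print_mmap2_id(const struct perf_event_header *hdr,
                               const struct mmap2_ids *ids)
    {
        if (hdr->misc & PERF_RECORD_MISC_MMAP_BUILD_ID)
            printf("build id, %u bytes\n", ids->build_id_size);
        else
            printf("dev %u:%u ino %llu\n", ids->maj, ids->min,
                   (unsigned long long)ids->ino);
    }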
@@ -1006,12 +1161,60 @@ enum perf_event_type {
*/
PERF_RECORD_BPF_EVENT = 18,
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u64 id;
+ * char path[];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_CGROUP = 19,
+
+ /*
+ * Records changes to kernel text, i.e. self-modifying code. 'old_len' is
+ * the number of old bytes, 'new_len' is the number of new bytes. Either
+ * 'old_len' or 'new_len' may be zero to indicate, for example, the
+ * addition or removal of a trampoline. 'bytes' contains the old bytes
+ * followed immediately by the new bytes.
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u64 addr;
+ * u16 old_len;
+ * u16 new_len;
+ * u8 bytes[];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_TEXT_POKE = 20,
+
+ /*
+ * Data written to the AUX area by hardware due to aux_output may need
+ * to be matched to the event by an architecture-specific hardware ID.
+ * This records the hardware ID, but requires sample_id to provide the
+ * event ID, e.g. Intel PT uses this record to disambiguate PEBS-via-PT
+ * records from multiple events.
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u64 hw_id;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_AUX_OUTPUT_HW_ID = 21,
+
PERF_RECORD_MAX, /* non-ABI */
};
enum perf_record_ksymbol_type {
PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0,
PERF_RECORD_KSYMBOL_TYPE_BPF = 1,
+ /*
+ * Out of line code such as kprobe-replaced instructions or optimized
+ * kprobes or ftrace trampolines.
+ */
+ PERF_RECORD_KSYMBOL_TYPE_OOL = 2,
PERF_RECORD_KSYMBOL_TYPE_MAX /* non-ABI */
};
@@ -1042,10 +1245,15 @@ enum perf_callchain_context {
/**
* PERF_RECORD_AUX::flags bits
*/
-#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
-#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
-#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */
-#define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */
+#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
+#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
+#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */
+#define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */
+#define PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK 0xff00 /* PMU specific trace format type */
+
+/* CoreSight PMU AUX buffer formats */
+#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT 0x0000 /* Default for backward compatibility */
+#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW 0x0100 /* Raw format of the source */
#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
#define PERF_FLAG_FD_OUTPUT (1UL << 1)
@@ -1064,14 +1272,18 @@ union perf_mem_data_src {
mem_lvl_num:4, /* memory hierarchy level number */
mem_remote:1, /* remote */
mem_snoopx:2, /* snoop mode, ext */
- mem_rsvd:24;
+ mem_blk:3, /* access blocked */
+ mem_hops:3, /* hop level */
+ mem_rsvd:18;
};
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
__u64 val;
struct {
- __u64 mem_rsvd:24,
+ __u64 mem_rsvd:18,
+ mem_hops:3, /* hop level */
+ mem_blk:3, /* access blocked */
mem_snoopx:2, /* snoop mode, ext */
mem_remote:1, /* remote */
mem_lvl_num:4, /* memory hierarchy level number */
@@ -1094,7 +1306,13 @@ union perf_mem_data_src {
#define PERF_MEM_OP_EXEC 0x10 /* code (execution) */
#define PERF_MEM_OP_SHIFT 0
-/* memory hierarchy (memory level, hit or miss) */
+/*
+ * The PERF_MEM_LVL_* namespace is being deprecated to some extent in
+ * favour of the newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_} fields.
+ * We keep supporting this namespace in order to not break defined ABIs.
+ *
+ * memory hierarchy (memory level, hit or miss)
+ */
#define PERF_MEM_LVL_NA 0x01 /* not available */
#define PERF_MEM_LVL_HIT 0x02 /* hit level */
#define PERF_MEM_LVL_MISS 0x04 /* miss level */
@@ -1118,7 +1336,9 @@ union perf_mem_data_src {
#define PERF_MEM_LVLNUM_L2 0x02 /* L2 */
#define PERF_MEM_LVLNUM_L3 0x03 /* L3 */
#define PERF_MEM_LVLNUM_L4 0x04 /* L4 */
-/* 5-0xa available */
+/* 5-0x8 available */
+#define PERF_MEM_LVLNUM_CXL 0x09 /* CXL */
+#define PERF_MEM_LVLNUM_IO 0x0a /* I/O */
#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB */
#define PERF_MEM_LVLNUM_RAM 0x0d /* RAM */
@@ -1136,8 +1356,8 @@ union perf_mem_data_src {
#define PERF_MEM_SNOOP_SHIFT 19
#define PERF_MEM_SNOOPX_FWD 0x01 /* forward */
-/* 1 free */
-#define PERF_MEM_SNOOPX_SHIFT 37
+#define PERF_MEM_SNOOPX_PEER 0x02 /* xfer from peer */
+#define PERF_MEM_SNOOPX_SHIFT 38
/* locked instruction */
#define PERF_MEM_LOCK_NA 0x01 /* not available */
@@ -1154,6 +1374,20 @@ union perf_mem_data_src {
#define PERF_MEM_TLB_OS 0x40 /* OS fault handler */
#define PERF_MEM_TLB_SHIFT 26
+/* Access blocked */
+#define PERF_MEM_BLK_NA 0x01 /* not available */
+#define PERF_MEM_BLK_DATA 0x02 /* data could not be forwarded */
+#define PERF_MEM_BLK_ADDR 0x04 /* address conflict */
+#define PERF_MEM_BLK_SHIFT 40
+
+/* hop level */
+#define PERF_MEM_HOPS_0 0x01 /* remote core, same node */
+#define PERF_MEM_HOPS_1 0x02 /* remote node, same socket */
+#define PERF_MEM_HOPS_2 0x03 /* remote socket, same board */
+#define PERF_MEM_HOPS_3 0x04 /* remote board */
+/* 5-7 available */
+#define PERF_MEM_HOPS_SHIFT 43
+
#define PERF_MEM_S(a, s) \
(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
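For instance, a load that hit in the L1 cache could be encoded with this helper as follows (all names are constants from this header):

    __u64 data_src = PERF_MEM_S(OP, LOAD) |
                     PERF_MEM_S(LVL, HIT) |
                     PERF_MEM_S(LVLNUM, L1);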
@@ -1172,6 +1406,7 @@ union perf_mem_data_src {
* abort: aborting a hardware transaction
* cycles: cycles from last branch (or 0 if not supported)
* type: branch type
+ * spec: branch speculation info (or 0 if not supported)
*/
struct perf_branch_entry {
__u64 from;
@@ -1182,7 +1417,29 @@ struct perf_branch_entry {
abort:1, /* transaction abort */
cycles:16, /* cycle count to last branch */
type:4, /* branch type */
- reserved:40;
+ spec:2, /* branch speculation info */
+ new_type:4, /* additional branch type */
+ priv:3, /* privilege level */
+ reserved:31;
+};
+
+union perf_sample_weight {
+ __u64 full;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ struct {
+ __u32 var1_dw;
+ __u16 var2_w;
+ __u16 var3_w;
+ };
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ struct {
+ __u16 var3_w;
+ __u16 var2_w;
+ __u32 var1_dw;
+ };
+#else
+#error "Unknown endianness"
+#endif
};
#endif /* _UAPI_LINUX_PERF_EVENT_H */
diff --git a/include/uapi/linux/pfkeyv2.h b/include/uapi/linux/pfkeyv2.h
index d65b11785260..8abae1f6749c 100644
--- a/include/uapi/linux/pfkeyv2.h
+++ b/include/uapi/linux/pfkeyv2.h
@@ -309,6 +309,7 @@ struct sadb_x_filter {
#define SADB_X_AALG_SHA2_512HMAC 7
#define SADB_X_AALG_RIPEMD160HMAC 8
#define SADB_X_AALG_AES_XCBC_MAC 9
+#define SADB_X_AALG_SM3_256HMAC 10
#define SADB_X_AALG_NULL 251 /* kame */
#define SADB_AALG_MAX 251
@@ -329,6 +330,7 @@ struct sadb_x_filter {
#define SADB_X_EALG_AES_GCM_ICV16 20
#define SADB_X_EALG_CAMELLIACBC 22
#define SADB_X_EALG_NULL_AES_GMAC 23
+#define SADB_X_EALG_SM4CBC 24
#define SADB_EALG_MAX 253 /* last EALG */
/* private allocations should use 249-255 (RFC2407) */
#define SADB_X_EALG_SERPENTCBC 252 /* draft-ietf-ipsec-ciph-aes-cbc-00 */
diff --git a/include/uapi/linux/pfrut.h b/include/uapi/linux/pfrut.h
new file mode 100644
index 000000000000..42fa15f8310d
--- /dev/null
+++ b/include/uapi/linux/pfrut.h
@@ -0,0 +1,262 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Platform Firmware Runtime Update header
+ *
+ * Copyright(c) 2021 Intel Corporation. All rights reserved.
+ */
+#ifndef __PFRUT_H__
+#define __PFRUT_H__
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define PFRUT_IOCTL_MAGIC 0xEE
+
+/**
+ * PFRU_IOC_SET_REV - _IOW(PFRUT_IOCTL_MAGIC, 0x01, unsigned int)
+ *
+ * Return:
+ * * 0 - success
+ * * -EFAULT - fail to read the revision id
+ * * -EINVAL - user provides an invalid revision id
+ *
+ * Set the Revision ID for Platform Firmware Runtime Update.
+ */
+#define PFRU_IOC_SET_REV _IOW(PFRUT_IOCTL_MAGIC, 0x01, unsigned int)
+
+/**
+ * PFRU_IOC_STAGE - _IOW(PFRUT_IOCTL_MAGIC, 0x02, unsigned int)
+ *
+ * Return:
+ * * 0 - success
+ * * -EINVAL - stage phase returns invalid result
+ *
+ * Stage a capsule image from the communication buffer and perform authentication.
+ */
+#define PFRU_IOC_STAGE _IOW(PFRUT_IOCTL_MAGIC, 0x02, unsigned int)
+
+/**
+ * PFRU_IOC_ACTIVATE - _IOW(PFRUT_IOCTL_MAGIC, 0x03, unsigned int)
+ *
+ * Return:
+ * * 0 - success
+ * * -EINVAL - activate phase returns invalid result
+ *
+ * Activate a previously staged capsule image.
+ */
+#define PFRU_IOC_ACTIVATE _IOW(PFRUT_IOCTL_MAGIC, 0x03, unsigned int)
+
+/**
+ * PFRU_IOC_STAGE_ACTIVATE - _IOW(PFRUT_IOCTL_MAGIC, 0x04, unsigned int)
+ *
+ * Return:
+ * * 0 - success
+ * * -EINVAL - stage/activate phase returns invalid result.
+ *
+ * Perform both the stage and activation actions.
+ */
+#define PFRU_IOC_STAGE_ACTIVATE _IOW(PFRUT_IOCTL_MAGIC, 0x04, unsigned int)
+
+/**
+ * PFRU_IOC_QUERY_CAP - _IOR(PFRUT_IOCTL_MAGIC, 0x05,
+ * struct pfru_update_cap_info)
+ *
+ * Return:
+ * * 0 - success
+ * * -EINVAL - query phase returns invalid result
+ * * -EFAULT - the result fails to be copied to userspace
+ *
+ * Retrieve information on the Platform Firmware Runtime Update capability.
+ * The information is a struct pfru_update_cap_info.
+ */
+#define PFRU_IOC_QUERY_CAP _IOR(PFRUT_IOCTL_MAGIC, 0x05, struct pfru_update_cap_info)
+
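The five ioctls above compose into a simple update flow. A minimal sketch, assuming the driver's misc-device node name (something like /dev/pfr_update0, which is not defined in this header) and eliding all error handling:

    #include <fcntl.h>
    #include <linux/pfrut.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    static void pfru_update_flow(const char *image, unsigned int len)
    {
        unsigned int rev = 0;
        int fd = open("/dev/pfr_update0", O_RDWR); /* assumed node name */
        struct pfru_update_cap_info cap;

        ioctl(fd, PFRU_IOC_QUERY_CAP, &cap);      /* probe capabilities */
        ioctl(fd, PFRU_IOC_SET_REV, &rev);        /* select revision id */
        write(fd, image, len);                    /* capsule -> comm buffer */
        ioctl(fd, PFRU_IOC_STAGE_ACTIVATE, 0);    /* stage + activate */
        close(fd);
    }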
+/**
+ * struct pfru_payload_hdr - Capsule file payload header.
+ *
+ * @sig: Signature of this capsule file.
+ * @hdr_version: Revision of this header structure.
+ * @hdr_size: Size of this header, including the OemHeader bytes.
+ * @hw_ver: The supported firmware version.
+ * @rt_ver: Version of the code injection image.
+ * @platform_id: A platform-specific GUID specifying the platform that
+ * this capsule image supports.
+ */
+struct pfru_payload_hdr {
+ __u32 sig;
+ __u32 hdr_version;
+ __u32 hdr_size;
+ __u32 hw_ver;
+ __u32 rt_ver;
+ __u8 platform_id[16];
+};
+
+enum pfru_dsm_status {
+ DSM_SUCCEED = 0,
+ DSM_FUNC_NOT_SUPPORT = 1,
+ DSM_INVAL_INPUT = 2,
+ DSM_HARDWARE_ERR = 3,
+ DSM_RETRY_SUGGESTED = 4,
+ DSM_UNKNOWN = 5,
+ DSM_FUNC_SPEC_ERR = 6,
+};
+
+/**
+ * struct pfru_update_cap_info - Runtime update capability information.
+ *
+ * @status: Indicator of whether this query succeeded.
+ * @update_cap: Bitmap indicating which features are supported.
+ * @code_type: A buffer containing an image type GUID.
+ * @fw_version: Platform firmware version.
+ * @code_rt_version: Code injection runtime version for anti-rollback.
+ * @drv_type: A buffer containing an image type GUID.
+ * @drv_rt_version: The version of the driver update runtime code.
+ * @drv_svn: The secure version number (SVN) of the driver update runtime code.
+ * @platform_id: A buffer containing a platform ID GUID.
+ * @oem_id: A buffer containing an OEM ID GUID.
+ * @oem_info_len: Length of the buffer containing the vendor specific information.
+ */
+struct pfru_update_cap_info {
+ __u32 status;
+ __u32 update_cap;
+
+ __u8 code_type[16];
+ __u32 fw_version;
+ __u32 code_rt_version;
+
+ __u8 drv_type[16];
+ __u32 drv_rt_version;
+ __u32 drv_svn;
+
+ __u8 platform_id[16];
+ __u8 oem_id[16];
+
+ __u32 oem_info_len;
+};
+
+/**
+ * struct pfru_com_buf_info - Communication buffer information.
+ *
+ * @status: Indicator of whether this query succeeded.
+ * @ext_status: Implementation specific query result.
+ * @addr_lo: Low 32bit physical address of the communication buffer to hold
+ * a runtime update package.
+ * @addr_hi: High 32bit physical address of the communication buffer to hold
+ * a runtime update package.
+ * @buf_size: Maximum size in bytes of the communication buffer.
+ */
+struct pfru_com_buf_info {
+ __u32 status;
+ __u32 ext_status;
+ __u64 addr_lo;
+ __u64 addr_hi;
+ __u32 buf_size;
+};
+
+/**
+ * struct pfru_updated_result - Platform firmware runtime update result information.
+ * @status: Indicator of whether this update succeeded.
+ * @ext_status: Implementation specific update result.
+ * @low_auth_time: Low 32bit value of image authentication time in nanoseconds.
+ * @high_auth_time: High 32bit value of image authentication time in nanoseconds.
+ * @low_exec_time: Low 32bit value of image execution time in nanoseconds.
+ * @high_exec_time: High 32bit value of image execution time in nanoseconds.
+ */
+struct pfru_updated_result {
+ __u32 status;
+ __u32 ext_status;
+ __u64 low_auth_time;
+ __u64 high_auth_time;
+ __u64 low_exec_time;
+ __u64 high_exec_time;
+};
+
+/**
+ * struct pfrt_log_data_info - Log Data from telemetry service.
+ * @status: Indicator of whether this update succeeded.
+ * @ext_status: Implementation specific update result.
+ * @chunk1_addr_lo: Low 32bit physical address of the telemetry data chunk1
+ * starting address.
+ * @chunk1_addr_hi: High 32bit physical address of the telemetry data chunk1
+ * starting address.
+ * @chunk2_addr_lo: Low 32bit physical address of the telemetry data chunk2
+ * starting address.
+ * @chunk2_addr_hi: High 32bit physical address of the telemetry data chunk2
+ * starting address.
+ * @max_data_size: Maximum supported size of data of all data chunks combined.
+ * @chunk1_size: Data size in bytes of the telemetry data chunk1 buffer.
+ * @chunk2_size: Data size in bytes of the telemetry data chunk2 buffer.
+ * @rollover_cnt: Number of times the telemetry data buffer has been
+ * overwritten since the last telemetry buffer reset.
+ * @reset_cnt: Number of times the telemetry service has been reset; a
+ * reset clears the rollover count and the data chunk buffers.
+ */
+struct pfrt_log_data_info {
+ __u32 status;
+ __u32 ext_status;
+ __u64 chunk1_addr_lo;
+ __u64 chunk1_addr_hi;
+ __u64 chunk2_addr_lo;
+ __u64 chunk2_addr_hi;
+ __u32 max_data_size;
+ __u32 chunk1_size;
+ __u32 chunk2_size;
+ __u32 rollover_cnt;
+ __u32 reset_cnt;
+};
+
+/**
+ * struct pfrt_log_info - Telemetry log information.
+ * @log_level: The telemetry log level.
+ * @log_type: The telemetry log type (history and execution).
+ * @log_revid: The telemetry log revision id.
+ */
+struct pfrt_log_info {
+ __u32 log_level;
+ __u32 log_type;
+ __u32 log_revid;
+};
+
+/**
+ * PFRT_LOG_IOC_SET_INFO - _IOW(PFRUT_IOCTL_MAGIC, 0x06,
+ * struct pfrt_log_info)
+ *
+ * Return:
+ * * 0 - success
+ * * -EFAULT - fail to get the setting parameter
+ * * -EINVAL - fail to set the log level
+ *
+ * Set the PFRT log level and log type. The input information is
+ * a struct pfrt_log_info.
+ */
+#define PFRT_LOG_IOC_SET_INFO _IOW(PFRUT_IOCTL_MAGIC, 0x06, struct pfrt_log_info)
+
+/**
+ * PFRT_LOG_IOC_GET_INFO - _IOR(PFRUT_IOCTL_MAGIC, 0x07,
+ * struct pfrt_log_info)
+ *
+ * Return:
+ * * 0 - success
+ * * -EINVAL - fail to get the log level
+ * * -EFAULT - fail to copy the result back to userspace
+ *
+ * Retrieve log level and log type of the telemetry. The information is
+ * a struct pfrt_log_info.
+ */
+#define PFRT_LOG_IOC_GET_INFO _IOR(PFRUT_IOCTL_MAGIC, 0x07, struct pfrt_log_info)
+
+/**
+ * PFRT_LOG_IOC_GET_DATA_INFO - _IOR(PFRUT_IOCTL_MAGIC, 0x08,
+ * struct pfrt_log_data_info)
+ *
+ * Return:
+ * * 0 - success
+ * * -EINVAL - fail to get the log buffer information
+ * * -EFAULT - fail to copy the log buffer information to userspace
+ *
+ * Retrieve data information about the telemetry. The information
+ * is a struct pfrt_log_data_info.
+ */
+#define PFRT_LOG_IOC_GET_DATA_INFO _IOR(PFRUT_IOCTL_MAGIC, 0x08, struct pfrt_log_data_info)
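The telemetry side follows the same pattern; a brief sketch, again with an assumed node name (e.g. /dev/pfr_telemetry0, not defined in this header) and no error handling:

    #include <fcntl.h>
    #include <linux/pfrut.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    static void pfrt_query_log(void)
    {
        struct pfrt_log_info info;
        struct pfrt_log_data_info data;
        int fd = open("/dev/pfr_telemetry0", O_RDWR); /* assumed node name */

        ioctl(fd, PFRT_LOG_IOC_GET_INFO, &info);      /* level/type/revid */
        ioctl(fd, PFRT_LOG_IOC_GET_DATA_INFO, &data); /* buffer geometry */
        close(fd);
    }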
+
+#endif /* __PFRUT_H__ */
diff --git a/include/uapi/linux/pidfd.h b/include/uapi/linux/pidfd.h
new file mode 100644
index 000000000000..5406fbc13074
--- /dev/null
+++ b/include/uapi/linux/pidfd.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef _UAPI_LINUX_PIDFD_H
+#define _UAPI_LINUX_PIDFD_H
+
+#include <linux/types.h>
+#include <linux/fcntl.h>
+
+/* Flags for pidfd_open(). */
+#define PIDFD_NONBLOCK O_NONBLOCK
+
+#endif /* _UAPI_LINUX_PIDFD_H */
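PIDFD_NONBLOCK makes the new pidfd non-blocking from the start. A minimal sketch using the raw syscall (a glibc wrapper for pidfd_open() may not be present) and poll() to wait for process exit:

    #define _GNU_SOURCE
    #include <linux/pidfd.h>
    #include <poll.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static void wait_for_exit(pid_t pid)
    {
        int pidfd = (int)syscall(SYS_pidfd_open, pid, PIDFD_NONBLOCK);
        struct pollfd pfd = { .fd = pidfd, .events = POLLIN };

        poll(&pfd, 1, -1); /* POLLIN: the target process has exited */
        close(pidfd);
    }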
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 449a63971451..648a82f32666 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -17,12 +17,38 @@ enum {
TCA_ACT_PAD,
TCA_ACT_COOKIE,
TCA_ACT_FLAGS,
+ TCA_ACT_HW_STATS,
+ TCA_ACT_USED_HW_STATS,
+ TCA_ACT_IN_HW_COUNT,
__TCA_ACT_MAX
};
-#define TCA_ACT_FLAGS_NO_PERCPU_STATS 1 /* Don't use percpu allocator for
- * actions stats.
- */
+/* See other TCA_ACT_FLAGS_* flags in include/net/act_api.h. */
+#define TCA_ACT_FLAGS_NO_PERCPU_STATS (1 << 0) /* Don't use percpu allocator for
+ * actions stats.
+ */
+#define TCA_ACT_FLAGS_SKIP_HW (1 << 1) /* don't offload action to HW */
+#define TCA_ACT_FLAGS_SKIP_SW (1 << 2) /* don't use action in SW */
+
+/* tca HW stats type
+ * When the user does not pass the attribute, they do not care.
+ * It is the same as if they had passed the attribute with
+ * all supported bits set.
+ * In case no bits are set, the user is not interested in getting any HW statistics.
+ */
+#define TCA_ACT_HW_STATS_IMMEDIATE (1 << 0) /* Means that in dump, user
+ * gets the current HW stats
+ * state from the device
+ * queried at the dump time.
+ */
+#define TCA_ACT_HW_STATS_DELAYED (1 << 1) /* Means that in dump, user gets
+ * HW stats that might be out of date
+ * for some time, maybe a couple of
+ * seconds. This is the case when the
+ * driver polls for stats updates
+ * periodically or when it gets async
+ * stats updates from the device.
+ */
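On the wire, TCA_ACT_HW_STATS is carried as a struct nla_bitfield32 netlink attribute; a sketch of the payload a user would attach (the netlink message assembly itself depends on the library in use and is omitted):

    #include <linux/netlink.h>
    #include <linux/pkt_cls.h>

    /* Ask for delayed HW stats only; the selector names the bits this
     * request is expressing an opinion about. */
    struct nla_bitfield32 hw_stats = {
        .value = TCA_ACT_HW_STATS_DELAYED,
        .selector = TCA_ACT_HW_STATS_IMMEDIATE | TCA_ACT_HW_STATS_DELAYED,
    };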
#define TCA_ACT_MAX __TCA_ACT_MAX
#define TCA_OLD_COMPAT (TCA_ACT_MAX+1)
@@ -112,6 +138,7 @@ enum tca_id {
TCA_ID_CTINFO,
TCA_ID_MPLS,
TCA_ID_CT,
+ TCA_ID_GATE,
/* other actions go here */
__TCA_ID_MAX = 255
};
@@ -167,6 +194,8 @@ enum {
TCA_POLICE_PAD,
TCA_POLICE_RATE64,
TCA_POLICE_PEAKRATE64,
+ TCA_POLICE_PKTRATE64,
+ TCA_POLICE_PKTBURST64,
__TCA_POLICE_MAX
#define TCA_POLICE_RESULT TCA_POLICE_RESULT
};
@@ -227,7 +256,7 @@ struct tc_u32_sel {
short hoff;
__be32 hmask;
- struct tc_u32_key keys[0];
+ struct tc_u32_key keys[];
};
struct tc_u32_mark {
@@ -239,7 +268,7 @@ struct tc_u32_mark {
struct tc_u32_pcnt {
__u64 rcnt;
__u64 rhit;
- __u64 kcnts[0];
+ __u64 kcnts[];
};
/* Flags */
@@ -553,6 +582,18 @@ enum {
TCA_FLOWER_KEY_CT_LABELS, /* u128 */
TCA_FLOWER_KEY_CT_LABELS_MASK, /* u128 */
+ TCA_FLOWER_KEY_MPLS_OPTS,
+
+ TCA_FLOWER_KEY_HASH, /* u32 */
+ TCA_FLOWER_KEY_HASH_MASK, /* u32 */
+
+ TCA_FLOWER_KEY_NUM_OF_VLANS, /* u8 */
+
+ TCA_FLOWER_KEY_PPPOE_SID, /* be16 */
+ TCA_FLOWER_KEY_PPP_PROTO, /* be16 */
+
+ TCA_FLOWER_KEY_L2TPV3_SID, /* be32 */
+
__TCA_FLOWER_MAX,
};
@@ -563,6 +604,9 @@ enum {
TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED = 1 << 1, /* Part of an existing connection. */
TCA_FLOWER_KEY_CT_FLAGS_RELATED = 1 << 2, /* Related to an established connection. */
TCA_FLOWER_KEY_CT_FLAGS_TRACKED = 1 << 3, /* Conntrack has occurred. */
+ TCA_FLOWER_KEY_CT_FLAGS_INVALID = 1 << 4, /* Conntrack is invalid. */
+ TCA_FLOWER_KEY_CT_FLAGS_REPLY = 1 << 5, /* Packet is in the reply direction. */
+ __TCA_FLOWER_KEY_CT_FLAGS_MAX,
};
enum {
@@ -579,6 +623,10 @@ enum {
* TCA_FLOWER_KEY_ENC_OPT_ERSPAN_
* attributes
*/
+ TCA_FLOWER_KEY_ENC_OPTS_GTP, /* Nested
+ * TCA_FLOWER_KEY_ENC_OPT_GTP_
+ * attributes
+ */
__TCA_FLOWER_KEY_ENC_OPTS_MAX,
};
@@ -618,6 +666,38 @@ enum {
(__TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX - 1)
enum {
+ TCA_FLOWER_KEY_ENC_OPT_GTP_UNSPEC,
+ TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE, /* u8 */
+ TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, /* u8 */
+
+ __TCA_FLOWER_KEY_ENC_OPT_GTP_MAX,
+};
+
+#define TCA_FLOWER_KEY_ENC_OPT_GTP_MAX \
+ (__TCA_FLOWER_KEY_ENC_OPT_GTP_MAX - 1)
+
+enum {
+ TCA_FLOWER_KEY_MPLS_OPTS_UNSPEC,
+ TCA_FLOWER_KEY_MPLS_OPTS_LSE,
+ __TCA_FLOWER_KEY_MPLS_OPTS_MAX,
+};
+
+#define TCA_FLOWER_KEY_MPLS_OPTS_MAX (__TCA_FLOWER_KEY_MPLS_OPTS_MAX - 1)
+
+enum {
+ TCA_FLOWER_KEY_MPLS_OPT_LSE_UNSPEC,
+ TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
+ TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
+ TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
+ TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
+ TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
+ __TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX,
+};
+
+#define TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX \
+ (__TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX - 1)
+
+enum {
TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0),
TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),
};
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index bbe791b24168..000eec106856 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -256,6 +256,9 @@ enum {
TCA_RED_PARMS,
TCA_RED_STAB,
TCA_RED_MAX_P,
+ TCA_RED_FLAGS, /* bitfield32 */
+ TCA_RED_EARLY_DROP_BLOCK, /* u32 */
+ TCA_RED_MARK_BLOCK, /* u32 */
__TCA_RED_MAX,
};
@@ -268,12 +271,28 @@ struct tc_red_qopt {
unsigned char Wlog; /* log(W) */
unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
unsigned char Scell_log; /* cell size for idle damping */
+
+ /* This field can be used for flags that a RED-like qdisc has
+ * historically supported. E.g. when configuring RED, it can be used for
+ * ECN, HARDDROP and ADAPTATIVE. For SFQ it can be used for ECN,
+ * HARDDROP. Etc. Because this field has not been validated, and is
+ * copied back on dump, any bits besides those to which a given qdisc
+ * has assigned a historical meaning need to be considered for free use
+ * by userspace tools.
+ *
+ * Any further flags need to be passed differently, e.g. through an
+ * attribute (such as TCA_RED_FLAGS above). Such an attribute should allow
+ * passing both recent and historic flags in one value.
+ */
unsigned char flags;
#define TC_RED_ECN 1
#define TC_RED_HARDDROP 2
#define TC_RED_ADAPTATIVE 4
+#define TC_RED_NODROP 8
};
+#define TC_RED_HISTORIC_FLAGS (TC_RED_ECN | TC_RED_HARDDROP | TC_RED_ADAPTATIVE)
+
struct tc_red_xstats {
__u32 early; /* Early drops */
__u32 pdrop; /* Drops due to queue limits */
@@ -415,6 +434,7 @@ enum {
TCA_HTB_RATE64,
TCA_HTB_CEIL64,
TCA_HTB_PAD,
+ TCA_HTB_OFFLOAD,
__TCA_HTB_MAX,
};
@@ -807,6 +827,8 @@ struct tc_codel_xstats {
/* FQ_CODEL */
+#define FQ_CODEL_QUANTUM_MAX (1 << 20)
+
enum {
TCA_FQ_CODEL_UNSPEC,
TCA_FQ_CODEL_TARGET,
@@ -818,6 +840,8 @@ enum {
TCA_FQ_CODEL_CE_THRESHOLD,
TCA_FQ_CODEL_DROP_BATCH_SIZE,
TCA_FQ_CODEL_MEMORY_LIMIT,
+ TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR,
+ TCA_FQ_CODEL_CE_THRESHOLD_MASK,
__TCA_FQ_CODEL_MAX
};
@@ -894,6 +918,12 @@ enum {
TCA_FQ_CE_THRESHOLD, /* DCTCP-like CE-marking threshold */
+ TCA_FQ_TIMER_SLACK, /* timer slack */
+
+ TCA_FQ_HORIZON, /* time horizon in us */
+
+ TCA_FQ_HORIZON_DROP, /* drop packets beyond horizon, or cap their EDT */
+
__TCA_FQ_MAX
};
@@ -913,6 +943,8 @@ struct tc_fq_qd_stats {
__u32 throttled_flows;
__u32 unthrottle_latency_ns;
__u64 ce_mark; /* packets above ce_threshold */
+ __u64 horizon_drops;
+ __u64 horizon_caps;
};
/* Heavy-Hitter Filter */
@@ -1197,8 +1229,18 @@ enum {
* [TCA_TAPRIO_ATTR_SCHED_ENTRY_INTERVAL]
*/
-#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST BIT(0)
-#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD BIT(1)
+#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST _BITUL(0)
+#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD _BITUL(1)
+
+enum {
+ TCA_TAPRIO_TC_ENTRY_UNSPEC,
+ TCA_TAPRIO_TC_ENTRY_INDEX, /* u32 */
+ TCA_TAPRIO_TC_ENTRY_MAX_SDU, /* u32 */
+
+ /* add new constants above here */
+ __TCA_TAPRIO_TC_ENTRY_CNT,
+ TCA_TAPRIO_TC_ENTRY_MAX = (__TCA_TAPRIO_TC_ENTRY_CNT - 1)
+};
enum {
TCA_TAPRIO_ATTR_UNSPEC,
@@ -1213,6 +1255,7 @@ enum {
TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION, /* s64 */
TCA_TAPRIO_ATTR_FLAGS, /* u32 */
TCA_TAPRIO_ATTR_TXTIME_DELAY, /* u32 */
+ TCA_TAPRIO_ATTR_TC_ENTRY, /* nest */
__TCA_TAPRIO_ATTR_MAX,
};
diff --git a/include/uapi/linux/ppp-ioctl.h b/include/uapi/linux/ppp-ioctl.h
index 7bd2a5a75348..1cc5ce0ae062 100644
--- a/include/uapi/linux/ppp-ioctl.h
+++ b/include/uapi/linux/ppp-ioctl.h
@@ -115,6 +115,8 @@ struct pppol2tp_ioc_stats {
#define PPPIOCATTCHAN _IOW('t', 56, int) /* attach to ppp channel */
#define PPPIOCGCHAN _IOR('t', 55, int) /* get ppp channel number */
#define PPPIOCGL2TPSTATS _IOR('t', 54, struct pppol2tp_ioc_stats)
+#define PPPIOCBRIDGECHAN _IOW('t', 53, int) /* bridge one channel to another */
+#define PPPIOCUNBRIDGECHAN _IO('t', 52) /* unbridge channel */
#define SIOCGPPPSTATS (SIOCDEVPRIVATE + 0)
#define SIOCGPPPVER (SIOCDEVPRIVATE + 1) /* NEVER change this!! */
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 07b4f8131e36..a5e06dcbba13 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -213,6 +213,7 @@ struct prctl_mm_map {
/* Speculation control variants */
# define PR_SPEC_STORE_BYPASS 0
# define PR_SPEC_INDIRECT_BRANCH 1
+# define PR_SPEC_L1D_FLUSH 2
/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
# define PR_SPEC_NOT_AFFECTED 0
# define PR_SPEC_PRCTL (1UL << 0)
@@ -233,9 +234,54 @@ struct prctl_mm_map {
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
+/* MTE tag check fault modes */
+# define PR_MTE_TCF_NONE 0UL
+# define PR_MTE_TCF_SYNC (1UL << 1)
+# define PR_MTE_TCF_ASYNC (1UL << 2)
+# define PR_MTE_TCF_MASK (PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC)
+/* MTE tag inclusion mask */
+# define PR_MTE_TAG_SHIFT 3
+# define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
+/* Unused; kept only for source compatibility */
+# define PR_MTE_TCF_SHIFT 1
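On arm64 these MTE bits are passed through PR_SET_TAGGED_ADDR_CTRL; a sketch enabling synchronous tag-check faults with tags 1-15 allowed in the inclusion mask (whether the CPU and kernel actually support MTE is runtime-dependent):

    #include <sys/prctl.h>

    static int enable_mte_sync(void)
    {
        return prctl(PR_SET_TAGGED_ADDR_CTRL,
                     PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
                     (0xfffeUL << PR_MTE_TAG_SHIFT),
                     0, 0, 0);
    }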
/* Control reclaim behavior when allocating memory */
#define PR_SET_IO_FLUSHER 57
#define PR_GET_IO_FLUSHER 58
+/* Dispatch syscalls to a userspace handler */
+#define PR_SET_SYSCALL_USER_DISPATCH 59
+# define PR_SYS_DISPATCH_OFF 0
+# define PR_SYS_DISPATCH_ON 1
+/* The control values for the user space selector when dispatch is enabled */
+# define SYSCALL_DISPATCH_FILTER_ALLOW 0
+# define SYSCALL_DISPATCH_FILTER_BLOCK 1
+
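A sketch of how these pieces fit together: syscalls issued outside a designated code region raise SIGSYS while the selector byte says BLOCK (trusted_start/trusted_len are placeholders for that region, e.g. an emulator's own code):

    #include <sys/prctl.h>

    static volatile char selector = SYSCALL_DISPATCH_FILTER_ALLOW;

    static void enable_dispatch(unsigned long trusted_start,
                                unsigned long trusted_len)
    {
        prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON,
              trusted_start, trusted_len, &selector);
        selector = SYSCALL_DISPATCH_FILTER_BLOCK; /* start intercepting */
    }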
+/* Set/get enabled arm64 pointer authentication keys */
+#define PR_PAC_SET_ENABLED_KEYS 60
+#define PR_PAC_GET_ENABLED_KEYS 61
+
+/* Request the scheduler to share a core */
+#define PR_SCHED_CORE 62
+# define PR_SCHED_CORE_GET 0
+# define PR_SCHED_CORE_CREATE 1 /* create unique core_sched cookie */
+# define PR_SCHED_CORE_SHARE_TO 2 /* push core_sched cookie to pid */
+# define PR_SCHED_CORE_SHARE_FROM 3 /* pull core_sched cookie to pid */
+# define PR_SCHED_CORE_MAX 4
+# define PR_SCHED_CORE_SCOPE_THREAD 0
+# define PR_SCHED_CORE_SCOPE_THREAD_GROUP 1
+# define PR_SCHED_CORE_SCOPE_PROCESS_GROUP 2
+
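As a usage sketch, a process can create a cookie for its own thread group and push it to another task so both may share an SMT core (helper_pid is a placeholder):

    #include <sys/prctl.h>
    #include <sys/types.h>

    static void share_core_cookie(pid_t helper_pid)
    {
        prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0,
              PR_SCHED_CORE_SCOPE_THREAD_GROUP, 0);
        prctl(PR_SCHED_CORE, PR_SCHED_CORE_SHARE_TO, helper_pid,
              PR_SCHED_CORE_SCOPE_THREAD_GROUP, 0);
    }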
+/* arm64 Scalable Matrix Extension controls */
+/* Flag values must be in sync with SVE versions */
+#define PR_SME_SET_VL 63 /* set task vector length */
+# define PR_SME_SET_VL_ONEXEC (1 << 18) /* defer effect until exec */
+#define PR_SME_GET_VL 64 /* get task vector length */
+/* Bits common to PR_SME_SET_VL and PR_SME_GET_VL */
+# define PR_SME_VL_LEN_MASK 0xffff
+# define PR_SME_VL_INHERIT (1 << 17) /* inherit across exec */
+
+#define PR_SET_VMA 0x53564d41
+# define PR_SET_VMA_ANON_NAME 0
+
#endif /* _LINUX_PRCTL_H */
diff --git a/include/uapi/linux/psample.h b/include/uapi/linux/psample.h
index ce1116cff53d..e585db5bf2d2 100644
--- a/include/uapi/linux/psample.h
+++ b/include/uapi/linux/psample.h
@@ -3,7 +3,6 @@
#define __UAPI_PSAMPLE_H
enum {
- /* sampled packet metadata */
PSAMPLE_ATTR_IIFINDEX,
PSAMPLE_ATTR_OIFINDEX,
PSAMPLE_ATTR_ORIGSIZE,
@@ -11,9 +10,15 @@ enum {
PSAMPLE_ATTR_GROUP_SEQ,
PSAMPLE_ATTR_SAMPLE_RATE,
PSAMPLE_ATTR_DATA,
-
- /* commands attributes */
PSAMPLE_ATTR_GROUP_REFCOUNT,
+ PSAMPLE_ATTR_TUNNEL,
+
+ PSAMPLE_ATTR_PAD,
+ PSAMPLE_ATTR_OUT_TC, /* u16 */
+ PSAMPLE_ATTR_OUT_TC_OCC, /* u64, bytes */
+ PSAMPLE_ATTR_LATENCY, /* u64, nanoseconds */
+ PSAMPLE_ATTR_TIMESTAMP, /* u64, nanoseconds */
+ PSAMPLE_ATTR_PROTO, /* u16 */
__PSAMPLE_ATTR_MAX
};
@@ -25,6 +30,27 @@ enum psample_command {
PSAMPLE_CMD_DEL_GROUP,
};
+enum psample_tunnel_key_attr {
+ PSAMPLE_TUNNEL_KEY_ATTR_ID, /* be64 Tunnel ID */
+ PSAMPLE_TUNNEL_KEY_ATTR_IPV4_SRC, /* be32 src IP address. */
+ PSAMPLE_TUNNEL_KEY_ATTR_IPV4_DST, /* be32 dst IP address. */
+ PSAMPLE_TUNNEL_KEY_ATTR_TOS, /* u8 Tunnel IP ToS. */
+ PSAMPLE_TUNNEL_KEY_ATTR_TTL, /* u8 Tunnel IP TTL. */
+ PSAMPLE_TUNNEL_KEY_ATTR_DONT_FRAGMENT, /* No argument, set DF. */
+ PSAMPLE_TUNNEL_KEY_ATTR_CSUM, /* No argument. CSUM packet. */
+ PSAMPLE_TUNNEL_KEY_ATTR_OAM, /* No argument. OAM frame. */
+ PSAMPLE_TUNNEL_KEY_ATTR_GENEVE_OPTS, /* Array of Geneve options. */
+ PSAMPLE_TUNNEL_KEY_ATTR_TP_SRC, /* be16 src Transport Port. */
+ PSAMPLE_TUNNEL_KEY_ATTR_TP_DST, /* be16 dst Transport Port. */
+ PSAMPLE_TUNNEL_KEY_ATTR_VXLAN_OPTS, /* Nested VXLAN opts* */
+ PSAMPLE_TUNNEL_KEY_ATTR_IPV6_SRC, /* struct in6_addr src IPv6 address. */
+ PSAMPLE_TUNNEL_KEY_ATTR_IPV6_DST, /* struct in6_addr dst IPv6 address. */
+ PSAMPLE_TUNNEL_KEY_ATTR_PAD,
+ PSAMPLE_TUNNEL_KEY_ATTR_ERSPAN_OPTS, /* struct erspan_metadata */
+ PSAMPLE_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE, /* No argument. IPV4_INFO_BRIDGE mode.*/
+ __PSAMPLE_TUNNEL_KEY_ATTR_MAX
+};
+
/* Can be overridden at runtime by module option */
#define PSAMPLE_ATTR_MAX (__PSAMPLE_ATTR_MAX - 1)
diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h
index 2fcad1dd0b0e..3511095c2702 100644
--- a/include/uapi/linux/psci.h
+++ b/include/uapi/linux/psci.h
@@ -48,12 +48,26 @@
#define PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU PSCI_0_2_FN64(7)
#define PSCI_1_0_FN_PSCI_FEATURES PSCI_0_2_FN(10)
+#define PSCI_1_0_FN_CPU_FREEZE PSCI_0_2_FN(11)
+#define PSCI_1_0_FN_CPU_DEFAULT_SUSPEND PSCI_0_2_FN(12)
+#define PSCI_1_0_FN_NODE_HW_STATE PSCI_0_2_FN(13)
#define PSCI_1_0_FN_SYSTEM_SUSPEND PSCI_0_2_FN(14)
#define PSCI_1_0_FN_SET_SUSPEND_MODE PSCI_0_2_FN(15)
+#define PSCI_1_0_FN_STAT_RESIDENCY PSCI_0_2_FN(16)
+#define PSCI_1_0_FN_STAT_COUNT PSCI_0_2_FN(17)
+
#define PSCI_1_1_FN_SYSTEM_RESET2 PSCI_0_2_FN(18)
+#define PSCI_1_1_FN_MEM_PROTECT PSCI_0_2_FN(19)
+#define PSCI_1_1_FN_MEM_PROTECT_CHECK_RANGE PSCI_0_2_FN(19)
+#define PSCI_1_0_FN64_CPU_DEFAULT_SUSPEND PSCI_0_2_FN64(12)
+#define PSCI_1_0_FN64_NODE_HW_STATE PSCI_0_2_FN64(13)
#define PSCI_1_0_FN64_SYSTEM_SUSPEND PSCI_0_2_FN64(14)
+#define PSCI_1_0_FN64_STAT_RESIDENCY PSCI_0_2_FN64(16)
+#define PSCI_1_0_FN64_STAT_COUNT PSCI_0_2_FN64(17)
+
#define PSCI_1_1_FN64_SYSTEM_RESET2 PSCI_0_2_FN64(18)
+#define PSCI_1_1_FN64_MEM_PROTECT_CHECK_RANGE PSCI_0_2_FN64(19)
/* PSCI v0.2 power state encoding for CPU_SUSPEND function */
#define PSCI_0_2_POWER_STATE_ID_MASK 0xffff
@@ -82,6 +96,10 @@
#define PSCI_0_2_TOS_UP_NO_MIGRATE 1
#define PSCI_0_2_TOS_MP 2
+/* PSCI v1.1 reset type encoding for SYSTEM_RESET2 */
+#define PSCI_1_1_RESET_TYPE_SYSTEM_WARM_RESET 0
+#define PSCI_1_1_RESET_TYPE_VENDOR_START 0x80000000U
+
/* PSCI version decoding (independent of PSCI version) */
#define PSCI_VERSION_MAJOR_SHIFT 16
#define PSCI_VERSION_MINOR_MASK \
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
index 0549a5c622bf..91b4c63d5cbf 100644
--- a/include/uapi/linux/psp-sev.h
+++ b/include/uapi/linux/psp-sev.h
@@ -83,6 +83,8 @@ struct sev_user_data_status {
__u32 guest_count; /* Out */
} __packed;
+#define SEV_STATUS_FLAGS_CONFIG_ES 0x0100
+
/**
* struct sev_user_data_pek_csr - PEK_CSR command parameters
*
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h
index 9dc9d0079e98..1d108d597f66 100644
--- a/include/uapi/linux/ptp_clock.h
+++ b/include/uapi/linux/ptp_clock.h
@@ -53,12 +53,16 @@
/*
* Bits of the ptp_perout_request.flags field:
*/
-#define PTP_PEROUT_ONE_SHOT (1<<0)
+#define PTP_PEROUT_ONE_SHOT (1<<0)
+#define PTP_PEROUT_DUTY_CYCLE (1<<1)
+#define PTP_PEROUT_PHASE (1<<2)
/*
* flag fields valid for the new PTP_PEROUT_REQUEST2 ioctl.
*/
-#define PTP_PEROUT_VALID_FLAGS (PTP_PEROUT_ONE_SHOT)
+#define PTP_PEROUT_VALID_FLAGS (PTP_PEROUT_ONE_SHOT | \
+ PTP_PEROUT_DUTY_CYCLE | \
+ PTP_PEROUT_PHASE)
/*
* No flags are valid for the original PTP_PEROUT_REQUEST ioctl
@@ -89,7 +93,9 @@ struct ptp_clock_caps {
int n_pins; /* Number of input/output pins. */
/* Whether the clock supports precise system-device cross timestamps */
int cross_timestamping;
- int rsv[13]; /* Reserved for future use. */
+ /* Whether the clock supports adjust phase */
+ int adjust_phase;
+ int rsv[12]; /* Reserved for future use. */
};
struct ptp_extts_request {
@@ -99,11 +105,33 @@ struct ptp_extts_request {
};
struct ptp_perout_request {
- struct ptp_clock_time start; /* Absolute start time. */
+ union {
+ /*
+ * Absolute start time.
+ * Valid only if (flags & PTP_PEROUT_PHASE) is unset.
+ */
+ struct ptp_clock_time start;
+ /*
+ * Phase offset. The signal should start toggling at an
+ * unspecified integer multiple of the period, plus this value.
+ * The start time should be "as soon as possible".
+ * Valid only if (flags & PTP_PEROUT_PHASE) is set.
+ */
+ struct ptp_clock_time phase;
+ };
struct ptp_clock_time period; /* Desired period, zero means disable. */
unsigned int index; /* Which channel to configure. */
unsigned int flags;
- unsigned int rsv[4]; /* Reserved for future use. */
+ union {
+ /*
+ * The "on" time of the signal.
+ * Must be lower than the period.
+ * Valid only if (flags & PTP_PEROUT_DUTY_CYCLE) is set.
+ */
+ struct ptp_clock_time on;
+ /* Reserved for future use. */
+ unsigned int rsv[4];
+ };
};
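A sketch of programming a 1 Hz output with a 100 ms duty cycle through the PTP_PEROUT_REQUEST2 ioctl (defined elsewhere in this header); picking a valid start time is device-specific and shown as a placeholder:

    #include <linux/ptp_clock.h>
    #include <sys/ioctl.h>

    static int enable_pps_out(int ptp_fd, __s64 start_sec /* placeholder */)
    {
        struct ptp_perout_request req = {
            .start = { .sec = start_sec, .nsec = 0 },
            .period = { .sec = 1, .nsec = 0 },        /* 1 Hz */
            .index = 0,
            .flags = PTP_PEROUT_DUTY_CYCLE,
            .on = { .sec = 0, .nsec = 100000000 },    /* 100 ms high */
        };

        return ioctl(ptp_fd, PTP_PEROUT_REQUEST2, &req);
    }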
#define PTP_MAX_SAMPLES 25 /* Maximum allowed offset measurement samples. */
diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h
index a71b6e3b03eb..195ae64a8c87 100644
--- a/include/uapi/linux/ptrace.h
+++ b/include/uapi/linux/ptrace.h
@@ -81,7 +81,8 @@ struct seccomp_metadata {
struct ptrace_syscall_info {
__u8 op; /* PTRACE_SYSCALL_INFO_* */
- __u32 arch __attribute__((__aligned__(sizeof(__u32))));
+ __u8 pad[3];
+ __u32 arch;
__u64 instruction_pointer;
__u64 stack_pointer;
union {
@@ -101,9 +102,19 @@ struct ptrace_syscall_info {
};
};
+#define PTRACE_GET_RSEQ_CONFIGURATION 0x420f
+
+struct ptrace_rseq_configuration {
+ __u64 rseq_abi_pointer;
+ __u32 rseq_abi_size;
+ __u32 signature;
+ __u32 flags;
+ __u32 pad;
+};
+
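A sketch of the call: by this request's convention, addr carries the userspace buffer size, data points at the buffer, and the return value is the size of the kernel's structure (this pairing is an assumption worth verifying against ptrace(2)). Including sys/ptrace.h before linux/ptrace.h avoids macro/enum clashes:

    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <linux/ptrace.h>
    #include <stdio.h>

    static void dump_rseq(pid_t pid)
    {
        struct ptrace_rseq_configuration conf;
        long ret = ptrace(PTRACE_GET_RSEQ_CONFIGURATION, pid,
                          sizeof(conf), &conf);

        if (ret >= 0)
            printf("rseq abi @ 0x%llx, sig 0x%x\n",
                   (unsigned long long)conf.rseq_abi_pointer,
                   conf.signature);
    }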
/*
* These values are stored in task->ptrace_message
- * by tracehook_report_syscall_* to describe the current syscall-stop.
+ * by ptrace_stop to describe the current syscall-stop.
*/
#define PTRACE_EVENTMSG_SYSCALL_ENTRY 1
#define PTRACE_EVENTMSG_SYSCALL_EXIT 2
diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
index 1f2d8c81f0e0..6c0aa577730f 100644
--- a/include/uapi/linux/raid/md_p.h
+++ b/include/uapi/linux/raid/md_p.h
@@ -123,7 +123,7 @@ typedef struct mdp_device_descriptor_s {
/*
* Notes:
- * - if an array is being reshaped (restriped) in order to change the
+ * - if an array is being reshaped (restriped) in order to change
* the number of active devices in the array, 'raid_disks' will be
* the larger of the old and new numbers. 'delta_disks' will
* be the "new - old". So if +ve, raid_disks is the new value, and
@@ -303,7 +303,7 @@ struct mdp_superblock_1 {
* into the 'roles' value. If a device is spare or faulty, then it doesn't
* have a meaningful role.
*/
- __le16 dev_roles[0]; /* role in array, or 0xffff for a spare, or 0xfffe for faulty */
+ __le16 dev_roles[]; /* role in array, or 0xffff for a spare, or 0xfffe for faulty */
};
/* feature_map bits */
diff --git a/include/uapi/linux/random.h b/include/uapi/linux/random.h
index dcc1b3e6106f..e744c23582eb 100644
--- a/include/uapi/linux/random.h
+++ b/include/uapi/linux/random.h
@@ -41,7 +41,7 @@
struct rand_pool_info {
int entropy_count;
int buf_size;
- __u32 buf[0];
+ __u32 buf[];
};
/*
diff --git a/include/uapi/linux/raw.h b/include/uapi/linux/raw.h
deleted file mode 100644
index dc96dda479d6..000000000000
--- a/include/uapi/linux/raw.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __LINUX_RAW_H
-#define __LINUX_RAW_H
-
-#include <linux/types.h>
-
-#define RAW_SETBIND _IO( 0xac, 0 )
-#define RAW_GETBIND _IO( 0xac, 1 )
-
-struct raw_config_request
-{
- int raw_minor;
- __u64 block_major;
- __u64 block_minor;
-};
-
-#define MAX_RAW_MINORS CONFIG_MAX_RAW_DEVS
-
-#endif /* __LINUX_RAW_H */
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index cba368e55863..c21edb966c19 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -64,10 +64,12 @@
/* supported values for SO_RDS_TRANSPORT */
#define RDS_TRANS_IB 0
-#define RDS_TRANS_IWARP 1
+#define RDS_TRANS_GAP 1
#define RDS_TRANS_TCP 2
#define RDS_TRANS_COUNT 3
#define RDS_TRANS_NONE (~0)
+/* don't use RDS_TRANS_IWARP - it is deprecated */
+#define RDS_TRANS_IWARP RDS_TRANS_GAP
/* IOCTLS commands for SOL_RDS */
#define SIOCRDSSETTOS (SIOCPROTOPRIVATE)
diff --git a/include/uapi/linux/reiserfs_xattr.h b/include/uapi/linux/reiserfs_xattr.h
index 28f10842f047..503ad018ce5b 100644
--- a/include/uapi/linux/reiserfs_xattr.h
+++ b/include/uapi/linux/reiserfs_xattr.h
@@ -19,7 +19,7 @@ struct reiserfs_xattr_header {
struct reiserfs_security_handle {
const char *name;
void *value;
- size_t length;
+ __kernel_size_t length;
};
#endif /* _LINUX_REISERFS_XATTR_H */
diff --git a/include/uapi/linux/remoteproc_cdev.h b/include/uapi/linux/remoteproc_cdev.h
new file mode 100644
index 000000000000..c43768e4b0dc
--- /dev/null
+++ b/include/uapi/linux/remoteproc_cdev.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * IOCTLs for Remoteproc's character device interface.
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _UAPI_REMOTEPROC_CDEV_H_
+#define _UAPI_REMOTEPROC_CDEV_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define RPROC_MAGIC 0xB7
+
+/*
+ * The RPROC_SET_SHUTDOWN_ON_RELEASE ioctl allows one to enable/disable the automatic shutdown of a
+ * remote processor when the controlling userspace closes the char device interface.
+ *
+ * input parameter: integer
+ * 0 : disable automatic shutdown
+ * other : enable automatic shutdown
+ */
+#define RPROC_SET_SHUTDOWN_ON_RELEASE _IOW(RPROC_MAGIC, 1, __s32)
+
+/*
+ * The RPROC_GET_SHUTDOWN_ON_RELEASE ioctl gets information about whether the automatic shutdown of
+ * a remote processor is enabled or disabled when the controlling userspace closes the char device
+ * interface.
+ *
+ * output parameter: integer
+ * 0 : automatic shutdown disabled
+ * other : automatic shutdown enabled
+ */
+#define RPROC_GET_SHUTDOWN_ON_RELEASE _IOR(RPROC_MAGIC, 2, __s32)
+
+#endif
diff --git a/include/uapi/linux/resource.h b/include/uapi/linux/resource.h
index 74ef57b38f9f..ac5d6a3031db 100644
--- a/include/uapi/linux/resource.h
+++ b/include/uapi/linux/resource.h
@@ -66,10 +66,17 @@ struct rlimit64 {
#define _STK_LIM (8*1024*1024)
/*
- * GPG2 wants 64kB of mlocked memory, to make sure pass phrases
- * and other sensitive information are never written to disk.
+ * Limit the amount of locked memory by some sane default:
+ * root can always increase this limit if needed.
+ *
+ * The main use-cases are (1) preventing sensitive memory
+ * from being swapped; (2) real-time operations; (3) via
+ * IORING_REGISTER_BUFFERS.
+ *
+ * The first two don't need much. The latter will take as
+ * much as it can get. 8MB is a reasonably sane default.
*/
-#define MLOCK_LIMIT ((PAGE_SIZE > 64*1024) ? PAGE_SIZE : 64*1024)
+#define MLOCK_LIMIT (8*1024*1024)
/*
* Due to binary compatibility, the actual resource numbers
diff --git a/include/uapi/linux/rfkill.h b/include/uapi/linux/rfkill.h
index 2e00dcebebd0..db6c8588c1d0 100644
--- a/include/uapi/linux/rfkill.h
+++ b/include/uapi/linux/rfkill.h
@@ -70,6 +70,16 @@ enum rfkill_operation {
};
/**
+ * enum rfkill_hard_block_reasons - hard block reasons
+ * @RFKILL_HARD_BLOCK_SIGNAL: the hardware rfkill signal is active
+ * @RFKILL_HARD_BLOCK_NOT_OWNER: the NIC is not owned by the host
+ */
+enum rfkill_hard_block_reasons {
+ RFKILL_HARD_BLOCK_SIGNAL = 1 << 0,
+ RFKILL_HARD_BLOCK_NOT_OWNER = 1 << 1,
+};
+
+/**
* struct rfkill_event - events for userspace on /dev/rfkill
* @idx: index of dev rfkill
* @type: type of the rfkill struct
@@ -84,27 +94,97 @@ struct rfkill_event {
__u32 idx;
__u8 type;
__u8 op;
- __u8 soft, hard;
+ __u8 soft;
+ __u8 hard;
} __attribute__((packed));
-/*
- * We are planning to be backward and forward compatible with changes
- * to the event struct, by adding new, optional, members at the end.
- * When reading an event (whether the kernel from userspace or vice
- * versa) we need to accept anything that's at least as large as the
- * version 1 event size, but might be able to accept other sizes in
- * the future.
- *
- * One exception is the kernel -- we already have two event sizes in
- * that we've made the 'hard' member optional since our only option
- * is to ignore it anyway.
+/**
+ * struct rfkill_event_ext - events for userspace on /dev/rfkill
+ * @idx: index of dev rfkill
+ * @type: type of the rfkill struct
+ * @op: operation code
+ * @hard: hard state (0/1)
+ * @soft: soft state (0/1)
+ * @hard_block_reasons: valid if hard is set. One or several reasons from
+ * &enum rfkill_hard_block_reasons.
+ *
+ * Structure used for userspace communication on /dev/rfkill,
+ * used for events from the kernel and control to the kernel.
+ *
+ * See the extensibility docs below.
+ */
+struct rfkill_event_ext {
+ __u32 idx;
+ __u8 type;
+ __u8 op;
+ __u8 soft;
+ __u8 hard;
+
+ /*
+ * older kernels will accept/send only up to this point,
+ * and if extended further up to any chunk marked below
+ */
+
+ __u8 hard_block_reasons;
+} __attribute__((packed));
+
+/**
+ * DOC: Extensibility
+ *
+ * Originally, we had planned to allow backward and forward compatible
+ * changes by just adding fields at the end of the structure that are
+ * then not reported on older kernels on read(), and not written to by
+ * older kernels on write(), with the kernel reporting the size it did
+ * accept as the result.
+ *
+ * This would have allowed userspace to detect on read() and write()
+ * which kernel structure version it was dealing with, and if it was just
+ * recompiled it would have gotten the new fields, but obviously not
+ * accessed them, and things should've continued to work.
+ *
+ * Unfortunately, while actually exercising this mechanism to add the
+ * hard block reasons field, we found that userspace (notably systemd)
+ * did all kinds of fun things not in line with this scheme:
+ *
+ * 1. treat the (expected) short writes as an error;
+ * 2. ask to read sizeof(struct rfkill_event) but then compare the
+ * actual return value to RFKILL_EVENT_SIZE_V1 and treat any
+ * mismatch as an error.
+ *
+ * As a consequence, just recompiling with a new struct version caused
+ * things to no longer work correctly on old and new kernels.
+ *
+ * Hence, we've rolled back &struct rfkill_event to the original version
+ * and added &struct rfkill_event_ext. This effectively reverts to the
+ * old behaviour for all userspace, unless it explicitly opts in to the
+ * rules outlined here by using the new &struct rfkill_event_ext.
+ *
+ * Additionally, some other userspace (bluez, g-s-d) was reading with a
+ * large size but as streaming reads rather than message-based, or with
+ * too strict checks for the returned size. So eventually, we completely
+ * reverted this, and extended messages need to be opted in to by using
+ * an ioctl:
+ *
+ * ioctl(fd, RFKILL_IOCTL_MAX_SIZE, sizeof(struct rfkill_event_ext));
+ *
+ * Userspace using &struct rfkill_event_ext and the ioctl must adhere to
+ * the following rules:
+ *
+ * 1. accept short writes, optionally using them to detect that it's
+ * running on an older kernel;
+ * 2. accept short reads, knowing that this means it's running on an
+ * older kernel;
+ * 3. treat reads that are as long as requested as acceptable, not
+ * checking against RFKILL_EVENT_SIZE_V1 or such.
*/
-#define RFKILL_EVENT_SIZE_V1 8
+#define RFKILL_EVENT_SIZE_V1 sizeof(struct rfkill_event)
/* ioctl for turning off rfkill-input (if present) */
#define RFKILL_IOC_MAGIC 'R'
#define RFKILL_IOC_NOINPUT 1
#define RFKILL_IOCTL_NOINPUT _IO(RFKILL_IOC_MAGIC, RFKILL_IOC_NOINPUT)
+#define RFKILL_IOC_MAX_SIZE 2
+#define RFKILL_IOCTL_MAX_SIZE _IOW(RFKILL_IOC_MAGIC, RFKILL_IOC_MAX_SIZE, __u32)
/* and that's all userspace gets */
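Under the opt-in rules above, a reader of /dev/rfkill that wants the extended events would look roughly like this sketch (the ioctl argument is assumed to be passed by pointer, per the _IOW encoding):

    #include <fcntl.h>
    #include <linux/rfkill.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    static void read_ext_events(void)
    {
        struct rfkill_event_ext ev;
        __u32 max = sizeof(ev);
        int fd = open("/dev/rfkill", O_RDONLY);

        ioctl(fd, RFKILL_IOCTL_MAX_SIZE, &max); /* opt in to extended events */

        /* Rule 2: a short read just means an older kernel. */
        while (read(fd, &ev, sizeof(ev)) >= (ssize_t)RFKILL_EVENT_SIZE_V1)
            ; /* consume ev.hard_block_reasons when the read was long enough */
    }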
diff --git a/include/uapi/linux/rkisp1-config.h b/include/uapi/linux/rkisp1-config.h
new file mode 100644
index 000000000000..730673ecc63d
--- /dev/null
+++ b/include/uapi/linux/rkisp1-config.h
@@ -0,0 +1,995 @@
+/* SPDX-License-Identifier: ((GPL-2.0+ WITH Linux-syscall-note) OR MIT) */
+/*
+ * Rockchip ISP1 userspace API
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+
+#ifndef _UAPI_RKISP1_CONFIG_H
+#define _UAPI_RKISP1_CONFIG_H
+
+#include <linux/types.h>
+
+/* Defect Pixel Cluster Detection */
+#define RKISP1_CIF_ISP_MODULE_DPCC (1U << 0)
+/* Black Level Subtraction */
+#define RKISP1_CIF_ISP_MODULE_BLS (1U << 1)
+/* Sensor De-gamma */
+#define RKISP1_CIF_ISP_MODULE_SDG (1U << 2)
+/* Histogram statistics configuration */
+#define RKISP1_CIF_ISP_MODULE_HST (1U << 3)
+/* Lens Shade Control */
+#define RKISP1_CIF_ISP_MODULE_LSC (1U << 4)
+/* Auto White Balance Gain */
+#define RKISP1_CIF_ISP_MODULE_AWB_GAIN (1U << 5)
+/* Filter */
+#define RKISP1_CIF_ISP_MODULE_FLT (1U << 6)
+/* Bayer Demosaic */
+#define RKISP1_CIF_ISP_MODULE_BDM (1U << 7)
+/* Cross Talk */
+#define RKISP1_CIF_ISP_MODULE_CTK (1U << 8)
+/* Gamma Out Curve */
+#define RKISP1_CIF_ISP_MODULE_GOC (1U << 9)
+/* Color Processing */
+#define RKISP1_CIF_ISP_MODULE_CPROC (1U << 10)
+/* Auto Focus Control statistics configuration */
+#define RKISP1_CIF_ISP_MODULE_AFC (1U << 11)
+/* Auto White Balancing statistics configuration */
+#define RKISP1_CIF_ISP_MODULE_AWB (1U << 12)
+/* Image Effect */
+#define RKISP1_CIF_ISP_MODULE_IE (1U << 13)
+/* Auto Exposure Control statistics configuration */
+#define RKISP1_CIF_ISP_MODULE_AEC (1U << 14)
+/* Wide Dynamic Range */
+#define RKISP1_CIF_ISP_MODULE_WDR (1U << 15)
+/* Denoise Pre-Filter */
+#define RKISP1_CIF_ISP_MODULE_DPF (1U << 16)
+/* Denoise Pre-Filter Strength */
+#define RKISP1_CIF_ISP_MODULE_DPF_STRENGTH (1U << 17)
+
+#define RKISP1_CIF_ISP_CTK_COEFF_MAX 0x100
+#define RKISP1_CIF_ISP_CTK_OFFSET_MAX 0x800
+
+#define RKISP1_CIF_ISP_AE_MEAN_MAX_V10 25
+#define RKISP1_CIF_ISP_AE_MEAN_MAX_V12 81
+#define RKISP1_CIF_ISP_AE_MEAN_MAX RKISP1_CIF_ISP_AE_MEAN_MAX_V12
+
+#define RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10 16
+#define RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12 32
+#define RKISP1_CIF_ISP_HIST_BIN_N_MAX RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12
+
+#define RKISP1_CIF_ISP_AFM_MAX_WINDOWS 3
+#define RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE 17
+
+#define RKISP1_CIF_ISP_BDM_MAX_TH 0xff
+
+/*
+ * Black level compensation
+ */
+/* maximum value for horizontal start address */
+#define RKISP1_CIF_ISP_BLS_START_H_MAX 0x00000fff
+/* maximum value for horizontal stop address */
+#define RKISP1_CIF_ISP_BLS_STOP_H_MAX 0x00000fff
+/* maximum value for vertical start address */
+#define RKISP1_CIF_ISP_BLS_START_V_MAX 0x00000fff
+/* maximum value for vertical stop address */
+#define RKISP1_CIF_ISP_BLS_STOP_V_MAX 0x00000fff
+/* maximum is 2^18 = 262144 */
+#define RKISP1_CIF_ISP_BLS_SAMPLES_MAX 0x00000012
+/* maximum value for fixed black level */
+#define RKISP1_CIF_ISP_BLS_FIX_SUB_MAX 0x00000fff
+/* minimum value for fixed black level */
+#define RKISP1_CIF_ISP_BLS_FIX_SUB_MIN 0xfffff000
+/* 13 bit range (signed) */
+#define RKISP1_CIF_ISP_BLS_FIX_MASK 0x00001fff
+
+/*
+ * Automatic white balance measurements
+ */
+#define RKISP1_CIF_ISP_AWB_MAX_GRID 1
+#define RKISP1_CIF_ISP_AWB_MAX_FRAMES 7
+
+/*
+ * Gamma out
+ */
+/* Maximum number of color samples supported */
+#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10 17
+#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12 34
+#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12
+
+/*
+ * Lens shade correction
+ */
+#define RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE 8
+
+/*
+ * The following matches the tuning process,
+ * not the max capabilities of the chip.
+ */
+#define RKISP1_CIF_ISP_LSC_SAMPLES_MAX 17
+
+/*
+ * Histogram calculation
+ */
+#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10 25
+#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12 81
+#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12
+
+/*
+ * Defect Pixel Cluster Correction
+ */
+#define RKISP1_CIF_ISP_DPCC_METHODS_MAX 3
+
+#define RKISP1_CIF_ISP_DPCC_MODE_STAGE1_ENABLE (1U << 2)
+
+#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_G_CENTER (1U << 0)
+#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_RB_CENTER (1U << 1)
+#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_G_3X3 (1U << 2)
+#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_RB_3X3 (1U << 3)
+
+/* 0-2 for sets 1-3 */
+#define RKISP1_CIF_ISP_DPCC_SET_USE_STAGE1_USE_SET(n) ((n) << 0)
+#define RKISP1_CIF_ISP_DPCC_SET_USE_STAGE1_USE_FIX_SET (1U << 3)
+
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_GREEN_ENABLE (1U << 0)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_GREEN_ENABLE (1U << 1)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RO_GREEN_ENABLE (1U << 2)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_GREEN_ENABLE (1U << 3)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RG_GREEN_ENABLE (1U << 4)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_RED_BLUE_ENABLE (1U << 8)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_RED_BLUE_ENABLE (1U << 9)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RO_RED_BLUE_ENABLE (1U << 10)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_RED_BLUE_ENABLE (1U << 11)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RG_RED_BLUE_ENABLE (1U << 12)
+
+#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_G(v) ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_RB(v) ((v) << 8)
+#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_G(v) ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_RB(v) ((v) << 8)
+#define RKISP1_CIF_ISP_DPCC_PG_FAC_G(v) ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_PG_FAC_RB(v) ((v) << 8)
+#define RKISP1_CIF_ISP_DPCC_RND_THRESH_G(v) ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_RND_THRESH_RB(v) ((v) << 8)
+#define RKISP1_CIF_ISP_DPCC_RG_FAC_G(v) ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_RG_FAC_RB(v) ((v) << 8)
+
+#define RKISP1_CIF_ISP_DPCC_RO_LIMITS_n_G(n, v) ((v) << ((n) * 4))
+#define RKISP1_CIF_ISP_DPCC_RO_LIMITS_n_RB(n, v) ((v) << ((n) * 4 + 2))
+
+#define RKISP1_CIF_ISP_DPCC_RND_OFFS_n_G(n, v) ((v) << ((n) * 4))
+#define RKISP1_CIF_ISP_DPCC_RND_OFFS_n_RB(n, v) ((v) << ((n) * 4 + 2))
+
+/*
+ * Denoising pre filter
+ */
+#define RKISP1_CIF_ISP_DPF_MAX_NLF_COEFFS 17
+#define RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS 6
+
+/*
+ * Measurement types
+ */
+#define RKISP1_CIF_ISP_STAT_AWB (1U << 0)
+#define RKISP1_CIF_ISP_STAT_AUTOEXP (1U << 1)
+#define RKISP1_CIF_ISP_STAT_AFM (1U << 2)
+#define RKISP1_CIF_ISP_STAT_HIST (1U << 3)
+
+/**
+ * enum rkisp1_cif_isp_version - ISP variants
+ *
+ * @RKISP1_V10: used at least in rk3288 and rk3399
+ * @RKISP1_V11: declared in the original vendor code, but not used
+ * @RKISP1_V12: used at least in rk3326 and px30
+ * @RKISP1_V13: used at least in rk1808
+ */
+enum rkisp1_cif_isp_version {
+ RKISP1_V10 = 10,
+ RKISP1_V11,
+ RKISP1_V12,
+ RKISP1_V13,
+};
+
+enum rkisp1_cif_isp_histogram_mode {
+ RKISP1_CIF_ISP_HISTOGRAM_MODE_DISABLE,
+ RKISP1_CIF_ISP_HISTOGRAM_MODE_RGB_COMBINED,
+ RKISP1_CIF_ISP_HISTOGRAM_MODE_R_HISTOGRAM,
+ RKISP1_CIF_ISP_HISTOGRAM_MODE_G_HISTOGRAM,
+ RKISP1_CIF_ISP_HISTOGRAM_MODE_B_HISTOGRAM,
+ RKISP1_CIF_ISP_HISTOGRAM_MODE_Y_HISTOGRAM
+};
+
+enum rkisp1_cif_isp_awb_mode_type {
+ RKISP1_CIF_ISP_AWB_MODE_MANUAL,
+ RKISP1_CIF_ISP_AWB_MODE_RGB,
+ RKISP1_CIF_ISP_AWB_MODE_YCBCR
+};
+
+enum rkisp1_cif_isp_flt_mode {
+ RKISP1_CIF_ISP_FLT_STATIC_MODE,
+ RKISP1_CIF_ISP_FLT_DYNAMIC_MODE
+};
+
+/**
+ * enum rkisp1_cif_isp_exp_ctrl_autostop - stop modes
+ * @RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_0: continuous measurement
+ * @RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_1: stop measuring after a complete frame
+ */
+enum rkisp1_cif_isp_exp_ctrl_autostop {
+ RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_0 = 0,
+ RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_1 = 1,
+};
+
+/**
+ * enum rkisp1_cif_isp_exp_meas_mode - Exposure measure mode
+ * @RKISP1_CIF_ISP_EXP_MEASURING_MODE_0: Y = 16 + 0.25R + 0.5G + 0.1094B
+ * @RKISP1_CIF_ISP_EXP_MEASURING_MODE_1: Y = (R + G + B) x (85/256)
+ */
+enum rkisp1_cif_isp_exp_meas_mode {
+ RKISP1_CIF_ISP_EXP_MEASURING_MODE_0,
+ RKISP1_CIF_ISP_EXP_MEASURING_MODE_1,
+};
+
+/*---------- PART1: Input Parameters ------------*/
+
+/**
+ * struct rkisp1_cif_isp_window - measurement window.
+ *
+ * Measurements are calculated per window inside the frame.
+ * This struct represents a window for a measurement.
+ *
+ * @h_offs: the horizontal offset of the window from the left of the frame in pixels.
+ * @v_offs: the vertical offset of the window from the top of the frame in pixels.
+ * @h_size: the horizontal size of the window in pixels
+ * @v_size: the vertical size of the window in pixels.
+ */
+struct rkisp1_cif_isp_window {
+ __u16 h_offs;
+ __u16 v_offs;
+ __u16 h_size;
+ __u16 v_size;
+};
+
+/**
+ * struct rkisp1_cif_isp_bls_fixed_val - BLS fixed subtraction values
+ *
+ * The values will be subtracted from the sensor
+ * values. Therefore a negative value means addition instead of subtraction!
+ *
+ * @r: Fixed (signed!) subtraction value for Bayer pattern R
+ * @gr: Fixed (signed!) subtraction value for Bayer pattern Gr
+ * @gb: Fixed (signed!) subtraction value for Bayer pattern Gb
+ * @b: Fixed (signed!) subtraction value for Bayer pattern B
+ */
+struct rkisp1_cif_isp_bls_fixed_val {
+ __s16 r;
+ __s16 gr;
+ __s16 gb;
+ __s16 b;
+};
+
+/**
+ * struct rkisp1_cif_isp_bls_config - Configuration used by black level subtraction
+ *
+ * @enable_auto: When automatic mode is activated, the measured values
+ * are subtracted. Otherwise the fixed subtraction
+ * values will be subtracted.
+ * @en_windows: enabled window
+ * @bls_window1: Measurement window 1 size
+ * @bls_window2: Measurement window 2 size
+ * @bls_samples: Set amount of measured pixels for each Bayer position
+ * (A, B, C and D) to 2^bls_samples.
+ * @fixed_val: Fixed subtraction values
+ */
+struct rkisp1_cif_isp_bls_config {
+ __u8 enable_auto;
+ __u8 en_windows;
+ struct rkisp1_cif_isp_window bls_window1;
+ struct rkisp1_cif_isp_window bls_window2;
+ __u8 bls_samples;
+ struct rkisp1_cif_isp_bls_fixed_val fixed_val;
+};
+
+/**
+ * struct rkisp1_cif_isp_dpcc_methods_config - DPCC methods set configuration
+ *
+ * This structure stores the configuration of one set of methods for the DPCC
+ * algorithm. Multiple methods can be selected in each set (independently for
+ * the Green and Red/Blue components) through the @method field, the result is
+ * the logical AND of all enabled methods. The remaining fields set thresholds
+ * and factors for each method.
+ *
+ * @method: Method enable bits (RKISP1_CIF_ISP_DPCC_METHODS_SET_*)
+ * @line_thresh: Line threshold (RKISP1_CIF_ISP_DPCC_LINE_THRESH_*)
+ * @line_mad_fac: Line Mean Absolute Difference factor (RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_*)
+ * @pg_fac: Peak gradient factor (RKISP1_CIF_ISP_DPCC_PG_FAC_*)
+ * @rnd_thresh: Rank Neighbor Difference threshold (RKISP1_CIF_ISP_DPCC_RND_THRESH_*)
+ * @rg_fac: Rank gradient factor (RKISP1_CIF_ISP_DPCC_RG_FAC_*)
+ */
+struct rkisp1_cif_isp_dpcc_methods_config {
+ __u32 method;
+ __u32 line_thresh;
+ __u32 line_mad_fac;
+ __u32 pg_fac;
+ __u32 rnd_thresh;
+ __u32 rg_fac;
+};
+
+/**
+ * struct rkisp1_cif_isp_dpcc_config - Configuration used by DPCC
+ *
+ * Configuration used by Defect Pixel Cluster Correction. Three sets of methods
+ * can be configured and selected through the @set_use field. The result is the
+ * logical OR of all enabled sets.
+ *
+ * @mode: DPCC mode (RKISP1_CIF_ISP_DPCC_MODE_*)
+ * @output_mode: Interpolation output mode (RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_*)
+ * @set_use: Methods sets selection (RKISP1_CIF_ISP_DPCC_SET_USE_*)
+ * @methods: Methods sets configuration
+ * @ro_limits: Rank order limits (RKISP1_CIF_ISP_DPCC_RO_LIMITS_*)
+ * @rnd_offs: Differential rank offsets for rank neighbor difference (RKISP1_CIF_ISP_DPCC_RND_OFFS_*)
+ */
+struct rkisp1_cif_isp_dpcc_config {
+ __u32 mode;
+ __u32 output_mode;
+ __u32 set_use;
+ struct rkisp1_cif_isp_dpcc_methods_config methods[RKISP1_CIF_ISP_DPCC_METHODS_MAX];
+ __u32 ro_limits;
+ __u32 rnd_offs;
+};
+
+/**
+ * struct rkisp1_cif_isp_gamma_corr_curve - gamma curve point definition y-axis (output).
+ *
+ * The reset values define a linear curve which has the same effect as bypass. Reset values are:
+ * gamma_y[0] = 0x0000, gamma_y[1] = 0x0100, ... gamma_y[15] = 0x0f00, gamma_y[16] = 0xfff
+ *
+ * @gamma_y: the values for the y-axis of gamma curve points. Each value is 12 bit.
+ */
+struct rkisp1_cif_isp_gamma_corr_curve {
+ __u16 gamma_y[RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE];
+};
+
+/**
+ * struct rkisp1_cif_isp_gamma_curve_x_axis_pnts - De-Gamma Curve definition x increments
+ * (sampling points). gamma_dx0 is for the lower samples (1-8), gamma_dx1 is for the
+ * higher samples (9-16). The reset value for both fields is 0x44444444. This means
+ * that each sample is 4 units away from the previous one on the x-axis.
+ *
+ * @gamma_dx0: gamma curve sample points definitions. Bits 0:2 for sample 1. Bit 3 unused.
+ * Bits 4:6 for sample 2. Bit 7 unused ... Bits 28:30 for sample 8. Bit 31 unused
+ * @gamma_dx1: gamma curve sample points definitions. Bits 0:2 for sample 9. Bit 3 unused.
+ * Bits 4:6 for sample 10. Bit 7 unused ... Bits 28:30 for sample 16. Bit 31 unused
+ */
+struct rkisp1_cif_isp_gamma_curve_x_axis_pnts {
+ __u32 gamma_dx0;
+ __u32 gamma_dx1;
+};
+
+/**
+ * struct rkisp1_cif_isp_sdg_config - Configuration used by sensor degamma
+ *
+ * @curve_r: gamma curve point definition axis for red
+ * @curve_g: gamma curve point definition axis for green
+ * @curve_b: gamma curve point definition axis for blue
+ * @xa_pnts: x axis increments
+ */
+struct rkisp1_cif_isp_sdg_config {
+ struct rkisp1_cif_isp_gamma_corr_curve curve_r;
+ struct rkisp1_cif_isp_gamma_corr_curve curve_g;
+ struct rkisp1_cif_isp_gamma_corr_curve curve_b;
+ struct rkisp1_cif_isp_gamma_curve_x_axis_pnts xa_pnts;
+};
+
+/**
+ * struct rkisp1_cif_isp_lsc_config - Configuration used by Lens shading correction
+ *
+ * @r_data_tbl: sample table red
+ * @gr_data_tbl: sample table green (red)
+ * @gb_data_tbl: sample table green (blue)
+ * @b_data_tbl: sample table blue
+ * @x_grad_tbl: gradient table x
+ * @y_grad_tbl: gradient table y
+ * @x_size_tbl: size table x
+ * @y_size_tbl: size table y
+ * @config_width: not used at the moment
+ * @config_height: not used at the moment
+ */
+struct rkisp1_cif_isp_lsc_config {
+ __u16 r_data_tbl[RKISP1_CIF_ISP_LSC_SAMPLES_MAX][RKISP1_CIF_ISP_LSC_SAMPLES_MAX];
+ __u16 gr_data_tbl[RKISP1_CIF_ISP_LSC_SAMPLES_MAX][RKISP1_CIF_ISP_LSC_SAMPLES_MAX];
+ __u16 gb_data_tbl[RKISP1_CIF_ISP_LSC_SAMPLES_MAX][RKISP1_CIF_ISP_LSC_SAMPLES_MAX];
+ __u16 b_data_tbl[RKISP1_CIF_ISP_LSC_SAMPLES_MAX][RKISP1_CIF_ISP_LSC_SAMPLES_MAX];
+
+ __u16 x_grad_tbl[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
+ __u16 y_grad_tbl[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
+
+ __u16 x_size_tbl[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
+ __u16 y_size_tbl[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
+ __u16 config_width;
+ __u16 config_height;
+};
+
+/**
+ * struct rkisp1_cif_isp_ie_config - Configuration used by image effects
+ *
+ * @effect: values from 'enum v4l2_colorfx'. Possible values are: V4L2_COLORFX_SEPIA,
+ * V4L2_COLORFX_SET_CBCR, V4L2_COLORFX_AQUA, V4L2_COLORFX_EMBOSS,
+ * V4L2_COLORFX_SKETCH, V4L2_COLORFX_BW, V4L2_COLORFX_NEGATIVE
+ * @color_sel: bits 0:2 - colors bitmask (001 - blue, 010 - green, 100 - red).
+ * bits 8:15 - Threshold value of the RGB colors for the color selection effect.
+ * @eff_mat_1: 3x3 Matrix Coefficients for Emboss Effect 1
+ * @eff_mat_2: 3x3 Matrix Coefficients for Emboss Effect 2
+ * @eff_mat_3: 3x3 Matrix Coefficients for Emboss 3/Sketch 1
+ * @eff_mat_4: 3x3 Matrix Coefficients for Sketch Effect 2
+ * @eff_mat_5: 3x3 Matrix Coefficients for Sketch Effect 3
+ * @eff_tint: Chrominance increment values of tint (used for sepia effect)
+ */
+struct rkisp1_cif_isp_ie_config {
+ __u16 effect;
+ __u16 color_sel;
+ __u16 eff_mat_1;
+ __u16 eff_mat_2;
+ __u16 eff_mat_3;
+ __u16 eff_mat_4;
+ __u16 eff_mat_5;
+ __u16 eff_tint;
+};
+
+/**
+ * struct rkisp1_cif_isp_cproc_config - Configuration used by Color Processing
+ *
+ * @c_out_range: Chrominance pixel clipping range at output.
+ * (0 for limit, 1 for full)
+ * @y_in_range: Luminance pixel clipping range at input.
+ * @y_out_range: Luminance pixel clipping range at output.
+ * @contrast: 00~ff, 0.0~1.992
+ * @brightness: 80~7F, -128~+127
+ * @sat: saturation, 00~FF, 0.0~1.992
+ * @hue: 80~7F, -90~+87.188
+ */
+struct rkisp1_cif_isp_cproc_config {
+ __u8 c_out_range;
+ __u8 y_in_range;
+ __u8 y_out_range;
+ __u8 contrast;
+ __u8 brightness;
+ __u8 sat;
+ __u8 hue;
+};
+
+/**
+ * struct rkisp1_cif_isp_awb_meas_config - Configuration for the AWB statistics
+ *
+ * @awb_mode: the awb meas mode. From enum rkisp1_cif_isp_awb_mode_type.
+ * @awb_wnd: white balance measurement window (in pixels)
+ * @max_y: only pixel values < max_y contribute to the awb measurement; set to 0
+ * to disable this feature
+ * @min_y: only pixel values > min_y contribute to the awb measurement
+ * @max_csum: Chrominance sum maximum value; only pixels with Cb+Cr
+ * smaller than this threshold are considered for awb measurements
+ * @min_c: Chrominance minimum value; only pixels with Cb and Cr
+ * each greater than this threshold are considered for awb measurements
+ * @frames: number of frames - 1 used for mean value calculation
+ * (ucFrames=0 means 1 Frame)
+ * @awb_ref_cr: reference Cr value for AWB regulation, target for AWB
+ * @awb_ref_cb: reference Cb value for AWB regulation, target for AWB
+ * @enable_ymax_cmp: enable Y_MAX compare (Not valid in RGB measurement mode.)
+ */
+struct rkisp1_cif_isp_awb_meas_config {
+ /*
+ * Note: currently the h and v offsets are mapped to grid offsets
+ */
+ struct rkisp1_cif_isp_window awb_wnd;
+ __u32 awb_mode;
+ __u8 max_y;
+ __u8 min_y;
+ __u8 max_csum;
+ __u8 min_c;
+ __u8 frames;
+ __u8 awb_ref_cr;
+ __u8 awb_ref_cb;
+ __u8 enable_ymax_cmp;
+};
+
+/**
+ * struct rkisp1_cif_isp_awb_gain_config - Configuration used by auto white balance gain
+ *
+ * All fields in this struct are 10-bit unsigned fixed-point values in the
+ * range 0 to 4, with an 8-bit fractional part, so 0x100 represents 1.0:
+ *
+ * out_data_x = (AWB_GAIN_X * in_data + 128) >> 8
+ *
+ * @gain_red: gain value for red component.
+ * @gain_green_r: gain value for green component in red line.
+ * @gain_blue: gain value for blue component.
+ * @gain_green_b: gain value for green component in blue line.
+ */
+struct rkisp1_cif_isp_awb_gain_config {
+ __u16 gain_red;
+ __u16 gain_green_r;
+ __u16 gain_blue;
+ __u16 gain_green_b;
+};
+
+/**
+ * struct rkisp1_cif_isp_flt_config - Configuration used by ISP filtering
+ *
+ * All 4 threshold fields (thresh_*) are 10 bits.
+ * All 6 factor fields (fac_*) are 6 bits.
+ *
+ * @mode: ISP_FILT_MODE register fields (from enum rkisp1_cif_isp_flt_mode)
+ * @grn_stage1: Green filter stage 1 select (range 0x0...0x8)
+ * @chr_h_mode: Chroma filter horizontal mode
+ * @chr_v_mode: Chroma filter vertical mode
+ * @thresh_bl0: If thresh_bl1 < sum_grad < thresh_bl0 then fac_bl0 is selected (blurring th)
+ * @thresh_bl1: If sum_grad < thresh_bl1 then fac_bl1 is selected (blurring th)
+ * @thresh_sh0: If thresh_sh0 < sum_grad < thresh_sh1 then fac_sh0 is selected (sharpening th)
+ * @thresh_sh1: If thresh_sh1 < sum_grad then fac_sh1 is selected (sharpening th)
+ * @lum_weight: Parameters for luminance weight function.
+ * @fac_sh1: filter factor for sharp1 level
+ * @fac_sh0: filter factor for sharp0 level
+ * @fac_mid: filter factor for mid level and for static filter mode
+ * @fac_bl0: filter factor for blur 0 level
+ * @fac_bl1: filter factor for blur 1 level (max blur)
+ */
+struct rkisp1_cif_isp_flt_config {
+ __u32 mode;
+ __u8 grn_stage1;
+ __u8 chr_h_mode;
+ __u8 chr_v_mode;
+ __u32 thresh_bl0;
+ __u32 thresh_bl1;
+ __u32 thresh_sh0;
+ __u32 thresh_sh1;
+ __u32 lum_weight;
+ __u32 fac_sh1;
+ __u32 fac_sh0;
+ __u32 fac_mid;
+ __u32 fac_bl0;
+ __u32 fac_bl1;
+};
+
+/**
+ * struct rkisp1_cif_isp_bdm_config - Configuration used by Bayer DeMosaic
+ *
+ * @demosaic_th: threshold for bayer demosaicing texture detection
+ */
+struct rkisp1_cif_isp_bdm_config {
+ __u8 demosaic_th;
+};
+
+/**
+ * struct rkisp1_cif_isp_ctk_config - Configuration used by Cross Talk correction
+ *
+ * @coeff: color correction matrix. Values are 11-bit signed fixed-point numbers with a 4-bit
+ * integer and a 7-bit fractional part, ranging from -8 (0x400) to +7.992 (0x3FF). 0 is
+ * represented by 0x000 and a coefficient value of 1 as 0x080.
+ * @ct_offset: Red, Green, Blue offsets for the crosstalk correction matrix
+ */
+struct rkisp1_cif_isp_ctk_config {
+ __u16 coeff[3][3];
+ __u16 ct_offset[3];
+};
+
+enum rkisp1_cif_isp_goc_mode {
+ RKISP1_CIF_ISP_GOC_MODE_LOGARITHMIC,
+ RKISP1_CIF_ISP_GOC_MODE_EQUIDISTANT
+};
+
+/**
+ * struct rkisp1_cif_isp_goc_config - Configuration used by Gamma Out correction
+ *
+ * @mode: goc mode (from enum rkisp1_cif_isp_goc_mode)
+ * @gamma_y: gamma out curve y-axis for all color components
+ *
+ * The number of entries of @gamma_y depends on the hardware revision
+ * as is reported by the hw_revision field of the struct media_device_info
+ * that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
+ *
+ * Versions <= V11 have RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10
+ * entries, versions >= V12 have RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12
+ * entries. RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES is equal to the maximum
+ * of the two.
+ */
+struct rkisp1_cif_isp_goc_config {
+ __u32 mode;
+ __u16 gamma_y[RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES];
+};
+
+/**
+ * struct rkisp1_cif_isp_hst_config - Configuration for Histogram statistics
+ *
+ * @mode: histogram mode (from enum rkisp1_cif_isp_histogram_mode)
+ * @histogram_predivider: process only every (stepsize)th pixel; all other
+ * pixels are skipped
+ * @meas_window: coordinates of the measure window
+ * @hist_weight: weighting factor for sub-windows
+ *
+ * The number of entries of @hist_weight depends on the hardware revision
+ * as is reported by the hw_revision field of the struct media_device_info
+ * that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
+ *
+ * Versions <= V11 have RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10
+ * entries, versions >= V12 have RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12
+ * entries. RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE is equal to the maximum
+ * of the two.
+ */
+struct rkisp1_cif_isp_hst_config {
+ __u32 mode;
+ __u8 histogram_predivider;
+ struct rkisp1_cif_isp_window meas_window;
+ __u8 hist_weight[RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE];
+};
+
+/**
+ * struct rkisp1_cif_isp_aec_config - Configuration for Auto Exposure statistics
+ *
+ * @mode: Exposure measure mode (from enum rkisp1_cif_isp_exp_meas_mode)
+ * @autostop: stop mode (from enum rkisp1_cif_isp_exp_ctrl_autostop)
+ * @meas_window: coordinates of the measure window
+ */
+struct rkisp1_cif_isp_aec_config {
+ __u32 mode;
+ __u32 autostop;
+ struct rkisp1_cif_isp_window meas_window;
+};
+
+/**
+ * struct rkisp1_cif_isp_afc_config - Configuration for the Auto Focus statistics
+ *
+ * @num_afm_win: number of measurement windows (up to RKISP1_CIF_ISP_AFM_MAX_WINDOWS)
+ * @afm_win: coordinates of the meas window
+ * @thres: threshold used for minimizing the influence of noise
+ * @var_shift: the number of bits for the shift operation at the end of the
+ * calculation chain.
+ */
+struct rkisp1_cif_isp_afc_config {
+ __u8 num_afm_win;
+ struct rkisp1_cif_isp_window afm_win[RKISP1_CIF_ISP_AFM_MAX_WINDOWS];
+ __u32 thres;
+ __u32 var_shift;
+};
+
+/**
+ * enum rkisp1_cif_isp_dpf_gain_usage - dpf gain usage
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_DISABLED: don't use any gains in preprocessing stage
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_GAINS: use only the noise function gains from
+ * registers DPF_NF_GAIN_R, ...
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_LSC_GAINS: use only the gains from LSC module
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_LSC_GAINS: use the noise function gains and the
+ * gains from LSC module
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_GAINS: use only the gains from AWB module
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_LSC_GAINS: use the gains from AWB and LSC module
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_MAX: upper border (only for an internal evaluation)
+ */
+enum rkisp1_cif_isp_dpf_gain_usage {
+ RKISP1_CIF_ISP_DPF_GAIN_USAGE_DISABLED,
+ RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_GAINS,
+ RKISP1_CIF_ISP_DPF_GAIN_USAGE_LSC_GAINS,
+ RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_LSC_GAINS,
+ RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_GAINS,
+ RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_LSC_GAINS,
+ RKISP1_CIF_ISP_DPF_GAIN_USAGE_MAX
+};
+
+/**
+ * enum rkisp1_cif_isp_dpf_rb_filtersize - Red and blue filter sizes
+ * @RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_13x9: red and blue filter kernel size 13x9
+ * (means 7x5 active pixel)
+ * @RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_9x9: red and blue filter kernel size 9x9
+ * (means 5x5 active pixel)
+ */
+enum rkisp1_cif_isp_dpf_rb_filtersize {
+ RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_13x9,
+ RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_9x9,
+};
+
+/**
+ * enum rkisp1_cif_isp_dpf_nll_scale_mode - dpf noise level scale mode
+ * @RKISP1_CIF_ISP_NLL_SCALE_LINEAR: use a linear scaling
+ * @RKISP1_CIF_ISP_NLL_SCALE_LOGARITHMIC: use a logarithmic scaling
+ */
+enum rkisp1_cif_isp_dpf_nll_scale_mode {
+ RKISP1_CIF_ISP_NLL_SCALE_LINEAR,
+ RKISP1_CIF_ISP_NLL_SCALE_LOGARITHMIC,
+};
+
+/**
+ * struct rkisp1_cif_isp_dpf_nll - Noise level lookup
+ *
+ * @coeff: Noise level Lookup coefficient
+ * @scale_mode: dpf noise level scale mode (from enum rkisp1_cif_isp_dpf_nll_scale_mode)
+ */
+struct rkisp1_cif_isp_dpf_nll {
+ __u16 coeff[RKISP1_CIF_ISP_DPF_MAX_NLF_COEFFS];
+ __u32 scale_mode;
+};
+
+/**
+ * struct rkisp1_cif_isp_dpf_rb_flt - Red blue filter config
+ *
+ * @fltsize: The filter size for the red and blue pixels
+ * (from enum rkisp1_cif_isp_dpf_rb_filtersize)
+ * @spatial_coeff: Spatial weights
+ * @r_enable: enable filter processing for red pixels
+ * @b_enable: enable filter processing for blue pixels
+ */
+struct rkisp1_cif_isp_dpf_rb_flt {
+ __u32 fltsize;
+ __u8 spatial_coeff[RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS];
+ __u8 r_enable;
+ __u8 b_enable;
+};
+
+/**
+ * struct rkisp1_cif_isp_dpf_g_flt - Green filter Configuration
+ *
+ * @spatial_coeff: Spatial weights
+ * @gr_enable: enable filter processing for green pixels in green/red lines
+ * @gb_enable: enable filter processing for green pixels in green/blue lines
+ */
+struct rkisp1_cif_isp_dpf_g_flt {
+ __u8 spatial_coeff[RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS];
+ __u8 gr_enable;
+ __u8 gb_enable;
+};
+
+/**
+ * struct rkisp1_cif_isp_dpf_gain - Noise function Configuration
+ *
+ * @mode: dpf gain usage (from enum rkisp1_cif_isp_dpf_gain_usage)
+ * @nf_r_gain: Noise function Gain that replaces the AWB gain for red pixels
+ * @nf_b_gain: Noise function Gain that replaces the AWB gain for blue pixels
+ * @nf_gr_gain: Noise function Gain that replaces the AWB gain
+ * for green pixels in a red line
+ * @nf_gb_gain: Noise function Gain that replaces the AWB gain
+ * for green pixels in a blue line
+ */
+struct rkisp1_cif_isp_dpf_gain {
+ __u32 mode;
+ __u16 nf_r_gain;
+ __u16 nf_b_gain;
+ __u16 nf_gr_gain;
+ __u16 nf_gb_gain;
+};
+
+/**
+ * struct rkisp1_cif_isp_dpf_config - Configuration used by De-noising pre-filter
+ *
+ * @gain: noise function gain
+ * @g_flt: green filter config
+ * @rb_flt: red blue filter config
+ * @nll: noise level lookup
+ */
+struct rkisp1_cif_isp_dpf_config {
+ struct rkisp1_cif_isp_dpf_gain gain;
+ struct rkisp1_cif_isp_dpf_g_flt g_flt;
+ struct rkisp1_cif_isp_dpf_rb_flt rb_flt;
+ struct rkisp1_cif_isp_dpf_nll nll;
+};
+
+/**
+ * struct rkisp1_cif_isp_dpf_strength_config - strength of the filter
+ *
+ * @r: filter strength of the RED filter
+ * @g: filter strength of the GREEN filter
+ * @b: filter strength of the BLUE filter
+ */
+struct rkisp1_cif_isp_dpf_strength_config {
+ __u8 r;
+ __u8 g;
+ __u8 b;
+};
+
+/**
+ * struct rkisp1_cif_isp_isp_other_cfg - Parameters for some blocks in rockchip isp1
+ *
+ * @dpcc_config: Defect Pixel Cluster Correction config
+ * @bls_config: Black Level Subtraction config
+ * @sdg_config: sensor degamma config
+ * @lsc_config: Lens Shade config
+ * @awb_gain_config: Auto White balance gain config
+ * @flt_config: filter config
+ * @bdm_config: demosaic config
+ * @ctk_config: cross talk config
+ * @goc_config: gamma out config
+ * @dpf_config: De-noising pre-filter config
+ * @dpf_strength_config: dpf strength config
+ * @cproc_config: color process config
+ * @ie_config: image effects config
+ */
+struct rkisp1_cif_isp_isp_other_cfg {
+ struct rkisp1_cif_isp_dpcc_config dpcc_config;
+ struct rkisp1_cif_isp_bls_config bls_config;
+ struct rkisp1_cif_isp_sdg_config sdg_config;
+ struct rkisp1_cif_isp_lsc_config lsc_config;
+ struct rkisp1_cif_isp_awb_gain_config awb_gain_config;
+ struct rkisp1_cif_isp_flt_config flt_config;
+ struct rkisp1_cif_isp_bdm_config bdm_config;
+ struct rkisp1_cif_isp_ctk_config ctk_config;
+ struct rkisp1_cif_isp_goc_config goc_config;
+ struct rkisp1_cif_isp_dpf_config dpf_config;
+ struct rkisp1_cif_isp_dpf_strength_config dpf_strength_config;
+ struct rkisp1_cif_isp_cproc_config cproc_config;
+ struct rkisp1_cif_isp_ie_config ie_config;
+};
+
+/**
+ * struct rkisp1_cif_isp_isp_meas_cfg - Rockchip ISP1 Measure Parameters
+ *
+ * @awb_meas_config: auto white balance config
+ * @hst_config: histogram config
+ * @aec_config: auto exposure config
+ * @afc_config: auto focus config
+ */
+struct rkisp1_cif_isp_isp_meas_cfg {
+ struct rkisp1_cif_isp_awb_meas_config awb_meas_config;
+ struct rkisp1_cif_isp_hst_config hst_config;
+ struct rkisp1_cif_isp_aec_config aec_config;
+ struct rkisp1_cif_isp_afc_config afc_config;
+};
+
+/**
+ * struct rkisp1_params_cfg - Rockchip ISP1 Input Parameters Meta Data
+ *
+ * @module_en_update: mask of the modules whose enable bit should be updated
+ * @module_ens: mask with the enable value of each module; a module is only
+ * updated if its corresponding bit is set in module_en_update
+ * @module_cfg_update: mask of the modules whose configuration should be updated
+ * @meas: measurement config
+ * @others: other config
+ */
+struct rkisp1_params_cfg {
+ __u32 module_en_update;
+ __u32 module_ens;
+ __u32 module_cfg_update;
+
+ struct rkisp1_cif_isp_isp_meas_cfg meas;
+ struct rkisp1_cif_isp_isp_other_cfg others;
+};
+
+/*---------- PART2: Measurement Statistics ------------*/
+
+/**
+ * struct rkisp1_cif_isp_awb_meas - AWB measured values
+ *
+ * @cnt: White pixel count, number of "white pixels" found during last
+ * measurement
+ * @mean_y_or_g: Mean value of Y within window and frames,
+ * Green if RGB is selected.
+ * @mean_cb_or_b: Mean value of Cb within window and frames,
+ * Blue if RGB is selected.
+ * @mean_cr_or_r: Mean value of Cr within window and frames,
+ * Red if RGB is selected.
+ */
+struct rkisp1_cif_isp_awb_meas {
+ __u32 cnt;
+ __u8 mean_y_or_g;
+ __u8 mean_cb_or_b;
+ __u8 mean_cr_or_r;
+};
+
+/**
+ * struct rkisp1_cif_isp_awb_stat - statistics automatic white balance data
+ *
+ * @awb_mean: Mean measured data
+ */
+struct rkisp1_cif_isp_awb_stat {
+ struct rkisp1_cif_isp_awb_meas awb_mean[RKISP1_CIF_ISP_AWB_MAX_GRID];
+};
+
+/**
+ * struct rkisp1_cif_isp_bls_meas_val - BLS measured values
+ *
+ * @meas_r: Mean measured value for Bayer pattern R
+ * @meas_gr: Mean measured value for Bayer pattern Gr
+ * @meas_gb: Mean measured value for Bayer pattern Gb
+ * @meas_b: Mean measured value for Bayer pattern B
+ */
+struct rkisp1_cif_isp_bls_meas_val {
+ __u16 meas_r;
+ __u16 meas_gr;
+ __u16 meas_gb;
+ __u16 meas_b;
+};
+
+/**
+ * struct rkisp1_cif_isp_ae_stat - statistics auto exposure data
+ *
+ * @exp_mean: Mean luminance value of each block
+ * @bls_val: BLS measured values
+ *
+ * The number of entries of @exp_mean depends on the hardware revision
+ * as is reported by the hw_revision field of the struct media_device_info
+ * that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
+ *
+ * Versions <= V11 have RKISP1_CIF_ISP_AE_MEAN_MAX_V10 entries,
+ * versions >= V12 have RKISP1_CIF_ISP_AE_MEAN_MAX_V12 entries.
+ * RKISP1_CIF_ISP_AE_MEAN_MAX is equal to the maximum of the two.
+ *
+ * Image is divided into 5x5 blocks on V10 and 9x9 blocks on V12.
+ */
+struct rkisp1_cif_isp_ae_stat {
+ __u8 exp_mean[RKISP1_CIF_ISP_AE_MEAN_MAX];
+ struct rkisp1_cif_isp_bls_meas_val bls_val;
+};
+
+/**
+ * struct rkisp1_cif_isp_af_meas_val - AF measured values
+ *
+ * @sum: sharpness value
+ * @lum: luminance value
+ */
+struct rkisp1_cif_isp_af_meas_val {
+ __u32 sum;
+ __u32 lum;
+};
+
+/**
+ * struct rkisp1_cif_isp_af_stat - statistics auto focus data
+ *
+ * @window: AF measured value of window x
+ *
+ * The module measures the sharpness in 3 windows of selectable size via
+ * register settings (ISP_AFM_*_A/B/C).
+ */
+struct rkisp1_cif_isp_af_stat {
+ struct rkisp1_cif_isp_af_meas_val window[RKISP1_CIF_ISP_AFM_MAX_WINDOWS];
+};
+
+/**
+ * struct rkisp1_cif_isp_hist_stat - statistics histogram data
+ *
+ * @hist_bins: measured bin counters. Each bin is a 20-bit unsigned fixed-point
+ * value. Bits 0-4 are the fractional part and bits 5-19 are the
+ * integer part.
+ *
+ * The measurement window is divided into 5x5 sub-windows for
+ * V10/V11 and into 9x9 sub-windows for V12. The histogram is then computed for
+ * each sub-window independently and the final result is a weighted average of
+ * the histogram measurements on all sub-windows. The measurement window
+ * and the weight of each sub-window are configurable using
+ * struct @rkisp1_cif_isp_hst_config.
+ *
+ * The histogram contains 16 bins in V10/V11 and 32 bins in V12/V13.
+ *
+ * The number of entries of @hist_bins depends on the hardware revision
+ * as is reported by the hw_revision field of the struct media_device_info
+ * that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
+ *
+ * Versions <= V11 have RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10 entries,
+ * versions >= V12 have RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12 entries.
+ * RKISP1_CIF_ISP_HIST_BIN_N_MAX is equal to the maximum of the two.
+ */
+struct rkisp1_cif_isp_hist_stat {
+ __u32 hist_bins[RKISP1_CIF_ISP_HIST_BIN_N_MAX];
+};
+
+/**
+ * struct rkisp1_cif_isp_stat - Rockchip ISP1 Statistics Data
+ *
+ * @awb: statistics data for automatic white balance
+ * @ae: statistics data for auto exposure
+ * @af: statistics data for auto focus
+ * @hist: statistics histogram data
+ */
+struct rkisp1_cif_isp_stat {
+ struct rkisp1_cif_isp_awb_stat awb;
+ struct rkisp1_cif_isp_ae_stat ae;
+ struct rkisp1_cif_isp_af_stat af;
+ struct rkisp1_cif_isp_hist_stat hist;
+};
+
+/**
+ * struct rkisp1_stat_buffer - Rockchip ISP1 Statistics Meta Data
+ *
+ * @meas_type: measurement types (RKISP1_CIF_ISP_STAT_* definitions)
+ * @frame_id: frame ID for sync
+ * @params: statistics data
+ */
+struct rkisp1_stat_buffer {
+ __u32 meas_type;
+ __u32 frame_id;
+ struct rkisp1_cif_isp_stat params;
+};
+
+#endif /* _UAPI_RKISP1_CONFIG_H */
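[Editor's note] The parameters buffer above is consumed as one blob per frame: userspace sets the module bits, fills the matching sub-struct, and queues the buffer on the driver's params video node. A minimal sketch, assuming the RKISP1_CIF_ISP_MODULE_BLS bit defined earlier in this header and `buf` pointing at a mapped V4L2_BUF_TYPE_META_OUTPUT buffer:

#include <string.h>
#include <linux/rkisp1-config.h>

/* Sketch: enable BLS with fixed subtraction values for one frame. */
static void fill_bls_params(void *buf)
{
	struct rkisp1_params_cfg *cfg = buf;

	memset(cfg, 0, sizeof(*cfg));
	cfg->module_cfg_update = RKISP1_CIF_ISP_MODULE_BLS;
	cfg->module_en_update  = RKISP1_CIF_ISP_MODULE_BLS;
	cfg->module_ens        = RKISP1_CIF_ISP_MODULE_BLS;

	cfg->others.bls_config.enable_auto = 0;	/* use fixed values */
	cfg->others.bls_config.fixed_val.r  = 64;
	cfg->others.bls_config.fixed_val.gr = 64;
	cfg->others.bls_config.fixed_val.gb = 64;
	cfg->others.bls_config.fixed_val.b  = 64;
	/* ...then VIDIOC_QBUF the buffer on the params node. */
}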
diff --git a/include/uapi/linux/romfs_fs.h b/include/uapi/linux/romfs_fs.h
index a7f1585accef..6aa05e792454 100644
--- a/include/uapi/linux/romfs_fs.h
+++ b/include/uapi/linux/romfs_fs.h
@@ -27,7 +27,7 @@ struct romfs_super_block {
__be32 word1;
__be32 size;
__be32 checksum;
- char name[0]; /* volume name */
+ char name[]; /* volume name */
};
/* On disk inode */
@@ -37,7 +37,7 @@ struct romfs_inode {
__be32 spec;
__be32 size;
__be32 checksum;
- char name[0];
+ char name[];
};
#define ROMFH_TYPE 7
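[Editor's note] The name[0] to name[] changes here (and the similar ones in the sctp, seg6 and rtnetlink hunks below) convert GCC zero-length arrays to C99 flexible array members. Allocation arithmetic is unchanged, since neither form contributes to sizeof; a hedged sketch with a hypothetical `volume` string:

#include <stdlib.h>
#include <string.h>

/* Sketch: sizeof(struct romfs_super_block) still excludes `name`,
 * exactly as it excluded the old zero-length array.
 */
size_t len = strlen(volume) + 1;
struct romfs_super_block *sb = malloc(sizeof(*sb) + len);

if (sb)
	memcpy(sb->name, volume, len);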
diff --git a/include/uapi/linux/rpl.h b/include/uapi/linux/rpl.h
new file mode 100644
index 000000000000..708adddf9f13
--- /dev/null
+++ b/include/uapi/linux/rpl.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * IPv6 RPL-SR implementation
+ *
+ * Author:
+ * (C) 2020 Alexander Aring <alex.aring@gmail.com>
+ */
+
+#ifndef _UAPI_LINUX_RPL_H
+#define _UAPI_LINUX_RPL_H
+
+#include <asm/byteorder.h>
+#include <linux/types.h>
+#include <linux/in6.h>
+
+/*
+ * RPL SR Header
+ */
+struct ipv6_rpl_sr_hdr {
+ __u8 nexthdr;
+ __u8 hdrlen;
+ __u8 type;
+ __u8 segments_left;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u32 cmpre:4,
+ cmpri:4,
+ reserved:4,
+ pad:4,
+ reserved1:16;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u32 cmpri:4,
+ cmpre:4,
+ pad:4,
+ reserved:20;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+
+ union {
+ struct in6_addr addr[0];
+ __u8 data[0];
+ } segments;
+} __attribute__((packed));
+
+#define rpl_segaddr segments.addr
+#define rpl_segdata segments.data
+
+#endif
diff --git a/include/uapi/linux/rpl_iptunnel.h b/include/uapi/linux/rpl_iptunnel.h
new file mode 100644
index 000000000000..f4eed1f92baa
--- /dev/null
+++ b/include/uapi/linux/rpl_iptunnel.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * IPv6 RPL-SR implementation
+ *
+ * Author:
+ * (C) 2020 Alexander Aring <alex.aring@gmail.com>
+ */
+
+#ifndef _UAPI_LINUX_RPL_IPTUNNEL_H
+#define _UAPI_LINUX_RPL_IPTUNNEL_H
+
+enum {
+ RPL_IPTUNNEL_UNSPEC,
+ RPL_IPTUNNEL_SRH,
+ __RPL_IPTUNNEL_MAX,
+};
+#define RPL_IPTUNNEL_MAX (__RPL_IPTUNNEL_MAX - 1)
+
+#define RPL_IPTUNNEL_SRH_SIZE(srh) (((srh)->hdrlen + 1) << 3)
+
+#endif
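[Editor's note] RPL_IPTUNNEL_SRH_SIZE converts hdrlen (counted in 8-octet units, excluding the first 8 octets) to a byte count. A sketch under the assumption of uncompressed addresses (cmpri == cmpre == 0), with process_segment() a hypothetical callback:

#include <linux/rpl.h>
#include <linux/rpl_iptunnel.h>

/* Sketch: walk the segment list of an uncompressed RPL SRH. The packed
 * fixed header is 8 bytes, so the remainder is whole IPv6 addresses.
 */
static void walk_segments(struct ipv6_rpl_sr_hdr *srh)
{
	size_t bytes = RPL_IPTUNNEL_SRH_SIZE(srh);	/* total header size */
	int i, nsegs = (bytes - sizeof(*srh)) / sizeof(struct in6_addr);

	for (i = 0; i < nsegs; i++)
		process_segment(&srh->rpl_segaddr[i]);
}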
diff --git a/include/uapi/linux/rpmsg.h b/include/uapi/linux/rpmsg.h
index e14c6dab4223..1637e68177d9 100644
--- a/include/uapi/linux/rpmsg.h
+++ b/include/uapi/linux/rpmsg.h
@@ -9,11 +9,13 @@
#include <linux/ioctl.h>
#include <linux/types.h>
+#define RPMSG_ADDR_ANY 0xFFFFFFFF
+
/**
* struct rpmsg_endpoint_info - endpoint info representation
* @name: name of service
- * @src: local address
- * @dst: destination address
+ * @src: local address. Set to RPMSG_ADDR_ANY if not used.
+ * @dst: destination address. Set to RPMSG_ADDR_ANY if not used.
*/
struct rpmsg_endpoint_info {
char name[32];
@@ -21,7 +23,24 @@ struct rpmsg_endpoint_info {
__u32 dst;
};
+/**
+ * Instantiate a new rpmsg char device endpoint.
+ */
#define RPMSG_CREATE_EPT_IOCTL _IOW(0xb5, 0x1, struct rpmsg_endpoint_info)
+
+/**
+ * Destroy an rpmsg char device endpoint created by the RPMSG_CREATE_EPT_IOCTL.
+ */
#define RPMSG_DESTROY_EPT_IOCTL _IO(0xb5, 0x2)
+/**
+ * Instantiate a new local rpmsg service device.
+ */
+#define RPMSG_CREATE_DEV_IOCTL _IOW(0xb5, 0x3, struct rpmsg_endpoint_info)
+
+/**
+ * Release a local rpmsg device.
+ */
+#define RPMSG_RELEASE_DEV_IOCTL _IOW(0xb5, 0x4, struct rpmsg_endpoint_info)
+
#endif
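[Editor's note] A sketch of the endpoint-creation flow through the rpmsg control device; the /dev/rpmsg_ctrl0 path and the service name are platform-dependent assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/rpmsg.h>

int main(void)
{
	struct rpmsg_endpoint_info info;
	int ctrl = open("/dev/rpmsg_ctrl0", O_RDWR);	/* assumed node */

	memset(&info, 0, sizeof(info));
	strncpy(info.name, "rpmsg-client-sample", sizeof(info.name) - 1);
	info.src = RPMSG_ADDR_ANY;	/* let the kernel pick addresses */
	info.dst = RPMSG_ADDR_ANY;

	if (ctrl < 0 || ioctl(ctrl, RPMSG_CREATE_EPT_IOCTL, &info) < 0)
		perror("RPMSG_CREATE_EPT_IOCTL");
	return 0;
}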
diff --git a/include/uapi/linux/rpmsg_types.h b/include/uapi/linux/rpmsg_types.h
new file mode 100644
index 000000000000..36e3b9404391
--- /dev/null
+++ b/include/uapi/linux/rpmsg_types.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_RPMSG_TYPES_H
+#define _UAPI_LINUX_RPMSG_TYPES_H
+
+#include <linux/types.h>
+
+typedef __u16 __bitwise __rpmsg16;
+typedef __u32 __bitwise __rpmsg32;
+typedef __u64 __bitwise __rpmsg64;
+
+#endif /* _UAPI_LINUX_RPMSG_TYPES_H */
diff --git a/include/uapi/linux/rseq.h b/include/uapi/linux/rseq.h
index 9a402fdb60e9..77ee207623a9 100644
--- a/include/uapi/linux/rseq.h
+++ b/include/uapi/linux/rseq.h
@@ -105,23 +105,11 @@ struct rseq {
* Read and set by the kernel. Set by user-space with single-copy
* atomicity semantics. This field should only be updated by the
* thread which registered this data structure. Aligned on 64-bit.
+ *
+ * 32-bit architectures should update the low order bits of the
+ * rseq_cs field, leaving the high order bits initialized to 0.
*/
- union {
- __u64 ptr64;
-#ifdef __LP64__
- __u64 ptr;
-#else
- struct {
-#if (defined(__BYTE_ORDER) && (__BYTE_ORDER == __BIG_ENDIAN)) || defined(__BIG_ENDIAN)
- __u32 padding; /* Initialized to zero. */
- __u32 ptr32;
-#else /* LITTLE */
- __u32 ptr32;
- __u32 padding; /* Initialized to zero. */
-#endif /* ENDIAN */
- } ptr;
-#endif
- } rseq_cs;
+ __u64 rseq_cs;
/*
* Restartable sequences flags field.
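[Editor's note] With the union gone, rseq_cs is a single __u64 on all architectures. A sketch of the store a 32-bit process now performs (`rs` and `cs` are hypothetical; real users write through a volatile/atomic store to preserve single-copy atomicity):

#include <stdint.h>
#include <linux/rseq.h>

/* Sketch: the cast zero-extends a 32-bit pointer, leaving the high
 * order bits 0 as the updated comment requires.
 */
static void set_rseq_cs(struct rseq *rs, struct rseq_cs *cs)
{
	rs->rseq_cs = (__u64)(uintptr_t)cs;
}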
diff --git a/include/uapi/linux/rtc.h b/include/uapi/linux/rtc.h
index 095af360326a..97aca4503a6a 100644
--- a/include/uapi/linux/rtc.h
+++ b/include/uapi/linux/rtc.h
@@ -12,6 +12,10 @@
#ifndef _UAPI_LINUX_RTC_H_
#define _UAPI_LINUX_RTC_H_
+#include <linux/const.h>
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
/*
* The struct used to pass data via the following ioctl. Similar to the
* struct tm in <time.h>, but it needs to be here so that the kernel
@@ -63,6 +67,17 @@ struct rtc_pll_info {
long pll_clock; /* base PLL frequency */
};
+struct rtc_param {
+ __u64 param;
+ union {
+ __u64 uvalue;
+ __s64 svalue;
+ __u64 ptr;
+ };
+ __u32 index;
+ __u32 __pad;
+};
+
/*
* ioctl calls that are permitted to the /dev/rtc interface, if
* any of the RTC drivers are enabled.
@@ -92,10 +107,14 @@ struct rtc_pll_info {
#define RTC_PLL_GET _IOR('p', 0x11, struct rtc_pll_info) /* Get PLL correction */
#define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */
-#define RTC_VL_DATA_INVALID BIT(0) /* Voltage too low, RTC data is invalid */
-#define RTC_VL_BACKUP_LOW BIT(1) /* Backup voltage is low */
-#define RTC_VL_BACKUP_EMPTY BIT(2) /* Backup empty or not present */
-#define RTC_VL_ACCURACY_LOW BIT(3) /* Voltage is low, RTC accuracy is reduced */
+#define RTC_PARAM_GET _IOW('p', 0x13, struct rtc_param) /* Get parameter */
+#define RTC_PARAM_SET _IOW('p', 0x14, struct rtc_param) /* Set parameter */
+
+#define RTC_VL_DATA_INVALID _BITUL(0) /* Voltage too low, RTC data is invalid */
+#define RTC_VL_BACKUP_LOW _BITUL(1) /* Backup voltage is low */
+#define RTC_VL_BACKUP_EMPTY _BITUL(2) /* Backup empty or not present */
+#define RTC_VL_ACCURACY_LOW _BITUL(3) /* Voltage is low, RTC accuracy is reduced */
+#define RTC_VL_BACKUP_SWITCH _BITUL(4) /* Backup switchover happened */
#define RTC_VL_READ _IOR('p', 0x13, unsigned int) /* Voltage low detection */
#define RTC_VL_CLR _IO('p', 0x14) /* Clear voltage low information */
@@ -106,6 +125,26 @@ struct rtc_pll_info {
#define RTC_AF 0x20 /* Alarm interrupt */
#define RTC_UF 0x10 /* Update interrupt for 1Hz RTC */
+/* feature list */
+#define RTC_FEATURE_ALARM 0
+#define RTC_FEATURE_ALARM_RES_MINUTE 1
+#define RTC_FEATURE_NEED_WEEK_DAY 2
+#define RTC_FEATURE_ALARM_RES_2S 3
+#define RTC_FEATURE_UPDATE_INTERRUPT 4
+#define RTC_FEATURE_CORRECTION 5
+#define RTC_FEATURE_BACKUP_SWITCH_MODE 6
+#define RTC_FEATURE_ALARM_WAKEUP_ONLY 7
+#define RTC_FEATURE_CNT 8
+
+/* parameter list */
+#define RTC_PARAM_FEATURES 0
+#define RTC_PARAM_CORRECTION 1
+#define RTC_PARAM_BACKUP_SWITCH_MODE 2
+
+#define RTC_BSM_DISABLED 0
+#define RTC_BSM_DIRECT 1
+#define RTC_BSM_LEVEL 2
+#define RTC_BSM_STANDBY 3
#define RTC_MAX_FREQ 8192
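[Editor's note] RTC_PARAM_GET/SET generalize the older one-off ioctls: the caller names a parameter and the kernel fills uvalue. A sketch reading the feature bitmap, assuming an RTC at /dev/rtc0:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/const.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_param param = { .param = RTC_PARAM_FEATURES };
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd >= 0 && ioctl(fd, RTC_PARAM_GET, &param) == 0 &&
	    (param.uvalue & _BITUL(RTC_FEATURE_ALARM)))
		puts("RTC supports alarms");
	return 0;
}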
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 4a8c5b745157..eb2747d58a81 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -146,6 +146,8 @@ enum {
#define RTM_NEWSTATS RTM_NEWSTATS
RTM_GETSTATS = 94,
#define RTM_GETSTATS RTM_GETSTATS
+ RTM_SETSTATS,
+#define RTM_SETSTATS RTM_SETSTATS
RTM_NEWCACHEREPORT = 96,
#define RTM_NEWCACHEREPORT RTM_NEWCACHEREPORT
@@ -178,6 +180,20 @@ enum {
RTM_GETVLAN,
#define RTM_GETVLAN RTM_GETVLAN
+ RTM_NEWNEXTHOPBUCKET = 116,
+#define RTM_NEWNEXTHOPBUCKET RTM_NEWNEXTHOPBUCKET
+ RTM_DELNEXTHOPBUCKET,
+#define RTM_DELNEXTHOPBUCKET RTM_DELNEXTHOPBUCKET
+ RTM_GETNEXTHOPBUCKET,
+#define RTM_GETNEXTHOPBUCKET RTM_GETNEXTHOPBUCKET
+
+ RTM_NEWTUNNEL = 120,
+#define RTM_NEWTUNNEL RTM_NEWTUNNEL
+ RTM_DELTUNNEL,
+#define RTM_DELTUNNEL RTM_DELTUNNEL
+ RTM_GETTUNNEL,
+#define RTM_GETTUNNEL RTM_GETTUNNEL
+
__RTM_MAX,
#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
};
@@ -257,12 +273,12 @@ enum {
/* rtm_protocol */
-#define RTPROT_UNSPEC 0
-#define RTPROT_REDIRECT 1 /* Route installed by ICMP redirects;
- not used by current IPv4 */
-#define RTPROT_KERNEL 2 /* Route installed by kernel */
-#define RTPROT_BOOT 3 /* Route installed during boot */
-#define RTPROT_STATIC 4 /* Route installed by administrator */
+#define RTPROT_UNSPEC 0
+#define RTPROT_REDIRECT 1 /* Route installed by ICMP redirects;
+ not used by current IPv4 */
+#define RTPROT_KERNEL 2 /* Route installed by kernel */
+#define RTPROT_BOOT 3 /* Route installed during boot */
+#define RTPROT_STATIC 4 /* Route installed by administrator */
/* Values of protocol >= RTPROT_STATIC are not interpreted by kernel;
they are just passed from user and back as is.
@@ -271,22 +287,24 @@ enum {
avoid conflicts.
*/
-#define RTPROT_GATED 8 /* Apparently, GateD */
-#define RTPROT_RA 9 /* RDISC/ND router advertisements */
-#define RTPROT_MRT 10 /* Merit MRT */
-#define RTPROT_ZEBRA 11 /* Zebra */
-#define RTPROT_BIRD 12 /* BIRD */
-#define RTPROT_DNROUTED 13 /* DECnet routing daemon */
-#define RTPROT_XORP 14 /* XORP */
-#define RTPROT_NTK 15 /* Netsukuku */
-#define RTPROT_DHCP 16 /* DHCP client */
-#define RTPROT_MROUTED 17 /* Multicast daemon */
-#define RTPROT_BABEL 42 /* Babel daemon */
-#define RTPROT_BGP 186 /* BGP Routes */
-#define RTPROT_ISIS 187 /* ISIS Routes */
-#define RTPROT_OSPF 188 /* OSPF Routes */
-#define RTPROT_RIP 189 /* RIP Routes */
-#define RTPROT_EIGRP 192 /* EIGRP Routes */
+#define RTPROT_GATED 8 /* Apparently, GateD */
+#define RTPROT_RA 9 /* RDISC/ND router advertisements */
+#define RTPROT_MRT 10 /* Merit MRT */
+#define RTPROT_ZEBRA 11 /* Zebra */
+#define RTPROT_BIRD 12 /* BIRD */
+#define RTPROT_DNROUTED 13 /* DECnet routing daemon */
+#define RTPROT_XORP 14 /* XORP */
+#define RTPROT_NTK 15 /* Netsukuku */
+#define RTPROT_DHCP 16 /* DHCP client */
+#define RTPROT_MROUTED 17 /* Multicast daemon */
+#define RTPROT_KEEPALIVED 18 /* Keepalived daemon */
+#define RTPROT_BABEL 42 /* Babel daemon */
+#define RTPROT_OPENR 99 /* Open Routing (Open/R) Routes */
+#define RTPROT_BGP 186 /* BGP Routes */
+#define RTPROT_ISIS 187 /* ISIS Routes */
+#define RTPROT_OSPF 188 /* OSPF Routes */
+#define RTPROT_RIP 189 /* RIP Routes */
+#define RTPROT_EIGRP 192 /* EIGRP Routes */
/* rtm_scope
@@ -318,6 +336,11 @@ enum rt_scope_t {
#define RTM_F_FIB_MATCH 0x2000 /* return full fib lookup match */
#define RTM_F_OFFLOAD 0x4000 /* route is offloaded */
#define RTM_F_TRAP 0x8000 /* route is trapping packets */
+#define RTM_F_OFFLOAD_FAILED 0x20000000 /* route offload failed, this value
+ * is chosen to avoid conflicts with
+ * other flags defined in
+ * include/uapi/linux/ipv6_route.h
+ */
/* Reserved table identifiers */
@@ -395,11 +418,13 @@ struct rtnexthop {
#define RTNH_F_DEAD 1 /* Nexthop is dead (used by multipath) */
#define RTNH_F_PERVASIVE 2 /* Do recursive gateway lookup */
#define RTNH_F_ONLINK 4 /* Gateway is forced on link */
-#define RTNH_F_OFFLOAD 8 /* offloaded route */
+#define RTNH_F_OFFLOAD 8 /* Nexthop is offloaded */
#define RTNH_F_LINKDOWN 16 /* carrier-down on nexthop */
#define RTNH_F_UNRESOLVED 32 /* The entry is unresolved (ipmr) */
+#define RTNH_F_TRAP 64 /* Nexthop is trapping packets */
-#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN | RTNH_F_OFFLOAD)
+#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN | \
+ RTNH_F_OFFLOAD | RTNH_F_TRAP)
/* Macros to handle nexthops */
@@ -415,7 +440,7 @@ struct rtnexthop {
/* RTA_VIA */
struct rtvia {
__kernel_sa_family_t rtvia_family;
- __u8 rtvia_addr[0];
+ __u8 rtvia_addr[];
};
/* RTM_CACHEINFO */
@@ -609,11 +634,17 @@ enum {
TCA_HW_OFFLOAD,
TCA_INGRESS_BLOCK,
TCA_EGRESS_BLOCK,
+ TCA_DUMP_FLAGS,
__TCA_MAX
};
#define TCA_MAX (__TCA_MAX - 1)
+#define TCA_DUMP_FLAGS_TERSE (1 << 0) /* In a dump, the user gets only the basic
+ * data necessary to identify the objects
+ * (handle, cookie, etc.) and stats.
+ */
+
#define TCA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct tcmsg))))
#define TCA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct tcmsg))
@@ -732,6 +763,12 @@ enum rtnetlink_groups {
#define RTNLGRP_NEXTHOP RTNLGRP_NEXTHOP
RTNLGRP_BRVLAN,
#define RTNLGRP_BRVLAN RTNLGRP_BRVLAN
+ RTNLGRP_MCTP_IFADDR,
+#define RTNLGRP_MCTP_IFADDR RTNLGRP_MCTP_IFADDR
+ RTNLGRP_TUNNEL,
+#define RTNLGRP_TUNNEL RTNLGRP_TUNNEL
+ RTNLGRP_STATS,
+#define RTNLGRP_STATS RTNLGRP_STATS
__RTNLGRP_MAX
};
#define RTNLGRP_MAX (__RTNLGRP_MAX - 1)
@@ -759,18 +796,28 @@ enum {
#define TA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct tcamsg))
/* tcamsg flags stored in attribute TCA_ROOT_FLAGS
*
- * TCA_FLAG_LARGE_DUMP_ON user->kernel to request for larger than TCA_ACT_MAX_PRIO
- * actions in a dump. All dump responses will contain the number of actions
- * being dumped stored in for user app's consumption in TCA_ROOT_COUNT
+ * TCA_ACT_FLAG_LARGE_DUMP_ON user->kernel to request more than
+ * TCA_ACT_MAX_PRIO actions in a dump. All dump responses will contain the
+ * number of actions being dumped, stored in TCA_ROOT_COUNT for the user
+ * app's consumption
+ *
+ * TCA_ACT_FLAG_TERSE_DUMP user->kernel to request terse (brief) dump that only
+ * includes essential action info (kind, index, etc.)
*
*/
#define TCA_FLAG_LARGE_DUMP_ON (1 << 0)
+#define TCA_ACT_FLAG_LARGE_DUMP_ON TCA_FLAG_LARGE_DUMP_ON
+#define TCA_ACT_FLAG_TERSE_DUMP (1 << 1)
/* New extended info filters for IFLA_EXT_MASK */
#define RTEXT_FILTER_VF (1 << 0)
#define RTEXT_FILTER_BRVLAN (1 << 1)
#define RTEXT_FILTER_BRVLAN_COMPRESSED (1 << 2)
#define RTEXT_FILTER_SKIP_STATS (1 << 3)
+#define RTEXT_FILTER_MRP (1 << 4)
+#define RTEXT_FILTER_CFM_CONFIG (1 << 5)
+#define RTEXT_FILTER_CFM_STATUS (1 << 6)
+#define RTEXT_FILTER_MST (1 << 7)
/* End of information exported to user level */
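[Editor's note] The new multicast groups sit well above bit 31, so membership cannot be expressed in the legacy sockaddr_nl.nl_groups bitmask; it has to go through setsockopt(). A sketch for the stats group:

#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

/* Sketch: listen for RTM_NEWSTATS notifications. */
int sk = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
int grp = RTNLGRP_STATS;

setsockopt(sk, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &grp, sizeof(grp));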
diff --git a/include/uapi/linux/rxrpc.h b/include/uapi/linux/rxrpc.h
index 4accfa7e266d..8f8dc7a937a4 100644
--- a/include/uapi/linux/rxrpc.h
+++ b/include/uapi/linux/rxrpc.h
@@ -51,11 +51,11 @@ enum rxrpc_cmsg_type {
RXRPC_BUSY = 6, /* -r: server busy received [terminal] */
RXRPC_LOCAL_ERROR = 7, /* -r: local error generated [terminal] */
RXRPC_NEW_CALL = 8, /* -r: [Service] new incoming call notification */
- RXRPC_ACCEPT = 9, /* s-: [Service] accept request */
RXRPC_EXCLUSIVE_CALL = 10, /* s-: Call should be on exclusive connection */
RXRPC_UPGRADE_SERVICE = 11, /* s-: Request service upgrade for client call */
RXRPC_TX_LENGTH = 12, /* s-: Total length of Tx data */
RXRPC_SET_CALL_TIMEOUT = 13, /* s-: Set one or more call timeouts */
+ RXRPC_CHARGE_ACCEPT = 14, /* s-: Charge the accept pool with a user call ID */
RXRPC__SUPPORTED
};
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 2e3bc22c6f20..3bac0a8ceab2 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -35,6 +35,7 @@
/* Flags for the clone3() syscall. */
#define CLONE_CLEAR_SIGHAND 0x100000000ULL /* Clear any signal handler and reset to SIG_DFL. */
+#define CLONE_INTO_CGROUP 0x200000000ULL /* Clone into a specific cgroup given the right permissions. */
/*
* cloning flags intersect with CSIGNAL so can be used with unshare and clone3
@@ -81,6 +82,8 @@
* @set_tid_size: This defines the size of the array referenced
* in @set_tid. This cannot be larger than the
* kernel's limit of nested PID namespaces.
+ * @cgroup: If CLONE_INTO_CGROUP is specified set this to
+ * a file descriptor for the cgroup.
*
* The structure is versioned by size and thus extensible.
* New struct members must go at the end of the struct and
@@ -97,11 +100,13 @@ struct clone_args {
__aligned_u64 tls;
__aligned_u64 set_tid;
__aligned_u64 set_tid_size;
+ __aligned_u64 cgroup;
};
#endif
#define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */
#define CLONE_ARGS_SIZE_VER1 80 /* sizeof second published struct */
+#define CLONE_ARGS_SIZE_VER2 88 /* sizeof third published struct */
/*
* Scheduling policies
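[Editor's note] Since clone_args is versioned by size, existing callers keep working; a caller opting into the new field passes the VER2 size. A sketch (there is no glibc wrapper, hence the raw syscall; cgroup_fd is a hypothetical descriptor for the destination cgroup directory):

#include <signal.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/sched.h>

static pid_t clone3_into_cgroup(int cgroup_fd)
{
	struct clone_args args;

	memset(&args, 0, sizeof(args));
	args.flags = CLONE_INTO_CGROUP;
	args.exit_signal = SIGCHLD;
	args.cgroup = cgroup_fd;

	/* CLONE_ARGS_SIZE_VER2 == 88, the size including @cgroup */
	return syscall(SYS_clone3, &args, CLONE_ARGS_SIZE_VER2);
}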
diff --git a/include/uapi/linux/sched/types.h b/include/uapi/linux/sched/types.h
index c852153ddb0d..f2c4589d4dbf 100644
--- a/include/uapi/linux/sched/types.h
+++ b/include/uapi/linux/sched/types.h
@@ -96,6 +96,8 @@ struct sched_param {
* on a CPU with a capacity big enough to fit the specified value.
* A task with a max utilization value smaller than 1024 is more likely
* scheduled on a CPU with no more capacity than the specified value.
+ *
+ * A task utilization boundary can be reset by setting the attribute to -1.
*/
struct sched_attr {
__u32 size;
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index 28ad40d9acba..ed7d4ecbf53d 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -140,6 +140,8 @@ typedef __s32 sctp_assoc_t;
#define SCTP_ECN_SUPPORTED 130
#define SCTP_EXPOSE_POTENTIALLY_FAILED_STATE 131
#define SCTP_EXPOSE_PF_STATE SCTP_EXPOSE_POTENTIALLY_FAILED_STATE
+#define SCTP_REMOTE_UDP_ENCAPS_PORT 132
+#define SCTP_PLPMTUD_PROBE_INTERVAL 133
/* PR-SCTP policies */
#define SCTP_PR_SCTP_NONE 0x0000
@@ -363,7 +365,7 @@ struct sctp_assoc_change {
__u16 sac_outbound_streams;
__u16 sac_inbound_streams;
sctp_assoc_t sac_assoc_id;
- __u8 sac_info[0];
+ __u8 sac_info[];
};
/*
@@ -434,7 +436,7 @@ struct sctp_remote_error {
__u32 sre_length;
__be16 sre_error;
sctp_assoc_t sre_assoc_id;
- __u8 sre_data[0];
+ __u8 sre_data[];
};
@@ -451,7 +453,7 @@ struct sctp_send_failed {
__u32 ssf_error;
struct sctp_sndrcvinfo ssf_info;
sctp_assoc_t ssf_assoc_id;
- __u8 ssf_data[0];
+ __u8 ssf_data[];
};
struct sctp_send_failed_event {
@@ -461,7 +463,7 @@ struct sctp_send_failed_event {
__u32 ssf_error;
struct sctp_sndinfo ssfe_info;
sctp_assoc_t ssf_assoc_id;
- __u8 ssf_data[0];
+ __u8 ssf_data[];
};
/*
@@ -1027,7 +1029,7 @@ struct sctp_getaddrs_old {
struct sctp_getaddrs {
sctp_assoc_t assoc_id; /*input*/
__u32 addr_num; /*output*/
- __u8 addrs[0]; /*output, variable size*/
+ __u8 addrs[]; /*output, variable size*/
};
/* A socket user request obtained via SCTP_GET_ASSOC_STATS that retrieves
@@ -1197,6 +1199,12 @@ struct sctp_event {
uint8_t se_on;
};
+struct sctp_udpencaps {
+ sctp_assoc_t sue_assoc_id;
+ struct sockaddr_storage sue_address;
+ uint16_t sue_port;
+};
+
/* SCTP Stream schedulers */
enum sctp_sched_type {
SCTP_SS_FCFS,
@@ -1206,4 +1214,11 @@ enum sctp_sched_type {
SCTP_SS_MAX = SCTP_SS_RR
};
+/* Probe Interval socket option */
+struct sctp_probeinterval {
+ sctp_assoc_t spi_assoc_id;
+ struct sockaddr_storage spi_address;
+ __u32 spi_interval;
+};
+
#endif /* _UAPI_SCTP_H */
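[Editor's note] A sketch of the new UDP encapsulation option (RFC 6951): a non-zero remote port enables encapsulation towards that peer, 0 disables it. The filled-in `peer` address is an assumption:

#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/sctp.h>

/* Sketch: encapsulate SCTP towards `peer` in UDP port 9899, the
 * IANA-registered SCTP-over-UDP port.
 */
static int enable_udp_encaps(int fd, const struct sockaddr_in *peer)
{
	struct sctp_udpencaps encaps;

	memset(&encaps, 0, sizeof(encaps));
	encaps.sue_assoc_id = SCTP_FUTURE_ASSOC;
	memcpy(&encaps.sue_address, peer, sizeof(*peer));
	encaps.sue_port = htons(9899);

	return setsockopt(fd, IPPROTO_SCTP, SCTP_REMOTE_UDP_ENCAPS_PORT,
			  &encaps, sizeof(encaps));
}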
diff --git a/include/uapi/linux/sdla.h b/include/uapi/linux/sdla.h
deleted file mode 100644
index 1e3735be6511..000000000000
--- a/include/uapi/linux/sdla.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-/*
- * INET An implementation of the TCP/IP protocol suite for the LINUX
- * operating system. INET is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * Global definitions for the Frame relay interface.
- *
- * Version: @(#)if_ifrad.h 0.20 13 Apr 96
- *
- * Author: Mike McLagan <mike.mclagan@linux.org>
- *
- * Changes:
- * 0.15 Mike McLagan Structure packing
- *
- * 0.20 Mike McLagan New flags for S508 buffer handling
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _UAPISDLA_H
-#define _UAPISDLA_H
-
-/* adapter type */
-#define SDLA_TYPES
-#define SDLA_S502A 5020
-#define SDLA_S502E 5021
-#define SDLA_S503 5030
-#define SDLA_S507 5070
-#define SDLA_S508 5080
-#define SDLA_S509 5090
-#define SDLA_UNKNOWN -1
-
-/* port selection flags for the S508 */
-#define SDLA_S508_PORT_V35 0x00
-#define SDLA_S508_PORT_RS232 0x02
-
-/* Z80 CPU speeds */
-#define SDLA_CPU_3M 0x00
-#define SDLA_CPU_5M 0x01
-#define SDLA_CPU_7M 0x02
-#define SDLA_CPU_8M 0x03
-#define SDLA_CPU_10M 0x04
-#define SDLA_CPU_16M 0x05
-#define SDLA_CPU_12M 0x06
-
-/* some private IOCTLs */
-#define SDLA_IDENTIFY (FRAD_LAST_IOCTL + 1)
-#define SDLA_CPUSPEED (FRAD_LAST_IOCTL + 2)
-#define SDLA_PROTOCOL (FRAD_LAST_IOCTL + 3)
-
-#define SDLA_CLEARMEM (FRAD_LAST_IOCTL + 4)
-#define SDLA_WRITEMEM (FRAD_LAST_IOCTL + 5)
-#define SDLA_READMEM (FRAD_LAST_IOCTL + 6)
-
-struct sdla_mem {
- int addr;
- int len;
- void __user *data;
-};
-
-#define SDLA_START (FRAD_LAST_IOCTL + 7)
-#define SDLA_STOP (FRAD_LAST_IOCTL + 8)
-
-/* some offsets in the Z80's memory space */
-#define SDLA_NMIADDR 0x0000
-#define SDLA_CONF_ADDR 0x0010
-#define SDLA_S502A_NMIADDR 0x0066
-#define SDLA_CODE_BASEADDR 0x0100
-#define SDLA_WINDOW_SIZE 0x2000
-#define SDLA_ADDR_MASK 0x1FFF
-
-/* largest handleable block of data */
-#define SDLA_MAX_DATA 4080
-#define SDLA_MAX_MTU 4072 /* MAX_DATA - sizeof(fradhdr) */
-#define SDLA_MAX_DLCI 24
-
-/* this should be the same as frad_conf */
-struct sdla_conf {
- short station;
- short config;
- short kbaud;
- short clocking;
- short max_frm;
- short T391;
- short T392;
- short N391;
- short N392;
- short N393;
- short CIR_fwd;
- short Bc_fwd;
- short Be_fwd;
- short CIR_bwd;
- short Bc_bwd;
- short Be_bwd;
-};
-
-/* this should be the same as dlci_conf */
-struct sdla_dlci_conf {
- short config;
- short CIR_fwd;
- short Bc_fwd;
- short Be_fwd;
- short CIR_bwd;
- short Bc_bwd;
- short Be_bwd;
- short Tc_fwd;
- short Tc_bwd;
- short Tf_max;
- short Tb_max;
-};
-
-
-#endif /* _UAPISDLA_H */
diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
index be84d87f1f46..0fdc6ef02b94 100644
--- a/include/uapi/linux/seccomp.h
+++ b/include/uapi/linux/seccomp.h
@@ -22,6 +22,9 @@
#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
#define SECCOMP_FILTER_FLAG_NEW_LISTENER (1UL << 3)
+#define SECCOMP_FILTER_FLAG_TSYNC_ESRCH (1UL << 4)
+/* Received notifications wait in killable state (only respond to fatal signals) */
+#define SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV (1UL << 5)
/*
* All BPF programs must return a 32-bit value.
@@ -112,6 +115,26 @@ struct seccomp_notif_resp {
__u32 flags;
};
+/* valid flags for seccomp_notif_addfd */
+#define SECCOMP_ADDFD_FLAG_SETFD (1UL << 0) /* Specify remote fd */
+#define SECCOMP_ADDFD_FLAG_SEND (1UL << 1) /* Addfd and return it, atomically */
+
+/**
+ * struct seccomp_notif_addfd
+ * @id: The ID of the seccomp notification
+ * @flags: SECCOMP_ADDFD_FLAG_*
+ * @srcfd: The local fd number
+ * @newfd: Optional remote FD number if SETFD option is set, otherwise 0.
+ * @newfd_flags: The O_* flags the remote FD should have applied
+ */
+struct seccomp_notif_addfd {
+ __u64 id;
+ __u32 flags;
+ __u32 srcfd;
+ __u32 newfd;
+ __u32 newfd_flags;
+};
+
#define SECCOMP_IOC_MAGIC '!'
#define SECCOMP_IO(nr) _IO(SECCOMP_IOC_MAGIC, nr)
#define SECCOMP_IOR(nr, type) _IOR(SECCOMP_IOC_MAGIC, nr, type)
@@ -122,5 +145,9 @@ struct seccomp_notif_resp {
#define SECCOMP_IOCTL_NOTIF_RECV SECCOMP_IOWR(0, struct seccomp_notif)
#define SECCOMP_IOCTL_NOTIF_SEND SECCOMP_IOWR(1, \
struct seccomp_notif_resp)
-#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOR(2, __u64)
+#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOW(2, __u64)
+/* On success, the return value is the remote process's added fd number */
+#define SECCOMP_IOCTL_NOTIF_ADDFD SECCOMP_IOW(3, \
+ struct seccomp_notif_addfd)
+
#endif /* _UAPI_LINUX_SECCOMP_H */
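[Editor's note] A sketch of the addfd flow from a seccomp supervisor: notify_fd comes from SECCOMP_FILTER_FLAG_NEW_LISTENER and `req` is a notification already received via SECCOMP_IOCTL_NOTIF_RECV; with SECCOMP_ADDFD_FLAG_SEND the fd is installed and the syscall answered in one atomic step:

#include <sys/ioctl.h>
#include <linux/seccomp.h>

/* Sketch: inject `srcfd` into the target and return its remote number
 * there as the result of the intercepted syscall.
 */
static int addfd_and_send(int notify_fd, const struct seccomp_notif *req,
			  int srcfd)
{
	struct seccomp_notif_addfd addfd = {
		.id    = req->id,
		.flags = SECCOMP_ADDFD_FLAG_SEND,
		.srcfd = srcfd,
	};

	return ioctl(notify_fd, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
}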
diff --git a/include/uapi/linux/sed-opal.h b/include/uapi/linux/sed-opal.h
index 6f5af1a84213..2573772e2fb3 100644
--- a/include/uapi/linux/sed-opal.h
+++ b/include/uapi/linux/sed-opal.h
@@ -132,6 +132,18 @@ struct opal_read_write_table {
__u64 priv;
};
+#define OPAL_FL_SUPPORTED 0x00000001
+#define OPAL_FL_LOCKING_SUPPORTED 0x00000002
+#define OPAL_FL_LOCKING_ENABLED 0x00000004
+#define OPAL_FL_LOCKED 0x00000008
+#define OPAL_FL_MBR_ENABLED 0x00000010
+#define OPAL_FL_MBR_DONE 0x00000020
+
+struct opal_status {
+ __u32 flags;
+ __u32 reserved;
+};
+
#define IOC_OPAL_SAVE _IOW('p', 220, struct opal_lock_unlock)
#define IOC_OPAL_LOCK_UNLOCK _IOW('p', 221, struct opal_lock_unlock)
#define IOC_OPAL_TAKE_OWNERSHIP _IOW('p', 222, struct opal_key)
@@ -148,5 +160,6 @@ struct opal_read_write_table {
#define IOC_OPAL_MBR_DONE _IOW('p', 233, struct opal_mbr_done)
#define IOC_OPAL_WRITE_SHADOW_MBR _IOW('p', 234, struct opal_shadow_mbr)
#define IOC_OPAL_GENERIC_TABLE_RW _IOW('p', 235, struct opal_read_write_table)
+#define IOC_OPAL_GET_STATUS _IOR('p', 236, struct opal_status)
#endif /* _UAPI_SED_OPAL_H */
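[Editor's note] A sketch of the new status query, which reports locking state without the key material the other IOC_OPAL_* calls require; the device path is an example:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sed-opal.h>

int main(void)
{
	struct opal_status st = { 0 };
	int fd = open("/dev/nvme0n1", O_RDONLY);	/* example device */

	if (fd >= 0 && ioctl(fd, IOC_OPAL_GET_STATUS, &st) == 0)
		printf("opal flags: 0x%x locked: %d\n", st.flags,
		       !!(st.flags & OPAL_FL_LOCKED));
	return 0;
}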
diff --git a/include/uapi/linux/seg6.h b/include/uapi/linux/seg6.h
index 286e8d6a8e98..13bcbc8bba32 100644
--- a/include/uapi/linux/seg6.h
+++ b/include/uapi/linux/seg6.h
@@ -30,7 +30,7 @@ struct ipv6_sr_hdr {
__u8 flags;
__u16 tag;
- struct in6_addr segments[0];
+ struct in6_addr segments[];
};
#define SR6_FLAG1_PROTECTED (1 << 6)
diff --git a/include/uapi/linux/seg6_iptunnel.h b/include/uapi/linux/seg6_iptunnel.h
index 09fb608a35ec..ae78791372b8 100644
--- a/include/uapi/linux/seg6_iptunnel.h
+++ b/include/uapi/linux/seg6_iptunnel.h
@@ -26,7 +26,7 @@ enum {
struct seg6_iptunnel_encap {
int mode;
- struct ipv6_sr_hdr srh[0];
+ struct ipv6_sr_hdr srh[];
};
#define SEG6_IPTUN_ENCAP_SIZE(x) ((sizeof(*x)) + (((x)->srh->hdrlen + 1) << 3))
@@ -35,27 +35,8 @@ enum {
SEG6_IPTUN_MODE_INLINE,
SEG6_IPTUN_MODE_ENCAP,
SEG6_IPTUN_MODE_L2ENCAP,
+ SEG6_IPTUN_MODE_ENCAP_RED,
+ SEG6_IPTUN_MODE_L2ENCAP_RED,
};
-#ifdef __KERNEL__
-
-static inline size_t seg6_lwt_headroom(struct seg6_iptunnel_encap *tuninfo)
-{
- int head = 0;
-
- switch (tuninfo->mode) {
- case SEG6_IPTUN_MODE_INLINE:
- break;
- case SEG6_IPTUN_MODE_ENCAP:
- head = sizeof(struct ipv6hdr);
- break;
- case SEG6_IPTUN_MODE_L2ENCAP:
- return 0;
- }
-
- return ((tuninfo->srh->hdrlen + 1) << 3) + head;
-}
-
-#endif
-
#endif
diff --git a/include/uapi/linux/seg6_local.h b/include/uapi/linux/seg6_local.h
index edc138bdc56d..4fdc424c9cb3 100644
--- a/include/uapi/linux/seg6_local.h
+++ b/include/uapi/linux/seg6_local.h
@@ -26,6 +26,9 @@ enum {
SEG6_LOCAL_IIF,
SEG6_LOCAL_OIF,
SEG6_LOCAL_BPF,
+ SEG6_LOCAL_VRFTABLE,
+ SEG6_LOCAL_COUNTERS,
+ SEG6_LOCAL_FLAVORS,
__SEG6_LOCAL_MAX,
};
#define SEG6_LOCAL_MAX (__SEG6_LOCAL_MAX - 1)
@@ -62,6 +65,8 @@ enum {
SEG6_LOCAL_ACTION_END_AM = 14,
/* custom BPF action */
SEG6_LOCAL_ACTION_END_BPF = 15,
+ /* decap and lookup of DA in v4 or v6 table */
+ SEG6_LOCAL_ACTION_END_DT46 = 16,
__SEG6_LOCAL_ACTION_MAX,
};
@@ -77,4 +82,56 @@ enum {
#define SEG6_LOCAL_BPF_PROG_MAX (__SEG6_LOCAL_BPF_PROG_MAX - 1)
+/* SRv6 Behavior counters are encoded as netlink attributes guaranteeing the
+ * correct alignment.
+ * Each counter is identified by a different attribute type (i.e.
+ * SEG6_LOCAL_CNT_PACKETS).
+ *
+ * - SEG6_LOCAL_CNT_PACKETS: identifies a counter that counts the number of
+ * packets that have been CORRECTLY processed by an SRv6 Behavior instance
+ * (i.e., packets that generate errors or are dropped are NOT counted).
+ *
+ * - SEG6_LOCAL_CNT_BYTES: identifies a counter that counts the total amount
+ * of traffic in bytes of all packets that have been CORRECTLY processed by
+ * an SRv6 Behavior instance (i.e., packets that generate errors or are
+ * dropped are NOT counted).
+ *
+ * - SEG6_LOCAL_CNT_ERRORS: identifies a counter that counts the number of
+ * packets that have NOT been properly processed by an SRv6 Behavior instance
+ * (i.e., packets that generate errors or are dropped).
+ */
+enum {
+ SEG6_LOCAL_CNT_UNSPEC,
+ SEG6_LOCAL_CNT_PAD, /* pad for 64 bits values */
+ SEG6_LOCAL_CNT_PACKETS,
+ SEG6_LOCAL_CNT_BYTES,
+ SEG6_LOCAL_CNT_ERRORS,
+ __SEG6_LOCAL_CNT_MAX,
+};
+
+#define SEG6_LOCAL_CNT_MAX (__SEG6_LOCAL_CNT_MAX - 1)
+
+/* SRv6 End* Flavor attributes */
+enum {
+ SEG6_LOCAL_FLV_UNSPEC,
+ SEG6_LOCAL_FLV_OPERATION,
+ SEG6_LOCAL_FLV_LCBLOCK_BITS,
+ SEG6_LOCAL_FLV_LCNODE_FN_BITS,
+ __SEG6_LOCAL_FLV_MAX,
+};
+
+#define SEG6_LOCAL_FLV_MAX (__SEG6_LOCAL_FLV_MAX - 1)
+
+/* Designed flavor operations for SRv6 End* Behavior */
+enum {
+ SEG6_LOCAL_FLV_OP_UNSPEC,
+ SEG6_LOCAL_FLV_OP_PSP,
+ SEG6_LOCAL_FLV_OP_USP,
+ SEG6_LOCAL_FLV_OP_USD,
+ SEG6_LOCAL_FLV_OP_NEXT_CSID,
+ __SEG6_LOCAL_FLV_OP_MAX
+};
+
+#define SEG6_LOCAL_FLV_OP_MAX (__SEG6_LOCAL_FLV_OP_MAX - 1)
+
#endif
diff --git a/include/uapi/linux/serial.h b/include/uapi/linux/serial.h
index 93eb3c496ff1..cea06924b295 100644
--- a/include/uapi/linux/serial.h
+++ b/include/uapi/linux/serial.h
@@ -52,11 +52,11 @@ struct serial_struct {
#define PORT_16450 2
#define PORT_16550 3
#define PORT_16550A 4
-#define PORT_CIRRUS 5 /* usurped by cyclades.c */
+#define PORT_CIRRUS 5
#define PORT_16650 6
#define PORT_16650V2 7
#define PORT_16750 8
-#define PORT_STARTECH 9 /* usurped by cyclades.c */
+#define PORT_STARTECH 9
#define PORT_16C950 10 /* Oxford Semiconductor */
#define PORT_16654 11
#define PORT_16850 12
@@ -126,10 +126,26 @@ struct serial_rs485 {
#define SER_RS485_TERMINATE_BUS (1 << 5) /* Enable bus
termination
(if supported) */
+
+/* RS-485 addressing mode */
+#define SER_RS485_ADDRB (1 << 6) /* Enable addressing mode */
+#define SER_RS485_ADDR_RECV (1 << 7) /* Receive address filter */
+#define SER_RS485_ADDR_DEST (1 << 8) /* Destination address */
+
__u32 delay_rts_before_send; /* Delay before send (milliseconds) */
__u32 delay_rts_after_send; /* Delay after send (milliseconds) */
- __u32 padding[5]; /* Memory is cheap, new structs
- are a royal PITA .. */
+
+ /* The fields below are defined by flags */
+ union {
+ __u32 padding[5]; /* Memory is cheap, new structs are a pain */
+
+ struct {
+ __u8 addr_recv;
+ __u8 addr_dest;
+ __u8 padding0[2];
+ __u32 padding1[4];
+ };
+ };
};
/*
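[Editor's note] The padding union keeps the struct size stable while giving the new address fields a home. A sketch of enabling addressing mode through the existing TIOCSRS485 ioctl (`tty_fd` and the 0x45 filter address are examples):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

/* Sketch: receive only frames addressed to us on an RS-485 bus. */
static int enable_rs485_addr(int tty_fd)
{
	struct serial_rs485 rs485;

	memset(&rs485, 0, sizeof(rs485));
	rs485.flags = SER_RS485_ENABLED | SER_RS485_ADDRB |
		      SER_RS485_ADDR_RECV;
	rs485.addr_recv = 0x45;	/* example filter address */

	return ioctl(tty_fd, TIOCSRS485, &rs485);
}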
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index 8ec3dd742ea4..3ba34d8378bd 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -26,20 +26,6 @@
/*
* The type definitions. These are from Ted Ts'o's serial.h
*/
-#define PORT_UNKNOWN 0
-#define PORT_8250 1
-#define PORT_16450 2
-#define PORT_16550 3
-#define PORT_16550A 4
-#define PORT_CIRRUS 5
-#define PORT_16650 6
-#define PORT_16650V2 7
-#define PORT_16750 8
-#define PORT_STARTECH 9
-#define PORT_16C950 10
-#define PORT_16654 11
-#define PORT_16850 12
-#define PORT_RSA 13
#define PORT_NS16550A 14
#define PORT_XSCALE 15
#define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */
@@ -82,6 +68,9 @@
/* NVIDIA Tegra Combined UART */
#define PORT_TEGRA_TCU 41
+/* ASPEED AST2x00 virtual UART */
+#define PORT_ASPEED_VUART 42
+
/* Intel EG20 */
#define PORT_PCH_8LINE 44
#define PORT_PCH_2LINE 45
@@ -135,10 +124,6 @@
/* TXX9 type number */
#define PORT_TXX9 64
-/* NEC VR4100 series SIU/DSIU */
-#define PORT_VR41XX_SIU 65
-#define PORT_VR41XX_DSIU 66
-
/* Samsung S3C2400 SoC */
#define PORT_S3C2400 67
@@ -148,8 +133,6 @@
/*Digi jsm */
#define PORT_JSM 69
-#define PORT_PNX8XXX 70
-
/* SUN4V Hypervisor Console */
#define PORT_SUNHV 72
@@ -224,9 +207,6 @@
/* Atheros AR933X SoC */
#define PORT_AR933X 99
-/* Energy Micro efm32 SoC */
-#define PORT_EFMUART 100
-
/* ARC (Synopsys) on-chip UART */
#define PORT_ARC 101
@@ -293,4 +273,7 @@
/* Freescale LINFlexD UART */
#define PORT_LINFLEXUART 122
+/* Sunplus UART */
+#define PORT_SUNPLUS 123
+
#endif /* _UAPILINUX_SERIAL_CORE_H */
diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h
index be07b5470f4b..bab3b39266cc 100644
--- a/include/uapi/linux/serial_reg.h
+++ b/include/uapi/linux/serial_reg.h
@@ -62,6 +62,7 @@
* ST16C654: 8 16 56 60 8 16 32 56 PORT_16654
* TI16C750: 1 16 32 56 xx xx xx xx PORT_16750
* TI16C752: 8 16 56 60 8 16 32 56
+ * OX16C950: 16 32 112 120 16 32 64 112 PORT_16C950
* Tegra: 1 4 8 14 16 8 4 1 PORT_TEGRA
*/
#define UART_FCR_R_TRIG_00 0x00
@@ -138,7 +139,7 @@
#define UART_LSR_PE 0x04 /* Parity error indicator */
#define UART_LSR_OE 0x02 /* Overrun error indicator */
#define UART_LSR_DR 0x01 /* Receiver data ready */
-#define UART_LSR_BRK_ERROR_BITS 0x1E /* BI, FE, PE, OE bits */
+#define UART_LSR_BRK_ERROR_BITS (UART_LSR_BI|UART_LSR_FE|UART_LSR_PE|UART_LSR_OE)
#define UART_MSR 6 /* In: Modem Status Register */
#define UART_MSR_DCD 0x80 /* Data Carrier Detect */
@@ -149,7 +150,7 @@
#define UART_MSR_TERI 0x04 /* Trailing edge ring indicator */
#define UART_MSR_DDSR 0x02 /* Delta DSR */
#define UART_MSR_DCTS 0x01 /* Delta CTS */
-#define UART_MSR_ANY_DELTA 0x0F /* Any of the delta bits! */
+#define UART_MSR_ANY_DELTA (UART_MSR_DDCD|UART_MSR_TERI|UART_MSR_DDSR|UART_MSR_DCTS)
#define UART_SCR 7 /* I/O: Scratch Register */
diff --git a/include/uapi/linux/serio.h b/include/uapi/linux/serio.h
index 50e991952c97..ed2a96f43ce4 100644
--- a/include/uapi/linux/serio.h
+++ b/include/uapi/linux/serio.h
@@ -9,7 +9,7 @@
#ifndef _UAPI_SERIO_H
#define _UAPI_SERIO_H
-
+#include <linux/const.h>
#include <linux/ioctl.h>
#define SPIOCSTYPE _IOW('q', 0x01, unsigned long)
@@ -18,10 +18,10 @@
/*
* bit masks for use in "interrupt" flags (3rd argument)
*/
-#define SERIO_TIMEOUT BIT(0)
-#define SERIO_PARITY BIT(1)
-#define SERIO_FRAME BIT(2)
-#define SERIO_OOB_DATA BIT(3)
+#define SERIO_TIMEOUT _BITUL(0)
+#define SERIO_PARITY _BITUL(1)
+#define SERIO_FRAME _BITUL(2)
+#define SERIO_OOB_DATA _BITUL(3)
/*
* Serio types
diff --git a/include/uapi/linux/sev-guest.h b/include/uapi/linux/sev-guest.h
new file mode 100644
index 000000000000..256aaeff7e65
--- /dev/null
+++ b/include/uapi/linux/sev-guest.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Userspace interface for AMD SEV and SNP guest driver.
+ *
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * SEV API specification is available at: https://developer.amd.com/sev/
+ */
+
+#ifndef __UAPI_LINUX_SEV_GUEST_H_
+#define __UAPI_LINUX_SEV_GUEST_H_
+
+#include <linux/types.h>
+
+struct snp_report_req {
+ /* user data that should be included in the report */
+ __u8 user_data[64];
+
+ /* The vmpl level to be included in the report */
+ __u32 vmpl;
+
+ /* Must be zero filled */
+ __u8 rsvd[28];
+};
+
+struct snp_report_resp {
+ /* response data, see SEV-SNP spec for the format */
+ __u8 data[4000];
+};
+
+struct snp_derived_key_req {
+ __u32 root_key_select;
+ __u32 rsvd;
+ __u64 guest_field_select;
+ __u32 vmpl;
+ __u32 guest_svn;
+ __u64 tcb_version;
+};
+
+struct snp_derived_key_resp {
+ /* response data, see SEV-SNP spec for the format */
+ __u8 data[64];
+};
+
+struct snp_guest_request_ioctl {
+ /* message version number (must be non-zero) */
+ __u8 msg_version;
+
+ /* Request and response structure address */
+ __u64 req_data;
+ __u64 resp_data;
+
+ /* firmware error code on failure (see psp-sev.h) */
+ __u64 fw_err;
+};
+
+struct snp_ext_report_req {
+ struct snp_report_req data;
+
+ /* where to copy the certificate blob */
+ __u64 certs_address;
+
+ /* length of the certificate blob */
+ __u32 certs_len;
+};
+
+#define SNP_GUEST_REQ_IOC_TYPE 'S'
+
+/* Get SNP attestation report */
+#define SNP_GET_REPORT _IOWR(SNP_GUEST_REQ_IOC_TYPE, 0x0, struct snp_guest_request_ioctl)
+
+/* Get a derived key from the root */
+#define SNP_GET_DERIVED_KEY _IOWR(SNP_GUEST_REQ_IOC_TYPE, 0x1, struct snp_guest_request_ioctl)
+
+/* Get SNP extended report as defined in the GHCB specification version 2. */
+#define SNP_GET_EXT_REPORT _IOWR(SNP_GUEST_REQ_IOC_TYPE, 0x2, struct snp_guest_request_ioctl)
+
+#endif /* __UAPI_LINUX_SEV_GUEST_H_ */
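
As a rough illustration of the request flow, the sketch below asks the firmware for an attestation report over 64 bytes of caller-supplied nonce data. It assumes the SNP guest driver exposes its misc device as /dev/sev-guest; error handling is minimal:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/sev-guest.h>

    int get_snp_report(const unsigned char nonce[64], struct snp_report_resp *resp)
    {
            struct snp_report_req req = { 0 };
            struct snp_guest_request_ioctl guest_req = { 0 };
            int fd, ret;

            fd = open("/dev/sev-guest", O_RDWR);
            if (fd < 0)
                    return -1;
            memcpy(req.user_data, nonce, sizeof(req.user_data));
            guest_req.msg_version = 1; /* must be non-zero */
            guest_req.req_data = (__u64)(unsigned long)&req;
            guest_req.resp_data = (__u64)(unsigned long)resp;
            ret = ioctl(fd, SNP_GET_REPORT, &guest_req);
            close(fd);
            return ret; /* on failure, guest_req.fw_err holds the firmware error */
    }
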
diff --git a/include/uapi/linux/smc.h b/include/uapi/linux/smc.h
index 0e11ca421ca4..bb4dacca31e7 100644
--- a/include/uapi/linux/smc.h
+++ b/include/uapi/linux/smc.h
@@ -33,4 +33,271 @@ enum { /* SMC PNET Table commands */
#define SMCR_GENL_FAMILY_NAME "SMC_PNETID"
#define SMCR_GENL_FAMILY_VERSION 1
+/* gennetlink interface to access non-socket information from SMC module */
+#define SMC_GENL_FAMILY_NAME "SMC_GEN_NETLINK"
+#define SMC_GENL_FAMILY_VERSION 1
+
+#define SMC_PCI_ID_STR_LEN 16 /* Max length of pci id string */
+#define SMC_MAX_HOSTNAME_LEN 32 /* Max length of the hostname */
+#define SMC_MAX_UEID 4 /* Max number of user EIDs */
+#define SMC_MAX_EID_LEN 32 /* Max length of an EID */
+
+/* SMC_GENL_FAMILY commands */
+enum {
+ SMC_NETLINK_GET_SYS_INFO = 1,
+ SMC_NETLINK_GET_LGR_SMCR,
+ SMC_NETLINK_GET_LINK_SMCR,
+ SMC_NETLINK_GET_LGR_SMCD,
+ SMC_NETLINK_GET_DEV_SMCD,
+ SMC_NETLINK_GET_DEV_SMCR,
+ SMC_NETLINK_GET_STATS,
+ SMC_NETLINK_GET_FBACK_STATS,
+ SMC_NETLINK_DUMP_UEID,
+ SMC_NETLINK_ADD_UEID,
+ SMC_NETLINK_REMOVE_UEID,
+ SMC_NETLINK_FLUSH_UEID,
+ SMC_NETLINK_DUMP_SEID,
+ SMC_NETLINK_ENABLE_SEID,
+ SMC_NETLINK_DISABLE_SEID,
+ SMC_NETLINK_DUMP_HS_LIMITATION,
+ SMC_NETLINK_ENABLE_HS_LIMITATION,
+ SMC_NETLINK_DISABLE_HS_LIMITATION,
+};
+
+/* SMC_GENL_FAMILY top level attributes */
+enum {
+ SMC_GEN_UNSPEC,
+ SMC_GEN_SYS_INFO, /* nest */
+ SMC_GEN_LGR_SMCR, /* nest */
+ SMC_GEN_LINK_SMCR, /* nest */
+ SMC_GEN_LGR_SMCD, /* nest */
+ SMC_GEN_DEV_SMCD, /* nest */
+ SMC_GEN_DEV_SMCR, /* nest */
+ SMC_GEN_STATS, /* nest */
+ SMC_GEN_FBACK_STATS, /* nest */
+ __SMC_GEN_MAX,
+ SMC_GEN_MAX = __SMC_GEN_MAX - 1
+};
+
+/* SMC_GEN_SYS_INFO attributes */
+enum {
+ SMC_NLA_SYS_UNSPEC,
+ SMC_NLA_SYS_VER, /* u8 */
+ SMC_NLA_SYS_REL, /* u8 */
+ SMC_NLA_SYS_IS_ISM_V2, /* u8 */
+ SMC_NLA_SYS_LOCAL_HOST, /* string */
+ SMC_NLA_SYS_SEID, /* string */
+ SMC_NLA_SYS_IS_SMCR_V2, /* u8 */
+ __SMC_NLA_SYS_MAX,
+ SMC_NLA_SYS_MAX = __SMC_NLA_SYS_MAX - 1
+};
+
+/* SMC_NLA_LGR_D_V2_COMMON and SMC_NLA_LGR_R_V2_COMMON nested attributes */
+enum {
+ SMC_NLA_LGR_V2_VER, /* u8 */
+ SMC_NLA_LGR_V2_REL, /* u8 */
+ SMC_NLA_LGR_V2_OS, /* u8 */
+ SMC_NLA_LGR_V2_NEG_EID, /* string */
+ SMC_NLA_LGR_V2_PEER_HOST, /* string */
+ __SMC_NLA_LGR_V2_MAX,
+ SMC_NLA_LGR_V2_MAX = __SMC_NLA_LGR_V2_MAX - 1
+};
+
+/* SMC_NLA_LGR_R_V2 nested attributes */
+enum {
+ SMC_NLA_LGR_R_V2_UNSPEC,
+ SMC_NLA_LGR_R_V2_DIRECT, /* u8 */
+ __SMC_NLA_LGR_R_V2_MAX,
+ SMC_NLA_LGR_R_V2_MAX = __SMC_NLA_LGR_R_V2_MAX - 1
+};
+
+/* SMC_GEN_LGR_SMCR attributes */
+enum {
+ SMC_NLA_LGR_R_UNSPEC,
+ SMC_NLA_LGR_R_ID, /* u32 */
+ SMC_NLA_LGR_R_ROLE, /* u8 */
+ SMC_NLA_LGR_R_TYPE, /* u8 */
+ SMC_NLA_LGR_R_PNETID, /* string */
+ SMC_NLA_LGR_R_VLAN_ID, /* u8 */
+ SMC_NLA_LGR_R_CONNS_NUM, /* u32 */
+ SMC_NLA_LGR_R_V2_COMMON, /* nest */
+ SMC_NLA_LGR_R_V2, /* nest */
+ SMC_NLA_LGR_R_NET_COOKIE, /* u64 */
+ SMC_NLA_LGR_R_PAD, /* flag */
+ SMC_NLA_LGR_R_BUF_TYPE, /* u8 */
+ __SMC_NLA_LGR_R_MAX,
+ SMC_NLA_LGR_R_MAX = __SMC_NLA_LGR_R_MAX - 1
+};
+
+/* SMC_GEN_LINK_SMCR attributes */
+enum {
+ SMC_NLA_LINK_UNSPEC,
+ SMC_NLA_LINK_ID, /* u8 */
+ SMC_NLA_LINK_IB_DEV, /* string */
+ SMC_NLA_LINK_IB_PORT, /* u8 */
+ SMC_NLA_LINK_GID, /* string */
+ SMC_NLA_LINK_PEER_GID, /* string */
+ SMC_NLA_LINK_CONN_CNT, /* u32 */
+ SMC_NLA_LINK_NET_DEV, /* u32 */
+ SMC_NLA_LINK_UID, /* u32 */
+ SMC_NLA_LINK_PEER_UID, /* u32 */
+ SMC_NLA_LINK_STATE, /* u32 */
+ __SMC_NLA_LINK_MAX,
+ SMC_NLA_LINK_MAX = __SMC_NLA_LINK_MAX - 1
+};
+
+/* SMC_GEN_LGR_SMCD attributes */
+enum {
+ SMC_NLA_LGR_D_UNSPEC,
+ SMC_NLA_LGR_D_ID, /* u32 */
+ SMC_NLA_LGR_D_GID, /* u64 */
+ SMC_NLA_LGR_D_PEER_GID, /* u64 */
+ SMC_NLA_LGR_D_VLAN_ID, /* u8 */
+ SMC_NLA_LGR_D_CONNS_NUM, /* u32 */
+ SMC_NLA_LGR_D_PNETID, /* string */
+ SMC_NLA_LGR_D_CHID, /* u16 */
+ SMC_NLA_LGR_D_PAD, /* flag */
+ SMC_NLA_LGR_D_V2_COMMON, /* nest */
+ __SMC_NLA_LGR_D_MAX,
+ SMC_NLA_LGR_D_MAX = __SMC_NLA_LGR_D_MAX - 1
+};
+
+/* SMC_NLA_DEV_PORT nested attributes */
+enum {
+ SMC_NLA_DEV_PORT_UNSPEC,
+ SMC_NLA_DEV_PORT_PNET_USR, /* u8 */
+ SMC_NLA_DEV_PORT_PNETID, /* string */
+ SMC_NLA_DEV_PORT_NETDEV, /* u32 */
+ SMC_NLA_DEV_PORT_STATE, /* u8 */
+ SMC_NLA_DEV_PORT_VALID, /* u8 */
+ SMC_NLA_DEV_PORT_LNK_CNT, /* u32 */
+ __SMC_NLA_DEV_PORT_MAX,
+ SMC_NLA_DEV_PORT_MAX = __SMC_NLA_DEV_PORT_MAX - 1
+};
+
+/* SMC_GEN_DEV_SMCD and SMC_GEN_DEV_SMCR attributes */
+enum {
+ SMC_NLA_DEV_UNSPEC,
+ SMC_NLA_DEV_USE_CNT, /* u32 */
+ SMC_NLA_DEV_IS_CRIT, /* u8 */
+ SMC_NLA_DEV_PCI_FID, /* u32 */
+ SMC_NLA_DEV_PCI_CHID, /* u16 */
+ SMC_NLA_DEV_PCI_VENDOR, /* u16 */
+ SMC_NLA_DEV_PCI_DEVICE, /* u16 */
+ SMC_NLA_DEV_PCI_ID, /* string */
+ SMC_NLA_DEV_PORT, /* nest */
+ SMC_NLA_DEV_PORT2, /* nest */
+ SMC_NLA_DEV_IB_NAME, /* string */
+ __SMC_NLA_DEV_MAX,
+ SMC_NLA_DEV_MAX = __SMC_NLA_DEV_MAX - 1
+};
+
+/* SMC_NLA_STATS_T_TX(RX)_RMB_SIZE nested attributes */
+/* SMC_NLA_STATS_TX(RX)PLOAD_SIZE nested attributes */
+enum {
+ SMC_NLA_STATS_PLOAD_PAD,
+ SMC_NLA_STATS_PLOAD_8K, /* u64 */
+ SMC_NLA_STATS_PLOAD_16K, /* u64 */
+ SMC_NLA_STATS_PLOAD_32K, /* u64 */
+ SMC_NLA_STATS_PLOAD_64K, /* u64 */
+ SMC_NLA_STATS_PLOAD_128K, /* u64 */
+ SMC_NLA_STATS_PLOAD_256K, /* u64 */
+ SMC_NLA_STATS_PLOAD_512K, /* u64 */
+ SMC_NLA_STATS_PLOAD_1024K, /* u64 */
+ SMC_NLA_STATS_PLOAD_G_1024K, /* u64 */
+ __SMC_NLA_STATS_PLOAD_MAX,
+ SMC_NLA_STATS_PLOAD_MAX = __SMC_NLA_STATS_PLOAD_MAX - 1
+};
+
+/* SMC_NLA_STATS_T_TX(RX)_RMB_STATS nested attributes */
+enum {
+ SMC_NLA_STATS_RMB_PAD,
+ SMC_NLA_STATS_RMB_SIZE_SM_PEER_CNT, /* u64 */
+ SMC_NLA_STATS_RMB_SIZE_SM_CNT, /* u64 */
+ SMC_NLA_STATS_RMB_FULL_PEER_CNT, /* u64 */
+ SMC_NLA_STATS_RMB_FULL_CNT, /* u64 */
+ SMC_NLA_STATS_RMB_REUSE_CNT, /* u64 */
+ SMC_NLA_STATS_RMB_ALLOC_CNT, /* u64 */
+ SMC_NLA_STATS_RMB_DGRADE_CNT, /* u64 */
+ __SMC_NLA_STATS_RMB_MAX,
+ SMC_NLA_STATS_RMB_MAX = __SMC_NLA_STATS_RMB_MAX - 1
+};
+
+/* SMC_NLA_STATS_SMCD_TECH and _SMCR_TECH nested attributes */
+enum {
+ SMC_NLA_STATS_T_PAD,
+ SMC_NLA_STATS_T_TX_RMB_SIZE, /* nest */
+ SMC_NLA_STATS_T_RX_RMB_SIZE, /* nest */
+ SMC_NLA_STATS_T_TXPLOAD_SIZE, /* nest */
+ SMC_NLA_STATS_T_RXPLOAD_SIZE, /* nest */
+ SMC_NLA_STATS_T_TX_RMB_STATS, /* nest */
+ SMC_NLA_STATS_T_RX_RMB_STATS, /* nest */
+ SMC_NLA_STATS_T_CLNT_V1_SUCC, /* u64 */
+ SMC_NLA_STATS_T_CLNT_V2_SUCC, /* u64 */
+ SMC_NLA_STATS_T_SRV_V1_SUCC, /* u64 */
+ SMC_NLA_STATS_T_SRV_V2_SUCC, /* u64 */
+ SMC_NLA_STATS_T_SENDPAGE_CNT, /* u64 */
+ SMC_NLA_STATS_T_SPLICE_CNT, /* u64 */
+ SMC_NLA_STATS_T_CORK_CNT, /* u64 */
+ SMC_NLA_STATS_T_NDLY_CNT, /* u64 */
+ SMC_NLA_STATS_T_URG_DATA_CNT, /* u64 */
+ SMC_NLA_STATS_T_RX_BYTES, /* u64 */
+ SMC_NLA_STATS_T_TX_BYTES, /* u64 */
+ SMC_NLA_STATS_T_RX_CNT, /* u64 */
+ SMC_NLA_STATS_T_TX_CNT, /* u64 */
+ __SMC_NLA_STATS_T_MAX,
+ SMC_NLA_STATS_T_MAX = __SMC_NLA_STATS_T_MAX - 1
+};
+
+/* SMC_GEN_STATS attributes */
+enum {
+ SMC_NLA_STATS_PAD,
+ SMC_NLA_STATS_SMCD_TECH, /* nest */
+ SMC_NLA_STATS_SMCR_TECH, /* nest */
+ SMC_NLA_STATS_CLNT_HS_ERR_CNT, /* u64 */
+ SMC_NLA_STATS_SRV_HS_ERR_CNT, /* u64 */
+ __SMC_NLA_STATS_MAX,
+ SMC_NLA_STATS_MAX = __SMC_NLA_STATS_MAX - 1
+};
+
+/* SMC_GEN_FBACK_STATS attributes */
+enum {
+ SMC_NLA_FBACK_STATS_PAD,
+ SMC_NLA_FBACK_STATS_TYPE, /* u8 */
+ SMC_NLA_FBACK_STATS_SRV_CNT, /* u64 */
+ SMC_NLA_FBACK_STATS_CLNT_CNT, /* u64 */
+ SMC_NLA_FBACK_STATS_RSN_CODE, /* u32 */
+ SMC_NLA_FBACK_STATS_RSN_CNT, /* u16 */
+ __SMC_NLA_FBACK_STATS_MAX,
+ SMC_NLA_FBACK_STATS_MAX = __SMC_NLA_FBACK_STATS_MAX - 1
+};
+
+/* SMC_NETLINK_UEID attributes */
+enum {
+ SMC_NLA_EID_TABLE_UNSPEC,
+ SMC_NLA_EID_TABLE_ENTRY, /* string */
+ __SMC_NLA_EID_TABLE_MAX,
+ SMC_NLA_EID_TABLE_MAX = __SMC_NLA_EID_TABLE_MAX - 1
+};
+
+/* SMC_NETLINK_SEID attributes */
+enum {
+ SMC_NLA_SEID_UNSPEC,
+ SMC_NLA_SEID_ENTRY, /* string */
+ SMC_NLA_SEID_ENABLED, /* u8 */
+ __SMC_NLA_SEID_TABLE_MAX,
+ SMC_NLA_SEID_TABLE_MAX = __SMC_NLA_SEID_TABLE_MAX - 1
+};
+
+/* SMC_NETLINK_HS_LIMITATION attributes */
+enum {
+ SMC_NLA_HS_LIMITATION_UNSPEC,
+ SMC_NLA_HS_LIMITATION_ENABLED, /* u8 */
+ __SMC_NLA_HS_LIMITATION_MAX,
+ SMC_NLA_HS_LIMITATION_MAX = __SMC_NLA_HS_LIMITATION_MAX - 1
+};
+
+/* SMC socket options */
+#define SMC_LIMIT_HS 1 /* constraint on smc handshake */
+
#endif /* _UAPI_LINUX_SMC_H */
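
A short sketch of the new socket option in use. SOL_SMC (protocol level 286) comes from the kernel-side socket headers and may not be exported by libc yet, so it is defined locally here as an assumption:

    #include <sys/socket.h>
    #include <linux/smc.h>

    #ifndef SOL_SMC
    #define SOL_SMC 286 /* SMC protocol level, from include/linux/socket.h */
    #endif

    /* Restrict handshake processing on an AF_SMC socket. */
    int smc_limit_handshake(int fd)
    {
            int val = 1;

            return setsockopt(fd, SOL_SMC, SMC_LIMIT_HS, &val, sizeof(val));
    }
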
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 7d91f4debc48..4d7470036a8b 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -159,6 +159,7 @@ enum
UDP_MIB_SNDBUFERRORS, /* SndbufErrors */
UDP_MIB_CSUMERRORS, /* InCsumErrors */
UDP_MIB_IGNOREDMULTI, /* IgnoredMulti */
+ UDP_MIB_MEMERRORS, /* MemErrors */
__UDP_MIB_MAX
};
@@ -287,6 +288,10 @@ enum
LINUX_MIB_TCPFASTOPENPASSIVEALTKEY, /* TCPFastOpenPassiveAltKey */
LINUX_MIB_TCPTIMEOUTREHASH, /* TCPTimeoutRehash */
LINUX_MIB_TCPDUPLICATEDATAREHASH, /* TCPDuplicateDataRehash */
+ LINUX_MIB_TCPDSACKRECVSEGS, /* TCPDSACKRecvSegs */
+ LINUX_MIB_TCPDSACKIGNOREDDUBIOUS, /* TCPDSACKIgnoredDubious */
+ LINUX_MIB_TCPMIGRATEREQSUCCESS, /* TCPMigrateReqSuccess */
+ LINUX_MIB_TCPMIGRATEREQFAILURE, /* TCPMigrateReqFailure */
__LINUX_MIB_MAX
};
@@ -339,6 +344,8 @@ enum
LINUX_MIB_TLSRXDEVICE, /* TlsRxDevice */
LINUX_MIB_TLSDECRYPTERROR, /* TlsDecryptError */
LINUX_MIB_TLSRXDEVICERESYNC, /* TlsRxDeviceResync */
+ LINUX_MIB_TLSDECRYPTRETRY, /* TlsDecryptRetry */
+ LINUX_MIB_TLSRXNOPADVIOL, /* TlsRxNoPadViolation */
__LINUX_MIB_TLSMAX
};
diff --git a/include/uapi/linux/sock_diag.h b/include/uapi/linux/sock_diag.h
index e5925009a652..5f74a5f6091d 100644
--- a/include/uapi/linux/sock_diag.h
+++ b/include/uapi/linux/sock_diag.h
@@ -36,4 +36,30 @@ enum sknetlink_groups {
};
#define SKNLGRP_MAX (__SKNLGRP_MAX - 1)
+enum {
+ SK_DIAG_BPF_STORAGE_REQ_NONE,
+ SK_DIAG_BPF_STORAGE_REQ_MAP_FD,
+ __SK_DIAG_BPF_STORAGE_REQ_MAX,
+};
+
+#define SK_DIAG_BPF_STORAGE_REQ_MAX (__SK_DIAG_BPF_STORAGE_REQ_MAX - 1)
+
+enum {
+ SK_DIAG_BPF_STORAGE_REP_NONE,
+ SK_DIAG_BPF_STORAGE,
+ __SK_DIAG_BPF_STORAGE_REP_MAX,
+};
+
+#define SK_DIAG_BPF_STORAGE_REP_MAX (__SK_DIAG_BPF_STORAGE_REP_MAX - 1)
+
+enum {
+ SK_DIAG_BPF_STORAGE_NONE,
+ SK_DIAG_BPF_STORAGE_PAD,
+ SK_DIAG_BPF_STORAGE_MAP_ID,
+ SK_DIAG_BPF_STORAGE_MAP_VALUE,
+ __SK_DIAG_BPF_STORAGE_MAX,
+};
+
+#define SK_DIAG_BPF_STORAGE_MAX (__SK_DIAG_BPF_STORAGE_MAX - 1)
+
#endif /* _UAPI__SOCK_DIAG_H__ */
diff --git a/include/uapi/linux/socket.h b/include/uapi/linux/socket.h
index c3409c8ec0dd..d3fcd3b5ec53 100644
--- a/include/uapi/linux/socket.h
+++ b/include/uapi/linux/socket.h
@@ -26,4 +26,13 @@ struct __kernel_sockaddr_storage {
};
};
+#define SOCK_SNDBUF_LOCK 1
+#define SOCK_RCVBUF_LOCK 2
+
+#define SOCK_BUF_LOCK_MASK (SOCK_SNDBUF_LOCK | SOCK_RCVBUF_LOCK)
+
+#define SOCK_TXREHASH_DEFAULT 255
+#define SOCK_TXREHASH_DISABLED 0
+#define SOCK_TXREHASH_ENABLED 1
+
#endif /* _UAPI_LINUX_SOCKET_H */
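
The SOCK_TXREHASH_* values are consumed by the SO_TXREHASH socket option (value 74 in the asm-generic socket header); a minimal sketch, with a fallback define in case the libc headers predate it:

    #include <sys/socket.h>
    #include <linux/socket.h>

    #ifndef SO_TXREHASH
    #define SO_TXREHASH 74 /* from include/uapi/asm-generic/socket.h */
    #endif

    int disable_txrehash(int fd)
    {
            int val = SOCK_TXREHASH_DISABLED;

            return setsockopt(fd, SOL_SOCKET, SO_TXREHASH, &val, sizeof(val));
    }
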
diff --git a/include/uapi/linux/soundcard.h b/include/uapi/linux/soundcard.h
index f3b21f989872..ac1318793a86 100644
--- a/include/uapi/linux/soundcard.h
+++ b/include/uapi/linux/soundcard.h
@@ -1051,7 +1051,7 @@ typedef struct mixer_vol_table {
* the GPL version of OSS-4.x and build against that version
* of the header.
*
- * We redefine the extern keyword so that make headers_check
+ * We redefine the extern keyword so that usr/include/headers_check.pl
* does not complain about SEQ_USE_EXTBUF.
*/
#define SEQ_DECLAREBUF() SEQ_USE_EXTBUF()
diff --git a/include/uapi/linux/spi/spi.h b/include/uapi/linux/spi/spi.h
new file mode 100644
index 000000000000..9d5f58059703
--- /dev/null
+++ b/include/uapi/linux/spi/spi.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _UAPI_SPI_H
+#define _UAPI_SPI_H
+
+#include <linux/const.h>
+
+#define SPI_CPHA _BITUL(0) /* clock phase */
+#define SPI_CPOL _BITUL(1) /* clock polarity */
+
+#define SPI_MODE_0 (0|0) /* (original MicroWire) */
+#define SPI_MODE_1 (0|SPI_CPHA)
+#define SPI_MODE_2 (SPI_CPOL|0)
+#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA)
+#define SPI_MODE_X_MASK (SPI_CPOL|SPI_CPHA)
+
+#define SPI_CS_HIGH _BITUL(2) /* chipselect active high? */
+#define SPI_LSB_FIRST _BITUL(3) /* per-word bits-on-wire */
+#define SPI_3WIRE _BITUL(4) /* SI/SO signals shared */
+#define SPI_LOOP _BITUL(5) /* loopback mode */
+#define SPI_NO_CS _BITUL(6) /* 1 dev/bus, no chipselect */
+#define SPI_READY _BITUL(7) /* slave pulls low to pause */
+#define SPI_TX_DUAL _BITUL(8) /* transmit with 2 wires */
+#define SPI_TX_QUAD _BITUL(9) /* transmit with 4 wires */
+#define SPI_RX_DUAL _BITUL(10) /* receive with 2 wires */
+#define SPI_RX_QUAD _BITUL(11) /* receive with 4 wires */
+#define SPI_CS_WORD _BITUL(12) /* toggle cs after each word */
+#define SPI_TX_OCTAL _BITUL(13) /* transmit with 8 wires */
+#define SPI_RX_OCTAL _BITUL(14) /* receive with 8 wires */
+#define SPI_3WIRE_HIZ _BITUL(15) /* high impedance turnaround */
+#define SPI_RX_CPHA_FLIP _BITUL(16) /* flip CPHA on Rx only xfer */
+
+/*
+ * All the bits defined above should be covered by SPI_MODE_USER_MASK.
+ * The SPI_MODE_USER_MASK has the SPI_MODE_KERNEL_MASK counterpart in
+ * 'include/linux/spi/spi.h'. The bits defined here are from bit 0 upwards
+ * while in SPI_MODE_KERNEL_MASK they are from the other end downwards.
+ * These bits must not overlap. A static assert check should make sure of that.
+ * If adding extra bits, make sure to increase the bit index below as well.
+ */
+#define SPI_MODE_USER_MASK (_BITUL(17) - 1)
+
+#endif /* _UAPI_SPI_H */
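
Since spidev now reuses these definitions (see the spidev.h hunk below), a user-space sketch of programming a mode word through the 32-bit mode ioctl could look like this; the device path is an example:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/spi/spidev.h>

    int open_spi_mode3(const char *dev /* e.g. "/dev/spidev0.0" */)
    {
            __u32 mode = SPI_MODE_3 | SPI_CS_HIGH; /* CPOL=1, CPHA=1, active-high CS */
            int fd = open(dev, O_RDWR);

            if (fd < 0)
                    return -1;
            if (ioctl(fd, SPI_IOC_WR_MODE32, &mode) < 0) {
                    close(fd);
                    return -1;
            }
            return fd;
    }
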
diff --git a/include/uapi/linux/spi/spidev.h b/include/uapi/linux/spi/spidev.h
index ee0f2460bff6..0c3da08f2aff 100644
--- a/include/uapi/linux/spi/spidev.h
+++ b/include/uapi/linux/spi/spidev.h
@@ -25,31 +25,7 @@
#include <linux/types.h>
#include <linux/ioctl.h>
-
-/* User space versions of kernel symbols for SPI clocking modes,
- * matching <linux/spi/spi.h>
- */
-
-#define SPI_CPHA 0x01
-#define SPI_CPOL 0x02
-
-#define SPI_MODE_0 (0|0)
-#define SPI_MODE_1 (0|SPI_CPHA)
-#define SPI_MODE_2 (SPI_CPOL|0)
-#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA)
-
-#define SPI_CS_HIGH 0x04
-#define SPI_LSB_FIRST 0x08
-#define SPI_3WIRE 0x10
-#define SPI_LOOP 0x20
-#define SPI_NO_CS 0x40
-#define SPI_READY 0x80
-#define SPI_TX_DUAL 0x100
-#define SPI_TX_QUAD 0x200
-#define SPI_RX_DUAL 0x400
-#define SPI_RX_QUAD 0x800
-
-/*---------------------------------------------------------------------------*/
+#include <linux/spi/spi.h>
/* IOCTL commands */
diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
index ad80a5c885d5..7cab2c65d3d7 100644
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -123,7 +123,11 @@ struct statx {
__u32 stx_dev_major; /* ID of device containing file [uncond] */
__u32 stx_dev_minor;
/* 0x90 */
- __u64 __spare2[14]; /* Spare space for future expansion */
+ __u64 stx_mnt_id;
+ __u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */
+ __u32 stx_dio_offset_align; /* File offset alignment for direct I/O */
+ /* 0xa0 */
+ __u64 __spare3[12]; /* Spare space for future expansion */
/* 0x100 */
};
@@ -148,9 +152,20 @@ struct statx {
#define STATX_BLOCKS 0x00000400U /* Want/got stx_blocks */
#define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */
#define STATX_BTIME 0x00000800U /* Want/got stx_btime */
-#define STATX_ALL 0x00000fffU /* All currently supported flags */
+#define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */
+#define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */
+
#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
+#ifndef __KERNEL__
+/*
+ * This is deprecated, and shall remain the same value in the future. To avoid
+ * confusion please use the equivalent (STATX_BASIC_STATS | STATX_BTIME)
+ * instead.
+ */
+#define STATX_ALL 0x00000fffU
+#endif
+
/*
* Attributes to be found in stx_attributes and masked in stx_attributes_mask.
*
@@ -158,9 +173,12 @@ struct statx {
* be of use to ordinary userspace programs such as GUIs or ls rather than
* specialised tools.
*
- * Note that the flags marked [I] correspond to generic FS_IOC_FLAGS
+ * Note that the flags marked [I] correspond to the FS_IOC_SETFLAGS flags
* semantically. Where possible, the numerical value is picked to correspond
- * also.
+ * also. Note that the DAX attribute indicates that the file is in the CPU
+ * direct access state. It does not correspond to the per-inode flag that
+ * some filesystems support.
+ *
*/
#define STATX_ATTR_COMPRESSED 0x00000004 /* [I] File is compressed by the fs */
#define STATX_ATTR_IMMUTABLE 0x00000010 /* [I] File is marked immutable */
@@ -168,7 +186,9 @@ struct statx {
#define STATX_ATTR_NODUMP 0x00000040 /* [I] File is not to be dumped */
#define STATX_ATTR_ENCRYPTED 0x00000800 /* [I] File requires key to decrypt in fs */
#define STATX_ATTR_AUTOMOUNT 0x00001000 /* Dir: Automount trigger */
+#define STATX_ATTR_MOUNT_ROOT 0x00002000 /* Root of a mount */
#define STATX_ATTR_VERITY 0x00100000 /* [I] Verity protected file */
+#define STATX_ATTR_DAX 0x00200000 /* File is currently in DAX state */
#endif /* _UAPI_LINUX_STAT_H */
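
A hedged example of querying the new fields. It assumes a glibc with the statx() wrapper (2.28+) and headers new enough to define STATX_DIOALIGN; the returned mask bit must be checked before the alignment fields are trusted:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>

    int print_dio_alignment(const char *path)
    {
            struct statx stx;

            if (statx(AT_FDCWD, path, 0, STATX_DIOALIGN, &stx) < 0)
                    return -1;
            if (!(stx.stx_mask & STATX_DIOALIGN))
                    return -1; /* filesystem does not report DIO alignment */
            printf("mem align %u, offset align %u\n",
                   stx.stx_dio_mem_align, stx.stx_dio_offset_align);
            return 0;
    }
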
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index ee8220f8dcf5..7837ba4fe728 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -1,6 +1,47 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_STDDEF_H
+#define _UAPI_LINUX_STDDEF_H
+
#include <linux/compiler_types.h>
#ifndef __always_inline
#define __always_inline inline
#endif
+
+/**
+ * __struct_group() - Create a mirrored named and anonymous struct
+ *
+ * @TAG: The tag name for the named sub-struct (usually empty)
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @ATTRS: Any struct attributes (usually empty)
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical layout
+ * and size: one anonymous and one named. The former's members can be used
+ * normally without sub-struct naming, and the latter can be used to
+ * reason about the start, end, and size of the group of struct members.
+ * The named struct can also be explicitly tagged for layer reuse, and
+ * struct attributes can be appended to both.
+ */
+#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
+ union { \
+ struct { MEMBERS } ATTRS; \
+ struct TAG { MEMBERS } ATTRS NAME; \
+ }
+
+/**
+ * __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union
+ *
+ * @TYPE: The type of each flexible array element
+ * @NAME: The name of the flexible array member
+ *
+ * In order to have a flexible array member in a union or alone in a
+ * struct, it needs to be wrapped in an anonymous struct with at least 1
+ * named member, but that member can be empty.
+ */
+#define __DECLARE_FLEX_ARRAY(TYPE, NAME) \
+ struct { \
+ struct { } __empty_ ## NAME; \
+ TYPE NAME[]; \
+ }
+#endif
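
An illustrative (hypothetical) use of the two helpers: the named sub-struct gives memcpy() a bounded target covering exactly the grouped members, and the flexible arrays can legally share a union:

    #include <string.h>
    #include <linux/stddef.h>
    #include <linux/types.h>

    struct pkt {
            __struct_group(pkt_hdr, hdr, /* no attrs */,
                    __u8 type;
                    __u8 len;
            );
            union {
                    __DECLARE_FLEX_ARRAY(__u8, bytes);
                    __DECLARE_FLEX_ARRAY(__u32, words);
            };
    };

    static void copy_hdr(struct pkt *dst, const struct pkt *src)
    {
            /* Copies the grouped header members only, never the flex array. */
            memcpy(&dst->hdr, &src->hdr, sizeof(dst->hdr));
    }
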
diff --git a/include/uapi/linux/stm.h b/include/uapi/linux/stm.h
index 7bac318b4440..de3579c2cff0 100644
--- a/include/uapi/linux/stm.h
+++ b/include/uapi/linux/stm.h
@@ -36,7 +36,7 @@ struct stp_policy_id {
/* padding */
__u16 __reserved_0;
__u32 __reserved_1;
- char id[0];
+ char id[];
};
#define STP_POLICY_ID_SET _IOWR('%', 0, struct stp_policy_id)
diff --git a/include/uapi/linux/surface_aggregator/cdev.h b/include/uapi/linux/surface_aggregator/cdev.h
new file mode 100644
index 000000000000..08f46b60b151
--- /dev/null
+++ b/include/uapi/linux/surface_aggregator/cdev.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Surface System Aggregator Module (SSAM) user-space EC interface.
+ *
+ * Definitions, structs, and IOCTLs for the /dev/surface/aggregator misc
+ * device. This device provides direct user-space access to the SSAM EC.
+ * Intended for debugging and development.
+ *
+ * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H
+#define _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * enum ssam_cdev_request_flags - Request flags for SSAM cdev request IOCTL.
+ *
+ * @SSAM_CDEV_REQUEST_HAS_RESPONSE:
+ * Specifies that the request expects a response. If not set, the request
+ * will be directly completed after its underlying packet has been
+ * transmitted. If set, the request transport system waits for a response
+ * of the request.
+ *
+ * @SSAM_CDEV_REQUEST_UNSEQUENCED:
+ * Specifies that the request should be transmitted via an unsequenced
+ * packet. If set, the request must not have a response, meaning that this
+ * flag and the %SSAM_CDEV_REQUEST_HAS_RESPONSE flag are mutually
+ * exclusive.
+ */
+enum ssam_cdev_request_flags {
+ SSAM_CDEV_REQUEST_HAS_RESPONSE = 0x01,
+ SSAM_CDEV_REQUEST_UNSEQUENCED = 0x02,
+};
+
+/**
+ * struct ssam_cdev_request - Controller request IOCTL argument.
+ * @target_category: Target category of the SAM request.
+ * @target_id: Target ID of the SAM request.
+ * @command_id: Command ID of the SAM request.
+ * @instance_id: Instance ID of the SAM request.
+ * @flags: Request flags (see &enum ssam_cdev_request_flags).
+ * @status: Request status (output).
+ * @payload: Request payload (input data).
+ * @payload.data: Pointer to request payload data.
+ * @payload.length: Length of request payload data (in bytes).
+ * @response: Request response (output data).
+ * @response.data: Pointer to response buffer.
+ * @response.length: On input: Capacity of response buffer (in bytes).
+ * On output: Length of request response (number of bytes
+ * in the buffer that are actually used).
+ */
+struct ssam_cdev_request {
+ __u8 target_category;
+ __u8 target_id;
+ __u8 command_id;
+ __u8 instance_id;
+ __u16 flags;
+ __s16 status;
+
+ struct {
+ __u64 data;
+ __u16 length;
+ __u8 __pad[6];
+ } payload;
+
+ struct {
+ __u64 data;
+ __u16 length;
+ __u8 __pad[6];
+ } response;
+} __attribute__((__packed__));
+
+/**
+ * struct ssam_cdev_notifier_desc - Notifier descriptor.
+ * @priority: Priority value determining the order in which notifier
+ * callbacks will be called. A higher value means higher
+ * priority, i.e. the associated callback will be executed
+ * earlier than other (lower priority) callbacks.
+ * @target_category: The event target category for which this notifier should
+ * receive events.
+ *
+ * Specifies the notifier that should be registered or unregistered,
+ * specifically with which priority and for which target category of events.
+ */
+struct ssam_cdev_notifier_desc {
+ __s32 priority;
+ __u8 target_category;
+} __attribute__((__packed__));
+
+/**
+ * struct ssam_cdev_event_desc - Event descriptor.
+ * @reg: Registry via which the event will be enabled/disabled.
+ * @reg.target_category: Target category for the event registry requests.
+ * @reg.target_id: Target ID for the event registry requests.
+ * @reg.cid_enable: Command ID for the event-enable request.
+ * @reg.cid_disable: Command ID for the event-disable request.
+ * @id: ID specifying the event.
+ * @id.target_category: Target category of the event source.
+ * @id.instance: Instance ID of the event source.
+ * @flags: Flags used for enabling the event.
+ *
+ * Specifies which event should be enabled/disabled and how to do that.
+ */
+struct ssam_cdev_event_desc {
+ struct {
+ __u8 target_category;
+ __u8 target_id;
+ __u8 cid_enable;
+ __u8 cid_disable;
+ } reg;
+
+ struct {
+ __u8 target_category;
+ __u8 instance;
+ } id;
+
+ __u8 flags;
+} __attribute__((__packed__));
+
+/**
+ * struct ssam_cdev_event - SSAM event sent by the EC.
+ * @target_category: Target category of the event source. See &enum ssam_ssh_tc.
+ * @target_id: Target ID of the event source.
+ * @command_id: Command ID of the event.
+ * @instance_id: Instance ID of the event source.
+ * @length: Length of the event payload in bytes.
+ * @data: Event payload data.
+ */
+struct ssam_cdev_event {
+ __u8 target_category;
+ __u8 target_id;
+ __u8 command_id;
+ __u8 instance_id;
+ __u16 length;
+ __u8 data[];
+} __attribute__((__packed__));
+
+#define SSAM_CDEV_REQUEST _IOWR(0xA5, 1, struct ssam_cdev_request)
+#define SSAM_CDEV_NOTIF_REGISTER _IOW(0xA5, 2, struct ssam_cdev_notifier_desc)
+#define SSAM_CDEV_NOTIF_UNREGISTER _IOW(0xA5, 3, struct ssam_cdev_notifier_desc)
+#define SSAM_CDEV_EVENT_ENABLE _IOW(0xA5, 4, struct ssam_cdev_event_desc)
+#define SSAM_CDEV_EVENT_DISABLE _IOW(0xA5, 5, struct ssam_cdev_event_desc)
+
+#endif /* _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H */
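
A hedged sketch of issuing a raw request through the misc device. The target/command IDs are placeholders that depend on the EC firmware, and the request here is fire-and-forget (unsequenced, no response):

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/surface_aggregator/cdev.h>

    int ssam_raw_request(__u8 tc, __u8 tid, __u8 cid, __u8 iid)
    {
            struct ssam_cdev_request rqst = { 0 };
            int fd, ret;

            fd = open("/dev/surface/aggregator", O_RDWR);
            if (fd < 0)
                    return -1;
            rqst.target_category = tc;
            rqst.target_id = tid;
            rqst.command_id = cid;
            rqst.instance_id = iid;
            rqst.flags = SSAM_CDEV_REQUEST_UNSEQUENCED;
            ret = ioctl(fd, SSAM_CDEV_REQUEST, &rqst);
            close(fd);
            return ret < 0 ? -1 : rqst.status;
    }
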
diff --git a/include/uapi/linux/surface_aggregator/dtx.h b/include/uapi/linux/surface_aggregator/dtx.h
new file mode 100644
index 000000000000..0833aab0d819
--- /dev/null
+++ b/include/uapi/linux/surface_aggregator/dtx.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Surface DTX (clipboard detachment system driver) user-space interface.
+ *
+ * Definitions, structs, and IOCTLs for the /dev/surface/dtx misc device. This
+ * device allows user-space to control the clipboard detachment process on
+ * Surface Book series devices.
+ *
+ * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _UAPI_LINUX_SURFACE_AGGREGATOR_DTX_H
+#define _UAPI_LINUX_SURFACE_AGGREGATOR_DTX_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/* Status/error categories */
+#define SDTX_CATEGORY_STATUS 0x0000
+#define SDTX_CATEGORY_RUNTIME_ERROR 0x1000
+#define SDTX_CATEGORY_HARDWARE_ERROR 0x2000
+#define SDTX_CATEGORY_UNKNOWN 0xf000
+
+#define SDTX_CATEGORY_MASK 0xf000
+#define SDTX_CATEGORY(value) ((value) & SDTX_CATEGORY_MASK)
+
+#define SDTX_STATUS(code) ((code) | SDTX_CATEGORY_STATUS)
+#define SDTX_ERR_RT(code) ((code) | SDTX_CATEGORY_RUNTIME_ERROR)
+#define SDTX_ERR_HW(code) ((code) | SDTX_CATEGORY_HARDWARE_ERROR)
+#define SDTX_UNKNOWN(code) ((code) | SDTX_CATEGORY_UNKNOWN)
+
+#define SDTX_SUCCESS(value) (SDTX_CATEGORY(value) == SDTX_CATEGORY_STATUS)
+
+/* Latch status values */
+#define SDTX_LATCH_CLOSED SDTX_STATUS(0x00)
+#define SDTX_LATCH_OPENED SDTX_STATUS(0x01)
+
+/* Base state values */
+#define SDTX_BASE_DETACHED SDTX_STATUS(0x00)
+#define SDTX_BASE_ATTACHED SDTX_STATUS(0x01)
+
+/* Runtime errors (non-critical) */
+#define SDTX_DETACH_NOT_FEASIBLE SDTX_ERR_RT(0x01)
+#define SDTX_DETACH_TIMEDOUT SDTX_ERR_RT(0x02)
+
+/* Hardware errors (critical) */
+#define SDTX_ERR_FAILED_TO_OPEN SDTX_ERR_HW(0x01)
+#define SDTX_ERR_FAILED_TO_REMAIN_OPEN SDTX_ERR_HW(0x02)
+#define SDTX_ERR_FAILED_TO_CLOSE SDTX_ERR_HW(0x03)
+
+/* Base types */
+#define SDTX_DEVICE_TYPE_HID 0x0100
+#define SDTX_DEVICE_TYPE_SSH 0x0200
+
+#define SDTX_DEVICE_TYPE_MASK 0x0f00
+#define SDTX_DEVICE_TYPE(value) ((value) & SDTX_DEVICE_TYPE_MASK)
+
+#define SDTX_BASE_TYPE_HID(id) ((id) | SDTX_DEVICE_TYPE_HID)
+#define SDTX_BASE_TYPE_SSH(id) ((id) | SDTX_DEVICE_TYPE_SSH)
+
+/**
+ * enum sdtx_device_mode - Mode describing how (and if) the clipboard is
+ * attached to the base of the device.
+ * @SDTX_DEVICE_MODE_TABLET: The clipboard is detached from the base and the
+ * device operates as a tablet.
+ * @SDTX_DEVICE_MODE_LAPTOP: The clipboard is attached normally to the base
+ * and the device operates as a laptop.
+ * @SDTX_DEVICE_MODE_STUDIO: The clipboard is attached to the base in reverse.
+ * The device operates as a tablet with keyboard and
+ * touchpad deactivated; however, the base battery
+ * and, if present in the specific device model, the
+ * dGPU remain available to the system.
+ */
+enum sdtx_device_mode {
+ SDTX_DEVICE_MODE_TABLET = 0x00,
+ SDTX_DEVICE_MODE_LAPTOP = 0x01,
+ SDTX_DEVICE_MODE_STUDIO = 0x02,
+};
+
+/**
+ * struct sdtx_event - Event provided by reading from the DTX device file.
+ * @length: Length of the event payload, in bytes.
+ * @code: Event code, detailing what type of event this is.
+ * @data: Payload of the event, containing @length bytes.
+ *
+ * See &enum sdtx_event_code for currently valid event codes.
+ */
+struct sdtx_event {
+ __u16 length;
+ __u16 code;
+ __u8 data[];
+} __attribute__((__packed__));
+
+/**
+ * enum sdtx_event_code - Code describing the type of an event.
+ * @SDTX_EVENT_REQUEST: Detachment request event type.
+ * @SDTX_EVENT_CANCEL: Cancel detachment process event type.
+ * @SDTX_EVENT_BASE_CONNECTION: Base/clipboard connection change event type.
+ * @SDTX_EVENT_LATCH_STATUS: Latch status change event type.
+ * @SDTX_EVENT_DEVICE_MODE: Device mode change event type.
+ *
+ * Used in &struct sdtx_event to describe the type of the event. Further event
+ * codes are reserved for future use. Any event parser should be able to
+ * gracefully handle unknown events, i.e. by simply skipping them.
+ *
+ * Consult the DTX user-space interface documentation for details regarding
+ * the individual event types.
+ */
+enum sdtx_event_code {
+ SDTX_EVENT_REQUEST = 1,
+ SDTX_EVENT_CANCEL = 2,
+ SDTX_EVENT_BASE_CONNECTION = 3,
+ SDTX_EVENT_LATCH_STATUS = 4,
+ SDTX_EVENT_DEVICE_MODE = 5,
+};
+
+/**
+ * struct sdtx_base_info - Describes if and what type of base is connected.
+ * @state: The state of the connection. Valid values are %SDTX_BASE_DETACHED,
+ * %SDTX_BASE_ATTACHED, and %SDTX_DETACH_NOT_FEASIBLE (in case a base
+ * is attached but low clipboard battery prevents detachment). Other
+ * values are currently reserved.
+ * @base_id: The type of base connected. Zero if no base is connected.
+ */
+struct sdtx_base_info {
+ __u16 state;
+ __u16 base_id;
+} __attribute__((__packed__));
+
+/* IOCTLs */
+#define SDTX_IOCTL_EVENTS_ENABLE _IO(0xa5, 0x21)
+#define SDTX_IOCTL_EVENTS_DISABLE _IO(0xa5, 0x22)
+
+#define SDTX_IOCTL_LATCH_LOCK _IO(0xa5, 0x23)
+#define SDTX_IOCTL_LATCH_UNLOCK _IO(0xa5, 0x24)
+
+#define SDTX_IOCTL_LATCH_REQUEST _IO(0xa5, 0x25)
+#define SDTX_IOCTL_LATCH_CONFIRM _IO(0xa5, 0x26)
+#define SDTX_IOCTL_LATCH_HEARTBEAT _IO(0xa5, 0x27)
+#define SDTX_IOCTL_LATCH_CANCEL _IO(0xa5, 0x28)
+
+#define SDTX_IOCTL_GET_BASE_INFO _IOR(0xa5, 0x29, struct sdtx_base_info)
+#define SDTX_IOCTL_GET_DEVICE_MODE _IOR(0xa5, 0x2a, __u16)
+#define SDTX_IOCTL_GET_LATCH_STATUS _IOR(0xa5, 0x2b, __u16)
+
+#endif /* _UAPI_LINUX_SURFACE_AGGREGATOR_DTX_H */
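
For illustration, a short sketch of reading the current device mode through the misc device; error handling is minimal:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/surface_aggregator/dtx.h>

    int get_device_mode(void)
    {
            __u16 mode;
            int fd = open("/dev/surface/dtx", O_RDONLY);

            if (fd < 0)
                    return -1;
            if (ioctl(fd, SDTX_IOCTL_GET_DEVICE_MODE, &mode) < 0) {
                    close(fd);
                    return -1;
            }
            close(fd);
            return mode; /* one of enum sdtx_device_mode */
    }
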
diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
index 7272f85d6d6a..0723a9cce747 100644
--- a/include/uapi/linux/swab.h
+++ b/include/uapi/linux/swab.h
@@ -102,7 +102,7 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
#define __swab16(x) (__u16)__builtin_bswap16((__u16)(x))
#else
#define __swab16(x) \
- (__builtin_constant_p((__u16)(x)) ? \
+ (__u16)(__builtin_constant_p(x) ? \
___constant_swab16(x) : \
__fswab16(x))
#endif
@@ -115,7 +115,7 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
#define __swab32(x) (__u32)__builtin_bswap32((__u32)(x))
#else
#define __swab32(x) \
- (__builtin_constant_p((__u32)(x)) ? \
+ (__u32)(__builtin_constant_p(x) ? \
___constant_swab32(x) : \
__fswab32(x))
#endif
@@ -128,7 +128,7 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
#define __swab64(x) (__u64)__builtin_bswap64((__u64)(x))
#else
#define __swab64(x) \
- (__builtin_constant_p((__u64)(x)) ? \
+ (__u64)(__builtin_constant_p(x) ? \
___constant_swab64(x) : \
__fswab64(x))
#endif
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 27c1ed2822e6..8981f00204db 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -23,7 +23,7 @@
#ifndef _UAPI_LINUX_SYSCTL_H
#define _UAPI_LINUX_SYSCTL_H
-#include <linux/kernel.h>
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/compiler.h>
@@ -482,6 +482,7 @@ enum
NET_IPV4_CONF_PROMOTE_SECONDARIES=20,
NET_IPV4_CONF_ARP_ACCEPT=21,
NET_IPV4_CONF_ARP_NOTIFY=22,
+ NET_IPV4_CONF_ARP_EVICT_NOCARRIER=23,
};
/* /proc/sys/net/ipv4/netfilter */
@@ -571,6 +572,7 @@ enum {
NET_IPV6_ACCEPT_SOURCE_ROUTE=25,
NET_IPV6_ACCEPT_RA_FROM_LOCAL=26,
NET_IPV6_ACCEPT_RA_RT_INFO_MIN_PLEN=27,
+ NET_IPV6_RA_DEFRTR_METRIC=28,
__NET_IPV6_MAX
};
@@ -582,24 +584,25 @@ enum {
/* /proc/sys/net/<protocol>/neigh/<dev> */
enum {
- NET_NEIGH_MCAST_SOLICIT=1,
- NET_NEIGH_UCAST_SOLICIT=2,
- NET_NEIGH_APP_SOLICIT=3,
- NET_NEIGH_RETRANS_TIME=4,
- NET_NEIGH_REACHABLE_TIME=5,
- NET_NEIGH_DELAY_PROBE_TIME=6,
- NET_NEIGH_GC_STALE_TIME=7,
- NET_NEIGH_UNRES_QLEN=8,
- NET_NEIGH_PROXY_QLEN=9,
- NET_NEIGH_ANYCAST_DELAY=10,
- NET_NEIGH_PROXY_DELAY=11,
- NET_NEIGH_LOCKTIME=12,
- NET_NEIGH_GC_INTERVAL=13,
- NET_NEIGH_GC_THRESH1=14,
- NET_NEIGH_GC_THRESH2=15,
- NET_NEIGH_GC_THRESH3=16,
- NET_NEIGH_RETRANS_TIME_MS=17,
- NET_NEIGH_REACHABLE_TIME_MS=18,
+ NET_NEIGH_MCAST_SOLICIT = 1,
+ NET_NEIGH_UCAST_SOLICIT = 2,
+ NET_NEIGH_APP_SOLICIT = 3,
+ NET_NEIGH_RETRANS_TIME = 4,
+ NET_NEIGH_REACHABLE_TIME = 5,
+ NET_NEIGH_DELAY_PROBE_TIME = 6,
+ NET_NEIGH_GC_STALE_TIME = 7,
+ NET_NEIGH_UNRES_QLEN = 8,
+ NET_NEIGH_PROXY_QLEN = 9,
+ NET_NEIGH_ANYCAST_DELAY = 10,
+ NET_NEIGH_PROXY_DELAY = 11,
+ NET_NEIGH_LOCKTIME = 12,
+ NET_NEIGH_GC_INTERVAL = 13,
+ NET_NEIGH_GC_THRESH1 = 14,
+ NET_NEIGH_GC_THRESH2 = 15,
+ NET_NEIGH_GC_THRESH3 = 16,
+ NET_NEIGH_RETRANS_TIME_MS = 17,
+ NET_NEIGH_REACHABLE_TIME_MS = 18,
+ NET_NEIGH_INTERVAL_PROBE_TIME_MS = 19,
};
/* /proc/sys/net/dccp */
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index b7b57967d90f..fbd8ca67e107 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -45,6 +45,8 @@
#define ALIGN_SIZE 64 /* Should be enough for most CPUs */
#define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */
#define TCMU_MAILBOX_FLAG_CAP_READ_LEN (1 << 1) /* Read data length */
+#define TCMU_MAILBOX_FLAG_CAP_TMR (1 << 2) /* TMR notifications */
+#define TCMU_MAILBOX_FLAG_CAP_KEEP_BUF (1 << 3) /* Keep buf after cmd completion */
struct tcmu_mailbox {
__u16 version;
@@ -62,6 +64,7 @@ struct tcmu_mailbox {
enum tcmu_opcode {
TCMU_OP_PAD = 0,
TCMU_OP_CMD,
+ TCMU_OP_TMR,
};
/*
@@ -73,6 +76,7 @@ struct tcmu_cmd_entry_hdr {
__u8 kflags;
#define TCMU_UFLAG_UNKNOWN_OP 0x1
#define TCMU_UFLAG_READ_LEN 0x2
+#define TCMU_UFLAG_KEEP_BUF 0x4
__u8 uflags;
} __packed;
@@ -128,6 +132,29 @@ struct tcmu_cmd_entry {
} __packed;
+struct tcmu_tmr_entry {
+ struct tcmu_cmd_entry_hdr hdr;
+
+#define TCMU_TMR_UNKNOWN 0
+#define TCMU_TMR_ABORT_TASK 1
+#define TCMU_TMR_ABORT_TASK_SET 2
+#define TCMU_TMR_CLEAR_ACA 3
+#define TCMU_TMR_CLEAR_TASK_SET 4
+#define TCMU_TMR_LUN_RESET 5
+#define TCMU_TMR_TARGET_WARM_RESET 6
+#define TCMU_TMR_TARGET_COLD_RESET 7
+/* Pseudo reset due to received PR OUT */
+#define TCMU_TMR_LUN_RESET_PRO 128
+ __u8 tmr_type;
+
+ __u8 __pad1;
+ __u16 __pad2;
+ __u32 cmd_cnt;
+ __u64 __pad3;
+ __u64 __pad4;
+ __u16 cmd_ids[];
+} __packed;
+
#define TCMU_OP_ALIGN_SIZE sizeof(__u64)
enum tcmu_genl_cmd {
diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h
index ccbd08709321..a7f5b11a8f1b 100644
--- a/include/uapi/linux/taskstats.h
+++ b/include/uapi/linux/taskstats.h
@@ -34,7 +34,7 @@
*/
-#define TASKSTATS_VERSION 10
+#define TASKSTATS_VERSION 13
#define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN
* in linux/sched.h */
@@ -48,7 +48,8 @@ struct taskstats {
__u32 ac_exitcode; /* Exit status */
/* The accounting flags of a task as defined in <linux/acct.h>
- * Defined values are AFORK, ASU, ACOMPAT, ACORE, and AXSIG.
+ * Defined values are AFORK, ASU, ACOMPAT, ACORE, AXSIG, and AGROUP.
+ * (AGROUP since version 12).
*/
__u8 ac_flag; /* Record flags */
__u8 ac_nice; /* task_nice */
@@ -172,6 +173,31 @@ struct taskstats {
/* v10: 64-bit btime to avoid overflow */
__u64 ac_btime64; /* 64-bit begin time */
+
+ /* v11: Delay waiting for memory compact */
+ __u64 compact_count;
+ __u64 compact_delay_total;
+
+ /* v12 begin */
+ __u32 ac_tgid; /* thread group ID */
+ /* Thread group walltime up to now. This is the total process walltime
+ * if the AGROUP flag is set.
+ */
+ __u64 ac_tgetime __attribute__((aligned(8)));
+ /* Lightweight information to identify process binary files.
+ * Userspace is left to match this to a file system path: the
+ * MAJOR() and MINOR() macros identify the device and thus the mount
+ * point, and the inode identifies the executable file within it.
+ * This corresponds to /proc/self/exe, i.e. the most recent exec().
+ * Values are zero for kernel threads.
+ */
+ __u64 ac_exe_dev; /* program binary device ID */
+ __u64 ac_exe_inode; /* program binary inode number */
+ /* v12 end */
+
+ /* v13: Delay waiting for write-protect copy */
+ __u64 wpcopy_count;
+ __u64 wpcopy_delay_total;
};
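
Given the comment above, a consumer would decode the new fields roughly as below (a sketch; `ts` is a taskstats sample already received over the genetlink socket):

    #include <stdio.h>
    #include <sys/sysmacros.h>
    #include <linux/taskstats.h>

    static void print_exe_identity(const struct taskstats *ts)
    {
            /* Match major:minor against /proc/self/mountinfo, then look the
             * inode up within that filesystem. */
            printf("exe dev %u:%u inode %llu\n",
                   major(ts->ac_exe_dev), minor(ts->ac_exe_dev),
                   (unsigned long long)ts->ac_exe_inode);
    }
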
diff --git a/include/uapi/linux/tc_act/tc_bpf.h b/include/uapi/linux/tc_act/tc_bpf.h
index 653c4f94f76e..fe6c8f8f3e8c 100644
--- a/include/uapi/linux/tc_act/tc_bpf.h
+++ b/include/uapi/linux/tc_act/tc_bpf.h
@@ -1,11 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_TC_BPF_H
diff --git a/include/uapi/linux/tc_act/tc_gate.h b/include/uapi/linux/tc_act/tc_gate.h
new file mode 100644
index 000000000000..f214b3a6d44f
--- /dev/null
+++ b/include/uapi/linux/tc_act/tc_gate.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/* Copyright 2020 NXP */
+
+#ifndef __LINUX_TC_GATE_H
+#define __LINUX_TC_GATE_H
+
+#include <linux/pkt_cls.h>
+
+struct tc_gate {
+ tc_gen;
+};
+
+enum {
+ TCA_GATE_ENTRY_UNSPEC,
+ TCA_GATE_ENTRY_INDEX,
+ TCA_GATE_ENTRY_GATE,
+ TCA_GATE_ENTRY_INTERVAL,
+ TCA_GATE_ENTRY_IPV,
+ TCA_GATE_ENTRY_MAX_OCTETS,
+ __TCA_GATE_ENTRY_MAX,
+};
+#define TCA_GATE_ENTRY_MAX (__TCA_GATE_ENTRY_MAX - 1)
+
+enum {
+ TCA_GATE_ONE_ENTRY_UNSPEC,
+ TCA_GATE_ONE_ENTRY,
+ __TCA_GATE_ONE_ENTRY_MAX,
+};
+#define TCA_GATE_ONE_ENTRY_MAX (__TCA_GATE_ONE_ENTRY_MAX - 1)
+
+enum {
+ TCA_GATE_UNSPEC,
+ TCA_GATE_TM,
+ TCA_GATE_PARMS,
+ TCA_GATE_PAD,
+ TCA_GATE_PRIORITY,
+ TCA_GATE_ENTRY_LIST,
+ TCA_GATE_BASE_TIME,
+ TCA_GATE_CYCLE_TIME,
+ TCA_GATE_CYCLE_TIME_EXT,
+ TCA_GATE_FLAGS,
+ TCA_GATE_CLOCKID,
+ __TCA_GATE_MAX,
+};
+#define TCA_GATE_MAX (__TCA_GATE_MAX - 1)
+
+#endif
diff --git a/include/uapi/linux/tc_act/tc_mpls.h b/include/uapi/linux/tc_act/tc_mpls.h
index 9360e95273c7..9e4e8f52a779 100644
--- a/include/uapi/linux/tc_act/tc_mpls.h
+++ b/include/uapi/linux/tc_act/tc_mpls.h
@@ -10,6 +10,7 @@
#define TCA_MPLS_ACT_PUSH 2
#define TCA_MPLS_ACT_MODIFY 3
#define TCA_MPLS_ACT_DEC_TTL 4
+#define TCA_MPLS_ACT_MAC_PUSH 5
struct tc_mpls {
tc_gen; /* generic TC action fields. */
diff --git a/include/uapi/linux/tc_act/tc_skbedit.h b/include/uapi/linux/tc_act/tc_skbedit.h
index 800e93377218..64032513cc4c 100644
--- a/include/uapi/linux/tc_act/tc_skbedit.h
+++ b/include/uapi/linux/tc_act/tc_skbedit.h
@@ -2,19 +2,6 @@
/*
* Copyright (c) 2008, Intel Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
* Author: Alexander Duyck <alexander.h.duyck@intel.com>
*/
@@ -29,6 +16,7 @@
#define SKBEDIT_F_PTYPE 0x8
#define SKBEDIT_F_MASK 0x10
#define SKBEDIT_F_INHERITDSFIELD 0x20
+#define SKBEDIT_F_TXQ_SKBHASH 0x40
struct tc_skbedit {
tc_gen;
@@ -45,6 +33,7 @@ enum {
TCA_SKBEDIT_PTYPE,
TCA_SKBEDIT_MASK,
TCA_SKBEDIT_FLAGS,
+ TCA_SKBEDIT_QUEUE_MAPPING_MAX,
__TCA_SKBEDIT_MAX
};
#define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_skbmod.h b/include/uapi/linux/tc_act/tc_skbmod.h
index c525b3503797..ac62c9a993ea 100644
--- a/include/uapi/linux/tc_act/tc_skbmod.h
+++ b/include/uapi/linux/tc_act/tc_skbmod.h
@@ -1,12 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* Copyright (c) 2016, Jamal Hadi Salim
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
-*/
+ */
#ifndef __LINUX_TC_SKBMOD_H
#define __LINUX_TC_SKBMOD_H
@@ -17,6 +12,7 @@
#define SKBMOD_F_SMAC 0x2
#define SKBMOD_F_ETYPE 0x4
#define SKBMOD_F_SWAPMAC 0x8
+#define SKBMOD_F_ECN 0x10
struct tc_skbmod {
tc_gen;
diff --git a/include/uapi/linux/tc_act/tc_tunnel_key.h b/include/uapi/linux/tc_act/tc_tunnel_key.h
index 3f10dc4e7a4b..49ad4033951b 100644
--- a/include/uapi/linux/tc_act/tc_tunnel_key.h
+++ b/include/uapi/linux/tc_act/tc_tunnel_key.h
@@ -2,11 +2,6 @@
/*
* Copyright (c) 2016, Amir Vadai <amir@vadai.me>
* Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_TC_TUNNEL_KEY_H
diff --git a/include/uapi/linux/tc_act/tc_vlan.h b/include/uapi/linux/tc_act/tc_vlan.h
index 168995b54a70..3e1f8e57cdd2 100644
--- a/include/uapi/linux/tc_act/tc_vlan.h
+++ b/include/uapi/linux/tc_act/tc_vlan.h
@@ -1,11 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_TC_VLAN_H
@@ -16,6 +11,8 @@
#define TCA_VLAN_ACT_POP 1
#define TCA_VLAN_ACT_PUSH 2
#define TCA_VLAN_ACT_MODIFY 3
+#define TCA_VLAN_ACT_POP_ETH 4
+#define TCA_VLAN_ACT_PUSH_ETH 5
struct tc_vlan {
tc_gen;
@@ -30,6 +27,8 @@ enum {
TCA_VLAN_PUSH_VLAN_PROTOCOL,
TCA_VLAN_PAD,
TCA_VLAN_PUSH_VLAN_PRIORITY,
+ TCA_VLAN_PUSH_ETH_DST,
+ TCA_VLAN_PUSH_ETH_SRC,
__TCA_VLAN_MAX,
};
#define TCA_VLAN_MAX (__TCA_VLAN_MAX - 1)
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index fd9eb8f6bcae..8fc09e8638b3 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -51,7 +51,7 @@ struct tcphdr {
fin:1;
#else
#error "Adjust your <asm/byteorder.h> defines"
-#endif
+#endif
__be16 window;
__sum16 check;
__be16 urg_ptr;
@@ -62,14 +62,14 @@ struct tcphdr {
 * (union is compatible with any of its members)
* This means this part of the code is -fstrict-aliasing safe now.
*/
-union tcp_word_hdr {
+union tcp_word_hdr {
struct tcphdr hdr;
- __be32 words[5];
-};
+ __be32 words[5];
+};
-#define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3])
+#define tcp_flag_word(tp) (((union tcp_word_hdr *)(tp))->words[3])
-enum {
+enum {
TCP_FLAG_CWR = __constant_cpu_to_be32(0x00800000),
TCP_FLAG_ECE = __constant_cpu_to_be32(0x00400000),
TCP_FLAG_URG = __constant_cpu_to_be32(0x00200000),
@@ -80,7 +80,7 @@ enum {
TCP_FLAG_FIN = __constant_cpu_to_be32(0x00010000),
TCP_RESERVED_BITS = __constant_cpu_to_be32(0x0F000000),
TCP_DATA_OFFSET = __constant_cpu_to_be32(0xF0000000)
-};
+};
/*
* TCP general constants
@@ -103,8 +103,8 @@ enum {
#define TCP_QUICKACK 12 /* Block/reenable quick acks */
#define TCP_CONGESTION 13 /* Congestion control algorithm */
#define TCP_MD5SIG 14 /* TCP MD5 Signature (RFC2385) */
-#define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams*/
-#define TCP_THIN_DUPACK 17 /* Fast retrans. after 1 dupack */
+#define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams */
+#define TCP_THIN_DUPACK 17 /* Fast retrans. after 1 dupack */
#define TCP_USER_TIMEOUT 18 /* How long for loss retry before timeout */
#define TCP_REPAIR 19 /* TCP sock is under repair right now */
#define TCP_REPAIR_QUEUE 20
@@ -312,6 +312,9 @@ enum {
TCP_NLA_REORD_SEEN, /* reordering events seen */
TCP_NLA_SRTT, /* smoothed RTT in usecs */
TCP_NLA_TIMEOUT_REHASH, /* Timeout-triggered rehash attempts */
+ TCP_NLA_BYTES_NOTSENT, /* Bytes in write queue not yet sent */
+ TCP_NLA_EDT, /* Earliest departure time (CLOCK_MONOTONIC) */
+ TCP_NLA_TTL, /* TTL or hop limit of a packet received */
};
/* for TCP_MD5SIG socket option */
@@ -341,9 +344,19 @@ struct tcp_diag_md5sig {
/* setsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, ...) */
+#define TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT 0x1
struct tcp_zerocopy_receive {
__u64 address; /* in: address of mapping */
__u32 length; /* in/out: number of bytes to map/mapped */
__u32 recv_skip_hint; /* out: amount of bytes to skip */
+ __u32 inq; /* out: amount of bytes in read queue */
+ __s32 err; /* out: socket error */
+ __u64 copybuf_address; /* in: copybuf address (small reads) */
+ __s32 copybuf_len; /* in/out: copybuf bytes avail/used or error */
+ __u32 flags; /* in: flags */
+ __u64 msg_control; /* ancillary data */
+ __u64 msg_controllen;
+ __u32 msg_flags;
+ __u32 reserved; /* set to 0 for now */
};
#endif /* _UAPI_LINUX_TCP_H */
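
The zerocopy receive flow is driven by getsockopt() rather than a dedicated syscall. A minimal sketch, assuming `addr` was reserved beforehand with mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0) on the TCP socket:

    #include <string.h>
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/tcp.h>

    ssize_t tcp_zc_receive(int fd, void *addr, __u32 len)
    {
            struct tcp_zerocopy_receive zc;
            socklen_t zc_len = sizeof(zc);

            memset(&zc, 0, sizeof(zc));
            zc.address = (__u64)(unsigned long)addr;
            zc.length = len;
            if (getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &zc_len) < 0)
                    return -1;
            return zc.length; /* bytes now mapped and readable at addr */
    }
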
diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h
index 6596f3a09e54..23e57164693c 100644
--- a/include/uapi/linux/tee.h
+++ b/include/uapi/linux/tee.h
@@ -42,15 +42,14 @@
#define TEE_IOC_MAGIC 0xa4
#define TEE_IOC_BASE 0
-/* Flags relating to shared memory */
-#define TEE_IOCTL_SHM_MAPPED 0x1 /* memory mapped in normal world */
-#define TEE_IOCTL_SHM_DMA_BUF 0x2 /* dma-buf handle on shared memory */
-
#define TEE_MAX_ARG_SIZE 1024
#define TEE_GEN_CAP_GP (1 << 0)/* GlobalPlatform compliant TEE */
#define TEE_GEN_CAP_PRIVILEGED (1 << 1)/* Privileged device (for supplicant) */
#define TEE_GEN_CAP_REG_MEM (1 << 2)/* Supports registering shared memory */
+#define TEE_GEN_CAP_MEMREF_NULL (1 << 3)/* NULL MemRef support */
+
+#define TEE_MEMREF_NULL (__u64)(-1) /* NULL MemRef Buffer */
/*
* TEE Implementation ID
@@ -173,6 +172,15 @@ struct tee_ioctl_buf_data {
#define TEE_IOCTL_LOGIN_APPLICATION 4
#define TEE_IOCTL_LOGIN_USER_APPLICATION 5
#define TEE_IOCTL_LOGIN_GROUP_APPLICATION 6
+/*
+ * User space is not allowed to use the GP implementation specific login
+ * method range (0x80000000 - 0xBFFFFFFF). This range is reserved for
+ * REE kernel clients and the TEE implementation.
+ */
+#define TEE_IOCTL_LOGIN_REE_KERNEL_MIN 0x80000000
+#define TEE_IOCTL_LOGIN_REE_KERNEL_MAX 0xBFFFFFFF
+/* Private login method for REE kernel clients */
+#define TEE_IOCTL_LOGIN_REE_KERNEL 0x80000000
/**
* struct tee_ioctl_param - parameter
@@ -191,6 +199,16 @@ struct tee_ioctl_buf_data {
* a part of a shared memory by specifying an offset (@a) and size (@b) of
* the object. To supply the entire shared memory object set the offset
* (@a) to 0 and size (@b) to the previously returned size of the object.
+ *
+ * A client may need to present a NULL pointer in the argument
+ * passed to a trusted application in the TEE.
+ * This is also a requirement in GlobalPlatform Client API v1.0c
+ * (section 3.2.5 memory references), which can be found at
+ * http://www.globalplatform.org/specificationsdevice.asp
+ *
+ * If a NULL pointer is passed to a TA in the TEE, the (@c) IOCTL
+ * parameter value must be set to TEE_MEMREF_NULL, indicating a NULL
+ * memory reference.
*/
struct tee_ioctl_param {
__u64 attr;
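
A small illustration of the rule described above, for a TEE that advertises TEE_GEN_CAP_MEMREF_NULL (a sketch over one invocation parameter):

    #include <linux/tee.h>

    /* Pass a NULL memory reference as an invocation parameter. */
    static void set_null_memref(struct tee_ioctl_param *param)
    {
            param->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
            param->a = 0;               /* offset: unused */
            param->b = 0;               /* size: zero bytes */
            param->c = TEE_MEMREF_NULL; /* no shared-memory object backs this */
    }
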
@@ -333,7 +351,7 @@ struct tee_iocl_supp_send_arg {
};
/**
- * TEE_IOC_SUPPL_SEND - Receive a request for a supplicant function
+ * TEE_IOC_SUPPL_SEND - Send a response to a received request
*
* Takes a struct tee_ioctl_buf_data which contains a struct
* tee_iocl_supp_send_arg followed by any array of struct tee_param
diff --git a/include/uapi/linux/termios.h b/include/uapi/linux/termios.h
index 33961d4e4de0..e6da9d4433d1 100644
--- a/include/uapi/linux/termios.h
+++ b/include/uapi/linux/termios.h
@@ -5,19 +5,4 @@
#include <linux/types.h>
#include <asm/termios.h>
-#define NFF 5
-
-struct termiox
-{
- __u16 x_hflag;
- __u16 x_cflag;
- __u16 x_rflag[NFF];
- __u16 x_sflag;
-};
-
-#define RTSXOFF 0x0001 /* RTS flow control on input */
-#define CTSXON 0x0002 /* CTS flow control on output */
-#define DTRXOFF 0x0004 /* DTR flow control on input */
-#define DSRXON 0x0008 /* DCD flow control on output */
-
#endif
diff --git a/include/uapi/linux/thermal.h b/include/uapi/linux/thermal.h
index 96218378dda8..fc78bf3aead7 100644
--- a/include/uapi/linux/thermal.h
+++ b/include/uapi/linux/thermal.h
@@ -4,31 +4,90 @@
#define THERMAL_NAME_LENGTH 20
-/* Adding event notification support elements */
-#define THERMAL_GENL_FAMILY_NAME "thermal_event"
-#define THERMAL_GENL_VERSION 0x01
-#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_grp"
-
-/* Events supported by Thermal Netlink */
-enum events {
- THERMAL_AUX0,
- THERMAL_AUX1,
- THERMAL_CRITICAL,
- THERMAL_DEV_FAULT,
+enum thermal_device_mode {
+ THERMAL_DEVICE_DISABLED = 0,
+ THERMAL_DEVICE_ENABLED,
+};
+
+enum thermal_trip_type {
+ THERMAL_TRIP_ACTIVE = 0,
+ THERMAL_TRIP_PASSIVE,
+ THERMAL_TRIP_HOT,
+ THERMAL_TRIP_CRITICAL,
};
-/* attributes of thermal_genl_family */
-enum {
+/* Adding event notification support elements */
+#define THERMAL_GENL_FAMILY_NAME "thermal"
+#define THERMAL_GENL_VERSION 0x01
+#define THERMAL_GENL_SAMPLING_GROUP_NAME "sampling"
+#define THERMAL_GENL_EVENT_GROUP_NAME "event"
+
+/* Attributes of thermal_genl_family */
+enum thermal_genl_attr {
THERMAL_GENL_ATTR_UNSPEC,
- THERMAL_GENL_ATTR_EVENT,
+ THERMAL_GENL_ATTR_TZ,
+ THERMAL_GENL_ATTR_TZ_ID,
+ THERMAL_GENL_ATTR_TZ_TEMP,
+ THERMAL_GENL_ATTR_TZ_TRIP,
+ THERMAL_GENL_ATTR_TZ_TRIP_ID,
+ THERMAL_GENL_ATTR_TZ_TRIP_TYPE,
+ THERMAL_GENL_ATTR_TZ_TRIP_TEMP,
+ THERMAL_GENL_ATTR_TZ_TRIP_HYST,
+ THERMAL_GENL_ATTR_TZ_MODE,
+ THERMAL_GENL_ATTR_TZ_NAME,
+ THERMAL_GENL_ATTR_TZ_CDEV_WEIGHT,
+ THERMAL_GENL_ATTR_TZ_GOV,
+ THERMAL_GENL_ATTR_TZ_GOV_NAME,
+ THERMAL_GENL_ATTR_CDEV,
+ THERMAL_GENL_ATTR_CDEV_ID,
+ THERMAL_GENL_ATTR_CDEV_CUR_STATE,
+ THERMAL_GENL_ATTR_CDEV_MAX_STATE,
+ THERMAL_GENL_ATTR_CDEV_NAME,
+ THERMAL_GENL_ATTR_GOV_NAME,
+ THERMAL_GENL_ATTR_CPU_CAPABILITY,
+ THERMAL_GENL_ATTR_CPU_CAPABILITY_ID,
+ THERMAL_GENL_ATTR_CPU_CAPABILITY_PERFORMANCE,
+ THERMAL_GENL_ATTR_CPU_CAPABILITY_EFFICIENCY,
__THERMAL_GENL_ATTR_MAX,
};
#define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1)
-/* commands supported by the thermal_genl_family */
-enum {
+enum thermal_genl_sampling {
+ THERMAL_GENL_SAMPLING_TEMP,
+ __THERMAL_GENL_SAMPLING_MAX,
+};
+#define THERMAL_GENL_SAMPLING_MAX (__THERMAL_GENL_SAMPLING_MAX - 1)
+
+/* Events of thermal_genl_family */
+enum thermal_genl_event {
+ THERMAL_GENL_EVENT_UNSPEC,
+ THERMAL_GENL_EVENT_TZ_CREATE, /* Thermal zone creation */
+ THERMAL_GENL_EVENT_TZ_DELETE, /* Thermal zone deletion */
+ THERMAL_GENL_EVENT_TZ_DISABLE, /* Thermal zone disabled */
+ THERMAL_GENL_EVENT_TZ_ENABLE, /* Thermal zone enabled */
+ THERMAL_GENL_EVENT_TZ_TRIP_UP, /* Trip point crossed on the way up */
+ THERMAL_GENL_EVENT_TZ_TRIP_DOWN, /* Trip point crossed on the way down */
+ THERMAL_GENL_EVENT_TZ_TRIP_CHANGE, /* Trip point changed */
+ THERMAL_GENL_EVENT_TZ_TRIP_ADD, /* Trip point added */
+ THERMAL_GENL_EVENT_TZ_TRIP_DELETE, /* Trip point deleted */
+ THERMAL_GENL_EVENT_CDEV_ADD, /* Cdev bound to the thermal zone */
+ THERMAL_GENL_EVENT_CDEV_DELETE, /* Cdev unbound */
+ THERMAL_GENL_EVENT_CDEV_STATE_UPDATE, /* Cdev state updated */
+ THERMAL_GENL_EVENT_TZ_GOV_CHANGE, /* Governor policy changed */
+ THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE, /* CPU capability changed */
+ __THERMAL_GENL_EVENT_MAX,
+};
+#define THERMAL_GENL_EVENT_MAX (__THERMAL_GENL_EVENT_MAX - 1)
+
+/* Commands supported by the thermal_genl_family */
+enum thermal_genl_cmd {
THERMAL_GENL_CMD_UNSPEC,
- THERMAL_GENL_CMD_EVENT,
+ THERMAL_GENL_CMD_TZ_GET_ID, /* List of thermal zones id */
+ THERMAL_GENL_CMD_TZ_GET_TRIP, /* List of thermal trips */
+ THERMAL_GENL_CMD_TZ_GET_TEMP, /* Get the thermal zone temperature */
+ THERMAL_GENL_CMD_TZ_GET_GOV, /* Get the thermal zone governor */
+ THERMAL_GENL_CMD_TZ_GET_MODE, /* Get the thermal zone mode */
+ THERMAL_GENL_CMD_CDEV_GET, /* List of cdev id */
__THERMAL_GENL_CMD_MAX,
};
#define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1)
diff --git a/include/uapi/linux/tipc.h b/include/uapi/linux/tipc.h
index add01db1daef..80ea15e12113 100644
--- a/include/uapi/linux/tipc.h
+++ b/include/uapi/linux/tipc.h
@@ -254,6 +254,8 @@ static inline int tipc_aead_key_size(struct tipc_aead_key *key)
return sizeof(*key) + key->keylen;
}
+#define TIPC_REKEYING_NOW (~0U)
+
/* The macros and functions below are deprecated:
*/
diff --git a/include/uapi/linux/tipc_config.h b/include/uapi/linux/tipc_config.h
index 4dfc05651c98..c00adf2fe868 100644
--- a/include/uapi/linux/tipc_config.h
+++ b/include/uapi/linux/tipc_config.h
@@ -43,10 +43,6 @@
#include <linux/tipc.h>
#include <asm/byteorder.h>
-#ifndef __KERNEL__
-#include <arpa/inet.h> /* for ntohs etc. */
-#endif
-
/*
* Configuration
*
@@ -269,33 +265,33 @@ static inline int TLV_OK(const void *tlv, __u16 space)
*/
return (space >= TLV_SPACE(0)) &&
- (ntohs(((struct tlv_desc *)tlv)->tlv_len) <= space);
+ (__be16_to_cpu(((struct tlv_desc *)tlv)->tlv_len) <= space);
}
static inline int TLV_CHECK(const void *tlv, __u16 space, __u16 exp_type)
{
return TLV_OK(tlv, space) &&
- (ntohs(((struct tlv_desc *)tlv)->tlv_type) == exp_type);
+ (__be16_to_cpu(((struct tlv_desc *)tlv)->tlv_type) == exp_type);
}
static inline int TLV_GET_LEN(struct tlv_desc *tlv)
{
- return ntohs(tlv->tlv_len);
+ return __be16_to_cpu(tlv->tlv_len);
}
static inline void TLV_SET_LEN(struct tlv_desc *tlv, __u16 len)
{
- tlv->tlv_len = htons(len);
+ tlv->tlv_len = __cpu_to_be16(len);
}
static inline int TLV_CHECK_TYPE(struct tlv_desc *tlv, __u16 type)
{
- return (ntohs(tlv->tlv_type) == type);
+ return (__be16_to_cpu(tlv->tlv_type) == type);
}
static inline void TLV_SET_TYPE(struct tlv_desc *tlv, __u16 type)
{
- tlv->tlv_type = htons(type);
+ tlv->tlv_type = __cpu_to_be16(type);
}
static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len)
@@ -305,8 +301,8 @@ static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len)
tlv_len = TLV_LENGTH(len);
tlv_ptr = (struct tlv_desc *)tlv;
- tlv_ptr->tlv_type = htons(type);
- tlv_ptr->tlv_len = htons(tlv_len);
+ tlv_ptr->tlv_type = __cpu_to_be16(type);
+ tlv_ptr->tlv_len = __cpu_to_be16(tlv_len);
if (len && data) {
memcpy(TLV_DATA(tlv_ptr), data, len);
memset((char *)TLV_DATA(tlv_ptr) + len, 0, TLV_SPACE(len) - tlv_len);
@@ -348,7 +344,7 @@ static inline void *TLV_LIST_DATA(struct tlv_list_desc *list)
static inline void TLV_LIST_STEP(struct tlv_list_desc *list)
{
- __u16 tlv_space = TLV_ALIGN(ntohs(list->tlv_ptr->tlv_len));
+ __u16 tlv_space = TLV_ALIGN(__be16_to_cpu(list->tlv_ptr->tlv_len));
list->tlv_ptr = (struct tlv_desc *)((char *)list->tlv_ptr + tlv_space);
list->tlv_space -= tlv_space;
@@ -404,9 +400,9 @@ static inline int TCM_SET(void *msg, __u16 cmd, __u16 flags,
msg_len = TCM_LENGTH(data_len);
tcm_hdr = (struct tipc_cfg_msg_hdr *)msg;
- tcm_hdr->tcm_len = htonl(msg_len);
- tcm_hdr->tcm_type = htons(cmd);
- tcm_hdr->tcm_flags = htons(flags);
+ tcm_hdr->tcm_len = __cpu_to_be32(msg_len);
+ tcm_hdr->tcm_type = __cpu_to_be16(cmd);
+ tcm_hdr->tcm_flags = __cpu_to_be16(flags);
if (data_len && data) {
memcpy(TCM_DATA(msg), data, data_len);
memset((char *)TCM_DATA(msg) + data_len, 0, TCM_SPACE(data_len) - msg_len);
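Since the helpers now build on __cpu_to_be16/__be16_to_cpu, they compile in
both kernel and userspace contexts without pulling in <arpa/inet.h>. A hedged
usage sketch (TIPC_TLV_BEARER_NAME is one of the TLV type tags defined earlier
in this header, outside this hunk):

#include <string.h>
#include <linux/tipc_config.h>

static int build_bearer_tlv(char *buf, __u16 buflen)
{
	const char *name = "eth:eth0";		/* example payload */
	__u16 len = strlen(name) + 1;

	if (TLV_SPACE(len) > buflen)
		return -1;
	/* TLV_SET() stores type/len in big-endian form and returns the
	 * padded space consumed in buf */
	return TLV_SET(buf, TIPC_TLV_BEARER_NAME, (void *)name, len);
}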
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
index dc0d23a50e69..d847dd671d79 100644
--- a/include/uapi/linux/tipc_netlink.h
+++ b/include/uapi/linux/tipc_netlink.h
@@ -165,6 +165,8 @@ enum {
TIPC_NLA_NODE_UP, /* flag */
TIPC_NLA_NODE_ID, /* data */
TIPC_NLA_NODE_KEY, /* data */
+ TIPC_NLA_NODE_KEY_MASTER, /* flag */
+ TIPC_NLA_NODE_REKEYING, /* u32 */
__TIPC_NLA_NODE_MAX,
TIPC_NLA_NODE_MAX = __TIPC_NLA_NODE_MAX - 1
diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h
index bcd2869ed472..b66a800389cc 100644
--- a/include/uapi/linux/tls.h
+++ b/include/uapi/linux/tls.h
@@ -39,6 +39,8 @@
/* TLS socket options */
#define TLS_TX 1 /* Set transmit parameters */
#define TLS_RX 2 /* Set receive parameters */
+#define TLS_TX_ZEROCOPY_RO 3 /* TX zerocopy (only sendfile now) */
+#define TLS_RX_EXPECT_NO_PAD 4 /* Attempt opportunistic zero-copy */
/* Supported versions */
#define TLS_VERSION_MINOR(ver) ((ver) & 0xFF)
@@ -77,6 +79,41 @@
#define TLS_CIPHER_AES_CCM_128_TAG_SIZE 16
#define TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE 8
+#define TLS_CIPHER_CHACHA20_POLY1305 54
+#define TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE 12
+#define TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE 32
+#define TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE 0
+#define TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE 16
+#define TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE 8
+
+#define TLS_CIPHER_SM4_GCM 55
+#define TLS_CIPHER_SM4_GCM_IV_SIZE 8
+#define TLS_CIPHER_SM4_GCM_KEY_SIZE 16
+#define TLS_CIPHER_SM4_GCM_SALT_SIZE 4
+#define TLS_CIPHER_SM4_GCM_TAG_SIZE 16
+#define TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE 8
+
+#define TLS_CIPHER_SM4_CCM 56
+#define TLS_CIPHER_SM4_CCM_IV_SIZE 8
+#define TLS_CIPHER_SM4_CCM_KEY_SIZE 16
+#define TLS_CIPHER_SM4_CCM_SALT_SIZE 4
+#define TLS_CIPHER_SM4_CCM_TAG_SIZE 16
+#define TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE 8
+
+#define TLS_CIPHER_ARIA_GCM_128 57
+#define TLS_CIPHER_ARIA_GCM_128_IV_SIZE 8
+#define TLS_CIPHER_ARIA_GCM_128_KEY_SIZE 16
+#define TLS_CIPHER_ARIA_GCM_128_SALT_SIZE 4
+#define TLS_CIPHER_ARIA_GCM_128_TAG_SIZE 16
+#define TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE 8
+
+#define TLS_CIPHER_ARIA_GCM_256 58
+#define TLS_CIPHER_ARIA_GCM_256_IV_SIZE 8
+#define TLS_CIPHER_ARIA_GCM_256_KEY_SIZE 32
+#define TLS_CIPHER_ARIA_GCM_256_SALT_SIZE 4
+#define TLS_CIPHER_ARIA_GCM_256_TAG_SIZE 16
+#define TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE 8
+
#define TLS_SET_RECORD_TYPE 1
#define TLS_GET_RECORD_TYPE 2
@@ -109,12 +146,54 @@ struct tls12_crypto_info_aes_ccm_128 {
unsigned char rec_seq[TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE];
};
+struct tls12_crypto_info_chacha20_poly1305 {
+ struct tls_crypto_info info;
+ unsigned char iv[TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE];
+ unsigned char key[TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE];
+ unsigned char salt[TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE];
+ unsigned char rec_seq[TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE];
+};
+
+struct tls12_crypto_info_sm4_gcm {
+ struct tls_crypto_info info;
+ unsigned char iv[TLS_CIPHER_SM4_GCM_IV_SIZE];
+ unsigned char key[TLS_CIPHER_SM4_GCM_KEY_SIZE];
+ unsigned char salt[TLS_CIPHER_SM4_GCM_SALT_SIZE];
+ unsigned char rec_seq[TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE];
+};
+
+struct tls12_crypto_info_sm4_ccm {
+ struct tls_crypto_info info;
+ unsigned char iv[TLS_CIPHER_SM4_CCM_IV_SIZE];
+ unsigned char key[TLS_CIPHER_SM4_CCM_KEY_SIZE];
+ unsigned char salt[TLS_CIPHER_SM4_CCM_SALT_SIZE];
+ unsigned char rec_seq[TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE];
+};
+
+struct tls12_crypto_info_aria_gcm_128 {
+ struct tls_crypto_info info;
+ unsigned char iv[TLS_CIPHER_ARIA_GCM_128_IV_SIZE];
+ unsigned char key[TLS_CIPHER_ARIA_GCM_128_KEY_SIZE];
+ unsigned char salt[TLS_CIPHER_ARIA_GCM_128_SALT_SIZE];
+ unsigned char rec_seq[TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE];
+};
+
+struct tls12_crypto_info_aria_gcm_256 {
+ struct tls_crypto_info info;
+ unsigned char iv[TLS_CIPHER_ARIA_GCM_256_IV_SIZE];
+ unsigned char key[TLS_CIPHER_ARIA_GCM_256_KEY_SIZE];
+ unsigned char salt[TLS_CIPHER_ARIA_GCM_256_SALT_SIZE];
+ unsigned char rec_seq[TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE];
+};
+
enum {
TLS_INFO_UNSPEC,
TLS_INFO_VERSION,
TLS_INFO_CIPHER,
TLS_INFO_TXCONF,
TLS_INFO_RXCONF,
+ TLS_INFO_ZC_RO_TX,
+ TLS_INFO_RX_NO_PAD,
__TLS_INFO_MAX,
};
#define TLS_INFO_MAX (__TLS_INFO_MAX - 1)
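For context, a cipher description such as the new ChaCha20-Poly1305 one is
handed to the kernel via setsockopt() after attaching the "tls" ULP. A minimal
sketch with dummy key material; the TCP_ULP/SOL_TLS fallback defines are for
older libcs and mirror the kernel's values:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <linux/tls.h>

#ifndef TCP_ULP
#define TCP_ULP 31		/* attach an upper layer protocol */
#endif
#ifndef SOL_TLS
#define SOL_TLS 282		/* from include/linux/socket.h */
#endif

static int enable_ktls_tx(int sock)
{
	struct tls12_crypto_info_chacha20_poly1305 ci;

	memset(&ci, 0, sizeof(ci));
	ci.info.version = TLS_1_3_VERSION;
	ci.info.cipher_type = TLS_CIPHER_CHACHA20_POLY1305;
	/* real code copies key/iv/rec_seq from the TLS handshake here */

	if (setsockopt(sock, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;
	return setsockopt(sock, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}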
diff --git a/include/uapi/linux/tty.h b/include/uapi/linux/tty.h
index 376cccf397be..68aeae2addec 100644
--- a/include/uapi/linux/tty.h
+++ b/include/uapi/linux/tty.h
@@ -6,8 +6,6 @@
* 'tty.h' defines some structures used by tty_io.c and some defines.
*/
-#define NR_LDISCS 30
-
/* line disciplines */
#define N_TTY 0
#define N_SLIP 1
@@ -38,5 +36,11 @@
#define N_NCI 25 /* NFC NCI UART */
#define N_SPEAKUP 26 /* Speakup communication with synths */
#define N_NULL 27 /* Null ldisc used for error handling */
+#define N_MCTP 28 /* MCTP-over-serial */
+#define N_DEVELOPMENT 29 /* Manual out-of-tree testing */
+#define N_CAN327 30 /* ELM327 based OBD-II interfaces */
+
+/* Always the newest line discipline + 1 */
+#define NR_LDISCS 31
#endif /* _UAPI_LINUX_TTY_H */
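A line discipline number such as the new N_MCTP is attached to an open tty
with the long-standing TIOCSETD ioctl; a one-liner sketch:

#include <sys/ioctl.h>
#include <linux/tty.h>

static int attach_mctp_ldisc(int tty_fd)
{
	int ldisc = N_MCTP;		/* new in this revision */

	return ioctl(tty_fd, TIOCSETD, &ldisc);
}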
diff --git a/include/uapi/linux/tty_flags.h b/include/uapi/linux/tty_flags.h
index 900a32e63424..cf25056d4b27 100644
--- a/include/uapi/linux/tty_flags.h
+++ b/include/uapi/linux/tty_flags.h
@@ -39,7 +39,7 @@
 * WARNING: These flags are no longer used and have been superseded by the
* TTY_PORT_ flags in the iflags field (and not userspace-visible)
*/
-#ifndef _KERNEL_
+#ifndef __KERNEL__
#define ASYNCB_INITIALIZED 31 /* Serial port was initialized */
#define ASYNCB_SUSPENDED 30 /* Serial port is suspended */
#define ASYNCB_NORMAL_ACTIVE 29 /* Normal device is active */
@@ -73,15 +73,15 @@
#define ASYNC_MAGIC_MULTIPLIER (1U << ASYNCB_MAGIC_MULTIPLIER)
#define ASYNC_FLAGS ((1U << (ASYNCB_LAST_USER + 1)) - 1)
-#define ASYNC_DEPRECATED (ASYNC_SESSION_LOCKOUT | ASYNC_PGRP_LOCKOUT | \
- ASYNC_CALLOUT_NOHUP | ASYNC_AUTOPROBE)
+#define ASYNC_DEPRECATED (ASYNC_SPLIT_TERMIOS | ASYNC_SESSION_LOCKOUT | \
+ ASYNC_PGRP_LOCKOUT | ASYNC_CALLOUT_NOHUP | ASYNC_AUTOPROBE)
#define ASYNC_USR_MASK (ASYNC_SPD_MASK|ASYNC_CALLOUT_NOHUP| \
ASYNC_LOW_LATENCY)
#define ASYNC_SPD_CUST (ASYNC_SPD_HI|ASYNC_SPD_VHI)
#define ASYNC_SPD_WARP (ASYNC_SPD_HI|ASYNC_SPD_SHI)
#define ASYNC_SPD_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI|ASYNC_SPD_SHI)
-#ifndef _KERNEL_
+#ifndef __KERNEL__
/* These flags are no longer used (and were always masked from userspace) */
#define ASYNC_INITIALIZED (1U << ASYNCB_INITIALIZED)
#define ASYNC_NORMAL_ACTIVE (1U << ASYNCB_NORMAL_ACTIVE)
diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h
index 2fce8b6876e9..308433be33c2 100644
--- a/include/uapi/linux/types.h
+++ b/include/uapi/linux/types.h
@@ -7,7 +7,7 @@
#ifndef __ASSEMBLY__
#ifndef __KERNEL__
#ifndef __EXPORTED_HEADERS__
-#warning "Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders"
+#warning "Attempt to use kernel headers from user space, see https://kernelnewbies.org/KernelHeaders"
#endif /* __EXPORTED_HEADERS__ */
#endif
@@ -19,12 +19,15 @@
* any application/library that wants linux/types.h.
*/
+/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */
#ifdef __CHECKER__
-#define __bitwise__ __attribute__((bitwise))
+#define __bitwise __attribute__((bitwise))
#else
-#define __bitwise__
+#define __bitwise
#endif
-#define __bitwise __bitwise__
+
+/* The kernel doesn't use this legacy form, but user space does */
+#define __bitwise__ __bitwise
typedef __u16 __bitwise __le16;
typedef __u16 __bitwise __be16;
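The reversal makes __bitwise the canonical spelling and keeps __bitwise__ only
as a userspace compatibility alias. A sketch of what the annotation buys under
sparse; the __force fallback define mirrors the kernel's compiler_types.h and
is an assumption here:

#include <linux/types.h>

#ifdef __CHECKER__
#define __force __attribute__((force))	/* mirrors the kernel definition */
#else
#define __force
#endif

typedef __u32 __bitwise cookie_t;	/* a distinct type under sparse */

static cookie_t make_cookie(__u32 raw)
{
	/* a bare "return raw;" draws a sparse warning here; the __force
	 * cast documents the deliberate reinterpretation */
	return (__force cookie_t)raw;
}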
diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h
new file mode 100644
index 000000000000..8f88e3a29998
--- /dev/null
+++ b/include/uapi/linux/ublk_cmd.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef USER_BLK_DRV_CMD_INC_H
+#define USER_BLK_DRV_CMD_INC_H
+
+#include <linux/types.h>
+
+/* ublk server command definition */
+
+/*
+ * Admin commands, issued by ublk server, and handled by ublk driver.
+ */
+#define UBLK_CMD_GET_QUEUE_AFFINITY 0x01
+#define UBLK_CMD_GET_DEV_INFO 0x02
+#define UBLK_CMD_ADD_DEV 0x04
+#define UBLK_CMD_DEL_DEV 0x05
+#define UBLK_CMD_START_DEV 0x06
+#define UBLK_CMD_STOP_DEV 0x07
+#define UBLK_CMD_SET_PARAMS 0x08
+#define UBLK_CMD_GET_PARAMS 0x09
+#define UBLK_CMD_START_USER_RECOVERY 0x10
+#define UBLK_CMD_END_USER_RECOVERY 0x11
+/*
+ * IO commands, issued by ublk server, and handled by ublk driver.
+ *
+ * FETCH_REQ: issued via sqe(URING_CMD) beforehand for fetching an IO request
+ *      from the ublk driver; should be issued only when starting the device.
+ *      After the associated cqe is returned, the request's tag can be
+ *      retrieved via cqe->user_data.
+ *
+ * COMMIT_AND_FETCH_REQ: issued via sqe(URING_CMD) after the ublk server has
+ *      handled this IO request; the request's handling result is committed
+ *      to the ublk driver, meanwhile a FETCH_REQ is piggybacked, which has
+ *      to be handled before the io request is completed.
+ *
+ * NEED_GET_DATA: only used for write requests, to set the io addr and copy
+ *      data. When NEED_GET_DATA is set, ublksrv has to issue the
+ *      UBLK_IO_NEED_GET_DATA command after the ublk driver returns
+ *      UBLK_IO_RES_NEED_GET_DATA.
+ *
+ *      It is only used if ublksrv sets the UBLK_F_NEED_GET_DATA flag
+ *      while starting a ublk device.
+ */
+#define UBLK_IO_FETCH_REQ 0x20
+#define UBLK_IO_COMMIT_AND_FETCH_REQ 0x21
+#define UBLK_IO_NEED_GET_DATA 0x22
+
+/* only ABORT means no re-fetch */
+#define UBLK_IO_RES_OK 0
+#define UBLK_IO_RES_NEED_GET_DATA 1
+#define UBLK_IO_RES_ABORT (-ENODEV)
+
+#define UBLKSRV_CMD_BUF_OFFSET 0
+#define UBLKSRV_IO_BUF_OFFSET 0x80000000
+
+/* the tag field is 12 bits wide, so at most 4096 IOs for each queue */
+#define UBLK_MAX_QUEUE_DEPTH 4096
+
+/*
+ * zero copy requires 4k block size, and can remap ublk driver's io
+ * request into ublksrv's vm space
+ */
+#define UBLK_F_SUPPORT_ZERO_COPY (1ULL << 0)
+
+/*
+ * Force io cmd completion via io_uring_cmd_complete_in_task so that
+ * performance can easily be compared against task_work_add
+ */
+#define UBLK_F_URING_CMD_COMP_IN_TASK (1ULL << 1)
+
+/*
+ * User should issue io cmd again for write requests to
+ * set io buffer address and copy data from bio vectors
+ * to the userspace io buffer.
+ *
+ * In this mode, task_work is not used.
+ */
+#define UBLK_F_NEED_GET_DATA (1UL << 2)
+
+#define UBLK_F_USER_RECOVERY (1UL << 3)
+
+#define UBLK_F_USER_RECOVERY_REISSUE (1UL << 4)
+
+/* device state */
+#define UBLK_S_DEV_DEAD 0
+#define UBLK_S_DEV_LIVE 1
+#define UBLK_S_DEV_QUIESCED 2
+
+/* shipped via sqe->cmd of io_uring command */
+struct ublksrv_ctrl_cmd {
+ /* sent to which device, must be valid */
+ __u32 dev_id;
+
+	/* sent to which queue, must be -1 if the cmd isn't for a queue */
+ __u16 queue_id;
+ /*
+ * cmd specific buffer, can be IN or OUT.
+ */
+ __u16 len;
+ __u64 addr;
+
+ /* inline data */
+ __u64 data[2];
+};
+
+struct ublksrv_ctrl_dev_info {
+ __u16 nr_hw_queues;
+ __u16 queue_depth;
+ __u16 state;
+ __u16 pad0;
+
+ __u32 max_io_buf_bytes;
+ __u32 dev_id;
+
+ __s32 ublksrv_pid;
+ __u32 pad1;
+
+ __u64 flags;
+
+ /* For ublksrv internal use, invisible to ublk driver */
+ __u64 ublksrv_flags;
+
+ __u64 reserved0;
+ __u64 reserved1;
+ __u64 reserved2;
+};
+
+#define UBLK_IO_OP_READ 0
+#define UBLK_IO_OP_WRITE 1
+#define UBLK_IO_OP_FLUSH 2
+#define UBLK_IO_OP_DISCARD 3
+#define UBLK_IO_OP_WRITE_SAME 4
+#define UBLK_IO_OP_WRITE_ZEROES 5
+
+#define UBLK_IO_F_FAILFAST_DEV (1U << 8)
+#define UBLK_IO_F_FAILFAST_TRANSPORT (1U << 9)
+#define UBLK_IO_F_FAILFAST_DRIVER (1U << 10)
+#define UBLK_IO_F_META (1U << 11)
+#define UBLK_IO_F_FUA (1U << 13)
+#define UBLK_IO_F_NOUNMAP (1U << 15)
+#define UBLK_IO_F_SWAP (1U << 16)
+
+/*
+ * An io cmd is described by this structure, stored in shared memory and
+ * indexed by request tag.
+ *
+ * The data is stored by the ublk driver, and read by ublksrv after a fetch
+ * command returns.
+ */
+struct ublksrv_io_desc {
+ /* op: bit 0-7, flags: bit 8-31 */
+ __u32 op_flags;
+
+ __u32 nr_sectors;
+
+ /* start sector for this io */
+ __u64 start_sector;
+
+ /* buffer address in ublksrv daemon vm space, from ublk driver */
+ __u64 addr;
+};
+
+static inline __u8 ublksrv_get_op(const struct ublksrv_io_desc *iod)
+{
+ return iod->op_flags & 0xff;
+}
+
+static inline __u32 ublksrv_get_flags(const struct ublksrv_io_desc *iod)
+{
+ return iod->op_flags >> 8;
+}
+
+/* issued to ublk driver via /dev/ublkcN */
+struct ublksrv_io_cmd {
+ __u16 q_id;
+
+	/* tag of the request being fetched/committed */
+ __u16 tag;
+
+ /* io result, it is valid for COMMIT* command only */
+ __s32 result;
+
+ /*
+ * userspace buffer address in ublksrv daemon process, valid for
+ * FETCH* command only
+ */
+ __u64 addr;
+};
+
+struct ublk_param_basic {
+#define UBLK_ATTR_READ_ONLY (1 << 0)
+#define UBLK_ATTR_ROTATIONAL (1 << 1)
+#define UBLK_ATTR_VOLATILE_CACHE (1 << 2)
+#define UBLK_ATTR_FUA (1 << 3)
+ __u32 attrs;
+ __u8 logical_bs_shift;
+ __u8 physical_bs_shift;
+ __u8 io_opt_shift;
+ __u8 io_min_shift;
+
+ __u32 max_sectors;
+ __u32 chunk_sectors;
+
+ __u64 dev_sectors;
+ __u64 virt_boundary_mask;
+};
+
+struct ublk_param_discard {
+ __u32 discard_alignment;
+
+ __u32 discard_granularity;
+ __u32 max_discard_sectors;
+
+ __u32 max_write_zeroes_sectors;
+ __u16 max_discard_segments;
+ __u16 reserved0;
+};
+
+struct ublk_params {
+ /*
+	 * Total length of parameters. Userspace has to set 'len' for both
+	 * the SET_PARAMS and GET_PARAMS commands, and the driver may update
+	 * 'len' if the two sides use different versions of 'ublk_params';
+	 * the same applies to the 'types' field.
+ */
+ __u32 len;
+#define UBLK_PARAM_TYPE_BASIC (1 << 0)
+#define UBLK_PARAM_TYPE_DISCARD (1 << 1)
+ __u32 types; /* types of parameter included */
+
+ struct ublk_param_basic basic;
+ struct ublk_param_discard discard;
+};
+
+#endif
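To illustrate the shared-memory descriptor above: after a fetch completes, a
ublk server decodes the ublksrv_io_desc for that tag and dispatches on the
opcode. A hedged sketch; read_backing()/write_backing()/flush_backing() are
hypothetical stand-ins for the daemon's own backing-store I/O:

#include <errno.h>
#include <stdint.h>
#include <linux/ublk_cmd.h>

/* hypothetical stand-ins for the daemon's backing-store I/O */
int read_backing(void *buf, __u32 len, __u64 off);
int write_backing(const void *buf, __u32 len, __u64 off);
int flush_backing(void);

static int handle_io(const struct ublksrv_io_desc *iod)
{
	void *buf = (void *)(uintptr_t)iod->addr; /* daemon VM space */
	__u64 off = iod->start_sector << 9;	  /* 512-byte sectors */
	__u32 len = iod->nr_sectors << 9;

	switch (ublksrv_get_op(iod)) {
	case UBLK_IO_OP_READ:
		return read_backing(buf, len, off);
	case UBLK_IO_OP_WRITE:
		return write_backing(buf, len, off);
	case UBLK_IO_OP_FLUSH:
		return flush_backing();
	default:
		return -EOPNOTSUPP;
	}
}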
diff --git a/include/uapi/linux/um_timetravel.h b/include/uapi/linux/um_timetravel.h
new file mode 100644
index 000000000000..ca3238222b6d
--- /dev/null
+++ b/include/uapi/linux/um_timetravel.h
@@ -0,0 +1,128 @@
+/*
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * Copyright (C) 2019 Intel Corporation
+ */
+#ifndef _UAPI_LINUX_UM_TIMETRAVEL_H
+#define _UAPI_LINUX_UM_TIMETRAVEL_H
+#include <linux/types.h>
+
+/**
+ * struct um_timetravel_msg - UM time travel message
+ *
+ * This is the basic message type, going in both directions.
+ *
+ * This is the message passed between the host (user-mode Linux instance)
+ * and the calendar (the application on the other side of the socket) in
+ * order to implement common scheduling.
+ *
+ * Whenever UML has an event it will request runtime for it from the
+ * calendar, and then wait for its turn until it can run, etc. Note
+ * that it will only ever request the single next runtime, i.e. multiple
+ * REQUEST messages override each other.
+ */
+struct um_timetravel_msg {
+ /**
+ * @op: operation value from &enum um_timetravel_ops
+ */
+ __u32 op;
+
+ /**
+ * @seq: sequence number for the message - shall be reflected in
+ * the ACK response, and should be checked while processing
+ * the response to see if it matches
+ */
+ __u32 seq;
+
+ /**
+ * @time: time in nanoseconds
+ */
+ __u64 time;
+};
+
+/**
+ * enum um_timetravel_ops - Operation codes
+ */
+enum um_timetravel_ops {
+ /**
+ * @UM_TIMETRAVEL_ACK: response (ACK) to any previous message,
+ * this usually doesn't carry any data in the 'time' field
+ * unless otherwise specified below
+ */
+ UM_TIMETRAVEL_ACK = 0,
+
+ /**
+ * @UM_TIMETRAVEL_START: initialize the connection, the time
+ * field contains an (arbitrary) ID to possibly be able
+ * to distinguish the connections.
+ */
+ UM_TIMETRAVEL_START = 1,
+
+ /**
+ * @UM_TIMETRAVEL_REQUEST: request to run at the given time
+ * (host -> calendar)
+ */
+ UM_TIMETRAVEL_REQUEST = 2,
+
+ /**
+ * @UM_TIMETRAVEL_WAIT: Indicate waiting for the previously requested
+ * runtime, new requests may be made while waiting (e.g. due to
+ * interrupts); the time field is ignored. The calendar must process
+ * this message and later send a %UM_TIMETRAVEL_RUN message when
+ * the host can run again.
+ * (host -> calendar)
+ */
+ UM_TIMETRAVEL_WAIT = 3,
+
+ /**
+ * @UM_TIMETRAVEL_GET: return the current time from the calendar in the
+ * ACK message, the time in the request message is ignored
+ * (host -> calendar)
+ */
+ UM_TIMETRAVEL_GET = 4,
+
+ /**
+ * @UM_TIMETRAVEL_UPDATE: time update to the calendar, must be sent e.g.
+ * before kicking an interrupt to another calendar
+ * (host -> calendar)
+ */
+ UM_TIMETRAVEL_UPDATE = 5,
+
+ /**
+ * @UM_TIMETRAVEL_RUN: run time request granted, current time is in
+ * the time field
+ * (calendar -> host)
+ */
+ UM_TIMETRAVEL_RUN = 6,
+
+ /**
+ * @UM_TIMETRAVEL_FREE_UNTIL: Enable free-running until the given time,
+ * this is a message from the calendar telling the host that it can
+ * freely do its own scheduling for anything before the indicated
+ * time.
+ * Note that if a calendar sends this message once, the host may
+ * assume that it will also do so in the future, if it implements
+ * wraparound semantics for the time field.
+ * (calendar -> host)
+ */
+ UM_TIMETRAVEL_FREE_UNTIL = 7,
+
+ /**
+ * @UM_TIMETRAVEL_GET_TOD: Return time of day, typically used once at
+ * boot by the virtual machines to get a synchronized time from
+ * the simulation.
+ */
+ UM_TIMETRAVEL_GET_TOD = 8,
+};
+
+#endif /* _UAPI_LINUX_UM_TIMETRAVEL_H */
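A sketch of the host side of this protocol: requesting runtime at an absolute
simulated time over the already-connected calendar socket. The raw write()
framing is an assumption consistent with the fixed-size message above:

#include <unistd.h>
#include <linux/um_timetravel.h>

static int request_runtime(int fd, __u64 when_ns, __u32 seq)
{
	struct um_timetravel_msg msg = {
		.op   = UM_TIMETRAVEL_REQUEST,
		.seq  = seq,		/* reflected back in the ACK */
		.time = when_ns,
	};

	return write(fd, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
}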
diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h
index 76b7c3f6cd0d..c917c53070d5 100644
--- a/include/uapi/linux/usb/audio.h
+++ b/include/uapi/linux/usb/audio.h
@@ -341,7 +341,7 @@ struct uac_feature_unit_descriptor {
__u8 bUnitID;
__u8 bSourceID;
__u8 bControlSize;
- __u8 bmaControls[0]; /* variable length */
+ __u8 bmaControls[]; /* variable length */
} __attribute__((packed));
static inline __u8 uac_feature_unit_iFeature(struct uac_feature_unit_descriptor *desc)
diff --git a/include/uapi/linux/usb/cdc.h b/include/uapi/linux/usb/cdc.h
index 6d61550959ef..1924cf665448 100644
--- a/include/uapi/linux/usb/cdc.h
+++ b/include/uapi/linux/usb/cdc.h
@@ -171,7 +171,7 @@ struct usb_cdc_mdlm_detail_desc {
/* type is associated with mdlm_desc.bGUID */
__u8 bGuidDescriptorType;
- __u8 bDetailData[0];
+ __u8 bDetailData[];
} __attribute__ ((packed));
/* "OBEX Control Model Functional Descriptor" */
@@ -271,6 +271,10 @@ struct usb_cdc_line_coding {
__u8 bDataBits;
} __attribute__ ((packed));
+/* Control Signal Bitmap Values from 6.2.14 SetControlLineState */
+#define USB_CDC_CTRL_DTR (1 << 0)
+#define USB_CDC_CTRL_RTS (1 << 1)
+
/* table 62; bits in multicast filter */
#define USB_CDC_PACKET_TYPE_PROMISCUOUS (1 << 0)
#define USB_CDC_PACKET_TYPE_ALL_MULTICAST (1 << 1) /* no filter */
@@ -302,6 +306,15 @@ struct usb_cdc_notification {
__le16 wLength;
} __attribute__ ((packed));
+/* UART State Bitmap Values from 6.3.5 SerialState */
+#define USB_CDC_SERIAL_STATE_DCD (1 << 0)
+#define USB_CDC_SERIAL_STATE_DSR (1 << 1)
+#define USB_CDC_SERIAL_STATE_BREAK (1 << 2)
+#define USB_CDC_SERIAL_STATE_RING_SIGNAL (1 << 3)
+#define USB_CDC_SERIAL_STATE_FRAMING (1 << 4)
+#define USB_CDC_SERIAL_STATE_PARITY (1 << 5)
+#define USB_CDC_SERIAL_STATE_OVERRUN (1 << 6)
+
struct usb_cdc_speed_change {
__le32 DLBitRRate; /* contains the downlink bit rate (IN pipe) */
__le32 ULBitRate; /* contains the uplink bit rate (OUT pipe) */
@@ -379,7 +392,7 @@ struct usb_cdc_ncm_ndp16 {
__le32 dwSignature;
__le16 wLength;
__le16 wNextNdpIndex;
- struct usb_cdc_ncm_dpe16 dpe16[0];
+ struct usb_cdc_ncm_dpe16 dpe16[];
} __attribute__ ((packed));
/* 32-bit NCM Datagram Pointer Entry */
@@ -395,7 +408,7 @@ struct usb_cdc_ncm_ndp32 {
__le16 wReserved6;
__le32 dwNextNdpIndex;
__le32 dwReserved12;
- struct usb_cdc_ncm_dpe32 dpe32[0];
+ struct usb_cdc_ncm_dpe32 dpe32[];
} __attribute__ ((packed));
/* CDC NCM subclass 3.2.1 and 3.2.2 */
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 2b623f36af6b..31fcfa084e63 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -138,11 +138,11 @@
* Test Mode Selectors
* See USB 2.0 spec Table 9-7
*/
-#define TEST_J 1
-#define TEST_K 2
-#define TEST_SE0_NAK 3
-#define TEST_PACKET 4
-#define TEST_FORCE_EN 5
+#define USB_TEST_J 1
+#define USB_TEST_K 2
+#define USB_TEST_SE0_NAK 3
+#define USB_TEST_PACKET 4
+#define USB_TEST_FORCE_ENABLE 5
/* Status Type */
#define USB_STATUS_TYPE_STANDARD 0
@@ -326,6 +326,10 @@ struct usb_device_descriptor {
#define USB_CLASS_CONTENT_SEC 0x0d /* content security */
#define USB_CLASS_VIDEO 0x0e
#define USB_CLASS_WIRELESS_CONTROLLER 0xe0
+#define USB_CLASS_PERSONAL_HEALTHCARE 0x0f
+#define USB_CLASS_AUDIO_VIDEO 0x10
+#define USB_CLASS_BILLBOARD 0x11
+#define USB_CLASS_USB_TYPE_C_BRIDGE 0x12
#define USB_CLASS_MISC 0xef
#define USB_CLASS_APP_SPEC 0xfe
#define USB_CLASS_VENDOR_SPEC 0xff
@@ -364,6 +368,9 @@ struct usb_config_descriptor {
/*-------------------------------------------------------------------------*/
+/* USB String descriptors can contain at most 126 characters. */
+#define USB_MAX_STRING_LEN 126
+
/* USB_DT_STRING: String descriptor */
struct usb_string_descriptor {
__u8 bLength;
@@ -811,7 +818,7 @@ struct usb_key_descriptor {
__u8 tTKID[3];
__u8 bReserved;
- __u8 bKeyData[0];
+ __u8 bKeyData[];
} __attribute__((packed));
/*-------------------------------------------------------------------------*/
@@ -961,9 +968,22 @@ struct usb_ssp_cap_descriptor {
__le32 bmSublinkSpeedAttr[1]; /* list of sublink speed attrib entries */
#define USB_SSP_SUBLINK_SPEED_SSID (0xf) /* sublink speed ID */
#define USB_SSP_SUBLINK_SPEED_LSE (0x3 << 4) /* Lanespeed exponent */
+#define USB_SSP_SUBLINK_SPEED_LSE_BPS 0
+#define USB_SSP_SUBLINK_SPEED_LSE_KBPS 1
+#define USB_SSP_SUBLINK_SPEED_LSE_MBPS 2
+#define USB_SSP_SUBLINK_SPEED_LSE_GBPS 3
+
#define USB_SSP_SUBLINK_SPEED_ST (0x3 << 6) /* Sublink type */
+#define USB_SSP_SUBLINK_SPEED_ST_SYM_RX 0
+#define USB_SSP_SUBLINK_SPEED_ST_ASYM_RX 1
+#define USB_SSP_SUBLINK_SPEED_ST_SYM_TX 2
+#define USB_SSP_SUBLINK_SPEED_ST_ASYM_TX 3
+
#define USB_SSP_SUBLINK_SPEED_RSVD (0x3f << 8) /* Reserved */
#define USB_SSP_SUBLINK_SPEED_LP (0x3 << 14) /* Link protocol */
+#define USB_SSP_SUBLINK_SPEED_LP_SS 0
+#define USB_SSP_SUBLINK_SPEED_LP_SSP 1
+
#define USB_SSP_SUBLINK_SPEED_LSM (0xff << 16) /* Lanespeed mantissa */
} __attribute__((packed));
@@ -1222,7 +1242,7 @@ struct usb_set_sel_req {
* As per USB compliance update, a device that is actively drawing
* more than 100mA from USB must report itself as bus-powered in
* the GetStatus(DEVICE) call.
- * http://compliance.usb.org/index.asp?UpdateFile=Electrical&Format=Standard#34
+ * https://compliance.usb.org/index.asp?UpdateFile=Electrical&Format=Standard#34
*/
#define USB_SELF_POWER_VBUS_MAX_DRAW 100
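The new LSE/LSM values make bmSublinkSpeedAttr self-describing: lane speed =
mantissa x 1000^LSE bits per second. A decoding sketch, assuming the attribute
has already been converted to host byte order:

#include <linux/usb/ch9.h>

static unsigned long long sublink_speed_bps(__u32 attr)
{
	unsigned int lse = (attr & USB_SSP_SUBLINK_SPEED_LSE) >> 4;
	unsigned long long mantissa = (attr & USB_SSP_SUBLINK_SPEED_LSM) >> 16;
	unsigned long long mult = 1;

	while (lse--)			/* LSE: 0=bps 1=kbps 2=Mbps 3=Gbps */
		mult *= 1000;
	return mantissa * mult;
}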
diff --git a/include/uapi/linux/usb/raw_gadget.h b/include/uapi/linux/usb/raw_gadget.h
new file mode 100644
index 000000000000..c7d2199134d7
--- /dev/null
+++ b/include/uapi/linux/usb/raw_gadget.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * USB Raw Gadget driver.
+ *
+ * See Documentation/usb/raw-gadget.rst for more details.
+ */
+
+#ifndef _UAPI__LINUX_USB_RAW_GADGET_H
+#define _UAPI__LINUX_USB_RAW_GADGET_H
+
+#include <asm/ioctl.h>
+#include <linux/types.h>
+#include <linux/usb/ch9.h>
+
+/* Maximum length of driver_name/device_name in the usb_raw_init struct. */
+#define UDC_NAME_LENGTH_MAX 128
+
+/*
+ * struct usb_raw_init - argument for USB_RAW_IOCTL_INIT ioctl.
+ * @speed: The speed of the emulated USB device, takes the same values as
+ * the usb_device_speed enum: USB_SPEED_FULL, USB_SPEED_HIGH, etc.
+ * @driver_name: The name of the UDC driver.
+ * @device_name: The name of a UDC instance.
+ *
+ * The last two fields identify a UDC the gadget driver should bind to.
+ * For example, Dummy UDC has "dummy_udc" as its driver_name and "dummy_udc.N"
+ * as its device_name, where N is the index of the Dummy UDC instance.
+ * At the same time, the dwc2 driver used on the Raspberry Pi Zero has
+ * "20980000.usb" as both driver_name and device_name.
+ */
+struct usb_raw_init {
+ __u8 driver_name[UDC_NAME_LENGTH_MAX];
+ __u8 device_name[UDC_NAME_LENGTH_MAX];
+ __u8 speed;
+};
+
+/* The type of event fetched with the USB_RAW_IOCTL_EVENT_FETCH ioctl. */
+enum usb_raw_event_type {
+ USB_RAW_EVENT_INVALID = 0,
+
+ /* This event is queued when the driver has bound to a UDC. */
+ USB_RAW_EVENT_CONNECT = 1,
+
+ /* This event is queued when a new control request arrived to ep0. */
+ USB_RAW_EVENT_CONTROL = 2,
+
+ /* The list might grow in the future. */
+};
+
+/*
+ * struct usb_raw_event - argument for USB_RAW_IOCTL_EVENT_FETCH ioctl.
+ * @type: The type of the fetched event.
+ * @length: Length of the data buffer. Updated by the driver and set to the
+ * actual length of the fetched event data.
+ * @data: A buffer to store the fetched event data.
+ *
+ * Currently the fetched data buffer is empty for USB_RAW_EVENT_CONNECT,
+ * and contains struct usb_ctrlrequest for USB_RAW_EVENT_CONTROL.
+ */
+struct usb_raw_event {
+ __u32 type;
+ __u32 length;
+ __u8 data[];
+};
+
+#define USB_RAW_IO_FLAGS_ZERO 0x0001
+#define USB_RAW_IO_FLAGS_MASK 0x0001
+
+static inline int usb_raw_io_flags_valid(__u16 flags)
+{
+ return (flags & ~USB_RAW_IO_FLAGS_MASK) == 0;
+}
+
+static inline int usb_raw_io_flags_zero(__u16 flags)
+{
+ return (flags & USB_RAW_IO_FLAGS_ZERO);
+}
+
+/*
+ * struct usb_raw_ep_io - argument for USB_RAW_IOCTL_EP0/EP_WRITE/READ ioctls.
+ * @ep: Endpoint handle as returned by USB_RAW_IOCTL_EP_ENABLE for
+ * USB_RAW_IOCTL_EP_WRITE/READ. Ignored for USB_RAW_IOCTL_EP0_WRITE/READ.
+ * @flags: When USB_RAW_IO_FLAGS_ZERO is specified, the zero flag is set on
+ * the submitted USB request, see include/linux/usb/gadget.h for details.
+ * @length: Length of data.
+ * @data: Data to send for USB_RAW_IOCTL_EP0/EP_WRITE. Buffer to store received
+ * data for USB_RAW_IOCTL_EP0/EP_READ.
+ */
+struct usb_raw_ep_io {
+ __u16 ep;
+ __u16 flags;
+ __u32 length;
+ __u8 data[];
+};
+
+/* Maximum number of non-control endpoints in struct usb_raw_eps_info. */
+#define USB_RAW_EPS_NUM_MAX 30
+
+/* Maximum length of UDC endpoint name in struct usb_raw_ep_info. */
+#define USB_RAW_EP_NAME_MAX 16
+
+/* Used as addr in struct usb_raw_ep_info if endpoint accepts any address. */
+#define USB_RAW_EP_ADDR_ANY 0xff
+
+/*
+ * struct usb_raw_ep_caps - exposes endpoint capabilities from struct usb_ep
+ * (technically from its member struct usb_ep_caps).
+ */
+struct usb_raw_ep_caps {
+ __u32 type_control : 1;
+ __u32 type_iso : 1;
+ __u32 type_bulk : 1;
+ __u32 type_int : 1;
+ __u32 dir_in : 1;
+ __u32 dir_out : 1;
+};
+
+/*
+ * struct usb_raw_ep_limits - exposes endpoint limits from struct usb_ep.
+ * @maxpacket_limit: Maximum packet size value supported by this endpoint.
+ * @max_streams: maximum number of streams supported by this endpoint
+ * (actual number is 2^n).
+ * @reserved: Empty, reserved for potential future extensions.
+ */
+struct usb_raw_ep_limits {
+ __u16 maxpacket_limit;
+ __u16 max_streams;
+ __u32 reserved;
+};
+
+/*
+ * struct usb_raw_ep_info - stores information about a gadget endpoint.
+ * @name: Name of the endpoint as it is defined in the UDC driver.
+ * @addr: Address of the endpoint that must be specified in the endpoint
+ * descriptor passed to USB_RAW_IOCTL_EP_ENABLE ioctl.
+ * @caps: Endpoint capabilities.
+ * @limits: Endpoint limits.
+ */
+struct usb_raw_ep_info {
+ __u8 name[USB_RAW_EP_NAME_MAX];
+ __u32 addr;
+ struct usb_raw_ep_caps caps;
+ struct usb_raw_ep_limits limits;
+};
+
+/*
+ * struct usb_raw_eps_info - argument for USB_RAW_IOCTL_EPS_INFO ioctl.
+ * @eps: Structures that store information about non-control endpoints.
+ */
+struct usb_raw_eps_info {
+ struct usb_raw_ep_info eps[USB_RAW_EPS_NUM_MAX];
+};
+
+/*
+ * Initializes a Raw Gadget instance.
+ * Accepts a pointer to the usb_raw_init struct as an argument.
+ * Returns 0 on success or negative error code on failure.
+ */
+#define USB_RAW_IOCTL_INIT _IOW('U', 0, struct usb_raw_init)
+
+/*
+ * Instructs Raw Gadget to bind to a UDC and start emulating a USB device.
+ * Returns 0 on success or negative error code on failure.
+ */
+#define USB_RAW_IOCTL_RUN _IO('U', 1)
+
+/*
+ * A blocking ioctl that waits for an event and returns fetched event data to
+ * the user.
+ * Accepts a pointer to the usb_raw_event struct.
+ * Returns 0 on success or negative error code on failure.
+ */
+#define USB_RAW_IOCTL_EVENT_FETCH _IOR('U', 2, struct usb_raw_event)
+
+/*
+ * Queues an IN (OUT for READ) request as a response to the last setup request
+ * received on endpoint 0 (provided that was an IN (OUT for READ) request), and
+ * waits until the request is completed. Copies received data to user for READ.
+ * Accepts a pointer to the usb_raw_ep_io struct as an argument.
+ * Returns length of transferred data on success or negative error code on
+ * failure.
+ */
+#define USB_RAW_IOCTL_EP0_WRITE _IOW('U', 3, struct usb_raw_ep_io)
+#define USB_RAW_IOCTL_EP0_READ _IOWR('U', 4, struct usb_raw_ep_io)
+
+/*
+ * Finds an endpoint that satisfies the parameters specified in the provided
+ * descriptors (address, transfer type, etc.) and enables it.
+ * Accepts a pointer to the usb_raw_ep_descs struct as an argument.
+ * Returns enabled endpoint handle on success or negative error code on failure.
+ */
+#define USB_RAW_IOCTL_EP_ENABLE _IOW('U', 5, struct usb_endpoint_descriptor)
+
+/*
+ * Disables specified endpoint.
+ * Accepts endpoint handle as an argument.
+ * Returns 0 on success or negative error code on failure.
+ */
+#define USB_RAW_IOCTL_EP_DISABLE _IOW('U', 6, __u32)
+
+/*
+ * Queues an IN (OUT for READ) request as a response to the last setup request
+ * received on endpoint usb_raw_ep_io.ep (provided that was an IN (OUT for READ)
+ * request), and waits until the request is completed. Copies received data to
+ * user for READ.
+ * Accepts a pointer to the usb_raw_ep_io struct as an argument.
+ * Returns length of transferred data on success or negative error code on
+ * failure.
+ */
+#define USB_RAW_IOCTL_EP_WRITE _IOW('U', 7, struct usb_raw_ep_io)
+#define USB_RAW_IOCTL_EP_READ _IOWR('U', 8, struct usb_raw_ep_io)
+
+/*
+ * Switches the gadget into the configured state.
+ * Returns 0 on success or negative error code on failure.
+ */
+#define USB_RAW_IOCTL_CONFIGURE _IO('U', 9)
+
+/*
+ * Constrains UDC VBUS power usage.
+ * Accepts current limit in 2 mA units as an argument.
+ * Returns 0 on success or negative error code on failure.
+ */
+#define USB_RAW_IOCTL_VBUS_DRAW _IOW('U', 10, __u32)
+
+/*
+ * Fills in the usb_raw_eps_info structure with information about non-control
+ * endpoints available for the currently connected UDC.
+ * Returns the number of available endpoints on success or negative error code
+ * on failure.
+ */
+#define USB_RAW_IOCTL_EPS_INFO _IOR('U', 11, struct usb_raw_eps_info)
+
+/*
+ * Stalls a pending control request on endpoint 0.
+ * Returns 0 on success or negative error code on failure.
+ */
+#define USB_RAW_IOCTL_EP0_STALL _IO('U', 12)
+
+/*
+ * Sets or clears halt or wedge status of the endpoint.
+ * Accepts endpoint handle as an argument.
+ * Returns 0 on success or negative error code on failure.
+ */
+#define USB_RAW_IOCTL_EP_SET_HALT _IOW('U', 13, __u32)
+#define USB_RAW_IOCTL_EP_CLEAR_HALT _IOW('U', 14, __u32)
+#define USB_RAW_IOCTL_EP_SET_WEDGE _IOW('U', 15, __u32)
+
+#endif /* _UAPI__LINUX_USB_RAW_GADGET_H */
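Putting the ioctls together, the documented lifecycle is INIT, RUN, then an
EVENT_FETCH loop. A trimmed sketch against the Dummy UDC, following the
driver_name/device_name example above; error paths deliberately leak fd for
brevity:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/usb/raw_gadget.h>

static int run_gadget(void)
{
	struct usb_raw_init init;
	struct {
		struct usb_raw_event ev;
		struct usb_ctrlrequest ctrl;	/* room for CONTROL data */
	} event;
	int fd = open("/dev/raw-gadget", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&init, 0, sizeof(init));
	strcpy((char *)init.driver_name, "dummy_udc");
	strcpy((char *)init.device_name, "dummy_udc.0");
	init.speed = USB_SPEED_HIGH;	/* from the usb_device_speed enum */

	if (ioctl(fd, USB_RAW_IOCTL_INIT, &init) ||
	    ioctl(fd, USB_RAW_IOCTL_RUN, 0))
		return -1;

	for (;;) {
		memset(&event, 0, sizeof(event));
		event.ev.length = sizeof(event.ctrl);
		if (ioctl(fd, USB_RAW_IOCTL_EVENT_FETCH, &event.ev))
			return -1;
		/* dispatch on event.ev.type: CONNECT, CONTROL, ... */
	}
}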
diff --git a/include/uapi/linux/usb/tmc.h b/include/uapi/linux/usb/tmc.h
index fdd4d88a7b95..d791cc58a7f0 100644
--- a/include/uapi/linux/usb/tmc.h
+++ b/include/uapi/linux/usb/tmc.h
@@ -102,6 +102,9 @@ struct usbtmc_message {
#define USBTMC_IOCTL_MSG_IN_ATTR _IOR(USBTMC_IOC_NR, 24, __u8)
#define USBTMC_IOCTL_AUTO_ABORT _IOW(USBTMC_IOC_NR, 25, __u8)
+#define USBTMC_IOCTL_GET_STB _IOR(USBTMC_IOC_NR, 26, __u8)
+#define USBTMC_IOCTL_GET_SRQ_STB _IOR(USBTMC_IOC_NR, 27, __u8)
+
/* Cancel and cleanup asynchronous calls */
#define USBTMC_IOCTL_CANCEL_IO _IO(USBTMC_IOC_NR, 35)
#define USBTMC_IOCTL_CLEANUP_IO _IO(USBTMC_IOC_NR, 36)
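The new STB ioctls return a single status byte by pointer, so usage is a
one-line ioctl on an open /dev/usbtmcN node; a sketch:

#include <sys/ioctl.h>
#include <linux/usb/tmc.h>

static int read_status_byte(int fd, __u8 *stb)
{
	/* fd is an open /dev/usbtmcN device node */
	return ioctl(fd, USBTMC_IOCTL_GET_STB, stb);
}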
diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h
index d854cb19c42c..bfdae12cdacf 100644
--- a/include/uapi/linux/usb/video.h
+++ b/include/uapi/linux/usb/video.h
@@ -302,9 +302,10 @@ struct uvc_processing_unit_descriptor {
__u8 bControlSize;
__u8 bmControls[2];
__u8 iProcessing;
+ __u8 bmVideoStandards;
} __attribute__((__packed__));
-#define UVC_DT_PROCESSING_UNIT_SIZE(n) (9+(n))
+#define UVC_DT_PROCESSING_UNIT_SIZE(n) (10+(n))
/* 3.7.2.6. Extension Unit Descriptor */
struct uvc_extension_unit_descriptor {
diff --git a/include/uapi/linux/usbdevice_fs.h b/include/uapi/linux/usbdevice_fs.h
index cf525cddeb94..74a84e02422a 100644
--- a/include/uapi/linux/usbdevice_fs.h
+++ b/include/uapi/linux/usbdevice_fs.h
@@ -131,7 +131,7 @@ struct usbdevfs_urb {
unsigned int signr; /* signal to be sent on completion,
or 0 if none should be sent. */
void __user *usercontext;
- struct usbdevfs_iso_packet_desc iso_frame_desc[0];
+ struct usbdevfs_iso_packet_desc iso_frame_desc[];
};
/* ioctls for talking directly to drivers */
@@ -176,7 +176,7 @@ struct usbdevfs_disconnect_claim {
struct usbdevfs_streams {
unsigned int num_streams; /* Not used by USBDEVFS_FREE_STREAMS */
unsigned int num_eps;
- unsigned char eps[0];
+ unsigned char eps[];
};
/*
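The [0] -> [] conversions above are the C99 flexible array member; allocation
sizing is unchanged. For instance:

#include <stdlib.h>
#include <linux/usbdevice_fs.h>

static struct usbdevfs_urb *alloc_iso_urb(unsigned int npackets)
{
	/* one header plus npackets trailing iso_frame_desc entries */
	return calloc(1, sizeof(struct usbdevfs_urb) +
			 npackets * sizeof(struct usbdevfs_iso_packet_desc));
}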
diff --git a/include/uapi/linux/usbip.h b/include/uapi/linux/usbip.h
index fd393d908d8a..e4421ad55b2e 100644
--- a/include/uapi/linux/usbip.h
+++ b/include/uapi/linux/usbip.h
@@ -24,4 +24,30 @@ enum usbip_device_status {
VDEV_ST_USED,
VDEV_ST_ERROR
};
+
+/* USB URB Transfer flags:
+ *
+ * USBIP server and client (vhci) pack URBs in TCP packets. The following
+ * are the transfer flag defines used in the USBIP protocol.
+ */
+
+#define USBIP_URB_SHORT_NOT_OK 0x0001
+#define USBIP_URB_ISO_ASAP 0x0002
+#define USBIP_URB_NO_TRANSFER_DMA_MAP 0x0004
+#define USBIP_URB_ZERO_PACKET 0x0040
+#define USBIP_URB_NO_INTERRUPT 0x0080
+#define USBIP_URB_FREE_BUFFER 0x0100
+#define USBIP_URB_DIR_IN 0x0200
+#define USBIP_URB_DIR_OUT 0
+#define USBIP_URB_DIR_MASK USBIP_URB_DIR_IN
+
+#define USBIP_URB_DMA_MAP_SINGLE 0x00010000
+#define USBIP_URB_DMA_MAP_PAGE 0x00020000
+#define USBIP_URB_DMA_MAP_SG 0x00040000
+#define USBIP_URB_MAP_LOCAL 0x00080000
+#define USBIP_URB_SETUP_MAP_SINGLE 0x00100000
+#define USBIP_URB_SETUP_MAP_LOCAL 0x00200000
+#define USBIP_URB_DMA_SG_COMBINED 0x00400000
+#define USBIP_URB_ALIGNED_TEMP_BUFFER 0x00800000
+
#endif /* _UAPI_LINUX_USBIP_H */
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index 48f1a7c2f1f0..005e5e306266 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -12,6 +12,10 @@
#include <linux/types.h>
+/* ioctls for /dev/userfaultfd */
+#define USERFAULTFD_IOC 0xAA
+#define USERFAULTFD_IOC_NEW _IO(USERFAULTFD_IOC, 0x00)
+
/*
* If the UFFDIO_API is upgraded someday, the UFFDIO_UNREGISTER and
* UFFDIO_WAKE ioctls should be defined as _IOW and not as _IOR. In
@@ -19,14 +23,22 @@
* means the userland is reading).
*/
#define UFFD_API ((__u64)0xAA)
-#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_FORK | \
+#define UFFD_API_REGISTER_MODES (UFFDIO_REGISTER_MODE_MISSING | \
+ UFFDIO_REGISTER_MODE_WP | \
+ UFFDIO_REGISTER_MODE_MINOR)
+#define UFFD_API_FEATURES (UFFD_FEATURE_PAGEFAULT_FLAG_WP | \
+ UFFD_FEATURE_EVENT_FORK | \
UFFD_FEATURE_EVENT_REMAP | \
- UFFD_FEATURE_EVENT_REMOVE | \
+ UFFD_FEATURE_EVENT_REMOVE | \
UFFD_FEATURE_EVENT_UNMAP | \
UFFD_FEATURE_MISSING_HUGETLBFS | \
UFFD_FEATURE_MISSING_SHMEM | \
UFFD_FEATURE_SIGBUS | \
- UFFD_FEATURE_THREAD_ID)
+ UFFD_FEATURE_THREAD_ID | \
+ UFFD_FEATURE_MINOR_HUGETLBFS | \
+ UFFD_FEATURE_MINOR_SHMEM | \
+ UFFD_FEATURE_EXACT_ADDRESS | \
+ UFFD_FEATURE_WP_HUGETLBFS_SHMEM)
#define UFFD_API_IOCTLS \
((__u64)1 << _UFFDIO_REGISTER | \
(__u64)1 << _UFFDIO_UNREGISTER | \
@@ -34,10 +46,14 @@
#define UFFD_API_RANGE_IOCTLS \
((__u64)1 << _UFFDIO_WAKE | \
(__u64)1 << _UFFDIO_COPY | \
- (__u64)1 << _UFFDIO_ZEROPAGE)
+ (__u64)1 << _UFFDIO_ZEROPAGE | \
+ (__u64)1 << _UFFDIO_WRITEPROTECT | \
+ (__u64)1 << _UFFDIO_CONTINUE)
#define UFFD_API_RANGE_IOCTLS_BASIC \
((__u64)1 << _UFFDIO_WAKE | \
- (__u64)1 << _UFFDIO_COPY)
+ (__u64)1 << _UFFDIO_COPY | \
+ (__u64)1 << _UFFDIO_CONTINUE | \
+ (__u64)1 << _UFFDIO_WRITEPROTECT)
/*
* Valid ioctl command number range with this API is from 0x00 to
@@ -52,6 +68,8 @@
#define _UFFDIO_WAKE (0x02)
#define _UFFDIO_COPY (0x03)
#define _UFFDIO_ZEROPAGE (0x04)
+#define _UFFDIO_WRITEPROTECT (0x06)
+#define _UFFDIO_CONTINUE (0x07)
#define _UFFDIO_API (0x3F)
/* userfaultfd ioctl ids */
@@ -68,6 +86,10 @@
struct uffdio_copy)
#define UFFDIO_ZEROPAGE _IOWR(UFFDIO, _UFFDIO_ZEROPAGE, \
struct uffdio_zeropage)
+#define UFFDIO_WRITEPROTECT _IOWR(UFFDIO, _UFFDIO_WRITEPROTECT, \
+ struct uffdio_writeprotect)
+#define UFFDIO_CONTINUE _IOWR(UFFDIO, _UFFDIO_CONTINUE, \
+ struct uffdio_continue)
/* read() structure */
struct uffd_msg {
@@ -122,6 +144,7 @@ struct uffd_msg {
/* flags for UFFD_EVENT_PAGEFAULT */
#define UFFD_PAGEFAULT_FLAG_WRITE (1<<0) /* If this was a write fault */
#define UFFD_PAGEFAULT_FLAG_WP (1<<1) /* If reason is VM_UFFD_WP */
+#define UFFD_PAGEFAULT_FLAG_MINOR (1<<2) /* If reason is VM_UFFD_MINOR */
struct uffdio_api {
/* userland asks for an API number and the features to enable */
@@ -166,6 +189,20 @@ struct uffdio_api {
*
* UFFD_FEATURE_THREAD_ID pid of the page faulted task_struct will
* be returned, if feature is not requested 0 will be returned.
+ *
+ * UFFD_FEATURE_MINOR_HUGETLBFS indicates that minor faults
+ * can be intercepted (via REGISTER_MODE_MINOR) for
+ * hugetlbfs-backed pages.
+ *
+ * UFFD_FEATURE_MINOR_SHMEM indicates the same support as
+ * UFFD_FEATURE_MINOR_HUGETLBFS, but for shmem-backed pages instead.
+ *
+ * UFFD_FEATURE_EXACT_ADDRESS indicates that the exact address of page
+ * faults would be provided and the offset within the page would not be
+ * masked.
+ *
+ * UFFD_FEATURE_WP_HUGETLBFS_SHMEM indicates that userfaultfd
+ * write-protection mode is supported on both shmem and hugetlbfs.
*/
#define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0)
#define UFFD_FEATURE_EVENT_FORK (1<<1)
@@ -176,6 +213,10 @@ struct uffdio_api {
#define UFFD_FEATURE_EVENT_UNMAP (1<<6)
#define UFFD_FEATURE_SIGBUS (1<<7)
#define UFFD_FEATURE_THREAD_ID (1<<8)
+#define UFFD_FEATURE_MINOR_HUGETLBFS (1<<9)
+#define UFFD_FEATURE_MINOR_SHMEM (1<<10)
+#define UFFD_FEATURE_EXACT_ADDRESS (1<<11)
+#define UFFD_FEATURE_WP_HUGETLBFS_SHMEM (1<<12)
__u64 features;
__u64 ioctls;
@@ -190,6 +231,7 @@ struct uffdio_register {
struct uffdio_range range;
#define UFFDIO_REGISTER_MODE_MISSING ((__u64)1<<0)
#define UFFDIO_REGISTER_MODE_WP ((__u64)1<<1)
+#define UFFDIO_REGISTER_MODE_MINOR ((__u64)1<<2)
__u64 mode;
/*
@@ -203,13 +245,14 @@ struct uffdio_copy {
__u64 dst;
__u64 src;
__u64 len;
+#define UFFDIO_COPY_MODE_DONTWAKE ((__u64)1<<0)
/*
- * There will be a wrprotection flag later that allows to map
- * pages wrprotected on the fly. And such a flag will be
- * available if the wrprotection ioctl are implemented for the
- * range according to the uffdio_register.ioctls.
+ * UFFDIO_COPY_MODE_WP will map the page write protected on
+ * the fly. UFFDIO_COPY_MODE_WP is available only if the
+ * write protected ioctl is implemented for the range
+ * according to the uffdio_register.ioctls.
*/
-#define UFFDIO_COPY_MODE_DONTWAKE ((__u64)1<<0)
+#define UFFDIO_COPY_MODE_WP ((__u64)1<<1)
__u64 mode;
/*
@@ -231,4 +274,45 @@ struct uffdio_zeropage {
__s64 zeropage;
};
+struct uffdio_writeprotect {
+ struct uffdio_range range;
+/*
+ * UFFDIO_WRITEPROTECT_MODE_WP: set the flag to write protect a range,
+ * unset the flag to undo protection of a range which was previously
+ * write protected.
+ *
+ * UFFDIO_WRITEPROTECT_MODE_DONTWAKE: set the flag to avoid waking up
+ * any wait thread after the operation succeeds.
+ *
+ * NOTE: Write protecting a region (WP=1) is unrelated to page faults,
+ * therefore the DONTWAKE flag is meaningless with WP=1. Removing write
+ * protection (WP=0) in response to a page fault wakes the faulting
+ * task unless DONTWAKE is set.
+ */
+#define UFFDIO_WRITEPROTECT_MODE_WP ((__u64)1<<0)
+#define UFFDIO_WRITEPROTECT_MODE_DONTWAKE ((__u64)1<<1)
+ __u64 mode;
+};
+
+struct uffdio_continue {
+ struct uffdio_range range;
+#define UFFDIO_CONTINUE_MODE_DONTWAKE ((__u64)1<<0)
+ __u64 mode;
+
+ /*
+ * Fields below here are written by the ioctl and must be at the end:
+ * the copy_from_user will not read past here.
+ */
+ __s64 mapped;
+};
+
+/*
+ * Flags for the userfaultfd(2) system call itself.
+ */
+
+/*
+ * Create a userfaultfd that can handle page faults only in user mode.
+ */
+#define UFFD_USER_MODE_ONLY 1
+
#endif /* _LINUX_USERFAULTFD_H */
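A sketch of the new write-protect ioctl in use; it assumes the range was
registered with UFFDIO_REGISTER_MODE_WP beforehand:

#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

static int wp_range(int uffd, __u64 start, __u64 len, int protect)
{
	struct uffdio_writeprotect wp = {
		.range = { .start = start, .len = len },
		.mode  = protect ? UFFDIO_WRITEPROTECT_MODE_WP : 0,
	};

	/* WP=0 also wakes the faulting thread unless DONTWAKE is set */
	return ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
}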
diff --git a/include/uapi/linux/uuid.h b/include/uapi/linux/uuid.h
index e5a7eecef7c3..c0f4bd9b040e 100644
--- a/include/uapi/linux/uuid.h
+++ b/include/uapi/linux/uuid.h
@@ -1,18 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* DO NOT USE in new code! This is solely for MEI due to legacy reasons */
/*
* UUID/GUID definition
*
* Copyright (C) 2010, Intel Corp.
* Huang Ying <ying.huang@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation;
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _UAPI_LINUX_UUID_H_
diff --git a/include/uapi/linux/uvcvideo.h b/include/uapi/linux/uvcvideo.h
index f80f05b3c423..8288137387c0 100644
--- a/include/uapi/linux/uvcvideo.h
+++ b/include/uapi/linux/uvcvideo.h
@@ -76,11 +76,11 @@ struct uvc_xu_control_query {
/**
* struct uvc_meta_buf - metadata buffer building block
- * @ns - system timestamp of the payload in nanoseconds
- * @sof - USB Frame Number
- * @length - length of the payload header
- * @flags - payload header flags
- * @buf - optional device-specific header data
+ * @ns: system timestamp of the payload in nanoseconds
+ * @sof: USB Frame Number
+ * @length: length of the payload header
+ * @flags: payload header flags
+ * @buf: optional device-specific header data
*
* UVC metadata nodes fill buffers with possibly multiple instances of this
* struct. The first two fields are added by the driver, they can be used for
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 5a7bedee2b0e..b5e7d082b8ad 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -50,11 +50,12 @@
#ifndef __LINUX_V4L2_CONTROLS_H
#define __LINUX_V4L2_CONTROLS_H
+#include <linux/const.h>
#include <linux/types.h>
/* Control classes */
#define V4L2_CTRL_CLASS_USER 0x00980000 /* Old-style 'user' controls */
-#define V4L2_CTRL_CLASS_MPEG 0x00990000 /* MPEG-compression controls */
+#define V4L2_CTRL_CLASS_CODEC 0x00990000 /* Stateful codec controls */
#define V4L2_CTRL_CLASS_CAMERA 0x009a0000 /* Camera class controls */
#define V4L2_CTRL_CLASS_FM_TX 0x009b0000 /* FM Modulator controls */
#define V4L2_CTRL_CLASS_FLASH 0x009c0000 /* Camera flash controls */
@@ -65,6 +66,8 @@
#define V4L2_CTRL_CLASS_FM_RX 0x00a10000 /* FM Receiver controls */
#define V4L2_CTRL_CLASS_RF_TUNER 0x00a20000 /* RF tuner controls */
#define V4L2_CTRL_CLASS_DETECT 0x00a30000 /* Detection controls */
+#define V4L2_CTRL_CLASS_CODEC_STATELESS 0x00a40000 /* Stateless codecs controls */
+#define V4L2_CTRL_CLASS_COLORIMETRY 0x00a50000 /* Colorimetry controls */
/* User-class control IDs */
@@ -125,6 +128,7 @@ enum v4l2_colorfx {
V4L2_COLORFX_SOLARIZATION = 13,
V4L2_COLORFX_ANTIQUE = 14,
V4L2_COLORFX_SET_CBCR = 15,
+ V4L2_COLORFX_SET_RGB = 16,
};
#define V4L2_CID_AUTOBRIGHTNESS (V4L2_CID_BASE+32)
#define V4L2_CID_BAND_STOP_FILTER (V4L2_CID_BASE+33)
@@ -142,9 +146,10 @@ enum v4l2_colorfx {
#define V4L2_CID_ALPHA_COMPONENT (V4L2_CID_BASE+41)
#define V4L2_CID_COLORFX_CBCR (V4L2_CID_BASE+42)
+#define V4L2_CID_COLORFX_RGB (V4L2_CID_BASE+43)
/* last CID + 1 */
-#define V4L2_CID_LASTP1 (V4L2_CID_BASE+43)
+#define V4L2_CID_LASTP1 (V4L2_CID_BASE+44)
/* USER-class private control IDs */
@@ -192,15 +197,49 @@ enum v4l2_colorfx {
* We reserve 16 controls for this driver. */
#define V4L2_CID_USER_IMX_BASE (V4L2_CID_USER_BASE + 0x10b0)
+/*
+ * The base for the atmel isc driver controls.
+ * We reserve 32 controls for this driver.
+ */
+#define V4L2_CID_USER_ATMEL_ISC_BASE (V4L2_CID_USER_BASE + 0x10c0)
+
+/*
+ * The base for the CODA driver controls.
+ * We reserve 16 controls for this driver.
+ */
+#define V4L2_CID_USER_CODA_BASE (V4L2_CID_USER_BASE + 0x10e0)
+/*
+ * The base for MIPI CCS driver controls.
+ * We reserve 128 controls for this driver.
+ */
+#define V4L2_CID_USER_CCS_BASE (V4L2_CID_USER_BASE + 0x10f0)
+/*
+ * The base for Allegro driver controls.
+ * We reserve 16 controls for this driver.
+ */
+#define V4L2_CID_USER_ALLEGRO_BASE (V4L2_CID_USER_BASE + 0x1170)
+
+/*
+ * The base for the isl7998x driver controls.
+ * We reserve 16 controls for this driver.
+ */
+#define V4L2_CID_USER_ISL7998X_BASE (V4L2_CID_USER_BASE + 0x1180)
+
+/*
+ * The base for DW100 driver controls.
+ * We reserve 16 controls for this driver.
+ */
+#define V4L2_CID_USER_DW100_BASE (V4L2_CID_USER_BASE + 0x1190)
+
/* MPEG-class control IDs */
/* The MPEG controls are applicable to all codec controls
* and the 'MPEG' part of the define is historical */
-#define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900)
-#define V4L2_CID_MPEG_CLASS (V4L2_CTRL_CLASS_MPEG | 1)
+#define V4L2_CID_CODEC_BASE (V4L2_CTRL_CLASS_CODEC | 0x900)
+#define V4L2_CID_CODEC_CLASS (V4L2_CTRL_CLASS_CODEC | 1)
/* MPEG streams, specific to multiplexed streams */
-#define V4L2_CID_MPEG_STREAM_TYPE (V4L2_CID_MPEG_BASE+0)
+#define V4L2_CID_MPEG_STREAM_TYPE (V4L2_CID_CODEC_BASE+0)
enum v4l2_mpeg_stream_type {
V4L2_MPEG_STREAM_TYPE_MPEG2_PS = 0, /* MPEG-2 program stream */
V4L2_MPEG_STREAM_TYPE_MPEG2_TS = 1, /* MPEG-2 transport stream */
@@ -209,26 +248,26 @@ enum v4l2_mpeg_stream_type {
V4L2_MPEG_STREAM_TYPE_MPEG1_VCD = 4, /* MPEG-1 VCD-compatible stream */
V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD = 5, /* MPEG-2 SVCD-compatible stream */
};
-#define V4L2_CID_MPEG_STREAM_PID_PMT (V4L2_CID_MPEG_BASE+1)
-#define V4L2_CID_MPEG_STREAM_PID_AUDIO (V4L2_CID_MPEG_BASE+2)
-#define V4L2_CID_MPEG_STREAM_PID_VIDEO (V4L2_CID_MPEG_BASE+3)
-#define V4L2_CID_MPEG_STREAM_PID_PCR (V4L2_CID_MPEG_BASE+4)
-#define V4L2_CID_MPEG_STREAM_PES_ID_AUDIO (V4L2_CID_MPEG_BASE+5)
-#define V4L2_CID_MPEG_STREAM_PES_ID_VIDEO (V4L2_CID_MPEG_BASE+6)
-#define V4L2_CID_MPEG_STREAM_VBI_FMT (V4L2_CID_MPEG_BASE+7)
+#define V4L2_CID_MPEG_STREAM_PID_PMT (V4L2_CID_CODEC_BASE+1)
+#define V4L2_CID_MPEG_STREAM_PID_AUDIO (V4L2_CID_CODEC_BASE+2)
+#define V4L2_CID_MPEG_STREAM_PID_VIDEO (V4L2_CID_CODEC_BASE+3)
+#define V4L2_CID_MPEG_STREAM_PID_PCR (V4L2_CID_CODEC_BASE+4)
+#define V4L2_CID_MPEG_STREAM_PES_ID_AUDIO (V4L2_CID_CODEC_BASE+5)
+#define V4L2_CID_MPEG_STREAM_PES_ID_VIDEO (V4L2_CID_CODEC_BASE+6)
+#define V4L2_CID_MPEG_STREAM_VBI_FMT (V4L2_CID_CODEC_BASE+7)
enum v4l2_mpeg_stream_vbi_fmt {
V4L2_MPEG_STREAM_VBI_FMT_NONE = 0, /* No VBI in the MPEG stream */
V4L2_MPEG_STREAM_VBI_FMT_IVTV = 1, /* VBI in private packets, IVTV format */
};
/* MPEG audio controls specific to multiplexed streams */
-#define V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ (V4L2_CID_MPEG_BASE+100)
+#define V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ (V4L2_CID_CODEC_BASE+100)
enum v4l2_mpeg_audio_sampling_freq {
V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100 = 0,
V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000 = 1,
V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000 = 2,
};
-#define V4L2_CID_MPEG_AUDIO_ENCODING (V4L2_CID_MPEG_BASE+101)
+#define V4L2_CID_MPEG_AUDIO_ENCODING (V4L2_CID_CODEC_BASE+101)
enum v4l2_mpeg_audio_encoding {
V4L2_MPEG_AUDIO_ENCODING_LAYER_1 = 0,
V4L2_MPEG_AUDIO_ENCODING_LAYER_2 = 1,
@@ -236,7 +275,7 @@ enum v4l2_mpeg_audio_encoding {
V4L2_MPEG_AUDIO_ENCODING_AAC = 3,
V4L2_MPEG_AUDIO_ENCODING_AC3 = 4,
};
-#define V4L2_CID_MPEG_AUDIO_L1_BITRATE (V4L2_CID_MPEG_BASE+102)
+#define V4L2_CID_MPEG_AUDIO_L1_BITRATE (V4L2_CID_CODEC_BASE+102)
enum v4l2_mpeg_audio_l1_bitrate {
V4L2_MPEG_AUDIO_L1_BITRATE_32K = 0,
V4L2_MPEG_AUDIO_L1_BITRATE_64K = 1,
@@ -253,7 +292,7 @@ enum v4l2_mpeg_audio_l1_bitrate {
V4L2_MPEG_AUDIO_L1_BITRATE_416K = 12,
V4L2_MPEG_AUDIO_L1_BITRATE_448K = 13,
};
-#define V4L2_CID_MPEG_AUDIO_L2_BITRATE (V4L2_CID_MPEG_BASE+103)
+#define V4L2_CID_MPEG_AUDIO_L2_BITRATE (V4L2_CID_CODEC_BASE+103)
enum v4l2_mpeg_audio_l2_bitrate {
V4L2_MPEG_AUDIO_L2_BITRATE_32K = 0,
V4L2_MPEG_AUDIO_L2_BITRATE_48K = 1,
@@ -270,7 +309,7 @@ enum v4l2_mpeg_audio_l2_bitrate {
V4L2_MPEG_AUDIO_L2_BITRATE_320K = 12,
V4L2_MPEG_AUDIO_L2_BITRATE_384K = 13,
};
-#define V4L2_CID_MPEG_AUDIO_L3_BITRATE (V4L2_CID_MPEG_BASE+104)
+#define V4L2_CID_MPEG_AUDIO_L3_BITRATE (V4L2_CID_CODEC_BASE+104)
enum v4l2_mpeg_audio_l3_bitrate {
V4L2_MPEG_AUDIO_L3_BITRATE_32K = 0,
V4L2_MPEG_AUDIO_L3_BITRATE_40K = 1,
@@ -287,34 +326,34 @@ enum v4l2_mpeg_audio_l3_bitrate {
V4L2_MPEG_AUDIO_L3_BITRATE_256K = 12,
V4L2_MPEG_AUDIO_L3_BITRATE_320K = 13,
};
-#define V4L2_CID_MPEG_AUDIO_MODE (V4L2_CID_MPEG_BASE+105)
+#define V4L2_CID_MPEG_AUDIO_MODE (V4L2_CID_CODEC_BASE+105)
enum v4l2_mpeg_audio_mode {
V4L2_MPEG_AUDIO_MODE_STEREO = 0,
V4L2_MPEG_AUDIO_MODE_JOINT_STEREO = 1,
V4L2_MPEG_AUDIO_MODE_DUAL = 2,
V4L2_MPEG_AUDIO_MODE_MONO = 3,
};
-#define V4L2_CID_MPEG_AUDIO_MODE_EXTENSION (V4L2_CID_MPEG_BASE+106)
+#define V4L2_CID_MPEG_AUDIO_MODE_EXTENSION (V4L2_CID_CODEC_BASE+106)
enum v4l2_mpeg_audio_mode_extension {
V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4 = 0,
V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_8 = 1,
V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_12 = 2,
V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16 = 3,
};
-#define V4L2_CID_MPEG_AUDIO_EMPHASIS (V4L2_CID_MPEG_BASE+107)
+#define V4L2_CID_MPEG_AUDIO_EMPHASIS (V4L2_CID_CODEC_BASE+107)
enum v4l2_mpeg_audio_emphasis {
V4L2_MPEG_AUDIO_EMPHASIS_NONE = 0,
V4L2_MPEG_AUDIO_EMPHASIS_50_DIV_15_uS = 1,
V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17 = 2,
};
-#define V4L2_CID_MPEG_AUDIO_CRC (V4L2_CID_MPEG_BASE+108)
+#define V4L2_CID_MPEG_AUDIO_CRC (V4L2_CID_CODEC_BASE+108)
enum v4l2_mpeg_audio_crc {
V4L2_MPEG_AUDIO_CRC_NONE = 0,
V4L2_MPEG_AUDIO_CRC_CRC16 = 1,
};
-#define V4L2_CID_MPEG_AUDIO_MUTE (V4L2_CID_MPEG_BASE+109)
-#define V4L2_CID_MPEG_AUDIO_AAC_BITRATE (V4L2_CID_MPEG_BASE+110)
-#define V4L2_CID_MPEG_AUDIO_AC3_BITRATE (V4L2_CID_MPEG_BASE+111)
+#define V4L2_CID_MPEG_AUDIO_MUTE (V4L2_CID_CODEC_BASE+109)
+#define V4L2_CID_MPEG_AUDIO_AAC_BITRATE (V4L2_CID_CODEC_BASE+110)
+#define V4L2_CID_MPEG_AUDIO_AC3_BITRATE (V4L2_CID_CODEC_BASE+111)
enum v4l2_mpeg_audio_ac3_bitrate {
V4L2_MPEG_AUDIO_AC3_BITRATE_32K = 0,
V4L2_MPEG_AUDIO_AC3_BITRATE_40K = 1,
@@ -336,7 +375,7 @@ enum v4l2_mpeg_audio_ac3_bitrate {
V4L2_MPEG_AUDIO_AC3_BITRATE_576K = 17,
V4L2_MPEG_AUDIO_AC3_BITRATE_640K = 18,
};
-#define V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK (V4L2_CID_MPEG_BASE+112)
+#define V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK (V4L2_CID_CODEC_BASE+112)
enum v4l2_mpeg_audio_dec_playback {
V4L2_MPEG_AUDIO_DEC_PLAYBACK_AUTO = 0,
V4L2_MPEG_AUDIO_DEC_PLAYBACK_STEREO = 1,
@@ -345,51 +384,52 @@ enum v4l2_mpeg_audio_dec_playback {
V4L2_MPEG_AUDIO_DEC_PLAYBACK_MONO = 4,
V4L2_MPEG_AUDIO_DEC_PLAYBACK_SWAPPED_STEREO = 5,
};
-#define V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK (V4L2_CID_MPEG_BASE+113)
+#define V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK (V4L2_CID_CODEC_BASE+113)
/* MPEG video controls specific to multiplexed streams */
-#define V4L2_CID_MPEG_VIDEO_ENCODING (V4L2_CID_MPEG_BASE+200)
+#define V4L2_CID_MPEG_VIDEO_ENCODING (V4L2_CID_CODEC_BASE+200)
enum v4l2_mpeg_video_encoding {
V4L2_MPEG_VIDEO_ENCODING_MPEG_1 = 0,
V4L2_MPEG_VIDEO_ENCODING_MPEG_2 = 1,
V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC = 2,
};
-#define V4L2_CID_MPEG_VIDEO_ASPECT (V4L2_CID_MPEG_BASE+201)
+#define V4L2_CID_MPEG_VIDEO_ASPECT (V4L2_CID_CODEC_BASE+201)
enum v4l2_mpeg_video_aspect {
V4L2_MPEG_VIDEO_ASPECT_1x1 = 0,
V4L2_MPEG_VIDEO_ASPECT_4x3 = 1,
V4L2_MPEG_VIDEO_ASPECT_16x9 = 2,
V4L2_MPEG_VIDEO_ASPECT_221x100 = 3,
};
-#define V4L2_CID_MPEG_VIDEO_B_FRAMES (V4L2_CID_MPEG_BASE+202)
-#define V4L2_CID_MPEG_VIDEO_GOP_SIZE (V4L2_CID_MPEG_BASE+203)
-#define V4L2_CID_MPEG_VIDEO_GOP_CLOSURE (V4L2_CID_MPEG_BASE+204)
-#define V4L2_CID_MPEG_VIDEO_PULLDOWN (V4L2_CID_MPEG_BASE+205)
-#define V4L2_CID_MPEG_VIDEO_BITRATE_MODE (V4L2_CID_MPEG_BASE+206)
+#define V4L2_CID_MPEG_VIDEO_B_FRAMES (V4L2_CID_CODEC_BASE+202)
+#define V4L2_CID_MPEG_VIDEO_GOP_SIZE (V4L2_CID_CODEC_BASE+203)
+#define V4L2_CID_MPEG_VIDEO_GOP_CLOSURE (V4L2_CID_CODEC_BASE+204)
+#define V4L2_CID_MPEG_VIDEO_PULLDOWN (V4L2_CID_CODEC_BASE+205)
+#define V4L2_CID_MPEG_VIDEO_BITRATE_MODE (V4L2_CID_CODEC_BASE+206)
enum v4l2_mpeg_video_bitrate_mode {
V4L2_MPEG_VIDEO_BITRATE_MODE_VBR = 0,
V4L2_MPEG_VIDEO_BITRATE_MODE_CBR = 1,
-};
-#define V4L2_CID_MPEG_VIDEO_BITRATE (V4L2_CID_MPEG_BASE+207)
-#define V4L2_CID_MPEG_VIDEO_BITRATE_PEAK (V4L2_CID_MPEG_BASE+208)
-#define V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION (V4L2_CID_MPEG_BASE+209)
-#define V4L2_CID_MPEG_VIDEO_MUTE (V4L2_CID_MPEG_BASE+210)
-#define V4L2_CID_MPEG_VIDEO_MUTE_YUV (V4L2_CID_MPEG_BASE+211)
-#define V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE (V4L2_CID_MPEG_BASE+212)
-#define V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER (V4L2_CID_MPEG_BASE+213)
-#define V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB (V4L2_CID_MPEG_BASE+214)
-#define V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE (V4L2_CID_MPEG_BASE+215)
-#define V4L2_CID_MPEG_VIDEO_HEADER_MODE (V4L2_CID_MPEG_BASE+216)
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CQ = 2,
+};
+#define V4L2_CID_MPEG_VIDEO_BITRATE (V4L2_CID_CODEC_BASE+207)
+#define V4L2_CID_MPEG_VIDEO_BITRATE_PEAK (V4L2_CID_CODEC_BASE+208)
+#define V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION (V4L2_CID_CODEC_BASE+209)
+#define V4L2_CID_MPEG_VIDEO_MUTE (V4L2_CID_CODEC_BASE+210)
+#define V4L2_CID_MPEG_VIDEO_MUTE_YUV (V4L2_CID_CODEC_BASE+211)
+#define V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE (V4L2_CID_CODEC_BASE+212)
+#define V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER (V4L2_CID_CODEC_BASE+213)
+#define V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB (V4L2_CID_CODEC_BASE+214)
+#define V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE (V4L2_CID_CODEC_BASE+215)
+#define V4L2_CID_MPEG_VIDEO_HEADER_MODE (V4L2_CID_CODEC_BASE+216)
enum v4l2_mpeg_video_header_mode {
V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE = 0,
V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME = 1,
};
-#define V4L2_CID_MPEG_VIDEO_MAX_REF_PIC (V4L2_CID_MPEG_BASE+217)
-#define V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE (V4L2_CID_MPEG_BASE+218)
-#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES (V4L2_CID_MPEG_BASE+219)
-#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB (V4L2_CID_MPEG_BASE+220)
-#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE (V4L2_CID_MPEG_BASE+221)
+#define V4L2_CID_MPEG_VIDEO_MAX_REF_PIC (V4L2_CID_CODEC_BASE+217)
+#define V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE (V4L2_CID_CODEC_BASE+218)
+#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES (V4L2_CID_CODEC_BASE+219)
+#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB (V4L2_CID_CODEC_BASE+220)
+#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE (V4L2_CID_CODEC_BASE+221)
enum v4l2_mpeg_video_multi_slice_mode {
V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE = 0,
V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB = 1,
@@ -400,24 +440,36 @@ enum v4l2_mpeg_video_multi_slice_mode {
V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES = 2,
#endif
};
-#define V4L2_CID_MPEG_VIDEO_VBV_SIZE (V4L2_CID_MPEG_BASE+222)
-#define V4L2_CID_MPEG_VIDEO_DEC_PTS (V4L2_CID_MPEG_BASE+223)
-#define V4L2_CID_MPEG_VIDEO_DEC_FRAME (V4L2_CID_MPEG_BASE+224)
-#define V4L2_CID_MPEG_VIDEO_VBV_DELAY (V4L2_CID_MPEG_BASE+225)
-#define V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER (V4L2_CID_MPEG_BASE+226)
-#define V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE (V4L2_CID_MPEG_BASE+227)
-#define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (V4L2_CID_MPEG_BASE+228)
-#define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (V4L2_CID_MPEG_BASE+229)
+#define V4L2_CID_MPEG_VIDEO_VBV_SIZE (V4L2_CID_CODEC_BASE+222)
+#define V4L2_CID_MPEG_VIDEO_DEC_PTS (V4L2_CID_CODEC_BASE+223)
+#define V4L2_CID_MPEG_VIDEO_DEC_FRAME (V4L2_CID_CODEC_BASE+224)
+#define V4L2_CID_MPEG_VIDEO_VBV_DELAY (V4L2_CID_CODEC_BASE+225)
+#define V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER (V4L2_CID_CODEC_BASE+226)
+#define V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE (V4L2_CID_CODEC_BASE+227)
+#define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (V4L2_CID_CODEC_BASE+228)
+#define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (V4L2_CID_CODEC_BASE+229)
+#define V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID (V4L2_CID_CODEC_BASE+230)
+#define V4L2_CID_MPEG_VIDEO_AU_DELIMITER (V4L2_CID_CODEC_BASE+231)
+#define V4L2_CID_MPEG_VIDEO_LTR_COUNT (V4L2_CID_CODEC_BASE+232)
+#define V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX (V4L2_CID_CODEC_BASE+233)
+#define V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES (V4L2_CID_CODEC_BASE+234)
+#define V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR (V4L2_CID_CODEC_BASE+235)
+#define V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD (V4L2_CID_CODEC_BASE+236)
+#define V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE (V4L2_CID_CODEC_BASE+237)
+enum v4l2_mpeg_video_intra_refresh_period_type {
+ V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE_RANDOM = 0,
+ V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE_CYCLIC = 1,
+};
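
The two intra-refresh controls added above act as a pair: the type selects the refresh scheme, the period sets the window over which all macroblocks are refreshed. A minimal userspace sketch, assuming an already-open encoder fd (<linux/videodev2.h> and <sys/ioctl.h> assumed throughout these sketches); the period of 30 is an arbitrary example:

	struct v4l2_ext_control ctrls[2] = {
		{ .id = V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE,
		  .value = V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE_CYCLIC },
		{ .id = V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD, .value = 30 },
	};
	struct v4l2_ext_controls arg = {
		.which = V4L2_CTRL_WHICH_CUR_VAL,
		.count = 2,
		.controls = ctrls,
	};
	if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &arg))
		perror("VIDIOC_S_EXT_CTRLS");
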
/* CIDs for the MPEG-2 Part 2 (H.262) codec */
-#define V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL (V4L2_CID_MPEG_BASE+270)
+#define V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL (V4L2_CID_CODEC_BASE+270)
enum v4l2_mpeg_video_mpeg2_level {
V4L2_MPEG_VIDEO_MPEG2_LEVEL_LOW = 0,
V4L2_MPEG_VIDEO_MPEG2_LEVEL_MAIN = 1,
V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH_1440 = 2,
V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH = 3,
};
-#define V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE (V4L2_CID_MPEG_BASE+271)
+#define V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE (V4L2_CID_CODEC_BASE+271)
enum v4l2_mpeg_video_mpeg2_profile {
V4L2_MPEG_VIDEO_MPEG2_PROFILE_SIMPLE = 0,
V4L2_MPEG_VIDEO_MPEG2_PROFILE_MAIN = 1,
@@ -428,28 +480,28 @@ enum v4l2_mpeg_video_mpeg2_profile {
};
/* CIDs for the FWHT codec as used by the vicodec driver. */
-#define V4L2_CID_FWHT_I_FRAME_QP (V4L2_CID_MPEG_BASE + 290)
-#define V4L2_CID_FWHT_P_FRAME_QP (V4L2_CID_MPEG_BASE + 291)
-
-#define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (V4L2_CID_MPEG_BASE+300)
-#define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (V4L2_CID_MPEG_BASE+301)
-#define V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP (V4L2_CID_MPEG_BASE+302)
-#define V4L2_CID_MPEG_VIDEO_H263_MIN_QP (V4L2_CID_MPEG_BASE+303)
-#define V4L2_CID_MPEG_VIDEO_H263_MAX_QP (V4L2_CID_MPEG_BASE+304)
-#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP (V4L2_CID_MPEG_BASE+350)
-#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP (V4L2_CID_MPEG_BASE+351)
-#define V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP (V4L2_CID_MPEG_BASE+352)
-#define V4L2_CID_MPEG_VIDEO_H264_MIN_QP (V4L2_CID_MPEG_BASE+353)
-#define V4L2_CID_MPEG_VIDEO_H264_MAX_QP (V4L2_CID_MPEG_BASE+354)
-#define V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM (V4L2_CID_MPEG_BASE+355)
-#define V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE (V4L2_CID_MPEG_BASE+356)
-#define V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE (V4L2_CID_MPEG_BASE+357)
+#define V4L2_CID_FWHT_I_FRAME_QP (V4L2_CID_CODEC_BASE + 290)
+#define V4L2_CID_FWHT_P_FRAME_QP (V4L2_CID_CODEC_BASE + 291)
+
+#define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (V4L2_CID_CODEC_BASE+300)
+#define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (V4L2_CID_CODEC_BASE+301)
+#define V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP (V4L2_CID_CODEC_BASE+302)
+#define V4L2_CID_MPEG_VIDEO_H263_MIN_QP (V4L2_CID_CODEC_BASE+303)
+#define V4L2_CID_MPEG_VIDEO_H263_MAX_QP (V4L2_CID_CODEC_BASE+304)
+#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP (V4L2_CID_CODEC_BASE+350)
+#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP (V4L2_CID_CODEC_BASE+351)
+#define V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP (V4L2_CID_CODEC_BASE+352)
+#define V4L2_CID_MPEG_VIDEO_H264_MIN_QP (V4L2_CID_CODEC_BASE+353)
+#define V4L2_CID_MPEG_VIDEO_H264_MAX_QP (V4L2_CID_CODEC_BASE+354)
+#define V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM (V4L2_CID_CODEC_BASE+355)
+#define V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE (V4L2_CID_CODEC_BASE+356)
+#define V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE (V4L2_CID_CODEC_BASE+357)
enum v4l2_mpeg_video_h264_entropy_mode {
V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC = 0,
V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC = 1,
};
-#define V4L2_CID_MPEG_VIDEO_H264_I_PERIOD (V4L2_CID_MPEG_BASE+358)
-#define V4L2_CID_MPEG_VIDEO_H264_LEVEL (V4L2_CID_MPEG_BASE+359)
+#define V4L2_CID_MPEG_VIDEO_H264_I_PERIOD (V4L2_CID_CODEC_BASE+358)
+#define V4L2_CID_MPEG_VIDEO_H264_LEVEL (V4L2_CID_CODEC_BASE+359)
enum v4l2_mpeg_video_h264_level {
V4L2_MPEG_VIDEO_H264_LEVEL_1_0 = 0,
V4L2_MPEG_VIDEO_H264_LEVEL_1B = 1,
@@ -467,16 +519,20 @@ enum v4l2_mpeg_video_h264_level {
V4L2_MPEG_VIDEO_H264_LEVEL_4_2 = 13,
V4L2_MPEG_VIDEO_H264_LEVEL_5_0 = 14,
V4L2_MPEG_VIDEO_H264_LEVEL_5_1 = 15,
-};
-#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA (V4L2_CID_MPEG_BASE+360)
-#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA (V4L2_CID_MPEG_BASE+361)
-#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE (V4L2_CID_MPEG_BASE+362)
+ V4L2_MPEG_VIDEO_H264_LEVEL_5_2 = 16,
+ V4L2_MPEG_VIDEO_H264_LEVEL_6_0 = 17,
+ V4L2_MPEG_VIDEO_H264_LEVEL_6_1 = 18,
+ V4L2_MPEG_VIDEO_H264_LEVEL_6_2 = 19,
+};
+#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA (V4L2_CID_CODEC_BASE+360)
+#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA (V4L2_CID_CODEC_BASE+361)
+#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE (V4L2_CID_CODEC_BASE+362)
enum v4l2_mpeg_video_h264_loop_filter_mode {
V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED = 0,
V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED = 1,
V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY = 2,
};
-#define V4L2_CID_MPEG_VIDEO_H264_PROFILE (V4L2_CID_MPEG_BASE+363)
+#define V4L2_CID_MPEG_VIDEO_H264_PROFILE (V4L2_CID_CODEC_BASE+363)
enum v4l2_mpeg_video_h264_profile {
V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE = 0,
V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE = 1,
@@ -495,11 +551,12 @@ enum v4l2_mpeg_video_h264_profile {
V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH_INTRA = 14,
V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH = 15,
V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH = 16,
+ V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH = 17,
};
-#define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT (V4L2_CID_MPEG_BASE+364)
-#define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH (V4L2_CID_MPEG_BASE+365)
-#define V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE (V4L2_CID_MPEG_BASE+366)
-#define V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC (V4L2_CID_MPEG_BASE+367)
+#define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT (V4L2_CID_CODEC_BASE+364)
+#define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH (V4L2_CID_CODEC_BASE+365)
+#define V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE (V4L2_CID_CODEC_BASE+366)
+#define V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC (V4L2_CID_CODEC_BASE+367)
enum v4l2_mpeg_video_h264_vui_sar_idc {
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED = 0,
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1 = 1,
@@ -520,9 +577,9 @@ enum v4l2_mpeg_video_h264_vui_sar_idc {
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_2x1 = 16,
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED = 17,
};
-#define V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING (V4L2_CID_MPEG_BASE+368)
-#define V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0 (V4L2_CID_MPEG_BASE+369)
-#define V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE (V4L2_CID_MPEG_BASE+370)
+#define V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING (V4L2_CID_CODEC_BASE+368)
+#define V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0 (V4L2_CID_CODEC_BASE+369)
+#define V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE (V4L2_CID_CODEC_BASE+370)
enum v4l2_mpeg_video_h264_sei_fp_arrangement_type {
V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_CHECKERBOARD = 0,
V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_COLUMN = 1,
@@ -531,8 +588,8 @@ enum v4l2_mpeg_video_h264_sei_fp_arrangement_type {
V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TOP_BOTTOM = 4,
V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TEMPORAL = 5,
};
-#define V4L2_CID_MPEG_VIDEO_H264_FMO (V4L2_CID_MPEG_BASE+371)
-#define V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE (V4L2_CID_MPEG_BASE+372)
+#define V4L2_CID_MPEG_VIDEO_H264_FMO (V4L2_CID_CODEC_BASE+371)
+#define V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE (V4L2_CID_CODEC_BASE+372)
enum v4l2_mpeg_video_h264_fmo_map_type {
V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_INTERLEAVED_SLICES = 0,
V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_SCATTERED_SLICES = 1,
@@ -542,36 +599,45 @@ enum v4l2_mpeg_video_h264_fmo_map_type {
V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_WIPE_SCAN = 5,
V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_EXPLICIT = 6,
};
-#define V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP (V4L2_CID_MPEG_BASE+373)
-#define V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION (V4L2_CID_MPEG_BASE+374)
+#define V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP (V4L2_CID_CODEC_BASE+373)
+#define V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION (V4L2_CID_CODEC_BASE+374)
enum v4l2_mpeg_video_h264_fmo_change_dir {
V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_RIGHT = 0,
V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_LEFT = 1,
};
-#define V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE (V4L2_CID_MPEG_BASE+375)
-#define V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH (V4L2_CID_MPEG_BASE+376)
-#define V4L2_CID_MPEG_VIDEO_H264_ASO (V4L2_CID_MPEG_BASE+377)
-#define V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER (V4L2_CID_MPEG_BASE+378)
-#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING (V4L2_CID_MPEG_BASE+379)
-#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE (V4L2_CID_MPEG_BASE+380)
+#define V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE (V4L2_CID_CODEC_BASE+375)
+#define V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH (V4L2_CID_CODEC_BASE+376)
+#define V4L2_CID_MPEG_VIDEO_H264_ASO (V4L2_CID_CODEC_BASE+377)
+#define V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER (V4L2_CID_CODEC_BASE+378)
+#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING (V4L2_CID_CODEC_BASE+379)
+#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE (V4L2_CID_CODEC_BASE+380)
enum v4l2_mpeg_video_h264_hierarchical_coding_type {
V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_B = 0,
V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_P = 1,
};
-#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER (V4L2_CID_MPEG_BASE+381)
-#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP (V4L2_CID_MPEG_BASE+382)
-#define V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION (V4L2_CID_MPEG_BASE+383)
-#define V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET (V4L2_CID_MPEG_BASE+384)
-#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP (V4L2_CID_MPEG_BASE+385)
-#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP (V4L2_CID_MPEG_BASE+386)
-#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP (V4L2_CID_MPEG_BASE+387)
-#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP (V4L2_CID_MPEG_BASE+388)
-#define V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP (V4L2_CID_MPEG_BASE+400)
-#define V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP (V4L2_CID_MPEG_BASE+401)
-#define V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP (V4L2_CID_MPEG_BASE+402)
-#define V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP (V4L2_CID_MPEG_BASE+403)
-#define V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP (V4L2_CID_MPEG_BASE+404)
-#define V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL (V4L2_CID_MPEG_BASE+405)
+#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER (V4L2_CID_CODEC_BASE+381)
+#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP (V4L2_CID_CODEC_BASE+382)
+#define V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION (V4L2_CID_CODEC_BASE+383)
+#define V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET (V4L2_CID_CODEC_BASE+384)
+#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP (V4L2_CID_CODEC_BASE+385)
+#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP (V4L2_CID_CODEC_BASE+386)
+#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP (V4L2_CID_CODEC_BASE+387)
+#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP (V4L2_CID_CODEC_BASE+388)
+#define V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP (V4L2_CID_CODEC_BASE+389)
+#define V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP (V4L2_CID_CODEC_BASE+390)
+#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L0_BR (V4L2_CID_CODEC_BASE+391)
+#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L1_BR (V4L2_CID_CODEC_BASE+392)
+#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L2_BR (V4L2_CID_CODEC_BASE+393)
+#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L3_BR (V4L2_CID_CODEC_BASE+394)
+#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L4_BR (V4L2_CID_CODEC_BASE+395)
+#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L5_BR (V4L2_CID_CODEC_BASE+396)
+#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L6_BR (V4L2_CID_CODEC_BASE+397)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP (V4L2_CID_CODEC_BASE+400)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP (V4L2_CID_CODEC_BASE+401)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP (V4L2_CID_CODEC_BASE+402)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP (V4L2_CID_CODEC_BASE+403)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP (V4L2_CID_CODEC_BASE+404)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL (V4L2_CID_CODEC_BASE+405)
enum v4l2_mpeg_video_mpeg4_level {
V4L2_MPEG_VIDEO_MPEG4_LEVEL_0 = 0,
V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B = 1,
@@ -582,7 +648,7 @@ enum v4l2_mpeg_video_mpeg4_level {
V4L2_MPEG_VIDEO_MPEG4_LEVEL_4 = 6,
V4L2_MPEG_VIDEO_MPEG4_LEVEL_5 = 7,
};
-#define V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE (V4L2_CID_MPEG_BASE+406)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE (V4L2_CID_CODEC_BASE+406)
enum v4l2_mpeg_video_mpeg4_profile {
V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE = 0,
V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE = 1,
@@ -590,40 +656,40 @@ enum v4l2_mpeg_video_mpeg4_profile {
V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE_SCALABLE = 3,
V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY = 4,
};
-#define V4L2_CID_MPEG_VIDEO_MPEG4_QPEL (V4L2_CID_MPEG_BASE+407)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_QPEL (V4L2_CID_CODEC_BASE+407)
/* Control IDs for VP8 streams
* Although VP8 is not part of MPEG we add these controls to the MPEG class
* as that class is already handling other video compression standards
*/
-#define V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS (V4L2_CID_MPEG_BASE+500)
+#define V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS (V4L2_CID_CODEC_BASE+500)
enum v4l2_vp8_num_partitions {
V4L2_CID_MPEG_VIDEO_VPX_1_PARTITION = 0,
V4L2_CID_MPEG_VIDEO_VPX_2_PARTITIONS = 1,
V4L2_CID_MPEG_VIDEO_VPX_4_PARTITIONS = 2,
V4L2_CID_MPEG_VIDEO_VPX_8_PARTITIONS = 3,
};
-#define V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4 (V4L2_CID_MPEG_BASE+501)
-#define V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES (V4L2_CID_MPEG_BASE+502)
+#define V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4 (V4L2_CID_CODEC_BASE+501)
+#define V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES (V4L2_CID_CODEC_BASE+502)
enum v4l2_vp8_num_ref_frames {
V4L2_CID_MPEG_VIDEO_VPX_1_REF_FRAME = 0,
V4L2_CID_MPEG_VIDEO_VPX_2_REF_FRAME = 1,
V4L2_CID_MPEG_VIDEO_VPX_3_REF_FRAME = 2,
};
-#define V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL (V4L2_CID_MPEG_BASE+503)
-#define V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS (V4L2_CID_MPEG_BASE+504)
-#define V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD (V4L2_CID_MPEG_BASE+505)
-#define V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL (V4L2_CID_MPEG_BASE+506)
+#define V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL (V4L2_CID_CODEC_BASE+503)
+#define V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS (V4L2_CID_CODEC_BASE+504)
+#define V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD (V4L2_CID_CODEC_BASE+505)
+#define V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL (V4L2_CID_CODEC_BASE+506)
enum v4l2_vp8_golden_frame_sel {
V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV = 0,
V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_REF_PERIOD = 1,
};
-#define V4L2_CID_MPEG_VIDEO_VPX_MIN_QP (V4L2_CID_MPEG_BASE+507)
-#define V4L2_CID_MPEG_VIDEO_VPX_MAX_QP (V4L2_CID_MPEG_BASE+508)
-#define V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP (V4L2_CID_MPEG_BASE+509)
-#define V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP (V4L2_CID_MPEG_BASE+510)
+#define V4L2_CID_MPEG_VIDEO_VPX_MIN_QP (V4L2_CID_CODEC_BASE+507)
+#define V4L2_CID_MPEG_VIDEO_VPX_MAX_QP (V4L2_CID_CODEC_BASE+508)
+#define V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP (V4L2_CID_CODEC_BASE+509)
+#define V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP (V4L2_CID_CODEC_BASE+510)
-#define V4L2_CID_MPEG_VIDEO_VP8_PROFILE (V4L2_CID_MPEG_BASE+511)
+#define V4L2_CID_MPEG_VIDEO_VP8_PROFILE (V4L2_CID_CODEC_BASE+511)
enum v4l2_mpeg_video_vp8_profile {
V4L2_MPEG_VIDEO_VP8_PROFILE_0 = 0,
V4L2_MPEG_VIDEO_VP8_PROFILE_1 = 1,
@@ -632,42 +698,59 @@ enum v4l2_mpeg_video_vp8_profile {
};
/* Deprecated alias for compatibility reasons. */
#define V4L2_CID_MPEG_VIDEO_VPX_PROFILE V4L2_CID_MPEG_VIDEO_VP8_PROFILE
-#define V4L2_CID_MPEG_VIDEO_VP9_PROFILE (V4L2_CID_MPEG_BASE+512)
+#define V4L2_CID_MPEG_VIDEO_VP9_PROFILE (V4L2_CID_CODEC_BASE+512)
enum v4l2_mpeg_video_vp9_profile {
V4L2_MPEG_VIDEO_VP9_PROFILE_0 = 0,
V4L2_MPEG_VIDEO_VP9_PROFILE_1 = 1,
V4L2_MPEG_VIDEO_VP9_PROFILE_2 = 2,
V4L2_MPEG_VIDEO_VP9_PROFILE_3 = 3,
};
+#define V4L2_CID_MPEG_VIDEO_VP9_LEVEL (V4L2_CID_CODEC_BASE+513)
+enum v4l2_mpeg_video_vp9_level {
+ V4L2_MPEG_VIDEO_VP9_LEVEL_1_0 = 0,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_1_1 = 1,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_2_0 = 2,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_2_1 = 3,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_3_0 = 4,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_3_1 = 5,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_4_0 = 6,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_4_1 = 7,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_5_0 = 8,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_5_1 = 9,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_5_2 = 10,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_6_0 = 11,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_6_1 = 12,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_6_2 = 13,
+};
/* CIDs for HEVC encoding. */
-#define V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP (V4L2_CID_MPEG_BASE + 600)
-#define V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP (V4L2_CID_MPEG_BASE + 601)
-#define V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP (V4L2_CID_MPEG_BASE + 602)
-#define V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP (V4L2_CID_MPEG_BASE + 603)
-#define V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP (V4L2_CID_MPEG_BASE + 604)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP (V4L2_CID_MPEG_BASE + 605)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE (V4L2_CID_MPEG_BASE + 606)
+#define V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP (V4L2_CID_CODEC_BASE + 600)
+#define V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP (V4L2_CID_CODEC_BASE + 601)
+#define V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP (V4L2_CID_CODEC_BASE + 602)
+#define V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP (V4L2_CID_CODEC_BASE + 603)
+#define V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP (V4L2_CID_CODEC_BASE + 604)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP (V4L2_CID_CODEC_BASE + 605)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE (V4L2_CID_CODEC_BASE + 606)
enum v4l2_mpeg_video_hevc_hier_coding_type {
V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_B = 0,
V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P = 1,
};
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER (V4L2_CID_MPEG_BASE + 607)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP (V4L2_CID_MPEG_BASE + 608)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP (V4L2_CID_MPEG_BASE + 609)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP (V4L2_CID_MPEG_BASE + 610)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP (V4L2_CID_MPEG_BASE + 611)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP (V4L2_CID_MPEG_BASE + 612)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP (V4L2_CID_MPEG_BASE + 613)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP (V4L2_CID_MPEG_BASE + 614)
-#define V4L2_CID_MPEG_VIDEO_HEVC_PROFILE (V4L2_CID_MPEG_BASE + 615)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER (V4L2_CID_CODEC_BASE + 607)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP (V4L2_CID_CODEC_BASE + 608)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP (V4L2_CID_CODEC_BASE + 609)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP (V4L2_CID_CODEC_BASE + 610)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP (V4L2_CID_CODEC_BASE + 611)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP (V4L2_CID_CODEC_BASE + 612)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP (V4L2_CID_CODEC_BASE + 613)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP (V4L2_CID_CODEC_BASE + 614)
+#define V4L2_CID_MPEG_VIDEO_HEVC_PROFILE (V4L2_CID_CODEC_BASE + 615)
enum v4l2_mpeg_video_hevc_profile {
V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN = 0,
V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE = 1,
V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10 = 2,
};
-#define V4L2_CID_MPEG_VIDEO_HEVC_LEVEL (V4L2_CID_MPEG_BASE + 616)
+#define V4L2_CID_MPEG_VIDEO_HEVC_LEVEL (V4L2_CID_CODEC_BASE + 616)
enum v4l2_mpeg_video_hevc_level {
V4L2_MPEG_VIDEO_HEVC_LEVEL_1 = 0,
V4L2_MPEG_VIDEO_HEVC_LEVEL_2 = 1,
@@ -683,64 +766,81 @@ enum v4l2_mpeg_video_hevc_level {
V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1 = 11,
V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2 = 12,
};
-#define V4L2_CID_MPEG_VIDEO_HEVC_FRAME_RATE_RESOLUTION (V4L2_CID_MPEG_BASE + 617)
-#define V4L2_CID_MPEG_VIDEO_HEVC_TIER (V4L2_CID_MPEG_BASE + 618)
+#define V4L2_CID_MPEG_VIDEO_HEVC_FRAME_RATE_RESOLUTION (V4L2_CID_CODEC_BASE + 617)
+#define V4L2_CID_MPEG_VIDEO_HEVC_TIER (V4L2_CID_CODEC_BASE + 618)
enum v4l2_mpeg_video_hevc_tier {
V4L2_MPEG_VIDEO_HEVC_TIER_MAIN = 0,
V4L2_MPEG_VIDEO_HEVC_TIER_HIGH = 1,
};
-#define V4L2_CID_MPEG_VIDEO_HEVC_MAX_PARTITION_DEPTH (V4L2_CID_MPEG_BASE + 619)
-#define V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE (V4L2_CID_MPEG_BASE + 620)
+#define V4L2_CID_MPEG_VIDEO_HEVC_MAX_PARTITION_DEPTH (V4L2_CID_CODEC_BASE + 619)
+#define V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE (V4L2_CID_CODEC_BASE + 620)
enum v4l2_cid_mpeg_video_hevc_loop_filter_mode {
V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED = 0,
V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_ENABLED = 1,
V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY = 2,
};
-#define V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2 (V4L2_CID_MPEG_BASE + 621)
-#define V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2 (V4L2_CID_MPEG_BASE + 622)
-#define V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE (V4L2_CID_MPEG_BASE + 623)
+#define V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2 (V4L2_CID_CODEC_BASE + 621)
+#define V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2 (V4L2_CID_CODEC_BASE + 622)
+#define V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE (V4L2_CID_CODEC_BASE + 623)
enum v4l2_cid_mpeg_video_hevc_refresh_type {
V4L2_MPEG_VIDEO_HEVC_REFRESH_NONE = 0,
V4L2_MPEG_VIDEO_HEVC_REFRESH_CRA = 1,
V4L2_MPEG_VIDEO_HEVC_REFRESH_IDR = 2,
};
-#define V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD (V4L2_CID_MPEG_BASE + 624)
-#define V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU (V4L2_CID_MPEG_BASE + 625)
-#define V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED (V4L2_CID_MPEG_BASE + 626)
-#define V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT (V4L2_CID_MPEG_BASE + 627)
-#define V4L2_CID_MPEG_VIDEO_HEVC_GENERAL_PB (V4L2_CID_MPEG_BASE + 628)
-#define V4L2_CID_MPEG_VIDEO_HEVC_TEMPORAL_ID (V4L2_CID_MPEG_BASE + 629)
-#define V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING (V4L2_CID_MPEG_BASE + 630)
-#define V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1 (V4L2_CID_MPEG_BASE + 631)
-#define V4L2_CID_MPEG_VIDEO_HEVC_INTRA_PU_SPLIT (V4L2_CID_MPEG_BASE + 632)
-#define V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION (V4L2_CID_MPEG_BASE + 633)
-#define V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE (V4L2_CID_MPEG_BASE + 634)
-#define V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD (V4L2_CID_MPEG_BASE + 635)
+#define V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD (V4L2_CID_CODEC_BASE + 624)
+#define V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU (V4L2_CID_CODEC_BASE + 625)
+#define V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED (V4L2_CID_CODEC_BASE + 626)
+#define V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT (V4L2_CID_CODEC_BASE + 627)
+#define V4L2_CID_MPEG_VIDEO_HEVC_GENERAL_PB (V4L2_CID_CODEC_BASE + 628)
+#define V4L2_CID_MPEG_VIDEO_HEVC_TEMPORAL_ID (V4L2_CID_CODEC_BASE + 629)
+#define V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING (V4L2_CID_CODEC_BASE + 630)
+#define V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1 (V4L2_CID_CODEC_BASE + 631)
+#define V4L2_CID_MPEG_VIDEO_HEVC_INTRA_PU_SPLIT (V4L2_CID_CODEC_BASE + 632)
+#define V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION (V4L2_CID_CODEC_BASE + 633)
+#define V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE (V4L2_CID_CODEC_BASE + 634)
+#define V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD (V4L2_CID_CODEC_BASE + 635)
enum v4l2_cid_mpeg_video_hevc_size_of_length_field {
V4L2_MPEG_VIDEO_HEVC_SIZE_0 = 0,
V4L2_MPEG_VIDEO_HEVC_SIZE_1 = 1,
V4L2_MPEG_VIDEO_HEVC_SIZE_2 = 2,
V4L2_MPEG_VIDEO_HEVC_SIZE_4 = 3,
};
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR (V4L2_CID_MPEG_BASE + 636)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR (V4L2_CID_MPEG_BASE + 637)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR (V4L2_CID_MPEG_BASE + 638)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR (V4L2_CID_MPEG_BASE + 639)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR (V4L2_CID_MPEG_BASE + 640)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR (V4L2_CID_MPEG_BASE + 641)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR (V4L2_CID_MPEG_BASE + 642)
-#define V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES (V4L2_CID_MPEG_BASE + 643)
-#define V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR (V4L2_CID_MPEG_BASE + 644)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR (V4L2_CID_CODEC_BASE + 636)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR (V4L2_CID_CODEC_BASE + 637)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR (V4L2_CID_CODEC_BASE + 638)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR (V4L2_CID_CODEC_BASE + 639)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR (V4L2_CID_CODEC_BASE + 640)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR (V4L2_CID_CODEC_BASE + 641)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR (V4L2_CID_CODEC_BASE + 642)
+#define V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES (V4L2_CID_CODEC_BASE + 643)
+#define V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR (V4L2_CID_CODEC_BASE + 644)
+#define V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY (V4L2_CID_CODEC_BASE + 645)
+#define V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE (V4L2_CID_CODEC_BASE + 646)
+enum v4l2_mpeg_video_frame_skip_mode {
+ V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED = 0,
+ V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_LEVEL_LIMIT = 1,
+ V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT = 2,
+};
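
The V4L2_MPEG_VIDEO_BITRATE_MODE_CQ value added earlier in this patch is only meaningful together with the new V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY control. A sketch of enabling constant-quality rate control, assuming an open encoder fd; the quality scale itself is driver-defined, 70 is just an example:

	struct v4l2_ext_control ctrls[2] = {
		{ .id = V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
		  .value = V4L2_MPEG_VIDEO_BITRATE_MODE_CQ },
		{ .id = V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY, .value = 70 },
	};
	struct v4l2_ext_controls arg = {
		.which = V4L2_CTRL_WHICH_CUR_VAL,
		.count = 2,
		.controls = ctrls,
	};
	ioctl(fd, VIDIOC_S_EXT_CTRLS, &arg);
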
+
+#define V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP (V4L2_CID_CODEC_BASE + 647)
+#define V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP (V4L2_CID_CODEC_BASE + 648)
+#define V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP (V4L2_CID_CODEC_BASE + 649)
+#define V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP (V4L2_CID_CODEC_BASE + 650)
+#define V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP (V4L2_CID_CODEC_BASE + 651)
+#define V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP (V4L2_CID_CODEC_BASE + 652)
+
+#define V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY (V4L2_CID_CODEC_BASE + 653)
+#define V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE (V4L2_CID_CODEC_BASE + 654)
/* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */
-#define V4L2_CID_MPEG_CX2341X_BASE (V4L2_CTRL_CLASS_MPEG | 0x1000)
-#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+0)
+#define V4L2_CID_CODEC_CX2341X_BASE (V4L2_CTRL_CLASS_CODEC | 0x1000)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_CODEC_CX2341X_BASE+0)
enum v4l2_mpeg_cx2341x_video_spatial_filter_mode {
V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL = 0,
V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO = 1,
};
-#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER (V4L2_CID_MPEG_CX2341X_BASE+1)
-#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+2)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER (V4L2_CID_CODEC_CX2341X_BASE+1)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE (V4L2_CID_CODEC_CX2341X_BASE+2)
enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type {
V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF = 0,
V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR = 1,
@@ -748,18 +848,18 @@ enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type {
V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_HV_SEPARABLE = 3,
V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE = 4,
};
-#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+3)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE (V4L2_CID_CODEC_CX2341X_BASE+3)
enum v4l2_mpeg_cx2341x_video_chroma_spatial_filter_type {
V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF = 0,
V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR = 1,
};
-#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+4)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE (V4L2_CID_CODEC_CX2341X_BASE+4)
enum v4l2_mpeg_cx2341x_video_temporal_filter_mode {
V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL = 0,
V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO = 1,
};
-#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER (V4L2_CID_MPEG_CX2341X_BASE+5)
-#define V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+6)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER (V4L2_CID_CODEC_CX2341X_BASE+5)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE (V4L2_CID_CODEC_CX2341X_BASE+6)
enum v4l2_mpeg_cx2341x_video_median_filter_type {
V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF = 0,
V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR = 1,
@@ -767,38 +867,38 @@ enum v4l2_mpeg_cx2341x_video_median_filter_type {
V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR_VERT = 3,
V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG = 4,
};
-#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_MPEG_CX2341X_BASE+7)
-#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP (V4L2_CID_MPEG_CX2341X_BASE+8)
-#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_MPEG_CX2341X_BASE+9)
-#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP (V4L2_CID_MPEG_CX2341X_BASE+10)
-#define V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS (V4L2_CID_MPEG_CX2341X_BASE+11)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_CODEC_CX2341X_BASE+7)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP (V4L2_CID_CODEC_CX2341X_BASE+8)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_CODEC_CX2341X_BASE+9)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP (V4L2_CID_CODEC_CX2341X_BASE+10)
+#define V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS (V4L2_CID_CODEC_CX2341X_BASE+11)
/* MPEG-class control IDs specific to the Samsung MFC 5.1 driver as defined by V4L2 */
-#define V4L2_CID_MPEG_MFC51_BASE (V4L2_CTRL_CLASS_MPEG | 0x1100)
+#define V4L2_CID_CODEC_MFC51_BASE (V4L2_CTRL_CLASS_CODEC | 0x1100)
-#define V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY (V4L2_CID_MPEG_MFC51_BASE+0)
-#define V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE (V4L2_CID_MPEG_MFC51_BASE+1)
-#define V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE (V4L2_CID_MPEG_MFC51_BASE+2)
+#define V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY (V4L2_CID_CODEC_MFC51_BASE+0)
+#define V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE (V4L2_CID_CODEC_MFC51_BASE+1)
+#define V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE (V4L2_CID_CODEC_MFC51_BASE+2)
enum v4l2_mpeg_mfc51_video_frame_skip_mode {
V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_DISABLED = 0,
V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_LEVEL_LIMIT = 1,
V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT = 2,
};
-#define V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE (V4L2_CID_MPEG_MFC51_BASE+3)
+#define V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE (V4L2_CID_CODEC_MFC51_BASE+3)
enum v4l2_mpeg_mfc51_video_force_frame_type {
V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_DISABLED = 0,
V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_I_FRAME = 1,
V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_NOT_CODED = 2,
};
-#define V4L2_CID_MPEG_MFC51_VIDEO_PADDING (V4L2_CID_MPEG_MFC51_BASE+4)
-#define V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV (V4L2_CID_MPEG_MFC51_BASE+5)
-#define V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT (V4L2_CID_MPEG_MFC51_BASE+6)
-#define V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF (V4L2_CID_MPEG_MFC51_BASE+7)
-#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY (V4L2_CID_MPEG_MFC51_BASE+50)
-#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK (V4L2_CID_MPEG_MFC51_BASE+51)
-#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH (V4L2_CID_MPEG_MFC51_BASE+52)
-#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC (V4L2_CID_MPEG_MFC51_BASE+53)
-#define V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P (V4L2_CID_MPEG_MFC51_BASE+54)
+#define V4L2_CID_MPEG_MFC51_VIDEO_PADDING (V4L2_CID_CODEC_MFC51_BASE+4)
+#define V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV (V4L2_CID_CODEC_MFC51_BASE+5)
+#define V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT (V4L2_CID_CODEC_MFC51_BASE+6)
+#define V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF (V4L2_CID_CODEC_MFC51_BASE+7)
+#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY (V4L2_CID_CODEC_MFC51_BASE+50)
+#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK (V4L2_CID_CODEC_MFC51_BASE+51)
+#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH (V4L2_CID_CODEC_MFC51_BASE+52)
+#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC (V4L2_CID_CODEC_MFC51_BASE+53)
+#define V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P (V4L2_CID_CODEC_MFC51_BASE+54)
/* Camera class control IDs */
@@ -912,6 +1012,13 @@ enum v4l2_auto_focus_range {
#define V4L2_CID_PAN_SPEED (V4L2_CID_CAMERA_CLASS_BASE+32)
#define V4L2_CID_TILT_SPEED (V4L2_CID_CAMERA_CLASS_BASE+33)
+#define V4L2_CID_CAMERA_ORIENTATION (V4L2_CID_CAMERA_CLASS_BASE+34)
+#define V4L2_CAMERA_ORIENTATION_FRONT 0
+#define V4L2_CAMERA_ORIENTATION_BACK 1
+#define V4L2_CAMERA_ORIENTATION_EXTERNAL 2
+
+#define V4L2_CID_CAMERA_SENSOR_ROTATION (V4L2_CID_CAMERA_CLASS_BASE+35)
+
/* FM Modulator class control IDs */
#define V4L2_CID_FM_TX_CLASS_BASE (V4L2_CTRL_CLASS_FM_TX | 0x900)
@@ -1035,6 +1142,7 @@ enum v4l2_jpeg_chroma_subsampling {
#define V4L2_CID_TEST_PATTERN_BLUE (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 6)
#define V4L2_CID_TEST_PATTERN_GREENB (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 7)
#define V4L2_CID_UNIT_CELL_SIZE (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 8)
+#define V4L2_CID_NOTIFY_GAINS (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 9)
/* Image processing controls */
@@ -1128,4 +1236,1557 @@ enum v4l2_detect_md_mode {
#define V4L2_CID_DETECT_MD_THRESHOLD_GRID (V4L2_CID_DETECT_CLASS_BASE + 3)
#define V4L2_CID_DETECT_MD_REGION_GRID (V4L2_CID_DETECT_CLASS_BASE + 4)
+
+/* Stateless CODECs controls */
+#define V4L2_CID_CODEC_STATELESS_BASE (V4L2_CTRL_CLASS_CODEC_STATELESS | 0x900)
+#define V4L2_CID_CODEC_STATELESS_CLASS (V4L2_CTRL_CLASS_CODEC_STATELESS | 1)
+
+#define V4L2_CID_STATELESS_H264_DECODE_MODE (V4L2_CID_CODEC_STATELESS_BASE + 0)
+/**
+ * enum v4l2_stateless_h264_decode_mode - Decoding mode
+ *
+ * @V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED: indicates that decoding
+ * is performed one slice at a time. In this mode,
+ * V4L2_CID_STATELESS_H264_SLICE_PARAMS must contain the parsed slice
+ * parameters and the OUTPUT buffer must contain a single slice.
+ * V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF feature is used
+ * in order to support multislice frames.
+ * @V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED: indicates that
+ * decoding is performed per frame. The OUTPUT buffer must contain
+ * all slices and also both fields. This mode is typically supported
+ * by device drivers that are able to parse the slice(s) header(s)
+ * in hardware. When this mode is selected,
+ * V4L2_CID_STATELESS_H264_SLICE_PARAMS is not used.
+ */
+enum v4l2_stateless_h264_decode_mode {
+ V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED,
+ V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
+};
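
Drivers report which of the two decode modes they implement, so an application typically reads this menu control before deciding how to fill its OUTPUT buffers. A sketch, assuming an open stateless decoder fd:

	struct v4l2_ext_control ctrl = {
		.id = V4L2_CID_STATELESS_H264_DECODE_MODE,
	};
	struct v4l2_ext_controls arg = {
		.which = V4L2_CTRL_WHICH_CUR_VAL,
		.count = 1,
		.controls = &ctrl,
	};
	if (!ioctl(fd, VIDIOC_G_EXT_CTRLS, &arg) &&
	    ctrl.value == V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED)
		; /* queue one slice per OUTPUT buffer */
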
+
+#define V4L2_CID_STATELESS_H264_START_CODE (V4L2_CID_CODEC_STATELESS_BASE + 1)
+/**
+ * enum v4l2_stateless_h264_start_code - Start code
+ *
+ * @V4L2_STATELESS_H264_START_CODE_NONE: slices are passed
+ * to the driver without any start code.
+ * @V4L2_STATELESS_H264_START_CODE_ANNEX_B: slices are passed
+ * to the driver with an Annex B start code prefix
+ * (legal start codes can be 3-bytes 0x000001 or 4-bytes 0x00000001).
+ * This mode is typically supported by device drivers that parse
+ * the start code in hardware.
+ */
+enum v4l2_stateless_h264_start_code {
+ V4L2_STATELESS_H264_START_CODE_NONE,
+ V4L2_STATELESS_H264_START_CODE_ANNEX_B,
+};
+
+#define V4L2_H264_SPS_CONSTRAINT_SET0_FLAG 0x01
+#define V4L2_H264_SPS_CONSTRAINT_SET1_FLAG 0x02
+#define V4L2_H264_SPS_CONSTRAINT_SET2_FLAG 0x04
+#define V4L2_H264_SPS_CONSTRAINT_SET3_FLAG 0x08
+#define V4L2_H264_SPS_CONSTRAINT_SET4_FLAG 0x10
+#define V4L2_H264_SPS_CONSTRAINT_SET5_FLAG 0x20
+
+#define V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE 0x01
+#define V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS 0x02
+#define V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO 0x04
+#define V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED 0x08
+#define V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY 0x10
+#define V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD 0x20
+#define V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE 0x40
+
+#define V4L2_H264_SPS_HAS_CHROMA_FORMAT(sps) \
+ ((sps)->profile_idc == 100 || (sps)->profile_idc == 110 || \
+ (sps)->profile_idc == 122 || (sps)->profile_idc == 244 || \
+ (sps)->profile_idc == 44 || (sps)->profile_idc == 83 || \
+ (sps)->profile_idc == 86 || (sps)->profile_idc == 118 || \
+ (sps)->profile_idc == 128 || (sps)->profile_idc == 138 || \
+ (sps)->profile_idc == 139 || (sps)->profile_idc == 134 || \
+ (sps)->profile_idc == 135)
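
The macro above lists the profile_idc values for which the H264 specification codes chroma_format_idc explicitly; for every other profile the specification implies a value of 1 (4:2:0). A one-line sketch, where sps is a hypothetical parsed struct v4l2_ctrl_h264_sps:

	/* 1 (4:2:0) is the implied value for profiles outside the list */
	int chroma_format_idc = V4L2_H264_SPS_HAS_CHROMA_FORMAT(&sps) ?
				sps.chroma_format_idc : 1;
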
+
+#define V4L2_CID_STATELESS_H264_SPS (V4L2_CID_CODEC_STATELESS_BASE + 2)
+/**
+ * struct v4l2_ctrl_h264_sps - H264 sequence parameter set
+ *
+ * All the members on this sequence parameter set structure match the
+ * sequence parameter set syntax as specified by the H264 specification.
+ *
+ * @profile_idc: see H264 specification.
+ * @constraint_set_flags: see H264 specification.
+ * @level_idc: see H264 specification.
+ * @seq_parameter_set_id: see H264 specification.
+ * @chroma_format_idc: see H264 specification.
+ * @bit_depth_luma_minus8: see H264 specification.
+ * @bit_depth_chroma_minus8: see H264 specification.
+ * @log2_max_frame_num_minus4: see H264 specification.
+ * @pic_order_cnt_type: see H264 specification.
+ * @log2_max_pic_order_cnt_lsb_minus4: see H264 specification.
+ * @max_num_ref_frames: see H264 specification.
+ * @num_ref_frames_in_pic_order_cnt_cycle: see H264 specification.
+ * @offset_for_ref_frame: see H264 specification.
+ * @offset_for_non_ref_pic: see H264 specification.
+ * @offset_for_top_to_bottom_field: see H264 specification.
+ * @pic_width_in_mbs_minus1: see H264 specification.
+ * @pic_height_in_map_units_minus1: see H264 specification.
+ * @flags: see V4L2_H264_SPS_FLAG_{}.
+ */
+struct v4l2_ctrl_h264_sps {
+ __u8 profile_idc;
+ __u8 constraint_set_flags;
+ __u8 level_idc;
+ __u8 seq_parameter_set_id;
+ __u8 chroma_format_idc;
+ __u8 bit_depth_luma_minus8;
+ __u8 bit_depth_chroma_minus8;
+ __u8 log2_max_frame_num_minus4;
+ __u8 pic_order_cnt_type;
+ __u8 log2_max_pic_order_cnt_lsb_minus4;
+ __u8 max_num_ref_frames;
+ __u8 num_ref_frames_in_pic_order_cnt_cycle;
+ __s32 offset_for_ref_frame[255];
+ __s32 offset_for_non_ref_pic;
+ __s32 offset_for_top_to_bottom_field;
+ __u16 pic_width_in_mbs_minus1;
+ __u16 pic_height_in_map_units_minus1;
+ __u32 flags;
+};
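
A sketch of handing a parsed SPS to the driver through the extended-control interface; only a few representative fields are filled (a real parser would set them all from the bitstream), and fd is an assumed open decoder node:

	struct v4l2_ctrl_h264_sps sps = {
		.profile_idc = 100,			/* High */
		.level_idc = 40,			/* Level 4.0 */
		.chroma_format_idc = 1,			/* 4:2:0 */
		.pic_width_in_mbs_minus1 = 119,		/* 1920 pixels wide */
		.pic_height_in_map_units_minus1 = 67,	/* 1088 pixels high */
	};
	struct v4l2_ext_control ctrl = {
		.id = V4L2_CID_STATELESS_H264_SPS,
		.size = sizeof(sps),
		.ptr = &sps,
	};
	struct v4l2_ext_controls arg = {
		.which = V4L2_CTRL_WHICH_CUR_VAL,
		.count = 1,
		.controls = &ctrl,
	};
	ioctl(fd, VIDIOC_S_EXT_CTRLS, &arg);
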
+
+#define V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE 0x0001
+#define V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT 0x0002
+#define V4L2_H264_PPS_FLAG_WEIGHTED_PRED 0x0004
+#define V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT 0x0008
+#define V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED 0x0010
+#define V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT 0x0020
+#define V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE 0x0040
+#define V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT 0x0080
+
+#define V4L2_CID_STATELESS_H264_PPS (V4L2_CID_CODEC_STATELESS_BASE + 3)
+/**
+ * struct v4l2_ctrl_h264_pps - H264 picture parameter set
+ *
+ * Except where noted, all the members on this picture parameter set
+ * structure match the picture parameter set syntax as specified
+ * by the H264 specification.
+ *
+ * In particular, V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT flag
+ * has a specific meaning. This flag should be set if a non-flat
+ * scaling matrix applies to the picture. In this case, applications
+ * are expected to use V4L2_CID_STATELESS_H264_SCALING_MATRIX,
+ * to pass the values of the non-flat matrices.
+ *
+ * @pic_parameter_set_id: see H264 specification.
+ * @seq_parameter_set_id: see H264 specification.
+ * @num_slice_groups_minus1: see H264 specification.
+ * @num_ref_idx_l0_default_active_minus1: see H264 specification.
+ * @num_ref_idx_l1_default_active_minus1: see H264 specification.
+ * @weighted_bipred_idc: see H264 specification.
+ * @pic_init_qp_minus26: see H264 specification.
+ * @pic_init_qs_minus26: see H264 specification.
+ * @chroma_qp_index_offset: see H264 specification.
+ * @second_chroma_qp_index_offset: see H264 specification.
+ * @flags: see V4L2_H264_PPS_FLAG_{}.
+ */
+struct v4l2_ctrl_h264_pps {
+ __u8 pic_parameter_set_id;
+ __u8 seq_parameter_set_id;
+ __u8 num_slice_groups_minus1;
+ __u8 num_ref_idx_l0_default_active_minus1;
+ __u8 num_ref_idx_l1_default_active_minus1;
+ __u8 weighted_bipred_idc;
+ __s8 pic_init_qp_minus26;
+ __s8 pic_init_qs_minus26;
+ __s8 chroma_qp_index_offset;
+ __s8 second_chroma_qp_index_offset;
+ __u16 flags;
+};
+
+#define V4L2_CID_STATELESS_H264_SCALING_MATRIX (V4L2_CID_CODEC_STATELESS_BASE + 4)
+/**
+ * struct v4l2_ctrl_h264_scaling_matrix - H264 scaling matrices
+ *
+ * @scaling_list_4x4: scaling matrix after applying the inverse
+ * scanning process. Expected list order is Intra Y, Intra Cb,
+ * Intra Cr, Inter Y, Inter Cb, Inter Cr. The values on each
+ * scaling list are expected in raster scan order.
+ * @scaling_list_8x8: scaling matrix after applying the inverse
+ * scanning process. Expected list order is Intra Y, Inter Y,
+ * Intra Cb, Inter Cb, Intra Cr, Inter Cr. The values on each
+ * scaling list are expected in raster scan order.
+ *
+ * Note that the list order is different for the 4x4 and 8x8
+ * matrices as per the H264 specification, see table 7-2 "Assignment
+ * of mnemonic names to scaling list indices and specification of
+ * fall-back rule".
+ */
+struct v4l2_ctrl_h264_scaling_matrix {
+ __u8 scaling_list_4x4[6][16];
+ __u8 scaling_list_8x8[6][64];
+};
+
+struct v4l2_h264_weight_factors {
+ __s16 luma_weight[32];
+ __s16 luma_offset[32];
+ __s16 chroma_weight[32][2];
+ __s16 chroma_offset[32][2];
+};
+
+#define V4L2_H264_CTRL_PRED_WEIGHTS_REQUIRED(pps, slice) \
+ ((((pps)->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED) && \
+ ((slice)->slice_type == V4L2_H264_SLICE_TYPE_P || \
+ (slice)->slice_type == V4L2_H264_SLICE_TYPE_SP)) || \
+ ((pps)->weighted_bipred_idc == 1 && \
+ (slice)->slice_type == V4L2_H264_SLICE_TYPE_B))
+
+#define V4L2_CID_STATELESS_H264_PRED_WEIGHTS (V4L2_CID_CODEC_STATELESS_BASE + 5)
+/**
+ * struct v4l2_ctrl_h264_pred_weights - Prediction weight table
+ *
+ * Prediction weight table, which matches the syntax specified
+ * by the H264 specification.
+ *
+ * @luma_log2_weight_denom: see H264 specification.
+ * @chroma_log2_weight_denom: see H264 specification.
+ * @weight_factors: luma and chroma weight factors.
+ */
+struct v4l2_ctrl_h264_pred_weights {
+ __u16 luma_log2_weight_denom;
+ __u16 chroma_log2_weight_denom;
+ struct v4l2_h264_weight_factors weight_factors[2];
+};
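
Putting the V4L2_H264_CTRL_PRED_WEIGHTS_REQUIRED macro and the structure together: the weight table only needs to be attached when weighted prediction is actually in effect for the slice. A sketch with hypothetical pps, slice and pred_weights variables of the types defined in this patch:

	if (V4L2_H264_CTRL_PRED_WEIGHTS_REQUIRED(&pps, &slice)) {
		struct v4l2_ext_control ctrl = {
			.id = V4L2_CID_STATELESS_H264_PRED_WEIGHTS,
			.size = sizeof(pred_weights),
			.ptr = &pred_weights,
		};
		/* ...submit together with the other per-slice controls */
	}
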
+
+#define V4L2_H264_SLICE_TYPE_P 0
+#define V4L2_H264_SLICE_TYPE_B 1
+#define V4L2_H264_SLICE_TYPE_I 2
+#define V4L2_H264_SLICE_TYPE_SP 3
+#define V4L2_H264_SLICE_TYPE_SI 4
+
+#define V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED 0x01
+#define V4L2_H264_SLICE_FLAG_SP_FOR_SWITCH 0x02
+
+#define V4L2_H264_TOP_FIELD_REF 0x1
+#define V4L2_H264_BOTTOM_FIELD_REF 0x2
+#define V4L2_H264_FRAME_REF 0x3
+
+/**
+ * struct v4l2_h264_reference - H264 picture reference
+ *
+ * @fields: indicates how the picture is referenced.
+ * Valid values are V4L2_H264_{}_REF.
+ * @index: index into v4l2_ctrl_h264_decode_params.dpb[].
+ */
+struct v4l2_h264_reference {
+ __u8 fields;
+ __u8 index;
+};
+
+/*
+ * Maximum DPB size, as specified by section 'A.3.1 Level limits
+ * common to the Baseline, Main, and Extended profiles'.
+ */
+#define V4L2_H264_NUM_DPB_ENTRIES 16
+#define V4L2_H264_REF_LIST_LEN (2 * V4L2_H264_NUM_DPB_ENTRIES)
+
+#define V4L2_CID_STATELESS_H264_SLICE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 6)
+/**
+ * struct v4l2_ctrl_h264_slice_params - H264 slice parameters
+ *
+ * This structure holds the H264 syntax elements that are specified
+ * as non-invariant for the slices in a given frame.
+ *
+ * Slice invariant syntax elements are contained in struct
+ * v4l2_ctrl_h264_decode_params. This is done to reduce the API surface
+ * on frame-based decoders, where slice header parsing is done by the
+ * hardware.
+ *
+ * Slice invariant syntax elements are specified in specification section
+ * "7.4.3 Slice header semantics".
+ *
+ * Except where noted, the members on this struct match the slice header syntax.
+ *
+ * @header_bit_size: offset in bits to slice_data() from the beginning of this slice.
+ * @first_mb_in_slice: see H264 specification.
+ * @slice_type: see H264 specification.
+ * @colour_plane_id: see H264 specification.
+ * @redundant_pic_cnt: see H264 specification.
+ * @cabac_init_idc: see H264 specification.
+ * @slice_qp_delta: see H264 specification.
+ * @slice_qs_delta: see H264 specification.
+ * @disable_deblocking_filter_idc: see H264 specification.
+ * @slice_alpha_c0_offset_div2: see H264 specification.
+ * @slice_beta_offset_div2: see H264 specification.
+ * @num_ref_idx_l0_active_minus1: see H264 specification.
+ * @num_ref_idx_l1_active_minus1: see H264 specification.
+ * @reserved: padding field. Should be zeroed by applications.
+ * @ref_pic_list0: reference picture list 0 after applying the per-slice modifications.
+ * @ref_pic_list1: reference picture list 1 after applying the per-slice modifications.
+ * @flags: see V4L2_H264_SLICE_FLAG_{}.
+ */
+struct v4l2_ctrl_h264_slice_params {
+ __u32 header_bit_size;
+ __u32 first_mb_in_slice;
+ __u8 slice_type;
+ __u8 colour_plane_id;
+ __u8 redundant_pic_cnt;
+ __u8 cabac_init_idc;
+ __s8 slice_qp_delta;
+ __s8 slice_qs_delta;
+ __u8 disable_deblocking_filter_idc;
+ __s8 slice_alpha_c0_offset_div2;
+ __s8 slice_beta_offset_div2;
+ __u8 num_ref_idx_l0_active_minus1;
+ __u8 num_ref_idx_l1_active_minus1;
+
+ __u8 reserved;
+
+ struct v4l2_h264_reference ref_pic_list0[V4L2_H264_REF_LIST_LEN];
+ struct v4l2_h264_reference ref_pic_list1[V4L2_H264_REF_LIST_LEN];
+
+ __u32 flags;
+};
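
In slice-based mode each OUTPUT buffer holds a single slice, and the V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF mechanism mentioned in the decode-mode documentation keeps the CAPTURE buffer attached across slices of the same frame. A queueing sketch, where buf is an assumed struct v4l2_buffer prepared for the OUTPUT queue and last_slice_of_frame is hypothetical parser state:

	if (!last_slice_of_frame)
		buf.flags |= V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	ioctl(fd, VIDIOC_QBUF, &buf);
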
+
+#define V4L2_H264_DPB_ENTRY_FLAG_VALID 0x01
+#define V4L2_H264_DPB_ENTRY_FLAG_ACTIVE 0x02
+#define V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM 0x04
+#define V4L2_H264_DPB_ENTRY_FLAG_FIELD 0x08
+
+/**
+ * struct v4l2_h264_dpb_entry - H264 decoded picture buffer entry
+ *
+ * @reference_ts: timestamp of the V4L2 capture buffer to use as reference.
+ * The timestamp refers to the timestamp field in struct v4l2_buffer.
+ * Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
+ * @pic_num: matches PicNum variable assigned during the reference
+ * picture lists construction process.
+ * @frame_num: frame identifier which matches frame_num syntax element.
+ * @fields: indicates how the DPB entry is referenced. Valid values are
+ * V4L2_H264_{}_REF.
+ * @reserved: padding field. Should be zeroed by applications.
+ * @top_field_order_cnt: matches TopFieldOrderCnt picture value.
+ * @bottom_field_order_cnt: matches BottomFieldOrderCnt picture value.
+ * Note that picture field is indicated by v4l2_buffer.field.
+ * @flags: see V4L2_H264_DPB_ENTRY_FLAG_{}.
+ */
+struct v4l2_h264_dpb_entry {
+ __u64 reference_ts;
+ __u32 pic_num;
+ __u16 frame_num;
+ __u8 fields;
+ __u8 reserved[5];
+ __s32 top_field_order_cnt;
+ __s32 bottom_field_order_cnt;
+ __u32 flags;
+};
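
The reference_ts linkage works by timestamp rather than by buffer index. In userspace the conversion the kerneldoc refers to is a plain timeval-to-nanoseconds fold; a sketch, where capture_buf is a hypothetical struct v4l2_buffer holding the reference frame:

	dpb[i].reference_ts =
		capture_buf.timestamp.tv_sec * 1000000000ULL +
		capture_buf.timestamp.tv_usec * 1000ULL;
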
+
+#define V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC 0x01
+#define V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC 0x02
+#define V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD 0x04
+#define V4L2_H264_DECODE_PARAM_FLAG_PFRAME 0x08
+#define V4L2_H264_DECODE_PARAM_FLAG_BFRAME 0x10
+
+#define V4L2_CID_STATELESS_H264_DECODE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 7)
+/**
+ * struct v4l2_ctrl_h264_decode_params - H264 decoding parameters
+ *
+ * @dpb: decoded picture buffer.
+ * @nal_ref_idc: slice header syntax element.
+ * @frame_num: slice header syntax element.
+ * @top_field_order_cnt: matches TopFieldOrderCnt picture value.
+ * @bottom_field_order_cnt: matches BottomFieldOrderCnt picture value.
+ * Note that picture field is indicated by v4l2_buffer.field.
+ * @idr_pic_id: slice header syntax element.
+ * @pic_order_cnt_lsb: slice header syntax element.
+ * @delta_pic_order_cnt_bottom: slice header syntax element.
+ * @delta_pic_order_cnt0: slice header syntax element.
+ * @delta_pic_order_cnt1: slice header syntax element.
+ * @dec_ref_pic_marking_bit_size: size in bits of dec_ref_pic_marking()
+ * syntax element.
+ * @pic_order_cnt_bit_size: size in bits of pic order count syntax.
+ * @slice_group_change_cycle: slice header syntax element.
+ * @reserved: padding field. Should be zeroed by applications.
+ * @flags: see V4L2_H264_DECODE_PARAM_FLAG_{}.
+ */
+struct v4l2_ctrl_h264_decode_params {
+ struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES];
+ __u16 nal_ref_idc;
+ __u16 frame_num;
+ __s32 top_field_order_cnt;
+ __s32 bottom_field_order_cnt;
+ __u16 idr_pic_id;
+ __u16 pic_order_cnt_lsb;
+ __s32 delta_pic_order_cnt_bottom;
+ __s32 delta_pic_order_cnt0;
+ __s32 delta_pic_order_cnt1;
+ __u32 dec_ref_pic_marking_bit_size;
+ __u32 pic_order_cnt_bit_size;
+ __u32 slice_group_change_cycle;
+
+ __u32 reserved;
+ __u32 flags;
+};
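
These stateless controls are normally bundled per frame through the media request API so they take effect atomically with the matching OUTPUT buffer. A sketch, assuming req_fd is a request allocated from the media device and ctrls is an array holding the SPS, PPS, scaling-matrix and decode-parameter controls built as above:

	struct v4l2_ext_controls arg = {
		.which = V4L2_CTRL_WHICH_REQUEST_VAL,
		.request_fd = req_fd,
		.count = 4,
		.controls = ctrls,
	};
	ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &arg);
	/* ...queue the OUTPUT buffer against the same request, then
	 * queue the request itself with MEDIA_REQUEST_IOC_QUEUE */
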
+
+
+/* Stateless FWHT control, used by the vicodec driver */
+
+/* Current FWHT version */
+#define V4L2_FWHT_VERSION 3
+
+/* Set if this is an interlaced format */
+#define V4L2_FWHT_FL_IS_INTERLACED _BITUL(0)
+/* Set if this is a bottom-first (NTSC) interlaced format */
+#define V4L2_FWHT_FL_IS_BOTTOM_FIRST _BITUL(1)
+/* Set if each 'frame' contains just one field */
+#define V4L2_FWHT_FL_IS_ALTERNATE _BITUL(2)
+/*
+ * If V4L2_FWHT_FL_IS_ALTERNATE was set, then this is set if this
+ * 'frame' is the bottom field, else it is the top field.
+ */
+#define V4L2_FWHT_FL_IS_BOTTOM_FIELD _BITUL(3)
+/* Set if the Y' plane is uncompressed */
+#define V4L2_FWHT_FL_LUMA_IS_UNCOMPRESSED _BITUL(4)
+/* Set if the Cb plane is uncompressed */
+#define V4L2_FWHT_FL_CB_IS_UNCOMPRESSED _BITUL(5)
+/* Set if the Cr plane is uncompressed */
+#define V4L2_FWHT_FL_CR_IS_UNCOMPRESSED _BITUL(6)
+/* Set if the chroma plane is full height, if cleared it is half height */
+#define V4L2_FWHT_FL_CHROMA_FULL_HEIGHT _BITUL(7)
+/* Set if the chroma plane is full width, if cleared it is half width */
+#define V4L2_FWHT_FL_CHROMA_FULL_WIDTH _BITUL(8)
+/* Set if the alpha plane is uncompressed */
+#define V4L2_FWHT_FL_ALPHA_IS_UNCOMPRESSED _BITUL(9)
+/* Set if this is an I Frame */
+#define V4L2_FWHT_FL_I_FRAME _BITUL(10)
+
+/* A 4-values flag - the number of components - 1 */
+#define V4L2_FWHT_FL_COMPONENTS_NUM_MSK GENMASK(18, 16)
+#define V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET 16
+
+/* A 4-values flag - the pixel encoding type */
+#define V4L2_FWHT_FL_PIXENC_MSK GENMASK(20, 19)
+#define V4L2_FWHT_FL_PIXENC_OFFSET 19
+#define V4L2_FWHT_FL_PIXENC_YUV (1 << V4L2_FWHT_FL_PIXENC_OFFSET)
+#define V4L2_FWHT_FL_PIXENC_RGB (2 << V4L2_FWHT_FL_PIXENC_OFFSET)
+#define V4L2_FWHT_FL_PIXENC_HSV (3 << V4L2_FWHT_FL_PIXENC_OFFSET)
+
+#define V4L2_CID_STATELESS_FWHT_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 100)
+/**
+ * struct v4l2_ctrl_fwht_params - FWHT parameters
+ *
+ * @backward_ref_ts: timestamp of the V4L2 capture buffer to use as reference.
+ * The timestamp refers to the timestamp field in struct v4l2_buffer.
+ * Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
+ * @version: must be V4L2_FWHT_VERSION.
+ * @width: width of frame.
+ * @height: height of frame.
+ * @flags: FWHT flags (see V4L2_FWHT_FL_*).
+ * @colorspace: the colorspace (enum v4l2_colorspace).
+ * @xfer_func: the transfer function (enum v4l2_xfer_func).
+ * @ycbcr_enc: the Y'CbCr encoding (enum v4l2_ycbcr_encoding).
+ * @quantization: the quantization (enum v4l2_quantization).
+ */
+struct v4l2_ctrl_fwht_params {
+ __u64 backward_ref_ts;
+ __u32 version;
+ __u32 width;
+ __u32 height;
+ __u32 flags;
+ __u32 colorspace;
+ __u32 xfer_func;
+ __u32 ycbcr_enc;
+ __u32 quantization;
+};
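
The component count and pixel encoding are packed into the flags word using the masks defined above. A decoding sketch for a hypothetical received params structure:

	unsigned int ncomp =
		((params.flags & V4L2_FWHT_FL_COMPONENTS_NUM_MSK) >>
		 V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET) + 1;
	int is_rgb = (params.flags & V4L2_FWHT_FL_PIXENC_MSK) ==
		     V4L2_FWHT_FL_PIXENC_RGB;
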
+
+/* Stateless VP8 control */
+
+#define V4L2_VP8_SEGMENT_FLAG_ENABLED 0x01
+#define V4L2_VP8_SEGMENT_FLAG_UPDATE_MAP 0x02
+#define V4L2_VP8_SEGMENT_FLAG_UPDATE_FEATURE_DATA 0x04
+#define V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE 0x08
+
+/**
+ * struct v4l2_vp8_segment - VP8 segment-based adjustments parameters
+ *
+ * @quant_update: update values for the segment quantizer.
+ * @lf_update: update values for the loop filter level.
+ * @segment_probs: branch probabilities of the segment_id decoding tree.
+ * @padding: padding field. Should be zeroed by applications.
+ * @flags: see V4L2_VP8_SEGMENT_FLAG_{}.
+ *
+ * This structure contains the segment-based adjustment parameters.
+ * See the 'update_segmentation()' part of the frame header syntax,
+ * and section '9.3. Segment-Based Adjustments' of the VP8 specification
+ * for more details.
+ */
+struct v4l2_vp8_segment {
+ __s8 quant_update[4];
+ __s8 lf_update[4];
+ __u8 segment_probs[3];
+ __u8 padding;
+ __u32 flags;
+};
+
+#define V4L2_VP8_LF_ADJ_ENABLE 0x01
+#define V4L2_VP8_LF_DELTA_UPDATE 0x02
+#define V4L2_VP8_LF_FILTER_TYPE_SIMPLE 0x04
+
+/**
+ * struct v4l2_vp8_loop_filter - VP8 loop filter parameters
+ *
+ * @ref_frm_delta: Reference frame signed delta values.
+ * @mb_mode_delta: MB prediction mode signed delta values.
+ * @sharpness_level: matches sharpness_level syntax element.
+ * @level: matches loop_filter_level syntax element.
+ * @padding: padding field. Should be zeroed by applications.
+ * @flags: see V4L2_VP8_LF_{}.
+ *
+ * This structure contains loop filter related parameters.
+ * See the 'mb_lf_adjustments()' part of the frame header syntax,
+ * and section '9.4. Loop Filter Type and Levels' of the VP8 specification
+ * for more details.
+ */
+struct v4l2_vp8_loop_filter {
+ __s8 ref_frm_delta[4];
+ __s8 mb_mode_delta[4];
+ __u8 sharpness_level;
+ __u8 level;
+ __u16 padding;
+ __u32 flags;
+};
+
+/**
+ * struct v4l2_vp8_quantization - VP8 quantization indices
+ *
+ * @y_ac_qi: luma AC coefficient table index.
+ * @y_dc_delta: luma DC delta value.
+ * @y2_dc_delta: y2 block DC delta value.
+ * @y2_ac_delta: y2 block AC delta value.
+ * @uv_dc_delta: chroma DC delta value.
+ * @uv_ac_delta: chroma AC delta value.
+ * @padding: padding field. Should be zeroed by applications.
+ *
+ * This structure contains the quantization indices present
+ * in 'quant_indices()' part of the frame header syntax.
+ * See section '9.6. Dequantization Indices' of the VP8 specification
+ * for more details.
+ */
+struct v4l2_vp8_quantization {
+ __u8 y_ac_qi;
+ __s8 y_dc_delta;
+ __s8 y2_dc_delta;
+ __s8 y2_ac_delta;
+ __s8 uv_dc_delta;
+ __s8 uv_ac_delta;
+ __u16 padding;
+};
+
+#define V4L2_VP8_COEFF_PROB_CNT 11
+#define V4L2_VP8_MV_PROB_CNT 19
+
+/**
+ * struct v4l2_vp8_entropy - VP8 update probabilities
+ *
+ * @coeff_probs: coefficient probability update values.
+ * @y_mode_probs: luma intra-prediction probabilities.
+ * @uv_mode_probs: chroma intra-prediction probabilities.
+ * @mv_probs: mv decoding probability.
+ * @padding: padding field. Should be zeroed by applications.
+ *
+ * This structure contains the update probabilities present in
+ * 'token_prob_update()' and 'mv_prob_update()' part of the frame header.
+ * See section '17.2. Probability Updates' of the VP8 specification
+ * for more details.
+ */
+struct v4l2_vp8_entropy {
+ __u8 coeff_probs[4][8][3][V4L2_VP8_COEFF_PROB_CNT];
+ __u8 y_mode_probs[4];
+ __u8 uv_mode_probs[3];
+ __u8 mv_probs[2][V4L2_VP8_MV_PROB_CNT];
+ __u8 padding[3];
+};
+
+/**
+ * struct v4l2_vp8_entropy_coder_state - VP8 boolean coder state
+ *
+ * @range: coder state value for "Range"
+ * @value: coder state value for "Value"
+ * @bit_count: number of bits remaining in the "Value" coder state.
+ * @padding: padding field. Should be zeroed by applications.
+ *
+ * This structure contains the state for the boolean coder, as
+ * explained in section '7. Boolean Entropy Decoder' of the VP8 specification.
+ */
+struct v4l2_vp8_entropy_coder_state {
+ __u8 range;
+ __u8 value;
+ __u8 bit_count;
+ __u8 padding;
+};
+
+#define V4L2_VP8_FRAME_FLAG_KEY_FRAME 0x01
+#define V4L2_VP8_FRAME_FLAG_EXPERIMENTAL 0x02
+#define V4L2_VP8_FRAME_FLAG_SHOW_FRAME 0x04
+#define V4L2_VP8_FRAME_FLAG_MB_NO_SKIP_COEFF 0x08
+#define V4L2_VP8_FRAME_FLAG_SIGN_BIAS_GOLDEN 0x10
+#define V4L2_VP8_FRAME_FLAG_SIGN_BIAS_ALT 0x20
+
+#define V4L2_VP8_FRAME_IS_KEY_FRAME(hdr) \
+ (!!((hdr)->flags & V4L2_VP8_FRAME_FLAG_KEY_FRAME))
+
+#define V4L2_CID_STATELESS_VP8_FRAME (V4L2_CID_CODEC_STATELESS_BASE + 200)
+/**
+ * struct v4l2_ctrl_vp8_frame - VP8 frame parameters
+ *
+ * @segment: segmentation parameters. See &v4l2_vp8_segment for more details
+ * @lf: loop filter parameters. See &v4l2_vp8_loop_filter for more details
+ * @quant: quantization parameters. See &v4l2_vp8_quantization for more details
+ * @entropy: update probabilities. See &v4l2_vp8_entropy for more details
+ * @coder_state: boolean coder state. See &v4l2_vp8_entropy_coder_state for more details
+ * @width: frame width.
+ * @height: frame height.
+ * @horizontal_scale: horizontal scaling factor.
+ * @vertical_scale: vertical scaling factor.
+ * @version: bitstream version.
+ * @prob_skip_false: frame header syntax element.
+ * @prob_intra: frame header syntax element.
+ * @prob_last: frame header syntax element.
+ * @prob_gf: frame header syntax element.
+ * @num_dct_parts: number of DCT coefficient partitions.
+ * @first_part_size: size of the first partition, i.e. the control partition.
+ * @first_part_header_bits: size in bits of the first partition header portion.
+ * @dct_part_sizes: sizes of the DCT coefficient partitions.
+ * @last_frame_ts: "last" reference buffer timestamp.
+ * The timestamp refers to the timestamp field in struct v4l2_buffer.
+ * Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
+ * @golden_frame_ts: "golden" reference buffer timestamp.
+ * @alt_frame_ts: "alt" reference buffer timestamp.
+ * @flags: see V4L2_VP8_FRAME_FLAG_{}.
+ */
+struct v4l2_ctrl_vp8_frame {
+ struct v4l2_vp8_segment segment;
+ struct v4l2_vp8_loop_filter lf;
+ struct v4l2_vp8_quantization quant;
+ struct v4l2_vp8_entropy entropy;
+ struct v4l2_vp8_entropy_coder_state coder_state;
+
+ __u16 width;
+ __u16 height;
+
+ __u8 horizontal_scale;
+ __u8 vertical_scale;
+
+ __u8 version;
+ __u8 prob_skip_false;
+ __u8 prob_intra;
+ __u8 prob_last;
+ __u8 prob_gf;
+ __u8 num_dct_parts;
+
+ __u32 first_part_size;
+ __u32 first_part_header_bits;
+ __u32 dct_part_sizes[8];
+
+ __u64 last_frame_ts;
+ __u64 golden_frame_ts;
+ __u64 alt_frame_ts;
+
+ __u64 flags;
+};
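+
+/*
+ * Illustrative sketch (editor's example, not part of the UAPI): filling the
+ * "last" reference timestamp from a previously queued capture buffer and
+ * using the key frame macro. 'ref_buf' and 'parsed_flags' are hypothetical
+ * values produced by the application's bitstream parser.
+ *
+ *	struct v4l2_ctrl_vp8_frame frame = {};
+ *
+ *	frame.flags = parsed_flags;
+ *	if (!V4L2_VP8_FRAME_IS_KEY_FRAME(&frame))
+ *		frame.last_frame_ts = v4l2_timeval_to_ns(&ref_buf.timestamp);
+ */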
+
+/* Stateless MPEG-2 controls */
+
+#define V4L2_MPEG2_SEQ_FLAG_PROGRESSIVE 0x01
+
+#define V4L2_CID_STATELESS_MPEG2_SEQUENCE (V4L2_CID_CODEC_STATELESS_BASE+220)
+/**
+ * struct v4l2_ctrl_mpeg2_sequence - MPEG-2 sequence header
+ *
+ * All the members of this structure match the sequence header and sequence
+ * extension syntaxes as specified by the MPEG-2 specification.
+ *
+ * Fields horizontal_size, vertical_size and vbv_buffer_size are each a
+ * combination of the respective _value and _extension syntax elements,
+ * as described in section 6.3.3 "Sequence header".
+ *
+ * @horizontal_size: combination of elements horizontal_size_value and
+ * horizontal_size_extension.
+ * @vertical_size: combination of elements vertical_size_value and
+ * vertical_size_extension.
+ * @vbv_buffer_size: combination of elements vbv_buffer_size_value and
+ * vbv_buffer_size_extension.
+ * @profile_and_level_indication: see MPEG-2 specification.
+ * @chroma_format: see MPEG-2 specification.
+ * @flags: see V4L2_MPEG2_SEQ_FLAG_{}.
+ */
+struct v4l2_ctrl_mpeg2_sequence {
+ __u16 horizontal_size;
+ __u16 vertical_size;
+ __u32 vbv_buffer_size;
+ __u16 profile_and_level_indication;
+ __u8 chroma_format;
+ __u8 flags;
+};
+
+#define V4L2_MPEG2_PIC_CODING_TYPE_I 1
+#define V4L2_MPEG2_PIC_CODING_TYPE_P 2
+#define V4L2_MPEG2_PIC_CODING_TYPE_B 3
+#define V4L2_MPEG2_PIC_CODING_TYPE_D 4
+
+#define V4L2_MPEG2_PIC_TOP_FIELD 0x1
+#define V4L2_MPEG2_PIC_BOTTOM_FIELD 0x2
+#define V4L2_MPEG2_PIC_FRAME 0x3
+
+#define V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST 0x0001
+#define V4L2_MPEG2_PIC_FLAG_FRAME_PRED_DCT 0x0002
+#define V4L2_MPEG2_PIC_FLAG_CONCEALMENT_MV 0x0004
+#define V4L2_MPEG2_PIC_FLAG_Q_SCALE_TYPE 0x0008
+#define V4L2_MPEG2_PIC_FLAG_INTRA_VLC 0x0010
+#define V4L2_MPEG2_PIC_FLAG_ALT_SCAN 0x0020
+#define V4L2_MPEG2_PIC_FLAG_REPEAT_FIRST 0x0040
+#define V4L2_MPEG2_PIC_FLAG_PROGRESSIVE 0x0080
+
+#define V4L2_CID_STATELESS_MPEG2_PICTURE (V4L2_CID_CODEC_STATELESS_BASE+221)
+/**
+ * struct v4l2_ctrl_mpeg2_picture - MPEG-2 picture header
+ *
+ * All the members of this structure match the picture header and picture
+ * coding extension syntaxes as specified by the MPEG-2 specification.
+ *
+ * @backward_ref_ts: timestamp of the V4L2 capture buffer to use as
+ * reference for backward prediction.
+ * @forward_ref_ts: timestamp of the V4L2 capture buffer to use as
+ * reference for forward prediction. These timestamps refer to the
+ * timestamp field in struct v4l2_buffer. Use v4l2_timeval_to_ns()
+ * to convert the struct timeval to a __u64.
+ * @flags: see V4L2_MPEG2_PIC_FLAG_{}.
+ * @f_code: see MPEG-2 specification.
+ * @picture_coding_type: see MPEG-2 specification.
+ * @picture_structure: see V4L2_MPEG2_PIC_{TOP_FIELD,BOTTOM_FIELD,FRAME}.
+ * @intra_dc_precision: see MPEG-2 specification.
+ * @reserved: padding field. Should be zeroed by applications.
+ */
+struct v4l2_ctrl_mpeg2_picture {
+ __u64 backward_ref_ts;
+ __u64 forward_ref_ts;
+ __u32 flags;
+ __u8 f_code[2][2];
+ __u8 picture_coding_type;
+ __u8 picture_structure;
+ __u8 intra_dc_precision;
+ __u8 reserved[5];
+};
+
+#define V4L2_CID_STATELESS_MPEG2_QUANTISATION (V4L2_CID_CODEC_STATELESS_BASE+222)
+/**
+ * struct v4l2_ctrl_mpeg2_quantisation - MPEG-2 quantisation
+ *
+ * Quantisation matrices as specified by section 6.3.7
+ * "Quant matrix extension".
+ *
+ * @intra_quantiser_matrix: The quantisation matrix coefficients
+ * for intra-coded frames, in zigzag scanning order. It is relevant
+ * for both luma and chroma components, although it can be superseded
+ * by the chroma-specific matrix for non-4:2:0 YUV formats.
+ * @non_intra_quantiser_matrix: The quantisation matrix coefficients
+ * for non-intra-coded frames, in zigzag scanning order. It is relevant
+ * for both luma and chroma components, although it can be superseded
+ * by the chroma-specific matrix for non-4:2:0 YUV formats.
+ * @chroma_intra_quantiser_matrix: The quantisation matrix coefficients
+ * for the chrominance component of intra-coded frames, in zigzag scanning
+ * order. Only relevant for 4:2:2 and 4:4:4 YUV formats.
+ * @chroma_non_intra_quantiser_matrix: The quantisation matrix coefficients
+ * for the chrominance component of non-intra-coded frames, in zigzag scanning
+ * order. Only relevant for 4:2:2 and 4:4:4 YUV formats.
+ */
+struct v4l2_ctrl_mpeg2_quantisation {
+ __u8 intra_quantiser_matrix[64];
+ __u8 non_intra_quantiser_matrix[64];
+ __u8 chroma_intra_quantiser_matrix[64];
+ __u8 chroma_non_intra_quantiser_matrix[64];
+};
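+
+/*
+ * Illustrative sketch (editor's example, not part of the UAPI): the three
+ * MPEG-2 stateless controls are typically set together through
+ * VIDIOC_S_EXT_CTRLS, usually attached to a media request. 'seq', 'pic',
+ * 'quant', 'req_fd' and 'video_fd' are hypothetical application variables.
+ *
+ *	struct v4l2_ext_control ctrls[] = {
+ *		{ .id = V4L2_CID_STATELESS_MPEG2_SEQUENCE,
+ *		  .size = sizeof(seq), .ptr = &seq },
+ *		{ .id = V4L2_CID_STATELESS_MPEG2_PICTURE,
+ *		  .size = sizeof(pic), .ptr = &pic },
+ *		{ .id = V4L2_CID_STATELESS_MPEG2_QUANTISATION,
+ *		  .size = sizeof(quant), .ptr = &quant },
+ *	};
+ *	struct v4l2_ext_controls arg = {
+ *		.which = V4L2_CTRL_WHICH_REQUEST_VAL,
+ *		.request_fd = req_fd,
+ *		.count = 3,
+ *		.controls = ctrls,
+ *	};
+ *	ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &arg);
+ */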
+
+#define V4L2_CID_STATELESS_HEVC_SPS (V4L2_CID_CODEC_STATELESS_BASE + 400)
+#define V4L2_CID_STATELESS_HEVC_PPS (V4L2_CID_CODEC_STATELESS_BASE + 401)
+#define V4L2_CID_STATELESS_HEVC_SLICE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 402)
+#define V4L2_CID_STATELESS_HEVC_SCALING_MATRIX (V4L2_CID_CODEC_STATELESS_BASE + 403)
+#define V4L2_CID_STATELESS_HEVC_DECODE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 404)
+#define V4L2_CID_STATELESS_HEVC_DECODE_MODE (V4L2_CID_CODEC_STATELESS_BASE + 405)
+#define V4L2_CID_STATELESS_HEVC_START_CODE (V4L2_CID_CODEC_STATELESS_BASE + 406)
+#define V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS (V4L2_CID_CODEC_STATELESS_BASE + 407)
+
+enum v4l2_stateless_hevc_decode_mode {
+ V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED,
+ V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
+};
+
+enum v4l2_stateless_hevc_start_code {
+ V4L2_STATELESS_HEVC_START_CODE_NONE,
+ V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
+};
+
+#define V4L2_HEVC_SLICE_TYPE_B 0
+#define V4L2_HEVC_SLICE_TYPE_P 1
+#define V4L2_HEVC_SLICE_TYPE_I 2
+
+#define V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE (1ULL << 0)
+#define V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED (1ULL << 1)
+#define V4L2_HEVC_SPS_FLAG_AMP_ENABLED (1ULL << 2)
+#define V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET (1ULL << 3)
+#define V4L2_HEVC_SPS_FLAG_PCM_ENABLED (1ULL << 4)
+#define V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED (1ULL << 5)
+#define V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT (1ULL << 6)
+#define V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED (1ULL << 7)
+#define V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED (1ULL << 8)
+
+/**
+ * struct v4l2_ctrl_hevc_sps - ITU-T Rec. H.265: Sequence parameter set
+ *
+ * @video_parameter_set_id: specifies the value of the
+ * vps_video_parameter_set_id of the active VPS
+ * @seq_parameter_set_id: provides an identifier for the SPS for
+ * reference by other syntax elements
+ * @pic_width_in_luma_samples: specifies the width of each decoded picture
+ * in units of luma samples
+ * @pic_height_in_luma_samples: specifies the height of each decoded picture
+ * in units of luma samples
+ * @bit_depth_luma_minus8: this value plus 8 specifies the bit depth of the
+ * samples of the luma array
+ * @bit_depth_chroma_minus8: this value plus 8 specifies the bit depth of the
+ * samples of the chroma arrays
+ * @log2_max_pic_order_cnt_lsb_minus4: this value plus 4 specifies the value of
+ * the variable MaxPicOrderCntLsb
+ * @sps_max_dec_pic_buffering_minus1: this value plus 1 specifies the maximum
+ * required size of the decoded picture
+ * buffer for the codec video sequence
+ * @sps_max_num_reorder_pics: indicates the maximum allowed number of pictures
+ * that can precede any picture in decoding order
+ * and follow it in output order
+ * @sps_max_latency_increase_plus1: when not equal to 0, is used to compute the
+ * value of the SpsMaxLatencyPictures array
+ * @log2_min_luma_coding_block_size_minus3: plus 3 specifies the minimum
+ * luma coding block size
+ * @log2_diff_max_min_luma_coding_block_size: specifies the difference between
+ * the maximum and minimum luma
+ * coding block size
+ * @log2_min_luma_transform_block_size_minus2: plus 2 specifies the minimum luma
+ * transform block size
+ * @log2_diff_max_min_luma_transform_block_size: specifies the difference between
+ * the maximum and minimum luma
+ * transform block size
+ * @max_transform_hierarchy_depth_inter: specifies the maximum hierarchy
+ * depth for transform units of
+ * coding units coded in inter
+ * prediction mode
+ * @max_transform_hierarchy_depth_intra: specifies the maximum hierarchy
+ * depth for transform units of
+ * coding units coded in intra
+ * prediction mode
+ * @pcm_sample_bit_depth_luma_minus1: this value plus 1 specifies the number of
+ * bits used to represent each of PCM sample
+ * values of the luma component
+ * @pcm_sample_bit_depth_chroma_minus1: this value plus 1 specifies the number
+ * of bits used to represent each of PCM
+ * sample values of the chroma components
+ * @log2_min_pcm_luma_coding_block_size_minus3: this value plus 3 specifies the
+ * minimum size of coding blocks
+ * @log2_diff_max_min_pcm_luma_coding_block_size: specifies the difference between
+ * the maximum and minimum size of
+ * coding blocks
+ * @num_short_term_ref_pic_sets: specifies the number of st_ref_pic_set()
+ * syntax structures included in the SPS
+ * @num_long_term_ref_pics_sps: specifies the number of candidate long-term
+ * reference pictures that are specified in the SPS
+ * @chroma_format_idc: specifies the chroma sampling
+ * @sps_max_sub_layers_minus1: this value plus 1 specifies the maximum number
+ * of temporal sub-layers
+ * @reserved: padding field. Should be zeroed by applications.
+ * @flags: see V4L2_HEVC_SPS_FLAG_{}
+ */
+struct v4l2_ctrl_hevc_sps {
+ __u8 video_parameter_set_id;
+ __u8 seq_parameter_set_id;
+ __u16 pic_width_in_luma_samples;
+ __u16 pic_height_in_luma_samples;
+ __u8 bit_depth_luma_minus8;
+ __u8 bit_depth_chroma_minus8;
+ __u8 log2_max_pic_order_cnt_lsb_minus4;
+ __u8 sps_max_dec_pic_buffering_minus1;
+ __u8 sps_max_num_reorder_pics;
+ __u8 sps_max_latency_increase_plus1;
+ __u8 log2_min_luma_coding_block_size_minus3;
+ __u8 log2_diff_max_min_luma_coding_block_size;
+ __u8 log2_min_luma_transform_block_size_minus2;
+ __u8 log2_diff_max_min_luma_transform_block_size;
+ __u8 max_transform_hierarchy_depth_inter;
+ __u8 max_transform_hierarchy_depth_intra;
+ __u8 pcm_sample_bit_depth_luma_minus1;
+ __u8 pcm_sample_bit_depth_chroma_minus1;
+ __u8 log2_min_pcm_luma_coding_block_size_minus3;
+ __u8 log2_diff_max_min_pcm_luma_coding_block_size;
+ __u8 num_short_term_ref_pic_sets;
+ __u8 num_long_term_ref_pics_sps;
+ __u8 chroma_format_idc;
+ __u8 sps_max_sub_layers_minus1;
+
+ __u8 reserved[6];
+ __u64 flags;
+};
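+
+/*
+ * Illustrative sketch (editor's example, not part of the UAPI): deriving a
+ * few values from the "minus" fields, following the HEVC specification.
+ * 'sps' is a hypothetical, already filled struct v4l2_ctrl_hevc_sps.
+ *
+ *	unsigned int bit_depth_luma = sps.bit_depth_luma_minus8 + 8;
+ *	unsigned int min_cb_log2 = sps.log2_min_luma_coding_block_size_minus3 + 3;
+ *	unsigned int ctb_log2 = min_cb_log2 +
+ *				sps.log2_diff_max_min_luma_coding_block_size;
+ *	unsigned int ctb_size = 1U << ctb_log2;	// CTB size in luma samples
+ */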
+
+#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED (1ULL << 0)
+#define V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT (1ULL << 1)
+#define V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED (1ULL << 2)
+#define V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT (1ULL << 3)
+#define V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED (1ULL << 4)
+#define V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED (1ULL << 5)
+#define V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED (1ULL << 6)
+#define V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT (1ULL << 7)
+#define V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED (1ULL << 8)
+#define V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED (1ULL << 9)
+#define V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED (1ULL << 10)
+#define V4L2_HEVC_PPS_FLAG_TILES_ENABLED (1ULL << 11)
+#define V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED (1ULL << 12)
+#define V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED (1ULL << 13)
+#define V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 14)
+#define V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED (1ULL << 15)
+#define V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER (1ULL << 16)
+#define V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT (1ULL << 17)
+#define V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT (1ULL << 18)
+#define V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT (1ULL << 19)
+#define V4L2_HEVC_PPS_FLAG_UNIFORM_SPACING (1ULL << 20)
+
+/**
+ * struct v4l2_ctrl_hevc_pps - ITU-T Rec. H.265: Picture parameter set
+ *
+ * @pic_parameter_set_id: identifies the PPS for reference by other
+ * syntax elements
+ * @num_extra_slice_header_bits: specifies the number of extra slice header
+ * bits that are present in the slice header RBSP
+ * for coded pictures referring to the PPS.
+ * @num_ref_idx_l0_default_active_minus1: this value plus 1 specifies the
+ * inferred value of num_ref_idx_l0_active_minus1
+ * @num_ref_idx_l1_default_active_minus1: this value plus 1 specifies the
+ * inferred value of num_ref_idx_l1_active_minus1
+ * @init_qp_minus26: this value plus 26 specifies the initial value of SliceQpY for
+ * each slice referring to the PPS
+ * @diff_cu_qp_delta_depth: specifies the difference between the luma coding
+ * tree block size and the minimum luma coding block
+ * size of coding units that convey cu_qp_delta_abs
+ * and cu_qp_delta_sign_flag
+ * @pps_cb_qp_offset: specifies the offset to the luma quantization parameter
+ * used to derive the Cb chroma quantization parameter
+ * @pps_cr_qp_offset: specifies the offset to the luma quantization parameter
+ * used to derive the Cr chroma quantization parameter
+ * @num_tile_columns_minus1: this value plus 1 specifies the number of tile columns
+ * partitioning the picture
+ * @num_tile_rows_minus1: this value plus 1 specifies the number of tile rows partitioning
+ * the picture
+ * @column_width_minus1: this value plus 1 specifies the width of each tile column in
+ * units of coding tree blocks
+ * @row_height_minus1: this value plus 1 specifies the height of each tile row in
+ * units of coding tree blocks
+ * @pps_beta_offset_div2: specifies the default deblocking parameter offset for
+ * beta divided by 2
+ * @pps_tc_offset_div2: specifies the default deblocking parameter offset for tC
+ * divided by 2
+ * @log2_parallel_merge_level_minus2: this value plus 2 specifies the value of
+ * the variable Log2ParMrgLevel
+ * @reserved: padding field. Should be zeroed by applications.
+ * @flags: see V4L2_HEVC_PPS_FLAG_{}
+ */
+struct v4l2_ctrl_hevc_pps {
+ __u8 pic_parameter_set_id;
+ __u8 num_extra_slice_header_bits;
+ __u8 num_ref_idx_l0_default_active_minus1;
+ __u8 num_ref_idx_l1_default_active_minus1;
+ __s8 init_qp_minus26;
+ __u8 diff_cu_qp_delta_depth;
+ __s8 pps_cb_qp_offset;
+ __s8 pps_cr_qp_offset;
+ __u8 num_tile_columns_minus1;
+ __u8 num_tile_rows_minus1;
+ __u8 column_width_minus1[20];
+ __u8 row_height_minus1[22];
+ __s8 pps_beta_offset_div2;
+ __s8 pps_tc_offset_div2;
+ __u8 log2_parallel_merge_level_minus2;
+ __u8 reserved;
+ __u64 flags;
+};
+
+#define V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE 0x01
+
+#define V4L2_HEVC_SEI_PIC_STRUCT_FRAME 0
+#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_FIELD 1
+#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_FIELD 2
+#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_BOTTOM 3
+#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_TOP 4
+#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_BOTTOM_TOP 5
+#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM 6
+#define V4L2_HEVC_SEI_PIC_STRUCT_FRAME_DOUBLING 7
+#define V4L2_HEVC_SEI_PIC_STRUCT_FRAME_TRIPLING 8
+#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_PAIRED_PREVIOUS_BOTTOM 9
+#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_PAIRED_PREVIOUS_TOP 10
+#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_PAIRED_NEXT_BOTTOM 11
+#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_PAIRED_NEXT_TOP 12
+
+#define V4L2_HEVC_DPB_ENTRIES_NUM_MAX 16
+
+/**
+ * struct v4l2_hevc_dpb_entry - HEVC decoded picture buffer entry
+ *
+ * @timestamp: timestamp of the V4L2 capture buffer to use as reference.
+ * @flags: flags for the reference frame, see V4L2_HEVC_DPB_ENTRY_{}.
+ * @field_pic: whether the reference is a field picture or a frame.
+ * @reserved: padding field. Should be zeroed by applications.
+ * @pic_order_cnt_val: the picture order count of the reference picture.
+ */
+struct v4l2_hevc_dpb_entry {
+ __u64 timestamp;
+ __u8 flags;
+ __u8 field_pic;
+ __u16 reserved;
+ __s32 pic_order_cnt_val;
+};
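+
+/*
+ * Illustrative sketch (editor's example, not part of the UAPI): a DPB entry
+ * pointing at a previously decoded capture buffer. 'ref_buf', 'is_long_term'
+ * and 'poc' are hypothetical application variables.
+ *
+ *	struct v4l2_hevc_dpb_entry entry = {
+ *		.timestamp = v4l2_timeval_to_ns(&ref_buf.timestamp),
+ *		.flags = is_long_term ? V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE : 0,
+ *		.pic_order_cnt_val = poc,
+ *	};
+ */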
+
+/**
+ * struct v4l2_hevc_pred_weight_table - HEVC weighted prediction parameters
+ *
+ * @delta_luma_weight_l0: the difference of the weighting factor applied
+ * to the luma prediction value for list 0
+ * @luma_offset_l0: the additive offset applied to the luma prediction value
+ * for list 0
+ * @delta_chroma_weight_l0: the difference of the weighting factor applied
+ * to the chroma prediction values for list 0
+ * @chroma_offset_l0: the difference of the additive offset applied to
+ * the chroma prediction values for list 0
+ * @delta_luma_weight_l1: the difference of the weighting factor applied
+ * to the luma prediction value for list 1
+ * @luma_offset_l1: the additive offset applied to the luma prediction value
+ * for list 1
+ * @delta_chroma_weight_l1: the difference of the weighting factor applied
+ * to the chroma prediction values for list 1
+ * @chroma_offset_l1: the difference of the additive offset applied to
+ * the chroma prediction values for list 1
+ * @luma_log2_weight_denom: the base 2 logarithm of the denominator for
+ * all luma weighting factors
+ * @delta_chroma_log2_weight_denom: the difference of the base 2 logarithm
+ * of the denominator for all chroma
+ * weighting factors
+ */
+struct v4l2_hevc_pred_weight_table {
+ __s8 delta_luma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 luma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 delta_chroma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+ __s8 chroma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+
+ __s8 delta_luma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 luma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 delta_chroma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+ __s8 chroma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+
+ __u8 luma_log2_weight_denom;
+ __s8 delta_chroma_log2_weight_denom;
+};
+
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_LUMA (1ULL << 0)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_CHROMA (1ULL << 1)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_TEMPORAL_MVP_ENABLED (1ULL << 2)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_MVD_L1_ZERO (1ULL << 3)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_CABAC_INIT (1ULL << 4)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_COLLOCATED_FROM_L0 (1ULL << 5)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_USE_INTEGER_MV (1ULL << 6)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED (1ULL << 7)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 8)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT (1ULL << 9)
+
+/**
+ * struct v4l2_ctrl_hevc_slice_params - HEVC slice parameters
+ *
+ * This control is a dynamically sized 1-dimensional array; the
+ * V4L2_CTRL_FLAG_DYNAMIC_ARRAY flag must be set when using it.
+ *
+ * @bit_size: size (in bits) of the current slice data
+ * @data_byte_offset: offset (in bytes) to the video data in the current slice data
+ * @num_entry_point_offsets: specifies the number of entry point offset syntax
+ * elements in the slice header.
+ * @nal_unit_type: specifies the type of RBSP data structure contained in the
+ * NAL unit, as defined in the HEVC specification
+ * @nuh_temporal_id_plus1: this value minus 1 specifies a temporal identifier
+ * for the NAL unit
+ * @slice_type: see V4L2_HEVC_SLICE_TYPE_{}
+ * @colour_plane_id: specifies the colour plane associated with the current slice
+ * @slice_pic_order_cnt: specifies the picture order count
+ * @num_ref_idx_l0_active_minus1: this value plus 1 specifies the maximum
+ * reference index for reference picture list 0
+ * that may be used to decode the slice
+ * @num_ref_idx_l1_active_minus1: this value plus 1 specifies the maximum
+ * reference index for reference picture list 1
+ * that may be used to decode the slice
+ * @collocated_ref_idx: specifies the reference index of the collocated picture used
+ * for temporal motion vector prediction
+ * @five_minus_max_num_merge_cand: specifies the maximum number of merging
+ * motion vector prediction candidates supported in
+ * the slice subtracted from 5
+ * @slice_qp_delta: specifies the initial value of QpY to be used for the coding
+ * blocks in the slice
+ * @slice_cb_qp_offset: specifies a difference to be added to the value of pps_cb_qp_offset
+ * @slice_cr_qp_offset: specifies a difference to be added to the value of pps_cr_qp_offset
+ * @slice_act_y_qp_offset: screen content extension parameters
+ * @slice_act_cb_qp_offset: screen content extension parameters
+ * @slice_act_cr_qp_offset: screen content extension parameters
+ * @slice_beta_offset_div2: specifies the deblocking parameter offset for beta divided by 2
+ * @slice_tc_offset_div2: specifies the deblocking parameter offset for tC divided by 2
+ * @pic_struct: indicates whether a picture should be displayed as a frame or as one or
+ * more fields
+ * @reserved0: padding field. Should be zeroed by applications.
+ * @slice_segment_addr: specifies the address of the first coding tree block in
+ * the slice segment
+ * @ref_idx_l0: the list of L0 reference elements as indices in the DPB
+ * @ref_idx_l1: the list of L1 reference elements as indices in the DPB
+ * @short_term_ref_pic_set_size: specifies the size of short-term reference
+ * pictures set included in the SPS
+ * @long_term_ref_pic_set_size: specifies the size of long-term reference
+ * pictures set included in the SPS
+ * @pred_weight_table: the prediction weight coefficients for inter-picture
+ * prediction
+ * @reserved1: padding field. Should be zeroed by applications.
+ * @flags: see V4L2_HEVC_SLICE_PARAMS_FLAG_{}
+ */
+struct v4l2_ctrl_hevc_slice_params {
+ __u32 bit_size;
+ __u32 data_byte_offset;
+ __u32 num_entry_point_offsets;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: NAL unit header */
+ __u8 nal_unit_type;
+ __u8 nuh_temporal_id_plus1;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
+ __u8 slice_type;
+ __u8 colour_plane_id;
+ __s32 slice_pic_order_cnt;
+ __u8 num_ref_idx_l0_active_minus1;
+ __u8 num_ref_idx_l1_active_minus1;
+ __u8 collocated_ref_idx;
+ __u8 five_minus_max_num_merge_cand;
+ __s8 slice_qp_delta;
+ __s8 slice_cb_qp_offset;
+ __s8 slice_cr_qp_offset;
+ __s8 slice_act_y_qp_offset;
+ __s8 slice_act_cb_qp_offset;
+ __s8 slice_act_cr_qp_offset;
+ __s8 slice_beta_offset_div2;
+ __s8 slice_tc_offset_div2;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture timing SEI message */
+ __u8 pic_struct;
+
+ __u8 reserved0[3];
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
+ __u32 slice_segment_addr;
+ __u8 ref_idx_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u8 ref_idx_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u16 short_term_ref_pic_set_size;
+ __u16 long_term_ref_pic_set_size;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Weighted prediction parameter */
+ struct v4l2_hevc_pred_weight_table pred_weight_table;
+
+ __u8 reserved1[2];
+ __u64 flags;
+};
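+
+/*
+ * Illustrative sketch (editor's example, not part of the UAPI): since this
+ * control is a dynamic array, the element count is conveyed through the
+ * payload size of the extended control. 'slices' and 'num_slices' are
+ * hypothetical application variables holding one entry per slice.
+ *
+ *	struct v4l2_ext_control ctrl = {
+ *		.id = V4L2_CID_STATELESS_HEVC_SLICE_PARAMS,
+ *		.size = num_slices * sizeof(struct v4l2_ctrl_hevc_slice_params),
+ *		.ptr = slices,
+ *	};
+ */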
+
+#define V4L2_HEVC_DECODE_PARAM_FLAG_IRAP_PIC 0x1
+#define V4L2_HEVC_DECODE_PARAM_FLAG_IDR_PIC 0x2
+#define V4L2_HEVC_DECODE_PARAM_FLAG_NO_OUTPUT_OF_PRIOR 0x4
+
+/**
+ * struct v4l2_ctrl_hevc_decode_params - HEVC decode parameters
+ *
+ * @pic_order_cnt_val: picture order count
+ * @short_term_ref_pic_set_size: specifies the size of short-term reference
+ * pictures set included in the SPS of the first slice
+ * @long_term_ref_pic_set_size: specifies the size of long-term reference
+ * pictures set included in the SPS of the first slice
+ * @num_active_dpb_entries: the number of entries in the dpb array
+ * @num_poc_st_curr_before: the number of reference pictures in the short-term
+ * set that come before the current frame
+ * @num_poc_st_curr_after: the number of reference pictures in the short-term
+ * set that come after the current frame
+ * @num_poc_lt_curr: the number of reference pictures in the long-term set
+ * @poc_st_curr_before: provides the indices in the DPB array of the
+ * short-term references that come before the current frame
+ * @poc_st_curr_after: provides the indices in the DPB array of the
+ * short-term references that come after the current frame
+ * @poc_lt_curr: provides the indices in the DPB array of the long-term
+ * references
+ * @reserved: padding field. Should be zeroed by applications.
+ * @dpb: the decoded picture buffer, for meta-data about reference frames
+ * @flags: see V4L2_HEVC_DECODE_PARAM_FLAG_{}
+ */
+struct v4l2_ctrl_hevc_decode_params {
+ __s32 pic_order_cnt_val;
+ __u16 short_term_ref_pic_set_size;
+ __u16 long_term_ref_pic_set_size;
+ __u8 num_active_dpb_entries;
+ __u8 num_poc_st_curr_before;
+ __u8 num_poc_st_curr_after;
+ __u8 num_poc_lt_curr;
+ __u8 poc_st_curr_before[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u8 poc_st_curr_after[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u8 poc_lt_curr[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u8 reserved[4];
+ struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u64 flags;
+};
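+
+/*
+ * Illustrative sketch (editor's example, not part of the UAPI): filling the
+ * DPB from the application's reference list. 'make_dpb_entry()' and
+ * 'num_refs' are hypothetical.
+ *
+ *	struct v4l2_ctrl_hevc_decode_params dec = {};
+ *	unsigned int i;
+ *
+ *	for (i = 0; i < num_refs && i < V4L2_HEVC_DPB_ENTRIES_NUM_MAX; i++)
+ *		dec.dpb[i] = make_dpb_entry(i);
+ *	dec.num_active_dpb_entries = i;
+ */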
+
+/**
+ * struct v4l2_ctrl_hevc_scaling_matrix - HEVC scaling lists parameters
+ *
+ * @scaling_list_4x4: scaling list is used for the scaling process for
+ * transform coefficients. The values on each scaling
+ * list are expected in raster scan order
+ * @scaling_list_8x8: scaling list is used for the scaling process for
+ * transform coefficients. The values on each scaling
+ * list are expected in raster scan order
+ * @scaling_list_16x16: scaling list is used for the scaling process for
+ * transform coefficients. The values on each scaling
+ * list are expected in raster scan order
+ * @scaling_list_32x32: scaling list is used for the scaling process for
+ * transform coefficients. The values on each scaling
+ * list are expected in raster scan order
+ * @scaling_list_dc_coef_16x16: scaling list is used for the scaling process
+ * for transform coefficients. The values on each
+ * scaling list are expected in raster scan order.
+ * @scaling_list_dc_coef_32x32: scaling list is used for the scaling process
+ * for transform coefficients. The values on each
+ * scaling list are expected in raster scan order.
+ */
+struct v4l2_ctrl_hevc_scaling_matrix {
+ __u8 scaling_list_4x4[6][16];
+ __u8 scaling_list_8x8[6][64];
+ __u8 scaling_list_16x16[6][64];
+ __u8 scaling_list_32x32[2][64];
+ __u8 scaling_list_dc_coef_16x16[6];
+ __u8 scaling_list_dc_coef_32x32[2];
+};
+
+#define V4L2_CID_COLORIMETRY_CLASS_BASE (V4L2_CTRL_CLASS_COLORIMETRY | 0x900)
+#define V4L2_CID_COLORIMETRY_CLASS (V4L2_CTRL_CLASS_COLORIMETRY | 1)
+
+#define V4L2_CID_COLORIMETRY_HDR10_CLL_INFO (V4L2_CID_COLORIMETRY_CLASS_BASE + 0)
+
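+/**
+ * struct v4l2_ctrl_hdr10_cll_info - HDR10 content light level information
+ *
+ * @max_content_light_level: maximum content light level (MaxCLL), in
+ * candelas per square metre
+ * @max_pic_average_light_level: maximum picture average light level
+ * (MaxFALL), in candelas per square metre
+ */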
+struct v4l2_ctrl_hdr10_cll_info {
+ __u16 max_content_light_level;
+ __u16 max_pic_average_light_level;
+};
+
+#define V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY (V4L2_CID_COLORIMETRY_CLASS_BASE + 1)
+
+#define V4L2_HDR10_MASTERING_PRIMARIES_X_LOW 5
+#define V4L2_HDR10_MASTERING_PRIMARIES_X_HIGH 37000
+#define V4L2_HDR10_MASTERING_PRIMARIES_Y_LOW 5
+#define V4L2_HDR10_MASTERING_PRIMARIES_Y_HIGH 42000
+#define V4L2_HDR10_MASTERING_WHITE_POINT_X_LOW 5
+#define V4L2_HDR10_MASTERING_WHITE_POINT_X_HIGH 37000
+#define V4L2_HDR10_MASTERING_WHITE_POINT_Y_LOW 5
+#define V4L2_HDR10_MASTERING_WHITE_POINT_Y_HIGH 42000
+#define V4L2_HDR10_MASTERING_MAX_LUMA_LOW 50000
+#define V4L2_HDR10_MASTERING_MAX_LUMA_HIGH 100000000
+#define V4L2_HDR10_MASTERING_MIN_LUMA_LOW 1
+#define V4L2_HDR10_MASTERING_MIN_LUMA_HIGH 50000
+
+struct v4l2_ctrl_hdr10_mastering_display {
+ __u16 display_primaries_x[3];
+ __u16 display_primaries_y[3];
+ __u16 white_point_x;
+ __u16 white_point_y;
+ __u32 max_display_mastering_luminance;
+ __u32 min_display_mastering_luminance;
+};
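+
+/*
+ * Illustrative sketch (editor's example, not part of the UAPI): validating
+ * mastering display primaries against the bounds defined above.
+ *
+ *	static int hdr10_primaries_valid(const struct v4l2_ctrl_hdr10_mastering_display *m)
+ *	{
+ *		unsigned int i;
+ *
+ *		for (i = 0; i < 3; i++) {
+ *			if (m->display_primaries_x[i] < V4L2_HDR10_MASTERING_PRIMARIES_X_LOW ||
+ *			    m->display_primaries_x[i] > V4L2_HDR10_MASTERING_PRIMARIES_X_HIGH ||
+ *			    m->display_primaries_y[i] < V4L2_HDR10_MASTERING_PRIMARIES_Y_LOW ||
+ *			    m->display_primaries_y[i] > V4L2_HDR10_MASTERING_PRIMARIES_Y_HIGH)
+ *				return 0;
+ *		}
+ *		return 1;
+ *	}
+ */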
+
+/* Stateless VP9 controls */
+
+#define V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED 0x1
+#define V4L2_VP9_LOOP_FILTER_FLAG_DELTA_UPDATE 0x2
+
+/**
+ * struct v4l2_vp9_loop_filter - VP9 loop filter parameters
+ *
+ * @ref_deltas: contains the adjustment needed for the filter level based on the
+ * chosen reference frame. If this syntax element is not present in the bitstream,
+ * users should pass its last value.
+ * @mode_deltas: contains the adjustment needed for the filter level based on the
+ * chosen mode. If this syntax element is not present in the bitstream, users should
+ * pass its last value.
+ * @level: indicates the loop filter strength.
+ * @sharpness: indicates the sharpness level.
+ * @flags: combination of V4L2_VP9_LOOP_FILTER_FLAG_{} flags.
+ * @reserved: padding field. Should be zeroed by applications.
+ *
+ * This structure contains all loop filter related parameters. See section
+ * '7.2.8 Loop filter semantics' of the VP9 specification for more details.
+ */
+struct v4l2_vp9_loop_filter {
+ __s8 ref_deltas[4];
+ __s8 mode_deltas[2];
+ __u8 level;
+ __u8 sharpness;
+ __u8 flags;
+ __u8 reserved[7];
+};
+
+/**
+ * struct v4l2_vp9_quantization - VP9 quantization parameters
+ *
+ * @base_q_idx: indicates the base frame qindex.
+ * @delta_q_y_dc: indicates the Y DC quantizer relative to base_q_idx.
+ * @delta_q_uv_dc: indicates the UV DC quantizer relative to base_q_idx.
+ * @delta_q_uv_ac: indicates the UV AC quantizer relative to base_q_idx.
+ * @reserved: padding field. Should be zeroed by applications.
+ *
+ * Encodes the quantization parameters. See section '7.2.9 Quantization params
+ * syntax' of the VP9 specification for more details.
+ */
+struct v4l2_vp9_quantization {
+ __u8 base_q_idx;
+ __s8 delta_q_y_dc;
+ __s8 delta_q_uv_dc;
+ __s8 delta_q_uv_ac;
+ __u8 reserved[4];
+};
+
+#define V4L2_VP9_SEGMENTATION_FLAG_ENABLED 0x01
+#define V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP 0x02
+#define V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE 0x04
+#define V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA 0x08
+#define V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE 0x10
+
+#define V4L2_VP9_SEG_LVL_ALT_Q 0
+#define V4L2_VP9_SEG_LVL_ALT_L 1
+#define V4L2_VP9_SEG_LVL_REF_FRAME 2
+#define V4L2_VP9_SEG_LVL_SKIP 3
+#define V4L2_VP9_SEG_LVL_MAX 4
+
+#define V4L2_VP9_SEGMENT_FEATURE_ENABLED(id) (1 << (id))
+#define V4L2_VP9_SEGMENT_FEATURE_ENABLED_MASK 0xf
+
+/**
+ * struct v4l2_vp9_segmentation - VP9 segmentation parameters
+ *
+ * @feature_data: data attached to each feature. Data entry is only valid if
+ * the feature is enabled. The array shall be indexed with segment number as
+ * the first dimension (0..7) and one of V4L2_VP9_SEG_{} as the second dimension.
+ * @feature_enabled: bitmask defining which features are enabled in each segment.
+ * The value for each segment is a combination of V4L2_VP9_SEGMENT_FEATURE_ENABLED(id)
+ * values where id is one of V4L2_VP9_SEG_LVL_{}.
+ * @tree_probs: specifies the probability values to be used when decoding a
+ * Segment-ID. See '5.15. Segmentation map' section of the VP9 specification
+ * for more details.
+ * @pred_probs: specifies the probability values to be used when decoding a
+ * Predicted-Segment-ID. See '6.4.14. Get segment id syntax' section of :ref:`vp9`
+ * for more details.
+ * @flags: combination of V4L2_VP9_SEGMENTATION_FLAG_{} flags.
+ * @reserved: padding field. Should be zeroed by applications.
+ *
+ * Encodes the segmentation parameters. See section '7.2.10 Segmentation params syntax' of
+ * the VP9 specification for more details.
+ */
+struct v4l2_vp9_segmentation {
+ __s16 feature_data[8][4];
+ __u8 feature_enabled[8];
+ __u8 tree_probs[7];
+ __u8 pred_probs[3];
+ __u8 flags;
+ __u8 reserved[5];
+};
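+
+/*
+ * Illustrative sketch (editor's example, not part of the UAPI): testing
+ * whether a feature is enabled for a given segment and fetching its data.
+ * 'seg' and 'segment_id' are hypothetical application variables.
+ *
+ *	__s16 q_delta = 0;
+ *
+ *	if (seg.feature_enabled[segment_id] &
+ *	    V4L2_VP9_SEGMENT_FEATURE_ENABLED(V4L2_VP9_SEG_LVL_ALT_Q))
+ *		q_delta = seg.feature_data[segment_id][V4L2_VP9_SEG_LVL_ALT_Q];
+ */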
+
+#define V4L2_VP9_FRAME_FLAG_KEY_FRAME 0x001
+#define V4L2_VP9_FRAME_FLAG_SHOW_FRAME 0x002
+#define V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT 0x004
+#define V4L2_VP9_FRAME_FLAG_INTRA_ONLY 0x008
+#define V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV 0x010
+#define V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX 0x020
+#define V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE 0x040
+#define V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING 0x080
+#define V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING 0x100
+#define V4L2_VP9_FRAME_FLAG_COLOR_RANGE_FULL_SWING 0x200
+
+#define V4L2_VP9_SIGN_BIAS_LAST 0x1
+#define V4L2_VP9_SIGN_BIAS_GOLDEN 0x2
+#define V4L2_VP9_SIGN_BIAS_ALT 0x4
+
+#define V4L2_VP9_RESET_FRAME_CTX_NONE 0
+#define V4L2_VP9_RESET_FRAME_CTX_SPEC 1
+#define V4L2_VP9_RESET_FRAME_CTX_ALL 2
+
+#define V4L2_VP9_INTERP_FILTER_EIGHTTAP 0
+#define V4L2_VP9_INTERP_FILTER_EIGHTTAP_SMOOTH 1
+#define V4L2_VP9_INTERP_FILTER_EIGHTTAP_SHARP 2
+#define V4L2_VP9_INTERP_FILTER_BILINEAR 3
+#define V4L2_VP9_INTERP_FILTER_SWITCHABLE 4
+
+#define V4L2_VP9_REFERENCE_MODE_SINGLE_REFERENCE 0
+#define V4L2_VP9_REFERENCE_MODE_COMPOUND_REFERENCE 1
+#define V4L2_VP9_REFERENCE_MODE_SELECT 2
+
+#define V4L2_VP9_PROFILE_MAX 3
+
+#define V4L2_CID_STATELESS_VP9_FRAME (V4L2_CID_CODEC_STATELESS_BASE + 300)
+/**
+ * struct v4l2_ctrl_vp9_frame - VP9 frame decoding control
+ *
+ * @lf: loop filter parameters. See &v4l2_vp9_loop_filter for more details.
+ * @quant: quantization parameters. See &v4l2_vp9_quantization for more details.
+ * @seg: segmentation parameters. See &v4l2_vp9_segmentation for more details.
+ * @flags: combination of V4L2_VP9_FRAME_FLAG_{} flags.
+ * @compressed_header_size: compressed header size in bytes.
+ * @uncompressed_header_size: uncompressed header size in bytes.
+ * @frame_width_minus_1: add 1 to it and you'll get the frame width expressed in pixels.
+ * @frame_height_minus_1: add 1 to it and you'll get the frame height expressed in pixels.
+ * @render_width_minus_1: add 1 to it and you'll get the expected render width expressed in
+ * pixels. This is not used during the decoding process but might be used by HW scalers
+ * to prepare a frame that's ready for scanout.
+ * @render_height_minus_1: add 1 to it and you'll get the expected render height expressed in
+ * pixels. This is not used during the decoding process but might be used by HW scalers
+ * to prepare a frame that's ready for scanout.
+ * @last_frame_ts: "last" reference buffer timestamp.
+ * The timestamp refers to the timestamp field in struct v4l2_buffer.
+ * Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
+ * @golden_frame_ts: "golden" reference buffer timestamp.
+ * The timestamp refers to the timestamp field in struct v4l2_buffer.
+ * Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
+ * @alt_frame_ts: "alt" reference buffer timestamp.
+ * The timestamp refers to the timestamp field in struct v4l2_buffer.
+ * Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
+ * @ref_frame_sign_bias: a bitfield specifying whether the sign bias is set for a given
+ * reference frame. Either of V4L2_VP9_SIGN_BIAS_{}.
+ * @reset_frame_context: specifies whether the frame context should be reset to default values.
+ * Either of V4L2_VP9_RESET_FRAME_CTX_{}.
+ * @frame_context_idx: frame context that should be used/updated.
+ * @profile: VP9 profile. Can be 0, 1, 2 or 3.
+ * @bit_depth: bits per components. Can be 8, 10 or 12. Note that not all profiles support
+ * 10 and/or 12 bits depths.
+ * @interpolation_filter: specifies the filter selection used for performing inter prediction.
+ * Set to one of V4L2_VP9_INTERP_FILTER_{}.
+ * @tile_cols_log2: specifies the base 2 logarithm of the width of each tile (where the width
+ * is measured in units of 8x8 blocks). Shall be less than or equal to 6.
+ * @tile_rows_log2: specifies the base 2 logarithm of the height of each tile (where the height
+ * is measured in units of 8x8 blocks).
+ * @reference_mode: specifies the type of inter prediction to be used.
+ * Set to one of V4L2_VP9_REFERENCE_MODE_{}.
+ * @reserved: padding field. Should be zeroed by applications.
+ */
+struct v4l2_ctrl_vp9_frame {
+ struct v4l2_vp9_loop_filter lf;
+ struct v4l2_vp9_quantization quant;
+ struct v4l2_vp9_segmentation seg;
+ __u32 flags;
+ __u16 compressed_header_size;
+ __u16 uncompressed_header_size;
+ __u16 frame_width_minus_1;
+ __u16 frame_height_minus_1;
+ __u16 render_width_minus_1;
+ __u16 render_height_minus_1;
+ __u64 last_frame_ts;
+ __u64 golden_frame_ts;
+ __u64 alt_frame_ts;
+ __u8 ref_frame_sign_bias;
+ __u8 reset_frame_context;
+ __u8 frame_context_idx;
+ __u8 profile;
+ __u8 bit_depth;
+ __u8 interpolation_filter;
+ __u8 tile_cols_log2;
+ __u8 tile_rows_log2;
+ __u8 reference_mode;
+ __u8 reserved[7];
+};
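+
+/*
+ * Illustrative sketch (editor's example, not part of the UAPI): recovering
+ * the pixel dimensions and the chroma subsampling from a filled control.
+ * 'vp9' is a hypothetical struct v4l2_ctrl_vp9_frame.
+ *
+ *	unsigned int width = vp9.frame_width_minus_1 + 1;
+ *	unsigned int height = vp9.frame_height_minus_1 + 1;
+ *	int is_420 = (vp9.flags & V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING) &&
+ *		     (vp9.flags & V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING);
+ */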
+
+#define V4L2_VP9_NUM_FRAME_CTX 4
+
+/**
+ * struct v4l2_vp9_mv_probs - VP9 Motion vector probability updates
+ * @joint: motion vector joint probability updates.
+ * @sign: motion vector sign probability updates.
+ * @classes: motion vector class probability updates.
+ * @class0_bit: motion vector class0 bit probability updates.
+ * @bits: motion vector bits probability updates.
+ * @class0_fr: motion vector class0 fractional bit probability updates.
+ * @fr: motion vector fractional bit probability updates.
+ * @class0_hp: motion vector class0 high precision fractional bit probability updates.
+ * @hp: motion vector high precision fractional bit probability updates.
+ *
+ * This structure contains new values of motion vector probabilities.
+ * A value of zero in an array element means there is no update of the relevant probability.
+ * See `struct v4l2_vp9_prob_updates` for details.
+ */
+struct v4l2_vp9_mv_probs {
+ __u8 joint[3];
+ __u8 sign[2];
+ __u8 classes[2][10];
+ __u8 class0_bit[2];
+ __u8 bits[2][10];
+ __u8 class0_fr[2][2][3];
+ __u8 fr[2][3];
+ __u8 class0_hp[2];
+ __u8 hp[2];
+};
+
+#define V4L2_CID_STATELESS_VP9_COMPRESSED_HDR (V4L2_CID_CODEC_STATELESS_BASE + 301)
+
+#define V4L2_VP9_TX_MODE_ONLY_4X4 0
+#define V4L2_VP9_TX_MODE_ALLOW_8X8 1
+#define V4L2_VP9_TX_MODE_ALLOW_16X16 2
+#define V4L2_VP9_TX_MODE_ALLOW_32X32 3
+#define V4L2_VP9_TX_MODE_SELECT 4
+
+/**
+ * struct v4l2_ctrl_vp9_compressed_hdr - VP9 probability updates control
+ * @tx_mode: specifies the TX mode. Set to one of V4L2_VP9_TX_MODE_{}.
+ * @tx8: TX 8x8 probability updates.
+ * @tx16: TX 16x16 probability updates.
+ * @tx32: TX 32x32 probability updates.
+ * @coef: coefficient probability updates.
+ * @skip: skip probability updates.
+ * @inter_mode: inter mode probability updates.
+ * @interp_filter: interpolation filter probability updates.
+ * @is_inter: is inter-block probability updates.
+ * @comp_mode: compound prediction mode probability updates.
+ * @single_ref: single ref probability updates.
+ * @comp_ref: compound ref probability updates.
+ * @y_mode: Y prediction mode probability updates.
+ * @uv_mode: UV prediction mode probability updates.
+ * @partition: partition probability updates.
+ * @mv: motion vector probability updates.
+ *
+ * This structure holds the probability updates as parsed in the compressed
+ * header (Spec 6.3). These values represent the values of the probability
+ * updates after being translated with inv_map_table[] (see 6.3.5). A value
+ * of zero in an array element
+ * means that there is no update of the relevant probability.
+ *
+ * This control is optional and needs to be used when dealing with hardware that is
+ * not capable of parsing the compressed header itself. Only drivers which need it will
+ * implement it.
+ */
+struct v4l2_ctrl_vp9_compressed_hdr {
+ __u8 tx_mode;
+ __u8 tx8[2][1];
+ __u8 tx16[2][2];
+ __u8 tx32[2][3];
+ __u8 coef[4][2][2][6][6][3];
+ __u8 skip[3];
+ __u8 inter_mode[7][3];
+ __u8 interp_filter[4][2];
+ __u8 is_inter[4];
+ __u8 comp_mode[5];
+ __u8 single_ref[5][2];
+ __u8 comp_ref[5];
+ __u8 y_mode[4][9];
+ __u8 uv_mode[10][9];
+ __u8 partition[16][3];
+
+ struct v4l2_vp9_mv_probs mv;
+};
+
+/* MPEG-compression definitions kept for backwards compatibility */
+#ifndef __KERNEL__
+#define V4L2_CTRL_CLASS_MPEG V4L2_CTRL_CLASS_CODEC
+#define V4L2_CID_MPEG_CLASS V4L2_CID_CODEC_CLASS
+#define V4L2_CID_MPEG_BASE V4L2_CID_CODEC_BASE
+#define V4L2_CID_MPEG_CX2341X_BASE V4L2_CID_CODEC_CX2341X_BASE
+#define V4L2_CID_MPEG_MFC51_BASE V4L2_CID_CODEC_MFC51_BASE
+#endif
+
#endif
diff --git a/include/uapi/linux/v4l2-mediabus.h b/include/uapi/linux/v4l2-mediabus.h
index 123a231001a8..903e67b16711 100644
--- a/include/uapi/linux/v4l2-mediabus.h
+++ b/include/uapi/linux/v4l2-mediabus.h
@@ -16,6 +16,8 @@
#include <linux/types.h>
#include <linux/videodev2.h>
+#define V4L2_MBUS_FRAMEFMT_SET_CSC 0x0001
+
/**
* struct v4l2_mbus_framefmt - frame format on the media bus
* @width: image width
@@ -24,8 +26,11 @@
* @field: used interlacing type (from enum v4l2_field)
* @colorspace: colorspace of the data (from enum v4l2_colorspace)
* @ycbcr_enc: YCbCr encoding of the data (from enum v4l2_ycbcr_encoding)
+ * @hsv_enc: HSV encoding of the data (from enum v4l2_hsv_encoding)
* @quantization: quantization of the data (from enum v4l2_quantization)
* @xfer_func: transfer function of the data (from enum v4l2_xfer_func)
+ * @flags: flags (V4L2_MBUS_FRAMEFMT_*)
+ * @reserved: reserved bytes that can be used later
*/
struct v4l2_mbus_framefmt {
__u32 width;
@@ -33,10 +38,16 @@ struct v4l2_mbus_framefmt {
__u32 code;
__u32 field;
__u32 colorspace;
- __u16 ycbcr_enc;
+ union {
+ /* enum v4l2_ycbcr_encoding */
+ __u16 ycbcr_enc;
+ /* enum v4l2_hsv_encoding */
+ __u16 hsv_enc;
+ };
__u16 quantization;
__u16 xfer_func;
- __u16 reserved[11];
+ __u16 flags;
+ __u16 reserved[10];
};
#ifndef __KERNEL__
diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h
index 03970ce30741..658106f5b5dc 100644
--- a/include/uapi/linux/v4l2-subdev.h
+++ b/include/uapi/linux/v4l2-subdev.h
@@ -44,6 +44,7 @@ enum v4l2_subdev_format_whence {
* @which: format type (from enum v4l2_subdev_format_whence)
* @pad: pad number, as reported by the media API
* @format: media bus format (format code and frame size)
+ * @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_format {
__u32 which;
@@ -57,6 +58,7 @@ struct v4l2_subdev_format {
* @which: format type (from enum v4l2_subdev_format_whence)
* @pad: pad number, as reported by the media API
* @rect: pad crop rectangle boundaries
+ * @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_crop {
__u32 which;
@@ -65,27 +67,41 @@ struct v4l2_subdev_crop {
__u32 reserved[8];
};
+#define V4L2_SUBDEV_MBUS_CODE_CSC_COLORSPACE 0x00000001
+#define V4L2_SUBDEV_MBUS_CODE_CSC_XFER_FUNC 0x00000002
+#define V4L2_SUBDEV_MBUS_CODE_CSC_YCBCR_ENC 0x00000004
+#define V4L2_SUBDEV_MBUS_CODE_CSC_HSV_ENC V4L2_SUBDEV_MBUS_CODE_CSC_YCBCR_ENC
+#define V4L2_SUBDEV_MBUS_CODE_CSC_QUANTIZATION 0x00000008
+
/**
* struct v4l2_subdev_mbus_code_enum - Media bus format enumeration
* @pad: pad number, as reported by the media API
* @index: format index during enumeration
* @code: format code (MEDIA_BUS_FMT_ definitions)
* @which: format type (from enum v4l2_subdev_format_whence)
+ * @flags: flags set by the driver (V4L2_SUBDEV_MBUS_CODE_*)
+ * @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_mbus_code_enum {
__u32 pad;
__u32 index;
__u32 code;
__u32 which;
- __u32 reserved[8];
+ __u32 flags;
+ __u32 reserved[7];
};
/**
* struct v4l2_subdev_frame_size_enum - Media bus format enumeration
- * @pad: pad number, as reported by the media API
* @index: format index during enumeration
+ * @pad: pad number, as reported by the media API
* @code: format code (MEDIA_BUS_FMT_ definitions)
+ * @min_width: minimum frame width, in pixels
+ * @max_width: maximum frame width, in pixels
+ * @min_height: minimum frame height, in pixels
+ * @max_height: maximum frame height, in pixels
* @which: format type (from enum v4l2_subdev_format_whence)
+ * @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_frame_size_enum {
__u32 index;
@@ -103,6 +119,7 @@ struct v4l2_subdev_frame_size_enum {
* struct v4l2_subdev_frame_interval - Pad-level frame rate
* @pad: pad number, as reported by the media API
* @interval: frame interval in seconds
+ * @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_frame_interval {
__u32 pad;
@@ -119,6 +136,7 @@ struct v4l2_subdev_frame_interval {
* @height: frame height in pixels
* @interval: frame interval in seconds
* @which: format type (from enum v4l2_subdev_format_whence)
+ * @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_frame_interval_enum {
__u32 index;
@@ -155,9 +173,25 @@ struct v4l2_subdev_selection {
__u32 reserved[8];
};
+/**
+ * struct v4l2_subdev_capability - subdev capabilities
+ * @version: the driver version number
+ * @capabilities: the subdev capabilities, see V4L2_SUBDEV_CAP_*
+ * @reserved: for future use, set to zero for now
+ */
+struct v4l2_subdev_capability {
+ __u32 version;
+ __u32 capabilities;
+ __u32 reserved[14];
+};
+
+/* The v4l2 sub-device video device node is registered in read-only mode. */
+#define V4L2_SUBDEV_CAP_RO_SUBDEV 0x00000001
+
/* Backwards compatibility define --- to be removed */
#define v4l2_subdev_edid v4l2_edid
+#define VIDIOC_SUBDEV_QUERYCAP _IOR('V', 0, struct v4l2_subdev_capability)
#define VIDIOC_SUBDEV_G_FMT _IOWR('V', 4, struct v4l2_subdev_format)
#define VIDIOC_SUBDEV_S_FMT _IOWR('V', 5, struct v4l2_subdev_format)
#define VIDIOC_SUBDEV_G_FRAME_INTERVAL _IOWR('V', 21, struct v4l2_subdev_frame_interval)
diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h
index c27289fd619a..f8a8d6b3c521 100644
--- a/include/uapi/linux/vbox_vmmdev_types.h
+++ b/include/uapi/linux/vbox_vmmdev_types.h
@@ -63,6 +63,7 @@ enum vmmdev_request_type {
VMMDEVREQ_SET_GUEST_CAPABILITIES = 56,
VMMDEVREQ_VIDEMODE_SUPPORTED2 = 57, /* since version 3.2.0 */
VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX = 80, /* since version 4.2.4 */
+ VMMDEVREQ_GET_DISPLAY_CHANGE_REQ_MULTI = 81,
VMMDEVREQ_HGCM_CONNECT = 60,
VMMDEVREQ_HGCM_DISCONNECT = 61,
VMMDEVREQ_HGCM_CALL32 = 62,
@@ -92,6 +93,8 @@ enum vmmdev_request_type {
VMMDEVREQ_WRITE_COREDUMP = 218,
VMMDEVREQ_GUEST_HEARTBEAT = 219,
VMMDEVREQ_HEARTBEAT_CONFIGURE = 220,
+ VMMDEVREQ_NT_BUG_CHECK = 221,
+ VMMDEVREQ_VIDEO_UPDATE_MONITOR_POSITIONS = 222,
/* Ensure the enum is a 32 bit data-type */
VMMDEVREQ_SIZEHACK = 0x7fffffff
};
diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h
index 9cec58a6a5ea..15125f6ec60d 100644
--- a/include/uapi/linux/vboxguest.h
+++ b/include/uapi/linux/vboxguest.h
@@ -103,7 +103,7 @@ VMMDEV_ASSERT_SIZE(vbg_ioctl_driver_version_info, 24 + 20);
/* IOCTL to perform a VMM Device request larger then 1KB. */
-#define VBG_IOCTL_VMMDEV_REQUEST_BIG _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0)
+#define VBG_IOCTL_VMMDEV_REQUEST_BIG _IO('V', 3)
/** VBG_IOCTL_HGCM_CONNECT data structure. */
@@ -198,7 +198,7 @@ struct vbg_ioctl_log {
} u;
};
-#define VBG_IOCTL_LOG(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s)
+#define VBG_IOCTL_LOG(s) _IO('V', 9)
/** VBG_IOCTL_WAIT_FOR_EVENTS data structure. */
@@ -257,6 +257,30 @@ VMMDEV_ASSERT_SIZE(vbg_ioctl_change_filter, 24 + 8);
_IOWR('V', 12, struct vbg_ioctl_change_filter)
+/** VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES data structure. */
+struct vbg_ioctl_acquire_guest_caps {
+ /** The header. */
+ struct vbg_ioctl_hdr hdr;
+ union {
+ struct {
+ /** Flags (VBGL_IOC_AGC_FLAGS_XXX). */
+ __u32 flags;
+ /** Capabilities to set (VMMDEV_GUEST_SUPPORTS_XXX). */
+ __u32 or_mask;
+ /** Capabilities to drop (VMMDEV_GUEST_SUPPORTS_XXX). */
+ __u32 not_mask;
+ } in;
+ } u;
+};
+VMMDEV_ASSERT_SIZE(vbg_ioctl_acquire_guest_caps, 24 + 12);
+
+#define VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE 0x00000001
+#define VBGL_IOC_AGC_FLAGS_VALID_MASK 0x00000001
+
+#define VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES \
+ _IOWR('V', 13, struct vbg_ioctl_acquire_guest_caps)
+
+
/** VBG_IOCTL_CHANGE_GUEST_CAPABILITIES data structure. */
struct vbg_ioctl_set_guest_caps {
/** The header. */
diff --git a/include/uapi/linux/vdpa.h b/include/uapi/linux/vdpa.h
new file mode 100644
index 000000000000..9bd79235c875
--- /dev/null
+++ b/include/uapi/linux/vdpa.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * vdpa device management interface
+ * Copyright (c) 2020 Mellanox Technologies Ltd. All rights reserved.
+ */
+
+#ifndef _UAPI_LINUX_VDPA_H_
+#define _UAPI_LINUX_VDPA_H_
+
+#define VDPA_GENL_NAME "vdpa"
+#define VDPA_GENL_VERSION 0x1
+
+enum vdpa_command {
+ VDPA_CMD_UNSPEC,
+ VDPA_CMD_MGMTDEV_NEW,
+ VDPA_CMD_MGMTDEV_GET, /* can dump */
+ VDPA_CMD_DEV_NEW,
+ VDPA_CMD_DEV_DEL,
+ VDPA_CMD_DEV_GET, /* can dump */
+ VDPA_CMD_DEV_CONFIG_GET, /* can dump */
+ VDPA_CMD_DEV_VSTATS_GET,
+};
+
+enum vdpa_attr {
+ VDPA_ATTR_UNSPEC,
+
+ /* Pad attribute for 64b alignment */
+ VDPA_ATTR_PAD = VDPA_ATTR_UNSPEC,
+
+ /* bus name (optional) + dev name together make the parent device handle */
+ VDPA_ATTR_MGMTDEV_BUS_NAME, /* string */
+ VDPA_ATTR_MGMTDEV_DEV_NAME, /* string */
+ VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES, /* u64 */
+
+ VDPA_ATTR_DEV_NAME, /* string */
+ VDPA_ATTR_DEV_ID, /* u32 */
+ VDPA_ATTR_DEV_VENDOR_ID, /* u32 */
+ VDPA_ATTR_DEV_MAX_VQS, /* u32 */
+ VDPA_ATTR_DEV_MAX_VQ_SIZE, /* u16 */
+ VDPA_ATTR_DEV_MIN_VQ_SIZE, /* u16 */
+
+ VDPA_ATTR_DEV_NET_CFG_MACADDR, /* binary */
+ VDPA_ATTR_DEV_NET_STATUS, /* u8 */
+ VDPA_ATTR_DEV_NET_CFG_MAX_VQP, /* u16 */
+ VDPA_ATTR_DEV_NET_CFG_MTU, /* u16 */
+
+ VDPA_ATTR_DEV_NEGOTIATED_FEATURES, /* u64 */
+ VDPA_ATTR_DEV_MGMTDEV_MAX_VQS, /* u32 */
+ /* virtio features that are supported by the vDPA management device */
+ VDPA_ATTR_DEV_SUPPORTED_FEATURES, /* u64 */
+
+ VDPA_ATTR_DEV_QUEUE_INDEX, /* u32 */
+ VDPA_ATTR_DEV_VENDOR_ATTR_NAME, /* string */
+ VDPA_ATTR_DEV_VENDOR_ATTR_VALUE, /* u64 */
+
+ VDPA_ATTR_DEV_FEATURES, /* u64 */
+
+ /* virtio features that are supported by the vDPA device */
+ VDPA_ATTR_VDPA_DEV_SUPPORTED_FEATURES, /* u64 */
+
+ /* new attributes must be added above here */
+ VDPA_ATTR_MAX,
+};
+
+#endif
diff --git a/include/uapi/linux/vduse.h b/include/uapi/linux/vduse.h
new file mode 100644
index 000000000000..11bd48c72c6c
--- /dev/null
+++ b/include/uapi/linux/vduse.h
@@ -0,0 +1,353 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_VDUSE_H_
+#define _UAPI_VDUSE_H_
+
+#include <linux/types.h>
+
+#define VDUSE_BASE 0x81
+
+/* The ioctls for control device (/dev/vduse/control) */
+
+#define VDUSE_API_VERSION 0
+
+/*
+ * Get the version of the VDUSE API that the kernel supports (VDUSE_API_VERSION).
+ * This is used for future extension.
+ */
+#define VDUSE_GET_API_VERSION _IOR(VDUSE_BASE, 0x00, __u64)
+
+/* Set the version of the VDUSE API that userspace supports. */
+#define VDUSE_SET_API_VERSION _IOW(VDUSE_BASE, 0x01, __u64)
+
+/**
+ * struct vduse_dev_config - basic configuration of a VDUSE device
+ * @name: VDUSE device name, needs to be NUL terminated
+ * @vendor_id: virtio vendor id
+ * @device_id: virtio device id
+ * @features: virtio features
+ * @vq_num: the number of virtqueues
+ * @vq_align: the allocation alignment of virtqueue's metadata
+ * @reserved: for future use, needs to be initialized to zero
+ * @config_size: the size of the configuration space
+ * @config: the buffer of the configuration space
+ *
+ * Structure used by VDUSE_CREATE_DEV ioctl to create VDUSE device.
+ */
+struct vduse_dev_config {
+#define VDUSE_NAME_MAX 256
+ char name[VDUSE_NAME_MAX];
+ __u32 vendor_id;
+ __u32 device_id;
+ __u64 features;
+ __u32 vq_num;
+ __u32 vq_align;
+ __u32 reserved[13];
+ __u32 config_size;
+ __u8 config[];
+};
+
+/* Create a VDUSE device which is represented by a char device (/dev/vduse/$NAME) */
+#define VDUSE_CREATE_DEV _IOW(VDUSE_BASE, 0x02, struct vduse_dev_config)
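+
+/*
+ * Illustrative sketch (editor's example, not part of the UAPI): creating a
+ * device through the control device. 'config', 'config_size' and
+ * 'dev_features' describe a hypothetical virtio configuration; error
+ * handling is omitted.
+ *
+ *	struct vduse_dev_config *cfg;
+ *	int control_fd = open("/dev/vduse/control", O_RDWR);
+ *
+ *	cfg = calloc(1, sizeof(*cfg) + config_size);
+ *	strncpy(cfg->name, "my-vduse-dev", VDUSE_NAME_MAX - 1);
+ *	cfg->device_id = 2;		// e.g. virtio-blk
+ *	cfg->features = dev_features;
+ *	cfg->vq_num = 1;
+ *	cfg->vq_align = 4096;
+ *	cfg->config_size = config_size;
+ *	memcpy(cfg->config, config, config_size);
+ *	ioctl(control_fd, VDUSE_CREATE_DEV, cfg);
+ */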
+
+/*
+ * Destroy a VDUSE device. Make sure there are no more references
+ * to the char device (/dev/vduse/$NAME).
+ */
+#define VDUSE_DESTROY_DEV _IOW(VDUSE_BASE, 0x03, char[VDUSE_NAME_MAX])
+
+/* The ioctls for VDUSE device (/dev/vduse/$NAME) */
+
+/**
+ * struct vduse_iotlb_entry - entry of IOTLB to describe one IOVA region [start, last]
+ * @offset: the mmap offset on returned file descriptor
+ * @start: start of the IOVA region
+ * @last: last of the IOVA region
+ * @perm: access permission of the IOVA region
+ *
+ * Structure used by VDUSE_IOTLB_GET_FD ioctl to find an overlapping IOVA region.
+ */
+struct vduse_iotlb_entry {
+ __u64 offset;
+ __u64 start;
+ __u64 last;
+#define VDUSE_ACCESS_RO 0x1
+#define VDUSE_ACCESS_WO 0x2
+#define VDUSE_ACCESS_RW 0x3
+ __u8 perm;
+};
+
+/*
+ * Find the first IOVA region that overlaps with the range [start, last]
+ * and return the corresponding file descriptor. A return value of -EINVAL
+ * means the IOVA region doesn't exist. The caller should set the start and
+ * last fields.
+ */
+#define VDUSE_IOTLB_GET_FD _IOWR(VDUSE_BASE, 0x10, struct vduse_iotlb_entry)
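+
+/*
+ * Illustrative sketch (editor's example, not part of the UAPI): translating
+ * an IOVA to a userspace mapping. The protection flags should match
+ * entry.perm; error handling is omitted and 'iova' is hypothetical.
+ *
+ *	struct vduse_iotlb_entry entry = { .start = iova, .last = iova };
+ *	int fd = ioctl(dev_fd, VDUSE_IOTLB_GET_FD, &entry);
+ *	size_t len = entry.last - entry.start + 1;
+ *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *			  fd, entry.offset);
+ *	void *addr = (char *)base + (iova - entry.start);
+ */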
+
+/*
+ * Get the negotiated virtio features. It's a subset of the features in
+ * struct vduse_dev_config which can be accepted by the virtio driver. It's
+ * only valid after the FEATURES_OK status bit is set.
+ */
+#define VDUSE_DEV_GET_FEATURES _IOR(VDUSE_BASE, 0x11, __u64)
+
+/**
+ * struct vduse_config_data - data used to update configuration space
+ * @offset: the offset from the beginning of configuration space
+ * @length: the length to write to configuration space
+ * @buffer: the buffer used to write from
+ *
+ * Structure used by VDUSE_DEV_SET_CONFIG ioctl to update device
+ * configuration space.
+ */
+struct vduse_config_data {
+ __u32 offset;
+ __u32 length;
+ __u8 buffer[];
+};
+
+/* Set device configuration space */
+#define VDUSE_DEV_SET_CONFIG _IOW(VDUSE_BASE, 0x12, struct vduse_config_data)
+
+/*
+ * Inject a config interrupt. It's usually used to notify the virtio driver
+ * that the device configuration space has changed.
+ */
+#define VDUSE_DEV_INJECT_CONFIG_IRQ _IO(VDUSE_BASE, 0x13)
+
+/**
+ * struct vduse_vq_config - basic configuration of a virtqueue
+ * @index: virtqueue index
+ * @max_size: the max size of virtqueue
+ * @reserved: for future use, needs to be initialized to zero
+ *
+ * Structure used by VDUSE_VQ_SETUP ioctl to setup a virtqueue.
+ */
+struct vduse_vq_config {
+ __u32 index;
+ __u16 max_size;
+ __u16 reserved[13];
+};
+
+/*
+ * Set up the specified virtqueue. Make sure all virtqueues have been
+ * configured before the device is attached to vDPA bus.
+ */
+#define VDUSE_VQ_SETUP _IOW(VDUSE_BASE, 0x14, struct vduse_vq_config)
+
+/**
+ * struct vduse_vq_state_split - split virtqueue state
+ * @avail_index: available index
+ */
+struct vduse_vq_state_split {
+ __u16 avail_index;
+};
+
+/**
+ * struct vduse_vq_state_packed - packed virtqueue state
+ * @last_avail_counter: last driver ring wrap counter observed by device
+ * @last_avail_idx: device available index
+ * @last_used_counter: device ring wrap counter
+ * @last_used_idx: used index
+ */
+struct vduse_vq_state_packed {
+ __u16 last_avail_counter;
+ __u16 last_avail_idx;
+ __u16 last_used_counter;
+ __u16 last_used_idx;
+};
+
+/**
+ * struct vduse_vq_info - information of a virtqueue
+ * @index: virtqueue index
+ * @num: the size of virtqueue
+ * @desc_addr: address of desc area
+ * @driver_addr: address of driver area
+ * @device_addr: address of device area
+ * @split: split virtqueue state
+ * @packed: packed virtqueue state
+ * @ready: ready status of virtqueue
+ *
+ * Structure used by VDUSE_VQ_GET_INFO ioctl to get virtqueue's information.
+ */
+struct vduse_vq_info {
+ __u32 index;
+ __u32 num;
+ __u64 desc_addr;
+ __u64 driver_addr;
+ __u64 device_addr;
+ union {
+ struct vduse_vq_state_split split;
+ struct vduse_vq_state_packed packed;
+ };
+ __u8 ready;
+};
+
+/* Get the specified virtqueue's information. Caller should set index field. */
+#define VDUSE_VQ_GET_INFO _IOWR(VDUSE_BASE, 0x15, struct vduse_vq_info)
+
+/**
+ * struct vduse_vq_eventfd - eventfd configuration for a virtqueue
+ * @index: virtqueue index
+ * @fd: eventfd, -1 means de-assigning the eventfd
+ *
+ * Structure used by VDUSE_VQ_SETUP_KICKFD ioctl to setup kick eventfd.
+ */
+struct vduse_vq_eventfd {
+ __u32 index;
+#define VDUSE_EVENTFD_DEASSIGN -1
+ int fd;
+};
+
+/*
+ * Setup the kick eventfd for the specified virtqueue. The kick eventfd is
+ * used by the VDUSE kernel module to notify userspace to consume the avail
+ * vring.
+ */
+#define VDUSE_VQ_SETUP_KICKFD _IOW(VDUSE_BASE, 0x16, struct vduse_vq_eventfd)
+
+/*
+ * Inject an interrupt for a specific virtqueue. It's used to notify the
+ * virtio driver to consume the used vring.
+ */
+#define VDUSE_VQ_INJECT_IRQ _IOW(VDUSE_BASE, 0x17, __u32)
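
Taken together, VDUSE_VQ_SETUP, VDUSE_VQ_SETUP_KICKFD and VDUSE_VQ_INJECT_IRQ give the usual per-queue wiring; a sketch (dev_fd, the queue index, and the 256-entry max size are assumptions):

#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vduse.h>

/* Configure one virtqueue and attach a kick eventfd; returns the eventfd. */
int vduse_setup_vq(int dev_fd, __u32 index)
{
	struct vduse_vq_config cfg = { .index = index, .max_size = 256 };
	struct vduse_vq_eventfd kick = { .index = index };

	if (ioctl(dev_fd, VDUSE_VQ_SETUP, &cfg))
		return -1;
	kick.fd = eventfd(0, EFD_CLOEXEC);	/* signaled on driver kicks */
	if (kick.fd < 0 || ioctl(dev_fd, VDUSE_VQ_SETUP_KICKFD, &kick))
		return -1;
	return kick.fd;
}

/* After filling the used ring, tell the driver to consume it. */
int vduse_notify_used(int dev_fd, __u32 index)
{
	return ioctl(dev_fd, VDUSE_VQ_INJECT_IRQ, &index);
}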
+
+/**
+ * struct vduse_iova_umem - userspace memory configuration for one IOVA region
+ * @uaddr: start address of userspace memory, it must be aligned to page size
+ * @iova: start of the IOVA region
+ * @size: size of the IOVA region
+ * @reserved: for future use, needs to be initialized to zero
+ *
+ * Structure used by VDUSE_IOTLB_REG_UMEM and VDUSE_IOTLB_DEREG_UMEM
+ * ioctls to register/de-register userspace memory for IOVA regions.
+ */
+struct vduse_iova_umem {
+ __u64 uaddr;
+ __u64 iova;
+ __u64 size;
+ __u64 reserved[3];
+};
+
+/* Register userspace memory for IOVA regions */
+#define VDUSE_IOTLB_REG_UMEM _IOW(VDUSE_BASE, 0x18, struct vduse_iova_umem)
+
+/* De-register the userspace memory. Caller should set iova and size field. */
+#define VDUSE_IOTLB_DEREG_UMEM _IOW(VDUSE_BASE, 0x19, struct vduse_iova_umem)
+
+/**
+ * struct vduse_iova_info - information of one IOVA region
+ * @start: start of the IOVA region
+ * @last: last of the IOVA region
+ * @capability: capability of the IOVA region
+ * @reserved: for future use, needs to be initialized to zero
+ *
+ * Structure used by VDUSE_IOTLB_GET_INFO ioctl to get information of
+ * one IOVA region.
+ */
+struct vduse_iova_info {
+ __u64 start;
+ __u64 last;
+#define VDUSE_IOVA_CAP_UMEM (1 << 0)
+ __u64 capability;
+ __u64 reserved[3];
+};
+
+/*
+ * Find the first IOVA region that overlaps with the range [start, last]
+ * and return some information on it. Caller should set start and last fields.
+ */
+#define VDUSE_IOTLB_GET_INFO _IOWR(VDUSE_BASE, 0x1a, struct vduse_iova_info)
+
+/* The control messages definition for read(2)/write(2) on /dev/vduse/$NAME */
+
+/**
+ * enum vduse_req_type - request type
+ * @VDUSE_GET_VQ_STATE: get the state for the specified virtqueue from userspace
+ * @VDUSE_SET_STATUS: set the device status
+ * @VDUSE_UPDATE_IOTLB: notify userspace to update the memory mapping for the
+ *                      specified IOVA range via the VDUSE_IOTLB_GET_FD ioctl
+ */
+enum vduse_req_type {
+ VDUSE_GET_VQ_STATE,
+ VDUSE_SET_STATUS,
+ VDUSE_UPDATE_IOTLB,
+};
+
+/**
+ * struct vduse_vq_state - virtqueue state
+ * @index: virtqueue index
+ * @split: split virtqueue state
+ * @packed: packed virtqueue state
+ */
+struct vduse_vq_state {
+ __u32 index;
+ union {
+ struct vduse_vq_state_split split;
+ struct vduse_vq_state_packed packed;
+ };
+};
+
+/**
+ * struct vduse_dev_status - device status
+ * @status: device status
+ */
+struct vduse_dev_status {
+ __u8 status;
+};
+
+/**
+ * struct vduse_iova_range - IOVA range [start, last]
+ * @start: start of the IOVA range
+ * @last: last of the IOVA range
+ */
+struct vduse_iova_range {
+ __u64 start;
+ __u64 last;
+};
+
+/**
+ * struct vduse_dev_request - control request
+ * @type: request type
+ * @request_id: request id
+ * @reserved: for future use
+ * @vq_state: virtqueue state, only index field is available
+ * @s: device status
+ * @iova: IOVA range for updating
+ * @padding: padding
+ *
+ * Structure used by read(2) on /dev/vduse/$NAME.
+ */
+struct vduse_dev_request {
+ __u32 type;
+ __u32 request_id;
+ __u32 reserved[4];
+ union {
+ struct vduse_vq_state vq_state;
+ struct vduse_dev_status s;
+ struct vduse_iova_range iova;
+ __u32 padding[32];
+ };
+};
+
+/**
+ * struct vduse_dev_response - response to control request
+ * @request_id: corresponding request id
+ * @result: the result of request
+ * @reserved: for future use, needs to be initialized to zero
+ * @vq_state: virtqueue state
+ * @padding: padding
+ *
+ * Structure used by write(2) on /dev/vduse/$NAME.
+ */
+struct vduse_dev_response {
+ __u32 request_id;
+#define VDUSE_REQ_RESULT_OK 0x00
+#define VDUSE_REQ_RESULT_FAILED 0x01
+ __u32 result;
+ __u32 reserved[4];
+ union {
+ struct vduse_vq_state vq_state;
+ __u32 padding[32];
+ };
+};
+
+#endif /* _UAPI_VDUSE_H_ */
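
Before moving on, a sketch of how a userspace daemon might service this read()/write() control protocol; the handlers are stubs and the returned avail index is a placeholder:

#include <string.h>
#include <unistd.h>
#include <linux/vduse.h>

int vduse_handle_request(int dev_fd)
{
	struct vduse_dev_request req;
	struct vduse_dev_response resp;

	if (read(dev_fd, &req, sizeof(req)) != sizeof(req))
		return -1;

	memset(&resp, 0, sizeof(resp));
	resp.request_id = req.request_id;
	resp.result = VDUSE_REQ_RESULT_OK;

	switch (req.type) {
	case VDUSE_GET_VQ_STATE:
		resp.vq_state.index = req.vq_state.index;
		resp.vq_state.split.avail_index = 0;	/* placeholder value */
		break;
	case VDUSE_SET_STATUS:
		/* act on req.s.status, e.g. reset the device on 0 */
		break;
	case VDUSE_UPDATE_IOTLB:
		/* drop mappings in [req.iova.start, req.iova.last] and
		 * re-resolve them with VDUSE_IOTLB_GET_FD when next used */
		break;
	default:
		resp.result = VDUSE_REQ_RESULT_FAILED;
		break;
	}
	return write(dev_fd, &resp, sizeof(resp)) == sizeof(resp) ? 0 : -1;
}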
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 9e843a147ead..d7d8e0922376 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -46,6 +46,12 @@
*/
#define VFIO_NOIOMMU_IOMMU 8
+/* Supports VFIO_DMA_UNMAP_FLAG_ALL */
+#define VFIO_UNMAP_ALL 9
+
+/* Supports the vaddr flag for DMA map and unmap */
+#define VFIO_UPDATE_VADDR 10
+
/*
* The IOCTL interface is designed for extensibility by embedding the
* structure length (argsz) and flags into structures passed between
@@ -201,8 +207,11 @@ struct vfio_device_info {
#define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */
#define VFIO_DEVICE_FLAGS_CCW (1 << 4) /* vfio-ccw device */
#define VFIO_DEVICE_FLAGS_AP (1 << 5) /* vfio-ap device */
+#define VFIO_DEVICE_FLAGS_FSL_MC (1 << 6) /* vfio-fsl-mc device */
+#define VFIO_DEVICE_FLAGS_CAPS (1 << 7) /* Info supports caps */
__u32 num_regions; /* Max region index + 1 */
__u32 num_irqs; /* Max IRQ index + 1 */
+ __u32 cap_offset; /* Offset within info struct of first cap */
};
#define VFIO_DEVICE_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 7)
@@ -218,6 +227,15 @@ struct vfio_device_info {
#define VFIO_DEVICE_API_CCW_STRING "vfio-ccw"
#define VFIO_DEVICE_API_AP_STRING "vfio-ap"
+/*
+ * The following capabilities are unique to s390 zPCI devices. Their contents
+ * are further described in vfio_zdev.h.
+ */
+#define VFIO_DEVICE_INFO_CAP_ZPCI_BASE 1
+#define VFIO_DEVICE_INFO_CAP_ZPCI_GROUP 2
+#define VFIO_DEVICE_INFO_CAP_ZPCI_UTIL 3
+#define VFIO_DEVICE_INFO_CAP_ZPCI_PFIP 4
+
/**
* VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
* struct vfio_region_info)
@@ -305,6 +323,7 @@ struct vfio_region_info_cap_type {
#define VFIO_REGION_TYPE_PCI_VENDOR_MASK (0xffff)
#define VFIO_REGION_TYPE_GFX (1)
#define VFIO_REGION_TYPE_CCW (2)
+#define VFIO_REGION_TYPE_MIGRATION_DEPRECATED (3)
/* sub-types for VFIO_REGION_TYPE_PCI_* */
@@ -316,6 +335,8 @@ struct vfio_region_info_cap_type {
/* 10de vendor PCI sub-types */
/*
* NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space.
+ *
+ * Deprecated, region no longer provided
*/
#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM (1)
@@ -323,6 +344,8 @@ struct vfio_region_info_cap_type {
/*
* IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU
* to do TLB invalidation on a GPU.
+ *
+ * Deprecated, region no longer provided
*/
#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD (1)
@@ -378,6 +401,39 @@ struct vfio_region_gfx_edid {
/* sub-types for VFIO_REGION_TYPE_CCW */
#define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD (1)
+#define VFIO_REGION_SUBTYPE_CCW_SCHIB (2)
+#define VFIO_REGION_SUBTYPE_CCW_CRW (3)
+
+/* sub-types for VFIO_REGION_TYPE_MIGRATION */
+#define VFIO_REGION_SUBTYPE_MIGRATION_DEPRECATED (1)
+
+struct vfio_device_migration_info {
+ __u32 device_state; /* VFIO device state */
+#define VFIO_DEVICE_STATE_V1_STOP (0)
+#define VFIO_DEVICE_STATE_V1_RUNNING (1 << 0)
+#define VFIO_DEVICE_STATE_V1_SAVING (1 << 1)
+#define VFIO_DEVICE_STATE_V1_RESUMING (1 << 2)
+#define VFIO_DEVICE_STATE_MASK (VFIO_DEVICE_STATE_V1_RUNNING | \
+ VFIO_DEVICE_STATE_V1_SAVING | \
+ VFIO_DEVICE_STATE_V1_RESUMING)
+
+#define VFIO_DEVICE_STATE_VALID(state) \
+ (state & VFIO_DEVICE_STATE_V1_RESUMING ? \
+ (state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_V1_RESUMING : 1)
+
+#define VFIO_DEVICE_STATE_IS_ERROR(state) \
+ ((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_V1_SAVING | \
+ VFIO_DEVICE_STATE_V1_RESUMING))
+
+#define VFIO_DEVICE_STATE_SET_ERROR(state) \
+ ((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_STATE_V1_SAVING | \
+ VFIO_DEVICE_STATE_V1_RESUMING)
+
+ __u32 reserved;
+ __u64 pending_bytes;
+ __u64 data_offset;
+ __u64 data_size;
+};
/*
* The MSIX mappable capability informs that MSIX data of a BAR can be mmapped
@@ -393,6 +449,8 @@ struct vfio_region_gfx_edid {
* Capability with compressed real address (aka SSA - small system address)
* where GPU RAM is mapped on a system bus. Used by a GPU for DMA routing
* and by the userspace to associate a NVLink bridge with a GPU.
+ *
+ * Deprecated, capability no longer provided
*/
#define VFIO_REGION_INFO_CAP_NVLINK2_SSATGT 4
@@ -407,6 +465,8 @@ struct vfio_region_info_cap_nvlink2_ssatgt {
* property in the device tree. The value is fixed in the hardware
* and failing to provide the correct value results in the link
* not working with no indication from the driver why.
+ *
+ * Deprecated, capability no longer provided
*/
#define VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD 5
@@ -577,11 +637,13 @@ enum {
enum {
VFIO_CCW_IO_IRQ_INDEX,
+ VFIO_CCW_CRW_IRQ_INDEX,
+ VFIO_CCW_REQ_IRQ_INDEX,
VFIO_CCW_NUM_IRQS
};
/**
- * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IORW(VFIO_TYPE, VFIO_BASE + 12,
+ * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
* struct vfio_pci_hot_reset_info)
*
* Return: 0 on success, -errno on failure:
@@ -707,6 +769,365 @@ struct vfio_device_ioeventfd {
#define VFIO_DEVICE_IOEVENTFD _IO(VFIO_TYPE, VFIO_BASE + 16)
+/**
+ * VFIO_DEVICE_FEATURE - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
+ * struct vfio_device_feature)
+ *
+ * Get, set, or probe feature data of the device. The feature is selected
+ * using the FEATURE_MASK portion of the flags field. Support for a feature
+ * can be probed by setting both the FEATURE_MASK and PROBE bits. A probe
+ * may optionally include the GET and/or SET bits to determine read vs write
+ * access of the feature respectively. Probing a feature will return success
+ * if the feature is supported and all of the optionally indicated GET/SET
+ * methods are supported. The format of the data portion of the structure is
+ * specific to the given feature. The data portion is not required for
+ * probing. GET and SET are mutually exclusive, except for use with PROBE.
+ *
+ * Return 0 on success, -errno on failure.
+ */
+struct vfio_device_feature {
+ __u32 argsz;
+ __u32 flags;
+#define VFIO_DEVICE_FEATURE_MASK (0xffff) /* 16-bit feature index */
+#define VFIO_DEVICE_FEATURE_GET (1 << 16) /* Get feature into data[] */
+#define VFIO_DEVICE_FEATURE_SET (1 << 17) /* Set feature from data[] */
+#define VFIO_DEVICE_FEATURE_PROBE (1 << 18) /* Probe feature support */
+ __u8 data[];
+};
+
+#define VFIO_DEVICE_FEATURE _IO(VFIO_TYPE, VFIO_BASE + 17)
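
A probe, per the description above, is the feature index plus the PROBE bit and the access bits of interest; for instance (device_fd is an open VFIO device descriptor; the migration feature index is used purely as an example):

#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Returns nonzero if the migration feature supports GET on this device. */
int vfio_supports_migration(int device_fd)
{
	struct vfio_device_feature probe = {
		.argsz = sizeof(probe),
		.flags = VFIO_DEVICE_FEATURE_MIGRATION |
			 VFIO_DEVICE_FEATURE_PROBE |
			 VFIO_DEVICE_FEATURE_GET,
	};

	/* no data[] is needed for probing, per the description above */
	return ioctl(device_fd, VFIO_DEVICE_FEATURE, &probe) == 0;
}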
+
+/*
+ * Provide support for setting a PCI VF Token, which is used as a shared
+ * secret between PF and VF drivers. This feature may only be set on a
+ * PCI SR-IOV PF when SR-IOV is enabled on the PF and there are no existing
+ * open VFs. Data provided when setting this feature is a 16-byte array
+ * (__u8 b[16]), representing a UUID.
+ */
+#define VFIO_DEVICE_FEATURE_PCI_VF_TOKEN (0)
+
+/*
+ * Indicates the device can support the migration API through
+ * VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE. If this GET succeeds, the RUNNING and
+ * ERROR states are always supported. Support for additional states is
+ * indicated via the flags field; at least VFIO_MIGRATION_STOP_COPY must be
+ * set.
+ *
+ * VFIO_MIGRATION_STOP_COPY means that STOP, STOP_COPY and
+ * RESUMING are supported.
+ *
+ * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P means that RUNNING_P2P
+ * is supported in addition to the STOP_COPY states.
+ *
+ * Other combinations of flags have behavior to be defined in the future.
+ */
+struct vfio_device_feature_migration {
+ __aligned_u64 flags;
+#define VFIO_MIGRATION_STOP_COPY (1 << 0)
+#define VFIO_MIGRATION_P2P (1 << 1)
+};
+#define VFIO_DEVICE_FEATURE_MIGRATION 1
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_SET, execute a migration state change on the VFIO
+ * device. The new state is supplied in device_state; see enum
+ * vfio_device_mig_state for details.
+ *
+ * The kernel migration driver must fully transition the device to the new state
+ * value before the operation returns to the user.
+ *
+ * The kernel migration driver must not generate asynchronous device state
+ * transitions outside of manipulation by the user or the VFIO_DEVICE_RESET
+ * ioctl as described above.
+ *
+ * If this function fails then the current device_state may be the original
+ * operating state or some other state along the combination transition path.
+ * The user can then decide if it should execute a VFIO_DEVICE_RESET, attempt
+ * to return to the original state, or attempt to return to some other state
+ * such as RUNNING or STOP.
+ *
+ * If the new_state starts a new data transfer session then the FD associated
+ * with that session is returned in data_fd. The user is responsible for
+ * closing this FD when it is finished. The user must consider the migration
+ * data stream carried over the FD to be opaque and must preserve the byte
+ * order of the stream. The user is not required to preserve buffer
+ * segmentation when writing the data stream during the RESUMING operation.
+ *
+ * Upon VFIO_DEVICE_FEATURE_GET, get the current migration state of the VFIO
+ * device; data_fd will be -1.
+ */
+struct vfio_device_feature_mig_state {
+ __u32 device_state; /* From enum vfio_device_mig_state */
+ __s32 data_fd;
+};
+#define VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE 2
+
+/*
+ * The device migration Finite State Machine is described by the enum
+ * vfio_device_mig_state. Some of the FSM arcs will create a migration data
+ * transfer session by returning a FD, in this case the migration data will
+ * flow over the FD using read() and write() as discussed below.
+ *
+ * There are 5 states to support VFIO_MIGRATION_STOP_COPY:
+ * RUNNING - The device is running normally
+ * STOP - The device does not change the internal or external state
+ * STOP_COPY - The device internal state can be read out
+ * RESUMING - The device is stopped and is loading a new internal state
+ * ERROR - The device has failed and must be reset
+ *
+ * And 1 optional state to support VFIO_MIGRATION_P2P:
+ * RUNNING_P2P - RUNNING, except the device cannot do peer to peer DMA
+ *
+ * The FSM takes actions on the arcs between FSM states. The driver implements
+ * the following behavior for the FSM arcs:
+ *
+ * RUNNING_P2P -> STOP
+ * STOP_COPY -> STOP
+ * While in STOP the device must cease operation. The device
+ * must not generate interrupts, DMA, or any other change to external state.
+ * It must not change its internal state. When stopped the device and kernel
+ * migration driver must accept and respond to interaction to support external
+ * subsystems in the STOP state, for example PCI MSI-X and PCI config space.
+ * Failure by the user to restrict device access while in STOP must not result
+ * in error conditions outside the user context (ex. host system faults).
+ *
+ * The STOP_COPY arc will terminate a data transfer session.
+ *
+ * RESUMING -> STOP
+ * Leaving RESUMING terminates a data transfer session and indicates the
+ * device should complete processing of the data delivered by write(). The
+ * kernel migration driver should complete the incorporation of data written
+ * to the data transfer FD into the device internal state and perform
+ * final validity and consistency checking of the new device state. If the
+ * user provided data is found to be incomplete, inconsistent, or otherwise
+ * invalid, the migration driver must fail the SET_STATE ioctl and
+ * optionally go to the ERROR state as described below.
+ *
+ * While in STOP the device has the same behavior as other STOP states
+ * described above.
+ *
+ * To abort a RESUMING session the device must be reset.
+ *
+ * RUNNING_P2P -> RUNNING
+ * While in RUNNING the device is fully operational: it may generate
+ * interrupts and DMA, respond to MMIO, all vfio device regions are
+ * functional, and the device may advance its internal state.
+ *
+ * RUNNING -> RUNNING_P2P
+ * STOP -> RUNNING_P2P
+ * While in RUNNING_P2P the device is partially running in the P2P quiescent
+ * state defined below.
+ *
+ * STOP -> STOP_COPY
+ * This arc begins the process of saving the device state and will return a
+ * new data_fd.
+ *
+ * While in the STOP_COPY state the device has the same behavior as STOP
+ * with the addition that the data transfer session continues to stream the
+ * migration state. End of stream on the FD indicates the entire device
+ * state has been transferred.
+ *
+ * The user should take steps to restrict access to vfio device regions while
+ * the device is in STOP_COPY or risk corruption of the device migration data
+ * stream.
+ *
+ * STOP -> RESUMING
+ * Entering the RESUMING state starts a process of restoring the device state
+ * and will return a new data_fd. The data stream fed into the data_fd should
+ * be taken from the data transfer output of a single FD during saving from
+ * a compatible device. The migration driver may alter/reset the internal
+ * device state for this arc if required to prepare the device to receive the
+ * migration data.
+ *
+ * any -> ERROR
+ * ERROR cannot be specified as a device state, however any transition request
+ * can be failed with an errno return and may then move the device_state into
+ * ERROR. In this case the device was unable to execute the requested arc and
+ * was also unable to restore the device to any valid device_state.
+ * To recover from ERROR VFIO_DEVICE_RESET must be used to return the
+ * device_state back to RUNNING.
+ *
+ * The optional peer to peer (P2P) quiescent state is intended to be a quiescent
+ * state for the device for the purposes of managing multiple devices within a
+ * user context where peer-to-peer DMA between devices may be active. The
+ * RUNNING_P2P state must prevent the device from initiating
+ * any new P2P DMA transactions. If the device can identify P2P transactions
+ * then it can stop only P2P DMA; otherwise it must stop all DMA. The migration
+ * driver must complete any such outstanding operations prior to completing the
+ * FSM arc into a P2P state. For the purpose of specification, a device that
+ * does not support RUNNING_P2P behaves as though it were fully running. As in
+ * STOP or STOP_COPY, the user must not touch the device; otherwise the state
+ * can be exited.
+ *
+ * The remaining possible transitions are interpreted as combinations of the
+ * above FSM arcs. As there are multiple paths through the FSM arcs the path
+ * should be selected based on the following rules:
+ * - Select the shortest path.
+ * Refer to vfio_mig_get_next_state() for the result of the algorithm.
+ *
+ * The automatic transit through the FSM arcs that make up the combination
+ * transition is invisible to the user. When working with combination arcs the
+ * user may see any step along the path in the device_state if SET_STATE
+ * fails. When handling these types of errors users should anticipate future
+ * revisions of this protocol using new states and those states becoming
+ * visible in this case.
+ *
+ * The optional states cannot be used with SET_STATE if the device does not
+ * support them. The user can discover if these states are supported by using
+ * VFIO_DEVICE_FEATURE_MIGRATION. By using combination transitions the user can
+ * avoid knowing about these optional states if the kernel driver supports them.
+ */
+enum vfio_device_mig_state {
+ VFIO_DEVICE_STATE_ERROR = 0,
+ VFIO_DEVICE_STATE_STOP = 1,
+ VFIO_DEVICE_STATE_RUNNING = 2,
+ VFIO_DEVICE_STATE_STOP_COPY = 3,
+ VFIO_DEVICE_STATE_RESUMING = 4,
+ VFIO_DEVICE_STATE_RUNNING_P2P = 5,
+};
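
A save-side sketch of the SET_STATE flow described above: move the device to STOP_COPY and drain the returned data_fd until end of stream (device_fd and out_fd are assumptions; real code would also handle the mid-path failure cases discussed above):

#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vfio.h>

int vfio_save_device(int device_fd, int out_fd)
{
	size_t sz = sizeof(struct vfio_device_feature) +
		    sizeof(struct vfio_device_feature_mig_state);
	struct vfio_device_feature *feat = calloc(1, sz);
	struct vfio_device_feature_mig_state *mig;
	char buf[4096];
	ssize_t n = -1;

	if (!feat)
		return -1;
	mig = (void *)feat->data;
	feat->argsz = sz;
	feat->flags = VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE |
		      VFIO_DEVICE_FEATURE_SET;
	mig->device_state = VFIO_DEVICE_STATE_STOP_COPY;

	if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feat) == 0) {
		/* end of stream on data_fd marks the end of the device state */
		while ((n = read(mig->data_fd, buf, sizeof(buf))) > 0)
			if (write(out_fd, buf, n) != n) {
				n = -1;
				break;
			}
		close(mig->data_fd);
	}
	free(feat);
	return n < 0 ? -1 : 0;
}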
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_SET, allow the device to be moved into a low power
+ * state with the platform-based power management. Device use of lower power
+ * states depends on factors managed by the runtime power management core,
+ * including system level support and coordinating support among dependent
+ * devices. Enabling device low power entry does not guarantee lower power
+ * usage by the device, nor is a mechanism provided through this feature to
+ * know the current power state of the device. If any device access happens
+ * (either from the host or through the vfio uAPI) when the device is in the
+ * low power state, then the host will move the device out of the low power
+ * state as necessary prior to the access. Once the access is completed, the
+ * device may re-enter the low power state. For single shot low power support
+ * with wake-up notification, see
+ * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP below. Access to mmap'd
+ * device regions is disabled on LOW_POWER_ENTRY and may only be resumed after
+ * calling LOW_POWER_EXIT.
+ */
+#define VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY 3
+
+/*
+ * This device feature has the same behavior as
+ * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY with the exception that the user
+ * provides an eventfd for wake-up notification. When the device moves out of
+ * the low power state for the wake-up, the host will not allow the device to
+ * re-enter a low power state without a subsequent user call to one of the low
+ * power entry device feature IOCTLs. Access to mmap'd device regions is
+ * disabled on LOW_POWER_ENTRY_WITH_WAKEUP and may only be resumed after the
+ * low power exit. The low power exit can happen either through LOW_POWER_EXIT
+ * or through any other access (where the wake-up notification has been
+ * generated). The access to mmap'd device regions will not trigger low power
+ * exit.
+ *
+ * The notification through the provided eventfd will be generated only when
+ * the device has entered and is resumed from a low power state after
+ * calling this device feature IOCTL. A device that has not entered low power
+ * state, as managed through the runtime power management core, will not
+ * generate a notification through the provided eventfd on access. Calling the
+ * LOW_POWER_EXIT feature is optional in the case where notification has been
+ * signaled on the provided eventfd that a resume from low power has occurred.
+ */
+struct vfio_device_low_power_entry_with_wakeup {
+ __s32 wakeup_eventfd;
+ __u32 reserved;
+};
+
+#define VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP 4
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_SET, disallow use of device low power states as
+ * previously enabled via VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY or
+ * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP device features.
+ * This device feature IOCTL may itself generate a wakeup eventfd notification
+ * in the latter case if the device had previously entered a low power state.
+ */
+#define VFIO_DEVICE_FEATURE_LOW_POWER_EXIT 5
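
A sketch of arming LOW_POWER_ENTRY_WITH_WAKEUP (device_fd is assumed; the eventfd becomes readable when the device has resumed from low power, per the description above):

#include <stdlib.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

int vfio_arm_low_power(int device_fd)
{
	size_t sz = sizeof(struct vfio_device_feature) +
		    sizeof(struct vfio_device_low_power_entry_with_wakeup);
	struct vfio_device_feature *feat = calloc(1, sz);
	struct vfio_device_low_power_entry_with_wakeup *entry;
	int evt = eventfd(0, EFD_CLOEXEC);

	if (!feat || evt < 0)
		return -1;
	entry = (void *)feat->data;
	feat->argsz = sz;
	feat->flags = VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP |
		      VFIO_DEVICE_FEATURE_SET;
	entry->wakeup_eventfd = evt;
	if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feat)) {
		free(feat);
		return -1;
	}
	free(feat);
	return evt;	/* poll/read this fd for resume notifications */
}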
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_SET start/stop device DMA logging.
+ * VFIO_DEVICE_FEATURE_PROBE can be used to detect if the device supports
+ * DMA logging.
+ *
+ * DMA logging allows a device to internally record what DMAs the device is
+ * initiating and report them back to userspace. It is part of the VFIO
+ * migration infrastructure that allows implementing dirty page tracking
+ * during the pre-copy phase of live migration. Only DMA WRITEs are logged,
+ * and this API is not connected to VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE.
+ *
+ * When DMA logging is started a range of IOVAs to monitor is provided and the
+ * device can optimize its logging to cover only the IOVA range given. Each
+ * DMA that the device initiates inside the range will be logged by the device
+ * for later retrieval.
+ *
+ * page_size is an input that hints what tracking granularity the device
+ * should try to achieve. If the device cannot do the hinted page size then
+ * it's the driver's choice which page size to pick based on its support.
+ * On output the device will return the page size it selected.
+ *
+ * ranges is a pointer to an array of
+ * struct vfio_device_feature_dma_logging_range.
+ *
+ * The core kernel code guarantees to support, at minimum, a num_ranges that
+ * fits into a single kernel page. User space can try higher values but should
+ * fall back to that limit if it can't be achieved due to driver limitations.
+ *
+ * A single call to start device DMA logging can be issued and a matching stop
+ * should follow at the end. Another start is not allowed in the meantime.
+ */
+struct vfio_device_feature_dma_logging_control {
+ __aligned_u64 page_size;
+ __u32 num_ranges;
+ __u32 __reserved;
+ __aligned_u64 ranges;
+};
+
+struct vfio_device_feature_dma_logging_range {
+ __aligned_u64 iova;
+ __aligned_u64 length;
+};
+
+#define VFIO_DEVICE_FEATURE_DMA_LOGGING_START 6
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_SET stop device DMA logging that was started
+ * by VFIO_DEVICE_FEATURE_DMA_LOGGING_START
+ */
+#define VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP 7
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_GET read back and clear the device DMA log
+ *
+ * Query the device's DMA log for written pages within the given IOVA range.
+ * During querying the log is cleared for the IOVA range.
+ *
+ * bitmap is a pointer to an array of u64s that will hold the output bitmap
+ * with 1 bit reporting a page_size unit of IOVA. The mapping of IOVA to bits
+ * is given by (with n = (addr - iova) / page_size, the page index):
+ *  bitmap[n / 64] & (1ULL << (n % 64))
+ *
+ * The input page_size can be any power of two value and does not have to
+ * match the value given to VFIO_DEVICE_FEATURE_DMA_LOGGING_START. The driver
+ * will format its internal logging to match the reporting page size, possibly
+ * by replicating bits if the internal page size is lower than requested.
+ *
+ * The LOGGING_REPORT will only set bits in the bitmap and never clear or
+ * perform any initialization of the user provided bitmap.
+ *
+ * If any error is returned userspace should assume that the dirty log is
+ * corrupted. Error recovery is to consider all memory dirty and try to
+ * restart the dirty tracking, or to abort/restart the whole migration.
+ *
+ * If DMA logging is not enabled, an error will be returned.
+ */
+struct vfio_device_feature_dma_logging_report {
+ __aligned_u64 iova;
+ __aligned_u64 length;
+ __aligned_u64 page_size;
+ __aligned_u64 bitmap;
+};
+
+#define VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT 8
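
Putting the three DMA-logging features together, a start-then-report sketch (device_fd, the single range, and the 4 KiB page size are assumptions; the dev_feature() helper is hypothetical and simply wraps data behind the variable-length vfio_device_feature header):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Hypothetical helper: wrap feature data behind a vfio_device_feature header. */
static int dev_feature(int fd, __u32 flags, void *data, size_t len)
{
	size_t sz = sizeof(struct vfio_device_feature) + len;
	struct vfio_device_feature *feat = calloc(1, sz);
	int ret;

	if (!feat)
		return -1;
	feat->argsz = sz;
	feat->flags = flags;
	memcpy(feat->data, data, len);
	ret = ioctl(fd, VFIO_DEVICE_FEATURE, feat);
	free(feat);
	return ret;
}

int track_dirty(int device_fd, __u64 iova, __u64 length, __u64 *bitmap)
{
	struct vfio_device_feature_dma_logging_range range = {
		.iova = iova, .length = length,
	};
	struct vfio_device_feature_dma_logging_control ctrl = {
		.page_size = 4096,		/* hinted granularity */
		.num_ranges = 1,
		.ranges = (__u64)(uintptr_t)&range,
	};
	struct vfio_device_feature_dma_logging_report report = {
		.iova = iova, .length = length, .page_size = 4096,
		.bitmap = (__u64)(uintptr_t)bitmap,	/* caller-zeroed u64s */
	};

	if (dev_feature(device_fd, VFIO_DEVICE_FEATURE_DMA_LOGGING_START |
			VFIO_DEVICE_FEATURE_SET, &ctrl, sizeof(ctrl)))
		return -1;
	/* ... device runs and DMAs; later, harvest the dirtied pages ... */
	return dev_feature(device_fd, VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT |
			   VFIO_DEVICE_FEATURE_GET, &report, sizeof(report));
}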
+
/* -------- API for Type1 VFIO IOMMU -------- */
/**
@@ -748,6 +1169,44 @@ struct vfio_iommu_type1_info_cap_iova_range {
struct vfio_iova_range iova_ranges[];
};
+/*
+ * The migration capability allows reporting the supported features for
+ * migration.
+ *
+ * The structures below define version 1 of this capability.
+ *
+ * The existence of this capability indicates that the IOMMU kernel driver
+ * supports dirty page logging.
+ *
+ * pgsize_bitmap: Kernel driver returns bitmap of supported page sizes for dirty
+ * page logging.
+ * max_dirty_bitmap_size: Kernel driver returns maximum supported dirty bitmap
+ * size in bytes that can be used by user applications when getting the dirty
+ * bitmap.
+ */
+#define VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION 2
+
+struct vfio_iommu_type1_info_cap_migration {
+ struct vfio_info_cap_header header;
+ __u32 flags;
+ __u64 pgsize_bitmap;
+ __u64 max_dirty_bitmap_size; /* in bytes */
+};
+
+/*
+ * The DMA available capability allows reporting the current number of
+ * simultaneously outstanding DMA mappings that are allowed.
+ *
+ * The structure below defines version 1 of this capability.
+ *
+ * avail: specifies the current number of outstanding DMA mappings allowed.
+ */
+#define VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL 3
+
+struct vfio_iommu_type1_info_dma_avail {
+ struct vfio_info_cap_header header;
+ __u32 avail;
+};
+
#define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
/**
@@ -755,12 +1214,22 @@ struct vfio_iommu_type1_info_cap_iova_range {
*
* Map process virtual addresses to IO virtual addresses using the
* provided struct vfio_dma_map. Caller sets argsz. READ &/ WRITE required.
+ *
+ * If flags & VFIO_DMA_MAP_FLAG_VADDR, update the base vaddr for iova, and
+ * unblock translation of host virtual addresses in the iova range. The vaddr
+ * must have previously been invalidated with VFIO_DMA_UNMAP_FLAG_VADDR. To
+ * maintain memory consistency within the user application, the updated vaddr
+ * must address the same memory object as originally mapped. Failure to do so
+ * will result in user memory corruption and/or device misbehavior. iova and
+ * size must match those in the original MAP_DMA call. Protection is not
+ * changed, and the READ & WRITE flags must be 0.
*/
struct vfio_iommu_type1_dma_map {
__u32 argsz;
__u32 flags;
#define VFIO_DMA_MAP_FLAG_READ (1 << 0) /* readable from device */
#define VFIO_DMA_MAP_FLAG_WRITE (1 << 1) /* writable from device */
+#define VFIO_DMA_MAP_FLAG_VADDR (1 << 2)
__u64 vaddr; /* Process virtual address */
__u64 iova; /* IO virtual address */
__u64 size; /* Size of mapping (bytes) */
@@ -768,6 +1237,12 @@ struct vfio_iommu_type1_dma_map {
#define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13)
+struct vfio_bitmap {
+ __u64 pgsize; /* page size for bitmap in bytes */
+ __u64 size; /* in bytes */
+ __u64 __user *data; /* one bit per page */
+};
+
/**
* VFIO_IOMMU_UNMAP_DMA - _IOWR(VFIO_TYPE, VFIO_BASE + 14,
* struct vfio_dma_unmap)
@@ -777,12 +1252,34 @@ struct vfio_iommu_type1_dma_map {
* field. No guarantee is made to the user that arbitrary unmaps of iova
* or size different from those used in the original mapping call will
* succeed.
+ *
+ * VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP should be set to get the dirty bitmap
+ * before unmapping IO virtual addresses. When this flag is set, the user must
+ * provide a struct vfio_bitmap in data[]. The user must provide
+ * zero-initialized memory via vfio_bitmap.data and its size in the
+ * vfio_bitmap.size field. Each bit in the bitmap represents one page of the
+ * user-provided page size in the vfio_bitmap.pgsize field, consecutively
+ * starting from the iova offset. A set bit indicates that the page at that
+ * offset from iova is dirty. A bitmap of the pages in the unmapped range is
+ * returned in the user-provided vfio_bitmap.data.
+ *
+ * If flags & VFIO_DMA_UNMAP_FLAG_ALL, unmap all addresses. iova and size
+ * must be 0. This cannot be combined with the get-dirty-bitmap flag.
+ *
+ * If flags & VFIO_DMA_UNMAP_FLAG_VADDR, do not unmap, but invalidate host
+ * virtual addresses in the iova range. Tasks that attempt to translate an
+ * iova's vaddr will block. DMA to already-mapped pages continues. This
+ * cannot be combined with the get-dirty-bitmap flag.
*/
struct vfio_iommu_type1_dma_unmap {
__u32 argsz;
__u32 flags;
+#define VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP (1 << 0)
+#define VFIO_DMA_UNMAP_FLAG_ALL (1 << 1)
+#define VFIO_DMA_UNMAP_FLAG_VADDR (1 << 2)
__u64 iova; /* IO virtual address */
__u64 size; /* Size of mapping (bytes) */
+ __u8 data[];
};
#define VFIO_IOMMU_UNMAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 14)
@@ -794,6 +1291,57 @@ struct vfio_iommu_type1_dma_unmap {
#define VFIO_IOMMU_ENABLE _IO(VFIO_TYPE, VFIO_BASE + 15)
#define VFIO_IOMMU_DISABLE _IO(VFIO_TYPE, VFIO_BASE + 16)
+/**
+ * VFIO_IOMMU_DIRTY_PAGES - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
+ * struct vfio_iommu_type1_dirty_bitmap)
+ * This IOCTL is used for dirty page logging.
+ * The caller should set a flag depending on which operation to perform;
+ * details below:
+ *
+ * Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_START flag set
+ * instructs the IOMMU driver to log pages that are dirtied or potentially
+ * dirtied by the device; it is designed to be used when a migration is in
+ * progress. Dirty pages are logged until logging is disabled by the user
+ * application by calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP
+ * flag.
+ *
+ * Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag set
+ * instructs the IOMMU driver to stop logging dirtied pages.
+ *
+ * Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP flag set
+ * returns the dirty pages bitmap for the IOMMU container for a given IOVA
+ * range. The user must specify the IOVA range and the pgsize through the
+ * structure vfio_iommu_type1_dirty_bitmap_get in the data[] portion. This
+ * interface supports getting a bitmap of the smallest supported pgsize only
+ * and can be modified in the future to get a bitmap of any specified
+ * supported pgsize. The
+ * user must provide a zeroed memory area for the bitmap memory and specify its
+ * size in bitmap.size. One bit is used to represent one page consecutively
+ * starting from iova offset. The user should provide page size in bitmap.pgsize
+ * field. A bit set in the bitmap indicates that the page at that offset from
+ * iova is dirty. The caller must set argsz to a value including the size of
+ * structure vfio_iommu_type1_dirty_bitmap_get, but excluding the size of the
+ * actual bitmap. If dirty pages logging is not enabled, an error will be
+ * returned.
+ *
+ * Only one of the flags _START, _STOP and _GET may be specified at a time.
+ */
+struct vfio_iommu_type1_dirty_bitmap {
+ __u32 argsz;
+ __u32 flags;
+#define VFIO_IOMMU_DIRTY_PAGES_FLAG_START (1 << 0)
+#define VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP (1 << 1)
+#define VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP (1 << 2)
+ __u8 data[];
+};
+
+struct vfio_iommu_type1_dirty_bitmap_get {
+ __u64 iova; /* IO virtual address */
+ __u64 size; /* Size of iova range */
+ struct vfio_bitmap bitmap;
+};
+
+#define VFIO_IOMMU_DIRTY_PAGES _IO(VFIO_TYPE, VFIO_BASE + 17)
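
A sketch of the GET_BITMAP operation (container_fd, the 4 KiB pgsize, and the caller-zeroed data buffer are assumptions; note that argsz covers the get structure but not the bitmap itself, as stated above):

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

int vfio_get_dirty(int container_fd, __u64 iova, __u64 size, __u64 *data)
{
	size_t sz = sizeof(struct vfio_iommu_type1_dirty_bitmap) +
		    sizeof(struct vfio_iommu_type1_dirty_bitmap_get);
	struct vfio_iommu_type1_dirty_bitmap *db = calloc(1, sz);
	struct vfio_iommu_type1_dirty_bitmap_get *get;
	int ret;

	if (!db)
		return -1;
	db->argsz = sz;		/* excludes the bitmap itself, as noted above */
	db->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
	get = (void *)db->data;
	get->iova = iova;
	get->size = size;
	get->bitmap.pgsize = 4096;			/* one bit per 4 KiB page */
	get->bitmap.size = ((size / 4096) + 7) / 8;	/* bytes, caller-zeroed */
	get->bitmap.data = data;
	ret = ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, db);
	free(db);
	return ret;
}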
+
/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
/*
diff --git a/include/uapi/linux/vfio_ccw.h b/include/uapi/linux/vfio_ccw.h
index cbecbf0cd54f..aa04f3aa6db0 100644
--- a/include/uapi/linux/vfio_ccw.h
+++ b/include/uapi/linux/vfio_ccw.h
@@ -34,4 +34,23 @@ struct ccw_cmd_region {
__u32 ret_code;
} __packed;
+/*
+ * Used for processing commands that read the subchannel-information block.
+ * Reading this region triggers a stsch() to hardware.
+ * Note: this is controlled by a capability.
+ */
+struct ccw_schib_region {
+#define SCHIB_AREA_SIZE 52
+ __u8 schib_area[SCHIB_AREA_SIZE];
+} __packed;
+
+/*
+ * Used for returning a Channel Report Word to userspace.
+ * Note: this is controlled by a capability.
+ */
+struct ccw_crw_region {
+ __u32 crw;
+ __u32 pad;
+} __packed;
+
#endif
diff --git a/include/uapi/linux/vfio_zdev.h b/include/uapi/linux/vfio_zdev.h
new file mode 100644
index 000000000000..77f2aff1f27e
--- /dev/null
+++ b/include/uapi/linux/vfio_zdev.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * VFIO Region definitions for ZPCI devices
+ *
+ * Copyright IBM Corp. 2020
+ *
+ * Author(s): Pierre Morel <pmorel@linux.ibm.com>
+ * Matthew Rosato <mjrosato@linux.ibm.com>
+ */
+
+#ifndef _VFIO_ZDEV_H_
+#define _VFIO_ZDEV_H_
+
+#include <linux/types.h>
+#include <linux/vfio.h>
+
+/**
+ * VFIO_DEVICE_INFO_CAP_ZPCI_BASE - Base PCI Function information
+ *
+ * This capability provides a set of descriptive information about the
+ * associated PCI function.
+ */
+struct vfio_device_info_cap_zpci_base {
+ struct vfio_info_cap_header header;
+ __u64 start_dma; /* Start of available DMA addresses */
+ __u64 end_dma; /* End of available DMA addresses */
+ __u16 pchid; /* Physical Channel ID */
+ __u16 vfn; /* Virtual function number */
+ __u16 fmb_length; /* Measurement Block Length (in bytes) */
+ __u8 pft; /* PCI Function Type */
+ __u8 gid; /* PCI function group ID */
+ /* End of version 1 */
+ __u32 fh; /* PCI function handle */
+ /* End of version 2 */
+};
+
+/**
+ * VFIO_DEVICE_INFO_CAP_ZPCI_GROUP - Base PCI Function Group information
+ *
+ * This capability provides a set of descriptive information about the group of
+ * PCI functions that the associated device belongs to.
+ */
+struct vfio_device_info_cap_zpci_group {
+ struct vfio_info_cap_header header;
+ __u64 dasm; /* DMA Address space mask */
+ __u64 msi_addr; /* MSI address */
+ __u64 flags;
+#define VFIO_DEVICE_INFO_ZPCI_FLAG_REFRESH 1 /* Program-specified TLB refresh */
+ __u16 mui; /* Measurement Block Update Interval */
+ __u16 noi; /* Maximum number of MSIs */
+ __u16 maxstbl; /* Maximum Store Block Length */
+ __u8 version; /* Supported PCI Version */
+ /* End of version 1 */
+ __u8 reserved;
+ __u16 imaxstbl; /* Maximum Interpreted Store Block Length */
+ /* End of version 2 */
+};
+
+/**
+ * VFIO_DEVICE_INFO_CAP_ZPCI_UTIL - Utility String
+ *
+ * This capability provides the utility string for the associated device, which
+ * is a device identifier string made up of EBCDIC characters. 'size' specifies
+ * the length of 'util_str'.
+ */
+struct vfio_device_info_cap_zpci_util {
+ struct vfio_info_cap_header header;
+ __u32 size;
+ __u8 util_str[];
+};
+
+/**
+ * VFIO_DEVICE_INFO_CAP_ZPCI_PFIP - PCI Function Path
+ *
+ * This capability provides the PCI function path string, which is an identifier
+ * that describes the internal hardware path of the device. 'size' specifies
+ * the length of 'pfip'.
+ */
+struct vfio_device_info_cap_zpci_pfip {
+ struct vfio_info_cap_header header;
+ __u32 size;
+ __u8 pfip[];
+};
+
+#endif
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index 40d028eed645..f9f115a7c75b 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -15,6 +15,8 @@
#include <linux/types.h>
#include <linux/ioctl.h>
+#define VHOST_FILE_UNBIND -1
+
/* ioctls */
#define VHOST_VIRTIO 0xAF
@@ -87,9 +89,6 @@
/* Set or get vhost backend capability */
-/* Use message type V2 */
-#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
-
#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
@@ -116,4 +115,69 @@
#define VHOST_VSOCK_SET_GUEST_CID _IOW(VHOST_VIRTIO, 0x60, __u64)
#define VHOST_VSOCK_SET_RUNNING _IOW(VHOST_VIRTIO, 0x61, int)
+/* VHOST_VDPA specific defines */
+
+/* Get the device id. The device ids follow the same definition as
+ * the device id defined in the virtio spec.
+ */
+#define VHOST_VDPA_GET_DEVICE_ID _IOR(VHOST_VIRTIO, 0x70, __u32)
+/* Get and set the status. The status bits follow the same definition
+ * as the device status defined in the virtio spec.
+ */
+#define VHOST_VDPA_GET_STATUS _IOR(VHOST_VIRTIO, 0x71, __u8)
+#define VHOST_VDPA_SET_STATUS _IOW(VHOST_VIRTIO, 0x72, __u8)
+/* Get and set the device config. The device config follows the same
+ * definition as the device config defined in the virtio spec.
+ */
+#define VHOST_VDPA_GET_CONFIG _IOR(VHOST_VIRTIO, 0x73, \
+ struct vhost_vdpa_config)
+#define VHOST_VDPA_SET_CONFIG _IOW(VHOST_VIRTIO, 0x74, \
+ struct vhost_vdpa_config)
+/* Enable/disable the ring. */
+#define VHOST_VDPA_SET_VRING_ENABLE _IOW(VHOST_VIRTIO, 0x75, \
+ struct vhost_vring_state)
+/* Get the max ring size. */
+#define VHOST_VDPA_GET_VRING_NUM _IOR(VHOST_VIRTIO, 0x76, __u16)
+
+/* Set event fd for config interrupt */
+#define VHOST_VDPA_SET_CONFIG_CALL _IOW(VHOST_VIRTIO, 0x77, int)
+
+/* Get the valid iova range */
+#define VHOST_VDPA_GET_IOVA_RANGE _IOR(VHOST_VIRTIO, 0x78, \
+ struct vhost_vdpa_iova_range)
+/* Get the config size */
+#define VHOST_VDPA_GET_CONFIG_SIZE _IOR(VHOST_VIRTIO, 0x79, __u32)
+
+/* Get the count of all virtqueues */
+#define VHOST_VDPA_GET_VQS_COUNT _IOR(VHOST_VIRTIO, 0x80, __u32)
+
+/* Get the number of virtqueue groups. */
+#define VHOST_VDPA_GET_GROUP_NUM _IOR(VHOST_VIRTIO, 0x81, __u32)
+
+/* Get the number of address spaces. */
+#define VHOST_VDPA_GET_AS_NUM _IOR(VHOST_VIRTIO, 0x7A, unsigned int)
+
+/* Get the group for a virtqueue: read index, write group in num.
+ * The virtqueue index is stored in the index field of
+ * vhost_vring_state. The group for this specific virtqueue is
+ * returned via the num field of vhost_vring_state.
+ */
+#define VHOST_VDPA_GET_VRING_GROUP _IOWR(VHOST_VIRTIO, 0x7B, \
+ struct vhost_vring_state)
+/* Set the ASID for a virtqueue group. The group index is stored in
+ * the index field of vhost_vring_state, and the ASID associated with
+ * this group is stored in the num field of vhost_vring_state.
+ */
+#define VHOST_VDPA_SET_GROUP_ASID _IOW(VHOST_VIRTIO, 0x7C, \
+ struct vhost_vring_state)
+
+/* Suspend a device so it does not process virtqueue requests anymore
+ *
+ * After this ioctl returns, the device must preserve all the necessary state
+ * (the virtqueue vring base plus any device-specific state) that is
+ * required for restoring it in the future. The device must not change its
+ * configuration after that point.
+ */
+#define VHOST_VDPA_SUSPEND _IO(VHOST_VIRTIO, 0x7D)
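
As a usage sketch of the query ioctls above (the /dev/vhost-vdpa-0 path and the probe order are assumptions):

#include <fcntl.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

int vdpa_probe(void)
{
	int fd = open("/dev/vhost-vdpa-0", O_RDWR);
	struct vhost_vdpa_iova_range range;
	struct vhost_vdpa_config *cfg;
	__u32 dev_id, cfg_size;
	__u16 vq_num;

	if (fd < 0)
		return -1;
	if (ioctl(fd, VHOST_VDPA_GET_DEVICE_ID, &dev_id) ||
	    ioctl(fd, VHOST_VDPA_GET_VRING_NUM, &vq_num) ||
	    ioctl(fd, VHOST_VDPA_GET_CONFIG_SIZE, &cfg_size) ||
	    ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, &range))
		return -1;
	/* config reads go through a length-prefixed header */
	cfg = calloc(1, sizeof(*cfg) + cfg_size);
	if (cfg) {
		cfg->off = 0;
		cfg->len = cfg_size;
		ioctl(fd, VHOST_VDPA_GET_CONFIG, cfg);
		free(cfg);
	}
	return fd;
}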
+
#endif
diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h
index c907290ff065..53601ce2c20a 100644
--- a/include/uapi/linux/vhost_types.h
+++ b/include/uapi/linux/vhost_types.h
@@ -60,6 +60,17 @@ struct vhost_iotlb_msg {
#define VHOST_IOTLB_UPDATE 2
#define VHOST_IOTLB_INVALIDATE 3
#define VHOST_IOTLB_ACCESS_FAIL 4
+/*
+ * VHOST_IOTLB_BATCH_BEGIN and VHOST_IOTLB_BATCH_END allow modifying
+ * multiple mappings in one go: beginning with
+ * VHOST_IOTLB_BATCH_BEGIN, followed by any number of
+ * VHOST_IOTLB_UPDATE messages, and ending with VHOST_IOTLB_BATCH_END.
+ * When one of these two values is used as the message type, the rest
+ * of the fields in the message are ignored. There's no guarantee that
+ * these changes take place atomically in the device.
+ */
+#define VHOST_IOTLB_BATCH_BEGIN 5
+#define VHOST_IOTLB_BATCH_END 6
__u8 type;
};
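
A batching sketch using write(2) of struct vhost_msg_v2 messages (assumes the backend negotiated VHOST_BACKEND_F_IOTLB_MSG_V2 and VHOST_BACKEND_F_IOTLB_BATCH; the helper and the RW permission are illustrative):

#include <string.h>
#include <unistd.h>
#include <linux/vhost.h>

static int send_iotlb(int fd, struct vhost_msg_v2 *msg)
{
	return write(fd, msg, sizeof(*msg)) == sizeof(*msg) ? 0 : -1;
}

int iotlb_batch_update(int fd, __u64 iova, __u64 uaddr, __u64 size)
{
	struct vhost_msg_v2 msg;

	memset(&msg, 0, sizeof(msg));
	msg.type = VHOST_IOTLB_MSG_V2;

	msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;	/* other fields ignored */
	if (send_iotlb(fd, &msg))
		return -1;

	msg.iotlb.type = VHOST_IOTLB_UPDATE;
	msg.iotlb.iova = iova;
	msg.iotlb.uaddr = uaddr;
	msg.iotlb.size = size;
	msg.iotlb.perm = VHOST_ACCESS_RW;
	if (send_iotlb(fd, &msg))
		return -1;

	memset(&msg.iotlb, 0, sizeof(msg.iotlb));
	msg.iotlb.type = VHOST_IOTLB_BATCH_END;		/* commit the batch */
	return send_iotlb(fd, &msg);
}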
@@ -76,7 +87,7 @@ struct vhost_msg {
struct vhost_msg_v2 {
__u32 type;
- __u32 reserved;
+ __u32 asid;
union {
struct vhost_iotlb_msg iotlb;
__u8 padding[64];
@@ -96,7 +107,7 @@ struct vhost_memory_region {
struct vhost_memory {
__u32 nregions;
__u32 padding;
- struct vhost_memory_region regions[0];
+ struct vhost_memory_region regions[];
};
/* VHOST_SCSI specific definitions */
@@ -119,10 +130,38 @@ struct vhost_scsi_target {
unsigned short reserved;
};
+/* VHOST_VDPA specific definitions */
+
+struct vhost_vdpa_config {
+ __u32 off;
+ __u32 len;
+ __u8 buf[];
+};
+
+/* vhost vdpa IOVA range
+ * @first: First address that can be mapped by vhost-vDPA
+ * @last: Last address that can be mapped by vhost-vDPA
+ */
+struct vhost_vdpa_iova_range {
+ __u64 first;
+ __u64 last;
+};
+
/* Feature bits */
/* Log all write descriptors. Can be changed while device is active. */
#define VHOST_F_LOG_ALL 26
/* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */
#define VHOST_NET_F_VIRTIO_NET_HDR 27
+/* Use message type V2 */
+#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
+/* IOTLB can accept batching hints */
+#define VHOST_BACKEND_F_IOTLB_BATCH 0x2
+/* IOTLB can accept an address space identifier through the V2 type
+ * of IOTLB message
+ */
+#define VHOST_BACKEND_F_IOTLB_ASID 0x3
+/* Device can be suspended */
+#define VHOST_BACKEND_F_SUSPEND 0x4
+
#endif
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 5f9357dcb060..29da1f4b4578 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -171,6 +171,8 @@ enum v4l2_buf_type {
|| (type) == V4L2_BUF_TYPE_SDR_OUTPUT \
|| (type) == V4L2_BUF_TYPE_META_OUTPUT)
+#define V4L2_TYPE_IS_CAPTURE(type) (!V4L2_TYPE_IS_OUTPUT(type))
+
enum v4l2_tuner_type {
V4L2_TUNER_RADIO = 1,
V4L2_TUNER_ANALOG_TV = 2,
@@ -219,9 +221,7 @@ enum v4l2_colorspace {
V4L2_COLORSPACE_470_SYSTEM_M = 5,
/*
- * EBU Tech 3213 PAL/SECAM colorspace. This only makes sense when
- * dealing with really old PAL/SECAM recordings. Superseded by
- * SMPTE 170M.
+ * EBU Tech 3213 PAL/SECAM colorspace.
*/
V4L2_COLORSPACE_470_SYSTEM_BG = 6,
@@ -245,6 +245,14 @@ enum v4l2_colorspace {
/* DCI-P3 colorspace, used by cinema projectors */
V4L2_COLORSPACE_DCI_P3 = 12,
+
+#ifdef __KERNEL__
+ /*
+ * Largest supported colorspace value, assigned by the compiler, used
+ * by the framework to check for invalid values.
+ */
+ V4L2_COLORSPACE_LAST,
+#endif
};
/*
@@ -283,6 +291,13 @@ enum v4l2_xfer_func {
V4L2_XFER_FUNC_NONE = 5,
V4L2_XFER_FUNC_DCI_P3 = 6,
V4L2_XFER_FUNC_SMPTE2084 = 7,
+#ifdef __KERNEL__
+ /*
+ * Largest supported transfer function value, assigned by the compiler,
+ * used by the framework to check for invalid values.
+ */
+ V4L2_XFER_FUNC_LAST,
+#endif
};
/*
@@ -343,6 +358,13 @@ enum v4l2_ycbcr_encoding {
/* SMPTE 240M -- Obsolete HDTV */
V4L2_YCBCR_ENC_SMPTE240M = 8,
+#ifdef __KERNEL__
+ /*
+ * Largest supported encoding value, assigned by the compiler, used by
+ * the framework to check for invalid values.
+ */
+ V4L2_YCBCR_ENC_LAST,
+#endif
};
/*
@@ -371,9 +393,9 @@ enum v4l2_hsv_encoding {
enum v4l2_quantization {
/*
- * The default for R'G'B' quantization is always full range, except
- * for the BT2020 colorspace. For Y'CbCr the quantization is always
- * limited range, except for COLORSPACE_JPEG: this is full range.
+ * The default for R'G'B' quantization is always full range.
+ * For Y'CbCr the quantization is always limited range, except
+ * for COLORSPACE_JPEG: this is full range.
*/
V4L2_QUANTIZATION_DEFAULT = 0,
V4L2_QUANTIZATION_FULL_RANGE = 1,
@@ -382,14 +404,13 @@ enum v4l2_quantization {
/*
* Determine how QUANTIZATION_DEFAULT should map to a proper quantization.
- * This depends on whether the image is RGB or not, the colorspace and the
- * Y'CbCr encoding.
+ * This depends on whether the image is RGB or not and on the colorspace.
+ * The Y'CbCr encoding is not used anymore, but is still there for backwards
+ * compatibility.
*/
#define V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb_or_hsv, colsp, ycbcr_enc) \
- (((is_rgb_or_hsv) && (colsp) == V4L2_COLORSPACE_BT2020) ? \
- V4L2_QUANTIZATION_LIM_RANGE : \
- (((is_rgb_or_hsv) || (colsp) == V4L2_COLORSPACE_JPEG) ? \
- V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
+ (((is_rgb_or_hsv) || (colsp) == V4L2_COLORSPACE_JPEG) ? \
+ V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE)
/*
* Deprecated names for opRGB colorspace (IEC 61966-2-5)
@@ -481,12 +502,13 @@ struct v4l2_capability {
#define V4L2_CAP_META_CAPTURE 0x00800000 /* Is a metadata capture device */
#define V4L2_CAP_READWRITE 0x01000000 /* read/write systemcalls */
-#define V4L2_CAP_ASYNCIO 0x02000000 /* async I/O */
#define V4L2_CAP_STREAMING 0x04000000 /* streaming I/O ioctls */
#define V4L2_CAP_META_OUTPUT 0x08000000 /* Is a metadata output device */
#define V4L2_CAP_TOUCH 0x10000000 /* Is a touch device */
+#define V4L2_CAP_IO_MC 0x20000000 /* Is input/output controlled by the media controller */
+
#define V4L2_CAP_DEVICE_CAPS 0x80000000 /* sets device capabilities field */
/*
@@ -514,7 +536,7 @@ struct v4l2_pix_format {
/* Pixel format FOURCC depth Description */
-/* RGB formats */
+/* RGB formats (1 or 2 bytes per pixel) */
#define V4L2_PIX_FMT_RGB332 v4l2_fourcc('R', 'G', 'B', '1') /* 8 RGB-3-3-2 */
#define V4L2_PIX_FMT_RGB444 v4l2_fourcc('R', '4', '4', '4') /* 16 xxxxrrrr ggggbbbb */
#define V4L2_PIX_FMT_ARGB444 v4l2_fourcc('A', 'R', '1', '2') /* 16 aaaarrrr ggggbbbb */
@@ -523,12 +545,6 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_RGBX444 v4l2_fourcc('R', 'X', '1', '2') /* 16 rrrrgggg bbbbxxxx */
#define V4L2_PIX_FMT_ABGR444 v4l2_fourcc('A', 'B', '1', '2') /* 16 aaaabbbb ggggrrrr */
#define V4L2_PIX_FMT_XBGR444 v4l2_fourcc('X', 'B', '1', '2') /* 16 xxxxbbbb ggggrrrr */
-
-/*
- * Originally this had 'BA12' as fourcc, but this clashed with the older
- * V4L2_PIX_FMT_SGRBG12 which inexplicably used that same fourcc.
- * So use 'GA12' instead for V4L2_PIX_FMT_BGRA444.
- */
#define V4L2_PIX_FMT_BGRA444 v4l2_fourcc('G', 'A', '1', '2') /* 16 bbbbgggg rrrraaaa */
#define V4L2_PIX_FMT_BGRX444 v4l2_fourcc('B', 'X', '1', '2') /* 16 bbbbgggg rrrrxxxx */
#define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R', 'G', 'B', 'O') /* 16 RGB-5-5-5 */
@@ -545,6 +561,8 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_ARGB555X v4l2_fourcc_be('A', 'R', '1', '5') /* 16 ARGB-5-5-5 BE */
#define V4L2_PIX_FMT_XRGB555X v4l2_fourcc_be('X', 'R', '1', '5') /* 16 XRGB-5-5-5 BE */
#define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */
+
+/* RGB formats (3 or 4 bytes per pixel) */
#define V4L2_PIX_FMT_BGR666 v4l2_fourcc('B', 'G', 'R', 'H') /* 18 BGR-6-6-6 */
#define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */
#define V4L2_PIX_FMT_RGB24 v4l2_fourcc('R', 'G', 'B', '3') /* 24 RGB-8-8-8 */
@@ -565,12 +583,14 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_Y6 v4l2_fourcc('Y', '0', '6', ' ') /* 6 Greyscale */
#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */
#define V4L2_PIX_FMT_Y12 v4l2_fourcc('Y', '1', '2', ' ') /* 12 Greyscale */
+#define V4L2_PIX_FMT_Y14 v4l2_fourcc('Y', '1', '4', ' ') /* 14 Greyscale */
#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */
#define V4L2_PIX_FMT_Y16_BE v4l2_fourcc_be('Y', '1', '6', ' ') /* 16 Greyscale BE */
/* Grey bit-packed formats */
#define V4L2_PIX_FMT_Y10BPACK v4l2_fourcc('Y', '1', '0', 'B') /* 10 Greyscale bit-packed */
#define V4L2_PIX_FMT_Y10P v4l2_fourcc('Y', '1', '0', 'P') /* 10 Greyscale, MIPI RAW10 packed */
+#define V4L2_PIX_FMT_IPU3_Y10 v4l2_fourcc('i', 'p', '3', 'y') /* IPU3 packed 10-bit greyscale */
/* Palette formats */
#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P', 'A', 'L', '8') /* 8 8-bit palette */
@@ -588,13 +608,14 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_YUV444 v4l2_fourcc('Y', '4', '4', '4') /* 16 xxxxyyyy uuuuvvvv */
#define V4L2_PIX_FMT_YUV555 v4l2_fourcc('Y', 'U', 'V', 'O') /* 16 YUV-5-5-5 */
#define V4L2_PIX_FMT_YUV565 v4l2_fourcc('Y', 'U', 'V', 'P') /* 16 YUV-5-6-5 */
+#define V4L2_PIX_FMT_YUV24 v4l2_fourcc('Y', 'U', 'V', '3') /* 24 YUV-8-8-8 */
#define V4L2_PIX_FMT_YUV32 v4l2_fourcc('Y', 'U', 'V', '4') /* 32 YUV-8-8-8-8 */
#define V4L2_PIX_FMT_AYUV32 v4l2_fourcc('A', 'Y', 'U', 'V') /* 32 AYUV-8-8-8-8 */
#define V4L2_PIX_FMT_XYUV32 v4l2_fourcc('X', 'Y', 'U', 'V') /* 32 XYUV-8-8-8-8 */
#define V4L2_PIX_FMT_VUYA32 v4l2_fourcc('V', 'U', 'Y', 'A') /* 32 VUYA-8-8-8-8 */
#define V4L2_PIX_FMT_VUYX32 v4l2_fourcc('V', 'U', 'Y', 'X') /* 32 VUYX-8-8-8-8 */
-#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* 8 8-bit color */
-#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H', 'M', '1', '2') /* 8 YUV 4:2:0 16x16 macroblocks */
+#define V4L2_PIX_FMT_YUVA32 v4l2_fourcc('Y', 'U', 'V', 'A') /* 32 YUVA-8-8-8-8 */
+#define V4L2_PIX_FMT_YUVX32 v4l2_fourcc('Y', 'U', 'V', 'X') /* 32 YUVX-8-8-8-8 */
#define V4L2_PIX_FMT_M420 v4l2_fourcc('M', '4', '2', '0') /* 12 YUV 4:2:0 2 lines y, 1 line uv interleaved */
/* two planes -- one Y, one Cr + Cb interleaved */
@@ -604,14 +625,13 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_NV61 v4l2_fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */
#define V4L2_PIX_FMT_NV24 v4l2_fourcc('N', 'V', '2', '4') /* 24 Y/CbCr 4:4:4 */
#define V4L2_PIX_FMT_NV42 v4l2_fourcc('N', 'V', '4', '2') /* 24 Y/CrCb 4:4:4 */
+#define V4L2_PIX_FMT_P010 v4l2_fourcc('P', '0', '1', '0') /* 24 Y/CbCr 4:2:0 10-bit per component */
/* two non contiguous planes - one Y, one Cr + Cb interleaved */
#define V4L2_PIX_FMT_NV12M v4l2_fourcc('N', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 */
#define V4L2_PIX_FMT_NV21M v4l2_fourcc('N', 'M', '2', '1') /* 21 Y/CrCb 4:2:0 */
#define V4L2_PIX_FMT_NV16M v4l2_fourcc('N', 'M', '1', '6') /* 16 Y/CbCr 4:2:2 */
#define V4L2_PIX_FMT_NV61M v4l2_fourcc('N', 'M', '6', '1') /* 16 Y/CrCb 4:2:2 */
-#define V4L2_PIX_FMT_NV12MT v4l2_fourcc('T', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 64x32 macroblocks */
-#define V4L2_PIX_FMT_NV12MT_16X16 v4l2_fourcc('V', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 macroblocks */
/* three planes - Y Cb, Cr */
#define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y', 'U', 'V', '9') /* 9 YUV 4:1:0 */
@@ -629,6 +649,18 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_YUV444M v4l2_fourcc('Y', 'M', '2', '4') /* 24 YUV444 planar */
#define V4L2_PIX_FMT_YVU444M v4l2_fourcc('Y', 'M', '4', '2') /* 24 YVU444 planar */
+/* Tiled YUV formats */
+#define V4L2_PIX_FMT_NV12_4L4 v4l2_fourcc('V', 'T', '1', '2') /* 12 Y/CbCr 4:2:0 4x4 tiles */
+#define V4L2_PIX_FMT_NV12_16L16 v4l2_fourcc('H', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 tiles */
+#define V4L2_PIX_FMT_NV12_32L32 v4l2_fourcc('S', 'T', '1', '2') /* 12 Y/CbCr 4:2:0 32x32 tiles */
+#define V4L2_PIX_FMT_P010_4L4 v4l2_fourcc('T', '0', '1', '0') /* 12 Y/CbCr 4:2:0 10-bit 4x4 macroblocks */
+
+/* Tiled YUV formats, non contiguous planes */
+#define V4L2_PIX_FMT_NV12MT v4l2_fourcc('T', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 64x32 tiles */
+#define V4L2_PIX_FMT_NV12MT_16X16 v4l2_fourcc('V', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 tiles */
+#define V4L2_PIX_FMT_NV12M_8L128 v4l2_fourcc('N', 'A', '1', '2') /* Y/CbCr 4:2:0 8x128 tiles */
+#define V4L2_PIX_FMT_NV12M_10BE_8L128 v4l2_fourcc_be('N', 'T', '1', '2') /* Y/CbCr 4:2:0 10-bit 8x128 tiles */
+
/* Bayer formats - see http://www.siliconimaging.com/RGB%20Bayer.htm */
#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */
@@ -662,6 +694,10 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_SGBRG12P v4l2_fourcc('p', 'G', 'C', 'C')
#define V4L2_PIX_FMT_SGRBG12P v4l2_fourcc('p', 'g', 'C', 'C')
#define V4L2_PIX_FMT_SRGGB12P v4l2_fourcc('p', 'R', 'C', 'C')
+#define V4L2_PIX_FMT_SBGGR14 v4l2_fourcc('B', 'G', '1', '4') /* 14 BGBG.. GRGR.. */
+#define V4L2_PIX_FMT_SGBRG14 v4l2_fourcc('G', 'B', '1', '4') /* 14 GBGB.. RGRG.. */
+#define V4L2_PIX_FMT_SGRBG14 v4l2_fourcc('G', 'R', '1', '4') /* 14 GRGR.. BGBG.. */
+#define V4L2_PIX_FMT_SRGGB14 v4l2_fourcc('R', 'G', '1', '4') /* 14 RGRG.. GBGB.. */
/* 14bit raw bayer packed, 7 bytes for every 4 pixels */
#define V4L2_PIX_FMT_SBGGR14P v4l2_fourcc('p', 'B', 'E', 'E')
#define V4L2_PIX_FMT_SGBRG14P v4l2_fourcc('p', 'G', 'E', 'E')
@@ -693,10 +729,14 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_VC1_ANNEX_G v4l2_fourcc('V', 'C', '1', 'G') /* SMPTE 421M Annex G compliant stream */
#define V4L2_PIX_FMT_VC1_ANNEX_L v4l2_fourcc('V', 'C', '1', 'L') /* SMPTE 421M Annex L compliant stream */
#define V4L2_PIX_FMT_VP8 v4l2_fourcc('V', 'P', '8', '0') /* VP8 */
+#define V4L2_PIX_FMT_VP8_FRAME v4l2_fourcc('V', 'P', '8', 'F') /* VP8 parsed frame */
#define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0') /* VP9 */
+#define V4L2_PIX_FMT_VP9_FRAME v4l2_fourcc('V', 'P', '9', 'F') /* VP9 parsed frame */
#define V4L2_PIX_FMT_HEVC v4l2_fourcc('H', 'E', 'V', 'C') /* HEVC aka H.265 */
#define V4L2_PIX_FMT_FWHT v4l2_fourcc('F', 'W', 'H', 'T') /* Fast Walsh Hadamard Transform (vicodec) */
#define V4L2_PIX_FMT_FWHT_STATELESS v4l2_fourcc('S', 'F', 'W', 'H') /* Stateless FWHT (vicodec) */
+#define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4') /* H264 parsed slices */
+#define V4L2_PIX_FMT_HEVC_SLICE v4l2_fourcc('S', '2', '6', '5') /* HEVC parsed slices */
/* Vendor-specific formats */
#define V4L2_PIX_FMT_CPIA1 v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */
@@ -729,11 +769,14 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_Y12I v4l2_fourcc('Y', '1', '2', 'I') /* Greyscale 12-bit L/R interleaved */
#define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */
#define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */
+#define V4L2_PIX_FMT_MM21 v4l2_fourcc('M', 'M', '2', '1') /* Mediatek 8-bit block mode, two non-contiguous planes */
#define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */
-#define V4L2_PIX_FMT_SUNXI_TILED_NV12 v4l2_fourcc('S', 'T', '1', '2') /* Sunxi Tiled NV12 Format */
#define V4L2_PIX_FMT_CNF4 v4l2_fourcc('C', 'N', 'F', '4') /* Intel 4-bit packed depth confidence information */
+#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* BTTV 8-bit dithered RGB */
+#define V4L2_PIX_FMT_QC08C v4l2_fourcc('Q', '0', '8', 'C') /* Qualcomm 8-bit compressed */
+#define V4L2_PIX_FMT_QC10C v4l2_fourcc('Q', '1', '0', 'C') /* Qualcomm 10-bit compressed */
-/* 10bit raw bayer packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */
+/* 10bit raw packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */
#define V4L2_PIX_FMT_IPU3_SBGGR10 v4l2_fourcc('i', 'p', '3', 'b') /* IPU3 packed 10-bit BGGR bayer */
#define V4L2_PIX_FMT_IPU3_SGBRG10 v4l2_fourcc('i', 'p', '3', 'g') /* IPU3 packed 10-bit GBRG bayer */
#define V4L2_PIX_FMT_IPU3_SGRBG10 v4l2_fourcc('i', 'p', '3', 'G') /* IPU3 packed 10-bit GRBG bayer */
@@ -762,11 +805,16 @@ struct v4l2_pix_format {
#define V4L2_META_FMT_D4XX v4l2_fourcc('D', '4', 'X', 'X') /* D4XX Payload Header metadata */
#define V4L2_META_FMT_VIVID v4l2_fourcc('V', 'I', 'V', 'D') /* Vivid Metadata */
+/* Vendor specific - used for RK_ISP1 camera sub-system */
+#define V4L2_META_FMT_RK_ISP1_PARAMS v4l2_fourcc('R', 'K', '1', 'P') /* Rockchip ISP1 3A Parameters */
+#define V4L2_META_FMT_RK_ISP1_STAT_3A v4l2_fourcc('R', 'K', '1', 'S') /* Rockchip ISP1 3A Statistics */
+
/* priv field value to indicate that subsequent fields are valid. */
#define V4L2_PIX_FMT_PRIV_MAGIC 0xfeedcafe
/* Flags */
#define V4L2_PIX_FMT_FLAG_PREMUL_ALPHA 0x00000001
+#define V4L2_PIX_FMT_FLAG_SET_CSC 0x00000002
/*
* F O R M A T E N U M E R A T I O N
@@ -777,13 +825,20 @@ struct v4l2_fmtdesc {
__u32 flags;
__u8 description[32]; /* Description string */
__u32 pixelformat; /* Format fourcc */
- __u32 reserved[4];
+ __u32 mbus_code; /* Media bus code */
+ __u32 reserved[3];
};
#define V4L2_FMT_FLAG_COMPRESSED 0x0001
#define V4L2_FMT_FLAG_EMULATED 0x0002
#define V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM 0x0004
#define V4L2_FMT_FLAG_DYN_RESOLUTION 0x0008
+#define V4L2_FMT_FLAG_ENC_CAP_FRAME_INTERVAL 0x0010
+#define V4L2_FMT_FLAG_CSC_COLORSPACE 0x0020
+#define V4L2_FMT_FLAG_CSC_XFER_FUNC 0x0040
+#define V4L2_FMT_FLAG_CSC_YCBCR_ENC 0x0080
+#define V4L2_FMT_FLAG_CSC_HSV_ENC V4L2_FMT_FLAG_CSC_YCBCR_ENC
+#define V4L2_FMT_FLAG_CSC_QUANTIZATION 0x0100
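As an illustration of how user space can consume the new CSC flags (a sketch, not part of this patch; it assumes a capture device at /dev/video0 and elides error handling):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/video0", O_RDWR);	/* hypothetical device node */
	struct v4l2_fmtdesc fmt;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	/* VIDIOC_ENUM_FMT now reports per-format CSC capabilities */
	while (ioctl(fd, VIDIOC_ENUM_FMT, &fmt) == 0) {
		printf("%s%s\n", (char *)fmt.description,
		       (fmt.flags & V4L2_FMT_FLAG_CSC_COLORSPACE) ?
		       " (accepts a user-set colorspace)" : "");
		fmt.index++;
	}
	return 0;
}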
/* Frame Size and frame rate enumeration */
/*
@@ -936,9 +991,12 @@ struct v4l2_requestbuffers {
__u32 type; /* enum v4l2_buf_type */
__u32 memory; /* enum v4l2_memory */
__u32 capabilities;
- __u32 reserved[1];
+ __u8 flags;
+ __u8 reserved[3];
};
+#define V4L2_MEMORY_FLAG_NON_COHERENT (1 << 0)
+
/* capabilities for struct v4l2_requestbuffers and v4l2_create_buffers */
#define V4L2_BUF_CAP_SUPPORTS_MMAP (1 << 0)
#define V4L2_BUF_CAP_SUPPORTS_USERPTR (1 << 1)
@@ -946,6 +1004,7 @@ struct v4l2_requestbuffers {
#define V4L2_BUF_CAP_SUPPORTS_REQUESTS (1 << 3)
#define V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS (1 << 4)
#define V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF (1 << 5)
+#define V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS (1 << 6)
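A short sketch of the new flags byte in action (illustrative only, not from the patch; fd is assumed to be an open video node):

/*
 * Ask for four MMAP buffers backed by non-coherent memory. Returns 1 if
 * the hint can be honoured (the queue advertises cache hints), 0 if it
 * was silently ignored, -1 on error.
 */
static int request_noncoherent_bufs(int fd)
{
	struct v4l2_requestbuffers req = {
		.count  = 4,
		.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
		.memory = V4L2_MEMORY_MMAP,
		.flags  = V4L2_MEMORY_FLAG_NON_COHERENT,
	};

	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
		return -1;
	return !!(req.capabilities & V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS);
}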
/**
* struct v4l2_plane - plane info for multi-planar buffers
@@ -959,8 +1018,10 @@ struct v4l2_requestbuffers {
* pointing to this plane
* @fd: when memory is V4L2_MEMORY_DMABUF, a userspace file
* descriptor associated with this plane
+ * @m: union of @mem_offset, @userptr and @fd
* @data_offset: offset in the plane to the start of data; usually 0,
* unless there is a header in front of the data
+ * @reserved: drivers and applications must zero this array
*
* Multi-planar buffers consist of one or more planes, e.g. an YCbCr buffer
* with two planes can have one plane for Y, and another for interleaved CbCr
@@ -1002,10 +1063,14 @@ struct v4l2_plane {
* a userspace file descriptor associated with this buffer
* @planes: for multiplanar buffers; userspace pointer to the array of plane
* info structs for this buffer
+ * @m: union of @offset, @userptr, @planes and @fd
* @length: size in bytes of the buffer (NOT its payload) for single-plane
* buffers (when type != *_MPLANE); number of elements in the
* planes array for multi-plane buffers
+ * @reserved2: drivers and applications must zero this field
* @request_fd: fd of the request that this buffer should use
+ * @reserved: for backwards compatibility with applications that do not know
+ * about @request_fd
*
* Contains data exchanged by application and driver using one of the Streaming
* I/O methods.
@@ -1043,7 +1108,7 @@ struct v4l2_buffer {
#ifndef __KERNEL__
/**
* v4l2_timeval_to_ns - Convert timeval to nanoseconds
- * @ts: pointer to the timeval variable to be converted
+ * @tv: pointer to the timeval variable to be converted
*
* Returns the scalar nanosecond representation of the timeval
* parameter.
@@ -1104,6 +1169,7 @@ static inline __u64 v4l2_timeval_to_ns(const struct timeval *tv)
* @flags: flags for newly created file, currently only O_CLOEXEC is
* supported, refer to manual of open syscall for more details
* @fd: file descriptor associated with DMABUF (set by driver)
+ * @reserved: drivers and applications must zero this array
*
* Contains data used for exporting a video buffer as DMABUF file descriptor.
* The buffer is identified by a 'cookie' returned by VIDIOC_QUERYBUF
@@ -1168,7 +1234,7 @@ struct v4l2_window {
struct v4l2_rect w;
__u32 field; /* enum v4l2_field */
__u32 chromakey;
- struct v4l2_clip __user *clips;
+ struct v4l2_clip *clips;
__u32 clipcount;
void __user *bitmap;
__u8 global_alpha;
@@ -1242,6 +1308,10 @@ struct v4l2_selection {
typedef __u64 v4l2_std_id;
+/*
+ * Attention: Keep the V4L2_STD_* bit definitions in sync with
+ * include/dt-bindings/display/sdtv-standards.h SDTV_STD_* bit definitions.
+ */
/* one bit for each */
#define V4L2_STD_PAL_B ((v4l2_std_id)0x00000001)
#define V4L2_STD_PAL_B1 ((v4l2_std_id)0x00000002)
@@ -1531,7 +1601,8 @@ struct v4l2_bt_timings {
((bt)->width + V4L2_DV_BT_BLANKING_WIDTH(bt))
#define V4L2_DV_BT_BLANKING_HEIGHT(bt) \
((bt)->vfrontporch + (bt)->vsync + (bt)->vbackporch + \
- (bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch)
+ ((bt)->interlaced ? \
+ ((bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch) : 0))
#define V4L2_DV_BT_FRAME_HEIGHT(bt) \
((bt)->height + V4L2_DV_BT_BLANKING_HEIGHT(bt))
@@ -1710,6 +1781,24 @@ struct v4l2_ext_control {
__u16 __user *p_u16;
__u32 __user *p_u32;
struct v4l2_area __user *p_area;
+ struct v4l2_ctrl_h264_sps __user *p_h264_sps;
+	struct v4l2_ctrl_h264_pps __user *p_h264_pps;
+ struct v4l2_ctrl_h264_scaling_matrix __user *p_h264_scaling_matrix;
+ struct v4l2_ctrl_h264_pred_weights __user *p_h264_pred_weights;
+ struct v4l2_ctrl_h264_slice_params __user *p_h264_slice_params;
+ struct v4l2_ctrl_h264_decode_params __user *p_h264_decode_params;
+ struct v4l2_ctrl_fwht_params __user *p_fwht_params;
+ struct v4l2_ctrl_vp8_frame __user *p_vp8_frame;
+ struct v4l2_ctrl_mpeg2_sequence __user *p_mpeg2_sequence;
+ struct v4l2_ctrl_mpeg2_picture __user *p_mpeg2_picture;
+ struct v4l2_ctrl_mpeg2_quantisation __user *p_mpeg2_quantisation;
+ struct v4l2_ctrl_vp9_compressed_hdr __user *p_vp9_compressed_hdr_probs;
+ struct v4l2_ctrl_vp9_frame __user *p_vp9_frame;
+ struct v4l2_ctrl_hevc_sps __user *p_hevc_sps;
+ struct v4l2_ctrl_hevc_pps __user *p_hevc_pps;
+ struct v4l2_ctrl_hevc_slice_params __user *p_hevc_slice_params;
+ struct v4l2_ctrl_hevc_scaling_matrix __user *p_hevc_scaling_matrix;
+ struct v4l2_ctrl_hevc_decode_params __user *p_hevc_decode_params;
void __user *ptr;
};
} __attribute__ ((packed));
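To show how the grown union is used in practice, a hedged user-space sketch of handing one stateless-codec payload to a decoder; V4L2_CID_STATELESS_FWHT_PARAMS comes from <linux/v4l2-controls.h>, and the request fd is assumed to have been allocated via the media request API:

static int set_fwht_params(int fd, struct v4l2_ctrl_fwht_params *params,
			   int request_fd)
{
	struct v4l2_ext_control ctrl = {
		.id = V4L2_CID_STATELESS_FWHT_PARAMS,
		.size = sizeof(*params),
		.p_fwht_params = params,	/* one of the new union members */
	};
	struct v4l2_ext_controls ctrls = {
		.which = V4L2_CTRL_WHICH_REQUEST_VAL,
		.request_fd = request_fd,
		.count = 1,
		.controls = &ctrl,
	};

	return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}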
@@ -1756,6 +1845,33 @@ enum v4l2_ctrl_type {
V4L2_CTRL_TYPE_U16 = 0x0101,
V4L2_CTRL_TYPE_U32 = 0x0102,
V4L2_CTRL_TYPE_AREA = 0x0106,
+
+ V4L2_CTRL_TYPE_HDR10_CLL_INFO = 0x0110,
+ V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY = 0x0111,
+
+ V4L2_CTRL_TYPE_H264_SPS = 0x0200,
+ V4L2_CTRL_TYPE_H264_PPS = 0x0201,
+ V4L2_CTRL_TYPE_H264_SCALING_MATRIX = 0x0202,
+ V4L2_CTRL_TYPE_H264_SLICE_PARAMS = 0x0203,
+ V4L2_CTRL_TYPE_H264_DECODE_PARAMS = 0x0204,
+ V4L2_CTRL_TYPE_H264_PRED_WEIGHTS = 0x0205,
+
+ V4L2_CTRL_TYPE_FWHT_PARAMS = 0x0220,
+
+ V4L2_CTRL_TYPE_VP8_FRAME = 0x0240,
+
+ V4L2_CTRL_TYPE_MPEG2_QUANTISATION = 0x0250,
+ V4L2_CTRL_TYPE_MPEG2_SEQUENCE = 0x0251,
+ V4L2_CTRL_TYPE_MPEG2_PICTURE = 0x0252,
+
+ V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR = 0x0260,
+ V4L2_CTRL_TYPE_VP9_FRAME = 0x0261,
+
+ V4L2_CTRL_TYPE_HEVC_SPS = 0x0270,
+ V4L2_CTRL_TYPE_HEVC_PPS = 0x0271,
+ V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS = 0x0272,
+ V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX = 0x0273,
+ V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS = 0x0274,
};
/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
@@ -1811,6 +1927,7 @@ struct v4l2_querymenu {
#define V4L2_CTRL_FLAG_HAS_PAYLOAD 0x0100
#define V4L2_CTRL_FLAG_EXECUTE_ON_WRITE 0x0200
#define V4L2_CTRL_FLAG_MODIFY_LAYOUT 0x0400
+#define V4L2_CTRL_FLAG_DYNAMIC_ARRAY 0x0800
/* Query flags, to be ORed with the control ID */
#define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000
@@ -2193,6 +2310,7 @@ struct v4l2_mpeg_vbi_fmt_ivtv {
* this plane will be used
* @bytesperline: distance in bytes between the leftmost pixels in two
* adjacent lines
+ * @reserved: drivers and applications must zero this array
*/
struct v4l2_plane_pix_format {
__u32 sizeimage;
@@ -2211,8 +2329,10 @@ struct v4l2_plane_pix_format {
* @num_planes: number of planes for this format
* @flags: format flags (V4L2_PIX_FMT_FLAG_*)
* @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
+ * @hsv_enc: enum v4l2_hsv_encoding, HSV encoding
* @quantization: enum v4l2_quantization, colorspace quantization
* @xfer_func: enum v4l2_xfer_func, colorspace transfer function
+ * @reserved: drivers and applications must zero this array
*/
struct v4l2_pix_format_mplane {
__u32 width;
@@ -2237,6 +2357,7 @@ struct v4l2_pix_format_mplane {
* struct v4l2_sdr_format - SDR format definition
* @pixelformat: little endian four character code (fourcc)
* @buffersize: maximum size in bytes required for data
+ * @reserved: drivers and applications must zero this array
*/
struct v4l2_sdr_format {
__u32 pixelformat;
@@ -2263,6 +2384,8 @@ struct v4l2_meta_format {
* @vbi: raw VBI capture or output parameters
* @sliced: sliced VBI capture or output parameters
* @raw_data: placeholder for future extensions and custom formats
+ * @fmt: union of @pix, @pix_mp, @win, @vbi, @sliced, @sdr, @meta
+ * and @raw_data
*/
struct v4l2_format {
__u32 type;
@@ -2312,6 +2435,7 @@ struct v4l2_event_vsync {
#define V4L2_EVENT_CTRL_CH_VALUE (1 << 0)
#define V4L2_EVENT_CTRL_CH_FLAGS (1 << 1)
#define V4L2_EVENT_CTRL_CH_RANGE (1 << 2)
+#define V4L2_EVENT_CTRL_CH_DIMENSIONS (1 << 3)
struct v4l2_event_ctrl {
__u32 changes;
@@ -2435,6 +2559,9 @@ struct v4l2_dbg_chip_info {
* @memory: enum v4l2_memory; buffer memory type
* @format: frame format, for which buffers are requested
* @capabilities: capabilities of this buffer type.
+ * @flags: additional buffer management attributes (ignored unless the
+ * queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability
+ *		and is configured for MMAP streaming I/O).
* @reserved: future extensions
*/
struct v4l2_create_buffers {
@@ -2443,7 +2570,8 @@ struct v4l2_create_buffers {
__u32 memory;
struct v4l2_format format;
__u32 capabilities;
- __u32 reserved[7];
+ __u32 flags;
+ __u32 reserved[6];
};
/*
@@ -2551,4 +2679,15 @@ struct v4l2_create_buffers {
#define BASE_VIDIOC_PRIVATE 192 /* 192-255 are private */
+/* Deprecated definitions kept for backwards compatibility */
+#ifndef __KERNEL__
+#define V4L2_PIX_FMT_HM12 V4L2_PIX_FMT_NV12_16L16
+#define V4L2_PIX_FMT_SUNXI_TILED_NV12 V4L2_PIX_FMT_NV12_32L32
+/*
+ * This capability was never implemented, anyone using this cap should drop it
+ * from their code.
+ */
+#define V4L2_CAP_ASYNCIO 0x02000000
+#endif
+
#endif /* _UAPI__LINUX_VIDEODEV2_H */
diff --git a/include/uapi/linux/virtio_9p.h b/include/uapi/linux/virtio_9p.h
index 277c4ad44e84..374b68f8ac6e 100644
--- a/include/uapi/linux/virtio_9p.h
+++ b/include/uapi/linux/virtio_9p.h
@@ -25,7 +25,7 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. */
-#include <linux/types.h>
+#include <linux/virtio_types.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
@@ -36,9 +36,9 @@
struct virtio_9p_config {
/* length of the tag name */
- __u16 tag_len;
+ __virtio16 tag_len;
/* non-NULL terminated tag name */
- __u8 tag[0];
+ __u8 tag[];
} __attribute__((packed));
#endif /* _LINUX_VIRTIO_9P_H */
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index a1966cd7b677..ddaa45e723c4 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -36,6 +36,7 @@
#define VIRTIO_BALLOON_F_DEFLATE_ON_OOM 2 /* Deflate balloon on OOM */
#define VIRTIO_BALLOON_F_FREE_PAGE_HINT 3 /* VQ to report free pages */
#define VIRTIO_BALLOON_F_PAGE_POISON 4 /* Guest is using page poisoning */
+#define VIRTIO_BALLOON_F_REPORTING 5 /* Page reporting virtqueue */
/* Size of a PFN in the balloon interface. */
#define VIRTIO_BALLOON_PFN_SHIFT 12
@@ -44,13 +45,20 @@
#define VIRTIO_BALLOON_CMD_ID_DONE 1
struct virtio_balloon_config {
/* Number of pages host wants Guest to give up. */
- __u32 num_pages;
+ __le32 num_pages;
/* Number of pages we've actually got in balloon. */
- __u32 actual;
- /* Free page report command id, readonly by guest */
- __u32 free_page_report_cmd_id;
+ __le32 actual;
+ /*
+ * Free page hint command id, readonly by guest.
+ * Was previously named free_page_report_cmd_id so we
+ * need to carry that name for legacy support.
+ */
+ union {
+ __le32 free_page_hint_cmd_id;
+ __le32 free_page_report_cmd_id; /* deprecated */
+ };
/* Stores PAGE_POISON if page poisoning is in use */
- __u32 poison_val;
+ __le32 poison_val;
};
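Since every config field is now explicitly little endian, a guest driver should go through the _le config accessors; a kernel-side sketch (assuming a probed vdev, not taken from this patch):

static void balloon_read_config(struct virtio_device *vdev)
{
	u32 num_pages, actual;

	/* virtio_cread_le() converts from LE to CPU endianness */
	virtio_cread_le(vdev, struct virtio_balloon_config, num_pages,
			&num_pages);
	virtio_cread_le(vdev, struct virtio_balloon_config, actual, &actual);
	dev_dbg(&vdev->dev, "target %u pages, actual %u pages\n",
		num_pages, actual);
}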
#define VIRTIO_BALLOON_S_SWAP_IN 0 /* Amount of memory swapped in */
diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h
index 0f99d7b49ede..58e70b24b504 100644
--- a/include/uapi/linux/virtio_blk.h
+++ b/include/uapi/linux/virtio_blk.h
@@ -40,6 +40,7 @@
#define VIRTIO_BLK_F_MQ 12 /* support more than one vq */
#define VIRTIO_BLK_F_DISCARD 13 /* DISCARD is supported */
#define VIRTIO_BLK_F_WRITE_ZEROES 14 /* WRITE ZEROES is supported */
+#define VIRTIO_BLK_F_SECURE_ERASE 16 /* Secure Erase is supported */
/* Legacy feature bits */
#ifndef VIRTIO_BLK_NO_LEGACY
@@ -57,20 +58,20 @@
struct virtio_blk_config {
/* The capacity (in 512-byte sectors). */
- __u64 capacity;
+ __virtio64 capacity;
/* The maximum segment size (if VIRTIO_BLK_F_SIZE_MAX) */
- __u32 size_max;
+ __virtio32 size_max;
/* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */
- __u32 seg_max;
+ __virtio32 seg_max;
/* geometry of the device (if VIRTIO_BLK_F_GEOMETRY) */
struct virtio_blk_geometry {
- __u16 cylinders;
+ __virtio16 cylinders;
__u8 heads;
__u8 sectors;
} geometry;
/* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */
- __u32 blk_size;
+ __virtio32 blk_size;
/* the next 4 entries are guarded by VIRTIO_BLK_F_TOPOLOGY */
/* exponent for physical block per logical block. */
@@ -78,42 +79,42 @@ struct virtio_blk_config {
/* alignment offset in logical blocks. */
__u8 alignment_offset;
/* minimum I/O size without performance penalty in logical blocks. */
- __u16 min_io_size;
+ __virtio16 min_io_size;
/* optimal sustained I/O size in logical blocks. */
- __u32 opt_io_size;
+ __virtio32 opt_io_size;
/* writeback mode (if VIRTIO_BLK_F_CONFIG_WCE) */
__u8 wce;
__u8 unused;
/* number of vqs, only available when VIRTIO_BLK_F_MQ is set */
- __u16 num_queues;
+ __virtio16 num_queues;
/* the next 3 entries are guarded by VIRTIO_BLK_F_DISCARD */
/*
* The maximum discard sectors (in 512-byte sectors) for
* one segment.
*/
- __u32 max_discard_sectors;
+ __virtio32 max_discard_sectors;
/*
* The maximum number of discard segments in a
* discard command.
*/
- __u32 max_discard_seg;
+ __virtio32 max_discard_seg;
/* Discard commands must be aligned to this number of sectors. */
- __u32 discard_sector_alignment;
+ __virtio32 discard_sector_alignment;
/* the next 3 entries are guarded by VIRTIO_BLK_F_WRITE_ZEROES */
/*
* The maximum number of write zeroes sectors (in 512-byte sectors) in
* one segment.
*/
- __u32 max_write_zeroes_sectors;
+ __virtio32 max_write_zeroes_sectors;
/*
* The maximum number of segments in a write zeroes
* command.
*/
- __u32 max_write_zeroes_seg;
+ __virtio32 max_write_zeroes_seg;
/*
* Set if a VIRTIO_BLK_T_WRITE_ZEROES request may result in the
* deallocation of one or more of the sectors.
@@ -121,6 +122,21 @@ struct virtio_blk_config {
__u8 write_zeroes_may_unmap;
__u8 unused1[3];
+
+ /* the next 3 entries are guarded by VIRTIO_BLK_F_SECURE_ERASE */
+ /*
+ * The maximum secure erase sectors (in 512-byte sectors) for
+ * one segment.
+ */
+ __virtio32 max_secure_erase_sectors;
+ /*
+ * The maximum number of secure erase segments in a
+ * secure erase command.
+ */
+ __virtio32 max_secure_erase_seg;
+ /* Secure erase commands must be aligned to this number of sectors. */
+ __virtio32 secure_erase_sector_alignment;
+
} __attribute__((packed));
/*
@@ -155,6 +171,9 @@ struct virtio_blk_config {
/* Write zeroes command */
#define VIRTIO_BLK_T_WRITE_ZEROES 13
+/* Secure erase command */
+#define VIRTIO_BLK_T_SECURE_ERASE 14
+
#ifndef VIRTIO_BLK_NO_LEGACY
/* Barrier before this op. */
#define VIRTIO_BLK_T_BARRIER 0x80000000
diff --git a/include/uapi/linux/virtio_bt.h b/include/uapi/linux/virtio_bt.h
new file mode 100644
index 000000000000..a7bd48daa9a9
--- /dev/null
+++ b/include/uapi/linux/virtio_bt.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+
+#ifndef _UAPI_LINUX_VIRTIO_BT_H
+#define _UAPI_LINUX_VIRTIO_BT_H
+
+#include <linux/virtio_types.h>
+
+/* Feature bits */
+#define VIRTIO_BT_F_VND_HCI 0 /* Indicates vendor command support */
+#define VIRTIO_BT_F_MSFT_EXT 1 /* Indicates MSFT vendor support */
+#define VIRTIO_BT_F_AOSP_EXT 2 /* Indicates AOSP vendor support */
+
+enum virtio_bt_config_type {
+ VIRTIO_BT_CONFIG_TYPE_PRIMARY = 0,
+ VIRTIO_BT_CONFIG_TYPE_AMP = 1,
+};
+
+enum virtio_bt_config_vendor {
+ VIRTIO_BT_CONFIG_VENDOR_NONE = 0,
+ VIRTIO_BT_CONFIG_VENDOR_ZEPHYR = 1,
+ VIRTIO_BT_CONFIG_VENDOR_INTEL = 2,
+ VIRTIO_BT_CONFIG_VENDOR_REALTEK = 3,
+};
+
+struct virtio_bt_config {
+ __u8 type;
+ __u16 vendor;
+ __u16 msft_opcode;
+} __attribute__((packed));
+
+#endif /* _UAPI_LINUX_VIRTIO_BT_H */
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index ff8e7dc9d4dd..3c05162bc988 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -52,7 +52,7 @@
* rest are per-device feature bits.
*/
#define VIRTIO_TRANSPORT_F_START 28
-#define VIRTIO_TRANSPORT_F_END 38
+#define VIRTIO_TRANSPORT_F_END 41
#ifndef VIRTIO_CONFIG_NO_LEGACY
/* Do we get callbacks when the ring is completely used, even if we've
@@ -67,18 +67,28 @@
#define VIRTIO_F_VERSION_1 32
/*
- * If clear - device has the IOMMU bypass quirk feature.
- * If set - use platform tools to detect the IOMMU.
+ * If clear - device has the platform DMA (e.g. IOMMU) bypass quirk feature.
+ * If set - use platform DMA tools to access the memory.
*
* Note the reverse polarity (compared to most other features),
* this is for compatibility with legacy systems.
*/
-#define VIRTIO_F_IOMMU_PLATFORM 33
+#define VIRTIO_F_ACCESS_PLATFORM 33
+#ifndef __KERNEL__
+/* Legacy name for VIRTIO_F_ACCESS_PLATFORM (for compatibility with old userspace) */
+#define VIRTIO_F_IOMMU_PLATFORM VIRTIO_F_ACCESS_PLATFORM
+#endif /* __KERNEL__ */
/* This feature indicates support for the packed virtqueue layout. */
#define VIRTIO_F_RING_PACKED 34
/*
+ * The in-order feature indicates that all buffers are used by the device
+ * in the same order in which they have been made available.
+ */
+#define VIRTIO_F_IN_ORDER 35
+
+/*
* This feature indicates that memory accesses by the driver and the
* device are ordered in a way described by the platform.
*/
@@ -88,4 +98,9 @@
* Does the device support Single Root I/O Virtualization?
*/
#define VIRTIO_F_SR_IOV 37
+
+/*
+ * This feature indicates that the driver can reset a queue individually.
+ */
+#define VIRTIO_F_RING_RESET 40
#endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */
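Bits 33-40 sit above the first 32-bit feature word, so any feature test has to use a 64-bit mask; a trivial sketch (device_features as exchanged by a modern transport):

#include <stdbool.h>
#include <stdint.h>

static bool has_ring_reset(uint64_t device_features)
{
	/* bit 40 would be lost in a 32-bit mask */
	return device_features & (1ULL << VIRTIO_F_RING_RESET);
}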
diff --git a/include/uapi/linux/virtio_console.h b/include/uapi/linux/virtio_console.h
index b7fb108c9310..7e6ec2ff0560 100644
--- a/include/uapi/linux/virtio_console.h
+++ b/include/uapi/linux/virtio_console.h
@@ -45,13 +45,13 @@
struct virtio_console_config {
/* columns of the screens */
- __u16 cols;
+ __virtio16 cols;
/* rows of the screens */
- __u16 rows;
+ __virtio16 rows;
/* max. number of ports this device can hold */
- __u32 max_nr_ports;
+ __virtio32 max_nr_ports;
/* emergency write register */
- __u32 emerg_wr;
+ __virtio32 emerg_wr;
} __attribute__((packed));
/*
diff --git a/include/uapi/linux/virtio_crypto.h b/include/uapi/linux/virtio_crypto.h
index 50cdc8aebfcf..71a54a6849ca 100644
--- a/include/uapi/linux/virtio_crypto.h
+++ b/include/uapi/linux/virtio_crypto.h
@@ -37,6 +37,7 @@
#define VIRTIO_CRYPTO_SERVICE_HASH 1
#define VIRTIO_CRYPTO_SERVICE_MAC 2
#define VIRTIO_CRYPTO_SERVICE_AEAD 3
+#define VIRTIO_CRYPTO_SERVICE_AKCIPHER 4
#define VIRTIO_CRYPTO_OPCODE(service, op) (((service) << 8) | (op))
@@ -57,6 +58,10 @@ struct virtio_crypto_ctrl_header {
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02)
#define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03)
+#define VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x04)
+#define VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x05)
__le32 opcode;
__le32 algo;
__le32 flag;
@@ -180,6 +185,58 @@ struct virtio_crypto_aead_create_session_req {
__u8 padding[32];
};
+struct virtio_crypto_rsa_session_para {
+#define VIRTIO_CRYPTO_RSA_RAW_PADDING 0
+#define VIRTIO_CRYPTO_RSA_PKCS1_PADDING 1
+ __le32 padding_algo;
+
+#define VIRTIO_CRYPTO_RSA_NO_HASH 0
+#define VIRTIO_CRYPTO_RSA_MD2 1
+#define VIRTIO_CRYPTO_RSA_MD3 2
+#define VIRTIO_CRYPTO_RSA_MD4 3
+#define VIRTIO_CRYPTO_RSA_MD5 4
+#define VIRTIO_CRYPTO_RSA_SHA1 5
+#define VIRTIO_CRYPTO_RSA_SHA256 6
+#define VIRTIO_CRYPTO_RSA_SHA384 7
+#define VIRTIO_CRYPTO_RSA_SHA512 8
+#define VIRTIO_CRYPTO_RSA_SHA224 9
+ __le32 hash_algo;
+};
+
+struct virtio_crypto_ecdsa_session_para {
+#define VIRTIO_CRYPTO_CURVE_UNKNOWN 0
+#define VIRTIO_CRYPTO_CURVE_NIST_P192 1
+#define VIRTIO_CRYPTO_CURVE_NIST_P224 2
+#define VIRTIO_CRYPTO_CURVE_NIST_P256 3
+#define VIRTIO_CRYPTO_CURVE_NIST_P384 4
+#define VIRTIO_CRYPTO_CURVE_NIST_P521 5
+ __le32 curve_id;
+ __le32 padding;
+};
+
+struct virtio_crypto_akcipher_session_para {
+#define VIRTIO_CRYPTO_NO_AKCIPHER 0
+#define VIRTIO_CRYPTO_AKCIPHER_RSA 1
+#define VIRTIO_CRYPTO_AKCIPHER_DSA 2
+#define VIRTIO_CRYPTO_AKCIPHER_ECDSA 3
+ __le32 algo;
+
+#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC 1
+#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE 2
+ __le32 keytype;
+ __le32 keylen;
+
+ union {
+ struct virtio_crypto_rsa_session_para rsa;
+ struct virtio_crypto_ecdsa_session_para ecdsa;
+ } u;
+};
+
+struct virtio_crypto_akcipher_create_session_req {
+ struct virtio_crypto_akcipher_session_para para;
+ __u8 padding[36];
+};
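A sketch of filling the session parameters for an RSA signing session with PKCS#1 padding and SHA-256 (kernel context assumed for cpu_to_le32(); keylen is the caller's key size in bytes):

static void fill_rsa_sign_session(struct virtio_crypto_akcipher_session_para *p,
				  u32 keylen)
{
	p->algo    = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
	p->keytype = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE);
	p->keylen  = cpu_to_le32(keylen);
	p->u.rsa.padding_algo = cpu_to_le32(VIRTIO_CRYPTO_RSA_PKCS1_PADDING);
	p->u.rsa.hash_algo    = cpu_to_le32(VIRTIO_CRYPTO_RSA_SHA256);
}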
+
struct virtio_crypto_alg_chain_session_para {
#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER 1
#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH 2
@@ -247,6 +304,8 @@ struct virtio_crypto_op_ctrl_req {
mac_create_session;
struct virtio_crypto_aead_create_session_req
aead_create_session;
+ struct virtio_crypto_akcipher_create_session_req
+ akcipher_create_session;
struct virtio_crypto_destroy_session_req
destroy_session;
__u8 padding[56];
@@ -266,6 +325,14 @@ struct virtio_crypto_op_header {
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00)
#define VIRTIO_CRYPTO_AEAD_DECRYPT \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01)
+#define VIRTIO_CRYPTO_AKCIPHER_ENCRYPT \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x00)
+#define VIRTIO_CRYPTO_AKCIPHER_DECRYPT \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x01)
+#define VIRTIO_CRYPTO_AKCIPHER_SIGN \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x02)
+#define VIRTIO_CRYPTO_AKCIPHER_VERIFY \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x03)
__le32 opcode;
/* algo should be service-specific algorithms */
__le32 algo;
@@ -390,6 +457,16 @@ struct virtio_crypto_aead_data_req {
__u8 padding[32];
};
+struct virtio_crypto_akcipher_para {
+ __le32 src_data_len;
+ __le32 dst_data_len;
+};
+
+struct virtio_crypto_akcipher_data_req {
+ struct virtio_crypto_akcipher_para para;
+ __u8 padding[40];
+};
+
/* The request of the data virtqueue's packet */
struct virtio_crypto_op_data_req {
struct virtio_crypto_op_header header;
@@ -399,6 +476,7 @@ struct virtio_crypto_op_data_req {
struct virtio_crypto_hash_data_req hash_req;
struct virtio_crypto_mac_data_req mac_req;
struct virtio_crypto_aead_data_req aead_req;
+ struct virtio_crypto_akcipher_data_req akcipher_req;
__u8 padding[48];
} u;
};
@@ -408,39 +486,41 @@ struct virtio_crypto_op_data_req {
#define VIRTIO_CRYPTO_BADMSG 2
#define VIRTIO_CRYPTO_NOTSUPP 3
#define VIRTIO_CRYPTO_INVSESS 4 /* Invalid session id */
+#define VIRTIO_CRYPTO_NOSPC 5 /* no free session ID */
+#define VIRTIO_CRYPTO_KEY_REJECTED 6 /* Signature verification failed */
/* The accelerator hardware is ready */
#define VIRTIO_CRYPTO_S_HW_READY (1 << 0)
struct virtio_crypto_config {
/* See VIRTIO_CRYPTO_OP_* above */
- __u32 status;
+ __le32 status;
/*
* Maximum number of data queues
*/
- __u32 max_dataqueues;
+ __le32 max_dataqueues;
/*
* Specifies the services mask which the device supports,
* see VIRTIO_CRYPTO_SERVICE_* above
*/
- __u32 crypto_services;
+ __le32 crypto_services;
/* Detailed algorithms mask */
- __u32 cipher_algo_l;
- __u32 cipher_algo_h;
- __u32 hash_algo;
- __u32 mac_algo_l;
- __u32 mac_algo_h;
- __u32 aead_algo;
+ __le32 cipher_algo_l;
+ __le32 cipher_algo_h;
+ __le32 hash_algo;
+ __le32 mac_algo_l;
+ __le32 mac_algo_h;
+ __le32 aead_algo;
/* Maximum length of cipher key */
- __u32 max_cipher_key_len;
+ __le32 max_cipher_key_len;
/* Maximum length of authenticated key */
- __u32 max_auth_key_len;
- __u32 reserve;
+ __le32 max_auth_key_len;
+ __le32 akcipher_algo;
/* Maximum size of each crypto request's content */
- __u64 max_size;
+ __le64 max_size;
};
struct virtio_crypto_inhdr {
diff --git a/include/uapi/linux/virtio_fs.h b/include/uapi/linux/virtio_fs.h
index b02eb2ac3d99..bea38291421b 100644
--- a/include/uapi/linux/virtio_fs.h
+++ b/include/uapi/linux/virtio_fs.h
@@ -13,7 +13,10 @@ struct virtio_fs_config {
__u8 tag[36];
/* Number of request queues */
- __u32 num_request_queues;
+ __le32 num_request_queues;
} __attribute__((packed));
+/* For the id field in virtio_pci_shm_cap */
+#define VIRTIO_FS_SHMCAP_ID_CACHE 0
+
#endif /* _UAPI_LINUX_VIRTIO_FS_H */
diff --git a/include/uapi/linux/virtio_gpio.h b/include/uapi/linux/virtio_gpio.h
new file mode 100644
index 000000000000..d4b29d9a39dd
--- /dev/null
+++ b/include/uapi/linux/virtio_gpio.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef _LINUX_VIRTIO_GPIO_H
+#define _LINUX_VIRTIO_GPIO_H
+
+#include <linux/types.h>
+
+/* Virtio GPIO Feature bits */
+#define VIRTIO_GPIO_F_IRQ 0
+
+/* Virtio GPIO request types */
+#define VIRTIO_GPIO_MSG_GET_NAMES 0x0001
+#define VIRTIO_GPIO_MSG_GET_DIRECTION 0x0002
+#define VIRTIO_GPIO_MSG_SET_DIRECTION 0x0003
+#define VIRTIO_GPIO_MSG_GET_VALUE 0x0004
+#define VIRTIO_GPIO_MSG_SET_VALUE 0x0005
+#define VIRTIO_GPIO_MSG_IRQ_TYPE 0x0006
+
+/* Possible values of the status field */
+#define VIRTIO_GPIO_STATUS_OK 0x0
+#define VIRTIO_GPIO_STATUS_ERR 0x1
+
+/* Direction types */
+#define VIRTIO_GPIO_DIRECTION_NONE 0x00
+#define VIRTIO_GPIO_DIRECTION_OUT 0x01
+#define VIRTIO_GPIO_DIRECTION_IN 0x02
+
+/* Virtio GPIO IRQ types */
+#define VIRTIO_GPIO_IRQ_TYPE_NONE 0x00
+#define VIRTIO_GPIO_IRQ_TYPE_EDGE_RISING 0x01
+#define VIRTIO_GPIO_IRQ_TYPE_EDGE_FALLING 0x02
+#define VIRTIO_GPIO_IRQ_TYPE_EDGE_BOTH 0x03
+#define VIRTIO_GPIO_IRQ_TYPE_LEVEL_HIGH 0x04
+#define VIRTIO_GPIO_IRQ_TYPE_LEVEL_LOW 0x08
+
+struct virtio_gpio_config {
+ __le16 ngpio;
+ __u8 padding[2];
+ __le32 gpio_names_size;
+};
+
+/* Virtio GPIO Request / Response */
+struct virtio_gpio_request {
+ __le16 type;
+ __le16 gpio;
+ __le32 value;
+};
+
+struct virtio_gpio_response {
+ __u8 status;
+ __u8 value;
+};
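For illustration, driving one line high through the request queue might look like the following sketch (the buffer is queued on the requestq and the device replies with a struct virtio_gpio_response):

static void fill_gpio_set_value(struct virtio_gpio_request *req,
				u16 gpio, bool on)
{
	req->type  = cpu_to_le16(VIRTIO_GPIO_MSG_SET_VALUE);
	req->gpio  = cpu_to_le16(gpio);
	req->value = cpu_to_le32(on ? 1 : 0);
}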
+
+struct virtio_gpio_response_get_names {
+ __u8 status;
+ __u8 value[];
+};
+
+/* Virtio GPIO IRQ Request / Response */
+struct virtio_gpio_irq_request {
+ __le16 gpio;
+};
+
+struct virtio_gpio_irq_response {
+ __u8 status;
+};
+
+/* Possible values of the interrupt status field */
+#define VIRTIO_GPIO_IRQ_STATUS_INVALID 0x0
+#define VIRTIO_GPIO_IRQ_STATUS_VALID 0x1
+
+#endif /* _LINUX_VIRTIO_GPIO_H */
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index 0c85914d9369..f556fde07b76 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -50,6 +50,20 @@
* VIRTIO_GPU_CMD_GET_EDID
*/
#define VIRTIO_GPU_F_EDID 1
+/*
+ * VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID
+ */
+#define VIRTIO_GPU_F_RESOURCE_UUID 2
+
+/*
+ * VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB
+ */
+#define VIRTIO_GPU_F_RESOURCE_BLOB 3
+/*
+ * VIRTIO_GPU_CMD_CREATE_CONTEXT with
+ * context_init and multiple timelines
+ */
+#define VIRTIO_GPU_F_CONTEXT_INIT 4
enum virtio_gpu_ctrl_type {
VIRTIO_GPU_UNDEFINED = 0,
@@ -66,6 +80,9 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_CMD_GET_CAPSET_INFO,
VIRTIO_GPU_CMD_GET_CAPSET,
VIRTIO_GPU_CMD_GET_EDID,
+ VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID,
+ VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB,
+ VIRTIO_GPU_CMD_SET_SCANOUT_BLOB,
/* 3d commands */
VIRTIO_GPU_CMD_CTX_CREATE = 0x0200,
@@ -76,6 +93,8 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D,
VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D,
VIRTIO_GPU_CMD_SUBMIT_3D,
+ VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB,
+ VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB,
/* cursor commands */
VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
@@ -87,6 +106,8 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_RESP_OK_CAPSET_INFO,
VIRTIO_GPU_RESP_OK_CAPSET,
VIRTIO_GPU_RESP_OK_EDID,
+ VIRTIO_GPU_RESP_OK_RESOURCE_UUID,
+ VIRTIO_GPU_RESP_OK_MAP_INFO,
/* error responses */
VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
@@ -97,14 +118,29 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER,
};
-#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
+enum virtio_gpu_shm_id {
+ VIRTIO_GPU_SHM_ID_UNDEFINED = 0,
+ /*
+ * VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB
+ * VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB
+ */
+ VIRTIO_GPU_SHM_ID_HOST_VISIBLE = 1
+};
+
+#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
+/*
+ * If the following flag is set, then ring_idx contains the index
+ * of the command ring that needs to used when creating the fence
+ */
+#define VIRTIO_GPU_FLAG_INFO_RING_IDX (1 << 1)
struct virtio_gpu_ctrl_hdr {
__le32 type;
__le32 flags;
__le64 fence_id;
__le32 ctx_id;
- __le32 padding;
+ __u8 ring_idx;
+ __u8 padding[3];
};
/* data passed in the cursor vq */
@@ -244,10 +280,11 @@ struct virtio_gpu_resource_create_3d {
};
/* VIRTIO_GPU_CMD_CTX_CREATE */
+#define VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK 0x000000ff
struct virtio_gpu_ctx_create {
struct virtio_gpu_ctrl_hdr hdr;
__le32 nlen;
- __le32 padding;
+ __le32 context_init;
char debug_name[64];
};
@@ -320,10 +357,10 @@ struct virtio_gpu_resp_edid {
#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
struct virtio_gpu_config {
- __u32 events_read;
- __u32 events_clear;
- __u32 num_scanouts;
- __u32 num_capsets;
+ __le32 events_read;
+ __le32 events_clear;
+ __le32 num_scanouts;
+ __le32 num_capsets;
};
/* simple formats for fbcon/X use */
@@ -340,4 +377,80 @@ enum virtio_gpu_formats {
VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM = 134,
};
+/* VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID */
+struct virtio_gpu_resource_assign_uuid {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+ __le32 padding;
+};
+
+/* VIRTIO_GPU_RESP_OK_RESOURCE_UUID */
+struct virtio_gpu_resp_resource_uuid {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __u8 uuid[16];
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB */
+struct virtio_gpu_resource_create_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+#define VIRTIO_GPU_BLOB_MEM_GUEST 0x0001
+#define VIRTIO_GPU_BLOB_MEM_HOST3D 0x0002
+#define VIRTIO_GPU_BLOB_MEM_HOST3D_GUEST 0x0003
+
+#define VIRTIO_GPU_BLOB_FLAG_USE_MAPPABLE 0x0001
+#define VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE 0x0002
+#define VIRTIO_GPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
+ /* zero is invalid blob mem */
+ __le32 blob_mem;
+ __le32 blob_flags;
+ __le32 nr_entries;
+ __le64 blob_id;
+ __le64 size;
+ /*
+	 * nr_entries * sizeof(struct virtio_gpu_mem_entry) bytes follow
+ */
+};
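The trailing scatter list makes the command variable length; a sketch of the size computation (struct virtio_gpu_mem_entry is the existing entry type defined elsewhere in this header):

static size_t blob_create_cmd_size(u32 nr_entries)
{
	return sizeof(struct virtio_gpu_resource_create_blob) +
	       nr_entries * sizeof(struct virtio_gpu_mem_entry);
}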
+
+/* VIRTIO_GPU_CMD_SET_SCANOUT_BLOB */
+struct virtio_gpu_set_scanout_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ struct virtio_gpu_rect r;
+ __le32 scanout_id;
+ __le32 resource_id;
+ __le32 width;
+ __le32 height;
+ __le32 format;
+ __le32 padding;
+ __le32 strides[4];
+ __le32 offsets[4];
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB */
+struct virtio_gpu_resource_map_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+ __le32 padding;
+ __le64 offset;
+};
+
+/* VIRTIO_GPU_RESP_OK_MAP_INFO */
+#define VIRTIO_GPU_MAP_CACHE_MASK 0x0f
+#define VIRTIO_GPU_MAP_CACHE_NONE 0x00
+#define VIRTIO_GPU_MAP_CACHE_CACHED 0x01
+#define VIRTIO_GPU_MAP_CACHE_UNCACHED 0x02
+#define VIRTIO_GPU_MAP_CACHE_WC 0x03
+struct virtio_gpu_resp_map_info {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __u32 map_info;
+ __u32 padding;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB */
+struct virtio_gpu_resource_unmap_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+ __le32 padding;
+};
+
#endif
diff --git a/include/uapi/linux/virtio_i2c.h b/include/uapi/linux/virtio_i2c.h
new file mode 100644
index 000000000000..acf3b6069136
--- /dev/null
+++ b/include/uapi/linux/virtio_i2c.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */
+/*
+ * Definitions for virtio I2C Adapter
+ *
+ * Copyright (c) 2021 Intel Corporation. All rights reserved.
+ */
+
+#ifndef _UAPI_LINUX_VIRTIO_I2C_H
+#define _UAPI_LINUX_VIRTIO_I2C_H
+
+#include <linux/const.h>
+#include <linux/types.h>
+
+/* Virtio I2C Feature bits */
+#define VIRTIO_I2C_F_ZERO_LENGTH_REQUEST 0
+
+/* The bit 0 of the @virtio_i2c_out_hdr.@flags, used to group the requests */
+#define VIRTIO_I2C_FLAGS_FAIL_NEXT _BITUL(0)
+
+/* The bit 1 of the @virtio_i2c_out_hdr.@flags, used to mark a buffer as read */
+#define VIRTIO_I2C_FLAGS_M_RD _BITUL(1)
+
+/**
+ * struct virtio_i2c_out_hdr - the virtio I2C message OUT header
+ * @addr: the controlled device address
+ * @padding: used to pad to full dword
+ * @flags: used for feature extensibility
+ */
+struct virtio_i2c_out_hdr {
+ __le16 addr;
+ __le16 padding;
+ __le32 flags;
+};
+
+/**
+ * struct virtio_i2c_in_hdr - the virtio I2C message IN header
+ * @status: the processing result from the backend
+ */
+struct virtio_i2c_in_hdr {
+ __u8 status;
+};
+
+/* The final status written by the device */
+#define VIRTIO_I2C_MSG_OK 0
+#define VIRTIO_I2C_MSG_ERR 1
+
+#endif /* _UAPI_LINUX_VIRTIO_I2C_H */
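A sketch of the headers for a single read transfer from 7-bit address 0x50 (kernel context assumed; the left-shift of the address follows the convention used by the Linux driver and is an assumption here, and the read payload travels as a separate device-writable buffer between the two headers):

static void fill_i2c_read_hdrs(struct virtio_i2c_out_hdr *out,
			       struct virtio_i2c_in_hdr *in, u16 addr7)
{
	out->addr    = cpu_to_le16(addr7 << 1);	/* assumed driver convention */
	out->padding = 0;
	out->flags   = cpu_to_le32(VIRTIO_I2C_FLAGS_M_RD);
	in->status   = VIRTIO_I2C_MSG_ERR;	/* device overwrites this */
}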
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 585e07b27333..7aa2eb766205 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -29,22 +29,56 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. */
-#define VIRTIO_ID_NET 1 /* virtio net */
-#define VIRTIO_ID_BLOCK 2 /* virtio block */
-#define VIRTIO_ID_CONSOLE 3 /* virtio console */
-#define VIRTIO_ID_RNG 4 /* virtio rng */
-#define VIRTIO_ID_BALLOON 5 /* virtio balloon */
-#define VIRTIO_ID_RPMSG 7 /* virtio remote processor messaging */
-#define VIRTIO_ID_SCSI 8 /* virtio scsi */
-#define VIRTIO_ID_9P 9 /* 9p virtio console */
-#define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */
-#define VIRTIO_ID_CAIF 12 /* Virtio caif */
-#define VIRTIO_ID_GPU 16 /* virtio GPU */
-#define VIRTIO_ID_INPUT 18 /* virtio input */
-#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
-#define VIRTIO_ID_CRYPTO 20 /* virtio crypto */
-#define VIRTIO_ID_IOMMU 23 /* virtio IOMMU */
-#define VIRTIO_ID_FS 26 /* virtio filesystem */
-#define VIRTIO_ID_PMEM 27 /* virtio pmem */
+#define VIRTIO_ID_NET 1 /* virtio net */
+#define VIRTIO_ID_BLOCK 2 /* virtio block */
+#define VIRTIO_ID_CONSOLE 3 /* virtio console */
+#define VIRTIO_ID_RNG 4 /* virtio rng */
+#define VIRTIO_ID_BALLOON 5 /* virtio balloon */
+#define VIRTIO_ID_IOMEM 6 /* virtio ioMemory */
+#define VIRTIO_ID_RPMSG 7 /* virtio remote processor messaging */
+#define VIRTIO_ID_SCSI 8 /* virtio scsi */
+#define VIRTIO_ID_9P 9 /* 9p virtio console */
+#define VIRTIO_ID_MAC80211_WLAN 10 /* virtio WLAN MAC */
+#define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */
+#define VIRTIO_ID_CAIF 12 /* Virtio caif */
+#define VIRTIO_ID_MEMORY_BALLOON 13 /* virtio memory balloon */
+#define VIRTIO_ID_GPU 16 /* virtio GPU */
+#define VIRTIO_ID_CLOCK 17 /* virtio clock/timer */
+#define VIRTIO_ID_INPUT 18 /* virtio input */
+#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
+#define VIRTIO_ID_CRYPTO 20 /* virtio crypto */
+#define VIRTIO_ID_SIGNAL_DIST 21 /* virtio signal distribution device */
+#define VIRTIO_ID_PSTORE 22 /* virtio pstore device */
+#define VIRTIO_ID_IOMMU 23 /* virtio IOMMU */
+#define VIRTIO_ID_MEM 24 /* virtio mem */
+#define VIRTIO_ID_SOUND 25 /* virtio sound */
+#define VIRTIO_ID_FS 26 /* virtio filesystem */
+#define VIRTIO_ID_PMEM 27 /* virtio pmem */
+#define VIRTIO_ID_RPMB 28 /* virtio rpmb */
+#define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */
+#define VIRTIO_ID_VIDEO_ENCODER 30 /* virtio video encoder */
+#define VIRTIO_ID_VIDEO_DECODER 31 /* virtio video decoder */
+#define VIRTIO_ID_SCMI 32 /* virtio SCMI */
+#define VIRTIO_ID_NITRO_SEC_MOD		33 /* virtio nitro secure module */
+#define VIRTIO_ID_I2C_ADAPTER 34 /* virtio i2c adapter */
+#define VIRTIO_ID_WATCHDOG 35 /* virtio watchdog */
+#define VIRTIO_ID_CAN 36 /* virtio can */
+#define VIRTIO_ID_DMABUF 37 /* virtio dmabuf */
+#define VIRTIO_ID_PARAM_SERV 38 /* virtio parameter server */
+#define VIRTIO_ID_AUDIO_POLICY 39 /* virtio audio policy */
+#define VIRTIO_ID_BT 40 /* virtio bluetooth */
+#define VIRTIO_ID_GPIO 41 /* virtio gpio */
+
+/*
+ * Virtio Transitional IDs
+ */
+
+#define VIRTIO_TRANS_ID_NET 0x1000 /* transitional virtio net */
+#define VIRTIO_TRANS_ID_BLOCK 0x1001 /* transitional virtio block */
+#define VIRTIO_TRANS_ID_BALLOON 0x1002 /* transitional virtio balloon */
+#define VIRTIO_TRANS_ID_CONSOLE 0x1003 /* transitional virtio console */
+#define VIRTIO_TRANS_ID_SCSI 0x1004 /* transitional virtio SCSI */
+#define VIRTIO_TRANS_ID_RNG 0x1005 /* transitional virtio rng */
+#define VIRTIO_TRANS_ID_9P 0x1009 /* transitional virtio 9p console */
#endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/uapi/linux/virtio_input.h b/include/uapi/linux/virtio_input.h
index a7fe5c8fb135..52084b1fb965 100644
--- a/include/uapi/linux/virtio_input.h
+++ b/include/uapi/linux/virtio_input.h
@@ -40,18 +40,18 @@ enum virtio_input_config_select {
};
struct virtio_input_absinfo {
- __u32 min;
- __u32 max;
- __u32 fuzz;
- __u32 flat;
- __u32 res;
+ __le32 min;
+ __le32 max;
+ __le32 fuzz;
+ __le32 flat;
+ __le32 res;
};
struct virtio_input_devids {
- __u16 bustype;
- __u16 vendor;
- __u16 product;
- __u16 version;
+ __le16 bustype;
+ __le16 vendor;
+ __le16 product;
+ __le16 version;
};
struct virtio_input_config {
diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h
index 237e36a280cb..1ff357f0d72e 100644
--- a/include/uapi/linux/virtio_iommu.h
+++ b/include/uapi/linux/virtio_iommu.h
@@ -16,6 +16,7 @@
#define VIRTIO_IOMMU_F_BYPASS 3
#define VIRTIO_IOMMU_F_PROBE 4
#define VIRTIO_IOMMU_F_MMIO 5
+#define VIRTIO_IOMMU_F_BYPASS_CONFIG 6
struct virtio_iommu_range_64 {
__le64 start;
@@ -36,6 +37,8 @@ struct virtio_iommu_config {
struct virtio_iommu_range_32 domain_range;
/* Probe buffer size */
__le32 probe_size;
+ __u8 bypass;
+ __u8 reserved[3];
};
/* Request types */
@@ -66,11 +69,14 @@ struct virtio_iommu_req_tail {
__u8 reserved[3];
};
+#define VIRTIO_IOMMU_ATTACH_F_BYPASS (1 << 0)
+
struct virtio_iommu_req_attach {
struct virtio_iommu_req_head head;
__le32 domain;
__le32 endpoint;
- __u8 reserved[8];
+ __le32 flags;
+ __u8 reserved[4];
struct virtio_iommu_req_tail tail;
};
diff --git a/include/uapi/linux/virtio_mem.h b/include/uapi/linux/virtio_mem.h
new file mode 100644
index 000000000000..e9122f1d0e0c
--- /dev/null
+++ b/include/uapi/linux/virtio_mem.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Virtio Mem Device
+ *
+ * Copyright Red Hat, Inc. 2020
+ *
+ * Authors:
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This header is BSD licensed so anyone can use the definitions
+ * to implement compatible drivers/servers:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_VIRTIO_MEM_H
+#define _LINUX_VIRTIO_MEM_H
+
+#include <linux/types.h>
+#include <linux/virtio_types.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+
+/*
+ * Each virtio-mem device manages a dedicated region in physical address
+ * space. Each device can belong to a single NUMA node, multiple devices
+ * for a single NUMA node are possible. A virtio-mem device is like a
+ * "resizable DIMM" consisting of small memory blocks that can be plugged
+ * or unplugged. The device driver is responsible for (un)plugging memory
+ * blocks on demand.
+ *
+ * Virtio-mem devices can only operate on their assigned memory region in
+ * order to (un)plug memory. A device cannot (un)plug memory belonging to
+ * other devices.
+ *
+ * The "region_size" corresponds to the maximum amount of memory that can
+ * be provided by a device. The "size" corresponds to the amount of memory
+ * that is currently plugged. "requested_size" corresponds to a request
+ * from the device to the device driver to (un)plug blocks. The
+ * device driver should try to (un)plug blocks in order to reach the
+ * "requested_size". It is impossible to plug more memory than requested.
+ *
+ * The "usable_region_size" represents the memory region that can actually
+ * be used to (un)plug memory. It is always at least as big as the
+ * "requested_size" and will grow dynamically. It will only shrink when
+ * explicitly triggered (VIRTIO_MEM_REQ_UNPLUG).
+ *
+ * There are no guarantees what will happen if unplugged memory is
+ * read/written. In general, unplugged memory should not be touched, because
+ * the resulting action is undefined. There is one exception: without
+ * VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, unplugged memory inside the usable
+ * region can be read, to simplify creation of memory dumps.
+ *
+ * It can happen that the device cannot process a request, because it is
+ * busy. The device driver has to retry later.
+ *
+ * Usually, during system resets all memory will get unplugged, so the
+ * device driver can start with a clean state. However, in specific
+ * scenarios (if the device is busy) it can happen that the device still
+ * has memory plugged. The device driver can request to unplug all memory
+ * (VIRTIO_MEM_REQ_UNPLUG) - which might take a while to succeed if the
+ * device is busy.
+ */
+
+/* --- virtio-mem: feature bits --- */
+
+/* node_id is an ACPI PXM and is valid */
+#define VIRTIO_MEM_F_ACPI_PXM 0
+/* unplugged memory must not be accessed */
+#define VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE 1
+
+
+/* --- virtio-mem: guest -> host requests --- */
+
+/* request to plug memory blocks */
+#define VIRTIO_MEM_REQ_PLUG 0
+/* request to unplug memory blocks */
+#define VIRTIO_MEM_REQ_UNPLUG 1
+/* request to unplug all blocks and shrink the usable size */
+#define VIRTIO_MEM_REQ_UNPLUG_ALL 2
+/* request information about the plugged state of memory blocks */
+#define VIRTIO_MEM_REQ_STATE 3
+
+struct virtio_mem_req_plug {
+ __virtio64 addr;
+ __virtio16 nb_blocks;
+ __virtio16 padding[3];
+};
+
+struct virtio_mem_req_unplug {
+ __virtio64 addr;
+ __virtio16 nb_blocks;
+ __virtio16 padding[3];
+};
+
+struct virtio_mem_req_state {
+ __virtio64 addr;
+ __virtio16 nb_blocks;
+ __virtio16 padding[3];
+};
+
+struct virtio_mem_req {
+ __virtio16 type;
+ __virtio16 padding[3];
+
+ union {
+ struct virtio_mem_req_plug plug;
+ struct virtio_mem_req_unplug unplug;
+ struct virtio_mem_req_state state;
+ } u;
+};
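A sketch of building a plug request (kernel context assumed; addr must be aligned to the block_size advertised in the device config):

static void virtio_mem_fill_plug_req(struct virtio_device *vdev,
				     struct virtio_mem_req *req,
				     u64 addr, u16 nb_blocks)
{
	memset(req, 0, sizeof(*req));
	req->type             = cpu_to_virtio16(vdev, VIRTIO_MEM_REQ_PLUG);
	req->u.plug.addr      = cpu_to_virtio64(vdev, addr);
	req->u.plug.nb_blocks = cpu_to_virtio16(vdev, nb_blocks);
}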
+
+
+/* --- virtio-mem: host -> guest response --- */
+
+/*
+ * Request processed successfully, applicable for
+ * - VIRTIO_MEM_REQ_PLUG
+ * - VIRTIO_MEM_REQ_UNPLUG
+ * - VIRTIO_MEM_REQ_UNPLUG_ALL
+ * - VIRTIO_MEM_REQ_STATE
+ */
+#define VIRTIO_MEM_RESP_ACK 0
+/*
+ * Request denied - e.g. trying to plug more than requested, applicable for
+ * - VIRTIO_MEM_REQ_PLUG
+ */
+#define VIRTIO_MEM_RESP_NACK 1
+/*
+ * Request cannot be processed right now, try again later, applicable for
+ * - VIRTIO_MEM_REQ_PLUG
+ * - VIRTIO_MEM_REQ_UNPLUG
+ * - VIRTIO_MEM_REQ_UNPLUG_ALL
+ */
+#define VIRTIO_MEM_RESP_BUSY 2
+/*
+ * Error in request (e.g. addresses/alignment), applicable for
+ * - VIRTIO_MEM_REQ_PLUG
+ * - VIRTIO_MEM_REQ_UNPLUG
+ * - VIRTIO_MEM_REQ_STATE
+ */
+#define VIRTIO_MEM_RESP_ERROR 3
+
+
+/* State of memory blocks is "plugged" */
+#define VIRTIO_MEM_STATE_PLUGGED 0
+/* State of memory blocks is "unplugged" */
+#define VIRTIO_MEM_STATE_UNPLUGGED 1
+/* State of memory blocks is "mixed" */
+#define VIRTIO_MEM_STATE_MIXED 2
+
+struct virtio_mem_resp_state {
+ __virtio16 state;
+};
+
+struct virtio_mem_resp {
+ __virtio16 type;
+ __virtio16 padding[3];
+
+ union {
+ struct virtio_mem_resp_state state;
+ } u;
+};
+
+/* --- virtio-mem: configuration --- */
+
+struct virtio_mem_config {
+ /* Block size and alignment. Cannot change. */
+ __le64 block_size;
+ /* Valid with VIRTIO_MEM_F_ACPI_PXM. Cannot change. */
+ __le16 node_id;
+ __u8 padding[6];
+ /* Start address of the memory region. Cannot change. */
+ __le64 addr;
+ /* Region size (maximum). Cannot change. */
+ __le64 region_size;
+ /*
+ * Currently usable region size. Can grow up to region_size. Can
+ * shrink due to VIRTIO_MEM_REQ_UNPLUG_ALL (in which case no config
+ * update will be sent).
+ */
+ __le64 usable_region_size;
+ /*
+ * Currently used size. Changes due to plug/unplug requests, but no
+ * config updates will be sent.
+ */
+ __le64 plugged_size;
+ /* Requested size. New plug requests cannot exceed it. Can change. */
+ __le64 requested_size;
+};
+
+#endif /* _LINUX_VIRTIO_MEM_H */
diff --git a/include/uapi/linux/virtio_mmio.h b/include/uapi/linux/virtio_mmio.h
index c4b09689ab64..0650f91bea6c 100644
--- a/include/uapi/linux/virtio_mmio.h
+++ b/include/uapi/linux/virtio_mmio.h
@@ -122,6 +122,17 @@
#define VIRTIO_MMIO_QUEUE_USED_LOW 0x0a0
#define VIRTIO_MMIO_QUEUE_USED_HIGH 0x0a4
+/* Shared memory region id */
+#define VIRTIO_MMIO_SHM_SEL 0x0ac
+
+/* Shared memory region length, 64 bits in two halves */
+#define VIRTIO_MMIO_SHM_LEN_LOW 0x0b0
+#define VIRTIO_MMIO_SHM_LEN_HIGH 0x0b4
+
+/* Shared memory region base address, 64 bits in two halves */
+#define VIRTIO_MMIO_SHM_BASE_LOW 0x0b8
+#define VIRTIO_MMIO_SHM_BASE_HIGH 0x0bc
+
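Reassembling one region from the two halves might look like the following sketch (kernel context; base is the mapped MMIO window):

static void virtio_mmio_read_shm(void __iomem *base, u32 id,
				 u64 *addr, u64 *len)
{
	writel(id, base + VIRTIO_MMIO_SHM_SEL);	/* select the region first */
	*len  = readl(base + VIRTIO_MMIO_SHM_LEN_LOW) |
		((u64)readl(base + VIRTIO_MMIO_SHM_LEN_HIGH) << 32);
	*addr = readl(base + VIRTIO_MMIO_SHM_BASE_LOW) |
		((u64)readl(base + VIRTIO_MMIO_SHM_BASE_HIGH) << 32);
}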
/* Configuration atomicity value */
#define VIRTIO_MMIO_CONFIG_GENERATION 0x0fc
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index a3715a3224c1..6cb842ea8979 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -56,7 +56,10 @@
#define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow
* Steering */
#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */
-
+#define VIRTIO_NET_F_NOTF_COAL 53 /* Device supports notifications coalescing */
+#define VIRTIO_NET_F_HASH_REPORT 57 /* Supports hash report */
+#define VIRTIO_NET_F_RSS 60 /* Supports RSS RX steering */
+#define VIRTIO_NET_F_RSC_EXT 61 /* extended coalescing info */
#define VIRTIO_NET_F_STANDBY 62 /* Act as standby for another device
* with the same MAC.
*/
@@ -69,29 +72,46 @@
#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
#define VIRTIO_NET_S_ANNOUNCE 2 /* Announcement is needed */
+/* supported/enabled hash types */
+#define VIRTIO_NET_RSS_HASH_TYPE_IPv4 (1 << 0)
+#define VIRTIO_NET_RSS_HASH_TYPE_TCPv4 (1 << 1)
+#define VIRTIO_NET_RSS_HASH_TYPE_UDPv4 (1 << 2)
+#define VIRTIO_NET_RSS_HASH_TYPE_IPv6 (1 << 3)
+#define VIRTIO_NET_RSS_HASH_TYPE_TCPv6 (1 << 4)
+#define VIRTIO_NET_RSS_HASH_TYPE_UDPv6 (1 << 5)
+#define VIRTIO_NET_RSS_HASH_TYPE_IP_EX (1 << 6)
+#define VIRTIO_NET_RSS_HASH_TYPE_TCP_EX (1 << 7)
+#define VIRTIO_NET_RSS_HASH_TYPE_UDP_EX (1 << 8)
+
struct virtio_net_config {
/* The config defining mac address (if VIRTIO_NET_F_MAC) */
__u8 mac[ETH_ALEN];
/* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
- __u16 status;
+ __virtio16 status;
/* Maximum number of each of transmit and receive queues;
* see VIRTIO_NET_F_MQ and VIRTIO_NET_CTRL_MQ.
* Legal values are between 1 and 0x8000
*/
- __u16 max_virtqueue_pairs;
+ __virtio16 max_virtqueue_pairs;
/* Default maximum transmit unit advice */
- __u16 mtu;
+ __virtio16 mtu;
/*
* speed, in units of 1Mb. All values 0 to INT_MAX are legal.
* Any other value stands for unknown.
*/
- __u32 speed;
+ __le32 speed;
/*
* 0x00 - half duplex
* 0x01 - full duplex
* Any other value stands for unknown.
*/
__u8 duplex;
+ /* maximum size of RSS key */
+ __u8 rss_max_key_size;
+ /* maximum number of indirection table entries */
+ __le16 rss_max_indirection_table_length;
+ /* bitmask of supported VIRTIO_NET_RSS_HASH_ types */
+ __le32 supported_hash_types;
} __attribute__((packed));
/*
@@ -104,6 +124,7 @@ struct virtio_net_config {
struct virtio_net_hdr_v1 {
#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* Use csum_start, csum_offset */
#define VIRTIO_NET_HDR_F_DATA_VALID 2 /* Csum is valid */
+#define VIRTIO_NET_HDR_F_RSC_INFO 4 /* rsc info in csum_ fields */
__u8 flags;
#define VIRTIO_NET_HDR_GSO_NONE 0 /* Not a GSO frame */
#define VIRTIO_NET_HDR_GSO_TCPV4 1 /* GSO frame, IPv4 TCP (TSO) */
@@ -113,11 +134,46 @@ struct virtio_net_hdr_v1 {
__u8 gso_type;
__virtio16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */
__virtio16 gso_size; /* Bytes to append to hdr_len per frame */
- __virtio16 csum_start; /* Position to start checksumming from */
- __virtio16 csum_offset; /* Offset after that to place checksum */
+ union {
+ struct {
+ __virtio16 csum_start;
+ __virtio16 csum_offset;
+ };
+ /* Checksum calculation */
+ struct {
+ /* Position to start checksumming from */
+ __virtio16 start;
+ /* Offset after that to place checksum */
+ __virtio16 offset;
+ } csum;
+ /* Receive Segment Coalescing */
+ struct {
+ /* Number of coalesced segments */
+ __le16 segments;
+ /* Number of duplicated acks */
+ __le16 dup_acks;
+ } rsc;
+ };
__virtio16 num_buffers; /* Number of merged rx buffers */
};
+struct virtio_net_hdr_v1_hash {
+ struct virtio_net_hdr_v1 hdr;
+ __le32 hash_value;
+#define VIRTIO_NET_HASH_REPORT_NONE 0
+#define VIRTIO_NET_HASH_REPORT_IPv4 1
+#define VIRTIO_NET_HASH_REPORT_TCPv4 2
+#define VIRTIO_NET_HASH_REPORT_UDPv4 3
+#define VIRTIO_NET_HASH_REPORT_IPv6 4
+#define VIRTIO_NET_HASH_REPORT_TCPv6 5
+#define VIRTIO_NET_HASH_REPORT_UDPv6 6
+#define VIRTIO_NET_HASH_REPORT_IPv6_EX 7
+#define VIRTIO_NET_HASH_REPORT_TCPv6_EX 8
+#define VIRTIO_NET_HASH_REPORT_UDPv6_EX 9
+ __le16 hash_report;
+ __le16 padding;
+};
+
#ifndef VIRTIO_NET_NO_LEGACY
/* This header comes first in the scatter-gather list.
* For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated, it must
@@ -228,7 +284,9 @@ struct virtio_net_ctrl_mac {
/*
* Control Receive Flow Steering
- *
+ */
+#define VIRTIO_NET_CTRL_MQ 4
+/*
* The command VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET
* enables Receive Flow Steering, specifying the number of the transmit and
* receive queues that will be used. After the command is consumed and acked by
@@ -241,12 +299,48 @@ struct virtio_net_ctrl_mq {
__virtio16 virtqueue_pairs;
};
-#define VIRTIO_NET_CTRL_MQ 4
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
/*
+ * The command VIRTIO_NET_CTRL_MQ_RSS_CONFIG has the same effect as
+ * VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET does and additionally configures
+ * the receive steering to use a hash calculated for each incoming
+ * packet to decide which receive virtqueue the packet is placed on.
+ * The command also provides the parameters used to calculate the hash
+ * and to select the receive virtqueue.
+ */
+struct virtio_net_rss_config {
+ __le32 hash_types;
+ __le16 indirection_table_mask;
+ __le16 unclassified_queue;
+ __le16 indirection_table[1/* + indirection_table_mask */];
+ __le16 max_tx_vq;
+ __u8 hash_key_length;
+ __u8 hash_key_data[/* hash_key_length */];
+};
+
+ #define VIRTIO_NET_CTRL_MQ_RSS_CONFIG 1
+
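Because indirection_table[] and hash_key_data[] are variable length, the struct cannot be sent as-is; a sketch of computing the wire length of the command payload from the two variable counts (field sizes taken from the struct above):

static size_t rss_config_len(u16 indirection_table_mask, u8 hash_key_length)
{
	size_t entries = (size_t)indirection_table_mask + 1;

	return 4 + 2 + 2 +			/* hash_types..unclassified_queue */
	       entries * sizeof(__le16) +	/* indirection_table[] */
	       2 + 1 +				/* max_tx_vq, hash_key_length */
	       hash_key_length;			/* hash_key_data[] */
}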
+/*
+ * The command VIRTIO_NET_CTRL_MQ_HASH_CONFIG requests the device
+ * to include the calculated hash value and the type of the reported
+ * hash in the virtio header of the packet. It also provides
+ * parameters for hash calculation. The command requires feature
+ * VIRTIO_NET_F_HASH_REPORT to be negotiated to extend the
+ * layout of virtio header as defined in virtio_net_hdr_v1_hash.
+ */
+struct virtio_net_hash_config {
+ __le32 hash_types;
+ /* for compatibility with virtio_net_rss_config */
+ __le16 reserved[4];
+ __u8 hash_key_length;
+ __u8 hash_key_data[/* hash_key_length */];
+};
+
+ #define VIRTIO_NET_CTRL_MQ_HASH_CONFIG 2
+
+/*
* Control network offloads
*
* Reconfigures the network offloads that Guest can handle.
@@ -261,4 +355,36 @@ struct virtio_net_ctrl_mq {
#define VIRTIO_NET_CTRL_GUEST_OFFLOADS 5
#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET 0
+/*
+ * Control notifications coalescing.
+ *
+ * Request the device to change the notifications coalescing parameters.
+ *
+ * Available with the VIRTIO_NET_F_NOTF_COAL feature bit.
+ */
+#define VIRTIO_NET_CTRL_NOTF_COAL 6
+/*
+ * Set the tx-usecs/tx-max-packets parameters.
+ */
+struct virtio_net_ctrl_coal_tx {
+ /* Maximum number of packets to send before a TX notification */
+ __le32 tx_max_packets;
+ /* Maximum number of usecs to delay a TX notification */
+ __le32 tx_usecs;
+};
+
+#define VIRTIO_NET_CTRL_NOTF_COAL_TX_SET 0
+
+/*
+ * Set the rx-usecs/rx-max-packets parameters.
+ */
+struct virtio_net_ctrl_coal_rx {
+ /* Maximum number of packets to receive before a RX notification */
+ __le32 rx_max_packets;
+ /* Maximum number of usecs to delay a RX notification */
+ __le32 rx_usecs;
+};
+
+#define VIRTIO_NET_CTRL_NOTF_COAL_RX_SET 1
+
#endif /* _UAPI_LINUX_VIRTIO_NET_H */
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index 90007a1abcab..f703afc7ad31 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -113,6 +113,8 @@
#define VIRTIO_PCI_CAP_DEVICE_CFG 4
/* PCI configuration access */
#define VIRTIO_PCI_CAP_PCI_CFG 5
+/* Additional shared memory capability */
+#define VIRTIO_PCI_CAP_SHARED_MEMORY_CFG 8
/* This is the PCI capability header: */
struct virtio_pci_cap {
@@ -121,11 +123,18 @@ struct virtio_pci_cap {
__u8 cap_len; /* Generic PCI field: capability length */
__u8 cfg_type; /* Identifies the structure. */
__u8 bar; /* Where to find it. */
- __u8 padding[3]; /* Pad to full dword. */
+ __u8 id; /* Multiple capabilities of the same type */
+ __u8 padding[2]; /* Pad to full dword. */
__le32 offset; /* Offset within bar. */
__le32 length; /* Length of the structure, in bytes. */
};
+struct virtio_pci_cap64 {
+ struct virtio_pci_cap cap;
+ __le32 offset_hi; /* Most sig 32 bits of offset */
+ __le32 length_hi; /* Most sig 32 bits of length */
+};
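Shared memory regions may live above 4 GiB, hence the 64-bit extension; reassembling the full offset is a sketch like:

static inline u64 vp_cap64_offset(const struct virtio_pci_cap64 *c)
{
	return ((u64)le32_to_cpu(c->offset_hi) << 32) |
	       le32_to_cpu(c->cap.offset);
}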
+
struct virtio_pci_notify_cap {
struct virtio_pci_cap cap;
__le32 notify_off_multiplier; /* Multiplier for queue_notify_off. */
@@ -193,6 +202,8 @@ struct virtio_pci_cfg_cap {
#define VIRTIO_PCI_COMMON_Q_AVAILHI 44
#define VIRTIO_PCI_COMMON_Q_USEDLO 48
#define VIRTIO_PCI_COMMON_Q_USEDHI 52
+#define VIRTIO_PCI_COMMON_Q_NDATA 56
+#define VIRTIO_PCI_COMMON_Q_RESET 58
#endif /* VIRTIO_PCI_NO_MODERN */
diff --git a/include/uapi/linux/virtio_pcidev.h b/include/uapi/linux/virtio_pcidev.h
new file mode 100644
index 000000000000..668b07ce515b
--- /dev/null
+++ b/include/uapi/linux/virtio_pcidev.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: Johannes Berg <johannes@sipsolutions.net>
+ */
+#ifndef _UAPI_LINUX_VIRTIO_PCIDEV_H
+#define _UAPI_LINUX_VIRTIO_PCIDEV_H
+#include <linux/types.h>
+
+/**
+ * enum virtio_pcidev_ops - virtual PCI device operations
+ * @VIRTIO_PCIDEV_OP_RESERVED: reserved to catch errors
+ * @VIRTIO_PCIDEV_OP_CFG_READ: read config space, size is 1, 2, 4 or 8;
+ * the @data field should be filled in by the device (in little endian).
+ * @VIRTIO_PCIDEV_OP_CFG_WRITE: write config space, size is 1, 2, 4 or 8;
+ * the @data field contains the data to write (in little endian).
+ * @VIRTIO_PCIDEV_OP_MMIO_READ: read BAR mem/pio, size can be variable;
+ * the @data field should be filled in by the device (in little endian).
+ * @VIRTIO_PCIDEV_OP_MMIO_WRITE: write BAR mem/pio, size can be variable;
+ * the @data field contains the data to write (in little endian).
+ * @VIRTIO_PCIDEV_OP_MMIO_MEMSET: memset MMIO, size is variable but
+ * the @data field only has one byte (unlike @VIRTIO_PCIDEV_OP_MMIO_WRITE)
+ * @VIRTIO_PCIDEV_OP_INT: legacy INTx# pin interrupt, the addr field is 1-4 for
+ * the number
+ * @VIRTIO_PCIDEV_OP_MSI: MSI(-X) interrupt, this message basically transports
+ * the 16- or 32-bit write that would otherwise be done into memory,
+ * analogous to the write messages (@VIRTIO_PCIDEV_OP_MMIO_WRITE) above
+ * @VIRTIO_PCIDEV_OP_PME: Dummy message whose content is ignored (and should be
+ * all zeroes) to signal the PME# pin.
+ */
+enum virtio_pcidev_ops {
+ VIRTIO_PCIDEV_OP_RESERVED = 0,
+ VIRTIO_PCIDEV_OP_CFG_READ,
+ VIRTIO_PCIDEV_OP_CFG_WRITE,
+ VIRTIO_PCIDEV_OP_MMIO_READ,
+ VIRTIO_PCIDEV_OP_MMIO_WRITE,
+ VIRTIO_PCIDEV_OP_MMIO_MEMSET,
+ VIRTIO_PCIDEV_OP_INT,
+ VIRTIO_PCIDEV_OP_MSI,
+ VIRTIO_PCIDEV_OP_PME,
+};
+
+/**
+ * struct virtio_pcidev_msg - virtio PCI device operation
+ * @op: the operation to do
+ * @bar: the bar (only with BAR read/write messages)
+ * @reserved: reserved
+ * @size: the size of the read/write (in bytes)
+ * @addr: the address to read/write
+ * @data: the data, normally @size long, but just one byte for
+ * %VIRTIO_PCIDEV_OP_MMIO_MEMSET
+ *
+ * Note: the fields are all in native (CPU) endianness; however, the
+ * @data values will often be in little endian (see the ops above).
+ */
+struct virtio_pcidev_msg {
+ __u8 op;
+ __u8 bar;
+ __u16 reserved;
+ __u32 size;
+ __u64 addr;
+ __u8 data[];
+};
+
+#endif /* _UAPI_LINUX_VIRTIO_PCIDEV_H */
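For illustration, a minimal sketch of composing one of these messages; how the out/in buffers are queued on the virtqueue is transport-specific, and queue_msg() is an assumed helper, not part of this header:

/* Hypothetical sketch: read the 4-byte vendor/device ID dword from
 * config space.  The message goes out with a 4-byte in-buffer that
 * the device fills (little endian) as @data.
 */
struct virtio_pcidev_msg msg = {
	.op   = VIRTIO_PCIDEV_OP_CFG_READ,
	.size = 4,
	.addr = 0,			/* config space offset 0 */
};
__u8 reply[4];				/* device-filled @data buffer */

queue_msg(&msg, sizeof(msg), reply, sizeof(reply));	/* assumed helper */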
diff --git a/include/uapi/linux/virtio_pmem.h b/include/uapi/linux/virtio_pmem.h
index b022787ffb94..d676b3620383 100644
--- a/include/uapi/linux/virtio_pmem.h
+++ b/include/uapi/linux/virtio_pmem.h
@@ -15,8 +15,8 @@
#include <linux/virtio_config.h>
struct virtio_pmem_config {
- __u64 start;
- __u64 size;
+ __le64 start;
+ __le64 size;
};
#define VIRTIO_PMEM_REQ_TYPE_FLUSH 0
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 559f42e73315..f8c20d3de8da 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -86,15 +86,28 @@
* at the end of the used ring. Guest should ignore the used->flags field. */
#define VIRTIO_RING_F_EVENT_IDX 29
-/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
+/* Alignment requirements for vring elements.
+ * When using pre-virtio 1.0 layout, these fall out naturally.
+ */
+#define VRING_AVAIL_ALIGN_SIZE 2
+#define VRING_USED_ALIGN_SIZE 4
+#define VRING_DESC_ALIGN_SIZE 16
+
+/**
+ * struct vring_desc - Virtio ring descriptors,
+ * 16 bytes long. These can chain together via @next.
+ *
+ * @addr: buffer address (guest-physical)
+ * @len: buffer length
+ * @flags: descriptor flags
+ * @next: index of the next descriptor in the chain,
+ * if the VRING_DESC_F_NEXT flag is set. We chain unused
+ * descriptors via this, too.
+ */
struct vring_desc {
- /* Address (guest-physical). */
__virtio64 addr;
- /* Length. */
__virtio32 len;
- /* The flags as indicated above. */
__virtio16 flags;
- /* We chain unused descriptors via this, too */
__virtio16 next;
};
@@ -112,28 +125,47 @@ struct vring_used_elem {
__virtio32 len;
};
+typedef struct vring_used_elem __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
+ vring_used_elem_t;
+
struct vring_used {
__virtio16 flags;
__virtio16 idx;
- struct vring_used_elem ring[];
+ vring_used_elem_t ring[];
};
+/*
+ * The ring element addresses are passed between components with different
+ * alignment assumptions. Thus, we might need to decrease the compiler-selected
+ * alignment, and so must use a typedef to make sure the aligned attribute
+ * actually takes hold:
+ *
+ * https://gcc.gnu.org/onlinedocs//gcc/Common-Type-Attributes.html#Common-Type-Attributes
+ *
+ * When used on a struct, or struct member, the aligned attribute can only
+ * increase the alignment; in order to decrease it, the packed attribute must
+ * be specified as well. When used as part of a typedef, the aligned attribute
+ * can both increase and decrease alignment, and specifying the packed
+ * attribute generates a warning.
+ */
+typedef struct vring_desc __attribute__((aligned(VRING_DESC_ALIGN_SIZE)))
+ vring_desc_t;
+typedef struct vring_avail __attribute__((aligned(VRING_AVAIL_ALIGN_SIZE)))
+ vring_avail_t;
+typedef struct vring_used __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
+ vring_used_t;
+
struct vring {
unsigned int num;
- struct vring_desc *desc;
+ vring_desc_t *desc;
- struct vring_avail *avail;
+ vring_avail_t *avail;
- struct vring_used *used;
+ vring_used_t *used;
};
-/* Alignment requirements for vring elements.
- * When using pre-virtio 1.0 layout, these fall out naturally.
- */
-#define VRING_AVAIL_ALIGN_SIZE 2
-#define VRING_USED_ALIGN_SIZE 4
-#define VRING_DESC_ALIGN_SIZE 16
+#ifndef VIRTIO_RING_NO_LEGACY
/* The standard layout for the ring is a continuous chunk of memory which looks
* like this. We assume num is a power of 2.
@@ -181,6 +213,8 @@ static inline unsigned vring_size(unsigned int num, unsigned long align)
+ sizeof(__virtio16) * 3 + sizeof(struct vring_used_elem) * num;
}
+#endif /* VIRTIO_RING_NO_LEGACY */
+
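A brief sketch of using the legacy layout guarded above: vring_size() computes the contiguous allocation and the companion vring_init() helper in this header lays out the three parts; the allocator is an assumption:

/* Hypothetical sketch: size and initialize a legacy ring of 256
 * descriptors with a 4 KiB used-ring alignment.
 */
unsigned int num = 256;			/* must be a power of 2 */
unsigned int bytes = vring_size(num, 4096);
void *p = alloc_ring_memory(bytes);	/* assumed, zeroed allocation */
struct vring vr;

vring_init(&vr, num, p, 4096);		/* sets vr.desc/avail/used */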
/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
/* Assuming a given event_idx value from the other side, if
* we have just incremented index from old to new_idx,
diff --git a/include/uapi/linux/virtio_scmi.h b/include/uapi/linux/virtio_scmi.h
new file mode 100644
index 000000000000..f8ddd04a3ace
--- /dev/null
+++ b/include/uapi/linux/virtio_scmi.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * Copyright (C) 2020-2021 OpenSynergy GmbH
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+#ifndef _UAPI_LINUX_VIRTIO_SCMI_H
+#define _UAPI_LINUX_VIRTIO_SCMI_H
+
+#include <linux/virtio_types.h>
+
+/* Device implements some SCMI notifications, or delayed responses. */
+#define VIRTIO_SCMI_F_P2A_CHANNELS 0
+
+/* Device implements any SCMI statistics shared memory region */
+#define VIRTIO_SCMI_F_SHARED_MEMORY 1
+
+/* Virtqueues */
+
+#define VIRTIO_SCMI_VQ_TX 0 /* cmdq */
+#define VIRTIO_SCMI_VQ_RX 1 /* eventq */
+#define VIRTIO_SCMI_VQ_MAX_CNT 2
+
+#endif /* _UAPI_LINUX_VIRTIO_SCMI_H */
diff --git a/include/uapi/linux/virtio_scsi.h b/include/uapi/linux/virtio_scsi.h
index cc18ef8825c0..0abaae4027c0 100644
--- a/include/uapi/linux/virtio_scsi.h
+++ b/include/uapi/linux/virtio_scsi.h
@@ -103,16 +103,16 @@ struct virtio_scsi_event {
} __attribute__((packed));
struct virtio_scsi_config {
- __u32 num_queues;
- __u32 seg_max;
- __u32 max_sectors;
- __u32 cmd_per_lun;
- __u32 event_info_size;
- __u32 sense_size;
- __u32 cdb_size;
- __u16 max_channel;
- __u16 max_target;
- __u32 max_lun;
+ __virtio32 num_queues;
+ __virtio32 seg_max;
+ __virtio32 max_sectors;
+ __virtio32 cmd_per_lun;
+ __virtio32 event_info_size;
+ __virtio32 sense_size;
+ __virtio32 cdb_size;
+ __virtio16 max_channel;
+ __virtio16 max_target;
+ __virtio32 max_lun;
} __attribute__((packed));
/* Feature Bits */
diff --git a/include/uapi/linux/virtio_snd.h b/include/uapi/linux/virtio_snd.h
new file mode 100644
index 000000000000..dfe49547a7b0
--- /dev/null
+++ b/include/uapi/linux/virtio_snd.h
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (C) 2021 OpenSynergy GmbH
+ */
+#ifndef VIRTIO_SND_IF_H
+#define VIRTIO_SND_IF_H
+
+#include <linux/virtio_types.h>
+
+/*******************************************************************************
+ * CONFIGURATION SPACE
+ */
+struct virtio_snd_config {
+ /* # of available physical jacks */
+ __le32 jacks;
+ /* # of available PCM streams */
+ __le32 streams;
+ /* # of available channel maps */
+ __le32 chmaps;
+};
+
+enum {
+ /* device virtqueue indexes */
+ VIRTIO_SND_VQ_CONTROL = 0,
+ VIRTIO_SND_VQ_EVENT,
+ VIRTIO_SND_VQ_TX,
+ VIRTIO_SND_VQ_RX,
+ /* # of device virtqueues */
+ VIRTIO_SND_VQ_MAX
+};
+
+/*******************************************************************************
+ * COMMON DEFINITIONS
+ */
+
+/* supported dataflow directions */
+enum {
+ VIRTIO_SND_D_OUTPUT = 0,
+ VIRTIO_SND_D_INPUT
+};
+
+enum {
+ /* jack control request types */
+ VIRTIO_SND_R_JACK_INFO = 1,
+ VIRTIO_SND_R_JACK_REMAP,
+
+ /* PCM control request types */
+ VIRTIO_SND_R_PCM_INFO = 0x0100,
+ VIRTIO_SND_R_PCM_SET_PARAMS,
+ VIRTIO_SND_R_PCM_PREPARE,
+ VIRTIO_SND_R_PCM_RELEASE,
+ VIRTIO_SND_R_PCM_START,
+ VIRTIO_SND_R_PCM_STOP,
+
+ /* channel map control request types */
+ VIRTIO_SND_R_CHMAP_INFO = 0x0200,
+
+ /* jack event types */
+ VIRTIO_SND_EVT_JACK_CONNECTED = 0x1000,
+ VIRTIO_SND_EVT_JACK_DISCONNECTED,
+
+ /* PCM event types */
+ VIRTIO_SND_EVT_PCM_PERIOD_ELAPSED = 0x1100,
+ VIRTIO_SND_EVT_PCM_XRUN,
+
+ /* common status codes */
+ VIRTIO_SND_S_OK = 0x8000,
+ VIRTIO_SND_S_BAD_MSG,
+ VIRTIO_SND_S_NOT_SUPP,
+ VIRTIO_SND_S_IO_ERR
+};
+
+/* common header */
+struct virtio_snd_hdr {
+ __le32 code;
+};
+
+/* event notification */
+struct virtio_snd_event {
+ /* VIRTIO_SND_EVT_XXX */
+ struct virtio_snd_hdr hdr;
+ /* optional event data */
+ __le32 data;
+};
+
+/* common control request to query an item information */
+struct virtio_snd_query_info {
+ /* VIRTIO_SND_R_XXX_INFO */
+ struct virtio_snd_hdr hdr;
+ /* item start identifier */
+ __le32 start_id;
+ /* item count to query */
+ __le32 count;
+ /* item information size in bytes */
+ __le32 size;
+};
+
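For illustration, a hedged kernel-side sketch of a query request as a driver might build it; the jack count would come from the configuration space, and queuing the request/response pair on the control queue is assumed:

/* Hypothetical sketch: ask for information on all jacks at once. */
struct virtio_snd_query_info req = {
	.hdr.code = cpu_to_le32(VIRTIO_SND_R_JACK_INFO),
	.start_id = cpu_to_le32(0),
	.count    = cpu_to_le32(njacks),	/* from virtio_snd_config */
	.size     = cpu_to_le32(sizeof(struct virtio_snd_jack_info)),
};
/* The response is expected to carry a virtio_snd_hdr status followed
 * by njacks items of the requested size. */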
+/* common item information header */
+struct virtio_snd_info {
+ /* function group node id (High Definition Audio Specification 7.1.2) */
+ __le32 hda_fn_nid;
+};
+
+/*******************************************************************************
+ * JACK CONTROL MESSAGES
+ */
+struct virtio_snd_jack_hdr {
+ /* VIRTIO_SND_R_JACK_XXX */
+ struct virtio_snd_hdr hdr;
+ /* 0 ... virtio_snd_config::jacks - 1 */
+ __le32 jack_id;
+};
+
+/* supported jack features */
+enum {
+ VIRTIO_SND_JACK_F_REMAP = 0
+};
+
+struct virtio_snd_jack_info {
+ /* common header */
+ struct virtio_snd_info hdr;
+ /* supported feature bit map (1 << VIRTIO_SND_JACK_F_XXX) */
+ __le32 features;
+ /* pin configuration (High Definition Audio Specification 7.3.3.31) */
+ __le32 hda_reg_defconf;
+ /* pin capabilities (High Definition Audio Specification 7.3.4.9) */
+ __le32 hda_reg_caps;
+ /* current jack connection status (0: disconnected, 1: connected) */
+ __u8 connected;
+
+ __u8 padding[7];
+};
+
+/* jack remapping control request */
+struct virtio_snd_jack_remap {
+ /* .code = VIRTIO_SND_R_JACK_REMAP */
+ struct virtio_snd_jack_hdr hdr;
+ /* selected association number */
+ __le32 association;
+ /* selected sequence number */
+ __le32 sequence;
+};
+
+/*******************************************************************************
+ * PCM CONTROL MESSAGES
+ */
+struct virtio_snd_pcm_hdr {
+ /* VIRTIO_SND_R_PCM_XXX */
+ struct virtio_snd_hdr hdr;
+ /* 0 ... virtio_snd_config::streams - 1 */
+ __le32 stream_id;
+};
+
+/* supported PCM stream features */
+enum {
+ VIRTIO_SND_PCM_F_SHMEM_HOST = 0,
+ VIRTIO_SND_PCM_F_SHMEM_GUEST,
+ VIRTIO_SND_PCM_F_MSG_POLLING,
+ VIRTIO_SND_PCM_F_EVT_SHMEM_PERIODS,
+ VIRTIO_SND_PCM_F_EVT_XRUNS
+};
+
+/* supported PCM sample formats */
+enum {
+ /* analog formats (width / physical width) */
+ VIRTIO_SND_PCM_FMT_IMA_ADPCM = 0, /* 4 / 4 bits */
+ VIRTIO_SND_PCM_FMT_MU_LAW, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_A_LAW, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_S8, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_U8, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_S16, /* 16 / 16 bits */
+ VIRTIO_SND_PCM_FMT_U16, /* 16 / 16 bits */
+ VIRTIO_SND_PCM_FMT_S18_3, /* 18 / 24 bits */
+ VIRTIO_SND_PCM_FMT_U18_3, /* 18 / 24 bits */
+ VIRTIO_SND_PCM_FMT_S20_3, /* 20 / 24 bits */
+ VIRTIO_SND_PCM_FMT_U20_3, /* 20 / 24 bits */
+ VIRTIO_SND_PCM_FMT_S24_3, /* 24 / 24 bits */
+ VIRTIO_SND_PCM_FMT_U24_3, /* 24 / 24 bits */
+ VIRTIO_SND_PCM_FMT_S20, /* 20 / 32 bits */
+ VIRTIO_SND_PCM_FMT_U20, /* 20 / 32 bits */
+ VIRTIO_SND_PCM_FMT_S24, /* 24 / 32 bits */
+ VIRTIO_SND_PCM_FMT_U24, /* 24 / 32 bits */
+ VIRTIO_SND_PCM_FMT_S32, /* 32 / 32 bits */
+ VIRTIO_SND_PCM_FMT_U32, /* 32 / 32 bits */
+ VIRTIO_SND_PCM_FMT_FLOAT, /* 32 / 32 bits */
+ VIRTIO_SND_PCM_FMT_FLOAT64, /* 64 / 64 bits */
+ /* digital formats (width / physical width) */
+ VIRTIO_SND_PCM_FMT_DSD_U8, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_DSD_U16, /* 16 / 16 bits */
+ VIRTIO_SND_PCM_FMT_DSD_U32, /* 32 / 32 bits */
+ VIRTIO_SND_PCM_FMT_IEC958_SUBFRAME /* 32 / 32 bits */
+};
+
+/* supported PCM frame rates */
+enum {
+ VIRTIO_SND_PCM_RATE_5512 = 0,
+ VIRTIO_SND_PCM_RATE_8000,
+ VIRTIO_SND_PCM_RATE_11025,
+ VIRTIO_SND_PCM_RATE_16000,
+ VIRTIO_SND_PCM_RATE_22050,
+ VIRTIO_SND_PCM_RATE_32000,
+ VIRTIO_SND_PCM_RATE_44100,
+ VIRTIO_SND_PCM_RATE_48000,
+ VIRTIO_SND_PCM_RATE_64000,
+ VIRTIO_SND_PCM_RATE_88200,
+ VIRTIO_SND_PCM_RATE_96000,
+ VIRTIO_SND_PCM_RATE_176400,
+ VIRTIO_SND_PCM_RATE_192000,
+ VIRTIO_SND_PCM_RATE_384000
+};
+
+struct virtio_snd_pcm_info {
+ /* common header */
+ struct virtio_snd_info hdr;
+ /* supported feature bit map (1 << VIRTIO_SND_PCM_F_XXX) */
+ __le32 features;
+ /* supported sample format bit map (1 << VIRTIO_SND_PCM_FMT_XXX) */
+ __le64 formats;
+ /* supported frame rate bit map (1 << VIRTIO_SND_PCM_RATE_XXX) */
+ __le64 rates;
+ /* dataflow direction (VIRTIO_SND_D_XXX) */
+ __u8 direction;
+ /* minimum # of supported channels */
+ __u8 channels_min;
+ /* maximum # of supported channels */
+ __u8 channels_max;
+
+ __u8 padding[5];
+};
+
+/* set PCM stream format */
+struct virtio_snd_pcm_set_params {
+ /* .code = VIRTIO_SND_R_PCM_SET_PARAMS */
+ struct virtio_snd_pcm_hdr hdr;
+ /* size of the hardware buffer */
+ __le32 buffer_bytes;
+ /* size of the hardware period */
+ __le32 period_bytes;
+ /* selected feature bit map (1 << VIRTIO_SND_PCM_F_XXX) */
+ __le32 features;
+ /* selected # of channels */
+ __u8 channels;
+ /* selected sample format (VIRTIO_SND_PCM_FMT_XXX) */
+ __u8 format;
+ /* selected frame rate (VIRTIO_SND_PCM_RATE_XXX) */
+ __u8 rate;
+
+ __u8 padding;
+};
+
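A hedged sketch of a typical parameter choice for stream 0 (kernel-side; sending the request on the control queue is assumed):

/* Hypothetical sketch: 16 KiB buffer in 4 KiB periods, 2 channels,
 * signed 16-bit samples at 48 kHz. */
struct virtio_snd_pcm_set_params p = {
	.hdr.hdr.code  = cpu_to_le32(VIRTIO_SND_R_PCM_SET_PARAMS),
	.hdr.stream_id = cpu_to_le32(0),
	.buffer_bytes  = cpu_to_le32(16384),
	.period_bytes  = cpu_to_le32(4096),
	.features      = cpu_to_le32(0),
	.channels      = 2,
	.format        = VIRTIO_SND_PCM_FMT_S16,
	.rate          = VIRTIO_SND_PCM_RATE_48000,
};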
+/*******************************************************************************
+ * PCM I/O MESSAGES
+ */
+
+/* I/O request header */
+struct virtio_snd_pcm_xfer {
+ /* 0 ... virtio_snd_config::streams - 1 */
+ __le32 stream_id;
+};
+
+/* I/O request status */
+struct virtio_snd_pcm_status {
+ /* VIRTIO_SND_S_XXX */
+ __le32 status;
+ /* current device latency */
+ __le32 latency_bytes;
+};
+
+/*******************************************************************************
+ * CHANNEL MAP CONTROL MESSAGES
+ */
+struct virtio_snd_chmap_hdr {
+ /* VIRTIO_SND_R_CHMAP_XXX */
+ struct virtio_snd_hdr hdr;
+ /* 0 ... virtio_snd_config::chmaps - 1 */
+ __le32 chmap_id;
+};
+
+/* standard channel position definition */
+enum {
+ VIRTIO_SND_CHMAP_NONE = 0, /* undefined */
+ VIRTIO_SND_CHMAP_NA, /* silent */
+ VIRTIO_SND_CHMAP_MONO, /* mono stream */
+ VIRTIO_SND_CHMAP_FL, /* front left */
+ VIRTIO_SND_CHMAP_FR, /* front right */
+ VIRTIO_SND_CHMAP_RL, /* rear left */
+ VIRTIO_SND_CHMAP_RR, /* rear right */
+ VIRTIO_SND_CHMAP_FC, /* front center */
+ VIRTIO_SND_CHMAP_LFE, /* low frequency (LFE) */
+ VIRTIO_SND_CHMAP_SL, /* side left */
+ VIRTIO_SND_CHMAP_SR, /* side right */
+ VIRTIO_SND_CHMAP_RC, /* rear center */
+ VIRTIO_SND_CHMAP_FLC, /* front left center */
+ VIRTIO_SND_CHMAP_FRC, /* front right center */
+ VIRTIO_SND_CHMAP_RLC, /* rear left center */
+ VIRTIO_SND_CHMAP_RRC, /* rear right center */
+ VIRTIO_SND_CHMAP_FLW, /* front left wide */
+ VIRTIO_SND_CHMAP_FRW, /* front right wide */
+ VIRTIO_SND_CHMAP_FLH, /* front left high */
+ VIRTIO_SND_CHMAP_FCH, /* front center high */
+ VIRTIO_SND_CHMAP_FRH, /* front right high */
+ VIRTIO_SND_CHMAP_TC, /* top center */
+ VIRTIO_SND_CHMAP_TFL, /* top front left */
+ VIRTIO_SND_CHMAP_TFR, /* top front right */
+ VIRTIO_SND_CHMAP_TFC, /* top front center */
+ VIRTIO_SND_CHMAP_TRL, /* top rear left */
+ VIRTIO_SND_CHMAP_TRR, /* top rear right */
+ VIRTIO_SND_CHMAP_TRC, /* top rear center */
+ VIRTIO_SND_CHMAP_TFLC, /* top front left center */
+ VIRTIO_SND_CHMAP_TFRC, /* top front right center */
+ VIRTIO_SND_CHMAP_TSL, /* top side left */
+ VIRTIO_SND_CHMAP_TSR, /* top side right */
+ VIRTIO_SND_CHMAP_LLFE, /* left LFE */
+ VIRTIO_SND_CHMAP_RLFE, /* right LFE */
+ VIRTIO_SND_CHMAP_BC, /* bottom center */
+ VIRTIO_SND_CHMAP_BLC, /* bottom left center */
+ VIRTIO_SND_CHMAP_BRC /* bottom right center */
+};
+
+/* maximum possible number of channels */
+#define VIRTIO_SND_CHMAP_MAX_SIZE 18
+
+struct virtio_snd_chmap_info {
+ /* common header */
+ struct virtio_snd_info hdr;
+ /* dataflow direction (VIRTIO_SND_D_XXX) */
+ __u8 direction;
+ /* # of valid channel position values */
+ __u8 channels;
+ /* channel position values (VIRTIO_SND_CHMAP_XXX) */
+ __u8 positions[VIRTIO_SND_CHMAP_MAX_SIZE];
+};
+
+#endif /* VIRTIO_SND_IF_H */
diff --git a/include/uapi/linux/virtio_vsock.h b/include/uapi/linux/virtio_vsock.h
index 1d57ed3d84d2..64738838bee5 100644
--- a/include/uapi/linux/virtio_vsock.h
+++ b/include/uapi/linux/virtio_vsock.h
@@ -38,6 +38,9 @@
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
+/* The feature bitmap for virtio vsock */
+#define VIRTIO_VSOCK_F_SEQPACKET 1 /* SOCK_SEQPACKET supported */
+
struct virtio_vsock_config {
__le64 guest_cid;
} __attribute__((packed));
@@ -65,6 +68,7 @@ struct virtio_vsock_hdr {
enum virtio_vsock_type {
VIRTIO_VSOCK_TYPE_STREAM = 1,
+ VIRTIO_VSOCK_TYPE_SEQPACKET = 2,
};
enum virtio_vsock_op {
@@ -91,4 +95,10 @@ enum virtio_vsock_shutdown {
VIRTIO_VSOCK_SHUTDOWN_SEND = 2,
};
+/* VIRTIO_VSOCK_OP_RW flags values */
+enum virtio_vsock_rw {
+ VIRTIO_VSOCK_SEQ_EOM = 1,
+ VIRTIO_VSOCK_SEQ_EOR = 2,
+};
+
#endif /* _UAPI_LINUX_VIRTIO_VSOCK_H */
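For illustration, a user-space sketch of the new SOCK_SEQPACKET support (error handling omitted; <sys/socket.h> and <linux/vm_sockets.h> assumed; the port number is an assumption):

/* Hypothetical sketch: one record per send(); the transport preserves
 * message boundaries via VIRTIO_VSOCK_SEQ_EOM, and MSG_EOR maps to
 * VIRTIO_VSOCK_SEQ_EOR on the wire. */
int fd = socket(AF_VSOCK, SOCK_SEQPACKET, 0);
struct sockaddr_vm addr = {
	.svm_family = AF_VSOCK,
	.svm_cid    = VMADDR_CID_HOST,
	.svm_port   = 1234,		/* assumed service port */
};

connect(fd, (struct sockaddr *)&addr, sizeof(addr));
send(fd, "hello", 5, MSG_EOR);		/* a single, EOR-marked record */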
diff --git a/include/uapi/linux/vm_sockets.h b/include/uapi/linux/vm_sockets.h
index fd0ed7221645..c60ca33eac59 100644
--- a/include/uapi/linux/vm_sockets.h
+++ b/include/uapi/linux/vm_sockets.h
@@ -18,6 +18,7 @@
#define _UAPI_VM_SOCKETS_H
#include <linux/socket.h>
+#include <linux/types.h>
/* Option name for STREAM socket buffer size. Use as the option name in
* setsockopt(3) or getsockopt(3) to set or get an unsigned long long that
@@ -63,7 +64,7 @@
* timeout for a STREAM socket.
*/
-#define SO_VM_SOCKETS_CONNECT_TIMEOUT 6
+#define SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD 6
/* Option name for using non-blocking send/receive. Use as the option name
* for setsockopt(3) or getsockopt(3) to set or get the non-blocking
@@ -80,6 +81,17 @@
#define SO_VM_SOCKETS_NONBLOCK_TXRX 7
+#define SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW 8
+
+#if !defined(__KERNEL__)
+#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
+#define SO_VM_SOCKETS_CONNECT_TIMEOUT SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD
+#else
+#define SO_VM_SOCKETS_CONNECT_TIMEOUT \
+ (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD : SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW)
+#endif
+#endif
+
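In other words, user space keeps using one option name and the right constant is selected at compile time. A hedged sketch, assuming fd is an AF_VSOCK socket and omitting error handling:

/* Hypothetical sketch: a 2-second connect timeout.  The macro above
 * resolves to the _OLD or _NEW option depending on whether time_t
 * matches __kernel_long_t on this ABI. */
struct timeval tv = { .tv_sec = 2, .tv_usec = 0 };

setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_CONNECT_TIMEOUT, &tv, sizeof(tv));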
/* The vSocket equivalent of INADDR_ANY. This works for the svm_cid field of
* sockaddr_vm and indicates the context ID of the current endpoint.
*/
@@ -114,6 +126,26 @@
#define VMADDR_CID_HOST 2
+/* The current default use case for the vsock channel is the following:
+ * local vsock communication between guest and host, and nested VM setups.
+ * In addition, the vsock packets are implicitly forwarded to the host
+ * if no host->guest vsock transport is set.
+ *
+ * Set this flag value in the corresponding sockaddr_vm field if the vsock
+ * packets always need to be forwarded to the host. This behavior allows
+ * vsock communication between sibling VMs to be set up.
+ *
+ * It also makes it possible to explicitly distinguish between vsock
+ * channels created for different use cases, such as nested VMs (or local
+ * communication between guest and host) and sibling VMs.
+ *
+ * The flag can be set by the user space application in its connect logic.
+ * In the listen logic (from kernel space) the flag is set on the remote
+ * peer address. This happens for an incoming connection when it is routed
+ * from the host and comes from the guest (local CID and remote
+ * CID > VMADDR_CID_HOST).
+ */
+#define VMADDR_FLAG_TO_HOST 0x01
+
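A minimal user-space sketch of the sibling-VM case described above (SIBLING_CID and the port are assumptions; error handling omitted):

/* Hypothetical sketch: route via the host so a sibling VM can be
 * reached even without a direct transport to it. */
struct sockaddr_vm sibling = {
	.svm_family = AF_VSOCK,
	.svm_cid    = SIBLING_CID,		/* assumed sibling's CID */
	.svm_port   = 1234,
	.svm_flags  = VMADDR_FLAG_TO_HOST,
};

connect(fd, (struct sockaddr *)&sibling, sizeof(sibling));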
/* Invalid vSockets version. */
#define VM_SOCKETS_INVALID_VERSION -1U
@@ -148,10 +180,13 @@ struct sockaddr_vm {
unsigned short svm_reserved1;
unsigned int svm_port;
unsigned int svm_cid;
+ __u8 svm_flags;
unsigned char svm_zero[sizeof(struct sockaddr) -
sizeof(sa_family_t) -
sizeof(unsigned short) -
- sizeof(unsigned int) - sizeof(unsigned int)];
+ sizeof(unsigned int) -
+ sizeof(unsigned int) -
+ sizeof(__u8)];
};
#define IOCTL_VM_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9)
diff --git a/include/uapi/linux/watch_queue.h b/include/uapi/linux/watch_queue.h
new file mode 100644
index 000000000000..c3d8320b5d3a
--- /dev/null
+++ b/include/uapi/linux/watch_queue.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_WATCH_QUEUE_H
+#define _UAPI_LINUX_WATCH_QUEUE_H
+
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/ioctl.h>
+
+#define O_NOTIFICATION_PIPE O_EXCL /* Parameter to pipe2() selecting notification pipe */
+
+#define IOC_WATCH_QUEUE_SET_SIZE _IO('W', 0x60) /* Set the size in pages */
+#define IOC_WATCH_QUEUE_SET_FILTER _IO('W', 0x61) /* Set the filter */
+
+enum watch_notification_type {
+ WATCH_TYPE_META = 0, /* Special record */
+ WATCH_TYPE_KEY_NOTIFY = 1, /* Key change event notification */
+ WATCH_TYPE__NR = 2
+};
+
+enum watch_meta_notification_subtype {
+ WATCH_META_REMOVAL_NOTIFICATION = 0, /* Watched object was removed */
+ WATCH_META_LOSS_NOTIFICATION = 1, /* Data loss occurred */
+};
+
+/*
+ * Notification record header. This is aligned to 64-bits so that subclasses
+ * can contain __u64 fields.
+ */
+struct watch_notification {
+ __u32 type:24; /* enum watch_notification_type */
+ __u32 subtype:8; /* Type-specific subtype (filterable) */
+ __u32 info;
+#define WATCH_INFO_LENGTH 0x0000007f /* Length of record */
+#define WATCH_INFO_LENGTH__SHIFT 0
+#define WATCH_INFO_ID 0x0000ff00 /* ID of watchpoint */
+#define WATCH_INFO_ID__SHIFT 8
+#define WATCH_INFO_TYPE_INFO 0xffff0000 /* Type-specific info */
+#define WATCH_INFO_TYPE_INFO__SHIFT 16
+#define WATCH_INFO_FLAG_0 0x00010000 /* Type-specific info, flag bit 0 */
+#define WATCH_INFO_FLAG_1 0x00020000 /* ... */
+#define WATCH_INFO_FLAG_2 0x00040000
+#define WATCH_INFO_FLAG_3 0x00080000
+#define WATCH_INFO_FLAG_4 0x00100000
+#define WATCH_INFO_FLAG_5 0x00200000
+#define WATCH_INFO_FLAG_6 0x00400000
+#define WATCH_INFO_FLAG_7 0x00800000
+};
+
+/*
+ * Notification filtering rules (IOC_WATCH_QUEUE_SET_FILTER).
+ */
+struct watch_notification_type_filter {
+ __u32 type; /* Type to apply filter to */
+ __u32 info_filter; /* Filter on watch_notification::info */
+ __u32 info_mask; /* Mask of relevant bits in info_filter */
+ __u32 subtype_filter[8]; /* Bitmask of subtypes to filter on */
+};
+
+struct watch_notification_filter {
+ __u32 nr_filters; /* Number of filters */
+ __u32 __reserved; /* Must be 0 */
+ struct watch_notification_type_filter filters[];
+};
+
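Putting the pieces together, a hedged user-space sketch of opening a notification pipe and installing a filter; it needs _GNU_SOURCE for pipe2(), plus <fcntl.h>, <unistd.h>, <stdlib.h>, <sys/ioctl.h> and this header, and error handling is omitted:

/* Hypothetical sketch: accept only key-notify records. */
struct watch_notification_filter *f;
int fds[2];

f = calloc(1, sizeof(*f) + sizeof(struct watch_notification_type_filter));
f->nr_filters = 1;
f->filters[0].type = WATCH_TYPE_KEY_NOTIFY;
f->filters[0].subtype_filter[0] = ~0U;	/* match every subtype */

pipe2(fds, O_NOTIFICATION_PIPE | O_CLOEXEC);
ioctl(fds[0], IOC_WATCH_QUEUE_SET_SIZE, 4);	/* ring size, in pages */
ioctl(fds[0], IOC_WATCH_QUEUE_SET_FILTER, f);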
+
+/*
+ * Extended watch removal notification. This is used optionally if the type
+ * wants to indicate an identifier for the object being watched, if it has
+ * one. Such records can be distinguished by their length.
+ *
+ * type -> WATCH_TYPE_META
+ * subtype -> WATCH_META_REMOVAL_NOTIFICATION
+ */
+struct watch_notification_removal {
+ struct watch_notification watch;
+ __u64 id; /* Type-dependent identifier */
+};
+
+/*
+ * Type of key/keyring change notification.
+ */
+enum key_notification_subtype {
+ NOTIFY_KEY_INSTANTIATED = 0, /* Key was instantiated (aux is error code) */
+ NOTIFY_KEY_UPDATED = 1, /* Key was updated */
+ NOTIFY_KEY_LINKED = 2, /* Key (aux) was added to watched keyring */
+ NOTIFY_KEY_UNLINKED = 3, /* Key (aux) was removed from watched keyring */
+ NOTIFY_KEY_CLEARED = 4, /* Keyring was cleared */
+ NOTIFY_KEY_REVOKED = 5, /* Key was revoked */
+ NOTIFY_KEY_INVALIDATED = 6, /* Key was invalidated */
+ NOTIFY_KEY_SETATTR = 7, /* Key's attributes got changed */
+};
+
+/*
+ * Key/keyring notification record.
+ * - watch.type = WATCH_TYPE_KEY_NOTIFY
+ * - watch.subtype = enum key_notification_subtype
+ */
+struct key_notification {
+ struct watch_notification watch;
+ __u32 key_id; /* The key/keyring affected */
+ __u32 aux; /* Per-type auxiliary data */
+};
+
+#endif /* _UAPI_LINUX_WATCH_QUEUE_H */
diff --git a/include/uapi/linux/wimax.h b/include/uapi/linux/wimax.h
deleted file mode 100644
index 9f6b77af2f6d..000000000000
--- a/include/uapi/linux/wimax.h
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Linux WiMax
- * API for user space
- *
- *
- * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <linux-wimax@intel.com>
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- * - Initial implementation
- *
- *
- * This file declares the user/kernel protocol that is spoken over
- * Generic Netlink, as well as any type declaration that is to be used
- * by kernel and user space.
- *
- * It is intended for user space to clone it verbatim to use it as a
- * primary reference for definitions.
- *
- * Stuff intended for kernel usage as well as full protocol and stack
- * documentation is rooted in include/net/wimax.h.
- */
-
-#ifndef __LINUX__WIMAX_H__
-#define __LINUX__WIMAX_H__
-
-#include <linux/types.h>
-
-enum {
- /**
- * Version of the interface (unsigned decimal, MMm, max 25.5)
- * M - Major: change if removing or modifying an existing call.
- * m - minor: change when adding a new call
- */
- WIMAX_GNL_VERSION = 01,
- /* Generic NetLink attributes */
- WIMAX_GNL_ATTR_INVALID = 0x00,
- WIMAX_GNL_ATTR_MAX = 10,
-};
-
-
-/*
- * Generic NetLink operations
- *
- * Most of these map to an API call; _OP_ stands for operation, _RP_
- * for reply and _RE_ for report (aka: signal).
- */
-enum {
- WIMAX_GNL_OP_MSG_FROM_USER, /* User to kernel message */
- WIMAX_GNL_OP_MSG_TO_USER, /* Kernel to user message */
- WIMAX_GNL_OP_RFKILL, /* Run wimax_rfkill() */
- WIMAX_GNL_OP_RESET, /* Run wimax_rfkill() */
- WIMAX_GNL_RE_STATE_CHANGE, /* Report: status change */
- WIMAX_GNL_OP_STATE_GET, /* Request for current state */
-};
-
-
-/* Message from user / to user */
-enum {
- WIMAX_GNL_MSG_IFIDX = 1,
- WIMAX_GNL_MSG_PIPE_NAME,
- WIMAX_GNL_MSG_DATA,
-};
-
-
-/*
- * wimax_rfkill()
- *
- * The state of the radio (ON/OFF) is mapped to the rfkill subsystem's
- * switch state (DISABLED/ENABLED).
- */
-enum wimax_rf_state {
- WIMAX_RF_OFF = 0, /* Radio is off, rfkill on/enabled */
- WIMAX_RF_ON = 1, /* Radio is on, rfkill off/disabled */
- WIMAX_RF_QUERY = 2,
-};
-
-/* Attributes */
-enum {
- WIMAX_GNL_RFKILL_IFIDX = 1,
- WIMAX_GNL_RFKILL_STATE,
-};
-
-
-/* Attributes for wimax_reset() */
-enum {
- WIMAX_GNL_RESET_IFIDX = 1,
-};
-
-/* Attributes for wimax_state_get() */
-enum {
- WIMAX_GNL_STGET_IFIDX = 1,
-};
-
-/*
- * Attributes for the Report State Change
- *
- * For now we just have the old and new states; new attributes might
- * be added later on.
- */
-enum {
- WIMAX_GNL_STCH_IFIDX = 1,
- WIMAX_GNL_STCH_STATE_OLD,
- WIMAX_GNL_STCH_STATE_NEW,
-};
-
-
-/**
- * enum wimax_st - The different states of a WiMAX device
- * @__WIMAX_ST_NULL: The device structure has been allocated and zeroed,
- * but still wimax_dev_add() hasn't been called. There is no state.
- *
- * @WIMAX_ST_DOWN: The device has been registered with the WiMAX and
- * networking stacks, but it is not initialized (normally that is
- * done with 'ifconfig DEV up' [or equivalent], which can upload
- * firmware and enable communications with the device).
- * In this state, the device is powered down and using as less
- * power as possible.
- * This state is the default after a call to wimax_dev_add(). It
- * is ok to have drivers move directly to %WIMAX_ST_UNINITIALIZED
- * or %WIMAX_ST_RADIO_OFF in _probe() after the call to
- * wimax_dev_add().
- * It is recommended that the driver leaves this state when
- * calling 'ifconfig DEV up' and enters it back on 'ifconfig DEV
- * down'.
- *
- * @__WIMAX_ST_QUIESCING: The device is being torn down, so no API
- * operations are allowed to proceed except the ones needed to
- * complete the device clean up process.
- *
- * @WIMAX_ST_UNINITIALIZED: [optional] Communication with the device
- * is setup, but the device still requires some configuration
- * before being operational.
- * Some WiMAX API calls might work.
- *
- * @WIMAX_ST_RADIO_OFF: The device is fully up; radio is off (wether
- * by hardware or software switches).
- * It is recommended to always leave the device in this state
- * after initialization.
- *
- * @WIMAX_ST_READY: The device is fully up and radio is on.
- *
- * @WIMAX_ST_SCANNING: [optional] The device has been instructed to
- * scan. In this state, the device cannot be actively connected to
- * a network.
- *
- * @WIMAX_ST_CONNECTING: The device is connecting to a network. This
- * state exists because in some devices, the connect process can
- * include a number of negotiations between user space, kernel
- * space and the device. User space needs to know what the device
- * is doing. If the connect sequence in a device is atomic and
- * fast, the device can transition directly to CONNECTED
- *
- * @WIMAX_ST_CONNECTED: The device is connected to a network.
- *
- * @__WIMAX_ST_INVALID: This is an invalid state used to mark the
- * maximum numeric value of states.
- *
- * Description:
- *
- * Transitions from one state to another one are atomic and can only
- * be caused in kernel space with wimax_state_change(). To read the
- * state, use wimax_state_get().
- *
- * States starting with __ are internal and shall not be used or
- * referred to by drivers or userspace. They look ugly, but that's the
- * point -- if any use is made non-internal to the stack, it is easier
- * to catch on review.
- *
- * All API operations [with well defined exceptions] will take the
- * device mutex before starting and then check the state. If the state
- * is %__WIMAX_ST_NULL, %WIMAX_ST_DOWN, %WIMAX_ST_UNINITIALIZED or
- * %__WIMAX_ST_QUIESCING, it will drop the lock and quit with
- * -%EINVAL, -%ENOMEDIUM, -%ENOTCONN or -%ESHUTDOWN.
- *
- * The order of the definitions is important, so we can do numerical
- * comparisons (eg: < %WIMAX_ST_RADIO_OFF means the device is not ready
- * to operate).
- */
-/*
- * The allowed state transitions are described in the table below
- * (states in rows can go to states in columns where there is an X):
- *
- * UNINI RADIO READY SCAN CONNEC CONNEC
- * NULL DOWN QUIESCING TIALIZED OFF NING TING TED
- * NULL - x
- * DOWN - x x x
- * QUIESCING x -
- * UNINITIALIZED x - x
- * RADIO_OFF x - x
- * READY x x - x x x
- * SCANNING x x x - x x
- * CONNECTING x x x x - x
- * CONNECTED x x x -
- *
- * This table not available in kernel-doc because the formatting messes it up.
- */
- enum wimax_st {
- __WIMAX_ST_NULL = 0,
- WIMAX_ST_DOWN,
- __WIMAX_ST_QUIESCING,
- WIMAX_ST_UNINITIALIZED,
- WIMAX_ST_RADIO_OFF,
- WIMAX_ST_READY,
- WIMAX_ST_SCANNING,
- WIMAX_ST_CONNECTING,
- WIMAX_ST_CONNECTED,
- __WIMAX_ST_INVALID /* Always keep last */
-};
-
-
-#endif /* #ifndef __LINUX__WIMAX_H__ */
diff --git a/include/uapi/linux/wimax/i2400m.h b/include/uapi/linux/wimax/i2400m.h
deleted file mode 100644
index fd198bc24a3c..000000000000
--- a/include/uapi/linux/wimax/i2400m.h
+++ /dev/null
@@ -1,572 +0,0 @@
-/*
- * Intel Wireless WiMax Connection 2400m
- * Host-Device protocol interface definitions
- *
- *
- * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <linux-wimax@intel.com>
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- * - Initial implementation
- *
- *
- * This header defines the data structures and constants used to
- * communicate with the device.
- *
- * BOOTMODE/BOOTROM/FIRMWARE UPLOAD PROTOCOL
- *
- * The firmware upload protocol is quite simple and only requires a
- * handful of commands. See drivers/net/wimax/i2400m/fw.c for more
- * details.
- *
- * The BCF data structure is for the firmware file header.
- *
- *
- * THE DATA / CONTROL PROTOCOL
- *
- * This is the normal protocol spoken with the device once the
- * firmware is uploaded. It transports data payloads and control
- * messages back and forth.
- *
- * It consists 'messages' that pack one or more payloads each. The
- * format is described in detail in drivers/net/wimax/i2400m/rx.c and
- * tx.c.
- *
- *
- * THE L3L4 PROTOCOL
- *
- * The term L3L4 refers to Layer 3 (the device), Layer 4 (the
- * driver/host software).
- *
- * This is the control protocol used by the host to control the i2400m
- * device (scan, connect, disconnect...). This is sent to / received
- * as control frames. These frames consist of a header and zero or
- * more TLVs with information. We call each control frame a "message".
- *
- * Each message is composed of:
- *
- * HEADER
- * [TLV0 + PAYLOAD0]
- * [TLV1 + PAYLOAD1]
- * [...]
- * [TLVN + PAYLOADN]
- *
- * The HEADER is defined by 'struct i2400m_l3l4_hdr'. The payloads are
- * defined by a TLV structure (Type Length Value) which is a 'header'
- * (struct i2400m_tlv_hdr) and then the payload.
- *
- * All integers are represented as Little Endian.
- *
- * - REQUESTS AND EVENTS
- *
- * The requests can be clasified as follows:
- *
- * COMMAND: implies a request from the host to the device requesting
- * an action being performed. The device will reply with a
- * message (with the same type as the command), status and
- * no (TLV) payload. Execution of a command might cause
- * events (of different type) to be sent later on as
- * device's state changes.
- *
- * GET/SET: similar to COMMAND, but will not cause other
- * EVENTs. The reply, in the case of GET, will contain
- * TLVs with the requested information.
- *
- * EVENT: asynchronous messages sent from the device, maybe as a
- * consequence of previous COMMANDs but disassociated from
- * them.
- *
- * Only one request might be pending at the same time (ie: don't
- * parallelize nor post another GET request before the previous
- * COMMAND has been acknowledged with it's corresponding reply by the
- * device).
- *
- * The different requests and their formats are described below:
- *
- * I2400M_MT_* Message types
- * I2400M_MS_* Message status (for replies, events)
- * i2400m_tlv_* TLVs
- *
- * data types are named 'struct i2400m_msg_OPNAME', OPNAME matching the
- * operation.
- */
-
-#ifndef __LINUX__WIMAX__I2400M_H__
-#define __LINUX__WIMAX__I2400M_H__
-
-#include <linux/types.h>
-#include <linux/if_ether.h>
-
-/*
- * Host Device Interface (HDI) common to all busses
- */
-
-/* Boot-mode (firmware upload mode) commands */
-
-/* Header for the firmware file */
-struct i2400m_bcf_hdr {
- __le32 module_type;
- __le32 header_len;
- __le32 header_version;
- __le32 module_id;
- __le32 module_vendor;
- __le32 date; /* BCD YYYMMDD */
- __le32 size; /* in dwords */
- __le32 key_size; /* in dwords */
- __le32 modulus_size; /* in dwords */
- __le32 exponent_size; /* in dwords */
- __u8 reserved[88];
-} __attribute__ ((packed));
-
-/* Boot mode opcodes */
-enum i2400m_brh_opcode {
- I2400M_BRH_READ = 1,
- I2400M_BRH_WRITE = 2,
- I2400M_BRH_JUMP = 3,
- I2400M_BRH_SIGNED_JUMP = 8,
- I2400M_BRH_HASH_PAYLOAD_ONLY = 9,
-};
-
-/* Boot mode command masks and stuff */
-enum i2400m_brh {
- I2400M_BRH_SIGNATURE = 0xcbbc0000,
- I2400M_BRH_SIGNATURE_MASK = 0xffff0000,
- I2400M_BRH_SIGNATURE_SHIFT = 16,
- I2400M_BRH_OPCODE_MASK = 0x0000000f,
- I2400M_BRH_RESPONSE_MASK = 0x000000f0,
- I2400M_BRH_RESPONSE_SHIFT = 4,
- I2400M_BRH_DIRECT_ACCESS = 0x00000400,
- I2400M_BRH_RESPONSE_REQUIRED = 0x00000200,
- I2400M_BRH_USE_CHECKSUM = 0x00000100,
-};
-
-
-/**
- * i2400m_bootrom_header - Header for a boot-mode command
- *
- * @cmd: the above command descriptor
- * @target_addr: where on the device memory should the action be performed.
- * @data_size: for read/write, amount of data to be read/written
- * @block_checksum: checksum value (if applicable)
- * @payload: the beginning of data attached to this header
- */
-struct i2400m_bootrom_header {
- __le32 command; /* Compose with enum i2400_brh */
- __le32 target_addr;
- __le32 data_size;
- __le32 block_checksum;
- char payload[0];
-} __attribute__ ((packed));
-
-
-/*
- * Data / control protocol
- */
-
-/* Packet types for the host-device interface */
-enum i2400m_pt {
- I2400M_PT_DATA = 0,
- I2400M_PT_CTRL,
- I2400M_PT_TRACE, /* For device debug */
- I2400M_PT_RESET_WARM, /* device reset */
- I2400M_PT_RESET_COLD, /* USB[transport] reset, like reconnect */
- I2400M_PT_EDATA, /* Extended RX data */
- I2400M_PT_ILLEGAL
-};
-
-
-/*
- * Payload for a data packet
- *
- * This is prefixed to each and every outgoing DATA type.
- */
-struct i2400m_pl_data_hdr {
- __le32 reserved;
-} __attribute__((packed));
-
-
-/*
- * Payload for an extended data packet
- *
- * New in fw v1.4
- *
- * @reorder: if this payload has to be reorder or not (and how)
- * @cs: the type of data in the packet, as defined per (802.16e
- * T11.13.19.1). Currently only 2 (IPv4 packet) supported.
- *
- * This is prefixed to each and every INCOMING DATA packet.
- */
-struct i2400m_pl_edata_hdr {
- __le32 reorder; /* bits defined in i2400m_ro */
- __u8 cs;
- __u8 reserved[11];
-} __attribute__((packed));
-
-enum i2400m_cs {
- I2400M_CS_IPV4_0 = 0,
- I2400M_CS_IPV4 = 2,
-};
-
-enum i2400m_ro {
- I2400M_RO_NEEDED = 0x01,
- I2400M_RO_TYPE = 0x03,
- I2400M_RO_TYPE_SHIFT = 1,
- I2400M_RO_CIN = 0x0f,
- I2400M_RO_CIN_SHIFT = 4,
- I2400M_RO_FBN = 0x07ff,
- I2400M_RO_FBN_SHIFT = 8,
- I2400M_RO_SN = 0x07ff,
- I2400M_RO_SN_SHIFT = 21,
-};
-
-enum i2400m_ro_type {
- I2400M_RO_TYPE_RESET = 0,
- I2400M_RO_TYPE_PACKET,
- I2400M_RO_TYPE_WS,
- I2400M_RO_TYPE_PACKET_WS,
-};
-
-
-/* Misc constants */
-enum {
- I2400M_PL_ALIGN = 16, /* Payload data size alignment */
- I2400M_PL_SIZE_MAX = 0x3EFF,
- I2400M_MAX_PLS_IN_MSG = 60,
- /* protocol barkers: sync sequences; for notifications they
- * are sent in groups of four. */
- I2400M_H2D_PREVIEW_BARKER = 0xcafe900d,
- I2400M_COLD_RESET_BARKER = 0xc01dc01d,
- I2400M_WARM_RESET_BARKER = 0x50f750f7,
- I2400M_NBOOT_BARKER = 0xdeadbeef,
- I2400M_SBOOT_BARKER = 0x0ff1c1a1,
- I2400M_SBOOT_BARKER_6050 = 0x80000001,
- I2400M_ACK_BARKER = 0xfeedbabe,
- I2400M_D2H_MSG_BARKER = 0xbeefbabe,
-};
-
-
-/*
- * Hardware payload descriptor
- *
- * Bitfields encoded in a struct to enforce typing semantics.
- *
- * Look in rx.c and tx.c for a full description of the format.
- */
-struct i2400m_pld {
- __le32 val;
-} __attribute__ ((packed));
-
-#define I2400M_PLD_SIZE_MASK 0x00003fff
-#define I2400M_PLD_TYPE_SHIFT 16
-#define I2400M_PLD_TYPE_MASK 0x000f0000
-
-/*
- * Header for a TX message or RX message
- *
- * @barker: preamble
- * @size: used for management of the FIFO queue buffer; before
- * sending, this is converted to be a real preamble. This
- * indicates the real size of the TX message that starts at this
- * point. If the highest bit is set, then this message is to be
- * skipped.
- * @sequence: sequence number of this message
- * @offset: offset where the message itself starts -- see the comments
- * in the file header about message header and payload descriptor
- * alignment.
- * @num_pls: number of payloads in this message
- * @padding: amount of padding bytes at the end of the message to make
- * it be of block-size aligned
- *
- * Look in rx.c and tx.c for a full description of the format.
- */
-struct i2400m_msg_hdr {
- union {
- __le32 barker;
- __u32 size; /* same size type as barker!! */
- };
- union {
- __le32 sequence;
- __u32 offset; /* same size type as barker!! */
- };
- __le16 num_pls;
- __le16 rsv1;
- __le16 padding;
- __le16 rsv2;
- struct i2400m_pld pld[0];
-} __attribute__ ((packed));
-
-
-
-/*
- * L3/L4 control protocol
- */
-
-enum {
- /* Interface version */
- I2400M_L3L4_VERSION = 0x0100,
-};
-
-/* Message types */
-enum i2400m_mt {
- I2400M_MT_RESERVED = 0x0000,
- I2400M_MT_INVALID = 0xffff,
- I2400M_MT_REPORT_MASK = 0x8000,
-
- I2400M_MT_GET_SCAN_RESULT = 0x4202,
- I2400M_MT_SET_SCAN_PARAM = 0x4402,
- I2400M_MT_CMD_RF_CONTROL = 0x4602,
- I2400M_MT_CMD_SCAN = 0x4603,
- I2400M_MT_CMD_CONNECT = 0x4604,
- I2400M_MT_CMD_DISCONNECT = 0x4605,
- I2400M_MT_CMD_EXIT_IDLE = 0x4606,
- I2400M_MT_GET_LM_VERSION = 0x5201,
- I2400M_MT_GET_DEVICE_INFO = 0x5202,
- I2400M_MT_GET_LINK_STATUS = 0x5203,
- I2400M_MT_GET_STATISTICS = 0x5204,
- I2400M_MT_GET_STATE = 0x5205,
- I2400M_MT_GET_MEDIA_STATUS = 0x5206,
- I2400M_MT_SET_INIT_CONFIG = 0x5404,
- I2400M_MT_CMD_INIT = 0x5601,
- I2400M_MT_CMD_TERMINATE = 0x5602,
- I2400M_MT_CMD_MODE_OF_OP = 0x5603,
- I2400M_MT_CMD_RESET_DEVICE = 0x5604,
- I2400M_MT_CMD_MONITOR_CONTROL = 0x5605,
- I2400M_MT_CMD_ENTER_POWERSAVE = 0x5606,
- I2400M_MT_GET_TLS_OPERATION_RESULT = 0x6201,
- I2400M_MT_SET_EAP_SUCCESS = 0x6402,
- I2400M_MT_SET_EAP_FAIL = 0x6403,
- I2400M_MT_SET_EAP_KEY = 0x6404,
- I2400M_MT_CMD_SEND_EAP_RESPONSE = 0x6602,
- I2400M_MT_REPORT_SCAN_RESULT = 0xc002,
- I2400M_MT_REPORT_STATE = 0xd002,
- I2400M_MT_REPORT_POWERSAVE_READY = 0xd005,
- I2400M_MT_REPORT_EAP_REQUEST = 0xe002,
- I2400M_MT_REPORT_EAP_RESTART = 0xe003,
- I2400M_MT_REPORT_ALT_ACCEPT = 0xe004,
- I2400M_MT_REPORT_KEY_REQUEST = 0xe005,
-};
-
-
-/*
- * Message Ack Status codes
- *
- * When a message is replied-to, this status is reported.
- */
-enum i2400m_ms {
- I2400M_MS_DONE_OK = 0,
- I2400M_MS_DONE_IN_PROGRESS = 1,
- I2400M_MS_INVALID_OP = 2,
- I2400M_MS_BAD_STATE = 3,
- I2400M_MS_ILLEGAL_VALUE = 4,
- I2400M_MS_MISSING_PARAMS = 5,
- I2400M_MS_VERSION_ERROR = 6,
- I2400M_MS_ACCESSIBILITY_ERROR = 7,
- I2400M_MS_BUSY = 8,
- I2400M_MS_CORRUPTED_TLV = 9,
- I2400M_MS_UNINITIALIZED = 10,
- I2400M_MS_UNKNOWN_ERROR = 11,
- I2400M_MS_PRODUCTION_ERROR = 12,
- I2400M_MS_NO_RF = 13,
- I2400M_MS_NOT_READY_FOR_POWERSAVE = 14,
- I2400M_MS_THERMAL_CRITICAL = 15,
- I2400M_MS_MAX
-};
-
-
-/**
- * i2400m_tlv - enumeration of the different types of TLVs
- *
- * TLVs stand for type-length-value and are the header for a payload
- * composed of almost anything. Each payload has a type assigned
- * and a length.
- */
-enum i2400m_tlv {
- I2400M_TLV_L4_MESSAGE_VERSIONS = 129,
- I2400M_TLV_SYSTEM_STATE = 141,
- I2400M_TLV_MEDIA_STATUS = 161,
- I2400M_TLV_RF_OPERATION = 162,
- I2400M_TLV_RF_STATUS = 163,
- I2400M_TLV_DEVICE_RESET_TYPE = 132,
- I2400M_TLV_CONFIG_IDLE_PARAMETERS = 601,
- I2400M_TLV_CONFIG_IDLE_TIMEOUT = 611,
- I2400M_TLV_CONFIG_D2H_DATA_FORMAT = 614,
- I2400M_TLV_CONFIG_DL_HOST_REORDER = 615,
-};
-
-
-struct i2400m_tlv_hdr {
- __le16 type;
- __le16 length; /* payload's */
- __u8 pl[0];
-} __attribute__((packed));
-
-
-struct i2400m_l3l4_hdr {
- __le16 type;
- __le16 length; /* payload's */
- __le16 version;
- __le16 resv1;
- __le16 status;
- __le16 resv2;
- struct i2400m_tlv_hdr pl[0];
-} __attribute__((packed));
-
-
-/**
- * i2400m_system_state - different states of the device
- */
-enum i2400m_system_state {
- I2400M_SS_UNINITIALIZED = 1,
- I2400M_SS_INIT,
- I2400M_SS_READY,
- I2400M_SS_SCAN,
- I2400M_SS_STANDBY,
- I2400M_SS_CONNECTING,
- I2400M_SS_WIMAX_CONNECTED,
- I2400M_SS_DATA_PATH_CONNECTED,
- I2400M_SS_IDLE,
- I2400M_SS_DISCONNECTING,
- I2400M_SS_OUT_OF_ZONE,
- I2400M_SS_SLEEPACTIVE,
- I2400M_SS_PRODUCTION,
- I2400M_SS_CONFIG,
- I2400M_SS_RF_OFF,
- I2400M_SS_RF_SHUTDOWN,
- I2400M_SS_DEVICE_DISCONNECT,
- I2400M_SS_MAX,
-};
-
-
-/**
- * i2400m_tlv_system_state - report on the state of the system
- *
- * @state: see enum i2400m_system_state
- */
-struct i2400m_tlv_system_state {
- struct i2400m_tlv_hdr hdr;
- __le32 state;
-} __attribute__((packed));
-
-
-struct i2400m_tlv_l4_message_versions {
- struct i2400m_tlv_hdr hdr;
- __le16 major;
- __le16 minor;
- __le16 branch;
- __le16 reserved;
-} __attribute__((packed));
-
-
-struct i2400m_tlv_detailed_device_info {
- struct i2400m_tlv_hdr hdr;
- __u8 reserved1[400];
- __u8 mac_address[ETH_ALEN];
- __u8 reserved2[2];
-} __attribute__((packed));
-
-
-enum i2400m_rf_switch_status {
- I2400M_RF_SWITCH_ON = 1,
- I2400M_RF_SWITCH_OFF = 2,
-};
-
-struct i2400m_tlv_rf_switches_status {
- struct i2400m_tlv_hdr hdr;
- __u8 sw_rf_switch; /* 1 ON, 2 OFF */
- __u8 hw_rf_switch; /* 1 ON, 2 OFF */
- __u8 reserved[2];
-} __attribute__((packed));
-
-
-enum {
- i2400m_rf_operation_on = 1,
- i2400m_rf_operation_off = 2
-};
-
-struct i2400m_tlv_rf_operation {
- struct i2400m_tlv_hdr hdr;
- __le32 status; /* 1 ON, 2 OFF */
-} __attribute__((packed));
-
-
-enum i2400m_tlv_reset_type {
- I2400M_RESET_TYPE_COLD = 1,
- I2400M_RESET_TYPE_WARM
-};
-
-struct i2400m_tlv_device_reset_type {
- struct i2400m_tlv_hdr hdr;
- __le32 reset_type;
-} __attribute__((packed));
-
-
-struct i2400m_tlv_config_idle_parameters {
- struct i2400m_tlv_hdr hdr;
- __le32 idle_timeout; /* 100 to 300000 ms [5min], 100 increments
- * 0 disabled */
- __le32 idle_paging_interval; /* frames */
-} __attribute__((packed));
-
-
-enum i2400m_media_status {
- I2400M_MEDIA_STATUS_LINK_UP = 1,
- I2400M_MEDIA_STATUS_LINK_DOWN,
- I2400M_MEDIA_STATUS_LINK_RENEW,
-};
-
-struct i2400m_tlv_media_status {
- struct i2400m_tlv_hdr hdr;
- __le32 media_status;
-} __attribute__((packed));
-
-
-/* New in v1.4 */
-struct i2400m_tlv_config_idle_timeout {
- struct i2400m_tlv_hdr hdr;
- __le32 timeout; /* 100 to 300000 ms [5min], 100 increments
- * 0 disabled */
-} __attribute__((packed));
-
-/* New in v1.4 -- for backward compat, will be removed */
-struct i2400m_tlv_config_d2h_data_format {
- struct i2400m_tlv_hdr hdr;
- __u8 format; /* 0 old format, 1 enhanced */
- __u8 reserved[3];
-} __attribute__((packed));
-
-/* New in v1.4 */
-struct i2400m_tlv_config_dl_host_reorder {
- struct i2400m_tlv_hdr hdr;
- __u8 reorder; /* 0 disabled, 1 enabled */
- __u8 reserved[3];
-} __attribute__((packed));
-
-
-#endif /* #ifndef __LINUX__WIMAX__I2400M_H__ */
diff --git a/include/uapi/linux/wireless.h b/include/uapi/linux/wireless.h
index 86eca3208b6b..08967b3f19c8 100644
--- a/include/uapi/linux/wireless.h
+++ b/include/uapi/linux/wireless.h
@@ -74,6 +74,12 @@
#include <linux/socket.h> /* for "struct sockaddr" et al */
#include <linux/if.h> /* for IFNAMSIZ and co... */
+#ifdef __KERNEL__
+# include <linux/stddef.h> /* for offsetof */
+#else
+# include <stddef.h> /* for offsetof */
+#endif
+
/***************************** VERSION *****************************/
/*
* This constant is used to know the availability of the wireless
@@ -908,7 +914,7 @@ union iwreq_data {
struct iw_param sens; /* signal level threshold */
struct iw_param bitrate; /* default bit rate */
struct iw_param txpower; /* default transmit power */
- struct iw_param rts; /* RTS threshold threshold */
+ struct iw_param rts; /* RTS threshold */
struct iw_param frag; /* Fragmentation threshold */
__u32 mode; /* Operation mode */
struct iw_param retry; /* Retry limits & lifetime */
@@ -1090,8 +1096,7 @@ struct iw_event {
/* iw_point events are special. First, the payload (extra data) come at
* the end of the event, so they are bigger than IW_EV_POINT_LEN. Second,
* we omit the pointer, so start at an offset. */
-#define IW_EV_POINT_OFF (((char *) &(((struct iw_point *) NULL)->length)) - \
- (char *) NULL)
+#define IW_EV_POINT_OFF offsetof(struct iw_point, length)
#define IW_EV_POINT_LEN (IW_EV_LCP_LEN + sizeof(struct iw_point) - \
IW_EV_POINT_OFF)
diff --git a/include/uapi/linux/wwan.h b/include/uapi/linux/wwan.h
new file mode 100644
index 000000000000..32a2720b4d11
--- /dev/null
+++ b/include/uapi/linux/wwan.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2021 Intel Corporation.
+ */
+#ifndef _UAPI_WWAN_H_
+#define _UAPI_WWAN_H_
+
+enum {
+ IFLA_WWAN_UNSPEC,
+ IFLA_WWAN_LINK_ID, /* u32 */
+
+ __IFLA_WWAN_MAX
+};
+#define IFLA_WWAN_MAX (__IFLA_WWAN_MAX - 1)
+
+#endif /* _UAPI_WWAN_H_ */
diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
index c1395b5bd432..9463db2dfa9d 100644
--- a/include/uapi/linux/xattr.h
+++ b/include/uapi/linux/xattr.h
@@ -7,6 +7,7 @@
Copyright (C) 2001 by Andreas Gruenbacher <a.gruenbacher@computer.org>
Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved.
Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ Copyright (c) 2020 Jan (janneke) Nieuwenhuizen <janneke@gnu.org>
*/
#include <linux/libc-compat.h>
@@ -31,6 +32,9 @@
#define XATTR_BTRFS_PREFIX "btrfs."
#define XATTR_BTRFS_PREFIX_LEN (sizeof(XATTR_BTRFS_PREFIX) - 1)
+#define XATTR_HURD_PREFIX "gnu."
+#define XATTR_HURD_PREFIX_LEN (sizeof(XATTR_HURD_PREFIX) - 1)
+
#define XATTR_SECURITY_PREFIX "security."
#define XATTR_SECURITY_PREFIX_LEN (sizeof(XATTR_SECURITY_PREFIX) - 1)
diff --git a/include/uapi/linux/xdp_diag.h b/include/uapi/linux/xdp_diag.h
index 78b2591a7782..66b9973b4f4c 100644
--- a/include/uapi/linux/xdp_diag.h
+++ b/include/uapi/linux/xdp_diag.h
@@ -30,6 +30,7 @@ struct xdp_diag_msg {
#define XDP_SHOW_RING_CFG (1 << 1)
#define XDP_SHOW_UMEM (1 << 2)
#define XDP_SHOW_MEMINFO (1 << 3)
+#define XDP_SHOW_STATS (1 << 4)
enum {
XDP_DIAG_NONE,
@@ -41,6 +42,7 @@ enum {
XDP_DIAG_UMEM_FILL_RING,
XDP_DIAG_UMEM_COMPLETION_RING,
XDP_DIAG_MEMINFO,
+ XDP_DIAG_STATS,
__XDP_DIAG_MAX,
};
@@ -69,4 +71,13 @@ struct xdp_diag_umem {
__u32 refs;
};
+struct xdp_diag_stats {
+ __u64 n_rx_dropped;
+ __u64 n_rx_invalid;
+ __u64 n_rx_full;
+ __u64 n_fill_ring_empty;
+ __u64 n_tx_invalid;
+ __u64 n_tx_ring_empty;
+};
+
#endif /* _LINUX_XDP_DIAG_H */
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 5f3b9fec7b5f..4f84ea7ee14c 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -33,7 +33,7 @@ struct xfrm_sec_ctx {
__u8 ctx_alg;
__u16 ctx_len;
__u32 ctx_sid;
- char ctx_str[0];
+ char ctx_str[];
};
/* Security Context Domains of Interpretation */
@@ -96,27 +96,27 @@ struct xfrm_replay_state_esn {
__u32 oseq_hi;
__u32 seq_hi;
__u32 replay_window;
- __u32 bmp[0];
+ __u32 bmp[];
};
struct xfrm_algo {
char alg_name[64];
unsigned int alg_key_len; /* in bits */
- char alg_key[0];
+ char alg_key[];
};
struct xfrm_algo_auth {
char alg_name[64];
unsigned int alg_key_len; /* in bits */
unsigned int alg_trunc_len; /* in bits */
- char alg_key[0];
+ char alg_key[];
};
struct xfrm_algo_aead {
char alg_name[64];
unsigned int alg_key_len; /* in bits */
unsigned int alg_icv_len; /* in bits */
- char alg_key[0];
+ char alg_key[];
};
struct xfrm_stats {
@@ -215,6 +215,11 @@ enum {
XFRM_MSG_MAPPING,
#define XFRM_MSG_MAPPING XFRM_MSG_MAPPING
+
+ XFRM_MSG_SETDEFAULT,
+#define XFRM_MSG_SETDEFAULT XFRM_MSG_SETDEFAULT
+ XFRM_MSG_GETDEFAULT,
+#define XFRM_MSG_GETDEFAULT XFRM_MSG_GETDEFAULT
__XFRM_MSG_MAX
};
#define XFRM_MSG_MAX (__XFRM_MSG_MAX - 1)
@@ -291,7 +296,7 @@ enum xfrm_attr_type_t {
XFRMA_ETIMER_THRESH,
XFRMA_SRCADDR, /* xfrm_address_t */
XFRMA_COADDR, /* xfrm_address_t */
- XFRMA_LASTUSED, /* unsigned long */
+ XFRMA_LASTUSED, /* __u64 */
XFRMA_POLICY_TYPE, /* struct xfrm_userpolicy_type */
XFRMA_MIGRATE,
XFRMA_ALG_AEAD, /* struct xfrm_algo_aead */
@@ -304,10 +309,11 @@ enum xfrm_attr_type_t {
XFRMA_PROTO, /* __u8 */
XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */
XFRMA_PAD,
- XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */
+ XFRMA_OFFLOAD_DEV, /* struct xfrm_user_offload */
XFRMA_SET_MARK, /* __u32 */
XFRMA_SET_MARK_MASK, /* __u32 */
XFRMA_IF_ID, /* __u32 */
+ XFRMA_MTIMER_THRESH, /* __u32 in seconds for input SA */
__XFRMA_MAX
#define XFRMA_OUTPUT_MARK XFRMA_SET_MARK /* Compatibility */
@@ -387,6 +393,7 @@ struct xfrm_usersa_info {
};
#define XFRM_SA_XFLAG_DONT_ENCAP_DSCP 1
+#define XFRM_SA_XFLAG_OSEQ_MAY_WRAP 2
struct xfrm_usersa_id {
xfrm_address_t daddr;
@@ -504,9 +511,24 @@ struct xfrm_user_offload {
int ifindex;
__u8 flags;
};
+/* This flag was exposed without any kernel code that supports it.
+ * Unfortunately, strongswan has the code that sets this flag,
+ * which makes it impossible to reuse this bit.
+ *
+ * So leave it here to make sure that it won't be reused by mistake.
+ */
#define XFRM_OFFLOAD_IPV6 1
#define XFRM_OFFLOAD_INBOUND 2
+struct xfrm_userpolicy_default {
+#define XFRM_USERPOLICY_UNSPEC 0
+#define XFRM_USERPOLICY_BLOCK 1
+#define XFRM_USERPOLICY_ACCEPT 2
+ __u8 in;
+ __u8 fwd;
+ __u8 out;
+};
+
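As a hedged illustration of the payload only (the netlink framing for XFRM_MSG_SETDEFAULT is omitted):

/* Hypothetical sketch: block inbound and forwarded traffic by default,
 * allow output. */
struct xfrm_userpolicy_default def = {
	.in  = XFRM_USERPOLICY_BLOCK,
	.fwd = XFRM_USERPOLICY_BLOCK,
	.out = XFRM_USERPOLICY_ACCEPT,
};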
#ifndef __KERNEL__
/* backwards compatibility for userspace */
#define XFRMGRP_ACQUIRE 1
diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h
index 07de2b7aac85..5e29f2cfa42d 100644
--- a/include/uapi/misc/fastrpc.h
+++ b/include/uapi/misc/fastrpc.h
@@ -10,14 +10,67 @@
#define FASTRPC_IOCTL_INVOKE _IOWR('R', 3, struct fastrpc_invoke)
#define FASTRPC_IOCTL_INIT_ATTACH _IO('R', 4)
#define FASTRPC_IOCTL_INIT_CREATE _IOWR('R', 5, struct fastrpc_init_create)
-#define FASTRPC_IOCTL_MMAP _IOWR('R', 6, struct fastrpc_req_mmap)
-#define FASTRPC_IOCTL_MUNMAP _IOWR('R', 7, struct fastrpc_req_munmap)
+#define FASTRPC_IOCTL_MMAP _IOWR('R', 6, struct fastrpc_req_mmap)
+#define FASTRPC_IOCTL_MUNMAP _IOWR('R', 7, struct fastrpc_req_munmap)
+#define FASTRPC_IOCTL_INIT_ATTACH_SNS _IO('R', 8)
+#define FASTRPC_IOCTL_MEM_MAP _IOWR('R', 10, struct fastrpc_mem_map)
+#define FASTRPC_IOCTL_MEM_UNMAP _IOWR('R', 11, struct fastrpc_mem_unmap)
+#define FASTRPC_IOCTL_GET_DSP_INFO _IOWR('R', 13, struct fastrpc_ioctl_capability)
+
+/**
+ * enum fastrpc_map_flags - control flags for mapping memory on the DSP user process
+ * @FASTRPC_MAP_STATIC: Map memory pages with RW- permission and CACHE WRITEBACK.
+ *                      The driver is responsible for cache maintenance when the
+ *                      buffer is passed to FastRPC calls. The same virtual address
+ *                      will be assigned for subsequent FastRPC calls.
+ * @FASTRPC_MAP_RESERVED: Reserved
+ * @FASTRPC_MAP_FD: Map memory pages with RW- permission and CACHE WRITEBACK.
+ *                  The mapping is tagged with a file descriptor. The user is
+ *                  responsible for CPU and DSP cache maintenance for the buffer.
+ *                  Get the virtual address of the buffer on the DSP using the
+ *                  HAP_mmap_get() and HAP_mmap_put() APIs.
+ * @FASTRPC_MAP_FD_DELAYED: Mapping is delayed until the user calls HAP_mmap() and
+ *                          HAP_munmap() on the DSP. It is useful for mapping a
+ *                          buffer with cache modes other than the default. The user
+ *                          is responsible for CPU and DSP cache maintenance for the
+ *                          buffer.
+ * @FASTRPC_MAP_FD_NOMAP: Skip the CPU mapping; otherwise behaves like the
+ *                        FASTRPC_MAP_FD_DELAYED flag.
+ * @FASTRPC_MAP_MAX: max count for flags
+ */
+enum fastrpc_map_flags {
+ FASTRPC_MAP_STATIC = 0,
+ FASTRPC_MAP_RESERVED,
+ FASTRPC_MAP_FD = 2,
+ FASTRPC_MAP_FD_DELAYED,
+ FASTRPC_MAP_FD_NOMAP = 16,
+ FASTRPC_MAP_MAX,
+};
+
+enum fastrpc_proc_attr {
+ /* Macro for Debug attr */
+ FASTRPC_MODE_DEBUG = (1 << 0),
+ /* Macro for Ptrace */
+ FASTRPC_MODE_PTRACE = (1 << 1),
+ /* Macro for CRC Check */
+ FASTRPC_MODE_CRC = (1 << 2),
+ /* Macro for Unsigned PD */
+ FASTRPC_MODE_UNSIGNED_MODULE = (1 << 3),
+ /* Macro for Adaptive QoS */
+ FASTRPC_MODE_ADAPTIVE_QOS = (1 << 4),
+ /* Macro for System Process */
+ FASTRPC_MODE_SYSTEM_PROCESS = (1 << 5),
+	/* Macro for Privileged Process */
+ FASTRPC_MODE_PRIVILEGED = (1 << 6),
+};
+
+/* Fastrpc attribute for memory protection of buffers */
+#define FASTRPC_ATTR_SECUREMAP (1)
struct fastrpc_invoke_args {
__u64 ptr;
__u64 length;
__s32 fd;
- __u32 reserved;
+ __u32 attr;
};
struct fastrpc_invoke {
@@ -48,9 +101,36 @@ struct fastrpc_req_mmap {
__u64 vaddrout; /* dsp virtual address */
};
+struct fastrpc_mem_map {
+ __s32 version;
+ __s32 fd; /* fd */
+ __s32 offset; /* buffer offset */
+ __u32 flags; /* flags defined in enum fastrpc_map_flags */
+ __u64 vaddrin; /* buffer virtual address */
+ __u64 length; /* buffer length */
+ __u64 vaddrout; /* [out] remote virtual address */
+ __s32 attrs; /* buffer attributes used for SMMU mapping */
+ __s32 reserved[4];
+};
+
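A hedged usage sketch for the new memory-map ioctl; 'dev_fd' (an open fastrpc device node) and 'buf_fd' (a DMA-BUF fd) are assumptions, and includes/error handling are trimmed:

static int map_buf(int dev_fd, int buf_fd, __u64 buf_len, __u64 *dsp_va)
{
	struct fastrpc_mem_map map = {
		.fd = buf_fd,			/* DMA-BUF to map into the DSP process */
		.flags = FASTRPC_MAP_FD,	/* user handles CPU/DSP cache maintenance */
		.length = buf_len,
	};

	if (ioctl(dev_fd, FASTRPC_IOCTL_MEM_MAP, &map))
		return -1;
	*dsp_va = map.vaddrout;			/* remote (DSP) virtual address */
	return 0;
}
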
struct fastrpc_req_munmap {
__u64 vaddrout; /* address to unmap */
__u64 size; /* size */
};
+struct fastrpc_mem_unmap {
+	__s32 version;
+ __s32 fd; /* fd */
+ __u64 vaddr; /* remote process (dsp) virtual address */
+ __u64 length; /* buffer size */
+ __s32 reserved[5];
+};
+
+struct fastrpc_ioctl_capability {
+ __u32 domain;
+ __u32 attribute_id;
+ __u32 capability; /* dsp capability */
+ __u32 reserved[4];
+};
+
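A similar sketch for the capability query; the domain and attribute values a caller passes are illustrative only:

static int query_dsp_cap(int dev_fd, __u32 domain, __u32 attr, __u32 *value)
{
	struct fastrpc_ioctl_capability cap = {
		.domain = domain,		/* which DSP domain to query */
		.attribute_id = attr,		/* which capability attribute */
	};

	if (ioctl(dev_fd, FASTRPC_IOCTL_GET_DSP_INFO, &cap))
		return -1;
	*value = cap.capability;
	return 0;
}
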
#endif /* __QCOM_FASTRPC_H__ */
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index 4faa2c9767e5..e00ebe05097d 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -15,10 +15,26 @@
 * Defines that are ASIC-specific but constitute the ABI between the kernel
 * driver and userspace
*/
-#define GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START 0x8000 /* 32KB */
+#define GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START 0x8000 /* 32KB */
+#define GAUDI_DRIVER_SRAM_RESERVED_SIZE_FROM_START 0x80 /* 128 bytes */
/*
- * Queue Numbering
+ * 128 SOBs reserved for collective wait
+ * 16 SOBs reserved for sync stream
+ */
+#define GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT 144
+
+/*
+ * 64 monitors reserved for collective wait
+ * 8 monitors reserved for sync stream
+ */
+#define GAUDI_FIRST_AVAILABLE_W_S_MONITOR 72
+
+/* Max number of elements in timestamps registration buffers */
+#define TS_MAX_ELEMENTS_NUM (1 << 20) /* 1MB */
+
+/*
+ * Goya Queue Numbering
*
* The external queues (PCI DMA channels) MUST be before the internal queues
* and each group (PCI DMA channels and internal) must be contiguous inside
@@ -46,6 +62,408 @@ enum goya_queue_id {
};
/*
+ * Gaudi Queue Numbering
+ * External queues (PCI DMA channels) are DMA_0_* and DMA_1_*.
+ * Except for one CPU queue, all the rest are internal queues.
+ */
+
+enum gaudi_queue_id {
+ GAUDI_QUEUE_ID_DMA_0_0 = 0, /* external */
+ GAUDI_QUEUE_ID_DMA_0_1 = 1, /* external */
+ GAUDI_QUEUE_ID_DMA_0_2 = 2, /* external */
+ GAUDI_QUEUE_ID_DMA_0_3 = 3, /* external */
+ GAUDI_QUEUE_ID_DMA_1_0 = 4, /* external */
+ GAUDI_QUEUE_ID_DMA_1_1 = 5, /* external */
+ GAUDI_QUEUE_ID_DMA_1_2 = 6, /* external */
+ GAUDI_QUEUE_ID_DMA_1_3 = 7, /* external */
+ GAUDI_QUEUE_ID_CPU_PQ = 8, /* CPU */
+ GAUDI_QUEUE_ID_DMA_2_0 = 9, /* internal */
+ GAUDI_QUEUE_ID_DMA_2_1 = 10, /* internal */
+ GAUDI_QUEUE_ID_DMA_2_2 = 11, /* internal */
+ GAUDI_QUEUE_ID_DMA_2_3 = 12, /* internal */
+ GAUDI_QUEUE_ID_DMA_3_0 = 13, /* internal */
+ GAUDI_QUEUE_ID_DMA_3_1 = 14, /* internal */
+ GAUDI_QUEUE_ID_DMA_3_2 = 15, /* internal */
+ GAUDI_QUEUE_ID_DMA_3_3 = 16, /* internal */
+ GAUDI_QUEUE_ID_DMA_4_0 = 17, /* internal */
+ GAUDI_QUEUE_ID_DMA_4_1 = 18, /* internal */
+ GAUDI_QUEUE_ID_DMA_4_2 = 19, /* internal */
+ GAUDI_QUEUE_ID_DMA_4_3 = 20, /* internal */
+ GAUDI_QUEUE_ID_DMA_5_0 = 21, /* internal */
+ GAUDI_QUEUE_ID_DMA_5_1 = 22, /* internal */
+ GAUDI_QUEUE_ID_DMA_5_2 = 23, /* internal */
+ GAUDI_QUEUE_ID_DMA_5_3 = 24, /* internal */
+ GAUDI_QUEUE_ID_DMA_6_0 = 25, /* internal */
+ GAUDI_QUEUE_ID_DMA_6_1 = 26, /* internal */
+ GAUDI_QUEUE_ID_DMA_6_2 = 27, /* internal */
+ GAUDI_QUEUE_ID_DMA_6_3 = 28, /* internal */
+ GAUDI_QUEUE_ID_DMA_7_0 = 29, /* internal */
+ GAUDI_QUEUE_ID_DMA_7_1 = 30, /* internal */
+ GAUDI_QUEUE_ID_DMA_7_2 = 31, /* internal */
+ GAUDI_QUEUE_ID_DMA_7_3 = 32, /* internal */
+ GAUDI_QUEUE_ID_MME_0_0 = 33, /* internal */
+ GAUDI_QUEUE_ID_MME_0_1 = 34, /* internal */
+ GAUDI_QUEUE_ID_MME_0_2 = 35, /* internal */
+ GAUDI_QUEUE_ID_MME_0_3 = 36, /* internal */
+ GAUDI_QUEUE_ID_MME_1_0 = 37, /* internal */
+ GAUDI_QUEUE_ID_MME_1_1 = 38, /* internal */
+ GAUDI_QUEUE_ID_MME_1_2 = 39, /* internal */
+ GAUDI_QUEUE_ID_MME_1_3 = 40, /* internal */
+ GAUDI_QUEUE_ID_TPC_0_0 = 41, /* internal */
+ GAUDI_QUEUE_ID_TPC_0_1 = 42, /* internal */
+ GAUDI_QUEUE_ID_TPC_0_2 = 43, /* internal */
+ GAUDI_QUEUE_ID_TPC_0_3 = 44, /* internal */
+ GAUDI_QUEUE_ID_TPC_1_0 = 45, /* internal */
+ GAUDI_QUEUE_ID_TPC_1_1 = 46, /* internal */
+ GAUDI_QUEUE_ID_TPC_1_2 = 47, /* internal */
+ GAUDI_QUEUE_ID_TPC_1_3 = 48, /* internal */
+ GAUDI_QUEUE_ID_TPC_2_0 = 49, /* internal */
+ GAUDI_QUEUE_ID_TPC_2_1 = 50, /* internal */
+ GAUDI_QUEUE_ID_TPC_2_2 = 51, /* internal */
+ GAUDI_QUEUE_ID_TPC_2_3 = 52, /* internal */
+ GAUDI_QUEUE_ID_TPC_3_0 = 53, /* internal */
+ GAUDI_QUEUE_ID_TPC_3_1 = 54, /* internal */
+ GAUDI_QUEUE_ID_TPC_3_2 = 55, /* internal */
+ GAUDI_QUEUE_ID_TPC_3_3 = 56, /* internal */
+ GAUDI_QUEUE_ID_TPC_4_0 = 57, /* internal */
+ GAUDI_QUEUE_ID_TPC_4_1 = 58, /* internal */
+ GAUDI_QUEUE_ID_TPC_4_2 = 59, /* internal */
+ GAUDI_QUEUE_ID_TPC_4_3 = 60, /* internal */
+ GAUDI_QUEUE_ID_TPC_5_0 = 61, /* internal */
+ GAUDI_QUEUE_ID_TPC_5_1 = 62, /* internal */
+ GAUDI_QUEUE_ID_TPC_5_2 = 63, /* internal */
+ GAUDI_QUEUE_ID_TPC_5_3 = 64, /* internal */
+ GAUDI_QUEUE_ID_TPC_6_0 = 65, /* internal */
+ GAUDI_QUEUE_ID_TPC_6_1 = 66, /* internal */
+ GAUDI_QUEUE_ID_TPC_6_2 = 67, /* internal */
+ GAUDI_QUEUE_ID_TPC_6_3 = 68, /* internal */
+ GAUDI_QUEUE_ID_TPC_7_0 = 69, /* internal */
+ GAUDI_QUEUE_ID_TPC_7_1 = 70, /* internal */
+ GAUDI_QUEUE_ID_TPC_7_2 = 71, /* internal */
+ GAUDI_QUEUE_ID_TPC_7_3 = 72, /* internal */
+ GAUDI_QUEUE_ID_NIC_0_0 = 73, /* internal */
+ GAUDI_QUEUE_ID_NIC_0_1 = 74, /* internal */
+ GAUDI_QUEUE_ID_NIC_0_2 = 75, /* internal */
+ GAUDI_QUEUE_ID_NIC_0_3 = 76, /* internal */
+ GAUDI_QUEUE_ID_NIC_1_0 = 77, /* internal */
+ GAUDI_QUEUE_ID_NIC_1_1 = 78, /* internal */
+ GAUDI_QUEUE_ID_NIC_1_2 = 79, /* internal */
+ GAUDI_QUEUE_ID_NIC_1_3 = 80, /* internal */
+ GAUDI_QUEUE_ID_NIC_2_0 = 81, /* internal */
+ GAUDI_QUEUE_ID_NIC_2_1 = 82, /* internal */
+ GAUDI_QUEUE_ID_NIC_2_2 = 83, /* internal */
+ GAUDI_QUEUE_ID_NIC_2_3 = 84, /* internal */
+ GAUDI_QUEUE_ID_NIC_3_0 = 85, /* internal */
+ GAUDI_QUEUE_ID_NIC_3_1 = 86, /* internal */
+ GAUDI_QUEUE_ID_NIC_3_2 = 87, /* internal */
+ GAUDI_QUEUE_ID_NIC_3_3 = 88, /* internal */
+ GAUDI_QUEUE_ID_NIC_4_0 = 89, /* internal */
+ GAUDI_QUEUE_ID_NIC_4_1 = 90, /* internal */
+ GAUDI_QUEUE_ID_NIC_4_2 = 91, /* internal */
+ GAUDI_QUEUE_ID_NIC_4_3 = 92, /* internal */
+ GAUDI_QUEUE_ID_NIC_5_0 = 93, /* internal */
+ GAUDI_QUEUE_ID_NIC_5_1 = 94, /* internal */
+ GAUDI_QUEUE_ID_NIC_5_2 = 95, /* internal */
+ GAUDI_QUEUE_ID_NIC_5_3 = 96, /* internal */
+ GAUDI_QUEUE_ID_NIC_6_0 = 97, /* internal */
+ GAUDI_QUEUE_ID_NIC_6_1 = 98, /* internal */
+ GAUDI_QUEUE_ID_NIC_6_2 = 99, /* internal */
+ GAUDI_QUEUE_ID_NIC_6_3 = 100, /* internal */
+ GAUDI_QUEUE_ID_NIC_7_0 = 101, /* internal */
+ GAUDI_QUEUE_ID_NIC_7_1 = 102, /* internal */
+ GAUDI_QUEUE_ID_NIC_7_2 = 103, /* internal */
+ GAUDI_QUEUE_ID_NIC_7_3 = 104, /* internal */
+ GAUDI_QUEUE_ID_NIC_8_0 = 105, /* internal */
+ GAUDI_QUEUE_ID_NIC_8_1 = 106, /* internal */
+ GAUDI_QUEUE_ID_NIC_8_2 = 107, /* internal */
+ GAUDI_QUEUE_ID_NIC_8_3 = 108, /* internal */
+ GAUDI_QUEUE_ID_NIC_9_0 = 109, /* internal */
+ GAUDI_QUEUE_ID_NIC_9_1 = 110, /* internal */
+ GAUDI_QUEUE_ID_NIC_9_2 = 111, /* internal */
+ GAUDI_QUEUE_ID_NIC_9_3 = 112, /* internal */
+ GAUDI_QUEUE_ID_SIZE
+};
+
+/*
+ * In GAUDI2 we have two modes of operation with regard to queues:
+ * 1. Legacy mode, where each QMAN exposes 4 streams to the user
+ * 2. F/W mode, where the F/W is used to schedule the JOBS to the different queues.
+ *
+ * When in legacy mode, the user sends the queue id per JOB according to
+ * enum gaudi2_queue_id below.
+ *
+ * When in F/W mode, the user sends a stream id per Command Submission. The
+ * stream id is a running number from 0 up to (N-1), where N is the number
+ * of streams the F/W exposes and is passed to the user in
+ * struct hl_info_hw_ip_info
+ */
+
+enum gaudi2_queue_id {
+ GAUDI2_QUEUE_ID_PDMA_0_0 = 0,
+ GAUDI2_QUEUE_ID_PDMA_0_1 = 1,
+ GAUDI2_QUEUE_ID_PDMA_0_2 = 2,
+ GAUDI2_QUEUE_ID_PDMA_0_3 = 3,
+ GAUDI2_QUEUE_ID_PDMA_1_0 = 4,
+ GAUDI2_QUEUE_ID_PDMA_1_1 = 5,
+ GAUDI2_QUEUE_ID_PDMA_1_2 = 6,
+ GAUDI2_QUEUE_ID_PDMA_1_3 = 7,
+ GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0 = 8,
+ GAUDI2_QUEUE_ID_DCORE0_EDMA_0_1 = 9,
+ GAUDI2_QUEUE_ID_DCORE0_EDMA_0_2 = 10,
+ GAUDI2_QUEUE_ID_DCORE0_EDMA_0_3 = 11,
+ GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0 = 12,
+ GAUDI2_QUEUE_ID_DCORE0_EDMA_1_1 = 13,
+ GAUDI2_QUEUE_ID_DCORE0_EDMA_1_2 = 14,
+ GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3 = 15,
+ GAUDI2_QUEUE_ID_DCORE0_MME_0_0 = 16,
+ GAUDI2_QUEUE_ID_DCORE0_MME_0_1 = 17,
+ GAUDI2_QUEUE_ID_DCORE0_MME_0_2 = 18,
+ GAUDI2_QUEUE_ID_DCORE0_MME_0_3 = 19,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_0_0 = 20,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_0_1 = 21,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_0_2 = 22,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_0_3 = 23,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_1_0 = 24,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_1_1 = 25,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_1_2 = 26,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_1_3 = 27,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_2_0 = 28,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_2_1 = 29,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_2_2 = 30,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_2_3 = 31,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_3_0 = 32,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_3_1 = 33,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_3_2 = 34,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_3_3 = 35,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_4_0 = 36,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_4_1 = 37,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_4_2 = 38,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_4_3 = 39,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_5_0 = 40,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_5_1 = 41,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_5_2 = 42,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_5_3 = 43,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_6_0 = 44,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_6_1 = 45,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_6_2 = 46,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_6_3 = 47,
+ GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0 = 48,
+ GAUDI2_QUEUE_ID_DCORE1_EDMA_0_1 = 49,
+ GAUDI2_QUEUE_ID_DCORE1_EDMA_0_2 = 50,
+ GAUDI2_QUEUE_ID_DCORE1_EDMA_0_3 = 51,
+ GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0 = 52,
+ GAUDI2_QUEUE_ID_DCORE1_EDMA_1_1 = 53,
+ GAUDI2_QUEUE_ID_DCORE1_EDMA_1_2 = 54,
+ GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3 = 55,
+ GAUDI2_QUEUE_ID_DCORE1_MME_0_0 = 56,
+ GAUDI2_QUEUE_ID_DCORE1_MME_0_1 = 57,
+ GAUDI2_QUEUE_ID_DCORE1_MME_0_2 = 58,
+ GAUDI2_QUEUE_ID_DCORE1_MME_0_3 = 59,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_0_0 = 60,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_0_1 = 61,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_0_2 = 62,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_0_3 = 63,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_1_0 = 64,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_1_1 = 65,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_1_2 = 66,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_1_3 = 67,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_2_0 = 68,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_2_1 = 69,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_2_2 = 70,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_2_3 = 71,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_3_0 = 72,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_3_1 = 73,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_3_2 = 74,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_3_3 = 75,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_4_0 = 76,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_4_1 = 77,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_4_2 = 78,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_4_3 = 79,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_5_0 = 80,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_5_1 = 81,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_5_2 = 82,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_5_3 = 83,
+ GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0 = 84,
+ GAUDI2_QUEUE_ID_DCORE2_EDMA_0_1 = 85,
+ GAUDI2_QUEUE_ID_DCORE2_EDMA_0_2 = 86,
+ GAUDI2_QUEUE_ID_DCORE2_EDMA_0_3 = 87,
+ GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0 = 88,
+ GAUDI2_QUEUE_ID_DCORE2_EDMA_1_1 = 89,
+ GAUDI2_QUEUE_ID_DCORE2_EDMA_1_2 = 90,
+ GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3 = 91,
+ GAUDI2_QUEUE_ID_DCORE2_MME_0_0 = 92,
+ GAUDI2_QUEUE_ID_DCORE2_MME_0_1 = 93,
+ GAUDI2_QUEUE_ID_DCORE2_MME_0_2 = 94,
+ GAUDI2_QUEUE_ID_DCORE2_MME_0_3 = 95,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_0_0 = 96,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_0_1 = 97,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_0_2 = 98,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_0_3 = 99,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_1_0 = 100,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_1_1 = 101,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_1_2 = 102,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_1_3 = 103,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_2_0 = 104,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_2_1 = 105,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_2_2 = 106,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_2_3 = 107,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_3_0 = 108,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_3_1 = 109,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_3_2 = 110,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_3_3 = 111,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_4_0 = 112,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_4_1 = 113,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_4_2 = 114,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_4_3 = 115,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_5_0 = 116,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_5_1 = 117,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_5_2 = 118,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_5_3 = 119,
+ GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0 = 120,
+ GAUDI2_QUEUE_ID_DCORE3_EDMA_0_1 = 121,
+ GAUDI2_QUEUE_ID_DCORE3_EDMA_0_2 = 122,
+ GAUDI2_QUEUE_ID_DCORE3_EDMA_0_3 = 123,
+ GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0 = 124,
+ GAUDI2_QUEUE_ID_DCORE3_EDMA_1_1 = 125,
+ GAUDI2_QUEUE_ID_DCORE3_EDMA_1_2 = 126,
+ GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3 = 127,
+ GAUDI2_QUEUE_ID_DCORE3_MME_0_0 = 128,
+ GAUDI2_QUEUE_ID_DCORE3_MME_0_1 = 129,
+ GAUDI2_QUEUE_ID_DCORE3_MME_0_2 = 130,
+ GAUDI2_QUEUE_ID_DCORE3_MME_0_3 = 131,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_0_0 = 132,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_0_1 = 133,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_0_2 = 134,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_0_3 = 135,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_1_0 = 136,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_1_1 = 137,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_1_2 = 138,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_1_3 = 139,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_2_0 = 140,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_2_1 = 141,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_2_2 = 142,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_2_3 = 143,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_3_0 = 144,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_3_1 = 145,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_3_2 = 146,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_3_3 = 147,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_4_0 = 148,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_4_1 = 149,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_4_2 = 150,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_4_3 = 151,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_5_0 = 152,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_5_1 = 153,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_5_2 = 154,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_5_3 = 155,
+ GAUDI2_QUEUE_ID_NIC_0_0 = 156,
+ GAUDI2_QUEUE_ID_NIC_0_1 = 157,
+ GAUDI2_QUEUE_ID_NIC_0_2 = 158,
+ GAUDI2_QUEUE_ID_NIC_0_3 = 159,
+ GAUDI2_QUEUE_ID_NIC_1_0 = 160,
+ GAUDI2_QUEUE_ID_NIC_1_1 = 161,
+ GAUDI2_QUEUE_ID_NIC_1_2 = 162,
+ GAUDI2_QUEUE_ID_NIC_1_3 = 163,
+ GAUDI2_QUEUE_ID_NIC_2_0 = 164,
+ GAUDI2_QUEUE_ID_NIC_2_1 = 165,
+ GAUDI2_QUEUE_ID_NIC_2_2 = 166,
+ GAUDI2_QUEUE_ID_NIC_2_3 = 167,
+ GAUDI2_QUEUE_ID_NIC_3_0 = 168,
+ GAUDI2_QUEUE_ID_NIC_3_1 = 169,
+ GAUDI2_QUEUE_ID_NIC_3_2 = 170,
+ GAUDI2_QUEUE_ID_NIC_3_3 = 171,
+ GAUDI2_QUEUE_ID_NIC_4_0 = 172,
+ GAUDI2_QUEUE_ID_NIC_4_1 = 173,
+ GAUDI2_QUEUE_ID_NIC_4_2 = 174,
+ GAUDI2_QUEUE_ID_NIC_4_3 = 175,
+ GAUDI2_QUEUE_ID_NIC_5_0 = 176,
+ GAUDI2_QUEUE_ID_NIC_5_1 = 177,
+ GAUDI2_QUEUE_ID_NIC_5_2 = 178,
+ GAUDI2_QUEUE_ID_NIC_5_3 = 179,
+ GAUDI2_QUEUE_ID_NIC_6_0 = 180,
+ GAUDI2_QUEUE_ID_NIC_6_1 = 181,
+ GAUDI2_QUEUE_ID_NIC_6_2 = 182,
+ GAUDI2_QUEUE_ID_NIC_6_3 = 183,
+ GAUDI2_QUEUE_ID_NIC_7_0 = 184,
+ GAUDI2_QUEUE_ID_NIC_7_1 = 185,
+ GAUDI2_QUEUE_ID_NIC_7_2 = 186,
+ GAUDI2_QUEUE_ID_NIC_7_3 = 187,
+ GAUDI2_QUEUE_ID_NIC_8_0 = 188,
+ GAUDI2_QUEUE_ID_NIC_8_1 = 189,
+ GAUDI2_QUEUE_ID_NIC_8_2 = 190,
+ GAUDI2_QUEUE_ID_NIC_8_3 = 191,
+ GAUDI2_QUEUE_ID_NIC_9_0 = 192,
+ GAUDI2_QUEUE_ID_NIC_9_1 = 193,
+ GAUDI2_QUEUE_ID_NIC_9_2 = 194,
+ GAUDI2_QUEUE_ID_NIC_9_3 = 195,
+ GAUDI2_QUEUE_ID_NIC_10_0 = 196,
+ GAUDI2_QUEUE_ID_NIC_10_1 = 197,
+ GAUDI2_QUEUE_ID_NIC_10_2 = 198,
+ GAUDI2_QUEUE_ID_NIC_10_3 = 199,
+ GAUDI2_QUEUE_ID_NIC_11_0 = 200,
+ GAUDI2_QUEUE_ID_NIC_11_1 = 201,
+ GAUDI2_QUEUE_ID_NIC_11_2 = 202,
+ GAUDI2_QUEUE_ID_NIC_11_3 = 203,
+ GAUDI2_QUEUE_ID_NIC_12_0 = 204,
+ GAUDI2_QUEUE_ID_NIC_12_1 = 205,
+ GAUDI2_QUEUE_ID_NIC_12_2 = 206,
+ GAUDI2_QUEUE_ID_NIC_12_3 = 207,
+ GAUDI2_QUEUE_ID_NIC_13_0 = 208,
+ GAUDI2_QUEUE_ID_NIC_13_1 = 209,
+ GAUDI2_QUEUE_ID_NIC_13_2 = 210,
+ GAUDI2_QUEUE_ID_NIC_13_3 = 211,
+ GAUDI2_QUEUE_ID_NIC_14_0 = 212,
+ GAUDI2_QUEUE_ID_NIC_14_1 = 213,
+ GAUDI2_QUEUE_ID_NIC_14_2 = 214,
+ GAUDI2_QUEUE_ID_NIC_14_3 = 215,
+ GAUDI2_QUEUE_ID_NIC_15_0 = 216,
+ GAUDI2_QUEUE_ID_NIC_15_1 = 217,
+ GAUDI2_QUEUE_ID_NIC_15_2 = 218,
+ GAUDI2_QUEUE_ID_NIC_15_3 = 219,
+ GAUDI2_QUEUE_ID_NIC_16_0 = 220,
+ GAUDI2_QUEUE_ID_NIC_16_1 = 221,
+ GAUDI2_QUEUE_ID_NIC_16_2 = 222,
+ GAUDI2_QUEUE_ID_NIC_16_3 = 223,
+ GAUDI2_QUEUE_ID_NIC_17_0 = 224,
+ GAUDI2_QUEUE_ID_NIC_17_1 = 225,
+ GAUDI2_QUEUE_ID_NIC_17_2 = 226,
+ GAUDI2_QUEUE_ID_NIC_17_3 = 227,
+ GAUDI2_QUEUE_ID_NIC_18_0 = 228,
+ GAUDI2_QUEUE_ID_NIC_18_1 = 229,
+ GAUDI2_QUEUE_ID_NIC_18_2 = 230,
+ GAUDI2_QUEUE_ID_NIC_18_3 = 231,
+ GAUDI2_QUEUE_ID_NIC_19_0 = 232,
+ GAUDI2_QUEUE_ID_NIC_19_1 = 233,
+ GAUDI2_QUEUE_ID_NIC_19_2 = 234,
+ GAUDI2_QUEUE_ID_NIC_19_3 = 235,
+ GAUDI2_QUEUE_ID_NIC_20_0 = 236,
+ GAUDI2_QUEUE_ID_NIC_20_1 = 237,
+ GAUDI2_QUEUE_ID_NIC_20_2 = 238,
+ GAUDI2_QUEUE_ID_NIC_20_3 = 239,
+ GAUDI2_QUEUE_ID_NIC_21_0 = 240,
+ GAUDI2_QUEUE_ID_NIC_21_1 = 241,
+ GAUDI2_QUEUE_ID_NIC_21_2 = 242,
+ GAUDI2_QUEUE_ID_NIC_21_3 = 243,
+ GAUDI2_QUEUE_ID_NIC_22_0 = 244,
+ GAUDI2_QUEUE_ID_NIC_22_1 = 245,
+ GAUDI2_QUEUE_ID_NIC_22_2 = 246,
+ GAUDI2_QUEUE_ID_NIC_22_3 = 247,
+ GAUDI2_QUEUE_ID_NIC_23_0 = 248,
+ GAUDI2_QUEUE_ID_NIC_23_1 = 249,
+ GAUDI2_QUEUE_ID_NIC_23_2 = 250,
+ GAUDI2_QUEUE_ID_NIC_23_3 = 251,
+ GAUDI2_QUEUE_ID_ROT_0_0 = 252,
+ GAUDI2_QUEUE_ID_ROT_0_1 = 253,
+ GAUDI2_QUEUE_ID_ROT_0_2 = 254,
+ GAUDI2_QUEUE_ID_ROT_0_3 = 255,
+ GAUDI2_QUEUE_ID_ROT_1_0 = 256,
+ GAUDI2_QUEUE_ID_ROT_1_1 = 257,
+ GAUDI2_QUEUE_ID_ROT_1_2 = 258,
+ GAUDI2_QUEUE_ID_ROT_1_3 = 259,
+ GAUDI2_QUEUE_ID_CPU_PQ = 260,
+ GAUDI2_QUEUE_ID_SIZE
+};
+
+/*
* Engine Numbering
*
* Used in the "busy_engines_mask" field in `struct hl_info_hw_idle'
@@ -69,12 +487,245 @@ enum goya_engine_id {
GOYA_ENGINE_ID_SIZE
};
+enum gaudi_engine_id {
+ GAUDI_ENGINE_ID_DMA_0 = 0,
+ GAUDI_ENGINE_ID_DMA_1,
+ GAUDI_ENGINE_ID_DMA_2,
+ GAUDI_ENGINE_ID_DMA_3,
+ GAUDI_ENGINE_ID_DMA_4,
+ GAUDI_ENGINE_ID_DMA_5,
+ GAUDI_ENGINE_ID_DMA_6,
+ GAUDI_ENGINE_ID_DMA_7,
+ GAUDI_ENGINE_ID_MME_0,
+ GAUDI_ENGINE_ID_MME_1,
+ GAUDI_ENGINE_ID_MME_2,
+ GAUDI_ENGINE_ID_MME_3,
+ GAUDI_ENGINE_ID_TPC_0,
+ GAUDI_ENGINE_ID_TPC_1,
+ GAUDI_ENGINE_ID_TPC_2,
+ GAUDI_ENGINE_ID_TPC_3,
+ GAUDI_ENGINE_ID_TPC_4,
+ GAUDI_ENGINE_ID_TPC_5,
+ GAUDI_ENGINE_ID_TPC_6,
+ GAUDI_ENGINE_ID_TPC_7,
+ GAUDI_ENGINE_ID_NIC_0,
+ GAUDI_ENGINE_ID_NIC_1,
+ GAUDI_ENGINE_ID_NIC_2,
+ GAUDI_ENGINE_ID_NIC_3,
+ GAUDI_ENGINE_ID_NIC_4,
+ GAUDI_ENGINE_ID_NIC_5,
+ GAUDI_ENGINE_ID_NIC_6,
+ GAUDI_ENGINE_ID_NIC_7,
+ GAUDI_ENGINE_ID_NIC_8,
+ GAUDI_ENGINE_ID_NIC_9,
+ GAUDI_ENGINE_ID_SIZE
+};
+
+enum gaudi2_engine_id {
+ GAUDI2_DCORE0_ENGINE_ID_EDMA_0 = 0,
+ GAUDI2_DCORE0_ENGINE_ID_EDMA_1,
+ GAUDI2_DCORE0_ENGINE_ID_MME,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_0,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_1,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_2,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_3,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_4,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_5,
+ GAUDI2_DCORE0_ENGINE_ID_DEC_0,
+ GAUDI2_DCORE0_ENGINE_ID_DEC_1,
+ GAUDI2_DCORE1_ENGINE_ID_EDMA_0,
+ GAUDI2_DCORE1_ENGINE_ID_EDMA_1,
+ GAUDI2_DCORE1_ENGINE_ID_MME,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_0,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_1,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_2,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_3,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_4,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_5,
+ GAUDI2_DCORE1_ENGINE_ID_DEC_0,
+ GAUDI2_DCORE1_ENGINE_ID_DEC_1,
+ GAUDI2_DCORE2_ENGINE_ID_EDMA_0,
+ GAUDI2_DCORE2_ENGINE_ID_EDMA_1,
+ GAUDI2_DCORE2_ENGINE_ID_MME,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_0,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_1,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_2,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_3,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_4,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_5,
+ GAUDI2_DCORE2_ENGINE_ID_DEC_0,
+ GAUDI2_DCORE2_ENGINE_ID_DEC_1,
+ GAUDI2_DCORE3_ENGINE_ID_EDMA_0,
+ GAUDI2_DCORE3_ENGINE_ID_EDMA_1,
+ GAUDI2_DCORE3_ENGINE_ID_MME,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_0,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_1,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_2,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_3,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_4,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_5,
+ GAUDI2_DCORE3_ENGINE_ID_DEC_0,
+ GAUDI2_DCORE3_ENGINE_ID_DEC_1,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_6,
+ GAUDI2_ENGINE_ID_PDMA_0,
+ GAUDI2_ENGINE_ID_PDMA_1,
+ GAUDI2_ENGINE_ID_ROT_0,
+ GAUDI2_ENGINE_ID_ROT_1,
+ GAUDI2_PCIE_ENGINE_ID_DEC_0,
+ GAUDI2_PCIE_ENGINE_ID_DEC_1,
+ GAUDI2_ENGINE_ID_NIC0_0,
+ GAUDI2_ENGINE_ID_NIC0_1,
+ GAUDI2_ENGINE_ID_NIC1_0,
+ GAUDI2_ENGINE_ID_NIC1_1,
+ GAUDI2_ENGINE_ID_NIC2_0,
+ GAUDI2_ENGINE_ID_NIC2_1,
+ GAUDI2_ENGINE_ID_NIC3_0,
+ GAUDI2_ENGINE_ID_NIC3_1,
+ GAUDI2_ENGINE_ID_NIC4_0,
+ GAUDI2_ENGINE_ID_NIC4_1,
+ GAUDI2_ENGINE_ID_NIC5_0,
+ GAUDI2_ENGINE_ID_NIC5_1,
+ GAUDI2_ENGINE_ID_NIC6_0,
+ GAUDI2_ENGINE_ID_NIC6_1,
+ GAUDI2_ENGINE_ID_NIC7_0,
+ GAUDI2_ENGINE_ID_NIC7_1,
+ GAUDI2_ENGINE_ID_NIC8_0,
+ GAUDI2_ENGINE_ID_NIC8_1,
+ GAUDI2_ENGINE_ID_NIC9_0,
+ GAUDI2_ENGINE_ID_NIC9_1,
+ GAUDI2_ENGINE_ID_NIC10_0,
+ GAUDI2_ENGINE_ID_NIC10_1,
+ GAUDI2_ENGINE_ID_NIC11_0,
+ GAUDI2_ENGINE_ID_NIC11_1,
+ GAUDI2_ENGINE_ID_SIZE
+};
+
+/*
+ * ASIC specific PLL index
+ *
+ * Used to retrieve frequency info of different IPs via
+ * HL_INFO_PLL_FREQUENCY under the HL_IOCTL_INFO IOCTL. The enums need to be
+ * used as an index in struct hl_pll_frequency_info.
+ */
+
+enum hl_goya_pll_index {
+ HL_GOYA_CPU_PLL = 0,
+ HL_GOYA_IC_PLL,
+ HL_GOYA_MC_PLL,
+ HL_GOYA_MME_PLL,
+ HL_GOYA_PCI_PLL,
+ HL_GOYA_EMMC_PLL,
+ HL_GOYA_TPC_PLL,
+ HL_GOYA_PLL_MAX
+};
+
+enum hl_gaudi_pll_index {
+ HL_GAUDI_CPU_PLL = 0,
+ HL_GAUDI_PCI_PLL,
+ HL_GAUDI_SRAM_PLL,
+ HL_GAUDI_HBM_PLL,
+ HL_GAUDI_NIC_PLL,
+ HL_GAUDI_DMA_PLL,
+ HL_GAUDI_MESH_PLL,
+ HL_GAUDI_MME_PLL,
+ HL_GAUDI_TPC_PLL,
+ HL_GAUDI_IF_PLL,
+ HL_GAUDI_PLL_MAX
+};
+
+enum hl_gaudi2_pll_index {
+ HL_GAUDI2_CPU_PLL = 0,
+ HL_GAUDI2_PCI_PLL,
+ HL_GAUDI2_SRAM_PLL,
+ HL_GAUDI2_HBM_PLL,
+ HL_GAUDI2_NIC_PLL,
+ HL_GAUDI2_DMA_PLL,
+ HL_GAUDI2_MESH_PLL,
+ HL_GAUDI2_MME_PLL,
+ HL_GAUDI2_TPC_PLL,
+ HL_GAUDI2_IF_PLL,
+ HL_GAUDI2_VID_PLL,
+ HL_GAUDI2_MSS_PLL,
+ HL_GAUDI2_PLL_MAX
+};
+
+/**
+ * enum hl_goya_dma_direction - Direction of DMA operation inside a LIN_DMA packet that is
+ * submitted to the GOYA's DMA QMAN. This attribute is not relevant
+ *				to the H/W, but the kernel driver uses it to parse the packet's
+ * addresses and patch/validate them.
+ * @HL_DMA_HOST_TO_DRAM: DMA operation from Host memory to GOYA's DDR.
+ * @HL_DMA_HOST_TO_SRAM: DMA operation from Host memory to GOYA's SRAM.
+ * @HL_DMA_DRAM_TO_SRAM: DMA operation from GOYA's DDR to GOYA's SRAM.
+ * @HL_DMA_SRAM_TO_DRAM: DMA operation from GOYA's SRAM to GOYA's DDR.
+ * @HL_DMA_SRAM_TO_HOST: DMA operation from GOYA's SRAM to Host memory.
+ * @HL_DMA_DRAM_TO_HOST: DMA operation from GOYA's DDR to Host memory.
+ * @HL_DMA_DRAM_TO_DRAM: DMA operation from GOYA's DDR to GOYA's DDR.
+ * @HL_DMA_SRAM_TO_SRAM: DMA operation from GOYA's SRAM to GOYA's SRAM.
+ * @HL_DMA_ENUM_MAX: number of values in enum
+ */
+enum hl_goya_dma_direction {
+ HL_DMA_HOST_TO_DRAM,
+ HL_DMA_HOST_TO_SRAM,
+ HL_DMA_DRAM_TO_SRAM,
+ HL_DMA_SRAM_TO_DRAM,
+ HL_DMA_SRAM_TO_HOST,
+ HL_DMA_DRAM_TO_HOST,
+ HL_DMA_DRAM_TO_DRAM,
+ HL_DMA_SRAM_TO_SRAM,
+ HL_DMA_ENUM_MAX
+};
+
+/**
+ * enum hl_device_status - Device status information.
+ * @HL_DEVICE_STATUS_OPERATIONAL: Device is operational.
+ * @HL_DEVICE_STATUS_IN_RESET: Device is currently in reset.
+ * @HL_DEVICE_STATUS_MALFUNCTION: Device is unusable.
+ * @HL_DEVICE_STATUS_NEEDS_RESET: Device needs reset because auto reset was disabled.
+ * @HL_DEVICE_STATUS_IN_DEVICE_CREATION: Device is operational but its creation is still in
+ * progress.
+ * @HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE: Device is currently in a reset that was
+ *                                                  triggered because the user released the device.
+ * @HL_DEVICE_STATUS_LAST: Last status.
+ */
enum hl_device_status {
HL_DEVICE_STATUS_OPERATIONAL,
HL_DEVICE_STATUS_IN_RESET,
- HL_DEVICE_STATUS_MALFUNCTION
+ HL_DEVICE_STATUS_MALFUNCTION,
+ HL_DEVICE_STATUS_NEEDS_RESET,
+ HL_DEVICE_STATUS_IN_DEVICE_CREATION,
+ HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE,
+ HL_DEVICE_STATUS_LAST = HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE
+};
+
+enum hl_server_type {
+ HL_SERVER_TYPE_UNKNOWN = 0,
+ HL_SERVER_GAUDI_HLS1 = 1,
+ HL_SERVER_GAUDI_HLS1H = 2,
+ HL_SERVER_GAUDI_TYPE1 = 3,
+ HL_SERVER_GAUDI_TYPE2 = 4,
+ HL_SERVER_GAUDI2_HLS2 = 5
};
+/*
+ * Notifier event values - for the notification mechanism and the HL_INFO_GET_EVENTS command
+ *
+ * HL_NOTIFIER_EVENT_TPC_ASSERT - Indicates TPC assert event
+ * HL_NOTIFIER_EVENT_UNDEFINED_OPCODE - Indicates undefined operation code
+ * HL_NOTIFIER_EVENT_DEVICE_RESET - Indicates device requires a reset
+ * HL_NOTIFIER_EVENT_CS_TIMEOUT - Indicates CS timeout error
+ * HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE - Indicates device is unavailable
+ * HL_NOTIFIER_EVENT_USER_ENGINE_ERR - Indicates device engine in error state
+ * HL_NOTIFIER_EVENT_GENERAL_HW_ERR - Indicates device HW error
+ */
+#define HL_NOTIFIER_EVENT_TPC_ASSERT (1ULL << 0)
+#define HL_NOTIFIER_EVENT_UNDEFINED_OPCODE (1ULL << 1)
+#define HL_NOTIFIER_EVENT_DEVICE_RESET (1ULL << 2)
+#define HL_NOTIFIER_EVENT_CS_TIMEOUT (1ULL << 3)
+#define HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE (1ULL << 4)
+#define HL_NOTIFIER_EVENT_USER_ENGINE_ERR (1ULL << 5)
+#define HL_NOTIFIER_EVENT_GENERAL_HW_ERR (1ULL << 6)
+
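A sketch of how userspace might arm these notifications; HL_INFO_REGISTER_EVENTFD, struct hl_info_args and HL_IOCTL_INFO are all defined further down in this header, and error handling is trimmed:

#include <sys/eventfd.h>
#include <sys/ioctl.h>

static int register_events(int dev_fd)
{
	int efd = eventfd(0, 0);
	struct hl_info_args args = {
		.op = HL_INFO_REGISTER_EVENTFD,
		.eventfd = efd,
	};

	if (efd < 0 || ioctl(dev_fd, HL_IOCTL_INFO, &args))
		return -1;
	return efd;	/* poll this fd, then query HL_INFO_GET_EVENTS */
}
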
/* Opcode for management ioctl
*
* HW_IP_INFO - Receive information about different IP blocks in the
@@ -101,38 +752,143 @@ enum hl_device_status {
* HL_INFO_RESET_COUNT - Retrieve the counts of the soft and hard reset
* operations performed on the device since the last
* time the driver was loaded.
+ * HL_INFO_TIME_SYNC - Retrieve the device's time alongside the host's time
+ * for synchronization.
+ * HL_INFO_CS_COUNTERS - Retrieve command submission counters
+ * HL_INFO_PCI_COUNTERS - Retrieve PCI counters
+ * HL_INFO_CLK_THROTTLE_REASON - Retrieve clock throttling reason
+ * HL_INFO_SYNC_MANAGER - Retrieve sync manager info per dcore
+ * HL_INFO_TOTAL_ENERGY - Retrieve total energy consumption
+ * HL_INFO_PLL_FREQUENCY - Retrieve PLL frequency
+ * HL_INFO_POWER - Retrieve power information
+ * HL_INFO_OPEN_STATS - Retrieve info regarding recent device open calls
+ * HL_INFO_DRAM_REPLACED_ROWS - Retrieve DRAM replaced rows info
+ * HL_INFO_DRAM_PENDING_ROWS - Retrieve DRAM pending rows num
+ * HL_INFO_LAST_ERR_OPEN_DEV_TIME - Retrieve timestamp of the last time the device was opened
+ *                                  and a CS timeout or razwi error occurred.
+ * HL_INFO_CS_TIMEOUT_EVENT - Retrieve CS timeout timestamp and its related CS sequence number.
+ * HL_INFO_RAZWI_EVENT - Retrieve parameters of razwi:
+ * Timestamp of razwi.
+ *                       The address whose access caused the razwi.
+ * Razwi initiator.
+ * Razwi cause, was it a page fault or MMU access error.
+ * HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES - Retrieve valid page sizes for device memory allocation
+ * HL_INFO_SECURED_ATTESTATION - Retrieve attestation report of the boot.
+ * HL_INFO_REGISTER_EVENTFD - Register eventfd for event notifications.
+ * HL_INFO_UNREGISTER_EVENTFD - Unregister eventfd
+ * HL_INFO_GET_EVENTS - Retrieve the last occurred events
+ * HL_INFO_UNDEFINED_OPCODE_EVENT - Retrieve last undefined opcode error information.
+ */
+#define HL_INFO_HW_IP_INFO 0
+#define HL_INFO_HW_EVENTS 1
+#define HL_INFO_DRAM_USAGE 2
+#define HL_INFO_HW_IDLE 3
+#define HL_INFO_DEVICE_STATUS 4
+#define HL_INFO_DEVICE_UTILIZATION 6
+#define HL_INFO_HW_EVENTS_AGGREGATE 7
+#define HL_INFO_CLK_RATE 8
+#define HL_INFO_RESET_COUNT 9
+#define HL_INFO_TIME_SYNC 10
+#define HL_INFO_CS_COUNTERS 11
+#define HL_INFO_PCI_COUNTERS 12
+#define HL_INFO_CLK_THROTTLE_REASON 13
+#define HL_INFO_SYNC_MANAGER 14
+#define HL_INFO_TOTAL_ENERGY 15
+#define HL_INFO_PLL_FREQUENCY 16
+#define HL_INFO_POWER 17
+#define HL_INFO_OPEN_STATS 18
+#define HL_INFO_DRAM_REPLACED_ROWS 21
+#define HL_INFO_DRAM_PENDING_ROWS 22
+#define HL_INFO_LAST_ERR_OPEN_DEV_TIME 23
+#define HL_INFO_CS_TIMEOUT_EVENT 24
+#define HL_INFO_RAZWI_EVENT 25
+#define HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES 26
+#define HL_INFO_SECURED_ATTESTATION 27
+#define HL_INFO_REGISTER_EVENTFD 28
+#define HL_INFO_UNREGISTER_EVENTFD 29
+#define HL_INFO_GET_EVENTS 30
+#define HL_INFO_UNDEFINED_OPCODE_EVENT 31
+#define HL_INFO_ENGINE_STATUS 32
+
+#define HL_INFO_VERSION_MAX_LEN 128
+#define HL_INFO_CARD_NAME_MAX_LEN 16
+
+/* Maximum buffer size for retrieving engines status */
+#define HL_ENGINES_DATA_MAX_SIZE SZ_1M
+
+/**
+ * struct hl_info_hw_ip_info - hardware information on various IPs in the ASIC
+ * @sram_base_address: The first SRAM physical base address that is free to be
+ * used by the user.
+ * @dram_base_address: The first DRAM virtual or physical base address that is
+ * free to be used by the user.
+ * @dram_size: The DRAM size that is available to the user.
+ * @sram_size: The SRAM size that is available to the user.
+ * @num_of_events: The number of events that can be received from the f/w. This
+ *                 is needed so the user can know the size of the h/w events
+ *                 array to pass to the kernel when fetching the event
+ *                 counters.
+ * @device_id: PCI device ID of the ASIC.
+ * @module_id: Module ID of the ASIC for mezzanine cards in servers
+ * (From OCP spec).
+ * @decoder_enabled_mask: Bit-mask that represents which decoders are enabled.
+ * @first_available_interrupt_id: The first available interrupt ID for the user
+ * to be used when it works with user interrupts.
+ * Relevant for Gaudi2 and later.
+ * @server_type: Server type that the Gaudi ASIC is currently installed in.
+ * The value is according to enum hl_server_type
+ * @cpld_version: CPLD version on the board.
+ * @psoc_pci_pll_nr: PCI PLL NR value. Needed by the profiler in some ASICs.
+ * @psoc_pci_pll_nf: PCI PLL NF value. Needed by the profiler in some ASICs.
+ * @psoc_pci_pll_od: PCI PLL OD value. Needed by the profiler in some ASICs.
+ * @psoc_pci_pll_div_factor: PCI PLL DIV factor value. Needed by the profiler
+ * in some ASICs.
+ * @tpc_enabled_mask: Bit-mask that represents which TPCs are enabled. Relevant
+ * for Goya/Gaudi only.
+ * @dram_enabled: Whether the DRAM is enabled.
+ * @security_enabled: Whether security is enabled on device.
+ * @mme_master_slave_mode: Indicate whether the MME is working in master/slave
+ * configuration. Relevant for Greco and later.
+ * @cpucp_version: The CPUCP f/w version.
+ * @card_name: The card name as passed by the f/w.
+ * @tpc_enabled_mask_ext: Bit-mask that represents which TPCs are enabled.
+ * Relevant for Greco and later.
+ * @dram_page_size: The DRAM physical page size.
+ * @edma_enabled_mask: Bit-mask that represents which EDMAs are enabled.
+ * Relevant for Gaudi2 and later.
+ * @number_of_user_interrupts: The number of interrupts that are available to the userspace
+ * application to use. Relevant for Gaudi2 and later.
+ * @device_mem_alloc_default_page_size: default page size used in device memory allocation.
*/
-#define HL_INFO_HW_IP_INFO 0
-#define HL_INFO_HW_EVENTS 1
-#define HL_INFO_DRAM_USAGE 2
-#define HL_INFO_HW_IDLE 3
-#define HL_INFO_DEVICE_STATUS 4
-#define HL_INFO_DEVICE_UTILIZATION 6
-#define HL_INFO_HW_EVENTS_AGGREGATE 7
-#define HL_INFO_CLK_RATE 8
-#define HL_INFO_RESET_COUNT 9
-
-#define HL_INFO_VERSION_MAX_LEN 128
-#define HL_INFO_CARD_NAME_MAX_LEN 16
-
struct hl_info_hw_ip_info {
__u64 sram_base_address;
__u64 dram_base_address;
__u64 dram_size;
__u32 sram_size;
__u32 num_of_events;
- __u32 device_id; /* PCI Device ID */
- __u32 reserved[3];
- __u32 armcp_cpld_version;
+ __u32 device_id;
+ __u32 module_id;
+ __u32 decoder_enabled_mask;
+ __u16 first_available_interrupt_id;
+ __u16 server_type;
+ __u32 cpld_version;
__u32 psoc_pci_pll_nr;
__u32 psoc_pci_pll_nf;
__u32 psoc_pci_pll_od;
__u32 psoc_pci_pll_div_factor;
__u8 tpc_enabled_mask;
__u8 dram_enabled;
- __u8 pad[2];
- __u8 armcp_version[HL_INFO_VERSION_MAX_LEN];
+ __u8 security_enabled;
+ __u8 mme_master_slave_mode;
+ __u8 cpucp_version[HL_INFO_VERSION_MAX_LEN];
__u8 card_name[HL_INFO_CARD_NAME_MAX_LEN];
+ __u64 tpc_enabled_mask_ext;
+ __u64 dram_page_size;
+ __u32 edma_enabled_mask;
+ __u16 number_of_user_interrupts;
+ __u16 pad2;
+ __u64 reserved4;
+ __u64 device_mem_alloc_default_page_size;
};
struct hl_info_dram_usage {
@@ -140,13 +896,21 @@ struct hl_info_dram_usage {
__u64 ctx_dram_mem;
};
+#define HL_BUSY_ENGINES_MASK_EXT_SIZE 2
+
struct hl_info_hw_idle {
__u32 is_idle;
/*
* Bitmask of busy engines.
- * Bits definition is according to `enum <chip>_enging_id'.
+ * Bits definition is according to `enum <chip>_engine_id'.
*/
__u32 busy_engines_mask;
+
+ /*
+ * Extended Bitmask of busy engines.
+ * Bits definition is according to `enum <chip>_engine_id'.
+ */
+ __u64 busy_engines_mask_ext[HL_BUSY_ENGINES_MASK_EXT_SIZE];
};
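
A small helper sketch for the extended mask, assuming engine-id bit n lives in word n/64 of the array:

static int engine_busy(const struct hl_info_hw_idle *idle, unsigned int engine_id)
{
	return !!(idle->busy_engines_mask_ext[engine_id / 64] &
		  (1ULL << (engine_id % 64)));
}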
struct hl_info_device_status {
@@ -169,28 +933,288 @@ struct hl_info_reset_count {
__u32 soft_reset_cnt;
};
+struct hl_info_time_sync {
+ __u64 device_time;
+ __u64 host_time;
+};
+
+/**
+ * struct hl_info_pci_counters - pci counters
+ * @rx_throughput: PCI rx throughput KBps
+ * @tx_throughput: PCI tx throughput KBps
+ * @replay_cnt: PCI replay counter
+ */
+struct hl_info_pci_counters {
+ __u64 rx_throughput;
+ __u64 tx_throughput;
+ __u64 replay_cnt;
+};
+
+enum hl_clk_throttling_type {
+ HL_CLK_THROTTLE_TYPE_POWER,
+ HL_CLK_THROTTLE_TYPE_THERMAL,
+ HL_CLK_THROTTLE_TYPE_MAX
+};
+
+/* clk_throttling_reason masks */
+#define HL_CLK_THROTTLE_POWER (1 << HL_CLK_THROTTLE_TYPE_POWER)
+#define HL_CLK_THROTTLE_THERMAL (1 << HL_CLK_THROTTLE_TYPE_THERMAL)
+
+/**
+ * struct hl_info_clk_throttle - clock throttling reason
+ * @clk_throttling_reason: each bit represents a clk throttling reason
+ * @clk_throttling_timestamp_us: represents CPU timestamp in microseconds of the start-event
+ * @clk_throttling_duration_ns: the clock throttle time in nanosec
+ */
+struct hl_info_clk_throttle {
+ __u32 clk_throttling_reason;
+ __u32 pad;
+ __u64 clk_throttling_timestamp_us[HL_CLK_THROTTLE_TYPE_MAX];
+ __u64 clk_throttling_duration_ns[HL_CLK_THROTTLE_TYPE_MAX];
+};
+
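A decode sketch for the reason mask, assuming 'info' was filled by an HL_INFO_CLK_THROTTLE_REASON query and includes are trimmed:

static void print_throttle(const struct hl_info_clk_throttle *info)
{
	if (info->clk_throttling_reason & HL_CLK_THROTTLE_POWER)
		printf("power throttled for %llu ns\n", (unsigned long long)
		       info->clk_throttling_duration_ns[HL_CLK_THROTTLE_TYPE_POWER]);
	if (info->clk_throttling_reason & HL_CLK_THROTTLE_THERMAL)
		printf("thermal throttled for %llu ns\n", (unsigned long long)
		       info->clk_throttling_duration_ns[HL_CLK_THROTTLE_TYPE_THERMAL]);
}
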
+/**
+ * struct hl_info_energy - device energy information
+ * @total_energy_consumption: total device energy consumption
+ */
+struct hl_info_energy {
+ __u64 total_energy_consumption;
+};
+
+#define HL_PLL_NUM_OUTPUTS 4
+
+struct hl_pll_frequency_info {
+ __u16 output[HL_PLL_NUM_OUTPUTS];
+};
+
+/**
+ * struct hl_open_stats_info - device open statistics information
+ * @open_counter: ever growing counter, increased on each successful dev open
+ * @last_open_period_ms: duration (ms) device was open last time
+ * @is_compute_ctx_active: Whether there is an active compute context executing
+ * @compute_ctx_in_release: true if the current compute context is being released
+ */
+struct hl_open_stats_info {
+ __u64 open_counter;
+ __u64 last_open_period_ms;
+ __u8 is_compute_ctx_active;
+ __u8 compute_ctx_in_release;
+ __u8 pad[6];
+};
+
+/**
+ * struct hl_power_info - power information
+ * @power: power consumption
+ */
+struct hl_power_info {
+ __u64 power;
+};
+
+/**
+ * struct hl_info_sync_manager - sync manager information
+ * @first_available_sync_object: first available sob
+ * @first_available_monitor: first available monitor
+ * @first_available_cq: first available cq
+ */
+struct hl_info_sync_manager {
+ __u32 first_available_sync_object;
+ __u32 first_available_monitor;
+ __u32 first_available_cq;
+ __u32 reserved;
+};
+
+/**
+ * struct hl_info_cs_counters - command submission counters
+ * @total_out_of_mem_drop_cnt: total dropped due to memory allocation issue
+ * @ctx_out_of_mem_drop_cnt: context dropped due to memory allocation issue
+ * @total_parsing_drop_cnt: total dropped due to error in packet parsing
+ * @ctx_parsing_drop_cnt: context dropped due to error in packet parsing
+ * @total_queue_full_drop_cnt: total dropped due to queue full
+ * @ctx_queue_full_drop_cnt: context dropped due to queue full
+ * @total_device_in_reset_drop_cnt: total dropped due to device in reset
+ * @ctx_device_in_reset_drop_cnt: context dropped due to device in reset
+ * @total_max_cs_in_flight_drop_cnt: total dropped due to maximum CS in-flight
+ * @ctx_max_cs_in_flight_drop_cnt: context dropped due to maximum CS in-flight
+ * @total_validation_drop_cnt: total dropped due to validation error
+ * @ctx_validation_drop_cnt: context dropped due to validation error
+ */
+struct hl_info_cs_counters {
+ __u64 total_out_of_mem_drop_cnt;
+ __u64 ctx_out_of_mem_drop_cnt;
+ __u64 total_parsing_drop_cnt;
+ __u64 ctx_parsing_drop_cnt;
+ __u64 total_queue_full_drop_cnt;
+ __u64 ctx_queue_full_drop_cnt;
+ __u64 total_device_in_reset_drop_cnt;
+ __u64 ctx_device_in_reset_drop_cnt;
+ __u64 total_max_cs_in_flight_drop_cnt;
+ __u64 ctx_max_cs_in_flight_drop_cnt;
+ __u64 total_validation_drop_cnt;
+ __u64 ctx_validation_drop_cnt;
+};
+
+/**
+ * struct hl_info_last_err_open_dev_time - last error boot information.
+ * @timestamp: timestamp of last time the device was opened and error occurred.
+ */
+struct hl_info_last_err_open_dev_time {
+ __s64 timestamp;
+};
+
+/**
+ * struct hl_info_cs_timeout_event - last CS timeout information.
+ * @timestamp: timestamp when last CS timeout event occurred.
+ * @seq: sequence number of last CS timeout event.
+ */
+struct hl_info_cs_timeout_event {
+ __s64 timestamp;
+ __u64 seq;
+};
+
+#define HL_RAZWI_PAGE_FAULT 0
+#define HL_RAZWI_MMU_ACCESS_ERROR 1
+
+/**
+ * struct hl_info_razwi_event - razwi information.
+ * @timestamp: timestamp of razwi.
+ * @addr: address whose access caused the razwi.
+ * @engine_id_1: engine id of the razwi initiator. If it was initiated by an engine that
+ *               does not have an engine id, it will be set to U16_MAX.
+ * @engine_id_2: second engine id of the razwi initiator. The razwi may have 2 possible
+ *               initiator engines, one of which caused it. In that case, this will contain
+ *               the second possible engine id, otherwise it will be set to U16_MAX.
+ * @no_engine_id: if razwi initiator does not have engine id, this field will be set to 1,
+ * otherwise 0.
+ * @error_type: cause of the razwi, page fault or MMU access error; if unknown, it will be
+ *              set to U8_MAX.
+ * @pad: padding to 64 bit.
+ */
+struct hl_info_razwi_event {
+ __s64 timestamp;
+ __u64 addr;
+ __u16 engine_id_1;
+ __u16 engine_id_2;
+ __u8 no_engine_id;
+ __u8 error_type;
+ __u8 pad[2];
+};
+
+#define MAX_QMAN_STREAMS_INFO 4
+#define OPCODE_INFO_MAX_ADDR_SIZE 8
+/**
+ * struct hl_info_undefined_opcode_event - info about last undefined opcode error
+ * @timestamp: timestamp of the undefined opcode error
+ * @cb_addr_streams: CB addresses (per stream) that currently exist in the PQ
+ *                   entries. In case all stream array entries are
+ *                   filled with values, it means the execution was in the Lower-CP.
+ * @cq_addr: the address of the current handled command buffer
+ * @cq_size: the size of the current handled command buffer
+ * @cb_addr_streams_len: num of streams - the actual length of the cb_addr_streams array.
+ *                       Should be equal to 1 in case of an undefined opcode
+ *                       in the Upper-CP (specific stream) and equal to 4 in case
+ *                       of an undefined opcode in the Lower-CP.
+ * @engine_id: engine-id that the error occurred on
+ * @stream_id: the stream id the error occurred on. In case the stream equals
+ *             MAX_QMAN_STREAMS_INFO it means the error occurred on the Lower-CP.
+ */
+struct hl_info_undefined_opcode_event {
+ __s64 timestamp;
+ __u64 cb_addr_streams[MAX_QMAN_STREAMS_INFO][OPCODE_INFO_MAX_ADDR_SIZE];
+ __u64 cq_addr;
+ __u32 cq_size;
+ __u32 cb_addr_streams_len;
+ __u32 engine_id;
+ __u32 stream_id;
+};
+
+/**
+ * struct hl_info_dev_memalloc_page_sizes - valid page sizes in device mem alloc information.
+ * @page_order_bitmask: bitmap in which a set bit represents the order of the supported page size
+ * (e.g. 0x2100000 means that 1MB and 32MB pages are supported).
+ */
+struct hl_info_dev_memalloc_page_sizes {
+ __u64 page_order_bitmask;
+};
+
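A decode sketch for the bitmask: bit n set means pages of (1ULL << n) bytes are supported, so 0x2100000 (bits 20 and 25) yields the 1MB and 32MB example above:

static void print_page_sizes(const struct hl_info_dev_memalloc_page_sizes *s)
{
	unsigned int order;

	for (order = 0; order < 64; order++)
		if (s->page_order_bitmask & (1ULL << order))
			printf("supported page size: %llu bytes\n", 1ULL << order);
}
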
+#define SEC_PCR_DATA_BUF_SZ 256
+#define SEC_PCR_QUOTE_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
+#define SEC_SIGNATURE_BUF_SZ 255 /* (256 - 1) 1 byte used for size */
+#define SEC_PUB_DATA_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
+#define SEC_CERTIFICATE_BUF_SZ 2046 /* (2048 - 2) 2 bytes used for size */
+
+/*
+ * struct hl_info_sec_attest - attestation report of the boot
+ * @nonce: number used only once; a random number provided by the host. This is
+ *         also passed to the quote command as qualifying data.
+ * @pcr_quote_len: length of the attestation quote data (bytes)
+ * @pub_data_len: length of the public data (bytes)
+ * @certificate_len: length of the certificate (bytes)
+ * @pcr_num_reg: number of PCR registers in the pcr_data array
+ * @pcr_reg_len: length of each PCR register in the pcr_data array (bytes)
+ * @quote_sig_len: length of the attestation report signature (bytes)
+ * @pcr_data: raw values of the PCR registers
+ * @pcr_quote: attestation report data structure
+ * @quote_sig: signature structure of the attestation report
+ * @public_data: public key for the signed attestation
+ * (outPublic + name + qualifiedName)
+ * @certificate: certificate for the attestation signing key
+ */
+struct hl_info_sec_attest {
+ __u32 nonce;
+ __u16 pcr_quote_len;
+ __u16 pub_data_len;
+ __u16 certificate_len;
+ __u8 pcr_num_reg;
+ __u8 pcr_reg_len;
+ __u8 quote_sig_len;
+ __u8 pcr_data[SEC_PCR_DATA_BUF_SZ];
+ __u8 pcr_quote[SEC_PCR_QUOTE_BUF_SZ];
+ __u8 quote_sig[SEC_SIGNATURE_BUF_SZ];
+ __u8 public_data[SEC_PUB_DATA_BUF_SZ];
+ __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
+ __u8 pad0[2];
+};
+
+enum gaudi_dcores {
+ HL_GAUDI_WS_DCORE,
+ HL_GAUDI_WN_DCORE,
+ HL_GAUDI_EN_DCORE,
+ HL_GAUDI_ES_DCORE
+};
+
+/**
+ * struct hl_info_args - Main structure to retrieve device related information.
+ * @return_pointer: User space address of the relevant structure related to HL_INFO_* operation
+ * mentioned in @op.
+ * @return_size: Size of the structure used in @return_pointer. Just like "size" in "snprintf",
+ *               it limits how many bytes the kernel can write. For the hw_events array, the size
+ *               should be hl_info_hw_ip_info.num_of_events * sizeof(__u32).
+ * @op: Defines which type of information to be retrieved. Refer HL_INFO_* for details.
+ * @dcore_id: DCORE id for which the information is relevant (for Gaudi refer to enum gaudi_dcores).
+ * @ctx_id: Context ID of the user. Currently not in use.
+ * @period_ms: Period value, in milliseconds, for utilization rate in range 100ms - 1000ms in 100 ms
+ * resolution. Currently not in use.
+ * @pll_index: Index as defined in hl_<asic type>_pll_index enumeration.
+ * @eventfd: event file descriptor for event notifications.
+ * @user_buffer_actual_size: Actual data size which was copied to the user allocated buffer by
+ *                           the driver. It is possible for the user to allocate a buffer larger
+ *                           than needed, hence this variable is updated so the user will know the
+ *                           exact amount of bytes copied by the kernel to the buffer.
+ * @sec_attest_nonce: Nonce number used for attestation report.
+ * @pad: Padding to 64 bit.
+ */
struct hl_info_args {
- /* Location of relevant struct in userspace */
__u64 return_pointer;
- /*
- * The size of the return value. Just like "size" in "snprintf",
- * it limits how many bytes the kernel can write
- *
- * For hw_events array, the size should be
- * hl_info_hw_ip_info.num_of_events * sizeof(__u32)
- */
__u32 return_size;
-
- /* HL_INFO_* */
__u32 op;
union {
- /* Context ID - Currently not in use */
+ __u32 dcore_id;
__u32 ctx_id;
- /* Period value for utilization rate (100ms - 1000ms, in 100ms
- * resolution.
- */
__u32 period_ms;
+ __u32 pll_index;
+ __u32 eventfd;
+ __u32 user_buffer_actual_size;
+ __u32 sec_attest_nonce;
};
__u32 pad;
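
An end-to-end sketch of an INFO query tying @return_pointer, @return_size and @op together; HL_IOCTL_INFO is defined further down in this header, and includes are trimmed:

static int get_hw_ip(int dev_fd, struct hl_info_hw_ip_info *hw_ip)
{
	struct hl_info_args args = {
		.return_pointer = (__u64)(uintptr_t)hw_ip,
		.return_size = sizeof(*hw_ip),
		.op = HL_INFO_HW_IP_INFO,
	};

	return ioctl(dev_fd, HL_IOCTL_INFO, &args);
}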
@@ -200,26 +1224,52 @@ struct hl_info_args {
#define HL_CB_OP_CREATE 0
/* Opcode to destroy previously created command buffer */
#define HL_CB_OP_DESTROY 1
+/* Opcode to retrieve information about a command buffer */
+#define HL_CB_OP_INFO 2
+
+/* 2MB minus 32 bytes for 2xMSG_PROT */
+#define HL_MAX_CB_SIZE (0x200000 - 32)
+
+/* Indicates whether the command buffer should be mapped to the device's MMU */
+#define HL_CB_FLAGS_MAP 0x1
-#define HL_MAX_CB_SIZE 0x200000 /* 2MB */
+/* Used with HL_CB_OP_INFO opcode to get the device va address for kernel mapped CB */
+#define HL_CB_FLAGS_GET_DEVICE_VA 0x2
struct hl_cb_in {
/* Handle of CB or 0 if we want to create one */
__u64 cb_handle;
/* HL_CB_OP_* */
__u32 op;
+
/* Size of CB. Maximum size is HL_MAX_CB_SIZE. The minimum size that
* will be allocated, regardless of this parameter's value, is PAGE_SIZE
*/
__u32 cb_size;
+
/* Context ID - Currently not in use */
__u32 ctx_id;
- __u32 pad;
+ /* HL_CB_FLAGS_* */
+ __u32 flags;
};
struct hl_cb_out {
- /* Handle of CB */
- __u64 cb_handle;
+ union {
+ /* Handle of CB */
+ __u64 cb_handle;
+
+ union {
+ /* Information about CB */
+ struct {
+ /* Usage count of CB */
+ __u32 usage_cnt;
+ __u32 pad;
+ };
+
+ /* CB mapped address to device MMU */
+ __u64 device_va;
+ };
+ };
};
union hl_cb_args {
@@ -227,70 +1277,267 @@ union hl_cb_args {
struct hl_cb_out out;
};
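
A creation sketch using the union; HL_IOCTL_CB is defined further down in this header, and error handling is trimmed:

static __u64 create_cb(int dev_fd, __u32 size, __u32 flags)
{
	union hl_cb_args cb = {
		.in = {
			.op = HL_CB_OP_CREATE,
			.cb_size = size,	/* at least PAGE_SIZE is allocated */
			.flags = flags,		/* e.g. HL_CB_FLAGS_MAP */
		},
	};

	if (ioctl(dev_fd, HL_IOCTL_CB, &cb))
		return 0;
	return cb.out.cb_handle;	/* 0 means creation failed */
}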
+/* HL_CS_CHUNK_FLAGS_ values
+ *
+ * HL_CS_CHUNK_FLAGS_USER_ALLOC_CB:
+ *      Indicates if the CB was allocated and mapped by userspace
+ *      (relevant to Greco and above). A user allocated CB is a command buffer,
+ *      allocated by the user, via malloc (or similar). After allocating the
+ *      CB, the user invokes the "memory ioctl" to map the user memory into a
+ *      device virtual address. The user provides this address via the
+ *      cb_handle field. The interface provides the ability to create
+ *      large CBs, which aren't limited to HL_MAX_CB_SIZE, and therefore
+ *      increases the throughput of the PCI-DMA queues. This CB allocation
+ *      method also reduces the use of the Linux DMA-able memory pool, which
+ *      is limited and shared with other Linux sub-systems.
+ */
+#define HL_CS_CHUNK_FLAGS_USER_ALLOC_CB 0x1
+
/*
* This structure size must always be fixed to 64-bytes for backward
* compatibility
*/
struct hl_cs_chunk {
- /*
- * For external queue, this represents a Handle of CB on the Host
- * For internal queue, this represents an SRAM or DRAM address of the
- * internal CB
- */
- __u64 cb_handle;
+ union {
+ /* Goya/Gaudi:
+ * For external queue, this represents a Handle of CB on the
+ * Host.
+ * For internal queue in Goya, this represents an SRAM or
+ * a DRAM address of the internal CB. In Gaudi, this might also
+ * represent a mapped host address of the CB.
+ *
+ * Greco onwards:
+ * For H/W queue, this represents either a Handle of CB on the
+ * Host, or an SRAM, a DRAM, or a mapped host address of the CB.
+ *
+ * A mapped host address is in the device address space, after
+ * a host address was mapped by the device MMU.
+ */
+ __u64 cb_handle;
+
+ /* Relevant only when HL_CS_FLAGS_WAIT or
+ * HL_CS_FLAGS_COLLECTIVE_WAIT is set
+ * This holds address of array of u64 values that contain
+ * signal CS sequence numbers. The wait described by
+ * this job will listen on all those signals
+ * (wait event per signal)
+ */
+ __u64 signal_seq_arr;
+
+ /*
+ * Relevant only when HL_CS_FLAGS_WAIT or
+ * HL_CS_FLAGS_COLLECTIVE_WAIT is set
+ * along with HL_CS_FLAGS_ENCAP_SIGNALS.
+ * This is the CS sequence which has the encapsulated signals.
+ */
+ __u64 encaps_signal_seq;
+ };
+
/* Index of queue to put the CB on */
__u32 queue_index;
- /*
- * Size of command buffer with valid packets
- * Can be smaller then actual CB size
- */
- __u32 cb_size;
+
+ union {
+ /*
+ * Size of command buffer with valid packets
+		 * Can be smaller than the actual CB size
+ */
+ __u32 cb_size;
+
+ /* Relevant only when HL_CS_FLAGS_WAIT or
+ * HL_CS_FLAGS_COLLECTIVE_WAIT is set.
+ * Number of entries in signal_seq_arr
+ */
+ __u32 num_signal_seq_arr;
+
+ /* Relevant only when HL_CS_FLAGS_WAIT or
+ * HL_CS_FLAGS_COLLECTIVE_WAIT is set along
+ * with HL_CS_FLAGS_ENCAP_SIGNALS
+		 * This sets the signal range that the user wants to wait for
+		 * out of the whole reserved signals range.
+		 * E.g. if the signals range is 20 and the user doesn't want
+		 * to wait for signal 8, they set this offset to 7, then
+		 * call the API again with 9, and so on up to 20.
+ */
+ __u32 encaps_signal_offset;
+ };
+
/* HL_CS_CHUNK_FLAGS_* */
__u32 cs_chunk_flags;
+
+ /* Relevant only when HL_CS_FLAGS_COLLECTIVE_WAIT is set.
+ * This holds the collective engine ID. The wait described by this job
+ * will sync with this engine and with all NICs before completion.
+ */
+ __u32 collective_engine_id;
+
/* Align structure to 64 bytes */
- __u32 pad[11];
+ __u32 pad[10];
};
-#define HL_CS_FLAGS_FORCE_RESTORE 0x1
+/* SIGNAL/WAIT/COLLECTIVE_WAIT flags are mutually exclusive */
+#define HL_CS_FLAGS_FORCE_RESTORE 0x1
+#define HL_CS_FLAGS_SIGNAL 0x2
+#define HL_CS_FLAGS_WAIT 0x4
+#define HL_CS_FLAGS_COLLECTIVE_WAIT 0x8
+
+#define HL_CS_FLAGS_TIMESTAMP 0x20
+#define HL_CS_FLAGS_STAGED_SUBMISSION 0x40
+#define HL_CS_FLAGS_STAGED_SUBMISSION_FIRST 0x80
+#define HL_CS_FLAGS_STAGED_SUBMISSION_LAST 0x100
+#define HL_CS_FLAGS_CUSTOM_TIMEOUT 0x200
+#define HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT 0x400
+
+/*
+ * The encapsulated signals CS is merged into the existing CS ioctls.
+ * In order to use this feature, you need to follow the below procedure:
+ * 1. Reserve signals: set the CS type to HL_CS_FLAGS_RESERVE_SIGNALS_ONLY.
+ * The output of this API will be the SOB offset from CFG_BASE.
+ * This address will be used to patch CB cmds to do the signaling for this
+ * SOB by incrementing its value.
+ * To revert the reservation, use the HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY
+ * CS type. Note that this might fail if the SOB value went out of sync,
+ * in case another signaling request to the same SOB occurred between the
+ * reserve and unreserve calls.
+ * 2. Use the staged CS to do the encapsulated signaling jobs.
+ * Use HL_CS_FLAGS_STAGED_SUBMISSION and HL_CS_FLAGS_STAGED_SUBMISSION_FIRST
+ * along with the HL_CS_FLAGS_ENCAP_SIGNALS flag, and set the encaps_signal_offset
+ * field. This offset allows the app to wait on part of the reserved signals.
+ * 3. Use WAIT/COLLECTIVE WAIT CS along with HL_CS_FLAGS_ENCAP_SIGNALS flag
+ * to wait for the encapsulated signals.
+ */
+#define HL_CS_FLAGS_ENCAP_SIGNALS 0x800
+#define HL_CS_FLAGS_RESERVE_SIGNALS_ONLY 0x1000
+#define HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY 0x2000
+
+/*
+ * The engine cores CS is merged into the existing CS ioctls.
+ * Use it to control the engine cores mode.
+ */
+#define HL_CS_FLAGS_ENGINE_CORE_COMMAND 0x4000
#define HL_CS_STATUS_SUCCESS 0
#define HL_MAX_JOBS_PER_CS 512
+/* HL_ENGINE_CORE_ values
+ *
+ * HL_ENGINE_CORE_HALT: engine core halt
+ * HL_ENGINE_CORE_RUN: engine core run
+ */
+#define HL_ENGINE_CORE_HALT (1 << 0)
+#define HL_ENGINE_CORE_RUN (1 << 1)
+
struct hl_cs_in {
- /* this holds address of array of hl_cs_chunk for restore phase */
- __u64 chunks_restore;
- /* this holds address of array of hl_cs_chunk for execution phase */
- __u64 chunks_execute;
- /* this holds address of array of hl_cs_chunk for store phase -
- * Currently not in use
- */
- __u64 chunks_store;
+
+ union {
+ struct {
+ /* this holds address of array of hl_cs_chunk for restore phase */
+ __u64 chunks_restore;
+
+ /* holds address of array of hl_cs_chunk for execution phase */
+ __u64 chunks_execute;
+ };
+
+ /* Valid only when HL_CS_FLAGS_ENGINE_CORE_COMMAND is set */
+ struct {
+ /* this holds address of array of uint32 for engine_cores */
+ __u64 engine_cores;
+
+ /* number of engine cores in engine_cores array */
+ __u32 num_engine_cores;
+
+ /* the core command to be sent towards engine cores */
+ __u32 core_command;
+ };
+ };
+
+ union {
+ /*
+ * Sequence number of a staged submission CS
+ * valid only if HL_CS_FLAGS_STAGED_SUBMISSION is set and
+ * HL_CS_FLAGS_STAGED_SUBMISSION_FIRST is unset.
+ */
+ __u64 seq;
+
+ /*
+ * Encapsulated signals handle id
+ * Valid for two flows:
+ * 1. CS with encapsulated signals:
+ * when HL_CS_FLAGS_STAGED_SUBMISSION and
+ * HL_CS_FLAGS_STAGED_SUBMISSION_FIRST
+ * and HL_CS_FLAGS_ENCAP_SIGNALS are set.
+ * 2. unreserve signals:
+ * valid when HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY is set.
+ */
+ __u32 encaps_sig_handle_id;
+
+ /* Valid only when HL_CS_FLAGS_RESERVE_SIGNALS_ONLY is set */
+ struct {
+ /* Encapsulated signals number */
+ __u32 encaps_signals_count;
+
+ /* Encapsulated signals queue index (stream) */
+ __u32 encaps_signals_q_idx;
+ };
+ };
+
/* Number of chunks in restore phase array. Maximum number is
* HL_MAX_JOBS_PER_CS
*/
__u32 num_chunks_restore;
+
/* Number of chunks in execution array. Maximum number is
* HL_MAX_JOBS_PER_CS
*/
__u32 num_chunks_execute;
- /* Number of chunks in restore phase array - Currently not in use */
- __u32 num_chunks_store;
+
+ /* timeout in seconds - valid only if HL_CS_FLAGS_CUSTOM_TIMEOUT
+ * is set
+ */
+ __u32 timeout;
+
/* HL_CS_FLAGS_* */
__u32 cs_flags;
+
/* Context ID - Currently not in use */
__u32 ctx_id;
+ __u8 pad[4];
};
struct hl_cs_out {
+ union {
+ /*
+ * seq holds the sequence number of the CS to pass to wait
+ * ioctl. All values are valid except for 0 and ULLONG_MAX
+ */
+ __u64 seq;
+
+ /* Valid only when HL_CS_FLAGS_RESERVE_SIGNALS_ONLY is set */
+ struct {
+ /* This is the reserved signal handle id */
+ __u32 handle_id;
+
+ /* This is the signals count */
+ __u32 count;
+ };
+ };
+
+ /* HL_CS_STATUS */
+ __u32 status;
+
/*
- * seq holds the sequence number of the CS to pass to wait ioctl. All
- * values are valid except for 0 and ULLONG_MAX
+ * SOB base address offset
+ * Valid only when HL_CS_FLAGS_RESERVE_SIGNALS_ONLY or HL_CS_FLAGS_SIGNAL is set
*/
- __u64 seq;
- /* HL_CS_STATUS_* */
- __u32 status;
- __u32 pad;
+ __u32 sob_base_addr_offset;
+
+ /*
+ * Count of completed signals in SOB before current signal submission.
+ * Valid only when (HL_CS_FLAGS_ENCAP_SIGNALS & HL_CS_FLAGS_STAGED_SUBMISSION)
+ * or HL_CS_FLAGS_SIGNAL is set
+ */
+ __u16 sob_count_before_submission;
+ __u16 pad[3];
};
union hl_cs_args {
@@ -298,25 +1545,127 @@ union hl_cs_args {
struct hl_cs_out out;
};
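
A sketch of step 1 of the encapsulated-signals procedure described above (reserving signals); the count and queue index are illustrative, and HL_IOCTL_CS is defined further down in this header:

static int reserve_signals(int dev_fd, __u32 *handle_id, __u32 *sob_offset)
{
	union hl_cs_args cs = {
		.in = {
			.encaps_signals_count = 16,	/* illustrative count */
			.encaps_signals_q_idx = 0,	/* stream, illustrative */
			.cs_flags = HL_CS_FLAGS_RESERVE_SIGNALS_ONLY,
		},
	};

	if (ioctl(dev_fd, HL_IOCTL_CS, &cs))
		return -1;
	*handle_id = cs.out.handle_id;
	*sob_offset = cs.out.sob_base_addr_offset;
	return 0;
}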
+#define HL_WAIT_CS_FLAGS_INTERRUPT 0x2
+#define HL_WAIT_CS_FLAGS_INTERRUPT_MASK 0xFFF00000
+#define HL_WAIT_CS_FLAGS_ANY_CQ_INTERRUPT 0xFFF00000
+#define HL_WAIT_CS_FLAGS_ANY_DEC_INTERRUPT 0xFFE00000
+#define HL_WAIT_CS_FLAGS_MULTI_CS 0x4
+#define HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ 0x10
+#define HL_WAIT_CS_FLAGS_REGISTER_INTERRUPT 0x20
+
+#define HL_WAIT_MULTI_CS_LIST_MAX_LEN 32
+
struct hl_wait_cs_in {
- /* Command submission sequence number */
- __u64 seq;
- /* Absolute timeout to wait in microseconds */
- __u64 timeout_us;
+ union {
+ struct {
+ /*
+ * In case of wait_cs, holds the CS sequence number.
+ * In case of wait for multi CS, holds a user pointer to
+ * an array of CS sequence numbers
+ */
+ __u64 seq;
+ /* Absolute timeout to wait for command submission
+ * in microseconds
+ */
+ __u64 timeout_us;
+ };
+
+ struct {
+ union {
+ /* User address for completion comparison.
+ * Upon interrupt, the driver will compare the value pointed
+ * to by this address with the supplied target value.
+ * In order not to perform any comparison, set the address
+ * to all 1s.
+ * Relevant only when HL_WAIT_CS_FLAGS_INTERRUPT is set
+ */
+ __u64 addr;
+
+ /* cq_counters_handle is a handle to a kernel-mapped CB which
+ * contains the cq counters.
+ * Relevant only when HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ is set
+ */
+ __u64 cq_counters_handle;
+ };
+
+ /* Target value for completion comparison */
+ __u64 target;
+ };
+ };
+
/* Context ID - Currently not in use */
__u32 ctx_id;
- __u32 pad;
+
+ /* HL_WAIT_CS_FLAGS_*
+ * If HL_WAIT_CS_FLAGS_INTERRUPT is set, this field should include
+ * an interrupt id according to HL_WAIT_CS_FLAGS_INTERRUPT_MASK
+ *
+ * In order to wait for any CQ interrupt, set the interrupt value to
+ * HL_WAIT_CS_FLAGS_ANY_CQ_INTERRUPT.
+ *
+ * In order to wait for any decoder interrupt, set the interrupt value to
+ * HL_WAIT_CS_FLAGS_ANY_DEC_INTERRUPT.
+ */
+ __u32 flags;
+
+ union {
+ struct {
+ /* Multi CS API info - number of valid entries in the multi-CS array */
+ __u8 seq_arr_len;
+ __u8 pad[7];
+ };
+
+ /* Absolute timeout to wait for an interrupt in microseconds.
+ * Relevant only when HL_WAIT_CS_FLAGS_INTERRUPT is set
+ */
+ __u64 interrupt_timeout_us;
+ };
+
+ /*
+ * cq counter offset inside the counters CB pointed to by cq_counters_handle above.
+ * Upon interrupt, the driver will compare the value at this address
+ * (cq_counters_handle + cq_counters_offset) with the supplied
+ * target value.
+ * Relevant only when HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ is set
+ */
+ __u64 cq_counters_offset;
+
+ /*
+ * Timestamp_handle is a handle to the timestamps buffer.
+ * Relevant only when HL_WAIT_CS_FLAGS_REGISTER_INTERRUPT is set
+ */
+ __u64 timestamp_handle;
+
+ /*
+ * Timestamp_offset is the offset inside the timestamps buffer pointed to by
+ * timestamp_handle above. Upon interrupt, if the cq reached the target
+ * value, the driver will write a timestamp to this offset.
+ * Relevant only when HL_WAIT_CS_FLAGS_REGISTER_INTERRUPT is set
+ */
+ __u64 timestamp_offset;
};
#define HL_WAIT_CS_STATUS_COMPLETED 0
#define HL_WAIT_CS_STATUS_BUSY 1
#define HL_WAIT_CS_STATUS_TIMEDOUT 2
#define HL_WAIT_CS_STATUS_ABORTED 3
-#define HL_WAIT_CS_STATUS_INTERRUPTED 4
+
+#define HL_WAIT_CS_STATUS_FLAG_GONE 0x1
+#define HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD 0x2
struct hl_wait_cs_out {
/* HL_WAIT_CS_STATUS_* */
__u32 status;
+ /* HL_WAIT_CS_STATUS_FLAG* */
+ __u32 flags;
+ /*
+ * valid only if HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD is set
+ * for wait_cs: timestamp of CS completion
+ * for wait_multi_cs: timestamp of FIRST CS completion
+ */
+ __s64 timestamp_nsec;
+ /* multi CS completion bitmap */
+ __u32 cs_completion_map;
__u32 pad;
};
@@ -325,82 +1674,140 @@ union hl_wait_cs_args {
struct hl_wait_cs_out out;
};
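
A minimal sketch of the wait flow, assuming the same <misc/habanalabs.h> header and the HL_IOCTL_WAIT_CS define that appears further below; the helper name is hypothetical and error handling is elided:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>	/* assumed installed header path */

/* Block until the given CS sequence number completes or the timeout expires. */
static int wait_for_cs(int fd, uint64_t seq, uint64_t timeout_us)
{
	union hl_wait_cs_args args;
	int rc;

	memset(&args, 0, sizeof(args));
	args.in.seq = seq;
	args.in.timeout_us = timeout_us;

	rc = ioctl(fd, HL_IOCTL_WAIT_CS, &args);
	if (rc)
		return rc;		/* ETIMEDOUT, EIO, ... per the notes below */

	return args.out.status;		/* one of the HL_WAIT_CS_STATUS_* values */
}
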
-/* Opcode to alloc device memory */
+/* Opcode to allocate device memory */
#define HL_MEM_OP_ALLOC 0
+
/* Opcode to free previously allocated device memory */
#define HL_MEM_OP_FREE 1
-/* Opcode to map host memory */
+
+/* Opcode to map host and device memory */
#define HL_MEM_OP_MAP 2
-/* Opcode to unmap previously mapped host memory */
+
+/* Opcode to unmap previously mapped host and device memory */
#define HL_MEM_OP_UNMAP 3
+/* Opcode to map a hw block */
+#define HL_MEM_OP_MAP_BLOCK 4
+
+/* Opcode to create DMA-BUF object for an existing device memory allocation
+ * and to export an FD of that DMA-BUF back to the caller
+ */
+#define HL_MEM_OP_EXPORT_DMABUF_FD 5
+
+/* Opcode to create a timestamps pool to support user interrupt registration.
+ * The memory will be allocated by the kernel driver: a timestamps buffer,
+ * for which the user gets a handle for mmap, and another internal buffer
+ * used by the driver for registration management.
+ * The memory will be freed when the user closes the file descriptor (ctx close)
+ */
+#define HL_MEM_OP_TS_ALLOC 6
+
/* Memory flags */
#define HL_MEM_CONTIGUOUS 0x1
#define HL_MEM_SHARED 0x2
#define HL_MEM_USERPTR 0x4
-
+#define HL_MEM_FORCE_HINT 0x8
+#define HL_MEM_PREFETCH 0x40
+
+/**
+ * struct hl_mem_in - structure that handles the input args for the memory IOCTL
+ * @union arg: union of structures to be used based on the requested operation
+ * @op: specifies the requested memory operation (one of the HL_MEM_OP_* definitions).
+ * @flags: flags for the memory operation (one of the HL_MEM_* definitions).
+ * For the HL_MEM_OP_EXPORT_DMABUF_FD opcode, this field holds the DMA-BUF file/FD flags.
+ * @ctx_id: context ID - currently not in use.
+ * @num_of_elements: number of timestamp elements used only with HL_MEM_OP_TS_ALLOC opcode.
+ */
struct hl_mem_in {
union {
- /* HL_MEM_OP_ALLOC- allocate device memory */
+ /**
+ * structure for device memory allocation (used with the HL_MEM_OP_ALLOC op)
+ * @mem_size: memory size to allocate
+ * @page_size: page size to use on allocation. When the value is 0, the default
+ * page size will be taken.
+ */
struct {
- /* Size to alloc */
__u64 mem_size;
+ __u64 page_size;
} alloc;
- /* HL_MEM_OP_FREE - free device memory */
+ /**
+ * structure for freeing device memory (used with the HL_MEM_OP_FREE op)
+ * @handle: handle returned from HL_MEM_OP_ALLOC
+ */
struct {
- /* Handle returned from HL_MEM_OP_ALLOC */
__u64 handle;
} free;
- /* HL_MEM_OP_MAP - map device memory */
+ /**
+ * structure for mapping device memory (used with the HL_MEM_OP_MAP op)
+ * @hint_addr: requested virtual address of mapped memory.
+ * The driver will try to map the requested region to this hint
+ * address, as long as the address is valid and not already mapped.
+ * The user should check the address returned by the IOCTL to make
+ * sure the hint address was actually used.
+ * Passing 0 here means that the driver will choose the address itself.
+ * @handle: handle returned from HL_MEM_OP_ALLOC.
+ */
struct {
- /*
- * Requested virtual address of mapped memory.
- * The driver will try to map the requested region to
- * this hint address, as long as the address is valid
- * and not already mapped. The user should check the
- * returned address of the IOCTL to make sure he got
- * the hint address. Passing 0 here means that the
- * driver will choose the address itself.
- */
__u64 hint_addr;
- /* Handle returned from HL_MEM_OP_ALLOC */
__u64 handle;
} map_device;
- /* HL_MEM_OP_MAP - map host memory */
+ /**
+ * structure for mapping host memory (used with the HL_MEM_OP_MAP op)
+ * @host_virt_addr: address of allocated host memory.
+ * @hint_addr: requested virtual address of mapped memory.
+ * The driver will try to map the requested region to this hint
+ * address, as long as the address is valid and not already mapped.
+ * The user should check the address returned by the IOCTL to make
+ * sure the hint address was actually used.
+ * Passing 0 here means that the driver will choose the address itself.
+ * @size: size of allocated host memory.
+ */
struct {
- /* Address of allocated host memory */
__u64 host_virt_addr;
- /*
- * Requested virtual address of mapped memory.
- * The driver will try to map the requested region to
- * this hint address, as long as the address is valid
- * and not already mapped. The user should check the
- * returned address of the IOCTL to make sure he got
- * the hint address. Passing 0 here means that the
- * driver will choose the address itself.
- */
__u64 hint_addr;
- /* Size of allocated host memory */
__u64 mem_size;
} map_host;
- /* HL_MEM_OP_UNMAP - unmap host memory */
+ /**
+ * structure for mapping hw block (used with the HL_MEM_OP_MAP_BLOCK op)
+ * @block_addr: HW block address to map; a handle and size will be returned
+ * to the user and will be used to mmap the relevant block.
+ * Only addresses from the configuration space are allowed.
+ */
+ struct {
+ __u64 block_addr;
+ } map_block;
+
+ /**
+ * structure for unmapping host memory (used with the HL_MEM_OP_UNMAP op)
+ * @device_virt_addr: virtual address returned from HL_MEM_OP_MAP
+ */
struct {
- /* Virtual address returned from HL_MEM_OP_MAP */
__u64 device_virt_addr;
} unmap;
+
+ /**
+ * structure for exporting DMABUF object (used with
+ * the HL_MEM_OP_EXPORT_DMABUF_FD op)
+ * @handle: handle returned from HL_MEM_OP_ALLOC.
+ * In Gaudi, where we don't have an MMU for the device memory, the
+ * driver expects a physical address (instead of a handle) in the
+ * device memory space.
+ * @mem_size: size of memory allocation. Relevant only for GAUDI
+ */
+ struct {
+ __u64 handle;
+ __u64 mem_size;
+ } export_dmabuf_fd;
};
- /* HL_MEM_OP_* */
__u32 op;
- /* HL_MEM_* flags */
__u32 flags;
- /* Context ID - Currently not in use */
__u32 ctx_id;
- __u32 pad;
+ __u32 num_of_elements;
};
struct hl_mem_out {
@@ -413,10 +1820,33 @@ struct hl_mem_out {
__u64 device_virt_addr;
/*
- * Used for HL_MEM_OP_ALLOC. This is the assigned
- * handle for the allocated memory
+ * Used in HL_MEM_OP_ALLOC
+ * This is the assigned handle for the allocated memory
*/
__u64 handle;
+
+ struct {
+ /*
+ * Used in HL_MEM_OP_MAP_BLOCK.
+ * This is the assigned handle for the mapped block
+ */
+ __u64 block_handle;
+
+ /*
+ * Used in HL_MEM_OP_MAP_BLOCK
+ * This is the size of the mapped block
+ */
+ __u32 block_size;
+
+ __u32 pad;
+ };
+
+ /* Returned in HL_MEM_OP_EXPORT_DMABUF_FD. Represents the
+ * DMA-BUF object that was created to describe a memory
+ * allocation on the device's memory space. The FD should be
+ * passed to the importer driver
+ */
+ __s32 fd;
};
};
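
To show how the op/union pairing is meant to be used, here is a hedged two-step sketch that allocates device memory and then maps it. HL_IOCTL_MEMORY is assumed to be the memory IOCTL from this header, the helper name is illustrative, and error handling is minimal:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>	/* assumed installed header path */

static int alloc_and_map(int fd, uint64_t size, uint64_t *dev_va)
{
	union hl_mem_args args;
	uint64_t handle;

	/* Step 1: allocate device memory, get back an opaque handle. */
	memset(&args, 0, sizeof(args));
	args.in.op = HL_MEM_OP_ALLOC;
	args.in.alloc.mem_size = size;
	if (ioctl(fd, HL_IOCTL_MEMORY, &args))
		return -1;
	handle = args.out.handle;

	/* Step 2: map the allocation into the device virtual address space. */
	memset(&args, 0, sizeof(args));
	args.in.op = HL_MEM_OP_MAP;
	args.in.map_device.handle = handle;
	args.in.map_device.hint_addr = 0;	/* let the driver pick the VA */
	if (ioctl(fd, HL_IOCTL_MEMORY, &args))
		return -1;

	*dev_va = args.out.device_virt_addr;
	return 0;
}
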
@@ -477,7 +1907,16 @@ struct hl_debug_params_bmon {
/* Trace source ID */
__u32 id;
- __u32 pad;
+
+ /* Control register */
+ __u32 control;
+
+ /* Two more address ranges that the user can request to filter */
+ __u64 start_addr2;
+ __u64 end_addr2;
+
+ __u64 start_addr3;
+ __u64 end_addr3;
};
struct hl_debug_params_spmu {
@@ -486,7 +1925,11 @@ struct hl_debug_params_spmu {
/* Number of event types selection */
__u32 event_types_num;
- __u32 pad;
+
+ /* TRC configuration register values */
+ __u32 pmtrc_val;
+ __u32 trc_ctrl_host_val;
+ __u32 trc_en_host_val;
};
/* Opcode for ETR component */
@@ -562,6 +2005,12 @@ struct hl_debug_args {
* When creating a new CB, the IOCTL returns a handle of it, and the user-space
 * process needs to use that handle to mmap the buffer so it can access it.
*
+ * In some instances, the device must access the command buffer through the
+ * device's MMU, and thus its memory should be mapped. In these cases, the user
+ * can indicate to the driver that such a mapping is required.
+ * The resulting device virtual address will be used internally by the driver,
+ * and won't be returned to user.
+ *
*/
#define HL_IOCTL_CB \
_IOWR('H', 0x02, union hl_cb_args)
@@ -574,22 +2023,29 @@ struct hl_debug_args {
* Each JOB will be enqueued on a specific queue, according to the user's input.
 * There can be more than one JOB per queue.
*
- * The CS IOCTL will receive three sets of JOBS. One set is for "restore" phase,
- * a second set is for "execution" phase and a third set is for "store" phase.
+ * The CS IOCTL will receive two sets of JOBS. One set is for "restore" phase
+ * and a second set is for "execution" phase.
* The JOBS on the "restore" phase are enqueued only after context-switch
 * (or if it's the first CS for this context). The user can also order the
* driver to run the "restore" phase explicitly
*
+ * Goya/Gaudi:
* There are two types of queues - external and internal. External queues
* are DMA queues which transfer data from/to the Host. All other queues are
* internal. The driver will get completion notifications from the device only
* on JOBS which are enqueued in the external queues.
*
+ * Greco onwards:
+ * There is a single type of queue for all types of engines, either DMA engines
+ * for transfers from/to the host or inside the device, or compute engines.
+ * The driver will get completion notifications from the device for all queues.
+ *
* For jobs on external queues, the user needs to create command buffers
* through the CB ioctl and give the CB's handle to the CS ioctl. For jobs on
* internal queues, the user needs to prepare a "command buffer" with packets
- * on either the SRAM or DRAM, and give the device address of that buffer to
- * the CS ioctl.
+ * on either the device SRAM/DRAM or the host, and give the device address of
+ * that buffer to the CS ioctl.
+ * For jobs on H/W queues both options of command buffers are valid.
*
* This IOCTL is asynchronous in regard to the actual execution of the CS. This
* means it returns immediately after ALL the JOBS were enqueued on their
@@ -598,10 +2054,10 @@ struct hl_debug_args {
*
* Upon successful enqueue, the IOCTL returns a sequence number which the user
* can use with the "Wait for CS" IOCTL to check whether the handle's CS
- * external JOBS have been completed. Note that if the CS has internal JOBS
+ * non-internal JOBS have been completed. Note that if the CS has internal JOBS
* which can execute AFTER the external JOBS have finished, the driver might
* report that the CS has finished executing BEFORE the internal JOBS have
- * actually finish executing.
+ * actually finished executing.
*
* Even though the sequence number increments per CS, the user can NOT
* automatically assume that if CS with sequence number N finished, then CS
@@ -620,6 +2076,9 @@ struct hl_debug_args {
* inside the kernel until the CS has finished or until the user-requested
* timeout has expired.
*
+ * If the timeout value is 0, the driver won't sleep at all. It will check
+ * the status of the CS and return immediately.
+ *
* The return value of the IOCTL is a standard Linux error code. The possible
* values are:
*
@@ -629,7 +2088,8 @@ struct hl_debug_args {
* EIO - The CS was aborted (usually because the device was reset)
* ENODEV - The device wants to do hard-reset (so user need to close FD)
*
- * The driver also returns a custom define inside the IOCTL which can be:
+ * The driver also returns a custom define in case the IOCTL call returned 0.
+ * The define can be one of the following:
*
* HL_WAIT_CS_STATUS_COMPLETED - The CS has been completed successfully (0)
* HL_WAIT_CS_STATUS_BUSY - The CS is still executing (0)
@@ -637,8 +2097,6 @@ struct hl_debug_args {
* (ETIMEDOUT)
* HL_WAIT_CS_STATUS_ABORTED - The CS was aborted, usually because the
* device was reset (EIO)
- * HL_WAIT_CS_STATUS_INTERRUPTED - Waiting for the CS was interrupted (EINTR)
- *
*/
#define HL_IOCTL_WAIT_CS \
diff --git a/include/uapi/misc/uacce/hisi_qm.h b/include/uapi/misc/uacce/hisi_qm.h
new file mode 100644
index 000000000000..3e66dbc2f323
--- /dev/null
+++ b/include/uapi/misc/uacce/hisi_qm.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _UAPI_HISI_QM_H
+#define _UAPI_HISI_QM_H
+
+#include <linux/types.h>
+
+/**
+ * struct hisi_qp_ctx - User data for hisi qp.
+ * @id: qp_index returned to user space
+ * @qc_type: Accelerator algorithm type
+ */
+struct hisi_qp_ctx {
+ __u16 id;
+ __u16 qc_type;
+};
+
+/**
+ * struct hisi_qp_info - User data for hisi qp.
+ * @sqe_size: Submission queue element size
+ * @sq_depth: The number of sqe
+ * @cq_depth: The number of cqe
+ * @reserved: Reserved data
+ */
+struct hisi_qp_info {
+ __u32 sqe_size;
+ __u16 sq_depth;
+ __u16 cq_depth;
+ __u64 reserved;
+};
+
+#define HISI_QM_API_VER_BASE "hisi_qm_v1"
+#define HISI_QM_API_VER2_BASE "hisi_qm_v2"
+#define HISI_QM_API_VER3_BASE "hisi_qm_v3"
+
+/* UACCE_CMD_QM_SET_QP_CTX: Set qp algorithm type */
+#define UACCE_CMD_QM_SET_QP_CTX _IOWR('H', 10, struct hisi_qp_ctx)
+/* UACCE_CMD_QM_SET_QP_INFO: Set qp depth and BD size */
+#define UACCE_CMD_QM_SET_QP_INFO _IOWR('H', 11, struct hisi_qp_info)
+#endif
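
For illustration, configuring a queue with these ioctls from user space could look like the hedged sketch below. The header path and helper name are assumptions, and a real caller would obtain queue_fd by opening the uacce queue device:

#include <string.h>
#include <sys/ioctl.h>
#include <misc/uacce/hisi_qm.h>	/* assumed installed header path */

/* Set the algorithm type on a hisi qp; returns the qp index on success. */
static int set_qp_type(int queue_fd, __u16 alg_type)
{
	struct hisi_qp_ctx ctx;

	memset(&ctx, 0, sizeof(ctx));
	ctx.qc_type = alg_type;

	if (ioctl(queue_fd, UACCE_CMD_QM_SET_QP_CTX, &ctx))
		return -1;

	return ctx.id;	/* qp_index filled in by the driver */
}
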
diff --git a/include/uapi/misc/uacce/uacce.h b/include/uapi/misc/uacce/uacce.h
new file mode 100644
index 000000000000..cc7185678f47
--- /dev/null
+++ b/include/uapi/misc/uacce/uacce.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _UAPIUUACCE_H
+#define _UAPIUUACCE_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * UACCE_CMD_START_Q: Start queue
+ */
+#define UACCE_CMD_START_Q _IO('W', 0)
+
+/*
+ * UACCE_CMD_PUT_Q:
+ * The user actively stops the queue and frees its resources immediately.
+ * This is an optimization, since closing the fd may be delayed
+ */
+#define UACCE_CMD_PUT_Q _IO('W', 1)
+
+/*
+ * UACCE Device flags:
+ * UACCE_DEV_SVA: Shared Virtual Addresses
+ * Support PASID
+ * Support device page faults (PCI PRI or SMMU Stall)
+ */
+#define UACCE_DEV_SVA BIT(0)
+
+/**
+ * enum uacce_qfrt: queue file region type
+ * @UACCE_QFRT_MMIO: device mmio region
+ * @UACCE_QFRT_DUS: device user share region
+ */
+enum uacce_qfrt {
+ UACCE_QFRT_MMIO = 0,
+ UACCE_QFRT_DUS = 1,
+};
+
+#endif
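
A hedged usage sketch for the uacce queue lifecycle follows. The encoding of the region type in the mmap page offset is an assumption based on how enum uacce_qfrt is typically consumed, and the header path and helper name are illustrative:

#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
#include <misc/uacce/uacce.h>	/* assumed installed header path */

/* Map the device-user-share region of a queue, then start the queue. */
static void *start_queue(int q_fd, size_t dus_size)
{
	/* Assumption: the region type is selected via the mmap page offset,
	 * i.e. offset = UACCE_QFRT_DUS * page_size for the DUS region.
	 */
	off_t off = (off_t)UACCE_QFRT_DUS * sysconf(_SC_PAGESIZE);
	void *dus = mmap(NULL, dus_size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, q_fd, off);

	if (dus == MAP_FAILED)
		return NULL;

	if (ioctl(q_fd, UACCE_CMD_START_Q)) {
		munmap(dus, dus_size);
		return NULL;
	}
	return dus;
}
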
diff --git a/include/uapi/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h
index 47ffe3208c27..714d55b49d2a 100644
--- a/include/uapi/mtd/mtd-abi.h
+++ b/include/uapi/mtd/mtd-abi.h
@@ -55,9 +55,9 @@ struct mtd_oob_buf64 {
* @MTD_OPS_RAW: data are transferred as-is, with no error correction;
* this mode implies %MTD_OPS_PLACE_OOB
*
- * These modes can be passed to ioctl(MEMWRITE) and are also used internally.
- * See notes on "MTD file modes" for discussion on %MTD_OPS_RAW vs.
- * %MTD_FILE_MODE_RAW.
+ * These modes can be passed to ioctl(MEMWRITE) and ioctl(MEMREAD); they are
+ * also used internally. See notes on "MTD file modes" for discussion on
+ * %MTD_OPS_RAW vs. %MTD_FILE_MODE_RAW.
*/
enum {
MTD_OPS_PLACE_OOB = 0,
@@ -69,8 +69,8 @@ enum {
* struct mtd_write_req - data structure for requesting a write operation
*
* @start: start address
- * @len: length of data buffer
- * @ooblen: length of OOB buffer
+ * @len: length of data buffer (only lower 32 bits are used)
+ * @ooblen: length of OOB buffer (only lower 32 bits are used)
* @usr_data: user-provided data buffer
* @usr_oob: user-provided OOB buffer
* @mode: MTD mode (see "MTD operation modes")
@@ -91,6 +91,53 @@ struct mtd_write_req {
__u8 padding[7];
};
+/**
+ * struct mtd_read_req_ecc_stats - ECC statistics for a read operation
+ *
+ * @uncorrectable_errors: the number of uncorrectable errors that happened
+ * during the read operation
+ * @corrected_bitflips: the number of bitflips corrected during the read
+ * operation
+ * @max_bitflips: the maximum number of bitflips detected in any single ECC
+ * step for the data read during the operation; this information
+ * can be used to decide whether the data stored in a specific
+ * region of the MTD device should be moved somewhere else to
+ * avoid data loss.
+ */
+struct mtd_read_req_ecc_stats {
+ __u32 uncorrectable_errors;
+ __u32 corrected_bitflips;
+ __u32 max_bitflips;
+};
+
+/**
+ * struct mtd_read_req - data structure for requesting a read operation
+ *
+ * @start: start address
+ * @len: length of data buffer (only lower 32 bits are used)
+ * @ooblen: length of OOB buffer (only lower 32 bits are used)
+ * @usr_data: user-provided data buffer
+ * @usr_oob: user-provided OOB buffer
+ * @mode: MTD mode (see "MTD operation modes")
+ * @padding: reserved, must be set to 0
+ * @ecc_stats: ECC statistics for the read operation
+ *
+ * This structure supports ioctl(MEMREAD) operations, allowing data and/or OOB
+ * reads in various modes. To read from OOB-only, set @usr_data == NULL, and to
+ * read data-only, set @usr_oob == NULL. However, setting both @usr_data and
+ * @usr_oob to NULL is not allowed.
+ */
+struct mtd_read_req {
+ __u64 start;
+ __u64 len;
+ __u64 ooblen;
+ __u64 usr_data;
+ __u64 usr_oob;
+ __u8 mode;
+ __u8 padding[7];
+ struct mtd_read_req_ecc_stats ecc_stats;
+};
+
#define MTD_ABSENT 0
#define MTD_RAM 1
#define MTD_ROM 2
@@ -104,6 +151,7 @@ struct mtd_write_req {
#define MTD_BIT_WRITEABLE 0x800 /* Single bits can be flipped */
#define MTD_NO_ERASE 0x1000 /* No erase necessary */
#define MTD_POWERUP_LOCK 0x2000 /* Always locked after reset */
+#define MTD_SLC_ON_MLC_EMULATION 0x4000 /* Emulate SLC behavior on MLC NANDs */
/* Some common devices / combinations of capabilities */
#define MTD_CAP_ROM 0
@@ -204,6 +252,14 @@ struct otp_info {
* without OOB, e.g., NOR flash.
*/
#define MEMWRITE _IOWR('M', 24, struct mtd_write_req)
+/* Erase a given range of user data (must be in mode %MTD_FILE_MODE_OTP_USER) */
+#define OTPERASE _IOW('M', 25, struct otp_info)
+/*
+ * Most generic read interface; can read in-band and/or out-of-band in various
+ * modes (see "struct mtd_read_req"). This ioctl is not supported for flashes
+ * without OOB, e.g., NOR flash.
+ */
+#define MEMREAD _IOWR('M', 26, struct mtd_read_req)
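
As a hedged example of the new read interface, the sketch below performs a data-only ioctl(MEMREAD) and returns the ECC statistics; the helper name is hypothetical and error handling is minimal:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <mtd/mtd-abi.h>

static int mtd_read_ecc(int fd, uint64_t off, void *buf, uint32_t len,
			struct mtd_read_req_ecc_stats *stats)
{
	struct mtd_read_req req;

	memset(&req, 0, sizeof(req));
	req.start = off;
	req.len = len;
	req.usr_data = (uint64_t)(uintptr_t)buf;
	req.usr_oob = 0;		/* data-only read, no OOB buffer */
	req.mode = MTD_OPS_PLACE_OOB;

	if (ioctl(fd, MEMREAD, &req))
		return -1;

	*stats = req.ecc_stats;	/* uncorrectable errors, bitflips, max per step */
	return 0;
}
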
/*
* Obsolete legacy interface. Keep it in order not to break userspace
@@ -261,14 +317,15 @@ struct mtd_ecc_stats {
* @MTD_FILE_MODE_OTP_USER: OTP enabled in user mode
* @MTD_FILE_MODE_RAW: OTP disabled, ECC disabled
*
- * These modes can be set via ioctl(MTDFILEMODE). The mode mode will be retained
+ * These modes can be set via ioctl(MTDFILEMODE). The mode will be retained
* separately for each open file descriptor.
*
* Note: %MTD_FILE_MODE_RAW provides the same functionality as %MTD_OPS_RAW -
* raw access to the flash, without error correction or autoplacement schemes.
* Wherever possible, the MTD_OPS_* mode will override the MTD_FILE_MODE_* mode
- * (e.g., when using ioctl(MEMWRITE)), but in some cases, the MTD_FILE_MODE is
- * used out of necessity (e.g., `write()', ioctl(MEMWRITEOOB64)).
+ * (e.g., when using ioctl(MEMWRITE) or ioctl(MEMREAD)), but in some cases, the
+ * MTD_FILE_MODE is used out of necessity (e.g., `write()',
+ * ioctl(MEMWRITEOOB64)).
*/
enum mtd_file_modes {
MTD_FILE_MODE_NORMAL = MTD_OTP_OFF,
diff --git a/include/uapi/mtd/ubi-user.h b/include/uapi/mtd/ubi-user.h
index b69e9ba6742b..dcb179de4358 100644
--- a/include/uapi/mtd/ubi-user.h
+++ b/include/uapi/mtd/ubi-user.h
@@ -247,6 +247,7 @@ enum {
* @vid_hdr_offset: VID header offset (use defaults if %0)
* @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
* @padding: reserved for future, not used, has to be zeroed
+ * @disable_fm: whether to disable fastmap
*
* This data structure is used to specify MTD device UBI has to attach and the
* parameters it has to use. The number which should be assigned to the new UBI
@@ -281,13 +282,18 @@ enum {
* eraseblocks for new bad eraseblocks, but attempts to use available
* eraseblocks (if any). The accepted range is 0-768. If 0 is given, the
* default kernel value of %CONFIG_MTD_UBI_BEB_LIMIT will be used.
+ *
+ * If @disable_fm is not zero, UBI doesn't create a new fastmap even if the
+ * module param 'fm_autoconvert' is set, and any existing fastmap will be
+ * destroyed after a full scan.
*/
struct ubi_attach_req {
__s32 ubi_num;
__s32 mtd_num;
__s32 vid_hdr_offset;
__s16 max_beb_per1024;
- __s8 padding[10];
+ __s8 disable_fm;
+ __s8 padding[9];
};
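
A hedged sketch of attaching an MTD device with fastmap disabled; UBI_IOCATT and UBI_DEV_NUM_AUTO are the attach ioctl and auto-numbering constant from this header, issued against /dev/ubi_ctrl, and the helper name is illustrative:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/ubi-user.h>

static int attach_no_fastmap(int mtd_num)
{
	struct ubi_attach_req req;
	int rc, ctrl_fd = open("/dev/ubi_ctrl", O_RDONLY);

	if (ctrl_fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.ubi_num = UBI_DEV_NUM_AUTO;	/* let UBI pick the device number */
	req.mtd_num = mtd_num;
	req.disable_fm = 1;		/* force a full scan, no fastmap */

	rc = ioctl(ctrl_fd, UBI_IOCATT, &req);
	close(ctrl_fd);
	return rc;
}
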
/*
diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h
index dc52e3cf574c..b1de99bf56ce 100644
--- a/include/uapi/rdma/bnxt_re-abi.h
+++ b/include/uapi/rdma/bnxt_re-abi.h
@@ -49,7 +49,14 @@
#define BNXT_RE_CHIP_ID0_CHIP_MET_SFT 0x18
enum {
- BNXT_RE_UCNTX_CMASK_HAVE_CCTX = 0x1ULL
+ BNXT_RE_UCNTX_CMASK_HAVE_CCTX = 0x1ULL,
+ BNXT_RE_UCNTX_CMASK_HAVE_MODE = 0x02ULL,
+};
+
+enum bnxt_re_wqe_mode {
+ BNXT_QPLIB_WQE_MODE_STATIC = 0x00,
+ BNXT_QPLIB_WQE_MODE_VARIABLE = 0x01,
+ BNXT_QPLIB_WQE_MODE_INVALID = 0x02,
};
struct bnxt_re_uctx_resp {
@@ -62,6 +69,8 @@ struct bnxt_re_uctx_resp {
__aligned_u64 comp_mask;
__u32 chip_id0;
__u32 chip_id1;
+ __u32 mode;
+ __u32 rsvd1; /* padding */
};
/*
diff --git a/include/uapi/rdma/efa-abi.h b/include/uapi/rdma/efa-abi.h
index 53b6e2036a9b..163ac79556d6 100644
--- a/include/uapi/rdma/efa-abi.h
+++ b/include/uapi/rdma/efa-abi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef EFA_ABI_USER_H
@@ -20,6 +20,16 @@
* hex bit offset of the field.
*/
+enum {
+ EFA_ALLOC_UCONTEXT_CMD_COMP_TX_BATCH = 1 << 0,
+ EFA_ALLOC_UCONTEXT_CMD_COMP_MIN_SQ_WR = 1 << 1,
+};
+
+struct efa_ibv_alloc_ucontext_cmd {
+ __u32 comp_mask;
+ __u8 reserved_20[4];
+};
+
enum efa_ibv_user_cmds_supp_udata {
EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE = 1 << 0,
EFA_USER_CMDS_SUPP_UDATA_CREATE_AH = 1 << 1,
@@ -31,6 +41,9 @@ struct efa_ibv_alloc_ucontext_resp {
__u16 sub_cqs_per_cq;
__u16 inline_buf_size;
__u32 max_llq_size; /* bytes */
+ __u16 max_tx_batch; /* units of 64 bytes */
+ __u16 min_sq_wr;
+ __u8 reserved_a0[4];
};
struct efa_ibv_alloc_pd_resp {
@@ -39,11 +52,21 @@ struct efa_ibv_alloc_pd_resp {
__u8 reserved_30[2];
};
+enum {
+ EFA_CREATE_CQ_WITH_COMPLETION_CHANNEL = 1 << 0,
+ EFA_CREATE_CQ_WITH_SGID = 1 << 1,
+};
+
struct efa_ibv_create_cq {
__u32 comp_mask;
__u32 cq_entry_size;
__u16 num_sub_cqs;
- __u8 reserved_50[6];
+ __u8 flags;
+ __u8 reserved_58[5];
+};
+
+enum {
+ EFA_CREATE_CQ_RESP_DB_OFF = 1 << 0,
};
struct efa_ibv_create_cq_resp {
@@ -52,7 +75,9 @@ struct efa_ibv_create_cq_resp {
__aligned_u64 q_mmap_key;
__aligned_u64 q_mmap_size;
__u16 cq_idx;
- __u8 reserved_d0[6];
+ __u8 reserved_d0[2];
+ __u32 db_off;
+ __aligned_u64 db_mmap_key;
};
enum {
@@ -92,6 +117,9 @@ struct efa_ibv_create_ah_resp {
enum {
EFA_QUERY_DEVICE_CAPS_RDMA_READ = 1 << 0,
+ EFA_QUERY_DEVICE_CAPS_RNR_RETRY = 1 << 1,
+ EFA_QUERY_DEVICE_CAPS_CQ_NOTIFICATIONS = 1 << 2,
+ EFA_QUERY_DEVICE_CAPS_CQ_WITH_SGID = 1 << 3,
};
struct efa_ibv_ex_query_device_resp {
diff --git a/include/uapi/rdma/erdma-abi.h b/include/uapi/rdma/erdma-abi.h
new file mode 100644
index 000000000000..b7a0222f978f
--- /dev/null
+++ b/include/uapi/rdma/erdma-abi.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2020-2022, Alibaba Group.
+ */
+
+#ifndef __ERDMA_USER_H__
+#define __ERDMA_USER_H__
+
+#include <linux/types.h>
+
+#define ERDMA_ABI_VERSION 1
+
+struct erdma_ureq_create_cq {
+ __aligned_u64 db_record_va;
+ __aligned_u64 qbuf_va;
+ __u32 qbuf_len;
+ __u32 rsvd0;
+};
+
+struct erdma_uresp_create_cq {
+ __u32 cq_id;
+ __u32 num_cqe;
+};
+
+struct erdma_ureq_create_qp {
+ __aligned_u64 db_record_va;
+ __aligned_u64 qbuf_va;
+ __u32 qbuf_len;
+ __u32 rsvd0;
+};
+
+struct erdma_uresp_create_qp {
+ __u32 qp_id;
+ __u32 num_sqe;
+ __u32 num_rqe;
+ __u32 rq_offset;
+};
+
+struct erdma_uresp_alloc_ctx {
+ __u32 dev_id;
+ __u32 pad;
+ __u32 sdb_type;
+ __u32 sdb_offset;
+ __aligned_u64 sdb;
+ __aligned_u64 rdb;
+ __aligned_u64 cdb;
+};
+
+#endif
diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h
index 01ac5853d9ac..1106a7c90b29 100644
--- a/include/uapi/rdma/hfi/hfi1_user.h
+++ b/include/uapi/rdma/hfi/hfi1_user.h
@@ -6,7 +6,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -109,6 +109,7 @@
#define HFI1_CAP_OPFN (1UL << 16) /* Enable the OPFN protocol */
#define HFI1_CAP_SDMA_HEAD_CHECK (1UL << 17) /* SDMA head checking */
#define HFI1_CAP_EARLY_CREDIT_RETURN (1UL << 18) /* early credit return */
+#define HFI1_CAP_AIP (1UL << 19) /* Enable accelerated IP */
#define HFI1_RCVHDR_ENTSIZE_2 (1UL << 0)
#define HFI1_RCVHDR_ENTSIZE_16 (1UL << 1)
@@ -179,7 +180,7 @@ struct hfi1_sdma_comp_entry {
struct hfi1_status {
__aligned_u64 dev; /* device/hw status bits */
__aligned_u64 port; /* port state and status bits */
- char freezemsg[0];
+ char freezemsg[];
};
enum sdma_req_opcode {
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index eb76b38a00d4..f6fde06db4b4 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -39,6 +39,12 @@
struct hns_roce_ib_create_cq {
__aligned_u64 buf_addr;
__aligned_u64 db_addr;
+ __u32 cqe_size;
+ __u32 reserved;
+};
+
+enum hns_roce_cq_cap_flags {
+ HNS_ROCE_CQ_FLAG_RECORD_DB = 1 << 0,
};
struct hns_roce_ib_create_cq_resp {
@@ -67,12 +73,22 @@ struct hns_roce_ib_create_qp {
__aligned_u64 sdb_addr;
};
+enum hns_roce_qp_cap_flags {
+ HNS_ROCE_QP_CAP_RQ_RECORD_DB = 1 << 0,
+ HNS_ROCE_QP_CAP_SQ_RECORD_DB = 1 << 1,
+ HNS_ROCE_QP_CAP_OWNER_DB = 1 << 2,
+ HNS_ROCE_QP_CAP_DIRECT_WQE = 1 << 5,
+};
+
struct hns_roce_ib_create_qp_resp {
__aligned_u64 cap_flags;
+ __aligned_u64 dwqe_mmap_key;
};
struct hns_roce_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
+ __u32 cqe_size;
+ __u32 srq_tab_size;
__u32 reserved;
};
diff --git a/include/uapi/rdma/i40iw-abi.h b/include/uapi/rdma/i40iw-abi.h
deleted file mode 100644
index 79890baa6fdb..000000000000
--- a/include/uapi/rdma/i40iw-abi.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (c) 2006 - 2016 Intel Corporation. All rights reserved.
- * Copyright (c) 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef I40IW_ABI_H
-#define I40IW_ABI_H
-
-#include <linux/types.h>
-
-#define I40IW_ABI_VER 5
-
-struct i40iw_alloc_ucontext_req {
- __u32 reserved32;
- __u8 userspace_ver;
- __u8 reserved8[3];
-};
-
-struct i40iw_alloc_ucontext_resp {
- __u32 max_pds; /* maximum pds allowed for this user process */
- __u32 max_qps; /* maximum qps allowed for this user process */
- __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
- __u8 kernel_ver;
- __u8 reserved[3];
-};
-
-struct i40iw_alloc_pd_resp {
- __u32 pd_id;
- __u8 reserved[4];
-};
-
-struct i40iw_create_cq_req {
- __aligned_u64 user_cq_buffer;
- __aligned_u64 user_shadow_area;
-};
-
-struct i40iw_create_qp_req {
- __aligned_u64 user_wqe_buffers;
- __aligned_u64 user_compl_ctx;
-
- /* UDA QP PHB */
- __aligned_u64 user_sq_phb; /* place for VA of the sq phb buff */
- __aligned_u64 user_rq_phb; /* place for VA of the rq phb buff */
-};
-
-enum i40iw_memreg_type {
- IW_MEMREG_TYPE_MEM = 0x0000,
- IW_MEMREG_TYPE_QP = 0x0001,
- IW_MEMREG_TYPE_CQ = 0x0002,
-};
-
-struct i40iw_mem_reg_req {
- __u16 reg_type; /* Memory, QP or CQ */
- __u16 cq_pages;
- __u16 rq_pages;
- __u16 sq_pages;
-};
-
-struct i40iw_create_cq_resp {
- __u32 cq_id;
- __u32 cq_size;
- __u32 mmap_db_index;
- __u32 reserved;
-};
-
-struct i40iw_create_qp_resp {
- __u32 qp_id;
- __u32 actual_sq_size;
- __u32 actual_rq_size;
- __u32 i40iw_drv_opt;
- __u16 push_idx;
- __u8 lsmm;
- __u8 rsvd2;
-};
-
-#endif
diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h
index d4ddbe4e696c..dafc7ebe545b 100644
--- a/include/uapi/rdma/ib_user_ioctl_cmds.h
+++ b/include/uapi/rdma/ib_user_ioctl_cmds.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2020, Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -69,6 +70,9 @@ enum uverbs_methods_device {
UVERBS_METHOD_INFO_HANDLES,
UVERBS_METHOD_QUERY_PORT,
UVERBS_METHOD_GET_CONTEXT,
+ UVERBS_METHOD_QUERY_CONTEXT,
+ UVERBS_METHOD_QUERY_GID_TABLE,
+ UVERBS_METHOD_QUERY_GID_ENTRY,
};
enum uverbs_attrs_invoke_write_cmd_attr_ids {
@@ -87,6 +91,11 @@ enum uverbs_attrs_get_context_attr_ids {
UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT,
};
+enum uverbs_attrs_query_context_attr_ids {
+ UVERBS_ATTR_QUERY_CONTEXT_NUM_COMP_VECTORS,
+ UVERBS_ATTR_QUERY_CONTEXT_CORE_SUPPORT,
+};
+
enum uverbs_attrs_create_cq_cmd_attr_ids {
UVERBS_ATTR_CREATE_CQ_HANDLE,
UVERBS_ATTR_CREATE_CQ_CQE,
@@ -95,6 +104,7 @@ enum uverbs_attrs_create_cq_cmd_attr_ids {
UVERBS_ATTR_CREATE_CQ_COMP_VECTOR,
UVERBS_ATTR_CREATE_CQ_FLAGS,
UVERBS_ATTR_CREATE_CQ_RESP_CQE,
+ UVERBS_ATTR_CREATE_CQ_EVENT_FD,
};
enum uverbs_attrs_destroy_cq_cmd_attr_ids {
@@ -120,11 +130,91 @@ enum uverbs_attrs_destroy_flow_action_esp {
UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE,
};
+enum uverbs_attrs_create_qp_cmd_attr_ids {
+ UVERBS_ATTR_CREATE_QP_HANDLE,
+ UVERBS_ATTR_CREATE_QP_XRCD_HANDLE,
+ UVERBS_ATTR_CREATE_QP_PD_HANDLE,
+ UVERBS_ATTR_CREATE_QP_SRQ_HANDLE,
+ UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE,
+ UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE,
+ UVERBS_ATTR_CREATE_QP_IND_TABLE_HANDLE,
+ UVERBS_ATTR_CREATE_QP_USER_HANDLE,
+ UVERBS_ATTR_CREATE_QP_CAP,
+ UVERBS_ATTR_CREATE_QP_TYPE,
+ UVERBS_ATTR_CREATE_QP_FLAGS,
+ UVERBS_ATTR_CREATE_QP_SOURCE_QPN,
+ UVERBS_ATTR_CREATE_QP_EVENT_FD,
+ UVERBS_ATTR_CREATE_QP_RESP_CAP,
+ UVERBS_ATTR_CREATE_QP_RESP_QP_NUM,
+};
+
+enum uverbs_attrs_destroy_qp_cmd_attr_ids {
+ UVERBS_ATTR_DESTROY_QP_HANDLE,
+ UVERBS_ATTR_DESTROY_QP_RESP,
+};
+
+enum uverbs_methods_qp {
+ UVERBS_METHOD_QP_CREATE,
+ UVERBS_METHOD_QP_DESTROY,
+};
+
+enum uverbs_attrs_create_srq_cmd_attr_ids {
+ UVERBS_ATTR_CREATE_SRQ_HANDLE,
+ UVERBS_ATTR_CREATE_SRQ_PD_HANDLE,
+ UVERBS_ATTR_CREATE_SRQ_XRCD_HANDLE,
+ UVERBS_ATTR_CREATE_SRQ_CQ_HANDLE,
+ UVERBS_ATTR_CREATE_SRQ_USER_HANDLE,
+ UVERBS_ATTR_CREATE_SRQ_MAX_WR,
+ UVERBS_ATTR_CREATE_SRQ_MAX_SGE,
+ UVERBS_ATTR_CREATE_SRQ_LIMIT,
+ UVERBS_ATTR_CREATE_SRQ_MAX_NUM_TAGS,
+ UVERBS_ATTR_CREATE_SRQ_TYPE,
+ UVERBS_ATTR_CREATE_SRQ_EVENT_FD,
+ UVERBS_ATTR_CREATE_SRQ_RESP_MAX_WR,
+ UVERBS_ATTR_CREATE_SRQ_RESP_MAX_SGE,
+ UVERBS_ATTR_CREATE_SRQ_RESP_SRQ_NUM,
+};
+
+enum uverbs_attrs_destroy_srq_cmd_attr_ids {
+ UVERBS_ATTR_DESTROY_SRQ_HANDLE,
+ UVERBS_ATTR_DESTROY_SRQ_RESP,
+};
+
+enum uverbs_methods_srq {
+ UVERBS_METHOD_SRQ_CREATE,
+ UVERBS_METHOD_SRQ_DESTROY,
+};
+
enum uverbs_methods_cq {
UVERBS_METHOD_CQ_CREATE,
UVERBS_METHOD_CQ_DESTROY,
};
+enum uverbs_attrs_create_wq_cmd_attr_ids {
+ UVERBS_ATTR_CREATE_WQ_HANDLE,
+ UVERBS_ATTR_CREATE_WQ_PD_HANDLE,
+ UVERBS_ATTR_CREATE_WQ_CQ_HANDLE,
+ UVERBS_ATTR_CREATE_WQ_USER_HANDLE,
+ UVERBS_ATTR_CREATE_WQ_TYPE,
+ UVERBS_ATTR_CREATE_WQ_EVENT_FD,
+ UVERBS_ATTR_CREATE_WQ_MAX_WR,
+ UVERBS_ATTR_CREATE_WQ_MAX_SGE,
+ UVERBS_ATTR_CREATE_WQ_FLAGS,
+ UVERBS_ATTR_CREATE_WQ_RESP_MAX_WR,
+ UVERBS_ATTR_CREATE_WQ_RESP_MAX_SGE,
+ UVERBS_ATTR_CREATE_WQ_RESP_WQ_NUM,
+};
+
+enum uverbs_attrs_destroy_wq_cmd_attr_ids {
+ UVERBS_ATTR_DESTROY_WQ_HANDLE,
+ UVERBS_ATTR_DESTROY_WQ_RESP,
+};
+
+enum uverbs_methods_wq {
+ UVERBS_METHOD_WQ_CREATE,
+ UVERBS_METHOD_WQ_DESTROY,
+};
+
enum uverbs_methods_actions_flow_action_ops {
UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
UVERBS_METHOD_FLOW_ACTION_DESTROY,
@@ -161,6 +251,8 @@ enum uverbs_methods_mr {
UVERBS_METHOD_DM_MR_REG,
UVERBS_METHOD_MR_DESTROY,
UVERBS_METHOD_ADVISE_MR,
+ UVERBS_METHOD_QUERY_MR,
+ UVERBS_METHOD_REG_DMABUF_MR,
};
enum uverbs_attrs_mr_destroy_ids {
@@ -174,6 +266,26 @@ enum uverbs_attrs_advise_mr_cmd_attr_ids {
UVERBS_ATTR_ADVISE_MR_SGE_LIST,
};
+enum uverbs_attrs_query_mr_cmd_attr_ids {
+ UVERBS_ATTR_QUERY_MR_HANDLE,
+ UVERBS_ATTR_QUERY_MR_RESP_LKEY,
+ UVERBS_ATTR_QUERY_MR_RESP_RKEY,
+ UVERBS_ATTR_QUERY_MR_RESP_LENGTH,
+ UVERBS_ATTR_QUERY_MR_RESP_IOVA,
+};
+
+enum uverbs_attrs_reg_dmabuf_mr_cmd_attr_ids {
+ UVERBS_ATTR_REG_DMABUF_MR_HANDLE,
+ UVERBS_ATTR_REG_DMABUF_MR_PD_HANDLE,
+ UVERBS_ATTR_REG_DMABUF_MR_OFFSET,
+ UVERBS_ATTR_REG_DMABUF_MR_LENGTH,
+ UVERBS_ATTR_REG_DMABUF_MR_IOVA,
+ UVERBS_ATTR_REG_DMABUF_MR_FD,
+ UVERBS_ATTR_REG_DMABUF_MR_ACCESS_FLAGS,
+ UVERBS_ATTR_REG_DMABUF_MR_RESP_LKEY,
+ UVERBS_ATTR_REG_DMABUF_MR_RESP_RKEY,
+};
+
enum uverbs_attrs_create_counters_cmd_attr_ids {
UVERBS_ATTR_CREATE_COUNTERS_HANDLE,
};
@@ -256,4 +368,18 @@ enum uverbs_attrs_async_event_create {
UVERBS_ATTR_ASYNC_EVENT_ALLOC_FD_HANDLE,
};
+enum uverbs_attrs_query_gid_table_cmd_attr_ids {
+ UVERBS_ATTR_QUERY_GID_TABLE_ENTRY_SIZE,
+ UVERBS_ATTR_QUERY_GID_TABLE_FLAGS,
+ UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
+ UVERBS_ATTR_QUERY_GID_TABLE_RESP_NUM_ENTRIES,
+};
+
+enum uverbs_attrs_query_gid_entry_cmd_attr_ids {
+ UVERBS_ATTR_QUERY_GID_ENTRY_PORT,
+ UVERBS_ATTR_QUERY_GID_ENTRY_GID_INDEX,
+ UVERBS_ATTR_QUERY_GID_ENTRY_FLAGS,
+ UVERBS_ATTR_QUERY_GID_ENTRY_RESP_ENTRY,
+};
+
#endif
diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h
index a640bb814be0..7dd56210226f 100644
--- a/include/uapi/rdma/ib_user_ioctl_verbs.h
+++ b/include/uapi/rdma/ib_user_ioctl_verbs.h
@@ -64,6 +64,41 @@ enum ib_uverbs_access_flags {
~(IB_UVERBS_ACCESS_OPTIONAL_FIRST - 1)
};
+enum ib_uverbs_srq_type {
+ IB_UVERBS_SRQT_BASIC,
+ IB_UVERBS_SRQT_XRC,
+ IB_UVERBS_SRQT_TM,
+};
+
+enum ib_uverbs_wq_type {
+ IB_UVERBS_WQT_RQ,
+};
+
+enum ib_uverbs_wq_flags {
+ IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0,
+ IB_UVERBS_WQ_FLAGS_SCATTER_FCS = 1 << 1,
+ IB_UVERBS_WQ_FLAGS_DELAY_DROP = 1 << 2,
+ IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
+};
+
+enum ib_uverbs_qp_type {
+ IB_UVERBS_QPT_RC = 2,
+ IB_UVERBS_QPT_UC,
+ IB_UVERBS_QPT_UD,
+ IB_UVERBS_QPT_RAW_PACKET = 8,
+ IB_UVERBS_QPT_XRC_INI,
+ IB_UVERBS_QPT_XRC_TGT,
+ IB_UVERBS_QPT_DRIVER = 0xFF,
+};
+
+enum ib_uverbs_qp_create_flags {
+ IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
+ IB_UVERBS_QP_CREATE_SCATTER_FCS = 1 << 8,
+ IB_UVERBS_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
+ IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING = 1 << 11,
+ IB_UVERBS_QP_CREATE_SQ_SIG_ALL = 1 << 12,
+};
+
enum ib_uverbs_query_port_cap_flags {
IB_UVERBS_PCF_SM = 1 << 1,
IB_UVERBS_PCF_NOTICE_SUP = 1 << 2,
@@ -173,6 +208,7 @@ enum ib_uverbs_read_counters_flags {
enum ib_uverbs_advise_mr_advice {
IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH,
IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE,
+ IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT,
};
enum ib_uverbs_advise_mr_flag {
@@ -185,6 +221,14 @@ struct ib_uverbs_query_port_resp_ex {
__u8 reserved[6];
};
+struct ib_uverbs_qp_cap {
+ __u32 max_send_wr;
+ __u32 max_recv_wr;
+ __u32 max_send_sge;
+ __u32 max_recv_sge;
+ __u32 max_inline_data;
+};
+
enum rdma_driver_id {
RDMA_DRIVER_UNKNOWN,
RDMA_DRIVER_MLX5,
@@ -196,6 +240,7 @@ enum rdma_driver_id {
RDMA_DRIVER_OCRDMA,
RDMA_DRIVER_NES,
RDMA_DRIVER_I40IW,
+ RDMA_DRIVER_IRDMA = RDMA_DRIVER_I40IW,
RDMA_DRIVER_VMW_PVRDMA,
RDMA_DRIVER_QEDR,
RDMA_DRIVER_HNS,
@@ -205,6 +250,21 @@ enum rdma_driver_id {
RDMA_DRIVER_QIB,
RDMA_DRIVER_EFA,
RDMA_DRIVER_SIW,
+ RDMA_DRIVER_ERDMA,
+};
+
+enum ib_uverbs_gid_type {
+ IB_UVERBS_GID_TYPE_IB,
+ IB_UVERBS_GID_TYPE_ROCE_V1,
+ IB_UVERBS_GID_TYPE_ROCE_V2,
+};
+
+struct ib_uverbs_gid_entry {
+ __aligned_u64 gid[2];
+ __u32 gid_index;
+ __u32 port_num;
+ __u32 gid_type;
+ __u32 netdev_ifindex; /* It is 0 if there is no netdev associated with it */
};
#endif
diff --git a/include/uapi/rdma/ib_user_mad.h b/include/uapi/rdma/ib_user_mad.h
index 90c0cf228020..10b5f6a4c677 100644
--- a/include/uapi/rdma/ib_user_mad.h
+++ b/include/uapi/rdma/ib_user_mad.h
@@ -143,7 +143,7 @@ struct ib_user_mad_hdr {
*/
struct ib_user_mad {
struct ib_user_mad_hdr hdr;
- __aligned_u64 data[0];
+ __aligned_u64 data[];
};
/*
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 0474c7400268..43672cb1fd57 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -158,18 +158,18 @@ struct ib_uverbs_ex_cmd_hdr {
struct ib_uverbs_get_context {
__aligned_u64 response;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_get_context_resp {
__u32 async_fd;
__u32 num_comp_vectors;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_query_device {
__aligned_u64 response;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_query_device_resp {
@@ -278,7 +278,7 @@ struct ib_uverbs_query_port {
__aligned_u64 response;
__u8 port_num;
__u8 reserved[7];
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_query_port_resp {
@@ -308,12 +308,12 @@ struct ib_uverbs_query_port_resp {
struct ib_uverbs_alloc_pd {
__aligned_u64 response;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_alloc_pd_resp {
__u32 pd_handle;
- __u32 driver_data[0];
+ __u32 driver_data[];
};
struct ib_uverbs_dealloc_pd {
@@ -324,12 +324,12 @@ struct ib_uverbs_open_xrcd {
__aligned_u64 response;
__u32 fd;
__u32 oflags;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_open_xrcd_resp {
__u32 xrcd_handle;
- __u32 driver_data[0];
+ __u32 driver_data[];
};
struct ib_uverbs_close_xrcd {
@@ -343,14 +343,14 @@ struct ib_uverbs_reg_mr {
__aligned_u64 hca_va;
__u32 pd_handle;
__u32 access_flags;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_reg_mr_resp {
__u32 mr_handle;
__u32 lkey;
__u32 rkey;
- __u32 driver_data[0];
+ __u32 driver_data[];
};
struct ib_uverbs_rereg_mr {
@@ -362,13 +362,13 @@ struct ib_uverbs_rereg_mr {
__aligned_u64 hca_va;
__u32 pd_handle;
__u32 access_flags;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_rereg_mr_resp {
__u32 lkey;
__u32 rkey;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_dereg_mr {
@@ -380,13 +380,13 @@ struct ib_uverbs_alloc_mw {
__u32 pd_handle;
__u8 mw_type;
__u8 reserved[3];
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_alloc_mw_resp {
__u32 mw_handle;
__u32 rkey;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_dealloc_mw {
@@ -408,7 +408,7 @@ struct ib_uverbs_create_cq {
__u32 comp_vector;
__s32 comp_channel;
__u32 reserved;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
enum ib_uverbs_ex_create_cq_flags {
@@ -442,13 +442,13 @@ struct ib_uverbs_resize_cq {
__aligned_u64 response;
__u32 cq_handle;
__u32 cqe;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_resize_cq_resp {
__u32 cqe;
__u32 reserved;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_poll_cq {
@@ -457,6 +457,17 @@ struct ib_uverbs_poll_cq {
__u32 ne;
};
+enum ib_uverbs_wc_opcode {
+ IB_UVERBS_WC_SEND = 0,
+ IB_UVERBS_WC_RDMA_WRITE = 1,
+ IB_UVERBS_WC_RDMA_READ = 2,
+ IB_UVERBS_WC_COMP_SWAP = 3,
+ IB_UVERBS_WC_FETCH_ADD = 4,
+ IB_UVERBS_WC_BIND_MW = 5,
+ IB_UVERBS_WC_LOCAL_INV = 6,
+ IB_UVERBS_WC_TSO = 7,
+};
+
struct ib_uverbs_wc {
__aligned_u64 wr_id;
__u32 status;
@@ -481,7 +492,7 @@ struct ib_uverbs_wc {
struct ib_uverbs_poll_cq_resp {
__u32 count;
__u32 reserved;
- struct ib_uverbs_wc wc[0];
+ struct ib_uverbs_wc wc[];
};
struct ib_uverbs_req_notify_cq {
@@ -574,7 +585,7 @@ struct ib_uverbs_create_qp {
__u8 qp_type;
__u8 is_srq;
__u8 reserved;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
enum ib_uverbs_create_qp_mask {
@@ -585,20 +596,6 @@ enum {
IB_UVERBS_CREATE_QP_SUP_COMP_MASK = IB_UVERBS_CREATE_QP_MASK_IND_TABLE,
};
-enum {
- /*
- * This value is equal to IB_QP_DEST_QPN.
- */
- IB_USER_LEGACY_LAST_QP_ATTR_MASK = 1ULL << 20,
-};
-
-enum {
- /*
- * This value is equal to IB_QP_RATE_LIMIT.
- */
- IB_USER_LAST_QP_ATTR_MASK = 1ULL << 25,
-};
-
struct ib_uverbs_ex_create_qp {
__aligned_u64 user_handle;
__u32 pd_handle;
@@ -627,7 +624,7 @@ struct ib_uverbs_open_qp {
__u32 qpn;
__u8 qp_type;
__u8 reserved[7];
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
/* also used for open response */
@@ -672,7 +669,7 @@ struct ib_uverbs_query_qp {
__aligned_u64 response;
__u32 qp_handle;
__u32 attr_mask;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_query_qp_resp {
@@ -706,7 +703,7 @@ struct ib_uverbs_query_qp_resp {
__u8 alt_timeout;
__u8 sq_sig_all;
__u8 reserved[5];
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_modify_qp {
@@ -827,7 +824,7 @@ struct ib_uverbs_post_send {
__u32 wr_count;
__u32 sge_count;
__u32 wqe_size;
- struct ib_uverbs_send_wr send_wr[0];
+ struct ib_uverbs_send_wr send_wr[];
};
struct ib_uverbs_post_send_resp {
@@ -846,7 +843,7 @@ struct ib_uverbs_post_recv {
__u32 wr_count;
__u32 sge_count;
__u32 wqe_size;
- struct ib_uverbs_recv_wr recv_wr[0];
+ struct ib_uverbs_recv_wr recv_wr[];
};
struct ib_uverbs_post_recv_resp {
@@ -859,7 +856,7 @@ struct ib_uverbs_post_srq_recv {
__u32 wr_count;
__u32 sge_count;
__u32 wqe_size;
- struct ib_uverbs_recv_wr recv[0];
+ struct ib_uverbs_recv_wr recv[];
};
struct ib_uverbs_post_srq_recv_resp {
@@ -872,12 +869,12 @@ struct ib_uverbs_create_ah {
__u32 pd_handle;
__u32 reserved;
struct ib_uverbs_ah_attr attr;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_create_ah_resp {
__u32 ah_handle;
- __u32 driver_data[0];
+ __u32 driver_data[];
};
struct ib_uverbs_destroy_ah {
@@ -889,7 +886,7 @@ struct ib_uverbs_attach_mcast {
__u32 qp_handle;
__u16 mlid;
__u16 reserved;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_detach_mcast {
@@ -897,7 +894,7 @@ struct ib_uverbs_detach_mcast {
__u32 qp_handle;
__u16 mlid;
__u16 reserved;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_flow_spec_hdr {
@@ -1138,7 +1135,7 @@ struct ib_uverbs_flow_attr {
* struct ib_flow_spec_xxx
* struct ib_flow_spec_yyy
*/
- struct ib_uverbs_flow_spec_hdr flow_specs[0];
+ struct ib_uverbs_flow_spec_hdr flow_specs[];
};
struct ib_uverbs_create_flow {
@@ -1164,7 +1161,7 @@ struct ib_uverbs_create_srq {
__u32 max_wr;
__u32 max_sge;
__u32 srq_limit;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_create_xsrq {
@@ -1178,7 +1175,7 @@ struct ib_uverbs_create_xsrq {
__u32 max_num_tags;
__u32 xrcd_handle;
__u32 cq_handle;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_create_srq_resp {
@@ -1186,7 +1183,7 @@ struct ib_uverbs_create_srq_resp {
__u32 max_wr;
__u32 max_sge;
__u32 srqn;
- __u32 driver_data[0];
+ __u32 driver_data[];
};
struct ib_uverbs_modify_srq {
@@ -1194,14 +1191,14 @@ struct ib_uverbs_modify_srq {
__u32 attr_mask;
__u32 max_wr;
__u32 srq_limit;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_query_srq {
__aligned_u64 response;
__u32 srq_handle;
__u32 reserved;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_query_srq_resp {
@@ -1272,7 +1269,7 @@ struct ib_uverbs_ex_create_rwq_ind_table {
* wq_handle1
* wq_handle2
*/
- __u32 wq_handles[0];
+ __u32 wq_handles[];
};
struct ib_uverbs_ex_create_rwq_ind_table_resp {
@@ -1301,4 +1298,46 @@ struct ib_uverbs_ex_modify_cq {
#define IB_DEVICE_NAME_MAX 64
+/*
+ * bits 9, 15, 16, 19, 22, 27, 30, 31, 32, 33, 35 and 37 may be set by old
+ * kernels and should not be used.
+ */
+enum ib_uverbs_device_cap_flags {
+ IB_UVERBS_DEVICE_RESIZE_MAX_WR = 1 << 0,
+ IB_UVERBS_DEVICE_BAD_PKEY_CNTR = 1 << 1,
+ IB_UVERBS_DEVICE_BAD_QKEY_CNTR = 1 << 2,
+ IB_UVERBS_DEVICE_RAW_MULTI = 1 << 3,
+ IB_UVERBS_DEVICE_AUTO_PATH_MIG = 1 << 4,
+ IB_UVERBS_DEVICE_CHANGE_PHY_PORT = 1 << 5,
+ IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE = 1 << 6,
+ IB_UVERBS_DEVICE_CURR_QP_STATE_MOD = 1 << 7,
+ IB_UVERBS_DEVICE_SHUTDOWN_PORT = 1 << 8,
+ /* IB_UVERBS_DEVICE_INIT_TYPE = 1 << 9, (not in use) */
+ IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT = 1 << 10,
+ IB_UVERBS_DEVICE_SYS_IMAGE_GUID = 1 << 11,
+ IB_UVERBS_DEVICE_RC_RNR_NAK_GEN = 1 << 12,
+ IB_UVERBS_DEVICE_SRQ_RESIZE = 1 << 13,
+ IB_UVERBS_DEVICE_N_NOTIFY_CQ = 1 << 14,
+ IB_UVERBS_DEVICE_MEM_WINDOW = 1 << 17,
+ IB_UVERBS_DEVICE_UD_IP_CSUM = 1 << 18,
+ IB_UVERBS_DEVICE_XRC = 1 << 20,
+ IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS = 1 << 21,
+ IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A = 1 << 23,
+ IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B = 1 << 24,
+ IB_UVERBS_DEVICE_RC_IP_CSUM = 1 << 25,
+ /* Deprecated. Please use IB_UVERBS_RAW_PACKET_CAP_IP_CSUM. */
+ IB_UVERBS_DEVICE_RAW_IP_CSUM = 1 << 26,
+ IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING = 1 << 29,
+ /* Deprecated. Please use IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS. */
+ IB_UVERBS_DEVICE_RAW_SCATTER_FCS = 1ULL << 34,
+ IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING = 1ULL << 36,
+};
+
+enum ib_uverbs_raw_packet_caps {
+ IB_UVERBS_RAW_PACKET_CAP_CVLAN_STRIPPING = 1 << 0,
+ IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS = 1 << 1,
+ IB_UVERBS_RAW_PACKET_CAP_IP_CSUM = 1 << 2,
+ IB_UVERBS_RAW_PACKET_CAP_DELAY_DROP = 1 << 3,
+};
+
#endif /* IB_USER_VERBS_H */
diff --git a/include/uapi/rdma/irdma-abi.h b/include/uapi/rdma/irdma-abi.h
new file mode 100644
index 000000000000..a7085e092d34
--- /dev/null
+++ b/include/uapi/rdma/irdma-abi.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB */
+/*
+ * Copyright (c) 2006 - 2021 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ */
+
+#ifndef IRDMA_ABI_H
+#define IRDMA_ABI_H
+
+#include <linux/types.h>
+
+/* irdma must support legacy GEN_1 i40iw kernel
+ * and user-space whose last ABI ver is 5
+ */
+#define IRDMA_ABI_VER 5
+
+enum irdma_memreg_type {
+ IRDMA_MEMREG_TYPE_MEM = 0,
+ IRDMA_MEMREG_TYPE_QP = 1,
+ IRDMA_MEMREG_TYPE_CQ = 2,
+};
+
+struct irdma_alloc_ucontext_req {
+ __u32 rsvd32;
+ __u8 userspace_ver;
+ __u8 rsvd8[3];
+};
+
+struct irdma_alloc_ucontext_resp {
+ __u32 max_pds;
+ __u32 max_qps;
+ __u32 wq_size; /* size of the WQs (SQ+RQ) in the mmaped area */
+ __u8 kernel_ver;
+ __u8 rsvd[3];
+ __aligned_u64 feature_flags;
+ __aligned_u64 db_mmap_key;
+ __u32 max_hw_wq_frags;
+ __u32 max_hw_read_sges;
+ __u32 max_hw_inline;
+ __u32 max_hw_rq_quanta;
+ __u32 max_hw_wq_quanta;
+ __u32 min_hw_cq_size;
+ __u32 max_hw_cq_size;
+ __u16 max_hw_sq_chunk;
+ __u8 hw_rev;
+ __u8 rsvd2;
+};
+
+struct irdma_alloc_pd_resp {
+ __u32 pd_id;
+ __u8 rsvd[4];
+};
+
+struct irdma_resize_cq_req {
+ __aligned_u64 user_cq_buffer;
+};
+
+struct irdma_create_cq_req {
+ __aligned_u64 user_cq_buf;
+ __aligned_u64 user_shadow_area;
+};
+
+struct irdma_create_qp_req {
+ __aligned_u64 user_wqe_bufs;
+ __aligned_u64 user_compl_ctx;
+};
+
+struct irdma_mem_reg_req {
+ __u16 reg_type; /* enum irdma_memreg_type */
+ __u16 cq_pages;
+ __u16 rq_pages;
+ __u16 sq_pages;
+};
+
+struct irdma_modify_qp_req {
+ __u8 sq_flush;
+ __u8 rq_flush;
+ __u8 rsvd[6];
+};
+
+struct irdma_create_cq_resp {
+ __u32 cq_id;
+ __u32 cq_size;
+};
+
+struct irdma_create_qp_resp {
+ __u32 qp_id;
+ __u32 actual_sq_size;
+ __u32 actual_rq_size;
+ __u32 irdma_drv_opt;
+ __u16 push_idx;
+ __u8 lsmm;
+ __u8 rsvd;
+ __u32 qp_caps;
+};
+
+struct irdma_modify_qp_resp {
+ __aligned_u64 push_wqe_mmap_key;
+ __aligned_u64 push_db_mmap_key;
+ __u16 push_offset;
+ __u8 push_valid;
+ __u8 rsvd[5];
+};
+
+struct irdma_create_ah_resp {
+ __u32 ah_id;
+ __u8 rsvd[4];
+};
+#endif /* IRDMA_ABI_H */
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index 624f5b53eb1f..a96b7d2770e1 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -49,6 +49,8 @@ enum {
MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC = 1 << 7,
MLX5_QP_FLAG_ALLOW_SCATTER_CQE = 1 << 8,
MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE = 1 << 9,
+ MLX5_QP_FLAG_UAR_PAGE_INDEX = 1 << 10,
+ MLX5_QP_FLAG_DCI_STREAM = 1 << 11,
};
enum {
@@ -78,6 +80,7 @@ struct mlx5_ib_alloc_ucontext_req {
enum mlx5_lib_caps {
MLX5_LIB_CAP_4K_UAR = (__u64)1 << 0,
+ MLX5_LIB_CAP_DYN_UAR = (__u64)1 << 1,
};
enum mlx5_ib_alloc_uctx_v2_flags {
@@ -98,6 +101,10 @@ struct mlx5_ib_alloc_ucontext_req_v2 {
enum mlx5_ib_alloc_ucontext_resp_mask {
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY = 1UL << 1,
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE = 1UL << 2,
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS = 1UL << 3,
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_REAL_TIME_TS = 1UL << 4,
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_MKEY_UPDATE_TAG = 1UL << 5,
};
enum mlx5_user_cmds_supp_uhw {
@@ -233,6 +240,11 @@ struct mlx5_ib_striding_rq_caps {
__u32 reserved;
};
+struct mlx5_ib_dci_streams_caps {
+ __u8 max_log_num_concurent;
+ __u8 max_log_num_errored;
+};
+
enum mlx5_ib_query_dev_resp_flags {
/* Support 128B CQE compression */
MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0,
@@ -261,11 +273,14 @@ struct mlx5_ib_query_device_resp {
struct mlx5_ib_sw_parsing_caps sw_parsing_caps;
struct mlx5_ib_striding_rq_caps striding_rq_caps;
__u32 tunnel_offloads_caps; /* enum mlx5_ib_tunnel_offloads */
- __u32 reserved;
+ struct mlx5_ib_dci_streams_caps dci_streams_caps;
+ __u16 reserved;
};
enum mlx5_ib_create_cq_flags {
MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD = 1 << 0,
+ MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX = 1 << 1,
+ MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS = 1 << 2,
};
struct mlx5_ib_create_cq {
@@ -275,6 +290,9 @@ struct mlx5_ib_create_cq {
__u8 cqe_comp_en;
__u8 cqe_comp_res_format;
__u16 flags;
+ __u16 uar_page_index;
+ __u16 reserved0;
+ __u32 reserved1;
};
struct mlx5_ib_create_cq_resp {
@@ -303,6 +321,11 @@ struct mlx5_ib_create_srq_resp {
__u32 reserved;
};
+struct mlx5_ib_create_qp_dci_streams {
+ __u8 log_num_concurent;
+ __u8 log_num_errored;
+};
+
struct mlx5_ib_create_qp {
__aligned_u64 buf_addr;
__aligned_u64 db_addr;
@@ -316,6 +339,9 @@ struct mlx5_ib_create_qp {
__aligned_u64 sq_buf_addr;
__aligned_u64 access_key;
};
+ __u32 ece_options;
+ struct mlx5_ib_create_qp_dci_streams dci_streams;
+ __u16 reserved;
};
/* RX Hash function flags */
@@ -365,7 +391,7 @@ enum mlx5_ib_create_qp_resp_mask {
struct mlx5_ib_create_qp_resp {
__u32 bfreg_index;
- __u32 reserved;
+ __u32 ece_options;
__u32 comp_mask;
__u32 tirn;
__u32 tisn;
@@ -414,12 +440,14 @@ struct mlx5_ib_burst_info {
struct mlx5_ib_modify_qp {
__u32 comp_mask;
struct mlx5_ib_burst_info burst_info;
- __u32 reserved;
+ __u32 ece_options;
};
struct mlx5_ib_modify_qp_resp {
__u32 response_length;
__u32 dctn;
+ __u32 ece_options;
+ __u32 reserved;
};
struct mlx5_ib_create_wq_resp {
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index afe7da6f2b8e..595edad03dfe 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -41,6 +41,25 @@ enum mlx5_ib_create_flow_action_attrs {
MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS = (1U << UVERBS_ID_NS_SHIFT),
};
+enum mlx5_ib_dm_methods {
+ MLX5_IB_METHOD_DM_MAP_OP_ADDR = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_METHOD_DM_QUERY,
+};
+
+enum mlx5_ib_dm_map_op_addr_attrs {
+ MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP,
+ MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
+ MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
+};
+
+enum mlx5_ib_query_dm_attrs {
+ MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
+ MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
+ MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
+};
+
enum mlx5_ib_alloc_dm_attrs {
MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET = (1U << UVERBS_ID_NS_SHIFT),
MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
@@ -131,18 +150,53 @@ enum mlx5_ib_var_obj_methods {
MLX5_IB_METHOD_VAR_OBJ_DESTROY,
};
+enum mlx5_ib_uar_alloc_attrs {
+ MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE,
+ MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
+ MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
+ MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
+};
+
+enum mlx5_ib_uar_obj_destroy_attrs {
+ MLX5_IB_ATTR_UAR_OBJ_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum mlx5_ib_uar_obj_methods {
+ MLX5_IB_METHOD_UAR_OBJ_ALLOC = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_METHOD_UAR_OBJ_DESTROY,
+};
+
enum mlx5_ib_devx_umem_reg_attrs {
MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD,
};
enum mlx5_ib_devx_umem_dereg_attrs {
MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
};
+enum mlx5_ib_pp_obj_methods {
+ MLX5_IB_METHOD_PP_OBJ_ALLOC = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_METHOD_PP_OBJ_DESTROY,
+};
+
+enum mlx5_ib_pp_alloc_attrs {
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX,
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_FLAGS,
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
+};
+
+enum mlx5_ib_pp_obj_destroy_attrs {
+ MLX5_IB_ATTR_PP_OBJ_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
enum mlx5_ib_devx_umem_methods {
MLX5_IB_METHOD_DEVX_UMEM_REG = (1U << UVERBS_ID_NS_SHIFT),
MLX5_IB_METHOD_DEVX_UMEM_DEREG,
@@ -173,6 +227,9 @@ enum mlx5_ib_objects {
MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
MLX5_IB_OBJECT_VAR,
+ MLX5_IB_OBJECT_PP,
+ MLX5_IB_OBJECT_UAR,
+ MLX5_IB_OBJECT_STEERING_ANCHOR,
};
enum mlx5_ib_flow_matcher_create_attrs {
@@ -193,7 +250,27 @@ enum mlx5_ib_flow_matcher_methods {
MLX5_IB_METHOD_FLOW_MATCHER_DESTROY,
};
-#define MLX5_IB_DW_MATCH_PARAM 0x80
+enum mlx5_ib_flow_steering_anchor_create_attrs {
+ MLX5_IB_ATTR_STEERING_ANCHOR_CREATE_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_STEERING_ANCHOR_FT_TYPE,
+ MLX5_IB_ATTR_STEERING_ANCHOR_PRIORITY,
+ MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID,
+};
+
+enum mlx5_ib_flow_steering_anchor_destroy_attrs {
+ MLX5_IB_ATTR_STEERING_ANCHOR_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum mlx5_ib_steering_anchor_methods {
+ MLX5_IB_METHOD_STEERING_ANCHOR_CREATE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY,
+};
+
+enum mlx5_ib_device_query_context_attrs {
+ MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+#define MLX5_IB_DW_MATCH_PARAM 0xA0
struct mlx5_ib_match_params {
__u32 match_params[MLX5_IB_DW_MATCH_PARAM];
@@ -206,6 +283,11 @@ enum mlx5_ib_flow_type {
MLX5_IB_FLOW_TYPE_MC_DEFAULT,
};
+enum mlx5_ib_create_flow_flags {
+ MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS = 1 << 0,
+ MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP = 1 << 1,
+};
+
enum mlx5_ib_create_flow_attrs {
MLX5_IB_ATTR_CREATE_FLOW_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE,
@@ -216,9 +298,10 @@ enum mlx5_ib_create_flow_attrs {
MLX5_IB_ATTR_CREATE_FLOW_TAG,
MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX,
MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
+ MLX5_IB_ATTR_CREATE_FLOW_FLAGS,
};
-enum mlx5_ib_destoy_flow_attrs {
+enum mlx5_ib_destroy_flow_attrs {
MLX5_IB_ATTR_DESTROY_FLOW_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
};
@@ -245,4 +328,23 @@ enum mlx5_ib_create_flow_action_create_packet_reformat_attrs {
MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF,
};
+enum mlx5_ib_query_pd_attrs {
+ MLX5_IB_ATTR_QUERY_PD_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_QUERY_PD_RESP_PDN,
+};
+
+enum mlx5_ib_pd_methods {
+ MLX5_IB_METHOD_PD_QUERY = (1U << UVERBS_ID_NS_SHIFT),
+
+};
+
+enum mlx5_ib_device_methods {
+ MLX5_IB_METHOD_QUERY_PORT = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum mlx5_ib_query_port_attrs {
+ MLX5_IB_ATTR_QUERY_PORT_PORT_NUM = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_QUERY_PORT,
+};
+
#endif
diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
index 88b6ca70c2fe..7af9e09ea556 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
@@ -44,6 +44,7 @@ enum mlx5_ib_uapi_flow_table_type {
MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX = 0x1,
MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB = 0x2,
MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX = 0x3,
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_TX = 0x4,
};
enum mlx5_ib_uapi_flow_action_packet_reformat_type {
@@ -62,6 +63,7 @@ enum mlx5_ib_uapi_dm_type {
MLX5_IB_UAPI_DM_TYPE_MEMIC,
MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM,
MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM,
+ MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM,
};
enum mlx5_ib_uapi_devx_create_event_channel_flags {
@@ -73,5 +75,39 @@ struct mlx5_ib_uapi_devx_async_event_hdr {
__u8 out_data[];
};
+enum mlx5_ib_uapi_pp_alloc_flags {
+ MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX = 1 << 0,
+};
+
+enum mlx5_ib_uapi_uar_alloc_type {
+ MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF = 0x0,
+ MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC = 0x1,
+};
+
+enum mlx5_ib_uapi_query_port_flags {
+ MLX5_IB_UAPI_QUERY_PORT_VPORT = 1 << 0,
+ MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID = 1 << 1,
+ MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_RX = 1 << 2,
+ MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_TX = 1 << 3,
+ MLX5_IB_UAPI_QUERY_PORT_VPORT_REG_C0 = 1 << 4,
+ MLX5_IB_UAPI_QUERY_PORT_ESW_OWNER_VHCA_ID = 1 << 5,
+};
+
+struct mlx5_ib_uapi_reg {
+ __u32 value;
+ __u32 mask;
+};
+
+struct mlx5_ib_uapi_query_port {
+ __aligned_u64 flags;
+ __u16 vport;
+ __u16 vport_vhca_id;
+ __u16 esw_owner_vhca_id;
+ __u16 rsvd0;
+ __aligned_u64 vport_steering_icm_rx;
+ __aligned_u64 vport_steering_icm_tx;
+ struct mlx5_ib_uapi_reg reg_c0;
+};
+
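Since mlx5_ib_uapi_query_port reports a variable set of fields, each one is meaningful only when its bit is set in flags. A small hypothetical accessor illustrating the expected check-before-use pattern:

	static int get_vport_vhca_id(const struct mlx5_ib_uapi_query_port *info,
				     __u16 *vhca_id)
	{
		/* each field is valid only if its flag bit was set by the kernel */
		if (!(info->flags & MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID))
			return -1;
		*vhca_id = info->vport_vhca_id;
		return 0;
	}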
#endif
diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h
index a0b83c9d4498..bf7333b2b5d7 100644
--- a/include/uapi/rdma/qedr-abi.h
+++ b/include/uapi/rdma/qedr-abi.h
@@ -39,8 +39,9 @@
/* user kernel communication data structures. */
enum qedr_alloc_ucontext_flags {
- QEDR_ALLOC_UCTX_RESERVED = 1 << 0,
- QEDR_ALLOC_UCTX_DB_REC = 1 << 1
+ QEDR_ALLOC_UCTX_EDPM_MODE = 1 << 0,
+ QEDR_ALLOC_UCTX_DB_REC = 1 << 1,
+ QEDR_SUPPORT_DPM_SIZES = 1 << 2,
};
struct qedr_alloc_ucontext_req {
@@ -50,13 +51,14 @@ struct qedr_alloc_ucontext_req {
#define QEDR_LDPM_MAX_SIZE (8192)
#define QEDR_EDPM_TRANS_SIZE (64)
+#define QEDR_EDPM_MAX_SIZE (ROCE_REQ_MAX_INLINE_DATA_SIZE)
enum qedr_rdma_dpm_type {
QEDR_DPM_TYPE_NONE = 0,
QEDR_DPM_TYPE_ROCE_ENHANCED = 1 << 0,
QEDR_DPM_TYPE_ROCE_LEGACY = 1 << 1,
QEDR_DPM_TYPE_IWARP_LEGACY = 1 << 2,
- QEDR_DPM_TYPE_RESERVED = 1 << 3,
+ QEDR_DPM_TYPE_ROCE_EDPM_MODE = 1 << 3,
QEDR_DPM_SIZES_SET = 1 << 4,
};
@@ -77,6 +79,8 @@ struct qedr_alloc_ucontext_resp {
__u16 ldpm_limit_size;
__u8 edpm_trans_size;
__u8 reserved;
+ __u16 edpm_limit_size;
+ __u8 padding[6];
};
struct qedr_alloc_pd_ureq {
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 8e277783fa96..e50c357367db 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -287,6 +287,18 @@ enum rdma_nldev_command {
RDMA_NLDEV_CMD_STAT_DEL,
+ RDMA_NLDEV_CMD_RES_QP_GET_RAW,
+
+ RDMA_NLDEV_CMD_RES_CQ_GET_RAW,
+
+ RDMA_NLDEV_CMD_RES_MR_GET_RAW,
+
+ RDMA_NLDEV_CMD_RES_CTX_GET, /* can dump */
+
+ RDMA_NLDEV_CMD_RES_SRQ_GET, /* can dump */
+
+ RDMA_NLDEV_CMD_STAT_GET_STATUS,
+
RDMA_NLDEV_NUM_OPS
};
@@ -525,6 +537,23 @@ enum rdma_nldev_attr {
*/
RDMA_NLDEV_ATTR_DEV_DIM, /* u8 */
+ RDMA_NLDEV_ATTR_RES_RAW, /* binary */
+
+ RDMA_NLDEV_ATTR_RES_CTX, /* nested table */
+ RDMA_NLDEV_ATTR_RES_CTX_ENTRY, /* nested table */
+
+ RDMA_NLDEV_ATTR_RES_SRQ, /* nested table */
+ RDMA_NLDEV_ATTR_RES_SRQ_ENTRY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_SRQN, /* u32 */
+
+ RDMA_NLDEV_ATTR_MIN_RANGE, /* u32 */
+ RDMA_NLDEV_ATTR_MAX_RANGE, /* u32 */
+
+ RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK, /* u8 */
+
+ RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX, /* u32 */
+ RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC, /* u8 */
+
/*
* Always the end
*/
@@ -561,5 +590,6 @@ enum rdma_nl_counter_mode {
*/
enum rdma_nl_counter_mask {
RDMA_COUNTER_MASK_QP_TYPE = 1,
+ RDMA_COUNTER_MASK_PID = 1 << 1,
};
#endif /* _UAPI_RDMA_NETLINK_H */
diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h
index e42940a215a3..7cea03581f79 100644
--- a/include/uapi/rdma/rdma_user_cm.h
+++ b/include/uapi/rdma/rdma_user_cm.h
@@ -164,6 +164,8 @@ struct rdma_ucm_query_route_resp {
__u32 num_paths;
__u8 port_num;
__u8 reserved[3];
+ __u32 ibdev_index;
+ __u32 reserved1;
};
struct rdma_ucm_query_addr_resp {
@@ -175,12 +177,14 @@ struct rdma_ucm_query_addr_resp {
__u16 dst_size;
struct __kernel_sockaddr_storage src_addr;
struct __kernel_sockaddr_storage dst_addr;
+ __u32 ibdev_index;
+ __u32 reserved1;
};
struct rdma_ucm_query_path_resp {
__u32 num_paths;
__u32 reserved;
- struct ib_path_rec_data path_data[0];
+ struct ib_path_rec_data path_data[];
};
struct rdma_ucm_conn_param {
@@ -206,10 +210,16 @@ struct rdma_ucm_ud_param {
__u8 reserved[7];
};
+struct rdma_ucm_ece {
+ __u32 vendor_id;
+ __u32 attr_mod;
+};
+
struct rdma_ucm_connect {
struct rdma_ucm_conn_param conn_param;
__u32 id;
__u32 reserved;
+ struct rdma_ucm_ece ece;
};
struct rdma_ucm_listen {
@@ -222,12 +232,14 @@ struct rdma_ucm_accept {
struct rdma_ucm_conn_param conn_param;
__u32 id;
__u32 reserved;
+ struct rdma_ucm_ece ece;
};
struct rdma_ucm_reject {
__u32 id;
__u8 private_data_len;
- __u8 reserved[3];
+ __u8 reason;
+ __u8 reserved[2];
__u8 private_data[RDMA_MAX_PRIVATE_DATA];
};
@@ -287,6 +299,7 @@ struct rdma_ucm_event_resp {
struct rdma_ucm_ud_param ud;
} param;
__u32 reserved;
+ struct rdma_ucm_ece ece;
};
/* Option levels */
diff --git a/include/uapi/rdma/rdma_user_ioctl.h b/include/uapi/rdma/rdma_user_ioctl.h
index d92d2721b28c..53c55188dd2a 100644
--- a/include/uapi/rdma/rdma_user_ioctl.h
+++ b/include/uapi/rdma/rdma_user_ioctl.h
@@ -43,7 +43,7 @@
/*
* General blocks assignments
- * It is closed on purpose do not expose it it user space
+ * It is closed on purpose - do not expose it to user space
* #define MAD_CMD_BASE 0x00
* #define HFI1_CMD_BAS 0xE0
*/
diff --git a/include/uapi/rdma/rdma_user_ioctl_cmds.h b/include/uapi/rdma/rdma_user_ioctl_cmds.h
index 7b1ec806f8f9..ab1aef17feb1 100644
--- a/include/uapi/rdma/rdma_user_ioctl_cmds.h
+++ b/include/uapi/rdma/rdma_user_ioctl_cmds.h
@@ -36,7 +36,7 @@
#include <linux/types.h>
#include <linux/ioctl.h>
-/* Documentation/ioctl/ioctl-number.rst */
+/* Documentation/userspace-api/ioctl/ioctl-number.rst */
#define RDMA_IOCTL_MAGIC 0x1b
#define RDMA_VERBS_IOCTL \
_IOWR(RDMA_IOCTL_MAGIC, 1, struct ib_uverbs_ioctl_hdr)
@@ -81,7 +81,7 @@ struct ib_uverbs_ioctl_hdr {
__aligned_u64 reserved1;
__u32 driver_id;
__u32 reserved2;
- struct ib_uverbs_attr attrs[0];
+ struct ib_uverbs_attr attrs[];
};
#endif
diff --git a/include/uapi/rdma/rdma_user_rxe.h b/include/uapi/rdma/rdma_user_rxe.h
index aae2e696bb38..73f679dfd2df 100644
--- a/include/uapi/rdma/rdma_user_rxe.h
+++ b/include/uapi/rdma/rdma_user_rxe.h
@@ -39,6 +39,11 @@
#include <linux/in.h>
#include <linux/in6.h>
+enum {
+ RXE_NETWORK_TYPE_IPV4 = 1,
+ RXE_NETWORK_TYPE_IPV6 = 2,
+};
+
union rxe_gid {
__u8 raw[16];
struct {
@@ -57,6 +62,7 @@ struct rxe_global_route {
struct rxe_av {
__u8 port_num;
+ /* From RXE_NETWORK_TYPE_* */
__u8 network_type;
__u8 dmac[6];
struct rxe_global_route grh;
@@ -68,7 +74,7 @@ struct rxe_av {
struct rxe_send_wr {
__aligned_u64 wr_id;
- __u32 num_sge;
+ __u32 reserved;
__u32 opcode;
__u32 send_flags;
union {
@@ -92,16 +98,30 @@ struct rxe_send_wr {
__u32 remote_qpn;
__u32 remote_qkey;
__u16 pkey_index;
+ __u16 reserved;
+ __u32 ah_num;
+ __u32 pad[4];
+ struct rxe_av av;
} ud;
+ struct {
+ __aligned_u64 addr;
+ __aligned_u64 length;
+ __u32 mr_lkey;
+ __u32 mw_rkey;
+ __u32 rkey;
+ __u32 access;
+ } mw;
/* reg is only used by the kernel and is not part of the uapi */
+#ifdef __KERNEL__
struct {
union {
struct ib_mr *mr;
__aligned_u64 reserved;
};
- __u32 key;
- __u32 access;
+ __u32 key;
+ __u32 access;
} reg;
+#endif
} wr;
};
@@ -112,7 +132,7 @@ struct rxe_sge {
};
struct mminfo {
- __aligned_u64 offset;
+ __aligned_u64 offset;
__u32 size;
__u32 pad;
};
@@ -125,14 +145,13 @@ struct rxe_dma_info {
__u32 sge_offset;
__u32 reserved;
union {
- __u8 inline_data[0];
- struct rxe_sge sge[0];
+ __DECLARE_FLEX_ARRAY(__u8, inline_data);
+ __DECLARE_FLEX_ARRAY(struct rxe_sge, sge);
};
};
struct rxe_send_wqe {
struct rxe_send_wr wr;
- struct rxe_av av;
__u32 status;
__u32 state;
__aligned_u64 iova;
@@ -147,11 +166,16 @@ struct rxe_send_wqe {
struct rxe_recv_wqe {
__aligned_u64 wr_id;
- __u32 num_sge;
+ __u32 reserved;
__u32 padding;
struct rxe_dma_info dma;
};
+struct rxe_create_ah_resp {
+ __u32 ah_num;
+ __u32 reserved;
+};
+
struct rxe_create_cq_resp {
struct mminfo mi;
};
@@ -175,4 +199,25 @@ struct rxe_modify_srq_cmd {
__aligned_u64 mmap_info_addr;
};
+/* This data structure is stored at the base of work and
+ * completion queues shared between user space and kernel space.
+ * It contains the producer and consumer indices. It also
+ * contains a copy of the queue size parameters for user space
+ * to use, but the kernel must use the parameters in the
+ * rxe_queue struct. For performance reasons the producer and
+ * consumer indices are placed in separate cache lines.
+ * The kernel should always mask the indices to avoid accessing
+ * memory outside of the data area.
+ */
+struct rxe_queue_buf {
+ __u32 log2_elem_size;
+ __u32 index_mask;
+ __u32 pad_1[30];
+ __u32 producer_index;
+ __u32 pad_2[31];
+ __u32 consumer_index;
+ __u32 pad_3[31];
+ __u8 data[];
+};
+
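A hedged sketch of the index arithmetic the comment above prescribes, for a hypothetical userspace consumer with `q` pointing at the shared mmap()ed region: elements are 2^log2_elem_size bytes, and masking the free-running index keeps every access inside the data area:

	static void *rxe_consumer_addr(struct rxe_queue_buf *q)
	{
		__u32 ci = q->consumer_index & q->index_mask;

		return q->data + (ci << q->log2_elem_size);
	}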
#endif /* RDMA_USER_RXE_H */
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index f8b638c73371..901a4fd72c09 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -133,6 +133,13 @@ enum pvrdma_wc_flags {
PVRDMA_WC_FLAGS_MAX = PVRDMA_WC_WITH_NETWORK_HDR_TYPE,
};
+enum pvrdma_network_type {
+ PVRDMA_NETWORK_IB,
+ PVRDMA_NETWORK_ROCE_V1 = PVRDMA_NETWORK_IB,
+ PVRDMA_NETWORK_IPV4,
+ PVRDMA_NETWORK_IPV6
+};
+
struct pvrdma_alloc_ucontext_resp {
__u32 qp_tab_size;
__u32 reserved;
diff --git a/include/uapi/scsi/fc/fc_els.h b/include/uapi/scsi/fc/fc_els.h
index 76f627f0d13b..16782c360de3 100644
--- a/include/uapi/scsi/fc/fc_els.h
+++ b/include/uapi/scsi/fc/fc_els.h
@@ -9,6 +9,7 @@
#define _FC_ELS_H_
#include <linux/types.h>
+#include <asm/byteorder.h>
/*
* Fibre Channel Switch - Enhanced Link Services definitions.
@@ -40,6 +41,9 @@ enum fc_els_cmd {
ELS_REC = 0x13, /* read exchange concise */
ELS_SRR = 0x14, /* sequence retransmission request */
ELS_FPIN = 0x16, /* Fabric Performance Impact Notification */
+ ELS_EDC = 0x17, /* Exchange Diagnostic Capabilities */
+ ELS_RDP = 0x18, /* Read Diagnostic Parameters */
+ ELS_RDF = 0x19, /* Register Diagnostic Functions */
ELS_PRLI = 0x20, /* process login */
ELS_PRLO = 0x21, /* process logout */
ELS_SCN = 0x22, /* state change notification */
@@ -108,6 +112,9 @@ enum fc_els_cmd {
[ELS_REC] = "REC", \
[ELS_SRR] = "SRR", \
[ELS_FPIN] = "FPIN", \
+ [ELS_EDC] = "EDC", \
+ [ELS_RDP] = "RDP", \
+ [ELS_RDF] = "RDF", \
[ELS_PRLI] = "PRLI", \
[ELS_PRLO] = "PRLO", \
[ELS_SCN] = "SCN", \
@@ -208,6 +215,105 @@ enum fc_els_rjt_explan {
};
/*
+ * Link Service TLV Descriptor Tag Values
+ */
+enum fc_ls_tlv_dtag {
+ ELS_DTAG_LS_REQ_INFO = 0x00000001,
+ /* Link Service Request Information Descriptor */
+ ELS_DTAG_LNK_FAULT_CAP = 0x0001000D,
+ /* Link Fault Capability Descriptor */
+ ELS_DTAG_CG_SIGNAL_CAP = 0x0001000F,
+ /* Congestion Signaling Capability Descriptor */
+ ELS_DTAG_LNK_INTEGRITY = 0x00020001,
+ /* Link Integrity Notification Descriptor */
+ ELS_DTAG_DELIVERY = 0x00020002,
+ /* Delivery Notification Descriptor */
+ ELS_DTAG_PEER_CONGEST = 0x00020003,
+ /* Peer Congestion Notification Descriptor */
+ ELS_DTAG_CONGESTION = 0x00020004,
+ /* Congestion Notification Descriptor */
+ ELS_DTAG_FPIN_REGISTER = 0x00030001,
+ /* FPIN Registration Descriptor */
+};
+
+/*
+ * Initializer useful for decoding table.
+ * Please keep this in sync with the above definitions.
+ */
+#define FC_LS_TLV_DTAG_INIT { \
+ { ELS_DTAG_LS_REQ_INFO, "Link Service Request Information" }, \
+ { ELS_DTAG_LNK_FAULT_CAP, "Link Fault Capability" }, \
+ { ELS_DTAG_CG_SIGNAL_CAP, "Congestion Signaling Capability" }, \
+ { ELS_DTAG_LNK_INTEGRITY, "Link Integrity Notification" }, \
+ { ELS_DTAG_DELIVERY, "Delivery Notification Present" }, \
+ { ELS_DTAG_PEER_CONGEST, "Peer Congestion Notification" }, \
+ { ELS_DTAG_CONGESTION, "Congestion Notification" }, \
+ { ELS_DTAG_FPIN_REGISTER, "FPIN Registration" }, \
+}
+
+
+/*
+ * Generic Link Service TLV Descriptor format
+ *
+ * This structure, as it defines no payload, will also be referred to
+ * as the "tlv header" - which contains the tag and len fields.
+ */
+struct fc_tlv_desc {
+ __be32 desc_tag; /* Notification Descriptor Tag */
+ __be32 desc_len; /* Length of Descriptor (in bytes).
+ * Size of descriptor excluding
+ * desc_tag and desc_len fields.
+ */
+ __u8 desc_value[]; /* Descriptor Value */
+};
+
+/* Descriptor tag and len fields are considered the mandatory header
+ * for a descriptor
+ */
+#define FC_TLV_DESC_HDR_SZ sizeof(struct fc_tlv_desc)
+
+/*
+ * Macro, used when initializing payloads, to return the descriptor length.
+ * Length is size of descriptor minus the tag and len fields.
+ */
+#define FC_TLV_DESC_LENGTH_FROM_SZ(desc) \
+ (sizeof(desc) - FC_TLV_DESC_HDR_SZ)
+
+/* Macro, used on received payloads, to return the descriptor length */
+#define FC_TLV_DESC_SZ_FROM_LENGTH(tlv) \
+ (__be32_to_cpu((tlv)->desc_len) + FC_TLV_DESC_HDR_SZ)
+
+/*
+ * This helper is used to walk descriptors in a descriptor list.
+ * Given the address of the current descriptor, which minimally contains a
+ * tag and len field, calculate the address of the next descriptor based
+ * on the len field.
+ */
+static inline void *fc_tlv_next_desc(void *desc)
+{
+ struct fc_tlv_desc *tlv = desc;
+
+ return (desc + FC_TLV_DESC_SZ_FROM_LENGTH(tlv));
+}
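A hedged sketch of walking a received descriptor list (for example an FPIN payload) with the helper above; `desc` points at the first descriptor and `list_len` is the remaining list length in bytes:

	static void walk_tlv_list(void *desc, __u32 list_len)
	{
		while (list_len >= FC_TLV_DESC_HDR_SZ) {
			struct fc_tlv_desc *tlv = desc;
			__u32 sz = FC_TLV_DESC_SZ_FROM_LENGTH(tlv);

			if (sz > list_len)
				break;	/* malformed descriptor - stop walking */
			/* dispatch on __be32_to_cpu(tlv->desc_tag) here */
			list_len -= sz;
			desc = fc_tlv_next_desc(desc);
		}
	}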
+
+
+/*
+ * Link Service Request Information Descriptor
+ */
+struct fc_els_lsri_desc {
+ __be32 desc_tag; /* descriptor tag (0x0000 0001) */
+ __be32 desc_len; /* Length of Descriptor (in bytes) (4).
+ * Size of descriptor excluding
+ * desc_tag and desc_len fields.
+ */
+ struct {
+ __u8 cmd; /* ELS cmd byte */
+ __u8 bytes[3]; /* bytes 1..3 */
+ } rqst_w0; /* Request word 0 */
+};
+
+
+/*
* Common service parameters (N ports).
*/
struct fc_els_csp {
@@ -818,25 +924,174 @@ enum fc_els_clid_ic {
ELS_CLID_IC_LIP = 8, /* receiving LIP */
};
+/*
+ * Link Integrity event types
+ */
+enum fc_fpin_li_event_types {
+ FPIN_LI_UNKNOWN = 0x0,
+ FPIN_LI_LINK_FAILURE = 0x1,
+ FPIN_LI_LOSS_OF_SYNC = 0x2,
+ FPIN_LI_LOSS_OF_SIG = 0x3,
+ FPIN_LI_PRIM_SEQ_ERR = 0x4,
+ FPIN_LI_INVALID_TX_WD = 0x5,
+ FPIN_LI_INVALID_CRC = 0x6,
+ FPIN_LI_DEVICE_SPEC = 0xF,
+};
/*
- * Fabric Notification Descriptor Tag values
+ * Initializer useful for decoding table.
+ * Please keep this in sync with the above definitions.
*/
-enum fc_fn_dtag {
- ELS_FN_DTAG_LNK_INTEGRITY = 0x00020001, /* Link Integrity */
- ELS_FN_DTAG_PEER_CONGEST = 0x00020003, /* Peer Congestion */
- ELS_FN_DTAG_CONGESTION = 0x00020004, /* Congestion */
+#define FC_FPIN_LI_EVT_TYPES_INIT { \
+ { FPIN_LI_UNKNOWN, "Unknown" }, \
+ { FPIN_LI_LINK_FAILURE, "Link Failure" }, \
+ { FPIN_LI_LOSS_OF_SYNC, "Loss of Synchronization" }, \
+ { FPIN_LI_LOSS_OF_SIG, "Loss of Signal" }, \
+ { FPIN_LI_PRIM_SEQ_ERR, "Primitive Sequence Protocol Error" }, \
+ { FPIN_LI_INVALID_TX_WD, "Invalid Transmission Word" }, \
+ { FPIN_LI_INVALID_CRC, "Invalid CRC" }, \
+ { FPIN_LI_DEVICE_SPEC, "Device Specific" }, \
+}
+
+/*
+ * Delivery event types
+ */
+enum fc_fpin_deli_event_types {
+ FPIN_DELI_UNKNOWN = 0x0,
+ FPIN_DELI_TIMEOUT = 0x1,
+ FPIN_DELI_UNABLE_TO_ROUTE = 0x2,
+ FPIN_DELI_DEVICE_SPEC = 0xF,
};
/*
- * Fabric Notification Descriptor
+ * Initializer useful for decoding table.
+ * Please keep this in sync with the above definitions.
*/
-struct fc_fn_desc {
- __be32 fn_desc_tag; /* Notification Descriptor Tag */
- __be32 fn_desc_value_len; /* Length of Descriptor Value field
- * (in bytes)
- */
- __u8 fn_desc_value[0]; /* Descriptor Value */
+#define FC_FPIN_DELI_EVT_TYPES_INIT { \
+ { FPIN_DELI_UNKNOWN, "Unknown" }, \
+ { FPIN_DELI_TIMEOUT, "Timeout" }, \
+ { FPIN_DELI_UNABLE_TO_ROUTE, "Unable to Route" }, \
+ { FPIN_DELI_DEVICE_SPEC, "Device Specific" }, \
+}
+
+/*
+ * Congestion event types
+ */
+enum fc_fpin_congn_event_types {
+ FPIN_CONGN_CLEAR = 0x0,
+ FPIN_CONGN_LOST_CREDIT = 0x1,
+ FPIN_CONGN_CREDIT_STALL = 0x2,
+ FPIN_CONGN_OVERSUBSCRIPTION = 0x3,
+ FPIN_CONGN_DEVICE_SPEC = 0xF,
+};
+
+/*
+ * Initializer useful for decoding table.
+ * Please keep this in sync with the above definitions.
+ */
+#define FC_FPIN_CONGN_EVT_TYPES_INIT { \
+ { FPIN_CONGN_CLEAR, "Clear" }, \
+ { FPIN_CONGN_LOST_CREDIT, "Lost Credit" }, \
+ { FPIN_CONGN_CREDIT_STALL, "Credit Stall" }, \
+ { FPIN_CONGN_OVERSUBSCRIPTION, "Oversubscription" }, \
+ { FPIN_CONGN_DEVICE_SPEC, "Device Specific" }, \
+}
+
+enum fc_fpin_congn_severity_types {
+ FPIN_CONGN_SEVERITY_WARNING = 0xF1,
+ FPIN_CONGN_SEVERITY_ERROR = 0xF7,
+};
+
+/*
+ * Link Integrity Notification Descriptor
+ */
+struct fc_fn_li_desc {
+ __be32 desc_tag; /* Descriptor Tag (0x00020001) */
+ __be32 desc_len; /* Length of Descriptor (in bytes).
+ * Size of descriptor excluding
+ * desc_tag and desc_len fields.
+ */
+ __be64 detecting_wwpn; /* Port Name that detected event */
+ __be64 attached_wwpn; /* Port Name of device attached to
+ * detecting Port Name
+ */
+ __be16 event_type; /* see enum fc_fpin_li_event_types */
+ __be16 event_modifier; /* Implementation specific value
+ * describing the event type
+ */
+ __be32 event_threshold;/* duration in ms of the link
+ * integrity detection cycle
+ */
+ __be32 event_count; /* minimum number of event
+ * occurrences during the event
+ * threshold to cause the LI event
+ */
+ __be32 pname_count; /* number of portname_list elements */
+ __be64 pname_list[]; /* list of N_Port_Names accessible
+ * through the attached port
+ */
+};
+
+/*
+ * Delivery Notification Descriptor
+ */
+struct fc_fn_deli_desc {
+ __be32 desc_tag; /* Descriptor Tag (0x00020002) */
+ __be32 desc_len; /* Length of Descriptor (in bytes).
+ * Size of descriptor excluding
+ * desc_tag and desc_len fields.
+ */
+ __be64 detecting_wwpn; /* Port Name that detected event */
+ __be64 attached_wwpn; /* Port Name of device attached to
+ * detecting Port Name
+ */
+ __be32 deli_reason_code;/* see enum fc_fpin_deli_event_types */
+};
+
+/*
+ * Peer Congestion Notification Descriptor
+ */
+struct fc_fn_peer_congn_desc {
+ __be32 desc_tag; /* Descriptor Tag (0x00020003) */
+ __be32 desc_len; /* Length of Descriptor (in bytes).
+ * Size of descriptor excluding
+ * desc_tag and desc_len fields.
+ */
+ __be64 detecting_wwpn; /* Port Name that detected event */
+ __be64 attached_wwpn; /* Port Name of device attached to
+ * detecting Port Name
+ */
+ __be16 event_type; /* see enum fc_fpin_congn_event_types */
+ __be16 event_modifier; /* Implementation specific value
+ * describing the event type
+ */
+ __be32 event_period; /* duration (ms) of the detected
+ * congestion event
+ */
+ __be32 pname_count; /* number of portname_list elements */
+ __be64 pname_list[]; /* list of N_Port_Names accessible
+ * through the attached port
+ */
+};
+
+/*
+ * Congestion Notification Descriptor
+ */
+struct fc_fn_congn_desc {
+ __be32 desc_tag; /* Descriptor Tag (0x00020004) */
+ __be32 desc_len; /* Length of Descriptor (in bytes).
+ * Size of descriptor excluding
+ * desc_tag and desc_len fields.
+ */
+ __be16 event_type; /* see enum fc_fpin_congn_event_types */
+ __be16 event_modifier; /* Implementation specific value
+ * describing the event type
+ */
+ __be32 event_period; /* duration (ms) of the detected
+ * congestion event
+ */
+ __u8 severity; /* see enum fc_fpin_congn_severity_types */
+ __u8 resv[3]; /* reserved - must be zero */
};
/*
@@ -845,8 +1100,154 @@ struct fc_fn_desc {
struct fc_els_fpin {
__u8 fpin_cmd; /* command (0x16) */
__u8 fpin_zero[3]; /* specified as zero - part of cmd */
- __be32 fpin_desc_cnt; /* count of descriptors */
- struct fc_fn_desc fpin_desc[0]; /* Descriptor list */
+ __be32 desc_len; /* Length of Descriptor List (in bytes).
+ * Size of ELS excluding fpin_cmd,
+ * fpin_zero and desc_len fields.
+ */
+ struct fc_tlv_desc fpin_desc[]; /* Descriptor list */
+};
+
+/* Diagnostic Function Descriptor - FPIN Registration */
+struct fc_df_desc_fpin_reg {
+ __be32 desc_tag; /* FPIN Registration (0x00030001) */
+ __be32 desc_len; /* Length of Descriptor (in bytes).
+ * Size of descriptor excluding
+ * desc_tag and desc_len fields.
+ */
+ __be32 count; /* Number of desc_tags elements */
+ __be32 desc_tags[]; /* Array of Descriptor Tags.
+ * Each tag indicates a function
+ * supported by the N_Port (request)
+ * or by the N_Port and Fabric
+ * Controller (reply; may be a subset
+ * of the request).
+ * See enum fc_ls_tlv_dtag for tag values.
+ */
+};
+
+/*
+ * ELS_RDF - Register Diagnostic Functions
+ */
+struct fc_els_rdf {
+ __u8 fpin_cmd; /* command (0x19) */
+ __u8 fpin_zero[3]; /* specified as zero - part of cmd */
+ __be32 desc_len; /* Length of Descriptor List (in bytes).
+ * Size of ELS excluding fpin_cmd,
+ * fpin_zero and desc_len fields.
+ */
+ struct fc_tlv_desc desc[]; /* Descriptor list */
+};
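A hedged sketch of building an RDF that registers for Link Integrity FPINs, assuming `buf` is a caller-supplied payload buffer and memset() is available; the desc_len fields follow the excluding-the-header convention documented above:

	static int build_rdf(__u8 *buf, size_t buflen)
	{
		struct fc_els_rdf *rdf = (struct fc_els_rdf *)buf;
		struct fc_df_desc_fpin_reg *reg;
		size_t needed = sizeof(*rdf) + sizeof(*reg) + sizeof(__be32);

		if (buflen < needed)
			return -1;
		memset(buf, 0, needed);		/* zeroes fpin_zero[] too */
		rdf->fpin_cmd = ELS_RDF;
		rdf->desc_len = __cpu_to_be32(needed - sizeof(*rdf));
		reg = (struct fc_df_desc_fpin_reg *)rdf->desc;
		reg->desc_tag = __cpu_to_be32(ELS_DTAG_FPIN_REGISTER);
		reg->desc_len = __cpu_to_be32(sizeof(*reg) + sizeof(__be32) -
					      FC_TLV_DESC_HDR_SZ);
		reg->count = __cpu_to_be32(1);
		reg->desc_tags[0] = __cpu_to_be32(ELS_DTAG_LNK_INTEGRITY);
		return (int)needed;
	}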
+
+/*
+ * ELS RDF LS_ACC Response.
+ */
+struct fc_els_rdf_resp {
+ struct fc_els_ls_acc acc_hdr;
+ __be32 desc_list_len; /* Length of response (in
+ * bytes). Excludes acc_hdr
+ * and desc_list_len fields.
+ */
+ struct fc_els_lsri_desc lsri;
+ struct fc_tlv_desc desc[]; /* Supported Descriptor list */
+};
+
+
+/*
+ * Diagnostic Capability Descriptors for EDC ELS
+ */
+
+/*
+ * Diagnostic: Link Fault Capability Descriptor
+ */
+struct fc_diag_lnkflt_desc {
+ __be32 desc_tag; /* Descriptor Tag (0x0001000D) */
+ __be32 desc_len; /* Length of Descriptor (in bytes).
+ * Size of descriptor excluding
+ * desc_tag and desc_len fields.
+ * 12 bytes
+ */
+ __be32 degrade_activate_threshold;
+ __be32 degrade_deactivate_threshold;
+ __be32 fec_degrade_interval;
+};
+
+enum fc_edc_cg_signal_cap_types {
+ /* Note: Capability: bits 31:4 Rsvd; bits 3:0 are capabilities */
+ EDC_CG_SIG_NOTSUPPORTED = 0x00, /* neither supported */
+ EDC_CG_SIG_WARN_ONLY = 0x01,
+ EDC_CG_SIG_WARN_ALARM = 0x02, /* both supported */
};
+/*
+ * Initializer useful for decoding table.
+ * Please keep this in sync with the above definitions.
+ */
+#define FC_EDC_CG_SIGNAL_CAP_TYPES_INIT { \
+ { EDC_CG_SIG_NOTSUPPORTED, "Signaling Not Supported" }, \
+ { EDC_CG_SIG_WARN_ONLY, "Warning Signal" }, \
+ { EDC_CG_SIG_WARN_ALARM, "Warning and Alarm Signals" }, \
+}
+
+enum fc_diag_cg_sig_freq_types {
+ EDC_CG_SIGFREQ_CNT_MIN = 1, /* Min Frequency Count */
+ EDC_CG_SIGFREQ_CNT_MAX = 999, /* Max Frequency Count */
+
+ EDC_CG_SIGFREQ_SEC = 0x1, /* Units: seconds */
+ EDC_CG_SIGFREQ_MSEC = 0x2, /* Units: milliseconds */
+};
+
+struct fc_diag_cg_sig_freq {
+ __be16 count; /* Time between signals
+ * note: upper 6 bits rsvd
+ */
+ __be16 units; /* Time unit for count
+ * note: upper 12 bits rsvd
+ */
+};
+
+/*
+ * Diagnostic: Congestion Signaling Capability Descriptor
+ */
+struct fc_diag_cg_sig_desc {
+ __be32 desc_tag; /* Descriptor Tag (0x0001000F) */
+ __be32 desc_len; /* Length of Descriptor (in bytes).
+ * Size of descriptor excluding
+ * desc_tag and desc_len fields.
+ * 16 bytes
+ */
+ __be32 xmt_signal_capability;
+ struct fc_diag_cg_sig_freq xmt_signal_frequency;
+ __be32 rcv_signal_capability;
+ struct fc_diag_cg_sig_freq rcv_signal_frequency;
+};
+
+/*
+ * ELS_EDC - Exchange Diagnostic Capabilities
+ */
+struct fc_els_edc {
+ __u8 edc_cmd; /* command (0x17) */
+ __u8 edc_zero[3]; /* specified as zero - part of cmd */
+ __be32 desc_len; /* Length of Descriptor List (in bytes).
+ * Size of ELS excluding edc_cmd,
+ * edc_zero and desc_len fields.
+ */
+ struct fc_tlv_desc desc[];
+ /* Diagnostic Descriptor list */
+};
+
+/*
+ * ELS EDC LS_ACC Response.
+ */
+struct fc_els_edc_resp {
+ struct fc_els_ls_acc acc_hdr;
+ __be32 desc_list_len; /* Length of response (in
+ * bytes). Excludes acc_hdr
+ * and desc_list_len fields.
+ */
+ struct fc_els_lsri_desc lsri;
+ struct fc_tlv_desc desc[];
+ /* Supported Diagnostic Descriptor list */
+};
+
+
#endif /* _FC_ELS_H_ */
diff --git a/include/uapi/scsi/scsi_bsg_fc.h b/include/uapi/scsi/scsi_bsg_fc.h
index 3ae65e93235c..7f5930801f72 100644
--- a/include/uapi/scsi/scsi_bsg_fc.h
+++ b/include/uapi/scsi/scsi_bsg_fc.h
@@ -209,7 +209,7 @@ struct fc_bsg_host_vendor {
__u64 vendor_id;
/* start of vendor command area */
- __u32 vendor_cmd[0];
+ __u32 vendor_cmd[];
};
/* Response:
diff --git a/include/uapi/scsi/scsi_bsg_mpi3mr.h b/include/uapi/scsi/scsi_bsg_mpi3mr.h
new file mode 100644
index 000000000000..fdc3517f9e19
--- /dev/null
+++ b/include/uapi/scsi/scsi_bsg_mpi3mr.h
@@ -0,0 +1,582 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2022 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#ifndef SCSI_BSG_MPI3MR_H_INCLUDED
+#define SCSI_BSG_MPI3MR_H_INCLUDED
+
+#include <linux/types.h>
+
+/* Definitions for BSG commands */
+#define MPI3MR_IOCTL_VERSION 0x06
+
+#define MPI3MR_APP_DEFAULT_TIMEOUT (60) /*seconds*/
+
+#define MPI3MR_BSG_ADPTYPE_UNKNOWN 0
+#define MPI3MR_BSG_ADPTYPE_AVGFAMILY 1
+
+#define MPI3MR_BSG_ADPSTATE_UNKNOWN 0
+#define MPI3MR_BSG_ADPSTATE_OPERATIONAL 1
+#define MPI3MR_BSG_ADPSTATE_FAULT 2
+#define MPI3MR_BSG_ADPSTATE_IN_RESET 3
+#define MPI3MR_BSG_ADPSTATE_UNRECOVERABLE 4
+
+#define MPI3MR_BSG_ADPRESET_UNKNOWN 0
+#define MPI3MR_BSG_ADPRESET_SOFT 1
+#define MPI3MR_BSG_ADPRESET_DIAG_FAULT 2
+
+#define MPI3MR_BSG_LOGDATA_MAX_ENTRIES 400
+#define MPI3MR_BSG_LOGDATA_ENTRY_HEADER_SZ 4
+
+#define MPI3MR_DRVBSG_OPCODE_UNKNOWN 0
+#define MPI3MR_DRVBSG_OPCODE_ADPINFO 1
+#define MPI3MR_DRVBSG_OPCODE_ADPRESET 2
+#define MPI3MR_DRVBSG_OPCODE_ALLTGTDEVINFO 4
+#define MPI3MR_DRVBSG_OPCODE_GETCHGCNT 5
+#define MPI3MR_DRVBSG_OPCODE_LOGDATAENABLE 6
+#define MPI3MR_DRVBSG_OPCODE_PELENABLE 7
+#define MPI3MR_DRVBSG_OPCODE_GETLOGDATA 8
+#define MPI3MR_DRVBSG_OPCODE_QUERY_HDB 9
+#define MPI3MR_DRVBSG_OPCODE_REPOST_HDB 10
+#define MPI3MR_DRVBSG_OPCODE_UPLOAD_HDB 11
+#define MPI3MR_DRVBSG_OPCODE_REFRESH_HDB_TRIGGERS 12
+
+
+#define MPI3MR_BSG_BUFTYPE_UNKNOWN 0
+#define MPI3MR_BSG_BUFTYPE_RAIDMGMT_CMD 1
+#define MPI3MR_BSG_BUFTYPE_RAIDMGMT_RESP 2
+#define MPI3MR_BSG_BUFTYPE_DATA_IN 3
+#define MPI3MR_BSG_BUFTYPE_DATA_OUT 4
+#define MPI3MR_BSG_BUFTYPE_MPI_REPLY 5
+#define MPI3MR_BSG_BUFTYPE_ERR_RESPONSE 6
+#define MPI3MR_BSG_BUFTYPE_MPI_REQUEST 0xFE
+
+#define MPI3MR_BSG_MPI_REPLY_BUFTYPE_UNKNOWN 0
+#define MPI3MR_BSG_MPI_REPLY_BUFTYPE_STATUS 1
+#define MPI3MR_BSG_MPI_REPLY_BUFTYPE_ADDRESS 2
+
+#define MPI3MR_HDB_BUFTYPE_UNKNOWN 0
+#define MPI3MR_HDB_BUFTYPE_TRACE 1
+#define MPI3MR_HDB_BUFTYPE_FIRMWARE 2
+#define MPI3MR_HDB_BUFTYPE_RESERVED 3
+
+#define MPI3MR_HDB_BUFSTATUS_UNKNOWN 0
+#define MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED 1
+#define MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED 2
+#define MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED 3
+#define MPI3MR_HDB_BUFSTATUS_RELEASED 4
+
+#define MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN 0
+#define MPI3MR_HDB_TRIGGER_TYPE_DIAGFAULT 1
+#define MPI3MR_HDB_TRIGGER_TYPE_ELEMENT 2
+#define MPI3MR_HDB_TRIGGER_TYPE_MASTER 3
+
+
+/* Supported BSG commands */
+enum command {
+ MPI3MR_DRV_CMD = 1,
+ MPI3MR_MPT_CMD = 2,
+};
+
+/**
+ * struct mpi3_driver_info_layout - Information about driver
+ *
+ * @information_length: Length of this structure in bytes
+ * @driver_signature: Driver Vendor name
+ * @os_name: Operating System Name
+ * @driver_name: Driver name
+ * @driver_version: Driver version
+ * @driver_release_date: Driver release date
+ * @driver_capabilities: Driver capabilities
+ */
+struct mpi3_driver_info_layout {
+ __le32 information_length;
+ __u8 driver_signature[12];
+ __u8 os_name[16];
+ __u8 os_version[12];
+ __u8 driver_name[20];
+ __u8 driver_version[32];
+ __u8 driver_release_date[20];
+ __le32 driver_capabilities;
+};
+
+/**
+ * struct mpi3mr_bsg_in_adpinfo - Adapter information request
+ * data returned by the driver.
+ *
+ * @adp_type: Adapter type
+ * @rsvd1: Reserved
+ * @pci_dev_id: PCI device ID of the adapter
+ * @pci_dev_hw_rev: PCI revision of the adapter
+ * @pci_subsys_dev_id: PCI subsystem device ID of the adapter
+ * @pci_subsys_ven_id: PCI subsystem vendor ID of the adapter
+ * @pci_dev: PCI device
+ * @pci_func: PCI function
+ * @pci_bus: PCI bus
+ * @rsvd2: Reserved
+ * @pci_seg_id: PCI segment ID
+ * @app_intfc_ver: version of the application interface definition
+ * @rsvd3: Reserved
+ * @rsvd4: Reserved
+ * @rsvd5: Reserved
+ * @driver_info: Driver Information (Version/Name)
+ */
+struct mpi3mr_bsg_in_adpinfo {
+ __u32 adp_type;
+ __u32 rsvd1;
+ __u32 pci_dev_id;
+ __u32 pci_dev_hw_rev;
+ __u32 pci_subsys_dev_id;
+ __u32 pci_subsys_ven_id;
+ __u32 pci_dev:5;
+ __u32 pci_func:3;
+ __u32 pci_bus:8;
+ __u16 rsvd2;
+ __u32 pci_seg_id;
+ __u32 app_intfc_ver;
+ __u8 adp_state;
+ __u8 rsvd3;
+ __u16 rsvd4;
+ __u32 rsvd5[2];
+ struct mpi3_driver_info_layout driver_info;
+};
+
+/**
+ * struct mpi3mr_bsg_adp_reset - Adapter reset request
+ * payload data to the driver.
+ *
+ * @reset_type: Reset type
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ */
+struct mpi3mr_bsg_adp_reset {
+ __u8 reset_type;
+ __u8 rsvd1;
+ __u16 rsvd2;
+};
+
+/**
+ * struct mpi3mr_change_count - Topology change count
+ * returned by the driver.
+ *
+ * @change_count: Topology change count
+ * @rsvd: Reserved
+ */
+struct mpi3mr_change_count {
+ __u16 change_count;
+ __u16 rsvd;
+};
+
+/**
+ * struct mpi3mr_device_map_info - Target device mapping
+ * information
+ *
+ * @handle: Firmware device handle
+ * @perst_id: Persistent ID assigned by the firmware
+ * @target_id: Target ID assigned by the driver
+ * @bus_id: Bus ID assigned by the driver
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ */
+struct mpi3mr_device_map_info {
+ __u16 handle;
+ __u16 perst_id;
+ __u32 target_id;
+ __u8 bus_id;
+ __u8 rsvd1;
+ __u16 rsvd2;
+};
+
+/**
+ * struct mpi3mr_all_tgt_info - Target device mapping
+ * information returned by the driver
+ *
+ * @num_devices: The number of devices in the driver's inventory
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @dmi: Variable length array of mapping information of targets
+ */
+struct mpi3mr_all_tgt_info {
+ __u16 num_devices;
+ __u16 rsvd1;
+ __u32 rsvd2;
+ struct mpi3mr_device_map_info dmi[1];
+};
+
+/**
+ * struct mpi3mr_logdata_enable - Number of log data
+ * entries saved by the driver, returned as payload data for
+ * the enable logdata BSG request.
+ *
+ * @max_entries: Number of log data entries cached by the driver
+ * @rsvd: Reserved
+ */
+struct mpi3mr_logdata_enable {
+ __u16 max_entries;
+ __u16 rsvd;
+};
+
+/**
+ * struct mpi3mr_bsg_out_pel_enable - PEL enable request payload
+ * data to the driver.
+ *
+ * @pel_locale: PEL locale to the firmware
+ * @pel_class: PEL class to the firmware
+ * @rsvd: Reserved
+ */
+struct mpi3mr_bsg_out_pel_enable {
+ __u16 pel_locale;
+ __u8 pel_class;
+ __u8 rsvd;
+};
+
+/**
+ * struct mpi3mr_logdata_entry - Log data entry cached by the
+ * driver.
+ *
+ * @valid_entry: Is the entry valid
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @data: Variable length Log entry data
+ */
+struct mpi3mr_logdata_entry {
+ __u8 valid_entry;
+ __u8 rsvd1;
+ __u16 rsvd2;
+ __u8 data[1]; /* Variable length Array */
+};
+
+/**
+ * struct mpi3mr_bsg_in_log_data - Log data entries saved by
+ * the driver, returned as payload data for the get logdata
+ * request.
+ *
+ * @entry: Variable length Log data entry array
+ */
+struct mpi3mr_bsg_in_log_data {
+ struct mpi3mr_logdata_entry entry[1];
+};
+
+/**
+ * struct mpi3mr_hdb_entry - host diag buffer entry.
+ *
+ * @buf_type: Buffer type
+ * @status: Buffer status
+ * @trigger_type: Trigger type
+ * @rsvd1: Reserved
+ * @size: Buffer size
+ * @rsvd2: Reserved
+ * @trigger_data: Trigger specific data
+ * @rsvd3: Reserved
+ * @rsvd4: Reserved
+ */
+struct mpi3mr_hdb_entry {
+ __u8 buf_type;
+ __u8 status;
+ __u8 trigger_type;
+ __u8 rsvd1;
+ __u16 size;
+ __u16 rsvd2;
+ __u64 trigger_data;
+ __u32 rsvd3;
+ __u32 rsvd4;
+};
+
+
+/**
+ * struct mpi3mr_bsg_in_hdb_status - This structure contains
+ * return data for the BSG request to retrieve the number of host
+ * diagnostic buffers supported by the driver, their current
+ * status, and any additional status-specific data, in the form
+ * of multiple hdb entries.
+ *
+ * @num_hdb_types: Number of host diag buffer types supported
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @rsvd3: Reserved
+ * @entry: Variable length Diag buffer status entry array
+ */
+struct mpi3mr_bsg_in_hdb_status {
+ __u8 num_hdb_types;
+ __u8 rsvd1;
+ __u16 rsvd2;
+ __u32 rsvd3;
+ struct mpi3mr_hdb_entry entry[1];
+};
+
+/**
+ * struct mpi3mr_bsg_out_repost_hdb - Repost host diagnostic
+ * buffer request payload data to the driver.
+ *
+ * @buf_type: Buffer type
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ */
+struct mpi3mr_bsg_out_repost_hdb {
+ __u8 buf_type;
+ __u8 rsvd1;
+ __u16 rsvd2;
+};
+
+/**
+ * struct mpi3mr_bsg_out_upload_hdb - Upload host diagnostic
+ * buffer request payload data to the driver.
+ *
+ * @buf_type: Buffer type
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @start_offset: Start offset of the buffer from where to copy
+ * @length: Length of the buffer to copy
+ */
+struct mpi3mr_bsg_out_upload_hdb {
+ __u8 buf_type;
+ __u8 rsvd1;
+ __u16 rsvd2;
+ __u32 start_offset;
+ __u32 length;
+};
+
+/**
+ * struct mpi3mr_bsg_out_refresh_hdb_triggers - Refresh host
+ * diagnostic buffer triggers request payload data to the driver.
+ *
+ * @page_type: Page type
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ */
+struct mpi3mr_bsg_out_refresh_hdb_triggers {
+ __u8 page_type;
+ __u8 rsvd1;
+ __u16 rsvd2;
+};
+/**
+ * struct mpi3mr_bsg_drv_cmd - Generic bsg data
+ * structure for all driver specific requests.
+ *
+ * @mrioc_id: Controller ID
+ * @opcode: Driver specific opcode
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ */
+struct mpi3mr_bsg_drv_cmd {
+ __u8 mrioc_id;
+ __u8 opcode;
+ __u16 rsvd1;
+ __u32 rsvd2[4];
+};
+/**
+ * struct mpi3mr_bsg_in_reply_buf - MPI reply buffer returned
+ * for MPI Passthrough request.
+ *
+ * @mpi_reply_type: Type of MPI reply
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @reply_buf: Variable length buffer based on mpi_reply_type
+ */
+struct mpi3mr_bsg_in_reply_buf {
+ __u8 mpi_reply_type;
+ __u8 rsvd1;
+ __u16 rsvd2;
+ __u8 reply_buf[1];
+};
+
+/**
+ * struct mpi3mr_buf_entry - User buffer descriptor for MPI
+ * Passthrough requests.
+ *
+ * @buf_type: Buffer type
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @buf_len: Buffer length
+ */
+struct mpi3mr_buf_entry {
+ __u8 buf_type;
+ __u8 rsvd1;
+ __u16 rsvd2;
+ __u32 buf_len;
+};
+/**
+ * struct mpi3mr_bsg_buf_entry_list - list of user buffer
+ * descriptor for MPI Passthrough requests.
+ *
+ * @num_of_entries: Number of buffer descriptors
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @rsvd3: Reserved
+ * @buf_entry: Variable length array of buffer descriptors
+ */
+struct mpi3mr_buf_entry_list {
+ __u8 num_of_entries;
+ __u8 rsvd1;
+ __u16 rsvd2;
+ __u32 rsvd3;
+ struct mpi3mr_buf_entry buf_entry[1];
+};
+/**
+ * struct mpi3mr_bsg_mptcmd - Generic bsg data
+ * structure for all MPI Passthrough requests.
+ *
+ * @mrioc_id: Controller ID
+ * @rsvd1: Reserved
+ * @timeout: MPI request timeout
+ * @buf_entry_list: Buffer descriptor list
+ */
+struct mpi3mr_bsg_mptcmd {
+ __u8 mrioc_id;
+ __u8 rsvd1;
+ __u16 timeout;
+ __u32 rsvd2;
+ struct mpi3mr_buf_entry_list buf_entry_list;
+};
+
+/**
+ * struct mpi3mr_bsg_packet - Generic bsg data
+ * structure for all supported requests.
+ *
+ * @cmd_type: represents drvrcmd or mptcmd
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @drvrcmd: driver request structure
+ * @mptcmd: mpt request structure
+ */
+struct mpi3mr_bsg_packet {
+ __u8 cmd_type;
+ __u8 rsvd1;
+ __u16 rsvd2;
+ __u32 rsvd3;
+ union {
+ struct mpi3mr_bsg_drv_cmd drvrcmd;
+ struct mpi3mr_bsg_mptcmd mptcmd;
+ } cmd;
+};
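A hedged sketch of a driver-specific request: to fetch adapter information, an application fills the packet as below and submits it through the controller's bsg node (the bsg transport plumbing itself, e.g. struct sg_io_v4 and the associated ioctl calls, is elided):

	struct mpi3mr_bsg_packet pkt = {
		.cmd_type = MPI3MR_DRV_CMD,
		.cmd.drvrcmd = {
			.mrioc_id = 0,		/* controller instance */
			.opcode = MPI3MR_DRVBSG_OPCODE_ADPINFO,
		},
	};
	/* the reply payload is then interpreted as struct mpi3mr_bsg_in_adpinfo */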
+
+
+/* MPI3: NVMe Encapsulation related definitions */
+#ifndef MPI3_NVME_ENCAP_CMD_MAX
+#define MPI3_NVME_ENCAP_CMD_MAX (1)
+#endif
+
+struct mpi3_nvme_encapsulated_request {
+ __le16 host_tag;
+ __u8 ioc_use_only02;
+ __u8 function;
+ __le16 ioc_use_only04;
+ __u8 ioc_use_only06;
+ __u8 msg_flags;
+ __le16 change_count;
+ __le16 dev_handle;
+ __le16 encapsulated_command_length;
+ __le16 flags;
+ __le32 data_length;
+ __le32 reserved14[3];
+ __le32 command[MPI3_NVME_ENCAP_CMD_MAX];
+};
+
+struct mpi3_nvme_encapsulated_error_reply {
+ __le16 host_tag;
+ __u8 ioc_use_only02;
+ __u8 function;
+ __le16 ioc_use_only04;
+ __u8 ioc_use_only06;
+ __u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ __le32 nvme_completion_entry[4];
+};
+
+#define MPI3MR_NVME_PRP_SIZE 8 /* PRP size */
+#define MPI3MR_NVME_CMD_PRP1_OFFSET 24 /* PRP1 offset in NVMe cmd */
+#define MPI3MR_NVME_CMD_PRP2_OFFSET 32 /* PRP2 offset in NVMe cmd */
+#define MPI3MR_NVME_CMD_SGL_OFFSET 24 /* SGL offset in NVMe cmd */
+#define MPI3MR_NVME_DATA_FORMAT_PRP 0
+#define MPI3MR_NVME_DATA_FORMAT_SGL1 1
+#define MPI3MR_NVME_DATA_FORMAT_SGL2 2
+
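A hedged sketch of what the PRP offsets describe: the encapsulated NVMe submission-queue entry carries its data pointers at fixed byte offsets, so a caller patches them in place. This assumes the request was allocated with room for the full SQE beyond the one-dword `command[]` placeholder:

	static void set_prp_entries(struct mpi3_nvme_encapsulated_request *req,
				    __le64 prp1, __le64 prp2)
	{
		__u8 *cmd = (__u8 *)req->command;

		memcpy(cmd + MPI3MR_NVME_CMD_PRP1_OFFSET, &prp1, MPI3MR_NVME_PRP_SIZE);
		memcpy(cmd + MPI3MR_NVME_CMD_PRP2_OFFSET, &prp2, MPI3MR_NVME_PRP_SIZE);
	}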
+/* MPI3: task management related definitions */
+struct mpi3_scsi_task_mgmt_request {
+ __le16 host_tag;
+ __u8 ioc_use_only02;
+ __u8 function;
+ __le16 ioc_use_only04;
+ __u8 ioc_use_only06;
+ __u8 msg_flags;
+ __le16 change_count;
+ __le16 dev_handle;
+ __le16 task_host_tag;
+ __u8 task_type;
+ __u8 reserved0f;
+ __le16 task_request_queue_id;
+ __le16 reserved12;
+ __le32 reserved14;
+ __u8 lun[8];
+};
+
+#define MPI3_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x08)
+#define MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
+#define MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK_SET (0x02)
+#define MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
+#define MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
+#define MPI3_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
+#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
+#define MPI3_SCSITASKMGMT_TASKTYPE_CLEAR_ACA (0x08)
+#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK_SET (0x09)
+#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_ASYNC_EVENT (0x0a)
+#define MPI3_SCSITASKMGMT_TASKTYPE_I_T_NEXUS_RESET (0x0b)
+struct mpi3_scsi_task_mgmt_reply {
+ __le16 host_tag;
+ __u8 ioc_use_only02;
+ __u8 function;
+ __le16 ioc_use_only04;
+ __u8 ioc_use_only06;
+ __u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ __le32 termination_count;
+ __le32 response_data;
+ __le32 reserved18;
+};
+
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE (0x00)
+#define MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME (0x02)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED (0x04)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED (0x05)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED (0x08)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN (0x09)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG (0x0a)
+#define MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC (0x80)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED (0x81)
+
+/* MPI3: PEL related definitions */
+#define MPI3_PEL_LOCALE_FLAGS_NON_BLOCKING_BOOT_EVENT (0x0200)
+#define MPI3_PEL_LOCALE_FLAGS_BLOCKING_BOOT_EVENT (0x0100)
+#define MPI3_PEL_LOCALE_FLAGS_PCIE (0x0080)
+#define MPI3_PEL_LOCALE_FLAGS_CONFIGURATION (0x0040)
+#define MPI3_PEL_LOCALE_FLAGS_CONTROLER (0x0020)
+#define MPI3_PEL_LOCALE_FLAGS_SAS (0x0010)
+#define MPI3_PEL_LOCALE_FLAGS_EPACK (0x0008)
+#define MPI3_PEL_LOCALE_FLAGS_ENCLOSURE (0x0004)
+#define MPI3_PEL_LOCALE_FLAGS_PD (0x0002)
+#define MPI3_PEL_LOCALE_FLAGS_VD (0x0001)
+#define MPI3_PEL_CLASS_DEBUG (0x00)
+#define MPI3_PEL_CLASS_PROGRESS (0x01)
+#define MPI3_PEL_CLASS_INFORMATIONAL (0x02)
+#define MPI3_PEL_CLASS_WARNING (0x03)
+#define MPI3_PEL_CLASS_CRITICAL (0x04)
+#define MPI3_PEL_CLASS_FATAL (0x05)
+#define MPI3_PEL_CLASS_FAULT (0x06)
+
+/* MPI3: Function definitions */
+#define MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH (0x0a)
+#define MPI3_BSG_FUNCTION_SCSI_IO (0x20)
+#define MPI3_BSG_FUNCTION_SCSI_TASK_MGMT (0x21)
+#define MPI3_BSG_FUNCTION_SMP_PASSTHROUGH (0x22)
+#define MPI3_BSG_FUNCTION_NVME_ENCAPSULATED (0x24)
+
+#endif
diff --git a/include/uapi/scsi/scsi_netlink_fc.h b/include/uapi/scsi/scsi_netlink_fc.h
index 7535253f1a96..b46e9cbeb001 100644
--- a/include/uapi/scsi/scsi_netlink_fc.h
+++ b/include/uapi/scsi/scsi_netlink_fc.h
@@ -35,7 +35,7 @@
* FC Transport Broadcast Event Message :
* FC_NL_ASYNC_EVENT
*
- * Note: if Vendor Unique message, &event_data will be start of
+ * Note: if Vendor Unique message, event_data_flex will be start of
* vendor unique payload, and the length of the payload is
* per event_datalen
*
@@ -50,7 +50,10 @@ struct fc_nl_event {
__u16 event_datalen;
__u32 event_num;
__u32 event_code;
- __u32 event_data;
+ union {
+ __u32 event_data;
+ __DECLARE_FLEX_ARRAY(__u8, event_data_flex);
+ };
} __attribute__((aligned(sizeof(__u64))));
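The union keeps the old fixed-size event_data word at the same offset, while __DECLARE_FLEX_ARRAY() lets vendor-unique messages address a variable-length payload there. A hypothetical consumer:

	static const __u8 *vendor_payload(const struct fc_nl_event *ev, __u16 *len)
	{
		*len = ev->event_datalen;	/* payload length, per the note above */
		return ev->event_data_flex;	/* starts where event_data used to */
	}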
diff --git a/include/uapi/sound/asequencer.h b/include/uapi/sound/asequencer.h
index a75e14edc957..6d4a2c60808d 100644
--- a/include/uapi/sound/asequencer.h
+++ b/include/uapi/sound/asequencer.h
@@ -3,22 +3,6 @@
* Main header file for the ALSA sequencer
* Copyright (c) 1998-1999 by Frank van de Pol <fvdpol@coil.demon.nl>
* (c) 1998-1999 by Jaroslav Kysela <perex@perex.cz>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _UAPI__SOUND_ASEQUENCER_H
#define _UAPI__SOUND_ASEQUENCER_H
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index 6048553c119d..9f35bedafcff 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -5,10 +5,6 @@
* Copyright (C) 2012 Texas Instruments Inc.
* Copyright (C) 2015 Intel Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Simple file API to load FW that includes mixers, coefficients, DAPM graphs,
* algorithms, equalisers, DAIs, widgets etc.
*/
@@ -17,6 +13,7 @@
#define __LINUX_UAPI_SND_ASOC_H
#include <linux/types.h>
+#include <sound/asound.h>
/*
* Maximum number of channels topology kcontrol can represent.
@@ -169,16 +166,22 @@
#define SND_SOC_TPLG_LNK_FLGBIT_VOICE_WAKEUP (1 << 3)
/* DAI topology BCLK parameter
- * For the backwards capability, by default codec is bclk master
+ * For backwards compatibility, by default the codec is the bclk provider
*/
-#define SND_SOC_TPLG_BCLK_CM 0 /* codec is bclk master */
-#define SND_SOC_TPLG_BCLK_CS 1 /* codec is bclk slave */
+#define SND_SOC_TPLG_BCLK_CP 0 /* codec is bclk provider */
+#define SND_SOC_TPLG_BCLK_CC 1 /* codec is bclk consumer */
+/* keep previous definitions for compatibility */
+#define SND_SOC_TPLG_BCLK_CM SND_SOC_TPLG_BCLK_CP
+#define SND_SOC_TPLG_BCLK_CS SND_SOC_TPLG_BCLK_CC
/* DAI topology FSYNC parameter
- * For the backwards capability, by default codec is fsync master
+ * For backwards compatibility, by default the codec is the fsync provider
*/
-#define SND_SOC_TPLG_FSYNC_CM 0 /* codec is fsync master */
-#define SND_SOC_TPLG_FSYNC_CS 1 /* codec is fsync slave */
+#define SND_SOC_TPLG_FSYNC_CP 0 /* codec is fsync provider */
+#define SND_SOC_TPLG_FSYNC_CC 1 /* codec is fsync consumer */
+/* keep previous definitions for compatibility */
+#define SND_SOC_TPLG_FSYNC_CM SND_SOC_TPLG_FSYNC_CP
+#define SND_SOC_TPLG_FSYNC_CS SND_SOC_TPLG_FSYNC_CC
/*
* Block Header.
@@ -233,8 +236,8 @@ struct snd_soc_tplg_vendor_array {
struct snd_soc_tplg_private {
__le32 size; /* in bytes of private data */
union {
- char data[0];
- struct snd_soc_tplg_vendor_array array[0];
+ __DECLARE_FLEX_ARRAY(char, data);
+ __DECLARE_FLEX_ARRAY(struct snd_soc_tplg_vendor_array, array);
};
} __attribute__((packed));
@@ -335,8 +338,8 @@ struct snd_soc_tplg_hw_config {
__u8 clock_gated; /* SND_SOC_TPLG_DAI_CLK_GATE_ value */
__u8 invert_bclk; /* 1 for inverted BCLK, 0 for normal */
__u8 invert_fsync; /* 1 for inverted frame clock, 0 for normal */
- __u8 bclk_master; /* SND_SOC_TPLG_BCLK_ value */
- __u8 fsync_master; /* SND_SOC_TPLG_FSYNC_ value */
+ __u8 bclk_provider; /* SND_SOC_TPLG_BCLK_ value */
+ __u8 fsync_provider; /* SND_SOC_TPLG_FSYNC_ value */
__u8 mclk_direction; /* SND_SOC_TPLG_MCLK_ value */
__le16 reserved; /* for 32bit alignment */
__le32 mclk_rate; /* MCLK or SYSCLK frequency in Hz */
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 535a7229e1d9..de6810e94abe 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -3,22 +3,6 @@
* Advanced Linux Sound Architecture - ALSA - Driver
* Copyright (c) 1994-2003 by Jaroslav Kysela <perex@perex.cz>,
* Abramo Bagnara <abramo@alsa-project.org>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _UAPI__SOUND_ASOUND_H
@@ -56,8 +40,10 @@
* *
****************************************************************************/
+#define AES_IEC958_STATUS_SIZE 24
+
struct snd_aes_iec958 {
- unsigned char status[24]; /* AES/IEC958 channel status bits */
+ unsigned char status[AES_IEC958_STATUS_SIZE]; /* AES/IEC958 channel status bits */
unsigned char subcode[147]; /* AES/IEC958 subcode bits */
unsigned char pad; /* nothing */
unsigned char dig_subframe[4]; /* AES/IEC958 subframe bits */
@@ -202,6 +188,11 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_S24_BE ((__force snd_pcm_format_t) 7) /* low three bytes */
#define SNDRV_PCM_FORMAT_U24_LE ((__force snd_pcm_format_t) 8) /* low three bytes */
#define SNDRV_PCM_FORMAT_U24_BE ((__force snd_pcm_format_t) 9) /* low three bytes */
+/*
+ * For S32/U32 formats, the 'msbits' hardware parameter is often used to deliver information about
+ * the available bit count in the most significant bits. It covers the case of so-called
+ * 'left-justified' or 'right-padded' samples which have less width than 32 bits.
+ */
#define SNDRV_PCM_FORMAT_S32_LE ((__force snd_pcm_format_t) 10)
#define SNDRV_PCM_FORMAT_S32_BE ((__force snd_pcm_format_t) 11)
#define SNDRV_PCM_FORMAT_U32_LE ((__force snd_pcm_format_t) 12)
@@ -299,7 +290,8 @@ typedef int __bitwise snd_pcm_subformat_t;
#define SNDRV_PCM_INFO_HAS_LINK_ABSOLUTE_ATIME 0x02000000 /* report absolute hardware link audio time, not reset on startup */
#define SNDRV_PCM_INFO_HAS_LINK_ESTIMATED_ATIME 0x04000000 /* report estimated link audio time */
#define SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME 0x08000000 /* report synchronized audio/system time */
-
+#define SNDRV_PCM_INFO_EXPLICIT_SYNC 0x10000000 /* needs explicit sync of pointers and data */
+#define SNDRV_PCM_INFO_NO_REWINDS 0x20000000 /* hardware can only support monotonic changes of appl_ptr */
#define SNDRV_PCM_INFO_DRAIN_TRIGGER 0x40000000 /* internal kernel flag - trigger in drain */
#define SNDRV_PCM_INFO_FIFO_IN_FRAMES 0x80000000 /* internal kernel flag - FIFO size is in frames */
@@ -710,7 +702,7 @@ enum {
* Raw MIDI section - /dev/snd/midi??
*/
-#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 1)
+#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 2)
enum {
SNDRV_RAWMIDI_STREAM_OUTPUT = 0,
@@ -736,12 +728,38 @@ struct snd_rawmidi_info {
unsigned char reserved[64]; /* reserved for future use */
};
+#define SNDRV_RAWMIDI_MODE_FRAMING_MASK (7<<0)
+#define SNDRV_RAWMIDI_MODE_FRAMING_SHIFT 0
+#define SNDRV_RAWMIDI_MODE_FRAMING_NONE (0<<0)
+#define SNDRV_RAWMIDI_MODE_FRAMING_TSTAMP (1<<0)
+#define SNDRV_RAWMIDI_MODE_CLOCK_MASK (7<<3)
+#define SNDRV_RAWMIDI_MODE_CLOCK_SHIFT 3
+#define SNDRV_RAWMIDI_MODE_CLOCK_NONE (0<<3)
+#define SNDRV_RAWMIDI_MODE_CLOCK_REALTIME (1<<3)
+#define SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC (2<<3)
+#define SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC_RAW (3<<3)
+
+#define SNDRV_RAWMIDI_FRAMING_DATA_LENGTH 16
+
+struct snd_rawmidi_framing_tstamp {
+ /* For now, frame_type is always 0. Midi 2.0 is expected to add new
+ * types here. Applications are expected to skip unknown frame types.
+ */
+ __u8 frame_type;
+ __u8 length; /* number of valid bytes in data field */
+ __u8 reserved[2];
+ __u32 tv_nsec; /* nanoseconds */
+ __u64 tv_sec; /* seconds */
+ __u8 data[SNDRV_RAWMIDI_FRAMING_DATA_LENGTH];
+} __packed;
+
struct snd_rawmidi_params {
int stream;
size_t buffer_size; /* queue size in bytes */
size_t avail_min; /* minimum avail bytes for wakeup */
unsigned int no_active_sensing: 1; /* do not send active sensing byte in close() */
- unsigned char reserved[16]; /* reserved for future use */
+ unsigned int mode; /* For input data only, frame incoming data */
+ unsigned char reserved[12]; /* reserved for future use */
};
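A hedged sketch tying the rawmidi additions together: negotiate the 2.0.2 protocol via the new USER_PVERSION ioctl (defined just below), request monotonic-clock timestamp framing through params.mode, then read fixed-size snd_rawmidi_framing_tstamp frames instead of a raw byte stream; fd setup and error handling are elided:

	int ver = SNDRV_RAWMIDI_VERSION;	/* 2.0.2 */
	struct snd_rawmidi_params params = {
		.stream = SNDRV_RAWMIDI_STREAM_INPUT,
		.buffer_size = 4096,
		.avail_min = 1,
		.mode = SNDRV_RAWMIDI_MODE_FRAMING_TSTAMP |
			SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC,
	};
	struct snd_rawmidi_framing_tstamp frame;

	ioctl(fd, SNDRV_RAWMIDI_IOCTL_USER_PVERSION, &ver);
	ioctl(fd, SNDRV_RAWMIDI_IOCTL_PARAMS, &params);
	while (read(fd, &frame, sizeof(frame)) == sizeof(frame)) {
		if (frame.frame_type != 0)
			continue;	/* skip unknown frame types, as documented */
		/* frame.data[0..frame.length-1] arrived at tv_sec/tv_nsec */
	}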
#ifndef __KERNEL__
@@ -757,6 +775,7 @@ struct snd_rawmidi_status {
#define SNDRV_RAWMIDI_IOCTL_PVERSION _IOR('W', 0x00, int)
#define SNDRV_RAWMIDI_IOCTL_INFO _IOR('W', 0x01, struct snd_rawmidi_info)
+#define SNDRV_RAWMIDI_IOCTL_USER_PVERSION _IOW('W', 0x02, int)
#define SNDRV_RAWMIDI_IOCTL_PARAMS _IOWR('W', 0x10, struct snd_rawmidi_params)
#define SNDRV_RAWMIDI_IOCTL_STATUS _IOWR('W', 0x20, struct snd_rawmidi_status)
#define SNDRV_RAWMIDI_IOCTL_DROP _IOW('W', 0x30, int)
@@ -974,7 +993,7 @@ typedef int __bitwise snd_ctl_elem_iface_t;
#define SNDRV_CTL_ELEM_ACCESS_WRITE (1<<1)
#define SNDRV_CTL_ELEM_ACCESS_READWRITE (SNDRV_CTL_ELEM_ACCESS_READ|SNDRV_CTL_ELEM_ACCESS_WRITE)
#define SNDRV_CTL_ELEM_ACCESS_VOLATILE (1<<2) /* control value may be changed without a notification */
-// (1 << 3) is unused.
+/* (1 << 3) is unused. */
#define SNDRV_CTL_ELEM_ACCESS_TLV_READ (1<<4) /* TLV read is possible */
#define SNDRV_CTL_ELEM_ACCESS_TLV_WRITE (1<<5) /* TLV write is possible */
#define SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE (SNDRV_CTL_ELEM_ACCESS_TLV_READ|SNDRV_CTL_ELEM_ACCESS_TLV_WRITE)
@@ -1071,7 +1090,7 @@ struct snd_ctl_elem_value {
struct snd_ctl_tlv {
unsigned int numid; /* control element numeric identification */
unsigned int length; /* in bytes aligned to 4 */
- unsigned int tlv[0]; /* first TLV */
+ unsigned int tlv[]; /* first TLV */
};
#define SNDRV_CTL_IOCTL_PVERSION _IOR('U', 0x00, int)
diff --git a/include/uapi/sound/asound_fm.h b/include/uapi/sound/asound_fm.h
index 8471f404ff0b..25ec5e38af5c 100644
--- a/include/uapi/sound/asound_fm.h
+++ b/include/uapi/sound/asound_fm.h
@@ -10,21 +10,6 @@
* 4Front Technologies
*
* Direct FM control
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#define SNDRV_DM_FM_MODE_OPL2 0x00
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index 56d95673ce0f..d185957f3fe0 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -5,23 +5,6 @@
* Copyright (C) 2011 Intel Corporation
* Authors: Vinod Koul <vinod.koul@linux.intel.com>
* Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
*/
#ifndef __COMPRESS_OFFLOAD_H
#define __COMPRESS_OFFLOAD_H
@@ -31,7 +14,7 @@
#include <sound/compress_params.h>
-#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 2)
+#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 2, 0)
/**
* struct snd_compressed_buffer - compressed buffer
* @fragment_size: size of buffer fragment in bytes
@@ -123,7 +106,7 @@ struct snd_compr_codec_caps {
} __attribute__((packed, aligned(4)));
/**
- * enum sndrv_compress_encoder
+ * enum sndrv_compress_encoder - encoder metadata key
* @SNDRV_COMPRESS_ENCODER_PADDING: no of samples appended by the encoder at the
* end of the track
* @SNDRV_COMPRESS_ENCODER_DELAY: no of samples inserted by the encoder at the
@@ -144,7 +127,7 @@ struct snd_compr_metadata {
__u32 value[8];
} __attribute__((packed, aligned(4)));
-/**
+/*
* compress path ioctl definitions
* SNDRV_COMPRESS_GET_CAPS: Query capability of DSP
* SNDRV_COMPRESS_GET_CODEC_CAPS: Query capability of a codec
diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h
index 9c96fb0e4d90..ddc77322d571 100644
--- a/include/uapi/sound/compress_params.h
+++ b/include/uapi/sound/compress_params.h
@@ -7,47 +7,13 @@
* Authors: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
* Vinod Koul <vinod.koul@linux.intel.com>
*
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* The definitions in this file are derived from the OpenMAX AL version 1.1
- * and OpenMAX IL v 1.1.2 header files which contain the copyright notice below.
+ * and OpenMAX IL v 1.1.2 header files which contain the copyright notice below
+ * and are licensed under the MIT license.
*
* Copyright (c) 2007-2010 The Khronos Group Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and/or associated documentation files (the
- * "Materials "), to deal in the Materials without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Materials, and to
- * permit persons to whom the Materials are furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Materials.
- *
- * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
- * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
- *
*/
#ifndef __SND_COMPRESS_PARAMS_H
#define __SND_COMPRESS_PARAMS_H
@@ -75,7 +41,9 @@
#define SND_AUDIOCODEC_G723_1 ((__u32) 0x0000000C)
#define SND_AUDIOCODEC_G729 ((__u32) 0x0000000D)
#define SND_AUDIOCODEC_BESPOKE ((__u32) 0x0000000E)
-#define SND_AUDIOCODEC_MAX SND_AUDIOCODEC_BESPOKE
+#define SND_AUDIOCODEC_ALAC ((__u32) 0x0000000F)
+#define SND_AUDIOCODEC_APE ((__u32) 0x00000010)
+#define SND_AUDIOCODEC_MAX SND_AUDIOCODEC_APE
/*
* Profile and modes are listed with bit masks. This allows for a
@@ -142,6 +110,9 @@
#define SND_AUDIOPROFILE_WMA8 ((__u32) 0x00000002)
#define SND_AUDIOPROFILE_WMA9 ((__u32) 0x00000004)
#define SND_AUDIOPROFILE_WMA10 ((__u32) 0x00000008)
+#define SND_AUDIOPROFILE_WMA9_PRO ((__u32) 0x00000010)
+#define SND_AUDIOPROFILE_WMA9_LOSSLESS ((__u32) 0x00000020)
+#define SND_AUDIOPROFILE_WMA10_LOSSLESS ((__u32) 0x00000040)
#define SND_AUDIOMODE_WMA_LEVEL1 ((__u32) 0x00000001)
#define SND_AUDIOMODE_WMA_LEVEL2 ((__u32) 0x00000002)
@@ -245,7 +216,7 @@ struct snd_enc_wma {
/**
- * struct snd_enc_vorbis
+ * struct snd_enc_vorbis - Vorbis encoder parameters
* @quality: Sets encoding quality to n, between -1 (low) and 10 (high).
* In the default mode of operation, the quality level is 3.
* Normal quality range is 0 - 10.
@@ -274,7 +245,7 @@ struct snd_enc_vorbis {
/**
- * struct snd_enc_real
+ * struct snd_enc_real - RealAudio encoder parameters
* @quant_bits: number of coupling quantization bits in the stream
* @start_region: coupling start region in the stream
* @num_regions: number of regions value
@@ -289,7 +260,7 @@ struct snd_enc_real {
} __attribute__((packed, aligned(4)));
/**
- * struct snd_enc_flac
+ * struct snd_enc_flac - FLAC encoder parameters
* @num: serial number, valid only for OGG formats
* needs to be set by application
* @gain: Add replay gain tags
@@ -326,6 +297,33 @@ struct snd_dec_flac {
__u16 reserved;
} __attribute__((packed, aligned(4)));
+struct snd_dec_wma {
+ __u32 encoder_option;
+ __u32 adv_encoder_option;
+ __u32 adv_encoder_option2;
+ __u32 reserved;
+} __attribute__((packed, aligned(4)));
+
+struct snd_dec_alac {
+ __u32 frame_length;
+ __u8 compatible_version;
+ __u8 pb;
+ __u8 mb;
+ __u8 kb;
+ __u32 max_run;
+ __u32 max_frame_bytes;
+} __attribute__((packed, aligned(4)));
+
+struct snd_dec_ape {
+ __u16 compatible_version;
+ __u16 compression_level;
+ __u32 format_flags;
+ __u32 blocks_per_frame;
+ __u32 final_frame_blocks;
+ __u32 total_frames;
+ __u32 seek_table_present;
+} __attribute__((packed, aligned(4)));
+
union snd_codec_options {
struct snd_enc_wma wma;
struct snd_enc_vorbis vorbis;
@@ -333,6 +331,9 @@ union snd_codec_options {
struct snd_enc_flac flac;
struct snd_enc_generic generic;
struct snd_dec_flac flac_d;
+ struct snd_dec_wma wma_d;
+ struct snd_dec_alac alac_d;
+ struct snd_dec_ape ape_d;
} __attribute__((packed, aligned(4)));
/** struct snd_codec_desc - description of codec capabilities
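To illustrate how the new decoder members are meant to be used: a player selects the codec by ID and fills the matching union member. A hedged sketch, assuming the usual struct snd_codec from this header; the field values are placeholders that would normally come from the stream's ALAC magic cookie:

#include <string.h>
#include <sound/compress_params.h>

/* Sketch: describe an ALAC stream via the new decoder options. */
static void fill_alac_codec(struct snd_codec *codec)
{
        memset(codec, 0, sizeof(*codec));
        codec->id = SND_AUDIOCODEC_ALAC;
        /* Placeholder values, normally parsed from the magic cookie. */
        codec->options.alac_d.frame_length = 4096;
        codec->options.alac_d.compatible_version = 0;
        codec->options.alac_d.pb = 40;
        codec->options.alac_d.mb = 10;
        codec->options.alac_d.kb = 14;
        codec->options.alac_d.max_run = 255;
        codec->options.alac_d.max_frame_bytes = 0;
}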
diff --git a/include/uapi/sound/emu10k1.h b/include/uapi/sound/emu10k1.h
index 88609cc0524c..1c1f1dd44611 100644
--- a/include/uapi/sound/emu10k1.h
+++ b/include/uapi/sound/emu10k1.h
@@ -3,22 +3,6 @@
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>,
* Creative Labs, Inc.
* Definitions for EMU10K1 (SB Live!) chips
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _UAPI__SOUND_EMU10K1_H
#define _UAPI__SOUND_EMU10K1_H
diff --git a/include/uapi/sound/firewire.h b/include/uapi/sound/firewire.h
index ae12826ed641..3532ac7046d7 100644
--- a/include/uapi/sound/firewire.h
+++ b/include/uapi/sound/firewire.h
@@ -13,6 +13,7 @@
#define SNDRV_FIREWIRE_EVENT_DIGI00X_MESSAGE 0x746e736c
#define SNDRV_FIREWIRE_EVENT_MOTU_NOTIFICATION 0x64776479
#define SNDRV_FIREWIRE_EVENT_TASCAM_CONTROL 0x7473636d
+#define SNDRV_FIREWIRE_EVENT_MOTU_REGISTER_DSP_CHANGE 0x4d545244
struct snd_firewire_event_common {
unsigned int type; /* SNDRV_FIREWIRE_EVENT_xxx */
@@ -37,11 +38,11 @@ struct snd_efw_transaction {
__be32 category;
__be32 command;
__be32 status;
- __be32 params[0];
+ __be32 params[];
};
struct snd_firewire_event_efw_response {
unsigned int type;
- __be32 response[0]; /* some responses */
+ __be32 response[]; /* some responses */
};
struct snd_firewire_event_digi00x_message {
@@ -62,7 +63,13 @@ struct snd_firewire_tascam_change {
struct snd_firewire_event_tascam_control {
unsigned int type;
- struct snd_firewire_tascam_change changes[0];
+ struct snd_firewire_tascam_change changes[];
+};
+
+struct snd_firewire_event_motu_register_dsp_change {
+ unsigned int type;
+ __u32 count; /* The number of changes. */
+ __u32 changes[]; /* Encoded event for change of register DSP. */
};
union snd_firewire_event {
@@ -73,6 +80,7 @@ union snd_firewire_event {
struct snd_firewire_event_digi00x_message digi00x_message;
struct snd_firewire_event_tascam_control tascam_control;
struct snd_firewire_event_motu_notification motu_notification;
+ struct snd_firewire_event_motu_register_dsp_change motu_register_dsp_change;
};
@@ -80,6 +88,9 @@ union snd_firewire_event {
#define SNDRV_FIREWIRE_IOCTL_LOCK _IO('H', 0xf9)
#define SNDRV_FIREWIRE_IOCTL_UNLOCK _IO('H', 0xfa)
#define SNDRV_FIREWIRE_IOCTL_TASCAM_STATE _IOR('H', 0xfb, struct snd_firewire_tascam_state)
+#define SNDRV_FIREWIRE_IOCTL_MOTU_REGISTER_DSP_METER _IOR('H', 0xfc, struct snd_firewire_motu_register_dsp_meter)
+#define SNDRV_FIREWIRE_IOCTL_MOTU_COMMAND_DSP_METER _IOR('H', 0xfd, struct snd_firewire_motu_command_dsp_meter)
+#define SNDRV_FIREWIRE_IOCTL_MOTU_REGISTER_DSP_PARAMETER _IOR('H', 0xfe, struct snd_firewire_motu_register_dsp_parameter)
#define SNDRV_FIREWIRE_TYPE_DICE 1
#define SNDRV_FIREWIRE_TYPE_FIREWORKS 2
@@ -108,4 +119,143 @@ struct snd_firewire_tascam_state {
__be32 data[SNDRV_FIREWIRE_TASCAM_STATE_COUNT];
};
+/*
+ * In the MOTU models below, software is allowed to control the DSP by accessing registers:
+ * - 828mk2
+ * - 896hd
+ * - Traveler
+ * - 8 pre
+ * - Ultralite
+ * - 4 pre
+ * - Audio Express
+ *
+ * On the other hand, the DSP status is split into specific messages included in the sequence of
+ * isochronous packets. The ALSA firewire-motu driver gathers the messages and allows userspace
+ * applications to read them via ioctl. In the 828mk2, 896hd, and Traveler, hardware meters for
+ * all physical inputs are put into the message, while one pair of physical outputs is selected.
+ * The selection is done by the least significant byte of an asynchronous write quadlet
+ * transaction to 0x'ffff'f000'0b2c.
+ *
+ * Note that the V3HD/V4HD models use asynchronous transactions for this purpose. The destination
+ * address is registered at 0x'ffff'f000'0b38 and '0b3c by an asynchronous write quadlet request.
+ * The message size varies between 23 and 51 quadlets, and in that case the number of mixer buses
+ * can be extended up to 12.
+ */
+
+#define SNDRV_FIREWIRE_MOTU_REGISTER_DSP_METER_INPUT_COUNT 24
+#define SNDRV_FIREWIRE_MOTU_REGISTER_DSP_METER_OUTPUT_COUNT 24
+#define SNDRV_FIREWIRE_MOTU_REGISTER_DSP_METER_COUNT \
+ (SNDRV_FIREWIRE_MOTU_REGISTER_DSP_METER_INPUT_COUNT + SNDRV_FIREWIRE_MOTU_REGISTER_DSP_METER_OUTPUT_COUNT)
+
+/**
+ * struct snd_firewire_motu_register_dsp_meter - the container for meter information in DSP
+ * controlled by register access
+ * @data: Signal level meters. The mapping between position and input/output channel is
+ * model-dependent.
+ *
+ * The structure expresses the part of the DSP status used for hardware metering. Each u8 element
+ * holds a linear value for the audio signal level, between 0x00 and 0x7f.
+ */
+struct snd_firewire_motu_register_dsp_meter {
+ __u8 data[SNDRV_FIREWIRE_MOTU_REGISTER_DSP_METER_COUNT];
+};
+
+#define SNDRV_FIREWIRE_MOTU_REGISTER_DSP_MIXER_COUNT 4
+#define SNDRV_FIREWIRE_MOTU_REGISTER_DSP_MIXER_SRC_COUNT 20
+#define SNDRV_FIREWIRE_MOTU_REGISTER_DSP_INPUT_COUNT 10
+#define SNDRV_FIREWIRE_MOTU_REGISTER_DSP_ALIGNED_INPUT_COUNT (SNDRV_FIREWIRE_MOTU_REGISTER_DSP_INPUT_COUNT + 2)
+
+/**
+ * struct snd_firewire_motu_register_dsp_parameter - the container for parameters of DSP controlled
+ * by register access.
+ * @mixer.source.gain: The gain of source to mixer.
+ * @mixer.source.pan: The L/R balance of source to mixer.
+ * @mixer.source.flag: The flag of source to mixer, including mute, solo.
+ * @mixer.source.paired_balance: The L/R balance of paired source to mixer, only for 4 pre and
+ * Audio Express.
+ * @mixer.source.paired_width: The width of paired source to mixer, only for 4 pre and
+ * Audio Express.
+ * @mixer.output.paired_volume: The volume of paired output from mixer.
+ * @mixer.output.paired_flag: The flag of paired output from mixer.
+ * @output.main_paired_volume: The volume of paired main output.
+ * @output.hp_paired_volume: The volume of paired hp output.
+ * @output.hp_paired_assignment: The source assigned to paired hp output.
+ * @output.reserved: Padding for 32 bit alignment for future extension.
+ * @line_input.boost_flag: The flags of boost for line inputs, only for 828mk2 and Traveler.
+ * @line_input.nominal_level_flag: The flags of nominal level for line inputs, only for 828mk2 and
+ * Traveler.
+ * @line_input.reserved: Padding for 32 bit alignment for future extension.
+ * @input.gain_and_invert: The value including gain and invert for input, only for Ultralite, 4 pre
+ * and Audio Express.
+ * @input.flag: The flag of input, e.g. jack detection, phantom power, and pad; only for Ultralite,
+ * 4 pre and Audio Express.
+ * @reserved: Padding to keep the size of the structure at 512 bytes, reserved for future extension.
+ *
+ * The structure expresses the set of parameters for DSP controlled by register access.
+ */
+struct snd_firewire_motu_register_dsp_parameter {
+ struct {
+ struct {
+ __u8 gain[SNDRV_FIREWIRE_MOTU_REGISTER_DSP_MIXER_SRC_COUNT];
+ __u8 pan[SNDRV_FIREWIRE_MOTU_REGISTER_DSP_MIXER_SRC_COUNT];
+ __u8 flag[SNDRV_FIREWIRE_MOTU_REGISTER_DSP_MIXER_SRC_COUNT];
+ __u8 paired_balance[SNDRV_FIREWIRE_MOTU_REGISTER_DSP_MIXER_SRC_COUNT];
+ __u8 paired_width[SNDRV_FIREWIRE_MOTU_REGISTER_DSP_MIXER_SRC_COUNT];
+ } source[SNDRV_FIREWIRE_MOTU_REGISTER_DSP_MIXER_COUNT];
+ struct {
+ __u8 paired_volume[SNDRV_FIREWIRE_MOTU_REGISTER_DSP_MIXER_COUNT];
+ __u8 paired_flag[SNDRV_FIREWIRE_MOTU_REGISTER_DSP_MIXER_COUNT];
+ } output;
+ } mixer;
+ struct {
+ __u8 main_paired_volume;
+ __u8 hp_paired_volume;
+ __u8 hp_paired_assignment;
+ __u8 reserved[5];
+ } output;
+ struct {
+ __u8 boost_flag;
+ __u8 nominal_level_flag;
+ __u8 reserved[6];
+ } line_input;
+ struct {
+ __u8 gain_and_invert[SNDRV_FIREWIRE_MOTU_REGISTER_DSP_ALIGNED_INPUT_COUNT];
+ __u8 flag[SNDRV_FIREWIRE_MOTU_REGISTER_DSP_ALIGNED_INPUT_COUNT];
+ } input;
+ __u8 reserved[64];
+};
+
+/*
+ * In the MOTU models below, software is allowed to control the DSP by commands in frames of
+ * asynchronous transactions to 0x'ffff'0001'0000:
+ *
+ * - 828 mk3 (FireWire only and Hybrid)
+ * - 896 mk3 (FireWire only and Hybrid)
+ * - Ultralite mk3 (FireWire only and Hybrid)
+ * - Traveler mk3
+ * - Track 16
+ *
+ * On the other hand, the state of the hardware meters is split into specific messages included in
+ * the sequence of isochronous packets. The ALSA firewire-motu driver gathers the messages and
+ * allows userspace applications to read them via ioctl.
+ */
+
+#define SNDRV_FIREWIRE_MOTU_COMMAND_DSP_METER_COUNT 400
+
+/**
+ * struct snd_firewire_motu_command_dsp_meter - the container for meter information in DSP
+ * controlled by command
+ * @data: Signal level meters. The mapping between position and signal channel is model-dependent.
+ *
+ * The structure expresses the part of the DSP status used for hardware metering. Each 32 bit
+ * element is expected to hold an IEEE 754 32 bit single precision floating point (binary32)
+ * value, a linear (not logarithmic) audio signal level between 0.0 and +1.0.
+ */
+struct snd_firewire_motu_command_dsp_meter {
+#ifdef __KERNEL__
+ __u32 data[SNDRV_FIREWIRE_MOTU_COMMAND_DSP_METER_COUNT];
+#else
+ float data[SNDRV_FIREWIRE_MOTU_COMMAND_DSP_METER_COUNT];
+#endif
+};
+
#endif /* _UAPI_SOUND_FIREWIRE_H_INCLUDED */
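For context, these ioctls are served by the ALSA hwdep character device of the firewire-motu driver. A hedged sketch of reading the register-DSP meter; the device node path is an example and error handling is minimal:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <sound/firewire.h>

int main(void)
{
        struct snd_firewire_motu_register_dsp_meter meter;
        unsigned int i;
        int fd = open("/dev/snd/hwC0D0", O_RDONLY); /* example node */

        if (fd < 0)
                return 1;
        if (ioctl(fd, SNDRV_FIREWIRE_IOCTL_MOTU_REGISTER_DSP_METER, &meter) < 0) {
                close(fd);
                return 1;
        }
        /* Each byte is a linear level between 0x00 and 0x7f. */
        for (i = 0; i < SNDRV_FIREWIRE_MOTU_REGISTER_DSP_METER_COUNT; i++)
                printf("ch%u: %u\n", i, meter.data[i]);
        close(fd);
        return 0;
}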
diff --git a/include/uapi/sound/hdsp.h b/include/uapi/sound/hdsp.h
index b8df62b60f4d..0961954658d6 100644
--- a/include/uapi/sound/hdsp.h
+++ b/include/uapi/sound/hdsp.h
@@ -4,20 +4,6 @@
/*
* Copyright (C) 2003 Thomas Charbonnel (thomas@undata.org)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifdef __linux__
diff --git a/include/uapi/sound/hdspm.h b/include/uapi/sound/hdspm.h
index 14af3d00ea3f..7043bb3d435a 100644
--- a/include/uapi/sound/hdspm.h
+++ b/include/uapi/sound/hdspm.h
@@ -4,21 +4,6 @@
/*
* Copyright (C) 2003 Winfried Ritsch (IEM)
* based on hdsp.h from Thomas Charbonnel (thomas@undata.org)
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifdef __linux__
diff --git a/include/uapi/sound/intel/avs/tokens.h b/include/uapi/sound/intel/avs/tokens.h
new file mode 100644
index 000000000000..754f02b2f444
--- /dev/null
+++ b/include/uapi/sound/intel/avs/tokens.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright(c) 2021 Intel Corporation. All rights reserved.
+ *
+ * Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+ * Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+ */
+
+#ifndef __UAPI_SOUND_INTEL_AVS_TOKENS_H
+#define __UAPI_SOUND_INTEL_AVS_TOKENS_H
+
+enum avs_tplg_token {
+ /* struct avs_tplg */
+ AVS_TKN_MANIFEST_NAME_STRING = 1,
+ AVS_TKN_MANIFEST_VERSION_U32 = 2,
+ AVS_TKN_MANIFEST_NUM_LIBRARIES_U32 = 3,
+ AVS_TKN_MANIFEST_NUM_AFMTS_U32 = 4,
+ AVS_TKN_MANIFEST_NUM_MODCFGS_BASE_U32 = 5,
+ AVS_TKN_MANIFEST_NUM_MODCFGS_EXT_U32 = 6,
+ AVS_TKN_MANIFEST_NUM_PPLCFGS_U32 = 7,
+ AVS_TKN_MANIFEST_NUM_BINDINGS_U32 = 8,
+
+ /* struct avs_tplg_library */
+ AVS_TKN_LIBRARY_ID_U32 = 101,
+ AVS_TKN_LIBRARY_NAME_STRING = 102,
+
+ /* struct avs_audio_format */
+ AVS_TKN_AFMT_ID_U32 = 201,
+ AVS_TKN_AFMT_SAMPLE_RATE_U32 = 202,
+ AVS_TKN_AFMT_BIT_DEPTH_U32 = 203,
+ AVS_TKN_AFMT_CHANNEL_MAP_U32 = 204,
+ AVS_TKN_AFMT_CHANNEL_CFG_U32 = 205,
+ AVS_TKN_AFMT_INTERLEAVING_U32 = 206,
+ AVS_TKN_AFMT_NUM_CHANNELS_U32 = 207,
+ AVS_TKN_AFMT_VALID_BIT_DEPTH_U32 = 208,
+ AVS_TKN_AFMT_SAMPLE_TYPE_U32 = 209,
+
+ /* struct avs_tplg_modcfg_base */
+ AVS_TKN_MODCFG_BASE_ID_U32 = 301,
+ AVS_TKN_MODCFG_BASE_CPC_U32 = 302,
+ AVS_TKN_MODCFG_BASE_IBS_U32 = 303,
+ AVS_TKN_MODCFG_BASE_OBS_U32 = 304,
+ AVS_TKN_MODCFG_BASE_PAGES_U32 = 305,
+
+ /* struct avs_tplg_modcfg_ext */
+ AVS_TKN_MODCFG_EXT_ID_U32 = 401,
+ AVS_TKN_MODCFG_EXT_TYPE_UUID = 402,
+ AVS_TKN_MODCFG_CPR_OUT_AFMT_ID_U32 = 403,
+ AVS_TKN_MODCFG_CPR_FEATURE_MASK_U32 = 404,
+ AVS_TKN_MODCFG_CPR_DMA_TYPE_U32 = 405,
+ AVS_TKN_MODCFG_CPR_DMABUFF_SIZE_U32 = 406,
+ AVS_TKN_MODCFG_CPR_VINDEX_U8 = 407,
+ AVS_TKN_MODCFG_CPR_BLOB_FMT_ID_U32 = 408,
+ AVS_TKN_MODCFG_MICSEL_OUT_AFMT_ID_U32 = 409,
+ AVS_TKN_MODCFG_INTELWOV_CPC_LP_MODE_U32 = 410,
+ AVS_TKN_MODCFG_SRC_OUT_FREQ_U32 = 411,
+ AVS_TKN_MODCFG_MUX_REF_AFMT_ID_U32 = 412,
+ AVS_TKN_MODCFG_MUX_OUT_AFMT_ID_U32 = 413,
+ AVS_TKN_MODCFG_AEC_REF_AFMT_ID_U32 = 414,
+ AVS_TKN_MODCFG_AEC_OUT_AFMT_ID_U32 = 415,
+ AVS_TKN_MODCFG_AEC_CPC_LP_MODE_U32 = 416,
+ AVS_TKN_MODCFG_ASRC_OUT_FREQ_U32 = 417,
+ AVS_TKN_MODCFG_ASRC_MODE_U8 = 418,
+ AVS_TKN_MODCFG_ASRC_DISABLE_JITTER_U8 = 419,
+ AVS_TKN_MODCFG_UPDOWN_MIX_OUT_CHAN_CFG_U32 = 420,
+ AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_SELECT_U32 = 421,
+ AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_0_S32 = 422,
+ AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_1_S32 = 423,
+ AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_2_S32 = 424,
+ AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_3_S32 = 425,
+ AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_4_S32 = 426,
+ AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_5_S32 = 427,
+ AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_6_S32 = 428,
+ AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_7_S32 = 429,
+ AVS_TKN_MODCFG_UPDOWN_MIX_CHAN_MAP_U32 = 430,
+ AVS_TKN_MODCFG_EXT_NUM_INPUT_PINS_U16 = 431,
+ AVS_TKN_MODCFG_EXT_NUM_OUTPUT_PINS_U16 = 432,
+
+ /* struct avs_tplg_pplcfg */
+ AVS_TKN_PPLCFG_ID_U32 = 1401,
+ AVS_TKN_PPLCFG_REQ_SIZE_U16 = 1402,
+ AVS_TKN_PPLCFG_PRIORITY_U8 = 1403,
+ AVS_TKN_PPLCFG_LOW_POWER_BOOL = 1404,
+ AVS_TKN_PPLCFG_ATTRIBUTES_U16 = 1405,
+ AVS_TKN_PPLCFG_TRIGGER_U32 = 1406,
+
+ /* struct avs_tplg_binding */
+ AVS_TKN_BINDING_ID_U32 = 1501,
+ AVS_TKN_BINDING_TARGET_TPLG_NAME_STRING = 1502,
+ AVS_TKN_BINDING_TARGET_PATH_TMPL_ID_U32 = 1503,
+ AVS_TKN_BINDING_TARGET_PPL_ID_U32 = 1504,
+ AVS_TKN_BINDING_TARGET_MOD_ID_U32 = 1505,
+ AVS_TKN_BINDING_TARGET_MOD_PIN_U8 = 1506,
+ AVS_TKN_BINDING_MOD_ID_U32 = 1507,
+ AVS_TKN_BINDING_MOD_PIN_U8 = 1508,
+ AVS_TKN_BINDING_IS_SINK_U8 = 1509,
+
+ /* struct avs_tplg_pipeline */
+ AVS_TKN_PPL_ID_U32 = 1601,
+ AVS_TKN_PPL_PPLCFG_ID_U32 = 1602,
+ AVS_TKN_PPL_NUM_BINDING_IDS_U32 = 1603,
+ AVS_TKN_PPL_BINDING_ID_U32 = 1604,
+
+ /* struct avs_tplg_module */
+ AVS_TKN_MOD_ID_U32 = 1701,
+ AVS_TKN_MOD_MODCFG_BASE_ID_U32 = 1702,
+ AVS_TKN_MOD_IN_AFMT_ID_U32 = 1703,
+ AVS_TKN_MOD_CORE_ID_U8 = 1704,
+ AVS_TKN_MOD_PROC_DOMAIN_U8 = 1705,
+ AVS_TKN_MOD_MODCFG_EXT_ID_U32 = 1706,
+
+ /* struct avs_tplg_path_template */
+ AVS_TKN_PATH_TMPL_ID_U32 = 1801,
+
+ /* struct avs_tplg_path */
+ AVS_TKN_PATH_ID_U32 = 1901,
+ AVS_TKN_PATH_FE_FMT_ID_U32 = 1902,
+ AVS_TKN_PATH_BE_FMT_ID_U32 = 1903,
+
+ /* struct avs_tplg_pin_format */
+ AVS_TKN_PIN_FMT_INDEX_U32 = 2201,
+ AVS_TKN_PIN_FMT_IOBS_U32 = 2202,
+ AVS_TKN_PIN_FMT_AFMT_ID_U32 = 2203,
+};
+
+#endif
diff --git a/include/uapi/sound/sb16_csp.h b/include/uapi/sound/sb16_csp.h
index e64851481d88..5a80f5ec02ee 100644
--- a/include/uapi/sound/sb16_csp.h
+++ b/include/uapi/sound/sb16_csp.h
@@ -4,21 +4,6 @@
* Takashi Iwai <tiwai@suse.de>
*
* SB16ASP/AWE32 CSP control
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _UAPI__SOUND_SB16_CSP_H
#define _UAPI__SOUND_SB16_CSP_H
diff --git a/include/uapi/sound/sfnt_info.h b/include/uapi/sound/sfnt_info.h
index c9a810a6ef48..f2b5e13fb5a7 100644
--- a/include/uapi/sound/sfnt_info.h
+++ b/include/uapi/sound/sfnt_info.h
@@ -6,21 +6,6 @@
* Patch record compatible with AWE driver on OSS
*
* Copyright (C) 1999-2000 Takashi Iwai
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <sound/asound.h>
diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h
index 9eee32f5e407..f29899b179a6 100644
--- a/include/uapi/sound/skl-tplg-interface.h
+++ b/include/uapi/sound/skl-tplg-interface.h
@@ -18,6 +18,8 @@
*/
#define SKL_CONTROL_TYPE_BYTE_TLV 0x100
#define SKL_CONTROL_TYPE_MIC_SELECT 0x102
+#define SKL_CONTROL_TYPE_MULTI_IO_SELECT 0x103
+#define SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC 0x104
#define HDA_SST_CFG_MAX 900 /* size of copier cfg*/
#define MAX_IN_QUEUE 8
@@ -149,7 +151,7 @@ struct skl_dfw_algo_data {
__u32 rsvd:30;
__u32 param_id;
__u32 max;
- char params[0];
+ char params[];
} __packed;
enum skl_tkn_dir {
diff --git a/include/uapi/sound/snd_ar_tokens.h b/include/uapi/sound/snd_ar_tokens.h
new file mode 100644
index 000000000000..440c0725660b
--- /dev/null
+++ b/include/uapi/sound/snd_ar_tokens.h
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef __SND_AR_TOKENS_H__
+#define __SND_AR_TOKENS_H__
+
+#define APM_SUB_GRAPH_PERF_MODE_LOW_POWER 0x1
+#define APM_SUB_GRAPH_PERF_MODE_LOW_LATENCY 0x2
+
+#define APM_SUB_GRAPH_DIRECTION_TX 0x1
+#define APM_SUB_GRAPH_DIRECTION_RX 0x2
+
+/* Scenario ID Audio Playback */
+#define APM_SUB_GRAPH_SID_AUDIO_PLAYBACK 0x1
+/* Scenario ID Audio Record */
+#define APM_SUB_GRAPH_SID_AUDIO_RECORD 0x2
+/* Scenario ID Voice call. */
+#define APM_SUB_GRAPH_SID_VOICE_CALL 0x3
+
+/* container capability ID Pre/Post Processing (PP) */
+#define APM_CONTAINER_CAP_ID_PP 0x1
+/* container capability ID Compression/Decompression (CD) */
+#define APM_CONTAINER_CAP_ID_CD 0x2
+/* container capability ID End Point(EP) */
+#define APM_CONTAINER_CAP_ID_EP 0x3
+/* container capability ID Offload (OLC) */
+#define APM_CONTAINER_CAP_ID_OLC 0x4
+
+/* container graph position Stream */
+#define APM_CONT_GRAPH_POS_STREAM 0x1
+/* container graph position Per Stream Per Device */
+#define APM_CONT_GRAPH_POS_PER_STR_PER_DEV 0x2
+/* container graph position Stream-Device */
+#define APM_CONT_GRAPH_POS_STR_DEV 0x3
+/* container graph position Global Device */
+#define APM_CONT_GRAPH_POS_GLOBAL_DEV 0x4
+
+#define APM_PROC_DOMAIN_ID_MDSP 0x1
+#define APM_PROC_DOMAIN_ID_ADSP 0x2
+#define APM_PROC_DOMAIN_ID_SDSP 0x4
+#define APM_PROC_DOMAIN_ID_CDSP 0x5
+
+#define PCM_INTERLEAVED 1
+#define PCM_DEINTERLEAVED_PACKED 2
+#define PCM_DEINTERLEAVED_UNPACKED 3
+#define AR_I2S_WS_SRC_EXTERNAL 0
+#define AR_I2S_WS_SRC_INTERNAL 1
+
+enum ar_event_types {
+ AR_EVENT_NONE = 0,
+ AR_PGA_DAPM_EVENT
+};
+
+/*
+ * Kcontrol IDs
+ */
+#define SND_SOC_AR_TPLG_FE_BE_GRAPH_CTL_MIX 256
+#define SND_SOC_AR_TPLG_VOL_CTL 257
+
+/**
+ * %AR_TKN_U32_SUB_GRAPH_INSTANCE_ID: Sub Graph Instance Id
+ *
+ * %AR_TKN_U32_SUB_GRAPH_PERF_MODE: Performance mode of subgraph
+ * APM_SUB_GRAPH_PERF_MODE_LOW_POWER = 1,
+ * APM_SUB_GRAPH_PERF_MODE_LOW_LATENCY = 2
+ *
+ * %AR_TKN_U32_SUB_GRAPH_DIRECTION: Direction of subgraph
+ * APM_SUB_GRAPH_DIRECTION_TX = 1,
+ * APM_SUB_GRAPH_DIRECTION_RX = 2
+ *
+ * %AR_TKN_U32_SUB_GRAPH_SCENARIO_ID: Scenario ID for subgraph
+ * APM_SUB_GRAPH_SID_AUDIO_PLAYBACK = 1,
+ * APM_SUB_GRAPH_SID_AUDIO_RECORD = 2,
+ * APM_SUB_GRAPH_SID_VOICE_CALL = 3
+ *
+ * %AR_TKN_U32_CONTAINER_INSTANCE_ID: Container Instance ID
+ *
+ * %AR_TKN_U32_CONTAINER_CAPABILITY_ID: Container capability ID
+ * APM_CONTAINER_CAP_ID_PP = 1,
+ * APM_CONTAINER_CAP_ID_CD = 2,
+ * APM_CONTAINER_CAP_ID_EP = 3,
+ * APM_CONTAINER_CAP_ID_OLC = 4
+ *
+ * %AR_TKN_U32_CONTAINER_STACK_SIZE: Stack size in the container.
+ *
+ * %AR_TKN_U32_CONTAINER_GRAPH_POS: Graph Position
+ * APM_CONT_GRAPH_POS_STREAM = 1,
+ * APM_CONT_GRAPH_POS_PER_STR_PER_DEV = 2,
+ * APM_CONT_GRAPH_POS_STR_DEV = 3,
+ * APM_CONT_GRAPH_POS_GLOBAL_DEV = 4
+ *
+ * %AR_TKN_U32_CONTAINER_PROC_DOMAIN: Processor domain of container
+ * APM_PROC_DOMAIN_ID_MDSP = 1,
+ * APM_PROC_DOMAIN_ID_ADSP = 2,
+ * APM_PROC_DOMAIN_ID_SDSP = 4,
+ * APM_PROC_DOMAIN_ID_CDSP = 5
+ *
+ * %AR_TKN_U32_MODULE_ID: Module ID
+ *
+ * %AR_TKN_U32_MODULE_INSTANCE_ID: Module Instance ID.
+ *
+ * %AR_TKN_U32_MODULE_MAX_IP_PORTS: Module maximum input ports
+ *
+ * %AR_TKN_U32_MODULE_MAX_OP_PORTS: Module maximum output ports.
+ *
+ * %AR_TKN_U32_MODULE_IN_PORTS: Number of in ports
+ *
+ * %AR_TKN_U32_MODULE_OUT_PORTS: Number of out ports.
+ *
+ * %AR_TKN_U32_MODULE_SRC_OP_PORT_ID: Source module output port ID
+ *
+ * %AR_TKN_U32_MODULE_DST_IN_PORT_ID: Destination module input port ID
+ *
+ * %AR_TKN_U32_MODULE_HW_IF_IDX: Interface index types for I2S/LPAIF
+ *
+ * %AR_TKN_U32_MODULE_HW_IF_TYPE: Interface type
+ * LPAIF = 0,
+ * LPAIF_RXTX = 1,
+ * LPAIF_WSA = 2,
+ * LPAIF_VA = 3,
+ * LPAIF_AXI = 4
+ *
+ * %AR_TKN_U32_MODULE_FMT_INTERLEAVE: PCM Interleaving
+ * PCM_INTERLEAVED = 1,
+ * PCM_DEINTERLEAVED_PACKED = 2,
+ * PCM_DEINTERLEAVED_UNPACKED = 3
+ *
+ * %AR_TKN_U32_MODULE_FMT_DATA: data format
+ * FIXED POINT = 1,
+ * IEC60958 PACKETIZED = 3,
+ * IEC60958 PACKETIZED NON LINEAR = 8,
+ * COMPR OVER PCM PACKETIZED = 7,
+ * IEC61937 PACKETIZED = 2,
+ * GENERIC COMPRESSED = 5
+ *
+ * %AR_TKN_U32_MODULE_FMT_SAMPLE_RATE: sample rate
+ *
+ * %AR_TKN_U32_MODULE_FMT_BIT_DEPTH: bit depth
+ *
+ * %AR_TKN_U32_MODULE_SD_LINE_IDX: I2S serial data line idx
+ * I2S_SD0 = 1,
+ * I2S_SD1 = 2,
+ * I2S_SD2 = 3,
+ * I2S_SD3 = 4,
+ * I2S_QUAD01 = 5,
+ * I2S_QUAD23 = 6,
+ * I2S_6CHS = 7,
+ * I2S_8CHS = 8
+ *
+ * %AR_TKN_U32_MODULE_WS_SRC: Word Select Source
+ * AR_I2S_WS_SRC_EXTERNAL = 0,
+ * AR_I2S_WS_SRC_INTERNAL = 1,
+ *
+ * %AR_TKN_U32_MODULE_FRAME_SZ_FACTOR: Frame size factor
+ *
+ * %AR_TKN_U32_MODULE_LOG_CODE: Log Module Code
+ *
+ * %AR_TKN_U32_MODULE_LOG_TAP_POINT_ID: logging tap point of this module
+ *
+ * %AR_TKN_U32_MODULE_LOG_MODE: logging mode
+ * LOG_WAIT = 0,
+ * LOG_IMMEDIATELY = 1
+ *
+ * %AR_TKN_DAI_INDEX: dai index
+ *
+ */
+
+/* DAI Tokens */
+#define AR_TKN_DAI_INDEX 1
+/* SUB GRAPH Tokens */
+#define AR_TKN_U32_SUB_GRAPH_INSTANCE_ID 2
+#define AR_TKN_U32_SUB_GRAPH_PERF_MODE 3
+#define AR_TKN_U32_SUB_GRAPH_DIRECTION 4
+#define AR_TKN_U32_SUB_GRAPH_SCENARIO_ID 5
+
+/* Container Tokens */
+#define AR_TKN_U32_CONTAINER_INSTANCE_ID 100
+#define AR_TKN_U32_CONTAINER_CAPABILITY_ID 101
+#define AR_TKN_U32_CONTAINER_STACK_SIZE 102
+#define AR_TKN_U32_CONTAINER_GRAPH_POS 103
+#define AR_TKN_U32_CONTAINER_PROC_DOMAIN 104
+
+/* Module Tokens */
+#define AR_TKN_U32_MODULE_ID 200
+#define AR_TKN_U32_MODULE_INSTANCE_ID 201
+#define AR_TKN_U32_MODULE_MAX_IP_PORTS 202
+#define AR_TKN_U32_MODULE_MAX_OP_PORTS 203
+#define AR_TKN_U32_MODULE_IN_PORTS 204
+#define AR_TKN_U32_MODULE_OUT_PORTS 205
+#define AR_TKN_U32_MODULE_SRC_OP_PORT_ID 206
+#define AR_TKN_U32_MODULE_DST_IN_PORT_ID 207
+#define AR_TKN_U32_MODULE_SRC_INSTANCE_ID 208
+#define AR_TKN_U32_MODULE_DST_INSTANCE_ID 209
+
+
+#define AR_TKN_U32_MODULE_HW_IF_IDX 250
+#define AR_TKN_U32_MODULE_HW_IF_TYPE 251
+#define AR_TKN_U32_MODULE_FMT_INTERLEAVE 252
+#define AR_TKN_U32_MODULE_FMT_DATA 253
+#define AR_TKN_U32_MODULE_FMT_SAMPLE_RATE 254
+#define AR_TKN_U32_MODULE_FMT_BIT_DEPTH 255
+#define AR_TKN_U32_MODULE_SD_LINE_IDX 256
+#define AR_TKN_U32_MODULE_WS_SRC 257
+#define AR_TKN_U32_MODULE_FRAME_SZ_FACTOR 258
+#define AR_TKN_U32_MODULE_LOG_CODE 259
+#define AR_TKN_U32_MODULE_LOG_TAP_POINT_ID 260
+#define AR_TKN_U32_MODULE_LOG_MODE 261
+
+#endif /* __SND_AR_TOKENS_H__ */
diff --git a/include/uapi/sound/snd_sst_tokens.h b/include/uapi/sound/snd_sst_tokens.h
index 8ba0112e5336..defeb0c6ed20 100644
--- a/include/uapi/sound/snd_sst_tokens.h
+++ b/include/uapi/sound/snd_sst_tokens.h
@@ -4,16 +4,6 @@
*
* Copyright (C) 2016 Intel Corp
* Author: Shreyas NC <shreyas.nc@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#ifndef __SND_SST_TOKENS_H__
#define __SND_SST_TOKENS_H__
@@ -233,6 +223,8 @@
*
* %SKL_TKN_U32_ASTATE_CLK_SRC: Clock source for A-State entry
*
+ * %SKL_TKN_U32_FMT_CFG_IDX: Format config index
+ *
 * module_id and loadable flags don't have tokens as these values will be
* read from the DSP FW manifest
*
@@ -324,7 +316,9 @@ enum SKL_TKNS {
SKL_TKN_U32_ASTATE_COUNT,
SKL_TKN_U32_ASTATE_KCPS,
SKL_TKN_U32_ASTATE_CLK_SRC,
- SKL_TKN_MAX = SKL_TKN_U32_ASTATE_CLK_SRC,
+
+ SKL_TKN_U32_FMT_CFG_IDX = 96,
+ SKL_TKN_MAX = SKL_TKN_U32_FMT_CFG_IDX,
};
#endif
diff --git a/include/uapi/sound/sof/abi.h b/include/uapi/sound/sof/abi.h
index c0ef1643c753..3566630ca965 100644
--- a/include/uapi/sound/sof/abi.h
+++ b/include/uapi/sound/sof/abi.h
@@ -24,9 +24,11 @@
#ifndef __INCLUDE_UAPI_SOUND_SOF_ABI_H__
#define __INCLUDE_UAPI_SOUND_SOF_ABI_H__
+#include <linux/types.h>
+
/* SOF ABI version major, minor and patch numbers */
#define SOF_ABI_MAJOR 3
-#define SOF_ABI_MINOR 12
+#define SOF_ABI_MINOR 23
#define SOF_ABI_PATCH 0
/* SOF ABI version number. Format within 32bit word is MMmmmppp */
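With the MMmmmppp layout, the major number occupies the top 8 bits and minor and patch occupy two 12-bit fields, so the values above pack to 0x03017000. A sketch of the packing; the field widths here are an assumption based on the comment, and the header's own helper macros (not shown in this hunk) should be preferred:

/* Illustrative packing of the SOF ABI version word (MMmmmppp). */
#define EX_SOF_ABI_VER(major, minor, patch) \
        (((major) << 24) | ((minor) << 12) | (patch))

/* EX_SOF_ABI_VER(3, 23, 0) == 0x03017000 */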
diff --git a/include/uapi/sound/sof/header.h b/include/uapi/sound/sof/header.h
index 5f4518e7a972..e9bba93a5399 100644
--- a/include/uapi/sound/sof/header.h
+++ b/include/uapi/sound/sof/header.h
@@ -23,7 +23,37 @@ struct sof_abi_hdr {
__u32 size; /**< size in bytes of data excl. this struct */
__u32 abi; /**< SOF ABI version */
__u32 reserved[4]; /**< reserved for future use */
- __u32 data[0]; /**< Component data - opaque to core */
+ __u32 data[]; /**< Component data - opaque to core */
} __packed;
+#define SOF_MANIFEST_DATA_TYPE_NHLT 1
+
+/**
+ * struct sof_manifest_tlv - SOF manifest TLV data
+ * @type: type of data
+ * @size: data size (not including the size of this struct)
+ * @data: payload data
+ */
+struct sof_manifest_tlv {
+ __le32 type;
+ __le32 size;
+ __u8 data[];
+};
+
+/**
+ * struct sof_manifest - SOF topology manifest
+ * @abi_major: Major ABI version
+ * @abi_minor: Minor ABI version
+ * @abi_patch: ABI patch
+ * @count: count of tlv items
+ * @items: consecutive variable size tlv items
+ */
+struct sof_manifest {
+ __le16 abi_major;
+ __le16 abi_minor;
+ __le16 abi_patch;
+ __le16 count;
+ struct sof_manifest_tlv items[];
+};
+
#endif
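Since @size excludes the TLV header and the items are consecutive, walking a manifest means advancing by sizeof(header) + size per item. A minimal sketch, assuming the blob was already validated against its total length and a little-endian host (so the __le16/__le32 fields are read directly):

#include <sound/sof/header.h>

/* Sketch: visit each TLV item in a validated struct sof_manifest blob. */
static void walk_sof_manifest(const struct sof_manifest *man)
{
        const unsigned char *p = (const unsigned char *)man->items;
        unsigned int i;

        for (i = 0; i < man->count; i++) {
                const struct sof_manifest_tlv *tlv = (const void *)p;

                /* tlv->type identifies the payload in tlv->data[]. */
                p += sizeof(*tlv) + tlv->size;
        }
}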
diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h
index 2a25cd8da503..5caf75cadaf8 100644
--- a/include/uapi/sound/sof/tokens.h
+++ b/include/uapi/sound/sof/tokens.h
@@ -24,6 +24,9 @@
#define SOF_TPLG_KCTL_ENUM_ID 257
#define SOF_TPLG_KCTL_BYTES_ID 258
#define SOF_TPLG_KCTL_SWITCH_ID 259
+#define SOF_TPLG_KCTL_BYTES_VOLATILE_RO 260
+#define SOF_TPLG_KCTL_BYTES_VOLATILE_RW 261
+#define SOF_TPLG_KCTL_BYTES_WO_ID 262
/*
* Tokens - must match values in topology configurations
@@ -48,11 +51,18 @@
#define SOF_TKN_SCHED_CORE 203
#define SOF_TKN_SCHED_FRAMES 204
#define SOF_TKN_SCHED_TIME_DOMAIN 205
+#define SOF_TKN_SCHED_DYNAMIC_PIPELINE 206
+#define SOF_TKN_SCHED_LP_MODE 207
+#define SOF_TKN_SCHED_MEM_USAGE 208
/* volume */
#define SOF_TKN_VOLUME_RAMP_STEP_TYPE 250
#define SOF_TKN_VOLUME_RAMP_STEP_MS 251
+#define SOF_TKN_GAIN_RAMP_TYPE 260
+#define SOF_TKN_GAIN_RAMP_DURATION 261
+#define SOF_TKN_GAIN_VAL 262
+
/* SRC */
#define SOF_TKN_SRC_RATE_IN 300
#define SOF_TKN_SRC_RATE_OUT 301
@@ -73,6 +83,11 @@
/* Token retired with ABI 3.2, do not use for new capabilities
* #define SOF_TKN_COMP_PRELOAD_COUNT 403
*/
+#define SOF_TKN_COMP_CORE_ID 404
+#define SOF_TKN_COMP_UUID 405
+#define SOF_TKN_COMP_CPC 406
+#define SOF_TKN_COMP_IS_PAGES 409
+#define SOF_TKN_COMP_NUM_AUDIO_FORMATS 410
/* SSP */
#define SOF_TKN_INTEL_SSP_CLKS_CONTROL 500
@@ -126,4 +141,52 @@
#define SOF_TKN_MUTE_LED_USE 1300
#define SOF_TKN_MUTE_LED_DIRECTION 1301
+/* ALH */
+#define SOF_TKN_INTEL_ALH_RATE 1400
+#define SOF_TKN_INTEL_ALH_CH 1401
+
+/* HDA */
+#define SOF_TKN_INTEL_HDA_RATE 1500
+#define SOF_TKN_INTEL_HDA_CH 1501
+
+/* AFE */
+#define SOF_TKN_MEDIATEK_AFE_RATE 1600
+#define SOF_TKN_MEDIATEK_AFE_CH 1601
+#define SOF_TKN_MEDIATEK_AFE_FORMAT 1602
+
+/* MIXER */
+#define SOF_TKN_MIXER_TYPE 1700
+
+/* ACPDMIC */
+#define SOF_TKN_AMD_ACPDMIC_RATE 1800
+#define SOF_TKN_AMD_ACPDMIC_CH 1801
+
+/* CAVS AUDIO FORMAT */
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_RATE 1900
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_BIT_DEPTH 1901
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_VALID_BIT 1902
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_CHANNELS 1903
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_CH_MAP 1904
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_CH_CFG 1905
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_INTERLEAVING_STYLE 1906
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_FMT_CFG 1907
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_SAMPLE_TYPE 1908
+/* intentional token numbering discontinuity, reserved for future use */
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_RATE 1930
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_BIT_DEPTH 1931
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_VALID_BIT 1932
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_CHANNELS 1933
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_CH_MAP 1934
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_CH_CFG 1935
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_INTERLEAVING_STYLE 1936
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_FMT_CFG 1937
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_SAMPLE_TYPE 1938
+/* intentional token numbering discontinuity, reserved for future use */
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IBS 1970
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OBS 1971
+#define SOF_TKN_CAVS_AUDIO_FORMAT_DMA_BUFFER_SIZE 1972
+
+/* COPIER */
+#define SOF_TKN_INTEL_COPIER_NODE_TYPE 1980
+
#endif
diff --git a/include/uapi/sound/tlv.h b/include/uapi/sound/tlv.h
index 7d6d65f60a42..b99a2414b53d 100644
--- a/include/uapi/sound/tlv.h
+++ b/include/uapi/sound/tlv.h
@@ -1,15 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
#ifndef __UAPI_SOUND_TLV_H
#define __UAPI_SOUND_TLV_H
diff --git a/include/uapi/sound/usb_stream.h b/include/uapi/sound/usb_stream.h
index 95419d8bbc16..50609016185a 100644
--- a/include/uapi/sound/usb_stream.h
+++ b/include/uapi/sound/usb_stream.h
@@ -1,20 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* Copyright (C) 2007, 2008 Karsten Wiese <fzu@wemgehoertderstaat.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _UAPI__SOUND_USB_STREAM_H
@@ -61,7 +47,7 @@ struct usb_stream {
unsigned inpacket_split_at;
unsigned next_inpacket_split;
unsigned next_inpacket_split_at;
- struct usb_stream_packet inpacket[0];
+ struct usb_stream_packet inpacket[];
};
enum usb_stream_state {
diff --git a/include/uapi/xen/gntdev.h b/include/uapi/xen/gntdev.h
index fe4423e518c6..7a7145395c09 100644
--- a/include/uapi/xen/gntdev.h
+++ b/include/uapi/xen/gntdev.h
@@ -47,7 +47,13 @@ struct ioctl_gntdev_grant_ref {
/*
* Inserts the grant references into the mapping table of an instance
* of gntdev. N.B. This does not perform the mapping, which is deferred
- * until mmap() is called with @index as the offset.
+ * until mmap() is called with @index as the offset. @index should be
+ * considered opaque to userspace, with one exception: if no grant
+ * references have ever been inserted into the mapping table of this
+ * instance, @index will be set to 0. This is necessary to use gntdev
+ * with userspace APIs that expect a file descriptor that can be
+ * mmap()'d at offset 0, such as Wayland. If @count is set to 0, this
+ * ioctl will fail.
*/
#define IOCTL_GNTDEV_MAP_GRANT_REF \
_IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref))
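In practice the two-step protocol looks like the sketch below: the ioctl fills in @index, and a subsequent mmap() at that offset performs the actual mapping. A single-grant example; the struct layout follows this header and error handling is minimal:

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xen/gntdev.h>

/* Sketch: map one page granted by domain domid with reference ref. */
static void *map_one_grant(int fd, __u32 domid, __u32 ref)
{
        struct ioctl_gntdev_map_grant_ref op = {
                .count = 1,
                .refs[0] = { .domid = domid, .ref = ref },
        };
        void *p;

        if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op) < 0)
                return NULL;
        /* The mapping itself is deferred until mmap() at op.index. */
        p = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
                 MAP_SHARED, fd, op.index);
        return p == MAP_FAILED ? NULL : p;
}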
@@ -66,7 +72,7 @@ struct ioctl_gntdev_map_grant_ref {
/*
* Removes the grant references from the mapping table of an instance of
- * of gntdev. N.B. munmap() must be called on the relevant virtual address(es)
+ * gntdev. N.B. munmap() must be called on the relevant virtual address(es)
* before this ioctl is called, or an error will result.
*/
#define IOCTL_GNTDEV_UNMAP_GRANT_REF \
diff --git a/drivers/scsi/ufs/ufs.h b/include/ufs/ufs.h
index cfe380348bf0..1bba3fead2ce 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/include/ufs/ufs.h
@@ -1,36 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Universal Flash Storage Host controller driver
- *
- * This code is based on drivers/scsi/ufs/ufs.h
* Copyright (C) 2011-2013 Samsung India Software Operations
*
* Authors:
* Santosh Yaraganavi <santosh.sy@samsung.com>
* Vinayak Holikatti <h.vinayak@samsung.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * See the COPYING file in the top-level directory or visit
- * <http://www.gnu.org/licenses/gpl-2.0.html>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This program is provided "AS IS" and "WITH ALL FAULTS" and
- * without warranty of any kind. You are solely responsible for
- * determining the appropriateness of using and distributing
- * the program and assume all risks associated with your exercise
- * of rights with respect to the program, including but not limited
- * to infringement of third party rights, the risks and costs of
- * program errors, damage to or loss of data, programs or equipment,
- * and unavailability or interruption of operations. Under no
- * circumstances will the contributor of this Program be liable for
- * any damages of any kind arising from your use or distribution of
- * this program.
*/
#ifndef _UFS_H
@@ -63,6 +38,16 @@
#define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F
#define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
#define UFS_UPIU_WLUN_ID (1 << 7)
+#define UFS_RPMB_UNIT 0xC4
+
+/* The WriteBooster buffer is available only for logical units 0 to 7 */
+#define UFS_UPIU_MAX_WB_LUN_ID 8
+
+/*
+ * The WriteBooster buffer lifetime has a limit set by the vendor.
+ * If it exceeds the limit, the WriteBooster feature will be disabled.
+ */
+#define UFS_WB_EXCEED_LIFETIME 0x0B
/* Well known logical unit id in LUN field of UPIU */
enum {
@@ -140,12 +125,17 @@ enum flag_idn {
QUERY_FLAG_IDN_BUSY_RTC = 0x09,
QUERY_FLAG_IDN_RESERVED3 = 0x0A,
QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE = 0x0B,
+ QUERY_FLAG_IDN_WB_EN = 0x0E,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN = 0x0F,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8 = 0x10,
+ QUERY_FLAG_IDN_HPB_RESET = 0x11,
+ QUERY_FLAG_IDN_HPB_EN = 0x12,
};
/* Attribute idn for Query requests */
enum attr_idn {
QUERY_ATTR_IDN_BOOT_LU_EN = 0x00,
- QUERY_ATTR_IDN_RESERVED = 0x01,
+ QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD = 0x01,
QUERY_ATTR_IDN_POWER_MODE = 0x02,
QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03,
QUERY_ATTR_IDN_OOO_DATA_EN = 0x04,
@@ -167,6 +157,14 @@ enum attr_idn {
QUERY_ATTR_IDN_FFU_STATUS = 0x14,
QUERY_ATTR_IDN_PSA_STATE = 0x15,
QUERY_ATTR_IDN_PSA_DATA_SIZE = 0x16,
+ QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME = 0x17,
+ QUERY_ATTR_IDN_CASE_ROUGH_TEMP = 0x18,
+ QUERY_ATTR_IDN_HIGH_TEMP_BOUND = 0x19,
+ QUERY_ATTR_IDN_LOW_TEMP_BOUND = 0x1A,
+ QUERY_ATTR_IDN_WB_FLUSH_STATUS = 0x1C,
+ QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE = 0x1D,
+ QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 0x1E,
+ QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F,
};
/* Descriptor idn for Query requests */
@@ -189,16 +187,6 @@ enum desc_header_offset {
QUERY_DESC_DESC_TYPE_OFFSET = 0x01,
};
-enum ufs_desc_def_size {
- QUERY_DESC_DEVICE_DEF_SIZE = 0x40,
- QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
- QUERY_DESC_UNIT_DEF_SIZE = 0x23,
- QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
- QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48,
- QUERY_DESC_POWER_DEF_SIZE = 0x62,
- QUERY_DESC_HEALTH_DEF_SIZE = 0x25,
-};
-
/* Unit descriptor parameters offsets in bytes*/
enum unit_desc_param {
UNIT_DESC_PARAM_LEN = 0x0,
@@ -218,6 +206,10 @@ enum unit_desc_param {
UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18,
UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20,
UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22,
+ UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS = 0x23,
+ UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF = 0x25,
+ UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS = 0x27,
+ UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS = 0x29,
};
/* Device descriptor parameters offsets in bytes*/
@@ -257,6 +249,12 @@ enum device_desc_param {
DEVICE_DESC_PARAM_PSA_MAX_DATA = 0x25,
DEVICE_DESC_PARAM_PSA_TMT = 0x29,
DEVICE_DESC_PARAM_PRDCT_REV = 0x2A,
+ DEVICE_DESC_PARAM_HPB_VER = 0x40,
+ DEVICE_DESC_PARAM_HPB_CONTROL = 0x42,
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP = 0x4F,
+ DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN = 0x53,
+ DEVICE_DESC_PARAM_WB_TYPE = 0x54,
+ DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS = 0x55,
};
/* Interconnect descriptor parameters offsets in bytes*/
@@ -301,6 +299,15 @@ enum geometry_desc_param {
GEOMETRY_DESC_PARAM_ENM4_MAX_NUM_UNITS = 0x3E,
GEOMETRY_DESC_PARAM_ENM4_CAP_ADJ_FCTR = 0x42,
GEOMETRY_DESC_PARAM_OPT_LOG_BLK_SIZE = 0x44,
+ GEOMETRY_DESC_PARAM_HPB_REGION_SIZE = 0x48,
+ GEOMETRY_DESC_PARAM_HPB_NUMBER_LU = 0x49,
+ GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE = 0x4A,
+ GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS = 0x4B,
+ GEOMETRY_DESC_PARAM_WB_MAX_ALLOC_UNITS = 0x4F,
+ GEOMETRY_DESC_PARAM_WB_MAX_WB_LUNS = 0x53,
+ GEOMETRY_DESC_PARAM_WB_BUFF_CAP_ADJ = 0x54,
+ GEOMETRY_DESC_PARAM_WB_SUP_RED_TYPE = 0x55,
+ GEOMETRY_DESC_PARAM_WB_SUP_WB_TYPE = 0x56,
};
/* Health descriptor parameters offsets in bytes*/
@@ -312,6 +319,12 @@ enum health_desc_param {
HEALTH_DESC_PARAM_LIFE_TIME_EST_B = 0x4,
};
+/* WriteBooster buffer mode */
+enum {
+ WB_BUF_MODE_LU_DEDICATED = 0x0,
+ WB_BUF_MODE_SHARED = 0x1,
+};
+
/*
* Logical Unit Write Protect
* 00h: LU not write protected
@@ -332,7 +345,16 @@ enum {
UFSHCD_AMP = 3,
};
-#define POWER_DESC_MAX_SIZE 0x62
+/* Possible values for dExtendedUFSFeaturesSupport */
+enum {
+ UFS_DEV_LOW_TEMP_NOTIF = BIT(4),
+ UFS_DEV_HIGH_TEMP_NOTIF = BIT(5),
+ UFS_DEV_EXT_TEMP_NOTIF = BIT(6),
+ UFS_DEV_HPB_SUPPORT = BIT(7),
+ UFS_DEV_WRITE_BOOSTER_SUP = BIT(8),
+};
+#define UFS_DEV_HPB_SUPPORT_VERSION 0x310
+
#define POWER_DESC_MAX_ACTV_ICC_LVLS 16
/* Attribute bActiveICCLevel parameter bit masks definitions */
@@ -351,9 +373,16 @@ enum power_desc_param_offset {
/* Exception event mask values */
enum {
- MASK_EE_STATUS = 0xFFFF,
- MASK_EE_URGENT_BKOPS = (1 << 2),
+ MASK_EE_STATUS = 0xFFFF,
+ MASK_EE_DYNCAP_EVENT = BIT(0),
+ MASK_EE_SYSPOOL_EVENT = BIT(1),
+ MASK_EE_URGENT_BKOPS = BIT(2),
+ MASK_EE_TOO_HIGH_TEMP = BIT(3),
+ MASK_EE_TOO_LOW_TEMP = BIT(4),
+ MASK_EE_WRITEBOOSTER_EVENT = BIT(5),
+ MASK_EE_PERFORMANCE_THROTTLING = BIT(6),
};
+#define MASK_EE_URGENT_TEMP (MASK_EE_TOO_HIGH_TEMP | MASK_EE_TOO_LOW_TEMP)
/* Background operation status */
enum bkops_status {
@@ -386,11 +415,6 @@ enum ufs_ref_clk_freq {
REF_CLK_FREQ_INVAL = -1,
};
-struct ufs_ref_clk {
- unsigned long freq_hz;
- enum ufs_ref_clk_freq val;
-};
-
/* Query response result code */
enum {
QUERY_RESULT_SUCCESS = 0x00,
@@ -444,8 +468,11 @@ enum ufs_dev_pwr_mode {
UFS_ACTIVE_PWR_MODE = 1,
UFS_SLEEP_PWR_MODE = 2,
UFS_POWERDOWN_PWR_MODE = 3,
+ UFS_DEEPSLEEP_PWR_MODE = 4,
};
+#define UFS_WB_BUF_REMAIN_PERCENT(val) ((val) / 10)
+
/**
* struct utp_cmd_rsp - Response UPIU structure
* @residual_transfer_count: Residual transfer count DW-3
@@ -460,6 +487,41 @@ struct utp_cmd_rsp {
u8 sense_data[UFS_SENSE_SIZE];
};
+struct ufshpb_active_field {
+ __be16 active_rgn;
+ __be16 active_srgn;
+};
+#define HPB_ACT_FIELD_SIZE 4
+
+/**
+ * struct utp_hpb_rsp - Response UPIU structure
+ * @residual_transfer_count: Residual transfer count DW-3
+ * @reserved1: Reserved double words DW-4 to DW-7
+ * @sense_data_len: Sense data length DW-8 U16
+ * @desc_type: Descriptor type of sense data
+ * @additional_len: Additional length of sense data
+ * @hpb_op: HPB operation type
+ * @lun: LUN of response UPIU
+ * @active_rgn_cnt: Active region count
+ * @inactive_rgn_cnt: Inactive region count
+ * @hpb_active_field: HPB regions and subregions recommended for activation
+ * @hpb_inactive_field: HPB regions and subregions to be inactivated
+ */
+struct utp_hpb_rsp {
+ __be32 residual_transfer_count;
+ __be32 reserved1[4];
+ __be16 sense_data_len;
+ u8 desc_type;
+ u8 additional_len;
+ u8 hpb_op;
+ u8 lun;
+ u8 active_rgn_cnt;
+ u8 inactive_rgn_cnt;
+ struct ufshpb_active_field hpb_active_field[2];
+ __be16 hpb_inactive_field[2];
+};
+#define UTP_HPB_RSP_SIZE 40
+
/**
* struct utp_upiu_rsp - general upiu response structure
* @header: UPIU header structure DW-0 to DW-2
@@ -470,6 +532,7 @@ struct utp_upiu_rsp {
struct utp_upiu_header header;
union {
struct utp_cmd_rsp sr;
+ struct utp_hpb_rsp hr;
struct utp_upiu_query qr;
};
};
@@ -494,15 +557,6 @@ struct ufs_query_res {
struct utp_upiu_query upiu_res;
};
-#define UFS_VREG_VCC_MIN_UV 2700000 /* uV */
-#define UFS_VREG_VCC_MAX_UV 3600000 /* uV */
-#define UFS_VREG_VCC_1P8_MIN_UV 1700000 /* uV */
-#define UFS_VREG_VCC_1P8_MAX_UV 1950000 /* uV */
-#define UFS_VREG_VCCQ_MIN_UV 1140000 /* uV */
-#define UFS_VREG_VCCQ_MAX_UV 1260000 /* uV */
-#define UFS_VREG_VCCQ2_MIN_UV 1700000 /* uV */
-#define UFS_VREG_VCCQ2_MAX_UV 1950000 /* uV */
-
/*
* VCCQ & VCCQ2 current requirement when UFS device is in sleep state
* and link is in Hibern8 state.
@@ -512,9 +566,8 @@ struct ufs_query_res {
struct ufs_vreg {
struct regulator *reg;
const char *name;
+ bool always_on;
bool enabled;
- int min_uV;
- int max_uV;
int max_uA;
};
@@ -526,31 +579,45 @@ struct ufs_vreg_info {
};
struct ufs_dev_info {
- bool f_power_on_wp_en;
+ bool f_power_on_wp_en;
/* Keeps information if any of the LU is power on write protected */
- bool is_lu_power_on_wp;
+ bool is_lu_power_on_wp;
/* Maximum number of general LU supported by the UFS device */
- u8 max_lu_supported;
- u16 wmanufacturerid;
+ u8 max_lu_supported;
+ u16 wmanufacturerid;
/*UFS device Product Name */
- u8 *model;
+ u8 *model;
+ u16 wspecversion;
+ u32 clk_gating_wait_us;
+
+ /* UFS HPB related flag */
+ bool hpb_enabled;
+
+ /* UFS WB related flags */
+ bool wb_enabled;
+ bool wb_buf_flush_enabled;
+ u8 wb_dedicated_lu;
+ u8 wb_buffer_type;
+
+ bool b_rpm_dev_flush_capable;
+ u8 b_presrv_uspc_en;
};
-/**
- * ufs_is_valid_unit_desc_lun - checks if the given LUN has a unit descriptor
- * @dev_info: pointer of instance of struct ufs_dev_info
- * @lun: LU number to check
- * @return: true if the lun has a matching unit descriptor, false otherwise
+/*
+ * This enum is used in string mapping in include/trace/events/ufs.h.
*/
-static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info,
- u8 lun)
-{
- if (!dev_info || !dev_info->max_lu_supported) {
- pr_err("Max General LU supported by UFS isn't initialized\n");
- return false;
- }
-
- return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
-}
+enum ufs_trace_str_t {
+ UFS_CMD_SEND, UFS_CMD_COMP, UFS_DEV_COMP,
+ UFS_QUERY_SEND, UFS_QUERY_COMP, UFS_QUERY_ERR,
+ UFS_TM_SEND, UFS_TM_COMP, UFS_TM_ERR
+};
+
+/*
+ * Transaction Specific Fields (TSF) type in the UPIU package; this enum is
+ * used in include/trace/events/ufs.h for UFS command trace.
+ */
+enum ufs_trace_tsf_t {
+ UFS_TSF_CDB, UFS_TSF_OSF, UFS_TSF_TM_INPUT, UFS_TSF_TM_OUTPUT
+};
#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/include/ufs/ufs_quirks.h
index d0ab147f98d3..bcb4f004bed5 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/include/ufs/ufs_quirks.h
@@ -12,30 +12,23 @@
#define UFS_ANY_VENDOR 0xFFFF
#define UFS_ANY_MODEL "ANY_MODEL"
-#define UFS_VENDOR_TOSHIBA 0x198
+#define UFS_VENDOR_MICRON 0x12C
#define UFS_VENDOR_SAMSUNG 0x1CE
#define UFS_VENDOR_SKHYNIX 0x1AD
+#define UFS_VENDOR_TOSHIBA 0x198
+#define UFS_VENDOR_WDC 0x145
/**
- * ufs_dev_fix - ufs device quirk info
+ * ufs_dev_quirk - ufs device quirk info
* @card: ufs card details
* @quirk: device quirk
*/
-struct ufs_dev_fix {
+struct ufs_dev_quirk {
u16 wmanufacturerid;
- u8 *model;
+ const u8 *model;
unsigned int quirk;
};
-#define END_FIX { }
-
-/* add specific device quirk */
-#define UFS_FIX(_vendor, _model, _quirk) { \
- .wmanufacturerid = (_vendor),\
- .model = (_model), \
- .quirk = (_quirk), \
-}
-
/*
* Some vendor's UFS device sends back to back NACs for the DL data frames
* causing the host controller to raise the DFES error status. Sometimes
@@ -100,4 +93,24 @@ struct ufs_dev_fix {
*/
#define UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME (1 << 9)
+/*
+ * Some pre-3.1 UFS devices can support extended features by upgrading
+ * the firmware. Enable this quirk to make the UFS core driver probe for and
+ * enable the supported features on such devices.
+ */
+#define UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES (1 << 10)
+
+/*
+ * Some UFS devices require a delay after the VCC power rail is turned off.
+ * Enable this quirk to introduce a 5ms delay after VCC power-off during
+ * the suspend flow.
+ */
+#define UFS_DEVICE_QUIRK_DELAY_AFTER_LPM (1 << 11)
+
+/*
+ * Some UFS devices require the L2P entry to be swapped before it is sent to
+ * the UFS device for the HPB READ command.
+ */
+#define UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ (1 << 12)
+
#endif /* UFS_QUIRKS_H_ */
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
new file mode 100644
index 000000000000..9f28349ebcff
--- /dev/null
+++ b/include/ufs/ufshcd.h
@@ -0,0 +1,1241 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Universal Flash Storage Host controller driver
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ */
+
+#ifndef _UFSHCD_H
+#define _UFSHCD_H
+
+#include <linux/bitfield.h>
+#include <linux/blk-crypto-profile.h>
+#include <linux/blk-mq.h>
+#include <linux/devfreq.h>
+#include <linux/pm_runtime.h>
+#include <scsi/scsi_device.h>
+#include <ufs/unipro.h>
+#include <ufs/ufs.h>
+#include <ufs/ufs_quirks.h>
+#include <ufs/ufshci.h>
+
+#define UFSHCD "ufshcd"
+
+struct ufs_hba;
+
+enum dev_cmd_type {
+ DEV_CMD_TYPE_NOP = 0x0,
+ DEV_CMD_TYPE_QUERY = 0x1,
+};
+
+enum ufs_event_type {
+ /* uic specific errors */
+ UFS_EVT_PA_ERR = 0,
+ UFS_EVT_DL_ERR,
+ UFS_EVT_NL_ERR,
+ UFS_EVT_TL_ERR,
+ UFS_EVT_DME_ERR,
+
+ /* fatal errors */
+ UFS_EVT_AUTO_HIBERN8_ERR,
+ UFS_EVT_FATAL_ERR,
+ UFS_EVT_LINK_STARTUP_FAIL,
+ UFS_EVT_RESUME_ERR,
+ UFS_EVT_SUSPEND_ERR,
+ UFS_EVT_WL_SUSP_ERR,
+ UFS_EVT_WL_RES_ERR,
+
+ /* abnormal events */
+ UFS_EVT_DEV_RESET,
+ UFS_EVT_HOST_RESET,
+ UFS_EVT_ABORT,
+
+ UFS_EVT_CNT,
+};
+
+/**
+ * struct uic_command - UIC command structure
+ * @command: UIC command
+ * @argument1: UIC command argument 1
+ * @argument2: UIC command argument 2
+ * @argument3: UIC command argument 3
+ * @cmd_active: Indicate if UIC command is outstanding
+ * @done: UIC command completion
+ */
+struct uic_command {
+ u32 command;
+ u32 argument1;
+ u32 argument2;
+ u32 argument3;
+ int cmd_active;
+ struct completion done;
+};
+
+/* Used to differentiate the power management options */
+enum ufs_pm_op {
+ UFS_RUNTIME_PM,
+ UFS_SYSTEM_PM,
+ UFS_SHUTDOWN_PM,
+};
+
+/* Host <-> Device UniPro Link state */
+enum uic_link_state {
+ UIC_LINK_OFF_STATE = 0, /* Link powered down or disabled */
+ UIC_LINK_ACTIVE_STATE = 1, /* Link is in Fast/Slow/Sleep state */
+ UIC_LINK_HIBERN8_STATE = 2, /* Link is in Hibernate state */
+ UIC_LINK_BROKEN_STATE = 3, /* Link is in broken state */
+};
+
+#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
+#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
+ UIC_LINK_ACTIVE_STATE)
+#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
+ UIC_LINK_HIBERN8_STATE)
+#define ufshcd_is_link_broken(hba) ((hba)->uic_link_state == \
+ UIC_LINK_BROKEN_STATE)
+#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
+#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
+ UIC_LINK_ACTIVE_STATE)
+#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
+ UIC_LINK_HIBERN8_STATE)
+#define ufshcd_set_link_broken(hba) ((hba)->uic_link_state = \
+ UIC_LINK_BROKEN_STATE)
+
+#define ufshcd_set_ufs_dev_active(h) \
+ ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
+#define ufshcd_set_ufs_dev_sleep(h) \
+ ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
+#define ufshcd_set_ufs_dev_poweroff(h) \
+ ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
+#define ufshcd_set_ufs_dev_deepsleep(h) \
+ ((h)->curr_dev_pwr_mode = UFS_DEEPSLEEP_PWR_MODE)
+#define ufshcd_is_ufs_dev_active(h) \
+ ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
+#define ufshcd_is_ufs_dev_sleep(h) \
+ ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
+#define ufshcd_is_ufs_dev_poweroff(h) \
+ ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
+#define ufshcd_is_ufs_dev_deepsleep(h) \
+ ((h)->curr_dev_pwr_mode == UFS_DEEPSLEEP_PWR_MODE)
+
+/*
+ * UFS Power management levels.
+ * Each level is in increasing order of power savings, except DeepSleep
+ * which is lower than PowerDown with power on but not PowerDown with
+ * power off.
+ */
+enum ufs_pm_level {
+ UFS_PM_LVL_0,
+ UFS_PM_LVL_1,
+ UFS_PM_LVL_2,
+ UFS_PM_LVL_3,
+ UFS_PM_LVL_4,
+ UFS_PM_LVL_5,
+ UFS_PM_LVL_6,
+ UFS_PM_LVL_MAX
+};
+
+struct ufs_pm_lvl_states {
+ enum ufs_dev_pwr_mode dev_state;
+ enum uic_link_state link_state;
+};
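+
+/*
+ * A sketch of the level-to-state mapping (the authoritative table is
+ * ufs_pm_lvl_states[] in ufshcd.c; the pairings below are believed
+ * correct but are not taken from this patch):
+ *
+ *	UFS_PM_LVL_0: UFS_ACTIVE_PWR_MODE,    UIC_LINK_ACTIVE_STATE
+ *	UFS_PM_LVL_1: UFS_ACTIVE_PWR_MODE,    UIC_LINK_HIBERN8_STATE
+ *	UFS_PM_LVL_2: UFS_SLEEP_PWR_MODE,     UIC_LINK_ACTIVE_STATE
+ *	UFS_PM_LVL_3: UFS_SLEEP_PWR_MODE,     UIC_LINK_HIBERN8_STATE
+ *	UFS_PM_LVL_4: UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE
+ *	UFS_PM_LVL_5: UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE
+ *	UFS_PM_LVL_6: UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE
+ */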
+
+/**
+ * struct ufshcd_lrb - local reference block
+ * @utr_descriptor_ptr: UTRD address of the command
+ * @ucd_req_ptr: UCD address of the command
+ * @ucd_rsp_ptr: Response UPIU address for this command
+ * @ucd_prdt_ptr: PRDT address of the command
+ * @utrd_dma_addr: UTRD dma address for debug
+ * @ucd_prdt_dma_addr: PRDT dma address for debug
+ * @ucd_rsp_dma_addr: UPIU response dma address for debug
+ * @ucd_req_dma_addr: UPIU request dma address for debug
+ * @cmd: pointer to SCSI command
+ * @scsi_status: SCSI status of the command
+ * @command_type: SCSI, UFS, Query.
+ * @task_tag: Task tag of the command
+ * @lun: LUN of the command
+ * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
+ * @issue_time_stamp: time stamp for debug purposes (CLOCK_MONOTONIC)
+ * @issue_time_stamp_local_clock: time stamp for debug purposes (local_clock)
+ * @compl_time_stamp: time stamp for statistics (CLOCK_MONOTONIC)
+ * @compl_time_stamp_local_clock: time stamp for debug purposes (local_clock)
+ * @crypto_key_slot: the key slot to use for inline crypto (-1 if none)
+ * @data_unit_num: the data unit number for the first block for inline crypto
+ * @req_abort_skip: skip request abort task flag
+ */
+struct ufshcd_lrb {
+ struct utp_transfer_req_desc *utr_descriptor_ptr;
+ struct utp_upiu_req *ucd_req_ptr;
+ struct utp_upiu_rsp *ucd_rsp_ptr;
+ struct ufshcd_sg_entry *ucd_prdt_ptr;
+
+ dma_addr_t utrd_dma_addr;
+ dma_addr_t ucd_req_dma_addr;
+ dma_addr_t ucd_rsp_dma_addr;
+ dma_addr_t ucd_prdt_dma_addr;
+
+ struct scsi_cmnd *cmd;
+ int scsi_status;
+
+ int command_type;
+ int task_tag;
+ u8 lun; /* UPIU LUN id field is only 8-bit wide */
+ bool intr_cmd;
+ ktime_t issue_time_stamp;
+ u64 issue_time_stamp_local_clock;
+ ktime_t compl_time_stamp;
+ u64 compl_time_stamp_local_clock;
+#ifdef CONFIG_SCSI_UFS_CRYPTO
+ int crypto_key_slot;
+ u64 data_unit_num;
+#endif
+
+ bool req_abort_skip;
+};
+
+/**
+ * struct ufs_query - holds relevant data structures for query request
+ * @request: request upiu and function
+ * @descriptor: buffer for sending/receiving descriptor
+ * @response: response upiu and response
+ */
+struct ufs_query {
+ struct ufs_query_req request;
+ u8 *descriptor;
+ struct ufs_query_res response;
+};
+
+/**
+ * struct ufs_dev_cmd - all fields associated with device management commands
+ * @type: device management command type - Query, NOP OUT
+ * @lock: lock to allow one command at a time
+ * @complete: internal commands completion
+ * @query: Device management query information
+ */
+struct ufs_dev_cmd {
+ enum dev_cmd_type type;
+ struct mutex lock;
+ struct completion *complete;
+ struct ufs_query query;
+};
+
+/**
+ * struct ufs_clk_info - UFS clock related info
+ * @list: list headed by hba->clk_list_head
+ * @clk: clock node
+ * @name: clock name
+ * @max_freq: maximum frequency supported by the clock
+ * @min_freq: min frequency that can be used for clock scaling
+ * @curr_freq: indicates the current frequency that it is set to
+ * @keep_link_active: indicates that the clk should not be disabled if
+ * link is active
+ * @enabled: variable to check against multiple enable/disable
+ */
+struct ufs_clk_info {
+ struct list_head list;
+ struct clk *clk;
+ const char *name;
+ u32 max_freq;
+ u32 min_freq;
+ u32 curr_freq;
+ bool keep_link_active;
+ bool enabled;
+};
+
+enum ufs_notify_change_status {
+ PRE_CHANGE,
+ POST_CHANGE,
+};
+
+struct ufs_pa_layer_attr {
+ u32 gear_rx;
+ u32 gear_tx;
+ u32 lane_rx;
+ u32 lane_tx;
+ u32 pwr_rx;
+ u32 pwr_tx;
+ u32 hs_rate;
+};
+
+struct ufs_pwr_mode_info {
+ bool is_valid;
+ struct ufs_pa_layer_attr info;
+};
+
+/**
+ * struct ufs_hba_variant_ops - variant specific callbacks
+ * @name: variant name
+ * @init: called when the driver is initialized
+ * @exit: called to cleanup everything done in init
+ * @get_ufs_hci_version: called to get UFS HCI version
+ * @clk_scale_notify: notifies that clks are scaled up/down
+ * @setup_clocks: called before touching any of the controller registers
+ * @hce_enable_notify: called before and after HCE enable bit is set to allow
+ * variant specific Uni-Pro initialization.
+ * @link_startup_notify: called before and after Link startup is carried out
+ * to allow variant specific Uni-Pro initialization.
+ * @pwr_change_notify: called before and after a power mode change
+ * is carried out to allow vendor specific capabilities
+ * to be set.
+ * @setup_xfer_req: called before any transfer request is issued
+ * to set some things
+ * @setup_task_mgmt: called before any task management request is issued
+ * to set some things
+ * @hibern8_notify: called around hibern8 enter/exit
+ * @apply_dev_quirks: called to apply device specific quirks
+ * @fixup_dev_quirks: called to modify device specific quirks
+ * @suspend: called during host controller PM callback
+ * @resume: called during host controller PM callback
+ * @dbg_register_dump: used to dump controller debug information
+ * @phy_initialization: used to initialize phys
+ * @device_reset: called to issue a reset pulse on the UFS device
+ * @config_scaling_param: called to configure clock scaling parameters
+ * @program_key: program or evict an inline encryption key
+ * @event_notify: called to notify important events
+ */
+struct ufs_hba_variant_ops {
+ const char *name;
+ int (*init)(struct ufs_hba *);
+ void (*exit)(struct ufs_hba *);
+ u32 (*get_ufs_hci_version)(struct ufs_hba *);
+ int (*clk_scale_notify)(struct ufs_hba *, bool,
+ enum ufs_notify_change_status);
+ int (*setup_clocks)(struct ufs_hba *, bool,
+ enum ufs_notify_change_status);
+ int (*hce_enable_notify)(struct ufs_hba *,
+ enum ufs_notify_change_status);
+ int (*link_startup_notify)(struct ufs_hba *,
+ enum ufs_notify_change_status);
+ int (*pwr_change_notify)(struct ufs_hba *,
+ enum ufs_notify_change_status status,
+ struct ufs_pa_layer_attr *,
+ struct ufs_pa_layer_attr *);
+ void (*setup_xfer_req)(struct ufs_hba *hba, int tag,
+ bool is_scsi_cmd);
+ void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
+ void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
+ enum ufs_notify_change_status);
+ int (*apply_dev_quirks)(struct ufs_hba *hba);
+ void (*fixup_dev_quirks)(struct ufs_hba *hba);
+ int (*suspend)(struct ufs_hba *, enum ufs_pm_op,
+ enum ufs_notify_change_status);
+ int (*resume)(struct ufs_hba *, enum ufs_pm_op);
+ void (*dbg_register_dump)(struct ufs_hba *hba);
+ int (*phy_initialization)(struct ufs_hba *);
+ int (*device_reset)(struct ufs_hba *hba);
+ void (*config_scaling_param)(struct ufs_hba *hba,
+ struct devfreq_dev_profile *profile,
+ struct devfreq_simple_ondemand_data *data);
+ int (*program_key)(struct ufs_hba *hba,
+ const union ufs_crypto_cfg_entry *cfg, int slot);
+ void (*event_notify)(struct ufs_hba *hba,
+ enum ufs_event_type evt, void *data);
+};
+
+/* clock gating state */
+enum clk_gating_state {
+ CLKS_OFF,
+ CLKS_ON,
+ REQ_CLKS_OFF,
+ REQ_CLKS_ON,
+};
+
+/**
+ * struct ufs_clk_gating - UFS clock gating related info
+ * @gate_work: worker to turn off clocks after some delay as specified in
+ * delay_ms
+ * @ungate_work: worker to turn on clocks that will be used in case of
+ * interrupt context
+ * @state: the current clocks state
+ * @delay_ms: gating delay in ms
+ * @is_suspended: clk gating is suspended when set to 1, which can be used
+ * during suspend/resume
+ * @delay_attr: sysfs attribute to control delay_ms
+ * @enable_attr: sysfs attribute to enable/disable clock gating
+ * @is_enabled: Indicates the current status of clock gating
+ * @is_initialized: Indicates whether clock gating is initialized or not
+ * @active_reqs: number of requests that are pending and should be waited for
+ * completion before gating clocks.
+ * @clk_gating_workq: workqueue for clock gating work.
+ */
+struct ufs_clk_gating {
+ struct delayed_work gate_work;
+ struct work_struct ungate_work;
+ enum clk_gating_state state;
+ unsigned long delay_ms;
+ bool is_suspended;
+ struct device_attribute delay_attr;
+ struct device_attribute enable_attr;
+ bool is_enabled;
+ bool is_initialized;
+ int active_reqs;
+ struct workqueue_struct *clk_gating_workq;
+};
+
+struct ufs_saved_pwr_info {
+ struct ufs_pa_layer_attr info;
+ bool is_valid;
+};
+
+/**
+ * struct ufs_clk_scaling - UFS clock scaling related data
+ * @active_reqs: number of requests that are pending. If this is zero when
+ * devfreq ->target() function is called then schedule "suspend_work" to
+ * suspend devfreq.
+ * @tot_busy_t: Total busy time in current polling window
+ * @window_start_t: Start time (in jiffies) of the current polling window
+ * @busy_start_t: Start time of current busy period
+ * @enable_attr: sysfs attribute to enable/disable clock scaling
+ * @saved_pwr_info: UFS power mode may also be changed during scaling and this
+ * one keeps track of previous power mode.
+ * @workq: workqueue to schedule devfreq suspend/resume work
+ * @suspend_work: worker to suspend devfreq
+ * @resume_work: worker to resume devfreq
+ * @min_gear: lowest HS gear to scale down to
+ * @is_enabled: tracks if scaling is currently enabled or not, controlled by
+ * clkscale_enable sysfs node
+ * @is_allowed: tracks if scaling is currently allowed or not, used to block
+ * clock scaling which is not invoked from devfreq governor
+ * @is_initialized: Indicates whether clock scaling is initialized or not
+ * @is_busy_started: tracks if busy period has started or not
+ * @is_suspended: tracks if devfreq is suspended or not
+ */
+struct ufs_clk_scaling {
+ int active_reqs;
+ unsigned long tot_busy_t;
+ ktime_t window_start_t;
+ ktime_t busy_start_t;
+ struct device_attribute enable_attr;
+ struct ufs_saved_pwr_info saved_pwr_info;
+ struct workqueue_struct *workq;
+ struct work_struct suspend_work;
+ struct work_struct resume_work;
+ u32 min_gear;
+ bool is_enabled;
+ bool is_allowed;
+ bool is_initialized;
+ bool is_busy_started;
+ bool is_suspended;
+};
+
+#define UFS_EVENT_HIST_LENGTH 8
+/**
+ * struct ufs_event_hist - keeps history of errors
+ * @pos: index to indicate cyclic buffer position
+ * @val: cyclic buffer for registers value
+ * @tstamp: cyclic buffer for time stamp
+ * @cnt: error counter
+ */
+struct ufs_event_hist {
+ int pos;
+ u32 val[UFS_EVENT_HIST_LENGTH];
+ u64 tstamp[UFS_EVENT_HIST_LENGTH];
+ unsigned long long cnt;
+};
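+
+/*
+ * A minimal sketch of how the cyclic buffer is meant to advance on each
+ * recorded event (the real logic lives in ufshcd_update_evt_hist(),
+ * declared further below; this body is an assumption, not a copy):
+ *
+ *	e->val[e->pos] = val;
+ *	e->tstamp[e->pos] = local_clock();
+ *	e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
+ *	e->cnt += 1;
+ */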
+
+/**
+ * struct ufs_stats - keeps usage/err statistics
+ * @last_intr_status: record the last interrupt status.
+ * @last_intr_ts: record the last interrupt timestamp.
+ * @hibern8_exit_cnt: Counter to keep track of number of exits,
+ * reset this after link-startup.
+ * @last_hibern8_exit_tstamp: Set time after the hibern8 exit.
+ * Clear after the first successful command completion.
+ * @event: array with event history.
+ */
+struct ufs_stats {
+ u32 last_intr_status;
+ u64 last_intr_ts;
+
+ u32 hibern8_exit_cnt;
+ u64 last_hibern8_exit_tstamp;
+ struct ufs_event_hist event[UFS_EVT_CNT];
+};
+
+/**
+ * enum ufshcd_state - UFS host controller state
+ * @UFSHCD_STATE_RESET: Link is not operational. Postpone SCSI command
+ * processing.
+ * @UFSHCD_STATE_OPERATIONAL: The host controller is operational and can process
+ * SCSI commands.
+ * @UFSHCD_STATE_EH_SCHEDULED_NON_FATAL: The error handler has been scheduled.
+ * SCSI commands may be submitted to the controller.
+ * @UFSHCD_STATE_EH_SCHEDULED_FATAL: The error handler has been scheduled. Fail
+ * newly submitted SCSI commands with error code DID_BAD_TARGET.
+ * @UFSHCD_STATE_ERROR: An unrecoverable error occurred, e.g. link recovery
+ * failed. Fail all SCSI commands with error code DID_ERROR.
+ */
+enum ufshcd_state {
+ UFSHCD_STATE_RESET,
+ UFSHCD_STATE_OPERATIONAL,
+ UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
+ UFSHCD_STATE_EH_SCHEDULED_FATAL,
+ UFSHCD_STATE_ERROR,
+};
+
+enum ufshcd_quirks {
+ /* Interrupt aggregation support is broken */
+ UFSHCD_QUIRK_BROKEN_INTR_AGGR = 1 << 0,
+
+ /*
+ * delay before each dme command is required as the unipro
+ * layer has shown instabilities
+ */
+ UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS = 1 << 1,
+
+ /*
+ * If UFS host controller is having issue in processing LCC (Line
+ * Control Command) coming from device then enable this quirk.
+ * When this quirk is enabled, host controller driver should disable
+ * the LCC transmission on UFS device (by clearing TX_LCC_ENABLE
+ * attribute of device to 0).
+ */
+ UFSHCD_QUIRK_BROKEN_LCC = 1 << 2,
+
+ /*
+ * The attribute PA_RXHSUNTERMCAP specifies whether or not the
+ * inbound Link supports unterminated line in HS mode. Setting this
+ * attribute to 1 fixes moving to HS gear.
+ */
+ UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP = 1 << 3,
+
+ /*
+ * This quirk needs to be enabled if the host controller only allows
+ * accessing the peer dme attributes in AUTO mode (FAST AUTO or
+ * SLOW AUTO).
+ */
+ UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE = 1 << 4,
+
+ /*
+ * This quirk needs to be enabled if the host controller doesn't
+ * advertise the correct version in UFS_VER register. If this quirk
+ * is enabled, standard UFS host driver will call the vendor specific
+ * ops (get_ufs_hci_version) to get the correct version.
+ */
+ UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION = 1 << 5,
+
+ /*
+ * Clear handling for transfer/task request list is just opposite.
+ */
+ UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR = 1 << 6,
+
+ /*
+ * This quirk needs to be enabled if the host controller doesn't allow
+ * the interrupt aggregation timer and counter to be reset by s/w.
+ */
+ UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR = 1 << 7,
+
+ /*
+ * This quirk needs to be enabled if the host controller cannot be
+ * enabled via HCE register.
+ */
+ UFSHCI_QUIRK_BROKEN_HCE = 1 << 8,
+
+ /*
+ * This quirk needs to be enabled if the host controller regards the
+ * resolution of the values of PRDTO and PRDTL in UTRD as bytes.
+ */
+ UFSHCD_QUIRK_PRDT_BYTE_GRAN = 1 << 9,
+
+ /*
+ * This quirk needs to be enabled if the host controller reports
+ * OCS FATAL ERROR with device error through sense data
+ */
+ UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR = 1 << 10,
+
+ /*
+ * This quirk needs to be enabled if the host controller has
+ * auto-hibernate capability but it doesn't work.
+ */
+ UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 = 1 << 11,
+
+ /*
+ * Enable this quirk to skip manual flush control for WriteBooster
+ */
+ UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL = 1 << 12,
+
+ /*
+ * Enable this quirk to skip setting the default UniPro timeout values
+ * before a power mode change
+ */
+ UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 1 << 13,
+
+ /*
+ * This quirk allows only sg entries aligned with page size.
+ */
+ UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE = 1 << 14,
+
+ /*
+ * This quirk needs to be enabled if the host controller does not
+ * support UIC command
+ */
+ UFSHCD_QUIRK_BROKEN_UIC_CMD = 1 << 15,
+
+ /*
+ * This quirk needs to be enabled if the host controller cannot
+ * support physical host configuration.
+ */
+ UFSHCD_QUIRK_SKIP_PH_CONFIGURATION = 1 << 16,
+
+ /*
+ * This quirk needs to be enabled if the host controller has
+ * 64-bit addressing supported capability but it doesn't work.
+ */
+ UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS = 1 << 17,
+
+ /*
+ * This quirk needs to be enabled if the host controller has
+ * auto-hibernate capability but it's FASTAUTO only.
+ */
+ UFSHCD_QUIRK_HIBERN_FASTAUTO = 1 << 18,
+};
+
+enum ufshcd_caps {
+ /* Allow dynamic clk gating */
+ UFSHCD_CAP_CLK_GATING = 1 << 0,
+
+ /* Allow hibern8 with clk gating */
+ UFSHCD_CAP_HIBERN8_WITH_CLK_GATING = 1 << 1,
+
+ /* Allow dynamic clk scaling */
+ UFSHCD_CAP_CLK_SCALING = 1 << 2,
+
+ /* Allow auto bkops to be enabled during runtime suspend */
+ UFSHCD_CAP_AUTO_BKOPS_SUSPEND = 1 << 3,
+
+ /*
+ * This capability allows host controller driver to use the UFS HCI's
+ * interrupt aggregation capability.
+ * CAUTION: Enabling this might reduce overall UFS throughput.
+ */
+ UFSHCD_CAP_INTR_AGGR = 1 << 4,
+
+ /*
+ * This capability allows the device auto-bkops to be always enabled
+ * except during suspend (both runtime and system suspend).
+ * Enabling this capability means that device will always be allowed
+ * to do background operation when it's active but it might degrade
+ * the performance of ongoing read/write operations.
+ */
+ UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND = 1 << 5,
+
+ /*
+ * This capability allows host controller driver to automatically
+ * enable runtime power management by itself instead of waiting
+ * for userspace to control the power management.
+ */
+ UFSHCD_CAP_RPM_AUTOSUSPEND = 1 << 6,
+
+ /*
+ * This capability allows the host controller driver to turn-on
+ * WriteBooster, if the underlying device supports it and is
+ * provisioned to be used. This would increase the write performance.
+ */
+ UFSHCD_CAP_WB_EN = 1 << 7,
+
+ /*
+ * This capability allows the host controller driver to use the
+ * inline crypto engine, if it is present
+ */
+ UFSHCD_CAP_CRYPTO = 1 << 8,
+
+ /*
+ * This capability allows the controller regulators to be put into
+ * lpm mode aggressively during clock gating.
+ * This would increase power savings.
+ */
+ UFSHCD_CAP_AGGR_POWER_COLLAPSE = 1 << 9,
+
+ /*
+ * This capability allows the host controller driver to use DeepSleep,
+ * if it is supported by the UFS device. The host controller driver must
+ * support device hardware reset via the hba->device_reset() callback,
+ * in order to exit DeepSleep state.
+ */
+ UFSHCD_CAP_DEEPSLEEP = 1 << 10,
+
+ /*
+ * This capability allows the host controller driver to use temperature
+ * notification if it is supported by the UFS device.
+ */
+ UFSHCD_CAP_TEMP_NOTIF = 1 << 11,
+
+ /*
+ * Enable WriteBooster when scaling up the clock and disable
+ * WriteBooster when scaling the clock down.
+ */
+ UFSHCD_CAP_WB_WITH_CLK_SCALING = 1 << 12,
+};
+
+struct ufs_hba_variant_params {
+ struct devfreq_dev_profile devfreq_profile;
+ struct devfreq_simple_ondemand_data ondemand_data;
+ u16 hba_enable_delay_us;
+ u32 wb_flush_threshold;
+};
+
+#ifdef CONFIG_SCSI_UFS_HPB
+/**
+ * struct ufshpb_dev_info - UFSHPB device related info
+ * @num_lu: the number of user logical units, used to check whether all LUs
+ * finished initialization
+ * @rgn_size: device reported HPB region size
+ * @srgn_size: device reported HPB sub-region size
+ * @slave_conf_cnt: counter to check all lu finished initialization
+ * @hpb_disabled: flag to check if HPB is disabled
+ * @max_hpb_single_cmd: device reported bMAX_DATA_SIZE_FOR_SINGLE_CMD value
+ * @is_legacy: flag to check HPB 1.0
+ * @control_mode: either host or device
+ */
+struct ufshpb_dev_info {
+ int num_lu;
+ int rgn_size;
+ int srgn_size;
+ atomic_t slave_conf_cnt;
+ bool hpb_disabled;
+ u8 max_hpb_single_cmd;
+ bool is_legacy;
+ u8 control_mode;
+};
+#endif
+
+struct ufs_hba_monitor {
+ unsigned long chunk_size;
+
+ unsigned long nr_sec_rw[2];
+ ktime_t total_busy[2];
+
+ unsigned long nr_req[2];
+ /* latencies */
+ ktime_t lat_sum[2];
+ ktime_t lat_max[2];
+ ktime_t lat_min[2];
+
+ u32 nr_queued[2];
+ ktime_t busy_start_ts[2];
+
+ ktime_t enabled_ts;
+ bool enabled;
+};
+
+/**
+ * struct ufs_hba - per adapter private structure
+ * @mmio_base: UFSHCI base register address
+ * @ucdl_base_addr: UFS Command Descriptor base address
+ * @utrdl_base_addr: UTP Transfer Request Descriptor base address
+ * @utmrdl_base_addr: UTP Task Management Descriptor base address
+ * @ucdl_dma_addr: UFS Command Descriptor DMA address
+ * @utrdl_dma_addr: UTRDL DMA address
+ * @utmrdl_dma_addr: UTMRDL DMA address
+ * @host: Scsi_Host instance of the driver
+ * @dev: device handle
+ * @ufs_device_wlun: WLUN that controls the entire UFS device.
+ * @hwmon_device: device instance registered with the hwmon core.
+ * @curr_dev_pwr_mode: active UFS device power mode.
+ * @uic_link_state: active state of the link to the UFS device.
+ * @rpm_lvl: desired UFS power management level during runtime PM.
+ * @spm_lvl: desired UFS power management level during system PM.
+ * @pm_op_in_progress: whether or not a PM operation is in progress.
+ * @ahit: value of Auto-Hibernate Idle Timer register.
+ * @lrb: local reference block
+ * @outstanding_tasks: Bits representing outstanding task requests
+ * @outstanding_lock: Protects @outstanding_reqs.
+ * @outstanding_reqs: Bits representing outstanding transfer requests
+ * @capabilities: UFS Controller Capabilities
+ * @nutrs: Transfer Request Queue depth supported by controller
+ * @nutmrs: Task Management Queue depth supported by controller
+ * @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock.
+ * @ufs_version: UFS Version to which controller complies
+ * @vops: pointer to variant specific operations
+ * @vps: pointer to variant specific parameters
+ * @priv: pointer to variant specific private data
+ * @irq: Irq number of the controller
+ * @is_irq_enabled: whether or not the UFS controller interrupt is enabled.
+ * @dev_ref_clk_freq: reference clock frequency
+ * @quirks: bitmask with information about deviations from the UFSHCI standard.
+ * @dev_quirks: bitmask with information about deviations from the UFS standard.
+ * @tmf_tag_set: TMF tag set.
+ * @tmf_queue: Used to allocate TMF tags.
+ * @tmf_rqs: array with pointers to TMF requests while these are in progress.
+ * @active_uic_cmd: handle of active UIC command
+ * @uic_cmd_mutex: mutex for UIC command
+ * @uic_async_done: completion used during UIC processing
+ * @ufshcd_state: UFSHCD state
+ * @eh_flags: Error handling flags
+ * @intr_mask: Interrupt Mask Bits
+ * @ee_ctrl_mask: Exception event control mask
+ * @ee_drv_mask: Exception event mask for driver
+ * @ee_usr_mask: Exception event mask for user (set via debugfs)
+ * @ee_ctrl_mutex: Used to serialize exception event information.
+ * @is_powered: flag to check if HBA is powered
+ * @shutting_down: flag to check if shutdown has been invoked
+ * @host_sem: semaphore used to serialize concurrent contexts
+ * @eh_wq: Workqueue that eh_work works on
+ * @eh_work: Worker to handle UFS errors that require s/w attention
+ * @eeh_work: Worker to handle exception events
+ * @errors: HBA errors
+ * @uic_error: UFS interconnect layer error status
+ * @saved_err: sticky error mask
+ * @saved_uic_err: sticky UIC error mask
+ * @ufs_stats: various error counters
+ * @force_reset: flag to force eh_work perform a full reset
+ * @force_pmc: flag to force a power mode change
+ * @silence_err_logs: flag to silence error logs
+ * @dev_cmd: ufs device management command information
+ * @last_dme_cmd_tstamp: time stamp of the last completed DME command
+ * @nop_out_timeout: NOP OUT timeout value
+ * @dev_info: information about the UFS device
+ * @auto_bkops_enabled: to track whether bkops is enabled in device
+ * @vreg_info: UFS device voltage regulator information
+ * @clk_list_head: UFS host controller clocks list node head
+ * @req_abort_count: number of times ufshcd_abort() has been called
+ * @lanes_per_direction: number of lanes per data direction between the UFS
+ * controller and the UFS device.
+ * @pwr_info: holds current power mode
+ * @max_pwr_info: keeps the device's max valid power mode
+ * @clk_gating: information related to clock gating
+ * @caps: bitmask with information about UFS controller capabilities
+ * @devfreq: frequency scaling information owned by the devfreq core
+ * @clk_scaling: frequency scaling information owned by the UFS driver
+ * @is_sys_suspended: whether or not the entire system has been suspended
+ * @urgent_bkops_lvl: keeps track of urgent bkops level for device
+ * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
+ * device is known or not.
+ * @clk_scaling_lock: used to serialize device commands and clock scaling
+ * @desc_size: descriptor sizes reported by device
+ * @scsi_block_reqs_cnt: reference counting for scsi block requests
+ * @bsg_dev: struct device associated with the BSG queue
+ * @bsg_queue: BSG queue associated with the UFS controller
+ * @rpm_dev_flush_recheck_work: used to suspend from RPM (runtime power
+ * management) after the UFS device has finished a WriteBooster buffer
+ * flush or auto BKOP.
+ * @ufshpb_dev: information related to HPB (Host Performance Booster).
+ * @monitor: statistics about UFS commands
+ * @crypto_capabilities: Content of crypto capabilities register (0x100)
+ * @crypto_cap_array: Array of crypto capabilities
+ * @crypto_cfg_register: Start of the crypto cfg array
+ * @crypto_profile: the crypto profile of this hba (if applicable)
+ * @debugfs_root: UFS controller debugfs root directory
+ * @debugfs_ee_work: used to restore ee_ctrl_mask after a delay
+ * @debugfs_ee_rate_limit_ms: user configurable delay after which to restore
+ * ee_ctrl_mask
+ * @luns_avail: number of regular and well known LUNs supported by the UFS
+ * device
+ * @complete_put: whether or not to call ufshcd_rpm_put() from inside
+ * ufshcd_resume_complete()
+ */
+struct ufs_hba {
+ void __iomem *mmio_base;
+
+ /* Virtual memory reference */
+ struct utp_transfer_cmd_desc *ucdl_base_addr;
+ struct utp_transfer_req_desc *utrdl_base_addr;
+ struct utp_task_req_desc *utmrdl_base_addr;
+
+ /* DMA memory reference */
+ dma_addr_t ucdl_dma_addr;
+ dma_addr_t utrdl_dma_addr;
+ dma_addr_t utmrdl_dma_addr;
+
+ struct Scsi_Host *host;
+ struct device *dev;
+ struct scsi_device *ufs_device_wlun;
+
+#ifdef CONFIG_SCSI_UFS_HWMON
+ struct device *hwmon_device;
+#endif
+
+ enum ufs_dev_pwr_mode curr_dev_pwr_mode;
+ enum uic_link_state uic_link_state;
+ /* Desired UFS power management level during runtime PM */
+ enum ufs_pm_level rpm_lvl;
+ /* Desired UFS power management level during system PM */
+ enum ufs_pm_level spm_lvl;
+ int pm_op_in_progress;
+
+ /* Auto-Hibernate Idle Timer register value */
+ u32 ahit;
+
+ struct ufshcd_lrb *lrb;
+
+ unsigned long outstanding_tasks;
+ spinlock_t outstanding_lock;
+ unsigned long outstanding_reqs;
+
+ u32 capabilities;
+ int nutrs;
+ int nutmrs;
+ u32 reserved_slot;
+ u32 ufs_version;
+ const struct ufs_hba_variant_ops *vops;
+ struct ufs_hba_variant_params *vps;
+ void *priv;
+ unsigned int irq;
+ bool is_irq_enabled;
+ enum ufs_ref_clk_freq dev_ref_clk_freq;
+
+ unsigned int quirks; /* Deviations from standard UFSHCI spec. */
+
+ /* Device deviations from standard UFS device spec. */
+ unsigned int dev_quirks;
+
+ struct blk_mq_tag_set tmf_tag_set;
+ struct request_queue *tmf_queue;
+ struct request **tmf_rqs;
+
+ struct uic_command *active_uic_cmd;
+ struct mutex uic_cmd_mutex;
+ struct completion *uic_async_done;
+
+ enum ufshcd_state ufshcd_state;
+ u32 eh_flags;
+ u32 intr_mask;
+ u16 ee_ctrl_mask;
+ u16 ee_drv_mask;
+ u16 ee_usr_mask;
+ struct mutex ee_ctrl_mutex;
+ bool is_powered;
+ bool shutting_down;
+ struct semaphore host_sem;
+
+ /* Work Queues */
+ struct workqueue_struct *eh_wq;
+ struct work_struct eh_work;
+ struct work_struct eeh_work;
+
+ /* HBA Errors */
+ u32 errors;
+ u32 uic_error;
+ u32 saved_err;
+ u32 saved_uic_err;
+ struct ufs_stats ufs_stats;
+ bool force_reset;
+ bool force_pmc;
+ bool silence_err_logs;
+
+ /* Device management request data */
+ struct ufs_dev_cmd dev_cmd;
+ ktime_t last_dme_cmd_tstamp;
+ int nop_out_timeout;
+
+ /* Keeps information of the UFS device connected to this host */
+ struct ufs_dev_info dev_info;
+ bool auto_bkops_enabled;
+ struct ufs_vreg_info vreg_info;
+ struct list_head clk_list_head;
+
+ /* Number of requests aborts */
+ int req_abort_count;
+
+ /* Number of lanes available (1 or 2) for Rx/Tx */
+ u32 lanes_per_direction;
+ struct ufs_pa_layer_attr pwr_info;
+ struct ufs_pwr_mode_info max_pwr_info;
+
+ struct ufs_clk_gating clk_gating;
+ /* Control to enable/disable host capabilities */
+ u32 caps;
+
+ struct devfreq *devfreq;
+ struct ufs_clk_scaling clk_scaling;
+ bool is_sys_suspended;
+
+ enum bkops_status urgent_bkops_lvl;
+ bool is_urgent_bkops_lvl_checked;
+
+ struct rw_semaphore clk_scaling_lock;
+ unsigned char desc_size[QUERY_DESC_IDN_MAX];
+ atomic_t scsi_block_reqs_cnt;
+
+ struct device bsg_dev;
+ struct request_queue *bsg_queue;
+ struct delayed_work rpm_dev_flush_recheck_work;
+
+#ifdef CONFIG_SCSI_UFS_HPB
+ struct ufshpb_dev_info ufshpb_dev;
+#endif
+
+ struct ufs_hba_monitor monitor;
+
+#ifdef CONFIG_SCSI_UFS_CRYPTO
+ union ufs_crypto_capabilities crypto_capabilities;
+ union ufs_crypto_cap_entry *crypto_cap_array;
+ u32 crypto_cfg_register;
+ struct blk_crypto_profile crypto_profile;
+#endif
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root;
+ struct delayed_work debugfs_ee_work;
+ u32 debugfs_ee_rate_limit_ms;
+#endif
+ u32 luns_avail;
+ bool complete_put;
+};
+
+/* Returns true if clocks can be gated. Otherwise false */
+static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_CLK_GATING;
+}
+static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+}
+static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_CLK_SCALING;
+}
+static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
+}
+static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_RPM_AUTOSUSPEND;
+}
+
+static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
+{
+ return (hba->caps & UFSHCD_CAP_INTR_AGGR) &&
+ !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR);
+}
+
+static inline bool ufshcd_can_aggressive_pc(struct ufs_hba *hba)
+{
+ return !!(ufshcd_is_link_hibern8(hba) &&
+ (hba->caps & UFSHCD_CAP_AGGR_POWER_COLLAPSE));
+}
+
+static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
+{
+ return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) &&
+ !(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8);
+}
+
+static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
+{
+ return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit);
+}
+
+static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_WB_EN;
+}
+
+static inline bool ufshcd_enable_wb_if_scaling_up(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_WB_WITH_CLK_SCALING;
+}
+
+#define ufshcd_writel(hba, val, reg) \
+ writel((val), (hba)->mmio_base + (reg))
+#define ufshcd_readl(hba, reg) \
+ readl((hba)->mmio_base + (reg))
+
+/**
+ * ufshcd_rmwl - perform read/modify/write for a controller register
+ * @hba: per adapter instance
+ * @mask: mask to apply on read value
+ * @val: actual value to write
+ * @reg: register address
+ */
+static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
+{
+ u32 tmp;
+
+ tmp = ufshcd_readl(hba, reg);
+ tmp &= ~mask;
+ tmp |= (val & mask);
+ ufshcd_writel(hba, tmp, reg);
+}
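+
+/*
+ * Usage sketch (the register and field are hypothetical): clear bits 7:4
+ * of a vendor register and write the value 0x2 into that field, leaving
+ * all other bits untouched:
+ *
+ *	ufshcd_rmwl(hba, 0xF << 4, 0x2 << 4, VENDOR_SPECIFIC_REG);
+ */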
+
+int ufshcd_alloc_host(struct device *, struct ufs_hba **);
+void ufshcd_dealloc_host(struct ufs_hba *);
+int ufshcd_hba_enable(struct ufs_hba *hba);
+int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
+int ufshcd_link_recovery(struct ufs_hba *hba);
+int ufshcd_make_hba_operational(struct ufs_hba *hba);
+void ufshcd_remove(struct ufs_hba *);
+int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
+int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
+void ufshcd_delay_us(unsigned long us, unsigned long tolerance);
+void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
+void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
+void ufshcd_hba_stop(struct ufs_hba *hba);
+void ufshcd_schedule_eh_work(struct ufs_hba *hba);
+
+static inline void check_upiu_size(void)
+{
+ BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
+ GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
+}
+
+/**
+ * ufshcd_set_variant - set variant specific data to the hba
+ * @hba: per adapter instance
+ * @variant: pointer to variant specific data
+ */
+static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
+{
+ BUG_ON(!hba);
+ hba->priv = variant;
+}
+
+/**
+ * ufshcd_get_variant - get variant specific data from the hba
+ * @hba: per adapter instance
+ */
+static inline void *ufshcd_get_variant(struct ufs_hba *hba)
+{
+ BUG_ON(!hba);
+ return hba->priv;
+}
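+
+/*
+ * Typical usage sketch in a vendor driver (all names hypothetical): the
+ * init vop allocates private state and parks it in hba->priv, and later
+ * callbacks recover it with ufshcd_get_variant():
+ *
+ *	static int example_hba_init(struct ufs_hba *hba)
+ *	{
+ *		struct example_host *host;
+ *
+ *		host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
+ *		if (!host)
+ *			return -ENOMEM;
+ *		ufshcd_set_variant(hba, host);
+ *		return 0;
+ *	}
+ */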
+
+#ifdef CONFIG_PM
+extern int ufshcd_runtime_suspend(struct device *dev);
+extern int ufshcd_runtime_resume(struct device *dev);
+#endif
+#ifdef CONFIG_PM_SLEEP
+extern int ufshcd_system_suspend(struct device *dev);
+extern int ufshcd_system_resume(struct device *dev);
+#endif
+extern int ufshcd_shutdown(struct ufs_hba *hba);
+extern int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
+ int agreed_gear,
+ int adapt_val);
+extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
+ u8 attr_set, u32 mib_val, u8 peer);
+extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
+ u32 *mib_val, u8 peer);
+extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *desired_pwr_mode);
+extern int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode);
+
+/* UIC command interfaces for DME primitives */
+#define DME_LOCAL 0
+#define DME_PEER 1
+#define ATTR_SET_NOR 0 /* NORMAL */
+#define ATTR_SET_ST 1 /* STATIC */
+
+static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
+ u32 mib_val)
+{
+ return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
+ mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
+ u32 mib_val)
+{
+ return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
+ mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
+ u32 mib_val)
+{
+ return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
+ mib_val, DME_PEER);
+}
+
+static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
+ u32 mib_val)
+{
+ return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
+ mib_val, DME_PEER);
+}
+
+static inline int ufshcd_dme_get(struct ufs_hba *hba,
+ u32 attr_sel, u32 *mib_val)
+{
+ return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
+ u32 attr_sel, u32 *mib_val)
+{
+ return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
+}
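+
+/*
+ * Hedged example: with the wrappers above, DME attribute access reduces
+ * to one-liners (the attribute choice is illustrative). UIC_ARG_MIB()
+ * packs the attribute ID into the UIC command argument:
+ *
+ *	u32 rx_gear;
+ *
+ *	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_RXGEAR), &rx_gear);
+ *	ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TXGEAR), rx_gear);
+ */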
+
+static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
+{
+ return (pwr_info->pwr_rx == FAST_MODE ||
+ pwr_info->pwr_rx == FASTAUTO_MODE) &&
+ (pwr_info->pwr_tx == FAST_MODE ||
+ pwr_info->pwr_tx == FASTAUTO_MODE);
+}
+
+static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
+{
+ return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
+}
+
+void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
+void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
+void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
+ const struct ufs_dev_quirk *fixups);
+#define SD_ASCII_STD true
+#define SD_RAW false
+int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
+ u8 **buf, bool ascii);
+
+int ufshcd_hold(struct ufs_hba *hba, bool async);
+void ufshcd_release(struct ufs_hba *hba);
+
+void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value);
+
+void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
+ int *desc_length);
+
+u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
+
+int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg);
+
+int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
+
+int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
+ struct utp_upiu_req *req_upiu,
+ struct utp_upiu_req *rsp_upiu,
+ int msgcode,
+ u8 *desc_buff, int *buff_len,
+ enum query_opcode desc_op);
+
+int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
+int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable);
+int ufshcd_suspend_prepare(struct device *dev);
+int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm);
+void ufshcd_resume_complete(struct device *dev);
+
+/* Wrapper functions for safely calling variant operations */
+static inline int ufshcd_vops_init(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->init)
+ return hba->vops->init(hba);
+
+ return 0;
+}
+
+static inline int ufshcd_vops_phy_initialization(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->phy_initialization)
+ return hba->vops->phy_initialization(hba);
+
+ return 0;
+}
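+
+/*
+ * Every vops wrapper follows the same shape as the two above: NULL-check
+ * the ops table and the hook, forward if present, and fall back to a
+ * harmless default so callers never care whether a variant implements a
+ * given hook. A sketch of an equivalent wrapper for one more optional
+ * hook (not part of this header's wrappers):
+ *
+ *	static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
+ *	{
+ *		if (hba->vops && hba->vops->dbg_register_dump)
+ *			hba->vops->dbg_register_dump(hba);
+ *	}
+ */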
+
+extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
+
+int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
+ const char *prefix);
+
+int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask);
+int ufshcd_write_ee_control(struct ufs_hba *hba);
+int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
+ const u16 *other_mask, u16 set, u16 clr);
+
+#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshci.h b/include/ufs/ufshci.h
index c2961d37cc1c..f525566a0864 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/include/ufs/ufshci.h
@@ -1,41 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Universal Flash Storage Host controller driver
- *
- * This code is based on drivers/scsi/ufs/ufshci.h
* Copyright (C) 2011-2013 Samsung India Software Operations
*
* Authors:
* Santosh Yaraganavi <santosh.sy@samsung.com>
* Vinayak Holikatti <h.vinayak@samsung.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * See the COPYING file in the top-level directory or visit
- * <http://www.gnu.org/licenses/gpl-2.0.html>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This program is provided "AS IS" and "WITH ALL FAULTS" and
- * without warranty of any kind. You are solely responsible for
- * determining the appropriateness of using and distributing
- * the program and assume all risks associated with your exercise
- * of rights with respect to the program, including but not limited
- * to infringement of third party rights, the risks and costs of
- * program errors, damage to or loss of data, programs or equipment,
- * and unavailability or interruption of operations. Under no
- * circumstances will the contributor of this Program be liable for
- * any damages of any kind arising from your use or distribution of
- * this program.
*/
#ifndef _UFSHCI_H
#define _UFSHCI_H
+#include <scsi/scsi_host.h>
+
enum {
TASK_REQ_UPIU_SIZE_DWORDS = 8,
TASK_RSP_UPIU_SIZE_DWORDS = 8,
@@ -90,6 +67,7 @@ enum {
MASK_64_ADDRESSING_SUPPORT = 0x01000000,
MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000,
MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000,
+ MASK_CRYPTO_SUPPORT = 0x10000000,
};
#define UFS_MASK(mask, offset) ((mask) << (offset))
@@ -98,13 +76,17 @@ enum {
#define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0)
#define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16)
-/* Controller UFSHCI version */
-enum {
- UFSHCI_VERSION_10 = 0x00010000, /* 1.0 */
- UFSHCI_VERSION_11 = 0x00010100, /* 1.1 */
- UFSHCI_VERSION_20 = 0x00000200, /* 2.0 */
- UFSHCI_VERSION_21 = 0x00000210, /* 2.1 */
-};
+/*
+ * Controller UFSHCI version
+ * - 2.x and newer use the following scheme:
+ * (major << 8) + (minor << 4)
+ * - 1.x has been converted to match this in
+ * ufshcd_get_ufs_version()
+ */
+static inline u32 ufshci_version(u32 major, u32 minor)
+{
+ return (major << 8) + (minor << 4);
+}
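+
+/*
+ * Worked example: ufshci_version(3, 0) yields 0x0300 and
+ * ufshci_version(2, 1) yields 0x0210, matching the VER register layout
+ * described above.
+ */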
/*
* HCDDID - Host Controller Identification Descriptor
@@ -143,6 +125,7 @@ enum {
#define DEVICE_FATAL_ERROR 0x800
#define CONTROLLER_FATAL_ERROR 0x10000
#define SYSTEM_BUS_FATAL_ERROR 0x20000
+#define CRYPTO_ENGINE_FATAL_ERROR 0x40000
#define UFSHCD_UIC_HIBERN8_MASK (UIC_HIBERNATE_ENTER |\
UIC_HIBERNATE_EXIT)
@@ -152,14 +135,13 @@ enum {
#define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL | UFSHCD_UIC_PWR_MASK)
-#define UFSHCD_ERROR_MASK (UIC_ERROR |\
- DEVICE_FATAL_ERROR |\
- CONTROLLER_FATAL_ERROR |\
- SYSTEM_BUS_FATAL_ERROR)
+#define UFSHCD_ERROR_MASK (UIC_ERROR | INT_FATAL_ERRORS)
#define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\
CONTROLLER_FATAL_ERROR |\
- SYSTEM_BUS_FATAL_ERROR)
+ SYSTEM_BUS_FATAL_ERROR |\
+ CRYPTO_ENGINE_FATAL_ERROR |\
+ UIC_LINK_LOST)
/* HCS - Host Controller Status 30h */
#define DEVICE_PRESENT 0x1
@@ -192,6 +174,7 @@ enum {
#define UIC_PHY_ADAPTER_LAYER_ERROR 0x80000000
#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F
#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK 0xF
+#define UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR 0x10
/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
#define UIC_DATA_LINK_LAYER_ERROR 0x80000000
@@ -318,6 +301,61 @@ enum {
INTERRUPT_MASK_ALL_VER_21 = 0x71FFF,
};
+/* CCAP - Crypto Capability 100h */
+union ufs_crypto_capabilities {
+ __le32 reg_val;
+ struct {
+ u8 num_crypto_cap;
+ u8 config_count;
+ u8 reserved;
+ u8 config_array_ptr;
+ };
+};
+
+enum ufs_crypto_key_size {
+ UFS_CRYPTO_KEY_SIZE_INVALID = 0x0,
+ UFS_CRYPTO_KEY_SIZE_128 = 0x1,
+ UFS_CRYPTO_KEY_SIZE_192 = 0x2,
+ UFS_CRYPTO_KEY_SIZE_256 = 0x3,
+ UFS_CRYPTO_KEY_SIZE_512 = 0x4,
+};
+
+enum ufs_crypto_alg {
+ UFS_CRYPTO_ALG_AES_XTS = 0x0,
+ UFS_CRYPTO_ALG_BITLOCKER_AES_CBC = 0x1,
+ UFS_CRYPTO_ALG_AES_ECB = 0x2,
+ UFS_CRYPTO_ALG_ESSIV_AES_CBC = 0x3,
+};
+
+/* x-CRYPTOCAP - Crypto Capability X */
+union ufs_crypto_cap_entry {
+ __le32 reg_val;
+ struct {
+ u8 algorithm_id;
+ u8 sdus_mask; /* Supported data unit size mask */
+ u8 key_size;
+ u8 reserved;
+ };
+};
+
+#define UFS_CRYPTO_CONFIGURATION_ENABLE (1 << 7)
+#define UFS_CRYPTO_KEY_MAX_SIZE 64
+/* x-CRYPTOCFG - Crypto Configuration X */
+union ufs_crypto_cfg_entry {
+ __le32 reg_val[32];
+ struct {
+ u8 crypto_key[UFS_CRYPTO_KEY_MAX_SIZE];
+ u8 data_unit_size;
+ u8 crypto_cap_idx;
+ u8 reserved_1;
+ u8 config_enable;
+ u8 reserved_multi_host;
+ u8 reserved_2;
+ u8 vsb[2];
+ u8 reserved_3[56];
+ };
+};
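+
+/*
+ * A hedged sketch of filling one entry when programming an inline crypto
+ * key (field usage follows the layout above; locking, key eviction and
+ * the actual register writes are omitted, and the variable names are
+ * assumptions):
+ *
+ *	union ufs_crypto_cfg_entry cfg = {};
+ *
+ *	memcpy(cfg.crypto_key, key, key_size);
+ *	cfg.data_unit_size = du_size_mask;
+ *	cfg.crypto_cap_idx = cap_idx;
+ *	cfg.config_enable = UFS_CRYPTO_CONFIGURATION_ENABLE;
+ */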
+
/*
* Request Descriptor Definitions
*/
@@ -339,6 +377,7 @@ enum {
UTP_NATIVE_UFS_COMMAND = 0x10000000,
UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000,
UTP_REQ_DESC_INT_CMD = 0x01000000,
+ UTP_REQ_DESC_CRYPTO_ENABLE_CMD = 0x00800000,
};
/* UTP Transfer Request Data Direction (DD) */
@@ -349,7 +388,7 @@ enum {
};
/* Overall command status values */
-enum {
+enum utp_ocs {
OCS_SUCCESS = 0x0,
OCS_INVALID_CMD_TABLE_ATTR = 0x1,
OCS_INVALID_PRDT_ATTR = 0x2,
@@ -358,7 +397,13 @@ enum {
OCS_PEER_COMM_FAILURE = 0x5,
OCS_ABORTED = 0x6,
OCS_FATAL_ERROR = 0x7,
+ OCS_DEVICE_FATAL_ERROR = 0x8,
+ OCS_INVALID_CRYPTO_CONFIG = 0x9,
+ OCS_GENERAL_CRYPTO_ERROR = 0xA,
OCS_INVALID_COMMAND_STATUS = 0x0F,
+};
+
+enum {
MASK_OCS = 0x0F,
};
@@ -369,20 +414,18 @@ enum {
/**
* struct ufshcd_sg_entry - UFSHCI PRD Entry
- * @base_addr: Lower 32bit physical address DW-0
- * @upper_addr: Upper 32bit physical address DW-1
+ * @addr: Physical address; DW-0 and DW-1.
* @reserved: Reserved for future use DW-2
* @size: size of physical segment DW-3
*/
struct ufshcd_sg_entry {
- __le32 base_addr;
- __le32 upper_addr;
+ __le64 addr;
__le32 reserved;
__le32 size;
};
/**
- * struct utp_transfer_cmd_desc - UFS Command Descriptor structure
+ * struct utp_transfer_cmd_desc - UTP Command Descriptor (UCD)
* @command_upiu: Command UPIU Frame address
* @response_upiu: Response UPIU Frame address
* @prd_table: Physical Region Descriptor
@@ -408,7 +451,7 @@ struct request_desc_header {
};
/**
- * struct utp_transfer_req_desc - UTRD structure
+ * struct utp_transfer_req_desc - UTP Transfer Request Descriptor (UTRD)
* @header: UTRD header DW-0 to DW-3
* @command_desc_base_addr_lo: UCD base address low DW-4
* @command_desc_base_addr_hi: UCD base address high DW-5
@@ -443,17 +486,21 @@ struct utp_task_req_desc {
struct request_desc_header header;
/* DW 4-11 - Task request UPIU structure */
- struct utp_upiu_header req_header;
- __be32 input_param1;
- __be32 input_param2;
- __be32 input_param3;
- __be32 __reserved1[2];
+ struct {
+ struct utp_upiu_header req_header;
+ __be32 input_param1;
+ __be32 input_param2;
+ __be32 input_param3;
+ __be32 __reserved1[2];
+ } upiu_req;
/* DW 12-19 - Task Management Response UPIU structure */
- struct utp_upiu_header rsp_header;
- __be32 output_param1;
- __be32 output_param2;
- __be32 __reserved2[3];
+ struct {
+ struct utp_upiu_header rsp_header;
+ __be32 output_param1;
+ __be32 output_param2;
+ __be32 __reserved2[3];
+ } upiu_rsp;
};
#endif /* End of Header */
diff --git a/drivers/scsi/ufs/unipro.h b/include/ufs/unipro.h
index 3dc4d8b76509..6c553f98fe57 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/include/ufs/unipro.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * drivers/scsi/ufs/unipro.h
- *
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
*/
@@ -40,6 +38,18 @@
/*
* M-RX Configuration Attributes
*/
+#define RX_HS_G1_SYNC_LENGTH_CAP 0x008B
+#define RX_HS_G1_PREP_LENGTH_CAP 0x008C
+#define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F
+#define RX_HIBERN8TIME_CAPABILITY 0x0092
+#define RX_HS_G2_SYNC_LENGTH_CAP 0x0094
+#define RX_HS_G3_SYNC_LENGTH_CAP 0x0095
+#define RX_HS_G2_PREP_LENGTH_CAP 0x0096
+#define RX_HS_G3_PREP_LENGTH_CAP 0x0097
+#define RX_ADV_GRANULARITY_CAP 0x0098
+#define RX_HIBERN8TIME_CAP 0x0092
+#define RX_ADV_HIBERN8TIME_CAP 0x0099
+#define RX_ADV_MIN_ACTIVATETIME_CAP 0x009A
#define RX_MODE 0x00A1
#define RX_HSRATE_SERIES 0x00A2
#define RX_HSGEAR 0x00A3
@@ -49,23 +59,27 @@
#define RX_ENTER_HIBERN8 0x00A7
#define RX_BYPASS_8B10B_ENABLE 0x00A8
#define RX_TERMINATION_FORCE_ENABLE 0x00A9
-#define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F
-#define RX_HIBERN8TIME_CAPABILITY 0x0092
+#define RXCALCTRL 0x00B4
+#define RXSQCTRL 0x00B5
+#define CFGRXCDR8 0x00BA
+#define CFGRXOVR8 0x00BD
+#define CFGRXOVR6 0x00BF
+#define RXDIRECTCTRL2 0x00C7
+#define CFGRXOVR4 0x00E9
#define RX_REFCLKFREQ 0x00EB
#define RX_CFGCLKFREQVAL 0x00EC
#define CFGWIDEINLN 0x00F0
-#define CFGRXCDR8 0x00BA
#define ENARXDIRECTCFG4 0x00F2
-#define CFGRXOVR8 0x00BD
-#define RXDIRECTCTRL2 0x00C7
#define ENARXDIRECTCFG3 0x00F3
-#define RXCALCTRL 0x00B4
#define ENARXDIRECTCFG2 0x00F4
-#define CFGRXOVR4 0x00E9
-#define RXSQCTRL 0x00B5
-#define CFGRXOVR6 0x00BF
+
#define is_mphy_tx_attr(attr) (attr < RX_MODE)
+#define RX_ADV_FINE_GRAN_STEP(x) ((((x) & 0x3) << 1) | 0x1)
+#define SYNC_LEN_FINE(x) ((x) & 0x3F)
+#define SYNC_LEN_COARSE(x) ((1 << 6) | ((x) & 0x3F))
+#define PREP_LEN(x) ((x) & 0xF)
+
#define RX_MIN_ACTIVATETIME_UNIT_US 100
#define HIBERN8TIME_UNIT_US 100
@@ -86,48 +100,52 @@
#define UNIPRO_CB_OFFSET(x) (0x8000 | x)
/*
- * PHY Adpater attributes
+ * PHY Adapter attributes
*/
-#define PA_ACTIVETXDATALANES 0x1560
-#define PA_ACTIVERXDATALANES 0x1580
-#define PA_TXTRAILINGCLOCKS 0x1564
#define PA_PHY_TYPE 0x1500
#define PA_AVAILTXDATALANES 0x1520
-#define PA_AVAILRXDATALANES 0x1540
-#define PA_MINRXTRAILINGCLOCKS 0x1543
-#define PA_TXPWRSTATUS 0x1567
-#define PA_RXPWRSTATUS 0x1582
-#define PA_TXFORCECLOCK 0x1562
-#define PA_TXPWRMODE 0x1563
-#define PA_LEGACYDPHYESCDL 0x1570
#define PA_MAXTXSPEEDFAST 0x1521
#define PA_MAXTXSPEEDSLOW 0x1522
#define PA_MAXRXSPEEDFAST 0x1541
#define PA_MAXRXSPEEDSLOW 0x1542
#define PA_TXLINKSTARTUPHS 0x1544
+#define PA_AVAILRXDATALANES 0x1540
+#define PA_MINRXTRAILINGCLOCKS 0x1543
#define PA_LOCAL_TX_LCC_ENABLE 0x155E
+#define PA_ACTIVETXDATALANES 0x1560
+#define PA_CONNECTEDTXDATALANES 0x1561
+#define PA_TXFORCECLOCK 0x1562
+#define PA_TXPWRMODE 0x1563
+#define PA_TXTRAILINGCLOCKS 0x1564
#define PA_TXSPEEDFAST 0x1565
#define PA_TXSPEEDSLOW 0x1566
-#define PA_REMOTEVERINFO 0x15A0
+#define PA_TXPWRSTATUS 0x1567
#define PA_TXGEAR 0x1568
#define PA_TXTERMINATION 0x1569
#define PA_HSSERIES 0x156A
+#define PA_LEGACYDPHYESCDL 0x1570
#define PA_PWRMODE 0x1571
+#define PA_ACTIVERXDATALANES 0x1580
+#define PA_CONNECTEDRXDATALANES 0x1581
+#define PA_RXPWRSTATUS 0x1582
#define PA_RXGEAR 0x1583
#define PA_RXTERMINATION 0x1584
#define PA_MAXRXPWMGEAR 0x1586
#define PA_MAXRXHSGEAR 0x1587
+#define PA_PACPREQTIMEOUT 0x1590
+#define PA_PACPREQEOBTIMEOUT 0x1591
+#define PA_REMOTEVERINFO 0x15A0
+#define PA_LOGICALLANEMAP 0x15A1
+#define PA_SLEEPNOCONFIGTIME 0x15A2
+#define PA_STALLNOCONFIGTIME 0x15A3
+#define PA_SAVECONFIGTIME 0x15A4
#define PA_RXHSUNTERMCAP 0x15A5
#define PA_RXLSTERMCAP 0x15A6
#define PA_GRANULARITY 0x15AA
-#define PA_PACPREQTIMEOUT 0x1590
-#define PA_PACPREQEOBTIMEOUT 0x1591
#define PA_HIBERN8TIME 0x15A7
#define PA_LOCALVERINFO 0x15A9
#define PA_TACTIVATE 0x15A8
-#define PA_PACPFRAMECOUNT 0x15C0
-#define PA_PACPERRORCOUNT 0x15C1
-#define PA_PHYTESTCONTROL 0x15C2
#define PA_PWRMODEUSERDATA0 0x15B0
#define PA_PWRMODEUSERDATA1 0x15B1
#define PA_PWRMODEUSERDATA2 0x15B2
@@ -140,20 +158,23 @@
#define PA_PWRMODEUSERDATA9 0x15B9
#define PA_PWRMODEUSERDATA10 0x15BA
#define PA_PWRMODEUSERDATA11 0x15BB
-#define PA_CONNECTEDTXDATALANES 0x1561
-#define PA_CONNECTEDRXDATALANES 0x1581
-#define PA_LOGICALLANEMAP 0x15A1
-#define PA_SLEEPNOCONFIGTIME 0x15A2
-#define PA_STALLNOCONFIGTIME 0x15A3
-#define PA_SAVECONFIGTIME 0x15A4
+#define PA_PACPFRAMECOUNT 0x15C0
+#define PA_PACPERRORCOUNT 0x15C1
+#define PA_PHYTESTCONTROL 0x15C2
+#define PA_TXHSADAPTTYPE 0x15D4
+
+/* Adapt type for PA_TXHSADAPTTYPE attribute */
+#define PA_REFRESH_ADAPT 0x00
+#define PA_INITIAL_ADAPT 0x01
+#define PA_NO_ADAPT 0x03
#define PA_TACTIVATE_TIME_UNIT_US 10
#define PA_HIBERN8_TIME_UNIT_US 100
/* Other attributes */
+#define VS_POWERSTATE 0xD083
#define VS_MPHYCFGUPDT 0xD085
#define VS_DEBUGOMC 0xD09E
-#define VS_POWERSTATE 0xD083
#define PA_GRANULARITY_MIN_VAL 1
#define PA_GRANULARITY_MAX_VAL 6
@@ -181,6 +202,9 @@ enum {
UNCHANGED = 7,
};
+#define PWRMODE_MASK 0xF
+#define PWRMODE_RX_OFFSET 4
+
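+/*
+ * Hedged example: PA_PWRMODE carries both directions in one attribute,
+ * RX mode in bits 7:4 and TX mode in bits 3:0, so requesting FAST_MODE
+ * in both directions looks roughly like:
+ *
+ *	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODE),
+ *		       (FAST_MODE << PWRMODE_RX_OFFSET) | FAST_MODE);
+ */
+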
/* PA TX/RX Frequency Series */
enum {
PA_HS_MODE_A = 1,
@@ -203,14 +227,18 @@ enum ufs_hs_gear_tag {
UFS_HS_G1, /* HS Gear 1 (default for reset) */
UFS_HS_G2, /* HS Gear 2 */
UFS_HS_G3, /* HS Gear 3 */
+ UFS_HS_G4, /* HS Gear 4 */
+ UFS_HS_G5 /* HS Gear 5 */
};
enum ufs_unipro_ver {
UFS_UNIPRO_VER_RESERVED = 0,
UFS_UNIPRO_VER_1_40 = 1, /* UniPro version 1.40 */
UFS_UNIPRO_VER_1_41 = 2, /* UniPro version 1.41 */
- UFS_UNIPRO_VER_1_6 = 3, /* UniPro version 1.6 */
- UFS_UNIPRO_VER_MAX = 4, /* UniPro unsupported version */
+ UFS_UNIPRO_VER_1_6 = 3, /* UniPro version 1.6 */
+ UFS_UNIPRO_VER_1_61 = 4, /* UniPro version 1.61 */
+ UFS_UNIPRO_VER_1_8 = 5, /* UniPro version 1.8 */
+ UFS_UNIPRO_VER_MAX = 6, /* UniPro unsupported version */
/* UniPro version field mask in PA_LOCALVERINFO */
UFS_UNIPRO_VER_MASK = 0xF,
};
@@ -218,27 +246,27 @@ enum ufs_unipro_ver {
/*
* Data Link Layer Attributes
*/
+#define DL_TXPREEMPTIONCAP 0x2000
+#define DL_TC0TXMAXSDUSIZE 0x2001
+#define DL_TC0RXINITCREDITVAL 0x2002
+#define DL_TC1TXMAXSDUSIZE 0x2003
+#define DL_TC1RXINITCREDITVAL 0x2004
+#define DL_TC0TXBUFFERSIZE 0x2005
+#define DL_TC1TXBUFFERSIZE 0x2006
#define DL_TC0TXFCTHRESHOLD 0x2040
#define DL_FC0PROTTIMEOUTVAL 0x2041
#define DL_TC0REPLAYTIMEOUTVAL 0x2042
#define DL_AFC0REQTIMEOUTVAL 0x2043
#define DL_AFC0CREDITTHRESHOLD 0x2044
#define DL_TC0OUTACKTHRESHOLD 0x2045
+#define DL_PEERTC0PRESENT 0x2046
+#define DL_PEERTC0RXINITCREVAL 0x2047
#define DL_TC1TXFCTHRESHOLD 0x2060
#define DL_FC1PROTTIMEOUTVAL 0x2061
#define DL_TC1REPLAYTIMEOUTVAL 0x2062
#define DL_AFC1REQTIMEOUTVAL 0x2063
#define DL_AFC1CREDITTHRESHOLD 0x2064
#define DL_TC1OUTACKTHRESHOLD 0x2065
-#define DL_TXPREEMPTIONCAP 0x2000
-#define DL_TC0TXMAXSDUSIZE 0x2001
-#define DL_TC0RXINITCREDITVAL 0x2002
-#define DL_TC0TXBUFFERSIZE 0x2005
-#define DL_PEERTC0PRESENT 0x2046
-#define DL_PEERTC0RXINITCREVAL 0x2047
-#define DL_TC1TXMAXSDUSIZE 0x2003
-#define DL_TC1RXINITCREDITVAL 0x2004
-#define DL_TC1TXBUFFERSIZE 0x2006
#define DL_PEERTC1PRESENT 0x2066
#define DL_PEERTC1RXINITCREVAL 0x2067
@@ -270,18 +298,19 @@ enum ufs_unipro_ver {
#define T_TC0TXMAXSDUSIZE 0x4060
#define T_TC1TXMAXSDUSIZE 0x4061
-#ifdef FALSE
-#undef FALSE
-#endif
-
-#ifdef TRUE
-#undef TRUE
-#endif
+/* CPort setting */
+#define E2EFC_ON (1 << 0)
+#define E2EFC_OFF (0 << 0)
+#define CSD_N_ON (0 << 1)
+#define CSD_N_OFF (1 << 1)
+#define CSV_N_ON (0 << 2)
+#define CSV_N_OFF (1 << 2)
+#define CPORT_DEF_FLAGS (CSV_N_OFF | CSD_N_OFF | E2EFC_OFF)
-/* Boolean attribute values */
+/* CPort connection state */
enum {
- FALSE = 0,
- TRUE,
+ CPORT_IDLE = 0,
+ CPORT_CONNECTED,
};
#endif /* _UNIPRO_H_ */
diff --git a/include/vdso/bits.h b/include/vdso/bits.h
new file mode 100644
index 000000000000..6d005a1f5d94
--- /dev/null
+++ b/include/vdso/bits.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_BITS_H
+#define __VDSO_BITS_H
+
+#include <vdso/const.h>
+
+#define BIT(nr) (UL(1) << (nr))
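+
+/* Worked example: BIT(12) expands to (UL(1) << 12), i.e. 0x1000UL. */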
+
+#endif /* __VDSO_BITS_H */
diff --git a/include/vdso/clocksource.h b/include/vdso/clocksource.h
new file mode 100644
index 000000000000..c682e7c60273
--- /dev/null
+++ b/include/vdso/clocksource.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_CLOCKSOURCE_H
+#define __VDSO_CLOCKSOURCE_H
+
+#include <vdso/limits.h>
+
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
+#include <asm/vdso/clocksource.h>
+#endif /* CONFIG_GENERIC_GETTIMEOFDAY */
+
+enum vdso_clock_mode {
+ VDSO_CLOCKMODE_NONE,
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
+ VDSO_ARCH_CLOCKMODES,
+#endif
+ VDSO_CLOCKMODE_MAX,
+
+ /* Indicator for time namespace VDSO */
+ VDSO_CLOCKMODE_TIMENS = INT_MAX
+};
+
+#endif /* __VDSO_CLOCKSOURCE_H */
diff --git a/include/vdso/const.h b/include/vdso/const.h
new file mode 100644
index 000000000000..94b385ad438d
--- /dev/null
+++ b/include/vdso/const.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_CONST_H
+#define __VDSO_CONST_H
+
+#include <uapi/linux/const.h>
+
+#define UL(x) (_UL(x))
+#define ULL(x) (_ULL(x))
+
+#endif /* __VDSO_CONST_H */
diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h
index c5f347cc5e55..73eb622e7663 100644
--- a/include/vdso/datapage.h
+++ b/include/vdso/datapage.h
@@ -4,9 +4,26 @@
#ifndef __ASSEMBLY__
-#include <linux/bits.h>
-#include <linux/time.h>
-#include <linux/types.h>
+#include <linux/compiler.h>
+#include <uapi/linux/time.h>
+#include <uapi/linux/types.h>
+#include <uapi/asm-generic/errno-base.h>
+
+#include <vdso/bits.h>
+#include <vdso/clocksource.h>
+#include <vdso/ktime.h>
+#include <vdso/limits.h>
+#include <vdso/math64.h>
+#include <vdso/processor.h>
+#include <vdso/time.h>
+#include <vdso/time32.h>
+#include <vdso/time64.h>
+
+#ifdef CONFIG_ARCH_HAS_VDSO_DATA
+#include <asm/vdso/data.h>
+#else
+struct arch_vdso_data {};
+#endif
#define VDSO_BASES (CLOCK_TAI + 1)
#define VDSO_HRES (BIT(CLOCK_REALTIME) | \
@@ -21,8 +38,6 @@
#define CS_RAW 1
#define CS_BASES (CS_RAW + 1)
-#define VCLOCK_TIMENS UINT_MAX
-
/**
* struct vdso_timestamp - basetime per clock_id
* @sec: seconds
@@ -55,6 +70,8 @@ struct vdso_timestamp {
* @tz_dsttime: type of DST correction
* @hrtimer_res: hrtimer resolution
* @__unused: unused
+ * @arch_data: architecture-specific data (optional, defaults
+ * to an empty struct)
*
* vdso_data will be accessed by 64 bit and compat code at the same time
* so we should be careful before modifying this structure.
@@ -64,8 +81,8 @@ struct vdso_timestamp {
*
* @offset is used by the special time namespace VVAR pages which are
* installed instead of the real VVAR page. These namespace pages must set
- * @seq to 1 and @clock_mode to VLOCK_TIMENS to force the code into the
- * time namespace slow path. The namespace aware functions retrieve the
+ * @seq to 1 and @clock_mode to VDSO_CLOCKMODE_TIMENS to force the code into
+ * the time namespace slow path. The namespace aware functions retrieve the
* real system wide VVAR page, read host time and add the per clock offset.
* For clocks which are not affected by time namespace adjustment the
* offset must be zero.
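To make the forced slow path concrete, a hedged reader-side sketch follows; it approximates the generic read loop and is not taken from this patch (the helper name and cpu_relax() placement are assumptions):

/* Hedged sketch: a plain seqcount retry loop over the vdso_data page. */
static __always_inline u32 example_vdso_read_begin(const struct vdso_data *vd)
{
	u32 seq;

	/*
	 * A time-namespace VVAR page pins @seq at 1 (odd == update in
	 * progress), so this loop alone would spin forever there. Callers
	 * therefore check @clock_mode for VDSO_CLOCKMODE_TIMENS first and
	 * branch to the namespace slow path, which applies @offset.
	 */
	while ((seq = READ_ONCE(vd->seq)) & 1)
		cpu_relax();
	smp_rmb();	/* pairs with the writer's barrier after updates */
	return seq;
}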
@@ -88,6 +105,8 @@ struct vdso_data {
s32 tz_dsttime;
u32 hrtimer_res;
u32 __unused;
+
+ struct arch_vdso_data arch_data;
};
/*
@@ -100,6 +119,23 @@ struct vdso_data {
* relocation, and this is what we need.
*/
extern struct vdso_data _vdso_data[CS_BASES] __attribute__((visibility("hidden")));
+extern struct vdso_data _timens_data[CS_BASES] __attribute__((visibility("hidden")));
+
+/*
+ * The generic vDSO implementation requires that gettimeofday.h
+ * provides:
+ * - __arch_get_vdso_data(): to get the vdso datapage.
+ * - __arch_get_hw_counter(): to get the hw counter based on the
+ * clock_mode.
+ * - gettimeofday_fallback(): fallback for gettimeofday.
+ * - clock_gettime_fallback(): fallback for clock_gettime.
+ * - clock_getres_fallback(): fallback for clock_getres.
+ */
+#ifdef ENABLE_COMPAT_VDSO
+#include <asm/vdso/compat_gettimeofday.h>
+#else
+#include <asm/vdso/gettimeofday.h>
+#endif /* ENABLE_COMPAT_VDSO */
#endif /* !__ASSEMBLY__ */
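For orientation, here is a minimal, hypothetical shape for the per-arch hooks listed above; the bodies, the U64_MAX fallback convention, and arch_counter_read() are assumptions for illustration, not requirements of this header:

/* Hedged sketch of asm/vdso/gettimeofday.h -- not part of this patch. */
static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
	return _vdso_data;	/* the hidden-visibility array declared above */
}

static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
						 const struct vdso_data *vd)
{
	if (clock_mode == VDSO_CLOCKMODE_NONE)
		return U64_MAX;		/* assumed "use the fallback" marker */
	return arch_counter_read();	/* hypothetical raw counter primitive */
}

static __always_inline
long clock_gettime_fallback(clockid_t clock, struct __kernel_timespec *ts)
{
	return -ENOSYS;		/* a real version issues the actual syscall */
}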
diff --git a/include/vdso/jiffies.h b/include/vdso/jiffies.h
new file mode 100644
index 000000000000..2f9d596c8b29
--- /dev/null
+++ b/include/vdso/jiffies.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_JIFFIES_H
+#define __VDSO_JIFFIES_H
+
+#include <asm/param.h> /* for HZ */
+#include <vdso/time64.h>
+
+/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */
+#define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ)
+
+#endif /* __VDSO_JIFFIES_H */
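The +HZ/2 term above rounds to the nearest nanosecond instead of truncating. A worked instance (illustrative only):

/* HZ == 250: (1000000000 + 125) / 250 = 4000000 ns, exactly 4 ms per tick.
 * HZ == 24:  (1000000000 + 12) / 24   = 41666667 ns, whereas truncating
 * 1000000000 / 24 would give 41666666 ns and understate every tick.
 */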
diff --git a/include/vdso/ktime.h b/include/vdso/ktime.h
new file mode 100644
index 000000000000..a0fd07239e0e
--- /dev/null
+++ b/include/vdso/ktime.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_KTIME_H
+#define __VDSO_KTIME_H
+
+#include <vdso/jiffies.h>
+
+/*
+ * The resolution of the clocks. The resolution value is returned in
+ * the clock_getres() system call to give application programmers an
+ * idea of the (in)accuracy of timers. Timer values are rounded up to
+ * this resolution value.
+ */
+#define LOW_RES_NSEC TICK_NSEC
+#define KTIME_LOW_RES (LOW_RES_NSEC)
+
+#endif /* __VDSO_KTIME_H */
diff --git a/include/vdso/limits.h b/include/vdso/limits.h
new file mode 100644
index 000000000000..0197888ad0e0
--- /dev/null
+++ b/include/vdso/limits.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_LIMITS_H
+#define __VDSO_LIMITS_H
+
+#define USHRT_MAX ((unsigned short)~0U)
+#define SHRT_MAX ((short)(USHRT_MAX >> 1))
+#define SHRT_MIN ((short)(-SHRT_MAX - 1))
+#define INT_MAX ((int)(~0U >> 1))
+#define INT_MIN (-INT_MAX - 1)
+#define UINT_MAX (~0U)
+#define LONG_MAX ((long)(~0UL >> 1))
+#define LONG_MIN (-LONG_MAX - 1)
+#define ULONG_MAX (~0UL)
+#define LLONG_MAX ((long long)(~0ULL >> 1))
+#define LLONG_MIN (-LLONG_MAX - 1)
+#define ULLONG_MAX (~0ULL)
+#define UINTPTR_MAX ULONG_MAX
+
+#endif /* __VDSO_LIMITS_H */
diff --git a/include/vdso/math64.h b/include/vdso/math64.h
new file mode 100644
index 000000000000..7da703ee5561
--- /dev/null
+++ b/include/vdso/math64.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_MATH64_H
+#define __VDSO_MATH64_H
+
+static __always_inline u32
+__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+ u32 ret = 0;
+
+ while (dividend >= divisor) {
+ /* The following asm() prevents the compiler from
+ optimising this loop into a modulo operation. */
+ asm("" : "+rm"(dividend));
+
+ dividend -= divisor;
+ ret++;
+ }
+
+ *remainder = dividend;
+
+ return ret;
+}
+
+#endif /* __VDSO_MATH64_H */
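A short usage sketch (hypothetical values, assuming NSEC_PER_SEC from <vdso/time64.h>): the helper is only a win when the quotient is known to be small, as it is when normalizing a timespec.

static inline u32 example_ns_to_sec(u64 ns, u64 *rem)
{
	/* e.g. ns == 2500000123 -> returns 2 with *rem == 500000123,
	 * i.e. two loop iterations instead of a full 64-bit division.
	 */
	return __iter_div_u64_rem(ns, NSEC_PER_SEC, rem);
}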
diff --git a/include/vdso/processor.h b/include/vdso/processor.h
new file mode 100644
index 000000000000..fbe8265ea3c4
--- /dev/null
+++ b/include/vdso/processor.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __VDSO_PROCESSOR_H
+#define __VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/vdso/processor.h>
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __VDSO_PROCESSOR_H */
diff --git a/include/vdso/time.h b/include/vdso/time.h
new file mode 100644
index 000000000000..739f53cd2949
--- /dev/null
+++ b/include/vdso/time.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_TIME_H
+#define __VDSO_TIME_H
+
+#include <uapi/linux/types.h>
+
+struct timens_offset {
+ s64 sec;
+ u64 nsec;
+};
+
+#endif /* __VDSO_TIME_H */
diff --git a/include/vdso/time32.h b/include/vdso/time32.h
new file mode 100644
index 000000000000..fdf56f932f67
--- /dev/null
+++ b/include/vdso/time32.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_TIME32_H
+#define __VDSO_TIME32_H
+
+typedef s32 old_time32_t;
+
+struct old_timespec32 {
+ old_time32_t tv_sec;
+ s32 tv_nsec;
+};
+
+struct old_timeval32 {
+ old_time32_t tv_sec;
+ s32 tv_usec;
+};
+
+#endif /* __VDSO_TIME32_H */
diff --git a/include/vdso/time64.h b/include/vdso/time64.h
new file mode 100644
index 000000000000..b40cfa2aa33c
--- /dev/null
+++ b/include/vdso/time64.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_TIME64_H
+#define __VDSO_TIME64_H
+
+/* Parameters used to convert the timespec values: */
+#define MSEC_PER_SEC 1000L
+#define USEC_PER_MSEC 1000L
+#define NSEC_PER_USEC 1000L
+#define NSEC_PER_MSEC 1000000L
+#define USEC_PER_SEC 1000000L
+#define NSEC_PER_SEC 1000000000L
+#define PSEC_PER_SEC 1000000000000LL
+#define FSEC_PER_SEC 1000000000000000LL
+
+#endif /* __VDSO_TIME64_H */
diff --git a/include/vdso/vsyscall.h b/include/vdso/vsyscall.h
index 2c6134e0c23d..b0fdc9c6bf43 100644
--- a/include/vdso/vsyscall.h
+++ b/include/vdso/vsyscall.h
@@ -6,6 +6,9 @@
#include <asm/vdso/vsyscall.h>
+unsigned long vdso_update_begin(void);
+void vdso_update_end(unsigned long flags);
+
#endif /* !__ASSEMBLY__ */
#endif /* __VDSO_VSYSCALL_H */
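A rough picture of how this pair is meant to bracket datapage updates; the surrounding timekeeper structure and field names are assumptions, only the begin/end calls come from this header:

/* Hedged sketch: writers serialize and readers retry while seq is odd. */
static void example_update_vsyscall(const struct example_timekeeper *tk)
{
	unsigned long flags = vdso_update_begin();	/* irqs off, seq odd */

	/* ... copy cycle_last, mult, shift and basetimes into _vdso_data ... */

	vdso_update_end(flags);		/* seq even again, irqs restored */
}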
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index 06b0b57e996c..c422a403c099 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -17,6 +17,7 @@
#include <linux/bitmap.h>
#include <linux/fb.h>
#include <linux/of.h>
+#include <drm/drm_color_mgmt.h>
#include <media/v4l2-mediabus.h>
#include <video/videomode.h>
@@ -330,6 +331,7 @@ int ipu_dp_enable_channel(struct ipu_dp *dp);
void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync);
void ipu_dp_disable(struct ipu_soc *ipu);
int ipu_dp_setup_channel(struct ipu_dp *dp,
+ enum drm_color_encoding ycbcr_enc, enum drm_color_range range,
enum ipu_color_space in, enum ipu_color_space out);
int ipu_dp_set_window_pos(struct ipu_dp *, u16 x_pos, u16 y_pos);
int ipu_dp_set_global_alpha(struct ipu_dp *dp, bool enable, u8 alpha,
@@ -484,9 +486,6 @@ int ipu_smfc_set_watermark(struct ipu_smfc *smfc, u32 set_level, u32 clr_level);
enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc);
enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat);
-enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code);
-int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat);
-bool ipu_pixelformat_is_planar(u32 pixelformat);
int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
bool hflip, bool vflip);
int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode,
diff --git a/include/video/mbxfb.h b/include/video/mbxfb.h
deleted file mode 100644
index 35921cb6d1e5..000000000000
--- a/include/video/mbxfb.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __MBX_FB_H
-#define __MBX_FB_H
-
-#include <asm/ioctl.h>
-#include <asm/types.h>
-
-struct mbxfb_val {
- unsigned int defval;
- unsigned int min;
- unsigned int max;
-};
-
-struct fb_info;
-
-struct mbxfb_platform_data {
- /* Screen info */
- struct mbxfb_val xres;
- struct mbxfb_val yres;
- struct mbxfb_val bpp;
-
- /* Memory info */
- unsigned long memsize; /* if 0 use ODFB? */
- unsigned long timings1;
- unsigned long timings2;
- unsigned long timings3;
-
- int (*probe)(struct fb_info *fb);
- int (*remove)(struct fb_info *fb);
-};
-
-/* planar */
-#define MBXFB_FMT_YUV16 0
-#define MBXFB_FMT_YUV12 1
-
-/* packed */
-#define MBXFB_FMT_UY0VY1 2
-#define MBXFB_FMT_VY0UY1 3
-#define MBXFB_FMT_Y0UY1V 4
-#define MBXFB_FMT_Y0VY1U 5
-struct mbxfb_overlaySetup {
- __u32 enable;
- __u32 x, y;
- __u32 width, height;
- __u32 fmt;
- __u32 mem_offset;
- __u32 scaled_width;
- __u32 scaled_height;
-
- /* Filled by the driver */
- __u32 U_offset;
- __u32 V_offset;
-
- __u16 Y_stride;
- __u16 UV_stride;
-};
-
-#define MBXFB_ALPHABLEND_NONE 0
-#define MBXFB_ALPHABLEND_GLOBAL 1
-#define MBXFB_ALPHABLEND_PIXEL 2
-
-#define MBXFB_COLORKEY_DISABLED 0
-#define MBXFB_COLORKEY_PREVIOUS 1
-#define MBXFB_COLORKEY_CURRENT 2
-struct mbxfb_alphaCtl {
- __u8 overlay_blend_mode;
- __u8 overlay_colorkey_mode;
- __u8 overlay_global_alpha;
- __u32 overlay_colorkey;
- __u32 overlay_colorkey_mask;
-
- __u8 graphics_blend_mode;
- __u8 graphics_colorkey_mode;
- __u8 graphics_global_alpha;
- __u32 graphics_colorkey;
- __u32 graphics_colorkey_mask;
-};
-
-#define MBXFB_PLANE_GRAPHICS 0
-#define MBXFB_PLANE_VIDEO 1
-struct mbxfb_planeorder {
- __u8 bottom;
- __u8 top;
-};
-
-struct mbxfb_reg {
- __u32 addr; /* offset from 0x03fe 0000 */
- __u32 val; /* value */
- __u32 mask; /* which bits to touch (for write) */
-};
-
-#define MBXFB_IOCX_OVERLAY _IOWR(0xF4, 0x00,struct mbxfb_overlaySetup)
-#define MBXFB_IOCG_ALPHA _IOR(0xF4, 0x01,struct mbxfb_alphaCtl)
-#define MBXFB_IOCS_ALPHA _IOW(0xF4, 0x02,struct mbxfb_alphaCtl)
-#define MBXFB_IOCS_PLANEORDER _IOR(0xF4, 0x03,struct mbxfb_planeorder)
-#define MBXFB_IOCS_REG _IOW(0xF4, 0x04,struct mbxfb_reg)
-#define MBXFB_IOCX_REG _IOWR(0xF4, 0x05,struct mbxfb_reg)
-
-#endif /* __MBX_FB_H */
diff --git a/include/video/mmp_disp.h b/include/video/mmp_disp.h
index 1f9bc133e230..77252cb46361 100644
--- a/include/video/mmp_disp.h
+++ b/include/video/mmp_disp.h
@@ -231,7 +231,7 @@ struct mmp_path {
/* layers */
int overlay_num;
- struct mmp_overlay overlays[0];
+ struct mmp_overlay overlays[];
};
extern struct mmp_path *mmp_get_path(const char *name);
diff --git a/include/video/of_display_timing.h b/include/video/of_display_timing.h
index e1126a74882a..eff166fdd81b 100644
--- a/include/video/of_display_timing.h
+++ b/include/video/of_display_timing.h
@@ -8,6 +8,8 @@
#ifndef __LINUX_OF_DISPLAY_TIMING_H
#define __LINUX_OF_DISPLAY_TIMING_H
+#include <linux/errno.h>
+
struct device_node;
struct display_timing;
struct display_timings;
diff --git a/include/video/radeon.h b/include/video/radeon.h
index 005eae19ec09..72f94ccfa725 100644
--- a/include/video/radeon.h
+++ b/include/video/radeon.h
@@ -750,7 +750,7 @@
#define WAIT_DMA_GUI_IDLE (1 << 9)
#define WAIT_2D_IDLECLEAN (1 << 16)
-/* SURFACE_CNTL bit consants */
+/* SURFACE_CNTL bit constants */
#define SURF_TRANSLATION_DIS (1 << 8)
#define NONSURF_AP0_SWP_16BPP (1 << 20)
#define NONSURF_AP0_SWP_32BPP (1 << 21)
diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
index b6571c3cfa31..e6966d187591 100644
--- a/include/video/samsung_fimd.h
+++ b/include/video/samsung_fimd.h
@@ -10,7 +10,7 @@
*
* This is the register set for the fimd and new style framebuffer interface
* found from the S3C2443 onwards into the S3C2416, S3C2450, the
- * S3C64XX series such as the S3C6400 and S3C6410, and EXYNOS series.
+ * S3C64XX series such as the S3C6400 and S3C6410, and Exynos series.
*/
/* VIDCON0 */
@@ -476,6 +476,10 @@
* 1111 -none- -none- -none- -none- -none-
*/
+#define WIN_RGB_ORDER(_win) (0x2020 + ((_win) * 4))
+#define WIN_RGB_ORDER_FORWARD (0 << 11)
+#define WIN_RGB_ORDER_REVERSE (1 << 11)
+
/* FIMD Version 8 register offset definitions */
#define FIMD_V8_VIDTCON0 0x20010
#define FIMD_V8_VIDTCON1 0x20014
diff --git a/include/video/sstfb.h b/include/video/sstfb.h
index 28384f354773..d4a5e41d1173 100644
--- a/include/video/sstfb.h
+++ b/include/video/sstfb.h
@@ -23,7 +23,7 @@
# define SST_DEBUG_FUNC 1
# define SST_DEBUG_VAR 1
#else
-# define dprintk(X...)
+# define dprintk(X...) no_printk(X)
# define SST_DEBUG_REG 0
# define SST_DEBUG_FUNC 0
# define SST_DEBUG_VAR 0
@@ -48,7 +48,7 @@
#if (SST_DEBUG_FUNC > 1)
# define f_ddprintk(X...) dprintk(" " X)
#else
-# define f_ddprintk(X...)
+# define f_ddprintk(X...) no_printk(X)
#endif
#if (SST_DEBUG_FUNC > 2)
# define f_dddprintk(X...) dprintk(" " X)
diff --git a/include/video/vga.h b/include/video/vga.h
index d334e64c1c19..947c0abd04ef 100644
--- a/include/video/vga.h
+++ b/include/video/vga.h
@@ -2,15 +2,15 @@
* linux/include/video/vga.h -- standard VGA chipset interaction
*
* Copyright 1999 Jeff Garzik <jgarzik@pobox.com>
- *
+ *
* Copyright history from vga16fb.c:
* Copyright 1999 Ben Pfaff and Petr Vandrovec
- * Based on VGA info at http://www.osdever.net/FreeVGA/home.htm
+ * Based on VGA info at http://www.osdever.net/FreeVGA/home.htm
* Based on VESA framebuffer (c) 1998 Gerd Knorr
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
- * archive for more details.
+ * archive for more details.
*
*/
@@ -22,6 +22,8 @@
#include <asm/vga.h>
#include <asm/byteorder.h>
+#define VGA_FB_PHYS_BASE 0xA0000 /* VGA framebuffer I/O base */
+#define VGA_FB_PHYS_SIZE 65536 /* VGA framebuffer I/O size */
/* Some of the code below is taken from SVGAlib. The original,
unmodified copyright notice for that code is below. */
@@ -190,7 +192,7 @@ struct vgastate {
__u32 num_gfx; /* number of gfx registers, 0 for default */
__u32 num_seq; /* number of seq registers, 0 for default */
void *vidstate;
-};
+};
extern int save_vga(struct vgastate *state);
extern int restore_vga(struct vgastate *state);
@@ -198,7 +200,7 @@ extern int restore_vga(struct vgastate *state);
/*
* generic VGA port read/write
*/
-
+
static inline unsigned char vga_io_r (unsigned short port)
{
return inb_p(port);
@@ -261,7 +263,7 @@ static inline void vga_w_fast (void __iomem *regbase, unsigned short port,
/*
* VGA CRTC register read/write
*/
-
+
static inline unsigned char vga_rcrt (void __iomem *regbase, unsigned char reg)
{
vga_w (regbase, VGA_CRT_IC, reg);
@@ -314,7 +316,7 @@ static inline void vga_mm_wcrt (void __iomem *regbase, unsigned char reg, unsign
/*
* VGA sequencer register read/write
*/
-
+
static inline unsigned char vga_rseq (void __iomem *regbase, unsigned char reg)
{
vga_w (regbase, VGA_SEQ_I, reg);
@@ -366,7 +368,7 @@ static inline void vga_mm_wseq (void __iomem *regbase, unsigned char reg, unsign
/*
* VGA graphics controller register read/write
*/
-
+
static inline unsigned char vga_rgfx (void __iomem *regbase, unsigned char reg)
{
vga_w (regbase, VGA_GFX_I, reg);
@@ -419,7 +421,7 @@ static inline void vga_mm_wgfx (void __iomem *regbase, unsigned char reg, unsign
/*
* VGA attribute controller register read/write
*/
-
+
static inline unsigned char vga_rattr (void __iomem *regbase, unsigned char reg)
{
vga_w (regbase, VGA_ATT_IW, reg);
diff --git a/include/xen/acpi.h b/include/xen/acpi.h
index 4ddd7dc4a61e..b1e11863144d 100644
--- a/include/xen/acpi.h
+++ b/include/xen/acpi.h
@@ -40,41 +40,6 @@
#include <xen/xen.h>
#include <linux/acpi.h>
-#define ACPI_MEMORY_DEVICE_CLASS "memory"
-#define ACPI_MEMORY_DEVICE_HID "PNP0C80"
-#define ACPI_MEMORY_DEVICE_NAME "Hotplug Mem Device"
-
-int xen_stub_memory_device_init(void);
-void xen_stub_memory_device_exit(void);
-
-#define ACPI_PROCESSOR_CLASS "processor"
-#define ACPI_PROCESSOR_DEVICE_HID "ACPI0007"
-#define ACPI_PROCESSOR_DEVICE_NAME "Processor"
-
-int xen_stub_processor_init(void);
-void xen_stub_processor_exit(void);
-
-void xen_pcpu_hotplug_sync(void);
-int xen_pcpu_id(uint32_t acpi_id);
-
-static inline int xen_acpi_get_pxm(acpi_handle h)
-{
- unsigned long long pxm;
- acpi_status status;
- acpi_handle handle;
- acpi_handle phandle = h;
-
- do {
- handle = phandle;
- status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm);
- if (ACPI_SUCCESS(status))
- return pxm;
- status = acpi_get_parent(handle, &phandle);
- } while (ACPI_SUCCESS(status));
-
- return -ENXIO;
-}
-
int xen_acpi_notify_hypervisor_sleep(u8 sleep_state,
u32 pm1a_cnt, u32 pm1b_cnd);
int xen_acpi_notify_hypervisor_extended_sleep(u8 sleep_state,
diff --git a/include/xen/arm/hypercall.h b/include/xen/arm/hypercall.h
index b40485e54d80..9d7dd1c65a21 100644
--- a/include/xen/arm/hypercall.h
+++ b/include/xen/arm/hypercall.h
@@ -53,7 +53,6 @@ unsigned long HYPERVISOR_hvm_op(int op, void *arg);
int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
int HYPERVISOR_physdev_op(int cmd, void *arg);
int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
-int HYPERVISOR_tmem_op(void *arg);
int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type);
int HYPERVISOR_dm_op(domid_t domid, unsigned int nr_bufs,
struct xen_dm_op_buf *bufs);
@@ -74,18 +73,4 @@ HYPERVISOR_suspend(unsigned long start_info_mfn)
return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
}
-static inline void
-MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
- unsigned int new_val, unsigned long flags)
-{
- BUG();
-}
-
-static inline void
-MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
- int count, int *success_count, domid_t domid)
-{
- BUG();
-}
-
#endif /* _ASM_ARM_XEN_HYPERCALL_H */
diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h
deleted file mode 100644
index b9cc11e887ed..000000000000
--- a/include/xen/arm/page-coherent.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _XEN_ARM_PAGE_COHERENT_H
-#define _XEN_ARM_PAGE_COHERENT_H
-
-#include <linux/dma-mapping.h>
-#include <asm/page.h>
-
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
-{
- return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
-{
- dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
-#endif /* _XEN_ARM_PAGE_COHERENT_H */
diff --git a/include/xen/arm/page.h b/include/xen/arm/page.h
index f77dcbcba5a6..e5c84ff28c8b 100644
--- a/include/xen/arm/page.h
+++ b/include/xen/arm/page.h
@@ -3,11 +3,11 @@
#define _ASM_ARM_XEN_PAGE_H
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
+#include <linux/pgtable.h>
#include <xen/xen.h>
#include <xen/interface/grant_table.h>
@@ -76,9 +76,16 @@ static inline unsigned long bfn_to_pfn(unsigned long bfn)
#define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn)
/* VIRT <-> GUEST conversion */
-#define virt_to_gfn(v) (pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT))
+#define virt_to_gfn(v) \
+ ({ \
+ WARN_ON_ONCE(!virt_addr_valid(v)); \
+ pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT); \
+ })
#define gfn_to_virt(m) (__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT))
+#define percpu_to_gfn(v) \
+ (pfn_to_gfn(per_cpu_ptr_to_phys(v) >> XEN_PAGE_SHIFT))
+
/* Only used in PV code. But ARM guests are always HVM. */
static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
@@ -102,12 +109,8 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
return __set_phys_to_machine(pfn, mfn);
}
-#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
-#define xen_unmap(cookie) iounmap((cookie))
-
bool xen_arch_need_swiotlb(struct device *dev,
phys_addr_t phys,
dma_addr_t dev_addr);
-unsigned long xen_get_swiotlb_free_pages(unsigned int order);
#endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/include/xen/arm/swiotlb-xen.h b/include/xen/arm/swiotlb-xen.h
new file mode 100644
index 000000000000..33336ab58afc
--- /dev/null
+++ b/include/xen/arm/swiotlb-xen.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM_SWIOTLB_XEN_H
+#define _ASM_ARM_SWIOTLB_XEN_H
+
+#include <xen/features.h>
+#include <xen/xen.h>
+
+static inline int xen_swiotlb_detect(void)
+{
+ if (!xen_domain())
+ return 0;
+ if (xen_feature(XENFEAT_direct_mapped))
+ return 1;
+ /* legacy case */
+ if (!xen_feature(XENFEAT_not_direct_mapped) && xen_initial_domain())
+ return 1;
+ return 0;
+}
+
+#endif /* _ASM_ARM_SWIOTLB_XEN_H */
diff --git a/include/xen/arm/xen-ops.h b/include/xen/arm/xen-ops.h
new file mode 100644
index 000000000000..b0766a660338
--- /dev/null
+++ b/include/xen/arm/xen-ops.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM_XEN_OPS_H
+#define _ASM_ARM_XEN_OPS_H
+
+#include <xen/swiotlb-xen.h>
+#include <xen/xen-ops.h>
+
+static inline void xen_setup_dma_ops(struct device *dev)
+{
+#ifdef CONFIG_XEN
+ if (xen_is_grant_dma_device(dev))
+ xen_grant_setup_dma_ops(dev);
+ else if (xen_swiotlb_detect())
+ dev->dma_ops = &xen_swiotlb_dma_ops;
+#endif
+}
+
+#endif /* _ASM_ARM_XEN_OPS_H */
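For context, a hypothetical consumer of the helper above; real kernels call this from the arch DMA setup path rather than a notifier, so the wiring below is purely illustrative:

static int example_dma_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE)
		xen_setup_dma_ops(dev);	/* grant DMA, swiotlb-xen or default */
	return NOTIFY_OK;
}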
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index 6fb95aa19405..f78a6cc94f1a 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -2,6 +2,8 @@
/******************************************************************************
* Xen balloon functionality
*/
+#ifndef _XEN_BALLOON_H
+#define _XEN_BALLOON_H
#define RETRY_UNLIMITED 0
@@ -24,8 +26,8 @@ extern struct balloon_stats balloon_stats;
void balloon_set_new_target(unsigned long target);
-int alloc_xenballooned_pages(int nr_pages, struct page **pages);
-void free_xenballooned_pages(int nr_pages, struct page **pages);
+int xen_alloc_ballooned_pages(unsigned int nr_pages, struct page **pages);
+void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages);
#ifdef CONFIG_XEN_BALLOON
void xen_balloon_init(void);
@@ -34,3 +36,5 @@ static inline void xen_balloon_init(void)
{
}
#endif
+
+#endif /* _XEN_BALLOON_H */
diff --git a/include/xen/events.h b/include/xen/events.h
index c0e6a0598397..344081e71584 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -12,10 +12,17 @@
#include <asm/xen/hypercall.h>
#include <asm/xen/events.h>
+struct xenbus_device;
+
unsigned xen_evtchn_nr_channels(void);
-int bind_evtchn_to_irq(unsigned int evtchn);
-int bind_evtchn_to_irqhandler(unsigned int evtchn,
+int bind_evtchn_to_irq(evtchn_port_t evtchn);
+int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn);
+int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
+ irq_handler_t handler,
+ unsigned long irqflags, const char *devname,
+ void *dev_id);
+int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
irq_handler_t handler,
unsigned long irqflags, const char *devname,
void *dev_id);
@@ -30,14 +37,14 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
unsigned long irqflags,
const char *devname,
void *dev_id);
-int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
- unsigned int remote_port);
-int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
- unsigned int remote_port,
- irq_handler_t handler,
- unsigned long irqflags,
- const char *devname,
- void *dev_id);
+int bind_interdomain_evtchn_to_irq_lateeoi(struct xenbus_device *dev,
+ evtchn_port_t remote_port);
+int bind_interdomain_evtchn_to_irqhandler_lateeoi(struct xenbus_device *dev,
+ evtchn_port_t remote_port,
+ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id);
/*
* Common unbind function for all event sources. Takes IRQ to unbind from.
@@ -46,6 +53,14 @@ int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
*/
void unbind_from_irqhandler(unsigned int irq, void *dev_id);
+/*
+ * Send late EOI for an IRQ bound to an event channel via one of the *_lateeoi
+ * functions above.
+ */
+void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags);
+/* Signal an event was spurious, i.e. there was no action resulting from it. */
+#define XEN_EOI_FLAG_SPURIOUS 0x00000001
+
#define XEN_IRQ_PRIORITY_MAX EVTCHN_FIFO_PRIORITY_MAX
#define XEN_IRQ_PRIORITY_DEFAULT EVTCHN_FIFO_PRIORITY_DEFAULT
#define XEN_IRQ_PRIORITY_MIN EVTCHN_FIFO_PRIORITY_MIN
@@ -54,15 +69,15 @@ int xen_set_irq_priority(unsigned irq, unsigned priority);
/*
* Allow extra references to event channels exposed to userspace by evtchn
*/
-int evtchn_make_refcounted(unsigned int evtchn);
-int evtchn_get(unsigned int evtchn);
-void evtchn_put(unsigned int evtchn);
+int evtchn_make_refcounted(evtchn_port_t evtchn);
+int evtchn_get(evtchn_port_t evtchn);
+void evtchn_put(evtchn_port_t evtchn);
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
-void rebind_evtchn_irq(int evtchn, int irq);
+void rebind_evtchn_irq(evtchn_port_t evtchn, int irq);
int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu);
-static inline void notify_remote_via_evtchn(int port)
+static inline void notify_remote_via_evtchn(evtchn_port_t port)
{
struct evtchn_send send = { .port = port };
(void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
@@ -86,17 +101,10 @@ void xen_poll_irq(int irq);
void xen_poll_irq_timeout(int irq, u64 timeout);
/* Determine the IRQ which is bound to an event channel */
-unsigned irq_from_evtchn(unsigned int evtchn);
+unsigned int irq_from_evtchn(evtchn_port_t evtchn);
int irq_from_virq(unsigned int cpu, unsigned int virq);
-unsigned int evtchn_from_irq(unsigned irq);
+evtchn_port_t evtchn_from_irq(unsigned irq);
-#ifdef CONFIG_XEN_PVHVM
-/* Xen HVM evtchn vector callback */
-void xen_hvm_callback_vector(void);
-#ifdef CONFIG_TRACING
-#define trace_xen_hvm_callback_vector xen_hvm_callback_vector
-#endif
-#endif
int xen_set_callback_via(uint64_t via);
void xen_evtchn_do_upcall(struct pt_regs *regs);
void xen_hvm_evtchn_do_upcall(void);
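To illustrate the lateeoi contract introduced above (a hedged sketch; example_process_requests() is a stand-in for real backend work): returning from the handler no longer implies the EOI, and an idle pass must be flagged as spurious so the core can throttle a misbehaving peer.

static irqreturn_t example_backend_interrupt(int irq, void *dev_id)
{
	unsigned int eoi_flags = 0;

	if (!example_process_requests(dev_id))	/* hypothetical helper */
		eoi_flags |= XEN_EOI_FLAG_SPURIOUS;

	xen_irq_lateeoi(irq, eoi_flags);	/* explicit, possibly delayed EOI */
	return IRQ_HANDLED;
}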
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 9bc5bc07d4d3..e279be353e3f 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -50,7 +50,12 @@
#include <linux/page-flags.h>
#include <linux/kernel.h>
-#define GNTTAB_RESERVED_XENSTORE 1
+/*
+ * Technically there's no reliably invalid grant reference or grant handle,
+ * so pick the value least likely to be observed as valid.
+ */
+#define INVALID_GRANT_REF ((grant_ref_t)-1)
+#define INVALID_GRANT_HANDLE ((grant_handle_t)-1)
/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
#define NR_GRANT_FRAMES 4
@@ -90,33 +95,46 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
* longer in use. Return 1 if the grant entry was freed, 0 if it is still in
* use.
*/
-int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
+int gnttab_end_foreign_access_ref(grant_ref_t ref);
/*
* Eventually end access through the given grant reference, and once that
* access has been ended, free the given page too. Access will be ended
* immediately iff the grant entry is not in use, otherwise it will happen
- * some time later. page may be 0, in which case no freeing will occur.
+ * some time later. page may be NULL, in which case no freeing will occur.
+ * Note that the granted page might still be accessed (read or write) by the
+ * other side after gnttab_end_foreign_access() returns, so even if page was
+ * specified as NULL the caller must not immediately reuse the page for
+ * other purposes. gnttab_end_foreign_access() will take an additional
+ * reference to the granted page in this case, which is dropped only after
+ * the grant is no longer in use.
+ * This requires that multi-page allocations for areas subject to
+ * gnttab_end_foreign_access() are done via alloc_pages_exact() (and freed
+ * via free_pages_exact()) in order to avoid high-order pages.
*/
-void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
- unsigned long page);
-
-int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
-
-unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
-unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
+void gnttab_end_foreign_access(grant_ref_t ref, struct page *page);
-int gnttab_query_foreign_access(grant_ref_t ref);
+/*
+ * End access through the given grant reference, iff the grant entry is
+ * no longer in use. In case of success ending foreign access, the
+ * grant reference is deallocated.
+ * Return 1 if the grant entry was freed, 0 if it is still in use.
+ */
+int gnttab_try_end_foreign_access(grant_ref_t ref);
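A hedged sketch of the allocation rule spelled out above (otherend_id and the single-page layout are placeholders): buffers that may be handed to gnttab_end_foreign_access() come from alloc_pages_exact(), so no high-order page is pinned for one still-granted subpage.

static int example_share_page(domid_t otherend_id, void **ring)
{
	int ref;

	*ring = alloc_pages_exact(XEN_PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);
	if (!*ring)
		return -ENOMEM;

	ref = gnttab_grant_foreign_access(otherend_id, virt_to_gfn(*ring), 0);
	if (ref < 0)
		free_pages_exact(*ring, XEN_PAGE_SIZE);
	return ref;	/* a grant reference, or a negative error */
}

static void example_unshare_page(grant_ref_t ref, void *ring)
{
	/* The actual free may be deferred until the peer unmaps the page. */
	gnttab_end_foreign_access(ref, virt_to_page(ring));
}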
/*
* operations on reserved batches of grant references
*/
int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);
+int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first);
+
void gnttab_free_grant_reference(grant_ref_t ref);
void gnttab_free_grant_references(grant_ref_t head);
+void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count);
+
int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);
int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);
@@ -140,9 +158,6 @@ static inline void gnttab_page_grant_foreign_access_ref_one(
readonly);
}
-void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
- unsigned long pfn);
-
static inline void
gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
uint32_t flags, grant_ref_t ref, domid_t domid)
@@ -157,6 +172,7 @@ gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
map->flags = flags;
map->ref = ref;
map->dom = domid;
+ map->status = 1; /* arbitrary positive value */
}
static inline void
@@ -198,6 +214,23 @@ void gnttab_free_auto_xlat_frames(void);
int gnttab_alloc_pages(int nr_pages, struct page **pages);
void gnttab_free_pages(int nr_pages, struct page **pages);
+struct gnttab_page_cache {
+ spinlock_t lock;
+#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
+ struct page *pages;
+#else
+ struct list_head pages;
+#endif
+ unsigned int num_pages;
+};
+
+void gnttab_page_cache_init(struct gnttab_page_cache *cache);
+int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page);
+void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
+ unsigned int num);
+void gnttab_page_cache_shrink(struct gnttab_page_cache *cache,
+ unsigned int num);
+
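A usage sketch for the page cache declared above (hedged: it assumes gnttab_page_cache_get() returns 0 on a cache hit and that gnttab_alloc_pages() is an acceptable slow path, as backends typically arrange):

static int example_get_page(struct gnttab_page_cache *cache,
			    struct page **page)
{
	if (gnttab_page_cache_get(cache, page) == 0)
		return 0;			/* served from the cache */
	return gnttab_alloc_pages(1, page);	/* cache empty: slow path */
}

static void example_put_page(struct gnttab_page_cache *cache,
			     struct page *page)
{
	gnttab_page_cache_put(cache, &page, 1);
	gnttab_page_cache_shrink(cache, 64);	/* assumed high-water mark */
}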
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
struct gnttab_dma_alloc_args {
/* Device for which DMA memory will be/was allocated. */
diff --git a/include/xen/hvm.h b/include/xen/hvm.h
index 0b15f8cb17fc..8da7a6747058 100644
--- a/include/xen/hvm.h
+++ b/include/xen/hvm.h
@@ -58,4 +58,8 @@ static inline int hvm_get_parameter(int idx, uint64_t *value)
#define HVM_CALLBACK_VECTOR(x) (((uint64_t)HVM_CALLBACK_VIA_TYPE_VECTOR)<<\
HVM_CALLBACK_VIA_TYPE_SHIFT | (x))
+void xen_setup_callback_vector(void);
+
+int xen_set_upcall_vector(unsigned int cpu);
+
#endif /* XEN_HVM_H__ */
diff --git a/include/xen/interface/callback.h b/include/xen/interface/callback.h
index dc3193f4b581..c67822a25ea6 100644
--- a/include/xen/interface/callback.h
+++ b/include/xen/interface/callback.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* callback.h
*
* Register guest OS callbacks with Xen.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2006, Ian Campbell
*/
diff --git a/include/xen/interface/elfnote.h b/include/xen/interface/elfnote.h
index 9e9f9bf7c66d..38deb1214613 100644
--- a/include/xen/interface/elfnote.h
+++ b/include/xen/interface/elfnote.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* elfnote.h
*
* Definitions used for the Xen ELF notes.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2006, Ian Campbell, XenSource Ltd.
*/
@@ -208,13 +191,3 @@
#define XEN_ELFNOTE_MAX XEN_ELFNOTE_PHYS32_ENTRY
#endif /* __XEN_PUBLIC_ELFNOTE_H__ */
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/include/xen/interface/event_channel.h b/include/xen/interface/event_channel.h
index 45650c9a06d5..5f8da466e8a9 100644
--- a/include/xen/interface/event_channel.h
+++ b/include/xen/interface/event_channel.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* event_channel.h
*
@@ -220,7 +220,7 @@ struct evtchn_expand_array {
#define EVTCHNOP_set_priority 13
struct evtchn_set_priority {
/* IN parameters. */
- uint32_t port;
+ evtchn_port_t port;
uint32_t priority;
};
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h
index 6d1384abfbdf..53f760378e39 100644
--- a/include/xen/interface/features.h
+++ b/include/xen/interface/features.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* features.h
*
@@ -83,6 +83,20 @@
*/
#define XENFEAT_linux_rsdp_unrestricted 15
+/*
+ * A direct-mapped (or 1:1 mapped) domain is a domain for which its
+ * local pages have gfn == mfn. If a domain is direct-mapped,
+ * XENFEAT_direct_mapped is set; otherwise XENFEAT_not_direct_mapped
+ * is set.
+ *
+ * If neither flag is set (e.g. older Xen releases) the assumptions are:
+ * - non-auto_translated domains (x86 only) are always direct-mapped
+ * - on x86, auto_translated domains are not direct-mapped
+ * - on ARM, Dom0 is direct-mapped, DomUs are not
+ */
+#define XENFEAT_not_direct_mapped 16
+#define XENFEAT_direct_mapped 17
+
#define XENFEAT_NR_SUBMAPS 1
#endif /* __XEN_PUBLIC_FEATURES_H__ */
diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h
index 7fb7112d667c..cebbd99f1f84 100644
--- a/include/xen/interface/grant_table.h
+++ b/include/xen/interface/grant_table.h
@@ -1,27 +1,10 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* grant_table.h
*
* Interface for granting foreign access to page frames, and receiving
* page-ownership transfers.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2004, K A Fraser
*/
@@ -36,7 +19,8 @@
/* Some rough guidelines on accessing and updating grant-table entries
* in a concurrency-safe manner. For more information, Linux contains a
- * reference implementation for guest OSes (arch/xen/kernel/grant_table.c).
+ * reference implementation for guest OSes (drivers/xen/grant-table.c, see
+ * http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=blob;f=drivers/xen/grant-table.c;hb=HEAD
*
* NB. WMB is a no-op on current-generation x86 processors. However, a
* compiler barrier will still be required.
@@ -97,8 +81,9 @@ typedef uint32_t grant_ref_t;
*/
/*
- * Version 1 of the grant table entry structure is maintained purely
- * for backwards compatibility. New guests should use version 2.
+ * Version 1 of the grant table entry structure is maintained largely for
+ * backwards compatibility. New guests should be able to use version 2 to
+ * overcome the limitations of version 1, but should default to version 1.
*/
struct grant_entry_v1 {
/* GTF_xxx: various type and flag information. [XEN,GST] */
@@ -106,12 +91,21 @@ struct grant_entry_v1 {
/* The domain being granted foreign privileges. [GST] */
domid_t domid;
/*
- * GTF_permit_access: Frame that @domid is allowed to map and access. [GST]
- * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN]
+ * GTF_permit_access: GFN that @domid is allowed to map and access. [GST]
+ * GTF_accept_transfer: GFN that @domid is allowed to transfer into. [GST]
+ * GTF_transfer_completed: MFN whose ownership transferred by @domid
+ * (non-translated guests only). [XEN]
*/
uint32_t frame;
};
+/* The first few grant table entries will be preserved across grant table
+ * version changes and may be pre-populated at domain creation by tools.
+ */
+#define GNTTAB_NR_RESERVED_ENTRIES 8
+#define GNTTAB_RESERVED_CONSOLE 0
+#define GNTTAB_RESERVED_XENSTORE 1
+
/*
* Type of grant entry.
* GTF_invalid: This grant entry grants no privileges.
@@ -128,10 +122,13 @@ struct grant_entry_v1 {
#define GTF_type_mask (3U<<0)
/*
- * Subflags for GTF_permit_access.
+ * Subflags for GTF_permit_access and GTF_transitive.
* GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
* GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
* GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
+ * Further subflags for GTF_permit_access only.
+ * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags to be used for
+ * mappings of the grant [GST]
* GTF_sub_page: Grant access to only a subrange of the page. @domid
* will only be allowed to copy from the grant, and not
* map it. [GST]
@@ -142,6 +139,12 @@ struct grant_entry_v1 {
#define GTF_reading (1U<<_GTF_reading)
#define _GTF_writing (4)
#define GTF_writing (1U<<_GTF_writing)
+#define _GTF_PWT (5)
+#define GTF_PWT (1U<<_GTF_PWT)
+#define _GTF_PCD (6)
+#define GTF_PCD (1U<<_GTF_PCD)
+#define _GTF_PAT (7)
+#define GTF_PAT (1U<<_GTF_PAT)
#define _GTF_sub_page (8)
#define GTF_sub_page (1U<<_GTF_sub_page)
@@ -181,8 +184,7 @@ struct grant_entry_header {
};
/*
- * Version 2 of the grant entry structure, here is a union because three
- * different types are suppotted: full_page, sub_page and transitive.
+ * Version 2 of the grant entry structure.
*/
union grant_entry_v2 {
struct grant_entry_header hdr;
@@ -197,9 +199,9 @@ union grant_entry_v2 {
* field of the same name in the V1 entry structure.
*/
struct {
- struct grant_entry_header hdr;
- uint32_t pad0;
- uint64_t frame;
+ struct grant_entry_header hdr;
+ uint32_t pad0;
+ uint64_t frame;
} full_page;
/*
@@ -208,10 +210,10 @@ union grant_entry_v2 {
* in frame @frame.
*/
struct {
- struct grant_entry_header hdr;
- uint16_t page_off;
- uint16_t length;
- uint64_t frame;
+ struct grant_entry_header hdr;
+ uint16_t page_off;
+ uint16_t length;
+ uint64_t frame;
} sub_page;
/*
@@ -219,12 +221,15 @@ union grant_entry_v2 {
* grant @gref in domain @trans_domid, as if it was the local
* domain. Obviously, the transitive access must be compatible
* with the original grant.
+ *
+ * The current version of Xen does not allow transitive grants
+ * to be mapped.
*/
struct {
- struct grant_entry_header hdr;
- domid_t trans_domid;
- uint16_t pad0;
- grant_ref_t gref;
+ struct grant_entry_header hdr;
+ domid_t trans_domid;
+ uint16_t pad0;
+ grant_ref_t gref;
} transitive;
uint32_t __spacer[4]; /* Pad to a power of two */
@@ -236,6 +241,21 @@ typedef uint16_t grant_status_t;
* GRANT TABLE QUERIES AND USES
*/
+#define GNTTABOP_map_grant_ref 0
+#define GNTTABOP_unmap_grant_ref 1
+#define GNTTABOP_setup_table 2
+#define GNTTABOP_dump_table 3
+#define GNTTABOP_transfer 4
+#define GNTTABOP_copy 5
+#define GNTTABOP_query_size 6
+#define GNTTABOP_unmap_and_replace 7
+#define GNTTABOP_set_version 8
+#define GNTTABOP_get_status_frames 9
+#define GNTTABOP_get_version 10
+#define GNTTABOP_swap_grant_ref 11
+#define GNTTABOP_cache_flush 12
+/* ` } */
+
/*
* Handle to track a mapping created via a grant reference.
*/
@@ -244,7 +264,7 @@ typedef uint32_t grant_handle_t;
/*
* GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
* by devices and/or host CPUs. If successful, <handle> is a tracking number
- * that must be presented later to destroy the mapping(s). On error, <handle>
+ * that must be presented later to destroy the mapping(s). On error, <status>
* is a negative status code.
* NOTES:
* 1. If GNTMAP_device_map is specified then <dev_bus_addr> is the address
@@ -258,7 +278,6 @@ typedef uint32_t grant_handle_t;
* host mapping is destroyed by other means then it is *NOT* guaranteed
* to be accounted to the correct grant reference!
*/
-#define GNTTABOP_map_grant_ref 0
struct gnttab_map_grant_ref {
/* IN parameters. */
uint64_t host_addr;
@@ -283,7 +302,6 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_map_grant_ref);
* 3. After executing a batch of unmaps, it is guaranteed that no stale
* mappings will remain in the device or host TLBs.
*/
-#define GNTTABOP_unmap_grant_ref 1
struct gnttab_unmap_grant_ref {
/* IN parameters. */
uint64_t host_addr;
@@ -303,7 +321,6 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_grant_ref);
* 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
* 3. Xen may not support more than a single grant-table page per domain.
*/
-#define GNTTABOP_setup_table 2
struct gnttab_setup_table {
/* IN parameters. */
domid_t dom;
@@ -318,7 +335,6 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_setup_table);
* GNTTABOP_dump_table: Dump the contents of the grant table to the
* xen console. Debugging use only.
*/
-#define GNTTABOP_dump_table 3
struct gnttab_dump_table {
/* IN parameters. */
domid_t dom;
@@ -328,17 +344,17 @@ struct gnttab_dump_table {
DEFINE_GUEST_HANDLE_STRUCT(gnttab_dump_table);
/*
- * GNTTABOP_transfer_grant_ref: Transfer <frame> to a foreign domain. The
- * foreign domain has previously registered its interest in the transfer via
- * <domid, ref>.
+ * GNTTABOP_transfer: Transfer <frame> to a foreign domain. The foreign domain
+ * has previously registered its interest in the transfer via <domid, ref>.
*
* Note that, even if the transfer fails, the specified page no longer belongs
* to the calling domain *unless* the error is GNTST_bad_page.
+ *
+ * Note further that only PV guests can use this operation.
*/
-#define GNTTABOP_transfer 4
struct gnttab_transfer {
/* IN parameters. */
- xen_pfn_t mfn;
+ xen_pfn_t mfn;
domid_t domid;
grant_ref_t ref;
/* OUT parameters. */
@@ -369,21 +385,20 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_transfer);
#define _GNTCOPY_dest_gref (1)
#define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref)
-#define GNTTABOP_copy 5
struct gnttab_copy {
- /* IN parameters. */
- struct {
- union {
- grant_ref_t ref;
- xen_pfn_t gmfn;
- } u;
- domid_t domid;
- uint16_t offset;
- } source, dest;
- uint16_t len;
- uint16_t flags; /* GNTCOPY_* */
- /* OUT parameters. */
- int16_t status;
+ /* IN parameters. */
+ struct gnttab_copy_ptr {
+ union {
+ grant_ref_t ref;
+ xen_pfn_t gmfn;
+ } u;
+ domid_t domid;
+ uint16_t offset;
+ } source, dest;
+ uint16_t len;
+ uint16_t flags; /* GNTCOPY_* */
+ /* OUT parameters. */
+ int16_t status;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_copy);
@@ -394,7 +409,6 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_copy);
* 1. <dom> may be specified as DOMID_SELF.
* 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
*/
-#define GNTTABOP_query_size 6
struct gnttab_query_size {
/* IN parameters. */
domid_t dom;
@@ -416,7 +430,6 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_size);
* 2. After executing a batch of unmaps, it is guaranteed that no stale
* mappings will remain in the device or host TLBs.
*/
-#define GNTTABOP_unmap_and_replace 7
struct gnttab_unmap_and_replace {
/* IN parameters. */
uint64_t host_addr;
@@ -429,14 +442,12 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_and_replace);
/*
* GNTTABOP_set_version: Request a particular version of the grant
- * table shared table structure. This operation can only be performed
- * once in any given domain. It must be performed before any grants
- * are activated; otherwise, the domain will be stuck with version 1.
- * The only defined versions are 1 and 2.
+ * table shared table structure. This operation may be used to toggle
+ * between different versions, but must be performed while no grants
+ * are active. The only defined versions are 1 and 2.
*/
-#define GNTTABOP_set_version 8
struct gnttab_set_version {
- /* IN parameters */
+ /* IN/OUT parameters */
uint32_t version;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_set_version);
@@ -453,7 +464,6 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_set_version);
* 1. <dom> may be specified as DOMID_SELF.
* 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
*/
-#define GNTTABOP_get_status_frames 9
struct gnttab_get_status_frames {
/* IN parameters. */
uint32_t nr_frames;
@@ -468,7 +478,6 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_status_frames);
* GNTTABOP_get_version: Get the grant table version which is in
* effect for domain <dom>.
*/
-#define GNTTABOP_get_version 10
struct gnttab_get_version {
/* IN parameters */
domid_t dom;
@@ -479,26 +488,37 @@ struct gnttab_get_version {
DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_version);
/*
+ * GNTTABOP_swap_grant_ref: Swap the contents of two grant entries.
+ */
+struct gnttab_swap_grant_ref {
+ /* IN parameters */
+ grant_ref_t ref_a;
+ grant_ref_t ref_b;
+ /* OUT parameters */
+ int16_t status; /* GNTST_* */
+};
+DEFINE_GUEST_HANDLE_STRUCT(gnttab_swap_grant_ref);
+
+/*
* Issue one or more cache maintenance operations on a portion of a
* page granted to the calling domain by a foreign domain.
*/
-#define GNTTABOP_cache_flush 12
struct gnttab_cache_flush {
union {
uint64_t dev_bus_addr;
grant_ref_t ref;
} a;
- uint16_t offset; /* offset from start of grant */
- uint16_t length; /* size within the grant */
-#define GNTTAB_CACHE_CLEAN (1<<0)
-#define GNTTAB_CACHE_INVAL (1<<1)
-#define GNTTAB_CACHE_SOURCE_GREF (1<<31)
+ uint16_t offset; /* offset from start of grant */
+ uint16_t length; /* size within the grant */
+#define GNTTAB_CACHE_CLEAN (1u<<0)
+#define GNTTAB_CACHE_INVAL (1u<<1)
+#define GNTTAB_CACHE_SOURCE_GREF (1u<<31)
uint32_t op;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_cache_flush);
/*
- * Bitfield values for update_pin_status.flags.
+ * Bitfield values for gnttab_map_grant_ref.flags.
*/
/* Map the grant entry for access by I/O devices. */
#define _GNTMAP_device_map (0)
@@ -548,6 +568,7 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_cache_flush);
#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */
#define GNTST_address_too_big (-11) /* transfer page address too large. */
#define GNTST_eagain (-12) /* Operation not done; try again. */
+#define GNTST_no_space (-13) /* Out of space (handles etc). */
#define GNTTABOP_error_msgs { \
"okay", \
@@ -562,7 +583,8 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_cache_flush);
"bad page", \
"copy arguments cross page boundary", \
"page address size too large", \
- "operation not done; try again" \
+ "operation not done; try again", \
+ "out of space", \
}
#endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
diff --git a/include/xen/interface/hvm/dm_op.h b/include/xen/interface/hvm/dm_op.h
index ee9e480bc559..08d972f87c7b 100644
--- a/include/xen/interface/hvm/dm_op.h
+++ b/include/xen/interface/hvm/dm_op.h
@@ -1,23 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright (c) 2016, Citrix Systems Inc
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
*/
#ifndef __XEN_PUBLIC_HVM_DM_OP_H__
diff --git a/include/xen/interface/hvm/hvm_op.h b/include/xen/interface/hvm/hvm_op.h
index 956a04682865..03134bf3cec1 100644
--- a/include/xen/interface/hvm/hvm_op.h
+++ b/include/xen/interface/hvm/hvm_op.h
@@ -1,26 +1,10 @@
-/*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
+/* SPDX-License-Identifier: MIT */
#ifndef __XEN_PUBLIC_HVM_HVM_OP_H__
#define __XEN_PUBLIC_HVM_HVM_OP_H__
+#include <xen/interface/xen.h>
+
/* Get/set subcommands: the second argument of the hypercall is a
* pointer to a xen_hvm_param struct. */
#define HVMOP_set_param 0
@@ -62,4 +46,23 @@ struct xen_hvm_get_mem_type {
};
DEFINE_GUEST_HANDLE_STRUCT(xen_hvm_get_mem_type);
+#if defined(__i386__) || defined(__x86_64__)
+
+/*
+ * HVMOP_set_evtchn_upcall_vector: Set a <vector> that should be used for event
+ * channel upcalls on the specified <vcpu>. If set, this vector will be
+ * used in preference to the domain-global "callback via" (see
+ * HVM_PARAM_CALLBACK_IRQ).
+ */
+#define HVMOP_set_evtchn_upcall_vector 23
+struct xen_hvm_evtchn_upcall_vector {
+ uint32_t vcpu;
+ uint8_t vector;
+};
+typedef struct xen_hvm_evtchn_upcall_vector xen_hvm_evtchn_upcall_vector_t;
+DEFINE_GUEST_HANDLE_STRUCT(xen_hvm_evtchn_upcall_vector_t);
+
+#endif /* defined(__i386__) || defined(__x86_64__) */
+
#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
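A hypothetical guest-side registration sketch; the per_cpu(xen_vcpu_id, ...) lookup and error handling are assumptions, only the struct and subop number come from this header:

static int example_set_upcall_vector(unsigned int cpu, u8 vector)
{
	struct xen_hvm_evtchn_upcall_vector op = {
		.vcpu = per_cpu(xen_vcpu_id, cpu),	/* assumed vcpu mapping */
		.vector = vector,
	};

	return HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &op);
}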
diff --git a/include/xen/interface/hvm/hvm_vcpu.h b/include/xen/interface/hvm/hvm_vcpu.h
index 32ca83edd44d..cbf93493275c 100644
--- a/include/xen/interface/hvm/hvm_vcpu.h
+++ b/include/xen/interface/hvm/hvm_vcpu.h
@@ -1,22 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2015, Roger Pau Monne <roger.pau@citrix.com>
*/
@@ -131,13 +114,3 @@ struct vcpu_hvm_context {
typedef struct vcpu_hvm_context vcpu_hvm_context_t;
#endif /* __XEN_PUBLIC_HVM_HVM_VCPU_H__ */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/include/xen/interface/hvm/params.h b/include/xen/interface/hvm/params.h
index 4d61fc58d99d..4e2c94b3c466 100644
--- a/include/xen/interface/hvm/params.h
+++ b/include/xen/interface/hvm/params.h
@@ -1,22 +1,4 @@
-/*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
+/* SPDX-License-Identifier: MIT */
#ifndef __XEN_PUBLIC_HVM_PARAMS_H__
#define __XEN_PUBLIC_HVM_PARAMS_H__
diff --git a/include/xen/interface/hvm/start_info.h b/include/xen/interface/hvm/start_info.h
index 50af9ea2ff1e..e33557c0b4e9 100644
--- a/include/xen/interface/hvm/start_info.h
+++ b/include/xen/interface/hvm/start_info.h
@@ -1,22 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2016, Citrix Systems, Inc.
*/
diff --git a/include/xen/interface/io/9pfs.h b/include/xen/interface/io/9pfs.h
index 5b6c19dae5e2..f1a4c5ad2fd1 100644
--- a/include/xen/interface/io/9pfs.h
+++ b/include/xen/interface/io/9pfs.h
@@ -1,24 +1,7 @@
+/* SPDX-License-Identifier: MIT */
/*
* 9pfs.h -- Xen 9PFS transport
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (C) 2017 Stefano Stabellini <stefano@aporeto.com>
*/
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index 5e40041c7e95..ba1e9f5b630e 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* blkif.h
*
diff --git a/include/xen/interface/io/console.h b/include/xen/interface/io/console.h
index 85ca8b02695a..cf17e89ed861 100644
--- a/include/xen/interface/io/console.h
+++ b/include/xen/interface/io/console.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* console.h
*
diff --git a/include/xen/interface/io/displif.h b/include/xen/interface/io/displif.h
index fdc279dc4a88..18417b017869 100644
--- a/include/xen/interface/io/displif.h
+++ b/include/xen/interface/io/displif.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* displif.h
*
* Unified display device I/O interface for Xen guest OSes.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (C) 2016-2017 EPAM Systems Inc.
*
* Authors: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
@@ -38,7 +21,8 @@
* Protocol version
******************************************************************************
*/
-#define XENDISPL_PROTOCOL_VERSION "1"
+#define XENDISPL_PROTOCOL_VERSION "2"
+#define XENDISPL_PROTOCOL_VERSION_INT 2
/*
******************************************************************************
@@ -202,6 +186,9 @@
* Width and height of the connector in pixels separated by
* XENDISPL_RESOLUTION_SEPARATOR. This defines visible area of the
* display.
+ * If backend provides extended display identification data (EDID) with
+ * XENDISPL_OP_GET_EDID request then EDID values must take precedence
+ * over the resolutions defined here.
*
*------------------ Connector Request Transport Parameters -------------------
*
@@ -349,6 +336,8 @@
#define XENDISPL_OP_FB_DETACH 0x13
#define XENDISPL_OP_SET_CONFIG 0x14
#define XENDISPL_OP_PG_FLIP 0x15
+/* The below command is available in protocol version 2 and above. */
+#define XENDISPL_OP_GET_EDID 0x16
/*
******************************************************************************
@@ -377,6 +366,10 @@
#define XENDISPL_FIELD_BE_ALLOC "be-alloc"
#define XENDISPL_FIELD_UNIQUE_ID "unique-id"
+#define XENDISPL_EDID_BLOCK_SIZE 128
+#define XENDISPL_EDID_BLOCK_COUNT 256
+#define XENDISPL_EDID_MAX_SIZE (XENDISPL_EDID_BLOCK_SIZE * XENDISPL_EDID_BLOCK_COUNT)
+
/*
******************************************************************************
* STATUS RETURN CODES
@@ -451,7 +444,9 @@
* +----------------+----------------+----------------+----------------+
* | gref_directory | 40
* +----------------+----------------+----------------+----------------+
- * | reserved | 44
+ * | data_ofs | 44
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 48
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
@@ -494,6 +489,7 @@
* buffer size (buffer_sz) exceeds what can be addressed by this single page,
* then reference to the next page must be supplied (see gref_dir_next_page
* below)
+ * data_ofs - uint32_t, offset of the data in the buffer, octets
*/
#define XENDISPL_DBUF_FLG_REQ_ALLOC (1 << 0)
@@ -506,6 +502,7 @@ struct xendispl_dbuf_create_req {
uint32_t buffer_sz;
uint32_t flags;
grant_ref_t gref_directory;
+ uint32_t data_ofs;
};
/*
@@ -732,6 +729,44 @@ struct xendispl_page_flip_req {
};
/*
+ * Request EDID - request EDID describing current connector:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | _OP_GET_EDID | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | buffer_sz | 8
+ * +----------------+----------------+----------------+----------------+
+ * | gref_directory | 12
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 16
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Notes:
+ * - This command is not available in protocol version 1 and should be
+ * ignored.
+ * - This request is optional; if it is not supported, the visible area
+ *   is defined by the connector's XenStore "resolution" property.
+ * - The shared buffer allocated for EDID storage must not be smaller than
+ *   XENDISPL_EDID_MAX_SIZE octets.
+ *
+ * buffer_sz - uint32_t, buffer size to be allocated, octets
+ * gref_directory - grant_ref_t, a reference to the first shared page
+ * describing EDID buffer references. See XENDISPL_OP_DBUF_CREATE for
+ * grant page directory structure (struct xendispl_page_directory).
+ *
+ * See response format for this request.
+ */
+
+struct xendispl_get_edid_req {
+ uint32_t buffer_sz;
+ grant_ref_t gref_directory;
+};
+
+/*
*---------------------------------- Responses --------------------------------
*
* All response packets have the same length (64 octets)
@@ -753,6 +788,35 @@ struct xendispl_page_flip_req {
* id - uint16_t, private guest value, echoed from request
* status - int32_t, response status, zero on success and -XEN_EXX on failure
*
+ *
+ * Get EDID response - response for XENDISPL_OP_GET_EDID:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | operation | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | status | 8
+ * +----------------+----------------+----------------+----------------+
+ * | edid_sz | 12
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 16
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Notes:
+ * - This response is not available in protocol version 1 and should be
+ * ignored.
+ *
+ * edid_sz - uint32_t, size of the EDID, octets
+ */
+
+struct xendispl_get_edid_resp {
+ uint32_t edid_sz;
+};
+
+/*
*----------------------------------- Events ----------------------------------
*
* Events are sent via a shared page allocated by the front and propagated by
@@ -804,6 +868,7 @@ struct xendispl_req {
struct xendispl_fb_detach_req fb_detach;
struct xendispl_set_config_req set_config;
struct xendispl_page_flip_req pg_flip;
+ struct xendispl_get_edid_req get_edid;
uint8_t reserved[56];
} op;
};
@@ -813,7 +878,10 @@ struct xendispl_resp {
uint8_t operation;
uint8_t reserved;
int32_t status;
- uint8_t reserved1[56];
+ union {
+ struct xendispl_get_edid_resp get_edid;
+ uint8_t reserved1[56];
+ } op;
};
struct xendispl_evt {
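
A minimal sketch of a protocol-version-2 frontend filling the new EDID request, assuming the usual id/operation header fields of struct xendispl_req and a grant-page directory prepared as described for XENDISPL_OP_DBUF_CREATE:

/* Fill a GET_EDID request slot; ring handling and grant setup are
 * assumed to exist elsewhere in the frontend. */
static void xendispl_fill_get_edid(struct xendispl_req *req, uint16_t id,
				   grant_ref_t gref_directory)
{
	req->id = id;
	req->operation = XENDISPL_OP_GET_EDID;
	req->op.get_edid.buffer_sz = XENDISPL_EDID_MAX_SIZE;
	req->op.get_edid.gref_directory = gref_directory;
}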
diff --git a/include/xen/interface/io/fbif.h b/include/xen/interface/io/fbif.h
index 974a51ed9165..60ca808cef97 100644
--- a/include/xen/interface/io/fbif.h
+++ b/include/xen/interface/io/fbif.h
@@ -1,24 +1,7 @@
+/* SPDX-License-Identifier: MIT */
/*
* fbif.h -- Xen virtual frame buffer device
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
* Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
*/
diff --git a/include/xen/interface/io/kbdif.h b/include/xen/interface/io/kbdif.h
index 5c7630d7376e..b8b08aa53325 100644
--- a/include/xen/interface/io/kbdif.h
+++ b/include/xen/interface/io/kbdif.h
@@ -1,24 +1,7 @@
+/* SPDX-License-Identifier: MIT */
/*
* kbdif.h -- Xen virtual keyboard/mouse
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
* Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
*/
diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
index 4f20dbc42910..cb0c1a25d5d4 100644
--- a/include/xen/interface/io/netif.h
+++ b/include/xen/interface/io/netif.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* xen_netif.h
*
* Unified network-device I/O interface for Xen guest OSes.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2003-2004, Keir Fraser
*/
@@ -161,6 +144,19 @@
*/
/*
+ * "xdp-headroom" is used to request that extra space is added
+ * for XDP processing. The value is measured in bytes and passed by
+ * the frontend to be consistent between both ends.
+ * If the value is greater than zero that means that
+ * an RX response is going to be passed to an XDP program for processing.
+ * XEN_NETIF_MAX_XDP_HEADROOM defines the maximum headroom offset in bytes
+ *
+ * "feature-xdp-headroom" is set to "1" by the netback side like other features
+ * so a guest can check if an XDP program can be processed.
+ */
+#define XEN_NETIF_MAX_XDP_HEADROOM 0x7FFF
+
+/*
* Control ring
* ============
*
@@ -846,7 +842,8 @@ struct xen_netif_tx_request {
#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */
#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */
#define XEN_NETIF_EXTRA_TYPE_HASH (4) /* u.hash */
-#define XEN_NETIF_EXTRA_TYPE_MAX (5)
+#define XEN_NETIF_EXTRA_TYPE_XDP (5) /* u.xdp */
+#define XEN_NETIF_EXTRA_TYPE_MAX (6)
/* xen_netif_extra_info_t flags. */
#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
@@ -879,6 +876,10 @@ struct xen_netif_extra_info {
uint8_t algorithm;
uint8_t value[4];
} hash;
+ struct {
+ uint16_t headroom;
+ uint16_t pad[2];
+ } xdp;
uint16_t pad[3];
} u;
};
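
A short sketch of how a frontend might fill the new extra-info slot once "feature-xdp-headroom" has been negotiated (slot acquisition and the rest of the TX path are assumed; type and flags are the usual extra-info header fields):

/* Advertise the XDP headroom for a TX request via the new
 * XEN_NETIF_EXTRA_TYPE_XDP extra-info type. */
static void netif_set_xdp_extra(struct xen_netif_extra_info *extra,
				uint16_t headroom)
{
	extra->type = XEN_NETIF_EXTRA_TYPE_XDP;
	extra->flags = 0;
	extra->u.xdp.headroom = headroom;
}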
diff --git a/include/xen/interface/io/pciif.h b/include/xen/interface/io/pciif.h
index d9922ae36eb5..d1a87b62daae 100644
--- a/include/xen/interface/io/pciif.h
+++ b/include/xen/interface/io/pciif.h
@@ -1,24 +1,7 @@
+/* SPDX-License-Identifier: MIT */
/*
* PCI Backend/Frontend Common Data Structures & Macros
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
*/
#ifndef __XEN_PCI_COMMON_H__
diff --git a/include/xen/interface/io/protocols.h b/include/xen/interface/io/protocols.h
index 6a89dc1bf225..22099bb4079f 100644
--- a/include/xen/interface/io/protocols.h
+++ b/include/xen/interface/io/protocols.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __XEN_PROTOCOLS_H__
#define __XEN_PROTOCOLS_H__
diff --git a/include/xen/interface/io/pvcalls.h b/include/xen/interface/io/pvcalls.h
index ccf97b817e72..b6680fdbe2a8 100644
--- a/include/xen/interface/io/pvcalls.h
+++ b/include/xen/interface/io/pvcalls.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: MIT */
+
#ifndef __XEN_PUBLIC_IO_XEN_PVCALLS_H__
#define __XEN_PUBLIC_IO_XEN_PVCALLS_H__
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
index 2af7a1cd6658..ba4c4274b714 100644
--- a/include/xen/interface/io/ring.h
+++ b/include/xen/interface/io/ring.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* ring.h
*
@@ -10,12 +10,27 @@
#ifndef __XEN_PUBLIC_IO_RING_H__
#define __XEN_PUBLIC_IO_RING_H__
+/*
+ * When #include'ing this header, you need to provide the following
+ * declarations upfront:
+ * - standard integer types (uint8_t, uint16_t, etc.)
+ * They are provided by stdint.h of the standard headers.
+ *
+ * In addition, if you intend to use the FLEX macros, you also need to
+ * provide the following, before invoking the FLEX macros:
+ * - size_t
+ * - memcpy
+ * - grant_ref_t
+ * These declarations are provided by string.h of the standard headers,
+ * and grant_table.h from the Xen public headers.
+ */
+
#include <xen/interface/grant_table.h>
typedef unsigned int RING_IDX;
/* Round a 32-bit unsigned constant down to the nearest power of two. */
-#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
+#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
#define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
#define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
@@ -27,82 +42,78 @@ typedef unsigned int RING_IDX;
* A ring contains as many entries as will fit, rounded down to the nearest
* power of two (so we can mask with (size-1) to loop around).
*/
-#define __CONST_RING_SIZE(_s, _sz) \
- (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
- sizeof(((struct _s##_sring *)0)->ring[0])))
-
+#define __CONST_RING_SIZE(_s, _sz) \
+ (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
+ sizeof(((struct _s##_sring *)0)->ring[0])))
/*
* The same for passing in an actual pointer instead of a name tag.
*/
-#define __RING_SIZE(_s, _sz) \
- (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
+#define __RING_SIZE(_s, _sz) \
+ (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
/*
* Macros to make the correct C datatypes for a new kind of ring.
*
* To make a new ring datatype, you need to have two message structures,
- * let's say struct request, and struct response already defined.
+ * let's say request_t and response_t, already defined.
*
* In a header where you want the ring datatype declared, you then do:
*
- * DEFINE_RING_TYPES(mytag, struct request, struct response);
+ * DEFINE_RING_TYPES(mytag, request_t, response_t);
*
* These expand out to give you a set of types, as you can see below.
* The most important of these are:
*
- * struct mytag_sring - The shared ring.
- * struct mytag_front_ring - The 'front' half of the ring.
- * struct mytag_back_ring - The 'back' half of the ring.
+ * mytag_sring_t - The shared ring.
+ * mytag_front_ring_t - The 'front' half of the ring.
+ * mytag_back_ring_t - The 'back' half of the ring.
*
* To initialize a ring in your code you need to know the location and size
 * of the shared memory area (PAGE_SIZE, for instance). To initialize
* the front half:
*
- * struct mytag_front_ring front_ring;
- * SHARED_RING_INIT((struct mytag_sring *)shared_page);
- * FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
- * PAGE_SIZE);
+ * mytag_front_ring_t ring;
+ * XEN_FRONT_RING_INIT(&ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
*
* Initializing the back follows similarly (note that only the front
* initializes the shared ring):
*
- * struct mytag_back_ring back_ring;
- * BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
- * PAGE_SIZE);
+ * mytag_back_ring_t back_ring;
+ * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
*/
-#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
- \
-/* Shared ring entry */ \
-union __name##_sring_entry { \
- __req_t req; \
- __rsp_t rsp; \
-}; \
- \
-/* Shared ring page */ \
-struct __name##_sring { \
- RING_IDX req_prod, req_event; \
- RING_IDX rsp_prod, rsp_event; \
- uint8_t pad[48]; \
- union __name##_sring_entry ring[1]; /* variable-length */ \
-}; \
- \
-/* "Front" end's private variables */ \
-struct __name##_front_ring { \
- RING_IDX req_prod_pvt; \
- RING_IDX rsp_cons; \
- unsigned int nr_ents; \
- struct __name##_sring *sring; \
-}; \
- \
-/* "Back" end's private variables */ \
-struct __name##_back_ring { \
- RING_IDX rsp_prod_pvt; \
- RING_IDX req_cons; \
- unsigned int nr_ents; \
- struct __name##_sring *sring; \
-};
-
+#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
+ \
+/* Shared ring entry */ \
+union __name##_sring_entry { \
+ __req_t req; \
+ __rsp_t rsp; \
+}; \
+ \
+/* Shared ring page */ \
+struct __name##_sring { \
+ RING_IDX req_prod, req_event; \
+ RING_IDX rsp_prod, rsp_event; \
+ uint8_t __pad[48]; \
+ union __name##_sring_entry ring[1]; /* variable-length */ \
+}; \
+ \
+/* "Front" end's private variables */ \
+struct __name##_front_ring { \
+ RING_IDX req_prod_pvt; \
+ RING_IDX rsp_cons; \
+ unsigned int nr_ents; \
+ struct __name##_sring *sring; \
+}; \
+ \
+/* "Back" end's private variables */ \
+struct __name##_back_ring { \
+ RING_IDX rsp_prod_pvt; \
+ RING_IDX req_cons; \
+ unsigned int nr_ents; \
+ struct __name##_sring *sring; \
+}; \
+ \
/*
* Macros for manipulating rings.
*
@@ -119,94 +130,109 @@ struct __name##_back_ring { \
*/
/* Initialising empty rings */
-#define SHARED_RING_INIT(_s) do { \
- (_s)->req_prod = (_s)->rsp_prod = 0; \
- (_s)->req_event = (_s)->rsp_event = 1; \
- memset((_s)->pad, 0, sizeof((_s)->pad)); \
+#define SHARED_RING_INIT(_s) do { \
+ (_s)->req_prod = (_s)->rsp_prod = 0; \
+ (_s)->req_event = (_s)->rsp_event = 1; \
+ (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
} while(0)
-#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \
- (_r)->req_prod_pvt = (_i); \
- (_r)->rsp_cons = (_i); \
- (_r)->nr_ents = __RING_SIZE(_s, __size); \
- (_r)->sring = (_s); \
+#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \
+ (_r)->req_prod_pvt = (_i); \
+ (_r)->rsp_cons = (_i); \
+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
+ (_r)->sring = (_s); \
} while (0)
#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)
-#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \
- (_r)->rsp_prod_pvt = (_i); \
- (_r)->req_cons = (_i); \
- (_r)->nr_ents = __RING_SIZE(_s, __size); \
- (_r)->sring = (_s); \
+#define XEN_FRONT_RING_INIT(r, s, size) do { \
+ SHARED_RING_INIT(s); \
+ FRONT_RING_INIT(r, s, size); \
+} while (0)
+
+#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \
+ (_r)->rsp_prod_pvt = (_i); \
+ (_r)->req_cons = (_i); \
+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
+ (_r)->sring = (_s); \
} while (0)
#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)
/* How big is this ring? */
-#define RING_SIZE(_r) \
+#define RING_SIZE(_r) \
((_r)->nr_ents)
/* Number of free requests (for use on front side only). */
-#define RING_FREE_REQUESTS(_r) \
+#define RING_FREE_REQUESTS(_r) \
(RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
/* Test if there is an empty slot available on the front ring.
* (This is only meaningful from the front. )
*/
-#define RING_FULL(_r) \
+#define RING_FULL(_r) \
(RING_FREE_REQUESTS(_r) == 0)
/* Test if there are outstanding messages to be processed on a ring. */
-#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
+#define XEN_RING_NR_UNCONSUMED_RESPONSES(_r) \
((_r)->sring->rsp_prod - (_r)->rsp_cons)
-#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
- ({ \
- unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
- unsigned int rsp = RING_SIZE(_r) - \
- ((_r)->req_cons - (_r)->rsp_prod_pvt); \
- req < rsp ? req : rsp; \
- })
+#define XEN_RING_NR_UNCONSUMED_REQUESTS(_r) ({ \
+ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
+ unsigned int rsp = RING_SIZE(_r) - \
+ ((_r)->req_cons - (_r)->rsp_prod_pvt); \
+ req < rsp ? req : rsp; \
+})
+
+#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
+ (!!XEN_RING_NR_UNCONSUMED_RESPONSES(_r))
+#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
+ (!!XEN_RING_NR_UNCONSUMED_REQUESTS(_r))
/* Direct access to individual ring elements, by index. */
-#define RING_GET_REQUEST(_r, _idx) \
+#define RING_GET_REQUEST(_r, _idx) \
(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
+#define RING_GET_RESPONSE(_r, _idx) \
+ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
+
/*
- * Get a local copy of a request.
+ * Get a local copy of a request/response.
*
- * Use this in preference to RING_GET_REQUEST() so all processing is
+ * Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing is
* done on a local copy that cannot be modified by the other end.
*
* Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
- * to be ineffective where _req is a struct which consists of only bitfields.
+ * to be ineffective where dest is a struct which consists of only bitfields.
*/
-#define RING_COPY_REQUEST(_r, _idx, _req) do { \
- /* Use volatile to force the copy into _req. */ \
- *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
+#define RING_COPY_(type, r, idx, dest) do { \
+ /* Use volatile to force the copy into dest. */ \
+ *(dest) = *(volatile typeof(dest))RING_GET_##type(r, idx); \
} while (0)
-#define RING_GET_RESPONSE(_r, _idx) \
- (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
+#define RING_COPY_REQUEST(r, idx, req) RING_COPY_(REQUEST, r, idx, req)
+#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp)
/* Loop termination condition: Would the specified index overflow the ring? */
-#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
+#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
(((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
/* Ill-behaved frontend determination: Can there be this many requests? */
-#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
+#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
(((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
+/* Ill-behaved backend determination: Can there be this many responses? */
+#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod) \
+ (((_prod) - (_r)->rsp_cons) > RING_SIZE(_r))
-#define RING_PUSH_REQUESTS(_r) do { \
- virt_wmb(); /* back sees requests /before/ updated producer index */ \
- (_r)->sring->req_prod = (_r)->req_prod_pvt; \
+#define RING_PUSH_REQUESTS(_r) do { \
+ virt_wmb(); /* back sees requests /before/ updated producer index */\
+ (_r)->sring->req_prod = (_r)->req_prod_pvt; \
} while (0)
-#define RING_PUSH_RESPONSES(_r) do { \
- virt_wmb(); /* front sees responses /before/ updated producer index */ \
- (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
+#define RING_PUSH_RESPONSES(_r) do { \
+ virt_wmb(); /* front sees resps /before/ updated producer index */ \
+ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
} while (0)
/*
@@ -239,40 +265,40 @@ struct __name##_back_ring { \
* field appropriately.
*/
-#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
- RING_IDX __old = (_r)->sring->req_prod; \
- RING_IDX __new = (_r)->req_prod_pvt; \
- virt_wmb(); /* back sees requests /before/ updated producer index */ \
- (_r)->sring->req_prod = __new; \
- virt_mb(); /* back sees new requests /before/ we check req_event */ \
- (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
- (RING_IDX)(__new - __old)); \
+#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
+ RING_IDX __old = (_r)->sring->req_prod; \
+ RING_IDX __new = (_r)->req_prod_pvt; \
+ virt_wmb(); /* back sees requests /before/ updated producer index */\
+ (_r)->sring->req_prod = __new; \
+ virt_mb(); /* back sees new requests /before/ we check req_event */ \
+ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
+ (RING_IDX)(__new - __old)); \
} while (0)
-#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
- RING_IDX __old = (_r)->sring->rsp_prod; \
- RING_IDX __new = (_r)->rsp_prod_pvt; \
- virt_wmb(); /* front sees responses /before/ updated producer index */ \
- (_r)->sring->rsp_prod = __new; \
- virt_mb(); /* front sees new responses /before/ we check rsp_event */ \
- (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
- (RING_IDX)(__new - __old)); \
+#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
+ RING_IDX __old = (_r)->sring->rsp_prod; \
+ RING_IDX __new = (_r)->rsp_prod_pvt; \
+ virt_wmb(); /* front sees resps /before/ updated producer index */ \
+ (_r)->sring->rsp_prod = __new; \
+ virt_mb(); /* front sees new resps /before/ we check rsp_event */ \
+ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
+ (RING_IDX)(__new - __old)); \
} while (0)
-#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
- (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
- if (_work_to_do) break; \
- (_r)->sring->req_event = (_r)->req_cons + 1; \
- virt_mb(); \
- (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
+#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
+ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
+ if (_work_to_do) break; \
+ (_r)->sring->req_event = (_r)->req_cons + 1; \
+ virt_mb(); \
+ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
} while (0)
-#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
- (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
- if (_work_to_do) break; \
- (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
- virt_mb(); \
- (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
+#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
+ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
+ if (_work_to_do) break; \
+ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
+ virt_mb(); \
+ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
} while (0)
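
Putting the comment above into practice, a minimal sketch for a hypothetical "mytag" protocol (the request/response layouts are made up, and PAGE_SIZE is assumed to be the size of the shared page):

struct mytag_request  { uint32_t op; };
struct mytag_response { int32_t status; };
DEFINE_RING_TYPES(mytag, struct mytag_request, struct mytag_response);

/* The frontend owns the shared page, so it initializes the shared ring
 * and its private front half in one go. */
static void mytag_front_init(struct mytag_front_ring *front, void *page)
{
	XEN_FRONT_RING_INIT(front, (struct mytag_sring *)page, PAGE_SIZE);
}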
diff --git a/include/xen/interface/io/sndif.h b/include/xen/interface/io/sndif.h
index 2aac8f73614c..445657cdb1de 100644
--- a/include/xen/interface/io/sndif.h
+++ b/include/xen/interface/io/sndif.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* sndif.h
*
* Unified sound-device I/O interface for Xen guest OSes.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (C) 2013-2015 GlobalLogic Inc.
* Copyright (C) 2016-2017 EPAM Systems Inc.
*
diff --git a/include/xen/interface/io/usbif.h b/include/xen/interface/io/usbif.h
new file mode 100644
index 000000000000..a70e0b93178b
--- /dev/null
+++ b/include/xen/interface/io/usbif.h
@@ -0,0 +1,405 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * usbif.h
+ *
+ * USB I/O interface for Xen guest OSes.
+ *
+ * Copyright (C) 2009, FUJITSU LABORATORIES LTD.
+ * Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
+ */
+
+#ifndef __XEN_PUBLIC_IO_USBIF_H__
+#define __XEN_PUBLIC_IO_USBIF_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+/*
+ * Detailed Interface Description
+ * ==============================
+ * The pvUSB interface is using a split driver design: a frontend driver in
+ * the guest and a backend driver in a driver domain (normally dom0) having
+ * access to the physical USB device(s) being passed to the guest.
+ *
+ * The frontend and backend drivers use XenStore to initiate the connection
+ * between them, the I/O activity is handled via two shared ring pages and an
+ * event channel. As the interface between frontend and backend is at the USB
+ * host connector level, multiple (up to 31) physical USB devices can be
+ * handled by a single connection.
+ *
+ * The Xen pvUSB device name is "qusb", so the frontend's XenStore entries are
+ * to be found under "device/qusb", while the backend's XenStore entries are
+ * under "backend/<guest-dom-id>/qusb".
+ *
+ * When a new pvUSB connection is established, the frontend needs to set up the
+ * two shared ring pages for communication and the event channel. The ring
+ * pages need to be made available to the backend via the grant table
+ * interface.
+ *
+ * One of the shared ring pages is used by the backend to inform the frontend
+ * about USB device plug events (device to be added or removed). This is the
+ * "conn-ring".
+ *
+ * The other ring page is used for USB I/O communication (requests and
+ * responses). This is the "urb-ring".
+ *
+ * Feature and Parameter Negotiation
+ * =================================
+ * The two halves of a Xen pvUSB driver utilize nodes within the XenStore to
+ * communicate capabilities and to negotiate operating parameters. This
+ * section enumerates these nodes which reside in the respective front and
+ * backend portions of the XenStore, following the XenBus convention.
+ *
+ * Any specified default value is in effect if the corresponding XenBus node
+ * is not present in the XenStore.
+ *
+ * XenStore nodes in sections marked "PRIVATE" are solely for use by the
+ * driver side whose XenBus tree contains them.
+ *
+ *****************************************************************************
+ * Backend XenBus Nodes
+ *****************************************************************************
+ *
+ *------------------ Backend Device Identification (PRIVATE) ------------------
+ *
+ * num-ports
+ * Values: unsigned [1...31]
+ *
+ * Number of ports for this (virtual) USB host connector.
+ *
+ * usb-ver
+ * Values: unsigned [1...2]
+ *
+ * USB version of this host connector: 1 = USB 1.1, 2 = USB 2.0.
+ *
+ * port/[1...31]
+ * Values: string
+ *
+ * Physical USB device connected to the given port, e.g. "3-1.5".
+ *
+ *****************************************************************************
+ * Frontend XenBus Nodes
+ *****************************************************************************
+ *
+ *----------------------- Request Transport Parameters -----------------------
+ *
+ * event-channel
+ * Values: unsigned
+ *
+ * The identifier of the Xen event channel used to signal activity
+ * in the ring buffer.
+ *
+ * urb-ring-ref
+ * Values: unsigned
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * the sole page in a single page sized ring buffer. This is the ring
+ * buffer for urb requests.
+ *
+ * conn-ring-ref
+ * Values: unsigned
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * the sole page in a single page sized ring buffer. This is the ring
+ * buffer for connection/disconnection requests.
+ *
+ * protocol
+ * Values: string (XEN_IO_PROTO_ABI_*)
+ * Default Value: XEN_IO_PROTO_ABI_NATIVE
+ *
+ * The machine ABI rules governing the format of all ring request and
+ * response structures.
+ *
+ * Protocol Description
+ * ====================
+ *
+ *-------------------------- USB device plug events --------------------------
+ *
+ * USB device plug events are sent via the "conn-ring" shared page. As only
+ * events are being sent, the respective requests from the frontend to the
+ * backend are just dummy ones.
+ * The events sent to the frontend have the following layout:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | portnum | speed | 4
+ * +----------------+----------------+----------------+----------------+
+ * id - uint16_t, event id (taken from the actual frontend dummy request)
+ * portnum - uint8_t, port number (1 ... 31)
+ * speed - uint8_t, device XENUSB_SPEED_*, XENUSB_SPEED_NONE == unplug
+ *
+ * The dummy request:
+ * 0 1 octet
+ * +----------------+----------------+
+ * | id | 2
+ * +----------------+----------------+
+ * id - uint16_t, guest supplied value (no need to be unique)
+ *
+ *-------------------------- USB I/O request ---------------------------------
+ *
+ * A single USB I/O request on the "urb-ring" has the following layout:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | nr_buffer_segs | 4
+ * +----------------+----------------+----------------+----------------+
+ * | pipe | 8
+ * +----------------+----------------+----------------+----------------+
+ * | transfer_flags | buffer_length | 12
+ * +----------------+----------------+----------------+----------------+
+ * | request type specific | 16
+ * | data | 20
+ * +----------------+----------------+----------------+----------------+
+ * | seg[0] | 24
+ * | data | 28
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | seg[XENUSB_MAX_SEGMENTS_PER_REQUEST - 1] | 144
+ * | data | 148
+ * +----------------+----------------+----------------+----------------+
+ * Bit field bit number 0 is always least significant bit, undefined bits must
+ * be zero.
+ * id - uint16_t, guest supplied value
+ * nr_buffer_segs - uint16_t, number of segment entries in seg[] array
+ * pipe - uint32_t, bit field with multiple information:
+ * bits 0-4: port request to send to
+ * bit 5: unlink request with specified id (cancel I/O) if set (see below)
+ * bit 7: direction (1 = read from device)
+ * bits 8-14: device number on port
+ * bits 15-18: endpoint of device
+ * bits 30-31: request type: 00 = isochronous, 01 = interrupt,
+ * 10 = control, 11 = bulk
+ * transfer_flags - uint16_t, bit field with processing flags:
+ * bit 0: less data than specified allowed
+ * buffer_length - uint16_t, total length of data
+ * request type specific data - 8 bytes, see below
+ * seg[] - array with 8 byte elements, see below
+ *
+ * Request type specific data for isochronous request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | interval | start_frame | 4
+ * +----------------+----------------+----------------+----------------+
+ * | number_of_packets | nr_frame_desc_segs | 8
+ * +----------------+----------------+----------------+----------------+
+ * interval - uint16_t, time interval in msecs between frames
+ * start_frame - uint16_t, start frame number
+ * number_of_packets - uint16_t, number of packets to transfer
+ * nr_frame_desc_segs - uint16_t, number of seg[] frame descriptor elements
+ *
+ * Request type specific data for interrupt request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | interval | 0 | 4
+ * +----------------+----------------+----------------+----------------+
+ * | 0 | 8
+ * +----------------+----------------+----------------+----------------+
+ * interval - uint16_t, time in msecs until interruption
+ *
+ * Request type specific data for control request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | data of setup packet | 4
+ * | | 8
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Request type specific data for bulk request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | 0 | 4
+ * | 0 | 8
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Request type specific data for unlink request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | unlink_id | 0 | 4
+ * +----------------+----------------+----------------+----------------+
+ * | 0 | 8
+ * +----------------+----------------+----------------+----------------+
+ * unlink_id - uint16_t, request id of request to terminate
+ *
+ * seg[] array element layout:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | gref | 4
+ * +----------------+----------------+----------------+----------------+
+ * | offset | length | 8
+ * +----------------+----------------+----------------+----------------+
+ * gref - uint32_t, grant reference of buffer page
+ * offset - uint16_t, offset of buffer start in page
+ * length - uint16_t, length of buffer in page
+ *
+ *-------------------------- USB I/O response --------------------------------
+ *
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | start_frame | 4
+ * +----------------+----------------+----------------+----------------+
+ * | status | 8
+ * +----------------+----------------+----------------+----------------+
+ * | actual_length | 12
+ * +----------------+----------------+----------------+----------------+
+ * | error_count | 16
+ * +----------------+----------------+----------------+----------------+
+ * id - uint16_t, id of the request this response belongs to
+ * start_frame - uint16_t, start frame of this response (iso requests only)
+ * status - int32_t, XENUSB_STATUS_* (non-iso requests)
+ * actual_length - uint32_t, actual size of data transferred
+ * error_count - uint32_t, number of errors (iso requests)
+ */
+
+enum xenusb_spec_version {
+ XENUSB_VER_UNKNOWN = 0,
+ XENUSB_VER_USB11,
+ XENUSB_VER_USB20,
+ XENUSB_VER_USB30, /* not supported yet */
+};
+
+/*
+ * USB pipe in xenusb_request
+ *
+ * - port number: bits 0-4
+ * (USB_MAXCHILDREN is 31)
+ *
+ * - operation flag: bit 5
+ * (0 = submit urb,
+ * 1 = unlink urb)
+ *
+ * - direction: bit 7
+ * (0 = Host-to-Device [Out]
+ * 1 = Device-to-Host [In])
+ *
+ * - device address: bits 8-14
+ *
+ * - endpoint: bits 15-18
+ *
+ * - pipe type: bits 30-31
+ * (00 = isochronous, 01 = interrupt,
+ * 10 = control, 11 = bulk)
+ */
+
+#define XENUSB_PIPE_PORT_MASK 0x0000001f
+#define XENUSB_PIPE_UNLINK 0x00000020
+#define XENUSB_PIPE_DIR 0x00000080
+#define XENUSB_PIPE_DEV_MASK 0x0000007f
+#define XENUSB_PIPE_DEV_SHIFT 8
+#define XENUSB_PIPE_EP_MASK 0x0000000f
+#define XENUSB_PIPE_EP_SHIFT 15
+#define XENUSB_PIPE_TYPE_MASK 0x00000003
+#define XENUSB_PIPE_TYPE_SHIFT 30
+#define XENUSB_PIPE_TYPE_ISOC 0
+#define XENUSB_PIPE_TYPE_INT 1
+#define XENUSB_PIPE_TYPE_CTRL 2
+#define XENUSB_PIPE_TYPE_BULK 3
+
+#define xenusb_pipeportnum(pipe) ((pipe) & XENUSB_PIPE_PORT_MASK)
+#define xenusb_setportnum_pipe(pipe, portnum) ((pipe) | (portnum))
+
+#define xenusb_pipeunlink(pipe) ((pipe) & XENUSB_PIPE_UNLINK)
+#define xenusb_pipesubmit(pipe) (!xenusb_pipeunlink(pipe))
+#define xenusb_setunlink_pipe(pipe) ((pipe) | XENUSB_PIPE_UNLINK)
+
+#define xenusb_pipein(pipe) ((pipe) & XENUSB_PIPE_DIR)
+#define xenusb_pipeout(pipe) (!xenusb_pipein(pipe))
+
+#define xenusb_pipedevice(pipe) \
+ (((pipe) >> XENUSB_PIPE_DEV_SHIFT) & XENUSB_PIPE_DEV_MASK)
+
+#define xenusb_pipeendpoint(pipe) \
+ (((pipe) >> XENUSB_PIPE_EP_SHIFT) & XENUSB_PIPE_EP_MASK)
+
+#define xenusb_pipetype(pipe) \
+ (((pipe) >> XENUSB_PIPE_TYPE_SHIFT) & XENUSB_PIPE_TYPE_MASK)
+#define xenusb_pipeisoc(pipe) (xenusb_pipetype(pipe) == XENUSB_PIPE_TYPE_ISOC)
+#define xenusb_pipeint(pipe) (xenusb_pipetype(pipe) == XENUSB_PIPE_TYPE_INT)
+#define xenusb_pipectrl(pipe) (xenusb_pipetype(pipe) == XENUSB_PIPE_TYPE_CTRL)
+#define xenusb_pipebulk(pipe) (xenusb_pipetype(pipe) == XENUSB_PIPE_TYPE_BULK)
+
+#define XENUSB_MAX_SEGMENTS_PER_REQUEST (16)
+#define XENUSB_MAX_PORTNR 31
+#define XENUSB_RING_SIZE 4096
+
+/*
+ * RING for transferring urbs.
+ */
+struct xenusb_request_segment {
+ grant_ref_t gref;
+ uint16_t offset;
+ uint16_t length;
+};
+
+struct xenusb_urb_request {
+ uint16_t id; /* request id */
+ uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */
+
+ /* basic urb parameter */
+ uint32_t pipe;
+ uint16_t transfer_flags;
+#define XENUSB_SHORT_NOT_OK 0x0001
+ uint16_t buffer_length;
+ union {
+ uint8_t ctrl[8]; /* setup_packet (Ctrl) */
+
+ struct {
+ uint16_t interval; /* maximum (1024*8) in usb core */
+ uint16_t start_frame; /* start frame */
+ uint16_t number_of_packets; /* number of ISO packet */
+ uint16_t nr_frame_desc_segs; /* number of iso_frame_desc segments */
+ } isoc;
+
+ struct {
+ uint16_t interval; /* maximum (1024*8) in usb core */
+ uint16_t pad[3];
+ } intr;
+
+ struct {
+ uint16_t unlink_id; /* unlink request id */
+ uint16_t pad[3];
+ } unlink;
+
+ } u;
+
+ /* urb data segments */
+ struct xenusb_request_segment seg[XENUSB_MAX_SEGMENTS_PER_REQUEST];
+};
+
+struct xenusb_urb_response {
+ uint16_t id; /* request id */
+ uint16_t start_frame; /* start frame (ISO) */
+ int32_t status; /* status (non-ISO) */
+#define XENUSB_STATUS_OK 0
+#define XENUSB_STATUS_NODEV (-19)
+#define XENUSB_STATUS_INVAL (-22)
+#define XENUSB_STATUS_STALL (-32)
+#define XENUSB_STATUS_IOERROR (-71)
+#define XENUSB_STATUS_BABBLE (-75)
+#define XENUSB_STATUS_SHUTDOWN (-108)
+ int32_t actual_length; /* actual transfer length */
+ int32_t error_count; /* number of ISO errors */
+};
+
+DEFINE_RING_TYPES(xenusb_urb, struct xenusb_urb_request, struct xenusb_urb_response);
+#define XENUSB_URB_RING_SIZE __CONST_RING_SIZE(xenusb_urb, XENUSB_RING_SIZE)
+
+/*
+ * RING for notifying connect/disconnect events to frontend
+ */
+struct xenusb_conn_request {
+ uint16_t id;
+};
+
+struct xenusb_conn_response {
+ uint16_t id; /* request id */
+ uint8_t portnum; /* port number */
+ uint8_t speed; /* usb_device_speed */
+#define XENUSB_SPEED_NONE 0
+#define XENUSB_SPEED_LOW 1
+#define XENUSB_SPEED_FULL 2
+#define XENUSB_SPEED_HIGH 3
+};
+
+DEFINE_RING_TYPES(xenusb_conn, struct xenusb_conn_request, struct xenusb_conn_response);
+#define XENUSB_CONN_RING_SIZE __CONST_RING_SIZE(xenusb_conn, XENUSB_RING_SIZE)
+
+#endif /* __XEN_PUBLIC_IO_USBIF_H__ */
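
For illustration, the mask/shift definitions above compose like this for a bulk device-to-host transfer (port, device address and endpoint numbers are made up):

/* Build the pipe word for a bulk IN transfer on port 1,
 * device address 3, endpoint 2. */
static uint32_t xenusb_example_pipe(void)
{
	uint32_t pipe = 0;

	pipe = xenusb_setportnum_pipe(pipe, 1);
	pipe |= XENUSB_PIPE_DIR;		/* device-to-host */
	pipe |= 3u << XENUSB_PIPE_DEV_SHIFT;
	pipe |= 2u << XENUSB_PIPE_EP_SHIFT;
	pipe |= (uint32_t)XENUSB_PIPE_TYPE_BULK << XENUSB_PIPE_TYPE_SHIFT;

	return pipe;
}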
diff --git a/include/xen/interface/io/vscsiif.h b/include/xen/interface/io/vscsiif.h
index d07d7aca8d1c..7ea4dc9611c4 100644
--- a/include/xen/interface/io/vscsiif.h
+++ b/include/xen/interface/io/vscsiif.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* vscsiif.h
*
* Based on the blkif.h code.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright(c) FUJITSU Limited 2008.
*/
@@ -60,7 +43,7 @@
*
* A string specifying the backend device: either a 4-tuple "h:c:t:l"
* (host, controller, target, lun, all integers), or a WWN (e.g.
- * "naa.60014054ac780582").
+ * "naa.60014054ac780582:0").
*
* v-dev
* Values: string
@@ -104,6 +87,75 @@
* response structures.
*/
+/*
+ * Xenstore format in practice
+ * ===========================
+ *
+ * The backend driver uses a single_host:many_devices notation to manage domU
+ * devices. Everything is stored in /local/domain/<backend_domid>/backend/vscsi/.
+ * The xenstore layout looks like this (dom0 is assumed to be the backend_domid):
+ *
+ * <domid>/<vhost>/feature-host = "0"
+ * <domid>/<vhost>/frontend = "/local/domain/<domid>/device/vscsi/0"
+ * <domid>/<vhost>/frontend-id = "<domid>"
+ * <domid>/<vhost>/online = "1"
+ * <domid>/<vhost>/state = "4"
+ * <domid>/<vhost>/vscsi-devs/dev-0/p-dev = "8:0:2:1" or "naa.wwn:lun"
+ * <domid>/<vhost>/vscsi-devs/dev-0/state = "4"
+ * <domid>/<vhost>/vscsi-devs/dev-0/v-dev = "0:0:0:0"
+ * <domid>/<vhost>/vscsi-devs/dev-1/p-dev = "8:0:2:2"
+ * <domid>/<vhost>/vscsi-devs/dev-1/state = "4"
+ * <domid>/<vhost>/vscsi-devs/dev-1/v-dev = "0:0:1:0"
+ *
+ * The frontend driver maintains its state in
+ * /local/domain/<domid>/device/vscsi/.
+ *
+ * <vhost>/backend = "/local/domain/0/backend/vscsi/<domid>/<vhost>"
+ * <vhost>/backend-id = "0"
+ * <vhost>/event-channel = "20"
+ * <vhost>/ring-ref = "43"
+ * <vhost>/state = "4"
+ * <vhost>/vscsi-devs/dev-0/state = "4"
+ * <vhost>/vscsi-devs/dev-1/state = "4"
+ *
+ * In addition to the entries for backend and frontend these flags are stored
+ * for the toolstack:
+ *
+ * <domid>/<vhost>/vscsi-devs/dev-1/p-devname = "/dev/$device"
+ * <domid>/<vhost>/libxl_ctrl_index = "0"
+ *
+ *
+ * Backend/frontend protocol
+ * =========================
+ *
+ * To create a vhost along with a device:
+ * <domid>/<vhost>/feature-host = "0"
+ * <domid>/<vhost>/frontend = "/local/domain/<domid>/device/vscsi/0"
+ * <domid>/<vhost>/frontend-id = "<domid>"
+ * <domid>/<vhost>/online = "1"
+ * <domid>/<vhost>/state = "1"
+ * <domid>/<vhost>/vscsi-devs/dev-0/p-dev = "8:0:2:1"
+ * <domid>/<vhost>/vscsi-devs/dev-0/state = "1"
+ * <domid>/<vhost>/vscsi-devs/dev-0/v-dev = "0:0:0:0"
+ * Wait for <domid>/<vhost>/state + <domid>/<vhost>/vscsi-devs/dev-0/state to become 4
+ *
+ * To add another device to a vhost:
+ * <domid>/<vhost>/state = "7"
+ * <domid>/<vhost>/vscsi-devs/dev-1/p-dev = "8:0:2:2"
+ * <domid>/<vhost>/vscsi-devs/dev-1/state = "1"
+ * <domid>/<vhost>/vscsi-devs/dev-1/v-dev = "0:0:1:0"
+ * Wait for <domid>/<vhost>/state + <domid>/<vhost>/vscsi-devs/dev-1/state to become 4
+ *
+ * To remove a device from a vhost:
+ * <domid>/<vhost>/state = "7"
+ * <domid>/<vhost>/vscsi-devs/dev-1/state = "5"
+ * Wait for <domid>/<vhost>/state to become 4
+ * Wait for <domid>/<vhost>/vscsi-devs/dev-1/state to become 6
+ * Remove <domid>/<vhost>/vscsi-devs/dev-1/{state,p-dev,v-dev,p-devname}
+ * Remove <domid>/<vhost>/vscsi-devs/dev-1/
+ *
+ */
+
/* Requests from the frontend to the backend */
/*
@@ -134,7 +186,8 @@
* (plus the set VSCSIIF_SG_GRANT bit), the number of scsiif_request_segment
* elements referencing the target data buffers is calculated from the lengths
* of the seg[] elements (the sum of all valid seg[].length divided by the
- * size of one scsiif_request_segment structure).
+ * size of one scsiif_request_segment structure). The frontend may use a mix of
+ * direct and indirect requests.
*/
#define VSCSIIF_ACT_SCSI_CDB 1
@@ -171,12 +224,14 @@
/*
* based on Linux kernel 2.6.18, still valid
+ *
* Changing these values requires support of multiple protocols via the rings
* as "old clients" will blindly use these values and the resulting structure
* sizes.
*/
#define VSCSIIF_MAX_COMMAND_SIZE 16
#define VSCSIIF_SENSE_BUFFERSIZE 96
+#define VSCSIIF_PAGE_SIZE 4096
struct scsiif_request_segment {
grant_ref_t gref;
@@ -184,7 +239,8 @@ struct scsiif_request_segment {
uint16_t length;
};
-#define VSCSIIF_SG_PER_PAGE (PAGE_SIZE / sizeof(struct scsiif_request_segment))
+#define VSCSIIF_SG_PER_PAGE (VSCSIIF_PAGE_SIZE / \
+ sizeof(struct scsiif_request_segment))
/* Size of one request is 252 bytes */
struct vscsiif_request {
@@ -224,6 +280,58 @@ struct vscsiif_response {
uint32_t reserved[36];
};
+/* SCSI I/O status from vscsiif_response->rslt */
+#define XEN_VSCSIIF_RSLT_STATUS(x) ((x) & 0x00ff)
+
+/* Host I/O status from vscsiif_response->rslt */
+#define XEN_VSCSIIF_RSLT_HOST(x) (((x) & 0x00ff0000) >> 16)
+#define XEN_VSCSIIF_RSLT_HOST_OK 0
+/* Couldn't connect before timeout */
+#define XEN_VSCSIIF_RSLT_HOST_NO_CONNECT 1
+/* Bus busy through timeout */
+#define XEN_VSCSIIF_RSLT_HOST_BUS_BUSY 2
+/* Timed out for other reason */
+#define XEN_VSCSIIF_RSLT_HOST_TIME_OUT 3
+/* Bad target */
+#define XEN_VSCSIIF_RSLT_HOST_BAD_TARGET 4
+/* Abort for some other reason */
+#define XEN_VSCSIIF_RSLT_HOST_ABORT 5
+/* Parity error */
+#define XEN_VSCSIIF_RSLT_HOST_PARITY 6
+/* Internal error */
+#define XEN_VSCSIIF_RSLT_HOST_ERROR 7
+/* Reset by somebody */
+#define XEN_VSCSIIF_RSLT_HOST_RESET 8
+/* Unexpected interrupt */
+#define XEN_VSCSIIF_RSLT_HOST_BAD_INTR 9
+/* Force command past mid-layer */
+#define XEN_VSCSIIF_RSLT_HOST_PASSTHROUGH 10
+/* Retry requested */
+#define XEN_VSCSIIF_RSLT_HOST_SOFT_ERROR 11
+/* Hidden retry requested */
+#define XEN_VSCSIIF_RSLT_HOST_IMM_RETRY 12
+/* Requeue command requested */
+#define XEN_VSCSIIF_RSLT_HOST_REQUEUE 13
+/* Transport error disrupted I/O */
+#define XEN_VSCSIIF_RSLT_HOST_TRANSPORT_DISRUPTED 14
+/* Transport class fastfailed */
+#define XEN_VSCSIIF_RSLT_HOST_TRANSPORT_FAILFAST 15
+/* Permanent target failure */
+#define XEN_VSCSIIF_RSLT_HOST_TARGET_FAILURE 16
+/* Permanent nexus failure on path */
+#define XEN_VSCSIIF_RSLT_HOST_NEXUS_FAILURE 17
+/* Space allocation on device failed */
+#define XEN_VSCSIIF_RSLT_HOST_ALLOC_FAILURE 18
+/* Medium error */
+#define XEN_VSCSIIF_RSLT_HOST_MEDIUM_ERROR 19
+/* Transport marginal errors */
+#define XEN_VSCSIIF_RSLT_HOST_TRANSPORT_MARGINAL 20
+
+/* Result values of reset operations */
+#define XEN_VSCSIIF_RSLT_RESET_SUCCESS 0x2002
+#define XEN_VSCSIIF_RSLT_RESET_FAILED 0x2003
+
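
A sketch of how a frontend might decode rslt with these macros (the helper
name is ours; SCSI status 0 is GOOD):

/* True when both the host transport and the SCSI command itself
 * completed successfully. */
static inline int vscsiif_rslt_ok(uint32_t rslt)
{
	return XEN_VSCSIIF_RSLT_HOST(rslt) == XEN_VSCSIIF_RSLT_HOST_OK &&
	       XEN_VSCSIIF_RSLT_STATUS(rslt) == 0;
}
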
DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response);
-#endif /*__XEN__PUBLIC_IO_SCSI_H__*/
+
+#endif /*__XEN__PUBLIC_IO_SCSI_H__*/
diff --git a/include/xen/interface/io/xenbus.h b/include/xen/interface/io/xenbus.h
index aaf2951b1cce..44456e2853fc 100644
--- a/include/xen/interface/io/xenbus.h
+++ b/include/xen/interface/io/xenbus.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*****************************************************************************
* xenbus.h
*
@@ -39,13 +39,3 @@ enum xenbus_state
};
#endif /* _XEN_PUBLIC_IO_XENBUS_H */
-
-/*
- * Local variables:
- * c-file-style: "linux"
- * indent-tabs-mode: t
- * c-indent-level: 8
- * c-basic-offset: 8
- * tab-width: 8
- * End:
- */
diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
index 1517c7e93a3a..b62365478ac0 100644
--- a/include/xen/interface/io/xs_wire.h
+++ b/include/xen/interface/io/xs_wire.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* Details of the "wire" protocol between Xen Store Daemon and client
* library or guest kernel.
@@ -10,7 +10,8 @@
enum xsd_sockmsg_type
{
- XS_DEBUG,
+ XS_CONTROL,
+#define XS_DEBUG XS_CONTROL
XS_DIRECTORY,
XS_READ,
XS_GET_PERMS,
@@ -30,8 +31,13 @@ enum xsd_sockmsg_type
XS_IS_DOMAIN_INTRODUCED,
XS_RESUME,
XS_SET_TARGET,
- XS_RESTRICT,
- XS_RESET_WATCHES,
+ /* XS_RESTRICT has been removed */
+ XS_RESET_WATCHES = XS_SET_TARGET + 2,
+ XS_DIRECTORY_PART,
+
+ XS_TYPE_COUNT, /* Number of valid types. */
+
+ XS_INVALID = 0xffff /* Guaranteed to remain an invalid type */
};
#define XS_WRITE_NONE "NONE"
@@ -59,7 +65,8 @@ static struct xsd_errors xsd_errors[] __attribute__((unused)) = {
XSD_ERROR(EROFS),
XSD_ERROR(EBUSY),
XSD_ERROR(EAGAIN),
- XSD_ERROR(EISCONN)
+ XSD_ERROR(EISCONN),
+ XSD_ERROR(E2BIG)
};
struct xsd_sockmsg
@@ -87,9 +94,31 @@ struct xenstore_domain_interface {
char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */
XENSTORE_RING_IDX req_cons, req_prod;
XENSTORE_RING_IDX rsp_cons, rsp_prod;
+ uint32_t server_features; /* Bitmap of features supported by the server */
+ uint32_t connection;
+ uint32_t error;
};
/* Violating this is very bad. See docs/misc/xenstore.txt. */
#define XENSTORE_PAYLOAD_MAX 4096
+/* Violating these just gets you an error back */
+#define XENSTORE_ABS_PATH_MAX 3072
+#define XENSTORE_REL_PATH_MAX 2048
+
+/* The ability to reconnect a ring */
+#define XENSTORE_SERVER_FEATURE_RECONNECTION 1
+/* The presence of the "error" field in the ring page */
+#define XENSTORE_SERVER_FEATURE_ERROR 2
+
+/* Valid values for the connection field */
+#define XENSTORE_CONNECTED 0 /* the steady-state */
+#define XENSTORE_RECONNECT 1 /* guest has initiated a reconnect */
+
+/* Valid values for the error field */
+#define XENSTORE_ERROR_NONE 0 /* No error */
+#define XENSTORE_ERROR_COMM 1 /* Communication problem */
+#define XENSTORE_ERROR_RINGIDX 2 /* Invalid ring index */
+#define XENSTORE_ERROR_PROTO 3 /* Protocol violation (payload too long) */
+
#endif /* _XS_WIRE_H */
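
Taken together, a client-side sketch of how the new ring fields might be
consulted (helper names ours; each field is only meaningful once the server
advertises the matching feature bit):

static inline uint32_t xs_ring_error(const struct xenstore_domain_interface *intf)
{
	/* The error field only exists for servers advertising the feature. */
	if (!(intf->server_features & XENSTORE_SERVER_FEATURE_ERROR))
		return XENSTORE_ERROR_NONE;
	return intf->error;
}

static inline int xs_ring_reconnect_in_flight(const struct xenstore_domain_interface *intf)
{
	return (intf->server_features & XENSTORE_SERVER_FEATURE_RECONNECTION) &&
	       intf->connection == XENSTORE_RECONNECT;
}
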
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h
index 447004861f00..1a371a825c55 100644
--- a/include/xen/interface/memory.h
+++ b/include/xen/interface/memory.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* memory.h
*
diff --git a/include/xen/interface/nmi.h b/include/xen/interface/nmi.h
index 73d9b0a2974e..b665fdbef1fd 100644
--- a/include/xen/interface/nmi.h
+++ b/include/xen/interface/nmi.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* nmi.h
*
diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h
index 610dba9b620a..a237af867873 100644
--- a/include/xen/interface/physdev.h
+++ b/include/xen/interface/physdev.h
@@ -1,22 +1,4 @@
-/*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
+/* SPDX-License-Identifier: MIT */
#ifndef __XEN_PUBLIC_PHYSDEV_H__
#define __XEN_PUBLIC_PHYSDEV_H__
diff --git a/include/xen/interface/platform.h b/include/xen/interface/platform.h
index 732efb08c3e1..655d92e803e1 100644
--- a/include/xen/interface/platform.h
+++ b/include/xen/interface/platform.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* platform.h
*
* Hardware platform operations. Intended for use by domain-0 kernel.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2002-2006, K Fraser
*/
diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h
index a4c4d735d781..4dac0634ffff 100644
--- a/include/xen/interface/sched.h
+++ b/include/xen/interface/sched.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* sched.h
*
* Scheduler state interactions
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2005, Keir Fraser <keir@xensource.com>
*/
diff --git a/include/xen/interface/vcpu.h b/include/xen/interface/vcpu.h
index 504c71601511..c7cc28ad8d63 100644
--- a/include/xen/interface/vcpu.h
+++ b/include/xen/interface/vcpu.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* vcpu.h
*
* VCPU initialisation, query, and hotplug.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2005, Keir Fraser <keir@xensource.com>
*/
diff --git a/include/xen/interface/version.h b/include/xen/interface/version.h
index 8772b552c006..37d6588873d6 100644
--- a/include/xen/interface/version.h
+++ b/include/xen/interface/version.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* version.h
*
diff --git a/include/xen/interface/xen-mca.h b/include/xen/interface/xen-mca.h
index 7483a78d2425..464aa6b3a5f9 100644
--- a/include/xen/interface/xen-mca.h
+++ b/include/xen/interface/xen-mca.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* arch-x86/mca.h
* Guest OS machine check interface to x86 Xen.
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index 8bfb242f433e..0ca23eca2a9c 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* xen.h
*
* Guest OS interface to Xen.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2004, K A Fraser
*/
@@ -598,7 +581,9 @@ struct shared_info {
* their gettimeofday() syscall on this wallclock-base value.
*/
struct pvclock_wall_clock wc;
-
+#ifndef CONFIG_X86_32
+ uint32_t wc_sec_hi;
+#endif
struct arch_shared_info arch;
};
@@ -737,6 +722,9 @@ struct dom0_vga_console_info {
uint32_t gbl_caps;
/* Mode attributes (offset 0x0, VESA command 0x4f01). */
uint16_t mode_attrs;
+ uint16_t pad;
+ /* high 32 bits of lfb_base */
+ uint32_t ext_lfb_base;
} vesa_lfb;
} u;
};
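
With the new field, the full 64-bit framebuffer address is assembled from the
pre-existing 32-bit lfb_base (not shown in this hunk) plus ext_lfb_base; a
sketch (helper name ours):

static inline uint64_t vesa_lfb_base64(const struct dom0_vga_console_info *info)
{
	return ((uint64_t)info->u.vesa_lfb.ext_lfb_base << 32) |
	       info->u.vesa_lfb.lfb_base;
}
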
diff --git a/include/xen/interface/xenpmu.h b/include/xen/interface/xenpmu.h
index ad603eab24b3..e2ee73d91bd6 100644
--- a/include/xen/interface/xenpmu.h
+++ b/include/xen/interface/xenpmu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __XEN_PUBLIC_XENPMU_H__
#define __XEN_PUBLIC_XENPMU_H__
diff --git a/include/xen/page.h b/include/xen/page.h
index df6d6b6ec66e..285677b42943 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -24,7 +24,6 @@
#define XEN_PFN_DOWN(x) ((x) >> XEN_PAGE_SHIFT)
#define XEN_PFN_UP(x) (((x) + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT)
-#define XEN_PFN_PHYS(x) ((phys_addr_t)(x) << XEN_PAGE_SHIFT)
#include <asm/xen/page.h>
diff --git a/include/xen/pci.h b/include/xen/pci.h
new file mode 100644
index 000000000000..b8337cf85fd1
--- /dev/null
+++ b/include/xen/pci.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __XEN_PCI_H__
+#define __XEN_PCI_H__
+
+#if defined(CONFIG_XEN_DOM0)
+int xen_find_device_domain_owner(struct pci_dev *dev);
+int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
+int xen_unregister_device_domain_owner(struct pci_dev *dev);
+#else
+static inline int xen_find_device_domain_owner(struct pci_dev *dev)
+{
+ return -1;
+}
+
+static inline int xen_register_device_domain_owner(struct pci_dev *dev,
+ uint16_t domain)
+{
+ return -1;
+}
+
+static inline int xen_unregister_device_domain_owner(struct pci_dev *dev)
+{
+ return -1;
+}
+#endif
+
+#endif
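
Because the !CONFIG_XEN_DOM0 stubs return -1, callers may use the lookup
unconditionally and treat a negative result as "no explicit owner"; a hedged
sketch (function name and dom0 fallback are ours):

static inline uint16_t pci_dev_owner_or_dom0(struct pci_dev *dev)
{
	int domid = xen_find_device_domain_owner(dev);

	/* A negative result means no owner was registered; assume dom0. */
	return domid >= 0 ? (uint16_t)domid : 0;
}
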
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index ffc0d3902b71..808d17ad8d57 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -3,13 +3,13 @@
#define __LINUX_SWIOTLB_XEN_H
#include <linux/swiotlb.h>
+#include <asm/xen/swiotlb-xen.h>
-void xen_dma_sync_for_cpu(dma_addr_t handle, phys_addr_t paddr, size_t size,
- enum dma_data_direction dir);
-void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size,
- enum dma_data_direction dir);
+void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir);
+void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir);
-extern int xen_swiotlb_init(int verbose, bool early);
extern const struct dma_map_ops xen_swiotlb_dma_ops;
#endif /* __LINUX_SWIOTLB_XEN_H */
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 095be1d66f31..a34f4271a2e9 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -5,6 +5,7 @@
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/efi.h>
+#include <linux/virtio_anchor.h>
#include <xen/features.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>
@@ -42,34 +43,15 @@ int xen_setup_shutdown_event(void);
extern unsigned long *xen_contiguous_bitmap;
-#if defined(CONFIG_XEN_PV) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
- unsigned int address_bits,
- dma_addr_t *dma_handle);
-
-void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
-#else
-static inline int xen_create_contiguous_region(phys_addr_t pstart,
- unsigned int order,
- unsigned int address_bits,
- dma_addr_t *dma_handle)
-{
- return 0;
-}
-
-static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
- unsigned int order) { }
-#endif
-
#if defined(CONFIG_XEN_PV)
int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
- unsigned int domid, bool no_translate, struct page **pages);
+ unsigned int domid, bool no_translate);
#else
static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
xen_pfn_t *pfn, int nr, int *err_ptr,
pgprot_t prot, unsigned int domid,
- bool no_translate, struct page **pages)
+ bool no_translate)
{
BUG();
return 0;
@@ -146,7 +128,7 @@ static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
*/
BUG_ON(err_ptr == NULL);
return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
- false, pages);
+ false);
}
/*
@@ -158,7 +140,6 @@ static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
* @err_ptr: Returns per-MFN error status.
* @prot: page protection mask
* @domid: Domain owning the pages
- * @pages: Array of pages if this domain has an auto-translated physmap
*
* @mfn and @err_ptr may point to the same buffer, the MFNs will be
* overwritten by the error codes after they are mapped.
@@ -169,14 +150,13 @@ static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
unsigned long addr, xen_pfn_t *mfn,
int nr, int *err_ptr,
- pgprot_t prot, unsigned int domid,
- struct page **pages)
+ pgprot_t prot, unsigned int domid)
{
if (xen_feature(XENFEAT_auto_translated_physmap))
return -EOPNOTSUPP;
return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
- true, pages);
+ true);
}
/* xen_remap_domain_gfn_range() - map a range of foreign frames
@@ -200,8 +180,7 @@ static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
if (xen_feature(XENFEAT_auto_translated_physmap))
return -EOPNOTSUPP;
- return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
- pages);
+ return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false);
}
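
A sketch of a PV-only caller using the trimmed signature (wrapper and names
ours; per the comment above, @mfn and @err_ptr may even share a buffer, though
this sketch keeps them separate):

static int map_foreign_mfns(struct vm_area_struct *vma, unsigned long addr,
			    xen_pfn_t *mfns, int *errs, int nr,
			    unsigned int domid)
{
	int i, mapped;

	mapped = xen_remap_domain_mfn_array(vma, addr, mfns, nr, errs,
					    vma->vm_page_prot, domid);
	if (mapped < 0)
		return mapped;

	/* Per-frame errors are reported through errs[]. */
	for (i = 0; i < nr; i++)
		if (errs[i])
			return errs[i];
	return 0;
}
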
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
@@ -215,30 +194,52 @@ bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);
void xen_efi_runtime_setup(void);
-#ifdef CONFIG_PREEMPTION
+#if defined(CONFIG_XEN_PV) && !defined(CONFIG_PREEMPTION)
+
+DECLARE_PER_CPU(bool, xen_in_preemptible_hcall);
static inline void xen_preemptible_hcall_begin(void)
{
+ __this_cpu_write(xen_in_preemptible_hcall, true);
}
static inline void xen_preemptible_hcall_end(void)
{
+ __this_cpu_write(xen_in_preemptible_hcall, false);
}
#else
-DECLARE_PER_CPU(bool, xen_in_preemptible_hcall);
+static inline void xen_preemptible_hcall_begin(void) { }
+static inline void xen_preemptible_hcall_end(void) { }
-static inline void xen_preemptible_hcall_begin(void)
+#endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */
+
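
With the reshuffled #ifdefs the helpers do real work only for CONFIG_XEN_PV
without CONFIG_PREEMPTION and compile to nothing elsewhere, so callers can
bracket long-running hypercalls unconditionally; a sketch (assuming the arch's
HYPERVISOR_platform_op hypercall wrapper; function name ours):

static long xen_platform_op_preemptible(struct xen_platform_op *op)
{
	long rc;

	/* Allow the event upcall to reschedule us mid-hypercall. */
	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_platform_op(op);
	xen_preemptible_hcall_end();
	return rc;
}
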
+#ifdef CONFIG_XEN_GRANT_DMA_OPS
+void xen_grant_setup_dma_ops(struct device *dev);
+bool xen_is_grant_dma_device(struct device *dev);
+bool xen_virtio_mem_acc(struct virtio_device *dev);
+bool xen_virtio_restricted_mem_acc(struct virtio_device *dev);
+#else
+static inline void xen_grant_setup_dma_ops(struct device *dev)
{
- __this_cpu_write(xen_in_preemptible_hcall, true);
+}
+static inline bool xen_is_grant_dma_device(struct device *dev)
+{
+ return false;
}
-static inline void xen_preemptible_hcall_end(void)
+struct virtio_device;
+
+static inline bool xen_virtio_mem_acc(struct virtio_device *dev)
{
- __this_cpu_write(xen_in_preemptible_hcall, false);
+ return false;
}
-#endif /* CONFIG_PREEMPTION */
+static inline bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
+{
+ return false;
+}
+#endif /* CONFIG_XEN_GRANT_DMA_OPS */
#endif /* INCLUDE_XEN_OPS_H */
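
A sketch of how the virtio hooks are meant to be wired up during early guest
init, assuming virtio_set_mem_acc_cb() from the <linux/virtio_anchor.h>
include added above (the guard and function name are ours):

#include <linux/virtio_anchor.h>
#include <xen/xen.h>
#include <xen/xen-ops.h>

static void __init xen_virtio_setup_example(void)
{
	/* Restrict virtio DMA to grant-mapped memory for Xen guests. */
	if (xen_domain())
		virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc);
}
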
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 19a72f591e2b..a99bab817523 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -52,4 +52,23 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
extern u64 xen_saved_max_mem_size;
#endif
+#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
+int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages);
+void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);
+#include <linux/ioport.h>
+int arch_xen_unpopulated_init(struct resource **res);
+#else
+#include <xen/balloon.h>
+static inline int xen_alloc_unpopulated_pages(unsigned int nr_pages,
+ struct page **pages)
+{
+ return xen_alloc_ballooned_pages(nr_pages, pages);
+}
+static inline void xen_free_unpopulated_pages(unsigned int nr_pages,
+ struct page **pages)
+{
+ xen_free_ballooned_pages(nr_pages, pages);
+}
+#endif
+
#endif /* _XEN_XEN_H */
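
A usage sketch: the same call sites work whether CONFIG_XEN_UNPOPULATED_ALLOC
is set (real unpopulated memory) or not (ballooned pages); helper names ours:

static struct page *xen_get_scratch_page(void)
{
	struct page *page;

	if (xen_alloc_unpopulated_pages(1, &page))
		return NULL;
	return page;
}

static void xen_put_scratch_page(struct page *page)
{
	xen_free_unpopulated_pages(1, &page);
}
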
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 850a43bd69d3..eaa932b99d8a 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -47,10 +47,10 @@
#include <xen/interface/grant_table.h>
#include <xen/interface/io/xenbus.h>
#include <xen/interface/io/xs_wire.h>
+#include <xen/interface/event_channel.h>
#define XENBUS_MAX_RING_GRANT_ORDER 4
#define XENBUS_MAX_RING_GRANTS (1U << XENBUS_MAX_RING_GRANT_ORDER)
-#define INVALID_GRANT_HANDLE (~0U)
/* Register callback to watch this node. */
struct xenbus_watch
@@ -60,6 +60,15 @@ struct xenbus_watch
/* Path being watched. */
const char *node;
+ unsigned int nr_pending;
+
+ /*
+	 * Called just before enqueuing a new event while a spinlock is held.
+ * The event will be discarded if this callback returns false.
+ */
+ bool (*will_handle)(struct xenbus_watch *,
+ const char *path, const char *token);
+
/* Callback (executed in a process context with no locks held). */
void (*callback)(struct xenbus_watch *,
const char *path, const char *token);
@@ -78,6 +87,13 @@ struct xenbus_device {
struct completion down;
struct work_struct work;
struct semaphore reclaim_sem;
+
+ /* Event channel based statistics and settings. */
+ atomic_t event_channels;
+ atomic_t events;
+ atomic_t spurious_events;
+ atomic_t jiffies_eoi_delayed;
+ unsigned int spurious_threshold;
};
static inline struct xenbus_device *to_xenbus_device(struct device *dev)
@@ -96,6 +112,7 @@ struct xenbus_driver {
const char *name; /* defaults to ids[0].devicetype */
const struct xenbus_device_id *ids;
bool allow_rebind; /* avoid setting xenstore closed during remove */
+ bool not_essential; /* is not mandatory for boot progress */
int (*probe)(struct xenbus_device *dev,
const struct xenbus_device_id *id);
void (*otherend_changed)(struct xenbus_device *dev,
@@ -182,8 +199,6 @@ void xs_suspend_cancel(void);
struct work_struct;
-void xenbus_probe(struct work_struct *);
-
#define XENBUS_IS_ERR_READ(str) ({ \
if (!IS_ERR(str) && strlen(str) == 0) { \
kfree(str); \
@@ -196,31 +211,30 @@ void xenbus_probe(struct work_struct *);
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
struct xenbus_watch *watch,
+ bool (*will_handle)(struct xenbus_watch *,
+ const char *, const char *),
void (*callback)(struct xenbus_watch *,
const char *, const char *));
-__printf(4, 5)
+__printf(5, 6)
int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch,
+ bool (*will_handle)(struct xenbus_watch *,
+ const char *, const char *),
void (*callback)(struct xenbus_watch *,
const char *, const char *),
const char *pathfmt, ...);
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state);
-int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
+int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
unsigned int nr_pages, grant_ref_t *grefs);
+void xenbus_teardown_ring(void **vaddr, unsigned int nr_pages,
+ grant_ref_t *grefs);
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
unsigned int nr_grefs, void **vaddr);
-int xenbus_map_ring(struct xenbus_device *dev,
- grant_ref_t *gnt_refs, unsigned int nr_grefs,
- grant_handle_t *handles, unsigned long *vaddrs,
- bool *leaked);
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr);
-int xenbus_unmap_ring(struct xenbus_device *dev,
- grant_handle_t *handles, unsigned int nr_handles,
- unsigned long *vaddrs);
-int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port);
-int xenbus_free_evtchn(struct xenbus_device *dev, int port);
+int xenbus_alloc_evtchn(struct xenbus_device *dev, evtchn_port_t *port);
+int xenbus_free_evtchn(struct xenbus_device *dev, evtchn_port_t port);
enum xenbus_state xenbus_read_driver_state(const char *path);
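
A sketch of the new watch registration flow: the will_handle filter runs under
a spinlock before the event is queued and can suppress duplicates via the new
nr_pending counter (callback bodies and names are ours; we assume the device's
otherend path field):

static bool example_will_handle(struct xenbus_watch *watch,
				const char *path, const char *token)
{
	/* Drop the event if one for this watch is already queued. */
	return watch->nr_pending == 0;
}

static void example_changed(struct xenbus_watch *watch,
			    const char *path, const char *token)
{
	/* Runs later in process context, no locks held. */
}

static int example_watch_otherend(struct xenbus_device *dev,
				  struct xenbus_watch *watch)
{
	return xenbus_watch_path(dev, dev->otherend, watch,
				 example_will_handle, example_changed);
}
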
diff --git a/include/xen/xenbus_dev.h b/include/xen/xenbus_dev.h
index bbee8c6a349d..4dc45a51c044 100644
--- a/include/xen/xenbus_dev.h
+++ b/include/xen/xenbus_dev.h
@@ -1,6 +1,4 @@
/******************************************************************************
- * evtchn.h
- *
* Interface to /dev/xen/xenbus_backend.
*
* Copyright (c) 2011 Bastian Blank <waldi@debian.org>